-rw-r--r--.clang-format43
-rw-r--r--.mailmap1
-rw-r--r--CREDITS22
-rw-r--r--Documentation/ABI/stable/sysfs-bus-vmbus33
-rw-r--r--Documentation/ABI/stable/sysfs-driver-mlxreg-io6
-rw-r--r--Documentation/ABI/testing/debugfs-driver-habanalabs126
-rw-r--r--Documentation/ABI/testing/sysfs-block9
-rw-r--r--Documentation/ABI/testing/sysfs-block-zram11
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio21
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-sps3028
-rw-r--r--Documentation/ABI/testing/sysfs-bus-intel_th-output-devices6
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb2
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-trigger-pattern51
-rw-r--r--Documentation/ABI/testing/sysfs-driver-habanalabs190
-rw-r--r--Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg18
-rw-r--r--Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html26
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html6
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg2
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg8
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg6
-rw-r--r--Documentation/RCU/Design/Requirements/Requirements.html20
-rw-r--r--Documentation/RCU/stallwarn.txt15
-rw-r--r--Documentation/RCU/torture.txt169
-rw-r--r--Documentation/RCU/whatisRCU.txt4
-rw-r--r--Documentation/acpi/initrd_table_override.txt4
-rw-r--r--Documentation/admin-guide/LSM/SafeSetID.rst107
-rw-r--r--Documentation/admin-guide/LSM/index.rst14
-rw-r--r--Documentation/admin-guide/README.rst32
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst18
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt71
-rw-r--r--Documentation/admin-guide/mm/pagemap.rst9
-rw-r--r--Documentation/admin-guide/pm/cpuidle.rst104
-rw-r--r--Documentation/arm64/silicon-errata.txt2
-rw-r--r--Documentation/block/bfq-iosched.txt7
-rw-r--r--Documentation/block/null_blk.txt3
-rw-r--r--Documentation/block/queue-sysfs.txt7
-rw-r--r--Documentation/blockdev/zram.txt74
-rw-r--r--Documentation/bpf/bpf_design_QA.rst35
-rw-r--r--Documentation/bpf/btf.rst848
-rw-r--r--Documentation/bpf/index.rst7
-rw-r--r--Documentation/cgroup-v1/memcg_test.txt4
-rw-r--r--Documentation/cgroup-v1/memory.txt4
-rw-r--r--Documentation/cgroup-v1/pids.txt3
-rw-r--r--Documentation/core-api/refcount-vs-atomic.rst24
-rw-r--r--Documentation/core-api/xarray.rst15
-rw-r--r--Documentation/cpuidle/driver.txt37
-rw-r--r--Documentation/cpuidle/governor.txt28
-rw-r--r--Documentation/devicetree/bindings/Makefile6
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/armadeus.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/bhf.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/bitmain.yaml18
-rw-r--r--Documentation/devicetree/bindings/arm/compulab-boards.txt25
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-capacity.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,imx7ulp-sim.txt16
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt237
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml232
-rw-r--r--Documentation/devicetree/bindings/arm/i2se.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/olimex.txt10
-rw-r--r--Documentation/devicetree/bindings/arm/renesas.yaml238
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml17
-rw-r--r--Documentation/devicetree/bindings/arm/shmobile.txt155
-rw-r--r--Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml22
-rw-r--r--Documentation/devicetree/bindings/arm/sp810.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/technologic.txt23
-rw-r--r--Documentation/devicetree/bindings/arm/tegra.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/topology.txt2
-rw-r--r--Documentation/devicetree/bindings/bus/imx-weim.txt32
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,mmp2.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt83
-rw-r--r--Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt6
-rw-r--r--Documentation/devicetree/bindings/crypto/samsung-slimsss.txt19
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt33
-rw-r--r--Documentation/devicetree/bindings/display/arm,pl11x.txt2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt21
-rw-r--r--Documentation/devicetree/bindings/display/msm/gpu.txt1
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt27
-rw-r--r--Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt1
-rw-r--r--Documentation/devicetree/bindings/firmware/nvidia,tegra210-bpmp.txt35
-rw-r--r--Documentation/devicetree/bindings/gnss/gnss.txt1
-rw-r--r--Documentation/devicetree/bindings/gnss/mediatek.txt35
-rw-r--r--Documentation/devicetree/bindings/gnss/sirfstar.txt1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.txt2
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt6
-rw-r--r--Documentation/devicetree/bindings/hwmon/ad741x.txt15
-rw-r--r--Documentation/devicetree/bindings/hwmon/dps650ab.txt11
-rw-r--r--Documentation/devicetree/bindings/hwmon/hih6130.txt12
-rw-r--r--Documentation/devicetree/bindings/hwmon/ina3221.txt10
-rw-r--r--Documentation/devicetree/bindings/hwmon/lm75.txt37
-rw-r--r--Documentation/devicetree/bindings/hwmon/pwm-fan.txt3
-rw-r--r--Documentation/devicetree/bindings/iio/accel/mma8452.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt65
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt41
-rw-r--r--Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt48
-rw-r--r--Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt (renamed from Documentation/devicetree/bindings/staging/iio/adc/lpc32xx-adc.txt)0
-rw-r--r--Documentation/devicetree/bindings/iio/adc/nuvoton,npcm-adc.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti-ads124s08.txt25
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/bme680.txt11
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt20
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/sensirion,sgp30.txt15
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.txt12
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt28
-rw-r--r--Documentation/devicetree/bindings/iio/impedance-analyzer/ad5933.txt26
-rw-r--r--Documentation/devicetree/bindings/iio/imu/bmi160.txt6
-rw-r--r--Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/light/max44009.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt1
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt8
-rw-r--r--Documentation/devicetree/bindings/interconnect/interconnect.txt60
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt24
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt11
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt24
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt1
-rw-r--r--Documentation/devicetree/bindings/leds/common.txt12
-rw-r--r--Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt49
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt17
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt36
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/rcu.txt18
-rw-r--r--Documentation/devicetree/bindings/misc/qcom,fastrpc.txt78
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt6
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap.txt28
-rw-r--r--Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt60
-rw-r--r--Documentation/devicetree/bindings/mtd/cadence-quadspi.txt1
-rw-r--r--Documentation/devicetree/bindings/mtd/mtk-quadspi.txt3
-rw-r--r--Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt61
-rw-r--r--Documentation/devicetree/bindings/net/btusb.txt3
-rw-r--r--Documentation/devicetree/bindings/net/cpsw-phy-sel.txt2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/ksz.txt145
-rw-r--r--Documentation/devicetree/bindings/net/dsa/mt7530.txt6
-rw-r--r--Documentation/devicetree/bindings/net/fsl-enetc.txt69
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt4
-rw-r--r--Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt2
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt82
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-bluetooth.txt64
-rw-r--r--Documentation/devicetree/bindings/net/nixge.txt72
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ethqos.txt64
-rw-r--r--Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt19
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.txt4
-rw-r--r--Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt46
-rw-r--r--Documentation/devicetree/bindings/opp/opp.txt3
-rw-r--r--Documentation/devicetree/bindings/phy/cdns,dphy.txt20
-rw-r--r--Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt40
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt65
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt38
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt9
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt1
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/ti-phy.txt1
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt3
-rw-r--r--Documentation/devicetree/bindings/power/qcom,rpmpd.txt145
-rw-r--r--Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt25
-rw-r--r--Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt34
-rw-r--r--Documentation/devicetree/bindings/ptp/ptp-qoriq.txt5
-rw-r--r--Documentation/devicetree/bindings/regulator/fan53555.txt3
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.txt35
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.yaml67
-rw-r--r--Documentation/devicetree/bindings/regulator/max77650-regulator.txt41
-rw-r--r--Documentation/devicetree/bindings/regulator/pfuze100.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt68
-rw-r--r--Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt38
-rw-r--r--Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt6
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65218.txt9
-rw-r--r--Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt27
-rw-r--r--Documentation/devicetree/bindings/reset/fsl,imx7-src.txt7
-rw-r--r--Documentation/devicetree/bindings/reset/socfpga-reset.txt3
-rw-r--r--Documentation/devicetree/bindings/reset/uniphier-reset.txt25
-rw-r--r--Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt52
-rw-r--r--Documentation/devicetree/bindings/serial/8250.txt1
-rw-r--r--Documentation/devicetree/bindings/serial/ingenic,uart.txt3
-rw-r--r--Documentation/devicetree/bindings/serial/milbeaut-uart.txt21
-rw-r--r--Documentation/devicetree/bindings/serial/nvidia,tegra194-tcu.txt35
-rw-r--r--Documentation/devicetree/bindings/serial/omap_serial.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/pl011.txt51
-rw-r--r--Documentation/devicetree/bindings/serial/pl011.yaml126
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt10
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt76
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml140
-rw-r--r--Documentation/devicetree/bindings/serio/olpc,ap-sp.txt4
-rw-r--r--Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt46
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt1
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/adi,adau1977.txt13
-rw-r--r--Documentation/devicetree/bindings/sound/ak4458.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt123
-rw-r--r--Documentation/devicetree/bindings/sound/cs35l36.txt168
-rw-r--r--Documentation/devicetree/bindings/sound/cs4341.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,micfil.txt32
-rw-r--r--Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt26
-rw-r--r--Documentation/devicetree/bindings/sound/ingenic,jz4725b-codec.txt20
-rw-r--r--Documentation/devicetree/bindings/sound/ingenic,jz4740-codec.txt20
-rw-r--r--Documentation/devicetree/bindings/sound/mt6358.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt36
-rw-r--r--Documentation/devicetree/bindings/sound/mtk-btcvsd-snd.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt7
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wcd9335.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt23
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.txt9
-rw-r--r--Documentation/devicetree/bindings/sound/simple-scu-card.txt94
-rw-r--r--Documentation/devicetree/bindings/sound/sprd-pcm.txt23
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt29
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,spdif.txt28
-rw-r--r--Documentation/devicetree/bindings/spi/atmel-quadspi.txt12
-rw-r--r--Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt (renamed from Documentation/devicetree/bindings/mtd/fsl-quadspi.txt)18
-rw-r--r--Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt39
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sifive.txt37
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sprd.txt7
-rw-r--r--Documentation/devicetree/bindings/spi/spi-stm32.txt9
-rw-r--r--Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt24
-rw-r--r--Documentation/devicetree/bindings/sram/sunxi-sram.txt1
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,imxgpt.txt39
-rw-r--r--Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt11
-rw-r--r--Documentation/devicetree/bindings/timer/nvidia,tegra210-timer.txt36
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,cmt.txt2
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tmu.txt1
-rw-r--r--Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt17
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt24
-rw-r--r--Documentation/devicetree/bindings/usb/keystone-usb.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usb3.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/usb251xb.txt4
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt7
-rw-r--r--Documentation/driver-api/80211/mac80211.rst3
-rw-r--r--Documentation/driver-api/component.rst17
-rw-r--r--Documentation/driver-api/device_link.rst87
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/driver-api/pm/cpuidle.rst282
-rw-r--r--Documentation/driver-api/pm/index.rst7
-rw-r--r--Documentation/driver-model/bus.txt8
-rw-r--r--Documentation/fb/fbcon.txt8
-rw-r--r--Documentation/features/core/cBPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/eBPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/generic-idle-thread/arch-support.txt1
-rw-r--r--Documentation/features/core/jump-labels/arch-support.txt1
-rw-r--r--Documentation/features/core/tracehook/arch-support.txt1
-rw-r--r--Documentation/features/debug/KASAN/arch-support.txt1
-rw-r--r--Documentation/features/debug/gcov-profile-all/arch-support.txt1
-rw-r--r--Documentation/features/debug/kgdb/arch-support.txt1
-rw-r--r--Documentation/features/debug/kprobes-on-ftrace/arch-support.txt1
-rw-r--r--Documentation/features/debug/kprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/kretprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/optprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/stackprotector/arch-support.txt1
-rw-r--r--Documentation/features/debug/uprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/user-ret-profiler/arch-support.txt1
-rw-r--r--Documentation/features/io/dma-contiguous/arch-support.txt1
-rw-r--r--Documentation/features/locking/cmpxchg-local/arch-support.txt1
-rw-r--r--Documentation/features/locking/lockdep/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-rwlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-spinlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/rwsem-optimized/arch-support.txt1
-rw-r--r--Documentation/features/perf/kprobes-event/arch-support.txt1
-rw-r--r--Documentation/features/perf/perf-regs/arch-support.txt1
-rw-r--r--Documentation/features/perf/perf-stackdump/arch-support.txt1
-rw-r--r--Documentation/features/sched/membarrier-sync-core/arch-support.txt1
-rw-r--r--Documentation/features/sched/numa-balancing/arch-support.txt1
-rw-r--r--Documentation/features/seccomp/seccomp-filter/arch-support.txt1
-rw-r--r--Documentation/features/time/arch-tick-broadcast/arch-support.txt1
-rw-r--r--Documentation/features/time/clockevents/arch-support.txt1
-rw-r--r--Documentation/features/time/context-tracking/arch-support.txt1
-rw-r--r--Documentation/features/time/irq-time-acct/arch-support.txt1
-rw-r--r--Documentation/features/time/modern-timekeeping/arch-support.txt1
-rw-r--r--Documentation/features/time/virt-cpuacct/arch-support.txt1
-rw-r--r--Documentation/features/vm/ELF-ASLR/arch-support.txt1
-rw-r--r--Documentation/features/vm/PG_uncached/arch-support.txt1
-rw-r--r--Documentation/features/vm/THP/arch-support.txt1
-rw-r--r--Documentation/features/vm/TLB/arch-support.txt1
-rw-r--r--Documentation/features/vm/huge-vmap/arch-support.txt1
-rw-r--r--Documentation/features/vm/ioremap_prot/arch-support.txt1
-rw-r--r--Documentation/features/vm/numa-memblock/arch-support.txt1
-rw-r--r--Documentation/features/vm/pte_special/arch-support.txt1
-rw-r--r--Documentation/filesystems/sysfs.txt4
-rw-r--r--Documentation/filesystems/xfs.txt3
-rw-r--r--Documentation/hwmon/lm859
-rw-r--r--Documentation/interconnect/interconnect.rst94
-rw-r--r--Documentation/networking/af_xdp.rst36
-rw-r--r--Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst14
-rw-r--r--Documentation/networking/device_drivers/intel/e100.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/e1000.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/e1000e.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/fm10k.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/i40e.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/iavf.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/ice.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/igb.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/igbvf.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/ixgb.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/ixgbe.rst1
-rw-r--r--Documentation/networking/device_drivers/intel/ixgbevf.rst1
-rw-r--r--Documentation/networking/device_drivers/stmicro/stmmac.txt2
-rw-r--r--Documentation/networking/devlink-health.txt86
-rw-r--r--Documentation/networking/devlink-info-versions.rst43
-rw-r--r--Documentation/networking/devlink-params-mlxsw.txt10
-rw-r--r--Documentation/networking/dsa/dsa.txt23
-rw-r--r--Documentation/networking/filter.txt33
-rw-r--r--Documentation/networking/ieee802154.rst (renamed from Documentation/networking/ieee802154.txt)193
-rw-r--r--Documentation/networking/index.rst30
-rw-r--r--Documentation/networking/msg_zerocopy.rst2
-rw-r--r--Documentation/networking/operstates.txt14
-rw-r--r--Documentation/networking/phy.rst447
-rw-r--r--Documentation/networking/phy.txt427
-rw-r--r--Documentation/networking/rxrpc.txt45
-rw-r--r--Documentation/networking/sfp-phylink.rst268
-rw-r--r--Documentation/networking/snmp_counter.rst415
-rw-r--r--Documentation/networking/switchdev.txt37
-rw-r--r--Documentation/networking/timestamping.txt47
-rw-r--r--Documentation/power/energy-model.txt144
-rw-r--r--Documentation/process/applying-patches.rst117
-rw-r--r--Documentation/scheduler/sched-energy.txt425
-rw-r--r--Documentation/sound/hd-audio/models.rst4
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst35
-rw-r--r--Documentation/sound/soc/dpcm.rst10
-rw-r--r--Documentation/spi/pxa2xx10
-rw-r--r--Documentation/sysctl/fs.txt28
-rw-r--r--Documentation/sysctl/kernel.txt12
-rw-r--r--Documentation/sysctl/net.txt15
-rw-r--r--Documentation/trace/coresight-cpu-debug.txt2
-rw-r--r--Documentation/translations/it_IT/admin-guide/README.rst2
-rw-r--r--Documentation/usb/authorization.txt4
-rw-r--r--Documentation/userspace-api/spec_ctrl.rst27
-rw-r--r--Documentation/virtual/kvm/amd-memory-encryption.rst2
-rw-r--r--Documentation/x86/resctrl_ui.txt2
-rw-r--r--Kbuild18
-rw-r--r--MAINTAINERS419
-rw-r--r--Makefile12
-rw-r--r--arch/Kconfig18
-rw-r--r--arch/alpha/include/asm/a.out-core.h81
-rw-r--r--arch/alpha/include/asm/irq.h6
-rw-r--r--arch/alpha/include/asm/topology.h3
-rw-r--r--arch/alpha/include/asm/uaccess.h1
-rw-r--r--arch/alpha/include/asm/unistd.h21
-rw-r--r--arch/alpha/include/uapi/asm/mman.h4
-rw-r--r--arch/alpha/include/uapi/asm/socket.h49
-rw-r--r--arch/alpha/include/uapi/asm/unistd.h10
-rw-r--r--arch/alpha/kernel/osf_sys.c5
-rw-r--r--arch/alpha/kernel/perf_event.c7
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl22
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arc/Kconfig21
-rw-r--r--arch/arc/configs/nps_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig2
-rw-r--r--arch/arc/include/asm/Kbuild4
-rw-r--r--arch/arc/include/asm/arcregs.h20
-rw-r--r--arch/arc/include/asm/bitops.h6
-rw-r--r--arch/arc/include/asm/cache.h11
-rw-r--r--arch/arc/include/asm/entry-arcv2.h54
-rw-r--r--arch/arc/include/asm/perf_event.h3
-rw-r--r--arch/arc/include/asm/uaccess.h8
-rw-r--r--arch/arc/include/uapi/asm/unistd.h2
-rw-r--r--arch/arc/kernel/entry-arcv2.S4
-rw-r--r--arch/arc/kernel/head.S16
-rw-r--r--arch/arc/kernel/intc-arcv2.c2
-rw-r--r--arch/arc/kernel/perf_event.c241
-rw-r--r--arch/arc/kernel/setup.c144
-rw-r--r--arch/arc/kernel/troubleshoot.c30
-rw-r--r--arch/arc/lib/memcpy-archs.S14
-rw-r--r--arch/arc/lib/memset-archs.S40
-rw-r--r--arch/arc/mm/fault.c13
-rw-r--r--arch/arc/mm/init.c3
-rw-r--r--arch/arc/plat-hsdk/Kconfig1
-rw-r--r--arch/arm/Kconfig6
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/dts/Makefile16
-rw-r--r--arch/arm/boot/dts/alphascale-asm9260.dtsi3
-rw-r--r--arch/arm/boot/dts/alpine.dtsi8
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir2110.dts4
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir3220.dts4
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir5221.dts4
-rw-r--r--arch/arm/boot/dts/am335x-chiliboard.dts8
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts2
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts4
-rw-r--r--arch/arm/boot/dts/am335x-guardian.dts511
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts4
-rw-r--r--arch/arm/boot/dts/am335x-igep0033.dtsi4
-rw-r--r--arch/arm/boot/dts/am335x-lxm.dts4
-rw-r--r--arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi5
-rw-r--r--arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts5
-rw-r--r--arch/arm/boot/dts/am335x-phycore-som.dtsi4
-rw-r--r--arch/arm/boot/dts/am335x-shc.dts6
-rw-r--r--arch/arm/boot/dts/am33xx-l4.dtsi17
-rw-r--r--arch/arm/boot/dts/am3874-iceboard.dts496
-rw-r--r--arch/arm/boot/dts/am437x-gp-evm.dts2
-rw-r--r--arch/arm/boot/dts/am437x-l4.dtsi17
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts5
-rw-r--r--arch/arm/boot/dts/arm-realview-eb.dtsi4
-rw-r--r--arch/arm/boot/dts/arm-realview-pb1176.dts4
-rw-r--r--arch/arm/boot/dts/arm-realview-pb11mp.dts4
-rw-r--r--arch/arm/boot/dts/arm-realview-pbx.dtsi4
-rw-r--r--arch/arm/boot/dts/armada-370-rd.dts42
-rw-r--r--arch/arm/boot/dts/armada-388-clearfog.dts58
-rw-r--r--arch/arm/boot/dts/armada-388-clearfog.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi41
-rw-r--r--arch/arm/boot/dts/armada-39x.dtsi3
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts46
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts13
-rw-r--r--arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts85
-rw-r--r--arch/arm/boot/dts/armada-xp-linksys-mamba.dts47
-rw-r--r--arch/arm/boot/dts/artpec6.dtsi3
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts10
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts55
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts145
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts5
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts8
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi1
-rw-r--r--arch/arm/boot/dts/aspeed-g5.dtsi34
-rw-r--r--arch/arm/boot/dts/at91-nattis-2-natte-2.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1.dtsi30
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1_ek.dts4
-rw-r--r--arch/arm/boot/dts/at91-wb45n.dts2
-rw-r--r--arch/arm/boot/dts/at91-wb50n.dts4
-rw-r--r--arch/arm/boot/dts/at91rm9200.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9261.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9rl.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi4
-rw-r--r--arch/arm/boot/dts/atlas6-evb.dts1
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi1
-rw-r--r--arch/arm/boot/dts/atlas7.dtsi1
-rw-r--r--arch/arm/boot/dts/axm55xx.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm-cygnus.dtsi9
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm11351.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm21664-garnet.dts1
-rw-r--r--arch/arm/boot/dts/bcm21664.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm23550-sparrow.dts1
-rw-r--r--arch/arm/boot/dts/bcm23550.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm28155-ap.dts1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-a-plus.dts4
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-plus.dts4
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero-w.dts11
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero.dts4
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi.dtsi8
-rw-r--r--arch/arm/boot/dts/bcm2836-rpi-2-b.dts68
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts175
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts74
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-b.dts74
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi17
-rw-r--r--arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts1
-rw-r--r--arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts1
-rw-r--r--arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts1
-rw-r--r--arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts1
-rw-r--r--arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts1
-rw-r--r--arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-linksys-panamera.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-netgear-r8500.dts1
-rw-r--r--arch/arm/boot/dts/bcm47094-phicomm-k3.dts71
-rw-r--r--arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts4
-rw-r--r--arch/arm/boot/dts/bcm47189-luxul-xap-810.dts4
-rw-r--r--arch/arm/boot/dts/bcm47189-tenda-ac9.dts1
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi3
-rw-r--r--arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts1
-rw-r--r--arch/arm/boot/dts/bcm53573.dtsi3
-rw-r--r--arch/arm/boot/dts/bcm63138.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm7445.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm947189acdbmr.dts1
-rw-r--r--arch/arm/boot/dts/bcm953012er.dts1
-rw-r--r--arch/arm/boot/dts/bcm953012hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm953012k.dts1
-rw-r--r--arch/arm/boot/dts/cx92755.dtsi4
-rw-r--r--arch/arm/boot/dts/da850-evm.dts31
-rw-r--r--arch/arm/boot/dts/da850-lcdk.dts48
-rw-r--r--arch/arm/boot/dts/da850.dtsi2
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi79
-rw-r--r--arch/arm/boot/dts/dove.dtsi4
-rw-r--r--arch/arm/boot/dts/dra7-l4.dtsi15
-rw-r--r--arch/arm/boot/dts/ep7209.dtsi4
-rw-r--r--arch/arm/boot/dts/ep7211-edb7211.dts1
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi14
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidx.dts2
-rw-r--r--arch/arm/boot/dts/exynos5250-arndale.dts4
-rw-r--r--arch/arm/boot/dts/exynos5422-odroid-core.dtsi7
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi6
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi5
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts8
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu4.dts4
-rw-r--r--arch/arm/boot/dts/gemini-dlink-dir-685.dts2
-rw-r--r--arch/arm/boot/dts/imx27.dtsi2
-rw-r--r--arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts16
-rw-r--r--arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi34
-rw-r--r--arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi555
-rw-r--r--arch/arm/boot/dts/imx6-logicpd-som.dtsi365
-rw-r--r--arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts5
-rw-r--r--arch/arm/boot/dts/imx6dl-yapp4-common.dtsi595
-rw-r--r--arch/arm/boot/dts/imx6dl-yapp4-draco.dts58
-rw-r--r--arch/arm/boot/dts/imx6dl-yapp4-hydra.dts50
-rw-r--r--arch/arm/boot/dts/imx6dl-yapp4-ursa.dts54
-rw-r--r--arch/arm/boot/dts/imx6q-logicpd.dts120
-rw-r--r--arch/arm/boot/dts/imx6q-pistachio.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts49
-rw-r--r--arch/arm/boot/dts/imx6qdl-apalis.dtsi26
-rw-r--r--arch/arm/boot/dts/imx6qdl-colibri.dtsi26
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi14
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6sll-evk.dts2
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi21
-rw-r--r--arch/arm/boot/dts/imx6ul-phytec-pcl063.dtsi148
-rw-r--r--arch/arm/boot/dts/imx6ul-phytec-peb-eval-01.dtsi55
-rw-r--r--arch/arm/boot/dts/imx6ul-phytec-phyboard-segin-full.dts89
-rw-r--r--arch/arm/boot/dts/imx6ul-phytec-phyboard-segin.dtsi329
-rw-r--r--arch/arm/boot/dts/imx6ull-colibri.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6ull.dtsi12
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi19
-rw-r--r--arch/arm/boot/dts/integrator.dtsi10
-rw-r--r--arch/arm/boot/dts/integratorcp.dts89
-rw-r--r--arch/arm/boot/dts/kirkwood-dir665.dts47
-rw-r--r--arch/arm/boot/dts/kirkwood-dnskw.dtsi4
-rw-r--r--arch/arm/boot/dts/kirkwood-linksys-viper.dts47
-rw-r--r--arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts47
-rw-r--r--arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts9
-rw-r--r--arch/arm/boot/dts/kirkwood-rd88f6281.dtsi41
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi3
-rw-r--r--arch/arm/boot/dts/lpc3250-ea3250.dts20
-rw-r--r--arch/arm/boot/dts/lpc3250-phy3250.dts87
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi32
-rw-r--r--arch/arm/boot/dts/lpc4350-hitex-eval.dts2
-rw-r--r--arch/arm/boot/dts/lpc4357-ea4357-devkit.dts2
-rw-r--r--arch/arm/boot/dts/lpc4357-myd-lpc4357.dts619
-rw-r--r--arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts1
-rw-r--r--arch/arm/boot/dts/ls1021a-qds.dts1
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts1
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi19
-rw-r--r--arch/arm/boot/dts/meson.dtsi12
-rw-r--r--arch/arm/boot/dts/meson6-atv1200.dts1
-rw-r--r--arch/arm/boot/dts/meson6.dtsi8
-rw-r--r--arch/arm/boot/dts/meson8-minix-neo-x8.dts1
-rw-r--r--arch/arm/boot/dts/meson8.dtsi89
-rw-r--r--arch/arm/boot/dts/meson8b-ec100.dts131
-rw-r--r--arch/arm/boot/dts/meson8b-mxq.dts1
-rw-r--r--arch/arm/boot/dts/meson8b-odroidc1.dts10
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi83
-rw-r--r--arch/arm/boot/dts/meson8m2-mxiii-plus.dts9
-rw-r--r--arch/arm/boot/dts/meson8m2.dtsi4
-rw-r--r--arch/arm/boot/dts/milbeaut-m10v-evb.dts32
-rw-r--r--arch/arm/boot/dts/milbeaut-m10v.dtsi95
-rw-r--r--arch/arm/boot/dts/mmp2-brownstone.dts1
-rw-r--r--arch/arm/boot/dts/mmp2.dtsi4
-rw-r--r--arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi2
-rw-r--r--arch/arm/boot/dts/moxart.dtsi3
-rw-r--r--arch/arm/boot/dts/mps2.dtsi6
-rw-r--r--arch/arm/boot/dts/mt2701-evb.dts1
-rw-r--r--arch/arm/boot/dts/mt2701.dtsi3
-rw-r--r--arch/arm/boot/dts/mt6580-evbp1.dts1
-rw-r--r--arch/arm/boot/dts/mt6580.dtsi1
-rw-r--r--arch/arm/boot/dts/mt6589-aquaris5.dts1
-rw-r--r--arch/arm/boot/dts/mt6589.dtsi3
-rw-r--r--arch/arm/boot/dts/mt6592-evb.dts2
-rw-r--r--arch/arm/boot/dts/mt6592.dtsi3
-rw-r--r--arch/arm/boot/dts/mt7623.dtsi15
-rw-r--r--arch/arm/boot/dts/mt8127-moose.dts1
-rw-r--r--arch/arm/boot/dts/mt8127.dtsi3
-rw-r--r--arch/arm/boot/dts/mt8135-evbp1.dts1
-rw-r--r--arch/arm/boot/dts/mt8135.dtsi3
-rw-r--r--arch/arm/boot/dts/nspire.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-evm-common.dtsi7
-rw-r--r--arch/arm/boot/dts/omap3-evm-processor-common.dtsi10
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi43
-rw-r--r--arch/arm/boot/dts/omap3-gta04a5.dts2
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi42
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts21
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi6
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts30
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi9
-rw-r--r--arch/arm/boot/dts/omap5-cm-t54.dts12
-rw-r--r--arch/arm/boot/dts/omap5-l4.dtsi2
-rw-r--r--arch/arm/boot/dts/orion5x-lacie-d2-network.dts1
-rw-r--r--arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts1
-rw-r--r--arch/arm/boot/dts/orion5x-lswsgl.dts1
-rw-r--r--arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts1
-rw-r--r--arch/arm/boot/dts/orion5x-netgear-wnr854t.dts1
-rw-r--r--arch/arm/boot/dts/orion5x-rd88f5182-nas.dts1
-rw-r--r--arch/arm/boot/dts/orion5x.dtsi4
-rw-r--r--arch/arm/boot/dts/ox810se.dtsi4
-rw-r--r--arch/arm/boot/dts/ox820.dtsi4
-rw-r--r--arch/arm/boot/dts/picoxcell-pc3x2.dtsi1
-rw-r--r--arch/arm/boot/dts/picoxcell-pc3x3.dtsi1
-rw-r--r--arch/arm/boot/dts/prima2-evb.dts1
-rw-r--r--arch/arm/boot/dts/prima2.dtsi1
-rw-r--r--arch/arm/boot/dts/pxa168.dtsi4
-rw-r--r--arch/arm/boot/dts/pxa2xx.dtsi3
-rw-r--r--arch/arm/boot/dts/pxa910.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom-apq8084.dtsi9
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi11
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom-mdm9615.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom-msm8660.dtsi25
-rw-r--r--arch/arm/boot/dts/qcom-msm8960.dtsi9
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts130
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi19
-rw-r--r--arch/arm/boot/dts/qcom-pm8941.dtsi1
-rw-r--r--arch/arm/boot/dts/r7s9210-rza2mevb.dts82
-rw-r--r--arch/arm/boot/dts/r7s9210.dtsi218
-rw-r--r--arch/arm/boot/dts/r8a7743.dtsi79
-rw-r--r--arch/arm/boot/dts/r8a7744.dtsi188
-rw-r--r--arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts18
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi28
-rw-r--r--arch/arm/boot/dts/r8a7779.dtsi26
-rw-r--r--arch/arm/boot/dts/r8a7790-stout.dts15
-rw-r--r--arch/arm/boot/dts/rk3036-kylin.dts1
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3066a-bqcurie2.dts1
-rw-r--r--arch/arm/boot/dts/rk3066a-mk808.dts1
-rw-r--r--arch/arm/boot/dts/rk3066a-rayeager.dts2
-rw-r--r--arch/arm/boot/dts/rk3066a.dtsi48
-rw-r--r--arch/arm/boot/dts/rk3188-bqedison2qc.dts19
-rw-r--r--arch/arm/boot/dts/rk3188-px3-evb.dts1
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3229-evb.dts1
-rw-r--r--arch/arm/boot/dts/rk3288-fennec.dts1
-rw-r--r--arch/arm/boot/dts/rk3288-firefly-reload.dts1
-rw-r--r--arch/arm/boot/dts/rk3288-miqi.dts1
-rw-r--r--arch/arm/boot/dts/rk3288-popmetal.dts1
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-square.dts1
-rw-r--r--arch/arm/boot/dts/rk3288-tinker-s.dts1
-rw-r--r--arch/arm/boot/dts/rk3288-veyron.dtsi4
-rw-r--r--arch/arm/boot/dts/rk3288-vyasa.dts1
-rw-r--r--arch/arm/boot/dts/rv1108-elgin-r1.dts208
-rw-r--r--arch/arm/boot/dts/rv1108-evb.dts20
-rw-r--r--arch/arm/boot/dts/rv1108.dtsi37
-rw-r--r--arch/arm/boot/dts/s3c2416-smdk2416.dts5
-rw-r--r--arch/arm/boot/dts/s5pv210-aries.dtsi90
-rw-r--r--arch/arm/boot/dts/s5pv210-fascinate4g.dts7
-rw-r--r--arch/arm/boot/dts/s5pv210-galaxys.dts7
-rw-r--r--arch/arm/boot/dts/s5pv210.dtsi25
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi4
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi4
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi4
-rw-r--r--arch/arm/boot/dts/skeleton.dtsi18
-rw-r--r--arch/arm/boot/dts/skeleton64.dtsi14
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi16
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi18
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_chameleon96.dts130
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi4
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi4
-rw-r--r--arch/arm/boot/dts/spear600.dtsi4
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts85
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-u300.dts2
-rw-r--r--arch/arm/boot/dts/stm32429i-eval.dts1
-rw-r--r--arch/arm/boot/dts/stm32746g-eval.dts1
-rw-r--r--arch/arm/boot/dts/stm32f429-disco.dts1
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi64
-rw-r--r--arch/arm/boot/dts/stm32f469-disco.dts1
-rw-r--r--arch/arm/boot/dts/stm32f746-disco.dts1
-rw-r--r--arch/arm/boot/dts/stm32f746.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32f769-disco.dts1
-rw-r--r--arch/arm/boot/dts/stm32h743.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32h743i-disco.dts1
-rw-r--r--arch/arm/boot/dts/stm32h743i-eval.dts1
-rw-r--r--arch/arm/boot/dts/stm32mp157-pinctrl.dtsi7
-rw-r--r--arch/arm/boot/dts/stm32mp157c-ed1.dts8
-rw-r--r--arch/arm/boot/dts/stm32mp157c-ev1.dts10
-rw-r--r--arch/arm/boot/dts/stm32mp157c.dtsi97
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi44
-rw-r--r--arch/arm/boot/dts/sun5i-a13-q8-tablet.dts7
-rw-r--r--arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi2
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapi.dts5
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi175
-rw-r--r--arch/arm/boot/dts/sun8i-a23-q8-tablet.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-a23.dtsi20
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi194
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts8
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts8
-rw-r--r--arch/arm/boot/dts/sun8i-h3-beelink-x2.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts25
-rw-r--r--arch/arm/boot/dts/sun8i-q8-common.dtsi37
-rw-r--r--arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts41
-rw-r--r--arch/arm/boot/dts/sun8i-r40.dtsi15
-rw-r--r--arch/arm/boot/dts/sun9i-a80-cubieboard4.dts44
-rw-r--r--arch/arm/boot/dts/sun9i-a80-optimus.dts44
-rw-r--r--arch/arm/boot/dts/sun9i-a80.dtsi65
-rw-r--r--arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi16
-rw-r--r--arch/arm/boot/dts/sunxi-h3-h5.dtsi3
-rw-r--r--arch/arm/boot/dts/tegra114-dalmore.dts2
-rw-r--r--arch/arm/boot/dts/tegra124-jetson-tk1.dts2
-rw-r--r--arch/arm/boot/dts/tegra124-nyan.dtsi19
-rw-r--r--arch/arm/boot/dts/tegra124-venice2.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-trimslice.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-beaver.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi2
-rw-r--r--arch/arm/boot/dts/versatile-ab.dts2
-rw-r--r--arch/arm/boot/dts/vexpress-v2m-rs1.dtsi2
-rw-r--r--arch/arm/boot/dts/vexpress-v2m.dtsi2
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts5
-rw-r--r--arch/arm/boot/dts/vf610-bk4.dts35
-rw-r--r--arch/arm/boot/dts/vf610-zii-cfu1.dts2
-rw-r--r--arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts311
-rw-r--r--arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts3
-rw-r--r--arch/arm/boot/dts/vt8500.dtsi9
-rw-r--r--arch/arm/boot/dts/wm8505.dtsi9
-rw-r--r--arch/arm/boot/dts/wm8650.dtsi9
-rw-r--r--arch/arm/boot/dts/wm8750.dtsi9
-rw-r--r--arch/arm/boot/dts/wm8850.dtsi9
-rw-r--r--arch/arm/boot/dts/zx296702-ad1.dts1
-rw-r--r--arch/arm/boot/dts/zx296702.dtsi4
-rw-r--r--arch/arm/boot/dts/zynq-zturn.dts2
-rw-r--r--arch/arm/configs/axm55xx_defconfig4
-rw-r--r--arch/arm/configs/bcm2835_defconfig3
-rw-r--r--arch/arm/configs/integrator_defconfig9
-rw-r--r--arch/arm/configs/lpc18xx_defconfig31
-rw-r--r--arch/arm/configs/lpc32xx_defconfig65
-rw-r--r--arch/arm/configs/milbeaut_m10v_defconfig119
-rw-r--r--arch/arm/configs/multi_v7_defconfig10
-rw-r--r--arch/arm/configs/nhk8815_defconfig41
-rw-r--r--arch/arm/configs/omap2plus_defconfig83
-rw-r--r--arch/arm/configs/pxa_defconfig5
-rw-r--r--arch/arm/configs/raumfeld_defconfig197
-rw-r--r--arch/arm/configs/s5pv210_defconfig47
-rw-r--r--arch/arm/configs/shmobile_defconfig48
-rw-r--r--arch/arm/configs/socfpga_defconfig25
-rw-r--r--arch/arm/configs/spear3xx_defconfig6
-rw-r--r--arch/arm/crypto/aes-ce-core.S26
-rw-r--r--arch/arm/crypto/crct10dif-ce-core.S568
-rw-r--r--arch/arm/crypto/crct10dif-ce-glue.c25
-rw-r--r--arch/arm/crypto/sha256-armv4.pl3
-rw-r--r--arch/arm/crypto/sha256-core.S_shipped3
-rw-r--r--arch/arm/crypto/sha512-armv4.pl3
-rw-r--r--arch/arm/crypto/sha512-core.S_shipped3
-rw-r--r--arch/arm/include/asm/irq.h1
-rw-r--r--arch/arm/include/asm/kvm_host.h10
-rw-r--r--arch/arm/include/asm/kvm_ras.h14
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h5
-rw-r--r--arch/arm/include/asm/system_misc.h5
-rw-r--r--arch/arm/include/asm/uaccess.h1
-rw-r--r--arch/arm/include/asm/unistd.h5
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h94
-rw-r--r--arch/arm/kernel/irq.c62
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/smp.c2
-rw-r--r--arch/arm/kernel/sys_oabi-compat.c8
-rw-r--r--arch/arm/kvm/coproc.c4
-rw-r--r--arch/arm/kvm/reset.c24
-rw-r--r--arch/arm/mach-bcm/Kconfig1
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c4
-rw-r--r--arch/arm/mach-davinci/Kconfig19
-rw-r--r--arch/arm/mach-davinci/Makefile3
-rw-r--r--arch/arm/mach-davinci/asp.h8
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c95
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c59
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c6
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c15
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c25
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c19
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c41
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c2
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c88
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c15
-rw-r--r--arch/arm/mach-davinci/common.c18
-rw-r--r--arch/arm/mach-davinci/cp_intc.c215
-rw-r--r--arch/arm/mach-davinci/cp_intc.h57
-rw-r--r--arch/arm/mach-davinci/da830.c128
-rw-r--r--arch/arm/mach-davinci/da850.c156
-rw-r--r--arch/arm/mach-davinci/davinci.h4
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c147
-rw-r--r--arch/arm/mach-davinci/devices.c40
-rw-r--r--arch/arm/mach-davinci/dm355.c78
-rw-r--r--arch/arm/mach-davinci/dm365.c102
-rw-r--r--arch/arm/mach-davinci/dm644x.c68
-rw-r--r--arch/arm/mach-davinci/dm646x.c82
-rw-r--r--arch/arm/mach-davinci/include/mach/common.h14
-rw-r--r--arch/arm/mach-davinci/include/mach/cpufreq.h26
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/entry-macro.S39
-rw-r--r--arch/arm/mach-davinci/irq.c117
-rw-r--r--arch/arm/mach-davinci/irqs.h (renamed from arch/arm/mach-davinci/include/mach/irqs.h)4
-rw-r--r--arch/arm/mach-davinci/usb-da8xx.c9
-rw-r--r--arch/arm/mach-davinci/usb.c15
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c1
-rw-r--r--arch/arm/mach-exynos/platsmp.c4
-rw-r--r--arch/arm/mach-imx/Makefile3
-rw-r--r--arch/arm/mach-imx/common.h10
-rw-r--r--arch/arm/mach-imx/cpuidle-imx7ulp.c60
-rw-r--r--arch/arm/mach-imx/cpuidle.h5
-rw-r--r--arch/arm/mach-imx/mach-imx7ulp.c46
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c1
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c2
-rw-r--r--arch/arm/mach-imx/mmdc.c9
-rw-r--r--arch/arm/mach-imx/pm-imx7ulp.c49
-rw-r--r--arch/arm/mach-integrator/impd1.c8
-rw-r--r--arch/arm/mach-iop32x/n2100.c3
-rw-r--r--arch/arm/mach-lpc32xx/phy3250.c84
-rw-r--r--arch/arm/mach-lpc32xx/pm.c13
-rw-r--r--arch/arm/mach-mediatek/Kconfig4
-rw-r--r--arch/arm/mach-mediatek/mediatek.c3
-rw-r--r--arch/arm/mach-mediatek/platsmp.c2
-rw-r--r--arch/arm/mach-meson/Kconfig1
-rw-r--r--arch/arm/mach-milbeaut/Kconfig20
-rw-r--r--arch/arm/mach-milbeaut/Makefile1
-rw-r--r--arch/arm/mach-milbeaut/platsmp.c143
-rw-r--r--arch/arm/mach-mmp/brownstone.c1
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c16
-rw-r--r--arch/arm/mach-omap2/display.c7
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c36
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c131
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c1
-rw-r--r--arch/arm/mach-orion5x/common.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-ge-setup.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c2
-rw-r--r--arch/arm/mach-orion5x/wnr854t-setup.c2
-rw-r--r--arch/arm/mach-orion5x/wrt350n-v2-setup.c2
-rw-r--r--arch/arm/mach-pxa/Kconfig18
-rw-r--r--arch/arm/mach-pxa/Makefile3
-rw-r--r--arch/arm/mach-pxa/cm-x255.c2
-rw-r--r--arch/arm/mach-pxa/cm-x270.c2
-rw-r--r--arch/arm/mach-pxa/corgi.c2
-rw-r--r--arch/arm/mach-pxa/devices.c2
-rw-r--r--arch/arm/mach-pxa/em-x270.c5
-rw-r--r--arch/arm/mach-pxa/ezx.c3
-rw-r--r--arch/arm/mach-pxa/hx4700.c25
-rw-r--r--arch/arm/mach-pxa/icontrol.c4
-rw-r--r--arch/arm/mach-pxa/include/mach/pxa25x-udc.h0
-rw-r--r--arch/arm/mach-pxa/littleton.c2
-rw-r--r--arch/arm/mach-pxa/lubbock.c2
-rw-r--r--arch/arm/mach-pxa/magician.c25
-rw-r--r--arch/arm/mach-pxa/pcm027.c2
-rw-r--r--arch/arm/mach-pxa/poodle.c2
-rw-r--r--arch/arm/mach-pxa/raumfeld.c1187
-rw-r--r--arch/arm/mach-pxa/spitz.c2
-rw-r--r--arch/arm/mach-pxa/stargate2.c6
-rw-r--r--arch/arm/mach-pxa/tosa.c2
-rw-r--r--arch/arm/mach-pxa/z2.c4
-rw-r--r--arch/arm/mach-pxa/zeus.c5
-rw-r--r--arch/arm/mach-s3c24xx/mach-osiris-dvs.c8
-rw-r--r--arch/arm/mach-sa1100/assabet.c1
-rw-r--r--arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c5
-rw-r--r--arch/arm/mach-socfpga/socfpga.c3
-rw-r--r--arch/arm/mach-sunxi/sunxi.c2
-rw-r--r--arch/arm/mach-tango/pm.c6
-rw-r--r--arch/arm/mach-tango/pm.h7
-rw-r--r--arch/arm/mach-tango/setup.c2
-rw-r--r--arch/arm/mach-tegra/iomap.h9
-rw-r--r--arch/arm/mach-tegra/sleep-tegra20.S2
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S25
-rw-r--r--arch/arm/mm/cache-l2x0-pmu.c9
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/net/bpf_jit_32.c53
-rw-r--r--arch/arm/net/bpf_jit_32.h2
-rw-r--r--arch/arm/plat-orion/common.c2
-rw-r--r--arch/arm/plat-pxa/ssp.c3
-rw-r--r--arch/arm/probes/kprobes/opt-arm.c2
-rw-r--r--arch/arm/tools/syscall.tbl85
-rw-r--r--arch/arm/xen/hypercall.S3
-rw-r--r--arch/arm/xen/mm.c1
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/Kconfig.platforms14
-rw-r--r--arch/arm64/boot/dts/Makefile1
-rw-r--r--arch/arm64/boot/dts/actions/s700-cubieboard7.dts53
-rw-r--r--arch/arm64/boot/dts/actions/s700.dtsi66
-rw-r--r--arch/arm64/boot/dts/actions/s900.dtsi18
-rw-r--r--arch/arm64/boot/dts/al/alpine-v2.dtsi8
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts8
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts8
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts9
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts8
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts10
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi40
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi8
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi44
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi37
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg-s400.dts30
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi33
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts28
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a.dtsi70
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi31
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi8
-rw-r--r--arch/arm64/boot/dts/apm/apm-shadowcat.dtsi16
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi16
-rw-r--r--arch/arm64/boot/dts/arm/Makefile1
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8.dtsi106
-rw-r--r--arch/arm64/boot/dts/arm/fvp-base-revc.dts277
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi108
-rw-r--r--arch/arm64/boot/dts/arm/juno-r1.dts12
-rw-r--r--arch/arm64/boot/dts/arm/juno-r2.dts18
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts18
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts106
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi27
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi14
-rw-r--r--arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts4
-rw-r--r--arch/arm64/boot/dts/bitmain/Makefile3
-rw-r--r--arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts50
-rw-r--r--arch/arm64/boot/dts/bitmain/bm1880.dtsi119
-rw-r--r--arch/arm64/boot/dts/broadcom/Makefile3
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-a-plus.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi8
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi16
-rw-r--r--arch/arm64/boot/dts/cavium/thunder-88xx.dtsi96
-rw-r--r--arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi8
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433.dtsi16
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts96
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts17
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi35
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi11
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi97
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts26
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi133
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts91
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi306
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek.dts137
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp.dtsi446
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts3
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660.dtsi16
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3670.dtsi16
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts4
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi4
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi18
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip05.dtsi32
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip06.dtsi32
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07.dtsi128
-rw-r--r--arch/arm64/boot/dts/lg/lg1312.dtsi8
-rw-r--r--arch/arm64/boot/dts/lg/lg1313.dtsi8
-rw-r--r--arch/arm64/boot/dts/marvell/Makefile1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts12
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts162
-rw-r--r--arch/arm64/boot/dts/marvell/armada-372x.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi82
-rw-r--r--arch/arm64/boot/dts/marvell/armada-7040-db.dts4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-db.dts4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi8
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806.dtsi35
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi16
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110.dtsi15
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712-evb.dts98
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712e.dtsi555
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6797-evb.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6797.dtsi28
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi13
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi6
-rw-r--r--arch/arm64/boot/dts/nvidia/Makefile1
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra132.dtsi4
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts19
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi10
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi33
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts11
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi93
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts21
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi33
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2894-0050-a08.dts9
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi1858
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-smaug.dts34
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi96
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-pins.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi38
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi24
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi280
-rw-r--r--arch/arm64/boot/dts/qcom/pm8916.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/pms405.dtsi79
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb.dtsi19
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi359
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts84
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi1046
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1295.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile1
-rw-r--r--arch/arm64/boot/dts/renesas/cat875.dtsi44
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi17
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts106
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0-ek874.dts14
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi1911
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795.dtsi56
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts3
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796.dtsi55
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts26
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi223
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995-draak.dts4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb-kf.dtsi138
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb.dtsi71
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile3
-rw-r--r--arch/arm64/boot/dts/rockchip/px30.dtsi8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts28
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi22
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-r88.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi13
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi46
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi13
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts91
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4.dts66
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi703
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts606
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi110
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts83
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi12
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi4
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts4
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi55
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts10
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi55
-rw-r--r--arch/arm64/boot/dts/sprd/sc2731.dtsi56
-rw-r--r--arch/arm64/boot/dts/sprd/sc9836.dtsi8
-rw-r--r--arch/arm64/boot/dts/sprd/sc9860.dtsi16
-rw-r--r--arch/arm64/boot/dts/sprd/sp9860g-1h10.dts16
-rw-r--r--arch/arm64/boot/dts/synaptics/as370.dtsi8
-rw-r--r--arch/arm64/boot/dts/synaptics/berlin4ct.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi118
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi30
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-base-board.dts65
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654.dtsi8
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts1
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi8
-rw-r--r--arch/arm64/boot/dts/zte/zx296718.dtsi8
-rw-r--r--arch/arm64/configs/defconfig36
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-core.S5
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-glue.c8
-rw-r--r--arch/arm64/crypto/aes-modes.S3
-rw-r--r--arch/arm64/crypto/aes-neonbs-core.S8
-rw-r--r--arch/arm64/crypto/chacha-neon-core.S20
-rw-r--r--arch/arm64/crypto/crct10dif-ce-core.S513
-rw-r--r--arch/arm64/crypto/crct10dif-ce-glue.c75
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c118
-rw-r--r--arch/arm64/include/asm/acpi.h4
-rw-r--r--arch/arm64/include/asm/asm-prototypes.h2
-rw-r--r--arch/arm64/include/asm/atomic.h237
-rw-r--r--arch/arm64/include/asm/atomic_ll_sc.h28
-rw-r--r--arch/arm64/include/asm/atomic_lse.h38
-rw-r--r--arch/arm64/include/asm/cache.h4
-rw-r--r--arch/arm64/include/asm/cmpxchg.h60
-rw-r--r--arch/arm64/include/asm/daifflags.h1
-rw-r--r--arch/arm64/include/asm/device.h3
-rw-r--r--arch/arm64/include/asm/fixmap.h6
-rw-r--r--arch/arm64/include/asm/hugetlb.h5
-rw-r--r--arch/arm64/include/asm/kvm_host.h11
-rw-r--r--arch/arm64/include/asm/kvm_ras.h25
-rw-r--r--arch/arm64/include/asm/memory.h15
-rw-r--r--arch/arm64/include/asm/mmu.h44
-rw-r--r--arch/arm64/include/asm/neon-intrinsics.h4
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h4
-rw-r--r--arch/arm64/include/asm/sync_bitops.h16
-rw-r--r--arch/arm64/include/asm/system_misc.h2
-rw-r--r--arch/arm64/include/asm/uaccess.h1
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h99
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h76
-rw-r--r--arch/arm64/include/uapi/asm/unistd.h2
-rw-r--r--arch/arm64/kernel/acpi.c31
-rw-r--r--arch/arm64/kernel/cpu_errata.c2
-rw-r--r--arch/arm64/kernel/cpufeature.c9
-rw-r--r--arch/arm64/kernel/head.S4
-rw-r--r--arch/arm64/kernel/hibernate.c4
-rw-r--r--arch/arm64/kernel/hyp-stub.S2
-rw-r--r--arch/arm64/kernel/kaslr.c9
-rw-r--r--arch/arm64/kernel/machine_kexec.c3
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c8
-rw-r--r--arch/arm64/kernel/probes/kprobes.c6
-rw-r--r--arch/arm64/kernel/ptrace.c15
-rw-r--r--arch/arm64/kernel/setup.c4
-rw-r--r--arch/arm64/kvm/hyp/switch.c5
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c5
-rw-r--r--arch/arm64/kvm/reset.c50
-rw-r--r--arch/arm64/kvm/sys_regs.c50
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/dump.c59
-rw-r--r--arch/arm64/mm/fault.c24
-rw-r--r--arch/arm64/mm/flush.c6
-rw-r--r--arch/arm64/mm/hugetlbpage.c20
-rw-r--r--arch/arm64/mm/init.c27
-rw-r--r--arch/arm64/mm/kasan_init.c2
-rw-r--r--arch/arm64/mm/numa.c2
-rw-r--r--arch/arm64/net/bpf_jit_comp.c37
-rw-r--r--arch/c6x/Kconfig1
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/uapi/asm/unistd.h2
-rw-r--r--arch/csky/Kconfig1
-rw-r--r--arch/csky/include/asm/io.h25
-rw-r--r--arch/csky/include/asm/pgalloc.h43
-rw-r--r--arch/csky/include/asm/pgtable.h9
-rw-r--r--arch/csky/include/asm/processor.h4
-rw-r--r--arch/csky/include/asm/segment.h1
-rw-r--r--arch/csky/include/uapi/asm/unistd.h2
-rw-r--r--arch/csky/kernel/dumpstack.c4
-rw-r--r--arch/csky/kernel/module.c38
-rw-r--r--arch/csky/kernel/ptrace.c3
-rw-r--r--arch/csky/kernel/smp.c3
-rw-r--r--arch/csky/mm/ioremap.c14
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/h8300/Makefile2
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/h8300/include/asm/segment.h6
-rw-r--r--arch/h8300/include/uapi/asm/unistd.h2
-rw-r--r--arch/hexagon/Kconfig1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/uapi/asm/unistd.h2
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/ia64/include/asm/uaccess.h1
-rw-r--r--arch/ia64/include/asm/unistd.h14
-rw-r--r--arch/ia64/include/uapi/asm/Kbuild1
-rw-r--r--arch/ia64/include/uapi/asm/socket.h120
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h2
-rw-r--r--arch/ia64/kernel/numa.c2
-rw-r--r--arch/ia64/kernel/perfmon.c59
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl11
-rw-r--r--arch/ia64/mm/discontig.c6
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/Kconfig.machine2
-rw-r--r--arch/m68k/Makefile5
-rw-r--r--arch/m68k/apollo/Makefile2
-rw-r--r--arch/m68k/atari/Makefile2
-rw-r--r--arch/m68k/atari/config.c2
-rw-r--r--arch/m68k/atari/nvram.c272
-rw-r--r--arch/m68k/coldfire/m5272.c2
-rw-r--r--arch/m68k/configs/amcore_defconfig20
-rw-r--r--arch/m68k/configs/amiga_defconfig37
-rw-r--r--arch/m68k/configs/apollo_defconfig34
-rw-r--r--arch/m68k/configs/atari_defconfig33
-rw-r--r--arch/m68k/configs/bvme6000_defconfig33
-rw-r--r--arch/m68k/configs/hp300_defconfig34
-rw-r--r--arch/m68k/configs/mac_defconfig34
-rw-r--r--arch/m68k/configs/multi_defconfig34
-rw-r--r--arch/m68k/configs/mvme147_defconfig34
-rw-r--r--arch/m68k/configs/mvme16x_defconfig33
-rw-r--r--arch/m68k/configs/q40_defconfig38
-rw-r--r--arch/m68k/configs/sun3_defconfig33
-rw-r--r--arch/m68k/configs/sun3x_defconfig34
-rw-r--r--arch/m68k/emu/nfblock.c10
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/a.out-core.h68
-rw-r--r--arch/m68k/include/asm/atarihw.h6
-rw-r--r--arch/m68k/include/asm/macintosh.h4
-rw-r--r--arch/m68k/include/asm/macints.h3
-rw-r--r--arch/m68k/include/asm/segment.h7
-rw-r--r--arch/m68k/include/asm/unistd.h4
-rw-r--r--arch/m68k/kernel/setup_mm.c82
-rw-r--r--arch/m68k/kernel/signal.c3
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl88
-rw-r--r--arch/m68k/mac/misc.c192
-rw-r--r--arch/m68k/mac/via.c20
-rw-r--r--arch/m68k/mm/memory.c2
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/uaccess.h1
-rw-r--r--arch/microblaze/include/asm/unistd.h4
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl83
-rw-r--r--arch/mips/Kconfig30
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/ar7/platform.c4
-rw-r--r--arch/mips/ath79/Kconfig73
-rw-r--r--arch/mips/ath79/Makefile23
-rw-r--r--arch/mips/ath79/clock.c342
-rw-r--r--arch/mips/ath79/common.h5
-rw-r--r--arch/mips/ath79/dev-common.c159
-rw-r--r--arch/mips/ath79/dev-common.h18
-rw-r--r--arch/mips/ath79/dev-gpio-buttons.c56
-rw-r--r--arch/mips/ath79/dev-gpio-buttons.h23
-rw-r--r--arch/mips/ath79/dev-leds-gpio.c54
-rw-r--r--arch/mips/ath79/dev-leds-gpio.h21
-rw-r--r--arch/mips/ath79/dev-spi.c38
-rw-r--r--arch/mips/ath79/dev-spi.h22
-rw-r--r--arch/mips/ath79/dev-usb.c242
-rw-r--r--arch/mips/ath79/dev-usb.h17
-rw-r--r--arch/mips/ath79/dev-wmac.c155
-rw-r--r--arch/mips/ath79/dev-wmac.h17
-rw-r--r--arch/mips/ath79/irq.c169
-rw-r--r--arch/mips/ath79/mach-ap121.c92
-rw-r--r--arch/mips/ath79/mach-ap136.c156
-rw-r--r--arch/mips/ath79/mach-ap81.c100
-rw-r--r--arch/mips/ath79/mach-db120.c136
-rw-r--r--arch/mips/ath79/mach-pb44.c128
-rw-r--r--arch/mips/ath79/mach-ubnt-xm.c126
-rw-r--r--arch/mips/ath79/machtypes.h28
-rw-r--r--arch/mips/ath79/pci.c273
-rw-r--r--arch/mips/ath79/pci.h35
-rw-r--r--arch/mips/ath79/setup.c78
-rw-r--r--arch/mips/bcm47xx/buttons.c2
-rw-r--r--arch/mips/bcm47xx/leds.c10
-rw-r--r--arch/mips/bcm47xx/setup.c33
-rw-r--r--arch/mips/bcm63xx/dev-enet.c8
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts14
-rw-r--r--arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts6
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts8
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi2
-rw-r--r--arch/mips/boot/dts/xilfpga/nexys4ddr.dts8
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c86
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c39
-rw-r--r--arch/mips/cavium-octeon/oct_ilm.c32
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c64
-rw-r--r--arch/mips/cavium-octeon/setup.c2
-rw-r--r--arch/mips/configs/ath79_defconfig1
-rw-r--r--arch/mips/configs/xway_defconfig1
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/atomic.h6
-rw-r--r--arch/mips/include/asm/barrier.h55
-rw-r--r--arch/mips/include/asm/bitops.h5
-rw-r--r--arch/mips/include/asm/cacheflush.h2
-rw-r--r--arch/mips/include/asm/cmpxchg.h104
-rw-r--r--arch/mips/include/asm/cpu-features.h13
-rw-r--r--arch/mips/include/asm/cpu.h1
-rw-r--r--arch/mips/include/asm/futex.h3
-rw-r--r--arch/mips/include/asm/ginvt.h56
-rw-r--r--arch/mips/include/asm/irqflags.h2
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h4
-rw-r--r--arch/mips/include/asm/mach-ip27/irq.h12
-rw-r--r--arch/mips/include/asm/mach-ip27/mmzone.h9
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h2
-rw-r--r--arch/mips/include/asm/mach-loongson32/platform.h4
-rw-r--r--arch/mips/include/asm/mipsregs.h11
-rw-r--r--arch/mips/include/asm/mmu.h6
-rw-r--r--arch/mips/include/asm/mmu_context.h139
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h12
-rw-r--r--arch/mips/include/asm/octeon/cvmx-smix-defs.h276
-rw-r--r--arch/mips/include/asm/pci/bridge.h206
-rw-r--r--arch/mips/include/asm/pgtable.h49
-rw-r--r--arch/mips/include/asm/smp-ops.h1
-rw-r--r--arch/mips/include/asm/sn/addrs.h72
-rw-r--r--arch/mips/include/asm/sn/arch.h2
-rw-r--r--arch/mips/include/asm/sn/io.h2
-rw-r--r--arch/mips/include/asm/sn/sn0/addrs.h5
-rw-r--r--arch/mips/include/asm/tlbflush.h5
-rw-r--r--arch/mips/include/asm/uaccess.h1
-rw-r--r--arch/mips/include/asm/unistd.h17
-rw-r--r--arch/mips/include/uapi/asm/mman.h4
-rw-r--r--arch/mips/include/uapi/asm/socket.h49
-rw-r--r--arch/mips/jazz/jazzdma.c5
-rw-r--r--arch/mips/jz4740/setup.c14
-rw-r--r--arch/mips/kernel/cmpxchg.c3
-rw-r--r--arch/mips/kernel/cpu-probe.c55
-rw-r--r--arch/mips/kernel/ftrace.c6
-rw-r--r--arch/mips/kernel/irq.c4
-rw-r--r--arch/mips/kernel/kgdb.c4
-rw-r--r--arch/mips/kernel/mips-cm.c6
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c21
-rw-r--r--arch/mips/kernel/process.c7
-rw-r--r--arch/mips/kernel/segment.c15
-rw-r--r--arch/mips/kernel/setup.c10
-rw-r--r--arch/mips/kernel/smp.c69
-rw-r--r--arch/mips/kernel/spinlock_test.c21
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl77
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl7
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl85
-rw-r--r--arch/mips/kernel/traps.c6
-rw-r--r--arch/mips/kernel/unaligned.c17
-rw-r--r--arch/mips/kvm/emulate.c8
-rw-r--r--arch/mips/kvm/mips.c5
-rw-r--r--arch/mips/kvm/trap_emul.c30
-rw-r--r--arch/mips/kvm/vz.c8
-rw-r--r--arch/mips/lantiq/Kconfig4
-rw-r--r--arch/mips/lantiq/irq.c77
-rw-r--r--arch/mips/lantiq/xway/dma.c6
-rw-r--r--arch/mips/lantiq/xway/vmmc.c4
-rw-r--r--arch/mips/lib/dump_tlb.c22
-rw-r--r--arch/mips/loongson32/Kconfig2
-rw-r--r--arch/mips/loongson32/Platform4
-rw-r--r--arch/mips/loongson32/common/platform.c63
-rw-r--r--arch/mips/loongson32/ls1b/board.c28
-rw-r--r--arch/mips/loongson64/Platform23
-rw-r--r--arch/mips/loongson64/common/reset.c7
-rw-r--r--arch/mips/math-emu/me-debugfs.c23
-rw-r--r--arch/mips/mm/Makefile16
-rw-r--r--arch/mips/mm/c-octeon.c18
-rw-r--r--arch/mips/mm/c-r3k.c25
-rw-r--r--arch/mips/mm/c-r4k.c124
-rw-r--r--arch/mips/mm/c-tx39.c21
-rw-r--r--arch/mips/mm/cache.c1
-rw-r--r--arch/mips/mm/context.c291
-rw-r--r--arch/mips/mm/dma-noncoherent.c9
-rw-r--r--arch/mips/mm/init.c7
-rw-r--r--arch/mips/mm/sc-debugfs.c15
-rw-r--r--arch/mips/mm/tlb-r3k.c14
-rw-r--r--arch/mips/mm/tlb-r4k.c71
-rw-r--r--arch/mips/mm/tlb-r8k.c10
-rw-r--r--arch/mips/mm/tlbex.c10
-rw-r--r--arch/mips/net/ebpf_jit.c26
-rw-r--r--arch/mips/pci/Makefile1
-rw-r--r--arch/mips/pci/fixup-ath79.c21
-rw-r--r--arch/mips/pci/msi-octeon.c4
-rw-r--r--arch/mips/pci/ops-bridge.c68
-rw-r--r--arch/mips/pci/pci-ip27.c49
-rw-r--r--arch/mips/pci/pci-octeon.c10
-rw-r--r--arch/mips/ralink/bootrom.c8
-rw-r--r--arch/mips/sgi-ip27/Makefile3
-rw-r--r--arch/mips/sgi-ip27/ip27-hubio.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c39
-rw-r--r--arch/mips/sgi-ip27/ip27-irq-pci.c266
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c357
-rw-r--r--arch/mips/sgi-ip27/ip27-irqno.c48
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c34
-rw-r--r--arch/mips/sgi-ip27/ip27-nmi.c64
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c5
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c42
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c13
-rw-r--r--arch/mips/vdso/Makefile5
-rw-r--r--arch/nds32/Kconfig1
-rw-r--r--arch/nds32/Makefile8
-rw-r--r--arch/nds32/include/asm/uaccess.h1
-rw-r--r--arch/nds32/include/uapi/asm/unistd.h2
-rw-r--r--arch/nds32/kernel/process.c2
-rw-r--r--arch/nios2/Kconfig1
-rw-r--r--arch/nios2/include/asm/uaccess.h1
-rw-r--r--arch/nios2/include/uapi/asm/unistd.h2
-rw-r--r--arch/openrisc/Kconfig1
-rw-r--r--arch/openrisc/Makefile3
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/uaccess.h9
-rw-r--r--arch/openrisc/include/uapi/asm/unistd.h2
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/boot/Makefile6
-rw-r--r--arch/parisc/include/asm/dma-mapping.h46
-rw-r--r--arch/parisc/include/asm/hardirq.h1
-rw-r--r--arch/parisc/include/asm/io.h9
-rw-r--r--arch/parisc/include/asm/pci.h4
-rw-r--r--arch/parisc/include/asm/pdc.h1
-rw-r--r--arch/parisc/include/asm/pdcpat.h4
-rw-r--r--arch/parisc/include/asm/uaccess.h1
-rw-r--r--arch/parisc/include/asm/unistd.h13
-rw-r--r--arch/parisc/include/uapi/asm/mman.h4
-rw-r--r--arch/parisc/include/uapi/asm/pdc.h26
-rw-r--r--arch/parisc/include/uapi/asm/socket.h48
-rw-r--r--arch/parisc/kernel/firmware.c24
-rw-r--r--arch/parisc/kernel/irq.c19
-rw-r--r--arch/parisc/kernel/processor.c9
-rw-r--r--arch/parisc/kernel/ptrace.c29
-rw-r--r--arch/parisc/kernel/setup.c1
-rw-r--r--arch/parisc/kernel/smp.c1
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl109
-rw-r--r--arch/parisc/kernel/traps.c4
-rw-r--r--arch/parisc/lib/iomap.c64
-rw-r--r--arch/parisc/mm/init.c52
-rw-r--r--arch/powerpc/Kconfig7
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h12
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h44
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h4
-rw-r--r--arch/powerpc/include/asm/io.h2
-rw-r--r--arch/powerpc/include/asm/nvram.h9
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h3
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/include/asm/uaccess.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h8
-rw-r--r--arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--arch/powerpc/include/uapi/asm/socket.h4
-rw-r--r--arch/powerpc/kernel/head_8xx.S3
-rw-r--r--arch/powerpc/kernel/nvram_64.c158
-rw-r--r--arch/powerpc/kernel/paca.c3
-rw-r--r--arch/powerpc/kernel/pci-common.c3
-rw-r--r--arch/powerpc/kernel/setup_32.c36
-rw-r--r--arch/powerpc/kernel/signal_64.c7
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl134
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c7
-rw-r--r--arch/powerpc/kernel/vdso.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c25
-rw-r--r--arch/powerpc/mm/hugetlbpage-radix.c17
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c145
-rw-r--r--arch/powerpc/mm/numa.c16
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c47
-rw-r--r--arch/powerpc/mm/pgtable-radix.c18
-rw-r--r--arch/powerpc/net/bpf_jit.h4
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c121
-rw-r--r--arch/powerpc/perf/hv-24x7.c10
-rw-r--r--arch/powerpc/perf/hv-gpci.c10
-rw-r--r--arch/powerpc/perf/imc-pmu.c19
-rw-r--r--arch/powerpc/perf/perf_regs.c6
-rw-r--r--arch/powerpc/platforms/4xx/ocm.c6
-rw-r--r--arch/powerpc/platforms/chrp/Makefile2
-rw-r--r--arch/powerpc/platforms/chrp/nvram.c14
-rw-r--r--arch/powerpc/platforms/chrp/setup.c5
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c2
-rw-r--r--arch/powerpc/platforms/powermac/Makefile2
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c9
-rw-r--r--arch/powerpc/platforms/powermac/setup.c3
-rw-r--r--arch/powerpc/platforms/powermac/time.c2
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c5
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci.c2
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c10
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c2
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c5
-rw-r--r--arch/powerpc/platforms/pseries/pci.c2
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c7
-rw-r--r--arch/riscv/Kconfig7
-rw-r--r--arch/riscv/Makefile2
-rw-r--r--arch/riscv/configs/defconfig8
-rw-r--r--arch/riscv/include/asm/module.h28
-rw-r--r--arch/riscv/include/asm/page.h2
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h6
-rw-r--r--arch/riscv/include/asm/pgtable.h8
-rw-r--r--arch/riscv/include/asm/processor.h2
-rw-r--r--arch/riscv/include/asm/ptrace.h5
-rw-r--r--arch/riscv/include/asm/syscall.h10
-rw-r--r--arch/riscv/include/asm/thread_info.h6
-rw-r--r--arch/riscv/include/asm/uaccess.h1
-rw-r--r--arch/riscv/include/asm/unistd.h2
-rw-r--r--arch/riscv/include/uapi/asm/unistd.h1
-rw-r--r--arch/riscv/kernel/asm-offsets.c1
-rw-r--r--arch/riscv/kernel/entry.S22
-rw-r--r--arch/riscv/kernel/module-sections.c30
-rw-r--r--arch/riscv/kernel/ptrace.c9
-rw-r--r--arch/riscv/kernel/setup.c11
-rw-r--r--arch/riscv/kernel/smp.c43
-rw-r--r--arch/riscv/kernel/smpboot.c6
-rw-r--r--arch/riscv/kernel/vdso.c1
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/riscv/mm/init.c3
-rw-r--r--arch/riscv/net/Makefile1
-rw-r--r--arch/riscv/net/bpf_jit_comp.c1602
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/boot/als.c20
-rw-r--r--arch/s390/boot/boot.h2
-rw-r--r--arch/s390/boot/ipl_parm.c66
-rw-r--r--arch/s390/boot/startup.c1
-rw-r--r--arch/s390/boot/string.c1
-rw-r--r--arch/s390/crypto/des_s390.c4
-rw-r--r--arch/s390/hypfs/hypfs.h6
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c8
-rw-r--r--arch/s390/hypfs/hypfs_diag.c9
-rw-r--r--arch/s390/hypfs/hypfs_diag0c.c6
-rw-r--r--arch/s390/hypfs/hypfs_sprp.c6
-rw-r--r--arch/s390/hypfs/hypfs_vm.c3
-rw-r--r--arch/s390/hypfs/inode.c11
-rw-r--r--arch/s390/include/asm/cpu_mcf.h126
-rw-r--r--arch/s390/include/asm/cpu_mf-insn.h22
-rw-r--r--arch/s390/include/asm/cpu_mf.h18
-rw-r--r--arch/s390/include/asm/diag.h12
-rw-r--r--arch/s390/include/asm/ftrace.h25
-rw-r--r--arch/s390/include/asm/jump_label.h14
-rw-r--r--arch/s390/include/asm/mmu_context.h7
-rw-r--r--arch/s390/include/asm/pci.h1
-rw-r--r--arch/s390/include/asm/perf_event.h2
-rw-r--r--arch/s390/include/asm/pgtable.h5
-rw-r--r--arch/s390/include/asm/pnet.h8
-rw-r--r--arch/s390/include/asm/qdio.h4
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/asm/string.h28
-rw-r--r--arch/s390/include/asm/syscall_wrapper.h135
-rw-r--r--arch/s390/include/asm/uaccess.h1
-rw-r--r--arch/s390/include/asm/unistd.h7
-rw-r--r--arch/s390/include/asm/vx-insn.h8
-rw-r--r--arch/s390/include/uapi/asm/Kbuild1
-rw-r--r--arch/s390/include/uapi/asm/posix_types.h6
-rw-r--r--arch/s390/include/uapi/asm/socket.h117
-rw-r--r--arch/s390/kernel/Makefile6
-rw-r--r--arch/s390/kernel/compat_linux.c235
-rw-r--r--arch/s390/kernel/compat_wrapper.c186
-rw-r--r--arch/s390/kernel/debug.c6
-rw-r--r--arch/s390/kernel/diag.c1
-rw-r--r--arch/s390/kernel/early.c6
-rw-r--r--arch/s390/kernel/entry.S4
-rw-r--r--arch/s390/kernel/head64.S2
-rw-r--r--arch/s390/kernel/kdebugfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c224
-rw-r--r--arch/s390/kernel/perf_cpum_cf_common.c201
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c693
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c1
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c6
-rw-r--r--arch/s390/kernel/setup.c62
-rw-r--r--arch/s390/kernel/smp.c11
-rw-r--r--arch/s390/kernel/swsusp.S10
-rw-r--r--arch/s390/kernel/sys_s390.c16
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl373
-rw-r--r--arch/s390/kernel/sysinfo.c2
-rw-r--r--arch/s390/kernel/vdso.c7
-rw-r--r--arch/s390/kernel/vtime.c4
-rw-r--r--arch/s390/kvm/vsie.c2
-rw-r--r--arch/s390/lib/string.c28
-rw-r--r--arch/s390/mm/extmem.c133
-rw-r--r--arch/s390/mm/kasan_init.c2
-rw-r--r--arch/s390/mm/mmap.c33
-rw-r--r--arch/s390/mm/pgtable.c10
-rw-r--r--arch/s390/net/bpf_jit_comp.c70
-rw-r--r--arch/s390/net/pnet.c11
-rw-r--r--arch/s390/pci/pci.c26
-rw-r--r--arch/s390/pci/pci_debug.c15
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c2
-rw-r--r--arch/sh/boot/dts/Makefile2
-rw-r--r--arch/sh/include/asm/segment.h1
-rw-r--r--arch/sh/include/asm/unistd.h4
-rw-r--r--arch/sh/include/uapi/asm/unistd_32.h403
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl88
-rw-r--r--arch/sh/kernel/syscalls/syscalltbl.sh4
-rw-r--r--arch/sh/kernel/syscalls_32.S2
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/crypto/des_glue.c4
-rw-r--r--arch/sparc/include/asm/uaccess_32.h1
-rw-r--r--arch/sparc/include/asm/uaccess_64.h1
-rw-r--r--arch/sparc/include/asm/unistd.h13
-rw-r--r--arch/sparc/include/uapi/asm/posix_types.h10
-rw-r--r--arch/sparc/include/uapi/asm/socket.h51
-rw-r--r--arch/sparc/kernel/pci_fire.c3
-rw-r--r--arch/sparc/kernel/pci_schizo.c3
-rw-r--r--arch/sparc/kernel/psycho_common.c3
-rw-r--r--arch/sparc/kernel/sbus.c3
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c61
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl116
-rw-r--r--arch/sparc/mm/init_64.c6
-rw-r--r--arch/um/include/asm/a.out-core.h27
-rw-r--r--arch/unicore32/Kconfig1
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/uapi/asm/unistd.h4
-rw-r--r--arch/x86/Kconfig18
-rw-r--r--arch/x86/Kconfig.debug10
-rw-r--r--arch/x86/boot/compressed/head_64.S10
-rw-r--r--arch/x86/boot/compressed/pgtable.h2
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c38
-rw-r--r--arch/x86/crypto/aegis128l-aesni-glue.c38
-rw-r--r--arch/x86/crypto/aegis256-aesni-glue.c38
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c47
-rw-r--r--arch/x86/crypto/crct10dif-pcl-asm_64.S782
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c12
-rw-r--r--arch/x86/crypto/morus1280_glue.c40
-rw-r--r--arch/x86/crypto/morus640_glue.c39
-rw-r--r--arch/x86/crypto/poly1305-sse2-x86_64.S4
-rw-r--r--arch/x86/entry/entry_64_compat.S6
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl85
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl6
-rw-r--r--arch/x86/events/amd/ibs.c13
-rw-r--r--arch/x86/events/amd/iommu.c6
-rw-r--r--arch/x86/events/amd/power.c10
-rw-r--r--arch/x86/events/amd/uncore.c7
-rw-r--r--arch/x86/events/core.c14
-rw-r--r--arch/x86/events/intel/bts.c4
-rw-r--r--arch/x86/events/intel/core.c160
-rw-r--r--arch/x86/events/intel/cstate.c12
-rw-r--r--arch/x86/events/intel/ds.c2
-rw-r--r--arch/x86/events/intel/pt.c14
-rw-r--r--arch/x86/events/intel/rapl.c9
-rw-r--r--arch/x86/events/intel/uncore.c9
-rw-r--r--arch/x86/events/intel/uncore_snb.c9
-rw-r--r--arch/x86/events/intel/uncore_snbep.c4
-rw-r--r--arch/x86/events/msr.c10
-rw-r--r--arch/x86/events/perf_event.h31
-rw-r--r--arch/x86/ia32/ia32_aout.c157
-rw-r--r--arch/x86/include/asm/a.out-core.h67
-rw-r--r--arch/x86/include/asm/alternative.h39
-rw-r--r--arch/x86/include/asm/cpu_device_id.h28
-rw-r--r--arch/x86/include/asm/efi.h1
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h2
-rw-r--r--arch/x86/include/asm/intel-family.h5
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h18
-rw-r--r--arch/x86/include/asm/msr.h16
-rw-r--r--arch/x86/include/asm/page_64_types.h4
-rw-r--r--arch/x86/include/asm/paravirt.h13
-rw-r--r--arch/x86/include/asm/paravirt_types.h5
-rw-r--r--arch/x86/include/asm/pci.h3
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/include/asm/refcount.h22
-rw-r--r--arch/x86/include/asm/resctrl_sched.h4
-rw-r--r--arch/x86/include/asm/uaccess.h33
-rw-r--r--arch/x86/include/asm/unistd.h8
-rw-r--r--arch/x86/include/asm/uv/bios.h8
-rw-r--r--arch/x86/include/uapi/asm/Kbuild1
-rw-r--r--arch/x86/include/uapi/asm/socket.h1
-rw-r--r--arch/x86/kernel/alternative.c7
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c7
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c8
-rw-r--r--arch/x86/kernel/cpu/bugs.c16
-rw-r--r--arch/x86/kernel/cpu/match.c31
-rw-r--r--arch/x86/kernel/cpu/mce/core.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/Makefile4
-rw-r--r--arch/x86/kernel/crash.c1
-rw-r--r--arch/x86/kernel/early_printk.c4
-rw-r--r--arch/x86/kernel/ftrace.c3
-rw-r--r--arch/x86/kernel/hpet.c4
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c5
-rw-r--r--arch/x86/kernel/kprobes/core.c7
-rw-r--r--arch/x86/kernel/kprobes/opt.c4
-rw-r--r--arch/x86/kernel/kvm.c7
-rw-r--r--arch/x86/kernel/process.c12
-rw-r--r--arch/x86/kernel/setup_percpu.c2
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/kernel/traps.c1
-rw-r--r--arch/x86/kernel/tsc.c30
-rw-r--r--arch/x86/kvm/Makefile4
-rw-r--r--arch/x86/kvm/cpuid.c4
-rw-r--r--arch/x86/kvm/hyperv.c7
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c19
-rw-r--r--arch/x86/kvm/svm.c34
-rw-r--r--arch/x86/kvm/trace.h2
-rw-r--r--arch/x86/kvm/vmx/evmcs.c7
-rw-r--r--arch/x86/kvm/vmx/nested.c30
-rw-r--r--arch/x86/kvm/vmx/vmx.c183
-rw-r--r--arch/x86/kvm/vmx/vmx.h10
-rw-r--r--arch/x86/kvm/x86.c17
-rw-r--r--arch/x86/lib/iomem.c33
-rw-r--r--arch/x86/lib/kaslr.c4
-rw-r--r--arch/x86/lib/usercopy_32.c8
-rw-r--r--arch/x86/mm/extable.c59
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c4
-rw-r--r--arch/x86/mm/mpx.c2
-rw-r--r--arch/x86/mm/numa.c4
-rw-r--r--arch/x86/mm/pageattr.c50
-rw-r--r--arch/x86/net/bpf_jit_comp.c46
-rw-r--r--arch/x86/net/bpf_jit_comp32.c121
-rw-r--r--arch/x86/platform/efi/Makefile1
-rw-r--r--arch/x86/platform/efi/early_printk.c240
-rw-r--r--arch/x86/platform/efi/quirks.c6
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c1
-rw-r--r--arch/x86/platform/uv/bios_uv.c23
-rw-r--r--arch/x86/um/Kconfig2
-rw-r--r--arch/x86/xen/enlighten_pv.c5
-rw-r--r--arch/x86/xen/mmu.h4
-rw-r--r--arch/x86/xen/mmu_pv.c8
-rw-r--r--arch/x86/xen/time.c12
-rw-r--r--arch/xtensa/Kconfig5
-rw-r--r--arch/xtensa/boot/dts/Makefile6
-rw-r--r--arch/xtensa/configs/audio_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/cadence_csp_defconfig2
-rw-r--r--arch/xtensa/configs/generic_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/nommu_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/smp_lx200_defconfig3
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/asm/asm-uaccess.h2
-rw-r--r--arch/xtensa/include/asm/uaccess.h1
-rw-r--r--arch/xtensa/include/asm/unistd.h14
-rw-r--r--arch/xtensa/include/uapi/asm/Kbuild1
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h4
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h122
-rw-r--r--arch/xtensa/kernel/head.S5
-rw-r--r--arch/xtensa/kernel/smp.c41
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl78
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--block/bfq-wf2q.c11
-rw-r--r--block/blk-core.c26
-rw-r--r--block/blk-flush.c2
-rw-r--r--block/blk-iolatency.c56
-rw-r--r--block/blk-mq-debugfs-zoned.c2
-rw-r--r--block/blk-mq-debugfs.c6
-rw-r--r--block/blk-mq.c15
-rw-r--r--block/blk-mq.h1
-rw-r--r--block/blk-wbt.c4
-rw-r--r--crypto/Kconfig14
-rw-r--r--crypto/adiantum.c4
-rw-r--r--crypto/aead.c4
-rw-r--r--crypto/aegis.h7
-rw-r--r--crypto/aegis128.c20
-rw-r--r--crypto/aegis128l.c20
-rw-r--r--crypto/aegis256.c20
-rw-r--r--crypto/af_alg.c40
-rw-r--r--crypto/ahash.c42
-rw-r--r--crypto/algapi.c63
-rw-r--r--crypto/arc4.c87
-rw-r--r--crypto/authenc.c14
-rw-r--r--crypto/authencesn.c2
-rw-r--r--crypto/cbc.c131
-rw-r--r--crypto/ccm.c78
-rw-r--r--crypto/cfb.c139
-rw-r--r--crypto/chacha20poly1305.c37
-rw-r--r--crypto/crypto_null.c57
-rw-r--r--crypto/crypto_user_stat.c4
-rw-r--r--crypto/ctr.c200
-rw-r--r--crypto/des_generic.c4
-rw-r--r--crypto/ecb.c151
-rw-r--r--crypto/gcm.c75
-rw-r--r--crypto/keywrap.c198
-rw-r--r--crypto/morus1280.c19
-rw-r--r--crypto/morus640.c19
-rw-r--r--crypto/ofb.c202
-rw-r--r--crypto/pcbc.c143
-rw-r--r--crypto/rsa-pkcs1pad.c1
-rw-r--r--crypto/seqiv.c7
-rw-r--r--crypto/shash.c27
-rw-r--r--crypto/skcipher.c158
-rw-r--r--crypto/sm3_generic.c2
-rw-r--r--crypto/streebog_generic.c2
-rw-r--r--crypto/testmgr.c2732
-rw-r--r--crypto/testmgr.h10771
-rw-r--r--crypto/tgr192.c6
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig11
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_dbg.c30
-rw-r--r--drivers/acpi/acpi_video.c20
-rw-r--r--drivers/acpi/acpica/acapps.h4
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconvert.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h5
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h3
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h4
-rw-r--r--drivers/acpi/acpica/acmacros.h4
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h3
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h17
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dbdisply.c4
-rw-r--r--drivers/acpi/acpica/dbexec.c4
-rw-r--r--drivers/acpi/acpica/dbhistry.c2
-rw-r--r--drivers/acpi/acpica/dbnames.c2
-rw-r--r--drivers/acpi/acpica/dbobject.c2
-rw-r--r--drivers/acpi/acpica/dbtest.c164
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsdebug.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c9
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c28
-rw-r--r--drivers/acpi/acpica/dspkginit.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c4
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c4
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c4
-rw-r--r--drivers/acpi/acpica/evrgnini.c21
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c6
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconcat.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c2
-rw-r--r--drivers/acpi/acpica/exfield.c71
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c10
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exserial.c4
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/extrace.c2
-rw-r--r--drivers/acpi/acpica/exutils.c4
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c98
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c4
-rw-r--r--drivers/acpi/acpica/nsparse.c68
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c4
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c195
-rw-r--r--drivers/acpi/acpica/psobject.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c10
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c14
-rw-r--r--drivers/acpi/acpica/rsirq.c8
-rw-r--r--drivers/acpi/acpica/rsserial.c10
-rw-r--r--drivers/acpi/acpica/tbdata.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c4
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c6
-rw-r--r--drivers/acpi/acpica/tbxfload.c27
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c20
-rw-r--r--drivers/acpi/acpica/utdelete.c4
-rw-r--r--drivers/acpi/acpica/uterror.c9
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/uthex.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utprint.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c14
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c44
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/apei/Kconfig12
-rw-r--r--drivers/acpi/apei/bert.c23
-rw-r--r--drivers/acpi/apei/einj.c93
-rw-r--r--drivers/acpi/apei/erst.c27
-rw-r--r--drivers/acpi/apei/ghes.c655
-rw-r--r--drivers/acpi/apei/hest.c16
-rw-r--r--drivers/acpi/arm64/iort.c5
-rw-r--r--drivers/acpi/bus.c41
-rw-r--r--drivers/acpi/cppc_acpi.c42
-rw-r--r--drivers/acpi/custom_method.c6
-rw-r--r--drivers/acpi/dptf/Makefile2
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c2
-rw-r--r--drivers/acpi/ec.c208
-rw-r--r--drivers/acpi/ec_sys.c36
-rw-r--r--drivers/acpi/internal.h8
-rw-r--r--drivers/acpi/irq.c4
-rw-r--r--drivers/acpi/nfit/core.c86
-rw-r--r--drivers/acpi/nfit/intel.c8
-rw-r--r--drivers/acpi/numa.c6
-rw-r--r--drivers/acpi/pci_link.c8
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c41
-rw-r--r--drivers/acpi/power.c22
-rw-r--r--drivers/acpi/pptt.c13
-rw-r--r--drivers/acpi/processor_idle.c7
-rw-r--r--drivers/acpi/resource.c4
-rw-r--r--drivers/acpi/tables.c12
-rw-r--r--drivers/acpi/x86/utils.c5
-rw-r--r--drivers/android/Kconfig2
-rw-r--r--drivers/android/binder.c593
-rw-r--r--drivers/android/binder_alloc.c303
-rw-r--r--drivers/android/binder_alloc.h47
-rw-r--r--drivers/android/binder_alloc_selftest.c7
-rw-r--r--drivers/android/binder_internal.h9
-rw-r--r--drivers/android/binder_trace.h2
-rw-r--r--drivers/android/binderfs.c296
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_mvebu.c87
-rw-r--r--drivers/ata/libahci_platform.c13
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/pata_macio.c9
-rw-r--r--drivers/ata/sata_fsl.c4
-rw-r--r--drivers/ata/sata_inic162x.c22
-rw-r--r--drivers/atm/he.c41
-rw-r--r--drivers/atm/idt77252.c16
-rw-r--r--drivers/auxdisplay/ht16k33.c2
-rw-r--r--drivers/base/base.h12
-rw-r--r--drivers/base/bus.c66
-rw-r--r--drivers/base/cacheinfo.c6
-rw-r--r--drivers/base/class.c14
-rw-r--r--drivers/base/component.c206
-rw-r--r--drivers/base/core.c246
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/dd.c188
-rw-r--r--drivers/base/devcon.c62
-rw-r--r--drivers/base/firmware_loader/Makefile4
-rw-r--r--drivers/base/firmware_loader/builtin/.gitignore (renamed from firmware/.gitignore)0
-rw-r--r--drivers/base/firmware_loader/builtin/Makefile (renamed from firmware/Makefile)0
-rw-r--r--drivers/base/firmware_loader/fallback_table.c5
-rw-r--r--drivers/base/firmware_loader/main.c8
-rw-r--r--drivers/base/platform.c21
-rw-r--r--drivers/base/power/clock_ops.c13
-rw-r--r--drivers/base/power/common.c2
-rw-r--r--drivers/base/power/domain.c13
-rw-r--r--drivers/base/power/main.c26
-rw-r--r--drivers/base/power/runtime.c190
-rw-r--r--drivers/base/power/sysfs.c17
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/base/regmap/regcache-rbtree.c2
-rw-r--r--drivers/base/regmap/regmap-irq.c107
-rw-r--r--drivers/base/swnode.c23
-rw-r--r--drivers/base/test/test_async_driver_probe.c261
-rw-r--r--drivers/bcma/bcma_private.h9
-rw-r--r--drivers/bcma/driver_gpio.c2
-rw-r--r--drivers/bcma/host_pci.c2
-rw-r--r--drivers/bcma/host_soc.c4
-rw-r--r--drivers/bcma/main.c45
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c35
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c5
-rw-r--r--drivers/block/nbd.c5
-rw-r--r--drivers/block/null_blk.h1
-rw-r--r--drivers/block/rbd.c54
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/zram/zram_drv.c90
-rw-r--r--drivers/block/zram/zram_drv.h5
-rw-r--r--drivers/bluetooth/Kconfig4
-rw-r--r--drivers/bluetooth/btmrvl_drv.h2
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c43
-rw-r--r--drivers/bluetooth/btmtkuart.c538
-rw-r--r--drivers/bluetooth/btqca.c19
-rw-r--r--drivers/bluetooth/btqca.h8
-rw-r--r--drivers/bluetooth/btqcomsmd.c31
-rw-r--r--drivers/bluetooth/btrtl.c10
-rw-r--r--drivers/bluetooth/btusb.c75
-rw-r--r--drivers/bluetooth/h4_recv.h7
-rw-r--r--drivers/bluetooth/hci_h4.c4
-rw-r--r--drivers/bluetooth/hci_ldisc.c24
-rw-r--r--drivers/bluetooth/hci_qca.c121
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-allocator.c11
-rw-r--r--drivers/bus/fsl-mc/mc-io.c13
-rw-r--r--drivers/bus/hisi_lpc.c5
-rw-r--r--drivers/bus/imx-weim.c70
-rw-r--r--drivers/bus/ti-sysc.c6
-rw-r--r--drivers/char/Kconfig19
-rw-r--r--drivers/char/Makefile6
-rw-r--r--drivers/char/agp/efficeon-agp.c2
-rw-r--r--drivers/char/applicom.c35
-rw-r--r--drivers/char/efirtc.c23
-rw-r--r--drivers/char/generic_nvram.c159
-rw-r--r--drivers/char/hpet.c9
-rw-r--r--drivers/char/hw_random/Kconfig15
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c18
-rw-r--r--drivers/char/hw_random/optee-rng.c306
-rw-r--r--drivers/char/hw_random/virtio-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c173
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c25
-rw-r--r--drivers/char/lp.c4
-rw-r--r--drivers/char/mbcs.c1
-rw-r--r--drivers/char/mwave/mwavedd.c7
-rw-r--r--drivers/char/nvram.c673
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/at91/at91sam9x5.c5
-rw-r--r--drivers/clk/at91/sama5d2.c4
-rw-r--r--drivers/clk/at91/sama5d4.c2
-rw-r--r--drivers/clk/clk-versaclock5.c4
-rw-r--r--drivers/clk/clk.c16
-rw-r--r--drivers/clk/imx/clk-frac-pll.c5
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c2
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c4
-rw-r--r--drivers/clk/qcom/Kconfig1
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c14
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c2
-rw-r--r--drivers/clk/socfpga/clk-s10.c20
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c2
-rw-r--r--drivers/clk/tegra/Kconfig5
-rw-r--r--drivers/clk/tegra/Makefile2
-rw-r--r--drivers/clk/tegra/clk-dfll.c459
-rw-r--r--drivers/clk/tegra/clk-dfll.h6
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c524
-rw-r--r--drivers/clk/tegra/cvb.c12
-rw-r--r--drivers/clk/tegra/cvb.h7
-rw-r--r--drivers/clk/ti/divider.c11
-rw-r--r--drivers/clk/zynqmp/clkc.c4
-rw-r--r--drivers/clocksource/Kconfig22
-rw-r--r--drivers/clocksource/Makefile7
-rw-r--r--drivers/clocksource/arm_arch_timer.c55
-rw-r--r--drivers/clocksource/exynos_mct.c48
-rw-r--r--drivers/clocksource/timer-cs5535.c (renamed from drivers/clocksource/cs5535-clockevt.c)0
-rw-r--r--drivers/clocksource/timer-milbeaut.c161
-rw-r--r--drivers/clocksource/timer-pxa.c (renamed from drivers/clocksource/pxa_timer.c)0
-rw-r--r--drivers/clocksource/timer-riscv.c23
-rw-r--r--drivers/clocksource/timer-sun5i.c10
-rw-r--r--drivers/clocksource/timer-tango-xtal.c (renamed from drivers/clocksource/tango_xtal.c)0
-rw-r--r--drivers/clocksource/timer-tegra20.c370
-rw-r--r--drivers/clocksource/timer-ti-dm.c5
-rw-r--r--drivers/cpufreq/Kconfig3
-rw-r--r--drivers/cpufreq/Kconfig.arm20
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/arm_big_little.c2
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c206
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c65
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c33
-rw-r--r--drivers/cpufreq/cpufreq.c146
-rw-r--r--drivers/cpufreq/cpufreq_stats.c16
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c5
-rw-r--r--drivers/cpufreq/e_powersaver.c5
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c25
-rw-r--r--drivers/cpufreq/intel_pstate.c105
-rw-r--r--drivers/cpufreq/longhaul.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c16
-rw-r--r--drivers/cpufreq/omap-cpufreq.c4
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c10
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c53
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c22
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c15
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c67
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c61
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c21
-rw-r--r--drivers/cpufreq/speedstep-ich.c3
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c46
-rw-r--r--drivers/cpuidle/Kconfig11
-rw-r--r--drivers/cpuidle/dt_idle_states.c15
-rw-r--r--drivers/cpuidle/governors/Makefile1
-rw-r--r--drivers/cpuidle/governors/teo.c444
-rw-r--r--drivers/cpuidle/poll_state.c2
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c93
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h4
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h1
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.c4
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.h4
-rw-r--r--drivers/crypto/atmel-tdes.c2
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c326
-rw-r--r--drivers/crypto/bcm/Makefile2
-rw-r--r--drivers/crypto/bcm/cipher.c54
-rw-r--r--drivers/crypto/bcm/cipher.h4
-rw-r--r--drivers/crypto/bcm/util.c40
-rw-r--r--drivers/crypto/bcm/util.h6
-rw-r--r--drivers/crypto/caam/Kconfig1
-rw-r--r--drivers/crypto/caam/caamalg.c240
-rw-r--r--drivers/crypto/caam/caamalg_desc.c18
-rw-r--r--drivers/crypto/caam/caamalg_qi.c29
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c85
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h2
-rw-r--r--drivers/crypto/caam/caamhash.c444
-rw-r--r--drivers/crypto/caam/caamhash_desc.c68
-rw-r--r--drivers/crypto/caam/caamhash_desc.h8
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/caam/ctrl.c25
-rw-r--r--drivers/crypto/caam/desc.h1
-rw-r--r--drivers/crypto/caam/error.c6
-rw-r--r--drivers/crypto/caam/error.h9
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/key_gen.c30
-rw-r--r--drivers/crypto/caam/qi.c4
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c4
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c7
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.c27
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.h5
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c10
-rw-r--r--drivers/crypto/cavium/zip/zip_main.c58
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c2
-rw-r--r--drivers/crypto/ccp/ccp-debugfs.c36
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c6
-rw-r--r--drivers/crypto/ccp/ccp-ops.c2
-rw-r--r--drivers/crypto/ccp/psp-dev.c37
-rw-r--r--drivers/crypto/ccp/psp-dev.h2
-rw-r--r--drivers/crypto/ccp/sp-dev.c2
-rw-r--r--drivers/crypto/ccp/sp-dev.h2
-rw-r--r--drivers/crypto/ccp/sp-pci.c6
-rw-r--r--drivers/crypto/ccp/sp-platform.c2
-rw-r--r--drivers/crypto/ccree/cc_aead.c40
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c87
-rw-r--r--drivers/crypto/ccree/cc_cipher.c10
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c22
-rw-r--r--drivers/crypto/ccree/cc_debugfs.h8
-rw-r--r--drivers/crypto/ccree/cc_driver.c20
-rw-r--r--drivers/crypto/ccree/cc_driver.h2
-rw-r--r--drivers/crypto/ccree/cc_pm.c13
-rw-r--r--drivers/crypto/ccree/cc_pm.h3
-rw-r--r--drivers/crypto/chelsio/Makefile2
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c12
-rw-r--r--drivers/crypto/chelsio/chcr_core.h2
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c42
-rw-r--r--drivers/crypto/chelsio/chtls/Makefile3
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c6
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c12
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c1
-rw-r--r--drivers/crypto/hifn_795x.c3
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c4
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c15
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c2
-rw-r--r--drivers/crypto/ixp4xx_crypto.c10
-rw-r--r--drivers/crypto/marvell/cipher.c4
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c16
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/omap-des.c2
-rw-r--r--drivers/crypto/picoxcell_crypto.c3
-rw-r--r--drivers/crypto/qat/qat_c3xxx/Makefile2
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_drv.c5
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/Makefile2
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_drv.c5
-rw-r--r--drivers/crypto/qat/qat_c62x/Makefile2
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_drv.c5
-rw-r--r--drivers/crypto/qat/qat_c62xvf/Makefile2
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_drv.c5
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c12
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.c7
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.c7
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_debug.c15
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c68
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/Makefile2
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_drv.c5
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/Makefile2
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_drv.c5
-rw-r--r--drivers/crypto/qce/ablkcipher.c4
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.h4
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c41
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c2
-rw-r--r--drivers/crypto/s5p-sss.c64
-rw-r--r--drivers/crypto/stm32/stm32-hash.c2
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c2
-rw-r--r--drivers/crypto/talitos.c28
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c26
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c2
-rw-r--r--drivers/dio/dio.c4
-rw-r--r--drivers/dma/at_xdmac.c19
-rw-r--r--drivers/dma/bcm2835-dma.c70
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dmatest.c32
-rw-r--r--drivers/dma/imx-dma.c8
-rw-r--r--drivers/dma/imx-sdma.c8
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mxs-dma.c6
-rw-r--r--drivers/dma/xgene-dma.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c14
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c6
-rw-r--r--drivers/edac/altera_edac.h4
-rw-r--r--drivers/extcon/Kconfig8
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-ptn5150.c339
-rw-r--r--drivers/firewire/sbp2.c5
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/arm_scmi/bus.c9
-rw-r--r--drivers/firmware/arm_sdei.c68
-rw-r--r--drivers/firmware/efi/Kconfig6
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/apple-properties.c13
-rw-r--r--drivers/firmware/efi/arm-init.c6
-rw-r--r--drivers/firmware/efi/arm-runtime.c11
-rw-r--r--drivers/firmware/efi/capsule-loader.c4
-rw-r--r--drivers/firmware/efi/capsule.c4
-rw-r--r--drivers/firmware/efi/cper-arm.c14
-rw-r--r--drivers/firmware/efi/cper.c27
-rw-r--r--drivers/firmware/efi/dev-path-parser.c9
-rw-r--r--drivers/firmware/efi/earlycon.c206
-rw-r--r--drivers/firmware/efi/efi-bgrt.c5
-rw-r--r--drivers/firmware/efi/efi-pstore.c2
-rw-r--r--drivers/firmware/efi/efi.c4
-rw-r--r--drivers/firmware/efi/efibc.c10
-rw-r--r--drivers/firmware/efi/efivars.c58
-rw-r--r--drivers/firmware/efi/esrt.c1
-rw-r--r--drivers/firmware/efi/fake_mem.c16
-rw-r--r--drivers/firmware/efi/libstub/Makefile4
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c8
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c6
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c6
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c15
-rw-r--r--drivers/firmware/efi/libstub/efistub.h12
-rw-r--r--drivers/firmware/efi/libstub/fdt.c115
-rw-r--r--drivers/firmware/efi/libstub/gop.c4
-rw-r--r--drivers/firmware/efi/libstub/random.c6
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c4
-rw-r--r--drivers/firmware/efi/libstub/tpm.c4
-rw-r--r--drivers/firmware/efi/memattr.c7
-rw-r--r--drivers/firmware/efi/runtime-map.c3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c55
-rw-r--r--drivers/firmware/efi/test/efi_test.c1
-rw-r--r--drivers/firmware/efi/test/efi_test.h2
-rw-r--r--drivers/firmware/efi/tpm.c5
-rw-r--r--drivers/firmware/efi/vars.c15
-rw-r--r--drivers/firmware/imx/misc.c38
-rw-r--r--drivers/firmware/imx/scu-pd.c1
-rw-r--r--drivers/firmware/raspberrypi.c11
-rw-r--r--drivers/firmware/tegra/Makefile3
-rw-r--r--drivers/firmware/tegra/bpmp-private.h34
-rw-r--r--drivers/firmware/tegra/bpmp-tegra186.c305
-rw-r--r--drivers/firmware/tegra/bpmp-tegra210.c243
-rw-r--r--drivers/firmware/tegra/bpmp.c376
-rw-r--r--drivers/firmware/ti_sci.c21
-rw-r--r--drivers/firmware/xilinx/Kconfig1
-rw-r--r--drivers/firmware/xilinx/zynqmp.c166
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/altera-ps-spi.c2
-rw-r--r--drivers/fpga/stratix10-soc.c5
-rw-r--r--drivers/gnss/Kconfig13
-rw-r--r--drivers/gnss/Makefile3
-rw-r--r--drivers/gnss/core.c1
-rw-r--r--drivers/gnss/mtk.c152
-rw-r--r--drivers/gnss/sirf.c256
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c4
-rw-r--r--drivers/gpio/gpio-eic-sprd.c14
-rw-r--r--drivers/gpio/gpio-mt7621.c20
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c26
-rw-r--r--drivers/gpio/gpio-pxa.c1
-rw-r--r--drivers/gpio/gpio-vf610.c5
-rw-r--r--drivers/gpio/gpiolib-acpi.c24
-rw-r--r--drivers/gpio/gpiolib-of.c2
-rw-r--r--drivers/gpio/gpiolib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c76
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c23
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c34
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c48
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c9
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c3
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c133
-rw-r--r--drivers/gpu/drm/drm_lease.c3
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c30
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c75
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c23
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c38
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h18
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h6
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c240
-rw-r--r--drivers/gpu/drm/i915/intel_display.c50
-rw-r--r--drivers/gpu/drm/i915/intel_display.h16
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h10
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c45
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c38
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c6
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h9
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c25
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c4
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c23
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c14
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c22
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c26
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c27
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c14
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c1
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c15
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h11
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c14
-rw-r--r--drivers/gpu/drm/vkms/vkms_crc.c3
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c7
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h2
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c6
-rw-r--r--drivers/gpu/vga/Kconfig1
-rw-r--r--drivers/hid/hid-core.c23
-rw-r--r--drivers/hid/hid-debug.c120
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hv/channel.c13
-rw-r--r--drivers/hv/channel_mgmt.c18
-rw-r--r--drivers/hv/hv_balloon.c31
-rw-r--r--drivers/hv/hyperv_vmbus.h4
-rw-r--r--drivers/hv/ring_buffer.c45
-rw-r--r--drivers/hv/vmbus_drv.c177
-rw-r--r--drivers/hwmon/ad7418.c78
-rw-r--r--drivers/hwmon/adm1029.c67
-rw-r--r--drivers/hwmon/adt7462.c4
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c97
-rw-r--r--drivers/hwmon/f71882fg.c2
-rw-r--r--drivers/hwmon/gl518sm.c120
-rw-r--r--drivers/hwmon/gl520sm.c184
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/hih6130.c21
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/i5500_temp.c14
-rw-r--r--drivers/hwmon/i5k_amb.c12
-rw-r--r--drivers/hwmon/ibmaem.c15
-rw-r--r--drivers/hwmon/ibmpex.c14
-rw-r--r--drivers/hwmon/iio_hwmon.c2
-rw-r--r--drivers/hwmon/ina209.c151
-rw-r--r--drivers/hwmon/ina3221.c36
-rw-r--r--drivers/hwmon/jc42.c8
-rw-r--r--drivers/hwmon/k8temp.c12
-rw-r--r--drivers/hwmon/lineage-pem.c77
-rw-r--r--drivers/hwmon/lm73.c34
-rw-r--r--drivers/hwmon/lm75.c6
-rw-r--r--drivers/hwmon/lm77.c45
-rw-r--r--drivers/hwmon/lm80.c142
-rw-r--r--drivers/hwmon/lm83.c62
-rw-r--r--drivers/hwmon/lm85.c43
-rw-r--r--drivers/hwmon/lm90.c12
-rw-r--r--drivers/hwmon/lm92.c39
-rw-r--r--drivers/hwmon/lm93.c630
-rw-r--r--drivers/hwmon/lm95241.c8
-rw-r--r--drivers/hwmon/lm95245.c8
-rw-r--r--drivers/hwmon/ltc2990.c32
-rw-r--r--drivers/hwmon/ltc4151.c11
-rw-r--r--drivers/hwmon/ltc4222.c56
-rw-r--r--drivers/hwmon/ltc4245.c8
-rw-r--r--drivers/hwmon/ltc4261.c28
-rw-r--r--drivers/hwmon/max16065.c225
-rw-r--r--drivers/hwmon/max1619.c33
-rw-r--r--drivers/hwmon/max31722.c8
-rw-r--r--drivers/hwmon/max31790.c6
-rw-r--r--drivers/hwmon/max6639.c92
-rw-r--r--drivers/hwmon/max6642.c31
-rw-r--r--drivers/hwmon/max6650.c6
-rw-r--r--drivers/hwmon/mc13783-adc.c49
-rw-r--r--drivers/hwmon/nct6775.c15
-rw-r--r--drivers/hwmon/nct7904.c10
-rw-r--r--drivers/hwmon/nsa320-hwmon.c8
-rw-r--r--drivers/hwmon/ntc_thermistor.c106
-rw-r--r--drivers/hwmon/occ/common.c27
-rw-r--r--drivers/hwmon/occ/common.h3
-rw-r--r--drivers/hwmon/occ/p8_i2c.c3
-rw-r--r--drivers/hwmon/occ/p9_sbe.c3
-rw-r--r--drivers/hwmon/occ/sysfs.c13
-rw-r--r--drivers/hwmon/pc87360.c427
-rw-r--r--drivers/hwmon/pc87427.c317
-rw-r--r--drivers/hwmon/pmbus/pmbus.c64
-rw-r--r--drivers/hwmon/pmbus/tps53679.c9
-rw-r--r--drivers/hwmon/powr1220.c144
-rw-r--r--drivers/hwmon/pwm-fan.c43
-rw-r--r--drivers/hwmon/sch5627.c146
-rw-r--r--drivers/hwmon/sch5636.c202
-rw-r--r--drivers/hwmon/scmi-hwmon.c2
-rw-r--r--drivers/hwmon/scpi-hwmon.c4
-rw-r--r--drivers/hwmon/sht15.c34
-rw-r--r--drivers/hwmon/sht21.c17
-rw-r--r--drivers/hwmon/sht3x.c50
-rw-r--r--drivers/hwmon/smsc47b397.c24
-rw-r--r--drivers/hwmon/stts751.c64
-rw-r--r--drivers/hwmon/tc654.c60
-rw-r--r--drivers/hwmon/tc74.c4
-rw-r--r--drivers/hwmon/tmp102.c4
-rw-r--r--drivers/hwmon/tmp103.c20
-rw-r--r--drivers/hwmon/tmp421.c6
-rw-r--r--drivers/hwmon/vexpress-hwmon.c25
-rw-r--r--drivers/hwmon/via-cputemp.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c119
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h6
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c12
-rw-r--r--drivers/hwtracing/coresight/coresight.c60
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c4
-rw-r--r--drivers/hwtracing/intel_th/core.c6
-rw-r--r--drivers/hwtracing/intel_th/gth.c4
-rw-r--r--drivers/hwtracing/intel_th/pti.c16
-rw-r--r--drivers/hwtracing/intel_th/sth.c4
-rw-r--r--drivers/hwtracing/stm/core.c11
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c12
-rw-r--r--drivers/i2c/busses/i2c-cadence.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c13
-rw-r--r--drivers/i2c/busses/i2c-tegra.c15
-rw-r--r--drivers/i2c/i2c-dev.c6
-rw-r--r--drivers/i3c/master.c2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c26
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c4
-rw-r--r--drivers/ide/ide-atapi.c9
-rw-r--r--drivers/ide/ide-io.c61
-rw-r--r--drivers/ide/ide-park.c2
-rw-r--r--drivers/ide/ide-probe.c23
-rw-r--r--drivers/ide/ide-proc.c2
-rw-r--r--drivers/idle/intel_idle.c1
-rw-r--r--drivers/iio/accel/adxl345_core.c4
-rw-r--r--drivers/iio/accel/mma8452.c105
-rw-r--r--drivers/iio/accel/st_accel_core.c171
-rw-r--r--drivers/iio/adc/Kconfig80
-rw-r--r--drivers/iio/adc/Makefile7
-rw-r--r--drivers/iio/adc/ad7476.c20
-rw-r--r--drivers/iio/adc/ad7606.c (renamed from drivers/staging/iio/adc/ad7606.c)272
-rw-r--r--drivers/iio/adc/ad7606.h (renamed from drivers/staging/iio/adc/ad7606.h)17
-rw-r--r--drivers/iio/adc/ad7606_par.c (renamed from drivers/staging/iio/adc/ad7606_par.c)46
-rw-r--r--drivers/iio/adc/ad7606_spi.c (renamed from drivers/staging/iio/adc/ad7606_spi.c)35
-rw-r--r--drivers/iio/adc/ad7768-1.c655
-rw-r--r--drivers/iio/adc/axp288_adc.c76
-rw-r--r--drivers/iio/adc/exynos_adc.c19
-rw-r--r--drivers/iio/adc/ingenic-adc.c364
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c15
-rw-r--r--drivers/iio/adc/meson_saradc.c33
-rw-r--r--drivers/iio/adc/npcm_adc.c335
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c10
-rw-r--r--drivers/iio/adc/ti-ads124s08.c376
-rw-r--r--drivers/iio/adc/ti-ads8688.c3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c5
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c4
-rw-r--r--drivers/iio/chemical/Kconfig21
-rw-r--r--drivers/iio/chemical/Makefile3
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c7
-rw-r--r--drivers/iio/chemical/bme680_i2c.c7
-rw-r--r--drivers/iio/chemical/bme680_spi.c8
-rw-r--r--drivers/iio/chemical/pms7003.c340
-rw-r--r--drivers/iio/chemical/sgp30.c591
-rw-r--r--drivers/iio/chemical/sps30.c548
-rw-r--r--drivers/iio/dac/Kconfig16
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5686-spi.c9
-rw-r--r--drivers/iio/dac/ad5686.c44
-rw-r--r--drivers/iio/dac/ad5686.h4
-rw-r--r--drivers/iio/dac/ad5696-i2c.c2
-rw-r--r--drivers/iio/dac/ad5758.c2
-rw-r--r--drivers/iio/dac/ti-dac7612.c184
-rw-r--r--drivers/iio/frequency/ad9523.c7
-rw-r--r--drivers/iio/imu/bmi160/bmi160.h11
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c317
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c5
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c31
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c12
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c2
-rw-r--r--drivers/iio/industrialio-core.c5
-rw-r--r--drivers/iio/light/Kconfig10
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/isl29018.c46
-rw-r--r--drivers/iio/light/max44009.c555
-rw-r--r--drivers/iio/magnetometer/mag3110.c94
-rw-r--r--drivers/iio/pressure/Kconfig2
-rw-r--r--drivers/iio/pressure/st_pressure.h2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c69
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c5
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c5
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/core_priv.h1
-rw-r--r--drivers/infiniband/core/device.c13
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/rdma_core.h2
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c11
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c62
-rw-r--r--drivers/infiniband/core/uverbs_main.c26
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c8
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c3
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c32
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c27
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c4
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c37
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h2
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c3
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c77
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h10
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c11
-rw-r--r--drivers/infiniband/hw/mlx5/main.c95
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h5
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c39
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c14
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h35
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c6
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c10
-rw-r--r--drivers/input/joystick/xpad.c3
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/cap11xx.c35
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c4
-rw-r--r--drivers/input/keyboard/matrix_keypad.c2
-rw-r--r--drivers/input/keyboard/qt2160.c69
-rw-r--r--drivers/input/keyboard/st-keyscan.c4
-rw-r--r--drivers/input/misc/apanel.c24
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/misc/pwm-vibra.c19
-rw-r--r--drivers/input/misc/uinput.c5
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/mouse/elantech.c9
-rw-r--r--drivers/input/serio/olpc_apsp.c17
-rw-r--r--drivers/input/serio/ps2-gpio.c1
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c4
-rw-r--r--drivers/interconnect/Kconfig15
-rw-r--r--drivers/interconnect/Makefile6
-rw-r--r--drivers/interconnect/core.c799
-rw-r--r--drivers/interconnect/qcom/Kconfig13
-rw-r--r--drivers/interconnect/qcom/Makefile5
-rw-r--r--drivers/interconnect/qcom/sdm845.c838
-rw-r--r--drivers/iommu/amd_iommu.c19
-rw-r--r--drivers/iommu/dmar.c7
-rw-r--r--drivers/iommu/exynos-iommu.c1
-rw-r--r--drivers/iommu/intel-iommu.c13
-rw-r--r--drivers/iommu/mtk_iommu_v1.c9
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/rockchip-iommu.c3
-rw-r--r--drivers/irqchip/Kconfig19
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c10
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c77
-rw-r--r--drivers/irqchip/irq-davinci-aintc.c163
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c260
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c154
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c2
-rw-r--r--drivers/irqchip/irq-i8259.c9
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c115
-rw-r--r--drivers/irqchip/irq-ls1x.c192
-rw-r--r--drivers/irqchip/irq-madera.c2
-rw-r--r--drivers/irqchip/irq-mmp.c6
-rw-r--r--drivers/irqchip/irq-sifive-plic.c116
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c40
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c6
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c2
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c6
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/netjet.c6
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/isdn/hisax/st5481.h2
-rw-r--r--drivers/isdn/i4l/isdn_common.c7
-rw-r--r--drivers/isdn/i4l/isdn_tty.c8
-rw-r--r--drivers/isdn/i4l/isdn_v110.c2
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c2
-rw-r--r--drivers/isdn/mISDN/socket.c2
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/leds/led-core.c30
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/leds/leds-lp55xx-common.c4
-rw-r--r--drivers/leds/trigger/ledtrig-oneshot.c38
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c99
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c34
-rw-r--r--drivers/macintosh/via-cuda.c8
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c4
-rw-r--r--drivers/mailbox/mailbox.c1
-rw-r--r--drivers/md/dm-crypt.c27
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin-metadata.h2
-rw-r--r--drivers/md/dm-thin.c65
-rw-r--r--drivers/md/dm.c41
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/md/raid1.c28
-rw-r--r--drivers/md/raid5-cache.c33
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c4
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c2
-rw-r--r--drivers/media/platform/vim2m.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c24
-rw-r--r--drivers/mfd/Kconfig3
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/mfd/axp20x.c126
-rw-r--r--drivers/mfd/bcm2835-pm.c92
-rw-r--r--drivers/mfd/bd9571mwv.c1
-rw-r--r--drivers/mfd/cros_ec_dev.c1
-rw-r--r--drivers/mfd/db8500-prcmu.c4
-rw-r--r--drivers/mfd/exynos-lpass.c4
-rw-r--r--drivers/mfd/madera-core.c5
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/mc13xxx-core.c4
-rw-r--r--drivers/mfd/mt6397-core.c3
-rw-r--r--drivers/mfd/qcom_rpm.c4
-rw-r--r--drivers/mfd/rave-sp.c2
-rw-r--r--drivers/mfd/stmpe.c12
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c5
-rw-r--r--drivers/mfd/tps65218.c24
-rw-r--r--drivers/mfd/tps6586x.c24
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/ad525x_dpot.c24
-rw-r--r--drivers/misc/cardreader/rts5227.c64
-rw-r--r--drivers/misc/cardreader/rts5249.c32
-rw-r--r--drivers/misc/cardreader/rts5260.c136
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c40
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h5
-rw-r--r--drivers/misc/enclosure.c4
-rw-r--r--drivers/misc/fastrpc.c1401
-rw-r--r--drivers/misc/genwqe/card_utils.c4
-rw-r--r--drivers/misc/habanalabs/Kconfig25
-rw-r--r--drivers/misc/habanalabs/Makefile14
-rw-r--r--drivers/misc/habanalabs/asid.c57
-rw-r--r--drivers/misc/habanalabs/command_buffer.c445
-rw-r--r--drivers/misc/habanalabs/command_submission.c780
-rw-r--r--drivers/misc/habanalabs/context.c215
-rw-r--r--drivers/misc/habanalabs/debugfs.c1077
-rw-r--r--drivers/misc/habanalabs/device.c1140
-rw-r--r--drivers/misc/habanalabs/goya/Makefile3
-rw-r--r--drivers/misc/habanalabs/goya/goya.c5391
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h211
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c254
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c2999
-rw-r--r--drivers/misc/habanalabs/habanalabs.h1464
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c461
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c234
-rw-r--r--drivers/misc/habanalabs/hw_queue.c635
-rw-r--r--drivers/misc/habanalabs/hwmon.c458
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h335
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h191
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h61
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h49
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h181
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h227
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h465
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h1372
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h275
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h118
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h653
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h331
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h373
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h1537
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h465
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h1153
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h143
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h53
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h227
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h243
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h447
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h745
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h143
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h83
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h117
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h55
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h1607
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h373
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h347
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h313
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h209
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h227
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h465
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h323
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h887
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h139
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h227
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h179
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h105
-rw-r--r--drivers/misc/habanalabs/include/goya/goya.h45
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_async_events.h186
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_fw_if.h28
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_packets.h129
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h30
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h47
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h15
-rw-r--r--drivers/misc/habanalabs/include/qman_if.h56
-rw-r--r--drivers/misc/habanalabs/irq.c327
-rw-r--r--drivers/misc/habanalabs/memory.c1723
-rw-r--r--drivers/misc/habanalabs/mmu.c906
-rw-r--r--drivers/misc/habanalabs/sysfs.c539
-rw-r--r--drivers/misc/hpilo.c14
-rw-r--r--drivers/misc/ibmvmc.c7
-rw-r--r--drivers/misc/ics932s401.c2
-rw-r--r--drivers/misc/lkdtm/core.c15
-rw-r--r--drivers/misc/lkdtm/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm/perms.c36
-rw-r--r--drivers/misc/mei/Kconfig10
-rw-r--r--drivers/misc/mei/Makefile2
-rw-r--r--drivers/misc/mei/bus-fixup.c16
-rw-r--r--drivers/misc/mei/bus.c22
-rw-r--r--drivers/misc/mei/client.c5
-rw-r--r--drivers/misc/mei/hbm.c19
-rw-r--r--drivers/misc/mei/hdcp/Makefile7
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c849
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h377
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw.h3
-rw-r--r--drivers/misc/mei/pci-me.c6
-rw-r--r--drivers/misc/mic/Kconfig3
-rw-r--r--drivers/misc/mic/bus/scif_bus.h8
-rw-r--r--drivers/misc/mic/bus/vop_bus.h8
-rw-r--r--drivers/misc/mic/card/mic_device.c8
-rw-r--r--drivers/misc/mic/host/mic_boot.c8
-rw-r--r--drivers/misc/mic/scif/scif_map.h4
-rw-r--r--drivers/misc/mic/scif/scif_rma.c2
-rw-r--r--drivers/misc/mic/vop/vop_main.c111
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c51
-rw-r--r--drivers/misc/pvpanic.c4
-rw-r--r--drivers/misc/sgi-gru/grufault.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c3
-rw-r--r--drivers/misc/vmw_balloon.c56
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c9
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.h2
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.h2
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c39
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c63
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.h4
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/block.c37
-rw-r--r--drivers/mmc/core/core.c338
-rw-r--r--drivers/mmc/core/core.h1
-rw-r--r--drivers/mmc/core/host.c50
-rw-r--r--drivers/mmc/core/mmc.c10
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/queue.c9
-rw-r--r--drivers/mmc/core/regulator.c260
-rw-r--r--drivers/mmc/core/sd.c20
-rw-r--r--drivers/mmc/core/sd_ops.c33
-rw-r--r--drivers/mmc/core/sd_ops.h3
-rw-r--r--drivers/mmc/core/sdio.c9
-rw-r--r--drivers/mmc/core/sdio_bus.c3
-rw-r--r--drivers/mmc/core/sdio_io.c29
-rw-r--r--drivers/mmc/core/sdio_ops.h1
-rw-r--r--drivers/mmc/core/slot-gpio.c9
-rw-r--r--drivers/mmc/host/Kconfig6
-rw-r--r--drivers/mmc/host/atmel-mci.c8
-rw-r--r--drivers/mmc/host/bcm2835.c25
-rw-r--r--drivers/mmc/host/cb710-mmc.c42
-rw-r--r--drivers/mmc/host/cqhci.c13
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc-bluefield.c5
-rw-r--r--drivers/mmc/host/jz4740_mmc.c73
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c33
-rw-r--r--drivers/mmc/host/mmc_spi.c3
-rw-r--r--drivers/mmc/host/mmci.c27
-rw-r--r--drivers/mmc/host/mmci.h1
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c2
-rw-r--r--drivers/mmc/host/of_mmc_spi.c22
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi.h2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c19
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c1
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c1
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c2
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c4
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c310
-rw-r--r--drivers/mmc/host/sdhci-iproc.c5
-rw-r--r--drivers/mmc/host/sdhci-omap.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c10
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c140
-rw-r--r--drivers/mmc/host/sdhci-pci.h6
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c1
-rw-r--r--drivers/mmc/host/sdhci-tegra.c286
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c2
-rw-r--r--drivers/mmc/host/sdhci.c32
-rw-r--r--drivers/mmc/host/sdhci.h6
-rw-r--r--drivers/mmc/host/sdhci_am654.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c28
-rw-r--r--drivers/mmc/host/tmio_mmc.h5
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c20
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c3
-rw-r--r--drivers/mtd/chips/gen_probe.c2
-rw-r--r--drivers/mtd/devices/docg3.c7
-rw-r--r--drivers/mtd/devices/m25p80.c9
-rw-r--r--drivers/mtd/devices/mtdram.c2
-rw-r--r--drivers/mtd/devices/powernv_flash.c2
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c4
-rw-r--r--drivers/mtd/mtdcore.c86
-rw-r--r--drivers/mtd/mtdcore.h2
-rw-r--r--drivers/mtd/mtdpart.c39
-rw-r--r--drivers/mtd/nand/raw/Kconfig17
-rw-r--r--drivers/mtd/nand/raw/Makefile2
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c21
-rw-r--r--drivers/mtd/nand/raw/denali.c44
-rw-r--r--drivers/mtd/nand/raw/denali.h1
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c27
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c37
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c13
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c2
-rw-r--r--drivers/mtd/nand/raw/jz4780_bch.c9
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c5
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c1464
-rw-r--r--drivers/mtd/nand/raw/mtk_ecc.c8
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c3
-rw-r--r--drivers/mtd/nand/raw/nand_base.c160
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c3
-rw-r--r--drivers/mtd/nand/raw/omap2.c20
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c20
-rw-r--r--drivers/mtd/nand/raw/r852.c3
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c2073
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c732
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c21
-rw-r--r--drivers/mtd/nand/spi/core.c46
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c83
-rw-r--r--drivers/mtd/nand/spi/macronix.c8
-rw-r--r--drivers/mtd/nand/spi/toshiba.c79
-rw-r--r--drivers/mtd/spi-nor/Kconfig25
-rw-r--r--drivers/mtd/spi-nor/Makefile3
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c74
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c1224
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c3
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c35
-rw-r--r--drivers/net/Kconfig14
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c188
-rw-r--r--drivers/net/bonding/bond_main.c39
-rw-r--r--drivers/net/bonding/bond_netlink.c67
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/caif/caif_serial.c5
-rw-r--r--drivers/net/caif/caif_spi.c30
-rw-r--r--drivers/net/can/dev.c27
-rw-r--r--drivers/net/can/flexcan.c4
-rw-r--r--drivers/net/dsa/b53/b53_common.c94
-rw-r--r--drivers/net/dsa/b53/b53_priv.h5
-rw-r--r--drivers/net/dsa/b53/b53_srab.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c60
-rw-r--r--drivers/net/dsa/bcm_sf2.h8
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c206
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h4
-rw-r--r--drivers/net/dsa/dsa_loop.c2
-rw-r--r--drivers/net/dsa/lan9303-core.c3
-rw-r--r--drivers/net/dsa/lantiq_gswip.c35
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c390
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c150
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h23
-rw-r--r--drivers/net/dsa/microchip/ksz_priv.h15
-rw-r--r--drivers/net/dsa/mt7530.c104
-rw-r--r--drivers/net/dsa/mt7530.h9
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c244
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h8
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c21
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h11
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c51
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h8
-rw-r--r--drivers/net/dsa/qca8k.c21
-rw-r--r--drivers/net/dsa/qca8k.h1
-rw-r--r--drivers/net/dsa/realtek-smi.c18
-rw-r--r--drivers/net/dsa/rtl8366rb.c3
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.c3
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c4
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c12
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c12
-rw-r--r--drivers/net/ethernet/alteon/acenic.c2
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c3
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c61
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c6
-rw-r--r--drivers/net/ethernet/amd/lance.c2
-rw-r--r--drivers/net/ethernet/amd/ni65.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c22
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c8
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/apple/mace.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c25
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c30
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c116
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c68
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c7
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c8
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c33
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c178
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c152
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c31
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h197
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c256
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c12
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c26
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c11
-rw-r--r--drivers/net/ethernet/cadence/macb.h8
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c264
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c25
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c149
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c128
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c219
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c263
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c450
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c336
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/smt.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/srq.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c114
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c252
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c39
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c3
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c68
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c22
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c16
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/fealnx.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c237
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h31
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c429
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h80
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c1
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig31
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile19
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c1604
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h230
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c210
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c597
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h533
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c199
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_msg.c164
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c943
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h55
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c144
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c255
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c47
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c681
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c78
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c97
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c97
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c105
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c808
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h55
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c133
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c155
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c192
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c13
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c28
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h14
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c8
-rw-r--r--drivers/net/ethernet/i825xx/82596.c2
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/Kconfig12
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c64
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h10
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c33
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c25
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c244
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c252
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c129
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c195
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h77
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c812
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c378
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c264
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c82
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c197
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c97
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c94
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c89
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile3
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h34
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c76
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h25
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c1032
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c118
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c32
-rw-r--r--drivers/net/ethernet/jme.c5
-rw-r--r--drivers/net/ethernet/lantiq_etop.c6
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c65
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h15
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c456
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c29
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c26
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c84
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c331
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c350
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c230
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c167
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c982
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c466
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c522
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c205
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mad.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c275
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c477
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c137
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c361
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h260
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c942
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c1330
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c313
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c257
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c157
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c296
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c163
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c37
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c2
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c217
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c27
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h17
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c13
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c68
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c16
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c72
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c246
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h51
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c74
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c201
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c109
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h89
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c413
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c155
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c612
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c181
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c53
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c23
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c346
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c6
-rw-r--r--drivers/net/ethernet/ni/nixge.c130
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c2
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c29
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c261
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c126
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c121
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h56
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c11
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c572
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c13
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c295
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_rdma.c63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c28
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c4
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/realtek/atp.c27
-rw-r--r--drivers/net/ethernet/realtek/r8169.c770
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c12
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c79
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/rocker/rocker.h2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c125
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c15
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c12
-rw-r--r--drivers/net/ethernet/sfc/ef10.c45
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c4
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c56
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h8
-rw-r--r--drivers/net/ethernet/sfc/mtd.c3
-rw-r--r--drivers/net/ethernet/sfc/nic.c4
-rw-r--r--drivers/net/ethernet/sfc/rx.c3
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c2
-rw-r--r--drivers/net/ethernet/sgi/meth.c27
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/socionext/netsec.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c545
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c254
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.c17
-rw-r--r--drivers/net/ethernet/sun/cassini.h15
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig6
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.h6
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c8
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/fddi/defxx.c8
-rw-r--r--drivers/net/fddi/skfp/pcmplc.c1
-rw-r--r--drivers/net/fddi/skfp/skfddi.c8
-rw-r--r--drivers/net/geneve.c21
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c26
-rw-r--r--drivers/net/hyperv/hyperv_net.h12
-rw-r--r--drivers/net/hyperv/netvsc.c4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c167
-rw-r--r--drivers/net/hyperv/rndis_filter.c36
-rw-r--r--drivers/net/ieee802154/mcr20a.c6
-rw-r--r--drivers/net/ipvlan/Makefile3
-rw-r--r--drivers/net/ipvlan/ipvlan.h37
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c105
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c227
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c121
-rw-r--r--drivers/net/macvlan.c12
-rw-r--r--drivers/net/netdevsim/bpf.c5
-rw-r--r--drivers/net/netdevsim/netdev.c23
-rw-r--r--drivers/net/phy/Kconfig12
-rw-r--r--drivers/net/phy/Makefile5
-rw-r--r--drivers/net/phy/amd.c7
-rw-r--r--drivers/net/phy/aquantia.c193
-rw-r--r--drivers/net/phy/aquantia.h16
-rw-r--r--drivers/net/phy/aquantia_hwmon.c250
-rw-r--r--drivers/net/phy/aquantia_main.c283
-rw-r--r--drivers/net/phy/asix.c8
-rw-r--r--drivers/net/phy/at803x.c77
-rw-r--r--drivers/net/phy/bcm-cygnus.c10
-rw-r--r--drivers/net/phy/bcm-phy-lib.c10
-rw-r--r--drivers/net/phy/bcm-phy-lib.h10
-rw-r--r--drivers/net/phy/bcm63xx.c6
-rw-r--r--drivers/net/phy/bcm7xxx.c6
-rw-r--r--drivers/net/phy/bcm87xx.c9
-rw-r--r--drivers/net/phy/broadcom.c6
-rw-r--r--drivers/net/phy/cicada.c7
-rw-r--r--drivers/net/phy/cortina.c15
-rw-r--r--drivers/net/phy/davicom.c7
-rw-r--r--drivers/net/phy/dp83640.c28
-rw-r--r--drivers/net/phy/dp83822.c12
-rw-r--r--drivers/net/phy/dp83848.c12
-rw-r--r--drivers/net/phy/dp83867.c62
-rw-r--r--drivers/net/phy/dp83tc811.c15
-rw-r--r--drivers/net/phy/et1011c.c7
-rw-r--r--drivers/net/phy/fixed_phy.c121
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/intel-xway.c11
-rw-r--r--drivers/net/phy/lxt.c7
-rw-r--r--drivers/net/phy/marvell.c60
-rw-r--r--drivers/net/phy/marvell10g.c237
-rw-r--r--drivers/net/phy/mdio-bcm-iproc.c10
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c6
-rw-r--r--drivers/net/phy/mdio-bitbang.c7
-rw-r--r--drivers/net/phy/mdio-boardinfo.c6
-rw-r--r--drivers/net/phy/mdio-cavium.c7
-rw-r--r--drivers/net/phy/mdio-cavium.h5
-rw-r--r--drivers/net/phy/mdio-gpio.c7
-rw-r--r--drivers/net/phy/mdio-hisi-femac.c16
-rw-r--r--drivers/net/phy/mdio-i2c.c5
-rw-r--r--drivers/net/phy/mdio-i2c.h5
-rw-r--r--drivers/net/phy/mdio-moxart.c7
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c13
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c7
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c5
-rw-r--r--drivers/net/phy/mdio-mux-multiplexer.c122
-rw-r--r--drivers/net/phy/mdio-mux.c7
-rw-r--r--drivers/net/phy/mdio-octeon.c7
-rw-r--r--drivers/net/phy/mdio-sun4i.c7
-rw-r--r--drivers/net/phy/mdio-thunder.c7
-rw-r--r--drivers/net/phy/mdio-xgene.c14
-rw-r--r--drivers/net/phy/mdio-xgene.h14
-rw-r--r--drivers/net/phy/mdio_bus.c22
-rw-r--r--drivers/net/phy/mdio_device.c7
-rw-r--r--drivers/net/phy/meson-gxl.c13
-rw-r--r--drivers/net/phy/micrel.c64
-rw-r--r--drivers/net/phy/microchip.c14
-rw-r--r--drivers/net/phy/mscc.c1
-rw-r--r--drivers/net/phy/national.c7
-rw-r--r--drivers/net/phy/phy-c45.c313
-rw-r--r--drivers/net/phy/phy-core.c354
-rw-r--r--drivers/net/phy/phy.c152
-rw-r--r--drivers/net/phy/phy_device.c410
-rw-r--r--drivers/net/phy/phy_led_triggers.c14
-rw-r--r--drivers/net/phy/phylink.c64
-rw-r--r--drivers/net/phy/qsemi.c7
-rw-r--r--drivers/net/phy/realtek.c23
-rw-r--r--drivers/net/phy/rockchip.c9
-rw-r--r--drivers/net/phy/sfp-bus.c2
-rw-r--r--drivers/net/phy/sfp.c31
-rw-r--r--drivers/net/phy/sfp.h2
-rw-r--r--drivers/net/phy/smsc.c6
-rw-r--r--drivers/net/phy/spi_ks8995.c5
-rw-r--r--drivers/net/phy/ste10Xp.c7
-rw-r--r--drivers/net/phy/swphy.c16
-rw-r--r--drivers/net/phy/teranetics.c9
-rw-r--r--drivers/net/phy/uPD60620.c7
-rw-r--r--drivers/net/phy/vitesse.c9
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c16
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/sb1000.c11
-rw-r--r--drivers/net/tap.c4
-rw-r--r--drivers/net/team/team.c32
-rw-r--r--drivers/net/team/team_mode_loadbalance.c15
-rw-r--r--drivers/net/tun.c22
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/asix_devices.c9
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/cdc_ether.c34
-rw-r--r--drivers/net/usb/lan78xx.c3
-rw-r--r--drivers/net/usb/pegasus.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c31
-rw-r--r--drivers/net/usb/r8152.c5
-rw-r--r--drivers/net/usb/rtl8150.c3
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/veth.c7
-rw-r--r--drivers/net/virtio_net.c182
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/vxlan.c589
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wan/dscc4.c52
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c71
-rw-r--r--drivers/net/wan/ixp4xx_hss.c2
-rw-r--r--drivers/net/wan/lmc/Makefile2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c3
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/wanxl.c7
-rw-r--r--drivers/net/wan/z85230.c2
-rw-r--r--drivers/net/wimax/i2400m/rx.c4
-rw-r--r--drivers/net/wimax/i2400m/usb.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c188
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h31
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c76
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h44
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h33
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c30
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h29
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h135
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c153
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c70
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h48
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c257
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/p2p.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/p2p.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c65
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h16
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c242
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h47
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c83
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c93
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h44
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c197
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h68
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c135
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h121
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h13
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c70
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c247
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/regd.h2
-rw-r--r--drivers/net/wireless/ath/regd_common.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c20
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c23
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c254
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h51
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h16
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c83
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c36
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c35
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c577
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c48
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c135
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c114
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c88
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c81
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c71
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/debug.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c162
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c141
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h180
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h878
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tof.h393
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c1302
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h107
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h106
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c96
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c813
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c654
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c322
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c577
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h262
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c177
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c197
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c519
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.c305
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.h89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c186
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c297
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c254
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c75
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c14
-rw-r--r--drivers/net/wireless/marvell/libertas/debugfs.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c5
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c9
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c38
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h18
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c105
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h70
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile6
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h72
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Kconfig9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Makefile6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c186
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c73
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c215
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c168
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h86
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c578
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c1749
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.h242
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c709
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c483
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h110
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h253
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c80
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h774
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c303
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c102
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c167
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c96
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c210
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_mcu.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c42
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.h2
-rw-r--r--drivers/net/wireless/quantenna/Makefile1
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h19
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c83
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c92
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h24
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c21
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/debug.c31
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/debug.h17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c197
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.h17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h63
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c16
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h28
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/trans.c17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/trans.h17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.c17
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.h17
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c143
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c27
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c93
-rw-r--r--drivers/net/wireless/ray_cs.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c8
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c39
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c51
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c73
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c37
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h85
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c138
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c313
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h34
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c34
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c42
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c138
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c34
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h42
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c95
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c50
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c42
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c92
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h54
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h33
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c48
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c70
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h75
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c121
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c34
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c128
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c260
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c158
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c213
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/stats.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/stats.h26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c33
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h403
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c13
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c106
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c174
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c39
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c9
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h23
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h35
-rw-r--r--drivers/net/wireless/st/cw1200/debug.c26
-rw-r--r--drivers/net/wireless/st/cw1200/fwio.c4
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c1
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c5
-rw-r--r--drivers/net/wireless/ti/wl1251/debugfs.c59
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.c20
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c28
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h10
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c15
-rw-r--r--drivers/net/wireless/virt_wifi.c12
-rw-r--r--drivers/net/xen-netback/hash.c2
-rw-r--r--drivers/net/xen-netback/interface.c7
-rw-r--r--drivers/net/xen-netback/netback.c25
-rw-r--r--drivers/net/xen-netback/xenbus.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h30
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c44
-rw-r--r--drivers/nvdimm/bus.c11
-rw-r--r--drivers/nvdimm/dimm.c6
-rw-r--r--drivers/nvdimm/dimm_devs.c22
-rw-r--r--drivers/nvdimm/nd-core.h4
-rw-r--r--drivers/nvdimm/nd.h1
-rw-r--r--drivers/nvme/host/core.c27
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/multipath.c5
-rw-r--r--drivers/nvme/host/nvme.h6
-rw-r--r--drivers/nvme/host/pci.c217
-rw-r--r--drivers/nvme/host/rdma.c64
-rw-r--r--drivers/nvme/host/tcp.c35
-rw-r--r--drivers/nvme/target/rdma.c15
-rw-r--r--drivers/nvme/target/tcp.c2
-rw-r--r--drivers/nvmem/Kconfig12
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/bcm-ocotp.c37
-rw-r--r--drivers/nvmem/core.c42
-rw-r--r--drivers/nvmem/imx-ocotp.c13
-rw-r--r--drivers/nvmem/sc27xx-efuse.c12
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c86
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c4
-rw-r--r--drivers/of/of_mdio.c9
-rw-r--r--drivers/of/overlay.c3
-rw-r--r--drivers/of/pdt.c1
-rw-r--r--drivers/of/property.c1
-rw-r--r--drivers/opp/core.c103
-rw-r--r--drivers/opp/debugfs.c110
-rw-r--r--drivers/opp/of.c101
-rw-r--r--drivers/opp/opp.h17
-rw-r--r--drivers/parisc/ccio-dma.c11
-rw-r--r--drivers/parisc/dino.c11
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/hppb.c2
-rw-r--r--drivers/parisc/iommu.h55
-rw-r--r--drivers/parisc/iosapic.c17
-rw-r--r--drivers/parisc/lba_pci.c30
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/parport/daisy.c32
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c10
-rw-r--r--drivers/pci/Kconfig22
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c11
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c16
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c6
-rw-r--r--drivers/pci/msi.c61
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.c10
-rw-r--r--drivers/pci/quirks.c5
-rw-r--r--drivers/pci/switch/switchtec.c8
-rw-r--r--drivers/perf/arm-cci.c10
-rw-r--r--drivers/perf/arm-ccn.c6
-rw-r--r--drivers/perf/arm_dsu_pmu.c9
-rw-r--r--drivers/perf/arm_pmu.c15
-rw-r--r--drivers/perf/arm_spe_pmu.c6
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c1
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c1
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c1
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c9
-rw-r--r--drivers/perf/qcom_l2_pmu.c9
-rw-r--r--drivers/perf/qcom_l3_pmu.c8
-rw-r--r--drivers/perf/thunderx2_pmu.c10
-rw-r--r--drivers/perf/xgene_pmu.c6
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-pcie.c4
-rw-r--r--drivers/phy/cadence/Kconfig13
-rw-r--r--drivers/phy/cadence/Makefile1
-rw-r--r--drivers/phy/cadence/cdns-dphy.c391
-rw-r--r--drivers/phy/freescale/Kconfig2
-rw-r--r--drivers/phy/marvell/Kconfig31
-rw-r--r--drivers/phy/marvell/Makefile3
-rw-r--r--drivers/phy/marvell/phy-armada375-usb2.c13
-rw-r--r--drivers/phy/marvell/phy-armada38x-comphy.c237
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c5
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c318
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c278
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-sata.c9
-rw-r--r--drivers/phy/phy-core-mipi-dphy.c8
-rw-r--r--drivers/phy/phy-core.c12
-rw-r--r--drivers/phy/qualcomm/phy-ath79-usb.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c143
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c40
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h19
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c48
-rw-r--r--drivers/phy/ti/Kconfig7
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c4
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c105
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c8
-rw-r--r--drivers/pinctrl/mediatek/Kconfig3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c44
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h2
-rw-r--r--drivers/platform/goldfish/Kconfig4
-rw-r--r--drivers/platform/x86/Kconfig8
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c15
-rw-r--r--drivers/platform/x86/sony-laptop.c8
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c14
-rw-r--r--drivers/powercap/intel_rapl.c2
-rw-r--r--drivers/ptp/Kconfig4
-rw-r--r--drivers/ptp/Makefile4
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/ptp/ptp_clock.c2
-rw-r--r--drivers/ptp/ptp_qoriq.c518
-rw-r--r--drivers/ptp/ptp_qoriq_debugfs.c101
-rw-r--r--drivers/rapidio/devices/tsi721.c22
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c8
-rw-r--r--drivers/rapidio/rio-sysfs.c5
-rw-r--r--drivers/regulator/88pm8607.c38
-rw-r--r--drivers/regulator/Kconfig27
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/act8865-regulator.c5
-rw-r--r--drivers/regulator/act8945a-regulator.c11
-rw-r--r--drivers/regulator/arizona-ldo1.c56
-rw-r--r--drivers/regulator/as3722-regulator.c2
-rw-r--r--drivers/regulator/axp20x-regulator.c27
-rw-r--r--drivers/regulator/bcm590xx-regulator.c4
-rw-r--r--drivers/regulator/bd70528-regulator.c289
-rw-r--r--drivers/regulator/bd718x7-regulator.c215
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c8
-rw-r--r--drivers/regulator/core.c88
-rw-r--r--drivers/regulator/cpcap-regulator.c106
-rw-r--r--drivers/regulator/da9052-regulator.c64
-rw-r--r--drivers/regulator/da9055-regulator.c46
-rw-r--r--drivers/regulator/da9062-regulator.c37
-rw-r--r--drivers/regulator/da9063-regulator.c37
-rw-r--r--drivers/regulator/da9210-regulator.c53
-rw-r--r--drivers/regulator/fan53555.c109
-rw-r--r--drivers/regulator/fixed.c35
-rw-r--r--drivers/regulator/gpio-regulator.c194
-rw-r--r--drivers/regulator/helpers.c125
-rw-r--r--drivers/regulator/hi655x-regulator.c1
-rw-r--r--drivers/regulator/isl6271a-regulator.c13
-rw-r--r--drivers/regulator/lm363x-regulator.c6
-rw-r--r--drivers/regulator/lochnagar-regulator.c7
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/regulator/lp3972.c4
-rw-r--r--drivers/regulator/lp872x.c82
-rw-r--r--drivers/regulator/lp873x-regulator.c51
-rw-r--r--drivers/regulator/lp8755.c6
-rw-r--r--drivers/regulator/lp87565-regulator.c4
-rw-r--r--drivers/regulator/lp8788-buck.c40
-rw-r--r--drivers/regulator/lp8788-ldo.c4
-rw-r--r--drivers/regulator/ltc3676.c65
-rw-r--r--drivers/regulator/max14577-regulator.c1
-rw-r--r--drivers/regulator/max77620-regulator.c12
-rw-r--r--drivers/regulator/max77650-regulator.c498
-rw-r--r--drivers/regulator/max77802-regulator.c6
-rw-r--r--drivers/regulator/mc13783-regulator.c82
-rw-r--r--drivers/regulator/mc13892-regulator.c72
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c4
-rw-r--r--drivers/regulator/mc13xxx.h24
-rw-r--r--drivers/regulator/mcp16502.c2
-rw-r--r--drivers/regulator/mt6311-regulator.c10
-rw-r--r--drivers/regulator/of_regulator.c4
-rw-r--r--drivers/regulator/palmas-regulator.c2
-rw-r--r--drivers/regulator/pv88060-regulator.c62
-rw-r--r--drivers/regulator/pv88080-regulator.c55
-rw-r--r--drivers/regulator/pv88090-regulator.c53
-rw-r--r--drivers/regulator/pwm-regulator.c17
-rw-r--r--drivers/regulator/qcom_smd-regulator.c104
-rw-r--r--drivers/regulator/rk808-regulator.c64
-rw-r--r--drivers/regulator/rt5033-regulator.c4
-rw-r--r--drivers/regulator/s2mpa01.c10
-rw-r--r--drivers/regulator/s2mps11.c6
-rw-r--r--drivers/regulator/s5m8767.c8
-rw-r--r--drivers/regulator/stm32-vrefbuf.c121
-rw-r--r--drivers/regulator/stpmic1_regulator.c300
-rw-r--r--drivers/regulator/tps65218-regulator.c23
-rw-r--r--drivers/regulator/twl-regulator.c7
-rw-r--r--drivers/regulator/twl6030-regulator.c88
-rw-r--r--drivers/regulator/uniphier-regulator.c8
-rw-r--r--drivers/regulator/wm831x-dcdc.c85
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c9
-rw-r--r--drivers/reset/Kconfig32
-rw-r--r--drivers/reset/Makefile5
-rw-r--r--drivers/reset/core.c42
-rw-r--r--drivers/reset/reset-brcmstb.c132
-rw-r--r--drivers/reset/reset-hsdk.c1
-rw-r--r--drivers/reset/reset-imx7.c172
-rw-r--r--drivers/reset/reset-simple.c13
-rw-r--r--drivers/reset/reset-socfpga.c88
-rw-r--r--drivers/reset/reset-sunxi.c1
-rw-r--r--drivers/reset/reset-uniphier-glue.c (renamed from drivers/reset/reset-uniphier-usb3.c)50
-rw-r--r--drivers/reset/reset-zynqmp.c114
-rw-r--r--drivers/s390/block/dasd.c19
-rw-r--r--drivers/s390/block/dasd_eckd.c8
-rw-r--r--drivers/s390/char/sclp.h4
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/char/sclp_early.c2
-rw-r--r--drivers/s390/cio/qdio_setup.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c55
-rw-r--r--drivers/s390/crypto/ap_bus.c22
-rw-r--r--drivers/s390/crypto/ap_bus.h3
-rw-r--r--drivers/s390/crypto/ap_queue.c7
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c44
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c4
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h1
-rw-r--r--drivers/s390/net/Makefile2
-rw-r--r--drivers/s390/net/ism_drv.c27
-rw-r--r--drivers/s390/net/qeth_core.h167
-rw-r--r--drivers/s390/net/qeth_core_main.c1223
-rw-r--r--drivers/s390/net/qeth_core_mpc.c23
-rw-r--r--drivers/s390/net/qeth_core_mpc.h17
-rw-r--r--drivers/s390/net/qeth_core_sys.c64
-rw-r--r--drivers/s390/net/qeth_ethtool.c370
-rw-r--r--drivers/s390/net/qeth_l2_main.c556
-rw-r--r--drivers/s390/net/qeth_l3_main.c366
-rw-r--r--drivers/s390/net/qeth_l3_sys.c12
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c4
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c12
-rw-r--r--drivers/scsi/3w-9xxx.c14
-rw-r--r--drivers/scsi/3w-sas.c17
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/Kconfig6
-rw-r--r--drivers/scsi/a100u2w.c8
-rw-r--r--drivers/scsi/aacraid/linit.c9
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c16
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c18
-rw-r--r--drivers/scsi/atari_scsi.c10
-rw-r--r--drivers/scsi/be2iscsi/be_main.c6
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c11
-rw-r--r--drivers/scsi/bfa/bfad.c18
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c49
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c44
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c8
-rw-r--r--drivers/scsi/csiostor/csio_attr.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c7
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c9
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c28
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h5
-rw-r--r--drivers/scsi/cxlflash/main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c20
-rw-r--r--drivers/scsi/hptiop.c10
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/libfc/fc_lport.c6
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c35
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c15
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c8
-rw-r--r--drivers/scsi/mesh.c5
-rw-r--r--drivers/scsi/mvumi.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c6
-rw-r--r--drivers/scsi/qedf/qedf_main.c29
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c3
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c39
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/scsi_debug.c41
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_pm.c26
-rw-r--r--drivers/scsi/sd.c18
-rw-r--r--drivers/scsi/sd_zbc.c20
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c34
-rw-r--r--drivers/scsi/ufs/ufs.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.c12
-rw-r--r--drivers/slimbus/core.c45
-rw-r--r--drivers/soc/amlogic/meson-canvas.c18
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c196
-rw-r--r--drivers/soc/bcm/Kconfig12
-rw-r--r--drivers/soc/bcm/Makefile1
-rw-r--r--drivers/soc/bcm/bcm2835-power.c661
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/fsl/dpio/dpio-cmd.h6
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c93
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c46
-rw-r--r--drivers/soc/fsl/dpio/dpio.c39
-rw-r--r--drivers/soc/fsl/dpio/dpio.h9
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c5
-rw-r--r--drivers/soc/fsl/guts.c10
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c2
-rw-r--r--drivers/soc/fsl/qbman/qman.c9
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c55
-rw-r--r--drivers/soc/imx/Kconfig2
-rw-r--r--drivers/soc/imx/gpcv2.c76
-rw-r--r--drivers/soc/lantiq/Makefile1
-rw-r--r--drivers/soc/lantiq/gphy.c224
-rw-r--r--drivers/soc/qcom/Kconfig18
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/llcc-sdm845.c6
-rw-r--r--drivers/soc/qcom/llcc-slice.c101
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c7
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c32
-rw-r--r--drivers/soc/qcom/rpmh.c37
-rw-r--r--drivers/soc/qcom/rpmhpd.c406
-rw-r--r--drivers/soc/qcom/rpmpd.c315
-rw-r--r--drivers/soc/qcom/smd-rpm.c1
-rw-r--r--drivers/soc/renesas/Kconfig2
-rw-r--r--drivers/soc/renesas/r8a774c0-sysc.c23
-rw-r--r--drivers/soc/tegra/Kconfig1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c12
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra210.c2
-rw-r--r--drivers/soc/tegra/pmc.c424
-rw-r--r--drivers/soc/ti/knav_dma.c2
-rw-r--r--drivers/soc/xilinx/Kconfig20
-rw-r--r--drivers/soc/xilinx/Makefile2
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c321
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c178
-rw-r--r--drivers/spi/Kconfig36
-rw-r--r--drivers/spi/Makefile3
-rw-r--r--drivers/spi/atmel-quadspi.c270
-rw-r--r--drivers/spi/spi-ath79.c114
-rw-r--r--drivers/spi/spi-atmel.c102
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-bitbang.c13
-rw-r--r--drivers/spi/spi-cadence.c84
-rw-r--r--drivers/spi/spi-clps711x.c23
-rw-r--r--drivers/spi/spi-davinci.c54
-rw-r--r--drivers/spi/spi-dw-mmio.c22
-rw-r--r--drivers/spi/spi-dw.c54
-rw-r--r--drivers/spi/spi-fsl-dspi.c42
-rw-r--r--drivers/spi/spi-fsl-lpspi.c92
-rw-r--r--drivers/spi/spi-fsl-qspi.c966
-rw-r--r--drivers/spi/spi-geni-qcom.c56
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-mem.c72
-rw-r--r--drivers/spi/spi-mxs.c5
-rw-r--r--drivers/spi/spi-npcm-pspi.c3
-rw-r--r--drivers/spi/spi-nxp-fspi.c1106
-rw-r--r--drivers/spi/spi-omap2-mcspi.c4
-rw-r--r--drivers/spi/spi-pic32-sqi.c6
-rw-r--r--drivers/spi/spi-pl022.c30
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c58
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c157
-rw-r--r--drivers/spi/spi-pxa2xx.h4
-rw-r--r--drivers/spi/spi-rspi.c170
-rw-r--r--drivers/spi/spi-sh-hspi.c39
-rw-r--r--drivers/spi/spi-sh-msiof.c184
-rw-r--r--drivers/spi/spi-sifive.c448
-rw-r--r--drivers/spi/spi-sprd.c354
-rw-r--r--drivers/spi/spi-stm32.c1403
-rw-r--r--drivers/spi/spi-ti-qspi.c6
-rw-r--r--drivers/spi/spi-topcliff-pch.c6
-rw-r--r--drivers/spi/spi.c115
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile13
-rw-r--r--drivers/staging/android/ashmem.c70
-rw-r--r--drivers/staging/android/ion/Makefile2
-rw-r--r--drivers/staging/android/ion/ion-ioctl.c98
-rw-r--r--drivers/staging/android/ion/ion.c86
-rw-r--r--drivers/staging/android/ion/ion.h42
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c19
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c25
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c6
-rw-r--r--drivers/staging/android/ion/ion_heap.c8
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c2
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c10
-rw-r--r--drivers/staging/android/uapi/ion.h2
-rw-r--r--drivers/staging/android/vsoc.c1
-rw-r--r--drivers/staging/comedi/comedi_fops.c3
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c444
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c71
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h4
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c31
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h2
-rw-r--r--drivers/staging/erofs/Documentation/filesystems/erofs.txt208
-rw-r--r--drivers/staging/erofs/Makefile2
-rw-r--r--drivers/staging/erofs/data.c37
-rw-r--r--drivers/staging/erofs/dir.c12
-rw-r--r--drivers/staging/erofs/inode.c41
-rw-r--r--drivers/staging/erofs/internal.h147
-rw-r--r--drivers/staging/erofs/namei.c194
-rw-r--r--drivers/staging/erofs/super.c29
-rw-r--r--drivers/staging/erofs/unzip_vle.c165
-rw-r--r--drivers/staging/erofs/unzip_vle.h23
-rw-r--r--drivers/staging/erofs/unzip_vle_lz4.c21
-rw-r--r--drivers/staging/erofs/utils.c58
-rw-r--r--drivers/staging/erofs/xattr.c115
-rw-r--r--drivers/staging/erofs/xattr.h10
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c52
-rw-r--r--drivers/staging/fbtft/fb_bd663474.c6
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c6
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c6
-rw-r--r--drivers/staging/fbtft/fb_ili9340.c2
-rw-r--r--drivers/staging/fbtft/fb_pcd8544.c4
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c4
-rw-r--r--drivers/staging/fbtft/fb_s6d1121.c6
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c6
-rw-r--r--drivers/staging/fbtft/fb_ssd1305.c4
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c4
-rw-r--r--drivers/staging/fbtft/fb_ssd1325.c6
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c10
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c4
-rw-r--r--drivers/staging/fbtft/fb_tinylcd.c2
-rw-r--r--drivers/staging/fbtft/fb_tls8204.c6
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c4
-rw-r--r--drivers/staging/fbtft/fb_uc1701.c6
-rw-r--r--drivers/staging/fbtft/fb_upd161704.c6
-rw-r--r--drivers/staging/fbtft/fb_watterott.c4
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c6
-rw-r--r--drivers/staging/fbtft/fbtft-core.c178
-rw-r--r--drivers/staging/fbtft/fbtft-io.c26
-rw-r--r--drivers/staging/fbtft/fbtft.h21
-rw-r--r--drivers/staging/fbtft/fbtft_device.c344
-rw-r--r--drivers/staging/fbtft/flexfb.c12
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h2
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw.h2
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c76
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.h2
-rw-r--r--drivers/staging/fwserial/fwserial.c1
-rw-r--r--drivers/staging/gasket/gasket_interrupt.c1
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c10
-rw-r--r--drivers/staging/greybus/Kconfig1
-rw-r--r--drivers/staging/greybus/TODO2
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c152
-rw-r--r--drivers/staging/greybus/arche-platform.c124
-rw-r--r--drivers/staging/greybus/audio_topology.c1
-rw-r--r--drivers/staging/greybus/bundle.c2
-rw-r--r--drivers/staging/greybus/connection.h2
-rw-r--r--drivers/staging/greybus/control.c1
-rw-r--r--drivers/staging/greybus/core.c2
-rw-r--r--drivers/staging/greybus/gpio.c156
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c12
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h12
-rw-r--r--drivers/staging/gs_fpgaboot/io.c16
-rw-r--r--drivers/staging/gs_fpgaboot/io.h12
-rw-r--r--drivers/staging/iio/adc/Kconfig34
-rw-r--r--drivers/staging/iio/adc/Makefile4
-rw-r--r--drivers/staging/iio/adc/ad7280a.c243
-rw-r--r--drivers/staging/iio/adc/ad7816.c7
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c6
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c4
-rw-r--r--drivers/staging/iio/addac/adt7316.c143
-rw-r--r--drivers/staging/iio/cdc/Kconfig10
-rw-r--r--drivers/staging/iio/cdc/Makefile1
-rw-r--r--drivers/staging/iio/cdc/ad7152.c552
-rw-r--r--drivers/staging/iio/frequency/ad9834.c54
-rw-r--r--drivers/staging/iio/frequency/ad9834.h28
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c57
-rw-r--r--drivers/staging/ks7010/Makefile2
-rw-r--r--drivers/staging/ks7010/TODO2
-rw-r--r--drivers/staging/ks7010/ks_hostif.c119
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c2
-rw-r--r--drivers/staging/ks7010/michael_mic.c127
-rw-r--r--drivers/staging/ks7010/michael_mic.h21
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile2
-rw-r--r--drivers/staging/most/Makefile2
-rw-r--r--drivers/staging/most/cdev/Makefile2
-rw-r--r--drivers/staging/most/cdev/cdev.c5
-rw-r--r--drivers/staging/most/dim2/Makefile2
-rw-r--r--drivers/staging/most/i2c/Makefile2
-rw-r--r--drivers/staging/most/net/Makefile2
-rw-r--r--drivers/staging/most/sound/Makefile2
-rw-r--r--drivers/staging/most/usb/Makefile2
-rw-r--r--drivers/staging/most/video/Makefile2
-rw-r--r--drivers/staging/mt7621-dma/Kconfig6
-rw-r--r--drivers/staging/mt7621-dma/Makefile1
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c11
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts4
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi59
-rw-r--r--drivers/staging/mt7621-eth/ethtool.c1
-rw-r--r--drivers/staging/mt7621-eth/ethtool.h11
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c3
-rw-r--r--drivers/staging/mt7621-mmc/Kconfig2
-rw-r--r--drivers/staging/mt7621-mmc/dbg.c1
-rw-r--r--drivers/staging/mt7621-mmc/mt6575_sd.h2
-rw-r--r--drivers/staging/mt7621-pci-phy/Kconfig7
-rw-r--r--drivers/staging/mt7621-pci-phy/Makefile1
-rw-r--r--drivers/staging/mt7621-pci-phy/TODO4
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt54
-rw-r--r--drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c387
-rw-r--r--drivers/staging/mt7621-pci/Makefile2
-rw-r--r--drivers/staging/mt7621-pci/TODO8
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c300
-rw-r--r--drivers/staging/mt7621-pinctrl/Kconfig1
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c49
-rw-r--r--drivers/staging/mt7621-spi/spi-mt7621.c72
-rw-r--r--drivers/staging/netlogic/Kconfig2
-rw-r--r--drivers/staging/netlogic/platform_net.c51
-rw-r--r--drivers/staging/netlogic/platform_net.h32
-rw-r--r--drivers/staging/netlogic/xlr_net.c31
-rw-r--r--drivers/staging/netlogic/xlr_net.h33
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h2
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/ralink-gdma/Kconfig6
-rw-r--r--drivers/staging/ralink-gdma/Makefile3
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c (renamed from drivers/staging/mt7621-dma/ralink-gdma.c)6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c8
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c44
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c38
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c18
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c11
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c67
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h1
-rw-r--r--drivers/staging/rtl8188eu/include/odm_hwconfig.h9
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h12
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c34
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c72
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c26
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c7
-rw-r--r--drivers/staging/rtl8192e/dot11d.c120
-rw-r--r--drivers/staging/rtl8192e/dot11d.h77
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c23
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c8
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c22
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c4
-rw-r--r--drivers/staging/rtl8192u/Makefile2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c24
-rw-r--r--drivers/staging/rtl8712/ieee80211.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c16
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c8
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h1
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c2
-rw-r--r--drivers/staging/rtlwifi/Kconfig2
-rw-r--r--drivers/staging/rtlwifi/efuse.c6
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c5
-rw-r--r--drivers/staging/rtlwifi/pci.h3
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c2
-rw-r--r--drivers/staging/rts5208/ms.c5
-rw-r--r--drivers/staging/rts5208/sd.c7
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c6
-rw-r--r--drivers/staging/speakup/Kconfig32
-rw-r--r--drivers/staging/speakup/kobjects.c2
-rw-r--r--drivers/staging/speakup/main.c14
-rw-r--r--drivers/staging/speakup/speakup_decext.c3
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c3
-rw-r--r--drivers/staging/speakup/speakup_soft.c4
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h9
-rw-r--r--drivers/staging/speakup/spk_ttyio.c26
-rw-r--r--drivers/staging/speakup/varhandlers.c4
-rw-r--r--drivers/staging/unisys/visorhba/Makefile3
-rw-r--r--drivers/staging/unisys/visornic/Makefile3
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Makefile3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Makefile2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c1
-rw-r--r--drivers/staging/vt6655/baseband.c10
-rw-r--r--drivers/staging/vt6655/baseband.h2
-rw-r--r--drivers/staging/vt6655/card.c16
-rw-r--r--drivers/staging/vt6655/card.h2
-rw-r--r--drivers/staging/vt6655/device_main.c23
-rw-r--r--drivers/staging/vt6656/key.c4
-rw-r--r--drivers/staging/vt6656/mac.h2
-rw-r--r--drivers/staging/wilc1000/Makefile2
-rw-r--r--drivers/staging/wilc1000/host_interface.c1169
-rw-r--r--drivers/staging/wilc1000/host_interface.h165
-rw-r--r--drivers/staging/wilc1000/wilc_mon.c (renamed from drivers/staging/wilc1000/linux_mon.c)77
-rw-r--r--drivers/staging/wilc1000/wilc_netdev.c (renamed from drivers/staging/wilc1000/linux_wlan.c)430
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c187
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c4
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c644
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.h7
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h14
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c59
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h38
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c39
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h40
-rw-r--r--drivers/staging/wlan-ng/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c3
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c5
-rw-r--r--drivers/staging/xgifb/Kconfig11
-rw-r--r--drivers/staging/xgifb/Makefile4
-rw-r--r--drivers/staging/xgifb/TODO13
-rw-r--r--drivers/staging/xgifb/XGI_main.h365
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c2084
-rw-r--r--drivers/staging/xgifb/XGIfb.h109
-rw-r--r--drivers/staging/xgifb/vb_def.h257
-rw-r--r--drivers/staging/xgifb/vb_init.c1367
-rw-r--r--drivers/staging/xgifb/vb_init.h6
-rw-r--r--drivers/staging/xgifb/vb_setmode.c5528
-rw-r--r--drivers/staging/xgifb/vb_setmode.h24
-rw-r--r--drivers/staging/xgifb/vb_struct.h165
-rw-r--r--drivers/staging/xgifb/vb_table.h2513
-rw-r--r--drivers/staging/xgifb/vb_util.h46
-rw-r--r--drivers/staging/xgifb/vgatypes.h51
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/target_core_configfs.c8
-rw-r--r--drivers/target/target_core_user.c89
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/core.c8
-rw-r--r--drivers/tee/optee/device.c160
-rw-r--r--drivers/tee/optee/optee_msg.h26
-rw-r--r--drivers/tee/optee/optee_private.h3
-rw-r--r--drivers/tee/optee/optee_smc.h26
-rw-r--r--drivers/tee/optee/supp.c10
-rw-r--r--drivers/tee/tee_core.c78
-rw-r--r--drivers/thermal/Kconfig1
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c30
-rw-r--r--drivers/thermal/of-thermal.c4
-rw-r--r--drivers/tty/Kconfig24
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/ipwireless/hardware.c2
-rw-r--r--drivers/tty/n_gsm.c246
-rw-r--r--drivers/tty/n_hdlc.c3
-rw-r--r--drivers/tty/n_tty.c6
-rw-r--r--drivers/tty/nozomi.c10
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c4
-rw-r--r--drivers/tty/serial/8250/8250_core.c17
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c13
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c3
-rw-r--r--drivers/tty/serial/8250/8250_of.c5
-rw-r--r--drivers/tty/serial/8250/8250_omap.c75
-rw-r--r--drivers/tty/serial/8250/8250_pci.c179
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c4
-rw-r--r--drivers/tty/serial/Kconfig34
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/clps711x.c23
-rw-r--r--drivers/tty/serial/earlycon-riscv-sbi.c31
-rw-r--r--drivers/tty/serial/fsl_lpuart.c210
-rw-r--r--drivers/tty/serial/lantiq.c36
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c4
-rw-r--r--drivers/tty/serial/max310x.c21
-rw-r--r--drivers/tty/serial/meson_uart.c13
-rw-r--r--drivers/tty/serial/mps2-uart.c138
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c283
-rw-r--r--drivers/tty/serial/samsung.c42
-rw-r--r--drivers/tty/serial/sc16is7xx.c4
-rw-r--r--drivers/tty/serial/serial_core.c20
-rw-r--r--drivers/tty/serial/sh-sci.c80
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/tty/serial/tegra-tcu.c298
-rw-r--r--drivers/tty/serial/xilinx_uartps.c40
-rw-r--r--drivers/tty/synclink.c54
-rw-r--r--drivers/tty/sysrq.c8
-rw-r--r--drivers/tty/tty_buffer.c2
-rw-r--r--drivers/tty/tty_io.c26
-rw-r--r--drivers/tty/tty_ldisc.c47
-rw-r--r--drivers/tty/vt/vc_screen.c48
-rw-r--r--drivers/tty/vt/vt.c127
-rw-r--r--drivers/uio/uio.c16
-rw-r--r--drivers/uio/uio_pci_generic.c17
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/README54
-rw-r--r--drivers/usb/atm/Kconfig1
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c9
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c1
-rw-r--r--drivers/usb/chipidea/core.c46
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c7
-rw-r--r--drivers/usb/class/Kconfig1
-rw-r--r--drivers/usb/class/cdc-acm.c7
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/Kconfig13
-rw-r--r--drivers/usb/core/config.c14
-rw-r--r--drivers/usb/core/devio.c10
-rw-r--r--drivers/usb/core/driver.c23
-rw-r--r--drivers/usb/core/generic.c41
-rw-r--r--drivers/usb/core/hcd.c56
-rw-r--r--drivers/usb/core/hub.c66
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/ledtrig-usbport.c17
-rw-r--r--drivers/usb/core/message.c10
-rw-r--r--drivers/usb/core/phy.c28
-rw-r--r--drivers/usb/core/phy.h2
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/sysfs.c5
-rw-r--r--drivers/usb/core/urb.c5
-rw-r--r--drivers/usb/core/usb-acpi.c163
-rw-r--r--drivers/usb/core/usb.c37
-rw-r--r--drivers/usb/core/usb.h10
-rw-r--r--drivers/usb/dwc2/Kconfig2
-rw-r--r--drivers/usb/dwc2/gadget.c116
-rw-r--r--drivers/usb/dwc2/hcd.c4
-rw-r--r--drivers/usb/dwc3/Kconfig8
-rw-r--r--drivers/usb/dwc3/core.h12
-rw-r--r--drivers/usb/dwc3/debug.h156
-rw-r--r--drivers/usb/dwc3/drd.c9
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/dwc3/dwc3-haps.c9
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c11
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c1
-rw-r--r--drivers/usb/dwc3/gadget.c71
-rw-r--r--drivers/usb/dwc3/gadget.h4
-rw-r--r--drivers/usb/dwc3/trace.h10
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/epautoconf.c41
-rw-r--r--drivers/usb/gadget/function/f_fs.c12
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/function/f_uac1.c10
-rw-r--r--drivers/usb/gadget/function/u_ecm.h2
-rw-r--r--drivers/usb/gadget/function/u_eem.h2
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h2
-rw-r--r--drivers/usb/gadget/function/u_fs.h2
-rw-r--r--drivers/usb/gadget/function/u_gether.h2
-rw-r--r--drivers/usb/gadget/function/u_hid.h2
-rw-r--r--drivers/usb/gadget/function/u_midi.h2
-rw-r--r--drivers/usb/gadget/function/u_ncm.h2
-rw-r--r--drivers/usb/gadget/function/u_printer.h2
-rw-r--r--drivers/usb/gadget/function/u_rndis.h2
-rw-r--r--drivers/usb/gadget/function/u_serial.c35
-rw-r--r--drivers/usb/gadget/function/u_uac2.h2
-rw-r--r--drivers/usb/gadget/function/u_uvc.h2
-rw-r--r--drivers/usb/gadget/function/uvc.h2
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c10
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.h2
-rw-r--r--drivers/usb/gadget/function/uvc_video.h2
-rw-r--r--drivers/usb/gadget/legacy/Kconfig1
-rw-r--r--drivers/usb/gadget/legacy/inode.c40
-rw-r--r--drivers/usb/gadget/u_f.c2
-rw-r--r--drivers/usb/gadget/u_f.h2
-rw-r--r--drivers/usb/gadget/u_os_desc.h2
-rw-r--r--drivers/usb/gadget/udc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c13
-rw-r--r--drivers/usb/gadget/udc/core.c8
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c11
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.c6
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c4
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c17
-rw-r--r--drivers/usb/host/Kconfig8
-rw-r--r--drivers/usb/host/ehci-fsl.c69
-rw-r--r--drivers/usb/host/ehci-mv.c1
-rw-r--r--drivers/usb/host/ehci-orion.c44
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c6
-rw-r--r--drivers/usb/host/ohci-at91.c7
-rw-r--r--drivers/usb/host/ohci-da8xx.c118
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c10
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/uhci-hcd.c6
-rw-r--r--drivers/usb/host/whci/Makefile (renamed from drivers/usb/host/whci/Kbuild)2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c6
-rw-r--r--drivers/usb/host/xhci-debugfs.h1
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-mvebu.c11
-rw-r--r--drivers/usb/host/xhci-mvebu.h6
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/host/xhci-plat.c7
-rw-r--r--drivers/usb/host/xhci-tegra.c4
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/image/Kconfig1
-rw-r--r--drivers/usb/isp1760/Kconfig2
-rw-r--r--drivers/usb/misc/Kconfig1
-rw-r--r--drivers/usb/misc/ftdi-elan.c26
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig1
-rw-r--r--drivers/usb/misc/sisusbvga/Makefile3
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c32
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.h15
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c127
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.c5
-rw-r--r--drivers/usb/misc/usb251xb.c15
-rw-r--r--drivers/usb/misc/usb3503.c38
-rw-r--r--drivers/usb/misc/usbtest.c28
-rw-r--r--drivers/usb/mon/Kconfig1
-rw-r--r--drivers/usb/mtu3/Kconfig2
-rw-r--r--drivers/usb/musb/Kconfig5
-rw-r--r--drivers/usb/musb/jz4740.c10
-rw-r--r--drivers/usb/musb/musb_gadget.c13
-rw-r--r--drivers/usb/musb/musb_host.c4
-rw-r--r--drivers/usb/musb/musbhsdma.c21
-rw-r--r--drivers/usb/phy/Kconfig3
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c2
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c2
-rw-r--r--drivers/usb/renesas_usbhs/rza.c2
-rw-r--r--drivers/usb/roles/Kconfig2
-rw-r--r--drivers/usb/roles/Makefile2
-rw-r--r--drivers/usb/roles/class.c21
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c130
-rw-r--r--drivers/usb/serial/ftdi_sio.c17
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/keyspan_usa26msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa28msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa49msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa67msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa90msg.h1
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h2
-rw-r--r--drivers/usb/serial/usb-serial-simple.c3
-rw-r--r--drivers/usb/storage/Kconfig1
-rw-r--r--drivers/usb/storage/karma.c2
-rw-r--r--drivers/usb/storage/scsiglue.c8
-rw-r--r--drivers/usb/storage/unusual_devs.h12
-rw-r--r--drivers/usb/typec/Kconfig1
-rw-r--r--drivers/usb/typec/altmodes/Kconfig1
-rw-r--r--drivers/usb/typec/altmodes/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c4
-rw-r--r--drivers/usb/typec/class.c33
-rw-r--r--drivers/usb/typec/mux.c94
-rw-r--r--drivers/usb/typec/mux/Kconfig2
-rw-r--r--drivers/usb/typec/tcpm/Kconfig2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c71
-rw-r--r--drivers/usb/typec/tps6598x.c79
-rw-r--r--drivers/usb/typec/ucsi/Kconfig2
-rw-r--r--drivers/usb/typec/ucsi/debug.h65
-rw-r--r--drivers/usb/typec/ucsi/trace.c59
-rw-r--r--drivers/usb/typec/ucsi/trace.h7
-rw-r--r--drivers/usb/usbip/Kconfig2
-rw-r--r--drivers/usb/usbip/README7
-rw-r--r--drivers/usb/usbip/vhci_hcd.c6
-rw-r--r--drivers/usb/usbip/vudc_dev.c3
-rw-r--r--drivers/usb/wusbcore/Kconfig1
-rw-r--r--drivers/usb/wusbcore/cbaf.c15
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c5
-rw-r--r--drivers/usb/wusbcore/devconnect.c2
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c15
-rw-r--r--drivers/usb/wusbcore/wusbhc.c6
-rw-r--r--drivers/uwb/drp-ie.c5
-rw-r--r--drivers/vfio/mdev/mdev_core.c16
-rw-r--r--drivers/vfio/mdev/mdev_private.h5
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c6
-rw-r--r--drivers/vfio/pci/trace.h8
-rw-r--r--drivers/vfio/pci/vfio_pci.c90
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c36
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h6
-rw-r--r--drivers/vfio/platform/reset/Makefile2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/vfio.c8
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c3
-rw-r--r--drivers/vfio/vfio_iommu_type1.c2
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/scsi.c22
-rw-r--r--drivers/vhost/vhost.c112
-rw-r--r--drivers/vhost/vhost.h7
-rw-r--r--drivers/vhost/vsock.c4
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c28
-rw-r--r--drivers/video/console/vgacon.c7
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/controlfb.c42
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbmem.c19
-rw-r--r--drivers/video/fbdev/da8xx-fb.c6
-rw-r--r--drivers/video/fbdev/imsttfb.c23
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c7
-rw-r--r--drivers/video/fbdev/offb.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/fbdev/platinumfb.c21
-rw-r--r--drivers/video/fbdev/valkyriefb.c30
-rw-r--r--drivers/video/logo/Kconfig9
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c2
-rw-r--r--drivers/virtio/virtio_balloon.c98
-rw-r--r--drivers/virtio/virtio_mmio.c9
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/virtio/virtio_ring.c15
-rw-r--r--drivers/watchdog/bcm2835_wdt.c26
-rw-r--r--drivers/watchdog/mt7621_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/tqmx86_wdt.c8
-rw-r--r--drivers/xen/balloon.c18
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/pvcalls-back.c9
-rw-r--r--drivers/xen/pvcalls-front.c104
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--fs/Makefile3
-rw-r--r--fs/afs/cell.c1
-rw-r--r--fs/afs/flock.c4
-rw-r--r--fs/afs/inode.c3
-rw-r--r--fs/afs/protocol_yfs.h11
-rw-r--r--fs/afs/rxrpc.c53
-rw-r--r--fs/afs/server_list.c4
-rw-r--r--fs/afs/yfsclient.c2
-rw-r--r--fs/aio.c95
-rw-r--r--fs/autofs/expire.c3
-rw-r--r--fs/autofs/inode.c4
-rw-r--r--fs/binfmt_aout.c83
-rw-r--r--fs/binfmt_script.c57
-rw-r--r--fs/block_dev.c28
-rw-r--r--fs/btrfs/acl.c9
-rw-r--r--fs/btrfs/async-thread.c10
-rw-r--r--fs/btrfs/backref.c22
-rw-r--r--fs/btrfs/compression.c253
-rw-r--r--fs/btrfs/compression.h52
-rw-r--r--fs/btrfs/ctree.c150
-rw-r--r--fs/btrfs/ctree.h68
-rw-r--r--fs/btrfs/delayed-ref.c15
-rw-r--r--fs/btrfs/delayed-ref.h11
-rw-r--r--fs/btrfs/dev-replace.c9
-rw-r--r--fs/btrfs/disk-io.c51
-rw-r--r--fs/btrfs/extent-tree.c309
-rw-r--r--fs/btrfs/extent_io.c87
-rw-r--r--fs/btrfs/extent_io.h15
-rw-r--r--fs/btrfs/extent_map.c5
-rw-r--r--fs/btrfs/extent_map.h1
-rw-r--r--fs/btrfs/file.c3
-rw-r--r--fs/btrfs/inode.c212
-rw-r--r--fs/btrfs/ioctl.c109
-rw-r--r--fs/btrfs/locking.c108
-rw-r--r--fs/btrfs/locking.h15
-rw-r--r--fs/btrfs/lzo.c31
-rw-r--r--fs/btrfs/qgroup.c372
-rw-r--r--fs/btrfs/qgroup.h120
-rw-r--r--fs/btrfs/ref-verify.c4
-rw-r--r--fs/btrfs/relocation.c119
-rw-r--r--fs/btrfs/root-tree.c4
-rw-r--r--fs/btrfs/scrub.c49
-rw-r--r--fs/btrfs/super.c16
-rw-r--r--fs/btrfs/transaction.c33
-rw-r--r--fs/btrfs/tree-defrag.c2
-rw-r--r--fs/btrfs/tree-log.c282
-rw-r--r--fs/btrfs/volumes.c216
-rw-r--r--fs/btrfs/volumes.h5
-rw-r--r--fs/btrfs/zlib.c45
-rw-r--r--fs/btrfs/zstd.c316
-rw-r--r--fs/buffer.c19
-rw-r--r--fs/ceph/addr.c5
-rw-r--r--fs/ceph/caps.c2
-rw-r--r--fs/ceph/quota.c13
-rw-r--r--fs/ceph/snap.c3
-rw-r--r--fs/ceph/super.c4
-rw-r--r--fs/cifs/cifs_debug.c1
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h20
-rw-r--r--fs/cifs/cifssmb.c65
-rw-r--r--fs/cifs/connect.c28
-rw-r--r--fs/cifs/dfs_cache.c1
-rw-r--r--fs/cifs/file.c56
-rw-r--r--fs/cifs/inode.c10
-rw-r--r--fs/cifs/smb2file.c8
-rw-r--r--fs/cifs/smb2inode.c17
-rw-r--r--fs/cifs/smb2misc.c7
-rw-r--r--fs/cifs/smb2ops.c72
-rw-r--r--fs/cifs/smb2pdu.c108
-rw-r--r--fs/cifs/smb2pdu.h19
-rw-r--r--fs/cifs/trace.c10
-rw-r--r--fs/cifs/trace.h10
-rw-r--r--fs/cifs/transport.c113
-rw-r--r--fs/crypto/keyinfo.c4
-rw-r--r--fs/dcache.c38
-rw-r--r--fs/debugfs/inode.c40
-rw-r--r--fs/direct-io.c5
-rw-r--r--fs/dlm/lowcomms.c4
-rw-r--r--fs/drop_caches.c8
-rw-r--r--fs/ecryptfs/crypto.c5
-rw-r--r--fs/exec.c6
-rw-r--r--fs/ext2/dir.c35
-rw-r--r--fs/ext2/ext2.h17
-rw-r--r--fs/ext2/file.c1
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c30
-rw-r--r--fs/ext2/namei.c2
-rw-r--r--fs/ext2/super.c44
-rw-r--r--fs/ext2/symlink.c2
-rw-r--r--fs/ext2/xattr.c1
-rw-r--r--fs/ext4/fsync.c13
-rw-r--r--fs/f2fs/debug.c20
-rw-r--r--fs/f2fs/f2fs.h4
-rw-r--r--fs/f2fs/super.c5
-rw-r--r--fs/file.c1
-rw-r--r--fs/fs-writeback.c40
-rw-r--r--fs/fs_types.c105
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/glops.c1
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/lops.c190
-rw-r--r--fs/gfs2/lops.h4
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/recovery.c123
-rw-r--r--fs/gfs2/recovery.h2
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/hugetlbfs/inode.c75
-rw-r--r--fs/inode.c15
-rw-r--r--fs/iomap.c37
-rw-r--r--fs/kernfs/dir.c2
-rw-r--r--fs/kernfs/file.c31
-rw-r--r--fs/kernfs/inode.c2
-rw-r--r--fs/kernfs/kernfs-internal.h2
-rw-r--r--fs/kernfs/mount.c15
-rw-r--r--fs/locks.c32
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/nfs/nfs4file.c8
-rw-r--r--fs/nfs/nfs4idmap.c31
-rw-r--r--fs/nfs/super.c5
-rw-r--r--fs/nfs/write.c20
-rw-r--r--fs/nfsd/nfsctl.c4
-rw-r--r--fs/nfsd/vfs.c6
-rw-r--r--fs/notify/fanotify/Kconfig1
-rw-r--r--fs/notify/fanotify/fanotify.c267
-rw-r--r--fs/notify/fanotify/fanotify.h116
-rw-r--r--fs/notify/fanotify/fanotify_user.c373
-rw-r--r--fs/notify/fsnotify.c15
-rw-r--r--fs/notify/inotify/inotify.h1
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c18
-rw-r--r--fs/notify/inotify/inotify_user.c11
-rw-r--r--fs/notify/mark.c42
-rw-r--r--fs/notify/notification.c42
-rw-r--r--fs/ocfs2/alloc.c159
-rw-r--r--fs/ocfs2/cluster/nodemanager.c14
-rw-r--r--fs/ocfs2/dlmglue.c5
-rw-r--r--fs/ocfs2/ocfs2.h1
-rw-r--r--fs/ocfs2/ocfs2_trace.h2
-rw-r--r--fs/ocfs2/slot_map.c8
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/orangefs/file.c4
-rw-r--r--fs/pipe.c3
-rw-r--r--fs/proc/array.c16
-rw-r--r--fs/proc/base.c72
-rw-r--r--fs/proc/generic.c4
-rw-r--r--fs/proc/internal.h4
-rw-r--r--fs/proc/page.c4
-rw-r--r--fs/proc/proc_net.c20
-rw-r--r--fs/proc/root.c2
-rw-r--r--fs/proc/self.c16
-rw-r--r--fs/proc/stat.c89
-rw-r--r--fs/proc/task_mmu.c30
-rw-r--r--fs/proc/task_nommu.c4
-rw-r--r--fs/proc/thread_self.c16
-rw-r--r--fs/pstore/ram.c12
-rw-r--r--fs/read_write.c6
-rw-r--r--fs/select.c4
-rw-r--r--fs/splice.c14
-rw-r--r--fs/statfs.c14
-rw-r--r--fs/sysfs/dir.c3
-rw-r--r--fs/sysfs/file.c8
-rw-r--r--fs/sysfs/group.c3
-rw-r--r--fs/sysfs/symlink.c3
-rw-r--r--fs/timerfd.c4
-rw-r--r--fs/udf/super.c51
-rw-r--r--fs/utimes.c10
-rw-r--r--fs/xfs/libxfs/xfs_ag.c6
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c12
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c74
-rw-r--r--fs/xfs/libxfs/xfs_attr.c17
-rw-r--r--fs/xfs/libxfs/xfs_attr.h2
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c21
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c8
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c302
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h16
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c13
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c49
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h3
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c17
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h1
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c10
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c12
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c100
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c10
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c4
-rw-r--r--fs/xfs/libxfs/xfs_errortag.h4
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c3
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c29
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c13
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c11
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h2
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c3
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c3
-rw-r--r--fs/xfs/libxfs/xfs_sb.c7
-rw-r--r--fs/xfs/libxfs/xfs_shared.h4
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c3
-rw-r--r--fs/xfs/libxfs/xfs_types.c24
-rw-r--r--fs/xfs/libxfs/xfs_types.h3
-rw-r--r--fs/xfs/scrub/agheader.c10
-rw-r--r--fs/xfs/scrub/agheader_repair.c12
-rw-r--r--fs/xfs/scrub/attr.c11
-rw-r--r--fs/xfs/scrub/bmap.c27
-rw-r--r--fs/xfs/scrub/dir.c6
-rw-r--r--fs/xfs/scrub/ialloc.c330
-rw-r--r--fs/xfs/scrub/repair.c14
-rw-r--r--fs/xfs/scrub/repair.h3
-rw-r--r--fs/xfs/scrub/rtbitmap.c5
-rw-r--r--fs/xfs/scrub/trace.h45
-rw-r--r--fs/xfs/xfs_aops.c264
-rw-r--r--fs/xfs/xfs_aops.h24
-rw-r--r--fs/xfs/xfs_attr_list.c1
-rw-r--r--fs/xfs/xfs_bmap_util.c9
-rw-r--r--fs/xfs/xfs_buf.c63
-rw-r--r--fs/xfs/xfs_buf.h8
-rw-r--r--fs/xfs/xfs_error.c6
-rw-r--r--fs/xfs/xfs_error.h1
-rw-r--r--fs/xfs/xfs_file.c31
-rw-r--r--fs/xfs/xfs_fsops.c1
-rw-r--r--fs/xfs/xfs_globals.c2
-rw-r--r--fs/xfs/xfs_inode.c769
-rw-r--r--fs/xfs/xfs_inode.h3
-rw-r--r--fs/xfs/xfs_iomap.c518
-rw-r--r--fs/xfs/xfs_iomap.h7
-rw-r--r--fs/xfs/xfs_iops.c21
-rw-r--r--fs/xfs/xfs_log_recover.c14
-rw-r--r--fs/xfs/xfs_mount.c5
-rw-r--r--fs/xfs/xfs_mount.h10
-rw-r--r--fs/xfs/xfs_ondisk.h21
-rw-r--r--fs/xfs/xfs_pnfs.c2
-rw-r--r--fs/xfs/xfs_reflink.c150
-rw-r--r--fs/xfs/xfs_reflink.h18
-rw-r--r--fs/xfs/xfs_super.c22
-rw-r--r--fs/xfs/xfs_sysctl.h1
-rw-r--r--fs/xfs/xfs_sysfs.c24
-rw-r--r--fs/xfs/xfs_trace.h115
-rw-r--r--fs/xfs/xfs_trans_bmap.c1
-rw-r--r--fs/xfs/xfs_trans_buf.c2
-rw-r--r--fs/xfs/xfs_trans_extfree.c1
-rw-r--r--fs/xfs/xfs_trans_refcount.c1
-rw-r--r--fs/xfs/xfs_trans_rmap.c1
-rw-r--r--fs/xfs/xfs_xattr.c3
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h4
-rw-r--r--include/acpi/acexcep.h5
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h4
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h18
-rw-r--r--include/acpi/acrestyp.h16
-rw-r--r--include/acpi/actbl.h2
-rw-r--r--include/acpi/actbl1.h19
-rw-r--r--include/acpi/actbl2.h17
-rw-r--r--include/acpi/actbl3.h21
-rw-r--r--include/acpi/actypes.h14
-rw-r--r--include/acpi/acuuid.h2
-rw-r--r--include/acpi/cppc_acpi.h1
-rw-r--r--include/acpi/ghes.h4
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acenvex.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/acgccex.h2
-rw-r--r--include/acpi/platform/acintel.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/asm-generic/atomic-instrumented.h1659
-rw-r--r--include/asm-generic/atomic-long.h1174
-rw-r--r--include/asm-generic/bug.h3
-rw-r--r--include/asm-generic/iomap.h22
-rw-r--r--include/asm-generic/page.h2
-rw-r--r--include/asm-generic/pgtable.h18
-rw-r--r--include/asm-generic/shmparam.h (renamed from include/uapi/asm-generic/shmparam.h)0
-rw-r--r--include/asm-generic/uaccess.h1
-rw-r--r--include/crypto/algapi.h8
-rw-r--r--include/crypto/arc4.h13
-rw-r--r--include/crypto/if_alg.h7
-rw-r--r--include/crypto/internal/cryptouser.h2
-rw-r--r--include/crypto/internal/hash.h6
-rw-r--r--include/crypto/internal/skcipher.h15
-rw-r--r--include/crypto/morus1280_glue.h7
-rw-r--r--include/crypto/morus640_glue.h7
-rw-r--r--include/crypto/morus_common.h7
-rw-r--r--include/crypto/streebog.h2
-rw-r--r--include/crypto/xts.h4
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_audio_component.h1
-rw-r--r--include/drm/drm_dp_helper.h7
-rw-r--r--include/drm/drm_dp_mst_helper.h3
-rw-r--r--include/drm/drm_hdcp.h18
-rw-r--r--include/drm/i915_component.h5
-rw-r--r--include/drm/i915_drm.h15
-rw-r--r--include/drm/i915_mei_hdcp_interface.h149
-rw-r--r--include/dt-bindings/clock/ath79-clk.h4
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h26
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/dt-bindings/clock/r8a7778-clock.h2
-rw-r--r--include/dt-bindings/iio/adc/ingenic,adc.h10
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm845.h143
-rw-r--r--include/dt-bindings/power/mt8173-power.h6
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h39
-rw-r--r--include/dt-bindings/power/xlnx-zynqmp-power.h39
-rw-r--r--include/dt-bindings/reset/amlogic,meson-axg-reset.h3
-rw-r--r--include/dt-bindings/reset/amlogic,meson-g12a-reset.h134
-rw-r--r--include/dt-bindings/reset/imx8mq-reset.h64
-rw-r--r--include/dt-bindings/reset/xlnx-zynqmp-resets.h130
-rw-r--r--include/dt-bindings/soc/bcm2835-pm.h28
-rw-r--r--include/keys/request_key_auth-type.h36
-rw-r--r--include/keys/user-type.h2
-rw-r--r--include/kvm/arm_vgic.h6
-rw-r--r--include/linux/acpi.h7
-rw-r--r--include/linux/arm_sdei.h9
-rw-r--r--include/linux/async.h82
-rw-r--r--include/linux/atalk.h2
-rw-r--r--include/linux/atomic-fallback.h2295
-rw-r--r--include/linux/atomic.h1241
-rw-r--r--include/linux/backing-dev-defs.h1
-rw-r--r--include/linux/backing-dev.h2
-rw-r--r--include/linux/balloon_compaction.h34
-rw-r--r--include/linux/bcma/bcma.h11
-rw-r--r--include/linux/bcma/bcma_soc.h1
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blktrace_api.h8
-rw-r--r--include/linux/bpf-cgroup.h2
-rw-r--r--include/linux/bpf.h103
-rw-r--r--include/linux/bpf_types.h2
-rw-r--r--include/linux/bpf_verifier.h8
-rw-r--r--include/linux/bpfilter.h15
-rw-r--r--include/linux/btf.h1
-rw-r--r--include/linux/capability.h5
-rw-r--r--include/linux/ceph/libceph.h6
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/cgroup-defs.h6
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/compaction.h7
-rw-r--r--include/linux/compat.h104
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler-intel.h4
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/compiler_attributes.h14
-rw-r--r--include/linux/component.h76
-rw-r--r--include/linux/console_struct.h2
-rw-r--r--include/linux/coresight.h7
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/cpufreq.h48
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpuidle.h8
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/crypto.h10
-rw-r--r--include/linux/davinci_emac.h1
-rw-r--r--include/linux/dcache.h7
-rw-r--r--include/linux/device.h48
-rw-r--r--include/linux/dma-mapping.h9
-rw-r--r--include/linux/efi.h48
-rw-r--r--include/linux/etherdevice.h1
-rw-r--r--include/linux/ethtool.h19
-rw-r--r--include/linux/fanotify.h26
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/fcntl.h2
-rw-r--r--include/linux/filter.h73
-rw-r--r--include/linux/firmware/imx/svc/misc.h3
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h184
-rw-r--r--include/linux/frontswap.h7
-rw-r--r--include/linux/fs.h36
-rw-r--r--include/linux/fs_types.h75
-rw-r--r--include/linux/fsl/guts.h2
-rw-r--r--include/linux/fsl/mc.h1
-rw-r--r--include/linux/fsl/ptp_qoriq.h70
-rw-r--r--include/linux/fsl_devices.h7
-rw-r--r--include/linux/fsnotify.h73
-rw-r--r--include/linux/fsnotify_backend.h67
-rw-r--r--include/linux/gfp.h30
-rw-r--r--include/linux/gnss.h1
-rw-r--r--include/linux/hid-debug.h9
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/hugetlb.h70
-rw-r--r--include/linux/hyperv.h149
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/ieee80211.h117
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/igmp.h19
-rw-r--r--include/linux/ihex.h29
-rw-r--r--include/linux/iio/common/st_sensors.h1
-rw-r--r--include/linux/in.h5
-rw-r--r--include/linux/init_task.h1
-rw-r--r--include/linux/initrd.h3
-rw-r--r--include/linux/interconnect-provider.h142
-rw-r--r--include/linux/interconnect.h59
-rw-r--r--include/linux/interrupt.h52
-rw-r--r--include/linux/io-64-nonatomic-hi-lo.h64
-rw-r--r--include/linux/io-64-nonatomic-lo-hi.h64
-rw-r--r--include/linux/ip.h5
-rw-r--r--include/linux/ipv6.h9
-rw-r--r--include/linux/irq.h10
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/irqchip/irq-davinci-aintc.h27
-rw-r--r--include/linux/irqchip/irq-davinci-cp-intc.h25
-rw-r--r--include/linux/irqdesc.h7
-rw-r--r--include/linux/irqdomain.h1
-rw-r--r--include/linux/kasan-checks.h2
-rw-r--r--include/linux/kernel.h14
-rw-r--r--include/linux/kernfs.h6
-rw-r--r--include/linux/key-type.h22
-rw-r--r--include/linux/ksm.h7
-rw-r--r--include/linux/kthread.h10
-rw-r--r--include/linux/leds.h13
-rw-r--r--include/linux/libnvdimm.h2
-rw-r--r--include/linux/list.h11
-rw-r--r--include/linux/lockdep.h54
-rw-r--r--include/linux/lsm_hooks.h45
-rw-r--r--include/linux/marvell_phy.h2
-rw-r--r--include/linux/mdev.h2
-rw-r--r--include/linux/mdio.h44
-rw-r--r--include/linux/mei_cl_bus.h2
-rw-r--r--include/linux/memblock.h3
-rw-r--r--include/linux/memcontrol.h47
-rw-r--r--include/linux/memory_hotplug.h20
-rw-r--r--include/linux/mfd/bcm2835-pm.h14
-rw-r--r--include/linux/mfd/cros_ec_commands.h94
-rw-r--r--include/linux/mfd/ingenic-tcu.h2
-rw-r--r--include/linux/mfd/madera/core.h7
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h4
-rw-r--r--include/linux/mfd/tmio.h2
-rw-r--r--include/linux/mfd/tps65218.h3
-rw-r--r--include/linux/mlx5/device.h9
-rw-r--r--include/linux/mlx5/driver.h68
-rw-r--r--include/linux/mlx5/eswitch.h19
-rw-r--r--include/linux/mlx5/mlx5_ifc.h91
-rw-r--r--include/linux/mlx5/port.h32
-rw-r--r--include/linux/mlx5/vport.h35
-rw-r--r--include/linux/mm.h3
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--include/linux/mmc/card.h4
-rw-r--r--include/linux/mmc/host.h12
-rw-r--r--include/linux/mmc/sd.h6
-rw-r--r--include/linux/mmc/slot-gpio.h2
-rw-r--r--include/linux/mmzone.h14
-rw-r--r--include/linux/mod_devicetable.h9
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/mtd/rawnand.h26
-rw-r--r--include/linux/mtd/spi-nor.h16
-rw-r--r--include/linux/netdev_features.h24
-rw-r--r--include/linux/netdevice.h70
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h17
-rw-r--r--include/linux/netfilter/x_tables.h4
-rw-r--r--include/linux/netfilter_ipv4.h6
-rw-r--r--include/linux/netfilter_ipv6.h60
-rw-r--r--include/linux/netlink.h1
-rw-r--r--include/linux/nodemask.h8
-rw-r--r--include/linux/nvram.h133
-rw-r--r--include/linux/objagg.h21
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/page-flags.h44
-rw-r--r--include/linux/pagemap.h31
-rw-r--r--include/linux/parport.h13
-rw-r--r--include/linux/pci-dma-compat.h2
-rw-r--r--include/linux/pci.h4
-rw-r--r--include/linux/percpu-rwsem.h24
-rw-r--r--include/linux/perf_event.h48
-rw-r--r--include/linux/phy.h203
-rw-r--r--include/linux/phy/phy-mipi-dphy.h13
-rw-r--r--include/linux/phy/phy.h1
-rw-r--r--include/linux/phy_fixed.h23
-rw-r--r--include/linux/phylink.h8
-rw-r--r--include/linux/platform_data/b53.h2
-rw-r--r--include/linux/platform_data/davinci-cpufreq.h19
-rw-r--r--include/linux/platform_data/dsa.h68
-rw-r--r--include/linux/platform_data/mv88e6xxx.h2
-rw-r--r--include/linux/platform_data/spi-ath79.h (renamed from arch/mips/include/asm/mach-ath79/ath79_spi_platform.h)0
-rw-r--r--include/linux/platform_data/usb-davinci.h14
-rw-r--r--include/linux/platform_device.h1
-rw-r--r--include/linux/pm.h7
-rw-r--r--include/linux/pm_domain.h8
-rw-r--r--include/linux/pm_opp.h18
-rw-r--r--include/linux/pm_runtime.h4
-rw-r--r--include/linux/poison.h2
-rw-r--r--include/linux/posix-clock.h2
-rw-r--r--include/linux/posix-timers.h2
-rw-r--r--include/linux/property.h2
-rw-r--r--include/linux/ptr_ring.h1
-rw-r--r--include/linux/qcom_scm.h1
-rw-r--r--include/linux/qed/qed_chain.h31
-rw-r--r--include/linux/qed/qed_if.h22
-rw-r--r--include/linux/qed/qede_rdma.h10
-rw-r--r--include/linux/rcu_node_tree.h17
-rw-r--r--include/linux/rcu_segcblist.h17
-rw-r--r--include/linux/rcu_sync.h15
-rw-r--r--include/linux/rcupdate.h91
-rw-r--r--include/linux/rcutiny.h17
-rw-r--r--include/linux/rcutree.h19
-rw-r--r--include/linux/regmap.h31
-rw-r--r--include/linux/regulator/driver.h30
-rw-r--r--include/linux/regulator/fixed.h10
-rw-r--r--include/linux/regulator/gpio-regulator.h18
-rw-r--r--include/linux/reset.h15
-rw-r--r--include/linux/reset/socfpga.h7
-rw-r--r--include/linux/reset/sunxi.h7
-rw-r--r--include/linux/rhashtable.h8
-rw-r--r--include/linux/sched.h60
-rw-r--r--include/linux/sched/coredump.h1
-rw-r--r--include/linux/sched/mm.h48
-rw-r--r--include/linux/sched/signal.h5
-rw-r--r--include/linux/sched/sysctl.h7
-rw-r--r--include/linux/sched/task.h4
-rw-r--r--include/linux/sched/task_stack.h2
-rw-r--r--include/linux/sched/topology.h8
-rw-r--r--include/linux/sched/wake_q.h10
-rw-r--r--include/linux/security.h43
-rw-r--r--include/linux/selinux.h35
-rw-r--r--include/linux/shmem_fs.h3
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/skbuff.h53
-rw-r--r--include/linux/slub_def.h12
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h6
-rw-r--r--include/linux/socket.h8
-rw-r--r--include/linux/spi/pxa2xx_spi.h4
-rw-r--r--include/linux/spi/spi-mem.h5
-rw-r--r--include/linux/spi/spi.h29
-rw-r--r--include/linux/srcu.h18
-rw-r--r--include/linux/srcutiny.h17
-rw-r--r--include/linux/srcutree.h20
-rw-r--r--include/linux/statfs.h3
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/syscalls.h72
-rw-r--r--include/linux/tee_drv.h50
-rw-r--r--include/linux/time32.h32
-rw-r--r--include/linux/time64.h8
-rw-r--r--include/linux/timex.h4
-rw-r--r--include/linux/torture.h20
-rw-r--r--include/linux/umh.h2
-rw-r--r--include/linux/usb/hcd.h10
-rw-r--r--include/linux/usb/role.h2
-rw-r--r--include/linux/usb/tcpm.h6
-rw-r--r--include/linux/usb/typec_dp.h4
-rw-r--r--include/linux/usb/typec_mux.h3
-rw-r--r--include/linux/usb/wusb.h16
-rw-r--r--include/linux/virtio_config.h13
-rw-r--r--include/linux/virtio_net.h19
-rw-r--r--include/linux/vmw_vmci_defs.h7
-rw-r--r--include/linux/wait.h6
-rw-r--r--include/linux/workqueue.h30
-rw-r--r--include/linux/xarray.h227
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/addrconf.h28
-rw-r--r--include/net/af_rxrpc.h16
-rw-r--r--include/net/ax25.h12
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci.h12
-rw-r--r--include/net/bluetooth/hci_core.h1
-rw-r--r--include/net/bond_3ad.h17
-rw-r--r--include/net/cfg80211.h288
-rw-r--r--include/net/devlink.h384
-rw-r--r--include/net/dsa.h72
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/flow_offload.h203
-rw-r--r--include/net/icmp.h9
-rw-r--r--include/net/ieee80211_radiotap.h9
-rw-r--r--include/net/inet_connection_sock.h29
-rw-r--r--include/net/inet_frag.h16
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/ip_tunnels.h5
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/ipv6_frag.h11
-rw-r--r--include/net/l3mdev.h3
-rw-r--r--include/net/lwtunnel.h2
-rw-r--r--include/net/mac80211.h223
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netfilter/br_netfilter.h1
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h1
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h1
-rw-r--r--include/net/netfilter/nf_conntrack.h12
-rw-r--r--include/net/netfilter/nf_conntrack_core.h5
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h122
-rw-r--r--include/net/netfilter/nf_flow_table.h1
-rw-r--r--include/net/netfilter/nf_nat.h49
-rw-r--r--include/net/netfilter/nf_nat_core.h29
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h50
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h16
-rw-r--r--include/net/netfilter/nf_reject.h27
-rw-r--r--include/net/netfilter/nf_tables.h46
-rw-r--r--include/net/netfilter/nf_tables_core.h16
-rw-r--r--include/net/netfilter/nft_masq.h22
-rw-r--r--include/net/netfilter/nft_redir.h22
-rw-r--r--include/net/netlink.h8
-rw-r--r--include/net/netns/conntrack.h30
-rw-r--r--include/net/netns/ipv6.h2
-rw-r--r--include/net/netns/xdp.h13
-rw-r--r--include/net/phonet/pep.h5
-rw-r--r--include/net/pkt_cls.h28
-rw-r--r--include/net/sch_generic.h102
-rw-r--r--include/net/sctp/structs.h4
-rw-r--r--include/net/smc.h1
-rw-r--r--include/net/sock.h3
-rw-r--r--include/net/switchdev.h84
-rw-r--r--include/net/tc_act/tc_csum.h2
-rw-r--r--include/net/tc_act/tc_gact.h2
-rw-r--r--include/net/tc_act/tc_mirred.h4
-rw-r--r--include/net/tc_act/tc_pedit.h2
-rw-r--r--include/net/tc_act/tc_sample.h2
-rw-r--r--include/net/tc_act/tc_skbedit.h2
-rw-r--r--include/net/tc_act/tc_tunnel_key.h4
-rw-r--r--include/net/tc_act/tc_vlan.h2
-rw-r--r--include/net/tcp.h20
-rw-r--r--include/net/tls.h141
-rw-r--r--include/net/vxlan.h37
-rw-r--r--include/net/xdp_sock.h1
-rw-r--r--include/net/xfrm.h12
-rw-r--r--include/rdma/ib_verbs.h24
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h4
-rw-r--r--include/soc/fsl/dpaa2-io.h15
-rw-r--r--include/soc/tegra/bpmp.h13
-rw-r--r--include/soc/tegra/pmc.h6
-rw-r--r--include/sound/compress_driver.h6
-rw-r--r--include/sound/core.h1
-rw-r--r--include/sound/cs35l36.h43
-rw-r--r--include/sound/dmaengine_pcm.h4
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/hda_component.h5
-rw-r--r--include/sound/hda_register.h2
-rw-r--r--include/sound/hda_verbs.h2
-rw-r--r--include/sound/hdaudio.h3
-rw-r--r--include/sound/info.h36
-rw-r--r--include/sound/memalloc.h1
-rw-r--r--include/sound/pcm.h29
-rw-r--r--include/sound/simple_card_utils.h4
-rw-r--r--include/sound/soc-acpi.h33
-rw-r--r--include/sound/soc-dapm.h27
-rw-r--r--include/sound/soc-topology.h8
-rw-r--r--include/sound/soc.h79
-rw-r--r--include/trace/events/afs.h2
-rw-r--r--include/trace/events/btrfs.h30
-rw-r--r--include/trace/events/devlink.h129
-rw-r--r--include/trace/events/mlxsw.h119
-rw-r--r--include/trace/events/neigh.h206
-rw-r--r--include/trace/events/spi.h31
-rw-r--r--include/uapi/asm-generic/mman-common.h4
-rw-r--r--include/uapi/asm-generic/socket.h50
-rw-r--r--include/uapi/asm-generic/unistd.h149
-rw-r--r--include/uapi/linux/android/binder.h19
-rw-r--r--include/uapi/linux/android/binderfs.h (renamed from include/uapi/linux/android/binder_ctl.h)10
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/batadv_packet.h4
-rw-r--r--include/uapi/linux/batman_adv.h192
-rw-r--r--include/uapi/linux/blkzoned.h1
-rw-r--r--include/uapi/linux/bpf.h116
-rw-r--r--include/uapi/linux/btrfs.h2
-rw-r--r--include/uapi/linux/devlink.h47
-rw-r--r--include/uapi/linux/errqueue.h5
-rw-r--r--include/uapi/linux/ethtool.h35
-rw-r--r--include/uapi/linux/fanotify.h29
-rw-r--r--include/uapi/linux/fcntl.h1
-rw-r--r--include/uapi/linux/icmpv6.h2
-rw-r--r--include/uapi/linux/if_bonding.h24
-rw-r--r--include/uapi/linux/if_link.h1
-rw-r--r--include/uapi/linux/igmp.h1
-rw-r--r--include/uapi/linux/iio/types.h7
-rw-r--r--include/uapi/linux/in.h11
-rw-r--r--include/uapi/linux/in6.h1
-rw-r--r--include/uapi/linux/inet_diag.h16
-rw-r--r--include/uapi/linux/input.h6
-rw-r--r--include/uapi/linux/kernel-page-flags.h2
-rw-r--r--include/uapi/linux/mdio.h19
-rw-r--r--include/uapi/linux/mman.h4
-rw-r--r--include/uapi/linux/mroute.h9
-rw-r--r--include/uapi/linux/mroute6.h9
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h19
-rw-r--r--include/uapi/linux/nl80211.h44
-rw-r--r--include/uapi/linux/perf_event.h55
-rw-r--r--include/uapi/linux/pkt_cls.h58
-rw-r--r--include/uapi/linux/pkt_sched.h3
-rw-r--r--include/uapi/linux/pmu.h2
-rw-r--r--include/uapi/linux/prctl.h1
-rw-r--r--include/uapi/linux/ptp_clock.h2
-rw-r--r--include/uapi/linux/rds.h11
-rw-r--r--include/uapi/linux/sctp.h4
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h2
-rw-r--r--include/uapi/linux/tc_act/tc_connmark.h2
-rw-r--r--include/uapi/linux/tc_act/tc_csum.h2
-rw-r--r--include/uapi/linux/tc_act/tc_gact.h1
-rw-r--r--include/uapi/linux/tc_act/tc_ife.h1
-rw-r--r--include/uapi/linux/tc_act/tc_ipt.h3
-rw-r--r--include/uapi/linux/tc_act/tc_mirred.h1
-rw-r--r--include/uapi/linux/tc_act/tc_nat.h2
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_sample.h2
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_skbmod.h2
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h2
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h2
-rw-r--r--include/uapi/linux/time.h29
-rw-r--r--include/uapi/linux/time_types.h36
-rw-r--r--include/uapi/linux/timex.h39
-rw-r--r--include/uapi/linux/tls.h19
-rw-r--r--include/uapi/linux/virtio_config.h6
-rw-r--r--include/uapi/linux/virtio_ring.h10
-rw-r--r--include/uapi/linux/xdp_diag.h72
-rw-r--r--include/uapi/misc/fastrpc.h41
-rw-r--r--include/uapi/misc/habanalabs.h450
-rw-r--r--include/uapi/rdma/hns-abi.h5
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h1
-rw-r--r--include/xen/arm/page-coherent.h97
-rw-r--r--init/Kconfig14
-rw-r--r--init/init_task.c9
-rw-r--r--init/initramfs.c6
-rw-r--r--init/main.c3
-rw-r--r--ipc/mqueue.c16
-rw-r--r--ipc/msg.c39
-rw-r--r--ipc/sem.c41
-rw-r--r--ipc/shm.c40
-rw-r--r--ipc/syscall.c32
-rw-r--r--ipc/util.h21
-rw-r--r--kernel/Kconfig.locks3
-rw-r--r--kernel/async.c53
-rw-r--r--kernel/bpf/arraymap.c23
-rw-r--r--kernel/bpf/btf.c163
-rw-r--r--kernel/bpf/cgroup.c6
-rw-r--r--kernel/bpf/core.c317
-rw-r--r--kernel/bpf/disasm.c34
-rw-r--r--kernel/bpf/hashtab.c67
-rw-r--r--kernel/bpf/helpers.c96
-rw-r--r--kernel/bpf/local_storage.c16
-rw-r--r--kernel/bpf/lpm_trie.c1
-rw-r--r--kernel/bpf/map_in_map.c23
-rw-r--r--kernel/bpf/offload.c45
-rw-r--r--kernel/bpf/percpu_freelist.c41
-rw-r--r--kernel/bpf/percpu_freelist.h4
-rw-r--r--kernel/bpf/stackmap.c20
-rw-r--r--kernel/bpf/syscall.c104
-rw-r--r--kernel/bpf/verifier.c1034
-rw-r--r--kernel/capability.c45
-rw-r--r--kernel/cgroup/cgroup-internal.h2
-rw-r--r--kernel/cgroup/cgroup-v1.c58
-rw-r--r--kernel/cgroup/cgroup.c54
-rw-r--r--kernel/cgroup/cpuset.c13
-rw-r--r--kernel/cgroup/pids.c4
-rw-r--r--kernel/cgroup/rstat.c10
-rw-r--r--kernel/compat.c64
-rw-r--r--kernel/cpu.c47
-rw-r--r--kernel/crash_core.c2
-rw-r--r--kernel/cred.c13
-rw-r--r--kernel/dma/swiotlb.c2
-rw-r--r--kernel/events/callchain.c3
-rw-r--r--kernel/events/core.c374
-rw-r--r--kernel/events/hw_breakpoint.c15
-rw-r--r--kernel/events/internal.h5
-rw-r--r--kernel/events/ring_buffer.c16
-rw-r--r--kernel/events/uprobes.c15
-rw-r--r--kernel/exit.c16
-rw-r--r--kernel/fork.c38
-rw-r--r--kernel/futex.c69
-rw-r--r--kernel/irq/affinity.c121
-rw-r--r--kernel/irq/chip.c66
-rw-r--r--kernel/irq/debugfs.c8
-rw-r--r--kernel/irq/handle.c2
-rw-r--r--kernel/irq/internals.h10
-rw-r--r--kernel/irq/irqdesc.c44
-rw-r--r--kernel/irq/irqdomain.c16
-rw-r--r--kernel/irq/manage.c409
-rw-r--r--kernel/kallsyms.c2
-rw-r--r--kernel/kprobes.c21
-rw-r--r--kernel/kthread.c54
-rw-r--r--kernel/locking/lockdep.c973
-rw-r--r--kernel/locking/lockdep_internals.h7
-rw-r--r--kernel/locking/lockdep_proc.c12
-rw-r--r--kernel/locking/locktorture.c21
-rw-r--r--kernel/locking/qspinlock.c21
-rw-r--r--kernel/locking/qspinlock_stat.h21
-rw-r--r--kernel/locking/rtmutex.c37
-rw-r--r--kernel/locking/rwsem-xadd.c9
-rw-r--r--kernel/power/energy_model.c57
-rw-r--r--kernel/power/qos.c8
-rw-r--r--kernel/power/snapshot.c17
-rw-r--r--kernel/rcu/Kconfig30
-rw-r--r--kernel/rcu/rcu.h21
-rw-r--r--kernel/rcu/rcu_segcblist.c17
-rw-r--r--kernel/rcu/rcu_segcblist.h17
-rw-r--r--kernel/rcu/rcuperf.c27
-rw-r--r--kernel/rcu/rcutorture.c59
-rw-r--r--kernel/rcu/srcutiny.c17
-rw-r--r--kernel/rcu/srcutree.c72
-rw-r--r--kernel/rcu/sync.c15
-rw-r--r--kernel/rcu/tiny.c19
-rw-r--r--kernel/rcu/tree.c269
-rw-r--r--kernel/rcu/tree.h53
-rw-r--r--kernel/rcu/tree_exp.h201
-rw-r--r--kernel/rcu/tree_plugin.h238
-rw-r--r--kernel/rcu/update.c19
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched/core.c108
-rw-r--r--kernel/sched/cpufreq.c4
-rw-r--r--kernel/sched/cpufreq_schedutil.c2
-rw-r--r--kernel/sched/deadline.c6
-rw-r--r--kernel/sched/debug.c4
-rw-r--r--kernel/sched/fair.c474
-rw-r--r--kernel/sched/isolation.c2
-rw-r--r--kernel/sched/pelt.c45
-rw-r--r--kernel/sched/pelt.h114
-rw-r--r--kernel/sched/psi.c23
-rw-r--r--kernel/sched/rt.c6
-rw-r--r--kernel/sched/sched.h56
-rw-r--r--kernel/sched/topology.c37
-rw-r--r--kernel/seccomp.c10
-rw-r--r--kernel/signal.c68
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/softirq.c3
-rw-r--r--kernel/sys.c13
-rw-r--r--kernel/sys_ni.c25
-rw-r--r--kernel/sysctl.c50
-rw-r--r--kernel/time/Kconfig29
-rw-r--r--kernel/time/hrtimer.c4
-rw-r--r--kernel/time/ntp.c18
-rw-r--r--kernel/time/ntp_internal.h2
-rw-r--r--kernel/time/posix-clock.c2
-rw-r--r--kernel/time/posix-cpu-timers.c12
-rw-r--r--kernel/time/posix-stubs.c25
-rw-r--r--kernel/time/posix-timers.c72
-rw-r--r--kernel/time/posix-timers.h2
-rw-r--r--kernel/time/tick-broadcast.c1
-rw-r--r--kernel/time/time.c92
-rw-r--r--kernel/time/timekeeping.c4
-rw-r--r--kernel/time/timekeeping_debug.c11
-rw-r--r--kernel/time/timer.c4
-rw-r--r--kernel/torture.c25
-rw-r--r--kernel/trace/bpf_trace.c17
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_events_filter.c5
-rw-r--r--kernel/trace/trace_irqsoff.c9
-rw-r--r--kernel/trace/trace_kprobe.c22
-rw-r--r--kernel/trace/trace_preemptirq.c5
-rw-r--r--kernel/trace/trace_probe_tmpl.h6
-rw-r--r--kernel/trace/trace_uprobe.c9
-rw-r--r--kernel/umh.c33
-rw-r--r--kernel/workqueue.c175
-rw-r--r--kernel/workqueue_internal.h6
-rw-r--r--lib/Kconfig.debug32
-rw-r--r--lib/Kconfig.kasan32
-rw-r--r--lib/Makefile1
-rw-r--r--lib/assoc_array.c8
-rw-r--r--lib/bsearch.c2
-rw-r--r--lib/cpumask.c3
-rw-r--r--lib/crc32.c4
-rw-r--r--lib/devres.c4
-rw-r--r--lib/int_sqrt.c2
-rw-r--r--lib/iomap.c140
-rw-r--r--lib/kobject.c2
-rw-r--r--lib/kobject_uevent.c9
-rw-r--r--lib/locking-selftest.c2
-rw-r--r--lib/objagg.c583
-rw-r--r--lib/refcount.c18
-rw-r--r--lib/rhashtable.c2
-rw-r--r--lib/sbitmap.c13
-rw-r--r--lib/smp_processor_id.c7
-rw-r--r--lib/test_bpf.c2
-rw-r--r--lib/test_kasan.c24
-rw-r--r--lib/test_kmod.c2
-rw-r--r--lib/test_objagg.c199
-rw-r--r--lib/test_rhashtable.c36
-rw-r--r--lib/test_vmalloc.c551
-rw-r--r--lib/test_xarray.c57
-rw-r--r--lib/xarray.c92
-rw-r--r--mm/Kconfig.debug17
-rw-r--r--mm/backing-dev.c1
-rw-r--r--mm/cma.c4
-rw-r--r--mm/cma_debug.c11
-rw-r--r--mm/compaction.c1039
-rw-r--r--mm/debug.c4
-rw-r--r--mm/dmapool.c13
-rw-r--r--mm/failslab.c14
-rw-r--r--mm/filemap.c93
-rw-r--r--mm/gup.c203
-rw-r--r--mm/gup_benchmark.c8
-rw-r--r--mm/huge_memory.c37
-rw-r--r--mm/hugetlb.c117
-rw-r--r--mm/internal.h24
-rw-r--r--mm/kasan/Makefile3
-rw-r--r--mm/kasan/common.c84
-rw-r--r--mm/kasan/generic.c19
-rw-r--r--mm/kasan/generic_report.c3
-rw-r--r--mm/kasan/init.c6
-rw-r--r--mm/kasan/kasan.h3
-rw-r--r--mm/kasan/tags.c2
-rw-r--r--mm/khugepaged.c2
-rw-r--r--mm/kmemleak.c10
-rw-r--r--mm/ksm.c77
-rw-r--r--mm/list_lru.c3
-rw-r--r--mm/maccess.c6
-rw-r--r--mm/memblock.c14
-rw-r--r--mm/memcontrol.c150
-rw-r--r--mm/memfd.c3
-rw-r--r--mm/memory-failure.c33
-rw-r--r--mm/memory.c98
-rw-r--r--mm/memory_hotplug.c140
-rw-r--r--mm/mempolicy.c10
-rw-r--r--mm/mempool.c8
-rw-r--r--mm/migrate.c50
-rw-r--r--mm/mincore.c94
-rw-r--r--mm/mlock.c14
-rw-r--r--mm/mmap.c22
-rw-r--r--mm/mprotect.c6
-rw-r--r--mm/mremap.c17
-rw-r--r--mm/oom_kill.c77
-rw-r--r--mm/page-writeback.c24
-rw-r--r--mm/page_alloc.c200
-rw-r--r--mm/page_ext.c7
-rw-r--r--mm/page_idle.c8
-rw-r--r--mm/page_owner.c8
-rw-r--r--mm/page_poison.c4
-rw-r--r--mm/percpu-km.c2
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/readahead.c2
-rw-r--r--mm/rmap.c10
-rw-r--r--mm/shmem.c753
-rw-r--r--mm/slab.c55
-rw-r--r--mm/slab.h11
-rw-r--r--mm/slab_common.c15
-rw-r--r--mm/slub.c77
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/swap.c33
-rw-r--r--mm/swap_state.c23
-rw-r--r--mm/swapfile.c487
-rw-r--r--mm/truncate.c6
-rw-r--r--mm/usercopy.c9
-rw-r--r--mm/userfaultfd.c11
-rw-r--r--mm/util.c41
-rw-r--r--mm/vmalloc.c459
-rw-r--r--mm/vmscan.c98
-rw-r--r--mm/vmstat.c15
-rw-r--r--mm/workingset.c5
-rw-r--r--net/6lowpan/debugfs.c42
-rw-r--r--net/8021q/vlan_dev.c1
-rw-r--r--net/Kconfig13
-rw-r--r--net/appletalk/atalk_proc.c58
-rw-r--r--net/appletalk/ddp.c37
-rw-r--r--net/appletalk/sysctl_net_atalk.c5
-rw-r--r--net/atm/proc.c3
-rw-r--r--net/ax25/ax25_ip.c4
-rw-r--r--net/ax25/ax25_route.c19
-rw-r--r--net/batman-adv/Kconfig2
-rw-r--r--net/batman-adv/Makefile2
-rw-r--r--net/batman-adv/bat_algo.c2
-rw-r--r--net/batman-adv/bat_algo.h2
-rw-r--r--net/batman-adv/bat_iv_ogm.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.h2
-rw-r--r--net/batman-adv/bat_v.c2
-rw-r--r--net/batman-adv/bat_v.h2
-rw-r--r--net/batman-adv/bat_v_elp.c5
-rw-r--r--net/batman-adv/bat_v_elp.h2
-rw-r--r--net/batman-adv/bat_v_ogm.c2
-rw-r--r--net/batman-adv/bat_v_ogm.h2
-rw-r--r--net/batman-adv/bitarray.c2
-rw-r--r--net/batman-adv/bitarray.h2
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c2
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h2
-rw-r--r--net/batman-adv/debugfs.c2
-rw-r--r--net/batman-adv/debugfs.h2
-rw-r--r--net/batman-adv/distributed-arp-table.c403
-rw-r--r--net/batman-adv/distributed-arp-table.h21
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/batman-adv/fragmentation.h2
-rw-r--r--net/batman-adv/gateway_client.c3
-rw-r--r--net/batman-adv/gateway_client.h2
-rw-r--r--net/batman-adv/gateway_common.c3
-rw-r--r--net/batman-adv/gateway_common.h8
-rw-r--r--net/batman-adv/hard-interface.c7
-rw-r--r--net/batman-adv/hard-interface.h2
-rw-r--r--net/batman-adv/hash.c2
-rw-r--r--net/batman-adv/hash.h2
-rw-r--r--net/batman-adv/icmp_socket.c2
-rw-r--r--net/batman-adv/icmp_socket.h2
-rw-r--r--net/batman-adv/log.c2
-rw-r--r--net/batman-adv/log.h2
-rw-r--r--net/batman-adv/main.c2
-rw-r--r--net/batman-adv/main.h4
-rw-r--r--net/batman-adv/multicast.c6
-rw-r--r--net/batman-adv/multicast.h2
-rw-r--r--net/batman-adv/netlink.c1082
-rw-r--r--net/batman-adv/netlink.h8
-rw-r--r--net/batman-adv/network-coding.c2
-rw-r--r--net/batman-adv/network-coding.h2
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/batman-adv/originator.h2
-rw-r--r--net/batman-adv/routing.c6
-rw-r--r--net/batman-adv/routing.h2
-rw-r--r--net/batman-adv/send.c2
-rw-r--r--net/batman-adv/send.h2
-rw-r--r--net/batman-adv/soft-interface.c19
-rw-r--r--net/batman-adv/soft-interface.h2
-rw-r--r--net/batman-adv/sysfs.c66
-rw-r--r--net/batman-adv/sysfs.h2
-rw-r--r--net/batman-adv/tp_meter.c2
-rw-r--r--net/batman-adv/tp_meter.h2
-rw-r--r--net/batman-adv/trace.c2
-rw-r--r--net/batman-adv/trace.h2
-rw-r--r--net/batman-adv/translation-table.c2
-rw-r--r--net/batman-adv/translation-table.h2
-rw-r--r--net/batman-adv/tvlv.c2
-rw-r--r--net/batman-adv/tvlv.h2
-rw-r--r--net/batman-adv/types.h2
-rw-r--r--net/bluetooth/6lowpan.c11
-rw-r--r--net/bluetooth/a2mp.c2
-rw-r--r--net/bluetooth/af_bluetooth.c16
-rw-r--r--net/bluetooth/hci_core.c48
-rw-r--r--net/bluetooth/hci_event.c8
-rw-r--r--net/bluetooth/hci_sock.c7
-rw-r--r--net/bluetooth/l2cap_core.c84
-rw-r--r--net/bluetooth/l2cap_sock.c2
-rw-r--r--net/bluetooth/mgmt.c14
-rw-r--r--net/bluetooth/rfcomm/core.c1
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bpf/test_run.c141
-rw-r--r--net/bpfilter/Makefile2
-rw-r--r--net/bpfilter/bpfilter_kern.c76
-rw-r--r--net/bpfilter/bpfilter_umh_blob.S2
-rw-r--r--net/bpfilter/main.c2
-rw-r--r--net/bridge/br_fdb.c8
-rw-r--r--net/bridge/br_forward.c10
-rw-r--r--net/bridge/br_multicast.c186
-rw-r--r--net/bridge/br_netfilter_hooks.c10
-rw-r--r--net/bridge/br_netfilter_ipv6.c1
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/br_switchdev.c28
-rw-r--r--net/bridge/br_vlan.c26
-rw-r--r--net/bridge/netfilter/ebtables.c152
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c11
-rw-r--r--net/caif/cfpkt_skbuff.c16
-rw-r--r--net/can/bcm.c27
-rw-r--r--net/can/gw.c30
-rw-r--r--net/ceph/ceph_common.c11
-rw-r--r--net/ceph/debugfs.c2
-rw-r--r--net/ceph/messenger.c20
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/compat.c94
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/dev.c105
-rw-r--r--net/core/devlink.c1923
-rw-r--r--net/core/dst.c6
-rw-r--r--net/core/ethtool.c262
-rw-r--r--net/core/filter.c672
-rw-r--r--net/core/flow_dissector.c92
-rw-r--r--net/core/flow_offload.c153
-rw-r--r--net/core/gen_stats.c2
-rw-r--r--net/core/lwt_bpf.c266
-rw-r--r--net/core/lwtunnel.c16
-rw-r--r--net/core/neighbour.c26
-rw-r--r--net/core/net-sysfs.c27
-rw-r--r--net/core/net-traces.c8
-rw-r--r--net/core/net_namespace.c38
-rw-r--r--net/core/page_pool.c22
-rw-r--r--net/core/pktgen.c3
-rw-r--r--net/core/rtnetlink.c128
-rw-r--r--net/core/scm.c27
-rw-r--r--net/core/skbuff.c11
-rw-r--r--net/core/skmsg.c28
-rw-r--r--net/core/sock.c255
-rw-r--r--net/core/sysctl_net_core.c18
-rw-r--r--net/dccp/ccid.h4
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/timer.c4
-rw-r--r--net/decnet/dn_dev.c2
-rw-r--r--net/decnet/dn_fib.c2
-rw-r--r--net/dsa/Kconfig2
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/dsa/dsa2.c19
-rw-r--r--net/dsa/dsa_priv.h10
-rw-r--r--net/dsa/master.c60
-rw-r--r--net/dsa/port.c69
-rw-r--r--net/dsa/slave.c186
-rw-r--r--net/dsa/switch.c42
-rw-r--r--net/dsa/tag_dsa.c9
-rw-r--r--net/dsa/tag_edsa.c9
-rw-r--r--net/dsa/tag_ksz.c36
-rw-r--r--net/ethernet/eth.c13
-rw-r--r--net/ieee802154/6lowpan/reassembly.c142
-rw-r--r--net/ipv4/af_inet.c11
-rw-r--r--net/ipv4/bpfilter/sockopt.c58
-rw-r--r--net/ipv4/cipso_ipv4.c20
-rw-r--r--net/ipv4/devinet.c90
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c8
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fib_trie.c15
-rw-r--r--net/ipv4/fou.c12
-rw-r--r--net/ipv4/gre_demux.c17
-rw-r--r--net/ipv4/icmp.c11
-rw-r--r--net/ipv4/igmp.c104
-rw-r--r--net/ipv4/inet_diag.c10
-rw-r--r--net/ipv4/inet_fragment.c301
-rw-r--r--net/ipv4/inetpeer.c1
-rw-r--r--net/ipv4/ip_fragment.c291
-rw-r--r--net/ipv4/ip_gre.c176
-rw-r--r--net/ipv4/ip_input.c12
-rw-r--r--net/ipv4/ip_options.c22
-rw-r--r--net/ipv4/ip_sockglue.c12
-rw-r--r--net/ipv4/ip_tunnel.c108
-rw-r--r--net/ipv4/ip_tunnel_core.c18
-rw-r--r--net/ipv4/ip_vti.c50
-rw-r--r--net/ipv4/ipconfig.c27
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/ipmr.c149
-rw-r--r--net/ipv4/netfilter.c18
-rw-r--r--net/ipv4/netfilter/Kconfig50
-rw-r--r--net/ipv4/netfilter/Makefile7
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c4
-rw-r--r--net/ipv4/netfilter/iptable_nat.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c387
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic_main.c7
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c9
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c87
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c90
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c82
-rw-r--r--net/ipv4/netlink.c17
-rw-r--r--net/ipv4/route.c88
-rw-r--r--net/ipv4/tcp.c108
-rw-r--r--net/ipv4/tcp_bbr.c180
-rw-r--r--net/ipv4/tcp_input.c43
-rw-r--r--net/ipv4/tcp_ipv4.c9
-rw-r--r--net/ipv4/tcp_minisocks.c55
-rw-r--r--net/ipv4/tcp_output.c83
-rw-r--r--net/ipv4/tcp_timer.c89
-rw-r--r--net/ipv4/udp.c24
-rw-r--r--net/ipv4/udp_impl.h1
-rw-r--r--net/ipv4/udp_tunnel.c15
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv6/addrconf.c107
-rw-r--r--net/ipv6/addrconf_core.c6
-rw-r--r--net/ipv6/addrlabel.c47
-rw-r--r--net/ipv6/af_inet6.c21
-rw-r--r--net/ipv6/datagram.c11
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/fou6.c19
-rw-r--r--net/ipv6/icmp.c40
-rw-r--r--net/ipv6/ila/ila_xlat.c17
-rw-r--r--net/ipv6/ip6_gre.c96
-rw-r--r--net/ipv6/ip6_offload.c33
-rw-r--r--net/ipv6/ip6_output.c6
-rw-r--r--net/ipv6/ip6_udp_tunnel.c15
-rw-r--r--net/ipv6/ip6mr.c87
-rw-r--r--net/ipv6/ipv6_sockglue.c10
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/mcast_snoop.c84
-rw-r--r--net/ipv6/netfilter.c17
-rw-r--r--net/ipv6/netfilter/Kconfig48
-rw-r--r--net/ipv6/netfilter/Makefile7
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c261
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c411
-rw-r--r--net/ipv6/netfilter/nf_nat_masquerade_ipv6.c223
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c3
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c85
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c9
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c91
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c83
-rw-r--r--net/ipv6/reassembly.c234
-rw-r--r--net/ipv6/route.c131
-rw-r--r--net/ipv6/seg6.c4
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/ipv6/sit.c4
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/ipv6/udp.c38
-rw-r--r--net/ipv6/udp_impl.h1
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/kcm/kcmsock.c4
-rw-r--r--net/key/af_key.c42
-rw-r--r--net/l2tp/l2tp_core.c9
-rw-r--r--net/l2tp/l2tp_core.h20
-rw-r--r--net/l2tp/l2tp_ip.c3
-rw-r--r--net/l2tp/l2tp_ip6.c3
-rw-r--r--net/mac80211/agg-tx.c6
-rw-r--r--net/mac80211/cfg.c24
-rw-r--r--net/mac80211/debugfs.c8
-rw-r--r--net/mac80211/debugfs_sta.c103
-rw-r--r--net/mac80211/driver-ops.h38
-rw-r--r--net/mac80211/ht.c8
-rw-r--r--net/mac80211/ibss.c8
-rw-r--r--net/mac80211/ieee80211_i.h39
-rw-r--r--net/mac80211/main.c28
-rw-r--r--net/mac80211/mesh.c10
-rw-r--r--net/mac80211/mesh.h8
-rw-r--r--net/mac80211/mesh_hwmp.c32
-rw-r--r--net/mac80211/mesh_pathtbl.c173
-rw-r--r--net/mac80211/mesh_plink.c4
-rw-r--r--net/mac80211/mlme.c269
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c25
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c7
-rw-r--r--net/mac80211/rx.c34
-rw-r--r--net/mac80211/scan.c160
-rw-r--r--net/mac80211/spectmgmt.c6
-rw-r--r--net/mac80211/sta_info.c46
-rw-r--r--net/mac80211/sta_info.h38
-rw-r--r--net/mac80211/status.c6
-rw-r--r--net/mac80211/tdls.c6
-rw-r--r--net/mac80211/trace.h46
-rw-r--r--net/mac80211/tx.c166
-rw-r--r--net/mac80211/util.c143
-rw-r--r--net/mpls/af_mpls.c106
-rw-r--r--net/mpls/mpls_iptunnel.c4
-rw-r--r--net/netfilter/Kconfig16
-rw-r--r--net/netfilter/Makefile6
-rw-r--r--net/netfilter/ipvs/Kconfig1
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c59
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c41
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c23
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c23
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c4
-rw-r--r--net/netfilter/nf_conntrack_amanda.c9
-rw-r--r--net/netfilter/nf_conntrack_core.c251
-rw-r--r--net/netfilter/nf_conntrack_expect.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c18
-rw-r--r--net/netfilter/nf_conntrack_pptp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto.c514
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c134
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c85
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c196
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c67
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c69
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c128
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c210
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c80
-rw-r--r--net/netfilter/nf_conntrack_sip.c42
-rw-r--r--net/netfilter/nf_conntrack_standalone.c427
-rw-r--r--net/netfilter/nf_flow_table_core.c7
-rw-r--r--net/netfilter/nf_nat_core.c209
-rw-r--r--net/netfilter/nf_nat_helper.c15
-rw-r--r--net/netfilter/nf_nat_masquerade.c (renamed from net/ipv4/netfilter/nf_nat_masquerade_ipv4.c)208
-rw-r--r--net/netfilter/nf_nat_proto.c744
-rw-r--r--net/netfilter/nf_tables_api.c247
-rw-r--r--net/netfilter/nf_tables_core.c40
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c19
-rw-r--r--net/netfilter/nfnetlink_osf.c4
-rw-r--r--net/netfilter/nft_bitwise.c5
-rw-r--r--net/netfilter/nft_byteorder.c6
-rw-r--r--net/netfilter/nft_chain_nat.c108
-rw-r--r--net/netfilter/nft_cmp.c6
-rw-r--r--net/netfilter/nft_compat.c193
-rw-r--r--net/netfilter/nft_counter.c2
-rw-r--r--net/netfilter/nft_ct.c4
-rw-r--r--net/netfilter/nft_dynset.c23
-rw-r--r--net/netfilter/nft_flow_offload.c13
-rw-r--r--net/netfilter/nft_hash.c121
-rw-r--r--net/netfilter/nft_immediate.c12
-rw-r--r--net/netfilter/nft_lookup.c18
-rw-r--r--net/netfilter/nft_masq.c180
-rw-r--r--net/netfilter/nft_meta.c12
-rw-r--r--net/netfilter/nft_nat.c2
-rw-r--r--net/netfilter/nft_objref.c23
-rw-r--r--net/netfilter/nft_payload.c6
-rw-r--r--net/netfilter/nft_quota.c2
-rw-r--r--net/netfilter/nft_range.c5
-rw-r--r--net/netfilter/nft_redir.c154
-rw-r--r--net/netfilter/nft_rt.c6
-rw-r--r--net/netfilter/nft_set_hash.c38
-rw-r--r--net/netfilter/nft_tunnel.c41
-rw-r--r--net/netfilter/utils.c25
-rw-r--r--net/netfilter/x_tables.c6
-rw-r--r--net/netfilter/xt_CT.c2
-rw-r--r--net/netfilter/xt_IDLETIMER.c14
-rw-r--r--net/netfilter/xt_addrtype.c16
-rw-r--r--net/netfilter/xt_nat.c2
-rw-r--r--net/netfilter/xt_physdev.c9
-rw-r--r--net/netfilter/xt_recent.c4
-rw-r--r--net/netlabel/netlabel_kapi.c3
-rw-r--r--net/netlink/af_netlink.c18
-rw-r--r--net/netrom/nr_timer.c20
-rw-r--r--net/nfc/llcp_commands.c20
-rw-r--r--net/nfc/llcp_core.c24
-rw-r--r--net/openvswitch/Kconfig2
-rw-r--r--net/openvswitch/conntrack.c14
-rw-r--r--net/openvswitch/flow.c8
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/openvswitch/meter.c3
-rw-r--r--net/packet/af_packet.c35
-rw-r--r--net/phonet/pep.c32
-rw-r--r--net/qrtr/qrtr.c3
-rw-r--r--net/rds/af_rds.c47
-rw-r--r--net/rds/bind.c6
-rw-r--r--net/rds/connection.c21
-rw-r--r--net/rds/ib.c11
-rw-r--r--net/rds/ib.h4
-rw-r--r--net/rds/ib_cm.c72
-rw-r--r--net/rds/ib_recv.c4
-rw-r--r--net/rds/ib_send.c9
-rw-r--r--net/rds/message.c4
-rw-r--r--net/rds/rdma_transport.c14
-rw-r--r--net/rds/rdma_transport.h6
-rw-r--r--net/rds/rds.h18
-rw-r--r--net/rds/recv.c19
-rw-r--r--net/rds/send.c9
-rw-r--r--net/rds/tcp.c8
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rds/threads.c1
-rw-r--r--net/rose/af_rose.c4
-rw-r--r--net/rose/rose_route.c5
-rw-r--r--net/rxrpc/af_rxrpc.c70
-rw-r--r--net/rxrpc/ar-internal.h19
-rw-r--r--net/rxrpc/call_object.c97
-rw-r--r--net/rxrpc/conn_client.c5
-rw-r--r--net/rxrpc/local_object.c2
-rw-r--r--net/rxrpc/recvmsg.c3
-rw-r--r--net/rxrpc/sendmsg.c24
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_bpf.c2
-rw-r--r--net/sched/act_connmark.c2
-rw-r--r--net/sched/act_csum.c33
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_ife.c2
-rw-r--r--net/sched/act_ipt.c7
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_nat.c2
-rw-r--r--net/sched/act_pedit.c4
-rw-r--r--net/sched/act_police.c2
-rw-r--r--net/sched/act_sample.c2
-rw-r--r--net/sched/act_simple.c4
-rw-r--r--net/sched/act_skbedit.c5
-rw-r--r--net/sched/act_skbmod.c2
-rw-r--r--net/sched/act_tunnel_key.c49
-rw-r--r--net/sched/act_vlan.c2
-rw-r--r--net/sched/cls_api.c1330
-rw-r--r--net/sched/cls_basic.c41
-rw-r--r--net/sched/cls_bpf.c17
-rw-r--r--net/sched/cls_cgroup.c17
-rw-r--r--net/sched/cls_flow.c17
-rw-r--r--net/sched/cls_flower.c122
-rw-r--r--net/sched/cls_fw.c20
-rw-r--r--net/sched/cls_matchall.c45
-rw-r--r--net/sched/cls_route.c21
-rw-r--r--net/sched/cls_rsvp.h23
-rw-r--r--net/sched/cls_tcindex.c106
-rw-r--r--net/sched/cls_u32.c22
-rw-r--r--net/sched/sch_api.c26
-rw-r--r--net/sched/sch_cake.c160
-rw-r--r--net/sched/sch_cbs.c3
-rw-r--r--net/sched/sch_drr.c7
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_generic.c23
-rw-r--r--net/sched/sch_hfsc.c9
-rw-r--r--net/sched/sch_htb.c3
-rw-r--r--net/sched/sch_netem.c10
-rw-r--r--net/sched/sch_pie.c110
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_qfq.c20
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sctp/associola.c9
-rw-r--r--net/sctp/chunk.c2
-rw-r--r--net/sctp/diag.c1
-rw-r--r--net/sctp/ipv6.c8
-rw-r--r--net/sctp/offload.c1
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/protocol.c7
-rw-r--r--net/sctp/sm_make_chunk.c11
-rw-r--r--net/sctp/socket.c778
-rw-r--r--net/sctp/stream.c82
-rw-r--r--net/sctp/transport.c3
-rw-r--r--net/smc/af_smc.c125
-rw-r--r--net/smc/smc.h6
-rw-r--r--net/smc/smc_cdc.c52
-rw-r--r--net/smc/smc_cdc.h62
-rw-r--r--net/smc/smc_clc.c2
-rw-r--r--net/smc/smc_close.c16
-rw-r--r--net/smc/smc_core.c17
-rw-r--r--net/smc/smc_core.h20
-rw-r--r--net/smc/smc_diag.c3
-rw-r--r--net/smc/smc_ib.c31
-rw-r--r--net/smc/smc_ib.h2
-rw-r--r--net/smc/smc_llc.c3
-rw-r--r--net/smc/smc_netns.h20
-rw-r--r--net/smc/smc_pnet.c671
-rw-r--r--net/smc/smc_pnet.h13
-rw-r--r--net/smc/smc_tx.c81
-rw-r--r--net/smc/smc_wr.c46
-rw-r--r--net/smc/smc_wr.h1
-rw-r--r--net/socket.c133
-rw-r--r--net/sunrpc/auth.c3
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c12
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c49
-rw-r--r--net/sunrpc/clnt.c20
-rw-r--r--net/sunrpc/debugfs.c2
-rw-r--r--net/sunrpc/xprt.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c105
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c9
-rw-r--r--net/sunrpc/xprtrdma/verbs.c13
-rw-r--r--net/sunrpc/xprtsock.c22
-rw-r--r--net/switchdev/switchdev.c269
-rw-r--r--net/tipc/link.c19
-rw-r--r--net/tipc/msg.h22
-rw-r--r--net/tipc/netlink_compat.c54
-rw-r--r--net/tipc/node.c11
-rw-r--r--net/tipc/socket.c21
-rw-r--r--net/tipc/topsrv.c5
-rw-r--r--net/tipc/trace.c4
-rw-r--r--net/tls/tls_device.c55
-rw-r--r--net/tls/tls_device_fallback.c3
-rw-r--r--net/tls/tls_main.c154
-rw-r--r--net/tls/tls_sw.c657
-rw-r--r--net/unix/af_unix.c57
-rw-r--r--net/unix/diag.c3
-rw-r--r--net/vmw_vsock/af_vsock.c6
-rw-r--r--net/vmw_vsock/virtio_transport.c29
-rw-r--r--net/vmw_vsock/vmci_transport.c4
-rw-r--r--net/wireless/ap.c2
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/core.h15
-rw-r--r--net/wireless/mlme.c8
-rw-r--r--net/wireless/nl80211.c170
-rw-r--r--net/wireless/nl80211.h3
-rw-r--r--net/wireless/pmsr.c28
-rw-r--r--net/wireless/reg.c90
-rw-r--r--net/wireless/reg.h2
-rw-r--r--net/wireless/scan.c595
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/wireless/util.c41
-rw-r--r--net/wireless/wext-compat.c2
-rw-r--r--net/x25/af_x25.c19
-rw-r--r--net/xdp/Kconfig8
-rw-r--r--net/xdp/Makefile1
-rw-r--r--net/xdp/xdp_umem.c41
-rw-r--r--net/xdp/xsk.c56
-rw-r--r--net/xdp/xsk.h12
-rw-r--r--net/xdp/xsk_diag.c191
-rw-r--r--net/xfrm/xfrm_interface.c4
-rw-r--r--net/xfrm/xfrm_policy.c67
-rw-r--r--net/xfrm/xfrm_state.c30
-rw-r--r--net/xfrm/xfrm_user.c15
-rw-r--r--samples/bpf/.gitignore1
-rw-r--r--samples/bpf/Makefile23
-rw-r--r--samples/bpf/asm_goto_workaround.h16
-rw-r--r--samples/bpf/bpf_insn.h20
-rwxr-xr-xsamples/bpf/do_hbm_test.sh436
-rw-r--r--samples/bpf/fds_example.c10
-rw-r--r--samples/bpf/hbm.c441
-rw-r--r--samples/bpf/hbm.h31
-rw-r--r--samples/bpf/hbm_kern.h137
-rw-r--r--samples/bpf/hbm_out_kern.c157
-rw-r--r--samples/bpf/load_sock_ops.c97
-rw-r--r--samples/bpf/sock_example.c2
-rw-r--r--samples/bpf/sockex1_user.c25
-rw-r--r--samples/bpf/sockex2_user.c23
-rw-r--r--samples/bpf/sockex3_user.c2
-rw-r--r--samples/bpf/task_fd_query_kern.c2
-rw-r--r--samples/bpf/task_fd_query_user.c2
-rw-r--r--samples/bpf/tcp_basertt_kern.c2
-rw-r--r--samples/bpf/tcp_bpf.readme14
-rw-r--r--samples/bpf/tcp_bufs_kern.c2
-rw-r--r--samples/bpf/tcp_clamp_kern.c2
-rw-r--r--samples/bpf/tcp_cong_kern.c2
-rw-r--r--samples/bpf/tcp_iw_kern.c2
-rw-r--r--samples/bpf/tcp_rwnd_kern.c2
-rw-r--r--samples/bpf/tcp_synrto_kern.c2
-rw-r--r--samples/bpf/tcp_tos_reflect_kern.c2
-rw-r--r--samples/bpf/test_cgrp2_attach2.c14
-rw-r--r--samples/bpf/test_current_task_under_cgroup_user.c2
-rw-r--r--samples/bpf/tracex2_user.c2
-rw-r--r--samples/bpf/tracex3_kern.c2
-rw-r--r--samples/bpf/xdp1_user.c36
-rw-r--r--samples/bpf/xdp_adjust_tail_user.c38
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c196
-rw-r--r--samples/bpf/xdp_redirect_map_user.c106
-rw-r--r--samples/bpf/xdp_redirect_user.c103
-rw-r--r--samples/bpf/xdp_router_ipv4_user.c179
-rw-r--r--samples/bpf/xdp_rxq_info_user.c41
-rw-r--r--samples/bpf/xdp_sample_pkts_user.c81
-rw-r--r--samples/bpf/xdp_tx_iptunnel_user.c71
-rw-r--r--samples/bpf/xdpsock.h11
-rw-r--r--samples/bpf/xdpsock_kern.c56
-rw-r--r--samples/bpf/xdpsock_user.c829
-rw-r--r--samples/mei/mei-amt-version.c2
-rw-r--r--samples/seccomp/Makefile1
-rw-r--r--samples/vfio-mdev/mbochs.c8
-rw-r--r--samples/vfio-mdev/mdpy.c8
-rw-r--r--samples/vfio-mdev/mtty.c17
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.kasan7
-rwxr-xr-xscripts/atomic/atomic-tbl.sh186
-rwxr-xr-xscripts/atomic/atomics.tbl41
-rwxr-xr-xscripts/atomic/check-atomics.sh33
-rwxr-xr-xscripts/atomic/fallbacks/acquire9
-rwxr-xr-xscripts/atomic/fallbacks/add_negative16
-rwxr-xr-xscripts/atomic/fallbacks/add_unless16
-rwxr-xr-xscripts/atomic/fallbacks/andnot7
-rwxr-xr-xscripts/atomic/fallbacks/dec7
-rwxr-xr-xscripts/atomic/fallbacks/dec_and_test15
-rwxr-xr-xscripts/atomic/fallbacks/dec_if_positive15
-rwxr-xr-xscripts/atomic/fallbacks/dec_unless_positive14
-rwxr-xr-xscripts/atomic/fallbacks/fence11
-rwxr-xr-xscripts/atomic/fallbacks/fetch_add_unless23
-rwxr-xr-xscripts/atomic/fallbacks/inc7
-rwxr-xr-xscripts/atomic/fallbacks/inc_and_test15
-rwxr-xr-xscripts/atomic/fallbacks/inc_not_zero14
-rwxr-xr-xscripts/atomic/fallbacks/inc_unless_negative14
-rwxr-xr-xscripts/atomic/fallbacks/read_acquire7
-rwxr-xr-xscripts/atomic/fallbacks/release8
-rwxr-xr-xscripts/atomic/fallbacks/set_release7
-rwxr-xr-xscripts/atomic/fallbacks/sub_and_test16
-rwxr-xr-xscripts/atomic/fallbacks/try_cmpxchg11
-rwxr-xr-xscripts/atomic/gen-atomic-fallback.sh181
-rwxr-xr-xscripts/atomic/gen-atomic-instrumented.sh182
-rwxr-xr-xscripts/atomic/gen-atomic-long.sh101
-rw-r--r--scripts/atomic/gen-atomics.sh20
-rwxr-xr-xscripts/checksyscalls.sh53
-rw-r--r--scripts/coccinelle/api/alloc/alloc_cast.cocci8
-rw-r--r--scripts/coccinelle/api/alloc/zalloc-simple.cocci11
-rwxr-xr-xscripts/decode_stacktrace.sh9
-rw-r--r--scripts/gcc-plugins/Kconfig4
-rw-r--r--scripts/gcc-plugins/arm_ssp_per_task_plugin.c23
-rw-r--r--scripts/kallsyms.c4
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--scripts/mod/devicetable-offsets.c3
-rw-r--r--scripts/mod/file2alias.c19
-rw-r--r--scripts/mod/modpost.c2
-rwxr-xr-xscripts/ver_linux6
-rw-r--r--security/Kconfig45
-rw-r--r--security/Makefile2
-rw-r--r--security/apparmor/Kconfig16
-rw-r--r--security/apparmor/capability.c14
-rw-r--r--security/apparmor/domain.c9
-rw-r--r--security/apparmor/include/capability.h2
-rw-r--r--security/apparmor/include/cred.h16
-rw-r--r--security/apparmor/include/file.h5
-rw-r--r--security/apparmor/include/lib.h4
-rw-r--r--security/apparmor/include/task.h18
-rw-r--r--security/apparmor/ipc.c3
-rw-r--r--security/apparmor/lsm.c69
-rw-r--r--security/apparmor/resource.c2
-rw-r--r--security/apparmor/task.c6
-rw-r--r--security/commoncap.c28
-rw-r--r--security/integrity/iint.c2
-rw-r--r--security/integrity/ima/ima_appraise.c1
-rw-r--r--security/integrity/ima/ima_policy.c4
-rw-r--r--security/integrity/ima/ima_template_lib.c1
-rw-r--r--security/keys/internal.h13
-rw-r--r--security/keys/key.c5
-rw-r--r--security/keys/keyctl.c3
-rw-r--r--security/keys/keyring.c5
-rw-r--r--security/keys/proc.c3
-rw-r--r--security/keys/process_keys.c4
-rw-r--r--security/keys/request_key.c77
-rw-r--r--security/keys/request_key_auth.c18
-rw-r--r--security/loadpin/loadpin.c8
-rw-r--r--security/lsm_audit.c10
-rw-r--r--security/safesetid/Kconfig14
-rw-r--r--security/safesetid/Makefile7
-rw-r--r--security/safesetid/lsm.c277
-rw-r--r--security/safesetid/lsm.h33
-rw-r--r--security/safesetid/securityfs.c193
-rw-r--r--security/security.c655
-rw-r--r--security/selinux/Kconfig15
-rw-r--r--security/selinux/Makefile2
-rw-r--r--security/selinux/avc.c199
-rw-r--r--security/selinux/exports.c23
-rw-r--r--security/selinux/hooks.c420
-rw-r--r--security/selinux/include/audit.h3
-rw-r--r--security/selinux/include/avc.h6
-rw-r--r--security/selinux/include/objsec.h38
-rw-r--r--security/selinux/include/security.h3
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/selinux/ss/services.c38
-rw-r--r--security/selinux/xfrm.c4
-rw-r--r--security/smack/smack.h44
-rw-r--r--security/smack/smack_access.c6
-rw-r--r--security/smack/smack_lsm.c317
-rw-r--r--security/smack/smackfs.c18
-rw-r--r--security/tomoyo/audit.c31
-rw-r--r--security/tomoyo/common.c199
-rw-r--r--security/tomoyo/common.h51
-rw-r--r--security/tomoyo/condition.c59
-rw-r--r--security/tomoyo/domain.c76
-rw-r--r--security/tomoyo/file.c20
-rw-r--r--security/tomoyo/gc.c19
-rw-r--r--security/tomoyo/group.c5
-rw-r--r--security/tomoyo/load_policy.c8
-rw-r--r--security/tomoyo/memory.c9
-rw-r--r--security/tomoyo/mount.c2
-rw-r--r--security/tomoyo/realpath.c18
-rw-r--r--security/tomoyo/securityfs_if.c30
-rw-r--r--security/tomoyo/tomoyo.c160
-rw-r--r--security/tomoyo/util.c23
-rw-r--r--security/yama/yama_lsm.c12
-rw-r--r--sound/ac97/bus.c2
-rw-r--r--sound/aoa/core/gpio-feature.c1
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c8
-rw-r--r--sound/arm/aaci.c4
-rw-r--r--sound/arm/pxa2xx-ac97.c1
-rw-r--r--sound/atmel/ac97c.c4
-rw-r--r--sound/core/compress_offload.c14
-rw-r--r--sound/core/info.c131
-rw-r--r--sound/core/init.c31
-rw-r--r--sound/core/memalloc.c2
-rw-r--r--sound/core/oss/pcm_oss.c1
-rw-r--r--sound/core/pcm.c163
-rw-r--r--sound/core/pcm_lib.c22
-rw-r--r--sound/core/pcm_local.h1
-rw-r--r--sound/core/pcm_memory.c62
-rw-r--r--sound/core/pcm_native.c315
-rw-r--r--sound/drivers/aloop.c14
-rw-r--r--sound/drivers/dummy.c12
-rw-r--r--sound/drivers/opl4/opl4_proc.c4
-rw-r--r--sound/drivers/pcsp/pcsp.c1
-rw-r--r--sound/drivers/vx/vx_core.c9
-rw-r--r--sound/firewire/Kconfig1
-rw-r--r--sound/firewire/bebob/bebob.c14
-rw-r--r--sound/firewire/bebob/bebob_proc.c12
-rw-r--r--sound/firewire/dice/dice-proc.c12
-rw-r--r--sound/firewire/dice/dice.c12
-rw-r--r--sound/firewire/digi00x/digi00x-proc.c16
-rw-r--r--sound/firewire/fireface/Makefile4
-rw-r--r--sound/firewire/fireface/ff-midi.c2
-rw-r--r--sound/firewire/fireface/ff-pcm.c2
-rw-r--r--sound/firewire/fireface/ff-proc.c229
-rw-r--r--sound/firewire/fireface/ff-protocol-ff400.c161
-rw-r--r--sound/firewire/fireface/ff-protocol-ff800.c143
-rw-r--r--sound/firewire/fireface/ff-protocol-former.c597
-rw-r--r--sound/firewire/fireface/ff-protocol-latter.c430
-rw-r--r--sound/firewire/fireface/ff-stream.c40
-rw-r--r--sound/firewire/fireface/ff-transaction.c143
-rw-r--r--sound/firewire/fireface/ff.c28
-rw-r--r--sound/firewire/fireface/ff.h23
-rw-r--r--sound/firewire/fireworks/fireworks_proc.c12
-rw-r--r--sound/firewire/motu/amdtp-motu.c4
-rw-r--r--sound/firewire/motu/motu-proc.c12
-rw-r--r--sound/firewire/oxfw/oxfw-proc.c12
-rw-r--r--sound/firewire/tascam/tascam-proc.c12
-rw-r--r--sound/hda/hdac_component.c4
-rw-r--r--sound/hda/hdac_controller.c8
-rw-r--r--sound/hda/hdac_i915.c10
-rw-r--r--sound/hda/hdac_stream.c44
-rw-r--r--sound/i2c/other/ak4113.c5
-rw-r--r--sound/i2c/other/ak4114.c5
-rw-r--r--sound/i2c/other/ak4xxx-adda.c8
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c3
-rw-r--r--sound/isa/als100.c1
-rw-r--r--sound/isa/cmi8328.c1
-rw-r--r--sound/isa/cmi8330.c3
-rw-r--r--sound/isa/es1688/es1688.c2
-rw-r--r--sound/isa/es1688/es1688_lib.c6
-rw-r--r--sound/isa/es18xx.c4
-rw-r--r--sound/isa/gus/gus_irq.c5
-rw-r--r--sound/isa/gus/gus_main.c13
-rw-r--r--sound/isa/gus/gus_mem.c6
-rw-r--r--sound/isa/gus/gus_pcm.c4
-rw-r--r--sound/isa/opti9xx/miro.c5
-rw-r--r--sound/isa/sb/jazz16.c1
-rw-r--r--sound/isa/sb/sb16.c1
-rw-r--r--sound/isa/sb/sb16_csp.c5
-rw-r--r--sound/isa/sb/sb16_main.c12
-rw-r--r--sound/isa/sb/sb8.c1
-rw-r--r--sound/isa/sb/sb8_main.c2
-rw-r--r--sound/isa/sscape.c7
-rw-r--r--sound/isa/wss/wss_lib.c3
-rw-r--r--sound/mips/hal2.c31
-rw-r--r--sound/mips/sgio2audio.c7
-rw-r--r--sound/parisc/harmony.c10
-rw-r--r--sound/pci/ac97/ac97_proc.c26
-rw-r--r--sound/pci/ad1889.c13
-rw-r--r--sound/pci/ak4531_codec.c5
-rw-r--r--sound/pci/ali5451/ali5451.c8
-rw-r--r--sound/pci/als300.c1
-rw-r--r--sound/pci/als4000.c1
-rw-r--r--sound/pci/asihpi/asihpi.c6
-rw-r--r--sound/pci/atiixp.c24
-rw-r--r--sound/pci/atiixp_modem.c8
-rw-r--r--sound/pci/aw2/aw2-alsa.c40
-rw-r--r--sound/pci/azt3328.c4
-rw-r--r--sound/pci/bt87x.c10
-rw-r--r--sound/pci/ca0106/ca0106_main.c19
-rw-r--r--sound/pci/ca0106/ca0106_proc.c40
-rw-r--r--sound/pci/cmipci.c9
-rw-r--r--sound/pci/cs4281.c5
-rw-r--r--sound/pci/cs46xx/cs46xx_dsp_spos.h6
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c6
-rw-r--r--sound/pci/cs46xx/dsp_spos.c134
-rw-r--r--sound/pci/cs46xx/dsp_spos_scb_lib.c19
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pm.c1
-rw-r--r--sound/pci/ctxfi/ctatc.c8
-rw-r--r--sound/pci/echoaudio/echoaudio.c19
-rw-r--r--sound/pci/emu10k1/emu10k1.c6
-rw-r--r--sound/pci/emu10k1/emu10k1x.c12
-rw-r--r--sound/pci/emu10k1/emupcm.c22
-rw-r--r--sound/pci/emu10k1/emuproc.c81
-rw-r--r--sound/pci/emu10k1/p16v.c17
-rw-r--r--sound/pci/ens1370.c9
-rw-r--r--sound/pci/es1938.c1
-rw-r--r--sound/pci/es1968.c1
-rw-r--r--sound/pci/fm801.c1
-rw-r--r--sound/pci/hda/hda_beep.c151
-rw-r--r--sound/pci/hda/hda_beep.h5
-rw-r--r--sound/pci/hda/hda_bind.c3
-rw-r--r--sound/pci/hda/hda_codec.c56
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/hda_proc.c9
-rw-r--r--sound/pci/hda/hda_tegra.c132
-rw-r--r--sound/pci/hda/patch_ca0132.c4
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c10
-rw-r--r--sound/pci/hda/patch_realtek.c260
-rw-r--r--sound/pci/ice1712/ews.c7
-rw-r--r--sound/pci/ice1712/ice1712.c8
-rw-r--r--sound/pci/ice1712/ice1724.c8
-rw-r--r--sound/pci/ice1712/pontis.c12
-rw-r--r--sound/pci/ice1712/prodigy192.c5
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c8
-rw-r--r--sound/pci/ice1712/quartet.c4
-rw-r--r--sound/pci/intel8x0.c8
-rw-r--r--sound/pci/intel8x0m.c9
-rw-r--r--sound/pci/korg1212/korg1212.c6
-rw-r--r--sound/pci/lola/lola_proc.c16
-rw-r--r--sound/pci/lx6464es/lx6464es.c16
-rw-r--r--sound/pci/maestro3.c1
-rw-r--r--sound/pci/mixart/mixart.c6
-rw-r--r--sound/pci/nm256/nm256.c1
-rw-r--r--sound/pci/oxygen/oxygen_lib.c12
-rw-r--r--sound/pci/oxygen/pcm1796.h1
-rw-r--r--sound/pci/oxygen/xonar_pcm179x.c71
-rw-r--r--sound/pci/pcxhr/pcxhr.c21
-rw-r--r--sound/pci/riptide/riptide.c7
-rw-r--r--sound/pci/rme32.c5
-rw-r--r--sound/pci/rme96.c7
-rw-r--r--sound/pci/rme9652/hdsp.c5
-rw-r--r--sound/pci/rme9652/hdspm.c75
-rw-r--r--sound/pci/rme9652/rme9652.c6
-rw-r--r--sound/pci/sis7019.c1
-rw-r--r--sound/pci/sonicvibes.c6
-rw-r--r--sound/pci/trident/trident_main.c8
-rw-r--r--sound/pci/via82xx.c8
-rw-r--r--sound/pci/via82xx_modem.c16
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c10
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_core.c6
-rw-r--r--sound/ppc/pmac.c1
-rw-r--r--sound/ppc/snd_ps3.c6
-rw-r--r--sound/ppc/tumbler.c1
-rw-r--r--sound/sh/aica.c14
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/amd/acp-pcm-dma.c26
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c14
-rw-r--r--sound/soc/codecs/Kconfig67
-rw-r--r--sound/soc/codecs/Makefile14
-rw-r--r--sound/soc/codecs/ad193x.c76
-rw-r--r--sound/soc/codecs/ad193x.h8
-rw-r--r--sound/soc/codecs/adau1977.c12
-rw-r--r--sound/soc/codecs/adau7002.c45
-rw-r--r--sound/soc/codecs/ak4118.c1
-rw-r--r--sound/soc/codecs/ak4458.c79
-rw-r--r--sound/soc/codecs/cros_ec_codec.c441
-rw-r--r--sound/soc/codecs/cs35l36.c1957
-rw-r--r--sound/soc/codecs/cs35l36.h446
-rw-r--r--sound/soc/codecs/cs4271.c4
-rw-r--r--sound/soc/codecs/cs4341.c346
-rw-r--r--sound/soc/codecs/cs47l24.c52
-rw-r--r--sound/soc/codecs/da7219-aad.c2
-rw-r--r--sound/soc/codecs/da7219.c197
-rw-r--r--sound/soc/codecs/da7219.h2
-rw-r--r--sound/soc/codecs/dmic.c4
-rw-r--r--sound/soc/codecs/es8316.c200
-rw-r--r--sound/soc/codecs/es8316.h7
-rw-r--r--sound/soc/codecs/hdac_hdmi.c203
-rw-r--r--sound/soc/codecs/hdmi-codec.c4
-rw-r--r--sound/soc/codecs/jz4725b.c598
-rw-r--r--sound/soc/codecs/jz4740.c26
-rw-r--r--sound/soc/codecs/max98090.c15
-rw-r--r--sound/soc/codecs/max98373.c6
-rw-r--r--sound/soc/codecs/max9860.c3
-rw-r--r--sound/soc/codecs/max98927.c8
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c3
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c23
-rw-r--r--sound/soc/codecs/mt6351.c4
-rw-r--r--sound/soc/codecs/mt6358.c2336
-rw-r--r--sound/soc/codecs/mt6358.h2314
-rw-r--r--sound/soc/codecs/nau8824.c2
-rw-r--r--sound/soc/codecs/nau8825.c1
-rw-r--r--sound/soc/codecs/pcm186x.c8
-rw-r--r--sound/soc/codecs/pcm3060.c35
-rw-r--r--sound/soc/codecs/pcm3060.h5
-rw-r--r--sound/soc/codecs/pcm512x.c48
-rw-r--r--sound/soc/codecs/rk3328_codec.c519
-rw-r--r--sound/soc/codecs/rk3328_codec.h210
-rw-r--r--sound/soc/codecs/rl6347a.c4
-rw-r--r--sound/soc/codecs/rt274.c24
-rw-r--r--sound/soc/codecs/rt286.c8
-rw-r--r--sound/soc/codecs/rt298.c12
-rw-r--r--sound/soc/codecs/rt5514-spi.c2
-rw-r--r--sound/soc/codecs/rt5514.c1
-rw-r--r--sound/soc/codecs/rt5640.c6
-rw-r--r--sound/soc/codecs/rt5645.c66
-rw-r--r--sound/soc/codecs/rt5651.c67
-rw-r--r--sound/soc/codecs/rt5651.h1
-rw-r--r--sound/soc/codecs/rt5670.c54
-rw-r--r--sound/soc/codecs/rt5677.c8
-rw-r--r--sound/soc/codecs/rt5682.c11
-rw-r--r--sound/soc/codecs/rt5682.h24
-rw-r--r--sound/soc/codecs/sgtl5000.c19
-rw-r--r--sound/soc/codecs/ssm2602.c71
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c16
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c35
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h1
-rw-r--r--sound/soc/codecs/tlv320aic3x.c25
-rw-r--r--sound/soc/codecs/tscs42xx.c2
-rw-r--r--sound/soc/codecs/wcd-clsh-v2.c576
-rw-r--r--sound/soc/codecs/wcd-clsh-v2.h49
-rw-r--r--sound/soc/codecs/wcd9335.c5244
-rw-r--r--sound/soc/codecs/wcd9335.h640
-rw-r--r--sound/soc/codecs/wm5102.c64
-rw-r--r--sound/soc/codecs/wm5110.c80
-rw-r--r--sound/soc/codecs/wm8741.c22
-rw-r--r--sound/soc/codecs/wm8770.c18
-rw-r--r--sound/soc/codecs/wm8904.c21
-rw-r--r--sound/soc/codecs/wm8962.c9
-rw-r--r--sound/soc/codecs/wm8995.c29
-rw-r--r--sound/soc/codecs/wm8996.c9
-rw-r--r--sound/soc/codecs/wm8997.c60
-rw-r--r--sound/soc/codecs/wm8998.c60
-rw-r--r--sound/soc/codecs/wm_adsp.c498
-rw-r--r--sound/soc/codecs/wm_adsp.h4
-rw-r--r--sound/soc/dwc/dwc-pcm.c3
-rw-r--r--sound/soc/fsl/Kconfig9
-rw-r--r--sound/soc/fsl/Makefile2
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c1
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/fsl/fsl_micfil.c826
-rw-r--r--sound/soc/fsl/fsl_micfil.h283
-rw-r--r--sound/soc/fsl/fsl_sai.c8
-rw-r--r--sound/soc/fsl/fsl_spdif.c12
-rw-r--r--sound/soc/fsl/fsl_spdif.h2
-rw-r--r--sound/soc/fsl/fsl_ssi.c6
-rw-r--r--sound/soc/fsl/imx-audmux.c24
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c12
-rw-r--r--sound/soc/fsl/imx-spdif.c4
-rw-r--r--sound/soc/generic/Kconfig17
-rw-r--r--sound/soc/generic/Makefile4
-rw-r--r--sound/soc/generic/audio-graph-card.c481
-rw-r--r--sound/soc/generic/audio-graph-scu-card.c501
-rw-r--r--sound/soc/generic/simple-card-utils.c25
-rw-r--r--sound/soc/generic/simple-card.c479
-rw-r--r--sound/soc/generic/simple-scu-card.c474
-rw-r--r--sound/soc/intel/Kconfig2
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c2
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c19
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c45
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c2
-rw-r--r--sound/soc/intel/atom/sst/sst_loader.c2
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c3
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-pcm.c17
-rw-r--r--sound/soc/intel/boards/Kconfig1
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c14
-rw-r--r--sound/soc/intel/boards/broadwell.c18
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c114
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c13
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c8
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c336
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c23
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c180
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c12
-rw-r--r--sound/soc/intel/boards/cht_bsw_nau8824.c12
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c9
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c30
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c68
-rw-r--r--sound/soc/intel/boards/haswell.c17
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c207
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c25
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c92
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c70
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c5
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c10
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hda-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c20
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c5
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c19
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c2
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c14
-rw-r--r--sound/soc/intel/skylake/skl-messages.c2
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c12
-rw-r--r--sound/soc/intel/skylake/skl-topology.c2
-rw-r--r--sound/soc/intel/skylake/skl.c13
-rw-r--r--sound/soc/mediatek/Kconfig19
-rw-r--r--sound/soc/mediatek/Makefile1
-rw-r--r--sound/soc/mediatek/common/Makefile2
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.c6
-rw-r--r--sound/soc/mediatek/common/mtk-btcvsd.c1364
-rw-r--r--sound/soc/mediatek/mt8183/Makefile13
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-clk.c611
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-clk.h38
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-common.h108
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c1237
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-dai-adda.c501
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-dai-hostless.c118
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-dai-i2s.c1040
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-dai-pcm.c318
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-dai-tdm.c639
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-interconnection.h33
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-reg.h1666
-rw-r--r--sound/soc/meson/axg-fifo.c7
-rw-r--r--sound/soc/pxa/Makefile1
-rw-r--r--sound/soc/qcom/Kconfig4
-rw-r--r--sound/soc/qcom/apq8016_sbc.c21
-rw-r--r--sound/soc/qcom/apq8096.c71
-rw-r--r--sound/soc/qcom/common.c10
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c17
-rw-r--r--sound/soc/qcom/sdm845.c33
-rw-r--r--sound/soc/samsung/dma.h3
-rw-r--r--sound/soc/samsung/dmaengine.c16
-rw-r--r--sound/soc/samsung/i2s.c731
-rw-r--r--sound/soc/samsung/odroid.c206
-rw-r--r--sound/soc/samsung/pcm.c2
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c2
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c2
-rw-r--r--sound/soc/samsung/spdif.c2
-rw-r--r--sound/soc/sh/dma-sh7760.c2
-rw-r--r--sound/soc/sh/fsi.c5
-rw-r--r--sound/soc/sh/rcar/core.c53
-rw-r--r--sound/soc/sh/rcar/gen.c24
-rw-r--r--sound/soc/sh/rcar/rsnd.h27
-rw-r--r--sound/soc/sh/rcar/src.c125
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/sh/rcar/ssiu.c26
-rw-r--r--sound/soc/sh/siu_pcm.c15
-rw-r--r--sound/soc/soc-compress.c177
-rw-r--r--sound/soc/soc-core.c295
-rw-r--r--sound/soc/soc-dapm.c257
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c18
-rw-r--r--sound/soc/soc-pcm.c5
-rw-r--r--sound/soc/soc-topology.c179
-rw-r--r--sound/soc/sprd/Kconfig6
-rw-r--r--sound/soc/sprd/Makefile4
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.c562
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.h15
-rw-r--r--sound/soc/stm/stm32_adfsdm.c5
-rw-r--r--sound/soc/stm/stm32_i2s.c136
-rw-r--r--sound/soc/stm/stm32_sai.c11
-rw-r--r--sound/soc/stm/stm32_sai_sub.c8
-rw-r--r--sound/soc/sunxi/sun4i-codec.c149
-rw-r--r--sound/soc/sunxi/sun50i-codec-analog.c4
-rw-r--r--sound/soc/ti/davinci-mcasp.c311
-rw-r--r--sound/soc/txx9/txx9aclc.c4
-rw-r--r--sound/soc/uniphier/aio-dma.c3
-rw-r--r--sound/soc/xilinx/Kconfig16
-rw-r--r--sound/soc/xilinx/Makefile4
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c709
-rw-r--r--sound/soc/xilinx/xlnx_i2s.c15
-rw-r--r--sound/soc/xilinx/xlnx_spdif.c339
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c6
-rw-r--r--sound/sparc/dbri.c23
-rw-r--r--sound/spi/at73c213.c2
-rw-r--r--sound/synth/emux/emux_proc.c4
-rw-r--r--sound/usb/card.c3
-rw-r--r--sound/usb/card.h1
-rw-r--r--sound/usb/format.c2
-rw-r--r--sound/usb/line6/driver.c4
-rw-r--r--sound/usb/line6/pod.c3
-rw-r--r--sound/usb/mixer.c42
-rw-r--r--sound/usb/mixer_quirks.c6
-rw-r--r--sound/usb/pcm.c13
-rw-r--r--sound/usb/proc.c14
-rw-r--r--sound/usb/quirks-table.h71
-rw-r--r--sound/usb/quirks.c121
-rw-r--r--sound/usb/stream.c36
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c21
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c19
-rw-r--r--sound/x86/intel_hdmi_audio.c15
-rw-r--r--tools/arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--tools/arch/powerpc/include/uapi/asm/unistd.h404
-rw-r--r--tools/arch/riscv/include/uapi/asm/bitsperlong.h25
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst5
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst85
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst29
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst9
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst5
-rw-r--r--tools/bpf/bpftool/Makefile9
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool114
-rw-r--r--tools/bpf/bpftool/btf_dumper.c111
-rw-r--r--tools/bpf/bpftool/cfg.c9
-rw-r--r--tools/bpf/bpftool/common.c6
-rw-r--r--tools/bpf/bpftool/feature.c764
-rw-r--r--tools/bpf/bpftool/json_writer.c7
-rw-r--r--tools/bpf/bpftool/json_writer.h5
-rw-r--r--tools/bpf/bpftool/main.c3
-rw-r--r--tools/bpf/bpftool/main.h4
-rw-r--r--tools/bpf/bpftool/map.c265
-rw-r--r--tools/bpf/bpftool/prog.c28
-rw-r--r--tools/build/Makefile.feature10
-rw-r--r--tools/build/feature/test-all.c10
-rw-r--r--tools/build/feature/test-get_current_dir_name.c1
-rw-r--r--tools/build/feature/test-libpython.c1
-rw-r--r--tools/build/feature/test-reallocarray.c2
-rw-r--r--tools/build/feature/test-sched_getcpu.c2
-rw-r--r--tools/build/feature/test-setns.c1
-rw-r--r--tools/firmware/ihex2fw.c17
-rw-r--r--tools/iio/iio_event_monitor.c14
-rw-r--r--tools/iio/iio_generic_buffer.c2
-rw-r--r--tools/include/linux/filter.h20
-rw-r--r--tools/include/linux/numa.h16
-rw-r--r--tools/include/linux/rbtree.h52
-rw-r--r--tools/include/linux/rbtree_augmented.h60
-rw-r--r--tools/include/nolibc/nolibc.h (renamed from tools/testing/selftests/rcutorture/bin/nolibc.h)118
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/asm/bitsperlong.h4
-rw-r--r--tools/include/uapi/drm/i915_drm.h8
-rw-r--r--tools/include/uapi/linux/bpf.h116
-rw-r--r--tools/include/uapi/linux/ethtool.h51
-rw-r--r--tools/include/uapi/linux/fs.h60
-rw-r--r--tools/include/uapi/linux/if_link.h20
-rw-r--r--tools/include/uapi/linux/if_xdp.h78
-rw-r--r--tools/include/uapi/linux/in.h10
-rw-r--r--tools/include/uapi/linux/kvm.h19
-rw-r--r--tools/include/uapi/linux/mount.h58
-rw-r--r--tools/include/uapi/linux/perf_event.h55
-rw-r--r--tools/include/uapi/linux/pkt_sched.h1163
-rw-r--r--tools/include/uapi/linux/prctl.h9
-rw-r--r--tools/include/uapi/linux/tc_act/tc_bpf.h2
-rw-r--r--tools/include/uapi/linux/vhost.h113
-rw-r--r--tools/lib/bpf/.gitignore1
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile26
-rw-r--r--tools/lib/bpf/README.rst29
-rw-r--r--tools/lib/bpf/bpf.c99
-rw-r--r--tools/lib/bpf/bpf.h3
-rw-r--r--tools/lib/bpf/btf.c2198
-rw-r--r--tools/lib/bpf/btf.h46
-rw-r--r--tools/lib/bpf/libbpf.c210
-rw-r--r--tools/lib/bpf/libbpf.h44
-rw-r--r--tools/lib/bpf/libbpf.map30
-rw-r--r--tools/lib/bpf/libbpf_probes.c242
-rw-r--r--tools/lib/bpf/libbpf_util.h30
-rw-r--r--tools/lib/bpf/netlink.c85
-rw-r--r--tools/lib/bpf/test_libbpf.cpp4
-rw-r--r--tools/lib/bpf/xsk.c723
-rw-r--r--tools/lib/bpf/xsk.h203
-rw-r--r--tools/lib/lockdep/include/liblockdep/common.h2
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h11
-rwxr-xr-xtools/lib/lockdep/run_tests.sh6
-rw-r--r--tools/lib/lockdep/tests/ABBA.c9
-rw-r--r--tools/lib/rbtree.c178
-rw-r--r--tools/lib/traceevent/event-parse-api.c4
-rw-r--r--tools/lib/traceevent/event-parse-local.h4
-rw-r--r--tools/lib/traceevent/event-parse.c129
-rw-r--r--tools/lib/traceevent/event-parse.h17
-rw-r--r--tools/lib/traceevent/plugin_kvm.c2
-rw-r--r--tools/lib/traceevent/trace-seq.c17
-rw-r--r--tools/memory-model/.gitignore1
-rw-r--r--tools/memory-model/README2
-rw-r--r--tools/memory-model/linux-kernel.bell3
-rw-r--r--tools/memory-model/linux-kernel.cat4
-rw-r--r--tools/memory-model/linux-kernel.def1
-rw-r--r--tools/memory-model/scripts/README70
-rwxr-xr-xtools/memory-model/scripts/checkalllitmus.sh53
-rw-r--r--tools/memory-model/scripts/checkghlitmus.sh65
-rwxr-xr-xtools/memory-model/scripts/checklitmus.sh74
-rw-r--r--tools/memory-model/scripts/checklitmushist.sh60
-rw-r--r--tools/memory-model/scripts/cmplitmushist.sh87
-rw-r--r--tools/memory-model/scripts/initlitmushist.sh68
-rw-r--r--tools/memory-model/scripts/judgelitmus.sh78
-rw-r--r--tools/memory-model/scripts/newlitmushist.sh61
-rw-r--r--tools/memory-model/scripts/parseargs.sh136
-rw-r--r--tools/memory-model/scripts/runlitmushist.sh87
-rw-r--r--tools/perf/Build10
-rw-r--r--tools/perf/Documentation/perf-c2c.txt16
-rw-r--r--tools/perf/Documentation/perf-config.txt31
-rw-r--r--tools/perf/Documentation/perf-mem.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt19
-rw-r--r--tools/perf/Documentation/perf-script.txt6
-rw-r--r--tools/perf/Documentation/perf-trace.txt8
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt11
-rw-r--r--tools/perf/Makefile.config14
-rw-r--r--tools/perf/Makefile.perf36
-rw-r--r--tools/perf/arch/Build4
-rw-r--r--tools/perf/arch/arm/Build4
-rw-r--r--tools/perf/arch/arm/tests/Build7
-rw-r--r--tools/perf/arch/arm/tests/arch-tests.c4
-rw-r--r--tools/perf/arch/arm/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/arm/tests/vectors-page.c24
-rw-r--r--tools/perf/arch/arm/util/Build8
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c98
-rw-r--r--tools/perf/arch/arm/util/cs-etm.h3
-rw-r--r--tools/perf/arch/arm/util/pmu.c3
-rw-r--r--tools/perf/arch/arm64/Build4
-rw-r--r--tools/perf/arch/arm64/tests/Build6
-rw-r--r--tools/perf/arch/arm64/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/arm64/util/Build12
-rw-r--r--tools/perf/arch/nds32/Build2
-rw-r--r--tools/perf/arch/nds32/util/Build2
-rw-r--r--tools/perf/arch/powerpc/Build4
-rw-r--r--tools/perf/arch/powerpc/Makefile15
-rwxr-xr-xtools/perf/arch/powerpc/entry/syscalls/mksyscalltbl22
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl427
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h3
-rw-r--r--tools/perf/arch/powerpc/tests/Build6
-rw-r--r--tools/perf/arch/powerpc/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/powerpc/util/Build17
-rw-r--r--tools/perf/arch/powerpc/util/kvm-stat.c2
-rw-r--r--tools/perf/arch/powerpc/util/mem-events.c11
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c1
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c3
-rw-r--r--tools/perf/arch/s390/Build2
-rw-r--r--tools/perf/arch/s390/util/Build12
-rw-r--r--tools/perf/arch/s390/util/kvm-stat.c1
-rw-r--r--tools/perf/arch/sh/Build2
-rw-r--r--tools/perf/arch/sh/util/Build2
-rw-r--r--tools/perf/arch/sparc/Build2
-rw-r--r--tools/perf/arch/sparc/util/Build2
-rw-r--r--tools/perf/arch/x86/Build4
-rw-r--r--tools/perf/arch/x86/tests/Build14
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/x86/util/Build30
-rw-r--r--tools/perf/arch/x86/util/kvm-stat.c1
-rw-r--r--tools/perf/arch/xtensa/Build2
-rw-r--r--tools/perf/arch/xtensa/util/Build2
-rw-r--r--tools/perf/bench/numa.c7
-rw-r--r--tools/perf/builtin-annotate.c9
-rw-r--r--tools/perf/builtin-buildid-cache.c4
-rw-r--r--tools/perf/builtin-buildid-list.c8
-rw-r--r--tools/perf/builtin-c2c.c23
-rw-r--r--tools/perf/builtin-diff.c22
-rw-r--r--tools/perf/builtin-evlist.c4
-rw-r--r--tools/perf/builtin-inject.c12
-rw-r--r--tools/perf/builtin-kallsyms.c1
-rw-r--r--tools/perf/builtin-kmem.c7
-rw-r--r--tools/perf/builtin-kvm.c8
-rw-r--r--tools/perf/builtin-list.c8
-rw-r--r--tools/perf/builtin-lock.c8
-rw-r--r--tools/perf/builtin-mem.c9
-rw-r--r--tools/perf/builtin-probe.c1
-rw-r--r--tools/perf/builtin-record.c74
-rw-r--r--tools/perf/builtin-report.c37
-rw-r--r--tools/perf/builtin-sched.c63
-rw-r--r--tools/perf/builtin-script.c40
-rw-r--r--tools/perf/builtin-stat.c19
-rw-r--r--tools/perf/builtin-timechart.c8
-rw-r--r--tools/perf/builtin-top.c30
-rw-r--r--tools/perf/builtin-trace.c78
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/perf/design.txt4
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c18
-rw-r--r--tools/perf/examples/bpf/augmented_syscalls.c22
-rw-r--r--tools/perf/examples/bpf/etcsnoop.c18
-rw-r--r--tools/perf/include/bpf/bpf.h16
-rw-r--r--tools/perf/perf-read-vdso.c6
-rw-r--r--tools/perf/perf.h9
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/metrics.json2245
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/metrics.json1982
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json2
-rw-r--r--tools/perf/scripts/Build4
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Build2
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py2
-rw-r--r--tools/perf/scripts/python/export-to-sqlite.py2
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py511
-rw-r--r--tools/perf/scripts/python/failed-syscalls-by-pid.py21
-rw-r--r--tools/perf/scripts/python/mem-phys-addr.py24
-rwxr-xr-xtools/perf/scripts/python/net_dropmonitor.py10
-rw-r--r--tools/perf/scripts/python/netdev-times.py82
-rw-r--r--tools/perf/scripts/python/powerpc-hcalls.py18
-rw-r--r--tools/perf/scripts/python/sched-migration.py2
-rw-r--r--tools/perf/scripts/python/sctop.py24
-rwxr-xr-xtools/perf/scripts/python/stackcollapse.py7
-rw-r--r--tools/perf/scripts/python/stat-cpi.py11
-rw-r--r--tools/perf/scripts/python/syscall-counts-by-pid.py22
-rw-r--r--tools/perf/scripts/python/syscall-counts.py18
-rw-r--r--tools/perf/tests/attr.py33
-rw-r--r--tools/perf/tests/bp_account.c1
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/tests/evsel-tp-sched.c8
-rw-r--r--tools/perf/tests/hists_common.c9
-rw-r--r--tools/perf/tests/hists_cumulate.c15
-rw-r--r--tools/perf/tests/hists_filter.c1
-rw-r--r--tools/perf/tests/hists_link.c8
-rw-r--r--tools/perf/tests/hists_output.c33
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c1
-rw-r--r--tools/perf/tests/parse-events.c30
-rw-r--r--tools/perf/tests/pmu.c2
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/tests/sdt.c1
-rw-r--r--tools/perf/tests/shell/lib/probe.sh5
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh3
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh1
-rw-r--r--tools/perf/tests/tests.h5
-rw-r--r--tools/perf/trace/beauty/Build26
-rw-r--r--tools/perf/trace/beauty/ioctl.c2
-rwxr-xr-xtools/perf/trace/beauty/mount_flags.sh4
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh2
-rw-r--r--tools/perf/trace/beauty/waitid_options.c2
-rw-r--r--tools/perf/ui/Build18
-rw-r--r--tools/perf/ui/browsers/Build10
-rw-r--r--tools/perf/ui/browsers/annotate.c17
-rw-r--r--tools/perf/ui/browsers/header.c2
-rw-r--r--tools/perf/ui/browsers/hists.c23
-rw-r--r--tools/perf/ui/browsers/map.c1
-rw-r--r--tools/perf/ui/gtk/annotate.c3
-rw-r--r--tools/perf/ui/gtk/hists.c7
-rw-r--r--tools/perf/ui/hist.c1
-rw-r--r--tools/perf/ui/stdio/hist.c7
-rw-r--r--tools/perf/ui/tui/Build8
-rw-r--r--tools/perf/util/Build276
-rw-r--r--tools/perf/util/annotate.c14
-rw-r--r--tools/perf/util/annotate.h14
-rw-r--r--tools/perf/util/auxtrace.c27
-rw-r--r--tools/perf/util/auxtrace.h5
-rw-r--r--tools/perf/util/block-range.c2
-rw-r--r--tools/perf/util/block-range.h6
-rw-r--r--tools/perf/util/bpf-event.c263
-rw-r--r--tools/perf/util/bpf-event.h38
-rw-r--r--tools/perf/util/bpf-loader.c31
-rw-r--r--tools/perf/util/bpf-loader.h7
-rw-r--r--tools/perf/util/bpf_map.c72
-rw-r--r--tools/perf/util/bpf_map.h22
-rw-r--r--tools/perf/util/branch.h27
-rw-r--r--tools/perf/util/build-id.c14
-rw-r--r--tools/perf/util/build-id.h3
-rw-r--r--tools/perf/util/c++/Build4
-rw-r--r--tools/perf/util/c++/clang.cpp2
-rw-r--r--tools/perf/util/callchain.c49
-rw-r--r--tools/perf/util/callchain.h22
-rw-r--r--tools/perf/util/color.c39
-rw-r--r--tools/perf/util/color.h1
-rw-r--r--tools/perf/util/color_config.c47
-rw-r--r--tools/perf/util/comm.c1
-rw-r--r--tools/perf/util/comm.h4
-rw-r--r--tools/perf/util/config.c1
-rw-r--r--tools/perf/util/cpu-set-sched.h50
-rw-r--r--tools/perf/util/cpumap.c23
-rw-r--r--tools/perf/util/cpumap.h1
-rw-r--r--tools/perf/util/cputopo.c277
-rw-r--r--tools/perf/util/cputopo.h33
-rw-r--r--tools/perf/util/cs-etm-decoder/Build2
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c41
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h16
-rw-r--r--tools/perf/util/cs-etm.c832
-rw-r--r--tools/perf/util/cs-etm.h57
-rw-r--r--tools/perf/util/data-convert-bt.c4
-rw-r--r--tools/perf/util/data.c175
-rw-r--r--tools/perf/util/data.h16
-rw-r--r--tools/perf/util/db-export.c1
-rw-r--r--tools/perf/util/drv_configs.c78
-rw-r--r--tools/perf/util/drv_configs.h26
-rw-r--r--tools/perf/util/dso.c11
-rw-r--r--tools/perf/util/dso.h17
-rw-r--r--tools/perf/util/event.c43
-rw-r--r--tools/perf/util/event.h60
-rw-r--r--tools/perf/util/evlist.c6
-rw-r--r--tools/perf/util/evlist.h6
-rw-r--r--tools/perf/util/evsel.c27
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/find-map.c (renamed from tools/perf/util/find-vdso-map.c)7
-rw-r--r--tools/perf/util/header.c289
-rw-r--r--tools/perf/util/hist.c220
-rw-r--r--tools/perf/util/hist.h19
-rw-r--r--tools/perf/util/intel-bts.c6
-rw-r--r--tools/perf/util/intel-pt-decoder/Build2
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c39
-rw-r--r--tools/perf/util/intel-pt.c23
-rw-r--r--tools/perf/util/intlist.h2
-rw-r--r--tools/perf/util/jitdump.c1
-rw-r--r--tools/perf/util/kvm-stat.h7
-rw-r--r--tools/perf/util/machine.c114
-rw-r--r--tools/perf/util/machine.h17
-rw-r--r--tools/perf/util/map.c14
-rw-r--r--tools/perf/util/map.h100
-rw-r--r--tools/perf/util/map_groups.h91
-rw-r--r--tools/perf/util/map_symbol.h22
-rw-r--r--tools/perf/util/mem-events.c2
-rw-r--r--tools/perf/util/metricgroup.c10
-rw-r--r--tools/perf/util/metricgroup.h3
-rw-r--r--tools/perf/util/mmap.c105
-rw-r--r--tools/perf/util/mmap.h3
-rw-r--r--tools/perf/util/ordered-events.c6
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/parse-events.y4
-rw-r--r--tools/perf/util/pmu.c2
-rw-r--r--tools/perf/util/pmu.h5
-rw-r--r--tools/perf/util/probe-event.c6
-rw-r--r--tools/perf/util/probe-event.h5
-rw-r--r--tools/perf/util/probe-file.c1
-rw-r--r--tools/perf/util/rb_resort.h8
-rw-r--r--tools/perf/util/rblist.c28
-rw-r--r--tools/perf/util/rblist.h2
-rw-r--r--tools/perf/util/s390-cpumcf-kernel.h62
-rw-r--r--tools/perf/util/s390-cpumsf.c89
-rw-r--r--tools/perf/util/s390-sample-raw.c222
-rw-r--r--tools/perf/util/sample-raw.c18
-rw-r--r--tools/perf/util/sample-raw.h14
-rw-r--r--tools/perf/util/scripting-engines/Build4
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c17
-rw-r--r--tools/perf/util/session.c110
-rw-r--r--tools/perf/util/setup.py9
-rw-r--r--tools/perf/util/sort.c18
-rw-r--r--tools/perf/util/sort.h7
-rw-r--r--tools/perf/util/srccode.h13
-rw-r--r--tools/perf/util/srcline.c45
-rw-r--r--tools/perf/util/srcline.h13
-rw-r--r--tools/perf/util/stat-display.c1
-rw-r--r--tools/perf/util/stat-shadow.c2
-rw-r--r--tools/perf/util/strbuf.c1
-rw-r--r--tools/perf/util/strlist.h2
-rw-r--r--tools/perf/util/symbol-elf.c25
-rw-r--r--tools/perf/util/symbol-minimal.c1
-rw-r--r--tools/perf/util/symbol.c91
-rw-r--r--tools/perf/util/symbol.h102
-rw-r--r--tools/perf/util/symbol_conf.h73
-rw-r--r--tools/perf/util/symbol_fprintf.c3
-rw-r--r--tools/perf/util/thread-stack.c235
-rw-r--r--tools/perf/util/thread-stack.h3
-rw-r--r--tools/perf/util/thread.c1
-rw-r--r--tools/perf/util/thread.h8
-rw-r--r--tools/perf/util/tool.h5
-rw-r--r--tools/perf/util/unwind-libdw.c2
-rw-r--r--tools/perf/util/unwind-libunwind-local.c1
-rw-r--r--tools/perf/util/unwind-libunwind.c1
-rw-r--r--tools/perf/util/util.c82
-rw-r--r--tools/perf/util/util.h3
-rw-r--r--tools/perf/util/vdso.c7
-rw-r--r--tools/perf/util/zlib.c1
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/testing/nvdimm/dimm_devs.c4
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/bpf/.gitignore3
-rw-r--r--tools/testing/selftests/bpf/Makefile136
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h40
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h39
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c6
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c43
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c249
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c139
-rw-r--r--tools/testing/selftests/bpf/prog_tests/l4lb_all.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lock.c75
-rw-r--r--tools/testing/selftests/bpf/prog_tests/obj_name.c71
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_access.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_md_access.c24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/queue_stack_map.c103
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/signal_pending.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spinlock.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c165
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c150
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map.c103
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c59
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c78
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_estats.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c132
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_noinline.c82
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c (renamed from tools/testing/selftests/bpf/bpf_flow.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_prog.c (renamed from tools/testing/selftests/bpf/connect4_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/connect6_prog.c (renamed from tools/testing/selftests/bpf/connect6_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/dev_cgroup.c (renamed from tools/testing/selftests/bpf/dev_cgroup.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c (renamed from tools/testing/selftests/bpf/get_cgroup_id_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/netcnt_prog.c (renamed from tools/testing/selftests/bpf/netcnt_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sample_map_ret0.c (renamed from tools/testing/selftests/bpf/sample_map_ret0.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sample_ret0.c (renamed from tools/testing/selftests/bpf/sample_ret0.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg4_prog.c (renamed from tools/testing/selftests/bpf/sendmsg4_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c (renamed from tools/testing/selftests/bpf/sendmsg6_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/socket_cookie_prog.c (renamed from tools/testing/selftests/bpf/socket_cookie_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_parse_prog.c (renamed from tools/testing/selftests/bpf/sockmap_parse_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c (renamed from tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c (renamed from tools/testing/selftests/bpf/sockmap_verdict_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_adjust_tail.c (renamed from tools/testing/selftests/bpf/test_adjust_tail.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_haskv.c (renamed from tools/testing/selftests/bpf/test_btf_haskv.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_nokv.c (renamed from tools/testing/selftests/bpf/test_btf_nokv.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c (renamed from tools/testing/selftests/bpf/test_get_stack_rawtp.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb.c (renamed from tools/testing/selftests/bpf/test_l4lb.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c (renamed from tools/testing/selftests/bpf/test_l4lb_noinline.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c (renamed from tools/testing/selftests/bpf/test_lirc_mode2_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c85
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_seg6local.c (renamed from tools/testing/selftests/bpf/test_lwt_seg6local.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c (renamed from tools/testing/selftests/bpf/test_map_in_map.c)4
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lock.c66
-rw-r--r--tools/testing/selftests/bpf/progs/test_obj_id.c (renamed from tools/testing/selftests/bpf/test_obj_id.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c (renamed from tools/testing/selftests/bpf/test_pkt_access.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_md_access.c (renamed from tools/testing/selftests/bpf/test_pkt_md_access.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_queue_map.c (renamed from tools/testing/selftests/bpf/test_queue_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c (renamed from tools/testing/selftests/bpf/test_select_reuseport_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c (renamed from tools/testing/selftests/bpf/test_sk_lookup_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c (renamed from tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields_kern.c152
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockhash_kern.c (renamed from tools/testing/selftests/bpf/test_sockhash_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.c (renamed from tools/testing/selftests/bpf/test_sockmap_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c108
-rw-r--r--tools/testing/selftests/bpf/progs/test_stack_map.c (renamed from tools/testing/selftests/bpf/test_stack_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c (renamed from tools/testing/selftests/bpf/test_stacktrace_build_id.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_map.c (renamed from tools/testing/selftests/bpf/test_stacktrace_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_estats.c (renamed from tools/testing/selftests/bpf/test_tcp_estats.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c (renamed from tools/testing/selftests/bpf/test_tcpbpf_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c (renamed from tools/testing/selftests/bpf/test_tcpnotify_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tracepoint.c (renamed from tools/testing/selftests/bpf/test_tracepoint.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c (renamed from tools/testing/selftests/bpf/test_tunnel_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c (renamed from tools/testing/selftests/bpf/test_xdp.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_meta.c (renamed from tools/testing/selftests/bpf/test_xdp_meta.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c (renamed from tools/testing/selftests/bpf/test_xdp_noinline.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_redirect.c (renamed from tools/testing/selftests/bpf/test_xdp_redirect.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_vlan.c (renamed from tools/testing/selftests/bpf/test_xdp_vlan.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_dummy.c (renamed from tools/testing/selftests/bpf/xdp_dummy.c)0
-rwxr-xr-xtools/testing/selftests/bpf/tcp_client.py3
-rwxr-xr-xtools/testing/selftests/bpf/tcp_server.py5
-rw-r--r--tools/testing/selftests/bpf/test_btf.c1315
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c2
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c2
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c2
-rw-r--r--tools/testing/selftests/bpf/test_libbpf_open.c32
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c10
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh376
-rw-r--r--tools/testing/selftests/bpf/test_maps.c41
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py135
-rw-r--r--tools/testing/selftests/bpf/test_progs.c1794
-rw-r--r--tools/testing/selftests/bpf/test_progs.h93
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c2
-rw-r--r--tools/testing/selftests/bpf/test_sock.c11
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c55
-rw-r--r--tools/testing/selftests/bpf/test_sock_fields.c328
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c6
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c1
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c2
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c8
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c15455
-rw-r--r--tools/testing/selftests/bpf/verifier/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/array_access.c219
-rw-r--r--tools/testing/selftests/bpf/verifier/basic.c23
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_call.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_instr.c134
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stack.c64
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stx_ldx.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c508
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_deduction.c124
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c406
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_get_stack.c44
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c1942
-rw-r--r--tools/testing/selftests/bpf/verifier/cfg.c70
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_skb.c197
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_storage.c220
-rw-r--r--tools/testing/selftests/bpf/verifier/const_or.c60
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx.c93
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_msg.c181
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c1034
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c159
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c633
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c40
-rw-r--r--tools/testing/selftests/bpf/verifier/div0.c184
-rw-r--r--tools/testing/selftests/bpf/verifier/div_overflow.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_access_var_len.c614
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_packet_access.c460
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_value_access.c953
-rw-r--r--tools/testing/selftests/bpf/verifier/jit.c88
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c746
-rw-r--r--tools/testing/selftests/bpf/verifier/jset.c167
-rw-r--r--tools/testing/selftests/bpf/verifier/jump.c180
-rw-r--r--tools/testing/selftests/bpf/verifier/junk_insn.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_abs.c286
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_dw.c36
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_imm64.c141
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_ind.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/leak_ptr.c67
-rw-r--r--tools/testing/selftests/bpf/verifier/lwt.c189
-rw-r--r--tools/testing/selftests/bpf/verifier/map_in_map.c62
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr_mixing.c100
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ret_val.c65
-rw-r--r--tools/testing/selftests/bpf/verifier/masking.c322
-rw-r--r--tools/testing/selftests/bpf/verifier/meta_access.c235
-rw-r--r--tools/testing/selftests/bpf/verifier/perf_event_sample_period.c59
-rw-r--r--tools/testing/selftests/bpf/verifier/prevent_map_lookup.c74
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_stack.c305
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c607
-rw-r--r--tools/testing/selftests/bpf/verifier/runtime_jit.c80
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c156
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c384
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c76
-rw-r--r--tools/testing/selftests/bpf/verifier/spin_lock.c333
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c317
-rw-r--r--tools/testing/selftests/bpf/verifier/uninit.c39
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c522
-rw-r--r--tools/testing/selftests/bpf/verifier/value.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/value_adj_spill.c43
-rw-r--r--tools/testing/selftests/bpf/verifier/value_illegal_alu.c94
-rw-r--r--tools/testing/selftests/bpf/verifier/value_or_null.c152
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c838
-rw-r--r--tools/testing/selftests/bpf/verifier/var_off.c66
-rw-r--r--tools/testing/selftests/bpf/verifier/xadd.c97
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp.c14
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c900
-rwxr-xr-xtools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh13
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh200
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh20
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh459
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan.sh18
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh126
-rw-r--r--tools/testing/selftests/filesystems/binderfs/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile6
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c275
-rw-r--r--tools/testing/selftests/filesystems/binderfs/config3
-rw-r--r--tools/testing/selftests/firmware/config1
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh9
-rwxr-xr-xtools/testing/selftests/firmware/fw_lib.sh2
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c9
-rw-r--r--tools/testing/selftests/ir/Makefile2
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c6
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c74
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/config3
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh1
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_aware.sh47
-rw-r--r--tools/testing/selftests/net/forwarding/config2
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample3
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh63
-rw-r--r--tools/testing/selftests/net/forwarding/ipip_lib.sh349
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh9
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh11
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh1
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_flower.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh88
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh5
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_broadcast.sh3
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_asymmetric.sh567
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_symmetric.sh551
-rw-r--r--tools/testing/selftests/net/ip_defrag.c121
-rwxr-xr-xtools/testing/selftests/net/ip_defrag.sh25
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh96
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh140
-rw-r--r--tools/testing/selftests/net/tls.c164
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh8
-rw-r--r--tools/testing/selftests/net/udpgso.c1
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c42
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy.sh153
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rw-r--r--tools/testing/selftests/netfilter/config2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh762
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile3
-rw-r--r--tools/testing/selftests/networking/timestamping/rxtimestamp.c1
-rw-r--r--tools/testing/selftests/networking/timestamping/txtimestamp.c2
-rw-r--r--tools/testing/selftests/proc/.gitignore2
-rw-r--r--tools/testing/selftests/proc/Makefile2
-rw-r--r--tools/testing/selftests/proc/proc-loadavg-001.c2
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c406
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c2
-rw-r--r--tools/testing/selftests/proc/proc-self-syscall.c3
-rw-r--r--tools/testing/selftests/proc/proc-self-wchan.c2
-rw-r--r--tools/testing/selftests/proc/read.c14
-rw-r--r--tools/testing/selftests/proc/setns-dcache.c129
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh27
-rw-r--r--tools/testing/selftests/rtc/rtctest.c109
-rw-r--r--tools/testing/selftests/safesetid/.gitignore1
-rw-r--r--tools/testing/selftests/safesetid/Makefile8
-rw-r--r--tools/testing/selftests/safesetid/config2
-rw-r--r--tools/testing/selftests/safesetid/safesetid-test.c334
-rwxr-xr-xtools/testing/selftests/safesetid/safesetid-test.sh26
-rw-r--r--tools/testing/selftests/seccomp/Makefile2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c82
-rw-r--r--tools/testing/selftests/tc-testing/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/TdcPlugin.py4
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt5
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py16
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json88
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json31
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json177
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py15
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_batch.py58
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py1
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_multibatch.py65
-rw-r--r--tools/testing/selftests/timers/Makefile2
-rw-r--r--tools/testing/selftests/tmpfs/.gitignore1
-rw-r--r--tools/testing/selftests/tmpfs/Makefile7
-rw-r--r--tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c67
-rw-r--r--tools/testing/selftests/vm/gup_benchmark.c1
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests16
-rw-r--r--tools/testing/selftests/vm/test_vmalloc.sh176
-rw-r--r--tools/testing/selftests/x86/mpx-mini-test.c2
-rw-r--r--tools/testing/selftests/x86/protection_keys.c41
-rw-r--r--tools/testing/selftests/x86/unwind_vdso.c1
-rw-r--r--tools/thermal/tmon/Makefile2
-rw-r--r--tools/vm/page-types.c2
-rw-r--r--tools/vm/page_owner_sort.c4
-rw-r--r--tools/vm/slabinfo.c35
-rw-r--r--virt/kvm/arm/arm.c10
-rw-r--r--virt/kvm/arm/mmu.c13
-rw-r--r--virt/kvm/arm/psci.c36
-rw-r--r--virt/kvm/arm/vgic/vgic-debug.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c30
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c34
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c8
-rw-r--r--virt/kvm/arm/vgic/vgic.c118
-rw-r--r--virt/kvm/kvm_main.c16
7881 files changed, 343747 insertions, 146845 deletions
diff --git a/.clang-format b/.clang-format
index e6080f5834a3..bc2ffb2a0b53 100644
--- a/.clang-format
+++ b/.clang-format
@@ -72,6 +72,10 @@ ForEachMacros:
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
+ - '__ata_qc_for_each'
+ - 'ata_qc_for_each'
+ - 'ata_qc_for_each_raw'
+ - 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- 'bio_for_each_integrity_vec'
@@ -85,6 +89,7 @@ ForEachMacros:
- 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
+ - 'bpf_for_each_spilled_reg'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
@@ -103,6 +108,8 @@ ForEachMacros:
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
+ - 'drm_atomic_for_each_plane_damage'
+ - 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_encoder'
@@ -121,11 +128,21 @@ ForEachMacros:
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
+ - 'for_each_card_components'
+ - 'for_each_card_links'
+ - 'for_each_card_links_safe'
+ - 'for_each_card_prelinks'
+ - 'for_each_card_rtds'
+ - 'for_each_card_rtds_safe'
+ - 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_cmsghdr'
- 'for_each_compatible_node'
+ - 'for_each_component_dais'
+ - 'for_each_component_dais_safe'
+ - 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
@@ -133,6 +150,10 @@ ForEachMacros:
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
- 'for_each_dma_cap_mask'
+ - 'for_each_dpcm_be'
+ - 'for_each_dpcm_be_rollback'
+ - 'for_each_dpcm_be_safe'
+ - 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
@@ -149,6 +170,7 @@ ForEachMacros:
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
+ - 'for_each_link_codecs'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
@@ -160,6 +182,7 @@ ForEachMacros:
- 'for_each_mem_range_rev'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
+ - 'for_each_msi_entry_safe'
- 'for_each_net'
- 'for_each_netdev'
- 'for_each_netdev_continue'
@@ -183,12 +206,14 @@ ForEachMacros:
- 'for_each_node_with_property'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
+ - 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_plane_in_state'
+ - 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
@@ -206,14 +231,17 @@ ForEachMacros:
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_property_of_node'
+ - 'for_each_registered_fb'
- 'for_each_reserved_mem_region'
- - 'for_each_resv_unavail_range'
+ - 'for_each_rtd_codec_dai'
+ - 'for_each_rtd_codec_dai_rollback'
- 'for_each_rtdcom'
- 'for_each_rtdcom_safe'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_sg'
- 'for_each_sg_page'
+ - 'for_each_sibling_event'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_zone'
@@ -251,6 +279,8 @@ ForEachMacros:
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
+ - 'i3c_bus_for_each_i2cdev'
+ - 'i3c_bus_for_each_i3cdev'
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
@@ -267,11 +297,14 @@ ForEachMacros:
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
+ - 'list_for_each_codec'
+ - 'list_for_each_codec_safe'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
+ - 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
@@ -291,6 +324,7 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
+ - 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
@@ -357,12 +391,14 @@ ForEachMacros:
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
+ - 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'tb_property_for_each'
+ - 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@@ -371,6 +407,11 @@ ForEachMacros:
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
+ - 'virtio_device_for_each_vq'
+ - 'xa_for_each'
+ - 'xas_for_each'
+ - 'xas_for_each_conflict'
+ - 'xas_for_each_marked'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
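
The ForEachMacros additions above only change how clang-format indents the bodies
of these iterator macros. A minimal way to exercise the updated config from the top
of the tree (the file path is illustrative; --style=file simply picks up the
.clang-format shown here):

    # Reformat one file in place using the repository's .clang-format
    clang-format --style=file -i drivers/ata/libata-core.c
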
diff --git a/.mailmap b/.mailmap
index ea98fcc197e4..37e1847c7988 100644
--- a/.mailmap
+++ b/.mailmap
@@ -123,6 +123,7 @@ Mark Brown <broonie@sirena.org.uk>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
+Mathieu Othacehe <m.othacehe@gmail.com>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
Matthew Wilcox <willy@infradead.org> <mawilcox@linuxonhyperv.com>
diff --git a/CREDITS b/CREDITS
index e818eb6a3e71..8e0342620a06 100644
--- a/CREDITS
+++ b/CREDITS
@@ -842,10 +842,9 @@ D: ax25-utils maintainer.
N: Helge Deller
E: deller@gmx.de
-E: hdeller@redhat.de
-D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver
-S: Schimmelsrain 1
-S: D-69231 Rauenberg
+W: http://www.parisc-linux.org/
+D: PA-RISC Linux architecture maintainer
+D: LASI-, ASP-, WAX-, LCD/LED-driver
S: Germany
N: Jean Delvare
@@ -1222,7 +1221,7 @@ S: Brazil
N: Oded Gabbay
E: oded.gabbay@gmail.com
-D: AMD KFD maintainer
+D: HabanaLabs and AMD KFD maintainer
S: 12 Shraga Raphaeli
S: Petah-Tikva, 4906418
S: Israel
@@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape
S: South Africa
N: Grant Grundler
-E: grundler@parisc-linux.org
+E: grantgrundler@gmail.com
W: http://obmouse.sourceforge.net/
W: http://www.parisc-linux.org/
D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver
@@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206
S: USA
N: Kyle McMartin
-E: kyle@parisc-linux.org
+E: kyle@mcmartin.ca
D: Linux/PARISC hacker
D: AD1889 sound driver
S: Ottawa, Canada
@@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct
S: Cupertino, CA 95014
S: USA
-N: Thibaut Varene
-E: T-Bone@parisc-linux.org
-W: http://www.parisc-linux.org/~varenet/
-P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063
+N: Thibaut Varène
+E: hacks+kernel@slashdirt.org
+W: http://hacks.slashdirt.org/
D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits
D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there
D: AD1889 sound driver
-S: Paris, France
+S: France
N: Heikki Vatiainen
E: hessu@cs.tut.fi
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index 3fed8fdb873d..826689dcc2e6 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -146,3 +146,36 @@ KernelVersion: 4.16
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Binary file created by uio_hv_generic for ring buffer
Users: Userspace drivers
+
+What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/intr_in_full
+Date: February 2019
+KernelVersion: 5.0
+Contact: Michael Kelley <mikelley@microsoft.com>
+Description: Number of guest to host interrupts caused by the inbound ring
+ buffer transitioning from full to not full while a packet is
+ waiting for buffer space to become available
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/intr_out_empty
+Date: February 2019
+KernelVersion: 5.0
+Contact: Michael Kelley <mikelley@microsoft.com>
+Description: Number of guest to host interrupts caused by the outbound ring
+ buffer transitioning from empty to not empty
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/out_full_first
+Date: February 2019
+KernelVersion: 5.0
+Contact: Michael Kelley <mikelley@microsoft.com>
+Description: Number of write operations that were the first to encounter an
+ outbound ring buffer full condition
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/out_full_total
+Date: February 2019
+KernelVersion: 5.0
+Contact: Michael Kelley <mikelley@microsoft.com>
+Description: Total number of write operations that encountered an outbound
+ ring buffer full condition
+Users: Debugging tools
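
The four new counters are plain read-only sysfs attributes, so sampling them needs
nothing more than cat; substitute a real device UUID and channel number for the
placeholders below:

    CHAN=/sys/bus/vmbus/devices/<UUID>/channels/<N>
    cat $CHAN/intr_in_full $CHAN/intr_out_empty $CHAN/out_full_first $CHAN/out_full_total
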
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index 9b642669cb16..169fe08a649b 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -24,7 +24,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
cpld3_version
Date: November 2018
-KernelVersion: 4.21
+KernelVersion: 5.0
Contact: Vadim Pasternak <vadimp@mellanox.com>
Description: These files show with which CPLD versions have been burned
on LED board.
@@ -35,7 +35,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
jtag_enable
Date: November 2018
-KernelVersion: 4.21
+KernelVersion: 5.0
Contact: Vadim Pasternak <vadimp@mellanox.com>
Description: These files enable and disable the access to the JTAG domain.
By default access to the JTAG domain is disabled.
@@ -105,7 +105,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
reset_voltmon_upgrade_fail
Date: November 2018
-KernelVersion: 4.21
+KernelVersion: 5.0
Contact: Vadim Pasternak <vadimp@mellanox.com>
Description: These files show the system reset cause, as following: ComEx
power fail, reset from ComEx, system platform reset, reset
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
new file mode 100644
index 000000000000..2f5b80be07a3
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -0,0 +1,126 @@
+What: /sys/kernel/debug/habanalabs/hl<n>/addr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the device address to be used for read or write through
+ PCI bar. The acceptable value is a string that starts with "0x"
+
+What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with information about the currently allocated
+ command buffers
+
+What: /sys/kernel/debug/habanalabs/hl<n>/command_submission
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with information about the currently active
+ command submissions
+
+What: /sys/kernel/debug/habanalabs/hl<n>/command_submission_jobs
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with detailed information about each JOB (CB) of
+ each active command submission
+
+What: /sys/kernel/debug/habanalabs/hl<n>/data32
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the root user to read or write directly through the
+ device's PCI bar. Writing to this file generates a write
+ transaction while reading from the file generates a read
+ transaction. This custom interface is needed (instead of using
+ the generic Linux user-space PCI mapping) because the DDR bar
+ is very small compared to the DDR memory and only the driver can
+ move the bar before and after the transaction
+
+What: /sys/kernel/debug/habanalabs/hl<n>/device
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Enables the root user to set the device to a specific state.
+ Valid values are "disable", "enable", "suspend", "resume".
+ User can read this property to see the valid values
+
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_addr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets I2C device address for I2C transaction that is generated
+ by the device's CPU
+
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_bus
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets I2C bus address for I2C transaction that is generated by
+ the device's CPU
+
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_data
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Triggers an I2C transaction that is generated by the device's
+ CPU. Writing to this file generates a write transaction while
+ reading from the file generates a read transaction
+
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_reg
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets I2C register id for I2C transaction that is generated by
+ the device's CPU
+
+What: /sys/kernel/debug/habanalabs/hl<n>/led0
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the state of the first S/W led on the device
+
+What: /sys/kernel/debug/habanalabs/hl<n>/led1
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the state of the second S/W led on the device
+
+What: /sys/kernel/debug/habanalabs/hl<n>/led2
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the state of the third S/W led on the device
+
+What: /sys/kernel/debug/habanalabs/hl<n>/mmu
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the hop values and physical address for a given ASID
+ and virtual address. The user should write the ASID and VA into
+ the file and then read the file to get the result.
+ e.g. to display info about VA 0x1000 for ASID 1 you need to do:
+ echo "1 0x1000" > /sys/kernel/debug/habanalabs/hl0/mmu
+
+What: /sys/kernel/debug/habanalabs/hl<n>/set_power_state
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Sets the PCI power state. Valid values are "1" for D0 and "2"
+ for D3Hot
+
+What: /sys/kernel/debug/habanalabs/hl<n>/userptr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with information about the current user
+ pointers (user virtual addresses) that are pinned and mapped
+ to DMA addresses
+
+What: /sys/kernel/debug/habanalabs/hl<n>/vm
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays a list with information about all the active virtual
+ address mappings per ASID
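
Based on the entries above, a PCI-bar register read is a two-step echo/cat sequence
through addr and data32, and an MMU lookup reuses the example from the mmu entry;
the hl0 node, the example address and root privileges are assumptions:

    # Read a 32-bit word at a device address (address value is illustrative)
    echo 0x1000000 > /sys/kernel/debug/habanalabs/hl0/addr
    cat /sys/kernel/debug/habanalabs/hl0/data32

    # Translate VA 0x1000 for ASID 1, as in the mmu entry above
    echo "1 0x1000" > /sys/kernel/debug/habanalabs/hl0/mmu
    cat /sys/kernel/debug/habanalabs/hl0/mmu
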
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 7710d4022b19..dfad7427817c 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -279,3 +279,12 @@ Description:
size in 512B sectors of the zones of the device, with
the eventual exception of the last zone of the device
which may be smaller.
+
+What: /sys/block/<disk>/queue/io_timeout
+Date: November 2018
+Contact: Weiping Zhang <zhangweiping@didiglobal.com>
+Description:
+ io_timeout is the request timeout in milliseconds. If a request
+ does not complete in this time then the block driver timeout
+ handler is invoked. That timeout handler can decide to retry
+ the request, to fail it or to start a device recovery strategy.
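
A quick usage sketch for the new io_timeout attribute; the disk name and the
60-second value are illustrative, and the attribute is assumed to be writable
on kernels that carry this change:

    cat /sys/block/sda/queue/io_timeout            # current timeout in milliseconds
    echo 60000 > /sys/block/sda/queue/io_timeout   # allow up to 60 s per request
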
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index 9d2339a485c8..14b2bf2e5105 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -122,11 +122,18 @@ Description:
statistics (bd_count, bd_reads, bd_writes) in a format
similar to block layer statistics file format.
+What: /sys/block/zram<id>/writeback_limit_enable
+Date: November 2018
+Contact: Minchan Kim <minchan@kernel.org>
+Description:
+ The writeback_limit_enable file is read-write and specifies
+ whether the writeback_limit feature is enabled. "1" enables
+ the feature; "0", the initial state, means no limit.
+
What: /sys/block/zram<id>/writeback_limit
Date: November 2018
Contact: Minchan Kim <minchan@kernel.org>
Description:
The writeback_limit file is read-write and specifies the maximum
amount of writeback ZRAM can do. The limit could be changed
- in run time and "0" means disable the limit.
- No limit is the initial state.
+ in run time.
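
Combining the new writeback_limit_enable knob with the existing writeback_limit
file, a budget-limited writeback setup looks roughly like this (zram0 and the
numeric budget are illustrative; the unit of the budget is defined in
Documentation/blockdev/zram.txt, not here):

    echo 1 > /sys/block/zram0/writeback_limit_enable   # turn the limit on
    echo 1024 > /sys/block/zram0/writeback_limit       # set the writeback budget
    cat /sys/block/zram0/writeback_limit               # inspect the current budget
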
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 8127a08e366d..864f8efd12e5 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1554,6 +1554,10 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentration_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_co2_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_co2_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentration_ethanol_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_ethanol_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentration_h2_raw
+What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_h2_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentration_voc_raw
What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
KernelVersion: 4.3
@@ -1684,4 +1688,19 @@ KernelVersion: 4.18
Contact: linux-iio@vger.kernel.org
Description:
Raw (unscaled) phase difference reading from channel Y
- that can be processed to radians. \ No newline at end of file
+ that can be processed to radians.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_massconcentration_pm1_input
+What: /sys/bus/iio/devices/iio:deviceX/in_massconcentrationY_pm1_input
+What: /sys/bus/iio/devices/iio:deviceX/in_massconcentration_pm2p5_input
+What: /sys/bus/iio/devices/iio:deviceX/in_massconcentrationY_pm2p5_input
+What: /sys/bus/iio/devices/iio:deviceX/in_massconcentration_pm4_input
+What: /sys/bus/iio/devices/iio:deviceX/in_massconcentrationY_pm4_input
+What: /sys/bus/iio/devices/iio:deviceX/in_massconcentration_pm10_input
+What: /sys/bus/iio/devices/iio:deviceX/in_massconcentrationY_pm10_input
+KernelVersion: 4.22
+Contact: linux-iio@vger.kernel.org
+Description:
+ Mass concentration reading of particulate matter in ug / m3.
+ pmX consists of particles with aerodynamic diameter less than or
+ equal to X micrometers.
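
Since these are *_input attributes, the values are already processed to ug / m3
and can be read directly; the device index is illustrative:

    cat /sys/bus/iio/devices/iio:device0/in_massconcentration_pm2p5_input
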
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-sps30 b/Documentation/ABI/testing/sysfs-bus-iio-sps30
new file mode 100644
index 000000000000..143df8e89d08
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-sps30
@@ -0,0 +1,28 @@
+What: /sys/bus/iio/devices/iio:deviceX/start_cleaning
+Date: December 2018
+KernelVersion: 4.22
+Contact: linux-iio@vger.kernel.org
+Description:
+ Writing 1 starts sensor self cleaning. Internal fan accelerates
+ to its maximum speed and keeps spinning for about 10 seconds in
+ order to blow out accumulated dust.
+
+What: /sys/bus/iio/devices/iio:deviceX/cleaning_period
+Date: January 2019
+KernelVersion: 5.1
+Contact: linux-iio@vger.kernel.org
+Description:
+ Sensor is capable of triggering self cleaning periodically.
+ Period can be changed by writing a new value here. Upon reading
+ the current one is returned. Units are seconds.
+
+ Writing 0 disables periodical self cleaning entirely.
+
+What: /sys/bus/iio/devices/iio:deviceX/cleaning_period_available
+Date: January 2019
+KernelVersion: 5.1
+Contact: linux-iio@vger.kernel.org
+Description:
+ The range of available values in seconds represented as the
+ minimum value, the step and the maximum value, all enclosed in
+ square brackets.
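
Taken together, the three sps30 attributes give a simple maintenance flow: check
the allowed range, set a period, and optionally force a clean right away (the
device index and the chosen period are illustrative):

    cat /sys/bus/iio/devices/iio:device0/cleaning_period_available  # "[min step max]" in seconds
    echo 86400 > /sys/bus/iio/devices/iio:device0/cleaning_period   # self-clean once a day
    echo 1 > /sys/bus/iio/devices/iio:device0/start_cleaning        # spin the fan up now (~10 s)
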
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
index 4d48a9451866..d1f667104944 100644
--- a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
@@ -3,11 +3,13 @@ Date: June 2015
KernelVersion: 4.3
Contact: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Description: (RW) Writes of 1 or 0 enable or disable trace output to this
- output device. Reads return current status.
+ output device. Reads return current status. Requires that the
+ corresponding output port driver be loaded.
What: /sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/port
Date: June 2015
KernelVersion: 4.3
Contact: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Description: (RO) Port number, corresponding to this output device on the
- switch (GTH).
+ switch (GTH) or "unassigned" if the corresponding output
+ port driver is not loaded.
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 559baa5c418c..614d216dff1d 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -186,7 +186,7 @@ Contact: Lan Tianyu <tianyu.lan@intel.com>
Description:
Some platforms provide usb port connect types through ACPI.
This attribute is to expose these information to user space.
- The file will read "hotplug", "wired" and "not used" if the
+ The file will read "hotplug", "hardwired" and "not used" if the
information is available, and "unknown" otherwise.
What: /sys/bus/usb/devices/.../(hub interface)/portX/location
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
index 1e5d172e0646..bd92ef9d6faa 100644
--- a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
@@ -7,55 +7,10 @@ Description:
timer. It can do gradual dimming and step change of brightness.
The pattern is given by a series of tuples, of brightness and
- duration (ms). The LED is expected to traverse the series and
- each brightness value for the specified duration. Duration of
- 0 means brightness should immediately change to new value, and
- writing malformed pattern deactivates any active one.
+ duration (ms).
- 1. For gradual dimming, the dimming interval now is set as 50
- milliseconds. So the tuple with duration less than dimming
- interval (50ms) is treated as a step change of brightness,
- i.e. the subsequent brightness will be applied without adding
- intervening dimming intervals.
-
- The gradual dimming format of the software pattern values should be:
- "brightness_1 duration_1 brightness_2 duration_2 brightness_3
- duration_3 ...". For example:
-
- echo 0 1000 255 2000 > pattern
-
- It will make the LED go gradually from zero-intensity to max (255)
- intensity in 1000 milliseconds, then back to zero intensity in 2000
- milliseconds:
-
- LED brightness
- ^
- 255-| / \ / \ /
- | / \ / \ /
- | / \ / \ /
- | / \ / \ /
- 0-| / \/ \/
- +---0----1----2----3----4----5----6------------> time (s)
-
- 2. To make the LED go instantly from one brightness value to another,
- we should use zero-time lengths (the brightness must be same as
- the previous tuple's). So the format should be:
- "brightness_1 duration_1 brightness_1 0 brightness_2 duration_2
- brightness_2 0 ...". For example:
-
- echo 0 1000 0 0 255 2000 255 0 > pattern
-
- It will make the LED stay off for one second, then stay at max brightness
- for two seconds:
-
- LED brightness
- ^
- 255-| +---------+ +---------+
- | | | | |
- | | | | |
- | | | | |
- 0-| -----+ +----+ +----
- +---0----1----2----3----4----5----6------------> time (s)
+ The exact format is described in:
+ Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt
What: /sys/class/leds/<led>/hw_pattern
Date: September 2018
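
The detailed format now lives in the referenced bindings document, but the
gradual-dimming example that was removed above still illustrates the idea; the
LED name is hypothetical and the trigger must be set to "pattern" first:

    echo pattern > /sys/class/leds/mylcd/trigger
    echo "0 1000 255 2000" > /sys/class/leds/mylcd/pattern   # ramp 0->255 in 1 s, back to 0 in 2 s
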
diff --git a/Documentation/ABI/testing/sysfs-driver-habanalabs b/Documentation/ABI/testing/sysfs-driver-habanalabs
new file mode 100644
index 000000000000..78b2bcf316a3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-habanalabs
@@ -0,0 +1,190 @@
+What: /sys/class/habanalabs/hl<n>/armcp_kernel_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the Linux kernel running on the device's CPU
+
+What: /sys/class/habanalabs/hl<n>/armcp_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the application running on the device's CPU
+
+What: /sys/class/habanalabs/hl<n>/cpld_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the Device's CPLD F/W
+
+What: /sys/class/habanalabs/hl<n>/device_type
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the code name of the device according to its type.
+ The supported values are: "GOYA"
+
+What: /sys/class/habanalabs/hl<n>/eeprom
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: A binary file attribute that contains the contents of the
+ on-board EEPROM
+
+What: /sys/class/habanalabs/hl<n>/fuse_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the device's version from the eFuse
+
+What: /sys/class/habanalabs/hl<n>/hard_reset
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Interface to trigger a hard-reset operation for the device.
+ Hard-reset will reset ALL internal components of the device
+ except for the PCI interface and the internal PLLs
+
+What: /sys/class/habanalabs/hl<n>/hard_reset_cnt
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays how many times the device has undergone a hard-reset
+ operation since the driver was loaded
+
+What: /sys/class/habanalabs/hl<n>/high_pll
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency for MME, TPC
+ and IC when the power management profile is set to "automatic".
+
+What: /sys/class/habanalabs/hl<n>/ic_clk
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency of the
+ Interconnect fabric. Writes to this parameter affect the device
+ only when the power management profile is set to "manual" mode.
+ The device IC clock might be set to a lower value than the
+ maximum. The user should read the ic_clk_curr to see the actual
+ frequency value of the IC
+
+What: /sys/class/habanalabs/hl<n>/ic_clk_curr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the current clock frequency of the Interconnect fabric
+
+What: /sys/class/habanalabs/hl<n>/infineon_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the Device's power supply F/W code
+
+What: /sys/class/habanalabs/hl<n>/max_power
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum power consumption of the
+ device in milliwatts.
+
+What: /sys/class/habanalabs/hl<n>/mme_clk
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency of the
+ MME compute engine. Writes to this parameter affect the device
+ only when the power management profile is set to "manual" mode.
+ The device MME clock might be set to a lower value than the
+ maximum. The user should read the mme_clk_curr to see the actual
+ frequency value of the MME
+
+What: /sys/class/habanalabs/hl<n>/mme_clk_curr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the current clock frequency of the MME compute engine
+
+What: /sys/class/habanalabs/hl<n>/pci_addr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the PCI address of the device. This is needed so the
+ user can open a device based on its PCI address
+
+What: /sys/class/habanalabs/hl<n>/pm_mng_profile
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Power management profile. Values are "auto", "manual". In "auto"
+ mode, the driver will set the maximum clock frequency to a high
+ value when a user-space process opens the device's file (unless
+ it was already opened by another process). The driver will set
+ the max clock frequency to a low value when no user process
+ has the device's file open. In "manual"
+ mode, the user sets the maximum clock frequency by writing to
+ ic_clk, mme_clk and tpc_clk
+
+
+What: /sys/class/habanalabs/hl<n>/preboot_btl_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the device's preboot F/W code
+
+What: /sys/class/habanalabs/hl<n>/soft_reset
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Interface to trigger a soft-reset operation for the device.
+ Soft-reset will reset only the compute and DMA engines of the
+ device
+
+What: /sys/class/habanalabs/hl<n>/soft_reset_cnt
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays how many times the device has undergone a soft-reset
+ operation since the driver was loaded
+
+What: /sys/class/habanalabs/hl<n>/status
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Status of the card: "Operational", "Malfunction", "In reset".
+
+What: /sys/class/habanalabs/hl<n>/thermal_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the Device's thermal daemon
+
+What: /sys/class/habanalabs/hl<n>/tpc_clk
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency of the
+ TPC compute engines. Writes to this parameter affect the device
+ only when the power management profile is set to "manual" mode.
+ The device TPC clock might be set to a lower value than the
+ maximum. The user should read tpc_clk_curr to see the actual
+ frequency value of the TPC
+
+What: /sys/class/habanalabs/hl<n>/tpc_clk_curr
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the current clock frequency of the TPC compute engines
+
+What: /sys/class/habanalabs/hl<n>/uboot_ver
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Version of the u-boot running on the device's CPU
+
+What: /sys/class/habanalabs/hl<n>/write_open_cnt
+Date: Jan 2019
+KernelVersion: 5.1
+Contact: oded.gabbay@gmail.com
+Description: Displays the total number of user processes that currently have
+ the device's file open
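
A minimal usage sketch tying these attributes together, assuming a device named
hl0 and placeholder values (the clock-frequency unit is whatever the driver
expects and is not specified by these entries):

    # Switch to manual power management so the *_clk writes take effect
    echo manual > /sys/class/habanalabs/hl0/pm_mng_profile

    # Request a new maximum Interconnect clock, then read back the value
    # actually in effect, which may be lower than the requested maximum
    echo <freq> > /sys/class/habanalabs/hl0/ic_clk
    cat /sys/class/habanalabs/hl0/ic_clk_curr

    # Cap power consumption at 200 W (max_power is in milliwatts)
    echo 200000 > /sys/class/habanalabs/hl0/max_power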
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg
index e4233ac93c2b..6189ffcc6aff 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg
@@ -328,13 +328,13 @@
inkscape:window-height="1148"
id="namedview90"
showgrid="true"
- inkscape:zoom="0.80021373"
- inkscape:cx="462.49289"
- inkscape:cy="473.6718"
+ inkscape:zoom="0.69092787"
+ inkscape:cx="476.34085"
+ inkscape:cy="712.80957"
inkscape:window-x="770"
inkscape:window-y="24"
inkscape:window-maximized="0"
- inkscape:current-layer="g4114-9-3-9"
+ inkscape:current-layer="g4"
inkscape:snap-grids="false"
fit-margin-top="5"
fit-margin-right="5"
@@ -813,14 +813,18 @@
<text
sodipodi:linespacing="125%"
id="text4110-5-7-6-2-4-0"
- y="841.88086"
+ y="670.74316"
x="1460.1007"
style="font-size:267.24359131px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
- y="841.88086"
+ y="670.74316"
+ x="1460.1007"
+ sodipodi:role="line"
+ id="tspan4925-1-2-4-5">Request</tspan><tspan
+ y="1004.7976"
x="1460.1007"
sodipodi:role="line"
- id="tspan4925-1-2-4-5">reched_cpu()</tspan></text>
+ id="tspan3100">context switch</tspan></text>
</g>
</g>
</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index 8e4f873b979f..19e7a5fb6b73 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -72,10 +72,10 @@ will ignore it because idle and offline CPUs are already residing
in quiescent states.
Otherwise, the expedited grace period will use
<tt>smp_call_function_single()</tt> to send the CPU an IPI, which
-is handled by <tt>sync_rcu_exp_handler()</tt>.
+is handled by <tt>rcu_exp_handler()</tt>.
<p>
-However, because this is preemptible RCU, <tt>sync_rcu_exp_handler()</tt>
+However, because this is preemptible RCU, <tt>rcu_exp_handler()</tt>
can check to see if the CPU is currently running in an RCU read-side
critical section.
If not, the handler can immediately report a quiescent state.
@@ -145,19 +145,18 @@ expedited grace period is shown in the following diagram:
<p><img src="ExpSchedFlow.svg" alt="ExpSchedFlow.svg" width="55%">
<p>
-As with RCU-preempt's <tt>synchronize_rcu_expedited()</tt>,
+As with RCU-preempt, RCU-sched's
<tt>synchronize_sched_expedited()</tt> ignores offline and
idle CPUs, again because they are in remotely detectable
quiescent states.
-However, the <tt>synchronize_rcu_expedited()</tt> handler
-is <tt>sync_sched_exp_handler()</tt>, and because the
+However, because the
<tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
leave no trace of their invocation, in general it is not possible to tell
whether or not the current CPU is in an RCU read-side critical section.
-The best that <tt>sync_sched_exp_handler()</tt> can do is to check
+The best that RCU-sched's <tt>rcu_exp_handler()</tt> can do is to check
for idle, on the off-chance that the CPU went idle while the IPI
was in flight.
-If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports
+If the CPU is idle, then <tt>rcu_exp_handler()</tt> reports
the quiescent state.
<p> Otherwise, the handler forces a future context switch by setting the
@@ -298,19 +297,18 @@ Instead, the task pushing the grace period forward will include the
idle CPUs in the mask passed to <tt>rcu_report_exp_cpu_mult()</tt>.
<p>
-For RCU-sched, there is an additional check for idle in the IPI
-handler, <tt>sync_sched_exp_handler()</tt>.
+For RCU-sched, there is an additional check:
If the IPI has interrupted the idle loop, then
-<tt>sync_sched_exp_handler()</tt> invokes <tt>rcu_report_exp_rdp()</tt>
+<tt>rcu_exp_handler()</tt> invokes <tt>rcu_report_exp_rdp()</tt>
to report the corresponding quiescent state.
<p>
For RCU-preempt, there is no specific check for idle in the
-IPI handler (<tt>sync_rcu_exp_handler()</tt>), but because
+IPI handler (<tt>rcu_exp_handler()</tt>), but because
RCU read-side critical sections are not permitted within the
-idle loop, if <tt>sync_rcu_exp_handler()</tt> sees that the CPU is within
+idle loop, if <tt>rcu_exp_handler()</tt> sees that the CPU is within
RCU read-side critical section, the CPU cannot possibly be idle.
-Otherwise, <tt>sync_rcu_exp_handler()</tt> invokes
+Otherwise, <tt>rcu_exp_handler()</tt> invokes
<tt>rcu_report_exp_rdp()</tt> to report the corresponding quiescent
state, regardless of whether or not that quiescent state was due to
the CPU being idle.
@@ -625,6 +623,8 @@ checks, but only during the mid-boot dead zone.
<p>
With this refinement, synchronous grace periods can now be used from
task context pretty much any time during the life of the kernel.
+That is, aside from some points in the suspend, hibernate, or shutdown
+code path.
<h3><a name="Summary">
Summary</a></h3>
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index e4d94fba6c89..8d21af02b1f0 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -485,13 +485,13 @@ section that the grace period must wait on.
noted by <tt>rcu_node_context_switch()</tt> on the left.
On the other hand, if the CPU takes a scheduler-clock interrupt
while executing in usermode, a quiescent state will be noted by
-<tt>rcu_check_callbacks()</tt> on the right.
+<tt>rcu_sched_clock_irq()</tt> on the right.
Either way, the passage through a quiescent state will be noted
in a per-CPU variable.
<p>The next time an <tt>RCU_SOFTIRQ</tt> handler executes on
this CPU (for example, after the next scheduler-clock
-interrupt), <tt>__rcu_process_callbacks()</tt> will invoke
+interrupt), <tt>rcu_core()</tt> will invoke
<tt>rcu_check_quiescent_state()</tt>, which will notice the
recorded quiescent state, and invoke
<tt>rcu_report_qs_rdp()</tt>.
@@ -651,7 +651,7 @@ to end.
These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
which is usually invoked by <tt>__note_gp_changes()</tt>.
As shown in the diagram below, this invocation can be triggered by
-the scheduling-clock interrupt (<tt>rcu_check_callbacks()</tt> on
+the scheduling-clock interrupt (<tt>rcu_sched_clock_irq()</tt> on
the left) or by idle entry (<tt>rcu_cleanup_after_idle()</tt> on
the right, but only for kernels build with
<tt>CONFIG_RCU_FAST_NO_HZ=y</tt>).
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg
index 832408313d93..3fcf0c17cef2 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg
@@ -349,7 +349,7 @@
font-weight="bold"
font-size="192"
id="text202-7-5"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_check_callbacks()</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_sched_clock_irq()</text>
<rect
x="7069.6187"
y="5087.4678"
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index acd73c7ad0f4..2bcd742d6e49 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -3902,7 +3902,7 @@
font-style="normal"
y="-4418.6582"
x="3745.7725"
- xml:space="preserve">rcu_check_callbacks()</text>
+ xml:space="preserve">rcu_sched_clock_irq()</text>
</g>
<g
transform="translate(-850.30204,55463.106)"
@@ -3924,7 +3924,7 @@
font-style="normal"
y="-4418.6582"
x="3745.7725"
- xml:space="preserve">rcu_process_callbacks()</text>
+ xml:space="preserve">rcu_core()</text>
<text
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
id="text202-7-5-3-27-0"
@@ -3933,7 +3933,7 @@
font-style="normal"
y="-4165.7954"
x="3745.7725"
- xml:space="preserve">rcu_check_quiescent_state())</text>
+ xml:space="preserve">rcu_check_quiescent_state()</text>
<text
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
id="text202-7-5-3-27-0-9"
@@ -4968,7 +4968,7 @@
font-weight="bold"
font-size="192"
id="text202-7-5-19"
- style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_check_callbacks()</text>
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_sched_clock_irq()</text>
<rect
x="5314.2671"
y="82817.688"
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
index 149bec2a4493..779c9ac31a52 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
@@ -775,7 +775,7 @@
font-style="normal"
y="-4418.6582"
x="3745.7725"
- xml:space="preserve">rcu_check_callbacks()</text>
+ xml:space="preserve">rcu_sched_clock_irq()</text>
</g>
<g
transform="translate(399.7744,828.86448)"
@@ -797,7 +797,7 @@
font-style="normal"
y="-4418.6582"
x="3745.7725"
- xml:space="preserve">rcu_process_callbacks()</text>
+ xml:space="preserve">rcu_core()</text>
<text
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
id="text202-7-5-3-27-0"
@@ -806,7 +806,7 @@
font-style="normal"
y="-4165.7954"
x="3745.7725"
- xml:space="preserve">rcu_check_quiescent_state())</text>
+ xml:space="preserve">rcu_check_quiescent_state()</text>
<text
style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
id="text202-7-5-3-27-0-9"
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index 9fca73e03a98..5a9238a2883c 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -3099,7 +3099,7 @@ If you block forever in one of a given domain's SRCU read-side critical
sections, then that domain's grace periods will also be blocked forever.
Of course, one good way to block forever is to deadlock, which can
happen if any operation in a given domain's SRCU read-side critical
-section can block waiting, either directly or indirectly, for that domain's
+section can wait, either directly or indirectly, for that domain's
grace period to elapse.
For example, this results in a self-deadlock:
@@ -3139,12 +3139,18 @@ API, which, in combination with <tt>srcu_read_unlock()</tt>,
guarantees a full memory barrier.
<p>
-Also unlike other RCU flavors, SRCU's callbacks-wait function
-<tt>srcu_barrier()</tt> may be invoked from CPU-hotplug notifiers,
-though this is not necessarily a good idea.
-The reason that this is possible is that SRCU is insensitive
-to whether or not a CPU is online, which means that <tt>srcu_barrier()</tt>
-need not exclude CPU-hotplug operations.
+Also unlike other RCU flavors, <tt>synchronize_srcu()</tt> may <b>not</b>
+be invoked from CPU-hotplug notifiers, due to the fact that SRCU grace
+periods make use of timers and the possibility of timers being temporarily
+&ldquo;stranded&rdquo; on the outgoing CPU.
+This stranding of timers means that timers posted to the outgoing CPU
+will not fire until late in the CPU-hotplug process.
+The problem is that if a notifier is waiting on an SRCU grace period,
+that grace period is waiting on a timer, and that timer is stranded on the
+outgoing CPU, then the notifier will never be awakened, in other words,
+deadlock has occurred.
+This same situation of course also prohibits <tt>srcu_barrier()</tt>
+from being invoked from CPU-hotplug notifiers.
<p>
SRCU also differs from other RCU flavors in that SRCU's expedited and
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 073dbc12d1ea..1ab70c37921f 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -219,17 +219,18 @@ an estimate of the total number of RCU callbacks queued across all CPUs
In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
for each CPU:
- 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 nonlazy_posted: 25 .D
+ 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 Nonlazy posted: ..D
The "last_accelerate:" prints the low-order 16 bits (in hex) of the
jiffies counter when this CPU last invoked rcu_try_advance_all_cbs()
from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from
-rcu_prepare_for_idle(). The "nonlazy_posted:" prints the number
-of non-lazy callbacks posted since the last call to rcu_needs_cpu().
-Finally, an "L" indicates that there are currently no non-lazy callbacks
-("." is printed otherwise, as shown above) and "D" indicates that
-dyntick-idle processing is enabled ("." is printed otherwise, for example,
-if disabled via the "nohz=" kernel boot parameter).
+rcu_prepare_for_idle(). The "Nonlazy posted:" field indicates lazy-callback
+status: an "l" indicates that all callbacks were lazy at the start of the
+last idle period, and an "L" indicates that there are currently no non-lazy
+callbacks (in both cases, "." is printed otherwise, as shown above).
+Finally, "D" indicates that dyntick-idle processing is enabled ("." is
+printed otherwise, for example, if disabled via the "nohz=" kernel boot
+parameter).
If the grace period ends just as the stall warning starts printing,
there will be a spurious stall-warning message, which will include
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 55918b54808b..a41a0384d20c 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -10,173 +10,8 @@ status messages via printk(), which can be examined via the dmesg
command (perhaps grepping for "torture"). The test is started
when the module is loaded, and stops when the module is unloaded.
-
-MODULE PARAMETERS
-
-This module has the following parameters:
-
-fqs_duration Duration (in microseconds) of artificially induced bursts
- of force_quiescent_state() invocations. In RCU
- implementations having force_quiescent_state(), these
- bursts help force races between forcing a given grace
- period and that grace period ending on its own.
-
-fqs_holdoff Holdoff time (in microseconds) between consecutive calls
- to force_quiescent_state() within a burst.
-
-fqs_stutter Wait time (in seconds) between consecutive bursts
- of calls to force_quiescent_state().
-
-gp_normal Make the fake writers use normal synchronous grace-period
- primitives.
-
-gp_exp Make the fake writers use expedited synchronous grace-period
- primitives. If both gp_normal and gp_exp are set, or
- if neither gp_normal nor gp_exp are set, then randomly
- choose the primitive so that about 50% are normal and
- 50% expedited. By default, neither are set, which
- gives best overall test coverage.
-
-irqreader Says to invoke RCU readers from irq level. This is currently
- done via timers. Defaults to "1" for variants of RCU that
- permit this. (Or, more accurately, variants of RCU that do
- -not- permit this know to ignore this variable.)
-
-n_barrier_cbs If this is nonzero, RCU barrier testing will be conducted,
- in which case n_barrier_cbs specifies the number of
- RCU callbacks (and corresponding kthreads) to use for
- this testing. The value cannot be negative. If you
- specify this to be non-zero when torture_type indicates a
- synchronous RCU implementation (one for which a member of
- the synchronize_rcu() rather than the call_rcu() family is
- used -- see the documentation for torture_type below), an
- error will be reported and no testing will be carried out.
-
-nfakewriters This is the number of RCU fake writer threads to run. Fake
- writer threads repeatedly use the synchronous "wait for
- current readers" function of the interface selected by
- torture_type, with a delay between calls to allow for various
- different numbers of writers running in parallel.
- nfakewriters defaults to 4, which provides enough parallelism
- to trigger special cases caused by multiple writers, such as
- the synchronize_srcu() early return optimization.
-
-nreaders This is the number of RCU reading threads supported.
- The default is twice the number of CPUs. Why twice?
- To properly exercise RCU implementations with preemptible
- read-side critical sections.
-
-onoff_interval
- The number of seconds between each attempt to execute a
- randomly selected CPU-hotplug operation. Defaults to
- zero, which disables CPU hotplugging. In HOTPLUG_CPU=n
- kernels, rcutorture will silently refuse to do any
- CPU-hotplug operations regardless of what value is
- specified for onoff_interval.
-
-onoff_holdoff The number of seconds to wait until starting CPU-hotplug
- operations. This would normally only be used when
- rcutorture was built into the kernel and started
- automatically at boot time, in which case it is useful
- in order to avoid confusing boot-time code with CPUs
- coming and going.
-
-shuffle_interval
- The number of seconds to keep the test threads affinitied
- to a particular subset of the CPUs, defaults to 3 seconds.
- Used in conjunction with test_no_idle_hz.
-
-shutdown_secs The number of seconds to run the test before terminating
- the test and powering off the system. The default is
- zero, which disables test termination and system shutdown.
- This capability is useful for automated testing.
-
-stall_cpu The number of seconds that a CPU should be stalled while
- within both an rcu_read_lock() and a preempt_disable().
- This stall happens only once per rcutorture run.
- If you need multiple stalls, use modprobe and rmmod to
- repeatedly run rcutorture. The default for stall_cpu
- is zero, which prevents rcutorture from stalling a CPU.
-
- Note that attempts to rmmod rcutorture while the stall
- is ongoing will hang, so be careful what value you
- choose for this module parameter! In addition, too-large
- values for stall_cpu might well induce failures and
- warnings in other parts of the kernel. You have been
- warned!
-
-stall_cpu_holdoff
- The number of seconds to wait after rcutorture starts
- before stalling a CPU. Defaults to 10 seconds.
-
-stat_interval The number of seconds between output of torture
- statistics (via printk()). Regardless of the interval,
- statistics are printed when the module is unloaded.
- Setting the interval to zero causes the statistics to
- be printed -only- when the module is unloaded, and this
- is the default.
-
-stutter The length of time to run the test before pausing for this
- same period of time. Defaults to "stutter=5", so as
- to run and pause for (roughly) five-second intervals.
- Specifying "stutter=0" causes the test to run continuously
- without pausing, which is the old default behavior.
-
-test_boost Whether or not to test the ability of RCU to do priority
- boosting. Defaults to "test_boost=1", which performs
- RCU priority-inversion testing only if the selected
- RCU implementation supports priority boosting. Specifying
- "test_boost=0" never performs RCU priority-inversion
- testing. Specifying "test_boost=2" performs RCU
- priority-inversion testing even if the selected RCU
- implementation does not support RCU priority boosting,
- which can be used to test rcutorture's ability to
- carry out RCU priority-inversion testing.
-
-test_boost_interval
- The number of seconds in an RCU priority-inversion test
- cycle. Defaults to "test_boost_interval=7". It is
- usually wise for this value to be relatively prime to
- the value selected for "stutter".
-
-test_boost_duration
- The number of seconds to do RCU priority-inversion testing
- within any given "test_boost_interval". Defaults to
- "test_boost_duration=4".
-
-test_no_idle_hz Whether or not to test the ability of RCU to operate in
- a kernel that disables the scheduling-clock interrupt to
- idle CPUs. Boolean parameter, "1" to test, "0" otherwise.
- Defaults to omitting this test.
-
-torture_type The type of RCU to test, with string values as follows:
-
- "rcu": rcu_read_lock(), rcu_read_unlock() and call_rcu(),
- along with expedited, synchronous, and polling
- variants.
-
- "rcu_bh": rcu_read_lock_bh(), rcu_read_unlock_bh(), and
- call_rcu_bh(), along with expedited and synchronous
- variants.
-
- "rcu_busted": This tests an intentionally incorrect version
- of RCU in order to help test rcutorture itself.
-
- "srcu": srcu_read_lock(), srcu_read_unlock() and
- call_srcu(), along with expedited and
- synchronous variants.
-
- "sched": preempt_disable(), preempt_enable(), and
- call_rcu_sched(), along with expedited,
- synchronous, and polling variants.
-
- "tasks": voluntary context switch and call_rcu_tasks(),
- along with expedited and synchronous variants.
-
- Defaults to "rcu".
-
-verbose Enable debug printk()s. Default is disabled.
-
+Module parameters are prefixed by "rcutorture." in
+Documentation/admin-guide/kernel-parameters.txt.
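
For example, a hypothetical run selecting the SRCU flavor (torture_type and
nfakewriters are existing rcutorture parameters; the values are illustrative
only):

    # rcutorture built as a module
    modprobe rcutorture torture_type=srcu nfakewriters=8

    # rcutorture built into the kernel: pass the same parameters on the
    # kernel command line with the "rcutorture." prefix, e.g.
    #   rcutorture.torture_type=srcu rcutorture.nfakewriters=8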
OUTPUT
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 4a6854318b17..1ace20815bb1 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -302,7 +302,7 @@ rcu_dereference()
must prohibit. The rcu_dereference_protected() variant takes
a lockdep expression to indicate which locks must be acquired
by the caller. If the indicated protection is not provided,
- a lockdep splat is emitted. See RCU/Design/Requirements.html
+ a lockdep splat is emitted. See RCU/Design/Requirements/Requirements.html
and the API's code comments for more details and example usage.
The following diagram shows how each API communicates among the
@@ -560,7 +560,7 @@ presents two such "toy" implementations of RCU, one that is implemented
in terms of familiar locking primitives, and another that more closely
resembles "classic" RCU. Both are way too simple for real-world use,
lacking both functionality and performance. However, they are useful
-in getting a feel for how RCU works. See kernel/rcupdate.c for a
+in getting a feel for how RCU works. See kernel/rcu/update.c for a
production-quality implementation, and see:
http://www.rdrop.com/users/paulmck/RCU
diff --git a/Documentation/acpi/initrd_table_override.txt b/Documentation/acpi/initrd_table_override.txt
index eb651a6aa285..30437a6db373 100644
--- a/Documentation/acpi/initrd_table_override.txt
+++ b/Documentation/acpi/initrd_table_override.txt
@@ -14,6 +14,10 @@ upgrade the ACPI execution environment that is defined by the ACPI tables
via upgrading the ACPI tables provided by the BIOS with an instrumented,
modified, more recent version one, or installing brand new ACPI tables.
+When the initrd is built into the kernel image, the
+ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD option should also be enabled for this
+feature to work.
+
For a full list of ACPI tables that can be upgraded/installed, take a look
at the char *table_sigs[MAX_ACPI_SIGNATURE]; definition in
drivers/acpi/tables.c.
diff --git a/Documentation/admin-guide/LSM/SafeSetID.rst b/Documentation/admin-guide/LSM/SafeSetID.rst
new file mode 100644
index 000000000000..212434ef65ad
--- /dev/null
+++ b/Documentation/admin-guide/LSM/SafeSetID.rst
@@ -0,0 +1,107 @@
+=========
+SafeSetID
+=========
+SafeSetID is an LSM module that gates the setid family of syscalls to restrict
+UID/GID transitions from a given UID/GID to only those approved by a
+system-wide whitelist. These restrictions also prohibit the given UIDs/GIDs
+from obtaining auxiliary privileges associated with CAP_SET{U/G}ID, such as
+allowing a user to set up user namespace UID mappings.
+
+
+Background
+==========
+In absence of file capabilities, processes spawned on a Linux system that need
+to switch to a different user must be spawned with CAP_SETUID privileges.
+CAP_SETUID is granted to programs running as root or those running as a non-root
+user that have been explicitly given the CAP_SETUID runtime capability. It is
+often preferable to use Linux runtime capabilities rather than file
+capabilities, since using file capabilities to run a program with elevated
+privileges opens up possible security holes, because any user with access to the
+file can exec() that program to gain the elevated privileges.
+
+While it is possible to implement a tree of processes by giving full
+CAP_SET{U/G}ID capabilities, this is often at odds with the goals of running a
+tree of processes under non-root user(s) in the first place. Specifically,
+since CAP_SETUID allows changing to any user on the system, including the root
+user, it is an overpowered capability for what is needed in this scenario,
+especially since programs often only call setuid() to drop privileges to a
+lesser-privileged user -- not elevate privileges. Unfortunately, there is no
+generally feasible way in Linux to restrict the potential UIDs that a user can
+switch to through setuid() beyond allowing a switch to any user on the system.
+This SafeSetID LSM seeks to provide a solution for restricting setid
+capabilities in such a way.
+
+The main use case for this LSM is to allow a non-root program to transition to
+other untrusted uids without full blown CAP_SETUID capabilities. The non-root
+program would still need CAP_SETUID to do any kind of transition, but the
+additional restrictions imposed by this LSM would mean it is a "safer" version
+of CAP_SETUID since the non-root program cannot take advantage of CAP_SETUID to
+do any unapproved actions (e.g. setuid to uid 0 or create/enter new user
+namespace). The higher level goal is to allow for uid-based sandboxing of system
+services without having to give out CAP_SETUID all over the place just so that
+non-root programs can drop to even-lesser-privileged uids. This is especially
+relevant when one non-root daemon on the system should be allowed to spawn other
+processes as different uids, but it's undesirable to give the daemon a
+basically-root-equivalent CAP_SETUID.
+
+
+Other Approaches Considered
+===========================
+
+Solve this problem in userspace
+-------------------------------
+For candidate applications that would like to have restricted setid capabilities
+as implemented in this LSM, an alternative option would be to simply take away
+setid capabilities from the application completely and refactor the process
+spawning semantics in the application (e.g. by using a privileged helper program
+to do process spawning and UID/GID transitions). Unfortunately, there are a
+number of semantics around process spawning that would be affected by this, such
+as fork() calls where the program doesn't immediately call exec() after the
+fork(), parent processes specifying custom environment variables or command line
+args for spawned child processes, or inheritance of file handles across a
+fork()/exec(). Because of this, a solution that uses a privileged helper in
+userspace would likely be less appealing to incorporate into existing projects
+that rely on certain process-spawning semantics in Linux.
+
+Use user namespaces
+-------------------
+Another possible approach would be to run a given process tree in its own user
+namespace and give programs in the tree setid capabilities. In this way,
+programs in the tree could change to any desired UID/GID in the context of their
+own user namespace, and only approved UIDs/GIDs could be mapped back to the
+initial system user namespace, effectively preventing privilege escalation.
+Unfortunately, it is not generally feasible to use user namespaces in isolation,
+without pairing them with other namespace types, which is not always an option.
+Linux checks for capabilities based on the user namespace that "owns" some
+entity. For example, Linux has the notion that network namespaces are owned by
+the user namespace in which they were created. A consequence of this is that
+capability checks for access to a given network namespace are done by checking
+whether a task has the given capability in the context of the user namespace
+that owns the network namespace -- not necessarily the user namespace under
+which the given task runs. Therefore spawning a process in a new user namespace
+effectively prevents it from accessing the network namespace owned by the
+initial namespace. This is a deal-breaker for any application that expects to
+retain the CAP_NET_ADMIN capability for the purpose of adjusting network
+configurations. Using user namespaces in isolation causes problems regarding
+other system interactions, including use of pid namespaces and device creation.
+
+Use an existing LSM
+-------------------
+None of the other in-tree LSMs have the capability to gate setid transitions, or
+even employ the security_task_fix_setuid hook at all. SELinux says of that hook:
+"Since setuid only affects the current process, and since the SELinux controls
+are not based on the Linux identity attributes, SELinux does not need to control
+this operation."
+
+
+Directions for use
+==================
+This LSM hooks the setid syscalls to make sure transitions are allowed if an
+applicable restriction policy is in place. Policies are configured through
+securityfs by writing to the safesetid/add_whitelist_policy and
+safesetid/flush_whitelist_policies files at the location where securityfs is
+mounted. The format for adding a policy is '<UID>:<UID>', using literal
+numbers, such as '123:456'. To flush the policies, any write to the file is
+sufficient. Again, configuring a policy for a UID will prevent that UID from
+obtaining auxiliary setid privileges, such as allowing a user to set up user
+namespace UID mappings.
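
A minimal sketch of the steps above, assuming securityfs is mounted at the
conventional /sys/kernel/security location and that UIDs 123 and 456 are only
examples:

    # Mount securityfs if it is not already mounted
    mount -t securityfs none /sys/kernel/security

    # Allow (only) transitions from UID 123 to UID 456
    echo '123:456' > /sys/kernel/security/safesetid/add_whitelist_policy

    # Drop all configured policies; any write to this file is sufficient
    echo 1 > /sys/kernel/security/safesetid/flush_whitelist_policies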
diff --git a/Documentation/admin-guide/LSM/index.rst b/Documentation/admin-guide/LSM/index.rst
index c980dfe9abf1..a6ba95fbaa9f 100644
--- a/Documentation/admin-guide/LSM/index.rst
+++ b/Documentation/admin-guide/LSM/index.rst
@@ -17,9 +17,8 @@ MAC extensions, other extensions can be built using the LSM to provide
specific changes to system operation when these tweaks are not available
in the core functionality of Linux itself.
-Without a specific LSM built into the kernel, the default LSM will be the
-Linux capabilities system. Most LSMs choose to extend the capabilities
-system, building their checks on top of the defined capability hooks.
+The Linux capabilities modules will always be included. This may be
+followed by any number of "minor" modules and at most one "major" module.
For more details on capabilities, see ``capabilities(7)`` in the Linux
man-pages project.
@@ -30,6 +29,14 @@ order in which checks are made. The capability module will always
be first, followed by any "minor" modules (e.g. Yama) and then
the one "major" module (e.g. SELinux) if there is one configured.
+Process attributes associated with "major" security modules should
+be accessed and maintained using the special files in ``/proc/.../attr``.
+A security module may maintain a module specific subdirectory there,
+named after the module. ``/proc/.../attr/smack`` is provided by the Smack
+security module and contains all its special files. The files directly
+in ``/proc/.../attr`` remain as legacy interfaces for modules that provide
+subdirectories.
+
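For instance, on a system where Smack is the active major module, the
module-specific and legacy views of the current process label could be read as
follows (a sketch; which files exist depends on the modules configured into the
kernel):

    # Module-specific interface provided by Smack
    cat /proc/self/attr/smack/current

    # Legacy interface retained for compatibility
    cat /proc/self/attr/current
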
.. toctree::
:maxdepth: 1
@@ -39,3 +46,4 @@ the one "major" module (e.g. SELinux) if there is one configured.
Smack
tomoyo
Yama
+ SafeSetID
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 0797eec76be1..47e577264198 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,9 +1,9 @@
.. _readme:
-Linux kernel release 4.x <http://kernel.org/>
+Linux kernel release 5.x <http://kernel.org/>
=============================================
-These are the release notes for Linux version 4. Read them carefully,
+These are the release notes for Linux version 5. Read them carefully,
as they tell you what this is all about, explain how to install the
kernel, and what to do if something goes wrong.
@@ -63,7 +63,7 @@ Installing the kernel source
directory where you have permissions (e.g. your home directory) and
unpack it::
- xz -cd linux-4.X.tar.xz | tar xvf -
+ xz -cd linux-5.x.tar.xz | tar xvf -
Replace "X" with the version number of the latest kernel.
@@ -72,26 +72,26 @@ Installing the kernel source
files. They should match the library, and not get messed up by
whatever the kernel-du-jour happens to be.
- - You can also upgrade between 4.x releases by patching. Patches are
+ - You can also upgrade between 5.x releases by patching. Patches are
distributed in the xz format. To install by patching, get all the
newer patch files, enter the top level directory of the kernel source
- (linux-4.X) and execute::
+ (linux-5.x) and execute::
- xz -cd ../patch-4.x.xz | patch -p1
+ xz -cd ../patch-5.x.xz | patch -p1
- Replace "x" for all versions bigger than the version "X" of your current
+ Replace "x" for all versions bigger than the version "x" of your current
source tree, **in_order**, and you should be ok. You may want to remove
the backup files (some-file-name~ or some-file-name.orig), and make sure
that there are no failed patches (some-file-name# or some-file-name.rej).
If there are, either you or I have made a mistake.
- Unlike patches for the 4.x kernels, patches for the 4.x.y kernels
+ Unlike patches for the 5.x kernels, patches for the 5.x.y kernels
(also known as the -stable kernels) are not incremental but instead apply
- directly to the base 4.x kernel. For example, if your base kernel is 4.0
- and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1
- and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and
- want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is,
- patch -R) **before** applying the 4.0.3 patch. You can read more on this in
+ directly to the base 5.x kernel. For example, if your base kernel is 5.0
+ and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1
+ and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and
+ want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is,
+ patch -R) **before** applying the 5.0.3 patch. You can read more on this in
:ref:`Documentation/process/applying-patches.rst <applying_patches>`.
Alternatively, the script patch-kernel can be used to automate this
@@ -114,7 +114,7 @@ Installing the kernel source
Software requirements
---------------------
- Compiling and running the 4.x kernels requires up-to-date
+ Compiling and running the 5.x kernels requires up-to-date
versions of various software packages. Consult
:ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
required and how to get updates for these packages. Beware that using
@@ -132,12 +132,12 @@ Build directory for the kernel
place for the output files (including .config).
Example::
- kernel source code: /usr/src/linux-4.X
+ kernel source code: /usr/src/linux-5.x
build directory: /home/name/build/kernel
To configure and build the kernel, use::
- cd /usr/src/linux-4.X
+ cd /usr/src/linux-5.x
make O=/home/name/build/kernel menuconfig
make O=/home/name/build/kernel
sudo make O=/home/name/build/kernel modules_install install
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 7bf3f129c68b..20f92c16ffbf 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1189,6 +1189,10 @@ PAGE_SIZE multiple when read back.
Amount of cached filesystem data that was modified and
is currently being written back to disk
+ anon_thp
+ Amount of memory used in anonymous mappings backed by
+ transparent hugepages
+
inactive_anon, active_anon, inactive_file, active_file, unevictable
Amount of memory, swap-backed and filesystem-backed,
on the internal memory management lists used by the
@@ -1248,6 +1252,18 @@ PAGE_SIZE multiple when read back.
Amount of reclaimed lazyfree pages
+ thp_fault_alloc
+
+ Number of transparent hugepages which were allocated to satisfy
+ a page fault, including COW faults. This counter is not present
+ when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+
+ thp_collapse_alloc
+
+ Number of transparent hugepages which were allocated to allow
+ collapsing an existing range of pages. This counter is not
+ present when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+
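A quick way to inspect these transparent-hugepage counters is to read them out
of memory.stat; the sketch below assumes cgroup2 is mounted at /sys/fs/cgroup
and that a cgroup named "workload" exists:

    # Show the THP-related entries for the "workload" cgroup
    grep -E '^(anon_thp|thp_fault_alloc|thp_collapse_alloc) ' \
        /sys/fs/cgroup/workload/memory.stat
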
memory.swap.current
A read-only single value file which exists on non-root
cgroups.
@@ -1503,7 +1519,7 @@ protected workload.
The limits are only applied at the peer level in the hierarchy. This means that
in the diagram below, only groups A, B, and C will influence each other, and
-groups D and F will influence each other. Group G will influence nobody.
+groups D and F will influence each other. Group G will influence nobody::
[root]
/ | \
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index b799bcf67d7b..42379633801f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -461,6 +461,11 @@
possible to determine what the correct size should be.
This option provides an override for these situations.
+ carrier_timeout=
+ [NET] Specifies the amount of time (in seconds) that
+ the kernel should wait for a network carrier. By default
+ it waits 120 seconds.
+
ca_keys= [KEYS] This parameter identifies a specific key(s) on
the system trusted keyring to be used for certificate
trust validation.
@@ -910,6 +915,10 @@
The filter can be disabled or changed to another
driver later using sysfs.
+ driver_async_probe= [KNL]
+ List of driver names to be probed asynchronously.
+ Format: <driver_name1>,<driver_name2>...
+
drm.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
Broken monitors, graphic adapters, KVMs and EDIDless
panels may send no or incorrect EDID data sets.
@@ -1073,9 +1082,15 @@
specified address. The serial port must already be
setup and configured. Options are not yet supported.
+ efifb,[options]
+ Start an early, unaccelerated console on the EFI
+ memory mapped framebuffer (if available). On cache
+ coherent non-x86 systems that use system memory for
+ the framebuffer, pass the 'ram' option so that it is
+ mapped with the correct attributes.
+
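For instance, an early EFI framebuffer console might be requested as follows
(a sketch based on the option described above):

    earlycon=efifb
    # on cache-coherent non-x86 systems whose framebuffer lives in system
    # memory, add the 'ram' option:
    earlycon=efifb,ram
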
earlyprintk= [X86,SH,ARM,M68k,S390]
earlyprintk=vga
- earlyprintk=efi
earlyprintk=sclp
earlyprintk=xen
earlyprintk=serial[,ttySn[,baudrate]]
@@ -1696,12 +1711,11 @@
By default, super page will be supported if Intel IOMMU
has the capability. With this option, super page will
not be supported.
- sm_off [Default Off]
- By default, scalable mode will be supported if the
+ sm_on [Default Off]
+ By default, scalable mode will be disabled even if the
hardware advertises that it has support for the scalable
mode translation. With this option set, scalable mode
- will not be used even on hardware which claims to support
- it.
+ will be used on hardware which claims to support it.
tboot_noforce [Default Off]
Do not force the Intel IOMMU enabled under tboot.
By default, tboot will force Intel IOMMU on, which
@@ -2319,6 +2333,10 @@
lsm.debug [SECURITY] Enable LSM initialization debugging output.
+ lsm=lsm1,...,lsmN
+ [SECURITY] Choose order of LSM initialization. This
+ overrides CONFIG_LSM, and the "security=" parameter.
+
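For illustration, a boot command line that forces an explicit initialization
order might look like this (the module names are examples only; use the
modules actually built into your kernel):

    lsm=yama,safesetid,selinux
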
machvec= [IA-64] Force the use of a particular machine-vector
(machvec) in a generic kernel.
Example: machvec=hpzx1_swiotlb
@@ -3654,19 +3672,6 @@
latencies, which will choose a value aligned
with the appropriate hardware boundaries.
- rcutree.jiffies_till_sched_qs= [KNL]
- Set required age in jiffies for a
- given grace period before RCU starts
- soliciting quiescent-state help from
- rcu_note_context_switch(). If not specified, the
- kernel will calculate a value based on the most
- recent settings of rcutree.jiffies_till_first_fqs
- and rcutree.jiffies_till_next_fqs.
- This calculated value may be viewed in
- rcutree.jiffies_to_sched_qs. Any attempt to
- set rcutree.jiffies_to_sched_qs will be
- cheerfully overwritten.
-
rcutree.jiffies_till_first_fqs= [KNL]
Set delay from grace-period initialization to
first attempt to force quiescent states.
@@ -3678,6 +3683,20 @@
quiescent states. Units are jiffies, minimum
value is one, and maximum value is HZ.
+ rcutree.jiffies_till_sched_qs= [KNL]
+ Set required age in jiffies for a
+ given grace period before RCU starts
+ soliciting quiescent-state help from
+ rcu_note_context_switch() and cond_resched().
+ If not specified, the kernel will calculate
+ a value based on the most recent settings
+ of rcutree.jiffies_till_first_fqs
+ and rcutree.jiffies_till_next_fqs.
+ This calculated value may be viewed in
+ rcutree.jiffies_to_sched_qs. Any attempt to set
+ rcutree.jiffies_to_sched_qs will be cheerfully
+ overwritten.
+
rcutree.kthread_prio= [KNL,BOOT]
Set the SCHED_FIFO priority of the RCU per-CPU
kthreads (rcuc/N). This value is also used for
@@ -3721,6 +3740,11 @@
This wake_up() will be accompanied by a
WARN_ONCE() splat and an ftrace_dump().
+ rcutree.sysrq_rcu= [KNL]
+ Commandeer a sysrq key to dump out Tree RCU's
+ rcu_node tree with an eye towards determining
+ why a new grace period has not yet started.
+
rcuperf.gp_async= [KNL]
Measure performance of asynchronous
grace-period primitives such as call_rcu().
@@ -4090,11 +4114,9 @@
Note: increases power consumption, thus should only be
enabled if running jitter sensitive (HPC/RT) workloads.
- security= [SECURITY] Choose a security module to enable at boot.
- If this boot parameter is not specified, only the first
- security module asking for security registration will be
- loaded. An invalid security module name will be treated
- as if no module has been chosen.
+ security= [SECURITY] Choose a legacy "major" security module to
+ enable at boot. This has been deprecated by the
+ "lsm=" parameter.
selinux= [SELINUX] Disable or enable SELinux at boot time.
Format: { "0" | "1" }
@@ -4698,7 +4720,8 @@
usbcore.authorized_default=
[USB] Default USB device authorization:
(default -1 = authorized except for wireless USB,
- 0 = not authorized, 1 = authorized)
+ 0 = not authorized, 1 = authorized, 2 = authorized
+ if device connected to internal port)
usbcore.autosuspend=
[USB] The autosuspend time delay (in seconds) used
diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst
index 3f7bade2c231..340a5aee9b80 100644
--- a/Documentation/admin-guide/mm/pagemap.rst
+++ b/Documentation/admin-guide/mm/pagemap.rst
@@ -75,9 +75,10 @@ number of times a page is mapped.
20. NOPAGE
21. KSM
22. THP
- 23. BALLOON
+ 23. OFFLINE
24. ZERO_PAGE
25. IDLE
+ 26. PGTABLE
* ``/proc/kpagecgroup``. This file contains a 64-bit inode number of the
memory cgroup each page is charged to, indexed by PFN. Only available when
@@ -118,8 +119,8 @@ Short descriptions to the page flags
identical memory pages dynamically shared between one or more processes
22 - THP
contiguous pages which construct transparent hugepages
-23 - BALLOON
- balloon compaction page
+23 - OFFLINE
+ page is logically offline
24 - ZERO_PAGE
zero page for pfn_zero or huge_zero page
25 - IDLE
@@ -128,6 +129,8 @@ Short descriptions to the page flags
Note that this flag may be stale in case the page was accessed via
a PTE. To make sure the flag is up-to-date one has to read
``/sys/kernel/mm/page_idle/bitmap`` first.
+26 - PGTABLE
+ page is in use as a page table
IO related page flags
---------------------
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 106379e2619f..9c58b35a81cb 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -155,14 +155,14 @@ governor uses that information depends on what algorithm is implemented by it
and that is the primary reason for having more than one governor in the
``CPUIdle`` subsystem.
-There are two ``CPUIdle`` governors available, ``menu`` and ``ladder``. Which
-of them is used depends on the configuration of the kernel and in particular on
-whether or not the scheduler tick can be `stopped by the idle
-loop <idle-cpus-and-tick_>`_. It is possible to change the governor at run time
-if the ``cpuidle_sysfs_switch`` command line parameter has been passed to the
-kernel, but that is not safe in general, so it should not be done on production
-systems (that may change in the future, though). The name of the ``CPUIdle``
-governor currently used by the kernel can be read from the
+There are three ``CPUIdle`` governors available, ``menu``, `TEO <teo-gov_>`_
+and ``ladder``. Which of them is used by default depends on the configuration
+of the kernel and in particular on whether or not the scheduler tick can be
+`stopped by the idle loop <idle-cpus-and-tick_>`_. It is possible to change the
+governor at run time if the ``cpuidle_sysfs_switch`` command line parameter has
+been passed to the kernel, but that is not safe in general, so it should not be
+done on production systems (that may change in the future, though). The name of
+the ``CPUIdle`` governor currently used by the kernel can be read from the
:file:`current_governor_ro` (or :file:`current_governor` if
``cpuidle_sysfs_switch`` is present in the kernel command line) file under
:file:`/sys/devices/system/cpu/cpuidle/` in ``sysfs``.
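
For example (the write only works when ``cpuidle_sysfs_switch`` was passed on
the kernel command line, and the target governor must be built into the
kernel):

    # Show the governor currently in use
    cat /sys/devices/system/cpu/cpuidle/current_governor_ro

    # Switch to the TEO governor at run time
    echo teo > /sys/devices/system/cpu/cpuidle/current_governor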
@@ -256,6 +256,8 @@ the ``menu`` governor by default and if it is not tickless, the default
``CPUIdle`` governor on it will be ``ladder``.
+.. _menu-gov:
+
The ``menu`` Governor
=====================
@@ -333,6 +335,92 @@ that time, the governor may need to select a shallower state with a suitable
target residency.
+.. _teo-gov:
+
+The Timer Events Oriented (TEO) Governor
+========================================
+
+The timer events oriented (TEO) governor is an alternative ``CPUIdle`` governor
+for tickless systems. It follows the same basic strategy as the ``menu`` `one
+<menu-gov_>`_: it always tries to find the deepest idle state suitable for the
+given conditions. However, it applies a different approach to that problem.
+
+First, it does not use sleep length correction factors, but instead it attempts
+to correlate the observed idle duration values with the available idle states
+and use that information to pick up the idle state that is most likely to
+"match" the upcoming CPU idle interval. Second, it does not take the tasks
+that were running on the given CPU in the past and are waiting on some I/O
+operations to complete now at all (there is no guarantee that they will run on
+the same CPU when they become runnable again) and the pattern detection code in
+it avoids taking timer wakeups into account. It also only uses idle duration
+values less than the current time till the closest timer (with the scheduler
+tick excluded) for that purpose.
+
+Like in the ``menu`` governor `case <menu-gov_>`_, the first step is to obtain
+the *sleep length*, which is the time until the closest timer event with the
+assumption that the scheduler tick will be stopped (that also is the upper bound
+on the time until the next CPU wakeup). That value is then used to preselect an
+idle state on the basis of three metrics maintained for each idle state provided
+by the ``CPUIdle`` driver: ``hits``, ``misses`` and ``early_hits``.
+
+The ``hits`` and ``misses`` metrics measure the likelihood that a given idle
+state will "match" the observed (post-wakeup) idle duration if it "matches" the
+sleep length. They both are subject to decay (after a CPU wakeup) every time
+the target residency of the idle state corresponding to them is less than or
+equal to the sleep length and the target residency of the next idle state is
+greater than the sleep length (that is, when the idle state corresponding to
+them "matches" the sleep length). The ``hits`` metric is increased if the
+former condition is satisfied and the target residency of the given idle state
+is less than or equal to the observed idle duration and the target residency of
+the next idle state is greater than the observed idle duration at the same time
+(that is, it is increased when the given idle state "matches" both the sleep
+length and the observed idle duration). In turn, the ``misses`` metric is
+increased when the given idle state "matches" the sleep length only and the
+observed idle duration is too short for its target residency.
+
+The ``early_hits`` metric measures the likelihood that a given idle state will
+"match" the observed (post-wakeup) idle duration if it does not "match" the
+sleep length. It is subject to decay on every CPU wakeup and it is increased
+when the idle state corresponding to it "matches" the observed (post-wakeup)
+idle duration and the target residency of the next idle state is less than or
+equal to the sleep length (i.e. the idle state "matching" the sleep length is
+deeper than the given one).
+
+The governor walks the list of idle states provided by the ``CPUIdle`` driver
+and finds the last (deepest) one with the target residency less than or equal
+to the sleep length. Then, the ``hits`` and ``misses`` metrics of that idle
+state are compared with each other and it is preselected if the ``hits`` one is
+greater (which means that that idle state is likely to "match" the observed idle
+duration after CPU wakeup). If the ``misses`` one is greater, the governor
+preselects the shallower idle state with the maximum ``early_hits`` metric
+(or if there are multiple shallower idle states with equal ``early_hits``
+metric which also is the maximum, the shallowest of them will be preselected).
+[If there is a wakeup latency constraint coming from the `PM QoS framework
+<cpu-pm-qos_>`_ which is hit before reaching the deepest idle state with the
+target residency within the sleep length, the deepest idle state with the exit
+latency within the constraint is preselected without consulting the ``hits``,
+``misses`` and ``early_hits`` metrics.]
+
+Next, the governor takes several idle duration values observed most recently
+into consideration and if at least a half of them are greater than or equal to
+the target residency of the preselected idle state, that idle state becomes the
+final candidate to ask for. Otherwise, the average of the most recent idle
+duration values below the target residency of the preselected idle state is
+computed and the governor walks the idle states shallower than the preselected
+one and finds the deepest of them with the target residency within that average.
+That idle state is then taken as the final candidate to ask for.
+
+Still, at this point the governor may need to refine the idle state selection if
+it has not decided to `stop the scheduler tick <idle-cpus-and-tick_>`_. That
+generally happens if the target residency of the idle state selected so far is
+less than the tick period and the tick has not been stopped already (in a
+previous iteration of the idle loop). Then, like in the ``menu`` governor
+`case <menu-gov_>`_, the sleep length used in the previous computations may not
+reflect the real time until the closest timer event and if it really is greater
+than that time, a shallower state with a suitable target residency may need to
+be selected.
+
+
.. _idle-states-representation:
Representation of Idle States
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 1f09d043d086..ddb8ce5333ba 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -44,6 +44,8 @@ stable kernels.
| Implementor | Component | Erratum ID | Kconfig |
+----------------+-----------------+-----------------+-----------------------------+
+| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
+| | | | |
| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
index 8d8d8f06cab2..98a8dd5ee385 100644
--- a/Documentation/block/bfq-iosched.txt
+++ b/Documentation/block/bfq-iosched.txt
@@ -357,6 +357,13 @@ video playing/streaming, a very low drop rate may be more important
than maximum throughput. In these cases, consider setting the
strict_guarantees parameter.
+slice_idle_us
+-------------
+
+Controls the same tuning parameter as slice_idle, but in microseconds.
+Either tunable can be used to set idling behavior. Afterwards, the
+other tunable will reflect the newly set value in sysfs.
+
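For example, assuming BFQ is the active scheduler on sda (the 8000 microsecond
value is arbitrary):

    # Set the idling window via the microsecond-granularity tunable
    echo 8000 > /sys/block/sda/queue/iosched/slice_idle_us

    # The companion tunable now reflects the same setting in its own unit
    cat /sys/block/sda/queue/iosched/slice_idle
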
strict_guarantees
-----------------
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index ea2dafe49ae8..4cad1024fff7 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -88,7 +88,8 @@ shared_tags=[0/1]: Default: 0
zoned=[0/1]: Default: 0
0: Block device is exposed as a random-access block device.
- 1: Block device is exposed as a host-managed zoned block device.
+ 1: Block device is exposed as a host-managed zoned block device. Requires
+ CONFIG_BLK_DEV_ZONED.
zone_size=[MB]: Default: 256
Per zone size when exposed as a zoned block device. Must be a power of two.
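
For instance, a host-managed zoned test device with the default 256 MB zone
size could be created like this (a sketch; requires CONFIG_BLK_DEV_ZONED and
the null_blk module):

    modprobe null_blk zoned=1 zone_size=256

    # The resulting device should report itself as host-managed
    cat /sys/block/nullb0/queue/zoned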
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 39e286d7afc9..83b457e24bba 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -67,6 +67,13 @@ If set to a value larger than 0, the kernel will put the process issuing
IO to sleep for this amount of microseconds before entering classic
polling.
+io_timeout (RW)
+---------------
+io_timeout is the request timeout in milliseconds. If a request does not
+complete in this time then the block driver timeout handler is invoked.
+That timeout handler can decide to retry the request, to fail it or to start
+a device recovery strategy.
+
iostats (RW)
-------------
This file is used to control (on/off) the iostats accounting of the
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 436c5e98e1b6..4df0ce271085 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -156,22 +156,23 @@ Per-device statistics are exported as various nodes under /sys/block/zram<id>/
A brief description of exported device attributes. For more details please
read Documentation/ABI/testing/sysfs-block-zram.
-Name access description
----- ------ -----------
-disksize RW show and set the device's disk size
-initstate RO shows the initialization state of the device
-reset WO trigger device reset
-mem_used_max WO reset the `mem_used_max' counter (see later)
-mem_limit WO specifies the maximum amount of memory ZRAM can use
- to store the compressed data
-writeback_limit WO specifies the maximum amount of write IO zram can
- write out to backing device as 4KB unit
-max_comp_streams RW the number of possible concurrent compress operations
-comp_algorithm RW show and change the compression algorithm
-compact WO trigger memory compaction
-debug_stat RO this file is used for zram debugging purposes
-backing_dev RW set up backend storage for zram to write out
-idle WO mark allocated slot as idle
+Name access description
+---- ------ -----------
+disksize RW show and set the device's disk size
+initstate RO shows the initialization state of the device
+reset WO trigger device reset
+mem_used_max WO reset the `mem_used_max' counter (see later)
+mem_limit WO specifies the maximum amount of memory ZRAM can use
+ to store the compressed data
+writeback_limit WO specifies the maximum amount of write IO zram can
+ write out to backing device as 4KB unit
+writeback_limit_enable RW show and set writeback_limit feature
+max_comp_streams RW the number of possible concurrent compress operations
+comp_algorithm RW show and change the compression algorithm
+compact WO trigger memory compaction
+debug_stat RO this file is used for zram debugging purposes
+backing_dev RW set up backend storage for zram to write out
+idle WO mark allocated slot as idle
User space is advised to use the following files to read the device statistics.
@@ -280,32 +281,51 @@ With the command, zram writeback idle pages from memory to the storage.
If there are lots of write IO with flash device, potentially, it has
flash wearout problem so that admin needs to design write limitation
to guarantee storage health for entire product life.
-To overcome the concern, zram supports "writeback_limit".
-The "writeback_limit"'s default value is 0 so that it doesn't limit
-any writeback. If admin want to measure writeback count in a certain
-period, he could know it via /sys/block/zram0/bd_stat's 3rd column.
+
+To overcome the concern, zram supports "writeback_limit" feature.
+The "writeback_limit_enable"'s default value is 0 so that it doesn't limit
+any writeback. In other words, if the admin wants to apply a writeback budget,
+writeback_limit_enable should first be enabled via
+
+ $ echo 1 > /sys/block/zramX/writeback_limit_enable
+
+Once writeback_limit_enable is set, zram doesn't allow any writeback
+until the admin sets the budget via /sys/block/zramX/writeback_limit.
+
+(If the admin doesn't enable writeback_limit_enable, the writeback_limit value
+assigned via /sys/block/zramX/writeback_limit is meaningless.)
If admin want to limit writeback as per-day 400M, he could do it
like below.
- MB_SHIFT=20
- 4K_SHIFT=12
- echo $((400<<MB_SHIFT>>4K_SHIFT)) > \
- /sys/block/zram0/writeback_limit.
+	$ MB_SHIFT=20
+	$ PAGE_SHIFT_4K=12
+	$ echo $((400<<MB_SHIFT>>PAGE_SHIFT_4K)) > \
+		/sys/block/zram0/writeback_limit
+	$ echo 1 > /sys/block/zram0/writeback_limit_enable
-If admin want to allow further write again, he could do it like below
+If the admin wants to allow further writeback again once the budget is
+exhausted, it can be done like below
- echo 0 > /sys/block/zram0/writeback_limit
+	$ echo $((400<<MB_SHIFT>>PAGE_SHIFT_4K)) > \
+		/sys/block/zram0/writeback_limit
If admin want to see remaining writeback budget since he set,
- cat /sys/block/zram0/writeback_limit
+ $ cat /sys/block/zramX/writeback_limit
+
+If admin want to disable writeback limit, he could do
+
+ $ echo 0 > /sys/block/zramX/writeback_limit_enable
The writeback_limit count will reset whenever you reset zram(e.g.,
system reboot, echo 1 > /sys/block/zramX/reset) so keeping how many of
writeback happened until you reset the zram to allocate extra writeback
budget in next setting is user's job.
+If the admin wants to measure the writeback count in a certain period, it can
+be read from the 3rd column of /sys/block/zram0/bd_stat.
+
= memory tracking
With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 6780a6d81745..10453c627135 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -36,27 +36,27 @@ consideration important quirks of other architectures) and
defines calling convention that is compatible with C calling
convention of the linux kernel on those architectures.
-Q: can multiple return values be supported in the future?
+Q: Can multiple return values be supported in the future?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A: NO. BPF allows only register R0 to be used as return value.
-Q: can more than 5 function arguments be supported in the future?
+Q: Can more than 5 function arguments be supported in the future?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A: NO. BPF calling convention only allows registers R1-R5 to be used
as arguments. BPF is not a standalone instruction set.
(unlike x64 ISA that allows msft, cdecl and other conventions)
-Q: can BPF programs access instruction pointer or return address?
+Q: Can BPF programs access instruction pointer or return address?
-----------------------------------------------------------------
A: NO.
-Q: can BPF programs access stack pointer ?
+Q: Can BPF programs access stack pointer ?
------------------------------------------
A: NO.
Only frame pointer (register R10) is accessible.
From compiler point of view it's necessary to have stack pointer.
-For example LLVM defines register R11 as stack pointer in its
+For example, LLVM defines register R11 as stack pointer in its
BPF backend, but it makes sure that generated code never uses it.
Q: Does C-calling convention diminishes possible use cases?
@@ -66,8 +66,8 @@ A: YES.
BPF design forces addition of major functionality in the form
of kernel helper functions and kernel objects like BPF maps with
seamless interoperability between them. It lets kernel call into
-BPF programs and programs call kernel helpers with zero overhead.
-As all of them were native C code. That is particularly the case
+BPF programs and programs call kernel helpers with zero overhead,
+as if all of them were native C code. That is particularly the case
for JITed BPF programs that are indistinguishable from
native kernel C code.
@@ -75,9 +75,9 @@ Q: Does it mean that 'innovative' extensions to BPF code are disallowed?
------------------------------------------------------------------------
A: Soft yes.
-At least for now until BPF core has support for
+At least for now, until BPF core has support for
bpf-to-bpf calls, indirect calls, loops, global variables,
-jump tables, read only sections and all other normal constructs
+jump tables, read-only sections, and all other normal constructs
that C code can produce.
Q: Can loops be supported in a safe way?
@@ -109,16 +109,16 @@ For example why BPF_JNE and other compare and jumps are not cpu-like?
A: This was necessary to avoid introducing flags into ISA which are
impossible to make generic and efficient across CPU architectures.
-Q: why BPF_DIV instruction doesn't map to x64 div?
+Q: Why BPF_DIV instruction doesn't map to x64 div?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A: Because if we picked one-to-one relationship to x64 it would have made
it more complicated to support on arm64 and other archs. Also it
needs div-by-zero runtime check.
-Q: why there is no BPF_SDIV for signed divide operation?
+Q: Why there is no BPF_SDIV for signed divide operation?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A: Because it would be rarely used. llvm errors in such case and
-prints a suggestion to use unsigned divide instead
+prints a suggestion to use unsigned divide instead.
Q: Why BPF has implicit prologue and epilogue?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -157,12 +157,11 @@ Q: Does BPF have a stable ABI?
------------------------------
A: YES. BPF instructions, arguments to BPF programs, set of helper
functions and their arguments, recognized return codes are all part
-of ABI. However when tracing programs are using bpf_probe_read() helper
-to walk kernel internal datastructures and compile with kernel
-internal headers these accesses can and will break with newer
-kernels. The union bpf_attr -> kern_version is checked at load time
-to prevent accidentally loading kprobe-based bpf programs written
-for a different kernel. Networking programs don't do kern_version check.
+of ABI. However, there is one specific exception for tracing programs
+that use helpers like bpf_probe_read() to walk kernel internal
+data structures and compile with kernel internal headers. Both of these
+kernel internals are subject to change and can break with newer kernels
+such that the program needs to be adapted accordingly.
Q: How much stack space a BPF program uses?
-------------------------------------------
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
new file mode 100644
index 000000000000..9a60a5d60e38
--- /dev/null
+++ b/Documentation/bpf/btf.rst
@@ -0,0 +1,848 @@
+=====================
+BPF Type Format (BTF)
+=====================
+
+1. Introduction
+***************
+
+BTF (BPF Type Format) is the metadata format which encodes the debug info
+related to BPF programs/maps. The name BTF was used initially to describe
+data types. BTF was later extended to include function info for defined
+subroutines, and line info for source/line information.
+
+The debug info is used for map pretty printing, function signatures, etc. The
+function signatures enable better kernel symbols for bpf programs/functions.
+The line info helps generate source-annotated translated byte code, JITed code
+and verifier logs.
+
+The BTF specification contains two parts,
+ * BTF kernel API
+ * BTF ELF file format
+
+The kernel API is the contract between user space and the kernel. The kernel
+verifies the BTF info before using it. The ELF file format is a user space
+contract between the ELF file and the libbpf loader.
+
+The type and string sections are part of the BTF kernel API, describing the
+debug info (mostly type related) referenced by the bpf program. These two
+sections are discussed in detail in :ref:`BTF_Type_String`.
+
+.. _BTF_Type_String:
+
+2. BTF Type and String Encoding
+*******************************
+
+The file ``include/uapi/linux/btf.h`` provides a high-level definition of how
+types/strings are encoded.
+
+The beginning of the data blob must be::
+
+ struct btf_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 type_off; /* offset of type section */
+ __u32 type_len; /* length of type section */
+ __u32 str_off; /* offset of string section */
+ __u32 str_len; /* length of string section */
+ };
+
+The magic is ``0xeB9F``, which has different encoding for big and little
+endian systems, and can be used to test whether BTF is generated for a big- or
+little-endian target. The ``btf_header`` is designed to be extensible with
+``hdr_len`` equal to ``sizeof(struct btf_header)`` when a data blob is
+generated.
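+
+As an informal illustration only (this helper is hypothetical and not part of
+the kernel or libbpf API), a user space reader could validate the header and
+locate the two sections like this, remembering that ``type_off`` and
+``str_off`` are relative to the end of the header::
+
+    #include <errno.h>
+    #include <stddef.h>
+    #include <linux/btf.h>      /* struct btf_header */
+
+    /* Sketch: assumes 'data' holds a complete BTF blob of 'size' bytes,
+     * generated on a machine of the same endianness.
+     */
+    static int btf_locate_sections(const char *data, size_t size,
+                                   const char **type_sec,
+                                   const char **str_sec)
+    {
+        const struct btf_header *hdr = (const struct btf_header *)data;
+
+        if (size < sizeof(*hdr) || hdr->magic != 0xeB9F)
+            return -EINVAL;
+        if (hdr->hdr_len < sizeof(*hdr))
+            return -EINVAL;
+
+        /* Section offsets are counted from the end of the header. */
+        *type_sec = data + hdr->hdr_len + hdr->type_off;
+        *str_sec  = data + hdr->hdr_len + hdr->str_off;
+        return 0;
+    }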
+
+2.1 String Encoding
+===================
+
+The first string in the string section must be a null string. The rest of
+the string table is a concatenation of other null-terminated strings.
+
+2.2 Type Encoding
+=================
+
+The type id ``0`` is reserved for the ``void`` type. The type section is parsed
+sequentially and a type id is assigned to each recognized type starting from id
+``1``. Currently, the following types are supported::
+
+ #define BTF_KIND_INT 1 /* Integer */
+ #define BTF_KIND_PTR 2 /* Pointer */
+ #define BTF_KIND_ARRAY 3 /* Array */
+ #define BTF_KIND_STRUCT 4 /* Struct */
+ #define BTF_KIND_UNION 5 /* Union */
+ #define BTF_KIND_ENUM 6 /* Enumeration */
+ #define BTF_KIND_FWD 7 /* Forward */
+ #define BTF_KIND_TYPEDEF 8 /* Typedef */
+ #define BTF_KIND_VOLATILE 9 /* Volatile */
+ #define BTF_KIND_CONST 10 /* Const */
+ #define BTF_KIND_RESTRICT 11 /* Restrict */
+ #define BTF_KIND_FUNC 12 /* Function */
+ #define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
+
+Note that the type section encodes debug info, not just pure types.
+``BTF_KIND_FUNC`` is not a type; it represents a defined subprogram.
+
+Each type contains the following common data::
+
+ struct btf_type {
+ __u32 name_off;
+ /* "info" bits arrangement
+ * bits 0-15: vlen (e.g. # of struct's members)
+ * bits 16-23: unused
+ * bits 24-27: kind (e.g. int, ptr, array...etc)
+ * bits 28-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
+ */
+ __u32 info;
+ /* "size" is used by INT, ENUM, STRUCT and UNION.
+ * "size" tells the size of the type it is describing.
+ *
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
+ * "type" is a type_id referring to another type.
+ */
+ union {
+ __u32 size;
+ __u32 type;
+ };
+ };
+
+For certain kinds, the common data are followed by kind-specific data. The
+``name_off`` in ``struct btf_type`` specifies the offset in the string table.
+The following sections detail encoding of each kind.
+
+2.2.1 BTF_KIND_INT
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: any valid offset
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_INT
+ * ``info.vlen``: 0
+ * ``size``: the size of the int type in bytes.
+
+``btf_type`` is followed by a ``u32`` with the following bits arrangement::
+
+ #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
+ #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
+ #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
+
+The ``BTF_INT_ENCODING`` has the following attributes::
+
+ #define BTF_INT_SIGNED (1 << 0)
+ #define BTF_INT_CHAR (1 << 1)
+ #define BTF_INT_BOOL (1 << 2)
+
+The ``BTF_INT_ENCODING()`` provides extra information: signedness, char, or
+bool, for the int type. The char and bool encodings are mostly useful for
+pretty printing. At most one encoding can be specified for the int type.
+
+The ``BTF_INT_BITS()`` specifies the number of actual bits held by this int
+type. For example, a 4-bit bitfield has ``BTF_INT_BITS()`` equal to 4.
+The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
+for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
+
+The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
+for this int. For example, a bitfield struct member has:
+
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+
+Then in the struct memory layout, this member will occupy ``4`` bits starting
+from bits ``100 + 2 = 102``.
+
+Alternatively, the bitfield struct member can be the following to access the
+same bits as the above:
+
+ * btf member bit offset 102,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
+
+The original intention of ``BTF_INT_OFFSET()`` is to provide flexibility of
+bitfield encoding. Currently, both llvm and pahole generate
+``BTF_INT_OFFSET() = 0`` for all int types.
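+
+As a small usage sketch (the helper below is hypothetical, not kernel code),
+the ``__u32`` that follows an int ``btf_type`` can be decoded with the macros
+above::
+
+    #include <stdio.h>
+    #include <linux/btf.h>   /* BTF_INT_ENCODING(), BTF_INT_OFFSET(), BTF_INT_BITS() */
+
+    /* 'meta' is the __u32 immediately following a BTF_KIND_INT btf_type. */
+    static void print_int_meta(__u32 meta)
+    {
+        printf("encoding=%#x offset=%u bits=%u\n",
+               BTF_INT_ENCODING(meta),  /* BTF_INT_SIGNED/CHAR/BOOL bits */
+               BTF_INT_OFFSET(meta),    /* starting bit offset */
+               BTF_INT_BITS(meta));     /* number of bits actually used */
+    }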
+
+2.2.2 BTF_KIND_PTR
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_PTR
+ * ``info.vlen``: 0
+ * ``type``: the pointee type of the pointer
+
+No additional type data follow ``btf_type``.
+
+2.2.3 BTF_KIND_ARRAY
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_ARRAY
+ * ``info.vlen``: 0
+ * ``size/type``: 0, not used
+
+``btf_type`` is followed by one ``struct btf_array``::
+
+ struct btf_array {
+ __u32 type;
+ __u32 index_type;
+ __u32 nelems;
+ };
+
+The ``struct btf_array`` encoding:
+ * ``type``: the element type
+ * ``index_type``: the index type
+ * ``nelems``: the number of elements for this array (``0`` is also allowed).
+
+The ``index_type`` can be any regular int type (``u8``, ``u16``, ``u32``,
+``u64``, ``unsigned __int128``). The original design of including
+``index_type`` follows DWARF, which has an ``index_type`` for its array type.
+Currently in BTF, beyond type verification, the ``index_type`` is not used.
+
+The ``struct btf_array`` allows chaining through element type to represent
+multidimensional arrays. For example, for ``int a[5][6]``, the following type
+information illustrates the chaining:
+
+ * [1]: int
+ * [2]: array, ``btf_array.type = [1]``, ``btf_array.nelems = 6``
+ * [3]: array, ``btf_array.type = [2]``, ``btf_array.nelems = 5``
+
+Currently, both pahole and llvm collapse multidimensional arrays into a
+one-dimensional array, e.g., for ``a[5][6]``, the ``btf_array.nelems`` is
+equal to ``30``. This is because the original use case is map pretty printing,
+where the whole array is dumped out, so a one-dimensional array is enough. As
+more BTF usage is explored, pahole and llvm can be changed to generate proper
+chained representation for multidimensional arrays.
+
+2.2.4 BTF_KIND_STRUCT
+~~~~~~~~~~~~~~~~~~~~~
+2.2.5 BTF_KIND_UNION
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0 or offset to a valid C identifier
+ * ``info.kind_flag``: 0 or 1
+ * ``info.kind``: BTF_KIND_STRUCT or BTF_KIND_UNION
+ * ``info.vlen``: the number of struct/union members
+ * ``size``: the size of the struct/union in bytes
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_member``.::
+
+ struct btf_member {
+ __u32 name_off;
+ __u32 type;
+ __u32 offset;
+ };
+
+``struct btf_member`` encoding:
+ * ``name_off``: offset to a valid C identifier
+ * ``type``: the member type
+ * ``offset``: <see below>
+
+If the type info ``kind_flag`` is not set, the offset contains only the bit
+offset of the member. Note that the base type of the bitfield can only be int
+or enum type. If the bitfield size is 32, the base type can be either int or enum
+type. If the bitfield size is not 32, the base type must be int, and int type
+``BTF_INT_BITS()`` encodes the bitfield size.
+
+If the ``kind_flag`` is set, the ``btf_member.offset`` contains both member
+bitfield size and bit offset. The bitfield size and bit offset are calculated
+as below.::
+
+ #define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
+ #define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
+
+In this case, if the base type is an int type, it must be a regular int type:
+
+ * ``BTF_INT_OFFSET()`` must be 0.
+ * ``BTF_INT_BITS()`` must be equal to ``{1,2,4,8,16} * 8``.
+
+The following kernel patch introduced ``kind_flag`` and explained why both
+modes exist:
+
+ https://github.com/torvalds/linux/commit/9d5f9f701b1891466fb3dbb1806ad97716f95cc3#diff-fa650a64fdd3968396883d2fe8215ff3
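+
+As a hedged example of how a consumer of the type section might interpret a
+member when ``kind_flag`` is set (the helper is hypothetical, not kernel code;
+a bitfield size of 0 means the member is not a bitfield)::
+
+    #include <stdio.h>
+    #include <linux/btf.h>   /* struct btf_member, BTF_MEMBER_* macros */
+
+    static void print_member_offset(const struct btf_member *m)
+    {
+        printf("bitfield_size=%u bit_offset=%u\n",
+               BTF_MEMBER_BITFIELD_SIZE(m->offset),
+               BTF_MEMBER_BIT_OFFSET(m->offset));
+    }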
+
+2.2.6 BTF_KIND_ENUM
+~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0 or offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_ENUM
+ * ``info.vlen``: number of enum values
+ * ``size``: 4
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_enum``.::
+
+ struct btf_enum {
+ __u32 name_off;
+ __s32 val;
+ };
+
+The ``btf_enum`` encoding:
+ * ``name_off``: offset to a valid C identifier
+ * ``val``: any value
+
+2.2.7 BTF_KIND_FWD
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0 for struct, 1 for union
+ * ``info.kind``: BTF_KIND_FWD
+ * ``info.vlen``: 0
+ * ``type``: 0
+
+No additional type data follow ``btf_type``.
+
+2.2.8 BTF_KIND_TYPEDEF
+~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_TYPEDEF
+ * ``info.vlen``: 0
+ * ``type``: the type which can be referred by name at ``name_off``
+
+No additional type data follow ``btf_type``.
+
+2.2.9 BTF_KIND_VOLATILE
+~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_VOLATILE
+ * ``info.vlen``: 0
+ * ``type``: the type with ``volatile`` qualifier
+
+No additional type data follow ``btf_type``.
+
+2.2.10 BTF_KIND_CONST
+~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_CONST
+ * ``info.vlen``: 0
+ * ``type``: the type with ``const`` qualifier
+
+No additional type data follow ``btf_type``.
+
+2.2.11 BTF_KIND_RESTRICT
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_RESTRICT
+ * ``info.vlen``: 0
+ * ``type``: the type with ``restrict`` qualifier
+
+No additional type data follow ``btf_type``.
+
+2.2.12 BTF_KIND_FUNC
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_FUNC
+ * ``info.vlen``: 0
+ * ``type``: a BTF_KIND_FUNC_PROTO type
+
+No additional type data follow ``btf_type``.
+
+A BTF_KIND_FUNC defines not a type, but a subprogram (function) whose
+signature is defined by ``type``. The subprogram is thus an instance of that
+type. The BTF_KIND_FUNC may in turn be referenced by a func_info in the
+:ref:`BTF_Ext_Section` (ELF) or in the arguments to :ref:`BPF_Prog_Load`
+(ABI).
+
+2.2.13 BTF_KIND_FUNC_PROTO
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_FUNC_PROTO
+ * ``info.vlen``: # of parameters
+ * ``type``: the return type
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_param``.::
+
+ struct btf_param {
+ __u32 name_off;
+ __u32 type;
+ };
+
+If a BTF_KIND_FUNC_PROTO type is referred to by a BTF_KIND_FUNC type, then
+``btf_param.name_off`` must point to a valid C identifier except for the
+possible last argument representing the variable argument. The ``btf_param.type``
+refers to the parameter type.
+
+If the function has variable arguments, the last parameter is encoded with
+``name_off = 0`` and ``type = 0``.
+
+3. BTF Kernel API
+*****************
+
+The following bpf syscall commands involve BTF:
+ * BPF_BTF_LOAD: load a blob of BTF data into kernel
+ * BPF_MAP_CREATE: map creation with btf key and value type info.
+ * BPF_PROG_LOAD: prog load with btf function and line info.
+ * BPF_BTF_GET_FD_BY_ID: get a btf fd
+ * BPF_OBJ_GET_INFO_BY_FD: btf, func_info, line_info
+ and other btf related info are returned.
+
+The workflow typically looks like:
+::
+
+ Application:
+ BPF_BTF_LOAD
+ |
+ v
+ BPF_MAP_CREATE and BPF_PROG_LOAD
+ |
+ V
+ ......
+
+ Introspection tool:
+ ......
+ BPF_{PROG,MAP}_GET_NEXT_ID (get prog/map id's)
+ |
+ V
+ BPF_{PROG,MAP}_GET_FD_BY_ID (get a prog/map fd)
+ |
+ V
+ BPF_OBJ_GET_INFO_BY_FD (get bpf_prog_info/bpf_map_info with btf_id)
+ | |
+ V |
+ BPF_BTF_GET_FD_BY_ID (get btf_fd) |
+ | |
+ V |
+ BPF_OBJ_GET_INFO_BY_FD (get btf) |
+ | |
+ V V
+ pretty print types, dump func signatures and line info, etc.
+
+
+3.1 BPF_BTF_LOAD
+================
+
+Load a blob of BTF data into the kernel. A blob of data, described in
+:ref:`BTF_Type_String`, can be directly loaded into the kernel. A ``btf_fd``
+is returned to userspace.
+
+3.2 BPF_MAP_CREATE
+==================
+
+A map can be created with ``btf_fd`` and specified key/value type id.::
+
+ __u32 btf_fd; /* fd pointing to a BTF type data */
+ __u32 btf_key_type_id; /* BTF type_id of the key */
+ __u32 btf_value_type_id; /* BTF type_id of the value */
+
+In libbpf, the map can be defined with extra annotation like below:
+::
+
+ struct bpf_map_def SEC("maps") btf_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct ipv_counts),
+ .max_entries = 4,
+ };
+ BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
+
+Here, the parameters for the macro BPF_ANNOTATE_KV_PAIR are the map name, and
+the key and value types for the map. During ELF parsing, libbpf is able to extract
+key/value type_id's and assign them to BPF_MAP_CREATE attributes
+automatically.
+
+.. _BPF_Prog_Load:
+
+3.3 BPF_PROG_LOAD
+=================
+
+During prog_load, func_info and line_info can be passed to kernel with proper
+values for the following attributes:
+::
+
+ __u32 insn_cnt;
+ __aligned_u64 insns;
+ ......
+ __u32 prog_btf_fd; /* fd pointing to BTF type data */
+ __u32 func_info_rec_size; /* userspace bpf_func_info size */
+ __aligned_u64 func_info; /* func info */
+ __u32 func_info_cnt; /* number of bpf_func_info records */
+ __u32 line_info_rec_size; /* userspace bpf_line_info size */
+ __aligned_u64 line_info; /* line info */
+ __u32 line_info_cnt; /* number of bpf_line_info records */
+
+The func_info and line_info are arrays of the below records, respectively.::
+
+ struct bpf_func_info {
+ __u32 insn_off; /* [0, insn_cnt - 1] */
+ __u32 type_id; /* pointing to a BTF_KIND_FUNC type */
+ };
+ struct bpf_line_info {
+ __u32 insn_off; /* [0, insn_cnt - 1] */
+ __u32 file_name_off; /* offset to string table for the filename */
+ __u32 line_off; /* offset to string table for the source line */
+ __u32 line_col; /* line number and column number */
+ };
+
+func_info_rec_size is the size of each func_info record, and
+line_info_rec_size is the size of each line_info record. Passing the record
+size to the kernel makes it possible to extend the record itself in the future.
+
+Below are requirements for func_info:
+ * func_info[0].insn_off must be 0.
+ * the func_info insn_off is in strictly increasing order and matches
+ bpf func boundaries.
+
+Below are requirements for line_info:
+ * the first insn in each func must have a line_info record pointing to it.
+ * the line_info insn_off is in strictly increasing order.
+
+For line_info, the line number and column number are defined as below:
+::
+
+ #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
+ #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
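+
+For example, a ``line_col`` value of ``7182``, i.e. ``(7 << 10) | 14``, decodes
+to line 7, column 14; the same value appears in the LineInfo entries of the
+assembler output shown in the BTF Generation section below.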
+
+3.4 BPF_{PROG,MAP}_GET_NEXT_ID
+==============================
+
+In the kernel, every loaded program, map or btf object has a unique id. The id
+won't change during the lifetime of the program, map, or btf object.
+
+The bpf syscall command BPF_{PROG,MAP}_GET_NEXT_ID returns all ids, one per
+call, to user space, for bpf programs or maps respectively, so an
+introspection tool can iterate over all programs and maps.
+
+3.5 BPF_{PROG,MAP}_GET_FD_BY_ID
+===============================
+
+An introspection tool cannot use an id to get details about programs or maps.
+A file descriptor needs to be obtained first, for reference-counting purposes.
+
+3.6 BPF_OBJ_GET_INFO_BY_FD
+==========================
+
+Once a program/map fd is acquired, an introspection tool can get the detailed
+information from the kernel about this fd, some of which is BTF-related. For
+example, ``bpf_map_info`` returns ``btf_id`` and key/value type ids.
+``bpf_prog_info`` returns ``btf_id``, func_info, and line info for translated
+bpf byte codes, and jited_line_info.
+
+3.7 BPF_BTF_GET_FD_BY_ID
+========================
+
+With ``btf_id`` obtained in ``bpf_map_info`` and ``bpf_prog_info``, bpf
+syscall command BPF_BTF_GET_FD_BY_ID can retrieve a btf fd. Then, with
+command BPF_OBJ_GET_INFO_BY_FD, the btf blob, originally loaded into the
+kernel with BPF_BTF_LOAD, can be retrieved.
+
+With the btf blob, ``bpf_map_info``, and ``bpf_prog_info``, an introspection
+tool has full btf knowledge and is able to pretty print map key/values, dump
+func signatures and line info, along with byte/jit codes.
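+
+As a rough user space sketch of this workflow (illustrative only, assuming
+libbpf's syscall wrappers are available as ``<bpf/bpf.h>``; error handling is
+trimmed and the function name is made up)::
+
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <linux/bpf.h>   /* struct bpf_prog_info */
+    #include <bpf/bpf.h>     /* bpf_prog_get_next_id() and friends */
+
+    static void dump_prog_btf_ids(void)
+    {
+        __u32 id = 0;
+
+        while (!bpf_prog_get_next_id(id, &id)) {
+            struct bpf_prog_info info = {};
+            __u32 len = sizeof(info);
+            int prog_fd, btf_fd;
+
+            prog_fd = bpf_prog_get_fd_by_id(id);
+            if (prog_fd < 0)
+                continue;
+            if (!bpf_obj_get_info_by_fd(prog_fd, &info, &len) && info.btf_id) {
+                btf_fd = bpf_btf_get_fd_by_id(info.btf_id);
+                printf("prog id %u uses btf id %u\n", id, info.btf_id);
+                if (btf_fd >= 0)
+                    close(btf_fd);
+            }
+            close(prog_fd);
+        }
+    }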
+
+4. ELF File Format Interface
+****************************
+
+4.1 .BTF section
+================
+
+The .BTF section contains type and string data. The format of this section is
+the same as the one described in :ref:`BTF_Type_String`.
+
+.. _BTF_Ext_Section:
+
+4.2 .BTF.ext section
+====================
+
+The .BTF.ext section encodes func_info and line_info which need loader
+manipulation before being loaded into the kernel.
+
+The specification for .BTF.ext section is defined at ``tools/lib/bpf/btf.h``
+and ``tools/lib/bpf/btf.c``.
+
+The current header of .BTF.ext section::
+
+ struct btf_ext_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 func_info_off;
+ __u32 func_info_len;
+ __u32 line_info_off;
+ __u32 line_info_len;
+ };
+
+It is very similar to the .BTF section. Instead of type/string sections, it
+contains func_info and line_info sections. See :ref:`BPF_Prog_Load` for details
+about the func_info and line_info record formats.
+
+The func_info is organized as below.::
+
+ func_info_rec_size
+ btf_ext_info_sec for section #1 /* func_info for section #1 */
+ btf_ext_info_sec for section #2 /* func_info for section #2 */
+ ...
+
+``func_info_rec_size`` specifies the size of ``bpf_func_info`` structure when
+.BTF.ext is generated. ``btf_ext_info_sec``, defined below, is a collection of
+func_info for each specific ELF section.::
+
+ struct btf_ext_info_sec {
+ __u32 sec_name_off; /* offset to section name */
+ __u32 num_info;
+ /* Followed by num_info * record_size number of bytes */
+ __u8 data[0];
+ };
+
+Here, num_info must be greater than 0.
+
+The line_info is organized as below.::
+
+ line_info_rec_size
+ btf_ext_info_sec for section #1 /* line_info for section #1 */
+ btf_ext_info_sec for section #2 /* line_info for section #2 */
+ ...
+
+``line_info_rec_size`` specifies the size of ``bpf_line_info`` structure when
+.BTF.ext is generated.
+
+The interpretation of ``bpf_func_info->insn_off`` and
+``bpf_line_info->insn_off`` is different between kernel API and ELF API. For
+kernel API, the ``insn_off`` is the instruction offset in the unit of ``struct
+bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the
+beginning of section (``btf_ext_info_sec->sec_name_off``).
+
+5. Using BTF
+************
+
+5.1 bpftool map pretty print
+============================
+
+With BTF, the map key/value can be printed based on fields rather than simply
+raw bytes. This is especially valuable for large structures or if your data
+structure has bitfields. For example, for the following map,::
+
+ enum A { A1, A2, A3, A4, A5 };
+ typedef enum A ___A;
+ struct tmp_t {
+ char a1:4;
+ int a2:4;
+ int :4;
+ __u32 a3:4;
+ int b;
+ ___A b1:4;
+ enum A b2:4;
+ };
+ struct bpf_map_def SEC("maps") tmpmap = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct tmp_t),
+ .max_entries = 1,
+ };
+ BPF_ANNOTATE_KV_PAIR(tmpmap, int, struct tmp_t);
+
+bpftool is able to pretty print like below:
+::
+
+ [{
+ "key": 0,
+ "value": {
+ "a1": 0x2,
+ "a2": 0x4,
+ "a3": 0x6,
+ "b": 7,
+ "b1": 0x8,
+ "b2": 0xa
+ }
+ }
+ ]
+
+5.2 bpftool prog dump
+=====================
+
+The following is an example showing how func_info and line_info can help prog
+dump with better kernel symbol names, function prototypes and line
+information.::
+
+ $ bpftool prog dump jited pinned /sys/fs/bpf/test_btf_haskv
+ [...]
+ int test_long_fname_2(struct dummy_tracepoint_args * arg):
+ bpf_prog_44a040bf25481309_test_long_fname_2:
+ ; static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+ 0: push %rbp
+ 1: mov %rsp,%rbp
+ 4: sub $0x30,%rsp
+ b: sub $0x28,%rbp
+ f: mov %rbx,0x0(%rbp)
+ 13: mov %r13,0x8(%rbp)
+ 17: mov %r14,0x10(%rbp)
+ 1b: mov %r15,0x18(%rbp)
+ 1f: xor %eax,%eax
+ 21: mov %rax,0x20(%rbp)
+ 25: xor %esi,%esi
+ ; int key = 0;
+ 27: mov %esi,-0x4(%rbp)
+ ; if (!arg->sock)
+ 2a: mov 0x8(%rdi),%rdi
+ ; if (!arg->sock)
+ 2e: cmp $0x0,%rdi
+ 32: je 0x0000000000000070
+ 34: mov %rbp,%rsi
+ ; counts = bpf_map_lookup_elem(&btf_map, &key);
+ [...]
+
+5.3 Verifier Log
+================
+
+The following is an example of how line_info can help debug a verification
+failure.::
+
+ /* The code at tools/testing/selftests/bpf/test_xdp_noinline.c
+ * is modified as below.
+ */
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+ /*
+ if (data + 4 > data_end)
+ return XDP_DROP;
+ */
+ *(u32 *)data = dst->dst;
+
+ $ bpftool prog load ./test_xdp_noinline.o /sys/fs/bpf/test_xdp_noinline type xdp
+ ; data = (void *)(long)xdp->data;
+ 224: (79) r2 = *(u64 *)(r10 -112)
+ 225: (61) r2 = *(u32 *)(r2 +0)
+ ; *(u32 *)data = dst->dst;
+ 226: (63) *(u32 *)(r2 +0) = r1
+ invalid access to packet, off=0 size=4, R2(id=0,off=0,r=0)
+ R2 offset is outside of the packet
+
+6. BTF Generation
+*****************
+
+You need the latest pahole
+
+ https://git.kernel.org/pub/scm/devel/pahole/pahole.git/
+
+or llvm (8.0 or later). pahole acts as a dwarf-to-btf converter. It doesn't
+support .BTF.ext and the BTF_KIND_FUNC type yet. For example,::
+
+ -bash-4.4$ cat t.c
+ struct t {
+ int a:2;
+ int b:3;
+ int c:2;
+ } g;
+ -bash-4.4$ gcc -c -O2 -g t.c
+ -bash-4.4$ pahole -JV t.o
+ File t.o:
+ [1] STRUCT t kind_flag=1 size=4 vlen=3
+ a type_id=2 bitfield_size=2 bits_offset=0
+ b type_id=2 bitfield_size=3 bits_offset=2
+ c type_id=2 bitfield_size=2 bits_offset=5
+ [2] INT int size=4 bit_offset=0 nr_bits=32 encoding=SIGNED
+
+llvm is able to generate .BTF and .BTF.ext directly with -g for the bpf target
+only. The assembly output (-S) shows the BTF encoding in assembly
+format.::
+
+ -bash-4.4$ cat t2.c
+ typedef int __int32;
+ struct t2 {
+ int a2;
+ int (*f2)(char q1, __int32 q2, ...);
+ int (*f3)();
+ } g2;
+ int main() { return 0; }
+ int test() { return 0; }
+ -bash-4.4$ clang -c -g -O2 -target bpf t2.c
+ -bash-4.4$ readelf -S t2.o
+ ......
+ [ 8] .BTF PROGBITS 0000000000000000 00000247
+ 000000000000016e 0000000000000000 0 0 1
+ [ 9] .BTF.ext PROGBITS 0000000000000000 000003b5
+ 0000000000000060 0000000000000000 0 0 1
+ [10] .rel.BTF.ext REL 0000000000000000 000007e0
+ 0000000000000040 0000000000000010 16 9 8
+ ......
+ -bash-4.4$ clang -S -g -O2 -target bpf t2.c
+ -bash-4.4$ cat t2.s
+ ......
+ .section .BTF,"",@progbits
+ .short 60319 # 0xeb9f
+ .byte 1
+ .byte 0
+ .long 24
+ .long 0
+ .long 220
+ .long 220
+ .long 122
+ .long 0 # BTF_KIND_FUNC_PROTO(id = 1)
+ .long 218103808 # 0xd000000
+ .long 2
+ .long 83 # BTF_KIND_INT(id = 2)
+ .long 16777216 # 0x1000000
+ .long 4
+ .long 16777248 # 0x1000020
+ ......
+ .byte 0 # string offset=0
+ .ascii ".text" # string offset=1
+ .byte 0
+ .ascii "/home/yhs/tmp-pahole/t2.c" # string offset=7
+ .byte 0
+ .ascii "int main() { return 0; }" # string offset=33
+ .byte 0
+ .ascii "int test() { return 0; }" # string offset=58
+ .byte 0
+ .ascii "int" # string offset=83
+ ......
+ .section .BTF.ext,"",@progbits
+ .short 60319 # 0xeb9f
+ .byte 1
+ .byte 0
+ .long 24
+ .long 0
+ .long 28
+ .long 28
+ .long 44
+ .long 8 # FuncInfo
+ .long 1 # FuncInfo section string offset=1
+ .long 2
+ .long .Lfunc_begin0
+ .long 3
+ .long .Lfunc_begin1
+ .long 5
+ .long 16 # LineInfo
+ .long 1 # LineInfo section string offset=1
+ .long 2
+ .long .Ltmp0
+ .long 7
+ .long 33
+ .long 7182 # Line 7 Col 14
+ .long .Ltmp3
+ .long 7
+ .long 58
+ .long 8206 # Line 8 Col 14
+
+7. Testing
+**********
+
+The kernel bpf selftest `test_btf.c` provides an extensive set of BTF-related
+tests.
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 00a8450a602f..4e77932959cc 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -15,6 +15,13 @@ that goes into great technical depth about the BPF Architecture.
The primary info for the bpf syscall is available in the `man-pages`_
for `bpf(2)`_.
+BPF Type Format (BTF)
+=====================
+
+.. toctree::
+ :maxdepth: 1
+
+ btf
Frequently asked questions (FAQ)
diff --git a/Documentation/cgroup-v1/memcg_test.txt b/Documentation/cgroup-v1/memcg_test.txt
index 5c7f310f32bb..621e29ffb358 100644
--- a/Documentation/cgroup-v1/memcg_test.txt
+++ b/Documentation/cgroup-v1/memcg_test.txt
@@ -107,9 +107,9 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
8. LRU
Each memcg has its own private LRU. Now, its handling is under global
- VM's control (means that it's handled under global zone_lru_lock).
+ VM's control (means that it's handled under global pgdat->lru_lock).
Almost all routines around memcg's LRU is called by global LRU's
- list management functions under zone_lru_lock().
+ list management functions under pgdat->lru_lock.
A special function is mem_cgroup_isolate_pages(). This scans
memcg's private LRU and call __isolate_lru_page() to extract a page
diff --git a/Documentation/cgroup-v1/memory.txt b/Documentation/cgroup-v1/memory.txt
index 3682e99234c2..a347fc9293e5 100644
--- a/Documentation/cgroup-v1/memory.txt
+++ b/Documentation/cgroup-v1/memory.txt
@@ -267,11 +267,11 @@ When oom event notifier is registered, event will be delivered.
Other lock order is following:
PG_locked.
mm->page_table_lock
- zone_lru_lock
+ pgdat->lru_lock
lock_page_cgroup.
In many cases, just lock_page_cgroup() is called.
per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
- zone_lru_lock, it has no lock of its own.
+ pgdat->lru_lock, it has no lock of its own.
2.7 Kernel Memory Extension (CONFIG_MEMCG_KMEM)
diff --git a/Documentation/cgroup-v1/pids.txt b/Documentation/cgroup-v1/pids.txt
index 1a078b5d281a..e105d708ccde 100644
--- a/Documentation/cgroup-v1/pids.txt
+++ b/Documentation/cgroup-v1/pids.txt
@@ -33,6 +33,9 @@ limit in the hierarchy is followed).
pids.current tracks all child cgroup hierarchies, so parent/pids.current is a
superset of parent/child/pids.current.
+The pids.events file contains event counters:
+ - max: Number of times fork failed because limit was hit.
+
Example
-------
diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst
index 322851bada16..976e85adffe8 100644
--- a/Documentation/core-api/refcount-vs-atomic.rst
+++ b/Documentation/core-api/refcount-vs-atomic.rst
@@ -54,6 +54,13 @@ must propagate to all other CPUs before the release operation
(A-cumulative property). This is implemented using
:c:func:`smp_store_release`.
+An ACQUIRE memory ordering guarantees that all post loads and
+stores (all po-later instructions) on the same CPU are
+completed after the acquire operation. It also guarantees that all
+po-later stores on the same CPU must propagate to all other CPUs
+after the acquire operation executes. This is implemented using
+:c:func:`smp_acquire__after_ctrl_dep`.
+
A control dependency (on success) for refcounters guarantees that
if a reference for an object was successfully obtained (reference
counter increment or addition happened, function returned true),
@@ -119,13 +126,24 @@ Memory ordering guarantees changes:
result of obtaining pointer to the object!
-case 5) - decrement-based RMW ops that return a value
------------------------------------------------------
+case 5) - generic dec/sub decrement-based RMW ops that return a value
+---------------------------------------------------------------------
Function changes:
* :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test`
* :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test`
+
+Memory ordering guarantees changes:
+
+ * fully ordered --> RELEASE ordering + ACQUIRE ordering on success
+
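+For instance, in the common put-path pattern sketched below (an illustrative
+example, not taken from the kernel sources), the RELEASE ordering makes this
+CPU's earlier accesses to the object visible before the count drops, and the
+ACQUIRE ordering on success ensures the free is ordered after the other
+users' earlier accesses and decrements::
+
+    #include <linux/refcount.h>
+    #include <linux/slab.h>
+
+    struct foo {
+        refcount_t ref;
+        /* ... payload ... */
+    };
+
+    static void foo_put(struct foo *f)
+    {
+        /* RELEASE on the decrement; ACQUIRE only when the count hits zero,
+         * so freeing the object is safely ordered against prior users.
+         */
+        if (refcount_dec_and_test(&f->ref))
+            kfree(f);
+    }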
+
+case 6) - other decrement-based RMW ops that return a value
+------------------------------------------------------------
+
+Function changes:
+
* no atomic counterpart --> :c:func:`refcount_dec_if_one`
* ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)``
@@ -136,7 +154,7 @@ Memory ordering guarantees changes:
.. note:: :c:func:`atomic_add_unless` only provides full order on success.
-case 6) - lock-based RMW
+case 7) - lock-based RMW
------------------------
Function changes:
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index 6a6d67acaf69..5d54b27c6eba 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -108,12 +108,13 @@ some, but not all of the other indices changing.
Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
will not need to allocate memory. The :c:func:`xa_reserve` function
-will store a reserved entry at the indicated index. Users of the normal
-API will see this entry as containing ``NULL``. If you do not need to
-use the reserved entry, you can call :c:func:`xa_release` to remove the
-unused entry. If another user has stored to the entry in the meantime,
-:c:func:`xa_release` will do nothing; if instead you want the entry to
-become ``NULL``, you should use :c:func:`xa_erase`.
+will store a reserved entry at the indicated index. Users of the
+normal API will see this entry as containing ``NULL``. If you do
+not need to use the reserved entry, you can call :c:func:`xa_release`
+to remove the unused entry. If another user has stored to the entry
+in the meantime, :c:func:`xa_release` will do nothing; if instead you
+want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
+Using :c:func:`xa_insert` on a reserved entry will fail.
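+
+A minimal sketch of that flow (illustrative only; the helper name is made up
+and error handling is reduced to the bare minimum)::
+
+    #include <linux/xarray.h>
+
+    /* Reserve a slot at 'index' up front, then either fill it or give the
+     * reservation back so the slot reads as NULL again.
+     */
+    static int store_or_release(struct xarray *xa, unsigned long index,
+                                void *entry)
+    {
+        int err = xa_reserve(xa, index, GFP_KERNEL);
+
+        if (err)
+            return err;
+        if (entry)
+            xa_store(xa, index, entry, GFP_KERNEL);  /* should not need to allocate */
+        else
+            xa_release(xa, index);                   /* drop the unused reservation */
+        return 0;
+    }
+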
If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
will return ``true``.
@@ -183,6 +184,8 @@ Takes xa_lock internally:
* :c:func:`xa_store_bh`
* :c:func:`xa_store_irq`
* :c:func:`xa_insert`
+ * :c:func:`xa_insert_bh`
+ * :c:func:`xa_insert_irq`
* :c:func:`xa_erase`
* :c:func:`xa_erase_bh`
* :c:func:`xa_erase_irq`
diff --git a/Documentation/cpuidle/driver.txt b/Documentation/cpuidle/driver.txt
deleted file mode 100644
index 1b0d81d92583..000000000000
--- a/Documentation/cpuidle/driver.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
- Supporting multiple CPU idle levels in kernel
-
- cpuidle drivers
-
-
-
-
-cpuidle driver hooks into the cpuidle infrastructure and handles the
-architecture/platform dependent part of CPU idle states. Driver
-provides the platform idle state detection capability and also
-has mechanisms in place to support actual entry-exit into CPU idle states.
-
-cpuidle driver initializes the cpuidle_device structure for each CPU device
-and registers with cpuidle using cpuidle_register_device.
-
-If all the idle states are the same, the wrapper function cpuidle_register
-could be used instead.
-
-It can also support the dynamic changes (like battery <-> AC), by using
-cpuidle_pause_and_lock, cpuidle_disable_device and cpuidle_enable_device,
-cpuidle_resume_and_unlock.
-
-Interfaces:
-extern int cpuidle_register(struct cpuidle_driver *drv,
- const struct cpumask *const coupled_cpus);
-extern int cpuidle_unregister(struct cpuidle_driver *drv);
-extern int cpuidle_register_driver(struct cpuidle_driver *drv);
-extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
-extern int cpuidle_register_device(struct cpuidle_device *dev);
-extern void cpuidle_unregister_device(struct cpuidle_device *dev);
-
-extern void cpuidle_pause_and_lock(void);
-extern void cpuidle_resume_and_unlock(void);
-extern int cpuidle_enable_device(struct cpuidle_device *dev);
-extern void cpuidle_disable_device(struct cpuidle_device *dev);
diff --git a/Documentation/cpuidle/governor.txt b/Documentation/cpuidle/governor.txt
deleted file mode 100644
index d9020f5e847b..000000000000
--- a/Documentation/cpuidle/governor.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
-
- Supporting multiple CPU idle levels in kernel
-
- cpuidle governors
-
-
-
-
-cpuidle governor is policy routine that decides what idle state to enter at
-any given time. cpuidle core uses different callbacks to the governor.
-
-* enable() to enable governor for a particular device
-* disable() to disable governor for a particular device
-* select() to select an idle state to enter
-* reflect() called after returning from the idle state, which can be used
- by the governor for some record keeping.
-
-More than one governor can be registered at the same time and
-users can switch between drivers using /sysfs interface (when enabled).
-More than one governor part is supported for developers to easily experiment
-with different governors. By default, most optimal governor based on your
-kernel configuration and platform will be selected by cpuidle.
-
-Interfaces:
-extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-struct cpuidle_governor
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 6e5cef0ed6fb..50daa0b3b032 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -17,7 +17,11 @@ extra-y += $(DT_TMP_SCHEMA)
quiet_cmd_mk_schema = SCHEMA $@
cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^)
-DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml')
+DT_DOCS = $(shell \
+ cd $(srctree)/$(src) && \
+ find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+ )
+
DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt
index 8dbc259081e4..7f40cb5f490b 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.txt
+++ b/Documentation/devicetree/bindings/arm/amlogic.txt
@@ -109,6 +109,7 @@ Board compatible values (alphabetically, grouped by SoC):
- "amlogic,s400" (Meson axg a113d)
- "amlogic,u200" (Meson g12a s905d2)
+ - "amediatech,x96-max" (Meson g12a s905x2)
Amlogic Meson Firmware registers Interface
------------------------------------------
diff --git a/Documentation/devicetree/bindings/arm/armadeus.txt b/Documentation/devicetree/bindings/arm/armadeus.txt
deleted file mode 100644
index 9821283ff516..000000000000
--- a/Documentation/devicetree/bindings/arm/armadeus.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Armadeus i.MX Platforms Device Tree Bindings
------------------------------------------------
-
-APF51: i.MX51 based module.
-Required root node properties:
- - compatible = "armadeus,imx51-apf51", "fsl,imx51";
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index 14f319f694b7..3363d9ae4e74 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -147,6 +147,7 @@ required properties:
- compatible: Should be "atmel,<chip>-sfr", "syscon" or
"atmel,<chip>-sfrbu", "syscon"
<chip> can be "sama5d3", "sama5d4" or "sama5d2".
+ It also can be "microchip,sam9x60-sfr", "syscon".
- reg: Should contain registers location and length
sfr@f0038000 {
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
index 0dcc3ea5adff..245328f36580 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
@@ -30,6 +30,10 @@ Raspberry Pi 2 Model B
Required root node properties:
compatible = "raspberrypi,2-model-b", "brcm,bcm2836";
+Raspberry Pi 3 Model A+
+Required root node properties:
+compatible = "raspberrypi,3-model-a-plus", "brcm,bcm2837";
+
Raspberry Pi 3 Model B
Required root node properties:
compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
diff --git a/Documentation/devicetree/bindings/arm/bhf.txt b/Documentation/devicetree/bindings/arm/bhf.txt
deleted file mode 100644
index 886b503caf9c..000000000000
--- a/Documentation/devicetree/bindings/arm/bhf.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Beckhoff Automation Platforms Device Tree Bindings
---------------------------------------------------
-
-CX9020 Embedded PC
-Required root node properties:
- - compatible = "bhf,cx9020", "fsl,imx53";
diff --git a/Documentation/devicetree/bindings/arm/bitmain.yaml b/Documentation/devicetree/bindings/arm/bitmain.yaml
new file mode 100644
index 000000000000..0efdb4ac028e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bitmain.yaml
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bitmain.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bitmain platform device tree bindings
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - bitmain,sophon-edge
+ - const: bitmain,bm1880
+...
diff --git a/Documentation/devicetree/bindings/arm/compulab-boards.txt b/Documentation/devicetree/bindings/arm/compulab-boards.txt
deleted file mode 100644
index 42a10285af9c..000000000000
--- a/Documentation/devicetree/bindings/arm/compulab-boards.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-CompuLab SB-SOM is a multi-module baseboard capable of carrying:
- - CM-T43
- - CM-T54
- - CM-QS600
- - CL-SOM-AM57x
- - CL-SOM-iMX7
-modules with minor modifications to the SB-SOM assembly.
-
-Required root node properties:
- - compatible = should be "compulab,sb-som"
-
-Compulab CL-SOM-iMX7 is a miniature System-on-Module (SoM) based on
-Freescale i.MX7 ARM Cortex-A7 System-on-Chip.
-
-Required root node properties:
- - compatible = "compulab,cl-som-imx7", "fsl,imx7d";
-
-Compulab SBC-iMX7 is a single board computer based on the
-Freescale i.MX7 system-on-chip. SBC-iMX7 is implemented with
-the CL-SOM-iMX7 System-on-Module providing most of the functions,
-and SB-SOM-iMX7 carrier board providing additional peripheral
-functions and connectors.
-
-Required root node properties:
- - compatible = "compulab,sbc-imx7", "compulab,cl-som-imx7", "fsl,imx7d";
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 84262cdb8d29..96fa46cb133c 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -235,4 +235,4 @@ cpus {
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 298c17b327c6..365dcf384d73 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -228,6 +228,7 @@ patternProperties:
- renesas,r9a06g032-smp
- rockchip,rk3036-smp
- rockchip,rk3066-smp
+ - socionext,milbeaut-m10v-smp
- ste,dbx500-smp
cpu-release-addr:
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,imx7ulp-sim.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,imx7ulp-sim.txt
new file mode 100644
index 000000000000..7d0c7f002401
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,imx7ulp-sim.txt
@@ -0,0 +1,16 @@
+Freescale i.MX7ULP System Integration Module
+----------------------------------------------
+The system integration module (SIM) provides system control and chip configuration
+registers. In this module, chip revision information is located in the JTAG ID
+register, and a set of registers has been made available in the DGO domain for
+SW use, with the objective of maintaining their values between system resets.
+
+Required properties:
+- compatible: Should be "fsl,imx7ulp-sim".
+- reg: Specifies base physical address and size of the register sets.
+
+Example:
+sim: sim@410a3000 {
+ compatible = "fsl,imx7ulp-sim", "syscon";
+ reg = <0x410a3000 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
index 27784b6edfed..72d481c8dd48 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
@@ -58,7 +58,11 @@ This binding for the SCU power domain providers uses the generic power
domain binding[2].
Required properties:
-- compatible: Should be "fsl,imx8qxp-scu-pd".
+- compatible: Should be one of:
+ "fsl,imx8qm-scu-pd",
+ "fsl,imx8qxp-scu-pd"
+ followed by "fsl,scu-pd"
+
- #power-domain-cells: Must be 1. Contains the Resource ID used by
SCU commands.
See detailed Resource ID list from:
@@ -70,7 +74,10 @@ Clock bindings based on SCU Message Protocol
This binding uses the common clock binding[1].
Required properties:
-- compatible: Should be "fsl,imx8qxp-clock".
+- compatible: Should be one of:
+ "fsl,imx8qm-clock"
+ "fsl,imx8qxp-clock"
+ followed by "fsl,scu-clk"
- #clock-cells: Should be 1. Contains the Clock ID value.
- clocks: List of clock specifiers, must contain an entry for
each required entry in clock-names
@@ -137,7 +144,7 @@ firmware {
&lsio_mu1 1 3>;
clk: clk {
- compatible = "fsl,imx8qxp-clk";
+ compatible = "fsl,imx8qxp-clk", "fsl,scu-clk";
#clock-cells = <1>;
};
@@ -154,7 +161,7 @@ firmware {
};
pd: imx8qx-pd {
- compatible = "fsl,imx8qxp-scu-pd";
+ compatible = "fsl,imx8qxp-scu-pd", "fsl,scu-pd";
#power-domain-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
deleted file mode 100644
index 7fbc42484001..000000000000
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ /dev/null
@@ -1,237 +0,0 @@
-Freescale i.MX Platforms Device Tree Bindings
------------------------------------------------
-
-i.MX23 Evaluation Kit
-Required root node properties:
- - compatible = "fsl,imx23-evk", "fsl,imx23";
-
-i.MX25 Product Development Kit
-Required root node properties:
- - compatible = "fsl,imx25-pdk", "fsl,imx25";
-
-i.MX27 Product Development Kit
-Required root node properties:
- - compatible = "fsl,imx27-pdk", "fsl,imx27";
-
-i.MX28 Evaluation Kit
-Required root node properties:
- - compatible = "fsl,imx28-evk", "fsl,imx28";
-
-i.MX51 Babbage Board
-Required root node properties:
- - compatible = "fsl,imx51-babbage", "fsl,imx51";
-
-i.MX53 Automotive Reference Design Board
-Required root node properties:
- - compatible = "fsl,imx53-ard", "fsl,imx53";
-
-i.MX53 Evaluation Kit
-Required root node properties:
- - compatible = "fsl,imx53-evk", "fsl,imx53";
-
-i.MX53 Quick Start Board
-Required root node properties:
- - compatible = "fsl,imx53-qsb", "fsl,imx53";
-
-i.MX53 Smart Mobile Reference Design Board
-Required root node properties:
- - compatible = "fsl,imx53-smd", "fsl,imx53";
-
-i.MX6 Quad Armadillo2 Board
-Required root node properties:
- - compatible = "fsl,imx6q-arm2", "fsl,imx6q";
-
-i.MX6 Quad SABRE Lite Board
-Required root node properties:
- - compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
-
-i.MX6 Quad SABRE Smart Device Board
-Required root node properties:
- - compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
-
-i.MX6 Quad SABRE Automotive Board
-Required root node properties:
- - compatible = "fsl,imx6q-sabreauto", "fsl,imx6q";
-
-i.MX6SLL EVK board
-Required root node properties:
- - compatible = "fsl,imx6sll-evk", "fsl,imx6sll";
-
-i.MX6 Quad Plus SABRE Smart Device Board
-Required root node properties:
- - compatible = "fsl,imx6qp-sabresd", "fsl,imx6qp";
-
-i.MX6 Quad Plus SABRE Automotive Board
-Required root node properties:
- - compatible = "fsl,imx6qp-sabreauto", "fsl,imx6qp";
-
-i.MX6 DualLite SABRE Smart Device Board
-Required root node properties:
- - compatible = "fsl,imx6dl-sabresd", "fsl,imx6dl";
-
-i.MX6 DualLite/Solo SABRE Automotive Board
-Required root node properties:
- - compatible = "fsl,imx6dl-sabreauto", "fsl,imx6dl";
-
-i.MX6 SoloLite EVK Board
-Required root node properties:
- - compatible = "fsl,imx6sl-evk", "fsl,imx6sl";
-
-i.MX6 UltraLite 14x14 EVK Board
-Required root node properties:
- - compatible = "fsl,imx6ul-14x14-evk", "fsl,imx6ul";
-
-i.MX6 UltraLiteLite 14x14 EVK Board
-Required root node properties:
- - compatible = "fsl,imx6ull-14x14-evk", "fsl,imx6ull";
-
-i.MX6 ULZ 14x14 EVK Board
-Required root node properties:
- - compatible = "fsl,imx6ulz-14x14-evk", "fsl,imx6ull", "fsl,imx6ulz";
-
-i.MX6 SoloX SDB Board
-Required root node properties:
- - compatible = "fsl,imx6sx-sdb", "fsl,imx6sx";
-
-i.MX6 SoloX Sabre Auto Board
-Required root node properties:
- - compatible = "fsl,imx6sx-sabreauto", "fsl,imx6sx";
-
-i.MX7 SabreSD Board
-Required root node properties:
- - compatible = "fsl,imx7d-sdb", "fsl,imx7d";
-
-i.MX7ULP Evaluation Kit
-Required root node properties:
- - compatible = "fsl,imx7ulp-evk", "fsl,imx7ulp";
-
-Generic i.MX boards
--------------------
-
-No iomux setup is done for these boards, so this must have been configured
-by the bootloader for boards to work with the generic bindings.
-
-i.MX27 generic board
-Required root node properties:
- - compatible = "fsl,imx27";
-
-i.MX51 generic board
-Required root node properties:
- - compatible = "fsl,imx51";
-
-i.MX53 generic board
-Required root node properties:
- - compatible = "fsl,imx53";
-
-i.MX6q generic board
-Required root node properties:
- - compatible = "fsl,imx6q";
-
-i.MX7ULP generic board
-Required root node properties:
- - compatible = "fsl,imx7ulp";
-
-Freescale Vybrid Platform Device Tree Bindings
-----------------------------------------------
-
-For the Vybrid SoC familiy all variants with DDR controller are supported,
-which is the VF5xx and VF6xx series. Out of historical reasons, in most
-places the kernel uses vf610 to refer to the whole familiy.
-The compatible string "fsl,vf610m4" is used for the secondary Cortex-M4
-core support.
-
-Required root node compatible property (one of them):
- - compatible = "fsl,vf500";
- - compatible = "fsl,vf510";
- - compatible = "fsl,vf600";
- - compatible = "fsl,vf610";
- - compatible = "fsl,vf610m4";
-
-Freescale LS1021A Platform Device Tree Bindings
-------------------------------------------------
-
-Required root node compatible properties:
- - compatible = "fsl,ls1021a";
-
-Freescale ARMv8 based Layerscape SoC family Device Tree Bindings
-----------------------------------------------------------------
-
-LS1012A SoC
-Required root node properties:
- - compatible = "fsl,ls1012a";
-
-LS1012A ARMv8 based RDB Board
-Required root node properties:
- - compatible = "fsl,ls1012a-rdb", "fsl,ls1012a";
-
-LS1012A ARMv8 based FRDM Board
-Required root node properties:
- - compatible = "fsl,ls1012a-frdm", "fsl,ls1012a";
-
-LS1012A ARMv8 based QDS Board
-Required root node properties:
- - compatible = "fsl,ls1012a-qds", "fsl,ls1012a";
-
-LS1043A SoC
-Required root node properties:
- - compatible = "fsl,ls1043a";
-
-LS1043A ARMv8 based RDB Board
-Required root node properties:
- - compatible = "fsl,ls1043a-rdb", "fsl,ls1043a";
-
-LS1043A ARMv8 based QDS Board
-Required root node properties:
- - compatible = "fsl,ls1043a-qds", "fsl,ls1043a";
-
-LS1046A SoC
-Required root node properties:
- - compatible = "fsl,ls1046a";
-
-LS1046A ARMv8 based QDS Board
-Required root node properties:
- - compatible = "fsl,ls1046a-qds", "fsl,ls1046a";
-
-LS1046A ARMv8 based RDB Board
-Required root node properties:
- - compatible = "fsl,ls1046a-rdb", "fsl,ls1046a";
-
-LS1088A SoC
-Required root node properties:
- - compatible = "fsl,ls1088a";
-
-LS1088A ARMv8 based QDS Board
-Required root node properties:
- - compatible = "fsl,ls1088a-qds", "fsl,ls1088a";
-
-LS1088A ARMv8 based RDB Board
-Required root node properties:
- - compatible = "fsl,ls1088a-rdb", "fsl,ls1088a";
-
-LS2080A SoC
-Required root node properties:
- - compatible = "fsl,ls2080a";
-
-LS2080A ARMv8 based Simulator model
-Required root node properties:
- - compatible = "fsl,ls2080a-simu", "fsl,ls2080a";
-
-LS2080A ARMv8 based QDS Board
-Required root node properties:
- - compatible = "fsl,ls2080a-qds", "fsl,ls2080a";
-
-LS2080A ARMv8 based RDB Board
-Required root node properties:
- - compatible = "fsl,ls2080a-rdb", "fsl,ls2080a";
-
-LS2088A SoC
-Required root node properties:
- - compatible = "fsl,ls2088a";
-
-LS2088A ARMv8 based QDS Board
-Required root node properties:
- - compatible = "fsl,ls2088a-qds", "fsl,ls2088a";
-
-LS2088A ARMv8 based RDB Board
-Required root node properties:
- - compatible = "fsl,ls2088a-rdb", "fsl,ls2088a";
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
new file mode 100644
index 000000000000..7e2cd6ad26bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -0,0 +1,232 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bindings/arm/fsl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX Platforms Device Tree Bindings
+
+maintainers:
+ - Shawn Guo <shawnguo@kernel.org>
+ - Li Yang <leoyang.li@nxp.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: i.MX23 based Boards
+ items:
+ - enum:
+ - fsl,imx23-evk
+ - olimex,imx23-olinuxino
+ - const: fsl,imx23
+
+ - description: i.MX25 Product Development Kit
+ items:
+ - enum:
+ - fsl,imx25-pdk
+ - const: fsl,imx25
+
+ - description: i.MX27 Product Development Kit
+ items:
+ - enum:
+ - fsl,imx27-pdk
+ - const: fsl,imx27
+
+ - description: i.MX28 based Boards
+ items:
+ - enum:
+ - fsl,imx28-evk
+ - i2se,duckbill
+ - i2se,duckbill-2
+ - technologic,imx28-ts4600
+ - const: fsl,imx28
+ - description: i.MX28 Duckbill 2 based Boards
+ items:
+ - enum:
+ - i2se,duckbill-2-485
+ - i2se,duckbill-2-enocean
+ - i2se,duckbill-2-spi
+ - const: i2se,duckbill-2
+ - const: fsl,imx28
+
+ - description: i.MX51 Babbage Board
+ items:
+ - enum:
+ - armadeus,imx51-apf51
+ - fsl,imx51-babbage
+ - technologic,imx51-ts4800
+ - const: fsl,imx51
+
+ - description: i.MX53 based Boards
+ items:
+ - enum:
+ - bhf,cx9020
+ - fsl,imx53-ard
+ - fsl,imx53-evk
+ - fsl,imx53-qsb
+ - fsl,imx53-smd
+ - const: fsl,imx53
+
+ - description: i.MX6Q based Boards
+ items:
+ - enum:
+ - fsl,imx6q-arm2
+ - fsl,imx6q-sabreauto
+ - fsl,imx6q-sabrelite
+ - fsl,imx6q-sabresd
+ - technologic,imx6q-ts4900
+ - technologic,imx6q-ts7970
+ - const: fsl,imx6q
+
+ - description: i.MX6QP based Boards
+ items:
+ - enum:
+ - fsl,imx6qp-sabreauto # i.MX6 Quad Plus SABRE Automotive Board
+ - fsl,imx6qp-sabresd # i.MX6 Quad Plus SABRE Smart Device Board
+ - const: fsl,imx6qp
+
+ - description: i.MX6DL based Boards
+ items:
+ - enum:
+ - fsl,imx6dl-sabreauto # i.MX6 DualLite/Solo SABRE Automotive Board
+ - fsl,imx6dl-sabresd # i.MX6 DualLite SABRE Smart Device Board
+ - technologic,imx6dl-ts4900
+ - technologic,imx6dl-ts7970
+ - ysoft,imx6dl-yapp4-draco # i.MX6 DualLite Y Soft IOTA Draco board
+ - ysoft,imx6dl-yapp4-hydra # i.MX6 DualLite Y Soft IOTA Hydra board
+ - ysoft,imx6dl-yapp4-ursa # i.MX6 Solo Y Soft IOTA Ursa board
+ - const: fsl,imx6dl
+
+ - description: i.MX6SL based Boards
+ items:
+ - enum:
+ - fsl,imx6sl-evk # i.MX6 SoloLite EVK Board
+ - const: fsl,imx6sl
+
+ - description: i.MX6SLL based Boards
+ items:
+ - enum:
+ - fsl,imx6sll-evk
+ - const: fsl,imx6sll
+
+ - description: i.MX6SX based Boards
+ items:
+ - enum:
+ - fsl,imx6sx-sabreauto # i.MX6 SoloX Sabre Auto Board
+ - fsl,imx6sx-sdb # i.MX6 SoloX SDB Board
+ - const: fsl,imx6sx
+
+ - description: i.MX6UL based Boards
+ items:
+ - enum:
+ - fsl,imx6ul-14x14-evk # i.MX6 UltraLite 14x14 EVK Board
+ - const: fsl,imx6ul
+
+ - description: i.MX6ULL based Boards
+ items:
+ - enum:
+ - fsl,imx6ull-14x14-evk # i.MX6 UltraLiteLite 14x14 EVK Board
+ - const: fsl,imx6ull
+
+ - description: i.MX6ULZ based Boards
+ items:
+ - enum:
+ - fsl,imx6ulz-14x14-evk # i.MX6 ULZ 14x14 EVK Board
+      - const: fsl,imx6ull
+ - const: fsl,imx6ulz
+
+ - description: i.MX7D based Boards
+ items:
+ - enum:
+ - fsl,imx7d-sdb # i.MX7 SabreSD Board
+ - const: fsl,imx7d
+
+ - description:
+ Compulab SBC-iMX7 is a single board computer based on the
+ Freescale i.MX7 system-on-chip. SBC-iMX7 is implemented with
+ the CL-SOM-iMX7 System-on-Module providing most of the functions,
+ and SB-SOM-iMX7 carrier board providing additional peripheral
+ functions and connectors.
+ items:
+ - const: compulab,sbc-imx7
+ - const: compulab,cl-som-imx7
+ - const: fsl,imx7d
+
+ - description: i.MX8QXP based Boards
+ items:
+ - enum:
+ - fsl,imx8qxp-mek # i.MX8QXP MEK Board
+ - const: fsl,imx8qxp
+
+ - description:
+ Freescale Vybrid Platform Device Tree Bindings
+
+      For the Vybrid SoC family all variants with a DDR controller are
+      supported, namely the VF5xx and VF6xx series. For historical reasons,
+      in most places the kernel uses vf610 to refer to the whole family.
+ The compatible string "fsl,vf610m4" is used for the secondary Cortex-M4
+ core support.
+ items:
+ - enum:
+ - fsl,vf500
+ - fsl,vf510
+ - fsl,vf600
+ - fsl,vf610
+ - fsl,vf610m4
+
+ - description: LS1012A based Boards
+ items:
+ - enum:
+ - ebs-systart,oxalis
+ - fsl,ls1012a-rdb
+ - fsl,ls1012a-frdm
+ - fsl,ls1012a-qds
+ - const: fsl,ls1012a
+
+ - description: LS1021A based Boards
+ items:
+ - enum:
+ - fsl,ls1021a-moxa-uc-8410a
+ - fsl,ls1021a-qds
+ - fsl,ls1021a-twr
+ - const: fsl,ls1021a
+
+ - description: LS1043A based Boards
+ items:
+ - enum:
+ - fsl,ls1043a-rdb
+ - fsl,ls1043a-qds
+ - const: fsl,ls1043a
+
+ - description: LS1046A based Boards
+ items:
+ - enum:
+ - fsl,ls1046a-qds
+ - fsl,ls1046a-rdb
+ - const: fsl,ls1046a
+
+ - description: LS1088A based Boards
+ items:
+ - enum:
+ - fsl,ls1088a-qds
+ - fsl,ls1088a-rdb
+ - const: fsl,ls1088a
+
+ - description: LS2080A based Boards
+ items:
+ - enum:
+ - fsl,ls2080a-simu
+ - fsl,ls2080a-qds
+ - fsl,ls2080a-rdb
+ - const: fsl,ls2080a
+
+ - description: LS2088A based Boards
+ items:
+ - enum:
+ - fsl,ls2088a-qds
+ - fsl,ls2088a-rdb
+ - const: fsl,ls2088a
+
+...
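For readers new to the json-schema format, a board devicetree matching one of the entries above simply lists the board compatible followed by the SoC fallback in its root node. A minimal sketch (the board compatible is taken from the i.MX6Q enum above; the model string is illustrative only):

	/ {
		model = "Freescale i.MX6 Quad SABRE Smart Device Board";
		compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
	};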
diff --git a/Documentation/devicetree/bindings/arm/i2se.txt b/Documentation/devicetree/bindings/arm/i2se.txt
deleted file mode 100644
index dbd54a3aa07d..000000000000
--- a/Documentation/devicetree/bindings/arm/i2se.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-I2SE Device Tree Bindings
--------------------------
-
-Duckbill Board
-Required root node properties:
- - compatible = "i2se,duckbill", "fsl,imx28";
-
-Duckbill 2 Board
-Required root node properties:
- - compatible = "i2se,duckbill-2", "fsl,imx28";
-
-Duckbill 2 485 Board
-Required root node properties:
- - compatible = "i2se,duckbill-2-485", "i2se,duckbill-2", "fsl,imx28";
-
-Duckbill 2 EnOcean Board
-Required root node properties:
- - compatible = "i2se,duckbill-2-enocean", "i2se,duckbill-2", "fsl,imx28";
-
-Duckbill 2 SPI Board
-Required root node properties:
- - compatible = "i2se,duckbill-2-spi", "i2se,duckbill-2", "fsl,imx28";
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 8f0937db55c5..45730ba60af5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -684,7 +684,7 @@ cpus {
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
[2] ARM Linux Kernel documentation - PSCI bindings
Documentation/devicetree/bindings/arm/psci.txt
diff --git a/Documentation/devicetree/bindings/arm/mediatek.txt b/Documentation/devicetree/bindings/arm/mediatek.txt
index 8f260e5cfd16..56ac7896d6d8 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek.txt
@@ -15,11 +15,12 @@ compatible: Must contain one of
"mediatek,mt6795"
"mediatek,mt6797"
"mediatek,mt7622"
- "mediatek,mt7623" which is referred to MT7623N SoC
- "mediatek,mt7623a"
+ "mediatek,mt7623"
+ "mediatek,mt7629"
"mediatek,mt8127"
"mediatek,mt8135"
"mediatek,mt8173"
+ "mediatek,mt8183"
Supported boards:
@@ -57,6 +58,9 @@ Supported boards:
- Reference board variant 1 for MT7622:
Required root node properties:
- compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
+- Bananapi BPI-R64 for MT7622:
+ Required root node properties:
+ - compatible = "bananapi,bpi-r64", "mediatek,mt7622";
- Reference board for MT7623a with eMMC:
Required root node properties:
- compatible = "mediatek,mt7623a-rfb-emmc", "mediatek,mt7623";
@@ -68,6 +72,9 @@ Supported boards:
- compatible = "mediatek,mt7623n-rfb-emmc", "mediatek,mt7623";
- Bananapi BPI-R2 board:
- compatible = "bananapi,bpi-r2", "mediatek,mt7623";
+- Reference board for MT7629:
+ Required root node properties:
+ - compatible = "mediatek,mt7629-rfb", "mediatek,mt7629";
- MTK mt8127 tablet moose EVB:
Required root node properties:
- compatible = "mediatek,mt8127-moose", "mediatek,mt8127";
@@ -77,3 +84,6 @@ Supported boards:
- MTK mt8173 tablet EVB:
Required root node properties:
- compatible = "mediatek,mt8173-evb", "mediatek,mt8173";
+- Evaluation board for MT8183:
+ Required root node properties:
+ - compatible = "mediatek,mt8183-evb", "mediatek,mt8183";
diff --git a/Documentation/devicetree/bindings/arm/olimex.txt b/Documentation/devicetree/bindings/arm/olimex.txt
deleted file mode 100644
index d726aeca56be..000000000000
--- a/Documentation/devicetree/bindings/arm/olimex.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Olimex Device Tree Bindings
----------------------------
-
-SAM9-L9260 Board
-Required root node properties:
- - compatible = "olimex,sam9-l9260", "atmel,at91sam9260";
-
-i.MX23 Olinuxino Low Cost Board
-Required root node properties:
- - compatible = "olimex,imx23-olinuxino", "fsl,imx23";
diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml
new file mode 100644
index 000000000000..19f379863d50
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/renesas.yaml
@@ -0,0 +1,238 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/shmobile.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas SH-Mobile, R-Mobile, and R-Car Platform Device Tree Bindings
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: Emma Mobile EV2
+ items:
+ - enum:
+ - renesas,kzm9d # Kyoto Microcomputer Co. KZM-A9-Dual
+ - const: renesas,emev2
+
+ - description: RZ/A1H (R7S72100)
+ items:
+ - enum:
+ - renesas,genmai # Genmai (RTK772100BC00000BR)
+ - renesas,gr-peach # GR-Peach (X28A-M01-E/F)
+ - renesas,rskrza1 # RSKRZA1 (YR0K77210C000BE)
+ - const: renesas,r7s72100
+
+ - description: RZ/A2 (R7S9210)
+ items:
+ - enum:
+ - renesas,rza2mevb # RZ/A2M Eval Board (RTK7921053S00000BE)
+ - const: renesas,r7s9210
+
+ - description: SH-Mobile AG5 (R8A73A00/SH73A0)
+ items:
+ - enum:
+ - renesas,kzm9g # Kyoto Microcomputer Co. KZM-A9-GT
+ - const: renesas,sh73a0
+
+ - description: R-Mobile APE6 (R8A73A40)
+ items:
+ - enum:
+ - renesas,ape6evm
+ - const: renesas,r8a73a4
+
+ - description: R-Mobile A1 (R8A77400)
+ items:
+ - enum:
+ - renesas,armadillo800eva # Atmark Techno Armadillo-800 EVA
+ - const: renesas,r8a7740
+
+ - description: RZ/G1H (R8A77420)
+ items:
+ - const: renesas,r8a7742
+
+ - description: RZ/G1M (R8A77430)
+ items:
+ - enum:
+ # iWave Systems RZ/G1M Qseven Development Platform (iW-RainboW-G20D-Qseven)
+ - iwave,g20d
+ - const: iwave,g20m
+ - const: renesas,r8a7743
+
+ - items:
+ - enum:
+ # iWave Systems RZ/G1M Qseven System On Module (iW-RainboW-G20M-Qseven)
+ - iwave,g20m
+ - renesas,sk-rzg1m # SK-RZG1M (YR8A77430S000BE)
+ - const: renesas,r8a7743
+
+ - description: RZ/G1N (R8A77440)
+ items:
+ - enum:
+ # iWave Systems RZ/G1N Qseven Development Platform (iW-RainboW-G20D-Qseven)
+ - iwave,g20d
+ - const: iwave,g20m
+ - const: renesas,r8a7744
+
+ - items:
+ - enum:
+ # iWave Systems RZ/G1N Qseven System On Module (iW-RainboW-G20M-Qseven)
+ - iwave,g20m
+ - const: renesas,r8a7744
+
+ - description: RZ/G1E (R8A77450)
+ items:
+ - enum:
+ - iwave,g22m # iWave Systems RZ/G1E SODIMM System On Module (iW-RainboW-G22M-SM)
+ - renesas,sk-rzg1e # SK-RZG1E (YR8A77450S000BE)
+ - const: renesas,r8a7745
+
+ - description: iWave Systems RZ/G1E SODIMM SOM Development Platform (iW-RainboW-G22D)
+ items:
+ - const: iwave,g22d
+ - const: iwave,g22m
+ - const: renesas,r8a7745
+
+ - description: RZ/G1C (R8A77470)
+ items:
+ - enum:
+ - iwave,g23s #iWave Systems RZ/G1C Single Board Computer (iW-RainboW-G23S)
+ - const: renesas,r8a77470
+
+ - description: RZ/G2M (R8A774A1)
+ items:
+ - const: renesas,r8a774a1
+
+ - description: RZ/G2E (R8A774C0)
+ items:
+ - enum:
+ - si-linux,cat874 # Silicon Linux RZ/G2E 96board platform (CAT874)
+ - const: renesas,r8a774c0
+
+ - items:
+ - enum:
+ - si-linux,cat875 # Silicon Linux sub board for CAT874 (CAT875)
+ - const: si-linux,cat874
+ - const: renesas,r8a774c0
+
+ - description: R-Car M1A (R8A77781)
+ items:
+ - enum:
+ - renesas,bockw
+ - const: renesas,r8a7778
+
+ - description: R-Car H1 (R8A77790)
+ items:
+ - enum:
+ - renesas,marzen # Marzen (R0P7779A00010S)
+ - const: renesas,r8a7779
+
+ - description: R-Car H2 (R8A77900)
+ items:
+ - enum:
+ - renesas,lager # Lager (RTP0RC7790SEB00010S)
+ - renesas,stout # Stout (ADAS Starterkit, Y-R-CAR-ADAS-SKH2-BOARD)
+ - const: renesas,r8a7790
+
+ - description: R-Car M2-W (R8A77910)
+ items:
+ - enum:
+ - renesas,henninger
+ - renesas,koelsch # Koelsch (RTP0RC7791SEB00010S)
+ - renesas,porter # Porter (M2-LCDP)
+ - const: renesas,r8a7791
+
+ - description: R-Car V2H (R8A77920)
+ items:
+ - enum:
+ - renesas,blanche # Blanche (RTP0RC7792SEB00010S)
+ - renesas,wheat # Wheat (RTP0RC7792ASKB0000JE)
+ - const: renesas,r8a7792
+
+ - description: R-Car M2-N (R8A77930)
+ items:
+ - enum:
+ - renesas,gose # Gose (RTP0RC7793SEB00010S)
+ - const: renesas,r8a7793
+
+ - description: R-Car E2 (R8A77940)
+ items:
+ - enum:
+ - renesas,alt # Alt (RTP0RC7794SEB00010S)
+ - renesas,silk # SILK (RTP0RC7794LCB00011S)
+ - const: renesas,r8a7794
+
+ - description: R-Car H3 (R8A77950)
+ items:
+ - enum:
+ # H3ULCB (R-Car Starter Kit Premier, RTP0RC7795SKBX0010SA00 (H3 ES1.1))
+ # H3ULCB (R-Car Starter Kit Premier, RTP0RC77951SKBX010SA00 (H3 ES2.0))
+ - renesas,h3ulcb
+ - renesas,salvator-x # Salvator-X (RTP0RC7795SIPB0010S)
+ - renesas,salvator-xs # Salvator-XS (Salvator-X 2nd version, RTP0RC7795SIPB0012S)
+ - const: renesas,r8a7795
+
+ - description: R-Car M3-W (R8A77960)
+ items:
+ - enum:
+ - renesas,m3ulcb # M3ULCB (R-Car Starter Kit Pro, RTP0RC7796SKBX0010SA09 (M3 ES1.0))
+ - renesas,salvator-x # Salvator-X (RTP0RC7796SIPB0011S)
+ - renesas,salvator-xs # Salvator-XS (Salvator-X 2nd version, RTP0RC7796SIPB0012S)
+ - const: renesas,r8a7796
+
+ - description: Kingfisher (SBEV-RCAR-KF-M03)
+ items:
+ - const: shimafuji,kingfisher
+ - enum:
+ - renesas,h3ulcb
+ - renesas,m3ulcb
+ - enum:
+ - renesas,r8a7795
+ - renesas,r8a7796
+
+ - description: R-Car M3-N (R8A77965)
+ items:
+ - enum:
+ - renesas,m3nulcb # M3NULCB (R-Car Starter Kit Pro, RTP0RC77965SKBX010SA00 (M3-N ES1.1))
+ - renesas,salvator-x # Salvator-X (RTP0RC7796SIPB0011S (M3-N))
+ - renesas,salvator-xs # Salvator-XS (Salvator-X 2nd version, RTP0RC77965SIPB012S)
+ - const: renesas,r8a77965
+
+ - description: R-Car V3M (R8A77970)
+ items:
+ - enum:
+ - renesas,eagle # Eagle (RTP0RC77970SEB0010S)
+ - renesas,v3msk # V3MSK (Y-ASK-RCAR-V3M-WS10)
+ - const: renesas,r8a77970
+
+ - description: R-Car V3H (R8A77980)
+ items:
+ - enum:
+ - renesas,condor # Condor (RTP0RC77980SEB0010SS/RTP0RC77980SEB0010SA01)
+ - renesas,v3hsk # V3HSK (Y-ASK-RCAR-V3H-WS10)
+ - const: renesas,r8a77980
+
+ - description: R-Car E3 (R8A77990)
+ items:
+ - enum:
+ - renesas,ebisu # Ebisu (RTP0RC77990SEB0010S)
+ - const: renesas,r8a77990
+
+ - description: R-Car D3 (R8A77995)
+ items:
+ - enum:
+ - renesas,draak # Draak (RTP0RC77995SEB0010S)
+ - const: renesas,r8a77995
+
+ - description: RZ/N1D (R9A06G032)
+ items:
+ - enum:
+ - renesas,rzn1d400-db # RZN1D-DB (RZ/N1D Demo Board for the RZ/N1D 400 pins package)
+ - const: renesas,r9a06g032
+
+...
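The Kingfisher entry above is the one case in this schema where three compatible levels are stacked (expansion board, ULCB board, SoC). A minimal sketch of a matching root node (the model string is illustrative, not taken from this patch):

	/ {
		model = "Shimafuji Kingfisher on R-Car Starter Kit Premier";
		compatible = "shimafuji,kingfisher", "renesas,h3ulcb",
			     "renesas,r8a7795";
	};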
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index b12958bda09c..061a03edf9c8 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -60,6 +60,11 @@ properties:
- const: chipspark,rayeager-px2
- const: rockchip,rk3066a
+ - description: Elgin RV1108 R1
+ items:
+ - const: elgin,rv1108-r1
+ - const: rockchip,rv1108
+
- description: Firefly Firefly-RK3288
items:
- enum:
@@ -87,6 +92,13 @@ properties:
- const: firefly,roc-rk3399-pc
- const: rockchip,rk3399
+ - description: FriendlyElec NanoPi4 series boards
+ items:
+ - enum:
+ - friendlyarm,nanopc-t4
+ - friendlyarm,nanopi-m4
+ - const: rockchip,rk3399
+
- description: GeekBuying GeekBox
items:
- const: geekbuying,geekbox
@@ -317,6 +329,11 @@ properties:
- const: radxa,rock
- const: rockchip,rk3188
+ - description: Radxa ROCK Pi 4
+ items:
+ - const: radxa,rockpi4
+ - const: rockchip,rk3399
+
- description: Radxa Rock2 Square
items:
- const: radxa,rock2-square
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
deleted file mode 100644
index 7f91c2a8b54e..000000000000
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ /dev/null
@@ -1,155 +0,0 @@
-Renesas SH-Mobile, R-Mobile, and R-Car Platform Device Tree Bindings
---------------------------------------------------------------------
-
-SoCs:
-
- - Emma Mobile EV2
- compatible = "renesas,emev2"
- - RZ/A1H (R7S72100)
- compatible = "renesas,r7s72100"
- - RZ/A2 (R7S9210)
- compatible = "renesas,r7s9210"
- - SH-Mobile AG5 (R8A73A00/SH73A0)
- compatible = "renesas,sh73a0"
- - R-Mobile APE6 (R8A73A40)
- compatible = "renesas,r8a73a4"
- - R-Mobile A1 (R8A77400)
- compatible = "renesas,r8a7740"
- - RZ/G1H (R8A77420)
- compatible = "renesas,r8a7742"
- - RZ/G1M (R8A77430)
- compatible = "renesas,r8a7743"
- - RZ/G1N (R8A77440)
- compatible = "renesas,r8a7744"
- - RZ/G1E (R8A77450)
- compatible = "renesas,r8a7745"
- - RZ/G1C (R8A77470)
- compatible = "renesas,r8a77470"
- - RZ/G2M (R8A774A1)
- compatible = "renesas,r8a774a1"
- - RZ/G2E (R8A774C0)
- compatible = "renesas,r8a774c0"
- - R-Car M1A (R8A77781)
- compatible = "renesas,r8a7778"
- - R-Car H1 (R8A77790)
- compatible = "renesas,r8a7779"
- - R-Car H2 (R8A77900)
- compatible = "renesas,r8a7790"
- - R-Car M2-W (R8A77910)
- compatible = "renesas,r8a7791"
- - R-Car V2H (R8A77920)
- compatible = "renesas,r8a7792"
- - R-Car M2-N (R8A77930)
- compatible = "renesas,r8a7793"
- - R-Car E2 (R8A77940)
- compatible = "renesas,r8a7794"
- - R-Car H3 (R8A77950)
- compatible = "renesas,r8a7795"
- - R-Car M3-W (R8A77960)
- compatible = "renesas,r8a7796"
- - R-Car M3-N (R8A77965)
- compatible = "renesas,r8a77965"
- - R-Car V3M (R8A77970)
- compatible = "renesas,r8a77970"
- - R-Car V3H (R8A77980)
- compatible = "renesas,r8a77980"
- - R-Car E3 (R8A77990)
- compatible = "renesas,r8a77990"
- - R-Car D3 (R8A77995)
- compatible = "renesas,r8a77995"
- - RZ/N1D (R9A06G032)
- compatible = "renesas,r9a06g032"
-
-Boards:
-
- - Alt (RTP0RC7794SEB00010S)
- compatible = "renesas,alt", "renesas,r8a7794"
- - APE6-EVM
- compatible = "renesas,ape6evm", "renesas,r8a73a4"
- - Atmark Techno Armadillo-800 EVA
- compatible = "renesas,armadillo800eva", "renesas,r8a7740"
- - Blanche (RTP0RC7792SEB00010S)
- compatible = "renesas,blanche", "renesas,r8a7792"
- - BOCK-W
- compatible = "renesas,bockw", "renesas,r8a7778"
- - Condor (RTP0RC77980SEB0010SS/RTP0RC77980SEB0010SA01)
- compatible = "renesas,condor", "renesas,r8a77980"
- - Draak (RTP0RC77995SEB0010S)
- compatible = "renesas,draak", "renesas,r8a77995"
- - Eagle (RTP0RC77970SEB0010S)
- compatible = "renesas,eagle", "renesas,r8a77970"
- - Ebisu (RTP0RC77990SEB0010S)
- compatible = "renesas,ebisu", "renesas,r8a77990"
- - Genmai (RTK772100BC00000BR)
- compatible = "renesas,genmai", "renesas,r7s72100"
- - GR-Peach (X28A-M01-E/F)
- compatible = "renesas,gr-peach", "renesas,r7s72100"
- - Gose (RTP0RC7793SEB00010S)
- compatible = "renesas,gose", "renesas,r8a7793"
- - H3ULCB (R-Car Starter Kit Premier, RTP0RC7795SKBX0010SA00 (H3 ES1.1))
- H3ULCB (R-Car Starter Kit Premier, RTP0RC77951SKBX010SA00 (H3 ES2.0))
- compatible = "renesas,h3ulcb", "renesas,r8a7795"
- - Henninger
- compatible = "renesas,henninger", "renesas,r8a7791"
- - iWave Systems RZ/G1C Single Board Computer (iW-RainboW-G23S)
- compatible = "iwave,g23s", "renesas,r8a77470"
- - iWave Systems RZ/G1E SODIMM SOM Development Platform (iW-RainboW-G22D)
- compatible = "iwave,g22d", "iwave,g22m", "renesas,r8a7745"
- - iWave Systems RZ/G1E SODIMM System On Module (iW-RainboW-G22M-SM)
- compatible = "iwave,g22m", "renesas,r8a7745"
- - iWave Systems RZ/G1M Qseven Development Platform (iW-RainboW-G20D-Qseven)
- compatible = "iwave,g20d", "iwave,g20m", "renesas,r8a7743"
- - iWave Systems RZ/G1M Qseven System On Module (iW-RainboW-G20M-Qseven)
- compatible = "iwave,g20m", "renesas,r8a7743"
- - iWave Systems RZ/G1N Qseven Development Platform (iW-RainboW-G20D-Qseven)
- compatible = "iwave,g20d", "iwave,g20m", "renesas,r8a7744"
- - iWave Systems RZ/G1N Qseven System On Module (iW-RainboW-G20M-Qseven)
- compatible = "iwave,g20m", "renesas,r8a7744"
- - Kingfisher (SBEV-RCAR-KF-M03)
- compatible = "shimafuji,kingfisher"
- - Koelsch (RTP0RC7791SEB00010S)
- compatible = "renesas,koelsch", "renesas,r8a7791"
- - Kyoto Microcomputer Co. KZM-A9-Dual
- compatible = "renesas,kzm9d", "renesas,emev2"
- - Kyoto Microcomputer Co. KZM-A9-GT
- compatible = "renesas,kzm9g", "renesas,sh73a0"
- - Lager (RTP0RC7790SEB00010S)
- compatible = "renesas,lager", "renesas,r8a7790"
- - M3ULCB (R-Car Starter Kit Pro, RTP0RC7796SKBX0010SA09 (M3 ES1.0))
- compatible = "renesas,m3ulcb", "renesas,r8a7796"
- - M3NULCB (R-Car Starter Kit Pro, RTP0RC77965SKBX010SA00 (M3-N ES1.1))
- compatible = "renesas,m3nulcb", "renesas,r8a77965"
- - Marzen (R0P7779A00010S)
- compatible = "renesas,marzen", "renesas,r8a7779"
- - Porter (M2-LCDP)
- compatible = "renesas,porter", "renesas,r8a7791"
- - RSKRZA1 (YR0K77210C000BE)
- compatible = "renesas,rskrza1", "renesas,r7s72100"
- - RZN1D-DB (RZ/N1D Demo Board for the RZ/N1D 400 pins package)
- compatible = "renesas,rzn1d400-db", "renesas,r9a06g032"
- - Salvator-X (RTP0RC7795SIPB0010S)
- compatible = "renesas,salvator-x", "renesas,r8a7795"
- - Salvator-X (RTP0RC7796SIPB0011S)
- compatible = "renesas,salvator-x", "renesas,r8a7796"
- - Salvator-X (RTP0RC7796SIPB0011S (M3-N))
- compatible = "renesas,salvator-x", "renesas,r8a77965"
- - Salvator-XS (Salvator-X 2nd version, RTP0RC7795SIPB0012S)
- compatible = "renesas,salvator-xs", "renesas,r8a7795"
- - Salvator-XS (Salvator-X 2nd version, RTP0RC7796SIPB0012S)
- compatible = "renesas,salvator-xs", "renesas,r8a7796"
- - Salvator-XS (Salvator-X 2nd version, RTP0RC77965SIPB012S)
- compatible = "renesas,salvator-xs", "renesas,r8a77965"
- - SILK (RTP0RC7794LCB00011S)
- compatible = "renesas,silk", "renesas,r8a7794"
- - SK-RZG1E (YR8A77450S000BE)
- compatible = "renesas,sk-rzg1e", "renesas,r8a7745"
- - SK-RZG1M (YR8A77430S000BE)
- compatible = "renesas,sk-rzg1m", "renesas,r8a7743"
- - Stout (ADAS Starterkit, Y-R-CAR-ADAS-SKH2-BOARD)
- compatible = "renesas,stout", "renesas,r8a7790"
- - V3HSK (Y-ASK-RCAR-V3H-WS10)
- compatible = "renesas,v3hsk", "renesas,r8a77980"
- - V3MSK (Y-ASK-RCAR-V3M-WS10)
- compatible = "renesas,v3msk", "renesas,r8a77970"
- - Wheat (RTP0RC7792ASKB0000JE)
- compatible = "renesas,wheat", "renesas,r8a7792"
diff --git a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml
new file mode 100644
index 000000000000..aae53fc3cb1e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/milbeaut.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Milbeaut platforms device tree bindings
+
+maintainers:
+ - Taichi Sugaya <sugaya.taichi@socionext.com>
+ - Takao Orito <orito.takao@socionext.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - socionext,milbeaut-m10v-evb
+ - const: socionext,sc2000a
+...
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 1b2ab1ff5587..46652bf65147 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -4,7 +4,7 @@ SP810 System Controller
Required properties:
- compatible: standard compatible string for a Primecell peripheral,
- see Documentation/devicetree/bindings/arm/primecell.txt
+ see Documentation/devicetree/bindings/arm/primecell.yaml
for more details
should be: "arm,sp810", "arm,primecell"
diff --git a/Documentation/devicetree/bindings/arm/technologic.txt b/Documentation/devicetree/bindings/arm/technologic.txt
deleted file mode 100644
index f1cedc00dcab..000000000000
--- a/Documentation/devicetree/bindings/arm/technologic.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Technologic Systems Platforms Device Tree Bindings
---------------------------------------------------
-
-TS-4600 is a System-on-Module based on the Freescale i.MX28 System-on-Chip.
-It can be mounted on a carrier board providing additional peripheral connectors.
-Required root node properties:
- - compatible = "technologic,imx28-ts4600", "fsl,imx28"
-
-TS-4800 board
-Required root node properties:
- - compatible = "technologic,imx51-ts4800", "fsl,imx51";
-
-TS-4900 is a System-on-Module based on the Freescale i.MX6 System-on-Chip.
-It can be mounted on a carrier board providing additional peripheral connectors.
-Required root node properties:
- - compatible = "technologic,imx6dl-ts4900", "fsl,imx6dl"
- - compatible = "technologic,imx6q-ts4900", "fsl,imx6q"
-
-TS-7970 is a System-on-Module based on the Freescale i.MX6 System-on-Chip.
-It can be mounted on a carrier board providing additional peripheral connectors.
-Required root node properties:
- - compatible = "technologic,imx6dl-ts7970", "fsl,imx6dl"
- - compatible = "technologic,imx6q-ts7970", "fsl,imx6q"
diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
index fbcde8a7e067..60b38eb5c61a 100644
--- a/Documentation/devicetree/bindings/arm/tegra.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra.yaml
@@ -87,9 +87,11 @@ properties:
- const: nvidia,tegra124
- items:
- enum:
+ - nvidia,darcy
- nvidia,p2371-0000
- nvidia,p2371-2180
- nvidia,p2571
+ - nvidia,p2894-0050-a08
- const: nvidia,tegra210
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index de9eb0486630..b0d80c0fb265 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -472,4 +472,4 @@ cpus {
===============================================================================
[1] ARM Linux kernel documentation
- Documentation/devicetree/bindings/arm/cpus.txt
+ Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/bus/imx-weim.txt b/Documentation/devicetree/bindings/bus/imx-weim.txt
index 683eaf3aed79..dda7d6d66479 100644
--- a/Documentation/devicetree/bindings/bus/imx-weim.txt
+++ b/Documentation/devicetree/bindings/bus/imx-weim.txt
@@ -47,9 +47,9 @@ Optional properties:
Timing property for child nodes. It is mandatory, not optional.
- fsl,weim-cs-timing: The timing array, contains timing values for the
- child node. We can get the CS index from the child
- node's "reg" property. The number of registers depends
- on the selected chip.
+ child node. We get the CS indexes from the address
+ ranges in the child node's "reg" property.
+ The number of registers depends on the selected chip:
For i.MX1, i.MX21 ("fsl,imx1-weim") there are two
registers: CSxU, CSxL.
For i.MX25, i.MX27, i.MX31 and i.MX35 ("fsl,imx27-weim")
@@ -80,3 +80,29 @@ Example for an imx6q-sabreauto board, the NOR flash connected to the WEIM:
0x0000c000 0x1404a38e 0x00000000>;
};
};
+
+Example for an imx6q-based board, a multi-chipselect device connected to WEIM:
+
+In this case, both chip select 0 and 1 will be configured with the same timing
+array values.
+
+ weim: weim@21b8000 {
+ compatible = "fsl,imx6q-weim";
+ reg = <0x021b8000 0x4000>;
+ clocks = <&clks 196>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x08000000 0x02000000
+ 1 0 0x0a000000 0x02000000
+ 2 0 0x0c000000 0x02000000
+ 3 0 0x0e000000 0x02000000>;
+ fsl,weim-cs-gpr = <&gpr>;
+
+ acme@0 {
+ compatible = "acme,whatever";
+ reg = <0 0 0x100>, <0 0x400000 0x800>,
+ <1 0x400000 0x800>;
+ fsl,weim-cs-timing = <0x024400b1 0x00001010 0x20081100
+ 0x00000000 0xa0000240 0x00000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
index af376a01f2b7..23b52dc02266 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
@@ -18,4 +18,4 @@ Required Properties:
Each clock is assigned an identifier and client nodes use this identifier
to specify the clock which they consume.
-All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>.
+All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt
index dff236f524a7..958e0ad78c52 100644
--- a/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt
+++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt
@@ -8,10 +8,11 @@ the fast CPU cluster. It consists of a free-running voltage controlled
oscillator connected to the CPU voltage rail (VDD_CPU), and a closed loop
control module that will automatically adjust the VDD_CPU voltage by
communicating with an off-chip PMIC either via an I2C bus or via PWM signals.
-Currently only the I2C mode is supported by these bindings.
Required properties:
-- compatible : should be "nvidia,tegra124-dfll"
+- compatible : should be one of:
+ - "nvidia,tegra124-dfll": for Tegra124
+ - "nvidia,tegra210-dfll": for Tegra210
- reg : Defines the following set of registers, in the order listed:
- registers for the DFLL control logic.
- registers for the I2C output logic.
@@ -45,10 +46,31 @@ Required properties for the control loop parameters:
Optional properties for the control loop parameters:
- nvidia,cg-scale: Boolean value, see the field DFLL_PARAMS_CG_SCALE in the TRM.
+Optional properties for mode selection:
+- nvidia,pwm-to-pmic: Use PWM to control the regulator rather than I2C.
+
Required properties for I2C mode:
- nvidia,i2c-fs-rate: I2C transfer rate, if using full speed mode.
-Example:
+Required properties for PWM mode:
+- nvidia,pwm-period-nanoseconds: period of PWM square wave in nanoseconds.
+- nvidia,pwm-tristate-microvolts: Regulator voltage in micro volts when PWM
+ control is disabled and the PWM output is tristated. Note that this voltage is
+ configured in hardware, typically via a resistor divider.
+- nvidia,pwm-min-microvolts: Regulator voltage in micro volts when PWM control
+ is enabled and PWM output is low. Hence, this is the minimum output voltage
+ that the regulator supports when PWM control is enabled.
+- nvidia,pwm-voltage-step-microvolts: Voltage increase in micro volts
+  corresponding to a 1/33rd increase in duty cycle. E.g. the voltage for a 2/33rd
+ duty cycle would be: nvidia,pwm-min-microvolts +
+ nvidia,pwm-voltage-step-microvolts * 2.
+- pinctrl-0: I/O pad configuration when PWM control is enabled.
+- pinctrl-1: I/O pad configuration when PWM control is disabled.
+- pinctrl-names: must include the following entries:
+ - dvfs_pwm_enable: I/O pad configuration when PWM control is enabled.
+ - dvfs_pwm_disable: I/O pad configuration when PWM control is disabled.
+
+Example for I2C:
clock@70110000 {
compatible = "nvidia,tegra124-dfll";
@@ -76,3 +98,58 @@ clock@70110000 {
nvidia,i2c-fs-rate = <400000>;
};
+
+Example for PWM:
+
+clock@70110000 {
+ compatible = "nvidia,tegra124-dfll";
+ reg = <0 0x70110000 0 0x100>, /* DFLL control */
+ <0 0x70110000 0 0x100>, /* I2C output control */
+ <0 0x70110100 0 0x100>, /* Integrated I2C controller */
+ <0 0x70110200 0 0x100>; /* Look-up table RAM */
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_DFLL_SOC>,
+ <&tegra_car TEGRA210_CLK_DFLL_REF>,
+		 <&tegra_car TEGRA124_CLK_I2C5>;
+ clock-names = "soc", "ref", "i2c";
+ resets = <&tegra_car TEGRA124_RST_DFLL_DVCO>;
+ reset-names = "dvco";
+ #clock-cells = <0>;
+ clock-output-names = "dfllCPU_out";
+
+ nvidia,sample-rate = <25000>;
+ nvidia,droop-ctrl = <0x00000f00>;
+ nvidia,force-mode = <1>;
+ nvidia,cf = <6>;
+ nvidia,ci = <0>;
+ nvidia,cg = <2>;
+
+ nvidia,pwm-min-microvolts = <708000>; /* 708mV */
+ nvidia,pwm-period-nanoseconds = <2500>; /* 2.5us */
+ nvidia,pwm-to-pmic;
+ nvidia,pwm-tristate-microvolts = <1000000>;
+ nvidia,pwm-voltage-step-microvolts = <19200>; /* 19.2mV */
+
+ pinctrl-names = "dvfs_pwm_enable", "dvfs_pwm_disable";
+ pinctrl-0 = <&dvfs_pwm_active_state>;
+ pinctrl-1 = <&dvfs_pwm_inactive_state>;
+};
+
+/* pinmux nodes added for completeness. Binding doc can be found in:
+ * Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt
+ */
+
+pinmux: pinmux@700008d4 {
+ dvfs_pwm_active_state: dvfs_pwm_active {
+ dvfs_pwm_pbb1 {
+ nvidia,pins = "dvfs_pwm_pbb1";
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+ };
+ dvfs_pwm_inactive_state: dvfs_pwm_inactive {
+ dvfs_pwm_pbb1 {
+ nvidia,pins = "dvfs_pwm_pbb1";
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+ };
+};
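As a quick cross-check of the voltage-step formula documented above (not part of the binding itself), plugging in the values from this PWM example (708000 uV minimum, 19200 uV per step) for two hypothetical duty cycles gives:

	2/33 duty cycle:  708000 + 2 * 19200  = 746400 uV
	10/33 duty cycle: 708000 + 10 * 19200 = 900000 uV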
diff --git a/Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt
index b1669fbfb740..03196d5ea515 100644
--- a/Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt
+++ b/Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt
@@ -9,11 +9,9 @@ Required properties:
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- cpu_g: Clock mux for the fast CPU cluster.
- - cpu_lp: Clock mux for the low-power CPU cluster.
- pll_x: Fast PLL clocksource.
- pll_p: Auxiliary PLL used during fast PLL rate changes.
- dfll: Fast DFLL clocksource that also automatically scales CPU voltage.
-- vdd-cpu-supply: Regulator for CPU voltage
Optional properties:
- clock-latency: Specify the possible maximum transition latency for clock,
@@ -31,13 +29,11 @@ cpus {
reg = <0>;
clocks = <&tegra_car TEGRA124_CLK_CCLK_G>,
- <&tegra_car TEGRA124_CLK_CCLK_LP>,
<&tegra_car TEGRA124_CLK_PLL_X>,
<&tegra_car TEGRA124_CLK_PLL_P>,
<&dfll>;
- clock-names = "cpu_g", "cpu_lp", "pll_x", "pll_p", "dfll";
+ clock-names = "cpu_g", "pll_x", "pll_p", "dfll";
clock-latency = <300000>;
- vdd-cpu-supply: <&vdd_cpu>;
};
<...>
diff --git a/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt b/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt
new file mode 100644
index 000000000000..7ec9a5a7727a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt
@@ -0,0 +1,19 @@
+Samsung SoC SlimSSS (Slim Security SubSystem) module
+
+The SlimSSS module in Exynos5433 SoC supports the following:
+-- Feeder (FeedCtrl)
+-- Advanced Encryption Standard (AES) with ECB, CBC, CTR, XTS and (CBC/XTS)/CTS
+-- SHA-1/SHA-256 and (SHA-1/SHA-256)/HMAC
+
+Required properties:
+
+- compatible : Should contain an entry for the SlimSSS version:
+ - "samsung,exynos5433-slim-sss" for Exynos5433 SoC.
+- reg : Offset and length of the register set for the module
+- interrupts : interrupt specifiers of SlimSSS module interrupts (one feed
+ control interrupt).
+
+- clocks : list of clock phandle and specifier pairs for all clocks listed in
+ clock-names property.
+- clock-names : list of device clock input names; should contain "pclk" and
+ "aclk" for slim-sss in Exynos5433.
diff --git a/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt b/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt
new file mode 100644
index 000000000000..aaa6c24c8e70
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt
@@ -0,0 +1,33 @@
+Meson specific Simple Framebuffer bindings
+
+This binding documents meson specific extensions to the simple-framebuffer
+bindings. The meson simplefb u-boot code relies on the devicetree containing
+pre-populated simplefb nodes.
+
+These extensions are intended so that u-boot can select the right node based
+on which pipeline is being used. As such they are solely intended for
+firmware / bootloader use, and the OS should ignore them.
+
+Required properties:
+- compatible: "amlogic,simple-framebuffer", "simple-framebuffer"
+- amlogic,pipeline, one of:
+ "vpu-cvbs"
+ "vpu-hdmi"
+
+Example:
+
+chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ simplefb_hdmi: framebuffer-hdmi {
+ compatible = "amlogic,simple-framebuffer",
+ "simple-framebuffer";
+ amlogic,pipeline = "vpu-hdmi";
+ clocks = <&clkc CLKID_HDMI_PCLK>,
+ <&clkc CLKID_CLK81>,
+ <&clkc CLKID_GCLK_VENCI_INT0>;
+ power-domains = <&pwrc_vpu>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt
index ef89ab46b2c9..572fa2773ec4 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.txt
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt
@@ -1,6 +1,6 @@
* ARM PrimeCell Color LCD Controller PL110/PL111
-See also Documentation/devicetree/bindings/arm/primecell.txt
+See also Documentation/devicetree/bindings/arm/primecell.yaml
Required properties:
diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt
index f5725bb6c61c..525a4bfd8634 100644
--- a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt
@@ -31,28 +31,7 @@ Required subnodes:
- one subnode per DSI device connected on the DSI bus. Each DSI device should
contain a reg property encoding its virtual channel.
-Cadence DPHY
-============
-
-Cadence DPHY block.
-
-Required properties:
-- compatible: should be set to "cdns,dphy".
-- reg: physical base address and length of the DPHY registers.
-- clocks: DPHY reference clocks.
-- clock-names: must contain "psm" and "pll_ref".
-- #phy-cells: must be set to 0.
-
-
Example:
- dphy0: dphy@fd0e0000{
- compatible = "cdns,dphy";
- reg = <0x0 0xfd0e0000 0x0 0x1000>;
- clocks = <&psm_clk>, <&pll_ref_clk>;
- clock-names = "psm", "pll_ref";
- #phy-cells = <0>;
- };
-
dsi0: dsi@fd0c0000 {
compatible = "cdns,dsi";
reg = <0x0 0xfd0c0000 0x0 0x1000>;
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index ac8df3b871f9..f8759145ce1a 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -27,7 +27,6 @@ Example:
reg = <0x04300000 0x20000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <GIC_SPI 80 0>;
- interrupt-names = "kgsl_3d0_irq";
clock-names =
"core",
"iface",
diff --git a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt
new file mode 100644
index 000000000000..936fbdf12815
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt
@@ -0,0 +1,27 @@
+* PTN5150 CC (Configuration Channel) Logic device
+
+PTN5150 is a small thin low power CC logic chip supporting the USB Type-C
+connector application with CC control logic detection and indication functions.
+It is interfaced to the host controller using an I2C interface.
+
+Required properties:
+- compatible: should be "nxp,ptn5150"
+- reg: specifies the I2C slave address of the device
+- int-gpio: should contain a phandle and GPIO specifier for the GPIO pin
+ connected to the PTN5150's INTB pin.
+- vbus-gpio: should contain a phandle and GPIO specifier for the GPIO pin which
+ is used to control VBUS.
+- pinctrl-names : a pinctrl state named "default" must be defined.
+- pinctrl-0 : phandle referencing pin configuration of interrupt and vbus
+ control.
+
+Example:
+ ptn5150@1d {
+ compatible = "nxp,ptn5150";
+ reg = <0x1d>;
+ int-gpio = <&msmgpio 78 GPIO_ACTIVE_HIGH>;
+ vbus-gpio = <&msmgpio 148 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ptn5150_default>;
+ status = "okay";
+ };
diff --git a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
index 0c10802c8327..ff380dadb5f9 100644
--- a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
+++ b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
@@ -8,7 +8,6 @@ which can create the interprocessor communication (IPC) between the CPU
and BPMP.
Required properties:
-- name : Should be bpmp
- compatible
Array of strings
One of:
diff --git a/Documentation/devicetree/bindings/firmware/nvidia,tegra210-bpmp.txt b/Documentation/devicetree/bindings/firmware/nvidia,tegra210-bpmp.txt
new file mode 100644
index 000000000000..68d814e8c09d
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/nvidia,tegra210-bpmp.txt
@@ -0,0 +1,35 @@
+NVIDIA Tegra210 Boot and Power Management Processor (BPMP)
+
+The Boot and Power Management Processor (BPMP) is a co-processor found
+in Tegra210 SoC. It is designed to handle the early stages of the boot
+process as well as to assisting in entering deep low power state
+(suspend to ram), and also offloading DRAM memory clock scaling on
+some platforms. The binding document defines the resources that would
+be used by the BPMP T210 firmware driver, which can create the
+interprocessor communication (IPC) between the CPU and BPMP.
+
+Required properties:
+- compatible
+ Array of strings
+ One of:
+ - "nvidia,tegra210-bpmp"
+- reg: physical base address and length for HW synchronization primitives
+ 1) base address and length to Tegra 'atomics' hardware
+ 2) base address and length to Tegra 'semaphore' hardware
+- interrupts: specifies the interrupt number for receiving messages ("rx")
+ and for triggering messages ("tx")
+
+Optional properties:
+- #clock-cells : Should be 1 for platforms where DRAM clock control is
+ offloaded to bpmp.
+
+Example:
+
+bpmp@70016000 {
+ compatible = "nvidia,tegra210-bpmp";
+ reg = <0x0 0x70016000 0x0 0x2000
+ 0x0 0x60001000 0x0 0x1000>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "tx", "rx";
+};
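For platforms that offload DRAM clock scaling, the only addition to the node above is the optional #clock-cells property plus a clock reference from the consumer. A hedged sketch, where the clock specifier value 0 and the consumer node below are purely illustrative and not defined by this binding:

	bpmp: bpmp@70016000 {
		compatible = "nvidia,tegra210-bpmp";
		reg = <0x0 0x70016000 0x0 0x2000
		       0x0 0x60001000 0x0 0x1000>;
		interrupts = <GIC_SPI 6 IRQ_TYPE_EDGE_RISING>,
			     <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
		interrupt-names = "tx", "rx";
		#clock-cells = <1>;
	};

	memory-controller@7001b000 {
		clocks = <&bpmp 0>;
		clock-names = "emc";
	};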
diff --git a/Documentation/devicetree/bindings/gnss/gnss.txt b/Documentation/devicetree/bindings/gnss/gnss.txt
index f1e4a2ff47c5..f547bd4549fe 100644
--- a/Documentation/devicetree/bindings/gnss/gnss.txt
+++ b/Documentation/devicetree/bindings/gnss/gnss.txt
@@ -17,6 +17,7 @@ Required properties:
represents
Optional properties:
+- lna-supply : Separate supply for an LNA
- enable-gpios : GPIO used to enable the device
- timepulse-gpios : Time pulse GPIO
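A hedged sketch of how the new lna-supply property slots into a GNSS child node, following the example pattern used elsewhere in this directory (the receiver compatible and regulator labels are placeholders, not mandated by this patch):

	serial@1234 {
		compatible = "ns16550a";

		gnss {
			compatible = "u-blox,neo-8";
			vcc-supply = <&vcc_3v3>;
			lna-supply = <&vcc_lna>;
		};
	};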
diff --git a/Documentation/devicetree/bindings/gnss/mediatek.txt b/Documentation/devicetree/bindings/gnss/mediatek.txt
new file mode 100644
index 000000000000..80cb802813c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/gnss/mediatek.txt
@@ -0,0 +1,35 @@
+Mediatek-based GNSS Receiver DT binding
+
+Mediatek chipsets are used in GNSS-receiver modules produced by several
+vendors and can use a UART interface.
+
+Please see Documentation/devicetree/bindings/gnss/gnss.txt for generic
+properties.
+
+Required properties:
+
+- compatible : Must be
+
+ "globaltop,pa6h"
+
+- vcc-supply : Main voltage regulator (pin name: VCC)
+
+Optional properties:
+
+- current-speed : Default UART baud rate
+- gnss-fix-gpios : GPIO used to determine device position fix state
+ (pin name: FIX, 3D_FIX)
+- reset-gpios : GPIO used to reset the device (pin name: RESET, NRESET)
+- timepulse-gpios : Time pulse GPIO (pin name: PPS1, 1PPS)
+- vbackup-supply : Backup voltage regulator (pin name: VBAT, VBACKUP)
+
+Example:
+
+serial@1234 {
+ compatible = "ns16550a";
+
+ gnss {
+ compatible = "globaltop,pa6h";
+ vcc-supply = <&vcc_3v3>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gnss/sirfstar.txt b/Documentation/devicetree/bindings/gnss/sirfstar.txt
index 648d183cdb77..f4252b6b660b 100644
--- a/Documentation/devicetree/bindings/gnss/sirfstar.txt
+++ b/Documentation/devicetree/bindings/gnss/sirfstar.txt
@@ -12,6 +12,7 @@ Required properties:
"fastrax,uc430"
"linx,r4"
+ "wi2wi,w2sg0004"
"wi2wi,w2sg0008i"
"wi2wi,w2sg0084i"
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 38ca2201e8ae..2e097b57f170 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -14,8 +14,6 @@ Required properties:
"marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
SoCs (either from AP or CP), see
- Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
- and
Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
for specific details about the offset property.
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt
index 3f128e4f95c6..ae63f09fda7d 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt
@@ -13,6 +13,8 @@ Required properties:
+ allwinner,sun8i-h3-mali
+ allwinner,sun50i-a64-mali
+ allwinner,sun50i-h5-mali
+ + amlogic,meson8-mali
+ + amlogic,meson8b-mali
+ amlogic,meson-gxbb-mali
+ amlogic,meson-gxl-mali
+ rockchip,rk3036-mali
@@ -82,6 +84,10 @@ to specify one more vendor-specific compatible, among:
Required properties:
* resets: phandle to the reset line for the GPU
+ - amlogic,meson8-mali and amlogic,meson8b-mali
+ Required properties:
+ * resets: phandle to the reset line for the GPU
+
- Rockchip variants:
Required properties:
* resets: phandle to the reset line for the GPU
diff --git a/Documentation/devicetree/bindings/hwmon/ad741x.txt b/Documentation/devicetree/bindings/hwmon/ad741x.txt
new file mode 100644
index 000000000000..9102152c8410
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ad741x.txt
@@ -0,0 +1,15 @@
+* AD7416/AD7417/AD7418 Temperature Sensor Device Tree Bindings
+
+Required properties:
+- compatible: one of
+ "adi,ad7416"
+ "adi,ad7417"
+ "adi,ad7418"
+- reg: I2C address
+
+Example:
+
+hwmon@28 {
+ compatible = "adi,ad7418";
+ reg = <0x28>;
+};
diff --git a/Documentation/devicetree/bindings/hwmon/dps650ab.txt b/Documentation/devicetree/bindings/hwmon/dps650ab.txt
new file mode 100644
index 000000000000..76780e795899
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/dps650ab.txt
@@ -0,0 +1,11 @@
+Bindings for Delta Electronics DPS-650-AB power supply
+
+Required properties:
+- compatible : "delta,dps650ab"
+- reg : I2C address, one of 0x58, 0x59.
+
+Example:
+ dps650ab@58 {
+ compatible = "delta,dps650ab";
+ reg = <0x58>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/hih6130.txt b/Documentation/devicetree/bindings/hwmon/hih6130.txt
new file mode 100644
index 000000000000..2c43837af4c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/hih6130.txt
@@ -0,0 +1,12 @@
+Honeywell Humidicon HIH-6130 humidity/temperature sensor
+--------------------------------------------------------
+
+Required node properties:
+- compatible : "honeywell,hih6130"
+- reg : the I2C address of the device. This is 0x27.
+
+Example:
+ hih6130@27 {
+ compatible = "honeywell,hih6130";
+ reg = <0x27>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/ina3221.txt b/Documentation/devicetree/bindings/hwmon/ina3221.txt
index a7b25caa2b8e..fa63b6171407 100644
--- a/Documentation/devicetree/bindings/hwmon/ina3221.txt
+++ b/Documentation/devicetree/bindings/hwmon/ina3221.txt
@@ -6,6 +6,16 @@ Texas Instruments INA3221 Device Tree Bindings
- reg: I2C address
Optional properties:
+  - ti,single-shot: This chip has two power modes: single-shot (the chip takes
+                    one measurement and then shuts itself down) and continuous
+                    (the chip takes measurements continuously). The continuous
+                    mode is more reliable and suited to hardware-monitor type
+                    devices, while the single-shot mode is more power-friendly
+                    and useful for battery-powered devices that care about
+                    power consumption but still need occasional measurements.
+                    If this property is present, the single-shot mode is used
+                    instead of the default continuous one for monitoring.
+
= The node contains optional child nodes for three channels =
= Each child node describes the information of input source =
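A hedged sketch of a node using the new ti,single-shot flag; the I2C address and the channel details are illustrative only, and the existing example in this file remains the reference:

	ina3221@40 {
		compatible = "ti,ina3221";
		reg = <0x40>;
		#address-cells = <1>;
		#size-cells = <0>;
		ti,single-shot;

		input@0 {
			reg = <0x0>;
			label = "CPU";
			shunt-resistor-micro-ohms = <5000>;
		};
	};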
diff --git a/Documentation/devicetree/bindings/hwmon/lm75.txt b/Documentation/devicetree/bindings/hwmon/lm75.txt
new file mode 100644
index 000000000000..12d8cf7cf592
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/lm75.txt
@@ -0,0 +1,37 @@
+*LM75 hwmon sensor.
+
+Required properties:
+- compatible: manufacturer and chip name, one of
+ "adi,adt75",
+ "dallas,ds1775",
+ "dallas,ds75",
+ "dallas,ds7505",
+ "gmt,g751",
+ "national,lm75",
+ "national,lm75a",
+ "national,lm75b",
+ "maxim,max6625",
+ "maxim,max6626",
+ "maxim,max31725",
+ "maxim,max31726",
+ "maxim,mcp980x",
+ "st,stds75",
+ "st,stlm75",
+ "microchip,tcn75",
+ "ti,tmp100",
+ "ti,tmp101",
+ "ti,tmp105",
+ "ti,tmp112",
+ "ti,tmp175",
+ "ti,tmp275",
+ "ti,tmp75",
+ "ti,tmp75c",
+
+- reg: I2C bus address of the device
+
+Example:
+
+sensor@48 {
+ compatible = "st,stlm75";
+ reg = <0x48>;
+};
diff --git a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
index c6d533202d3e..49ca5d83ed13 100644
--- a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
+++ b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
@@ -6,6 +6,9 @@ Required properties:
- cooling-levels : PWM duty cycle values in a range from 0 to 255
which correspond to thermal cooling states
+Optional properties:
+- fan-supply : phandle to the regulator that provides power to the fan
+
Example:
fan0: pwm-fan {
compatible = "pwm-fan";
diff --git a/Documentation/devicetree/bindings/iio/accel/mma8452.txt b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
index 2100e9af379c..e132394375a1 100644
--- a/Documentation/devicetree/bindings/iio/accel/mma8452.txt
+++ b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
@@ -20,6 +20,10 @@ Optional properties:
- interrupt-names: should contain "INT1" and/or "INT2", the accelerometer's
interrupt line in use.
+ - vdd-supply: phandle to the regulator that provides vdd power to the accelerometer.
+
+ - vddio-supply: phandle to the regulator that provides vddio power to the accelerometer.
+
Example:
mma8453fc@1d {
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
new file mode 100644
index 000000000000..d7b6241ca881
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
@@ -0,0 +1,65 @@
+Analog Devices AD7606 Simultaneous Sampling ADC
+
+Required properties for the AD7606:
+
+- compatible: Must be one of
+ * "adi,ad7605-4"
+ * "adi,ad7606-8"
+ * "adi,ad7606-6"
+ * "adi,ad7606-4"
+- reg: SPI chip select number for the device
+- spi-max-frequency: Max SPI frequency to use
+ see: Documentation/devicetree/bindings/spi/spi-bus.txt
+- spi-cpha: See Documentation/devicetree/bindings/spi/spi-bus.txt
+- avcc-supply: phandle to the Avcc power supply
+- interrupts: IRQ line for the ADC
+ see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+- adi,conversion-start-gpios: must be the device tree identifier of the CONVST pin.
+ This logic input is used to initiate conversions on the analog
+ input channels. As the line is active high, it should be marked
+ GPIO_ACTIVE_HIGH.
+
+Optional properties:
+
+- reset-gpios: must be the device tree identifier of the RESET pin. If specified,
+ it will be asserted during driver probe. As the line is active high,
+ it should be marked GPIO_ACTIVE_HIGH.
+- standby-gpios: must be the device tree identifier of the STBY pin. This pin is used
+ to place the AD7606 into one of two power-down modes, Standby mode or
+ Shutdown mode. As the line is active low, it should be marked
+ GPIO_ACTIVE_LOW.
+- adi,first-data-gpios: must be the device tree identifier of the FRSTDATA pin.
+ The FRSTDATA output indicates when the first channel, V1, is
+ being read back on either the parallel, byte or serial interface.
+ As the line is active high, it should be marked GPIO_ACTIVE_HIGH.
+- adi,range-gpios: must be the device tree identifier of the RANGE pin. The polarity on
+ this pin determines the input range of the analog input channels. If
+ this pin is tied to a logic high, the analog input range is ±10V for
+ all channels. If this pin is tied to a logic low, the analog input range
+ is ±5V for all channels. As the line is active high, it should be marked
+ GPIO_ACTIVE_HIGH.
+- adi,oversampling-ratio-gpios: must be the device tree identifier of the over-sampling
+ mode pins. As the line is active high, it should be marked
+ GPIO_ACTIVE_HIGH.
+
+Example:
+
+ adc@0 {
+ compatible = "adi,ad7606-8";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cpol;
+
+ avcc-supply = <&adc_vref>;
+
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-parent = <&gpio>;
+
+ adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
+ adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+ adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH
+ &gpio 23 GPIO_ACTIVE_HIGH
+ &gpio 26 GPIO_ACTIVE_HIGH>;
+ standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt b/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt
new file mode 100644
index 000000000000..9f5b88cf680d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt
@@ -0,0 +1,41 @@
+Analog Devices AD7768-1 ADC device driver
+
+Required properties for the AD7768-1:
+
+- compatible: Must be "adi,ad7768-1"
+- reg: SPI chip select number for the device
+- spi-max-frequency: Max SPI frequency to use
+ see: Documentation/devicetree/bindings/spi/spi-bus.txt
+- clocks: phandle to the master clock (mclk)
+ see: Documentation/devicetree/bindings/clock/clock-bindings.txt
+- clock-names: Must be "mclk".
+- interrupts: IRQ line for the ADC
+ see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+- vref-supply: The regulator supply used as the voltage reference for conversions
+- adi,sync-in-gpios: must be the device tree identifier of the SYNC-IN pin. Enables
+ synchronization of multiple devices that require simultaneous sampling.
+ A pulse is always required if the configuration is changed in any way, for example
+ if the filter decimation rate changes. As the line is active low, it should
+ be marked GPIO_ACTIVE_LOW.
+
+Optional properties:
+
+ - reset-gpios : GPIO spec for the RESET pin. If specified, it will be asserted during
+ driver probe. As the line is active low, it should be marked GPIO_ACTIVE_LOW.
+
+Example:
+
+ adc@0 {
+ compatible = "adi,ad7768-1";
+ reg = <0>;
+ spi-max-frequency = <2000000>;
+ spi-cpol;
+ spi-cpha;
+ vref-supply = <&adc_vref>;
+ interrupts = <25 IRQ_TYPE_EDGE_RISING>;
+ interrupt-parent = <&gpio>;
+ adi,sync-in-gpios = <&gpio 22 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ clocks = <&ad7768_mclk>;
+ clock-names = "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
index 325090e43ce6..75c775954102 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
@@ -23,6 +23,10 @@ Required properties:
- #io-channel-cells: must be 1, see ../iio-bindings.txt
Optional properties:
+- amlogic,hhi-sysctrl: phandle to the syscon which contains the 5th bit
+ of the TSC (temperature sensor coefficient) on
+			 Meson8b and Meson8m2 (which is used to calibrate
+			 the temperature sensor; see the sketch below)
- nvmem-cells: phandle to the temperature_calib eFuse cells
- nvmem-cell-names: if present (to enable the temperature sensor
calibration) this must contain "temperature_calib"
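A hedged sketch of how the new property might be referenced on Meson8b; the
&saradc node label and the &hhi_sysctrl syscon label are assumptions:

	&saradc {
		amlogic,hhi-sysctrl = <&hhi_sysctrl>;
	};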
diff --git a/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt
new file mode 100644
index 000000000000..f01159f20d87
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt
@@ -0,0 +1,48 @@
+* Ingenic JZ47xx ADC controller IIO bindings
+
+Required properties:
+
+- compatible: Should be one of:
+ * ingenic,jz4725b-adc
+ * ingenic,jz4740-adc
+- reg: ADC controller registers location and length.
+- clocks: phandle to the SoC's ADC clock.
+- clock-names: Must be set to "adc".
+- #io-channel-cells: Must be set to <1> to indicate channels are selected
+ by index.
+
+ADC clients must use the format described in iio-bindings.txt, giving
+a phandle and IIO specifier pair ("io-channels") to the ADC controller.
+
+Example:
+
+#include <dt-bindings/iio/adc/ingenic,adc.h>
+
+adc: adc@10070000 {
+ compatible = "ingenic,jz4740-adc";
+ #io-channel-cells = <1>;
+
+ reg = <0x10070000 0x30>;
+
+ clocks = <&cgu JZ4740_CLK_ADC>;
+ clock-names = "adc";
+
+ interrupt-parent = <&intc>;
+ interrupts = <18>;
+};
+
+adc-keys {
+ ...
+ compatible = "adc-keys";
+ io-channels = <&adc INGENIC_ADC_AUX>;
+ io-channel-names = "buttons";
+ ...
+};
+
+battery {
+ ...
+ compatible = "ingenic,jz4740-battery";
+ io-channels = <&adc INGENIC_ADC_BATTERY>;
+ io-channel-names = "battery";
+ ...
+};
diff --git a/Documentation/devicetree/bindings/staging/iio/adc/lpc32xx-adc.txt b/Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt
index b3629d3a9adf..b3629d3a9adf 100644
--- a/Documentation/devicetree/bindings/staging/iio/adc/lpc32xx-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt
diff --git a/Documentation/devicetree/bindings/iio/adc/nuvoton,npcm-adc.txt b/Documentation/devicetree/bindings/iio/adc/nuvoton,npcm-adc.txt
new file mode 100644
index 000000000000..eb939fe77836
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/nuvoton,npcm-adc.txt
@@ -0,0 +1,24 @@
+Nuvoton NPCM Analog to Digital Converter (ADC)
+
+The NPCM ADC is a 10-bit converter for eight channel inputs.
+
+Required properties:
+- compatible: "nuvoton,npcm750-adc" for the NPCM7XX BMC.
+- reg: specifies physical base address and size of the registers.
+- interrupts: Contain the ADC interrupt with flags for falling edge.
+
+Optional properties:
+- clocks: phandle of the ADC reference clock. If the clock is not
+	provided, the ADC will use the default ADC sample rate.
+- vref-supply: The regulator supplying the ADC reference voltage. If
+	vref-supply is not provided, the ADC will use the internal voltage
+	reference (a sketch using an external reference follows the example).
+
+Example:
+
+adc: adc@f000c000 {
+ compatible = "nuvoton,npcm750-adc";
+ reg = <0xf000c000 0x8>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_ADC>;
+};
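A hedged sketch of selecting an external reference instead of the internal
one; the regulator label is an assumption:

	&adc {
		vref-supply = <&vref_1v8>;
	};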
diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
index a10c1f89037d..e1fe02f3e3e9 100644
--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
@@ -11,11 +11,13 @@ New driver handles the following
Required properties:
- compatible: Must be "samsung,exynos-adc-v1"
- for exynos4412/5250 controllers.
+ for Exynos5250 controllers.
Must be "samsung,exynos-adc-v2" for
future controllers.
Must be "samsung,exynos3250-adc" for
controllers compatible with ADC of Exynos3250.
+ Must be "samsung,exynos4212-adc" for
+ controllers compatible with ADC of Exynos4212 and Exynos4412.
Must be "samsung,exynos7-adc" for
the ADC in Exynos7 and compatibles
Must be "samsung,s3c2410-adc" for
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-ads124s08.txt b/Documentation/devicetree/bindings/iio/adc/ti-ads124s08.txt
new file mode 100644
index 000000000000..ecf807bb32f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti-ads124s08.txt
@@ -0,0 +1,25 @@
+* Texas Instruments' ads124s08 and ads124s06 ADC chip
+
+Required properties:
+ - compatible :
+ "ti,ads124s08"
+ "ti,ads124s06"
+ - reg : spi chip select number for the device
+
+Recommended properties:
+ - spi-max-frequency : Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+ - spi-cpha : Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Optional properties:
+ - reset-gpios : GPIO pin used to reset the device.
+
+Example:
+adc@0 {
+ compatible = "ti,ads124s08";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cpha;
+ reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/iio/chemical/bme680.txt b/Documentation/devicetree/bindings/iio/chemical/bme680.txt
new file mode 100644
index 000000000000..7f3827cfb2ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/bme680.txt
@@ -0,0 +1,11 @@
+Bosch Sensortec BME680 pressure/temperature/humidity/voc sensors
+
+Required properties:
+- compatible: must be "bosch,bme680"
+
+Example:
+
+bme680@76 {
+ compatible = "bosch,bme680";
+ reg = <0x76>;
+};
diff --git a/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt
new file mode 100644
index 000000000000..7b5f06f324c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt
@@ -0,0 +1,20 @@
+* Plantower PMS7003 particulate matter sensor
+
+Required properties:
+- compatible: must be "plantower,pms7003"
+- vcc-supply: phandle to the regulator that provides power to the sensor
+
+Optional properties:
+- plantower,set-gpios: phandle to the GPIO connected to the SET line
+- reset-gpios: phandle to the GPIO connected to the RESET line
+
+Refer to serial/slave-device.txt for generic serial attached device bindings.
+
+Example:
+
+&uart0 {
+ air-pollution-sensor {
+ compatible = "plantower,pms7003";
+ vcc-supply = <&reg_vcc5v0>;
+ };
+};
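A hedged variant of the example with the optional control lines wired up; the
GPIO controller label, line numbers and polarities are assumptions:

	&uart0 {
		air-pollution-sensor {
			compatible = "plantower,pms7003";
			vcc-supply = <&reg_vcc5v0>;
			plantower,set-gpios = <&pio 16 GPIO_ACTIVE_HIGH>;
			reset-gpios = <&pio 17 GPIO_ACTIVE_LOW>;
		};
	};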
diff --git a/Documentation/devicetree/bindings/iio/chemical/sensirion,sgp30.txt b/Documentation/devicetree/bindings/iio/chemical/sensirion,sgp30.txt
new file mode 100644
index 000000000000..5844ed58173c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/sensirion,sgp30.txt
@@ -0,0 +1,15 @@
+* Sensirion SGP30/SGPC3 multi-pixel Gas Sensor
+
+Required properties:
+
+ - compatible: must be one of
+ "sensirion,sgp30"
+ "sensirion,sgpc3"
+ - reg: the I2C address of the sensor
+
+Example:
+
+gas@58 {
+ compatible = "sensirion,sgp30";
+ reg = <0x58>;
+};
diff --git a/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.txt b/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.txt
new file mode 100644
index 000000000000..6eee2709b5b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.txt
@@ -0,0 +1,12 @@
+* Sensirion SPS30 particulate matter sensor
+
+Required properties:
+- compatible: must be "sensirion,sps30"
+- reg: the I2C address of the sensor
+
+Example:
+
+sps30@69 {
+ compatible = "sensirion,sps30";
+ reg = <0x69>;
+};
diff --git a/Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt b/Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
new file mode 100644
index 000000000000..639c94ed83e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
@@ -0,0 +1,28 @@
+* Texas Instruments Dual, 12-Bit Serial Input Digital-to-Analog Converter
+
+The DAC7612 is a dual, 12-bit digital-to-analog converter (DAC) with guaranteed
+12-bit monotonicity performance over the industrial temperature range.
+It is programmable through an SPI interface.
+
+The internal DACs are loaded when the LOADDACS pin is pulled down.
+
+http://www.ti.com/lit/ds/sbas106/sbas106.pdf
+
+Required Properties:
+- compatible: Should be one of:
+ "ti,dac7612"
+ "ti,dac7612u"
+ "ti,dac7612ub"
+- reg: Definition as per Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Optional Properties:
+- ti,loaddacs-gpios: GPIO descriptor for the LOADDACS pin.
+- spi-*: Definition as per Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+ dac@1 {
+ compatible = "ti,dac7612";
+ reg = <0x1>;
+ ti,loaddacs-gpios = <&msmgpio 25 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/impedance-analyzer/ad5933.txt b/Documentation/devicetree/bindings/iio/impedance-analyzer/ad5933.txt
new file mode 100644
index 000000000000..5ff38728ff91
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/impedance-analyzer/ad5933.txt
@@ -0,0 +1,26 @@
+Analog Devices AD5933/AD5934 Impedance Converter, Network Analyzer
+
+https://www.analog.com/media/en/technical-documentation/data-sheets/AD5933.pdf
+https://www.analog.com/media/en/technical-documentation/data-sheets/AD5934.pdf
+
+Required properties:
+ - compatible : should be one of
+ "adi,ad5933"
+ "adi,ad5934"
+ - reg : the I2C address.
+ - vdd-supply : The regulator supply for DVDD, AVDD1 and AVDD2 when they
+ are connected together.
+
+Optional properties:
+- clocks : external clock reference.
+- clock-names : must be "mclk" if clocks is set.
+
+Example for an I2C device node:
+
+ impedance-analyzer@0d {
+		compatible = "adi,ad5933";
+ reg = <0x0d>;
+ vdd-supply = <&vdd_supply>;
+ clocks = <&ref_clk>;
+ clock-names = "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/iio/imu/bmi160.txt b/Documentation/devicetree/bindings/iio/imu/bmi160.txt
index 0c1c105fb503..900c169de00f 100644
--- a/Documentation/devicetree/bindings/iio/imu/bmi160.txt
+++ b/Documentation/devicetree/bindings/iio/imu/bmi160.txt
@@ -9,9 +9,11 @@ Required properties:
- spi-max-frequency : set maximum clock frequency (only for SPI)
Optional properties:
- - interrupts : interrupt mapping for IRQ, must be IRQ_TYPE_LEVEL_LOW
+ - interrupts : interrupt mapping for IRQ
- interrupt-names : set to "INT1" if INT1 pin should be used as interrupt
input, set to "INT2" if INT2 pin should be used instead
+ - drive-open-drain : set if the specified interrupt pin should be configured as
+ open drain. If not set, defaults to push-pull.
Examples:
@@ -20,7 +22,7 @@ bmi160@68 {
reg = <0x68>;
interrupt-parent = <&gpio4>;
- interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <12 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "INT1";
};
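A hedged sketch of a setup that uses the new drive-open-drain flag; the I2C
address, GPIO controller and line number are assumptions:

bmi160@68 {
	compatible = "bosch,bmi160";
	reg = <0x68>;
	interrupt-parent = <&gpio2>;
	interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
	interrupt-names = "INT2";
	drive-open-drain;
};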
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
index 6ab9a9d196b0..268bf7568e19 100644
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
@@ -11,6 +11,7 @@ Required properties:
"invensense,mpu9250"
"invensense,mpu9255"
"invensense,icm20608"
+ "invensense,icm20602"
- reg : the I2C address of the sensor
- interrupts: interrupt mapping for IRQ. It should be configured with flags
IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
diff --git a/Documentation/devicetree/bindings/iio/light/max44009.txt b/Documentation/devicetree/bindings/iio/light/max44009.txt
new file mode 100644
index 000000000000..4a98848e35c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/max44009.txt
@@ -0,0 +1,24 @@
+* MAX44009 Ambient Light Sensor
+
+Required properties:
+
+- compatible: should be "maxim,max44009"
+- reg: the I2C address of the device (default is <0x4a>)
+
+Optional properties:
+
+- interrupts: interrupt mapping for GPIO IRQ. Should be configured with
+ IRQ_TYPE_EDGE_FALLING.
+
+Refer to interrupt-controller/interrupts.txt for generic interrupt client
+node bindings.
+
+Example:
+
+light-sensor@4a {
+ compatible = "maxim,max44009";
+ reg = <0x4a>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
+};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index ddcb95509599..52ee4baec6f0 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -77,3 +77,4 @@ Pressure sensors:
- st,lps22hb-press
- st,lps33hw
- st,lps35hw
+- st,lps22hh
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt b/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
index b1163bf97146..aad5e34965eb 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt
@@ -2,7 +2,12 @@
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Required properties:
+- mfd
+ compatible: Should be
+ "ti,am3359-tscadc" for AM335x/AM437x SoCs
+ "ti,am654-tscadc", "ti,am3359-tscadc" for AM654 SoCs
- child "tsc"
+ compatible: Should be "ti,am3359-tsc".
ti,wires: Wires refer to application modes i.e. 4/5/8 wire touchscreen
support on the platform.
ti,x-plate-resistance: X plate resistance
@@ -25,6 +30,9 @@ Required properties:
AIN0 = 0, AIN1 = 1 and so on till AIN7 = 7.
XP = 0, XN = 1, YP = 2, YN = 3.
- child "adc"
+ compatible: Should be
+ "ti,am3359-adc" for AM335x/AM437x SoCs
+ "ti,am654-adc", "ti,am3359-adc" for AM654 SoCs
ti,adc-channels: List of analog inputs available for ADC.
AIN0 = 0, AIN1 = 1 and so on till AIN7 = 7.
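A hedged sketch of how the new AM654 compatibles might be used; the node name,
unit address and channel list are assumptions, and the remaining required
properties follow the rest of this binding:

	tscadc0: tscadc@40200000 {
		compatible = "ti,am654-tscadc", "ti,am3359-tscadc";

		adc {
			compatible = "ti,am654-adc", "ti,am3359-adc";
			ti,adc-channels = <0 1 2 3>;
		};
	};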
diff --git a/Documentation/devicetree/bindings/interconnect/interconnect.txt b/Documentation/devicetree/bindings/interconnect/interconnect.txt
new file mode 100644
index 000000000000..5a3c575b387a
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/interconnect.txt
@@ -0,0 +1,60 @@
+Interconnect Provider Device Tree Bindings
+==========================================
+
+The purpose of this document is to define a common set of generic interconnect
+providers/consumers properties.
+
+
+= interconnect providers =
+
+The interconnect provider binding is intended to represent the interconnect
+controllers in the system. Each provider registers a set of interconnect
+nodes, which expose the interconnect related capabilities of the interconnect
+to consumer drivers. These capabilities can be throughput, latency, priority
+etc. The consumer drivers set constraints on interconnect path (or endpoints)
+depending on the use case. Interconnect providers can also be interconnect
+consumers, such as in the case where two network-on-chip fabrics interface
+directly.
+
+Required properties:
+- compatible : contains the interconnect provider compatible string
+- #interconnect-cells : number of cells in an interconnect specifier needed to
+ encode the interconnect node id
+
+Example:
+
+ snoc: interconnect@580000 {
+ compatible = "qcom,msm8916-snoc";
+ #interconnect-cells = <1>;
+ reg = <0x580000 0x14000>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+ <&rpmcc RPM_SMD_SNOC_A_CLK>;
+ };
+
+
+= interconnect consumers =
+
+The interconnect consumers are device nodes which dynamically express their
+bandwidth requirements along interconnect paths they are connected to. There
+can be multiple interconnect providers on a SoC and the consumer may consume
+multiple paths from different providers depending on use case and the
+components it has to interact with.
+
+Required properties:
+interconnects : Pairs of phandles and interconnect provider specifier to denote
+ the edge source and destination ports of the interconnect path.
+
+Optional properties:
+interconnect-names : List of interconnect path name strings sorted in the same
+	order as the interconnects property. Consumer drivers will use
+ interconnect-names to match interconnect paths with interconnect
+ specifier pairs.
+
+Example:
+
+ sdhci@7864000 {
+ ...
+ interconnects = <&pnoc MASTER_SDCC_1 &bimc SLAVE_EBI_CH0>;
+ interconnect-names = "sdhc-mem";
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt
new file mode 100644
index 000000000000..5c4f1d911630
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt
@@ -0,0 +1,24 @@
+Qualcomm SDM845 Network-On-Chip interconnect driver binding
+-----------------------------------------------------------
+
+SDM845 interconnect providers support system bandwidth requirements through
+RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
+able to communicate with the BCM through the Resource State Coordinator (RSC)
+associated with each execution environment. Provider nodes must reside within
+an RPMh device node pertaining to their RSC and each provider maps to a single
+RPMh resource.
+
+Required properties :
+- compatible : shall contain only one of the following:
+ "qcom,sdm845-rsc-hlos"
+- #interconnect-cells : should contain 1
+
+Examples:
+
+apps_rsc: rsc {
+ rsc_hlos: interconnect {
+ compatible = "qcom,sdm845-rsc-hlos";
+ #interconnect-cells = <1>;
+ };
+};
+
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index b83bb8249074..a3be5298a5eb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -78,7 +78,7 @@ Sub-nodes:
PPI affinity can be expressed as a single "ppi-partitions" node,
containing a set of sub-nodes, each with the following property:
- affinity: Should be a list of phandles to CPU nodes (as described in
-Documentation/devicetree/bindings/arm/cpus.txt).
+ Documentation/devicetree/bindings/arm/cpus.yaml).
GICv3 has one or more Interrupt Translation Services (ITS) that are
used to route Message Signalled Interrupts (MSI) to the CPUs.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
index 45790ce6f5b9..582991c426ee 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
@@ -6,8 +6,9 @@ Required properties:
- "fsl,imx8m-irqsteer"
- "fsl,imx-irqsteer"
- reg: Physical base address and size of registers.
-- interrupts: Should contain the parent interrupt line used to multiplex the
- input interrupts.
+- interrupts: Should contain up to 8 parent interrupt lines used to
+  multiplex the input interrupts. They should be specified sequentially
+  from output 0 to 7.
- clocks: Should contain one clock for entry in clock-names
see Documentation/devicetree/bindings/clock/clock-bindings.txt
- clock-names:
@@ -16,8 +17,8 @@ Required properties:
- #interrupt-cells: Specifies the number of cells needed to encode an
interrupt source. The value must be 1.
- fsl,channel: The output channel that all input IRQs should be steered into.
-- fsl,irq-groups: Number of IRQ groups managed by this controller instance.
- Each group manages 64 input interrupts.
+- fsl,num-irqs: Number of input interrupts of this channel.
+ Should be multiple of 32 input interrupts and up to 512 interrupts.
Example:
@@ -28,7 +29,7 @@ Example:
clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>;
clock-names = "ipg";
fsl,channel = <0>;
- fsl,irq-groups = <1>;
+ fsl,num-irqs = <64>;
interrupt-controller;
#interrupt-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt
new file mode 100644
index 000000000000..a63ed9fcb535
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt
@@ -0,0 +1,24 @@
+Loongson ls1x Interrupt Controller
+
+Required properties:
+
+- compatible : should be "loongson,ls1x-intc". Valid strings are:
+
+- reg : Specifies base physical address and size of the registers.
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The value shall be 2.
+- interrupts : Specifies the CPU interrupt the controller is connected to.
+
+Example:
+
+intc: interrupt-controller@1fd01040 {
+ compatible = "loongson,ls1x-intc";
+ reg = <0x1fd01040 0x18>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
index 33a98eb44949..c5d589108a94 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
@@ -9,6 +9,7 @@ Required properties:
"mediatek,mt8135-sysirq", "mediatek,mt6577-sysirq": for MT8135
"mediatek,mt8127-sysirq", "mediatek,mt6577-sysirq": for MT8127
"mediatek,mt7622-sysirq", "mediatek,mt6577-sysirq": for MT7622
+ "mediatek,mt7623-sysirq", "mediatek,mt6577-sysirq": for MT7623
"mediatek,mt6795-sysirq", "mediatek,mt6577-sysirq": for MT6795
"mediatek,mt6797-sysirq", "mediatek,mt6577-sysirq": for MT6797
"mediatek,mt6765-sysirq", "mediatek,mt6577-sysirq": for MT6765
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index aa1399814a2a..70876ac11367 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -37,6 +37,18 @@ Optional properties for child nodes:
"ide-disk" - LED indicates IDE disk activity (deprecated),
in new implementations use "disk-activity"
"timer" - LED flashes at a fixed, configurable rate
+ "pattern" - LED alters the brightness for the specified duration with one
+ software timer (requires "led-pattern" property)
+
+- led-pattern : Array of integers with default pattern for certain triggers.
+ Each trigger may parse this property differently:
+ - one-shot : two numbers specifying delay on and delay off (in ms),
+ - timer : two numbers specifying delay on and delay off (in ms),
+ - pattern : the pattern is given by a series of tuples, of
+ brightness and duration (in ms). The exact format is
+ described in:
+ Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt
+
- led-max-microamp : Maximum LED supply current in microamperes. This property
can be made mandatory for the board configurations
diff --git a/Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt b/Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt
new file mode 100644
index 000000000000..d3696680bfc8
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt
@@ -0,0 +1,49 @@
+* Pattern format for LED pattern trigger
+
+The pattern is given by a series of tuples, of brightness and duration (ms).
+The LED is expected to traverse the series and hold each brightness value for
+the specified duration. A duration of 0 means the brightness should change
+immediately to the new value, and writing a malformed pattern deactivates any
+active one.
+
+1. For gradual dimming, the dimming interval is set to 50 milliseconds, so a
+tuple with a duration shorter than the dimming interval (50 ms) is treated as a
+step change of brightness, i.e. the subsequent brightness will be applied
+without adding intervening dimming intervals.
+
+The gradual dimming format of the software pattern values should be:
+"brightness_1 duration_1 brightness_2 duration_2 brightness_3 duration_3 ...".
+For example (using sysfs interface):
+
+echo 0 1000 255 2000 > pattern
+
+It will make the LED go gradually from zero-intensity to max (255) intensity in
+1000 milliseconds, then back to zero intensity in 2000 milliseconds:
+
+LED brightness
+ ^
+255-| / \ / \ /
+ | / \ / \ /
+ | / \ / \ /
+ | / \ / \ /
+ 0-| / \/ \/
+ +---0----1----2----3----4----5----6------------> time (s)
+
+2. To make the LED go instantly from one brightness value to another, we should
+use zero-length durations (the brightness must be the same as the previous tuple's). So
+the format should be: "brightness_1 duration_1 brightness_1 0 brightness_2
+duration_2 brightness_2 0 ...".
+For example (using sysfs interface):
+
+echo 0 1000 0 0 255 2000 255 0 > pattern
+
+It will make the LED stay off for one second, then stay at max brightness for
+two seconds:
+
+LED brightness
+ ^
+255-| +---------+ +---------+
+ | | | | |
+ | | | | |
+ | | | | |
+ 0-| -----+ +----+ +----
+ +---0----1----2----3----4----5----6------------> time (s)
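For completeness, a hedged device tree sketch of an LED that defaults to this
trigger through the led-pattern property described in leds/common.txt; the
GPIO controller label and the pattern values are assumptions:

	led-controller {
		compatible = "gpio-leds";

		led-0 {
			gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
			linux,default-trigger = "pattern";
			led-pattern = <0 1000 255 2000>;
		};
	};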
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
index a4b056761eaa..d5f68ac78d15 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
@@ -23,6 +23,20 @@ Required properties:
Optional properties:
- clock-output-names : Should contain name for output clock.
+- rohm,reset-snvs-powered : Transfer the BD718x7 to the SNVS state at reset.
+
+The BD718x7 supports two different HW states as reset target states, called
+SNVS and READY. In the READY state all the PMIC power outputs go down and
+the OTP is reloaded. In the SNVS state all other logic and external devices
+apart from the SNVS power domain are shut off. Please refer to the NXP i.MX8
+documentation for further information regarding the SNVS state. When a reset
+is done via the SNVS state the PMIC OTP data is not reloaded. This causes
+power outputs that have been under SW control to stay down after the reset
+has switched the power state to SNVS. If the reset is done via the READY
+state the power outputs are returned to HW control by the OTP loading. Thus
+the reset target state is READY by default. If the SNVS state is used, the
+regulators crucial for booting must have the regulator-always-on and
+regulator-boot-on properties set in their regulator nodes.
Example:
@@ -43,6 +57,7 @@ Example:
#clock-cells = <0>;
clocks = <&osc 0>;
clock-output-names = "bd71837-32k-out";
+ rohm,reset-snvs-powered;
regulators {
buck1: BUCK1 {
@@ -50,8 +65,10 @@ Example:
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
+ regulator-always-on;
regulator-ramp-delay = <1250>;
};
+ // [...]
};
};
diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt
deleted file mode 100644
index a0c19bd1ce66..000000000000
--- a/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Lantiq XWAY SoC GPHY binding
-============================
-
-This binding describes a software-defined ethernet PHY, provided by the RCU
-module on newer Lantiq XWAY SoCs (xRX200 and newer).
-
--------------------------------------------------------------------------------
-Required properties:
-- compatible : Should be one of
- "lantiq,xrx200a1x-gphy"
- "lantiq,xrx200a2x-gphy"
- "lantiq,xrx300-gphy"
- "lantiq,xrx330-gphy"
-- reg : Addrress of the GPHY FW load address register
-- resets : Must reference the RCU GPHY reset bit
-- reset-names : One entry, value must be "gphy" or optional "gphy2"
-- clocks : A reference to the (PMU) GPHY clock gate
-
-Optional properties:
-- lantiq,gphy-mode : GPHY_MODE_GE (default) or GPHY_MODE_FE as defined in
- <dt-bindings/mips/lantiq_xway_gphy.h>
-
-
--------------------------------------------------------------------------------
-Example for the GPHys on the xRX200 SoCs:
-
-#include <dt-bindings/mips/lantiq_rcu_gphy.h>
- gphy0: gphy@20 {
- compatible = "lantiq,xrx200a2x-gphy";
- reg = <0x20 0x4>;
-
- resets = <&reset0 31 30>, <&reset1 7 7>;
- reset-names = "gphy", "gphy2";
- clocks = <&pmu0 XRX200_PMU_GATE_GPHY>;
- lantiq,gphy-mode = <GPHY_MODE_GE>;
- };
diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
index 7f0822b4beae..58d51f480c9e 100644
--- a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
+++ b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
@@ -26,24 +26,6 @@ Example of the RCU bindings on a xRX200 SoC:
ranges = <0x0 0x203000 0x100>;
big-endian;
- gphy0: gphy@20 {
- compatible = "lantiq,xrx200a2x-gphy";
- reg = <0x20 0x4>;
-
- resets = <&reset0 31 30>, <&reset1 7 7>;
- reset-names = "gphy", "gphy2";
- lantiq,gphy-mode = <GPHY_MODE_GE>;
- };
-
- gphy1: gphy@68 {
- compatible = "lantiq,xrx200a2x-gphy";
- reg = <0x68 0x4>;
-
- resets = <&reset0 29 28>, <&reset1 6 6>;
- reset-names = "gphy", "gphy2";
- lantiq,gphy-mode = <GPHY_MODE_GE>;
- };
-
reset0: reset-controller@10 {
compatible = "lantiq,xrx200-reset";
reg = <0x10 4>, <0x14 4>;
diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt b/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
new file mode 100644
index 000000000000..2a1827ab50d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
@@ -0,0 +1,78 @@
+Qualcomm Technologies, Inc. FastRPC Driver
+
+The FastRPC implements an IPC (Inter-Processor Communication)
+mechanism that allows for clients to transparently make remote method
+invocations across DSP and APPS boundaries. This enables developers
+to offload tasks to the DSP and free up the application processor for
+other tasks.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,fastrpc"
+
+- label
+ Usage: required
+ Value type: <string>
+	Definition: should specify the DSP domain name this FastRPC instance
+	corresponds to. Must be one of: "adsp", "mdsp", "sdsp", "cdsp"
+
+- #address-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 1
+
+- #size-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0
+
+= COMPUTE BANKS
+Each subnode of the FastRPC node represents a compute context bank available
+on the DSP.
+- All compute context banks MUST contain the following properties:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,fastrpc-compute-cb"
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Context Bank ID.
+
+- qcom,nsessions:
+ Usage: Optional
+ Value type: <u32>
+	Definition: A value indicating how many sessions can share this
+ context bank. Defaults to 1 when this property
+ is not specified.
+
+Example:
+
+adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+ ...
+ smd-edge {
+ label = "lpass";
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,smd-channels = "fastrpcsmd-apps-dsp";
+ label = "adsp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ };
+
+ cb@2 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <2>;
+ };
+ ...
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 9201a7d8d7b0..540c65ed9cba 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -15,6 +15,7 @@ Required properties:
"fsl,imx6q-usdhc"
"fsl,imx6sl-usdhc"
"fsl,imx6sx-usdhc"
+ "fsl,imx6ull-usdhc"
"fsl,imx7d-usdhc"
"fsl,imx8qxp-usdhc"
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index f5a0923b34ca..cdbcfd3a4ff2 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -62,6 +62,8 @@ Optional properties:
be referred to mmc-pwrseq-simple.txt. But now it's reused as a tunable delay
waiting for I/O signalling and card power supply to be stable, regardless of
whether pwrseq-simple is used. Default to 10ms if no available.
+- supports-cqe : The presence of this property indicates that the corresponding
+  MMC host controller supports the HW command queue feature (a usage sketch
+  follows below).
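A hedged sketch of flagging command-queue support on a host controller node;
the &sdhci0 node label is an assumption:

	&sdhci0 {
		supports-cqe;
	};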
*NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
polarity properties, we have to fix the meaning of the "normal" and "inverted"
diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
index 32b4b4e41923..2cecdc71d94c 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
@@ -39,12 +39,16 @@ sdhci@c8000200 {
bus-width = <8>;
};
-Optional properties for Tegra210 and Tegra186:
+Optional properties for Tegra210, Tegra186 and Tegra194:
- pinctrl-names, pinctrl-0, pinctrl-1 : Specify pad voltage
configurations. Valid pinctrl-names are "sdmmc-3v3" and "sdmmc-1v8"
for controllers supporting multiple voltage levels. The order of names
should correspond to the pin configuration states in pinctrl-0 and
pinctrl-1.
+- pinctrl-names : "sdmmc-3v3-drv" and "sdmmc-1v8-drv" are applicable for
+ Tegra210 where pad config registers are in the pinmux register domain
+ for pull-up-strength and pull-down-strength values configuration when
+ using pads at 3V3 and 1V8 levels.
- nvidia,only-1-8-v : The presence of this property indicates that the
controller operates at a 1.8 V fixed I/O voltage.
- nvidia,pad-autocal-pull-up-offset-3v3,
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap.txt b/Documentation/devicetree/bindings/mmc/ti-omap.txt
index 8de579969763..02fd31cf361d 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap.txt
@@ -24,31 +24,3 @@ Examples:
dmas = <&sdma 61 &sdma 62>;
dma-names = "tx", "rx";
};
-
-* TI MMC host controller for OMAP1 and 2420
-
-The MMC Host Controller on TI OMAP1 and 2420 family provides
-an interface for MMC, SD, and SDIO types of memory cards.
-
-This file documents differences between the core properties described
-by mmc.txt and the properties used by the omap mmc driver.
-
-Note that this driver will not work with omap2430 or later omaps,
-please see the omap hsmmc driver for the current omaps.
-
-Required properties:
-- compatible: Must be "ti,omap2420-mmc", for OMAP2420 controllers
-- ti,hwmods: For 2420, must be "msdi<n>", where n is controller
- instance starting 1
-
-Examples:
-
- msdi1: mmc@4809c000 {
- compatible = "ti,omap2420-mmc";
- ti,hwmods = "msdi1";
- reg = <0x4809c000 0x80>;
- interrupts = <83>;
- dmas = <&sdma 61 &sdma 62>;
- dma-names = "tx", "rx";
- };
-
diff --git a/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt b/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
new file mode 100644
index 000000000000..3983c11e062c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
@@ -0,0 +1,60 @@
+Amlogic NAND Flash Controller (NFC) for GXBB/GXL/AXG family SoCs
+
+This file documents the properties in addition to those available in
+the MTD NAND bindings.
+
+Required properties:
+- compatible : contains one of:
+ - "amlogic,meson-gxl-nfc"
+ - "amlogic,meson-axg-nfc"
+- clocks :
+ A list of phandle + clock-specifier pairs for the clocks listed
+ in clock-names.
+
+- clock-names: Should contain the following:
+ "core" - NFC module gate clock
+ "device" - device clock from eMMC sub clock controller
+ "rx" - rx clock phase
+ "tx" - tx clock phase
+
+- amlogic,mmc-syscon : Required for the NAND clocks; it is shared with SD/eMMC
+  controller port C.
+
+Optional children nodes:
+Children nodes represent the available nand chips.
+
+Other properties:
+see Documentation/devicetree/bindings/mtd/nand.txt for generic bindings.
+
+Example, demonstrated on the AXG SoC:
+
+ sd_emmc_c_clkc: mmc@7000 {
+ compatible = "amlogic,meson-axg-mmc-clkc", "syscon";
+ reg = <0x0 0x7000 0x0 0x800>;
+ };
+
+ nand-controller@7800 {
+ compatible = "amlogic,meson-axg-nfc";
+ reg = <0x0 0x7800 0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>;
+
+ clocks = <&clkc CLKID_SD_EMMC_C>,
+ <&sd_emmc_c_clkc CLKID_MMC_DIV>,
+ <&sd_emmc_c_clkc CLKID_MMC_PHASE_RX>,
+ <&sd_emmc_c_clkc CLKID_MMC_PHASE_TX>;
+ clock-names = "core", "device", "rx", "tx";
+ amlogic,mmc-syscon = <&sd_emmc_c_clkc>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_pins>;
+
+ nand@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-on-flash-bbt;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
index bb2075df9b38..4345c3a6f530 100644
--- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible : should be one of the following:
Generic default - "cdns,qspi-nor".
For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor".
+ For TI AM654 SoC - "ti,am654-ospi", "cdns,qspi-nor".
- reg : Contains two entries, each of which is a tuple consisting of a
physical address and length. The first entry is the address and
length of the controller register set. The second entry is the
diff --git a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
index 56d3668e2c50..a12e3b5c495d 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
@@ -1,4 +1,4 @@
-* Serial NOR flash controller for MTK MT81xx (and similar)
+* Serial NOR flash controller for MediaTek SoCs
Required properties:
- compatible: For mt8173, compatible should be "mediatek,mt8173-nor",
@@ -10,6 +10,7 @@ Required properties:
"mediatek,mt2712-nor", "mediatek,mt8173-nor"
"mediatek,mt7622-nor", "mediatek,mt8173-nor"
"mediatek,mt7623-nor", "mediatek,mt8173-nor"
+ "mediatek,mt7629-nor", "mediatek,mt8173-nor"
"mediatek,mt8173-nor"
- reg: physical base address and length of the controller's register
- clocks: the phandle of the clocks needed by the nor controller
diff --git a/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt b/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt
new file mode 100644
index 000000000000..ad2bef826582
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt
@@ -0,0 +1,61 @@
+STMicroelectronics Flexible Memory Controller 2 (FMC2)
+NAND Interface
+
+Required properties:
+- compatible: Should be one of:
+ * st,stm32mp15-fmc2
+- reg: NAND flash controller memory areas.
+ First region contains the register location.
+ Regions 2 to 4 respectively contain the data, command,
+ and address space for CS0.
+ Regions 5 to 7 contain the same areas for CS1.
+- interrupts: The interrupt number
+- pinctrl-0: Standard Pinctrl phandle (see: pinctrl/pinctrl-bindings.txt)
+- clocks: The clock needed by the NAND flash controller
+
+Optional properties:
+- resets: Reference to a reset controller asserting the FMC controller
+- dmas: DMA specifiers (see: dma/stm32-mdma.txt)
+- dma-names: Must be "tx", "rx" and "ecc"
+
+* NAND device bindings:
+
+Required properties:
+- reg: describes the CS lines assigned to the NAND device.
+
+Optional properties:
+- nand-on-flash-bbt: see nand.txt
+- nand-ecc-strength: see nand.txt
+- nand-ecc-step-size: see nand.txt
+
+The following ECC strength and step size are currently supported:
+ - nand-ecc-strength = <1>, nand-ecc-step-size = <512> (Hamming)
+ - nand-ecc-strength = <4>, nand-ecc-step-size = <512> (BCH4)
+ - nand-ecc-strength = <8>, nand-ecc-step-size = <512> (BCH8) (default)
+
+Example:
+
+ fmc: nand-controller@58002000 {
+ compatible = "st,stm32mp15-fmc2";
+ reg = <0x58002000 0x1000>,
+ <0x80000000 0x1000>,
+ <0x88010000 0x1000>,
+ <0x88020000 0x1000>,
+ <0x81000000 0x1000>,
+ <0x89010000 0x1000>,
+ <0x89020000 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc FMC_K>;
+ resets = <&rcc FMC_R>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&fmc_pins_a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ nand-on-flash-bbt;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt
index 37d67926dd6d..b1ad6ee68e90 100644
--- a/Documentation/devicetree/bindings/net/btusb.txt
+++ b/Documentation/devicetree/bindings/net/btusb.txt
@@ -9,6 +9,9 @@ Required properties:
(more may be added later) are:
"usb1286,204e" (Marvell 8997)
+ "usbcf3,e300" (Qualcomm QCA6174A)
+ "usb4ca,301a" (Qualcomm QCA6174A (Lite-On))
+
Also, vendors that use btusb may have device additional properties, e.g:
Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
diff --git a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
index 764c0c79b43d..5d76f991c027 100644
--- a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
+++ b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
@@ -1,4 +1,4 @@
-TI CPSW Phy mode Selection Device Tree Bindings
+TI CPSW Phy mode Selection Device Tree Bindings (DEPRECATED)
-----------------------------------------------
Required properties:
diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt
index 0f407fb371ce..e7db7268fd0f 100644
--- a/Documentation/devicetree/bindings/net/dsa/ksz.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@ -7,6 +7,11 @@ Required properties:
of the following:
- "microchip,ksz9477"
- "microchip,ksz9897"
+ - "microchip,ksz9896"
+ - "microchip,ksz9567"
+ - "microchip,ksz8565"
+ - "microchip,ksz9893"
+ - "microchip,ksz9563"
Optional properties:
@@ -19,58 +24,96 @@ Examples:
Ethernet switch connected via SPI to the host, CPU port wired to eth0:
- eth0: ethernet@10001000 {
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
+ eth0: ethernet@10001000 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
- spi1: spi@f8008000 {
- pinctrl-0 = <&pinctrl_spi_ksz>;
- cs-gpios = <&pioC 25 0>;
- id = <1>;
+ spi1: spi@f8008000 {
+ pinctrl-0 = <&pinctrl_spi_ksz>;
+ cs-gpios = <&pioC 25 0>;
+ id = <1>;
- ksz9477: ksz9477@0 {
- compatible = "microchip,ksz9477";
- reg = <0>;
+ ksz9477: ksz9477@0 {
+ compatible = "microchip,ksz9477";
+ reg = <0>;
- spi-max-frequency = <44000000>;
- spi-cpha;
- spi-cpol;
+ spi-max-frequency = <44000000>;
+ spi-cpha;
+ spi-cpol;
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- label = "lan1";
- };
- port@1 {
- reg = <1>;
- label = "lan2";
- };
- port@2 {
- reg = <2>;
- label = "lan3";
- };
- port@3 {
- reg = <3>;
- label = "lan4";
- };
- port@4 {
- reg = <4>;
- label = "lan5";
- };
- port@5 {
- reg = <5>;
- label = "cpu";
- ethernet = <&eth0>;
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
- };
- };
- };
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ port@4 {
+ reg = <4>;
+ label = "lan5";
+ };
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&eth0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ ksz8565: ksz8565@0 {
+ compatible = "microchip,ksz8565";
+ reg = <0>;
+
+ spi-max-frequency = <44000000>;
+ spi-cpha;
+ spi-cpol;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&eth0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
index aa3527f71fdc..47aa205ee0bd 100644
--- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt
+++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
@@ -3,12 +3,16 @@ Mediatek MT7530 Ethernet switch
Required properties:
-- compatible: Must be compatible = "mediatek,mt7530";
+- compatible: may be compatible = "mediatek,mt7530"
+ or compatible = "mediatek,mt7621"
- #address-cells: Must be 1.
- #size-cells: Must be 0.
- mediatek,mcm: Boolean; if defined, indicates that either MT7530 is the part
on multi-chip module belong to MT7623A has or the remotely standalone
chip as the function MT7623N reference board provided for.
+
+If the compatible "mediatek,mt7530" is set, then the following properties are required:
+
- core-supply: Phandle to the regulator node necessary for the core power.
- io-supply: Phandle to the regulator node necessary for the I/O power.
See Documentation/devicetree/bindings/regulator/mt6323-regulator.txt
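A hedged sketch of a switch node using the new MT7621 compatible, for which
the supply properties above are not required; the ports layout is omitted
here and follows the existing MT7530 example:

	switch@0 {
		compatible = "mediatek,mt7621";
		#address-cells = <1>;
		#size-cells = <0>;
		...
	};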
diff --git a/Documentation/devicetree/bindings/net/fsl-enetc.txt b/Documentation/devicetree/bindings/net/fsl-enetc.txt
new file mode 100644
index 000000000000..c812e25ae90f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl-enetc.txt
@@ -0,0 +1,69 @@
+* ENETC ethernet device tree bindings
+
+Depending on the board design and the ENETC port type (internal or
+external), there are two supported link modes, specified by the
+device tree bindings below.
+
+Required properties:
+
+- reg : Specifies PCIe Device Number and Function
+ Number of the ENETC endpoint device, according
+ to parent node bindings.
+- compatible : Should be "fsl,enetc".
+
+1) The ENETC external port is connected to a MDIO configurable phy:
+
+In this case, the ENETC node should include an "mdio" sub-node
+that in turn should contain the "ethernet-phy" node describing the
+external phy. The properties below are required; their bindings are
+already defined in ethernet.txt or phy.txt, under
+Documentation/devicetree/bindings/net/*.
+
+Required:
+
+- phy-handle : Phandle to a PHY on the MDIO bus.
+ Defined in ethernet.txt.
+
+- phy-connection-type : Defined in ethernet.txt.
+
+- mdio : "mdio" node, defined in mdio.txt.
+
+- ethernet-phy : "ethernet-phy" node, defined in phy.txt.
+
+Example:
+
+ ethernet@0,0 {
+ compatible = "fsl,enetc";
+ reg = <0x000000 0 0 0 0>;
+ phy-handle = <&sgmii_phy0>;
+ phy-connection-type = "sgmii";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ sgmii_phy0: ethernet-phy@2 {
+ reg = <0x2>;
+ };
+ };
+ };
+
+2) The ENETC port is an internal port or has a fixed-link external
+connection:
+
+In this case, the ENETC port node defines a fixed link connection,
+as specified by "fixed-link.txt", under
+Documentation/devicetree/bindings/net/*.
+
+Required:
+
+- fixed-link : "fixed-link" node, defined in "fixed-link.txt".
+
+Example:
+ ethernet@0,2 {
+ compatible = "fsl,enetc";
+ reg = <0x000200 0 0 0 0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 3e17ac1d5d58..174f292d8a3e 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -3,8 +3,8 @@
Required properties:
- compatible: Should be "cdns,[<chip>-]{macb|gem}"
Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
- Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs or the 10/100Mbit IP
- available on sama5d3 SoCs.
+ Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs.
+ Use "cdns,sam9x60-macb" for Microchip sam9x60 SoC.
Use "cdns,np4-macb" for NP4 SoC devices.
Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index bedcfd5a52cd..691f886cfc4a 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -19,7 +19,7 @@ Optional properties:
"marvell,armada-370-neta" and 9800B for others.
- clock-names: List of names corresponding to clocks property; shall be
"core" for core clock and "bus" for the optional bus clock.
-
+- phys: comphy for the ethernet port, see ../phy/phy-bindings.txt
Optional properties (valid only for Armada XP/38x):
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt b/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt
new file mode 100644
index 000000000000..534e38058fe0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt
@@ -0,0 +1,82 @@
+Properties for an MDIO bus multiplexer consumer device
+
+This is a special case of an MDIO mux where the MDIO mux is defined as a
+consumer of a mux producer device. The mux producer can be of any type, such
+as an mmio mux producer, a gpio mux producer or a generic register based mux
+producer.
+
+Required properties in addition to the MDIO Bus multiplexer properties:
+
+- compatible : should be "mmio-mux-multiplexer"
+- mux-controls : mux controller node to use for operating the mux
+- mdio-parent-bus : phandle to the parent MDIO bus.
+
+Each child node of the MDIO bus multiplexer consumer device represents an
+MDIO bus.
+
+For more information please refer to
+Documentation/devicetree/bindings/mux/mux-controller.txt
+and Documentation/devicetree/bindings/net/mdio-mux.txt
+
+Example:
+In the example below the mux producer and consumer are separate nodes.
+
+&i2c0 {
+ fpga@66 { // fpga connected to i2c
+ compatible = "fsl,lx2160aqds-fpga", "fsl,fpga-qixis-i2c",
+ "simple-mfd";
+ reg = <0x66>;
+
+ mux: mux-controller { // Mux Producer
+ compatible = "reg-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x54 0xf8>, /* 0: reg 0x54, bits 7:3 */
+ <0x54 0x07>; /* 1: reg 0x54, bits 2:0 */
+ };
+ };
+};
+
+mdio-mux-1 { // Mux consumer
+ compatible = "mdio-mux-multiplexer";
+ mux-controls = <&mux 0>;
+ mdio-parent-bus = <&emdio1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mdio@8 {
+ reg = <0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ ..
+ ..
+};
+
+mdio-mux-2 { // Mux consumer
+ compatible = "mdio-mux-multiplexer";
+ mux-controls = <&mux 1>;
+ mdio-parent-bus = <&emdio2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mdio@1 {
+ reg = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ ..
+ ..
+};
diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
index 14ceb2a5b4e8..41a7dcc80f5b 100644
--- a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
@@ -33,3 +33,67 @@ Example:
clock-names = "ref";
};
};
+
+MediaTek UART based Bluetooth Devices
+==================================
+
+This device is a serial-attached device and thus it must be a child node of
+the UART serial node.
+
+Please refer to the following documents for generic properties:
+
+ Documentation/devicetree/bindings/serial/slave-device.txt
+
+Required properties:
+
+- compatible: Must be
+ "mediatek,mt7663u-bluetooth": for MT7663U device
+ "mediatek,mt7668u-bluetooth": for MT7668U device
+- vcc-supply: Main voltage regulator
+- pinctrl-names: Should be "default", "runtime"
+- pinctrl-0: Should contain pin control settings that drive UART RXD low when
+	     the device is powered up, to enter the proper bootstrap mode.
+- pinctrl-1: Should contain the UART mode pin control settings.
+
+Optional properties:
+
+- reset-gpios: GPIO used to reset the device; its initial state should be kept
+	       low. If the GPIO is missing, the reset must be guaranteed by the
+	       board-level design.
+- current-speed: Current baud rate of the device, which defaults to 921600
+
+Example:
+
+ uart1_pins_boot: uart1-default {
+ pins-dat {
+ pinmux = <MT7623_PIN_81_URXD1_FUNC_GPIO81>;
+ output-low;
+ };
+ };
+
+ uart1_pins_runtime: uart1-runtime {
+ pins-dat {
+ pinmux = <MT7623_PIN_81_URXD1_FUNC_URXD1>,
+ <MT7623_PIN_82_UTXD1_FUNC_UTXD1>;
+ };
+ };
+
+ uart1: serial@11003000 {
+ compatible = "mediatek,mt7623-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11003000 0 0x400>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_UART1_SEL>,
+ <&pericfg CLK_PERI_UART1>;
+ clock-names = "baud", "bus";
+
+ bluetooth {
+ compatible = "mediatek,mt7663u-bluetooth";
+ vcc-supply = <&reg_5v>;
+ reset-gpios = <&pio 24 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default", "runtime";
+ pinctrl-0 = <&uart1_pins_boot>;
+ pinctrl-1 = <&uart1_pins_runtime>;
+ current-speed = <921600>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/nixge.txt b/Documentation/devicetree/bindings/net/nixge.txt
index e55af7f0881a..85d7240a9b20 100644
--- a/Documentation/devicetree/bindings/net/nixge.txt
+++ b/Documentation/devicetree/bindings/net/nixge.txt
@@ -1,17 +1,53 @@
* NI XGE Ethernet controller
Required properties:
-- compatible: Should be "ni,xge-enet-2.00"
-- reg: Address and length of the register set for the device
+- compatible: Should be "ni,xge-enet-3.00", but can be "ni,xge-enet-2.00" for
+ older device trees with DMA engines co-located in the address map,
+ with the one reg entry to describe the whole device.
+- reg: Address and length of the register set for the device. It contains the
+ information of registers in the same order as described by reg-names.
+- reg-names: Should contain the reg names
+ "dma": DMA engine control and status region
+ "ctrl": MDIO and PHY control and status region
- interrupts: Should contain tx and rx interrupt
- interrupt-names: Should be "rx" and "tx"
- phy-mode: See ethernet.txt file in the same directory.
-- phy-handle: See ethernet.txt file in the same directory.
- nvmem-cells: Phandle of nvmem cell containing the MAC address
- nvmem-cell-names: Should be "address"
+Optional properties:
+- mdio subnode to indicate presence of MDIO controller
+- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
+ Use instead of phy-handle.
+- phy-handle: See ethernet.txt file in the same directory.
+
Examples (10G generic PHY):
nixge0: ethernet@40000000 {
+ compatible = "ni,xge-enet-3.00";
+ reg = <0x40000000 0x4000
+ 0x41002000 0x2000>;
+ reg-names = "dma", "ctrl";
+
+ nvmem-cells = <&eth1_addr>;
+ nvmem-cell-names = "address";
+
+ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>, <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx", "tx";
+ interrupt-parent = <&intc>;
+
+ phy-mode = "xgmii";
+ phy-handle = <&ethernet_phy1>;
+
+ mdio {
+ ethernet_phy1: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <4>;
+ };
+ };
+ };
+
+Examples (10G generic PHY, no MDIO):
+ nixge0: ethernet@40000000 {
compatible = "ni,xge-enet-2.00";
reg = <0x40000000 0x6000>;
@@ -24,9 +60,33 @@ Examples (10G generic PHY):
phy-mode = "xgmii";
phy-handle = <&ethernet_phy1>;
+ };
+
+Examples (1G generic fixed-link + MDIO):
+ nixge0: ethernet@40000000 {
+ compatible = "ni,xge-enet-2.00";
+ reg = <0x40000000 0x6000>;
- ethernet_phy1: ethernet-phy@4 {
- compatible = "ethernet-phy-ieee802.3-c45";
- reg = <4>;
+ nvmem-cells = <&eth1_addr>;
+ nvmem-cell-names = "address";
+
+ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>, <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx", "tx";
+ interrupt-parent = <&intc>;
+
+ phy-mode = "xgmii";
+
+ fixed-link {
+ speed = <1000>;
+ pause;
+ link-gpios = <&gpio0 63 GPIO_ACTIVE_HIGH>;
+ };
+
+ mdio {
+ ethernet_phy1: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <4>;
+ };
};
+
};
diff --git a/Documentation/devicetree/bindings/net/qcom,ethqos.txt b/Documentation/devicetree/bindings/net/qcom,ethqos.txt
new file mode 100644
index 000000000000..fcf5035810b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,ethqos.txt
@@ -0,0 +1,64 @@
+Qualcomm Ethernet ETHQOS device
+
+This documents the dwmac-based Ethernet device which supports Gigabit
+Ethernet for version v2.3.0 onwards.
+
+This device has the following properties:
+
+Required properties:
+
+- compatible: Should be "qcom,qcs404-ethqos"
+
+- reg: Address and length of the register set for the device
+
+- reg-names: Should contain register names "stmmaceth", "rgmii"
+
+- clocks: Should contain phandles to the input clocks
+
+- clock-names: Should contain clock names "stmmaceth", "pclk",
+		"ptp_ref", "rgmii"
+
+- interrupts: Should contain the interrupt specifiers
+
+- interrupt-names: Should contain interrupt names "macirq", "eth_lpi"
+
+The rest of the properties are defined in the stmmac.txt file in the same
+directory.
+
+
+Example:
+
+ethernet: ethernet@7a80000 {
+ compatible = "qcom,qcs404-ethqos";
+ reg = <0x07a80000 0x10000>,
+ <0x07a96000 0x100>;
+ reg-names = "stmmaceth", "rgmii";
+ clock-names = "stmmaceth", "pclk", "ptp_ref", "rgmii";
+ clocks = <&gcc GCC_ETH_AXI_CLK>,
+ <&gcc GCC_ETH_SLAVE_AHB_CLK>,
+ <&gcc GCC_ETH_PTP_CLK>,
+ <&gcc GCC_ETH_RGMII_CLK>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ snps,reset-gpio = <&tlmm 60 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+
+ snps,txpbl = <8>;
+ snps,rxpbl = <2>;
+ snps,aal;
+ snps,tso;
+
+ phy-handle = <&phy1>;
+ phy-mode = "rgmii";
+
+ mdio {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: phy@4 {
+ device_type = "ethernet-phy";
+ reg = <0x4>;
+ };
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
index 0c17a0ec9b7b..7b9a776230c0 100644
--- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
@@ -4,6 +4,13 @@ This node provides properties for configuring the MediaTek mt76xx wireless
device. The node is expected to be specified as a child node of the PCI
controller to which the wireless chip is connected.
+Alternatively, it can specify the wireless part of the MT7628/MT7688 SoC.
+For the SoC variant, use the compatible string "mediatek,mt7628-wmac" and the
+following properties:
+
+- reg: Address and length of the register set for the device.
+- interrupts: Main device interrupt
+
Optional properties:
- mac-address: See ethernet.txt in the parent directory
@@ -30,3 +37,15 @@ Optional nodes:
};
};
};
+
+MT7628 example:
+
+wmac: wmac@10300000 {
+ compatible = "mediatek,mt7628-wmac";
+ reg = <0x10300000 0x100000>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+
+ mediatek,mtd-eeprom = <&factory 0x0000>;
+};
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 792bc5fafeb9..7a999a135e56 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -1,7 +1,7 @@
Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
This binding represents the on-chip eFuse OTP controller found on
-i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL and i.MX6SLL SoCs.
+i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ and i.MX6SLL SoCs.
Required properties:
- compatible: should be one of
@@ -9,8 +9,10 @@ Required properties:
"fsl,imx6sl-ocotp" (i.MX6SL), or
"fsl,imx6sx-ocotp" (i.MX6SX),
"fsl,imx6ul-ocotp" (i.MX6UL),
+ "fsl,imx6ull-ocotp" (i.MX6ULL/ULZ),
"fsl,imx7d-ocotp" (i.MX7D/S),
"fsl,imx6sll-ocotp" (i.MX6SLL),
+ "fsl,imx7ulp-ocotp" (i.MX7ULP),
followed by "syscon".
- #address-cells : Should be 1
- #size-cells : Should be 1
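+
+Example (a minimal sketch for an i.MX6ULL node; the register address and the
+clock reference below are illustrative assumptions, check the SoC dtsi and
+clock bindings for the actual values):
+
+	ocotp: ocotp-ctrl@21bc000 {
+		compatible = "fsl,imx6ull-ocotp", "syscon";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x021bc000 0x4000>;
+		/* clock reference is an assumption for this sketch */
+		clocks = <&clks IMX6UL_CLK_OCOTP>;
+	};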
diff --git a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
new file mode 100644
index 000000000000..4881561b3a02
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
@@ -0,0 +1,46 @@
+--------------------------------------------------------------------------
+= Zynq UltraScale+ MPSoC nvmem firmware driver binding =
+--------------------------------------------------------------------------
+The nvmem_firmware node provides access to hardware-related data such as the
+SoC revision and IDCODE, using the firmware interface.
+
+Required properties:
+- compatible: should be "xlnx,zynqmp-nvmem-fw"
+
+= Data cells =
+Data cells are child nodes of the nvmem provider node; their bindings are
+described in bindings/nvmem/nvmem.txt
+
+-------
+ Example
+-------
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+
+ nvmem_firmware {
+ compatible = "xlnx,zynqmp-nvmem-fw";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* Data cells */
+ soc_revision: soc_revision {
+ reg = <0x0 0x4>;
+ };
+ };
+ };
+};
+
+= Data consumers =
+Data consumers are device nodes which consume nvmem data cells.
+
+For example:
+ pcap {
+ ...
+
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index c396c4c0af92..76b6c79604a5 100644
--- a/Documentation/devicetree/bindings/opp/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -129,6 +129,9 @@ Optional properties:
- opp-microamp-<name>: Named opp-microamp property. Similar to
opp-microvolt-<name> property, but for microamp instead.
+- opp-level: A value representing the performance level of the device,
+ expressed as a 32-bit integer.
+
- clock-latency-ns: Specifies the maximum possible transition latency (in
nanoseconds) for switching to this OPP from any other OPP.
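+
+For illustration, a minimal sketch of an OPP entry carrying opp-level (the
+frequency and level values are arbitrary placeholders):
+
+	opp-1000000000 {
+		opp-hz = /bits/ 64 <1000000000>;
+		opp-level = <3>;
+	};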
diff --git a/Documentation/devicetree/bindings/phy/cdns,dphy.txt b/Documentation/devicetree/bindings/phy/cdns,dphy.txt
new file mode 100644
index 000000000000..1095bc4e72d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/cdns,dphy.txt
@@ -0,0 +1,20 @@
+Cadence DPHY
+============
+
+Cadence DPHY block.
+
+Required properties:
+- compatible: should be set to "cdns,dphy".
+- reg: physical base address and length of the DPHY registers.
+- clocks: DPHY reference clocks.
+- clock-names: must contain "psm" and "pll_ref".
+- #phy-cells: must be set to 0.
+
+Example:
+ dphy0: dphy@fd0e0000{
+ compatible = "cdns,dphy";
+ reg = <0x0 0xfd0e0000 0x0 0x1000>;
+ clocks = <&psm_clk>, <&pll_ref_clk>;
+ clock-names = "psm", "pll_ref";
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt b/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt
new file mode 100644
index 000000000000..ad49e5c01334
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt
@@ -0,0 +1,40 @@
+mvebu armada 38x comphy driver
+------------------------------
+
+This comphy controller can be found on Marvell Armada 38x. It provides a
+number of shared PHYs used by various interfaces (network, sata, usb,
+PCIe...).
+
+Required properties:
+
+- compatible: should be "marvell,armada-380-comphy"
+- reg: should contain the comphy register location and length.
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+
+A sub-node is required for each comphy lane provided by the comphy.
+
+Required properties (child nodes):
+
+- reg: comphy lane number.
+- #phy-cells : from the generic phy bindings, must be 1. Defines the
+ input port to use for a given comphy lane.
+
+Example:
+
+ comphy: phy@18300 {
+ compatible = "marvell,armada-380-comphy";
+ reg = <0x18300 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpm_comphy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+
+ cpm_comphy1: phy@1 {
+ reg = <1>;
+ #phy-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
index bfcf80341657..cf2cd86db267 100644
--- a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
+++ b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
@@ -1,16 +1,27 @@
-mvebu comphy driver
--------------------
+MVEBU comphy drivers
+--------------------
-A comphy controller can be found on Marvell Armada 7k/8k on the CP110. It
-provides a number of shared PHYs used by various interfaces (network, sata,
-usb, PCIe...).
+COMPHY controllers can be found on the following Marvell MVEBU SoCs:
+* Armada 7k/8k (on the CP110)
+* Armada 3700
+They provide a number of shared PHYs used by various interfaces (network, SATA,
+USB, PCIe...).
Required properties:
-- compatible: should be "marvell,comphy-cp110"
-- reg: should contain the comphy register location and length.
-- marvell,system-controller: should contain a phandle to the
- system controller node.
+- compatible: should be one of:
+ * "marvell,comphy-cp110" for Armada 7k/8k
+ * "marvell,comphy-a3700" for Armada 3700
+- reg: should contain the COMPHY register(s) location(s) and length(s).
+ * 1 entry for Armada 7k/8k
+ * 4 entries for Armada 3700 along with the corresponding reg-names
+   properties; the memory areas are:
+ * Generic COMPHY registers
+ * Lane 1 (PCIe/GbE)
+ * Lane 0 (USB3/GbE)
+ * Lane 2 (SATA/USB3)
+- marvell,system-controller: should contain a phandle to the system
+ controller node (only for Armada 7k/8k)
- #address-cells: should be 1.
- #size-cells: should be 0.
@@ -18,11 +29,11 @@ A sub-node is required for each comphy lane provided by the comphy.
Required properties (child nodes):
-- reg: comphy lane number.
-- #phy-cells : from the generic phy bindings, must be 1. Defines the
+- reg: COMPHY lane number.
+- #phy-cells : from the generic PHY bindings, must be 1. Defines the
input port to use for a given comphy lane.
-Example:
+Examples:
cpm_comphy: phy@120000 {
compatible = "marvell,comphy-cp110";
@@ -41,3 +52,33 @@ Example:
#phy-cells = <1>;
};
};
+
+ comphy: phy@18300 {
+ compatible = "marvell,comphy-a3700";
+ reg = <0x18300 0x300>,
+ <0x1F000 0x400>,
+ <0x5C000 0x400>,
+ <0xe0178 0x8>;
+ reg-names = "comphy",
+ "lane1_pcie_gbe",
+ "lane0_usb3_gbe",
+ "lane2_sata_usb3";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+
+ comphy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+
+ comphy1: phy@1 {
+ reg = <1>;
+ #phy-cells = <1>;
+ };
+
+ comphy2: phy@2 {
+ reg = <2>;
+ #phy-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
new file mode 100644
index 000000000000..aa99ceec73b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
@@ -0,0 +1,38 @@
+MVEBU A3700 UTMI PHY
+--------------------
+
+USB2 UTMI+ PHY controllers can be found on the following Marvell MVEBU SoCs:
+* Armada 3700
+
+On Armada 3700, there are two USB controllers: one is compatible with the USB2
+and USB3 specifications and supports OTG; the other is USB2 compliant and
+supports host mode only. Each of these controllers comes with a slightly
+different UTMI PHY.
+
+Required Properties:
+
+- compatible: Should be one of:
+ * "marvell,a3700-utmi-host-phy" for the PHY connected to
+ the USB2 host-only controller.
+ * "marvell,a3700-utmi-otg-phy" for the PHY connected to
+ the USB3 and USB2 OTG capable controller.
+- reg: PHY IP register range.
+- marvell,usb-misc-reg: phandle to the "USB miscellaneous registers" shared
+                        region covering registers related to both the host
+                        controller and the PHY.
+- #phy-cells: Standard property (see phy-bindings.txt); should be 0.
+
+
+Example:
+
+ usb2_utmi_host_phy: phy@5f000 {
+ compatible = "marvell,armada-3700-utmi-host-phy";
+ reg = <0x5f000 0x800>;
+ marvell,usb-misc-reg = <&usb2_syscon>;
+ #phy-cells = <0>;
+ };
+
+ usb2_syscon: system-controller@5f800 {
+ compatible = "marvell,armada-3700-usb2-host-misc", "syscon";
+ reg = <0x5f800 0x800>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
index 074a7b3b0425..00639baae74a 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
@@ -23,6 +23,8 @@ Optional properties:
register files". When set driver will request its
phandle as one companion-grf for some special SoCs
(e.g RV1108).
+ - extcon : phandle to the extcon device providing the cable state for
+ the otg phy.
Required nodes : a sub-node is required for each port the phy provides.
The sub-node name is used to identify host or otg port,
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index 41a1074228ba..5d181fc3cc18 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -9,6 +9,8 @@ Required properties:
"qcom,ipq8074-qmp-pcie-phy" for PCIe phy on IPQ8074
"qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
"qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
+ "qcom,msm8998-qmp-usb3-phy" for USB3 QMP V3 phy on msm8998,
+ "qcom,msm8998-qmp-ufs-phy" for UFS QMP phy on msm8998,
"qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
"qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845,
"qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845.
@@ -42,6 +44,10 @@ Required properties:
"aux", "cfg_ahb", "ref".
For "qcom,msm8996-qmp-usb3-phy" must contain:
"aux", "cfg_ahb", "ref".
+ For "qcom,msm8998-qmp-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,msm8998-qmp-ufs-phy" must contain:
+ "ref", "ref_aux".
For "qcom,sdm845-qmp-usb3-phy" must contain:
"aux", "cfg_ahb", "ref", "com_aux".
For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
@@ -61,6 +67,9 @@ Required properties:
"phy", "common", "cfg".
For "qcom,msm8996-qmp-usb3-phy" must contain
"phy", "common".
+ For "qcom,msm8998-qmp-usb3-phy" must contain
+ "phy", "common".
+ For "qcom,msm8998-qmp-ufs-phy": no resets are listed.
For "qcom,sdm845-qmp-usb3-phy" must contain:
"phy", "common".
For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
diff --git a/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt
index 03025d97998b..fe29f9e0af6d 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt
@@ -6,6 +6,7 @@ QUSB2 controller supports LS/FS/HS usb connectivity on Qualcomm chipsets.
Required properties:
- compatible: compatible list, contains
"qcom,msm8996-qusb2-phy" for 14nm PHY on msm8996,
+ "qcom,msm8998-qusb2-phy" for 10nm PHY on msm8998,
"qcom,sdm845-qusb2-phy" for 10nm PHY on sdm845.
- reg: offset and length of the PHY register set.
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index de7b5393c163..ad9c290d8f15 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -6,6 +6,8 @@ This file provides information on what the device node for the R-Car generation
Required properties:
- compatible: "renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1
SoC.
+ "renesas,usb2-phy-r8a774c0" if the device is a part of an R8A774C0
+ SoC.
"renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
SoC.
"renesas,usb2-phy-r8a7796" if the device is a part of an R8A7796
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index 57dfda8a7a1d..8f93c3b694a7 100644
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -35,6 +35,7 @@ Required properties:
DRA7x
Should be "ti,dra7x-usb2-phy2" for the 2nd instance of USB2 PHY
in DRA7x
+ Should be "ti,am654-usb2" for the USB2 PHYs on AM654.
- reg : Address and length of the register set for the device.
- #phy-cells: determine the number of cells that should be given in the
phandle while referencing this phy.
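+
+Example (hypothetical AM654 USB2 PHY node; the unit address and register
+length are placeholders and the clock wiring is omitted from this sketch):
+
+	usb2_phy0: phy@4100000 {
+		compatible = "ti,am654-usb2";
+		/* register base/length are placeholders, not board values */
+		reg = <0x4100000 0x54>;
+		#phy-cells = <0>;
+	};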
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
index 7c947a996df1..7c7e972aaa42 100644
--- a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
@@ -32,6 +32,9 @@ Required properties:
Optional properties:
- power-supply: Power supply used to power the domain
+- clocks: a number of phandles to clocks that need to be enabled during
+ domain power-up sequencing to ensure reset propagation into devices
+ located inside this power domain
Example:
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.txt b/Documentation/devicetree/bindings/power/qcom,rpmpd.txt
new file mode 100644
index 000000000000..980e5413d18f
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.txt
@@ -0,0 +1,145 @@
+Qualcomm RPM/RPMh Power domains
+
+For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
+which then translates it into a corresponding voltage on a rail.
+
+Required Properties:
+ - compatible: Should be one of the following
+ * qcom,msm8996-rpmpd: RPM Power domain for the msm8996 family of SoCs
+ * qcom,sdm845-rpmhpd: RPMh Power domain for the sdm845 family of SoCs
+ - #power-domain-cells: number of cells in Power domain specifier
+ must be 1.
+ - operating-points-v2: Phandle to the OPP table for the Power domain.
+ Refer to Documentation/devicetree/bindings/power/power_domain.txt
+ and Documentation/devicetree/bindings/opp/opp.txt for more details
+
+Refer to <dt-bindings/power/qcom-rpmpd.h> for the level values for
+various OPPs for different platforms, as well as the power domain indexes.
+
+Example: rpmh power domain controller and OPP table
+
+#include <dt-bindings/power/qcom-rpmhpd.h>
+
+opp-level values specified in the OPP tables for RPMh power domains
+should use the RPMH_REGULATOR_LEVEL_* constants from
+<dt-bindings/power/qcom-rpmhpd.h>
+
+ rpmhpd: power-controller {
+ compatible = "qcom,sdm845-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp1 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp2 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs: opp3 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_svs: opp4 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp5 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_nom: opp6 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp7 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_nom_l2: opp8 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+ };
+
+ rpmhpd_opp_turbo: opp9 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp10 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
+
+Example: rpm power domain controller and OPP table
+
+ rpmpd: power-controller {
+ compatible = "qcom,msm8996-rpmpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmpd_opp_table>;
+
+ rpmpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmpd_opp_low: opp1 {
+ opp-level = <1>;
+ };
+
+ rpmpd_opp_ret: opp2 {
+ opp-level = <2>;
+ };
+
+ rpmpd_opp_svs: opp3 {
+ opp-level = <3>;
+ };
+
+ rpmpd_opp_normal: opp4 {
+ opp-level = <4>;
+ };
+
+ rpmpd_opp_high: opp5 {
+ opp-level = <5>;
+ };
+
+ rpmpd_opp_turbo: opp6 {
+ opp-level = <6>;
+ };
+ };
+ };
+
+Example: Client/Consumer device using OPP table
+
+ leaky-device0@12350000 {
+ compatible = "foo,i-leak-current";
+ reg = <0x12350000 0x1000>;
+ power-domains = <&rpmhpd SDM845_MX>;
+ operating-points-v2 = <&leaky_opp_table>;
+ };
+
+
+ leaky_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp1 {
+ opp-hz = /bits/ 64 <144000>;
+		required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp2 {
+ opp-hz = /bits/ 64 <400000>;
+ required-opps = <&rpmhpd_opp_ret>;
+ };
+
+ opp3 {
+ opp-hz = /bits/ 64 <20000000>;
+ required-opps = <&rpmpd_opp_svs>;
+ };
+
+ opp4 {
+ opp-hz = /bits/ 64 <25000000>;
+ required-opps = <&rpmpd_opp_normal>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
new file mode 100644
index 000000000000..d366f1eb623a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
@@ -0,0 +1,25 @@
+--------------------------------------------------------------------
+Device Tree Bindings for the Xilinx Zynq MPSoC Power Management
+--------------------------------------------------------------------
+The zynqmp-power node describes the power management configurations.
+It controls the remote suspend/shutdown interfaces.
+
+Required properties:
+ - compatible: Must contain: "xlnx,zynqmp-power"
+ - interrupts: Interrupt specifier
+
+-------
+Example
+-------
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+
+ zynqmp_power: zynqmp-power {
+ compatible = "xlnx,zynqmp-power";
+ interrupts = <0 35 4>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt b/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt
new file mode 100644
index 000000000000..8d1b8200ebd0
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt
@@ -0,0 +1,34 @@
+-----------------------------------------------------------
+Device Tree Bindings for the Xilinx Zynq MPSoC PM domains
+-----------------------------------------------------------
+The binding for zynqmp-power-controller follows the common
+generic PM domain binding[1].
+
+[1] Documentation/devicetree/bindings/power/power_domain.txt
+
+== Zynq MPSoC Generic PM Domain Node ==
+
+Required property:
+ - The following property should be present in the zynqmp-firmware node.
+ - #power-domain-cells: Number of cells in a PM domain specifier. Must be 1.
+
+Power domain ID indexes are mentioned in
+include/dt-bindings/power/xlnx-zynqmp-power.h.
+
+-------
+Example
+-------
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ ...
+ #power-domain-cells = <1>;
+ ...
+ };
+};
+
+sata {
+ ...
+ power-domains = <&zynqmp_firmware 28>;
+ ...
+};
diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
index c5d0e7998e2b..454c937076a2 100644
--- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
@@ -17,6 +17,11 @@ Clock Properties:
- fsl,tmr-fiper1 Fixed interval period pulse generator.
- fsl,tmr-fiper2 Fixed interval period pulse generator.
- fsl,max-adj Maximum frequency adjustment in parts per billion.
+ - fsl,extts-fifo The presence of this property indicates hardware
+ support for the external trigger stamp FIFO.
+ - little-endian        The presence of this property indicates that the 1588
+                        timer IP block is in little-endian mode. The default
+                        endian mode is big-endian.
These properties set the operational parameters for the PTP
clock. You must choose these carefully for the clock to work right.
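+
+A hedged sketch of where the new boolean properties sit in a timer node (the
+compatible, unit address and interrupt values are placeholders, and the other
+required clock properties are omitted):
+
+	ptp_clock@24e00 {
+		compatible = "fsl,etsec-ptp";
+		reg = <0x24e00 0xb0>;
+		interrupts = <12 0x8 13 0x8>;
+		fsl,extts-fifo;
+		little-endian;
+		/* fsl,tclk-period and the remaining clock properties omitted */
+	};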
diff --git a/Documentation/devicetree/bindings/regulator/fan53555.txt b/Documentation/devicetree/bindings/regulator/fan53555.txt
index 54a3f2c80e3a..e7fc045281d1 100644
--- a/Documentation/devicetree/bindings/regulator/fan53555.txt
+++ b/Documentation/devicetree/bindings/regulator/fan53555.txt
@@ -1,7 +1,8 @@
Binding for Fairchild FAN53555 regulators
Required properties:
- - compatible: one of "fcs,fan53555", "silergy,syr827", "silergy,syr828"
+ - compatible: one of "fcs,fan53555", "fcs,fan53526", "silergy,syr827" or
+ "silergy,syr828"
- reg: I2C address
Optional properties:
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
deleted file mode 100644
index 0c2a6c8a1536..000000000000
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Fixed Voltage regulators
-
-Required properties:
-- compatible: Must be "regulator-fixed";
-- regulator-name: Defined in regulator.txt as optional, but required here.
-
-Optional properties:
-- gpio: gpio to use for enable control
-- startup-delay-us: startup time in microseconds
-- enable-active-high: Polarity of GPIO is Active high
-If this property is missing, the default assumed is Active low.
-- gpio-open-drain: GPIO is open drain type.
- If this property is missing then default assumption is false.
--vin-supply: Input supply name.
-
-Any property defined as part of the core regulator
-binding, defined in regulator.txt, can also be used.
-However a fixed voltage regulator is expected to have the
-regulator-min-microvolt and regulator-max-microvolt
-to be the same.
-
-Example:
-
- abc: fixedregulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "fixed-supply";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- gpio = <&gpio1 16 0>;
- startup-delay-us = <70000>;
- enable-active-high;
- regulator-boot-on;
- gpio-open-drain;
- vin-supply = <&parent_reg>;
- };
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
new file mode 100644
index 000000000000..d289c2f7455a
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/fixed-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Fixed Voltage regulators
+
+maintainers:
+ - Liam Girdwood <lgirdwood@gmail.com>
+ - Mark Brown <broonie@kernel.org>
+
+description:
+ Any property defined as part of the core regulator binding, defined in
+ regulator.txt, can also be used. However, a fixed voltage regulator is
+ expected to have regulator-min-microvolt and regulator-max-microvolt
+ set to the same value.
+
+properties:
+ compatible:
+ const: regulator-fixed
+
+ regulator-name: true
+
+ gpio:
+ description: gpio to use for enable control
+ maxItems: 1
+
+ startup-delay-us:
+ description: startup time in microseconds
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ enable-active-high:
+ description:
+ Polarity of GPIO is Active high. If this property is missing,
+ the default assumed is Active low.
+ type: boolean
+
+ gpio-open-drain:
+ description:
+ GPIO is open drain type. If this property is missing then default
+ assumption is false.
+ type: boolean
+
+ vin-supply:
+ description: Input supply phandle.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+ - compatible
+ - regulator-name
+
+examples:
+ - |
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio1 16 0>;
+ startup-delay-us = <70000>;
+ enable-active-high;
+ regulator-boot-on;
+ gpio-open-drain;
+ vin-supply = <&parent_reg>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/regulator/max77650-regulator.txt b/Documentation/devicetree/bindings/regulator/max77650-regulator.txt
new file mode 100644
index 000000000000..f1cbe813c30f
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max77650-regulator.txt
@@ -0,0 +1,41 @@
+Regulator driver for MAX77650 PMIC from Maxim Integrated.
+
+This module is part of the MAX77650 MFD device. For more details
+see Documentation/devicetree/bindings/mfd/max77650.txt.
+
+The regulator controller is represented as a sub-node of the PMIC node
+in the device tree.
+
+The device has a single LDO regulator and a SIMO buck-boost regulator with
+three independent power rails.
+
+Required properties:
+--------------------
+- compatible: Must be "maxim,max77650-regulator"
+
+Each rail must be instantiated under the regulators subnode of the top PMIC
+node. Up to four regulators can be defined. For standard regulator properties
+refer to Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Available regulator compatible strings are: "ldo", "sbb0", "sbb1", "sbb2".
+
+Example:
+--------
+
+ regulators {
+ compatible = "maxim,max77650-regulator";
+
+ max77650_ldo: regulator@0 {
+ regulator-compatible = "ldo";
+ regulator-name = "max77650-ldo";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <2937500>;
+ };
+
+ max77650_sbb0: regulator@1 {
+ regulator-compatible = "sbb0";
+ regulator-name = "max77650-sbb0";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1587500>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index f9be1acf891c..4d3b12b92cb3 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -8,7 +8,7 @@ Optional properties:
- fsl,pfuze-support-disable-sw: Boolean, if present disable all unused switch
regulators to save power consumption. Attention, ensure that all important
regulators (e.g. DDR ref, DDR supply) has set the "regulator-always-on"
- property. If not present, the switched regualtors are always on and can't be
+ property. If not present, the switched regulators are always on and can't be
disabled. This binding is a workaround to keep backward compatibility with
old dtb's which rely on the fact that the switched regulators are always on
and don't mark them explicit as "regulator-always-on".
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt
new file mode 100644
index 000000000000..698cfc3bc3dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt
@@ -0,0 +1,68 @@
+ROHM BD70528 Power Management Integrated Circuit regulator bindings
+
+Required properties:
+ - regulator-name: should be "buck1", "buck2", "buck3", "ldo1", "ldo2", "ldo3",
+ "led_ldo1", "led_ldo2"
+
+List of regulators provided by this controller. The BD70528 regulators node
+should be a sub-node of the BD70528 MFD node. See the BD70528 MFD bindings at
+Documentation/devicetree/bindings/mfd/rohm,bd70528-pmic.txt
+
+The valid names for BD70528 regulator nodes are:
+BUCK1, BUCK2, BUCK3, LDO1, LDO2, LDO3, LED_LDO1, LED_LDO2
+
+Optional properties:
+- Any optional property defined in bindings/regulator/regulator.txt
+
+Example:
+regulators {
+ buck1: BUCK1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-boot-on;
+ regulator-ramp-delay = <125>;
+ };
+ buck2: BUCK2 {
+ regulator-name = "buck2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-ramp-delay = <125>;
+ };
+ buck3: BUCK3 {
+ regulator-name = "buck3";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-ramp-delay = <250>;
+ };
+ ldo1: LDO1 {
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+ ldo2: LDO2 {
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ };
+
+ ldo3: LDO3 {
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ led_ldo1: LED_LDO1 {
+ regulator-name = "led_ldo1";
+ regulator-min-microvolt = <200000>;
+ regulator-max-microvolt = <300000>;
+ };
+ led_ldo2: LED_LDO2 {
+ regulator-name = "led_ldo2";
+ regulator-min-microvolt = <200000>;
+ regulator-max-microvolt = <300000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
index 4b98ca26e61a..cbce62c22b60 100644
--- a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
@@ -27,8 +27,38 @@ BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6
LDO1, LDO2, LDO3, LDO4, LDO5, LDO6
Optional properties:
+- rohm,dvs-run-voltage : PMIC default "RUN" state voltage in uV.
+			 See the table below for bucks which support this.
+- rohm,dvs-idle-voltage : PMIC default "IDLE" state voltage in uV.
+			  See the table below for bucks which support this.
+- rohm,dvs-suspend-voltage : PMIC default "SUSPEND" state voltage in uV.
+			     See the table below for bucks which support this.
- Any optional property defined in bindings/regulator/regulator.txt
+Supported default DVS states:
+
+BD71837:
+buck | dvs-run-voltage | dvs-idle-voltage | dvs-suspend-voltage
+-----------------------------------------------------------------------------
+1 | supported | supported | supported
+----------------------------------------------------------------------------
+2 | supported | supported | not supported
+----------------------------------------------------------------------------
+3 | supported | not supported | not supported
+----------------------------------------------------------------------------
+4 | supported | not supported | not supported
+----------------------------------------------------------------------------
+rest | not supported | not supported | not supported
+
+BD71847:
+buck | dvs-run-voltage | dvs-idle-voltage | dvs-suspend-voltage
+-----------------------------------------------------------------------------
+1 | supported | supported | supported
+----------------------------------------------------------------------------
+2 | supported | supported | not supported
+----------------------------------------------------------------------------
+rest | not supported | not supported | not supported
+
Example:
regulators {
buck1: BUCK1 {
@@ -36,7 +66,11 @@ regulators {
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
+ regulator-always-on;
regulator-ramp-delay = <1250>;
+ rohm,dvs-run-voltage = <900000>;
+ rohm,dvs-idle-voltage = <850000>;
+ rohm,dvs-suspend-voltage = <800000>;
};
buck2: BUCK2 {
regulator-name = "buck2";
@@ -45,18 +79,22 @@ regulators {
regulator-boot-on;
regulator-always-on;
regulator-ramp-delay = <1250>;
+ rohm,dvs-run-voltage = <1000000>;
+ rohm,dvs-idle-voltage = <900000>;
};
buck3: BUCK3 {
regulator-name = "buck3";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
+ rohm,dvs-run-voltage = <1000000>;
};
buck4: BUCK4 {
regulator-name = "buck4";
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
+ rohm,dvs-run-voltage = <1000000>;
};
buck5: BUCK5 {
regulator-name = "buck5";
diff --git a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt
index a3f476240565..6189df71ea98 100644
--- a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt
@@ -23,16 +23,14 @@ Switches are fixed voltage regulators with only enable/disable capability.
Optional properties:
- st,mask-reset: mask reset for this regulator: the regulator configuration
is maintained during pmic reset.
-- regulator-pull-down: enable high pull down
- if not specified light pull down is used
- regulator-over-current-protection:
if set, all regulators are switched off in case of over-current detection
on this regulator,
if not set, the driver only sends an over-current event.
-- interrupt-parent: phandle to the parent interrupt controller
- interrupts: index of current limit detection interrupt
- <regulator>-supply: phandle to the parent supply/regulator node
each regulator supply can be described except vref_ddr.
+- regulator-active-discharge: can be used on pwr_sw1 and pwr_sw2.
Example:
regulators {
@@ -43,7 +41,6 @@ regulators {
vdd_core: buck1 {
regulator-name = "vdd_core";
interrupts = <IT_CURLIM_BUCK1 0>;
- interrupt-parent = <&pmic>;
st,mask-reset;
regulator-pull-down;
regulator-min-microvolt = <700000>;
@@ -53,7 +50,6 @@ regulators {
v3v3: buck4 {
regulator-name = "v3v3";
interrupts = <IT_CURLIM_BUCK4 0>;
- interrupt-parent = <&mypmic>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
diff --git a/Documentation/devicetree/bindings/regulator/tps65218.txt b/Documentation/devicetree/bindings/regulator/tps65218.txt
index 02f0e9bbfbf8..54aded3b78e2 100644
--- a/Documentation/devicetree/bindings/regulator/tps65218.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65218.txt
@@ -71,8 +71,13 @@ tps65218: tps65218@24 {
regulator-always-on;
};
+ ls2: regulator-ls2 {
+ regulator-min-microamp = <100000>;
+ regulator-max-microamp = <1000000>;
+ };
+
ls3: regulator-ls3 {
- regulator-min-microvolt = <100000>;
- regulator-max-microvolt = <1000000>;
+ regulator-min-microamp = <100000>;
+ regulator-max-microamp = <1000000>;
};
};
diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
new file mode 100644
index 000000000000..6e5341b4f891
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
@@ -0,0 +1,27 @@
+Broadcom STB SW_INIT-style reset controller
+===========================================
+
+Broadcom STB SoCs have a SW_INIT-style reset controller with separate
+SET/CLEAR/STATUS registers and possibly multiple banks, each providing 32
+reset lines.
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required properties:
+- compatible: should be brcm,brcmstb-reset
+- reg: register base and length
+- #reset-cells: must be set to 1
+
+Example:
+
+ reset: reset-controller@8404318 {
+ compatible = "brcm,brcmstb-reset";
+ reg = <0x8404318 0x30>;
+ #reset-cells = <1>;
+ };
+
+ &ethernet_switch {
+ resets = <&reset>;
+ reset-names = "switch";
+ };
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
index 1ab1d109318e..2ecf33815d18 100644
--- a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
+++ b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
@@ -5,7 +5,9 @@ Please also refer to reset.txt in this directory for common reset
controller binding usage.
Required properties:
-- compatible: Should be "fsl,imx7d-src", "syscon"
+- compatible:
+ - For i.MX7 SoCs should be "fsl,imx7d-src", "syscon"
+ - For i.MX8MQ SoCs should be "fsl,imx8mq-src", "syscon"
- reg: should be register base and length as documented in the
datasheet
- interrupts: Should contain SRC interrupt
@@ -44,4 +46,5 @@ Example:
For a list of all valid reset indices, see
-<dt-bindings/reset/imx7-reset.h>
+<dt-bindings/reset/imx7-reset.h> for i.MX7 and
+<dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ
diff --git a/Documentation/devicetree/bindings/reset/socfpga-reset.txt b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
index 98c9f560e5c5..38fe34fd8b8a 100644
--- a/Documentation/devicetree/bindings/reset/socfpga-reset.txt
+++ b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
@@ -1,7 +1,8 @@
Altera SOCFPGA Reset Manager
Required properties:
-- compatible : "altr,rst-mgr"
+- compatible : "altr,rst-mgr" for (Cyclone5/Arria5/Arria10)
+ "altr,stratix10-rst-mgr","altr,rst-mgr" for Stratix10 ARM64 SoC
- reg : Should contain 1 register ranges(address and length)
- altr,modrst-offset : Should contain the offset of the first modrst register.
- #reset-cells: 1
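+
+Example (a minimal Stratix10 sketch; the register base and modrst offset are
+placeholders, consult the SoC documentation for the actual values):
+
+	rst: rstmgr@ffd11000 {
+		compatible = "altr,stratix10-rst-mgr", "altr,rst-mgr";
+		reg = <0xffd11000 0x100>;
+		altr,modrst-offset = <0x20>;
+		#reset-cells = <1>;
+	};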
diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
index 101743dda223..ea005177d20a 100644
--- a/Documentation/devicetree/bindings/reset/uniphier-reset.txt
+++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
@@ -120,27 +120,30 @@ Example:
};
-USB3 core reset
----------------
+Peripheral core reset in glue layer
+-----------------------------------
-USB3 core reset belongs to USB3 glue layer. Before using the core reset,
-it is necessary to control the clocks and resets to enable this layer.
-These clocks and resets should be described in each property.
+Some peripheral core resets belong to their own glue layer. Before using
+such a core reset, it is necessary to control the clocks and resets to enable
+this layer. These clocks and resets should be described in each property.
Required properties:
- compatible: Should be
- "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC
- "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC
- "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC
- "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC
+ "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC USB3
+ "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC USB3
+ "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC USB3
+ "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC USB3
+ "socionext,uniphier-pro4-ahci-reset" - for Pro4 SoC AHCI
+ "socionext,uniphier-pxs2-ahci-reset" - for PXs2 SoC AHCI
+ "socionext,uniphier-pxs3-ahci-reset" - for PXs3 SoC AHCI
- #reset-cells: Should be 1.
- reg: Specifies offset and length of the register set for the device.
-- clocks: A list of phandles to the clock gate for USB3 glue layer.
+- clocks: A list of phandles to the clock gate for the glue layer.
According to the clock-names, appropriate clocks are required.
- clock-names: Should contain
"gio", "link" - for Pro4 SoC
"link" - for others
-- resets: A list of phandles to the reset control for USB3 glue layer.
+- resets: A list of phandles to the reset control for the glue layer.
According to the reset-names, appropriate resets are required.
- reset-names: Should contain
"gio", "link" - for Pro4 SoC
diff --git a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt
new file mode 100644
index 000000000000..27a45fe5ecf1
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt
@@ -0,0 +1,52 @@
+--------------------------------------------------------------------------
+ = Zynq UltraScale+ MPSoC reset driver binding =
+--------------------------------------------------------------------------
+The Zynq UltraScale+ MPSoC has several different resets.
+
+See Chapter 36 of the Zynq UltraScale+ MPSoC TRM (UG) for more information
+about zynqmp resets.
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required Properties:
+- compatible: "xlnx,zynqmp-reset"
+- #reset-cells: Specifies the number of cells needed to encode reset
+ line, should be 1
+
+-------
+Example
+-------
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+
+ zynqmp_reset: reset-controller {
+ compatible = "xlnx,zynqmp-reset";
+ #reset-cells = <1>;
+ };
+ };
+};
+
+Specifying reset lines connected to IP modules
+==============================================
+
+Device nodes that need access to reset lines should
+specify them as a reset phandle in their corresponding node as
+specified in reset.txt.
+
+For a list of all valid reset indices, see
+<dt-bindings/reset/xlnx-zynqmp-resets.h>
+
+Example:
+
+serdes: zynqmp_phy@fd400000 {
+ ...
+
+ resets = <&zynqmp_reset ZYNQMP_RESET_SATA>;
+ reset-names = "sata_rst";
+
+ ...
+};
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
index da50321da34d..3cba12f855b7 100644
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
@@ -21,6 +21,7 @@ Required properties:
- "altr,16550-FIFO128"
- "fsl,16550-FIFO64"
- "fsl,ns16550"
+ - "intel,xscale-uart"
- "ti,da830-uart"
- "aspeed,ast2400-vuart"
- "aspeed,ast2500-vuart"
diff --git a/Documentation/devicetree/bindings/serial/ingenic,uart.txt b/Documentation/devicetree/bindings/serial/ingenic,uart.txt
index c3c6406d5cfe..24ed8769f4af 100644
--- a/Documentation/devicetree/bindings/serial/ingenic,uart.txt
+++ b/Documentation/devicetree/bindings/serial/ingenic,uart.txt
@@ -6,7 +6,8 @@ Required properties:
- "ingenic,jz4760-uart",
- "ingenic,jz4770-uart",
- "ingenic,jz4775-uart",
- - "ingenic,jz4780-uart".
+ - "ingenic,jz4780-uart",
+ - "ingenic,x1000-uart".
- reg : offset and length of the register set for the device.
- interrupts : should contain uart interrupt.
- clocks : phandles to the module & baud clocks.
diff --git a/Documentation/devicetree/bindings/serial/milbeaut-uart.txt b/Documentation/devicetree/bindings/serial/milbeaut-uart.txt
new file mode 100644
index 000000000000..3d2fb1a7ba94
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/milbeaut-uart.txt
@@ -0,0 +1,21 @@
+Socionext Milbeaut UART controller
+
+Required properties:
+- compatible: should be "socionext,milbeaut-usio-uart".
+- reg: offset and length of the register set for the device.
+- interrupts: two interrupt specifiers.
+- interrupt-names: should be "rx", "tx".
+- clocks: phandle to the input clock.
+
+Optional properties:
+- auto-flow-control: enables automatic flow control.
+
+Example:
+ usio1: usio_uart@1e700010 {
+ compatible = "socionext,milbeaut-usio-uart";
+ reg = <0x1e700010 0x10>;
+ interrupts = <0 141 0x4>, <0 149 0x4>;
+ interrupt-names = "rx", "tx";
+ clocks = <&clk 2>;
+ auto-flow-control;
+ };
diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra194-tcu.txt b/Documentation/devicetree/bindings/serial/nvidia,tegra194-tcu.txt
new file mode 100644
index 000000000000..085a8591accd
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/nvidia,tegra194-tcu.txt
@@ -0,0 +1,35 @@
+NVIDIA Tegra Combined UART (TCU)
+
+The TCU is a system for sharing a hardware UART instance among multiple
+systems within the Tegra SoC. It is implemented through a mailbox-
+based protocol where each "virtual UART" has a pair of mailboxes, one
+for transmitting and one for receiving, that is used to communicate
+with the hardware implementing the TCU.
+
+Required properties:
+- name : Should be tcu
+- compatible
+ Array of strings
+ One of:
+ - "nvidia,tegra194-tcu"
+- mbox-names:
+ "rx" - Mailbox for receiving data from hardware UART
+ "tx" - Mailbox for transmitting data to hardware UART
+- mboxes: Mailboxes corresponding to the mbox-names.
+
+This node is a mailbox consumer. See the following files for details of
+the mailbox subsystem, and the specifiers implemented by the relevant
+provider(s):
+
+- .../mailbox/mailbox.txt
+- .../mailbox/nvidia,tegra186-hsp.txt
+
+Example bindings:
+-----------------
+
+tcu: tcu {
+ compatible = "nvidia,tegra194-tcu";
+ mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_SM 0>,
+ <&hsp_aon TEGRA_HSP_MBOX_TYPE_SM 1>;
+ mbox-names = "rx", "tx";
+};
diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
index c35d5ece1156..0a9b5444f4e6 100644
--- a/Documentation/devicetree/bindings/serial/omap_serial.txt
+++ b/Documentation/devicetree/bindings/serial/omap_serial.txt
@@ -22,6 +22,8 @@ Optional properties:
- dma-names : "rx" for receive channel, "tx" for transmit channel.
- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
- rs485-rts-active-high: drive RTS high when sending (default is low).
+- clocks: phandle to the functional clock as per
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
Example:
diff --git a/Documentation/devicetree/bindings/serial/pl011.txt b/Documentation/devicetree/bindings/serial/pl011.txt
deleted file mode 100644
index 77863aefe9ef..000000000000
--- a/Documentation/devicetree/bindings/serial/pl011.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-* ARM AMBA Primecell PL011 serial UART
-
-Required properties:
-- compatible: must be "arm,primecell", "arm,pl011", "zte,zx296702-uart"
-- reg: exactly one register range with length 0x1000
-- interrupts: exactly one interrupt specifier
-
-Optional properties:
-- pinctrl:
- When present, must have one state named "default",
- and may contain a second name named "sleep". The former
- state sets up pins for ordinary operation whereas
- the latter state will put the associated pins to sleep
- when the UART is unused
-- clocks:
- When present, the first clock listed must correspond to
- the clock named UARTCLK on the IP block, i.e. the clock
- to the external serial line, whereas the second clock
- must correspond to the PCLK clocking the internal logic
- of the block. Just listing one clock (the first one) is
- deprecated.
-- clock-names:
- When present, the first clock listed must be named
- "uartclk" and the second clock listed must be named
- "apb_pclk"
-- dmas:
- When present, may have one or two dma channels.
- The first one must be named "rx", the second one
- must be named "tx".
-- auto-poll:
- Enables polling when using RX DMA.
-- poll-rate-ms:
- Rate at which poll occurs when auto-poll is set,
- default 100ms.
-- poll-timeout-ms:
- Poll timeout when auto-poll is set, default
- 3000ms.
-
-See also bindings/arm/primecell.txt
-
-Example:
-
-uart@80120000 {
- compatible = "arm,pl011", "arm,primecell";
- reg = <0x80120000 0x1000>;
- interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&dma 13 0 0x2>, <&dma 13 0 0x0>;
- dma-names = "rx", "tx";
- clocks = <&foo_clk>, <&bar_clk>;
- clock-names = "uartclk", "apb_pclk";
-};
diff --git a/Documentation/devicetree/bindings/serial/pl011.yaml b/Documentation/devicetree/bindings/serial/pl011.yaml
new file mode 100644
index 000000000000..1a64d59152aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/pl011.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/pl011.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM AMBA Primecell PL011 serial UART
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+allOf:
+ - $ref: /schemas/serial.yaml#
+
+# Need a custom select here or 'arm,primecell' will match on lots of nodes
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - arm,pl011
+ - zte,zx296702-uart
+ required:
+ - compatible
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: arm,pl011
+ - const: arm,primecell
+ - items:
+ - const: zte,zx296702-uart
+ - const: arm,primecell
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ pinctrl-0: true
+ pinctrl-1: true
+
+ pinctrl-names:
+ description:
+ When present, must have one state named "default",
+      and may contain a second state named "sleep". The former
+ state sets up pins for ordinary operation whereas
+ the latter state will put the associated pins to sleep
+ when the UART is unused
+ minItems: 1
+ items:
+ - const: default
+ - const: sleep
+
+ clocks:
+ description:
+ When present, the first clock listed must correspond to
+ the clock named UARTCLK on the IP block, i.e. the clock
+ to the external serial line, whereas the second clock
+ must correspond to the PCLK clocking the internal logic
+ of the block. Just listing one clock (the first one) is
+ deprecated.
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: uartclk
+ - const: apb_pclk
+
+ dmas:
+ minItems: 1
+ maxItems: 2
+
+ dma-names:
+ minItems: 1
+ items:
+ - const: rx
+ - const: tx
+
+ auto-poll:
+ description:
+ Enables polling when using RX DMA.
+ type: boolean
+
+ poll-rate-ms:
+ description:
+      Rate at which polling occurs when auto-poll is set;
+      defaults to 100ms.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - default: 100
+
+ poll-timeout-ms:
+ description:
+ Poll timeout when auto-poll is set, default
+ 3000ms.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - default: 3000
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+dependencies:
+ poll-rate-ms: [ auto-poll ]
+ poll-timeout-ms: [ auto-poll ]
+
+additionalProperties: false
+
+examples:
+ - |
+ serial@80120000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x80120000 0x1000>;
+ interrupts = <0 11 4>;
+ dmas = <&dma 13 0 0x2>, <&dma 13 0 0x0>;
+ dma-names = "rx", "tx";
+ clocks = <&foo_clk>, <&bar_clk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt b/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt
deleted file mode 100644
index 8b9e0d4dc2e4..000000000000
--- a/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Renesas RZ/N1 UART
-
-This controller is based on the Synopsys DesignWare ABP UART and inherits all
-properties defined in snps-dw-apb-uart.txt except for the compatible property.
-
-Required properties:
-- compatible : The device specific string followed by the generic RZ/N1 string.
- Therefore it must be one of:
- "renesas,r9a06g032-uart", "renesas,rzn1-uart"
- "renesas,r9a06g033-uart", "renesas,rzn1-uart"
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 20232ad05d89..dd63151dc8b6 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -29,7 +29,9 @@ Required properties:
- "renesas,scif-r8a774c0" for R8A774C0 (RZ/G2E) SCIF compatible UART.
- "renesas,hscif-r8a774c0" for R8A774C0 (RZ/G2E) HSCIF compatible UART.
- "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
+ - "renesas,hscif-r8a7778" for R8A7778 (R-Car M1) HSCIF compatible UART.
- "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
+ - "renesas,hscif-r8a7779" for R8A7779 (R-Car H1) HSCIF compatible UART.
- "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
- "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART.
- "renesas,scifb-r8a7790" for R8A7790 (R-Car H2) SCIFB compatible UART.
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
deleted file mode 100644
index 12bbe9f22560..000000000000
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-* Synopsys DesignWare ABP UART
-
-Required properties:
-- compatible : "snps,dw-apb-uart"
-- reg : offset and length of the register set for the device.
-- interrupts : should contain uart interrupt.
-
-Clock handling:
-The clock rate of the input clock needs to be supplied by one of
-- clock-frequency : the input clock frequency for the UART.
-- clocks : phandle to the input clock
-
-The supplying peripheral clock can also be handled, needing a second property
-- clock-names: tuple listing input clock names.
- Required elements: "baudclk", "apb_pclk"
-
-Optional properties:
-- snps,uart-16550-compatible : reflects the value of UART_16550_COMPATIBLE
- configuration parameter. Define this if your UART does not implement the busy
- functionality.
-- resets : phandle to the parent reset controller.
-- reg-shift : quantity to shift the register offsets by. If this property is
- not present then the register offsets are not shifted.
-- reg-io-width : the size (in bytes) of the IO accesses that should be
- performed on the device. If this property is not present then single byte
- accesses are used.
-- dcd-override : Override the DCD modem status signal. This signal will always
- be reported as active instead of being obtained from the modem status
- register. Define this if your serial port does not use this pin.
-- dsr-override : Override the DTS modem status signal. This signal will always
- be reported as active instead of being obtained from the modem status
- register. Define this if your serial port does not use this pin.
-- cts-override : Override the CTS modem status signal. This signal will always
- be reported as active instead of being obtained from the modem status
- register. Define this if your serial port does not use this pin.
-- ri-override : Override the RI modem status signal. This signal will always be
- reported as inactive instead of being obtained from the modem status register.
- Define this if your serial port does not use this pin.
-
-Example:
-
- uart@80230000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x80230000 0x100>;
- clock-frequency = <3686400>;
- interrupts = <10>;
- reg-shift = <2>;
- reg-io-width = <4>;
- dcd-override;
- dsr-override;
- cts-override;
- ri-override;
- };
-
-Example with one clock:
-
- uart@80230000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x80230000 0x100>;
- clocks = <&baudclk>;
- interrupts = <10>;
- reg-shift = <2>;
- reg-io-width = <4>;
- };
-
-Example with two clocks:
-
- uart@80230000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x80230000 0x100>;
- clocks = <&baudclk>, <&apb_pclk>;
- clock-names = "baudclk", "apb_pclk";
- interrupts = <10>;
- reg-shift = <2>;
- reg-io-width = <4>;
- };
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
new file mode 100644
index 000000000000..b42002542690
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/snps-dw-apb-uart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare APB UART
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+allOf:
+ - $ref: /schemas/serial.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r9a06g032-uart
+ - renesas,r9a06g033-uart
+ - const: renesas,rzn1-uart
+ - items:
+ - enum:
+ - rockchip,px30-uart
+ - rockchip,rk3036-uart
+ - rockchip,rk3066-uart
+ - rockchip,rk3188-uart
+ - rockchip,rk3288-uart
+ - rockchip,rk3328-uart
+ - rockchip,rk3368-uart
+ - rockchip,rk3399-uart
+ - rockchip,rv1108-uart
+ - const: snps,dw-apb-uart
+ - items:
+ - enum:
+ - brcm,bcm11351-dw-apb-uart
+ - brcm,bcm21664-dw-apb-uart
+ - const: snps,dw-apb-uart
+ - const: snps,dw-apb-uart
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-frequency: true
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: baudclk
+ - const: apb_pclk
+
+ snps,uart-16550-compatible:
+ description: reflects the value of the UART_16550_COMPATIBLE configuration
+ parameter. Define this if your UART does not implement the busy functionality.
+ type: boolean
+
+ resets:
+ maxItems: 1
+
+ reg-shift: true
+
+ reg-io-width: true
+
+ dcd-override:
+ description: Override the DCD modem status signal. This signal will
+ always be reported as active instead of being obtained from the modem
+ status register. Define this if your serial port does not use this
+ pin.
+ type: boolean
+
+ dsr-override:
+ description: Override the DSR modem status signal. This signal will
+ always be reported as active instead of being obtained from the modem
+ status register. Define this if your serial port does not use this
+ pin.
+ type: boolean
+
+ cts-override:
+ description: Override the CTS modem status signal. This signal will
+ always be reported as active instead of being obtained from the modem
+ status register. Define this if your serial port does not use this
+ pin.
+ type: boolean
+
+ ri-override:
+ description: Override the RI modem status signal. This signal will always
+ be reported as inactive instead of being obtained from the modem status
+ register. Define this if your serial port does not use this pin.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ serial@80230000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x80230000 0x100>;
+ clock-frequency = <3686400>;
+ interrupts = <10>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ dcd-override;
+ dsr-override;
+ cts-override;
+ ri-override;
+ };
+
+ - |
+ // Example with one clock:
+ serial@80230000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x80230000 0x100>;
+ clocks = <&baudclk>;
+ interrupts = <10>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ - |
+ // Example with two clocks:
+ serial@80230000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x80230000 0x100>;
+ clocks = <&baudclk>, <&apb_pclk>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <10>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+...
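
For reference, the RZ/N1 compatibles folded in from the deleted
renesas,rzn1-uart.txt could be expressed under this schema roughly as in the
sketch below; the unit address, interrupt number and clock labels are
placeholders, not taken from a real board:

    serial@40060000 {
        compatible = "renesas,r9a06g032-uart", "renesas,rzn1-uart";
        reg = <0x40060000 0x400>;
        interrupts = <42>;
        clocks = <&uart_clk>, <&apb_pclk>;
        clock-names = "baudclk", "apb_pclk";
        reg-shift = <2>;
        reg-io-width = <4>;
    };
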
diff --git a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
index 36603419d6f8..0e72183f52bc 100644
--- a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
+++ b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
@@ -4,14 +4,10 @@ Required properties:
- compatible : "olpc,ap-sp"
- reg : base address and length of SoC's WTM registers
- interrupts : SP-AP interrupt
-- clocks : phandle + clock-specifier for the clock that drives the WTM
-- clock-names: should be "sp"
Example:
ap-sp@d4290000 {
compatible = "olpc,ap-sp";
reg = <0xd4290000 0x1000>;
interrupts = <40>;
- clocks = <&soc_clocks MMP2_CLK_SP>;
- clock-names = "sp";
}
diff --git a/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt b/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt
index 205a54bcd7c7..6bf6b43f8dd8 100644
--- a/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt
+++ b/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt
@@ -9,6 +9,8 @@ Required properties:
"amlogic,meson-gx-clk-measure" for GX SoCs
"amlogic,meson8-clk-measure" for Meson8 SoCs
"amlogic,meson8b-clk-measure" for Meson8b SoCs
+ "amlogic,meson-axg-clk-measure" for AXG SoCs
+ "amlogic,meson-g12a-clk-measure" for G12a SoCs
- reg: base address and size of the Clock Measurer register space.
Example:
diff --git a/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt
new file mode 100644
index 000000000000..3b7d32956391
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt
@@ -0,0 +1,46 @@
+BCM2835 PM (Power domains, watchdog)
+
+The PM block controls power domains and some reset lines, and includes
+a watchdog timer. This binding supersedes the brcm,bcm2835-pm-wdt
+binding which covered some of PM's register range and functionality.
+
+Required properties:
+
+- compatible: Should be "brcm,bcm2835-pm"
+- reg: Specifies base physical address and size of the two
+ register ranges ("PM" and "ASYNC_BRIDGE" in that
+ order)
+- clocks: a) v3d: The V3D clock from CPRMAN
+ b) peri_image: The PERI_IMAGE clock from CPRMAN
+ c) h264: The H264 clock from CPRMAN
+ d) isp: The ISP clock from CPRMAN
+- #reset-cells: Should be 1. This property follows the reset controller
+ bindings[1].
+- #power-domain-cells: Should be 1. This property follows the power domain
+ bindings[2].
+
+Optional properties:
+
+- timeout-sec: Contains the watchdog timeout in seconds
+- system-power-controller: Whether the watchdog is controlling the
+ system power. This node follows the power controller bindings[3].
+
+[1] Documentation/devicetree/bindings/reset/reset.txt
+[2] Documentation/devicetree/bindings/power/power_domain.txt
+[3] Documentation/devicetree/bindings/power/power-controller.txt
+
+Example:
+
+pm {
+ compatible = "brcm,bcm2835-pm", "brcm,bcm2835-pm-wdt";
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
+ reg = <0x7e100000 0x114>,
+ <0x7e00a000 0x24>;
+ clocks = <&clocks BCM2835_CLOCK_V3D>,
+ <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+ <&clocks BCM2835_CLOCK_H264>,
+ <&clocks BCM2835_CLOCK_ISP>;
+ clock-names = "v3d", "peri_image", "h264", "isp";
+ system-power-controller;
+};
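
A consumer device would then reference the PM block through the usual
power-domain and reset specifiers. A rough, illustrative sketch follows; the
&pm label is assumed, and the cell constants are assumed to come from
include/dt-bindings/soc/bcm2835-pm.h:

    v3d@7ec00000 {
        compatible = "brcm,bcm2835-v3d";
        reg = <0x7ec00000 0x1000>;
        power-domains = <&pm BCM2835_POWER_DOMAIN_GRAFX_V3D>;
        resets = <&pm BCM2835_RESET_V3D>;
    };
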
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 0b8cc533ca83..cf759e5f9b10 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function
= EXAMPLE
The following example represents the GLINK RPM node on a MSM8996 device, with
the function for the "rpm_request" channel defined, which is used for
-regualtors and root clocks.
+regulators and root clocks.
apcs_glb: mailbox@9820000 {
compatible = "qcom,msm8996-apcs-hmss-global";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt
index ec95705ba692..f3fa313963d5 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt
@@ -23,6 +23,7 @@ resources.
"qcom,rpm-msm8916"
"qcom,rpm-msm8974"
"qcom,rpm-msm8998"
+ "qcom,rpm-sdm660"
"qcom,rpm-qcs404"
- qcom,smd-channels:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index a35af2dafdad..49e1d72d3648 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
@@ -41,12 +41,12 @@ processor ID) and a string identifier.
- qcom,local-pid:
Usage: required
Value type: <u32>
- Definition: specifies the identfier of the local endpoint of this edge
+ Definition: specifies the identifier of the local endpoint of this edge
- qcom,remote-pid:
Usage: required
Value type: <u32>
- Definition: specifies the identfier of the remote endpoint of this edge
+ Definition: specifies the identifier of the remote endpoint of this edge
= SUBNODES
Each SMP2P pair contain a set of inbound and outbound entries, these are
diff --git a/Documentation/devicetree/bindings/sound/adi,adau1977.txt b/Documentation/devicetree/bindings/sound/adi,adau1977.txt
index e79aeef73f28..9225472c80b4 100644
--- a/Documentation/devicetree/bindings/sound/adi,adau1977.txt
+++ b/Documentation/devicetree/bindings/sound/adi,adau1977.txt
@@ -17,12 +17,18 @@ Required properties:
Documentation/devicetree/bindings/regulator/regulator.txt
Optional properties:
- - reset-gpio: the reset pin for the chip, for more details consult
+ - reset-gpios: the reset pin for the chip, for more details consult
Documentation/devicetree/bindings/gpio/gpio.txt
- DVDD-supply: supply voltage for the digital core, please consult
Documentation/devicetree/bindings/regulator/regulator.txt
+- adi,micbias: configures the voltage setting for the MICBIAS pin.
+ Select 0/1/2/3/4/5/6/7/8 to specify MICBIAS voltage
+ 5V/5.5V/6V/6.5V/7V/7.5V/8V/8.5V/9V
+ If not specified, the default value is "7", meaning 8.5 Volts.
+ This property is only valid for the ADAU1977.
+
For required properties on SPI, please consult
Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -40,7 +46,8 @@ Examples:
AVDD-supply = <&regulator>;
DVDD-supply = <&regulator_digital>;
- reset_gpio = <&gpio 10 GPIO_ACTIVE_LOW>;
+ adi,micbias = <3>;
+ reset-gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
};
adau1977_i2c: adau1977@11 {
@@ -50,5 +57,5 @@ Examples:
AVDD-supply = <&regulator>;
DVDD-supply = <&regulator_digital>;
- reset_gpio = <&gpio 10 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
};
diff --git a/Documentation/devicetree/bindings/sound/ak4458.txt b/Documentation/devicetree/bindings/sound/ak4458.txt
index 7839be78448d..e5820235e0d5 100644
--- a/Documentation/devicetree/bindings/sound/ak4458.txt
+++ b/Documentation/devicetree/bindings/sound/ak4458.txt
@@ -4,7 +4,7 @@ This device supports I2C mode.
Required properties:
-- compatible : "asahi-kasei,ak4458"
+- compatible : "asahi-kasei,ak4458" or "asahi-kasei,ak4497"
- reg : The I2C address of the device for I2C
Optional properties:
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
deleted file mode 100644
index 62d42768a00b..000000000000
--- a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
+++ /dev/null
@@ -1,123 +0,0 @@
-Audio-Graph-SCU-Card:
-
-Audio-Graph-SCU-Card is "Audio-Graph-Card" + "ALSA DPCM".
-
-It is based on common bindings for device graphs.
-see ${LINUX}/Documentation/devicetree/bindings/graph.txt
-
-Basically, Audio-Graph-SCU-Card property is same as
-Simple-Card / Simple-SCU-Card / Audio-Graph-Card.
-see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt
- ${LINUX}/Documentation/devicetree/bindings/sound/simple-scu-card.txt
- ${LINUX}/Documentation/devicetree/bindings/sound/audio-graph-card.txt
-
-Below are same as Simple-Card / Audio-Graph-Card.
-
-- label
-- dai-format
-- frame-master
-- bitclock-master
-- bitclock-inversion
-- frame-inversion
-- dai-tdm-slot-num
-- dai-tdm-slot-width
-- clocks / system-clock-frequency
-
-Below are same as Simple-SCU-Card.
-
-- convert-rate
-- convert-channels
-- prefix
-- routing
-
-Required properties:
-
-- compatible : "audio-graph-scu-card";
-- dais : list of CPU DAI port{s}
-
-Example 1. Sampling Rate Conversion
-
- sound_card {
- compatible = "audio-graph-scu-card";
-
- label = "sound-card";
- prefix = "codec";
- routing = "codec Playback", "DAI0 Playback",
- "DAI0 Capture", "codec Capture";
- convert-rate = <48000>;
-
- dais = <&cpu_port>;
- };
-
- audio-codec {
- ...
-
- port {
- codec_endpoint: endpoint {
- remote-endpoint = <&cpu_endpoint>;
- };
- };
- };
-
- dai-controller {
- ...
- cpu_port: port {
- cpu_endpoint: endpoint {
- remote-endpoint = <&codec_endpoint>;
-
- dai-format = "left_j";
- ...
- };
- };
- };
-
-Example 2. 2 CPU 1 Codec (Mixing)
-
- sound_card {
- compatible = "audio-graph-scu-card";
-
- label = "sound-card";
- routing = "codec Playback", "DAI0 Playback",
- "codec Playback", "DAI1 Playback",
- "DAI0 Capture", "codec Capture";
-
- dais = <&cpu_port0
- &cpu_port1>;
- };
-
- audio-codec {
- ...
-
- audio-graph-card,prefix = "codec";
- audio-graph-card,convert-rate = <48000>;
- port {
- codec_endpoint0: endpoint {
- remote-endpoint = <&cpu_endpoint0>;
- };
- codec_endpoint1: endpoint {
- remote-endpoint = <&cpu_endpoint1>;
- };
- };
- };
-
- dai-controller {
- ...
- ports {
- cpu_port0: port {
- cpu_endpoint0: endpoint {
- remote-endpoint = <&codec_endpoint0>;
-
- dai-format = "left_j";
- ...
- };
- };
- cpu_port1: port {
- cpu_endpoint1: endpoint {
- remote-endpoint = <&codec_endpoint1>;
-
- dai-format = "left_j";
- ...
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/sound/cs35l36.txt b/Documentation/devicetree/bindings/sound/cs35l36.txt
new file mode 100644
index 000000000000..912bd162b477
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs35l36.txt
@@ -0,0 +1,168 @@
+CS35L36 Speaker Amplifier
+
+Required properties:
+
+ - compatible : "cirrus,cs35l36"
+
+ - reg : the I2C address of the device for I2C
+
+ - VA-supply, VP-supply : power supplies for the device,
+ as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+ - cirrus,boost-ctl-millivolt : Boost Voltage Value. Configures the boost
+ converter's output voltage in mV. The range is from 2550mV to 12000mV with
+ increments of 50mV.
+ (Default) VP
+
+ - cirrus,boost-peak-milliamp : Boost-converter peak current limit in mA.
+ Configures the peak current by monitoring the current through the boost FET.
+ Range starts at 1600mA and goes to a maximum of 4500mA with increments of
+ 50mA.
+ (Default) 4.50 Amps
+
+ - cirrus,boost-ind-nanohenry : Inductor estimation LBST reference value.
+ Seeds the digital boost converter's inductor estimation block with the initial
+ inductance value to reference.
+
+ 1000 = 1uH (Default)
+ 1200 = 1.2uH
+
+Optional properties:
+ - cirrus,multi-amp-mode : Boolean to determine if there are more than
+ one amplifier in the system. If more than one it is best to Hi-Z the ASP
+ port to prevent bus contention on the output signal
+
+ - cirrus,boost-ctl-select : Boost converter control source selection.
+ Selects the source of the BST_CTL target VBST voltage for the boost
+ converter to generate.
+ 0x00 - Control Port Value
+ 0x01 - Class H Tracking (Default)
+ 0x10 - MultiDevice Sync Value
+
+ - cirrus,amp-pcm-inv : Boolean to determine whether the amplifier will invert
+ incoming PCM data
+
+ - cirrus,imon-pol-inv : Boolean to determine whether the amplifier will invert
+ the polarity of outbound IMON feedback data
+
+ - cirrus,vmon-pol-inv : Boolean to determine whether the amplifier will invert
+ the polarity of outbound VMON feedback data
+
+ - cirrus,dcm-mode-enable : Boost converter automatic DCM Mode enable.
+ This enables the digital boost converter to operate in a low power
+ (Discontinuous Conduction) mode during low loading conditions.
+
+ - cirrus,weak-fet-disable : Boolean : When operating in Weak-FET Drive Mode the
+ strength of the output drivers is reduced, so they must not be used to drive
+ a large load.
+
+ - cirrus,classh-wk-fet-delay : Weak-FET entry delay. Controls the delay
+ (in ms) before the Class H algorithm switches to the weak-FET voltage
+ (after the audio falls and remains below the value specified in WKFET_AMP_THLD).
+
+ 0 = 0ms
+ 1 = 5ms
+ 2 = 10ms
+ 3 = 50ms
+ 4 = 100ms (Default)
+ 5 = 200ms
+ 6 = 500ms
+ 7 = 1000ms
+
+ - cirrus,classh-weak-fet-thld-millivolt : Weak-FET amplifier drive threshold.
+ Configures the signal threshold at which the PWM output stage enters
+ weak-FET operation. The range is 50mV to 700mV in 50mV increments.
+
+ - cirrus,temp-warn-threshold : Amplifier overtemperature warning threshold.
+ Configures the threshold at which the overtemperature warning condition occurs.
+ When the threshold is met, the overtemperature warning attenuation is applied
+ and the TEMP_WARN_EINT interrupt status bit is set.
+ If TEMP_WARN_MASK = 0, INTb is asserted.
+
+ 0 = 105C
+ 1 = 115C
+ 2 = 125C (Default)
+ 3 = 135C
+
+ - cirrus,irq-drive-select : Selects the driver type of the selected interrupt
+ output.
+
+ 0 = Open-drain
+ 1 = Push-pull (Default)
+
+ - cirrus,irq-gpio-select : Selects the pin to serve as the programmable
+ interrupt output.
+
+ 0 = PDM_DATA / SWIRE_SD / INT (Default)
+ 1 = GPIO
+
+Optional properties for the "cirrus,vpbr-config" Sub-node
+
+ - cirrus,vpbr-en : VBST brownout prevention enable. Configures whether the
+ VBST brownout prevention algorithm is enabled or disabled.
+
+ 0 = VBST brownout prevention disabled (default)
+ 1 = VBST brownout prevention enabled
+
+ See Section 7.31.1 VPBR Config for configuration options & further details
+
+ - cirrus,vpbr-thld : Initial VPBR threshold. Configures the VP brownout
+ threshold voltage
+
+ - cirrus,vpbr-atk-rate : Attenuation attack step rate. Configures the
+ amount of delay between consecutive volume attenuation steps when a brownout
+ condition is present and the VP brownout condition is in an attacking state.
+
+ - cirrus,vpbr-atk-vol : VP brownout prevention step size. Configures the VP
+ brownout prevention attacking attenuation step size when operating in either
+ digital volume or analog gain modes.
+
+ - cirrus,vpbr-max-attn : Maximum attenuation that the VP brownout prevention
+ can apply to the audio signal.
+
+ - cirrus,vpbr-wait : Configures the delay time between a brownout condition
+ no longer being present and the VP brownout prevention entering an attenuation
+ release state.
+
+ - cirrus,vpbr-rel-rate : Attenuation release step rate. Configures the delay
+ between consecutive volume attenuation release steps when a brownout condition
+ is no longer present and the VP brownout is in an attenuation release state.
+
+ - cirrus,vpbr-mute-en : During the attack state, if the vpbr-max-attn value
+ is reached, the error condition still remains, and this bit is set, the audio
+ is muted.
+
+Example:
+
+cs35l36: cs35l36@40 {
+ compatible = "cirrus,cs35l36";
+ reg = <0x40>;
+ VA-supply = <&dummy_vreg>;
+ VP-supply = <&dummy_vreg>;
+ reset-gpios = <&gpio0 54 0>;
+ interrupt-parent = <&gpio8>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ cirrus,boost-ind-nanohenry = <1000>;
+ cirrus,boost-ctl-millivolt = <10000>;
+ cirrus,boost-peak-milliamp = <4500>;
+ cirrus,boost-ctl-select = <0x00>;
+ cirrus,weak-fet-delay = <0x04>;
+ cirrus,weak-fet-thld = <0x01>;
+ cirrus,temp-warn-threshold = <0x01>;
+ cirrus,multi-amp-mode;
+ cirrus,irq-drive-select = <0x01>;
+ cirrus,irq-gpio-select = <0x01>;
+
+ cirrus,vpbr-config {
+ cirrus,vpbr-en = <0x00>;
+ cirrus,vpbr-thld = <0x05>;
+ cirrus,vpbr-atk-rate = <0x02>;
+ cirrus,vpbr-atk-vol = <0x01>;
+ cirrus,vpbr-max-attn = <0x09>;
+ cirrus,vpbr-wait = <0x01>;
+ cirrus,vpbr-rel-rate = <0x05>;
+ cirrus,vpbr-mute-en = <0x00>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/cs4341.txt b/Documentation/devicetree/bindings/sound/cs4341.txt
new file mode 100644
index 000000000000..12b4aa8ef0db
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs4341.txt
@@ -0,0 +1,22 @@
+Cirrus Logic CS4341 audio DAC
+
+This device supports both I2C and SPI (configured with pin strapping
+on the board).
+
+Required properties:
+ - compatible: "cirrus,cs4341a"
+ - reg : the I2C address of the device for I2C, the chip select
+ number for SPI.
+
+For required properties on I2C-bus, please consult
+Documentation/devicetree/bindings/i2c/i2c.txt
+For required properties on SPI-bus, please consult
+Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+ codec: cs4341@0 {
+ #sound-dai-cells = <0>;
+ compatible = "cirrus,cs4341a";
+ reg = <0>;
+ spi-max-frequency = <6000000>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
index b279b6072bd5..a58f79f5345c 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
@@ -45,6 +45,23 @@ Optional properties:
- fck_parent : Should contain a valid clock name which will be used as parent
for the McASP fck
+Optional GPIO support:
+If any McASP pin needs to be used as a GPIO, then the McASP node must have:
+...
+ gpio-controller
+ #gpio-cells = <2>;
+...
+
+When requesting a GPIO, the first parameter is the PIN index in the McASP_P*
+registers.
+For example, to request the AXR2 pin of mcasp8:
+function-gpios = <&mcasp8 2 0>;
+
+Or to request the ACLKR pin of mcasp8:
+function-gpios = <&mcasp8 29 0>;
+
+For generic gpio information, please refer to bindings/gpio/gpio.txt
+
Example:
mcasp0: mcasp0@1d00000 {
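
Tying the GPIO additions above together, a minimal sketch of how an existing
McASP node gains the GPIO controller properties and how a consumer requests a
pin; the foo_device consumer node is purely hypothetical:

    &mcasp8 {
        gpio-controller;
        #gpio-cells = <2>;
    };

    foo_device {
        /* AXR2 pin of mcasp8 used as a GPIO, as described above */
        function-gpios = <&mcasp8 2 0>;
    };
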
diff --git a/Documentation/devicetree/bindings/sound/fsl,micfil.txt b/Documentation/devicetree/bindings/sound/fsl,micfil.txt
new file mode 100644
index 000000000000..53e227b15277
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,micfil.txt
@@ -0,0 +1,32 @@
+NXP MICFIL Digital Audio Interface (MICFIL).
+
+The MICFIL digital interface provides a 16-bit audio signal from a PDM
+microphone bitstream at a configurable output sampling rate.
+
+Required properties:
+
+ - compatible : Compatible list, contains "fsl,imx8mm-micfil"
+
+ - reg : Offset and length of the register set for the device.
+
+ - interrupts : Contains the micfil interrupts.
+
+ - clocks : Must contain an entry for each entry in clock-names.
+
+ - clock-names : Must include the "ipg_clk" for register access and
+ "ipg_clk_app" for internal micfil clock.
+
+ - dmas : Generic dma devicetree binding as described in
+ Documentation/devicetree/bindings/dma/dma.txt.
+
+Example:
+micfil: micfil@30080000 {
+ compatible = "fsl,imx8mm-micfil";
+ reg = <0x0 0x30080000 0x0 0x10000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_PDM_IPG>,
+ <&clk IMX8MM_CLK_PDM_ROOT>;
+ clock-names = "ipg_clk", "ipg_clk_app";
+ dmas = <&sdma2 24 26 0x80000000>;
+};
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
new file mode 100644
index 000000000000..1084f7f22eea
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
@@ -0,0 +1,26 @@
+* Audio codec controlled by ChromeOS EC
+
+Google's ChromeOS EC codec is a digital mic codec provided by the
+Embedded Controller (EC) and is controlled via a host-command interface.
+
+An EC codec node should only be found as a sub-node of the EC node (see
+Documentation/devicetree/bindings/mfd/cros-ec.txt).
+
+Required properties:
+- compatible: Must contain "google,cros-ec-codec"
+- #sound-dai-cells: Should be 1. The cell specifies number of DAIs.
+- max-dmic-gain: A number for maximum gain in dB on digital microphone.
+
+Example:
+
+cros-ec@0 {
+ compatible = "google,cros-ec-spi";
+
+ ...
+
+ cros_ec_codec: ec-codec {
+ compatible = "google,cros-ec-codec";
+ #sound-dai-cells = <1>;
+ max-dmic-gain = <43>;
+ };
+};
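
Since #sound-dai-cells is 1, a machine-driver DAI link references the codec
with an explicit DAI index. A minimal, purely illustrative fragment (the
surrounding sound-card node depends on the machine driver in use):

    codec {
        sound-dai = <&cros_ec_codec 0>;
    };
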
diff --git a/Documentation/devicetree/bindings/sound/ingenic,jz4725b-codec.txt b/Documentation/devicetree/bindings/sound/ingenic,jz4725b-codec.txt
new file mode 100644
index 000000000000..05adc0d47b13
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ingenic,jz4725b-codec.txt
@@ -0,0 +1,20 @@
+Ingenic JZ4725B codec controller
+
+Required properties:
+- compatible : "ingenic,jz4725b-codec"
+- reg : codec registers location and length
+- clocks : phandle to the AIC clock.
+- clock-names: must be set to "aic".
+- #sound-dai-cells: Must be set to 0.
+
+Example:
+
+codec: audio-codec@100200a4 {
+ compatible = "ingenic,jz4725b-codec";
+ reg = <0x100200a4 0x8>;
+
+ #sound-dai-cells = <0>;
+
+ clocks = <&cgu JZ4725B_CLK_AIC>;
+ clock-names = "aic";
+};
diff --git a/Documentation/devicetree/bindings/sound/ingenic,jz4740-codec.txt b/Documentation/devicetree/bindings/sound/ingenic,jz4740-codec.txt
new file mode 100644
index 000000000000..1ffcade87e7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ingenic,jz4740-codec.txt
@@ -0,0 +1,20 @@
+Ingenic JZ4740 codec controller
+
+Required properties:
+- compatible : "ingenic,jz4740-codec"
+- reg : codec registers location and length
+- clocks : phandle to the AIC clock.
+- clock-names: must be set to "aic".
+- #sound-dai-cells: Must be set to 0.
+
+Example:
+
+codec: audio-codec@10020080 {
+ compatible = "ingenic,jz4740-codec";
+ reg = <0x10020080 0x8>;
+
+ #sound-dai-cells = <0>;
+
+ clocks = <&cgu JZ4740_CLK_AIC>;
+ clock-names = "aic";
+};
diff --git a/Documentation/devicetree/bindings/sound/mt6358.txt b/Documentation/devicetree/bindings/sound/mt6358.txt
new file mode 100644
index 000000000000..5465730013a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt6358.txt
@@ -0,0 +1,18 @@
+Mediatek MT6358 Audio Codec
+
+The communication between MT6358 and SoC is through Mediatek PMIC wrapper.
+For more detail, please visit Mediatek PMIC wrapper documentation.
+
+Must be a child node of PMIC wrapper.
+
+Required properties:
+
+- compatible : "mediatek,mt6358-sound".
+- Avdd-supply : power source of AVDD
+
+Example:
+
+mt6358_snd {
+ compatible = "mediatek,mt6358-sound";
+ Avdd-supply = <&mt6358_vaud28_reg>;
+};
diff --git a/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
new file mode 100644
index 000000000000..396ba38619f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
@@ -0,0 +1,36 @@
+Mediatek AFE PCM controller for mt8183
+
+Required properties:
+- compatible = "mediatek,mt68183-audio";
+- reg: register location and size
+- interrupts: should contain AFE interrupt
+- power-domains: should define the power domain
+- clocks: Must contain an entry for each entry in clock-names
+- clock-names: should have these clock names:
+ "infra_sys_audio_clk",
+ "mtkaif_26m_clk",
+ "top_mux_audio",
+ "top_mux_aud_intbus",
+ "top_sys_pll3_d4",
+ "top_clk26m_clk";
+
+Example:
+
+ afe: mt8183-afe-pcm@11220000 {
+ compatible = "mediatek,mt8183-audio";
+ reg = <0 0x11220000 0 0x1000>;
+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8183_POWER_DOMAIN_AUDIO>;
+ clocks = <&infrasys CLK_INFRA_AUDIO>,
+ <&infrasys CLK_INFRA_AUDIO_26M_BCLK>,
+ <&topckgen CLK_TOP_MUX_AUDIO>,
+ <&topckgen CLK_TOP_MUX_AUD_INTBUS>,
+ <&topckgen CLK_TOP_SYSPLL_D2_D4>,
+ <&clk26m>;
+ clock-names = "infra_sys_audio_clk",
+ "mtkaif_26m_clk",
+ "top_mux_audio",
+ "top_mux_aud_intbus",
+ "top_sys_pll_d2_d4",
+ "top_clk26m_clk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/mtk-btcvsd-snd.txt b/Documentation/devicetree/bindings/sound/mtk-btcvsd-snd.txt
new file mode 100644
index 000000000000..679e44839b48
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mtk-btcvsd-snd.txt
@@ -0,0 +1,24 @@
+Mediatek ALSA BT SCO CVSD/MSBC Driver
+
+Required properties:
+- compatible = "mediatek,mtk-btcvsd-snd";
+- reg: register location and size of PKV and SRAM_BANK2
+- interrupts: should contain BTSCO interrupt
+- mediatek,infracfg: the phandles of INFRASYS
+- mediatek,offset: Array containing the register offsets and mask:
+ infra_misc_offset,
+ infra_conn_bt_cvsd_mask,
+ cvsd_mcu_read_offset,
+ cvsd_mcu_write_offset,
+ cvsd_packet_indicator_offset
+
+Example:
+
+ mtk-btcvsd-snd@18000000 {
+ compatible = "mediatek,mtk-btcvsd-snd";
+ reg=<0 0x18000000 0 0x1000>,
+ <0 0x18080000 0 0x8000>;
+ interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_LOW>;
+ mediatek,infracfg = <&infrasys>;
+ mediatek,offset = <0xf00 0x800 0xfd0 0xfd4 0xfd8>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
index 44d27456e8a4..21cd310963b1 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
@@ -13,6 +13,10 @@ Required properties:
See ../reset/reset.txt for details.
- reset-names : Must include the following entries: hda, hda2hdmi, hda2codec_2x
+Optional properties:
+- nvidia,model : The user-visible name of this sound complex. Since the property
+ is optional, legacy boards can use the default name provided by the HDA driver.
+
Example:
hda@70030000 {
@@ -27,4 +31,5 @@ hda@70030000 {
<&tegra_car 128>, /* hda2hdmi */
<&tegra_car 111>; /* hda2codec_2x */
reset-names = "hda", "hda2hdmi", "hda2codec_2x";
+ nvidia,model = "jetson-tk1-hda";
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
index fdcea3d12ee5..e7d17dda55db 100644
--- a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
@@ -30,6 +30,7 @@ Required properties
- vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node.
- vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node.
- vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node.
+
Optional Properties:
- qcom,mbhc-vthreshold-low: Array of 5 threshold voltages in mV for 5 buttons
detection on headset when the mbhc is powered up
@@ -92,9 +93,9 @@ spmi_bus {
"cdc_ear_cnp_int",
"cdc_hphr_cnp_int",
"cdc_hphl_cnp_int";
- VDD-CDC-IO-supply = <&pm8916_l5>;
- VDD-CDC-TX-RX-CX-supply = <&pm8916_l5>;
- VDD-MICBIAS-supply = <&pm8916_l13>;
+ vdd-cdc-io-supply = <&pm8916_l5>;
+ vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>;
+ vdd-micbias-supply = <&pm8916_l13>;
#sound-dai-cells = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
index 1d8d49e30af7..5d6ea66a863f 100644
--- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
@@ -34,12 +34,12 @@ Required properties with SLIMbus Interface:
Definition: Interrupt names of WCD INTR1 and INTR2
Should be: "intr1", "intr2"
-- reset-gpio:
+- reset-gpios:
Usage: required
Value type: <String Array>
Definition: Reset gpio line
-- qcom,ifd:
+- slim-ifc-dev:
Usage: required
Value type: <phandle>
Definition: SLIM interface device
@@ -104,13 +104,13 @@ Required properties with SLIMbus Interface:
Value type: <u32>
Definition: Must be 1
-codec@1{
+audio-codec@1{
compatible = "slim217,1a0";
reg = <1 0>;
interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "intr2"
- reset-gpio = <&msmgpio 64 0>;
- qcom,ifd = <&wc9335_ifd>;
+ reset-gpios = <&msmgpio 64 0>;
+ slim-ifc-dev = <&wc9335_ifd>;
clock-names = "mclk", "native";
clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
<&rpmcc RPM_SMD_BB_CLK1>;
diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt b/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt
new file mode 100644
index 000000000000..2469588c7ccb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt
@@ -0,0 +1,23 @@
+* Rockchip Rk3328 internal codec
+
+Required properties:
+
+- compatible: "rockchip,rk3328-codec"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- rockchip,grf: the phandle of the syscon node for GRF register.
+- clocks: a list of phandle + clock-specifer pairs, one for each entry in clock-names.
+- clock-names: should be "pclk".
+- spk-depop-time-ms: speaker depop time in msec.
+
+Example for rk3328 internal codec:
+
+codec: codec@ff410000 {
+ compatible = "rockchip,rk3328-codec";
+ reg = <0x0 0xff410000 0x0 0x1000>;
+ rockchip,grf = <&grf>;
+ clocks = <&cru PCLK_ACODEC>;
+ clock-names = "pclk";
+ spk-depop-time-ms = <100>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 9c58f724396a..9d9ff5184939 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -37,6 +37,15 @@ VDDIO 1.8V 2.5V 3.3V
2 = 3.33 mA 5.74 mA 8.03 mA
3 = 4.99 mA 8.61 mA 12.05 mA
+- sclk-strength: the SCLK pad strength. Possible values are:
+0, 1, 2 and 3 as per the table below:
+
+VDDIO 1.8V 2.5V 3.3V
+0 = Disable
+1 = 1.66 mA 2.87 mA 4.02 mA
+2 = 3.33 mA 5.74 mA 8.03 mA
+3 = 4.99 mA 8.61 mA 12.05 mA
+
Example:
sgtl5000: codec@a {
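
A node making use of the new sclk-strength property might look like the sketch
below; the clock and regulator phandles are placeholders and the remaining
required sgtl5000 properties are abbreviated, not taken from a real board:

    sgtl5000: codec@a {
        compatible = "fsl,sgtl5000";
        reg = <0x0a>;
        #sound-dai-cells = <0>;
        clocks = <&clks 150>;
        VDDA-supply = <&reg_3p3v>;
        VDDIO-supply = <&reg_3p3v>;
        sclk-strength = <3>;
    };
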
diff --git a/Documentation/devicetree/bindings/sound/simple-scu-card.txt b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
deleted file mode 100644
index 3a2f71616cda..000000000000
--- a/Documentation/devicetree/bindings/sound/simple-scu-card.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-ASoC Simple SCU Sound Card
-
-Simple SCU Sound Card is "Simple Sound Card" + "ALSA DPCM".
-For example, you can use this driver if you want to exchange sampling rate convert,
-Mixing, etc...
-
-Required properties:
-
-- compatible : "simple-scu-audio-card"
- "renesas,rsrc-card"
-Optional properties:
-
-- simple-audio-card,name : see simple-audio-card.txt
-- simple-audio-card,cpu : see simple-audio-card.txt
-- simple-audio-card,codec : see simple-audio-card.txt
-
-Optional subnode properties:
-
-- simple-audio-card,format : see simple-audio-card.txt
-- simple-audio-card,frame-master : see simple-audio-card.txt
-- simple-audio-card,bitclock-master : see simple-audio-card.txt
-- simple-audio-card,bitclock-inversion : see simple-audio-card.txt
-- simple-audio-card,frame-inversion : see simple-audio-card.txt
-- simple-audio-card,convert-rate : platform specified sampling rate convert
-- simple-audio-card,convert-channels : platform specified converted channel size (2 - 8 ch)
-- simple-audio-card,prefix : see routing
-- simple-audio-card,widgets : Please refer to widgets.txt.
-- simple-audio-card,routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources.
- use audio-prefix if some components is using same sink/sources naming.
- it can be used if compatible was "renesas,rsrc-card";
-
-Required CPU/CODEC subnodes properties:
-
-- sound-dai : see simple-audio-card.txt
-
-Optional CPU/CODEC subnodes properties:
-
-- clocks / system-clock-frequency : see simple-audio-card.txt
-
-Example 1. Sampling Rate Conversion
-
-sound {
- compatible = "simple-scu-audio-card";
-
- simple-audio-card,name = "rsnd-ak4643";
- simple-audio-card,format = "left_j";
- simple-audio-card,bitclock-master = <&sndcodec>;
- simple-audio-card,frame-master = <&sndcodec>;
-
- simple-audio-card,convert-rate = <48000>;
-
- simple-audio-card,prefix = "ak4642";
- simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
- "DAI0 Capture", "ak4642 Capture";
-
- sndcpu: simple-audio-card,cpu {
- sound-dai = <&rcar_sound>;
- };
-
- sndcodec: simple-audio-card,codec {
- sound-dai = <&ak4643>;
- system-clock-frequency = <11289600>;
- };
-};
-
-Example 2. 2 CPU 1 Codec (Mixing)
-
-sound {
- compatible = "simple-scu-audio-card";
-
- simple-audio-card,name = "rsnd-ak4643";
- simple-audio-card,format = "left_j";
- simple-audio-card,bitclock-master = <&dpcmcpu>;
- simple-audio-card,frame-master = <&dpcmcpu>;
-
- simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
- "ak4642 Playback", "DAI1 Playback";
-
- dpcmcpu: cpu@0 {
- sound-dai = <&rcar_sound 0>;
- };
-
- cpu@1 {
- sound-dai = <&rcar_sound 1>;
- };
-
- codec {
- prefix = "ak4642";
- sound-dai = <&ak4643>;
- clocks = <&audio_clock>;
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/sprd-pcm.txt b/Documentation/devicetree/bindings/sound/sprd-pcm.txt
new file mode 100644
index 000000000000..4b23e84b2e57
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/sprd-pcm.txt
@@ -0,0 +1,23 @@
+* Spreadtrum DMA platform bindings
+
+Required properties:
+- compatible: Should be "sprd,pcm-platform".
+- dmas: Specify the list of DMA controller phandle and DMA request line ordered pairs.
+- dma-names: Identifier string for each DMA request line in the dmas property.
+ These strings correspond 1:1 with the ordered pairs in dmas.
+
+Example:
+
+ audio_platform: platform@0 {
+ compatible = "sprd,pcm-platform";
+ dmas = <&agcp_dma 1 1>, <&agcp_dma 2 2>,
+ <&agcp_dma 3 3>, <&agcp_dma 4 4>,
+ <&agcp_dma 5 5>, <&agcp_dma 6 6>,
+ <&agcp_dma 7 7>, <&agcp_dma 8 8>,
+ <&agcp_dma 9 9>, <&agcp_dma 10 10>;
+ dma-names = "normal_p_l", "normal_p_r",
+ "normal_c_l", "normal_c_r",
+ "voice_c", "fast_p",
+ "loop_c", "loop_p",
+ "voip_c", "voip_p";
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
new file mode 100644
index 000000000000..cbc93c8f4963
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
@@ -0,0 +1,29 @@
+Device-Tree bindings for Xilinx PL audio formatter
+
+The IP core supports DMA and data formatting (AES<->PCM conversion)
+of audio samples.
+
+Required properties:
+ - compatible: "xlnx,audio-formatter-1.0"
+ - interrupt-names: Names for the interrupts, in the same order as listed
+ in "interrupts".
+ The supported interrupt names are:
+ "irq_mm2s" : interrupt from MM2S block
+ "irq_s2mm" : interrupt from S2MM block
+ - interrupt-parent: Phandle for interrupt controller.
+ - interrupts: List of Interrupt numbers.
+ - reg: Base address and size of the IP core instance.
+ - clock-names: List of input clocks.
+ Required elements: "s_axi_lite_aclk", "aud_mclk"
+ - clocks: Input clock specifier. Refer to common clock bindings.
+
+Example:
+ audio_ss_0_audio_formatter_0: audio_formatter@80010000 {
+ compatible = "xlnx,audio-formatter-1.0";
+ interrupt-names = "irq_mm2s", "irq_s2mm";
+ interrupt-parent = <&gic>;
+ interrupts = <0 104 4>, <0 105 4>;
+ reg = <0x0 0x80010000 0x0 0x1000>;
+ clock-names = "s_axi_lite_aclk", "aud_mclk";
+ clocks = <&clk 71>, <&clk_wiz_1 0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
new file mode 100644
index 000000000000..15c2d64d247c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
@@ -0,0 +1,28 @@
+Device-Tree bindings for Xilinx SPDIF IP
+
+The IP supports playback and capture of SPDIF audio
+
+Required properties:
+ - compatible: "xlnx,spdif-2.0"
+ - clock-names: List of input clocks.
+ Required elements: "s_axi_aclk", "aud_clk_i"
+ - clocks: Input clock specifier. Refer to common clock bindings.
+ - reg: Base address and address length of the IP core instance.
+ - interrupt-parent: Phandle for interrupt controller.
+ - interrupts: List of Interrupt numbers.
+ - xlnx,spdif-mode: 0 :- receiver mode
+ 1 :- transmitter mode
+ - xlnx,aud_clk_i: input audio clock value.
+
+Example:
+ spdif_0: spdif@80010000 {
+ clock-names = "aud_clk_i", "s_axi_aclk";
+ clocks = <&misc_clk_0>, <&clk 71>;
+ compatible = "xlnx,spdif-2.0";
+ interrupt-names = "spdif_interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 91 4>;
+ reg = <0x0 0x80010000 0x0 0x10000>;
+ xlnx,spdif-mode = <1>;
+ xlnx,aud_clk_i = <49152913>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/atmel-quadspi.txt b/Documentation/devicetree/bindings/spi/atmel-quadspi.txt
index b93c1e2f25dd..7c40ea694352 100644
--- a/Documentation/devicetree/bindings/spi/atmel-quadspi.txt
+++ b/Documentation/devicetree/bindings/spi/atmel-quadspi.txt
@@ -1,14 +1,19 @@
* Atmel Quad Serial Peripheral Interface (QSPI)
Required properties:
-- compatible: Should be "atmel,sama5d2-qspi".
+- compatible: Should be one of the following:
+ - "atmel,sama5d2-qspi"
+ - "microchip,sam9x60-qspi"
- reg: Should contain the locations and lengths of the base registers
and the mapped memory.
- reg-names: Should contain the resource reg names:
- qspi_base: configuration register address space
- qspi_mmap: memory mapped address space
- interrupts: Should contain the interrupt for the device.
-- clocks: The phandle of the clock needed by the QSPI controller.
+- clocks: Should reference the peripheral clock and the QSPI system
+ clock if available.
+- clock-names: Should contain "pclk" for the peripheral clock and "qspick"
+ for the system clock when available.
- #address-cells: Should be <1>.
- #size-cells: Should be <0>.
@@ -19,7 +24,8 @@ spi@f0020000 {
reg = <0xf0020000 0x100>, <0xd0000000 0x8000000>;
reg-names = "qspi_base", "qspi_mmap";
interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&spi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 52>;
+ clock-names = "pclk";
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
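
For the newly added sam9x60 compatible, where the QSPI system clock is
available, a sketch with both clocks might look as follows; the addresses,
interrupt number and clock identifiers are placeholders, not a real sam9x60
memory map:

    spi@f0014000 {
        compatible = "microchip,sam9x60-qspi";
        reg = <0xf0014000 0x100>, <0x70000000 0x8000000>;
        reg-names = "qspi_base", "qspi_mmap";
        interrupts = <35 IRQ_TYPE_LEVEL_HIGH 7>;
        clocks = <&pmc PMC_TYPE_PERIPHERAL 35>, <&pmc PMC_TYPE_SYSTEM 19>;
        clock-names = "pclk", "qspick";
        #address-cells = <1>;
        #size-cells = <0>;
    };
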
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
index e3c48b20b1a6..2d3264140cc5 100644
--- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
@@ -10,6 +10,7 @@ Required properties:
- "fsl,imx35-cspi" for SPI compatible with the one integrated on i.MX35
- "fsl,imx51-ecspi" for SPI compatible with the one integrated on i.MX51
- "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc
+ - "fsl,imx8mq-ecspi" for SPI compatible with the one integrated on i.MX8M
- reg : Offset and length of the register set for the device
- interrupts : Should contain CSPI/eCSPI interrupt
- clocks : Clock specifiers for both ipg and per clocks.
diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt
index 483e9cfac1b1..e8f1d627d288 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt
@@ -14,15 +14,13 @@ Required properties:
- clocks : The clocks needed by the QuadSPI controller
- clock-names : Should contain the name of the clocks: "qspi_en" and "qspi".
-Optional properties:
- - fsl,qspi-has-second-chip: The controller has two buses, bus A and bus B.
- Each bus can be connected with two NOR flashes.
- Most of the time, each bus only has one NOR flash
- connected, this is the default case.
- But if there are two NOR flashes connected to the
- bus, you should enable this property.
- (Please check the board's schematic.)
- - big-endian : That means the IP register is big endian
+Required SPI slave node properties:
+ - reg: There are two buses (A and B) with two chip selects each.
+ This encodes to which bus and CS the flash is connected:
+ <0>: Bus A, CS 0
+ <1>: Bus A, CS 1
+ <2>: Bus B, CS 0
+ <3>: Bus B, CS 1
Example:
@@ -40,7 +38,7 @@ qspi0: quadspi@40044000 {
};
};
-Example showing the usage of two SPI NOR devices:
+Example showing the usage of two SPI NOR devices on bus A:
&qspi2 {
pinctrl-names = "default";
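
With the reg encoding described above, two SPI NOR devices on bus A would be
described as child nodes roughly as below; the compatible string and
frequencies are generic placeholders:

    flash0: flash@0 {
        compatible = "jedec,spi-nor";
        reg = <0>;              /* Bus A, CS 0 */
        spi-max-frequency = <50000000>;
    };

    flash1: flash@1 {
        compatible = "jedec,spi-nor";
        reg = <1>;              /* Bus A, CS 1 */
        spi-max-frequency = <50000000>;
    };
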
diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt
new file mode 100644
index 000000000000..2cd67eb727d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt
@@ -0,0 +1,39 @@
+* NXP Flex Serial Peripheral Interface (FSPI)
+
+Required properties:
+ - compatible : Should be "nxp,lx2160a-fspi"
+ - reg : First contains the register location and length,
+ Second contains the memory mapping address and length
+ - reg-names : Should contain the resource reg names:
+ - fspi_base: configuration register address space
+ - fspi_mmap: memory mapped address space
+ - interrupts : Should contain the interrupt for the device
+
+Required SPI slave node properties:
+ - reg : There are two buses (A and B) with two chip selects each.
+ This encodes to which bus and CS the flash is connected:
+ - <0>: Bus A, CS 0
+ - <1>: Bus A, CS 1
+ - <2>: Bus B, CS 0
+ - <3>: Bus B, CS 1
+
+Example showing the usage of two SPI NOR slave devices on bus A:
+
+fspi0: spi@20c0000 {
+ compatible = "nxp,lx2160a-fspi";
+ reg = <0x0 0x20c0000 0x0 0x10000>, <0x0 0x20000000 0x0 0x10000000>;
+ reg-names = "fspi_base", "fspi_mmap";
+ interrupts = <0 25 0x4>; /* Level high type */
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "fspi_en", "fspi";
+
+ mt35xu512aba0: flash@0 {
+ reg = <0>;
+ ....
+ };
+
+ mt35xu512aba1: flash@1 {
+ reg = <1>;
+ ....
+ };
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-sifive.txt b/Documentation/devicetree/bindings/spi/spi-sifive.txt
new file mode 100644
index 000000000000..3f5c6e438972
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sifive.txt
@@ -0,0 +1,37 @@
+SiFive SPI controller Device Tree Bindings
+------------------------------------------
+
+Required properties:
+- compatible : Should be "sifive,<chip>-spi" and "sifive,spi<version>".
+ Supported compatible strings are:
+ "sifive,fu540-c000-spi" for the SiFive SPI v0 as integrated
+ onto the SiFive FU540 chip, and "sifive,spi0" for the SiFive
+ SPI v0 IP block with no chip integration tweaks.
+ Please refer to sifive-blocks-ip-versioning.txt for details
+- reg : Physical base address and size of SPI registers map
+ A second (optional) range can indicate memory mapped flash
+- interrupts : Must contain one entry
+- interrupt-parent : Must be core interrupt controller
+- clocks : Must reference the frequency given to the controller
+- #address-cells : Must be '1', indicating which CS to use
+- #size-cells : Must be '0'
+
+Optional properties:
+- sifive,fifo-depth : Depth of hardware queues; defaults to 8
+- sifive,max-bits-per-word : Maximum bits per word; defaults to 8
+
+SPI RTL that corresponds to the IP block version numbers can be found here:
+https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/spi
+
+Example:
+ spi: spi@10040000 {
+ compatible = "sifive,fu540-c000-spi", "sifive,spi0";
+ reg = <0x0 0x10040000 0x0 0x1000 0x0 0x20000000 0x0 0x10000000>;
+ interrupt-parent = <&plic>;
+ interrupts = <51>;
+ clocks = <&tlclk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ sifive,fifo-depth = <8>;
+ sifive,max-bits-per-word = <8>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-sprd.txt b/Documentation/devicetree/bindings/spi/spi-sprd.txt
index bad211a19da4..3c7eacce0ee3 100644
--- a/Documentation/devicetree/bindings/spi/spi-sprd.txt
+++ b/Documentation/devicetree/bindings/spi/spi-sprd.txt
@@ -14,6 +14,11 @@ Required properties:
address on the SPI bus. Should be set to 1.
- #size-cells: Should be set to 0.
+Optional properties:
+- dma-names: Should contain the names of the DMA channels used by the SPI
+ controller.
+- dmas: Should contain the DMA channels and DMA slave ids used by the SPI
+ controller, sorted in the same order as the dma-names property.
+
Example:
spi0: spi@70a00000{
compatible = "sprd,sc9860-spi";
@@ -21,6 +26,8 @@ spi0: spi@70a00000{
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "spi", "source","enable";
clocks = <&clk_spi0>, <&ext_26m>, <&clk_ap_apb_gates 5>;
+ dma-names = "rx_chn", "tx_chn";
+ dmas = <&apdma 11 11>, <&apdma 12 12>;
#address-cells = <1>;
#size-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/spi/spi-stm32.txt b/Documentation/devicetree/bindings/spi/spi-stm32.txt
index 1b3fa2c119d5..d82755c63eaf 100644
--- a/Documentation/devicetree/bindings/spi/spi-stm32.txt
+++ b/Documentation/devicetree/bindings/spi/spi-stm32.txt
@@ -7,7 +7,9 @@ from 4 to 32-bit data size. Although it can be configured as master or slave,
only master is supported by the driver.
Required properties:
-- compatible: Must be "st,stm32h7-spi".
+- compatible: Should be one of:
+ "st,stm32h7-spi"
+ "st,stm32f4-spi"
- reg: Offset and length of the device's register set.
- interrupts: Must contain the interrupt id.
- clocks: Must contain an entry for spiclk (which feeds the internal clock
@@ -30,8 +32,9 @@ Child nodes represent devices on the SPI bus
See ../spi/spi-bus.txt
Optional properties:
-- st,spi-midi-ns: (Master Inter-Data Idleness) minimum time delay in
- nanoseconds inserted between two consecutive data frames.
+- st,spi-midi-ns: Only for STM32H7, (Master Inter-Data Idleness) minimum time
+ delay in nanoseconds inserted between two consecutive data
+ frames.
Example:
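
The existing example in this file targets the STM32H7; for the newly added
st,stm32f4-spi compatible, a node could look roughly like the sketch below.
The register address, interrupt number and clock specifier are illustrative
and should be taken from the SoC dtsi:

    spi2: spi@40003800 {
        compatible = "st,stm32f4-spi";
        reg = <0x40003800 0x400>;
        interrupts = <36>;
        clocks = <&rcc 0 STM32F4_APB1_CLOCK(SPI2)>;
        #address-cells = <1>;
        #size-cells = <0>;
    };
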
diff --git a/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt b/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt
new file mode 100644
index 000000000000..194f6a3c1c1e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt
@@ -0,0 +1,24 @@
+Milbeaut SRAM for smp bringup
+
+Milbeaut SoCs use a part of the SRAM to bring up the secondary cores.
+Once powered up by the bootloader, the secondary cores wait in a specific
+part of the SRAM.
+That part therefore needs to be described as a sub-node of mmio-sram.
+
+Required sub-node properties:
+- compatible : should be "socionext,milbeaut-smp-sram"
+
+Example:
+
+ sram: sram@0 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x10000>;
+
+ smp-sram@f100 {
+ compatible = "socionext,milbeaut-smp-sram";
+ reg = <0xf100 0x20>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sram/sunxi-sram.txt b/Documentation/devicetree/bindings/sram/sunxi-sram.txt
index ab5a70bb9a64..380246a805f2 100644
--- a/Documentation/devicetree/bindings/sram/sunxi-sram.txt
+++ b/Documentation/devicetree/bindings/sram/sunxi-sram.txt
@@ -63,6 +63,7 @@ The valid sections compatible for H5 are:
The valid sections compatible for H6 are:
- allwinner,sun50i-h6-sram-c, allwinner,sun50i-a64-sram-c
+ - allwinner,sun50i-h6-sram-c1, allwinner,sun4i-a10-sram-c1
The valid sections compatible for F1C100s are:
- allwinner,suniv-f1c100s-sram-d, allwinner,sun4i-a10-sram-d
diff --git a/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt b/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt
index 9809b11f7180..5d8fd5b52598 100644
--- a/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt
+++ b/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt
@@ -2,17 +2,44 @@ Freescale i.MX General Purpose Timer (GPT)
Required properties:
-- compatible : should be "fsl,<soc>-gpt"
-- reg : Specifies base physical address and size of the registers.
-- interrupts : A list of 4 interrupts; one per timer channel.
-- clocks : The clocks provided by the SoC to drive the timer.
+- compatible : should be one of following:
+ for i.MX1:
+ - "fsl,imx1-gpt";
+ for i.MX21:
+ - "fsl,imx21-gpt";
+ for i.MX27:
+ - "fsl,imx27-gpt", "fsl,imx21-gpt";
+ for i.MX31:
+ - "fsl,imx31-gpt";
+ for i.MX25:
+ - "fsl,imx25-gpt", "fsl,imx31-gpt";
+ for i.MX50:
+ - "fsl,imx50-gpt", "fsl,imx31-gpt";
+ for i.MX51:
+ - "fsl,imx51-gpt", "fsl,imx31-gpt";
+ for i.MX53:
+ - "fsl,imx53-gpt", "fsl,imx31-gpt";
+ for i.MX6Q:
+ - "fsl,imx6q-gpt", "fsl,imx31-gpt";
+ for i.MX6DL:
+ - "fsl,imx6dl-gpt";
+ for i.MX6SL:
+ - "fsl,imx6sl-gpt", "fsl,imx6dl-gpt";
+ for i.MX6SX:
+ - "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
+- reg : specifies base physical address and size of the registers.
+- interrupts : should be the gpt interrupt.
+- clocks : the clocks provided by the SoC to drive the timer, must contain
+ an entry for each entry in clock-names.
+- clock-names : must include "ipg" entry first, then "per" entry.
Example:
gpt1: timer@10003000 {
- compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+ compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
reg = <0x10003000 0x1000>;
interrupts = <26>;
- clocks = <&clks 46>, <&clks 61>;
+ clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>,
+ <&clks IMX27_CLK_PER1_GATE>;
clock-names = "ipg", "per";
};
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
index 18d4d0166c76..ff7c567a7972 100644
--- a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
+++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
@@ -1,7 +1,7 @@
-Mediatek Timers
+MediaTek Timers
---------------
-Mediatek SoCs have two different timers on different platforms,
+MediaTek SoCs have two different timers on different platforms,
- GPT (General Purpose Timer)
- SYST (System Timer)
@@ -9,6 +9,7 @@ The proper timer will be selected automatically by driver.
Required properties:
- compatible should contain:
+ For those SoCs that use GPT
* "mediatek,mt2701-timer" for MT2701 compatible timers (GPT)
* "mediatek,mt6580-timer" for MT6580 compatible timers (GPT)
* "mediatek,mt6589-timer" for MT6589 compatible timers (GPT)
@@ -17,7 +18,11 @@ Required properties:
* "mediatek,mt8135-timer" for MT8135 compatible timers (GPT)
* "mediatek,mt8173-timer" for MT8173 compatible timers (GPT)
* "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT)
- * "mediatek,mt6765-timer" for MT6765 compatible timers (SYST)
+
+ For those SoCs that use SYST
+ * "mediatek,mt7629-timer" for MT7629 compatible timers (SYST)
+ * "mediatek,mt6765-timer" for MT6765 and all above compatible timers (SYST)
+
- reg: Should contain location and length for timer register.
- clocks: Should contain system clock.
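
A SYST-based node for one of the SoCs listed above could look like the sketch
below; the address, interrupt specifier and clock phandle are placeholders:

    timer@10017000 {
        compatible = "mediatek,mt6765-timer";
        reg = <0x10017000 0x1000>;
        interrupts = <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&system_clk>;
    };
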
diff --git a/Documentation/devicetree/bindings/timer/nvidia,tegra210-timer.txt b/Documentation/devicetree/bindings/timer/nvidia,tegra210-timer.txt
new file mode 100644
index 000000000000..032cda96fe0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nvidia,tegra210-timer.txt
@@ -0,0 +1,36 @@
+NVIDIA Tegra210 timer
+
+The Tegra210 timer provides fourteen 29-bit timer counters and one 32-bit
+timestamp counter. The TMRs run at either a fixed 1 MHz clock rate derived
+from the oscillator clock (TMR0-TMR9) or directly at the oscillator clock
+(TMR10-TMR13). Each TMR can be programmed to generate one-shot, periodic,
+or watchdog interrupts.
+
+Required properties:
+- compatible : "nvidia,tegra210-timer".
+- reg : Specifies base physical address and size of the registers.
+- interrupts : A list of 14 interrupts; one for each of timer channels 0
+ through 13.
+- clocks : Must contain one entry, for the module clock.
+ See ../clocks/clock-bindings.txt for details.
+
+timer@60005000 {
+ compatible = "nvidia,tegra210-timer";
+ reg = <0x0 0x60005000 0x0 0x400>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_TIMER>;
+ clock-names = "timer";
+};
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
index 862a80f0380a..c0594450e9ef 100644
--- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
@@ -32,6 +32,8 @@ Required Properties:
- "renesas,r8a77470-cmt1" for the 48-bit CMT1 device included in r8a77470.
- "renesas,r8a774a1-cmt0" for the 32-bit CMT0 device included in r8a774a1.
- "renesas,r8a774a1-cmt1" for the 48-bit CMT1 device included in r8a774a1.
+ - "renesas,r8a774c0-cmt0" for the 32-bit CMT0 device included in r8a774c0.
+ - "renesas,r8a774c0-cmt1" for the 48-bit CMT1 device included in r8a774c0.
- "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790.
- "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790.
- "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791.
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.txt b/Documentation/devicetree/bindings/timer/renesas,tmu.txt
index 4ddff85837da..13ad07416bdd 100644
--- a/Documentation/devicetree/bindings/timer/renesas,tmu.txt
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.txt
@@ -10,6 +10,7 @@ Required Properties:
- compatible: must contain one or more of the following:
- "renesas,tmu-r8a7740" for the r8a7740 TMU
+ - "renesas,tmu-r8a774c0" for the r8a774C0 TMU
- "renesas,tmu-r8a7778" for the r8a7778 TMU
- "renesas,tmu-r8a7779" for the r8a7779 TMU
- "renesas,tmu-r8a77970" for the r8a77970 TMU
diff --git a/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt b/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt
new file mode 100644
index 000000000000..ac44c4b67530
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt
@@ -0,0 +1,17 @@
+Milbeaut SoCs Timer Controller
+
+Required properties:
+
+- compatible : should be "socionext,milbeaut-timer".
+- reg : Specifies base physical address and size of the registers.
+- interrupts : The interrupt of the first timer.
+- clocks: phandle to the input clk.
+
+Example:
+
+timer {
+ compatible = "socionext,milbeaut-timer";
+ reg = <0x1e000050 0x20>;
+ interrupts = <0 91 4>;
+ clocks = <&clk 4>;
+};
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index cc64ec63a6ad..d79fb22bde39 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -322,6 +322,8 @@ properties:
- ti,ads7830
# Temperature Monitoring and Fan Control
- ti,amc6821
+ # Temperature sensor with integrated fan control
+ - ti,lm96000
# I2C Touch-Screen Controller
- ti,tsc2003
# Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index adae82385dd6..a254386a91ad 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -93,6 +93,7 @@ i.mx specific properties
- over-current-active-low: over current signal polarity is active low.
- over-current-active-high: over current signal polarity is active high.
It's recommended to specify the over current polarity.
+- power-active-high: power signal polarity is active high.
- external-vbus-divider: enables off-chip resistor divider for Vbus
Example:
diff --git a/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt b/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt
new file mode 100644
index 000000000000..620355cee63f
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt
@@ -0,0 +1,24 @@
+Ingenic JZ4740 MUSB driver
+
+Required properties:
+
+- compatible: Must be "ingenic,jz4740-musb"
+- reg: Address range of the UDC register set
+- interrupts: IRQ number related to the UDC hardware
+- interrupt-names: must be "mc"
+- clocks: phandle to the "udc" clock
+- clock-names: must be "udc"
+
+Example:
+
+udc: usb@13040000 {
+ compatible = "ingenic,jz4740-musb";
+ reg = <0x13040000 0x10000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <24>;
+ interrupt-names = "mc";
+
+ clocks = <&cgu JZ4740_CLK_UDC>;
+ clock-names = "udc";
+};
diff --git a/Documentation/devicetree/bindings/usb/keystone-usb.txt b/Documentation/devicetree/bindings/usb/keystone-usb.txt
index f96e09f784cc..77df82e36138 100644
--- a/Documentation/devicetree/bindings/usb/keystone-usb.txt
+++ b/Documentation/devicetree/bindings/usb/keystone-usb.txt
@@ -3,7 +3,9 @@ TI Keystone Soc USB Controller
DWC3 GLUE
Required properties:
- - compatible: should be "ti,keystone-dwc3".
+ - compatible: should be
+ "ti,keystone-dwc3" for Keystone 2 SoCs
+ "ti,am654-dwc3" for AM654 SoC
- #address-cells, #size-cells : should be '1' if the device has sub-nodes
with 'reg' property.
- reg : Address and length of the register set for the USB subsystem on
@@ -21,7 +23,7 @@ SoCs only:
- clock-names: Must be "usb".
-The following are mandatory properties for Keystone 2 66AK2G SoCs only:
+The following are mandatory properties for 66AK2G and AM654:
- power-domains: Should contain a phandle to a PM domain provider node
and an args specifier containing the USB device id
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.txt b/Documentation/devicetree/bindings/usb/qcom,dwc3.txt
index 95afdcf3c337..cb695aa3fba4 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: Compatible list, contains
"qcom,dwc3"
"qcom,msm8996-dwc3" for msm8996 SOC.
+ "qcom,msm8998-dwc3" for msm8998 SOC.
"qcom,sdm845-dwc3" for sdm845 SOC.
- reg: Offset and length of register set for QSCRATCH wrapper
- power-domains: specifies a phandle to PM domain provider node
diff --git a/Documentation/devicetree/bindings/usb/renesas_usb3.txt b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
index d366555166d0..35039e720515 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usb3.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
@@ -3,6 +3,7 @@ Renesas Electronics USB3.0 Peripheral driver
Required properties:
- compatible: Must contain one of the following:
- "renesas,r8a774a1-usb3-peri"
+ - "renesas,r8a774c0-usb3-peri"
- "renesas,r8a7795-usb3-peri"
- "renesas,r8a7796-usb3-peri"
- "renesas,r8a77965-usb3-peri"
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index 90719f501852..d93b6a1504f2 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -7,6 +7,7 @@ Required properties:
- "renesas,usbhs-r8a7744" for r8a7744 (RZ/G1N) compatible device
- "renesas,usbhs-r8a7745" for r8a7745 (RZ/G1E) compatible device
- "renesas,usbhs-r8a774a1" for r8a774a1 (RZ/G2M) compatible device
+ - "renesas,usbhs-r8a774c0" for r8a774c0 (RZ/G2E) compatible device
- "renesas,usbhs-r8a7790" for r8a7790 (R-Car H2) compatible device
- "renesas,usbhs-r8a7791" for r8a7791 (R-Car M2-W) compatible device
- "renesas,usbhs-r8a7792" for r8a7792 (R-Car V2H) compatible device
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index 168ff819e827..17915f64b8ee 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -64,6 +64,8 @@ Optional properties :
- power-on-time-ms : Specifies the time it takes from the time the host
initiates the power-on sequence to a port until the port has adequate
power. The value is given in ms in a 0 - 510 range (default is 100ms).
+ - swap-dx-lanes : Specifies the ports which will swap the differential-pair
+ (D+/D-), default is not-swapped.
Examples:
usb2512b@2c {
@@ -81,4 +83,6 @@ Examples:
manufacturer = "Foo";
product = "Foo-Bar";
serial = "1234567890A";
+ /* correct misplaced usb connectors on port 1,2 */
+ swap-dx-lanes = <1 2>;
};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 389508584f48..e604e7f73629 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -24,6 +24,7 @@ amarula Amarula Solutions
amazon Amazon.com, Inc.
amcc	Applied Micro Circuits Corporation (APM, formerly AMCC)
amd Advanced Micro Devices (AMD), Inc.
+amediatech Shenzhen Amediatech Technology Co., Ltd
amlogic Amlogic, Inc.
ampire Ampire Co., Ltd.
ams AMS AG
@@ -65,6 +66,7 @@ bticino Bticino International
calxeda Calxeda
capella Capella Microsystems, Inc
cascoda Cascoda, Ltd.
+catalyst Catalyst Semiconductor, Inc.
cavium Cavium, Inc.
cdns Cadence Design Systems Inc.
cdtech CDTech(H.K.) Electronics Limited
@@ -108,11 +110,13 @@ dongwoon Dongwoon Anatech
dptechnics DPTechnics
dragino Dragino Technology Co., Limited
ea Embedded Artists AB
+ebs-systart EBS-SYSTART GmbH
ebv EBV Elektronik
eckelmann Eckelmann AG
edt Emerging Display Technologies
eeti eGalax_eMPIA Technology Inc
elan Elan Microelectronic Corp.
+elgin Elgin S/A.
embest Shenzhen Embest Technology Co., Ltd.
emlid Emlid, Ltd.
emmicro EM Microelectronic
@@ -150,6 +154,7 @@ geniatech Geniatech, Inc.
giantec Giantec Semiconductor, Inc.
giantplus Giantplus Technology Co., Ltd.
globalscale Globalscale Technologies, Inc.
+globaltop GlobalTop Technology, Inc.
gmt Global Mixed-mode Technology, Inc.
goodix Shenzhen Huiding Technology Co., Ltd.
google Google, Inc.
@@ -273,6 +278,7 @@ nintendo Nintendo
nlt NLT Technologies, Ltd.
nokia Nokia
nordic Nordic Semiconductor
+novtech NovTech, Inc.
nutsboard NutsBoard
nuvoton Nuvoton Technology Corporation
nvd New Vision Display
@@ -304,6 +310,7 @@ phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
pine64 Pine64
pixcir PIXCIR MICROELECTRONICS Co., Ltd
+plantower Plantower Co., Ltd
plathome Plat'Home Co., Ltd.
plda PLDA
plx Broadcom Corporation (formerly PLX Technology)
diff --git a/Documentation/driver-api/80211/mac80211.rst b/Documentation/driver-api/80211/mac80211.rst
index 85a8335e80b6..eab40bcf3987 100644
--- a/Documentation/driver-api/80211/mac80211.rst
+++ b/Documentation/driver-api/80211/mac80211.rst
@@ -126,6 +126,9 @@ functions/definitions
:functions: ieee80211_rx_status
.. kernel-doc:: include/net/mac80211.h
+ :functions: mac80211_rx_encoding_flags
+
+.. kernel-doc:: include/net/mac80211.h
:functions: mac80211_rx_flags
.. kernel-doc:: include/net/mac80211.h
diff --git a/Documentation/driver-api/component.rst b/Documentation/driver-api/component.rst
new file mode 100644
index 000000000000..2da4a8f20607
--- /dev/null
+++ b/Documentation/driver-api/component.rst
@@ -0,0 +1,17 @@
+======================================
+Component Helper for Aggregate Drivers
+======================================
+
+.. kernel-doc:: drivers/base/component.c
+ :doc: overview
+
+
+API
+===
+
+.. kernel-doc:: include/linux/component.h
+ :internal:
+
+.. kernel-doc:: drivers/base/component.c
+ :export:
+
diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst
index d6763272e747..ae1e3d0394b0 100644
--- a/Documentation/driver-api/device_link.rst
+++ b/Documentation/driver-api/device_link.rst
@@ -1,6 +1,9 @@
.. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain <dev_pm_domain>`
.. |struct generic_pm_domain| replace:: :c:type:`struct generic_pm_domain <generic_pm_domain>`
+
+.. _device_link:
+
============
Device links
============
@@ -25,8 +28,8 @@ suspend/resume and shutdown ordering.
Device links allow representation of such dependencies in the driver core.
-In its standard form, a device link combines *both* dependency types:
-It guarantees correct suspend/resume and shutdown ordering between a
+In its standard or *managed* form, a device link combines *both* dependency
+types: It guarantees correct suspend/resume and shutdown ordering between a
"supplier" device and its "consumer" devices, and it guarantees driver
presence on the supplier. The consumer devices are not probed before the
supplier is bound to a driver, and they're unbound before the supplier
@@ -59,18 +62,24 @@ device ``->probe`` callback or a boot-time PCI quirk.
Another example for an inconsistent state would be a device link that
represents a driver presence dependency, yet is added from the consumer's
-``->probe`` callback while the supplier hasn't probed yet: Had the driver
-core known about the device link earlier, it wouldn't have probed the
+``->probe`` callback while the supplier hasn't started to probe yet: Had the
+driver core known about the device link earlier, it wouldn't have probed the
consumer in the first place. The onus is thus on the consumer to check
presence of the supplier after adding the link, and defer probing on
-non-presence.
-
-If a device link is added in the ``->probe`` callback of the supplier or
-consumer driver, it is typically deleted in its ``->remove`` callback for
-symmetry. That way, if the driver is compiled as a module, the device
-link is added on module load and orderly deleted on unload. The same
-restrictions that apply to device link addition (e.g. exclusion of a
-parallel suspend/resume transition) apply equally to deletion.
+non-presence. [Note that it is valid to create a link from the consumer's
+``->probe`` callback while the supplier is still probing, but the consumer must
+know that the supplier is functional already at the link creation time (that is
+the case, for instance, if the consumer has just acquired some resources that
+would not have been available had the supplier not been functional then).]
+
+If a device link with ``DL_FLAG_STATELESS`` set (i.e. a stateless device link)
+is added in the ``->probe`` callback of the supplier or consumer driver, it is
+typically deleted in its ``->remove`` callback for symmetry. That way, if the
+driver is compiled as a module, the device link is added on module load and
+orderly deleted on unload. The same restrictions that apply to device link
+addition (e.g. exclusion of a parallel suspend/resume transition) apply equally
+to deletion. Device links with ``DL_FLAG_STATELESS`` unset (i.e. managed
+device links) are deleted automatically by the driver core.
Several flags may be specified on device link addition, two of which
have already been mentioned above: ``DL_FLAG_STATELESS`` to express that no
@@ -80,25 +89,55 @@ integration is desired.
Two other flags are specifically targeted at use cases where the device
link is added from the consumer's ``->probe`` callback: ``DL_FLAG_RPM_ACTIVE``
-can be specified to runtime resume the supplier upon addition of the
-device link. ``DL_FLAG_AUTOREMOVE_CONSUMER`` causes the device link to be
-automatically purged when the consumer fails to probe or later unbinds.
-This obviates the need to explicitly delete the link in the ``->remove``
-callback or in the error path of the ``->probe`` callback.
+can be specified to runtime resume the supplier and prevent it from suspending
+before the consumer is runtime suspended. ``DL_FLAG_AUTOREMOVE_CONSUMER``
+causes the device link to be automatically purged when the consumer fails to
+probe or later unbinds.
Similarly, when the device link is added from supplier's ``->probe`` callback,
``DL_FLAG_AUTOREMOVE_SUPPLIER`` causes the device link to be automatically
purged when the supplier fails to probe or later unbinds.
+If neither ``DL_FLAG_AUTOREMOVE_CONSUMER`` nor ``DL_FLAG_AUTOREMOVE_SUPPLIER``
+is set, ``DL_FLAG_AUTOPROBE_CONSUMER`` can be used to request the driver core
+to probe for a driver for the consumer device on the link automatically after
+a driver has been bound to the supplier device.
+
+Note, however, that any combinations of ``DL_FLAG_AUTOREMOVE_CONSUMER``,
+``DL_FLAG_AUTOREMOVE_SUPPLIER`` or ``DL_FLAG_AUTOPROBE_CONSUMER`` with
+``DL_FLAG_STATELESS`` are invalid and cannot be used.
+
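+As an illustration only, here is a minimal sketch of a consumer adding a
+managed link from its ``->probe`` callback.  The helpers ``my_get_supplier()``
+and ``my_supplier_is_ready()`` are hypothetical stand-ins for whatever
+mechanism the consumer uses to find its supplier and to verify that it is
+already functional::
+
+    #include <linux/device.h>
+    #include <linux/platform_device.h>
+
+    static int consumer_probe(struct platform_device *pdev)
+    {
+            struct device *supplier = my_get_supplier(&pdev->dev);
+            struct device_link *link;
+
+            /* Managed link: purged automatically by the driver core if this
+             * probe fails or when the consumer is unbound later. */
+            link = device_link_add(&pdev->dev, supplier,
+                                   DL_FLAG_AUTOREMOVE_CONSUMER);
+            if (!link)
+                    return -ENODEV;
+
+            /* As described above, the onus is on the consumer to check that
+             * the supplier is functional and to defer probing otherwise. */
+            if (!my_supplier_is_ready(supplier))
+                    return -EPROBE_DEFER;
+
+            return 0;
+    }
+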
Limitations
===========
-Driver authors should be aware that a driver presence dependency (i.e. when
-``DL_FLAG_STATELESS`` is not specified on link addition) may cause probing of
-the consumer to be deferred indefinitely. This can become a problem if the
-consumer is required to probe before a certain initcall level is reached.
-Worse, if the supplier driver is blacklisted or missing, the consumer will
-never be probed.
+Driver authors should be aware that a driver presence dependency for managed
+device links (i.e. when ``DL_FLAG_STATELESS`` is not specified on link addition)
+may cause probing of the consumer to be deferred indefinitely. This can become
+a problem if the consumer is required to probe before a certain initcall level
+is reached. Worse, if the supplier driver is blacklisted or missing, the
+consumer will never be probed.
+
+Moreover, managed device links cannot be deleted directly. They are deleted
+by the driver core when they are not necessary any more in accordance with the
+``DL_FLAG_AUTOREMOVE_CONSUMER`` and ``DL_FLAG_AUTOREMOVE_SUPPLIER`` flags.
+However, stateless device links (i.e. device links with ``DL_FLAG_STATELESS``
+set) are expected to be removed by whoever called :c:func:`device_link_add()`
+to add them with the help of either :c:func:`device_link_del()` or
+:c:func:`device_link_remove()`.
+
+Passing ``DL_FLAG_RPM_ACTIVE`` along with ``DL_FLAG_STATELESS`` to
+:c:func:`device_link_add()` may cause the PM-runtime usage counter of the
+supplier device to remain nonzero after a subsequent invocation of either
+:c:func:`device_link_del()` or :c:func:`device_link_remove()` to remove the
+device link returned by it. This happens if :c:func:`device_link_add()` is
+called twice in a row for the same consumer-supplier pair without removing the
+link between these calls, in which case allowing the PM-runtime usage counter
+of the supplier to drop on an attempt to remove the link may cause it to be
+suspended while the consumer is still PM-runtime-active and that has to be
+avoided. [To work around this limitation it is sufficient to let the consumer
+runtime suspend at least once, or call :c:func:`pm_runtime_set_suspended()` for
+it with PM-runtime disabled, between the :c:func:`device_link_add()` and
+:c:func:`device_link_del()` or :c:func:`device_link_remove()` calls.]
Sometimes drivers depend on optional resources. They are able to operate
in a degraded mode (reduced feature set or performance) when those resources
@@ -282,4 +321,4 @@ API
===
.. kernel-doc:: drivers/base/core.c
- :functions: device_link_add device_link_del
+ :functions: device_link_add device_link_del device_link_remove
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index ab38ced66a44..c0b600ed9961 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -22,6 +22,7 @@ available subsections can be seen below.
device_connection
dma-buf
device_link
+ component
message-based
sound
frame-buffer
diff --git a/Documentation/driver-api/pm/cpuidle.rst b/Documentation/driver-api/pm/cpuidle.rst
new file mode 100644
index 000000000000..5842ab621a58
--- /dev/null
+++ b/Documentation/driver-api/pm/cpuidle.rst
@@ -0,0 +1,282 @@
+.. |struct cpuidle_governor| replace:: :c:type:`struct cpuidle_governor <cpuidle_governor>`
+.. |struct cpuidle_device| replace:: :c:type:`struct cpuidle_device <cpuidle_device>`
+.. |struct cpuidle_driver| replace:: :c:type:`struct cpuidle_driver <cpuidle_driver>`
+.. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state <cpuidle_state>`
+
+========================
+CPU Idle Time Management
+========================
+
+::
+
+ Copyright (c) 2019 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+CPU Idle Time Management Subsystem
+==================================
+
+Every time one of the logical CPUs in the system (the entities that appear to
+fetch and execute instructions: hardware threads, if present, or processor
+cores) is idle after an interrupt or equivalent wakeup event, which means that
+there are no tasks to run on it except for the special "idle" task associated
+with it, there is an opportunity to save energy for the processor that it
+belongs to. That can be done by making the idle logical CPU stop fetching
+instructions from memory and putting some of the processor's functional units
+that it depends on into an idle state in which they will draw less power.
+
+However, there may be multiple different idle states that can be used in such a
+situation in principle, so it may be necessary to find the most suitable one
+(from the kernel perspective) and ask the processor to use (or "enter") that
+particular idle state. That is the role of the CPU idle time management
+subsystem in the kernel, called ``CPUIdle``.
+
+The design of ``CPUIdle`` is modular and based on the code duplication avoidance
+principle, so the generic code that in principle need not depend on the hardware
+or platform design details is separate from the code that interacts with
+the hardware. It generally is divided into three categories of functional
+units: *governors* responsible for selecting idle states to ask the processor
+to enter, *drivers* that pass the governors' decisions on to the hardware and
+the *core* providing a common framework for them.
+
+
+CPU Idle Time Governors
+=======================
+
+A CPU idle time (``CPUIdle``) governor is a bundle of policy code invoked when
+one of the logical CPUs in the system turns out to be idle. Its role is to
+select an idle state to ask the processor to enter in order to save some energy.
+
+``CPUIdle`` governors are generic and each of them can be used on any hardware
+platform that the Linux kernel can run on. For this reason, data structures
+operated on by them cannot depend on any hardware architecture or platform
+design details either.
+
+The governor itself is represented by a |struct cpuidle_governor| object
+containing four callback pointers, :c:member:`enable`, :c:member:`disable`,
+:c:member:`select`, :c:member:`reflect`, a :c:member:`rating` field described
+below, and a name (string) used for identifying it.
+
+For the governor to be available at all, that object needs to be registered
+with the ``CPUIdle`` core by calling :c:func:`cpuidle_register_governor()` with
+a pointer to it passed as the argument. If successful, that causes the core to
+add the governor to the global list of available governors and, if it is the
+only one in the list (that is, the list was empty before) or the value of its
+:c:member:`rating` field is greater than the value of that field for the
+governor currently in use, or the name of the new governor was passed to the
+kernel as the value of the ``cpuidle.governor=`` command line parameter, the new
+governor will be used from that point on (there can be only one ``CPUIdle``
+governor in use at a time). Also, if ``cpuidle_sysfs_switch`` is passed to the
+kernel in the command line, user space can choose the ``CPUIdle`` governor to
+use at run time via ``sysfs``.
+
+Once registered, ``CPUIdle`` governors cannot be unregistered, so it is not
+practical to put them into loadable kernel modules.
+
+The interface between ``CPUIdle`` governors and the core consists of four
+callbacks:
+
+:c:member:`enable`
+ ::
+
+ int (*enable) (struct cpuidle_driver *drv, struct cpuidle_device *dev);
+
+ The role of this callback is to prepare the governor for handling the
+ (logical) CPU represented by the |struct cpuidle_device| object pointed
+ to by the ``dev`` argument. The |struct cpuidle_driver| object pointed
+ to by the ``drv`` argument represents the ``CPUIdle`` driver to be used
+ with that CPU (among other things, it should contain the list of
+ |struct cpuidle_state| objects representing idle states that the
+ processor holding the given CPU can be asked to enter).
+
+ It may fail, in which case it is expected to return a negative error
+ code, and that causes the kernel to run the architecture-specific
+ default code for idle CPUs on the CPU in question instead of ``CPUIdle``
+ until the ``->enable()`` governor callback is invoked for that CPU
+ again.
+
+:c:member:`disable`
+ ::
+
+ void (*disable) (struct cpuidle_driver *drv, struct cpuidle_device *dev);
+
+ Called to make the governor stop handling the (logical) CPU represented
+ by the |struct cpuidle_device| object pointed to by the ``dev``
+ argument.
+
+ It is expected to reverse any changes made by the ``->enable()``
+ callback when it was last invoked for the target CPU, free all memory
+ allocated by that callback and so on.
+
+:c:member:`select`
+ ::
+
+ int (*select) (struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ bool *stop_tick);
+
+ Called to select an idle state for the processor holding the (logical)
+ CPU represented by the |struct cpuidle_device| object pointed to by the
+ ``dev`` argument.
+
+ The list of idle states to take into consideration is represented by the
+ :c:member:`states` array of |struct cpuidle_state| objects held by the
+ |struct cpuidle_driver| object pointed to by the ``drv`` argument (which
+ represents the ``CPUIdle`` driver to be used with the CPU at hand). The
+ value returned by this callback is interpreted as an index into that
+ array (unless it is a negative error code).
+
+ The ``stop_tick`` argument is used to indicate whether or not to stop
+ the scheduler tick before asking the processor to enter the selected
+ idle state. When the ``bool`` variable pointed to by it (which is set
+ to ``true`` before invoking this callback) is cleared to ``false``, the
+ processor will be asked to enter the selected idle state without
+ stopping the scheduler tick on the given CPU (if the tick has been
+ stopped on that CPU already, however, it will not be restarted before
+ asking the processor to enter the idle state).
+
+ This callback is mandatory (i.e. the :c:member:`select` callback pointer
+ in |struct cpuidle_governor| must not be ``NULL`` for the registration
+ of the governor to succeed).
+
+:c:member:`reflect`
+ ::
+
+ void (*reflect) (struct cpuidle_device *dev, int index);
+
+ Called to allow the governor to evaluate the accuracy of the idle state
+ selection made by the ``->select()`` callback (when it was invoked last
+ time) and possibly use the result of that to improve the accuracy of
+ idle state selections in the future.
+
+In addition, ``CPUIdle`` governors are required to take power management
+quality of service (PM QoS) constraints on the processor wakeup latency into
+account when selecting idle states. In order to obtain the current effective
+PM QoS wakeup latency constraint for a given CPU, a ``CPUIdle`` governor is
+expected to pass the number of the CPU to
+:c:func:`cpuidle_governor_latency_req()`. Then, the governor's ``->select()``
+callback must not return the index of an idle state whose
+:c:member:`exit_latency` value is greater than the number returned by that
+function.
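+
+As an illustration only, below is a minimal governor sketch that always picks
+the deepest idle state satisfying the current latency constraint (a real
+governor would also skip states disabled via ``sysfs``; the exact return type
+of :c:func:`cpuidle_governor_latency_req()` may differ between kernel
+versions)::
+
+    #include <linux/cpuidle.h>
+    #include <linux/init.h>
+
+    static int toy_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+                          bool *stop_tick)
+    {
+            s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+            int i;
+
+            /* The states array is sorted by "depth", so scan it backwards. */
+            for (i = drv->state_count - 1; i > 0; i--) {
+                    if (drv->states[i].exit_latency <= latency_req)
+                            return i;
+            }
+
+            return 0;       /* index 0 is the shallowest state */
+    }
+
+    static struct cpuidle_governor toy_governor = {
+            .name   = "toy",
+            .rating = 1,            /* low rating: never preferred automatically */
+            .select = toy_select,   /* the only mandatory callback */
+    };
+
+    static int __init toy_governor_init(void)
+    {
+            return cpuidle_register_governor(&toy_governor);
+    }
+    postcore_initcall(toy_governor_init);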
+
+
+CPU Idle Time Management Drivers
+================================
+
+CPU idle time management (``CPUIdle``) drivers provide an interface between the
+other parts of ``CPUIdle`` and the hardware.
+
+First of all, a ``CPUIdle`` driver has to populate the :c:member:`states` array
+of |struct cpuidle_state| objects included in the |struct cpuidle_driver| object
+representing it. Going forward this array will represent the list of available
+idle states that the processor hardware can be asked to enter, shared by all of
+the logical CPUs handled by the given driver.
+
+The entries in the :c:member:`states` array are expected to be sorted by the
+value of the :c:member:`target_residency` field in |struct cpuidle_state| in
+the ascending order (that is, index 0 should correspond to the idle state with
+the minimum value of :c:member:`target_residency`). [Since the
+:c:member:`target_residency` value is expected to reflect the "depth" of the
+idle state represented by the |struct cpuidle_state| object holding it, this
+sorting order should be the same as the ascending sorting order by the idle
+state "depth".]
+
+Three fields in |struct cpuidle_state| are used by the existing ``CPUIdle``
+governors for computations related to idle state selection:
+
+:c:member:`target_residency`
+ Minimum time to spend in this idle state including the time needed to
+ enter it (which may be substantial) to save more energy than could
+ be saved by staying in a shallower idle state for the same amount of
+ time, in microseconds.
+
+:c:member:`exit_latency`
+ Maximum time it will take a CPU asking the processor to enter this idle
+ state to start executing the first instruction after a wakeup from it,
+ in microseconds.
+
+:c:member:`flags`
+ Flags representing idle state properties. Currently, governors only use
+ the ``CPUIDLE_FLAG_POLLING`` flag which is set if the given object
+ does not represent a real idle state, but an interface to a software
+ "loop" that can be used in order to avoid asking the processor to enter
+ any idle state at all. [There are other flags used by the ``CPUIdle``
+ core in special situations.]
+
+The :c:member:`enter` callback pointer in |struct cpuidle_state|, which must not
+be ``NULL``, points to the routine to execute in order to ask the processor to
+enter this particular idle state:
+
+::
+
+  int (*enter) (struct cpuidle_device *dev, struct cpuidle_driver *drv,
+                int index);
+
+The first two arguments of it point to the |struct cpuidle_device| object
+representing the logical CPU running this callback and the
+|struct cpuidle_driver| object representing the driver itself, respectively,
+and the last one is an index of the |struct cpuidle_state| entry in the driver's
+:c:member:`states` array representing the idle state to ask the processor to
+enter. The value returned by this callback is expected to be the index of the
+idle state actually entered (or a negative error code if entering it failed).
+
+The analogous ``->enter_s2idle()`` callback in |struct cpuidle_state| is used
+only for implementing the suspend-to-idle system-wide power management feature.
+The difference between it and ``->enter()`` is that it must not re-enable
+interrupts at any point (even temporarily) or attempt to change the states of
+clock event devices, which the ``->enter()`` callback may do sometimes.
+
+Once the :c:member:`states` array has been populated, the number of valid
+entries in it has to be stored in the :c:member:`state_count` field of the
+|struct cpuidle_driver| object representing the driver. Moreover, if any
+entries in the :c:member:`states` array represent "coupled" idle states (that
+is, idle states that can only be asked for if multiple related logical CPUs are
+idle), the :c:member:`safe_state_index` field in |struct cpuidle_driver| needs
+to be the index of an idle state that is not "coupled" (that is, one that can be
+asked for if only one logical CPU is idle).
+
+In addition to that, if the given ``CPUIdle`` driver is only going to handle a
+subset of logical CPUs in the system, the :c:member:`cpumask` field in its
+|struct cpuidle_driver| object must point to the set (mask) of CPUs that will be
+handled by it.
+
+A ``CPUIdle`` driver can only be used after it has been registered. If there
+are no "coupled" idle state entries in the driver's :c:member:`states` array,
+that can be accomplished by passing the driver's |struct cpuidle_driver| object
+to :c:func:`cpuidle_register_driver()`. Otherwise, :c:func:`cpuidle_register()`
+should be used for this purpose.
+
+However, it also is necessary to register |struct cpuidle_device| objects for
+all of the logical CPUs to be handled by the given ``CPUIdle`` driver with the
+help of :c:func:`cpuidle_register_device()` after the driver has been registered
+and :c:func:`cpuidle_register_driver()`, unlike :c:func:`cpuidle_register()`,
+does not do that automatically. For this reason, the drivers that use
+:c:func:`cpuidle_register_driver()` to register themselves must also take care
+of registering the |struct cpuidle_device| objects as needed, so it is generally
+recommended to use :c:func:`cpuidle_register()` for ``CPUIdle`` driver
+registration in all cases.
+
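+As an illustration only, the pieces above put together for a hypothetical
+platform might look like this (``my_hw_enter_idle()`` stands for the
+platform-specific way of actually idling the hardware, and the state names,
+latencies and residencies are made up)::
+
+    #include <linux/cpuidle.h>
+    #include <linux/init.h>
+
+    static void my_hw_enter_idle(int index)
+    {
+            /* hypothetical: program the hardware to enter the given state */
+    }
+
+    static int my_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+                        int index)
+    {
+            my_hw_enter_idle(index);
+            return index;   /* index of the idle state actually entered */
+    }
+
+    static struct cpuidle_driver my_idle_driver = {
+            .name = "my_idle",
+            .states = {
+                    {       /* index 0: the shallowest state */
+                            .name = "C1",
+                            .desc = "clock gated",
+                            .exit_latency = 2,              /* microseconds */
+                            .target_residency = 10,         /* microseconds */
+                            .enter = my_enter,
+                    },
+                    {       /* index 1: a deeper state */
+                            .name = "C2",
+                            .desc = "power gated",
+                            .exit_latency = 50,
+                            .target_residency = 500,
+                            .enter = my_enter,
+                    },
+            },
+            .state_count = 2,
+    };
+
+    /* No "coupled" states, so cpuidle_register() both registers the driver
+     * and registers a cpuidle device for every CPU handled by it. */
+    static int __init my_idle_init(void)
+    {
+            return cpuidle_register(&my_idle_driver, NULL);
+    }
+    device_initcall(my_idle_init);
+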
+The registration of a |struct cpuidle_device| object causes the ``CPUIdle``
+``sysfs`` interface to be created and the governor's ``->enable()`` callback to
+be invoked for the logical CPU represented by it, so it must take place after
+registering the driver that will handle the CPU in question.
+
+``CPUIdle`` drivers and |struct cpuidle_device| objects can be unregistered
+when they are not necessary any more which allows some resources associated with
+them to be released. Due to dependencies between them, all of the
+|struct cpuidle_device| objects representing CPUs handled by the given
+``CPUIdle`` driver must be unregistered, with the help of
+:c:func:`cpuidle_unregister_device()`, before calling
+:c:func:`cpuidle_unregister_driver()` to unregister the driver. Alternatively,
+:c:func:`cpuidle_unregister()` can be called to unregister a ``CPUIdle`` driver
+along with all of the |struct cpuidle_device| objects representing CPUs handled
+by it.
+
+``CPUIdle`` drivers can respond to runtime system configuration changes that
+lead to modifications of the list of available processor idle states (which can
+happen, for example, when the system's power source is switched from AC to
+battery or the other way around). Upon a notification of such a change,
+a ``CPUIdle`` driver is expected to call :c:func:`cpuidle_pause_and_lock()` to
+turn ``CPUIdle`` off temporarily and then :c:func:`cpuidle_disable_device()` for
+all of the |struct cpuidle_device| objects representing CPUs affected by that
+change. Next, it can update its :c:member:`states` array in accordance with
+the new configuration of the system, call :c:func:`cpuidle_enable_device()` for
+all of the relevant |struct cpuidle_device| objects and invoke
+:c:func:`cpuidle_resume_and_unlock()` to allow ``CPUIdle`` to be used again.
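+
+As an illustration only, a driver that registered its own per-CPU
+|struct cpuidle_device| objects could handle such a notification roughly as
+follows (``my_cpuidle_dev`` and ``my_update_states()`` are hypothetical)::
+
+    #include <linux/cpuidle.h>
+    #include <linux/cpumask.h>
+    #include <linux/percpu.h>
+
+    static DEFINE_PER_CPU(struct cpuidle_device, my_cpuidle_dev);
+
+    static void my_idle_config_changed(struct cpuidle_driver *drv)
+    {
+            int cpu;
+
+            cpuidle_pause_and_lock();
+
+            for_each_present_cpu(cpu)
+                    cpuidle_disable_device(per_cpu_ptr(&my_cpuidle_dev, cpu));
+
+            my_update_states(drv);  /* hypothetical: rebuild drv->states[] */
+
+            for_each_present_cpu(cpu)
+                    WARN_ON(cpuidle_enable_device(per_cpu_ptr(&my_cpuidle_dev, cpu)));
+
+            cpuidle_resume_and_unlock();
+    }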
diff --git a/Documentation/driver-api/pm/index.rst b/Documentation/driver-api/pm/index.rst
index 2f6d0e9cf6b7..56975c6bc789 100644
--- a/Documentation/driver-api/pm/index.rst
+++ b/Documentation/driver-api/pm/index.rst
@@ -1,9 +1,10 @@
-=======================
-Device Power Management
-=======================
+===============================
+CPU and Device Power Management
+===============================
.. toctree::
+ cpuidle
devices
notifiers
types
diff --git a/Documentation/driver-model/bus.txt b/Documentation/driver-model/bus.txt
index b577a45b93ea..c247b488a567 100644
--- a/Documentation/driver-model/bus.txt
+++ b/Documentation/driver-model/bus.txt
@@ -124,11 +124,11 @@ struct bus_attribute {
ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
};
-Bus drivers can export attributes using the BUS_ATTR macro that works
-similarly to the DEVICE_ATTR macro for devices. For example, a definition
-like this:
+Bus drivers can export attributes using the BUS_ATTR_RW macro that works
+similarly to the DEVICE_ATTR_RW macro for devices. For example, a
+definition like this:
-static BUS_ATTR(debug,0644,show_debug,store_debug);
+static BUS_ATTR_RW(debug);
is equivalent to declaring:
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 62af30511a95..60a5ec04e8f0 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -163,6 +163,14 @@ C. Boot options
	be preserved until there actually is some text output to the console.
This option causes fbcon to bind immediately to the fbdev device.
+7. fbcon=logo-pos:<location>
+
+ The only possible 'location' is 'center' (without quotes), and when
+ given, the bootup logo is moved from the default top-left corner
+ location to the center of the framebuffer. If more than one logo is
+ displayed due to multiple CPUs, the collected line of logos is moved
+ as a whole.
+
C. Attaching, Detaching and Unloading
Before going on to how to attach, detach and unload the framebuffer console, an
diff --git a/Documentation/features/core/cBPF-JIT/arch-support.txt b/Documentation/features/core/cBPF-JIT/arch-support.txt
index 90459cdde314..8620c38d4db0 100644
--- a/Documentation/features/core/cBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/cBPF-JIT/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/core/eBPF-JIT/arch-support.txt b/Documentation/features/core/eBPF-JIT/arch-support.txt
index c90a0382fe66..9ae6e8d0d10d 100644
--- a/Documentation/features/core/eBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/eBPF-JIT/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/core/generic-idle-thread/arch-support.txt b/Documentation/features/core/generic-idle-thread/arch-support.txt
index 0ef6acdb991c..365df2c2ff0b 100644
--- a/Documentation/features/core/generic-idle-thread/arch-support.txt
+++ b/Documentation/features/core/generic-idle-thread/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | ok |
| ia64: | ok |
diff --git a/Documentation/features/core/jump-labels/arch-support.txt b/Documentation/features/core/jump-labels/arch-support.txt
index 60111395f932..7fc2e243dee9 100644
--- a/Documentation/features/core/jump-labels/arch-support.txt
+++ b/Documentation/features/core/jump-labels/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt
index f44c274e40ed..d344b99aae1e 100644
--- a/Documentation/features/core/tracehook/arch-support.txt
+++ b/Documentation/features/core/tracehook/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | ok |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | ok |
| ia64: | ok |
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index 282ecc8ea1da..304dcd461795 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt
index 01b2b3004e0a..059d58a549c7 100644
--- a/Documentation/features/debug/gcov-profile-all/arch-support.txt
+++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index 3b4dff22329f..3e6b8f07d5d0 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | ok |
| hexagon: | ok |
| ia64: | TODO |
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 7e963d0ae646..68f266944d5f 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/debug/kprobes/arch-support.txt b/Documentation/features/debug/kprobes/arch-support.txt
index 4ada027faf16..f4e45bd58fea 100644
--- a/Documentation/features/debug/kprobes/arch-support.txt
+++ b/Documentation/features/debug/kprobes/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
diff --git a/Documentation/features/debug/kretprobes/arch-support.txt b/Documentation/features/debug/kretprobes/arch-support.txt
index 044e13fcca5d..1d5651ef11f8 100644
--- a/Documentation/features/debug/kretprobes/arch-support.txt
+++ b/Documentation/features/debug/kretprobes/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
diff --git a/Documentation/features/debug/optprobes/arch-support.txt b/Documentation/features/debug/optprobes/arch-support.txt
index dce7669c918f..fb297a88f62c 100644
--- a/Documentation/features/debug/optprobes/arch-support.txt
+++ b/Documentation/features/debug/optprobes/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt
index 954ac1c95553..9999ea521f3e 100644
--- a/Documentation/features/debug/stackprotector/arch-support.txt
+++ b/Documentation/features/debug/stackprotector/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt
index 1a3f9d3229bf..1c577d0cfc7f 100644
--- a/Documentation/features/debug/uprobes/arch-support.txt
+++ b/Documentation/features/debug/uprobes/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/debug/user-ret-profiler/arch-support.txt b/Documentation/features/debug/user-ret-profiler/arch-support.txt
index 1d78d1069a5f..6bfa36b0e017 100644
--- a/Documentation/features/debug/user-ret-profiler/arch-support.txt
+++ b/Documentation/features/debug/user-ret-profiler/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt
index 30c072d2b67c..eb28b5c97ca6 100644
--- a/Documentation/features/io/dma-contiguous/arch-support.txt
+++ b/Documentation/features/io/dma-contiguous/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt
index 51704a2dc8d1..242ff5a6586e 100644
--- a/Documentation/features/locking/cmpxchg-local/arch-support.txt
+++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/locking/lockdep/arch-support.txt b/Documentation/features/locking/lockdep/arch-support.txt
index bd39c5edd460..941fd5b1094d 100644
--- a/Documentation/features/locking/lockdep/arch-support.txt
+++ b/Documentation/features/locking/lockdep/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | ok |
| ia64: | TODO |
diff --git a/Documentation/features/locking/queued-rwlocks/arch-support.txt b/Documentation/features/locking/queued-rwlocks/arch-support.txt
index da7aff3bee0b..c683da198f31 100644
--- a/Documentation/features/locking/queued-rwlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-rwlocks/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | ok |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
index 478e9101322c..e3080b82aefd 100644
--- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/locking/rwsem-optimized/arch-support.txt b/Documentation/features/locking/rwsem-optimized/arch-support.txt
index e54b1f1a8091..7521d7500fbe 100644
--- a/Documentation/features/locking/rwsem-optimized/arch-support.txt
+++ b/Documentation/features/locking/rwsem-optimized/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt
index 7331402d1887..d8278bf62b85 100644
--- a/Documentation/features/perf/kprobes-event/arch-support.txt
+++ b/Documentation/features/perf/kprobes-event/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | ok |
| ia64: | TODO |
diff --git a/Documentation/features/perf/perf-regs/arch-support.txt b/Documentation/features/perf/perf-regs/arch-support.txt
index 53feeee6cdad..687d049d9cee 100644
--- a/Documentation/features/perf/perf-regs/arch-support.txt
+++ b/Documentation/features/perf/perf-regs/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/perf/perf-stackdump/arch-support.txt b/Documentation/features/perf/perf-stackdump/arch-support.txt
index 16164348e0ea..90996e3d18a8 100644
--- a/Documentation/features/perf/perf-stackdump/arch-support.txt
+++ b/Documentation/features/perf/perf-stackdump/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index c7858dd1ea8f..8a521a622966 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -34,6 +34,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/sched/numa-balancing/arch-support.txt b/Documentation/features/sched/numa-balancing/arch-support.txt
index c68bb2c2cb62..350823692f28 100644
--- a/Documentation/features/sched/numa-balancing/arch-support.txt
+++ b/Documentation/features/sched/numa-balancing/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | .. |
| arm64: | ok |
| c6x: | .. |
+ | csky: | .. |
| h8300: | .. |
| hexagon: | .. |
| ia64: | TODO |
diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
index d4271b493b41..4fe6c3c3be5c 100644
--- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt
+++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
index 83d9e68462bb..593536f7925b 100644
--- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt
+++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/time/clockevents/arch-support.txt b/Documentation/features/time/clockevents/arch-support.txt
index 3d4908fce6da..7a27157da408 100644
--- a/Documentation/features/time/clockevents/arch-support.txt
+++ b/Documentation/features/time/clockevents/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | ok |
+ | csky: | ok |
| h8300: | ok |
| hexagon: | ok |
| ia64: | TODO |
diff --git a/Documentation/features/time/context-tracking/arch-support.txt b/Documentation/features/time/context-tracking/arch-support.txt
index c29974afffaa..048bfb6d3872 100644
--- a/Documentation/features/time/context-tracking/arch-support.txt
+++ b/Documentation/features/time/context-tracking/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt
index 8d73c463ec27..a14bbad8e948 100644
--- a/Documentation/features/time/irq-time-acct/arch-support.txt
+++ b/Documentation/features/time/irq-time-acct/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | .. |
diff --git a/Documentation/features/time/modern-timekeeping/arch-support.txt b/Documentation/features/time/modern-timekeeping/arch-support.txt
index e7c6ea6b8fb3..2855dfe2464d 100644
--- a/Documentation/features/time/modern-timekeeping/arch-support.txt
+++ b/Documentation/features/time/modern-timekeeping/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | ok |
| c6x: | ok |
+ | csky: | ok |
| h8300: | ok |
| hexagon: | ok |
| ia64: | ok |
diff --git a/Documentation/features/time/virt-cpuacct/arch-support.txt b/Documentation/features/time/virt-cpuacct/arch-support.txt
index 4646457461cf..fb0d0cab9cab 100644
--- a/Documentation/features/time/virt-cpuacct/arch-support.txt
+++ b/Documentation/features/time/virt-cpuacct/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
diff --git a/Documentation/features/vm/ELF-ASLR/arch-support.txt b/Documentation/features/vm/ELF-ASLR/arch-support.txt
index 1f71d090ff2c..adc25878d217 100644
--- a/Documentation/features/vm/ELF-ASLR/arch-support.txt
+++ b/Documentation/features/vm/ELF-ASLR/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt
index fbd5aa463b0a..f05588f9e4b4 100644
--- a/Documentation/features/vm/PG_uncached/arch-support.txt
+++ b/Documentation/features/vm/PG_uncached/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | ok |
diff --git a/Documentation/features/vm/THP/arch-support.txt b/Documentation/features/vm/THP/arch-support.txt
index 5d7ecc378f29..cdfe8925f881 100644
--- a/Documentation/features/vm/THP/arch-support.txt
+++ b/Documentation/features/vm/THP/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | .. |
+ | csky: | .. |
| h8300: | .. |
| hexagon: | .. |
| ia64: | TODO |
diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt
index f7af9678eb66..2bdd3b6cee3c 100644
--- a/Documentation/features/vm/TLB/arch-support.txt
+++ b/Documentation/features/vm/TLB/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | TODO |
| c6x: | .. |
+ | csky: | TODO |
| h8300: | .. |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt
index d0713ccc7117..019131c5acce 100644
--- a/Documentation/features/vm/huge-vmap/arch-support.txt
+++ b/Documentation/features/vm/huge-vmap/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/vm/ioremap_prot/arch-support.txt b/Documentation/features/vm/ioremap_prot/arch-support.txt
index 326e4797bc65..3a6b87de6a19 100644
--- a/Documentation/features/vm/ioremap_prot/arch-support.txt
+++ b/Documentation/features/vm/ioremap_prot/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | TODO |
| arm64: | TODO |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/features/vm/numa-memblock/arch-support.txt b/Documentation/features/vm/numa-memblock/arch-support.txt
index 1a988052cd24..3004beb0fd71 100644
--- a/Documentation/features/vm/numa-memblock/arch-support.txt
+++ b/Documentation/features/vm/numa-memblock/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | .. |
| arm64: | ok |
| c6x: | .. |
+ | csky: | .. |
| h8300: | .. |
| hexagon: | .. |
| ia64: | ok |
diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt
index a8378424bc98..2dc5df6a1cf5 100644
--- a/Documentation/features/vm/pte_special/arch-support.txt
+++ b/Documentation/features/vm/pte_special/arch-support.txt
@@ -11,6 +11,7 @@
| arm: | ok |
| arm64: | ok |
| c6x: | TODO |
+ | csky: | TODO |
| h8300: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index a1426cabcef1..41411b0c60a3 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -344,7 +344,9 @@ struct bus_attribute {
Declaring:
-BUS_ATTR(_name, _mode, _show, _store)
+static BUS_ATTR_RW(name);
+static BUS_ATTR_RO(name);
+static BUS_ATTR_WO(name);
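+
+For example (an illustrative sketch; "foo" and foo_value are made-up names),
+BUS_ATTR_RW(foo) expects show/store callbacks named after the attribute:
+
+static int foo_value;
+
+static ssize_t foo_show(struct bus_type *bus, char *buf)
+{
+	return sprintf(buf, "%d\n", foo_value);
+}
+
+static ssize_t foo_store(struct bus_type *bus, const char *buf, size_t count)
+{
+	/* parse buf and update foo_value here */
+	return count;
+}
+
+static BUS_ATTR_RW(foo);
+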
Creation/Removal:
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 9ccfd1bc6201..a5cbb5e0e3db 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -272,7 +272,7 @@ The following sysctls are available for the XFS filesystem:
XFS_ERRLEVEL_LOW: 1
XFS_ERRLEVEL_HIGH: 5
- fs.xfs.panic_mask (Min: 0 Default: 0 Max: 255)
+ fs.xfs.panic_mask (Min: 0 Default: 0 Max: 256)
Causes certain error conditions to call BUG(). Value is a bitmask;
OR together the tags which represent errors which should cause panics:
@@ -285,6 +285,7 @@ The following sysctls are available for the XFS filesystem:
XFS_PTAG_SHUTDOWN_IOERROR 0x00000020
XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040
XFS_PTAG_FSBLOCK_ZERO 0x00000080
+ XFS_PTAG_VERIFIER_ERROR 0x00000100
This option is intended for debugging only.
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85
index 7c49feaa79d2..2329c383efe4 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85
@@ -3,9 +3,13 @@ Kernel driver lm85
Supported chips:
* National Semiconductor LM85 (B and C versions)
- Prefix: 'lm85'
+ Prefix: 'lm85b' or 'lm85c'
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
Datasheet: http://www.national.com/pf/LM/LM85.html
+ * Texas Instruments LM96000
+ Prefix: 'lm9600'
+ Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+ Datasheet: http://www.ti.com/lit/ds/symlink/lm96000.pdf
* Analog Devices ADM1027
Prefix: 'adm1027'
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
@@ -136,6 +140,9 @@ of voltage and temperature channels.
SMSC EMC6D103S is similar to EMC6D103, but does not support pwm#_auto_pwm_minctl
and temp#_auto_temp_off.
+The LM96000 supports additional high frequency PWM modes (22.5 kHz, 24 kHz,
+25.7 kHz, 27.7 kHz and 30 kHz), which can be configured on a per-PWM basis.
+
Hardware Configurations
-----------------------
diff --git a/Documentation/interconnect/interconnect.rst b/Documentation/interconnect/interconnect.rst
new file mode 100644
index 000000000000..b8107dcc4cd3
--- /dev/null
+++ b/Documentation/interconnect/interconnect.rst
@@ -0,0 +1,94 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+GENERIC SYSTEM INTERCONNECT SUBSYSTEM
+=====================================
+
+Introduction
+------------
+
+This framework is designed to provide a standard kernel interface to control
+the settings of the interconnects on an SoC. These settings can be throughput,
+latency and priority between multiple interconnected devices or functional
+blocks. This can be controlled dynamically in order to save power or provide
+maximum performance.
+
+The interconnect bus is hardware with configurable parameters, which can be
+set on a data path according to the requests received from various drivers.
+Examples of interconnect buses are the interconnects between various
+components or functional blocks in chipsets. There can be multiple
+interconnects on an SoC and they can be multi-tiered.
+
+Below is a simplified diagram of a real-world SoC interconnect bus topology.
+
+::
+
+ +----------------+ +----------------+
+ | HW Accelerator |--->| M NoC |<---------------+
+ +----------------+ +----------------+ |
+ | | +------------+
+ +-----+ +-------------+ V +------+ | |
+ | DDR | | +--------+ | PCIe | | |
+ +-----+ | | Slaves | +------+ | |
+ ^ ^ | +--------+ | | C NoC |
+ | | V V | |
+ +------------------+ +------------------------+ | | +-----+
+ | |-->| |-->| |-->| CPU |
+ | |-->| |<--| | +-----+
+ | Mem NoC | | S NoC | +------------+
+ | |<--| |---------+ |
+ | |<--| |<------+ | | +--------+
+ +------------------+ +------------------------+ | | +-->| Slaves |
+ ^ ^ ^ ^ ^ | | +--------+
+ | | | | | | V
+ +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+
+ | CPUs | | | GPU | | DSP | | Masters |-->| P NoC |-->| Slaves |
+ +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+
+ |
+ +-------+
+ | Modem |
+ +-------+
+
+Terminology
+-----------
+
+Interconnect provider is the software definition of the interconnect hardware.
+The interconnect providers on the above diagram are M NoC, S NoC, C NoC, P NoC
+and Mem NoC.
+
+Interconnect node is the software definition of the interconnect hardware
+port. Each interconnect provider consists of multiple interconnect nodes,
+which are connected to other SoC components including other interconnect
+providers. The point on the diagram where the CPUs connect to the memory is
+called an interconnect node, which belongs to the Mem NoC interconnect provider.
+
+Interconnect endpoints are the first or the last element of the path. Every
+endpoint is a node, but not every node is an endpoint.
+
+Interconnect path is everything between two endpoints including all the nodes
+that have to be traversed to reach from a source to destination node. It may
+include multiple master-slave pairs across several interconnect providers.
+
+Interconnect consumers are the entities which make use of the data paths exposed
+by the providers. The consumers send requests to providers, asking for various
+throughput, latency and priority values. Usually the consumers are device drivers
+that send requests based on their needs. An example of a consumer is a video
+decoder that supports various formats and image sizes.
+
+Interconnect providers
+----------------------
+
+Interconnect provider is an entity that implements methods to initialize and
+configure interconnect bus hardware. The interconnect provider drivers should
+be registered with the interconnect provider core.
+
+.. kernel-doc:: include/linux/interconnect-provider.h
+
+Interconnect consumers
+----------------------
+
+Interconnect consumers are the clients which use the interconnect APIs to
+get paths between endpoints and set their bandwidth/latency/QoS requirements
+for these interconnect paths.
+
+.. kernel-doc:: include/linux/interconnect.h
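
As a rough sketch of what a consumer might look like, the API boils down to
icc_get()/icc_set_bw()/icc_put() from include/linux/interconnect.h. The node
IDs and the bandwidth values below are made-up placeholders; real IDs come from
the platform's provider driver or its DT bindings::

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/interconnect.h>

    /* Placeholder endpoint IDs for illustration only. */
    #define FOO_MASTER_NODE 10
    #define FOO_SLAVE_NODE  512

    static int foo_request_bandwidth(struct device *dev)
    {
            struct icc_path *path;
            int ret;

            path = icc_get(dev, FOO_MASTER_NODE, FOO_SLAVE_NODE);
            if (IS_ERR(path))
                    return PTR_ERR(path);

            /* request average/peak bandwidth for this path */
            ret = icc_set_bw(path, 1000, 2000);

            /* ... do the work that needs the bandwidth ... */

            icc_put(path);
            return ret;
    }
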
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 4ae4f9d8f8fe..e14d7d40fc75 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -295,6 +295,41 @@ using::
For XDP_SKB mode, use the switch "-S" instead of "-N" and all options
can be displayed with "-h", as usual.
+FAQ
+=======
+
+Q: I am not seeing any traffic on the socket. What am I doing wrong?
+
+A: When a netdev of a physical NIC is initialized, Linux usually
+ allocates one Rx and Tx queue pair per core. So on an 8 core system,
+ queue ids 0 to 7 will be allocated, one per core. In the AF_XDP
+ bind call or the xsk_socket__create libbpf function call, you
+ specify a specific queue id to bind to and it is only the traffic
+ towards that queue that you are going to get on your socket. So in the
+ example above, if you bind to queue 0, you are NOT going to get any
+ traffic that is distributed to queues 1 through 7. If you are
+ lucky, you will see the traffic, but usually it will end up on one
+ of the queues you have not bound to.
+
+ There are a number of ways to solve the problem of getting the
+ traffic you want to the queue id you bound to. If you want to see
+ all the traffic, you can force the netdev to only have 1 queue, queue
+ id 0, and then bind to queue 0. You can use ethtool to do this::
+
+ sudo ethtool -L <interface> combined 1
+
+ If you want to only see part of the traffic, you can program the
+ NIC through ethtool to filter out your traffic to a single queue id
+ that you can bind your XDP socket to. Here is one example in which
+ UDP traffic to and from port 4242 are sent to queue 2::
+
+ sudo ethtool -N <interface> rx-flow-hash udp4 fn
+ sudo ethtool -N <interface> flow-type udp4 src-port 4242 dst-port \
+ 4242 action 2
+
+ A number of other ways are possible, all limited only by the capabilities
+ of the NIC you have.
+
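
For reference, the queue selection itself happens at bind() time through
sxdp_queue_id. Below is a minimal user-space sketch using struct sockaddr_xdp
from <linux/if_xdp.h>; a complete AF_XDP socket also needs a UMEM and rings
registered before bind() (omitted here), which xsk_socket__create() in libbpf
wraps for you::

    #include <linux/if_xdp.h>
    #include <net/if.h>
    #include <sys/socket.h>

    /* Sketch only: UMEM registration and ring setup via setsockopt() are
     * omitted; without them bind() on a real kernel will fail.
     */
    int bind_to_queue(const char *ifname, __u32 queue_id)
    {
            struct sockaddr_xdp sxdp = {
                    .sxdp_family   = AF_XDP,
                    .sxdp_ifindex  = if_nametoindex(ifname),
                    .sxdp_queue_id = queue_id, /* only this queue's traffic is seen */
            };
            int fd = socket(AF_XDP, SOCK_RAW, 0);

            if (fd < 0)
                    return -1;
            /* ... XDP_UMEM_REG / fill and completion ring setup here ... */
            return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
    }
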
Credits
=======
@@ -309,4 +344,3 @@ Credits
- Michael S. Tsirkin
- Qi Z Zhang
- Willem de Bruijn
-
diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst b/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst
index a188466b6698..5045df990a4c 100644
--- a/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst
+++ b/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst
@@ -27,11 +27,12 @@ Driver Overview
The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and
provides services that:
- A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue
+
+ A. allow other drivers, such as the Ethernet driver, to enqueue and dequeue
frames for their respective objects
- B) allow drivers to register callbacks for data availability notifications
+ B. allow drivers to register callbacks for data availability notifications
when data becomes available on a queue or channel
- C) allow drivers to manage hardware buffer pools
+ C. allow drivers to manage hardware buffer pools
The Linux DPIO driver consists of 3 primary components--
DPIO object driver-- fsl-mc driver that manages the DPIO object
@@ -140,11 +141,10 @@ QBman portal interface (qbman-portal.c)
The qbman-portal component provides APIs to do the low level hardware
bit twiddling for operations such as:
- -initializing Qman software portals
-
- -building and sending portal commands
- -portal interrupt configuration and processing
+ - initializing Qman software portals
+ - building and sending portal commands
+ - portal interrupt configuration and processing
The qbman-portal APIs are not public to other drivers, and are
only used by dpio-service.
diff --git a/Documentation/networking/device_drivers/intel/e100.rst b/Documentation/networking/device_drivers/intel/e100.rst
index 5e2839b4ec92..2b9f4887beda 100644
--- a/Documentation/networking/device_drivers/intel/e100.rst
+++ b/Documentation/networking/device_drivers/intel/e100.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+==============================================================
Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
==============================================================
diff --git a/Documentation/networking/device_drivers/intel/e1000.rst b/Documentation/networking/device_drivers/intel/e1000.rst
index 6379d4d20771..956560b6e745 100644
--- a/Documentation/networking/device_drivers/intel/e1000.rst
+++ b/Documentation/networking/device_drivers/intel/e1000.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+===========================================================
Linux* Base Driver for Intel(R) Ethernet Network Connection
===========================================================
diff --git a/Documentation/networking/device_drivers/intel/e1000e.rst b/Documentation/networking/device_drivers/intel/e1000e.rst
index 33554e5416c5..01999f05509c 100644
--- a/Documentation/networking/device_drivers/intel/e1000e.rst
+++ b/Documentation/networking/device_drivers/intel/e1000e.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+======================================================
Linux* Driver for Intel(R) Ethernet Network Connection
======================================================
diff --git a/Documentation/networking/device_drivers/intel/fm10k.rst b/Documentation/networking/device_drivers/intel/fm10k.rst
index bf5e5942f28d..ac3269e34f55 100644
--- a/Documentation/networking/device_drivers/intel/fm10k.rst
+++ b/Documentation/networking/device_drivers/intel/fm10k.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+==============================================================
Linux* Base Driver for Intel(R) Ethernet Multi-host Controller
==============================================================
diff --git a/Documentation/networking/device_drivers/intel/i40e.rst b/Documentation/networking/device_drivers/intel/i40e.rst
index 0cc16c525d10..848fd388fa6e 100644
--- a/Documentation/networking/device_drivers/intel/i40e.rst
+++ b/Documentation/networking/device_drivers/intel/i40e.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+==================================================================
Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series
==================================================================
diff --git a/Documentation/networking/device_drivers/intel/iavf.rst b/Documentation/networking/device_drivers/intel/iavf.rst
index f8b42b64eb28..2d0c3baa1752 100644
--- a/Documentation/networking/device_drivers/intel/iavf.rst
+++ b/Documentation/networking/device_drivers/intel/iavf.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+==================================================================
Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function
==================================================================
diff --git a/Documentation/networking/device_drivers/intel/ice.rst b/Documentation/networking/device_drivers/intel/ice.rst
index 4d118b827bbb..c220aa2711c6 100644
--- a/Documentation/networking/device_drivers/intel/ice.rst
+++ b/Documentation/networking/device_drivers/intel/ice.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+===================================================================
Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series
===================================================================
diff --git a/Documentation/networking/device_drivers/intel/igb.rst b/Documentation/networking/device_drivers/intel/igb.rst
index e87a4a72ea2d..fc8cfaa5dcfa 100644
--- a/Documentation/networking/device_drivers/intel/igb.rst
+++ b/Documentation/networking/device_drivers/intel/igb.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+===========================================================
Linux* Base Driver for Intel(R) Ethernet Network Connection
===========================================================
diff --git a/Documentation/networking/device_drivers/intel/igbvf.rst b/Documentation/networking/device_drivers/intel/igbvf.rst
index a8a9ffa4f8d3..9cddabe8108e 100644
--- a/Documentation/networking/device_drivers/intel/igbvf.rst
+++ b/Documentation/networking/device_drivers/intel/igbvf.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+============================================================
Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet
============================================================
diff --git a/Documentation/networking/device_drivers/intel/ixgb.rst b/Documentation/networking/device_drivers/intel/ixgb.rst
index 8bd80e27843d..945018207a92 100644
--- a/Documentation/networking/device_drivers/intel/ixgb.rst
+++ b/Documentation/networking/device_drivers/intel/ixgb.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+=====================================================================
Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection
=====================================================================
diff --git a/Documentation/networking/device_drivers/intel/ixgbe.rst b/Documentation/networking/device_drivers/intel/ixgbe.rst
index 86d887a63606..c7d25483fedb 100644
--- a/Documentation/networking/device_drivers/intel/ixgbe.rst
+++ b/Documentation/networking/device_drivers/intel/ixgbe.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+=============================================================================
Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
=============================================================================
diff --git a/Documentation/networking/device_drivers/intel/ixgbevf.rst b/Documentation/networking/device_drivers/intel/ixgbevf.rst
index 56cde6366c2f..5d4977360157 100644
--- a/Documentation/networking/device_drivers/intel/ixgbevf.rst
+++ b/Documentation/networking/device_drivers/intel/ixgbevf.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+=============================================================
Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet
=============================================================
diff --git a/Documentation/networking/device_drivers/stmicro/stmmac.txt b/Documentation/networking/device_drivers/stmicro/stmmac.txt
index 2bb07078f535..1ae979fd90d2 100644
--- a/Documentation/networking/device_drivers/stmicro/stmmac.txt
+++ b/Documentation/networking/device_drivers/stmicro/stmmac.txt
@@ -267,7 +267,7 @@ static struct fixed_phy_status stmmac0_fixed_phy_status = {
During the board's device_init we can configure the first
MAC for fixed_link by calling:
- fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status, -1);
+ fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status);
and the second one, with a real PHY device attached to the bus,
by using the stmmac_mdio_bus_data structure (to provide the id, the
reset procedure etc).
diff --git a/Documentation/networking/devlink-health.txt b/Documentation/networking/devlink-health.txt
new file mode 100644
index 000000000000..1db3fbea0831
--- /dev/null
+++ b/Documentation/networking/devlink-health.txt
@@ -0,0 +1,86 @@
+The health mechanism is targeted for Real Time Alerting, in order to know when
+something bad has happened to a PCI device:
+- Provide alert debug information
+- Self healing
+- If a problem needs vendor support, provide a way to gather all needed
+  debugging information.
+
+The main idea is to unify and centralize driver health reports in the
+generic devlink instance and allow the user to set different
+attributes of the health reporting and recovery procedures.
+
+The devlink health reporter:
+The device driver creates a "health reporter" for each error/health type.
+An error/health type can be known/generic (e.g. PCI error, FW error, RX/TX
+error) or unknown (driver specific).
+For each registered health reporter a driver can issue error/health reports
+asynchronously. All health report handling is done by devlink.
+The device driver can provide specific callbacks for each "health reporter", e.g.
+ - Recovery procedures
+ - Diagnostics and object dump procedures
+ - OOB initial parameters
+Different parts of the driver can register different types of health reporters
+with different handlers.
+
+Once an error is reported, devlink health will do the following actions:
+ * A log is sent to the kernel trace events buffer
+ * Health status and statistics are updated for the reporter instance
+ * An object dump is taken and saved at the reporter instance (as long as
+   no other dump is already stored)
+ * An auto recovery attempt is made, depending on:
+   - Auto-recovery configuration
+   - Grace period vs. time passed since the last recovery
+
+The user interface:
+The user can access/change each reporter's parameters and invoke the driver
+specific callbacks via devlink, e.g. per error type (per health reporter):
+ - Configure reporter's generic parameters (like: disable/enable auto recovery)
+ - Invoke recovery procedure
+ - Run diagnostics
+ - Object dump
+
+The devlink health interface (via netlink):
+DEVLINK_CMD_HEALTH_REPORTER_GET
+ Retrieves status and configuration info per DEV and reporter.
+DEVLINK_CMD_HEALTH_REPORTER_SET
+ Allows reporter-related configuration setting.
+DEVLINK_CMD_HEALTH_REPORTER_RECOVER
+ Triggers a reporter's recovery procedure.
+DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE
+ Retrieves diagnostics data from a reporter on a device.
+DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET
+ Retrieves the last stored dump. Devlink health
+ saves a single dump. If a dump is not already stored by devlink
+ for this reporter, devlink generates a new one.
+ The dump output is defined by the reporter.
+DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR
+ Clears the last saved dump file for the specified reporter.
+
+
+ netlink
+ +--------------------------+
+ | |
+ | + |
+ | | |
+ +--------------------------+
+ |request for ops
+ |(diagnose,
+ mlx5_core devlink |recover,
+ |dump)
++--------+ +--------------------------+
+| | | reporter| |
+| | | +---------v----------+ |
+| | ops execution | | | |
+| <----------------------------------+ | |
+| | | | | |
+| | | + ^------------------+ |
+| | | | request for ops |
+| | | | (recover, dump) |
+| | | | |
+| | | +-+------------------+ |
+| | health report | | health handler | |
+| +-------------------------------> | |
+| | | +--------------------+ |
+| | health reporter create | |
+| +----------------------------> |
++--------+ +--------------------------+
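
From user space these netlink commands are typically driven through the devlink
utility from iproute2. The commands below are a sketch assuming a devlink binary
recent enough to include health support; the device "pci/0000:00:09.0" and the
reporter name "fw" are placeholders and the exact syntax may vary by version::

    # Show status and configuration of all reporters
    devlink health show

    # Run diagnostics on one reporter
    devlink health diagnose pci/0000:00:09.0 reporter fw

    # Trigger recovery, then fetch and clear the stored dump
    devlink health recover pci/0000:00:09.0 reporter fw
    devlink health dump show pci/0000:00:09.0 reporter fw
    devlink health dump clear pci/0000:00:09.0 reporter fw

    # Tune the reporter: 2s grace period, auto recovery enabled
    devlink health set pci/0000:00:09.0 reporter fw grace_period 2000 auto_recover true
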
diff --git a/Documentation/networking/devlink-info-versions.rst b/Documentation/networking/devlink-info-versions.rst
new file mode 100644
index 000000000000..c79ad8593383
--- /dev/null
+++ b/Documentation/networking/devlink-info-versions.rst
@@ -0,0 +1,43 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+=====================
+Devlink info versions
+=====================
+
+board.id
+========
+
+Unique identifier of the board design.
+
+board.rev
+=========
+
+Board design revision.
+
+board.manufacture
+=================
+
+An identifier of the company or the facility which produced the part.
+
+fw.mgmt
+=======
+
+Control unit firmware version. This firmware is responsible for house
+keeping tasks, PHY control etc. but not the packet-by-packet data path
+operation.
+
+fw.app
+======
+
+Data path microcode controlling high-speed packet processing.
+
+fw.undi
+=======
+
+UNDI software, may include the UEFI driver, firmware or both.
+
+fw.ncsi
+=======
+
+Version of the software responsible for supporting/handling the
+Network Controller Sideband Interface.
diff --git a/Documentation/networking/devlink-params-mlxsw.txt b/Documentation/networking/devlink-params-mlxsw.txt
new file mode 100644
index 000000000000..c63ea9fc7009
--- /dev/null
+++ b/Documentation/networking/devlink-params-mlxsw.txt
@@ -0,0 +1,10 @@
+fw_load_policy [DEVICE, GENERIC]
+ Configuration mode: driverinit
+
+acl_region_rehash_interval [DEVICE, DRIVER-SPECIFIC]
+ Sets an interval for periodic ACL region rehashes.
+ The value is in milliseconds, minimal value is "3000".
+ Value "0" disables the periodic work.
+ The first rehash will be run right after value is set.
+ Type: u32
+ Configuration mode: runtime
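
For illustration, a runtime parameter like this one would be set through the
generic devlink parameter interface; the PCI address below is a placeholder and
the command syntax is that of the iproute2 devlink utility::

    devlink dev param set pci/0000:03:00.0 \
        name acl_region_rehash_interval value 3000 cmode runtime

    devlink dev param show pci/0000:03:00.0 name acl_region_rehash_interval
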
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 25170ad7d25b..43ef767bc440 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -236,19 +236,6 @@ description.
Design limitations
==================
-DSA is a platform device driver
--------------------------------
-
-DSA is implemented as a DSA platform device driver which is convenient because
-it will register the entire DSA switch tree attached to a master network device
-in one-shot, facilitating the device creation and simplifying the device driver
-model a bit, this comes however with a number of limitations:
-
-- building DSA and its switch drivers as modules is currently not working
-- the device driver parenting does not necessarily reflect the original
- bus/device the switch can be created from
-- supporting non-MDIO and non-MMIO (platform) switches is not possible
-
Limits on the number of devices and ports
-----------------------------------------
@@ -533,16 +520,12 @@ Bridge VLAN filtering
function that the driver has to call for each VLAN the given port is a member
of. A switchdev object is used to carry the VID and bridge flags.
-- port_fdb_prepare: bridge layer function invoked when the bridge prepares the
- installation of a Forwarding Database entry. If the operation is not
- supported, this function should return -EOPNOTSUPP to inform the bridge code
- to fallback to a software implementation. No hardware setup must be done in
- this function. See port_fdb_add for this and details.
-
- port_fdb_add: bridge layer function invoked when the bridge wants to install a
Forwarding Database entry, the switch hardware should be programmed with the
specified address in the specified VLAN Id in the forwarding database
- associated with this VLAN ID
+ associated with this VLAN ID. If the operation is not supported, this
+ function should return -EOPNOTSUPP to inform the bridge code to fallback to
+ a software implementation.
Note: VLAN ID 0 corresponds to the port private database, which, in the context
of DSA, would be the its port-based VLAN, used by the associated bridge device.
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 2196b824e96c..319e5e041f38 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -464,10 +464,11 @@ breakpoints: 0 1
JIT compiler
------------
-The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
-ARM, ARM64, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT
-compiler is transparently invoked for each attached filter from user space
-or for internal kernel users if it has been previously enabled by root:
+The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC,
+PowerPC, ARM, ARM64, MIPS, RISC-V and s390 and can be enabled through
+CONFIG_BPF_JIT. The JIT compiler is transparently invoked for each
+attached filter from user space or for internal kernel users if it has
+been previously enabled by root:
echo 1 > /proc/sys/net/core/bpf_jit_enable
@@ -603,9 +604,10 @@ got from bpf_prog_create(), and 'ctx' the given context (e.g.
skb pointer). All constraints and restrictions from bpf_check_classic() apply
before a conversion to the new layout is being done behind the scenes!
-Currently, the classic BPF format is being used for JITing on most 32-bit
-architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64, arm32 perform
-JIT compilation from eBPF instruction set.
+Currently, the classic BPF format is being used for JITing on most
+32-bit architectures, whereas x86-64, aarch64, s390x, powerpc64,
+sparc64, arm32, riscv (RV64G) perform JIT compilation from eBPF
+instruction set.
Some core changes of the new internal format:
@@ -827,7 +829,7 @@ tracing filters may do to maintain counters of events, for example. Register R9
is not used by socket filters either, but more complex filters may be running
out of registers and would have to resort to spill/fill to stack.
-Internal BPF can used as generic assembler for last step performance
+Internal BPF can be used as a generic assembler for last step performance
optimizations, socket filters and seccomp are using it as assembler. Tracing
filters may use it as assembler to generate code from kernel. In kernel usage
may not be bounded by security considerations, since generated internal BPF code
@@ -865,7 +867,7 @@ Three LSB bits store instruction class which is one of:
BPF_STX 0x03 BPF_STX 0x03
BPF_ALU 0x04 BPF_ALU 0x04
BPF_JMP 0x05 BPF_JMP 0x05
- BPF_RET 0x06 [ class 6 unused, for future if needed ]
+ BPF_RET 0x06 BPF_JMP32 0x06
BPF_MISC 0x07 BPF_ALU64 0x07
When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ...
@@ -902,9 +904,9 @@ If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */
BPF_END 0xd0 /* eBPF only: endianness conversion */
-If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
+If BPF_CLASS(code) == BPF_JMP or BPF_JMP32 [ in eBPF ], BPF_OP(code) is one of:
- BPF_JA 0x00
+ BPF_JA 0x00 /* BPF_JMP only */
BPF_JEQ 0x10
BPF_JGT 0x20
BPF_JGE 0x30
@@ -912,8 +914,8 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
BPF_JNE 0x50 /* eBPF only: jump != */
BPF_JSGT 0x60 /* eBPF only: signed '>' */
BPF_JSGE 0x70 /* eBPF only: signed '>=' */
- BPF_CALL 0x80 /* eBPF only: function call */
- BPF_EXIT 0x90 /* eBPF only: function return */
+ BPF_CALL 0x80 /* eBPF BPF_JMP only: function call */
+ BPF_EXIT 0x90 /* eBPF BPF_JMP only: function return */
BPF_JLT 0xa0 /* eBPF only: unsigned '<' */
BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */
BPF_JSLT 0xc0 /* eBPF only: signed '<' */
@@ -936,8 +938,9 @@ Classic BPF wastes the whole BPF_RET class to represent a single 'ret'
operation. Classic BPF_RET | BPF_K means copy imm32 into return register
and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT
in eBPF means function exit only. The eBPF program needs to store return
-value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently
-unused and reserved for future use.
+value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is used as
+BPF_JMP32 to mean exactly the same operations as BPF_JMP, but with 32-bit wide
+operands for the comparisons instead.
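
For illustration, here is a small sketch (not from the patch itself) composing a
BPF_JMP32 instruction by hand, using the opcode constants from
include/uapi/linux/bpf.h; the register, offset and immediate values are
arbitrary::

    #include <linux/bpf.h>

    /* "if (w1 == 7) goto +2": class BPF_JMP32 compares the lower 32 bits of
     * the operands, unlike BPF_JMP which compares the full 64-bit registers.
     */
    struct bpf_insn insn = {
            .code    = BPF_JMP32 | BPF_JEQ | BPF_K, /* 0x06 | 0x10 | 0x00 */
            .dst_reg = BPF_REG_1,
            .src_reg = 0,
            .off     = 2,   /* jump offset, in instructions */
            .imm     = 7,   /* 32-bit immediate operand */
    };
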
For load and store instructions the 8-bit 'code' field is divided as:
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.rst
index e74d8e1da0e2..36ca823a1122 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.rst
@@ -1,54 +1,79 @@
-
- Linux IEEE 802.15.4 implementation
-
+===============================
+IEEE 802.15.4 Developer's Guide
+===============================
Introduction
============
The IEEE 802.15.4 working group focuses on standardization of the bottom
two layers: Medium Access Control (MAC) and Physical access (PHY). And there
are mainly two options available for upper layers:
- - ZigBee - proprietary protocol from the ZigBee Alliance
- - 6LoWPAN - IPv6 networking over low rate personal area networks
+
+- ZigBee - proprietary protocol from the ZigBee Alliance
+- 6LoWPAN - IPv6 networking over low rate personal area networks
The goal of the Linux-wpan is to provide a complete implementation
of the IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack
of protocols for organizing Low-Rate Wireless Personal Area Networks.
The stack is composed of three main parts:
- - IEEE 802.15.4 layer; We have chosen to use plain Berkeley socket API,
- the generic Linux networking stack to transfer IEEE 802.15.4 data
- messages and a special protocol over netlink for configuration/management
- - MAC - provides access to shared channel and reliable data delivery
- - PHY - represents device drivers
+- IEEE 802.15.4 layer; We have chosen to use plain Berkeley socket API,
+ the generic Linux networking stack to transfer IEEE 802.15.4 data
+ messages and a special protocol over netlink for configuration/management
+- MAC - provides access to shared channel and reliable data delivery
+- PHY - represents device drivers
Socket API
==========
-int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);
-.....
+.. c:function:: int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);
The address family, socket addresses etc. are defined in the
include/net/af_ieee802154.h header or in the special header
in the userspace package (see either http://wpan.cakelab.org/ or the
git tree at https://github.com/linux-wpan/wpan-tools).
+6LoWPAN Linux implementation
+============================
+
+The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80
+octets of actual MAC payload once security is turned on, on a wireless link
+with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format
+[RFC4944] was specified to carry IPv6 datagrams over such constrained links,
+taking into account limited bandwidth, memory, or energy resources that are
+expected in applications such as wireless Sensor Networks. [RFC4944] defines
+a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header
+to support the IPv6 minimum MTU requirement [RFC2460], and stateless header
+compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the
+relatively large IPv6 and UDP headers down to (in the best case) several bytes.
+
+In September 2011 the standard update was published - [RFC6282].
+It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
+used in this Linux implementation.
+
+All the code related to 6LoWPAN can be found in net/6lowpan/*
+and net/ieee802154/6lowpan/*.
+
+To set up a 6LoWPAN interface you need to:
+1. Add an IEEE 802.15.4 interface and set the channel and PAN ID;
+2. Add a 6lowpan interface with a command like:
+# ip link add link wpan0 name lowpan0 type lowpan
+3. Bring up the 'lowpan0' interface
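
Concretely, the sequence might look like the following sketch, assuming the
iwpan tool from wpan-tools; the phy/interface names, channel and PAN ID are
example values::

    iwpan dev wpan0 set pan_id 0xbeef
    iwpan phy phy0 set channel 0 11
    ip link add link wpan0 name lowpan0 type lowpan
    ip link set wpan0 up
    ip link set lowpan0 up
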
-Kernel side
-=============
+Drivers
+=======
Like with WiFi, there are several types of devices implementing IEEE 802.15.4.
1) 'HardMAC'. The MAC layer is implemented in the device itself, the device
- exports a management (e.g. MLME) and data API.
+exports a management (e.g. MLME) and data API.
2) 'SoftMAC' or just radio. These types of devices are just radio transceivers
- possibly with some kinds of acceleration like automatic CRC computation and
- comparation, automagic ACK handling, address matching, etc.
+possibly with some kinds of acceleration like automatic CRC computation and
+comparison, automagic ACK handling, address matching, etc.
Those types of devices require different approach to be hooked into Linux kernel.
-
HardMAC
-=======
+-------
See the header include/net/ieee802154_netdev.h. You have to implement Linux
net_device, with .type = ARPHRD_IEEE802154. Data is exchanged with socket family
@@ -64,9 +89,8 @@ net_device with a pointer to struct ieee802154_mlme_ops instance. The fields
assoc_req, assoc_resp, disassoc_req, start_req, and scan_req are optional.
All other fields are required.
-
SoftMAC
-=======
+-------
The MAC is the middle layer in the IEEE 802.15.4 Linux stack. This moment it
provides interface for drivers registration and management of slave interfaces.
@@ -79,99 +103,78 @@ This layer is going to be extended soon.
See header include/net/mac802154.h and several drivers in
drivers/net/ieee802154/.
+Fake drivers
+------------
+
+In addition there is a driver available which simulates a real device with
+SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option
+makes it possible to test and debug the stack without using real hardware.
Device drivers API
==================
The include/net/mac802154.h defines following functions:
- - struct ieee802154_hw *
- ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops):
- allocation of IEEE 802.15.4 compatible hardware device
- - void ieee802154_free_hw(struct ieee802154_hw *hw):
- freeing allocated hardware device
+.. c:function:: struct ieee802154_dev *ieee802154_alloc_device (size_t priv_size, struct ieee802154_ops *ops)
- - int ieee802154_register_hw(struct ieee802154_hw *hw):
- register PHY which is the allocated hardware device, in the system
+Allocation of IEEE 802.15.4 compatible device.
- - void ieee802154_unregister_hw(struct ieee802154_hw *hw):
- freeing registered PHY
+.. c:function:: void ieee802154_free_device(struct ieee802154_dev *dev)
- - void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
- u8 lqi):
- telling 802.15.4 module there is a new received frame in the skb with
- the RF Link Quality Indicator (LQI) from the hardware device
+Freeing allocated device.
- - void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
- bool ifs_handling):
- telling 802.15.4 module the frame in the skb is or going to be
- transmitted through the hardware device
+.. c:function:: int ieee802154_register_device(struct ieee802154_dev *dev)
+
+Register PHY in the system.
+
+.. c:function:: void ieee802154_unregister_device(struct ieee802154_dev *dev)
+
+Freeing registered PHY.
+
+.. c:function:: void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi):
+
+Telling 802.15.4 module there is a new received frame in the skb with
+the RF Link Quality Indicator (LQI) from the hardware device.
+
+.. c:function:: void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, bool ifs_handling):
+
+Telling the 802.15.4 module that the frame in the skb has been or is going to
+be transmitted through the hardware device.
The device driver must implement the following callbacks in the IEEE 802.15.4
-operations structure at least:
-struct ieee802154_ops {
- ...
- int (*start)(struct ieee802154_hw *hw);
- void (*stop)(struct ieee802154_hw *hw);
- ...
- int (*xmit_async)(struct ieee802154_hw *hw, struct sk_buff *skb);
- int (*ed)(struct ieee802154_hw *hw, u8 *level);
- int (*set_channel)(struct ieee802154_hw *hw, u8 page, u8 channel);
- ...
-};
-
- - int start(struct ieee802154_hw *hw):
- handler that 802.15.4 module calls for the hardware device initialization.
-
- - void stop(struct ieee802154_hw *hw):
- handler that 802.15.4 module calls for the hardware device cleanup.
-
- - int xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb):
- handler that 802.15.4 module calls for each frame in the skb going to be
- transmitted through the hardware device.
-
- - int ed(struct ieee802154_hw *hw, u8 *level):
- handler that 802.15.4 module calls for Energy Detection from the hardware
- device.
-
- - int set_channel(struct ieee802154_hw *hw, u8 page, u8 channel):
- set radio for listening on specific channel of the hardware device.
+operations structure at least::
-Moreover IEEE 802.15.4 device operations structure should be filled.
+ struct ieee802154_ops {
+ ...
+ int (*start)(struct ieee802154_hw *hw);
+ void (*stop)(struct ieee802154_hw *hw);
+ ...
+ int (*xmit_async)(struct ieee802154_hw *hw, struct sk_buff *skb);
+ int (*ed)(struct ieee802154_hw *hw, u8 *level);
+ int (*set_channel)(struct ieee802154_hw *hw, u8 page, u8 channel);
+ ...
+ };
-Fake drivers
-============
+.. c:function:: int start(struct ieee802154_hw *hw):
-In addition there is a driver available which simulates a real device with
-SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option
-provides a possibility to test and debug the stack without usage of real hardware.
+Handler that 802.15.4 module calls for the hardware device initialization.
-See sources in drivers/net/ieee802154 folder for more details.
+.. c:function:: void stop(struct ieee802154_hw *hw):
+Handler that 802.15.4 module calls for the hardware device cleanup.
-6LoWPAN Linux implementation
-============================
+.. c:function:: int xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb):
-The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80
-octets of actual MAC payload once security is turned on, on a wireless link
-with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format
-[RFC4944] was specified to carry IPv6 datagrams over such constrained links,
-taking into account limited bandwidth, memory, or energy resources that are
-expected in applications such as wireless Sensor Networks. [RFC4944] defines
-a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header
-to support the IPv6 minimum MTU requirement [RFC2460], and stateless header
-compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the
-relatively large IPv6 and UDP headers down to (in the best case) several bytes.
+Handler that 802.15.4 module calls for each frame in the skb going to be
+transmitted through the hardware device.
-In September 2011 the standard update was published - [RFC6282].
-It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
-used in this Linux implementation.
+.. c:function:: int ed(struct ieee802154_hw *hw, u8 *level):
-All the code related to 6lowpan you may find in files: net/6lowpan/*
-and net/ieee802154/6lowpan/*
+Handler that 802.15.4 module calls for Energy Detection from the hardware
+device.
-To setup a 6LoWPAN interface you need:
-1. Add IEEE802.15.4 interface and set channel and PAN ID;
-2. Add 6lowpan interface by command like:
- # ip link add link wpan0 name lowpan0 type lowpan
-3. Bring up 'lowpan0' interface
+.. c:function:: int set_channel(struct ieee802154_hw *hw, u8 page, u8 channel):
+
+Set radio for listening on specific channel of the hardware device.
+
+Moreover, the IEEE 802.15.4 device operations structure should be filled in.
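
Putting the pieces together, a SoftMAC driver's registration path might look
like the following sketch. It uses the ieee802154_alloc_hw()/
ieee802154_register_hw()/ieee802154_free_hw() helpers referenced earlier in
this document; the "foo" names are placeholders and the callback bodies and
bus-specific probe glue are omitted::

    #include <linux/module.h>
    #include <net/mac802154.h>

    struct foo_priv { u8 page, channel; };  /* driver private state */

    static int foo_start(struct ieee802154_hw *hw);
    static void foo_stop(struct ieee802154_hw *hw);
    static int foo_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb);
    static int foo_ed(struct ieee802154_hw *hw, u8 *level);
    static int foo_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel);

    static const struct ieee802154_ops foo_ops = {
            .owner       = THIS_MODULE,
            .start       = foo_start,
            .stop        = foo_stop,
            .xmit_async  = foo_xmit_async,
            .ed          = foo_ed,
            .set_channel = foo_set_channel,
    };

    static int foo_register(struct device *parent)
    {
            struct ieee802154_hw *hw;
            int ret;

            hw = ieee802154_alloc_hw(sizeof(struct foo_priv), &foo_ops);
            if (!hw)
                    return -ENOMEM;

            hw->parent = parent;
            /* advertise supported channels/pages, flags, filters, etc. here */

            ret = ieee802154_register_hw(hw);
            if (ret)
                    ieee802154_free_hw(hw);
            return ret;
    }
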
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 6a47629ef8ed..f0da1b001514 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -11,24 +11,28 @@ Contents:
batman-adv
can
can_ucan_protocol
- dpaa2/index
- e100
- e1000
- e1000e
- fm10k
- igb
- igbvf
- ixgb
- ixgbe
- ixgbevf
- i40e
- iavf
- ice
+ device_drivers/freescale/dpaa2/index
+ device_drivers/intel/e100
+ device_drivers/intel/e1000
+ device_drivers/intel/e1000e
+ device_drivers/intel/fm10k
+ device_drivers/intel/igb
+ device_drivers/intel/igbvf
+ device_drivers/intel/ixgb
+ device_drivers/intel/ixgbe
+ device_drivers/intel/ixgbevf
+ device_drivers/intel/i40e
+ device_drivers/intel/iavf
+ device_drivers/intel/ice
+ devlink-info-versions
+ ieee802154
kapi
z8530book
msg_zerocopy
failover
net_failover
+ phy
+ sfp-phylink
alias
bridge
snmp_counter
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index fe46d4867e2d..18c1415e7bfa 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -7,7 +7,7 @@ Intro
=====
The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
-The feature is currently implemented for TCP sockets.
+The feature is currently implemented for TCP and UDP sockets.
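
For reference, opting in looks the same for both socket types: set SO_ZEROCOPY
once, then pass MSG_ZEROCOPY per send call. The sketch below uses the constant
values documented in msg_zerocopy.rst; reading completion notifications from
the socket's error queue (MSG_ERRQUEUE) before reusing the buffer is required
but omitted here::

    #include <sys/socket.h>

    #ifndef SO_ZEROCOPY
    #define SO_ZEROCOPY 60
    #endif
    #ifndef MSG_ZEROCOPY
    #define MSG_ZEROCOPY 0x4000000
    #endif

    int send_zc(int fd, const void *buf, size_t len)
    {
            int one = 1;

            if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
                    return -1;
            return send(fd, buf, len, MSG_ZEROCOPY);
    }
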
Opportunity and Caveats
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt
index 355c6d8ef8ad..b203d1334822 100644
--- a/Documentation/networking/operstates.txt
+++ b/Documentation/networking/operstates.txt
@@ -22,8 +22,9 @@ and changeable from userspace under certain rules.
2. Querying from userspace
Both admin and operational state can be queried via the netlink
-operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK
-to be notified of updates. This is important for setting from userspace.
+operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK
+to be notified of updates while the interface is admin up. This is
+important for setting from userspace.
These values contain interface state:
@@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to
complete. Corresponding functions are netif_dormant_on() to set the
flag, netif_dormant_off() to clear it and netif_dormant() to query.
-On device allocation, networking core sets the flags equivalent to
-netif_carrier_ok() and !netif_dormant().
+On device allocation, both flags __LINK_STATE_NOCARRIER and
+__LINK_STATE_DORMANT are cleared, so the effective state is equivalent
+to netif_carrier_ok() and !netif_dormant().
Whenever the driver CHANGES one of these flags, a workqueue event is
@@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the
driver. Afterwards, the userspace application can set IFLA_OPERSTATE
to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set
netif_carrier_off() or netif_dormant_on(). Changes made by userspace
-are multicasted on the netlink group RTMGRP_LINK.
+are multicasted on the netlink group RTNLGRP_LINK.
So basically a 802.1X supplicant interacts with the kernel like this:
--subscribe to RTMGRP_LINK
+-subscribe to RTNLGRP_LINK
-set IFLA_LINKMODE to 1 via RTM_SETLINK
-query RTM_GETLINK once to get initial state
-if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until
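
As a side note on the subscription step above, a minimal user-space sketch of
subscribing to link notifications with the standard rtnetlink headers could
look like this; RTMGRP_LINK is the legacy group bitmask matching RTNLGRP_LINK,
and NETLINK_ADD_MEMBERSHIP with the RTNLGRP_LINK id is the alternative::

    #include <linux/rtnetlink.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int open_link_monitor(void)
    {
            struct sockaddr_nl addr = {
                    .nl_family = AF_NETLINK,
                    .nl_groups = RTMGRP_LINK,
            };
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

            if (fd < 0)
                    return -1;
            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd; /* read RTM_NEWLINK messages from this fd */
    }
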
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
new file mode 100644
index 000000000000..0dd90d7df5ec
--- /dev/null
+++ b/Documentation/networking/phy.rst
@@ -0,0 +1,447 @@
+=====================
+PHY Abstraction Layer
+=====================
+
+Purpose
+=======
+
+Most network devices consist of a set of registers which provide an interface
+to a MAC layer, which communicates with the physical connection through a
+PHY. The PHY concerns itself with negotiating link parameters with the link
+partner on the other side of the network connection (typically, an ethernet
+cable), and provides a register interface to allow drivers to determine what
+settings were chosen, and to configure what settings are allowed.
+
+While these devices are distinct from the network devices, and conform to a
+standard layout for the registers, it has been common practice to integrate
+the PHY management code with the network driver. This has resulted in large
+amounts of redundant code. Also, on embedded systems with multiple (and
+sometimes quite different) ethernet controllers connected to the same
+management bus, it is difficult to ensure safe use of the bus.
+
+Since the PHYs are devices, and the management busses through which they are
+accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
+In doing so, it has these goals:
+
+#. Increase code-reuse
+#. Increase overall code-maintainability
+#. Speed development time for new network drivers, and for new systems
+
+Basically, this layer is meant to provide an interface to PHY devices which
+allows network driver writers to write as little code as possible, while
+still providing a full feature set.
+
+The MDIO bus
+============
+
+Most network devices are connected to a PHY by means of a management bus.
+Different devices use different busses (though some share common interfaces).
+In order to take advantage of the PAL, each bus interface needs to be
+registered as a distinct device.
+
+#. read and write functions must be implemented. Their prototypes are::
+
+ int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
+ int read(struct mii_bus *bus, int mii_id, int regnum);
+
+ mii_id is the address on the bus for the PHY, and regnum is the register
+ number. These functions are guaranteed not to be called from interrupt
+ time, so it is safe for them to block, waiting for an interrupt to signal
+ the operation is complete
+
+#. A reset function is optional. This is used to return the bus to an
+ initialized state.
+
+#. A probe function is needed. This function should set up anything the bus
+ driver needs, setup the mii_bus structure, and register with the PAL using
+ mdiobus_register. Similarly, there's a remove function to undo all of
+ that (use mdiobus_unregister).
+
+#. Like any driver, the device_driver structure must be configured, and init
+ exit functions are used to register the driver.
+
+#. The bus must also be declared somewhere as a device, and registered.
+
+As an example for how one driver implemented an mdio bus driver, see
+drivers/net/ethernet/freescale/fsl_pq_mdio.c and an associated DTS file
+for one of the users. (e.g. "git grep fsl,.*-mdio arch/powerpc/boot/dts/")
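
As a rough sketch of the registration side of such a bus driver (the "foo"
accessors are placeholders for the hardware-specific read/write functions
described above, and a real driver would also call mdiobus_unregister() on
remove)::

    #include <linux/phy.h>
    #include <linux/platform_device.h>

    static int foo_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
    static int foo_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                              u16 value);

    static int foo_mdio_probe(struct platform_device *pdev)
    {
            struct mii_bus *bus = devm_mdiobus_alloc(&pdev->dev);

            if (!bus)
                    return -ENOMEM;

            bus->name   = "foo-mdio";
            bus->read   = foo_mdio_read;
            bus->write  = foo_mdio_write;
            bus->parent = &pdev->dev;
            snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));

            return mdiobus_register(bus); /* registers the bus with the PAL */
    }
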
+
+(RG)MII/electrical interface considerations
+===========================================
+
+The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin
+electrical signal interface using a synchronous 125MHz clock signal and several
+data lines. Due to this design decision, a 1.5ns to 2ns delay must be added
+between the clock line (RXC or TXC) and the data lines to let the PHY (clock
+sink) have enough setup and hold times to sample the data lines correctly. The
+PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let
+the PHY driver and optionally the MAC driver, implement the required delay. The
+values of phy_interface_t must be understood from the perspective of the PHY
+device itself, leading to the following:
+
+* PHY_INTERFACE_MODE_RGMII: the PHY is not responsible for inserting any
+ internal delay by itself, it assumes that either the Ethernet MAC (if
+ capable) or the PCB traces insert the correct 1.5-2ns delay
+
+* PHY_INTERFACE_MODE_RGMII_TXID: the PHY should insert an internal delay
+ for the transmit data lines (TXD[3:0]) processed by the PHY device
+
+* PHY_INTERFACE_MODE_RGMII_RXID: the PHY should insert an internal delay
+ for the receive data lines (RXD[3:0]) processed by the PHY device
+
+* PHY_INTERFACE_MODE_RGMII_ID: the PHY should insert internal delays for
+ both transmit AND receive data lines from/to the PHY device
+
+Whenever possible, use the PHY side RGMII delay for these reasons:
+
+* PHY devices may offer sub-nanosecond granularity in how they allow a
+ receiver/transmitter side delay (e.g: 0.5, 1.0, 1.5ns) to be specified. Such
+ precision may be required to account for differences in PCB trace lengths
+
+* PHY devices are typically qualified for a large range of applications
+ (industrial, medical, automotive...), and they provide a constant and
+ reliable delay across temperature/pressure/voltage ranges
+
+* PHY device drivers in PHYLIB are reusable by nature, so being able to
+ configure a specified delay correctly enables more designs with similar delay
+ requirements to operate correctly
+
+For cases where the PHY is not capable of providing this delay, but the
+Ethernet MAC driver is capable of doing so, the correct phy_interface_t value
+should be PHY_INTERFACE_MODE_RGMII, and the Ethernet MAC driver should be
+configured correctly in order to provide the required transmit and/or receive
+side delay from the perspective of the PHY device. Conversely, if the Ethernet
+MAC driver looks at the phy_interface_t value, for any other mode but
+PHY_INTERFACE_MODE_RGMII, it should make sure that the MAC-level delays are
+disabled.
+
+In case neither the Ethernet MAC, nor the PHY are capable of providing the
+required delays, as defined per the RGMII standard, several options may be
+available:
+
+* Some SoCs may offer a pin pad/mux/controller capable of configuring a given
+ set of pins' strength, delays, and voltage; and it may be a suitable
+ option to insert the expected 2ns RGMII delay.
+
+* Modifying the PCB design to include a fixed delay (e.g: using a specifically
+ designed serpentine), which may not require software configuration at all.
+
+Common problems with RGMII delay mismatch
+-----------------------------------------
+
+When there is an RGMII delay mismatch between the Ethernet MAC and the PHY, this
+will most likely result in the clock and data line signals being unstable when
+the PHY or MAC takes a snapshot of these signals to translate them into logical
+1 or 0 states and reconstructs the data being transmitted/received. Typical
+symptoms include:
+
+* Transmission/reception partially works, and there is frequent or occasional
+ packet loss observed
+
+* Ethernet MAC may report some or all packets ingressing with a FCS/CRC error,
+ or just discard them all
+
+* Switching to lower speeds such as 10/100Mbits/sec makes the problem go away
+ (since there is enough setup/hold time in that case)
+
+Connecting to a PHY
+===================
+
+Sometime during startup, the network driver needs to establish a connection
+between the PHY device, and the network device. At this time, the PHY's bus
+and drivers need to all have been loaded, so it is ready for the connection.
+At this point, there are several ways to connect to the PHY:
+
+#. The PAL handles everything, and only calls the network driver when
+ the link state changes, so it can react.
+
+#. The PAL handles everything except interrupts (usually because the
+ controller has the interrupt registers).
+
+#. The PAL handles everything, but checks in with the driver every second,
+ allowing the network driver to react first to any changes before the PAL
+ does.
+
+#. The PAL serves only as a library of functions, with the network device
+ manually calling functions to update status, and configure the PHY
+
+
+Letting the PHY Abstraction Layer do Everything
+===============================================
+
+If you choose option 1 (The hope is that every driver can, but to still be
+useful to drivers that can't), connecting to the PHY is simple:
+
+First, you need a function to react to changes in the link state. This
+function follows this protocol::
+
+ static void adjust_link(struct net_device *dev);
+
+Next, you need to know the device name of the PHY connected to this device.
+The name will look something like, "0:00", where the first number is the
+bus id, and the second is the PHY's address on that bus. Typically,
+the bus is responsible for making its ID unique.
+
+Now, to connect, just call this function::
+
+ phydev = phy_connect(dev, phy_name, &adjust_link, interface);
+
+*phydev* is a pointer to the phy_device structure which represents the PHY.
+If phy_connect is successful, it will return the pointer. dev, here, is the
+pointer to your net_device. Once done, this function will have started the
+PHY's software state machine, and registered for the PHY's interrupt, if it
+has one. The phydev structure will be populated with information about the
+current state, though the PHY will not yet be truly operational at this
+point.
+
+PHY-specific flags should be set in phydev->dev_flags prior to the call
+to phy_connect() such that the underlying PHY driver can check for flags
+and perform specific operations based on them.
+This is useful if the system has put hardware restrictions on
+the PHY/controller, of which the PHY needs to be aware.
+
+*interface* is a u32 which specifies the connection type used
+between the controller and the PHY. Examples are GMII, MII,
+RGMII, and SGMII. For a full list, see include/linux/phy.h
+
+Now just make sure that phydev->supported and phydev->advertising have any
+values pruned from them which don't make sense for your controller (a 10/100
+controller may be connected to a gigabit capable PHY, so you would need to
+mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
+for these bitfields. Note that you should not SET any bits, except the
+SUPPORTED_Pause and SUPPORTED_AsymPause bits (see below), or the PHY may get
+put into an unsupported state.
+
+Lastly, once the controller is ready to handle network traffic, you call
+phy_start(phydev). This tells the PAL that you are ready, and configures the
+PHY to connect to the network. If the MAC interrupt of your network driver
+also handles PHY status changes, just set phydev->irq to PHY_IGNORE_INTERRUPT
+before you call phy_start and use phy_mac_interrupt() from the network
+driver. If you don't want to use interrupts, set phydev->irq to PHY_POLL.
+phy_start() enables the PHY interrupts (if applicable) and starts the
+phylib state machine.
+
+When you want to disconnect from the network (even if just briefly), you call
+phy_stop(phydev). This function also stops the phylib state machine and
+disables PHY interrupts.
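
Tying the above together, a driver taking option 1 might look roughly like the
sketch below; the "foo" names, the "0:00" bus id and the RGMII interface mode
are example values::

    #include <linux/netdevice.h>
    #include <linux/phy.h>

    /* Called by the PAL whenever the link state changes. */
    static void foo_adjust_link(struct net_device *dev)
    {
            struct phy_device *phydev = dev->phydev;

            /* react to phydev->link / speed / duplex here, then log it */
            phy_print_status(phydev);
    }

    static int foo_open(struct net_device *dev)
    {
            struct phy_device *phydev;

            phydev = phy_connect(dev, "0:00", foo_adjust_link,
                                 PHY_INTERFACE_MODE_RGMII);
            if (IS_ERR(phydev))
                    return PTR_ERR(phydev);

            phy_start(phydev); /* enables interrupts and the state machine */
            return 0;
    }

    static int foo_stop(struct net_device *dev)
    {
            phy_stop(dev->phydev);
            phy_disconnect(dev->phydev);
            return 0;
    }
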
+
+Pause frames / flow control
+===========================
+
+The PHY does not participate directly in flow control/pause frames except by
+making sure that the SUPPORTED_Pause and SUPPORTED_AsymPause bits are set in
+MII_ADVERTISE to indicate towards the link partner that the Ethernet MAC
+controller supports such a thing. Since flow control/pause frames generation
+involves the Ethernet MAC driver, it is recommended that this driver takes care
+of properly indicating advertisement and support for such features by setting
+the SUPPORTED_Pause and SUPPORTED_AsymPause bits accordingly. This can be done
+either before or after phy_connect() and/or as a result of implementing the
+ethtool::set_pauseparam feature.
+
+
+Keeping Close Tabs on the PAL
+=============================
+
+It is possible that the PAL's built-in state machine needs a little help to
+keep your network device and the PHY properly in sync. If so, you can
+register a helper function when connecting to the PHY, which will be called
+every second before the state machine reacts to any changes. To do this, you
+need to manually call phy_attach() and phy_prepare_link(), and then call
+phy_start_machine() with the second argument set to point to your special
+handler.
+
+Currently there are no examples of how to use this functionality, and testing
+on it has been limited because the author does not have any drivers which use
+it (they all use option 1). So Caveat Emptor.
+
+Doing it all yourself
+=====================
+
+There's a remote chance that the PAL's built-in state machine cannot track
+the complex interactions between the PHY and your network device. If this is
+so, you can simply call phy_attach(), and not call phy_start_machine or
+phy_prepare_link(). This will mean that phydev->state is entirely yours to
+handle (phy_start and phy_stop toggle between some of the states, so you
+might need to avoid them).
+
+An effort has been made to make sure that useful functionality can be
+accessed without the state-machine running, and most of these functions are
+descended from functions which did not interact with a complex state-machine.
+However, again, no effort has been made so far to test running without the
+state machine, so tryer beware.
+
+Here is a brief rundown of the functions::
+
+ int phy_read(struct phy_device *phydev, u16 regnum);
+ int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
+
+Simple read/write primitives. They invoke the bus's read/write function
+pointers.
+::
+
+ void phy_print_status(struct phy_device *phydev);
+
+A convenience function to print out the PHY status neatly.
+::
+
+ void phy_request_interrupt(struct phy_device *phydev);
+
+Requests the IRQ for the PHY interrupts.
+::
+
+ struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
+ phy_interface_t interface);
+
+Attaches a network device to a particular PHY, binding the PHY to a generic
+driver if none was found during bus initialization.
+::
+
+ int phy_start_aneg(struct phy_device *phydev);
+
+Using variables inside the phydev structure, either configures advertising
+and resets autonegotiation, or disables autonegotiation, and configures
+forced settings.
+::
+
+ static inline int phy_read_status(struct phy_device *phydev);
+
+Fills the phydev structure with up-to-date information about the current
+settings in the PHY.
+::
+
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+
+Ethtool convenience functions.
+::
+
+ int phy_mii_ioctl(struct phy_device *phydev,
+ struct mii_ioctl_data *mii_data, int cmd);
+
+The MII ioctl. Note that this function will completely screw up the state
+machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to
+use this only to write registers which are not standard, and don't set off
+a renegotiation.
+
+PHY Device Drivers
+==================
+
+With the PHY Abstraction Layer, adding support for new PHYs is
+quite easy. In some cases, no work is required at all! However,
+many PHYs require a little hand-holding to get up-and-running.
+
+Generic PHY driver
+------------------
+
+If the desired PHY doesn't have any errata, quirks, or special
+features you want to support, then it may be best to not add
+support, and let the PHY Abstraction Layer's Generic PHY Driver
+do all of the work.
+
+Writing a PHY driver
+--------------------
+
+If you do need to write a PHY driver, the first thing to do is
+make sure it can be matched with an appropriate PHY device.
+This is done during bus initialization by reading the device's
+UID (stored in registers 2 and 3), then comparing it to each
+driver's phy_id field by ANDing it with each driver's
+phy_id_mask field. Also, it needs a name. Here's an example::
+
+ static struct phy_driver dm9161_driver = {
+ .phy_id = 0x0181b880,
+ .name = "Davicom DM9161E",
+ .phy_id_mask = 0x0ffffff0,
+ ...
+ }
+
+Next, you need to specify what features (speed, duplex, autoneg,
+etc) your PHY device and driver support. Most PHYs support
+PHY_BASIC_FEATURES, but you can look in include/mii.h for other
+features.
+
+Each driver consists of a number of function pointers, documented
+in include/linux/phy.h under the phy_driver structure.
+
+Of these, only config_aneg and read_status are required to be
+assigned by the driver code. The rest are optional. Also, it is
+preferred to use the generic phy driver's versions of these two
+functions if at all possible: genphy_read_status and
+genphy_config_aneg. If this is not possible, it is likely that
+you only need to perform some actions before and after invoking
+these functions, and so your functions will wrap the generic
+ones.
+
+Feel free to look at the Marvell, Cicada, and Davicom drivers in
+drivers/net/phy/ for examples (the lxt and qsemi drivers have
+not been tested as of this writing).
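
As a sketch of a complete but minimal driver relying entirely on the generic
helpers mentioned above (the IDs reuse the DM9161E example values, and
PHY_BASIC_FEATURES selects 10/100 support)::

    #include <linux/module.h>
    #include <linux/phy.h>

    static struct phy_driver foo_phy_driver[] = {
            {
                    .phy_id         = 0x0181b880,
                    .phy_id_mask    = 0x0ffffff0,
                    .name           = "Davicom DM9161E",
                    .features       = PHY_BASIC_FEATURES,
                    .config_aneg    = genphy_config_aneg,
                    .read_status    = genphy_read_status,
            },
    };

    module_phy_driver(foo_phy_driver);
    MODULE_LICENSE("GPL");
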
+
+The PHY's MMD register accesses are handled by the PAL framework
+by default, but can be overridden by a specific PHY driver if
+required. This could be the case if a PHY was released for
+manufacturing before the MMD PHY register definitions were
+standardized by the IEEE. Most modern PHYs will be able to use
+the generic PAL framework for accessing the PHY's MMD registers.
+An example of such usage is for Energy Efficient Ethernet support,
+implemented in the PAL. This support uses the PAL to access MMD
+registers for EEE query and configuration if the PHY supports
+the IEEE standard access mechanisms, or can use the PHY's specific
+access interfaces if overridden by the specific PHY driver. See
+the Micrel driver in drivers/net/phy/ for an example of how this
+can be implemented.
+
+Board Fixups
+============
+
+Sometimes the specific interaction between the platform and the PHY requires
+special handling. For instance, to change where the PHY's clock input is,
+or to add a delay to account for latency issues in the data path. In order
+to support such contingencies, the PHY Layer allows platform code to register
+fixups to be run when the PHY is brought up (or subsequently reset).
+
+When the PHY Layer brings up a PHY it checks to see if there are any fixups
+registered for it, matching based on UID (contained in the PHY device's phy_id
+field) and the bus identifier (contained in phydev->dev.bus_id). Both must
+match, however two constants, PHY_ANY_ID and PHY_ANY_UID, are provided as
+wildcards for the bus ID and UID, respectively.
+
+When a match is found, the PHY layer will invoke the run function associated
+with the fixup. This function is passed a pointer to the phy_device of
+interest. It should therefore only operate on that PHY.
+
+The platform code can either register the fixup using phy_register_fixup()::
+
+ int phy_register_fixup(const char *phy_id,
+ u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *));
+
+Or using one of the two stubs, phy_register_fixup_for_uid() and
+phy_register_fixup_for_id()::
+
+ int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *));
+ int phy_register_fixup_for_id(const char *phy_id,
+ int (*run)(struct phy_device *));
+
+The stubs set one of the two matching criteria, and set the other one to
+match anything.
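+
+For example, platform code might register a fixup like this (the
+``foo_`` names and the UID/mask values are purely illustrative)::
+
+  static int foo_phy_fixup(struct phy_device *phydev)
+  {
+          /* tweak a board-specific setting, e.g. clock output or delays */
+          return 0;
+  }
+
+  static int __init foo_board_init(void)
+  {
+          return phy_register_fixup_for_uid(0x0181b880, 0x0ffffff0,
+                                            foo_phy_fixup);
+  }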
+
+When phy_register_fixup() or one of the \*_for_uid()/\*_for_id() variants
+is called from a module, the fixup must be unregistered and its allocated
+memory freed when the module is unloaded.
+
+Call one of the following functions before unloading the module::
+
+ int phy_unregister_fixup(const char *phy_id, u32 phy_uid, u32 phy_uid_mask);
+ int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
+ int phy_unregister_fixup_for_id(const char *phy_id);
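+
+For example, a module that registered the illustrative fixup above could
+undo it on unload like this::
+
+  static void __exit foo_board_exit(void)
+  {
+          phy_unregister_fixup_for_uid(0x0181b880, 0x0ffffff0);
+  }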
+
+Standards
+=========
+
+IEEE Standard 802.3: CSMA/CD Access Method and Physical Layer Specifications, Section Two:
+http://standards.ieee.org/getieee802/download/802.3-2008_section2.pdf
+
+RGMII v1.3:
+http://web.archive.org/web/20160303212629/http://www.hp.com/rnd/pdfs/RGMIIv1_3.pdf
+
+RGMII v2.0:
+http://web.archive.org/web/20160303171328/http://www.hp.com/rnd/pdfs/RGMIIv2_0_final_hp.pdf
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
deleted file mode 100644
index bdec0f700bc1..000000000000
--- a/Documentation/networking/phy.txt
+++ /dev/null
@@ -1,427 +0,0 @@
-
--------
-PHY Abstraction Layer
-(Updated 2008-04-08)
-
-Purpose
-
- Most network devices consist of set of registers which provide an interface
- to a MAC layer, which communicates with the physical connection through a
- PHY. The PHY concerns itself with negotiating link parameters with the link
- partner on the other side of the network connection (typically, an ethernet
- cable), and provides a register interface to allow drivers to determine what
- settings were chosen, and to configure what settings are allowed.
-
- While these devices are distinct from the network devices, and conform to a
- standard layout for the registers, it has been common practice to integrate
- the PHY management code with the network driver. This has resulted in large
- amounts of redundant code. Also, on embedded systems with multiple (and
- sometimes quite different) ethernet controllers connected to the same
- management bus, it is difficult to ensure safe use of the bus.
-
- Since the PHYs are devices, and the management busses through which they are
- accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
- In doing so, it has these goals:
-
- 1) Increase code-reuse
- 2) Increase overall code-maintainability
- 3) Speed development time for new network drivers, and for new systems
-
- Basically, this layer is meant to provide an interface to PHY devices which
- allows network driver writers to write as little code as possible, while
- still providing a full feature set.
-
-The MDIO bus
-
- Most network devices are connected to a PHY by means of a management bus.
- Different devices use different busses (though some share common interfaces).
- In order to take advantage of the PAL, each bus interface needs to be
- registered as a distinct device.
-
- 1) read and write functions must be implemented. Their prototypes are:
-
- int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
- int read(struct mii_bus *bus, int mii_id, int regnum);
-
- mii_id is the address on the bus for the PHY, and regnum is the register
- number. These functions are guaranteed not to be called from interrupt
- time, so it is safe for them to block, waiting for an interrupt to signal
- the operation is complete
-
- 2) A reset function is optional. This is used to return the bus to an
- initialized state.
-
- 3) A probe function is needed. This function should set up anything the bus
- driver needs, setup the mii_bus structure, and register with the PAL using
- mdiobus_register. Similarly, there's a remove function to undo all of
- that (use mdiobus_unregister).
-
- 4) Like any driver, the device_driver structure must be configured, and init
- exit functions are used to register the driver.
-
- 5) The bus must also be declared somewhere as a device, and registered.
-
- As an example for how one driver implemented an mdio bus driver, see
- drivers/net/ethernet/freescale/fsl_pq_mdio.c and an associated DTS file
- for one of the users. (e.g. "git grep fsl,.*-mdio arch/powerpc/boot/dts/")
-
-(RG)MII/electrical interface considerations
-
- The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin
- electrical signal interface using a synchronous 125Mhz clock signal and several
- data lines. Due to this design decision, a 1.5ns to 2ns delay must be added
- between the clock line (RXC or TXC) and the data lines to let the PHY (clock
- sink) have enough setup and hold times to sample the data lines correctly. The
- PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let
- the PHY driver and optionally the MAC driver, implement the required delay. The
- values of phy_interface_t must be understood from the perspective of the PHY
- device itself, leading to the following:
-
- * PHY_INTERFACE_MODE_RGMII: the PHY is not responsible for inserting any
- internal delay by itself, it assumes that either the Ethernet MAC (if capable
- or the PCB traces) insert the correct 1.5-2ns delay
-
- * PHY_INTERFACE_MODE_RGMII_TXID: the PHY should insert an internal delay
- for the transmit data lines (TXD[3:0]) processed by the PHY device
-
- * PHY_INTERFACE_MODE_RGMII_RXID: the PHY should insert an internal delay
- for the receive data lines (RXD[3:0]) processed by the PHY device
-
- * PHY_INTERFACE_MODE_RGMII_ID: the PHY should insert internal delays for
- both transmit AND receive data lines from/to the PHY device
-
- Whenever possible, use the PHY side RGMII delay for these reasons:
-
- * PHY devices may offer sub-nanosecond granularity in how they allow a
- receiver/transmitter side delay (e.g: 0.5, 1.0, 1.5ns) to be specified. Such
- precision may be required to account for differences in PCB trace lengths
-
- * PHY devices are typically qualified for a large range of applications
- (industrial, medical, automotive...), and they provide a constant and
- reliable delay across temperature/pressure/voltage ranges
-
- * PHY device drivers in PHYLIB being reusable by nature, being able to
- configure correctly a specified delay enables more designs with similar delay
- requirements to be operate correctly
-
- For cases where the PHY is not capable of providing this delay, but the
- Ethernet MAC driver is capable of doing so, the correct phy_interface_t value
- should be PHY_INTERFACE_MODE_RGMII, and the Ethernet MAC driver should be
- configured correctly in order to provide the required transmit and/or receive
- side delay from the perspective of the PHY device. Conversely, if the Ethernet
- MAC driver looks at the phy_interface_t value, for any other mode but
- PHY_INTERFACE_MODE_RGMII, it should make sure that the MAC-level delays are
- disabled.
-
- In case neither the Ethernet MAC, nor the PHY are capable of providing the
- required delays, as defined per the RGMII standard, several options may be
- available:
-
- * Some SoCs may offer a pin pad/mux/controller capable of configuring a given
- set of pins'strength, delays, and voltage; and it may be a suitable
- option to insert the expected 2ns RGMII delay.
-
- * Modifying the PCB design to include a fixed delay (e.g: using a specifically
- designed serpentine), which may not require software configuration at all.
-
-Common problems with RGMII delay mismatch
-
- When there is a RGMII delay mismatch between the Ethernet MAC and the PHY, this
- will most likely result in the clock and data line signals to be unstable when
- the PHY or MAC take a snapshot of these signals to translate them into logical
- 1 or 0 states and reconstruct the data being transmitted/received. Typical
- symptoms include:
-
- * Transmission/reception partially works, and there is frequent or occasional
- packet loss observed
-
- * Ethernet MAC may report some or all packets ingressing with a FCS/CRC error,
- or just discard them all
-
- * Switching to lower speeds such as 10/100Mbits/sec makes the problem go away
- (since there is enough setup/hold time in that case)
-
-
-Connecting to a PHY
-
- Sometime during startup, the network driver needs to establish a connection
- between the PHY device, and the network device. At this time, the PHY's bus
- and drivers need to all have been loaded, so it is ready for the connection.
- At this point, there are several ways to connect to the PHY:
-
- 1) The PAL handles everything, and only calls the network driver when
- the link state changes, so it can react.
-
- 2) The PAL handles everything except interrupts (usually because the
- controller has the interrupt registers).
-
- 3) The PAL handles everything, but checks in with the driver every second,
- allowing the network driver to react first to any changes before the PAL
- does.
-
- 4) The PAL serves only as a library of functions, with the network device
- manually calling functions to update status, and configure the PHY
-
-
-Letting the PHY Abstraction Layer do Everything
-
- If you choose option 1 (The hope is that every driver can, but to still be
- useful to drivers that can't), connecting to the PHY is simple:
-
- First, you need a function to react to changes in the link state. This
- function follows this protocol:
-
- static void adjust_link(struct net_device *dev);
-
- Next, you need to know the device name of the PHY connected to this device.
- The name will look something like, "0:00", where the first number is the
- bus id, and the second is the PHY's address on that bus. Typically,
- the bus is responsible for making its ID unique.
-
- Now, to connect, just call this function:
-
- phydev = phy_connect(dev, phy_name, &adjust_link, interface);
-
- phydev is a pointer to the phy_device structure which represents the PHY. If
- phy_connect is successful, it will return the pointer. dev, here, is the
- pointer to your net_device. Once done, this function will have started the
- PHY's software state machine, and registered for the PHY's interrupt, if it
- has one. The phydev structure will be populated with information about the
- current state, though the PHY will not yet be truly operational at this
- point.
-
- PHY-specific flags should be set in phydev->dev_flags prior to the call
- to phy_connect() such that the underlying PHY driver can check for flags
- and perform specific operations based on them.
- This is useful if the system has put hardware restrictions on
- the PHY/controller, of which the PHY needs to be aware.
-
- interface is a u32 which specifies the connection type used
- between the controller and the PHY. Examples are GMII, MII,
- RGMII, and SGMII. For a full list, see include/linux/phy.h
-
- Now just make sure that phydev->supported and phydev->advertising have any
- values pruned from them which don't make sense for your controller (a 10/100
- controller may be connected to a gigabit capable PHY, so you would need to
- mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
- for these bitfields. Note that you should not SET any bits, except the
- SUPPORTED_Pause and SUPPORTED_AsymPause bits (see below), or the PHY may get
- put into an unsupported state.
-
- Lastly, once the controller is ready to handle network traffic, you call
- phy_start(phydev). This tells the PAL that you are ready, and configures the
- PHY to connect to the network. If you want to handle your own interrupts,
- just set phydev->irq to PHY_IGNORE_INTERRUPT before you call phy_start.
- Similarly, if you don't want to use interrupts, set phydev->irq to PHY_POLL.
-
- When you want to disconnect from the network (even if just briefly), you call
- phy_stop(phydev).
-
-Pause frames / flow control
-
- The PHY does not participate directly in flow control/pause frames except by
- making sure that the SUPPORTED_Pause and SUPPORTED_AsymPause bits are set in
- MII_ADVERTISE to indicate towards the link partner that the Ethernet MAC
- controller supports such a thing. Since flow control/pause frames generation
- involves the Ethernet MAC driver, it is recommended that this driver takes care
- of properly indicating advertisement and support for such features by setting
- the SUPPORTED_Pause and SUPPORTED_AsymPause bits accordingly. This can be done
- either before or after phy_connect() and/or as a result of implementing the
- ethtool::set_pauseparam feature.
-
-
-Keeping Close Tabs on the PAL
-
- It is possible that the PAL's built-in state machine needs a little help to
- keep your network device and the PHY properly in sync. If so, you can
- register a helper function when connecting to the PHY, which will be called
- every second before the state machine reacts to any changes. To do this, you
- need to manually call phy_attach() and phy_prepare_link(), and then call
- phy_start_machine() with the second argument set to point to your special
- handler.
-
- Currently there are no examples of how to use this functionality, and testing
- on it has been limited because the author does not have any drivers which use
- it (they all use option 1). So Caveat Emptor.
-
-Doing it all yourself
-
- There's a remote chance that the PAL's built-in state machine cannot track
- the complex interactions between the PHY and your network device. If this is
- so, you can simply call phy_attach(), and not call phy_start_machine or
- phy_prepare_link(). This will mean that phydev->state is entirely yours to
- handle (phy_start and phy_stop toggle between some of the states, so you
- might need to avoid them).
-
- An effort has been made to make sure that useful functionality can be
- accessed without the state-machine running, and most of these functions are
- descended from functions which did not interact with a complex state-machine.
- However, again, no effort has been made so far to test running without the
- state machine, so tryer beware.
-
- Here is a brief rundown of the functions:
-
- int phy_read(struct phy_device *phydev, u16 regnum);
- int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
-
- Simple read/write primitives. They invoke the bus's read/write function
- pointers.
-
- void phy_print_status(struct phy_device *phydev);
-
- A convenience function to print out the PHY status neatly.
-
- int phy_start_interrupts(struct phy_device *phydev);
- int phy_stop_interrupts(struct phy_device *phydev);
-
- Requests the IRQ for the PHY interrupts, then enables them for
- start, or disables then frees them for stop.
-
- struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
- phy_interface_t interface);
-
- Attaches a network device to a particular PHY, binding the PHY to a generic
- driver if none was found during bus initialization.
-
- int phy_start_aneg(struct phy_device *phydev);
-
- Using variables inside the phydev structure, either configures advertising
- and resets autonegotiation, or disables autonegotiation, and configures
- forced settings.
-
- static inline int phy_read_status(struct phy_device *phydev);
-
- Fills the phydev structure with up-to-date information about the current
- settings in the PHY.
-
- int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
-
- Ethtool convenience functions.
-
- int phy_mii_ioctl(struct phy_device *phydev,
- struct mii_ioctl_data *mii_data, int cmd);
-
- The MII ioctl. Note that this function will completely screw up the state
- machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to
- use this only to write registers which are not standard, and don't set off
- a renegotiation.
-
-
-PHY Device Drivers
-
- With the PHY Abstraction Layer, adding support for new PHYs is
- quite easy. In some cases, no work is required at all! However,
- many PHYs require a little hand-holding to get up-and-running.
-
-Generic PHY driver
-
- If the desired PHY doesn't have any errata, quirks, or special
- features you want to support, then it may be best to not add
- support, and let the PHY Abstraction Layer's Generic PHY Driver
- do all of the work.
-
-Writing a PHY driver
-
- If you do need to write a PHY driver, the first thing to do is
- make sure it can be matched with an appropriate PHY device.
- This is done during bus initialization by reading the device's
- UID (stored in registers 2 and 3), then comparing it to each
- driver's phy_id field by ANDing it with each driver's
- phy_id_mask field. Also, it needs a name. Here's an example:
-
- static struct phy_driver dm9161_driver = {
- .phy_id = 0x0181b880,
- .name = "Davicom DM9161E",
- .phy_id_mask = 0x0ffffff0,
- ...
- }
-
- Next, you need to specify what features (speed, duplex, autoneg,
- etc) your PHY device and driver support. Most PHYs support
- PHY_BASIC_FEATURES, but you can look in include/mii.h for other
- features.
-
- Each driver consists of a number of function pointers, documented
- in include/linux/phy.h under the phy_driver structure.
-
- Of these, only config_aneg and read_status are required to be
- assigned by the driver code. The rest are optional. Also, it is
- preferred to use the generic phy driver's versions of these two
- functions if at all possible: genphy_read_status and
- genphy_config_aneg. If this is not possible, it is likely that
- you only need to perform some actions before and after invoking
- these functions, and so your functions will wrap the generic
- ones.
-
- Feel free to look at the Marvell, Cicada, and Davicom drivers in
- drivers/net/phy/ for examples (the lxt and qsemi drivers have
- not been tested as of this writing).
-
- The PHY's MMD register accesses are handled by the PAL framework
- by default, but can be overridden by a specific PHY driver if
- required. This could be the case if a PHY was released for
- manufacturing before the MMD PHY register definitions were
- standardized by the IEEE. Most modern PHYs will be able to use
- the generic PAL framework for accessing the PHY's MMD registers.
- An example of such usage is for Energy Efficient Ethernet support,
- implemented in the PAL. This support uses the PAL to access MMD
- registers for EEE query and configuration if the PHY supports
- the IEEE standard access mechanisms, or can use the PHY's specific
- access interfaces if overridden by the specific PHY driver. See
- the Micrel driver in drivers/net/phy/ for an example of how this
- can be implemented.
-
-Board Fixups
-
- Sometimes the specific interaction between the platform and the PHY requires
- special handling. For instance, to change where the PHY's clock input is,
- or to add a delay to account for latency issues in the data path. In order
- to support such contingencies, the PHY Layer allows platform code to register
- fixups to be run when the PHY is brought up (or subsequently reset).
-
- When the PHY Layer brings up a PHY it checks to see if there are any fixups
- registered for it, matching based on UID (contained in the PHY device's phy_id
- field) and the bus identifier (contained in phydev->dev.bus_id). Both must
- match, however two constants, PHY_ANY_ID and PHY_ANY_UID, are provided as
- wildcards for the bus ID and UID, respectively.
-
- When a match is found, the PHY layer will invoke the run function associated
- with the fixup. This function is passed a pointer to the phy_device of
- interest. It should therefore only operate on that PHY.
-
- The platform code can either register the fixup using phy_register_fixup():
-
- int phy_register_fixup(const char *phy_id,
- u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *));
-
- Or using one of the two stubs, phy_register_fixup_for_uid() and
- phy_register_fixup_for_id():
-
- int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *));
- int phy_register_fixup_for_id(const char *phy_id,
- int (*run)(struct phy_device *));
-
- The stubs set one of the two matching criteria, and set the other one to
- match anything.
-
- When phy_register_fixup() or *_for_uid()/*_for_id() is called at module,
- unregister fixup and free allocate memory are required.
-
- Call one of following function before unloading module.
-
- int phy_unregister_fixup(const char *phy_id, u32 phy_uid, u32 phy_uid_mask);
- int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
- int phy_register_fixup_for_id(const char *phy_id);
-
-Standards
-
- IEEE Standard 802.3: CSMA/CD Access Method and Physical Layer Specifications, Section Two:
- http://standards.ieee.org/getieee802/download/802.3-2008_section2.pdf
-
- RGMII v1.3:
- http://web.archive.org/web/20160303212629/http://www.hp.com/rnd/pdfs/RGMIIv1_3.pdf
-
- RGMII v2.0:
- http://web.archive.org/web/20160303171328/http://www.hp.com/rnd/pdfs/RGMIIv2_0_final_hp.pdf
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index c9d052e0cf51..2df5894353d6 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1000,51 +1000,6 @@ The kernel interface functions are as follows:
size should be set when the call is begun. tx_total_len may not be less
than zero.
- (*) Check to see the completion state of a call so that the caller can assess
- whether it needs to be retried.
-
- enum rxrpc_call_completion {
- RXRPC_CALL_SUCCEEDED,
- RXRPC_CALL_REMOTELY_ABORTED,
- RXRPC_CALL_LOCALLY_ABORTED,
- RXRPC_CALL_LOCAL_ERROR,
- RXRPC_CALL_NETWORK_ERROR,
- };
-
- int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
- enum rxrpc_call_completion *_compl,
- u32 *_abort_code);
-
- On return, -EINPROGRESS will be returned if the call is still ongoing; if
- it is finished, *_compl will be set to indicate the manner of completion,
- *_abort_code will be set to any abort code that occurred. 0 will be
- returned on a successful completion, -ECONNABORTED will be returned if the
- client failed due to a remote abort and anything else will return an
- appropriate error code.
-
- The caller should look at this information to decide if it's worth
- retrying the call.
-
- (*) Retry a client call.
-
- int rxrpc_kernel_retry_call(struct socket *sock,
- struct rxrpc_call *call,
- struct sockaddr_rxrpc *srx,
- struct key *key);
-
- This attempts to partially reinitialise a call and submit it again while
- reusing the original call's Tx queue to avoid the need to repackage and
- re-encrypt the data to be sent. call indicates the call to retry, srx the
- new address to send it to and key the encryption key to use for signing or
- encrypting the packets.
-
- For this to work, the first Tx data packet must still be in the transmit
- queue, and currently this is only permitted for local and network errors
- and the call must not have been aborted. Any partially constructed Tx
- packet is left as is and can continue being filled afterwards.
-
- It returns 0 if the call was requeued and an error otherwise.
-
(*) Get call RTT.
u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);
diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst
new file mode 100644
index 000000000000..5bd26cb07244
--- /dev/null
+++ b/Documentation/networking/sfp-phylink.rst
@@ -0,0 +1,268 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======
+phylink
+=======
+
+Overview
+========
+
+phylink is a mechanism to support hot-pluggable networking modules
+without needing to re-initialise the adapter on hot-plug events.
+
+phylink supports conventional phylib-based setups, fixed link setups
+and SFP (Small Form-factor Pluggable) modules at present.
+
+Modes of operation
+==================
+
+phylink has several modes of operation, which depend on the firmware
+settings.
+
+1. PHY mode
+
+ In PHY mode, we use phylib to read the current link settings from
+ the PHY, and pass them to the MAC driver. We expect the MAC driver
+ to configure exactly the modes that are specified without any
+ negotiation being enabled on the link.
+
+2. Fixed mode
+
+ Fixed mode is the same as PHY mode as far as the MAC driver is
+ concerned.
+
+3. In-band mode
+
+ In-band mode is used with 802.3z, SGMII and similar interface modes,
+ and we are expecting to use and honor the in-band negotiation or
+ control word sent across the serdes channel.
+
+For example, what this means is that:
+
+.. code-block:: none
+
+ &eth {
+ phy = <&phy>;
+ phy-mode = "sgmii";
+ };
+
+does not use in-band SGMII signalling. The PHY is expected to follow
+exactly the settings given to it in its :c:func:`mac_config` function.
+The link should be forced up or down appropriately in the
+:c:func:`mac_link_up` and :c:func:`mac_link_down` functions.
+
+.. code-block:: none
+
+ &eth {
+ managed = "in-band-status";
+ phy = <&phy>;
+ phy-mode = "sgmii";
+ };
+
+uses in-band mode, where results from the PHY's negotiation are passed
+to the MAC through the SGMII control word, and the MAC is expected to
+acknowledge the control word. The :c:func:`mac_link_up` and
+:c:func:`mac_link_down` functions must not force the MAC side link
+up and down.
+
+Rough guide to converting a network driver to sfp/phylink
+=========================================================
+
+This guide briefly describes how to convert a network driver from
+phylib to the sfp/phylink support. Please send patches to improve
+this documentation.
+
+1. Optionally split the network driver's phylib update function into
+ three parts dealing with link-down, link-up and reconfiguring the
+ MAC settings. This can be done as a separate preparation commit.
+
+ An example of this preparation can be found in git commit fc548b991fb0.
+
+2. Replace::
+
+ select FIXED_PHY
+ select PHYLIB
+
+ with::
+
+ select PHYLINK
+
+ in the driver's Kconfig stanza.
+
+3. Add::
+
+ #include <linux/phylink.h>
+
+ to the driver's list of header files.
+
+4. Add::
+
+ struct phylink *phylink;
+
+ to the driver's private data structure. We shall refer to the
+ driver's private data pointer as ``priv`` below, and the driver's
+ private data structure as ``struct foo_priv``.
+
+5. Replace the following functions:
+
+ .. flat-table::
+ :header-rows: 1
+ :widths: 1 1
+ :stub-columns: 0
+
+ * - Original function
+ - Replacement function
+ * - phy_start(phydev)
+ - phylink_start(priv->phylink)
+ * - phy_stop(phydev)
+ - phylink_stop(priv->phylink)
+ * - phy_mii_ioctl(phydev, ifr, cmd)
+ - phylink_mii_ioctl(priv->phylink, ifr, cmd)
+ * - phy_ethtool_get_wol(phydev, wol)
+ - phylink_ethtool_get_wol(priv->phylink, wol)
+ * - phy_ethtool_set_wol(phydev, wol)
+ - phylink_ethtool_set_wol(priv->phylink, wol)
+ * - phy_disconnect(phydev)
+ - phylink_disconnect_phy(priv->phylink)
+
+ Please note that some of these functions must be called under the
+ rtnl lock, and will warn if not. This will normally be the case,
+ except if these are called from the driver suspend/resume paths.
+
+6. Add/replace ksettings get/set methods with:
+
+ .. code-block:: c
+
+ static int foo_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+ {
+ struct foo_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+ }
+
+ static int foo_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+ {
+ struct foo_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+ }
+
+7. Replace the call to::
+
+ phy_dev = of_phy_connect(dev, node, link_func, flags, phy_interface);
+
+   and associated code with a call to::
+
+ err = phylink_of_phy_connect(priv->phylink, node, flags);
+
+ For the most part, ``flags`` can be zero; these flags are passed to
+ the of_phy_attach() inside this function call if a PHY is specified
+ in the DT node ``node``.
+
+ ``node`` should be the DT node which contains the network phy property,
+ fixed link properties, and will also contain the sfp property.
+
+ The setup of fixed links should also be removed; these are handled
+ internally by phylink.
+
+ of_phy_connect() was also passed a function pointer for link updates.
+ This function is replaced by a different form of MAC updates
+ described below in (8).
+
+ Manipulation of the PHY's supported/advertised happens within phylink
+ based on the validate callback, see below in (8).
+
+ Note that the driver no longer needs to store the ``phy_interface``,
+ and also note that ``phy_interface`` becomes a dynamic property,
+ just like the speed, duplex etc. settings.
+
+ Finally, note that the MAC driver has no direct access to the PHY
+ anymore; that is because in the phylink model, the PHY can be
+ dynamic.
+
+8. Add a :c:type:`struct phylink_mac_ops <phylink_mac_ops>` instance to
+ the driver, which is a table of function pointers, and implement
+ these functions. The old link update function for
+ :c:func:`of_phy_connect` becomes three methods: :c:func:`mac_link_up`,
+ :c:func:`mac_link_down`, and :c:func:`mac_config`. If step 1 was
+ performed, then the functionality will have been split there.
+
+ It is important that if in-band negotiation is used,
+ :c:func:`mac_link_up` and :c:func:`mac_link_down` do not prevent the
+ in-band negotiation from completing, since these functions are called
+ when the in-band link state changes - otherwise the link will never
+ come up.
+
+ The :c:func:`validate` method should mask the supplied supported mask,
+ and ``state->advertising`` with the supported ethtool link modes.
+ These are the new ethtool link modes, so bitmask operations must be
+ used. For an example, see drivers/net/ethernet/marvell/mvneta.c.
+
+ The :c:func:`mac_link_state` method is used to read the link state
+ from the MAC, and report back the settings that the MAC is currently
+ using. This is particularly important for in-band negotiation
+ methods such as 1000base-X and SGMII.
+
+ The :c:func:`mac_config` method is used to update the MAC with the
+ requested state, and must avoid unnecessarily taking the link down
+ when making changes to the MAC configuration. This means the
+ function should modify the state and only take the link down when
+ absolutely necessary to change the MAC configuration. An example
+ of how to do this can be found in :c:func:`mvneta_mac_config` in
+ drivers/net/ethernet/marvell/mvneta.c.
+
+ For further information on these methods, please see the inline
+ documentation in :c:type:`struct phylink_mac_ops <phylink_mac_ops>`.
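+
+   A minimal sketch of such an ops table (the ``foo_*`` handler names are
+   placeholders; the authoritative prototypes live in
+   ``include/linux/phylink.h``) could look like:
+
+   .. code-block:: c
+
+       static const struct phylink_mac_ops foo_phylink_ops = {
+           .validate       = foo_validate,
+           .mac_link_state = foo_mac_link_state,
+           .mac_config     = foo_mac_config,
+           .mac_link_down  = foo_mac_link_down,
+           .mac_link_up    = foo_mac_link_up,
+       };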
+
+9. Remove calls to of_parse_phandle() for the PHY,
+ of_phy_register_fixed_link() for fixed links etc. from the probe
+ function, and replace with:
+
+ .. code-block:: c
+
+ struct phylink *phylink;
+
+ phylink = phylink_create(dev, node, phy_mode, &phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ fail probe;
+ }
+
+ priv->phylink = phylink;
+
+ and arrange to destroy the phylink in the probe failure path as
+ appropriate and the removal path too by calling:
+
+ .. code-block:: c
+
+ phylink_destroy(priv->phylink);
+
+10. Arrange for MAC link state interrupts to be forwarded into
+ phylink, via:
+
+ .. code-block:: c
+
+ phylink_mac_change(priv->phylink, link_is_up);
+
+ where ``link_is_up`` is true if the link is currently up or false
+ otherwise.
+
+11. Verify that the driver does not call::
+
+ netif_carrier_on()
+ netif_carrier_off()
+
+ as these will interfere with phylink's tracking of the link state,
+ and cause phylink to omit calls via the :c:func:`mac_link_up` and
+ :c:func:`mac_link_down` methods.
+
+Network drivers should call phylink_stop() and phylink_start() via their
+suspend/resume paths, which ensures that the appropriate
+:c:type:`struct phylink_mac_ops <phylink_mac_ops>` methods are called
+as necessary.
+
+For information describing the SFP cage in DT, please see the binding
+documentation in the kernel source tree
+``Documentation/devicetree/bindings/net/sff,sfp.txt``.
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index b0dfdaaca512..52b026be028f 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -1,16 +1,17 @@
-===========
+============
SNMP counter
-===========
+============
This document explains the meaning of SNMP counters.
General IPv4 counters
-====================
+=====================
All layer 4 packets and ICMP packets will change these counters, but
these counters won't be changed by layer 2 packets (such as STP) or
ARP packets.
* IpInReceives
+
Defined in `RFC1213 ipInReceives`_
.. _RFC1213 ipInReceives: https://tools.ietf.org/html/rfc1213#page-26
@@ -23,6 +24,7 @@ and so on). It indicates the number of aggregated segments after
GRO/LRO.
* IpInDelivers
+
Defined in `RFC1213 ipInDelivers`_
.. _RFC1213 ipInDelivers: https://tools.ietf.org/html/rfc1213#page-28
@@ -33,6 +35,7 @@ supported protocols will be delivered, if someone listens on the raw
socket, all valid IP packets will be delivered.
* IpOutRequests
+
Defined in `RFC1213 ipOutRequests`_
.. _RFC1213 ipOutRequests: https://tools.ietf.org/html/rfc1213#page-28
@@ -42,6 +45,7 @@ multicast packets, and would always be updated together with
IpExtOutOctets.
* IpExtInOctets and IpExtOutOctets
+
They are Linux kernel extensions, no RFC definitions. Please note,
RFC1213 indeed defines ifInOctets and ifOutOctets, but they
are different things. The ifInOctets and ifOutOctets include the MAC
@@ -49,6 +53,7 @@ layer header size but IpExtInOctets and IpExtOutOctets don't, they
only include the IP layer header and the IP layer data.
* IpExtInNoECTPkts, IpExtInECT1Pkts, IpExtInECT0Pkts, IpExtInCEPkts
+
They indicate the number of four kinds of ECN IP packets, please refer
`Explicit Congestion Notification`_ for more details.
@@ -60,6 +65,7 @@ for the same packet, you might find that IpInReceives count 1, but
IpExtInNoECTPkts counts 2 or more.
* IpInHdrErrors
+
Defined in `RFC1213 ipInHdrErrors`_. It indicates the packet is
dropped due to the IP header error. It might happen in both IP input
and IP forward paths.
@@ -67,6 +73,7 @@ and IP forward paths.
.. _RFC1213 ipInHdrErrors: https://tools.ietf.org/html/rfc1213#page-27
* IpInAddrErrors
+
Defined in `RFC1213 ipInAddrErrors`_. It will be increased in two
scenarios: (1) The IP address is invalid. (2) The destination IP
address is not a local address and IP forwarding is not enabled
@@ -74,6 +81,7 @@ address is not a local address and IP forwarding is not enabled
.. _RFC1213 ipInAddrErrors: https://tools.ietf.org/html/rfc1213#page-27
* IpExtInNoRoutes
+
This counter means the packet is dropped when the IP stack receives a
packet and can't find a route for it from the route table. It might
happen when IP forwarding is enabled and the destination IP address is
@@ -81,6 +89,7 @@ not a local address and there is no route for the destination IP
address.
* IpInUnknownProtos
+
Defined in `RFC1213 ipInUnknownProtos`_. It will be increased if the
layer 4 protocol is unsupported by kernel. If an application is using
raw socket, kernel will always deliver the packet to the raw socket
@@ -89,10 +98,12 @@ and this counter won't be increased.
.. _RFC1213 ipInUnknownProtos: https://tools.ietf.org/html/rfc1213#page-27
* IpExtInTruncatedPkts
+
For IPv4 packet, it means the actual data size is smaller than the
"Total Length" field in the IPv4 header.
* IpInDiscards
+
Defined in `RFC1213 ipInDiscards`_. It indicates the packet is dropped
in the IP receiving path and due to kernel internal reasons (e.g. no
enough memory).
@@ -100,20 +111,23 @@ enough memory).
.. _RFC1213 ipInDiscards: https://tools.ietf.org/html/rfc1213#page-28
* IpOutDiscards
+
Defined in `RFC1213 ipOutDiscards`_. It indicates the packet is
dropped in the IP sending path and due to kernel internal reasons.
.. _RFC1213 ipOutDiscards: https://tools.ietf.org/html/rfc1213#page-28
* IpOutNoRoutes
+
Defined in `RFC1213 ipOutNoRoutes`_. It indicates the packet is
dropped in the IP sending path and no route is found for it.
.. _RFC1213 ipOutNoRoutes: https://tools.ietf.org/html/rfc1213#page-29
ICMP counters
-============
+=============
* IcmpInMsgs and IcmpOutMsgs
+
Defined by `RFC1213 icmpInMsgs`_ and `RFC1213 icmpOutMsgs`_
.. _RFC1213 icmpInMsgs: https://tools.ietf.org/html/rfc1213#page-41
@@ -126,6 +140,7 @@ IcmpOutMsgs would still be updated if the IP header is constructed by
a userspace program.
* ICMP named types
+
| These counters include most of common ICMP types, they are:
| IcmpInDestUnreachs: `RFC1213 icmpInDestUnreachs`_
| IcmpInTimeExcds: `RFC1213 icmpInTimeExcds`_
@@ -180,6 +195,7 @@ straightforward. The 'In' counter means kernel receives such a packet
and the 'Out' counter means kernel sends such a packet.
* ICMP numeric types
+
They are IcmpMsgInType[N] and IcmpMsgOutType[N], the [N] indicates the
ICMP type number. These counters track all kinds of ICMP packets. The
ICMP type number definition could be found in the `ICMP parameters`_
@@ -192,12 +208,14 @@ IcmpMsgOutType8 would increase 1. And if kernel gets an ICMP Echo Reply
packet, IcmpMsgInType0 would increase 1.
* IcmpInCsumErrors
+
This counter indicates the checksum of the ICMP packet is
wrong. Kernel verifies the checksum after updating the IcmpInMsgs and
before updating IcmpMsgInType[N]. If a packet has bad checksum, the
IcmpInMsgs would be updated but none of IcmpMsgInType[N] would be updated.
* IcmpInErrors and IcmpOutErrors
+
Defined by `RFC1213 icmpInErrors`_ and `RFC1213 icmpOutErrors`_
.. _RFC1213 icmpInErrors: https://tools.ietf.org/html/rfc1213#page-41
@@ -209,7 +227,7 @@ and the sending packet path use IcmpOutErrors. When IcmpInCsumErrors
is increased, IcmpInErrors would always be increased too.
relationship of the ICMP counters
--------------------------------
+---------------------------------
The sum of IcmpMsgOutType[N] is always equal to IcmpOutMsgs, as they
are updated at the same time. The sum of IcmpMsgInType[N] plus
IcmpInErrors should be equal or larger than IcmpInMsgs. When kernel
@@ -229,8 +247,9 @@ IcmpInMsgs should be less than the sum of IcmpMsgOutType[N] plus
IcmpInErrors.
General TCP counters
-==================
+====================
* TcpInSegs
+
Defined in `RFC1213 tcpInSegs`_
.. _RFC1213 tcpInSegs: https://tools.ietf.org/html/rfc1213#page-48
@@ -247,6 +266,7 @@ isn't aware of GRO. So if two packets are merged by GRO, the TcpInSegs
counter would only increase 1.
* TcpOutSegs
+
Defined in `RFC1213 tcpOutSegs`_
.. _RFC1213 tcpOutSegs: https://tools.ietf.org/html/rfc1213#page-48
@@ -258,6 +278,7 @@ GSO, so if a packet would be split to 2 by GSO, TcpOutSegs will
increase 2.
* TcpActiveOpens
+
Defined in `RFC1213 tcpActiveOpens`_
.. _RFC1213 tcpActiveOpens: https://tools.ietf.org/html/rfc1213#page-47
@@ -267,6 +288,7 @@ state. Every time TcpActiveOpens increases 1, TcpOutSegs should always
increase 1.
* TcpPassiveOpens
+
Defined in `RFC1213 tcpPassiveOpens`_
.. _RFC1213 tcpPassiveOpens: https://tools.ietf.org/html/rfc1213#page-47
@@ -275,6 +297,7 @@ It means the TCP layer receives a SYN, replies a SYN+ACK, come into
the SYN-RCVD state.
* TcpExtTCPRcvCoalesce
+
When packets are received by the TCP layer and are not be read by the
application, the TCP layer will try to merge them. This counter
indicate how many packets are merged in such situation. If GRO is
@@ -282,12 +305,14 @@ enabled, lots of packets would be merged by GRO, these packets
wouldn't be counted to TcpExtTCPRcvCoalesce.
* TcpExtTCPAutoCorking
+
When sending packets, the TCP layer will try to merge small packets to
a bigger one. This counter increase 1 for every packet merged in such
situation. Please refer to the LWN article for more details:
https://lwn.net/Articles/576263/
* TcpExtTCPOrigDataSent
+
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
explaination below::
@@ -297,6 +322,7 @@ explaination below::
more useful to track the TCP retransmission rate.
* TCPSynRetrans
+
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
explaination below::
@@ -304,6 +330,7 @@ explaination below::
retransmissions into SYN, fast-retransmits, timeout retransmits, etc.
* TCPFastOpenActiveFail
+
This counter is explained by `kernel commit f19c29e3e391`_, I pasted the
explaination below::
@@ -313,6 +340,7 @@ explaination below::
.. _kernel commit f19c29e3e391: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f19c29e3e391a66a273e9afebaf01917245148cd
* TcpExtListenOverflows and TcpExtListenDrops
+
When kernel receives a SYN from a client, and if the TCP accept queue
is full, kernel will drop the SYN and add 1 to TcpExtListenOverflows.
At the same time kernel will also add 1 to TcpExtListenDrops. When a
@@ -337,6 +365,54 @@ to the accept queue.
TCP Fast Open
+=============
+* TcpEstabResets
+
+Defined in `RFC1213 tcpEstabResets`_.
+
+.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48
+
+* TcpAttemptFails
+
+Defined in `RFC1213 tcpAttemptFails`_.
+
+.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48
+
+* TcpOutRsts
+
+Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
+the 'segments sent containing the RST flag', but in the Linux kernel, this
+counter indicates the segments the kernel tried to send. The sending
+process might fail due to some errors (e.g. a memory allocation failure).
+
+.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
+
+* TcpExtTCPSpuriousRtxHostQueues
+
+When the TCP stack wants to retransmit a packet, and finds that the
+packet is not lost in the network, but has not actually been sent yet,
+the TCP stack would give up the retransmission and update this counter.
+It might happen if a packet stays too long in a qdisc or driver
+queue.
+
+* TcpEstabResets
+
+The socket receives a RST packet in the Established or CloseWait state.
+
+* TcpExtTCPKeepAlive
+
+This counter indicates how many keepalive packets were sent. Keepalive
+is not enabled by default. A userspace program could enable it by
+setting the SO_KEEPALIVE socket option.
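+
+For example, a userspace program could enable it roughly like this
+(``sfd`` is assumed to be an already created TCP socket)::
+
+  int val = 1;
+
+  setsockopt(sfd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val));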
+
+* TcpExtTCPSpuriousRTOs
+
+The spurious retransmission timeout detected by the `F-RTO`_
+algorithm.
+
+.. _F-RTO: https://tools.ietf.org/html/rfc5682
+
+TCP Fast Path
============
When kernel receives a TCP packet, it has two paths to handler the
packet, one is fast path, another is slow path. The comment in kernel
@@ -370,22 +446,23 @@ will disable the fast path at first, and try to enable it after kernel
receives packets.
* TcpExtTCPPureAcks and TcpExtTCPHPAcks
+
If a packet set ACK flag and has no data, it is a pure ACK packet, if
kernel handles it in the fast path, TcpExtTCPHPAcks will increase 1,
if kernel handles it in the slow path, TcpExtTCPPureAcks will
increase 1.
* TcpExtTCPHPHits
+
If a TCP packet has data (which means it is not a pure ACK packet),
and this packet is handled in the fast path, TcpExtTCPHPHits will
increase 1.
TCP abort
-========
-
-
+=========
* TcpExtTCPAbortOnData
+
It means TCP layer has data in flight, but need to close the
connection. So TCP layer sends a RST to the other side, indicate the
connection is not closed very graceful. An easy way to increase this
@@ -404,11 +481,13 @@ when the application closes a connection, kernel will send a RST
immediately and increase the TcpExtTCPAbortOnData counter.
* TcpExtTCPAbortOnClose
+
This counter means the application has unread data in the TCP layer when
the application wants to close the TCP connection. In such a situation,
kernel will send a RST to the other side of the TCP connection.
* TcpExtTCPAbortOnMemory
+
When an application closes a TCP connection, kernel still need to track
the connection, let it complete the TCP disconnect process. E.g. an
app calls the close method of a socket, kernel sends fin to the other
@@ -430,10 +509,12 @@ the tcp_mem. Please refer the tcp_mem section in the `TCP man page`_:
* TcpExtTCPAbortOnTimeout
+
This counter will increase when any of the TCP timers expire. In such
situation, kernel won't send RST, just give up the connection.
* TcpExtTCPAbortOnLinger
+
When a TCP connection comes into FIN_WAIT_2 state, instead of waiting
for the fin packet from the other side, kernel could send a RST and
delete the socket immediately. This is not the default behavior of
@@ -441,6 +522,7 @@ Linux kernel TCP stack. By configuring the TCP_LINGER2 socket option,
you could let kernel follow this behavior.
* TcpExtTCPAbortFailed
+
The kernel TCP layer will send RST if the `RFC2525 2.17 section`_ is
satisfied. If an internal error occurs during this process,
TcpExtTCPAbortFailed will be increased.
@@ -448,7 +530,7 @@ TcpExtTCPAbortFailed will be increased.
.. _RFC2525 2.17 section: https://tools.ietf.org/html/rfc2525#page-50
TCP Hybrid Slow Start
-====================
+=====================
The Hybrid Slow Start algorithm is an enhancement of the traditional
TCP congestion window Slow Start algorithm. It uses two pieces of
information to detect whether the max bandwidth of the TCP path is
@@ -464,23 +546,27 @@ relate with the Hybrid Slow Start algorithm.
.. _Hybrid Slow Start paper: https://pdfs.semanticscholar.org/25e9/ef3f03315782c7f1cbcd31b587857adae7d1.pdf
* TcpExtTCPHystartTrainDetect
+
How many times the ACK train length threshold is detected
* TcpExtTCPHystartTrainCwnd
+
The sum of CWND detected by ACK train length. Dividing this value by
TcpExtTCPHystartTrainDetect is the average CWND which detected by the
ACK train length.
* TcpExtTCPHystartDelayDetect
+
How many times the packet delay threshold is detected.
* TcpExtTCPHystartDelayCwnd
+
The sum of CWND detected by packet delay. Dividing this value by
TcpExtTCPHystartDelayDetect is the average CWND which detected by the
packet delay.
TCP retransmission and congestion control
-======================================
+=========================================
The TCP protocol has two retransmission mechanisms: SACK and fast
recovery. They are exclusive with each other. When SACK is enabled,
the kernel TCP stack would use SACK, or kernel would use fast
@@ -499,12 +585,14 @@ https://pdfs.semanticscholar.org/0e9c/968d09ab2e53e24c4dca5b2d67c7f7140f8e.pdf
.. _RFC6582: https://tools.ietf.org/html/rfc6582
* TcpExtTCPRenoRecovery and TcpExtTCPSackRecovery
+
When the congestion control comes into Recovery state, if sack is
used, TcpExtTCPSackRecovery increases 1, if sack is not used,
TcpExtTCPRenoRecovery increases 1. These two counters mean the TCP
stack begins to retransmit the lost packets.
* TcpExtTCPSACKReneging
+
A packet was acknowledged by SACK, but the receiver has dropped this
packet, so the sender needs to retransmit this packet. In this
situation, the sender adds 1 to TcpExtTCPSACKReneging. A receiver
@@ -515,6 +603,7 @@ the RTO expires for this packet, then the sender assumes this packet
has been dropped by the receiver.
* TcpExtTCPRenoReorder
+
The reorder packet is detected by fast recovery. It would only be used
if SACK is disabled. The fast recovery algorithm detects recorder by
the duplicate ACK number. E.g., if retransmission is triggered, and
@@ -525,6 +614,7 @@ order packet. Thus the sender would find more ACks than its
expectation, and the sender knows out of order occurs.
* TcpExtTCPTSReorder
+
The reorder packet is detected when a hole is filled. E.g., assume the
sender sends packet 1,2,3,4,5, and the receiving order is
1,2,4,5,3. When the sender receives the ACK of packet 3 (which will
@@ -534,6 +624,7 @@ fill the hole), two conditions will let TcpExtTCPTSReorder increase
than the retransmission timestamp.
* TcpExtTCPSACKReorder
+
The reorder packet detected by SACK. The SACK has two methods to
detect reorder: (1) DSACK is received by the sender. It means the
sender sends the same packet more than one times. And the only reason
@@ -545,6 +636,28 @@ packet yet, the sender would know packet 4 is out of order. The TCP
stack of kernel will increase TcpExtTCPSACKReorder for both of the
above scenarios.
+* TcpExtTCPSlowStartRetrans
+
+The TCP stack wants to retransmit a packet and the congestion control
+state is 'Loss'.
+
+* TcpExtTCPFastRetrans
+
+The TCP stack wants to retransmit a packet and the congestion control
+state is not 'Loss'.
+
+* TcpExtTCPLostRetransmit
+
+A SACK points out that a retransmission packet is lost again.
+
+* TcpExtTCPRetransFail
+
+The TCP stack tries to deliver a retransmission packet to lower layers
+but the lower layers return an error.
+
+* TcpExtTCPSynRetrans
+
+The TCP stack retransmits a SYN packet.
DSACK
=====
@@ -558,39 +671,95 @@ sender side.
.. _RFC2883 : https://tools.ietf.org/html/rfc2883
* TcpExtTCPDSACKOldSent
+
The TCP stack receives a duplicate packet which has been acked, so it
sends a DSACK to the sender.
* TcpExtTCPDSACKOfoSent
+
The TCP stack receives an out of order duplicate packet, so it sends a
DSACK to the sender.
* TcpExtTCPDSACKRecv
-The TCP stack receives a DSACK, which indicate an acknowledged
+The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
* TcpExtTCPDSACKOfoRecv
+
+The TCP stack receives a DSACK, which indicates an out of order
duplicate packet is received.
+invalid SACK and DSACK
+======================
+When a SACK (or DSACK) block is invalid, a corresponding counter would
+be updated. The validation method is based on the start/end sequence
+number of the SACK block. For more details, please refer to the comment
+of the function tcp_is_sackblock_valid in the kernel source code. A
+SACK option could have up to 4 blocks, and they are checked
+individually. E.g., if 3 blocks of a SACK are invalid, the
+corresponding counter would be updated 3 times. The comment of the
+`Add counters for discarded SACK blocks`_ patch has additional
+explanation:
+
+.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
+
+* TcpExtTCPSACKDiscard
+
+This counter indicates how many SACK blocks are invalid. If the invalid
+SACK block is caused by ACK recording, the TCP stack will only ignore
+it and won't update this counter.
+
+* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
+
+When a DSACK block is invalid, one of these two counters would be
+updated. Which counter will be updated depends on the undo_marker flag
+of the TCP socket. If the undo_marker is not set, the TCP stack isn't
+likely to have retransmitted any packets; if it still receives an
+invalid DSACK block, the reason might be that the packet was duplicated
+somewhere in the network. In such a scenario, TcpExtTCPDSACKIgnoredNoUndo
+will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
+will be updated. As implied in its name, it might be an old packet.
+
+SACK shift
+==========
+The linux networking stack stores data in sk_buff structs (skb for
+short). If a SACK block spans multiple skbs, the TCP stack will try
+to re-arrange the data in these skbs. E.g. if a SACK block acknowledges
+seq 10 to 15, skb1 has seq 10 to 13, and skb2 has seq 14 to 20, the seq
+14 and 15 in skb2 would be moved to skb1. This operation is 'shift'. If
+a SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, and skb2
+has seq 14 to 20, all data in skb2 will be moved to skb1, and skb2 will
+be discarded; this operation is 'merge'.
+
+* TcpExtTCPSackShifted
+
+A skb is shifted.
+
+* TcpExtTCPSackMerged
+
+A skb is merged.
+
+* TcpExtTCPSackShiftFallback
+
+A skb should be shifted or merged, but the TCP stack doesn't do it for
+some reason.
+
TCP out of order
-===============
+================
* TcpExtTCPOFOQueue
+
The TCP layer receives an out of order packet and has enough memory
to queue it.
* TcpExtTCPOFODrop
+
The TCP layer receives an out of order packet but doesn't have enough
memory, so drops it. Such packets won't be counted into
TcpExtTCPOFOQueue.
* TcpExtTCPOFOMerge
+
The received out of order packet has an overlay with the previous
packet. the overlay part will be dropped. All of TcpExtTCPOFOMerge
packets will also be counted into TcpExtTCPOFOQueue.
TCP PAWS
-=======
+========
PAWS (Protection Against Wrapped Sequence numbers) is an algorithm
which is used to drop old packets. It depends on the TCP
timestamps. For detail information, please refer the `timestamp wiki`_
@@ -600,13 +769,15 @@ and the `RFC of PAWS`_.
.. _timestamp wiki: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_timestamps
* TcpExtPAWSActive
+
Packets are dropped by PAWS in Syn-Sent status.
* TcpExtPAWSEstab
+
Packets are dropped by PAWS in any status other than Syn-Sent.
TCP ACK skip
-===========
+============
In some scenarios, kernel would avoid sending duplicate ACKs too
frequently. Please find more details in the tcp_invalid_ratelimit
section of the `sysctl document`_. When kernel decides to skip an ACK
@@ -618,6 +789,7 @@ it has no data.
.. _sysctl document: https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
* TcpExtTCPACKSkippedSynRecv
+
The ACK is skipped in Syn-Recv status. The Syn-Recv status means the
TCP stack receives a SYN and replies SYN+ACK. Now the TCP stack is
waiting for an ACK. Generally, the TCP stack doesn't need to send ACK
@@ -631,6 +803,7 @@ increase TcpExtTCPACKSkippedSynRecv.
* TcpExtTCPACKSkippedPAWS
+
The ACK is skipped due to PAWS (Protect Against Wrapped Sequence
numbers) check fails. If the PAWS check fails in Syn-Recv, Fin-Wait-2
or Time-Wait statuses, the skipped ACK would be counted to
@@ -639,18 +812,22 @@ TcpExtTCPACKSkippedTimeWait. In all other statuses, the skipped ACK
would be counted to TcpExtTCPACKSkippedPAWS.
* TcpExtTCPACKSkippedSeq
+
The sequence number is out of window and the timestamp passes the PAWS
check and the TCP status is not Syn-Recv, Fin-Wait-2, and Time-Wait.
* TcpExtTCPACKSkippedFinWait2
+
The ACK is skipped in Fin-Wait-2 status, the reason would be either
PAWS check fails or the received sequence number is out of window.
* TcpExtTCPACKSkippedTimeWait
+
Tha ACK is skipped in Time-Wait status, the reason would be either
PAWS check failed or the received sequence number is out of window.
* TcpExtTCPACKSkippedChallenge
+
The ACK is skipped if the ACK is a challenge ACK. The RFC 5961 defines
3 kind of challenge ACK, please refer `RFC 5961 section 3.2`_,
`RFC 5961 section 4.2`_ and `RFC 5961 section 5.2`_. Besides these
@@ -662,12 +839,194 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_).
.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9
.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11
+TCP receive window
+==================
+* TcpExtTCPWantZeroWindowAdv
+
+Depending on current memory usage, the TCP stack tries to set the
+receive window to zero. But the receive window might still be a
+non-zero value. For example, if the previous window size is 10, and the
+TCP stack receives 3 bytes, the current window size would be 7 even if
+the window size calculated from the memory usage is zero.
+
+* TcpExtTCPToZeroWindowAdv
+
+The TCP receive window is set to zero from a non-zero value.
+
+* TcpExtTCPFromZeroWindowAdv
+
+The TCP receive window is set to a non-zero value from zero.
+
+
+Delayed ACK
+===========
+The TCP Delayed ACK is a technique which is used for reducing the
+packet count in the network. For more details, please refer to the
+`Delayed ACK wiki`_
+
+.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment
+
+* TcpExtDelayedACKs
+
+A delayed ACK timer expires. The TCP stack will send a pure ACK packet
+and exit the delayed ACK mode.
+
+* TcpExtDelayedACKLocked
+
+A delayed ACK timer expires, but the TCP stack can't send an ACK
+immediately because the socket is locked by a userspace program. The
+TCP stack will send a pure ACK later (after the userspace program
+unlocks the socket). When the TCP stack sends the pure ACK later, it
+will also update TcpExtDelayedACKs and exit the delayed ACK mode.
+
+* TcpExtDelayedACKLost
+
+It will be updated when the TCP stack receives a packet which has been
+ACKed. A delayed ACK loss might cause this issue, but it could also be
+triggered by other reasons, such as a packet being duplicated in the
+network.
+
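+As a side note, an application that wants to avoid delayed ACKs on a
+latency-sensitive connection can hint the kernel with the TCP_QUICKACK
+socket option. A minimal sketch (the effect is advisory and not
+permanent, so it may need to be re-armed after subsequent reads)::
+
+  int one = 1;
+
+  /* needs <netinet/tcp.h>; fd is a connected TCP socket */
+  setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
+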
+Tail Loss Probe (TLP)
+=====================
+TLP is an algorithm which is used to detect TCP packet loss. For more
+details, please refer to the `TLP paper`_.
+
+.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01
+
+* TcpExtTCPLossProbes
+
+A TLP probe packet is sent.
+
+* TcpExtTCPLossProbeRecovery
+
+A packet loss is detected and recovered by TLP.
+
+TCP Fast Open
+=============
+TCP Fast Open is a technology which allows data transfer before the
+3-way handshake completes. Please refer to the `TCP Fast Open wiki`_ for a
+general description.
+
+.. _TCP Fast Open wiki: https://en.wikipedia.org/wiki/TCP_Fast_Open
+
+* TcpExtTCPFastOpenActive
+
+When the TCP stack receives an ACK packet in the SYN-SENT status, and
+the ACK packet acknowledges the data in the SYN packet, the TCP stack
+understands that the TFO cookie has been accepted by the other side,
+and it updates this counter.
+
+* TcpExtTCPFastOpenActiveFail
+
+This counter indicates that the TCP stack initiated a TCP Fast Open,
+but it failed. This counter would be updated in three scenarios: (1)
+the other side doesn't acknowledge the data in the SYN packet. (2) The
+SYN packet which has the TFO cookie times out at least once. (3)
+After the 3-way handshake, the retransmission timeout happens
+net.ipv4.tcp_retries1 times, because some middle-boxes may black-hole
+fast open after the handshake.
+
+* TcpExtTCPFastOpenPassive
+
+This counter indicates how many times the TCP stack accepts the fast
+open request.
+
+* TcpExtTCPFastOpenPassiveFail
+
+This counter indicates how many times the TCP stack rejects the fast
+open request. It is caused either by an invalid TFO cookie or by an
+error the TCP stack finds during the socket creation process.
+
+* TcpExtTCPFastOpenListenOverflow
+
+When the number of pending fast open requests is larger than
+fastopenq->max_qlen, the TCP stack will reject the fast open request
+and update this counter. When this counter is updated, the TCP stack
+won't update TcpExtTCPFastOpenPassive or
+TcpExtTCPFastOpenPassiveFail. The fastopenq->max_qlen is set by the
+TCP_FASTOPEN socket option and it cannot be larger than
+net.core.somaxconn. For example:
+
+setsockopt(sfd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
+
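+As an illustration, a server-side sketch of enabling fast open on a
+listening socket could look like the following (headers and error
+handling omitted; the queue length of 16 is an arbitrary choice, which
+the kernel clamps as described above)::
+
+  int sfd = socket(AF_INET, SOCK_STREAM, 0);
+  int qlen = 16;  /* arbitrary limit on pending fast open requests */
+
+  /* bind() the listening address first, then: */
+  setsockopt(sfd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
+  listen(sfd, 128);
+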
+* TcpExtTCPFastOpenCookieReqd
+
+This counter indicates how many times a client wants to request a TFO
+cookie.
+
+SYN cookies
+===========
+SYN cookies are used to mitigate SYN flood attacks. For details, please
+refer to the `SYN cookies wiki`_.
+
+.. _SYN cookies wiki: https://en.wikipedia.org/wiki/SYN_cookies
+
+* TcpExtSyncookiesSent
+
+It indicates how many SYN cookies are sent.
+
+* TcpExtSyncookiesRecv
+
+How many SYN cookie reply packets the TCP stack receives.
+
+* TcpExtSyncookiesFailed
+
+The MSS decoded from the SYN cookie is invalid. When this counter is
+updated, the received packet won't be treated as a SYN cookie and the
+TcpExtSyncookiesRecv counter won't be updated.
+
+Challenge ACK
+=============
+For details of challenge ACKs, please refer to the explanation of
+TcpExtTCPACKSkippedChallenge.
+
+* TcpExtTCPChallengeACK
+
+The number of challenge ACKs sent.
+
+* TcpExtTCPSYNChallenge
+
+The number of challenge ACKs sent in response to SYN packets. After
+updating this counter, the TCP stack might send a challenge ACK and
+update the TcpExtTCPChallengeACK counter, or it might skip sending
+the challenge and update the TcpExtTCPACKSkippedChallenge counter
+instead.
+
+prune
+=====
+When a socket is under memory pressure, the TCP stack will try to
+reclaim memory from the receiving queue and the out of order queue. One
+of the reclaiming methods is 'collapse', which means allocating a big
+skb, copying the contiguous skbs into the single big skb, and freeing
+these contiguous skbs.
+
+* TcpExtPruneCalled
+
+The TCP stack tries to reclaim memory for a socket. After updating this
+counter, the TCP stack will try to collapse the out of order queue and
+the receiving queue. If the memory is still not enough, the TCP stack
+will try to discard packets from the out of order queue (and update the
+TcpExtOfoPruned counter).
+
+* TcpExtOfoPruned
+
+The TCP stack tries to discard packets from the out of order queue.
+
+* TcpExtRcvPruned
+
+After 'collapse' and discarding packets from the out of order queue, if
+the memory actually used is still larger than the maximum allowed memory,
+this counter will be updated. It means the 'prune' failed.
+
+* TcpExtTCPRcvCollapsed
+
+This counter indicates how many skbs are freed during 'collapse'.
examples
-=======
+========
ping test
---------
+---------
Run the ping command against the public dns server 8.8.8.8::
nstatuser@nstat-a:~$ ping 8.8.8.8 -c 1
@@ -711,7 +1070,7 @@ and its corresponding Echo Reply packet are constructed by:
So the IpExtInOctets and IpExtOutOctets are 20+16+48=84.
tcp 3-way handshake
-------------------
+-------------------
On server side, we run::
nstatuser@nstat-b:~$ nc -lknv 0.0.0.0 9000
@@ -753,7 +1112,7 @@ ACK, so client sent 2 packets, received 1 packet, TcpInSegs increased
1, TcpOutSegs increased 2.
TCP normal traffic
------------------
+------------------
Run nc on server::
nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000
@@ -876,7 +1235,7 @@ and the packet received from client qualified for fast path, so it
was counted into 'TcpExtTCPHPHits'.
TcpExtTCPAbortOnClose
---------------------
+---------------------
On the server side, we run below python script::
import socket
@@ -910,7 +1269,7 @@ If we run tcpdump on the server side, we could find the server sent a
RST after we type Ctrl-C.
TcpExtTCPAbortOnMemory and TcpExtTCPAbortOnTimeout
------------------------------------------------
+---------------------------------------------------
Below is an example which let the orphan socket count be higher than
net.ipv4.tcp_max_orphans.
Change tcp_max_orphans to a smaller value on client::
@@ -1032,7 +1391,7 @@ FIN_WAIT_1 state finally. So we wait for a few minutes, we could find
TcpExtTCPAbortOnTimeout 10 0.0
TcpExtTCPAbortOnLinger
----------------------
+----------------------
The server side code::
nstatuser@nstat-b:~$ cat server_linger.py
@@ -1077,7 +1436,7 @@ After run client_linger.py, check the output of nstat::
TcpExtTCPAbortOnLinger 1 0.0
TcpExtTCPRcvCoalesce
--------------------
+--------------------
On the server, we run a program which listen on TCP port 9000, but
doesn't read any data::
@@ -1137,7 +1496,7 @@ the receiving queue. So the TCP layer merged the two packets, and we
could find the TcpExtTCPRcvCoalesce increased 1.
TcpExtListenOverflows and TcpExtListenDrops
-----------------------------------------
+-------------------------------------------
On server, run the nc command, listen on port 9000::
nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000
@@ -1185,7 +1544,7 @@ TcpExtListenOverflows and TcpExtListenDrops would be larger, because
the SYN of the 4th nc was dropped, the client was retrying.
IpInAddrErrors, IpExtInNoRoutes and IpOutNoRoutes
-----------------------------------------------
+-------------------------------------------------
server A IP address: 192.168.122.250
server B IP address: 192.168.122.251
Prepare on server A, add a route to server B::
@@ -1280,7 +1639,7 @@ a route for the 8.8.8.8 IP address, so server B increased
IpOutNoRoutes.
TcpExtTCPACKSkippedSynRecv
-------------------------
+--------------------------
In this test, we send 3 same SYN packets from client to server. The
first SYN will let server create a socket, set it to Syn-Recv status,
and reply a SYN/ACK. The second SYN will let server reply the SYN/ACK
@@ -1328,7 +1687,7 @@ Check snmp cunter on nstat-b::
As we expected, TcpExtTCPACKSkippedSynRecv is 1.
TcpExtTCPACKSkippedPAWS
-----------------------
+-----------------------
To trigger PAWS, we could send an old SYN.
On nstat-b, let nc listen on port 9000::
@@ -1365,7 +1724,7 @@ failed, the nstat-b replied an ACK for the first SYN, skipped the ACK
for the second SYN, and updated TcpExtTCPACKSkippedPAWS.
TcpExtTCPACKSkippedSeq
---------------------
+----------------------
To trigger TcpExtTCPACKSkippedSeq, we send packets which have valid
timestamp (to pass PAWS check) but the sequence number is out of
window. The Linux TCP stack would avoid skipping if the packet has
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 82236a17b5e6..86174ce8cd13 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -92,11 +92,11 @@ device.
Switch ID
^^^^^^^^^
-The switchdev driver must implement the switchdev op switchdev_port_attr_get
-for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same
-physical ID for each port of a switch. The ID must be unique between switches
-on the same system. The ID does not need to be unique between switches on
-different systems.
+The switchdev driver must implement the net_device operation
+ndo_get_port_parent_id for each port netdev, returning the same physical ID for
+each port of a switch. The ID must be unique between switches on the same
+system. The ID does not need to be unique between switches on different
+systems.
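+
+A minimal sketch of such an implementation, assuming a hypothetical
+driver-private struct foo_port that stores the switch ID (which must fit
+in the MAX_PHYS_ITEM_ID_LEN bytes of struct netdev_phys_item_id):
+
+    static int foo_get_port_parent_id(struct net_device *dev,
+                                      struct netdev_phys_item_id *ppid)
+    {
+            struct foo_port *port = netdev_priv(dev);
+
+            /* Report the same ID for every port of the same switch */
+            ppid->id_len = sizeof(port->switch_id);
+            memcpy(ppid->id, port->switch_id, ppid->id_len);
+
+            return 0;
+    }
+
+    static const struct net_device_ops foo_netdev_ops = {
+            /* ... other ndo_* callbacks ... */
+            .ndo_get_port_parent_id = foo_get_port_parent_id,
+    };
+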
The switch ID is used to locate ports on a switch and to know if aggregated
ports belong to the same switch.
@@ -196,7 +196,7 @@ The switch device will learn/forget source MAC address/VLAN on ingress packets
and notify the switch driver of the mac/vlan/port tuples. The switch driver,
in turn, will notify the bridge driver using the switchdev notifier call:
- err = call_switchdev_notifiers(val, dev, info);
+ err = call_switchdev_notifiers(val, dev, info, extack);
Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when
forgetting, and info points to a struct switchdev_notifier_fdb_info. On
@@ -232,10 +232,8 @@ Learning_sync attribute enables syncing of the learned/forgotten FDB entry to
the bridge's FDB. It's possible, but not optimal, to enable learning on the
device port and on the bridge port, and disable learning_sync.
-To support learning and learning_sync port attributes, the driver implements
-switchdev op switchdev_port_attr_get/set for
-SWITCHDEV_ATTR_PORT_ID_BRIDGE_FLAGS. The driver should initialize the attributes
-to the hardware defaults.
+To support learning, the driver implements switchdev op
+switchdev_port_attr_set for SWITCHDEV_ATTR_PORT_ID_{PRE}_BRIDGE_FLAGS.
FDB Ageing
^^^^^^^^^^
@@ -373,22 +371,3 @@ The driver can monitor for updates to arp_tbl using the netevent notifier
NETEVENT_NEIGH_UPDATE. The device can be programmed with resolved nexthops
for the routes as arp_tbl updates. The driver implements ndo_neigh_destroy
to know when arp_tbl neighbor entries are purged from the port.
-
-Transaction item queue
-^^^^^^^^^^^^^^^^^^^^^^
-
-For switchdev ops attr_set and obj_add, there is a 2 phase transaction model
-used. First phase is to "prepare" anything needed, including various checks,
-memory allocation, etc. The goal is to handle the stuff that is not unlikely
-to fail here. The second phase is to "commit" the actual changes.
-
-Switchdev provides an infrastructure for sharing items (for example memory
-allocations) between the two phases.
-
-The object created by a driver in "prepare" phase and it is queued up by:
-switchdev_trans_item_enqueue()
-During the "commit" phase, the driver gets the object by:
-switchdev_trans_item_dequeue()
-
-If a transaction is aborted during "prepare" phase, switchdev code will handle
-cleanup of the queued-up objects.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 1be0b6f9e0cb..bbdaf8990031 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -6,11 +6,21 @@ The interfaces for receiving network packages timestamps are:
* SO_TIMESTAMP
Generates a timestamp for each incoming packet in (not necessarily
monotonic) system time. Reports the timestamp via recvmsg() in a
- control message as struct timeval (usec resolution).
+ control message in usec resolution.
+ SO_TIMESTAMP is defined as SO_TIMESTAMP_NEW or SO_TIMESTAMP_OLD
+ based on the architecture type and time_t representation of libc.
+ Control message format is in struct __kernel_old_timeval for
+ SO_TIMESTAMP_OLD and in struct __kernel_sock_timeval for
+ SO_TIMESTAMP_NEW options respectively.
* SO_TIMESTAMPNS
Same timestamping mechanism as SO_TIMESTAMP, but reports the
- timestamp as struct timespec (nsec resolution).
+ timestamp as struct timespec in nsec resolution.
+ SO_TIMESTAMPNS is defined as SO_TIMESTAMPNS_NEW or SO_TIMESTAMPNS_OLD
+ based on the architecture type and time_t representation of libc.
+ Control message format is in struct timespec for SO_TIMESTAMPNS_OLD
+ and in struct __kernel_timespec for SO_TIMESTAMPNS_NEW options
+ respectively.
* IP_MULTICAST_LOOP + SO_TIMESTAMP[NS]
Only for multicast:approximate transmit timestamp obtained by
@@ -22,7 +32,7 @@ The interfaces for receiving network packages timestamps are:
timestamps for stream sockets.
-1.1 SO_TIMESTAMP:
+1.1 SO_TIMESTAMP (also SO_TIMESTAMP_OLD and SO_TIMESTAMP_NEW):
This socket option enables timestamping of datagrams on the reception
path. Because the destination socket, if any, is not known early in
@@ -31,15 +41,25 @@ same is true for all early receive timestamp options.
For interface details, see `man 7 socket`.
+Always use SO_TIMESTAMP_NEW to get the timestamp in
+struct __kernel_sock_timeval format.
-1.2 SO_TIMESTAMPNS:
+SO_TIMESTAMP_OLD returns incorrect timestamps after the year 2038
+on 32 bit machines.
+
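+A minimal reception sketch for this option (shown for the classic
+SO_TIMESTAMP/SCM_TIMESTAMP pair carrying a struct timeval; with
+SO_TIMESTAMP_NEW the payload is a struct __kernel_sock_timeval instead;
+error handling omitted):
+
+  int on = 1;
+  char data[1500], ctrl[CMSG_SPACE(sizeof(struct timeval))];
+  struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
+  struct msghdr msg = {
+          .msg_iov = &iov, .msg_iovlen = 1,
+          .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
+  };
+  struct cmsghdr *cmsg;
+
+  setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
+
+  if (recvmsg(fd, &msg, 0) >= 0) {
+          for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
+               cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+                  if (cmsg->cmsg_level == SOL_SOCKET &&
+                      cmsg->cmsg_type == SCM_TIMESTAMP) {
+                          struct timeval tv;
+
+                          memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
+                          /* tv holds the packet's receive time */
+                  }
+          }
+  }
+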
+1.2 SO_TIMESTAMPNS (also SO_TIMESTAMPNS_OLD and SO_TIMESTAMPNS_NEW):
This option is identical to SO_TIMESTAMP except for the returned data type.
Its struct timespec allows for higher resolution (ns) timestamps than the
timeval of SO_TIMESTAMP (ms).
+Always use SO_TIMESTAMPNS_NEW to get the timestamp in
+struct __kernel_timespec format.
+
+SO_TIMESTAMPNS_OLD returns incorrect timestamps after the year 2038
+on 32 bit machines.
-1.3 SO_TIMESTAMPING:
+1.3 SO_TIMESTAMPING (also SO_TIMESTAMPING_OLD and SO_TIMESTAMPING_NEW):
Supports multiple types of timestamp requests. As a result, this
socket option takes a bitmap of flags, not a boolean. In
@@ -323,10 +343,23 @@ SO_TIMESTAMP and SO_TIMESTAMPNS records can be retrieved.
These timestamps are returned in a control message with cmsg_level
SOL_SOCKET, cmsg_type SCM_TIMESTAMPING, and payload of type
+For SO_TIMESTAMPING_OLD:
+
struct scm_timestamping {
struct timespec ts[3];
};
+For SO_TIMESTAMPING_NEW:
+
+struct scm_timestamping64 {
+ struct __kernel_timespec ts[3];
+};
+
+Always use SO_TIMESTAMPING_NEW to get the timestamp in
+struct scm_timestamping64 format.
+
+SO_TIMESTAMPING_OLD returns incorrect timestamps after the year 2038
+on 32 bit machines.
+
The structure can return up to three timestamps. This is a legacy
feature. At least one field is non-zero at any time. Most timestamps
are passed in ts[0]. Hardware timestamps are passed in ts[2].
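
A sketch of extracting the timestamps from the control message after a
recvmsg() call (shown for the SO_TIMESTAMPING_OLD layout above; with
SO_TIMESTAMPING_NEW, read a struct scm_timestamping64 instead):

  struct scm_timestamping ts;
  struct cmsghdr *cmsg;

  for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
          if (cmsg->cmsg_level == SOL_SOCKET &&
              cmsg->cmsg_type == SCM_TIMESTAMPING) {
                  memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
                  /* ts.ts[0]: software timestamp,
                   * ts.ts[2]: hardware timestamp (as described above)
                   */
          }
  }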
@@ -417,7 +450,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set.
Hardware time stamping must also be initialized for each device driver
that is expected to do hardware time stamping. The parameter is defined in
-/include/linux/net_tstamp.h as:
+include/uapi/linux/net_tstamp.h as:
struct hwtstamp_config {
int flags; /* no flags defined right now, must be zero */
@@ -487,7 +520,7 @@ enum {
HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
/* for the complete list of values, please check
- * the include file /include/linux/net_tstamp.h
+ * the include file include/uapi/linux/net_tstamp.h
*/
};
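
As an illustration, an application typically hands a filled-in
hwtstamp_config to the device through the SIOCSHWTSTAMP ioctl; a sketch
(the interface name "eth0" and the chosen filter are arbitrary, error
handling reduced to a perror()):

  struct hwtstamp_config config = {
          .flags = 0,
          .tx_type = HWTSTAMP_TX_ON,
          .rx_filter = HWTSTAMP_FILTER_ALL,
  };
  struct ifreq ifr;

  /* needs <linux/net_tstamp.h>, <linux/sockios.h>, <net/if.h>;
   * sock is any open socket descriptor
   */
  memset(&ifr, 0, sizeof(ifr));
  strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
  ifr.ifr_data = (char *)&config;

  if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
          perror("SIOCSHWTSTAMP");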
diff --git a/Documentation/power/energy-model.txt b/Documentation/power/energy-model.txt
new file mode 100644
index 000000000000..a2b0ae4c76bd
--- /dev/null
+++ b/Documentation/power/energy-model.txt
@@ -0,0 +1,144 @@
+ ====================
+ Energy Model of CPUs
+ ====================
+
+1. Overview
+-----------
+
+The Energy Model (EM) framework serves as an interface between drivers knowing
+the power consumed by CPUs at various performance levels, and the kernel
+subsystems willing to use that information to make energy-aware decisions.
+
+The source of the information about the power consumed by CPUs can vary greatly
+from one platform to another. These power costs can be estimated using
+devicetree data in some cases. In others, the firmware will know better.
+Alternatively, userspace might be best positioned. And so on. To avoid having
+each and every client subsystem re-implement support for each and every
+possible source of information on its own, the EM framework intervenes as an
+abstraction layer which standardizes the format of power cost tables in the
+kernel, hence avoiding redundant work.
+
+The figure below depicts an example of drivers (Arm-specific here, but the
+approach is applicable to any architecture) providing power costs to the EM
+framework, and interested clients reading the data from it.
+
+ +---------------+ +-----------------+ +---------------+
+ | Thermal (IPA) | | Scheduler (EAS) | | Other |
+ +---------------+ +-----------------+ +---------------+
+ | | em_pd_energy() |
+ | | em_cpu_get() |
+ +---------+ | +---------+
+ | | |
+ v v v
+ +---------------------+
+ | Energy Model |
+ | Framework |
+ +---------------------+
+ ^ ^ ^
+ | | | em_register_perf_domain()
+ +----------+ | +---------+
+ | | |
+ +---------------+ +---------------+ +--------------+
+ | cpufreq-dt | | arm_scmi | | Other |
+ +---------------+ +---------------+ +--------------+
+ ^ ^ ^
+ | | |
+ +--------------+ +---------------+ +--------------+
+ | Device Tree | | Firmware | | ? |
+ +--------------+ +---------------+ +--------------+
+
+The EM framework manages power cost tables per 'performance domain' in the
+system. A performance domain is a group of CPUs whose performance is scaled
+together. Performance domains generally have a 1-to-1 mapping with CPUFreq
+policies. All CPUs in a performance domain are required to have the same
+micro-architecture. CPUs in different performance domains can have different
+micro-architectures.
+
+
+2. Core APIs
+------------
+
+ 2.1 Config options
+
+CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
+
+
+ 2.2 Registration of performance domains
+
+Drivers are expected to register performance domains into the EM framework by
+calling the following API:
+
+ int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
+ struct em_data_callback *cb);
+
+Drivers must specify the CPUs of the performance domains using the cpumask
+argument, and provide a callback function returning <frequency, power> tuples
+for each capacity state. The callback function provided by the driver is free
+to fetch data from any relevant location (DT, firmware, ...), and by any means
+deemed necessary. See Section 3. for an example of a driver implementing this
+callback, and kernel/power/energy_model.c for further documentation on this
+API.
+
+
+ 2.3 Accessing performance domains
+
+Subsystems interested in the energy model of a CPU can retrieve it using the
+em_cpu_get() API. The energy model tables are allocated once upon creation of
+the performance domains, and kept in memory untouched.
+
+The energy consumed by a performance domain can be estimated using the
+em_pd_energy() API. The estimation is performed assuming that the schedutil
+CPUfreq governor is in use.
+
+More details about the above APIs can be found in include/linux/energy_model.h.
+
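+As an illustration, a client could walk the capacity states of a CPU's
+performance domain as sketched below. This is only a sketch: the field
+names (table, nr_cap_states, frequency, power) reflect the layout of
+struct em_perf_domain and struct em_cap_state at the time of writing,
+and foo_dump_em() is a made-up helper.
+
+  static void foo_dump_em(int cpu)
+  {
+          struct em_perf_domain *pd = em_cpu_get(cpu);
+          int i;
+
+          if (!pd)
+                  return; /* no Energy Model registered for this CPU */
+
+          for (i = 0; i < pd->nr_cap_states; i++)
+                  pr_info("cpu%d OPP%d: freq=%lu kHz, power=%lu mW\n",
+                          cpu, i, pd->table[i].frequency, pd->table[i].power);
+  }
+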
+
+3. Example driver
+-----------------
+
+This section provides a simple example of a CPUFreq driver registering a
+performance domain in the Energy Model framework using the (fake) 'foo'
+protocol. The driver implements an est_power() function to be provided to the
+EM framework.
+
+ -> drivers/cpufreq/foo_cpufreq.c
+
+  static int est_power(unsigned long *mW, unsigned long *KHz, int cpu)
+  {
+          long freq, power;
+
+          /* Use the 'foo' protocol to ceil the frequency */
+          freq = foo_get_freq_ceil(cpu, *KHz);
+          if (freq < 0)
+                  return freq;
+
+          /* Estimate the power cost for the CPU at the relevant freq. */
+          power = foo_estimate_power(cpu, freq);
+          if (power < 0)
+                  return power;
+
+          /* Return the values to the EM framework */
+          *mW = power;
+          *KHz = freq;
+
+          return 0;
+  }
+
+  static int foo_cpufreq_init(struct cpufreq_policy *policy)
+  {
+          struct em_data_callback em_cb = EM_DATA_CB(est_power);
+          int nr_opp, ret;
+
+          /* Do the actual CPUFreq init work ... */
+          ret = do_foo_cpufreq_init(policy);
+          if (ret)
+                  return ret;
+
+          /* Find the number of OPPs for this policy */
+          nr_opp = foo_get_nr_opp(policy);
+
+          /* And register the new performance domain */
+          em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+
+          return 0;
+  }
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst
index dc2ddc345044..fbb9297e6360 100644
--- a/Documentation/process/applying-patches.rst
+++ b/Documentation/process/applying-patches.rst
@@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to
generate a patch representing the differences between two patches and then
apply the result.
-This will let you move from something like 4.7.2 to 4.7.3 in a single
+This will let you move from something like 5.7.2 to 5.7.3 in a single
step. The -z flag to interdiff will even let you feed it patches in gzip or
bzip2 compressed form directly without the use of zcat or bzcat or manual
decompression.
-Here's how you'd go from 4.7.2 to 4.7.3 in a single step::
+Here's how you'd go from 5.7.2 to 5.7.3 in a single step::
- interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1
+ interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1
Although interdiff may save you a step or two you are generally advised to
do the additional steps since interdiff can get things wrong in some cases.
@@ -245,62 +245,67 @@ The patches are available at http://kernel.org/
Most recent patches are linked from the front page, but they also have
specific homes.
-The 4.x.y (-stable) and 4.x patches live at
+The 5.x.y (-stable) and 5.x patches live at
- https://www.kernel.org/pub/linux/kernel/v4.x/
+ https://www.kernel.org/pub/linux/kernel/v5.x/
-The -rc patches live at
+The -rc patches are not stored on the webserver but are generated on
+demand from git tags such as
- https://www.kernel.org/pub/linux/kernel/v4.x/testing/
+ https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0
+The stable -rc patches live at
-The 4.x kernels
+ https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/
+
+
+The 5.x kernels
===============
These are the base stable releases released by Linus. The highest numbered
release is the most recent.
If regressions or other serious flaws are found, then a -stable fix patch
-will be released (see below) on top of this base. Once a new 4.x base
+will be released (see below) on top of this base. Once a new 5.x base
kernel is released, a patch is made available that is a delta between the
-previous 4.x kernel and the new one.
+previous 5.x kernel and the new one.
-To apply a patch moving from 4.6 to 4.7, you'd do the following (note
-that such patches do **NOT** apply on top of 4.x.y kernels but on top of the
-base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to
-first revert the 4.x.y patch).
+To apply a patch moving from 5.6 to 5.7, you'd do the following (note
+that such patches do **NOT** apply on top of 5.x.y kernels but on top of the
+base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to
+first revert the 5.x.y patch).
Here are some examples::
- # moving from 4.6 to 4.7
+ # moving from 5.6 to 5.7
- $ cd ~/linux-4.6 # change to kernel source dir
- $ patch -p1 < ../patch-4.7 # apply the 4.7 patch
+ $ cd ~/linux-5.6 # change to kernel source dir
+ $ patch -p1 < ../patch-5.7 # apply the 5.7 patch
$ cd ..
- $ mv linux-4.6 linux-4.7 # rename source dir
+ $ mv linux-5.6 linux-5.7 # rename source dir
- # moving from 4.6.1 to 4.7
+ # moving from 5.6.1 to 5.7
- $ cd ~/linux-4.6.1 # change to kernel source dir
- $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch
- # source dir is now 4.6
- $ patch -p1 < ../patch-4.7 # apply new 4.7 patch
+ $ cd ~/linux-5.6.1 # change to kernel source dir
+ $ patch -p1 -R < ../patch-5.6.1 # revert the 5.6.1 patch
+ # source dir is now 5.6
+ $ patch -p1 < ../patch-5.7 # apply new 5.7 patch
$ cd ..
- $ mv linux-4.6.1 linux-4.7 # rename source dir
+ $ mv linux-5.6.1 linux-5.7 # rename source dir
-The 4.x.y kernels
+The 5.x.y kernels
=================
Kernels with 3-digit versions are -stable kernels. They contain small(ish)
critical fixes for security problems or significant regressions discovered
-in a given 4.x kernel.
+in a given 5.x kernel.
This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development/experimental
versions.
-If no 4.x.y kernel is available, then the highest numbered 4.x kernel is
+If no 5.x.y kernel is available, then the highest numbered 5.x kernel is
the current stable kernel.
.. note::
@@ -308,23 +313,23 @@ the current stable kernel.
The -stable team usually do make incremental patches available as well
as patches against the latest mainline release, but I only cover the
non-incremental ones below. The incremental ones can be found at
- https://www.kernel.org/pub/linux/kernel/v4.x/incr/
+ https://www.kernel.org/pub/linux/kernel/v5.x/incr/
-These patches are not incremental, meaning that for example the 4.7.3
-patch does not apply on top of the 4.7.2 kernel source, but rather on top
-of the base 4.7 kernel source.
+These patches are not incremental, meaning that for example the 5.7.3
+patch does not apply on top of the 5.7.2 kernel source, but rather on top
+of the base 5.7 kernel source.
-So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel
-source you have to first back out the 4.7.2 patch (so you are left with a
-base 4.7 kernel source) and then apply the new 4.7.3 patch.
+So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel
+source you have to first back out the 5.7.2 patch (so you are left with a
+base 5.7 kernel source) and then apply the new 5.7.3 patch.
Here's a small example::
- $ cd ~/linux-4.7.2 # change to the kernel source dir
- $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch
- $ patch -p1 < ../patch-4.7.3 # apply the new 4.7.3 patch
+ $ cd ~/linux-5.7.2 # change to the kernel source dir
+ $ patch -p1 -R < ../patch-5.7.2 # revert the 5.7.2 patch
+ $ patch -p1 < ../patch-5.7.3 # apply the new 5.7.3 patch
$ cd ..
- $ mv linux-4.7.2 linux-4.7.3 # rename the kernel source dir
+ $ mv linux-5.7.2 linux-5.7.3 # rename the kernel source dir
The -rc kernels
===============
@@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing
development kernels but do not want to run some of the really experimental
stuff (such people should see the sections about -next and -mm kernels below).
-The -rc patches are not incremental, they apply to a base 4.x kernel, just
-like the 4.x.y patches described above. The kernel version before the -rcN
+The -rc patches are not incremental, they apply to a base 5.x kernel, just
+like the 5.x.y patches described above. The kernel version before the -rcN
suffix denotes the version of the kernel that this -rc kernel will eventually
turn into.
-So, 4.8-rc5 means that this is the fifth release candidate for the 4.8
-kernel and the patch should be applied on top of the 4.7 kernel source.
+So, 5.8-rc5 means that this is the fifth release candidate for the 5.8
+kernel and the patch should be applied on top of the 5.7 kernel source.
Here are 3 examples of how to apply these patches::
- # first an example of moving from 4.7 to 4.8-rc3
+ # first an example of moving from 5.7 to 5.8-rc3
- $ cd ~/linux-4.7 # change to the 4.7 source dir
- $ patch -p1 < ../patch-4.8-rc3 # apply the 4.8-rc3 patch
+ $ cd ~/linux-5.7 # change to the 5.7 source dir
+ $ patch -p1 < ../patch-5.8-rc3 # apply the 5.8-rc3 patch
$ cd ..
- $ mv linux-4.7 linux-4.8-rc3 # rename the source dir
+ $ mv linux-5.7 linux-5.8-rc3 # rename the source dir
- # now let's move from 4.8-rc3 to 4.8-rc5
+ # now let's move from 5.8-rc3 to 5.8-rc5
- $ cd ~/linux-4.8-rc3 # change to the 4.8-rc3 dir
- $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch
- $ patch -p1 < ../patch-4.8-rc5 # apply the new 4.8-rc5 patch
+ $ cd ~/linux-5.8-rc3 # change to the 5.8-rc3 dir
+ $ patch -p1 -R < ../patch-5.8-rc3 # revert the 5.8-rc3 patch
+ $ patch -p1 < ../patch-5.8-rc5 # apply the new 5.8-rc5 patch
$ cd ..
- $ mv linux-4.8-rc3 linux-4.8-rc5 # rename the source dir
+ $ mv linux-5.8-rc3 linux-5.8-rc5 # rename the source dir
- # finally let's try and move from 4.7.3 to 4.8-rc5
+ # finally let's try and move from 5.7.3 to 5.8-rc5
- $ cd ~/linux-4.7.3 # change to the kernel source dir
- $ patch -p1 -R < ../patch-4.7.3 # revert the 4.7.3 patch
- $ patch -p1 < ../patch-4.8-rc5 # apply new 4.8-rc5 patch
+ $ cd ~/linux-5.7.3 # change to the kernel source dir
+ $ patch -p1 -R < ../patch-5.7.3 # revert the 5.7.3 patch
+ $ patch -p1 < ../patch-5.8-rc5 # apply new 5.8-rc5 patch
$ cd ..
- $ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir
+ $ mv linux-5.7.3 linux-5.8-rc5 # rename the kernel source dir
The -mm patches and the linux-next tree
diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt
new file mode 100644
index 000000000000..197d81f4b836
--- /dev/null
+++ b/Documentation/scheduler/sched-energy.txt
@@ -0,0 +1,425 @@
+ =======================
+ Energy Aware Scheduling
+ =======================
+
+1. Introduction
+---------------
+
+Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
+the impact of its decisions on the energy consumed by CPUs. EAS relies on an
+Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
+with a minimal impact on throughput. This document aims at providing an
+introduction to how EAS works, what the main design decisions behind it are,
+and what is needed to get it to run.
+
+Before going any further, please note that at the time of writing:
+
+ /!\ EAS does not support platforms with symmetric CPU topologies /!\
+
+EAS operates only on heterogeneous CPU topologies (such as Arm big.LITTLE)
+because this is where the potential for saving energy through scheduling is
+the highest.
+
+The actual EM used by EAS is _not_ maintained by the scheduler, but by a
+dedicated framework. For details about this framework and what it provides,
+please refer to its documentation (see Documentation/power/energy-model.txt).
+
+
+2. Background and Terminology
+-----------------------------
+
+To make it clear from the start:
+ - energy = [joule] (resource like a battery on powered devices)
+ - power = energy/time = [joule/second] = [watt]
+
+The goal of EAS is to minimize energy, while still getting the job done. That
+is, we want to maximize:
+
+ performance [inst/s]
+ --------------------
+ power [W]
+
+which is equivalent to minimizing:
+
+ energy [J]
+ -----------
+ instruction
+
+while still getting 'good' performance. It is essentially an alternative
+optimization objective to the current performance-only objective for the
+scheduler. This alternative considers two objectives: energy-efficiency and
+performance.
+
+The idea behind introducing an EM is to allow the scheduler to evaluate the
+implications of its decisions rather than blindly applying energy-saving
+techniques that may have positive effects only on some platforms. At the same
+time, the EM must be as simple as possible to minimize the scheduler latency
+impact.
+
+In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
+for the scheduler to decide where a task should run (during wake-up), the EM
+is used to break the tie between several good CPU candidates and pick the one
+that is predicted to yield the best energy consumption without harming the
+system's throughput. The predictions made by EAS rely on specific elements of
+knowledge about the platform's topology, which include the 'capacity' of CPUs,
+and their respective energy costs.
+
+
+3. Topology information
+-----------------------
+
+EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
+differentiate CPUs with different computing throughput. The 'capacity' of a CPU
+represents the amount of work it can absorb when running at its highest
+frequency compared to the most capable CPU of the system. Capacity values are
+normalized in a 1024 range, and are comparable with the utilization signals of
+tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
+to capacity and utilization values, EAS is able to estimate how big/busy a
+task/CPU is, and to take this into consideration when evaluating performance vs
+energy trade-offs. The capacity of CPUs is provided via arch-specific code
+through the arch_scale_cpu_capacity() callback.
+
+The rest of platform knowledge used by EAS is directly read from the Energy
+Model (EM) framework. The EM of a platform is composed of a power cost table
+per 'performance domain' in the system (see Documentation/power/energy-model.txt
+for further details about performance domains).
+
+The scheduler manages references to the EM objects in the topology code when the
+scheduling domains are built, or re-built. For each root domain (rd), the
+scheduler maintains a singly linked list of all performance domains intersecting
+the current rd->span. Each node in the list contains a pointer to a struct
+em_perf_domain as provided by the EM framework.
+
+The lists are attached to the root domains in order to cope with exclusive
+cpuset configurations. Since the boundaries of exclusive cpusets do not
+necessarily match those of performance domains, the lists of different root
+domains can contain duplicate elements.
+
+Example 1.
+ Let us consider a platform with 12 CPUs, split in 3 performance domains
+ (pd0, pd4 and pd8), organized as follows:
+
+ CPUs: 0 1 2 3 4 5 6 7 8 9 10 11
+ PDs: |--pd0--|--pd4--|---pd8---|
+ RDs: |----rd1----|-----rd2-----|
+
+ Now, consider that userspace decided to split the system with two
+ exclusive cpusets, hence creating two independent root domains, each
+ containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
+ above figure. Since pd4 intersects with both rd1 and rd2, it will be
+ present in the linked list '->pd' attached to each of them:
+ * rd1->pd: pd0 -> pd4
+ * rd2->pd: pd4 -> pd8
+
+ Please note that the scheduler will create two duplicate list nodes for
+ pd4 (one for each list). However, both just hold a pointer to the same
+ shared data structure of the EM framework.
+
+Since the access to these lists can happen concurrently with hotplug and other
+things, they are protected by RCU, like the rest of topology structures
+manipulated by the scheduler.
+
+EAS also maintains a static key (sched_energy_present) which is enabled when at
+least one root domain meets all conditions for EAS to start. Those conditions
+are summarized in Section 6.
+
+
+4. Energy-Aware task placement
+------------------------------
+
+EAS overrides the CFS task wake-up balancing code. It uses the EM of the
+platform and the PELT signals to choose an energy-efficient target CPU during
+wake-up balance. When EAS is enabled, select_task_rq_fair() calls
+find_energy_efficient_cpu() to do the placement decision. This function looks
+for the CPU with the highest spare capacity (CPU capacity - CPU utilization) in
+each performance domain since it is the one which will allow us to keep the
+frequency the lowest. Then, the function checks if placing the task there could
+save energy compared to leaving it on prev_cpu, i.e. the CPU where the task ran
+in its previous activation.
+
+find_energy_efficient_cpu() uses compute_energy() to estimate what will be the
+energy consumed by the system if the waking task was migrated. compute_energy()
+looks at the current utilization landscape of the CPUs and adjusts it to
+'simulate' the task migration. The EM framework provides the em_pd_energy() API
+which computes the expected energy consumption of each performance domain for
+the given utilization landscape.
+
+An example of energy-optimized task placement decision is detailed below.
+
+Example 2.
+ Let us consider a (fake) platform with 2 independent performance domains
+ composed of two CPUs each. CPU0 and CPU1 are little CPUs; CPU2 and CPU3
+ are big.
+
+ The scheduler must decide where to place a task P whose util_avg = 200
+ and prev_cpu = 0.
+
+ The current utilization landscape of the CPUs is depicted on the graph
+ below. CPUs 0-3 have a util_avg of 400, 100, 600 and 500 respectively.
+ Each performance domain has three Operating Performance Points (OPPs).
+ The CPU capacity and power cost associated with each OPP is listed in
+ the Energy Model table. The util_avg of P is shown on the figures
+ below as 'PP'.
+
+ CPU util.
+ 1024 - - - - - - - Energy Model
+ +-----------+-------------+
+ | Little | Big |
+ 768 ============= +-----+-----+------+------+
+ | Cap | Pwr | Cap | Pwr |
+ +-----+-----+------+------+
+ 512 =========== - ##- - - - - | 170 | 50 | 512 | 400 |
+ ## ## | 341 | 150 | 768 | 800 |
+ 341 -PP - - - - ## ## | 512 | 300 | 1024 | 1700 |
+ PP ## ## +-----+-----+------+------+
+ 170 -## - - - - ## ##
+ ## ## ## ##
+ ------------ -------------
+ CPU0 CPU1 CPU2 CPU3
+
+ Current OPP: ===== Other OPP: - - - util_avg (100 each): ##
+
+
+ find_energy_efficient_cpu() will first look for the CPUs with the
+ maximum spare capacity in the two performance domains. In this example,
+ CPU1 and CPU3. Then it will estimate the energy of the system if P was
+ placed on either of them, and check if that would save some energy
+ compared to leaving P on CPU0. EAS assumes that OPPs follow utilization
+ (which is coherent with the behaviour of the schedutil CPUFreq
+ governor, see Section 6. for more details on this topic).
+
+ Case 1. P is migrated to CPU1
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ 1024 - - - - - - -
+
+ Energy calculation:
+ 768 ============= * CPU0: 200 / 341 * 150 = 88
+ * CPU1: 300 / 341 * 150 = 131
+ * CPU2: 600 / 768 * 800 = 625
+ 512 - - - - - - - ##- - - - - * CPU3: 500 / 768 * 800 = 520
+ ## ## => total_energy = 1364
+ 341 =========== ## ##
+ PP ## ##
+ 170 -## - - PP- ## ##
+ ## ## ## ##
+ ------------ -------------
+ CPU0 CPU1 CPU2 CPU3
+
+
+ Case 2. P is migrated to CPU3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ 1024 - - - - - - -
+
+ Energy calculation:
+ 768 ============= * CPU0: 200 / 341 * 150 = 88
+ * CPU1: 100 / 341 * 150 = 43
+ PP * CPU2: 600 / 768 * 800 = 625
+ 512 - - - - - - - ##- - -PP - * CPU3: 700 / 768 * 800 = 729
+ ## ## => total_energy = 1485
+ 341 =========== ## ##
+ ## ##
+ 170 -## - - - - ## ##
+ ## ## ## ##
+ ------------ -------------
+ CPU0 CPU1 CPU2 CPU3
+
+
+ Case 3. P stays on prev_cpu / CPU 0
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ 1024 - - - - - - -
+
+ Energy calculation:
+ 768 ============= * CPU0: 400 / 512 * 300 = 234
+ * CPU1: 100 / 512 * 300 = 58
+ * CPU2: 600 / 768 * 800 = 625
+ 512 =========== - ##- - - - - * CPU3: 500 / 768 * 800 = 520
+ ## ## => total_energy = 1437
+ 341 -PP - - - - ## ##
+ PP ## ##
+ 170 -## - - - - ## ##
+ ## ## ## ##
+ ------------ -------------
+ CPU0 CPU1 CPU2 CPU3
+
+
+ From these calculations, Case 1 has the lowest total energy. So CPU 1
+ is the best candidate from an energy-efficiency standpoint.
+
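+ The arithmetic behind these figures is simply the power cost of the
+ OPP each CPU runs at, scaled by the CPU's utilization. A minimal
+ userspace sketch of that calculation, using the hypothetical numbers
+ of Case 1 (this illustrates the estimate above, not the kernel's
+ actual compute_energy() implementation):
+
+  struct cpu_state {
+          unsigned long util;   /* utilization, incl. the waking task */
+          unsigned long cap;    /* capacity at the chosen OPP */
+          unsigned long power;  /* power cost of the chosen OPP */
+  };
+
+  /* Case 1 of Example 2: P (util_avg = 200) is placed on CPU1 */
+  static const struct cpu_state case1[] = {
+          { 200, 341, 150 },    /* CPU0 */
+          { 300, 341, 150 },    /* CPU1 */
+          { 600, 768, 800 },    /* CPU2 */
+          { 500, 768, 800 },    /* CPU3 */
+  };
+
+  static unsigned long estimate_energy(const struct cpu_state *c, int nr)
+  {
+          unsigned long nrg = 0;
+          int i;
+
+          for (i = 0; i < nr; i++)
+                  nrg += c[i].util * c[i].power / c[i].cap;
+
+          /* 1363 with integer division here; ~1364 with the per-CPU
+           * rounding used in the figures above.
+           */
+          return nrg;
+  }
+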
+Big CPUs are generally more power hungry than the little ones and are thus used
+mainly when a task doesn't fit the littles. However, little CPUs aren't always
+necessarily more energy-efficient than big CPUs. For some systems, the high OPPs
+of the little CPUs can be less energy-efficient than the lowest OPPs of the
+bigs, for example. So, if the little CPUs happen to have enough utilization at
+a specific point in time, a small task waking up at that moment could be better
+off executing on the big side in order to save energy, even though it would fit
+on the little side.
+
+And even in the case where all OPPs of the big CPUs are less energy-efficient
+than those of the little, using the big CPUs for a small task might still, under
+specific conditions, save energy. Indeed, placing a task on a little CPU can
+result in raising the OPP of the entire performance domain, and that will
+increase the cost of the tasks already running there. If the waking task is
+placed on a big CPU, its own execution cost might be higher than if it was
+running on a little, but it won't impact the other tasks of the little CPUs
+which will keep running at a lower OPP. So, when considering the total energy
+consumed by CPUs, the extra cost of running that one task on a big core can be
+smaller than the cost of raising the OPP on the little CPUs for all the other
+tasks.
+
+The examples above would be nearly impossible to get right in a generic way, and
+for all platforms, without knowing the cost of running at different OPPs on all
+CPUs of the system. Thanks to its EM-based design, EAS should cope with them
+correctly without too much trouble. However, in order to ensure a minimal
+impact on throughput for high-utilization scenarios, EAS also implements another
+mechanism called 'over-utilization'.
+
+
+5. Over-utilization
+-------------------
+
+From a general standpoint, the use-cases where EAS can help the most are those
+involving a light/medium CPU utilization. Whenever long CPU-bound tasks are
+being run, they will require all of the available CPU capacity, and there isn't
+much that can be done by the scheduler to save energy without severely harming
+throughput. In order to avoid hurting performance with EAS, CPUs are flagged as
+'over-utilized' as soon as they are used at more than 80% of their compute
+capacity. As long as no CPUs are over-utilized in a root domain, load balancing
+is disabled and EAS overrides the wake-up balancing code. EAS is likely to load
+the most energy efficient CPUs of the system more than the others if that can be
+done without harming throughput. So, the load-balancer is disabled to prevent
+it from breaking the energy-efficient task placement found by EAS. It is safe to
+do so when the system isn't overutilized since being below the 80% tipping point
+implies that:
+
+ a. there is some idle time on all CPUs, so the utilization signals used by
+ EAS are likely to accurately represent the 'size' of the various tasks
+ in the system;
+ b. all tasks should already be provided with enough CPU capacity,
+ regardless of their nice values;
+ c. since there is spare capacity all tasks must be blocking/sleeping
+ regularly and balancing at wake-up is sufficient.
+
+As soon as one CPU goes above the 80% tipping point, at least one of the three
+assumptions above becomes incorrect. In this scenario, the 'overutilized' flag
+is raised for the entire root domain, EAS is disabled, and the load-balancer is
+re-enabled. By doing so, the scheduler falls back onto load-based algorithms for
+wake-up and load balance under CPU-bound conditions. This provides better
+respect for the nice values of tasks.
+
+Since the notion of overutilization largely relies on detecting whether or not
+there is some idle time in the system, the CPU capacity 'stolen' by higher
+(than CFS) scheduling classes (as well as IRQ) must be taken into account. As
+such, the detection of overutilization accounts for the capacity used not only
+by CFS tasks, but also by the other scheduling classes and IRQ.
+
+
+6. Dependencies and requirements for EAS
+----------------------------------------
+
+Energy Aware Scheduling depends on the CPUs of the system having specific
+hardware properties and on other features of the kernel being enabled. This
+section lists these dependencies and provides hints as to how they can be met.
+
+
+ 6.1 - Asymmetric CPU topology
+
+As mentioned in the introduction, EAS is only supported on platforms with
+asymmetric CPU topologies for now. This requirement is checked at run-time by
+looking for the presence of the SD_ASYM_CPUCAPACITY flag when the scheduling
+domains are built.
+
+The flag is set/cleared automatically by the scheduler topology code whenever
+there are CPUs with different capacities in a root domain. The capacities of
+CPUs are provided by arch-specific code through the arch_scale_cpu_capacity()
+callback. As an example, arm and arm64 share an implementation of this callback
+which uses a combination of CPUFreq data and device-tree bindings to compute the
+capacity of CPUs (see drivers/base/arch_topology.c for more details).
+
+So, in order to use EAS on your platform your architecture must implement the
+arch_scale_cpu_capacity() callback, and some of the CPUs must have a lower
+capacity than others.
+
+Please note that EAS is not fundamentally incompatible with SMP, but no
+significant savings on SMP platforms have been observed yet. This restriction
+could be amended in the future if proven otherwise.
+
+
+ 6.2 - Energy Model presence
+
+EAS uses the EM of a platform to estimate the impact of scheduling decisions on
+energy. So, your platform must provide power cost tables to the EM framework in
+order to make EAS start. To do so, please refer to documentation of the
+independent EM framework in Documentation/power/energy-model.txt.
+
+Please also note that the scheduling domains need to be re-built after the
+EM has been registered in order to start EAS.
+
+
+ 6.3 - Energy Model complexity
+
+The task wake-up path is very latency-sensitive. When the EM of a platform is
+too complex (too many CPUs, too many performance domains, too many performance
+states, ...), the cost of using it in the wake-up path can become prohibitive.
+The energy-aware wake-up algorithm has a complexity of:
+
+ C = Nd * (Nc + Ns)
+
+with: Nd the number of performance domains; Nc the number of CPUs; and Ns the
+total number of OPPs (ex: for two perf. domains with 4 OPPs each, Ns = 8).
+
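+For example, on a hypothetical system with 8 CPUs split into two performance
+domains of 4 OPPs each (Nd = 2, Nc = 8, Ns = 8), the complexity would be
+C = 2 * (8 + 8) = 32.
+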
+A complexity check is performed at the root domain level, when scheduling
+domains are built. EAS will not start on a root domain if its C happens to be
+higher than the completely arbitrary EM_MAX_COMPLEXITY threshold (2048 at the
+time of writing).
+
+If you really want to use EAS but the complexity of your platform's Energy
+Model is too high to be used with a single root domain, you're left with only
+two possible options:
+
+ 1. split your system into separate, smaller, root domains using exclusive
+ cpusets and enable EAS locally on each of them. This option has the
+ benefit to work out of the box but the drawback of preventing load
+ balance between root domains, which can result in an unbalanced system
+ overall;
+ 2. submit patches to reduce the complexity of the EAS wake-up algorithm,
+ hence enabling it to cope with larger EMs in reasonable time.
+
+
+ 6.4 - Schedutil governor
+
+EAS tries to predict at which OPP the CPUs will be running in the near future
+in order to estimate their energy consumption. To do so, it is assumed that OPPs
+of CPUs follow their utilization.
+
+Although it is very difficult to provide hard guarantees regarding the accuracy
+of this assumption in practice (because the hardware might not do what it is
+told to do, for example), schedutil, as opposed to other CPUFreq governors, at
+least _requests_ frequencies calculated using the utilization signals.
+Consequently, the only sane governor to use together with EAS is schedutil,
+because it is the only one providing some degree of consistency between
+frequency requests and energy predictions.
+
+Using EAS with any other governor than schedutil is not supported.
+
+
+ 6.5 Scale-invariant utilization signals
+
+In order to make accurate prediction across CPUs and for all performance
+states, EAS needs frequency-invariant and CPU-invariant PELT signals. These can
+be obtained using the architecture-defined arch_scale{cpu,freq}_capacity()
+callbacks.
+
+Using EAS on a platform that doesn't implement these two callbacks is not
+supported.
+
+
+ 6.6 Multithreading (SMT)
+
+EAS in its current form is SMT unaware and is not able to leverage
+multithreaded hardware to save energy. EAS considers threads as independent
+CPUs, which can actually be counter-productive for both performance and energy.
+
+EAS on SMT is not supported.
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 368a07a165f5..7d7c191102a7 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -254,10 +254,12 @@ alc274-dell-aio
ALC274 fixups on Dell AIO machines
alc255-dummy-lineout
Dell Precision 3930 fixups
-alc255-dell-headset"},
+alc255-dell-headset
Dell Precision 3630 fixups
alc295-hp-x360
HP Spectre X360 fixups
+alc-sense-combo
+ Headset button support for Chrome platform
ALC66x/67x/892
==============
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index b37234afdfa1..6b154dbb02cc 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -3520,14 +3520,14 @@ allocator will try to get an area as large as possible within the
given size.
The second argument (type) and the third argument (device pointer) are
-dependent on the bus. In the case of the ISA bus, pass
-:c:func:`snd_dma_isa_data()` as the third argument with
+dependent on the bus. For normal devices, pass the device pointer
+(typically identical to ``card->dev``) to the third argument with
``SNDRV_DMA_TYPE_DEV`` type. For the continuous buffer unrelated to the
bus can be pre-allocated with ``SNDRV_DMA_TYPE_CONTINUOUS`` type and the
``snd_dma_continuous_data(GFP_KERNEL)`` device pointer, where
-``GFP_KERNEL`` is the kernel allocation flag to use. For the PCI
-scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with
-``snd_dma_pci_data(pci)`` (see the `Non-Contiguous Buffers`_
+``GFP_KERNEL`` is the kernel allocation flag to use. For the
+scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with the device
+pointer (see the `Non-Contiguous Buffers`_
section).
Once the buffer is pre-allocated, you can use the allocator in the
@@ -3924,15 +3924,12 @@ The scheme of the real suspend job is as follows.
2. Call :c:func:`snd_power_change_state()` with
``SNDRV_CTL_POWER_D3hot`` to change the power status.
-3. Call :c:func:`snd_pcm_suspend_all()` to suspend the running
- PCM streams.
-
-4. If AC97 codecs are used, call :c:func:`snd_ac97_suspend()` for
+3. If AC97 codecs are used, call :c:func:`snd_ac97_suspend()` for
each codec.
-5. Save the register values if necessary.
+4. Save the register values if necessary.
-6. Stop the hardware if necessary.
+5. Stop the hardware if necessary.
A typical code would be like:
@@ -3946,12 +3943,10 @@ A typical code would be like:
/* (2) */
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
/* (3) */
- snd_pcm_suspend_all(chip->pcm);
- /* (4) */
snd_ac97_suspend(chip->ac97);
- /* (5) */
+ /* (4) */
snd_mychip_save_registers(chip);
- /* (6) */
+ /* (5) */
snd_mychip_stop_hardware(chip);
return 0;
}
@@ -3994,13 +3989,9 @@ A typical code would be like:
return 0;
}
-As shown in the above, it's better to save registers after suspending
-the PCM operations via :c:func:`snd_pcm_suspend_all()` or
-:c:func:`snd_pcm_suspend()`. It means that the PCM streams are
-already stopped when the register snapshot is taken. But, remember that
-you don't have to restart the PCM stream in the resume callback. It'll
-be restarted via trigger call with ``SNDRV_PCM_TRIGGER_RESUME`` when
-necessary.
+Note that, at the time this callback gets called, the PCM stream has
+been already suspended via its own PM ops calling
+:c:func:`snd_pcm_suspend_all()` internally.
OK, we have all callbacks now. Let's set them up. In the initialization
of the card, make sure that you can get the chip data from the card
diff --git a/Documentation/sound/soc/dpcm.rst b/Documentation/sound/soc/dpcm.rst
index f6845b2278ea..77f67ded53de 100644
--- a/Documentation/sound/soc/dpcm.rst
+++ b/Documentation/sound/soc/dpcm.rst
@@ -13,7 +13,7 @@ drivers that expose several ALSA PCMs and can route to multiple DAIs.
The DPCM runtime routing is determined by the ALSA mixer settings in the same
way as the analog signal is routed in an ASoC codec driver. DPCM uses a DAPM
graph representing the DSP internal audio paths and uses the mixer settings to
-determine the patch used by each ALSA PCM.
+determine the path used by each ALSA PCM.
DPCM re-uses all the existing component codec, platform and DAI drivers without
any modifications.
@@ -101,7 +101,7 @@ The audio driver processes this as follows :-
4. Machine driver or audio HAL enables the speaker path.
-5. DPCM runs the PCM ops for startup(), hw_params(), prepapre() and
+5. DPCM runs the PCM ops for startup(), hw_params(), prepare() and
trigger(start) for DAI1 Speakers since the path is enabled.
In this example, the machine driver or userspace audio HAL can alter the routing
@@ -221,7 +221,7 @@ like a BT phone call :-
This allows the host CPU to sleep while the DSP, MODEM DAI and the BT DAI are
still in operation.
-A BE DAI link can also set the codec to a dummy device if the code is a device
+A BE DAI link can also set the codec to a dummy device if the codec is a device
that is managed externally.
Likewise a BE DAI can also set a dummy cpu DAI if the CPU DAI is managed by the
@@ -249,7 +249,7 @@ configuration.
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- /* The DSP will covert the FE rate to 48k, stereo */
+ /* The DSP will convert the FE rate to 48k, stereo */
rate->min = rate->max = 48000;
channels->min = channels->max = 2;
@@ -386,5 +386,3 @@ This means creating a new FE that is connected with a virtual path to both
DAI links. The DAI links will be started when the FE PCM is started and stopped
when the FE PCM is stopped. Note that the FE PCM cannot read or write data in
this configuration.
-
-
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
index 13a0b7fb192f..551325b66b23 100644
--- a/Documentation/spi/pxa2xx
+++ b/Documentation/spi/pxa2xx
@@ -21,15 +21,15 @@ Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
"platform device". The master configuration is passed to the driver via a table
found in include/linux/spi/pxa2xx_spi.h:
-struct pxa2xx_spi_master {
+struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
};
-The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of
+The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of
slave device (chips) attached to this SPI master.
-The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should
+The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should
be used. This causes the driver to acquire two DMA channels: rx_channel and
tx_channel. The rx_channel has a higher DMA service priority than the tx_channel.
See the "PXA2xx Developer Manual" section "DMA Controller".
@@ -51,7 +51,7 @@ static struct resource pxa_spi_nssp_resources[] = {
},
};
-static struct pxa2xx_spi_master pxa_nssp_master_info = {
+static struct pxa2xx_spi_controller pxa_nssp_master_info = {
.num_chipselect = 1, /* Matches the number of chips attached to NSSP */
.enable_dma = 1, /* Enables NSSP DMA */
};
@@ -206,7 +206,7 @@ DMA and PIO I/O Support
-----------------------
The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
transfers. The driver defaults to PIO mode and DMA transfers must be enabled
-by setting the "enable_dma" flag in the "pxa2xx_spi_master" structure. The DMA
+by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure. The DMA
mode supports both coherent and stream based DMA mappings.
The following logic is used to determine the type of I/O to be used on
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 819caf8ca05f..ebc679bcb2dc 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -56,26 +56,34 @@ of any kernel data structures.
dentry-state:
-From linux/fs/dentry.c:
+From linux/include/linux/dcache.h:
--------------------------------------------------------------
-struct {
+struct dentry_stat_t dentry_stat {
int nr_dentry;
int nr_unused;
int age_limit; /* age in seconds */
int want_pages; /* pages requested by system */
- int dummy[2];
-} dentry_stat = {0, 0, 45, 0,};
---------------------------------------------------------------
-
-Dentries are dynamically allocated and deallocated, and
-nr_dentry seems to be 0 all the time. Hence it's safe to
-assume that only nr_unused, age_limit and want_pages are
-used. Nr_unused seems to be exactly what its name says.
+ int nr_negative; /* # of unused negative dentries */
+ int dummy; /* Reserved for future use */
+};
+--------------------------------------------------------------
+
+Dentries are dynamically allocated and deallocated.
+
+nr_dentry shows the total number of dentries allocated (active
++ unused). nr_unused shows the number of dentries that are not
+actively used, but are saved in the LRU list for future reuse.
+
Age_limit is the age in seconds after which dcache entries
can be reclaimed when memory is short and want_pages is
nonzero when shrink_dcache_pages() has been called and the
dcache isn't pruned yet.
+nr_negative shows the number of unused dentries that are also
+negative dentries, which do not map to any files. Instead,
+they help speed up rejection of non-existing files requested
+by users.
+
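As an aside, a minimal userspace sketch (not taken from the kernel tree) that reads these counters, assuming the usual single-line, whitespace-separated layout of /proc/sys/fs/dentry-state:

#include <stdio.h>

int main(void)
{
	long nr_dentry, nr_unused, age_limit, want_pages, nr_negative, dummy;
	FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

	if (!f)
		return 1;
	/* Field order follows struct dentry_stat_t described above. */
	if (fscanf(f, "%ld %ld %ld %ld %ld %ld", &nr_dentry, &nr_unused,
		   &age_limit, &want_pages, &nr_negative, &dummy) == 6)
		printf("allocated=%ld unused=%ld negative=%ld\n",
		       nr_dentry, nr_unused, nr_negative);
	fclose(f);
	return 0;
}
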
==============================================================
dquot-max & dquot-nr:
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index c0527d8a468a..379063e58326 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -79,6 +79,7 @@ show up in /proc/sys/kernel:
- reboot-cmd [ SPARC only ]
- rtsig-max
- rtsig-nr
+- sched_energy_aware
- seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst
- sem
- sem_next_id [ sysv ipc ]
@@ -890,6 +891,17 @@ rtsig-nr shows the number of RT signals currently queued.
==============================================================
+sched_energy_aware:
+
+Enables/disables Energy Aware Scheduling (EAS). EAS starts
+automatically on platforms where it can run (that is,
+platforms with asymmetric CPU topologies and an Energy Model
+available). If your platform happens to meet the
+requirements for EAS but you do not want to use it, change
+this value to 0.
+
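A minimal sketch of flipping the knob from a program rather than a shell, assuming the entry appears as /proc/sys/kernel/sched_energy_aware per the list above:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Write "0" to opt out of EAS; write "1" to re-enable it. */
	int fd = open("/proc/sys/kernel/sched_energy_aware", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "0", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
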
+==============================================================
+
sched_schedstats:
Enables/disables scheduler statistics. Enabling this feature
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 2793d4eac55f..2ae91d3873bb 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -52,6 +52,7 @@ two flavors of JITs, the newer eBPF JIT currently supported on:
- sparc64
- mips64
- s390x
+ - riscv
And the older cBPF JIT supported on the following archs:
- mips
@@ -291,6 +292,20 @@ user space is responsible for creating them if needed.
Default : 0 (for compatibility reasons)
+devconf_inherit_init_net
+----------------------------
+
+Controls whether a new network namespace should inherit all current
+settings under /proc/sys/net/{ipv4,ipv6}/conf/{all,default}/. By
+default, we keep the current behavior: for IPv4 we inherit all current
+settings from init_net and for IPv6 we reset all settings to default.
+
+If set to 1, both IPv4 and IPv6 settings are forced to inherit from
+current ones in init_net. If set to 2, both IPv4 and IPv6 settings are
+forced to reset to their default values.
+
+Default : 0 (for compatibility reasons)
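A hedged sketch of using this knob right before creating a namespace; the path /proc/sys/net/core/devconf_inherit_init_net is an assumption here, and unshare(CLONE_NEWNET) needs CAP_SYS_ADMIN:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <unistd.h>

int main(void)
{
	/* Path assumed; "1" asks for both IPv4 and IPv6 settings to be
	 * inherited, as described above. */
	int fd = open("/proc/sys/net/core/devconf_inherit_init_net", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);

	/* Network namespaces created from now on follow the chosen policy. */
	return unshare(CLONE_NEWNET) ? 1 : 0;
}
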
+
2. /proc/sys/net/unix - Parameters for Unix domain sockets
-------------------------------------------------------
diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt
index 89ab09e78e8d..f07e38094b40 100644
--- a/Documentation/trace/coresight-cpu-debug.txt
+++ b/Documentation/trace/coresight-cpu-debug.txt
@@ -165,7 +165,7 @@ Do some work...
The same can also be done from an application program.
Disable specific CPU's specific idle state from cpuidle sysfs (see
-Documentation/cpuidle/sysfs.txt):
+Documentation/admin-guide/pm/cpuidle.rst):
# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
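A rough sketch of the "application program" route mentioned above, assuming the same sysfs layout as the echo command:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int disable_idle_state(int cpu, int state)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/disable",
		 cpu, state);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Example: disable idle state 1 on CPU 0. */
	return disable_idle_state(0, 1) ? 1 : 0;
}
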
diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst
index 80f5ffc94a9e..b37166817842 100644
--- a/Documentation/translations/it_IT/admin-guide/README.rst
+++ b/Documentation/translations/it_IT/admin-guide/README.rst
@@ -4,7 +4,7 @@
.. _it_readme:
-Rilascio del kernel Linux 4.x <http://kernel.org/>
+Rilascio del kernel Linux 5.x <http://kernel.org/>
===================================================
.. warning::
diff --git a/Documentation/usb/authorization.txt b/Documentation/usb/authorization.txt
index f901ec77439c..9dd1dc7b1009 100644
--- a/Documentation/usb/authorization.txt
+++ b/Documentation/usb/authorization.txt
@@ -34,7 +34,9 @@ $ echo 1 > /sys/bus/usb/devices/usbX/authorized_default
Wired USB devices are authorized by default to
connect. Wireless USB hosts deauthorize by default all new connected
devices (this is so because we need to do an authentication phase
-before authorizing).
+before authorizing). Writing "2" to the authorized_default attribute
+causes the kernel to authorize by default only those devices connected
+to internal USB ports.
Example system lockdown (lame)
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
index c4dbe6f7cdae..1129c7550a48 100644
--- a/Documentation/userspace-api/spec_ctrl.rst
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -28,18 +28,20 @@ PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
the following meaning:
-==== ===================== ===================================================
-Bit Define Description
-==== ===================== ===================================================
-0 PR_SPEC_PRCTL Mitigation can be controlled per task by
- PR_SET_SPECULATION_CTRL.
-1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
- disabled.
-2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
- enabled.
-3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
- subsequent prctl(..., PR_SPEC_ENABLE) will fail.
-==== ===================== ===================================================
+==== ====================== ==================================================
+Bit Define Description
+==== ====================== ==================================================
+0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+ PR_SET_SPECULATION_CTRL.
+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+ disabled.
+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+ enabled.
+3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
+ subsequent prctl(..., PR_SPEC_ENABLE) will fail.
+4 PR_SPEC_DISABLE_NOEXEC Same as PR_SPEC_DISABLE, but the state will be
+ cleared on :manpage:`execve(2)`.
+==== ====================== ==================================================
If all bits are 0 the CPU is not affected by the speculation misfeature.
@@ -92,6 +94,7 @@ Speculation misfeature controls
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE_NOEXEC, 0, 0);
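A hedged sketch of driving this interface from userspace; it assumes uapi headers new enough to define PR_SPEC_DISABLE_NOEXEC:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0)
		return 1;
	if (!(state & PR_SPEC_PRCTL)) {
		printf("per-task speculation control not available\n");
		return 0;
	}
	printf("store bypass mitigation currently %s\n",
	       state & PR_SPEC_DISABLE ? "enabled" : "disabled");

	/* Disable speculation (enable the mitigation) until the next execve(). */
	return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		     PR_SPEC_DISABLE_NOEXEC, 0, 0) ? 1 : 0;
}
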
- PR_SPEC_INDIR_BRANCH: Indirect Branch Speculation in User Processes
(Mitigate Spectre V2 style attacks against user processes)
diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst
index 71d6d257074f..659bbc093b52 100644
--- a/Documentation/virtual/kvm/amd-memory-encryption.rst
+++ b/Documentation/virtual/kvm/amd-memory-encryption.rst
@@ -242,6 +242,6 @@ References
==========
.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf
-.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM_API_Specification.pdf
.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34)
.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf
diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt
index d9aed8303984..c1f95b59e14d 100644
--- a/Documentation/x86/resctrl_ui.txt
+++ b/Documentation/x86/resctrl_ui.txt
@@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com>
Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
-This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo
+This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo
flag bits:
RDT (Resource Director Technology) Allocation - "rdt_a"
CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
diff --git a/Kbuild b/Kbuild
index 65db5bef2e36..4a4c47c38d1d 100644
--- a/Kbuild
+++ b/Kbuild
@@ -6,7 +6,8 @@
# 2) Generate timeconst.h
# 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
# 4) Check for missing system calls
-# 5) Generate constants.py (may need bounds.h)
+# 5) Check atomic headers are up-to-date
+# 6) Generate constants.py (may need bounds.h)
#####
# 1) Generate bounds.h
@@ -59,7 +60,20 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
$(call cmd,syscalls)
#####
-# 5) Generate constants for Python GDB integration
+# 5) Check atomic headers are up-to-date
+#
+
+always += old-atomics
+targets += old-atomics
+
+quiet_cmd_atomics = CALL $<
+ cmd_atomics = $(CONFIG_SHELL) $<
+
+old-atomics: scripts/atomic/check-atomics.sh FORCE
+ $(call cmd,atomics)
+
+#####
+# 6) Generate constants for Python GDB integration
#
extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py
diff --git a/MAINTAINERS b/MAINTAINERS
index 32d444476a90..09d47cd02a14 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -331,6 +331,7 @@ ACPI APEI
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Len Brown <lenb@kernel.org>
L: linux-acpi@vger.kernel.org
+R: James Morse <james.morse@arm.com>
R: Tony Luck <tony.luck@intel.com>
R: Borislav Petkov <bp@alien8.de>
F: drivers/acpi/apei/
@@ -409,8 +410,7 @@ F: drivers/platform/x86/wmi.c
F: include/uapi/linux/wmi.h
AD1889 ALSA SOUND DRIVER
-M: Thibaut Varene <T-Bone@parisc-linux.org>
-W: http://wiki.parisc-linux.org/AD1889
+W: https://parisc.wiki.kernel.org/index.php/AD1889
L: linux-parisc@vger.kernel.org
S: Maintained
F: sound/pci/ad1889.*
@@ -854,6 +854,22 @@ S: Supported
F: drivers/iio/adc/ad7124.c
F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt
+ANALOG DEVICES INC AD7606 DRIVER
+M: Stefan Popa <stefan.popa@analog.com>
+L: linux-iio@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/iio/adc/ad7606.c
+F: Documentation/devicetree/bindings/iio/adc/ad7606.txt
+
+ANALOG DEVICES INC AD7768-1 DRIVER
+M: Stefan Popa <stefan.popa@analog.com>
+L: linux-iio@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/iio/adc/ad7768-1.c
+F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt
+
ANALOG DEVICES INC AD9389B DRIVER
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
@@ -1037,26 +1053,26 @@ F: drivers/net/appletalk/
F: net/appletalk/
APPLIED MICRO (APM) X-GENE DEVICE TREE SUPPORT
-M: Duc Dang <dhdang@apm.com>
+M: Khuong Dinh <khuong@os.amperecomputing.com>
S: Supported
F: arch/arm64/boot/dts/apm/
APPLIED MICRO (APM) X-GENE SOC EDAC
-M: Loc Ho <lho@apm.com>
+M: Khuong Dinh <khuong@os.amperecomputing.com>
S: Supported
F: drivers/edac/xgene_edac.c
F: Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
-M: Iyappan Subramanian <isubramanian@apm.com>
-M: Keyur Chudgar <kchudgar@apm.com>
+M: Iyappan Subramanian <iyappan@os.amperecomputing.com>
+M: Keyur Chudgar <keyur@os.amperecomputing.com>
S: Supported
F: drivers/net/ethernet/apm/xgene-v2/
APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
-M: Iyappan Subramanian <isubramanian@apm.com>
-M: Keyur Chudgar <kchudgar@apm.com>
-M: Quan Nguyen <qnguyen@apm.com>
+M: Iyappan Subramanian <iyappan@os.amperecomputing.com>
+M: Keyur Chudgar <keyur@os.amperecomputing.com>
+M: Quan Nguyen <quan@os.amperecomputing.com>
S: Supported
F: drivers/net/ethernet/apm/xgene/
F: drivers/net/phy/mdio-xgene.c
@@ -1064,7 +1080,7 @@ F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
APPLIED MICRO (APM) X-GENE SOC PMU
-M: Tai Nguyen <ttnguyen@apm.com>
+M: Khuong Dinh <khuong@os.amperecomputing.com>
S: Supported
F: drivers/perf/xgene_pmu.c
F: Documentation/perf/xgene-pmu.txt
@@ -1372,6 +1388,13 @@ F: arch/arm/mach-aspeed/
F: arch/arm/boot/dts/aspeed-*
N: aspeed
+ARM/BITMAIN ARCHITECTURE
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm64/boot/dts/bitmain/
+F: Documentation/devicetree/bindings/arm/bitmain.yaml
+
ARM/CALXEDA HIGHBANK ARCHITECTURE
M: Rob Herring <robh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1531,21 +1554,14 @@ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
M: Sascha Hauer <s.hauer@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
-R: Fabio Estevam <fabio.estevam@nxp.com>
+R: Fabio Estevam <festevam@gmail.com>
R: NXP Linux Team <linux-imx@nxp.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
-F: arch/arm/mach-imx/
-F: arch/arm/mach-mxs/
-F: arch/arm/boot/dts/imx*
-F: arch/arm/configs/imx*_defconfig
-F: arch/arm64/boot/dts/freescale/imx*
-F: drivers/clk/imx/
-F: drivers/firmware/imx/
-F: drivers/soc/imx/
-F: include/linux/firmware/imx/
-F: include/soc/imx/
+N: imx
+N: mxs
+X: drivers/media/i2c/
ARM/FREESCALE VYBRID ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
@@ -1737,6 +1753,7 @@ F: arch/arm/configs/mvebu_*_defconfig
F: arch/arm/mach-mvebu/
F: arch/arm64/boot/dts/marvell/armada*
F: drivers/cpufreq/armada-37xx-cpufreq.c
+F: drivers/cpufreq/armada-8k-cpufreq.c
F: drivers/cpufreq/mvebu-cpufreq.c
F: drivers/irqchip/irq-armada-370-xp.c
F: drivers/irqchip/irq-mvebu-*
@@ -1948,19 +1965,37 @@ M: David Brown <david.brown@linaro.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/soc/qcom/
+F: Documentation/devicetree/bindings/*/qcom*
F: arch/arm/boot/dts/qcom-*.dts
F: arch/arm/boot/dts/qcom-*.dtsi
F: arch/arm/mach-qcom/
-F: arch/arm64/boot/dts/qcom/*
+F: arch/arm64/boot/dts/qcom/
+F: drivers/*/qcom/
+F: drivers/*/qcom*
+F: drivers/*/*/qcom/
+F: drivers/*/*/qcom*
+F: drivers/*/pm8???-*
+F: drivers/bluetooth/btqcomsmd.c
+F: drivers/clocksource/timer-qcom.c
+F: drivers/extcon/extcon-qcom*
+F: drivers/iommu/msm*
F: drivers/i2c/busses/i2c-qup.c
-F: drivers/clk/qcom/
-F: drivers/dma/qcom/
-F: drivers/soc/qcom/
+F: drivers/i2c/busses/i2c-qcom-geni.c
+F: drivers/mfd/ssbi.c
+F: drivers/mmc/host/mmci_qcom*
+F: drivers/mmc/host/sdhci_msm.c
+F: drivers/pci/controller/dwc/pcie-qcom.c
+F: drivers/phy/qualcomm/
+F: drivers/power/*/msm*
+F: drivers/reset/reset-qcom-*
+F: drivers/scsi/ufs/ufs-qcom.*
F: drivers/spi/spi-qup.c
+F: drivers/spi/spi-geni-qcom.c
+F: drivers/spi/spi-qcom-qspi.c
F: drivers/tty/serial/msm_serial.c
-F: drivers/*/pm8???-*
-F: drivers/mfd/ssbi.c
-F: drivers/firmware/qcom_scm*
+F: drivers/usb/dwc3/dwc3-qcom.c
+F: include/dt-bindings/*/qcom*
+F: include/linux/*/qcom*
T: git git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux.git
ARM/RADISYS ENP2611 MACHINE SUPPORT
@@ -1997,7 +2032,7 @@ Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
S: Supported
F: arch/arm64/boot/dts/renesas/
-F: Documentation/devicetree/bindings/arm/shmobile.txt
+F: Documentation/devicetree/bindings/arm/renesas.yaml
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
@@ -2109,6 +2144,8 @@ Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
S: Supported
F: arch/arm/boot/dts/emev2*
+F: arch/arm/boot/dts/gr-peach*
+F: arch/arm/boot/dts/iwg20d-q7*
F: arch/arm/boot/dts/r7s*
F: arch/arm/boot/dts/r8a*
F: arch/arm/boot/dts/r9a*
@@ -2116,7 +2153,7 @@ F: arch/arm/boot/dts/sh*
F: arch/arm/configs/shmobile_defconfig
F: arch/arm/include/debug/renesas-scif.S
F: arch/arm/mach-shmobile/
-F: Documentation/devicetree/bindings/arm/shmobile.txt
+F: Documentation/devicetree/bindings/arm/renesas.yaml
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
@@ -2609,6 +2646,7 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/*/include/asm/atomic*.h
F: include/*/atomic*.h
+F: scripts/atomic/
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
M: Bradley Grove <linuxdrivers@attotech.com>
@@ -2848,8 +2886,11 @@ F: include/uapi/linux/if_bonding.h
BPF (Safe dynamic programs and tools)
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
+R: Martin KaFai Lau <kafai@fb.com>
+R: Song Liu <songliubraving@fb.com>
+R: Yonghong Song <yhs@fb.com>
L: netdev@vger.kernel.org
-L: linux-kernel@vger.kernel.org
+L: bpf@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
@@ -2873,10 +2914,13 @@ F: samples/bpf/
F: tools/bpf/
F: tools/lib/bpf/
F: tools/testing/selftests/bpf/
+K: bpf
+N: bpf
BPF JIT for ARM
M: Shubham Bansal <illusionist.neo@gmail.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/arm/net/
@@ -2885,18 +2929,21 @@ M: Daniel Borkmann <daniel@iogearbox.net>
M: Alexei Starovoitov <ast@kernel.org>
M: Zi Shen Lim <zlim.lnx@gmail.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Supported
F: arch/arm64/net/
BPF JIT for MIPS (32-BIT AND 64-BIT)
M: Paul Burton <paul.burton@mips.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/mips/net/
BPF JIT for NFP NICs
M: Jakub Kicinski <jakub.kicinski@netronome.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Supported
F: drivers/net/ethernet/netronome/nfp/bpf/
@@ -2904,13 +2951,21 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT)
M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
M: Sandipan Das <sandipan@linux.ibm.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/powerpc/net/
+BPF JIT for RISC-V (RV64G)
+M: Björn Töpel <bjorn.topel@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/riscv/net/
+
BPF JIT for S390
M: Martin Schwidefsky <schwidefsky@de.ibm.com>
M: Heiko Carstens <heiko.carstens@de.ibm.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/s390/net/
X: arch/s390/net/pnet.c
@@ -2918,12 +2973,14 @@ X: arch/s390/net/pnet.c
BPF JIT for SPARC (32-BIT AND 64-BIT)
M: David S. Miller <davem@davemloft.net>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/sparc/net/
BPF JIT for X86 32-BIT
M: Wang YanQing <udknight@gmail.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/x86/net/bpf_jit_comp32.c
@@ -2931,6 +2988,7 @@ BPF JIT for X86 64-BIT
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Supported
F: arch/x86/net/
X: arch/x86/net/bpf_jit_comp32.c
@@ -3052,8 +3110,8 @@ F: include/linux/bcm963xx_nvram.h
F: include/linux/bcm963xx_tag.h
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@cavium.com>
-M: Dept-GELinuxNICDev@cavium.com
+M: Rasesh Mody <rmody@marvell.com>
+M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2.*
@@ -3072,9 +3130,9 @@ S: Supported
F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <ariel.elior@cavium.com>
-M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M: everest-linux-l2@cavium.com
+M: Ariel Elior <aelior@marvell.com>
+M: Sudarsana Kalluru <skalluru@marvell.com>
+M: GR-everest-linux-l2@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
@@ -3249,9 +3307,9 @@ S: Supported
F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@cavium.com>
-M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M: Dept-GELinuxNICDev@cavium.com
+M: Rasesh Mody <rmody@marvell.com>
+M: Sudarsana Kalluru <skalluru@marvell.com>
+M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/brocade/bna/
@@ -3385,9 +3443,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic*
F: drivers/media/platform/marvell-ccic/
CAIF NETWORK LAYER
-M: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
L: netdev@vger.kernel.org
-S: Supported
+S: Orphan
F: Documentation/networking/caif/
F: drivers/net/caif/
F: include/uapi/linux/caif/
@@ -3471,10 +3528,9 @@ F: drivers/i2c/busses/i2c-octeon*
F: drivers/i2c/busses/i2c-thunderx*
CAVIUM LIQUIDIO NETWORK DRIVER
-M: Derek Chickles <derek.chickles@caviumnetworks.com>
-M: Satanand Burla <satananda.burla@caviumnetworks.com>
-M: Felix Manlunas <felix.manlunas@caviumnetworks.com>
-M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
+M: Derek Chickles <dchickles@marvell.com>
+M: Satanand Burla <sburla@marvell.com>
+M: Felix Manlunas <fmanlunas@marvell.com>
L: netdev@vger.kernel.org
W: http://www.cavium.com
S: Supported
@@ -3512,7 +3568,6 @@ F: include/linux/spi/cc2520.h
F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER
-M: Yael Chemla <yael.chemla@foss.arm.com>
M: Gilad Ben-Yossef <gilad@benyossef.com>
L: linux-crypto@vger.kernel.org
S: Supported
@@ -3688,6 +3743,14 @@ N: cros_ec
N: cros-ec
F: drivers/power/supply/cros_usbpd-charger.c
+CHROMEOS EC CODEC DRIVER
+M: Cheng-Yi Chiang <cychiang@chromium.org>
+S: Maintained
+R: Enric Balletbo i Serra <enric.balletbo@collabora.com>
+R: Guenter Roeck <groeck@chromium.org>
+F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+F: sound/soc/codecs/cros_ec_codec.*
+
CIRRUS LOGIC AUDIO CODEC DRIVERS
M: Brian Austin <brian.austin@cirrus.com>
M: Paul Handrigan <Paul.Handrigan@cirrus.com>
@@ -3907,9 +3970,10 @@ M: Johannes Weiner <hannes@cmpxchg.org>
L: cgroups@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
-F: Documentation/cgroup*
+F: Documentation/admin-guide/cgroup-v2.rst
+F: Documentation/cgroup-v1/
F: include/linux/cgroup*
-F: kernel/cgroup*
+F: kernel/cgroup/
CONTROL GROUP - CPUSET
M: Li Zefan <lizefan@huawei.com>
@@ -3951,14 +4015,16 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ti/cpmac.c
-CPU FREQUENCY DRIVERS
+CPU FREQUENCY SCALING FRAMEWORK
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Viresh Kumar <viresh.kumar@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
-T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git (For ARM Updates)
B: https://bugzilla.kernel.org
+F: Documentation/admin-guide/pm/cpufreq.rst
+F: Documentation/admin-guide/pm/intel_pstate.rst
F: Documentation/cpu-freq/
F: Documentation/devicetree/bindings/cpufreq/
F: drivers/cpufreq/
@@ -3977,6 +4043,7 @@ F: drivers/cpufreq/arm_big_little.c
CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger <trenn@suse.com>
M: Shuah Khan <shuah@kernel.org>
+M: Shuah Khan <skhan@linuxfoundation.org>
L: linux-pm@vger.kernel.org
S: Maintained
F: tools/power/cpupower/
@@ -4006,13 +4073,15 @@ S: Supported
F: drivers/cpuidle/cpuidle-exynos.c
F: arch/arm/mach-exynos/pm.c
-CPUIDLE DRIVERS
+CPU IDLE TIME MANAGEMENT FRAMEWORK
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
B: https://bugzilla.kernel.org
+F: Documentation/admin-guide/pm/cpuidle.rst
+F: Documentation/driver-api/pm/cpuidle.rst
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
@@ -4120,7 +4189,7 @@ S: Maintained
F: drivers/media/dvb-frontends/cxd2820r*
CXGB3 ETHERNET DRIVER (CXGB3)
-M: Arjun Vynipadath <arjun@chelsio.com>
+M: Vishal Kulkarni <vishal@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -4149,7 +4218,7 @@ S: Supported
F: drivers/crypto/chelsio
CXGB4 ETHERNET DRIVER (CXGB4)
-M: Arjun Vynipadath <arjun@chelsio.com>
+M: Vishal Kulkarni <vishal@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -5178,7 +5247,7 @@ DRM DRIVERS FOR XEN
M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
T: git git://anongit.freedesktop.org/drm/drm-misc
L: dri-devel@lists.freedesktop.org
-L: xen-devel@lists.xen.org
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: drivers/gpu/drm/xen/
F: Documentation/gpu/xen-front.rst
@@ -5879,6 +5948,7 @@ L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/*
F: include/linux/fs.h
+F: include/linux/fs_types.h
F: include/uapi/linux/fs.h
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
@@ -6021,6 +6091,12 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
+FREESCALE ENETC ETHERNET DRIVERS
+M: Claudiu Manoil <claudiu.manoil@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/freescale/enetc/
+
FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
M: Claudiu Manoil <claudiu.manoil@nxp.com>
L: netdev@vger.kernel.org
@@ -6084,15 +6160,17 @@ FREESCALE QORIQ PTP CLOCK DRIVER
M: Yangbo Lu <yangbo.lu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
+F: drivers/net/ethernet/freescale/enetc/enetc_ptp.c
F: drivers/ptp/ptp_qoriq.c
+F: drivers/ptp/ptp_qoriq_debugfs.c
F: include/linux/fsl/ptp_qoriq.h
F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
FREESCALE QUAD SPI DRIVER
M: Han Xu <han.xu@nxp.com>
-L: linux-mtd@lists.infradead.org
+L: linux-spi@vger.kernel.org
S: Maintained
-F: drivers/mtd/spi-nor/fsl-quadspi.c
+F: drivers/spi/spi-fsl-qspi.c
FREESCALE QUICC ENGINE LIBRARY
M: Qiang Zhao <qiang.zhao@nxp.com>
@@ -6143,7 +6221,7 @@ FREESCALE SOC SOUND DRIVERS
M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
-R: Fabio Estevam <fabio.estevam@nxp.com>
+R: Fabio Estevam <festevam@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
@@ -6639,6 +6717,15 @@ F: drivers/clocksource/h8300_*.c
F: drivers/clk/h8300/
F: drivers/irqchip/irq-renesas-h8*.c
+HABANALABS PCI DRIVER
+M: Oded Gabbay <oded.gabbay@gmail.com>
+T: git https://github.com/HabanaAI/linux.git
+S: Supported
+F: drivers/misc/habanalabs/
+F: include/uapi/misc/habanalabs.h
+F: Documentation/ABI/testing/sysfs-driver-habanalabs
+F: Documentation/ABI/testing/debugfs-driver-habanalabs
+
HACKRF MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -6996,7 +7083,7 @@ M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
M: Sasha Levin <sashal@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
-L: devel@linuxdriverproject.org
+L: linux-hyperv@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/microsoft/netvsc.txt
F: arch/x86/include/asm/mshyperv.h
@@ -7161,6 +7248,7 @@ F: drivers/i2c/i2c-stub.c
I3C SUBSYSTEM
M: Boris Brezillon <bbrezillon@kernel.org>
L: linux-i3c@lists.infradead.org
+C: irc://chat.freenode.net/linux-i3c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-i3c
@@ -7880,6 +7968,16 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-intel-mid.c
+INTERCONNECT API
+M: Georgi Djakov <georgi.djakov@linaro.org>
+S: Maintained
+F: Documentation/interconnect/
+F: Documentation/devicetree/bindings/interconnect/
+F: drivers/interconnect/
+F: include/dt-bindings/interconnect/
+F: include/linux/interconnect-provider.h
+F: include/linux/interconnect.h
+
INVENSENSE MPU-3050 GYROSCOPE DRIVER
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-iio@vger.kernel.org
@@ -8256,6 +8354,7 @@ F: include/uapi/linux/sunrpc/
KERNEL SELFTEST FRAMEWORK
M: Shuah Khan <shuah@kernel.org>
+M: Shuah Khan <skhan@linuxfoundation.org>
L: linux-kselftest@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
Q: https://patchwork.kernel.org/project/linux-kselftest/list/
@@ -8478,6 +8577,7 @@ L7 BPF FRAMEWORK
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: include/linux/skmsg.h
F: net/core/skmsg.c
@@ -9080,6 +9180,14 @@ F: drivers/gpu/drm/armada/
F: include/uapi/drm/armada_drm.h
F: Documentation/devicetree/bindings/display/armada/
+MARVELL ARMADA 3700 PHY DRIVERS
+M: Miquel Raynal <miquel.raynal@bootlin.com>
+S: Maintained
+F: drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+F: drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+F: Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
+F: Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
+
MARVELL CRYPTO DRIVER
M: Boris Brezillon <bbrezillon@kernel.org>
M: Arnaud Ebalard <arno@natisbad.org>
@@ -9783,6 +9891,14 @@ F: kernel/sched/membarrier.c
F: include/uapi/linux/membarrier.h
F: arch/powerpc/include/asm/membarrier.h
+MEMBLOCK
+M: Mike Rapoport <rppt@linux.ibm.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/memblock.h
+F: mm/memblock.c
+F: Documentation/core-api/boot-time-mm.rst
+
MEMORY MANAGEMENT
L: linux-mm@kvack.org
W: http://www.linux-mm.org
@@ -9849,6 +9965,18 @@ F: drivers/media/platform/meson/ao-cec.c
F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
T: git git://linuxtv.org/media_tree.git
+MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS
+M: Liang Yang <liang.yang@amlogic.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: drivers/mtd/nand/raw/meson_*
+F: Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
+
+METHODE UDPU SUPPORT
+M: Vladimir Vid <vladimir.vid@sartura.hr>
+S: Maintained
+F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+
MICROBLAZE ARCHITECTURE
M: Michal Simek <monstr@monstr.eu>
W: http://www.monstr.eu/fdt/
@@ -10573,6 +10701,7 @@ F: Documentation/devicetree/bindings/net/dsa/
F: net/dsa/
F: include/net/dsa.h
F: include/linux/dsa/
+F: include/linux/platform_data/dsa.h
F: drivers/net/dsa/
NETWORKING [GENERAL]
@@ -10686,9 +10815,9 @@ S: Maintained
F: drivers/net/netdevsim/*
NETXEN (1/10) GbE SUPPORT
-M: Manish Chopra <manish.chopra@cavium.com>
-M: Rahul Verma <rahul.verma@cavium.com>
-M: Dept-GELinuxNICDev@cavium.com
+M: Manish Chopra <manishc@marvell.com>
+M: Rahul Verma <rahulv@marvell.com>
+M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
@@ -10788,6 +10917,12 @@ F: drivers/power/supply/bq27xxx_battery_i2c.c
F: drivers/power/supply/isp1704_charger.c
F: drivers/power/supply/rx51_battery.c
+NOLIBC HEADER FILE
+M: Willy Tarreau <w@1wt.eu>
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
+F: tools/include/nolibc/
+
NTB AMD DRIVER
M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
L: linux-ntb@googlegroups.com
@@ -10889,7 +11024,7 @@ F: include/linux/nvmem-consumer.h
F: include/linux/nvmem-provider.h
NXP SGTL5000 DRIVER
-M: Fabio Estevam <fabio.estevam@nxp.com>
+M: Fabio Estevam <festevam@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -10927,6 +11062,14 @@ F: lib/objagg.c
F: lib/test_objagg.c
F: include/linux/objagg.h
+NXP FSPI DRIVER
+R: Yogesh Gaur <yogeshgaur.83@gmail.com>
+M: Ashish Kumar <ashish.kumar@nxp.com>
+L: linux-spi@vger.kernel.org
+S: Maintained
+F: drivers/spi/spi-nxp-fspi.c
+F: Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt
+
OBJTOOL
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Peter Zijlstra <peterz@infradead.org>
@@ -11260,6 +11403,11 @@ M: Jens Wiklander <jens.wiklander@linaro.org>
S: Maintained
F: drivers/tee/optee/
+OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
+M: Sumit Garg <sumit.garg@linaro.org>
+S: Maintained
+F: drivers/char/hw_random/optee-rng.c
+
OPA-VNIC DRIVER
M: Dennis Dalessandro <dennis.dalessandro@intel.com>
M: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
@@ -11303,10 +11451,12 @@ F: include/dt-bindings/
OPENCORES I2C BUS DRIVER
M: Peter Korsgaard <peter@korsgaard.com>
+M: Andrew Lunn <andrew@lunn.ch>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-ocores
F: drivers/i2c/busses/i2c-ocores.c
+F: include/linux/platform_data/i2c-ocores.h
OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>
@@ -11477,7 +11627,7 @@ F: Documentation/blockdev/paride.txt
F: drivers/block/paride/
PARISC ARCHITECTURE
-M: "James E.J. Bottomley" <jejb@parisc-linux.org>
+M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
M: Helge Deller <deller@gmx.de>
L: linux-parisc@vger.kernel.org
W: http://www.parisc-linux.org/
@@ -11557,7 +11707,7 @@ F: Documentation/devicetree/bindings/pci/altera-pcie.txt
F: drivers/pci/controller/pcie-altera.c
PCI DRIVER FOR APPLIEDMICRO XGENE
-M: Tanmay Inamdar <tinamdar@apm.com>
+M: Toan Le <toan@os.amperecomputing.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
S: Maintained
@@ -11735,7 +11885,7 @@ F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
F: drivers/pci/controller/pcie-altera-msi.c
PCI MSI DRIVER FOR APPLIEDMICRO XGENE
-M: Duc Dang <dhdang@apm.com>
+M: Toan Le <toan@os.amperecomputing.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
S: Maintained
@@ -12224,14 +12374,6 @@ S: Maintained
F: drivers/net/ppp/pptp.c
W: http://sourceforge.net/projects/accel-pptp
-PREEMPTIBLE KERNEL
-M: Robert Love <rml@tech9.net>
-L: kpreempt-tech@lists.sourceforge.net
-W: https://www.kernel.org/pub/linux/kernel/people/rml/preempt-kernel
-S: Supported
-F: Documentation/preempt-locking.txt
-F: include/linux/preempt.h
-
PRINTK
M: Petr Mladek <pmladek@suse.com>
M: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
@@ -12472,8 +12614,8 @@ S: Supported
F: drivers/scsi/qedi/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Ariel Elior <Ariel.Elior@cavium.com>
-M: everest-linux-l2@cavium.com
+M: Ariel Elior <aelior@marvell.com>
+M: GR-everest-linux-l2@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
@@ -12481,8 +12623,8 @@ F: include/linux/qed/
F: drivers/net/ethernet/qlogic/qede/
QLOGIC QL4xxx RDMA DRIVER
-M: Michal Kalderon <Michal.Kalderon@cavium.com>
-M: Ariel Elior <Ariel.Elior@cavium.com>
+M: Michal Kalderon <mkalderon@marvell.com>
+M: Ariel Elior <aelior@marvell.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qedr/
@@ -12502,7 +12644,7 @@ F: Documentation/scsi/LICENSE.qla2xxx
F: drivers/scsi/qla2xxx/
QLOGIC QLA3XXX NETWORK DRIVER
-M: Dept-GELinuxNICDev@cavium.com
+M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
@@ -12516,16 +12658,16 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Shahed Shaikh <Shahed.Shaikh@cavium.com>
-M: Manish Chopra <manish.chopra@cavium.com>
-M: Dept-GELinuxNICDev@cavium.com
+M: Shahed Shaikh <shshaikh@marvell.com>
+M: Manish Chopra <manishc@marvell.com>
+M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Manish Chopra <manish.chopra@cavium.com>
-M: Dept-GELinuxNICDev@cavium.com
+M: Manish Chopra <manishc@marvell.com>
+M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
@@ -12593,11 +12735,11 @@ F: Documentation/media/v4l-drivers/qcom_camss.rst
F: drivers/media/platform/qcom/camss/
QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
-M: Ilia Lin <ilia.lin@gmail.com>
-L: linux-pm@vger.kernel.org
-S: Maintained
-F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
-F: drivers/cpufreq/qcom-cpufreq-kryo.c
+M: Ilia Lin <ilia.lin@kernel.org>
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
+F: drivers/cpufreq/qcom-cpufreq-kryo.c
QUALCOMM EMAC GIGABIT ETHERNET DRIVER
M: Timur Tabi <timur@kernel.org>
@@ -12605,6 +12747,14 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/qualcomm/emac/
+QUALCOMM ETHQOS ETHERNET DRIVER
+M: Vinod Koul <vkoul@kernel.org>
+M: Niklas Cassel <niklas.cassel@linaro.org>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+F: Documentation/devicetree/bindings/net/qcom,dwmac.txt
+
QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Alok Chauhan <alokc@codeaurora.org>
M: Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
@@ -12864,6 +13014,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
F: drivers/net/dsa/realtek-smi*
F: drivers/net/dsa/rtl83*
+REDPINE WIRELESS DRIVER
+M: Amitkumar Karwar <amitkarwar@gmail.com>
+M: Siva Rebbagondla <siva8118@gmail.com>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/rsi/
+
REGISTER MAP ABSTRACTION
M: Mark Brown <broonie@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -12954,6 +13111,7 @@ F: drivers/reset/
F: Documentation/devicetree/bindings/reset/
F: include/dt-bindings/reset/
F: include/linux/reset.h
+F: include/linux/reset/
F: include/linux/reset-controller.h
RESTARTABLE SEQUENCES SUPPORT
@@ -13454,6 +13612,7 @@ F: kernel/sched/
F: include/linux/sched.h
F: include/uapi/linux/sched.h
F: include/linux/wait.h
+F: include/linux/preempt.h
SCR24X CHIP CARD INTERFACE DRIVER
M: Lubomir Rintel <lkundrak@v3.sk>
@@ -13582,11 +13741,18 @@ F: drivers/mmc/host/sdhci-brcmstb*
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
M: Adrian Hunter <adrian.hunter@intel.com>
L: linux-mmc@vger.kernel.org
-T: git git://git.infradead.org/users/ahunter/linux-sdhci.git
S: Maintained
F: drivers/mmc/host/sdhci*
F: include/linux/mmc/sdhci*
+EMMC CMDQ HOST CONTROLLER INTERFACE (CQHCI) DRIVER
+M: Adrian Hunter <adrian.hunter@intel.com>
+M: Ritesh Harjani <riteshh@codeaurora.org>
+M: Asutosh Das <asutoshd@codeaurora.org>
+L: linux-mmc@vger.kernel.org
+S: Maintained
+F: drivers/mmc/host/cqhci*
+
SYNOPSYS SDHCI COMPLIANT DWC MSHC DRIVER
M: Prabu Thangamuthu <prabu.t@synopsys.com>
M: Manjunath M B <manjumb@synopsys.com>
@@ -13692,6 +13858,15 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/sfc/
+SFF/SFP/SFP+ MODULE SUPPORT
+M: Russell King <linux@armlinux.org.uk>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/phylink.c
+F: drivers/net/phy/sfp*
+F: include/linux/phylink.h
+F: include/linux/sfp.h
+
SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
S: Maintained
@@ -13713,6 +13888,7 @@ F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Ursula Braun <ubraun@linux.ibm.com>
+M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@@ -13820,8 +13996,9 @@ F: drivers/media/mmc/siano/
SIFIVE DRIVERS
M: Palmer Dabbelt <palmer@sifive.com>
+M: Paul Walmsley <paul.walmsley@sifive.com>
L: linux-riscv@lists.infradead.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
+T: git git://github.com/sifive/riscv-linux.git
S: Supported
K: sifive
N: sifive
@@ -14303,6 +14480,7 @@ F: arch/arm/mach-spear/
SPI NOR SUBSYSTEM
M: Marek Vasut <marek.vasut@gmail.com>
+M: Tudor Ambarus <tudor.ambarus@microchip.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -14432,6 +14610,11 @@ M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
S: Odd Fixes
F: drivers/staging/rtl8712/
+STAGING - REALTEK RTL8188EU DRIVERS
+M: Larry Finger <Larry.Finger@lwfinger.net>
+S: Odd Fixes
+F: drivers/staging/rtl8188eu/
+
STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
M: Teddy Wang <teddy.wang@siliconmotion.com>
@@ -14462,11 +14645,6 @@ L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/staging/wilc1000/
-STAGING - XGI Z7,Z9,Z11 PCI DISPLAY DRIVER
-M: Arnaud Patard <arnaud.patard@rtp-net.org>
-S: Odd Fixes
-F: drivers/staging/xgifb/
-
STAGING SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
@@ -14671,7 +14849,7 @@ S: Maintained
F: drivers/tty/serial/8250/8250_dw.c
SYNOPSYS DESIGNWARE APB GPIO DRIVER
-M: Hoan Tran <hotran@apm.com>
+M: Hoan Tran <hoan@os.amperecomputing.com>
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-dwapb.c
@@ -15061,6 +15239,13 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: sound/soc/ti/
+Texas Instruments' DAC7612 DAC Driver
+M: Ricardo Ribalda <ricardo@ribalda.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+F: drivers/iio/dac/ti-dac7612.c
+F: Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
+
THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -15372,12 +15557,11 @@ F: mm/shmem.c
TOMOYO SECURITY MODULE
M: Kentaro Takeda <takedakn@nttdata.co.jp>
M: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
-L: tomoyo-dev-en@lists.sourceforge.jp (subscribers-only, for developers in English)
-L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English)
-L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
-L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
-W: http://tomoyo.sourceforge.jp/
-T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.5.x/tomoyo-lsm/patches/
+L: tomoyo-dev-en@lists.osdn.me (subscribers-only, for developers in English)
+L: tomoyo-users-en@lists.osdn.me (subscribers-only, for users in English)
+L: tomoyo-dev@lists.osdn.me (subscribers-only, for developers in Japanese)
+L: tomoyo-users@lists.osdn.me (subscribers-only, for users in Japanese)
+W: https://tomoyo.osdn.jp/
S: Maintained
F: security/tomoyo/
@@ -15802,7 +15986,6 @@ M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
L: usb-storage@lists.one-eyed-alien.net
S: Maintained
-W: http://www.one-eyed-alien.net/~mdharm/linux-usb/
F: drivers/usb/storage/
USB MIDI DRIVER
@@ -15834,6 +16017,7 @@ F: drivers/usb/common/usb-otg-fsm.c
USB OVER IP DRIVER
M: Valentina Manea <valentina.manea.m@gmail.com>
M: Shuah Khan <shuah@kernel.org>
+M: Shuah Khan <skhan@linuxfoundation.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/usb/usbip_protocol.txt
@@ -16631,6 +16815,15 @@ S: Maintained
F: drivers/platform/x86/
F: drivers/platform/olpc/
+X86 PLATFORM DRIVERS - ARCH
+R: Darren Hart <dvhart@infradead.org>
+R: Andy Shevchenko <andy@infradead.org>
+L: platform-driver-x86@vger.kernel.org
+L: x86@kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
+S: Maintained
+F: arch/x86/platform
+
X86 VDSO
M: Andy Lutomirski <luto@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -16663,10 +16856,30 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/tuners/tuner-xc2028.*
+XDP (eXpress Data Path)
+M: Alexei Starovoitov <ast@kernel.org>
+M: Daniel Borkmann <daniel@iogearbox.net>
+M: David S. Miller <davem@davemloft.net>
+M: Jakub Kicinski <jakub.kicinski@netronome.com>
+M: Jesper Dangaard Brouer <hawk@kernel.org>
+M: John Fastabend <john.fastabend@gmail.com>
+L: netdev@vger.kernel.org
+L: xdp-newbies@vger.kernel.org
+L: bpf@vger.kernel.org
+S: Supported
+F: net/core/xdp.c
+F: include/net/xdp.h
+F: kernel/bpf/devmap.c
+F: kernel/bpf/cpumap.c
+F: include/trace/events/xdp.h
+K: xdp
+N: xdp
+
XDP SOCKETS (AF_XDP)
M: Björn Töpel <bjorn.topel@intel.com>
M: Magnus Karlsson <magnus.karlsson@intel.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: kernel/bpf/xskmap.c
F: net/xdp/
diff --git a/Makefile b/Makefile
index 8c55b6404e19..f070e0d65186 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
NAME = Shy Crocodile
# *DOCUMENTATION*
@@ -583,7 +583,7 @@ export KBUILD_MODULES KBUILD_BUILTIN
ifeq ($(KBUILD_EXTMOD),)
# Objects we will link into vmlinux / subdirs we need to visit
init-y := init/
-drivers-y := drivers/ sound/ firmware/
+drivers-y := drivers/ sound/
net-y := net/
libs-y := lib/
core-y := usr/
@@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION
endif
endif
+PHONY += prepare0
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc
# archprepare is used in arch Makefiles and when processed asm symlink,
# version.h and scripts_basic is processed / created.
-# Listed in dependency order
-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
@@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
mrproper-dirs := $(addprefix _mrproper_,scripts)
-PHONY += $(mrproper-dirs) mrproper archmrproper
+PHONY += $(mrproper-dirs) mrproper
$(mrproper-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
-mrproper: clean archmrproper $(mrproper-dirs)
+mrproper: clean $(mrproper-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)
diff --git a/arch/Kconfig b/arch/Kconfig
index 4cfb6de48f79..33687dddd86a 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -276,6 +276,16 @@ config ARCH_THREAD_STACK_ALLOCATOR
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool
+config ARCH_32BIT_OFF_T
+ bool
+ depends on !64BIT
+ help
+ All new 32-bit architectures should have a 64-bit off_t type on
+ the userspace side, which corresponds to the loff_t kernel type. This
+ is a requirement for modern ABIs. Some existing architectures
+ still support 32-bit off_t. This option is enabled for all such
+ architectures explicitly.
+
config HAVE_REGS_AND_STACK_ACCESS_API
bool
help
@@ -701,6 +711,9 @@ config HAVE_ARCH_HASH
file which provides platform-specific implementations of some
functions in <linux/hash.h> or fs/namei.c.
+config HAVE_ARCH_NVRAM_OPS
+ bool
+
config ISA_BUS_API
def_bool ISA
@@ -759,7 +772,7 @@ config 64BIT_TIME
handling.
config COMPAT_32BIT_TIME
- def_bool (!64BIT && 64BIT_TIME) || COMPAT
+ def_bool !64BIT || COMPAT
help
This enables 32 bit time_t support in addition to 64 bit time_t support.
This is relevant on all 32-bit architectures, and 64-bit architectures
@@ -885,6 +898,9 @@ config HAVE_ARCH_PREL32_RELOCATIONS
architectures, and don't require runtime relocation on relocatable
kernels.
+config ARCH_USE_MEMREMAP_PROT
+ bool
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/include/asm/a.out-core.h b/arch/alpha/include/asm/a.out-core.h
deleted file mode 100644
index 1610d078b064..000000000000
--- a/arch/alpha/include/asm/a.out-core.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/mm_types.h>
-
-/*
- * Fill in the user structure for an ECOFF core dump.
- */
-static inline void aout_dump_thread(struct pt_regs *pt, struct user *dump)
-{
- /* switch stack follows right below pt_regs: */
- struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
-
- dump->magic = CMAGIC;
- dump->start_code = current->mm->start_code;
- dump->start_data = current->mm->start_data;
- dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((current->mm->end_code - dump->start_code)
- >> PAGE_SHIFT);
- dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data)
- >> PAGE_SHIFT);
- dump->u_ssize = (current->mm->start_stack - dump->start_stack
- + PAGE_SIZE-1) >> PAGE_SHIFT;
-
- /*
- * We store the registers in an order/format that is
- * compatible with DEC Unix/OSF/1 as this makes life easier
- * for gdb.
- */
- dump->regs[EF_V0] = pt->r0;
- dump->regs[EF_T0] = pt->r1;
- dump->regs[EF_T1] = pt->r2;
- dump->regs[EF_T2] = pt->r3;
- dump->regs[EF_T3] = pt->r4;
- dump->regs[EF_T4] = pt->r5;
- dump->regs[EF_T5] = pt->r6;
- dump->regs[EF_T6] = pt->r7;
- dump->regs[EF_T7] = pt->r8;
- dump->regs[EF_S0] = sw->r9;
- dump->regs[EF_S1] = sw->r10;
- dump->regs[EF_S2] = sw->r11;
- dump->regs[EF_S3] = sw->r12;
- dump->regs[EF_S4] = sw->r13;
- dump->regs[EF_S5] = sw->r14;
- dump->regs[EF_S6] = sw->r15;
- dump->regs[EF_A3] = pt->r19;
- dump->regs[EF_A4] = pt->r20;
- dump->regs[EF_A5] = pt->r21;
- dump->regs[EF_T8] = pt->r22;
- dump->regs[EF_T9] = pt->r23;
- dump->regs[EF_T10] = pt->r24;
- dump->regs[EF_T11] = pt->r25;
- dump->regs[EF_RA] = pt->r26;
- dump->regs[EF_T12] = pt->r27;
- dump->regs[EF_AT] = pt->r28;
- dump->regs[EF_SP] = rdusp();
- dump->regs[EF_PS] = pt->ps;
- dump->regs[EF_PC] = pt->pc;
- dump->regs[EF_GP] = pt->gp;
- dump->regs[EF_A0] = pt->r16;
- dump->regs[EF_A1] = pt->r17;
- dump->regs[EF_A2] = pt->r18;
- memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 4d17cacd1462..432402c8e47f 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -56,15 +56,15 @@
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_LYNX) || \
- defined(CONFIG_ALPHA_SHARK) || \
- defined(CONFIG_ALPHA_EIGER)
+ defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
#elif defined(CONFIG_ALPHA_TITAN)
#define NR_IRQS 80
#elif defined(CONFIG_ALPHA_RAWHIDE) || \
- defined(CONFIG_ALPHA_TAKARA)
+ defined(CONFIG_ALPHA_TAKARA) || \
+ defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 128
#elif defined(CONFIG_ALPHA_WILDFIRE)
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index e6e13a85796a..5a77a40567fa 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -4,6 +4,7 @@
#include <linux/smp.h>
#include <linux/threads.h>
+#include <linux/numa.h>
#include <asm/machvec.h>
#ifdef CONFIG_NUMA
@@ -29,7 +30,7 @@ static const struct cpumask *cpumask_of_node(int node)
{
int cpu;
- if (node == -1)
+ if (node == NUMA_NO_NODE)
return cpu_all_mask;
cpumask_clear(&node_to_cpumask_map[node]);
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index cf4ac791a592..1fe2b56cb861 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -18,7 +18,6 @@
#define USER_DS ((mm_segment_t) { -0x40000000000UL })
#define get_fs() (current_thread_info()->addr_limit)
-#define get_ds() (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#define segment_eq(a, b) ((a).seg == (b).seg)
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 21b706a5b772..986f5da9b7d8 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -19,25 +19,4 @@
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-/*
- * Ignore legacy syscalls that we don't use.
- */
-#define __IGNORE_alarm
-#define __IGNORE_creat
-#define __IGNORE_getegid
-#define __IGNORE_geteuid
-#define __IGNORE_getgid
-#define __IGNORE_getpid
-#define __IGNORE_getppid
-#define __IGNORE_getuid
-#define __IGNORE_pause
-#define __IGNORE_time
-#define __IGNORE_utime
-#define __IGNORE_umount2
-
-/* Alpha doesn't have protection keys. */
-#define __IGNORE_pkey_mprotect
-#define __IGNORE_pkey_alloc
-#define __IGNORE_pkey_free
-
#endif /* _ALPHA_UNISTD_H */
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index f9d4e6b6d4bd..ac23379b7a87 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -10,9 +10,7 @@
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
#define MAP_TYPE 0x0f /* Mask for type of mapping (OSF/1 is _wrong_) */
#define MAP_FIXED 0x100 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x10 /* don't use a file */
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 065fb372e355..0d0fddb7e738 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -3,6 +3,7 @@
#define _UAPI_ASM_SOCKET_H
#include <asm/sockios.h>
+#include <asm/bitsperlong.h>
/* For setsockopt(2) */
/*
@@ -30,8 +31,8 @@
#define SO_RCVBUFFORCE 0x100b
#define SO_RCVLOWAT 0x1010
#define SO_SNDLOWAT 0x1011
-#define SO_RCVTIMEO 0x1012
-#define SO_SNDTIMEO 0x1013
+#define SO_RCVTIMEO_OLD 0x1012
+#define SO_SNDTIMEO_OLD 0x1013
#define SO_ACCEPTCONN 0x1014
#define SO_PROTOCOL 0x1028
#define SO_DOMAIN 0x1029
@@ -51,13 +52,9 @@
#define SO_GET_FILTER SO_ATTACH_FILTER
#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
#define SO_PEERSEC 30
#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 19
@@ -66,9 +63,6 @@
#define SO_MARK 36
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
#define SO_RXQ_OVFL 40
#define SO_WIFI_STATUS 41
@@ -115,4 +109,41 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
+#define SO_TIMESTAMP_OLD 29
+#define SO_TIMESTAMPNS_OLD 35
+#define SO_TIMESTAMPING_OLD 37
+
+#define SO_TIMESTAMP_NEW 63
+#define SO_TIMESTAMPNS_NEW 64
+#define SO_TIMESTAMPING_NEW 65
+
+#define SO_RCVTIMEO_NEW 66
+#define SO_SNDTIMEO_NEW 67
+
+#if !defined(__KERNEL__)
+
+#if __BITS_PER_LONG == 64
+#define SO_TIMESTAMP SO_TIMESTAMP_OLD
+#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD
+#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD
+
+#define SO_RCVTIMEO SO_RCVTIMEO_OLD
+#define SO_SNDTIMEO SO_SNDTIMEO_OLD
+#else
+#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW)
+#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW)
+#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW)
+
+#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW)
+#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW)
+#endif
+
+#define SCM_TIMESTAMP SO_TIMESTAMP
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#endif
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
index 9ba724f116f1..71fd5db06866 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -2,6 +2,16 @@
#ifndef _UAPI_ALPHA_UNISTD_H
#define _UAPI_ALPHA_UNISTD_H
+/* These are traditionally the names linux-alpha uses for
+ * the two otherwise generic system calls */
+#define __NR_umount __NR_umount2
+#define __NR_osf_shmat __NR_shmat
+
+/* These return an extra value but can be used as aliases */
+#define __NR_getpid __NR_getxpid
+#define __NR_getuid __NR_getxuid
+#define __NR_getgid __NR_getxgid
+
#include <asm/unistd_32.h>
#endif /* _UAPI_ALPHA_UNISTD_H */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 792586038808..bf497b8b0ec6 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1253,7 +1253,7 @@ struct timex32 {
SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
{
- struct timex txc;
+ struct __kernel_timex txc;
int ret;
/* copy relevant bits of struct timex. */
@@ -1270,7 +1270,8 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
(copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
offsetof(struct timex32, tick))) ||
- (put_tv_to_tv32(&txc_p->time, &txc.time)))
+ (put_user(txc.time.tv_sec, &txc_p->time.tv_sec)) ||
+ (put_user(txc.time.tv_usec, &txc_p->time.tv_usec)))
return -EFAULT;
return ret;
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 5613aa378a83..4341ccf5c0c4 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -630,12 +630,6 @@ static int __hw_perf_event_init(struct perf_event *event)
return ev;
}
- /* The EV67 does not support mode exclusion */
- if (attr->exclude_kernel || attr->exclude_user
- || attr->exclude_hv || attr->exclude_idle) {
- return -EPERM;
- }
-
/*
* We place the event type in event_base here and leave calculation
* of the codes to programme the PMU for alpha_pmu_enable() because
@@ -771,6 +765,7 @@ static struct pmu pmu = {
.start = alpha_pmu_start,
.stop = alpha_pmu_stop,
.read = alpha_pmu_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 7b56a53be5e3..63ed39cbd3bd 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -29,7 +29,7 @@
19 common lseek sys_lseek
20 common getxpid sys_getxpid
21 common osf_mount sys_osf_mount
-22 common umount sys_umount
+22 common umount2 sys_umount
23 common setuid sys_setuid
24 common getxuid sys_getxuid
25 common exec_with_loader sys_ni_syscall
@@ -174,17 +174,17 @@
187 common osf_alt_sigpending sys_ni_syscall
188 common osf_alt_setsid sys_ni_syscall
199 common osf_swapon sys_swapon
-200 common msgctl sys_msgctl
+200 common msgctl sys_old_msgctl
201 common msgget sys_msgget
202 common msgrcv sys_msgrcv
203 common msgsnd sys_msgsnd
-204 common semctl sys_semctl
+204 common semctl sys_old_semctl
205 common semget sys_semget
206 common semop sys_semop
207 common osf_utsname sys_osf_utsname
208 common lchown sys_lchown
-209 common osf_shmat sys_shmat
-210 common shmctl sys_shmctl
+209 common shmat sys_shmat
+210 common shmctl sys_old_shmctl
211 common shmdt sys_shmdt
212 common shmget sys_shmget
213 common osf_mvalid sys_ni_syscall
@@ -451,3 +451,15 @@
520 common preadv2 sys_preadv2
521 common pwritev2 sys_pwritev2
522 common statx sys_statx
+523 common io_pgetevents sys_io_pgetevents
+524 common pkey_mprotect sys_pkey_mprotect
+525 common pkey_alloc sys_pkey_alloc
+526 common pkey_free sys_pkey_free
+527 common rseq sys_rseq
+528 common statfs64 sys_statfs64
+529 common fstatfs64 sys_fstatfs64
+530 common getegid sys_getegid
+531 common geteuid sys_geteuid
+532 common getppid sys_getppid
+# all other architectures have common numbers for new syscalls; alpha
+# is the exception.
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index d73dc473fbb9..188fc9256baf 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
- (r) <= 18 ? (r)+8 : (r)-10])
+ (r) <= 18 ? (r)+10 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 376366a7db81..2061b652d9c3 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -14,6 +14,7 @@ config ARC
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
+ select ARCH_32BIT_OFF_T
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
@@ -191,7 +192,6 @@ config NR_CPUS
config ARC_SMP_HALT_ON_RESET
bool "Enable Halt-on-reset boot mode"
- default y if ARC_UBOOT_SUPPORT
help
In SMP configuration cores can be configured as Halt-on-reset
or they could all start at same time. For Halt-on-reset, non
@@ -407,6 +407,14 @@ config ARC_HAS_ACCL_REGS
(also referred to as r58:r59). These can also be used by gcc as GPR so
kernel needs to save/restore per process
+config ARC_IRQ_NO_AUTOSAVE
+ bool "Disable hardware autosave regfile on interrupts"
+ default n
+ help
+ On HS cores, a taken interrupt auto-saves the regfile on the stack.
+ This is programmable and can optionally be disabled, in which case
+ software INTERRUPT_PROLOGUE/EPILOGUE do the needed work.
+
endif # ISA_ARCV2
endmenu # "ARC CPU Configuration"
@@ -515,17 +523,6 @@ config ARC_DBG_TLB_PARANOIA
endif
-config ARC_UBOOT_SUPPORT
- bool "Support uboot arg Handling"
- help
- ARC Linux by default checks for uboot provided args as pointers to
- external cmdline or DTB. This however breaks in absence of uboot,
- when booting from Metaware debugger directly, as the registers are
- not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
- registers look like uboot args to kernel which then chokes.
- So only enable the uboot arg checking/processing if users are sure
- of uboot being in play.
-
config ARC_BUILTIN_DTB_NAME
string "Built in DTB"
help
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 6e84060e7c90..621f59407d76 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
# CONFIG_ARC_HAS_LLSC is not set
CONFIG_ARC_KVADDR_SIZE=402
CONFIG_ARC_EMUL_UNALIGNED=y
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_PREEMPT=y
CONFIG_NET=y
CONFIG_UNIX=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index 1e59a2e9c602..e447ace6fa1c 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_ARC_PLAT_AXS10X=y
CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
CONFIG_PREEMPT=y
CONFIG_NET=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index b5c3f6c54b03..c82cdb10aaf4 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -15,8 +15,6 @@ CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
# CONFIG_ARC_TIMERS_64BIT is not set
-# CONFIG_ARC_SMP_HALT_ON_RESET is not set
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
CONFIG_PREEMPT=y
CONFIG_NET=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index feed50ce89fa..caa270261521 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -3,23 +3,19 @@ generic-y += bugs.h
generic-y += compat.h
generic-y += device.h
generic-y += div64.h
-generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += extable.h
-generic-y += fb.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
-generic-y += kmap_types.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += parport.h
-generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += topology.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 49bfbd879caa..a27eafdc8260 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
#endif
};
+struct bcr_uarch_build_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, prod:8, maj:8, min:8;
+#else
+ unsigned int min:8, maj:8, prod:8, pad:8;
+#endif
+};
+
struct bcr_mpy {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
@@ -216,6 +224,14 @@ struct bcr_fp_arcv2 {
#endif
};
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:21, min:1, num:2, ver:8;
+#else
+ unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
#include <soc/arc/timers.h>
struct bcr_bpu_arcompact {
@@ -283,7 +299,7 @@ struct cpuinfo_arc_cache {
};
struct cpuinfo_arc_bpu {
- unsigned int ver, full, num_cache, num_pred;
+ unsigned int ver, full, num_cache, num_pred, ret_stk;
};
struct cpuinfo_arc_ccm {
@@ -302,7 +318,7 @@ struct cpuinfo_arc {
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
- debug:1, ap:1, smart:1, rtt:1, pad3:4,
+ ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
struct bcr_mpy extn_mpy;
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index ee9246184033..202b74c339f0 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
- int n;
+ unsigned long n;
asm volatile(
" ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index f393b663413e..2ad77fb43639 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -52,6 +52,17 @@
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
+ * alignment for any atomic64_t embedded in the buffer.
+ * The default ARCH_SLAB_MINALIGN is __alignof__(long long), which has a relaxed
+ * value of 4 (and not 8) in the ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
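[Illustration, not part of the patch] A hedged kernel-code sketch of the case the comment above describes: a slab-allocated object that embeds an atomic64_t. Assuming ARC's atomic64_t carries an explicit 8-byte alignment for LLOCKD/SCONDD, raising ARCH_SLAB_MINALIGN to 8 makes the allocation base (and hence the embedded member) 64-bit aligned at runtime. struct foo and foo_bump() are made-up names for illustration.

    #include <linux/slab.h>
    #include <linux/atomic.h>

    struct foo {
            u32 flags;
            atomic64_t counter;     /* 8-byte-aligned member */
    };

    static int foo_bump(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return -ENOMEM;

            /* With ARCH_SLAB_MINALIGN == 8 the kzalloc() result is 64-bit
             * aligned, so LLOCKD/SCONDD on &f->counter operate on a
             * naturally aligned location. */
            atomic64_inc(&f->counter);

            kfree(f);
            return 0;
    }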
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 309f4e6721b3..225e7df2d8ed 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -17,6 +17,33 @@
;
; Now manually save: r12, sp, fp, gp, r25
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+ st.as r9, [sp, -10] ; save r9 in its final stack slot
+ sub sp, sp, 12 ; skip JLI, LDI, EI
+
+ PUSH lp_count
+ PUSHAX lp_start
+ PUSHAX lp_end
+ PUSH blink
+
+ PUSH r11
+ PUSH r10
+
+ sub sp, sp, 4 ; skip r9
+
+ PUSH r8
+ PUSH r7
+ PUSH r6
+ PUSH r5
+ PUSH r4
+ PUSH r3
+ PUSH r2
+ PUSH r1
+ PUSH r0
+.endif
+#endif
+
#ifdef CONFIG_ARC_HAS_ACCL_REGS
PUSH r59
PUSH r58
@@ -86,6 +113,33 @@
POP r59
#endif
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+ POP r0
+ POP r1
+ POP r2
+ POP r3
+ POP r4
+ POP r5
+ POP r6
+ POP r7
+ POP r8
+ POP r9
+ POP r10
+ POP r11
+
+ POP blink
+ POPAX lp_end
+ POPAX lp_start
+
+ POP r9
+ mov lp_count, r9
+
+ add sp, sp, 12 ; skip JLI, LDI, EI
+ ld.as r9, [sp, -10] ; reload r9 which got clobbered
+.endif
+#endif
+
.endm
/*------------------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 9185541035cc..6958545390f0 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
/* counts condition */
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+ /* All jump instructions that are taken */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
#ifdef CONFIG_ISA_ARCV2
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index c9173c02081c..eabc3efa6c6d 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return res;
}
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
" .previous \n"
: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
: "g"(-EFAULT), "r"(count)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return res;
}
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 3b3543fd151c..5eafa1115162 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -18,10 +18,12 @@
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
#define sys_mmap2 sys_mmap_pgoff
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cc558a25b8fa..562089d62d9d 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -209,7 +209,9 @@ restore_regs:
;####### Return from Intr #######
debug_marker_l1:
- bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+ ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+ btst r0, STATUS_DE_BIT ; Z flag set if bit clear
+ bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set
.Lisr_ret_fast_path:
; Handle special case #1: (Entry via Exception, Return via IRQ)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cc..30e090625916 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
+#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]
1:
+
+#ifdef CONFIG_ISA_ARCV2
+ ; Unaligned access is disabled at reset, so re-enable it early, as
+ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+ ; by default
+ lr r5, [status32]
+ bset r5, r5, STATUS_AD_BIT
+ kflag r5
+#endif
.endm
.section .init.text, "ax",@progbits
@@ -90,15 +100,13 @@ ENTRY(stext)
st.ab 0, [r5, 4]
1:
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
; Uboot - kernel ABI
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
- ; r1 = magic number (board identity, unused as of now
+ ; r1 = magic number (always zero as of now)
; r2 = pointer to uboot provided cmdline or external DTB in mem
- ; These are handled later in setup_arch()
+ ; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
st r2, [@uboot_arg]
-#endif
; setup "current" tsk and optionally cache it in dedicated r25
mov r9, @init_task
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 067ea362fb3e..cf18b3e5a934 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)
*(unsigned int *)&ictrl = 0;
+#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */
ictrl.save_blink = 1;
ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */
ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */
ictrl.save_idx_regs = 1; /* JLI, LDI, EI */
+#endif
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8aec462d90fb..861a8aea51f9 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -1,15 +1,10 @@
-/*
- * Linux performance counter support for ARC700 series
- *
- * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This code is inspired by the perf support of various other architectures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Linux performance counter support for ARC CPUs.
+// This code is inspired by the perf support of various other architectures.
+//
+// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
+
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -19,12 +14,31 @@
#include <asm/arcregs.h>
#include <asm/stacktrace.h>
+/* HW holds 8 symbols + one for null terminator */
+#define ARCPMU_EVENT_NAME_LEN 9
+
+enum arc_pmu_attr_groups {
+ ARCPMU_ATTR_GR_EVENTS,
+ ARCPMU_ATTR_GR_FORMATS,
+ ARCPMU_NR_ATTR_GR
+};
+
+struct arc_pmu_raw_event_entry {
+ char name[ARCPMU_EVENT_NAME_LEN];
+};
+
struct arc_pmu {
struct pmu pmu;
unsigned int irq;
int n_counters;
+ int n_events;
u64 max_period;
int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
+
+ struct arc_pmu_raw_event_entry *raw_entry;
+ struct attribute **attrs;
+ struct perf_pmu_events_attr *attr;
+ const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
};
struct arc_pmu_cpu {
@@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
{
struct arc_callchain_trace *ctrl = data;
struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
+
perf_callchain_store(entry, addr);
if (ctrl->depth++ < 3)
@@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
return -1;
}
-void
-perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
{
struct arc_callchain_trace ctrl = {
.depth = 0,
@@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}
-void
-perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
{
/*
* User stack can't be unwound trivially with kernel dwarf unwinder
@@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
/* read counter #idx; note that counter# != event# on ARC! */
-static uint64_t arc_pmu_read_counter(int idx)
+static u64 arc_pmu_read_counter(int idx)
{
- uint32_t tmp;
- uint64_t result;
+ u32 tmp;
+ u64 result;
/*
* ARC supports making 'snapshots' of the counters, so we don't
@@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
write_aux_reg(ARC_REG_PCT_INDEX, idx);
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
- result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
+ result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
result |= read_aux_reg(ARC_REG_PCT_SNAPL);
return result;
@@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
static void arc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
- uint64_t prev_raw_count = local64_read(&hwc->prev_count);
- uint64_t new_raw_count = arc_pmu_read_counter(idx);
- int64_t delta = new_raw_count - prev_raw_count;
+ u64 prev_raw_count = local64_read(&hwc->prev_count);
+ u64 new_raw_count = arc_pmu_read_counter(idx);
+ s64 delta = new_raw_count - prev_raw_count;
/*
* We aren't afraid of hwc->prev_count changing beneath our feet
@@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event)
int ret;
if (!is_sampling_event(event)) {
- hwc->sample_period = arc_pmu->max_period;
+ hwc->sample_period = arc_pmu->max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
@@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
pr_debug("init cache event with h/w %08x \'%s\'\n",
(int)hwc->config, arc_pmu_ev_hw_map[ret]);
return 0;
+
+ case PERF_TYPE_RAW:
+ if (event->attr.config >= arc_pmu->n_events)
+ return -ENOENT;
+
+ hwc->config |= event->attr.config;
+ pr_debug("init raw event with idx %lld \'%s\'\n",
+ event->attr.config,
+ arc_pmu->raw_entry[event->attr.config].name);
+
+ return 0;
+
default:
return -ENOENT;
}
@@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
- uint32_t tmp;
+ u32 tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}
@@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
- uint32_t tmp;
+ u32 tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
@@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event)
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
- } else if (unlikely(left <= 0)) {
+ } else if (unlikely(left <= 0)) {
/* left underflowed by less than period. */
left += period;
local64_set(&hwc->period_left, left);
@@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
write_aux_reg(ARC_REG_PCT_INDEX, idx);
/* Write value */
- write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
- write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
+ write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
+ write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
perf_event_update_userpage(event);
@@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
/* Enable interrupt for this counter */
if (is_sampling_event(event))
write_aux_reg(ARC_REG_PCT_INT_CTRL,
- read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
/* enable ARC pmu here */
write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */
@@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
* Reset interrupt flag by writing of 1. This is required
* to make sure pending interrupt was not left.
*/
- write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+ write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
write_aux_reg(ARC_REG_PCT_INT_CTRL,
- read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
}
if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
if (is_sampling_event(event)) {
/* Mimic full counter overflow as other arches do */
- write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
+ write_aux_reg(ARC_REG_PCT_INT_CNTL,
+ lower_32_bits(arc_pmu->max_period));
write_aux_reg(ARC_REG_PCT_INT_CNTH,
- (arc_pmu->max_period >> 32));
+ upper_32_bits(arc_pmu->max_period));
}
write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
idx = __ffs(active_ints);
/* Reset interrupt flag by writing of 1 */
- write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+ write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
/*
* On reset of "interrupt active" bit corresponding
@@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
* Now we need to re-enable interrupt for the counter.
*/
write_aux_reg(ARC_REG_PCT_INT_CTRL,
- read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+ read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
event = pmu_cpu->act_counter[idx];
hwc = &event->hw;
@@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
arc_pmu_stop(event, 0);
}
- active_ints &= ~(1U << idx);
+ active_ints &= ~BIT(idx);
} while (active_ints);
done:
@@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}
+/* Event field occupies the bottom 15 bits of our config field */
+PMU_FORMAT_ATTR(event, "config:0-14");
+static struct attribute *arc_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group arc_pmu_format_attr_gr = {
+ .name = "format",
+ .attrs = arc_pmu_format_attrs,
+};
+
+static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+/*
+ * We don't add attrs here as we don't have a pre-defined list of perf events.
+ * We will generate and add attrs dynamically in probe() after we read the HW
+ * configuration.
+ */
+static struct attribute_group arc_pmu_events_attr_gr = {
+ .name = "events",
+};
+
+static void arc_pmu_add_raw_event_attr(int j, char *str)
+{
+ memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
+ arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
+ arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+ arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
+ arc_pmu->attr[j].id = j;
+ arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
+}
+
+static int arc_pmu_raw_alloc(struct device *dev)
+{
+ arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+ sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->attr)
+ return -ENOMEM;
+
+ arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+ sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->attrs)
+ return -ENOMEM;
+
+ arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
+ sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
+ if (!arc_pmu->raw_entry)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline bool event_in_hw_event_map(int i, char *name)
+{
+ if (!arc_pmu_ev_hw_map[i])
+ return false;
+
+ if (!strlen(arc_pmu_ev_hw_map[i]))
+ return false;
+
+ if (strcmp(arc_pmu_ev_hw_map[i], name))
+ return false;
+
+ return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+ int i;
+
+ /* See if HW condition has been mapped to a perf event_id */
+ for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+ if (event_in_hw_event_map(i, str)) {
+ pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+ i, str, j);
+ arc_pmu->ev_hw_idx[i] = j;
+ }
+ }
+}
+
static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
- int i, j, has_interrupts;
+ int i, has_interrupts;
int counter_size; /* in bits */
union cc_name {
struct {
- uint32_t word0, word1;
+ u32 word0, word1;
char sentinel;
} indiv;
- char str[9];
+ char str[ARCPMU_EVENT_NAME_LEN];
} cc_name;
@@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
return -ENODEV;
}
BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
- BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
+ if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+ return -EINVAL;
READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
- BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
+ if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+ return -EINVAL;
arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
if (!arc_pmu)
return -ENOMEM;
+ arc_pmu->n_events = cc_bcr.c;
+
+ if (arc_pmu_raw_alloc(&pdev->dev))
+ return -ENOMEM;
+
has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
arc_pmu->n_counters = pct_bcr.c;
@@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
arc_pmu->n_counters, counter_size, cc_bcr.c,
- has_interrupts ? ", [overflow IRQ support]":"");
+ has_interrupts ? ", [overflow IRQ support]" : "");
- cc_name.str[8] = 0;
+ cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
arc_pmu->ev_hw_idx[i] = -1;
/* loop thru all available h/w condition indexes */
- for (j = 0; j < cc_bcr.c; j++) {
- write_aux_reg(ARC_REG_CC_INDEX, j);
+ for (i = 0; i < cc_bcr.c; i++) {
+ write_aux_reg(ARC_REG_CC_INDEX, i);
cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
- /* See if it has been mapped to a perf event_id */
- for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
- if (arc_pmu_ev_hw_map[i] &&
- !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
- strlen(arc_pmu_ev_hw_map[i])) {
- pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
- i, cc_name.str, j);
- arc_pmu->ev_hw_idx[i] = j;
- }
- }
+ arc_pmu_map_hw_event(i, cc_name.str);
+ arc_pmu_add_raw_event_attr(i, cc_name.str);
}
+ arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
+ arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
+ arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
+
arc_pmu->pmu = (struct pmu) {
.pmu_enable = arc_pmu_enable,
.pmu_disable = arc_pmu_disable,
@@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
.start = arc_pmu_start,
.stop = arc_pmu_stop,
.read = arc_pmu_read,
+ .attr_groups = arc_pmu->attr_groups,
};
if (has_interrupts) {
@@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
} else
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
- return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
+ /*
+ * The perf parser doesn't really like the '-' symbol in event names, so let's
+ * use '_' in the arc pct name as it becomes the kernel PMU event prefix.
+ */
+ return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
}
-#ifdef CONFIG_OF
static const struct of_device_id arc_pmu_match[] = {
{ .compatible = "snps,arc700-pct" },
{ .compatible = "snps,archs-pct" },
{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);
-#endif
static struct platform_driver arc_pmu_driver = {
.driver = {
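[Illustration, not part of the patch] A hedged userspace sketch of how the dynamically generated events could be consumed once the PMU is registered as "arc_pct": read the PMU type from sysfs (the usual convention for named PMUs) and open a raw event whose config is a condition index, matching the PERF_TYPE_RAW branch added to arc_pmu_event_init(). The sysfs path and the index 0 are assumptions/placeholders.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            int type = PERF_TYPE_RAW;       /* type requested at register time */
            FILE *f = fopen("/sys/bus/event_source/devices/arc_pct/type", "r");

            if (f) {
                    if (fscanf(f, "%d", &type) != 1)
                            type = PERF_TYPE_RAW;
                    fclose(f);
            }

            struct perf_event_attr attr;
            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = type;
            attr.config = 0;                /* placeholder: condition index < n_events */

            int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            uint64_t count;
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("count=%llu\n", (unsigned long long)count);

            return 0;
    }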
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 2e018b8c2e19..7b2340996cf8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void)
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
const struct id_to_str *tbl;
struct bcr_isa_arcv2 isa;
+ struct bcr_actionpoint ap;
FIX_PTR(cpu);
@@ -195,20 +196,40 @@ static void read_arc_build_cfg_regs(void)
cpu->bpu.full = bpu.ft;
cpu->bpu.num_cache = 256 << bpu.bce;
cpu->bpu.num_pred = 2048 << bpu.pte;
+ cpu->bpu.ret_stk = 4 << bpu.rse;
if (cpu->core.family >= 0x54) {
- unsigned int exec_ctrl;
- READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
- cpu->extn.dual_enb = !(exec_ctrl & 1);
+ struct bcr_uarch_build_arcv2 uarch;
- /* dual issue always present for this core */
- cpu->extn.dual = 1;
+ /*
+ * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
+ * dual issue only (HS4x). But the next uarch rev (1:0)
+ * allows it to be configured for single issue (HS3x).
+ * Ensure we fiddle with dual issue only on HS4x.
+ */
+ READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+ if (uarch.prod == 4) {
+ unsigned int exec_ctrl;
+
+ /* dual issue hardware always present */
+ cpu->extn.dual = 1;
+
+ READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+
+ /* dual issue hardware enabled ? */
+ cpu->extn.dual_enb = !(exec_ctrl & 1);
+
+ }
}
}
- READ_BCR(ARC_REG_AP_BCR, bcr);
- cpu->extn.ap = bcr.ver ? 1 : 0;
+ READ_BCR(ARC_REG_AP_BCR, ap);
+ if (ap.ver) {
+ cpu->extn.ap_num = 2 << ap.num;
+ cpu->extn.ap_full = !ap.min;
+ }
READ_BCR(ARC_REG_SMART_BCR, bcr);
cpu->extn.smart = bcr.ver ? 1 : 0;
@@ -216,8 +237,6 @@ static void read_arc_build_cfg_regs(void)
READ_BCR(ARC_REG_RTT_BCR, bcr);
cpu->extn.rtt = bcr.ver ? 1 : 0;
- cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-
READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
/* some hacks for lack of feature BCR info in old ARC700 cores */
@@ -299,10 +318,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
- "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
+ "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
IS_AVAIL1(cpu->bpu.full, "full"),
IS_AVAIL1(!cpu->bpu.full, "partial"),
- cpu->bpu.num_cache, cpu->bpu.num_pred);
+ cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
if (is_isa_arcv2()) {
struct bcr_lpb lpb;
@@ -336,11 +355,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
- if (cpu->extn.debug)
- n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
- IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
+ if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
+ n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
IS_AVAIL1(cpu->extn.smart, "smaRT "),
IS_AVAIL1(cpu->extn.rtt, "RTT "));
+ if (cpu->extn.ap_num) {
+ n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
+ cpu->extn.ap_num,
+ cpu->extn.ap_full ? "full":"min");
+ }
+ n += scnprintf(buf + n, len - n, "\n");
+ }
if (cpu->dccm.sz || cpu->iccm.sz)
n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
@@ -453,43 +478,78 @@ void setup_processor(void)
arc_chk_core_config();
}
-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
- return 1;
- return 0;
+ /*
+ * Check that it is an untranslated address (although the MMU is not enabled
+ * yet, it being a high address ensures this is not by fluke)
+ */
+ if (addr < PAGE_OFFSET)
+ return true;
+
+ /* Check that address doesn't clobber resident kernel image */
+ return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
}
-void __init setup_arch(char **cmdline_p)
+#define IGNORE_ARGS "Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE 0
+#define UBOOT_TAG_CMDLINE 1
+#define UBOOT_TAG_DTB 2
+
+void __init handle_uboot_args(void)
{
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
- /* make sure that uboot passed pointer to cmdline/dtb is valid */
- if (uboot_tag && is_kernel((unsigned long)uboot_arg))
- panic("Invalid uboot arg\n");
-
- /* See if u-boot passed an external Device Tree blob */
- machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
- if (!machine_desc)
-#endif
- {
- /* No, so try the embedded one */
+ bool use_embedded_dtb = true;
+ bool append_cmdline = false;
+
+ /* check that we know this tag */
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_tag != UBOOT_TAG_CMDLINE &&
+ uboot_tag != UBOOT_TAG_DTB) {
+ pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+ goto ignore_uboot_args;
+ }
+
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_arg_invalid((unsigned long)uboot_arg)) {
+ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+ goto ignore_uboot_args;
+ }
+
+ /* see if U-boot passed an external Device Tree blob */
+ if (uboot_tag == UBOOT_TAG_DTB) {
+ machine_desc = setup_machine_fdt((void *)uboot_arg);
+
+ /* external Device Tree blob is invalid - use embedded one */
+ use_embedded_dtb = !machine_desc;
+ }
+
+ if (uboot_tag == UBOOT_TAG_CMDLINE)
+ append_cmdline = true;
+
+ignore_uboot_args:
+
+ if (use_embedded_dtb) {
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Embedded DT invalid\n");
+ }
- /*
- * If we are here, it is established that @uboot_arg didn't
- * point to DT blob. Instead if u-boot says it is cmdline,
- * append to embedded DT cmdline.
- * setup_machine_fdt() would have populated @boot_command_line
- */
- if (uboot_tag == 1) {
- /* Ensure a whitespace between the 2 cmdlines */
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, uboot_arg,
- COMMAND_LINE_SIZE);
- }
+ /*
+ * NOTE: @boot_command_line is populated by setup_machine_fdt(), so this
+ * append processing can only happen after that call.
+ */
+ if (append_cmdline) {
+ /* Ensure a whitespace between the 2 cmdlines */
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
}
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ handle_uboot_args();
/* Save unparsed command line copy for /proc/cmdline */
*cmdline_p = boot_command_line;
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e8d9fb452346..215f515442e0 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
#include <asm/arcregs.h>
#include <asm/irqflags.h>
+#define ARC_PATH_MAX 256
+
/*
* Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
* -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
print_reg_file(&(cregs->r13), 13);
}
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
{
char *path_nm = NULL;
struct mm_struct *mm;
struct file *exe_file;
+ char buf[ARC_PATH_MAX];
mm = get_task_mm(tsk);
if (!mm)
@@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
mmput(mm);
if (exe_file) {
- path_nm = file_path(exe_file, buf, 255);
+ path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
fput(exe_file);
}
@@ -80,10 +83,9 @@ done:
pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
}
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
{
struct vm_area_struct *vma;
- char *nm = buf;
struct mm_struct *active_mm = current->active_mm;
/* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
* if the container VMA is not found
*/
if (vma && (vma->vm_start <= address)) {
+ char buf[ARC_PATH_MAX];
+ char *nm = "?";
+
if (vma->vm_file) {
- nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+ nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
if (IS_ERR(nm))
nm = "?";
}
@@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct callee_regs *cregs;
- char *buf;
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return;
+ /*
+ * Generic code calls us with preemption disabled, but some calls
+ * here could sleep, so re-enable it to avoid a lockdep splat.
+ */
+ preempt_enable();
- print_task_path_n_nm(tsk, buf);
+ print_task_path_n_nm(tsk);
show_regs_print_info(KERN_INFO);
show_ecr_verbose(regs);
@@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs)
(void *)regs->blink, (void *)regs->ret);
if (user_mode(regs))
- show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+ show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("[STAT32]: 0x%08lx", regs->status32);
@@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs)
if (cregs)
show_callee_regs(cregs);
- free_page((unsigned long)buf);
+ preempt_disable();
}
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044dd8b58..ea14b0bf3116 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -25,15 +25,11 @@
#endif
#ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX) prefetch [RX, 56]
-# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
# define ZOLSHFT 5
# define ZOLAND 0x1F
#else
-# define PREFETCH_READ(RX) prefetch [RX, 28]
-# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
# define LOADX(DST,RX) ld.ab DST, [RX, 4]
# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
# define ZOLSHFT 4
@@ -41,8 +37,6 @@
#endif
ENTRY_CFI(memcpy)
- prefetch [r1] ; Prefetch the read location
- prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy32_64bytes
;; LOOP START
LOADX (r6, r1)
- PREFETCH_READ (r1)
- PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_1
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 24)
or r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_2
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 16)
or r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_3
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 62ad4bcb841a..f230bb7092fd 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
*/
#include <linux/linkage.h>
+#include <asm/cache.h>
-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use the prefetchw and prealloc
+ * instructions in case of a CPU with a 64B L1 data cache line (L1_CACHE_SHIFT == 6).
+ * If you want to implement an optimized memset for other possible L1 data cache
+ * line lengths (32B and 128B), you should rewrite the code carefully, checking that
+ * we don't issue any prefetchw/prealloc instruction for L1 cache lines which
+ * don't belong to the memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR reg, off
+ prealloc [\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR reg, off
+ prefetchw [\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif
ENTRY_CFI(memset)
- prefetchw [r0] ; Prefetch the write location
+ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
lpnz @.Lset64bytes
;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
- prefetchw [r3, 64] ;Prefetch the next write location
-#else
- prealloc [r3, 64]
-#endif
+ PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
+
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
;; LOOP START
- prefetchw [r3, 32] ;Prefetch the next write location
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index a1d723197084..8df1638259f3 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -141,12 +141,17 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
- /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
if (fatal_signal_pending(current)) {
- if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
- up_read(&mm->mmap_sem);
- if (user_mode(regs))
+
+ /*
+ * if fault retry, mmap_sem already relinquished by core mm
+ * so OK to return to user mode (with signal handled first)
+ */
+ if (fault & VM_FAULT_RETRY) {
+ if (!user_mode(regs))
+ goto no_context;
return;
+ }
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 43bf4c3a1290..e1ab2d7f1d64 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -119,7 +119,8 @@ void __init setup_arch_memory(void)
*/
memblock_add_node(low_mem_start, low_mem_sz, 0);
- memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+ memblock_reserve(CONFIG_LINUX_LINK_BASE,
+ __pa(_end) - CONFIG_LINUX_LINK_BASE);
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size) {
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index f25c085b9874..23e00216e5a5 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
bool "ARC HS Development Kit SOC"
depends on ISA_ARCV2
select ARC_HAS_ACCL_REGS
+ select ARC_IRQ_NO_AUTOSAVE
select CLK_HSDK
select RESET_HSDK
select HAVE_PCI
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 664e918e2624..e8a4d3ffb6eb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2,6 +2,7 @@
config ARM
bool
default y
+ select ARCH_32BIT_OFF_T
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC
select ARCH_HAS_DEBUG_VIRTUAL if MMU
@@ -589,11 +590,13 @@ config ARCH_DAVINCI
select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
select RESET_CONTROLLER
+ select SPARSE_IRQ
select USE_OF
select ZONE_DMA
help
@@ -750,6 +753,8 @@ source "arch/arm/mach-mediatek/Kconfig"
source "arch/arm/mach-meson/Kconfig"
+source "arch/arm/mach-milbeaut/Kconfig"
+
source "arch/arm/mach-mmp/Kconfig"
source "arch/arm/mach-moxart/Kconfig"
@@ -1400,6 +1405,7 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
+ select GENERIC_IRQ_MIGRATION
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 9db3c584b2cb..00000e91ad65 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -190,6 +190,7 @@ machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0
machine-$(CONFIG_ARCH_MVEBU) += mvebu
machine-$(CONFIG_ARCH_MXC) += imx
machine-$(CONFIG_ARCH_MEDIATEK) += mediatek
+machine-$(CONFIG_ARCH_MILBEAUT) += milbeaut
machine-$(CONFIG_ARCH_MXS) += mxs
machine-$(CONFIG_ARCH_NETX) += netx
machine-$(CONFIG_ARCH_NOMADIK) += nomadik
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index bd40148a15b2..f4f5aeaf3298 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -79,6 +79,7 @@ dtb-$(CONFIG_ARCH_BCM2835) += \
bcm2835-rpi-a-plus.dtb \
bcm2835-rpi-cm1-io1.dtb \
bcm2836-rpi-2-b.dtb \
+ bcm2837-rpi-3-a-plus.dtb \
bcm2837-rpi-3-b.dtb \
bcm2837-rpi-3-b-plus.dtb \
bcm2837-rpi-cm3-io3.dtb \
@@ -115,6 +116,7 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \
bcm47094-luxul-xwr-3100.dtb \
bcm47094-luxul-xwr-3150-v1.dtb \
bcm47094-netgear-r8500.dtb \
+ bcm47094-phicomm-k3.dtb \
bcm94708.dtb \
bcm94709.dtb \
bcm953012er.dtb \
@@ -313,7 +315,8 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += \
dtb-$(CONFIG_ARCH_LPC18XX) += \
lpc4337-ciaa.dtb \
lpc4350-hitex-eval.dtb \
- lpc4357-ea4357-devkit.dtb
+ lpc4357-ea4357-devkit.dtb \
+ lpc4357-myd-lpc4357.dtb
dtb-$(CONFIG_ARCH_LPC32XX) += \
lpc3250-ea3250.dtb \
lpc3250-phy3250.dtb
@@ -445,6 +448,9 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-wandboard.dtb \
imx6dl-wandboard-revb1.dtb \
imx6dl-wandboard-revd1.dtb \
+ imx6dl-yapp4-draco.dtb \
+ imx6dl-yapp4-hydra.dtb \
+ imx6dl-yapp4-ursa.dtb \
imx6q-apalis-eval.dtb \
imx6q-apalis-ixora.dtb \
imx6q-apalis-ixora-v1.1.dtb \
@@ -561,6 +567,7 @@ dtb-$(CONFIG_SOC_IMX6UL) += \
imx6ul-opos6uldev.dtb \
imx6ul-pico-hobbit.dtb \
imx6ul-pico-pi.dtb \
+ imx6ul-phytec-phyboard-segin-full.dtb \
imx6ul-tx6ul-0010.dtb \
imx6ul-tx6ul-0011.dtb \
imx6ul-tx6ul-mainboard.dtb \
@@ -599,6 +606,7 @@ dtb-$(CONFIG_SOC_VF610) += \
vf610-zii-dev-rev-b.dtb \
vf610-zii-dev-rev-c.dtb \
vf610-zii-scu4-aib.dtb \
+ vf610-zii-ssmb-dtu.dtb \
vf610-zii-ssmb-spu3.dtb
dtb-$(CONFIG_ARCH_MXS) += \
imx23-evk.dtb \
@@ -700,6 +708,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
omap3-thunder.dtb \
omap3-zoom3.dtb
dtb-$(CONFIG_SOC_TI81XX) += \
+ am3874-iceboard.dtb \
dm8148-evm.dtb \
dm8148-t410.dtb \
dm8168-evm.dtb \
@@ -719,6 +728,7 @@ dtb-$(CONFIG_SOC_AM33XX) += \
am335x-cm-t335.dtb \
am335x-evm.dtb \
am335x-evmsk.dtb \
+ am335x-guardian.dtb \
am335x-icev2.dtb \
am335x-lxm.dtb \
am335x-moxa-uc-2101.dtb \
@@ -843,6 +853,7 @@ dtb-$(CONFIG_ARCH_RENESAS) += \
r7s72100-genmai.dtb \
r7s72100-gr-peach.dtb \
r7s72100-rskrza1.dtb \
+ r7s9210-rza2mevb.dtb \
r8a73a4-ape6evm.dtb \
r8a7740-armadillo800eva.dtb \
r8a7743-iwg20d-q7.dtb \
@@ -868,6 +879,7 @@ dtb-$(CONFIG_ARCH_RENESAS) += \
r9a06g032-rzn1d400-db.dtb \
sh73a0-kzm9g.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += \
+ rv1108-elgin-r1.dtb \
rv1108-evb.dtb \
rk3036-evb.dtb \
rk3036-kylin.dtb \
@@ -919,6 +931,7 @@ dtb-$(CONFIG_ARCH_SOCFPGA) += \
socfpga_arria10_socdk_nand.dtb \
socfpga_arria10_socdk_qspi.dtb \
socfpga_arria10_socdk_sdmmc.dtb \
+ socfpga_cyclone5_chameleon96.dtb \
socfpga_cyclone5_mcvevk.dtb \
socfpga_cyclone5_socdk.dtb \
socfpga_cyclone5_de0_nano_soc.dtb \
@@ -1233,6 +1246,7 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += \
mt7623n-bananapi-bpi-r2.dtb \
mt8127-moose.dtb \
mt8135-evbp1.dtb
+dtb-$(CONFIG_ARCH_MILBEAUT) += milbeaut-m10v-evb.dtb
dtb-$(CONFIG_ARCH_ZX) += zx296702-ad1.dtb
dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-ast2500-evb.dtb \
diff --git a/arch/arm/boot/dts/alphascale-asm9260.dtsi b/arch/arm/boot/dts/alphascale-asm9260.dtsi
index 907fc7bfc418..2ce6038536fd 100644
--- a/arch/arm/boot/dts/alphascale-asm9260.dtsi
+++ b/arch/arm/boot/dts/alphascale-asm9260.dtsi
@@ -4,10 +4,11 @@
* Licensed under the X11 license or the GPL v2 (or later)
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/alphascale,asm9260.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupt-parent = <&icoll>;
memory {
diff --git a/arch/arm/boot/dts/alpine.dtsi b/arch/arm/boot/dts/alpine.dtsi
index 731df7a8c4e6..d3036ea823d1 100644
--- a/arch/arm/boot/dts/alpine.dtsi
+++ b/arch/arm/boot/dts/alpine.dtsi
@@ -25,12 +25,18 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton64.dtsi"
/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
/* SOC compatibility */
compatible = "al,alpine";
+ memory {
+ device_type = "memory";
+ reg = <0 0 0 0>;
+ };
+
/* CPU Configuration */
cpus {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/am335x-baltos-ir2110.dts b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
index 75de1e723303..50dcf1290ac6 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir2110.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
@@ -72,7 +72,3 @@
dual_emac_res_vlan = <2>;
phy-handle = <&phy1>;
};
-
-&phy_sel {
- rmii-clock-ext = <1>;
-};
diff --git a/arch/arm/boot/dts/am335x-baltos-ir3220.dts b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
index 1b215c425c57..f3f1abd26470 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir3220.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
@@ -114,7 +114,3 @@
dual_emac_res_vlan = <2>;
phy-handle = <&phy1>;
};
-
-&phy_sel {
- rmii-clock-ext = <1>;
-};
diff --git a/arch/arm/boot/dts/am335x-baltos-ir5221.dts b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
index 832ead864dc5..42f473f0ed77 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir5221.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
@@ -133,10 +133,6 @@
phy-handle = <&phy1>;
};
-&phy_sel {
- rmii-clock-ext = <1>;
-};
-
&dcan1 {
pinctrl-names = "default";
pinctrl-0 = <&dcan1_pins>;
diff --git a/arch/arm/boot/dts/am335x-chiliboard.dts b/arch/arm/boot/dts/am335x-chiliboard.dts
index 9c2a947aacf5..bffa5dce54ec 100644
--- a/arch/arm/boot/dts/am335x-chiliboard.dts
+++ b/arch/arm/boot/dts/am335x-chiliboard.dts
@@ -14,6 +14,10 @@
compatible = "grinn,am335x-chiliboard", "grinn,am335x-chilisom",
"ti,am33xx";
+ chosen {
+ stdout-path = &uart0;
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -151,10 +155,6 @@
phy-mode = "rmii";
};
-&phy_sel {
- rmii-clock-ext;
-};
-
/* USB */
&usb {
status = "okay";
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index b67f5fee1469..dce5be5df97b 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -729,7 +729,7 @@
&cpsw_emac0 {
phy-handle = <&ethphy0>;
- phy-mode = "rgmii-txid";
+ phy-mode = "rgmii-id";
};
&tscadc {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 172c0224e7f6..b128998097ce 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -651,13 +651,13 @@
&cpsw_emac0 {
phy-handle = <&ethphy0>;
- phy-mode = "rgmii-txid";
+ phy-mode = "rgmii-id";
dual_emac_res_vlan = <1>;
};
&cpsw_emac1 {
phy-handle = <&ethphy1>;
- phy-mode = "rgmii-txid";
+ phy-mode = "rgmii-id";
dual_emac_res_vlan = <2>;
};
diff --git a/arch/arm/boot/dts/am335x-guardian.dts b/arch/arm/boot/dts/am335x-guardian.dts
new file mode 100644
index 000000000000..c9611ea4b884
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-guardian.dts
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018 Robert Bosch Power Tools GmbH
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Bosch AM335x Guardian";
+ compatible = "bosch,am335x-guardian", "ti,am33xx";
+
+ chosen {
+ stdout-path = &uart0;
+ tick-timer = &timer2;
+ };
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&dcdc2_reg>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_pins>;
+
+ button21 {
+ label = "guardian-power-button";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio2 21 0>;
+ wakeup-source;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_pins>;
+
+ led1 {
+ label = "green:heartbeat";
+ gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led2 {
+ label = "green:mmc0";
+ gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+ };
+
+ panel {
+ compatible = "ti,tilcdc,panel";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&lcd_pins_default &lcd_disen_pins>;
+ pinctrl-1 = <&lcd_pins_sleep>;
+
+ display-timings {
+ 320x240 {
+ hactive = <320>;
+ vactive = <240>;
+ hback-porch = <68>;
+ hfront-porch = <20>;
+ hsync-len = <1>;
+ vback-porch = <18>;
+ vfront-porch = <4>;
+ vsync-len = <1>;
+ clock-frequency = <9000000>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ };
+ };
+ panel-info {
+ ac-bias = <255>;
+ ac-bias-intrpt = <0>;
+ dma-burst-sz = <16>;
+ bpp = <24>;
+ bus-width = <16>;
+ fdd = <0x80>;
+ sync-edge = <0>;
+ sync-ctrl = <1>;
+ raster-order = <0>;
+ fifo-th = <0>;
+ };
+
+ };
+
+ pwm7: dmtimer-pwm {
+ compatible = "ti,omap-dmtimer-pwm";
+ ti,timers = <&timer7>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dmtimer7_pins>;
+ };
+
+ vmmcsd_fixed: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vmmcsd_fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&cppi41dma {
+ status = "okay";
+};
+
+&elm {
+ status = "okay";
+};
+
+&gpmc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nandflash_pins>;
+ ranges = <0 0 0x08000000 0x1000000>; /* CS0: 16MB for NAND */
+ status = "okay";
+
+ nand@0,0 {
+ compatible = "ti,omap2-nand";
+ reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+ interrupt-parent = <&gpmc>;
+ interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+ <1 IRQ_TYPE_NONE>; /* termcount */
+ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
+ ti,nand-ecc-opt = "bch16";
+ ti,elm-id = <&elm>;
+ nand-bus-width = <8>;
+ gpmc,device-width = <1>;
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <44>;
+ gpmc,cs-wr-off-ns = <44>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <34>;
+ gpmc,adv-wr-off-ns = <44>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <40>;
+ gpmc,oe-on-ns = <0>;
+ gpmc,oe-off-ns = <54>;
+ gpmc,access-ns = <64>;
+ gpmc,rd-cycle-ns = <82>;
+ gpmc,wr-cycle-ns = <82>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,clk-activation-ns = <0>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
+
+ /*
+ * MTD partition table
+ *
+ * All SPL-* partitions are sized to the minimal length that can
+ * be independently programmed. For NAND flash this is equal
+ * to the size of an erase block.
+ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "SPL";
+ reg = <0x0 0x40000>;
+ };
+
+ partition@1 {
+ label = "SPL.backup1";
+ reg = <0x40000 0x40000>;
+ };
+
+ partition@2 {
+ label = "SPL.backup2";
+ reg = <0x80000 0x40000>;
+ };
+
+ partition@3 {
+ label = "SPL.backup3";
+ reg = <0xc0000 0x40000>;
+ };
+
+ partition@4 {
+ label = "u-boot";
+ reg = <0x100000 0x100000>;
+ };
+
+ partition@5 {
+ label = "u-boot.backup1";
+ reg = <0x200000 0x100000>;
+ };
+
+ partition@6 {
+ label = "u-boot-env";
+ reg = <0x300000 0x40000>;
+ };
+
+ partition@7 {
+ label = "u-boot-env.backup1";
+ reg = <0x340000 0x40000>;
+ };
+
+ partition@8 {
+ label = "UBI";
+ reg = <0x380000 0x1fc80000>;
+ };
+ };
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ tps: tps@24 {
+ reg = <0x24>;
+ };
+};
+
+&lcdc {
+ blue-and-red-wiring = "crossed";
+ status = "okay";
+};
+
+&mmc1 {
+ bus-width = <0x4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&vmmcsd_fixed>;
+ status = "okay";
+};
+
+&rtc {
+ clocks = <&clk_32768_ck>, <&clk_24mhz_clkctrl AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL 0>;
+ clock-names = "ext-clk", "int-clk";
+ system-power-controller;
+};
+
+&spi0 {
+ ti,pindir-d0-out-d1-in;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ status = "okay";
+};
+
+#include "tps65217.dtsi"
+
+&tps {
+ ti,pmic-shutdown-controller;
+ interrupt-parent = <&intc>;
+ interrupts = <7>; /* NMI */
+
+ backlight {
+ isel = <1>; /* 1 - ISET1, 2 - ISET2 */
+ fdim = <100>; /* TPS65217_BL_FDIM_100HZ */
+ default-brightness = <100>;
+ };
+
+ regulators {
+ dcdc1_reg: regulator@0 {
+ regulator-name = "vdds_dpr";
+ regulator-always-on;
+ };
+
+ dcdc2_reg: regulator@1 {
+ regulator-name = "vdd_mpu";
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <1351500>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc3_reg: regulator@2 {
+ regulator-name = "vdd_core";
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: regulator@3 {
+ regulator-name = "vio,vrtc,vdds";
+ regulator-always-on;
+ };
+
+ ldo2_reg: regulator@4 {
+ regulator-name = "vdd_3v3aux";
+ regulator-always-on;
+ };
+
+ ldo3_reg: regulator@5 {
+ regulator-name = "vdd_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo4_reg: regulator@6 {
+ regulator-name = "vdd_3v3a";
+ regulator-always-on;
+ };
+ };
+};
+
+&tscadc {
+ status = "okay";
+
+ adc {
+ ti,adc-channels = <0 1 2 3 4 5 6>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+};
+
+&usb {
+ status = "okay";
+};
+
+&usb_ctrl_mod {
+ status = "okay";
+};
+
+&usb0 {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usb0_phy {
+ status = "okay";
+};
+
+&usb1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb1_phy {
+ status = "okay";
+};
+
+&am33xx_pinmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&clkout2_pin &gpio_pins>;
+
+ clkout2_pin: pinmux_clkout2_pin {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
+ >;
+ };
+
+ dmtimer7_pins: pinmux_dmtimer7_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE5)
+ >;
+ };
+
+ gpio_keys_pins: pinmux_gpio_keys_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x940, PIN_INPUT | MUX_MODE7)
+ >;
+ };
+
+ gpio_pins: pinmux_gpio_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x928, PIN_OUTPUT | MUX_MODE7)
+ AM33XX_IOPAD(0x990, PIN_OUTPUT | MUX_MODE7)
+ >;
+ };
+
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0)
+ >;
+ };
+
+ lcd_disen_pins: pinmux_lcd_disen_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x9a4, PIN_OUTPUT_PULLUP | SLEWCTRL_SLOW | MUX_MODE7)
+ >;
+ };
+
+ lcd_pins_default: pinmux_lcd_pins_default {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x820, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1)
+ AM33XX_IOPAD(0x824, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1)
+ AM33XX_IOPAD(0x828, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1)
+ AM33XX_IOPAD(0x82c, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1)
+ AM33XX_IOPAD(0x830, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1)
+ AM33XX_IOPAD(0x834, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1)
+ AM33XX_IOPAD(0x838, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1)
+ AM33XX_IOPAD(0x83c, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1)
+ AM33XX_IOPAD(0x8a0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8a4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8a8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8ac, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8b0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8b4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8b8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8bc, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8c0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8c4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8c8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8cc, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8d0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8d4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8d8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8dc, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8e0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8e4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8e8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ AM33XX_IOPAD(0x8ec, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0)
+ >;
+ };
+
+ lcd_pins_sleep: pinmux_lcd_pins_sleep {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x8a0, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8a4, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8a8, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8ac, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8b0, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8b4, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8b8, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8bc, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8c0, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8c4, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8c8, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8cc, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8d0, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8d4, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8d8, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8dc, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8e0, PIN_INPUT_PULLDOWN | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | SLEWCTRL_SLOW | MUX_MODE7)
+ AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | SLEWCTRL_SLOW | MUX_MODE7)
+ >;
+ };
+
+ leds_pins: pinmux_leds_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x868, PIN_OUTPUT | MUX_MODE7)
+ AM33XX_IOPAD(0x86c, PIN_OUTPUT | MUX_MODE7)
+ >;
+ };
+
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7)
+ >;
+ };
+
+ spi0_pins: pinmux_spi0_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x950, PIN_OUTPUT_PULLDOWN | MUX_MODE0)
+ AM33XX_IOPAD(0x954, PIN_OUTPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x95c, PIN_OUTPUT_PULLUP | MUX_MODE0)
+ >;
+ };
+
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0)
+ >;
+ };
+
+ nandflash_pins: pinmux_nandflash_pins {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x800, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x804, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x808, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x80c, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x810, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x814, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x818, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x81c, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x870, PIN_INPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x874, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0)
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index f2005ecca74f..9ac775c71072 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -484,10 +484,6 @@
dual_emac;
};
-&phy_sel {
- rmii-clock-ext;
-};
-
&davinci_mdio {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&davinci_mdio_default>;
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index 55b4c94cfafb..cbd22f25de95 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -123,10 +123,6 @@
phy-mode = "rmii";
};
-&phy_sel {
- rmii-clock-ext;
-};
-
&elm {
status = "okay";
};
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts
index 481edcfaf121..d0e8e720a4d6 100644
--- a/arch/arm/boot/dts/am335x-lxm.dts
+++ b/arch/arm/boot/dts/am335x-lxm.dts
@@ -328,10 +328,6 @@
dual_emac_res_vlan = <3>;
};
-&phy_sel {
- rmii-clock-ext;
-};
-
&mac {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&cpsw_default>;
diff --git a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
index 14f781953475..cb5913a69837 100644
--- a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
+++ b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
@@ -159,11 +159,6 @@
status = "okay";
};
-&phy_sel {
- reg= <0x44e10650 0xf5>;
- rmii-clock-ext;
-};
-
&sham {
status = "okay";
};
diff --git a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts
index 5a58efc0c874..e562ce40f290 100644
--- a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts
+++ b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts
@@ -446,11 +446,6 @@
dual_emac_res_vlan = <2>;
};
-&phy_sel {
- reg= <0x44e10650 0xf5>;
- rmii-clock-ext;
-};
-
&sham {
status = "okay";
};
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 428a25e952b0..015adb626b03 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -100,10 +100,6 @@
status = "okay";
};
-&phy_sel {
- rmii-clock-ext;
-};
-
/* I2C Busses */
&am33xx_pinmux {
i2c0_pins: pinmux_i2c0 {
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index d0fd68873689..bfbe27a80006 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* support for the bosch am335x based shc c3 board
*
 * Copyright (C) 2015 Heiko Schocher <hs@denx.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/dts-v1/;
@@ -215,7 +213,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
bus-width = <0x4>;
- cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
cd-inverted;
max-frequency = <26000000>;
vmmc-supply = <&vmmcsd_fixed>;
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 7b818d9d2eab..f459ec316a22 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -279,17 +279,9 @@
#pinctrl-cells = <1>;
ranges = <0 0 0x2000>;
- phy_sel: cpsw-phy-sel@650 {
- compatible = "ti,am3352-cpsw-phy-sel";
- reg= <0x650 0x4>;
- reg-names = "gmii-sel";
- };
-
am33xx_pinmux: pinmux@800 {
compatible = "pinctrl-single";
reg = <0x800 0x238>;
- #address-cells = <1>;
- #size-cells = <0>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0x7f>;
@@ -302,6 +294,12 @@
#size-cells = <1>;
ranges = <0 0 0x800>;
+ phy_gmii_sel: phy-gmii-sel {
+ compatible = "ti,am3352-phy-gmii-sel";
+ reg = <0x650 0x4>;
+ #phy-cells = <2>;
+ };
+
scm_clocks: clocks {
#address-cells = <1>;
#size-cells = <0>;
@@ -717,7 +715,6 @@
interrupts = <40 41 42 43>;
ranges = <0 0 0x8000>;
syscon = <&scm_conf>;
- cpsw-phy-sel = <&phy_sel>;
status = "disabled";
davinci_mdio: mdio@1000 {
@@ -733,11 +730,13 @@
cpsw_emac0: slave@200 {
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1 1>;
};
cpsw_emac1: slave@300 {
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2 1>;
};
};
};
diff --git a/arch/arm/boot/dts/am3874-iceboard.dts b/arch/arm/boot/dts/am3874-iceboard.dts
new file mode 100644
index 000000000000..883fb85135d4
--- /dev/null
+++ b/arch/arm/boot/dts/am3874-iceboard.dts
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device tree for Winterland IceBoard
+ *
+ * http://mcgillcosmology.com
+ * http://threespeedlogic.com
+ *
+ * This is an ARM + FPGA instrumentation board used at telescopes in
+ * Antarctica (the South Pole Telescope), Chile (POLARBEAR), and at the DRAO
+ * observatory in British Columbia (CHIME).
+ *
+ * Copyright (c) 2019 Three-Speed Logic, Inc. <gsmecher@threespeedlogic.com>
+ */
+
+/dts-v1/;
+
+#include "dm814x.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Winterland IceBoard";
+ compatible = "ti,dm8148", "ti,dm814";
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ bootargs = "earlycon";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>; /* 1 GB */
+ };
+
+ vmmcsd_fixed: fixedregulator0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vmmcsd_fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+};
+
+/* The MAC provides internal delay for the transmit path ONLY, which is enabled
+ * as long as no -id/-txid/-rxid suffix is appended to "phy-mode".
+ *
+ * The receive path is delayed at the PHY. The recommended register settings
+ * are 0xf0 for the control bits, and 0x7777 for the data bits. However, the
+ * conversion code in the kernel lies: the PHY's registers are 120 ps per tap,
+ * and the kernel assumes 200 ps per tap. So we have fudged the numbers here to
+ * obtain the correct register settings.
+ */
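+/* Illustrative arithmetic, assuming the tap scaling described above: a DT
+ * value of rxc-skew-ps = <3000> is interpreted as 3000 / 200 = 15 taps,
+ * which on this PHY corresponds to roughly 15 * 120 = 1800 ps of real
+ * receive-clock delay.
+ */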
+&mac { dual_emac = <1>; };
+&cpsw_emac0 {
+ phy-handle = <&ethphy0>;
+ phy-mode = "rgmii";
+ dual_emac_res_vlan = <1>;
+};
+&cpsw_emac1 {
+ phy-handle = <&ethphy1>;
+ phy-mode = "rgmii";
+ dual_emac_res_vlan = <2>;
+};
+
+&davinci_mdio {
+ ethphy0: ethernet-phy@0 {
+ reg = <0x2>;
+
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+
+ rxd3-skew-ps = <0>;
+ rxd2-skew-ps = <0>;
+ rxd1-skew-ps = <0>;
+ rxd0-skew-ps = <0>;
+
+ phy-reset-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ reg = <0x1>;
+
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+
+ rxd3-skew-ps = <0>;
+ rxd2-skew-ps = <0>;
+ rxd1-skew-ps = <0>;
+ rxd0-skew-ps = <0>;
+
+ phy-reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mmc1 { status = "disabled"; };
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+ vmmc-supply = <&vmmcsd_fixed>;
+ bus-width = <4>;
+};
+&mmc3 { status = "disabled"; };
+
+&i2c1 {
+ /* Most I2C activity happens through this port, with the sole exception
+ * of the backplane. Since some addresses are assigned more than once
+ * across the mux channels, "i2c-mux-idle-disconnect" is important.
+ */
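+ /* For example, address 0x48 is used both by an ina230 behind mux
+ * channel 5 and by a tmp100 behind channel 7; disconnecting the mux
+ * when idle keeps the two from shadowing each other.
+ */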
+
+ pca9548@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ /* FMC A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ i2c-mux-idle-disconnect;
+ };
+
+ i2c@1 {
+ /* FMC B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ i2c-mux-idle-disconnect;
+ };
+
+ i2c@2 {
+ /* QSFP A */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ i2c-mux-idle-disconnect;
+ };
+
+ i2c@3 {
+ /* QSFP B */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ i2c-mux-idle-disconnect;
+ };
+
+ i2c@4 {
+ /* SFP */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ i2c-mux-idle-disconnect;
+ };
+
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ i2c-mux-idle-disconnect;
+
+ ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <5000>; };
+ ina230@41 { compatible = "ti,ina230"; reg = <0x41>; shunt-resistor = <5000>; };
+ ina230@42 { compatible = "ti,ina230"; reg = <0x42>; shunt-resistor = <5000>; };
+
+ ina230@44 { compatible = "ti,ina230"; reg = <0x44>; shunt-resistor = <5000>; };
+ ina230@45 { compatible = "ti,ina230"; reg = <0x45>; shunt-resistor = <5000>; };
+ ina230@46 { compatible = "ti,ina230"; reg = <0x46>; shunt-resistor = <5000>; };
+
+ ina230@47 { compatible = "ti,ina230"; reg = <0x47>; shunt-resistor = <5500>; };
+ ina230@48 { compatible = "ti,ina230"; reg = <0x48>; shunt-resistor = <2360>; };
+ ina230@49 { compatible = "ti,ina230"; reg = <0x49>; shunt-resistor = <2360>; };
+ ina230@43 { compatible = "ti,ina230"; reg = <0x43>; shunt-resistor = <2360>; };
+ ina230@4b { compatible = "ti,ina230"; reg = <0x4b>; shunt-resistor = <5500>; };
+ ina230@4c { compatible = "ti,ina230"; reg = <0x4c>; shunt-resistor = <2360>; };
+ ina230@4d { compatible = "ti,ina230"; reg = <0x4d>; shunt-resistor = <770>; };
+ ina230@4e { compatible = "ti,ina230"; reg = <0x4e>; shunt-resistor = <770>; };
+ ina230@4f { compatible = "ti,ina230"; reg = <0x4f>; shunt-resistor = <770>; };
+ };
+
+ i2c@6 {
+ /* Backplane */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ i2c-mux-idle-disconnect;
+ };
+
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ i2c-mux-idle-disconnect;
+
+ u41: pca9575@20 {
+ compatible = "nxp,pca9575";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "FMCA_EN_12V0", "FMCA_EN_3V3", "FMCA_EN_VADJ", "FMCA_PG_M2C",
+ "FMCA_PG_C2M", "FMCA_PRSNT_M2C_L", "FMCA_CLK_DIR", "SFP_LOS",
+ "FMCB_EN_12V0", "FMCB_EN_3V3", "FMCB_EN_VADJ", "FMCB_PG_M2C",
+ "FMCB_PG_C2M", "FMCB_PRSNT_M2C_L", "FMCB_CLK_DIR", "SFP_ModPrsL";
+ reset_gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ };
+
+ u42: pca9575@21 {
+ compatible = "nxp,pca9575";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ "QSFPA_ModPrsL", "QSFPA_IntL", "QSFPA_ResetL", "QSFPA_ModSelL",
+ "QSFPA_LPMode", "QSFPB_ModPrsL", "QSFPB_IntL", "QSFPB_ResetL",
+ "SFP_TxFault", "SFP_TxDisable", "SFP_RS0", "SFP_RS1",
+ "QSFPB_ModSelL", "QSFPB_LPMode", "SEL_SFP", "ARM_MR";
+ reset_gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ };
+
+ u48: pca9575@22 {
+ compatible = "nxp,pca9575";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ sw-gpios = <&u48 0 0>, <&u48 1 0>, <&u48 2 0>, <&u48 3 0>,
+ <&u48 4 0>, <&u48 5 0>, <&u48 6 0>, <&u48 7 0>;
+ led-gpios = <&u48 7 0>, <&u48 6 0>, <&u48 5 0>, <&u48 4 0>,
+ <&u48 3 0>, <&u48 2 0>, <&u48 1 0>, <&u48 0 0>;
+
+ gpio-line-names =
+ "GP_SW1", "GP_SW2", "GP_SW3", "GP_SW4",
+ "GP_SW5", "GP_SW6", "GP_SW7", "GP_SW8",
+ "GP_LED8", "GP_LED7", "GP_LED6", "GP_LED5",
+ "GP_LED4", "GP_LED3", "GP_LED2", "GP_LED1";
+ reset_gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ };
+
+ u59: pca9575@23 {
+ compatible = "nxp,pca9575";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names =
+ "GP_LED9", "GP_LED10", "GP_LED11", "GP_LED12",
+ "GTX1V8PowerFault", "PHYAPowerFault", "PHYBPowerFault", "ArmPowerFault",
+ "BP_SLOW_GPIO0", "BP_SLOW_GPIO1", "BP_SLOW_GPIO2", "BP_SLOW_GPIO3",
+ "BP_SLOW_GPIO4", "BP_SLOW_GPIO5", "__unused_u59_p16", "__unused_u59_p17";
+ reset_gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ };
+
+ tmp100@48 { compatible = "ti,tmp100"; reg = <0x48>; };
+ tmp100@4a { compatible = "ti,tmp100"; reg = <0x4a>; };
+ tmp100@4b { compatible = "ti,tmp100"; reg = <0x4b>; };
+ tmp100@4c { compatible = "ti,tmp100"; reg = <0x4c>; };
+
+ /* EEPROM bank and serial number are treated as separate devices */
+ at24c01@57 { compatible = "atmel,24c01"; reg = <0x57>; };
+ at24cs01@5f { compatible = "atmel,24cs01"; reg = <0x5f>; };
+ };
+ };
+};
+
+&i2c2 {
+ pca9548@71 {
+ compatible = "nxp,pca9548";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@6 {
+ /* Backplane */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ multi-master;
+
+ /* All backplanes should have this -- it's how we know they're there. */
+ at24c08@54 { compatible = "atmel,24c08"; reg = <0x54>; };
+ at24cs08@5c { compatible = "atmel,24cs08"; reg = <0x5c>; };
+
+ /* 16 slot backplane */
+ tmp421@4d { compatible = "ti,tmp421"; reg = <0x4d>; };
+ tmp421@4e { compatible = "ti,tmp421"; reg = <0x4e>; };
+ ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <2360>; };
+ amc6821@18 { compatible = "ti,amc6821"; reg = <0x18>; };
+
+ /* Single slot backplane */
+ };
+ };
+};
+
+&pincntl {
+ mmc2_pins: pinmux_mmc2_pins {
+ pinctrl-single,pins = <
+ DM814X_IOPAD(0x0800, PIN_INPUT | 0x1) /* SD1_CLK */
+ DM814X_IOPAD(0x0804, PIN_INPUT_PULLUP | 0x1) /* SD1_CMD */
+ DM814X_IOPAD(0x0808, PIN_INPUT_PULLUP | 0x1) /* SD1_DAT[0] */
+ DM814X_IOPAD(0x080c, PIN_INPUT_PULLUP | 0x1) /* SD1_DAT[1] */
+ DM814X_IOPAD(0x0810, PIN_INPUT_PULLUP | 0x1) /* SD1_DAT[2] */
+ DM814X_IOPAD(0x0814, PIN_INPUT_PULLUP | 0x1) /* SD1_DAT[3] */
+ DM814X_IOPAD(0x0924, PIN_INPUT_PULLUP | 0x40) /* SD1_POW */
+ DM814X_IOPAD(0x0928, PIN_INPUT | 0x40) /* SD1_SDWP */
+ DM814X_IOPAD(0x093C, PIN_INPUT | 0x2) /* SD1_SDCD */
+ >;
+ };
+
+ usb0_pins: pinmux_usb0_pins {
+ pinctrl-single,pins = <
+ DM814X_IOPAD(0x0c34, PIN_OUTPUT | 0x1) /* USB0_DRVVBUS */
+ >;
+ };
+
+ usb1_pins: pinmux_usb1_pins {
+ pinctrl-single,pins = <
+ DM814X_IOPAD(0x0834, PIN_OUTPUT | 0x80) /* USB1_DRVVBUS */
+ >;
+ };
+
+ gpio1_pins: pinmux_gpio1_pins {
+ pinctrl-single,pins = <
+ DM814X_IOPAD(0x081c, PIN_OUTPUT | 0x80) /* PROGRAM_B */
+ DM814X_IOPAD(0x0820, PIN_INPUT | 0x80) /* INIT_B */
+ DM814X_IOPAD(0x0824, PIN_INPUT | 0x80) /* DONE */
+
+ DM814X_IOPAD(0x0838, PIN_INPUT_PULLUP | 0x80) /* FMCA_TMS */
+ DM814X_IOPAD(0x083c, PIN_INPUT_PULLUP | 0x80) /* FMCA_TCK */
+ DM814X_IOPAD(0x0898, PIN_INPUT_PULLUP | 0x80) /* FMCA_TDO */
+ DM814X_IOPAD(0x089c, PIN_INPUT_PULLUP | 0x80) /* FMCA_TDI */
+ DM814X_IOPAD(0x08ac, PIN_INPUT_PULLUP | 0x80) /* FMCA_TRST */
+
+ DM814X_IOPAD(0x08b0, PIN_INPUT_PULLUP | 0x80) /* FMCB_TMS */
+ DM814X_IOPAD(0x0a88, PIN_INPUT_PULLUP | 0x80) /* FMCB_TCK */
+ DM814X_IOPAD(0x0a8c, PIN_INPUT_PULLUP | 0x80) /* FMCB_TDO */
+ DM814X_IOPAD(0x08bc, PIN_INPUT_PULLUP | 0x80) /* FMCB_TDI */
+ DM814X_IOPAD(0x0a94, PIN_INPUT_PULLUP | 0x80) /* FMCB_TRST */
+
+ DM814X_IOPAD(0x08d4, PIN_INPUT_PULLUP | 0x80) /* FPGA_TMS */
+ DM814X_IOPAD(0x0aa8, PIN_INPUT_PULLUP | 0x80) /* FPGA_TCK */
+ DM814X_IOPAD(0x0adc, PIN_INPUT_PULLUP | 0x80) /* FPGA_TDO */
+ DM814X_IOPAD(0x0ab0, PIN_INPUT_PULLUP | 0x80) /* FPGA_TDI */
+ >;
+ };
+
+ gpio2_pins: pinmux_gpio2_pins {
+ pinctrl-single,pins = <
+ DM814X_IOPAD(0x090c, PIN_INPUT_PULLUP | 0x80) /* PHY A IRQ */
+ DM814X_IOPAD(0x0910, PIN_INPUT_PULLUP | 0x80) /* PHY A RESET */
+ DM814X_IOPAD(0x08f4, PIN_INPUT_PULLUP | 0x80) /* PHY B IRQ */
+ DM814X_IOPAD(0x08f8, PIN_INPUT_PULLUP | 0x80) /* PHY B RESET */
+
+ //DM814X_IOPAD(0x0a14, PIN_INPUT_PULLUP | 0x80) /* ARM IRQ */
+ //DM814X_IOPAD(0x0900, PIN_INPUT | 0x80) /* GPIO IRQ */
+ DM814X_IOPAD(0x0a2c, PIN_INPUT_PULLUP | 0x80) /* GPIO RESET */
+ >;
+ };
+
+ gpio4_pins: pinmux_gpio4_pins {
+ pinctrl-single,pins = <
+ /* The PLL doesn't react well to the SPI controller reset, so
+ * we force the CS lines to pull up as GPIOs until we're ready.
+ * See https://e2e.ti.com/support/processors/f/791/t/276011?Linux-support-for-AM3874-DM8148-in-Arago-linux-omap3
+ */
+ DM814X_IOPAD(0x0b3c, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO0 */
+ DM814X_IOPAD(0x0b40, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO1 */
+ DM814X_IOPAD(0x0b44, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO2 */
+ DM814X_IOPAD(0x0b48, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO3 */
+ DM814X_IOPAD(0x0b4c, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO4 */
+ DM814X_IOPAD(0x0b50, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO5 */
+ >;
+ };
+
+ spi2_pins: pinmux_spi2_pins {
+ pinctrl-single,pins = <
+ DM814X_IOPAD(0x0950, PIN_INPUT_PULLUP | 0x80) /* PLL SPI CS1 as GPIO */
+ DM814X_IOPAD(0x0818, PIN_INPUT_PULLUP | 0x80) /* PLL SPI CS2 as GPIO */
+ >;
+ };
+
+ spi4_pins: pinmux_spi4_pins {
+ pinctrl-single,pins = <
+ DM814X_IOPAD(0x0a7c, 0x20)
+ DM814X_IOPAD(0x0b74, 0x20)
+ DM814X_IOPAD(0x0b78, PIN_OUTPUT | 0x20)
+ DM814X_IOPAD(0x0b7c, PIN_OUTPUT_PULLDOWN | 0x20)
+ DM814X_IOPAD(0x0b80, PIN_INPUT | 0x20)
+ >;
+ };
+};
+
+&gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio1_pins>;
+ gpio-line-names =
+ "", "PROGRAM_B", "INIT_B", "DONE", /* 0-3 */
+ "", "", "", "", /* 4-7 */
+ "FMCA_TMS", "FMCA_TCK", "FMCA_TDO", "FMCA_TDI", /* 8-11 */
+ "", "", "", "FMCA_TRST", /* 12-15 */
+ "FMCB_TMS", "FMCB_TCK", "FMCB_TDO", "FMCB_TDI", /* 16-19 */
+ "FMCB_TRST", "", "", "", /* 20-23 */
+ "FPGA_TMS", "FPGA_TCK", "FPGA_TDO", "FPGA_TDI", /* 24-27 */
+ "", "", "", ""; /* 28-31 */
+};
+
+&gpio2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio2_pins>;
+ gpio-line-names =
+ "PHYA_IRQ_N", "PHYA_RESET_N", "", "", /* 0-3 */
+ "", "", "", "PHYB_IRQ_N", /* 4-7 */
+ "PHYB_RESET_N", "ARM_IRQ", "GPIO_IRQ", ""; /* 8-11 */
+};
+
+&gpio3 {
+ pinctrl-names = "default";
+ /*pinctrl-0 = <&gpio3_pins>;*/
+ gpio-line-names =
+ "", "", "ARMClkSel0", "", /* 0-3 */
+ "EnFPGARef", "", "", "ARMClkSel1"; /* 4-7 */
+};
+
+&gpio4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio4_pins>;
+ gpio-line-names =
+ "BP_ARM_GPIO0", "BP_ARM_GPIO1", "BP_ARM_GPIO2", "BP_ARM_GPIO3",
+ "BP_ARM_GPIO4", "BP_ARM_GPIO5";
+};
+
+&usb0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins>;
+ dr_mode = "host";
+};
+
+&usb1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_pins>;
+ dr_mode = "host";
+};
+
+&mcspi1 {
+ s25fl256@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+
+ fsbl@0 {
+ /* 256 kB */
+ label = "U-Boot-min";
+ reg = <0 0x40000>;
+ };
+ ssbl@1 {
+ /* 512 kB */
+ label = "U-Boot";
+ reg = <0x40000 0x80000>;
+ };
+ bootenv@2 {
+ /* 256 kB */
+ label = "U-Boot Env";
+ reg = <0xc0000 0x40000>;
+ };
+ kernel@3 {
+ /* 4 MB */
+ label = "Kernel";
+ reg = <0x100000 0x400000>;
+ };
+ ipmi@4 {
+ label = "IPMI FRU";
+ reg = <0x500000 0x40000>;
+ };
+ fs@5 {
+ label = "File System";
+ reg = <0x540000 0x1ac0000>;
+ };
+ };
+};
+
+&mcspi3 {
+ /* DMA event numbers stolen from MCASP */
+ dmas = <&edma_xbar 8 0 16 &edma_xbar 9 0 17
+ &edma_xbar 10 0 18 &edma_xbar 11 0 19>;
+ dma-names = "tx0", "rx0", "tx1", "rx1";
+};
+
+&mcspi4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi4_pins>;
+
+ /* DMA event numbers stolen from MCASP, MCBSP */
+ dmas = <&edma_xbar 12 0 20 &edma_xbar 13 0 21>;
+ dma-names = "tx0", "rx0";
+};
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index f4a20cade808..4c6ee37ea573 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -71,7 +71,7 @@
pinctrl-0 = <&matrix_keypad_default>;
pinctrl-1 = <&matrix_keypad_sleep>;
- linux,wakeup;
+ wakeup-source;
row-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH /* Bank0, pin3 */
&gpio4 3 GPIO_ACTIVE_HIGH /* Bank4, pin3 */
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index ca0896f80248..85c6f4ff1824 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -280,12 +280,6 @@
#size-cells = <1>;
ranges = <0 0 0x4000>;
- phy_sel: cpsw-phy-sel@650 {
- compatible = "ti,am43xx-cpsw-phy-sel";
- reg= <0x650 0x4>;
- reg-names = "gmii-sel";
- };
-
am43xx_pinmux: pinmux@800 {
compatible = "ti,am437-padconf",
"pinctrl-single";
@@ -300,11 +294,17 @@
};
scm_conf: scm_conf@0 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x0 0x800>;
#address-cells = <1>;
#size-cells = <1>;
+ phy_gmii_sel: phy-gmii-sel {
+ compatible = "ti,am43xx-phy-gmii-sel";
+ reg = <0x650 0x4>;
+ #phy-cells = <2>;
+ };
+
scm_clocks: clocks {
#address-cells = <1>;
#size-cells = <0>;
@@ -555,7 +555,6 @@
cpts_clock_shift = <29>;
ranges = <0 0 0x8000>;
syscon = <&scm_conf>;
- cpsw-phy-sel = <&phy_sel>;
davinci_mdio: mdio@1000 {
compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio";
@@ -572,11 +571,13 @@
cpsw_emac0: slave@200 {
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1 0>;
};
cpsw_emac1: slave@300 {
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2 0>;
};
};
};
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 4ea753b3ee43..9dfd80e3b76e 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -584,10 +584,7 @@
&cpsw_emac0 {
phy-handle = <&ethphy0>;
phy-mode = "rmii";
-};
-
-&phy_sel {
- rmii-clock-ext;
+ phys = <&phy_gmii_sel 1 1>;
};
&i2c0 {
diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
index 0e4c7c4c8c09..610506723ea5 100644
--- a/arch/arm/boot/dts/arm-realview-eb.dtsi
+++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
@@ -22,9 +22,10 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "arm,realview-eb";
chosen { };
@@ -38,6 +39,7 @@
};
memory {
+ device_type = "memory";
/* 128 MiB memory @ 0x0 */
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index 83e0fbc4a1a1..cbbb8878daa3 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -23,9 +23,10 @@
/dts-v1/;
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "ARM RealView PB1176";
compatible = "arm,realview-pb1176";
@@ -40,6 +41,7 @@
};
memory {
+ device_type = "memory";
/* 128 MiB memory @ 0x0 */
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts
index 2f6aa24a0b67..2015619ca22c 100644
--- a/arch/arm/boot/dts/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts
@@ -23,9 +23,10 @@
/dts-v1/;
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "ARM RealView PB11MPcore";
compatible = "arm,realview-pb11mp";
@@ -39,6 +40,7 @@
};
memory {
+ device_type = "memory";
/*
* The PB11MPCore has 512 MiB memory @ 0x70000000
 * and the first 256 MiB are also remapped @ 0x00000000
diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi
index 916a97734f84..a81e9c282432 100644
--- a/arch/arm/boot/dts/arm-realview-pbx.dtsi
+++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi
@@ -22,9 +22,10 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "arm,realview-pbx";
chosen { };
@@ -39,6 +40,7 @@
};
memory {
+ device_type = "memory";
/* 128 MiB memory @ 0x0 */
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
index 2bfb3108b5b2..c910d157a686 100644
--- a/arch/arm/boot/dts/armada-370-rd.dts
+++ b/arch/arm/boot/dts/armada-370-rd.dts
@@ -114,48 +114,6 @@
};
};
};
-
- dsa {
- status = "disabled";
-
- compatible = "marvell,dsa";
- #address-cells = <2>;
- #size-cells = <0>;
-
- dsa,ethernet = <&eth1>;
- dsa,mii-bus = <&mdio>;
-
- switch@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x10 0>; /* MDIO address 16, switch 0 in tree */
-
- port@0 {
- reg = <0>;
- label = "lan0";
- };
-
- port@1 {
- reg = <1>;
- label = "lan1";
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- };
-
- port@3 {
- reg = <3>;
- label = "lan3";
- };
-
- port@5 {
- reg = <5>;
- label = "cpu";
- };
- };
- };
};
&pciec {
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts
index 89a354b43978..20f8d4667753 100644
--- a/arch/arm/boot/dts/armada-388-clearfog.dts
+++ b/arch/arm/boot/dts/armada-388-clearfog.dts
@@ -30,64 +30,6 @@
};
};
- dsa@0 {
- status = "disabled";
-
- compatible = "marvell,dsa";
- dsa,ethernet = <&eth1>;
- dsa,mii-bus = <&mdio>;
- pinctrl-0 = <&clearfog_dsa0_clk_pins &clearfog_dsa0_pins>;
- pinctrl-names = "default";
- #address-cells = <2>;
- #size-cells = <0>;
-
- switch@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <4 0>;
-
- port@0 {
- reg = <0>;
- label = "lan5";
- };
-
- port@1 {
- reg = <1>;
- label = "lan4";
- };
-
- port@2 {
- reg = <2>;
- label = "lan3";
- };
-
- port@3 {
- reg = <3>;
- label = "lan2";
- };
-
- port@4 {
- reg = <4>;
- label = "lan1";
- };
-
- port@5 {
- reg = <5>;
- label = "cpu";
- };
-
- port@6 {
- /* 88E1512 external phy */
- reg = <6>;
- label = "lan6";
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
pinctrl-0 = <&rear_button_pins>;
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dtsi b/arch/arm/boot/dts/armada-388-clearfog.dtsi
index 1b0d0680c8b6..0d81600ca247 100644
--- a/arch/arm/boot/dts/armada-388-clearfog.dtsi
+++ b/arch/arm/boot/dts/armada-388-clearfog.dtsi
@@ -93,6 +93,7 @@
bm,pool-long = <2>;
bm,pool-short = <1>;
buffer-manager = <&bm>;
+ phys = <&comphy1 1>;
phy-mode = "sgmii";
status = "okay";
};
@@ -103,6 +104,7 @@
bm,pool-short = <1>;
buffer-manager = <&bm>;
managed = "in-band-status";
+ phys = <&comphy5 2>;
phy-mode = "sgmii";
sfp = <&sfp>;
status = "okay";
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 929459c42760..96c18703e471 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -9,13 +9,15 @@
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*/
-#include "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
model = "Marvell Armada 38x family SoC";
compatible = "marvell,armada380";
@@ -335,6 +337,43 @@
#clock-cells = <1>;
};
+ comphy: phy@18300 {
+ compatible = "marvell,armada-380-comphy";
+ reg = <0x18300 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ comphy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+
+ comphy1: phy@1 {
+ reg = <1>;
+ #phy-cells = <1>;
+ };
+
+ comphy2: phy@2 {
+ reg = <2>;
+ #phy-cells = <1>;
+ };
+
+ comphy3: phy@3 {
+ reg = <3>;
+ #phy-cells = <1>;
+ };
+
+ comphy4: phy@4 {
+ reg = <4>;
+ #phy-cells = <1>;
+ };
+
+ comphy5: phy@5 {
+ reg = <5>;
+ #phy-cells = <1>;
+ };
+ };
+
coreclk: mvebu-sar@18600 {
compatible = "marvell,armada-380-core-clock";
reg = <0x18600 0x04>;
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index f0c949831efb..b1b86934c688 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -7,13 +7,14 @@
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*/
-#include "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Marvell Armada 39x family SoC";
compatible = "marvell,armada390";
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index f3ac7483afed..5d04dc68cf57 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -144,30 +144,32 @@
status = "okay";
};
- nand@d0000 {
+ nand-controller@d0000 {
status = "okay";
- label = "pxa3xx_nand-0";
- num-cs = <1>;
- marvell,nand-keep-config;
- nand-on-flash-bbt;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "U-Boot";
- reg = <0 0x800000>;
- };
- partition@800000 {
- label = "Linux";
- reg = <0x800000 0x800000>;
- };
- partition@1000000 {
- label = "Filesystem";
- reg = <0x1000000 0x3f000000>;
+ nand@0 {
+ reg = <0>;
+ label = "pxa3xx_nand-0";
+ nand-rb = <0>;
+ nand-on-flash-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0 0x800000>;
+ };
+ partition@800000 {
+ label = "Linux";
+ reg = <0x800000 0x800000>;
+ };
+ partition@1000000 {
+ label = "Filesystem";
+ reg = <0x1000000 0x3f000000>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 1139e9469a83..b4cca507cf13 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -160,12 +160,15 @@
status = "okay";
};
- nand@d0000 {
+ nand-controller@d0000 {
status = "okay";
- label = "pxa3xx_nand-0";
- num-cs = <1>;
- marvell,nand-keep-config;
- nand-on-flash-bbt;
+
+ nand@0 {
+ reg = <0>;
+ label = "pxa3xx_nand-0";
+ nand-rb = <0>;
+ nand-on-flash-bbt;
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
index bbbb38888bb8..87dcb502f72d 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -81,49 +81,52 @@
};
- nand@d0000 {
+ nand-controller@d0000 {
status = "okay";
- label = "pxa3xx_nand-0";
- num-cs = <1>;
- marvell,nand-keep-config;
- nand-on-flash-bbt;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "u-boot";
- reg = <0x00000000 0x000e0000>;
- read-only;
- };
-
- partition@e0000 {
- label = "u-boot-env";
- reg = <0x000e0000 0x00020000>;
- read-only;
- };
-
- partition@100000 {
- label = "u-boot-env2";
- reg = <0x00100000 0x00020000>;
- read-only;
- };
-
- partition@120000 {
- label = "zImage";
- reg = <0x00120000 0x00400000>;
- };
-
- partition@520000 {
- label = "initrd";
- reg = <0x00520000 0x00400000>;
- };
- partition@e00000 {
- label = "boot";
- reg = <0x00e00000 0x3f200000>;
+ nand@0 {
+ reg = <0>;
+ label = "pxa3xx_nand-0";
+ nand-rb = <0>;
+ nand-on-flash-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x00000000 0x000e0000>;
+ read-only;
+ };
+
+ partition@e0000 {
+ label = "u-boot-env";
+ reg = <0x000e0000 0x00020000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot-env2";
+ reg = <0x00100000 0x00020000>;
+ read-only;
+ };
+
+ partition@120000 {
+ label = "zImage";
+ reg = <0x00120000 0x00400000>;
+ };
+
+ partition@520000 {
+ label = "initrd";
+ reg = <0x00520000 0x00400000>;
+ };
+
+ partition@e00000 {
+ label = "boot";
+ reg = <0x00e00000 0x3f200000>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
index 7a2606c3b62e..8480a16919a0 100644
--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -210,53 +210,6 @@
compatible = "pwm-fan";
pwms = <&gpio0 24 4000>;
};
-
- dsa {
- status = "disabled";
-
- compatible = "marvell,dsa";
- #address-cells = <2>;
- #size-cells = <0>;
-
- dsa,ethernet = <&eth0>;
- dsa,mii-bus = <&mdio>;
-
- switch@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0>; /* MDIO address 0, switch 0 in tree */
-
- port@0 {
- reg = <0>;
- label = "lan4";
- };
-
- port@1 {
- reg = <1>;
- label = "lan3";
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- };
-
- port@3 {
- reg = <3>;
- label = "lan1";
- };
-
- port@4 {
- reg = <4>;
- label = "internet";
- };
-
- port@5 {
- reg = <5>;
- label = "cpu";
- };
- };
- };
};
&pciec {
diff --git a/arch/arm/boot/dts/artpec6.dtsi b/arch/arm/boot/dts/artpec6.dtsi
index 3e4115c2cd75..037157e6c5ee 100644
--- a/arch/arm/boot/dts/artpec6.dtsi
+++ b/arch/arm/boot/dts/artpec6.dtsi
@@ -43,9 +43,10 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/dma/nbpfaxi.h>
#include <dt-bindings/clock/axis,artpec6-clkctrl.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "axis,artpec6";
interrupt-parent = <&intc>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts b/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts
index bdfd8c9f3a7c..521afbea2c5b 100644
--- a/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts
@@ -173,6 +173,16 @@
};
};
};
+
+ dps650ab@58 {
+ compatible = "delta,dps650ab";
+ reg = <0x58>;
+ };
+
+ dps650ab@59 {
+ compatible = "delta,dps650ab";
+ reg = <0x59>;
+ };
};
&i2c9 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
index f8e7b71af7e6..4c2dcac738e8 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
@@ -21,6 +21,17 @@
memory@80000000 {
reg = <0x80000000 0x20000000>;
};
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>,
+ <&adc 4>, <&adc 5>, <&adc 6>;
+ };
+
+ iio-hwmon-battery {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 7>;
+ };
};
&fmc {
@@ -43,6 +54,16 @@
};
};
+&lpc_snoop {
+ status = "okay";
+ snoop-ports = <0x80>;
+};
+
+&lpc_ctrl {
+ // Enable lpc clock
+ status = "okay";
+};
+
&uart1 {
// Host Console
status = "okay";
@@ -51,11 +72,33 @@
&pinctrl_rxd1_default>;
};
+&uart2 {
+ // SoL Host Console
+ status = "okay";
+};
+
+&uart3 {
+ // SoL BMC Console
+ status = "okay";
+};
+
&uart5 {
// BMC Console
status = "okay";
};
+&kcs2 {
+ // BMC KCS channel 2
+ status = "okay";
+ kcs_addr = <0xca8>;
+};
+
+&kcs3 {
+ // BMC KCS channel 3
+ status = "okay";
+ kcs_addr = <0xca2>;
+};
+
&mac0 {
status = "okay";
@@ -64,6 +107,10 @@
use-ncsi;
};
+&adc {
+ status = "okay";
+};
+
&i2c0 {
status = "okay";
//Airmax Conn B, CPU0 PIROM, CPU1 PIROM
@@ -122,6 +169,10 @@
&i2c8 {
status = "okay";
+ tmp421@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
//Mezz Sensor SMBus
};
@@ -140,7 +191,7 @@
};
fan@1 {
- reg = <0x00>;
- aspeed,fan-tach-ch = /bits/ 8 <0x01>;
+ reg = <0x01>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x02>;
};
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts b/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts
new file mode 100644
index 000000000000..2337ee23f5c4
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Inspur Corporation
+/dts-v1/;
+
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+
+/ {
+ model = "ON5263M5 BMC";
+ compatible = "inspur,on5263m5-bmc", "aspeed,ast2500";
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "earlyprintk";
+ };
+
+ memory {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ vga_memory: framebuffer@9f000000 {
+ no-map;
+ reg = <0x9f000000 0x01000000>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ bmc_alive {
+ label = "bmc_alive";
+ gpios = <&gpio ASPEED_GPIO(I, 1) GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "timer";
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>,
+ <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>;
+ };
+
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+#include "openbmc-flash-layout.dtsi"
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ };
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&mac0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii1_default>;
+ use-ncsi;
+};
+
+&mac1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
+};
+
+&i2c6 {
+ status = "okay";
+
+ tmp421@4e {
+ compatible = "ti,tmp421";
+ reg = <0x4e>;
+ };
+
+ tmp112@48 {
+ compatible = "ti,tmp112";
+ reg = <0x48>;
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ pagesize = <32>;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+
+ adm1278@11 {
+ compatible = "adi,adm1278";
+ reg = <0x11>;
+ };
+};
+
+&gfx {
+ status = "okay";
+};
+
+&pinctrl {
+ aspeed,external-nodes = <&gfx &lhc>;
+};
+
+&pwm_tacho {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default>;
+
+ fan@0 {
+ reg = <0x00>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x00 0x01>;
+ };
+
+ fan@1 {
+ reg = <0x01>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x02 0x03>;
+ };
+};
+
+&adc {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
index 9aa1d4467453..b854ac0bae9a 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
@@ -169,6 +169,11 @@
&i2c3 {
status = "okay";
+
+ occ-hwmon@50 {
+ compatible = "ibm,p8-occ-hwmon";
+ reg = <0x50>;
+ };
};
&i2c4 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts b/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts
index 385c0f4b69ee..0d7c6339da46 100644
--- a/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts
@@ -116,6 +116,10 @@
status = "okay";
};
+&lpc_ctrl {
+ status = "okay";
+};
+
&lpc_snoop {
status = "okay";
snoop-ports = <0x80>;
@@ -134,6 +138,10 @@
pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
};
+&uart1 {
+ status = "okay";
+};
+
&uart5 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 69f6b9d2e7e7..9549f867aa1e 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -197,6 +197,7 @@
gpio-ranges = <&pinctrl 0 0 220>;
clocks = <&syscon ASPEED_CLK_APB>;
interrupt-controller;
+ #interrupt-cells = <2>;
};
timer: timer@1e782000 {
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index d107459fc0f8..3e4ed081505c 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -250,6 +250,7 @@
gpio-ranges = <&pinctrl 0 0 220>;
clocks = <&syscon ASPEED_CLK_APB>;
interrupt-controller;
+ #interrupt-cells = <2>;
};
timer: timer@1e782000 {
@@ -330,8 +331,32 @@
ranges = <0x0 0x1e789000 0x1000>;
lpc_bmc: lpc-bmc@0 {
- compatible = "aspeed,ast2500-lpc-bmc";
+ compatible = "aspeed,ast2500-lpc-bmc", "simple-mfd", "syscon";
reg = <0x0 0x80>;
+ reg-io-width = <4>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x80>;
+
+ kcs1: kcs1@0 {
+ compatible = "aspeed,ast2500-kcs-bmc";
+ interrupts = <8>;
+ kcs_chan = <1>;
+ status = "disabled";
+ };
+ kcs2: kcs2@0 {
+ compatible = "aspeed,ast2500-kcs-bmc";
+ interrupts = <8>;
+ kcs_chan = <2>;
+ status = "disabled";
+ };
+ kcs3: kcs3@0 {
+ compatible = "aspeed,ast2500-kcs-bmc";
+ interrupts = <8>;
+ kcs_chan = <3>;
+ status = "disabled";
+ };
};
lpc_host: lpc-host@80 {
@@ -343,6 +368,13 @@
#size-cells = <1>;
ranges = <0x0 0x80 0x1e0>;
+ kcs4: kcs4@0 {
+ compatible = "aspeed,ast2500-kcs-bmc";
+ interrupts = <8>;
+ kcs_chan = <4>;
+ status = "disabled";
+ };
+
lpc_ctrl: lpc-ctrl@0 {
compatible = "aspeed,ast2500-lpc-ctrl";
reg = <0x0 0x80>;
diff --git a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
index 0f6d335125e2..f245944bd5d7 100644
--- a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
+++ b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
@@ -22,7 +22,7 @@
wakeup {
label = "Wakeup";
linux,code = <10>;
- gpio-key,wakeup;
+ wakeup-source;
gpios = <&pioB 27 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index cf0087b4c9e1..33a159c0163f 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -62,6 +62,20 @@
ahb {
apb {
+ qspi1: spi@f0024000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi1_default>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ m25p,fast-read;
+ };
+ };
+
macb0: ethernet@f8008000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_macb0_default>;
@@ -78,6 +92,22 @@
pinctrl@fc038000 {
+ pinctrl_qspi1_default: qspi1_default {
+ sck_cs {
+ pinmux = <PIN_PB5__QSPI1_SCK>,
+ <PIN_PB6__QSPI1_CS>;
+ bias-disable;
+ };
+
+ data {
+ pinmux = <PIN_PB7__QSPI1_IO0>,
+ <PIN_PB8__QSPI1_IO1>,
+ <PIN_PB9__QSPI1_IO2>,
+ <PIN_PB10__QSPI1_IO3>;
+ bias-pull-up;
+ };
+ };
+
pinctrl_macb0_default: macb0_default {
pinmux = <PIN_PD9__GTXCK>,
<PIN_PD10__GTXEN>,
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index 4a258867ddf1..a48180555ef5 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -109,6 +109,10 @@
status = "okay";
};
+ qspi1: spi@f0024000 {
+ status = "okay";
+ };
+
spi0: spi@f8000000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0_default>;
diff --git a/arch/arm/boot/dts/at91-wb45n.dts b/arch/arm/boot/dts/at91-wb45n.dts
index 5b9512a6c89c..54d130c92185 100644
--- a/arch/arm/boot/dts/at91-wb45n.dts
+++ b/arch/arm/boot/dts/at91-wb45n.dts
@@ -22,7 +22,7 @@
label = "IRQBTN";
linux,code = <99>;
gpios = <&pioB 18 GPIO_ACTIVE_LOW>;
- gpio-key,wakeup = <1>;
+ wakeup-source;
};
};
};
diff --git a/arch/arm/boot/dts/at91-wb50n.dts b/arch/arm/boot/dts/at91-wb50n.dts
index 8cecc7051a86..a5e45bb95c04 100644
--- a/arch/arm/boot/dts/at91-wb50n.dts
+++ b/arch/arm/boot/dts/at91-wb50n.dts
@@ -23,7 +23,7 @@
label = "BTNESC";
linux,code = <1>; /* ESC button */
gpios = <&pioA 10 GPIO_ACTIVE_LOW>;
- gpio-key,wakeup = <1>;
+ wakeup-source;
};
irqbtn@31 {
@@ -31,7 +31,7 @@
label = "IRQBTN";
linux,code = <99>; /* SysReq button */
gpios = <&pioE 31 GPIO_ACTIVE_LOW>;
- gpio-key,wakeup = <1>;
+ wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index 2ad69a7fbc00..5a882a053816 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -10,13 +10,14 @@
* Licensed under GPLv2 or later.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/at91.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel AT91RM9200 family SoC";
compatible = "atmel,at91rm9200";
interrupt-parent = <&aic>;
@@ -49,6 +50,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x04000000>;
};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 7cd9c3bc4dfb..3b58b94b53c9 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -8,13 +8,14 @@
* Licensed under GPLv2 or later.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/at91.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel AT91SAM9260 family SoC";
compatible = "atmel,at91sam9260";
interrupt-parent = <&aic>;
@@ -46,6 +47,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x04000000>;
};
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index 01d700b63b45..a907a1fdd24c 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -6,13 +6,14 @@
* Licensed under GPLv2 only.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/at91.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel AT91SAM9261 family SoC";
compatible = "atmel,at91sam9261";
interrupt-parent = <&aic>;
@@ -43,6 +44,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index c5766da4e54e..3fb63d81f18e 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -6,13 +6,14 @@
* Licensed under GPLv2 only.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/at91.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel AT91SAM9263 family SoC";
compatible = "atmel,at91sam9263";
interrupt-parent = <&aic>;
@@ -45,6 +46,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index d16db1fa7e15..f36819607131 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -9,7 +9,6 @@
* Licensed under GPLv2 or later.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/dma/at91.h>
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -17,6 +16,8 @@
#include <dt-bindings/clock/at91.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel AT91SAM9G45 family SoC";
compatible = "atmel,at91sam9g45";
interrupt-parent = <&aic>;
@@ -51,6 +52,7 @@
};
memory {
+ device_type = "memory";
reg = <0x70000000 0x10000000>;
};
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 37cb81f457b5..f71d65e6e510 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -7,7 +7,6 @@
* Licensed under GPLv2 or later.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/dma/at91.h>
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -15,6 +14,8 @@
#include <dt-bindings/clock/at91.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel AT91SAM9N12 SoC";
compatible = "atmel,at91sam9n12";
interrupt-parent = <&aic>;
@@ -47,6 +48,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x10000000>;
};
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index 3862ff2f26e0..6b5777f3c20b 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -7,7 +7,6 @@
* Licensed under GPLv2 or later.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/clock/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -15,6 +14,8 @@
#include <dt-bindings/pwm/pwm.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel AT91SAM9RL family SoC";
compatible = "atmel,at91sam9rl", "atmel,at91sam9";
interrupt-parent = <&aic>;
@@ -48,6 +49,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x04000000>;
};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 07443a387a8f..79c4956d3902 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -9,7 +9,6 @@
* Licensed under GPLv2 or later.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/dma/at91.h>
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -17,6 +16,8 @@
#include <dt-bindings/clock/at91.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel AT91SAM9x5 family SoC";
compatible = "atmel,at91sam9x5";
interrupt-parent = <&aic>;
@@ -49,6 +50,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x10000000>;
};
diff --git a/arch/arm/boot/dts/atlas6-evb.dts b/arch/arm/boot/dts/atlas6-evb.dts
index ab042ca8dea1..40882419309d 100644
--- a/arch/arm/boot/dts/atlas6-evb.dts
+++ b/arch/arm/boot/dts/atlas6-evb.dts
@@ -15,6 +15,7 @@
compatible = "sirf,atlas6-cb", "sirf,atlas6";
memory {
+ device_type = "memory";
reg = <0x00000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 29598667420b..5587b98032a3 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -6,7 +6,6 @@
* Licensed under GPLv2 or later.
*/
-/include/ "skeleton.dtsi"
/ {
compatible = "sirf,atlas6";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi
index 83449b33de6b..f3de9af35b4d 100644
--- a/arch/arm/boot/dts/atlas7.dtsi
+++ b/arch/arm/boot/dts/atlas7.dtsi
@@ -6,7 +6,6 @@
* Licensed under GPLv2 or later.
*/
-/include/ "skeleton.dtsi"
/ {
compatible = "sirf,atlas7";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/axm55xx.dtsi b/arch/arm/boot/dts/axm55xx.dtsi
index 47799f59faa5..2a93d3ee3b66 100644
--- a/arch/arm/boot/dts/axm55xx.dtsi
+++ b/arch/arm/boot/dts/axm55xx.dtsi
@@ -12,9 +12,9 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/lsi,axm5516-clks.h>
-#include "skeleton64.dtsi"
-
/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
interrupt-parent = <&gic>;
aliases {
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 253df7170a4e..5f7b46503a51 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -34,9 +34,9 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/bcm-cygnus.h>
-#include "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "brcm,cygnus";
model = "Broadcom Cygnus SoC";
interrupt-parent = <&gic>;
@@ -45,6 +45,11 @@
ethernet0 = &eth0;
};
+ memory {
+ device_type = "memory";
+ reg = <0 0>;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 0d2538b46139..6925b30c2253 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -34,9 +34,9 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/bcm-nsp.h>
-#include "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "brcm,nsp";
model = "Broadcom Northstar Plus SoC";
interrupt-parent = <&gic>;
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index db7cded1b7ad..b99c2e579622 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -16,9 +16,9 @@
#include "dt-bindings/clock/bcm281xx.h"
-#include "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "BCM11351 SoC";
compatible = "brcm,bcm11351";
interrupt-parent = <&gic>;
diff --git a/arch/arm/boot/dts/bcm21664-garnet.dts b/arch/arm/boot/dts/bcm21664-garnet.dts
index e87cb26ddf84..8b045cfab64b 100644
--- a/arch/arm/boot/dts/bcm21664-garnet.dts
+++ b/arch/arm/boot/dts/bcm21664-garnet.dts
@@ -22,6 +22,7 @@
compatible = "brcm,bcm21664-garnet", "brcm,bcm21664";
memory {
+ device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
diff --git a/arch/arm/boot/dts/bcm21664.dtsi b/arch/arm/boot/dts/bcm21664.dtsi
index 266f2611dc22..758daa334148 100644
--- a/arch/arm/boot/dts/bcm21664.dtsi
+++ b/arch/arm/boot/dts/bcm21664.dtsi
@@ -16,9 +16,9 @@
#include "dt-bindings/clock/bcm21664.h"
-#include "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "BCM21664 SoC";
compatible = "brcm,bcm21664";
interrupt-parent = <&gic>;
diff --git a/arch/arm/boot/dts/bcm23550-sparrow.dts b/arch/arm/boot/dts/bcm23550-sparrow.dts
index 4d525ccb48c8..1c66b15f3013 100644
--- a/arch/arm/boot/dts/bcm23550-sparrow.dts
+++ b/arch/arm/boot/dts/bcm23550-sparrow.dts
@@ -46,6 +46,7 @@
};
memory {
+ device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
};
diff --git a/arch/arm/boot/dts/bcm23550.dtsi b/arch/arm/boot/dts/bcm23550.dtsi
index a7a643f38385..701198f5f498 100644
--- a/arch/arm/boot/dts/bcm23550.dtsi
+++ b/arch/arm/boot/dts/bcm23550.dtsi
@@ -36,9 +36,9 @@
/* BCM23550 and BCM21664 have almost identical clocks */
#include "dt-bindings/clock/bcm21664.h"
-#include "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "BCM23550 SoC";
compatible = "brcm,bcm23550";
interrupt-parent = <&gic>;
diff --git a/arch/arm/boot/dts/bcm28155-ap.dts b/arch/arm/boot/dts/bcm28155-ap.dts
index 9ce91dd60cb6..fbfca83bd28f 100644
--- a/arch/arm/boot/dts/bcm28155-ap.dts
+++ b/arch/arm/boot/dts/bcm28155-ap.dts
@@ -22,6 +22,7 @@
compatible = "brcm,bcm28155-ap", "brcm,bcm11351";
memory {
+ device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1 GB */
};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
index 2cd9c5e4f892..db8a6017f220 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
@@ -31,8 +31,8 @@
* "FOO" = GPIO line named "FOO" on the schematic
* "FOO_N" = GPIO line named "FOO" on schematic, active low
*/
- gpio-line-names = "SDA0",
- "SCL0",
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
"SDA1",
"SCL1",
"GPIO_GCLK",
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
index cfbdaacbaeba..1e40d672b055 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
@@ -33,8 +33,8 @@
* "FOO" = GPIO line named "FOO" on the schematic
* "FOO_N" = GPIO line named "FOO" on schematic, active low
*/
- gpio-line-names = "SDA0",
- "SCL0",
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
"SDA1",
"SCL1",
"GPIO_GCLK",
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
index 644d907bafbb..ba0167df6c5f 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
@@ -25,8 +25,6 @@
wifi_pwrseq: wifi-pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
- pinctrl-0 = <&wl_on>;
reset-gpios = <&gpio 41 GPIO_ACTIVE_LOW>;
};
};
@@ -40,8 +38,8 @@
* "FOO" = GPIO line named "FOO" on the schematic
* "FOO_N" = GPIO line named "FOO" on schematic, active low
*/
- gpio-line-names = "GPIO0",
- "GPIO1",
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
"SDA1",
"SCL1",
"GPIO_GCLK",
@@ -98,11 +96,6 @@
"SD_DATA3_R";
pinctrl-0 = <&gpioout &alt0>;
-
- wl_on: wl-on {
- brcm,pins = <41>;
- brcm,function = <BCM2835_FSEL_GPIO_OUT>;
- };
};
&hdmi {
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero.dts b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
index 00323ba8f7de..3b35a8a4a55f 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
@@ -28,8 +28,8 @@
* "FOO" = GPIO line named "FOO" on the schematic
* "FOO_N" = GPIO line named "FOO" on schematic, active low
*/
- gpio-line-names = "SDA0",
- "SCL0",
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
"SDA1",
"SCL1",
"GPIO_GCLK",
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index 29f970f864dc..715d50c64529 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -1,7 +1,7 @@
#include <dt-bindings/power/raspberrypi-power.h>
/ {
- memory {
+ memory@0 {
device_type = "memory";
reg = <0 0x10000000>;
};
@@ -19,8 +19,6 @@
soc {
firmware: firmware {
compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
- #address-cells = <0>;
- #size-cells = <0>;
mboxes = <&mailbox>;
};
@@ -87,10 +85,6 @@
power-domains = <&power RPI_POWER_DOMAIN_USB>;
};
-&v3d {
- power-domains = <&power RPI_POWER_DOMAIN_V3D>;
-};
-
&hdmi {
power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
status = "okay";
diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
index ac4408b34b58..7b4e651bafdd 100644
--- a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
+++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
@@ -9,7 +9,7 @@
compatible = "raspberrypi,2-model-b", "brcm,bcm2836";
model = "Raspberry Pi 2 Model B";
- memory {
+ memory@0 {
reg = <0 0x40000000>;
};
@@ -28,6 +28,72 @@
};
&gpio {
+ /*
+ * Taken from rpi_SCH_2b_1p2_reduced.pdf and
+ * the official GPU firmware DT blob.
+ *
+ * Legend:
+ * "NC" = not connected (no rail from the SoC)
+ * "FOO" = GPIO line named "FOO" on the schematic
+ * "FOO_N" = GPIO line named "FOO" on schematic, active low
+ */
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
+ "SDA1",
+ "SCL1",
+ "GPIO_GCLK",
+ "GPIO5",
+ "GPIO6",
+ "SPI_CE1_N",
+ "SPI_CE0_N",
+ "SPI_MISO",
+ "SPI_MOSI",
+ "SPI_SCLK",
+ "GPIO12",
+ "GPIO13",
+ /* Serial port */
+ "TXD0",
+ "RXD0",
+ "GPIO16",
+ "GPIO17",
+ "GPIO18",
+ "GPIO19",
+ "GPIO20",
+ "GPIO21",
+ "GPIO22",
+ "GPIO23",
+ "GPIO24",
+ "GPIO25",
+ "GPIO26",
+ "GPIO27",
+ "SDA0",
+ "SCL0",
+ "", /* GPIO30 */
+ "LAN_RUN",
+ "CAM_GPIO1",
+ "", /* GPIO33 */
+ "", /* GPIO34 */
+ "PWR_LOW_N",
+ "", /* GPIO36 */
+ "", /* GPIO37 */
+ "USB_LIMIT",
+ "", /* GPIO39 */
+ "PWM0_OUT",
+ "CAM_GPIO0",
+ "SMPS_SCL",
+ "SMPS_SDA",
+ "ETHCLK",
+ "PWM1_OUT",
+ "HDMI_HPD_N",
+ "STATUS_LED",
+ /* Used by SD Card */
+ "SD_CLK_R",
+ "SD_CMD_R",
+ "SD_DATA0_R",
+ "SD_DATA1_R",
+ "SD_DATA2_R",
+ "SD_DATA3_R";
+
pinctrl-0 = <&gpioout &alt0 &i2s_alt0>;
/* I2S interface */
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
new file mode 100644
index 000000000000..7f4437a8eedb
--- /dev/null
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+#include "bcm2837.dtsi"
+#include "bcm2836-rpi.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
+
+/ {
+ compatible = "raspberrypi,3-model-a-plus", "brcm,bcm2837";
+ model = "Raspberry Pi 3 Model A+";
+
+ chosen {
+ /* 8250 auxiliary UART instead of pl011 */
+ stdout-path = "serial1:115200n8";
+ };
+
+ memory@0 {
+ reg = <0 0x20000000>;
+ };
+
+ leds {
+ act {
+ gpios = <&gpio 29 GPIO_ACTIVE_HIGH>;
+ };
+
+ pwr {
+ label = "PWR";
+ gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&firmware {
+ expgpio: gpio {
+ compatible = "raspberrypi,firmware-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "",
+ "BT_WL_ON",
+ "STATUS_LED_R",
+ "",
+ "",
+ "CAM_GPIO0",
+ "CAM_GPIO1",
+ "";
+ status = "okay";
+ };
+};
+
+&gpio {
+ /*
+ * This is mostly based on the official GPU firmware DT blob.
+ *
+ * Legend:
+ * "NC" = not connected (no rail from the SoC)
+ * "FOO" = GPIO line named "FOO" on the schematic
+ * "FOO_N" = GPIO line named "FOO" on schematic, active low
+ */
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
+ "SDA1",
+ "SCL1",
+ "GPIO_GCLK",
+ "GPIO5",
+ "GPIO6",
+ "SPI_CE1_N",
+ "SPI_CE0_N",
+ "SPI_MISO",
+ "SPI_MOSI",
+ "SPI_SCLK",
+ "GPIO12",
+ "GPIO13",
+ /* Serial port */
+ "TXD1",
+ "RXD1",
+ "GPIO16",
+ "GPIO17",
+ "GPIO18",
+ "GPIO19",
+ "GPIO20",
+ "GPIO21",
+ "GPIO22",
+ "GPIO23",
+ "GPIO24",
+ "GPIO25",
+ "GPIO26",
+ "GPIO27",
+ "HDMI_HPD_N",
+ "STATUS_LED_G",
+ /* Used by BT module */
+ "CTS0",
+ "RTS0",
+ "TXD0",
+ "RXD0",
+ /* Used by Wifi */
+ "SD1_CLK",
+ "SD1_CMD",
+ "SD1_DATA0",
+ "SD1_DATA1",
+ "SD1_DATA2",
+ "SD1_DATA3",
+ "PWM0_OUT",
+ "PWM1_OUT",
+ "", /* GPIO42 */
+ "WIFI_CLK",
+ "SDA0",
+ "SCL0",
+ "SMPS_SCL",
+ "SMPS_SDA",
+ /* Used by SD Card */
+ "SD_CLK_R",
+ "SD_CMD_R",
+ "SD_DATA0_R",
+ "SD_DATA1_R",
+ "SD_DATA2_R",
+ "SD_DATA3_R";
+};
+
+&hdmi {
+ hpd-gpios = <&gpio 28 GPIO_ACTIVE_LOW>;
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_gpio40 &pwm1_gpio41>;
+ status = "okay";
+};
+
+/*
+ * SDHCI is used to control the SDIO for wireless
+ *
+ * WL_REG_ON and BT_REG_ON of the CYW43455 Wifi/BT module are driven
+ * by a single GPIO. We can't give GPIO control to one of the drivers,
+ * otherwise the other part would get unexpectedly disturbed.
+ */
+&sdhci {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_gpio34>;
+ status = "okay";
+ bus-width = <4>;
+ non-removable;
+
+ brcmf: wifi@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+/* SDHOST is used to drive the SD card */
+&sdhost {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhost_gpio48>;
+ status = "okay";
+ bus-width = <4>;
+};
+
+/* uart0 communicates with the BT module */
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_ctsrts_gpio30 &uart0_gpio32 &gpclk2_gpio43>;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ max-speed = <2000000>;
+ };
+};
+
+/* uart1 is mapped to the pin header */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_gpio14>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
index 42bb09044cc7..c6fa34c24100 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
@@ -14,7 +14,7 @@
stdout-path = "serial1:115200n8";
};
- memory {
+ memory@0 {
reg = <0 0x40000000>;
};
@@ -42,7 +42,7 @@
#gpio-cells = <2>;
gpio-line-names = "BT_ON",
"WL_ON",
- "STATUS_LED",
+ "STATUS_LED_R",
"LAN_RUN",
"",
"CAM_GPIO0",
@@ -52,6 +52,76 @@
};
};
+&gpio {
+ /*
+ * Taken from rpi_SCH_3bplus_1p0_reduced.pdf and
+ * the official GPU firmware DT blob.
+ *
+ * Legend:
+ * "NC" = not connected (no rail from the SoC)
+ * "FOO" = GPIO line named "FOO" on the schematic
+ * "FOO_N" = GPIO line named "FOO" on schematic, active low
+ */
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
+ "SDA1",
+ "SCL1",
+ "GPIO_GCLK",
+ "GPIO5",
+ "GPIO6",
+ "SPI_CE1_N",
+ "SPI_CE0_N",
+ "SPI_MISO",
+ "SPI_MOSI",
+ "SPI_SCLK",
+ "GPIO12",
+ "GPIO13",
+ /* Serial port */
+ "TXD1",
+ "RXD1",
+ "GPIO16",
+ "GPIO17",
+ "GPIO18",
+ "GPIO19",
+ "GPIO20",
+ "GPIO21",
+ "GPIO22",
+ "GPIO23",
+ "GPIO24",
+ "GPIO25",
+ "GPIO26",
+ "GPIO27",
+ "HDMI_HPD_N",
+ "STATUS_LED_G",
+ /* Used by BT module */
+ "CTS0",
+ "RTS0",
+ "TXD0",
+ "RXD0",
+ /* Used by Wifi */
+ "SD1_CLK",
+ "SD1_CMD",
+ "SD1_DATA0",
+ "SD1_DATA1",
+ "SD1_DATA2",
+ "SD1_DATA3",
+ "PWM0_OUT",
+ "PWM1_OUT",
+ "ETHCLK",
+ "WIFI_CLK",
+ "SDA0",
+ "SCL0",
+ "SMPS_SCL",
+ "SMPS_SDA",
+ /* Used by SD Card */
+ "SD_CLK_R",
+ "SD_CMD_R",
+ "SD_DATA0_R",
+ "SD_DATA1_R",
+ "SD_DATA2_R",
+ "SD_DATA3_R";
+};
+
&hdmi {
hpd-gpios = <&gpio 28 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
index 0c155dd4f396..ce71f578c51a 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -14,7 +14,7 @@
stdout-path = "serial1:115200n8";
};
- memory {
+ memory@0 {
reg = <0 0x40000000>;
};
@@ -39,7 +39,7 @@
"WL_ON",
"STATUS_LED",
"LAN_RUN",
- "HPD_N",
+ "HDMI_HPD_N",
"CAM_GPIO0",
"CAM_GPIO1",
"PWR_LOW_N";
@@ -47,6 +47,76 @@
};
};
+&gpio {
+ /*
+ * Taken from rpi_SCH_3b_1p2_reduced.pdf and
+ * the official GPU firmware DT blob.
+ *
+ * Legend:
+ * "NC" = not connected (no rail from the SoC)
+ * "FOO" = GPIO line named "FOO" on the schematic
+ * "FOO_N" = GPIO line named "FOO" on schematic, active low
+ */
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
+ "SDA1",
+ "SCL1",
+ "GPIO_GCLK",
+ "GPIO5",
+ "GPIO6",
+ "SPI_CE1_N",
+ "SPI_CE0_N",
+ "SPI_MISO",
+ "SPI_MOSI",
+ "SPI_SCLK",
+ "GPIO12",
+ "GPIO13",
+ /* Serial port */
+ "TXD1",
+ "RXD1",
+ "GPIO16",
+ "GPIO17",
+ "GPIO18",
+ "GPIO19",
+ "GPIO20",
+ "GPIO21",
+ "GPIO22",
+ "GPIO23",
+ "GPIO24",
+ "GPIO25",
+ "GPIO26",
+ "GPIO27",
+ "", /* GPIO 28 */
+ "LAN_RUN_BOOT",
+ /* Used by BT module */
+ "CTS0",
+ "RTS0",
+ "TXD0",
+ "RXD0",
+ /* Used by Wifi */
+ "SD1_CLK",
+ "SD1_CMD",
+ "SD1_DATA0",
+ "SD1_DATA1",
+ "SD1_DATA2",
+ "SD1_DATA3",
+ "PWM0_OUT",
+ "PWM1_OUT",
+ "ETHCLK",
+ "WIFI_CLK",
+ "SDA0",
+ "SCL0",
+ "SMPS_SCL",
+ "SMPS_SDA",
+ /* Used by SD Card */
+ "SD_CLK_R",
+ "SD_CMD_R",
+ "SD_DATA0_R",
+ "SD_DATA1_R",
+ "SD_DATA2_R",
+ "SD_DATA3_R";
+};
+
&pwm {
pinctrl-names = "default";
pinctrl-0 = <&pwm0_gpio40 &pwm1_gpio41>;
diff --git a/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi b/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
index 4a89a1885a3d..81399b2c5af9 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
+++ b/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
@@ -4,7 +4,7 @@
#include "bcm2836-rpi.dtsi"
/ {
- memory {
+ memory@0 {
reg = <0 0x40000000>;
};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 31b29646b14c..9777644c6c2b 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -3,6 +3,7 @@
#include <dt-bindings/clock/bcm2835-aux.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/soc/bcm2835-pm.h>
/* firmware-provided startup stubs live here, where the secondary CPUs are
* spinning.
@@ -120,9 +121,18 @@
#interrupt-cells = <2>;
};
- watchdog@7e100000 {
- compatible = "brcm,bcm2835-pm-wdt";
- reg = <0x7e100000 0x28>;
+ pm: watchdog@7e100000 {
+ compatible = "brcm,bcm2835-pm", "brcm,bcm2835-pm-wdt";
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
+ reg = <0x7e100000 0x114>,
+ <0x7e00a000 0x24>;
+ clocks = <&clocks BCM2835_CLOCK_V3D>,
+ <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+ <&clocks BCM2835_CLOCK_H264>,
+ <&clocks BCM2835_CLOCK_ISP>;
+ clock-names = "v3d", "peri_image", "h264", "isp";
+ system-power-controller;
};
clocks: cprman@7e101000 {
@@ -629,6 +639,7 @@
compatible = "brcm,bcm2835-v3d";
reg = <0x7ec00000 0x1000>;
interrupts = <1 10>;
+ power-domains = <&pm BCM2835_POWER_DOMAIN_GRAFX_V3D>;
};
vc4: gpu {
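Editor's note: across the two Raspberry Pi hunks above, the V3D power domain moves from the firmware provider in bcm2835-rpi.dtsi to the new on-chip PM block in bcm283x.dtsi, which now doubles as a power-domain and reset provider. A reduced provider/consumer sketch of that pairing, using only properties visible in the hunks (clocks and other details omitted):

    #include <dt-bindings/soc/bcm2835-pm.h>

    pm: watchdog@7e100000 {
            compatible = "brcm,bcm2835-pm", "brcm,bcm2835-pm-wdt";
            reg = <0x7e100000 0x114>, <0x7e00a000 0x24>;
            #power-domain-cells = <1>;      /* consumers pass one domain index */
            #reset-cells = <1>;
    };

    v3d@7ec00000 {
            compatible = "brcm,bcm2835-v3d";
            reg = <0x7ec00000 0x1000>;
            power-domains = <&pm BCM2835_POWER_DOMAIN_GRAFX_V3D>;
    };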
diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
index 76a2bab3bc6f..fe842f2f1ca7 100644
--- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
+++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
@@ -20,6 +20,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
index 69e3570e03dd..6fcbb0509ba0 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
@@ -20,6 +20,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
index 0f6f0fe13bfb..b3e8cc90b13f 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
@@ -20,6 +20,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts b/arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts
index f77089744996..fdeaa895512f 100644
--- a/arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts
+++ b/arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts
@@ -16,6 +16,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts
index 4d427863756f..0d510cb15ec3 100644
--- a/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts
+++ b/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts
@@ -17,6 +17,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts b/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts
index 189cc3dcd6ef..962e89edba11 100644
--- a/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts
+++ b/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts
@@ -16,6 +16,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
index 03c1ab188576..658a56ff8a5c 100644
--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
@@ -20,6 +20,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
index 36efe410dcd7..5fd47eec4407 100644
--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -17,6 +17,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
index 3e5e9972cd97..6604be6ff0a0 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
@@ -17,6 +17,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x18000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts b/arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts
index 7fd85475893d..567ebbd5a0e9 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts
@@ -16,6 +16,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
index 7acbecd42950..ac2d136ed334 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
@@ -17,6 +17,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x18000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
index f4558d9d2769..74371e821b1a 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
@@ -17,6 +17,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
index bdad7267255a..b44af63ee310 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
@@ -17,6 +17,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x18000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
index 30719380b6c0..eebc0d43e220 100644
--- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
+++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
@@ -17,6 +17,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x18000000>;
};
diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
new file mode 100644
index 000000000000..ec09c0426d16
--- /dev/null
+++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright (C) 2017 Hamster Tian <haotia@gmail.com>
+ * Copyright (C) 2019 Hao Dong <halbertdong@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "bcm47094.dtsi"
+#include "bcm5301x-nand-cs0-bch4.dtsi"
+
+/ {
+ compatible = "phicomm,k3", "brcm,bcm47094", "brcm,bcm4708";
+ model = "Phicomm K3";
+
+ memory {
+ reg = <0x00000000 0x08000000
+ 0x88000000 0x18000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ restart {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usb3_phy {
+ status = "okay";
+};
+
+&nandcs {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "boot";
+ reg = <0x0000000 0x0080000>;
+ read-only;
+ };
+
+ partition@80000 {
+ label = "nvram";
+ reg = <0x0080000 0x0100000>;
+ };
+
+ partition@180000{
+ label = "phicomm";
+ reg = <0x0180000 0x0280000>;
+ read-only;
+ };
+
+ partition@400000 {
+ label = "firmware";
+ reg = <0x0400000 0x7C00000>;
+ compatible = "brcm,trx";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
index 74c83b0ca54e..eb59508578e4 100644
--- a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
+++ b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright 2017 Luxul Inc.
- *
- * Licensed under the ISC license.
*/
/dts-v1/;
@@ -17,6 +16,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
index 214df18f3a75..4c71f5e95e00 100644
--- a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
+++ b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright 2017 Luxul Inc.
- *
- * Licensed under the ISC license.
*/
/dts-v1/;
@@ -17,6 +16,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
index e15e2a1e9d8c..5ad53ea52d0a 100644
--- a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
+++ b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
@@ -16,6 +16,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index fd7af943fb0b..ac5266ee8d4c 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -13,9 +13,10 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupt-parent = <&gic>;
chipcommonA {
diff --git a/arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts b/arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts
index 431cda514230..2e7fda9b998c 100644
--- a/arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts
+++ b/arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts
@@ -20,6 +20,7 @@
};
memory@0 {
+ device_type = "memory";
reg = <0x00000000 0x08000000>,
<0x68000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm53573.dtsi b/arch/arm/boot/dts/bcm53573.dtsi
index 5054fa9eb0d0..b29695bd4855 100644
--- a/arch/arm/boot/dts/bcm53573.dtsi
+++ b/arch/arm/boot/dts/bcm53573.dtsi
@@ -7,9 +7,10 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupt-parent = <&gic>;
aliases {
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index f59764008b9c..e6a41e1b27fd 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -6,9 +6,9 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "brcm,bcm63138";
model = "Broadcom BCM63138 DSL SoC";
interrupt-parent = <&gic>;
diff --git a/arch/arm/boot/dts/bcm7445.dtsi b/arch/arm/boot/dts/bcm7445.dtsi
index c859aa6f358c..504a63236a5e 100644
--- a/arch/arm/boot/dts/bcm7445.dtsi
+++ b/arch/arm/boot/dts/bcm7445.dtsi
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton.dtsi"
-
/ {
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/bcm947189acdbmr.dts b/arch/arm/boot/dts/bcm947189acdbmr.dts
index ef263412fea5..4991700ae6b0 100644
--- a/arch/arm/boot/dts/bcm947189acdbmr.dts
+++ b/arch/arm/boot/dts/bcm947189acdbmr.dts
@@ -18,6 +18,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm953012er.dts b/arch/arm/boot/dts/bcm953012er.dts
index 17f63c7a0437..250a1d6f2d05 100644
--- a/arch/arm/boot/dts/bcm953012er.dts
+++ b/arch/arm/boot/dts/bcm953012er.dts
@@ -40,6 +40,7 @@
compatible = "brcm,bcm953012er", "brcm,brcm53012", "brcm,bcm4708";
memory {
+ device_type = "memory";
reg = <0x00000000 0x8000000>;
};
diff --git a/arch/arm/boot/dts/bcm953012hr.dts b/arch/arm/boot/dts/bcm953012hr.dts
index 11b0f5ed99e6..9140be7ec053 100644
--- a/arch/arm/boot/dts/bcm953012hr.dts
+++ b/arch/arm/boot/dts/bcm953012hr.dts
@@ -46,6 +46,7 @@
};
memory@80000000 {
+ device_type = "memory";
reg = <0x80000000 0x10000000>;
};
};
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index e798055d6989..52c4c6c9d3f1 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -44,6 +44,7 @@
};
memory {
+ device_type = "memory";
reg = <0x80000000 0x10000000>;
};
};
diff --git a/arch/arm/boot/dts/cx92755.dtsi b/arch/arm/boot/dts/cx92755.dtsi
index a5a23c376418..d2e8f36f8c60 100644
--- a/arch/arm/boot/dts/cx92755.dtsi
+++ b/arch/arm/boot/dts/cx92755.dtsi
@@ -44,9 +44,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "cnxt,cx92755";
interrupt-parent = <&intc>;
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index a3c9b346721d..f04bc3e15332 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -94,6 +94,28 @@
regulator-boot-on;
};
+ baseboard_3v3: fixedregulator-3v3 {
+ /* TPS73701DCQ */
+ compatible = "regulator-fixed";
+ regulator-name = "baseboard_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vbat>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ baseboard_1v8: fixedregulator-1v8 {
+ /* TPS73701DCQ */
+ compatible = "regulator-fixed";
+ regulator-name = "baseboard_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vbat>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
backlight_lcd: backlight-regulator {
compatible = "regulator-fixed";
regulator-name = "lcd_backlight_pwr";
@@ -105,7 +127,7 @@
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 EVM";
+ simple-audio-card,name = "DA850-OMAPL138 EVM";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
@@ -210,10 +232,9 @@
/* Regulators */
IOVDD-supply = <&vdcdc2_reg>;
- /* Derived from VBAT: Baseboard 3.3V / 1.8V */
- AVDD-supply = <&vbat>;
- DRVDD-supply = <&vbat>;
- DVDD-supply = <&vbat>;
+ AVDD-supply = <&baseboard_3v3>;
+ DRVDD-supply = <&baseboard_3v3>;
+ DVDD-supply = <&baseboard_1v8>;
};
tca6416: gpio@20 {
compatible = "ti,tca6416";
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index 0177e3ed20fe..26f453dc8370 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -39,17 +39,51 @@
};
};
+ vcc_5vd: fixedregulator-vcc_5vd {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_5vd";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ };
+
+ vcc_3v3d: fixedregulator-vcc_3v3d {
+ /* TPS650250 - VDCDC1 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3d";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_5vd>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_1v8d: fixedregulator-vcc_1v8d {
+ /* TPS650250 - VDCDC2 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8d";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_5vd>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 LCDK";
+ simple-audio-card,name = "DA850-OMAPL138 LCDK";
simple-audio-card,widgets =
"Line", "Line In",
- "Line", "Line Out";
+ "Line", "Line Out",
+ "Microphone", "Mic Jack";
simple-audio-card,routing =
"LINE1L", "Line In",
"LINE1R", "Line In",
"Line Out", "LLOUT",
- "Line Out", "RLOUT";
+ "Line Out", "RLOUT",
+ "MIC3L", "Mic Jack",
+ "MIC3R", "Mic Jack",
+ "Mic Jack", "Mic Bias";
simple-audio-card,format = "dsp_b";
simple-audio-card,bitclock-master = <&link0_codec>;
simple-audio-card,frame-master = <&link0_codec>;
@@ -220,7 +254,15 @@
#sound-dai-cells = <0>;
compatible = "ti,tlv320aic3106";
reg = <0x18>;
+ adc-settle-ms = <40>;
+ ai3x-micbias-vg = <1>; /* 2.0V */
status = "okay";
+
+ /* Regulators */
+ IOVDD-supply = <&vcc_3v3d>;
+ AVDD-supply = <&vcc_3v3d>;
+ DRVDD-supply = <&vcc_3v3d>;
+ DVDD-supply = <&vcc_1v8d>;
};
};
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 47aa53ba6b92..559659b399d0 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -476,7 +476,7 @@
clocksource: timer@20000 {
compatible = "ti,da830-timer";
reg = <0x20000 0x1000>;
- interrupts = <12>, <13>;
+ interrupts = <21>, <22>;
interrupt-names = "tint12", "tint34";
clocks = <&pll0_auxclk>;
};
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 601c57afd4fe..95de9f214c14 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -222,6 +222,30 @@
#interrupt-cells = <2>;
};
+ gpio3: gpio@1ac000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio3";
+ ti,gpio-always-on;
+ reg = <0x1ac000 0x2000>;
+ interrupts = <32>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@1ae000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio4";
+ ti,gpio-always-on;
+ reg = <0x1ae000 0x2000>;
+ interrupts = <62>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
i2c2: i2c@2a000 {
compatible = "ti,omap4-i2c";
#address-cells = <1>;
@@ -240,10 +264,48 @@
ti,spi-num-cs = <4>;
ti,hwmods = "mcspi1";
dmas = <&edma 16 0 &edma 17 0
- &edma 18 0 &edma 19 0>;
+ &edma 18 0 &edma 19 0
+ &edma 20 0 &edma 21 0
+ &edma 22 0 &edma 23 0>;
+
+ dma-names = "tx0", "rx0", "tx1", "rx1",
+ "tx2", "rx2", "tx3", "rx3";
+ };
+
+ mcspi2: spi@1a0000 {
+ compatible = "ti,omap4-mcspi";
+ reg = <0x1a0000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <125>;
+ ti,spi-num-cs = <4>;
+ ti,hwmods = "mcspi2";
+ dmas = <&edma 42 0 &edma 43 0
+ &edma 44 0 &edma 45 0>;
dma-names = "tx0", "rx0", "tx1", "rx1";
};
+ /* Board must configure dmas with edma_xbar for EDMA */
+ mcspi3: spi@1a2000 {
+ compatible = "ti,omap4-mcspi";
+ reg = <0x1a2000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <126>;
+ ti,spi-num-cs = <4>;
+ ti,hwmods = "mcspi3";
+ };
+
+ mcspi4: spi@1a4000 {
+ compatible = "ti,omap4-mcspi";
+ reg = <0x1a4000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <127>;
+ ti,spi-num-cs = <4>;
+ ti,hwmods = "mcspi4";
+ };
+
timer1: timer@2e000 {
compatible = "ti,dm814-timer";
reg = <0x2e000 0x2000>;
@@ -343,6 +405,12 @@
#size-cells = <1>;
ranges = <0 0 0x800>;
+ phy_gmii_sel: phy-gmii-sel {
+ compatible = "ti,dm814-phy-gmii-sel";
+ reg = <0x650 0x4>;
+ #phy-cells = <1>;
+ };
+
scm_clocks: clocks {
#address-cells = <1>;
#size-cells = <0>;
@@ -549,17 +617,14 @@
cpsw_emac0: slave@4a100200 {
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1>;
+
};
cpsw_emac1: slave@4a100300 {
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
- };
-
- phy_sel: cpsw-phy-sel@48140650 {
- compatible = "ti,am3352-cpsw-phy-sel";
- reg= <0x48140650 0x4>;
- reg-names = "gmii-sel";
+ phys = <&phy_gmii_sel 2>;
};
};
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 250ad0535e8c..2e8a3977219f 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/include/ "skeleton.dtsi"
-
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "marvell,dove";
model = "Marvell Armada 88AP510 SoC";
interrupt-parent = <&intc>;
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index bb45cb7fc3b6..414f1cd68733 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -77,18 +77,18 @@
};
};
+ phy_gmii_sel: phy-gmii-sel {
+ compatible = "ti,dra7xx-phy-gmii-sel";
+ reg = <0x554 0x4>;
+ #phy-cells = <1>;
+ };
+
scm_conf_clocks: clocks {
#address-cells = <1>;
#size-cells = <0>;
};
};
- phy_sel: cpsw-phy-sel@554 {
- compatible = "ti,dra7xx-cpsw-phy-sel";
- reg= <0x554 0x4>;
- reg-names = "gmii-sel";
- };
-
dra7_pmx_core: pinmux@1400 {
compatible = "ti,dra7-padconf",
"pinctrl-single";
@@ -3099,7 +3099,6 @@
<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
ranges = <0 0 0x4000>;
syscon = <&scm_conf>;
- cpsw-phy-sel = <&phy_sel>;
status = "disabled";
davinci_mdio: mdio@1000 {
@@ -3114,11 +3113,13 @@
cpsw_emac0: slave@200 {
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1>;
};
cpsw_emac1: slave@300 {
/* Filled in by U-Boot */
mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2>;
};
};
};
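Editor's note: the dm814x and dra7 hunks above replace the old cpsw-phy-sel helper node with a generic PHY provider (phy-gmii-sel) inside the control module, and each CPSW slave now selects its port mode through a phys handle. The essential provider/consumer pairing, reduced from the hunks (register offset and port indices mirror the dra7 case):

    /* provider, a child of the control-module syscon */
    phy_gmii_sel: phy-gmii-sel {
            compatible = "ti,dra7xx-phy-gmii-sel";
            reg = <0x554 0x4>;
            #phy-cells = <1>;       /* one cell: the CPSW port number */
    };

    /* consumer: each CPSW slave references its port on the provider */
    cpsw_emac0: slave@200 {
            mac-address = [ 00 00 00 00 00 00 ];    /* filled in by U-Boot */
            phys = <&phy_gmii_sel 1>;
    };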
diff --git a/arch/arm/boot/dts/ep7209.dtsi b/arch/arm/boot/dts/ep7209.dtsi
index aaf1261d2ee4..0e74222a5eae 100644
--- a/arch/arm/boot/dts/ep7209.dtsi
+++ b/arch/arm/boot/dts/ep7209.dtsi
@@ -6,11 +6,11 @@
/dts-v1/;
-#include "skeleton.dtsi"
-
#include <dt-bindings/clock/clps711x-clock.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Cirrus Logic EP7209";
compatible = "cirrus,ep7209";
diff --git a/arch/arm/boot/dts/ep7211-edb7211.dts b/arch/arm/boot/dts/ep7211-edb7211.dts
index bc9d5b697452..3475c7777cbc 100644
--- a/arch/arm/boot/dts/ep7211-edb7211.dts
+++ b/arch/arm/boot/dts/ep7211-edb7211.dts
@@ -12,6 +12,7 @@
compatible = "cirrus,edb7211", "cirrus,ep7211", "cirrus,ep7209";
memory {
+ device_type = "memory";
reg = <0xc0000000 0x02000000>;
};
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 608d17454179..5892a9f7622f 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -168,6 +168,9 @@
interrupt-controller;
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
+ clock-names = "clkout8";
+ clocks = <&cmu CLK_FIN_PLL>;
+ #clock-cells = <1>;
};
mipi_phy: video-phy {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 3a9eb1e91c45..08d3a0a7b4eb 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -49,7 +49,7 @@
};
emmc_pwrseq: pwrseq {
- pinctrl-0 = <&sd1_cd>;
+ pinctrl-0 = <&emmc_rstn>;
pinctrl-names = "default";
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
@@ -165,12 +165,6 @@
cpu0-supply = <&buck2_reg>;
};
-/* RSTN signal for eMMC */
-&sd1_cd {
- samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
- samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-};
-
&pinctrl_1 {
gpio_power_key: power_key {
samsung,pins = "gpx1-3";
@@ -188,6 +182,11 @@
samsung,pins = "gpx3-7";
samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
};
+
+ emmc_rstn: emmc-rstn {
+ samsung,pins = "gpk1-2";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
};
&ehci {
@@ -390,7 +389,6 @@
regulator-name = "LDO20_1.8V";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-boot-on;
};
ldo21_reg: LDO21 {
diff --git a/arch/arm/boot/dts/exynos4412-odroidx.dts b/arch/arm/boot/dts/exynos4412-odroidx.dts
index 348556fcdd9d..a2251581f6b6 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx.dts
@@ -53,7 +53,7 @@
regulator-name = "p3v3_en";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpio = <&gpa1 1 GPIO_ACTIVE_LOW>;
+ gpio = <&gpa1 1 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 2ca9319f48f2..dc6fa6fe83f1 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -23,7 +23,7 @@
};
chosen {
- bootargs = "console=ttySAC2,115200";
+ stdout-path = "serial2:115200n8";
};
gpio_keys {
@@ -100,7 +100,7 @@
regulator-name = "VDD_33ON_2.8V";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
- gpio = <&gpx1 1 GPIO_ACTIVE_LOW>;
+ gpio = <&gpx1 1 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index bf09eab90f8a..25d95de15c9b 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -36,6 +36,11 @@
};
};
+&adc {
+ vdd-supply = <&ldo4_reg>;
+ status = "okay";
+};
+
&bus_wcore {
devfreq-events = <&nocp_mem0_0>, <&nocp_mem0_1>,
<&nocp_mem1_0>, <&nocp_mem1_1>;
@@ -468,7 +473,7 @@
buck8_reg: BUCK8 {
regulator-name = "vdd_1.8v_ldo";
regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1500000>;
+ regulator-max-microvolt = <2000000>;
regulator-always-on;
regulator-boot-on;
};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
index e84544b220b9..51a843bd65ed 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
@@ -24,7 +24,9 @@
"Headphone Jack", "MICBIAS",
"IN1", "Headphone Jack",
"Speakers", "SPKL",
- "Speakers", "SPKR";
+ "Speakers", "SPKR",
+ "I2S Playback", "Mixer DAI TX",
+ "HiFi Playback", "Mixer DAI TX";
assigned-clocks = <&clock CLK_MOUT_EPLL>,
<&clock CLK_MOUT_MAU_EPLL>,
@@ -51,7 +53,7 @@
<196608000>;
cpu {
- sound-dai = <&i2s0 0>;
+ sound-dai = <&i2s0 0>, <&i2s0 1>;
};
codec {
sound-dai = <&hdmi>, <&max98090>;
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index b299e541cac0..5f195ad7e467 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -362,11 +362,6 @@
};
};
-&adc {
- vdd-supply = <&ldo4_reg>;
- status = "okay";
-};
-
&hdmi {
status = "okay";
ddc = <&i2c_2>;
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts b/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
index 0db935f2b836..c19b5a51ca44 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
@@ -18,6 +18,14 @@
compatible = "hardkernel,odroid-xu3-lite", "samsung,exynos5800", "samsung,exynos5";
};
+&arm_a7_pmu {
+ status = "disabled";
+};
+
+&arm_a15_pmu {
+ status = "disabled";
+};
+
&pwm {
/*
* PWM 0 -- fan
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
index 122174ea9e0a..892d389d6d09 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
@@ -33,6 +33,8 @@
compatible = "samsung,odroid-xu3-audio";
model = "Odroid-XU4";
+ samsung,audio-routing = "I2S Playback", "Mixer DAI TX";
+
assigned-clocks = <&clock CLK_MOUT_EPLL>,
<&clock CLK_MOUT_MAU_EPLL>,
<&clock CLK_MOUT_USER_MAU_EPLL>,
@@ -58,7 +60,7 @@
<196608000>;
cpu {
- sound-dai = <&i2s0 0>;
+ sound-dai = <&i2s0 0>, <&i2s0 1>;
};
codec {
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index cc0c3cf89eaa..592111c8d6fd 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -443,7 +443,7 @@
};
display-controller@6a000000 {
- status = "disabled";
+ status = "okay";
port@0 {
reg = <0>;
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 26ff5d419bfc..3652f5556b29 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -40,7 +40,7 @@
spi2 = &cspi3;
};
- aitc: aitc-interrupt-controller@e0000000 {
+ aitc: aitc-interrupt-controller@10040000 {
compatible = "fsl,imx27-aitc", "fsl,avic";
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts b/arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts
index 2967a748d859..a2eea58510dc 100644
--- a/arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts
+++ b/arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts
@@ -21,12 +21,28 @@
};
};
+&esdhc1 {
+ status = "okay";
+};
+
&owire {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_owire>;
status = "okay";
};
+&pmic {
+ fsl,mc13xxx-uses-rtc;
+
+ regulators {
+ vcoincell_reg: vcoincell {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ };
+ };
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
diff --git a/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi b/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi
index 82d8df097ef1..d90ba5fe4f1b 100644
--- a/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi
+++ b/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi
@@ -37,7 +37,6 @@
reg = <0>;
interrupt-parent = <&gpio1>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
- fsl,mc13xxx-uses-rtc;
regulators {
sw1_reg: sw1 {
@@ -142,16 +141,17 @@
pwgt2spi_reg: pwgt2spi {
regulator-always-on;
};
-
- vcoincell_reg: vcoincell {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-always-on;
- };
};
};
};
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1>;
+ max-frequency = <50000000>;
+ bus-width = <1>;
+};
+
&esdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
@@ -174,9 +174,12 @@
};
&i2c2 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
clock-frequency = <400000>;
+ scl-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
status = "okay";
mma7455l@1d {
@@ -241,6 +244,14 @@
>;
};
+ pinctrl_esdhc1: esdhc1grp {
+ fsl,pins = <
+ MX51_PAD_SD1_CLK__SD1_CLK 0x400021d5
+ MX51_PAD_SD1_CMD__SD1_CMD 0x400020d5
+ MX51_PAD_SD1_DATA0__SD1_DATA0 0x400020d5
+ >;
+ };
+
pinctrl_esdhc2: esdhc2grp {
fsl,pins = <
MX51_PAD_SD2_CMD__SD2_CMD 0x400020d5
@@ -282,6 +293,13 @@
>;
};
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <
+ MX51_PAD_GPIO1_2__GPIO1_2 0x400001ed
+ MX51_PAD_GPIO1_3__GPIO1_3 0x400001ed
+ >;
+ };
+
pinctrl_nfc: nfcgrp {
fsl,pins = <
MX51_PAD_NANDF_D0__NANDF_D0 0x80000000
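Editor's note: the i2c2 change above is the usual I2C bus-recovery arrangement — a second "gpio" pinctrl state plus scl-gpios/sda-gpios lets the I2C core detach the controller from the pads and bit-bang the lines to release a stuck bus. Reduced to its essentials (pin group names and GPIO numbers are taken from the hunk; everything else is a generic sketch, not a complete board file):

    #include <dt-bindings/gpio/gpio.h>

    &i2c2 {
            /* "default" routes the pads to the I2C controller,
             * "gpio" hands them to the GPIO block for recovery */
            pinctrl-names = "default", "gpio";
            pinctrl-0 = <&pinctrl_i2c2>;
            pinctrl-1 = <&pinctrl_i2c2_gpio>;
            scl-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
            sda-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
            clock-frequency = <400000>;
            status = "okay";
    };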
diff --git a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
new file mode 100644
index 000000000000..fb01fa6e4224
--- /dev/null
+++ b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Logic PD, Inc.
+
+/ {
+ keyboard {
+ compatible = "gpio-keys";
+
+ btn0 {
+ gpios = <&pcf8575 0 GPIO_ACTIVE_LOW>;
+ label = "btn0";
+ linux,code = <KEY_WAKEUP>;
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+
+ btn1 {
+ gpios = <&pcf8575 1 GPIO_ACTIVE_LOW>;
+ label = "btn1";
+ linux,code = <KEY_WAKEUP>;
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+
+ btn2 {
+ gpios = <&pcf8575 2 GPIO_ACTIVE_LOW>;
+ label = "btn2";
+ linux,code = <KEY_WAKEUP>;
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+
+ btn3 {
+ gpios = <&pcf8575 3 GPIO_ACTIVE_LOW>;
+ label = "btn3";
+ linux,code = <KEY_WAKEUP>;
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ gen-led0 {
+ label = "led0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led0>;
+ gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu0";
+ };
+
+ gen-led1 {
+ label = "led1";
+ gpios = <&pcf8575 8 GPIO_ACTIVE_HIGH>;
+ };
+
+ gen-led2 {
+ label = "led2";
+ gpios = <&pcf8575 9 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ gen-led3 {
+ label = "led3";
+ gpios = <&pcf8575 10 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+ };
+
+ reg_usb_otg_vbus: regulator-otg-vbus {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb_otg>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 15 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usb_h1_vbus: regulator-usb-h1-vbus {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb_h1_vbus>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_3v3: regulator-3v3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_3v3>;
+ compatible = "regulator-fixed";
+ regulator-name = "reg_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_enet: regulator-ethernet {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_enet>;
+ compatible = "regulator-fixed";
+ regulator-name = "ethernet-supply";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 31 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <70000>;
+ enable-active-high;
+ vin-supply = <&sw4_reg>;
+ };
+
+ reg_audio: regulator-audio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_audio>;
+ compatible = "regulator-fixed";
+ regulator-name = "3v3_aud";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_3v3>;
+ };
+
+ reg_hdmi: regulator-hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_hdmi>;
+ compatible = "regulator-fixed";
+ regulator-name = "hdmi-supply";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_3v3>;
+ };
+
+ reg_uart3: regulator-uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_uart3>;
+ compatible = "regulator-fixed";
+ regulator-name = "uart3-supply";
+ gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&reg_3v3>;
+ };
+
+ reg_1v8: regulator-1v8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_1v8>;
+ compatible = "regulator-fixed";
+ regulator-name = "1v8-supply";
+ gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&reg_3v3>;
+ };
+
+ reg_pcie: regulator-pcie {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_pcie>;
+ regulator-name = "mpcie_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_mipi: regulator-mipi {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_mipi>;
+ regulator-name = "mipi_pwr_en";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-wm8962";
+ model = "wm8962-audio";
+ ssi-controller = <&ssi2>;
+ audio-codec = <&wm8962>;
+ audio-routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "Ext Spk", "SPKOUTL",
+ "Ext Spk", "SPKOUTR",
+ "AMIC", "MICBIAS",
+ "IN3R", "AMIC";
+ mux-int-port = <2>;
+ mux-ext-port = <4>;
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "disabled";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
+ phy-reset-duration = <10>;
+ phy-reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
+ phy-supply = <&reg_enet>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ wm8962: audio-codec@1a {
+ compatible = "wlf,wm8962";
+ reg = <0x1a>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ clock-names = "xclk";
+ DCVDD-supply = <&reg_audio>;
+ DBVDD-supply = <&reg_audio>;
+ AVDD-supply = <&reg_audio>;
+ CPVDD-supply = <&reg_audio>;
+ MICVDD-supply = <&reg_audio>;
+ PLLVDD-supply = <&reg_audio>;
+ SPKVDD1-supply = <&reg_audio>;
+ SPKVDD2-supply = <&reg_audio>;
+ gpio-cfg = <
+ 0x0000 /* 0:Default */
+ 0x0000 /* 1:Default */
+ 0x0013 /* 2:FN_DMICCLK */
+ 0x0000 /* 3:Default */
+ 0x8014 /* 4:FN_DMICCDAT */
+ 0x0000 /* 5:Default */
+ >;
+ };
+};
+
+&i2c3 {
+ ov5640: camera@10 {
+ compatible = "ovti,ov5640";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ov5640>;
+ reg = <0x10>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ clock-names = "xclk";
+ DOVDD-supply = <&reg_mipi>;
+ AVDD-supply = <&reg_mipi>;
+ DVDD-supply = <&reg_mipi>;
+ reset-gpios = <&gpio3 26 GPIO_ACTIVE_LOW>;
+ powerdown-gpios = <&gpio3 27 GPIO_ACTIVE_HIGH>;
+
+ port {
+ ov5640_to_mipi_csi2: endpoint {
+ remote-endpoint = <&mipi_csi2_in>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+
+ pcf8575: gpio@20 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcf8574>;
+ compatible = "nxp,pcf8575";
+ reg = <0x20>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ lines-initial-states = <0x0710>;
+ wakeup-source;
+ };
+};
+
+&ipu1_csi1_from_mipi_vc1 {
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+};
+
+&mipi_csi {
+ status = "okay";
+
+ port@0 {
+ reg = <0>;
+
+ mipi_csi2_in: endpoint {
+ remote-endpoint = <&ov5640_to_mipi_csi2>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio1 9 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_pcie>;
+ status = "okay";
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+};
+
+&ssi2 {
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ vmmc-supply = <&reg_3v3>;
+ no-1-8-v;
+ keep-power-in-suspend;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT20__AUD4_TXC 0x130b0
+ MX6QDL_PAD_DISP0_DAT21__AUD4_TXD 0x110b0
+ MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0
+ MX6QDL_PAD_DISP0_DAT23__AUD4_RXD 0x130b0
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__ECSPI1_SCLK 0x100b1
+ MX6QDL_PAD_KEY_ROW0__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_KEY_COL1__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_KEY_ROW1__ECSPI1_SS0 0x100b1
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b8b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x13030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x13030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x13030
+ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0 /* ENET_INT */
+ MX6QDL_PAD_ENET_RX_ER__GPIO1_IO24 0x1b0b0 /* ETHR_nRST */
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_led0: led0grp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x1b0b0
+ >;
+ };
+
+ pinctrl_ov5640: ov5640grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D26__GPIO3_IO26 0x1b0b1
+ MX6QDL_PAD_EIM_D27__GPIO3_IO27 0x1b0b1
+ >;
+ };
+
+ pinctrl_pcf8574: pcf8575grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x1b0b0
+ >;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0
+ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b0
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_reg_1v8: reg1v8grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x1b0b0
+ >;
+ };
+
+ pinctrl_reg_3v3: reg3v3grp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1b0b0
+ >;
+ };
+
+ pinctrl_reg_audio: reg-audiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x1b0b0
+ >;
+ };
+
+ pinctrl_reg_enet: reg-enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x1b0b0
+ >;
+ };
+
+ pinctrl_reg_hdmi: reg-hdmigrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x1b0b0
+ >;
+ };
+
+ pinctrl_reg_mipi: reg-mipigrp {
+ fsl,pins = <MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x1b0b1>;
+ };
+
+ pinctrl_reg_pcie: reg-pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0
+ >;
+ };
+
+ pinctrl_reg_uart3: reguart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b0
+ >;
+ };
+
+ pinctrl_reg_usb_h1_vbus: usbh1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b0b0
+ >;
+ };
+
+ pinctrl_reg_usb_otg: reg-usb-otggrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x1b0b0
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0xd17059
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17069
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10069
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17069
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17069
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17069
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17069
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: h100-usdhc2-100mhz {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: h100-usdhc2-200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170f9
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6-logicpd-som.dtsi b/arch/arm/boot/dts/imx6-logicpd-som.dtsi
new file mode 100644
index 000000000000..7ceae3573248
--- /dev/null
+++ b/arch/arm/boot/dts/imx6-logicpd-som.dtsi
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Logic PD, Inc.
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ memory@10000000 {
+ device_type = "memory";
+ reg = <0x10000000 0x80000000>;
+ };
+
+ reg_wl18xx_vmmc: regulator-wl18xx {
+ compatible = "regulator-fixed";
+ regulator-name = "vwl1837";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+ <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand>;
+ nand-on-flash-bbt;
+ status = "okay";
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ pfuze100: pmic@8 {
+ compatible = "fsl,pfuze100";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-name = "vddcore";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-name = "vddsoc";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "gen_3v3";
+ regulator-boot-on;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-name = "sw3a_vddr";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-name = "sw3b_vddr";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "gen_rgmii";
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ regulator-name = "gen_5v0";
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "gen_vsns";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "gen_1v5";
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-name = "vgen2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-name = "gen_vadj_0";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-name = "gen_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-name = "gen_vadj_1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-name = "gen_2v5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+ coin_reg: coin {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ temperature-sensor@49 {
+ compatible = "ti,tmp102";
+ reg = <0x49>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ temperature-sensor@4a {
+ compatible = "ti,tmp102";
+ reg = <0x4a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tempsense>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ pagesize = <32>;
+ read-only; /* Manufacturing EEPROM programmed at factory */
+ reg = <0x51>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ pagesize = <32>;
+ reg = <0x52>;
+ };
+};
+
+/* Reroute power feeding the CPU to come from the external PMIC */
+&reg_arm {
+ vin-supply = <&sw1a_reg>;
+};
+
+&reg_soc {
+ vin-supply = <&sw1c_reg>;
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_gpmi_nand: gpmi-nandgrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0x0b0b1
+ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0x0b0b1
+ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0x0b0b1
+ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0x0b000
+ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0x0b0b1
+ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0x0b0b1
+ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0x0b0b1
+ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0x0b0b1
+ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0x0b0b1
+ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0x0b0b1
+ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0x0b0b1
+ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0x0b0b1
+ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0x0b0b1
+ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0x0b0b1
+ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0x0b0b1
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = < /* Enable ARM Debugger */
+ MX6QDL_PAD_CSI0_MCLK__ARM_TRACE_CTL 0x1b0b0
+ MX6QDL_PAD_CSI0_PIXCLK__ARM_EVENTO 0x1b0b0
+ MX6QDL_PAD_CSI0_VSYNC__ARM_TRACE00 0x1b0b0
+ MX6QDL_PAD_CSI0_DATA_EN__ARM_TRACE_CLK 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT4__ARM_TRACE01 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT5__ARM_TRACE02 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT6__ARM_TRACE03 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT7__ARM_TRACE04 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT8__ARM_TRACE05 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT9__ARM_TRACE06 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT10__ARM_TRACE07 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT11__ARM_TRACE08 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT12__ARM_TRACE09 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT13__ARM_TRACE10 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT14__ARM_TRACE11 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT15__ARM_TRACE12 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT16__ARM_TRACE13 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT17__ARM_TRACE14 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT18__ARM_TRACE15 0x1b0b0
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_tempsense: tempsensegrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x1b0b0
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x13059 /* BT_EN */
+ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT5__UART2_RTS_B 0x1b0b1
+ MX6QDL_PAD_SD4_DAT6__UART2_CTS_B 0x1b0b1
+ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170b9
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100b9
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170b9
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170b9
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170b9
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17049
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10049
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17049
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17049
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17049
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17049
+ MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x130b0 /* WL_IRQ */
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x17059 /* WLAN_EN */
+ >;
+ };
+};
+
+&snvs_poweroff {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "ti,wl1837-st";
+ enable-gpios = <&gpio7 8 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ non-removable;
+ keep-power-in-suspend;
+ wakeup-source;
+ vmmc-supply = <&sw2_reg>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ non-removable;
+ cap-power-off-card;
+ keep-power-in-suspend;
+ wakeup-source;
+ vmmc-supply = <&reg_wl18xx_vmmc>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1837";
+ reg = <2>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ tcxo-clock-frequency = <26000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
index d5f7a1703aae..9a5d6c94cca4 100644
--- a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
@@ -228,10 +228,11 @@
&weim {
status = "okay";
- /* weim memory map: 32MB on CS0, 32MB on CS1, 32MB on CS2 */
+ /* weim memory map: 32MB on CS0, CS1, CS2 and CS3 */
ranges = <0 0 0x08000000 0x02000000
1 0 0x0a000000 0x02000000
- 2 0 0x0c000000 0x02000000>;
+ 2 0 0x0c000000 0x02000000
+ 3 0 0x0e000000 0x02000000>;
/* SRAM on Colibri nEXT_CS0 */
sram@0,0 {
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
new file mode 100644
index 000000000000..b715ab0fa1ff
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2015-2018 Y Soft Corporation, a.s.
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm1 0 500000 PWM_POLARITY_INVERTED>;
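+ /* 500000 ns period = 2 kHz; PWM_POLARITY_INVERTED gives an active-low output */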
+ brightness-levels = <0 32 64 128 255>;
+ default-brightness-level = <32>;
+ num-interpolated-steps = <8>;
+ power-supply = <&sw2_reg>;
+ status = "disabled";
+ };
+
+ lcd_display: display {
+ compatible = "fsl,imx-parallel-display";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interface-pix-fmt = "rgb24";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1>;
+ status = "disabled";
+
+ port@0 {
+ reg = <0>;
+
+ lcd_display_in: endpoint {
+ remote-endpoint = <&ipu1_di0_disp0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ lcd_display_out: endpoint {
+ remote-endpoint = <&lcd_panel_in>;
+ };
+ };
+ };
+
+ panel: panel {
+ compatible = "dataimage,scf0700c48ggu18";
+ power-supply = <&sw2_reg>;
+ status = "disabled";
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
+
+ reg_pcie: regulator-pcie {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_reg>;
+ regulator-name = "MPCIE_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ status = "disabled";
+ };
+
+ reg_usb_h1_vbus: regulator-usb-h1-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh1_vbus>;
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ status = "disabled";
+ };
+
+ reg_usb_otg_vbus: regulator-usb-otg-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_vbus>;
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ status = "okay";
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii-id";
+ phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <20>;
+ phy-supply = <&sw2_reg>;
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy_port2: phy@1 {
+ reg = <1>;
+ };
+
+ phy_port3: phy@2 {
+ reg = <2>;
+ };
+
+ switch@0 {
+ compatible = "qca,qca8334";
+ reg = <0>;
+
+ switch_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: port@0 {
+ reg = <0>;
+ label = "cpu";
+ phy-mode = "rgmii";
+ ethernet = <&fec>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "eth2";
+ phy-handle = <&phy_port2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "eth1";
+ phy-handle = <&phy_port3>;
+ };
+ };
+ };
+ };
+};
+
+&hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi_cec>;
+ ddc-i2c-bus = <&i2c2>;
+ status = "disabled";
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ pmic@8 {
+ compatible = "fsl,pfuze200";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
+ reg = <0x8>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vsnvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+
+ leds: led-controller@30 {
+ compatible = "ti,lp5562";
+ reg = <0x30>;
+ clock-mode = /bits/ 8 <1>;
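+ /* clock-mode 1 selects the internal oscillator (assumed per the lp55xx binding) */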
+ status = "disabled";
+
+ chan0 {
+ chan-name = "R";
+ led-cur = /bits/ 8 <0x20>;
+ max-cur = /bits/ 8 <0x60>;
+ };
+
+ chan1 {
+ chan-name = "G";
+ led-cur = /bits/ 8 <0x20>;
+ max-cur = /bits/ 8 <0x60>;
+ };
+
+ chan2 {
+ chan-name = "B";
+ led-cur = /bits/ 8 <0x20>;
+ max-cur = /bits/ 8 <0x60>;
+ };
+
+ chan3 {
+ chan-name = "W";
+ led-cur = /bits/ 8 <0x0>;
+ max-cur = /bits/ 8 <0x0>;
+ };
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c128";
+ reg = <0x57>;
+ pagesize = <64>;
+ status = "okay";
+ };
+
+ touchscreen: touchscreen@5c {
+ compatible = "pixcir,pixcir_tangoc";
+ reg = <0x5c>;
+ pinctrl-0 = <&pinctrl_touch>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+ attb-gpio = <&gpio4 5 GPIO_ACTIVE_HIGH>;
+ reset-gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ status = "disabled";
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "disabled";
+
+ oled: oled@3d {
+ compatible = "solomon,ssd1305fb-i2c";
+ reg = <0x3d>;
+ solomon,height = <64>;
+ solomon,width = <128>;
+ solomon,page-offset = <0>;
+ solomon,prechargep2 = <15>;
+ reset-gpios = <&gpio_oled 1 GPIO_ACTIVE_LOW>;
+ vbat-supply = <&sw2_reg>;
+ status = "disabled";
+ };
+
+ gpio_oled: gpio@41 {
+ compatible = "nxp,pca9536";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x41>;
+ vcc-supply = <&sw2_reg>;
+ status = "disabled";
+ };
+};
+
+&iomuxc {
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b020
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b020
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b020
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b020
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b020
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b020
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b020
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b020
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b020
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b020
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b020
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b020
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b020
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b020
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b010
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x1b010
+ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b098
+ >;
+ };
+
+ pinctrl_hdmi_cec: hdmicecgrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A25__HDMI_TX_CEC_LINE 0x1b898
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b899
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b899
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b899
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b899
+ >;
+ };
+
+ pinctrl_ipu1: ipu1grp {
+ fsl,pins = <
+ MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
+ MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10
+ MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10
+ MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10
+ MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10
+ MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10
+ MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10
+ MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10
+ MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10
+ MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10
+ MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10
+ MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10
+ MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10
+ MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10
+ MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10
+ MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10
+ MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10
+ MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10
+ MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10
+ MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10
+ MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10
+ MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10
+ MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10
+ MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10
+ MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10
+ MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10
+ MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10
+ >;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b098
+ MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x1b098
+ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x1b098
+ >;
+ };
+
+ pinctrl_pcie_reg: pciereggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x1b098
+ >;
+ };
+
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x1b098
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_9__PWM1_OUT 0x8
+ >;
+ };
+
+ pinctrl_touch: touchgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b098
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b098
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0a8
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0a8
+ >;
+ };
+
+ pinctrl_usbh1: usbh1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D30__USB_H1_OC 0x1b098
+ >;
+ };
+
+ pinctrl_usbh1_vbus: usbh1-vbus {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x98
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x1b098
+ MX6QDL_PAD_EIM_D21__USB_OTG_OC 0x1b098
+ >;
+ };
+
+ pinctrl_usbotg_vbus: usbotg-vbus {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x98
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A16__GPIO2_IO22 0x1b018
+ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x1b018
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc4: usdhc4grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x1f069
+ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10069
+ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17069
+ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17069
+ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17069
+ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17069
+ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17069
+ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17069
+ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17069
+ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17069
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__WDOG2_B 0x1b0b0
+ >;
+ };
+};
+
+&ipu1_di0_disp0 {
+ remote-endpoint = <&lcd_display_in>;
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio7 12 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_pcie>;
+ status = "disabled";
+};
+
+&pwm1 {
+ #pwm-cells = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "disabled";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&usbh1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh1>;
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "disabled";
+};
+
+&usbotg {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ vbus-supply = <&reg_usb_otg_vbus>;
+ srp-disable;
+ hnp-disable;
+ adp-disable;
+ status = "okay";
+};
+
+&usbphy1 {
+ fsl,tx-d-cal = <106>;
+ status = "okay";
+};
+
+&usbphy2 {
+ fsl,tx-d-cal = <109>;
+ status = "disabled";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <4>;
+ cd-gpios = <&gpio7 8 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 22 GPIO_ACTIVE_HIGH>;
+ no-1-8-v;
+ keep-power-in-suspend;
+ wakeup-source;
+ vmmc-supply = <&sw2_reg>;
+ status = "disabled";
+};
+
+&usdhc4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc4>;
+ bus-width = <8>;
+ non-removable;
+ no-1-8-v;
+ keep-power-in-suspend;
+ vmmc-supply = <&sw2_reg>;
+ status = "okay";
+};
+
+&wdog1 {
+ status = "disabled";
+};
+
+&wdog2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-draco.dts b/arch/arm/boot/dts/imx6dl-yapp4-draco.dts
new file mode 100644
index 000000000000..a38c407fd837
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-yapp4-draco.dts
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2015-2018 Y Soft Corporation, a.s.
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6dl-yapp4-common.dtsi"
+
+/ {
+ model = "Y Soft IOTA Draco i.MX6Solo board";
+ compatible = "ysoft,imx6dl-yapp4-draco", "fsl,imx6dl";
+
+ memory@10000000 {
+ device_type = "memory";
+ reg = <0x10000000 0x20000000>;
+ };
+};
+
+&backlight {
+ status = "okay";
+};
+
+&lcd_display {
+ status = "okay";
+};
+
+&leds {
+ status = "okay";
+};
+
+&panel {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&reg_usb_h1_vbus {
+ status = "okay";
+};
+
+&touchscreen {
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
+
+&usbphy2 {
+ status = "okay";
+};
+
+&usdhc3 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-hydra.dts b/arch/arm/boot/dts/imx6dl-yapp4-hydra.dts
new file mode 100644
index 000000000000..f97927064750
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-yapp4-hydra.dts
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2015-2018 Y Soft Corporation, a.s.
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6dl-yapp4-common.dtsi"
+
+/ {
+ model = "Y Soft IOTA Hydra i.MX6DualLite board";
+ compatible = "ysoft,imx6dl-yapp4-hydra", "fsl,imx6dl";
+
+ memory@10000000 {
+ device_type = "memory";
+ reg = <0x10000000 0x80000000>;
+ };
+};
+
+&gpio_oled {
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&leds {
+ status = "okay";
+};
+
+&oled {
+ status = "okay";
+};
+
+&pcie {
+ status = "okay";
+};
+
+&reg_pcie {
+ status = "okay";
+};
+
+&usdhc3 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
new file mode 100644
index 000000000000..0d594e4bd559
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2015-2018 Y Soft Corporation, a.s.
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6dl-yapp4-common.dtsi"
+
+/ {
+ model = "Y Soft IOTA Ursa i.MX6Solo board";
+ compatible = "ysoft,imx6dl-yapp4-ursa", "fsl,imx6dl";
+
+ memory@10000000 {
+ device_type = "memory";
+ reg = <0x10000000 0x20000000>;
+ };
+};
+
+&backlight {
+ status = "okay";
+};
+
+&lcd_display {
+ status = "okay";
+};
+
+&panel {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&reg_usb_h1_vbus {
+ status = "okay";
+};
+
+&switch_ports {
+ /delete-node/ port@2;
+};
+
+&touchscreen {
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
+
+&usbphy2 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts
new file mode 100644
index 000000000000..45eb0b7f75f8
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-logicpd.dts
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Logic PD, Inc.
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6-logicpd-som.dtsi"
+#include "imx6-logicpd-baseboard.dtsi"
+
+/ {
+ model = "Logic PD i.MX6QD SOM-M3";
+ compatible = "fsl,imx6q";
+
+ backlight: backlight-lvds {
+ compatible = "pwm-backlight";
+ pwms = <&pwm3 0 20000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ power-supply = <&reg_lcd>;
+ };
+
+ panel-lvds0 {
+ compatible = "okaya,rs800480t-7x0gp";
+
+ port {
+ panel_in_lvds0: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
+
+ reg_lcd: regulator-lcd {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_reg>;
+ compatible = "regulator-fixed";
+ regulator-name = "lcd_panel_pwr";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio4 17 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&reg_3v3>;
+ startup-delay-us = <500000>;
+ };
+
+ reg_lcd_reset: regulator-lcd-reset {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_reset>;
+ compatible = "regulator-fixed";
+ regulator-name = "nLCD_RESET";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&reg_lcd>;
+ };
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+ <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
+ <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+ <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
+ <&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
+};
+
+&hdmi {
+ ddc-i2c-bus = <&i2c3>;
+ status = "okay";
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <24>;
+ status = "okay";
+
+ port@4 {
+ reg = <4>;
+ lvds0_out: endpoint {
+ remote-endpoint = <&panel_in_lvds0>;
+ };
+ };
+ };
+};
+
+&pwm3 {
+ status = "okay";
+};
+
+&reg_hdmi {
+ regulator-always-on; /* Without this, the level shifter on HDMI doesn't turn on */
+};
+
+&iomuxc {
+ pinctrl_lcd_reg: lcdreg {
+ fsl,pins = <
+ MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x100b0 /* R_LCD_PANEL_PWR */
+ >;
+ };
+
+ pinctrl_lcd_reset: lcdreset {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x100b0 /* LCD_nRESET */
+ >;
+ };
+
+ pinctrl_touchscreen: touchscreengrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x1b0b0 /* TOUCH_nPINTDAV */
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-pistachio.dts b/arch/arm/boot/dts/imx6q-pistachio.dts
index 5edf858c8b86..a31b17eaf51c 100644
--- a/arch/arm/boot/dts/imx6q-pistachio.dts
+++ b/arch/arm/boot/dts/imx6q-pistachio.dts
@@ -103,7 +103,7 @@
power {
label = "Power Button";
gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
- gpio-key,wakeup;
+ wakeup-source;
linux,code = <KEY_POWER>;
};
};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index 279b15e9ae2e..2ce8399a10ba 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -1,49 +1,6 @@
-/*
- * Copyright 2014 Soeren Moch <smoch@web.de>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this file; if not, write to the Free
- * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
- * MA 02110-1301 USA
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+//
+// Copyright 2014 Soeren Moch <smoch@web.de>
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index 8380f1b26826..7c4ad541c3f5 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -332,11 +332,17 @@
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
+ /* 3.25 MHz ADC clock speed */
+ st,adc-freq = <1>;
+ /* 12-bit ADC */
+ st,mod-12b = <1>;
+ /* internal ADC reference */
+ st,ref-sel = <0>;
+ /* ADC conversion time: 80 clocks */
+ st,sample-time = <4>;
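+ /*
+  * Assumed rationale: the ADC settings above live in the MFD parent
+  * node because the ADC block is shared by the touchscreen and the
+  * stmpe-adc function declared below.
+  */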
stmpe_touchscreen {
compatible = "st,stmpe-ts";
- /* 3.25 MHz ADC clock speed */
- st,adc-freq = <1>;
/* 8 sample average control */
st,ave-ctrl = <3>;
/* 7 length fractional part in z */
@@ -346,17 +352,17 @@
* current limit value
*/
st,i-drive = <1>;
- /* 12-bit ADC */
- st,mod-12b = <1>;
- /* internal ADC reference */
- st,ref-sel = <0>;
- /* ADC converstion time: 80 clocks */
- st,sample-time = <4>;
/* 1 ms panel driver settling time */
st,settling = <3>;
/* 5 ms touch detect interrupt delay */
st,touch-det-delay = <5>;
};
+
+ stmpe_adc {
+ compatible = "st,stmpe-adc";
+ /* do not allow requests for ADC channels 3-0 (used by the touchscreen) */
+ st,norequest-mask = <0x0F>;
+ };
};
};
@@ -369,8 +375,8 @@
pinctrl-names = "default", "recovery";
pinctrl-0 = <&pinctrl_i2c3>;
pinctrl-1 = <&pinctrl_i2c3_recovery>;
- scl-gpios = <&gpio3 17 GPIO_ACTIVE_HIGH>;
- sda-gpios = <&gpio3 18 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&gpio3 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 18 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
index 87e15e7cb32b..1beac22266ed 100644
--- a/arch/arm/boot/dts/imx6qdl-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
@@ -262,11 +262,17 @@
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
+ /* 3.25 MHz ADC clock speed */
+ st,adc-freq = <1>;
+ /* 12-bit ADC */
+ st,mod-12b = <1>;
+ /* internal ADC reference */
+ st,ref-sel = <0>;
+ /* ADC conversion time: 80 clocks */
+ st,sample-time = <4>;
stmpe_touchscreen {
compatible = "st,stmpe-ts";
- /* 3.25 MHz ADC clock speed */
- st,adc-freq = <1>;
/* 8 sample average control */
st,ave-ctrl = <3>;
/* 7 length fractional part in z */
@@ -276,17 +282,17 @@
* current limit value
*/
st,i-drive = <1>;
- /* 12-bit ADC */
- st,mod-12b = <1>;
- /* internal ADC reference */
- st,ref-sel = <0>;
- /* ADC converstion time: 80 clocks */
- st,sample-time = <4>;
/* 1 ms panel driver settling time */
st,settling = <3>;
/* 5 ms touch detect interrupt delay */
st,touch-det-delay = <5>;
};
+
+ stmpe_adc {
+ compatible = "st,stmpe-adc";
+ /* do not allow requests for ADC channels 3-0 (used by the touchscreen) */
+ st,norequest-mask = <0x0F>;
+ };
};
};
@@ -298,8 +304,8 @@
pinctrl-names = "default", "recovery";
pinctrl-0 = <&pinctrl_i2c3>;
pinctrl-1 = <&pinctrl_i2c3_recovery>;
- scl-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
- sda-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&gpio1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 1b50b01e9bac..433bf09a1954 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -89,10 +89,23 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
+ phy-handle = <&ethphy>;
phy-mode = "rgmii";
phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
phy-supply = <&vdd_eth_io_reg>;
status = "disabled";
+
+ fec_mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ txc-skew-ps = <1680>;
+ rxc-skew-ps = <1860>;
+ };
+ };
};
&gpmi {
@@ -117,6 +130,7 @@
reg = <0x58>;
interrupt-parent = <&gpio2>;
interrupts = <9 IRQ_TYPE_LEVEL_LOW>; /* active-low GPIO2_9 */
+ interrupt-controller;
regulators {
vddcore_reg: bcore1 {
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index 8930aec6464c..a0705066ccba 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -64,7 +64,6 @@
regulator-max-microvolt = <3300000>;
gpio = <&gpio2 31 GPIO_ACTIVE_HIGH>;
enable-active-high;
- regulator-always-on;
};
gpio-keys {
@@ -250,6 +249,8 @@
pinctrl-0 = <&pinctrl_i2c1_mma8451_int>;
interrupt-parent = <&gpio1>;
interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ vdd-supply = <&reg_sensors>;
+ vddio-supply = <&reg_sensors>;
};
ov5642: camera@3c {
@@ -440,6 +441,8 @@
pinctrl-0 = <&pinctrl_i2c3_mag3110_int>;
interrupt-parent = <&gpio3>;
interrupts = <16 IRQ_TYPE_EDGE_RISING>;
+ vdd-supply = <&reg_sensors>;
+ vddio-supply = <&reg_sensors>;
};
light-sensor@44 {
@@ -449,6 +452,7 @@
pinctrl-0 = <&pinctrl_i2c3_isl29023_int>;
interrupt-parent = <&gpio3>;
interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+ vcc-supply = <&reg_sensors>;
};
};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index e7524e73efb4..4b4813f176cd 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -338,7 +338,7 @@
compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
reg = <0x02080000 0x4000>;
interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SL_CLK_PWM1>,
+ clocks = <&clks IMX6SL_CLK_PERCLK>,
<&clks IMX6SL_CLK_PWM1>;
clock-names = "ipg", "per";
};
@@ -348,7 +348,7 @@
compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
reg = <0x02084000 0x4000>;
interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SL_CLK_PWM2>,
+ clocks = <&clks IMX6SL_CLK_PERCLK>,
<&clks IMX6SL_CLK_PWM2>;
clock-names = "ipg", "per";
};
@@ -358,7 +358,7 @@
compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
reg = <0x02088000 0x4000>;
interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SL_CLK_PWM3>,
+ clocks = <&clks IMX6SL_CLK_PERCLK>,
<&clks IMX6SL_CLK_PWM3>;
clock-names = "ipg", "per";
};
@@ -368,7 +368,7 @@
compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm";
reg = <0x0208c000 0x4000>;
interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SL_CLK_PWM4>,
+ clocks = <&clks IMX6SL_CLK_PERCLK>,
<&clks IMX6SL_CLK_PWM4>;
clock-names = "ipg", "per";
};
diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts
index d8163705363e..4a31a415f88e 100644
--- a/arch/arm/boot/dts/imx6sll-evk.dts
+++ b/arch/arm/boot/dts/imx6sll-evk.dts
@@ -309,7 +309,7 @@
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
keep-power-in-suspend;
- enable-sdio-wakeup;
+ wakeup-source;
vmmc-supply = <&reg_sd3_vmmc>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 272ff6133ec1..5b16e65f7696 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -467,7 +467,7 @@
};
gpt: gpt@2098000 {
- compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
+ compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
reg = <0x02098000 0x4000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_GPT_BUS>,
@@ -785,6 +785,18 @@
clocks = <&clks IMX6SX_CLK_GPU>;
};
+ pd_disp: power-domain@2 {
+ reg = <2>;
+ #power-domain-cells = <0>;
+ clocks = <&clks IMX6SX_CLK_PXP_AXI>,
+ <&clks IMX6SX_CLK_DISPLAY_AXI>,
+ <&clks IMX6SX_CLK_LCDIF1_PIX>,
+ <&clks IMX6SX_CLK_LCDIF_APB>,
+ <&clks IMX6SX_CLK_LCDIF2_PIX>,
+ <&clks IMX6SX_CLK_CSI>,
+ <&clks IMX6SX_CLK_VADC>;
+ };
+
pd_pci: power-domain@3 {
reg = <3>;
#power-domain-cells = <0>;
@@ -1205,6 +1217,7 @@
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_PXP_AXI>;
clock-names = "axi";
+ power-domains = <&pd_disp>;
status = "disabled";
};
@@ -1226,6 +1239,7 @@
<&clks IMX6SX_CLK_LCDIF_APB>,
<&clks IMX6SX_CLK_DISPLAY_AXI>;
clock-names = "pix", "axi", "disp_axi";
+ power-domains = <&pd_disp>;
status = "disabled";
};
@@ -1237,6 +1251,7 @@
<&clks IMX6SX_CLK_LCDIF_APB>,
<&clks IMX6SX_CLK_DISPLAY_AXI>;
clock-names = "pix", "axi", "disp_axi";
+ power-domains = <&pd_disp>;
status = "disabled";
};
@@ -1246,6 +1261,7 @@
clocks = <&clks IMX6SX_CLK_VADC>,
<&clks IMX6SX_CLK_CSI>;
clock-names = "vadc", "csi";
+ power-domains = <&pd_disp>;
status = "disabled";
};
};
@@ -1370,7 +1386,8 @@
<&clks IMX6SX_CLK_PCIE_REF_125M>,
<&clks IMX6SX_CLK_DISPLAY_AXI>;
clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_inbound_axi";
- power-domains = <&pd_pci>;
+ power-domains = <&pd_disp>, <&pd_pci>;
+ power-domain-names = "pcie", "pcie_phy";
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/imx6ul-phytec-pcl063.dtsi b/arch/arm/boot/dts/imx6ul-phytec-pcl063.dtsi
new file mode 100644
index 000000000000..fc2997449b49
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-phytec-pcl063.dtsi
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 PHYTEC Messtechnik GmbH
+ * Author: Christian Hemp <c.hemp@phytec.de>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "imx6ul.dtsi"
+
+/ {
+ model = "Phytec phyCORE i.MX6 UltraLite";
+ compatible = "phytec,imx6ul-pcl063", "fsl,imx6ul";
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ /*
+ * Set the minimum memory size here and
+ * let the bootloader set the real size.
+ */
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x8000000>;
+ };
+
+ gpio_leds_som: leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpioleds_som>;
+ compatible = "gpio-leds";
+
+ led_green {
+ label = "phycore:green";
+ gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet1>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@1 {
+ reg = <1>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET_REF>;
+ clock-names = "rmii-ref";
+ };
+ };
+};
+
+&gpmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand>;
+ nand-on-flash-bbt;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ eeprom@52 {
+ compatible = "catalyst,24c32", "atmel,24c32";
+ reg = <0x52>;
+ };
+};
+
+&snvs_poweroff {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO07__ENET1_MDC 0x1b0b0
+ MX6UL_PAD_GPIO1_IO06__ENET1_MDIO 0x1b0b0
+ MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1 0x4001b031
+ MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0x17059
+ >;
+ };
+
+ pinctrl_gpioleds_som: gpioledssomgrp {
+ fsl,pins = <MX6UL_PAD_SNVS_TAMPER4__GPIO5_IO04 0x0b0b0>;
+ };
+
+ pinctrl_gpmi_nand: gpminandgrp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_CLE__RAWNAND_CLE 0x0b0b1
+ MX6UL_PAD_NAND_ALE__RAWNAND_ALE 0x0b0b1
+ MX6UL_PAD_NAND_WP_B__RAWNAND_WP_B 0x0b0b1
+ MX6UL_PAD_NAND_READY_B__RAWNAND_READY_B 0x0b000
+ MX6UL_PAD_NAND_CE0_B__RAWNAND_CE0_B 0x0b0b1
+ MX6UL_PAD_NAND_RE_B__RAWNAND_RE_B 0x0b0b1
+ MX6UL_PAD_NAND_WE_B__RAWNAND_WE_B 0x0b0b1
+ MX6UL_PAD_NAND_DATA00__RAWNAND_DATA00 0x0b0b1
+ MX6UL_PAD_NAND_DATA01__RAWNAND_DATA01 0x0b0b1
+ MX6UL_PAD_NAND_DATA02__RAWNAND_DATA02 0x0b0b1
+ MX6UL_PAD_NAND_DATA03__RAWNAND_DATA03 0x0b0b1
+ MX6UL_PAD_NAND_DATA04__RAWNAND_DATA04 0x0b0b1
+ MX6UL_PAD_NAND_DATA05__RAWNAND_DATA05 0x0b0b1
+ MX6UL_PAD_NAND_DATA06__RAWNAND_DATA06 0x0b0b1
+ MX6UL_PAD_NAND_DATA07__RAWNAND_DATA07 0x0b0b1
+ >;
+ };
+
+ pinctrl_i2c1: i2cgrp {
+ fsl,pins = <
+ MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0
+ MX6UL_PAD_UART4_RX_DATA__I2C1_SDA 0x4001b8b0
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ul-phytec-peb-eval-01.dtsi b/arch/arm/boot/dts/imx6ul-phytec-peb-eval-01.dtsi
new file mode 100644
index 000000000000..e2f38f39a6ad
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-phytec-peb-eval-01.dtsi
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 PHYTEC Messtechnik
+ * Author: Christian Hemp <c.hemp@phytec.de>
+ */
+
+#include <dt-bindings/input/input.h>
+
+/ {
+ gpio_keys: gpio-keys {
+ compatible = "gpio-key";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_keys>;
+ status = "disabled";
+
+ power {
+ label = "Power Button";
+ gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ wakeup-source;
+ };
+ };
+
+ user_leds: leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_user_leds>;
+ status = "disabled";
+
+ led_yellow {
+ gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ led_red {
+ gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl_gpio_keys: gpio_keysgrp {
+ fsl,pins = <
+ MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00 0x79
+ >;
+ };
+
+ pinctrl_user_leds: user_ledsgrp {
+ fsl,pins = <
+ MX6UL_PAD_JTAG_MOD__GPIO1_IO10 0x79
+ MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0x79
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin-full.dts b/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin-full.dts
new file mode 100644
index 000000000000..b6a1407a9d44
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin-full.dts
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 PHYTEC Messtechnik GmbH
+ * Author: Christian Hemp <c.hemp@phytec.de>
+ */
+
+/dts-v1/;
+#include "imx6ul-phytec-pcl063.dtsi"
+#include "imx6ul-phytec-phyboard-segin.dtsi"
+#include "imx6ul-phytec-peb-eval-01.dtsi"
+
+/ {
+ model = "Phytec phyBOARD-Segin i.MX6 UltraLite Full Featured";
+ compatible = "phytec,imx6ul-pbacd10", "phytec,imx6ul-pcl063", "fsl,imx6ul";
+};
+
+&adc1 {
+ status = "okay";
+};
+
+&can1 {
+ status = "okay";
+};
+
+&tlv320 {
+ status = "okay";
+};
+
+&ecspi3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi3>;
+ cs-gpios = <&gpio1 20 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&fec2 {
+ status = "okay";
+};
+
+&i2c_rtc {
+ status = "okay";
+};
+
+&reg_can1_en {
+ status = "okay";
+};
+
+&reg_sound_1v8 {
+ status = "okay";
+};
+
+&reg_sound_3v3 {
+ status = "okay";
+};
+
+&sai2 {
+ status = "okay";
+};
+
+&sound {
+ status = "okay";
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&usbotg1 {
+ status = "okay";
+};
+
+&usbotg2 {
+ status = "okay";
+};
+
+&usdhc1 {
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_ecspi3: ecspi3grp {
+ fsl,pins = <
+ MX6UL_PAD_UART2_RTS_B__ECSPI3_MISO 0x10b0
+ MX6UL_PAD_UART2_CTS_B__ECSPI3_MOSI 0x10b0
+ MX6UL_PAD_UART2_RX_DATA__ECSPI3_SCLK 0x10b0
+ MX6UL_PAD_UART2_TX_DATA__GPIO1_IO20 0x10b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin.dtsi b/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin.dtsi
new file mode 100644
index 000000000000..7bf439a77d2c
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin.dtsi
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 PHYTEC Messtechnik GmbH
+ * Author: Christian Hemp <c.hemp@phytec.de>
+ */
+
+/ {
+ model = "Phytec phyBOARD-Segin i.MX6 UltraLite";
+ compatible = "phytec,imx6ul-pbacd-10", "phytec,imx6ul-pcl063", "fsl,imx6ul";
+
+ aliases {
+ rtc0 = &i2c_rtc;
+ rtc1 = &snvs_rtc;
+ };
+
+ reg_sound_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "i2s-audio-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ status = "disabled";
+ };
+
+ reg_sound_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "i2s-audio-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ status = "disabled";
+ };
+
+ reg_can1_en: regulator-can1 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1_en>;
+ regulator-name = "Can";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ status = "disabled";
+ };
+
+ reg_adc1_vref_3v3: regulator-vref-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vref-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ sound: sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "phyBOARD-Segin-TLV320AIC3007";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,widgets =
+ "Line", "Line In",
+ "Line", "Line Out",
+ "Speaker", "Speaker";
+ simple-audio-card,routing =
+ "Line Out", "LLOUT",
+ "Line Out", "RLOUT",
+ "Speaker", "SPOP",
+ "Speaker", "SPOM",
+ "LINE1L", "Line In",
+ "LINE1R", "Line In";
+ status = "disabled";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai2>;
+ };
+
+ dailink_master: simple-audio-card,codec {
+ sound-dai = <&tlv320>;
+ clocks = <&clks IMX6UL_CLK_SAI2>;
+ };
+ };
+};
+
+&adc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc1>;
+ vref-supply = <&reg_adc1_vref_3v3>;
+ /*
+ * The driver cannot request an individual channel, so request 4
+ * channels here even though only the fourth one is used.
+ */
+ num-channels = <4>;
+ status = "disabled";
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ xceiver-supply = <&reg_can1_en>;
+ status = "disabled";
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>;
+ assigned-clock-rates = <786432000>;
+};
+
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy1>;
+ status = "disabled";
+};
+
+&i2c1 {
+ tlv320: codec@18 {
+ compatible = "ti,tlv320aic3007";
+ #sound-dai-cells = <0>;
+ reg = <0x18>;
+ AVDD-supply = <&reg_sound_3v3>;
+ IOVDD-supply = <&reg_sound_3v3>;
+ DRVDD-supply = <&reg_sound_3v3>;
+ DVDD-supply = <&reg_sound_1v8>;
+ status = "disabled";
+ };
+
+ stmpe: touchscreen@44 {
+ compatible = "st,stmpe811";
+ reg = <0x44>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_stmpe>;
+ status = "disabled";
+
+ touchscreen {
+ compatible = "st,stmpe-ts";
+ st,sample-time = <4>;
+ st,mod-12b = <1>;
+ st,ref-sel = <0>;
+ st,adc-freq = <1>;
+ st,ave-ctrl = <1>;
+ st,touch-det-delay = <2>;
+ st,settling = <2>;
+ st,fraction-z = <7>;
+ st,i-drive = <1>;
+ touchscreen-inverted-x = <1>;
+ touchscreen-inverted-y = <1>;
+ };
+ };
+
+ i2c_rtc: rtc@68 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc_int>;
+ compatible = "microcrystal,rv4162";
+ reg = <0x68>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ status = "disabled";
+ };
+};
+
+&mdio {
+ ethphy1: ethernet-phy@2 {
+ reg = <2>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+ clock-names = "rmii-ref";
+ };
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+ status = "disabled";
+};
+
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ assigned-clocks = <&clks IMX6UL_CLK_SAI2_SEL>,
+ <&clks IMX6UL_CLK_SAI2>;
+ assigned-clock-parents = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>;
+ assigned-clock-rates = <0>, <19200000>;
+ fsl,sai-mclk-direction-output;
+ status = "disabled";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ uart-has-rtscts;
+ status = "disabled";
+};
+
+&usbotg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_otg1_id>;
+ dr_mode = "otg";
+ status = "disabled";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ disable-over-current;
+ status = "disabled";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ keep-power-in-suspend;
+ wakeup-source;
+ status = "disabled";
+};
+
+&iomuxc {
+ pinctrl_adc1: adc1grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0
+ >;
+ };
+
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+ MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN 0x1b0b0
+ MX6UL_PAD_ENET2_RX_ER__ENET2_RX_ER 0x1b0b0
+ MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01 0x1b0b0
+ MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x4001b031
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1 {
+ fsl,pins = <
+ MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX 0x0b0b0
+ MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX 0x0b0b0
+ >;
+ };
+
+ pinctrl_flexcan1_en: flexcan1engrp {
+ fsl,pins = <
+ MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02 0x17059
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO04__PWM3_OUT 0x0b0b0
+ >;
+ };
+
+ pinctrl_rtc_int: rtcintgrp {
+ fsl,pins = <
+ MX6UL_PAD_SNVS_TAMPER1__GPIO5_IO01 0x17059
+ >;
+ };
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK 0x17088
+ MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC 0x17088
+ MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x11088
+ MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA 0x11088
+ MX6UL_PAD_JTAG_TMS__SAI2_MCLK 0x17088
+ >;
+ };
+
+ pinctrl_stmpe: stmpegrp {
+ fsl,pins = <
+ MX6UL_PAD_SNVS_TAMPER3__GPIO5_IO03 0x17059
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6UL_PAD_UART5_TX_DATA__UART5_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x1b0b1
+ MX6UL_PAD_GPIO1_IO08__UART5_DCE_RTS 0x1b0b1
+ MX6UL_PAD_GPIO1_IO09__UART5_DCE_CTS 0x1b0b1
+ >;
+ };
+
+ pinctrl_usb_otg1_id: usbotg1idgrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO00__ANATOP_OTG1_ID 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x17059
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x10059
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x17059
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x17059
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x17059
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059
+ MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170b9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170b9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170b9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170f9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170f9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170f9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170f9
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi
index 6c63a7384611..9ad1da159768 100644
--- a/arch/arm/boot/dts/imx6ull-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi
@@ -94,16 +94,16 @@
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
- sda-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;
- scl-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>;
+ sda-gpios = <&gpio1 29 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio1 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
};
&i2c2 {
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
- sda-gpios = <&gpio1 31 GPIO_ACTIVE_LOW>;
- scl-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
+ sda-gpios = <&gpio1 31 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio1 30 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
ad7879@2c {
diff --git a/arch/arm/boot/dts/imx6ull.dtsi b/arch/arm/boot/dts/imx6ull.dtsi
index f3668fe69eac..22e4a307fa59 100644
--- a/arch/arm/boot/dts/imx6ull.dtsi
+++ b/arch/arm/boot/dts/imx6ull.dtsi
@@ -30,6 +30,18 @@
>;
};
+&ocotp {
+ compatible = "fsl,imx6ull-ocotp", "syscon";
+};
+
+&usdhc1 {
+ compatible = "fsl,imx6ull-usdhc", "fsl,imx6sx-usdhc";
+};
+
+&usdhc2 {
+ compatible = "fsl,imx6ull-usdhc", "fsl,imx6sx-usdhc";
+};
+
/ {
soc {
aips3: aips-bus@2200000 {
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index 931b2754b099..fca6e50f37c8 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -199,9 +199,13 @@
assigned-clock-parents = <&scg1 IMX7ULP_CLK_SOSC_BUS_CLK>;
};
- smc1: smc1@40410000 {
+ smc1: clock-controller@40410000 {
compatible = "fsl,imx7ulp-smc1";
reg = <0x40410000 0x1000>;
+ #clock-cells = <1>;
+ clocks = <&scg1 IMX7ULP_CLK_CORE_DIV>,
+ <&scg1 IMX7ULP_CLK_HSRUN_CORE_DIV>;
+ clock-names = "divcore", "hsrun_divcore";
};
pcc3: clock-controller@40b30000 {
@@ -343,4 +347,17 @@
gpio-ranges = <&iomuxc1 0 96 32>;
};
};
+
+ m4aips1: bus@41080000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x41080000 0x80000>;
+ ranges;
+
+ sim: sim@410a3000 {
+ compatible = "fsl,imx7ulp-sim", "syscon";
+ reg = <0x410a3000 0x1000>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/integrator.dtsi b/arch/arm/boot/dts/integrator.dtsi
index 4d58638d104b..1612a869a4f7 100644
--- a/arch/arm/boot/dts/integrator.dtsi
+++ b/arch/arm/boot/dts/integrator.dtsi
@@ -3,9 +3,15 @@
* SoC core Device Tree for the ARM Integrator platforms
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
core-module@10000000 {
compatible = "arm,core-module-integrator", "syscon", "simple-mfd";
reg = <0x10000000 0x200>;
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index a185ab8759fa..01fa229e1bd0 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -192,6 +192,43 @@
interrupts = <27>;
};
+ bridge {
+ compatible = "ti,ths8134a", "ti,ths8134";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ vga_bridge_in: endpoint {
+ remote-endpoint = <&clcd_pads_vga_dac>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ vga_bridge_out: endpoint {
+ remote-endpoint = <&vga_con_in>;
+ };
+ };
+ };
+ };
+
+ vga {
+ compatible = "vga-connector";
+
+ port {
+ vga_con_in: endpoint {
+ remote-endpoint = <&vga_bridge_out>;
+ };
+ };
+ };
+
fpga {
/*
* These PrimeCells are at the same location and using
@@ -254,39 +291,27 @@
interrupts = <22>;
clocks = <&auxosc>, <&pclk>;
clock-names = "clcdclk", "apb_pclk";
+ /* 640x480 16bpp @ 25.175MHz is 36827428 bytes/s */
+ max-memory-bandwidth = <40000000>;
- port {
- /*
- * The VGA connected is implemented with a
- * THS8134A triple DAC that can be run in 24bit
- * or 16bit RGB mode.
- */
- clcd_pads: endpoint {
- remote-endpoint = <&clcd_panel>;
- arm,pl11x,tft-r0g0b0-pads = <1 7 13>;
- };
- };
-
- panel {
- compatible = "panel-dpi";
-
- port {
- clcd_panel: endpoint {
- remote-endpoint = <&clcd_pads>;
- };
- };
-
- /* Standard 640x480 VGA timings */
- panel-timing {
- clock-frequency = <25175000>;
- hactive = <640>;
- hback-porch = <48>;
- hfront-porch = <16>;
- hsync-len = <96>;
- vactive = <480>;
- vback-porch = <33>;
- vfront-porch = <10>;
- vsync-len = <2>;
+ /*
+ * This port is routed through a PLD (Programmable
+ * Logic Device) that routes the output from the CLCD
+ * (after transformations) to the VGA DAC and also an
+ * external panel connector. The PLD is essential for
+ * supporting RGB565/BGR565.
+ *
+ * The signals from the port thus reach two endpoints.
+ * The PLD is managed through a few special bits in the
+ * FPGA "sysreg".
+ *
+ * This arrangement can be clearly seen in
+ * ARM DUI 0225D, page 3-41, figure 3-19.
+ */
+ port@0 {
+ clcd_pads_vga_dac: endpoint {
+ remote-endpoint = <&vga_bridge_in>;
+ arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
};
};
};
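A quick cross-check of the max-memory-bandwidth figure above (a back-of-the-envelope sketch using the standard 640x480 VGA timings this hunk removes from the panel-timing node, not an authoritative derivation): with a 25.175 MHz pixel clock, 2 bytes per pixel, and an 800x525 total raster of which 640x480 is active,
    25175000 Hz x 2 B/px x (640 x 480) / (800 x 525) ≈ 36827428 B/s,
which matches the value quoted in the comment; the property is then set to 40000000, just above that requirement.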
diff --git a/arch/arm/boot/dts/kirkwood-dir665.dts b/arch/arm/boot/dts/kirkwood-dir665.dts
index 31ceacd841de..b3ad3f607d31 100644
--- a/arch/arm/boot/dts/kirkwood-dir665.dts
+++ b/arch/arm/boot/dts/kirkwood-dir665.dts
@@ -190,53 +190,6 @@
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
};
};
-
- dsa {
- status = "disabled";
-
- compatible = "marvell,dsa";
- #address-cells = <2>;
- #size-cells = <0>;
-
- dsa,ethernet = <&eth0port>;
- dsa,mii-bus = <&mdio>;
-
- switch@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0 0>; /* MDIO address 0, switch 0 in tree */
-
- port@0 {
- reg = <0>;
- label = "lan4";
- };
-
- port@1 {
- reg = <1>;
- label = "lan3";
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- };
-
- port@3 {
- reg = <3>;
- label = "lan1";
- };
-
- port@4 {
- reg = <4>;
- label = "wan";
- };
-
- port@6 {
- reg = <6>;
- label = "cpu";
- };
- };
- };
};
&mdio {
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index cbaf06f2f78e..eb917462b219 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -36,8 +36,8 @@
compatible = "gpio-fan";
pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
pinctrl-names = "default";
- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
- &gpio1 13 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+ &gpio1 13 GPIO_ACTIVE_HIGH>;
gpio-fan,speed-map = <0 0
3000 1
6000 2>;
diff --git a/arch/arm/boot/dts/kirkwood-linksys-viper.dts b/arch/arm/boot/dts/kirkwood-linksys-viper.dts
index a7d659b7145a..2f9660f3b457 100644
--- a/arch/arm/boot/dts/kirkwood-linksys-viper.dts
+++ b/arch/arm/boot/dts/kirkwood-linksys-viper.dts
@@ -66,53 +66,6 @@
gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
};
};
-
- dsa {
- status = "disabled";
-
- compatible = "marvell,dsa";
- #address-cells = <2>;
- #size-cells = <0>;
-
- dsa,ethernet = <&eth0port>;
- dsa,mii-bus = <&mdio>;
-
- switch@16,0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <16 0>; /* MDIO address 16, switch 0 in tree */
-
- port@0 {
- reg = <0>;
- label = "ethernet1";
- };
-
- port@1 {
- reg = <1>;
- label = "ethernet2";
- };
-
- port@2 {
- reg = <2>;
- label = "ethernet3";
- };
-
- port@3 {
- reg = <3>;
- label = "ethernet4";
- };
-
- port@4 {
- reg = <4>;
- label = "internet";
- };
-
- port@5 {
- reg = <5>;
- label = "cpu";
- };
- };
- };
};
&pinctrl {
diff --git a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
index 86d532916d56..2e1a75348908 100644
--- a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
+++ b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
@@ -107,53 +107,6 @@
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
};
};
-
- dsa {
- status = "disabled";
-
- compatible = "marvell,dsa";
- #address-cells = <1>;
- #size-cells = <0>;
-
- dsa,ethernet = <&eth0port>;
- dsa,mii-bus = <&mdio>;
-
- switch@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0 0>; /* MDIO address 0, switch 0 in tree */
-
- port@0 {
- reg = <0>;
- label = "lan1";
- };
-
- port@1 {
- reg = <1>;
- label = "lan2";
- };
-
- port@2 {
- reg = <2>;
- label = "lan3";
- };
-
- port@3 {
- reg = <3>;
- label = "lan4";
- };
-
- port@4 {
- reg = <4>;
- label = "wan";
- };
-
- port@5 {
- reg = <5>;
- label = "cpu";
- };
- };
- };
};
&mdio {
diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts b/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts
index a9fee2c2bcaf..9d88301daf0e 100644
--- a/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts
+++ b/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts
@@ -16,15 +16,6 @@
model = "Marvell RD88f6281 Reference design, with Z0 SoC";
compatible = "marvell,rd88f6281-z0", "marvell,rd88f6281","marvell,kirkwood-88f6281", "marvell,kirkwood";
- dsa {
- switch@0 {
- reg = <0 0>; /* MDIO address 0, switch 0 in tree */
- port@4 {
- reg = <4>;
- label = "wan";
- };
- };
- };
};
&eth1 {
diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi b/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
index 0f22f0e6f56b..f1f8eee132e8 100644
--- a/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
+++ b/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
@@ -48,47 +48,6 @@
cd-gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>;
/* No WP GPIO */
};
- };
-
- dsa {
- status = "disabled";
-
- compatible = "marvell,dsa";
- #address-cells = <2>;
- #size-cells = <0>;
-
- dsa,ethernet = <&eth0port>;
- dsa,mii-bus = <&mdio>;
-
- switch@0 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- label = "lan1";
- };
-
- port@1 {
- reg = <1>;
- label = "lan2";
- };
-
- port@2 {
- reg = <2>;
- label = "lan3";
- };
-
- port@3 {
- reg = <3>;
- label = "lan4";
- };
-
- port@5 {
- reg = <5>;
- label = "cpu";
- };
- };
};
};
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 81c7eda2c442..2161e23bd98e 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/include/ "skeleton.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "marvell,kirkwood";
interrupt-parent = <&intc>;
diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts
index 58ea0a4e7afa..f46a11827ef6 100644
--- a/arch/arm/boot/dts/lpc3250-ea3250.dts
+++ b/arch/arm/boot/dts/lpc3250-ea3250.dts
@@ -17,64 +17,70 @@
/ {
model = "Embedded Artists LPC3250 board based on NXP LPC3250";
compatible = "ea,ea3250", "nxp,lpc3250";
- #address-cells = <1>;
- #size-cells = <1>;
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x4000000>;
};
- gpio_keys {
+ gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
autorepeat;
- button@21 {
+
+ button {
label = "Interrupt Key";
linux,code = <103>;
gpios = <&gpio 4 1 0>; /* GPI_P3 1 */
};
+
key1 {
label = "KEY1";
linux,code = <1>;
gpios = <&pca9532 0 0>;
};
+
key2 {
label = "KEY2";
linux,code = <2>;
gpios = <&pca9532 1 0>;
};
+
key3 {
label = "KEY3";
linux,code = <3>;
gpios = <&pca9532 2 0>;
};
+
key4 {
label = "KEY4";
linux,code = <4>;
gpios = <&pca9532 3 0>;
};
+
joy0 {
label = "Joystick Key 0";
linux,code = <10>;
gpios = <&gpio 2 0 0>; /* P2.0 */
};
+
joy1 {
label = "Joystick Key 1";
linux,code = <11>;
gpios = <&gpio 2 1 0>; /* P2.1 */
};
+
joy2 {
label = "Joystick Key 2";
linux,code = <12>;
gpios = <&gpio 2 2 0>; /* P2.2 */
};
+
joy3 {
label = "Joystick Key 3";
linux,code = <13>;
gpios = <&gpio 2 3 0>; /* P2.3 */
};
+
joy4 {
label = "Joystick Key 4";
linux,code = <14>;
diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
index 1e1c2f517a82..ebd19258e22b 100644
--- a/arch/arm/boot/dts/lpc3250-phy3250.dts
+++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
@@ -1,6 +1,7 @@
/*
* PHYTEC phyCORE-LPC3250 board
*
+ * Copyright (C) 2015-2019 Vladimir Zapolskiy <vz@mleia.com>
* Copyright 2012 Roland Stigge <stigge@antcom.de>
*
* The code contained herein is licensed under the GNU General Public
@@ -17,45 +18,12 @@
/ {
model = "PHYTEC phyCORE-LPC3250 board based on NXP LPC3250";
compatible = "phytec,phy3250", "nxp,lpc3250";
- #address-cells = <1>;
- #size-cells = <1>;
- memory {
+ memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x4000000>;
};
- regulators {
- backlight_reg: regulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "backlight_reg";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- gpio = <&gpio 5 4 0>;
- enable-active-high;
- regulator-boot-on;
- };
-
- lcd_reg: regulator@1 {
- compatible = "regulator-fixed";
- regulator-name = "lcd_reg";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- gpio = <&gpio 5 0 0>;
- enable-active-high;
- regulator-boot-on;
- };
-
- sd_reg: regulator@2 {
- compatible = "regulator-fixed";
- regulator-name = "sd_reg";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- gpio = <&gpio 5 5 0>;
- enable-active-high;
- };
- };
-
leds {
compatible = "gpio-leds";
@@ -69,10 +37,59 @@
linux,default-trigger = "heartbeat";
};
};
+
+ panel: panel {
+ compatible = "sharp,lq035q7db03";
+ power-supply = <&reg_lcd>;
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&cldc_output>;
+ };
+ };
+ };
+
+ reg_backlight: regulator-backlight {
+ compatible = "regulator-fixed";
+ regulator-name = "backlight";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio 5 4 0>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ reg_lcd: regulator-lcd {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio 5 0 0>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ reg_sd: regulator-sd {
+ compatible = "regulator-fixed";
+ regulator-name = "sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio 5 5 0>;
+ enable-active-high;
+ regulator-boot-on;
+ };
};
&clcd {
+ max-memory-bandwidth = <18710000>;
status = "okay";
+
+ port {
+ cldc_output: endpoint {
+ remote-endpoint = <&panel_input>;
+ arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
+ };
+ };
};
&i2c1 {
@@ -130,7 +147,7 @@
cd-gpios = <&gpio 3 1 0>;
cd-inverted;
bus-width = <4>;
- vmmc-supply = <&sd_reg>;
+ vmmc-supply = <&reg_sd>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index b7303a4e4236..20b38f4ade37 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -11,12 +11,12 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include "skeleton.dtsi"
-
#include <dt-bindings/clock/lpc32xx-clock.h>
#include <dt-bindings/interrupt-controller/irq.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "nxp,lpc3220";
interrupt-parent = <&mic>;
@@ -139,11 +139,11 @@
};
clcd: clcd@31040000 {
- compatible = "arm,pl110", "arm,primecell";
+ compatible = "arm,pl111", "arm,primecell";
reg = <0x31040000 0x1000>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk LPC32XX_CLK_LCD>;
- clock-names = "apb_pclk";
+ clocks = <&clk LPC32XX_CLK_LCD>, <&clk LPC32XX_CLK_LCD>;
+ clock-names = "clcdclk", "apb_pclk";
status = "disabled";
};
@@ -230,7 +230,7 @@
status = "disabled";
};
- i2s1: i2s@2009C000 {
+ i2s1: i2s@2009c000 {
compatible = "nxp,lpc3220-i2s";
reg = <0x2009C000 0x1000>;
};
@@ -273,7 +273,7 @@
status = "disabled";
};
- i2c1: i2c@400A0000 {
+ i2c1: i2c@400a0000 {
compatible = "nxp,pnx-i2c";
reg = <0x400A0000 0x100>;
interrupt-parent = <&sic1>;
@@ -284,7 +284,7 @@
clocks = <&clk LPC32XX_CLK_I2C1>;
};
- i2c2: i2c@400A8000 {
+ i2c2: i2c@400a8000 {
compatible = "nxp,pnx-i2c";
reg = <0x400A8000 0x100>;
interrupt-parent = <&sic1>;
@@ -295,7 +295,7 @@
clocks = <&clk LPC32XX_CLK_I2C2>;
};
- mpwm: mpwm@400E8000 {
+ mpwm: mpwm@400e8000 {
compatible = "nxp,lpc3220-motor-pwm";
reg = <0x400E8000 0x78>;
status = "disabled";
@@ -394,7 +394,7 @@
#gpio-cells = <3>; /* bank, pin, flags */
};
- timer4: timer@4002C000 {
+ timer4: timer@4002c000 {
compatible = "nxp,lpc3220-timer";
reg = <0x4002C000 0x1000>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
@@ -412,7 +412,7 @@
status = "disabled";
};
- watchdog: watchdog@4003C000 {
+ watchdog: watchdog@4003c000 {
compatible = "nxp,pnx4008-wdt";
reg = <0x4003C000 0x1000>;
clocks = <&clk LPC32XX_CLK_WDOG>;
@@ -451,7 +451,7 @@
status = "disabled";
};
- timer1: timer@4004C000 {
+ timer1: timer@4004c000 {
compatible = "nxp,lpc3220-timer";
reg = <0x4004C000 0x1000>;
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -462,7 +462,9 @@
key: key@40050000 {
compatible = "nxp,lpc3220-key";
reg = <0x40050000 0x1000>;
- interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LPC32XX_CLK_KEY>;
+ interrupt-parent = <&sic1>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -475,7 +477,7 @@
status = "disabled";
};
- pwm1: pwm@4005C000 {
+ pwm1: pwm@4005c000 {
compatible = "nxp,lpc3220-pwm";
reg = <0x4005C000 0x4>;
clocks = <&clk LPC32XX_CLK_PWM1>;
@@ -484,7 +486,7 @@
status = "disabled";
};
- pwm2: pwm@4005C004 {
+ pwm2: pwm@4005c004 {
compatible = "nxp,lpc3220-pwm";
reg = <0x4005C004 0x4>;
clocks = <&clk LPC32XX_CLK_PWM2>;
diff --git a/arch/arm/boot/dts/lpc4350-hitex-eval.dts b/arch/arm/boot/dts/lpc4350-hitex-eval.dts
index 8b973f537d3a..93d0c2e99e7c 100644
--- a/arch/arm/boot/dts/lpc4350-hitex-eval.dts
+++ b/arch/arm/boot/dts/lpc4350-hitex-eval.dts
@@ -40,8 +40,6 @@
pca_buttons {
compatible = "gpio-keys-polled";
- #address-cells = <1>;
- #size-cells = <0>;
poll-interval = <100>;
autorepeat;
diff --git a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
index 02b23fa29d75..224f80a4a31d 100644
--- a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
+++ b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
@@ -57,8 +57,6 @@
compatible = "gpio-keys-polled";
pinctrl-names = "default";
pinctrl-0 = <&gpio_joystick_pins>;
- #address-cells = <1>;
- #size-cells = <0>;
poll-interval = <100>;
autorepeat;
diff --git a/arch/arm/boot/dts/lpc4357-myd-lpc4357.dts b/arch/arm/boot/dts/lpc4357-myd-lpc4357.dts
new file mode 100644
index 000000000000..1f84654df50c
--- /dev/null
+++ b/arch/arm/boot/dts/lpc4357-myd-lpc4357.dts
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * MYIR Tech MYD-LPC4357 Development Board with 800x480 7" TFT panel
+ *
+ * Copyright (C) 2016-2018 Vladimir Zapolskiy <vz@mleia.com>
+ */
+
+/dts-v1/;
+
+#include "lpc18xx.dtsi"
+#include "lpc4357.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "MYIR Tech LPC4357 Development Board";
+ compatible = "myir,myd-lpc4357", "nxp,lpc4357";
+
+ chosen {
+ stdout-path = "serial3:115200n8";
+ };
+
+ memory@28000000 {
+ device_type = "memory";
+ reg = <0x28000000 0x2000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ led1 {
+ gpios = <&gpio LPC_GPIO(6,15) GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led2 {
+ gpios = <&gpio LPC_GPIO(6,16) GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led3 {
+ gpios = <&gpio LPC_GPIO(6,17) GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led4 {
+ gpios = <&gpio LPC_GPIO(6,10) GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led5 {
+ gpios = <&gpio LPC_GPIO(7,14) GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led6 {
+ gpios = <&gpio LPC_GPIO(6,14) GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ panel: panel {
+ compatible = "innolux,at070tn92";
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&lcdc_output>;
+ };
+ };
+ };
+
+ vcc: vcc_fixed {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-supply";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vmmc: vmmc_fixed {
+ compatible = "regulator-fixed";
+ regulator-name = "vmmc-supply";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&pinctrl {
+ can0_pins: can0-pins {
+ can_rd_cfg {
+ pins = "p3_1";
+ function = "can0";
+ input-enable;
+ };
+
+ can_td_cfg {
+ pins = "p3_2";
+ function = "can0";
+ };
+ };
+
+ can1_pins: can1-pins {
+ can_rd_cfg {
+ pins = "pe_1";
+ function = "can1";
+ input-enable;
+ };
+
+ can_td_cfg {
+ pins = "pe_0";
+ function = "can1";
+ };
+ };
+
+ emc_pins: emc-pins {
+ emc_addr0_22_cfg {
+ pins = "p2_9", "p2_10", "p2_11", "p2_12",
+ "p2_13", "p1_0", "p1_1", "p1_2",
+ "p2_8", "p2_7", "p2_6", "p2_2",
+ "p2_1", "p2_0", "p6_8", "p6_7",
+ "pd_16", "pd_15", "pe_0", "pe_1",
+ "pe_2", "pe_3", "pe_4";
+ function = "emc";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ emc_data0_15_cfg {
+ pins = "p1_7", "p1_8", "p1_9", "p1_10",
+ "p1_11", "p1_12", "p1_13", "p1_14",
+ "p5_4", "p5_5", "p5_6", "p5_7",
+ "p5_0", "p5_1", "p5_2", "p5_3";
+ function = "emc";
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ emc_we_oe_cfg {
+ pins = "p1_6", "p1_3";
+ function = "emc";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ emc_cs0_cfg {
+ pins = "p1_5";
+ function = "emc";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ emc_sdram_dqm0_1_cfg {
+ pins = "p6_12", "p6_10";
+ function = "emc";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ emc_sdram_ras_cas_cfg {
+ pins = "p6_5", "p6_4";
+ function = "emc";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ emc_sdram_dycs0_cfg {
+ pins = "p6_9";
+ function = "emc";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ emc_sdram_cke_cfg {
+ pins = "p6_11";
+ function = "emc";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ emc_sdram_clock_cfg {
+ pins = "clk0";
+ function = "emc";
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <1>;
+ bias-disable;
+ };
+ };
+
+ enet_rmii_pins: enet-rmii-pins {
+ enet_rmii_rxd_cfg {
+ pins = "p1_15", "p0_0";
+ function = "enet";
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ enet_rmii_txd_cfg {
+ pins = "p1_18", "p1_20";
+ function = "enet";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ enet_rmii_rx_dv_cfg {
+ pins = "p1_16";
+ function = "enet";
+ input-enable;
+ input-schmitt-disable;
+ bias-disable;
+ };
+
+ enet_mdio_cfg {
+ pins = "p1_17";
+ function = "enet";
+ input-enable;
+ input-schmitt-disable;
+ bias-disable;
+ };
+
+ enet_mdc_cfg {
+ pins = "pc_1";
+ function = "enet";
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ enet_rmii_tx_en_cfg {
+ pins = "p0_1";
+ function = "enet";
+ bias-disable;
+ };
+
+ enet_ref_clk_cfg {
+ pins = "p1_19";
+ function = "enet";
+ slew-rate = <1>;
+ input-enable;
+ input-schmitt-disable;
+ bias-disable;
+ };
+ };
+
+ i2c0_pins: i2c0-pins {
+ i2c0_pins_cfg {
+ pins = "i2c0_scl", "i2c0_sda";
+ function = "i2c0";
+ input-enable;
+ };
+ };
+
+ i2c1_pins: i2c1-pins {
+ i2c1_pins_cfg {
+ pins = "pe_15", "pe_13";
+ function = "i2c1";
+ input-enable;
+ };
+ };
+
+ lcd_pins: lcd-pins {
+ lcd_vd0_23_cfg {
+ pins = "p4_1", "p4_4", "p4_3", "p4_2",
+ "p8_7", "p8_6", "p8_5", "p8_4",
+ "p7_5", "p4_8", "p4_10", "p4_9",
+ "p8_3", "pb_6", "pb_5", "pb_4",
+ "p7_4", "p7_3", "p7_2", "p7_1",
+ "pb_3", "pb_2", "pb_1", "pb_0";
+ function = "lcd";
+ };
+
+ lcd_vsync_en_dclk_lp_pwr_cfg {
+ pins = "p4_5", "p4_6", "p4_7", "p7_6", "p7_7";
+ function = "lcd";
+ };
+ };
+
+ led_pins: led-pins {
+ led_1_6_cfg {
+ pins = "pd_1", "pd_2", "pd_3", "pc_11", "pe_14", "pd_0";
+ function = "gpio";
+ bias-pull-down;
+ };
+ };
+
+ sdmmc_pins: sdmmc-pins {
+ sdmmc_clk_cfg {
+ pins = "pc_0";
+ function = "sdmmc";
+ slew-rate = <1>;
+ bias-pull-down;
+ };
+
+ sdmmc_cmd_dat0_3_cfg {
+ pins = "pc_4", "pc_5", "pc_6", "pc_7", "pc_10";
+ function = "sdmmc";
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ sdmmc_cd_cfg {
+ pins = "pc_8";
+ function = "sdmmc";
+ input-enable;
+ bias-pull-down;
+ };
+ };
+
+ spifi_pins: spifi-pins {
+ spifi_sck_cfg {
+ pins = "p3_3";
+ function = "spifi";
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ spifi_mosi_miso_sio2_sio3_cfg {
+ pins = "p3_7", "p3_6", "p3_5", "p3_4";
+ function = "spifi";
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <1>;
+ bias-disable;
+ };
+
+ spifi_cs_cfg {
+ pins = "p3_8";
+ function = "spifi";
+ bias-disable;
+ };
+ };
+
+ ssp1_pins: ssp1-pins {
+ ssp1_sck_cfg {
+ pins = "pf_4";
+ function = "ssp1";
+ slew-rate = <1>;
+ bias-pull-down;
+ };
+
+ ssp1_miso_cfg {
+ pins = "pf_6";
+ function = "ssp1";
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <1>;
+ bias-pull-down;
+ };
+
+ ssp1_mosi_cfg {
+ pins = "pf_7";
+ function = "ssp1";
+ slew-rate = <1>;
+ bias-pull-down;
+ };
+
+ ssp1_ssel_cfg {
+ pins = "pf_5";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ uart0_rxd_cfg {
+ pins = "pf_11";
+ function = "uart0";
+ input-enable;
+ input-schmitt-disable;
+ bias-disable;
+ };
+
+ uart0_clk_dir_txd_cfg {
+ pins = "pf_8", "pf_9", "pf_10";
+ function = "uart0";
+ bias-pull-down;
+ };
+ };
+
+ uart1_pins: uart1-pins {
+ uart1_rxd_cfg {
+ pins = "pc_14";
+ function = "uart1";
+ bias-disable;
+ input-enable;
+ input-schmitt-disable;
+ };
+
+ uart1_dtr_txd_cfg {
+ pins = "pc_12", "pc_13";
+ function = "uart1";
+ bias-pull-down;
+ };
+ };
+
+ uart2_pins: uart2-pins {
+ uart2_rxd_cfg {
+ pins = "pa_2";
+ function = "uart2";
+ bias-disable;
+ input-enable;
+ input-schmitt-disable;
+ };
+
+ uart2_txd_cfg {
+ pins = "pa_1";
+ function = "uart2";
+ bias-pull-down;
+ };
+ };
+
+ uart3_pins: uart3-pins {
+ uart3_rx_cfg {
+ pins = "p2_4";
+ function = "uart3";
+ bias-disable;
+ input-enable;
+ input-schmitt-disable;
+ };
+
+ uart3_tx_cfg {
+ pins = "p2_3";
+ function = "uart3";
+ bias-pull-down;
+ };
+ };
+
+ usb0_pins: usb0-pins {
+ usb0_pwr_enable_cfg {
+ pins = "p6_3";
+ function = "usb0";
+ };
+
+ usb0_pwr_fault_cfg {
+ pins = "p8_0";
+ function = "usb0";
+ bias-disable;
+ input-enable;
+ };
+ };
+};
+
+&adc1 {
+ status = "okay";
+ vref-supply = <&vcc>;
+};
+
+&can0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins>;
+};
+
+/* Pin conflict with EMC, muxed by JP5 and JP6 */
+&can1 {
+ status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&can1_pins>;
+};
+
+&emc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&emc_pins>;
+
+ cs0 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ mpmc,cs = <0>;
+ mpmc,memory-width = <16>;
+ mpmc,byte-lane-low;
+ mpmc,write-enable-delay = <0>;
+ mpmc,output-enable-delay = <0>;
+ mpmc,read-access-delay = <70>;
+ mpmc,page-mode-read-delay = <70>;
+
+ /* SST/Microchip SST39VF1601 */
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x400000>;
+ bank-width = <2>;
+ };
+ };
+};
+
+&enet_tx_clk {
+ clock-frequency = <50000000>;
+};
+
+&i2c0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ clock-frequency = <400000>;
+};
+
+&i2c1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ clock-frequency = <400000>;
+
+ sensor@49 {
+ compatible = "lm75";
+ reg = <0x49>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c512";
+ reg = <0x50>;
+ };
+};
+
+&lcdc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_pins>;
+
+ max-memory-bandwidth = <92240000>;
+
+ port {
+ lcdc_output: endpoint {
+ remote-endpoint = <&panel_input>;
+ arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
+ };
+ };
+};
+
+&mac {
+ status = "okay";
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&enet_rmii_pins>;
+ phy-handle = <&phy1>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+};
+
+&mmcsd {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_pins>;
+ bus-width = <4>;
+ vmmc-supply = <&vmmc>;
+};
+
+/* Pin conflict with SSP0, the latter is routed to J17 pin header */
+&spifi {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spifi_pins>;
+
+ /* Atmel AT25DF321A */
+ flash {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <51000000>;
+ spi-cpol;
+ spi-cpha;
+ };
+};
+
+&ssp1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ssp1_pins>;
+ num-cs = <1>;
+ cs-gpios = <&gpio LPC_GPIO(7,19) GPIO_ACTIVE_LOW>;
+};
+
+/* Routed to J17 pin header */
+&uart0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+};
+
+/* RS485 */
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+};
+
+/* Routed to J17 pin header */
+&uart2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+};
+
+&uart3 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+};
+
+&usb0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins>;
+};
diff --git a/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts b/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts
index 6a83f30029ea..ba1ddd93b8f8 100644
--- a/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts
+++ b/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts
@@ -18,6 +18,7 @@
/ {
model = "Moxa UC-8410A";
+ compatible = "fsl,ls1021a-moxa-uc-8410a", "fsl,ls1021a";
aliases {
enet0_rgmii_phy = &rgmii_phy0;
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 923a25760516..ca60730dda40 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -51,6 +51,7 @@
/ {
model = "LS1021A QDS Board";
+ compatible = "fsl,ls1021a-qds", "fsl,ls1021a";
aliases {
enet0_rgmii_phy = &rgmii_phy1;
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index 8b48c3c7cd21..97e1fb7ea932 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -51,6 +51,7 @@
/ {
model = "LS1021A TWR Board";
+ compatible = "fsl,ls1021a-twr", "fsl,ls1021a";
aliases {
enet2_rgmii_phy = &rgmii_phy1;
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index ed0941292172..b4f2723ecd86 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -45,11 +45,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton64.dtsi"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
compatible = "fsl,ls1021a";
interrupt-parent = <&gic>;
@@ -88,6 +89,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x0>;
+ };
+
sysclk: sysclk {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -125,6 +131,13 @@
interrupt-parent = <&gic>;
ranges;
+ ddr: memory-controller@1080000 {
+ compatible = "fsl,qoriq-memory-controller";
+ reg = <0x0 0x1080000 0x0 0x1000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ big-endian;
+ };
+
gic: interrupt-controller@1400000 {
compatible = "arm,gic-400", "arm,cortex-a7-gic";
#interrupt-cells = <3>;
@@ -706,6 +719,7 @@
fsl,tmr-fiper1 = <999999995>;
fsl,tmr-fiper2 = <99990>;
fsl,max-adj = <499999999>;
+ fsl,extts-fifo;
};
enet0: ethernet@2d10000 {
@@ -811,6 +825,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
pcie@3400000 {
@@ -824,6 +839,7 @@
#size-cells = <2>;
device_type = "pci";
num-lanes = <4>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -848,6 +864,7 @@
#size-cells = <2>;
device_type = "pci";
num-lanes = <4>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index e4645f612712..6f54a8897574 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -47,9 +47,10 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-/include/ "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupt-parent = <&gic>;
L2: l2-cache-controller@c4200000 {
@@ -72,6 +73,13 @@
#size-cells = <1>;
ranges = <0x0 0xc1100000 0x200000>;
+ hhi: system-controller@4000 {
+ compatible = "amlogic,meson-hhi-sysctrl",
+ "simple-mfd",
+ "syscon";
+ reg = <0x4000 0x400>;
+ };
+
assist: assist@7c00 {
compatible = "amlogic,meson-mx-assist", "syscon";
reg = <0x7c00 0x200>;
@@ -274,7 +282,7 @@
compatible = "amlogic,meson6-dwmac", "snps,dwmac";
reg = <0xc9410000 0x10000
0xc1108108 0x4>;
- interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/meson6-atv1200.dts b/arch/arm/boot/dts/meson6-atv1200.dts
index fc48cff71ddf..997e69c5963e 100644
--- a/arch/arm/boot/dts/meson6-atv1200.dts
+++ b/arch/arm/boot/dts/meson6-atv1200.dts
@@ -61,6 +61,7 @@
};
memory {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
};
diff --git a/arch/arm/boot/dts/meson6.dtsi b/arch/arm/boot/dts/meson6.dtsi
index ca978ab952cd..65585255910a 100644
--- a/arch/arm/boot/dts/meson6.dtsi
+++ b/arch/arm/boot/dts/meson6.dtsi
@@ -70,6 +70,14 @@
};
};
+ apb2: bus@d0000000 {
+ compatible = "simple-bus";
+ reg = <0xd0000000 0x40000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xd0000000 0x40000>;
+ };
+
xtal: xtal-clk {
compatible = "fixed-clock";
clock-frequency = <24000000>;
diff --git a/arch/arm/boot/dts/meson8-minix-neo-x8.dts b/arch/arm/boot/dts/meson8-minix-neo-x8.dts
index 55fb090a40ef..8686abd5de7f 100644
--- a/arch/arm/boot/dts/meson8-minix-neo-x8.dts
+++ b/arch/arm/boot/dts/meson8-minix-neo-x8.dts
@@ -57,6 +57,7 @@
};
memory {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index e5cd325d7ea8..a9781243453e 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -166,6 +166,32 @@
};
};
+ gpu_opp_table: gpu-opp-table {
+ compatible = "operating-points-v2";
+
+ opp-182150000 {
+ opp-hz = /bits/ 64 <182150000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-318750000 {
+ opp-hz = /bits/ 64 <318750000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-425000000 {
+ opp-hz = /bits/ 64 <425000000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-510000000 {
+ opp-hz = /bits/ 64 <510000000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-637500000 {
+ opp-hz = /bits/ 64 <637500000>;
+ opp-microvolt = <1150000>;
+ turbo-mode;
+ };
+ };
+
pmu {
compatible = "arm,cortex-a9-pmu";
interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
@@ -201,6 +227,46 @@
no-map;
};
};
+
+ apb: bus@d0000000 {
+ compatible = "simple-bus";
+ reg = <0xd0000000 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xd0000000 0x200000>;
+
+ mali: gpu@c0000 {
+ compatible = "amlogic,meson8-mali", "arm,mali-450";
+ reg = <0xc0000 0x40000>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gp", "gpmmu", "pp", "pmu",
+ "pp0", "ppmmu0", "pp1", "ppmmu1",
+ "pp2", "ppmmu2", "pp4", "ppmmu4",
+ "pp5", "ppmmu5", "pp6", "ppmmu6";
+ resets = <&reset RESET_MALI>;
+ clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
+ clock-names = "bus", "core";
+ operating-points-v2 = <&gpu_opp_table>;
+ switch-delay = <0xffff>;
+ };
+ };
}; /* end of / */
&aobus {
@@ -261,13 +327,6 @@
};
&cbus {
- clkc: clock-controller@4000 {
- #clock-cells = <1>;
- #reset-cells = <1>;
- compatible = "amlogic,meson8-clkc";
- reg = <0x8000 0x4>, <0x4000 0x400>;
- };
-
reset: reset-controller@4404 {
compatible = "amlogic,meson8b-reset";
reg = <0x4404 0x9c>;
@@ -390,6 +449,11 @@
compatible = "amlogic,meson8-efuse";
clocks = <&clkc CLKID_EFUSE>;
clock-names = "core";
+
+ temperature_calib: calib@1f4 {
+ /* only the upper two bytes are relevant */
+ reg = <0x1f4 0x4>;
+ };
};
&ethmac {
@@ -402,6 +466,14 @@
status = "okay";
};
+&hhi {
+ clkc: clock-controller {
+ compatible = "amlogic,meson8-clkc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+};
+
&hwrng {
compatible = "amlogic,meson8-rng", "amlogic,meson-rng";
clocks = <&clkc CLKID_RNG0>;
@@ -469,6 +541,9 @@
clocks = <&clkc CLKID_XTAL>,
<&clkc CLKID_SAR_ADC>;
clock-names = "clkin", "core";
+ amlogic,hhi-sysctrl = <&hhi>;
+ nvmem-cells = <&temperature_calib>;
+ nvmem-cell-names = "temperature_calib";
};
&sdio {
diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
index 0872f6e3abf5..3ca9638fad09 100644
--- a/arch/arm/boot/dts/meson8b-ec100.dts
+++ b/arch/arm/boot/dts/meson8b-ec100.dts
@@ -23,6 +23,7 @@
};
memory {
+ device_type = "memory";
reg = <0x40000000 0x40000000>;
};
@@ -64,6 +65,11 @@
timeout-ms = <20000>;
};
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&saradc 8>;
+ };
+
leds {
compatible = "gpio-leds";
@@ -83,6 +89,9 @@
};
usb_vbus: regulator-usb-vbus {
+ /*
+ * Silergy SY6288CCAC-GP 2A Power Distribution Switch.
+ */
compatible = "regulator-fixed";
regulator-name = "USB_VBUS";
@@ -90,11 +99,20 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v>;
+
+ /*
+ * signal name from the schematics: USB_PWR_EN
+ */
gpio = <&gpio_ao GPIOAO_5 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
vcc_5v: regulator-vcc5v {
+ /*
+ * supplied by the main power input, which is called PWR_5V_STB
+ * in the schematics
+ */
compatible = "regulator-fixed";
regulator-name = "VCC5V";
@@ -102,6 +120,9 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
+ /*
+ * signal name from the schematics: 3V3_5V_EN
+ */
gpio = <&gpio GPIODV_29 GPIO_ACTIVE_LOW>;
regulator-boot-on;
@@ -109,12 +130,18 @@
};
vcck: regulator-vcck {
+ /*
+ * Silergy SY8089AAC-GP 2A continuous, 3A peak, 1MHz
+ * Synchronous Step Down Regulator.
+ */
compatible = "pwm-regulator";
regulator-name = "VCCK";
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
+ vin-supply = <&vcc_5v>;
+
pwms = <&pwm_cd 0 1148 0>;
pwm-dutycycle-range = <100 0>;
@@ -123,19 +150,66 @@
};
vcc_1v8: regulator-vcc1v8 {
+ /*
+ * ABLIC S-1339D18-M5001-GP
+ */
compatible = "regulator-fixed";
regulator-name = "VCC1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+
+ vin-supply = <&vcc_3v3>;
};
vcc_3v3: regulator-vcc3v3 {
+ /*
+ * Silergy SY8089AAC-GP 2A continuous, 3A peak, 1MHz
+ * Synchronous Step Down Regulator. Also called
+ * VDDIO_AO3.3V in the schematics.
+ */
compatible = "regulator-fixed";
regulator-name = "VCC3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+
+ vin-supply = <&vcc_5v>;
+ };
+
+ vcc_ddr3: regulator-vcc-ddr3 {
+ /*
+ * Silergy SY8089AAC-GP 2A continuous, 3A peak, 1MHz
+ * Synchronous Step Down Regulator. Also called
+ * DDR3_1.5V in the schematics.
+ */
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC_DDR3_1V5";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+
+ vin-supply = <&vcc_5v>;
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vcc_rtc: regulator-vcc-rtc {
+ /*
+ * Global Mixed-mode Technology Inc. G918T12U-GP
+ */
+ compatible = "regulator-fixed";
+
+ regulator-name = "VCC_RTC";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ /*
+ * When the board is powered, the input is VCC3V3,
+ * otherwise power is taken from the coin cell battery.
+ */
+ vin-supply = <&vcc_3v3>;
};
};
@@ -164,6 +238,10 @@
eth_phy0: ethernet-phy@0 {
/* IC Plus IP101A/G (0x02430c54) */
reg = <0>;
+ icplus,select-interrupt;
+ interrupt-parent = <&gpio_intc>;
+ /* GPIOH_3 */
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
};
};
};
@@ -205,13 +283,62 @@
cap-sd-highspeed;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
};
};
+&gpio_ao {
+ gpio-line-names = "Linux_TX", "Linux_RX",
+ "SLP_S5_N", "USB2_OC_FLAG#",
+ "HUB_RST", "USB_PWR_EN",
+ "I2S_IN", "SLP_S1_N",
+ "TCK", "TMS", "TDI", "TDO",
+ "HDMI_CEC", "5640_IRQ",
+ "MUTE", "S805_TEST#";
+};
+
+&gpio {
+ gpio-line-names = /* Bank GPIOX */
+ "WIFI_SD_D0", "WIFI_SD_D1", "WIFI_SD_D2",
+ "WIFI_SD_D3", "BTPCM_DOUT", "BTPCM_DIN",
+ "BTPCM_SYNC", "BTPCM_CLK", "WIFI_SD_CLK",
+ "WIFI_SD_CMD", "WIFI_32K", "WIFI_PWREN",
+ "UART_B_TX", "UART_B_RX", "UART_B_CTS_N",
+ "UART_B_RTS_N", "BT_EN", "WIFI_WAKE_HOST",
+ /* Bank GPIOY */
+ "", "", "", "", "", "", "", "", "", "",
+ "", "",
+ /* Bank GPIODV */
+ "VCCK_PWM_C", "I2C_SDA_A", "I2C_SCL_A",
+ "I2C_SDA_B", "I2C_SCL_B", "VDDEE_PWM_D",
+ "VDDEE_PWM 3V3_5V_EN",
+ /* Bank GPIOH */
+ "HDMI_HPD", "HDMI_I2C_SDA", "HDMI_I2C_SCL",
+ "RMII_IRQ", "RMII_RST#", "RMII_TXD1",
+ "RMII_TXD0", "AV_select_1", "AV_select_2",
+ "MCU_Control_S",
+ /* Bank CARD */
+ "SD_D1_B", "SD_D0_B", "SD_CLK_8726MX",
+ "SD_CMD_8726MX", "SD_D3_B", "SD_D2_B",
+ "CARD_EN_DET (CARD_DET)",
+ /* Bank BOOT */
+ "NAND_D0 (EMMC)", "NAND_D1 (EMMC)",
+ "NAND_D2 (EMMC)", "NAND_D3 (EMMC)",
+ "NAND_D4 (EMMC)", "NAND_D5 (EMMC)",
+ "NAND_D6 (EMMC)", "NAND_D7 (EMMC)",
+ "NAND_CS1 (EMMC)", "NAND_CS2 iNAND_RS1 (EMMC)",
+ "NAND_nR/B iNAND_CMD (EMMC)", "NAND_ALE (EMMC)",
+ "NAND_CLE (EMMC)", "nRE_S1 NAND_nRE (EMMC)",
+ "nWE_S1 NAND_nWE (EMMC)", "", "", "SPI_CS",
+ /* Bank DIF */
+ "RMII_RXD1", "RMII_RXD0", "RMII_CRS_DV",
+ "RMII_50M_IN", "GPIODIF_4", "GPIODIF_5",
+ "RMII_TXEN", "CPUETH_25MOUT", "RMII_MDC",
+ "RMII_MDIO";
+};
+
&pwm_cd {
status = "okay";
pinctrl-0 = <&pwm_c1_pins>;
diff --git a/arch/arm/boot/dts/meson8b-mxq.dts b/arch/arm/boot/dts/meson8b-mxq.dts
index 5c9b76af8d42..08ddd7fb0bf8 100644
--- a/arch/arm/boot/dts/meson8b-mxq.dts
+++ b/arch/arm/boot/dts/meson8b-mxq.dts
@@ -60,6 +60,7 @@
};
memory {
+ device_type = "memory";
reg = <0x40000000 0x40000000>;
};
};
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index 58669abda259..3b0e0f8fbc23 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -62,6 +62,7 @@
};
memory {
+ device_type = "memory";
reg = <0x40000000 0x40000000>;
};
@@ -118,6 +119,11 @@
1800000 1>;
};
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&saradc 8>;
+ };
+
vcc_1v8: regulator-vcc-1v8 {
/*
* RICHTEK RT9179 configured for a fixed output voltage of
@@ -221,7 +227,6 @@
/* Realtek RTL8211F (0x001cc916) */
eth_phy: ethernet-phy@0 {
reg = <0>;
- eee-broken-1000t;
interrupt-parent = <&gpio_intc>;
/* GPIOH_3 */
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -273,8 +278,7 @@
cap-sd-highspeed;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&tflash_vdd>;
vqmmc-supply = <&tf_io>;
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 22d775460767..fe84a8c3ce81 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -158,6 +158,32 @@
};
};
+ gpu_opp_table: gpu-opp-table {
+ compatible = "operating-points-v2";
+
+ opp-255000000 {
+ opp-hz = /bits/ 64 <255000000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-364300000 {
+ opp-hz = /bits/ 64 <364300000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-425000000 {
+ opp-hz = /bits/ 64 <425000000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-510000000 {
+ opp-hz = /bits/ 64 <510000000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-637500000 {
+ opp-hz = /bits/ 64 <637500000>;
+ opp-microvolt = <1150000>;
+ turbo-mode;
+ };
+ };
+
pmu {
compatible = "arm,cortex-a5-pmu";
interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
@@ -178,6 +204,34 @@
no-map;
};
};
+
+ apb: bus@d0000000 {
+ compatible = "simple-bus";
+ reg = <0xd0000000 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xd0000000 0x200000>;
+
+ mali: gpu@c0000 {
+ compatible = "amlogic,meson8b-mali", "arm,mali-450";
+ reg = <0xc0000 0x40000>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gp", "gpmmu", "pp", "pmu",
+ "pp0", "ppmmu0", "pp1", "ppmmu1";
+ resets = <&reset RESET_MALI>;
+ clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
+ clock-names = "bus", "core";
+ operating-points-v2 = <&gpu_opp_table>;
+ switch-delay = <0xffff>;
+ };
+ };
}; /* end of / */
&aobus {
@@ -222,13 +276,6 @@
};
&cbus {
- clkc: clock-controller@4000 {
- #clock-cells = <1>;
- #reset-cells = <1>;
- compatible = "amlogic,meson8b-clkc";
- reg = <0x8000 0x4>, <0x4000 0x400>;
- };
-
reset: reset-controller@4404 {
compatible = "amlogic,meson8b-reset";
reg = <0x4404 0x9c>;
@@ -270,9 +317,7 @@
groups = "eth_tx_clk",
"eth_tx_en",
"eth_txd1_0",
- "eth_txd1_1",
"eth_txd0_0",
- "eth_txd0_1",
"eth_rx_clk",
"eth_rx_dv",
"eth_rxd1",
@@ -281,7 +326,9 @@
"eth_mdc",
"eth_ref_clk",
"eth_txd2",
- "eth_txd3";
+ "eth_txd3",
+ "eth_rxd3",
+ "eth_rxd2";
function = "ethernet";
bias-disable;
};
@@ -360,6 +407,11 @@
compatible = "amlogic,meson8b-efuse";
clocks = <&clkc CLKID_EFUSE>;
clock-names = "core";
+
+ temperature_calib: calib@1f4 {
+ /* only the upper two bytes are relevant */
+ reg = <0x1f4 0x4>;
+ };
};
&ethmac {
@@ -383,6 +435,14 @@
status = "okay";
};
+&hhi {
+ clkc: clock-controller {
+ compatible = "amlogic,meson8-clkc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+};
+
&hwrng {
compatible = "amlogic,meson8b-rng", "amlogic,meson-rng";
clocks = <&clkc CLKID_RNG0>;
@@ -450,6 +510,9 @@
clocks = <&clkc CLKID_XTAL>,
<&clkc CLKID_SAR_ADC>;
clock-names = "clkin", "core";
+ amlogic,hhi-sysctrl = <&hhi>;
+ nvmem-cells = <&temperature_calib>;
+ nvmem-cell-names = "temperature_calib";
};
&sdio {
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
index f5853610b20b..29d830ae4bf4 100644
--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
@@ -28,6 +28,7 @@
};
memory {
+ device_type = "memory";
reg = <0x40000000 0x80000000>;
};
@@ -44,6 +45,11 @@
};
};
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&saradc 8>;
+ };
+
vcc_3v3: regulator-vcc3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC3V3";
@@ -206,8 +212,7 @@
cap-sd-highspeed;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
};
diff --git a/arch/arm/boot/dts/meson8m2.dtsi b/arch/arm/boot/dts/meson8m2.dtsi
index d1a28c2adac5..bb87b251e16d 100644
--- a/arch/arm/boot/dts/meson8m2.dtsi
+++ b/arch/arm/boot/dts/meson8m2.dtsi
@@ -50,6 +50,10 @@
};
};
+&saradc {
+ compatible = "amlogic,meson8m2-saradc", "amlogic,meson-saradc";
+};
+
&wdt {
compatible = "amlogic,meson8m2-wdt", "amlogic,meson8b-wdt";
};
diff --git a/arch/arm/boot/dts/milbeaut-m10v-evb.dts b/arch/arm/boot/dts/milbeaut-m10v-evb.dts
new file mode 100644
index 000000000000..614f60c6b0a2
--- /dev/null
+++ b/arch/arm/boot/dts/milbeaut-m10v-evb.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Socionext Milbeaut M10V Evaluation Board */
+/dts-v1/;
+#include "milbeaut-m10v.dtsi"
+
+/ {
+ model = "Socionext M10V EVB";
+ compatible = "socionext,milbeaut-m10v-evb", "socionext,sc2000a";
+
+ aliases {
+ serial0 = &uart1;
+ };
+
+ chosen {
+ bootargs = "rootwait earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ uclk40xi: uclk40xi {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <40000000>;
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x40000000 0x80000000>;
+ };
+
+};
diff --git a/arch/arm/boot/dts/milbeaut-m10v.dtsi b/arch/arm/boot/dts/milbeaut-m10v.dtsi
new file mode 100644
index 000000000000..aa7c6caeb750
--- /dev/null
+++ b/arch/arm/boot/dts/milbeaut-m10v.dtsi
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "socionext,sc2000a";
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "socionext,milbeaut-m10v-smp";
+ cpu@f00 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0xf00>;
+ };
+ cpu@f01 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0xf01>;
+ };
+ cpu@f02 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0xf02>;
+ };
+ cpu@f03 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0xf03>;
+ };
+ };
+
+ timer { /* The Generic Timer */
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ clock-frequency = <40000000>;
+ always-on;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-parent = <&gic>;
+
+ gic: interrupt-controller@1d000000 {
+ compatible = "arm,cortex-a7-gic";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x1d001000 0x1000>,
+ <0x1d002000 0x1000>; /* CPU I/f base and size */
+ };
+
+ timer@1e000050 { /* 32-bit Reload Timers */
+ compatible = "socionext,milbeaut-timer";
+ reg = <0x1e000050 0x20>;
+ interrupts = <0 91 4>;
+ };
+
+ uart1: serial@1e700010 { /* PE4, PE5 */
+ /* Enable this as ttyUSI0 */
+ compatible = "socionext,milbeaut-usio-uart";
+ reg = <0x1e700010 0x10>;
+ interrupts = <0 141 0x4>, <0 149 0x4>;
+ interrupt-names = "rx", "tx";
+ };
+
+ };
+
+ sram@0 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x10000>;
+ smp-sram@f100 {
+ compatible = "socionext,milbeaut-smp-sram";
+ reg = <0xf100 0x20>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts
index 350208c5e1ed..3da038ba5733 100644
--- a/arch/arm/boot/dts/mmp2-brownstone.dts
+++ b/arch/arm/boot/dts/mmp2-brownstone.dts
@@ -19,6 +19,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index ee03e0846740..f02fb97f515c 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -7,10 +7,12 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/marvell,mmp2.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
aliases {
serial0 = &uart1;
serial1 = &uart2;
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index ddc7a7bb33c0..f57acf8f66b9 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -105,7 +105,7 @@
interrupts-extended = <
&cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
&cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
- &cpcap 48 1
+ &cpcap 48 0
>;
interrupt-names =
"id_ground", "id_float", "se0conn", "vbusvld",
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index da7b3237bfe9..cbf17656bcc7 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -5,10 +5,11 @@
* Licensed under GPLv2 or later.
*/
-/include/ "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/irq.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "moxa,moxart";
model = "MOXART";
interrupt-parent = <&intc>;
diff --git a/arch/arm/boot/dts/mps2.dtsi b/arch/arm/boot/dts/mps2.dtsi
index 23467390558d..96fb5a5cf4d3 100644
--- a/arch/arm/boot/dts/mps2.dtsi
+++ b/arch/arm/boot/dts/mps2.dtsi
@@ -171,7 +171,7 @@
uart0: serial@4000 {
compatible = "arm,mps2-uart";
reg = <0x4000 0x1000>;
- interrupts = <0 1 12>;
+ interrupts = <0>, <1>, <12>;
clocks = <&sysclk>;
status = "disabled";
};
@@ -179,7 +179,7 @@
uart1: serial@5000 {
compatible = "arm,mps2-uart";
reg = <0x5000 0x1000>;
- interrupts = <2 3 12>;
+ interrupts = <2>, <3>, <12>;
clocks = <&sysclk>;
status = "disabled";
};
@@ -187,7 +187,7 @@
uart2: serial@6000 {
compatible = "arm,mps2-uart";
reg = <0x6000 0x1000>;
- interrupts = <4 5 12>;
+ interrupts = <4>, <5>, <12>;
clocks = <&sysclk>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/mt2701-evb.dts b/arch/arm/boot/dts/mt2701-evb.dts
index be0edb3dae6c..88f8fd22302a 100644
--- a/arch/arm/boot/dts/mt2701-evb.dts
+++ b/arch/arm/boot/dts/mt2701-evb.dts
@@ -13,6 +13,7 @@
compatible = "mediatek,mt2701-evb", "mediatek,mt2701";
memory {
+ device_type = "memory";
reg = <0 0x80000000 0 0x40000000>;
};
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index 180377e56ef4..51e1305c6471 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -12,10 +12,11 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <dt-bindings/reset/mt2701-resets.h>
-#include "skeleton64.dtsi"
#include "mt2701-pinfunc.h"
/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
compatible = "mediatek,mt2701";
interrupt-parent = <&cirq>;
diff --git a/arch/arm/boot/dts/mt6580-evbp1.dts b/arch/arm/boot/dts/mt6580-evbp1.dts
index ca137897ed60..755a0774a8ee 100644
--- a/arch/arm/boot/dts/mt6580-evbp1.dts
+++ b/arch/arm/boot/dts/mt6580-evbp1.dts
@@ -22,6 +22,7 @@
};
memory {
+ device_type = "memory";
reg = <0x80000000 0x20000000>;
};
};
diff --git a/arch/arm/boot/dts/mt6580.dtsi b/arch/arm/boot/dts/mt6580.dtsi
index 2bdc5ed12fca..9e17698c0609 100644
--- a/arch/arm/boot/dts/mt6580.dtsi
+++ b/arch/arm/boot/dts/mt6580.dtsi
@@ -7,7 +7,6 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton.dtsi"
/ {
compatible = "mediatek,mt6580";
diff --git a/arch/arm/boot/dts/mt6589-aquaris5.dts b/arch/arm/boot/dts/mt6589-aquaris5.dts
index 7bbaa1279a26..1e7079a3b449 100644
--- a/arch/arm/boot/dts/mt6589-aquaris5.dts
+++ b/arch/arm/boot/dts/mt6589-aquaris5.dts
@@ -18,6 +18,7 @@
};
memory {
+ device_type = "memory";
reg = <0x80000000 0x40000000>;
};
diff --git a/arch/arm/boot/dts/mt6589.dtsi b/arch/arm/boot/dts/mt6589.dtsi
index 28df8495686a..f3ccb70c0779 100644
--- a/arch/arm/boot/dts/mt6589.dtsi
+++ b/arch/arm/boot/dts/mt6589.dtsi
@@ -7,9 +7,10 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "mediatek,mt6589";
interrupt-parent = <&sysirq>;
diff --git a/arch/arm/boot/dts/mt6592-evb.dts b/arch/arm/boot/dts/mt6592-evb.dts
index 02849f6548e3..5e00c1cca2d1 100644
--- a/arch/arm/boot/dts/mt6592-evb.dts
+++ b/arch/arm/boot/dts/mt6592-evb.dts
@@ -13,7 +13,7 @@
compatible = "mediatek,mt6592-evb", "mediatek,mt6592";
memory {
+ device_type = "memory";
reg = <0x80000000 0x40000000>;
};
};
-
diff --git a/arch/arm/boot/dts/mt6592.dtsi b/arch/arm/boot/dts/mt6592.dtsi
index 8696ac891d60..3716f8db951c 100644
--- a/arch/arm/boot/dts/mt6592.dtsi
+++ b/arch/arm/boot/dts/mt6592.dtsi
@@ -7,9 +7,10 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "mediatek,mt6592";
interrupt-parent = <&sysirq>;
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
index 98f115966391..a79f0b6c3429 100644
--- a/arch/arm/boot/dts/mt7623.dtsi
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -187,17 +187,26 @@
cooling-maps {
map0 {
trip = <&cpu_passive>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
map1 {
trip = <&cpu_active>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
map2 {
trip = <&cpu_hot>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
};
diff --git a/arch/arm/boot/dts/mt8127-moose.dts b/arch/arm/boot/dts/mt8127-moose.dts
index 308829b2da86..560687af87dc 100644
--- a/arch/arm/boot/dts/mt8127-moose.dts
+++ b/arch/arm/boot/dts/mt8127-moose.dts
@@ -13,6 +13,7 @@
compatible = "mediatek,mt8127-moose", "mediatek,mt8127";
memory {
+ device_type = "memory";
reg = <0 0x80000000 0 0x40000000>;
};
};
diff --git a/arch/arm/boot/dts/mt8127.dtsi b/arch/arm/boot/dts/mt8127.dtsi
index 3adfc6f7859c..aced173c2a52 100644
--- a/arch/arm/boot/dts/mt8127.dtsi
+++ b/arch/arm/boot/dts/mt8127.dtsi
@@ -7,9 +7,10 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "skeleton64.dtsi"
/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
compatible = "mediatek,mt8127";
interrupt-parent = <&sysirq>;
diff --git a/arch/arm/boot/dts/mt8135-evbp1.dts b/arch/arm/boot/dts/mt8135-evbp1.dts
index 0ace7a40a60d..f6147fe62f41 100644
--- a/arch/arm/boot/dts/mt8135-evbp1.dts
+++ b/arch/arm/boot/dts/mt8135-evbp1.dts
@@ -13,6 +13,7 @@
compatible = "mediatek,mt8135-evbp1", "mediatek,mt8135";
memory {
+ device_type = "memory";
reg = <0 0x80000000 0 0x40000000>;
};
};
diff --git a/arch/arm/boot/dts/mt8135.dtsi b/arch/arm/boot/dts/mt8135.dtsi
index 688069dc1533..0e4e835026db 100644
--- a/arch/arm/boot/dts/mt8135.dtsi
+++ b/arch/arm/boot/dts/mt8135.dtsi
@@ -9,10 +9,11 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset/mt8135-resets.h>
-#include "skeleton64.dtsi"
#include "mt8135-pinfunc.h"
/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
compatible = "mediatek,mt8135";
interrupt-parent = <&sysirq>;
diff --git a/arch/arm/boot/dts/nspire.dtsi b/arch/arm/boot/dts/nspire.dtsi
index 1a5ae4cd107f..5a3c1f9d1832 100644
--- a/arch/arm/boot/dts/nspire.dtsi
+++ b/arch/arm/boot/dts/nspire.dtsi
@@ -9,9 +9,9 @@
*
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupt-parent = <&intc>;
cpus {
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
index 4c1227d1e79b..17c89df6ce6b 100644
--- a/arch/arm/boot/dts/omap3-evm-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -122,6 +122,7 @@
};
&mmc2 {
+ interrupts-extended = <&intc 86 &omap3_pmx_core 0x12e>;
vmmc-supply = <&wl12xx_vmmc>;
non-removable;
bus-width = <4>;
@@ -132,8 +133,10 @@
wlcore: wlcore@2 {
compatible = "ti,wl1271";
reg = <2>;
- interrupt-parent = <&gpio5>;
- interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 149 */
+ /* gpio_149 with uart1_rts pad as wakeirq */
+ interrupts-extended = <&gpio5 21 IRQ_TYPE_EDGE_RISING>,
+ <&omap3_pmx_core 0x14e>;
+ interrupt-names = "irq", "wakeup";
ref-clock-frequency = <38400000>;
};
};
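/*
 * Illustrative sketch, not part of the diff above: the MMC and wlcore
 * changes here (and the similar droid4 and Panda hunks further down)
 * share one wakeirq pattern - the device interrupt is paired with a
 * pinctrl-single padconf interrupt via interrupts-extended, and the two
 * entries are named "irq" and "wakeup". The phandles, GPIO offset and
 * padconf offset below are placeholders.
 */
wlan: wifi@2 {
	compatible = "ti,wl1271";
	reg = <2>;
	/* first entry: runtime IRQ, second entry: wakeup-capable pad */
	interrupts-extended = <&gpio5 21 IRQ_TYPE_EDGE_RISING>,
			      <&omap3_pmx_core 0x14e>;
	interrupt-names = "irq", "wakeup";
};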
diff --git a/arch/arm/boot/dts/omap3-evm-processor-common.dtsi b/arch/arm/boot/dts/omap3-evm-processor-common.dtsi
index ce7f42f9448c..b4109f48ec18 100644
--- a/arch/arm/boot/dts/omap3-evm-processor-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-processor-common.dtsi
@@ -86,6 +86,10 @@
OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
+ OMAP3_CORE1_IOPAD(0x2164, PIN_OUTPUT | MUX_MODE1) /* sdmmc2_dat4.sdmmc2_dir_dat0 */
+ OMAP3_CORE1_IOPAD(0x2166, PIN_OUTPUT | MUX_MODE1) /* sdmmc2_dat5.sdmmc2_dir_dat1 */
+ OMAP3_CORE1_IOPAD(0x2168, PIN_OUTPUT | MUX_MODE1) /* sdmmc2_dat6.sdmmc2_dir_cmd */
+ OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT | MUX_MODE1) /* sdmmc2_dat7.sdmmc2_clkin */
>;
};
@@ -127,9 +131,13 @@
>;
};
+ /*
+ * Note that gpio_150 is pulled high with an internal pull to prevent wlcore
+ * reset on return from off mode in idle.
+ */
wl12xx_gpio: pinmux_wl12xx_gpio {
pinctrl-single,pins = <
- OMAP3_CORE1_IOPAD(0x2180, PIN_OUTPUT | MUX_MODE4) /* uart1_cts.gpio_150 */
+ OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE7) /* uart1_cts.gpio_150 */
OMAP3_CORE1_IOPAD(0x217e, PIN_INPUT | MUX_MODE4) /* uart1_rts.gpio_149 */
>;
};
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index e53d32691308..04f2b53d4d3d 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -32,6 +32,14 @@
display1 = &tv0;
};
+ ldo_3v3: fixedregulator {
+ compatible = "regulator-fixed";
+ regulator-name = "ldo_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
/* fixed 26MHz oscillator */
hfclk_26m: oscillator {
#clock-cells = <0>;
@@ -116,6 +124,7 @@
spi-cpol;
spi-cpha;
+ backlight = <&backlight>;
label = "lcd";
port {
lcd_in: endpoint {
@@ -125,7 +134,7 @@
};
};
- backlight {
+ backlight: backlight {
compatible = "pwm-backlight";
pwms = <&pwm11 0 12000000 0>;
pwm-names = "backlight";
@@ -224,6 +233,15 @@
};
};
+&omap3_pmx_wkup {
+ gpio1_pins: pinmux_gpio1_pins {
+ pinctrl-single,pins = <
+ OMAP3_WKUP_IOPAD(0x2a14, PIN_INPUT | PIN_OFF_WAKEUPENABLE | MUX_MODE4) /* sys_boot5.gpio_7 */
+ OMAP3_WKUP_IOPAD(0x2a1a, PIN_INPUT | PIN_OFF_WAKEUPENABLE | MUX_MODE4) /* sys_clkout.gpio_10 */
+ >;
+ };
+};
+
&omap3_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
@@ -312,6 +330,12 @@
>;
};
+ gps_pins: pinmux_gps_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* gpio145 */
+ >;
+ };
+
hdq_pins: hdq_pins {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x21c6, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda.hdq */
@@ -636,6 +660,11 @@
status = "disabled";
};
+&gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio1_pins>;
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
@@ -644,6 +673,14 @@
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&uart2_pins>;
+ gnss: gnss {
+ compatible = "wi2wi,w2sg0004";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gps_pins>;
+ sirf,onoff-gpios = <&gpio5 17 GPIO_ACTIVE_HIGH>;
+ lna-supply = <&vsim>;
+ vcc-supply = <&ldo_3v3>;
+ };
};
&uart3 {
@@ -714,11 +751,7 @@
vdda-supply = <&vdac>;
- #address-cells = <1>;
- #size-cells = <0>;
-
port {
- reg = <0>;
venc_out: endpoint {
remote-endpoint = <&opa_in>;
ti,channels = <1>;
diff --git a/arch/arm/boot/dts/omap3-gta04a5.dts b/arch/arm/boot/dts/omap3-gta04a5.dts
index bd232b1b24cb..223b47ac596e 100644
--- a/arch/arm/boot/dts/omap3-gta04a5.dts
+++ b/arch/arm/boot/dts/omap3-gta04a5.dts
@@ -82,7 +82,7 @@
/*
* for WL183x module see
- * http://lxr.free-electrons.com/source/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
+ * Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt
*/
&wifi_pwrseq {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 182a53991c90..826920e6b878 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -814,7 +814,7 @@
/* For debugging, it is often a good idea to remove this GPIO.
It means you can remove back cover (to reboot by removing
battery) and still use the MMC card. */
- cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
+ cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
};
/* most boards use vaux3, only some old versions use vmmc2 instead */
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 0d9b85317529..e142e6c70a59 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -370,6 +370,19 @@
compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
+ /*
+ * These timings are based on the bootloader-set values reported by
+ * CONFIG_OMAP_GPMC_DEBUG=y when booting v4.19 on both N950 and N9
+ * devices (OneNAND Manufacturer: Samsung):
+ *
+ * gpmc cs0 before gpmc_cs_program_settings:
+ * cs0 GPMC_CS_CONFIG1: 0xfd001202
+ * cs0 GPMC_CS_CONFIG2: 0x00181800
+ * cs0 GPMC_CS_CONFIG3: 0x00030300
+ * cs0 GPMC_CS_CONFIG4: 0x18001804
+ * cs0 GPMC_CS_CONFIG5: 0x03171d1d
+ * cs0 GPMC_CS_CONFIG6: 0x97080000
+ */
gpmc,sync-read;
gpmc,sync-write;
gpmc,burst-length = <16>;
@@ -379,26 +392,27 @@
gpmc,device-width = <2>;
gpmc,mux-add-data = <2>;
gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <87>;
- gpmc,cs-wr-off-ns = <87>;
+ gpmc,cs-rd-off-ns = <122>;
+ gpmc,cs-wr-off-ns = <122>;
gpmc,adv-on-ns = <0>;
- gpmc,adv-rd-off-ns = <10>;
- gpmc,adv-wr-off-ns = <10>;
- gpmc,oe-on-ns = <15>;
- gpmc,oe-off-ns = <87>;
+ gpmc,adv-rd-off-ns = <15>;
+ gpmc,adv-wr-off-ns = <15>;
+ gpmc,oe-on-ns = <20>;
+ gpmc,oe-off-ns = <122>;
gpmc,we-on-ns = <0>;
- gpmc,we-off-ns = <87>;
- gpmc,rd-cycle-ns = <112>;
- gpmc,wr-cycle-ns = <112>;
- gpmc,access-ns = <81>;
+ gpmc,we-off-ns = <122>;
+ gpmc,rd-cycle-ns = <148>;
+ gpmc,wr-cycle-ns = <148>;
+ gpmc,access-ns = <117>;
gpmc,page-burst-access-ns = <15>;
gpmc,bus-turnaround-ns = <0>;
gpmc,cycle2cycle-delay-ns = <0>;
gpmc,wait-monitoring-ns = <0>;
- gpmc,clk-activation-ns = <5>;
- gpmc,wr-data-mux-bus-ns = <30>;
- gpmc,wr-access-ns = <81>;
- gpmc,sync-clk-ps = <15000>;
+ gpmc,clk-activation-ns = <10>;
+ gpmc,wr-data-mux-bus-ns = <40>;
+ gpmc,wr-access-ns = <117>;
+
+ gpmc,sync-clk-ps = <15000>; /* TBC: where does this value come from? */
/*
* MTD partition table corresponding to Nokia's MeeGo 1.2
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 04758a2a87f0..e21ec929f096 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -359,20 +359,24 @@
&mmc3 {
vmmc-supply = <&wl12xx_vmmc>;
+ /* uart2_tx.sdmmc3_dat1 pad as wakeirq */
interrupts-extended = <&wakeupgen GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core 0xde>;
-
+ interrupt-names = "irq", "wakeup";
non-removable;
bus-width = <4>;
cap-power-off-card;
+ keep-power-in-suspend;
#address-cells = <1>;
#size-cells = <0>;
wlcore: wlcore@2 {
compatible = "ti,wl1285", "ti,wl1283";
reg = <2>;
- interrupt-parent = <&gpio4>;
- interrupts = <4 IRQ_TYPE_EDGE_RISING>; /* gpio100 */
+ /* gpio_100 with gpmc_wait2 pad as wakeirq */
+ interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>,
+ <&omap4_pmx_core 0x4e>;
+ interrupt-names = "irq", "wakeup";
ref-clock-frequency = <26000000>;
tcxo-clock-frequency = <26000000>;
};
@@ -644,6 +648,17 @@
};
};
+/* Configure pwm clock source for timers 8 & 9 */
+&timer8 {
+ assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+};
+
+&timer9 {
+ assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+};
+
/*
* As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
* uart1 wakeirq.
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 27895c1604b9..926f018823a4 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -485,8 +485,10 @@
wlcore: wlcore@2 {
compatible = "ti,wl1271";
reg = <2>;
- interrupt-parent = <&gpio2>;
- interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 53 */
+ /* gpio_53 with gpmc_ncs3 pad as wakeup */
+ interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_RISING>,
+ <&omap4_pmx_core 0x3a>;
+ interrupt-names = "irq", "wakeup";
ref-clock-frequency = <38400000>;
};
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 9dc7ec7655cb..c88817bdcc56 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -26,6 +26,9 @@
};
vdd_eth: fixedregulator-vdd-eth {
+ pinctrl-names = "default";
+ pinctrl-0 = <&enet_enable_gpio>;
+
compatible = "regulator-fixed";
regulator-name = "VDD_ETH";
regulator-min-microvolt = <3300000>;
@@ -352,6 +355,29 @@
OMAP4_IOPAD(0x152, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */
>;
};
+
+ /* gpio_48 for ENET_ENABLE */
+ enet_enable_gpio: pinmux_enet_enable_gpio {
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x070, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* gpmc_a24.gpio_48 */
+ >;
+ };
+
+ ks8851_pins: pinmux_ks8851_pins {
+ pinctrl-single,pins = <
+ /* ENET_INT */
+ OMAP4_IOPAD(0x054, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad10.gpio_34 */
+ /*
+ * Mysterious pin which makes the Ethernet work.
+ * The legacy board file requested this pin on boot
+ * (ETH_KS8851_QUART) and set it to high, similarly to
+ * the ENET_ENABLE pin.
+ * We could use gpio-hog to keep it high, but let's use
+ * it as a reset GPIO for ks8851.
+ */
+ OMAP4_IOPAD(0x13a, PIN_OUTPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.gpio_138 */
+ >;
+ };
};
&i2c1 {
@@ -452,12 +478,16 @@
pinctrl-0 = <&mcspi1_pins>;
eth@0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ks8851_pins>;
+
compatible = "ks8851";
spi-max-frequency = <24000000>;
reg = <0>;
interrupt-parent = <&gpio2>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>; /* gpio line 34 */
vdd-supply = <&vdd_eth>;
+ reset-gpios = <&gpio5 10 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index bc853ebeda22..61a06f6add3c 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -317,7 +317,8 @@
palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
pinctrl-single,pins = <
- OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+ /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
>;
};
@@ -385,7 +386,8 @@
palmas: palmas@48 {
compatible = "ti,palmas";
- interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x48>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -651,7 +653,8 @@
pinctrl-names = "default";
pinctrl-0 = <&twl6040_pins>;
- interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
/* audpwron gpio defined in the board specific dts */
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5e21fb430a65..e78d3718f145 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -181,6 +181,13 @@
OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
>;
};
+
+ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+ pinctrl-single,pins = <
+ /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
+ >;
+ };
};
&omap5_pmx_core {
@@ -414,8 +421,11 @@
palmas: palmas@48 {
compatible = "ti,palmas";
- interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
reg = <0x48>;
+ pinctrl-0 = <&palmas_sys_nirq_pins>;
+ pinctrl-names = "default";
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <2>;
ti,system-power-controller;
diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi
index 9c7e309d9c2c..0960348002ad 100644
--- a/arch/arm/boot/dts/omap5-l4.dtsi
+++ b/arch/arm/boot/dts/omap5-l4.dtsi
@@ -1046,8 +1046,6 @@
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,syss-mask = <1>;
- ti,no-reset-on-init;
- ti,no-idle-on-init;
/* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */
clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>;
clock-names = "fck";
diff --git a/arch/arm/boot/dts/orion5x-lacie-d2-network.dts b/arch/arm/boot/dts/orion5x-lacie-d2-network.dts
index 8c2449da6f00..422958d13d42 100644
--- a/arch/arm/boot/dts/orion5x-lacie-d2-network.dts
+++ b/arch/arm/boot/dts/orion5x-lacie-d2-network.dts
@@ -19,6 +19,7 @@
compatible = "lacie,d2-network", "marvell,orion5x-88f5182", "marvell,orion5x";
memory {
+ device_type = "memory";
reg = <0x00000000 0x4000000>; /* 64 MB */
};
diff --git a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
index b545d0f228a5..0043e0040153 100644
--- a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
+++ b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
@@ -25,6 +25,7 @@
compatible = "lacie,ethernet-disk-mini-v2", "marvell,orion5x-88f5182", "marvell,orion5x";
memory {
+ device_type = "memory";
reg = <0x00000000 0x4000000>; /* 64 MB */
};
diff --git a/arch/arm/boot/dts/orion5x-lswsgl.dts b/arch/arm/boot/dts/orion5x-lswsgl.dts
index 0d97ded66257..2fbc17d6dfa4 100644
--- a/arch/arm/boot/dts/orion5x-lswsgl.dts
+++ b/arch/arm/boot/dts/orion5x-lswsgl.dts
@@ -55,6 +55,7 @@
compatible = "buffalo,lswsgl", "marvell,orion5x-88f5182", "marvell,orion5x";
memory {
+ device_type = "memory";
reg = <0x00000000 0x8000000>; /* 128 MB */
};
diff --git a/arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts b/arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts
index 0324cb54939d..0ca6208a267d 100644
--- a/arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts
+++ b/arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts
@@ -19,6 +19,7 @@
compatible = "maxtor,shared-storage-2", "marvell,orion5x-88f5182", "marvell,orion5x";
memory {
+ device_type = "memory";
reg = <0x00000000 0x4000000>; /* 64 MB */
};
diff --git a/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts b/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts
index 9f6ae4e1de06..ea081afa469d 100644
--- a/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts
+++ b/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts
@@ -21,6 +21,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x2000000>; /* 32 MB */
};
diff --git a/arch/arm/boot/dts/orion5x-rd88f5182-nas.dts b/arch/arm/boot/dts/orion5x-rd88f5182-nas.dts
index d1817af53e0b..487324f7c54e 100644
--- a/arch/arm/boot/dts/orion5x-rd88f5182-nas.dts
+++ b/arch/arm/boot/dts/orion5x-rd88f5182-nas.dts
@@ -16,6 +16,7 @@
compatible = "marvell,rd-88f5182-nas", "marvell,orion5x-88f5182", "marvell,orion5x";
memory {
+ device_type = "memory";
reg = <0x00000000 0x4000000>; /* 64 MB */
};
diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi
index fbccfbbab223..61e631b3fd8b 100644
--- a/arch/arm/boot/dts/orion5x.dtsi
+++ b/arch/arm/boot/dts/orion5x.dtsi
@@ -6,11 +6,11 @@
* warranty of any kind, whether express or implied.
*/
-#include "skeleton.dtsi"
-
#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Marvell Orion5x SoC";
compatible = "marvell,orion5x";
interrupt-parent = <&intc>;
diff --git a/arch/arm/boot/dts/ox810se.dtsi b/arch/arm/boot/dts/ox810se.dtsi
index c2b48a1838eb..3a26650de4eb 100644
--- a/arch/arm/boot/dts/ox810se.dtsi
+++ b/arch/arm/boot/dts/ox810se.dtsi
@@ -6,11 +6,12 @@
* Licensed under GPLv2 or later
*/
-/include/ "skeleton.dtsi"
#include <dt-bindings/clock/oxsemi,ox810se.h>
#include <dt-bindings/reset/oxsemi,ox810se.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "oxsemi,ox810se";
cpus {
@@ -25,6 +26,7 @@
};
memory {
+ device_type = "memory";
/* Max 256MB @ 0x48000000 */
reg = <0x48000000 0x10000000>;
};
diff --git a/arch/arm/boot/dts/ox820.dtsi b/arch/arm/boot/dts/ox820.dtsi
index 085bbd33eadc..f3239586f38d 100644
--- a/arch/arm/boot/dts/ox820.dtsi
+++ b/arch/arm/boot/dts/ox820.dtsi
@@ -6,12 +6,13 @@
* Licensed under GPLv2 or later
*/
-/include/ "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/oxsemi,ox820.h>
#include <dt-bindings/reset/oxsemi,ox820.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "oxsemi,ox820";
cpus {
@@ -35,6 +36,7 @@
};
memory {
+ device_type = "memory";
/* Max 512MB @ 0x60000000 */
reg = <0x60000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
index a1266cf8776c..291a28f34762 100644
--- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
+++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
@@ -10,7 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-/include/ "skeleton.dtsi"
/ {
model = "Picochip picoXcell PC3X2";
compatible = "picochip,pc3x2";
diff --git a/arch/arm/boot/dts/picoxcell-pc3x3.dtsi b/arch/arm/boot/dts/picoxcell-pc3x3.dtsi
index d78cd207eca1..bf9a39ea76b0 100644
--- a/arch/arm/boot/dts/picoxcell-pc3x3.dtsi
+++ b/arch/arm/boot/dts/picoxcell-pc3x3.dtsi
@@ -10,7 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-/include/ "skeleton.dtsi"
/ {
model = "Picochip picoXcell PC3X3";
compatible = "picochip,pc3x3";
diff --git a/arch/arm/boot/dts/prima2-evb.dts b/arch/arm/boot/dts/prima2-evb.dts
index 57286b4e7b87..55594b3bbc99 100644
--- a/arch/arm/boot/dts/prima2-evb.dts
+++ b/arch/arm/boot/dts/prima2-evb.dts
@@ -15,6 +15,7 @@
compatible = "sirf,prima2", "sirf,prima2-cb";
memory {
+ device_type = "memory";
reg = <0x00000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 1ca1a9aa953f..54d4f8850e22 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -6,7 +6,6 @@
* Licensed under GPLv2 or later.
*/
-/include/ "skeleton.dtsi"
/ {
compatible = "sirf,prima2";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/pxa168.dtsi b/arch/arm/boot/dts/pxa168.dtsi
index b899e25cbb1b..7137f3550183 100644
--- a/arch/arm/boot/dts/pxa168.dtsi
+++ b/arch/arm/boot/dts/pxa168.dtsi
@@ -7,10 +7,12 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/marvell,pxa168.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
aliases {
serial0 = &uart1;
serial1 = &uart2;
diff --git a/arch/arm/boot/dts/pxa2xx.dtsi b/arch/arm/boot/dts/pxa2xx.dtsi
index e83879d97aea..bd6bf6d9300f 100644
--- a/arch/arm/boot/dts/pxa2xx.dtsi
+++ b/arch/arm/boot/dts/pxa2xx.dtsi
@@ -6,7 +6,6 @@
* Licensed under GPLv2 or later.
*/
-#include "skeleton.dtsi"
#include "dt-bindings/clock/pxa-clock.h"
#define PMGROUP(pin) #pin
@@ -29,6 +28,8 @@
}
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Marvell PXA2xx family SoC";
compatible = "marvell,pxa2xx";
interrupt-parent = <&pxairq>;
diff --git a/arch/arm/boot/dts/pxa910.dtsi b/arch/arm/boot/dts/pxa910.dtsi
index 0868f6729be1..c88553a8ee29 100644
--- a/arch/arm/boot/dts/pxa910.dtsi
+++ b/arch/arm/boot/dts/pxa910.dtsi
@@ -7,10 +7,12 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/marvell,pxa910.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
aliases {
serial0 = &uart1;
serial1 = &uart2;
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 48c3cf427610..1374c2e52c20 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-#include "skeleton.dtsi"
#include <dt-bindings/clock/qcom,gcc-msm8960.h>
#include <dt-bindings/reset/qcom,gcc-msm8960.h>
#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
@@ -10,6 +9,8 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Qualcomm APQ8064";
compatible = "qcom,apq8064";
interrupt-parent = <&intc>;
@@ -94,6 +95,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
thermal-zones {
cpu-thermal0 {
polling-delay-passive = <250>;
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index 899f28533ed7..0a0fb147ebb9 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-#include "skeleton.dtsi"
-
#include <dt-bindings/clock/qcom,gcc-apq8084.h>
#include <dt-bindings/gpio/gpio.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Qualcomm APQ 8084";
compatible = "qcom,apq8084";
interrupt-parent = <&intc>;
@@ -87,6 +87,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
firmware {
scm {
compatible = "qcom,scm";
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 2d56008d8d6b..9e75f97770ce 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -13,12 +13,14 @@
/dts-v1/;
-#include "skeleton.dtsi"
#include <dt-bindings/clock/qcom,gcc-ipq4019.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
model = "Qualcomm Technologies, Inc. IPQ4019";
compatible = "qcom,ipq4019";
interrupt-parent = <&intc>;
@@ -133,6 +135,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
pmu {
compatible = "arm,cortex-a7-pmu";
interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) |
@@ -396,7 +403,7 @@
ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
0x82000000 0 0x40300000 0x40300000 0 0x400000>;
- interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index f793cd1ad6d0..16c0da97932c 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-#include "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-ipq806x.h>
#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
@@ -11,6 +10,8 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Qualcomm IPQ8064";
compatible = "qcom,ipq8064";
interrupt-parent = <&intc>;
@@ -45,6 +46,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) |
diff --git a/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi b/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi
index 7869898e392d..26b034bd19d2 100644
--- a/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi
@@ -50,6 +50,7 @@
compatible = "swir,wp8548", "qcom,mdm9615";
memory {
+ device_type = "memory";
reg = <0x48000000 0x7F00000>;
};
};
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index c852b69229c9..e49f67ad5dbc 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -45,8 +45,6 @@
/dts-v1/;
-/include/ "skeleton.dtsi"
-
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-mdm9615.h>
#include <dt-bindings/reset/qcom,gcc-mdm9615.h>
@@ -54,6 +52,8 @@
#include <dt-bindings/soc/qcom,gsbi.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Qualcomm MDM9615";
compatible = "qcom,mdm9615";
interrupt-parent = <&intc>;
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index 70698941f64c..993107ed1476 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -1,14 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "skeleton.dtsi"
-
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8660.h>
#include <dt-bindings/soc/qcom,gsbi.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Qualcomm MSM8660";
compatible = "qcom,msm8660";
interrupt-parent = <&intc>;
@@ -39,6 +39,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
cpu-pmu {
compatible = "qcom,scorpion-mp-pmu";
interrupts = <1 9 0x304>;
@@ -133,6 +138,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ status = "disabled";
syscon-tcsr = <&tcsr>;
@@ -140,7 +146,7 @@
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x16540000 0x1000>,
<0x16500000 0x1000>;
- interrupts = <GIC_SPI 156 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GSBI6_UART_CLK>, <&gcc GSBI6_H_CLK>;
clock-names = "core", "iface";
status = "disabled";
@@ -149,7 +155,7 @@
gsbi6_i2c: i2c@16580000 {
compatible = "qcom,i2c-qup-v1.1.1";
reg = <0x16580000 0x1000>;
- interrupts = <GIC_SPI 157 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GSBI6_QUP_CLK>, <&gcc GSBI6_H_CLK>;
clock-names = "core", "iface";
#address-cells = <1>;
@@ -167,6 +173,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ status = "disabled";
syscon-tcsr = <&tcsr>;
@@ -174,7 +181,7 @@
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x16640000 0x1000>,
<0x16600000 0x1000>;
- interrupts = <GIC_SPI 158 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GSBI7_UART_CLK>, <&gcc GSBI7_H_CLK>;
clock-names = "core", "iface";
status = "disabled";
@@ -183,7 +190,7 @@
gsbi7_i2c: i2c@16680000 {
compatible = "qcom,i2c-qup-v1.1.1";
reg = <0x16680000 0x1000>;
- interrupts = <GIC_SPI 159 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GSBI7_QUP_CLK>, <&gcc GSBI7_H_CLK>;
clock-names = "core", "iface";
#address-cells = <1>;
@@ -207,7 +214,7 @@
gsbi8_i2c: i2c@19880000 {
compatible = "qcom,i2c-qup-v1.1.1";
reg = <0x19880000 0x1000>;
- interrupts = <GIC_SPI 161 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GSBI8_QUP_CLK>, <&gcc GSBI8_H_CLK>;
clock-names = "core", "iface";
#address-cells = <1>;
@@ -232,7 +239,7 @@
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x19c40000 0x1000>,
<0x19c00000 0x1000>;
- interrupts = <0 195 IRQ_TYPE_NONE>;
+ interrupts = <0 195 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GSBI12_UART_CLK>, <&gcc GSBI12_H_CLK>;
clock-names = "core", "iface";
status = "disabled";
@@ -241,7 +248,7 @@
gsbi12_i2c: i2c@19c80000 {
compatible = "qcom,i2c-qup-v1.1.1";
reg = <0x19c80000 0x1000>;
- interrupts = <0 196 IRQ_TYPE_NONE>;
+ interrupts = <0 196 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GSBI12_QUP_CLK>, <&gcc GSBI12_H_CLK>;
clock-names = "core", "iface";
#address-cells = <1>;
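/*
 * Illustrative sketch, not part of the diff above: these MSM8660 hunks
 * replace IRQ_TYPE_NONE with a real trigger type, since GIC SPI
 * specifiers are expected to carry a valid trigger flag in their third
 * cell. The SPI number 123 below is a placeholder.
 */
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;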
diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
index 1733d8f40ab1..f2aeaccdc1ad 100644
--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
@@ -1,14 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "skeleton.dtsi"
-
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8960.h>
#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/soc/qcom,gsbi.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Qualcomm MSM8960";
compatible = "qcom,msm8960";
interrupt-parent = <&intc>;
@@ -44,6 +44,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 10 0x304>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
index 51444c53fc72..b3b04736a159 100644
--- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
@@ -220,6 +220,20 @@
};
};
};
+
+ vreg_wlan: wlan-regulator {
+ compatible = "regulator-fixed";
+
+ regulator-name = "wl-reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&msmgpio 26 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_regulator_pin>;
+ };
};
&soc {
@@ -242,6 +256,30 @@
};
};
+ sdhc2_pin_a: sdhc2-pin-active {
+ clk {
+ pins = "sdc2_clk";
+ drive-strength = <6>;
+ bias-disable;
+ };
+
+ cmd-data {
+ pins = "sdc2_cmd", "sdc2_data";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+
+ i2c1_pins: i2c1 {
+ mux {
+ pins = "gpio2", "gpio3";
+ function = "blsp_i2c1";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
i2c3_pins: i2c3 {
mux {
pins = "gpio10", "gpio11";
@@ -283,6 +321,32 @@
pinctrl-0 = <&sdhc1_pin_a>;
};
+ sdhci@f98a4900 {
+ status = "ok";
+
+ max-frequency = <100000000>;
+ bus-width = <4>;
+ non-removable;
+ vmmc-supply = <&vreg_wlan>;
+ vqmmc-supply = <&pm8941_s3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhc2_pin_a>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bcrmf@1 {
+ compatible = "brcm,bcm4339-fmac", "brcm,bcm4329-fmac";
+ reg = <1>;
+
+ brcm,drive-strength = <10>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_sleep_clk_pin>;
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
@@ -342,6 +406,24 @@
};
};
+ i2c@f9923000 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ clock-frequency = <100000>;
+ qcom,src-freq = <50000000>;
+
+ charger: bq24192@6b {
+ compatible = "ti,bq24192";
+ reg = <0x6b>;
+ interrupts-extended = <&spmi_bus 0 0xd5 0 IRQ_TYPE_EDGE_FALLING>;
+
+ omit-battery-class;
+
+ usb_otg_vbus: usb-otg-vbus { };
+ };
+ };
+
i2c@f9925000 {
status = "ok";
pinctrl-names = "default";
@@ -359,6 +441,31 @@
amstaos,proximity-diodes = <0>;
};
};
+
+ usb@f9a55000 {
+ status = "ok";
+
+ phys = <&usb_hs1_phy>;
+ phy-select = <&tcsr 0xb000 0>;
+
+ extcon = <&charger>, <&usb_id>;
+ vbus-supply = <&usb_otg_vbus>;
+
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+
+ ulpi {
+ phy@a {
+ status = "ok";
+
+ v1p8-supply = <&pm8941_l6>;
+ v3p3-supply = <&pm8941_l24>;
+
+ qcom,init-seq = /bits/ 8 <0x1 0x64>;
+ };
+ };
+ };
};
&spmi_bus {
@@ -371,6 +478,29 @@
bias-pull-up;
power-source = <PM8941_GPIO_S3>;
};
+
+ wlan_sleep_clk_pin: wl-sleep-clk {
+ pins = "gpio16";
+ function = "func2";
+
+ output-high;
+ power-source = <PM8941_GPIO_S3>;
+ };
+
+ wlan_regulator_pin: wl-reg-active {
+ pins = "gpio17";
+ function = "normal";
+
+ bias-disable;
+ power-source = <PM8941_GPIO_S3>;
+ };
+
+ otg {
+ gpio-hog;
+ gpios = <35 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "otg-gpio";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index ca266a5f021d..45b5c8ef0374 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -6,9 +6,10 @@
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/reset/qcom,gcc-msm8974.h>
#include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Qualcomm MSM8974";
compatible = "qcom,msm8974";
interrupt-parent = <&intc>;
@@ -130,6 +131,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
thermal-zones {
cpu-thermal0 {
polling-delay-passive = <250>;
@@ -706,6 +712,17 @@
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
};
+ i2c@f9923000 {
+ status = "disabled";
+ compatible = "qcom,i2c-qup-v2.1.1";
+ reg = <0xf9923000 0x1000>;
+ interrupts = <0 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
i2c@f9924000 {
status = "disabled";
compatible = "qcom,i2c-qup-v2.1.1";
diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi
index 2515c5c217ac..9a91b758f7aa 100644
--- a/arch/arm/boot/dts/qcom-pm8941.dtsi
+++ b/arch/arm/boot/dts/qcom-pm8941.dtsi
@@ -63,6 +63,7 @@
compatible = "qcom,pm8941-gpio", "qcom,spmi-gpio";
reg = <0xc000>;
gpio-controller;
+ gpio-ranges = <&pm8941_gpios 0 0 36>;
#gpio-cells = <2>;
interrupts = <0 0xc0 0 IRQ_TYPE_NONE>,
<0 0xc1 0 IRQ_TYPE_NONE>,
diff --git a/arch/arm/boot/dts/r7s9210-rza2mevb.dts b/arch/arm/boot/dts/r7s9210-rza2mevb.dts
new file mode 100644
index 000000000000..991e09de1219
--- /dev/null
+++ b/arch/arm/boot/dts/r7s9210-rza2mevb.dts
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the RZA2MEVB board
+ *
+ * Copyright (C) 2018 Renesas Electronics
+ *
+ */
+
+/dts-v1/;
+#include "r7s9210.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/r7s9210-pinctrl.h>
+
+/ {
+ model = "RZA2MEVB";
+ compatible = "renesas,rza2mevb", "renesas,r7s9210";
+
+ aliases {
+ serial0 = &scif4;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x40000000 0x00800000>; /* HyperRAM */
+ };
+
+ lbsc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ red {
+ gpios = <&pinctrl RZA2_PIN(PORT6, 0) GPIO_ACTIVE_HIGH>;
+ };
+ green {
+ gpios = <&pinctrl RZA2_PIN(PORTC, 1) GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+/* EXTAL */
+&extal_clk {
+ clock-frequency = <24000000>; /* 24MHz */
+};
+
+/* RTC_X1 */
+&rtc_x1_clk {
+ clock-frequency = <32768>;
+};
+
+&pinctrl {
+ /* Serial Console */
+ scif4_pins: serial4 {
+ pinmux = <RZA2_PINMUX(PORT9, 0, 4)>, /* TxD4 */
+ <RZA2_PINMUX(PORT9, 1, 4)>; /* RxD4 */
+ };
+};
+
+/* High resolution System tick timers */
+&ostm0 {
+ status = "okay";
+};
+
+&ostm1 {
+ status = "okay";
+};
+
+/* Serial Console */
+&scif4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&scif4_pins>;
+
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/r7s9210.dtsi b/arch/arm/boot/dts/r7s9210.dtsi
new file mode 100644
index 000000000000..22baa96f5974
--- /dev/null
+++ b/arch/arm/boot/dts/r7s9210.dtsi
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the R7S9210 SoC
+ *
+ * Copyright (C) 2018 Renesas Electronics Corporation
+ *
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/r7s9210-cpg-mssr.h>
+
+/ {
+ compatible = "renesas,r7s9210";
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* External clocks */
+ extal_clk: extal {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ /* Value must be set by board */
+ clock-frequency = <0>;
+ };
+
+ rtc_x1_clk: rtc_x1 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ /* If clk present, value (32768) must be set by board */
+ clock-frequency = <0>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ clock-frequency = <528000000>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ L2: cache-controller@1f003000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x1f003000 0x1000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ arm,early-bresp-disable;
+ arm,full-line-zero-disable;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ scif0: serial@e8007000 {
+ compatible = "renesas,scif-r7s9210";
+ reg = <0xe8007000 0x18>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD 47>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ scif1: serial@e8007800 {
+ compatible = "renesas,scif-r7s9210";
+ reg = <0xe8007800 0x18>;
+ interrupts = <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD 46>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ scif2: serial@e8008000 {
+ compatible = "renesas,scif-r7s9210";
+ reg = <0xe8008000 0x18>;
+ interrupts = <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD 45>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ scif3: serial@e8008800 {
+ compatible = "renesas,scif-r7s9210";
+ reg = <0xe8008800 0x18>;
+ interrupts = <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD 44>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ scif4: serial@e8009000 {
+ compatible = "renesas,scif-r7s9210";
+ reg = <0xe8009000 0x18>;
+ interrupts = <GIC_SPI 289 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 291 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 289 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi",
+ "bri", "dri", "tei";
+ clocks = <&cpg CPG_MOD 43>;
+ clock-names = "fck";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm0: timer@e803b000 {
+ compatible = "renesas,r7s9210-ostm", "renesas,ostm";
+ reg = <0xe803b000 0x30>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 36>;
+ clock-names = "ostm0";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm1: timer@e803c000 {
+ compatible = "renesas,r7s9210-ostm", "renesas,ostm";
+ reg = <0xe803c000 0x30>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 35>;
+ clock-names = "ostm1";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ ostm2: timer@e803d000 {
+ compatible = "renesas,r7s9210-ostm", "renesas,ostm";
+ reg = <0xe803d000 0x30>;
+ interrupts = <GIC_SPI 58 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&cpg CPG_MOD 34>;
+ clock-names = "ostm2";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@e8221000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0xe8221000 0x1000>,
+ <0xe8222000 0x1000>;
+ };
+
+ cpg: clock-controller@fcfe0010 {
+ compatible = "renesas,r7s9210-cpg-mssr";
+ reg = <0xfcfe0010 0x455>;
+ clocks = <&extal_clk>;
+ clock-names = "extal";
+ #clock-cells = <2>;
+ #power-domain-cells = <0>;
+ };
+
+ wdt: watchdog@fcfe7000 {
+ compatible = "renesas,r7s9210-wdt", "renesas,rza-wdt";
+ reg = <0xfcfe7000 0x26>;
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_CORE R7S9210_CLK_P0>;
+ };
+
+ bsid: chipid@fcfe8004 {
+ compatible = "renesas,bsid";
+ reg = <0xfcfe8004 4>;
+ };
+
+ pinctrl: pin-controller@fcffe000 {
+ compatible = "renesas,r7s9210-pinctrl";
+ reg = <0xfcffe000 0x1000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 176>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
index 3cc33f7ff7fe..de981d629bdd 100644
--- a/arch/arm/boot/dts/r8a7743.dtsi
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -15,25 +15,6 @@
#address-cells = <2>;
#size-cells = <2>;
- aliases {
- i2c0 = &i2c0;
- i2c1 = &i2c1;
- i2c2 = &i2c2;
- i2c3 = &i2c3;
- i2c4 = &i2c4;
- i2c5 = &i2c5;
- i2c6 = &iic0;
- i2c7 = &iic1;
- i2c8 = &iic3;
- spi0 = &qspi;
- spi1 = &msiof0;
- spi2 = &msiof1;
- spi3 = &msiof2;
- vin0 = &vin0;
- vin1 = &vin1;
- vin2 = &vin2;
- };
-
/*
* The external audio clocks are configured as 0 Hz fixed frequency
* clocks by default.
@@ -154,6 +135,16 @@
#size-cells = <2>;
ranges;
+ rwdt: watchdog@e6020000 {
+ compatible = "renesas,r8a7743-wdt",
+ "renesas,rcar-gen2-wdt";
+ reg = <0 0xe6020000 0 0x0c>;
+ clocks = <&cpg CPG_MOD 402>;
+ power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 402>;
+ status = "disabled";
+ };
+
gpio0: gpio@e6050000 {
compatible = "renesas,gpio-r8a7743",
"renesas,rcar-gen2-gpio";
@@ -310,16 +301,6 @@
reg = <0 0xe6160000 0 0x100>;
};
- rwdt: watchdog@e6020000 {
- compatible = "renesas,r8a7743-wdt",
- "renesas,rcar-gen2-wdt";
- reg = <0 0xe6020000 0 0x0c>;
- clocks = <&cpg CPG_MOD 402>;
- power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
- resets = <&cpg 402>;
- status = "disabled";
- };
-
sysc: system-controller@e6180000 {
compatible = "renesas,r8a7743-sysc";
reg = <0 0xe6180000 0 0x200>;
@@ -564,9 +545,7 @@
/* doesn't need pinmux */
#address-cells = <1>;
#size-cells = <0>;
- compatible = "renesas,iic-r8a7743",
- "renesas,rcar-gen2-iic",
- "renesas,rmobile-iic";
+ compatible = "renesas,iic-r8a7743";
reg = <0 0xe60b0000 0 0x425>;
interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 926>;
@@ -1681,15 +1660,12 @@
du: display@feb00000 {
compatible = "renesas,du-r8a7743";
- reg = <0 0xfeb00000 0 0x40000>,
- <0 0xfeb90000 0 0x1c>;
- reg-names = "du", "lvds.0";
+ reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>,
- <&cpg CPG_MOD 726>;
- clock-names = "du.0", "du.1", "lvds.0";
+ <&cpg CPG_MOD 723>;
+ clock-names = "du.0", "du.1";
status = "disabled";
ports {
@@ -1704,6 +1680,33 @@
port@1 {
reg = <1>;
du_out_lvds0: endpoint {
+ remote-endpoint = <&lvds0_in>;
+ };
+ };
+ };
+ };
+
+ lvds0: lvds@feb90000 {
+ compatible = "renesas,r8a7743-lvds";
+ reg = <0 0xfeb90000 0 0x1c>;
+ clocks = <&cpg CPG_MOD 726>;
+ power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 726>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds0_in: endpoint {
+ remote-endpoint = <&du_out_lvds0>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ lvds0_out: endpoint {
};
};
};
diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi
index 04148d608fc4..fa74a262107b 100644
--- a/arch/arm/boot/dts/r8a7744.dtsi
+++ b/arch/arm/boot/dts/r8a7744.dtsi
@@ -998,6 +998,54 @@
status = "disabled";
};
+ msiof0: spi@e6e20000 {
+ compatible = "renesas,msiof-r8a7744",
+ "renesas,rcar-gen2-msiof";
+ reg = <0 0xe6e20000 0 0x0064>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 000>;
+ dmas = <&dmac0 0x51>, <&dmac0 0x52>,
+ <&dmac1 0x51>, <&dmac1 0x52>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ resets = <&cpg 000>;
+ status = "disabled";
+ };
+
+ msiof1: spi@e6e10000 {
+ compatible = "renesas,msiof-r8a7744",
+ "renesas,rcar-gen2-msiof";
+ reg = <0 0xe6e10000 0 0x0064>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 208>;
+ dmas = <&dmac0 0x55>, <&dmac0 0x56>,
+ <&dmac1 0x55>, <&dmac1 0x56>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ resets = <&cpg 208>;
+ status = "disabled";
+ };
+
+ msiof2: spi@e6e00000 {
+ compatible = "renesas,msiof-r8a7744",
+ "renesas,rcar-gen2-msiof";
+ reg = <0 0xe6e00000 0 0x0064>;
+ interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 205>;
+ dmas = <&dmac0 0x41>, <&dmac0 0x42>,
+ <&dmac1 0x41>, <&dmac1 0x42>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ resets = <&cpg 205>;
+ status = "disabled";
+ };
+
pwm0: pwm@e6e30000 {
compatible = "renesas,pwm-r8a7744", "renesas,pwm-rcar";
reg = <0 0xe6e30000 0 0x8>;
@@ -1068,54 +1116,6 @@
status = "disabled";
};
- msiof0: spi@e6e20000 {
- compatible = "renesas,msiof-r8a7744",
- "renesas,rcar-gen2-msiof";
- reg = <0 0xe6e20000 0 0x0064>;
- interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 000>;
- dmas = <&dmac0 0x51>, <&dmac0 0x52>,
- <&dmac1 0x51>, <&dmac1 0x52>;
- dma-names = "tx", "rx", "tx", "rx";
- power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
- #address-cells = <1>;
- #size-cells = <0>;
- resets = <&cpg 000>;
- status = "disabled";
- };
-
- msiof1: spi@e6e10000 {
- compatible = "renesas,msiof-r8a7744",
- "renesas,rcar-gen2-msiof";
- reg = <0 0xe6e10000 0 0x0064>;
- interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 208>;
- dmas = <&dmac0 0x55>, <&dmac0 0x56>,
- <&dmac1 0x55>, <&dmac1 0x56>;
- dma-names = "tx", "rx", "tx", "rx";
- power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
- #address-cells = <1>;
- #size-cells = <0>;
- resets = <&cpg 208>;
- status = "disabled";
- };
-
- msiof2: spi@e6e00000 {
- compatible = "renesas,msiof-r8a7744",
- "renesas,rcar-gen2-msiof";
- reg = <0 0xe6e00000 0 0x0064>;
- interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 205>;
- dmas = <&dmac0 0x41>, <&dmac0 0x42>,
- <&dmac1 0x41>, <&dmac1 0x42>;
- dma-names = "tx", "rx", "tx", "rx";
- power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
- #address-cells = <1>;
- #size-cells = <0>;
- resets = <&cpg 205>;
- status = "disabled";
- };
-
can0: can@e6e80000 {
compatible = "renesas,can-r8a7744",
"renesas,rcar-gen2-can";
@@ -1589,33 +1589,6 @@
resets = <&cpg 408>;
};
- vsp@fe928000 {
- compatible = "renesas,vsp1";
- reg = <0 0xfe928000 0 0x8000>;
- interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 131>;
- power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
- resets = <&cpg 131>;
- };
-
- vsp@fe930000 {
- compatible = "renesas,vsp1";
- reg = <0 0xfe930000 0 0x8000>;
- interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 128>;
- power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
- resets = <&cpg 128>;
- };
-
- vsp@fe938000 {
- compatible = "renesas,vsp1";
- reg = <0 0xfe938000 0 0x8000>;
- interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 127>;
- power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
- resets = <&cpg 127>;
- };
-
pciec: pcie@fe000000 {
compatible = "renesas,pcie-r8a7744",
"renesas,pcie-rcar-gen2";
@@ -1644,9 +1617,42 @@
status = "disabled";
};
+ vsp@fe928000 {
+ compatible = "renesas,vsp1";
+ reg = <0 0xfe928000 0 0x8000>;
+ interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 131>;
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ resets = <&cpg 131>;
+ };
+
+ vsp@fe930000 {
+ compatible = "renesas,vsp1";
+ reg = <0 0xfe930000 0 0x8000>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 128>;
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ resets = <&cpg 128>;
+ };
+
+ vsp@fe938000 {
+ compatible = "renesas,vsp1";
+ reg = <0 0xfe938000 0 0x8000>;
+ interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 127>;
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ resets = <&cpg 127>;
+ };
+
du: display@feb00000 {
- reg = <0 0xfeb00000 0 0x40000>,
- <0 0xfeb90000 0 0x1c>;
+ compatible = "renesas,du-r8a7744";
+ reg = <0 0xfeb00000 0 0x40000>;
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 724>,
+ <&cpg CPG_MOD 723>;
+ clock-names = "du.0", "du.1";
+ status = "disabled";
ports {
#address-cells = <1>;
@@ -1660,10 +1666,36 @@
port@1 {
reg = <1>;
du_out_lvds0: endpoint {
+ remote-endpoint = <&lvds0_in>;
+ };
+ };
+ };
+ };
+
+ lvds0: lvds@feb90000 {
+ compatible = "renesas,r8a7744-lvds";
+ reg = <0 0xfeb90000 0 0x1c>;
+ clocks = <&cpg CPG_MOD 726>;
+ power-domains = <&sysc R8A7744_PD_ALWAYS_ON>;
+ resets = <&cpg 726>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds0_in: endpoint {
+ remote-endpoint = <&du_out_lvds0>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ lvds0_out: endpoint {
};
};
};
- /* placeholder */
};
prr: chipid@ff000044 {
diff --git a/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts b/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
index 40b7f98d6013..77d18242ef59 100644
--- a/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
+++ b/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
@@ -84,12 +84,30 @@
clock-frequency = <20000000>;
};
+&i2c3 {
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ rtc@51 {
+ compatible = "nxp,pcf85263";
+ reg = <0x51>;
+ };
+};
+
&pfc {
avb_pins: avb {
groups = "avb_mdio", "avb_gmii_tx_rx";
function = "avb";
};
+ i2c3_pins: i2c3 {
+ groups = "i2c3_c";
+ function = "i2c3";
+ };
+
mmc_pins_uhs: mmc_uhs {
groups = "mmc_data8", "mmc_ctrl";
function = "mmc";
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 05db0ccad7a6..10d996d2941f 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -367,6 +367,30 @@
status = "disabled";
};
+ hscif0: serial@ffe48000 {
+ compatible = "renesas,hscif-r8a7778",
+ "renesas,rcar-gen1-hscif", "renesas,hscif";
+ reg = <0xffe48000 96>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_HSCIF0>,
+ <&cpg_clocks R8A7778_CLK_S>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&cpg_clocks>;
+ status = "disabled";
+ };
+
+ hscif1: serial@ffe49000 {
+ compatible = "renesas,hscif-r8a7778",
+ "renesas,rcar-gen1-hscif", "renesas,hscif";
+ reg = <0xffe49000 96>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7778_CLK_HSCIF1>,
+ <&cpg_clocks R8A7778_CLK_S>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&cpg_clocks>;
+ status = "disabled";
+ };
+
mmcif: mmc@ffe4e000 {
compatible = "renesas,mmcif-r8a7778", "renesas,sh-mmcif";
reg = <0xffe4e000 0x100>;
@@ -535,6 +559,8 @@
<&cpg_clocks R8A7778_CLK_P>,
<&cpg_clocks R8A7778_CLK_P>,
<&cpg_clocks R8A7778_CLK_P>,
+ <&cpg_clocks R8A7778_CLK_S>,
+ <&cpg_clocks R8A7778_CLK_S>,
<&cpg_clocks R8A7778_CLK_P>,
<&cpg_clocks R8A7778_CLK_P>,
<&cpg_clocks R8A7778_CLK_P>,
@@ -551,6 +577,7 @@
R8A7778_CLK_SCIF0 R8A7778_CLK_SCIF1
R8A7778_CLK_SCIF2 R8A7778_CLK_SCIF3
R8A7778_CLK_SCIF4 R8A7778_CLK_SCIF5
+ R8A7778_CLK_HSCIF0 R8A7778_CLK_HSCIF1
R8A7778_CLK_TMU0 R8A7778_CLK_TMU1
R8A7778_CLK_TMU2 R8A7778_CLK_SSI0
R8A7778_CLK_SSI1 R8A7778_CLK_SSI2
@@ -560,6 +587,7 @@
clock-output-names =
"i2c0", "i2c1", "i2c2", "i2c3", "scif0",
"scif1", "scif2", "scif3", "scif4", "scif5",
+ "hscif0", "hscif1",
"tmu0", "tmu1", "tmu2", "ssi0", "ssi1",
"ssi2", "ssi3", "sru", "hspi";
};
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index 3bc133d9489c..3ff259207527 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -287,6 +287,32 @@
status = "disabled";
};
+ hscif0: serial@ffe48000 {
+ compatible = "renesas,hscif-r8a7779",
+ "renesas,rcar-gen1-hscif", "renesas,hscif";
+ reg = <0xffe48000 96>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7779_CLK_HSCIF0>,
+ <&cpg_clocks R8A7779_CLK_S>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&cpg_clocks>;
+ status = "disabled";
+ };
+
+ hscif1: serial@ffe49000 {
+ compatible = "renesas,hscif-r8a7779",
+ "renesas,rcar-gen1-hscif", "renesas,hscif";
+ reg = <0xffe49000 96>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7779_CLK_HSCIF1>,
+ <&cpg_clocks R8A7779_CLK_S>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&cpg_clocks>;
+ status = "disabled";
+ };
+
pfc: pin-controller@fffc0000 {
compatible = "renesas,pfc-r8a7779";
reg = <0xfffc0000 0x23c>;
diff --git a/arch/arm/boot/dts/r8a7790-stout.dts b/arch/arm/boot/dts/r8a7790-stout.dts
index 629da4cee1b9..7a7d3b84d1a6 100644
--- a/arch/arm/boot/dts/r8a7790-stout.dts
+++ b/arch/arm/boot/dts/r8a7790-stout.dts
@@ -94,9 +94,8 @@
status = "okay";
clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&cpg CPG_MOD 722>,
- <&cpg CPG_MOD 726>, <&cpg CPG_MOD 725>,
<&osc1_clk>;
- clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1", "dclkin.0";
+ clock-names = "du.0", "du.1", "du.2", "dclkin.0";
ports {
port@0 {
@@ -104,11 +103,21 @@
remote-endpoint = <&adv7511_in>;
};
};
+ };
+};
+
+&lvds0 {
+ ports {
port@1 {
lvds_connector0: endpoint {
};
};
- port@2 {
+ };
+};
+
+&lvds1 {
+ ports {
+ port@1 {
lvds_connector1: endpoint {
};
};
diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts
index 0fd19f9723df..0173eb11ec28 100644
--- a/arch/arm/boot/dts/rk3036-kylin.dts
+++ b/arch/arm/boot/dts/rk3036-kylin.dts
@@ -310,7 +310,6 @@
};
&i2s {
- #sound-dai-cells = <0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index d560fc4051c5..59c90863b0e7 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -289,6 +289,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&i2s_bus>;
+ #sound-dai-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
index 1c925f20dba0..0a56a2f1bc4d 100644
--- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts
+++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
@@ -171,7 +171,6 @@
pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus4>;
bus-width = <4>;
- disable-wp;
};
&pwm3 {
diff --git a/arch/arm/boot/dts/rk3066a-mk808.dts b/arch/arm/boot/dts/rk3066a-mk808.dts
index b6a8a82d219e..9d2216d71f70 100644
--- a/arch/arm/boot/dts/rk3066a-mk808.dts
+++ b/arch/arm/boot/dts/rk3066a-mk808.dts
@@ -101,7 +101,6 @@
&mmc1 {
bus-width = <4>;
- disable-wp;
non-removable;
pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus4>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/rk3066a-rayeager.dts b/arch/arm/boot/dts/rk3066a-rayeager.dts
index cd126b927ba8..949fa800582d 100644
--- a/arch/arm/boot/dts/rk3066a-rayeager.dts
+++ b/arch/arm/boot/dts/rk3066a-rayeager.dts
@@ -147,7 +147,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_rst>;
@@ -309,7 +308,6 @@
&mmc1 {
bus-width = <4>;
- disable-wp;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>;
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index 30dc8af0bdcb..653127a377fa 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -44,6 +44,11 @@
};
};
+ display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vop0_out>, <&vop1_out>;
+ };
+
sram: sram@10080000 {
compatible = "mmio-sram";
reg = <0x10080000 0x10000>;
@@ -57,6 +62,48 @@
};
};
+ vop0: vop@1010c000 {
+ compatible = "rockchip,rk3066-vop";
+ reg = <0x1010c000 0x19c>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_LCDC0>,
+ <&cru DCLK_LCDC0>,
+ <&cru HCLK_LCDC0>;
+ clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+ power-domains = <&power RK3066_PD_VIO>;
+ resets = <&cru SRST_LCDC0_AXI>,
+ <&cru SRST_LCDC0_AHB>,
+ <&cru SRST_LCDC0_DCLK>;
+ reset-names = "axi", "ahb", "dclk";
+ status = "disabled";
+
+ vop0_out: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ vop1: vop@1010e000 {
+ compatible = "rockchip,rk3066-vop";
+ reg = <0x1010e000 0x19c>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_LCDC1>,
+ <&cru DCLK_LCDC1>,
+ <&cru HCLK_LCDC1>;
+ clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+ power-domains = <&power RK3066_PD_VIO>;
+ resets = <&cru SRST_LCDC1_AXI>,
+ <&cru SRST_LCDC1_AHB>,
+ <&cru SRST_LCDC1_DCLK>;
+ reset-names = "axi", "ahb", "dclk";
+ status = "disabled";
+
+ vop1_out: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
i2s0: i2s@10118000 {
compatible = "rockchip,rk3066-i2s";
reg = <0x10118000 0x2000>;
@@ -669,6 +716,7 @@
<&cru SCLK_CIF0>,
<&cru ACLK_CIF0>,
<&cru HCLK_CIF0>,
+ <&cru HCLK_HDMI>,
<&cru ACLK_IPP>,
<&cru HCLK_IPP>,
<&cru ACLK_RGA>,
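
Note on the rk3066a.dtsi hunk above: it adds the two VOP (video output processor) nodes and a display-subsystem node that lists their output ports; a board dts then enables a VOP and wires an encoder endpoint under the matching vop*_out port. A hedged board-side sketch follows (the HDMI labels are hypothetical and not part of this series):

	&vop0 {
		status = "okay";
	};

	&vop0_out {
		vop0_out_hdmi: endpoint@0 {
			reg = <0>;
			remote-endpoint = <&hdmi_in_vop0>;	/* hypothetical encoder endpoint */
		};
	};
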
diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
index a7477a09fbe8..c8b62bbd6a4a 100644
--- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts
+++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
@@ -227,7 +227,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk &emmc_cmd>;
@@ -408,6 +407,21 @@
&i2c2 {
clock-frequency = <400000>;
status = "okay";
+
+ ft5606: touchscreen@3e {
+ compatible = "edt,edt-ft5506";
+ reg = <0x3e>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PB7 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tp_int &tp_rst>;
+ reset-gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_LOW>;
+ touchscreen-inverted-y;
+ /* hw ts resolution does not match display */
+ touchscreen-size-y = <1024>;
+ touchscreen-size-x = <768>;
+ touchscreen-swapped-x-y;
+ };
};
&i2c3 {
@@ -446,7 +460,6 @@
&mmc1 {
bus-width = <4>;
cap-sd-highspeed;
- cap-mmc-highspeed;
keep-power-in-suspend;
mmc-pwrseq = <&sdio_pwrseq>;
non-removable;
@@ -526,7 +539,7 @@
};
cif1_pdn: cif1-pdn {
- rockchip,pins = <3 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
cif_avdd_en: cif-avdd-en {
diff --git a/arch/arm/boot/dts/rk3188-px3-evb.dts b/arch/arm/boot/dts/rk3188-px3-evb.dts
index 9ae65c767c90..c0eaa9c5490b 100644
--- a/arch/arm/boot/dts/rk3188-px3-evb.dts
+++ b/arch/arm/boot/dts/rk3188-px3-evb.dts
@@ -62,7 +62,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_rst>;
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 4acb501dd3f8..3ed49898f4b2 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -719,7 +719,6 @@
pm_qos = <&qos_lcdc0>,
<&qos_lcdc1>,
<&qos_cif0>,
- <&qos_cif1>,
<&qos_ipp>,
<&qos_rga>;
};
diff --git a/arch/arm/boot/dts/rk3229-evb.dts b/arch/arm/boot/dts/rk3229-evb.dts
index 4df7accc3ad7..350497a3ca86 100644
--- a/arch/arm/boot/dts/rk3229-evb.dts
+++ b/arch/arm/boot/dts/rk3229-evb.dts
@@ -137,7 +137,6 @@
&emmc {
cap-mmc-highspeed;
- disable-wp;
non-removable;
status = "okay";
};
diff --git a/arch/arm/boot/dts/rk3288-fennec.dts b/arch/arm/boot/dts/rk3288-fennec.dts
index b1b56dfdfdba..29af26e6d442 100644
--- a/arch/arm/boot/dts/rk3288-fennec.dts
+++ b/arch/arm/boot/dts/rk3288-fennec.dts
@@ -37,7 +37,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>;
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload.dts b/arch/arm/boot/dts/rk3288-firefly-reload.dts
index 58ea8bed040a..3a646c5f4fcf 100644
--- a/arch/arm/boot/dts/rk3288-firefly-reload.dts
+++ b/arch/arm/boot/dts/rk3288-firefly-reload.dts
@@ -254,7 +254,6 @@
bus-width = <4>;
cap-sd-highspeed;
cap-sdio-irq;
- disable-wp;
mmc-pwrseq = <&sdio_pwrseq>;
non-removable;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index 504ab1177aa7..fb7365b604bb 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -87,7 +87,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_pwr>, <&emmc_bus8>;
diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts
index 596435e03132..6a51940398b5 100644
--- a/arch/arm/boot/dts/rk3288-popmetal.dts
+++ b/arch/arm/boot/dts/rk3288-popmetal.dts
@@ -109,7 +109,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
non-removable;
diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
index 6a30cadad88a..5b7e1c9e92e1 100644
--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
+++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
@@ -133,7 +133,6 @@
bus-width = <4>;
cap-sd-highspeed;
cap-sdio-irq;
- disable-wp;
mmc-pwrseq = <&sdio_pwrseq>;
non-removable;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/rk3288-tinker-s.dts b/arch/arm/boot/dts/rk3288-tinker-s.dts
index 37093922b482..d97da89bcd51 100644
--- a/arch/arm/boot/dts/rk3288-tinker-s.dts
+++ b/arch/arm/boot/dts/rk3288-tinker-s.dts
@@ -15,7 +15,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>;
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index d8bf939a3aff..0bc2409f6903 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -10,6 +10,10 @@
#include "rk3288.dtsi"
/ {
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
/*
* The default coreboot on veyron devices ignores memory@0 nodes
* and would instead create another memory node.
diff --git a/arch/arm/boot/dts/rk3288-vyasa.dts b/arch/arm/boot/dts/rk3288-vyasa.dts
index 4856a9fc0aea..40b232eb5011 100644
--- a/arch/arm/boot/dts/rk3288-vyasa.dts
+++ b/arch/arm/boot/dts/rk3288-vyasa.dts
@@ -121,7 +121,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>;
diff --git a/arch/arm/boot/dts/rv1108-elgin-r1.dts b/arch/arm/boot/dts/rv1108-elgin-r1.dts
new file mode 100644
index 000000000000..1c4507b66fdd
--- /dev/null
+++ b/arch/arm/boot/dts/rv1108-elgin-r1.dts
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/*
+ * Copyright (C) 2018 O.S. Systems Software LTDA.
+ */
+
+/dts-v1/;
+
+#include "rv1108.dtsi"
+
+/ {
+ model = "Elgin RV1108 R1 board";
+ compatible = "elgin,rv1108-r1", "rockchip,rv1108";
+
+ memory@60000000 {
+ device_type = "memory";
+ reg = <0x60000000 0x08000000>;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ vcc_sys: vsys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_core>;
+};
+
+&emmc {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ disable-wp;
+ no-sd;
+ no-sdio;
+ non-removable;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+ status = "okay";
+};
+
+&gmac {
+ clock_in_out = "output";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rmii_pins>;
+ snps,reset-gpio = <&gpio1 RK_PC1 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ i2c-scl-rising-time-ns = <275>;
+ i2c-scl-falling-time-ns = <16>;
+ status = "okay";
+
+ rk805: pmic@18 {
+ compatible = "rockchip,rk805";
+ reg = <0x18>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB4 IRQ_TYPE_LEVEL_LOW>;
+ rockchip,system-power-controller;
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc5-supply = <&vdd_buck2>;
+ vcc6-supply = <&vdd_buck2>;
+
+ regulators {
+ vdd_core: DCDC_REG1 {
+ regulator-name= "vdd_core";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vdd_buck2: DCDC_REG2 {
+ regulator-name= "vdd_buck2";
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name= "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_io: DCDC_REG4 {
+ regulator-name= "vcc_io";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd_10: LDO_REG1 {
+ regulator-name= "vdd_10";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_18: LDO_REG2 {
+ regulator-name= "vcc_18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd10_pmu: LDO_REG3 {
+ regulator-name= "vdd10_pmu";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+ };
+ };
+};
+
+&spi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spim1_clk &spim1_cs0 &spim1_tx &spim1_rx>;
+ status = "okay";
+
+ dh2228fv: dac@0 {
+ compatible = "rohm,dh2228fv";
+ reg = <0>;
+ spi-max-frequency = <24000000>;
+ spi-cpha;
+ spi-cpol;
+ };
+};
+
+&u2phy {
+ status = "okay";
+
+ u2phy_host: host-port {
+ status = "okay";
+ };
+
+ u2phy_otg: otg-port {
+ status = "okay";
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host_ehci {
+ status = "okay";
+};
+
+&usb_host_ohci {
+ status = "okay";
+};
+
+&usb_otg {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rv1108-evb.dts b/arch/arm/boot/dts/rv1108-evb.dts
index 203d83e3bbf5..30f3d0470ad9 100644
--- a/arch/arm/boot/dts/rv1108-evb.dts
+++ b/arch/arm/boot/dts/rv1108-evb.dts
@@ -97,8 +97,8 @@
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
- regulator-state-enabled;
- regulator-state-uv = <900000>;
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
};
};
@@ -107,7 +107,7 @@
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <2000000>;
regulator-state-mem {
- regulator-state-disabled;
+ regulator-off-in-suspend;
};
};
@@ -116,7 +116,7 @@
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
- regulator-state-enabled;
+ regulator-on-in-suspend;
};
};
@@ -127,8 +127,8 @@
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
- regulator-state-enabled;
- regulator-state-uv = <3300000>;
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
};
};
@@ -139,7 +139,7 @@
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
- regulator-state-disabled;
+ regulator-off-in-suspend;
};
};
@@ -150,7 +150,7 @@
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
- regulator-state-disabled;
+ regulator-off-in-suspend;
};
};
@@ -161,8 +161,8 @@
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
- regulator-state-enabled;
- regulator-state-uv = <1000000>;
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
};
};
};
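
For context on the rv1108-evb hunks above: they replace older suspend-state property names with the generic regulator binding used elsewhere in this series (regulator-on-in-suspend / regulator-off-in-suspend plus regulator-suspend-microvolt). A minimal sketch of the current form, with illustrative values only:

	regulator-state-mem {
		regulator-on-in-suspend;			/* keep this rail powered in suspend-to-mem */
		regulator-suspend-microvolt = <900000>;		/* voltage to apply while suspended */
	};
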
diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
index d31370ff28f4..f47ac86d2852 100644
--- a/arch/arm/boot/dts/rv1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -207,6 +207,7 @@
clocks = <&cru SCLK_SPI>, <&cru PCLK_SPI>;
clock-names = "spiclk", "apb_pclk";
dmas = <&pdma 8>, <&pdma 9>;
+ dma-names = "tx", "rx";
#dma-cells = <2>;
#address-cells = <1>;
#size-cells = <0>;
@@ -833,6 +834,42 @@
};
};
+ spim0 {
+ spim0_clk: spim0-clk {
+ rockchip,pins = <1 RK_PD0 RK_FUNC_2 &pcfg_pull_up>;
+ };
+
+ spim0_cs0: spim0-cs0 {
+ rockchip,pins = <1 RK_PD1 RK_FUNC_2 &pcfg_pull_up>;
+ };
+
+ spim0_tx: spim0-tx {
+ rockchip,pins = <1 RK_PD3 RK_FUNC_2 &pcfg_pull_up>;
+ };
+
+ spim0_rx: spim0-rx {
+ rockchip,pins = <1 RK_PD2 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ };
+
+ spim1 {
+ spim1_clk: spim1-clk {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ spim1_cs0: spim1-cs0 {
+ rockchip,pins = <0 RK_PA4 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ spim1_rx: spim1-rx {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ spim1_tx: spim1-tx {
+ rockchip,pins = <0 RK_PA7 RK_FUNC_1 &pcfg_pull_up>;
+ };
+ };
+
tsadc {
otp_out: otp-out {
rockchip,pins = <0 RK_PB7 RK_FUNC_1 &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/s3c2416-smdk2416.dts b/arch/arm/boot/dts/s3c2416-smdk2416.dts
index 5164386aff3a..cb371bf72f64 100644
--- a/arch/arm/boot/dts/s3c2416-smdk2416.dts
+++ b/arch/arm/boot/dts/s3c2416-smdk2416.dts
@@ -19,9 +19,12 @@
clocks {
compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
- xti: xti {
+ xti: xti@0 {
compatible = "fixed-clock";
+ reg = <0>;
clock-frequency = <12000000>;
clock-output-names = "xti";
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
index 575094ea7024..8ff70b856334 100644
--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
+++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
@@ -23,6 +23,31 @@
0x50000000 0x08000000>;
};
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mfc_left: region@43000000 {
+ compatible = "shared-dma-pool";
+ no-map;
+ reg = <0x43000000 0x2000000>;
+ };
+
+ mfc_right: region@51000000 {
+ compatible = "shared-dma-pool";
+ no-map;
+ reg = <0x51000000 0x2000000>;
+ };
+ };
+
+ vibrator_pwr: regulator-fixed-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vibrator-en";
+ enable-active-high;
+ gpio = <&gpj1 1 GPIO_ACTIVE_HIGH>;
+ };
+
wifi_pwrseq: wifi-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpg1 2 GPIO_ACTIVE_LOW>;
@@ -296,6 +321,22 @@
reg = <0x36>;
};
};
+
+ vibrator: pwm-vibrator {
+ compatible = "pwm-vibrator";
+ pwms = <&pwm 1 44642 0>;
+ pwm-names = "enable";
+ vcc-supply = <&vibrator_pwr>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1_out>;
+ };
+
+ poweroff: syscon-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&pmu_syscon>;
+ offset = <0x681c>; /* PS_HOLD_CONTROL */
+ value = <0x5200>;
+ };
};
&fimd {
@@ -329,6 +370,27 @@
status = "okay";
};
+&i2c2 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+ samsung,i2c-slave-addr = <0x10>;
+ status = "okay";
+
+ touchscreen@4a {
+ compatible = "atmel,maxtouch";
+ reg = <0x4a>;
+ interrupt-parent = <&gpj0>;
+ interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_irq>;
+ reset-gpios = <&gpj1 3 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&mfc {
+ memory-region = <&mfc_left>, <&mfc_right>;
+};
+
&pinctrl0 {
wlan_bt_en: wlan-bt-en {
samsung,pins = "gpb-5";
@@ -350,6 +412,13 @@
samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
+ bt_host_wake: bt-host-wake {
+ samsung,pins = "gph2-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
tf_detect: tf-detect {
samsung,pins = "gph3-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
@@ -362,6 +431,17 @@
samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
+
+ ts_irq: ts-irq {
+ samsung,pins = "gpj0-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+};
+
+&pwm {
+ samsung,pwm-outputs = <1>;
};
&sdhci1 {
@@ -399,6 +479,16 @@
&uart0 {
status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ max-speed = <115200>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_data &uart0_fctl &bt_host_wake>;
+ shutdown-gpios = <&gpb 3 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gph2 5 GPIO_ACTIVE_HIGH>;
+ };
};
&uart1 {
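
The aries additions above route system power-off through the PMU syscon. As far as I can tell from the syscon-poweroff binding, the driver simply writes "value" (optionally combined with a mask) to "offset" within the referenced regmap at power-down, so nothing beyond what the hunk adds is needed; restated as a sketch:

	poweroff: syscon-poweroff {
		compatible = "syscon-poweroff";
		regmap = <&pmu_syscon>;		/* syscon exposing the PMU register space */
		offset = <0x681c>;		/* PS_HOLD_CONTROL */
		value = <0x5200>;		/* written at power-off */
	};
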
diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
index ccf761b1babf..07a8d9bbe5b8 100644
--- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
@@ -11,13 +11,6 @@
chosen {
stdout-path = &uart2;
- /*
- * It's hard to change those parameters in stock bootloader,
- * since it requires special hardware/cable.
- * Let's hardocde bootargs for now, till u-boot port is finished,
- * with which it should be easier.
- */
- bootargs = "root=/dev/mmcblk1p1 rw rootwait ignore_loglevel earlyprintk";
};
gpio-keys {
diff --git a/arch/arm/boot/dts/s5pv210-galaxys.dts b/arch/arm/boot/dts/s5pv210-galaxys.dts
index 842276749717..cf161bbfbacf 100644
--- a/arch/arm/boot/dts/s5pv210-galaxys.dts
+++ b/arch/arm/boot/dts/s5pv210-galaxys.dts
@@ -11,13 +11,6 @@
chosen {
stdout-path = &uart2;
- /*
- * It's hard to change those parameters in stock bootloader,
- * since it requires special hardware/cable.
- * Let's hardocde bootargs for now, till u-boot port is finished,
- * with which it should be easier.
- */
- bootargs = "root=/dev/mmcblk2p1 rw rootwait ignore_loglevel earlyprintk";
};
nand_pwrseq: nand-pwrseq {
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index 12eac8930eac..a44d5eb56bed 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -25,6 +25,8 @@
aliases {
csis0 = &csis0;
+ dmc0 = &dmc0;
+ dmc1 = &dmc1;
fimc0 = &fimc0;
fimc1 = &fimc1;
fimc2 = &fimc2;
@@ -78,7 +80,7 @@
};
};
- onenand: onenand@b0000000 {
+ onenand: onenand@b0600000 {
compatible = "samsung,s5pv210-onenand";
reg = <0xb0600000 0x2000>,
<0xb0000000 0x20000>,
@@ -511,7 +513,7 @@
};
fimd: fimd@f8000000 {
- compatible = "samsung,exynos4210-fimd";
+ compatible = "samsung,s5pv210-fimd";
interrupt-parent = <&vic2>;
reg = <0xf8000000 0x20000>;
interrupt-names = "fifo", "vsync", "lcd_sys";
@@ -521,6 +523,16 @@
status = "disabled";
};
+ dmc0: dmc@f0000000 {
+ compatible = "samsung,s5pv210-dmc";
+ reg = <0xf0000000 0x1000>;
+ };
+
+ dmc1: dmc@f1400000 {
+ compatible = "samsung,s5pv210-dmc";
+ reg = <0xf1400000 0x1000>;
+ };
+
g2d: g2d@fa000000 {
compatible = "samsung,s5pv210-g2d";
reg = <0xfa000000 0x1000>;
@@ -542,6 +554,15 @@
#dma-requests = <1>;
};
+ rotator: rotator@fa300000 {
+ compatible = "samsung,s5pv210-rotator";
+ reg = <0xfa300000 0x1000>;
+ interrupt-parent = <&vic2>;
+ interrupts = <4>;
+ clocks = <&clocks CLK_ROTATOR>;
+ clock-names = "rotator";
+ };
+
i2c1: i2c@fab00000 {
compatible = "samsung,s3c2440-i2c";
reg = <0xfab00000 0x1000>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index dc2280d9127f..d159ee42ef29 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -43,13 +43,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/dma/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/at91.h>
#include <dt-bindings/iio/adc/at91-sama5d2_adc.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel SAMA5D2 family SoC";
compatible = "atmel,sama5d2";
interrupt-parent = <&aic>;
@@ -113,6 +114,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 1408fa4a62e4..02198772eb81 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -8,7 +8,6 @@
* Licensed under GPLv2 or later.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/dma/at91.h>
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -16,6 +15,8 @@
#include <dt-bindings/clock/at91.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel SAMA5D3 family SoC";
compatible = "atmel,sama5d3", "atmel,sama5";
interrupt-parent = <&aic>;
@@ -56,6 +57,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x8000000>;
};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 2604fd07dd53..6c1e41f94549 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -43,7 +43,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/clock/at91.h>
#include <dt-bindings/dma/at91.h>
#include <dt-bindings/pinctrl/at91.h>
@@ -51,6 +50,8 @@
#include <dt-bindings/gpio/gpio.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
model = "Atmel SAMA5D4 family SoC";
compatible = "atmel,sama5d4";
interrupt-parent = <&aic>;
@@ -90,6 +91,7 @@
};
memory {
+ device_type = "memory";
reg = <0x20000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/skeleton.dtsi b/arch/arm/boot/dts/skeleton.dtsi
deleted file mode 100644
index 34eda68d9ea2..000000000000
--- a/arch/arm/boot/dts/skeleton.dtsi
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file is deprecated, and will be removed once existing users have been
- * updated. New dts{,i} files should *not* include skeleton.dtsi, and should
- * instead explicitly provide the below nodes only as required.
- *
- * Skeleton device tree; the bare minimum needed to boot; just include and
- * add a compatible value. The bootloader will typically populate the memory
- * node.
- */
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- chosen { };
- aliases { };
- memory { device_type = "memory"; reg = <0 0>; };
-};
diff --git a/arch/arm/boot/dts/skeleton64.dtsi b/arch/arm/boot/dts/skeleton64.dtsi
deleted file mode 100644
index 54e637752b9d..000000000000
--- a/arch/arm/boot/dts/skeleton64.dtsi
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Skeleton device tree in the 64 bits version; the bare minimum
- * needed to boot; just include and add a compatible value. The
- * bootloader will typically populate the memory node.
- */
-
-/ {
- #address-cells = <2>;
- #size-cells = <2>;
- chosen { };
- aliases { };
- memory { device_type = "memory"; reg = <0 0 0 0>; };
-};
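
The two files deleted above remove the deprecated skeleton includes; as the sama5d2/sama5d3/sama5d4 hunks above and the spear/stm32 hunks below show, each SoC .dtsi now declares its own address/size cells and every board carries a real memory node. A minimal sketch of the replacement pattern (base address and size are placeholders):

	/ {
		#address-cells = <1>;
		#size-cells = <1>;

		memory@20000000 {
			device_type = "memory";
			reg = <0x20000000 0x10000000>;	/* placeholder: actual base/size come from the board */
		};
	};
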
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index dcb8fba3d709..ec1966480f2f 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -84,6 +84,7 @@
#dma-requests = <32>;
clocks = <&l4_main_clk>;
clock-names = "apb_pclk";
+ resets = <&rst DMA_RESET>;
};
};
@@ -100,6 +101,7 @@
reg = <0xffc00000 0x1000>;
interrupts = <0 131 4>, <0 132 4>, <0 133 4>, <0 134 4>;
clocks = <&can0_clk>;
+ resets = <&rst CAN0_RESET>;
status = "disabled";
};
@@ -108,6 +110,7 @@
reg = <0xffc01000 0x1000>;
interrupts = <0 135 4>, <0 136 4>, <0 137 4>, <0 138 4>;
clocks = <&can1_clk>;
+ resets = <&rst CAN1_RESET>;
status = "disabled";
};
@@ -585,6 +588,7 @@
compatible = "snps,dw-apb-gpio";
reg = <0xff708000 0x1000>;
clocks = <&l4_mp_clk>;
+ resets = <&rst GPIO0_RESET>;
status = "disabled";
porta: gpio-controller@0 {
@@ -605,6 +609,7 @@
compatible = "snps,dw-apb-gpio";
reg = <0xff709000 0x1000>;
clocks = <&l4_mp_clk>;
+ resets = <&rst GPIO1_RESET>;
status = "disabled";
portb: gpio-controller@0 {
@@ -625,6 +630,7 @@
compatible = "snps,dw-apb-gpio";
reg = <0xff70a000 0x1000>;
clocks = <&l4_mp_clk>;
+ resets = <&rst GPIO2_RESET>;
status = "disabled";
portc: gpio-controller@0 {
@@ -735,6 +741,7 @@
#size-cells = <0>;
clocks = <&l4_mp_clk>, <&sdmmc_clk_divided>;
clock-names = "biu", "ciu";
+ resets = <&rst SDMMC_RESET>;
status = "disabled";
};
@@ -748,6 +755,7 @@
interrupts = <0x0 0x90 0x4>;
clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>;
clock-names = "nand", "nand_x", "ecc";
+ resets = <&rst NAND_RESET>;
status = "disabled";
};
@@ -767,6 +775,7 @@
cdns,fifo-width = <4>;
cdns,trigger-address = <0x00000000>;
clocks = <&qspi_clk>;
+ resets = <&rst QSPI_RESET>;
status = "disabled";
};
@@ -785,6 +794,7 @@
sdr: sdr@ffc25000 {
compatible = "altr,sdr-ctl", "syscon";
reg = <0xffc25000 0x1000>;
+ resets = <&rst SDR_RESET>;
};
sdramedac {
@@ -801,6 +811,7 @@
interrupts = <0 154 4>;
num-cs = <4>;
clocks = <&spi_m_clk>;
+ resets = <&rst SPIM0_RESET>;
status = "disabled";
};
@@ -812,6 +823,7 @@
interrupts = <0 155 4>;
num-cs = <4>;
clocks = <&spi_m_clk>;
+ resets = <&rst SPIM1_RESET>;
status = "disabled";
};
@@ -878,6 +890,7 @@
dmas = <&pdma 28>,
<&pdma 29>;
dma-names = "tx", "rx";
+ resets = <&rst UART0_RESET>;
};
uart1: serial1@ffc03000 {
@@ -890,6 +903,7 @@
dmas = <&pdma 30>,
<&pdma 31>;
dma-names = "tx", "rx";
+ resets = <&rst UART1_RESET>;
};
usbphy0: usbphy {
@@ -929,6 +943,7 @@
reg = <0xffd02000 0x1000>;
interrupts = <0 171 4>;
clocks = <&osc1>;
+ resets = <&rst L4WD0_RESET>;
status = "disabled";
};
@@ -937,6 +952,7 @@
reg = <0xffd03000 0x1000>;
interrupts = <0 172 4>;
clocks = <&osc1>;
+ resets = <&rst L4WD1_RESET>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index e41fa23481c3..ae24599d5829 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -470,6 +470,7 @@
tx-fifo-depth = <4096>;
rx-fifo-depth = <16384>;
clocks = <&l4_mp_clk>;
+ resets = <&rst EMAC2_RESET>;
clock-names = "stmmaceth";
snps,axi-config = <&socfpga_axi_setup>;
status = "disabled";
@@ -480,6 +481,7 @@
#size-cells = <0>;
compatible = "snps,dw-apb-gpio";
reg = <0xffc02900 0x100>;
+ resets = <&rst GPIO0_RESET>;
status = "disabled";
porta: gpio-controller@0 {
@@ -499,6 +501,7 @@
#size-cells = <0>;
compatible = "snps,dw-apb-gpio";
reg = <0xffc02a00 0x100>;
+ resets = <&rst GPIO1_RESET>;
status = "disabled";
portb: gpio-controller@0 {
@@ -518,6 +521,7 @@
#size-cells = <0>;
compatible = "snps,dw-apb-gpio";
reg = <0xffc02b00 0x100>;
+ resets = <&rst GPIO2_RESET>;
status = "disabled";
portc: gpio-controller@0 {
@@ -548,6 +552,7 @@
reg = <0xffc02200 0x100>;
interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&l4_sp_clk>;
+ resets = <&rst I2C0_RESET>;
status = "disabled";
};
@@ -558,6 +563,7 @@
reg = <0xffc02300 0x100>;
interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&l4_sp_clk>;
+ resets = <&rst I2C1_RESET>;
status = "disabled";
};
@@ -568,6 +574,7 @@
reg = <0xffc02400 0x100>;
interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&l4_sp_clk>;
+ resets = <&rst I2C2_RESET>;
status = "disabled";
};
@@ -578,6 +585,7 @@
reg = <0xffc02500 0x100>;
interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&l4_sp_clk>;
+ resets = <&rst I2C3_RESET>;
status = "disabled";
};
@@ -588,6 +596,7 @@
reg = <0xffc02600 0x100>;
interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&l4_sp_clk>;
+ resets = <&rst I2C4_RESET>;
status = "disabled";
};
@@ -600,6 +609,7 @@
num-cs = <4>;
/*32bit_access;*/
clocks = <&spi_m_clk>;
+ resets = <&rst SPIM0_RESET>;
status = "disabled";
};
@@ -614,6 +624,7 @@
tx-dma-channel = <&pdma 16>;
rx-dma-channel = <&pdma 17>;
clocks = <&spi_m_clk>;
+ resets = <&rst SPIM1_RESET>;
status = "disabled";
};
@@ -642,6 +653,7 @@
fifo-depth = <0x400>;
clocks = <&l4_mp_clk>, <&sdmmc_clk>;
clock-names = "biu", "ciu";
+ resets = <&rst SDMMC_RESET>;
status = "disabled";
};
@@ -655,6 +667,7 @@
interrupts = <0 99 4>;
clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>;
clock-names = "nand", "nand_x", "ecc";
+ resets = <&rst NAND_RESET>;
status = "disabled";
};
@@ -739,6 +752,7 @@
cdns,fifo-width = <4>;
cdns,trigger-address = <0x00000000>;
clocks = <&qspi_clk>;
+ resets = <&rst QSPI_RESET>;
status = "disabled";
};
@@ -815,6 +829,7 @@
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&l4_sp_clk>;
+ resets = <&rst UART0_RESET>;
status = "disabled";
};
@@ -825,6 +840,7 @@
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&l4_sp_clk>;
+ resets = <&rst UART1_RESET>;
status = "disabled";
};
@@ -865,6 +881,7 @@
reg = <0xffd00200 0x100>;
interrupts = <0 119 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&l4_sys_free_clk>;
+ resets = <&rst L4WD0_RESET>;
status = "disabled";
};
@@ -873,6 +890,7 @@
reg = <0xffd00300 0x100>;
interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&l4_sys_free_clk>;
+ resets = <&rst L4WD1_RESET>;
status = "disabled";
};
};
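
The socfpga and socfpga_arria10 hunks above only add resets phandles to peripherals that already exist; the reset controller node (&rst) and the *_RESET indices are defined in the SoC dtsi and its binding header (dt-bindings/reset/altr,rst-mgr.h on Cyclone V and its Arria10 counterpart, as far as I can tell). A hedged consumer-side sketch:

	uart0: serial0@ffc02000 {
		/* ...existing properties unchanged... */
		resets = <&rst UART0_RESET>;	/* reset line handled by the driver via the reset framework */
	};
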
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_chameleon96.dts b/arch/arm/boot/dts/socfpga_cyclone5_chameleon96.dts
new file mode 100644
index 000000000000..f6561766d83f
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga_cyclone5_chameleon96.dts
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Device Tree file for the Chameleon96
+ *
+ * Copyright (c) 2018 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+#include "socfpga_cyclone5.dtsi"
+
+/ {
+ model = "Novetech Chameleon96";
+ compatible = "novtech,chameleon96", "altr,socfpga-cyclone5", "altr,socfpga";
+
+ chosen {
+ bootargs = "earlyprintk";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ name = "memory";
+ device_type = "memory";
+ reg = <0x0 0x20000000>; /* 512MB */
+ };
+
+ regulator_3_3v: 3-3-v-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ user_led1 {
+ label = "green:user1";
+ gpios = <&porta 14 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ user_led2 {
+ label = "green:user2";
+ gpios = <&porta 22 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "mmc0";
+ };
+
+ user_led3 {
+ label = "green:user3";
+ gpios = <&porta 25 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "none";
+ };
+
+ user_led4 {
+ label = "green:user4";
+ gpios = <&portb 3 GPIO_ACTIVE_LOW>;
+ panic-indicator;
+ linux,default-trigger = "none";
+ };
+ };
+};
+
+&gpio0 {
+ status = "okay";
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&i2c0 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+};
+
+&i2c1 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ /* On High speed expansion */
+ label = "HS-I2C2";
+ status = "okay";
+};
+
+&mmc0 {
+ vmmc-supply = <&regulator_3_3v>;
+ vqmmc-supply = <&regulator_3_3v>;
+ status = "okay";
+};
+
+&spi0 {
+ /* On High speed expansion */
+ label = "HS-SPI1";
+ status = "okay";
+};
+
+&spi1 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+};
+
+&uart0 {
+ /* On Low speed expansion */
+ label = "LS-UART1";
+ status = "okay";
+};
+
+&uart1 {
+ /* On Low speed expansion */
+ label = "LS-UART0";
+ status = "okay";
+};
+
+&usbphy0 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 086b4b333249..390df643a174 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -11,9 +11,9 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupt-parent = <&gic>;
cpus {
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index 118135d75899..c47380763cae 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -11,9 +11,9 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupt-parent = <&vic>;
cpus {
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index 00166eb9be86..0a634fb07452 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -9,9 +9,9 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "st,spear600";
cpus {
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 12afdc7467e7..04066f9cb8a3 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Device Tree for the ST-Ericsson Nomadik S8815 board
- * Produced by Calao Systems
+ * Device Tree for the ST Microelectronics Nomadik NHK8815 board
*/
/dts-v1/;
@@ -182,43 +181,12 @@
pinctrl-names = "default";
pinctrl-0 = <&clcd_24bit_mux>;
port {
- nomadik_clcd_pads: endpoint {
+ nomadik_clcd: endpoint {
remote-endpoint = <&nomadik_clcd_panel>;
arm,pl11x,tft-r0g0b0-pads = <16 8 0>;
};
};
- /*
- * WVGA connector 21
- * WVGA (800x480): 4.3" TPG110 TDO43MTEA2 24-bit RGB
- * with TPO touch screen.
- */
- panel {
- compatible = "tpo,tpg110", "panel-dpi";
- grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>;
- scen-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
- scl-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
- sda-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
- backlight = <&bl>;
-
- port {
- nomadik_clcd_panel: endpoint {
- remote-endpoint = <&nomadik_clcd_pads>;
- };
- };
-
- panel-timing {
- clock-frequency = <33200000>;
- hactive = <800>;
- hback-porch = <216>;
- hfront-porch = <40>;
- hsync-len = <1>;
- vactive = <480>;
- vback-porch = <35>;
- vfront-porch = <10>;
- vsync-len = <1>;
- };
- };
};
/* Activate RX/TX and CTS/RTS on UART 0 */
@@ -233,6 +201,55 @@
};
};
+ spi {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /*
+ * As we're dealing with 3wire SPI, we only define SCK
+ * and MOSI (in the spec MOSI is called "SDA").
+ */
+ gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+ /*
+ * It's not actually active high, but the frameworks assume
+ * the polarity of the passed-in GPIO is "normal" (active
+ * high) then actively drives the line low to select the
+ * chip.
+ */
+ cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+
+ /*
+ * WVGA connector 21
+ * WVGA (800x480): 4.3" TPG110 TDO43MTEA2 24-bit RGB
+ * with TPO touch screen.
+ */
+ panel: display@0 {
+ /*
+ * The TPO display driver is connected to a
+ * 5.7" OSD OSD057VA01CT TFT display.
+ */
+ compatible = "tpo,tpg110";
+ reg = <0>;
+ spi-3wire;
+ /* 320 ns min period ~= 3 MHz */
+ spi-max-frequency = <3000000>;
+ /* Width and height from the OSD data sheet */
+ width-mm = <116>;
+ height-mm = <87>;
+ grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>;
+ backlight = <&bl>;
+
+ port {
+ nomadik_clcd_panel: endpoint {
+ remote-endpoint = <&nomadik_clcd>;
+ };
+ };
+ };
+ };
+
bl: backlight {
compatible = "pwm-backlight";
pwms = <&stmpe0_pwm 0 500000>;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index fca76a696d9d..f78b4eabd68c 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -4,13 +4,13 @@
*/
#include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
/ {
#address-cells = <1>;
#size-cells = <1>;
memory {
+ device_type = "memory";
reg = <0x00000000 0x04000000>,
<0x08000000 0x04000000>;
};
diff --git a/arch/arm/boot/dts/ste-u300.dts b/arch/arm/boot/dts/ste-u300.dts
index 1bd1aba3322f..f4e7660fead7 100644
--- a/arch/arm/boot/dts/ste-u300.dts
+++ b/arch/arm/boot/dts/ste-u300.dts
@@ -4,7 +4,6 @@
*/
/dts-v1/;
-/include/ "skeleton.dtsi"
/ {
model = "ST-Ericsson U300";
@@ -22,6 +21,7 @@
};
memory {
+ device_type = "memory";
reg = <0x48000000 0x03c00000>;
};
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index ed7d7f46465e..73ea84df7bf4 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -61,6 +61,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x2000000>;
};
diff --git a/arch/arm/boot/dts/stm32746g-eval.dts b/arch/arm/boot/dts/stm32746g-eval.dts
index 8c081eaf20fe..d90b0d1e18c7 100644
--- a/arch/arm/boot/dts/stm32746g-eval.dts
+++ b/arch/arm/boot/dts/stm32746g-eval.dts
@@ -55,6 +55,7 @@
};
memory {
+ device_type = "memory";
reg = <0xc0000000 0x2000000>;
};
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index 5ceb2cf3777f..e19d0fe7dbda 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
@@ -60,6 +60,7 @@
};
memory {
+ device_type = "memory";
reg = <0x90000000 0x800000>;
};
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index 8d6f028ae285..588b6ef94e93 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -45,12 +45,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
#include "armv7-m.dtsi"
#include <dt-bindings/clock/stm32fx-clock.h>
#include <dt-bindings/mfd/stm32f4-rcc.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
clocks {
clk_hse: clk-hse {
#clock-cells = <0>;
@@ -314,6 +316,26 @@
status = "disabled";
};
+ spi2: spi@40003800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32f4-spi";
+ reg = <0x40003800 0x400>;
+ interrupts = <36>;
+ clocks = <&rcc 0 STM32F4_APB1_CLOCK(SPI2)>;
+ status = "disabled";
+ };
+
+ spi3: spi@40003c00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32f4-spi";
+ reg = <0x40003c00 0x400>;
+ interrupts = <51>;
+ clocks = <&rcc 0 STM32F4_APB1_CLOCK(SPI3)>;
+ status = "disabled";
+ };
+
usart2: serial@40004400 {
compatible = "st,stm32-uart";
reg = <0x40004400 0x400>;
@@ -523,6 +545,26 @@
status = "disabled";
};
+ spi1: spi@40013000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32f4-spi";
+ reg = <0x40013000 0x400>;
+ interrupts = <35>;
+ clocks = <&rcc 0 STM32F4_APB2_CLOCK(SPI1)>;
+ status = "disabled";
+ };
+
+ spi4: spi@40013400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32f4-spi";
+ reg = <0x40013400 0x400>;
+ interrupts = <84>;
+ clocks = <&rcc 0 STM32F4_APB2_CLOCK(SPI4)>;
+ status = "disabled";
+ };
+
syscfg: system-config@40013800 {
compatible = "syscon";
reg = <0x40013800 0x400>;
@@ -587,6 +629,26 @@
};
};
+ spi5: spi@40015000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32f4-spi";
+ reg = <0x40015000 0x400>;
+ interrupts = <85>;
+ clocks = <&rcc 0 STM32F4_APB2_CLOCK(SPI5)>;
+ status = "disabled";
+ };
+
+ spi6: spi@40015400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32f4-spi";
+ reg = <0x40015400 0x400>;
+ interrupts = <86>;
+ clocks = <&rcc 0 STM32F4_APB2_CLOCK(SPI6)>;
+ status = "disabled";
+ };
+
pwrcfg: power-config@40007000 {
compatible = "syscon";
reg = <0x40007000 0x400>;
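
The stm32f429 SPI controller nodes added above are all left disabled; a board dts enables one, supplies pin muxing and describes the attached slave. A minimal sketch under assumed labels (spi5_pins and the flash child are placeholders, not defined in this series):

	&spi5 {
		pinctrl-names = "default";
		pinctrl-0 = <&spi5_pins>;	/* placeholder pin group */
		status = "okay";

		flash@0 {
			compatible = "jedec,spi-nor";	/* example slave */
			reg = <0>;
			spi-max-frequency = <1000000>;
		};
	};
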
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index 7937b43d7788..a3ff04940aec 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
@@ -61,6 +61,7 @@
};
memory {
+ device_type = "memory";
reg = <0x00000000 0x1000000>;
};
diff --git a/arch/arm/boot/dts/stm32f746-disco.dts b/arch/arm/boot/dts/stm32f746-disco.dts
index e3a7bd338d61..0ba9c5b08ab9 100644
--- a/arch/arm/boot/dts/stm32f746-disco.dts
+++ b/arch/arm/boot/dts/stm32f746-disco.dts
@@ -56,6 +56,7 @@
};
memory {
+ device_type = "memory";
reg = <0xC0000000 0x800000>;
};
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
index f48d06a80d1d..a25b7000a3a1 100644
--- a/arch/arm/boot/dts/stm32f746.dtsi
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -40,12 +40,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
#include "armv7-m.dtsi"
#include <dt-bindings/clock/stm32fx-clock.h>
#include <dt-bindings/mfd/stm32f7-rcc.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
clocks {
clk_hse: clk-hse {
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/stm32f769-disco.dts b/arch/arm/boot/dts/stm32f769-disco.dts
index 483d896e2bc1..3c7216844a9b 100644
--- a/arch/arm/boot/dts/stm32f769-disco.dts
+++ b/arch/arm/boot/dts/stm32f769-disco.dts
@@ -56,6 +56,7 @@
};
memory {
+ device_type = "memory";
reg = <0xC0000000 0x1000000>;
};
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index cbdd69ca9e7a..299af0723790 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -40,13 +40,15 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
#include "armv7-m.dtsi"
#include <dt-bindings/clock/stm32h7-clks.h>
#include <dt-bindings/mfd/stm32h7-rcc.h>
#include <dt-bindings/interrupt-controller/irq.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
clocks {
clk_hse: clk-hse {
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/stm32h743i-disco.dts b/arch/arm/boot/dts/stm32h743i-disco.dts
index 45e088c55741..f8040356fe2d 100644
--- a/arch/arm/boot/dts/stm32h743i-disco.dts
+++ b/arch/arm/boot/dts/stm32h743i-disco.dts
@@ -54,6 +54,7 @@
};
memory {
+ device_type = "memory";
reg = <0xd0000000 0x2000000>;
};
diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts
index 3f8e0c4a998d..ef34fa2f79ea 100644
--- a/arch/arm/boot/dts/stm32h743i-eval.dts
+++ b/arch/arm/boot/dts/stm32h743i-eval.dts
@@ -54,6 +54,7 @@
};
memory {
+ device_type = "memory";
reg = <0xd0000000 0x2000000>;
};
diff --git a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
index c4851271e810..9ec4694e93a7 100644
--- a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
@@ -246,6 +246,13 @@
};
};
+ m_can1_sleep_pins_a: m_can1-sleep@0 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 13, ANALOG)>, /* CAN1_TX */
+ <STM32_PINMUX('I', 9, ANALOG)>; /* CAN1_RX */
+ };
+ };
+
pwm2_pins_a: pwm2-0 {
pins {
pinmux = <STM32_PINMUX('A', 3, AF1)>; /* TIM2_CH4 */
diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts
index f77bea49c079..d66edb0c66cd 100644
--- a/arch/arm/boot/dts/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts
@@ -17,6 +17,7 @@
};
memory@c0000000 {
+ device_type = "memory";
reg = <0xC0000000 0x40000000>;
};
@@ -49,6 +50,10 @@
};
};
+&dts {
+ status = "okay";
+};
+
&i2c4 {
pinctrl-names = "default";
pinctrl-0 = <&i2c4_pins_a>;
@@ -72,6 +77,9 @@
&timers6 {
status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
timer@5 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts
index 063ee8ac5dcb..b6aca40b9b90 100644
--- a/arch/arm/boot/dts/stm32mp157c-ev1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts
@@ -124,8 +124,9 @@
};
&m_can1 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "sleep";
pinctrl-0 = <&m_can1_pins_a>;
+ pinctrl-1 = <&m_can1_sleep_pins_a>;
status = "okay";
};
@@ -161,6 +162,9 @@
};
&timers2 {
+ /* spare dmas for other usage (un-delete to enable pwm capture) */
+ /delete-property/dmas;
+ /delete-property/dma-names;
status = "disabled";
pwm {
pinctrl-0 = <&pwm2_pins_a>;
@@ -173,6 +177,8 @@
};
&timers8 {
+ /delete-property/dmas;
+ /delete-property/dma-names;
status = "disabled";
pwm {
pinctrl-0 = <&pwm8_pins_a>;
@@ -185,6 +191,8 @@
};
&timers12 {
+ /delete-property/dmas;
+ /delete-property/dma-names;
status = "disabled";
pwm {
pinctrl-0 = <&pwm12_pins_a>;
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index 8bf1c17f8cef..f8bbfff5950b 100644
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c.dtsi
@@ -84,6 +84,31 @@
};
};
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&dts>;
+
+ trips {
+ cpu_alert1: cpu-alert1 {
+ temperature = <85000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ cpu-crit {
+ temperature = <120000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ };
+ };
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -98,6 +123,12 @@
reg = <0x40000000 0x400>;
clocks = <&rcc TIM2_K>;
clock-names = "int";
+ dmas = <&dmamux1 18 0x400 0x1>,
+ <&dmamux1 19 0x400 0x1>,
+ <&dmamux1 20 0x400 0x1>,
+ <&dmamux1 21 0x400 0x1>,
+ <&dmamux1 22 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4", "up";
status = "disabled";
pwm {
@@ -119,6 +150,13 @@
reg = <0x40001000 0x400>;
clocks = <&rcc TIM3_K>;
clock-names = "int";
+ dmas = <&dmamux1 23 0x400 0x1>,
+ <&dmamux1 24 0x400 0x1>,
+ <&dmamux1 25 0x400 0x1>,
+ <&dmamux1 26 0x400 0x1>,
+ <&dmamux1 27 0x400 0x1>,
+ <&dmamux1 28 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
status = "disabled";
pwm {
@@ -140,6 +178,11 @@
reg = <0x40002000 0x400>;
clocks = <&rcc TIM4_K>;
clock-names = "int";
+ dmas = <&dmamux1 29 0x400 0x1>,
+ <&dmamux1 30 0x400 0x1>,
+ <&dmamux1 31 0x400 0x1>,
+ <&dmamux1 32 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4";
status = "disabled";
pwm {
@@ -161,6 +204,13 @@
reg = <0x40003000 0x400>;
clocks = <&rcc TIM5_K>;
clock-names = "int";
+ dmas = <&dmamux1 55 0x400 0x1>,
+ <&dmamux1 56 0x400 0x1>,
+ <&dmamux1 57 0x400 0x1>,
+ <&dmamux1 58 0x400 0x1>,
+ <&dmamux1 59 0x400 0x1>,
+ <&dmamux1 60 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig";
status = "disabled";
pwm {
@@ -182,6 +232,8 @@
reg = <0x40004000 0x400>;
clocks = <&rcc TIM6_K>;
clock-names = "int";
+ dmas = <&dmamux1 69 0x400 0x1>;
+ dma-names = "up";
status = "disabled";
timer@5 {
@@ -198,6 +250,8 @@
reg = <0x40005000 0x400>;
clocks = <&rcc TIM7_K>;
clock-names = "int";
+ dmas = <&dmamux1 70 0x400 0x1>;
+ dma-names = "up";
status = "disabled";
timer@6 {
@@ -465,6 +519,15 @@
reg = <0x44000000 0x400>;
clocks = <&rcc TIM1_K>;
clock-names = "int";
+ dmas = <&dmamux1 11 0x400 0x1>,
+ <&dmamux1 12 0x400 0x1>,
+ <&dmamux1 13 0x400 0x1>,
+ <&dmamux1 14 0x400 0x1>,
+ <&dmamux1 15 0x400 0x1>,
+ <&dmamux1 16 0x400 0x1>,
+ <&dmamux1 17 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4",
+ "up", "trig", "com";
status = "disabled";
pwm {
@@ -486,6 +549,15 @@
reg = <0x44001000 0x400>;
clocks = <&rcc TIM8_K>;
clock-names = "int";
+ dmas = <&dmamux1 47 0x400 0x1>,
+ <&dmamux1 48 0x400 0x1>,
+ <&dmamux1 49 0x400 0x1>,
+ <&dmamux1 50 0x400 0x1>,
+ <&dmamux1 51 0x400 0x1>,
+ <&dmamux1 52 0x400 0x1>,
+ <&dmamux1 53 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4",
+ "up", "trig", "com";
status = "disabled";
pwm {
@@ -543,6 +615,11 @@
reg = <0x44006000 0x400>;
clocks = <&rcc TIM15_K>;
clock-names = "int";
+ dmas = <&dmamux1 105 0x400 0x1>,
+ <&dmamux1 106 0x400 0x1>,
+ <&dmamux1 107 0x400 0x1>,
+ <&dmamux1 108 0x400 0x1>;
+ dma-names = "ch1", "up", "trig", "com";
status = "disabled";
pwm {
@@ -564,6 +641,9 @@
reg = <0x44007000 0x400>;
clocks = <&rcc TIM16_K>;
clock-names = "int";
+ dmas = <&dmamux1 109 0x400 0x1>,
+ <&dmamux1 110 0x400 0x1>;
+ dma-names = "ch1", "up";
status = "disabled";
pwm {
@@ -584,6 +664,9 @@
reg = <0x44008000 0x400>;
clocks = <&rcc TIM17_K>;
clock-names = "int";
+ dmas = <&dmamux1 111 0x400 0x1>,
+ <&dmamux1 112 0x400 0x1>;
+ dma-names = "ch1", "up";
status = "disabled";
pwm {
@@ -684,14 +767,14 @@
m_can1: can@4400e000 {
compatible = "bosch,m_can";
- reg = <0x4400e000 0x400>, <0x44011000 0x2800>;
+ reg = <0x4400e000 0x400>, <0x44011000 0x1400>;
reg-names = "m_can", "message_ram";
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "int0", "int1";
clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
clock-names = "hclk", "cclk";
- bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
+ bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
status = "disabled";
};
@@ -908,6 +991,16 @@
status = "disabled";
};
+ dts: thermal@50028000 {
+ compatible = "st,stm32-thermal";
+ reg = <0x50028000 0x100>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc TMPSENS>;
+ clock-names = "pclk";
+ #thermal-sensor-cells = <0>;
+ status = "disabled";
+ };
+
cryp1: cryp@54001000 {
compatible = "st,stm32mp1-cryp";
reg = <0x54001000 0x400>;
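
Two notes on the stm32mp157c hunks above. The timer nodes only gain dmamux channel descriptors, which the ed1/ev1 board hunks earlier in this series then drop again with /delete-property/ where the channels are wanted for other uses. The m_can1 change adjusts the mapped message RAM and its offset in bosch,mram-cfg; as far as I recall the Bosch M_CAN binding, the cells read <offset sidf_elems xidf_elems rxf0_elems rxf1_elems rxb_elems txe_elems txb_elems>, so the line below (copied from the hunk) places 32 RX-FIFO0, 2 TX-event and 2 TX-buffer elements starting at byte offset 0x1400:

	bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
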
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5d46bb0139fa..73c3ac42095f 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -184,6 +184,26 @@
status = "disabled";
};
+ pmu {
+ compatible = "arm,cortex-a8-pmu";
+ interrupts = <3>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* Address must be kept in the lower 256 MiBs of DRAM for VE. */
+ default-pool {
+ compatible = "shared-dma-pool";
+ size = <0x6000000>;
+ alloc-ranges = <0x4a000000 0x6000000>;
+ reusable;
+ linux,cma-default;
+ };
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -224,6 +244,19 @@
status = "disabled";
};
};
+
+ sram_c: sram@1d00000 {
+ compatible = "mmio-sram";
+ reg = <0x01d00000 0xd0000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x01d00000 0xd0000>;
+
+ ve_sram: sram-section@0 {
+ compatible = "allwinner,sun4i-a10-sram-c1";
+ reg = <0x000000 0x80000>;
+ };
+ };
};
dma: dma-controller@1c02000 {
@@ -394,6 +427,17 @@
};
};
+ video-codec@1c0e000 {
+ compatible = "allwinner,sun4i-a10-video-engine";
+ reg = <0x01c0e000 0x1000>;
+ clocks = <&ccu CLK_AHB_VE>, <&ccu CLK_VE>,
+ <&ccu CLK_DRAM_VE>;
+ clock-names = "ahb", "mod", "ram";
+ resets = <&ccu RST_VE>;
+ interrupts = <53>;
+ allwinner,sram = <&ve_sram 1>;
+ };
+
mmc0: mmc@1c0f000 {
compatible = "allwinner,sun4i-a10-mmc";
reg = <0x01c0f000 0x1000>;
diff --git a/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts b/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
index a89f29fa3e40..7257f39b31ce 100644
--- a/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
@@ -49,14 +49,15 @@
compatible = "allwinner,q8-a13", "allwinner,sun5i-a13";
panel: panel {
- compatible = "urt,umsh-8596md-t", "simple-panel";
+ compatible = "bananapi,s070wv20-ct16", "simple-panel";
+ power-supply = <&reg_vcc3v3>;
+ enable-gpios = <&axp_gpio 0 GPIO_ACTIVE_HIGH>; /* AXP GPIO0 */
+ backlight = <&backlight>;
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
- /* TODO: lcd panel uses axp gpio0 as enable pin */
- backlight = <&backlight>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
index 6202aabedbfe..5b1f0e198eb6 100644
--- a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
@@ -54,7 +54,7 @@
pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
- /* TODO: backlight uses axp gpio1 as enable pin */
+ enable-gpios = <&axp_gpio 1 GPIO_ACTIVE_HIGH>; /* AXP GPIO1 */
};
chosen {
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 353d90f99b40..13304b8c5139 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -216,6 +216,7 @@
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
+ clock-output-names = "osc24M";
};
osc32k: clk-32k {
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 556b1b591c5d..81bc85d398c1 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -191,6 +191,11 @@
};
&pio {
+ vcc-pa-supply = <&reg_vcc3v3>;
+ vcc-pc-supply = <&reg_vcc3v3>;
+ vcc-pe-supply = <&reg_vcc3v3>;
+ vcc-pf-supply = <&reg_vcc3v3>;
+ vcc-pg-supply = <&reg_vcc3v3>;
gpio-line-names =
/* PA */
"ERXD3", "ERXD2", "ERXD1", "ERXD0", "ETXD3",
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a9c123de5d2c..43fe215e83ea 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -68,6 +68,12 @@
};
};
+ de: display-engine {
+ /* compatible gets set in SoC specific dtsi file */
+ allwinner,pipelines = <&fe0>;
+ status = "disabled";
+ };
+
timer {
compatible = "arm,armv7-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -155,6 +161,55 @@
#dma-cells = <1>;
};
+ nfc: nand@1c03000 {
+ compatible = "allwinner,sun4i-a10-nand";
+ reg = <0x01c03000 0x1000>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>;
+ clock-names = "ahb", "mod";
+ resets = <&ccu RST_BUS_NAND>;
+ reset-names = "ahb";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ tcon0: lcd-controller@1c0c000 {
+ /* compatible gets set in SoC specific dtsi file */
+ reg = <0x01c0c000 0x1000>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_LCD>,
+ <&ccu CLK_LCD_CH0>;
+ clock-names = "ahb",
+ "tcon-ch0";
+ clock-output-names = "tcon-pixel-clock";
+ resets = <&ccu RST_BUS_LCD>;
+ reset-names = "lcd";
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tcon0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ tcon0_in_drc0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&drc0_out_tcon0>;
+ };
+ };
+
+ tcon0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ };
+ };
+
mmc0: mmc@1c0f000 {
compatible = "allwinner,sun7i-a20-mmc";
reg = <0x01c0f000 0x1000>;
@@ -214,21 +269,6 @@
#size-cells = <0>;
};
- nfc: nand@1c03000 {
- compatible = "allwinner,sun4i-a10-nand";
- reg = <0x01c03000 0x1000>;
- interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>;
- clock-names = "ahb", "mod";
- resets = <&ccu RST_BUS_NAND>;
- reset-names = "ahb";
- pinctrl-names = "default";
- pinctrl-0 = <&nand_pins &nand_pins_cs0 &nand_pins_rb0>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
usb_otg: usb@1c19000 {
/* compatible gets set in SoC specific dtsi file */
reg = <0x01c19000 0x0400>;
@@ -572,6 +612,111 @@
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
+ fe0: display-frontend@1e00000 {
+ /* compatible gets set in SoC specific dtsi file */
+ reg = <0x01e00000 0x20000>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DE_FE>, <&ccu CLK_DE_FE>,
+ <&ccu CLK_DRAM_DE_FE>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&ccu RST_BUS_DE_FE>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fe0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ fe0_out_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_in_fe0>;
+ };
+ };
+ };
+ };
+
+ be0: display-backend@1e60000 {
+ /* compatible gets set in SoC specific dtsi file */
+ reg = <0x01e60000 0x10000>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>,
+ <&ccu CLK_DRAM_DE_BE>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&ccu RST_BUS_DE_BE>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ be0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ be0_in_fe0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&fe0_out_be0>;
+ };
+ };
+
+ be0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ be0_out_drc0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&drc0_in_be0>;
+ };
+ };
+ };
+ };
+
+ drc0: drc@1e70000 {
+ /* compatible gets set in SoC specific dtsi file */
+ reg = <0x01e70000 0x10000>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DRC>, <&ccu CLK_DRC>,
+ <&ccu CLK_DRAM_DRC>;
+ clock-names = "ahb", "mod", "ram";
+ resets = <&ccu RST_BUS_DRC>;
+
+ assigned-clocks = <&ccu CLK_DRC>;
+ assigned-clock-rates = <300000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ drc0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ drc0_in_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_out_drc0>;
+ };
+ };
+
+ drc0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ drc0_out_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_in_drc0>;
+ };
+ };
+ };
+ };
+
rtc: rtc@1f00000 {
compatible = "allwinner,sun8i-a23-rtc";
reg = <0x01f00000 0x400>;
diff --git a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
index b6958e8f2f01..d4dab7c28398 100644
--- a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
@@ -61,3 +61,7 @@
"Headset Mic", "HBIAS";
status = "okay";
};
+
+&panel {
+ compatible = "bananapi,s070wv20-ct16", "simple-panel";
+};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index d00055e9eef5..a5e884a8b2ae 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -62,10 +62,26 @@
};
};
+&be0 {
+ compatible = "allwinner,sun8i-a23-display-backend";
+};
+
&ccu {
compatible = "allwinner,sun8i-a23-ccu";
};
+&de {
+ compatible = "allwinner,sun8i-a23-display-engine";
+};
+
+&drc0 {
+ compatible = "allwinner,sun8i-a23-drc";
+};
+
+&fe0 {
+ compatible = "allwinner,sun8i-a23-display-frontend";
+};
+
&pio {
compatible = "allwinner,sun8i-a23-pinctrl";
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
@@ -73,6 +89,10 @@
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
};
+&tcon0 {
+ compatible = "allwinner,sun8i-a23-tcon";
+};
+
&usb_otg {
compatible = "allwinner,sun6i-a31-musb";
};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 626152c30f50..1111a6498102 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -159,12 +159,6 @@
};
};
- de: display-engine {
- compatible = "allwinner,sun8i-a33-display-engine";
- allwinner,pipelines = <&fe0>;
- status = "disabled";
- };
-
iio-hwmon {
compatible = "iio-hwmon";
io-channels = <&ths>;
@@ -209,47 +203,6 @@
};
soc {
- tcon0: lcd-controller@1c0c000 {
- compatible = "allwinner,sun8i-a33-tcon";
- reg = <0x01c0c000 0x1000>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_LCD>,
- <&ccu CLK_LCD_CH0>;
- clock-names = "ahb",
- "tcon-ch0";
- clock-output-names = "tcon-pixel-clock";
- resets = <&ccu RST_BUS_LCD>;
- reset-names = "lcd";
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- tcon0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- tcon0_in_drc0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&drc0_out_tcon0>;
- };
- };
-
- tcon0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- tcon0_out_dsi: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&dsi_in_tcon0>;
- };
- };
- };
- };
-
video-codec@1c0e000 {
compatible = "allwinner,sun8i-a33-video-engine";
reg = <0x01c0e000 0x1000>;
@@ -339,115 +292,6 @@
status = "disabled";
#phy-cells = <0>;
};
-
- fe0: display-frontend@1e00000 {
- compatible = "allwinner,sun8i-a33-display-frontend";
- reg = <0x01e00000 0x20000>;
- interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_DE_FE>, <&ccu CLK_DE_FE>,
- <&ccu CLK_DRAM_DE_FE>;
- clock-names = "ahb", "mod",
- "ram";
- resets = <&ccu RST_BUS_DE_FE>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- fe0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- fe0_out_be0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&be0_in_fe0>;
- };
- };
- };
- };
-
- be0: display-backend@1e60000 {
- compatible = "allwinner,sun8i-a33-display-backend";
- reg = <0x01e60000 0x10000>, <0x01e80000 0x1000>;
- reg-names = "be", "sat";
- interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>,
- <&ccu CLK_DRAM_DE_BE>, <&ccu CLK_BUS_SAT>;
- clock-names = "ahb", "mod",
- "ram", "sat";
- resets = <&ccu RST_BUS_DE_BE>, <&ccu RST_BUS_SAT>;
- reset-names = "be", "sat";
- assigned-clocks = <&ccu CLK_DE_BE>;
- assigned-clock-rates = <300000000>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- be0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- be0_in_fe0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&fe0_out_be0>;
- };
- };
-
- be0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- be0_out_drc0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&drc0_in_be0>;
- };
- };
- };
- };
-
- drc0: drc@1e70000 {
- compatible = "allwinner,sun8i-a33-drc";
- reg = <0x01e70000 0x10000>;
- interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_DRC>, <&ccu CLK_DRC>,
- <&ccu CLK_DRAM_DRC>;
- clock-names = "ahb", "mod", "ram";
- resets = <&ccu RST_BUS_DRC>;
-
- assigned-clocks = <&ccu CLK_DRC>;
- assigned-clock-rates = <300000000>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- drc0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- drc0_in_be0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&be0_out_drc0>;
- };
- };
-
- drc0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- drc0_out_tcon0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&tcon0_in_drc0>;
- };
- };
- };
- };
};
thermal-zones {
@@ -524,10 +368,37 @@
};
};
+&be0 {
+ compatible = "allwinner,sun8i-a33-display-backend";
+ /* A33 has an extra "SAT" module packed inside the display backend */
+ reg = <0x01e60000 0x10000>, <0x01e80000 0x1000>;
+ reg-names = "be", "sat";
+ clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>,
+ <&ccu CLK_DRAM_DE_BE>, <&ccu CLK_BUS_SAT>;
+ clock-names = "ahb", "mod",
+ "ram", "sat";
+ resets = <&ccu RST_BUS_DE_BE>, <&ccu RST_BUS_SAT>;
+ reset-names = "be", "sat";
+ assigned-clocks = <&ccu CLK_DE_BE>;
+ assigned-clock-rates = <300000000>;
+};
+
&ccu {
compatible = "allwinner,sun8i-a33-ccu";
};
+&de {
+ compatible = "allwinner,sun8i-a33-display-engine";
+};
+
+&drc0 {
+ compatible = "allwinner,sun8i-a33-drc";
+};
+
+&fe0 {
+ compatible = "allwinner,sun8i-a33-display-frontend";
+};
+
&mali {
operating-points-v2 = <&mali_opp_table>;
};
@@ -544,6 +415,17 @@
};
+&tcon0 {
+ compatible = "allwinner,sun8i-a33-tcon";
+};
+
+&tcon0_out {
+ tcon0_out_dsi: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dsi_in_tcon0>;
+ };
+};
+
&usb_otg {
compatible = "allwinner,sun8i-a33-musb";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
index 1c012a4def16..9c006fc18821 100644
--- a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
@@ -154,6 +154,10 @@
#include "axp81x.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
&reg_aldo1 {
regulator-always-on;
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index 7d30d3e530fb..838be7b3715f 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -237,6 +237,14 @@
#include "axp81x.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
&reg_aldo1 {
regulator-always-on;
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
index a5a9f5a0603e..fcbec3d7ccd7 100644
--- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
@@ -247,6 +247,14 @@
#include "axp81x.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
&reg_aldo1 {
regulator-always-on;
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 5d23667dc2d2..25540b7694d5 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -53,7 +53,7 @@
aliases {
serial0 = &uart0;
- /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+ ethernet0 = &emac;
ethernet1 = &sdiowifi;
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
index 65cba1050802..4ec94d72f021 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
@@ -67,6 +67,21 @@
pinctrl-names = "default";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
};
+
+ connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+};
+
+&de {
+ status = "okay";
};
&ehci1 {
@@ -94,6 +109,16 @@
};
};
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
&ir {
pinctrl-names = "default";
pinctrl-0 = <&ir_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-q8-common.dtsi b/arch/arm/boot/dts/sun8i-q8-common.dtsi
index 719ad769b837..53104f4ccacc 100644
--- a/arch/arm/boot/dts/sun8i-q8-common.dtsi
+++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi
@@ -49,6 +49,26 @@
ethernet0 = &sdio_wifi;
};
+ panel: panel {
+ /* Tablet dts should provide panel compatible */
+ backlight = <&backlight>;
+ enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+ power-supply = <&reg_dc1sw>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel_input: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_out_lcd>;
+ };
+ };
+ };
+
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
/*
@@ -64,6 +84,10 @@
};
};
+&de {
+ status = "okay";
+};
+
&ehci0 {
status = "okay";
};
@@ -90,6 +114,19 @@
};
};
+&tcon0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_rgb666_pins>;
+ status = "okay";
+};
+
+&tcon0_out {
+ tcon0_out_lcd: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_input>;
+ };
+};
+
&usbphy {
usb1_vbus-supply = <&reg_dldo1>;
};
diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
index 438b7b44dab3..c488aaacbd68 100644
--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
@@ -102,6 +102,8 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&pio 6 10 GPIO_ACTIVE_LOW>; /* PG10 WIFI_EN */
+ clocks = <&ccu CLK_OUTA>;
+ clock-names = "ext_clock";
};
};
@@ -196,6 +198,11 @@
status = "okay";
};
+&pio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&clk_out_a_pin>;
+};
+
&reg_aldo2 {
regulator-always-on;
regulator-min-microvolt = <2500000>;
@@ -250,12 +257,27 @@
regulator-name = "vcc-wifi-io";
};
+/*
+ * Our WiFi chip needs both DLDO2 and DLDO3 powered at the same time,
+ * with the two kept in sync, in order to meet its peak power draw
+ * during transmission. Since that is not really supported right now,
+ * keep both regulators always on; this can be fixed later.
+ */
+
&reg_dldo2 {
+ regulator-always-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-name = "vcc-wifi";
};
+&reg_dldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi-2";
+};
+
&reg_dldo4 {
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
@@ -278,6 +300,25 @@
status = "okay";
};
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pg_pins>, <&uart3_rts_cts_pg_pins>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&ccu CLK_OUTA>;
+ clock-names = "lpo";
+ vbat-supply = <&reg_dldo2>;
+ vddio-supply = <&reg_dldo1>;
+ device-wakeup-gpios = <&pio 6 11 GPIO_ACTIVE_HIGH>; /* PG11 */
+ /* TODO host wake line connected to PMIC GPIO pins */
+ shutdown-gpios = <&pio 7 12 GPIO_ACTIVE_HIGH>; /* PH12 */
+ max-speed = <1500000>;
+ };
+};
+
&usbphy {
usb1_vbus-supply = <&reg_vcc5v0>;
usb2_vbus-supply = <&reg_vcc5v0>;
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index 89762dbefe42..06b685869f52 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -342,6 +342,11 @@
#interrupt-cells = <3>;
#gpio-cells = <3>;
+ clk_out_a_pin: clk-out-a-pin {
+ pins = "PI12";
+ function = "clk_out_a";
+ };
+
gmac_rgmii_pins: gmac-rgmii-pins {
pins = "PA0", "PA1", "PA2", "PA3",
"PA4", "PA5", "PA6", "PA7",
@@ -389,6 +394,16 @@
pins = "PB22", "PB23";
function = "uart0";
};
+
+ uart3_pg_pins: uart3-pg-pins {
+ pins = "PG6", "PG7";
+ function = "uart3";
+ };
+
+ uart3_rts_cts_pg_pins: uart3-rts-cts-pg-pins {
+ pins = "PG8", "PG9";
+ function = "uart3";
+ };
};
wdt: watchdog@1c20c90 {
diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
index 85da85faf869..28c034928d67 100644
--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
@@ -133,6 +133,19 @@
status = "okay";
};
+&gmac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac_rgmii_pins>;
+ phy = <&phy1>;
+ phy-mode = "rgmii";
+ phy-supply = <&reg_cldo1>;
+ status = "okay";
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
+
&i2c3 {
pinctrl-names = "default";
pinctrl-0 = <&i2c3_pins>;
@@ -183,10 +196,26 @@
clocks = <&ac100_rtc 0>;
};
+&pio {
+ vcc-pa-supply = <&reg_ldo_io1>;
+ vcc-pb-supply = <&reg_aldo2>;
+ vcc-pc-supply = <&reg_dcdc1>;
+ vcc-pd-supply = <&reg_dc1sw>;
+ vcc-pe-supply = <&reg_eldo2>;
+ vcc-pf-supply = <&reg_dcdc1>;
+ vcc-pg-supply = <&reg_ldo_io0>;
+ vcc-ph-supply = <&reg_dcdc1>;
+};
+
&r_ir {
status = "okay";
};
+&r_pio {
+ vcc-pl-supply = <&reg_dldo2>;
+ vcc-pm-supply = <&reg_eldo3>;
+};
+
&r_rsb {
status = "okay";
@@ -217,6 +246,10 @@
/* unused */
};
+ reg_dc1sw: dc1sw {
+ regulator-name = "vcc-pd";
+ };
+
reg_dc5ldo: dc5ldo {
regulator-always-on;
regulator-min-microvolt = <800000>;
@@ -271,7 +304,6 @@
};
reg_dldo2: dldo2 {
- regulator-always-on;
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-name = "vcc-pl";
@@ -290,14 +322,12 @@
};
reg_eldo3: eldo3 {
- regulator-always-on;
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-name = "vcc-pm-codec-io1";
};
reg_ldo_io0: ldo_io0 {
- regulator-always-on;
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-name = "vcc-pg";
@@ -385,6 +415,14 @@
*/
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ /*
+ * The PHY requires 20ms after all voltages
+ * are applied until core logic is ready and
+ * 30ms after the reset pin is de-asserted.
+ * Set a 100ms delay to account for PMIC
+ * ramp time and board traces.
+ */
+ regulator-enable-ramp-delay = <100000>;
regulator-name = "vcc-gmac-phy";
};
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 58a199b0e494..864715ec3cb0 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -120,6 +120,19 @@
status = "okay";
};
+&gmac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac_rgmii_pins>;
+ phy = <&phy1>;
+ phy-mode = "rgmii";
+ phy-supply = <&reg_cldo1>;
+ status = "okay";
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
+
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
@@ -172,10 +185,26 @@
clocks = <&ac100_rtc 0>;
};
+&pio {
+ vcc-pa-supply = <&reg_ldo_io1>;
+ vcc-pb-supply = <&reg_aldo2>;
+ vcc-pc-supply = <&reg_dcdc1>;
+ vcc-pd-supply = <&reg_dcdc1>;
+ vcc-pe-supply = <&reg_eldo2>;
+ vcc-pf-supply = <&reg_dcdc1>;
+ vcc-pg-supply = <&reg_ldo_io0>;
+ vcc-ph-supply = <&reg_dcdc1>;
+};
+
&r_ir {
status = "okay";
};
+&r_pio {
+ vcc-pl-supply = <&reg_dldo2>;
+ vcc-pm-supply = <&reg_eldo3>;
+};
+
&r_rsb {
status = "okay";
@@ -213,6 +242,10 @@
regulator-name = "vdd-cpus-09-usbh";
};
+ dc1sw {
+ /* unused */
+ };
+
reg_dcdc1: dcdc1 {
regulator-always-on;
regulator-min-microvolt = <3000000>;
@@ -260,7 +293,6 @@
};
reg_dldo2: dldo2 {
- regulator-always-on;
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-name = "vcc-pl";
@@ -279,14 +311,12 @@
};
reg_eldo3: eldo3 {
- regulator-always-on;
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-name = "vcc-pm-codec-io1";
};
reg_ldo_io0: ldo_io0 {
- regulator-always-on;
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-name = "vcc-pg";
@@ -374,6 +404,14 @@
*/
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ /*
+ * The PHY requires 20ms after all voltages
+ * are applied until core logic is ready and
+ * 30ms after the reset pin is de-asserted.
+ * Set a 100ms delay to account for PMIC
+ * ramp time and board traces.
+ */
+ regulator-enable-ramp-delay = <100000>;
regulator-name = "vcc-gmac-phy";
};
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index d9532fb1ef65..6fb292e0b662 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -56,6 +56,10 @@
#size-cells = <2>;
interrupt-parent = <&gic>;
+ aliases {
+ ethernet0 = &gmac;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -183,6 +187,37 @@
clock-output-names = "osc32k";
};
+ /*
+ * The following two are dummy clocks, placeholders
+ * used in the gmac_tx clock. The gmac driver will
+ * choose one parent depending on the PHY interface
+ * mode, using clk_set_rate auto-reparenting.
+ *
+ * The actual TX clock rate is not controlled by the
+ * gmac_tx clock.
+ */
+ mii_phy_tx_clk: mii_phy_tx_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25000000>;
+ clock-output-names = "mii_phy_tx";
+ };
+
+ gmac_int_tx_clk: gmac_int_tx_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "gmac_int_tx";
+ };
+
+ gmac_tx_clk: clk@800030 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun7i-a20-gmac-clk";
+ reg = <0x00800030 0x4>;
+ clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>;
+ clock-output-names = "gmac_tx";
+ };
+
cpus_clk: clk@8001410 {
compatible = "allwinner,sun9i-a80-cpus-clk";
reg = <0x08001410 0x4>;
@@ -283,6 +318,23 @@
};
};
+ gmac: ethernet@830000 {
+ compatible = "allwinner,sun7i-a20-gmac";
+ reg = <0x00830000 0x1054>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&ccu CLK_BUS_GMAC>, <&gmac_tx_clk>;
+ clock-names = "stmmaceth", "allwinner_gmac_tx";
+ resets = <&ccu RST_BUS_GMAC>;
+ reset-names = "stmmaceth";
+ snps,pbl = <2>;
+ snps,fixed-burst;
+ snps,force_sf_dma_mode;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
ehci0: usb@a00000 {
compatible = "allwinner,sun9i-a80-ehci", "generic-ehci";
reg = <0x00a00000 0x100>;
@@ -948,6 +1000,19 @@
#size-cells = <0>;
#gpio-cells = <3>;
+ gmac_rgmii_pins: gmac-rgmii-pins {
+ allwinner,pins = "PA0", "PA1", "PA2", "PA3",
+ "PA4", "PA5", "PA7", "PA8",
+ "PA9", "PA10", "PA12", "PA13",
+ "PA15", "PA16", "PA17";
+ allwinner,function = "gmac";
+ /*
+ * data lines in RGMII mode use DDR mode
+ * and need a higher signal drive strength
+ */
+ drive-strength = <40>;
+ };
+
i2c3_pins: i2c3-pins {
pins = "PG10", "PG11";
function = "i2c3";
diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
index b3283aeb5b7d..3bed375b9c03 100644
--- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
+++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
@@ -103,6 +103,8 @@
compatible = "mmc-pwrseq-simple";
pinctrl-names = "default";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
+ clocks = <&rtc 1>;
+ clock-names = "ext_clock";
};
};
@@ -215,7 +217,19 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
- status = "okay";
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&rtc 1>;
+ clock-names = "lpo";
+ vbat-supply = <&reg_vcc3v3>;
+ vddio-supply = <&reg_vcc3v3>;
+ device-wakeup-gpios = <&pio 6 13 GPIO_ACTIVE_HIGH>; /* PG13 */
+ host-wakeup-gpios = <&pio 6 11 GPIO_ACTIVE_HIGH>; /* PG11 */
+ shutdown-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+ };
};
&usb_otg {
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index a4c757c0b741..d74a6cbbfdf4 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -740,8 +740,7 @@
};
csi: camera@1cb0000 {
- compatible = "allwinner,sun8i-h3-csi",
- "allwinner,sun6i-a31-csi";
+ compatible = "allwinner,sun8i-h3-csi";
reg = <0x01cb0000 0x1000>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_BUS_CSI>,
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 1788556b4977..97a5c3504bbe 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -1087,7 +1087,7 @@
status = "okay";
spi-max-frequency = <25000000>;
spi-flash@0 {
- compatible = "winbond,w25q32dw";
+ compatible = "winbond,w25q32dw", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <20000000>;
};
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 9151b3ebb839..33bbb1c5285d 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -1656,7 +1656,7 @@
status = "okay";
spi-max-frequency = <25000000>;
spi-flash@0 {
- compatible = "winbond,w25q32dw";
+ compatible = "winbond,w25q32dw", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <20000000>;
};
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index d5f11d6d987e..a1acd872bcf2 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -13,10 +13,25 @@
stdout-path = "serial0:115200n8";
};
- memory@80000000 {
+ /*
+ * Note that recent versions of the device tree compiler (starting with
+ * version 1.4.2) warn about this node containing a reg property, but
+ * missing a unit-address. However, the bootloader on these Chromebook
+ * devices relies on the full name of this node to be exactly /memory.
+ * Adding the unit-address causes the bootloader to create a /memory
+ * node and write the memory bank configuration to that node, which in
+ * turn leads the kernel to believe that the device has 2 GiB of
+ * memory instead of the amount detected by the bootloader.
+ *
+ * The name of this node is effectively ABI and must not be changed.
+ */
+ memory {
+ device_type = "memory";
reg = <0x0 0x80000000 0x0 0x80000000>;
};
+ /delete-node/ memory@80000000;
+
host1x@50000000 {
hdmi@54280000 {
status = "okay";
@@ -355,7 +370,7 @@
spi-max-frequency = <25000000>;
flash@0 {
- compatible = "winbond,w25q32dw";
+ compatible = "winbond,w25q32dw", "jedec,spi-nor";
spi-max-frequency = <25000000>;
reg = <0>;
};
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 82d139648ef1..4882b61fb680 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -879,7 +879,7 @@
status = "okay";
spi-max-frequency = <25000000>;
spi-flash@0 {
- compatible = "winbond,w25q32dw";
+ compatible = "winbond,w25q32dw", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <20000000>;
};
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 9eb26dc15f6b..3e5ac096d85e 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -287,7 +287,7 @@
status = "okay";
spi-max-frequency = <48000000>;
spi-flash@0 {
- compatible = "winbond,w25q80bl";
+ compatible = "winbond,w25q80bl", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <48000000>;
};
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index b0d40ac8ac6e..a3b0f3555cd2 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -1886,7 +1886,7 @@
status = "okay";
spi-max-frequency = <25000000>;
spi-flash@1 {
- compatible = "winbond,w25q32";
+ compatible = "winbond,w25q32", "jedec,spi-nor";
reg = <1>;
spi-max-frequency = <20000000>;
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index fb9222b479d2..7ce61edd52f5 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -360,7 +360,7 @@
status = "okay";
spi-max-frequency = <25000000>;
spi-flash@1 {
- compatible = "winbond,w25q32";
+ compatible = "winbond,w25q32", "jedec,spi-nor";
reg = <1>;
spi-max-frequency = <20000000>;
};
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 6f4f60ba5429..269e6bf99ccb 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "skeleton.dtsi"
/ {
model = "ARM Versatile AB";
@@ -21,6 +20,7 @@
};
memory {
+ device_type = "memory";
reg = <0x0 0x08000000>;
};
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index a9569d15de41..d3963e9eaf48 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -133,7 +133,7 @@
mmci@50000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
- interrupts = <9 10>;
+ interrupts = <9>, <10>;
cd-gpios = <&v2m_mmc_gpios 0 0>;
wp-gpios = <&v2m_mmc_gpios 1 0>;
max-frequency = <12000000>;
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index fd42e1194179..798c97aff7fa 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -133,7 +133,7 @@
mmci@5000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x05000 0x1000>;
- interrupts = <9 10>;
+ interrupts = <9>, <10>;
cd-gpios = <&v2m_mmc_gpios 0 0>;
wp-gpios = <&v2m_mmc_gpios 1 0>;
max-frequency = <12000000>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index a2ccacd07f4f..00cd9f5bef2e 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -42,6 +42,7 @@
cci-control-port = <&cci_control1>;
cpu-idle-states = <&CLUSTER_SLEEP_BIG>;
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <990>;
};
cpu1: cpu@1 {
@@ -51,6 +52,7 @@
cci-control-port = <&cci_control1>;
cpu-idle-states = <&CLUSTER_SLEEP_BIG>;
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <990>;
};
cpu2: cpu@2 {
@@ -60,6 +62,7 @@
cci-control-port = <&cci_control2>;
cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
capacity-dmips-mhz = <516>;
+ dynamic-power-coefficient = <133>;
};
cpu3: cpu@3 {
@@ -69,6 +72,7 @@
cci-control-port = <&cci_control2>;
cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
capacity-dmips-mhz = <516>;
+ dynamic-power-coefficient = <133>;
};
cpu4: cpu@4 {
@@ -78,6 +82,7 @@
cci-control-port = <&cci_control2>;
cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
capacity-dmips-mhz = <516>;
+ dynamic-power-coefficient = <133>;
};
idle-states {
diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts
index 689c8930dce3..3fa0cbe456db 100644
--- a/arch/arm/boot/dts/vf610-bk4.dts
+++ b/arch/arm/boot/dts/vf610-bk4.dts
@@ -60,6 +60,29 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+ spi-gpio {
+ compatible = "spi-gpio";
+ pinctrl-0 = <&pinctrl_gpio_spi>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* PTD12 -> RGPIO[91] */
+ sck-gpios = <&gpio2 27 GPIO_ACTIVE_LOW>;
+ /* PTD10 -> RGPIO[89] */
+ miso-gpios = <&gpio2 25 GPIO_ACTIVE_HIGH>;
+ num-chipselects = <0>;
+
+ gpio@0 {
+ compatible = "pisosr-gpio";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* PTB18 -> RGPIO[40] */
+ load-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+ spi-max-frequency = <100000>;
+ };
+ };
};
&adc0 {
@@ -110,11 +133,11 @@
bus-num = <3>;
status = "okay";
spi-slave;
+ #address-cells = <0>;
- slave@0 {
+ slave {
compatible = "lwn,bk4";
spi-max-frequency = <30000000>;
- reg = <0>;
};
};
@@ -431,6 +454,14 @@
>;
};
+ pinctrl_gpio_spi: pinctrl-gpio-spi {
+ fsl,pins = <
+ VF610_PAD_PTB18__GPIO_40 0x1183
+ VF610_PAD_PTD10__GPIO_89 0x1183
+ VF610_PAD_PTD12__GPIO_91 0x1183
+ >;
+ };
+
pinctrl_i2c2: i2c2grp {
fsl,pins = <
VF610_PAD_PTA22__I2C2_SCL 0x34df
diff --git a/arch/arm/boot/dts/vf610-zii-cfu1.dts b/arch/arm/boot/dts/vf610-zii-cfu1.dts
index 7cdcc5fe8282..445c7dc306b2 100644
--- a/arch/arm/boot/dts/vf610-zii-cfu1.dts
+++ b/arch/arm/boot/dts/vf610-zii-cfu1.dts
@@ -207,7 +207,7 @@
};
&i2c0 {
- clock-frequency = <100000>;
+ clock-frequency = <400000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
status = "okay";
diff --git a/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts b/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts
new file mode 100644
index 000000000000..2b10672fadbd
--- /dev/null
+++ b/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+/*
+ * Device tree file for ZII's SSMB DTU board
+ *
+ * SSMB - SPU3 Switch Management Board
+ * DTU - Digital Tapping Unit
+ *
+ * Copyright (C) 2015-2019 Zodiac Inflight Innovations
+ *
+ * Based on an original 'vf610-twr.dts' which is Copyright 2015,
+ * Freescale Semiconductor, Inc.
+ */
+
+/dts-v1/;
+#include "vf610.dtsi"
+
+/ {
+ model = "ZII VF610 SSMB DTU Board";
+ compatible = "zii,vf610dtu", "zii,vf610dev", "fsl,vf610";
+
+ chosen {
+ stdout-path = &uart0;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&pinctrl_leds_debug>;
+ pinctrl-names = "default";
+
+ led-debug {
+ label = "zii:green:debug1";
+ gpios = <&gpio2 18 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ max-brightness = <1>;
+ };
+ };
+
+ reg_vcc_3v3_mcu: regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_mcu";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&adc0 {
+ vref-supply = <&reg_vcc_3v3_mcu>;
+ status = "okay";
+};
+
+&adc1 {
+ vref-supply = <&reg_vcc_3v3_mcu>;
+ status = "okay";
+};
+
+&edma0 {
+ status = "okay";
+};
+
+&edma1 {
+ status = "okay";
+};
+
+&esdhc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc0>;
+ bus-width = <8>;
+ non-removable;
+ no-1-8-v;
+ keep-power-in-suspend;
+ status = "okay";
+};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&fec1 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ status = "okay";
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+
+ mdio1: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ switch0: switch0@0 {
+ compatible = "marvell,mv88e6190";
+ pinctrl-0 = <&pinctrl_gpio_switch0>;
+ pinctrl-names = "default";
+ reg = <0>;
+ eeprom-length = <65536>;
+ reset-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&fec1>;
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "eth_cu_100_3";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "eth_cu_1000_4";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "eth_cu_1000_5";
+ };
+
+ port@8 {
+ reg = <8>;
+ label = "eth_cu_1000_1";
+ };
+
+ port@9 {
+ reg = <9>;
+ label = "eth_cu_1000_2";
+ phy-handle = <&phy9>;
+ phy-mode = "sgmii";
+ managed = "in-band-status";
+ };
+ };
+
+ mdio1 {
+ compatible = "marvell,mv88e6xxx-mdio-external";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy9: phy9@0 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ pinctrl-0 = <&pinctrl_gpio_phy9>;
+ pinctrl-names = "default";
+ interrupt-parent = <&gpio2>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0>;
+ };
+ };
+ };
+ };
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c0>;
+ status = "okay";
+
+ gpio6: gpio-expander@22 {
+ compatible = "nxp,pca9554";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ /* On SSMB */
+ temperature-sensor@48 {
+ compatible = "national,lm75";
+ reg = <0x48>;
+ };
+
+ /* On DSB */
+ temperature-sensor@4d {
+ compatible = "national,lm75";
+ reg = <0x4d>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c04";
+ reg = <0x50>;
+ label = "nameplate";
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c04";
+ reg = <0x52>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_dspi1: dspi1grp {
+ fsl,pins = <
+ VF610_PAD_PTD5__DSPI1_CS0 0x1182
+ VF610_PAD_PTD4__DSPI1_CS1 0x1182
+ VF610_PAD_PTC6__DSPI1_SIN 0x1181
+ VF610_PAD_PTC7__DSPI1_SOUT 0x1182
+ VF610_PAD_PTC8__DSPI1_SCK 0x1182
+ >;
+ };
+
+ pinctrl_esdhc0: esdhc0grp {
+ fsl,pins = <
+ VF610_PAD_PTC0__ESDHC0_CLK 0x31ef
+ VF610_PAD_PTC1__ESDHC0_CMD 0x31ef
+ VF610_PAD_PTC2__ESDHC0_DAT0 0x31ef
+ VF610_PAD_PTC3__ESDHC0_DAT1 0x31ef
+ VF610_PAD_PTC4__ESDHC0_DAT2 0x31ef
+ VF610_PAD_PTC5__ESDHC0_DAT3 0x31ef
+ VF610_PAD_PTD23__ESDHC0_DAT4 0x31ef
+ VF610_PAD_PTD22__ESDHC0_DAT5 0x31ef
+ VF610_PAD_PTD21__ESDHC0_DAT6 0x31ef
+ VF610_PAD_PTD20__ESDHC0_DAT7 0x31ef
+ >;
+ };
+
+ pinctrl_esdhc1: esdhc1grp {
+ fsl,pins = <
+ VF610_PAD_PTA24__ESDHC1_CLK 0x31ef
+ VF610_PAD_PTA25__ESDHC1_CMD 0x31ef
+ VF610_PAD_PTA26__ESDHC1_DAT0 0x31ef
+ VF610_PAD_PTA27__ESDHC1_DAT1 0x31ef
+ VF610_PAD_PTA28__ESDHC1_DATA2 0x31ef
+ VF610_PAD_PTA29__ESDHC1_DAT3 0x31ef
+ >;
+ };
+
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ VF610_PAD_PTA6__RMII_CLKIN 0x30d1
+ VF610_PAD_PTC9__ENET_RMII1_MDC 0x30d2
+ VF610_PAD_PTC10__ENET_RMII1_MDIO 0x30d3
+ VF610_PAD_PTC11__ENET_RMII1_CRS 0x30d1
+ VF610_PAD_PTC12__ENET_RMII1_RXD1 0x30d1
+ VF610_PAD_PTC13__ENET_RMII1_RXD0 0x30d1
+ VF610_PAD_PTC14__ENET_RMII1_RXER 0x30d1
+ VF610_PAD_PTC15__ENET_RMII1_TXD1 0x30d2
+ VF610_PAD_PTC16__ENET_RMII1_TXD0 0x30d2
+ VF610_PAD_PTC17__ENET_RMII1_TXEN 0x30d2
+ >;
+ };
+
+ pinctrl_gpio_phy9: pinctrl-gpio-phy9 {
+ fsl,pins = <
+ VF610_PAD_PTB24__GPIO_94 0x219d
+ >;
+ };
+
+ pinctrl_gpio_switch0: pinctrl-gpio-switch0 {
+ fsl,pins = <
+ VF610_PAD_PTE2__GPIO_107 0x31c2
+ VF610_PAD_PTB28__GPIO_98 0x219d
+ >;
+ };
+
+ pinctrl_i2c0: i2c0grp {
+ fsl,pins = <
+ VF610_PAD_PTB14__I2C0_SCL 0x37ff
+ VF610_PAD_PTB15__I2C0_SDA 0x37ff
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ VF610_PAD_PTB16__I2C1_SCL 0x37ff
+ VF610_PAD_PTB17__I2C1_SDA 0x37ff
+ >;
+ };
+
+ pinctrl_leds_debug: pinctrl-leds-debug {
+ fsl,pins = <
+ VF610_PAD_PTD3__GPIO_82 0x31c2
+ >;
+ };
+
+ pinctrl_uart0: uart0grp {
+ fsl,pins = <
+ VF610_PAD_PTB10__UART0_TX 0x21a2
+ VF610_PAD_PTB11__UART0_RX 0x21a1
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts b/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
index 757af56e8ee7..0d9fe5ac83a3 100644
--- a/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
+++ b/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
@@ -99,6 +99,8 @@
non-removable;
no-1-8-v;
keep-power-in-suspend;
+ no-sdio;
+ no-sd;
status = "okay";
};
@@ -106,6 +108,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
bus-width = <4>;
+ no-sdio;
status = "okay";
};
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index 1929ad390d88..8b5af039b072 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -6,9 +6,9 @@
* Licensed under GPLv2 or later
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "via,vt8500";
cpus {
@@ -21,6 +21,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
aliases {
serial0 = &uart0;
serial1 = &uart1;
diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi
index e9ef539e13d3..cca6747304c4 100644
--- a/arch/arm/boot/dts/wm8505.dtsi
+++ b/arch/arm/boot/dts/wm8505.dtsi
@@ -6,9 +6,9 @@
* Licensed under GPLv2 or later
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "wm,wm8505";
cpus {
@@ -21,6 +21,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
aliases {
serial0 = &uart0;
serial1 = &uart1;
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index e12213d16693..00d01769a68f 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -6,9 +6,9 @@
* Licensed under GPLv2 or later
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "wm,wm8650";
cpus {
@@ -21,6 +21,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
aliases {
serial0 = &uart0;
serial1 = &uart1;
diff --git a/arch/arm/boot/dts/wm8750.dtsi b/arch/arm/boot/dts/wm8750.dtsi
index 46d076d7302b..54d8f7d9bb33 100644
--- a/arch/arm/boot/dts/wm8750.dtsi
+++ b/arch/arm/boot/dts/wm8750.dtsi
@@ -6,9 +6,9 @@
* Licensed under GPLv2 or later
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "wm,wm8750";
cpus {
@@ -21,6 +21,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
aliases {
serial0 = &uart0;
serial1 = &uart1;
diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi
index 8fbccfbe75f3..c572d777077f 100644
--- a/arch/arm/boot/dts/wm8850.dtsi
+++ b/arch/arm/boot/dts/wm8850.dtsi
@@ -6,9 +6,9 @@
* Licensed under GPLv2 or later
*/
-/include/ "skeleton.dtsi"
-
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "wm,wm8850";
cpus {
@@ -22,6 +22,11 @@
};
};
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>;
+ };
+
aliases {
serial0 = &uart0;
serial1 = &uart1;
diff --git a/arch/arm/boot/dts/zx296702-ad1.dts b/arch/arm/boot/dts/zx296702-ad1.dts
index eedd3fcbc002..bd9400840023 100644
--- a/arch/arm/boot/dts/zx296702-ad1.dts
+++ b/arch/arm/boot/dts/zx296702-ad1.dts
@@ -14,6 +14,7 @@
};
memory {
+ device_type = "memory";
reg = <0x50000000 0x20000000>;
};
};
diff --git a/arch/arm/boot/dts/zx296702.dtsi b/arch/arm/boot/dts/zx296702.dtsi
index 240e7a23d81f..afd98de029be 100644
--- a/arch/arm/boot/dts/zx296702.dtsi
+++ b/arch/arm/boot/dts/zx296702.dtsi
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-#include "skeleton.dtsi"
#include <dt-bindings/clock/zx296702-clock.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/zynq-zturn.dts b/arch/arm/boot/dts/zynq-zturn.dts
index b38704657960..5ec616ebca08 100644
--- a/arch/arm/boot/dts/zynq-zturn.dts
+++ b/arch/arm/boot/dts/zynq-zturn.dts
@@ -54,7 +54,7 @@
label = "K1";
gpios = <&gpio0 0x32 0x1>;
linux,code = <0x66>;
- gpio-key,wakeup;
+ wakeup-source;
autorepeat;
};
};
diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig
index 8e17e7ed1f02..53864316bee1 100644
--- a/arch/arm/configs/axm55xx_defconfig
+++ b/arch/arm/configs/axm55xx_defconfig
@@ -155,10 +155,6 @@ CONFIG_PMBUS=y
CONFIG_SENSORS_LTC2978=y
CONFIG_WATCHDOG=y
CONFIG_ARM_SP805_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_ARMCLCD=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_HID_A4TECH=y
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index bb6a35fb1dd7..bd55a2be51fc 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -91,6 +91,8 @@ CONFIG_THERMAL=y
CONFIG_BCM2835_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_BCM2835_WDT=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_DRM=y
CONFIG_DRM_VC4=y
CONFIG_FB_SIMPLE=y
@@ -129,6 +131,7 @@ CONFIG_DMADEVICES=y
CONFIG_DMA_BCM2835=y
CONFIG_STAGING=y
CONFIG_SND_BCM2835=m
+CONFIG_VIDEO_BCM2835=m
CONFIG_MAILBOX=y
CONFIG_BCM2835_MBOX=y
# CONFIG_IOMMU_SUPPORT is not set
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 69cb8f1efcea..747550c7af2f 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -27,6 +27,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPUFREQ_DT=y
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -53,13 +54,17 @@ CONFIG_E100=y
CONFIG_SMC91X=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_SERIO_SERPORT is not set
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_ARMCLCD=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_MILLENIUM=y
CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_VGA_CONSOLE is not set
+CONFIG_LOGO=y
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index 23df2518203d..e3d5e15d66d1 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -1,5 +1,6 @@
CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-"
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
@@ -17,22 +18,20 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
-CONFIG_ARM_SINGLE_ARMV7M=y
CONFIG_ARCH_LPC18XX=y
CONFIG_SET_MEM_PARAM=y
CONFIG_DRAM_BASE=0x28000000
CONFIG_DRAM_SIZE=0x02000000
CONFIG_FLASH_MEM_BASE=0x1b000000
CONFIG_FLASH_SIZE=0x00080000
-CONFIG_PREEMPT=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
CONFIG_BINFMT_SHARED_FLAT=y
@@ -69,7 +68,6 @@ CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_FARADAY is not set
@@ -90,7 +88,6 @@ CONFIG_STMMAC_ETH=y
CONFIG_SMSC_PHY=y
# CONFIG_USB_NET_DRIVERS is not set
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -101,13 +98,11 @@ CONFIG_KEYBOARD_GPIO_POLLED=y
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
CONFIG_I2C_LPC2K=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
@@ -121,8 +116,10 @@ CONFIG_WATCHDOG=y
CONFIG_LPC18XX_WATCHDOG=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_FB=y
-CONFIG_FB_ARMCLCD=y
+CONFIG_DRM=y
+CONFIG_DRM_PL111=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
@@ -144,15 +141,14 @@ CONFIG_AMBA_PL08X=y
CONFIG_LPC18XX_DMAMUX=y
CONFIG_MEMORY=y
CONFIG_ARM_PL172_MPMC=y
-CONFIG_PWM=y
-CONFIG_PWM_LPC18XX_SCT=y
CONFIG_IIO=y
CONFIG_MMA7455_I2C=y
CONFIG_LPC18XX_ADC=y
CONFIG_LPC18XX_DAC=y
CONFIG_IIO_SYSFS_TRIGGER=y
+CONFIG_PWM=y
+CONFIG_PWM_LPC18XX_SCT=y
CONFIG_PHY_LPC18XX_USB_OTG=y
-CONFIG_NVMEM=y
CONFIG_NVMEM_LPC18XX_EEPROM=y
CONFIG_EXT2_FS=y
# CONFIG_FILE_LOCKING is not set
@@ -160,9 +156,10 @@ CONFIG_EXT2_FS=y
# CONFIG_INOTIFY_USER is not set
CONFIG_JFFS2_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
@@ -170,5 +167,3 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
-CONFIG_CRC_ITU_T=y
-CONFIG_CRC7=y
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 0b54b4024e51..e752fb704df0 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -1,6 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
@@ -11,13 +12,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
-CONFIG_JUMP_LABEL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_LPC32XX=y
-CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
@@ -26,6 +21,11 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="console=ttyS0,115200n81 root=/dev/ram0"
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -56,6 +56,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_SRAM=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_SCSI=y
@@ -75,9 +76,6 @@ CONFIG_LPC_ENET=y
# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_SMSC_PHY=y
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -92,48 +90,39 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_HS_LPC32XX=y
+CONFIG_SERIAL_HS_LPC32XX_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PNX=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_EM=y
CONFIG_GPIO_GENERIC_PLATFORM=y
-CONFIG_GPIO_PL061=y
-CONFIG_GPIO_ADP5588=y
-CONFIG_GPIO_ADNP=y
-CONFIG_GPIO_MAX7300=y
-CONFIG_GPIO_MAX732X=y
CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCF857X=y
-CONFIG_GPIO_74X164=y
-CONFIG_GPIO_MAX7301=y
-CONFIG_GPIO_MC33880=y
-CONFIG_PINCTRL_MCP23S08=y
-CONFIG_PINCTRL_SX150X=y
CONFIG_SENSORS_DS620=y
CONFIG_SENSORS_MAX6639=y
CONFIG_WATCHDOG=y
CONFIG_PNX4008_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_ARMCLCD=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_DRM=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PL111=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_DEBUG=y
CONFIG_SND_DEBUG_VERBOSE=y
+CONFIG_SND_SEQUENCER=y
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_ARM is not set
# CONFIG_SND_SPI is not set
@@ -146,7 +135,6 @@ CONFIG_USB_LPC32XX=y
CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_SPI=y
CONFIG_NEW_LEDS=y
@@ -169,10 +157,10 @@ CONFIG_RTC_DRV_LPC32XX=y
CONFIG_DMADEVICES=y
CONFIG_AMBA_PL08X=y
CONFIG_STAGING=y
-CONFIG_LPC32XX_ADC=y
CONFIG_MEMORY=y
CONFIG_ARM_PL172_MPMC=y
CONFIG_IIO=y
+CONFIG_LPC32XX_ADC=y
CONFIG_MAX517=y
CONFIG_PWM=y
CONFIG_PWM_LPC32XX=y
@@ -186,18 +174,27 @@ CONFIG_JFFS2_FS_WBUF_VERIFY=y
CONFIG_UBIFS_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=5
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
-# CONFIG_ARM_UNWIND is not set
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig
new file mode 100644
index 000000000000..7c07f9893a0f
--- /dev/null
+++ b/arch/arm/configs/milbeaut_m10v_defconfig
@@ -0,0 +1,119 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_ARCH_MILBEAUT=y
+CONFIG_ARCH_MILBEAUT_M10V=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_VDSO is not set
+# CONFIG_CACHE_L2X0 is not set
+CONFIG_ARM_ERRATA_430973=y
+CONFIG_ARM_ERRATA_720789=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_ARM_ERRATA_754327=y
+CONFIG_ARM_ERRATA_764369=y
+CONFIG_ARM_ERRATA_775420=y
+CONFIG_ARM_ERRATA_798181=y
+CONFIG_SMP=y
+# CONFIG_SMP_ON_UP is not set
+# CONFIG_ARM_CPU_TOPOLOGY is not set
+CONFIG_HAVE_ARM_ARCH_TIMER=y
+CONFIG_NR_CPUS=16
+CONFIG_THUMB2_KERNEL=y
+# CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11 is not set
+# CONFIG_ARM_PATCH_IDIV is not set
+CONFIG_HIGHMEM=y
+CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_EFI=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_QORIQ_CPUFREQ=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=m
+CONFIG_CRYPTO_SHA1_ARM_CE=m
+CONFIG_CRYPTO_SHA2_ARM_CE=m
+CONFIG_CRYPTO_SHA512_ARM=m
+CONFIG_CRYPTO_AES_ARM=m
+CONFIG_CRYPTO_AES_ARM_BS=m
+CONFIG_CRYPTO_AES_ARM_CE=m
+CONFIG_CRYPTO_GHASH_ARM_CE=m
+CONFIG_CRYPTO_CRC32_ARM_CE=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMDLINE_PARTITION=y
+CONFIG_CMA=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_OF_OVERLAY=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_SRAM=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_MATRIXKMAP=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_DEV_BUS=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_SYNC_FILE=y
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_SOC_BRCMSTB=y
+CONFIG_MEMORY=y
+# CONFIG_ARM_PMU is not set
+CONFIG_EXT4_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SEQIV=m
+# CONFIG_CRYPTO_ECHAINIV is not set
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_ITU_T=m
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 5bee34a7ff2e..c75051b9392c 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -54,6 +54,8 @@ CONFIG_SOC_VF610=y
CONFIG_ARCH_KEYSTONE=y
CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MILBEAUT=y
+CONFIG_ARCH_MILBEAUT_M10V=y
CONFIG_ARCH_MVEBU=y
CONFIG_MACH_ARMADA_370=y
CONFIG_MACH_ARMADA_375=y
@@ -76,6 +78,7 @@ CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_EMEV2=y
CONFIG_ARCH_R7S72100=y
+CONFIG_ARCH_R7S9210=y
CONFIG_ARCH_R8A73A4=y
CONFIG_ARCH_R8A7740=y
CONFIG_ARCH_R8A7743=y
@@ -404,6 +407,7 @@ CONFIG_SPI_XILINX=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_PINCTRL_AS3722=y
+CONFIG_PINCTRL_RZA2=y
CONFIG_PINCTRL_PALMAS=y
CONFIG_PINCTRL_APQ8064=y
CONFIG_PINCTRL_APQ8084=y
@@ -631,6 +635,7 @@ CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
CONFIG_DRM_DUMB_VGA_DAC=m
CONFIG_DRM_NXP_PTN3460=m
CONFIG_DRM_PARADE_PS8622=m
+CONFIG_DRM_SII902X=m
CONFIG_DRM_SII9234=m
CONFIG_DRM_TOSHIBA_TC358764=m
CONFIG_DRM_I2C_ADV7511=m
@@ -641,7 +646,7 @@ CONFIG_DRM_STM_DSI=m
CONFIG_DRM_VC4=m
CONFIG_DRM_ETNAVIV=m
CONFIG_DRM_MXSFB=m
-CONFIG_FB_ARMCLCD=y
+CONFIG_DRM_PL111=m
CONFIG_FB_EFI=y
CONFIG_FB_WM8505=y
CONFIG_FB_SH_MOBILE_LCDC=y
@@ -826,6 +831,7 @@ CONFIG_RTC_DRV_MAX8997=m
CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_PCF85363=m
CONFIG_RTC_DRV_BQ32K=m
CONFIG_RTC_DRV_TWL4030=y
CONFIG_RTC_DRV_PALMAS=y
@@ -1028,3 +1034,5 @@ CONFIG_CRYPTO_AES_ARM_CE=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_CRC32_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_GCC_PLUGINS=y
+CONFIG_GCC_PLUGIN_STRUCTLEAK=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 0ac44acd5bc4..5f4c6aaa07f6 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -3,6 +3,7 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -10,16 +11,16 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_NOMADIK=y
CONFIG_MACH_NOMADIK_8815NHK=y
-CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -49,12 +50,12 @@ CONFIG_MTD=y
CONFIG_MTD_TESTS=m
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSMC=y
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
CONFIG_MTD_ONENAND_GENERIC=y
+CONFIG_MTD_NAND_ECC_SMC=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -75,7 +76,6 @@ CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -88,13 +88,22 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_GPIO=y
-CONFIG_DEBUG_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
CONFIG_GPIO_STMPE=y
# CONFIG_HWMON is not set
CONFIG_MFD_STMPE=y
+CONFIG_MFD_STW481X=y
CONFIG_REGULATOR=y
+CONFIG_DRM=y
+CONFIG_DRM_PANEL_TPO_TPG110=y
+CONFIG_DRM_PL111=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -105,6 +114,11 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMADEVICES=y
CONFIG_AMBA_PL08X=y
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_ST_ACCEL_3AXIS=y
+CONFIG_PWM=y
+CONFIG_PWM_STMPE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_FUSE_FS=y
@@ -121,13 +135,12 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_DES=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 9c6f436d1b12..3f03ec6d2644 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -25,16 +25,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_MULTI_V6=y
CONFIG_POWER_AVS_OMAP=y
CONFIG_POWER_AVS_OMAP_CLASS3=y
@@ -48,15 +38,8 @@ CONFIG_SOC_AM43XX=y
CONFIG_SOC_DRA7XX=y
CONFIG_ARM_THUMBEE=y
CONFIG_ARM_ERRATA_411920=y
-CONFIG_PCI=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_DRA7XX_EP=y
-CONFIG_PCI_ENDPOINT=y
-CONFIG_PCI_ENDPOINT_CONFIGFS=y
-CONFIG_PCI_EPF_TEST=m
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_CMA=y
CONFIG_SECCOMP=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
@@ -74,8 +57,27 @@ CONFIG_CPUFREQ_DT=m
CONFIG_ARM_TI_CPUFREQ=y
CONFIG_CPU_IDLE=y
CONFIG_KERNEL_MODE_NEON=y
-CONFIG_BINFMT_MISC=y
CONFIG_PM_DEBUG=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=m
+CONFIG_CRYPTO_SHA256_ARM=m
+CONFIG_CRYPTO_SHA512_ARM=m
+CONFIG_CRYPTO_AES_ARM=m
+CONFIG_CRYPTO_AES_ARM_BS=m
+CONFIG_CRYPTO_GHASH_ARM_CE=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=y
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -118,6 +120,12 @@ CONFIG_AF_RXRPC=m
CONFIG_RXKAD=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_DRA7XX_EP=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_PCI_ENDPOINT_CONFIGFS=y
+CONFIG_PCI_EPF_TEST=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
@@ -132,13 +140,13 @@ CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=m
+CONFIG_MTD_ONENAND=y
+CONFIG_MTD_ONENAND_VERIFY_WRITE=y
+CONFIG_MTD_ONENAND_OMAP2=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_ECC_BCH=y
CONFIG_MTD_NAND_OMAP2=y
CONFIG_MTD_NAND_OMAP_BCH=y
-CONFIG_MTD_ONENAND=y
-CONFIG_MTD_ONENAND_VERIFY_WRITE=y
-CONFIG_MTD_ONENAND_OMAP2=y
CONFIG_MTD_SPI_NOR=m
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
@@ -155,7 +163,6 @@ CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_AHCI_DM816=m
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
CONFIG_DM9000=y
@@ -283,11 +290,9 @@ CONFIG_HWMON=m
CONFIG_SENSORS_GPIO_FAN=m
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_TMP102=m
-CONFIG_THERMAL=m
CONFIG_THERMAL_GOV_FAIR_SHARE=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_CPU_THERMAL=y
-CONFIG_TI_SOC_THERMAL=m
CONFIG_TI_THERMAL=y
CONFIG_OMAP4_THERMAL=y
CONFIG_OMAP5_THERMAL=y
@@ -381,13 +386,12 @@ CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
-CONFIG_SND_SOC_TLV320AIC3X=m
CONFIG_SND_SOC_DAVINCI_MCASP=m
CONFIG_SND_SOC_NOKIA_RX51=m
-CONFIG_SND_SOC_OMAP_HDMI=m
-CONFIG_SND_SOC_OMAP_ABE_TWL6040=m
CONFIG_SND_SOC_OMAP3_PANDORA=m
CONFIG_SND_SOC_OMAP3_TWL4030=m
+CONFIG_SND_SOC_OMAP_ABE_TWL6040=m
+CONFIG_SND_SOC_OMAP_HDMI=m
CONFIG_SND_SOC_CPCAP=m
CONFIG_SND_SOC_TLV320AIC23_I2C=m
CONFIG_SND_SIMPLE_CARD=m
@@ -475,8 +479,6 @@ CONFIG_RTC_DRV_PALMAS=m
CONFIG_RTC_DRV_OMAP=m
CONFIG_RTC_DRV_CPCAP=m
CONFIG_DMADEVICES=y
-CONFIG_DMA_OMAP=y
-CONFIG_TI_EDMA=y
CONFIG_OMAP_IOMMU=y
CONFIG_REMOTEPROC=m
CONFIG_OMAP_REMOTEPROC=m
@@ -531,24 +533,8 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_SPLIT=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_SCHEDSTATS=y
-CONFIG_PROVE_LOCKING=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_SECURITY=y
CONFIG_CRYPTO_MICHAEL_MIC=y
-CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM_NEON=m
-CONFIG_CRYPTO_SHA256_ARM=m
-CONFIG_CRYPTO_SHA512_ARM=m
-CONFIG_CRYPTO_AES_ARM=m
-CONFIG_CRYPTO_AES_ARM_BS=m
-CONFIG_CRYPTO_GHASH_ARM_CE=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=y
@@ -557,3 +543,10 @@ CONFIG_LIBCRC32C=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_SPLIT=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 6bb506edb1f5..d4654755b09c 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -66,9 +66,6 @@ CONFIG_MACH_MIOA701=y
CONFIG_PXA_EZX=y
CONFIG_MACH_MP900C=y
CONFIG_ARCH_PXA_PALM=y
-CONFIG_MACH_RAUMFELD_RC=y
-CONFIG_MACH_RAUMFELD_CONNECTOR=y
-CONFIG_MACH_RAUMFELD_SPEAKER=y
CONFIG_PXA_SHARPSL=y
CONFIG_MACH_POODLE=y
CONFIG_MACH_CORGI=y
@@ -482,7 +479,6 @@ CONFIG_SND_PCM_OSS=m
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
-CONFIG_SND_PXA2XX_AC97=m
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
CONFIG_SND_ATMEL_SOC=m
@@ -498,7 +494,6 @@ CONFIG_SND_PXA2XX_SOC_E800=m
CONFIG_SND_PXA2XX_SOC_EM_X270=m
CONFIG_SND_PXA2XX_SOC_PALM27X=y
CONFIG_SND_SOC_ZYLONITE=m
-CONFIG_SND_SOC_RAUMFELD=m
CONFIG_SND_PXA2XX_SOC_HX4700=m
CONFIG_SND_PXA2XX_SOC_MAGICIAN=m
CONFIG_SND_PXA2XX_SOC_MIOA701=m
diff --git a/arch/arm/configs/raumfeld_defconfig b/arch/arm/configs/raumfeld_defconfig
deleted file mode 100644
index 2dd56e9a484e..000000000000
--- a/arch/arm/configs/raumfeld_defconfig
+++ /dev/null
@@ -1,197 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_PXA=y
-CONFIG_MACH_RAUMFELD_RC=y
-CONFIG_MACH_RAUMFELD_CONNECTOR=y
-CONFIG_MACH_RAUMFELD_SPEAKER=y
-CONFIG_NO_HZ=y
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_CMDLINE="console=ttyS0,115200 rw"
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_IDLE=y
-CONFIG_PM=y
-CONFIG_APM_EMULATION=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_SYN_COOKIES=y
-CONFIG_IPV6=y
-CONFIG_CFG80211=y
-CONFIG_MAC80211=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_NFTL=y
-CONFIG_NFTL_RW=y
-CONFIG_MTD_BLOCK2MTD=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_MARVELL=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_ISL29003=y
-CONFIG_IIO=y
-CONFIG_AD5446=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMSC911X=y
-CONFIG_LIBERTAS=y
-CONFIG_LIBERTAS_SDIO=m
-CONFIG_USB_USBNET=y
-# CONFIG_USB_NET_AX8817X is not set
-# CONFIG_USB_NET_NET1080 is not set
-CONFIG_USB_NET_MCS7830=y
-# CONFIG_USB_NET_CDC_SUBSET is not set
-# CONFIG_USB_NET_ZAURUS is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_EETI=m
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_GPIO_ROTARY_ENCODER=y
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_PXA=y
-CONFIG_SPI=y
-CONFIG_SPI_DEBUG=y
-CONFIG_SPI_GPIO=y
-CONFIG_SPI_SPIDEV=y
-CONFIG_DEBUG_GPIO=y
-CONFIG_W1_MASTER_GPIO=m
-CONFIG_POWER_SUPPLY=y
-CONFIG_PDA_POWER=y
-CONFIG_BATTERY_DS2760=m
-CONFIG_SENSORS_LIS3_SPI=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_DEBUG=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_REGULATOR_MAX8660=y
-CONFIG_FB=y
-CONFIG_FB_PXA=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_GENERIC is not set
-CONFIG_BACKLIGHT_PWM=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_PXA2XX_SOC=y
-CONFIG_SND_SOC_RAUMFELD=y
-CONFIG_HID_DRAGONRISE=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_TWINHAN=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_GREENASIA=y
-CONFIG_HID_SMARTJOYPLUS=y
-CONFIG_HID_TOPSEED=y
-CONFIG_HID_THRUSTMASTER=y
-CONFIG_HID_ZEROPLUS=y
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_MMC=y
-CONFIG_MMC_PXA=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_LT3593=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_BACKLIGHT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_PXA=y
-CONFIG_DMADEVICES=y
-CONFIG_UIO=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_FSCACHE=y
-CONFIG_FSCACHE_STATS=y
-CONFIG_CACHEFILES=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_UBIFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFS_FSCACHE=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=y
-CONFIG_NLS_CODEPAGE_775=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_CODEPAGE_852=y
-CONFIG_NLS_CODEPAGE_855=y
-CONFIG_NLS_CODEPAGE_857=y
-CONFIG_NLS_CODEPAGE_860=y
-CONFIG_NLS_CODEPAGE_861=y
-CONFIG_NLS_CODEPAGE_862=y
-CONFIG_NLS_CODEPAGE_863=y
-CONFIG_NLS_CODEPAGE_864=y
-CONFIG_NLS_CODEPAGE_865=y
-CONFIG_NLS_CODEPAGE_866=y
-CONFIG_NLS_CODEPAGE_869=y
-CONFIG_NLS_CODEPAGE_936=y
-CONFIG_NLS_CODEPAGE_950=y
-CONFIG_NLS_CODEPAGE_932=y
-CONFIG_NLS_CODEPAGE_949=y
-CONFIG_NLS_CODEPAGE_874=y
-CONFIG_NLS_ISO8859_8=y
-CONFIG_NLS_CODEPAGE_1250=y
-CONFIG_NLS_CODEPAGE_1251=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=y
-CONFIG_NLS_ISO8859_3=y
-CONFIG_NLS_ISO8859_4=y
-CONFIG_NLS_ISO8859_5=y
-CONFIG_NLS_ISO8859_6=y
-CONFIG_NLS_ISO8859_7=y
-CONFIG_NLS_ISO8859_9=y
-CONFIG_NLS_ISO8859_13=y
-CONFIG_NLS_ISO8859_14=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_KOI8_R=y
-CONFIG_NLS_KOI8_U=y
-CONFIG_NLS_UTF8=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig
index a077597369f1..fd4f28aabda6 100644
--- a/arch/arm/configs/s5pv210_defconfig
+++ b/arch/arm/configs/s5pv210_defconfig
@@ -1,24 +1,30 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_CGROUPS=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_ARCH_S5PV210=y
CONFIG_VMSPLIT_2G=y
-CONFIG_PREEMPT=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -27,6 +33,11 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_BCM=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -44,21 +55,35 @@ CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PWM_VIBRA=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
CONFIG_HW_RANDOM=y
CONFIG_I2C_GPIO=y
+CONFIG_I2C_S3C2410=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
CONFIG_POWER_SUPPLY=y
CONFIG_BATTERY_MAX17040=y
# CONFIG_HWMON is not set
CONFIG_MFD_MAX8998=y
CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_MAX8998=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
CONFIG_DRM=y
CONFIG_DRM_EXYNOS=y
CONFIG_DRM_EXYNOS_FIMD=y
CONFIG_DRM_EXYNOS_DPI=y
+CONFIG_DRM_EXYNOS_ROTATOR=y
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_EHCI_HCD=y
@@ -72,6 +97,9 @@ CONFIG_MMC_SDHCI_S3C=y
CONFIG_MMC_SDHCI_S3C_DMA=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MAX8998=m
+CONFIG_DMADEVICES=y
+CONFIG_PWM=y
+CONFIG_PWM_SAMSUNG=y
CONFIG_PHY_SAMSUNG_USB2=m
CONFIG_PHY_S5PV210_USB2=y
CONFIG_EXT2_FS=y
@@ -87,6 +115,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
+CONFIG_CRC_CCITT=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
@@ -96,7 +125,3 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_DEBUG_S3C_UART1=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 9e5a5ade6cab..9b0efac101ab 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -8,29 +8,8 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
CONFIG_ARCH_RENESAS=y
-CONFIG_ARCH_EMEV2=y
-CONFIG_ARCH_R7S72100=y
-CONFIG_ARCH_R8A73A4=y
-CONFIG_ARCH_R8A7740=y
-CONFIG_ARCH_R8A7743=y
-CONFIG_ARCH_R8A7744=y
-CONFIG_ARCH_R8A7745=y
-CONFIG_ARCH_R8A77470=y
-CONFIG_ARCH_R8A7778=y
-CONFIG_ARCH_R8A7779=y
-CONFIG_ARCH_R8A7790=y
-CONFIG_ARCH_R8A7791=y
-CONFIG_ARCH_R8A7792=y
-CONFIG_ARCH_R8A7793=y
-CONFIG_ARCH_R8A7794=y
-CONFIG_ARCH_R9A06G032=y
-CONFIG_ARCH_SH73A0=y
CONFIG_PL310_ERRATA_588369=y
CONFIG_ARM_ERRATA_754322=y
-CONFIG_PCI=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_RCAR_GEN2=y
-CONFIG_PCIE_RCAR=y
CONFIG_SMP=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
@@ -58,6 +37,10 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_CAN=y
CONFIG_CAN_RCAR=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_RCAR_GEN2=y
+CONFIG_PCIE_RCAR=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
@@ -91,7 +74,6 @@ CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_8250_EM=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MUX=y
CONFIG_I2C_DEMUX_PINCTRL=y
CONFIG_I2C_EMEV2=y
CONFIG_I2C_GPIO=y
@@ -104,6 +86,7 @@ CONFIG_SPI_RSPI=y
CONFIG_SPI_SH_MSIOF=y
CONFIG_SPI_SH_HSPI=y
CONFIG_PINCTRL_RZA1=y
+CONFIG_PINCTRL_RZA2=y
CONFIG_GPIO_EM=y
CONFIG_GPIO_RCAR=y
CONFIG_GPIO_PCF857X=y
@@ -177,17 +160,35 @@ CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_RS5C372=y
+CONFIG_RTC_DRV_PCF85363=y
CONFIG_RTC_DRV_BQ32K=y
CONFIG_RTC_DRV_S35390A=y
CONFIG_RTC_DRV_RX8581=y
CONFIG_RTC_DRV_DA9063=y
CONFIG_DMADEVICES=y
-CONFIG_SH_DMAE=y
CONFIG_RCAR_DMAC=y
CONFIG_RENESAS_USB_DMAC=y
CONFIG_STAGING=y
CONFIG_STAGING_BOARD=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ARCH_EMEV2=y
+CONFIG_ARCH_R7S72100=y
+CONFIG_ARCH_R7S9210=y
+CONFIG_ARCH_R8A73A4=y
+CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7743=y
+CONFIG_ARCH_R8A7744=y
+CONFIG_ARCH_R8A7745=y
+CONFIG_ARCH_R8A77470=y
+CONFIG_ARCH_R8A7778=y
+CONFIG_ARCH_R8A7779=y
+CONFIG_ARCH_R8A7790=y
+CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7792=y
+CONFIG_ARCH_R8A7793=y
+CONFIG_ARCH_R8A7794=y
+CONFIG_ARCH_R9A06G032=y
+CONFIG_ARCH_SH73A0=y
CONFIG_IIO=y
CONFIG_AK8975=y
CONFIG_PWM=y
@@ -211,4 +212,3 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_KERNEL=y
-# CONFIG_ARM_UNWIND is not set
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 371fca4e1ab7..08d1b3e11d68 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -9,27 +9,20 @@ CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SOCFPGA=y
CONFIG_ARM_THUMBEE=y
-CONFIG_PCI=y
-CONFIG_PCI_MSI=y
-CONFIG_PCIE_ALTERA=y
-CONFIG_PCIE_ALTERA_MSI=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -48,6 +41,10 @@ CONFIG_CAN=y
CONFIG_CAN_C_CAN=y
CONFIG_CAN_C_CAN_PLATFORM=y
CONFIG_CAN_DEBUG_DEVICES=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_ALTERA=y
+CONFIG_PCIE_ALTERA_MSI=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -60,7 +57,7 @@ CONFIG_MTD_SPI_NOR=y
# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
CONFIG_SPI_CADENCE_QUADSPI=y
CONFIG_OF_OVERLAY=y
-CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -131,12 +128,12 @@ CONFIG_DMADEVICES=y
CONFIG_PL330_DMA=y
CONFIG_DMATEST=m
CONFIG_FPGA=y
-CONFIG_FPGA_REGION=y
CONFIG_FPGA_MGR_SOCFPGA=y
CONFIG_FPGA_MGR_SOCFPGA_A10=y
CONFIG_FPGA_BRIDGE=y
CONFIG_SOCFPGA_FPGA_BRIDGE=y
CONFIG_ALTERA_FREEZE_BRIDGE=y
+CONFIG_FPGA_REGION=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index 2c5e8df33191..f1b52fb3461b 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -52,8 +52,10 @@ CONFIG_GPIO_PL061=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_ARM_SP805_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_ARMCLCD=y
+CONFIG_DRM=y
+CONFIG_DRM_PL111=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
index ba8e6a32fdc9..bc53bcaa772e 100644
--- a/arch/arm/crypto/aes-ce-core.S
+++ b/arch/arm/crypto/aes-ce-core.S
@@ -317,25 +317,27 @@ ENTRY(ce_aes_ctr_encrypt)
.Lctrloop:
vmov q0, q6
bl aes_encrypt
- subs r4, r4, #1
- bmi .Lctrtailblock @ blocks < 0 means tail block
- vld1.8 {q3}, [r1]!
- veor q3, q0, q3
- vst1.8 {q3}, [r0]!
adds r6, r6, #1 @ increment BE ctr
rev ip, r6
vmov s27, ip
bcs .Lctrcarry
- teq r4, #0
+
+.Lctrcarrydone:
+ subs r4, r4, #1
+ bmi .Lctrtailblock @ blocks < 0 means tail block
+ vld1.8 {q3}, [r1]!
+ veor q3, q0, q3
+ vst1.8 {q3}, [r0]!
bne .Lctrloop
+
.Lctrout:
- vst1.8 {q6}, [r5]
+ vst1.8 {q6}, [r5] @ return next CTR value
pop {r4-r6, pc}
.Lctrtailblock:
- vst1.8 {q0}, [r0, :64] @ return just the key stream
- pop {r4-r6, pc}
+ vst1.8 {q0}, [r0, :64] @ return the key stream
+ b .Lctrout
.Lctrcarry:
.irp sreg, s26, s25, s24
@@ -344,11 +346,9 @@ ENTRY(ce_aes_ctr_encrypt)
adds ip, ip, #1
rev ip, ip
vmov \sreg, ip
- bcc 0f
+ bcc .Lctrcarrydone
.endr
-0: teq r4, #0
- beq .Lctrout
- b .Lctrloop
+ b .Lctrcarrydone
ENDPROC(ce_aes_ctr_encrypt)
/*
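For reference, the rearranged CTR loop above increments the big-endian counter before storing the output block, so the carry-propagation path can rejoin the main loop at .Lctrcarrydone instead of duplicating the exit checks. The C fragment below is only an illustrative sketch of that counter arithmetic, not part of the patch; it works byte-wise where the assembly works 32 bits at a time (r6 plus s26/s25/s24).

#include <stdint.h>

/* Sketch only: increment a 128-bit big-endian CTR block, propagating the
 * carry from the least significant byte upward. */
static void be128_ctr_increment(uint8_t ctr[16])
{
	int i;

	for (i = 15; i >= 0; i--)
		if (++ctr[i] != 0)	/* no wrap here, so the carry stops */
			break;
}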
diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
index ce45ba0c0687..86be258a803f 100644
--- a/arch/arm/crypto/crct10dif-ce-core.S
+++ b/arch/arm/crypto/crct10dif-ce-core.S
@@ -2,12 +2,14 @@
// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
+// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
@@ -54,19 +56,11 @@
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Function API:
-// UINT16 crc_t10dif_pcl(
-// UINT16 init_crc, //initial CRC value, 16 bits
-// const unsigned char *buf, //buffer pointer to calculate CRC on
-// UINT64 len //buffer length in bytes (64-bit data)
-// );
-//
// Reference paper titled "Fast CRC Computation for Generic
// Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//
-//
#include <linux/linkage.h>
#include <asm/assembler.h>
@@ -78,13 +72,14 @@
#endif
.text
+ .arch armv7-a
.fpu crypto-neon-fp-armv8
- arg1_low32 .req r0
- arg2 .req r1
- arg3 .req r2
+ init_crc .req r0
+ buf .req r1
+ len .req r2
- qzr .req q13
+ fold_consts_ptr .req ip
q0l .req d0
q0h .req d1
@@ -102,82 +97,35 @@
q6h .req d13
q7l .req d14
q7h .req d15
-
-ENTRY(crc_t10dif_pmull)
- vmov.i8 qzr, #0 // init zero register
-
- // adjust the 16-bit initial_crc value, scale it to 32 bits
- lsl arg1_low32, arg1_low32, #16
-
- // check if smaller than 256
- cmp arg3, #256
-
- // for sizes less than 128, we can't fold 64B at a time...
- blt _less_than_128
-
- // load the initial crc value
- // crc value does not need to be byte-reflected, but it needs
- // to be moved to the high part of the register.
- // because data will be byte-reflected and will align with
- // initial crc at correct place.
- vmov s0, arg1_low32 // initial crc
- vext.8 q10, qzr, q0, #4
-
- // receive the initial 64B data, xor the initial crc value
- vld1.64 {q0-q1}, [arg2, :128]!
- vld1.64 {q2-q3}, [arg2, :128]!
- vld1.64 {q4-q5}, [arg2, :128]!
- vld1.64 {q6-q7}, [arg2, :128]!
-CPU_LE( vrev64.8 q0, q0 )
-CPU_LE( vrev64.8 q1, q1 )
-CPU_LE( vrev64.8 q2, q2 )
-CPU_LE( vrev64.8 q3, q3 )
-CPU_LE( vrev64.8 q4, q4 )
-CPU_LE( vrev64.8 q5, q5 )
-CPU_LE( vrev64.8 q6, q6 )
-CPU_LE( vrev64.8 q7, q7 )
-
- vswp d0, d1
- vswp d2, d3
- vswp d4, d5
- vswp d6, d7
- vswp d8, d9
- vswp d10, d11
- vswp d12, d13
- vswp d14, d15
-
- // XOR the initial_crc value
- veor.8 q0, q0, q10
-
- adr ip, rk3
- vld1.64 {q10}, [ip, :128] // xmm10 has rk3 and rk4
-
- //
- // we subtract 256 instead of 128 to save one instruction from the loop
- //
- sub arg3, arg3, #256
-
- // at this section of the code, there is 64*x+y (0<=y<64) bytes of
- // buffer. The _fold_64_B_loop will fold 64B at a time
- // until we have 64+y Bytes of buffer
-
-
- // fold 64B at a time. This section of the code folds 4 vector
- // registers in parallel
-_fold_64_B_loop:
-
- .macro fold64, reg1, reg2
- vld1.64 {q11-q12}, [arg2, :128]!
-
- vmull.p64 q8, \reg1\()h, d21
- vmull.p64 \reg1, \reg1\()l, d20
- vmull.p64 q9, \reg2\()h, d21
- vmull.p64 \reg2, \reg2\()l, d20
-
-CPU_LE( vrev64.8 q11, q11 )
-CPU_LE( vrev64.8 q12, q12 )
- vswp d22, d23
- vswp d24, d25
+ q8l .req d16
+ q8h .req d17
+ q9l .req d18
+ q9h .req d19
+ q10l .req d20
+ q10h .req d21
+ q11l .req d22
+ q11h .req d23
+ q12l .req d24
+ q12h .req d25
+
+ FOLD_CONSTS .req q10
+ FOLD_CONST_L .req q10l
+ FOLD_CONST_H .req q10h
+
+ // Fold reg1, reg2 into the next 32 data bytes, storing the result back
+ // into reg1, reg2.
+ .macro fold_32_bytes, reg1, reg2
+ vld1.64 {q11-q12}, [buf]!
+
+ vmull.p64 q8, \reg1\()h, FOLD_CONST_H
+ vmull.p64 \reg1, \reg1\()l, FOLD_CONST_L
+ vmull.p64 q9, \reg2\()h, FOLD_CONST_H
+ vmull.p64 \reg2, \reg2\()l, FOLD_CONST_L
+
+CPU_LE( vrev64.8 q11, q11 )
+CPU_LE( vrev64.8 q12, q12 )
+ vswp q11l, q11h
+ vswp q12l, q12h
veor.8 \reg1, \reg1, q8
veor.8 \reg2, \reg2, q9
@@ -185,242 +133,248 @@ CPU_LE( vrev64.8 q12, q12 )
veor.8 \reg2, \reg2, q12
.endm
- fold64 q0, q1
- fold64 q2, q3
- fold64 q4, q5
- fold64 q6, q7
-
- subs arg3, arg3, #128
-
- // check if there is another 64B in the buffer to be able to fold
- bge _fold_64_B_loop
-
- // at this point, the buffer pointer is pointing at the last y Bytes
- // of the buffer the 64B of folded data is in 4 of the vector
- // registers: v0, v1, v2, v3
-
- // fold the 8 vector registers to 1 vector register with different
- // constants
-
- adr ip, rk9
- vld1.64 {q10}, [ip, :128]!
-
- .macro fold16, reg, rk
- vmull.p64 q8, \reg\()l, d20
- vmull.p64 \reg, \reg\()h, d21
- .ifnb \rk
- vld1.64 {q10}, [ip, :128]!
+ // Fold src_reg into dst_reg, optionally loading the next fold constants
+ .macro fold_16_bytes, src_reg, dst_reg, load_next_consts
+ vmull.p64 q8, \src_reg\()l, FOLD_CONST_L
+ vmull.p64 \src_reg, \src_reg\()h, FOLD_CONST_H
+ .ifnb \load_next_consts
+ vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
.endif
- veor.8 q7, q7, q8
- veor.8 q7, q7, \reg
+ veor.8 \dst_reg, \dst_reg, q8
+ veor.8 \dst_reg, \dst_reg, \src_reg
.endm
- fold16 q0, rk11
- fold16 q1, rk13
- fold16 q2, rk15
- fold16 q3, rk17
- fold16 q4, rk19
- fold16 q5, rk1
- fold16 q6
-
- // instead of 64, we add 48 to the loop counter to save 1 instruction
- // from the loop instead of a cmp instruction, we use the negative
- // flag with the jl instruction
- adds arg3, arg3, #(128-16)
- blt _final_reduction_for_128
-
- // now we have 16+y bytes left to reduce. 16 Bytes is in register v7
- // and the rest is in memory. We can fold 16 bytes at a time if y>=16
- // continue folding 16B at a time
-
-_16B_reduction_loop:
- vmull.p64 q8, d14, d20
- vmull.p64 q7, d15, d21
- veor.8 q7, q7, q8
+ .macro __adrl, out, sym
+ movw \out, #:lower16:\sym
+ movt \out, #:upper16:\sym
+ .endm
- vld1.64 {q0}, [arg2, :128]!
-CPU_LE( vrev64.8 q0, q0 )
- vswp d0, d1
- veor.8 q7, q7, q0
- subs arg3, arg3, #16
-
- // instead of a cmp instruction, we utilize the flags with the
- // jge instruction equivalent of: cmp arg3, 16-16
- // check if there is any more 16B in the buffer to be able to fold
- bge _16B_reduction_loop
-
- // now we have 16+z bytes left to reduce, where 0<= z < 16.
- // first, we reduce the data in the xmm7 register
-
-_final_reduction_for_128:
- // check if any more data to fold. If not, compute the CRC of
- // the final 128 bits
- adds arg3, arg3, #16
- beq _128_done
-
- // here we are getting data that is less than 16 bytes.
- // since we know that there was data before the pointer, we can
- // offset the input pointer before the actual point, to receive
- // exactly 16 bytes. after that the registers need to be adjusted.
-_get_last_two_regs:
- add arg2, arg2, arg3
- sub arg2, arg2, #16
- vld1.64 {q1}, [arg2]
-CPU_LE( vrev64.8 q1, q1 )
- vswp d2, d3
-
- // get rid of the extra data that was loaded before
- // load the shift constant
- adr ip, tbl_shf_table + 16
- sub ip, ip, arg3
- vld1.8 {q0}, [ip]
-
- // shift v2 to the left by arg3 bytes
- vtbl.8 d4, {d14-d15}, d0
- vtbl.8 d5, {d14-d15}, d1
-
- // shift v7 to the right by 16-arg3 bytes
- vmov.i8 q9, #0x80
- veor.8 q0, q0, q9
- vtbl.8 d18, {d14-d15}, d0
- vtbl.8 d19, {d14-d15}, d1
-
- // blend
- vshr.s8 q0, q0, #7 // convert to 8-bit mask
- vbsl.8 q0, q2, q1
-
- // fold 16 Bytes
- vmull.p64 q8, d18, d20
- vmull.p64 q7, d19, d21
+//
+// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
+//
+// Assumes len >= 16.
+//
+ENTRY(crc_t10dif_pmull)
+
+ // For sizes less than 256 bytes, we can't fold 128 bytes at a time.
+ cmp len, #256
+ blt .Lless_than_256_bytes
+
+ __adrl fold_consts_ptr, .Lfold_across_128_bytes_consts
+
+ // Load the first 128 data bytes. Byte swapping is necessary to make
+ // the bit order match the polynomial coefficient order.
+ vld1.64 {q0-q1}, [buf]!
+ vld1.64 {q2-q3}, [buf]!
+ vld1.64 {q4-q5}, [buf]!
+ vld1.64 {q6-q7}, [buf]!
+CPU_LE( vrev64.8 q0, q0 )
+CPU_LE( vrev64.8 q1, q1 )
+CPU_LE( vrev64.8 q2, q2 )
+CPU_LE( vrev64.8 q3, q3 )
+CPU_LE( vrev64.8 q4, q4 )
+CPU_LE( vrev64.8 q5, q5 )
+CPU_LE( vrev64.8 q6, q6 )
+CPU_LE( vrev64.8 q7, q7 )
+ vswp q0l, q0h
+ vswp q1l, q1h
+ vswp q2l, q2h
+ vswp q3l, q3h
+ vswp q4l, q4h
+ vswp q5l, q5h
+ vswp q6l, q6h
+ vswp q7l, q7h
+
+ // XOR the first 16 data *bits* with the initial CRC value.
+ vmov.i8 q8h, #0
+ vmov.u16 q8h[3], init_crc
+ veor q0h, q0h, q8h
+
+ // Load the constants for folding across 128 bytes.
+ vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
+
+ // Subtract 128 for the 128 data bytes just consumed. Subtract another
+ // 128 to simplify the termination condition of the following loop.
+ sub len, len, #256
+
+ // While >= 128 data bytes remain (not counting q0-q7), fold the 128
+ // bytes q0-q7 into them, storing the result back into q0-q7.
+.Lfold_128_bytes_loop:
+ fold_32_bytes q0, q1
+ fold_32_bytes q2, q3
+ fold_32_bytes q4, q5
+ fold_32_bytes q6, q7
+ subs len, len, #128
+ bge .Lfold_128_bytes_loop
+
+ // Now fold the 112 bytes in q0-q6 into the 16 bytes in q7.
+
+ // Fold across 64 bytes.
+ vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
+ fold_16_bytes q0, q4
+ fold_16_bytes q1, q5
+ fold_16_bytes q2, q6
+ fold_16_bytes q3, q7, 1
+ // Fold across 32 bytes.
+ fold_16_bytes q4, q6
+ fold_16_bytes q5, q7, 1
+ // Fold across 16 bytes.
+ fold_16_bytes q6, q7
+
+ // Add 128 to get the correct number of data bytes remaining in 0...127
+ // (not counting q7), following the previous extra subtraction by 128.
+ // Then subtract 16 to simplify the termination condition of the
+ // following loop.
+ adds len, len, #(128-16)
+
+ // While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7
+ // into them, storing the result back into q7.
+ blt .Lfold_16_bytes_loop_done
+.Lfold_16_bytes_loop:
+ vmull.p64 q8, q7l, FOLD_CONST_L
+ vmull.p64 q7, q7h, FOLD_CONST_H
veor.8 q7, q7, q8
+ vld1.64 {q0}, [buf]!
+CPU_LE( vrev64.8 q0, q0 )
+ vswp q0l, q0h
veor.8 q7, q7, q0
-
-_128_done:
- // compute crc of a 128-bit value
- vldr d20, rk5
- vldr d21, rk6 // rk5 and rk6 in xmm10
-
- // 64b fold
- vext.8 q0, qzr, q7, #8
- vmull.p64 q7, d15, d20
+ subs len, len, #16
+ bge .Lfold_16_bytes_loop
+
+.Lfold_16_bytes_loop_done:
+ // Add 16 to get the correct number of data bytes remaining in 0...15
+ // (not counting q7), following the previous extra subtraction by 16.
+ adds len, len, #16
+ beq .Lreduce_final_16_bytes
+
+.Lhandle_partial_segment:
+ // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
+ // 16 bytes are in q7 and the rest are the remaining data in 'buf'. To
+ // do this without needing a fold constant for each possible 'len',
+ // redivide the bytes into a first chunk of 'len' bytes and a second
+ // chunk of 16 bytes, then fold the first chunk into the second.
+
+ // q0 = last 16 original data bytes
+ add buf, buf, len
+ sub buf, buf, #16
+ vld1.64 {q0}, [buf]
+CPU_LE( vrev64.8 q0, q0 )
+ vswp q0l, q0h
+
+ // q1 = high order part of second chunk: q7 left-shifted by 'len' bytes.
+ __adrl r3, .Lbyteshift_table + 16
+ sub r3, r3, len
+ vld1.8 {q2}, [r3]
+ vtbl.8 q1l, {q7l-q7h}, q2l
+ vtbl.8 q1h, {q7l-q7h}, q2h
+
+ // q3 = first chunk: q7 right-shifted by '16-len' bytes.
+ vmov.i8 q3, #0x80
+ veor.8 q2, q2, q3
+ vtbl.8 q3l, {q7l-q7h}, q2l
+ vtbl.8 q3h, {q7l-q7h}, q2h
+
+ // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
+ vshr.s8 q2, q2, #7
+
+ // q2 = second chunk: 'len' bytes from q0 (low-order bytes),
+ // then '16-len' bytes from q1 (high-order bytes).
+ vbsl.8 q2, q1, q0
+
+ // Fold the first chunk into the second chunk, storing the result in q7.
+ vmull.p64 q0, q3l, FOLD_CONST_L
+ vmull.p64 q7, q3h, FOLD_CONST_H
veor.8 q7, q7, q0
+ veor.8 q7, q7, q2
+
+.Lreduce_final_16_bytes:
+ // Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC.
+
+ // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
+ vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
+
+ // Fold the high 64 bits into the low 64 bits, while also multiplying by
+ // x^64. This produces a 128-bit value congruent to x^64 * M(x) and
+ // whose low 48 bits are 0.
+ vmull.p64 q0, q7h, FOLD_CONST_H // high bits * x^48 * (x^80 mod G(x))
+ veor.8 q0h, q0h, q7l // + low bits * x^64
+
+ // Fold the high 32 bits into the low 96 bits. This produces a 96-bit
+ // value congruent to x^64 * M(x) and whose low 48 bits are 0.
+ vmov.i8 q1, #0
+ vmov s4, s3 // extract high 32 bits
+ vmov s3, s5 // zero high 32 bits
+ vmull.p64 q1, q1l, FOLD_CONST_L // high 32 bits * x^48 * (x^48 mod G(x))
+ veor.8 q0, q0, q1 // + low bits
+
+ // Load G(x) and floor(x^48 / G(x)).
+ vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]
+
+ // Use Barrett reduction to compute the final CRC value.
+ vmull.p64 q1, q0h, FOLD_CONST_H // high 32 bits * floor(x^48 / G(x))
+ vshr.u64 q1l, q1l, #32 // /= x^32
+ vmull.p64 q1, q1l, FOLD_CONST_L // *= G(x)
+ vshr.u64 q0l, q0l, #48
+ veor.8 q0l, q0l, q1l // + low 16 nonzero bits
+ // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0.
+
+ vmov.u16 r0, q0l[0]
+ bx lr
- // 32b fold
- vext.8 q0, q7, qzr, #12
- vmov s31, s3
- vmull.p64 q0, d0, d21
- veor.8 q7, q0, q7
-
- // barrett reduction
-_barrett:
- vldr d20, rk7
- vldr d21, rk8
-
- vmull.p64 q0, d15, d20
- vext.8 q0, qzr, q0, #12
- vmull.p64 q0, d1, d21
- vext.8 q0, qzr, q0, #12
- veor.8 q7, q7, q0
- vmov r0, s29
+.Lless_than_256_bytes:
+ // Checksumming a buffer of length 16...255 bytes
-_cleanup:
- // scale the result back to 16 bits
- lsr r0, r0, #16
- bx lr
+ __adrl fold_consts_ptr, .Lfold_across_16_bytes_consts
-_less_than_128:
- teq arg3, #0
- beq _cleanup
+ // Load the first 16 data bytes.
+ vld1.64 {q7}, [buf]!
+CPU_LE( vrev64.8 q7, q7 )
+ vswp q7l, q7h
- vmov.i8 q0, #0
- vmov s3, arg1_low32 // get the initial crc value
+ // XOR the first 16 data *bits* with the initial CRC value.
+ vmov.i8 q0h, #0
+ vmov.u16 q0h[3], init_crc
+ veor.8 q7h, q7h, q0h
- vld1.64 {q7}, [arg2, :128]!
-CPU_LE( vrev64.8 q7, q7 )
- vswp d14, d15
- veor.8 q7, q7, q0
+ // Load the fold-across-16-bytes constants.
+ vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
- cmp arg3, #16
- beq _128_done // exactly 16 left
- blt _less_than_16_left
-
- // now if there is, load the constants
- vldr d20, rk1
- vldr d21, rk2 // rk1 and rk2 in xmm10
-
- // check if there is enough buffer to be able to fold 16B at a time
- subs arg3, arg3, #32
- addlt arg3, arg3, #16
- blt _get_last_two_regs
- b _16B_reduction_loop
-
-_less_than_16_left:
- // shl r9, 4
- adr ip, tbl_shf_table + 16
- sub ip, ip, arg3
- vld1.8 {q0}, [ip]
- vmov.i8 q9, #0x80
- veor.8 q0, q0, q9
- vtbl.8 d18, {d14-d15}, d0
- vtbl.8 d15, {d14-d15}, d1
- vmov d14, d18
- b _128_done
+ cmp len, #16
+ beq .Lreduce_final_16_bytes // len == 16
+ subs len, len, #32
+ addlt len, len, #16
+ blt .Lhandle_partial_segment // 17 <= len <= 31
+ b .Lfold_16_bytes_loop // 32 <= len <= 255
ENDPROC(crc_t10dif_pmull)
-// precomputed constants
-// these constants are precomputed from the poly:
-// 0x8bb70000 (0x8bb7 scaled to 32 bits)
+ .section ".rodata", "a"
.align 4
-// Q = 0x18BB70000
-// rk1 = 2^(32*3) mod Q << 32
-// rk2 = 2^(32*5) mod Q << 32
-// rk3 = 2^(32*15) mod Q << 32
-// rk4 = 2^(32*17) mod Q << 32
-// rk5 = 2^(32*3) mod Q << 32
-// rk6 = 2^(32*2) mod Q << 32
-// rk7 = floor(2^64/Q)
-// rk8 = Q
-
-rk3: .quad 0x9d9d000000000000
-rk4: .quad 0x7cf5000000000000
-rk5: .quad 0x2d56000000000000
-rk6: .quad 0x1368000000000000
-rk7: .quad 0x00000001f65a57f8
-rk8: .quad 0x000000018bb70000
-rk9: .quad 0xceae000000000000
-rk10: .quad 0xbfd6000000000000
-rk11: .quad 0x1e16000000000000
-rk12: .quad 0x713c000000000000
-rk13: .quad 0xf7f9000000000000
-rk14: .quad 0x80a6000000000000
-rk15: .quad 0x044c000000000000
-rk16: .quad 0xe658000000000000
-rk17: .quad 0xad18000000000000
-rk18: .quad 0xa497000000000000
-rk19: .quad 0x6ee3000000000000
-rk20: .quad 0xe7b5000000000000
-rk1: .quad 0x2d56000000000000
-rk2: .quad 0x06df000000000000
-
-tbl_shf_table:
-// use these values for shift constants for the tbl/tbx instruction
-// different alignments result in values as shown:
-// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
-// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
-// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
-// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
-// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
-// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
-// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
-// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
-// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
-// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
-// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
-// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
-// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
-// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
-// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
+// Fold constants precomputed from the polynomial 0x18bb7
+// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+.Lfold_across_128_bytes_consts:
+ .quad 0x0000000000006123 // x^(8*128) mod G(x)
+ .quad 0x0000000000002295 // x^(8*128+64) mod G(x)
+// .Lfold_across_64_bytes_consts:
+ .quad 0x0000000000001069 // x^(4*128) mod G(x)
+ .quad 0x000000000000dd31 // x^(4*128+64) mod G(x)
+// .Lfold_across_32_bytes_consts:
+ .quad 0x000000000000857d // x^(2*128) mod G(x)
+ .quad 0x0000000000007acc // x^(2*128+64) mod G(x)
+.Lfold_across_16_bytes_consts:
+ .quad 0x000000000000a010 // x^(1*128) mod G(x)
+ .quad 0x0000000000001faa // x^(1*128+64) mod G(x)
+// .Lfinal_fold_consts:
+ .quad 0x1368000000000000 // x^48 * (x^48 mod G(x))
+ .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x))
+// .Lbarrett_reduction_consts:
+ .quad 0x0000000000018bb7 // G(x)
+ .quad 0x00000001f65a57f8 // floor(x^48 / G(x))
+
+// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
+// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
+// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
+.Lbyteshift_table:
.byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
.byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
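To make the folding and Barrett-reduction comments above easier to check, here is a plain bit-at-a-time CRC-T10DIF in C. It is an illustrative sketch, not part of the patch: crc_t10dif_pmull() computes the same value, but folds 128 bytes per iteration with vmull.p64 and finishes with a Barrett reduction against G(x) = 0x18bb7.

#include <stddef.h>
#include <stdint.h>

/* Sketch only: MSB-first CRC-16 over G(x) = 0x18bb7 (CRC-T10DIF).
 * The caller passes the running CRC (0 for a fresh computation);
 * there is no bit reflection and no final XOR. */
static uint16_t crc_t10dif_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
					     : crc << 1;
	}
	return crc;
}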
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
index d428355cf38d..3d6b800b8396 100644
--- a/arch/arm/crypto/crct10dif-ce-glue.c
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
@@ -21,7 +21,7 @@
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
-asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u32 len);
+asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
static int crct10dif_init(struct shash_desc *desc)
{
@@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u16 *crc = shash_desc_ctx(desc);
- unsigned int l;
- if (!may_use_simd()) {
- *crc = crc_t10dif_generic(*crc, data, length);
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull(*crc, data, length);
+ kernel_neon_end();
} else {
- if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
- l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
- ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
-
- *crc = crc_t10dif_generic(*crc, data, l);
-
- length -= l;
- data += l;
- }
- if (length > 0) {
- kernel_neon_begin();
- *crc = crc_t10dif_pmull(*crc, data, length);
- kernel_neon_end();
- }
+ *crc = crc_t10dif_generic(*crc, data, length);
}
+
return 0;
}
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index b9ec44060ed3..a03cf4dfb781 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -212,10 +212,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index 3b58300d611c..054aae0edfce 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -93,10 +93,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
index fb5d15048c0b..788c17b56ecc 100644
--- a/arch/arm/crypto/sha512-armv4.pl
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
index b1c334a49cda..710ea309769e 100644
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index c883fcbe93b6..46d41140df27 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -25,7 +25,6 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
-extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index ca56537b61bc..50e89869178a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -147,6 +148,13 @@ struct kvm_cpu_context {
typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct vcpu_reset_state {
+ unsigned long pc;
+ unsigned long r0;
+ bool be;
+ bool reset;
+};
+
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
+ struct vcpu_reset_state reset_state;
+
/* Detect first run of a vcpu */
bool has_run_once;
};
diff --git a/arch/arm/include/asm/kvm_ras.h b/arch/arm/include/asm/kvm_ras.h
new file mode 100644
index 000000000000..e9577292dfe4
--- /dev/null
+++ b/arch/arm/include/asm/kvm_ras.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - Arm Ltd */
+
+#ifndef __ARM_KVM_RAS_H__
+#define __ARM_KVM_RAS_H__
+
+#include <linux/types.h>
+
+static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
+{
+ return -1;
+}
+
+#endif /* __ARM_KVM_RAS_H__ */
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index c4b1d4fb1797..de2089501b8b 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
#define S2_PMD_MASK PMD_MASK
#define S2_PMD_SIZE PMD_SIZE
+static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
+{
+ return true;
+}
+
#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 8e76db83c498..66f6a3ae68d2 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -38,11 +38,6 @@ static inline void harden_branch_predictor(void)
extern unsigned int user_debug;
-static inline int handle_guest_sea(phys_addr_t addr, unsigned int esr)
-{
- return -1;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 42aa4a22803c..ae5a0df5316e 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -59,7 +59,6 @@ extern int __put_user_bad(void);
* Note that this is actually 0x1,0000,0000
*/
#define KERNEL_DS 0x00000000
-#define get_ds() (KERNEL_DS)
#ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 88ef2ce1f69a..7a39e77984ef 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -26,10 +26,10 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_OLD_MMAP
#define __ARCH_WANT_SYS_OLD_SELECT
-#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_UTIME32
#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
-#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_TIME32
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_ALARM
@@ -45,7 +45,6 @@
* Unimplemented (or alternatively implemented) syscalls
*/
#define __IGNORE_fadvise64_64
-#define __IGNORE_migrate_pages
#ifdef __ARM_EABI__
/*
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index b3ef061d8b74..2c403e7c782d 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
#include <xen/arm/page-coherent.h>
+
+static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dev_dma_ops)
+ return dev->archdata.dev_dma_ops;
+ return get_arch_dma_ops(NULL);
+}
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+ return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+ xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long page_pfn = page_to_xen_pfn(page);
+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+ unsigned long compound_pages =
+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+ bool local = (page_pfn <= dev_pfn) &&
+ (dev_pfn - page_pfn < compound_pages);
+
+ /*
+	 * Dom0 is mapped 1:1. While the Linux page can span across
+	 * multiple Xen pages, it cannot contain a mix of local and
+	 * foreign Xen pages. So if the first xen_pfn == mfn, the page
+	 * is local; otherwise it is a foreign page grant-mapped in
+	 * dom0. If the page is local we can safely call the native
+	 * dma_ops function, otherwise we call the xen-specific
+	 * function.
+ */
+ if (local)
+ xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ else
+ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ /*
+	 * Dom0 is mapped 1:1. While the Linux page can span across
+	 * multiple Xen pages, it cannot contain a mix of local and
+	 * foreign Xen pages. Since dom0 is mapped 1:1, calling
+	 * pfn_valid on a foreign mfn will always return false. If the
+	 * page is local we can safely call the native dma_ops function,
+	 * otherwise we call the xen-specific function.
+ */
+ if (pfn_valid(pfn)) {
+ if (xen_get_dma_ops(hwdev)->unmap_page)
+ xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+ } else
+ __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
+ xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn)) {
+ if (xen_get_dma_ops(hwdev)->sync_single_for_device)
+ xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+ } else
+ __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
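The map helper above decides between the native and Xen-specific paths by asking whether the (possibly compound) Linux page is backed entirely by local Xen pages. A standalone sketch of that predicate, with a hypothetical name and purely for illustration, mirrors the test in xen_dma_map_page():

#include <stdbool.h>

/* Hypothetical helper, not part of the patch: true when every Xen PFN
 * backing the Linux page is local, so the native dma_ops can be used
 * directly instead of the __xen_dma_* paths. */
static inline bool xen_linux_page_is_local(unsigned long page_pfn,
					   unsigned long dev_pfn,
					   unsigned long compound_pages)
{
	return page_pfn <= dev_pfn && dev_pfn - page_pfn < compound_pages;
}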
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9908dacf9229..844861368cd5 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
-#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
return nr_irqs;
}
#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
- struct irq_data *d = irq_desc_get_irq_data(desc);
- const struct cpumask *affinity = irq_data_get_affinity_mask(d);
- struct irq_chip *c;
- bool ret = false;
-
- /*
- * If this is a per-CPU interrupt, or the affinity does not
- * include this CPU, then we have nothing to do.
- */
- if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
- return false;
-
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
- ret = true;
- }
-
- c = irq_data_get_irq_chip(d);
- if (!c->irq_set_affinity)
- pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
- return ret;
-}
-
-/*
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
- unsigned int i;
- struct irq_desc *desc;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for_each_irq_desc(i, desc) {
- bool affinity_broken;
-
- raw_spin_lock(&desc->lock);
- affinity_broken = migrate_one_irq(desc);
- raw_spin_unlock(&desc->lock);
-
- if (affinity_broken)
- pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
- i, smp_processor_id());
- }
-
- local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 16601d1442d1..72cc0862a30e 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -150,7 +150,7 @@ void __show_regs(struct pt_regs *regs)
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
- else if (fs == get_ds())
+ else if (fs == KERNEL_DS)
segment = "kernel";
else
segment = "user";
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3bf82232b1be..1d6f5ea522f4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -254,7 +254,7 @@ int __cpu_disable(void)
/*
* OK - migrate IRQs away from this CPU
*/
- migrate_irqs();
+ irq_migrate_all_off_this_cpu();
/*
* Flush user cache and TLB mappings, and then remove this CPU
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 92ab36f38795..acd054a42ba2 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -317,10 +317,10 @@ struct oabi_sembuf {
asmlinkage long sys_oabi_semtimedop(int semid,
struct oabi_sembuf __user *tsops,
unsigned nsops,
- const struct timespec __user *timeout)
+ const struct old_timespec32 __user *timeout)
{
struct sembuf *sops;
- struct timespec local_timeout;
+ struct old_timespec32 local_timeout;
long err;
int i;
@@ -350,7 +350,7 @@ asmlinkage long sys_oabi_semtimedop(int semid,
} else {
mm_segment_t fs = get_fs();
set_fs(KERNEL_DS);
- err = sys_semtimedop(semid, sops, nsops, timeout);
+ err = sys_semtimedop_time32(semid, sops, nsops, timeout);
set_fs(fs);
}
kfree(sops);
@@ -375,7 +375,7 @@ asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
return sys_oabi_semtimedop(first,
(struct oabi_sembuf __user *)ptr,
second,
- (const struct timespec __user *)fifth);
+ (const struct old_timespec32 __user *)fifth);
default:
return sys_ipc(call, first, second, third, ptr, fifth);
}
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 222c1635bc7a..e8bd288fd5be 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
reset_coproc_regs(vcpu, table, num);
for (num = 1; num < NR_CP15_REGS; num++)
- if (vcpu_cp15(vcpu, num) == 0x42424242)
- panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
+ WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+ "Didn't reset vcpu_cp15(vcpu, %zi)", num);
}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 5ed0c3ee33d6..e53327912adc 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
#include <kvm/arm_arch_timer.h>
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset CP15 registers */
kvm_reset_coprocs(vcpu);
+ /*
+ * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all of the sys_reg resets.
+ */
+ if (READ_ONCE(vcpu->arch.reset_state.reset)) {
+ unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+ /* Gracefully handle Thumb2 entry point */
+ if (target_pc & 1) {
+ target_pc &= ~1UL;
+ vcpu_set_thumb(vcpu);
+ }
+
+ /* Propagate caller endianness */
+ if (vcpu->arch.reset_state.be)
+ kvm_vcpu_set_be(vcpu);
+
+ *vcpu_pc(vcpu) = target_pc;
+ vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+ vcpu->arch.reset_state.reset = false;
+ }
+
/* Reset arch_timer context */
return kvm_timer_vcpu_reset(vcpu);
}
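The entry-point handling above follows the convention, noted in the comment, that an odd PSCI CPU_ON entry address requests Thumb execution. A tiny illustrative sketch of that decoding (the address is hypothetical, not taken from the patch):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical PSCI CPU_ON entry point with bit 0 set. */
	unsigned long target_pc = 0x80001001UL;
	bool thumb = target_pc & 1;	/* Thumb requested: reset code sets CPSR.T */

	target_pc &= ~1UL;		/* the PC actually programmed */
	printf("pc=%#lx thumb=%d\n", target_pc, thumb);
	return 0;
}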
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index a067adf9f1ee..4ef1e55f4a0b 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -167,6 +167,7 @@ config ARCH_BCM2835
select BCM2835_TIMER
select PINCTRL
select PINCTRL_BCM2835
+ select MFD_CORE
help
This enables support for the Broadcom BCM2835 and BCM2836 SoCs.
This SoC is used in the Raspberry Pi and Roku 2 devices.
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..95a11d5b3587 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
} else /* remote PCI bus */
base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
- return base + (where & 0xffc) + (devfn << 12);
+ return base + where + (devfn << 12);
}
static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
@@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
u32 mask = (0x1ull << (size * 8)) - 1;
int shift = (where % 4) * 8;
- ret = pci_generic_config_read32(bus, devfn, where, size, val);
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
(where & 0xffc) == PCI_CLASS_REVISION)
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index da8a039d65f9..5a59cebc7d0a 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -1,13 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
if ARCH_DAVINCI
-config AINTC
- bool
-
-config CP_INTC
- bool
- select IRQ_DOMAIN
-
config ARCH_DAVINCI_DMx
bool
@@ -17,17 +10,17 @@ comment "DaVinci Core Type"
config ARCH_DAVINCI_DM644x
bool "DaVinci 644x based system"
- select AINTC
+ select DAVINCI_AINTC
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DM355
bool "DaVinci 355 based system"
- select AINTC
+ select DAVINCI_AINTC
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DM646x
bool "DaVinci 646x based system"
- select AINTC
+ select DAVINCI_AINTC
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DA830
@@ -36,20 +29,20 @@ config ARCH_DAVINCI_DA830
select ARCH_DAVINCI_DA8XX
# needed on silicon revs 1.0, 1.1:
select CPU_DCACHE_WRITETHROUGH if !CPU_DCACHE_DISABLE
- select CP_INTC
+ select DAVINCI_CP_INTC
config ARCH_DAVINCI_DA850
bool "DA850/OMAP-L138/AM18x based system"
depends on !ARCH_DAVINCI_DMx || (AUTO_ZRELADDR && ARM_PATCH_PHYS_VIRT)
select ARCH_DAVINCI_DA8XX
- select CP_INTC
+ select DAVINCI_CP_INTC
config ARCH_DAVINCI_DA8XX
bool
config ARCH_DAVINCI_DM365
bool "DaVinci 365 based system"
- select AINTC
+ select DAVINCI_AINTC
select ARCH_DAVINCI_DMx
comment "DaVinci Board Type"
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 93d271b4d84b..f76a8482784f 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -18,9 +18,6 @@ obj-$(CONFIG_ARCH_DAVINCI_DM365) += dm365.o devices.o
obj-$(CONFIG_ARCH_DAVINCI_DA830) += da830.o devices-da8xx.o usb-da8xx.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += da850.o devices-da8xx.o usb-da8xx.o
-obj-$(CONFIG_AINTC) += irq.o
-obj-$(CONFIG_CP_INTC) += cp_intc.o
-
# Board specific
obj-$(CONFIG_MACH_DA8XX_DT) += da8xx-dt.o pdata-quirks.o
obj-$(CONFIG_MACH_DAVINCI_EVM) += board-dm644x-evm.o
diff --git a/arch/arm/mach-davinci/asp.h b/arch/arm/mach-davinci/asp.h
index 495aa6907cbc..d0ecd1d0f084 100644
--- a/arch/arm/mach-davinci/asp.h
+++ b/arch/arm/mach-davinci/asp.h
@@ -49,9 +49,9 @@
#define DAVINCI_DA830_DMA_MCASP2_AXEVT 5
/* Interrupts */
-#define DAVINCI_ASP0_RX_INT IRQ_MBRINT
-#define DAVINCI_ASP0_TX_INT IRQ_MBXINT
-#define DAVINCI_ASP1_RX_INT IRQ_MBRINT
-#define DAVINCI_ASP1_TX_INT IRQ_MBXINT
+#define DAVINCI_ASP0_RX_INT DAVINCI_INTC_IRQ(IRQ_MBRINT)
+#define DAVINCI_ASP0_TX_INT DAVINCI_INTC_IRQ(IRQ_MBXINT)
+#define DAVINCI_ASP1_RX_INT DAVINCI_INTC_IRQ(IRQ_MBRINT)
+#define DAVINCI_ASP1_TX_INT DAVINCI_INTC_IRQ(IRQ_MBXINT)
#endif /* __ASM_ARCH_DAVINCI_ASP_H */
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index e52ec1619b70..ff097ecfa451 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/platform_data/pcf857x.h>
-#include <linux/platform_data/at24.h>
+#include <linux/property.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
@@ -36,10 +36,11 @@
#include <asm/mach/arch.h>
#include <mach/common.h>
-#include "cp_intc.h"
#include <mach/mux.h>
#include <mach/da8xx.h>
+#include "irqs.h"
+
#define DA830_EVM_PHY_ID ""
/*
* USB1 VBUS is controlled by GPIO1[15], over-current is reported on GPIO2[4].
@@ -52,62 +53,19 @@ static const short da830_evm_usb11_pins[] = {
-1
};
-static da8xx_ocic_handler_t da830_evm_usb_ocic_handler;
-
-static int da830_evm_usb_set_power(unsigned port, int on)
-{
- gpio_set_value(ON_BD_USB_DRV, on);
- return 0;
-}
-
-static int da830_evm_usb_get_power(unsigned port)
-{
- return gpio_get_value(ON_BD_USB_DRV);
-}
-
-static int da830_evm_usb_get_oci(unsigned port)
-{
- return !gpio_get_value(ON_BD_USB_OVC);
-}
-
-static irqreturn_t da830_evm_usb_ocic_irq(int, void *);
-
-static int da830_evm_usb_ocic_notify(da8xx_ocic_handler_t handler)
-{
- int irq = gpio_to_irq(ON_BD_USB_OVC);
- int error = 0;
-
- if (handler != NULL) {
- da830_evm_usb_ocic_handler = handler;
-
- error = request_irq(irq, da830_evm_usb_ocic_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "OHCI over-current indicator", NULL);
- if (error)
- pr_err("%s: could not request IRQ to watch over-current indicator changes\n",
- __func__);
- } else
- free_irq(irq, NULL);
-
- return error;
-}
+static struct gpiod_lookup_table da830_evm_usb_gpio_lookup = {
+ .dev_id = "ohci-da8xx",
+ .table = {
+ GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, "vbus", 0),
+ GPIO_LOOKUP("davinci_gpio", ON_BD_USB_OVC, "oc", 0),
+ },
+};
static struct da8xx_ohci_root_hub da830_evm_usb11_pdata = {
- .set_power = da830_evm_usb_set_power,
- .get_power = da830_evm_usb_get_power,
- .get_oci = da830_evm_usb_get_oci,
- .ocic_notify = da830_evm_usb_ocic_notify,
-
/* TPS2065 switch @ 5V */
.potpgt = (3 + 1) / 2, /* 3 ms max */
};
-static irqreturn_t da830_evm_usb_ocic_irq(int irq, void *dev_id)
-{
- da830_evm_usb_ocic_handler(&da830_evm_usb11_pdata, 1);
- return IRQ_HANDLED;
-}
-
static __init void da830_evm_usb_init(void)
{
int ret;
@@ -142,21 +100,7 @@ static __init void da830_evm_usb_init(void)
return;
}
- ret = gpio_request(ON_BD_USB_DRV, "ON_BD_USB_DRV");
- if (ret) {
- pr_err("%s: failed to request GPIO for USB 1.1 port power control: %d\n",
- __func__, ret);
- return;
- }
- gpio_direction_output(ON_BD_USB_DRV, 0);
-
- ret = gpio_request(ON_BD_USB_OVC, "ON_BD_USB_OVC");
- if (ret) {
- pr_err("%s: failed to request GPIO for USB 1.1 port over-current indicator: %d\n",
- __func__, ret);
- return;
- }
- gpio_direction_input(ON_BD_USB_OVC);
+ gpiod_add_lookup_table(&da830_evm_usb_gpio_lookup);
ret = da8xx_register_usb11(&da830_evm_usb11_pdata);
if (ret)
@@ -208,9 +152,9 @@ static struct gpiod_lookup_table mmc_gpios_table = {
.dev_id = "da830-mmc.0",
.table = {
/* gpio chip 1 contains gpio range 32-63 */
- GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd",
+ GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_CD_PIN, "cd",
GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp",
+ GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_LOW),
},
};
@@ -457,12 +401,9 @@ static struct nvmem_cell_lookup da830_evm_nvmem_cell_lookup = {
.con_id = "mac-address",
};
-static struct at24_platform_data da830_evm_i2c_eeprom_info = {
- .byte_len = SZ_256K / 8,
- .page_size = 64,
- .flags = AT24_FLAG_ADDR16,
- .setup = davinci_get_mac_addr,
- .context = (void *)0x7f00,
+static const struct property_entry da830_evm_i2c_eeprom_properties[] = {
+ PROPERTY_ENTRY_U32("pagesize", 64),
+ { }
};
static int __init da830_evm_ui_expander_setup(struct i2c_client *client,
@@ -496,7 +437,7 @@ static struct pcf857x_platform_data __initdata da830_evm_ui_expander_info = {
static struct i2c_board_info __initdata da830_evm_i2c_devices[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
- .platform_data = &da830_evm_i2c_eeprom_info,
+ .properties = da830_evm_i2c_eeprom_properties,
},
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
@@ -693,7 +634,7 @@ static void __init da830_evm_map_io(void)
MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
.atag_offset = 0x100,
.map_io = da830_evm_map_io,
- .init_irq = cp_intc_init,
+ .init_irq = da830_init_irq,
.init_time = da830_init_time,
.init_machine = da830_evm_init,
.init_late = davinci_init_late,
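/*
 * Illustrative sketch, not part of the patch above: it only shows how a
 * consumer driver (assumed here to be "ohci-da8xx", matching the .dev_id of
 * the new lookup table) could pick up the "vbus" and "oc" lines that the
 * board file now registers via gpiod_add_lookup_table().  The probe function
 * name is hypothetical and error handling is abbreviated.
 */
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_ohci_probe(struct platform_device *pdev)
{
	struct gpio_desc *vbus, *oc;

	/* matches GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, "vbus", 0) */
	vbus = devm_gpiod_get_optional(&pdev->dev, "vbus", GPIOD_OUT_LOW);
	if (IS_ERR(vbus))
		return PTR_ERR(vbus);

	/* matches GPIO_LOOKUP("davinci_gpio", ON_BD_USB_OVC, "oc", 0) */
	oc = devm_gpiod_get_optional(&pdev->dev, "oc", GPIOD_IN);
	if (IS_ERR(oc))
		return PTR_ERR(oc);

	if (vbus)
		gpiod_set_value_cansleep(vbus, 1);	/* drive port power on */

	return 0;
}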
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 6a29baf0a289..1fdc9283a8c5 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -43,9 +43,10 @@
#include <linux/spi/flash.h>
#include <mach/common.h>
-#include "cp_intc.h"
#include <mach/da8xx.h>
#include <mach/mux.h>
+
+#include "irqs.h"
#include "sram.h"
#include <asm/mach-types.h>
@@ -150,32 +151,6 @@ static struct spi_board_info da850evm_spi_info[] = {
},
};
-#ifdef CONFIG_MTD
-static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
-{
- char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
- size_t retlen;
-
- if (!strcmp(mtd->name, "MAC-Address")) {
- mtd_read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
- if (retlen == ETH_ALEN)
- pr_info("Read MAC addr from SPI Flash: %pM\n",
- mac_addr);
- }
-}
-
-static struct mtd_notifier da850evm_spi_notifier = {
- .add = da850_evm_m25p80_notify_add,
-};
-
-static void da850_evm_setup_mac_addr(void)
-{
- register_mtd_user(&da850evm_spi_notifier);
-}
-#else
-static void da850_evm_setup_mac_addr(void) { }
-#endif
-
static struct mtd_partition da850_evm_norflash_partition[] = {
{
.name = "bootloaders + env",
@@ -805,9 +780,9 @@ static struct gpiod_lookup_table mmc_gpios_table = {
.dev_id = "da830-mmc.0",
.table = {
/* gpio chip 2 contains gpio range 64-95 */
- GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
+ GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_CD_PIN, "cd",
GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
+ GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_HIGH),
},
};
@@ -1064,6 +1039,17 @@ static const short da850_evm_rmii_pins[] = {
-1
};
+static struct gpiod_hog da850_evm_emac_gpio_hogs[] = {
+ {
+ .chip_label = "davinci_gpio",
+ .chip_hwnum = DA850_MII_MDIO_CLKEN_PIN,
+ .line_name = "mdio_clk_en",
+ .lflags = 0,
+ /* dflags set in da850_evm_config_emac() */
+ },
+ { }
+};
+
static int __init da850_evm_config_emac(void)
{
void __iomem *cfg_chip3_base;
@@ -1102,14 +1088,9 @@ static int __init da850_evm_config_emac(void)
if (ret)
pr_warn("%s:GPIO(2,6) mux setup failed\n", __func__);
- ret = gpio_request(DA850_MII_MDIO_CLKEN_PIN, "mdio_clk_en");
- if (ret) {
- pr_warn("Cannot open GPIO %d\n", DA850_MII_MDIO_CLKEN_PIN);
- return ret;
- }
-
- /* Enable/Disable MII MDIO clock */
- gpio_direction_output(DA850_MII_MDIO_CLKEN_PIN, rmii_en);
+ da850_evm_emac_gpio_hogs[0].dflags = rmii_en ? GPIOD_OUT_HIGH
+ : GPIOD_OUT_LOW;
+ gpiod_add_hogs(da850_evm_emac_gpio_hogs);
soc_info->emac_pdata->phy_id = DA850_EVM_PHY_ID;
@@ -1494,8 +1475,6 @@ static __init void da850_evm_init(void)
if (ret)
pr_warn("%s: SATA registration failed: %d\n", __func__, ret);
- da850_evm_setup_mac_addr();
-
ret = da8xx_register_rproc();
if (ret)
pr_warn("%s: dsp/rproc registration failed: %d\n",
@@ -1521,7 +1500,7 @@ static void __init da850_evm_map_io(void)
MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
.atag_offset = 0x100,
.map_io = da850_evm_map_io,
- .init_irq = cp_intc_init,
+ .init_irq = da850_init_irq,
.init_time = da850_init_time,
.init_machine = da850_evm_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index f53a461a606f..64d81fc86f14 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -117,9 +117,9 @@ static struct platform_device davinci_nand_device = {
static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
.dev_id = "i2c_davinci.1",
.table = {
- GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda",
+ GPIO_LOOKUP("davinci_gpio", DM355_I2C_SDA_PIN, "sda",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl",
+ GPIO_LOOKUP("davinci_gpio", DM355_I2C_SCL_PIN, "scl",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
},
};
@@ -438,7 +438,7 @@ static __init void dm355_evm_init(void)
MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
.atag_offset = 0x100,
.map_io = dm355_evm_map_io,
- .init_irq = davinci_irq_init,
+ .init_irq = dm355_init_irq,
.init_time = dm355_init_time,
.init_machine = dm355_evm_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 0fdf1d03eb11..b9e9950dd300 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -273,7 +273,7 @@ static __init void dm355_leopard_init(void)
MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
.atag_offset = 0x100,
.map_io = dm355_leopard_map_io,
- .init_irq = davinci_irq_init,
+ .init_irq = dm355_init_irq,
.init_time = dm355_init_time,
.init_machine = dm355_leopard_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index e3b0b701e395..150a36f333df 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -18,7 +18,7 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/platform_data/at24.h>
+#include <linux/property.h>
#include <linux/leds.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -225,18 +225,15 @@ static struct nvmem_cell_lookup davinci_nvmem_cell_lookup = {
.con_id = "mac-address",
};
-static struct at24_platform_data eeprom_info = {
- .byte_len = (256*1024) / 8,
- .page_size = 64,
- .flags = AT24_FLAG_ADDR16,
- .setup = davinci_get_mac_addr,
- .context = (void *)0x7f00,
+static const struct property_entry eeprom_properties[] = {
+ PROPERTY_ENTRY_U32("pagesize", 64),
+ { }
};
static struct i2c_board_info i2c_info[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
- .platform_data = &eeprom_info,
+ .properties = eeprom_properties,
},
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
@@ -834,7 +831,7 @@ static __init void dm365_evm_init(void)
MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
.atag_offset = 0x100,
.map_io = dm365_evm_map_io,
- .init_irq = davinci_irq_init,
+ .init_irq = dm365_init_irq,
.init_time = dm365_init_time,
.init_machine = dm365_evm_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index e1428115067f..de15f782816e 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -16,8 +16,8 @@
#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/platform_data/pcf857x.h>
-#include <linux/platform_data/at24.h>
#include <linux/platform_data/gpio-davinci.h>
+#include <linux/property.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
@@ -36,9 +36,10 @@
#include <asm/mach/arch.h>
#include <mach/common.h>
-#include <linux/platform_data/i2c-davinci.h>
-#include <mach/serial.h>
#include <mach/mux.h>
+#include <mach/serial.h>
+
+#include <linux/platform_data/i2c-davinci.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mmc-davinci.h>
#include <linux/platform_data/usb-davinci.h>
@@ -46,6 +47,7 @@
#include <linux/platform_data/ti-aemif.h>
#include "davinci.h"
+#include "irqs.h"
#define DM644X_EVM_PHY_ID "davinci_mdio-0:01"
#define LXT971_PHY_ID (0x001378e2)
@@ -532,12 +534,9 @@ static struct nvmem_cell_lookup dm644evm_nvmem_cell_lookup = {
.con_id = "mac-address",
};
-static struct at24_platform_data eeprom_info = {
- .byte_len = (256*1024) / 8,
- .page_size = 64,
- .flags = AT24_FLAG_ADDR16,
- .setup = davinci_get_mac_addr,
- .context = (void *)0x7f00,
+static const struct property_entry eeprom_properties[] = {
+ PROPERTY_ENTRY_U32("pagesize", 64),
+ { }
};
/*
@@ -647,7 +646,7 @@ static struct i2c_board_info __initdata i2c_info[] = {
},
{
I2C_BOARD_INFO("24c256", 0x50),
- .platform_data = &eeprom_info,
+ .properties = eeprom_properties,
},
{
I2C_BOARD_INFO("tlv320aic33", 0x1b),
@@ -660,9 +659,9 @@ static struct i2c_board_info __initdata i2c_info[] = {
static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
.dev_id = "i2c_davinci.1",
.table = {
- GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda",
+ GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SDA_PIN, "sda",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl",
+ GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SCL_PIN, "scl",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
},
};
@@ -889,7 +888,7 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
/* Maintainer: MontaVista Software <source@mvista.com> */
.atag_offset = 0x100,
.map_io = davinci_evm_map_io,
- .init_irq = davinci_irq_init,
+ .init_irq = dm644x_init_irq,
.init_time = dm644x_init_time,
.init_machine = davinci_evm_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 8d5be6dd2019..4600b617f9b4 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -22,7 +22,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/platform_data/at24.h>
+#include <linux/property.h>
#include <linux/platform_data/pcf857x.h>
#include <linux/platform_data/ti-aemif.h>
@@ -44,10 +44,10 @@
#include <asm/mach/arch.h>
#include <mach/common.h>
-#include <mach/irqs.h>
#include <mach/serial.h>
#include "davinci.h"
+#include "irqs.h"
#define NAND_BLOCK_SIZE SZ_128K
@@ -364,12 +364,9 @@ static struct nvmem_cell_lookup dm646x_evm_nvmem_cell_lookup = {
.con_id = "mac-address",
};
-static struct at24_platform_data eeprom_info = {
- .byte_len = (256*1024) / 8,
- .page_size = 64,
- .flags = AT24_FLAG_ADDR16,
- .setup = davinci_get_mac_addr,
- .context = (void *)0x7f00,
+static const struct property_entry eeprom_properties[] = {
+ PROPERTY_ENTRY_U32("pagesize", 64),
+ { }
};
#endif
@@ -440,7 +437,7 @@ static void evm_init_cpld(void)
static struct i2c_board_info __initdata i2c_info[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
- .platform_data = &eeprom_info,
+ .properties = eeprom_properties,
},
{
I2C_BOARD_INFO("pcf8574a", 0x38),
@@ -863,7 +860,7 @@ static __init void evm_init(void)
MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
.atag_offset = 0x100,
.map_io = davinci_map_io,
- .init_irq = davinci_irq_init,
+ .init_irq = dm646x_init_irq,
.init_time = dm646x_evm_init_time,
.init_machine = evm_init,
.init_late = davinci_init_late,
@@ -873,7 +870,7 @@ MACHINE_END
MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
.atag_offset = 0x100,
.map_io = davinci_map_io,
- .init_irq = davinci_irq_init,
+ .init_irq = dm646x_init_irq,
.init_time = dm6467t_evm_init_time,
.init_machine = evm_init,
.init_late = davinci_init_late,
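/*
 * Illustrative sketch, assuming the EEPROM driver consumes the generic
 * "pagesize" device property that replaces at24_platform_data in the board
 * files above.  The helper name is hypothetical; it only demonstrates the
 * device-property read that makes PROPERTY_ENTRY_U32("pagesize", 64)
 * visible on the driver side.
 */
#include <linux/device.h>
#include <linux/property.h>

static u32 example_eeprom_pagesize(struct device *dev)
{
	u32 page_size = 1;	/* conservative fallback if the property is absent */

	device_property_read_u32(dev, "pagesize", &page_size);

	return page_size;
}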
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 8df16e81b69e..dfce421c0579 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -14,11 +14,13 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mtd/partitions.h>
+#include <linux/notifier.h>
+#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/regulator/machine.h>
#include <linux/i2c.h>
-#include <linux/platform_data/at24.h>
#include <linux/etherdevice.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
@@ -27,7 +29,6 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
-#include "cp_intc.h"
#include <mach/da8xx.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
@@ -117,11 +118,15 @@ static void mityomapl138_cpufreq_init(const char *partnum)
static void mityomapl138_cpufreq_init(const char *partnum) { }
#endif
-static void read_factory_config(struct nvmem_device *nvmem, void *context)
+static int read_factory_config(struct notifier_block *nb,
+ unsigned long event, void *data)
{
int ret;
const char *partnum = NULL;
- struct davinci_soc_info *soc_info = &davinci_soc_info;
+ struct nvmem_device *nvmem = data;
+
+ if (strcmp(nvmem_dev_name(nvmem), "1-00500") != 0)
+ return NOTIFY_DONE;
if (!IS_BUILTIN(CONFIG_NVMEM)) {
pr_warn("Factory Config not available without CONFIG_NVMEM\n");
@@ -147,21 +152,20 @@ static void read_factory_config(struct nvmem_device *nvmem, void *context)
goto bad_config;
}
- pr_info("Found MAC = %pM\n", factory_config.mac);
- if (is_valid_ether_addr(factory_config.mac))
- memcpy(soc_info->emac_pdata->mac_addr,
- factory_config.mac, ETH_ALEN);
- else
- pr_warn("Invalid MAC found in factory config block\n");
-
partnum = factory_config.partnum;
pr_info("Part Number = %s\n", partnum);
bad_config:
/* default maximum speed is valid for all platforms */
mityomapl138_cpufreq_init(partnum);
+
+ return NOTIFY_STOP;
}
+static struct notifier_block mityomapl138_nvmem_notifier = {
+ .notifier_call = read_factory_config,
+};
+
/*
* We don't define a cell for factory config as it will be accessed from the
* board file using the nvmem notifier chain.
@@ -187,12 +191,10 @@ static struct nvmem_cell_lookup mityomapl138_nvmem_cell_lookup = {
.con_id = "mac-address",
};
-static struct at24_platform_data mityomapl138_fd_chip = {
- .byte_len = 256,
- .page_size = 8,
- .flags = AT24_FLAG_READONLY | AT24_FLAG_IRUGO,
- .setup = read_factory_config,
- .context = NULL,
+static const struct property_entry mityomapl138_fd_chip_properties[] = {
+ PROPERTY_ENTRY_U32("pagesize", 8),
+ PROPERTY_ENTRY_BOOL("read-only"),
+ { }
};
static struct davinci_i2c_platform_data mityomap_i2c_0_pdata = {
@@ -321,7 +323,7 @@ static struct i2c_board_info __initdata mityomap_tps65023_info[] = {
},
{
I2C_BOARD_INFO("24c02", 0x50),
- .platform_data = &mityomapl138_fd_chip,
+ .properties = mityomapl138_fd_chip_properties,
},
};
@@ -569,6 +571,7 @@ static void __init mityomapl138_init(void)
davinci_serial_init(da8xx_serial_device);
+ nvmem_register_notifier(&mityomapl138_nvmem_notifier);
nvmem_add_cell_table(&mityomapl138_nvmem_cell_table);
nvmem_add_cell_lookups(&mityomapl138_nvmem_cell_lookup, 1);
@@ -624,7 +627,7 @@ static void __init mityomapl138_map_io(void)
MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
.atag_offset = 0x100,
.map_io = mityomapl138_map_io,
- .init_irq = cp_intc_init,
+ .init_irq = da850_init_irq,
.init_time = da850_init_time,
.init_machine = mityomapl138_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index efdaa27241c5..ce99f782811a 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -231,7 +231,7 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
/* Maintainer: Neuros Technologies <neuros@groups.google.com> */
.atag_offset = 0x100,
.map_io = davinci_ntosd2_map_io,
- .init_irq = davinci_irq_init,
+ .init_irq = dm644x_init_irq,
.init_time = dm644x_init_time,
.init_machine = davinci_ntosd2_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 8e8d51f4a276..0896af2bed24 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -27,7 +27,6 @@
#include <asm/mach/arch.h>
#include <mach/common.h>
-#include "cp_intc.h"
#include <mach/da8xx.h>
#include <mach/mux.h>
@@ -134,9 +133,9 @@ static const short hawk_mmcsd0_pins[] = {
static struct gpiod_lookup_table mmc_gpios_table = {
.dev_id = "da830-mmc.0",
.table = {
- GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd",
+ GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_CD_PIN, "cd",
GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp",
+ GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_LOW),
},
};
@@ -294,66 +293,24 @@ static int omapl138_hawk_register_aemif(void)
return platform_device_register(&omapl138_hawk_aemif_device);
}
-static irqreturn_t omapl138_hawk_usb_ocic_irq(int irq, void *dev_id);
-static da8xx_ocic_handler_t hawk_usb_ocic_handler;
-
static const short da850_hawk_usb11_pins[] = {
DA850_GPIO2_4, DA850_GPIO6_13,
-1
};
-static int hawk_usb_set_power(unsigned port, int on)
-{
- gpio_set_value(DA850_USB1_VBUS_PIN, on);
- return 0;
-}
-
-static int hawk_usb_get_power(unsigned port)
-{
- return gpio_get_value(DA850_USB1_VBUS_PIN);
-}
-
-static int hawk_usb_get_oci(unsigned port)
-{
- return !gpio_get_value(DA850_USB1_OC_PIN);
-}
-
-static int hawk_usb_ocic_notify(da8xx_ocic_handler_t handler)
-{
- int irq = gpio_to_irq(DA850_USB1_OC_PIN);
- int error = 0;
-
- if (handler != NULL) {
- hawk_usb_ocic_handler = handler;
-
- error = request_irq(irq, omapl138_hawk_usb_ocic_irq,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
- "OHCI over-current indicator", NULL);
- if (error)
- pr_err("%s: could not request IRQ to watch "
- "over-current indicator changes\n", __func__);
- } else {
- free_irq(irq, NULL);
- }
- return error;
-}
+static struct gpiod_lookup_table hawk_usb_gpio_lookup = {
+ .dev_id = "ohci-da8xx",
+ .table = {
+ GPIO_LOOKUP("davinci_gpio", DA850_USB1_VBUS_PIN, "vbus", 0),
+ GPIO_LOOKUP("davinci_gpio", DA850_USB1_OC_PIN, "oc", 0),
+ },
+};
static struct da8xx_ohci_root_hub omapl138_hawk_usb11_pdata = {
- .set_power = hawk_usb_set_power,
- .get_power = hawk_usb_get_power,
- .get_oci = hawk_usb_get_oci,
- .ocic_notify = hawk_usb_ocic_notify,
/* TPS2087 switch @ 5V */
.potpgt = (3 + 1) / 2, /* 3 ms max */
};
-static irqreturn_t omapl138_hawk_usb_ocic_irq(int irq, void *dev_id)
-{
- hawk_usb_ocic_handler(&omapl138_hawk_usb11_pdata, 1);
- return IRQ_HANDLED;
-}
-
static __init void omapl138_hawk_usb_init(void)
{
int ret;
@@ -374,34 +331,13 @@ static __init void omapl138_hawk_usb_init(void)
pr_warn("%s: USB PHY registration failed: %d\n",
__func__, ret);
- ret = gpio_request_one(DA850_USB1_VBUS_PIN,
- GPIOF_DIR_OUT, "USB1 VBUS");
- if (ret < 0) {
- pr_err("%s: failed to request GPIO for USB 1.1 port "
- "power control: %d\n", __func__, ret);
- return;
- }
-
- ret = gpio_request_one(DA850_USB1_OC_PIN,
- GPIOF_DIR_IN, "USB1 OC");
- if (ret < 0) {
- pr_err("%s: failed to request GPIO for USB 1.1 port "
- "over-current indicator: %d\n", __func__, ret);
- goto usb11_setup_oc_fail;
- }
+ gpiod_add_lookup_table(&hawk_usb_gpio_lookup);
ret = da8xx_register_usb11(&omapl138_hawk_usb11_pdata);
- if (ret) {
+ if (ret)
pr_warn("%s: USB 1.1 registration failed: %d\n", __func__, ret);
- goto usb11_setup_fail;
- }
return;
-
-usb11_setup_fail:
- gpio_free(DA850_USB1_OC_PIN);
-usb11_setup_oc_fail:
- gpio_free(DA850_USB1_VBUS_PIN);
}
static __init void omapl138_hawk_init(void)
@@ -462,7 +398,7 @@ static void __init omapl138_hawk_map_io(void)
MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
.atag_offset = 0x100,
.map_io = omapl138_hawk_map_io,
- .init_irq = cp_intc_init,
+ .init_irq = da850_init_irq,
.init_time = da850_init_time,
.init_machine = omapl138_hawk_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 792bb84d5011..bcdefde2f401 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -26,7 +26,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/platform_data/at24.h>
+#include <linux/property.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
@@ -92,16 +92,15 @@ static struct platform_device davinci_sffsdr_nandflash_device = {
.resource = davinci_sffsdr_nandflash_resource,
};
-static struct at24_platform_data eeprom_info = {
- .byte_len = (64*1024) / 8,
- .page_size = 32,
- .flags = AT24_FLAG_ADDR16,
+static const struct property_entry eeprom_properties[] = {
+ PROPERTY_ENTRY_U32("pagesize", 32),
+ { }
};
static struct i2c_board_info __initdata i2c_info[] = {
{
- I2C_BOARD_INFO("24lc64", 0x50),
- .platform_data = &eeprom_info,
+ I2C_BOARD_INFO("24c64", 0x50),
+ .properties = eeprom_properties,
},
/* Other I2C devices:
* MSP430, addr 0x23 (not used)
@@ -153,7 +152,7 @@ static __init void davinci_sffsdr_init(void)
MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
.atag_offset = 0x100,
.map_io = davinci_sffsdr_map_io,
- .init_irq = davinci_irq_init,
+ .init_irq = dm644x_init_irq,
.init_time = dm644x_init_time,
.init_machine = davinci_sffsdr_init,
.init_late = davinci_init_late,
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index e1d0f0d841ff..ae61d19f9b3a 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -23,24 +23,6 @@
struct davinci_soc_info davinci_soc_info;
EXPORT_SYMBOL(davinci_soc_info);
-void __iomem *davinci_intc_base;
-int davinci_intc_type;
-
-void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context)
-{
- char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
- off_t offset = (off_t)context;
-
- if (!IS_BUILTIN(CONFIG_NVMEM)) {
- pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n");
- return;
- }
-
- /* Read MAC addr from EEPROM */
- if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
- pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
-}
-
static int __init davinci_init_id(struct davinci_soc_info *soc_info)
{
int i;
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
deleted file mode 100644
index 94085d21018e..000000000000
--- a/arch/arm/mach-davinci/cp_intc.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * TI Common Platform Interrupt Controller (cp_intc) driver
- *
- * Author: Steve Chen <schen@mvista.com>
- * Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/irqchip.h>
-#include <linux/irqdomain.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include <mach/common.h>
-#include "cp_intc.h"
-
-static inline unsigned int cp_intc_read(unsigned offset)
-{
- return __raw_readl(davinci_intc_base + offset);
-}
-
-static inline void cp_intc_write(unsigned long value, unsigned offset)
-{
- __raw_writel(value, davinci_intc_base + offset);
-}
-
-static void cp_intc_ack_irq(struct irq_data *d)
-{
- cp_intc_write(d->hwirq, CP_INTC_SYS_STAT_IDX_CLR);
-}
-
-/* Disable interrupt */
-static void cp_intc_mask_irq(struct irq_data *d)
-{
- /* XXX don't know why we need to disable nIRQ here... */
- cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_CLR);
- cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_CLR);
- cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
-}
-
-/* Enable interrupt */
-static void cp_intc_unmask_irq(struct irq_data *d)
-{
- cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_SET);
-}
-
-static int cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type)
-{
- unsigned reg = BIT_WORD(d->hwirq);
- unsigned mask = BIT_MASK(d->hwirq);
- unsigned polarity = cp_intc_read(CP_INTC_SYS_POLARITY(reg));
- unsigned type = cp_intc_read(CP_INTC_SYS_TYPE(reg));
-
- switch (flow_type) {
- case IRQ_TYPE_EDGE_RISING:
- polarity |= mask;
- type |= mask;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- polarity &= ~mask;
- type |= mask;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- polarity |= mask;
- type &= ~mask;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- polarity &= ~mask;
- type &= ~mask;
- break;
- default:
- return -EINVAL;
- }
-
- cp_intc_write(polarity, CP_INTC_SYS_POLARITY(reg));
- cp_intc_write(type, CP_INTC_SYS_TYPE(reg));
-
- return 0;
-}
-
-static struct irq_chip cp_intc_irq_chip = {
- .name = "cp_intc",
- .irq_ack = cp_intc_ack_irq,
- .irq_mask = cp_intc_mask_irq,
- .irq_unmask = cp_intc_unmask_irq,
- .irq_set_type = cp_intc_set_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static struct irq_domain *cp_intc_domain;
-
-static int cp_intc_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);
-
- irq_set_chip(virq, &cp_intc_irq_chip);
- irq_set_probe(virq);
- irq_set_handler(virq, handle_edge_irq);
- return 0;
-}
-
-static const struct irq_domain_ops cp_intc_host_ops = {
- .map = cp_intc_host_map,
- .xlate = irq_domain_xlate_onetwocell,
-};
-
-int __init cp_intc_of_init(struct device_node *node, struct device_node *parent)
-{
- u32 num_irq = davinci_soc_info.intc_irq_num;
- u8 *irq_prio = davinci_soc_info.intc_irq_prios;
- u32 *host_map = davinci_soc_info.intc_host_map;
- unsigned num_reg = BITS_TO_LONGS(num_irq);
- int i, irq_base;
-
- davinci_intc_type = DAVINCI_INTC_TYPE_CP_INTC;
- if (node) {
- davinci_intc_base = of_iomap(node, 0);
- if (of_property_read_u32(node, "ti,intc-size", &num_irq))
- pr_warn("unable to get intc-size, default to %d\n",
- num_irq);
- } else {
- davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_8K);
- }
- if (WARN_ON(!davinci_intc_base))
- return -EINVAL;
-
- cp_intc_write(0, CP_INTC_GLOBAL_ENABLE);
-
- /* Disable all host interrupts */
- cp_intc_write(0, CP_INTC_HOST_ENABLE(0));
-
- /* Disable system interrupts */
- for (i = 0; i < num_reg; i++)
- cp_intc_write(~0, CP_INTC_SYS_ENABLE_CLR(i));
-
- /* Set to normal mode, no nesting, no priority hold */
- cp_intc_write(0, CP_INTC_CTRL);
- cp_intc_write(0, CP_INTC_HOST_CTRL);
-
- /* Clear system interrupt status */
- for (i = 0; i < num_reg; i++)
- cp_intc_write(~0, CP_INTC_SYS_STAT_CLR(i));
-
- /* Enable nIRQ (what about nFIQ?) */
- cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
-
- /*
- * Priority is determined by host channel: lower channel number has
- * higher priority i.e. channel 0 has highest priority and channel 31
- * had the lowest priority.
- */
- num_reg = (num_irq + 3) >> 2; /* 4 channels per register */
- if (irq_prio) {
- unsigned j, k;
- u32 val;
-
- for (k = i = 0; i < num_reg; i++) {
- for (val = j = 0; j < 4; j++, k++) {
- val >>= 8;
- if (k < num_irq)
- val |= irq_prio[k] << 24;
- }
-
- cp_intc_write(val, CP_INTC_CHAN_MAP(i));
- }
- } else {
- /*
- * Default everything to channel 15 if priority not specified.
- * Note that channel 0-1 are mapped to nFIQ and channels 2-31
- * are mapped to nIRQ.
- */
- for (i = 0; i < num_reg; i++)
- cp_intc_write(0x0f0f0f0f, CP_INTC_CHAN_MAP(i));
- }
-
- if (host_map)
- for (i = 0; host_map[i] != -1; i++)
- cp_intc_write(host_map[i], CP_INTC_HOST_MAP(i));
-
- irq_base = irq_alloc_descs(-1, 0, num_irq, 0);
- if (irq_base < 0) {
- pr_warn("Couldn't allocate IRQ numbers\n");
- irq_base = 0;
- }
-
- /* create a legacy host */
- cp_intc_domain = irq_domain_add_legacy(node, num_irq,
- irq_base, 0, &cp_intc_host_ops, NULL);
-
- if (!cp_intc_domain) {
- pr_err("cp_intc: failed to allocate irq host!\n");
- return -EINVAL;
- }
-
- /* Enable global interrupt */
- cp_intc_write(1, CP_INTC_GLOBAL_ENABLE);
-
- return 0;
-}
-
-void __init cp_intc_init(void)
-{
- cp_intc_of_init(NULL, NULL);
-}
-
-IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", cp_intc_of_init);
diff --git a/arch/arm/mach-davinci/cp_intc.h b/arch/arm/mach-davinci/cp_intc.h
deleted file mode 100644
index 827bbe9baed4..000000000000
--- a/arch/arm/mach-davinci/cp_intc.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * TI Common Platform Interrupt Controller (cp_intc) definitions
- *
- * Author: Steve Chen <schen@mvista.com>
- * Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef __ASM_HARDWARE_CP_INTC_H
-#define __ASM_HARDWARE_CP_INTC_H
-
-#define CP_INTC_REV 0x00
-#define CP_INTC_CTRL 0x04
-#define CP_INTC_HOST_CTRL 0x0C
-#define CP_INTC_GLOBAL_ENABLE 0x10
-#define CP_INTC_GLOBAL_NESTING_LEVEL 0x1C
-#define CP_INTC_SYS_STAT_IDX_SET 0x20
-#define CP_INTC_SYS_STAT_IDX_CLR 0x24
-#define CP_INTC_SYS_ENABLE_IDX_SET 0x28
-#define CP_INTC_SYS_ENABLE_IDX_CLR 0x2C
-#define CP_INTC_GLOBAL_WAKEUP_ENABLE 0x30
-#define CP_INTC_HOST_ENABLE_IDX_SET 0x34
-#define CP_INTC_HOST_ENABLE_IDX_CLR 0x38
-#define CP_INTC_PACING_PRESCALE 0x40
-#define CP_INTC_VECTOR_BASE 0x50
-#define CP_INTC_VECTOR_SIZE 0x54
-#define CP_INTC_VECTOR_NULL 0x58
-#define CP_INTC_PRIO_IDX 0x80
-#define CP_INTC_PRIO_VECTOR 0x84
-#define CP_INTC_SECURE_ENABLE 0x90
-#define CP_INTC_SECURE_PRIO_IDX 0x94
-#define CP_INTC_PACING_PARAM(n) (0x0100 + (n << 4))
-#define CP_INTC_PACING_DEC(n) (0x0104 + (n << 4))
-#define CP_INTC_PACING_MAP(n) (0x0108 + (n << 4))
-#define CP_INTC_SYS_RAW_STAT(n) (0x0200 + (n << 2))
-#define CP_INTC_SYS_STAT_CLR(n) (0x0280 + (n << 2))
-#define CP_INTC_SYS_ENABLE_SET(n) (0x0300 + (n << 2))
-#define CP_INTC_SYS_ENABLE_CLR(n) (0x0380 + (n << 2))
-#define CP_INTC_CHAN_MAP(n) (0x0400 + (n << 2))
-#define CP_INTC_HOST_MAP(n) (0x0800 + (n << 2))
-#define CP_INTC_HOST_PRIO_IDX(n) (0x0900 + (n << 2))
-#define CP_INTC_SYS_POLARITY(n) (0x0D00 + (n << 2))
-#define CP_INTC_SYS_TYPE(n) (0x0D80 + (n << 2))
-#define CP_INTC_WAKEUP_ENABLE(n) (0x0E00 + (n << 2))
-#define CP_INTC_DEBUG_SELECT(n) (0x0F00 + (n << 2))
-#define CP_INTC_SYS_SECURE_ENABLE(n) (0x1000 + (n << 2))
-#define CP_INTC_HOST_NESTING_LEVEL(n) (0x1100 + (n << 2))
-#define CP_INTC_HOST_ENABLE(n) (0x1500 + (n << 2))
-#define CP_INTC_HOST_PRIO_VECTOR(n) (0x1600 + (n << 2))
-#define CP_INTC_VECTOR_ADDR(n) (0x2000 + (n << 2))
-
-void cp_intc_init(void);
-int cp_intc_of_init(struct device_node *, struct device_node *);
-
-#endif /* __ASM_HARDWARE_CP_INTC_H */
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 2cc9fe4c3a91..63511f638ce4 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -12,6 +12,7 @@
#include <linux/clk/davinci.h>
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/platform_data/gpio-davinci.h>
#include <asm/mach/map.h>
@@ -19,9 +20,9 @@
#include <mach/common.h>
#include <mach/cputype.h>
#include <mach/da8xx.h>
-#include <mach/irqs.h>
#include <mach/time.h>
+#include "irqs.h"
#include "mux.h"
/* Offsets of the 8 compare registers on the da830 */
@@ -623,101 +624,6 @@ const short da830_eqep1_pins[] __initconst = {
-1
};
-/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */
-static u8 da830_default_priorities[DA830_N_CP_INTC_IRQ] = {
- [IRQ_DA8XX_COMMTX] = 7,
- [IRQ_DA8XX_COMMRX] = 7,
- [IRQ_DA8XX_NINT] = 7,
- [IRQ_DA8XX_EVTOUT0] = 7,
- [IRQ_DA8XX_EVTOUT1] = 7,
- [IRQ_DA8XX_EVTOUT2] = 7,
- [IRQ_DA8XX_EVTOUT3] = 7,
- [IRQ_DA8XX_EVTOUT4] = 7,
- [IRQ_DA8XX_EVTOUT5] = 7,
- [IRQ_DA8XX_EVTOUT6] = 7,
- [IRQ_DA8XX_EVTOUT7] = 7,
- [IRQ_DA8XX_CCINT0] = 7,
- [IRQ_DA8XX_CCERRINT] = 7,
- [IRQ_DA8XX_TCERRINT0] = 7,
- [IRQ_DA8XX_AEMIFINT] = 7,
- [IRQ_DA8XX_I2CINT0] = 7,
- [IRQ_DA8XX_MMCSDINT0] = 7,
- [IRQ_DA8XX_MMCSDINT1] = 7,
- [IRQ_DA8XX_ALLINT0] = 7,
- [IRQ_DA8XX_RTC] = 7,
- [IRQ_DA8XX_SPINT0] = 7,
- [IRQ_DA8XX_TINT12_0] = 7,
- [IRQ_DA8XX_TINT34_0] = 7,
- [IRQ_DA8XX_TINT12_1] = 7,
- [IRQ_DA8XX_TINT34_1] = 7,
- [IRQ_DA8XX_UARTINT0] = 7,
- [IRQ_DA8XX_KEYMGRINT] = 7,
- [IRQ_DA830_MPUERR] = 7,
- [IRQ_DA8XX_CHIPINT0] = 7,
- [IRQ_DA8XX_CHIPINT1] = 7,
- [IRQ_DA8XX_CHIPINT2] = 7,
- [IRQ_DA8XX_CHIPINT3] = 7,
- [IRQ_DA8XX_TCERRINT1] = 7,
- [IRQ_DA8XX_C0_RX_THRESH_PULSE] = 7,
- [IRQ_DA8XX_C0_RX_PULSE] = 7,
- [IRQ_DA8XX_C0_TX_PULSE] = 7,
- [IRQ_DA8XX_C0_MISC_PULSE] = 7,
- [IRQ_DA8XX_C1_RX_THRESH_PULSE] = 7,
- [IRQ_DA8XX_C1_RX_PULSE] = 7,
- [IRQ_DA8XX_C1_TX_PULSE] = 7,
- [IRQ_DA8XX_C1_MISC_PULSE] = 7,
- [IRQ_DA8XX_MEMERR] = 7,
- [IRQ_DA8XX_GPIO0] = 7,
- [IRQ_DA8XX_GPIO1] = 7,
- [IRQ_DA8XX_GPIO2] = 7,
- [IRQ_DA8XX_GPIO3] = 7,
- [IRQ_DA8XX_GPIO4] = 7,
- [IRQ_DA8XX_GPIO5] = 7,
- [IRQ_DA8XX_GPIO6] = 7,
- [IRQ_DA8XX_GPIO7] = 7,
- [IRQ_DA8XX_GPIO8] = 7,
- [IRQ_DA8XX_I2CINT1] = 7,
- [IRQ_DA8XX_LCDINT] = 7,
- [IRQ_DA8XX_UARTINT1] = 7,
- [IRQ_DA8XX_MCASPINT] = 7,
- [IRQ_DA8XX_ALLINT1] = 7,
- [IRQ_DA8XX_SPINT1] = 7,
- [IRQ_DA8XX_UHPI_INT1] = 7,
- [IRQ_DA8XX_USB_INT] = 7,
- [IRQ_DA8XX_IRQN] = 7,
- [IRQ_DA8XX_RWAKEUP] = 7,
- [IRQ_DA8XX_UARTINT2] = 7,
- [IRQ_DA8XX_DFTSSINT] = 7,
- [IRQ_DA8XX_EHRPWM0] = 7,
- [IRQ_DA8XX_EHRPWM0TZ] = 7,
- [IRQ_DA8XX_EHRPWM1] = 7,
- [IRQ_DA8XX_EHRPWM1TZ] = 7,
- [IRQ_DA830_EHRPWM2] = 7,
- [IRQ_DA830_EHRPWM2TZ] = 7,
- [IRQ_DA8XX_ECAP0] = 7,
- [IRQ_DA8XX_ECAP1] = 7,
- [IRQ_DA8XX_ECAP2] = 7,
- [IRQ_DA830_EQEP0] = 7,
- [IRQ_DA830_EQEP1] = 7,
- [IRQ_DA830_T12CMPINT0_0] = 7,
- [IRQ_DA830_T12CMPINT1_0] = 7,
- [IRQ_DA830_T12CMPINT2_0] = 7,
- [IRQ_DA830_T12CMPINT3_0] = 7,
- [IRQ_DA830_T12CMPINT4_0] = 7,
- [IRQ_DA830_T12CMPINT5_0] = 7,
- [IRQ_DA830_T12CMPINT6_0] = 7,
- [IRQ_DA830_T12CMPINT7_0] = 7,
- [IRQ_DA830_T12CMPINT0_1] = 7,
- [IRQ_DA830_T12CMPINT1_1] = 7,
- [IRQ_DA830_T12CMPINT2_1] = 7,
- [IRQ_DA830_T12CMPINT3_1] = 7,
- [IRQ_DA830_T12CMPINT4_1] = 7,
- [IRQ_DA830_T12CMPINT5_1] = 7,
- [IRQ_DA830_T12CMPINT6_1] = 7,
- [IRQ_DA830_T12CMPINT7_1] = 7,
- [IRQ_DA8XX_ARMCLKSTOPREQ] = 7,
-};
-
static struct map_desc da830_io_desc[] = {
{
.virtual = IO_VIRT,
@@ -772,17 +678,17 @@ int __init da830_register_gpio(void)
static struct davinci_timer_instance da830_timer_instance[2] = {
{
.base = DA8XX_TIMER64P0_BASE,
- .bottom_irq = IRQ_DA8XX_TINT12_0,
- .top_irq = IRQ_DA8XX_TINT34_0,
+ .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_0),
+ .top_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_0),
.cmp_off = DA830_CMP12_0,
- .cmp_irq = IRQ_DA830_T12CMPINT0_0,
+ .cmp_irq = DAVINCI_INTC_IRQ(IRQ_DA830_T12CMPINT0_0),
},
{
.base = DA8XX_TIMER64P1_BASE,
- .bottom_irq = IRQ_DA8XX_TINT12_1,
- .top_irq = IRQ_DA8XX_TINT34_1,
+ .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_1),
+ .top_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_1),
.cmp_off = DA830_CMP12_0,
- .cmp_irq = IRQ_DA830_T12CMPINT0_1,
+ .cmp_irq = DAVINCI_INTC_IRQ(IRQ_DA830_T12CMPINT0_1),
},
};
@@ -806,10 +712,6 @@ static const struct davinci_soc_info davinci_soc_info_da830 = {
.pinmux_base = DA8XX_SYSCFG0_BASE + 0x120,
.pinmux_pins = da830_pins,
.pinmux_pins_num = ARRAY_SIZE(da830_pins),
- .intc_base = DA8XX_CP_INTC_BASE,
- .intc_type = DAVINCI_INTC_TYPE_CP_INTC,
- .intc_irq_prios = da830_default_priorities,
- .intc_irq_num = DA830_N_CP_INTC_IRQ,
.timer_info = &da830_timer_info,
.emac_pdata = &da8xx_emac_pdata,
};
@@ -822,6 +724,20 @@ void __init da830_init(void)
WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module");
}
+static const struct davinci_cp_intc_config da830_cp_intc_config = {
+ .reg = {
+ .start = DA8XX_CP_INTC_BASE,
+ .end = DA8XX_CP_INTC_BASE + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .num_irqs = DA830_N_CP_INTC_IRQ,
+};
+
+void __init da830_init_irq(void)
+{
+ davinci_cp_intc_init(&da830_cp_intc_config);
+}
+
void __init da830_init_time(void)
{
void __iomem *pll;
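/*
 * Illustrative note, not taken from this patch: the DAVINCI_INTC_IRQ()
 * wrapper used throughout the hunks above conceptually offsets a SoC
 * interrupt number into the Linux IRQ range claimed by the new DaVinci
 * irqchip drivers.  The names below are hypothetical stand-ins for the
 * definitions carried by the new mach-davinci "irqs.h" header.
 */
#define EXAMPLE_DAVINCI_INTC_START	NR_IRQS_LEGACY
#define EXAMPLE_DAVINCI_INTC_IRQ(n)	((n) + EXAMPLE_DAVINCI_INTC_START)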
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index e7b78df2bfef..67ab71ba3ad3 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -18,9 +18,11 @@
#include <linux/cpufreq.h>
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/platform_data/clk-da8xx-cfgchip.h>
#include <linux/platform_data/clk-davinci-pll.h>
+#include <linux/platform_data/davinci-cpufreq.h>
#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -29,13 +31,12 @@
#include <asm/mach/map.h>
#include <mach/common.h>
-#include <mach/cpufreq.h>
#include <mach/cputype.h>
#include <mach/da8xx.h>
-#include <mach/irqs.h>
#include <mach/pm.h>
#include <mach/time.h>
+#include "irqs.h"
#include "mux.h"
#define DA850_PLL1_BASE 0x01e1a000
@@ -298,111 +299,6 @@ const short da850_vpif_display_pins[] __initconst = {
-1
};
-/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */
-static u8 da850_default_priorities[DA850_N_CP_INTC_IRQ] = {
- [IRQ_DA8XX_COMMTX] = 7,
- [IRQ_DA8XX_COMMRX] = 7,
- [IRQ_DA8XX_NINT] = 7,
- [IRQ_DA8XX_EVTOUT0] = 7,
- [IRQ_DA8XX_EVTOUT1] = 7,
- [IRQ_DA8XX_EVTOUT2] = 7,
- [IRQ_DA8XX_EVTOUT3] = 7,
- [IRQ_DA8XX_EVTOUT4] = 7,
- [IRQ_DA8XX_EVTOUT5] = 7,
- [IRQ_DA8XX_EVTOUT6] = 7,
- [IRQ_DA8XX_EVTOUT7] = 7,
- [IRQ_DA8XX_CCINT0] = 7,
- [IRQ_DA8XX_CCERRINT] = 7,
- [IRQ_DA8XX_TCERRINT0] = 7,
- [IRQ_DA8XX_AEMIFINT] = 7,
- [IRQ_DA8XX_I2CINT0] = 7,
- [IRQ_DA8XX_MMCSDINT0] = 7,
- [IRQ_DA8XX_MMCSDINT1] = 7,
- [IRQ_DA8XX_ALLINT0] = 7,
- [IRQ_DA8XX_RTC] = 7,
- [IRQ_DA8XX_SPINT0] = 7,
- [IRQ_DA8XX_TINT12_0] = 7,
- [IRQ_DA8XX_TINT34_0] = 7,
- [IRQ_DA8XX_TINT12_1] = 7,
- [IRQ_DA8XX_TINT34_1] = 7,
- [IRQ_DA8XX_UARTINT0] = 7,
- [IRQ_DA8XX_KEYMGRINT] = 7,
- [IRQ_DA850_MPUADDRERR0] = 7,
- [IRQ_DA8XX_CHIPINT0] = 7,
- [IRQ_DA8XX_CHIPINT1] = 7,
- [IRQ_DA8XX_CHIPINT2] = 7,
- [IRQ_DA8XX_CHIPINT3] = 7,
- [IRQ_DA8XX_TCERRINT1] = 7,
- [IRQ_DA8XX_C0_RX_THRESH_PULSE] = 7,
- [IRQ_DA8XX_C0_RX_PULSE] = 7,
- [IRQ_DA8XX_C0_TX_PULSE] = 7,
- [IRQ_DA8XX_C0_MISC_PULSE] = 7,
- [IRQ_DA8XX_C1_RX_THRESH_PULSE] = 7,
- [IRQ_DA8XX_C1_RX_PULSE] = 7,
- [IRQ_DA8XX_C1_TX_PULSE] = 7,
- [IRQ_DA8XX_C1_MISC_PULSE] = 7,
- [IRQ_DA8XX_MEMERR] = 7,
- [IRQ_DA8XX_GPIO0] = 7,
- [IRQ_DA8XX_GPIO1] = 7,
- [IRQ_DA8XX_GPIO2] = 7,
- [IRQ_DA8XX_GPIO3] = 7,
- [IRQ_DA8XX_GPIO4] = 7,
- [IRQ_DA8XX_GPIO5] = 7,
- [IRQ_DA8XX_GPIO6] = 7,
- [IRQ_DA8XX_GPIO7] = 7,
- [IRQ_DA8XX_GPIO8] = 7,
- [IRQ_DA8XX_I2CINT1] = 7,
- [IRQ_DA8XX_LCDINT] = 7,
- [IRQ_DA8XX_UARTINT1] = 7,
- [IRQ_DA8XX_MCASPINT] = 7,
- [IRQ_DA8XX_ALLINT1] = 7,
- [IRQ_DA8XX_SPINT1] = 7,
- [IRQ_DA8XX_UHPI_INT1] = 7,
- [IRQ_DA8XX_USB_INT] = 7,
- [IRQ_DA8XX_IRQN] = 7,
- [IRQ_DA8XX_RWAKEUP] = 7,
- [IRQ_DA8XX_UARTINT2] = 7,
- [IRQ_DA8XX_DFTSSINT] = 7,
- [IRQ_DA8XX_EHRPWM0] = 7,
- [IRQ_DA8XX_EHRPWM0TZ] = 7,
- [IRQ_DA8XX_EHRPWM1] = 7,
- [IRQ_DA8XX_EHRPWM1TZ] = 7,
- [IRQ_DA850_SATAINT] = 7,
- [IRQ_DA850_TINTALL_2] = 7,
- [IRQ_DA8XX_ECAP0] = 7,
- [IRQ_DA8XX_ECAP1] = 7,
- [IRQ_DA8XX_ECAP2] = 7,
- [IRQ_DA850_MMCSDINT0_1] = 7,
- [IRQ_DA850_MMCSDINT1_1] = 7,
- [IRQ_DA850_T12CMPINT0_2] = 7,
- [IRQ_DA850_T12CMPINT1_2] = 7,
- [IRQ_DA850_T12CMPINT2_2] = 7,
- [IRQ_DA850_T12CMPINT3_2] = 7,
- [IRQ_DA850_T12CMPINT4_2] = 7,
- [IRQ_DA850_T12CMPINT5_2] = 7,
- [IRQ_DA850_T12CMPINT6_2] = 7,
- [IRQ_DA850_T12CMPINT7_2] = 7,
- [IRQ_DA850_T12CMPINT0_3] = 7,
- [IRQ_DA850_T12CMPINT1_3] = 7,
- [IRQ_DA850_T12CMPINT2_3] = 7,
- [IRQ_DA850_T12CMPINT3_3] = 7,
- [IRQ_DA850_T12CMPINT4_3] = 7,
- [IRQ_DA850_T12CMPINT5_3] = 7,
- [IRQ_DA850_T12CMPINT6_3] = 7,
- [IRQ_DA850_T12CMPINT7_3] = 7,
- [IRQ_DA850_RPIINT] = 7,
- [IRQ_DA850_VPIFINT] = 7,
- [IRQ_DA850_CCINT1] = 7,
- [IRQ_DA850_CCERRINT1] = 7,
- [IRQ_DA850_TCERRINT2] = 7,
- [IRQ_DA850_TINTALL_3] = 7,
- [IRQ_DA850_MCBSP0RINT] = 7,
- [IRQ_DA850_MCBSP0XINT] = 7,
- [IRQ_DA850_MCBSP1RINT] = 7,
- [IRQ_DA850_MCBSP1XINT] = 7,
- [IRQ_DA8XX_ARMCLKSTOPREQ] = 7,
-};
-
static struct map_desc da850_io_desc[] = {
{
.virtual = IO_VIRT,
@@ -439,23 +335,23 @@ static struct davinci_id da850_ids[] = {
static struct davinci_timer_instance da850_timer_instance[4] = {
{
.base = DA8XX_TIMER64P0_BASE,
- .bottom_irq = IRQ_DA8XX_TINT12_0,
- .top_irq = IRQ_DA8XX_TINT34_0,
+ .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_0),
+ .top_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_0),
},
{
.base = DA8XX_TIMER64P1_BASE,
- .bottom_irq = IRQ_DA8XX_TINT12_1,
- .top_irq = IRQ_DA8XX_TINT34_1,
+ .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_1),
+ .top_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_1),
},
{
.base = DA850_TIMER64P2_BASE,
- .bottom_irq = IRQ_DA850_TINT12_2,
- .top_irq = IRQ_DA850_TINT34_2,
+ .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA850_TINT12_2),
+ .top_irq = DAVINCI_INTC_IRQ(IRQ_DA850_TINT34_2),
},
{
.base = DA850_TIMER64P3_BASE,
- .bottom_irq = IRQ_DA850_TINT12_3,
- .top_irq = IRQ_DA850_TINT34_3,
+ .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA850_TINT12_3),
+ .top_irq = DAVINCI_INTC_IRQ(IRQ_DA850_TINT34_3),
},
};
@@ -658,8 +554,8 @@ static struct platform_device da850_vpif_dev = {
static struct resource da850_vpif_display_resource[] = {
{
- .start = IRQ_DA850_VPIFINT,
- .end = IRQ_DA850_VPIFINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -677,13 +573,13 @@ static struct platform_device da850_vpif_display_dev = {
static struct resource da850_vpif_capture_resource[] = {
{
- .start = IRQ_DA850_VPIFINT,
- .end = IRQ_DA850_VPIFINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA850_VPIFINT,
- .end = IRQ_DA850_VPIFINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -738,10 +634,6 @@ static const struct davinci_soc_info davinci_soc_info_da850 = {
.pinmux_base = DA8XX_SYSCFG0_BASE + 0x120,
.pinmux_pins = da850_pins,
.pinmux_pins_num = ARRAY_SIZE(da850_pins),
- .intc_base = DA8XX_CP_INTC_BASE,
- .intc_type = DAVINCI_INTC_TYPE_CP_INTC,
- .intc_irq_prios = da850_default_priorities,
- .intc_irq_num = DA850_N_CP_INTC_IRQ,
.timer_info = &da850_timer_info,
.emac_pdata = &da8xx_emac_pdata,
.sram_dma = DA8XX_SHARED_RAM_BASE,
@@ -760,6 +652,20 @@ void __init da850_init(void)
WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module");
}
+static const struct davinci_cp_intc_config da850_cp_intc_config = {
+ .reg = {
+ .start = DA8XX_CP_INTC_BASE,
+ .end = DA8XX_CP_INTC_BASE + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .num_irqs = DA850_N_CP_INTC_IRQ,
+};
+
+void __init da850_init_irq(void)
+{
+ davinci_cp_intc_init(&da850_cp_intc_config);
+}
+
void __init da850_init_time(void)
{
void __iomem *pll0;
diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h
index db4c95ef4d5c..56c1835c42e5 100644
--- a/arch/arm/mach-davinci/davinci.h
+++ b/arch/arm/mach-davinci/davinci.h
@@ -88,6 +88,7 @@ int davinci_init_wdt(void);
/* DM355 function declarations */
void dm355_init(void);
void dm355_init_time(void);
+void dm355_init_irq(void);
void dm355_register_clocks(void);
void dm355_init_spi0(unsigned chipselect_mask,
const struct spi_board_info *info, unsigned len);
@@ -97,6 +98,7 @@ int dm355_gpio_register(void);
/* DM365 function declarations */
void dm365_init(void);
+void dm365_init_irq(void);
void dm365_init_time(void);
void dm365_register_clocks(void);
void dm365_init_asp(void);
@@ -110,6 +112,7 @@ int dm365_gpio_register(void);
/* DM644x function declarations */
void dm644x_init(void);
+void dm644x_init_irq(void);
void dm644x_init_devices(void);
void dm644x_init_time(void);
void dm644x_register_clocks(void);
@@ -119,6 +122,7 @@ int dm644x_gpio_register(void);
/* DM646x function declarations */
void dm646x_init(void);
+void dm646x_init_irq(void);
void dm646x_init_time(unsigned long ref_clk_rate, unsigned long aux_clkin_rate);
void dm646x_register_clocks(void);
void dm646x_init_mcasp0(struct snd_platform_data *pdata);
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index cf78da5ab054..b8dc674e06bc 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -28,6 +28,7 @@
#include "asp.h"
#include "cpuidle.h"
+#include "irqs.h"
#include "sram.h"
#define DA8XX_TPCC_BASE 0x01c00000
@@ -64,7 +65,7 @@ void __iomem *da8xx_syscfg1_base;
static struct plat_serial8250_port da8xx_serial0_pdata[] = {
{
.mapbase = DA8XX_UART0_BASE,
- .irq = IRQ_DA8XX_UARTINT0,
+ .irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_UARTINT0),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -77,7 +78,7 @@ static struct plat_serial8250_port da8xx_serial0_pdata[] = {
static struct plat_serial8250_port da8xx_serial1_pdata[] = {
{
.mapbase = DA8XX_UART1_BASE,
- .irq = IRQ_DA8XX_UARTINT1,
+ .irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_UARTINT1),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -90,7 +91,7 @@ static struct plat_serial8250_port da8xx_serial1_pdata[] = {
static struct plat_serial8250_port da8xx_serial2_pdata[] = {
{
.mapbase = DA8XX_UART2_BASE,
- .irq = IRQ_DA8XX_UARTINT2,
+ .irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_UARTINT2),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -171,12 +172,12 @@ static struct resource da8xx_edma0_resources[] = {
},
{
.name = "edma3_ccint",
- .start = IRQ_DA8XX_CCINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_CCINT0),
.flags = IORESOURCE_IRQ,
},
{
.name = "edma3_ccerrint",
- .start = IRQ_DA8XX_CCERRINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_CCERRINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -196,12 +197,12 @@ static struct resource da850_edma1_resources[] = {
},
{
.name = "edma3_ccint",
- .start = IRQ_DA850_CCINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA850_CCINT1),
.flags = IORESOURCE_IRQ,
},
{
.name = "edma3_ccerrint",
- .start = IRQ_DA850_CCERRINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA850_CCERRINT1),
.flags = IORESOURCE_IRQ,
},
};
@@ -306,8 +307,8 @@ static struct resource da8xx_i2c_resources0[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DA8XX_I2CINT0,
- .end = IRQ_DA8XX_I2CINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_I2CINT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_I2CINT0),
.flags = IORESOURCE_IRQ,
},
};
@@ -326,8 +327,8 @@ static struct resource da8xx_i2c_resources1[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DA8XX_I2CINT1,
- .end = IRQ_DA8XX_I2CINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_I2CINT1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_I2CINT1),
.flags = IORESOURCE_IRQ,
},
};
@@ -382,23 +383,23 @@ static struct resource da8xx_emac_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DA8XX_C0_RX_THRESH_PULSE,
- .end = IRQ_DA8XX_C0_RX_THRESH_PULSE,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_RX_THRESH_PULSE),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_RX_THRESH_PULSE),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_C0_RX_PULSE,
- .end = IRQ_DA8XX_C0_RX_PULSE,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_RX_PULSE),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_RX_PULSE),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_C0_TX_PULSE,
- .end = IRQ_DA8XX_C0_TX_PULSE,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_TX_PULSE),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_TX_PULSE),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_C0_MISC_PULSE,
- .end = IRQ_DA8XX_C0_MISC_PULSE,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_MISC_PULSE),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_MISC_PULSE),
.flags = IORESOURCE_IRQ,
},
};
@@ -470,7 +471,7 @@ static struct resource da830_mcasp1_resources[] = {
},
{
.name = "common",
- .start = IRQ_DA8XX_MCASPINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_MCASPINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -505,7 +506,7 @@ static struct resource da830_mcasp2_resources[] = {
},
{
.name = "common",
- .start = IRQ_DA8XX_MCASPINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_MCASPINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -540,7 +541,7 @@ static struct resource da850_mcasp_resources[] = {
},
{
.name = "common",
- .start = IRQ_DA8XX_MCASPINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_MCASPINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -588,43 +589,43 @@ static struct resource da8xx_pruss_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DA8XX_EVTOUT0,
- .end = IRQ_DA8XX_EVTOUT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_EVTOUT1,
- .end = IRQ_DA8XX_EVTOUT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT1),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_EVTOUT2,
- .end = IRQ_DA8XX_EVTOUT2,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT2),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT2),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_EVTOUT3,
- .end = IRQ_DA8XX_EVTOUT3,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT3),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT3),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_EVTOUT4,
- .end = IRQ_DA8XX_EVTOUT4,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT4),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT4),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_EVTOUT5,
- .end = IRQ_DA8XX_EVTOUT5,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT5),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT5),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_EVTOUT6,
- .end = IRQ_DA8XX_EVTOUT6,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT6),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT6),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_EVTOUT7,
- .end = IRQ_DA8XX_EVTOUT7,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT7),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT7),
.flags = IORESOURCE_IRQ,
},
};
@@ -674,8 +675,8 @@ static struct resource da8xx_lcdc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = { /* interrupt */
- .start = IRQ_DA8XX_LCDINT,
- .end = IRQ_DA8XX_LCDINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_LCDINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_LCDINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -700,48 +701,48 @@ static struct resource da8xx_gpio_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* interrupt */
- .start = IRQ_DA8XX_GPIO0,
- .end = IRQ_DA8XX_GPIO0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_GPIO1,
- .end = IRQ_DA8XX_GPIO1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO1),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_GPIO2,
- .end = IRQ_DA8XX_GPIO2,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO2),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO2),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_GPIO3,
- .end = IRQ_DA8XX_GPIO3,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO3),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO3),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_GPIO4,
- .end = IRQ_DA8XX_GPIO4,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO4),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO4),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_GPIO5,
- .end = IRQ_DA8XX_GPIO5,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO5),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO5),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_GPIO6,
- .end = IRQ_DA8XX_GPIO6,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO6),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO6),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_GPIO7,
- .end = IRQ_DA8XX_GPIO7,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO7),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO7),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DA8XX_GPIO8,
- .end = IRQ_DA8XX_GPIO8,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO8),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO8),
.flags = IORESOURCE_IRQ,
},
};
@@ -766,8 +767,8 @@ static struct resource da8xx_mmcsd0_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* interrupt */
- .start = IRQ_DA8XX_MMCSDINT0,
- .end = IRQ_DA8XX_MMCSDINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_MMCSDINT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_MMCSDINT0),
.flags = IORESOURCE_IRQ,
},
};
@@ -793,8 +794,8 @@ static struct resource da850_mmcsd1_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* interrupt */
- .start = IRQ_DA850_MMCSDINT0_1,
- .end = IRQ_DA850_MMCSDINT0_1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA850_MMCSDINT0_1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA850_MMCSDINT0_1),
.flags = IORESOURCE_IRQ,
},
};
@@ -845,8 +846,8 @@ static struct resource da8xx_rproc_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* dsp irq */
- .start = IRQ_DA8XX_CHIPINT0,
- .end = IRQ_DA8XX_CHIPINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_CHIPINT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_CHIPINT0),
.flags = IORESOURCE_IRQ,
},
};
@@ -936,13 +937,13 @@ static struct resource da8xx_rtc_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* timer irq */
- .start = IRQ_DA8XX_RTC,
- .end = IRQ_DA8XX_RTC,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_RTC),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_RTC),
.flags = IORESOURCE_IRQ,
},
{ /* alarm irq */
- .start = IRQ_DA8XX_RTC,
- .end = IRQ_DA8XX_RTC,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_RTC),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_RTC),
.flags = IORESOURCE_IRQ,
},
};
@@ -1009,8 +1010,8 @@ static struct resource da8xx_spi0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_DA8XX_SPINT0,
- .end = IRQ_DA8XX_SPINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_SPINT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_SPINT0),
.flags = IORESOURCE_IRQ,
},
};
@@ -1022,8 +1023,8 @@ static struct resource da8xx_spi1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_DA8XX_SPINT1,
- .end = IRQ_DA8XX_SPINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_SPINT1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_SPINT1),
.flags = IORESOURCE_IRQ,
},
};
@@ -1103,7 +1104,7 @@ static struct resource da850_sata_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DA850_SATAINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA850_SATAINT),
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index e8dbbb7479ab..40bd8029e457 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -11,21 +11,20 @@
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/i2c-davinci.h>
+#include <linux/platform_data/mmc-davinci.h>
+#include <linux/platform_data/edma.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <mach/hardware.h>
-#include <linux/platform_data/i2c-davinci.h>
-#include <mach/irqs.h>
#include <mach/cputype.h>
#include <mach/mux.h>
-#include <linux/platform_data/mmc-davinci.h>
#include <mach/time.h>
-#include <linux/platform_data/edma.h>
-
#include "davinci.h"
+#include "irqs.h"
#define DAVINCI_I2C_BASE 0x01C21000
#define DAVINCI_ATA_BASE 0x01C66000
@@ -56,7 +55,7 @@ static struct resource i2c_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_I2C,
+ .start = DAVINCI_INTC_IRQ(IRQ_I2C),
.flags = IORESOURCE_IRQ,
},
};
@@ -84,8 +83,8 @@ static struct resource ide_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_IDE,
- .end = IRQ_IDE,
+ .start = DAVINCI_INTC_IRQ(IRQ_IDE),
+ .end = DAVINCI_INTC_IRQ(IRQ_IDE),
.flags = IORESOURCE_IRQ,
},
};
@@ -133,11 +132,11 @@ static struct resource mmcsd0_resources[] = {
},
/* IRQs: MMC/SD, then SDIO */
{
- .start = IRQ_MMCINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_MMCINT),
.flags = IORESOURCE_IRQ,
}, {
/* different on dm355 */
- .start = IRQ_SDIOINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_SDIOINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -163,10 +162,10 @@ static struct resource mmcsd1_resources[] = {
},
/* IRQs: MMC/SD, then SDIO */
{
- .start = IRQ_DM355_MMCINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_MMCINT1),
.flags = IORESOURCE_IRQ,
}, {
- .start = IRQ_DM355_SDIOINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_SDIOINT1),
.flags = IORESOURCE_IRQ,
},
};
@@ -219,7 +218,8 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
mmcsd1_resources[0].start = DM365_MMCSD1_BASE;
mmcsd1_resources[0].end = DM365_MMCSD1_BASE +
SZ_4K - 1;
- mmcsd1_resources[2].start = IRQ_DM365_SDIOINT1;
+ mmcsd1_resources[2].start = DAVINCI_INTC_IRQ(
+ IRQ_DM365_SDIOINT1);
davinci_mmcsd1_device.name = "da830-mmc";
} else
break;
@@ -230,7 +230,8 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
if (cpu_is_davinci_dm355()) {
mmcsd0_resources[0].start = DM355_MMCSD0_BASE;
mmcsd0_resources[0].end = DM355_MMCSD0_BASE + SZ_4K - 1;
- mmcsd0_resources[2].start = IRQ_DM355_SDIOINT0;
+ mmcsd0_resources[2].start = DAVINCI_INTC_IRQ(
+ IRQ_DM355_SDIOINT0);
/* expose all 6 MMC0 signals: CLK, CMD, DATA[0..3] */
davinci_cfg_reg(DM355_MMCSD0);
@@ -241,7 +242,8 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
mmcsd0_resources[0].start = DM365_MMCSD0_BASE;
mmcsd0_resources[0].end = DM365_MMCSD0_BASE +
SZ_4K - 1;
- mmcsd0_resources[2].start = IRQ_DM365_SDIOINT0;
+ mmcsd0_resources[2].start = DAVINCI_INTC_IRQ(
+ IRQ_DM365_SDIOINT0);
davinci_mmcsd0_device.name = "da830-mmc";
} else if (cpu_is_davinci_dm644x()) {
/* REVISIT: should this be in board-init code? */
@@ -313,13 +315,13 @@ int davinci_gpio_register(struct resource *res, int size, void *pdata)
struct davinci_timer_instance davinci_timer_instance[2] = {
{
.base = DAVINCI_TIMER0_BASE,
- .bottom_irq = IRQ_TINT0_TINT12,
- .top_irq = IRQ_TINT0_TINT34,
+ .bottom_irq = DAVINCI_INTC_IRQ(IRQ_TINT0_TINT12),
+ .top_irq = DAVINCI_INTC_IRQ(IRQ_TINT0_TINT34),
},
{
.base = DAVINCI_TIMER1_BASE,
- .bottom_irq = IRQ_TINT1_TINT12,
- .top_irq = IRQ_TINT1_TINT34,
+ .bottom_irq = DAVINCI_INTC_IRQ(IRQ_TINT1_TINT12),
+ .top_irq = DAVINCI_INTC_IRQ(IRQ_TINT1_TINT34),
},
};
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 4c6e0bef4509..4a482445b9a2 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_data/spi-davinci.h>
@@ -26,13 +27,13 @@
#include <mach/common.h>
#include <mach/cputype.h>
-#include <mach/irqs.h>
#include <mach/mux.h>
#include <mach/serial.h>
#include <mach/time.h>
#include "asp.h"
#include "davinci.h"
+#include "irqs.h"
#include "mux.h"
#define DM355_UART2_BASE (IO_PHYS + 0x206000)
@@ -53,7 +54,7 @@ static struct resource dm355_spi0_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DM355_SPINT0_0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_SPINT0_0),
.flags = IORESOURCE_IRQ,
},
};
@@ -273,12 +274,12 @@ static struct resource edma_resources[] = {
},
{
.name = "edma3_ccint",
- .start = IRQ_CCINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_CCINT0),
.flags = IORESOURCE_IRQ,
},
{
.name = "edma3_ccerrint",
- .start = IRQ_CCERRINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_CCERRINT),
.flags = IORESOURCE_IRQ,
},
/* not using (or muxing) TC*_ERR */
@@ -358,13 +359,13 @@ static struct platform_device dm355_vpss_device = {
static struct resource vpfe_resources[] = {
{
- .start = IRQ_VDINT0,
- .end = IRQ_VDINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_VDINT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_VDINT0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_VDINT1,
- .end = IRQ_VDINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_VDINT1),
+ .end = DAVINCI_INTC_IRQ(IRQ_VDINT1),
.flags = IORESOURCE_IRQ,
},
};
@@ -422,8 +423,8 @@ static struct platform_device dm355_osd_dev = {
static struct resource dm355_venc_resources[] = {
{
- .start = IRQ_VENCINT,
- .end = IRQ_VENCINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_VENCINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_VENCINT),
.flags = IORESOURCE_IRQ,
},
/* venc registers io space */
@@ -442,8 +443,8 @@ static struct resource dm355_venc_resources[] = {
static struct resource dm355_v4l2_disp_resources[] = {
{
- .start = IRQ_VENCINT,
- .end = IRQ_VENCINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_VENCINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_VENCINT),
.flags = IORESOURCE_IRQ,
},
/* venc registers io space */
@@ -547,38 +548,38 @@ static struct resource dm355_gpio_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* interrupt */
- .start = IRQ_DM355_GPIOBNK0,
- .end = IRQ_DM355_GPIOBNK0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM355_GPIOBNK1,
- .end = IRQ_DM355_GPIOBNK1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK1),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM355_GPIOBNK2,
- .end = IRQ_DM355_GPIOBNK2,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK2),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK2),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM355_GPIOBNK3,
- .end = IRQ_DM355_GPIOBNK3,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK3),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK3),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM355_GPIOBNK4,
- .end = IRQ_DM355_GPIOBNK4,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK4),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK4),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM355_GPIOBNK5,
- .end = IRQ_DM355_GPIOBNK5,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK5),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK5),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM355_GPIOBNK6,
- .end = IRQ_DM355_GPIOBNK6,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK6),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK6),
.flags = IORESOURCE_IRQ,
},
};
@@ -632,7 +633,7 @@ static struct davinci_timer_info dm355_timer_info = {
static struct plat_serial8250_port dm355_serial0_platform_data[] = {
{
.mapbase = DAVINCI_UART0_BASE,
- .irq = IRQ_UARTINT0,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT0),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -645,7 +646,7 @@ static struct plat_serial8250_port dm355_serial0_platform_data[] = {
static struct plat_serial8250_port dm355_serial1_platform_data[] = {
{
.mapbase = DAVINCI_UART1_BASE,
- .irq = IRQ_UARTINT1,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT1),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -658,7 +659,7 @@ static struct plat_serial8250_port dm355_serial1_platform_data[] = {
static struct plat_serial8250_port dm355_serial2_platform_data[] = {
{
.mapbase = DM355_UART2_BASE,
- .irq = IRQ_DM355_UARTINT2,
+ .irq = DAVINCI_INTC_IRQ(IRQ_DM355_UARTINT2),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -704,10 +705,6 @@ static const struct davinci_soc_info davinci_soc_info_dm355 = {
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm355_pins,
.pinmux_pins_num = ARRAY_SIZE(dm355_pins),
- .intc_base = DAVINCI_ARM_INTC_BASE,
- .intc_type = DAVINCI_INTC_TYPE_AINTC,
- .intc_irq_prios = dm355_default_priorities,
- .intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm355_timer_info,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
@@ -793,6 +790,21 @@ int __init dm355_init_video(struct vpfe_config *vpfe_cfg,
return 0;
}
+static const struct davinci_aintc_config dm355_aintc_config = {
+ .reg = {
+ .start = DAVINCI_ARM_INTC_BASE,
+ .end = DAVINCI_ARM_INTC_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .num_irqs = 64,
+ .prios = dm355_default_priorities,
+};
+
+void __init dm355_init_irq(void)
+{
+ davinci_aintc_init(&dm355_aintc_config);
+}
+
static int __init dm355_init_devices(void)
{
struct platform_device *edma_pdev;
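The dm355 hunks above show the new per-SoC pattern: the interrupt controller description moves out of davinci_soc_info and into a davinci_aintc_config that dm355_init_irq() hands to the shared irqchip driver. A minimal sketch of that configuration type as it is used here, with the field layout inferred from the initializers above; the authoritative definition lives in include/linux/irqchip/irq-davinci-aintc.h, which is not part of this diff:

	/* Inferred shape only; see linux/irqchip/irq-davinci-aintc.h for the real one. */
	struct davinci_aintc_config {
		struct resource reg;		/* AINTC register window */
		unsigned int num_irqs;		/* hardware interrupt lines (64 here) */
		const u8 *prios;		/* per-line priority table */
	};

	void davinci_aintc_init(const struct davinci_aintc_config *config);

The dm365, dm644x and dm646x blocks later in this diff repeat the same pattern with their own priority tables.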
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 01fb2b0c82de..8e0a77315add 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_data/keyscan-davinci.h>
@@ -31,13 +32,13 @@
#include <mach/common.h>
#include <mach/cputype.h>
-#include <mach/irqs.h>
#include <mach/mux.h>
#include <mach/serial.h>
#include <mach/time.h>
#include "asp.h"
#include "davinci.h"
+#include "irqs.h"
#include "mux.h"
#define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */
@@ -224,7 +225,7 @@ static struct resource dm365_spi0_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DM365_SPIINT0_0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_SPIINT0_0),
.flags = IORESOURCE_IRQ,
},
};
@@ -266,43 +267,43 @@ static struct resource dm365_gpio_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* interrupt */
- .start = IRQ_DM365_GPIO0,
- .end = IRQ_DM365_GPIO0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_GPIO1,
- .end = IRQ_DM365_GPIO1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO1),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_GPIO2,
- .end = IRQ_DM365_GPIO2,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO2),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO2),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_GPIO3,
- .end = IRQ_DM365_GPIO3,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO3),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO3),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_GPIO4,
- .end = IRQ_DM365_GPIO4,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO4),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO4),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_GPIO5,
- .end = IRQ_DM365_GPIO5,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO5),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO5),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_GPIO6,
- .end = IRQ_DM365_GPIO6,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO6),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO6),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_GPIO7,
- .end = IRQ_DM365_GPIO7,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO7),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO7),
.flags = IORESOURCE_IRQ,
},
};
@@ -336,23 +337,23 @@ static struct resource dm365_emac_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DM365_EMAC_RXTHRESH,
- .end = IRQ_DM365_EMAC_RXTHRESH,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_RXTHRESH),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_RXTHRESH),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_EMAC_RXPULSE,
- .end = IRQ_DM365_EMAC_RXPULSE,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_RXPULSE),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_RXPULSE),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_EMAC_TXPULSE,
- .end = IRQ_DM365_EMAC_TXPULSE,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_TXPULSE),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_TXPULSE),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM365_EMAC_MISCPULSE,
- .end = IRQ_DM365_EMAC_MISCPULSE,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_MISCPULSE),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_MISCPULSE),
.flags = IORESOURCE_IRQ,
},
};
@@ -518,12 +519,12 @@ static struct resource edma_resources[] = {
},
{
.name = "edma3_ccint",
- .start = IRQ_CCINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_CCINT0),
.flags = IORESOURCE_IRQ,
},
{
.name = "edma3_ccerrint",
- .start = IRQ_CCERRINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_CCERRINT),
.flags = IORESOURCE_IRQ,
},
/* not using TC*_ERR */
@@ -597,7 +598,7 @@ static struct resource dm365_rtc_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DM365_RTCINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_RTCINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -627,8 +628,8 @@ static struct resource dm365_ks_resources[] = {
},
{
/* interrupt */
- .start = IRQ_DM365_KEYINT,
- .end = IRQ_DM365_KEYINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM365_KEYINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM365_KEYINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -669,7 +670,7 @@ static struct davinci_timer_info dm365_timer_info = {
static struct plat_serial8250_port dm365_serial0_platform_data[] = {
{
.mapbase = DAVINCI_UART0_BASE,
- .irq = IRQ_UARTINT0,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT0),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -682,7 +683,7 @@ static struct plat_serial8250_port dm365_serial0_platform_data[] = {
static struct plat_serial8250_port dm365_serial1_platform_data[] = {
{
.mapbase = DM365_UART1_BASE,
- .irq = IRQ_UARTINT1,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT1),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -721,10 +722,6 @@ static const struct davinci_soc_info davinci_soc_info_dm365 = {
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm365_pins,
.pinmux_pins_num = ARRAY_SIZE(dm365_pins),
- .intc_base = DAVINCI_ARM_INTC_BASE,
- .intc_type = DAVINCI_INTC_TYPE_AINTC,
- .intc_irq_prios = dm365_default_priorities,
- .intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm365_timer_info,
.emac_pdata = &dm365_emac_pdata,
.sram_dma = 0x00010000,
@@ -822,13 +819,13 @@ static struct platform_device dm365_vpss_device = {
static struct resource vpfe_resources[] = {
{
- .start = IRQ_VDINT0,
- .end = IRQ_VDINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_VDINT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_VDINT0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_VDINT1,
- .end = IRQ_VDINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_VDINT1),
+ .end = DAVINCI_INTC_IRQ(IRQ_VDINT1),
.flags = IORESOURCE_IRQ,
},
};
@@ -909,8 +906,8 @@ static struct platform_device dm365_osd_dev = {
static struct resource dm365_venc_resources[] = {
{
- .start = IRQ_VENCINT,
- .end = IRQ_VENCINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_VENCINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_VENCINT),
.flags = IORESOURCE_IRQ,
},
/* venc registers io space */
@@ -929,8 +926,8 @@ static struct resource dm365_venc_resources[] = {
static struct resource dm365_v4l2_disp_resources[] = {
{
- .start = IRQ_VENCINT,
- .end = IRQ_VENCINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_VENCINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_VENCINT),
.flags = IORESOURCE_IRQ,
},
/* venc registers io space */
@@ -1052,6 +1049,21 @@ int __init dm365_init_video(struct vpfe_config *vpfe_cfg,
return 0;
}
+static const struct davinci_aintc_config dm365_aintc_config = {
+ .reg = {
+ .start = DAVINCI_ARM_INTC_BASE,
+ .end = DAVINCI_ARM_INTC_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .num_irqs = 64,
+ .prios = dm365_default_priorities,
+};
+
+void __init dm365_init_irq(void)
+{
+ davinci_aintc_init(&dm365_aintc_config);
+}
+
static int __init dm365_init_devices(void)
{
struct platform_device *edma_pdev;
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 38f92b7d413e..cecc7ceb8d34 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -14,6 +14,7 @@
#include <linux/clkdev.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_device.h>
@@ -23,13 +24,13 @@
#include <mach/common.h>
#include <mach/cputype.h>
-#include <mach/irqs.h>
#include <mach/mux.h>
#include <mach/serial.h>
#include <mach/time.h>
#include "asp.h"
#include "davinci.h"
+#include "irqs.h"
#include "mux.h"
/*
@@ -59,8 +60,8 @@ static struct resource dm644x_emac_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_EMACINT,
- .end = IRQ_EMACINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_EMACINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_EMACINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -260,12 +261,12 @@ static struct resource edma_resources[] = {
},
{
.name = "edma3_ccint",
- .start = IRQ_CCINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_CCINT0),
.flags = IORESOURCE_IRQ,
},
{
.name = "edma3_ccerrint",
- .start = IRQ_CCERRINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_CCERRINT),
.flags = IORESOURCE_IRQ,
},
/* not using TC*_ERR */
@@ -330,13 +331,13 @@ static struct platform_device dm644x_vpss_device = {
static struct resource dm644x_vpfe_resources[] = {
{
- .start = IRQ_VDINT0,
- .end = IRQ_VDINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_VDINT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_VDINT0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_VDINT1,
- .end = IRQ_VDINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_VDINT1),
+ .end = DAVINCI_INTC_IRQ(IRQ_VDINT1),
.flags = IORESOURCE_IRQ,
},
};
@@ -442,8 +443,8 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type,
static struct resource dm644x_v4l2_disp_resources[] = {
{
- .start = IRQ_VENCINT,
- .end = IRQ_VENCINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_VENCINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_VENCINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -491,28 +492,28 @@ static struct resource dm644_gpio_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* interrupt */
- .start = IRQ_GPIOBNK0,
- .end = IRQ_GPIOBNK0,
+ .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK0),
+ .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_GPIOBNK1,
- .end = IRQ_GPIOBNK1,
+ .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK1),
+ .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK1),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_GPIOBNK2,
- .end = IRQ_GPIOBNK2,
+ .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK2),
+ .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK2),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_GPIOBNK3,
- .end = IRQ_GPIOBNK3,
+ .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK3),
+ .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK3),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_GPIOBNK4,
- .end = IRQ_GPIOBNK4,
+ .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK4),
+ .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK4),
.flags = IORESOURCE_IRQ,
},
};
@@ -573,7 +574,7 @@ static struct davinci_timer_info dm644x_timer_info = {
static struct plat_serial8250_port dm644x_serial0_platform_data[] = {
{
.mapbase = DAVINCI_UART0_BASE,
- .irq = IRQ_UARTINT0,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT0),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -586,7 +587,7 @@ static struct plat_serial8250_port dm644x_serial0_platform_data[] = {
static struct plat_serial8250_port dm644x_serial1_platform_data[] = {
{
.mapbase = DAVINCI_UART1_BASE,
- .irq = IRQ_UARTINT1,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT1),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -599,7 +600,7 @@ static struct plat_serial8250_port dm644x_serial1_platform_data[] = {
static struct plat_serial8250_port dm644x_serial2_platform_data[] = {
{
.mapbase = DAVINCI_UART2_BASE,
- .irq = IRQ_UARTINT2,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT2),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM,
@@ -645,10 +646,6 @@ static const struct davinci_soc_info davinci_soc_info_dm644x = {
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm644x_pins,
.pinmux_pins_num = ARRAY_SIZE(dm644x_pins),
- .intc_base = DAVINCI_ARM_INTC_BASE,
- .intc_type = DAVINCI_INTC_TYPE_AINTC,
- .intc_irq_prios = dm644x_default_priorities,
- .intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm644x_timer_info,
.emac_pdata = &dm644x_emac_pdata,
.sram_dma = 0x00008000,
@@ -729,6 +726,21 @@ int __init dm644x_init_video(struct vpfe_config *vpfe_cfg,
return 0;
}
+static const struct davinci_aintc_config dm644x_aintc_config = {
+ .reg = {
+ .start = DAVINCI_ARM_INTC_BASE,
+ .end = DAVINCI_ARM_INTC_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .num_irqs = 64,
+ .prios = dm644x_default_priorities,
+};
+
+void __init dm644x_init_irq(void)
+{
+ davinci_aintc_init(&dm644x_aintc_config);
+}
+
void __init dm644x_init_devices(void)
{
struct platform_device *edma_pdev;
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 7dc54b2a610f..f33392f77a03 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
#include <linux/platform_device.h>
@@ -24,13 +25,13 @@
#include <mach/common.h>
#include <mach/cputype.h>
-#include <mach/irqs.h>
#include <mach/mux.h>
#include <mach/serial.h>
#include <mach/time.h>
#include "asp.h"
#include "davinci.h"
+#include "irqs.h"
#include "mux.h"
#define DAVINCI_VPIF_BASE (0x01C12000)
@@ -62,23 +63,23 @@ static struct resource dm646x_emac_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DM646X_EMACRXTHINT,
- .end = IRQ_DM646X_EMACRXTHINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACRXTHINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACRXTHINT),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM646X_EMACRXINT,
- .end = IRQ_DM646X_EMACRXINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACRXINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACRXINT),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM646X_EMACTXINT,
- .end = IRQ_DM646X_EMACTXINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACTXINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACTXINT),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM646X_EMACMISCINT,
- .end = IRQ_DM646X_EMACMISCINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACMISCINT),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACMISCINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -273,12 +274,12 @@ static struct resource edma_resources[] = {
},
{
.name = "edma3_ccint",
- .start = IRQ_CCINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_CCINT0),
.flags = IORESOURCE_IRQ,
},
{
.name = "edma3_ccerrint",
- .start = IRQ_CCERRINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_CCERRINT),
.flags = IORESOURCE_IRQ,
},
/* not using TC*_ERR */
@@ -315,12 +316,12 @@ static struct resource dm646x_mcasp0_resources[] = {
},
{
.name = "tx",
- .start = IRQ_DM646X_MCASP0TXINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_MCASP0TXINT),
.flags = IORESOURCE_IRQ,
},
{
.name = "rx",
- .start = IRQ_DM646X_MCASP0RXINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_MCASP0RXINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -341,7 +342,7 @@ static struct resource dm646x_mcasp1_resources[] = {
},
{
.name = "tx",
- .start = IRQ_DM646X_MCASP1TXINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_MCASP1TXINT),
.flags = IORESOURCE_IRQ,
},
};
@@ -388,13 +389,13 @@ static struct platform_device vpif_dev = {
static struct resource vpif_display_resource[] = {
{
- .start = IRQ_DM646X_VP_VERTINT2,
- .end = IRQ_DM646X_VP_VERTINT2,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT2),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT2),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM646X_VP_VERTINT3,
- .end = IRQ_DM646X_VP_VERTINT3,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT3),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT3),
.flags = IORESOURCE_IRQ,
},
};
@@ -412,13 +413,13 @@ static struct platform_device vpif_display_dev = {
static struct resource vpif_capture_resource[] = {
{
- .start = IRQ_DM646X_VP_VERTINT0,
- .end = IRQ_DM646X_VP_VERTINT0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM646X_VP_VERTINT1,
- .end = IRQ_DM646X_VP_VERTINT1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT1),
.flags = IORESOURCE_IRQ,
},
};
@@ -441,18 +442,18 @@ static struct resource dm646x_gpio_resources[] = {
.flags = IORESOURCE_MEM,
},
{ /* interrupt */
- .start = IRQ_DM646X_GPIOBNK0,
- .end = IRQ_DM646X_GPIOBNK0,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK0),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK0),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM646X_GPIOBNK1,
- .end = IRQ_DM646X_GPIOBNK1,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK1),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK1),
.flags = IORESOURCE_IRQ,
},
{
- .start = IRQ_DM646X_GPIOBNK2,
- .end = IRQ_DM646X_GPIOBNK2,
+ .start = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK2),
+ .end = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK2),
.flags = IORESOURCE_IRQ,
},
};
@@ -513,7 +514,7 @@ static struct davinci_timer_info dm646x_timer_info = {
static struct plat_serial8250_port dm646x_serial0_platform_data[] = {
{
.mapbase = DAVINCI_UART0_BASE,
- .irq = IRQ_UARTINT0,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT0),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM32,
@@ -526,7 +527,7 @@ static struct plat_serial8250_port dm646x_serial0_platform_data[] = {
static struct plat_serial8250_port dm646x_serial1_platform_data[] = {
{
.mapbase = DAVINCI_UART1_BASE,
- .irq = IRQ_UARTINT1,
+ .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT1),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM32,
@@ -539,7 +540,7 @@ static struct plat_serial8250_port dm646x_serial1_platform_data[] = {
static struct plat_serial8250_port dm646x_serial2_platform_data[] = {
{
.mapbase = DAVINCI_UART2_BASE,
- .irq = IRQ_DM646X_UARTINT2,
+ .irq = DAVINCI_INTC_IRQ(IRQ_DM646X_UARTINT2),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM32,
@@ -585,10 +586,6 @@ static const struct davinci_soc_info davinci_soc_info_dm646x = {
.pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm646x_pins,
.pinmux_pins_num = ARRAY_SIZE(dm646x_pins),
- .intc_base = DAVINCI_ARM_INTC_BASE,
- .intc_type = DAVINCI_INTC_TYPE_AINTC,
- .intc_irq_prios = dm646x_default_priorities,
- .intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm646x_timer_info,
.emac_pdata = &dm646x_emac_pdata,
.sram_dma = 0x10010000,
@@ -690,6 +687,21 @@ void __init dm646x_register_clocks(void)
platform_device_register(&dm646x_pll2_device);
}
+static const struct davinci_aintc_config dm646x_aintc_config = {
+ .reg = {
+ .start = DAVINCI_ARM_INTC_BASE,
+ .end = DAVINCI_ARM_INTC_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .num_irqs = 64,
+ .prios = dm646x_default_priorities,
+};
+
+void __init dm646x_init_irq(void)
+{
+ davinci_aintc_init(&dm646x_aintc_config);
+}
+
static int __init dm646x_init_devices(void)
{
int ret = 0;
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index b577e13a9c23..9526e5da0d33 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -17,11 +17,12 @@
#include <linux/types.h>
#include <linux/reboot.h>
-void davinci_timer_init(struct clk *clk);
+#include <asm/irq.h>
+
+#define DAVINCI_INTC_START NR_IRQS
+#define DAVINCI_INTC_IRQ(_irqnum) (DAVINCI_INTC_START + (_irqnum))
-extern void davinci_irq_init(void);
-extern void __iomem *davinci_intc_base;
-extern int davinci_intc_type;
+void davinci_timer_init(struct clk *clk);
struct davinci_timer_instance {
u32 base;
@@ -57,11 +58,6 @@ struct davinci_soc_info {
u32 pinmux_base;
const struct mux_config *pinmux_pins;
unsigned long pinmux_pins_num;
- u32 intc_base;
- int intc_type;
- u8 *intc_irq_prios;
- unsigned long intc_irq_num;
- u32 *intc_host_map;
struct davinci_timer_info *timer_info;
int gpio_type;
u32 gpio_base;
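With the legacy davinci_irq_init()/davinci_intc_base globals gone, common.h now defines DAVINCI_INTC_START as NR_IRQS and DAVINCI_INTC_IRQ() as an offset from it, which is why every IORESOURCE_IRQ entry earlier in this diff gained the wrapper, presumably so the fixed platform interrupt numbers sit above anything the generic IRQ core assigns dynamically. A small illustration using a hypothetical hardware line number (the concrete IRQ_* values come from the relocated irqs.h shown further down):

	/*
	 * Hypothetical example: hardware line 12 on the AINTC is no longer
	 * registered as Linux IRQ 12 but as NR_IRQS + 12.
	 */
	static struct resource example_irq_resource = {
		.start	= DAVINCI_INTC_IRQ(12),	/* expands to NR_IRQS + 12 */
		.end	= DAVINCI_INTC_IRQ(12),
		.flags	= IORESOURCE_IRQ,
	};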
diff --git a/arch/arm/mach-davinci/include/mach/cpufreq.h b/arch/arm/mach-davinci/include/mach/cpufreq.h
deleted file mode 100644
index 3c089cfb6cd6..000000000000
--- a/arch/arm/mach-davinci/include/mach/cpufreq.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * TI DaVinci CPUFreq platform support.
- *
- * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef _MACH_DAVINCI_CPUFREQ_H
-#define _MACH_DAVINCI_CPUFREQ_H
-
-#include <linux/cpufreq.h>
-
-struct davinci_cpufreq_config {
- struct cpufreq_frequency_table *freq_table;
- int (*set_voltage) (unsigned int index);
- int (*init) (void);
-};
-
-#endif
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index ab4a57f433f4..1618b30661a9 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -88,10 +88,12 @@ extern unsigned int da850_max_speed;
#define DA8XX_ARM_RAM_BASE 0xffff0000
void da830_init(void);
+void da830_init_irq(void);
void da830_init_time(void);
void da830_register_clocks(void);
void da850_init(void);
+void da850_init_irq(void);
void da850_init_time(void);
void da850_register_clocks(void);
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S
deleted file mode 100644
index cf5f573eb5fd..000000000000
--- a/arch/arm/mach-davinci/include/mach/entry-macro.S
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Low-level IRQ helper macros for TI DaVinci-based platforms
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <mach/irqs.h>
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =davinci_intc_base
- ldr \base, [\base]
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-#if defined(CONFIG_AINTC) && defined(CONFIG_CP_INTC)
- ldr \tmp, =davinci_intc_type
- ldr \tmp, [\tmp]
- cmp \tmp, #DAVINCI_INTC_TYPE_CP_INTC
- beq 1001f
-#endif
-#if defined(CONFIG_AINTC)
- ldr \tmp, [\base, #0x14]
- movs \tmp, \tmp, lsr #2
- sub \irqnr, \tmp, #1
- b 1002f
-#endif
-#if defined(CONFIG_CP_INTC)
-1001: ldr \irqnr, [\base, #0x80] /* get irq number */
- mov \tmp, \irqnr, lsr #31
- and \irqnr, \irqnr, #0xff /* irq is in bits 0-9 */
- and \tmp, \tmp, #0x1
- cmp \tmp, #0x1
-#endif
-1002:
- .endm
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
deleted file mode 100644
index 952dc126c390..000000000000
--- a/arch/arm/mach-davinci/irq.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Interrupt handler for DaVinci boards.
- *
- * Copyright (C) 2006 Texas Instruments.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/cputype.h>
-#include <mach/common.h>
-#include <asm/mach/irq.h>
-
-#define FIQ_REG0_OFFSET 0x0000
-#define FIQ_REG1_OFFSET 0x0004
-#define IRQ_REG0_OFFSET 0x0008
-#define IRQ_REG1_OFFSET 0x000C
-#define IRQ_ENT_REG0_OFFSET 0x0018
-#define IRQ_ENT_REG1_OFFSET 0x001C
-#define IRQ_INCTL_REG_OFFSET 0x0020
-#define IRQ_EABASE_REG_OFFSET 0x0024
-#define IRQ_INTPRI0_REG_OFFSET 0x0030
-#define IRQ_INTPRI7_REG_OFFSET 0x004C
-
-static inline void davinci_irq_writel(unsigned long value, int offset)
-{
- __raw_writel(value, davinci_intc_base + offset);
-}
-
-static __init void
-davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
-{
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
-
- gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
- if (!gc) {
- pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
- __func__, irq_start);
- return;
- }
-
- ct = gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
-
- ct->regs.ack = IRQ_REG0_OFFSET;
- ct->regs.mask = IRQ_ENT_REG0_OFFSET;
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
-}
-
-/* ARM Interrupt Controller Initialization */
-void __init davinci_irq_init(void)
-{
- unsigned i, j;
- const u8 *davinci_def_priorities = davinci_soc_info.intc_irq_prios;
-
- davinci_intc_type = DAVINCI_INTC_TYPE_AINTC;
- davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_4K);
- if (WARN_ON(!davinci_intc_base))
- return;
-
- /* Clear all interrupt requests */
- davinci_irq_writel(~0x0, FIQ_REG0_OFFSET);
- davinci_irq_writel(~0x0, FIQ_REG1_OFFSET);
- davinci_irq_writel(~0x0, IRQ_REG0_OFFSET);
- davinci_irq_writel(~0x0, IRQ_REG1_OFFSET);
-
- /* Disable all interrupts */
- davinci_irq_writel(0x0, IRQ_ENT_REG0_OFFSET);
- davinci_irq_writel(0x0, IRQ_ENT_REG1_OFFSET);
-
- /* Interrupts disabled immediately, IRQ entry reflects all */
- davinci_irq_writel(0x0, IRQ_INCTL_REG_OFFSET);
-
- /* we don't use the hardware vector table, just its entry addresses */
- davinci_irq_writel(0, IRQ_EABASE_REG_OFFSET);
-
- /* Clear all interrupt requests */
- davinci_irq_writel(~0x0, FIQ_REG0_OFFSET);
- davinci_irq_writel(~0x0, FIQ_REG1_OFFSET);
- davinci_irq_writel(~0x0, IRQ_REG0_OFFSET);
- davinci_irq_writel(~0x0, IRQ_REG1_OFFSET);
-
- for (i = IRQ_INTPRI0_REG_OFFSET; i <= IRQ_INTPRI7_REG_OFFSET; i += 4) {
- u32 pri;
-
- for (j = 0, pri = 0; j < 32; j += 4, davinci_def_priorities++)
- pri |= (*davinci_def_priorities & 0x07) << j;
- davinci_irq_writel(pri, i);
- }
-
- for (i = 0, j = 0; i < davinci_soc_info.intc_irq_num; i += 32, j += 0x04)
- davinci_alloc_gc(davinci_intc_base + j, i, 32);
-
- irq_set_handler(IRQ_TINT1_TINT34, handle_level_irq);
-}
diff --git a/arch/arm/mach-davinci/include/mach/irqs.h b/arch/arm/mach-davinci/irqs.h
index edb2ca62321a..8f9fc7a56ce8 100644
--- a/arch/arm/mach-davinci/include/mach/irqs.h
+++ b/arch/arm/mach-davinci/irqs.h
@@ -30,9 +30,6 @@
/* Base address */
#define DAVINCI_ARM_INTC_BASE 0x01C48000
-#define DAVINCI_INTC_TYPE_AINTC 0
-#define DAVINCI_INTC_TYPE_CP_INTC 1
-
/* Interrupt lines */
#define IRQ_VDINT0 0
#define IRQ_VDINT1 1
@@ -404,6 +401,5 @@
/* da850 currently has the most gpio pins (144) */
#define DAVINCI_N_GPIO 144
/* da850 currently has the most irqs so use DA850_N_CP_INTC_IRQ */
-#define NR_IRQS (DA850_N_CP_INTC_IRQ + DAVINCI_N_GPIO)
#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-davinci/usb-da8xx.c b/arch/arm/mach-davinci/usb-da8xx.c
index c17ce66a3d95..25f21ee86f1a 100644
--- a/arch/arm/mach-davinci/usb-da8xx.c
+++ b/arch/arm/mach-davinci/usb-da8xx.c
@@ -18,7 +18,8 @@
#include <mach/common.h>
#include <mach/cputype.h>
#include <mach/da8xx.h>
-#include <mach/irqs.h>
+
+#include "irqs.h"
#define DA8XX_USB0_BASE 0x01e00000
#define DA8XX_USB1_BASE 0x01e25000
@@ -70,7 +71,7 @@ static struct resource da8xx_usb20_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_DA8XX_USB_INT,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_USB_INT),
.flags = IORESOURCE_IRQ,
.name = "mc",
},
@@ -105,8 +106,8 @@ static struct resource da8xx_usb11_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_DA8XX_IRQN,
- .end = IRQ_DA8XX_IRQN,
+ .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_IRQN),
+ .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_IRQN),
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c
index 31ed7aa47227..dd8db61cdd1c 100644
--- a/arch/arm/mach-davinci/usb.c
+++ b/arch/arm/mach-davinci/usb.c
@@ -2,16 +2,16 @@
/*
* USB
*/
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-
+#include <linux/platform_data/usb-davinci.h>
#include <linux/usb/musb.h>
#include <mach/common.h>
-#include <mach/irqs.h>
#include <mach/cputype.h>
-#include <linux/platform_data/usb-davinci.h>
+
+#include "irqs.h"
#define DAVINCI_USB_OTG_BASE 0x01c64000
@@ -38,7 +38,7 @@ static struct resource usb_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IRQ_USBINT,
+ .start = DAVINCI_INTC_IRQ(IRQ_USBINT),
.flags = IORESOURCE_IRQ,
.name = "mc"
},
@@ -70,8 +70,9 @@ void __init davinci_setup_usb(unsigned mA, unsigned potpgt_ms)
if (cpu_is_davinci_dm646x()) {
/* Override the defaults as DM6467 uses different IRQs. */
- usb_dev.resource[1].start = IRQ_DM646X_USBINT;
- usb_dev.resource[2].start = IRQ_DM646X_USBDMAINT;
+ usb_dev.resource[1].start = DAVINCI_INTC_IRQ(IRQ_DM646X_USBINT);
+ usb_dev.resource[2].start = DAVINCI_INTC_IRQ(
+ IRQ_DM646X_USBDMAINT);
} else /* other devices don't have dedicated CPPI IRQ */
usb_dev.num_resources = 2;
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index c6a533699b00..85b74ac943f0 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -26,7 +26,6 @@
#include <mach/gpio-ep93xx.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
-#include <mach/gpio-ep93xx.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index c39ffd2e2fe6..b6da7edbbd2f 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -336,9 +336,9 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
/* wait max 10 ms until cpu1 is on */
while (exynos_cpu_power_state(core_id)
!= S5P_CORE_LOCAL_PWR_EN) {
- if (timeout-- == 0)
+ if (timeout == 0)
break;
-
+ timeout--;
mdelay(1);
}
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 8af2f7e91d13..35ff620537e6 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -29,9 +29,10 @@ obj-$(CONFIG_SOC_IMX6SL) += cpuidle-imx6sl.o
obj-$(CONFIG_SOC_IMX6SLL) += cpuidle-imx6sx.o
obj-$(CONFIG_SOC_IMX6SX) += cpuidle-imx6sx.o
obj-$(CONFIG_SOC_IMX6UL) += cpuidle-imx6sx.o
+obj-$(CONFIG_SOC_IMX7ULP) += cpuidle-imx7ulp.o
endif
-ifdef CONFIG_SND_IMX_SOC
+ifdef CONFIG_SND_SOC_IMX_PCM_FIQ
obj-y += ssi-fiq.o
obj-y += ssi-fiq-ksym.o
endif
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index bc915e5b4d56..c51764a85fd7 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -72,6 +72,15 @@ enum mxc_cpu_pwr_mode {
STOP_POWER_OFF, /* STOP + SRPG */
};
+enum ulp_cpu_pwr_mode {
+ ULP_PM_HSRUN, /* High speed run mode */
+ ULP_PM_RUN, /* Run mode */
+ ULP_PM_WAIT, /* Wait mode */
+ ULP_PM_STOP, /* Stop mode */
+ ULP_PM_VLPS, /* Very low power stop mode */
+ ULP_PM_VLLS, /* Very low leakage stop mode */
+};
+
void imx_enable_cpu(int cpu, bool enable);
void imx_set_cpu_jump(int cpu, void *jump_addr);
u32 imx_get_cpu_arg(int cpu);
@@ -98,6 +107,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode);
void imx6_set_int_mem_clk_lpm(bool enable);
void imx6sl_set_wait_clk(bool enter);
int imx_mmdc_get_ddr_type(void);
+int imx7ulp_set_lpm(enum ulp_cpu_pwr_mode mode);
void imx_cpu_die(unsigned int cpu);
int imx_cpu_kill(unsigned int cpu);
diff --git a/arch/arm/mach-imx/cpuidle-imx7ulp.c b/arch/arm/mach-imx/cpuidle-imx7ulp.c
new file mode 100644
index 000000000000..ca86c967d19e
--- /dev/null
+++ b/arch/arm/mach-imx/cpuidle-imx7ulp.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ * Anson Huang <Anson.Huang@nxp.com>
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/module.h>
+#include <asm/cpuidle.h>
+
+#include "common.h"
+#include "cpuidle.h"
+
+static int imx7ulp_enter_wait(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ if (index == 1)
+ imx7ulp_set_lpm(ULP_PM_WAIT);
+ else
+ imx7ulp_set_lpm(ULP_PM_STOP);
+
+ cpu_do_idle();
+
+ imx7ulp_set_lpm(ULP_PM_RUN);
+
+ return index;
+}
+
+static struct cpuidle_driver imx7ulp_cpuidle_driver = {
+ .name = "imx7ulp_cpuidle",
+ .owner = THIS_MODULE,
+ .states = {
+ /* WFI */
+ ARM_CPUIDLE_WFI_STATE,
+ /* WAIT */
+ {
+ .exit_latency = 50,
+ .target_residency = 75,
+ .enter = imx7ulp_enter_wait,
+ .name = "WAIT",
+ .desc = "PSTOP2",
+ },
+ /* STOP */
+ {
+ .exit_latency = 100,
+ .target_residency = 150,
+ .enter = imx7ulp_enter_wait,
+ .name = "STOP",
+ .desc = "PSTOP1",
+ },
+ },
+ .state_count = 3,
+ .safe_state_index = 0,
+};
+
+int __init imx7ulp_cpuidle_init(void)
+{
+ return cpuidle_register(&imx7ulp_cpuidle_driver, NULL);
+}
diff --git a/arch/arm/mach-imx/cpuidle.h b/arch/arm/mach-imx/cpuidle.h
index f9140128ba05..7694c8f810a4 100644
--- a/arch/arm/mach-imx/cpuidle.h
+++ b/arch/arm/mach-imx/cpuidle.h
@@ -15,6 +15,7 @@ extern int imx5_cpuidle_init(void);
extern int imx6q_cpuidle_init(void);
extern int imx6sl_cpuidle_init(void);
extern int imx6sx_cpuidle_init(void);
+extern int imx7ulp_cpuidle_init(void);
#else
static inline int imx5_cpuidle_init(void)
{
@@ -32,4 +33,8 @@ static inline int imx6sx_cpuidle_init(void)
{
return 0;
}
+static inline int imx7ulp_cpuidle_init(void)
+{
+ return 0;
+}
#endif
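cpuidle.h keeps the usual idiom of pairing each real init function with a static inline stub, so callers such as the new imx7ulp_init_late() below can invoke imx7ulp_cpuidle_init() unconditionally. A condensed sketch of that idiom; the guard symbol is an assumption, since the hunk does not show it:

	#ifdef CONFIG_CPU_IDLE	/* assumed guard, not visible in this hunk */
	extern int imx7ulp_cpuidle_init(void);
	#else
	static inline int imx7ulp_cpuidle_init(void)
	{
		return 0;	/* compiled out: behave as a successful no-op */
	}
	#endif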
diff --git a/arch/arm/mach-imx/mach-imx7ulp.c b/arch/arm/mach-imx/mach-imx7ulp.c
index 33937ebf66b5..11ac71aaf965 100644
--- a/arch/arm/mach-imx/mach-imx7ulp.c
+++ b/arch/arm/mach-imx/mach-imx7ulp.c
@@ -6,17 +6,57 @@
*/
#include <linux/irqchip.h>
+#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
+#include <linux/regmap.h>
#include <asm/mach/arch.h>
#include "common.h"
+#include "cpuidle.h"
#include "hardware.h"
+#define SIM_JTAG_ID_REG 0x8c
+
+static void __init imx7ulp_set_revision(void)
+{
+ struct regmap *sim;
+ u32 revision;
+
+ sim = syscon_regmap_lookup_by_compatible("fsl,imx7ulp-sim");
+ if (IS_ERR(sim)) {
+ pr_warn("failed to find fsl,imx7ulp-sim regmap!\n");
+ return;
+ }
+
+ if (regmap_read(sim, SIM_JTAG_ID_REG, &revision)) {
+ pr_warn("failed to read sim regmap!\n");
+ return;
+ }
+
+ /*
+ * bit[31:28] of JTAG_ID register defines revision as below from B0:
+ * 0001 B0
+ * 0010 B1
+ */
+ switch (revision >> 28) {
+ case 1:
+ imx_set_soc_revision(IMX_CHIP_REVISION_2_0);
+ break;
+ case 2:
+ imx_set_soc_revision(IMX_CHIP_REVISION_2_1);
+ break;
+ default:
+ imx_set_soc_revision(IMX_CHIP_REVISION_1_0);
+ break;
+ }
+}
+
static void __init imx7ulp_init_machine(void)
{
imx7ulp_pm_init();
mxc_set_cpu_type(MXC_CPU_IMX7ULP);
+ imx7ulp_set_revision();
of_platform_default_populate(NULL, NULL, imx_soc_device_init());
}
@@ -25,7 +65,13 @@ static const char *const imx7ulp_dt_compat[] __initconst = {
NULL,
};
+static void __init imx7ulp_init_late(void)
+{
+ imx7ulp_cpuidle_init();
+}
+
DT_MACHINE_START(IMX7ulp, "Freescale i.MX7ULP (Device Tree)")
.init_machine = imx7ulp_init_machine,
.dt_compat = imx7ulp_dt_compat,
+ .init_late = imx7ulp_init_late,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 2e1e540f2e5a..d278fb672d40 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -205,7 +205,6 @@ static struct regulator_init_data mx21ads_lcd_regulator_init_data = {
static struct fixed_voltage_config mx21ads_lcd_regulator_pdata = {
.supply_name = "LCD",
.microvolts = 3300000,
- .enable_high = 1,
.init_data = &mx21ads_lcd_regulator_init_data,
};
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index f5e04047ed13..6dd7f57c332f 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -237,7 +237,7 @@ static struct fixed_voltage_config mx27ads_lcd_regulator_pdata = {
static struct gpiod_lookup_table mx27ads_lcd_regulator_gpiod_table = {
.dev_id = "reg-fixed-voltage.0", /* Let's hope ID 0 is what we get */
.table = {
- GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_LOW),
{ },
},
};
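Several board files in this diff (mx21ads, brownstone, ams-delta) drop .enable_high from their fixed_voltage_config, and the mx27ads hunk above adjusts the polarity flag in its GPIO lookup table: with the gpiod-based fixed regulator, the enable polarity is expressed through the lookup flags rather than a platform-data bit. A minimal sketch of the combined pattern, with all names hypothetical:

	#include <linux/gpio/machine.h>
	#include <linux/regulator/fixed.h>
	#include <linux/regulator/machine.h>

	/* Hypothetical board code; the "LCD" chip label and dev_id are examples only. */
	static struct regulator_init_data example_reg_init_data = {
		.constraints = {
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	};

	static struct fixed_voltage_config example_reg_pdata = {
		.supply_name	= "v_lcd",
		.microvolts	= 3300000,
		/* no .enable_high: the polarity comes from the lookup flag below */
		.init_data	= &example_reg_init_data,
	};

	static struct gpiod_lookup_table example_reg_gpiod_table = {
		.dev_id	= "reg-fixed-voltage.0",
		.table	= {
			GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_HIGH),
			{ },
		},
	};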
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index e49e06834516..fce4b426c379 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -294,13 +294,7 @@ static int mmdc_pmu_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.sample_period)
+ if (event->attr.sample_period)
return -EINVAL;
if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
@@ -456,6 +450,7 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
.start = mmdc_pmu_event_start,
.stop = mmdc_pmu_event_stop,
.read = mmdc_pmu_event_update,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
},
.mmdc_base = mmdc_base,
.dev = dev,
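The mmdc change replaces the open-coded rejection of every perf exclude_* attribute with the PERF_PMU_CAP_NO_EXCLUDE capability, letting the perf core refuse such requests before the driver's event_init runs. In sketch form, for any PMU that cannot filter by privilege level; everything except .capabilities is a placeholder:

	#include <linux/perf_event.h>

	static int example_event_init(struct perf_event *event) { return 0; }
	static int example_event_add(struct perf_event *event, int flags) { return 0; }
	static void example_event_del(struct perf_event *event, int flags) { }

	static struct pmu example_pmu = {
		.task_ctx_nr	= perf_invalid_context,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,	/* core rejects exclude_* for us */
		.event_init	= example_event_init,		/* no exclude_* checks needed here */
		.add		= example_event_add,
		.del		= example_event_del,
	};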
diff --git a/arch/arm/mach-imx/pm-imx7ulp.c b/arch/arm/mach-imx/pm-imx7ulp.c
index cf6a380c2b8d..7b2f7387e662 100644
--- a/arch/arm/mach-imx/pm-imx7ulp.c
+++ b/arch/arm/mach-imx/pm-imx7ulp.c
@@ -9,21 +9,60 @@
#include <linux/of.h>
#include <linux/of_address.h>
+#include "common.h"
+
#define SMC_PMCTRL 0x10
#define BP_PMCTRL_PSTOPO 16
#define PSTOPO_PSTOP3 0x3
+#define PSTOPO_PSTOP2 0x2
+#define PSTOPO_PSTOP1 0x1
+#define BP_PMCTRL_RUNM 8
+#define RUNM_RUN 0
+#define BP_PMCTRL_STOPM 0
+#define STOPM_STOP 0
+
+#define BM_PMCTRL_PSTOPO (3 << BP_PMCTRL_PSTOPO)
+#define BM_PMCTRL_RUNM (3 << BP_PMCTRL_RUNM)
+#define BM_PMCTRL_STOPM (7 << BP_PMCTRL_STOPM)
+
+static void __iomem *smc1_base;
+
+int imx7ulp_set_lpm(enum ulp_cpu_pwr_mode mode)
+{
+ u32 val = readl_relaxed(smc1_base + SMC_PMCTRL);
+
+ /* clear all */
+ val &= ~(BM_PMCTRL_RUNM | BM_PMCTRL_STOPM | BM_PMCTRL_PSTOPO);
+
+ switch (mode) {
+ case ULP_PM_RUN:
+ /* system/bus clock enabled */
+ val |= PSTOPO_PSTOP3 << BP_PMCTRL_PSTOPO;
+ break;
+ case ULP_PM_WAIT:
+ /* system clock disabled, bus clock enabled */
+ val |= PSTOPO_PSTOP2 << BP_PMCTRL_PSTOPO;
+ break;
+ case ULP_PM_STOP:
+ /* system/bus clock disabled */
+ val |= PSTOPO_PSTOP1 << BP_PMCTRL_PSTOPO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(val, smc1_base + SMC_PMCTRL);
+
+ return 0;
+}
void __init imx7ulp_pm_init(void)
{
struct device_node *np;
- void __iomem *smc1_base;
np = of_find_compatible_node(NULL, NULL, "fsl,imx7ulp-smc1");
smc1_base = of_iomap(np, 0);
WARN_ON(!smc1_base);
- /* Partial Stop mode 3 with system/bus clock enabled */
- writel_relaxed(PSTOPO_PSTOP3 << BP_PMCTRL_PSTOPO,
- smc1_base + SMC_PMCTRL);
- iounmap(smc1_base);
+ imx7ulp_set_lpm(ULP_PM_RUN);
}
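imx7ulp_set_lpm() above rebuilds SMC_PMCTRL from the three bitfields it defines: PSTOPO at bits 17:16, RUNM at bits 9:8 and STOPM at bits 2:0. As a worked example using only the constants from the hunk, the ULP_PM_WAIT branch effectively computes:

	u32 val = readl_relaxed(smc1_base + SMC_PMCTRL);

	/* BM_PMCTRL_* clear bits 17:16, 9:8 and 2:0 in one mask. */
	val &= ~(BM_PMCTRL_RUNM | BM_PMCTRL_STOPM | BM_PMCTRL_PSTOPO);
	val |= PSTOPO_PSTOP2 << BP_PMCTRL_PSTOPO;	/* 0x2 << 16: bit 17 set, bit 16 clear */

	writel_relaxed(val, smc1_base + SMC_PMCTRL);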
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index a109f6482413..8dfad012dfae 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -390,10 +390,14 @@ static int __ref impd1_probe(struct lm_device *dev)
char *mmciname;
lookup = devm_kzalloc(&dev->dev,
- sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
+ struct_size(lookup, table, 3),
GFP_KERNEL);
chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+ "lm%x:00700", dev->id);
+ if (!lookup || !chipname || !mmciname)
+ return -ENOMEM;
+
lookup->dev_id = mmciname;
/*
* Offsets on GPIO block 1:
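Two small hardening changes in the impd1 hunk: the gpiod lookup allocation now uses struct_size() so the flexible-array sizing cannot overflow, and the mmci name string moves to devm_kasprintf() with an explicit -ENOMEM check so nothing leaks on an early failure. A generic sketch of the struct_size() idiom for a structure ending in a flexible array; struct example and its fields are hypothetical:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct example {
		int count;
		int values[];	/* flexible array member */
	};

	static struct example *example_alloc(size_t n)
	{
		/* Overflow-checked equivalent of sizeof(*e) + n * sizeof(e->values[0]). */
		struct example *e = kzalloc(struct_size(e, values, n), GFP_KERNEL);

		if (e)
			e->count = n;
		return e;
	}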
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 3b73813c6b04..23e8c93515d4 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
/*
* N2100 PCI.
*/
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index e48cc06c2aec..b3be60a8e467 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -45,73 +45,6 @@
#include <mach/board.h>
#include "common.h"
-/*
- * AMBA LCD controller
- */
-static struct clcd_panel conn_lcd_panel = {
- .mode = {
- .name = "QVGA portrait",
- .refresh = 60,
- .xres = 240,
- .yres = 320,
- .pixclock = 191828,
- .left_margin = 22,
- .right_margin = 11,
- .upper_margin = 2,
- .lower_margin = 1,
- .hsync_len = 5,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = (TIM2_IVS | TIM2_IHS),
- .cntl = (CNTL_BGR | CNTL_LCDTFT | CNTL_LCDVCOMP(1) |
- CNTL_LCDBPP16_565),
- .bpp = 16,
-};
-#define PANEL_SIZE (3 * SZ_64K)
-
-static int lpc32xx_clcd_setup(struct clcd_fb *fb)
-{
- dma_addr_t dma;
-
- fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, PANEL_SIZE, &dma,
- GFP_KERNEL);
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "CLCD: unable to map framebuffer\n");
- return -ENOMEM;
- }
-
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = PANEL_SIZE;
- fb->panel = &conn_lcd_panel;
-
- return 0;
-}
-
-static int lpc32xx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
- fb->fb.fix.smem_start, fb->fb.fix.smem_len);
-}
-
-static void lpc32xx_clcd_remove(struct clcd_fb *fb)
-{
- dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
- fb->fb.fix.smem_start);
-}
-
-static struct clcd_board lpc32xx_clcd_data = {
- .name = "Phytec LCD",
- .check = clcdfb_check,
- .decode = clcdfb_decode,
- .setup = lpc32xx_clcd_setup,
- .mmap = lpc32xx_clcd_mmap,
- .remove = lpc32xx_clcd_remove,
-};
-
static struct pl08x_channel_data pl08x_slave_channels[] = {
{
.bus_id = "nand-slc",
@@ -148,11 +81,6 @@ static struct pl08x_platform_data pl08x_pd = {
.mem_buses = PL08X_AHB1,
};
-static struct mmci_platform_data lpc32xx_mmci_data = {
- .ocr_mask = MMC_VDD_30_31 | MMC_VDD_31_32 |
- MMC_VDD_32_33 | MMC_VDD_33_34,
-};
-
static struct lpc32xx_slc_platform_data lpc32xx_slc_data = {
.dma_filter = pl08x_filter_id,
};
@@ -164,10 +92,7 @@ static struct lpc32xx_mlc_platform_data lpc32xx_mlc_data = {
static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = {
OF_DEV_AUXDATA("arm,pl022", 0x20084000, "dev:ssp0", NULL),
OF_DEV_AUXDATA("arm,pl022", 0x2008C000, "dev:ssp1", NULL),
- OF_DEV_AUXDATA("arm,pl110", 0x31040000, "dev:clcd", &lpc32xx_clcd_data),
OF_DEV_AUXDATA("arm,pl080", 0x31000000, "pl08xdmac", &pl08x_pd),
- OF_DEV_AUXDATA("arm,pl18x", 0x20098000, "20098000.sd",
- &lpc32xx_mmci_data),
OF_DEV_AUXDATA("nxp,lpc3220-slc", 0x20020000, "20020000.flash",
&lpc32xx_slc_data),
OF_DEV_AUXDATA("nxp,lpc3220-mlc", 0x200a8000, "200a8000.flash",
@@ -177,15 +102,6 @@ static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = {
static void __init lpc3250_machine_init(void)
{
- u32 tmp;
-
- /* Setup LCD muxing to RGB565 */
- tmp = __raw_readl(LPC32XX_CLKPWR_LCDCLK_CTRL) &
- ~(LPC32XX_CLKPWR_LCDCTRL_LCDTYPE_MSK |
- LPC32XX_CLKPWR_LCDCTRL_PSCALE_MSK);
- tmp |= LPC32XX_CLKPWR_LCDCTRL_LCDTYPE_TFT16;
- __raw_writel(tmp, LPC32XX_CLKPWR_LCDCLK_CTRL);
-
lpc32xx_serial_init();
/* Test clock needed for UDA1380 initial init */
diff --git a/arch/arm/mach-lpc32xx/pm.c b/arch/arm/mach-lpc32xx/pm.c
index 62471570d586..32bca351a73b 100644
--- a/arch/arm/mach-lpc32xx/pm.c
+++ b/arch/arm/mach-lpc32xx/pm.c
@@ -86,17 +86,10 @@ static int lpc32xx_pm_enter(suspend_state_t state)
void *iram_swap_area;
/* Allocate some space for temporary IRAM storage */
- iram_swap_area = kmalloc(lpc32xx_sys_suspend_sz, GFP_KERNEL);
- if (!iram_swap_area) {
- printk(KERN_ERR
- "PM Suspend: cannot allocate memory to save portion "
- "of SRAM\n");
+ iram_swap_area = kmemdup((void *)TEMP_IRAM_AREA,
+ lpc32xx_sys_suspend_sz, GFP_KERNEL);
+ if (!iram_swap_area)
return -ENOMEM;
- }
-
- /* Backup a small area of IRAM used for the suspend code */
- memcpy(iram_swap_area, (void *) TEMP_IRAM_AREA,
- lpc32xx_sys_suspend_sz);
/*
* Copy code to suspend system into IRAM. The suspend code
diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig
index 91cc461f7b04..11ed264f0731 100644
--- a/arch/arm/mach-mediatek/Kconfig
+++ b/arch/arm/mach-mediatek/Kconfig
@@ -26,6 +26,10 @@ config MACH_MT7623
bool "MediaTek MT7623 SoCs support"
default ARCH_MEDIATEK
+config MACH_MT7629
+ bool "MediaTek MT7629 SoCs support"
+ default ARCH_MEDIATEK
+
config MACH_MT8127
bool "MediaTek MT8127 SoCs support"
default ARCH_MEDIATEK
diff --git a/arch/arm/mach-mediatek/mediatek.c b/arch/arm/mach-mediatek/mediatek.c
index 6910b4e0d913..b6a81ba1ce32 100644
--- a/arch/arm/mach-mediatek/mediatek.c
+++ b/arch/arm/mach-mediatek/mediatek.c
@@ -30,7 +30,6 @@ static void __init mediatek_timer_init(void)
if (of_machine_is_compatible("mediatek,mt6589") ||
of_machine_is_compatible("mediatek,mt7623") ||
- of_machine_is_compatible("mediatek,mt7623a") ||
of_machine_is_compatible("mediatek,mt8135") ||
of_machine_is_compatible("mediatek,mt8127")) {
/* turn on GPT6 which ungates arch timer clocks */
@@ -50,7 +49,7 @@ static const char * const mediatek_board_dt_compat[] = {
"mediatek,mt6589",
"mediatek,mt6592",
"mediatek,mt7623",
- "mediatek,mt7623a",
+ "mediatek,mt7629",
"mediatek,mt8127",
"mediatek,mt8135",
NULL,
diff --git a/arch/arm/mach-mediatek/platsmp.c b/arch/arm/mach-mediatek/platsmp.c
index 6882ff07aaa6..c9d7c0458452 100644
--- a/arch/arm/mach-mediatek/platsmp.c
+++ b/arch/arm/mach-mediatek/platsmp.c
@@ -60,7 +60,7 @@ static const struct of_device_id mtk_tz_smp_boot_infos[] __initconst = {
static const struct of_device_id mtk_smp_boot_infos[] __initconst = {
{ .compatible = "mediatek,mt6589", .data = &mtk_mt6589_boot },
{ .compatible = "mediatek,mt7623", .data = &mtk_mt7623_boot },
- { .compatible = "mediatek,mt7623a", .data = &mtk_mt7623_boot },
+ { .compatible = "mediatek,mt7629", .data = &mtk_mt7623_boot },
{},
};
diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig
index b16831697183..15e9cb75738e 100644
--- a/arch/arm/mach-meson/Kconfig
+++ b/arch/arm/mach-meson/Kconfig
@@ -9,7 +9,6 @@ menuconfig ARCH_MESON
select PINCTRL
select PINCTRL_MESON
select COMMON_CLK
- select COMMON_CLK_AMLOGIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mach-milbeaut/Kconfig b/arch/arm/mach-milbeaut/Kconfig
new file mode 100644
index 000000000000..6a576fd8521e
--- /dev/null
+++ b/arch/arm/mach-milbeaut/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig ARCH_MILBEAUT
+ bool "Socionext Milbeaut SoCs"
+ depends on ARCH_MULTI_V7
+ select ARM_GIC
+ help
+ This enables support for Socionext Milbeaut SoCs
+
+if ARCH_MILBEAUT
+
+config ARCH_MILBEAUT_M10V
+ bool "Milbeaut SC2000/M10V platform"
+ select ARM_ARCH_TIMER
+ select MILBEAUT_TIMER
+ select PINCTRL
+ select PINCTRL_MILBEAUT
+ help
+ Support for Socionext's MILBEAUT M10V based systems
+
+endif
diff --git a/arch/arm/mach-milbeaut/Makefile b/arch/arm/mach-milbeaut/Makefile
new file mode 100644
index 000000000000..ce5ea062047a
--- /dev/null
+++ b/arch/arm/mach-milbeaut/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SMP) += platsmp.o
diff --git a/arch/arm/mach-milbeaut/platsmp.c b/arch/arm/mach-milbeaut/platsmp.c
new file mode 100644
index 000000000000..591543c81399
--- /dev/null
+++ b/arch/arm/mach-milbeaut/platsmp.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright: (C) 2018 Socionext Inc.
+ * Copyright: (C) 2015 Linaro Ltd.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/of_address.h>
+#include <linux/suspend.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/idmap.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+#define M10V_MAX_CPU 4
+#define KERNEL_UNBOOT_FLAG 0x12345678
+
+static void __iomem *m10v_smp_base;
+
+static int m10v_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ if (!m10v_smp_base)
+ return -ENXIO;
+
+ mpidr = cpu_logical_map(l_cpu);
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ if (cpu >= M10V_MAX_CPU)
+ return -EINVAL;
+
+ pr_info("%s: cpu %u l_cpu %u cluster %u\n",
+ __func__, cpu, l_cpu, cluster);
+
+ writel(__pa_symbol(secondary_startup), m10v_smp_base + cpu * 4);
+ arch_send_wakeup_ipi_mask(cpumask_of(l_cpu));
+
+ return 0;
+}
+
+static void m10v_smp_init(unsigned int max_cpus)
+{
+ unsigned int mpidr, cpu, cluster;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "socionext,milbeaut-smp-sram");
+ if (!np)
+ return;
+
+ m10v_smp_base = of_iomap(np, 0);
+ if (!m10v_smp_base)
+ return;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster);
+
+ for (cpu = 0; cpu < M10V_MAX_CPU; cpu++)
+ writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
+}
+
+static void m10v_cpu_die(unsigned int l_cpu)
+{
+ gic_cpu_if_down(0);
+ v7_exit_coherency_flush(louis);
+ wfi();
+}
+
+static int m10v_cpu_kill(unsigned int l_cpu)
+{
+ unsigned int mpidr, cpu;
+
+ mpidr = cpu_logical_map(l_cpu);
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
+ writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
+
+ return 1;
+}
+
+static struct smp_operations m10v_smp_ops __initdata = {
+ .smp_prepare_cpus = m10v_smp_init,
+ .smp_boot_secondary = m10v_boot_secondary,
+ .cpu_die = m10v_cpu_die,
+ .cpu_kill = m10v_cpu_kill,
+};
+CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops);
+
+static int m10v_pm_valid(suspend_state_t state)
+{
+ return (state == PM_SUSPEND_STANDBY) || (state == PM_SUSPEND_MEM);
+}
+
+typedef void (*phys_reset_t)(unsigned long);
+static phys_reset_t phys_reset;
+
+static int m10v_die(unsigned long arg)
+{
+ setup_mm_for_reboot();
+ asm("wfi");
+ /* Boot just like a secondary */
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(cpu_resume));
+
+ return 0;
+}
+
+static int m10v_pm_enter(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ asm("wfi");
+ break;
+ case PM_SUSPEND_MEM:
+ cpu_pm_enter();
+ cpu_suspend(0, m10v_die);
+ cpu_pm_exit();
+ break;
+ }
+ return 0;
+}
+
+static const struct platform_suspend_ops m10v_pm_ops = {
+ .valid = m10v_pm_valid,
+ .enter = m10v_pm_enter,
+};
+
+struct clk *m10v_clclk_register(struct device *cpu_dev);
+
+static int __init m10v_pm_init(void)
+{
+ if (of_machine_is_compatible("socionext,milbeaut-evb"))
+ suspend_set_ops(&m10v_pm_ops);
+
+ return 0;
+}
+late_initcall(m10v_pm_init);
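
The new platsmp.c parks each secondary CPU behind a per-CPU word in SRAM: KERNEL_UNBOOT_FLAG keeps a core waiting, and m10v_boot_secondary() releases it by writing the physical address of secondary_startup into its slot and sending a wakeup IPI. The firmware side of that handshake is not part of this patch; the following is a purely hypothetical plain-C illustration of the behaviour the driver appears to assume, not real Socionext code:

	/* Hypothetical sketch of the ROM/firmware side of the handshake. */
	#include <stdint.h>

	#define KERNEL_UNBOOT_FLAG 0x12345678u

	static void secondary_wait_loop(volatile uint32_t *my_slot)
	{
		uint32_t entry;

		/* Stay parked until the boot CPU posts a real entry point. */
		while ((entry = *my_slot) == KERNEL_UNBOOT_FLAG)
			;	/* wfi()/wfe() on real hardware */

		/* Jump to the address written by m10v_boot_secondary(). */
		((void (*)(void))(uintptr_t)entry)();
	}
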
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index a04e249c654b..d2560fb1e835 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -149,7 +149,6 @@ static struct regulator_init_data brownstone_v_5vp_data = {
static struct fixed_voltage_config brownstone_v_5vp = {
.supply_name = "v_5vp",
.microvolts = 5000000,
- .enable_high = 1,
.enabled_at_boot = 1,
.init_data = &brownstone_v_5vp_data,
};
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index c4c0a8ea11e4..be30c3c061b4 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -267,7 +267,6 @@ static struct fixed_voltage_config modem_nreset_config = {
.supply_name = "modem_nreset",
.microvolts = 3300000,
.startup_delay = 25000,
- .enable_high = 1,
.enabled_at_boot = 1,
.init_data = &modem_nreset_data,
};
@@ -533,7 +532,6 @@ static struct regulator_init_data keybrd_pwr_initdata = {
static struct fixed_voltage_config keybrd_pwr_config = {
.supply_name = "keybrd_pwr",
.microvolts = 5000000,
- .enable_high = 1,
.init_data = &keybrd_pwr_initdata,
};
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index a8b291f00109..dae514c8276a 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
(cx->mpu_logic_state == PWRDM_POWER_OFF);
+ /* Enter broadcast mode for periodic timers */
+ tick_broadcast_enable();
+
+ /* Enter broadcast mode for one-shot timers */
tick_broadcast_enter();
/*
@@ -218,15 +222,6 @@ fail:
return index;
}
-/*
- * For each cpu, setup the broadcast timer because local timers
- * stops for the states above C1.
- */
-static void omap_setup_broadcast_timer(void *arg)
-{
- tick_broadcast_enable();
-}
-
static struct cpuidle_driver omap4_idle_driver = {
.name = "omap4_idle",
.owner = THIS_MODULE,
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
if (!cpu_clkdm[0] || !cpu_clkdm[1])
return -ENODEV;
- /* Configure the broadcast timer on each cpu */
- on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
-
return cpuidle_register(idle_driver, cpu_online_mask);
}
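
The cpuidle44xx hunks drop the one-time on_each_cpu() broadcast-timer setup and instead call tick_broadcast_enable() on the idle path itself, right before tick_broadcast_enter(), presumably so the broadcast device is also enabled on CPUs brought online after omap4_idle_init() has run. A minimal sketch of the enter/exit pairing an idle handler relies on when the local timer stops in deep states (names illustrative, error handling omitted):

	#include <linux/cpuidle.h>
	#include <linux/tick.h>

	/* Illustrative idle-enter path for a state that stops the local timer. */
	static int demo_enter_deep_idle(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index)
	{
		tick_broadcast_enable();	/* opt this CPU into the broadcast device */
		tick_broadcast_enter();		/* hand pending wakeups to the broadcast timer */

		/* ... program the power domain and execute wfi ... */

		tick_broadcast_exit();		/* local timer is usable again */
		return index;
	}
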
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index f86b72d1d59e..1444b4b4bd9f 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
u32 enable_mask, enable_shift;
u32 pipd_mask, pipd_shift;
u32 reg;
+ int ret;
if (dsi_id == 0) {
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
return -ENODEV;
}
- regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
+ ret = regmap_read(omap4_dsi_mux_syscon,
+ OMAP4_DSIPHY_SYSCON_OFFSET,
+ &reg);
+ if (ret)
+ return ret;
reg &= ~enable_mask;
reg &= ~pipd_mask;
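
The display.c hunk stops ignoring regmap_read()'s return value; like the rest of the regmap API it returns 0 or a negative errno, and a failed syscon read would otherwise leave reg uninitialized before the mask/update logic runs. A condensed sketch of the same pattern (names illustrative):

	#include <linux/regmap.h>

	/* Read-modify-write on a syscon register, propagating regmap errors. */
	static int demo_update_syscon(struct regmap *map, unsigned int offset,
				      unsigned int clear, unsigned int set)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(map, offset, &val);
		if (ret)
			return ret;

		val &= ~clear;
		val |= set;

		return regmap_write(map, offset, val);
	}
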
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index fc5fb776a710..17558be4bf0a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -50,6 +50,9 @@
#define OMAP4_NR_BANKS 4
#define OMAP4_NR_IRQS 128
+#define SYS_NIRQ1_EXT_SYS_IRQ_1 7
+#define SYS_NIRQ2_EXT_SYS_IRQ_2 119
+
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
irq_chip_unmask_parent(d);
}
+/*
+ * The sys_nirq pins bypass peripheral modules and are wired directly
+ * to MPUSS wakeupgen. They get automatically inverted for GIC.
+ */
+static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ bool inverted = false;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ type &= ~IRQ_TYPE_LEVEL_MASK;
+ type |= IRQ_TYPE_LEVEL_HIGH;
+ inverted = true;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type &= ~IRQ_TYPE_EDGE_BOTH;
+ type |= IRQ_TYPE_EDGE_RISING;
+ inverted = true;
+ break;
+ default:
+ break;
+ }
+
+ if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
+ d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
+ pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
+ d->hwirq);
+
+ return irq_chip_set_type_parent(d, type);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_type = wakeupgen_irq_set_type,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index 8e44e2728620..debcd88ab971 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -432,6 +432,13 @@ static struct omap_hwmod dm81xx_i2c2_hwmod = {
.class = &i2c_class,
};
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c2 = {
+ .master = &dm81xx_l4_ls_hwmod,
+ .slave = &dm81xx_i2c2_hwmod,
+ .clk = "sysclk6_ck",
+ .user = OCP_USER_MPU,
+};
+
static struct omap_hwmod_class_sysconfig dm81xx_elm_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
@@ -443,13 +450,6 @@ static struct omap_hwmod_class_sysconfig dm81xx_elm_sysc = {
.sysc_fields = &omap_hwmod_sysc_type1,
};
-static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c2 = {
- .master = &dm81xx_l4_ls_hwmod,
- .slave = &dm81xx_i2c2_hwmod,
- .clk = "sysclk6_ck",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_class dm81xx_elm_hwmod_class = {
.name = "elm",
.sysc = &dm81xx_elm_sysc,
@@ -539,6 +539,58 @@ static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio2 = {
.user = OCP_USER_MPU,
};
+static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
+ { .role = "dbclk", .clk = "sysclk18_ck" },
+};
+
+static struct omap_hwmod dm81xx_gpio3_hwmod = {
+ .name = "gpio3",
+ .clkdm_name = "alwon_l3s_clkdm",
+ .class = &dm81xx_gpio_hwmod_class,
+ .main_clk = "sysclk6_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .opt_clks = gpio3_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
+};
+
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio3 = {
+ .master = &dm81xx_l4_ls_hwmod,
+ .slave = &dm81xx_gpio3_hwmod,
+ .clk = "sysclk6_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
+ { .role = "dbclk", .clk = "sysclk18_ck" },
+};
+
+static struct omap_hwmod dm81xx_gpio4_hwmod = {
+ .name = "gpio4",
+ .clkdm_name = "alwon_l3s_clkdm",
+ .class = &dm81xx_gpio_hwmod_class,
+ .main_clk = "sysclk6_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .opt_clks = gpio4_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks),
+};
+
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio4 = {
+ .master = &dm81xx_l4_ls_hwmod,
+ .slave = &dm81xx_gpio4_hwmod,
+ .clk = "sysclk6_ck",
+ .user = OCP_USER_MPU,
+};
+
static struct omap_hwmod_class_sysconfig dm81xx_gpmc_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x10,
@@ -1133,6 +1185,45 @@ static struct omap_hwmod dm81xx_mcspi1_hwmod = {
.class = &dm816x_mcspi_class,
};
+static struct omap_hwmod dm81xx_mcspi2_hwmod = {
+ .name = "mcspi2",
+ .clkdm_name = "alwon_l3s_clkdm",
+ .main_clk = "sysclk10_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .class = &dm816x_mcspi_class,
+};
+
+static struct omap_hwmod dm81xx_mcspi3_hwmod = {
+ .name = "mcspi3",
+ .clkdm_name = "alwon_l3s_clkdm",
+ .main_clk = "sysclk10_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .class = &dm816x_mcspi_class,
+};
+
+static struct omap_hwmod dm81xx_mcspi4_hwmod = {
+ .name = "mcspi4",
+ .clkdm_name = "alwon_l3s_clkdm",
+ .main_clk = "sysclk10_ck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .class = &dm816x_mcspi_class,
+};
+
static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi1 = {
.master = &dm81xx_l4_ls_hwmod,
.slave = &dm81xx_mcspi1_hwmod,
@@ -1140,6 +1231,27 @@ static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi1 = {
.user = OCP_USER_MPU,
};
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi2 = {
+ .master = &dm81xx_l4_ls_hwmod,
+ .slave = &dm81xx_mcspi2_hwmod,
+ .clk = "sysclk6_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi3 = {
+ .master = &dm81xx_l4_ls_hwmod,
+ .slave = &dm81xx_mcspi3_hwmod,
+ .clk = "sysclk6_ck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi4 = {
+ .master = &dm81xx_l4_ls_hwmod,
+ .slave = &dm81xx_mcspi4_hwmod,
+ .clk = "sysclk6_ck",
+ .user = OCP_USER_MPU,
+};
+
static struct omap_hwmod_class_sysconfig dm81xx_mailbox_sysc = {
.rev_offs = 0x000,
.sysc_offs = 0x010,
@@ -1378,8 +1490,13 @@ static struct omap_hwmod_ocp_if *dm814x_hwmod_ocp_ifs[] __initdata = {
&dm81xx_l4_ls__i2c2,
&dm81xx_l4_ls__gpio1,
&dm81xx_l4_ls__gpio2,
+ &dm81xx_l4_ls__gpio3,
+ &dm81xx_l4_ls__gpio4,
&dm81xx_l4_ls__elm,
&dm81xx_l4_ls__mcspi1,
+ &dm81xx_l4_ls__mcspi2,
+ &dm81xx_l4_ls__mcspi3,
+ &dm81xx_l4_ls__mcspi4,
&dm814x_l4_ls__mmc1,
&dm814x_l4_ls__mmc2,
&ti81xx_l4_ls__rtc,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 8a5b6ed4ec36..a2ecc5e69abb 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -330,7 +330,6 @@ static struct fixed_voltage_config pandora_vwlan = {
.supply_name = "vwlan",
.microvolts = 1800000, /* 1.8V */
.startup_delay = 50000, /* 50ms */
- .enable_high = 1,
.init_data = &pandora_vmmc3,
};
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 83a7ec4c16d0..c67f92bfa30e 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -20,7 +20,7 @@
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/system_misc.h>
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index a3c1336d30c9..c65ab7db36ad 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -16,7 +16,7 @@
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index 252efe29bd1a..76b8138d9d79 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -17,7 +17,7 @@
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
#include <linux/i2c.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index f4f1dbe1d91d..5f388a1ed1e4 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -18,7 +18,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index d162d4c7f85d..83589a28a491 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -15,7 +15,7 @@
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index 9250bb2e429c..cea08d4a2597 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -18,7 +18,7 @@
#include <linux/leds.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index dc8e4f4b7ade..8839c72fdee3 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -454,24 +454,6 @@ config MACH_TREO680
Say Y here if you intend to run this kernel on Palm Treo 680
smartphone.
-config MACH_RAUMFELD_RC
- bool "Raumfeld Controller"
- select CPU_PXA300
- select POWER_SUPPLY
- select PXA3xx
-
-config MACH_RAUMFELD_CONNECTOR
- bool "Raumfeld Connector"
- select CPU_PXA300
- select POWER_SUPPLY
- select PXA3xx
-
-config MACH_RAUMFELD_SPEAKER
- bool "Raumfeld Speaker"
- select CPU_PXA300
- select POWER_SUPPLY
- select PXA3xx
-
config PXA_SHARPSL
bool "SHARP Zaurus SL-5600, SL-C7xx and SL-Cxx00 Models"
select SHARP_PARAM
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 0a8e9611052f..f70728930c4f 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -86,9 +86,6 @@ obj-$(CONFIG_MACH_POODLE) += poodle.o
obj-$(CONFIG_MACH_TOSA) += tosa.o
obj-$(CONFIG_MACH_ICONTROL) += icontrol.o mxm8x10.o
obj-$(CONFIG_ARCH_PXA_ESERIES) += eseries.o
-obj-$(CONFIG_MACH_RAUMFELD_RC) += raumfeld.o
-obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o
-obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o
obj-$(CONFIG_MACH_ZIPIT2) += z2.o
obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o
diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c
index fa8e7dd4d898..4401dfcd7e68 100644
--- a/arch/arm/mach-pxa/cm-x255.c
+++ b/arch/arm/mach-pxa/cm-x255.c
@@ -98,7 +98,7 @@ static unsigned long cmx255_pin_config[] = {
};
#if defined(CONFIG_SPI_PXA2XX)
-static struct pxa2xx_spi_master pxa_ssp_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
index f7081a50dc67..279eeca7add0 100644
--- a/arch/arm/mach-pxa/cm-x270.c
+++ b/arch/arm/mach-pxa/cm-x270.c
@@ -313,7 +313,7 @@ static inline void cmx270_init_mmc(void) {}
#endif
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
-static struct pxa2xx_spi_master cm_x270_spi_info = {
+static struct pxa2xx_spi_controller cm_x270_spi_info = {
.num_chipselect = 1,
.enable_dma = 1,
};
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index c9732cace5e3..7ecf559bd71c 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -530,7 +530,7 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
};
#if IS_ENABLED(CONFIG_SPI_PXA2XX)
-static struct pxa2xx_spi_master corgi_spi_info = {
+static struct pxa2xx_spi_controller corgi_spi_info = {
.num_chipselect = 3,
};
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index a24783a03827..524d6093e0c7 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -1065,7 +1065,7 @@ struct platform_device pxa93x_device_gpio = {
/* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1.
* See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */
-void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info)
+void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info)
{
struct platform_device *pd;
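
The pxa2xx_spi_master changes scattered through these board files are a mechanical rename to pxa2xx_spi_controller, in line with the SPI core's move from "master" to "controller" naming; the fields and the pxa2xx_set_spi_info() helper shown above are otherwise unchanged. A minimal board-file sketch using the new name (values illustrative):

	#include <linux/init.h>
	#include <linux/spi/pxa2xx_spi.h>

	/* SSP1 exposed as an SPI bus with one chip select, DMA enabled. */
	static struct pxa2xx_spi_controller demo_ssp1_spi_info = {
		.num_chipselect	= 1,
		.enable_dma	= 1,
	};

	static void __init demo_board_init_spi(void)
	{
		/* id 1 -> platform device "pxa2xx-spi.1" (SSP platform id + 1). */
		pxa2xx_set_spi_info(1, &demo_ssp1_spi_info);
	}
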
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 32c1edeb3f14..fa3adb073a0f 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -689,7 +689,7 @@ static inline void em_x270_init_lcd(void) {}
#endif
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
-static struct pxa2xx_spi_master em_x270_spi_info = {
+static struct pxa2xx_spi_controller em_x270_spi_info = {
.num_chipselect = 1,
};
@@ -703,7 +703,7 @@ static struct tdo24m_platform_data em_x270_tdo24m_pdata = {
.model = TDO35S,
};
-static struct pxa2xx_spi_master em_x270_spi_2_info = {
+static struct pxa2xx_spi_controller em_x270_spi_2_info = {
.num_chipselect = 1,
.enable_dma = 1,
};
@@ -976,7 +976,6 @@ static struct fixed_voltage_config camera_dummy_config = {
.supply_name = "camera_vdd",
.input_supply = "vcc cam",
.microvolts = 2800000,
- .enable_high = 0,
.init_data = &camera_dummy_initdata,
};
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 565965e9acc7..5e110e70ce5a 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -714,7 +714,6 @@ static struct regulator_init_data camera_regulator_initdata = {
static struct fixed_voltage_config camera_regulator_config = {
.supply_name = "camera_vdd",
.microvolts = 2800000,
- .enable_high = 0,
.init_data = &camera_regulator_initdata,
};
@@ -730,7 +729,7 @@ static struct gpiod_lookup_table camera_supply_gpiod_table = {
.dev_id = "reg-fixed-voltage.1",
.table = {
GPIO_LOOKUP("gpio-pxa", GPIO50_nCAM_EN,
- NULL, GPIO_ACTIVE_HIGH),
+ NULL, GPIO_ACTIVE_LOW),
{ },
},
};
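
With the enable_high field gone from struct fixed_voltage_config, the polarity of the enable line is carried solely by the GPIO_ACTIVE_* flag in the board's gpiod lookup table, which is why this hunk flips the GPIO50_nCAM_EN entry (an active-low enable, previously expressed as .enable_high = 0) to GPIO_ACTIVE_LOW at the same time. A condensed sketch of the resulting pattern (names and numbers illustrative):

	#include <linux/gpio/machine.h>
	#include <linux/regulator/fixed.h>
	#include <linux/regulator/machine.h>

	static struct regulator_init_data demo_cam_vdd_initdata = {
		.constraints = {
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	};

	static struct fixed_voltage_config demo_cam_vdd_config = {
		.supply_name	= "camera_vdd",
		.microvolts	= 2800000,
		.init_data	= &demo_cam_vdd_initdata,
	};

	/* Polarity now lives here: the nCAM_EN pin is active low. */
	static struct gpiod_lookup_table demo_cam_vdd_gpiod_table = {
		.dev_id = "reg-fixed-voltage.1",
		.table = {
			GPIO_LOOKUP("gpio-pxa", 50, NULL, GPIO_ACTIVE_LOW),
			{ },
		},
	};
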
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index b79b757fdd41..1d6b1d2fb6a9 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/fb.h>
+#include <linux/gpio/machine.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
@@ -629,7 +630,7 @@ static struct spi_board_info tsc2046_board_info[] __initdata = {
},
};
-static struct pxa2xx_spi_master pxa_ssp2_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
.num_chipselect = 1,
.enable_dma = 1,
};
@@ -702,9 +703,7 @@ static struct regulator_init_data bq24022_init_data = {
.consumer_supplies = bq24022_consumers,
};
-static struct gpio bq24022_gpios[] = {
- { GPIO96_HX4700_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" },
-};
+static enum gpiod_flags bq24022_gpiod_gflags[] = { GPIOD_OUT_LOW };
static struct gpio_regulator_state bq24022_states[] = {
{ .value = 100000, .gpios = (0 << 0) },
@@ -714,12 +713,10 @@ static struct gpio_regulator_state bq24022_states[] = {
static struct gpio_regulator_config bq24022_info = {
.supply_name = "bq24022",
- .enable_gpio = GPIO72_HX4700_BQ24022_nCHARGE_EN,
- .enable_high = 0,
.enabled_at_boot = 0,
- .gpios = bq24022_gpios,
- .nr_gpios = ARRAY_SIZE(bq24022_gpios),
+ .gflags = bq24022_gpiod_gflags,
+ .ngpios = ARRAY_SIZE(bq24022_gpiod_gflags),
.states = bq24022_states,
.nr_states = ARRAY_SIZE(bq24022_states),
@@ -736,6 +733,17 @@ static struct platform_device bq24022 = {
},
};
+static struct gpiod_lookup_table bq24022_gpiod_table = {
+ .dev_id = "gpio-regulator",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO96_HX4700_BQ24022_ISET2,
+ NULL, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO72_HX4700_BQ24022_nCHARGE_EN,
+ "enable", GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
/*
* StrataFlash
*/
@@ -878,6 +886,7 @@ static void __init hx4700_init(void)
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
+ gpiod_add_lookup_table(&bq24022_gpiod_table);
platform_add_devices(devices, ARRAY_SIZE(devices));
pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index cbaf4f6edcda..7e30452e3840 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -115,12 +115,12 @@ static struct spi_board_info mcp251x_board_info[] = {
}
};
-static struct pxa2xx_spi_master pxa_ssp3_spi_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp3_spi_master_info = {
.num_chipselect = 2,
.enable_dma = 1
};
-static struct pxa2xx_spi_master pxa_ssp4_spi_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp4_spi_master_info = {
.num_chipselect = 2,
.enable_dma = 1
};
diff --git a/arch/arm/mach-pxa/include/mach/pxa25x-udc.h b/arch/arm/mach-pxa/include/mach/pxa25x-udc.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/arch/arm/mach-pxa/include/mach/pxa25x-udc.h
+++ /dev/null
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 39db4898dc4a..464b8bd2bcb9 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -191,7 +191,7 @@ static inline void littleton_init_lcd(void) {};
#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
-static struct pxa2xx_spi_master littleton_spi_info = {
+static struct pxa2xx_spi_controller littleton_spi_info = {
.num_chipselect = 1,
};
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index a1391e113ef4..c1bd0d544981 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -197,7 +197,7 @@ static struct platform_device sa1111_device = {
* (to J5) and poking board registers (as done below). Else it's only useful
* for the temperature sensors.
*/
-static struct pxa2xx_spi_master pxa_ssp_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 08b079653c3f..75abc21083eb 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -645,9 +645,8 @@ static struct regulator_init_data bq24022_init_data = {
.consumer_supplies = bq24022_consumers,
};
-static struct gpio bq24022_gpios[] = {
- { EGPIO_MAGICIAN_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" },
-};
+
+static enum gpiod_flags bq24022_gpiod_gflags[] = { GPIOD_OUT_LOW };
static struct gpio_regulator_state bq24022_states[] = {
{ .value = 100000, .gpios = (0 << 0) },
@@ -657,12 +656,10 @@ static struct gpio_regulator_state bq24022_states[] = {
static struct gpio_regulator_config bq24022_info = {
.supply_name = "bq24022",
- .enable_gpio = GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,
- .enable_high = 0,
.enabled_at_boot = 1,
- .gpios = bq24022_gpios,
- .nr_gpios = ARRAY_SIZE(bq24022_gpios),
+ .gflags = bq24022_gpiod_gflags,
+ .ngpios = ARRAY_SIZE(bq24022_gpiod_gflags),
.states = bq24022_states,
.nr_states = ARRAY_SIZE(bq24022_states),
@@ -679,6 +676,17 @@ static struct platform_device bq24022 = {
},
};
+static struct gpiod_lookup_table bq24022_gpiod_table = {
+ .dev_id = "gpio-regulator",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2,
+ NULL, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,
+ "enable", GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
/*
* fixed regulator for ads7846
*/
@@ -932,7 +940,7 @@ struct pxa2xx_spi_chip tsc2046_chip_info = {
.gpio_cs = GPIO14_MAGICIAN_TSC2046_CS,
};
-static struct pxa2xx_spi_master magician_spi_info = {
+static struct pxa2xx_spi_controller magician_spi_info = {
.num_chipselect = 1,
.enable_dma = 1,
};
@@ -1027,6 +1035,7 @@ static void __init magician_init(void)
regulator_register_always_on(0, "power", pwm_backlight_supply,
ARRAY_SIZE(pwm_backlight_supply), 5000000);
+ gpiod_add_lookup_table(&bq24022_gpiod_table);
platform_add_devices(ARRAY_AND_SIZE(devices));
}
diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c
index ccca9f7575c3..e2e613449660 100644
--- a/arch/arm/mach-pxa/pcm027.c
+++ b/arch/arm/mach-pxa/pcm027.c
@@ -132,7 +132,7 @@ static struct platform_device smc91x_device = {
/*
* SPI host and devices
*/
-static struct pxa2xx_spi_master pxa_ssp_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index c2a43d4cfd3e..9450a523cd0b 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -196,7 +196,7 @@ struct platform_device poodle_locomo_device = {
EXPORT_SYMBOL(poodle_locomo_device);
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
-static struct pxa2xx_spi_master poodle_spi_info = {
+static struct pxa2xx_spi_controller poodle_spi_info = {
.num_chipselect = 1,
};
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
deleted file mode 100644
index e1db072756f2..000000000000
--- a/arch/arm/mach-pxa/raumfeld.c
+++ /dev/null
@@ -1,1187 +0,0 @@
-/*
- * arch/arm/mach-pxa/raumfeld.c
- *
- * Support for the following Raumfeld devices:
- *
- * * Controller
- * * Connector
- * * Speaker S/M
- *
- * See http://www.raumfeld.com for details.
- *
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/property.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
-#include <linux/smsc911x.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-#include <linux/leds.h>
-#include <linux/w1-gpio.h>
-#include <linux/sched.h>
-#include <linux/pwm.h>
-#include <linux/pwm_backlight.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/i2c-pxa.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_gpio.h>
-#include <linux/lis3lv02d.h>
-#include <linux/pda_power.h>
-#include <linux/power_supply.h>
-#include <linux/regulator/max8660.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/consumer.h>
-#include <linux/delay.h>
-
-#include <asm/system_info.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "pxa300.h"
-#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <linux/platform_data/video-pxafb.h>
-#include <linux/platform_data/mmc-pxamci.h>
-#include <linux/platform_data/mtd-nand-pxa3xx.h>
-
-#include "generic.h"
-#include "devices.h"
-
-/* common GPIO definitions */
-
-/* inputs */
-#define GPIO_ON_OFF (14)
-#define GPIO_VOLENC_A (19)
-#define GPIO_VOLENC_B (20)
-#define GPIO_CHARGE_DONE (23)
-#define GPIO_CHARGE_IND (27)
-#define GPIO_TOUCH_IRQ (32)
-#define GPIO_ETH_IRQ (40)
-#define GPIO_SPI_MISO (98)
-#define GPIO_ACCEL_IRQ (104)
-#define GPIO_RESCUE_BOOT (115)
-#define GPIO_DOCK_DETECT (116)
-#define GPIO_KEY1 (117)
-#define GPIO_KEY2 (118)
-#define GPIO_KEY3 (119)
-#define GPIO_CHARGE_USB_OK (112)
-#define GPIO_CHARGE_DC_OK (101)
-#define GPIO_CHARGE_USB_SUSP (102)
-
-/* outputs */
-#define GPIO_SHUTDOWN_SUPPLY (16)
-#define GPIO_SHUTDOWN_BATT (18)
-#define GPIO_CHRG_PEN2 (31)
-#define GPIO_TFT_VA_EN (33)
-#define GPIO_SPDIF_CS (34)
-#define GPIO_LED2 (35)
-#define GPIO_LED1 (36)
-#define GPIO_SPDIF_RESET (38)
-#define GPIO_SPI_CLK (95)
-#define GPIO_MCLK_DAC_CS (96)
-#define GPIO_SPI_MOSI (97)
-#define GPIO_W1_PULLUP_ENABLE (105)
-#define GPIO_DISPLAY_ENABLE (106)
-#define GPIO_MCLK_RESET (111)
-#define GPIO_W2W_RESET (113)
-#define GPIO_W2W_PDN (114)
-#define GPIO_CODEC_RESET (120)
-#define GPIO_AUDIO_VA_ENABLE (124)
-#define GPIO_ACCEL_CS (125)
-#define GPIO_ONE_WIRE (126)
-
-/*
- * GPIO configurations
- */
-static mfp_cfg_t raumfeld_controller_pin_config[] __initdata = {
- /* UART1 */
- GPIO77_UART1_RXD,
- GPIO78_UART1_TXD,
- GPIO79_UART1_CTS,
- GPIO81_UART1_DSR,
- GPIO83_UART1_DTR,
- GPIO84_UART1_RTS,
-
- /* UART3 */
- GPIO110_UART3_RXD,
-
- /* USB Host */
- GPIO0_2_USBH_PEN,
- GPIO1_2_USBH_PWR,
-
- /* I2C */
- GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
- GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
-
- /* SPI */
- GPIO34_GPIO, /* SPDIF_CS */
- GPIO96_GPIO, /* MCLK_CS */
- GPIO125_GPIO, /* ACCEL_CS */
-
- /* MMC */
- GPIO3_MMC1_DAT0,
- GPIO4_MMC1_DAT1,
- GPIO5_MMC1_DAT2,
- GPIO6_MMC1_DAT3,
- GPIO7_MMC1_CLK,
- GPIO8_MMC1_CMD,
-
- /* One-wire */
- GPIO126_GPIO | MFP_LPM_FLOAT,
- GPIO105_GPIO | MFP_PULL_LOW | MFP_LPM_PULL_LOW,
-
- /* CHRG_USB_OK */
- GPIO101_GPIO | MFP_PULL_HIGH,
- /* CHRG_USB_OK */
- GPIO112_GPIO | MFP_PULL_HIGH,
- /* CHRG_USB_SUSP */
- GPIO102_GPIO,
- /* DISPLAY_ENABLE */
- GPIO106_GPIO,
- /* DOCK_DETECT */
- GPIO116_GPIO | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
-
- /* LCD */
- GPIO54_LCD_LDD_0,
- GPIO55_LCD_LDD_1,
- GPIO56_LCD_LDD_2,
- GPIO57_LCD_LDD_3,
- GPIO58_LCD_LDD_4,
- GPIO59_LCD_LDD_5,
- GPIO60_LCD_LDD_6,
- GPIO61_LCD_LDD_7,
- GPIO62_LCD_LDD_8,
- GPIO63_LCD_LDD_9,
- GPIO64_LCD_LDD_10,
- GPIO65_LCD_LDD_11,
- GPIO66_LCD_LDD_12,
- GPIO67_LCD_LDD_13,
- GPIO68_LCD_LDD_14,
- GPIO69_LCD_LDD_15,
- GPIO70_LCD_LDD_16,
- GPIO71_LCD_LDD_17,
- GPIO72_LCD_FCLK,
- GPIO73_LCD_LCLK,
- GPIO74_LCD_PCLK,
- GPIO75_LCD_BIAS,
-};
-
-static mfp_cfg_t raumfeld_connector_pin_config[] __initdata = {
- /* UART1 */
- GPIO77_UART1_RXD,
- GPIO78_UART1_TXD,
- GPIO79_UART1_CTS,
- GPIO81_UART1_DSR,
- GPIO83_UART1_DTR,
- GPIO84_UART1_RTS,
-
- /* UART3 */
- GPIO110_UART3_RXD,
-
- /* USB Host */
- GPIO0_2_USBH_PEN,
- GPIO1_2_USBH_PWR,
-
- /* I2C */
- GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
- GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
-
- /* SPI */
- GPIO34_GPIO, /* SPDIF_CS */
- GPIO96_GPIO, /* MCLK_CS */
- GPIO125_GPIO, /* ACCEL_CS */
-
- /* MMC */
- GPIO3_MMC1_DAT0,
- GPIO4_MMC1_DAT1,
- GPIO5_MMC1_DAT2,
- GPIO6_MMC1_DAT3,
- GPIO7_MMC1_CLK,
- GPIO8_MMC1_CMD,
-
- /* Ethernet */
- GPIO1_nCS2, /* CS */
- GPIO40_GPIO | MFP_PULL_HIGH, /* IRQ */
-
- /* SSP for I2S */
- GPIO85_SSP1_SCLK,
- GPIO89_SSP1_EXTCLK,
- GPIO86_SSP1_FRM,
- GPIO87_SSP1_TXD,
- GPIO88_SSP1_RXD,
- GPIO90_SSP1_SYSCLK,
-
- /* SSP2 for S/PDIF */
- GPIO25_SSP2_SCLK,
- GPIO26_SSP2_FRM,
- GPIO27_SSP2_TXD,
- GPIO29_SSP2_EXTCLK,
-
- /* LEDs */
- GPIO35_GPIO | MFP_LPM_PULL_LOW,
- GPIO36_GPIO | MFP_LPM_DRIVE_HIGH,
-};
-
-static mfp_cfg_t raumfeld_speaker_pin_config[] __initdata = {
- /* UART1 */
- GPIO77_UART1_RXD,
- GPIO78_UART1_TXD,
- GPIO79_UART1_CTS,
- GPIO81_UART1_DSR,
- GPIO83_UART1_DTR,
- GPIO84_UART1_RTS,
-
- /* UART3 */
- GPIO110_UART3_RXD,
-
- /* USB Host */
- GPIO0_2_USBH_PEN,
- GPIO1_2_USBH_PWR,
-
- /* I2C */
- GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
- GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
-
- /* SPI */
- GPIO34_GPIO, /* SPDIF_CS */
- GPIO96_GPIO, /* MCLK_CS */
- GPIO125_GPIO, /* ACCEL_CS */
-
- /* MMC */
- GPIO3_MMC1_DAT0,
- GPIO4_MMC1_DAT1,
- GPIO5_MMC1_DAT2,
- GPIO6_MMC1_DAT3,
- GPIO7_MMC1_CLK,
- GPIO8_MMC1_CMD,
-
- /* Ethernet */
- GPIO1_nCS2, /* CS */
- GPIO40_GPIO | MFP_PULL_HIGH, /* IRQ */
-
- /* SSP for I2S */
- GPIO85_SSP1_SCLK,
- GPIO89_SSP1_EXTCLK,
- GPIO86_SSP1_FRM,
- GPIO87_SSP1_TXD,
- GPIO88_SSP1_RXD,
- GPIO90_SSP1_SYSCLK,
-
- /* LEDs */
- GPIO35_GPIO | MFP_LPM_PULL_LOW,
- GPIO36_GPIO | MFP_LPM_DRIVE_HIGH,
-};
-
-/*
- * SMSC LAN9220 Ethernet
- */
-
-static struct resource smc91x_resources[] = {
- {
- .start = PXA3xx_CS2_PHYS,
- .end = PXA3xx_CS2_PHYS + 0xfffff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = PXA_GPIO_TO_IRQ(GPIO_ETH_IRQ),
- .end = PXA_GPIO_TO_IRQ(GPIO_ETH_IRQ),
- .flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
- }
-};
-
-static struct smsc911x_platform_config raumfeld_smsc911x_config = {
- .phy_interface = PHY_INTERFACE_MODE_MII,
- .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
- .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
- .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
-};
-
-static struct platform_device smc91x_device = {
- .name = "smsc911x",
- .id = -1,
- .num_resources = ARRAY_SIZE(smc91x_resources),
- .resource = smc91x_resources,
- .dev = {
- .platform_data = &raumfeld_smsc911x_config,
- }
-};
-
-/**
- * NAND
- */
-
-static struct mtd_partition raumfeld_nand_partitions[] = {
- {
- .name = "Bootloader",
- .offset = 0,
- .size = 0xa0000,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "BootloaderEnvironment",
- .offset = 0xa0000,
- .size = 0x20000,
- },
- {
- .name = "BootloaderSplashScreen",
- .offset = 0xc0000,
- .size = 0x60000,
- },
- {
- .name = "UBI",
- .offset = 0x120000,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct pxa3xx_nand_platform_data raumfeld_nand_info = {
- .keep_config = 1,
- .parts = raumfeld_nand_partitions,
- .nr_parts = ARRAY_SIZE(raumfeld_nand_partitions),
-};
-
-/**
- * USB (OHCI) support
- */
-
-static struct pxaohci_platform_data raumfeld_ohci_info = {
- .port_mode = PMM_GLOBAL_MODE,
- .flags = ENABLE_PORT1,
-};
-
-/**
- * Rotary encoder input device
- */
-
-static struct gpiod_lookup_table raumfeld_rotary_gpios_table = {
- .dev_id = "rotary-encoder.0",
- .table = {
- GPIO_LOOKUP_IDX("gpio-0",
- GPIO_VOLENC_A, NULL, 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("gpio-0",
- GPIO_VOLENC_B, NULL, 1, GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static const struct property_entry raumfeld_rotary_properties[] __initconst = {
- PROPERTY_ENTRY_U32("rotary-encoder,steps-per-period", 24),
- PROPERTY_ENTRY_U32("linux,axis", REL_X),
- PROPERTY_ENTRY_U32("rotary-encoder,relative_axis", 1),
- { },
-};
-
-static struct platform_device rotary_encoder_device = {
- .name = "rotary-encoder",
- .id = 0,
-};
-
-/**
- * GPIO buttons
- */
-
-static struct gpio_keys_button gpio_keys_button[] = {
- {
- .code = KEY_F1,
- .type = EV_KEY,
- .gpio = GPIO_KEY1,
- .active_low = 1,
- .wakeup = 0,
- .debounce_interval = 5, /* ms */
- .desc = "Button 1",
- },
- {
- .code = KEY_F2,
- .type = EV_KEY,
- .gpio = GPIO_KEY2,
- .active_low = 1,
- .wakeup = 0,
- .debounce_interval = 5, /* ms */
- .desc = "Button 2",
- },
- {
- .code = KEY_F3,
- .type = EV_KEY,
- .gpio = GPIO_KEY3,
- .active_low = 1,
- .wakeup = 0,
- .debounce_interval = 5, /* ms */
- .desc = "Button 3",
- },
- {
- .code = KEY_F4,
- .type = EV_KEY,
- .gpio = GPIO_RESCUE_BOOT,
- .active_low = 0,
- .wakeup = 0,
- .debounce_interval = 5, /* ms */
- .desc = "rescue boot button",
- },
- {
- .code = KEY_F5,
- .type = EV_KEY,
- .gpio = GPIO_DOCK_DETECT,
- .active_low = 1,
- .wakeup = 0,
- .debounce_interval = 5, /* ms */
- .desc = "dock detect",
- },
- {
- .code = KEY_F6,
- .type = EV_KEY,
- .gpio = GPIO_ON_OFF,
- .active_low = 0,
- .wakeup = 0,
- .debounce_interval = 5, /* ms */
- .desc = "on_off button",
- },
-};
-
-static struct gpio_keys_platform_data gpio_keys_platform_data = {
- .buttons = gpio_keys_button,
- .nbuttons = ARRAY_SIZE(gpio_keys_button),
- .rep = 0,
-};
-
-static struct platform_device raumfeld_gpio_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &gpio_keys_platform_data,
- }
-};
-
-/**
- * GPIO LEDs
- */
-
-static struct gpio_led raumfeld_leds[] = {
- {
- .name = "raumfeld:1",
- .gpio = GPIO_LED1,
- .active_low = 1,
- .default_state = LEDS_GPIO_DEFSTATE_ON,
- },
- {
- .name = "raumfeld:2",
- .gpio = GPIO_LED2,
- .active_low = 0,
- .default_state = LEDS_GPIO_DEFSTATE_OFF,
- }
-};
-
-static struct gpio_led_platform_data raumfeld_led_platform_data = {
- .leds = raumfeld_leds,
- .num_leds = ARRAY_SIZE(raumfeld_leds),
-};
-
-static struct platform_device raumfeld_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &raumfeld_led_platform_data,
- },
-};
-
-/**
- * One-wire (W1 bus) support
- */
-
-static void w1_enable_external_pullup(int enable)
-{
- gpio_set_value(GPIO_W1_PULLUP_ENABLE, enable);
- msleep(100);
-}
-
-static struct gpiod_lookup_table raumfeld_w1_gpiod_table = {
- .dev_id = "w1-gpio",
- .table = {
- GPIO_LOOKUP_IDX("gpio-pxa", GPIO_ONE_WIRE, NULL, 0,
- GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- },
-};
-
-static struct w1_gpio_platform_data w1_gpio_platform_data = {
- .enable_external_pullup = w1_enable_external_pullup,
-};
-
-static struct platform_device raumfeld_w1_gpio_device = {
- .name = "w1-gpio",
- .dev = {
- .platform_data = &w1_gpio_platform_data
- }
-};
-
-static void __init raumfeld_w1_init(void)
-{
- int ret = gpio_request(GPIO_W1_PULLUP_ENABLE,
- "W1 external pullup enable");
-
- if (ret < 0)
- pr_warn("Unable to request GPIO_W1_PULLUP_ENABLE\n");
- else
- gpio_direction_output(GPIO_W1_PULLUP_ENABLE, 0);
-
- gpiod_add_lookup_table(&raumfeld_w1_gpiod_table);
- platform_device_register(&raumfeld_w1_gpio_device);
-}
-
-/**
- * Framebuffer device
- */
-
-static struct pwm_lookup raumfeld_pwm_lookup[] = {
- PWM_LOOKUP("pxa27x-pwm.0", 0, "pwm-backlight", NULL, 10000,
- PWM_POLARITY_NORMAL),
-};
-
-/* PWM controlled backlight */
-static struct platform_pwm_backlight_data raumfeld_pwm_backlight_data = {
- .max_brightness = 100,
- .dft_brightness = 100,
- .enable_gpio = -1,
-};
-
-static struct platform_device raumfeld_pwm_backlight_device = {
- .name = "pwm-backlight",
- .dev = {
- .parent = &pxa27x_device_pwm0.dev,
- .platform_data = &raumfeld_pwm_backlight_data,
- }
-};
-
-/* LT3593 controlled backlight */
-static struct gpio_led raumfeld_lt3593_led = {
- .name = "backlight",
- .gpio = mfp_to_gpio(MFP_PIN_GPIO17),
- .default_state = LEDS_GPIO_DEFSTATE_ON,
-};
-
-static struct gpio_led_platform_data raumfeld_lt3593_platform_data = {
- .leds = &raumfeld_lt3593_led,
- .num_leds = 1,
-};
-
-static struct platform_device raumfeld_lt3593_device = {
- .name = "leds-lt3593",
- .id = -1,
- .dev = {
- .platform_data = &raumfeld_lt3593_platform_data,
- },
-};
-
-static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
- .pixclock = 111000,
- .xres = 480,
- .yres = 272,
- .bpp = 16,
- .hsync_len = 41,
- .left_margin = 2,
- .right_margin = 1,
- .vsync_len = 10,
- .upper_margin = 3,
- .lower_margin = 1,
- .sync = 0,
-};
-
-static struct pxafb_mach_info raumfeld_sharp_lcd_info = {
- .modes = &sharp_lq043t3dx02_mode,
- .num_modes = 1,
- .video_mem_size = 0x400000,
- .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
-#ifdef CONFIG_PXA3XX_GCU
- .acceleration_enabled = 1,
-#endif
-};
-
-static void __init raumfeld_lcd_init(void)
-{
- int ret;
-
- ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
- if (ret < 0)
- pr_warn("Unable to request GPIO_TFT_VA_EN\n");
- else
- gpio_direction_output(GPIO_TFT_VA_EN, 1);
-
- msleep(100);
-
- ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
- if (ret < 0)
- pr_warn("Unable to request GPIO_DISPLAY_ENABLE\n");
- else
- gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
-
- /* Hardware revision 2 has the backlight regulator controlled
- * by an LT3593, earlier and later devices use PWM for that. */
- if ((system_rev & 0xff) == 2) {
- platform_device_register(&raumfeld_lt3593_device);
- } else {
- mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
- pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
- pwm_add_table(raumfeld_pwm_lookup,
- ARRAY_SIZE(raumfeld_pwm_lookup));
- platform_device_register(&raumfeld_pwm_backlight_device);
- }
-
- pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
- platform_device_register(&pxa3xx_device_gcu);
-}
-
-/**
- * SPI devices
- */
-
-static struct spi_gpio_platform_data raumfeld_spi_platform_data = {
- .num_chipselect = 3,
-};
-
-static struct platform_device raumfeld_spi_device = {
- .name = "spi_gpio",
- .id = 0,
- .dev = {
- .platform_data = &raumfeld_spi_platform_data,
- }
-};
-
-static struct gpiod_lookup_table raumfeld_spi_gpiod_table = {
- .dev_id = "spi_gpio",
- .table = {
- GPIO_LOOKUP("gpio-0", GPIO_SPI_CLK,
- "sck", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-0", GPIO_SPI_MOSI,
- "mosi", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-0", GPIO_SPI_MISO,
- "miso", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX("gpio-0", GPIO_SPDIF_CS,
- "cs", 0, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX("gpio-0", GPIO_ACCEL_CS,
- "cs", 1, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX("gpio-0", GPIO_MCLK_DAC_CS,
- "cs", 2, GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static struct lis3lv02d_platform_data lis3_pdata = {
- .click_flags = LIS3_CLICK_SINGLE_X |
- LIS3_CLICK_SINGLE_Y |
- LIS3_CLICK_SINGLE_Z,
- .irq_cfg = LIS3_IRQ1_CLICK | LIS3_IRQ2_CLICK,
- .wakeup_flags = LIS3_WAKEUP_X_LO | LIS3_WAKEUP_X_HI |
- LIS3_WAKEUP_Y_LO | LIS3_WAKEUP_Y_HI |
- LIS3_WAKEUP_Z_LO | LIS3_WAKEUP_Z_HI,
- .wakeup_thresh = 10,
- .click_thresh_x = 10,
- .click_thresh_y = 10,
- .click_thresh_z = 10,
-};
-
-#define SPI_AK4104 \
-{ \
- .modalias = "ak4104-codec", \
- .max_speed_hz = 10000, \
- .bus_num = 0, \
- .chip_select = 0, \
-}
-
-#define SPI_LIS3 \
-{ \
- .modalias = "lis3lv02d_spi", \
- .max_speed_hz = 1000000, \
- .bus_num = 0, \
- .chip_select = 1, \
- .platform_data = &lis3_pdata, \
- .irq = PXA_GPIO_TO_IRQ(GPIO_ACCEL_IRQ), \
-}
-
-#define SPI_DAC7512 \
-{ \
- .modalias = "dac7512", \
- .max_speed_hz = 1000000, \
- .bus_num = 0, \
- .chip_select = 2, \
-}
-
-static struct spi_board_info connector_spi_devices[] __initdata = {
- SPI_AK4104,
- SPI_DAC7512,
-};
-
-static struct spi_board_info speaker_spi_devices[] __initdata = {
- SPI_DAC7512,
-};
-
-static struct spi_board_info controller_spi_devices[] __initdata = {
- SPI_LIS3,
-};
-
-/**
- * MMC for Marvell Libertas 8688 via SDIO
- */
-
-static int raumfeld_mci_init(struct device *dev, irq_handler_t isr, void *data)
-{
- gpio_set_value(GPIO_W2W_RESET, 1);
- gpio_set_value(GPIO_W2W_PDN, 1);
-
- return 0;
-}
-
-static void raumfeld_mci_exit(struct device *dev, void *data)
-{
- gpio_set_value(GPIO_W2W_RESET, 0);
- gpio_set_value(GPIO_W2W_PDN, 0);
-}
-
-static struct pxamci_platform_data raumfeld_mci_platform_data = {
- .init = raumfeld_mci_init,
- .exit = raumfeld_mci_exit,
- .detect_delay_ms = 200,
-};
-
-/*
- * External power / charge logic
- */
-
-static int power_supply_init(struct device *dev)
-{
- return 0;
-}
-
-static void power_supply_exit(struct device *dev)
-{
-}
-
-static int raumfeld_is_ac_online(void)
-{
- return !gpio_get_value(GPIO_CHARGE_DC_OK);
-}
-
-static int raumfeld_is_usb_online(void)
-{
- return 0;
-}
-
-static char *raumfeld_power_supplicants[] = { "ds2760-battery.0" };
-
-static void raumfeld_power_signal_charged(void)
-{
- struct power_supply *psy =
- power_supply_get_by_name(raumfeld_power_supplicants[0]);
-
- if (psy) {
- power_supply_set_battery_charged(psy);
- power_supply_put(psy);
- }
-}
-
-static int raumfeld_power_resume(void)
-{
- /* check if GPIO_CHARGE_DONE went low while we were sleeping */
- if (!gpio_get_value(GPIO_CHARGE_DONE))
- raumfeld_power_signal_charged();
-
- return 0;
-}
-
-static struct pda_power_pdata power_supply_info = {
- .init = power_supply_init,
- .is_ac_online = raumfeld_is_ac_online,
- .is_usb_online = raumfeld_is_usb_online,
- .exit = power_supply_exit,
- .supplied_to = raumfeld_power_supplicants,
- .num_supplicants = ARRAY_SIZE(raumfeld_power_supplicants),
- .resume = raumfeld_power_resume,
-};
-
-static struct resource power_supply_resources[] = {
- {
- .name = "ac",
- .flags = IORESOURCE_IRQ |
- IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
- .start = GPIO_CHARGE_DC_OK,
- .end = GPIO_CHARGE_DC_OK,
- },
-};
-
-static irqreturn_t charge_done_irq(int irq, void *dev_id)
-{
- raumfeld_power_signal_charged();
- return IRQ_HANDLED;
-}
-
-static struct platform_device raumfeld_power_supply = {
- .name = "pda-power",
- .id = -1,
- .dev = {
- .platform_data = &power_supply_info,
- },
- .resource = power_supply_resources,
- .num_resources = ARRAY_SIZE(power_supply_resources),
-};
-
-static void __init raumfeld_power_init(void)
-{
- int ret;
-
- /* Set PEN2 high to enable maximum charge current */
- ret = gpio_request(GPIO_CHRG_PEN2, "CHRG_PEN2");
- if (ret < 0)
- pr_warn("Unable to request GPIO_CHRG_PEN2\n");
- else
- gpio_direction_output(GPIO_CHRG_PEN2, 1);
-
- ret = gpio_request(GPIO_CHARGE_DC_OK, "CABLE_DC_OK");
- if (ret < 0)
- pr_warn("Unable to request GPIO_CHARGE_DC_OK\n");
-
- ret = gpio_request(GPIO_CHARGE_USB_SUSP, "CHARGE_USB_SUSP");
- if (ret < 0)
- pr_warn("Unable to request GPIO_CHARGE_USB_SUSP\n");
- else
- gpio_direction_output(GPIO_CHARGE_USB_SUSP, 0);
-
- power_supply_resources[0].start = gpio_to_irq(GPIO_CHARGE_DC_OK);
- power_supply_resources[0].end = gpio_to_irq(GPIO_CHARGE_DC_OK);
-
- ret = request_irq(gpio_to_irq(GPIO_CHARGE_DONE),
- &charge_done_irq, IORESOURCE_IRQ_LOWEDGE,
- "charge_done", NULL);
-
- if (ret < 0)
- printk(KERN_ERR "%s: unable to register irq %d\n", __func__,
- GPIO_CHARGE_DONE);
- else
- platform_device_register(&raumfeld_power_supply);
-}
-
-/* Fixed regulator for AUDIO_VA, 0-0048 maps to the cs4270 codec device */
-
-static struct regulator_consumer_supply audio_va_consumer_supply =
- REGULATOR_SUPPLY("va", "0-0048");
-
-static struct regulator_init_data audio_va_initdata = {
- .consumer_supplies = &audio_va_consumer_supply,
- .num_consumer_supplies = 1,
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct fixed_voltage_config audio_va_config = {
- .supply_name = "audio_va",
- .microvolts = 5000000,
- .enable_high = 1,
- .enabled_at_boot = 0,
- .init_data = &audio_va_initdata,
-};
-
-static struct platform_device audio_va_device = {
- .name = "reg-fixed-voltage",
- .id = 0,
- .dev = {
- .platform_data = &audio_va_config,
- },
-};
-
-static struct gpiod_lookup_table audio_va_gpiod_table = {
- .dev_id = "reg-fixed-voltage.0",
- .table = {
- GPIO_LOOKUP("gpio-pxa", GPIO_AUDIO_VA_ENABLE,
- NULL, GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-/* Dummy supplies for Codec's VD/VLC */
-
-static struct regulator_consumer_supply audio_dummy_supplies[] = {
- REGULATOR_SUPPLY("vd", "0-0048"),
- REGULATOR_SUPPLY("vlc", "0-0048"),
-};
-
-static struct regulator_init_data audio_dummy_initdata = {
- .consumer_supplies = audio_dummy_supplies,
- .num_consumer_supplies = ARRAY_SIZE(audio_dummy_supplies),
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct fixed_voltage_config audio_dummy_config = {
- .supply_name = "audio_vd",
- .microvolts = 3300000,
- .init_data = &audio_dummy_initdata,
-};
-
-static struct platform_device audio_supply_dummy_device = {
- .name = "reg-fixed-voltage",
- .id = 1,
- .dev = {
- .platform_data = &audio_dummy_config,
- },
-};
-
-static struct platform_device *audio_regulator_devices[] = {
- &audio_va_device,
- &audio_supply_dummy_device,
-};
-
-/**
- * Regulator support via MAX8660
- */
-
-static struct regulator_consumer_supply vcc_mmc_supply =
- REGULATOR_SUPPLY("vmmc", "pxa2xx-mci.0");
-
-static struct regulator_init_data vcc_mmc_init_data = {
- .constraints = {
- .min_uV = 3300000,
- .max_uV = 3300000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL,
- .valid_ops_mask = REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_MODE,
- },
- .consumer_supplies = &vcc_mmc_supply,
- .num_consumer_supplies = 1,
-};
-
-static struct max8660_subdev_data max8660_v6_subdev_data = {
- .id = MAX8660_V6,
- .name = "vmmc",
- .platform_data = &vcc_mmc_init_data,
-};
-
-static struct max8660_platform_data max8660_pdata = {
- .subdevs = &max8660_v6_subdev_data,
- .num_subdevs = 1,
-};
-
-/**
- * I2C devices
- */
-
-static struct i2c_board_info raumfeld_pwri2c_board_info = {
- .type = "max8660",
- .addr = 0x34,
- .platform_data = &max8660_pdata,
-};
-
-static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
- .type = "cs4270",
- .addr = 0x48,
-};
-
-static struct gpiod_lookup_table raumfeld_controller_gpios_table = {
- .dev_id = "0-000a",
- .table = {
- GPIO_LOOKUP("gpio-pxa",
- GPIO_TOUCH_IRQ, "attn", GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static const struct resource raumfeld_controller_resources[] __initconst = {
- {
- .start = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ),
- .end = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ),
- .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH,
- },
-};
-
-static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
- .type = "eeti_ts",
- .addr = 0x0a,
- .resources = raumfeld_controller_resources,
- .num_resources = ARRAY_SIZE(raumfeld_controller_resources),
-};
-
-static struct platform_device *raumfeld_common_devices[] = {
- &raumfeld_gpio_keys_device,
- &raumfeld_led_device,
- &raumfeld_spi_device,
-};
-
-static void __init raumfeld_audio_init(void)
-{
- int ret;
-
- ret = gpio_request(GPIO_CODEC_RESET, "cs4270 reset");
- if (ret < 0)
- pr_warn("unable to request GPIO_CODEC_RESET\n");
- else
- gpio_direction_output(GPIO_CODEC_RESET, 1);
-
- ret = gpio_request(GPIO_SPDIF_RESET, "ak4104 s/pdif reset");
- if (ret < 0)
- pr_warn("unable to request GPIO_SPDIF_RESET\n");
- else
- gpio_direction_output(GPIO_SPDIF_RESET, 1);
-
- ret = gpio_request(GPIO_MCLK_RESET, "MCLK reset");
- if (ret < 0)
- pr_warn("unable to request GPIO_MCLK_RESET\n");
- else
- gpio_direction_output(GPIO_MCLK_RESET, 1);
-
- gpiod_add_lookup_table(&audio_va_gpiod_table);
- platform_add_devices(ARRAY_AND_SIZE(audio_regulator_devices));
-}
-
-static void __init raumfeld_common_init(void)
-{
- int ret;
-
- /* The on/off button polarity has changed after revision 1 */
- if ((system_rev & 0xff) > 1) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(gpio_keys_button); i++)
- if (!strcmp(gpio_keys_button[i].desc, "on_off button"))
- gpio_keys_button[i].active_low = 1;
- }
-
- enable_irq_wake(IRQ_WAKEUP0);
-
- pxa3xx_set_nand_info(&raumfeld_nand_info);
- pxa3xx_set_i2c_power_info(NULL);
- pxa_set_ohci_info(&raumfeld_ohci_info);
- pxa_set_mci_info(&raumfeld_mci_platform_data);
- pxa_set_i2c_info(NULL);
- pxa_set_ffuart_info(NULL);
-
- ret = gpio_request(GPIO_W2W_RESET, "Wi2Wi reset");
- if (ret < 0)
- pr_warn("Unable to request GPIO_W2W_RESET\n");
- else
- gpio_direction_output(GPIO_W2W_RESET, 0);
-
- ret = gpio_request(GPIO_W2W_PDN, "Wi2Wi powerup");
- if (ret < 0)
- pr_warn("Unable to request GPIO_W2W_PDN\n");
- else
- gpio_direction_output(GPIO_W2W_PDN, 0);
-
- /* this can be used to switch off the device */
- ret = gpio_request(GPIO_SHUTDOWN_SUPPLY, "supply shutdown");
- if (ret < 0)
- pr_warn("Unable to request GPIO_SHUTDOWN_SUPPLY\n");
- else
- gpio_direction_output(GPIO_SHUTDOWN_SUPPLY, 0);
-
- gpiod_add_lookup_table(&raumfeld_spi_gpiod_table);
- platform_add_devices(ARRAY_AND_SIZE(raumfeld_common_devices));
- i2c_register_board_info(1, &raumfeld_pwri2c_board_info, 1);
-}
-
-static void __init __maybe_unused raumfeld_controller_init(void)
-{
- int ret;
-
- pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_controller_pin_config));
-
- gpiod_add_lookup_table(&raumfeld_rotary_gpios_table);
- device_add_properties(&rotary_encoder_device.dev,
- raumfeld_rotary_properties);
- platform_device_register(&rotary_encoder_device);
-
- spi_register_board_info(ARRAY_AND_SIZE(controller_spi_devices));
-
- gpiod_add_lookup_table(&raumfeld_controller_gpios_table);
- i2c_register_board_info(0, &raumfeld_controller_i2c_board_info, 1);
-
- ret = gpio_request(GPIO_SHUTDOWN_BATT, "battery shutdown");
- if (ret < 0)
- pr_warn("Unable to request GPIO_SHUTDOWN_BATT\n");
- else
- gpio_direction_output(GPIO_SHUTDOWN_BATT, 0);
-
- raumfeld_common_init();
- raumfeld_power_init();
- raumfeld_lcd_init();
- raumfeld_w1_init();
-}
-
-static void __init __maybe_unused raumfeld_connector_init(void)
-{
- pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_connector_pin_config));
- spi_register_board_info(ARRAY_AND_SIZE(connector_spi_devices));
- i2c_register_board_info(0, &raumfeld_connector_i2c_board_info, 1);
-
- platform_device_register(&smc91x_device);
-
- raumfeld_audio_init();
- raumfeld_common_init();
-}
-
-static void __init __maybe_unused raumfeld_speaker_init(void)
-{
- pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_speaker_pin_config));
- spi_register_board_info(ARRAY_AND_SIZE(speaker_spi_devices));
- i2c_register_board_info(0, &raumfeld_connector_i2c_board_info, 1);
-
- platform_device_register(&smc91x_device);
-
- gpiod_add_lookup_table(&raumfeld_rotary_gpios_table);
- device_add_properties(&rotary_encoder_device.dev,
- raumfeld_rotary_properties);
- platform_device_register(&rotary_encoder_device);
-
- raumfeld_audio_init();
- raumfeld_common_init();
-}
-
-/* physical memory regions */
-#define RAUMFELD_SDRAM_BASE 0xa0000000 /* SDRAM region */
-
-#ifdef CONFIG_MACH_RAUMFELD_RC
-MACHINE_START(RAUMFELD_RC, "Raumfeld Controller")
- .atag_offset = 0x100,
- .init_machine = raumfeld_controller_init,
- .map_io = pxa3xx_map_io,
- .nr_irqs = PXA_NR_IRQS,
- .init_irq = pxa3xx_init_irq,
- .handle_irq = pxa3xx_handle_irq,
- .init_time = pxa_timer_init,
- .restart = pxa_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_RAUMFELD_CONNECTOR
-MACHINE_START(RAUMFELD_CONNECTOR, "Raumfeld Connector")
- .atag_offset = 0x100,
- .init_machine = raumfeld_connector_init,
- .map_io = pxa3xx_map_io,
- .nr_irqs = PXA_NR_IRQS,
- .init_irq = pxa3xx_init_irq,
- .handle_irq = pxa3xx_handle_irq,
- .init_time = pxa_timer_init,
- .restart = pxa_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_RAUMFELD_SPEAKER
-MACHINE_START(RAUMFELD_SPEAKER, "Raumfeld Speaker")
- .atag_offset = 0x100,
- .init_machine = raumfeld_speaker_init,
- .map_io = pxa3xx_map_io,
- .nr_irqs = PXA_NR_IRQS,
- .init_irq = pxa3xx_init_irq,
- .handle_irq = pxa3xx_handle_irq,
- .init_time = pxa_timer_init,
- .restart = pxa_restart,
-MACHINE_END
-#endif
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 306818e2cf54..8dac824a85df 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -572,7 +572,7 @@ static struct spi_board_info spitz_spi_devices[] = {
},
};
-static struct pxa2xx_spi_master spitz_spi_info = {
+static struct pxa2xx_spi_controller spitz_spi_info = {
.num_chipselect = 3,
};
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index e0d6c872270a..c28d19b126a7 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -337,15 +337,15 @@ static struct platform_device stargate2_flash_device = {
.num_resources = 1,
};
-static struct pxa2xx_spi_master pxa_ssp_master_0_info = {
+static struct pxa2xx_spi_controller pxa_ssp_master_0_info = {
.num_chipselect = 1,
};
-static struct pxa2xx_spi_master pxa_ssp_master_1_info = {
+static struct pxa2xx_spi_controller pxa_ssp_master_1_info = {
.num_chipselect = 1,
};
-static struct pxa2xx_spi_master pxa_ssp_master_2_info = {
+static struct pxa2xx_spi_controller pxa_ssp_master_2_info = {
.num_chipselect = 1,
};
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index e8a93c088c35..7439798d58e4 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -813,7 +813,7 @@ static struct platform_device tosa_bt_device = {
.dev.platform_data = &tosa_bt_data,
};
-static struct pxa2xx_spi_master pxa_ssp_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index e2353e75bb28..ad082e11e2a4 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -607,12 +607,12 @@ static struct spi_board_info spi_board_info[] __initdata = {
},
};
-static struct pxa2xx_spi_master pxa_ssp1_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp1_master_info = {
.num_chipselect = 1,
.enable_dma = 1,
};
-static struct pxa2xx_spi_master pxa_ssp2_master_info = {
+static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
.num_chipselect = 1,
};
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index c411f79d4cb5..3fd1119c14d5 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -391,7 +391,7 @@ static struct platform_device zeus_sram_device = {
};
/* SPI interface on SSP3 */
-static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
+static struct pxa2xx_spi_controller pxa2xx_spi_ssp3_master_info = {
.num_chipselect = 1,
.enable_dma = 1,
};
@@ -426,7 +426,7 @@ static struct gpiod_lookup_table can_regulator_gpiod_table = {
.dev_id = "reg-fixed-voltage.0",
.table = {
GPIO_LOOKUP("gpio-pxa", ZEUS_CAN_SHDN_GPIO,
- NULL, GPIO_ACTIVE_HIGH),
+ NULL, GPIO_ACTIVE_LOW),
{ },
},
};
@@ -547,7 +547,6 @@ static struct regulator_init_data zeus_ohci_regulator_data = {
static struct fixed_voltage_config zeus_ohci_regulator_config = {
.supply_name = "vbus2",
.microvolts = 5000000, /* 5.0V */
- .enable_high = 1,
.startup_delay = 0,
.init_data = &zeus_ohci_regulator_data,
};
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
index 058ce73137e8..5d819b6ea428 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
@@ -65,16 +65,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
switch (val) {
case CPUFREQ_PRECHANGE:
- if (old_dvs & !new_dvs ||
- cur_dvs & !new_dvs) {
+ if ((old_dvs && !new_dvs) ||
+ (cur_dvs && !new_dvs)) {
pr_debug("%s: exiting dvs\n", __func__);
cur_dvs = false;
gpio_set_value(OSIRIS_GPIO_DVS, 1);
}
break;
case CPUFREQ_POSTCHANGE:
- if (!old_dvs & new_dvs ||
- !cur_dvs & new_dvs) {
+ if ((!old_dvs && new_dvs) ||
+ (!cur_dvs && new_dvs)) {
pr_debug("entering dvs\n");
cur_dvs = true;
gpio_set_value(OSIRIS_GPIO_DVS, 0);
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index dfa42496ec27..d09c3f236186 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -469,7 +469,6 @@ static struct regulator_consumer_supply assabet_cf_vcc_consumers[] = {
static struct fixed_voltage_config assabet_cf_vcc_pdata __initdata = {
.supply_name = "cf-power",
.microvolts = 3300000,
- .enable_high = 1,
};
static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = {
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
index 8e50daa99151..dc526ef2e9b3 100644
--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -40,6 +40,7 @@
struct regulator_quirk {
struct list_head list;
const struct of_device_id *id;
+ struct device_node *np;
struct of_phandle_args irq_args;
struct i2c_msg i2c_msg;
bool shared; /* IRQ line is shared */
@@ -101,6 +102,9 @@ static int regulator_quirk_notify(struct notifier_block *nb,
if (!pos->shared)
continue;
+ if (pos->np->parent != client->dev.parent->of_node)
+ continue;
+
dev_info(&client->dev, "clearing %s@0x%02x interrupts\n",
pos->id->compatible, pos->i2c_msg.addr);
@@ -165,6 +169,7 @@ static int __init rcar_gen2_regulator_quirk(void)
memcpy(&quirk->i2c_msg, id->data, sizeof(quirk->i2c_msg));
quirk->id = id;
+ quirk->np = np;
quirk->i2c_msg.addr = addr;
ret = of_irq_parse_one(np, 0, argsa);
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index 5fb6f79059a8..816da0eb6616 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -19,6 +19,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/reboot.h>
+#include <linux/reset/socfpga.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
@@ -64,6 +65,7 @@ static void __init socfpga_init_irq(void)
if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
socfpga_init_ocram_ecc();
+ socfpga_reset_init();
}
static void __init socfpga_arria10_init_irq(void)
@@ -74,6 +76,7 @@ static void __init socfpga_arria10_init_irq(void)
socfpga_init_arria10_l2_ecc();
if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
socfpga_init_arria10_ocram_ecc();
+ socfpga_reset_init();
}
static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 8a7f301839c2..933b6930f024 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -14,6 +14,7 @@
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/reset/sunxi.h>
#include <asm/mach/arch.h>
#include <asm/secure_cntvoff.h>
@@ -37,7 +38,6 @@ static const char * const sun6i_board_dt_compat[] = {
NULL,
};
-extern void __init sun6i_reset_init(void);
static void __init sun6i_timer_init(void)
{
of_clk_init(NULL);
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
index 028e50c6383f..a32c3b631484 100644
--- a/arch/arm/mach-tango/pm.c
+++ b/arch/arm/mach-tango/pm.c
@@ -3,6 +3,7 @@
#include <linux/suspend.h>
#include <asm/suspend.h>
#include "smc.h"
+#include "pm.h"
static int tango_pm_powerdown(unsigned long arg)
{
@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init tango_pm_init(void)
+void __init tango_pm_init(void)
{
suspend_set_ops(&tango_pm_ops);
- return 0;
}
-
-late_initcall(tango_pm_init);
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
new file mode 100644
index 000000000000..35ea705a0ee2
--- /dev/null
+++ b/arch/arm/mach-tango/pm.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_SUSPEND
+void __init tango_pm_init(void);
+#else
+#define tango_pm_init NULL
+#endif
diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
index 677dd7b5efd9..824f90737b04 100644
--- a/arch/arm/mach-tango/setup.c
+++ b/arch/arm/mach-tango/setup.c
@@ -2,6 +2,7 @@
#include <asm/mach/arch.h>
#include <asm/hardware/cache-l2x0.h>
#include "smc.h"
+#include "pm.h"
static void tango_l2c_write(unsigned long val, unsigned int reg)
{
@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
.dt_compat = tango_dt_compat,
.l2c_aux_mask = ~0,
.l2c_write_sec = tango_l2c_write,
+ .init_late = tango_pm_init,
MACHINE_END
diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h
index 9e5b2f869fc8..9bc291e76887 100644
--- a/arch/arm/mach-tegra/iomap.h
+++ b/arch/arm/mach-tegra/iomap.h
@@ -79,15 +79,24 @@
#define TEGRA_PMC_BASE 0x7000E400
#define TEGRA_PMC_SIZE SZ_256
+#define TEGRA_MC_BASE 0x7000F000
+#define TEGRA_MC_SIZE SZ_1K
+
#define TEGRA_EMC_BASE 0x7000F400
#define TEGRA_EMC_SIZE SZ_1K
+#define TEGRA114_MC_BASE 0x70019000
+#define TEGRA114_MC_SIZE SZ_4K
+
#define TEGRA_EMC0_BASE 0x7001A000
#define TEGRA_EMC0_SIZE SZ_2K
#define TEGRA_EMC1_BASE 0x7001A800
#define TEGRA_EMC1_SIZE SZ_2K
+#define TEGRA124_MC_BASE 0x70019000
+#define TEGRA124_MC_SIZE SZ_4K
+
#define TEGRA124_EMC_BASE 0x7001B000
#define TEGRA124_EMC_SIZE SZ_2K
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index 5c8e638ee51a..dedeebfccc55 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -32,7 +32,6 @@
#define EMC_CFG 0xc
#define EMC_ADR_CFG 0x10
-#define EMC_REFRESH 0x70
#define EMC_NOP 0xdc
#define EMC_SELF_REF 0xe0
#define EMC_REQ_CTRL 0x2b0
@@ -397,7 +396,6 @@ padload_done:
mov r1, #1
str r1, [r0, #EMC_NOP]
str r1, [r0, #EMC_NOP]
- str r1, [r0, #EMC_REFRESH]
emc_device_mask r1, r0
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index dd4a67dabd91..d0b4c486ddbf 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -29,7 +29,6 @@
#define EMC_CFG 0xc
#define EMC_ADR_CFG 0x10
#define EMC_TIMING_CONTROL 0x28
-#define EMC_REFRESH 0x70
#define EMC_NOP 0xdc
#define EMC_SELF_REF 0xe0
#define EMC_MRW 0xe8
@@ -45,6 +44,8 @@
#define EMC_XM2VTTGENPADCTRL 0x310
#define EMC_XM2VTTGENPADCTRL2 0x314
+#define MC_EMEM_ARB_CFG 0x90
+
#define PMC_CTRL 0x0
#define PMC_CTRL_SIDE_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */
@@ -419,6 +420,22 @@ _pll_m_c_x_done:
movweq r0, #:lower16:TEGRA124_EMC_BASE
movteq r0, #:upper16:TEGRA124_EMC_BASE
+ cmp r10, #TEGRA30
+ moveq r2, #0x20
+ movweq r4, #:lower16:TEGRA_MC_BASE
+ movteq r4, #:upper16:TEGRA_MC_BASE
+ cmp r10, #TEGRA114
+ moveq r2, #0x34
+ movweq r4, #:lower16:TEGRA114_MC_BASE
+ movteq r4, #:upper16:TEGRA114_MC_BASE
+ cmp r10, #TEGRA124
+ moveq r2, #0x20
+ movweq r4, #:lower16:TEGRA124_MC_BASE
+ movteq r4, #:upper16:TEGRA124_MC_BASE
+
+ ldr r1, [r5, r2] @ restore MC_EMEM_ARB_CFG
+ str r1, [r4, #MC_EMEM_ARB_CFG]
+
exit_self_refresh:
ldr r1, [r5, #0xC] @ restore EMC_XM2VTTGENPADCTRL
str r1, [r0, #EMC_XM2VTTGENPADCTRL]
@@ -459,7 +476,6 @@ emc_wait_auto_cal_onetime:
cmp r10, #TEGRA30
streq r1, [r0, #EMC_NOP]
streq r1, [r0, #EMC_NOP]
- streq r1, [r0, #EMC_REFRESH]
emc_device_mask r1, r0
@@ -521,6 +537,8 @@ zcal_done:
ldr r1, [r5, #0x0] @ restore EMC_CFG
str r1, [r0, #EMC_CFG]
+ emc_timing_update r1, r0
+
/* Tegra114 had dual EMC channel, now config the other one */
cmp r10, #TEGRA114
bne __no_dual_emc_chanl
@@ -546,6 +564,7 @@ tegra30_sdram_pad_address:
.word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14
.word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18
.word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c
+ .word TEGRA_MC_BASE + MC_EMEM_ARB_CFG @0x20
tegra30_sdram_pad_address_end:
tegra114_sdram_pad_address:
@@ -562,6 +581,7 @@ tegra114_sdram_pad_address:
.word TEGRA_EMC1_BASE + EMC_AUTO_CAL_INTERVAL @0x28
.word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL @0x2c
.word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL2 @0x30
+ .word TEGRA114_MC_BASE + MC_EMEM_ARB_CFG @0x34
tegra114_sdram_pad_adress_end:
tegra124_sdram_pad_address:
@@ -573,6 +593,7 @@ tegra124_sdram_pad_address:
.word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14
.word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18
.word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c
+ .word TEGRA124_MC_BASE + MC_EMEM_ARB_CFG @0x20
tegra124_sdram_pad_address_end:
tegra30_sdram_pad_size:
diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c
index afe5b4c7b164..99bcd074916a 100644
--- a/arch/arm/mm/cache-l2x0-pmu.c
+++ b/arch/arm/mm/cache-l2x0-pmu.c
@@ -314,14 +314,6 @@ static int l2x0_pmu_event_init(struct perf_event *event)
event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
if (event->cpu < 0)
return -EINVAL;
@@ -544,6 +536,7 @@ static __init int l2x0_pmu_init(void)
.del = l2x0_pmu_event_del,
.event_init = l2x0_pmu_event_init,
.attr_groups = l2x0_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
l2x0_pmu_reset();
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1e2922e447c..1e3e08a1c456 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev)
return;
arm_teardown_iommu_dma_ops(dev);
+ /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
+ set_dma_ops(dev, NULL);
}
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 25b3ee85066e..c8bfbbfdfcc3 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1083,12 +1083,17 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src,
/* Arithmatic Operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
- const u8 rn, struct jit_ctx *ctx, u8 op) {
+ const u8 rn, struct jit_ctx *ctx, u8 op,
+ bool is_jmp64) {
switch (op) {
case BPF_JSET:
- emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
- emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
- emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
+ if (is_jmp64) {
+ emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
+ emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
+ emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
+ } else {
+ emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
+ }
break;
case BPF_JEQ:
case BPF_JNE:
@@ -1096,18 +1101,25 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
case BPF_JGE:
case BPF_JLE:
case BPF_JLT:
- emit(ARM_CMP_R(rd, rm), ctx);
- _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
+ if (is_jmp64) {
+ emit(ARM_CMP_R(rd, rm), ctx);
+ /* Only compare low halves if high halves are equal. */
+ _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
+ } else {
+ emit(ARM_CMP_R(rt, rn), ctx);
+ }
break;
case BPF_JSLE:
case BPF_JSGT:
emit(ARM_CMP_R(rn, rt), ctx);
- emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
+ if (is_jmp64)
+ emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
break;
case BPF_JSLT:
case BPF_JSGE:
emit(ARM_CMP_R(rt, rn), ctx);
- emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
+ if (is_jmp64)
+ emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
break;
}
}
@@ -1615,6 +1627,17 @@ exit:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
/* Setup source registers */
rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
@@ -1641,6 +1664,17 @@ exit:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
if (off == 0)
break;
rm = tmp2[0];
@@ -1652,7 +1686,8 @@ go_jmp:
rd = arm_bpf_get_reg64(dst, tmp, ctx);
/* Check for the condition */
- emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code));
+ emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
+ BPF_CLASS(code) == BPF_JMP);
/* Setup JUMP instruction */
jmp_offset = bpf2a32_offset(i+off, i, ctx);
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index f4e58bcdaa43..13a05f759552 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -62,6 +62,7 @@
#define ARM_INST_ADDS_I 0x02900000
#define ARM_INST_AND_R 0x00000000
+#define ARM_INST_ANDS_R 0x00100000
#define ARM_INST_AND_I 0x02000000
#define ARM_INST_BIC_R 0x01c00000
@@ -172,6 +173,7 @@
#define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm)
#define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm)
+#define ARM_ANDS_R(rd, rn, rm) _AL3_R(ARM_INST_ANDS, rd, rn, rm)
#define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm)
#define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm)
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index a2399fd66e97..a6c81ce00f52 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -18,7 +18,7 @@
#include <linux/clkdev.h>
#include <linux/mv643xx_eth.h>
#include <linux/mv643xx_i2c.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
#include <linux/platform_data/dma-mv_xor.h>
#include <linux/platform_data/usb-ehci-orion.h>
#include <plat/common.h>
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ed36dcab80f1..f51919974183 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
if (ssp == NULL)
return -ENODEV;
- iounmap(ssp->mmio_base);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
- kfree(ssp);
return 0;
}
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 2c118a6ab358..0dc23fc227ed 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
- memcpy(code, (unsigned char *)optprobe_template_entry,
+ memcpy(code, (unsigned long *)&optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 8edf93b4490f..9016f4081bb9 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -24,7 +24,7 @@
10 common unlink sys_unlink
11 common execve sys_execve
12 common chdir sys_chdir
-13 oabi time sys_time
+13 oabi time sys_time32
14 common mknod sys_mknod
15 common chmod sys_chmod
16 common lchown sys_lchown16
@@ -36,12 +36,12 @@
22 oabi umount sys_oldumount
23 common setuid sys_setuid16
24 common getuid sys_getuid16
-25 oabi stime sys_stime
+25 oabi stime sys_stime32
26 common ptrace sys_ptrace
27 oabi alarm sys_alarm
# 28 was sys_fstat
29 common pause sys_pause
-30 oabi utime sys_utime
+30 oabi utime sys_utime32
# 31 was sys_stty
# 32 was sys_gtty
33 common access sys_access
@@ -137,7 +137,7 @@
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
# 123 was sys_modify_ldt
-124 common adjtimex sys_adjtimex
+124 common adjtimex sys_adjtimex_time32
125 common mprotect sys_mprotect
126 common sigprocmask sys_sigprocmask
# 127 was sys_create_module
@@ -174,8 +174,8 @@
158 common sched_yield sys_sched_yield
159 common sched_get_priority_max sys_sched_get_priority_max
160 common sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval
-162 common nanosleep sys_nanosleep
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
163 common mremap sys_mremap
164 common setresuid sys_setresuid16
165 common getresuid sys_getresuid16
@@ -190,7 +190,7 @@
174 common rt_sigaction sys_rt_sigaction
175 common rt_sigprocmask sys_rt_sigprocmask
176 common rt_sigpending sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32
178 common rt_sigqueueinfo sys_rt_sigqueueinfo
179 common rt_sigsuspend sys_rt_sigsuspend
180 common pread64 sys_pread64 sys_oabi_pread64
@@ -254,12 +254,12 @@
237 common fremovexattr sys_fremovexattr
238 common tkill sys_tkill
239 common sendfile64 sys_sendfile64
-240 common futex sys_futex
+240 common futex sys_futex_time32
241 common sched_setaffinity sys_sched_setaffinity
242 common sched_getaffinity sys_sched_getaffinity
243 common io_setup sys_io_setup
244 common io_destroy sys_io_destroy
-245 common io_getevents sys_io_getevents
+245 common io_getevents sys_io_getevents_time32
246 common io_submit sys_io_submit
247 common io_cancel sys_io_cancel
248 common exit_group sys_exit_group
@@ -272,26 +272,26 @@
# 255 for get_thread_area
256 common set_tid_address sys_set_tid_address
257 common timer_create sys_timer_create
-258 common timer_settime sys_timer_settime
-259 common timer_gettime sys_timer_gettime
+258 common timer_settime sys_timer_settime32
+259 common timer_gettime sys_timer_gettime32
260 common timer_getoverrun sys_timer_getoverrun
261 common timer_delete sys_timer_delete
-262 common clock_settime sys_clock_settime
-263 common clock_gettime sys_clock_gettime
-264 common clock_getres sys_clock_getres
-265 common clock_nanosleep sys_clock_nanosleep
+262 common clock_settime sys_clock_settime32
+263 common clock_gettime sys_clock_gettime32
+264 common clock_getres sys_clock_getres_time32
+265 common clock_nanosleep sys_clock_nanosleep_time32
266 common statfs64 sys_statfs64_wrapper
267 common fstatfs64 sys_fstatfs64_wrapper
268 common tgkill sys_tgkill
-269 common utimes sys_utimes
+269 common utimes sys_utimes_time32
270 common arm_fadvise64_64 sys_arm_fadvise64_64
271 common pciconfig_iobase sys_pciconfig_iobase
272 common pciconfig_read sys_pciconfig_read
273 common pciconfig_write sys_pciconfig_write
274 common mq_open sys_mq_open
275 common mq_unlink sys_mq_unlink
-276 common mq_timedsend sys_mq_timedsend
-277 common mq_timedreceive sys_mq_timedreceive
+276 common mq_timedsend sys_mq_timedsend_time32
+277 common mq_timedreceive sys_mq_timedreceive_time32
278 common mq_notify sys_mq_notify
279 common mq_getsetattr sys_mq_getsetattr
280 common waitid sys_waitid
@@ -314,19 +314,19 @@
297 common recvmsg sys_recvmsg
298 common semop sys_semop sys_oabi_semop
299 common semget sys_semget
-300 common semctl sys_semctl
+300 common semctl sys_old_semctl
301 common msgsnd sys_msgsnd
302 common msgrcv sys_msgrcv
303 common msgget sys_msgget
-304 common msgctl sys_msgctl
+304 common msgctl sys_old_msgctl
305 common shmat sys_shmat
306 common shmdt sys_shmdt
307 common shmget sys_shmget
-308 common shmctl sys_shmctl
+308 common shmctl sys_old_shmctl
309 common add_key sys_add_key
310 common request_key sys_request_key
311 common keyctl sys_keyctl
-312 common semtimedop sys_semtimedop sys_oabi_semtimedop
+312 common semtimedop sys_semtimedop_time32 sys_oabi_semtimedop
313 common vserver
314 common ioprio_set sys_ioprio_set
315 common ioprio_get sys_ioprio_get
@@ -340,7 +340,7 @@
323 common mkdirat sys_mkdirat
324 common mknodat sys_mknodat
325 common fchownat sys_fchownat
-326 common futimesat sys_futimesat
+326 common futimesat sys_futimesat_time32
327 common fstatat64 sys_fstatat64 sys_oabi_fstatat64
328 common unlinkat sys_unlinkat
329 common renameat sys_renameat
@@ -349,8 +349,8 @@
332 common readlinkat sys_readlinkat
333 common fchmodat sys_fchmodat
334 common faccessat sys_faccessat
-335 common pselect6 sys_pselect6
-336 common ppoll sys_ppoll
+335 common pselect6 sys_pselect6_time32
+336 common ppoll sys_ppoll_time32
337 common unshare sys_unshare
338 common set_robust_list sys_set_robust_list
339 common get_robust_list sys_get_robust_list
@@ -362,13 +362,13 @@
345 common getcpu sys_getcpu
346 common epoll_pwait sys_epoll_pwait
347 common kexec_load sys_kexec_load
-348 common utimensat sys_utimensat
+348 common utimensat sys_utimensat_time32
349 common signalfd sys_signalfd
350 common timerfd_create sys_timerfd_create
351 common eventfd sys_eventfd
352 common fallocate sys_fallocate
-353 common timerfd_settime sys_timerfd_settime
-354 common timerfd_gettime sys_timerfd_gettime
+353 common timerfd_settime sys_timerfd_settime32
+354 common timerfd_gettime sys_timerfd_gettime32
355 common signalfd4 sys_signalfd4
356 common eventfd2 sys_eventfd2
357 common epoll_create1 sys_epoll_create1
@@ -379,14 +379,14 @@
362 common pwritev sys_pwritev
363 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
364 common perf_event_open sys_perf_event_open
-365 common recvmmsg sys_recvmmsg
+365 common recvmmsg sys_recvmmsg_time32
366 common accept4 sys_accept4
367 common fanotify_init sys_fanotify_init
368 common fanotify_mark sys_fanotify_mark
369 common prlimit64 sys_prlimit64
370 common name_to_handle_at sys_name_to_handle_at
371 common open_by_handle_at sys_open_by_handle_at
-372 common clock_adjtime sys_clock_adjtime
+372 common clock_adjtime sys_clock_adjtime32
373 common syncfs sys_syncfs
374 common sendmmsg sys_sendmmsg
375 common setns sys_setns
@@ -413,4 +413,27 @@
396 common pkey_free sys_pkey_free
397 common statx sys_statx
398 common rseq sys_rseq
-399 common io_pgetevents sys_io_pgetevents
+399 common io_pgetevents sys_io_pgetevents_time32
+400 common migrate_pages sys_migrate_pages
+401 common kexec_file_load sys_kexec_file_load
+# 402 is unused
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index b0b80c0f09f3..b11bba542fac 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -113,8 +113,7 @@ ENTRY(privcmd_call)
/*
* Disable userspace access from kernel. This is fine to do it
- * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
- * called before.
+ * unconditionally as no set_fs(KERNEL_DS) is called before.
*/
uaccess_disable r4
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index cb44aa290e73..e1d44b903dfc 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -7,7 +7,6 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a4168d366127..cfbf307d6dc4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1467,6 +1467,10 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+ def_bool y
+ depends on HUGETLB_PAGE && MIGRATION
+
menu "Power management options"
source "kernel/power/Kconfig"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 251ecf34cb02..c5f6a57f16b8 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -52,6 +52,11 @@ config ARCH_BERLIN
help
This enables support for Marvell Berlin SoC Family
+config ARCH_BITMAIN
+ bool "Bitmain SoC Platforms"
+ help
+ This enables support for the Bitmain SoC Family.
+
config ARCH_BRCMSTB
bool "Broadcom Set-Top-Box SoCs"
select BRCMSTB_L2_IRQ
@@ -112,12 +117,13 @@ config ARCH_MESON
bool "Amlogic Platforms"
select PINCTRL
select PINCTRL_MESON
- select COMMON_CLK_AMLOGIC
select COMMON_CLK_GXBB
select COMMON_CLK_AXG
+ select COMMON_CLK_G12A
select MESON_IRQ_GPIO
help
- This enables support for the Amlogic S905 SoCs.
+ This enables support for the arm64 based Amlogic SoCs
+ such as the s905, S905X/D, S912, A113X/D or S905X/D2
config ARCH_MVEBU
bool "Marvell EBU SoC Family"
@@ -146,6 +152,10 @@ config ARCH_MXC
bool "ARMv8 based NXP i.MX SoC family"
select ARM64_ERRATUM_843419
select ARM64_ERRATUM_845719
+ select IMX_GPCV2
+ select IMX_GPCV2_PM_DOMAINS
+ select PM
+ select PM_GENERIC_DOMAINS
help
This enables support for the ARMv8 based SoCs in the
NXP i.MX family.
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 4690364d584b..5bc7533a12c7 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -7,6 +7,7 @@ subdir-y += amd
subdir-y += amlogic
subdir-y += apm
subdir-y += arm
+subdir-y += bitmain
subdir-y += broadcom
subdir-y += cavium
subdir-y += exynos
diff --git a/arch/arm64/boot/dts/actions/s700-cubieboard7.dts b/arch/arm64/boot/dts/actions/s700-cubieboard7.dts
index 28f3f4a0f7f0..63e375cd9eb4 100644
--- a/arch/arm64/boot/dts/actions/s700-cubieboard7.dts
+++ b/arch/arm64/boot/dts/actions/s700-cubieboard7.dts
@@ -30,6 +30,59 @@
};
};
+&i2c0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_default>;
+};
+
+&i2c1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_default>;
+};
+
+&i2c2 {
+ status = "disabled";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_default>;
+};
+
+&pinctrl {
+ i2c0_default: i2c0_default {
+ pinmux {
+ groups = "i2c0_mfp";
+ function = "i2c0";
+ };
+ pinconf {
+ pins = "i2c0_sclk", "i2c0_sdata";
+ bias-pull-up;
+ };
+ };
+
+ i2c1_default: i2c1_default {
+ pinmux {
+ groups = "i2c1_dummy";
+ function = "i2c1";
+ };
+ pinconf {
+ pins = "i2c1_sclk", "i2c1_sdata";
+ bias-pull-up;
+ };
+ };
+
+ i2c2_default: i2c2_default {
+ pinmux {
+ groups = "i2c2_dummy";
+ function = "i2c2";
+ };
+ pinconf {
+ pins = "i2c2_sclk", "i2c2_sdata";
+ bias-pull-up;
+ };
+ };
+};
+
&timer {
clocks = <&hosc>;
};
diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi
index 192c7b39c8c1..2006ad5424fa 100644
--- a/arch/arm64/boot/dts/actions/s700.dtsi
+++ b/arch/arm64/boot/dts/actions/s700.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/clock/actions,s700-cmu.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/actions,s700-reset.h>
/ {
compatible = "actions,s700";
@@ -18,28 +19,28 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
};
@@ -172,6 +173,47 @@
reg = <0x0 0xe0168000 0x0 0x1000>;
clocks = <&hosc>, <&losc>;
#clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ i2c0: i2c@e0170000 {
+ compatible = "actions,s700-i2c";
+ reg = <0 0xe0170000 0 0x1000>;
+ clocks = <&cmu CLK_I2C0>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@e0174000 {
+ compatible = "actions,s700-i2c";
+ reg = <0 0xe0174000 0 0x1000>;
+ clocks = <&cmu CLK_I2C1>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@e0178000 {
+ compatible = "actions,s700-i2c";
+ reg = <0 0xe0178000 0 0x1000>;
+ clocks = <&cmu CLK_I2C2>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@e017c000 {
+ compatible = "actions,s700-i2c";
+ reg = <0 0xe017c000 0 0x1000>;
+ clocks = <&cmu CLK_I2C3>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
};
sps: power-controller@e01b0100 {
@@ -186,5 +228,21 @@
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "timer1";
};
+
+ pinctrl: pinctrl@e01b0000 {
+ compatible = "actions,s700-pinctrl";
+ reg = <0x0 0xe01b0000 0x0 0x1000>;
+ clocks = <&cmu CLK_GPIO>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 136>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/actions/s900.dtsi b/arch/arm64/boot/dts/actions/s900.dtsi
index 491ddccc9038..df3a68a3ac97 100644
--- a/arch/arm64/boot/dts/actions/s900.dtsi
+++ b/arch/arm64/boot/dts/actions/s900.dtsi
@@ -5,6 +5,7 @@
#include <dt-bindings/clock/actions,s900-cmu.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/actions,s900-reset.h>
/ {
compatible = "actions,s900";
@@ -18,28 +19,28 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
};
@@ -184,6 +185,7 @@
reg = <0x0 0xe0160000 0x0 0x1000>;
clocks = <&hosc>, <&losc>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
i2c0: i2c@e0170000 {
@@ -253,6 +255,14 @@
gpio-controller;
gpio-ranges = <&pinctrl 0 0 146>;
#gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
};
timer: timer@e0228000 {
diff --git a/arch/arm64/boot/dts/al/alpine-v2.dtsi b/arch/arm64/boot/dts/al/alpine-v2.dtsi
index 5b7bef684256..d5e7e2bb4e6c 100644
--- a/arch/arm64/boot/dts/al/alpine-v2.dtsi
+++ b/arch/arm64/boot/dts/al/alpine-v2.dtsi
@@ -47,28 +47,28 @@
#size-cells = <0>;
cpu@0 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
device_type = "cpu";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu@1 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
device_type = "cpu";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu@2 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
device_type = "cpu";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu@3 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
device_type = "cpu";
reg = <0x0 0x3>;
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 9d0afd7d50ec..7793ebb5d2b8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -222,6 +222,14 @@
#include "axp803.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
&reg_aldo1 {
/*
* This regulator also drives the PE pingroup GPIOs,
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
index 31884dbc8838..f4e78531f639 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
@@ -186,6 +186,10 @@
#include "axp803.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
&reg_aldo2 {
regulator-always-on;
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
index f7a4bccaa5d4..01a9a52edae4 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
@@ -177,6 +177,14 @@
#include "axp803.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
&reg_aldo1 {
regulator-always-on;
regulator-min-microvolt = <2800000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
index b0c64f75792c..510f661229dc 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
@@ -188,11 +188,20 @@
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
};
};
#include "axp803.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
&reg_aldo1 {
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 216f2f5db5ef..c0b9cc7a6b3a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -169,6 +169,14 @@
#include "axp803.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
&reg_aldo2 {
regulator-always-on;
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
index c455b24dd079..7b7b14ba58e6 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
@@ -145,6 +145,14 @@
#include "axp803.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
&reg_aldo1 {
regulator-always-on;
regulator-min-microvolt = <2800000>;
@@ -239,7 +247,7 @@
};
/*
- * The A64 chip cannot work without this regulator off, although
+ * The A64 chip cannot work with this regulator off, although
* it seems to be only driving the AR100 core.
* Maybe we don't still know well about CPUs domain.
*/
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 837a03dee875..e628d063931b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -84,7 +84,7 @@
#size-cells = <0>;
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0>;
enable-method = "psci";
@@ -92,7 +92,7 @@
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <1>;
enable-method = "psci";
@@ -100,7 +100,7 @@
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <2>;
enable-method = "psci";
@@ -108,7 +108,7 @@
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <3>;
enable-method = "psci";
@@ -142,6 +142,15 @@
clock-output-names = "ext-osc32k";
};
+ pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
psci {
compatible = "arm,psci-0.2";
method = "smc";
@@ -191,6 +200,7 @@
timer {
compatible = "arm,armv8-timer";
+ allwinner,erratum-unknown1;
interrupts = <GIC_PPI 13
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 14
@@ -390,7 +400,7 @@
};
video-codec@1c0e000 {
- compatible = "allwinner,sun50i-h5-video-engine";
+ compatible = "allwinner,sun50i-a64-video-engine";
reg = <0x01c0e000 0x1000>;
clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>,
<&ccu CLK_DRAM_VE>;
@@ -549,6 +559,12 @@
interrupt-controller;
#interrupt-cells = <3>;
+ csi_pins: csi-pins {
+ pins = "PE0", "PE2", "PE3", "PE4", "PE5", "PE6",
+ "PE7", "PE8", "PE9", "PE10", "PE11";
+ function = "csi";
+ };
+
i2c0_pins: i2c0_pins {
pins = "PH0", "PH1";
function = "i2c0";
@@ -916,6 +932,20 @@
status = "disabled";
};
+ csi: csi@1cb0000 {
+ compatible = "allwinner,sun50i-a64-csi";
+ reg = <0x01cb0000 0x1000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_CSI>,
+ <&ccu CLK_CSI_SCLK>,
+ <&ccu CLK_DRAM_CSI>;
+ clock-names = "bus", "mod", "ram";
+ resets = <&ccu RST_BUS_CSI>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&csi_pins>;
+ status = "disabled";
+ };
+
hdmi: hdmi@1ee0000 {
compatible = "allwinner,sun50i-a64-dw-hdmi",
"allwinner,sun8i-a83t-dw-hdmi";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts
index 95e113ce8699..d68bdfea2271 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts
@@ -12,3 +12,7 @@
model = "Libre Computer Board ALL-H3-CC H5";
compatible = "libretech,all-h3-cc-h5", "allwinner,sun50i-h5";
};
+
+&mmc2 {
+ mmc-ddr-3_3v;
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index c22621b4b8e9..96acafd3a852 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -48,28 +48,28 @@
#size-cells = <0>;
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0>;
enable-method = "psci";
};
cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <1>;
enable-method = "psci";
};
cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <2>;
enable-method = "psci";
};
cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <3>;
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index d93a7add67e7..c9e861a50a63 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -22,28 +22,28 @@
#size-cells = <0>;
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0>;
enable-method = "psci";
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <1>;
enable-method = "psci";
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <2>;
enable-method = "psci";
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <3>;
enable-method = "psci";
@@ -167,6 +167,20 @@
reg = <0x0000 0x1e000>;
};
};
+
+ sram_c1: sram@1a00000 {
+ compatible = "mmio-sram";
+ reg = <0x01a00000 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x01a00000 0x200000>;
+
+ ve_sram: sram-section@0 {
+ compatible = "allwinner,sun50i-h6-sram-c1",
+ "allwinner,sun4i-a10-sram-c1";
+ reg = <0x000000 0x200000>;
+ };
+ };
};
ccu: clock@3001000 {
@@ -178,17 +192,6 @@
#reset-cells = <1>;
};
- gic: interrupt-controller@3021000 {
- compatible = "arm,gic-400";
- reg = <0x03021000 0x1000>,
- <0x03022000 0x2000>,
- <0x03024000 0x2000>,
- <0x03026000 0x2000>;
- interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
- interrupt-controller;
- #interrupt-cells = <3>;
- };
-
pio: pinctrl@300b000 {
compatible = "allwinner,sun50i-h6-pinctrl";
reg = <0x0300b000 0x400>;
@@ -239,6 +242,17 @@
};
};
+ gic: interrupt-controller@3021000 {
+ compatible = "arm,gic-400";
+ reg = <0x03021000 0x1000>,
+ <0x03022000 0x2000>,
+ <0x03024000 0x2000>,
+ <0x03026000 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+
mmc0: mmc@4020000 {
compatible = "allwinner,sun50i-h6-mmc",
"allwinner,sun50i-a64-mmc";
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index b2c9bb664595..7c649f6b14cb 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -42,28 +42,28 @@
#size-cells = <0>;
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
reg = <0x0>;
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
reg = <0x1>;
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
reg = <0x2>;
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
reg = <0x3>;
@@ -161,6 +161,7 @@
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
snps,multicast-filter-bins = <256>;
+ iommus = <&smmu 1>;
status = "disabled";
};
@@ -177,6 +178,7 @@
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
snps,multicast-filter-bins = <256>;
+ iommus = <&smmu 2>;
status = "disabled";
};
@@ -193,6 +195,7 @@
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
snps,multicast-filter-bins = <256>;
+ iommus = <&smmu 3>;
status = "disabled";
};
@@ -303,6 +306,7 @@
clocks = <&clkmgr STRATIX10_L4_MP_CLK>,
<&clkmgr STRATIX10_SDMMC_CLK>;
clock-names = "biu", "ciu";
+ iommus = <&smmu 5>;
status = "disabled";
};
@@ -336,6 +340,29 @@
reg = <0xffd11000 0x1000>;
};
+ smmu: iommu@fa000000 {
+ compatible = "arm,mmu-500", "arm,smmu-v2";
+ reg = <0xfa000000 0x40000>;
+ #global-interrupts = <2>;
+ #iommu-cells = <1>;
+ clocks = <&clkmgr STRATIX10_L4_MAIN_CLK>;
+ clock-names = "iommu";
+ interrupt-parent = <&intc>;
+ interrupts = <0 128 4>, /* Global Secure Fault */
+ <0 129 4>, /* Global Non-secure Fault */
+ /* Non-secure Context Interrupts (32) */
+ <0 138 4>, <0 139 4>, <0 140 4>, <0 141 4>,
+ <0 142 4>, <0 143 4>, <0 144 4>, <0 145 4>,
+ <0 146 4>, <0 147 4>, <0 148 4>, <0 149 4>,
+ <0 150 4>, <0 151 4>, <0 152 4>, <0 153 4>,
+ <0 154 4>, <0 155 4>, <0 156 4>, <0 157 4>,
+ <0 158 4>, <0 159 4>, <0 160 4>, <0 161 4>,
+ <0 162 4>, <0 163 4>, <0 164 4>, <0 165 4>,
+ <0 166 4>, <0 167 4>, <0 168 4>, <0 169 4>;
+ stream-match-mask = <0x7ff0>;
+ status = "disabled";
+ };
+
spi0: spi@ffda4000 {
compatible = "snps,dw-apb-ssi";
#address-cells = <1>;
@@ -445,6 +472,7 @@
resets = <&rst USB0_RESET>, <&rst USB0_OCP_RESET>;
reset-names = "dwc2", "dwc2-ecc";
clocks = <&clkmgr STRATIX10_USB_CLK>;
+ iommus = <&smmu 6>;
status = "disabled";
};
@@ -457,6 +485,7 @@
resets = <&rst USB1_RESET>, <&rst USB1_OCP_RESET>;
reset-names = "dwc2", "dwc2-ecc";
clocks = <&clkmgr STRATIX10_USB_CLK>;
+ iommus = <&smmu 7>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index f12efa27c636..0821fed4c074 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-u200.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12a-x96-max.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nanopi-k2.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nexbox-a95x.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-odroidc2.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
index 824eba98db2c..75fe1a2c49d0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
@@ -95,6 +95,13 @@
sound-name-prefix = "MIC";
};
+ spdif_dir: audio-codec-4 {
+ #sound-dai-cells = <0>;
+ compatible = "linux,spdif-dir";
+ status = "okay";
+ sound-name-prefix = "DIR";
+ };
+
emmc_pwrseq: emmc-pwrseq {
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
@@ -249,6 +256,9 @@
"TODDR_A IN 2", "TDMIN_C OUT",
"TODDR_B IN 2", "TDMIN_C OUT",
"TODDR_C IN 2", "TDMIN_C OUT",
+ "TODDR_A IN 3", "SPDIFIN Capture",
+ "TODDR_B IN 3", "SPDIFIN Capture",
+ "TODDR_C IN 3", "SPDIFIN Capture",
"TODDR_A IN 4", "PDM Capture",
"TODDR_B IN 4", "PDM Capture",
"TODDR_C IN 4", "PDM Capture",
@@ -326,6 +336,14 @@
};
dai-link-8 {
+ sound-dai = <&spdifin>;
+
+ codec {
+ sound-dai = <&spdif_dir>;
+ };
+ };
+
+ dai-link-9 {
sound-dai = <&pdm>;
codec {
@@ -446,7 +464,8 @@
bus-width = <4>;
cap-sd-highspeed;
- max-frequency = <100000000>;
+ sd-uhs-sdr104;
+ max-frequency = <200000000>;
non-removable;
disable-wp;
@@ -469,9 +488,8 @@
pinctrl-names = "default", "clk-gate";
bus-width = <8>;
- cap-sd-highspeed;
cap-mmc-highspeed;
- max-frequency = <180000000>;
+ max-frequency = <200000000>;
non-removable;
disable-wp;
mmc-ddr-1_8v;
@@ -483,6 +501,12 @@
vqmmc-supply = <&vddio_boot>;
};
+&spdifin {
+ pinctrl-0 = <&spdif_in_a19_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
&spdifout {
pinctrl-0 = <&spdif_out_a20_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index fffd55787981..34704fecf756 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -68,7 +68,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -77,7 +77,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -86,7 +86,7 @@
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -95,7 +95,7 @@
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -111,6 +111,14 @@
compatible = "amlogic,meson-gxbb-sm";
};
+ efuse: efuse {
+ compatible = "amlogic,meson-gxbb-efuse";
+ clocks = <&clkc CLKID_EFUSE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ read-only;
+ };
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -1260,6 +1268,18 @@
status = "disabled";
};
+ spdifin: audio-controller@400 {
+ compatible = "amlogic,axg-spdifin";
+ reg = <0x0 0x400 0x0 0x30>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "SPDIFIN";
+ interrupts = <GIC_SPI 87 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_audio AUD_CLKID_SPDIFIN>,
+ <&clkc_audio AUD_CLKID_SPDIFIN_CLK>;
+ clock-names = "pclk", "refclk";
+ status = "disabled";
+ };
+
spdifout: audio-controller@480 {
compatible = "amlogic,axg-spdifout";
reg = <0x0 0x480 0x0 0x50>;
@@ -1590,6 +1610,11 @@
status = "disabled";
};
+ clk_msr: clock-measure@18000 {
+ compatible = "amlogic,meson-axg-clk-measure";
+ reg = <0x0 0x18000 0x0 0x10>;
+ };
+
i2c3: i2c@1c000 {
compatible = "amlogic,meson-axg-i2c";
reg = <0x0 0x1c000 0x0 0x20>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
new file mode 100644
index 000000000000..c62d3d5706ff
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre SAS. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "meson-g12a.dtsi"
+
+/ {
+ compatible = "amediatech,x96-max", "amlogic,u200", "amlogic,g12a";
+ model = "Shenzhen Amediatech Technology Co., Ltd X96 Max";
+
+ aliases {
+ serial0 = &uart_AO;
+ };
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+};
+
+&uart_AO {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index 3b82a975c663..17c6217f8a84 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -20,7 +20,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -28,7 +28,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -36,7 +36,7 @@
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -44,7 +44,7 @@
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -78,20 +78,41 @@
#size-cells = <2>;
ranges;
- periphs: periphs@ff634000 {
+ apb: bus@ff600000 {
compatible = "simple-bus";
- reg = <0x0 0xff634000 0x0 0x2000>;
+ reg = <0x0 0xff600000 0x0 0x200000>;
#address-cells = <2>;
#size-cells = <2>;
- ranges = <0x0 0x0 0x0 0xff634000 0x0 0x2000>;
- };
+ ranges = <0x0 0x0 0x0 0xff600000 0x0 0x200000>;
+
+ periphs: bus@34400 {
+ compatible = "simple-bus";
+ reg = <0x0 0x34400 0x0 0x400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x34400 0x0 0x400>;
+ };
- hiubus: bus@ff63c000 {
- compatible = "simple-bus";
- reg = <0x0 0xff63c000 0x0 0x1c00>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges = <0x0 0x0 0x0 0xff63c000 0x0 0x1c00>;
+ hiu: bus@3c000 {
+ compatible = "simple-bus";
+ reg = <0x0 0x3c000 0x0 0x1400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x3c000 0x0 0x1400>;
+
+ hhi: system-controller@0 {
+ compatible = "amlogic,meson-gx-hhi-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0 0 0 0x400>;
+
+ clkc: clock-controller {
+ compatible = "amlogic,g12a-clkc";
+ #clock-cells = <1>;
+ clocks = <&xtal>;
+ clock-names = "xtal";
+ };
+ };
+ };
};
aobus: bus@ff800000 {
@@ -102,7 +123,8 @@
ranges = <0x0 0x0 0x0 0xff800000 0x0 0x100000>;
uart_AO: serial@3000 {
- compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
+ compatible = "amlogic,meson-gx-uart",
+ "amlogic,meson-ao-uart";
reg = <0x0 0x3000 0x0 0x18>;
interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>;
clocks = <&xtal>, <&xtal>, <&xtal>;
@@ -111,7 +133,8 @@
};
uart_AO_B: serial@4000 {
- compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
+ compatible = "amlogic,meson-gx-uart",
+ "amlogic,meson-ao-uart";
reg = <0x0 0x4000 0x0 0x18>;
interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
clocks = <&xtal>, <&xtal>, <&xtal>;
@@ -135,18 +158,15 @@
cbus: bus@ffd00000 {
compatible = "simple-bus";
- reg = <0x0 0xffd00000 0x0 0x25000>;
+ reg = <0x0 0xffd00000 0x0 0x100000>;
#address-cells = <2>;
#size-cells = <2>;
- ranges = <0x0 0x0 0x0 0xffd00000 0x0 0x25000>;
- };
+ ranges = <0x0 0x0 0x0 0xffd00000 0x0 0x100000>;
- apb: apb@ffe00000 {
- compatible = "simple-bus";
- reg = <0x0 0xffe00000 0x0 0x200000>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges = <0x0 0x0 0x0 0xffe00000 0x0 0x200000>;
+ clk_msr: clock-measure@18000 {
+ compatible = "amlogic,meson-g12a-clk-measure";
+ reg = <0x0 0x18000 0x0 0x10>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index e14e0ce7e89f..016641a41694 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -187,8 +187,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 44c5c51ff1fa..6772709b9e19 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -50,13 +50,35 @@
};
};
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ simplefb_cvbs: framebuffer-cvbs {
+ compatible = "amlogic,simple-framebuffer",
+ "simple-framebuffer";
+ amlogic,pipeline = "vpu-cvbs";
+ power-domains = <&pwrc_vpu>;
+ status = "disabled";
+ };
+
+ simplefb_hdmi: framebuffer-hdmi {
+ compatible = "amlogic,simple-framebuffer",
+ "simple-framebuffer";
+ amlogic,pipeline = "vpu-hdmi";
+ power-domains = <&pwrc_vpu>;
+ status = "disabled";
+ };
+ };
+
cpus {
#address-cells = <0x2>;
#size-cells = <0x0>;
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -65,7 +87,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -74,7 +96,7 @@
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -83,7 +105,7 @@
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -510,6 +532,7 @@
interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
#address-cells = <1>;
#size-cells = <0>;
+ amlogic,canvas = <&canvas>;
/* CVBS VDAC output port */
cvbs_vdac_port: port@0 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index 8cd50b75171d..ade2ee09ae96 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -305,8 +305,7 @@
max-frequency = <200000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddio_ao3v3>;
vqmmc-supply = <&vddio_tf>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 4cf7f6e80c6a..25105ac96d55 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -238,8 +238,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 2e1cd5e3a246..1cc9dc68ef00 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -258,8 +258,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&tflash_vdd>;
vqmmc-supply = <&tf_io>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index ce862266b9aa..0be0f2a5d2fe 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -196,8 +196,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 93a4acf2c46c..ad4d50bd9d77 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -154,8 +154,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
index ec09bb5792b7..2d2db783c44c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
@@ -211,8 +211,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vcc_3v3>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index a7b883ced0a8..a60d3652beee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -784,6 +784,12 @@
resets = <&reset RESET_SD_EMMC_C>;
};
+&simplefb_hdmi {
+ clocks = <&clkc CLKID_HDMI_PCLK>,
+ <&clkc CLKID_CLK81>,
+ <&clkc CLKID_GCLK_VENCI_INT0>;
+};
+
&spicc {
clocks = <&clkc CLKID_SPICC>;
clock-names = "core";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
index f1c410e2da2b..796baea7a0bf 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
@@ -131,8 +131,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index db293440e4ca..255cede7b447 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -238,8 +238,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
vqmmc-supply = <&vcc_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 6739697be1de..9cbdb85fb591 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -183,8 +183,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_card>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index a1b31013ab6e..bc811a2faf42 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -137,8 +137,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index d5c3d78aafeb..3093ae421b17 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -787,6 +787,12 @@
resets = <&reset RESET_SD_EMMC_C>;
};
+&simplefb_hdmi {
+ clocks = <&clkc CLKID_HDMI_PCLK>,
+ <&clkc CLKID_CLK81>,
+ <&clkc CLKID_GCLK_VENCI_INT0>;
+};
+
&spicc {
clocks = <&clkc CLKID_SPICC>;
clock-names = "core";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index 3c3a667a8df8..3f086ed7de05 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -356,8 +356,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index f7a1cffab4a8..8acfd40090d2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -147,8 +147,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
index 7212dc4531e4..7fa20a8ede17 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -170,8 +170,7 @@
max-frequency = <100000000>;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_boot>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index 247888d68a3a..ed3a3d5adf31 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -44,7 +44,7 @@
cpu4: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x100>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -53,7 +53,7 @@
cpu5: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x101>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -62,7 +62,7 @@
cpu6: cpu@102 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x102>;
enable-method = "psci";
next-level-cache = <&l2>;
@@ -71,7 +71,7 @@
cpu7: cpu@103 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x103>;
enable-method = "psci";
next-level-cache = <&l2>;
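The cpu-node edits in the Amlogic and APM files follow one pattern: the generic "arm,armv8" fallback is dropped and only the specific core compatible is kept. A sketch of the resulting node, assembled from the lines above for illustration:

    cpu0: cpu@0 {
        device_type = "cpu";
        compatible = "arm,cortex-a53";
        reg = <0x0 0x0>;
        enable-method = "psci";
        next-level-cache = <&l2>;
    };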
diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
index d8ecd1661461..7faea28a37b0 100644
--- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
@@ -21,7 +21,7 @@
cpu@0 {
device_type = "cpu";
- compatible = "apm,strega", "arm,armv8";
+ compatible = "apm,strega";
reg = <0x0 0x000>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -31,7 +31,7 @@
};
cpu@1 {
device_type = "cpu";
- compatible = "apm,strega", "arm,armv8";
+ compatible = "apm,strega";
reg = <0x0 0x001>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -41,7 +41,7 @@
};
cpu@100 {
device_type = "cpu";
- compatible = "apm,strega", "arm,armv8";
+ compatible = "apm,strega";
reg = <0x0 0x100>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -51,7 +51,7 @@
};
cpu@101 {
device_type = "cpu";
- compatible = "apm,strega", "arm,armv8";
+ compatible = "apm,strega";
reg = <0x0 0x101>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -61,7 +61,7 @@
};
cpu@200 {
device_type = "cpu";
- compatible = "apm,strega", "arm,armv8";
+ compatible = "apm,strega";
reg = <0x0 0x200>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -71,7 +71,7 @@
};
cpu@201 {
device_type = "cpu";
- compatible = "apm,strega", "arm,armv8";
+ compatible = "apm,strega";
reg = <0x0 0x201>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -81,7 +81,7 @@
};
cpu@300 {
device_type = "cpu";
- compatible = "apm,strega", "arm,armv8";
+ compatible = "apm,strega";
reg = <0x0 0x300>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -91,7 +91,7 @@
};
cpu@301 {
device_type = "cpu";
- compatible = "apm,strega", "arm,armv8";
+ compatible = "apm,strega";
reg = <0x0 0x301>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 00e82b8e9a19..94d637d17262 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -21,7 +21,7 @@
cpu@0 {
device_type = "cpu";
- compatible = "apm,potenza", "arm,armv8";
+ compatible = "apm,potenza";
reg = <0x0 0x000>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -29,7 +29,7 @@
};
cpu@1 {
device_type = "cpu";
- compatible = "apm,potenza", "arm,armv8";
+ compatible = "apm,potenza";
reg = <0x0 0x001>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -37,7 +37,7 @@
};
cpu@100 {
device_type = "cpu";
- compatible = "apm,potenza", "arm,armv8";
+ compatible = "apm,potenza";
reg = <0x0 0x100>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -45,7 +45,7 @@
};
cpu@101 {
device_type = "cpu";
- compatible = "apm,potenza", "arm,armv8";
+ compatible = "apm,potenza";
reg = <0x0 0x101>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -53,7 +53,7 @@
};
cpu@200 {
device_type = "cpu";
- compatible = "apm,potenza", "arm,armv8";
+ compatible = "apm,potenza";
reg = <0x0 0x200>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -61,7 +61,7 @@
};
cpu@201 {
device_type = "cpu";
- compatible = "apm,potenza", "arm,armv8";
+ compatible = "apm,potenza";
reg = <0x0 0x201>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -69,7 +69,7 @@
};
cpu@300 {
device_type = "cpu";
- compatible = "apm,potenza", "arm,armv8";
+ compatible = "apm,potenza";
reg = <0x0 0x300>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
@@ -77,7 +77,7 @@
};
cpu@301 {
device_type = "cpu";
- compatible = "apm,potenza", "arm,armv8";
+ compatible = "apm,potenza";
reg = <0x0 0x301>;
enable-method = "spin-table";
cpu-release-addr = <0x1 0x0000fff8>;
diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile
index 5b45144b371a..800da2e84f3f 100644
--- a/arch/arm64/boot/dts/arm/Makefile
+++ b/arch/arm64/boot/dts/arm/Makefile
@@ -5,3 +5,4 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += \
dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb juno-r2.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += fvp-base-revc.dtb
diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi
index 851abf34fc80..15fe81738e94 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi
@@ -14,6 +14,6 @@
<0x0 0x2c002000 0 0x2000>,
<0x0 0x2c004000 0 0x2000>,
<0x0 0x2c006000 0 0x2000>;
- interrupts = <1 9 0xf04>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
};
diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi
index 91fc5c60d88b..f2c75c756039 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi
@@ -17,7 +17,7 @@
<0x0 0x2c000000 0x0 0x2000>,
<0x0 0x2c010000 0x0 0x2000>,
<0x0 0x2c02f000 0x0 0x2000>;
- interrupts = <1 9 4>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
its: its@2f020000 {
compatible = "arm,gic-v3-its";
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index e080277d27ae..3f78373f708a 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -7,6 +7,8 @@
/dts-v1/;
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
/memreserve/ 0x80000000 0x00010000;
/ {
@@ -67,26 +69,26 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0xf08>,
- <1 14 0xf08>,
- <1 11 0xf08>,
- <1 10 0xf08>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
clock-frequency = <100000000>;
};
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <0 60 4>,
- <0 61 4>,
- <0 62 4>,
- <0 63 4>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
};
watchdog@2a440000 {
compatible = "arm,sbsa-gwdt";
reg = <0x0 0x2a440000 0 0x1000>,
<0x0 0x2a450000 0 0x1000>;
- interrupts = <0 27 4>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
timeout-sec = <30>;
};
@@ -105,49 +107,49 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 63>;
- interrupt-map = <0 0 0 &gic 0 0 0 0 4>,
- <0 0 1 &gic 0 0 0 1 4>,
- <0 0 2 &gic 0 0 0 2 4>,
- <0 0 3 &gic 0 0 0 3 4>,
- <0 0 4 &gic 0 0 0 4 4>,
- <0 0 5 &gic 0 0 0 5 4>,
- <0 0 6 &gic 0 0 0 6 4>,
- <0 0 7 &gic 0 0 0 7 4>,
- <0 0 8 &gic 0 0 0 8 4>,
- <0 0 9 &gic 0 0 0 9 4>,
- <0 0 10 &gic 0 0 0 10 4>,
- <0 0 11 &gic 0 0 0 11 4>,
- <0 0 12 &gic 0 0 0 12 4>,
- <0 0 13 &gic 0 0 0 13 4>,
- <0 0 14 &gic 0 0 0 14 4>,
- <0 0 15 &gic 0 0 0 15 4>,
- <0 0 16 &gic 0 0 0 16 4>,
- <0 0 17 &gic 0 0 0 17 4>,
- <0 0 18 &gic 0 0 0 18 4>,
- <0 0 19 &gic 0 0 0 19 4>,
- <0 0 20 &gic 0 0 0 20 4>,
- <0 0 21 &gic 0 0 0 21 4>,
- <0 0 22 &gic 0 0 0 22 4>,
- <0 0 23 &gic 0 0 0 23 4>,
- <0 0 24 &gic 0 0 0 24 4>,
- <0 0 25 &gic 0 0 0 25 4>,
- <0 0 26 &gic 0 0 0 26 4>,
- <0 0 27 &gic 0 0 0 27 4>,
- <0 0 28 &gic 0 0 0 28 4>,
- <0 0 29 &gic 0 0 0 29 4>,
- <0 0 30 &gic 0 0 0 30 4>,
- <0 0 31 &gic 0 0 0 31 4>,
- <0 0 32 &gic 0 0 0 32 4>,
- <0 0 33 &gic 0 0 0 33 4>,
- <0 0 34 &gic 0 0 0 34 4>,
- <0 0 35 &gic 0 0 0 35 4>,
- <0 0 36 &gic 0 0 0 36 4>,
- <0 0 37 &gic 0 0 0 37 4>,
- <0 0 38 &gic 0 0 0 38 4>,
- <0 0 39 &gic 0 0 0 39 4>,
- <0 0 40 &gic 0 0 0 40 4>,
- <0 0 41 &gic 0 0 0 41 4>,
- <0 0 42 &gic 0 0 0 42 4>;
+ interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 1 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 2 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 3 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 4 &gic 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 5 &gic 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 6 &gic 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 7 &gic 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 8 &gic 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 9 &gic 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 10 &gic 0 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 11 &gic 0 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 12 &gic 0 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 13 &gic 0 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 14 &gic 0 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 15 &gic 0 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 16 &gic 0 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 17 &gic 0 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 18 &gic 0 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 19 &gic 0 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 20 &gic 0 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 21 &gic 0 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 22 &gic 0 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 23 &gic 0 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 24 &gic 0 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 25 &gic 0 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 26 &gic 0 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 27 &gic 0 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 28 &gic 0 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 29 &gic 0 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 30 &gic 0 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 31 &gic 0 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 32 &gic 0 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 33 &gic 0 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 34 &gic 0 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 35 &gic 0 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 36 &gic 0 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 37 &gic 0 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 38 &gic 0 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 39 &gic 0 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 40 &gic 0 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 41 &gic 0 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 42 &gic 0 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
ethernet@2,02000000 {
compatible = "smsc,lan91c111";
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
new file mode 100644
index 000000000000..687707020ec1
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Ltd. Fast Models
+ *
+ * Architecture Envelope Model (AEM) ARMv8-A
+ * ARMAEMv8AMPCT
+ *
+ * FVP Base RevC
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/memreserve/ 0x80000000 0x00010000;
+
+#include "rtsm_ve-motherboard.dtsi"
+#include "rtsm_ve-motherboard-rs2.dtsi"
+
+/ {
+ model = "FVP Base RevC";
+ compatible = "arm,fvp-base-revc", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x000>;
+ enable-method = "psci";
+ };
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ };
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ };
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ };
+ cpu4: cpu@10000 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x10000>;
+ enable-method = "psci";
+ };
+ cpu5: cpu@10100 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x10100>;
+ enable-method = "psci";
+ };
+ cpu6: cpu@10200 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x10200>;
+ enable-method = "psci";
+ };
+ cpu7: cpu@10300 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x10300>;
+ enable-method = "psci";
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0 0x80000000>,
+ <0x00000008 0x80000000 0 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* Chipselect 2,00000000 is physically at 0x18000000 */
+ vram: vram@18000000 {
+ /* 8 MB of designated video RAM */
+ compatible = "shared-dma-pool";
+ reg = <0x00000000 0x18000000 0 0x00800000>;
+ no-map;
+ };
+ };
+
+ gic: interrupt-controller@2f000000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ reg = <0x0 0x2f000000 0 0x10000>, // GICD
+ <0x0 0x2f100000 0 0x200000>, // GICR
+ <0x0 0x2c000000 0 0x2000>, // GICC
+ <0x0 0x2c010000 0 0x2000>, // GICH
+ <0x0 0x2c02f000 0 0x2000>; // GICV
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ its: its@2f020000 {
+ #msi-cells = <1>;
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0x2f020000 0x0 0x20000>; // GITS
+ msi-controller;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ spe-pmu {
+ compatible = "arm,statistical-profiling-extension-v1";
+ interrupts = <GIC_PPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pci: pci@40000000 {
+ #address-cells = <0x3>;
+ #size-cells = <0x2>;
+ #interrupt-cells = <0x1>;
+ compatible = "pci-host-ecam-generic";
+ device_type = "pci";
+ bus-range = <0x0 0x1>;
+ reg = <0x0 0x40000000 0x0 0x10000000>;
+ ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
+ interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ msi-map = <0x0 &its 0x0 0x10000>;
+ iommu-map = <0x0 &smmu 0x0 0x10000>;
+
+ dma-coherent;
+ };
+
+ smmu: smmu@2b400000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0x2b400000 0x0 0x100000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
+ dma-coherent;
+ #iommu-cells = <1>;
+ msi-parent = <&its 0x10000>;
+ };
+
+ panel {
+ compatible = "arm,rtsm-display", "panel-dpi";
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&clcd_pads>;
+ };
+ };
+
+ panel-timing {
+ clock-frequency = <63500127>;
+ hactive = <1024>;
+ hback-porch = <152>;
+ hfront-porch = <48>;
+ hsync-len = <104>;
+ vactive = <768>;
+ vback-porch = <23>;
+ vfront-porch = <3>;
+ vsync-len = <4>;
+ };
+ };
+
+ smb@8000000 {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 1 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 2 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 3 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 4 &gic 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 5 &gic 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 6 &gic 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 7 &gic 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 8 &gic 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 9 &gic 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 10 &gic 0 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 11 &gic 0 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 12 &gic 0 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 13 &gic 0 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 14 &gic 0 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 15 &gic 0 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 16 &gic 0 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 17 &gic 0 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 18 &gic 0 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 19 &gic 0 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 20 &gic 0 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 21 &gic 0 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 22 &gic 0 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 23 &gic 0 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 24 &gic 0 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 25 &gic 0 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 26 &gic 0 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 27 &gic 0 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 28 &gic 0 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 29 &gic 0 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 30 &gic 0 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 31 &gic 0 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 32 &gic 0 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 33 &gic 0 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 34 &gic 0 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 35 &gic 0 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 36 &gic 0 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 37 &gic 0 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 38 &gic 0 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 39 &gic 0 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 40 &gic 0 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 41 &gic 0 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 42 &gic 0 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 43 &gic 0 0 GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 44 &gic 0 0 GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+
+ motherboard {
+ iofpga@3,00000000 {
+ clcd@1f0000 {
+ max-memory-bandwidth = <130000000>; /* 16bpp @ 63.5MHz */
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index ed774ee8f659..7446e0dc154d 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -18,7 +18,7 @@
status = "disabled";
frame@2a830000 {
frame-number = <1>;
- interrupts = <0 60 4>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x0 0x2a830000 0x0 0x10000>;
};
};
@@ -220,6 +220,41 @@
};
};
+ replicator@20120000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0 0x20120000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&etr_in_port>;
+ };
+ };
+ };
+ in-ports {
+ port {
+ replicator_in_port0: endpoint {
+ };
+ };
+ };
+ };
+
cpu_debug0: cpu-debug@22010000 {
compatible = "arm,coresight-cpu-debug", "arm,primecell";
reg = <0x0 0x22010000 0x0 0x1000>;
@@ -452,41 +487,6 @@
};
};
- replicator@20120000 {
- compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
- reg = <0 0x20120000 0 0x1000>;
-
- clocks = <&soc_smc50mhz>;
- clock-names = "apb_pclk";
- power-domains = <&scpi_devpd 0>;
-
- out-ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* replicator output ports */
- port@0 {
- reg = <0>;
- replicator_out_port0: endpoint {
- remote-endpoint = <&tpiu_in_port>;
- };
- };
-
- port@1 {
- reg = <1>;
- replicator_out_port1: endpoint {
- remote-endpoint = <&etr_in_port>;
- };
- };
- };
- in-ports {
- port {
- replicator_in_port0: endpoint {
- };
- };
- };
- };
-
sram: sram@2e000000 {
compatible = "arm,juno-sram-ns", "mmio-sram";
reg = <0x0 0x2e000000 0x0 0x8000>;
@@ -520,10 +520,10 @@
<0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &gic 0 0 0 136 4>,
- <0 0 0 2 &gic 0 0 0 137 4>,
- <0 0 0 3 &gic 0 0 0 138 4>,
- <0 0 0 4 &gic 0 0 0 139 4>;
+ interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
msi-parent = <&v2m_0>;
status = "disabled";
iommu-map-mask = <0x0>; /* RC has no means to output PCI RID */
@@ -787,19 +787,19 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 15>;
- interrupt-map = <0 0 0 &gic 0 0 0 68 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 1 &gic 0 0 0 69 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 2 &gic 0 0 0 70 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 3 &gic 0 0 0 160 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 4 &gic 0 0 0 161 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 5 &gic 0 0 0 162 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 6 &gic 0 0 0 163 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 7 &gic 0 0 0 164 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 8 &gic 0 0 0 165 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 9 &gic 0 0 0 166 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 10 &gic 0 0 0 167 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 11 &gic 0 0 0 168 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 12 &gic 0 0 0 169 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 1 &gic 0 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 2 &gic 0 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 3 &gic 0 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 4 &gic 0 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 5 &gic 0 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 6 &gic 0 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 7 &gic 0 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 8 &gic 0 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 9 &gic 0 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 10 &gic 0 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 11 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 12 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
};
site2: tlx@60000000 {
@@ -809,6 +809,6 @@
ranges = <0 0 0x60000000 0x10000000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0>;
- interrupt-map = <0 0 &gic 0 0 0 168 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index b2b7ced633cf..5f290090b0cf 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -85,7 +85,7 @@
};
A57_0: cpu@0 {
- compatible = "arm,cortex-a57","arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0 0x0>;
device_type = "cpu";
enable-method = "psci";
@@ -102,7 +102,7 @@
};
A57_1: cpu@1 {
- compatible = "arm,cortex-a57","arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0 0x1>;
device_type = "cpu";
enable-method = "psci";
@@ -119,7 +119,7 @@
};
A53_0: cpu@100 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x100>;
device_type = "cpu";
enable-method = "psci";
@@ -136,7 +136,7 @@
};
A53_1: cpu@101 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x101>;
device_type = "cpu";
enable-method = "psci";
@@ -153,7 +153,7 @@
};
A53_2: cpu@102 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x102>;
device_type = "cpu";
enable-method = "psci";
@@ -170,7 +170,7 @@
};
A53_3: cpu@103 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x103>;
device_type = "cpu";
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index ab77adb4f3c2..305300dd521c 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -85,7 +85,7 @@
};
A72_0: cpu@0 {
- compatible = "arm,cortex-a72","arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x0>;
device_type = "cpu";
enable-method = "psci";
@@ -99,10 +99,11 @@
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <450>;
};
A72_1: cpu@1 {
- compatible = "arm,cortex-a72","arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x1>;
device_type = "cpu";
enable-method = "psci";
@@ -116,10 +117,11 @@
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <450>;
};
A53_0: cpu@100 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x100>;
device_type = "cpu";
enable-method = "psci";
@@ -133,10 +135,11 @@
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <485>;
+ dynamic-power-coefficient = <140>;
};
A53_1: cpu@101 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x101>;
device_type = "cpu";
enable-method = "psci";
@@ -150,10 +153,11 @@
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <485>;
+ dynamic-power-coefficient = <140>;
};
A53_2: cpu@102 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x102>;
device_type = "cpu";
enable-method = "psci";
@@ -167,10 +171,11 @@
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <485>;
+ dynamic-power-coefficient = <140>;
};
A53_3: cpu@103 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x103>;
device_type = "cpu";
enable-method = "psci";
@@ -184,6 +189,7 @@
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <485>;
+ dynamic-power-coefficient = <140>;
};
A72_L2: l2-cache0 {
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index 08d4ba1716c3..f00cffbd032c 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -84,7 +84,7 @@
};
A57_0: cpu@0 {
- compatible = "arm,cortex-a57","arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0 0x0>;
device_type = "cpu";
enable-method = "psci";
@@ -98,10 +98,11 @@
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <530>;
};
A57_1: cpu@1 {
- compatible = "arm,cortex-a57","arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0 0x1>;
device_type = "cpu";
enable-method = "psci";
@@ -115,10 +116,11 @@
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <530>;
};
A53_0: cpu@100 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x100>;
device_type = "cpu";
enable-method = "psci";
@@ -132,10 +134,11 @@
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <578>;
+ dynamic-power-coefficient = <140>;
};
A53_1: cpu@101 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x101>;
device_type = "cpu";
enable-method = "psci";
@@ -149,10 +152,11 @@
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <578>;
+ dynamic-power-coefficient = <140>;
};
A53_2: cpu@102 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x102>;
device_type = "cpu";
enable-method = "psci";
@@ -166,10 +170,11 @@
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <578>;
+ dynamic-power-coefficient = <140>;
};
A53_3: cpu@103 {
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x103>;
device_type = "cpu";
enable-method = "psci";
@@ -183,6 +188,7 @@
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <578>;
+ dynamic-power-coefficient = <140>;
};
A57_L2: l2-cache0 {
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index fe4fda473c0a..6e685d883303 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -10,6 +10,8 @@
/dts-v1/;
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
/memreserve/ 0x80000000 0x00010000;
#include "rtsm_ve-motherboard.dtsi"
@@ -101,24 +103,24 @@
<0x0 0x2c002000 0 0x2000>,
<0x0 0x2c004000 0 0x2000>,
<0x0 0x2c006000 0 0x2000>;
- interrupts = <1 9 0xf04>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0xf08>,
- <1 14 0xf08>,
- <1 11 0xf08>,
- <1 10 0xf08>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
clock-frequency = <100000000>;
};
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <0 60 4>,
- <0 61 4>,
- <0 62 4>,
- <0 63 4>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
};
panel {
@@ -144,48 +146,48 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 63>;
- interrupt-map = <0 0 0 &gic 0 0 4>,
- <0 0 1 &gic 0 1 4>,
- <0 0 2 &gic 0 2 4>,
- <0 0 3 &gic 0 3 4>,
- <0 0 4 &gic 0 4 4>,
- <0 0 5 &gic 0 5 4>,
- <0 0 6 &gic 0 6 4>,
- <0 0 7 &gic 0 7 4>,
- <0 0 8 &gic 0 8 4>,
- <0 0 9 &gic 0 9 4>,
- <0 0 10 &gic 0 10 4>,
- <0 0 11 &gic 0 11 4>,
- <0 0 12 &gic 0 12 4>,
- <0 0 13 &gic 0 13 4>,
- <0 0 14 &gic 0 14 4>,
- <0 0 15 &gic 0 15 4>,
- <0 0 16 &gic 0 16 4>,
- <0 0 17 &gic 0 17 4>,
- <0 0 18 &gic 0 18 4>,
- <0 0 19 &gic 0 19 4>,
- <0 0 20 &gic 0 20 4>,
- <0 0 21 &gic 0 21 4>,
- <0 0 22 &gic 0 22 4>,
- <0 0 23 &gic 0 23 4>,
- <0 0 24 &gic 0 24 4>,
- <0 0 25 &gic 0 25 4>,
- <0 0 26 &gic 0 26 4>,
- <0 0 27 &gic 0 27 4>,
- <0 0 28 &gic 0 28 4>,
- <0 0 29 &gic 0 29 4>,
- <0 0 30 &gic 0 30 4>,
- <0 0 31 &gic 0 31 4>,
- <0 0 32 &gic 0 32 4>,
- <0 0 33 &gic 0 33 4>,
- <0 0 34 &gic 0 34 4>,
- <0 0 35 &gic 0 35 4>,
- <0 0 36 &gic 0 36 4>,
- <0 0 37 &gic 0 37 4>,
- <0 0 38 &gic 0 38 4>,
- <0 0 39 &gic 0 39 4>,
- <0 0 40 &gic 0 40 4>,
- <0 0 41 &gic 0 41 4>,
- <0 0 42 &gic 0 42 4>;
+ interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
new file mode 100644
index 000000000000..57b0b9d7f3fa
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Ltd. Fast Models
+ *
+ * "rs2" extension for the v2m motherboard
+ */
+/ {
+ smb@8000000 {
+ motherboard {
+ arm,v2m-memory-map = "rs2";
+
+ iofpga@3,00000000 {
+ virtio-p9@140000 {
+ compatible = "virtio,mmio";
+ reg = <0x140000 0x200>;
+ interrupts = <43>;
+ };
+
+ virtio-net@150000 {
+ compatible = "virtio,mmio";
+ reg = <0x150000 0x200>;
+ interrupts = <44>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
index b25f3cbd3da8..454cf6c44c49 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
@@ -86,7 +86,7 @@
mmci@50000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
- interrupts = <9 10>;
+ interrupts = <9>, <10>;
cd-gpios = <&v2m_sysreg 0 0>;
wp-gpios = <&v2m_sysreg 1 0>;
max-frequency = <12000000>;
@@ -167,6 +167,12 @@
clock-names = "timclken1", "timclken2", "apb_pclk";
};
+ virtio-block@130000 {
+ compatible = "virtio,mmio";
+ reg = <0x130000 0x200>;
+ interrupts = <42>;
+ };
+
rtc@170000 {
compatible = "arm,pl031", "arm,primecell";
reg = <0x170000 0x1000>;
@@ -193,12 +199,6 @@
};
};
};
-
- virtio-block@130000 {
- compatible = "virtio,mmio";
- reg = <0x130000 0x200>;
- interrupts = <42>;
- };
};
v2m_fixed_3v3: v2m-3v3 {
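The mmci tweak in the hunk above splits one two-cell entry into two one-cell specifiers, making explicit that the controller raises two separate interrupts toward a parent whose specifiers use a single cell. Before/after sketch, for illustration only:

    /* before: one entry carrying both numbers */
    interrupts = <9 10>;

    /* after: two distinct interrupt specifiers */
    interrupts = <9>, <10>;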
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
index 8981c3d2ff18..22383c26bb03 100644
--- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -43,14 +43,14 @@
cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0>;
next-level-cache = <&L2_0>;
};
cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 1>;
next-level-cache = <&L2_0>;
};
diff --git a/arch/arm64/boot/dts/bitmain/Makefile b/arch/arm64/boot/dts/bitmain/Makefile
new file mode 100644
index 000000000000..be90a6071be0
--- /dev/null
+++ b/arch/arm64/boot/dts/bitmain/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+dtb-$(CONFIG_ARCH_BITMAIN) += bm1880-sophon-edge.dtb
diff --git a/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts b/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts
new file mode 100644
index 000000000000..6a3255597138
--- /dev/null
+++ b/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+/dts-v1/;
+
+#include "bm1880.dtsi"
+
+/ {
+ compatible = "bitmain,sophon-edge", "bitmain,bm1880";
+ model = "Sophon Edge";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart2;
+ serial2 = &uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x1 0x00000000 0x0 0x40000000>; // 1GB
+ };
+
+ uart_clk: uart-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <500000000>;
+ #clock-cells = <0>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+ clocks = <&uart_clk>;
+};
+
+&uart1 {
+ status = "okay";
+ clocks = <&uart_clk>;
+};
+
+&uart2 {
+ status = "okay";
+ clocks = <&uart_clk>;
+};
diff --git a/arch/arm64/boot/dts/bitmain/bm1880.dtsi b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
new file mode 100644
index 000000000000..55a4769e0de2
--- /dev/null
+++ b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "bitmain,bm1880";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x1>;
+ enable-method = "psci";
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ secmon@100000000 {
+ reg = <0x1 0x00000000 0x0 0x20000>;
+ no-map;
+ };
+
+ jpu@130000000 {
+ reg = <0x1 0x30000000 0x0 0x08000000>; // 128M
+ no-map;
+ };
+
+ vpu@138000000 {
+ reg = <0x1 0x38000000 0x0 0x08000000>; // 128M
+ no-map;
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic: interrupt-controller@50001000 {
+ compatible = "arm,gic-400";
+ reg = <0x0 0x50001000 0x0 0x1000>,
+ <0x0 0x50002000 0x0 0x2000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+
+ uart0: serial@58018000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x0 0x58018000 0x0 0x2000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart1: serial@5801A000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x0 0x5801a000 0x0 0x2000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart2: serial@5801C000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x0 0x5801c000 0x0 0x2000>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart3: serial@5801E000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x0 0x5801e000 0x0 0x2000>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index 667ca989c11b..d1d31ccad758 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-b.dtb \
+dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-a-plus.dtb \
+ bcm2837-rpi-3-b.dtb \
bcm2837-rpi-3-b-plus.dtb \
bcm2837-rpi-cm3-io3.dtb
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-a-plus.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-a-plus.dts
new file mode 100644
index 000000000000..f0ec56a1c4d7
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-a-plus.dts
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "arm/bcm2837-rpi-3-a-plus.dts"
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index ea854f689fda..15f7b0ed3836 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -47,7 +47,7 @@
A57_0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0 0>;
enable-method = "psci";
next-level-cache = <&CLUSTER0_L2>;
@@ -55,7 +55,7 @@
A57_1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0 1>;
enable-method = "psci";
next-level-cache = <&CLUSTER0_L2>;
@@ -63,7 +63,7 @@
A57_2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0 2>;
enable-method = "psci";
next-level-cache = <&CLUSTER0_L2>;
@@ -71,7 +71,7 @@
A57_3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0 3>;
enable-method = "psci";
next-level-cache = <&CLUSTER0_L2>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index cfeaa855bd05..35c4670c00d1 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -44,7 +44,7 @@
cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&CLUSTER0_L2>;
@@ -52,7 +52,7 @@
cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&CLUSTER0_L2>;
@@ -60,7 +60,7 @@
cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x100>;
enable-method = "psci";
next-level-cache = <&CLUSTER1_L2>;
@@ -68,7 +68,7 @@
cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x101>;
enable-method = "psci";
next-level-cache = <&CLUSTER1_L2>;
@@ -76,7 +76,7 @@
cpu@200 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x200>;
enable-method = "psci";
next-level-cache = <&CLUSTER2_L2>;
@@ -84,7 +84,7 @@
cpu@201 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x201>;
enable-method = "psci";
next-level-cache = <&CLUSTER2_L2>;
@@ -92,7 +92,7 @@
cpu@300 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x300>;
enable-method = "psci";
next-level-cache = <&CLUSTER3_L2>;
@@ -100,7 +100,7 @@
cpu@301 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x301>;
enable-method = "psci";
next-level-cache = <&CLUSTER3_L2>;
diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
index 1a9103b269cb..e0a71795261b 100644
--- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
@@ -64,289 +64,289 @@
cpu@0 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x000>;
enable-method = "psci";
};
cpu@1 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x001>;
enable-method = "psci";
};
cpu@2 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x002>;
enable-method = "psci";
};
cpu@3 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x003>;
enable-method = "psci";
};
cpu@4 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x004>;
enable-method = "psci";
};
cpu@5 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x005>;
enable-method = "psci";
};
cpu@6 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x006>;
enable-method = "psci";
};
cpu@7 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x007>;
enable-method = "psci";
};
cpu@8 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x008>;
enable-method = "psci";
};
cpu@9 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x009>;
enable-method = "psci";
};
cpu@a {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x00a>;
enable-method = "psci";
};
cpu@b {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x00b>;
enable-method = "psci";
};
cpu@c {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x00c>;
enable-method = "psci";
};
cpu@d {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x00d>;
enable-method = "psci";
};
cpu@e {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x00e>;
enable-method = "psci";
};
cpu@f {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x00f>;
enable-method = "psci";
};
cpu@100 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x100>;
enable-method = "psci";
};
cpu@101 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x101>;
enable-method = "psci";
};
cpu@102 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x102>;
enable-method = "psci";
};
cpu@103 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x103>;
enable-method = "psci";
};
cpu@104 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x104>;
enable-method = "psci";
};
cpu@105 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x105>;
enable-method = "psci";
};
cpu@106 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x106>;
enable-method = "psci";
};
cpu@107 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x107>;
enable-method = "psci";
};
cpu@108 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x108>;
enable-method = "psci";
};
cpu@109 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x109>;
enable-method = "psci";
};
cpu@10a {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x10a>;
enable-method = "psci";
};
cpu@10b {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x10b>;
enable-method = "psci";
};
cpu@10c {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x10c>;
enable-method = "psci";
};
cpu@10d {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x10d>;
enable-method = "psci";
};
cpu@10e {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x10e>;
enable-method = "psci";
};
cpu@10f {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x10f>;
enable-method = "psci";
};
cpu@200 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x200>;
enable-method = "psci";
};
cpu@201 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x201>;
enable-method = "psci";
};
cpu@202 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x202>;
enable-method = "psci";
};
cpu@203 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x203>;
enable-method = "psci";
};
cpu@204 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x204>;
enable-method = "psci";
};
cpu@205 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x205>;
enable-method = "psci";
};
cpu@206 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x206>;
enable-method = "psci";
};
cpu@207 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x207>;
enable-method = "psci";
};
cpu@208 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x208>;
enable-method = "psci";
};
cpu@209 {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x209>;
enable-method = "psci";
};
cpu@20a {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x20a>;
enable-method = "psci";
};
cpu@20b {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x20b>;
enable-method = "psci";
};
cpu@20c {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x20c>;
enable-method = "psci";
};
cpu@20d {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x20d>;
enable-method = "psci";
};
cpu@20e {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x20e>;
enable-method = "psci";
};
cpu@20f {
device_type = "cpu";
- compatible = "cavium,thunder", "arm,armv8";
+ compatible = "cavium,thunder";
reg = <0x0 0x20f>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
index ff5c4c47b22b..0b7c935a4778 100644
--- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
@@ -27,28 +27,28 @@
cpu@0 {
device_type = "cpu";
- compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8";
+ compatible = "cavium,thunder2", "brcm,vulcan";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu@1 {
device_type = "cpu";
- compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8";
+ compatible = "cavium,thunder2", "brcm,vulcan";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu@2 {
device_type = "cpu";
- compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8";
+ compatible = "cavium,thunder2", "brcm,vulcan";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu@3 {
device_type = "cpu";
- compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8";
+ compatible = "cavium,thunder2", "brcm,vulcan";
reg = <0x0 0x3>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index e7cd3b67d818..a04e80327b6e 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -29,7 +29,7 @@
cpu0: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x100>;
clock-frequency = <1300000000>;
@@ -41,7 +41,7 @@
cpu1: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x101>;
clock-frequency = <1300000000>;
@@ -51,7 +51,7 @@
cpu2: cpu@102 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x102>;
clock-frequency = <1300000000>;
@@ -61,7 +61,7 @@
cpu3: cpu@103 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x103>;
clock-frequency = <1300000000>;
@@ -71,7 +71,7 @@
cpu4: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
enable-method = "psci";
reg = <0x0>;
clock-frequency = <1900000000>;
@@ -83,7 +83,7 @@
cpu5: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
enable-method = "psci";
reg = <0x1>;
clock-frequency = <1900000000>;
@@ -93,7 +93,7 @@
cpu6: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
enable-method = "psci";
reg = <0x2>;
clock-frequency = <1900000000>;
@@ -103,7 +103,7 @@
cpu7: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
enable-method = "psci";
reg = <0x3>;
clock-frequency = <1900000000>;
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index 75ad724c487e..967558a93d82 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -34,28 +34,28 @@
cpu_atlas0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0>;
enable-method = "psci";
};
cpu_atlas1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x1>;
enable-method = "psci";
};
cpu_atlas2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x2>;
enable-method = "psci";
};
cpu_atlas3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x3>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index f9be2426f83c..13604e558dc1 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frwy.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-oxalis.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds.dtb
@@ -20,3 +21,4 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts
new file mode 100644
index 000000000000..7c726267ec8f
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for Oxalis
+ *
+ * Copyright (c) 2019 Manivannan Sadhasivam
+ *
+ */
+
+/dts-v1/;
+
+#include "fsl-ls1012a.dtsi"
+
+/ {
+ model = "Oxalis";
+ compatible = "ebs-systart,oxalis", "fsl,ls1012a";
+
+ sys_mclk: clock-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "Speaker Ext",
+ "Line", "Line In Jack";
+ simple-audio-card,routing =
+ "MIC_IN", "Microphone Jack",
+ "Microphone Jack", "Mic Bias",
+ "LINE_IN", "Line In Jack",
+ "Headphone Jack", "HP_OUT",
+ "Speaker Ext", "LINE_OUT";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai2>;
+ frame-master;
+ bitclock-master;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&codec>;
+ frame-master;
+ bitclock-master;
+ system-clock-frequency = <25000000>;
+ };
+ };
+};
+
+&duart0 {
+ status = "okay";
+};
+
+&duart1 {
+ status = "okay";
+};
+
+&esdhc1 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ codec: audio-codec@a {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,sgtl5000";
+ reg = <0xa>;
+ VDDA-supply = <&reg_1p8v>;
+ VDDIO-supply = <&reg_1p8v>;
+ clocks = <&sys_mclk>;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&sai2 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 816f3a4537e3..1ce0042b2a14 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -446,6 +446,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
sata: sata@3200000 {
@@ -486,6 +487,7 @@
#size-cells = <2>;
device_type = "pci";
num-lanes = <4>;
+ num-viewport = <2>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index fdeb4176fc33..f86b054a74ae 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -71,3 +71,20 @@
&duart1 {
status = "okay";
};
+
+&enetc_port0 {
+ phy-handle = <&sgmii_phy0>;
+ phy-connection-type = "sgmii";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ sgmii_phy0: ethernet-phy@2 {
+ reg = <0x2>;
+ };
+ };
+};
+
+&enetc_port1 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index a8cf92af05fb..2896bbcfa3bb 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -335,5 +335,40 @@
<GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ pcie@1f0000000 { /* Integrated Endpoint Root Complex */
+ compatible = "pci-host-ecam-generic";
+ reg = <0x01 0xf0000000 0x0 0x100000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ msi-parent = <&its>;
+ device_type = "pci";
+ bus-range = <0x0 0x0>;
+ dma-coherent;
+ msi-map = <0 &its 0x17 0xe>;
+ iommu-map = <0 &smmu 0x17 0xe>;
+ /* PF0-6 BAR0 - non-prefetchable memory */
+ ranges = <0x82000000 0x0 0x00000000 0x1 0xf8000000 0x0 0x160000
+ /* PF0-6 BAR2 - prefetchable memory */
+ 0xc2000000 0x0 0x00000000 0x1 0xf8160000 0x0 0x070000
+ /* PF0: VF0-1 BAR0 - non-prefetchable memory */
+ 0x82000000 0x0 0x00000000 0x1 0xf81d0000 0x0 0x020000
+ /* PF0: VF0-1 BAR2 - prefetchable memory */
+ 0xc2000000 0x0 0x00000000 0x1 0xf81f0000 0x0 0x020000
+ /* PF1: VF0-1 BAR0 - non-prefetchable memory */
+ 0x82000000 0x0 0x00000000 0x1 0xf8210000 0x0 0x020000
+ /* PF1: VF0-1 BAR2 - prefetchable memory */
+ 0xc2000000 0x0 0x00000000 0x1 0xf8230000 0x0 0x020000>;
+
+ enetc_port0: ethernet@0,0 {
+ compatible = "fsl,enetc";
+ reg = <0x000000 0 0 0 0>;
+ };
+ enetc_port1: ethernet@0,1 {
+ compatible = "fsl,enetc";
+ reg = <0x000100 0 0 0 0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
index 8a500940f124..1aac81da7e37 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
@@ -137,7 +137,7 @@
&qspi {
status = "okay";
- qflash0: s25fl128s@0 {
+ qflash0: flash@0 {
compatible = "spansion,m25p80";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 70057b4e46e8..6fd6116509cc 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -611,6 +611,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
usb1: usb3@3000000 {
@@ -620,6 +621,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
usb2: usb3@3100000 {
@@ -629,6 +631,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
sata: sata@3200000 {
@@ -675,6 +678,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <4>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -701,6 +705,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <2>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -727,6 +732,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <2>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
index 2f220ec4947b..eec62c63dafe 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
@@ -165,7 +165,7 @@
&qspi {
status = "okay";
- qflash0: s25fl128s@0 {
+ qflash0: flash@0 {
compatible = "spansion,m25p80";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
index 07c665c6e0dc..6a6514d0e5a9 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
@@ -101,7 +101,7 @@
&qspi {
status = "okay";
- qflash0: s25fs512s@0 {
+ qflash0: flash@0 {
compatible = "spansion,m25p80";
#address-cells = <1>;
#size-cells = <1>;
@@ -111,7 +111,7 @@
reg = <0>;
};
- qflash1: s25fs512s@1 {
+ qflash1: flash@1 {
compatible = "spansion,m25p80";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 9a2106e60e19..0e762ca92558 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -202,6 +202,7 @@
compatible = "fsl,ifc", "simple-bus";
reg = <0x0 0x1530000 0x0 0x10000>;
interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
};
qspi: spi@1550000 {
@@ -424,6 +425,7 @@
reg = <0x00 0x21c0500 0x0 0x100>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 1>;
+ status = "disabled";
};
duart1: serial@21c0600 {
@@ -431,6 +433,7 @@
reg = <0x00 0x21c0600 0x0 0x100>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 1>;
+ status = "disabled";
};
duart2: serial@21d0500 {
@@ -438,6 +441,7 @@
reg = <0x0 0x21d0500 0x0 0x100>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 1>;
+ status = "disabled";
};
duart3: serial@21d0600 {
@@ -445,6 +449,7 @@
reg = <0x0 0x21d0600 0x0 0x100>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 1>;
+ status = "disabled";
};
gpio0: gpio@2300000 {
@@ -572,6 +577,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
usb1: usb@3000000 {
@@ -581,6 +587,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
usb2: usb@3100000 {
@@ -590,6 +597,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
sata: sata@3200000 {
@@ -644,6 +652,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <4>;
+ num-viewport = <8>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -670,6 +679,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <2>;
+ num-viewport = <8>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -696,6 +706,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <2>;
+ num-viewport = <8>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index de93b42b1f51..661137ffa319 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -377,6 +377,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
status = "disabled";
};
@@ -452,6 +453,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <4>;
+ num-viewport = <256>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -477,6 +479,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <4>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -502,6 +505,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <8>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
@@ -515,6 +519,96 @@
status = "disabled";
};
+ smmu: iommu@5000000 {
+ compatible = "arm,mmu-500";
+ reg = <0 0x5000000 0 0x800000>;
+ #iommu-cells = <1>;
+ stream-match-mask = <0x7C00>;
+ #global-interrupts = <12>;
+ // global secure fault
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ // combined secure
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ // global non-secure fault
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ // combined non-secure
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ // performance counter interrupts 0-7
+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+ // per context interrupt, 64 interrupts
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
cluster1_core0_watchdog: wdt@c000000 {
compatible = "arm,sp805-wdt", "arm,primecell";
reg = <0x0 0xc000000 0x0 0x1000>;
@@ -576,6 +670,8 @@
reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
<0x00000000 0x08340000 0 0x40000>; /* MC control reg */
msi-parent = <&its>;
+ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
+ dma-coherent;
#address-cells = <3>;
#size-cells = <1>;
@@ -649,5 +745,4 @@
method = "smc";
};
};
-
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index 6d6ca166f86b..d7e78dcd153d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -627,6 +627,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <4>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
msi-parent = <&its>;
#interrupt-cells = <1>;
@@ -648,6 +649,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <4>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
msi-parent = <&its>;
#interrupt-cells = <1>;
@@ -669,6 +671,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <8>;
+ num-viewport = <256>;
bus-range = <0x0 0xff>;
msi-parent = <&its>;
#interrupt-cells = <1>;
@@ -690,6 +693,7 @@
device_type = "pci";
dma-coherent;
num-lanes = <4>;
+ num-viewport = <6>;
bus-range = <0x0 0xff>;
msi-parent = <&its>;
#interrupt-cells = <1>;
@@ -727,6 +731,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
usb1: usb3@3110000 {
@@ -737,6 +742,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
ccn@4000000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
index 6481e5f20e69..9df37b159415 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
@@ -50,6 +50,32 @@
status = "okay";
};
+&fspi {
+ status = "okay";
+
+ mt35xu512aba0: flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,m25p80";
+ m25p,fast-read;
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ spi-rx-bus-width = <8>;
+ spi-tx-bus-width = <8>;
+ };
+
+ mt35xu512aba1: flash@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,m25p80";
+ m25p,fast-read;
+ spi-max-frequency = <50000000>;
+ reg = <1>;
+ spi-rx-bus-width = <8>;
+ spi-tx-bus-width = <8>;
+ };
+};
+
&i2c0 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index a79f5c1ea56d..fe87204850b5 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -398,6 +398,7 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
crypto: crypto@8000000 {
compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
@@ -542,6 +543,19 @@
status = "disabled";
};
+ fspi: spi@20c0000 {
+ compatible = "nxp,lx2160a-fspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x20c0000 0x0 0x10000>,
+ <0x0 0x20000000 0x0 0x10000000>;
+ reg-names = "fspi_base", "fspi_mmap";
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "fspi_en", "fspi";
+ status = "disabled";
+ };
+
esdhc0: esdhc@2140000 {
compatible = "fsl,esdhc";
reg = <0x0 0x2140000 0x0 0x10000>;
@@ -658,6 +672,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
status = "disabled";
};
@@ -668,6 +683,7 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
status = "disabled";
};
@@ -762,5 +778,122 @@
<GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
dma-coherent;
};
+
+ fsl_mc: fsl-mc@80c000000 {
+ compatible = "fsl,qoriq-mc";
+ reg = <0x00000008 0x0c000000 0 0x40>,
+ <0x00000000 0x08340000 0 0x40000>;
+ msi-parent = <&its>;
+ /* iommu-map property is fixed up by u-boot */
+ iommu-map = <0 &smmu 0 0>;
+ dma-coherent;
+ #address-cells = <3>;
+ #size-cells = <1>;
+
+ /*
+ * Region type 0x0 - MC portals
+ * Region type 0x1 - QBMAN portals
+ */
+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
+
+ /*
+ * Define the maximum number of MACs present on the SoC.
+ */
+ dpmacs {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpmac1: dpmac@1 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x1>;
+ };
+
+ dpmac2: dpmac@2 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x2>;
+ };
+
+ dpmac3: dpmac@3 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x3>;
+ };
+
+ dpmac4: dpmac@4 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x4>;
+ };
+
+ dpmac5: dpmac@5 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x5>;
+ };
+
+ dpmac6: dpmac@6 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x6>;
+ };
+
+ dpmac7: dpmac@7 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x7>;
+ };
+
+ dpmac8: dpmac@8 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x8>;
+ };
+
+ dpmac9: dpmac@9 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x9>;
+ };
+
+ dpmac10: dpmac@a {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xa>;
+ };
+
+ dpmac11: dpmac@b {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xb>;
+ };
+
+ dpmac12: dpmac@c {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xc>;
+ };
+
+ dpmac13: dpmac@d {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xd>;
+ };
+
+ dpmac14: dpmac@e {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xe>;
+ };
+
+ dpmac15: dpmac@f {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xf>;
+ };
+
+ dpmac16: dpmac@10 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x10>;
+ };
+
+ dpmac17: dpmac@11 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x11>;
+ };
+
+ dpmac18: dpmac@12 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x12>;
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 64acccc4bfcb..54737bf1772f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -37,7 +37,19 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
};
&i2c1 {
@@ -137,6 +149,29 @@
status = "okay";
};
+&usb3_phy1 {
+ status = "okay";
+};
+
+&usb_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&qspi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi>;
+ status = "okay";
+
+ n25q256a: flash@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "micron,n25q256a", "jedec,spi-nor";
+ spi-max-frequency = <29000000>;
+ };
+};
+
&usdhc1 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc1>;
@@ -195,6 +230,18 @@
>;
};
+ pinctrl_qspi: qspigrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x82
+ MX8MQ_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x82
+ MX8MQ_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x82
+ MX8MQ_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x82
+ MX8MQ_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x82
+ MX8MQ_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x82
+ >;
+ };
+
pinctrl_reg_usdhc2: regusdhc2grpgpio {
fsl,pins = <
MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41
@@ -227,34 +274,34 @@
pinctrl_usdhc1_100mhz: usdhc1-100grp {
fsl,pins = <
- MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85
- MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5
- MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5
- MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5
- MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5
- MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5
- MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5
- MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5
- MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5
- MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5
- MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85
+ MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d
+ MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd
+ MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd
+ MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd
+ MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd
+ MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd
+ MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd
+ MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd
+ MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd
+ MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd
+ MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d
MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
>;
};
pinctrl_usdhc1_200mhz: usdhc1-200grp {
fsl,pins = <
- MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87
- MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7
- MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7
- MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7
- MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7
- MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7
- MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7
- MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7
- MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7
- MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7
- MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87
+ MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f
+ MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf
+ MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf
+ MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf
+ MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf
+ MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf
+ MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf
+ MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf
+ MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf
+ MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf
+ MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f
MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 8e9d6d5ed7b2..9155bd4784eb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -5,13 +5,13 @@
*/
#include <dt-bindings/clock/imx8mq-clock.h>
+#include <dt-bindings/power/imx8mq-power.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "imx8mq-pinfunc.h"
/ {
- /* This should really be the GPC, but we need a driver for this first */
- interrupt-parent = <&gic>;
+ interrupt-parent = <&gpc>;
#address-cells = <2>;
#size-cells = <2>;
@@ -25,6 +25,9 @@
serial1 = &uart2;
serial2 = &uart3;
serial3 = &uart4;
+ spi0 = &ecspi1;
+ spi1 = &ecspi2;
+ spi2 = &ecspi3;
};
ckil: clock-ckil {
@@ -117,6 +120,13 @@
};
};
+ pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ interrupt-affinity = <&A53_0>, <&A53_1>, <&A53_2>, <&A53_3>;
+ };
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -137,6 +147,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x3e000000>;
+ dma-ranges = <0x40000000 0x0 0x40000000 0xc0000000>;
bus@30000000 { /* AIPS1 */
compatible = "fsl,imx8mq-aips-bus", "simple-bus";
@@ -199,6 +210,30 @@
#interrupt-cells = <2>;
};
+ wdog1: watchdog@30280000 {
+ compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt";
+ reg = <0x30280000 0x10000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_WDOG1_ROOT>;
+ status = "disabled";
+ };
+
+ wdog2: watchdog@30290000 {
+ compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt";
+ reg = <0x30290000 0x10000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_WDOG2_ROOT>;
+ status = "disabled";
+ };
+
+ wdog3: watchdog@302a0000 {
+ compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt";
+ reg = <0x302a0000 0x10000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_WDOG3_ROOT>;
+ status = "disabled";
+ };
+
iomuxc: iomuxc@30330000 {
compatible = "fsl,imx8mq-iomuxc";
reg = <0x30330000 0x10000>;
@@ -215,6 +250,20 @@
interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
};
+ snvs: snvs@30370000 {
+ compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+ reg = <0x30370000 0x10000>;
+
+ snvs_rtc: snvs-rtc-lp {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ regmap = <&snvs>;
+ offset = <0x34>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ };
+
clk: clock-controller@30380000 {
compatible = "fsl,imx8mq-ccm";
reg = <0x30380000 0x10000>;
@@ -229,43 +278,172 @@
"clk_ext3", "clk_ext4";
};
- wdog1: watchdog@30280000 {
- compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt";
- reg = <0x30280000 0x10000>;
- interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_WDOG1_ROOT>;
+ gpc: gpc@303a0000 {
+ compatible = "fsl,imx8mq-gpc";
+ reg = <0x303a0000 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ pgc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pgc_mipi: power-domain@0 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_MIPI>;
+ };
+
+ pgc_pcie1: power-domain@1 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_PCIE1>;
+ };
+
+ pgc_otg1: power-domain@2 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_USB_OTG1>;
+ };
+
+ pgc_otg2: power-domain@3 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_USB_OTG2>;
+ };
+
+ pgc_ddr1: power-domain@4 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_DDR1>;
+ };
+
+ pgc_gpu: power-domain@5 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_GPU>;
+ clocks = <&clk IMX8MQ_CLK_GPU_ROOT>,
+ <&clk IMX8MQ_CLK_GPU_SHADER_DIV>,
+ <&clk IMX8MQ_CLK_GPU_AXI>,
+ <&clk IMX8MQ_CLK_GPU_AHB>;
+ };
+
+ pgc_vpu: power-domain@6 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_VPU>;
+ };
+
+ pgc_disp: power-domain@7 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_DISP>;
+ };
+
+ pgc_mipi_csi1: power-domain@8 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_MIPI_CSI1>;
+ };
+
+ pgc_mipi_csi2: power-domain@9 {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_MIPI_CSI2>;
+ };
+
+ pgc_pcie2: power-domain@a {
+ #power-domain-cells = <0>;
+ reg = <IMX8M_POWER_DOMAIN_PCIE2>;
+ };
+ };
+ };
+ };
+
+ bus@30400000 { /* AIPS2 */
+ compatible = "fsl,imx8mq-aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x30400000 0x30400000 0x400000>;
+
+ pwm1: pwm@30660000 {
+ compatible = "fsl,imx8mq-pwm", "fsl,imx27-pwm";
+ reg = <0x30660000 0x10000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_PWM1_ROOT>,
+ <&clk IMX8MQ_CLK_PWM1_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
status = "disabled";
};
- wdog2: watchdog@30290000 {
- compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt";
- reg = <0x30290000 0x10000>;
- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_WDOG2_ROOT>;
+ pwm2: pwm@30670000 {
+ compatible = "fsl,imx8mq-pwm", "fsl,imx27-pwm";
+ reg = <0x30670000 0x10000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_PWM2_ROOT>,
+ <&clk IMX8MQ_CLK_PWM2_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
status = "disabled";
};
- wdog3: watchdog@302a0000 {
- compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt";
- reg = <0x302a0000 0x10000>;
- interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_WDOG3_ROOT>;
+ pwm3: pwm@30680000 {
+ compatible = "fsl,imx8mq-pwm", "fsl,imx27-pwm";
+ reg = <0x30680000 0x10000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_PWM3_ROOT>,
+ <&clk IMX8MQ_CLK_PWM3_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
status = "disabled";
};
- };
- bus@30400000 { /* AIPS2 */
- compatible = "fsl,imx8mq-aips-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x30400000 0x30400000 0x400000>;
+ pwm4: pwm@30690000 {
+ compatible = "fsl,imx8mq-pwm", "fsl,imx27-pwm";
+ reg = <0x30690000 0x10000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_PWM4_ROOT>,
+ <&clk IMX8MQ_CLK_PWM4_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
};
bus@30800000 { /* AIPS3 */
compatible = "fsl,imx8mq-aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x30800000 0x30800000 0x400000>;
+ ranges = <0x30800000 0x30800000 0x400000>,
+ <0x08000000 0x08000000 0x10000000>;
+
+ ecspi1: spi@30820000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx8mq-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30820000 0x10000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_ECSPI1_ROOT>,
+ <&clk IMX8MQ_CLK_ECSPI1_ROOT>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ecspi2: spi@30830000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx8mq-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30830000 0x10000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_ECSPI2_ROOT>,
+ <&clk IMX8MQ_CLK_ECSPI2_ROOT>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ecspi3: spi@30840000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx8mq-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30840000 0x10000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_ECSPI3_ROOT>,
+ <&clk IMX8MQ_CLK_ECSPI3_ROOT>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
uart1: serial@30860000 {
compatible = "fsl,imx8mq-uart",
@@ -360,6 +538,8 @@
<&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
<&clk IMX8MQ_CLK_USDHC1_ROOT>;
clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
+ assigned-clock-rates = <400000000>;
fsl,tuning-start-tap = <20>;
fsl,tuning-step = <2>;
bus-width = <4>;
@@ -381,6 +561,20 @@
status = "disabled";
};
+ qspi0: spi@30bb0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx8mq-qspi", "fsl,imx7d-qspi";
+ reg = <0x30bb0000 0x10000>,
+ <0x08000000 0x10000000>;
+ reg-names = "QuadSPI", "QuadSPI-memory";
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_QSPI_ROOT>,
+ <&clk IMX8MQ_CLK_QSPI_ROOT>;
+ clock-names = "qspi_en", "qspi";
+ status = "disabled";
+ };
+
fec1: ethernet@30be0000 {
compatible = "fsl,imx8mq-fec", "fsl,imx6sx-fec";
reg = <0x30be0000 0x10000>;
@@ -400,6 +594,70 @@
};
};
+ usb_dwc3_0: usb@38100000 {
+ compatible = "fsl,imx8mq-dwc3", "snps,dwc3";
+ reg = <0x38100000 0x10000>;
+ clocks = <&clk IMX8MQ_CLK_USB_BUS>,
+ <&clk IMX8MQ_CLK_USB_CORE_REF>,
+ <&clk IMX8MQ_CLK_USB1_CTRL_ROOT>;
+ clock-names = "bus_early", "ref", "suspend";
+ assigned-clocks = <&clk IMX8MQ_CLK_USB_BUS>,
+ <&clk IMX8MQ_CLK_USB_CORE_REF>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS2_PLL_500M>,
+ <&clk IMX8MQ_SYS1_PLL_100M>;
+ assigned-clock-rates = <500000000>, <100000000>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg1>;
+ usb3-resume-missing-cas;
+ status = "disabled";
+ };
+
+ usb3_phy0: usb-phy@381f0040 {
+ compatible = "fsl,imx8mq-usb-phy";
+ reg = <0x381f0040 0x40>;
+ clocks = <&clk IMX8MQ_CLK_USB1_PHY_ROOT>;
+ clock-names = "phy";
+ assigned-clocks = <&clk IMX8MQ_CLK_USB_PHY_REF>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_100M>;
+ assigned-clock-rates = <100000000>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ usb_dwc3_1: usb@38200000 {
+ compatible = "fsl,imx8mq-dwc3", "snps,dwc3";
+ reg = <0x38200000 0x10000>;
+ clocks = <&clk IMX8MQ_CLK_USB_BUS>,
+ <&clk IMX8MQ_CLK_USB_CORE_REF>,
+ <&clk IMX8MQ_CLK_USB2_CTRL_ROOT>;
+ clock-names = "bus_early", "ref", "suspend";
+ assigned-clocks = <&clk IMX8MQ_CLK_USB_BUS>,
+ <&clk IMX8MQ_CLK_USB_CORE_REF>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS2_PLL_500M>,
+ <&clk IMX8MQ_SYS1_PLL_100M>;
+ assigned-clock-rates = <500000000>, <100000000>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg2>;
+ usb3-resume-missing-cas;
+ status = "disabled";
+ };
+
+ usb3_phy1: usb-phy@382f0040 {
+ compatible = "fsl,imx8mq-usb-phy";
+ reg = <0x382f0040 0x40>;
+ clocks = <&clk IMX8MQ_CLK_USB2_PHY_ROOT>;
+ clock-names = "phy";
+ assigned-clocks = <&clk IMX8MQ_CLK_USB_PHY_REF>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_100M>;
+ assigned-clock-rates = <100000000>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@38800000 {
compatible = "arm,gic-v3";
reg = <0x38800000 0x10000>, /* GIC Dist */
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
new file mode 100644
index 000000000000..03aad66545c5
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2017~2018 NXP
+ */
+
+/dts-v1/;
+
+#include "imx8qxp.dtsi"
+
+/ {
+ model = "Freescale i.MX8QXP MEK";
+ compatible = "fsl,imx8qxp-mek", "fsl,imx8qxp";
+
+ chosen {
+ stdout-path = &adma_lpuart0;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0 0x40000000>;
+ };
+
+ reg_usdhc2_vmmc: usdhc2-vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "SD1_SPWR";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&adma_lpuart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart0>;
+ status = "okay";
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+ };
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <8>;
+ no-sd;
+ no-sdio;
+ non-removable;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ IMX8QXP_ENET0_MDC_CONN_ENET0_MDC 0x06000020
+ IMX8QXP_ENET0_MDIO_CONN_ENET0_MDIO 0x06000020
+ IMX8QXP_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL 0x06000020
+ IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC 0x06000020
+ IMX8QXP_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 0x06000020
+ IMX8QXP_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 0x06000020
+ IMX8QXP_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 0x06000020
+ IMX8QXP_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 0x06000020
+ IMX8QXP_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC 0x06000020
+ IMX8QXP_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL 0x06000020
+ IMX8QXP_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 0x06000020
+ IMX8QXP_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 0x06000020
+ IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 0x06000020
+ IMX8QXP_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 0x06000020
+ >;
+ };
+
+ pinctrl_lpuart0: lpuart0grp {
+ fsl,pins = <
+ IMX8QXP_UART0_RX_ADMA_UART0_RX 0x06000020
+ IMX8QXP_UART0_TX_ADMA_UART0_TX 0x06000020
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041
+ IMX8QXP_EMMC0_CMD_CONN_EMMC0_CMD 0x00000021
+ IMX8QXP_EMMC0_DATA0_CONN_EMMC0_DATA0 0x00000021
+ IMX8QXP_EMMC0_DATA1_CONN_EMMC0_DATA1 0x00000021
+ IMX8QXP_EMMC0_DATA2_CONN_EMMC0_DATA2 0x00000021
+ IMX8QXP_EMMC0_DATA3_CONN_EMMC0_DATA3 0x00000021
+ IMX8QXP_EMMC0_DATA4_CONN_EMMC0_DATA4 0x00000021
+ IMX8QXP_EMMC0_DATA5_CONN_EMMC0_DATA5 0x00000021
+ IMX8QXP_EMMC0_DATA6_CONN_EMMC0_DATA6 0x00000021
+ IMX8QXP_EMMC0_DATA7_CONN_EMMC0_DATA7 0x00000021
+ IMX8QXP_EMMC0_STROBE_CONN_EMMC0_STROBE 0x00000041
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ IMX8QXP_USDHC1_CLK_CONN_USDHC1_CLK 0x06000041
+ IMX8QXP_USDHC1_CMD_CONN_USDHC1_CMD 0x00000021
+ IMX8QXP_USDHC1_DATA0_CONN_USDHC1_DATA0 0x00000021
+ IMX8QXP_USDHC1_DATA1_CONN_USDHC1_DATA1 0x00000021
+ IMX8QXP_USDHC1_DATA2_CONN_USDHC1_DATA2 0x00000021
+ IMX8QXP_USDHC1_DATA3_CONN_USDHC1_DATA3 0x00000021
+ IMX8QXP_USDHC1_VSELECT_CONN_USDHC1_VSELECT 0x00000021
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
new file mode 100644
index 000000000000..4c3dd95ed488
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/clock/imx8-clock.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/pads-imx8qxp.h>
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc3;
+ serial0 = &adma_lpuart0;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ /* We have 1 cluster with 4 Cortex-A35 cores */
+ A35_0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&A35_L2>;
+ };
+
+ A35_1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&A35_L2>;
+ };
+
+ A35_2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ next-level-cache = <&A35_L2>;
+ };
+
+ A35_3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ next-level-cache = <&A35_L2>;
+ };
+
+ A35_L2: l2-cache0 {
+ compatible = "cache";
+ };
+ };
+
+ gic: interrupt-controller@51a00000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x51a00000 0 0x10000>, /* GIC Dist */
+ <0x0 0x51b00000 0 0xc0000>; /* GICR (RD_base + SGI_base) */
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ scu {
+ compatible = "fsl,imx-scu";
+ mbox-names = "tx0", "tx1", "tx2", "tx3",
+ "rx0", "rx1", "rx2", "rx3";
+ mboxes = <&lsio_mu1 0 0
+ &lsio_mu1 0 1
+ &lsio_mu1 0 2
+ &lsio_mu1 0 3
+ &lsio_mu1 1 0
+ &lsio_mu1 1 1
+ &lsio_mu1 1 2
+ &lsio_mu1 1 3>;
+
+ clk: clock-controller {
+ compatible = "fsl,imx8qxp-clk";
+ #clock-cells = <1>;
+ clocks = <&xtal32k &xtal24m>;
+ clock-names = "xtal_32KHz", "xtal_24Mhz";
+ };
+
+ iomuxc: pinctrl {
+ compatible = "fsl,imx8qxp-iomuxc";
+ };
+
+ pd: imx8qx-pd {
+ compatible = "fsl,imx8qxp-scu-pd";
+ #power-domain-cells = <1>;
+ };
+
+ rtc: rtc {
+ compatible = "fsl,imx8qxp-sc-rtc";
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* Physical Secure */
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* Physical Non-Secure */
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* Virtual */
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* Hypervisor */
+ };
+
+ xtal32k: clock-xtal32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "xtal_32KHz";
+ };
+
+ xtal24m: clock-xtal24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "xtal_24MHz";
+ };
+
+ adma_subsys: bus@59000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x59000000 0x0 0x59000000 0x2000000>;
+
+ adma_lpcg: clock-controller@59000000 {
+ compatible = "fsl,imx8qxp-lpcg-adma";
+ reg = <0x59000000 0x2000000>;
+ #clock-cells = <1>;
+ };
+
+ adma_lpuart0: serial@5a060000 {
+ compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
+ reg = <0x5a060000 0x1000>;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ clocks = <&adma_lpcg IMX_ADMA_LPCG_UART0_BAUD_CLK>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_UART_0>;
+ status = "disabled";
+ };
+
+ adma_i2c0: i2c@5a800000 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x5a800000 0x4000>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C0_CLK>;
+ clock-names = "per";
+ assigned-clocks = <&clk IMX_ADMA_I2C0_CLK>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_I2C_0>;
+ status = "disabled";
+ };
+
+ adma_i2c1: i2c@5a810000 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x5a810000 0x4000>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C1_CLK>;
+ clock-names = "per";
+ assigned-clocks = <&clk IMX_ADMA_I2C1_CLK>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_I2C_1>;
+ status = "disabled";
+ };
+
+ adma_i2c2: i2c@5a820000 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x5a820000 0x4000>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C2_CLK>;
+ clock-names = "per";
+ assigned-clocks = <&clk IMX_ADMA_I2C2_CLK>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_I2C_2>;
+ status = "disabled";
+ };
+
+ adma_i2c3: i2c@5a830000 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+ reg = <0x5a830000 0x4000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C3_CLK>;
+ clock-names = "per";
+ assigned-clocks = <&clk IMX_ADMA_I2C3_CLK>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_I2C_3>;
+ status = "disabled";
+ };
+ };
+
+ conn_subsys: bus@5b000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x5b000000 0x0 0x5b000000 0x1000000>;
+
+ conn_lpcg: clock-controller@5b200000 {
+ compatible = "fsl,imx8qxp-lpcg-conn";
+ reg = <0x5b200000 0xb0000>;
+ #clock-cells = <1>;
+ };
+
+ usdhc1: mmc@5b010000 {
+ compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b010000 0x10000>;
+ clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC0_IPG_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_SDHC0_PER_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_SDHC0_HCLK>;
+ clock-names = "ipg", "per", "ahb";
+ assigned-clocks = <&clk IMX_CONN_SDHC0_CLK>;
+ assigned-clock-rates = <200000000>;
+ power-domains = <&pd IMX_SC_R_SDHC_0>;
+ status = "disabled";
+ };
+
+ usdhc2: mmc@5b020000 {
+ compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b020000 0x10000>;
+ clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC1_IPG_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_SDHC1_PER_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_SDHC1_HCLK>;
+ clock-names = "ipg", "per", "ahb";
+ assigned-clocks = <&clk IMX_CONN_SDHC1_CLK>;
+ assigned-clock-rates = <200000000>;
+ power-domains = <&pd IMX_SC_R_SDHC_1>;
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+ usdhc3: mmc@5b030000 {
+ compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b030000 0x10000>;
+ clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC2_IPG_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_SDHC2_PER_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_SDHC2_HCLK>;
+ clock-names = "ipg", "per", "ahb";
+ assigned-clocks = <&clk IMX_CONN_SDHC2_CLK>;
+ assigned-clock-rates = <200000000>;
+ power-domains = <&pd IMX_SC_R_SDHC_2>;
+ status = "disabled";
+ };
+
+ fec1: ethernet@5b040000 {
+ compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec";
+ reg = <0x5b040000 0x10000>;
+ interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&conn_lpcg IMX_CONN_LPCG_ENET0_IPG_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_ENET0_AHB_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_ENET0_TX_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_ENET0_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "enet_clk_ref", "ptp";
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ power-domains = <&pd IMX_SC_R_ENET_0>;
+ status = "disabled";
+ };
+
+ fec2: ethernet@5b050000 {
+ compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec";
+ reg = <0x5b050000 0x10000>;
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&conn_lpcg IMX_CONN_LPCG_ENET1_IPG_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_ENET1_AHB_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_ENET1_TX_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_ENET1_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "enet_clk_ref", "ptp";
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ power-domains = <&pd IMX_SC_R_ENET_1>;
+ status = "disabled";
+ };
+ };
+
+ lsio_subsys: bus@5d000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x5d000000 0x0 0x5d000000 0x1000000>;
+
+ lsio_lpcg: clock-controller@5d400000 {
+ compatible = "fsl,imx8qxp-lpcg-lsio";
+ reg = <0x5d400000 0x400000>;
+ #clock-cells = <1>;
+ };
+
+ lsio_mu0: mailbox@5d1b0000 {
+ compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ reg = <0x5d1b0000 0x10000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <0>;
+ status = "disabled";
+ };
+
+ lsio_mu1: mailbox@5d1c0000 {
+ compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ reg = <0x5d1c0000 0x10000>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ };
+
+ lsio_mu3: mailbox@5d1e0000 {
+ compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ reg = <0x5d1e0000 0x10000>;
+ interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <0>;
+ status = "disabled";
+ };
+
+ lsio_mu4: mailbox@5d1f0000 {
+ compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ reg = <0x5d1f0000 0x10000>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <0>;
+ status = "disabled";
+ };
+
+ lsio_gpio0: gpio@5d080000 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+ reg = <0x5d080000 0x10000>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_0>;
+ };
+
+ lsio_gpio1: gpio@5d090000 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+ reg = <0x5d090000 0x10000>;
+ interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_1>;
+ };
+
+ lsio_gpio2: gpio@5d0a0000 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+ reg = <0x5d0a0000 0x10000>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_2>;
+ };
+
+ lsio_gpio3: gpio@5d0b0000 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+ reg = <0x5d0b0000 0x10000>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_3>;
+ };
+
+ lsio_gpio4: gpio@5d0c0000 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+ reg = <0x5d0c0000 0x10000>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_4>;
+ };
+
+ lsio_gpio5: gpio@5d0d0000 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+ reg = <0x5d0d0000 0x10000>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_5>;
+ };
+
+ lsio_gpio6: gpio@5d0e0000 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+ reg = <0x5d0e0000 0x10000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_6>;
+ };
+
+ lsio_gpio7: gpio@5d0f0000 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+ reg = <0x5d0f0000 0x10000>;
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_7>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
index 46435466f1ab..e035cf195b19 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -581,8 +581,7 @@
sd-uhs-sdr50;
sd-uhs-sdr104;
disable-wp;
- cd-inverted;
- cd-gpios = <&gpio25 3 0>;
+ cd-gpios = <&gpio25 3 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&sd_pmx_func
&sd_clk_cfg_func
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 20ae40df61d5..2f19e0e5b7cf 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -56,7 +56,7 @@
};
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x0>;
enable-method = "psci";
@@ -70,7 +70,7 @@
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x1>;
enable-method = "psci";
@@ -83,7 +83,7 @@
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x2>;
enable-method = "psci";
@@ -96,7 +96,7 @@
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x3>;
enable-method = "psci";
@@ -109,7 +109,7 @@
};
cpu4: cpu@100 {
- compatible = "arm,cortex-a73", "arm,armv8";
+ compatible = "arm,cortex-a73";
device_type = "cpu";
reg = <0x0 0x100>;
enable-method = "psci";
@@ -123,7 +123,7 @@
};
cpu5: cpu@101 {
- compatible = "arm,cortex-a73", "arm,armv8";
+ compatible = "arm,cortex-a73";
device_type = "cpu";
reg = <0x0 0x101>;
enable-method = "psci";
@@ -136,7 +136,7 @@
};
cpu6: cpu@102 {
- compatible = "arm,cortex-a73", "arm,armv8";
+ compatible = "arm,cortex-a73";
device_type = "cpu";
reg = <0x0 0x102>;
enable-method = "psci";
@@ -149,7 +149,7 @@
};
cpu7: cpu@103 {
- compatible = "arm,cortex-a73", "arm,armv8";
+ compatible = "arm,cortex-a73";
device_type = "cpu";
reg = <0x0 0x103>;
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
index a5bd6d80b226..2ed06e4588b8 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
@@ -56,56 +56,56 @@
};
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x3>;
enable-method = "psci";
};
cpu4: cpu@100 {
- compatible = "arm,cortex-a73", "arm,armv8";
+ compatible = "arm,cortex-a73";
device_type = "cpu";
reg = <0x0 0x100>;
enable-method = "psci";
};
cpu5: cpu@101 {
- compatible = "arm,cortex-a73", "arm,armv8";
+ compatible = "arm,cortex-a73";
device_type = "cpu";
reg = <0x0 0x101>;
enable-method = "psci";
};
cpu6: cpu@102 {
- compatible = "arm,cortex-a73", "arm,armv8";
+ compatible = "arm,cortex-a73";
device_type = "cpu";
reg = <0x0 0x102>;
enable-method = "psci";
};
cpu7: cpu@103 {
- compatible = "arm,cortex-a73", "arm,armv8";
+ compatible = "arm,cortex-a73";
device_type = "cpu";
reg = <0x0 0x103>;
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
index 32716c96b457..c563d3eb2d98 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS File for HiSilicon Poplar Development Board
*
* Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
- *
- * Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
index 7c0fddd7c8cf..13821a0ff524 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DTS File for HiSilicon Hi3798cv200 SoC.
*
* Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
- *
- * Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <dt-bindings/clock/histb-clock.h>
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 610235028cc7..c14205cd6bf5 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -118,6 +118,7 @@
reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
clocks = <&pmic>;
clock-names = "ext_clock";
+ post-power-on-delay-ms = <10>;
power-off-delay-us = <10>;
};
@@ -300,7 +301,6 @@
dwmmc_0: dwmmc0@f723d000 {
cap-mmc-highspeed;
- mmc-hs200-1_8v;
non-removable;
bus-width = <0x8>;
vmmc-supply = <&ldo19>;
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index aec9e371c2a7..108e2a4227f6 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -81,7 +81,7 @@
};
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x0>;
enable-method = "psci";
@@ -94,7 +94,7 @@
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x1>;
enable-method = "psci";
@@ -107,7 +107,7 @@
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x2>;
enable-method = "psci";
@@ -120,7 +120,7 @@
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x3>;
enable-method = "psci";
@@ -133,7 +133,7 @@
};
cpu4: cpu@100 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x100>;
enable-method = "psci";
@@ -146,7 +146,7 @@
};
cpu5: cpu@101 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x101>;
enable-method = "psci";
@@ -159,7 +159,7 @@
};
cpu6: cpu@102 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x102>;
enable-method = "psci";
@@ -172,7 +172,7 @@
};
cpu7: cpu@103 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0 0x103>;
enable-method = "psci";
@@ -319,6 +319,8 @@
clock-names = "uartclk", "apb_pclk";
pinctrl-names = "default";
pinctrl-0 = <&uart1_pmx_func &uart1_cfg_func1 &uart1_cfg_func2>;
+ dmas = <&dma0 8 &dma0 9>;
+ dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/hisilicon/hip05.dtsi b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
index 4b472a302cd8..d321edc09c3f 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
@@ -87,7 +87,7 @@
cpu0: cpu@20000 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20000>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -95,7 +95,7 @@
cpu1: cpu@20001 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20001>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -103,7 +103,7 @@
cpu2: cpu@20002 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20002>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -111,7 +111,7 @@
cpu3: cpu@20003 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20003>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -119,7 +119,7 @@
cpu4: cpu@20100 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20100>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -127,7 +127,7 @@
cpu5: cpu@20101 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20101>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -135,7 +135,7 @@
cpu6: cpu@20102 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20102>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -143,7 +143,7 @@
cpu7: cpu@20103 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20103>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -151,7 +151,7 @@
cpu8: cpu@20200 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20200>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -159,7 +159,7 @@
cpu9: cpu@20201 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20201>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -167,7 +167,7 @@
cpu10: cpu@20202 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20202>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -175,7 +175,7 @@
cpu11: cpu@20203 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20203>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -183,7 +183,7 @@
cpu12: cpu@20300 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20300>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -191,7 +191,7 @@
cpu13: cpu@20301 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20301>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -199,7 +199,7 @@
cpu14: cpu@20302 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20302>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -207,7 +207,7 @@
cpu15: cpu@20303 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x20303>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
index d78a6a755d03..56625587b6de 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
@@ -87,7 +87,7 @@
cpu0: cpu@10000 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10000>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -95,7 +95,7 @@
cpu1: cpu@10001 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10001>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -103,7 +103,7 @@
cpu2: cpu@10002 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10002>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -111,7 +111,7 @@
cpu3: cpu@10003 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10003>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -119,7 +119,7 @@
cpu4: cpu@10100 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10100>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -127,7 +127,7 @@
cpu5: cpu@10101 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10101>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -135,7 +135,7 @@
cpu6: cpu@10102 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10102>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -143,7 +143,7 @@
cpu7: cpu@10103 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10103>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -151,7 +151,7 @@
cpu8: cpu@10200 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10200>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -159,7 +159,7 @@
cpu9: cpu@10201 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10201>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -167,7 +167,7 @@
cpu10: cpu@10202 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10202>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -175,7 +175,7 @@
cpu11: cpu@10203 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10203>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -183,7 +183,7 @@
cpu12: cpu@10300 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10300>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -191,7 +191,7 @@
cpu13: cpu@10301 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10301>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -199,7 +199,7 @@
cpu14: cpu@10302 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10302>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -207,7 +207,7 @@
cpu15: cpu@10303 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x10303>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
index c33adefc3061..28bd4389441f 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -270,7 +270,7 @@
cpu0: cpu@10000 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10000>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -279,7 +279,7 @@
cpu1: cpu@10001 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10001>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -288,7 +288,7 @@
cpu2: cpu@10002 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10002>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -297,7 +297,7 @@
cpu3: cpu@10003 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10003>;
enable-method = "psci";
next-level-cache = <&cluster0_l2>;
@@ -306,7 +306,7 @@
cpu4: cpu@10100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10100>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -315,7 +315,7 @@
cpu5: cpu@10101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10101>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -324,7 +324,7 @@
cpu6: cpu@10102 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10102>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -333,7 +333,7 @@
cpu7: cpu@10103 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10103>;
enable-method = "psci";
next-level-cache = <&cluster1_l2>;
@@ -342,7 +342,7 @@
cpu8: cpu@10200 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10200>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -351,7 +351,7 @@
cpu9: cpu@10201 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10201>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -360,7 +360,7 @@
cpu10: cpu@10202 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10202>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -369,7 +369,7 @@
cpu11: cpu@10203 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10203>;
enable-method = "psci";
next-level-cache = <&cluster2_l2>;
@@ -378,7 +378,7 @@
cpu12: cpu@10300 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10300>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -387,7 +387,7 @@
cpu13: cpu@10301 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10301>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -396,7 +396,7 @@
cpu14: cpu@10302 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10302>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -405,7 +405,7 @@
cpu15: cpu@10303 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x10303>;
enable-method = "psci";
next-level-cache = <&cluster3_l2>;
@@ -414,7 +414,7 @@
cpu16: cpu@30000 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30000>;
enable-method = "psci";
next-level-cache = <&cluster4_l2>;
@@ -423,7 +423,7 @@
cpu17: cpu@30001 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30001>;
enable-method = "psci";
next-level-cache = <&cluster4_l2>;
@@ -432,7 +432,7 @@
cpu18: cpu@30002 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30002>;
enable-method = "psci";
next-level-cache = <&cluster4_l2>;
@@ -441,7 +441,7 @@
cpu19: cpu@30003 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30003>;
enable-method = "psci";
next-level-cache = <&cluster4_l2>;
@@ -450,7 +450,7 @@
cpu20: cpu@30100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30100>;
enable-method = "psci";
next-level-cache = <&cluster5_l2>;
@@ -459,7 +459,7 @@
cpu21: cpu@30101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30101>;
enable-method = "psci";
next-level-cache = <&cluster5_l2>;
@@ -468,7 +468,7 @@
cpu22: cpu@30102 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30102>;
enable-method = "psci";
next-level-cache = <&cluster5_l2>;
@@ -477,7 +477,7 @@
cpu23: cpu@30103 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30103>;
enable-method = "psci";
next-level-cache = <&cluster5_l2>;
@@ -486,7 +486,7 @@
cpu24: cpu@30200 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30200>;
enable-method = "psci";
next-level-cache = <&cluster6_l2>;
@@ -495,7 +495,7 @@
cpu25: cpu@30201 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30201>;
enable-method = "psci";
next-level-cache = <&cluster6_l2>;
@@ -504,7 +504,7 @@
cpu26: cpu@30202 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30202>;
enable-method = "psci";
next-level-cache = <&cluster6_l2>;
@@ -513,7 +513,7 @@
cpu27: cpu@30203 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30203>;
enable-method = "psci";
next-level-cache = <&cluster6_l2>;
@@ -522,7 +522,7 @@
cpu28: cpu@30300 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30300>;
enable-method = "psci";
next-level-cache = <&cluster7_l2>;
@@ -531,7 +531,7 @@
cpu29: cpu@30301 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30301>;
enable-method = "psci";
next-level-cache = <&cluster7_l2>;
@@ -540,7 +540,7 @@
cpu30: cpu@30302 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30302>;
enable-method = "psci";
next-level-cache = <&cluster7_l2>;
@@ -549,7 +549,7 @@
cpu31: cpu@30303 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x30303>;
enable-method = "psci";
next-level-cache = <&cluster7_l2>;
@@ -558,7 +558,7 @@
cpu32: cpu@50000 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50000>;
enable-method = "psci";
next-level-cache = <&cluster8_l2>;
@@ -567,7 +567,7 @@
cpu33: cpu@50001 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50001>;
enable-method = "psci";
next-level-cache = <&cluster8_l2>;
@@ -576,7 +576,7 @@
cpu34: cpu@50002 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50002>;
enable-method = "psci";
next-level-cache = <&cluster8_l2>;
@@ -585,7 +585,7 @@
cpu35: cpu@50003 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50003>;
enable-method = "psci";
next-level-cache = <&cluster8_l2>;
@@ -594,7 +594,7 @@
cpu36: cpu@50100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50100>;
enable-method = "psci";
next-level-cache = <&cluster9_l2>;
@@ -603,7 +603,7 @@
cpu37: cpu@50101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50101>;
enable-method = "psci";
next-level-cache = <&cluster9_l2>;
@@ -612,7 +612,7 @@
cpu38: cpu@50102 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50102>;
enable-method = "psci";
next-level-cache = <&cluster9_l2>;
@@ -621,7 +621,7 @@
cpu39: cpu@50103 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50103>;
enable-method = "psci";
next-level-cache = <&cluster9_l2>;
@@ -630,7 +630,7 @@
cpu40: cpu@50200 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50200>;
enable-method = "psci";
next-level-cache = <&cluster10_l2>;
@@ -639,7 +639,7 @@
cpu41: cpu@50201 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50201>;
enable-method = "psci";
next-level-cache = <&cluster10_l2>;
@@ -648,7 +648,7 @@
cpu42: cpu@50202 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50202>;
enable-method = "psci";
next-level-cache = <&cluster10_l2>;
@@ -657,7 +657,7 @@
cpu43: cpu@50203 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50203>;
enable-method = "psci";
next-level-cache = <&cluster10_l2>;
@@ -666,7 +666,7 @@
cpu44: cpu@50300 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50300>;
enable-method = "psci";
next-level-cache = <&cluster11_l2>;
@@ -675,7 +675,7 @@
cpu45: cpu@50301 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50301>;
enable-method = "psci";
next-level-cache = <&cluster11_l2>;
@@ -684,7 +684,7 @@
cpu46: cpu@50302 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50302>;
enable-method = "psci";
next-level-cache = <&cluster11_l2>;
@@ -693,7 +693,7 @@
cpu47: cpu@50303 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x50303>;
enable-method = "psci";
next-level-cache = <&cluster11_l2>;
@@ -702,7 +702,7 @@
cpu48: cpu@70000 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70000>;
enable-method = "psci";
next-level-cache = <&cluster12_l2>;
@@ -711,7 +711,7 @@
cpu49: cpu@70001 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70001>;
enable-method = "psci";
next-level-cache = <&cluster12_l2>;
@@ -720,7 +720,7 @@
cpu50: cpu@70002 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70002>;
enable-method = "psci";
next-level-cache = <&cluster12_l2>;
@@ -729,7 +729,7 @@
cpu51: cpu@70003 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70003>;
enable-method = "psci";
next-level-cache = <&cluster12_l2>;
@@ -738,7 +738,7 @@
cpu52: cpu@70100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70100>;
enable-method = "psci";
next-level-cache = <&cluster13_l2>;
@@ -747,7 +747,7 @@
cpu53: cpu@70101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70101>;
enable-method = "psci";
next-level-cache = <&cluster13_l2>;
@@ -756,7 +756,7 @@
cpu54: cpu@70102 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70102>;
enable-method = "psci";
next-level-cache = <&cluster13_l2>;
@@ -765,7 +765,7 @@
cpu55: cpu@70103 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70103>;
enable-method = "psci";
next-level-cache = <&cluster13_l2>;
@@ -774,7 +774,7 @@
cpu56: cpu@70200 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70200>;
enable-method = "psci";
next-level-cache = <&cluster14_l2>;
@@ -783,7 +783,7 @@
cpu57: cpu@70201 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70201>;
enable-method = "psci";
next-level-cache = <&cluster14_l2>;
@@ -792,7 +792,7 @@
cpu58: cpu@70202 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70202>;
enable-method = "psci";
next-level-cache = <&cluster14_l2>;
@@ -801,7 +801,7 @@
cpu59: cpu@70203 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70203>;
enable-method = "psci";
next-level-cache = <&cluster14_l2>;
@@ -810,7 +810,7 @@
cpu60: cpu@70300 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70300>;
enable-method = "psci";
next-level-cache = <&cluster15_l2>;
@@ -819,7 +819,7 @@
cpu61: cpu@70301 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70301>;
enable-method = "psci";
next-level-cache = <&cluster15_l2>;
@@ -828,7 +828,7 @@
cpu62: cpu@70302 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70302>;
enable-method = "psci";
next-level-cache = <&cluster15_l2>;
@@ -837,7 +837,7 @@
cpu63: cpu@70303 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x70303>;
enable-method = "psci";
next-level-cache = <&cluster15_l2>;
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
index 4bde7b6f2b11..c8dc9c20fba3 100644
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -21,27 +21,27 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
next-level-cache = <&L2_0>;
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&L2_0>;
};
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
next-level-cache = <&L2_0>;
};
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
next-level-cache = <&L2_0>;
diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
index 16ced1ff1ad3..82c6645b58b7 100644
--- a/arch/arm64/boot/dts/lg/lg1313.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
@@ -21,27 +21,27 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
next-level-cache = <&L2_0>;
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&L2_0>;
};
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
next-level-cache = <&L2_0>;
};
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
next-level-cache = <&L2_0>;
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
index 2eff1f927471..caed4334f27d 100644
--- a/arch/arm64/boot/dts/marvell/Makefile
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -2,6 +2,7 @@
# Mvebu SoC Family
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-uDPU.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-clearfog-gt-8k.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-db.dtb
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
index 846003bb480c..6be019e1888e 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
@@ -46,11 +46,16 @@
/* J9 */
&pcie0 {
status = "okay";
+ phys = <&comphy1 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
};
/* J6 */
&sata {
status = "okay";
+ phys = <&comphy2 0>;
+ phy-names = "sata-phy";
};
/* J1 */
@@ -156,6 +161,11 @@
reg = <0>;
label = "cpu";
ethernet = <&eth0>;
+ phy-mode = "rgmii-id";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
};
port@1 {
@@ -196,6 +206,8 @@
};
&eth0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>, <&smi_pins>;
phy-mode = "rgmii-id";
status = "okay";
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
new file mode 100644
index 000000000000..bd4aab6092e0
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device tree for the uDPU board.
+ * Based on Marvell Armada 3720 development board (DB-88F3720-DDR3)
+ * Copyright (C) 2016 Marvell
+ * Copyright (C) 2019 Methode Electronics
+ * Copyright (C) 2019 Telus
+ *
+ * Vladimir Vid <vladimir.vid@sartura.hr>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "armada-372x.dtsi"
+
+/ {
+ model = "Methode uDPU Board";
+ compatible = "methode,udpu", "marvell,armada3720";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
+ };
+
+ leds {
+ pinctrl-names = "default";
+ compatible = "gpio-leds";
+
+ power1 {
+ label = "udpu:green:power";
+ gpios = <&gpionb 11 GPIO_ACTIVE_LOW>;
+ };
+
+ power2 {
+ label = "udpu:red:power";
+ gpios = <&gpionb 12 GPIO_ACTIVE_LOW>;
+ };
+
+ network1 {
+ label = "udpu:green:network";
+ gpios = <&gpionb 13 GPIO_ACTIVE_LOW>;
+ };
+
+ network2 {
+ label = "udpu:red:network";
+ gpios = <&gpionb 14 GPIO_ACTIVE_LOW>;
+ };
+
+ alarm1 {
+ label = "udpu:green:alarm";
+ gpios = <&gpionb 15 GPIO_ACTIVE_LOW>;
+ };
+
+ alarm2 {
+ label = "udpu:red:alarm";
+ gpios = <&gpionb 16 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ sfp_eth0: sfp-eth0 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c0>;
+ los-gpio = <&gpiosb 2 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpio = <&gpiosb 3 GPIO_ACTIVE_LOW>;
+ tx-disable-gpio = <&gpiosb 4 GPIO_ACTIVE_HIGH>;
+ tx-fault-gpio = <&gpiosb 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ sfp_eth1: sfp-eth1 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c1>;
+ los-gpio = <&gpiosb 7 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpio = <&gpiosb 8 GPIO_ACTIVE_LOW>;
+ tx-disable-gpio = <&gpiosb 9 GPIO_ACTIVE_HIGH>;
+ tx-fault-gpio = <&gpiosb 10 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&sdhci0 {
+ status = "okay";
+ bus-width = <8>;
+ mmc-ddr-1_8v;
+ mmc-hs400-1_8v;
+ marvell,pad-type = "fixed-1-8v";
+ non-removable;
+ no-sd;
+ no-sdio;
+};
+
+&spi0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_quad_pins>;
+
+ m25p80@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <54000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* only the bootloader is located on the SPI */
+ partition@0 {
+ label = "uboot";
+ reg = <0 0x400000>;
+ };
+ };
+ };
+};
+
+&i2c0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+};
+
+&i2c1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+
+ lm75@48 {
+ status = "okay";
+ compatible = "lm75";
+ reg = <0x48>;
+ };
+
+ lm75@49 {
+ status = "okay";
+ compatible = "lm75";
+ reg = <0x49>;
+ };
+};
+
+&eth0 {
+ phy-mode = "sgmii";
+ status = "okay";
+ managed = "in-band-status";
+ sfp = <&sfp_eth0>;
+};
+
+&eth1 {
+ phy-mode = "sgmii";
+ status = "okay";
+ managed = "in-band-status";
+ sfp = <&sfp_eth1>;
+};
+
+&usb3 {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-372x.dtsi b/arch/arm64/boot/dts/marvell/armada-372x.dtsi
index 6800945a88ad..5ce55bdbb995 100644
--- a/arch/arm64/boot/dts/marvell/armada-372x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-372x.dtsi
@@ -18,7 +18,7 @@
cpus {
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x1>;
clocks = <&nb_periph_clk 16>;
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index e05594ea15fb..f43c43168b00 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -42,7 +42,7 @@
#size-cells = <0>;
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0>;
clocks = <&nb_periph_clk 16>;
enable-method = "psci";
@@ -247,6 +247,35 @@
reg = <0x14000 0x60>;
};
+ comphy: phy@18300 {
+ compatible = "marvell,comphy-a3700";
+ reg = <0x18300 0x300>,
+ <0x1F000 0x400>,
+ <0x5C000 0x400>,
+ <0xe0178 0x8>;
+ reg-names = "comphy",
+ "lane1_pcie_gbe",
+ "lane0_usb3_gbe",
+ "lane2_sata_usb3";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ comphy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+
+ comphy1: phy@1 {
+ reg = <1>;
+ #phy-cells = <1>;
+ };
+
+ comphy2: phy@2 {
+ reg = <2>;
+ #phy-cells = <1>;
+ };
+ };
+
pinctrl_sb: pinctrl@18800 {
compatible = "marvell,armada3710-sb-pinctrl",
"syscon", "simple-mfd";
@@ -271,11 +300,25 @@
function = "mii";
};
+ smi_pins: smi-pins {
+ groups = "smi";
+ function = "smi";
+ };
+
sdio_pins: sdio-pins {
groups = "sdio_sb";
function = "sdio";
};
+ pcie_reset_pins: pcie-reset-pins {
+ groups = "pcie1";
+ function = "pcie";
+ };
+
+ pcie_clkreq_pins: pcie-clkreq-pins {
+ groups = "pcie1_clkreq";
+ function = "pcie";
+ };
};
eth0: ethernet@30000 {
@@ -305,18 +348,50 @@
compatible = "marvell,armada3700-xhci",
"generic-xhci";
reg = <0x58000 0x4000>;
+ marvell,usb-misc-reg = <&usb32_syscon>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&sb_periph_clk 12>;
+ phys = <&comphy0 0>, <&usb2_utmi_otg_phy>;
+ phy-names = "usb3-phy", "usb2-utmi-otg-phy";
status = "disabled";
};
+ usb2_utmi_otg_phy: phy@5d000 {
+ compatible = "marvell,a3700-utmi-otg-phy";
+ reg = <0x5d000 0x800>;
+ marvell,usb-misc-reg = <&usb32_syscon>;
+ #phy-cells = <0>;
+ };
+
+ usb32_syscon: system-controller@5d800 {
+ compatible = "marvell,armada-3700-usb2-host-device-misc",
+ "syscon";
+ reg = <0x5d800 0x800>;
+ };
+
usb2: usb@5e000 {
compatible = "marvell,armada-3700-ehci";
- reg = <0x5e000 0x2000>;
+ reg = <0x5e000 0x1000>;
+ marvell,usb-misc-reg = <&usb2_syscon>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb2_utmi_host_phy>;
+ phy-names = "usb2-utmi-host-phy";
status = "disabled";
};
+ usb2_utmi_host_phy: phy@5f000 {
+ compatible = "marvell,a3700-utmi-host-phy";
+ reg = <0x5f000 0x800>;
+ marvell,usb-misc-reg = <&usb2_syscon>;
+ #phy-cells = <0>;
+ };
+
+ usb2_syscon: system-controller@5f800 {
+ compatible = "marvell,armada-3700-usb2-host-misc",
+ "syscon";
+ reg = <0x5f800 0x800>;
+ };
+
xor@60900 {
compatible = "marvell,armada-3700-xor";
reg = <0x60900 0x100>,
@@ -368,8 +443,9 @@
sata: sata@e0000 {
compatible = "marvell,armada-3700-ahci";
- reg = <0xe0000 0x2000>;
+ reg = <0xe0000 0x178>;
interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&nb_periph_clk 1>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
index 412efdb46e7c..d20d84ce7ca8 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
@@ -66,8 +66,6 @@
status = "okay";
spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <10000000>;
@@ -169,8 +167,6 @@
status = "okay";
spi-flash@0 {
- #address-cells = <0x1>;
- #size-cells = <0x1>;
compatible = "jedec,spi-nor";
reg = <0x0>;
spi-max-frequency = <20000000>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index 5b4a9609e31f..2468762283a5 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -351,7 +351,7 @@
reg = <0>;
pinctrl-names = "default";
pinctrl-0 = <&cp0_copper_eth_phy_reset>;
- reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>;
reset-assert-us = <10000>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
index 1bac437369a1..9f4f939ab65f 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
@@ -81,8 +81,6 @@
status = "okay";
spi-flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <10000000>;
@@ -214,8 +212,6 @@
status = "okay";
spi-flash@0 {
- #address-cells = <0x1>;
- #size-cells = <0x1>;
compatible = "jedec,spi-nor";
reg = <0x0>;
spi-max-frequency = <20000000>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
index 29ea7e81ec4c..329f8ceeebea 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
@@ -183,7 +183,7 @@
pinctrl-0 = <&cp0_pcie_pins>;
num-lanes = <4>;
num-viewport = <8>;
- reset-gpio = <&cp0_gpio1 20 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&cp0_gpio2 20 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
index d3c0636558ff..861fd21922c4 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
@@ -17,13 +17,13 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x000>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x001>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
index 01ea662afba8..2baafe12ebd4 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
@@ -17,25 +17,25 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x000>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x001>;
enable-method = "psci";
};
cpu2: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x100>;
enable-method = "psci";
};
cpu3: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x101>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 7d94c1fa592a..91dad7e4ee59 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -28,6 +28,23 @@
method = "smc";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /*
+ * This area matches the mapping done with a
+ * mainline U-Boot, and should be updated by the
+ * bootloader.
+ */
+
+ psci-area@4000000 {
+ reg = <0x0 0x4000000 0x0 0x200000>;
+ no-map;
+ };
+ };
+
ap806 {
#address-cells = <2>;
#size-cells = <2>;
@@ -266,6 +283,8 @@
ap_thermal: thermal-sensor@80 {
compatible = "marvell,armada-ap806-thermal";
reg = <0x80 0x10>;
+ interrupt-parent = <&sei>;
+ interrupts = <18>;
#thermal-sensor-cells = <1>;
};
};
@@ -276,16 +295,26 @@
* The thermal IP features one internal sensor plus, if applicable, one
* remote channel wired to one sensor per CPU.
*
+ * Only one thermal zone per AP/CP may trigger interrupts at a time; the
+ * first one that has a critical trip point will be chosen.
+ *
* The cooling maps are always empty as there are no cooling devices.
*/
thermal-zones {
ap_thermal_ic: ap-thermal-ic {
- polling-delay-passive = <1000>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>; /* Interrupt driven */
+ polling-delay = <0>; /* Interrupt driven */
thermal-sensors = <&ap_thermal 0>;
- trips { };
+ trips {
+ ap_crit: ap-crit {
+ temperature = <100000>; /* mC degrees */
+ hysteresis = <2000>; /* mC degrees */
+ type = "critical";
+ };
+ };
+
cooling-maps { };
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi b/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi
index b788cb63caf2..d1a7143ef3d4 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi
@@ -15,49 +15,49 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x000>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x001>;
enable-method = "psci";
};
cpu2: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x100>;
enable-method = "psci";
};
cpu3: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x101>;
enable-method = "psci";
};
cpu4: cpu@200 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x200>;
enable-method = "psci";
};
cpu5: cpu@201 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x201>;
enable-method = "psci";
};
cpu6: cpu@300 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x300>;
enable-method = "psci";
};
cpu7: cpu@301 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x301>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
index b9d9f31e3ba1..4d6e4a097f72 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
@@ -28,12 +28,19 @@
*/
thermal-zones {
CP110_LABEL(thermal_ic): CP110_NODE_NAME(thermal-ic) {
- polling-delay-passive = <1000>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>; /* Interrupt driven */
+ polling-delay = <0>; /* Interrupt driven */
thermal-sensors = <&CP110_LABEL(thermal) 0>;
- trips { };
+ trips {
+ CP110_LABEL(crit): crit {
+ temperature = <100000>; /* mC degrees */
+ hysteresis = <2000>; /* mC degrees */
+ type = "critical";
+ };
+ };
+
cooling-maps { };
};
};
@@ -259,6 +266,8 @@
CP110_LABEL(thermal): thermal-sensor@70 {
compatible = "marvell,armada-cp110-thermal";
reg = <0x70 0x10>;
+ interrupts-extended =
+ <&CP110_LABEL(icu_sei) 116 IRQ_TYPE_LEVEL_HIGH>;
#thermal-sensor-cells = <1>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 4ce9d6ca0bf7..2b91daf5c1a6 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -6,6 +6,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "mt2712e.dtsi"
/ {
@@ -39,6 +40,53 @@
regulator-max-microvolt = <1000000>;
};
+ extcon_usb: extcon_iddig {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pio 12 GPIO_ACTIVE_HIGH>;
+ };
+
+ extcon_usb1: extcon_iddig1 {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>;
+ };
+
+ usb_p0_vbus: regulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "p0_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 13 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_p1_vbus: regulator@3 {
+ compatible = "regulator-fixed";
+ regulator-name = "p1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 15 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_p2_vbus: regulator@4 {
+ compatible = "regulator-fixed";
+ regulator-name = "p2_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_p3_vbus: regulator@5 {
+ compatible = "regulator-fixed";
+ regulator-name = "p3_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 17 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
};
&auxadc {
@@ -57,7 +105,57 @@
proc-supply = <&cpus_fixed_vproc1>;
};
+&pio {
+ usb0_id_pins_float: usb0_iddig {
+ pins_iddig {
+ pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
+ bias-pull-up;
+ };
+ };
+
+ usb1_id_pins_float: usb1_iddig {
+ pins_iddig {
+ pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
+ bias-pull-up;
+ };
+ };
+};
+
+&ssusb {
+ vbus-supply = <&usb_p0_vbus>;
+ extcon = <&extcon_usb>;
+ dr_mode = "otg";
+ wakeup-source;
+ mediatek,u3p-dis-msk = <0x1>;
+ //enable-manual-drd;
+ //maximum-speed = "full-speed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_id_pins_float>;
+ status = "okay";
+};
+
+&ssusb1 {
+ vbus-supply = <&usb_p1_vbus>;
+ extcon = <&extcon_usb1>;
+ dr_mode = "otg";
+ //mediatek,u3p-dis-msk = <0x1>;
+ enable-manual-drd;
+ wakeup-source;
+ //maximum-speed = "full-speed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_id_pins_float>;
+ status = "okay";
+};
+
&uart0 {
status = "okay";
};
+&usb_host0 {
+ vbus-supply = <&usb_p2_vbus>;
+ status = "okay";
+};
+
+&usb_host1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index ee627a7c7b45..976d92a94738 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -8,6 +8,8 @@
#include <dt-bindings/clock/mt2712-clk.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/memory/mt2712-larb-port.h>
+#include <dt-bindings/phy/phy.h>
#include <dt-bindings/power/mt2712-power.h>
#include "mt2712-pinfunc.h"
@@ -312,12 +314,33 @@
status = "disabled";
};
+ iommu0: iommu@10205000 {
+ compatible = "mediatek,mt2712-m4u";
+ reg = <0 0x10205000 0 0x1000>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_M4U>;
+ clock-names = "bclk";
+ mediatek,larbs = <&larb0 &larb1 &larb2
+ &larb3 &larb6>;
+ #iommu-cells = <1>;
+ };
+
apmixedsys: syscon@10209000 {
compatible = "mediatek,mt2712-apmixedsys", "syscon";
reg = <0 0x10209000 0 0x1000>;
#clock-cells = <1>;
};
+ iommu1: iommu@1020a000 {
+ compatible = "mediatek,mt2712-m4u";
+ reg = <0 0x1020a000 0 0x1000>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_M4U>;
+ clock-names = "bclk";
+ mediatek,larbs = <&larb4 &larb5 &larb7>;
+ #iommu-cells = <1>;
+ };
+
mcucfg: syscon@10220000 {
compatible = "mediatek,mt2712-mcucfg", "syscon";
reg = <0 0x10220000 0 0x1000>;
@@ -395,6 +418,210 @@
status = "disabled";
};
+ pwm: pwm@11006000 {
+ compatible = "mediatek,mt2712-pwm";
+ reg = <0 0x11006000 0 0x1000>;
+ #pwm-cells = <2>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_PWM_SEL>,
+ <&pericfg CLK_PERI_PWM>,
+ <&pericfg CLK_PERI_PWM0>,
+ <&pericfg CLK_PERI_PWM1>,
+ <&pericfg CLK_PERI_PWM2>,
+ <&pericfg CLK_PERI_PWM3>,
+ <&pericfg CLK_PERI_PWM4>,
+ <&pericfg CLK_PERI_PWM5>,
+ <&pericfg CLK_PERI_PWM6>,
+ <&pericfg CLK_PERI_PWM7>;
+ clock-names = "top",
+ "main",
+ "pwm1",
+ "pwm2",
+ "pwm3",
+ "pwm4",
+ "pwm5",
+ "pwm6",
+ "pwm7",
+ "pwm8";
+ status = "disabled";
+ };
+
+ i2c0: i2c@11007000 {
+ compatible = "mediatek,mt2712-i2c";
+ reg = <0 0x11007000 0 0x90>,
+ <0 0x11000180 0 0x80>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <4>;
+ clocks = <&pericfg CLK_PERI_I2C0>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@11008000 {
+ compatible = "mediatek,mt2712-i2c";
+ reg = <0 0x11008000 0 0x90>,
+ <0 0x11000200 0 0x80>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <4>;
+ clocks = <&pericfg CLK_PERI_I2C1>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@11009000 {
+ compatible = "mediatek,mt2712-i2c";
+ reg = <0 0x11009000 0 0x90>,
+ <0 0x11000280 0 0x80>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <4>;
+ clocks = <&pericfg CLK_PERI_I2C2>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi0: spi@1100a000 {
+ compatible = "mediatek,mt2712-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x1100a000 0 0x100>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&pericfg CLK_PERI_SPI0>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk";
+ status = "disabled";
+ };
+
+ nandc: nfi@1100e000 {
+ compatible = "mediatek,mt2712-nfc";
+ reg = <0 0x1100e000 0 0x1000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_NFI2X_EN>, <&pericfg CLK_PERI_NFI>;
+ clock-names = "nfi_clk", "pad_clk";
+ ecc-engine = <&bch>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ bch: ecc@1100f000 {
+ compatible = "mediatek,mt2712-ecc";
+ reg = <0 0x1100f000 0 0x1000>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_NFI1X_CK_EN>;
+ clock-names = "nfiecc_clk";
+ status = "disabled";
+ };
+
+ i2c3: i2c@11010000 {
+ compatible = "mediatek,mt2712-i2c";
+ reg = <0 0x11010000 0 0x90>,
+ <0 0x11000300 0 0x80>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <4>;
+ clocks = <&pericfg CLK_PERI_I2C3>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@11011000 {
+ compatible = "mediatek,mt2712-i2c";
+ reg = <0 0x11011000 0 0x90>,
+ <0 0x11000380 0 0x80>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <4>;
+ clocks = <&pericfg CLK_PERI_I2C4>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@11013000 {
+ compatible = "mediatek,mt2712-i2c";
+ reg = <0 0x11013000 0 0x90>,
+ <0 0x11000100 0 0x80>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <4>;
+ clocks = <&pericfg CLK_PERI_I2C5>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi2: spi@11015000 {
+ compatible = "mediatek,mt2712-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x11015000 0 0x100>;
+ interrupts = <GIC_SPI 284 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&pericfg CLK_PERI_SPI2>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk";
+ status = "disabled";
+ };
+
+ spi3: spi@11016000 {
+ compatible = "mediatek,mt2712-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x11016000 0 0x100>;
+ interrupts = <GIC_SPI 285 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&pericfg CLK_PERI_SPI3>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk";
+ status = "disabled";
+ };
+
+ spi4: spi@10012000 {
+ compatible = "mediatek,mt2712-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x10012000 0 0x100>;
+ interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&infracfg CLK_INFRA_AO_SPI0>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk";
+ status = "disabled";
+ };
+
+ spi5: spi@11018000 {
+ compatible = "mediatek,mt2712-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x11018000 0 0x100>;
+ interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&pericfg CLK_PERI_SPI5>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk";
+ status = "disabled";
+ };
+
uart4: serial@11019000 {
compatible = "mediatek,mt2712-uart",
"mediatek,mt6577-uart";
@@ -405,6 +632,228 @@
status = "disabled";
};
+ mmc0: mmc@11230000 {
+ compatible = "mediatek,mt2712-mmc";
+ reg = <0 0x11230000 0 0x1000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_MSDC30_0>,
+ <&pericfg CLK_PERI_MSDC50_0_HCLK_EN>,
+ <&pericfg CLK_PERI_MSDC30_0_QTR_EN>,
+ <&pericfg CLK_PERI_MSDC50_0_EN>;
+ clock-names = "source", "hclk", "bus_clk", "source_cg";
+ status = "disabled";
+ };
+
+ mmc1: mmc@11240000 {
+ compatible = "mediatek,mt2712-mmc";
+ reg = <0 0x11240000 0 0x1000>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_MSDC30_1>,
+ <&topckgen CLK_TOP_AXI_SEL>,
+ <&pericfg CLK_PERI_MSDC30_1_EN>;
+ clock-names = "source", "hclk", "source_cg";
+ status = "disabled";
+ };
+
+ mmc2: mmc@11250000 {
+ compatible = "mediatek,mt2712-mmc";
+ reg = <0 0x11250000 0 0x1000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_MSDC30_2>,
+ <&topckgen CLK_TOP_AXI_SEL>,
+ <&pericfg CLK_PERI_MSDC30_2_EN>;
+ clock-names = "source", "hclk", "source_cg";
+ status = "disabled";
+ };
+
+ ssusb: usb@11271000 {
+ compatible = "mediatek,mt2712-mtu3", "mediatek,mtu3";
+ reg = <0 0x11271000 0 0x3000>,
+ <0 0x11280700 0 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&u2port0 PHY_TYPE_USB2>,
+ <&u2port1 PHY_TYPE_USB2>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ mediatek,syscon-wakeup = <&pericfg 0x510 2>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ usb_host0: xhci@11270000 {
+ compatible = "mediatek,mt2712-xhci",
+ "mediatek,mtk-xhci";
+ reg = <0 0x11270000 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_USB>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>;
+ clock-names = "sys_ck", "ref_ck";
+ status = "disabled";
+ };
+ };
+
+ u3phy0: usb-phy@11290000 {
+ compatible = "mediatek,mt2712-u3phy";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "okay";
+
+ u2port0: usb-phy@11290000 {
+ reg = <0 0x11290000 0 0x700>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ status = "okay";
+ };
+
+ u2port1: usb-phy@11298000 {
+ reg = <0 0x11298000 0 0x700>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ status = "okay";
+ };
+
+ u3port0: usb-phy@11298700 {
+ reg = <0 0x11298700 0 0x900>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ status = "okay";
+ };
+ };
+
+ ssusb1: usb@112c1000 {
+ compatible = "mediatek,mt2712-mtu3", "mediatek,mtu3";
+ reg = <0 0x112c1000 0 0x3000>,
+ <0 0x112d0700 0 0x0100>;
+ reg-names = "mac", "ippc";
+ interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&u2port2 PHY_TYPE_USB2>,
+ <&u2port3 PHY_TYPE_USB2>,
+ <&u3port1 PHY_TYPE_USB3>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_USB2>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>;
+ clock-names = "sys_ck";
+ mediatek,syscon-wakeup = <&pericfg 0x514 2>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ usb_host1: xhci@112c0000 {
+ compatible = "mediatek,mt2712-xhci",
+ "mediatek,mtk-xhci";
+ reg = <0 0x112c0000 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_USB2>;
+ clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>;
+ clock-names = "sys_ck", "ref_ck";
+ status = "disabled";
+ };
+ };
+
+ u3phy1: usb-phy@112e0000 {
+ compatible = "mediatek,mt2712-u3phy";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "okay";
+
+ u2port2: usb-phy@112e0000 {
+ reg = <0 0x112e0000 0 0x700>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ status = "okay";
+ };
+
+ u2port3: usb-phy@112e8000 {
+ reg = <0 0x112e8000 0 0x700>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ status = "okay";
+ };
+
+ u3port1: usb-phy@112e8700 {
+ reg = <0 0x112e8700 0 0x900>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ status = "okay";
+ };
+ };
+
+ pcie: pcie@11700000 {
+ compatible = "mediatek,mt2712-pcie";
+ device_type = "pci";
+ reg = <0 0x11700000 0 0x1000>,
+ <0 0x112ff000 0 0x1000>;
+ reg-names = "port0", "port1";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>,
+ <&topckgen CLK_TOP_PE2_MAC_P1_SEL>,
+ <&pericfg CLK_PERI_PCIE0>,
+ <&pericfg CLK_PERI_PCIE1>;
+ clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1";
+ phys = <&u3port0 PHY_TYPE_PCIE>, <&u3port1 PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy0", "pcie-phy1";
+ bus-range = <0x00 0xff>;
+ ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
+
+ pcie0: pcie@0,0 {
+ device_type = "pci";
+ status = "disabled";
+ reg = <0x0000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ num-lanes = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+ <0 0 0 2 &pcie_intc0 1>,
+ <0 0 0 3 &pcie_intc0 2>,
+ <0 0 0 4 &pcie_intc0 3>;
+ pcie_intc0: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ pcie1: pcie@1,0 {
+ device_type = "pci";
+ status = "disabled";
+ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ num-lanes = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+ <0 0 0 2 &pcie_intc1 1>,
+ <0 0 0 3 &pcie_intc1 2>,
+ <0 0 0 4 &pcie_intc1 3>;
+ pcie_intc1: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+ };
+
mfgcfg: syscon@13000000 {
compatible = "mediatek,mt2712-mfgcfg", "syscon";
reg = <0 0x13000000 0 0x1000>;
@@ -417,12 +866,85 @@
#clock-cells = <1>;
};
+ larb0: larb@14021000 {
+ compatible = "mediatek,mt2712-smi-larb";
+ reg = <0 0x14021000 0 0x1000>;
+ mediatek,smi = <&smi_common0>;
+ mediatek,larb-id = <0>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_SMI_LARB0>,
+ <&mmsys CLK_MM_SMI_LARB0>;
+ clock-names = "apb", "smi";
+ };
+
+ smi_common0: smi@14022000 {
+ compatible = "mediatek,mt2712-smi-common";
+ reg = <0 0x14022000 0 0x1000>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_SMI_COMMON>,
+ <&mmsys CLK_MM_SMI_COMMON>;
+ clock-names = "apb", "smi";
+ };
+
+ larb4: larb@14027000 {
+ compatible = "mediatek,mt2712-smi-larb";
+ reg = <0 0x14027000 0 0x1000>;
+ mediatek,smi = <&smi_common1>;
+ mediatek,larb-id = <4>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_SMI_LARB4>,
+ <&mmsys CLK_MM_SMI_LARB4>;
+ clock-names = "apb", "smi";
+ };
+
+ larb5: larb@14030000 {
+ compatible = "mediatek,mt2712-smi-larb";
+ reg = <0 0x14030000 0 0x1000>;
+ mediatek,smi = <&smi_common1>;
+ mediatek,larb-id = <5>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_SMI_LARB5>,
+ <&mmsys CLK_MM_SMI_LARB5>;
+ clock-names = "apb", "smi";
+ };
+
+ smi_common1: smi@14031000 {
+ compatible = "mediatek,mt2712-smi-common";
+ reg = <0 0x14031000 0 0x1000>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_SMI_COMMON1>,
+ <&mmsys CLK_MM_SMI_COMMON1>;
+ clock-names = "apb", "smi";
+ };
+
+ larb7: larb@14032000 {
+ compatible = "mediatek,mt2712-smi-larb";
+ reg = <0 0x14032000 0 0x1000>;
+ mediatek,smi = <&smi_common1>;
+ mediatek,larb-id = <7>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_SMI_LARB7>,
+ <&mmsys CLK_MM_SMI_LARB7>;
+ clock-names = "apb", "smi";
+ };
+
imgsys: syscon@15000000 {
compatible = "mediatek,mt2712-imgsys", "syscon";
reg = <0 0x15000000 0 0x1000>;
#clock-cells = <1>;
};
+ larb2: larb@15001000 {
+ compatible = "mediatek,mt2712-smi-larb";
+ reg = <0 0x15001000 0 0x1000>;
+ mediatek,smi = <&smi_common0>;
+ mediatek,larb-id = <2>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_ISP>;
+ clocks = <&imgsys CLK_IMG_SMI_LARB2>,
+ <&imgsys CLK_IMG_SMI_LARB2>;
+ clock-names = "apb", "smi";
+ };
+
bdpsys: syscon@15010000 {
compatible = "mediatek,mt2712-bdpsys", "syscon";
reg = <0 0x15010000 0 0x1000>;
@@ -435,12 +957,45 @@
#clock-cells = <1>;
};
+ larb1: larb@16010000 {
+ compatible = "mediatek,mt2712-smi-larb";
+ reg = <0 0x16010000 0 0x1000>;
+ mediatek,smi = <&smi_common0>;
+ mediatek,larb-id = <1>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_VDEC>;
+ clocks = <&vdecsys CLK_VDEC_CKEN>,
+ <&vdecsys CLK_VDEC_LARB1_CKEN>;
+ clock-names = "apb", "smi";
+ };
+
vencsys: syscon@18000000 {
compatible = "mediatek,mt2712-vencsys", "syscon";
reg = <0 0x18000000 0 0x1000>;
#clock-cells = <1>;
};
+ larb3: larb@18001000 {
+ compatible = "mediatek,mt2712-smi-larb";
+ reg = <0 0x18001000 0 0x1000>;
+ mediatek,smi = <&smi_common0>;
+ mediatek,larb-id = <3>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_VENC>;
+ clocks = <&vencsys CLK_VENC_SMI_COMMON_CON>,
+ <&vencsys CLK_VENC_VENC>;
+ clock-names = "apb", "smi";
+ };
+
+ larb6: larb@18002000 {
+ compatible = "mediatek,mt2712-smi-larb";
+ reg = <0 0x18002000 0 0x1000>;
+ mediatek,smi = <&smi_common0>;
+ mediatek,larb-id = <6>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_VENC>;
+ clocks = <&vencsys CLK_VENC_SMI_COMMON_CON>,
+ <&vencsys CLK_VENC_VENC>;
+ clock-names = "apb", "smi";
+ };
+
jpgdecsys: syscon@19000000 {
compatible = "mediatek,mt2712-jpgdecsys", "syscon";
reg = <0 0x19000000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt6797-evb.dts b/arch/arm64/boot/dts/mediatek/mt6797-evb.dts
index c79109c65409..237e869a5fa1 100644
--- a/arch/arm64/boot/dts/mediatek/mt6797-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt6797-evb.dts
@@ -33,4 +33,6 @@
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_a>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts b/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts
index 742938a1a548..13939d55b85b 100644
--- a/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts
+++ b/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts
@@ -30,4 +30,6 @@
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins_a>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
index 4beaa71107d7..2b2a69c7567f 100644
--- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
@@ -14,6 +14,7 @@
#include <dt-bindings/clock/mt6797-clk.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/mt6797-pinfunc.h>
/ {
compatible = "mediatek,mt6797";
@@ -129,6 +130,33 @@
#clock-cells = <1>;
};
+ pio: pinctrl@10005000 {
+ compatible = "mediatek,mt6797-pinctrl";
+ reg = <0 0x10005000 0 0x1000>,
+ <0 0x10002000 0 0x400>,
+ <0 0x10002400 0 0x400>,
+ <0 0x10002800 0 0x400>,
+ <0 0x10002C00 0 0x400>;
+ reg-names = "gpio", "iocfgl", "iocfgb",
+ "iocfgr", "iocfgt";
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ uart0_pins_a: uart0 {
+ pins0 {
+ pinmux = <MT6797_GPIO234__FUNC_UTXD0>,
+ <MT6797_GPIO235__FUNC_URXD0>;
+ };
+ };
+
+ uart1_pins_a: uart1 {
+ pins1 {
+ pinmux = <MT6797_GPIO232__FUNC_URXD1>,
+ <MT6797_GPIO233__FUNC_UTXD1>;
+ };
+ };
+ };
+
scpsys: scpsys@10006000 {
compatible = "mediatek,mt6797-scpsys";
#power-domain-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 8fc4aa77f012..4b1f5ae710eb 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -70,7 +70,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
clocks = <&infracfg CLK_INFRA_MUX1_SEL>,
<&apmixedsys CLK_APMIXED_MAIN_CORE_EN>;
@@ -84,7 +84,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
clocks = <&infracfg CLK_INFRA_MUX1_SEL>,
<&apmixedsys CLK_APMIXED_MAIN_CORE_EN>;
@@ -170,17 +170,20 @@
cooling-maps {
map0 {
trip = <&cpu_passive>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
map1 {
trip = <&cpu_active>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
map2 {
trip = <&cpu_hot>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 412ffd4d426b..c3c360161c5d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -276,12 +276,14 @@
cooling-maps {
map@0 {
trip = <&target>;
- cooling-device = <&cpu0 0 0>;
+ cooling-device = <&cpu0 0 0>,
+ <&cpu1 0 0>;
contribution = <3072>;
};
map@1 {
trip = <&target>;
- cooling-device = <&cpu2 0 0>;
+ cooling-device = <&cpu2 0 0>,
+ <&cpu3 0 0>;
contribution = <1024>;
};
};
diff --git a/arch/arm64/boot/dts/nvidia/Makefile b/arch/arm64/boot/dts/nvidia/Makefile
index 7c13d7df484e..6b8ab5568481 100644
--- a/arch/arm64/boot/dts/nvidia/Makefile
+++ b/arch/arm64/boot/dts/nvidia/Makefile
@@ -4,5 +4,6 @@ dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-0000.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-2180.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2571.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-smaug.dtb
+dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2894-0050-a08.dtb
dtb-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-p2771-0000.dtb
dtb-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra194-p2972-0000.dtb
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index fa5a7c4bc807..631a7f77c386 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -1082,13 +1082,13 @@
cpu@0 {
device_type = "cpu";
- compatible = "nvidia,denver", "arm,armv8";
+ compatible = "nvidia,denver";
reg = <0>;
};
cpu@1 {
device_type = "cpu";
- compatible = "nvidia,denver", "arm,armv8";
+ compatible = "nvidia,denver";
reg = <1>;
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 65487eee2ce6..31457f32e4d0 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -26,7 +26,8 @@
reg = <0x74>;
interrupt-parent = <&gpio>;
- interrupts = <TEGRA_MAIN_GPIO(Y, 0) GPIO_ACTIVE_LOW>;
+ interrupts = <TEGRA186_MAIN_GPIO(Y, 0)
+ GPIO_ACTIVE_LOW>;
#gpio-cells = <2>;
gpio-controller;
@@ -37,7 +38,8 @@
reg = <0x77>;
interrupt-parent = <&gpio>;
- interrupts = <TEGRA_MAIN_GPIO(Y, 6) GPIO_ACTIVE_LOW>;
+ interrupts = <TEGRA186_MAIN_GPIO(Y, 6)
+ GPIO_ACTIVE_LOW>;
#gpio-cells = <2>;
gpio-controller;
@@ -52,6 +54,7 @@
};
hda@3510000 {
+ nvidia,model = "jetson-tx2-hda";
status = "okay";
};
@@ -108,7 +111,8 @@
hdmi-supply = <&vdd_hdmi>;
nvidia,ddc-i2c-bus = <&ddc>;
- nvidia,hpd-gpio = <&gpio TEGRA_MAIN_GPIO(P, 1) GPIO_ACTIVE_LOW>;
+ nvidia,hpd-gpio = <&gpio TEGRA186_MAIN_GPIO(P, 1)
+ GPIO_ACTIVE_LOW>;
};
dpaux@155c0000 {
@@ -121,7 +125,7 @@
power {
label = "Power";
- gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 0)
+ gpios = <&gpio_aon TEGRA186_AON_GPIO(FF, 0)
GPIO_ACTIVE_LOW>;
linux,input-type = <EV_KEY>;
linux,code = <KEY_POWER>;
@@ -132,7 +136,7 @@
volume-up {
label = "Volume Up";
- gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 1)
+ gpios = <&gpio_aon TEGRA186_AON_GPIO(FF, 1)
GPIO_ACTIVE_LOW>;
linux,input-type = <EV_KEY>;
linux,code = <KEY_VOLUMEUP>;
@@ -141,7 +145,7 @@
volume-down {
label = "Volume Down";
- gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 2)
+ gpios = <&gpio_aon TEGRA186_AON_GPIO(FF, 2)
GPIO_ACTIVE_LOW>;
linux,input-type = <EV_KEY>;
linux,code = <KEY_VOLUMEDOWN>;
@@ -158,7 +162,8 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpio = <&gpio TEGRA_MAIN_GPIO(P, 6) GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio TEGRA186_MAIN_GPIO(P, 6)
+ GPIO_ACTIVE_HIGH>;
enable-active-high;
vin-supply = <&vdd_3v3_sys>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index b539561e7877..89a2da46efae 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -34,7 +34,8 @@
ethernet@2490000 {
status = "okay";
- phy-reset-gpios = <&gpio TEGRA_MAIN_GPIO(M, 4) GPIO_ACTIVE_LOW>;
+ phy-reset-gpios = <&gpio TEGRA186_MAIN_GPIO(M, 4)
+ GPIO_ACTIVE_LOW>;
phy-handle = <&phy>;
phy-mode = "rgmii";
@@ -46,7 +47,8 @@
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0x0>;
interrupt-parent = <&gpio>;
- interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <TEGRA186_MAIN_GPIO(M, 5)
+ IRQ_TYPE_LEVEL_LOW>;
};
};
};
@@ -91,8 +93,8 @@
/* SDMMC1 (SD/MMC) */
sdhci@3400000 {
- cd-gpios = <&gpio TEGRA_MAIN_GPIO(P, 5) GPIO_ACTIVE_LOW>;
- wp-gpios = <&gpio TEGRA_MAIN_GPIO(P, 4) GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio TEGRA186_MAIN_GPIO(P, 5) GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio TEGRA186_MAIN_GPIO(P, 4) GPIO_ACTIVE_HIGH>;
vqmmc-supply = <&vddio_sdmmc1>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 22815db4a3ed..bb2045be8814 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -136,7 +136,7 @@
};
gen1_i2c: i2c@3160000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x03160000 0x0 0x10000>;
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -149,7 +149,7 @@
};
cam_i2c: i2c@3180000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x03180000 0x0 0x10000>;
interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -163,7 +163,7 @@
/* shares pads with dpaux1 */
dp_aux_ch1_i2c: i2c@3190000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x03190000 0x0 0x10000>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -177,7 +177,7 @@
/* controlled by BPMP, should not be enabled */
pwr_i2c: i2c@31a0000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x031a0000 0x0 0x10000>;
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -191,7 +191,7 @@
/* shares pads with dpaux0 */
dp_aux_ch0_i2c: i2c@31b0000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x031b0000 0x0 0x10000>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -204,7 +204,7 @@
};
gen7_i2c: i2c@31c0000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x031c0000 0x0 0x10000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -217,7 +217,7 @@
};
gen9_i2c: i2c@31e0000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x031e0000 0x0 0x10000>;
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -315,10 +315,13 @@
nvidia,pad-autocal-pull-down-offset-hs400 = <0x05>;
nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x0a>;
nvidia,pad-autocal-pull-down-offset-1v8-timeout = <0x0a>;
+ nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x0a>;
+ nvidia,pad-autocal-pull-down-offset-3v3-timeout = <0x0a>;
nvidia,default-tap = <0x5>;
nvidia,default-trim = <0x9>;
nvidia,dqs-trim = <63>;
mmc-hs400-1_8v;
+ supports-cqe;
status = "disabled";
};
@@ -375,7 +378,7 @@
};
gen2_i2c: i2c@c240000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x0c240000 0x0 0x10000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -388,7 +391,7 @@
};
gen8_i2c: i2c@c250000 {
- compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c";
reg = <0x0 0x0c250000 0x0 0x10000>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -982,37 +985,37 @@
#size-cells = <0>;
cpu@0 {
- compatible = "nvidia,tegra186-denver", "arm,armv8";
+ compatible = "nvidia,tegra186-denver";
device_type = "cpu";
reg = <0x000>;
};
cpu@1 {
- compatible = "nvidia,tegra186-denver", "arm,armv8";
+ compatible = "nvidia,tegra186-denver";
device_type = "cpu";
reg = <0x001>;
};
cpu@2 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
device_type = "cpu";
reg = <0x100>;
};
cpu@3 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
device_type = "cpu";
reg = <0x101>;
};
cpu@4 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
device_type = "cpu";
reg = <0x102>;
};
cpu@5 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
device_type = "cpu";
reg = <0x103>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index 22a1c267aed9..246c1ebbd055 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -10,7 +10,7 @@
aliases {
sdhci0 = "/cbb/sdhci@3460000";
sdhci1 = "/cbb/sdhci@3400000";
- serial0 = &uartb;
+ serial0 = &tcu;
i2c0 = "/bpmp/i2c";
i2c1 = "/cbb/i2c@3160000";
i2c2 = "/cbb/i2c@c240000";
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index adf351010ff5..b62e96945846 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -11,20 +11,21 @@
compatible = "nvidia,p2972-0000", "nvidia,tegra194";
cbb {
- /* SDMMC1 (SD/MMC) */
- sdhci@3400000 {
+ ddc: i2c@31c0000 {
status = "okay";
};
- ddc: i2c@31c0000 {
+ /* SDMMC1 (SD/MMC) */
+ sdhci@3400000 {
status = "okay";
};
- pwm@c340000 {
+ hda@3510000 {
+ nvidia,model = "jetson-xavier-hda";
status = "okay";
};
- hda@3510000 {
+ pwm@c340000 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 6dfa1ca0b851..c77ca211fa8f 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -303,6 +303,17 @@
clock-names = "sdhci";
resets = <&bpmp TEGRA194_RESET_SDMMC1>;
reset-names = "sdhci";
+ nvidia,pad-autocal-pull-up-offset-3v3-timeout =
+ <0x07>;
+ nvidia,pad-autocal-pull-down-offset-3v3-timeout =
+ <0x07>;
+ nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x06>;
+ nvidia,pad-autocal-pull-down-offset-1v8-timeout =
+ <0x07>;
+ nvidia,pad-autocal-pull-up-offset-sdr104 = <0x00>;
+ nvidia,pad-autocal-pull-down-offset-sdr104 = <0x00>;
+ nvidia,default-tap = <0x9>;
+ nvidia,default-trim = <0x5>;
status = "disabled";
};
@@ -314,6 +325,18 @@
clock-names = "sdhci";
resets = <&bpmp TEGRA194_RESET_SDMMC3>;
reset-names = "sdhci";
+ nvidia,pad-autocal-pull-up-offset-1v8 = <0x00>;
+ nvidia,pad-autocal-pull-down-offset-1v8 = <0x7a>;
+ nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x07>;
+ nvidia,pad-autocal-pull-down-offset-3v3-timeout =
+ <0x07>;
+ nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x06>;
+ nvidia,pad-autocal-pull-down-offset-1v8-timeout =
+ <0x07>;
+ nvidia,pad-autocal-pull-up-offset-sdr104 = <0x00>;
+ nvidia,pad-autocal-pull-down-offset-sdr104 = <0x00>;
+ nvidia,default-tap = <0x9>;
+ nvidia,default-trim = <0x5>;
status = "disabled";
};
@@ -323,8 +346,24 @@
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&bpmp TEGRA194_CLK_SDMMC4>;
clock-names = "sdhci";
+ assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
+ <&bpmp TEGRA194_CLK_PLLC4>;
+ assigned-clock-parents =
+ <&bpmp TEGRA194_CLK_PLLC4>;
resets = <&bpmp TEGRA194_RESET_SDMMC4>;
reset-names = "sdhci";
+ nvidia,pad-autocal-pull-up-offset-hs400 = <0x00>;
+ nvidia,pad-autocal-pull-down-offset-hs400 = <0x00>;
+ nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x0a>;
+ nvidia,pad-autocal-pull-down-offset-1v8-timeout =
+ <0x0a>;
+ nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x0a>;
+ nvidia,pad-autocal-pull-down-offset-3v3-timeout =
+ <0x0a>;
+ nvidia,default-tap = <0x8>;
+ nvidia,default-trim = <0x14>;
+ nvidia,dqs-trim = <40>;
+ supports-cqe;
status = "disabled";
};
@@ -367,10 +406,35 @@
};
hsp_top0: hsp@3c00000 {
- compatible = "nvidia,tegra186-hsp";
+ compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
reg = <0x03c00000 0xa0000>;
- interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "doorbell";
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "doorbell", "shared0", "shared1", "shared2",
+ "shared3", "shared4", "shared5", "shared6",
+ "shared7";
+ #mbox-cells = <2>;
+ };
+
+ hsp_aon: hsp@c150000 {
+ compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
+ reg = <0x0c150000 0xa0000>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ /*
+ * Shared interrupt 0 is routed only to AON/SPE, so
+ * we only have 4 shared interrupts for the CCPLEX.
+ */
+ interrupt-names = "shared1", "shared2", "shared3", "shared4";
#mbox-cells = <2>;
};
@@ -871,56 +935,56 @@
#size-cells = <0>;
cpu@0 {
- compatible = "nvidia,tegra194-carmel", "arm,armv8";
+ compatible = "nvidia,tegra194-carmel";
device_type = "cpu";
reg = <0x10000>;
enable-method = "psci";
};
cpu@1 {
- compatible = "nvidia,tegra194-carmel", "arm,armv8";
+ compatible = "nvidia,tegra194-carmel";
device_type = "cpu";
reg = <0x10001>;
enable-method = "psci";
};
cpu@2 {
- compatible = "nvidia,tegra194-carmel", "arm,armv8";
+ compatible = "nvidia,tegra194-carmel";
device_type = "cpu";
reg = <0x100>;
enable-method = "psci";
};
cpu@3 {
- compatible = "nvidia,tegra194-carmel", "arm,armv8";
+ compatible = "nvidia,tegra194-carmel";
device_type = "cpu";
reg = <0x101>;
enable-method = "psci";
};
cpu@4 {
- compatible = "nvidia,tegra194-carmel", "arm,armv8";
+ compatible = "nvidia,tegra194-carmel";
device_type = "cpu";
reg = <0x200>;
enable-method = "psci";
};
cpu@5 {
- compatible = "nvidia,tegra194-carmel", "arm,armv8";
+ compatible = "nvidia,tegra194-carmel";
device_type = "cpu";
reg = <0x201>;
enable-method = "psci";
};
cpu@6 {
- compatible = "nvidia,tegra194-carmel", "arm,armv8";
+ compatible = "nvidia,tegra194-carmel";
device_type = "cpu";
reg = <0x10300>;
enable-method = "psci";
};
cpu@7 {
- compatible = "nvidia,tegra194-carmel", "arm,armv8";
+ compatible = "nvidia,tegra194-carmel";
device_type = "cpu";
reg = <0x10301>;
enable-method = "psci";
@@ -933,6 +997,13 @@
method = "smc";
};
+ tcu: tcu {
+ compatible = "nvidia,tegra194-tcu";
+ mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_SM TEGRA_HSP_SM_RX(0)>,
+ <&hsp_aon TEGRA_HSP_MBOX_TYPE_SM TEGRA_HSP_SM_TX(1)>;
+ mbox-names = "rx", "tx";
+ };
+
thermal-zones {
cpu {
thermal-sensors = <&{/bpmp/thermal}
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index 37e3c46e753f..9fad0d27278e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -78,4 +78,25 @@
};
};
};
+
+ clock@70110000 {
+ status = "okay";
+
+ nvidia,cf = <6>;
+ nvidia,ci = <0>;
+ nvidia,cg = <2>;
+ nvidia,droop-ctrl = <0x00000f00>;
+ nvidia,force-mode = <1>;
+ nvidia,sample-rate = <25000>;
+
+ nvidia,pwm-min-microvolts = <708000>;
+ nvidia,pwm-period-nanoseconds = <2500>; /* 2.5us */
+ nvidia,pwm-to-pmic;
+ nvidia,pwm-tristate-microvolts = <1000000>;
+ nvidia,pwm-voltage-step-microvolts = <19200>;
+
+ pinctrl-names = "dvfs_pwm_enable", "dvfs_pwm_disable";
+ pinctrl-0 = <&dvfs_pwm_active_state>;
+ pinctrl-1 = <&dvfs_pwm_inactive_state>;
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index a96e6ee70c21..95e890d8a119 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1278,6 +1278,20 @@
nvidia,open-drain = <TEGRA_PIN_DISABLE>;
};
};
+
+ dvfs_pwm_active_state: dvfs_pwm_active {
+ dvfs_pwm_pbb1 {
+ nvidia,pins = "dvfs_pwm_pbb1";
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+ };
+
+ dvfs_pwm_inactive_state: dvfs_pwm_inactive {
+ dvfs_pwm_pbb1 {
+ nvidia,pins = "dvfs_pwm_pbb1";
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+ };
};
pwm@7000a000 {
@@ -1303,6 +1317,16 @@
clock-frequency = <100000>;
};
+ sata@70020000 {
+ status = "okay";
+ phys = <&{/padctl@7009f000/pads/sata/lanes/sata-0}>;
+ };
+
+ hda@70030000 {
+ nvidia,model = "jetson-tx1-hda";
+ status = "okay";
+ };
+
usb@70090000 {
phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>,
<&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>,
@@ -1325,15 +1349,6 @@
status = "okay";
};
- sata@70020000 {
- status = "okay";
- phys = <&{/padctl@7009f000/pads/sata/lanes/sata-0}>;
- };
-
- hda@70030000 {
- status = "okay";
- };
-
padctl@7009f000 {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2894-0050-a08.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2894-0050-a08.dts
new file mode 100644
index 000000000000..7ffb351b5882
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2894-0050-a08.dts
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include "tegra210-p2894.dtsi"
+
+/ {
+ model = "NVIDIA Shield TV";
+ compatible = "nvidia,p2894-0050-a08", "nvidia,darcy", "nvidia,tegra210";
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
new file mode 100644
index 000000000000..3ddf173ccc18
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
@@ -0,0 +1,1858 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/mfd/max77620.h>
+#include <dt-bindings/pinctrl/pinctrl-tegra.h>
+#include "tegra210.dtsi"
+
+/ {
+ aliases {
+ serial0 = &uarta;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0xc0000000>;
+ };
+
+ pinmux: pinmux@700008d4 {
+ status = "okay";
+ pinctrl-names = "boot";
+ pinctrl-0 = <&state_boot>;
+
+ state_boot: pinmux {
+ pex_l0_rst_n_pa0 {
+ nvidia,pins = "pex_l0_rst_n_pa0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l0_clkreq_n_pa1 {
+ nvidia,pins = "pex_l0_clkreq_n_pa1";
+ nvidia,function = "pe0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ pex_wake_n_pa2 {
+ nvidia,pins = "pex_wake_n_pa2";
+ nvidia,function = "pe";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_rst_n_pa3 {
+ nvidia,pins = "pex_l1_rst_n_pa3";
+ nvidia,function = "pe1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_clkreq_n_pa4 {
+ nvidia,pins = "pex_l1_clkreq_n_pa4";
+ nvidia,function = "pe1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ sata_led_active_pa5 {
+ nvidia,pins = "sata_led_active_pa5";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pa6 {
+ nvidia,pins = "pa6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_fs_pb0 {
+ nvidia,pins = "dap1_fs_pb0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_din_pb1 {
+ nvidia,pins = "dap1_din_pb1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_dout_pb2 {
+ nvidia,pins = "dap1_dout_pb2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_sclk_pb3 {
+ nvidia,pins = "dap1_sclk_pb3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi2_mosi_pb4 {
+ nvidia,pins = "spi2_mosi_pb4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi2_miso_pb5 {
+ nvidia,pins = "spi2_miso_pb5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi2_sck_pb6 {
+ nvidia,pins = "spi2_sck_pb6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi2_cs0_pb7 {
+ nvidia,pins = "spi2_cs0_pb7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi1_mosi_pc0 {
+ nvidia,pins = "spi1_mosi_pc0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi1_miso_pc1 {
+ nvidia,pins = "spi1_miso_pc1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi1_sck_pc2 {
+ nvidia,pins = "spi1_sck_pc2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi1_cs0_pc3 {
+ nvidia,pins = "spi1_cs0_pc3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi1_cs1_pc4 {
+ nvidia,pins = "spi1_cs1_pc4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi4_sck_pc5 {
+ nvidia,pins = "spi4_sck_pc5";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi4_cs0_pc6 {
+ nvidia,pins = "spi4_cs0_pc6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi4_mosi_pc7 {
+ nvidia,pins = "spi4_mosi_pc7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spi4_miso_pd0 {
+ nvidia,pins = "spi4_miso_pd0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_tx_pd1 {
+ nvidia,pins = "uart3_tx_pd1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_rx_pd2 {
+ nvidia,pins = "uart3_rx_pd2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_rts_pd3 {
+ nvidia,pins = "uart3_rts_pd3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_cts_pd4 {
+ nvidia,pins = "uart3_cts_pd4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dmic1_clk_pe0 {
+ nvidia,pins = "dmic1_clk_pe0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dmic1_dat_pe1 {
+ nvidia,pins = "dmic1_dat_pe1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dmic2_clk_pe2 {
+ nvidia,pins = "dmic2_clk_pe2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dmic2_dat_pe3 {
+ nvidia,pins = "dmic2_dat_pe3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dmic3_clk_pe4 {
+ nvidia,pins = "dmic3_clk_pe4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dmic3_dat_pe5 {
+ nvidia,pins = "dmic3_dat_pe5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pe6 {
+ nvidia,pins = "pe6";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pe7 {
+ nvidia,pins = "pe7";
+ nvidia,function = "pwm3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ gen3_i2c_scl_pf0 {
+ nvidia,pins = "gen3_i2c_scl_pf0";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ gen3_i2c_sda_pf1 {
+ nvidia,pins = "gen3_i2c_sda_pf1";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_tx_pg0 {
+ nvidia,pins = "uart2_tx_pg0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rx_pg1 {
+ nvidia,pins = "uart2_rx_pg1";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rts_pg2 {
+ nvidia,pins = "uart2_rts_pg2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_cts_pg3 {
+ nvidia,pins = "uart2_cts_pg3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ wifi_en_ph0 {
+ nvidia,pins = "wifi_en_ph0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ wifi_rst_ph1 {
+ nvidia,pins = "wifi_rst_ph1";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ wifi_wake_ap_ph2 {
+ nvidia,pins = "wifi_wake_ap_ph2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ ap_wake_bt_ph3 {
+ nvidia,pins = "ap_wake_bt_ph3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ bt_rst_ph4 {
+ nvidia,pins = "bt_rst_ph4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ bt_wake_ap_ph5 {
+ nvidia,pins = "bt_wake_ap_ph5";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ ph6 {
+ nvidia,pins = "ph6";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ ap_wake_nfc_ph7 {
+ nvidia,pins = "ap_wake_nfc_ph7";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ nfc_en_pi0 {
+ nvidia,pins = "nfc_en_pi0";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ nfc_int_pi1 {
+ nvidia,pins = "nfc_int_pi1";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ gps_en_pi2 {
+ nvidia,pins = "gps_en_pi2";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ gps_rst_pi3 {
+ nvidia,pins = "gps_rst_pi3";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart4_tx_pi4 {
+ nvidia,pins = "uart4_tx_pi4";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart4_rx_pi5 {
+ nvidia,pins = "uart4_rx_pi5";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart4_rts_pi6 {
+ nvidia,pins = "uart4_rts_pi6";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart4_cts_pi7 {
+ nvidia,pins = "uart4_cts_pi7";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ gen1_i2c_sda_pj0 {
+ nvidia,pins = "gen1_i2c_sda_pj0";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ gen1_i2c_scl_pj1 {
+ nvidia,pins = "gen1_i2c_scl_pj1";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ gen2_i2c_scl_pj2 {
+ nvidia,pins = "gen2_i2c_scl_pj2";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ gen2_i2c_sda_pj3 {
+ nvidia,pins = "gen2_i2c_sda_pj3";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ dap4_fs_pj4 {
+ nvidia,pins = "dap4_fs_pj4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_din_pj5 {
+ nvidia,pins = "dap4_din_pj5";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_dout_pj6 {
+ nvidia,pins = "dap4_dout_pj6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_sclk_pj7 {
+ nvidia,pins = "dap4_sclk_pj7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pk0 {
+ nvidia,pins = "pk0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pk1 {
+ nvidia,pins = "pk1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pk2 {
+ nvidia,pins = "pk2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pk3 {
+ nvidia,pins = "pk3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pk4 {
+ nvidia,pins = "pk4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pk5 {
+ nvidia,pins = "pk5";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pk6 {
+ nvidia,pins = "pk6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pk7 {
+ nvidia,pins = "pk7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pl0 {
+ nvidia,pins = "pl0";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pl1 {
+ nvidia,pins = "pl1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc1_clk_pm0 {
+ nvidia,pins = "sdmmc1_clk_pm0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc1_cmd_pm1 {
+ nvidia,pins = "sdmmc1_cmd_pm1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc1_dat3_pm2 {
+ nvidia,pins = "sdmmc1_dat3_pm2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc1_dat2_pm3 {
+ nvidia,pins = "sdmmc1_dat2_pm3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc1_dat1_pm4 {
+ nvidia,pins = "sdmmc1_dat1_pm4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc1_dat0_pm5 {
+ nvidia,pins = "sdmmc1_dat0_pm5";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_clk_pp0 {
+ nvidia,pins = "sdmmc3_clk_pp0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_cmd_pp1 {
+ nvidia,pins = "sdmmc3_cmd_pp1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_dat3_pp2 {
+ nvidia,pins = "sdmmc3_dat3_pp2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_dat2_pp3 {
+ nvidia,pins = "sdmmc3_dat2_pp3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_dat1_pp4 {
+ nvidia,pins = "sdmmc3_dat1_pp4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_dat0_pp5 {
+ nvidia,pins = "sdmmc3_dat0_pp5";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam1_mclk_ps0 {
+ nvidia,pins = "cam1_mclk_ps0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam2_mclk_ps1 {
+ nvidia,pins = "cam2_mclk_ps1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam_i2c_scl_ps2 {
+ nvidia,pins = "cam_i2c_scl_ps2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ cam_i2c_sda_ps3 {
+ nvidia,pins = "cam_i2c_sda_ps3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ cam_rst_ps4 {
+ nvidia,pins = "cam_rst_ps4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam_af_en_ps5 {
+ nvidia,pins = "cam_af_en_ps5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam_flash_en_ps6 {
+ nvidia,pins = "cam_flash_en_ps6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam1_pwdn_ps7 {
+ nvidia,pins = "cam1_pwdn_ps7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam2_pwdn_pt0 {
+ nvidia,pins = "cam2_pwdn_pt0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cam1_strobe_pt1 {
+ nvidia,pins = "cam1_strobe_pt1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart1_tx_pu0 {
+ nvidia,pins = "uart1_tx_pu0";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart1_rx_pu1 {
+ nvidia,pins = "uart1_rx_pu1";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart1_rts_pu2 {
+ nvidia,pins = "uart1_rts_pu2";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ uart1_cts_pu3 {
+ nvidia,pins = "uart1_cts_pu3";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_bl_pwm_pv0 {
+ nvidia,pins = "lcd_bl_pwm_pv0";
+ nvidia,function = "pwm0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_bl_en_pv1 {
+ nvidia,pins = "lcd_bl_en_pv1";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_rst_pv2 {
+ nvidia,pins = "lcd_rst_pv2";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_gpio1_pv3 {
+ nvidia,pins = "lcd_gpio1_pv3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_gpio2_pv4 {
+ nvidia,pins = "lcd_gpio2_pv4";
+ nvidia,function = "pwm1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ ap_ready_pv5 {
+ nvidia,pins = "ap_ready_pv5";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ touch_rst_pv6 {
+ nvidia,pins = "touch_rst_pv6";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ touch_clk_pv7 {
+ nvidia,pins = "touch_clk_pv7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ modem_wake_ap_px0 {
+ nvidia,pins = "modem_wake_ap_px0";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ touch_int_px1 {
+ nvidia,pins = "touch_int_px1";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ motion_int_px2 {
+ nvidia,pins = "motion_int_px2";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ als_prox_int_px3 {
+ nvidia,pins = "als_prox_int_px3";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ temp_alert_px4 {
+ nvidia,pins = "temp_alert_px4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ button_power_on_px5 {
+ nvidia,pins = "button_power_on_px5";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ button_vol_up_px6 {
+ nvidia,pins = "button_vol_up_px6";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ button_vol_down_px7 {
+ nvidia,pins = "button_vol_down_px7";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ button_slide_sw_py0 {
+ nvidia,pins = "button_slide_sw_py0";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ button_home_py1 {
+ nvidia,pins = "button_home_py1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ lcd_te_py2 {
+ nvidia,pins = "lcd_te_py2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pwr_i2c_scl_py3 {
+ nvidia,pins = "pwr_i2c_scl_py3";
+ nvidia,function = "i2cpmu";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ pwr_i2c_sda_py4 {
+ nvidia,pins = "pwr_i2c_sda_py4";
+ nvidia,function = "i2cpmu";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ clk_32k_out_py5 {
+ nvidia,pins = "clk_32k_out_py5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pz0 {
+ nvidia,pins = "pz0";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pz1 {
+ nvidia,pins = "pz1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pz2 {
+ nvidia,pins = "pz2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pz3 {
+ nvidia,pins = "pz3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pz4 {
+ nvidia,pins = "pz4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pz5 {
+ nvidia,pins = "pz5";
+ nvidia,function = "soc";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap2_fs_paa0 {
+ nvidia,pins = "dap2_fs_paa0";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap2_sclk_paa1 {
+ nvidia,pins = "dap2_sclk_paa1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap2_din_paa2 {
+ nvidia,pins = "dap2_din_paa2";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dap2_dout_paa3 {
+ nvidia,pins = "dap2_dout_paa3";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ aud_mclk_pbb0 {
+ nvidia,pins = "aud_mclk_pbb0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dvfs_pwm_pbb1 {
+ nvidia,pins = "dvfs_pwm_pbb1";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ dvfs_clk_pbb2 {
+ nvidia,pins = "dvfs_clk_pbb2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x1_aud_pbb3 {
+ nvidia,pins = "gpio_x1_aud_pbb3";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x3_aud_pbb4 {
+ nvidia,pins = "gpio_x3_aud_pbb4";
+ nvidia,function = "rsvd0";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ hdmi_cec_pcc0 {
+ nvidia,pins = "hdmi_cec_pcc0";
+ nvidia,function = "cec";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ hdmi_int_dp_hpd_pcc1 {
+ nvidia,pins = "hdmi_int_dp_hpd_pcc1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_out_pcc2 {
+ nvidia,pins = "spdif_out_pcc2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_in_pcc3 {
+ nvidia,pins = "spdif_in_pcc3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ usb_vbus_en0_pcc4 {
+ nvidia,pins = "usb_vbus_en0_pcc4";
+ nvidia,function = "usb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ usb_vbus_en1_pcc5 {
+ nvidia,pins = "usb_vbus_en1_pcc5";
+ nvidia,function = "usb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+ };
+ dp_hpd0_pcc6 {
+ nvidia,pins = "dp_hpd0_pcc6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pcc7 {
+ nvidia,pins = "pcc7";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+ };
+ spi2_cs1_pdd0 {
+ nvidia,pins = "spi2_cs1_pdd0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ qspi_sck_pee0 {
+ nvidia,pins = "qspi_sck_pee0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ qspi_cs_n_pee1 {
+ nvidia,pins = "qspi_cs_n_pee1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ qspi_io0_pee2 {
+ nvidia,pins = "qspi_io0_pee2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ qspi_io1_pee3 {
+ nvidia,pins = "qspi_io1_pee3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ qspi_io2_pee4 {
+ nvidia,pins = "qspi_io2_pee4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ qspi_io3_pee5 {
+ nvidia,pins = "qspi_io3_pee5";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ core_pwr_req {
+ nvidia,pins = "core_pwr_req";
+ nvidia,function = "core";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ cpu_pwr_req {
+ nvidia,pins = "cpu_pwr_req";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pwr_int_n {
+ nvidia,pins = "pwr_int_n";
+ nvidia,function = "pmi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ clk_32k_in {
+ nvidia,pins = "clk_32k_in";
+ nvidia,function = "clk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ jtag_rtck {
+ nvidia,pins = "jtag_rtck";
+ nvidia,function = "jtag";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ clk_req {
+ nvidia,pins = "clk_req";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ shutdown {
+ nvidia,pins = "shutdown";
+ nvidia,function = "shutdown";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ };
+ };
+
+ serial@70006000 {
+ status = "okay";
+ };
+
+ i2c@7000d000 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ max77620: max77620@3c {
+ compatible = "maxim,max77620";
+ reg = <0x3c>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&max77620_default>;
+
+ max77620_default: pinmux@0 {
+ pin_gpio0 {
+ pins = "gpio0";
+ function = "gpio";
+ };
+
+ pin_gpio1 {
+ pins = "gpio1";
+ function = "fps-out";
+ drive-push-pull = <1>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <7>;
+ maxim,active-fps-power-down-slot = <0>;
+ };
+
+ pin_gpio2_3 {
+ pins = "gpio2", "gpio3";
+ function = "fps-out";
+ drive-open-drain = <1>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ pin_gpio4 {
+ pins = "gpio4";
+ function = "32k-out1";
+ };
+
+ pin_gpio5_6_7 {
+ pins = "gpio5", "gpio6", "gpio7";
+ function = "gpio";
+ drive-push-pull = <1>;
+ };
+
+ pin_gpio2 {
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ pin_gpio3 {
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+ };
+
+ spmic-default-output-high {
+ gpio-hog;
+ output-high;
+ gpios = <2 GPIO_ACTIVE_HIGH 7 GPIO_ACTIVE_HIGH>;
+ };
+
+ fps {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fps0 {
+ reg = <0>;
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ };
+
+ fps1 {
+ reg = <1>;
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN1>;
+ maxim,device-state-on-disabled-event = <MAX77620_FPS_INACTIVE_STATE_SLEEP>;
+ };
+
+ fps2 {
+ reg = <2>;
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ };
+ };
+
+ regulators {
+ in-ldo0-1-supply = <&max77620_sd2>;
+ in-ldo7-8-supply = <&max77620_sd2>;
+
+ max77620_sd0: sd0 {
+ regulator-name = "vdd-core";
+ regulator-enable-ramp-delay = <146>;
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <27500>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+
+ max77620_sd1: sd1 {
+ regulator-name = "vddio-ddr";
+ regulator-enable-ramp-delay = <130>;
+ regulator-ramp-delay = <27500>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ max77620_sd2: sd2 {
+ regulator-name = "vdd-pre-reg";
+ regulator-enable-ramp-delay = <176>;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-ramp-delay = <27500>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ maxim,suspend-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ max77620_sd3: sd3 {
+ regulator-name = "vdd-1v8";
+ regulator-enable-ramp-delay = <242>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <27500>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ max77620_ldo0: ldo0 {
+ regulator-name = "avdd-sys";
+ regulator-enable-ramp-delay = <26>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-ramp-delay = <100000>;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ max77620_ldo1: ldo1 {
+ regulator-name = "vdd-pex";
+ regulator-enable-ramp-delay = <22>;
+ regulator-min-microvolt = <1075000>;
+ regulator-max-microvolt = <1075000>;
+ regulator-ramp-delay = <100000>;
+ regulator-always-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ max77620_ldo2: ldo2 {
+ regulator-name = "vddio-sdmmc3";
+ regulator-enable-ramp-delay = <62>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ max77620_ldo3: ldo3 {
+ regulator-name = "vdd-3v3-eth";
+ regulator-enable-ramp-delay = <50>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <100000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ max77620_ldo4: ldo4 {
+ regulator-name = "vdd-rtc";
+ regulator-enable-ramp-delay = <22>;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-ramp-delay = <100000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ max77620_ldo5: ldo5 {
+ regulator-name = "avdd-ts-hv";
+ regulator-enable-ramp-delay = <62>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ max77620_ldo6: ldo6 {
+ regulator-name = "vdd-ts";
+ regulator-enable-ramp-delay = <36>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <100000>;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ max77620_ldo7: ldo7 {
+ regulator-name = "vdd-gen-pll-edp";
+ regulator-enable-ramp-delay = <24>;
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <100000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ maxim,suspend-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ max77620_ldo8: ldo8 {
+ regulator-name = "vdd-hdmi-dp";
+ regulator-enable-ramp-delay = <22>;
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-ramp-delay = <100000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+ };
+ };
+ };
+
+ pmc@7000e400 {
+ nvidia,invert-interrupt;
+ nvidia,suspend-mode = <0>;
+ nvidia,cpu-pwr-good-time = <0>;
+ nvidia,cpu-pwr-off-time = <0>;
+ nvidia,core-pwr-good-time = <4587 3876>;
+ nvidia,core-pwr-off-time = <39065>;
+ nvidia,core-power-req-active-high;
+ nvidia,sys-clock-req-active-high;
+ status = "okay";
+ };
+
+ sdhci@700b0600 {
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+ };
+
+ clocks {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clk32k_in: clock@0 {
+ compatible = "fixed-clock";
+ reg = <0>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ status = "okay";
+
+ power {
+ debounce-interval = <30>;
+ gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>;
+ label = "Power";
+ linux,code = <KEY_POWER>;
+ wakeup-event-action = <EV_ACT_ASSERTED>;
+ wakeup-source;
+ };
+ };
+
+ cpus {
+ cpu@0 {
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ enable-method = "psci";
+ };
+
+ cpu@2 {
+ enable-method = "psci";
+ };
+
+ cpu@3 {
+ enable-method = "psci";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ device_type = "fixed-regulators";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ battery_reg: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ regulator-name = "vdd-ac-bat";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ vdd_3v3: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "vdd-3v3";
+ regulator-enable-ramp-delay = <160>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+
+ gpio = <&max77620 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ max77620_gpio7: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ regulator-name = "max77620-gpio7";
+ regulator-enable-ramp-delay = <240>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&max77620_ldo0>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ gpio = <&max77620 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ lcd_bl_en: regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <3>;
+ regulator-name = "lcd-bl-en";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+
+ gpio = <&gpio TEGRA_GPIO(V, 1) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ en_vdd_sd: regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <4>;
+ regulator-name = "en-vdd-sd";
+ regulator-enable-ramp-delay = <472>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vdd_3v3>;
+
+ gpio = <&gpio TEGRA_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ en_vdd_cam: regulator@5 {
+ compatible = "regulator-fixed";
+ reg = <5>;
+ regulator-name = "en-vdd-cam";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&gpio TEGRA_GPIO(S, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vdd_sys_boost: regulator@6 {
+ compatible = "regulator-fixed";
+ reg = <6>;
+ regulator-name = "vdd-sys-boost";
+ regulator-enable-ramp-delay = <3090>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+
+ gpio = <&max77620 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vdd_hdmi: regulator@7 {
+ compatible = "regulator-fixed";
+ reg = <7>;
+ regulator-name = "vdd-hdmi";
+ regulator-enable-ramp-delay = <468>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vdd_sys_boost>;
+ regulator-boot-on;
+
+ gpio = <&gpio TEGRA_GPIO(CC, 7) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ en_vdd_cpu_fixed: regulator@8 {
+ compatible = "regulator-fixed";
+ reg = <8>;
+ regulator-name = "vdd-cpu-fixed";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vdd_aux_3v3: regulator@9 {
+ compatible = "regulator-fixed";
+ reg = <9>;
+ regulator-name = "aux-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vdd_snsr_pm: regulator@10 {
+ compatible = "regulator-fixed";
+ reg = <10>;
+ regulator-name = "snsr_pm";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ enable-active-high;
+ };
+
+ vdd_usb_5v0: regulator@11 {
+ compatible = "regulator-fixed";
+ reg = <11>;
+ status = "disabled";
+ regulator-name = "vdd-usb-5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vdd_3v3>;
+
+ enable-active-high;
+ };
+
+ vdd_cdc_1v2_aud: regulator@101 {
+ compatible = "regulator-fixed";
+ reg = <101>;
+ status = "disabled";
+ regulator-name = "vdd_cdc_1v2_aud";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ startup-delay-us = <250000>;
+
+ enable-active-high;
+ };
+
+ vdd_disp_3v0: regulator@12 {
+ compatible = "regulator-fixed";
+ reg = <12>;
+ regulator-name = "vdd-disp-3v0";
+ regulator-enable-ramp-delay = <232>;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+
+ gpio = <&gpio TEGRA_GPIO(I, 3) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vdd_fan: regulator@13 {
+ compatible = "regulator-fixed";
+ reg = <13>;
+ regulator-name = "vdd-fan";
+ regulator-enable-ramp-delay = <284>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&gpio TEGRA_GPIO(E, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_vbus1: regulator@14 {
+ compatible = "regulator-fixed";
+ reg = <14>;
+ regulator-name = "usb-vbus1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&gpio TEGRA_GPIO(CC, 5) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ gpio-open-drain;
+ };
+
+ usb_vbus2: regulator@15 {
+ compatible = "regulator-fixed";
+ reg = <15>;
+ regulator-name = "usb-vbus2";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ gpio-open-drain;
+ };
+
+ vdd_3v3_eth: regulator@16 {
+ compatible = "regulator-fixed";
+ reg = <16>;
+ regulator-name = "vdd-3v3-eth-a02";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ gpio = <&gpio TEGRA_GPIO(D, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ gpio-open-drain;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
index 43cae4798870..a4b8f668a6d4 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -1340,10 +1340,29 @@
status = "okay";
clock-frequency = <1000000>;
+ max77621_cpu: max77621@1b {
+ compatible = "maxim,max77621";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(Y, 1) IRQ_TYPE_LEVEL_LOW>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1231250>;
+ regulator-name = "PPVAR_CPU";
+ regulator-ramp-delay = <12500>;
+ maxim,dvs-default-state = <1>;
+ maxim,enable-active-discharge;
+ maxim,enable-bias-control;
+ maxim,enable-etr;
+ maxim,enable-gpio = <&max77620 5 0>;
+ maxim,externally-enable;
+ };
+
max77620: max77620@3c {
compatible = "maxim,max77620";
reg = <0x3c>;
- interrupts = <0 86 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
@@ -1679,6 +1698,18 @@
status = "okay";
};
+ clock@70110000 {
+ status = "okay";
+ nvidia,cf = <6>;
+ nvidia,ci = <0>;
+ nvidia,cg = <2>;
+ nvidia,droop-ctrl = <0x00000f00>;
+ nvidia,force-mode = <1>;
+ nvidia,i2c-fs-rate = <400000>;
+ nvidia,sample-rate = <12500>;
+ vdd-cpu-supply = <&max77621_cpu>;
+ };
+
aconnect@702c0000 {
status = "okay";
@@ -1724,7 +1755,6 @@
gpio-keys {
compatible = "gpio-keys";
- gpio-keys,name = "gpio-keys";
power {
label = "Power";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 2205d66b0443..6574396d2257 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -4,6 +4,7 @@
#include <dt-bindings/memory/tegra210-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h>
+#include <dt-bindings/reset/tegra210-car.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
@@ -469,13 +470,55 @@
apbmisc@70000800 {
compatible = "nvidia,tegra210-apbmisc", "nvidia,tegra20-apbmisc";
reg = <0x0 0x70000800 0x0 0x64>, /* Chip revision */
- <0x0 0x7000e864 0x0 0x04>; /* Strapping options */
+ <0x0 0x70000008 0x0 0x04>; /* Strapping options */
};
pinmux: pinmux@700008d4 {
compatible = "nvidia,tegra210-pinmux";
reg = <0x0 0x700008d4 0x0 0x29c>, /* Pad control registers */
<0x0 0x70003000 0x0 0x294>; /* Mux registers */
+ sdmmc1_3v3_drv: sdmmc1-3v3-drv {
+ sdmmc1 {
+ nvidia,pins = "drive_sdmmc1";
+ nvidia,pull-down-strength = <0x8>;
+ nvidia,pull-up-strength = <0x8>;
+ };
+ };
+ sdmmc1_1v8_drv: sdmmc1-1v8-drv {
+ sdmmc1 {
+ nvidia,pins = "drive_sdmmc1";
+ nvidia,pull-down-strength = <0x4>;
+ nvidia,pull-up-strength = <0x3>;
+ };
+ };
+ sdmmc2_1v8_drv: sdmmc2-1v8-drv {
+ sdmmc2 {
+ nvidia,pins = "drive_sdmmc2";
+ nvidia,pull-down-strength = <0x10>;
+ nvidia,pull-up-strength = <0x10>;
+ };
+ };
+ sdmmc3_3v3_drv: sdmmc3-3v3-drv {
+ sdmmc3 {
+ nvidia,pins = "drive_sdmmc3";
+ nvidia,pull-down-strength = <0x8>;
+ nvidia,pull-up-strength = <0x8>;
+ };
+ };
+ sdmmc3_1v8_drv: sdmmc3-1v8-drv {
+ sdmmc3 {
+ nvidia,pins = "drive_sdmmc3";
+ nvidia,pull-down-strength = <0x4>;
+ nvidia,pull-up-strength = <0x3>;
+ };
+ };
+ sdmmc4_1v8_drv: sdmmc4-1v8-drv {
+ sdmmc4 {
+ nvidia,pins = "drive_sdmmc4";
+ nvidia,pull-down-strength = <0x10>;
+ nvidia,pull-up-strength = <0x10>;
+ };
+ };
};
/*
@@ -554,7 +597,7 @@
};
i2c@7000c000 {
- compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c";
reg = <0x0 0x7000c000 0x0 0x100>;
interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -569,7 +612,7 @@
};
i2c@7000c400 {
- compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c";
reg = <0x0 0x7000c400 0x0 0x100>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -584,7 +627,7 @@
};
i2c@7000c500 {
- compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c";
reg = <0x0 0x7000c500 0x0 0x100>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -599,7 +642,7 @@
};
i2c@7000c700 {
- compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c";
reg = <0x0 0x7000c700 0x0 0x100>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -617,7 +660,7 @@
};
i2c@7000d000 {
- compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c";
reg = <0x0 0x7000d000 0x0 0x100>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -632,7 +675,7 @@
};
i2c@7000d100 {
- compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
+ compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c";
reg = <0x0 0x7000d100 0x0 0x100>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -1050,9 +1093,12 @@
clock-names = "sdhci";
resets = <&tegra_car 14>;
reset-names = "sdhci";
- pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
+ pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
+ "sdmmc-3v3-drv", "sdmmc-1v8-drv";
pinctrl-0 = <&sdmmc1_3v3>;
pinctrl-1 = <&sdmmc1_1v8>;
+ pinctrl-2 = <&sdmmc1_3v3_drv>;
+ pinctrl-3 = <&sdmmc1_1v8_drv>;
nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
@@ -1075,6 +1121,8 @@
clock-names = "sdhci";
resets = <&tegra_car 9>;
reset-names = "sdhci";
+ pinctrl-names = "sdmmc-1v8-drv";
+ pinctrl-0 = <&sdmmc2_1v8_drv>;
nvidia,pad-autocal-pull-up-offset-1v8 = <0x05>;
nvidia,pad-autocal-pull-down-offset-1v8 = <0x05>;
nvidia,default-tap = <0x8>;
@@ -1090,9 +1138,12 @@
clock-names = "sdhci";
resets = <&tegra_car 69>;
reset-names = "sdhci";
- pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
+ pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
+ "sdmmc-3v3-drv", "sdmmc-1v8-drv";
pinctrl-0 = <&sdmmc3_3v3>;
pinctrl-1 = <&sdmmc3_1v8>;
+ pinctrl-2 = <&sdmmc3_3v3_drv>;
+ pinctrl-3 = <&sdmmc3_1v8_drv>;
nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
@@ -1110,6 +1161,9 @@
clock-names = "sdhci";
resets = <&tegra_car 15>;
reset-names = "sdhci";
+ pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv";
+ pinctrl-0 = <&sdmmc4_1v8_drv>;
+ pinctrl-1 = <&sdmmc4_1v8_drv>;
nvidia,pad-autocal-pull-up-offset-1v8 = <0x05>;
nvidia,pad-autocal-pull-down-offset-1v8 = <0x05>;
nvidia,default-tap = <0x8>;
@@ -1131,6 +1185,24 @@
#nvidia,mipi-calibrate-cells = <1>;
};
+ dfll: clock@70110000 {
+ compatible = "nvidia,tegra210-dfll";
+ reg = <0 0x70110000 0 0x100>, /* DFLL control */
+ <0 0x70110000 0 0x100>, /* I2C output control */
+ <0 0x70110100 0 0x100>, /* Integrated I2C controller */
+ <0 0x70110200 0 0x100>; /* Look-up table RAM */
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_DFLL_SOC>,
+ <&tegra_car TEGRA210_CLK_DFLL_REF>,
+ <&tegra_car TEGRA210_CLK_I2C5>;
+ clock-names = "soc", "ref", "i2c";
+ resets = <&tegra_car TEGRA210_RST_DFLL_DVCO>;
+ reset-names = "dvco";
+ #clock-cells = <0>;
+ clock-output-names = "dfllCPU_out";
+ status = "disabled";
+ };
+
aconnect@702c0000 {
compatible = "nvidia,tegra210-aconnect";
clocks = <&tegra_car TEGRA210_CLK_APE>,
@@ -1285,6 +1357,12 @@
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <0>;
+ clocks = <&tegra_car TEGRA210_CLK_CCLK_G>,
+ <&tegra_car TEGRA210_CLK_PLL_X>,
+ <&tegra_car TEGRA210_CLK_PLL_P_OUT4>,
+ <&dfll>;
+ clock-names = "cpu_g", "pll_x", "pll_p", "dfll";
+ clock-latency = <300000>;
};
cpu@1 {
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 46feedf7c989..134617d87a1a 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -644,6 +644,8 @@
l11 {
regulator-min-microvolt = <1750000>;
regulator-max-microvolt = <3337000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
};
l12 {
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 18226980f7c3..aea1dbc3f53e 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -441,7 +441,7 @@
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0>;
next-level-cache = <&L2_0>;
enable-method = "psci";
@@ -449,7 +449,7 @@
CPU1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x1>;
next-level-cache = <&L2_0>;
@@ -457,7 +457,7 @@
CPU2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x2>;
next-level-cache = <&L2_0>;
@@ -465,7 +465,7 @@
CPU3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
enable-method = "psci";
reg = <0x3>;
next-level-cache = <&L2_0>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index c5348c3da5a2..0803ca8c02da 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -106,48 +106,48 @@
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0>;
next-level-cache = <&L2_0>;
enable-method = "psci";
cpu-idle-states = <&CPU_SPC>;
- clocks = <&apcs 0>;
+ clocks = <&apcs>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
};
CPU1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x1>;
next-level-cache = <&L2_0>;
enable-method = "psci";
cpu-idle-states = <&CPU_SPC>;
- clocks = <&apcs 0>;
+ clocks = <&apcs>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
};
CPU2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x2>;
next-level-cache = <&L2_0>;
enable-method = "psci";
cpu-idle-states = <&CPU_SPC>;
- clocks = <&apcs 0>;
+ clocks = <&apcs>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
};
CPU3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x3>;
next-level-cache = <&L2_0>;
enable-method = "psci";
cpu-idle-states = <&CPU_SPC>;
- clocks = <&apcs 0>;
+ clocks = <&apcs>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
index cf5cacdd624d..50cefb822d6d 100644
--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
@@ -38,7 +38,7 @@
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index f33c41d01c86..6a4049aae0c3 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -40,7 +40,7 @@
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
diff --git a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
index 8d5114d16d09..131878db9852 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
@@ -139,7 +139,7 @@
};
pinconf {
- pins = "gpio4", "gpiio5", "gpio6", "gpio7";
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
drive-strength = <2>;
bias-disable;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 99b7495455a6..c761269caf80 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -306,6 +306,40 @@
#clock-cells = <1>;
};
+ rpmpd: power-controller {
+ compatible = "qcom,msm8996-rpmpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmpd_opp_table>;
+
+ rpmpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmpd_opp1: opp1 {
+ opp-level = <1>;
+ };
+
+ rpmpd_opp2: opp2 {
+ opp-level = <2>;
+ };
+
+ rpmpd_opp3: opp3 {
+ opp-level = <3>;
+ };
+
+ rpmpd_opp4: opp4 {
+ opp-level = <4>;
+ };
+
+ rpmpd_opp5: opp5 {
+ opp-level = <5>;
+ };
+
+ rpmpd_opp6: opp6 {
+ opp-level = <6>;
+ };
+ };
+ };
+
pm8994-regulators {
compatible = "qcom,rpm-pm8994-regulators";
@@ -404,7 +438,7 @@
};
intc: interrupt-controller@9bc0000 {
- compatible = "arm,gic-v3";
+ compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
#interrupt-cells = <3>;
interrupt-controller;
#redistributor-regions = <1>;
@@ -966,7 +1000,7 @@
clock-names = "iface",
"bus";
#iommu-cells = <1>;
- status = "ok";
+ status = "disabled";
};
camss: camss@a00000 {
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index 50e9033aa7f6..f0901067b043 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -65,6 +65,13 @@
status = "okay";
};
+&qusb2phy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l12a_1p8>;
+ vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
+};
+
&rpm_requests {
pm8998-regulators {
compatible = "qcom,rpm-pm8998-regulators";
@@ -192,6 +199,8 @@
vreg_l21a_2p95: l21 {
regulator-min-microvolt = <2960000>;
regulator-max-microvolt = <2960000>;
+ regulator-allow-set-load;
+ regulator-system-load = <800000>;
};
vreg_l22a_2p85: l22 {
regulator-min-microvolt = <2864000>;
@@ -257,3 +266,18 @@
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
};
+
+&usb3 {
+ status = "okay";
+};
+
+&usb3_dwc3 {
+ dr_mode = "host"; /* Force to host until we have Type-C hooked up */
+};
+
+&usb3phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l1a_0p875>;
+ vdda-pll-supply = <&vreg_l2a_1p2>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 8d41b69ec2da..3fd0769fe648 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -3,6 +3,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8998.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/gpio/gpio.h>
/ {
@@ -37,7 +38,7 @@
};
memory@86200000 {
- reg = <0x0 0x86200000 0x0 0x2600000>;
+ reg = <0x0 0x86200000 0x0 0x2d00000>;
no-map;
};
@@ -266,6 +267,11 @@
rpm_requests: rpm-requests {
compatible = "qcom,rpm-msm8998";
qcom,glink-channels = "rpm_requests";
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-msm8998", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
};
};
@@ -540,6 +546,11 @@
reg = <0x780000 0x621c>;
#address-cells = <1>;
#size-cells = <1>;
+
+ qusb2_hstx_trim: hstx-trim@423a {
+ reg = <0x423a 0x1>;
+ bits = <0 4>;
+ };
};
gcc: clock-controller@100000 {
@@ -607,6 +618,93 @@
#mbox-cells = <1>;
};
+ usb3: usb@a8f8800 {
+ compatible = "qcom,msm8998-dwc3", "qcom,dwc3";
+ reg = <0x0a8f8800 0x400>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_AGGRE1_USB3_AXI_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>;
+ clock-names = "cfg_noc", "core", "iface", "mock_utmi",
+ "sleep";
+
+ assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <120000000>;
+
+ interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq";
+
+ power-domains = <&gcc USB_30_GDSC>;
+
+ resets = <&gcc GCC_USB_30_BCR>;
+
+ usb3_dwc3: dwc3@a800000 {
+ compatible = "snps,dwc3";
+ reg = <0x0a800000 0xcd00>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ phys = <&qusb2phy>, <&usb1_ssphy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
+ };
+ };
+
+ usb3phy: phy@c010000 {
+ compatible = "qcom,msm8998-qmp-usb3-phy";
+ reg = <0x0c010000 0x18c>;
+ status = "disabled";
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "phy", "common";
+
+ usb1_ssphy: lane@c010200 {
+ reg = <0xc010200 0x128>,
+ <0xc010400 0x200>,
+ <0xc010c00 0x20c>,
+ <0xc010600 0x128>,
+ <0xc010800 0x200>;
+ #phy-cells = <0>;
+ clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ };
+ };
+
+ qusb2phy: phy@c012000 {
+ compatible = "qcom,msm8998-qusb2-phy";
+ reg = <0x0c012000 0x2a8>;
+ status = "disabled";
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+
+ nvmem-cells = <&qusb2_hstx_trim>;
+ };
+
sdhc2: sdhci@c0a4900 {
compatible = "qcom,sdhci-msm-v4";
reg = <0xc0a4900 0x314>, <0xc0a4000 0x800>;
@@ -624,6 +722,186 @@
status = "disabled";
};
+ blsp1_i2c1: i2c@c175000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c175000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp1_i2c2: i2c@c176000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c176000 0x600>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp1_i2c3: i2c@c177000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c177000 0x600>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp1_i2c4: i2c@c178000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c178000 0x600>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp1_i2c5: i2c@c179000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c179000 0x600>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp1_i2c6: i2c@c17a000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c17a000 0x600>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp2_i2c0: i2c@c1b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c1b5000 0x600>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp2_i2c1: i2c@c1b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c1b6000 0x600>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp2_i2c2: i2c@c1b7000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c1b7000 0x600>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP2_QUP3_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp2_i2c3: i2c@c1b8000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c1b8000 0x600>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP2_QUP4_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp2_i2c4: i2c@c1b9000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c1b9000 0x600>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP2_QUP5_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ blsp2_i2c5: i2c@c1ba000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x0c1ba000 0x600>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_BLSP2_QUP6_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <400000>;
+
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
blsp2_uart1: serial@c1b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0xc1b0000 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index 15a37cbcd216..9dd2df1cbf47 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -32,6 +32,12 @@
bias-pull-up;
linux,code = <KEY_POWER>;
};
+
+ watchdog {
+ compatible = "qcom,pm8916-wdt";
+ interrupts = <0x0 0x8 6 IRQ_TYPE_EDGE_RISING>;
+ timeout-sec = <60>;
+ };
};
pm8916_gpios: gpios@c000 {
diff --git a/arch/arm64/boot/dts/qcom/pms405.dtsi b/arch/arm64/boot/dts/qcom/pms405.dtsi
index ad2b62dfc9f6..1bb836d1e8aa 100644
--- a/arch/arm64/boot/dts/qcom/pms405.dtsi
+++ b/arch/arm64/boot/dts/qcom/pms405.dtsi
@@ -3,6 +3,32 @@
#include <dt-bindings/spmi/spmi.h>
#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ thermal-zones {
+ pms405 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&pms405_temp>;
+
+ trips {
+ pms405_alert0: pms405-alert0 {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ pms405_crit: pms405-crit {
+ temperature = <125000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
&spmi_bus {
pms405_0: pms405@0 {
@@ -45,6 +71,59 @@
};
};
+ pms405_temp: temp-alarm@2400 {
+ compatible = "qcom,spmi-temp-alarm";
+ reg = <0x2400>;
+ interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
+ io-channels = <&pms405_adc ADC5_DIE_TEMP>;
+ io-channel-names = "thermal";
+ #thermal-sensor-cells = <0>;
+ };
+
+ pms405_adc: adc@3100 {
+ compatible = "qcom,pms405-adc", "qcom,spmi-adc-rev2";
+ reg = <0x3100>;
+ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+
+ ref_gnd {
+ reg = <ADC5_REF_GND>;
+ qcom,pre-scaling = <1 1>;
+ };
+
+ vref_1p25 {
+ reg = <ADC5_1P25VREF>;
+ qcom,pre-scaling = <1 1>;
+ };
+
+ vph_pwr {
+ reg = <ADC5_VPH_PWR>;
+ qcom,pre-scaling = <1 3>;
+ };
+
+ die_temp {
+ reg = <ADC5_DIE_TEMP>;
+ qcom,pre-scaling = <1 1>;
+ };
+
+ xo_therm_100k_pu {
+ reg = <ADC5_XO_THERM_100K_PU>;
+ qcom,pre-scaling = <1 1>;
+ };
+
+ amux_thm1_100k_pu {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,pre-scaling = <1 1>;
+ };
+
+ amux_thm3_100k_pu {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,pre-scaling = <1 1>;
+ };
+ };
+
rtc@6000 {
compatible = "qcom,pm8941-rtc";
reg = <0x6000>;
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index a39924efebe4..50b3589c7f15 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -127,6 +127,7 @@
status = "ok";
mmc-ddr-1_8v;
+ mmc-hs400-1_8v;
bus-width = <8>;
non-removable;
@@ -186,3 +187,21 @@
};
};
};
+
+&wifi {
+ status = "okay";
+};
+
+/* PINCTRL - additions to nodes defined in qcs404.dtsi */
+
+&blsp1_uart2_default {
+ rx {
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ tx {
+ drive-strength = <2>;
+ bias-disable;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index 9b5c16562bbe..e8fd26633d57 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -3,6 +3,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-qcs404.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
/ {
interrupt-parent = <&intc>;
@@ -224,6 +225,11 @@
rpm_requests: glink-channel {
compatible = "qcom,rpm-qcs404";
qcom,glink-channels = "rpm_requests";
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-qcs404";
+ #clock-cells = <1>;
+ };
};
};
@@ -272,6 +278,105 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+
+ blsp1_i2c0_default: blsp1-i2c0-default {
+ pins = "gpio32", "gpio33";
+ function = "blsp_i2c0";
+ };
+
+ blsp1_i2c1_default: blsp1-i2c1-default {
+ pins = "gpio24", "gpio25";
+ function = "blsp_i2c1";
+ };
+
+ blsp1_i2c2_default: blsp1-i2c2-default {
+ sda {
+ pins = "gpio19";
+ function = "blsp_i2c_sda_a2";
+ };
+
+ scl {
+ pins = "gpio20";
+ function = "blsp_i2c_scl_a2";
+ };
+ };
+
+ blsp1_i2c3_default: blsp1-i2c3-default {
+ pins = "gpio84", "gpio85";
+ function = "blsp_i2c3";
+ };
+
+ blsp1_i2c4_default: blsp1-i2c4-default {
+ pins = "gpio117", "gpio118";
+ function = "blsp_i2c4";
+ };
+
+ blsp1_uart0_default: blsp1-uart0-default {
+ pins = "gpio30", "gpio31", "gpio32", "gpio33";
+ function = "blsp_uart0";
+ };
+
+ blsp1_uart1_default: blsp1-uart1-default {
+ pins = "gpio22", "gpio23";
+ function = "blsp_uart1";
+ };
+
+ blsp1_uart2_default: blsp1-uart2-default {
+ rx {
+ pins = "gpio18";
+ function = "blsp_uart_rx_a2";
+ };
+
+ tx {
+ pins = "gpio17";
+ function = "blsp_uart_tx_a2";
+ };
+ };
+
+ blsp1_uart3_default: blsp1-uart3-default {
+ pins = "gpio82", "gpio83", "gpio84", "gpio85";
+ function = "blsp_uart3";
+ };
+
+ blsp2_i2c0_default: blsp2-i2c0-default {
+ pins = "gpio28", "gpio29";
+ function = "blsp_i2c5";
+ };
+
+ blsp1_spi0_default: blsp1-spi0-default {
+ pins = "gpio30", "gpio31", "gpio32", "gpio33";
+ function = "blsp_spi0";
+ };
+
+ blsp1_spi1_default: blsp1-spi1-default {
+ pins = "gpio22", "gpio23", "gpio24", "gpio25";
+ function = "blsp_spi1";
+ };
+
+ blsp1_spi2_default: blsp1-spi2-default {
+ pins = "gpio17", "gpio18", "gpio19", "gpio20";
+ function = "blsp_spi2";
+ };
+
+ blsp1_spi3_default: blsp1-spi3-default {
+ pins = "gpio82", "gpio83", "gpio84", "gpio85";
+ function = "blsp_spi3";
+ };
+
+ blsp1_spi4_default: blsp1-spi4-default {
+ pins = "gpio37", "gpio38", "gpio117", "gpio118";
+ function = "blsp_spi4";
+ };
+
+ blsp2_spi0_default: blsp2-spi0-default {
+ pins = "gpio26", "gpio27", "gpio28", "gpio29";
+ function = "blsp_spi5";
+ };
+
+ blsp2_uart0_default: blsp2-uart0-default {
+ pins = "gpio26", "gpio27", "gpio28", "gpio29";
+ function = "blsp_uart5";
+ };
};
gcc: clock-controller@1800000 {
@@ -335,6 +440,32 @@
status = "okay";
};
+ blsp1_uart0: serial@78af000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x078af000 0x200>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART0_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 1>, <&blsp1_dma 0>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_uart0_default>;
+ status = "disabled";
+ };
+
+ blsp1_uart1: serial@78b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x078b0000 0x200>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 3>, <&blsp1_dma 2>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ status = "disabled";
+ };
+
blsp1_uart2: serial@78b1000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x078b1000 0x200>;
@@ -343,9 +474,237 @@
clock-names = "core", "iface";
dmas = <&blsp1_dma 5>, <&blsp1_dma 4>;
dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_uart2_default>;
status = "okay";
};
+ wifi: wifi@a000000 {
+ compatible = "qcom,wcn3990-wifi";
+ reg = <0xa000000 0x800000>;
+ reg-names = "membase";
+ memory-region = <&wlan_msa_mem>;
+ interrupts = <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ blsp1_uart3: serial@78b2000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x078b2000 0x200>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp1_dma 7>, <&blsp1_dma 6>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_uart3_default>;
+ status = "disabled";
+ };
+
+ blsp1_i2c0: i2c@78b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b5000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP0_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_i2c0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_spi0: spi@78b5000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b5000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP0_SPI_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_i2c1: i2c@78b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b6000 0x600>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_i2c1_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_spi1: spi@78b6000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b6000 0x600>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_spi1_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@78b7000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b7000 0x600>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_spi2: spi@78b7000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b7000 0x600>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_spi2_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_i2c3: i2c@78b8000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b8000 0x600>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_i2c3_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_spi3: spi@78b8000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b8000 0x600>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_spi3_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_i2c4: i2c@78b9000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b9000 0x600>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_i2c4_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp1_spi4: spi@78b9000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b9000 0x600>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp1_spi4_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_dma: dma@7ac4000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x07ac4000 0x17000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,controlled-remotely = <1>;
+ qcom,ee = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart0: serial@7aef000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x07aef000 0x200>;
+ interrupts = <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART0_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp2_dma 1>, <&blsp2_dma 0>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp2_uart0_default>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@7af5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07af5000 0x600>;
+ interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP0_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi0: spi@7af5000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07af5000 0x600>;
+ interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP0_SPI_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp2_spi0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
intc: interrupt-controller@b000000 {
compatible = "qcom,msm-qgic2";
interrupt-controller;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index b3def0358177..af8c6a2445a2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -7,6 +7,7 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "sdm845.dtsi"
@@ -346,7 +347,9 @@
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&i2c10 {
@@ -358,14 +361,36 @@
status = "okay";
};
-&tlmm {
- gpio-reserved-ranges = <0 4>, <81 4>;
+&sdhc_2 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdc2_clk &sdc2_cmd &sdc2_data &sd_card_det_n>;
+
+ vmmc-supply = <&vreg_l21a_2p95>;
+ vqmmc-supply = <&vddpx_2>;
+
+ cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
};
&uart9 {
status = "okay";
};
+&ufs_mem_hc {
+ status = "okay";
+
+ vcc-supply = <&vreg_l20a_2p95>;
+ vcc-max-microamp = <600000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vdda_ufs1_core>;
+ vdda-pll-supply = <&vdda_ufs1_1p2>;
+};
+
&usb_1 {
status = "okay";
};
@@ -427,6 +452,14 @@
vdda-pll-supply = <&vdda_usb2_ss_core>;
};
+&wifi {
+ status = "okay";
+ vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+};
+
/* PINCTRL - additions to nodes defined in sdm845.dtsi */
&qup_i2c10_default {
@@ -450,3 +483,48 @@
bias-pull-up;
};
};
+
+&tlmm {
+ gpio-reserved-ranges = <0 4>, <81 4>;
+
+ sdc2_clk: sdc2-clk {
+ pinconf {
+ pins = "sdc2_clk";
+ bias-disable;
+
+ /*
+ * It seems that mmc_test reports errors if drive
+ * strength is not 16 on clk, cmd, and data pins.
+ */
+ drive-strength = <16>;
+ };
+ };
+
+ sdc2_cmd: sdc2-cmd {
+ pinconf {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <16>;
+ };
+ };
+
+ sdc2_data: sdc2-data {
+ pinconf {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <16>;
+ };
+ };
+
+ sd_card_det_n: sd-card-det-n {
+ pinmux {
+ pins = "gpio126";
+ function = "gpio";
+ };
+
+ pinconf {
+ pins = "gpio126";
+ bias-pull-up;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index c27cbd3bcb0a..5308f1671824 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -7,12 +7,17 @@
#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+#include <dt-bindings/clock/qcom,lpass-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,videocc-sdm845.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/phy/phy-qcom-qusb2.h>
#include <dt-bindings/reset/qcom,sdm845-aoss.h>
+#include <dt-bindings/reset/qcom,sdm845-pdc.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&intc>;
@@ -88,6 +93,21 @@
reg = <0 0x86200000 0 0x2d00000>;
no-map;
};
+
+ wlan_msa_mem: memory@96700000 {
+ reg = <0 0x96700000 0 0x100000>;
+ no-map;
+ };
+
+ mpss_region: memory@8e000000 {
+ reg = <0 0x8e000000 0 0x7800000>;
+ no-map;
+ };
+
+ mba_region: memory@96500000 {
+ reg = <0 0x96500000 0 0x200000>;
+ no-map;
+ };
};
cpus {
@@ -99,6 +119,8 @@
compatible = "qcom,kryo385";
reg = <0x0 0x0>;
enable-method = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
compatible = "cache";
@@ -114,6 +136,8 @@
compatible = "qcom,kryo385";
reg = <0x0 0x100>;
enable-method = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_100>;
L2_100: l2-cache {
compatible = "cache";
@@ -126,6 +150,8 @@
compatible = "qcom,kryo385";
reg = <0x0 0x200>;
enable-method = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_200>;
L2_200: l2-cache {
compatible = "cache";
@@ -138,6 +164,8 @@
compatible = "qcom,kryo385";
reg = <0x0 0x300>;
enable-method = "psci";
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_300>;
L2_300: l2-cache {
compatible = "cache";
@@ -150,6 +178,8 @@
compatible = "qcom,kryo385";
reg = <0x0 0x400>;
enable-method = "psci";
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_400>;
L2_400: l2-cache {
compatible = "cache";
@@ -162,6 +192,8 @@
compatible = "qcom,kryo385";
reg = <0x0 0x500>;
enable-method = "psci";
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_500>;
L2_500: l2-cache {
compatible = "cache";
@@ -174,6 +206,8 @@
compatible = "qcom,kryo385";
reg = <0x0 0x600>;
enable-method = "psci";
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_600>;
L2_600: l2-cache {
compatible = "cache";
@@ -186,6 +220,8 @@
compatible = "qcom,kryo385";
reg = <0x0 0x700>;
enable-method = "psci";
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_700>;
L2_700: l2-cache {
compatible = "cache";
@@ -222,6 +258,12 @@
};
};
+ firmware {
+ scm {
+ compatible = "qcom,scm-sdm845", "qcom,scm";
+ };
+ };
+
tcsr_mutex: hwlock {
compatible = "qcom,tcsr-mutex";
syscon = <&tcsr_mutex_regs 0 0x1000>;
@@ -328,14 +370,15 @@
};
soc: soc {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0 0xffffffff>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0 0 0 0x10 0>;
+ dma-ranges = <0 0 0 0 0x10 0>;
compatible = "simple-bus";
gcc: clock-controller@100000 {
compatible = "qcom,gcc-sdm845";
- reg = <0x100000 0x1f0000>;
+ reg = <0 0x00100000 0 0x1f0000>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
@@ -343,7 +386,7 @@
qfprom@784000 {
compatible = "qcom,qfprom";
- reg = <0x784000 0x8ff>;
+ reg = <0 0x00784000 0 0x8ff>;
#address-cells = <1>;
#size-cells = <1>;
@@ -360,25 +403,25 @@
rng: rng@793000 {
compatible = "qcom,prng-ee";
- reg = <0x00793000 0x1000>;
+ reg = <0 0x00793000 0 0x1000>;
clocks = <&gcc GCC_PRNG_AHB_CLK>;
clock-names = "core";
};
qupv3_id_0: geniqup@8c0000 {
compatible = "qcom,geni-se-qup";
- reg = <0x8c0000 0x6000>;
+ reg = <0 0x008c0000 0 0x6000>;
clock-names = "m-ahb", "s-ahb";
clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
status = "disabled";
i2c0: i2c@880000 {
compatible = "qcom,geni-i2c";
- reg = <0x880000 0x4000>;
+ reg = <0 0x00880000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
pinctrl-names = "default";
@@ -391,7 +434,7 @@
spi0: spi@880000 {
compatible = "qcom,geni-spi";
- reg = <0x880000 0x4000>;
+ reg = <0 0x00880000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
pinctrl-names = "default";
@@ -404,7 +447,7 @@
uart0: serial@880000 {
compatible = "qcom,geni-uart";
- reg = <0x880000 0x4000>;
+ reg = <0 0x00880000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
pinctrl-names = "default";
@@ -415,7 +458,7 @@
i2c1: i2c@884000 {
compatible = "qcom,geni-i2c";
- reg = <0x884000 0x4000>;
+ reg = <0 0x00884000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
pinctrl-names = "default";
@@ -428,7 +471,7 @@
spi1: spi@884000 {
compatible = "qcom,geni-spi";
- reg = <0x884000 0x4000>;
+ reg = <0 0x00884000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
pinctrl-names = "default";
@@ -441,7 +484,7 @@
uart1: serial@884000 {
compatible = "qcom,geni-uart";
- reg = <0x884000 0x4000>;
+ reg = <0 0x00884000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
pinctrl-names = "default";
@@ -452,7 +495,7 @@
i2c2: i2c@888000 {
compatible = "qcom,geni-i2c";
- reg = <0x888000 0x4000>;
+ reg = <0 0x00888000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
pinctrl-names = "default";
@@ -465,7 +508,7 @@
spi2: spi@888000 {
compatible = "qcom,geni-spi";
- reg = <0x888000 0x4000>;
+ reg = <0 0x00888000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
pinctrl-names = "default";
@@ -478,7 +521,7 @@
uart2: serial@888000 {
compatible = "qcom,geni-uart";
- reg = <0x888000 0x4000>;
+ reg = <0 0x00888000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
pinctrl-names = "default";
@@ -489,7 +532,7 @@
i2c3: i2c@88c000 {
compatible = "qcom,geni-i2c";
- reg = <0x88c000 0x4000>;
+ reg = <0 0x0088c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
pinctrl-names = "default";
@@ -502,7 +545,7 @@
spi3: spi@88c000 {
compatible = "qcom,geni-spi";
- reg = <0x88c000 0x4000>;
+ reg = <0 0x0088c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
pinctrl-names = "default";
@@ -515,7 +558,7 @@
uart3: serial@88c000 {
compatible = "qcom,geni-uart";
- reg = <0x88c000 0x4000>;
+ reg = <0 0x0088c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
pinctrl-names = "default";
@@ -526,7 +569,7 @@
i2c4: i2c@890000 {
compatible = "qcom,geni-i2c";
- reg = <0x890000 0x4000>;
+ reg = <0 0x00890000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
pinctrl-names = "default";
@@ -539,7 +582,7 @@
spi4: spi@890000 {
compatible = "qcom,geni-spi";
- reg = <0x890000 0x4000>;
+ reg = <0 0x00890000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
pinctrl-names = "default";
@@ -552,7 +595,7 @@
uart4: serial@890000 {
compatible = "qcom,geni-uart";
- reg = <0x890000 0x4000>;
+ reg = <0 0x00890000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
pinctrl-names = "default";
@@ -563,7 +606,7 @@
i2c5: i2c@894000 {
compatible = "qcom,geni-i2c";
- reg = <0x894000 0x4000>;
+ reg = <0 0x00894000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
pinctrl-names = "default";
@@ -576,7 +619,7 @@
spi5: spi@894000 {
compatible = "qcom,geni-spi";
- reg = <0x894000 0x4000>;
+ reg = <0 0x00894000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
pinctrl-names = "default";
@@ -589,7 +632,7 @@
uart5: serial@894000 {
compatible = "qcom,geni-uart";
- reg = <0x894000 0x4000>;
+ reg = <0 0x00894000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
pinctrl-names = "default";
@@ -600,7 +643,7 @@
i2c6: i2c@898000 {
compatible = "qcom,geni-i2c";
- reg = <0x898000 0x4000>;
+ reg = <0 0x00898000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
pinctrl-names = "default";
@@ -613,7 +656,7 @@
spi6: spi@898000 {
compatible = "qcom,geni-spi";
- reg = <0x898000 0x4000>;
+ reg = <0 0x00898000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
pinctrl-names = "default";
@@ -626,7 +669,7 @@
uart6: serial@898000 {
compatible = "qcom,geni-uart";
- reg = <0x898000 0x4000>;
+ reg = <0 0x00898000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
pinctrl-names = "default";
@@ -637,7 +680,7 @@
i2c7: i2c@89c000 {
compatible = "qcom,geni-i2c";
- reg = <0x89c000 0x4000>;
+ reg = <0 0x0089c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
pinctrl-names = "default";
@@ -650,7 +693,7 @@
spi7: spi@89c000 {
compatible = "qcom,geni-spi";
- reg = <0x89c000 0x4000>;
+ reg = <0 0x0089c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
pinctrl-names = "default";
@@ -663,7 +706,7 @@
uart7: serial@89c000 {
compatible = "qcom,geni-uart";
- reg = <0x89c000 0x4000>;
+ reg = <0 0x0089c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
pinctrl-names = "default";
@@ -675,18 +718,18 @@
qupv3_id_1: geniqup@ac0000 {
compatible = "qcom,geni-se-qup";
- reg = <0xac0000 0x6000>;
+ reg = <0 0x00ac0000 0 0x6000>;
clock-names = "m-ahb", "s-ahb";
clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
status = "disabled";
i2c8: i2c@a80000 {
compatible = "qcom,geni-i2c";
- reg = <0xa80000 0x4000>;
+ reg = <0 0x00a80000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
pinctrl-names = "default";
@@ -699,7 +742,7 @@
spi8: spi@a80000 {
compatible = "qcom,geni-spi";
- reg = <0xa80000 0x4000>;
+ reg = <0 0x00a80000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
pinctrl-names = "default";
@@ -712,7 +755,7 @@
uart8: serial@a80000 {
compatible = "qcom,geni-uart";
- reg = <0xa80000 0x4000>;
+ reg = <0 0x00a80000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
pinctrl-names = "default";
@@ -723,7 +766,7 @@
i2c9: i2c@a84000 {
compatible = "qcom,geni-i2c";
- reg = <0xa84000 0x4000>;
+ reg = <0 0x00a84000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
pinctrl-names = "default";
@@ -736,7 +779,7 @@
spi9: spi@a84000 {
compatible = "qcom,geni-spi";
- reg = <0xa84000 0x4000>;
+ reg = <0 0x00a84000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
pinctrl-names = "default";
@@ -749,7 +792,7 @@
uart9: serial@a84000 {
compatible = "qcom,geni-debug-uart";
- reg = <0xa84000 0x4000>;
+ reg = <0 0x00a84000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
pinctrl-names = "default";
@@ -760,7 +803,7 @@
i2c10: i2c@a88000 {
compatible = "qcom,geni-i2c";
- reg = <0xa88000 0x4000>;
+ reg = <0 0x00a88000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
pinctrl-names = "default";
@@ -773,7 +816,7 @@
spi10: spi@a88000 {
compatible = "qcom,geni-spi";
- reg = <0xa88000 0x4000>;
+ reg = <0 0x00a88000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
pinctrl-names = "default";
@@ -786,7 +829,7 @@
uart10: serial@a88000 {
compatible = "qcom,geni-uart";
- reg = <0xa88000 0x4000>;
+ reg = <0 0x00a88000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
pinctrl-names = "default";
@@ -797,7 +840,7 @@
i2c11: i2c@a8c000 {
compatible = "qcom,geni-i2c";
- reg = <0xa8c000 0x4000>;
+ reg = <0 0x00a8c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
pinctrl-names = "default";
@@ -810,7 +853,7 @@
spi11: spi@a8c000 {
compatible = "qcom,geni-spi";
- reg = <0xa8c000 0x4000>;
+ reg = <0 0x00a8c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
pinctrl-names = "default";
@@ -823,7 +866,7 @@
uart11: serial@a8c000 {
compatible = "qcom,geni-uart";
- reg = <0xa8c000 0x4000>;
+ reg = <0 0x00a8c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
pinctrl-names = "default";
@@ -834,7 +877,7 @@
i2c12: i2c@a90000 {
compatible = "qcom,geni-i2c";
- reg = <0xa90000 0x4000>;
+ reg = <0 0x00a90000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
pinctrl-names = "default";
@@ -847,7 +890,7 @@
spi12: spi@a90000 {
compatible = "qcom,geni-spi";
- reg = <0xa90000 0x4000>;
+ reg = <0 0x00a90000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
pinctrl-names = "default";
@@ -860,7 +903,7 @@
uart12: serial@a90000 {
compatible = "qcom,geni-uart";
- reg = <0xa90000 0x4000>;
+ reg = <0 0x00a90000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
pinctrl-names = "default";
@@ -871,7 +914,7 @@
i2c13: i2c@a94000 {
compatible = "qcom,geni-i2c";
- reg = <0xa94000 0x4000>;
+ reg = <0 0x00a94000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
pinctrl-names = "default";
@@ -884,7 +927,7 @@
spi13: spi@a94000 {
compatible = "qcom,geni-spi";
- reg = <0xa94000 0x4000>;
+ reg = <0 0x00a94000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
pinctrl-names = "default";
@@ -897,7 +940,7 @@
uart13: serial@a94000 {
compatible = "qcom,geni-uart";
- reg = <0xa94000 0x4000>;
+ reg = <0 0x00a94000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
pinctrl-names = "default";
@@ -908,7 +951,7 @@
i2c14: i2c@a98000 {
compatible = "qcom,geni-i2c";
- reg = <0xa98000 0x4000>;
+ reg = <0 0x00a98000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
pinctrl-names = "default";
@@ -921,7 +964,7 @@
spi14: spi@a98000 {
compatible = "qcom,geni-spi";
- reg = <0xa98000 0x4000>;
+ reg = <0 0x00a98000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
pinctrl-names = "default";
@@ -934,7 +977,7 @@
uart14: serial@a98000 {
compatible = "qcom,geni-uart";
- reg = <0xa98000 0x4000>;
+ reg = <0 0x00a98000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>;
pinctrl-names = "default";
@@ -945,7 +988,7 @@
i2c15: i2c@a9c000 {
compatible = "qcom,geni-i2c";
- reg = <0xa9c000 0x4000>;
+ reg = <0 0x00a9c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
pinctrl-names = "default";
@@ -958,7 +1001,7 @@
spi15: spi@a9c000 {
compatible = "qcom,geni-spi";
- reg = <0xa9c000 0x4000>;
+ reg = <0 0x00a9c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
pinctrl-names = "default";
@@ -971,7 +1014,7 @@
uart15: serial@a9c000 {
compatible = "qcom,geni-uart";
- reg = <0xa9c000 0x4000>;
+ reg = <0 0x00a9c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>;
pinctrl-names = "default";
@@ -981,19 +1024,121 @@
};
};
+ ufs_mem_hc: ufshc@1d84000 {
+ compatible = "qcom,sdm845-ufshc", "qcom,ufshc",
+ "jedec,ufs-2.0";
+ reg = <0 0x01d84000 0 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufs_mem_phy_lanes>;
+ phy-names = "ufsphy";
+ lanes-per-direction = <2>;
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ iommus = <&apps_smmu 0x100 0xf>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ status = "disabled";
+ };
+
+ ufs_mem_phy: phy@1d87000 {
+ compatible = "qcom,sdm845-qmp-ufs-phy";
+ reg = <0 0x01d87000 0 0x18c>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clock-names = "ref",
+ "ref_aux";
+ clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ status = "disabled";
+
+ ufs_mem_phy_lanes: lanes@1d87400 {
+ reg = <0 0x01d87400 0 0x108>,
+ <0 0x01d87600 0 0x1e0>,
+ <0 0x01d87c00 0 0x1dc>,
+ <0 0x01d87800 0 0x108>,
+ <0 0x01d87a00 0 0x1e0>;
+ #phy-cells = <0>;
+ };
+ };
+
tcsr_mutex_regs: syscon@1f40000 {
compatible = "syscon";
- reg = <0x1f40000 0x40000>;
+ reg = <0 0x01f40000 0 0x40000>;
};
tlmm: pinctrl@3400000 {
compatible = "qcom,sdm845-pinctrl";
- reg = <0x03400000 0xc00000>;
+ reg = <0 0x03400000 0 0xc00000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 150>;
+
+ qspi_clk: qspi-clk {
+ pinmux {
+ pins = "gpio95";
+ function = "qspi_clk";
+ };
+ };
+
+ qspi_cs0: qspi-cs0 {
+ pinmux {
+ pins = "gpio90";
+ function = "qspi_cs";
+ };
+ };
+
+ qspi_cs1: qspi-cs1 {
+ pinmux {
+ pins = "gpio89";
+ function = "qspi_cs";
+ };
+ };
+
+ qspi_data01: qspi-data01 {
+ pinmux-data {
+ pins = "gpio91", "gpio92";
+ function = "qspi_data";
+ };
+ };
+
+ qspi_data12: qspi-data12 {
+ pinmux-data {
+ pins = "gpio93", "gpio94";
+ function = "qspi_data";
+ };
+ };
qup_i2c0_default: qup-i2c0-default {
pinmux {
@@ -1348,9 +1493,47 @@
};
};
+ gpucc: clock-controller@5090000 {
+ compatible = "qcom,sdm845-gpucc";
+ reg = <0 0x05090000 0 0x9000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+ };
+
+ sdhc_2: sdhci@8804000 {
+ compatible = "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0 0x08804000 0 0x1000>;
+
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>;
+ clock-names = "iface", "core";
+ iommus = <&apps_smmu 0xa0 0xf>;
+
+ status = "disabled";
+ };
+
+ qspi: spi@88df000 {
+ compatible = "qcom,sdm845-qspi", "qcom,qspi-v1";
+ reg = <0 0x088df000 0 0x600>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <&gcc GCC_QSPI_CORE_CLK>;
+ clock-names = "iface", "core";
+ status = "disabled";
+ };
+
usb_1_hsphy: phy@88e2000 {
compatible = "qcom,sdm845-qusb2-phy";
- reg = <0x88e2000 0x400>;
+ reg = <0 0x088e2000 0 0x400>;
status = "disabled";
#phy-cells = <0>;
@@ -1365,7 +1548,7 @@
usb_2_hsphy: phy@88e3000 {
compatible = "qcom,sdm845-qusb2-phy";
- reg = <0x88e3000 0x400>;
+ reg = <0 0x088e3000 0 0x400>;
status = "disabled";
#phy-cells = <0>;
@@ -1380,13 +1563,13 @@
usb_1_qmpphy: phy@88e9000 {
compatible = "qcom,sdm845-qmp-usb3-phy";
- reg = <0x88e9000 0x18c>,
- <0x88e8000 0x10>;
+ reg = <0 0x088e9000 0 0x18c>,
+ <0 0x088e8000 0 0x10>;
reg-names = "reg-base", "dp_com";
status = "disabled";
#clock-cells = <1>;
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
@@ -1399,11 +1582,13 @@
<&gcc GCC_USB3_PHY_PRIM_BCR>;
reset-names = "phy", "common";
- usb_1_ssphy: lane@88e9200 {
- reg = <0x88e9200 0x128>,
- <0x88e9400 0x200>,
- <0x88e9c00 0x218>,
- <0x88e9a00 0x100>;
+ usb_1_ssphy: lanes@88e9200 {
+ reg = <0 0x088e9200 0 0x128>,
+ <0 0x088e9400 0 0x200>,
+ <0 0x088e9c00 0 0x218>,
+ <0 0x088e9600 0 0x128>,
+ <0 0x088e9800 0 0x200>,
+ <0 0x088e9a00 0 0x100>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "pipe0";
@@ -1413,11 +1598,11 @@
usb_2_qmpphy: phy@88eb000 {
compatible = "qcom,sdm845-qmp-usb3-uni-phy";
- reg = <0x88eb000 0x18c>;
+ reg = <0 0x088eb000 0 0x18c>;
status = "disabled";
#clock-cells = <1>;
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
@@ -1431,10 +1616,10 @@
reset-names = "phy", "common";
usb_2_ssphy: lane@88eb200 {
- reg = <0x88eb200 0x128>,
- <0x88eb400 0x1fc>,
- <0x88eb800 0x218>,
- <0x88e9600 0x70>;
+ reg = <0 0x088eb200 0 0x128>,
+ <0 0x088eb400 0 0x1fc>,
+ <0 0x088eb800 0 0x218>,
+ <0 0x088eb600 0 0x70>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
clock-names = "pipe0";
@@ -1444,11 +1629,12 @@
usb_1: usb@a6f8800 {
compatible = "qcom,sdm845-dwc3", "qcom,dwc3";
- reg = <0xa6f8800 0x400>;
+ reg = <0 0x0a6f8800 0 0x400>;
status = "disabled";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
+ dma-ranges;
clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
<&gcc GCC_USB30_PRIM_MASTER_CLK>,
@@ -1475,8 +1661,9 @@
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
- reg = <0xa600000 0xcd00>;
+ reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x740 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
@@ -1486,11 +1673,12 @@
usb_2: usb@a8f8800 {
compatible = "qcom,sdm845-dwc3", "qcom,dwc3";
- reg = <0xa8f8800 0x400>;
+ reg = <0 0x0a8f8800 0 0x400>;
status = "disabled";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
+ dma-ranges;
clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
<&gcc GCC_USB30_SEC_MASTER_CLK>,
@@ -1517,8 +1705,9 @@
usb_2_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
- reg = <0xa800000 0xcd00>;
+ reg = <0 0x0a800000 0 0xcd00>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x760 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
phys = <&usb_2_hsphy>, <&usb_2_ssphy>;
@@ -1526,43 +1715,260 @@
};
};
+ videocc: clock-controller@ab00000 {
+ compatible = "qcom,sdm845-videocc";
+ reg = <0 0x0ab00000 0 0x10000>;
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ mdss: mdss@ae00000 {
+ compatible = "qcom,sdm845-mdss";
+ reg = <0 0x0ae00000 0 0x1000>;
+ reg-names = "mdss";
+
+ power-domains = <&dispcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_DISP_AHB_CLK>,
+ <&gcc GCC_DISP_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>;
+ clock-names = "iface", "bus", "core";
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>;
+ assigned-clock-rates = <300000000>;
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ iommus = <&apps_smmu 0x880 0x8>,
+ <&apps_smmu 0xc80 0x8>;
+
+ status = "disabled";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ mdss_mdp: mdp@ae01000 {
+ compatible = "qcom,sdm845-dpu";
+ reg = <0 0x0ae01000 0 0x8f000>,
+ <0 0x0aeb0000 0 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "iface", "bus", "core", "vsync";
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <300000000>,
+ <19200000>;
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&dsi1_in>;
+ };
+ };
+ };
+ };
+
+ dsi0: dsi@ae94000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ reg = <0 0x0ae94000 0 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC0_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ phys = <&dsi0_phy>;
+ phy-names = "dsi";
+
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ };
+ };
+ };
+ };
+
+ dsi0_phy: dsi-phy@ae94400 {
+ compatible = "qcom,dsi-phy-10nm";
+ reg = <0 0x0ae94400 0 0x200>,
+ <0 0x0ae94600 0 0x280>,
+ <0 0x0ae94a00 0 0x1e0>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "iface";
+
+ status = "disabled";
+ };
+
+ dsi1: dsi@ae96000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ reg = <0 0x0ae96000 0 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC1_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ phys = <&dsi1_phy>;
+ phy-names = "dsi";
+
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi1_in: endpoint {
+ remote-endpoint = <&dpu_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi1_out: endpoint {
+ };
+ };
+ };
+ };
+
+ dsi1_phy: dsi-phy@ae96400 {
+ compatible = "qcom,dsi-phy-10nm";
+ reg = <0 0x0ae96400 0 0x200>,
+ <0 0x0ae96600 0 0x280>,
+ <0 0x0ae96a00 0 0x10e>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "iface";
+
+ status = "disabled";
+ };
+ };
+
dispcc: clock-controller@af00000 {
compatible = "qcom,sdm845-dispcc";
- reg = <0xaf00000 0x10000>;
+ reg = <0 0x0af00000 0 0x10000>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
};
+ pdc_reset: reset-controller@b2e0000 {
+ compatible = "qcom,sdm845-pdc-global";
+ reg = <0 0x0b2e0000 0 0x20000>;
+ #reset-cells = <1>;
+ };
+
tsens0: thermal-sensor@c263000 {
compatible = "qcom,sdm845-tsens", "qcom,tsens-v2";
- reg = <0xc263000 0x1ff>, /* TM */
- <0xc222000 0x1ff>; /* SROT */
+ reg = <0 0x0c263000 0 0x1ff>, /* TM */
+ <0 0x0c222000 0 0x1ff>; /* SROT */
#qcom,sensors = <13>;
#thermal-sensor-cells = <1>;
};
tsens1: thermal-sensor@c265000 {
compatible = "qcom,sdm845-tsens", "qcom,tsens-v2";
- reg = <0xc265000 0x1ff>, /* TM */
- <0xc223000 0x1ff>; /* SROT */
+ reg = <0 0x0c265000 0 0x1ff>, /* TM */
+ <0 0x0c223000 0 0x1ff>; /* SROT */
#qcom,sensors = <8>;
#thermal-sensor-cells = <1>;
};
aoss_reset: reset-controller@c2a0000 {
compatible = "qcom,sdm845-aoss-cc";
- reg = <0xc2a0000 0x31000>;
+ reg = <0 0x0c2a0000 0 0x31000>;
#reset-cells = <1>;
};
spmi_bus: spmi@c440000 {
compatible = "qcom,spmi-pmic-arb";
- reg = <0xc440000 0x1100>,
- <0xc600000 0x2000000>,
- <0xe600000 0x100000>,
- <0xe700000 0xa0000>,
- <0xc40a000 0x26000>;
+ reg = <0 0x0c440000 0 0x1100>,
+ <0 0x0c600000 0 0x2000000>,
+ <0 0x0e600000 0 0x100000>,
+ <0 0x0e700000 0 0xa0000>,
+ <0 0x0c40a000 0 0x26000>;
reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
interrupt-names = "periph_irq";
interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
@@ -1575,18 +1981,98 @@
cell-index = <0>;
};
+ apps_smmu: iommu@15000000 {
+ compatible = "qcom,sdm845-smmu-500", "arm,mmu-500";
+ reg = <0 0x15000000 0 0x80000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ lpasscc: clock-controller@17014000 {
+ compatible = "qcom,sdm845-lpasscc";
+ reg = <0 0x17014000 0 0x1f004>, <0 0x17300000 0 0x200>;
+ reg-names = "cc", "qdsp6ss";
+ #clock-cells = <1>;
+ status = "disabled";
+ };
+
apss_shared: mailbox@17990000 {
compatible = "qcom,sdm845-apss-shared";
- reg = <0x17990000 0x1000>;
+ reg = <0 0x17990000 0 0x1000>;
#mbox-cells = <1>;
};
apps_rsc: rsc@179c0000 {
label = "apps_rsc";
compatible = "qcom,rpmh-rsc";
- reg = <0x179c0000 0x10000>,
- <0x179d0000 0x10000>,
- <0x179e0000 0x10000>;
+ reg = <0 0x179c0000 0 0x10000>,
+ <0 0x179d0000 0 0x10000>,
+ <0 0x179e0000 0 0x10000>;
reg-names = "drv-0", "drv-1", "drv-2";
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
@@ -1602,85 +2088,175 @@
compatible = "qcom,sdm845-rpmh-clk";
#clock-cells = <1>;
};
+
+ rpmhpd: power-controller {
+ compatible = "qcom,sdm845-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp1 {
+ opp-level = <16>;
+ };
+
+ rpmhpd_opp_min_svs: opp2 {
+ opp-level = <48>;
+ };
+
+ rpmhpd_opp_low_svs: opp3 {
+ opp-level = <64>;
+ };
+
+ rpmhpd_opp_svs: opp4 {
+ opp-level = <128>;
+ };
+
+ rpmhpd_opp_svs_l1: opp5 {
+ opp-level = <192>;
+ };
+
+ rpmhpd_opp_nom: opp6 {
+ opp-level = <256>;
+ };
+
+ rpmhpd_opp_nom_l1: opp7 {
+ opp-level = <320>;
+ };
+
+ rpmhpd_opp_nom_l2: opp8 {
+ opp-level = <336>;
+ };
+
+ rpmhpd_opp_turbo: opp9 {
+ opp-level = <384>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp10 {
+ opp-level = <416>;
+ };
+ };
+ };
+
+ rsc_hlos: interconnect {
+ compatible = "qcom,sdm845-rsc-hlos";
+ #interconnect-cells = <1>;
+ };
};
intc: interrupt-controller@17a00000 {
compatible = "arm,gic-v3";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
#interrupt-cells = <3>;
interrupt-controller;
- reg = <0x17a00000 0x10000>, /* GICD */
- <0x17a60000 0x100000>; /* GICR * 8 */
+ reg = <0 0x17a00000 0 0x10000>, /* GICD */
+ <0 0x17a60000 0 0x100000>; /* GICR * 8 */
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
gic-its@17a40000 {
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
- reg = <0x17a40000 0x20000>;
+ reg = <0 0x17a40000 0 0x20000>;
status = "disabled";
};
};
timer@17c90000 {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
compatible = "arm,armv7-timer-mem";
- reg = <0x17c90000 0x1000>;
+ reg = <0 0x17c90000 0 0x1000>;
frame@17ca0000 {
frame-number = <0>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x17ca0000 0x1000>,
- <0x17cb0000 0x1000>;
+ reg = <0 0x17ca0000 0 0x1000>,
+ <0 0x17cb0000 0 0x1000>;
};
frame@17cc0000 {
frame-number = <1>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x17cc0000 0x1000>;
+ reg = <0 0x17cc0000 0 0x1000>;
status = "disabled";
};
frame@17cd0000 {
frame-number = <2>;
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x17cd0000 0x1000>;
+ reg = <0 0x17cd0000 0 0x1000>;
status = "disabled";
};
frame@17ce0000 {
frame-number = <3>;
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x17ce0000 0x1000>;
+ reg = <0 0x17ce0000 0 0x1000>;
status = "disabled";
};
frame@17cf0000 {
frame-number = <4>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x17cf0000 0x1000>;
+ reg = <0 0x17cf0000 0 0x1000>;
status = "disabled";
};
frame@17d00000 {
frame-number = <5>;
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x17d00000 0x1000>;
+ reg = <0 0x17d00000 0 0x1000>;
status = "disabled";
};
frame@17d10000 {
frame-number = <6>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x17d10000 0x1000>;
+ reg = <0 0x17d10000 0 0x1000>;
status = "disabled";
};
};
+
+ cpufreq_hw: cpufreq@17d43000 {
+ compatible = "qcom,cpufreq-hw";
+ reg = <0 0x17d43000 0 0x1400>, <0 0x17d45800 0 0x1400>;
+ reg-names = "freq-domain0", "freq-domain1";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #freq-domain-cells = <1>;
+ };
+
+ wifi: wifi@18800000 {
+ compatible = "qcom,wcn3990-wifi";
+ status = "disabled";
+ reg = <0 0x18800000 0 0x800000>;
+ reg-names = "membase";
+ memory-region = <&wlan_msa_mem>;
+ clock-names = "cxo_ref_clk_pin";
+ clocks = <&rpmhcc RPMH_RF_CLK2>;
+ interrupts =
+ <GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 415 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x0040 0x1>;
+ };
};
thermal-zones {
@@ -1691,18 +2267,41 @@
thermal-sensors = <&tsens0 1>;
trips {
- cpu_alert0: trip0 {
- temperature = <75000>;
+ cpu0_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu0_alert1: trip-point@1 {
+ temperature = <95000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit0: trip1 {
+ cpu0_crit: cpu_crit {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu0_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu0_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
cpu1-thermal {
@@ -1712,18 +2311,41 @@
thermal-sensors = <&tsens0 2>;
trips {
- cpu_alert1: trip0 {
- temperature = <75000>;
+ cpu1_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu1_alert1: trip-point@1 {
+ temperature = <95000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit1: trip1 {
+ cpu1_crit: cpu_crit {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu1_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu1_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
cpu2-thermal {
@@ -1733,18 +2355,41 @@
thermal-sensors = <&tsens0 3>;
trips {
- cpu_alert2: trip0 {
- temperature = <75000>;
+ cpu2_alert0: trip-point@0 {
+ temperature = <90000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit2: trip1 {
+ cpu2_alert1: trip-point@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu2_crit: cpu_crit {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu2_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu2_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
cpu3-thermal {
@@ -1754,18 +2399,41 @@
thermal-sensors = <&tsens0 4>;
trips {
- cpu_alert3: trip0 {
- temperature = <75000>;
+ cpu3_alert0: trip-point@0 {
+ temperature = <90000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit3: trip1 {
+ cpu3_alert1: trip-point@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu3_crit: cpu_crit {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu3_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu3_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
cpu4-thermal {
@@ -1775,18 +2443,41 @@
thermal-sensors = <&tsens0 7>;
trips {
- cpu_alert4: trip0 {
- temperature = <75000>;
+ cpu4_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu4_alert1: trip-point@1 {
+ temperature = <95000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit4: trip1 {
+ cpu4_crit: cpu_crit {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu4_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu4_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
cpu5-thermal {
@@ -1796,18 +2487,41 @@
thermal-sensors = <&tsens0 8>;
trips {
- cpu_alert5: trip0 {
- temperature = <75000>;
+ cpu5_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu5_alert1: trip-point@1 {
+ temperature = <95000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit5: trip1 {
+ cpu5_crit: cpu_crit {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu5_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu5_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
cpu6-thermal {
@@ -1817,18 +2531,41 @@
thermal-sensors = <&tsens0 9>;
trips {
- cpu_alert6: trip0 {
- temperature = <75000>;
+ cpu6_alert0: trip-point@0 {
+ temperature = <90000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit6: trip1 {
+ cpu6_alert1: trip-point@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu6_crit: cpu_crit {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu6_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu6_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
cpu7-thermal {
@@ -1838,18 +2575,41 @@
thermal-sensors = <&tsens0 10>;
trips {
- cpu_alert7: trip0 {
- temperature = <75000>;
+ cpu7_alert0: trip-point@0 {
+ temperature = <90000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit7: trip1 {
+ cpu7_alert1: trip-point@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu7_crit: cpu_crit {
temperature = <110000>;
hysteresis = <1000>;
type = "critical";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu7_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu7_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/realtek/rtd1295.dtsi b/arch/arm64/boot/dts/realtek/rtd1295.dtsi
index 8d9ac05d17dc..41d7858da826 100644
--- a/arch/arm64/boot/dts/realtek/rtd1295.dtsi
+++ b/arch/arm64/boot/dts/realtek/rtd1295.dtsi
@@ -17,28 +17,28 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
next-level-cache = <&l2>;
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
next-level-cache = <&l2>;
};
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
next-level-cache = <&l2>;
};
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
next-level-cache = <&l2>;
};
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index a8ce6594342d..6cde526547e4 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-cat874.dtb r8a774c0-ek874.dtb
dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb r8a7795-h3ulcb.dtb
dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-h3ulcb-kf.dtb
dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-xs.dtb
diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi
new file mode 100644
index 000000000000..14db66755a89
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/cat875.dtsi
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Silicon Linux sub board for CAT874 (CAT875)
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+/ {
+ model = "Silicon Linux sub board for CAT874 (CAT875)";
+
+ aliases {
+ ethernet0 = &avb;
+ };
+};
+
+&avb {
+ pinctrl-0 = <&avb_pins>;
+ pinctrl-names = "default";
+ renesas,no-ether-link;
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii";
+ status = "okay";
+
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pciec0 {
+ status = "okay";
+};
+
+&pfc {
+ avb_pins: avb {
+ mux {
+ groups = "avb_mii";
+ function = "avb";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index 20745a8528c5..ef3cff2dd1b6 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -61,7 +61,7 @@
#size-cells = <0>;
a57_0: cpu@0 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0>;
device_type = "cpu";
power-domains = <&sysc R8A774A1_PD_CA57_CPU0>;
@@ -71,7 +71,7 @@
};
a57_1: cpu@1 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x1>;
device_type = "cpu";
power-domains = <&sysc R8A774A1_PD_CA57_CPU1>;
@@ -81,7 +81,7 @@
};
a53_0: cpu@100 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x100>;
device_type = "cpu";
power-domains = <&sysc R8A774A1_PD_CA53_CPU0>;
@@ -91,7 +91,7 @@
};
a53_1: cpu@101 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x101>;
device_type = "cpu";
power-domains = <&sysc R8A774A1_PD_CA53_CPU1>;
@@ -101,7 +101,7 @@
};
a53_2: cpu@102 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x102>;
device_type = "cpu";
power-domains = <&sysc R8A774A1_PD_CA53_CPU2>;
@@ -111,7 +111,7 @@
};
a53_3: cpu@103 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x103>;
device_type = "cpu";
power-domains = <&sysc R8A774A1_PD_CA53_CPU3>;
@@ -599,7 +599,7 @@
hsusb: usb@e6590000 {
compatible = "renesas,usbhs-r8a774a1",
"renesas,rcar-gen3-usbhs";
- reg = <0 0xe6590000 0 0x100>;
+ reg = <0 0xe6590000 0 0x200>;
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 704>;
dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
@@ -1011,6 +1011,9 @@
<&cpg CPG_CORE R8A774A1_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+ <&dmac2 0x13>, <&dmac2 0x12>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
new file mode 100644
index 000000000000..96ee0d2c6357
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Silicon Linux RZ/G2E 96board platform (CAT874)
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a774c0.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Silicon Linux RZ/G2E 96board platform (CAT874)";
+ compatible = "si-linux,cat874", "renesas,r8a774c0";
+
+ aliases {
+ serial0 = &scif2;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@48000000 {
+ device_type = "memory";
+ /* first 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x0 0x78000000>;
+ };
+
+ vcc_sdhi0: regulator-vcc-sdhi0 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "SDHI0 Vcc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vccq_sdhi0: regulator-vccq-sdhi0 {
+ compatible = "regulator-gpio";
+
+ regulator-name = "SDHI0 VccQ";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+ states = <3300000 1
+ 1800000 0>;
+ };
+};
+
+&extal_clk {
+ clock-frequency = <48000000>;
+};
+
+&pcie_bus_clk {
+ clock-frequency = <100000000>;
+};
+
+&pciec0 {
+ /* Map all possible DDR as inbound ranges */
+ dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
+};
+
+&pfc {
+ scif2_pins: scif2 {
+ groups = "scif2_data_a";
+ function = "scif2";
+ };
+
+ sdhi0_pins: sd0 {
+ groups = "sdhi0_data4", "sdhi0_ctrl";
+ function = "sdhi0";
+ power-source = <3300>;
+ };
+
+ sdhi0_pins_uhs: sd0_uhs {
+ groups = "sdhi0_data4", "sdhi0_ctrl";
+ function = "sdhi0";
+ power-source = <1800>;
+ };
+};
+
+&scif2 {
+ pinctrl-0 = <&scif2_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-1 = <&sdhi0_pins_uhs>;
+ pinctrl-names = "default", "state_uhs";
+
+ vmmc-supply = <&vcc_sdhi0>;
+ vqmmc-supply = <&vccq_sdhi0>;
+ cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-ek874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-ek874.dts
new file mode 100644
index 000000000000..e7b6619ab224
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-ek874.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Silicon Linux RZ/G2E evaluation kit (EK874)
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+#include "r8a774c0-cat874.dts"
+#include "cat875.dtsi"
+
+/ {
+ model = "Silicon Linux RZ/G2E evaluation kit EK874 (CAT874 + CAT875)";
+ compatible = "si-linux,cat875", "si-linux,cat874", "renesas,r8a774c0";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
new file mode 100644
index 000000000000..61a0afb74e63
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -0,0 +1,1911 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the RZ/G2E (R8A774C0) SoC
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/r8a774c0-sysc.h>
+
+/ {
+ compatible = "renesas,r8a774c0";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ /*
+ * The external audio clocks are configured as 0 Hz fixed frequency
+ * clocks by default.
+ * Boards that provide audio clocks should override them.
+ */
+ audio_clk_a: audio_clk_a {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ audio_clk_b: audio_clk_b {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ audio_clk_c: audio_clk_c {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ /* External CAN clock - to be overridden by boards that provide it */
+ can_clk: can {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ cluster1_opp: opp_table10 {
+ compatible = "operating-points-v2";
+ opp-shared;
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <820000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <820000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <820000>;
+ clock-latency-ns = <300000>;
+ opp-suspend;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ a53_0: cpu@0 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A774C0_PD_CA53_CPU0>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+			clocks = <&cpg CPG_CORE R8A774C0_CLK_Z2>;
+ operating-points-v2 = <&cluster1_opp>;
+ };
+
+ a53_1: cpu@1 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <1>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A774C0_PD_CA53_CPU1>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+			clocks = <&cpg CPG_CORE R8A774C0_CLK_Z2>;
+ operating-points-v2 = <&cluster1_opp>;
+ };
+
+ L2_CA53: cache-controller-0 {
+ compatible = "cache";
+ power-domains = <&sysc R8A774C0_PD_CA53_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
+ };
+
+ extal_clk: extal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ /* External PCIe clock - can be overridden by the board */
+ pcie_bus_clk: pcie_bus {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ pmu_a53 {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts-extended = <&gic GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&a53_0>, <&a53_1>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ method = "smc";
+ };
+
+ /* External SCIF clock - to be overridden by boards that provide it */
+ scif_clk: scif {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ rwdt: watchdog@e6020000 {
+ compatible = "renesas,r8a774c0-wdt",
+ "renesas,rcar-gen3-wdt";
+ reg = <0 0xe6020000 0 0x0c>;
+ clocks = <&cpg CPG_MOD 402>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 402>;
+ status = "disabled";
+ };
+
+ gpio0: gpio@e6050000 {
+ compatible = "renesas,gpio-r8a774c0",
+ "renesas,rcar-gen3-gpio";
+ reg = <0 0xe6050000 0 0x50>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 0 18>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 912>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 912>;
+ };
+
+ gpio1: gpio@e6051000 {
+ compatible = "renesas,gpio-r8a774c0",
+ "renesas,rcar-gen3-gpio";
+ reg = <0 0xe6051000 0 0x50>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 32 23>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 911>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 911>;
+ };
+
+ gpio2: gpio@e6052000 {
+ compatible = "renesas,gpio-r8a774c0",
+ "renesas,rcar-gen3-gpio";
+ reg = <0 0xe6052000 0 0x50>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 64 26>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 910>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 910>;
+ };
+
+ gpio3: gpio@e6053000 {
+ compatible = "renesas,gpio-r8a774c0",
+ "renesas,rcar-gen3-gpio";
+ reg = <0 0xe6053000 0 0x50>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 96 16>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 909>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 909>;
+ };
+
+ gpio4: gpio@e6054000 {
+ compatible = "renesas,gpio-r8a774c0",
+ "renesas,rcar-gen3-gpio";
+ reg = <0 0xe6054000 0 0x50>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 128 11>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 908>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 908>;
+ };
+
+ gpio5: gpio@e6055000 {
+ compatible = "renesas,gpio-r8a774c0",
+ "renesas,rcar-gen3-gpio";
+ reg = <0 0xe6055000 0 0x50>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 160 20>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 907>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 907>;
+ };
+
+ gpio6: gpio@e6055400 {
+ compatible = "renesas,gpio-r8a774c0",
+ "renesas,rcar-gen3-gpio";
+ reg = <0 0xe6055400 0 0x50>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 192 18>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 906>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 906>;
+ };
+
+ pfc: pin-controller@e6060000 {
+ compatible = "renesas,pfc-r8a774c0";
+ reg = <0 0xe6060000 0 0x508>;
+ };
+
+ cmt0: timer@e60f0000 {
+ compatible = "renesas,r8a774c0-cmt0",
+ "renesas,rcar-gen3-cmt0";
+ reg = <0 0xe60f0000 0 0x1004>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 303>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 303>;
+ status = "disabled";
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a774c0-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 302>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 302>;
+ status = "disabled";
+ };
+
+ cmt2: timer@e6140000 {
+ compatible = "renesas,r8a774c0-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6140000 0 0x1004>;
+ interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 301>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 301>;
+ status = "disabled";
+ };
+
+ cmt3: timer@e6148000 {
+ compatible = "renesas,r8a774c0-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6148000 0 0x1004>;
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 300>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 300>;
+ status = "disabled";
+ };
+
+ cpg: clock-controller@e6150000 {
+ compatible = "renesas,r8a774c0-cpg-mssr";
+ reg = <0 0xe6150000 0 0x1000>;
+ clocks = <&extal_clk>;
+ clock-names = "extal";
+ #clock-cells = <2>;
+ #power-domain-cells = <0>;
+ #reset-cells = <1>;
+ };
+
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a774c0-rst";
+ reg = <0 0xe6160000 0 0x0200>;
+ };
+
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,r8a774c0-sysc";
+ reg = <0 0xe6180000 0 0x0400>;
+ #power-domain-cells = <1>;
+ };
+
+ thermal: thermal@e6190000 {
+ compatible = "renesas,thermal-r8a774c0";
+ reg = <0 0xe6190000 0 0x10>, <0 0xe6190100 0 0x38>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 522>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ intc_ex: interrupt-controller@e61c0000 {
+ compatible = "renesas,intc-ex-r8a774c0", "renesas,irqc";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ reg = <0 0xe61c0000 0 0x200>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 407>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 407>;
+ };
+
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a774c0", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 125>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 125>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a774c0", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a774c0", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 123>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 123>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a774c0", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 122>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 122>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a774c0", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 121>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 121>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@e6500000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a774c0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe6500000 0 0x40>;
+ interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 931>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 931>;
+ dmas = <&dmac1 0x91>, <&dmac1 0x90>,
+ <&dmac2 0x91>, <&dmac2 0x90>;
+ dma-names = "tx", "rx", "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@e6508000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a774c0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe6508000 0 0x40>;
+ interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 930>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 930>;
+ dmas = <&dmac1 0x93>, <&dmac1 0x92>,
+ <&dmac2 0x93>, <&dmac2 0x92>;
+ dma-names = "tx", "rx", "tx", "rx";
+ i2c-scl-internal-delay-ns = <6>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@e6510000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a774c0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe6510000 0 0x40>;
+ interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 929>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 929>;
+ dmas = <&dmac1 0x95>, <&dmac1 0x94>,
+ <&dmac2 0x95>, <&dmac2 0x94>;
+ dma-names = "tx", "rx", "tx", "rx";
+ i2c-scl-internal-delay-ns = <6>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@e66d0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a774c0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe66d0000 0 0x40>;
+ interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 928>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 928>;
+ dmas = <&dmac0 0x97>, <&dmac0 0x96>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <110>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@e66d8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a774c0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe66d8000 0 0x40>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 927>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 927>;
+ dmas = <&dmac0 0x99>, <&dmac0 0x98>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <6>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@e66e0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a774c0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe66e0000 0 0x40>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 919>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 919>;
+ dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <6>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@e66e8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a774c0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe66e8000 0 0x40>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 918>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 918>;
+ dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
+ dma-names = "tx", "rx";
+ i2c-scl-internal-delay-ns = <6>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@e6690000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,i2c-r8a774c0",
+ "renesas,rcar-gen3-i2c";
+ reg = <0 0xe6690000 0 0x40>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1003>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 1003>;
+ i2c-scl-internal-delay-ns = <6>;
+ status = "disabled";
+ };
+
+ i2c_dvfs: i2c@e60b0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,iic-r8a774c0";
+ reg = <0 0xe60b0000 0 0x15>;
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 926>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 926>;
+ dmas = <&dmac0 0x11>, <&dmac0 0x10>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ hscif0: serial@e6540000 {
+ compatible = "renesas,hscif-r8a774c0",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe6540000 0 0x60>;
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 520>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x31>, <&dmac1 0x30>,
+ <&dmac2 0x31>, <&dmac2 0x30>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 520>;
+ status = "disabled";
+ };
+
+ hscif1: serial@e6550000 {
+ compatible = "renesas,hscif-r8a774c0",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe6550000 0 0x60>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 519>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x33>, <&dmac1 0x32>,
+ <&dmac2 0x33>, <&dmac2 0x32>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 519>;
+ status = "disabled";
+ };
+
+ hscif2: serial@e6560000 {
+ compatible = "renesas,hscif-r8a774c0",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe6560000 0 0x60>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 518>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x35>, <&dmac1 0x34>,
+ <&dmac2 0x35>, <&dmac2 0x34>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 518>;
+ status = "disabled";
+ };
+
+ hscif3: serial@e66a0000 {
+ compatible = "renesas,hscif-r8a774c0",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe66a0000 0 0x60>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 517>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x37>, <&dmac0 0x36>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 517>;
+ status = "disabled";
+ };
+
+ hscif4: serial@e66b0000 {
+ compatible = "renesas,hscif-r8a774c0",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe66b0000 0 0x60>;
+ interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 516>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x39>, <&dmac0 0x38>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 516>;
+ status = "disabled";
+ };
+
+ hsusb: usb@e6590000 {
+ compatible = "renesas,usbhs-r8a774c0",
+ "renesas,rcar-gen3-usbhs";
+ reg = <0 0xe6590000 0 0x200>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>, <&cpg CPG_MOD 703>;
+ dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
+ <&usb_dmac1 0>, <&usb_dmac1 1>;
+ dma-names = "ch0", "ch1", "ch2", "ch3";
+ renesas,buswait = <11>;
+ phys = <&usb2_phy0>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 704>, <&cpg 703>;
+ status = "disabled";
+ };
+
+ usb_dmac0: dma-controller@e65a0000 {
+ compatible = "renesas,r8a774c0-usb-dmac",
+ "renesas,usb-dmac";
+ reg = <0 0xe65a0000 0 0x100>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1";
+ clocks = <&cpg CPG_MOD 330>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 330>;
+ #dma-cells = <1>;
+ dma-channels = <2>;
+ };
+
+ usb_dmac1: dma-controller@e65b0000 {
+ compatible = "renesas,r8a774c0-usb-dmac",
+ "renesas,usb-dmac";
+ reg = <0 0xe65b0000 0 0x100>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1";
+ clocks = <&cpg CPG_MOD 331>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 331>;
+ #dma-cells = <1>;
+ dma-channels = <2>;
+ };
+
+ dmac0: dma-controller@e6700000 {
+ compatible = "renesas,dmac-r8a774c0",
+ "renesas,rcar-dmac";
+ reg = <0 0xe6700000 0 0x10000>;
+ interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15";
+ clocks = <&cpg CPG_MOD 219>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 219>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>,
+ <&ipmmu_ds0 2>, <&ipmmu_ds0 3>,
+ <&ipmmu_ds0 4>, <&ipmmu_ds0 5>,
+ <&ipmmu_ds0 6>, <&ipmmu_ds0 7>,
+ <&ipmmu_ds0 8>, <&ipmmu_ds0 9>,
+ <&ipmmu_ds0 10>, <&ipmmu_ds0 11>,
+ <&ipmmu_ds0 12>, <&ipmmu_ds0 13>,
+ <&ipmmu_ds0 14>, <&ipmmu_ds0 15>;
+ };
+
+ dmac1: dma-controller@e7300000 {
+ compatible = "renesas,dmac-r8a774c0",
+ "renesas,rcar-dmac";
+ reg = <0 0xe7300000 0 0x10000>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15";
+ clocks = <&cpg CPG_MOD 218>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 218>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ iommus = <&ipmmu_ds1 0>, <&ipmmu_ds1 1>,
+ <&ipmmu_ds1 2>, <&ipmmu_ds1 3>,
+ <&ipmmu_ds1 4>, <&ipmmu_ds1 5>,
+ <&ipmmu_ds1 6>, <&ipmmu_ds1 7>,
+ <&ipmmu_ds1 8>, <&ipmmu_ds1 9>,
+ <&ipmmu_ds1 10>, <&ipmmu_ds1 11>,
+ <&ipmmu_ds1 12>, <&ipmmu_ds1 13>,
+ <&ipmmu_ds1 14>, <&ipmmu_ds1 15>;
+ };
+
+ dmac2: dma-controller@e7310000 {
+ compatible = "renesas,dmac-r8a774c0",
+ "renesas,rcar-dmac";
+ reg = <0 0xe7310000 0 0x10000>;
+ interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15";
+ clocks = <&cpg CPG_MOD 217>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 217>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ iommus = <&ipmmu_ds1 16>, <&ipmmu_ds1 17>,
+ <&ipmmu_ds1 18>, <&ipmmu_ds1 19>,
+ <&ipmmu_ds1 20>, <&ipmmu_ds1 21>,
+ <&ipmmu_ds1 22>, <&ipmmu_ds1 23>,
+ <&ipmmu_ds1 24>, <&ipmmu_ds1 25>,
+ <&ipmmu_ds1 26>, <&ipmmu_ds1 27>,
+ <&ipmmu_ds1 28>, <&ipmmu_ds1 29>,
+ <&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
+ };
+
+ ipmmu_ds0: mmu@e6740000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xe6740000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 0>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_ds1: mmu@e7740000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xe7740000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 1>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_hc: mmu@e6570000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xe6570000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 2>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_mm: mmu@e67b0000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xe67b0000 0 0x1000>;
+ interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_mp: mmu@ec670000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xec670000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 4>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_pv0: mmu@fd800000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xfd800000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 6>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_vc0: mmu@fe6b0000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xfe6b0000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 12>;
+ power-domains = <&sysc R8A774C0_PD_A3VC>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_vi0: mmu@febd0000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xfebd0000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 14>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ ipmmu_vp0: mmu@fe990000 {
+ compatible = "renesas,ipmmu-r8a774c0";
+ reg = <0 0xfe990000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 16>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+ avb: ethernet@e6800000 {
+ compatible = "renesas,etheravb-r8a774c0",
+ "renesas,etheravb-rcar-gen3";
+ reg = <0 0xe6800000 0 0x800>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15",
+ "ch16", "ch17", "ch18", "ch19",
+ "ch20", "ch21", "ch22", "ch23",
+ "ch24";
+ clocks = <&cpg CPG_MOD 812>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 812>;
+ phy-mode = "rgmii";
+ iommus = <&ipmmu_ds0 16>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ can0: can@e6c30000 {
+ compatible = "renesas,can-r8a774c0",
+ "renesas,rcar-gen3-can";
+ reg = <0 0xe6c30000 0 0x1000>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 916>, <&can_clk>;
+ clock-names = "clkp1", "can_clk";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 916>;
+ status = "disabled";
+ };
+
+ can1: can@e6c38000 {
+ compatible = "renesas,can-r8a774c0",
+ "renesas,rcar-gen3-can";
+ reg = <0 0xe6c38000 0 0x1000>;
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 915>, <&can_clk>;
+ clock-names = "clkp1", "can_clk";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 915>;
+ status = "disabled";
+ };
+
+ pwm0: pwm@e6e30000 {
+ compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar";
+ reg = <0 0xe6e30000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@e6e31000 {
+ compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar";
+ reg = <0 0xe6e31000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@e6e32000 {
+ compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar";
+ reg = <0 0xe6e32000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@e6e33000 {
+ compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar";
+ reg = <0 0xe6e33000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@e6e34000 {
+ compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar";
+ reg = <0 0xe6e34000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm5: pwm@e6e35000 {
+ compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar";
+ reg = <0 0xe6e35000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm6: pwm@e6e36000 {
+ compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar";
+ reg = <0 0xe6e36000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ scif0: serial@e6e60000 {
+ compatible = "renesas,scif-r8a774c0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6e60000 0 64>;
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 207>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x51>, <&dmac1 0x50>,
+ <&dmac2 0x51>, <&dmac2 0x50>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 207>;
+ status = "disabled";
+ };
+
+ scif1: serial@e6e68000 {
+ compatible = "renesas,scif-r8a774c0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6e68000 0 64>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 206>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x53>, <&dmac1 0x52>,
+ <&dmac2 0x53>, <&dmac2 0x52>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 206>;
+ status = "disabled";
+ };
+
+ scif2: serial@e6e88000 {
+ compatible = "renesas,scif-r8a774c0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6e88000 0 64>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 310>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+ <&dmac2 0x13>, <&dmac2 0x12>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 310>;
+ status = "disabled";
+ };
+
+ scif3: serial@e6c50000 {
+ compatible = "renesas,scif-r8a774c0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6c50000 0 64>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 204>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x57>, <&dmac0 0x56>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 204>;
+ status = "disabled";
+ };
+
+ scif4: serial@e6c40000 {
+ compatible = "renesas,scif-r8a774c0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6c40000 0 64>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 203>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x59>, <&dmac0 0x58>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 203>;
+ status = "disabled";
+ };
+
+ scif5: serial@e6f30000 {
+ compatible = "renesas,scif-r8a774c0",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6f30000 0 64>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 202>,
+ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
+ <&dmac2 0x5b>, <&dmac2 0x5a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 202>;
+ status = "disabled";
+ };
+
+ msiof0: spi@e6e90000 {
+ compatible = "renesas,msiof-r8a774c0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6e90000 0 0x0064>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 211>;
+ dmas = <&dmac1 0x41>, <&dmac1 0x40>,
+ <&dmac2 0x41>, <&dmac2 0x40>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 211>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof1: spi@e6ea0000 {
+ compatible = "renesas,msiof-r8a774c0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6ea0000 0 0x0064>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 210>;
+ dmas = <&dmac1 0x43>, <&dmac1 0x42>,
+ <&dmac2 0x43>, <&dmac2 0x42>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 210>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof2: spi@e6c00000 {
+ compatible = "renesas,msiof-r8a774c0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6c00000 0 0x0064>;
+ interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 209>;
+ dmas = <&dmac0 0x45>, <&dmac0 0x44>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 209>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ msiof3: spi@e6c10000 {
+ compatible = "renesas,msiof-r8a774c0",
+ "renesas,rcar-gen3-msiof";
+ reg = <0 0xe6c10000 0 0x0064>;
+ interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 208>;
+ dmas = <&dmac0 0x47>, <&dmac0 0x46>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 208>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ vin4: video@e6ef4000 {
+ compatible = "renesas,vin-r8a774c0";
+ reg = <0 0xe6ef4000 0 0x1000>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 807>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 807>;
+ renesas,id = <4>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin4csi40: endpoint@2 {
+ reg = <2>;
+						remote-endpoint = <&csi40vin4>;
+ };
+ };
+ };
+ };
+
+ vin5: video@e6ef5000 {
+ compatible = "renesas,vin-r8a774c0";
+ reg = <0 0xe6ef5000 0 0x1000>;
+ interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 806>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 806>;
+ renesas,id = <5>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin5csi40: endpoint@2 {
+ reg = <2>;
+						remote-endpoint = <&csi40vin5>;
+ };
+ };
+ };
+ };
+
+ rcar_sound: sound@ec500000 {
+ /*
+ * #sound-dai-cells is required
+ *
+ * Single DAI : #sound-dai-cells = <0>; <&rcar_sound>;
+ * Multi DAI : #sound-dai-cells = <1>; <&rcar_sound N>;
+ */
+ /*
+ * #clock-cells is required for audio_clkout0/1/2/3
+ *
+ * clkout : #clock-cells = <0>; <&rcar_sound>;
+ * clkout0/1/2/3: #clock-cells = <1>; <&rcar_sound N>;
+ */
+ compatible = "renesas,rcar_sound-r8a774c0",
+ "renesas,rcar_sound-gen3";
+ reg = <0 0xec500000 0 0x1000>, /* SCU */
+ <0 0xec5a0000 0 0x100>, /* ADG */
+ <0 0xec540000 0 0x1000>, /* SSIU */
+ <0 0xec541000 0 0x280>, /* SSI */
+			      <0 0xec760000 0 0x200>;	/* Audio DMAC peri peri */
+ reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
+ clocks = <&cpg CPG_MOD 1005>,
+ <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+ <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+ <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+ <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+ <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+ <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+ <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+ <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+ <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+ <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+ <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+ <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+ <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+ <&audio_clk_a>, <&audio_clk_b>,
+ <&audio_clk_c>,
+ <&cpg CPG_CORE R8A774C0_CLK_ZA2>;
+ clock-names = "ssi-all",
+ "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+ "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+ "ssi.1", "ssi.0",
+ "src.9", "src.8", "src.7", "src.6",
+ "src.5", "src.4", "src.3", "src.2",
+ "src.1", "src.0",
+ "mix.1", "mix.0",
+ "ctu.1", "ctu.0",
+ "dvc.0", "dvc.1",
+ "clk_a", "clk_b", "clk_c", "clk_i";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 1005>,
+ <&cpg 1006>, <&cpg 1007>,
+ <&cpg 1008>, <&cpg 1009>,
+ <&cpg 1010>, <&cpg 1011>,
+ <&cpg 1012>, <&cpg 1013>,
+ <&cpg 1014>, <&cpg 1015>;
+ reset-names = "ssi-all",
+ "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+ "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+ "ssi.1", "ssi.0";
+ status = "disabled";
+
+ rcar_sound,dvc {
+ dvc0: dvc-0 {
+ dmas = <&audma0 0xbc>;
+ dma-names = "tx";
+ };
+ dvc1: dvc-1 {
+ dmas = <&audma0 0xbe>;
+ dma-names = "tx";
+ };
+ };
+
+ rcar_sound,mix {
+ mix0: mix-0 { };
+ mix1: mix-1 { };
+ };
+
+ rcar_sound,ctu {
+ ctu00: ctu-0 { };
+ ctu01: ctu-1 { };
+ ctu02: ctu-2 { };
+ ctu03: ctu-3 { };
+ ctu10: ctu-4 { };
+ ctu11: ctu-5 { };
+ ctu12: ctu-6 { };
+ ctu13: ctu-7 { };
+ };
+
+ rcar_sound,src {
+ src0: src-0 {
+ interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x85>, <&audma0 0x9a>;
+ dma-names = "rx", "tx";
+ };
+ src1: src-1 {
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x87>, <&audma0 0x9c>;
+ dma-names = "rx", "tx";
+ };
+ src2: src-2 {
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x89>, <&audma0 0x9e>;
+ dma-names = "rx", "tx";
+ };
+ src3: src-3 {
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8b>, <&audma0 0xa0>;
+ dma-names = "rx", "tx";
+ };
+ src4: src-4 {
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8d>, <&audma0 0xb0>;
+ dma-names = "rx", "tx";
+ };
+ src5: src-5 {
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x8f>, <&audma0 0xb2>;
+ dma-names = "rx", "tx";
+ };
+ src6: src-6 {
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x91>, <&audma0 0xb4>;
+ dma-names = "rx", "tx";
+ };
+ src7: src-7 {
+ interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x93>, <&audma0 0xb6>;
+ dma-names = "rx", "tx";
+ };
+ src8: src-8 {
+ interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x95>, <&audma0 0xb8>;
+ dma-names = "rx", "tx";
+ };
+ src9: src-9 {
+ interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x97>, <&audma0 0xba>;
+ dma-names = "rx", "tx";
+ };
+ };
+
+ rcar_sound,ssi {
+ ssi0: ssi-0 {
+ interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x01>, <&audma0 0x02>,
+ <&audma0 0x15>, <&audma0 0x16>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi1: ssi-1 {
+ interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x03>, <&audma0 0x04>,
+ <&audma0 0x49>, <&audma0 0x4a>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi2: ssi-2 {
+ interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x05>, <&audma0 0x06>,
+ <&audma0 0x63>, <&audma0 0x64>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi3: ssi-3 {
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x07>, <&audma0 0x08>,
+ <&audma0 0x6f>, <&audma0 0x70>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi4: ssi-4 {
+ interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x09>, <&audma0 0x0a>,
+ <&audma0 0x71>, <&audma0 0x72>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi5: ssi-5 {
+ interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0b>, <&audma0 0x0c>,
+ <&audma0 0x73>, <&audma0 0x74>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi6: ssi-6 {
+ interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0d>, <&audma0 0x0e>,
+ <&audma0 0x75>, <&audma0 0x76>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi7: ssi-7 {
+ interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x0f>, <&audma0 0x10>,
+ <&audma0 0x79>, <&audma0 0x7a>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi8: ssi-8 {
+ interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x11>, <&audma0 0x12>,
+ <&audma0 0x7b>, <&audma0 0x7c>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ ssi9: ssi-9 {
+ interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&audma0 0x13>, <&audma0 0x14>,
+ <&audma0 0x7d>, <&audma0 0x7e>;
+ dma-names = "rx", "tx", "rxu", "txu";
+ };
+ };
+ };
+
+ audma0: dma-controller@ec700000 {
+ compatible = "renesas,dmac-r8a774c0",
+ "renesas,rcar-dmac";
+ reg = <0 0xec700000 0 0x10000>;
+ interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15";
+ clocks = <&cpg CPG_MOD 502>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 502>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ iommus = <&ipmmu_mp 0>, <&ipmmu_mp 1>,
+ <&ipmmu_mp 2>, <&ipmmu_mp 3>,
+ <&ipmmu_mp 4>, <&ipmmu_mp 5>,
+ <&ipmmu_mp 6>, <&ipmmu_mp 7>,
+ <&ipmmu_mp 8>, <&ipmmu_mp 9>,
+ <&ipmmu_mp 10>, <&ipmmu_mp 11>,
+ <&ipmmu_mp 12>, <&ipmmu_mp 13>,
+ <&ipmmu_mp 14>, <&ipmmu_mp 15>;
+ };
+
+ xhci0: usb@ee000000 {
+ compatible = "renesas,xhci-r8a774c0",
+ "renesas,rcar-gen3-xhci";
+ reg = <0 0xee000000 0 0xc00>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 328>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 328>;
+ status = "disabled";
+ };
+
+ usb3_peri0: usb@ee020000 {
+ compatible = "renesas,r8a774c0-usb3-peri",
+ "renesas,rcar-gen3-usb3-peri";
+ reg = <0 0xee020000 0 0x400>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 328>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 328>;
+ status = "disabled";
+ };
+
+ ohci0: usb@ee080000 {
+ compatible = "generic-ohci";
+ reg = <0 0xee080000 0 0x100>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+ phys = <&usb2_phy0>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ status = "disabled";
+ };
+
+ ehci0: usb@ee080100 {
+ compatible = "generic-ehci";
+ reg = <0 0xee080100 0 0x100>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+ phys = <&usb2_phy0>;
+ phy-names = "usb";
+ companion = <&ohci0>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ status = "disabled";
+ };
+
+ usb2_phy0: usb-phy@ee080200 {
+ compatible = "renesas,usb2-phy-r8a774c0",
+ "renesas,rcar-gen3-usb2-phy";
+ reg = <0 0xee080200 0 0x700>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhi0: sd@ee100000 {
+ compatible = "renesas,sdhi-r8a774c0",
+ "renesas,rcar-gen3-sdhi";
+ reg = <0 0xee100000 0 0x2000>;
+ interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 314>;
+ max-frequency = <200000000>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 314>;
+ status = "disabled";
+ };
+
+ sdhi1: sd@ee120000 {
+ compatible = "renesas,sdhi-r8a774c0",
+ "renesas,rcar-gen3-sdhi";
+ reg = <0 0xee120000 0 0x2000>;
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 313>;
+ max-frequency = <200000000>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 313>;
+ status = "disabled";
+ };
+
+ sdhi3: sd@ee160000 {
+ compatible = "renesas,sdhi-r8a774c0",
+ "renesas,rcar-gen3-sdhi";
+ reg = <0 0xee160000 0 0x2000>;
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 311>;
+ max-frequency = <200000000>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 311>;
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@f1010000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xf1010000 0 0x1000>,
+ <0x0 0xf1020000 0 0x20000>,
+ <0x0 0xf1040000 0 0x20000>,
+ <0x0 0xf1060000 0 0x20000>;
+ interrupts = <GIC_PPI 9
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&cpg CPG_MOD 408>;
+ clock-names = "clk";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 408>;
+ };
+
+ pciec0: pcie@fe000000 {
+ compatible = "renesas,pcie-r8a774c0",
+ "renesas,pcie-rcar-gen3";
+ reg = <0 0xfe000000 0 0x80000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ device_type = "pci";
+ ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
+ 0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
+ 0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
+ 0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+ /* Map all possible DDR as inbound ranges */
+ dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
+ clock-names = "pcie", "pcie_bus";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 319>;
+ status = "disabled";
+ };
+
+ vspb0: vsp@fe960000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfe960000 0 0x8000>;
+ interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 626>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 626>;
+ renesas,fcp = <&fcpvb0>;
+ };
+
+ fcpvb0: fcp@fe96f000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfe96f000 0 0x200>;
+ clocks = <&cpg CPG_MOD 607>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 607>;
+ iommus = <&ipmmu_vp0 5>;
+ };
+
+ vspi0: vsp@fe9a0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfe9a0000 0 0x8000>;
+ interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 631>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 631>;
+ renesas,fcp = <&fcpvi0>;
+ };
+
+ fcpvi0: fcp@fe9af000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfe9af000 0 0x200>;
+ clocks = <&cpg CPG_MOD 611>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 611>;
+ iommus = <&ipmmu_vp0 8>;
+ };
+
+ vspd0: vsp@fea20000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea20000 0 0x7000>;
+ interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 623>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 623>;
+ renesas,fcp = <&fcpvd0>;
+ };
+
+ fcpvd0: fcp@fea27000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfea27000 0 0x200>;
+ clocks = <&cpg CPG_MOD 603>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 603>;
+ iommus = <&ipmmu_vi0 8>;
+ };
+
+ vspd1: vsp@fea28000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea28000 0 0x7000>;
+ interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 622>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 622>;
+ renesas,fcp = <&fcpvd1>;
+ };
+
+ fcpvd1: fcp@fea2f000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfea2f000 0 0x200>;
+ clocks = <&cpg CPG_MOD 602>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 602>;
+ iommus = <&ipmmu_vi0 9>;
+ };
+
+ csi40: csi2@feaa0000 {
+ compatible = "renesas,r8a774c0-csi2",
+ "renesas,rcar-gen3-csi2";
+ reg = <0 0xfeaa0000 0 0x10000>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 716>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 716>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ csi40vin4: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vin4csi40>;
+ };
+ csi40vin5: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vin5csi40>;
+ };
+ };
+ };
+ };
+
+ du: display@feb00000 {
+ compatible = "renesas,du-r8a774c0";
+ reg = <0 0xfeb00000 0 0x80000>;
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 724>,
+ <&cpg CPG_MOD 723>;
+ clock-names = "du.0", "du.1";
+ vsps = <&vspd0 0 &vspd1 0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ du_out_rgb: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ du_out_lvds0: endpoint {
+ remote-endpoint = <&lvds0_in>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ du_out_lvds1: endpoint {
+ remote-endpoint = <&lvds1_in>;
+ };
+ };
+ };
+ };
+
+ lvds0: lvds-encoder@feb90000 {
+ compatible = "renesas,r8a774c0-lvds";
+ reg = <0 0xfeb90000 0 0x20>;
+ clocks = <&cpg CPG_MOD 727>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 727>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds0_in: endpoint {
+ remote-endpoint = <&du_out_lvds0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ lvds0_out: endpoint {
+ };
+ };
+ };
+ };
+
+ lvds1: lvds-encoder@feb90100 {
+ compatible = "renesas,r8a774c0-lvds";
+ reg = <0 0xfeb90100 0 0x20>;
+ clocks = <&cpg CPG_MOD 727>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 726>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds1_in: endpoint {
+ remote-endpoint = <&du_out_lvds1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ lvds1_out: endpoint {
+ };
+ };
+ };
+ };
+
+ prr: chipid@fff00044 {
+ compatible = "renesas,prr";
+ reg = <0 0xfff00044 0 4>;
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&thermal>;
+
+ trips {
+ cpu-crit {
+ temperature = <120000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ /* External USB clocks - can be overridden by the board */
+ usb3s0_clk: usb3s0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ usb_extal_clk: usb_extal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index af9605d5db27..abeac3059383 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -149,7 +149,7 @@
};
a57_0: cpu@0 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0>;
device_type = "cpu";
power-domains = <&sysc R8A7795_PD_CA57_CPU0>;
@@ -162,7 +162,7 @@
};
a57_1: cpu@1 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x1>;
device_type = "cpu";
power-domains = <&sysc R8A7795_PD_CA57_CPU1>;
@@ -175,7 +175,7 @@
};
a57_2: cpu@2 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x2>;
device_type = "cpu";
power-domains = <&sysc R8A7795_PD_CA57_CPU2>;
@@ -188,7 +188,7 @@
};
a57_3: cpu@3 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x3>;
device_type = "cpu";
power-domains = <&sysc R8A7795_PD_CA57_CPU3>;
@@ -201,7 +201,7 @@
};
a53_0: cpu@100 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x100>;
device_type = "cpu";
power-domains = <&sysc R8A7795_PD_CA53_CPU0>;
@@ -213,7 +213,7 @@
};
a53_1: cpu@101 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x101>;
device_type = "cpu";
power-domains = <&sysc R8A7795_PD_CA53_CPU1>;
@@ -225,7 +225,7 @@
};
a53_2: cpu@102 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x102>;
device_type = "cpu";
power-domains = <&sysc R8A7795_PD_CA53_CPU2>;
@@ -237,7 +237,7 @@
};
a53_3: cpu@103 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x103>;
device_type = "cpu";
power-domains = <&sysc R8A7795_PD_CA53_CPU3>;
@@ -2174,53 +2174,53 @@
rcar_sound,ssi {
ssi0: ssi-0 {
interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x01>, <&audma1 0x02>;
+ dma-names = "rx", "tx";
};
ssi1: ssi-1 {
interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x03>, <&audma1 0x04>;
+ dma-names = "rx", "tx";
};
ssi2: ssi-2 {
interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x05>, <&audma1 0x06>;
+ dma-names = "rx", "tx";
};
ssi3: ssi-3 {
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x07>, <&audma1 0x08>;
+ dma-names = "rx", "tx";
};
ssi4: ssi-4 {
interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x09>, <&audma1 0x0a>;
+ dma-names = "rx", "tx";
};
ssi5: ssi-5 {
interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0b>, <&audma1 0x0c>;
+ dma-names = "rx", "tx";
};
ssi6: ssi-6 {
interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0d>, <&audma1 0x0e>;
+ dma-names = "rx", "tx";
};
ssi7: ssi-7 {
interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0f>, <&audma1 0x10>;
+ dma-names = "rx", "tx";
};
ssi8: ssi-8 {
interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x11>, <&audma1 0x12>;
+ dma-names = "rx", "tx";
};
ssi9: ssi-9 {
interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x13>, <&audma1 0x14>;
+ dma-names = "rx", "tx";
};
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts
index 8860be65342e..31f12059355e 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts
@@ -29,11 +29,10 @@
clocks = <&cpg CPG_MOD 724>,
<&cpg CPG_MOD 723>,
<&cpg CPG_MOD 722>,
- <&cpg CPG_MOD 727>,
<&versaclock6 1>,
<&x21_clk>,
<&versaclock6 2>;
- clock-names = "du.0", "du.1", "du.2", "lvds.0",
+ clock-names = "du.0", "du.1", "du.2",
"dclkin.0", "dclkin.1", "dclkin.2";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index afedbf5728ec..cdf784899cf8 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -154,7 +154,7 @@
};
a57_0: cpu@0 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0>;
device_type = "cpu";
power-domains = <&sysc R8A7796_PD_CA57_CPU0>;
@@ -167,7 +167,7 @@
};
a57_1: cpu@1 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x1>;
device_type = "cpu";
power-domains = <&sysc R8A7796_PD_CA57_CPU1>;
@@ -180,7 +180,7 @@
};
a53_0: cpu@100 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x100>;
device_type = "cpu";
power-domains = <&sysc R8A7796_PD_CA53_CPU0>;
@@ -192,7 +192,7 @@
};
a53_1: cpu@101 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x101>;
device_type = "cpu";
power-domains = <&sysc R8A7796_PD_CA53_CPU1>;
@@ -204,7 +204,7 @@
};
a53_2: cpu@102 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x102>;
device_type = "cpu";
power-domains = <&sysc R8A7796_PD_CA53_CPU2>;
@@ -216,7 +216,7 @@
};
a53_3: cpu@103 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x103>;
device_type = "cpu";
power-domains = <&sysc R8A7796_PD_CA53_CPU3>;
@@ -1262,6 +1262,9 @@
<&cpg CPG_CORE R8A7796_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+ <&dmac2 0x13>, <&dmac2 0x12>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";
@@ -2110,53 +2113,53 @@
rcar_sound,ssi {
ssi0: ssi-0 {
interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x01>, <&audma1 0x02>;
+ dma-names = "rx", "tx";
};
ssi1: ssi-1 {
interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x03>, <&audma1 0x04>;
+ dma-names = "rx", "tx";
};
ssi2: ssi-2 {
interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x05>, <&audma1 0x06>;
+ dma-names = "rx", "tx";
};
ssi3: ssi-3 {
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x07>, <&audma1 0x08>;
+ dma-names = "rx", "tx";
};
ssi4: ssi-4 {
interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x09>, <&audma1 0x0a>;
+ dma-names = "rx", "tx";
};
ssi5: ssi-5 {
interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0b>, <&audma1 0x0c>;
+ dma-names = "rx", "tx";
};
ssi6: ssi-6 {
interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0d>, <&audma1 0x0e>;
+ dma-names = "rx", "tx";
};
ssi7: ssi-7 {
interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0f>, <&audma1 0x10>;
+ dma-names = "rx", "tx";
};
ssi8: ssi-8 {
interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x11>, <&audma1 0x12>;
+ dma-names = "rx", "tx";
};
ssi9: ssi-9 {
interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x13>, <&audma1 0x14>;
+ dma-names = "rx", "tx";
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 6dc9b1fef830..9763d108e183 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -105,7 +105,7 @@
#size-cells = <0>;
a57_0: cpu@0 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x0>;
device_type = "cpu";
power-domains = <&sysc R8A77965_PD_CA57_CPU0>;
@@ -116,7 +116,7 @@
};
a57_1: cpu@1 {
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a57";
reg = <0x1>;
device_type = "cpu";
power-domains = <&sysc R8A77965_PD_CA57_CPU1>;
@@ -1068,6 +1068,9 @@
<&cpg CPG_CORE R8A77965_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+ <&dmac2 0x13>, <&dmac2 0x12>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
index 0dbcb4cccc18..15cc9fed2e16 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
@@ -108,6 +108,8 @@
phy0: ethernet-phy@0 {
rxc-skew-ps = <1500>;
reg = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 563428d1cdc2..5b6164d4b8e3 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -37,7 +37,7 @@
a53_0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0>;
clocks = <&cpg CPG_CORE R8A77970_CLK_Z2>;
power-domains = <&sysc R8A77970_PD_CA53_CPU0>;
@@ -47,7 +47,7 @@
a53_1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <1>;
clocks = <&cpg CPG_CORE R8A77970_CLK_Z2>;
power-domains = <&sysc R8A77970_PD_CA53_CPU1>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 5bd9b2547c36..4081622d548a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -38,7 +38,7 @@
a53_0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0>;
clocks = <&cpg CPG_CORE R8A77980_CLK_Z2>;
power-domains = <&sysc R8A77980_PD_CA53_CPU0>;
@@ -48,7 +48,7 @@
a53_1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <1>;
clocks = <&cpg CPG_CORE R8A77980_CLK_Z2>;
power-domains = <&sysc R8A77980_PD_CA53_CPU1>;
@@ -58,7 +58,7 @@
a53_2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <2>;
clocks = <&cpg CPG_CORE R8A77980_CLK_Z2>;
power-domains = <&sysc R8A77980_PD_CA53_CPU2>;
@@ -68,7 +68,7 @@
a53_3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <3>;
clocks = <&cpg CPG_CORE R8A77980_CLK_Z2>;
power-domains = <&sysc R8A77980_PD_CA53_CPU3>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
index 62bdddcbbae7..144c0820cf60 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
@@ -39,6 +39,16 @@
clock-frequency = <11289600>;
};
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm3 0 50000>;
+
+ brightness-levels = <512 511 505 494 473 440 392 327 241 133 0>;
+ default-brightness-level = <10>;
+
+ power-supply = <&reg_12p0v>;
+ };
+
cvbs-in {
compatible = "composite-video-connector";
label = "CVBS IN";
@@ -159,16 +169,13 @@
};
rsnd_ak4613: sound {
- compatible = "simple-scu-audio-card";
+ compatible = "simple-audio-card";
simple-audio-card,name = "rsnd-ak4613";
simple-audio-card,format = "left_j";
simple-audio-card,bitclock-master = <&sndcpu>;
simple-audio-card,frame-master = <&sndcpu>;
- simple-audio-card,prefix = "ak4613";
- simple-audio-card,routing = "ak4613 Playback", "DAI0 Playback",
- "DAI0 Capture", "ak4613 Capture";
sndcpu: simple-audio-card,cpu {
sound-dai = <&rcar_sound>;
};
@@ -184,6 +191,15 @@
clock-frequency = <24576000>;
};
+ reg_12p0v: regulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "D12.0V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
x13_clk: x13 {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -248,7 +264,6 @@
pinctrl-names = "default";
renesas,no-ether-link;
phy-handle = <&phy0>;
- phy-mode = "rgmii-txid";
status = "okay";
phy0: ethernet-phy@0 {
@@ -680,6 +695,7 @@
vmmc-supply = <&reg_3p3v>;
vqmmc-supply = <&reg_1p8v>;
mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
bus-width = <8>;
non-removable;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index b2f606e286ce..a69faa60ea4d 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -55,26 +55,51 @@
clock-frequency = <0>;
};
+ cluster1_opp: opp_table10 {
+ compatible = "operating-points-v2";
+ opp-shared;
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <820000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <820000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <820000>;
+ clock-latency-ns = <300000>;
+ opp-suspend;
+ };
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
a53_0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0>;
device_type = "cpu";
power-domains = <&sysc R8A77990_PD_CA53_CPU0>;
next-level-cache = <&L2_CA53>;
enable-method = "psci";
+ clocks = <&cpg CPG_CORE R8A77990_CLK_Z2>;
+ operating-points-v2 = <&cluster1_opp>;
};
a53_1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <1>;
device_type = "cpu";
power-domains = <&sysc R8A77990_PD_CA53_CPU1>;
next-level-cache = <&L2_CA53>;
enable-method = "psci";
+ clocks = <&cpg CPG_CORE R8A77990_CLK_Z2>;
+ operating-points-v2 = <&cluster1_opp>;
};
L2_CA53: cache-controller-0 {
@@ -240,6 +265,74 @@
resets = <&cpg 906>;
};
+ pfc: pin-controller@e6060000 {
+ compatible = "renesas,pfc-r8a77990";
+ reg = <0 0xe6060000 0 0x508>;
+ };
+
+ i2c_dvfs: i2c@e60b0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,iic-r8a77990";
+ reg = <0 0xe60b0000 0 0x15>;
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 926>;
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 926>;
+ dmas = <&dmac0 0x11>, <&dmac0 0x10>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ cpg: clock-controller@e6150000 {
+ compatible = "renesas,r8a77990-cpg-mssr";
+ reg = <0 0xe6150000 0 0x1000>;
+ clocks = <&extal_clk>;
+ clock-names = "extal";
+ #clock-cells = <2>;
+ #power-domain-cells = <0>;
+ #reset-cells = <1>;
+ };
+
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a77990-rst";
+ reg = <0 0xe6160000 0 0x0200>;
+ };
+
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,r8a77990-sysc";
+ reg = <0 0xe6180000 0 0x0400>;
+ #power-domain-cells = <1>;
+ };
+
+ thermal: thermal@e6190000 {
+ compatible = "renesas,thermal-r8a77990";
+ reg = <0 0xe6190000 0 0x10>, <0 0xe6190100 0 0x38>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 522>;
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ intc_ex: interrupt-controller@e61c0000 {
+ compatible = "renesas,intc-ex-r8a77990", "renesas,irqc";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ reg = <0 0xe61c0000 0 0x200>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 407>;
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 407>;
+ };
+
i2c0: i2c@e6500000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -369,74 +462,6 @@
status = "disabled";
};
- pfc: pin-controller@e6060000 {
- compatible = "renesas,pfc-r8a77990";
- reg = <0 0xe6060000 0 0x508>;
- };
-
- i2c_dvfs: i2c@e60b0000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "renesas,iic-r8a77990";
- reg = <0 0xe60b0000 0 0x15>;
- interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 926>;
- power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
- resets = <&cpg 926>;
- dmas = <&dmac0 0x11>, <&dmac0 0x10>;
- dma-names = "tx", "rx";
- status = "disabled";
- };
-
- cpg: clock-controller@e6150000 {
- compatible = "renesas,r8a77990-cpg-mssr";
- reg = <0 0xe6150000 0 0x1000>;
- clocks = <&extal_clk>;
- clock-names = "extal";
- #clock-cells = <2>;
- #power-domain-cells = <0>;
- #reset-cells = <1>;
- };
-
- rst: reset-controller@e6160000 {
- compatible = "renesas,r8a77990-rst";
- reg = <0 0xe6160000 0 0x0200>;
- };
-
- sysc: system-controller@e6180000 {
- compatible = "renesas,r8a77990-sysc";
- reg = <0 0xe6180000 0 0x0400>;
- #power-domain-cells = <1>;
- };
-
- thermal: thermal@e6190000 {
- compatible = "renesas,thermal-r8a77990";
- reg = <0 0xe6190000 0 0x10>, <0 0xe6190100 0 0x38>;
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 522>;
- power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
- resets = <&cpg 522>;
- #thermal-sensor-cells = <0>;
- };
-
- intc_ex: interrupt-controller@e61c0000 {
- compatible = "renesas,intc-ex-r8a77990", "renesas,irqc";
- #interrupt-cells = <2>;
- interrupt-controller;
- reg = <0 0xe61c0000 0 0x200>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 407>;
- power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
- resets = <&cpg 407>;
- };
-
hscif0: serial@e6540000 {
compatible = "renesas,hscif-r8a77990",
"renesas,rcar-gen3-hscif",
@@ -993,7 +1018,9 @@
<&cpg CPG_CORE R8A77990_CLK_S3D1C>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
-
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+ <&dmac2 0x13>, <&dmac2 0x12>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";
@@ -1526,6 +1553,33 @@
resets = <&cpg 408>;
};
+ pciec0: pcie@fe000000 {
+ compatible = "renesas,pcie-r8a77990",
+ "renesas,pcie-rcar-gen3";
+ reg = <0 0xfe000000 0 0x80000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ device_type = "pci";
+ ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
+ 0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
+ 0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
+ 0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+ /* Map all possible DDR as inbound ranges */
+ dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
+ clock-names = "pcie", "pcie_bus";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 319>;
+ status = "disabled";
+ };
+
vspb0: vsp@fe960000 {
compatible = "renesas,vsp2";
reg = <0 0xfe960000 0 0x8000>;
@@ -1724,33 +1778,6 @@
};
};
- pciec0: pcie@fe000000 {
- compatible = "renesas,pcie-r8a77990",
- "renesas,pcie-rcar-gen3";
- reg = <0 0xfe000000 0 0x80000>;
- #address-cells = <3>;
- #size-cells = <2>;
- bus-range = <0x00 0xff>;
- device_type = "pci";
- ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
- 0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
- 0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
- 0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
- /* Map all possible DDR as inbound ranges */
- dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
- interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
- clock-names = "pcie", "pcie_bus";
- power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
- resets = <&cpg 319>;
- status = "disabled";
- };
-
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
index 89df9bc844c0..db2bed1751b8 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
@@ -28,8 +28,8 @@
compatible = "pwm-backlight";
pwms = <&pwm1 0 50000>;
- brightness-levels = <256 128 64 16 8 4 0>;
- default-brightness-level = <6>;
+ brightness-levels = <512 511 505 494 473 440 392 327 241 133 0>;
+ default-brightness-level = <10>;
power-supply = <&reg_12p0v>;
enable-gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
index 8530d9fc1371..5bf3af246e14 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
@@ -27,7 +27,7 @@
#size-cells = <0>;
a53_0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0>;
device_type = "cpu";
power-domains = <&sysc R8A77995_PD_CA53_CPU0>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index f66d990b92f1..a225c2457274 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -764,6 +764,7 @@
vqmmc-supply = <&reg_1p8v>;
bus-width = <8>;
mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
non-removable;
fixed-emmc-driver-type = <1>;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
index 1b316d79df88..7a09576b3112 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
@@ -6,11 +6,38 @@
* Copyright (C) 2017 Cogent Embedded, Inc.
*/
+/*
+ * SSI-PCM3168A
+ * aplay -D plughw:0,2 xxx.wav
+ * arecord -D plughw:0,3 xxx.wav
+ */
+
/ {
aliases {
serial1 = &hscif0;
serial2 = &scif1;
};
+
+ clksndsel: clksndsel {
+ #clock-cells = <0>;
+ compatible = "gpio-mux-clock";
+ clocks = <&cs2000>, <&audio_clk_a>; /* clk8snd, clksnd */
+ select-gpios = <&gpio_exp_75 13 GPIO_ACTIVE_HIGH>;
+ };
+
+ snd_3p3v: regulator-snd_3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "snd-3.3v";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ snd_vcc5v: regulator-snd_vcc5v {
+ compatible = "regulator-fixed";
+ regulator-name = "snd-vcc5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
};
&can0 {
@@ -44,6 +71,7 @@
};
&i2c2 {
+ /* U11 */
gpio_exp_74: gpio@74 {
compatible = "ti,tca9539";
reg = <0x74>;
@@ -53,6 +81,13 @@
interrupt-parent = <&gpio6>;
interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+ audio_out_off {
+ gpio-hog;
+ gpios = <0 GPIO_ACTIVE_HIGH>; /* P00 */
+ output-high;
+ line-name = "Audio_Out_OFF";
+ };
+
hub_pwen {
gpio-hog;
gpios = <6 GPIO_ACTIVE_HIGH>;
@@ -80,8 +115,16 @@
output-high;
line-name = "OTG EXTLPn";
};
+
+ snd_rst {
+ gpio-hog;
+ gpios = <15 GPIO_ACTIVE_HIGH>; /* P17 */
+ output-high;
+ line-name = "SND_RST";
+ };
};
+ /* U5 */
gpio_exp_75: gpio@75 {
compatible = "ti,tca9539";
reg = <0x75>;
@@ -98,6 +141,48 @@
#size-cells = <0>;
reg = <0x71>;
reset-gpios = <&gpio5 3 GPIO_ACTIVE_LOW>;
+
+ /* Audio_SDA, Audio_SCL */
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+
+ pcm3168a: audio-codec@44 {
+ #sound-dai-cells = <0>;
+ compatible = "ti,pcm3168a";
+ reg = <0x44>;
+ clocks = <&clksndsel>;
+ clock-names = "scki";
+
+ VDD1-supply = <&snd_3p3v>;
+ VDD2-supply = <&snd_3p3v>;
+ VCCAD1-supply = <&snd_vcc5v>;
+ VCCAD2-supply = <&snd_vcc5v>;
+ VCCDA1-supply = <&snd_vcc5v>;
+ VCCDA2-supply = <&snd_vcc5v>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mclk-fs = <512>;
+ port@0 {
+ reg = <0>;
+ pcm3168a_endpoint_p: endpoint {
+ remote-endpoint = <&rsnd_for_pcm3168a_play>;
+ clocks = <&clksndsel>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ pcm3168a_endpoint_c: endpoint {
+ remote-endpoint = <&rsnd_for_pcm3168a_capture>;
+ clocks = <&clksndsel>;
+ };
+ };
+ };
+ };
+ };
};
};
@@ -173,6 +258,11 @@
groups = "usb0";
function = "usb0";
};
+
+ sound_pcm_pins: sound-pcm {
+ groups = "ssi349_ctrl", "ssi3_data", "ssi4_data";
+ function = "ssi";
+ };
};
&scif1 {
@@ -193,3 +283,51 @@
&xhci0 {
status = "okay";
};
+
+&sound_card {
+ dais = <&rsnd_port0 /* ak4613 */
+ &rsnd_port1 /* HDMI0 */
+ &rsnd_port2 /* pcm3168a playback */
+ &rsnd_port3 /* pcm3168a capture */
+ >;
+};
+
+&rcar_sound {
+ pinctrl-0 = <&sound_pins
+ &sound_clk_pins
+ &sound_pcm_pins>;
+
+ ports {
+ /* rsnd_port0/1 are on salvator-common */
+ rsnd_port2: port@2 {
+ reg = <2>;
+ rsnd_for_pcm3168a_play: endpoint {
+ remote-endpoint = <&pcm3168a_endpoint_p>;
+
+ dai-format = "i2s";
+ bitclock-master = <&rsnd_for_pcm3168a_play>;
+ frame-master = <&rsnd_for_pcm3168a_play>;
+ dai-tdm-slot-num = <8>;
+
+ playback = <&ssi3>;
+ };
+ };
+ rsnd_port3: port@3 {
+ reg = <3>;
+ rsnd_for_pcm3168a_capture: endpoint {
+ remote-endpoint = <&pcm3168a_endpoint_c>;
+
+ dai-format = "i2s";
+ bitclock-master = <&rsnd_for_pcm3168a_capture>;
+ frame-master = <&rsnd_for_pcm3168a_capture>;
+ dai-tdm-slot-num = <6>;
+
+ capture = <&ssi4>;
+ };
+ };
+ };
+};
+
+&ssi4 {
+ shared-pin;
+};
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index de694fdae067..e70e1bac2be4 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -6,6 +6,14 @@
* Copyright (C) 2016 Cogent Embedded, Inc.
*/
+/*
+ * SSI-AK4613
+ * aplay -D plughw:0,0 xxx.wav
+ * arecord -D plughw:0,0 xxx.wav
+ * SSI-HDMI
+ * aplay -D plughw:0,1 xxx.wav
+ */
+
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
@@ -83,20 +91,13 @@
regulator-always-on;
};
- rsnd_ak4613: sound {
- compatible = "simple-audio-card";
-
- simple-audio-card,format = "left_j";
- simple-audio-card,bitclock-master = <&sndcpu>;
- simple-audio-card,frame-master = <&sndcpu>;
-
- sndcpu: simple-audio-card,cpu {
- sound-dai = <&rcar_sound>;
- };
+ sound_card: sound {
+ compatible = "audio-graph-card";
+ label = "rcar-sound";
- sndcodec: simple-audio-card,codec {
- sound-dai = <&ak4613>;
- };
+ dais = <&rsnd_port0 /* ak4613 */
+ &rsnd_port1 /* HDMI0 */
+ >;
};
vcc_sdhi0: regulator-vcc-sdhi0 {
@@ -182,6 +183,12 @@
remote-endpoint = <&hdmi0_con>;
};
};
+ port@2 {
+ reg = <2>;
+ dw_hdmi0_snd_in: endpoint {
+ remote-endpoint = <&rsnd_for_hdmi>;
+ };
+ };
};
};
@@ -211,6 +218,12 @@
asahi-kasei,out4-single-end;
asahi-kasei,out5-single-end;
asahi-kasei,out6-single-end;
+
+ port {
+ ak4613_endpoint: endpoint {
+ remote-endpoint = <&rsnd_for_ak4613>;
+ };
+ };
};
cs2000: clk-multiplier@4f {
@@ -384,10 +397,33 @@
<&audio_clk_c>,
<&cpg CPG_CORE CPG_AUDIO_CLK_I>;
- rcar_sound,dai {
- dai0 {
- playback = <&ssi0 &src0 &dvc0>;
- capture = <&ssi1 &src1 &dvc1>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ rsnd_port0: port@0 {
+ reg = <0>;
+ rsnd_for_ak4613: endpoint {
+ remote-endpoint = <&ak4613_endpoint>;
+
+ dai-format = "left_j";
+ bitclock-master = <&rsnd_for_ak4613>;
+ frame-master = <&rsnd_for_ak4613>;
+
+ playback = <&ssi0 &src0 &dvc0>;
+ capture = <&ssi1 &src1 &dvc1>;
+ };
+ };
+ rsnd_port1: port@1 {
+ reg = <1>;
+ rsnd_for_hdmi: endpoint {
+ remote-endpoint = <&dw_hdmi0_snd_in>;
+
+ dai-format = "i2s";
+ bitclock-master = <&rsnd_for_hdmi>;
+ frame-master = <&rsnd_for_hdmi>;
+
+ playback = <&ssi2>;
+ };
};
};
};
@@ -427,6 +463,7 @@
vqmmc-supply = <&reg_1p8v>;
bus-width = <8>;
mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
non-removable;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index de0c406c20cc..1b28fa72ea0b 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -16,8 +16,11 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-bob.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-kevin.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-inx.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-kd.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopc-t4.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rock-pi-4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rock960.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rockpro64.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index 9aa8d5ef9e45..eb992d60e6ba 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -40,7 +40,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a35", "arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x0>;
enable-method = "psci";
clocks = <&cru ARMCLK>;
@@ -52,7 +52,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a35", "arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x1>;
enable-method = "psci";
clocks = <&cru ARMCLK>;
@@ -64,7 +64,7 @@
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a35", "arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x2>;
enable-method = "psci";
clocks = <&cru ARMCLK>;
@@ -76,7 +76,7 @@
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a35", "arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x3>;
enable-method = "psci";
clocks = <&cru ARMCLK>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index 99d0d9912950..33c44e857247 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -104,6 +104,7 @@
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&rgmiim1_pins>;
+ snps,force_thresh_dma_mode;
snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index bd937d68ca3b..2157a528276b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -40,6 +40,7 @@
pinctrl-0 = <&usb30_host_drv>;
regulator-name = "vcc_host_5v";
regulator-always-on;
+ regulator-boot-on;
vin-supply = <&vcc_sys>;
};
@@ -51,6 +52,7 @@
pinctrl-0 = <&usb20_host_drv>;
regulator-name = "vcc_host1_5v";
regulator-always-on;
+ regulator-boot-on;
vin-supply = <&vcc_sys>;
};
@@ -66,7 +68,8 @@
sound {
compatible = "audio-graph-card";
label = "rockchip,rk3328";
- dais = <&spdif_p0>;
+ dais = <&i2s1_p0
+ &spdif_p0>;
};
spdif-dit {
@@ -81,6 +84,16 @@
};
};
+&codec {
+ status = "okay";
+
+ port@0 {
+ codec_p0_0: endpoint {
+ remote-endpoint = <&i2s1_p0_0>;
+ };
+ };
+};
+
&cpu0 {
cpu-supply = <&vdd_arm>;
};
@@ -243,6 +256,18 @@
};
};
+&i2s1 {
+ status = "okay";
+
+ i2s1_p0: port {
+ i2s1_p0_0: endpoint {
+ dai-format = "i2s";
+ mclk-fs = <256>;
+ remote-endpoint = <&codec_p0_0>;
+ };
+ };
+};
+
&io_domains {
status = "okay";
@@ -290,7 +315,6 @@
&spdif {
pinctrl-0 = <&spdifm0_tx>;
status = "okay";
- #sound-dai-cells = <0>;
spdif_p0: port {
spdif_p0_0: endpoint {
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index ecd7f19c3542..84f14b132e8f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -37,7 +37,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
clocks = <&cru ARMCLK>;
#cooling-cells = <2>;
@@ -49,7 +49,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
clocks = <&cru ARMCLK>;
#cooling-cells = <2>;
@@ -61,7 +61,7 @@
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
clocks = <&cru ARMCLK>;
#cooling-cells = <2>;
@@ -73,7 +73,7 @@
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
clocks = <&cru ARMCLK>;
#cooling-cells = <2>;
@@ -184,6 +184,7 @@
clock-names = "i2s_clk", "i2s_hclk";
dmas = <&dmac 11>, <&dmac 12>;
dma-names = "tx", "rx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -195,6 +196,7 @@
clock-names = "i2s_clk", "i2s_hclk";
dmas = <&dmac 14>, <&dmac 15>;
dma-names = "tx", "rx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -206,6 +208,7 @@
clock-names = "i2s_clk", "i2s_hclk";
dmas = <&dmac 0>, <&dmac 1>;
dma-names = "tx", "rx";
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -219,6 +222,7 @@
dma-names = "tx";
pinctrl-names = "default";
pinctrl-0 = <&spdifm2_tx>;
+ #sound-dai-cells = <0>;
status = "disabled";
};
@@ -672,6 +676,16 @@
};
};
+ codec: codec@ff410000 {
+ compatible = "rockchip,rk3328-codec";
+ reg = <0x0 0xff410000 0x0 0x1000>;
+ clocks = <&cru PCLK_ACODECPHY>, <&cru SCLK_I2S1>;
+ clock-names = "pclk", "mclk";
+ rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
hdmiphy: phy@ff430000 {
compatible = "rockchip,rk3328-hdmi-phy";
reg = <0x0 0xff430000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
index 4de089149c50..e96eb62f362b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
@@ -116,7 +116,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
mmc-pwrseq = <&emmc_pwrseq>;
non-removable;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
index 6b9b1ac1994c..8fa550cbd1a4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -78,7 +78,6 @@
bus-width = <8>;
cap-mmc-highspeed;
clock-frequency = <150000000>;
- disable-wp;
non-removable;
vmmc-supply = <&vcc_io>;
vqmmc-supply = <&vcc18_flash>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
index 1315972412df..1b35d612b660 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
@@ -139,7 +139,6 @@
&emmc {
bus-width = <8>;
clock-frequency = <150000000>;
- disable-wp;
mmc-hs200-1_8v;
non-removable;
vmmc-supply = <&vcc33_io>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index 96147d93dd1d..f5aa3cad67c5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -161,7 +161,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
mmc-pwrseq = <&emmc_pwrseq>;
mmc-hs200-1_2v;
mmc-hs200-1_8v;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
index fc1bf078a41f..41edcfd53184 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
@@ -48,7 +48,6 @@
bus-width = <8>;
cap-mmc-highspeed;
clock-frequency = <150000000>;
- disable-wp;
mmc-hs200-1_8v;
no-sdio;
no-sd;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index 7452bedf1a7e..d34064c65f10 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -149,7 +149,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
mmc-pwrseq = <&emmc_pwrseq>;
non-removable;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 7014d10b954c..06e7c31d7d07 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -73,7 +73,7 @@
cpu_l0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -81,7 +81,7 @@
cpu_l1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -89,7 +89,7 @@
cpu_l2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -97,7 +97,7 @@
cpu_l3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -105,7 +105,7 @@
cpu_b0: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x100>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -113,7 +113,7 @@
cpu_b1: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x101>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -121,7 +121,7 @@
cpu_b2: cpu@102 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x102>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -129,7 +129,7 @@
cpu_b3: cpu@103 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x103>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 1ee0dc0d9f10..d1cf404b8708 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -22,7 +22,7 @@
backlight = <&backlight>;
power-supply = <&pp3300_disp>;
- ports {
+ port {
panel_in_edp: endpoint {
remote-endpoint = <&edp_out_panel>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
index c400be64170e..931640e9aed4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
@@ -200,6 +200,19 @@
pinctrl-0 = <&bl_en>;
pwm-delay-us = <10000>;
};
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_l>;
+
+ wake_on_bt: wake-on-bt {
+ label = "Wake-on-Bluetooth";
+ gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_WAKEUP>;
+ wakeup-source;
+ };
+ };
};
&ppvar_bigcpu {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index 81e73103fa78..15e254a77391 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -43,7 +43,7 @@
backlight = <&backlight>;
power-supply = <&pp3300_disp>;
- ports {
+ port {
panel_in_edp: endpoint {
remote-endpoint = <&edp_out_panel>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index fc50b3ef758c..62ea7d6a7d4a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -175,6 +175,21 @@
pinctrl-0 = <&dmic_en>;
wakeup-delay-ms = <250>;
};
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pen_eject_odl>;
+
+ pen-insert {
+ label = "Pen Insert";
+ /* Insert = low, eject = high */
+ gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ linux,code = <SW_PEN_INSERTED>;
+ linux,input-type = <EV_SW>;
+ wakeup-source;
+ };
+ };
};
/* pp900_s0 aliases */
@@ -328,20 +343,6 @@ camera: &i2c7 {
<400000000>;
};
-&gpio_keys {
- pinctrl-names = "default";
- pinctrl-0 = <&bt_host_wake_l>, <&pen_eject_odl>;
-
- pen-insert {
- label = "Pen Insert";
- /* Insert = low, eject = high */
- gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
- linux,code = <SW_PEN_INSERTED>;
- linux,input-type = <EV_SW>;
- wakeup-source;
- };
-};
-
&i2c_tunnel {
google,remote-bus = <0>;
};
@@ -437,8 +438,19 @@ camera: &i2c7 {
status = "okay";
};
-&wake_on_bt {
- gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+&usb_host0_ohci {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qca_bt: bluetooth@1 {
+ compatible = "usbcf3,e300", "usb4ca,301a";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_l>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "wakeup";
+ };
};
/* PINCTRL OVERRIDES */
@@ -455,7 +467,7 @@ camera: &i2c7 {
};
&bt_host_wake_l {
- rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_none>;
};
&ec_ap_int_l {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index ea607a601a86..da03fa9c5662 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -269,19 +269,6 @@
#clock-cells = <0>;
};
- gpio_keys: gpio-keys {
- compatible = "gpio-keys";
- pinctrl-names = "default";
- pinctrl-0 = <&bt_host_wake_l>;
-
- wake_on_bt: wake-on-bt {
- label = "Wake-on-Bluetooth";
- gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_WAKEUP>;
- wakeup-source;
- };
- };
-
max98357a: max98357a {
compatible = "maxim,max98357a";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
new file mode 100644
index 000000000000..84433cf02be9
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * FriendlyElec NanoPC-T4 board device tree source
+ *
+ * Copyright (c) 2018 FriendlyElec Computer Tech. Co., Ltd.
+ * (http://www.friendlyarm.com)
+ *
+ * Copyright (c) 2018 Collabora Ltd.
+ */
+
+/dts-v1/;
+#include "rk3399-nanopi4.dtsi"
+
+/ {
+ model = "FriendlyElec NanoPC-T4";
+ compatible = "friendlyarm,nanopc-t4", "rockchip,rk3399";
+
+ vcc12v0_sys: vcc12v0-sys {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <12000000>;
+ regulator-min-microvolt = <12000000>;
+ regulator-name = "vcc12v0_sys";
+ };
+
+ vcc5v0_host0: vcc5v0-host0 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc5v0_host0";
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <18000>;
+ };
+ };
+
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_rx>;
+ };
+};
+
+&pinctrl {
+ ir {
+ ir_rx: ir-rx {
+ /* external pullup to VCC3V3_SYS, despite being 1.8V :/ */
+ rockchip,pins = <0 RK_PA6 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+};
+
+&sdhci {
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+};
+
+&u2phy0_host {
+ phy-supply = <&vcc5v0_host0>;
+};
+
+&u2phy1_host {
+ phy-supply = <&vcc5v0_host0>;
+};
+
+&vcc5v0_sys {
+ vin-supply = <&vcc12v0_sys>;
+};
+
+&vcc3v3_sys {
+ vin-supply = <&vcc12v0_sys>;
+};
+
+&vbus_typec {
+ enable-active-high;
+ gpios = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vcc5v0_sys>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4.dts
new file mode 100644
index 000000000000..60358ab8c7df
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4.dts
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * FriendlyElec NanoPi M4 board device tree source
+ *
+ * Copyright (c) 2018 FriendlyElec Computer Tech. Co., Ltd.
+ * (http://www.friendlyarm.com)
+ *
+ * Copyright (c) 2018 Collabora Ltd.
+ * Copyright (c) 2019 Arm Ltd.
+ */
+
+/dts-v1/;
+#include "rk3399-nanopi4.dtsi"
+
+/ {
+ model = "FriendlyElec NanoPi M4";
+ compatible = "friendlyarm,nanopi-m4", "rockchip,rk3399";
+
+ vdd_5v: vdd-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_5v";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc5v0_core: vcc5v0-core {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_core";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vdd_5v>;
+ };
+
+ vcc5v0_usb1: vcc5v0-usb1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb1";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_usb2: vcc5v0-usb2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb2";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&vcc3v3_sys {
+ vin-supply = <&vcc5v0_core>;
+};
+
+&u2phy0_host {
+ phy-supply = <&vcc5v0_usb1>;
+};
+
+&u2phy1_host {
+ phy-supply = <&vcc5v0_usb2>;
+};
+
+&vbus_typec {
+ regulator-always-on;
+ vin-supply = <&vdd_5v>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
new file mode 100644
index 000000000000..d325e117287b
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * RK3399-based FriendlyElec boards device tree source
+ *
+ * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Copyright (c) 2018 FriendlyElec Computer Tech. Co., Ltd.
+ * (http://www.friendlyarm.com)
+ *
+ * Copyright (c) 2018 Collabora Ltd.
+ * Copyright (c) 2019 Arm Ltd.
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/linux-event-codes.h>
+#include "rk3399.dtsi"
+#include "rk3399-opp.dtsi"
+
+/ {
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ clkin_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "clkin_gmac";
+ #clock-cells = <0>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc3v3_sys";
+ };
+
+ vcc5v0_sys: vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0_sys";
+ vin-supply = <&vdd_5v>;
+ };
+
+ /* switched by pmic_sleep */
+ vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc1v8_s3";
+ vin-supply = <&vcc_1v8>;
+ };
+
+ vcc3v0_sd: vcc3v0-sd {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_pwr_h>;
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc3v0_sd";
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vbus_typec: vbus-typec {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vbus_typec";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+ pinctrl-names = "default";
+ pinctrl-0 = <&power_key>;
+
+ power {
+ debounce-interval = <100>;
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
+ label = "GPIO Key Power";
+ linux,code = <KEY_POWER>;
+ wakeup-source;
+ };
+ };
+
+ leds: gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_gpio>;
+
+ status {
+ gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ label = "status_led";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk808 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on_h>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&emmc_phy {
+ status = "okay";
+};
+
+&gmac {
+ assigned-clock-parents = <&clkin_gmac>;
+ assigned-clocks = <&cru SCLK_RMII_SRC>;
+ clock_in_out = "input";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ phy-mode = "rgmii";
+ phy-supply = <&vcc3v3_s3>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 50000>;
+ snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+ tx_delay = <0x28>;
+ rx_delay = <0x11>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ ddc-i2c-bus = <&i2c7>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_cec>;
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ i2c-scl-rising-time-ns = <160>;
+ i2c-scl-falling-time-ns = <30>;
+ status = "okay";
+
+ vdd_cpu_b: regulator@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cpu_b_sleep>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vdd_cpu_b";
+ regulator-ramp-delay = <1000>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: regulator@41 {
+ compatible = "silergy,syr828";
+ reg = <0x41>;
+ fcs,suspend-voltage-selector = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpu_sleep>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vdd_gpu";
+ regulator-ramp-delay = <1000>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ clock-output-names = "xin32k", "rtc_clko_wifi";
+ #clock-cells = <1>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+ vcc10-supply = <&vcc3v3_sys>;
+ vcc11-supply = <&vcc3v3_sys>;
+ vcc12-supply = <&vcc3v3_sys>;
+ vddio-supply = <&vcc_3v0>;
+
+ regulators {
+ vdd_center: DCDC_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-name = "vdd_center";
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_l: DCDC_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-name = "vdd_cpu_l";
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_ddr";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc1v8_cam: LDO_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc1v8_cam";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v0_touch: LDO_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc3v0_touch";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc1v8_pmupll: LDO_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc1v8_pmupll";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_sdio: LDO_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-init-microvolt = <3000000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_sdio";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcca3v0_codec: LDO_REG5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcca3v0_codec";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v5: LDO_REG6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc_1v5";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1500000>;
+ };
+ };
+
+ vcca1v8_codec: LDO_REG7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_codec";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v0: LDO_REG8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc_3v0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc3v3_s3: SWITCH_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc3v3_s3";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_s0: SWITCH_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc3v3_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <200000>;
+ i2c-scl-rising-time-ns = <150>;
+ i2c-scl-falling-time-ns = <30>;
+ status = "okay";
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ i2c-scl-rising-time-ns = <160>;
+ i2c-scl-falling-time-ns = <30>;
+ status = "okay";
+
+ fusb0: typec-portc@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PA2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&fusb0_int>;
+ vbus-supply = <&vbus_typec>;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&io_domains {
+ bt656-supply = <&vcc_1v8>;
+ audio-supply = <&vcca1v8_codec>;
+ sdmmc-supply = <&vcc_sdio>;
+ gpio1830-supply = <&vcc_3v0>;
+ status = "okay";
+};
+
+&pcie_phy {
+ assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>;
+ assigned-clock-rates = <100000000>;
+ assigned-clocks = <&cru SCLK_PCIEPHY_REF>;
+ status = "okay";
+};
+
+&pcie0 {
+ ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
+ max-link-speed = <2>;
+ num-lanes = <4>;
+ status = "okay";
+};
+
+&pinctrl {
+ fusb30x {
+ fusb0_int: fusb0-int {
+ rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ gpio-leds {
+ leds_gpio: leds-gpio {
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ cpu_b_sleep: cpu-b-sleep {
+ rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ gpu_sleep: gpu-sleep {
+ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ rockchip-key {
+ power_key: power-key {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ sdio {
+ bt_host_wake_l: bt-host-wake-l {
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_reg_on_h: bt-reg-on-h {
+ /* external pullup to VCC1V8_PMUPLL */
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_l: bt-wake-l {
+ rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wifi_reg_on_h: wifi-reg_on-h {
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc {
+ sdmmc0_det_l: sdmmc0-det-l {
+ rockchip,pins = <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ sdmmc0_pwr_h: sdmmc0-pwr-h {
+ rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmu1830-supply = <&vcc_3v0>;
+ status = "okay";
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "active";
+ pinctrl-0 = <&pwm2_pin_pull_down>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca1v8_s3>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ mmc-hs200-1_8v;
+ non-removable;
+ status = "okay";
+};
+
+&sdio0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_bus4 &sdmmc_clk &sdmmc_cmd &sdmmc0_det_l>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v0_sd>;
+ vqmmc-supply = <&vcc_sdio>;
+ status = "okay";
+};
+
+&tcphy0 {
+ status = "okay";
+};
+
+&tcphy1 {
+ status = "okay";
+};
+
+&tsadc {
+ /* tshut mode 0:CRU 1:GPIO */
+ rockchip,hw-tshut-mode = <1>;
+ /* tshut polarity 0:LOW 1:HIGH */
+ rockchip,hw-tshut-polarity = <1>;
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_host {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_host {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer &uart0_rts &uart0_cts>;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&rk808 1>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&gpio2 RK_PD2 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
+ max-speed = <4000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_reg_on_h &bt_host_wake_l &bt_wake_l>;
+ vbat-supply = <&vcc3v3_sys>;
+ vddio-supply = <&vcc_1v8>;
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usbdrd3_0 {
+ status = "okay";
+};
+
+&usbdrd3_1 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
new file mode 100644
index 000000000000..4a543f2117d4
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Akash Gajjar <Akash_Gajjar@mentor.com>
+ * Copyright (c) 2019 Pragnesh Patel <Pragnesh_Patel@mentor.com>
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/pwm/pwm.h>
+#include "rk3399.dtsi"
+#include "rk3399-opp.dtsi"
+
+/ {
+ model = "Radxa ROCK Pi 4";
+ compatible = "radxa,rockpi4", "rockchip,rk3399";
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ clkin_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "clkin_gmac";
+ #clock-cells = <0>;
+ };
+
+ vcc12v_dcin: dc-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc5v0_sys: vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc3v3_pcie: vcc3v3-pcie-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio2 RK_PD2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwr_en>;
+ regulator-name = "vcc3v3_pcie";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ regulator-name = "vcc5v0_host";
+ regulator-always-on;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_typec: vcc5v0-typec-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_typec_en>;
+ regulator-name = "vcc5v0_typec";
+ regulator-always-on;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc_lan: vcc3v3-phy-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_lan";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log: vdd-log {
+ compatible = "pwm-regulator";
+ pwms = <&pwm2 0 25000 1>;
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&emmc_phy {
+ status = "okay";
+};
+
+&gmac {
+ assigned-clocks = <&cru SCLK_RMII_SRC>;
+ assigned-clock-parents = <&clkin_gmac>;
+ clock_in_out = "input";
+ phy-supply = <&vcc_lan>;
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 50000>;
+ tx_delay = <0x28>;
+ rx_delay = <0x11>;
+ status = "okay";
+};
+
+&hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_cec>;
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ i2c-scl-rising-time-ns = <168>;
+ i2c-scl-falling-time-ns = <4>;
+ status = "okay";
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk808-clkout2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc5v0_sys>;
+ vcc12-supply = <&vcc3v3_sys>;
+ vddio-supply = <&vcc_1v8>;
+
+ regulators {
+ vdd_center: DCDC_REG1 {
+ regulator-name = "vdd_center";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_l: DCDC_REG2 {
+ regulator-name = "vdd_cpu_l";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG4 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc1v8_codec: LDO_REG1 {
+ regulator-name = "vcc1v8_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc1v8_hdmi: LDO_REG2 {
+ regulator-name = "vcc1v8_hdmi";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_1v8: LDO_REG3 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_sdio: LDO_REG4 {
+ regulator-name = "vcc_sdio";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcca3v0_codec: LDO_REG5 {
+ regulator-name = "vcca3v0_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v5: LDO_REG6 {
+ regulator-name = "vcc_1v5";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1500000>;
+ };
+ };
+
+ vcc0v9_hdmi: LDO_REG7 {
+ regulator-name = "vcc0v9_hdmi";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v0: LDO_REG8 {
+ regulator-name = "vcc_3v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc_cam: SWITCH_REG1 {
+ regulator-name = "vcc_cam";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_mipi: SWITCH_REG2 {
+ regulator-name = "vcc_mipi";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ vdd_cpu_b: regulator@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vsel1_gpio>;
+ regulator-name = "vdd_cpu_b";
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <1000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: regulator@41 {
+ compatible = "silergy,syr828";
+ reg = <0x41>;
+ fcs,suspend-voltage-selector = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vsel2_gpio>;
+ regulator-name = "vdd_gpu";
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <1000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c1 {
+ i2c-scl-rising-time-ns = <300>;
+ i2c-scl-falling-time-ns = <15>;
+ status = "okay";
+};
+
+&i2c3 {
+ i2c-scl-rising-time-ns = <450>;
+ i2c-scl-falling-time-ns = <15>;
+ status = "okay";
+};
+
+&i2c4 {
+ i2c-scl-rising-time-ns = <600>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+};
+
+&i2s0 {
+ rockchip,playback-channels = <8>;
+ rockchip,capture-channels = <8>;
+ status = "okay";
+};
+
+&i2s1 {
+ rockchip,playback-channels = <2>;
+ rockchip,capture-channels = <2>;
+ status = "okay";
+};
+
+&i2s2 {
+ status = "okay";
+};
+
+&io_domains {
+ status = "okay";
+
+ bt656-supply = <&vcc_3v0>;
+ audio-supply = <&vcc_3v0>;
+ sdmmc-supply = <&vcc_sdio>;
+ gpio1830-supply = <&vcc_3v0>;
+};
+
+&pmu_io_domains {
+ status = "okay";
+
+ pmu1830-supply = <&vcc_3v0>;
+};
+
+&pinctrl {
+ pcie {
+ pcie_pwr_en: pcie-pwr-en {
+ rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ vsel1_gpio: vsel1-gpio {
+ rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ vsel2_gpio: vsel2-gpio {
+ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ usb-typec {
+ vcc5v0_typec_en: vcc5v0-typec-en {
+ rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb2 {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins = <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm2 {
+ status = "okay";
+};
+
+&saradc {
+ status = "okay";
+
+ vref-supply = <&vcc_1v8>;
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ max-frequency = <150000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cd &sdmmc_cmd &sdmmc_bus4>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ non-removable;
+ status = "okay";
+};
+
+&tcphy0 {
+ status = "okay";
+};
+
+&tcphy1 {
+ status = "okay";
+};
+
+&tsadc {
+ status = "okay";
+
+ /* tshut mode 0:CRU 1:GPIO */
+ rockchip,hw-tshut-mode = <1>;
+ /* tshut polarity 0:LOW 1:HIGH */
+ rockchip,hw-tshut-polarity = <1>;
+};
+
+&u2phy0 {
+ status = "okay";
+
+ u2phy0_otg: otg-port {
+ status = "okay";
+ };
+
+ u2phy0_host: host-port {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+ };
+};
+
+&u2phy1 {
+ status = "okay";
+
+ u2phy1_otg: otg-port {
+ status = "okay";
+ };
+
+ u2phy1_host: host-port {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usbdrd3_0 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+ status = "okay";
+ dr_mode = "otg";
+};
+
+&usbdrd3_1 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index 56abbb08c133..2927db4dda9d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -9,6 +9,15 @@
#include "rk3399-opp.dtsi"
/ {
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk808 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ };
+
vcc1v8_s0: vcc1v8-s0 {
compatible = "regulator-fixed";
regulator-name = "vcc1v8_s0";
@@ -94,6 +103,10 @@
status = "okay";
};
+&hdmi_sound {
+ status = "okay";
+};
+
&i2c0 {
clock-frequency = <400000>;
i2c-scl-rising-time-ns = <168>;
@@ -336,6 +349,10 @@
status = "okay";
};
+&i2s2 {
+ status = "okay";
+};
+
&io_domains {
bt656-supply = <&vcc1v8_s0>; /* bt656_gpio2ab_ms */
audio-supply = <&vcc1v8_s0>; /* audio_gpio3d4a_ms */
@@ -362,6 +379,20 @@
};
&pinctrl {
+ bt {
+ bt_enable_h: bt-enable-h {
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_host_wake_l: bt-host-wake-l {
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_l: bt-wake-l {
+ rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
sdmmc {
sdmmc_bus1: sdmmc-bus1 {
rockchip,pins =
@@ -387,6 +418,26 @@
};
};
+ sdio0 {
+ sdio0_bus4: sdio0-bus4 {
+ rockchip,pins =
+ <2 20 RK_FUNC_1 &pcfg_pull_up_20ma>,
+ <2 21 RK_FUNC_1 &pcfg_pull_up_20ma>,
+ <2 22 RK_FUNC_1 &pcfg_pull_up_20ma>,
+ <2 23 RK_FUNC_1 &pcfg_pull_up_20ma>;
+ };
+
+ sdio0_cmd: sdio0-cmd {
+ rockchip,pins =
+ <2 24 RK_FUNC_1 &pcfg_pull_up_20ma>;
+ };
+
+ sdio0_clk: sdio0-clk {
+ rockchip,pins =
+ <2 25 RK_FUNC_1 &pcfg_pull_none_20ma>;
+ };
+ };
+
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins =
@@ -403,6 +454,19 @@
<1 14 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
+
+ sdio-pwrseq {
+ wifi_enable_h: wifi-enable-h {
+ rockchip,pins =
+ <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_host_wake_l: wifi-host-wake-l {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
&pwm2 {
@@ -413,6 +477,32 @@
status = "okay";
};
+&sdio0 {
+ bus-width = <4>;
+ clock-frequency = <50000000>;
+ cap-sdio-irq;
+ cap-sd-highspeed;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
+ sd-uhs-sdr104;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_host_wake_l>;
+ };
+};
+
&sdhci {
bus-width = <8>;
mmc-hs400-1_8v;
@@ -437,10 +527,28 @@
status = "okay";
};
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <1>;
+ rockchip,hw-tshut-temp = <110000>;
+ status = "okay";
+};
+
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_xfer &uart0_cts>;
+ pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&rk808 1>;
+ clock-names = "ext_clock";
+ device-wakeup-gpios = <&gpio2 RK_PD3 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>;
+ };
};
&uart2 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
index be78172abc09..1f2394e0587d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
@@ -25,15 +25,6 @@
#clock-cells = <0>;
};
- dc_12v: dc-12v {
- compatible = "regulator-fixed";
- regulator-name = "dc_12v";
- regulator-always-on;
- regulator-boot-on;
- regulator-min-microvolt = <12000000>;
- regulator-max-microvolt = <12000000>;
- };
-
gpio-keys {
compatible = "gpio-keys";
autorepeat;
@@ -83,6 +74,15 @@
reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
};
+ vcc12v_dcin: vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
/* switched by pmic_sleep */
vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
compatible = "regulator-fixed";
@@ -103,7 +103,7 @@
regulator-name = "vcc3v3_pcie";
regulator-always-on;
regulator-boot-on;
- vin-supply = <&dc_12v>;
+ vin-supply = <&vcc12v_dcin>;
};
vcc3v3_sys: vcc3v3-sys {
@@ -113,7 +113,7 @@
regulator-boot-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_sys>;
};
/* Actually 3 regulators (host0, 1, 2) controlled by the same gpio */
@@ -125,7 +125,7 @@
pinctrl-0 = <&vcc5v0_host_en>;
regulator-name = "vcc5v0_host";
regulator-always-on;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_usb>;
};
vcc5v0_typec: vcc5v0-typec-regulator {
@@ -136,17 +136,27 @@
pinctrl-0 = <&vcc5v0_typec_en>;
regulator-name = "vcc5v0_typec";
regulator-always-on;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_usb>;
};
- vcc_sys: vcc-sys {
+ vcc5v0_sys: vcc5v0-sys {
compatible = "regulator-fixed";
- regulator-name = "vcc_sys";
+ regulator-name = "vcc5v0_sys";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
- vin-supply = <&dc_12v>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb: vcc5v0-usb {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
};
vdd_log: vdd-log {
@@ -157,7 +167,7 @@
regulator-boot-on;
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1400000>;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_sys>;
};
};
@@ -212,6 +222,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
&i2c0 {
clock-frequency = <400000>;
i2c-scl-rising-time-ns = <168>;
@@ -230,18 +245,18 @@
rockchip,system-power-controller;
wakeup-source;
- vcc1-supply = <&vcc_sys>;
- vcc2-supply = <&vcc_sys>;
- vcc3-supply = <&vcc_sys>;
- vcc4-supply = <&vcc_sys>;
- vcc6-supply = <&vcc_sys>;
- vcc7-supply = <&vcc_sys>;
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
vcc8-supply = <&vcc3v3_sys>;
- vcc9-supply = <&vcc_sys>;
- vcc10-supply = <&vcc_sys>;
- vcc11-supply = <&vcc_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc5v0_sys>;
vcc12-supply = <&vcc3v3_sys>;
- vddio-supply = <&vcc1v8_pmu>;
+ vddio-supply = <&vcca_1v8>;
regulators {
vdd_center: DCDC_REG1 {
@@ -311,8 +326,8 @@
};
};
- vcc1v8_pmu: LDO_REG3 {
- regulator-name = "vcc1v8_pmu";
+ vcca_1v8: LDO_REG3 {
+ regulator-name = "vcca_1v8";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
@@ -413,7 +428,7 @@
regulator-ramp-delay = <1000>;
regulator-always-on;
regulator-boot-on;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_sys>;
regulator-state-mem {
regulator-off-in-suspend;
@@ -432,7 +447,7 @@
regulator-ramp-delay = <1000>;
regulator-always-on;
regulator-boot-on;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_sys>;
regulator-state-mem {
regulator-off-in-suspend;
@@ -522,12 +537,6 @@
};
};
- lcd-panel {
- lcd_panel_reset: lcd-panel-reset {
- rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>;
- };
- };
-
pcie {
pcie_pwr_en: pcie-pwr-en {
rockchip,pins = <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index 0b8f1edbd746..808ea77f951d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -91,7 +91,7 @@
pinctrl-0 = <&lcd_panel_reset>;
power-supply = <&vcc3v3_s0>;
- ports {
+ port {
panel_in_edp: endpoint {
remote-endpoint = <&edp_out_panel>;
};
@@ -219,7 +219,6 @@
cap-sd-highspeed;
cap-sdio-irq;
clock-frequency = <50000000>;
- disable-wp;
keep-power-in-suspend;
max-frequency = <50000000>;
mmc-pwrseq = <&sdio_pwrseq>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 6cc1c9fa4ea6..db9d948c0b03 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -68,7 +68,7 @@
cpu_l0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
clocks = <&cru ARMCLKL>;
@@ -79,7 +79,7 @@
cpu_l1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
clocks = <&cru ARMCLKL>;
@@ -90,7 +90,7 @@
cpu_l2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
clocks = <&cru ARMCLKL>;
@@ -101,7 +101,7 @@
cpu_l3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
clocks = <&cru ARMCLKL>;
@@ -112,7 +112,7 @@
cpu_b0: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x100>;
enable-method = "psci";
clocks = <&cru ARMCLKB>;
@@ -123,7 +123,7 @@
cpu_b1: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x0 0x101>;
enable-method = "psci";
clocks = <&cru ARMCLKB>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index 31ba52b14e99..a3cd475b48d2 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -33,7 +33,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0x000>;
clocks = <&sys_clk 33>;
enable-method = "psci";
@@ -42,7 +42,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0x001>;
clocks = <&sys_clk 33>;
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
index d7ae28afef7d..9ca692ed1b2b 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
@@ -145,10 +145,10 @@
};
};
-&nand {
+&usb {
status = "okay";
};
-&usb {
+&nand {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index 4a0c46cb11cd..017f6328c191 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -43,7 +43,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0 0x000>;
clocks = <&sys_clk 32>;
enable-method = "psci";
@@ -53,7 +53,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0 0x001>;
clocks = <&sys_clk 32>;
enable-method = "psci";
@@ -63,7 +63,7 @@
cpu2: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0x100>;
clocks = <&sys_clk 33>;
enable-method = "psci";
@@ -73,7 +73,7 @@
cpu3: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0x101>;
clocks = <&sys_clk 33>;
enable-method = "psci";
@@ -869,6 +869,53 @@
};
};
+ pcie: pcie@66000000 {
+ compatible = "socionext,uniphier-pcie", "snps,dw-pcie";
+ status = "disabled";
+ reg-names = "dbi", "link", "config";
+ reg = <0x66000000 0x1000>, <0x66010000 0x10000>,
+ <0x2fff0000 0x10000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ clocks = <&sys_clk 24>;
+ resets = <&sys_rst 24>;
+ num-lanes = <1>;
+ num-viewport = <1>;
+ bus-range = <0x0 0xff>;
+ device_type = "pci";
+ ranges =
+ /* downstream I/O */
+ <0x81000000 0 0x00000000 0x2ffe0000 0 0x00010000>,
+ /* non-prefetchable memory */
+ <0x82000000 0 0x20000000 0x20000000 0 0x0ffe0000>;
+ #interrupt-cells = <1>;
+ interrupt-names = "dma", "msi";
+ interrupts = <0 224 4>, <0 225 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>, /* INTA */
+ <0 0 0 2 &pcie_intc 1>, /* INTB */
+ <0 0 0 3 &pcie_intc 2>, /* INTC */
+ <0 0 0 4 &pcie_intc 3>; /* INTD */
+ phy-names = "pcie-phy";
+ phys = <&pcie_phy>;
+
+ pcie_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 226 4>;
+ };
+ };
+
+ pcie_phy: phy@66038000 {
+ compatible = "socionext,uniphier-ld20-pcie-phy";
+ reg = <0x66038000 0x4000>;
+ #phy-cells = <0>;
+ clocks = <&sys_clk 24>;
+ resets = <&sys_rst 24>;
+ socionext,syscon = <&soc_glue>;
+ };
+
nand: nand@68000000 {
compatible = "socionext,uniphier-denali-nand-v5b";
status = "disabled";
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
index a41f7cac952a..1965e4dfe4a4 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
@@ -101,14 +101,18 @@
};
};
-&nand {
+&usb0 {
status = "okay";
};
-&usb0 {
+&usb1 {
status = "okay";
};
-&usb1 {
+&pcie {
+ status = "okay";
+};
+
+&nand {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 4f57c9e9d7a8..bb97abe1a55f 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -39,7 +39,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0x000>;
clocks = <&sys_clk 33>;
enable-method = "psci";
@@ -48,7 +48,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0x001>;
clocks = <&sys_clk 33>;
enable-method = "psci";
@@ -57,7 +57,7 @@
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0x002>;
clocks = <&sys_clk 33>;
enable-method = "psci";
@@ -66,7 +66,7 @@
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0 0x003>;
clocks = <&sys_clk 33>;
enable-method = "psci";
@@ -727,6 +727,53 @@
};
};
+ pcie: pcie@66000000 {
+ compatible = "socionext,uniphier-pcie", "snps,dw-pcie";
+ status = "disabled";
+ reg-names = "dbi", "link", "config";
+ reg = <0x66000000 0x1000>, <0x66010000 0x10000>,
+ <0x2fff0000 0x10000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ clocks = <&sys_clk 24>;
+ resets = <&sys_rst 24>;
+ num-lanes = <1>;
+ num-viewport = <1>;
+ bus-range = <0x0 0xff>;
+ device_type = "pci";
+ ranges =
+ /* downstream I/O */
+ <0x81000000 0 0x00000000 0x2ffe0000 0 0x00010000>,
+ /* non-prefetchable memory */
+ <0x82000000 0 0x20000000 0x20000000 0 0x0ffe0000>;
+ #interrupt-cells = <1>;
+ interrupt-names = "dma", "msi";
+ interrupts = <0 224 4>, <0 225 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>, /* INTA */
+ <0 0 0 2 &pcie_intc 1>, /* INTB */
+ <0 0 0 3 &pcie_intc 2>, /* INTC */
+ <0 0 0 4 &pcie_intc 3>; /* INTD */
+ phy-names = "pcie-phy";
+ phys = <&pcie_phy>;
+
+ pcie_intc: legacy-interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 226 4>;
+ };
+ };
+
+ pcie_phy: phy@66038000 {
+ compatible = "socionext,uniphier-pxs3-pcie-phy";
+ reg = <0x66038000 0x4000>;
+ #phy-cells = <0>;
+ clocks = <&sys_clk 24>;
+ resets = <&sys_rst 24>;
+ socionext,syscon = <&soc_glue>;
+ };
+
nand: nand@68000000 {
compatible = "socionext,uniphier-denali-nand-v5b";
status = "disabled";
diff --git a/arch/arm64/boot/dts/sprd/sc2731.dtsi b/arch/arm64/boot/dts/sprd/sc2731.dtsi
index 82bd642d770b..e15409f55f43 100644
--- a/arch/arm64/boot/dts/sprd/sc2731.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc2731.dtsi
@@ -13,12 +13,18 @@
spi-max-frequency = <26000000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
+ charger@0 {
+ compatible = "sprd,sc2731-charger";
+ reg = <0x0>;
+ monitored-battery = <&bat>;
+ };
+
led-controller@200 {
- compatible = "sprd,sc27xx-bltc", "sprd,sc2731-bltc";
+ compatible = "sprd,sc2731-bltc";
reg = <0x200>;
#address-cells = <1>;
#size-cells = <0>;
@@ -40,17 +46,17 @@
};
rtc@280 {
- compatible = "sprd,sc27xx-rtc", "sprd,sc2731-rtc";
+ compatible = "sprd,sc2731-rtc";
reg = <0x280>;
interrupt-parent = <&sc2731_pmic>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <2>;
};
pmic_eic: gpio@300 {
- compatible = "sprd,sc27xx-eic";
+ compatible = "sprd,sc2731-eic";
reg = <0x300>;
interrupt-parent = <&sc2731_pmic>;
- interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <5>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -58,29 +64,57 @@
};
efuse@380 {
- compatible = "sprd,sc27xx-efuse", "sprd,sc2731-efuse";
+ compatible = "sprd,sc2731-efuse";
reg = <0x380>;
#address-cells = <1>;
#size-cells = <1>;
hwlocks = <&hwlock 12>;
+
+ fgu_calib: calib@6 {
+ reg = <0x6 0x2>;
+ bits = <0 9>;
+ };
+
+ adc_big_scale: calib@24 {
+ reg = <0x24 0x2>;
+ };
+
+ adc_small_scale: calib@26 {
+ reg = <0x26 0x2>;
+ };
};
pmic_adc: adc@480 {
- compatible = "sprd,sc27xx-adc", "sprd,sc2731-adc";
+ compatible = "sprd,sc2731-adc";
reg = <0x480>;
interrupt-parent = <&sc2731_pmic>;
- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <0>;
#io-channel-cells = <1>;
hwlocks = <&hwlock 4>;
+ nvmem-cell-names = "big_scale_calib", "small_scale_calib";
+ nvmem-cells = <&adc_big_scale>, <&adc_small_scale>;
+ };
+
+ fgu@a00 {
+ compatible = "sprd,sc2731-fgu";
+ reg = <0xa00>;
+ bat-detect-gpio = <&pmic_eic 9 GPIO_ACTIVE_HIGH>;
+ io-channels = <&pmic_adc 3>, <&pmic_adc 6>;
+ io-channel-names = "bat-temp", "charge-vol";
+ monitored-battery = <&bat>;
+ nvmem-cell-names = "fgu_calib";
+ nvmem-cells = <&fgu_calib>;
+ interrupt-parent = <&sc2731_pmic>;
+ interrupts = <4>;
};
vibrator@ec8 {
- compatible = "sprd,sc27xx-vibrator", "sprd,sc2731-vibrator";
+ compatible = "sprd,sc2731-vibrator";
reg = <0xec8>;
};
regulators {
- compatible = "sprd,sc27xx-regulator";
+ compatible = "sprd,sc2731-regulator";
vddarm0: BUCK_CPU0 {
regulator-name = "vddarm0";
diff --git a/arch/arm64/boot/dts/sprd/sc9836.dtsi b/arch/arm64/boot/dts/sprd/sc9836.dtsi
index 4bcdbb709c01..286d7173f94f 100644
--- a/arch/arm64/boot/dts/sprd/sc9836.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9836.dtsi
@@ -18,28 +18,28 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/sprd/sc9860.dtsi b/arch/arm64/boot/dts/sprd/sc9860.dtsi
index 5f57bf055cde..b25d19977170 100644
--- a/arch/arm64/boot/dts/sprd/sc9860.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9860.dtsi
@@ -50,7 +50,7 @@
CPU0: cpu@530000 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x530000>;
enable-method = "psci";
cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
@@ -58,7 +58,7 @@
CPU1: cpu@530001 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x530001>;
enable-method = "psci";
cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
@@ -66,7 +66,7 @@
CPU2: cpu@530002 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x530002>;
enable-method = "psci";
cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
@@ -74,7 +74,7 @@
CPU3: cpu@530003 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x530003>;
enable-method = "psci";
cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
@@ -82,7 +82,7 @@
CPU4: cpu@530100 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x530100>;
enable-method = "psci";
cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
@@ -90,7 +90,7 @@
CPU5: cpu@530101 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x530101>;
enable-method = "psci";
cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
@@ -98,7 +98,7 @@
CPU6: cpu@530102 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x530102>;
enable-method = "psci";
cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
@@ -106,7 +106,7 @@
CPU7: cpu@530103 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x530103>;
enable-method = "psci";
cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
diff --git a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
index 985ebb5d157e..6b95fd94cee3 100644
--- a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
+++ b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
@@ -39,6 +39,22 @@
#size-cells = <2>;
ranges;
};
+
+ bat: battery {
+ compatible = "simple-battery";
+ charge-full-design-microamp-hours = <1900000>;
+ charge-term-current-microamp = <120000>;
+ constant_charge_voltage_max_microvolt = <4350000>;
+ internal-resistance-micro-ohms = <250000>;
+ ocv-capacity-celsius = <20>;
+ ocv-capacity-table-0 = <4185000 100>, <4113000 95>, <4066000 90>,
+ <4022000 85>, <3983000 80>, <3949000 75>,
+ <3917000 70>, <3889000 65>, <3864000 60>,
+ <3835000 55>, <3805000 50>, <3787000 45>,
+ <3777000 40>, <3773000 35>, <3770000 30>,
+ <3765000 25>, <3752000 20>, <3724000 15>,
+ <3680000 10>, <3605000 5>, <3400000 0>;
+ };
};
&uart0 {
diff --git a/arch/arm64/boot/dts/synaptics/as370.dtsi b/arch/arm64/boot/dts/synaptics/as370.dtsi
index 7331acf3874e..addeb0efc616 100644
--- a/arch/arm64/boot/dts/synaptics/as370.dtsi
+++ b/arch/arm64/boot/dts/synaptics/as370.dtsi
@@ -23,7 +23,7 @@
#size-cells = <0>;
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0>;
enable-method = "psci";
@@ -32,7 +32,7 @@
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x1>;
enable-method = "psci";
@@ -41,7 +41,7 @@
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x2>;
enable-method = "psci";
@@ -50,7 +50,7 @@
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x3>;
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi b/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
index 216767e2edf6..15625b99e336 100644
--- a/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
+++ b/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi
@@ -27,7 +27,7 @@
#size-cells = <0>;
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x0>;
enable-method = "psci";
@@ -36,7 +36,7 @@
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x1>;
enable-method = "psci";
@@ -45,7 +45,7 @@
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x2>;
enable-method = "psci";
@@ -54,7 +54,7 @@
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
reg = <0x3>;
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 272cf8fc8d30..752455269fab 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -6,6 +6,26 @@
*/
&cbass_main {
+ msmc_ram: sram@70000000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x70000000 0x0 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x70000000 0x200000>;
+
+ atf-sram@0 {
+ reg = <0x0 0x20000>;
+ };
+
+ sysfw-sram@f0000 {
+ reg = <0xf0000 0x10000>;
+ };
+
+ l3cache-sram@100000 {
+ reg = <0x100000 0x100000>;
+ };
+ };
+
gic500: interrupt-controller@1800000 {
compatible = "arm,gic-v3";
#address-cells = <2>;
@@ -191,4 +211,102 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ sdhci0: sdhci@4f80000 {
+ compatible = "ti,am654-sdhci-5.1";
+ reg = <0x0 0x4f80000 0x0 0x260>, <0x0 0x4f90000 0x0 0x134>;
+ power-domains = <&k3_pds 47>;
+ clocks = <&k3_clks 47 0>, <&k3_clks 47 1>;
+ clock-names = "clk_ahb", "clk_xin";
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ ti,otap-del-sel = <0x2>;
+ ti,trm-icp = <0x8>;
+ dma-coherent;
+ };
+
+ scm_conf: scm_conf@100000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0 0x00100000 0 0x1c000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x00100000 0x1c000>;
+ };
+
+ dwc3_0: dwc3@4000000 {
+ compatible = "ti,am654-dwc3";
+ reg = <0x0 0x4000000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x4000000 0x20000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
+ power-domains = <&k3_pds 151>;
+ assigned-clocks = <&k3_clks 151 2>, <&k3_clks 151 7>;
+ assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
+ <&k3_clks 151 9>; /* set PIPE3_TXB_CLK to CLK_12M_RC/256 (for HS only) */
+
+ usb0: usb@10000 {
+ compatible = "snps,dwc3";
+ reg = <0x10000 0x10000>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "peripheral",
+ "host",
+ "otg";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
+ phys = <&usb0_phy>;
+ phy-names = "usb2-phy";
+ snps,dis_u3_susphy_quirk;
+ };
+ };
+
+ usb0_phy: phy@4100000 {
+ compatible = "ti,am654-usb2", "ti,omap-usb2";
+ reg = <0x0 0x4100000 0x0 0x54>;
+ syscon-phy-power = <&scm_conf 0x4000>;
+ clocks = <&k3_clks 151 0>, <&k3_clks 151 1>;
+ clock-names = "wkupclk", "refclk";
+ #phy-cells = <0>;
+ };
+
+ dwc3_1: dwc3@4020000 {
+ compatible = "ti,am654-dwc3";
+ reg = <0x0 0x4020000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x4020000 0x20000>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
+ power-domains = <&k3_pds 152>;
+ assigned-clocks = <&k3_clks 152 2>;
+ assigned-clock-parents = <&k3_clks 152 4>; /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
+
+ usb1: usb@10000 {
+ compatible = "snps,dwc3";
+ reg = <0x10000 0x10000>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "peripheral",
+ "host",
+ "otg";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
+ phys = <&usb1_phy>;
+ phy-names = "usb2-phy";
+ };
+ };
+
+ usb1_phy: phy@4110000 {
+ compatible = "ti,am654-usb2", "ti,omap-usb2";
+ reg = <0x0 0x4110000 0x0 0x54>;
+ syscon-phy-power = <&scm_conf 0x4020>;
+ clocks = <&k3_clks 152 0>, <&k3_clks 152 1>;
+ clock-names = "wkupclk", "refclk";
+ #phy-cells = <0>;
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 593f718e8fb5..6f7d2b316ded 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -57,4 +57,34 @@
#address-cells = <1>;
#size-cells = <0>;
};
+
+ tscadc0: tscadc@40200000 {
+ compatible = "ti,am654-tscadc", "ti,am3359-tscadc";
+ reg = <0x0 0x40200000 0x0 0x1000>;
+ interrupts = <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 0 2>;
+ assigned-clocks = <&k3_clks 0 2>;
+ assigned-clock-rates = <60000000>;
+ clock-names = "adc_tsc_fck";
+
+ adc {
+ #io-channel-cells = <1>;
+ compatible = "ti,am654-adc", "ti,am3359-adc";
+ };
+ };
+
+ tscadc1: tscadc@40210000 {
+ compatible = "ti,am654-tscadc", "ti,am3359-tscadc";
+ reg = <0x0 0x40210000 0x0 0x1000>;
+ interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&k3_clks 1 2>;
+ assigned-clocks = <&k3_clks 1 2>;
+ assigned-clock-rates = <60000000>;
+ clock-names = "adc_tsc_fck";
+
+ adc {
+ #io-channel-cells = <1>;
+ compatible = "ti,am654-adc", "ti,am3359-adc";
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index e41fc3a5987b..cf1aa276a1ea 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -69,6 +69,29 @@
AM65X_IOPAD(0x01bc, PIN_OUTPUT, 0) /* (AG13) SPI0_CS0 */
>;
};
+
+ main_mmc0_pins_default: main-mmc0-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x01a8, PIN_INPUT_PULLDOWN, 0) /* (B25) MMC0_CLK */
+ AM65X_IOPAD(0x01ac, PIN_INPUT_PULLUP, 0) /* (B27) MMC0_CMD */
+ AM65X_IOPAD(0x01a4, PIN_INPUT_PULLUP, 0) /* (A26) MMC0_DAT0 */
+ AM65X_IOPAD(0x01a0, PIN_INPUT_PULLUP, 0) /* (E25) MMC0_DAT1 */
+ AM65X_IOPAD(0x019c, PIN_INPUT_PULLUP, 0) /* (C26) MMC0_DAT2 */
+ AM65X_IOPAD(0x0198, PIN_INPUT_PULLUP, 0) /* (A25) MMC0_DAT3 */
+ AM65X_IOPAD(0x0194, PIN_INPUT_PULLUP, 0) /* (E24) MMC0_DAT4 */
+ AM65X_IOPAD(0x0190, PIN_INPUT_PULLUP, 0) /* (A24) MMC0_DAT5 */
+ AM65X_IOPAD(0x018c, PIN_INPUT_PULLUP, 0) /* (B26) MMC0_DAT6 */
+ AM65X_IOPAD(0x0188, PIN_INPUT_PULLUP, 0) /* (D25) MMC0_DAT7 */
+ AM65X_IOPAD(0x01b4, PIN_INPUT_PULLUP, 0) /* (A23) MMC0_SDCD */
+ AM65X_IOPAD(0x01b0, PIN_INPUT, 0) /* (C25) MMC0_DS */
+ >;
+ };
+
+ usb1_pins_default: usb1_pins_default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x02c0, PIN_OUTPUT, 0) /* (AC8) USB1_DRVVBUS */
+ >;
+ };
};
&main_pmx1 {
@@ -163,3 +186,45 @@
 #size-cells = <1>;
};
};
+
+&sdhci0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mmc0_pins_default>;
+ bus-width = <8>;
+ non-removable;
+ ti,driver-strength-ohm = <50>;
+};
+
+&dwc3_1 {
+ status = "okay";
+};
+
+&usb1_phy {
+ status = "okay";
+};
+
+&usb1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_pins_default>;
+ dr_mode = "otg";
+};
+
+&dwc3_0 {
+ status = "disabled";
+};
+
+&usb0_phy {
+ status = "disabled";
+};
+
+&tscadc0 {
+ adc {
+ ti,adc-channels = <0 1 2 3 4 5 6 7>;
+ };
+};
+
+&tscadc1 {
+ adc {
+ ti,adc-channels = <0 1 2 3 4 5 6 7>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am654.dtsi b/arch/arm64/boot/dts/ti/k3-am654.dtsi
index 2affa6f6617e..b221abf43ac2 100644
--- a/arch/arm64/boot/dts/ti/k3-am654.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am654.dtsi
@@ -34,7 +34,7 @@
};
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x000>;
device_type = "cpu";
enable-method = "psci";
@@ -48,7 +48,7 @@
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x001>;
device_type = "cpu";
enable-method = "psci";
@@ -62,7 +62,7 @@
};
cpu2: cpu@100 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x100>;
device_type = "cpu";
enable-method = "psci";
@@ -76,7 +76,7 @@
};
cpu3: cpu@101 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x101>;
device_type = "cpu";
enable-method = "psci";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index 13a0a028df98..e5699d0d91e4 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -101,6 +101,7 @@
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
+ post-power-on-delay-ms = <10>;
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index fa4fd777d90e..9aa67340a4d8 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -22,7 +22,7 @@
#size-cells = <0>;
cpu0: cpu@0 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
operating-points-v2 = <&cpu_opp_table>;
@@ -31,7 +31,7 @@
};
cpu1: cpu@1 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
reg = <0x1>;
@@ -40,7 +40,7 @@
};
cpu2: cpu@2 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
reg = <0x2>;
@@ -49,7 +49,7 @@
};
cpu3: cpu@3 {
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
device_type = "cpu";
enable-method = "psci";
reg = <0x3>;
diff --git a/arch/arm64/boot/dts/zte/zx296718.dtsi b/arch/arm64/boot/dts/zte/zx296718.dtsi
index 6eef64761009..cc54837ff4ba 100644
--- a/arch/arm64/boot/dts/zte/zx296718.dtsi
+++ b/arch/arm64/boot/dts/zte/zx296718.dtsi
@@ -86,7 +86,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
clocks = <&topcrm A53_GATE>;
@@ -95,7 +95,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
clocks = <&topcrm A53_GATE>;
@@ -104,7 +104,7 @@
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
clocks = <&topcrm A53_GATE>;
@@ -113,7 +113,7 @@
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a53","arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
clocks = <&topcrm A53_GATE>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 3ef443cfbab6..2d9c39033c1a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -43,6 +43,7 @@ CONFIG_ARCH_HISI=y
CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_MESON=y
CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_MXC=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_R8A774A1=y
@@ -113,6 +114,8 @@ CONFIG_ARM_SCPI_PROTOCOL=y
CONFIG_RASPBERRYPI_FIRMWARE=y
CONFIG_TI_SCI_PROTOCOL=y
CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_IMX_SCU=y
+CONFIG_IMX_SCU_PD=y
CONFIG_ACPI=y
CONFIG_ACPI_APEI=y
CONFIG_ACPI_APEI_GHES=y
@@ -245,6 +248,7 @@ CONFIG_NET_XGENE=y
CONFIG_ATL1C=m
CONFIG_MACB=y
CONFIG_THUNDER_NIC_PF=y
+CONFIG_FEC=y
CONFIG_HIX5HD2_GMAC=y
CONFIG_HNS_DSAF=y
CONFIG_HNS_ENET=y
@@ -319,6 +323,9 @@ CONFIG_SERIAL_MESON_CONSOLE=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_TEGRA_TCU=y
+CONFIG_SERIAL_IMX=y
+CONFIG_SERIAL_IMX_CONSOLE=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
@@ -326,6 +333,8 @@ CONFIG_SERIAL_QCOM_GENI=y
CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
CONFIG_SERIAL_XILINX_PS_UART=y
CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_FSL_LPUART=y
+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
CONFIG_SERIAL_MVEBU_UART=y
CONFIG_SERIAL_DEV_BUS=y
CONFIG_VIRTIO_CONSOLE=y
@@ -339,6 +348,7 @@ CONFIG_I2C_MUX=y
CONFIG_I2C_MUX_PCA954x=y
CONFIG_I2C_BCM2835=m
CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_GPIO=m
CONFIG_I2C_IMX=y
CONFIG_I2C_MESON=y
CONFIG_I2C_MV64XXX=y
@@ -362,9 +372,12 @@ CONFIG_SPI_ROCKCHIP=y
CONFIG_SPI_QUP=y
CONFIG_SPI_S3C64XX=y
CONFIG_SPI_SPIDEV=m
+CONFIG_SPI_NXP_FLEXSPI=y
CONFIG_SPMI=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_IMX8MQ=y
+CONFIG_PINCTRL_IMX8QXP=y
CONFIG_PINCTRL_IPQ8074=y
CONFIG_PINCTRL_MSM8916=y
CONFIG_PINCTRL_MSM8994=y
@@ -401,6 +414,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_RCAR_THERMAL=y
CONFIG_RCAR_GEN3_THERMAL=y
CONFIG_ARMADA_THERMAL=y
CONFIG_BCM2835_THERMAL=m
@@ -412,6 +426,7 @@ CONFIG_UNIPHIER_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_ARM_SP805_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
+CONFIG_IMX2_WDT=y
CONFIG_MESON_GXBB_WATCHDOG=m
CONFIG_MESON_WATCHDOG=m
CONFIG_RENESAS_WDT=y
@@ -437,6 +452,8 @@ CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_HI6421V530=y
CONFIG_REGULATOR_HI655X=y
CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_MAX8973=y
+CONFIG_REGULATOR_PFUZE100=y
CONFIG_REGULATOR_PWM=y
CONFIG_REGULATOR_QCOM_RPMH=y
CONFIG_REGULATOR_QCOM_SMD_RPM=y
@@ -455,12 +472,14 @@ CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
# CONFIG_DVB_NET is not set
+CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_V4L_MEM2MEM_DRIVERS=y
CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_SUN6I_CSI=m
CONFIG_VIDEO_RENESAS_FCP=m
CONFIG_VIDEO_RENESAS_VSP1=m
CONFIG_DRM=m
@@ -485,13 +504,16 @@ CONFIG_DRM_SUN8I_DW_HDMI=m
CONFIG_DRM_SUN8I_MIXER=m
CONFIG_DRM_TEGRA=m
CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_SII902X=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_VC4=m
CONFIG_DRM_HISI_HIBMC=m
CONFIG_DRM_HISI_KIRIN=m
CONFIG_DRM_MESON=m
+CONFIG_DRM_PL111=m
CONFIG_FB=y
-CONFIG_FB_ARMCLCD=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_LP855X=m
@@ -506,11 +528,16 @@ CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_RT5645=m
CONFIG_SND_SOC_RK3399_GRU_SOUND=m
+CONFIG_SND_MESON_AXG_SOUND_CARD=m
CONFIG_SND_SOC_SAMSUNG=y
CONFIG_SND_SOC_RCAR=m
CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SOC_PCM3168A_I2C=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_SND_SOC_ES7134=m
+CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_TAS571X=m
CONFIG_I2C_HID=m
CONFIG_USB=y
CONFIG_USB_OTG=y
@@ -547,6 +574,7 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_ARASAN=y
CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_ESDHC_IMX=y
CONFIG_MMC_SDHCI_TEGRA=y
CONFIG_MMC_SDHCI_F_SDH30=y
CONFIG_MMC_MESON_GX=y
@@ -586,6 +614,7 @@ CONFIG_RTC_DRV_PL031=y
CONFIG_RTC_DRV_SUN6I=y
CONFIG_RTC_DRV_ARMADA38X=y
CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_IMX_SC=m
CONFIG_RTC_DRV_XGENE=y
CONFIG_DMADEVICES=y
CONFIG_DMA_BCM2835=m
@@ -613,6 +642,8 @@ CONFIG_COMMON_CLK_CS2000_CP=y
CONFIG_COMMON_CLK_S2MPS11=y
CONFIG_CLK_QORIQ=y
CONFIG_COMMON_CLK_PWM=y
+CONFIG_CLK_IMX8MQ=y
+CONFIG_CLK_IMX8QXP=y
CONFIG_TI_SCI_CLK=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_QCOM_CLK_SMD_RPM=y
@@ -627,6 +658,7 @@ CONFIG_SDM_GCC_845=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ARM_MHU=y
+CONFIG_IMX_MBOX=y
CONFIG_PLATFORM_MHU=y
CONFIG_BCM2835_MBOX=y
CONFIG_TI_MESSAGE_MANAGER=y
@@ -644,6 +676,7 @@ CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=m
CONFIG_RPMSG_QCOM_SMD=y
CONFIG_RASPBERRYPI_POWER=y
+CONFIG_IMX_GPCV2_PM_DOMAINS=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_GENI_SE=y
CONFIG_QCOM_GLINK_SSR=m
@@ -689,6 +722,7 @@ CONFIG_PHY_HISI_INNO_USB2=y
CONFIG_PHY_MVEBU_CP110_COMPHY=y
CONFIG_PHY_QCOM_QMP=m
CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_RCAR_GEN3_PCIE=y
CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_RCAR_GEN3_USB3=m
CONFIG_PHY_ROCKCHIP_EMMC=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index e3a375c4cb83..1b151442dac1 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
beq 10f
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
b 7b
-8: mov w7, w8
+8: cbz w8, 91f
+ mov w7, w8
add w8, w8, #16
9: ext v1.16b, v1.16b, v1.16b, #1
adds w7, w7, #1
bne 9b
- eor v0.16b, v0.16b, v1.16b
+91: eor v0.16b, v0.16b, v1.16b
st1 {v0.16b}, [x0]
10: str w8, [x3]
ret
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 68b11aa690e4..5fc6f51908fd 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
abytes -= added;
}
- while (abytes > AES_BLOCK_SIZE) {
+ while (abytes >= AES_BLOCK_SIZE) {
__aes_arm64_encrypt(key->key_enc, mac, mac,
num_rounds(key));
crypto_xor(mac, in, AES_BLOCK_SIZE);
@@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
num_rounds(key));
crypto_xor(mac, in, abytes);
*macp = abytes;
- } else {
- *macp = 0;
}
}
}
@@ -255,7 +253,7 @@ static int ccm_encrypt(struct aead_request *req)
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
- err = skcipher_walk_aead_encrypt(&walk, req, true);
+ err = skcipher_walk_aead_encrypt(&walk, req, false);
if (may_use_simd()) {
while (walk.nbytes) {
@@ -313,7 +311,7 @@ static int ccm_decrypt(struct aead_request *req)
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
- err = skcipher_walk_aead_decrypt(&walk, req, true);
+ err = skcipher_walk_aead_decrypt(&walk, req, false);
if (may_use_simd()) {
while (walk.nbytes) {
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 67700045a0e0..4c7ce231963c 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -320,8 +320,7 @@ AES_ENTRY(aes_ctr_encrypt)
.Lctrtailblock:
st1 {v0.16b}, [x0]
- ldp x29, x30, [sp], #16
- ret
+ b .Lctrout
.Lctrcarry:
umov x7, v4.d[0] /* load upper word of ctr */
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index e613a87f8b53..8432c8d0dea6 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 )
8: next_ctr v0
st1 {v0.16b}, [x24]
- cbz x23, 0f
+ cbz x23, .Lctr_done
cond_yield_neon 98b
b 99b
-0: frame_pop
+.Lctr_done:
+ frame_pop
ret
/*
* If we are handling the tail of the input (x6 != NULL), return the
* final keystream block back to the caller.
*/
+0: cbz x25, 8b
+ st1 {v0.16b}, [x25]
+ b 8b
1: cbz x25, 8b
st1 {v1.16b}, [x25]
b 8b
diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S
index 021bb9e9784b..706c4e10e9e2 100644
--- a/arch/arm64/crypto/chacha-neon-core.S
+++ b/arch/arm64/crypto/chacha-neon-core.S
@@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon)
mov w3, w2
bl chacha_permute
- st1 {v0.16b}, [x1], #16
- st1 {v3.16b}, [x1]
+ st1 {v0.4s}, [x1], #16
+ st1 {v3.4s}, [x1]
ldp x29, x30, [sp], #16
ret
@@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon)
add v3.4s, v3.4s, v19.4s
add a2, a2, w8
add a3, a3, w9
+CPU_BE( rev a0, a0 )
+CPU_BE( rev a1, a1 )
+CPU_BE( rev a2, a2 )
+CPU_BE( rev a3, a3 )
ld4r {v24.4s-v27.4s}, [x0], #16
ld4r {v28.4s-v31.4s}, [x0]
@@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon)
add v7.4s, v7.4s, v23.4s
add a6, a6, w8
add a7, a7, w9
+CPU_BE( rev a4, a4 )
+CPU_BE( rev a5, a5 )
+CPU_BE( rev a6, a6 )
+CPU_BE( rev a7, a7 )
// x8[0-3] += s2[0]
// x9[0-3] += s2[1]
@@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon)
add v11.4s, v11.4s, v27.4s
add a10, a10, w8
add a11, a11, w9
+CPU_BE( rev a8, a8 )
+CPU_BE( rev a9, a9 )
+CPU_BE( rev a10, a10 )
+CPU_BE( rev a11, a11 )
// x12[0-3] += s3[0]
// x13[0-3] += s3[1]
@@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon)
add v15.4s, v15.4s, v31.4s
add a14, a14, w8
add a15, a15, w9
+CPU_BE( rev a12, a12 )
+CPU_BE( rev a13, a13 )
+CPU_BE( rev a14, a14 )
+CPU_BE( rev a15, a15 )
// interleave 32-bit words in state n, n+1
ldp w6, w7, [x2], #64
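
Note on the chacha-neon hunks above: both changes are big-endian fixes. The hchacha output buffer is read back as an array of u32 by the C caller, so it has to be stored with a 32-bit element size (.4s rather than .16b), and the keystream words kept in general-purpose registers in the 4-block path need a byte swap (the added CPU_BE(rev ...) lines) before they are combined with the data stream, because ChaCha output words are defined as little-endian. A minimal portable model of that output step, as an editorial sketch rather than patch code:

/*
 * Editorial sketch: emit one 32-bit ChaCha output word as little-endian
 * bytes regardless of host endianness; __builtin_bswap32 plays the role
 * of the 'rev' instruction added under CPU_BE().
 */
#include <stdint.h>
#include <string.h>

static void chacha_store_word_le(uint8_t *out, uint32_t word)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	word = __builtin_bswap32(word);		/* C analogue of 'rev' */
#endif
	memcpy(out, &word, sizeof(word));	/* store in memory byte order */
}
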
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index 9e82e8e8ed05..e545b42e6a46 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -2,12 +2,14 @@
// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
+// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
@@ -54,19 +56,11 @@
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Function API:
-// UINT16 crc_t10dif_pcl(
-// UINT16 init_crc, //initial CRC value, 16 bits
-// const unsigned char *buf, //buffer pointer to calculate CRC on
-// UINT64 len //buffer length in bytes (64-bit data)
-// );
-//
// Reference paper titled "Fast CRC Computation for Generic
// Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//
-//
#include <linux/linkage.h>
#include <asm/assembler.h>
@@ -74,14 +68,14 @@
.text
.cpu generic+crypto
- arg1_low32 .req w19
- arg2 .req x20
- arg3 .req x21
+ init_crc .req w19
+ buf .req x20
+ len .req x21
+ fold_consts_ptr .req x22
- vzr .req v13
+ fold_consts .req v10
ad .req v14
- bd .req v10
k00_16 .req v15
k32_48 .req v16
@@ -143,11 +137,11 @@ __pmull_p8_core:
ext t5.8b, ad.8b, ad.8b, #2 // A2
ext t6.8b, ad.8b, ad.8b, #3 // A3
- pmull t4.8h, t4.8b, bd.8b // F = A1*B
+ pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B
pmull t8.8h, ad.8b, bd1.8b // E = A*B1
- pmull t5.8h, t5.8b, bd.8b // H = A2*B
+ pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B
pmull t7.8h, ad.8b, bd2.8b // G = A*B2
- pmull t6.8h, t6.8b, bd.8b // J = A3*B
+ pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B
pmull t9.8h, ad.8b, bd3.8b // I = A*B3
pmull t3.8h, ad.8b, bd4.8b // K = A*B4
b 0f
@@ -157,11 +151,11 @@ __pmull_p8_core:
tbl t5.16b, {ad.16b}, perm2.16b // A2
tbl t6.16b, {ad.16b}, perm3.16b // A3
- pmull2 t4.8h, t4.16b, bd.16b // F = A1*B
+ pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B
pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1
- pmull2 t5.8h, t5.16b, bd.16b // H = A2*B
+ pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B
pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2
- pmull2 t6.8h, t6.16b, bd.16b // J = A3*B
+ pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B
pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3
pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
@@ -203,14 +197,14 @@ __pmull_p8_core:
ENDPROC(__pmull_p8_core)
.macro __pmull_p8, rq, ad, bd, i
- .ifnc \bd, v10
+ .ifnc \bd, fold_consts
.err
.endif
mov ad.16b, \ad\().16b
.ifb \i
- pmull \rq\().8h, \ad\().8b, bd.8b // D = A*B
+ pmull \rq\().8h, \ad\().8b, \bd\().8b // D = A*B
.else
- pmull2 \rq\().8h, \ad\().16b, bd.16b // D = A*B
+ pmull2 \rq\().8h, \ad\().16b, \bd\().16b // D = A*B
.endif
bl .L__pmull_p8_core\i
@@ -219,17 +213,19 @@ ENDPROC(__pmull_p8_core)
eor \rq\().16b, \rq\().16b, t6.16b
.endm
- .macro fold64, p, reg1, reg2
- ldp q11, q12, [arg2], #0x20
+ // Fold reg1, reg2 into the next 32 data bytes, storing the result back
+ // into reg1, reg2.
+ .macro fold_32_bytes, p, reg1, reg2
+ ldp q11, q12, [buf], #0x20
- __pmull_\p v8, \reg1, v10, 2
- __pmull_\p \reg1, \reg1, v10
+ __pmull_\p v8, \reg1, fold_consts, 2
+ __pmull_\p \reg1, \reg1, fold_consts
CPU_LE( rev64 v11.16b, v11.16b )
CPU_LE( rev64 v12.16b, v12.16b )
- __pmull_\p v9, \reg2, v10, 2
- __pmull_\p \reg2, \reg2, v10
+ __pmull_\p v9, \reg2, fold_consts, 2
+ __pmull_\p \reg2, \reg2, fold_consts
CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
@@ -240,15 +236,16 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
eor \reg2\().16b, \reg2\().16b, v12.16b
.endm
- .macro fold16, p, reg, rk
- __pmull_\p v8, \reg, v10
- __pmull_\p \reg, \reg, v10, 2
- .ifnb \rk
- ldr_l q10, \rk, x8
- __pmull_pre_\p v10
+ // Fold src_reg into dst_reg, optionally loading the next fold constants
+ .macro fold_16_bytes, p, src_reg, dst_reg, load_next_consts
+ __pmull_\p v8, \src_reg, fold_consts
+ __pmull_\p \src_reg, \src_reg, fold_consts, 2
+ .ifnb \load_next_consts
+ ld1 {fold_consts.2d}, [fold_consts_ptr], #16
+ __pmull_pre_\p fold_consts
.endif
- eor v7.16b, v7.16b, v8.16b
- eor v7.16b, v7.16b, \reg\().16b
+ eor \dst_reg\().16b, \dst_reg\().16b, v8.16b
+ eor \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
.endm
.macro __pmull_p64, rd, rn, rm, n
@@ -260,40 +257,27 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
.endm
.macro crc_t10dif_pmull, p
- frame_push 3, 128
+ frame_push 4, 128
- mov arg1_low32, w0
- mov arg2, x1
- mov arg3, x2
-
- movi vzr.16b, #0 // init zero register
+ mov init_crc, w0
+ mov buf, x1
+ mov len, x2
__pmull_init_\p
- // adjust the 16-bit initial_crc value, scale it to 32 bits
- lsl arg1_low32, arg1_low32, #16
-
- // check if smaller than 256
- cmp arg3, #256
-
- // for sizes less than 128, we can't fold 64B at a time...
- b.lt .L_less_than_128_\@
+ // For sizes less than 256 bytes, we can't fold 128 bytes at a time.
+ cmp len, #256
+ b.lt .Lless_than_256_bytes_\@
- // load the initial crc value
- // crc value does not need to be byte-reflected, but it needs
- // to be moved to the high part of the register.
- // because data will be byte-reflected and will align with
- // initial crc at correct place.
- movi v10.16b, #0
- mov v10.s[3], arg1_low32 // initial crc
-
- // receive the initial 64B data, xor the initial crc value
- ldp q0, q1, [arg2]
- ldp q2, q3, [arg2, #0x20]
- ldp q4, q5, [arg2, #0x40]
- ldp q6, q7, [arg2, #0x60]
- add arg2, arg2, #0x80
+ adr_l fold_consts_ptr, .Lfold_across_128_bytes_consts
+ // Load the first 128 data bytes. Byte swapping is necessary to make
+ // the bit order match the polynomial coefficient order.
+ ldp q0, q1, [buf]
+ ldp q2, q3, [buf, #0x20]
+ ldp q4, q5, [buf, #0x40]
+ ldp q6, q7, [buf, #0x60]
+ add buf, buf, #0x80
CPU_LE( rev64 v0.16b, v0.16b )
CPU_LE( rev64 v1.16b, v1.16b )
CPU_LE( rev64 v2.16b, v2.16b )
@@ -302,7 +286,6 @@ CPU_LE( rev64 v4.16b, v4.16b )
CPU_LE( rev64 v5.16b, v5.16b )
CPU_LE( rev64 v6.16b, v6.16b )
CPU_LE( rev64 v7.16b, v7.16b )
-
CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 )
@@ -312,36 +295,29 @@ CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 )
CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 )
CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
- // XOR the initial_crc value
- eor v0.16b, v0.16b, v10.16b
-
- ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4
- // type of pmull instruction
- // will determine which constant to use
- __pmull_pre_\p v10
+ // XOR the first 16 data *bits* with the initial CRC value.
+ movi v8.16b, #0
+ mov v8.h[7], init_crc
+ eor v0.16b, v0.16b, v8.16b
- //
- // we subtract 256 instead of 128 to save one instruction from the loop
- //
- sub arg3, arg3, #256
+ // Load the constants for folding across 128 bytes.
+ ld1 {fold_consts.2d}, [fold_consts_ptr]
+ __pmull_pre_\p fold_consts
- // at this section of the code, there is 64*x+y (0<=y<64) bytes of
- // buffer. The _fold_64_B_loop will fold 64B at a time
- // until we have 64+y Bytes of buffer
+ // Subtract 128 for the 128 data bytes just consumed. Subtract another
+ // 128 to simplify the termination condition of the following loop.
+ sub len, len, #256
- // fold 64B at a time. This section of the code folds 4 vector
- // registers in parallel
-.L_fold_64_B_loop_\@:
+ // While >= 128 data bytes remain (not counting v0-v7), fold the 128
+ // bytes v0-v7 into them, storing the result back into v0-v7.
+.Lfold_128_bytes_loop_\@:
+ fold_32_bytes \p, v0, v1
+ fold_32_bytes \p, v2, v3
+ fold_32_bytes \p, v4, v5
+ fold_32_bytes \p, v6, v7
- fold64 \p, v0, v1
- fold64 \p, v2, v3
- fold64 \p, v4, v5
- fold64 \p, v6, v7
-
- subs arg3, arg3, #128
-
- // check if there is another 64B in the buffer to be able to fold
- b.lt .L_fold_64_B_end_\@
+ subs len, len, #128
+ b.lt .Lfold_128_bytes_loop_done_\@
if_will_cond_yield_neon
stp q0, q1, [sp, #.Lframe_local_offset]
@@ -353,228 +329,207 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
ldp q2, q3, [sp, #.Lframe_local_offset + 32]
ldp q4, q5, [sp, #.Lframe_local_offset + 64]
ldp q6, q7, [sp, #.Lframe_local_offset + 96]
- ldr_l q10, rk3, x8
- movi vzr.16b, #0 // init zero register
+ ld1 {fold_consts.2d}, [fold_consts_ptr]
__pmull_init_\p
- __pmull_pre_\p v10
+ __pmull_pre_\p fold_consts
endif_yield_neon
- b .L_fold_64_B_loop_\@
-
-.L_fold_64_B_end_\@:
- // at this point, the buffer pointer is pointing at the last y Bytes
- // of the buffer the 64B of folded data is in 4 of the vector
- // registers: v0, v1, v2, v3
-
- // fold the 8 vector registers to 1 vector register with different
- // constants
-
- ldr_l q10, rk9, x8
- __pmull_pre_\p v10
-
- fold16 \p, v0, rk11
- fold16 \p, v1, rk13
- fold16 \p, v2, rk15
- fold16 \p, v3, rk17
- fold16 \p, v4, rk19
- fold16 \p, v5, rk1
- fold16 \p, v6
-
- // instead of 64, we add 48 to the loop counter to save 1 instruction
- // from the loop instead of a cmp instruction, we use the negative
- // flag with the jl instruction
- adds arg3, arg3, #(128-16)
- b.lt .L_final_reduction_for_128_\@
-
- // now we have 16+y bytes left to reduce. 16 Bytes is in register v7
- // and the rest is in memory. We can fold 16 bytes at a time if y>=16
- // continue folding 16B at a time
-
-.L_16B_reduction_loop_\@:
- __pmull_\p v8, v7, v10
- __pmull_\p v7, v7, v10, 2
+ b .Lfold_128_bytes_loop_\@
+
+.Lfold_128_bytes_loop_done_\@:
+
+ // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
+
+ // Fold across 64 bytes.
+ add fold_consts_ptr, fold_consts_ptr, #16
+ ld1 {fold_consts.2d}, [fold_consts_ptr], #16
+ __pmull_pre_\p fold_consts
+ fold_16_bytes \p, v0, v4
+ fold_16_bytes \p, v1, v5
+ fold_16_bytes \p, v2, v6
+ fold_16_bytes \p, v3, v7, 1
+ // Fold across 32 bytes.
+ fold_16_bytes \p, v4, v6
+ fold_16_bytes \p, v5, v7, 1
+ // Fold across 16 bytes.
+ fold_16_bytes \p, v6, v7
+
+ // Add 128 to get the correct number of data bytes remaining in 0...127
+ // (not counting v7), following the previous extra subtraction by 128.
+ // Then subtract 16 to simplify the termination condition of the
+ // following loop.
+ adds len, len, #(128-16)
+
+ // While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7
+ // into them, storing the result back into v7.
+ b.lt .Lfold_16_bytes_loop_done_\@
+.Lfold_16_bytes_loop_\@:
+ __pmull_\p v8, v7, fold_consts
+ __pmull_\p v7, v7, fold_consts, 2
eor v7.16b, v7.16b, v8.16b
-
- ldr q0, [arg2], #16
+ ldr q0, [buf], #16
CPU_LE( rev64 v0.16b, v0.16b )
CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
eor v7.16b, v7.16b, v0.16b
- subs arg3, arg3, #16
-
- // instead of a cmp instruction, we utilize the flags with the
- // jge instruction equivalent of: cmp arg3, 16-16
- // check if there is any more 16B in the buffer to be able to fold
- b.ge .L_16B_reduction_loop_\@
-
- // now we have 16+z bytes left to reduce, where 0<= z < 16.
- // first, we reduce the data in the xmm7 register
-
-.L_final_reduction_for_128_\@:
- // check if any more data to fold. If not, compute the CRC of
- // the final 128 bits
- adds arg3, arg3, #16
- b.eq .L_128_done_\@
-
- // here we are getting data that is less than 16 bytes.
- // since we know that there was data before the pointer, we can
- // offset the input pointer before the actual point, to receive
- // exactly 16 bytes. after that the registers need to be adjusted.
-.L_get_last_two_regs_\@:
- add arg2, arg2, arg3
- ldr q1, [arg2, #-16]
-CPU_LE( rev64 v1.16b, v1.16b )
-CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
-
- // get rid of the extra data that was loaded before
- // load the shift constant
- adr_l x4, tbl_shf_table + 16
- sub x4, x4, arg3
- ld1 {v0.16b}, [x4]
-
- // shift v2 to the left by arg3 bytes
- tbl v2.16b, {v7.16b}, v0.16b
-
- // shift v7 to the right by 16-arg3 bytes
- movi v9.16b, #0x80
- eor v0.16b, v0.16b, v9.16b
- tbl v7.16b, {v7.16b}, v0.16b
-
- // blend
- sshr v0.16b, v0.16b, #7 // convert to 8-bit mask
- bsl v0.16b, v2.16b, v1.16b
-
- // fold 16 Bytes
- __pmull_\p v8, v7, v10
- __pmull_\p v7, v7, v10, 2
- eor v7.16b, v7.16b, v8.16b
- eor v7.16b, v7.16b, v0.16b
+ subs len, len, #16
+ b.ge .Lfold_16_bytes_loop_\@
+
+.Lfold_16_bytes_loop_done_\@:
+ // Add 16 to get the correct number of data bytes remaining in 0...15
+ // (not counting v7), following the previous extra subtraction by 16.
+ adds len, len, #16
+ b.eq .Lreduce_final_16_bytes_\@
+
+.Lhandle_partial_segment_\@:
+ // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
+ // 16 bytes are in v7 and the rest are the remaining data in 'buf'. To
+ // do this without needing a fold constant for each possible 'len',
+ // redivide the bytes into a first chunk of 'len' bytes and a second
+ // chunk of 16 bytes, then fold the first chunk into the second.
+
+ // v0 = last 16 original data bytes
+ add buf, buf, len
+ ldr q0, [buf, #-16]
+CPU_LE( rev64 v0.16b, v0.16b )
+CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
-.L_128_done_\@:
- // compute crc of a 128-bit value
- ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10
- __pmull_pre_\p v10
+ // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
+ adr_l x4, .Lbyteshift_table + 16
+ sub x4, x4, len
+ ld1 {v2.16b}, [x4]
+ tbl v1.16b, {v7.16b}, v2.16b
- // 64b fold
- ext v0.16b, vzr.16b, v7.16b, #8
- mov v7.d[0], v7.d[1]
- __pmull_\p v7, v7, v10
- eor v7.16b, v7.16b, v0.16b
+ // v3 = first chunk: v7 right-shifted by '16-len' bytes.
+ movi v3.16b, #0x80
+ eor v2.16b, v2.16b, v3.16b
+ tbl v3.16b, {v7.16b}, v2.16b
- // 32b fold
- ext v0.16b, v7.16b, vzr.16b, #4
- mov v7.s[3], vzr.s[0]
- __pmull_\p v0, v0, v10, 2
- eor v7.16b, v7.16b, v0.16b
+ // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
+ sshr v2.16b, v2.16b, #7
- // barrett reduction
- ldr_l q10, rk7, x8
- __pmull_pre_\p v10
- mov v0.d[0], v7.d[1]
+ // v2 = second chunk: 'len' bytes from v0 (low-order bytes),
+ // then '16-len' bytes from v1 (high-order bytes).
+ bsl v2.16b, v1.16b, v0.16b
- __pmull_\p v0, v0, v10
- ext v0.16b, vzr.16b, v0.16b, #12
- __pmull_\p v0, v0, v10, 2
- ext v0.16b, vzr.16b, v0.16b, #12
+ // Fold the first chunk into the second chunk, storing the result in v7.
+ __pmull_\p v0, v3, fold_consts
+ __pmull_\p v7, v3, fold_consts, 2
eor v7.16b, v7.16b, v0.16b
- mov w0, v7.s[1]
-
-.L_cleanup_\@:
- // scale the result back to 16 bits
- lsr x0, x0, #16
+ eor v7.16b, v7.16b, v2.16b
+
+.Lreduce_final_16_bytes_\@:
+ // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
+
+ movi v2.16b, #0 // init zero register
+
+ // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
+ ld1 {fold_consts.2d}, [fold_consts_ptr], #16
+ __pmull_pre_\p fold_consts
+
+ // Fold the high 64 bits into the low 64 bits, while also multiplying by
+ // x^64. This produces a 128-bit value congruent to x^64 * M(x) and
+ // whose low 48 bits are 0.
+ ext v0.16b, v2.16b, v7.16b, #8
+ __pmull_\p v7, v7, fold_consts, 2 // high bits * x^48 * (x^80 mod G(x))
+ eor v0.16b, v0.16b, v7.16b // + low bits * x^64
+
+ // Fold the high 32 bits into the low 96 bits. This produces a 96-bit
+ // value congruent to x^64 * M(x) and whose low 48 bits are 0.
+ ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
+ mov v0.s[3], v2.s[0] // zero high 32 bits
+ __pmull_\p v1, v1, fold_consts // high 32 bits * x^48 * (x^48 mod G(x))
+ eor v0.16b, v0.16b, v1.16b // + low bits
+
+ // Load G(x) and floor(x^48 / G(x)).
+ ld1 {fold_consts.2d}, [fold_consts_ptr]
+ __pmull_pre_\p fold_consts
+
+ // Use Barrett reduction to compute the final CRC value.
+ __pmull_\p v1, v0, fold_consts, 2 // high 32 bits * floor(x^48 / G(x))
+ ushr v1.2d, v1.2d, #32 // /= x^32
+ __pmull_\p v1, v1, fold_consts // *= G(x)
+ ushr v0.2d, v0.2d, #48
+ eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits
+ // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
+
+ umov w0, v0.h[0]
frame_pop
ret
-.L_less_than_128_\@:
- cbz arg3, .L_cleanup_\@
+.Lless_than_256_bytes_\@:
+ // Checksumming a buffer of length 16...255 bytes
- movi v0.16b, #0
- mov v0.s[3], arg1_low32 // get the initial crc value
+ adr_l fold_consts_ptr, .Lfold_across_16_bytes_consts
- ldr q7, [arg2], #0x10
+ // Load the first 16 data bytes.
+ ldr q7, [buf], #0x10
CPU_LE( rev64 v7.16b, v7.16b )
CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
- eor v7.16b, v7.16b, v0.16b // xor the initial crc value
-
- cmp arg3, #16
- b.eq .L_128_done_\@ // exactly 16 left
- b.lt .L_less_than_16_left_\@
-
- ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10
- __pmull_pre_\p v10
-
- // update the counter. subtract 32 instead of 16 to save one
- // instruction from the loop
- subs arg3, arg3, #32
- b.ge .L_16B_reduction_loop_\@
-
- add arg3, arg3, #16
- b .L_get_last_two_regs_\@
-
-.L_less_than_16_left_\@:
- // shl r9, 4
- adr_l x0, tbl_shf_table + 16
- sub x0, x0, arg3
- ld1 {v0.16b}, [x0]
- movi v9.16b, #0x80
- eor v0.16b, v0.16b, v9.16b
- tbl v7.16b, {v7.16b}, v0.16b
- b .L_128_done_\@
+
+ // XOR the first 16 data *bits* with the initial CRC value.
+ movi v0.16b, #0
+ mov v0.h[7], init_crc
+ eor v7.16b, v7.16b, v0.16b
+
+ // Load the fold-across-16-bytes constants.
+ ld1 {fold_consts.2d}, [fold_consts_ptr], #16
+ __pmull_pre_\p fold_consts
+
+ cmp len, #16
+ b.eq .Lreduce_final_16_bytes_\@ // len == 16
+ subs len, len, #32
+ b.ge .Lfold_16_bytes_loop_\@ // 32 <= len <= 255
+ add len, len, #16
+ b .Lhandle_partial_segment_\@ // 17 <= len <= 31
.endm
+//
+// u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len);
+//
+// Assumes len >= 16.
+//
ENTRY(crc_t10dif_pmull_p8)
crc_t10dif_pmull p8
ENDPROC(crc_t10dif_pmull_p8)
.align 5
+//
+// u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
+//
+// Assumes len >= 16.
+//
ENTRY(crc_t10dif_pmull_p64)
crc_t10dif_pmull p64
ENDPROC(crc_t10dif_pmull_p64)
-// precomputed constants
-// these constants are precomputed from the poly:
-// 0x8bb70000 (0x8bb7 scaled to 32 bits)
.section ".rodata", "a"
.align 4
-// Q = 0x18BB70000
-// rk1 = 2^(32*3) mod Q << 32
-// rk2 = 2^(32*5) mod Q << 32
-// rk3 = 2^(32*15) mod Q << 32
-// rk4 = 2^(32*17) mod Q << 32
-// rk5 = 2^(32*3) mod Q << 32
-// rk6 = 2^(32*2) mod Q << 32
-// rk7 = floor(2^64/Q)
-// rk8 = Q
-
-rk1: .octa 0x06df0000000000002d56000000000000
-rk3: .octa 0x7cf50000000000009d9d000000000000
-rk5: .octa 0x13680000000000002d56000000000000
-rk7: .octa 0x000000018bb7000000000001f65a57f8
-rk9: .octa 0xbfd6000000000000ceae000000000000
-rk11: .octa 0x713c0000000000001e16000000000000
-rk13: .octa 0x80a6000000000000f7f9000000000000
-rk15: .octa 0xe658000000000000044c000000000000
-rk17: .octa 0xa497000000000000ad18000000000000
-rk19: .octa 0xe7b50000000000006ee3000000000000
-
-tbl_shf_table:
-// use these values for shift constants for the tbl/tbx instruction
-// different alignments result in values as shown:
-// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
-// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
-// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
-// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
-// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
-// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
-// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
-// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
-// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
-// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
-// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
-// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
-// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
-// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
-// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
+// Fold constants precomputed from the polynomial 0x18bb7
+// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+.Lfold_across_128_bytes_consts:
+ .quad 0x0000000000006123 // x^(8*128) mod G(x)
+ .quad 0x0000000000002295 // x^(8*128+64) mod G(x)
+// .Lfold_across_64_bytes_consts:
+ .quad 0x0000000000001069 // x^(4*128) mod G(x)
+ .quad 0x000000000000dd31 // x^(4*128+64) mod G(x)
+// .Lfold_across_32_bytes_consts:
+ .quad 0x000000000000857d // x^(2*128) mod G(x)
+ .quad 0x0000000000007acc // x^(2*128+64) mod G(x)
+.Lfold_across_16_bytes_consts:
+ .quad 0x000000000000a010 // x^(1*128) mod G(x)
+ .quad 0x0000000000001faa // x^(1*128+64) mod G(x)
+// .Lfinal_fold_consts:
+ .quad 0x1368000000000000 // x^48 * (x^48 mod G(x))
+ .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x))
+// .Lbarrett_reduction_consts:
+ .quad 0x0000000000018bb7 // G(x)
+ .quad 0x00000001f65a57f8 // floor(x^48 / G(x))
+
+// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
+// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
+// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
+.Lbyteshift_table:
.byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
.byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
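
Note on the rewritten constant table above: every fold constant is documented as a power of x reduced modulo G(x) = 0x18bb7, alongside the Barrett constant floor(x^48 / G(x)) = 0x1f65a57f8. The remainders can be reproduced with plain carry-less arithmetic; the helper below is an editorial sketch (not part of the kernel) that multiplies by x one bit at a time and reduces whenever degree 16 is reached.

/*
 * Editorial sketch: reproduce the "x^N mod G(x)" fold constants from the
 * table above.  G(x) = 0x18bb7 is the CRC-T10DIF generator polynomial.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t xpow_mod_g(unsigned int n)
{
	const uint32_t g = 0x18bb7;	/* x^16 + x^15 + x^11 + ... + 1 */
	uint32_t r = 1;			/* start from x^0 */

	while (n--) {
		r <<= 1;		/* multiply by x */
		if (r & 0x10000)	/* degree 16 reached: reduce mod G */
			r ^= g;
	}
	return r;			/* 16-bit remainder */
}

int main(void)
{
	/* The table above lists 0xa010 and 0x1faa for these two entries. */
	printf("x^(1*128)    mod G(x) = 0x%04x\n", xpow_mod_g(128));
	printf("x^(1*128+64) mod G(x) = 0x%04x\n", xpow_mod_g(192));
	return 0;
}
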
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index b461d62023f2..dd325829ee44 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -22,10 +22,8 @@
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
-asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 buf[], u64 len);
-asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 buf[], u64 len);
-
-static u16 (*crc_t10dif_pmull)(u16 init_crc, const u8 buf[], u64 len);
+asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len);
+asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
static int crct10dif_init(struct shash_desc *desc)
{
@@ -35,30 +33,33 @@ static int crct10dif_init(struct shash_desc *desc)
return 0;
}
-static int crct10dif_update(struct shash_desc *desc, const u8 *data,
+static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u16 *crc = shash_desc_ctx(desc);
- unsigned int l;
- if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
- l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
- ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull_p8(*crc, data, length);
+ kernel_neon_end();
+ } else {
+ *crc = crc_t10dif_generic(*crc, data, length);
+ }
- *crc = crc_t10dif_generic(*crc, data, l);
+ return 0;
+}
- length -= l;
- data += l;
- }
+static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ u16 *crc = shash_desc_ctx(desc);
- if (length > 0) {
- if (may_use_simd()) {
- kernel_neon_begin();
- *crc = crc_t10dif_pmull(*crc, data, length);
- kernel_neon_end();
- } else {
- *crc = crc_t10dif_generic(*crc, data, length);
- }
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull_p64(*crc, data, length);
+ kernel_neon_end();
+ } else {
+ *crc = crc_t10dif_generic(*crc, data, length);
}
return 0;
@@ -72,10 +73,22 @@ static int crct10dif_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static struct shash_alg crc_t10dif_alg = {
+static struct shash_alg crc_t10dif_alg[] = {{
.digestsize = CRC_T10DIF_DIGEST_SIZE,
.init = crct10dif_init,
- .update = crct10dif_update,
+ .update = crct10dif_update_pmull_p8,
+ .final = crct10dif_final,
+ .descsize = CRC_T10DIF_DIGEST_SIZE,
+
+ .base.cra_name = "crct10dif",
+ .base.cra_driver_name = "crct10dif-arm64-neon",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = CRC_T10DIF_DIGEST_SIZE,
+ .init = crct10dif_init,
+ .update = crct10dif_update_pmull_p64,
.final = crct10dif_final,
.descsize = CRC_T10DIF_DIGEST_SIZE,
@@ -84,21 +97,25 @@ static struct shash_alg crc_t10dif_alg = {
.base.cra_priority = 200,
.base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
-};
+}};
static int __init crc_t10dif_mod_init(void)
{
if (elf_hwcap & HWCAP_PMULL)
- crc_t10dif_pmull = crc_t10dif_pmull_p64;
+ return crypto_register_shashes(crc_t10dif_alg,
+ ARRAY_SIZE(crc_t10dif_alg));
else
- crc_t10dif_pmull = crc_t10dif_pmull_p8;
-
- return crypto_register_shash(&crc_t10dif_alg);
+ /* only register the first array element */
+ return crypto_register_shash(crc_t10dif_alg);
}
static void __exit crc_t10dif_mod_exit(void)
{
- crypto_unregister_shash(&crc_t10dif_alg);
+ if (elf_hwcap & HWCAP_PMULL)
+ crypto_unregister_shashes(crc_t10dif_alg,
+ ARRAY_SIZE(crc_t10dif_alg));
+ else
+ crypto_unregister_shash(crc_t10dif_alg);
}
module_cpu_feature_match(ASIMD, crc_t10dif_mod_init);
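
Note on the glue changes above: instead of choosing a single implementation through a function pointer at module init, the driver now registers two "crct10dif" shash entries, "crct10dif-arm64-neon" at priority 100 and, only when HWCAP_PMULL is present, the PMULL/p64 entry at priority 200. Users of the generic algorithm name are unaffected, since the crypto core selects the highest-priority registered implementation. The sketch below is standard shash API usage (not code from this patch), showing that a caller never names a driver explicitly.

/*
 * Editorial sketch of ordinary shash API usage for "crct10dif".
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int t10dif_digest(const u8 *buf, unsigned int len, u8 out[2])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("crct10dif", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}

	desc->tfm = tfm;
	err = crypto_shash_digest(desc, buf, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}
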
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 067d8937d5af..791ad422c427 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -60,10 +60,6 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
struct ghash_key const *k,
const char *head);
-static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
- struct ghash_key const *k,
- const char *head);
-
asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
const u8 src[], struct ghash_key const *k,
u8 ctr[], u32 const rk[], int rounds,
@@ -87,11 +83,15 @@ static int ghash_init(struct shash_desc *desc)
}
static void ghash_do_update(int blocks, u64 dg[], const char *src,
- struct ghash_key *key, const char *head)
+ struct ghash_key *key, const char *head,
+ void (*simd_update)(int blocks, u64 dg[],
+ const char *src,
+ struct ghash_key const *k,
+ const char *head))
{
if (likely(may_use_simd())) {
kernel_neon_begin();
- pmull_ghash_update(blocks, dg, src, key, head);
+ simd_update(blocks, dg, src, key, head);
kernel_neon_end();
} else {
be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
@@ -119,8 +119,12 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
/* avoid hogging the CPU for too long */
#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE)
-static int ghash_update(struct shash_desc *desc, const u8 *src,
- unsigned int len)
+static int __ghash_update(struct shash_desc *desc, const u8 *src,
+ unsigned int len,
+ void (*simd_update)(int blocks, u64 dg[],
+ const char *src,
+ struct ghash_key const *k,
+ const char *head))
{
struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
@@ -146,7 +150,8 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
int chunk = min(blocks, MAX_BLOCKS);
ghash_do_update(chunk, ctx->digest, src, key,
- partial ? ctx->buf : NULL);
+ partial ? ctx->buf : NULL,
+ simd_update);
blocks -= chunk;
src += chunk * GHASH_BLOCK_SIZE;
@@ -158,7 +163,19 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
return 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_update_p8(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
+{
+ return __ghash_update(desc, src, len, pmull_ghash_update_p8);
+}
+
+static int ghash_update_p64(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
+{
+ return __ghash_update(desc, src, len, pmull_ghash_update_p64);
+}
+
+static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
@@ -168,7 +185,28 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
- ghash_do_update(1, ctx->digest, ctx->buf, key, NULL);
+ ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
+ pmull_ghash_update_p8);
+ }
+ put_unaligned_be64(ctx->digest[1], dst);
+ put_unaligned_be64(ctx->digest[0], dst + 8);
+
+ *ctx = (struct ghash_desc_ctx){};
+ return 0;
+}
+
+static int ghash_final_p64(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+ if (partial) {
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+
+ memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
+
+ ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
+ pmull_ghash_update_p64);
}
put_unaligned_be64(ctx->digest[1], dst);
put_unaligned_be64(ctx->digest[0], dst + 8);
@@ -224,7 +262,21 @@ static int ghash_setkey(struct crypto_shash *tfm,
return __ghash_setkey(key, inkey, keylen);
}
-static struct shash_alg ghash_alg = {
+static struct shash_alg ghash_alg[] = {{
+ .base.cra_name = "ghash",
+ .base.cra_driver_name = "ghash-neon",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = GHASH_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ghash_key),
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update_p8,
+ .final = ghash_final_p8,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+}, {
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-ce",
.base.cra_priority = 200,
@@ -234,11 +286,11 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
- .update = ghash_update,
- .final = ghash_final,
+ .update = ghash_update_p64,
+ .final = ghash_final_p64,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
-};
+}};
static int num_rounds(struct crypto_aes_ctx *ctx)
{
@@ -301,7 +353,8 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
int blocks = count / GHASH_BLOCK_SIZE;
ghash_do_update(blocks, dg, src, &ctx->ghash_key,
- *buf_count ? buf : NULL);
+ *buf_count ? buf : NULL,
+ pmull_ghash_update_p64);
src += blocks * GHASH_BLOCK_SIZE;
count %= GHASH_BLOCK_SIZE;
@@ -345,7 +398,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
if (buf_count) {
memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
- ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+ ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL,
+ pmull_ghash_update_p64);
}
}
@@ -358,7 +412,8 @@ static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
lengths.a = cpu_to_be64(req->assoclen * 8);
lengths.b = cpu_to_be64(cryptlen * 8);
- ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL);
+ ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL,
+ pmull_ghash_update_p64);
put_unaligned_be64(dg[1], mac);
put_unaligned_be64(dg[0], mac + 8);
@@ -434,7 +489,7 @@ static int gcm_encrypt(struct aead_request *req)
ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
walk.dst.virt.addr, &ctx->ghash_key,
- NULL);
+ NULL, pmull_ghash_update_p64);
err = skcipher_walk_done(&walk,
walk.nbytes % (2 * AES_BLOCK_SIZE));
@@ -469,7 +524,8 @@ static int gcm_encrypt(struct aead_request *req)
memcpy(buf, dst, nbytes);
memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
- ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
+ ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
+ pmull_ghash_update_p64);
err = skcipher_walk_done(&walk, 0);
}
@@ -558,7 +614,8 @@ static int gcm_decrypt(struct aead_request *req)
u8 *src = walk.src.virt.addr;
ghash_do_update(blocks, dg, walk.src.virt.addr,
- &ctx->ghash_key, NULL);
+ &ctx->ghash_key, NULL,
+ pmull_ghash_update_p64);
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -602,7 +659,8 @@ static int gcm_decrypt(struct aead_request *req)
memcpy(buf, src, nbytes);
memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
- ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
+ ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
+ pmull_ghash_update_p64);
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
walk.nbytes);
@@ -650,26 +708,30 @@ static int __init ghash_ce_mod_init(void)
return -ENODEV;
if (elf_hwcap & HWCAP_PMULL)
- pmull_ghash_update = pmull_ghash_update_p64;
-
+ ret = crypto_register_shashes(ghash_alg,
+ ARRAY_SIZE(ghash_alg));
else
- pmull_ghash_update = pmull_ghash_update_p8;
+ /* only register the first array element */
+ ret = crypto_register_shash(ghash_alg);
- ret = crypto_register_shash(&ghash_alg);
if (ret)
return ret;
if (elf_hwcap & HWCAP_PMULL) {
ret = crypto_register_aead(&gcm_aes_alg);
if (ret)
- crypto_unregister_shash(&ghash_alg);
+ crypto_unregister_shashes(ghash_alg,
+ ARRAY_SIZE(ghash_alg));
}
return ret;
}
static void __exit ghash_ce_mod_exit(void)
{
- crypto_unregister_shash(&ghash_alg);
+ if (elf_hwcap & HWCAP_PMULL)
+ crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
+ else
+ crypto_unregister_shash(ghash_alg);
crypto_unregister_aead(&gcm_aes_alg);
}
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 2def77ec14be..7628efbe6c12 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -18,6 +18,7 @@
#include <asm/cputype.h>
#include <asm/io.h>
+#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -110,9 +111,10 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
-
+int apei_claim_sea(struct pt_regs *regs);
#else
static inline void acpi_init_cpus(void) { }
+static inline int apei_claim_sea(struct pt_regs *regs) { return -ENOENT; }
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h
index 2173ad32d550..1c9a3a0c5fa5 100644
--- a/arch/arm64/include/asm/asm-prototypes.h
+++ b/arch/arm64/include/asm/asm-prototypes.h
@@ -2,7 +2,7 @@
#ifndef __ASM_PROTOTYPES_H
#define __ASM_PROTOTYPES_H
/*
- * CONFIG_MODEVERIONS requires a C declaration to generate the appropriate CRC
+ * CONFIG_MODVERSIONS requires a C declaration to generate the appropriate CRC
* for each symbol. Since commit:
*
* 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm")
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9bca54dda75c..1f4e9ee641c9 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -42,124 +42,131 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_add_return_acquire atomic_add_return_acquire
-#define atomic_add_return_release atomic_add_return_release
-#define atomic_add_return atomic_add_return
-
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#define atomic_sub_return_release atomic_sub_return_release
-#define atomic_sub_return atomic_sub_return
-
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#define atomic_fetch_add_release atomic_fetch_add_release
-#define atomic_fetch_add atomic_fetch_add
-
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#define atomic_fetch_sub atomic_fetch_sub
-
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#define atomic_fetch_and_release atomic_fetch_and_release
-#define atomic_fetch_and atomic_fetch_and
-
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#define atomic_fetch_andnot atomic_fetch_andnot
-
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#define atomic_fetch_or_release atomic_fetch_or_release
-#define atomic_fetch_or atomic_fetch_or
-
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#define atomic_fetch_xor atomic_fetch_xor
-
-#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
-#define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new))
-#define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new))
-#define atomic_xchg(v, new) xchg(&((v)->counter), (new))
-
-#define atomic_cmpxchg_relaxed(v, old, new) \
- cmpxchg_relaxed(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_acquire(v, old, new) \
- cmpxchg_acquire(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_release(v, old, new) \
- cmpxchg_release(&((v)->counter), (old), (new))
-#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
-
-#define atomic_andnot atomic_andnot
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#define arch_atomic_add_return arch_atomic_add_return
+
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#define arch_atomic_sub_return arch_atomic_sub_return
+
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+#define arch_atomic_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg_acquire(v, new) \
+ arch_xchg_acquire(&((v)->counter), (new))
+#define arch_atomic_xchg_release(v, new) \
+ arch_xchg_release(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+ arch_xchg(&((v)->counter), (new))
+
+#define arch_atomic_cmpxchg_relaxed(v, old, new) \
+ arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_acquire(v, old, new) \
+ arch_cmpxchg_acquire(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_release(v, old, new) \
+ arch_cmpxchg_release(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg(v, old, new) \
+ arch_cmpxchg(&((v)->counter), (old), (new))
+
+#define arch_atomic_andnot arch_atomic_andnot
/*
- * 64-bit atomic operations.
+ * 64-bit arch_atomic operations.
*/
-#define ATOMIC64_INIT ATOMIC_INIT
-#define atomic64_read atomic_read
-#define atomic64_set atomic_set
-
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#define atomic64_add_return_release atomic64_add_return_release
-#define atomic64_add_return atomic64_add_return
-
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#define atomic64_sub_return_release atomic64_sub_return_release
-#define atomic64_sub_return atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#define atomic64_fetch_add atomic64_fetch_add
-
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#define atomic64_fetch_sub atomic64_fetch_sub
-
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#define atomic64_fetch_and atomic64_fetch_and
-
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#define atomic64_fetch_or atomic64_fetch_or
-
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#define atomic64_fetch_xor atomic64_fetch_xor
-
-#define atomic64_xchg_relaxed atomic_xchg_relaxed
-#define atomic64_xchg_acquire atomic_xchg_acquire
-#define atomic64_xchg_release atomic_xchg_release
-#define atomic64_xchg atomic_xchg
-
-#define atomic64_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire atomic_cmpxchg_acquire
-#define atomic64_cmpxchg_release atomic_cmpxchg_release
-#define atomic64_cmpxchg atomic_cmpxchg
-
-#define atomic64_andnot atomic64_andnot
-
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define ATOMIC64_INIT ATOMIC_INIT
+#define arch_atomic64_read arch_atomic_read
+#define arch_atomic64_set arch_atomic_set
+
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#define arch_atomic64_add_return arch_atomic64_add_return
+
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+
+#define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire
+#define arch_atomic64_xchg_release arch_atomic_xchg_release
+#define arch_atomic64_xchg arch_atomic_xchg
+
+#define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release
+#define arch_atomic64_cmpxchg arch_atomic_cmpxchg
+
+#define arch_atomic64_andnot arch_atomic64_andnot
+
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+
+#include <asm-generic/atomic-instrumented.h>
#endif
#endif
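
Note on the atomic.h hunk above: the point of renaming the primitives to arch_atomic_*/arch_atomic64_* (and arch_xchg/arch_cmpxchg in the cmpxchg.h hunk further below) and then including <asm-generic/atomic-instrumented.h> is that the un-prefixed atomic_*() entry points become generated wrappers that add KASAN checks around the arch implementation. The sketch below shows the approximate shape of those wrappers in the generic header of this era; it is simplified and not taken from this patch.

/*
 * Editorial sketch: approximate shape of the wrappers provided by
 * <asm-generic/atomic-instrumented.h> once the arch_ prefixed
 * primitives exist.
 */
static __always_inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}

static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}
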
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index af7b99005453..e321293e0c89 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -39,7 +39,7 @@
#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
-__LL_SC_EXPORT(atomic_##op);
+__LL_SC_EXPORT(arch_atomic_##op);
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
\
return result; \
} \
-__LL_SC_EXPORT(atomic_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic_##op##_return##name);
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int val, result; \
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
\
return result; \
} \
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
#define ATOMIC_OPS(...) \
ATOMIC_OP(__VA_ARGS__) \
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \
{ \
long result; \
unsigned long tmp; \
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
-__LL_SC_EXPORT(atomic64_##op);
+__LL_SC_EXPORT(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
{ \
long result; \
unsigned long tmp; \
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
\
return result; \
} \
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
long result, val; \
unsigned long tmp; \
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
\
return result; \
} \
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
#define ATOMIC64_OPS(...) \
ATOMIC64_OP(__VA_ARGS__) \
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
#undef ATOMIC64_OP
__LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
long result;
unsigned long tmp;
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
return result;
}
-__LL_SC_EXPORT(atomic64_dec_if_positive);
+__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
__LL_SC_INLINE u##sz \
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index a424355240c5..9256a3921e4b 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -25,9 +25,9 @@
#error "please don't include this file directly"
#endif
-#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
+#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op)
#define ATOMIC_OP(op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OPS
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
-static inline int atomic_add_return##name(int i, atomic_t *v) \
+static inline int arch_atomic_add_return##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC_OP_ADD_RETURN
-static inline void atomic_and(int i, atomic_t *v)
+static inline void arch_atomic_and(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v)
}
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
-static inline int atomic_fetch_and##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND
-static inline void atomic_sub(int i, atomic_t *v)
+static inline void arch_atomic_sub(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v)
}
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
-static inline int atomic_sub_return##name(int i, atomic_t *v) \
+static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC_OP_SUB_RETURN
#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
-static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
-#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
+#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op)
#define ATOMIC64_OP(op, asm_op) \
-static inline void atomic64_##op(long i, atomic64_t *v) \
+static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
-static inline long atomic64_add_return##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC64_OP_ADD_RETURN
-static inline void atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
@@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
-static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
#undef ATOMIC64_FETCH_OP_AND
-static inline void atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
@@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
-static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC64_OP_SUB_RETURN
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
-static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC64_FETCH_OP_SUB
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
register long x0 asm ("x0") = (long)v;
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 13dd42c3ad4e..926434f413fa 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -58,6 +58,10 @@
*/
#define ARCH_DMA_MINALIGN (128)
+#ifdef CONFIG_KASAN_SW_TAGS
+#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3f9376f1c409..e6ea0f42e097 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -110,10 +110,10 @@ __XCHG_GEN(_mb)
})
/* xchg */
-#define xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
-#define xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
-#define xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
-#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
+#define arch_xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
+#define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
+#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
+#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
#define __CMPXCHG_GEN(sfx) \
static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
@@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb)
})
/* cmpxchg */
-#define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
-#define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
-#define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
-#define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
-#define cmpxchg_local cmpxchg_relaxed
+#define arch_cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
+#define arch_cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
+#define arch_cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
+#define arch_cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
+#define arch_cmpxchg_local arch_cmpxchg_relaxed
/* cmpxchg64 */
-#define cmpxchg64_relaxed cmpxchg_relaxed
-#define cmpxchg64_acquire cmpxchg_acquire
-#define cmpxchg64_release cmpxchg_release
-#define cmpxchg64 cmpxchg
-#define cmpxchg64_local cmpxchg_local
+#define arch_cmpxchg64_relaxed arch_cmpxchg_relaxed
+#define arch_cmpxchg64_acquire arch_cmpxchg_acquire
+#define arch_cmpxchg64_release arch_cmpxchg_release
+#define arch_cmpxchg64 arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double() 1
@@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb)
VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})
-#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
-({\
- int __ret;\
- __cmpxchg_double_check(ptr1, ptr2); \
- __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
- (unsigned long)(n1), (unsigned long)(n2), \
- ptr1); \
- __ret; \
+#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+ int __ret; \
+ __cmpxchg_double_check(ptr1, ptr2); \
+ __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2), \
+ ptr1); \
+ __ret; \
})
-#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
-({\
- int __ret;\
- __cmpxchg_double_check(ptr1, ptr2); \
- __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
- (unsigned long)(n1), (unsigned long)(n2), \
- ptr1); \
- __ret; \
+#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+ int __ret; \
+ __cmpxchg_double_check(ptr1, ptr2); \
+ __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2), \
+ ptr1); \
+ __ret; \
})
#define __CMPWAIT_CASE(w, sfx, sz) \
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 8d91f2233135..fa90779fc752 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -20,6 +20,7 @@
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 3dd3d664c5c5..4658c937e173 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -20,9 +20,6 @@ struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
-#ifdef CONFIG_XEN
- const struct dma_map_ops *dev_dma_ops;
-#endif
};
struct pdev_archdata {
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index ec1e6d6fa14c..f987b8a8f325 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -55,7 +55,11 @@ enum fixed_addresses {
#ifdef CONFIG_ACPI_APEI_GHES
/* Used for GHES mapping from assorted contexts */
FIX_APEI_GHES_IRQ,
- FIX_APEI_GHES_NMI,
+ FIX_APEI_GHES_SEA,
+#ifdef CONFIG_ARM_SDE_INTERFACE
+ FIX_APEI_GHES_SDEI_NORMAL,
+ FIX_APEI_GHES_SDEI_CRITICAL,
+#endif
#endif /* CONFIG_ACPI_APEI_GHES */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index fb6609875455..c6a07a3b433e 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -20,6 +20,11 @@
#include <asm/page.h>
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+extern bool arch_hugetlb_migration_supported(struct hstate *h);
+#endif
+
#define __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7732d0ba4e60..da3fc7324d68 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,7 @@
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -208,6 +209,13 @@ struct kvm_cpu_context {
typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct vcpu_reset_state {
+ unsigned long pc;
+ unsigned long r0;
+ bool be;
+ bool reset;
+};
+
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch {
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
u64 vsesr_el2;
+ /* Additional reset state */
+ struct vcpu_reset_state reset_state;
+
/* True when deferrable sysregs are loaded on the physical CPU,
* see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
bool sysregs_loaded_on_cpu;
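
As a hypothetical sketch (the real PSCI code is not part of this hunk), a CPU_ON handler could populate the new reset_state and defer the reset to the target vCPU via KVM_REQ_VCPU_RESET; kvm_make_request() and kvm_vcpu_kick() are the stock KVM helpers assumed here.

static void example_psci_cpu_on(struct kvm_vcpu *target, unsigned long entry,
				unsigned long context_id, bool big_endian)
{
	struct vcpu_reset_state *reset = &target->arch.reset_state;

	reset->pc = entry;		/* guest entry point */
	reset->r0 = context_id;		/* handed over in r0/x0 */
	reset->be = big_endian;
	reset->reset = true;

	/* Let the target vcpu perform the reset in its own run loop. */
	kvm_make_request(KVM_REQ_VCPU_RESET, target);
	kvm_vcpu_kick(target);
}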
diff --git a/arch/arm64/include/asm/kvm_ras.h b/arch/arm64/include/asm/kvm_ras.h
new file mode 100644
index 000000000000..8ac6ee77437c
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_ras.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - Arm Ltd */
+
+#ifndef __ARM64_KVM_RAS_H__
+#define __ARM64_KVM_RAS_H__
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <asm/acpi.h>
+
+/*
+ * Was this synchronous external abort a RAS notification?
+ * Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
+ */
+static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
+{
+ /* apei_claim_sea(NULL) expects to mask interrupts itself */
+ lockdep_assert_irqs_enabled();
+
+ return apei_claim_sea(NULL);
+}
+
+#endif /* __ARM64_KVM_RAS_H__ */
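
A hypothetical caller sketch (the real call site sits in the KVM stage-2 abort path, outside this diff): a return of 0 means the firmware-first machinery claimed the error, anything else means the caller must deal with the abort itself, for example by reflecting it to the guest.

	if (kvm_handle_guest_sea(fault_ipa, esr))
		kvm_inject_vabt(vcpu);	/* not claimed by APEI: inject a virtual abort */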
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index e1ec947e7c0c..b01ef0180a03 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -80,11 +80,7 @@
*/
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
-#ifdef CONFIG_KASAN_EXTRA
-#define KASAN_THREAD_SHIFT 2
-#else
#define KASAN_THREAD_SHIFT 1
-#endif /* CONFIG_KASAN_EXTRA */
#else
#define KASAN_SHADOW_SIZE (0)
#define KASAN_THREAD_SHIFT 0
@@ -332,6 +328,17 @@ static inline void *phys_to_virt(phys_addr_t x)
#define virt_addr_valid(kaddr) \
(_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
+/*
+ * Given that the GIC architecture permits ITS implementations that can only be
+ * configured with an LPI table address once, GICv3 systems with many CPUs may
+ * end up reserving a lot of different regions after a kexec for their LPI
+ * tables (one per CPU), as we are forced to reuse the same memory after kexec
+ * (and thus reserve it persistently with EFI beforehand)
+ */
+#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
+# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
+#endif
+
#include <asm-generic/memory_model.h>
#endif
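
Worked sizing example, assuming INIT_MEMBLOCK_REGIONS keeps its generic default of 128: an EFI + GICv3/ITS kernel built with NR_CPUS=256 ends up with a static reserved-regions array of

	128 + 256 + 1 = 385 entries

which leaves room for one persistently reserved LPI table per CPU across kexec.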
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 7689c7aa1d77..3e8063f4f9d3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_MMU_H
#define __ASM_MMU_H
+#include <asm/cputype.h>
+
#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
#define USER_ASID_BIT 48
#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
@@ -44,6 +46,48 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
+static inline bool arm64_kernel_use_ng_mappings(void)
+{
+ bool tx1_bug;
+
+ /* What's a kpti? Use global mappings if we don't know. */
+ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+ return false;
+
+ /*
+ * Note: this function is called before the CPU capabilities have
+ * been configured, so our early mappings will be global. If we
+ * later determine that kpti is required, then
+ * kpti_install_ng_mappings() will make them non-global.
+ */
+ if (arm64_kernel_unmapped_at_el0())
+ return true;
+
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return false;
+
+ /*
+ * KASLR is enabled so we're going to be enabling kpti on non-broken
+ * CPUs regardless of their susceptibility to Meltdown. Rather
+ * than force everybody to go through the G -> nG dance later on,
+ * just put down non-global mappings from the beginning.
+ */
+ if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+ tx1_bug = false;
+#ifndef MODULE
+ } else if (!static_branch_likely(&arm64_const_caps_ready)) {
+ extern const struct midr_range cavium_erratum_27456_cpus[];
+
+ tx1_bug = is_midr_in_range_list(read_cpuid_id(),
+ cavium_erratum_27456_cpus);
+#endif
+ } else {
+ tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
+ }
+
+ return !tx1_bug && kaslr_offset() > 0;
+}
+
typedef void (*bp_hardening_cb_t)(void);
struct bp_hardening_data {
diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h
index 2ba6c6b9541f..71abfc7612b2 100644
--- a/arch/arm64/include/asm/neon-intrinsics.h
+++ b/arch/arm64/include/asm/neon-intrinsics.h
@@ -36,4 +36,8 @@
#include <arm_neon.h>
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#pragma clang diagnostic ignored "-Wincompatible-pointer-types"
+#endif
+
#endif /* __ASM_NEON_INTRINSICS_H */
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 78b942c1bea4..986e41c4c32b 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -37,8 +37,8 @@
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
-#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
+#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h
index eee31a9f72a5..e9c1a02c2154 100644
--- a/arch/arm64/include/asm/sync_bitops.h
+++ b/arch/arm64/include/asm/sync_bitops.h
@@ -15,13 +15,13 @@
* ops which are SMP safe even on a UP kernel.
*/
-#define sync_set_bit(nr, p) set_bit(nr, p)
-#define sync_clear_bit(nr, p) clear_bit(nr, p)
-#define sync_change_bit(nr, p) change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
-#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+#define sync_set_bit(nr, p) set_bit(nr, p)
+#define sync_clear_bit(nr, p) clear_bit(nr, p)
+#define sync_change_bit(nr, p) change_bit(nr, p)
+#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
+#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
+#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
+#define sync_test_bit(nr, addr) test_bit(nr, addr)
+#define arch_sync_cmpxchg arch_cmpxchg
#endif
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 0e2a0ecaf484..32693f34f431 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -46,8 +46,6 @@ extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-int handle_guest_sea(phys_addr_t addr, unsigned int esr);
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SYSTEM_MISC_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 547d7a0c9d05..f1e5c9165809 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -34,7 +34,6 @@
#include <asm/memory.h>
#include <asm/extable.h>
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index a7b1fc58ffdf..d1dd93436e1e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 400
+#define __NR_compat_syscalls 424
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 04ee190b90fe..5590f2623690 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -270,7 +270,7 @@ __SYSCALL(__NR_uname, sys_newuname)
/* 123 was sys_modify_ldt */
__SYSCALL(123, sys_ni_syscall)
#define __NR_adjtimex 124
-__SYSCALL(__NR_adjtimex, compat_sys_adjtimex)
+__SYSCALL(__NR_adjtimex, sys_adjtimex_time32)
#define __NR_mprotect 125
__SYSCALL(__NR_mprotect, sys_mprotect)
#define __NR_sigprocmask 126
@@ -344,9 +344,9 @@ __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 160
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
#define __NR_sched_rr_get_interval 161
-__SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval)
+__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32)
#define __NR_nanosleep 162
-__SYSCALL(__NR_nanosleep, compat_sys_nanosleep)
+__SYSCALL(__NR_nanosleep, sys_nanosleep_time32)
#define __NR_mremap 163
__SYSCALL(__NR_mremap, sys_mremap)
#define __NR_setresuid 164
@@ -376,7 +376,7 @@ __SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 176
__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending)
#define __NR_rt_sigtimedwait 177
-__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait)
+__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#define __NR_rt_sigqueueinfo 178
__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigsuspend 179
@@ -502,7 +502,7 @@ __SYSCALL(__NR_tkill, sys_tkill)
#define __NR_sendfile64 239
__SYSCALL(__NR_sendfile64, sys_sendfile64)
#define __NR_futex 240
-__SYSCALL(__NR_futex, compat_sys_futex)
+__SYSCALL(__NR_futex, sys_futex_time32)
#define __NR_sched_setaffinity 241
__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity)
#define __NR_sched_getaffinity 242
@@ -512,7 +512,7 @@ __SYSCALL(__NR_io_setup, compat_sys_io_setup)
#define __NR_io_destroy 244
__SYSCALL(__NR_io_destroy, sys_io_destroy)
#define __NR_io_getevents 245
-__SYSCALL(__NR_io_getevents, compat_sys_io_getevents)
+__SYSCALL(__NR_io_getevents, sys_io_getevents_time32)
#define __NR_io_submit 246
__SYSCALL(__NR_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 247
@@ -538,21 +538,21 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_timer_create 257
__SYSCALL(__NR_timer_create, compat_sys_timer_create)
#define __NR_timer_settime 258
-__SYSCALL(__NR_timer_settime, compat_sys_timer_settime)
+__SYSCALL(__NR_timer_settime, sys_timer_settime32)
#define __NR_timer_gettime 259
-__SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime)
+__SYSCALL(__NR_timer_gettime, sys_timer_gettime32)
#define __NR_timer_getoverrun 260
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
#define __NR_timer_delete 261
__SYSCALL(__NR_timer_delete, sys_timer_delete)
#define __NR_clock_settime 262
-__SYSCALL(__NR_clock_settime, compat_sys_clock_settime)
+__SYSCALL(__NR_clock_settime, sys_clock_settime32)
#define __NR_clock_gettime 263
-__SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime)
+__SYSCALL(__NR_clock_gettime, sys_clock_gettime32)
#define __NR_clock_getres 264
-__SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
+__SYSCALL(__NR_clock_getres, sys_clock_getres_time32)
#define __NR_clock_nanosleep 265
-__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
+__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep_time32)
#define __NR_statfs64 266
__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64)
#define __NR_fstatfs64 267
@@ -560,7 +560,7 @@ __SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64)
#define __NR_tgkill 268
__SYSCALL(__NR_tgkill, sys_tgkill)
#define __NR_utimes 269
-__SYSCALL(__NR_utimes, compat_sys_utimes)
+__SYSCALL(__NR_utimes, sys_utimes_time32)
#define __NR_arm_fadvise64_64 270
__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64)
#define __NR_pciconfig_iobase 271
@@ -574,9 +574,9 @@ __SYSCALL(__NR_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 275
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
#define __NR_mq_timedsend 276
-__SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend)
+__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend_time32)
#define __NR_mq_timedreceive 277
-__SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive)
+__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive_time32)
#define __NR_mq_notify 278
__SYSCALL(__NR_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 279
@@ -622,7 +622,7 @@ __SYSCALL(__NR_semop, sys_semop)
#define __NR_semget 299
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 300
-__SYSCALL(__NR_semctl, compat_sys_semctl)
+__SYSCALL(__NR_semctl, compat_sys_old_semctl)
#define __NR_msgsnd 301
__SYSCALL(__NR_msgsnd, compat_sys_msgsnd)
#define __NR_msgrcv 302
@@ -630,7 +630,7 @@ __SYSCALL(__NR_msgrcv, compat_sys_msgrcv)
#define __NR_msgget 303
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 304
-__SYSCALL(__NR_msgctl, compat_sys_msgctl)
+__SYSCALL(__NR_msgctl, compat_sys_old_msgctl)
#define __NR_shmat 305
__SYSCALL(__NR_shmat, compat_sys_shmat)
#define __NR_shmdt 306
@@ -638,7 +638,7 @@ __SYSCALL(__NR_shmdt, sys_shmdt)
#define __NR_shmget 307
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 308
-__SYSCALL(__NR_shmctl, compat_sys_shmctl)
+__SYSCALL(__NR_shmctl, compat_sys_old_shmctl)
#define __NR_add_key 309
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 310
@@ -646,7 +646,7 @@ __SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 311
__SYSCALL(__NR_keyctl, compat_sys_keyctl)
#define __NR_semtimedop 312
-__SYSCALL(__NR_semtimedop, compat_sys_semtimedop)
+__SYSCALL(__NR_semtimedop, sys_semtimedop_time32)
#define __NR_vserver 313
__SYSCALL(__NR_vserver, sys_ni_syscall)
#define __NR_ioprio_set 314
@@ -674,7 +674,7 @@ __SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_fchownat 325
__SYSCALL(__NR_fchownat, sys_fchownat)
#define __NR_futimesat 326
-__SYSCALL(__NR_futimesat, compat_sys_futimesat)
+__SYSCALL(__NR_futimesat, sys_futimesat_time32)
#define __NR_fstatat64 327
__SYSCALL(__NR_fstatat64, sys_fstatat64)
#define __NR_unlinkat 328
@@ -692,9 +692,9 @@ __SYSCALL(__NR_fchmodat, sys_fchmodat)
#define __NR_faccessat 334
__SYSCALL(__NR_faccessat, sys_faccessat)
#define __NR_pselect6 335
-__SYSCALL(__NR_pselect6, compat_sys_pselect6)
+__SYSCALL(__NR_pselect6, compat_sys_pselect6_time32)
#define __NR_ppoll 336
-__SYSCALL(__NR_ppoll, compat_sys_ppoll)
+__SYSCALL(__NR_ppoll, compat_sys_ppoll_time32)
#define __NR_unshare 337
__SYSCALL(__NR_unshare, sys_unshare)
#define __NR_set_robust_list 338
@@ -718,7 +718,7 @@ __SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait)
#define __NR_kexec_load 347
__SYSCALL(__NR_kexec_load, compat_sys_kexec_load)
#define __NR_utimensat 348
-__SYSCALL(__NR_utimensat, compat_sys_utimensat)
+__SYSCALL(__NR_utimensat, sys_utimensat_time32)
#define __NR_signalfd 349
__SYSCALL(__NR_signalfd, compat_sys_signalfd)
#define __NR_timerfd_create 350
@@ -728,9 +728,9 @@ __SYSCALL(__NR_eventfd, sys_eventfd)
#define __NR_fallocate 352
__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate)
#define __NR_timerfd_settime 353
-__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
+__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime32)
#define __NR_timerfd_gettime 354
-__SYSCALL(__NR_timerfd_gettime, compat_sys_timerfd_gettime)
+__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime32)
#define __NR_signalfd4 355
__SYSCALL(__NR_signalfd4, compat_sys_signalfd4)
#define __NR_eventfd2 356
@@ -752,7 +752,7 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo)
#define __NR_perf_event_open 364
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_recvmmsg 365
-__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg)
+__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg_time32)
#define __NR_accept4 366
__SYSCALL(__NR_accept4, sys_accept4)
#define __NR_fanotify_init 367
@@ -766,7 +766,7 @@ __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 371
__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at)
#define __NR_clock_adjtime 372
-__SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime)
+__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime32)
#define __NR_syncfs 373
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_sendmmsg 374
@@ -821,6 +821,51 @@ __SYSCALL(__NR_statx, sys_statx)
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_io_pgetevents 399
__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_migrate_pages 400
+__SYSCALL(__NR_migrate_pages, compat_sys_migrate_pages)
+#define __NR_kexec_file_load 401
+__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+/* 402 is unused */
+#define __NR_clock_gettime64 403
+__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
+#define __NR_clock_settime64 404
+__SYSCALL(__NR_clock_settime64, sys_clock_settime)
+#define __NR_clock_adjtime64 405
+__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
+#define __NR_clock_getres_time64 406
+__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
+#define __NR_clock_nanosleep_time64 407
+__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
+#define __NR_timer_gettime64 408
+__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
+#define __NR_timer_settime64 409
+__SYSCALL(__NR_timer_settime64, sys_timer_settime)
+#define __NR_timerfd_gettime64 410
+__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
+#define __NR_timerfd_settime64 411
+__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
+#define __NR_utimensat_time64 412
+__SYSCALL(__NR_utimensat_time64, sys_utimensat)
+#define __NR_pselect6_time64 413
+__SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
+#define __NR_ppoll_time64 414
+__SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
+#define __NR_io_pgetevents_time64 416
+__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+#define __NR_recvmmsg_time64 417
+__SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
+#define __NR_mq_timedsend_time64 418
+__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
+#define __NR_mq_timedreceive_time64 419
+__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
+#define __NR_semtimedop_time64 420
+__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
+#define __NR_rt_sigtimedwait_time64 421
+__SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64)
+#define __NR_futex_time64 422
+__SYSCALL(__NR_futex_time64, sys_futex)
+#define __NR_sched_rr_get_interval_time64 423
+__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index b3ef061d8b74..d88e56b90b93 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
#include <xen/arm/page-coherent.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+ return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+ dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+
+ if (pfn_valid(pfn))
+ dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
+ else
+ __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn))
+ dma_direct_sync_single_for_device(hwdev, handle, size, dir);
+ else
+ __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long page_pfn = page_to_xen_pfn(page);
+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+ unsigned long compound_pages =
+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+ bool local = (page_pfn <= dev_pfn) &&
+ (dev_pfn - page_pfn < compound_pages);
+
+ if (local)
+ dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
+ else
+ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ /*
+ * While the Linux page can span across multiple Xen pages, it's not
+ * possible to have a mix of local and foreign Xen pages. Dom0 is
+ * mapped 1:1, so calling pfn_valid on a
+ * foreign mfn will always return false. If the page is local we can
+ * safely call the native dma_ops function, otherwise we call the xen
+ * specific function.
+ */
+ if (pfn_valid(pfn))
+ dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+ else
+ __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
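
The dispatch rule used throughout this header can be summed up by a hypothetical helper (not in the patch): Dom0 is mapped 1:1, so pfn_valid() succeeds only for local pages, while foreign grant-mapped pages take the __xen_dma_* path instead of dma_direct_*.

static inline bool xen_dma_handle_is_local(dma_addr_t handle)
{
	return pfn_valid(PFN_DOWN(handle));
}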
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index dae1584cf017..4703d218663a 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -17,5 +17,7 @@
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 44e3c351e1ea..803f0494dd3e 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -27,8 +27,10 @@
#include <linux/smp.h>
#include <linux/serial_core.h>
+#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
#include <asm/pgtable.h>
#include <asm/smp_plat.h>
@@ -256,3 +258,32 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
return __pgprot(PROT_NORMAL_NC);
return __pgprot(PROT_DEVICE_nGnRnE);
}
+
+/*
+ * Claim Synchronous External Aborts as a firmware first notification.
+ *
+ * Used by KVM and the arch do_sea handler.
+ * @regs may be NULL when called from process context.
+ */
+int apei_claim_sea(struct pt_regs *regs)
+{
+ int err = -ENOENT;
+ unsigned long current_flags;
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return err;
+
+ current_flags = arch_local_save_flags();
+
+ /*
+ * SEA can interrupt SError, mask it and describe this as an NMI so
+ * that APEI defers the handling.
+ */
+ local_daif_restore(DAIF_ERRCTX);
+ nmi_enter();
+ err = ghes_notify_sea();
+ nmi_exit();
+ local_daif_restore(current_flags);
+
+ return err;
+}
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 09ac548c9d44..9950bb0cbd52 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -553,7 +553,7 @@ static const struct midr_range arm64_repeat_tlbi_cpus[] = {
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
-static const struct midr_range cavium_erratum_27456_cpus[] = {
+const struct midr_range cavium_erratum_27456_cpus[] = {
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
/* Cavium ThunderX, T81 pass 1.0 */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4f272399de89..f6d84e2c92fe 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -983,7 +983,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
/* Useful for KASLR robustness */
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return true;
+ return kaslr_offset() > 0;
/* Don't force KPTI for CPUs that are not vulnerable */
if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
@@ -1003,7 +1003,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
static bool kpti_applied = false;
int cpu = smp_processor_id();
- if (kpti_applied)
+ /*
+ * We don't need to rewrite the page-tables if either we've done
+ * it already or we have KASLR enabled and therefore have not
+ * created any global mappings at all.
+ */
+ if (kpti_applied || kaslr_offset() > 0)
return;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c7213674cb24..eecf7927dab0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -475,6 +475,7 @@ ENDPROC(__primary_switched)
ENTRY(kimage_vaddr)
.quad _text - TEXT_OFFSET
+EXPORT_SYMBOL(kimage_vaddr)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
@@ -538,8 +539,7 @@ set_hcr:
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #24, #4
- cmp x0, #1
- b.ne 3f
+ cbz x0, 3f
mrs_s x0, SYS_ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99688f3..9859e1178e6b 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
dcache_clean_range(__idmap_text_start, __idmap_text_end);
/* Clean kvm setup code to PoC? */
- if (el2_reset_needed())
+ if (el2_reset_needed()) {
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+ dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ }
/* make the crash dump kernel image protected again */
crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fbaa374..17f325ba831e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
#include <asm/virt.h>
.text
+ .pushsection .hyp.text, "ax"
+
.align 11
ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8abe9c..b09b6f75f759 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/types.h>
+#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
return ret;
}
-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
{
static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
@@ -87,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
/*
* Try to map the FDT early. If this fails, we simply bail,
@@ -109,7 +111,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case.
*/
- cmdline = get_cmdline(fdt);
+ cmdline = kaslr_get_cmdline(fdt);
str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
return 0;
@@ -169,5 +171,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
return offset;
}
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index aa9c94113700..66b5d697d943 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -321,7 +321,7 @@ void crash_post_resume(void)
* but does not hold any data of loaded kernel image.
*
* Note that all the pages in crash dump kernel memory have been initially
- * marked as Reserved in kexec_reserve_crashkres_pages().
+ * marked as Reserved as memory was allocated via memblock_reserve().
*
* In hibernation, the pages which are Reserved and yet "nosave" are excluded
* from the hibernation image. crash_is_nosave() does this check for crash
@@ -361,7 +361,6 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
for (addr = begin; addr < end; addr += PAGE_SIZE) {
page = phys_to_page(addr);
- ClearPageReserved(page);
free_reserved_page(page);
}
}
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 10e33860e47a..58871333737a 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -87,7 +87,9 @@ static int setup_dtb(struct kimage *image,
/* add kaslr-seed */
ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
- if (ret && (ret != -FDT_ERR_NOTFOUND))
+ if (ret == -FDT_ERR_NOTFOUND)
+ ret = 0;
+ else if (ret)
goto out;
if (rng_is_initialized()) {
@@ -118,10 +120,12 @@ static int create_dtb(struct kimage *image,
{
void *buf;
size_t buf_size;
+ size_t cmdline_len;
int ret;
+ cmdline_len = cmdline ? strlen(cmdline) : 0;
buf_size = fdt_totalsize(initial_boot_params)
- + strlen(cmdline) + DTB_EXTRA_SPACE;
+ + cmdline_len + DTB_EXTRA_SPACE;
for (;;) {
buf = vmalloc(buf_size);
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a5b338b2542..f17afb99890c 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
addr < (unsigned long)__entry_text_end) ||
(addr >= (unsigned long)__idmap_text_start &&
addr < (unsigned long)__idmap_text_end) ||
+ (addr >= (unsigned long)__hyp_text_start &&
+ addr < (unsigned long)__hyp_text_end) ||
!!search_exception_tables(addr))
return true;
if (!is_kernel_in_hyp_mode()) {
- if ((addr >= (unsigned long)__hyp_text_start &&
- addr < (unsigned long)__hyp_text_end) ||
- (addr >= (unsigned long)__hyp_idmap_text_start &&
+ if ((addr >= (unsigned long)__hyp_idmap_text_start &&
addr < (unsigned long)__hyp_idmap_text_end))
return true;
}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9dce33b0e260..ddaea0fd2fa4 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs)
}
/*
- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
- * We also take into account DIT (bit 24), which is not yet documented, and
- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
- * allocated an EL0 meaning in future.
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
+ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
+ * not described in ARM DDI 0487D.a.
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
+ * be allocated an EL0 meaning in future.
* Userspace cannot use these until they have an architectural meaning.
* Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
* We also reserve IL for the kernel; SS is handled dynamically.
*/
#define SPSR_EL1_AARCH64_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
- GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+ GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
static int valid_compat_regs(struct user_pt_regs *regs)
{
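
Worked mask check, derived from the comment and masks above:

	bit 12 (SSBS, AArch64): GENMASK_ULL(20, 10) -> GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10)
	bit 23 (SSBS, AArch32): GENMASK_ULL(23, 22) -> GENMASK_ULL(22, 22)

i.e. the SSBS bit is carved out of the RES0 ranges so userspace may set it.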
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 4b0e1231625c..009849328289 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p)
arm64_memblock_init();
paging_init();
- efi_apply_persistent_mem_reservations();
acpi_table_upgrade();
@@ -340,6 +339,9 @@ void __init setup_arch(char **cmdline_p)
smp_init_cpus();
smp_build_mpidr_hash();
+ /* Init percpu seeds for random tags after cpus are set up. */
+ kasan_init_tags();
+
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Make sure init_thread_info.ttbr0 always generates translation
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index b0b1478094b4..421ebf6f7086 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
#include <kvm/arm_psci.h>
#include <asm/cpufeature.h>
+#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
+NOKPROBE_SYMBOL(activate_traps_vhe);
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void)
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
+NOKPROBE_SYMBOL(deactivate_traps_vhe);
static void __hyp_text __deactivate_traps_nvhe(void)
{
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
return exit_code;
}
+NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
read_sysreg_el2(esr), read_sysreg_el2(far),
read_sysreg(hpfar_el2), par, vcpu);
}
+NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 68d6f7c3b237..b426e2cf973c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -18,6 +18,7 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
+#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_common_state(ctxt);
}
+NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_common_state(ctxt);
__sysreg_save_el2_return_state(ctxt);
}
+NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_common_state(ctxt);
}
+NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_common_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
}
+NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b72a3dd56204..f16a5f8ff2b4 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -32,6 +32,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
/* Maximum phys_shift supported for any VM on this host */
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
* This function finds the right table above and sets the registers on
* the virtual CPU struct to their architecturally defined reset
* values.
+ *
+ * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
+ * ioctl or as part of handling a request issued by another VCPU in the PSCI
+ * handling code. In the first case, the VCPU will not be loaded, and in the
+ * second case the VCPU will be loaded. Because this function operates purely
+ * on the memory-backed values of system registers, we want to do a full put if
+ * we were loaded (handling a request) and load the values back at the end of
+ * the function. Otherwise we leave the state alone. In both cases, we
+ * disable preemption around the vcpu reset as we would otherwise race with
+ * preempt notifiers which also call put/load.
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
const struct kvm_regs *cpu_reset;
+ int ret = -EINVAL;
+ bool loaded;
+
+ preempt_disable();
+ loaded = (vcpu->cpu != -1);
+ if (loaded)
+ kvm_arch_vcpu_put(vcpu);
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
if (!cpu_has_32bit_el1())
- return -EINVAL;
+ goto out;
cpu_reset = &default_regs_reset32;
} else {
cpu_reset = &default_regs_reset;
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset system registers */
kvm_reset_sys_regs(vcpu);
+ /*
+ * Additional reset state handling that PSCI may have imposed on us.
+ * Must be done after all the sys_reg reset.
+ */
+ if (vcpu->arch.reset_state.reset) {
+ unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+ /* Gracefully handle Thumb2 entry point */
+ if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+ target_pc &= ~1UL;
+ vcpu_set_thumb(vcpu);
+ }
+
+ /* Propagate caller endianness */
+ if (vcpu->arch.reset_state.be)
+ kvm_vcpu_set_be(vcpu);
+
+ *vcpu_pc(vcpu) = target_pc;
+ vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+ vcpu->arch.reset_state.reset = false;
+ }
+
/* Reset PMU */
kvm_pmu_vcpu_reset(vcpu);
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
/* Reset timer */
- return kvm_timer_vcpu_reset(vcpu);
+ ret = kvm_timer_vcpu_reset(vcpu);
+out:
+ if (loaded)
+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ preempt_enable();
+ return ret;
}
void kvm_set_ipa_limit(void)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e3e37228ae4e..c936aa40c3f4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
return read_zero(vcpu, p);
}
-static bool trap_undef(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *r)
+/*
+ * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
+ * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
+ * system, these registers should UNDEF. LORID_EL1 being a RO register, we
+ * treat it separately.
+ */
+static bool trap_loregion(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
{
- kvm_inject_undefined(vcpu);
- return false;
+ u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
+ (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+
+ if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
+ kvm_inject_undefined(vcpu);
+ return false;
+ }
+
+ if (p->is_write && sr == SYS_LORID_EL1)
+ return write_to_read_only(vcpu, p, r);
+
+ return trap_raz_wi(vcpu, p, r);
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
if (val & ptrauth_mask)
kvm_debug("ptrauth unsupported for guests, suppressing\n");
val &= ~ptrauth_mask;
- } else if (id == SYS_ID_AA64MMFR1_EL1) {
- if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
- kvm_debug("LORegions unsupported for guests, suppressing\n");
-
- val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
}
return val;
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
- { SYS_DESC(SYS_LORSA_EL1), trap_undef },
- { SYS_DESC(SYS_LOREA_EL1), trap_undef },
- { SYS_DESC(SYS_LORN_EL1), trap_undef },
- { SYS_DESC(SYS_LORC_EL1), trap_undef },
- { SYS_DESC(SYS_LORID_EL1), trap_undef },
+ { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
+ { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
+ { SYS_DESC(SYS_LORN_EL1), trap_loregion },
+ { SYS_DESC(SYS_LORC_EL1), trap_loregion },
+ { SYS_DESC(SYS_LORID_EL1), trap_loregion },
{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
table = get_target_table(vcpu->arch.target, true, &num);
reset_sys_reg_descs(vcpu, table, num);
- for (num = 1; num < NR_SYS_REGS; num++)
- if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
- panic("Didn't reset __vcpu_sys_reg(%zi)", num);
+ for (num = 1; num < NR_SYS_REGS; num++) {
+ if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+ "Didn't reset __vcpu_sys_reg(%zi)\n", num))
+ break;
+ }
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index fb0908456a1f..78c0a72f822c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -466,9 +466,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
#ifdef CONFIG_XEN
- if (xen_initial_domain()) {
- dev->archdata.dev_dma_ops = dev->dma_ops;
+ if (xen_initial_domain())
dev->dma_ops = xen_dma_ops;
- }
#endif
}
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index fcb1f2a6d7c6..99bb8facb5cb 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
}
-static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
+ unsigned long end)
{
- pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
- unsigned long addr;
- unsigned i;
+ unsigned long addr = start;
+ pte_t *ptep = pte_offset_kernel(pmdp, start);
- for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
- addr = start + i * PAGE_SIZE;
+ do {
note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
- }
+ } while (ptep++, addr += PAGE_SIZE, addr != end);
}
-static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
+ unsigned long end)
{
- pmd_t *pmdp = pmd_offset(pudp, 0UL);
- unsigned long addr;
- unsigned i;
+ unsigned long next, addr = start;
+ pmd_t *pmdp = pmd_offset(pudp, start);
- for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+ do {
pmd_t pmd = READ_ONCE(*pmdp);
+ next = pmd_addr_end(addr, end);
- addr = start + i * PMD_SIZE;
if (pmd_none(pmd) || pmd_sect(pmd)) {
note_page(st, addr, 3, pmd_val(pmd));
} else {
BUG_ON(pmd_bad(pmd));
- walk_pte(st, pmdp, addr);
+ walk_pte(st, pmdp, addr, next);
}
- }
+ } while (pmdp++, addr = next, addr != end);
}
-static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
+ unsigned long end)
{
- pud_t *pudp = pud_offset(pgdp, 0UL);
- unsigned long addr;
- unsigned i;
+ unsigned long next, addr = start;
+ pud_t *pudp = pud_offset(pgdp, start);
- for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+ do {
pud_t pud = READ_ONCE(*pudp);
+ next = pud_addr_end(addr, end);
- addr = start + i * PUD_SIZE;
if (pud_none(pud) || pud_sect(pud)) {
note_page(st, addr, 2, pud_val(pud));
} else {
BUG_ON(pud_bad(pud));
- walk_pmd(st, pudp, addr);
+ walk_pmd(st, pudp, addr, next);
}
- }
+ } while (pudp++, addr = next, addr != end);
}
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
unsigned long start)
{
- pgd_t *pgdp = pgd_offset(mm, 0UL);
- unsigned i;
- unsigned long addr;
+ unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
+ unsigned long next, addr = start;
+ pgd_t *pgdp = pgd_offset(mm, start);
- for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+ do {
pgd_t pgd = READ_ONCE(*pgdp);
+ next = pgd_addr_end(addr, end);
- addr = start + i * PGDIR_SIZE;
if (pgd_none(pgd)) {
note_page(st, addr, 1, pgd_val(pgd));
} else {
BUG_ON(pgd_bad(pgd));
- walk_pud(st, pgdp, addr);
+ walk_pud(st, pgdp, addr, next);
}
- }
+ } while (pgdp++, addr = next, addr != end);
}
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index efb7b2cbead5..e1c84c2e1cab 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -18,6 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
@@ -33,6 +34,7 @@
#include <linux/preempt.h>
#include <linux/hugetlb.h>
+#include <asm/acpi.h>
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
@@ -47,8 +49,6 @@
#include <asm/tlbflush.h>
#include <asm/traps.h>
-#include <acpi/ghes.h>
-
struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr,
struct pt_regs *regs);
@@ -643,19 +643,10 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
inf = esr_to_fault_info(esr);
/*
- * Synchronous aborts may interrupt code which had interrupts masked.
- * Before calling out into the wider kernel tell the interested
- * subsystems.
+ * Return value ignored as we rely on signal merging.
+ * Future patches will make this more robust.
*/
- if (IS_ENABLED(CONFIG_ACPI_APEI_SEA)) {
- if (interrupts_enabled(regs))
- nmi_enter();
-
- ghes_notify_sea();
-
- if (interrupts_enabled(regs))
- nmi_exit();
- }
+ apei_claim_sea(regs);
if (esr & ESR_ELx_FnV)
siaddr = NULL;
@@ -733,11 +724,6 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "unknown 63" },
};
-int handle_guest_sea(phys_addr_t addr, unsigned int esr)
-{
- return ghes_notify_sea();
-}
-
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 30695a868107..5c9073bace83 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
__clean_dcache_area_pou(kaddr, len);
__flush_icache_all();
} else {
- flush_icache_range(addr, addr + len);
+ /*
+ * Don't issue kick_all_cpus_sync() after I-cache invalidation
+ * for user mappings.
+ */
+ __flush_icache_range(addr, addr + len);
}
}
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 28cbc22d7e30..6b4a47b3adf4 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -27,6 +27,26 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+ size_t pagesize = huge_page_size(h);
+
+ switch (pagesize) {
+#ifdef CONFIG_ARM64_4K_PAGES
+ case PUD_SIZE:
+#endif
+ case PMD_SIZE:
+ case CONT_PMD_SIZE:
+ case CONT_PTE_SIZE:
+ return true;
+ }
+ pr_warn("%s: unrecognized huge page size 0x%lx\n",
+ __func__, pagesize);
+ return false;
+}
+#endif
+
int pmd_huge(pmd_t pmd)
{
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
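
Worked sizes for the switch in arch_hugetlb_migration_supported() above, assuming 4K base pages and the usual contiguous-hint factor of 16:

	PUD_SIZE      = 1G
	PMD_SIZE      = 2M
	CONT_PMD_SIZE = 16 * 2M = 32M
	CONT_PTE_SIZE = 16 * 4K = 64K

All of these report migration as supported; any other huge page size falls through to the pr_warn() and is rejected.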
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 7205a9085b4d..c38976b70069 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -118,35 +118,10 @@ static void __init reserve_crashkernel(void)
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
}
-
-static void __init kexec_reserve_crashkres_pages(void)
-{
-#ifdef CONFIG_HIBERNATION
- phys_addr_t addr;
- struct page *page;
-
- if (!crashk_res.end)
- return;
-
- /*
- * To reduce the size of hibernation image, all the pages are
- * marked as Reserved initially.
- */
- for (addr = crashk_res.start; addr < (crashk_res.end + 1);
- addr += PAGE_SIZE) {
- page = phys_to_page(addr);
- SetPageReserved(page);
- }
-#endif
-}
#else
static void __init reserve_crashkernel(void)
{
}
-
-static void __init kexec_reserve_crashkres_pages(void)
-{
-}
#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_CRASH_DUMP
@@ -586,8 +561,6 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
memblock_free_all();
- kexec_reserve_crashkres_pages();
-
mem_init_print_info(NULL);
/*
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 4b55b15707a3..f37a86d2a69d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -252,8 +252,6 @@ void __init kasan_init(void)
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
- kasan_init_tags();
-
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
pr_info("KernelAddressSanitizer initialized\n");
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index ae34e3a1cef1..7a0a555b366a 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -120,7 +120,7 @@ static void __init setup_node_to_cpumask_map(void)
}
/* cpumask_of_node() will now work */
- pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
+ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
/*
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 1542df00b23c..aaddc0217e73 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -362,7 +362,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
- const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+ const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
+ BPF_CLASS(code) == BPF_JMP;
const bool isdw = BPF_SIZE(code) == BPF_DW;
u8 jmp_cond;
s32 jmp_offset;
@@ -559,7 +560,17 @@ emit_bswap_uxt:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
- emit(A64_CMP(1, dst, src), ctx);
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
check_imm19(jmp_offset);
@@ -601,7 +612,8 @@ emit_cond_jmp:
emit(A64_B_(jmp_cond, jmp_offset), ctx);
break;
case BPF_JMP | BPF_JSET | BPF_X:
- emit(A64_TST(1, dst, src), ctx);
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ emit(A64_TST(is64, dst, src), ctx);
goto emit_cond_jmp;
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -614,12 +626,23 @@ emit_cond_jmp:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
- emit_a64_mov_i(1, tmp, imm, ctx);
- emit(A64_CMP(1, dst, tmp), ctx);
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_CMP(is64, dst, tmp), ctx);
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_K:
- emit_a64_mov_i(1, tmp, imm, ctx);
- emit(A64_TST(1, dst, tmp), ctx);
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_TST(is64, dst, tmp), ctx);
goto emit_cond_jmp;
/* function call */
case BPF_JMP | BPF_CALL:
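
Illustrative only, assuming the BPF_JMP32_REG() helper from include/linux/filter.h: a JMP32 comparison acts on the low 32 bits, which is why is64 is false for this class and A64_CMP()/A64_TST() emit W-register forms.

	struct bpf_insn insn = BPF_JMP32_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4);
	/* JITs to roughly "cmp w<dst>, w<src>; b.hi <off>" rather than the 64-bit cmp. */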
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 456e154674d1..e5cd3c5f8399 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -6,6 +6,7 @@
config C6X
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select CLKDEV_LOOKUP
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 33a2c94fed0d..63b4a1705182 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += pgalloc.h
generic-y += preempt.h
generic-y += segment.h
generic-y += serial.h
+generic-y += shmparam.h
generic-y += tlbflush.h
generic-y += topology.h
generic-y += trace_clock.h
diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h
index 6b2fe792de9d..79b724c39d9b 100644
--- a/arch/c6x/include/uapi/asm/unistd.h
+++ b/arch/c6x/include/uapi/asm/unistd.h
@@ -17,7 +17,9 @@
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_TIME32_SYSCALLS
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 398113c845f5..6959e0b1e956 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -1,5 +1,6 @@
config CSKY
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index ecae6b358f95..c1dfa9c10e36 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -15,6 +15,31 @@ extern void iounmap(void *addr);
extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
size_t size, unsigned long flags);
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.
+ *
+ * For CACHEV1 (807, 810), a store instruction may "fast retire", so an
+ * extra mb() is needed after the write to keep the store from retiring
+ * before it actually completes.
+ *
+ * For CACHEV2 (860), a store instruction with PAGE_ATTR_NO_BUFFERABLE
+ * does not fast retire, so the trailing mb() can be omitted.
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; })
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
+#else
+#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
+#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
+#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
+#endif
+
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
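As a rough illustration of how the accessors added above compose, here is a userspace stand-in in which readl_relaxed(), rmb(), wmb(), mb() and fake_mmio are stubs invented for this sketch (the real ones are architecture barriers and MMIO accesses, not printf calls); it mirrors the CACHEV1 shape, where writes carry a trailing mb():

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio = 0xdeadbeef;	/* stand-in for a device register */

static uint32_t readl_relaxed(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

/* Stubs only: the real rmb()/wmb()/mb() are memory barriers. */
static void rmb(void) { printf("rmb()\n"); }
static void wmb(void) { printf("wmb()\n"); }
static void mb(void)  { printf("mb()\n"); }

/* Same shape as the csky macros: relaxed read then rmb(); wmb(), write, mb() */
#define readl(c)	({ uint32_t __v = readl_relaxed(c); rmb(); __v; })
#define writel(v, c)	({ wmb(); *(volatile uint32_t *)(c) = (v); mb(); })

int main(void)
{
	uint32_t v = readl(&fake_mmio);

	printf("read 0x%x\n", v);
	writel(0x1234, &fake_mmio);
	return 0;
}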
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bf4f4a0e140e..d213bb47b717 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -24,41 +24,34 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
extern void pgd_init(unsigned long *p);
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
- unsigned long *kaddr, i;
+ unsigned long i;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL,
- PTE_ORDER);
- kaddr = (unsigned long *)pte;
- if (address & 0x80000000)
- for (i = 0; i < (PAGE_SIZE/4); i++)
- *(kaddr + i) = 0x1;
- else
- clear_page(kaddr);
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
+ (pte + i)->pte_low = _PAGE_GLOBAL;
return pte;
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
- unsigned long *kaddr, i;
-
- pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER);
- if (pte) {
- kaddr = kmap_atomic(pte);
- if (address & 0x80000000) {
- for (i = 0; i < (PAGE_SIZE/4); i++)
- *(kaddr + i) = 0x1;
- } else
- clear_page(kaddr);
- kunmap_atomic(kaddr);
- pgtable_page_ctor(pte);
+
+ pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!pte)
+ return NULL;
+
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
}
+
return pte;
}
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index edfcbb25fd9f..dcea277c09ae 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -45,8 +45,8 @@
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep) set_pte((ptep), \
- (((unsigned int)addr&0x80000000)?__pte(1):__pte(0)))
-#define pte_none(pte) (!(pte_val(pte)&0xfffffffe))
+ (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+
/*
 * Macro to mark a page protection value as "uncacheable". Note
* that "protection" is really a misnomer here as the protection value
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 8f454810514f..21e0bd5293dd 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -49,7 +49,7 @@ struct thread_struct {
};
#define INIT_THREAD { \
- .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \
+ .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
.sr = DEFAULT_PSR_VALUE, \
}
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp)
#define task_pt_regs(p) \
- ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1)
+ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
#define cpu_relax() barrier()
diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h
index ffdc4c47ff43..db2640d5f575 100644
--- a/arch/csky/include/asm/segment.h
+++ b/arch/csky/include/asm/segment.h
@@ -9,7 +9,6 @@ typedef struct {
} mm_segment_t;
#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF })
-#define get_ds() KERNEL_DS
#define USER_DS ((mm_segment_t) { 0x80000000UL })
#define get_fs() (current_thread_info()->addr_limit)
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
index 224c9a9ab45b..ec60e49cea66 100644
--- a/arch/csky/include/uapi/asm/unistd.h
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -2,6 +2,8 @@
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c
index 659253e9989c..d67f9777cfd9 100644
--- a/arch/csky/kernel/dumpstack.c
+++ b/arch/csky/kernel/dumpstack.c
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
if (task)
stack = (unsigned long *)thread_saved_fp(task);
else
+#ifdef CONFIG_STACKTRACE
+ asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
+#else
stack = (unsigned long *)&stack;
+#endif
}
show_trace(stack);
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
index 65abab0c7a47..b5ad7d9de18c 100644
--- a/arch/csky/kernel/module.c
+++ b/arch/csky/kernel/module.c
@@ -12,7 +12,7 @@
#include <linux/spinlock.h>
#include <asm/pgtable.h>
-#if defined(__CSKYABIV2__)
+#ifdef CONFIG_CPU_CK810
#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0)
@@ -25,6 +25,26 @@
*(uint16_t *)(addr) = 0xE8Fa; \
*((uint16_t *)(addr) + 1) = 0x0000; \
} while (0)
+
+static void jsri_2_lrw_jsr(uint32_t *location)
+{
+ uint16_t *location_tmp = (uint16_t *)location;
+
+ if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
+ return;
+
+ if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
+ /* jsri 0x... --> lrw r26, 0x... */
+ CHANGE_JSRI_TO_LRW(location);
+ /* lsli r0, r0 --> jsr r26 */
+ SET_JSR32_R26(location + 1);
+ }
+}
+#else
+static inline void jsri_2_lrw_jsr(uint32_t *location)
+{
+ return;
+}
#endif
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
@@ -35,9 +55,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
Elf32_Sym *sym;
uint32_t *location;
short *temp;
-#if defined(__CSKYABIV2__)
- uint16_t *location_tmp;
-#endif
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
@@ -59,18 +76,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
case R_CSKY_PCRELJSR_IMM11BY2:
break;
case R_CSKY_PCRELJSR_IMM26BY2:
-#if defined(__CSKYABIV2__)
- location_tmp = (uint16_t *)location;
- if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
- break;
-
- if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
- /* jsri 0x... --> lrw r26, 0x... */
- CHANGE_JSRI_TO_LRW(location);
- /* lsli r0, r0 --> jsr r26 */
- SET_JSR32_R26(location + 1);
- }
-#endif
+ jsri_2_lrw_jsr(location);
break;
case R_CSKY_ADDR_HI16:
temp = ((short *)location) + 1;
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 57f1afe19a52..f2f12fff36f7 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -8,6 +8,7 @@
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/uaccess.h>
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target,
static const struct user_regset csky_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
- .n = ELF_NGREG,
+ .n = sizeof(struct pt_regs) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.get = &gpr_get,
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index ddc4dd79f282..b07a534b3062 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
unsigned long mask = 1 << cpu;
- secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8;
+ secondary_stack =
+ (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
secondary_hint = mfcr("cr31");
secondary_ccr = mfcr("cr18");
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
index cb7c03e5cd21..8473b6bdf512 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr)
vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ if (!pfn_valid(pfn)) {
+ vma_prot.pgprot |= _PAGE_SO;
+ return pgprot_noncached(vma_prot);
+ } else if (file->f_flags & O_SYNC) {
+ return pgprot_noncached(vma_prot);
+ }
+
+ return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 6472a0685470..c071da34e081 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config H8300
def_bool y
+ select ARCH_32BIT_OFF_T
select GENERIC_ATOMIC64
select HAVE_UID16
select VIRT_TO_BUS
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 4003ddc616e1..f801f3708a89 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/
boot := arch/h8300/boot
-archmrproper:
-
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index cd400d353d18..961c1dc064e1 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -40,6 +40,7 @@ generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += serial.h
+generic-y += shmparam.h
generic-y += sizes.h
generic-y += spinlock.h
generic-y += timex.h
diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h
index 9adbf7e1aaa3..a407978f9f9f 100644
--- a/arch/h8300/include/asm/segment.h
+++ b/arch/h8300/include/asm/segment.h
@@ -33,12 +33,6 @@ static inline mm_segment_t get_fs(void)
return USER_DS;
}
-static inline mm_segment_t get_ds(void)
-{
- /* return the supervisor data space code */
- return KERNEL_DS;
-}
-
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif /* __ASSEMBLY__ */
diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h
index 628195823816..eb7bc0012af5 100644
--- a/arch/h8300/include/uapi/asm/unistd.h
+++ b/arch/h8300/include/uapi/asm/unistd.h
@@ -2,5 +2,7 @@
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index fb2fbfcfc532..ac441680dcc0 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,6 +4,7 @@ comment "Linux Kernel Configuration for Hexagon"
config HEXAGON
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_PREEMPT
select HAVE_OPROFILE
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 47c4da3d64a4..b25fd42aa0f4 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += rwsem.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
+generic-y += shmparam.h
generic-y += sizes.h
generic-y += topology.h
generic-y += trace_clock.h
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
index c91ca7d02461..432c4db1b623 100644
--- a/arch/hexagon/include/uapi/asm/unistd.h
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -30,9 +30,11 @@
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 320d86f192ee..171290f9f1de 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig
NM := $(CROSS_COMPILE)nm -B
READELF := $(CROSS_COMPILE)readelf
-export AWK
-
CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
OBJCOPYFLAGS := --strip-all
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 306d469e43da..89782ad3fb88 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -48,7 +48,6 @@
#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 0b08ebd2dfde..9ba6110b10b9 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -12,20 +12,6 @@
#define NR_syscalls __NR_syscalls /* length of syscall table */
-/*
- * The following defines stop scripts/checksyscalls.sh from complaining about
- * unimplemented system calls. Glibc provides for each of these by using
- * more modern equivalent system calls.
- */
-#define __IGNORE_fork /* clone() */
-#define __IGNORE_time /* gettimeofday() */
-#define __IGNORE_alarm /* setitimer(ITIMER_REAL, ... */
-#define __IGNORE_pause /* rt_sigprocmask(), rt_sigsuspend() */
-#define __IGNORE_utime /* utimes() */
-#define __IGNORE_getpgrp /* getpgid() */
-#define __IGNORE_vfork /* clone() */
-#define __IGNORE_umount2 /* umount() */
-
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_UTIME
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 5b819e53c397..b71c5f787783 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -2,3 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_64.h
generic-y += kvm_para.h
+generic-y += socket.h
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
deleted file mode 100644
index c872c4e6bafb..000000000000
--- a/arch/ia64/include/uapi/asm/socket.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_SOCKET_H
-#define _ASM_IA64_SOCKET_H
-
-/*
- * Socket related defines.
- *
- * Based on <asm-i386/socket.h>.
- *
- * Modified 1998-2000
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-#define SO_REUSEPORT 15
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-#define SO_GET_FILTER SO_ATTACH_FILTER
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_PROTOCOL 38
-#define SO_DOMAIN 39
-
-#define SO_RXQ_OVFL 40
-
-#define SO_WIFI_STATUS 41
-#define SCM_WIFI_STATUS SO_WIFI_STATUS
-#define SO_PEEK_OFF 42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS 43
-
-#define SO_LOCK_FILTER 44
-
-#define SO_SELECT_ERR_QUEUE 45
-
-#define SO_BUSY_POLL 46
-
-#define SO_MAX_PACING_RATE 47
-
-#define SO_BPF_EXTENSIONS 48
-
-#define SO_INCOMING_CPU 49
-
-#define SO_ATTACH_BPF 50
-#define SO_DETACH_BPF SO_DETACH_FILTER
-
-#define SO_ATTACH_REUSEPORT_CBPF 51
-#define SO_ATTACH_REUSEPORT_EBPF 52
-
-#define SO_CNX_ADVICE 53
-
-#define SCM_TIMESTAMPING_OPT_STATS 54
-
-#define SO_MEMINFO 55
-
-#define SO_INCOMING_NAPI_ID 56
-
-#define SO_COOKIE 57
-
-#define SCM_TIMESTAMPING_PKTINFO 58
-
-#define SO_PEERGROUPS 59
-
-#define SO_ZEROCOPY 60
-
-#define SO_TXTIME 61
-#define SCM_TXTIME SO_TXTIME
-
-#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index b2513922dcb5..013e0bcacc39 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -15,6 +15,8 @@
#define __NR_Linux 1024
+#define __NR_umount __NR_umount2
+
#include <asm/unistd_64.h>
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index 92c376279c6d..1315da6c7aeb 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -74,7 +74,7 @@ void __init build_cpu_to_node_map(void)
cpumask_clear(&node_to_cpu_mask[node]);
for_each_possible_early_cpu(cpu) {
- node = -1;
+ node = NUMA_NO_NODE;
for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
node = node_cpuid[i].nid;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 46bff1661836..7a969f4c3534 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -583,17 +583,6 @@ pfm_put_task(struct task_struct *task)
if (task != current) put_task_struct(task);
}
-static inline void
-pfm_reserve_page(unsigned long a)
-{
- SetPageReserved(vmalloc_to_page((void *)a));
-}
-static inline void
-pfm_unreserve_page(unsigned long a)
-{
- ClearPageReserved(vmalloc_to_page((void*)a));
-}
-
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
@@ -816,44 +805,6 @@ pfm_reset_msgq(pfm_context_t *ctx)
DPRINT(("ctx=%p msgq reset\n", ctx));
}
-static void *
-pfm_rvmalloc(unsigned long size)
-{
- void *mem;
- unsigned long addr;
-
- size = PAGE_ALIGN(size);
- mem = vzalloc(size);
- if (mem) {
- //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
- addr = (unsigned long)mem;
- while (size > 0) {
- pfm_reserve_page(addr);
- addr+=PAGE_SIZE;
- size-=PAGE_SIZE;
- }
- }
- return mem;
-}
-
-static void
-pfm_rvfree(void *mem, unsigned long size)
-{
- unsigned long addr;
-
- if (mem) {
- DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
- addr = (unsigned long) mem;
- while ((long) size > 0) {
- pfm_unreserve_page(addr);
- addr+=PAGE_SIZE;
- size-=PAGE_SIZE;
- }
- vfree(mem);
- }
- return;
-}
-
static pfm_context_t *
pfm_context_alloc(int ctx_flags)
{
@@ -1498,7 +1449,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
/*
* free the buffer
*/
- pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
+ vfree(ctx->ctx_smpl_hdr);
ctx->ctx_smpl_hdr = NULL;
ctx->ctx_smpl_size = 0UL;
@@ -2137,7 +2088,7 @@ doit:
* All memory free operations (especially for vmalloc'ed memory)
* MUST be done with interrupts ENABLED.
*/
- if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
+ vfree(smpl_buf_addr);
/*
* return the memory used by the context
@@ -2266,10 +2217,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
/*
* We do the easy to undo allocations first.
- *
- * pfm_rvmalloc(), clears the buffer, so there is no leak
*/
- smpl_buf = pfm_rvmalloc(size);
+ smpl_buf = vzalloc(size);
if (smpl_buf == NULL) {
DPRINT(("Can't allocate sampling buffer\n"));
return -ENOMEM;
@@ -2346,7 +2295,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
error:
vm_area_free(vma);
error_kmem:
- pfm_rvfree(smpl_buf, size);
+ vfree(smpl_buf);
return -ENOMEM;
}
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index b22203b40bfe..ab9cda5f6136 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -29,7 +29,7 @@
17 common getpid sys_getpid
18 common getppid sys_getppid
19 common mount sys_mount
-20 common umount sys_umount
+20 common umount2 sys_umount
21 common setuid sys_setuid
22 common getuid sys_getuid
23 common geteuid sys_geteuid
@@ -335,3 +335,12 @@
323 common copy_file_range sys_copy_file_range
324 common preadv2 sys_preadv2
325 common pwritev2 sys_pwritev2
+326 common statx sys_statx
+327 common io_pgetevents sys_io_pgetevents
+328 common perf_event_open sys_perf_event_open
+329 common seccomp sys_seccomp
+330 common pkey_mprotect sys_pkey_mprotect
+331 common pkey_alloc sys_pkey_alloc
+332 common pkey_free sys_pkey_free
+333 common rseq sys_rseq
+# 334 through 423 are reserved to sync up with other architectures
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 8a965784340c..f9c36750c6a4 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -227,7 +227,7 @@ void __init setup_per_cpu_areas(void)
* CPUs are put into groups according to node. Walk cpu_map
* and create new groups at node boundaries.
*/
- prev_node = -1;
+ prev_node = NUMA_NO_NODE;
ai->nr_groups = 0;
for (unit = 0; unit < nr_units; unit++) {
cpu = cpu_map[unit];
@@ -435,7 +435,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
void *ptr = NULL;
u8 best = 0xff;
- int bestnode = -1, node, anynode = 0;
+ int bestnode = NUMA_NO_NODE, node, anynode = 0;
for_each_online_node(node) {
if (node_isset(node, memory_less_mask))
@@ -447,7 +447,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
anynode = node;
}
- if (bestnode == -1)
+ if (bestnode == NUMA_NO_NODE)
bestnode = anynode;
ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index e173ea2ff395..b54206408f91 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -2,6 +2,7 @@
config M68K
bool
default y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 328ba83d735b..c01e103492fd 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -16,6 +16,7 @@ config ATARI
bool "Atari support"
depends on MMU
select MMU_MOTOROLA if MMU
+ select HAVE_ARCH_NVRAM_OPS
help
This option enables support for the 68000-based Atari series of
computers (including the TT, Falcon and Medusa). If you plan to use
@@ -26,6 +27,7 @@ config MAC
bool "Macintosh support"
depends on MMU
select MMU_MOTOROLA if MMU
+ select HAVE_ARCH_NVRAM_OPS
help
This option enables support for the Apple Macintosh series of
computers (yes, there is experimental support now, at least for part
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index f00ca53f8c14..482513b9af2c 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
KBUILD_AFLAGS += $(cpuflags-y)
-KBUILD_CFLAGS += $(cpuflags-y) -pipe
+KBUILD_CFLAGS += $(cpuflags-y)
+
+KBUILD_CFLAGS += -pipe -ffreestanding
+
ifdef CONFIG_MMU
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
diff --git a/arch/m68k/apollo/Makefile b/arch/m68k/apollo/Makefile
index 76a057962c38..01856a858fda 100644
--- a/arch/m68k/apollo/Makefile
+++ b/arch/m68k/apollo/Makefile
@@ -1,5 +1,5 @@
#
-# Makefile for Linux arch/m68k/amiga source directory
+# Makefile for Linux arch/m68k/apollo source directory
#
obj-y := config.o dn_ints.o
diff --git a/arch/m68k/atari/Makefile b/arch/m68k/atari/Makefile
index 0cac723306f9..0b86bb6cfa87 100644
--- a/arch/m68k/atari/Makefile
+++ b/arch/m68k/atari/Makefile
@@ -6,3 +6,5 @@ obj-y := config.o time.o debug.o ataints.o stdma.o \
atasound.o stram.o
obj-$(CONFIG_ATARI_KBD_CORE) += atakeyb.o
+
+obj-$(CONFIG_NVRAM:m=y) += nvram.o
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index bd96702a1ad0..4fcc4b1df1c0 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -148,7 +148,7 @@ int __init atari_parse_bootinfo(const struct bi_record *record)
/* Parse the Atari-specific switches= option. */
static int __init atari_switches_setup(char *str)
{
- char switches[strlen(str) + 1];
+ char switches[COMMAND_LINE_SIZE];
char *p;
int ovsc_shift;
char *args = switches;
diff --git a/arch/m68k/atari/nvram.c b/arch/m68k/atari/nvram.c
new file mode 100644
index 000000000000..7000d2443aa3
--- /dev/null
+++ b/arch/m68k/atari/nvram.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CMOS/NV-RAM driver for Atari. Adapted from drivers/char/nvram.c.
+ * Copyright (C) 1997 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ * idea by and with help from Richard Jelinek <rj@suse.de>
+ * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+ * Further contributions from Cesar Barros, Erik Gilling, Tim Hockin and
+ * Wim Van Sebroeck.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
+#include <linux/module.h>
+#include <linux/nvram.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+
+#define NVRAM_BYTES 50
+
+/* It is worth noting that these functions all access bytes of general
+ * purpose memory in the NVRAM - that is to say, they all add the
+ * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not
+ * know about the RTC cruft.
+ */
+
+/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
+ * rtc_lock held. Due to the index-port/data-port design of the RTC, we
+ * don't want two different things trying to get to it at once. (e.g. the
+ * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
+ */
+
+static unsigned char __nvram_read_byte(int i)
+{
+ return CMOS_READ(NVRAM_FIRST_BYTE + i);
+}
+
+/* This races nicely with trying to read with checksum checking */
+static void __nvram_write_byte(unsigned char c, int i)
+{
+ CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
+}
+
+/* On Ataris, the checksum is over all bytes except the checksum bytes
+ * themselves; these are at the very end.
+ */
+#define ATARI_CKS_RANGE_START 0
+#define ATARI_CKS_RANGE_END 47
+#define ATARI_CKS_LOC 48
+
+static int __nvram_check_checksum(void)
+{
+ int i;
+ unsigned char sum = 0;
+
+ for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) &&
+ (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff));
+}
+
+static void __nvram_set_checksum(void)
+{
+ int i;
+ unsigned char sum = 0;
+
+ for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ __nvram_write_byte(~sum, ATARI_CKS_LOC);
+ __nvram_write_byte(sum, ATARI_CKS_LOC + 1);
+}
+
+long atari_nvram_set_checksum(void)
+{
+ spin_lock_irq(&rtc_lock);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+}
+
+long atari_nvram_initialize(void)
+{
+ loff_t i;
+
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ __nvram_write_byte(0, i);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+}
+
+ssize_t atari_nvram_read(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ *p = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
+
+ *ppos = i;
+ return p - buf;
+}
+
+ssize_t atari_nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ __nvram_write_byte(*p, i);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+
+ *ppos = i;
+ return p - buf;
+}
+
+ssize_t atari_nvram_get_size(void)
+{
+ return NVRAM_BYTES;
+}
+
+#ifdef CONFIG_PROC_FS
+static struct {
+ unsigned char val;
+ const char *name;
+} boot_prefs[] = {
+ { 0x80, "TOS" },
+ { 0x40, "ASV" },
+ { 0x20, "NetBSD (?)" },
+ { 0x10, "Linux" },
+ { 0x00, "unspecified" },
+};
+
+static const char * const languages[] = {
+ "English (US)",
+ "German",
+ "French",
+ "English (UK)",
+ "Spanish",
+ "Italian",
+ "6 (undefined)",
+ "Swiss (French)",
+ "Swiss (German)",
+};
+
+static const char * const dateformat[] = {
+ "MM%cDD%cYY",
+ "DD%cMM%cYY",
+ "YY%cMM%cDD",
+ "YY%cDD%cMM",
+ "4 (undefined)",
+ "5 (undefined)",
+ "6 (undefined)",
+ "7 (undefined)",
+};
+
+static const char * const colors[] = {
+ "2", "4", "16", "256", "65536", "??", "??", "??"
+};
+
+static void atari_nvram_proc_read(unsigned char *nvram, struct seq_file *seq,
+ void *offset)
+{
+ int checksum;
+ int i;
+ unsigned int vmode;
+
+ spin_lock_irq(&rtc_lock);
+ checksum = __nvram_check_checksum();
+ spin_unlock_irq(&rtc_lock);
+
+ seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not ");
+
+ seq_puts(seq, "Boot preference : ");
+ for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i)
+ if (nvram[1] == boot_prefs[i].val) {
+ seq_printf(seq, "%s\n", boot_prefs[i].name);
+ break;
+ }
+ if (i < 0)
+ seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
+
+ seq_printf(seq, "SCSI arbitration : %s\n",
+ (nvram[16] & 0x80) ? "on" : "off");
+ seq_puts(seq, "SCSI host ID : ");
+ if (nvram[16] & 0x80)
+ seq_printf(seq, "%d\n", nvram[16] & 7);
+ else
+ seq_puts(seq, "n/a\n");
+
+ if (!MACH_IS_FALCON)
+ return;
+
+ seq_puts(seq, "OS language : ");
+ if (nvram[6] < ARRAY_SIZE(languages))
+ seq_printf(seq, "%s\n", languages[nvram[6]]);
+ else
+ seq_printf(seq, "%u (undefined)\n", nvram[6]);
+ seq_puts(seq, "Keyboard language: ");
+ if (nvram[7] < ARRAY_SIZE(languages))
+ seq_printf(seq, "%s\n", languages[nvram[7]]);
+ else
+ seq_printf(seq, "%u (undefined)\n", nvram[7]);
+ seq_puts(seq, "Date format : ");
+ seq_printf(seq, dateformat[nvram[8] & 7],
+ nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
+ seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
+ seq_puts(seq, "Boot delay : ");
+ if (nvram[10] == 0)
+ seq_puts(seq, "default\n");
+ else
+ seq_printf(seq, "%ds%s\n", nvram[10],
+ nvram[10] < 8 ? ", no memory test" : "");
+
+ vmode = (nvram[14] << 8) | nvram[15];
+ seq_printf(seq,
+ "Video mode : %s colors, %d columns, %s %s monitor\n",
+ colors[vmode & 7], vmode & 8 ? 80 : 40,
+ vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
+ seq_printf(seq,
+ " %soverscan, compat. mode %s%s\n",
+ vmode & 64 ? "" : "no ", vmode & 128 ? "on" : "off",
+ vmode & 256 ?
+ (vmode & 16 ? ", line doubling" : ", half screen") : "");
+}
+
+static int nvram_proc_read(struct seq_file *seq, void *offset)
+{
+ unsigned char contents[NVRAM_BYTES];
+ int i;
+
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ contents[i] = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
+
+ atari_nvram_proc_read(contents, seq, offset);
+
+ return 0;
+}
+
+static int __init atari_nvram_init(void)
+{
+ if (!(MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK)))
+ return -ENODEV;
+
+ if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) {
+ pr_err("nvram: can't create /proc/driver/nvram\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+device_initcall(atari_nvram_init);
+#endif /* CONFIG_PROC_FS */
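For reference, the checksum scheme implemented by __nvram_set_checksum() and __nvram_check_checksum() in the new file above (sum bytes 0..47, store the one's complement of the sum at offset 48 and the sum itself at offset 49) can be exercised in isolation; this is a standalone sketch over a plain 50-byte buffer, not kernel code:

#include <stdio.h>
#include <string.h>

#define NVRAM_BYTES		50
#define ATARI_CKS_RANGE_START	0
#define ATARI_CKS_RANGE_END	47
#define ATARI_CKS_LOC		48

static void set_checksum(unsigned char *nvram)
{
	unsigned char sum = 0;
	int i;

	for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
		sum += nvram[i];
	nvram[ATARI_CKS_LOC] = ~sum;		/* truncated to one byte */
	nvram[ATARI_CKS_LOC + 1] = sum;
}

static int check_checksum(const unsigned char *nvram)
{
	unsigned char sum = 0;
	int i;

	for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
		sum += nvram[i];
	return nvram[ATARI_CKS_LOC] == (unsigned char)~sum &&
	       nvram[ATARI_CKS_LOC + 1] == sum;
}

int main(void)
{
	unsigned char nvram[NVRAM_BYTES];

	memset(nvram, 0x42, sizeof(nvram));
	set_checksum(nvram);
	printf("checksum valid: %d\n", check_checksum(nvram));
	return 0;
}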
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index ad1185c68df7..6b3ab583c698 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -127,7 +127,7 @@ static struct fixed_phy_status nettel_fixed_phy_status __initdata = {
static int __init init_BSP(void)
{
m5272_uarts_init();
- fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status, -1);
+ fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
return 0;
}
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
index 131b4101ae5d..1ba10d57ddb1 100644
--- a/arch/m68k/configs/amcore_defconfig
+++ b/arch/m68k/configs/amcore_defconfig
@@ -55,27 +55,7 @@ CONFIG_MTD_UCLINUX=y
CONFIG_MTD_PLATRAM=y
CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_DM9000=y
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index bfd4648e76e3..525421ae277d 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -38,7 +38,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -304,7 +303,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -342,7 +340,6 @@ CONFIG_BLK_DEV_GAYLE=y
CONFIG_BLK_DEV_BUDDHA=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -399,44 +396,12 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CIRRUS is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
CONFIG_XSURF100=y
CONFIG_HYDRA=y
CONFIG_APNE=y
CONFIG_ZORRO8390=y
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -612,6 +577,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -621,6 +587,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 81112af1e478..db0e654a88d5 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -34,7 +34,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -300,7 +299,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -327,7 +325,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -378,35 +375,6 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -569,6 +537,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -578,6 +547,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 6d4b6023a2f0..1451168eb789 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -41,7 +41,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -307,7 +306,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -342,7 +340,6 @@ CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_FALCON_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -394,37 +391,9 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_ATARILANCE=y
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
CONFIG_NE2000=y
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
CONFIG_SMC91X=y
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -590,6 +559,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -599,6 +569,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 3306dff09d3c..b0d3609f5bb3 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -31,7 +31,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -297,7 +296,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -324,7 +322,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -376,35 +373,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_BVME6000_NET=y
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -561,6 +530,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -570,6 +540,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index c15e15b68d39..4ed7c151347c 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -33,7 +33,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -299,7 +298,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -326,7 +324,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -377,36 +374,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_HPLANCE=y
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -571,6 +539,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -580,6 +549,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 1a0ce0d11267..0dc544e1ce1f 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -32,7 +32,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -301,7 +300,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -333,7 +331,6 @@ CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -393,39 +390,10 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MACMACE=y
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
CONFIG_MAC89x0=y
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MACSONIC=y
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
CONFIG_MAC8390=y
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -593,6 +561,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -602,6 +571,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 9758839b74bd..5a7b7b0d6e72 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -52,7 +52,6 @@ CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -321,7 +320,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -366,7 +364,6 @@ CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -437,9 +434,6 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
CONFIG_ATARILANCE=y
@@ -447,43 +441,17 @@ CONFIG_HPLANCE=y
CONFIG_MVME147_NET=y
CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
CONFIG_MAC89x0=y
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_BVME6000_NET=y
CONFIG_MVME16x_NET=y
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MACSONIC=y
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
CONFIG_XSURF100=y
CONFIG_HYDRA=y
CONFIG_MAC8390=y
CONFIG_NE2000=y
CONFIG_APNE=y
CONFIG_ZORRO8390=y
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
CONFIG_SMC91X=y
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -675,6 +643,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -684,6 +653,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index f5526731ccdb..71eb9be1803b 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -30,7 +30,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -296,7 +295,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -323,7 +321,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -375,36 +372,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MVME147_NET=y
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -561,6 +529,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -570,6 +539,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 5db58ff4b107..ea2ebd4241c0 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -31,7 +31,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -297,7 +296,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -324,7 +322,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -376,35 +373,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_MVME16x_NET=y
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -561,6 +530,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -570,6 +540,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index b645230da128..cef6dc47c725 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -32,7 +32,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -298,7 +297,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -332,7 +330,6 @@ CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -383,40 +380,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_VENDOR_AMD is not set
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CIRRUS is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
CONFIG_NE2000=y
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -584,6 +548,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -593,6 +558,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 4afe2100947e..69f2282dc4e9 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -28,7 +28,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -294,7 +293,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -321,7 +319,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -373,36 +370,8 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_SUN3_82586=y
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -563,6 +532,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -572,6 +542,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index bd22893d0dc3..e91267e868b2 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -28,7 +28,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
-CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
@@ -294,7 +293,6 @@ CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
@@ -321,7 +319,6 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -373,36 +370,7 @@ CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
-# CONFIG_NET_VENDOR_ALACRITECH is not set
-# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
-# CONFIG_NET_VENDOR_AQUANTIA is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_AURORA is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CADENCE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CORTINA is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HUAWEI is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROSEMI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
-# CONFIG_NET_VENDOR_SOCIONEXT is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -563,6 +531,7 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -572,6 +541,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_STREEBOG=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 38049357d6d3..40712e49381b 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -155,18 +155,22 @@ out:
static int __init nfhd_init(void)
{
u32 blocks, bsize;
+ int ret;
int i;
nfhd_id = nf_get_id("XHDI");
if (!nfhd_id)
return -ENODEV;
- major_num = register_blkdev(major_num, "nfhd");
- if (major_num <= 0) {
+ ret = register_blkdev(major_num, "nfhd");
+ if (ret < 0) {
pr_warn("nfhd: unable to get major number\n");
- return major_num;
+ return ret;
}
+ if (!major_num)
+ major_num = ret;
+
for (i = NFHD_DEV_OFFSET; i < 24; i++) {
if (nfhd_get_capacity(i, 0, &blocks, &bsize))
continue;
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 9f1dd26903e3..95f8f631c4df 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
+generic-y += shmparam.h
generic-y += spinlock.h
generic-y += topology.h
generic-y += trace_clock.h
diff --git a/arch/m68k/include/asm/a.out-core.h b/arch/m68k/include/asm/a.out-core.h
deleted file mode 100644
index ae91ea6bb303..000000000000
--- a/arch/m68k/include/asm/a.out-core.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/mm_types.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
- struct switch_stack *sw;
-
-/* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk +
- (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
- dump->u_ar0 = offsetof(struct user, regs);
- sw = ((struct switch_stack *)regs) - 1;
- dump->regs.d1 = regs->d1;
- dump->regs.d2 = regs->d2;
- dump->regs.d3 = regs->d3;
- dump->regs.d4 = regs->d4;
- dump->regs.d5 = regs->d5;
- dump->regs.d6 = sw->d6;
- dump->regs.d7 = sw->d7;
- dump->regs.a0 = regs->a0;
- dump->regs.a1 = regs->a1;
- dump->regs.a2 = regs->a2;
- dump->regs.a3 = sw->a3;
- dump->regs.a4 = sw->a4;
- dump->regs.a5 = sw->a5;
- dump->regs.a6 = sw->a6;
- dump->regs.d0 = regs->d0;
- dump->regs.orig_d0 = regs->orig_d0;
- dump->regs.stkadj = regs->stkadj;
- dump->regs.sr = regs->sr;
- dump->regs.pc = regs->pc;
- dump->regs.fmtvec = (regs->format << 12) | regs->vector;
- /* dump floating point stuff */
- dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index 9000b249d225..533008262b69 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -33,6 +33,12 @@ extern int atari_dont_touch_floppy_select;
extern int atari_SCC_reset_done;
+extern ssize_t atari_nvram_read(char *, size_t, loff_t *);
+extern ssize_t atari_nvram_write(char *, size_t, loff_t *);
+extern ssize_t atari_nvram_get_size(void);
+extern long atari_nvram_set_checksum(void);
+extern long atari_nvram_initialize(void);
+
/* convenience macros for testing machine type */
#define MACH_IS_ST ((atari_mch_cookie >> 16) == ATARI_MCH_ST)
#define MACH_IS_STE ((atari_mch_cookie >> 16) == ATARI_MCH_STE && \
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 08cee11180e6..d9a08bed4b12 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -19,6 +19,10 @@ extern void mac_init_IRQ(void);
extern void mac_irq_enable(struct irq_data *data);
extern void mac_irq_disable(struct irq_data *data);
+extern unsigned char mac_pram_read_byte(int);
+extern void mac_pram_write_byte(unsigned char, int);
+extern ssize_t mac_pram_get_size(void);
+
/*
* Macintosh Table
*/
diff --git a/arch/m68k/include/asm/macints.h b/arch/m68k/include/asm/macints.h
index cddb2d3ea49b..4da172bd048c 100644
--- a/arch/m68k/include/asm/macints.h
+++ b/arch/m68k/include/asm/macints.h
@@ -121,7 +121,4 @@
#define SLOT2IRQ(x) (x + 47)
#define IRQ2SLOT(x) (x - 47)
-#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */
-#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */
-
#endif /* asm/macints.h */
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
index 0b4cc1e079b5..c6686559e9b7 100644
--- a/arch/m68k/include/asm/segment.h
+++ b/arch/m68k/include/asm/segment.h
@@ -45,16 +45,9 @@ static inline void set_fs(mm_segment_t val)
: /* no outputs */ : "r" (val.seg) : "memory");
}
-static inline mm_segment_t get_ds(void)
-{
- /* return the supervisor data space code */
- return KERNEL_DS;
-}
-
#else
#define USER_DS MAKE_MM_SEG(TASK_SIZE)
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#endif
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 49d5de18646b..2e0047cf86f8 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -15,8 +15,8 @@
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_TIME32
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index ad0195cbe042..528484feff80 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -24,6 +24,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
+#include <linux/nvram.h>
#include <linux/initrd.h>
#include <asm/bootinfo.h>
@@ -37,13 +38,14 @@
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
-#ifdef CONFIG_ATARI
#include <asm/atarihw.h>
+#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#ifdef CONFIG_SUN3X
#include <asm/dvma.h>
#endif
+#include <asm/macintosh.h>
#include <asm/natfeat.h>
#if !FPSTATESIZE || !NR_IRQS
@@ -547,3 +549,81 @@ static int __init adb_probe_sync_enable (char *str) {
__setup("adb_sync", adb_probe_sync_enable);
#endif /* CONFIG_ADB */
+
+#if IS_ENABLED(CONFIG_NVRAM)
+#ifdef CONFIG_MAC
+static unsigned char m68k_nvram_read_byte(int addr)
+{
+ if (MACH_IS_MAC)
+ return mac_pram_read_byte(addr);
+ return 0xff;
+}
+
+static void m68k_nvram_write_byte(unsigned char val, int addr)
+{
+ if (MACH_IS_MAC)
+ mac_pram_write_byte(val, addr);
+}
+#endif /* CONFIG_MAC */
+
+#ifdef CONFIG_ATARI
+static ssize_t m68k_nvram_read(char *buf, size_t count, loff_t *ppos)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_read(buf, count, ppos);
+ else if (MACH_IS_MAC)
+ return nvram_read_bytes(buf, count, ppos);
+ return -EINVAL;
+}
+
+static ssize_t m68k_nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_write(buf, count, ppos);
+ else if (MACH_IS_MAC)
+ return nvram_write_bytes(buf, count, ppos);
+ return -EINVAL;
+}
+
+static long m68k_nvram_set_checksum(void)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_set_checksum();
+ return -EINVAL;
+}
+
+static long m68k_nvram_initialize(void)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_initialize();
+ return -EINVAL;
+}
+#endif /* CONFIG_ATARI */
+
+static ssize_t m68k_nvram_get_size(void)
+{
+ if (MACH_IS_ATARI)
+ return atari_nvram_get_size();
+ else if (MACH_IS_MAC)
+ return mac_pram_get_size();
+ return -ENODEV;
+}
+
+/* Atari device drivers call .read (to get checksum validation) whereas
+ * Mac and PowerMac device drivers just use .read_byte.
+ */
+const struct nvram_ops arch_nvram_ops = {
+#ifdef CONFIG_MAC
+ .read_byte = m68k_nvram_read_byte,
+ .write_byte = m68k_nvram_write_byte,
+#endif
+#ifdef CONFIG_ATARI
+ .read = m68k_nvram_read,
+ .write = m68k_nvram_write,
+ .set_checksum = m68k_nvram_set_checksum,
+ .initialize = m68k_nvram_initialize,
+#endif
+ .get_size = m68k_nvram_get_size,
+};
+EXPORT_SYMBOL(arch_nvram_ops);
+#endif /* CONFIG_NVRAM */
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index e2a9421c5797..87e7f3639839 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -651,7 +651,8 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
regs->vector = formatvec & 0xfff;
} else {
struct switch_stack *sw = (struct switch_stack *)regs - 1;
- unsigned long buf[fsize / 2]; /* yes, twice as much */
+ /* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */
+ unsigned long buf[sizeof(((struct frame *)0)->un) / 2];
/* that'll make sure that expansion won't crap over data */
if (copy_from_user(buf + fsize / 4, fp, fsize))
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 1a95c4a1bc0d..125c14178979 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -20,7 +20,7 @@
10 common unlink sys_unlink
11 common execve sys_execve
12 common chdir sys_chdir
-13 common time sys_time
+13 common time sys_time32
14 common mknod sys_mknod
15 common chmod sys_chmod
16 common chown sys_chown16
@@ -32,12 +32,12 @@
22 common umount sys_oldumount
23 common setuid sys_setuid16
24 common getuid sys_getuid16
-25 common stime sys_stime
+25 common stime sys_stime32
26 common ptrace sys_ptrace
27 common alarm sys_alarm
28 common oldfstat sys_fstat
29 common pause sys_pause
-30 common utime sys_utime
+30 common utime sys_utime32
# 31 was stty
# 32 was gtty
33 common access sys_access
@@ -131,7 +131,7 @@
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
123 common cacheflush sys_cacheflush
-124 common adjtimex sys_adjtimex
+124 common adjtimex sys_adjtimex_time32
125 common mprotect sys_mprotect
126 common sigprocmask sys_sigprocmask
127 common create_module sys_ni_syscall
@@ -168,8 +168,8 @@
158 common sched_yield sys_sched_yield
159 common sched_get_priority_max sys_sched_get_priority_max
160 common sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval
-162 common nanosleep sys_nanosleep
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
163 common mremap sys_mremap
164 common setresuid sys_setresuid16
165 common getresuid sys_getresuid16
@@ -184,7 +184,7 @@
174 common rt_sigaction sys_rt_sigaction
175 common rt_sigprocmask sys_rt_sigprocmask
176 common rt_sigpending sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32
178 common rt_sigqueueinfo sys_rt_sigqueueinfo
179 common rt_sigsuspend sys_rt_sigsuspend
180 common pread64 sys_pread64
@@ -242,7 +242,7 @@
232 common removexattr sys_removexattr
233 common lremovexattr sys_lremovexattr
234 common fremovexattr sys_fremovexattr
-235 common futex sys_futex
+235 common futex sys_futex_time32
236 common sendfile64 sys_sendfile64
237 common mincore sys_mincore
238 common madvise sys_madvise
@@ -250,7 +250,7 @@
240 common readahead sys_readahead
241 common io_setup sys_io_setup
242 common io_destroy sys_io_destroy
-243 common io_getevents sys_io_getevents
+243 common io_getevents sys_io_getevents_time32
244 common io_submit sys_io_submit
245 common io_cancel sys_io_cancel
246 common fadvise64 sys_fadvise64
@@ -262,26 +262,26 @@
252 common remap_file_pages sys_remap_file_pages
253 common set_tid_address sys_set_tid_address
254 common timer_create sys_timer_create
-255 common timer_settime sys_timer_settime
-256 common timer_gettime sys_timer_gettime
+255 common timer_settime sys_timer_settime32
+256 common timer_gettime sys_timer_gettime32
257 common timer_getoverrun sys_timer_getoverrun
258 common timer_delete sys_timer_delete
-259 common clock_settime sys_clock_settime
-260 common clock_gettime sys_clock_gettime
-261 common clock_getres sys_clock_getres
-262 common clock_nanosleep sys_clock_nanosleep
+259 common clock_settime sys_clock_settime32
+260 common clock_gettime sys_clock_gettime32
+261 common clock_getres sys_clock_getres_time32
+262 common clock_nanosleep sys_clock_nanosleep_time32
263 common statfs64 sys_statfs64
264 common fstatfs64 sys_fstatfs64
265 common tgkill sys_tgkill
-266 common utimes sys_utimes
+266 common utimes sys_utimes_time32
267 common fadvise64_64 sys_fadvise64_64
268 common mbind sys_mbind
269 common get_mempolicy sys_get_mempolicy
270 common set_mempolicy sys_set_mempolicy
271 common mq_open sys_mq_open
272 common mq_unlink sys_mq_unlink
-273 common mq_timedsend sys_mq_timedsend
-274 common mq_timedreceive sys_mq_timedreceive
+273 common mq_timedsend sys_mq_timedsend_time32
+274 common mq_timedreceive sys_mq_timedreceive_time32
275 common mq_notify sys_mq_notify
276 common mq_getsetattr sys_mq_getsetattr
277 common waitid sys_waitid
@@ -299,7 +299,7 @@
289 common mkdirat sys_mkdirat
290 common mknodat sys_mknodat
291 common fchownat sys_fchownat
-292 common futimesat sys_futimesat
+292 common futimesat sys_futimesat_time32
293 common fstatat64 sys_fstatat64
294 common unlinkat sys_unlinkat
295 common renameat sys_renameat
@@ -308,8 +308,8 @@
298 common readlinkat sys_readlinkat
299 common fchmodat sys_fchmodat
300 common faccessat sys_faccessat
-301 common pselect6 sys_pselect6
-302 common ppoll sys_ppoll
+301 common pselect6 sys_pselect6_time32
+302 common ppoll sys_ppoll_time32
303 common unshare sys_unshare
304 common set_robust_list sys_set_robust_list
305 common get_robust_list sys_get_robust_list
@@ -323,13 +323,13 @@
313 common kexec_load sys_kexec_load
314 common getcpu sys_getcpu
315 common epoll_pwait sys_epoll_pwait
-316 common utimensat sys_utimensat
+316 common utimensat sys_utimensat_time32
317 common signalfd sys_signalfd
318 common timerfd_create sys_timerfd_create
319 common eventfd sys_eventfd
320 common fallocate sys_fallocate
-321 common timerfd_settime sys_timerfd_settime
-322 common timerfd_gettime sys_timerfd_gettime
+321 common timerfd_settime sys_timerfd_settime32
+322 common timerfd_gettime sys_timerfd_gettime32
323 common signalfd4 sys_signalfd4
324 common eventfd2 sys_eventfd2
325 common epoll_create1 sys_epoll_create1
@@ -349,7 +349,7 @@
339 common prlimit64 sys_prlimit64
340 common name_to_handle_at sys_name_to_handle_at
341 common open_by_handle_at sys_open_by_handle_at
-342 common clock_adjtime sys_clock_adjtime
+342 common clock_adjtime sys_clock_adjtime32
343 common syncfs sys_syncfs
344 common setns sys_setns
345 common process_vm_readv sys_process_vm_readv
@@ -378,7 +378,7 @@
368 common recvfrom sys_recvfrom
369 common recvmsg sys_recvmsg
370 common shutdown sys_shutdown
-371 common recvmmsg sys_recvmmsg
+371 common recvmmsg sys_recvmmsg_time32
372 common sendmmsg sys_sendmmsg
373 common userfaultfd sys_userfaultfd
374 common membarrier sys_membarrier
@@ -387,3 +387,39 @@
377 common preadv2 sys_preadv2
378 common pwritev2 sys_pwritev2
379 common statx sys_statx
+380 common seccomp sys_seccomp
+381 common pkey_mprotect sys_pkey_mprotect
+382 common pkey_alloc sys_pkey_alloc
+383 common pkey_free sys_pkey_free
+384 common rseq sys_rseq
+# room for arch specific calls
+393 common semget sys_semget
+394 common semctl sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl
+397 common shmat sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd
+401 common msgrcv sys_msgrcv
+402 common msgctl sys_msgctl
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 71c4735a31ee..90f4e9ca1276 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -36,8 +36,9 @@
static void (*rom_reset)(void);
+#if IS_ENABLED(CONFIG_NVRAM)
#ifdef CONFIG_ADB_CUDA
-static __u8 cuda_read_pram(int offset)
+static unsigned char cuda_pram_read_byte(int offset)
{
struct adb_request req;
@@ -49,7 +50,7 @@ static __u8 cuda_read_pram(int offset)
return req.reply[3];
}
-static void cuda_write_pram(int offset, __u8 data)
+static void cuda_pram_write_byte(unsigned char data, int offset)
{
struct adb_request req;
@@ -62,29 +63,29 @@ static void cuda_write_pram(int offset, __u8 data)
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
-static __u8 pmu_read_pram(int offset)
+static unsigned char pmu_pram_read_byte(int offset)
{
struct adb_request req;
- if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM,
- (offset >> 8) & 0xFF, offset & 0xFF) < 0)
+ if (pmu_request(&req, NULL, 3, PMU_READ_XPRAM,
+ offset & 0xFF, 1) < 0)
return 0;
- while (!req.complete)
- pmu_poll();
- return req.reply[3];
+ pmu_wait_complete(&req);
+
+ return req.reply[0];
}
-static void pmu_write_pram(int offset, __u8 data)
+static void pmu_pram_write_byte(unsigned char data, int offset)
{
struct adb_request req;
- if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM,
- (offset >> 8) & 0xFF, offset & 0xFF, data) < 0)
+ if (pmu_request(&req, NULL, 4, PMU_WRITE_XPRAM,
+ offset & 0xFF, 1, data) < 0)
return;
- while (!req.complete)
- pmu_poll();
+ pmu_wait_complete(&req);
}
#endif /* CONFIG_ADB_PMU */
+#endif /* CONFIG_NVRAM */
/*
* VIA PRAM/RTC access routines
@@ -93,7 +94,7 @@ static void pmu_write_pram(int offset, __u8 data)
* the RTC should be enabled.
*/
-static __u8 via_pram_readbyte(void)
+static __u8 via_rtc_recv(void)
{
int i, reg;
__u8 data;
@@ -120,7 +121,7 @@ static __u8 via_pram_readbyte(void)
return data;
}
-static void via_pram_writebyte(__u8 data)
+static void via_rtc_send(__u8 data)
{
int i, reg, bit;
@@ -137,6 +138,31 @@ static void via_pram_writebyte(__u8 data)
}
/*
+ * These values can be found in Inside Macintosh vol. III ch. 2
+ * which has a description of the RTC chip in the original Mac.
+ */
+
+#define RTC_FLG_READ BIT(7)
+#define RTC_FLG_WRITE_PROTECT BIT(7)
+#define RTC_CMD_READ(r) (RTC_FLG_READ | (r << 2))
+#define RTC_CMD_WRITE(r) (r << 2)
+#define RTC_REG_SECONDS_0 0
+#define RTC_REG_SECONDS_1 1
+#define RTC_REG_SECONDS_2 2
+#define RTC_REG_SECONDS_3 3
+#define RTC_REG_WRITE_PROTECT 13
+
+/*
+ * Inside Mac has no information about two-byte RTC commands but
+ * the MAME/MESS source code has the essentials.
+ */
+
+#define RTC_REG_XPRAM 14
+#define RTC_CMD_XPRAM_READ (RTC_CMD_READ(RTC_REG_XPRAM) << 8)
+#define RTC_CMD_XPRAM_WRITE (RTC_CMD_WRITE(RTC_REG_XPRAM) << 8)
+#define RTC_CMD_XPRAM_ARG(a) (((a & 0xE0) << 3) | ((a & 0x1F) << 2))
+
+/*
* Execute a VIA PRAM/RTC command. For read commands
* data should point to a one-byte buffer for the
* resulting data. For write commands it should point
@@ -145,29 +171,33 @@ static void via_pram_writebyte(__u8 data)
* This function disables all interrupts while running.
*/
-static void via_pram_command(int command, __u8 *data)
+static void via_rtc_command(int command, __u8 *data)
{
unsigned long flags;
int is_read;
local_irq_save(flags);
+ /* The least significant bits must be 0b01 according to Inside Mac */
+
+ command = (command & ~3) | 1;
+
/* Enable the RTC and make sure the strobe line is high */
via1[vBufB] = (via1[vBufB] | VIA1B_vRTCClk) & ~VIA1B_vRTCEnb;
if (command & 0xFF00) { /* extended (two-byte) command */
- via_pram_writebyte((command & 0xFF00) >> 8);
- via_pram_writebyte(command & 0xFF);
- is_read = command & 0x8000;
+ via_rtc_send((command & 0xFF00) >> 8);
+ via_rtc_send(command & 0xFF);
+ is_read = command & (RTC_FLG_READ << 8);
} else { /* one-byte command */
- via_pram_writebyte(command);
- is_read = command & 0x80;
+ via_rtc_send(command);
+ is_read = command & RTC_FLG_READ;
}
if (is_read) {
- *data = via_pram_readbyte();
+ *data = via_rtc_recv();
} else {
- via_pram_writebyte(*data);
+ via_rtc_send(*data);
}
/* All done, disable the RTC */
@@ -177,14 +207,30 @@ static void via_pram_command(int command, __u8 *data)
local_irq_restore(flags);
}
-static __u8 via_read_pram(int offset)
+#if IS_ENABLED(CONFIG_NVRAM)
+static unsigned char via_pram_read_byte(int offset)
{
- return 0;
+ unsigned char temp;
+
+ via_rtc_command(RTC_CMD_XPRAM_READ | RTC_CMD_XPRAM_ARG(offset), &temp);
+
+ return temp;
}
-static void via_write_pram(int offset, __u8 data)
+static void via_pram_write_byte(unsigned char data, int offset)
{
+ unsigned char temp;
+
+ temp = 0x55;
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
+
+ temp = data;
+ via_rtc_command(RTC_CMD_XPRAM_WRITE | RTC_CMD_XPRAM_ARG(offset), &temp);
+
+ temp = 0x55 | RTC_FLG_WRITE_PROTECT;
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
}
+#endif /* CONFIG_NVRAM */
/*
* Return the current time in seconds since January 1, 1904.
@@ -201,10 +247,10 @@ static time64_t via_read_time(void)
} result, last_result;
int count = 1;
- via_pram_command(0x81, &last_result.cdata[3]);
- via_pram_command(0x85, &last_result.cdata[2]);
- via_pram_command(0x89, &last_result.cdata[1]);
- via_pram_command(0x8D, &last_result.cdata[0]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_0), &last_result.cdata[3]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_1), &last_result.cdata[2]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_2), &last_result.cdata[1]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_3), &last_result.cdata[0]);
/*
* The NetBSD guys say to loop until you get the same reading
@@ -212,10 +258,14 @@ static time64_t via_read_time(void)
*/
while (1) {
- via_pram_command(0x81, &result.cdata[3]);
- via_pram_command(0x85, &result.cdata[2]);
- via_pram_command(0x89, &result.cdata[1]);
- via_pram_command(0x8D, &result.cdata[0]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_0),
+ &result.cdata[3]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_1),
+ &result.cdata[2]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_2),
+ &result.cdata[1]);
+ via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_3),
+ &result.cdata[0]);
if (result.idata == last_result.idata)
return (time64_t)result.idata - RTC_OFFSET;
@@ -254,18 +304,18 @@ static void via_set_rtc_time(struct rtc_time *tm)
/* Clear the write protect bit */
temp = 0x55;
- via_pram_command(0x35, &temp);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
data.idata = lower_32_bits(time + RTC_OFFSET);
- via_pram_command(0x01, &data.cdata[3]);
- via_pram_command(0x05, &data.cdata[2]);
- via_pram_command(0x09, &data.cdata[1]);
- via_pram_command(0x0D, &data.cdata[0]);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_0), &data.cdata[3]);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_1), &data.cdata[2]);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_2), &data.cdata[1]);
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_3), &data.cdata[0]);
/* Set the write protect bit */
- temp = 0xD5;
- via_pram_command(0x35, &temp);
+ temp = 0x55 | RTC_FLG_WRITE_PROTECT;
+ via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp);
}
static void via_shutdown(void)
@@ -326,66 +376,58 @@ static void cuda_shutdown(void)
*-------------------------------------------------------------------
*/
-void mac_pram_read(int offset, __u8 *buffer, int len)
+#if IS_ENABLED(CONFIG_NVRAM)
+unsigned char mac_pram_read_byte(int addr)
{
- __u8 (*func)(int);
- int i;
-
switch (macintosh_config->adb_type) {
case MAC_ADB_IOP:
case MAC_ADB_II:
case MAC_ADB_PB1:
- func = via_read_pram;
- break;
+ return via_pram_read_byte(addr);
#ifdef CONFIG_ADB_CUDA
case MAC_ADB_EGRET:
case MAC_ADB_CUDA:
- func = cuda_read_pram;
- break;
+ return cuda_pram_read_byte(addr);
#endif
#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
- func = pmu_read_pram;
- break;
+ return pmu_pram_read_byte(addr);
#endif
default:
- return;
- }
- for (i = 0 ; i < len ; i++) {
- buffer[i] = (*func)(offset++);
+ return 0xFF;
}
}
-void mac_pram_write(int offset, __u8 *buffer, int len)
+void mac_pram_write_byte(unsigned char val, int addr)
{
- void (*func)(int, __u8);
- int i;
-
switch (macintosh_config->adb_type) {
case MAC_ADB_IOP:
case MAC_ADB_II:
case MAC_ADB_PB1:
- func = via_write_pram;
+ via_pram_write_byte(val, addr);
break;
#ifdef CONFIG_ADB_CUDA
case MAC_ADB_EGRET:
case MAC_ADB_CUDA:
- func = cuda_write_pram;
+ cuda_pram_write_byte(val, addr);
break;
#endif
#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
- func = pmu_write_pram;
+ pmu_pram_write_byte(val, addr);
break;
#endif
default:
- return;
- }
- for (i = 0 ; i < len ; i++) {
- (*func)(offset++, buffer[i]);
+ break;
}
}
+ssize_t mac_pram_get_size(void)
+{
+ return 256;
+}
+#endif /* CONFIG_NVRAM */
+
void mac_poweroff(void)
{
if (oss_present) {
@@ -410,9 +452,8 @@ void mac_poweroff(void)
void mac_reset(void)
{
- if (macintosh_config->adb_type == MAC_ADB_II) {
- unsigned long flags;
-
+ if (macintosh_config->adb_type == MAC_ADB_II &&
+ macintosh_config->ident != MAC_MODEL_SE30) {
/* need ROMBASE in booter */
/* indeed, plus need to MAP THE ROM !! */
@@ -422,17 +463,8 @@ void mac_reset(void)
/* works on some */
rom_reset = (void *) (mac_bi_data.rombase + 0xa);
- if (macintosh_config->ident == MAC_MODEL_SE30) {
- /*
- * MSch: Machines known to crash on ROM reset ...
- */
- } else {
- local_irq_save(flags);
-
- rom_reset();
-
- local_irq_restore(flags);
- }
+ local_irq_disable();
+ rom_reset();
#ifdef CONFIG_ADB_CUDA
} else if (macintosh_config->adb_type == MAC_ADB_EGRET ||
macintosh_config->adb_type == MAC_ADB_CUDA) {
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index acdabbeecfd2..0b0289459173 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -189,7 +189,6 @@ void __init via_init(void)
/*
* SE/30: disable video IRQ
- * XXX: testing for SE/30 VBL
*/
if (macintosh_config->ident == MAC_MODEL_SE30) {
@@ -197,13 +196,18 @@ void __init via_init(void)
via1[vBufB] |= 0x40;
}
- /*
- * Set the RTC bits to a known state: all lines to outputs and
- * RTC disabled (yes that's 0 to enable and 1 to disable).
- */
-
- via1[vDirB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData);
- via1[vBufB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk);
+ switch (macintosh_config->adb_type) {
+ case MAC_ADB_IOP:
+ case MAC_ADB_II:
+ case MAC_ADB_PB1:
+ /*
+ * Set the RTC bits to a known state: all lines to outputs and
+ * RTC disabled (yes that's 0 to enable and 1 to disable).
+ */
+ via1[vDirB] |= VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData;
+ via1[vBufB] |= VIA1B_vRTCEnb | VIA1B_vRTCClk;
+ break;
+ }
/* Everything below this point is VIA2/RBV only... */
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index b86a2e21693b..227c04fe60d2 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -51,7 +51,7 @@ void __init init_pointer_table(unsigned long ptable)
pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
/* unreserve the page so it's possible to free that page */
- PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
+ __ClearPageReserved(PD_PAGE(dp));
init_page_count(PD_PAGE(dp));
return;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 58aff2653d86..a51b965b3b82 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,5 +1,6 @@
config MICROBLAZE
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_NO_SWAP
select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 9c7d1d25bf3d..791cc8d54d0a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += serial.h
+generic-y += shmparam.h
generic-y += syscalls.h
generic-y += topology.h
generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index dbfea093a7c7..bff2a71c828a 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -42,7 +42,6 @@
# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
# endif
-# define get_ds() (KERNEL_DS)
# define get_fs() (current_thread_info()->addr_limit)
# define set_fs(val) (current_thread_info()->addr_limit = (val))
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 9b7c2c4eaf12..d79d35ac6253 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -21,8 +21,8 @@
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_TIME32
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index a24d09e937dd..8ee3a8c18498 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -20,7 +20,7 @@
10 common unlink sys_unlink
11 common execve sys_execve
12 common chdir sys_chdir
-13 common time sys_time
+13 common time sys_time32
14 common mknod sys_mknod
15 common chmod sys_chmod
16 common lchown sys_lchown
@@ -32,12 +32,12 @@
22 common umount sys_oldumount
23 common setuid sys_setuid
24 common getuid sys_getuid
-25 common stime sys_stime
+25 common stime sys_stime32
26 common ptrace sys_ptrace
27 common alarm sys_alarm
28 common oldfstat sys_ni_syscall
29 common pause sys_pause
-30 common utime sys_utime
+30 common utime sys_utime32
31 common stty sys_ni_syscall
32 common gtty sys_ni_syscall
33 common access sys_access
@@ -131,7 +131,7 @@
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
123 common modify_ldt sys_ni_syscall
-124 common adjtimex sys_adjtimex
+124 common adjtimex sys_adjtimex_time32
125 common mprotect sys_mprotect
126 common sigprocmask sys_sigprocmask
127 common create_module sys_ni_syscall
@@ -168,8 +168,8 @@
158 common sched_yield sys_sched_yield
159 common sched_get_priority_max sys_sched_get_priority_max
160 common sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval
-162 common nanosleep sys_nanosleep
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
163 common mremap sys_mremap
164 common setresuid sys_setresuid
165 common getresuid sys_getresuid
@@ -184,7 +184,7 @@
174 common rt_sigaction sys_rt_sigaction
175 common rt_sigprocmask sys_rt_sigprocmask
176 common rt_sigpending sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32
178 common rt_sigqueueinfo sys_rt_sigqueueinfo
179 common rt_sigsuspend sys_rt_sigsuspend
180 common pread64 sys_pread64
@@ -247,14 +247,14 @@
237 common fremovexattr sys_fremovexattr
238 common tkill sys_tkill
239 common sendfile64 sys_sendfile64
-240 common futex sys_futex
+240 common futex sys_futex_time32
241 common sched_setaffinity sys_sched_setaffinity
242 common sched_getaffinity sys_sched_getaffinity
243 common set_thread_area sys_ni_syscall
244 common get_thread_area sys_ni_syscall
245 common io_setup sys_io_setup
246 common io_destroy sys_io_destroy
-247 common io_getevents sys_io_getevents
+247 common io_getevents sys_io_getevents_time32
248 common io_submit sys_io_submit
249 common io_cancel sys_io_cancel
250 common fadvise64 sys_fadvise64
@@ -267,18 +267,18 @@
257 common remap_file_pages sys_remap_file_pages
258 common set_tid_address sys_set_tid_address
259 common timer_create sys_timer_create
-260 common timer_settime sys_timer_settime
-261 common timer_gettime sys_timer_gettime
+260 common timer_settime sys_timer_settime32
+261 common timer_gettime sys_timer_gettime32
262 common timer_getoverrun sys_timer_getoverrun
263 common timer_delete sys_timer_delete
-264 common clock_settime sys_clock_settime
-265 common clock_gettime sys_clock_gettime
-266 common clock_getres sys_clock_getres
-267 common clock_nanosleep sys_clock_nanosleep
+264 common clock_settime sys_clock_settime32
+265 common clock_gettime sys_clock_gettime32
+266 common clock_getres sys_clock_getres_time32
+267 common clock_nanosleep sys_clock_nanosleep_time32
268 common statfs64 sys_statfs64
269 common fstatfs64 sys_fstatfs64
270 common tgkill sys_tgkill
-271 common utimes sys_utimes
+271 common utimes sys_utimes_time32
272 common fadvise64_64 sys_fadvise64_64
273 common vserver sys_ni_syscall
274 common mbind sys_mbind
@@ -286,8 +286,8 @@
276 common set_mempolicy sys_set_mempolicy
277 common mq_open sys_mq_open
278 common mq_unlink sys_mq_unlink
-279 common mq_timedsend sys_mq_timedsend
-280 common mq_timedreceive sys_mq_timedreceive
+279 common mq_timedsend sys_mq_timedsend_time32
+280 common mq_timedreceive sys_mq_timedreceive_time32
281 common mq_notify sys_mq_notify
282 common mq_getsetattr sys_mq_getsetattr
283 common kexec_load sys_kexec_load
@@ -306,7 +306,7 @@
296 common mkdirat sys_mkdirat
297 common mknodat sys_mknodat
298 common fchownat sys_fchownat
-299 common futimesat sys_futimesat
+299 common futimesat sys_futimesat_time32
300 common fstatat64 sys_fstatat64
301 common unlinkat sys_unlinkat
302 common renameat sys_renameat
@@ -315,8 +315,8 @@
305 common readlinkat sys_readlinkat
306 common fchmodat sys_fchmodat
307 common faccessat sys_faccessat
-308 common pselect6 sys_pselect6
-309 common ppoll sys_ppoll
+308 common pselect6 sys_pselect6_time32
+309 common ppoll sys_ppoll_time32
310 common unshare sys_unshare
311 common set_robust_list sys_set_robust_list
312 common get_robust_list sys_get_robust_list
@@ -327,23 +327,23 @@
317 common move_pages sys_move_pages
318 common getcpu sys_getcpu
319 common epoll_pwait sys_epoll_pwait
-320 common utimensat sys_utimensat
+320 common utimensat sys_utimensat_time32
321 common signalfd sys_signalfd
322 common timerfd_create sys_timerfd_create
323 common eventfd sys_eventfd
324 common fallocate sys_fallocate
-325 common semtimedop sys_semtimedop
-326 common timerfd_settime sys_timerfd_settime
-327 common timerfd_gettime sys_timerfd_gettime
-328 common semctl sys_semctl
+325 common semtimedop sys_semtimedop_time32
+326 common timerfd_settime sys_timerfd_settime32
+327 common timerfd_gettime sys_timerfd_gettime32
+328 common semctl sys_old_semctl
329 common semget sys_semget
330 common semop sys_semop
-331 common msgctl sys_msgctl
+331 common msgctl sys_old_msgctl
332 common msgget sys_msgget
333 common msgrcv sys_msgrcv
334 common msgsnd sys_msgsnd
335 common shmat sys_shmat
-336 common shmctl sys_shmctl
+336 common shmctl sys_old_shmctl
337 common shmdt sys_shmdt
338 common shmget sys_shmget
339 common signalfd4 sys_signalfd4
@@ -374,13 +374,13 @@
364 common pwritev sys_pwritev
365 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
366 common perf_event_open sys_perf_event_open
-367 common recvmmsg sys_recvmmsg
+367 common recvmmsg sys_recvmmsg_time32
368 common fanotify_init sys_fanotify_init
369 common fanotify_mark sys_fanotify_mark
370 common prlimit64 sys_prlimit64
371 common name_to_handle_at sys_name_to_handle_at
372 common open_by_handle_at sys_open_by_handle_at
-373 common clock_adjtime sys_clock_adjtime
+373 common clock_adjtime sys_clock_adjtime32
374 common syncfs sys_syncfs
375 common setns sys_setns
376 common sendmmsg sys_sendmmsg
@@ -406,5 +406,26 @@
396 common pkey_alloc sys_pkey_alloc
397 common pkey_free sys_pkey_free
398 common statx sys_statx
-399 common io_pgetevents sys_io_pgetevents
+399 common io_pgetevents sys_io_pgetevents_time32
400 common rseq sys_rseq
+# 401 and 402 are unused
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 787290781b8c..3d7f1153155f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2,6 +2,7 @@
config MIPS
bool
default y
+ select ARCH_32BIT_OFF_T if !64BIT
select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK
@@ -206,7 +207,6 @@ config ATH79
select COMMON_CLK
select CLKDEV_LOOKUP
select IRQ_MIPS_CPU
- select MIPS_MACHINE
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
@@ -391,7 +391,7 @@ config MACH_INGENIC
select GPIOLIB
select COMMON_CLK
select GENERIC_IRQ_CHIP
- select BUILTIN_DTB
+ select BUILTIN_DTB if MIPS_NO_APPENDED_DTB
select USE_OF
select LIBFDT
@@ -676,6 +676,7 @@ config SGI_IP27
select DEFAULT_SGI_PARTITION
select SYS_HAS_EARLY_PRINTK
select HAVE_PCI
+ select IRQ_MIPS_CPU
select NR_CPUS_DEFAULT_64
select SYS_HAS_CPU_R10000
select SYS_SUPPORTS_64BIT_KERNEL
@@ -1124,7 +1125,6 @@ config DMA_NONCOHERENT
bool
select ARCH_HAS_DMA_MMAP_PGPROT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_SYNC_DMA_FOR_CPU
select NEED_DMA_MAP_STATE
select ARCH_HAS_DMA_COHERENT_TO_PFN
select DMA_NONCOHERENT_CACHE_SYNC
@@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT
please say 'N' here. If you want a high-performance kernel to run on
new Loongson 3 machines only, please say 'Y' here.
+config CPU_LOONGSON3_WORKAROUNDS
+ bool "Old Loongson 3 LLSC Workarounds"
+ default y if SMP
+ depends on CPU_LOONGSON3
+ help
+ Loongson 3 processors have LL/SC issues that require workarounds.
+ Without the workarounds, the system may hang unexpectedly.
+
+ Newer Loongson 3 processors fix these issues and need no workarounds.
+ The workarounds have no significant side effects on them, but they may
+ decrease the performance of the system, so this option should be
+ disabled unless the kernel is intended to run on old systems.
+
+ If unsure, please say Y.
+
config CPU_LOONGSON2E
bool "Loongson 2E"
depends on SYS_HAS_CPU_LOONGSON2E
@@ -1541,6 +1556,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
select CPU_SUPPORTS_MSA
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
@@ -1866,7 +1882,7 @@ config CPU_LOONGSON2
config CPU_LOONGSON1
bool
select CPU_MIPS32
- select CPU_MIPSR1
+ select CPU_MIPSR2
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
@@ -1928,9 +1944,11 @@ config SYS_HAS_CPU_MIPS32_R3_5
config SYS_HAS_CPU_MIPS32_R5
bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
config SYS_HAS_CPU_MIPS32_R6
bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
config SYS_HAS_CPU_MIPS64_R1
bool
@@ -1940,6 +1958,7 @@ config SYS_HAS_CPU_MIPS64_R2
config SYS_HAS_CPU_MIPS64_R6
bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
config SYS_HAS_CPU_R3000
bool
@@ -1976,6 +1995,7 @@ config SYS_HAS_CPU_R8000
config SYS_HAS_CPU_R10000
bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
config SYS_HAS_CPU_RM7000
bool
@@ -2004,6 +2024,7 @@ config SYS_HAS_CPU_BMIPS4380
config SYS_HAS_CPU_BMIPS5000
bool
select SYS_HAS_CPU_BMIPS
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
config SYS_HAS_CPU_XLR
bool
@@ -3155,6 +3176,7 @@ config MIPS32_O32
config MIPS32_N32
bool "Kernel support for n32 binaries"
depends on 64BIT
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 5b174c3d0de3..8f4486c4415b 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -233,6 +233,8 @@ toolchain-crc := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mcrc)
cflags-$(toolchain-crc) += -DTOOLCHAIN_SUPPORTS_CRC
toolchain-dsp := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mdsp)
cflags-$(toolchain-dsp) += -DTOOLCHAIN_SUPPORTS_DSP
+toolchain-ginv := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mginv)
+cflags-$(toolchain-ginv) += -DTOOLCHAIN_SUPPORTS_GINV
#
# Firmware support
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index f09262e0a72f..10ff07b7721e 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -683,7 +683,7 @@ static int __init ar7_register_devices(void)
if (ar7_has_high_cpmac()) {
res = fixed_phy_add(PHY_POLL, cpmac_high.id,
- &fixed_phy_status, -1);
+ &fixed_phy_status);
if (!res) {
cpmac_get_mac(1, cpmac_high_data.dev_addr);
@@ -696,7 +696,7 @@ static int __init ar7_register_devices(void)
} else
cpmac_low_data.phy_mask = 0xffffffff;
- res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status, -1);
+ res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status);
if (!res) {
cpmac_get_mac(0, cpmac_low_data.dev_addr);
res = platform_device_register(&cpmac_low);
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index 191c3910eac5..7367416642cb 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -1,79 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
if ATH79
-menu "Atheros AR71XX/AR724X/AR913X machine selection"
-
-config ATH79_MACH_AP121
- bool "Atheros AP121 reference board"
- select SOC_AR933X
- select ATH79_DEV_GPIO_BUTTONS
- select ATH79_DEV_LEDS_GPIO
- select ATH79_DEV_SPI
- select ATH79_DEV_USB
- select ATH79_DEV_WMAC
- help
- Say 'Y' here if you want your kernel to support the
- Atheros AP121 reference board.
-
-config ATH79_MACH_AP136
- bool "Atheros AP136 reference board"
- select SOC_QCA955X
- select ATH79_DEV_GPIO_BUTTONS
- select ATH79_DEV_LEDS_GPIO
- select ATH79_DEV_SPI
- select ATH79_DEV_USB
- select ATH79_DEV_WMAC
- help
- Say 'Y' here if you want your kernel to support the
- Atheros AP136 reference board.
-
-config ATH79_MACH_AP81
- bool "Atheros AP81 reference board"
- select SOC_AR913X
- select ATH79_DEV_GPIO_BUTTONS
- select ATH79_DEV_LEDS_GPIO
- select ATH79_DEV_SPI
- select ATH79_DEV_USB
- select ATH79_DEV_WMAC
- help
- Say 'Y' here if you want your kernel to support the
- Atheros AP81 reference board.
-
-config ATH79_MACH_DB120
- bool "Atheros DB120 reference board"
- select SOC_AR934X
- select ATH79_DEV_GPIO_BUTTONS
- select ATH79_DEV_LEDS_GPIO
- select ATH79_DEV_SPI
- select ATH79_DEV_USB
- select ATH79_DEV_WMAC
- help
- Say 'Y' here if you want your kernel to support the
- Atheros DB120 reference board.
-
-config ATH79_MACH_PB44
- bool "Atheros PB44 reference board"
- select SOC_AR71XX
- select ATH79_DEV_GPIO_BUTTONS
- select ATH79_DEV_LEDS_GPIO
- select ATH79_DEV_SPI
- select ATH79_DEV_USB
- help
- Say 'Y' here if you want your kernel to support the
- Atheros PB44 reference board.
-
-config ATH79_MACH_UBNT_XM
- bool "Ubiquiti Networks XM (rev 1.0) board"
- select SOC_AR724X
- select ATH79_DEV_GPIO_BUTTONS
- select ATH79_DEV_LEDS_GPIO
- select ATH79_DEV_SPI
- help
- Say 'Y' here if you want your kernel to support the
- Ubiquiti Networks XM (rev 1.0) board.
-
-endmenu
-
config SOC_AR71XX
select HAVE_PCI
def_bool n
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index fcc382cfc770..e18d9a2ecf62 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -8,27 +8,6 @@
# under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
-obj-y := prom.o setup.o irq.o common.o clock.o
+obj-y := prom.o setup.o common.o clock.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_PCI) += pci.o
-
-#
-# Devices
-#
-obj-y += dev-common.o
-obj-$(CONFIG_ATH79_DEV_GPIO_BUTTONS) += dev-gpio-buttons.o
-obj-$(CONFIG_ATH79_DEV_LEDS_GPIO) += dev-leds-gpio.o
-obj-$(CONFIG_ATH79_DEV_SPI) += dev-spi.o
-obj-$(CONFIG_ATH79_DEV_USB) += dev-usb.o
-obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o
-
-#
-# Machines
-#
-obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o
-obj-$(CONFIG_ATH79_MACH_AP136) += mach-ap136.o
-obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o
-obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o
-obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o
-obj-$(CONFIG_ATH79_MACH_UBNT_XM) += mach-ubnt-xm.o
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index cf9158e3c2d9..d4ca97e2ec6c 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -26,7 +26,6 @@
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
-#include "machtypes.h"
#define AR71XX_BASE_FREQ 40000000
#define AR724X_BASE_FREQ 40000000
@@ -37,24 +36,63 @@ static struct clk_onecell_data clk_data = {
.clk_num = ARRAY_SIZE(clks),
};
-static struct clk *__init ath79_add_sys_clkdev(
- const char *id, unsigned long rate)
+static const char * const clk_names[ATH79_CLK_END] = {
+ [ATH79_CLK_CPU] = "cpu",
+ [ATH79_CLK_DDR] = "ddr",
+ [ATH79_CLK_AHB] = "ahb",
+ [ATH79_CLK_REF] = "ref",
+ [ATH79_CLK_MDIO] = "mdio",
+};
+
+static const char * __init ath79_clk_name(int type)
{
- struct clk *clk;
- int err;
+ BUG_ON(type >= ARRAY_SIZE(clk_names) || !clk_names[type]);
+ return clk_names[type];
+}
- clk = clk_register_fixed_rate(NULL, id, NULL, 0, rate);
+static void __init __ath79_set_clk(int type, const char *name, struct clk *clk)
+{
if (IS_ERR(clk))
- panic("failed to allocate %s clock structure", id);
+ panic("failed to allocate %s clock structure", clk_names[type]);
- err = clk_register_clkdev(clk, id, NULL);
- if (err)
- panic("unable to register %s clock device", id);
+ clks[type] = clk;
+ clk_register_clkdev(clk, name, NULL);
+}
+static struct clk * __init ath79_set_clk(int type, unsigned long rate)
+{
+ const char *name = ath79_clk_name(type);
+ struct clk *clk;
+
+ clk = clk_register_fixed_rate(NULL, name, NULL, 0, rate);
+ __ath79_set_clk(type, name, clk);
return clk;
}
-static void __init ar71xx_clocks_init(void)
+static struct clk * __init ath79_set_ff_clk(int type, const char *parent,
+ unsigned int mult, unsigned int div)
+{
+ const char *name = ath79_clk_name(type);
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, name, parent, 0, mult, div);
+ __ath79_set_clk(type, name, clk);
+ return clk;
+}
+
+static unsigned long __init ath79_setup_ref_clk(unsigned long rate)
+{
+ struct clk *clk = clks[ATH79_CLK_REF];
+
+ if (clk)
+ rate = clk_get_rate(clk);
+ else
+ clk = ath79_set_clk(ATH79_CLK_REF, rate);
+
+ return rate;
+}
+
+static void __init ar71xx_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -64,9 +102,9 @@ static void __init ar71xx_clocks_init(void)
u32 freq;
u32 div;
- ref_rate = AR71XX_BASE_FREQ;
+ ref_rate = ath79_setup_ref_clk(AR71XX_BASE_FREQ);
- pll = ath79_pll_rr(AR71XX_PLL_REG_CPU_CONFIG);
+ pll = __raw_readl(pll_base + AR71XX_PLL_REG_CPU_CONFIG);
div = ((pll >> AR71XX_PLL_FB_SHIFT) & AR71XX_PLL_FB_MASK) + 1;
freq = div * ref_rate;
@@ -80,31 +118,17 @@ static void __init ar71xx_clocks_init(void)
div = (((pll >> AR71XX_AHB_DIV_SHIFT) & AR71XX_AHB_DIV_MASK) + 1) * 2;
ahb_rate = cpu_rate / div;
- ath79_add_sys_clkdev("ref", ref_rate);
- clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
-
- clk_add_alias("wdt", NULL, "ahb", NULL);
- clk_add_alias("uart", NULL, "ahb", NULL);
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
-static struct clk * __init ath79_reg_ffclk(const char *name,
- const char *parent_name, unsigned int mult, unsigned int div)
+static void __init ar724x_clocks_init(void __iomem *pll_base)
{
- struct clk *clk;
-
- clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
- if (IS_ERR(clk))
- panic("failed to allocate %s clock structure", name);
-
- return clk;
-}
-
-static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base)
-{
- u32 pll;
u32 mult, div, ddr_div, ahb_div;
+ u32 pll;
+
+ ath79_setup_ref_clk(AR71XX_BASE_FREQ);
pll = __raw_readl(pll_base + AR724X_PLL_REG_CPU_CONFIG);
@@ -114,30 +138,14 @@ static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base)
ddr_div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
ahb_div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
- clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref", mult, div);
- clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref", mult, div * ddr_div);
- clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref", mult, div * ahb_div);
-}
-
-static void __init ar724x_clocks_init(void)
-{
- struct clk *ref_clk;
-
- ref_clk = ath79_add_sys_clkdev("ref", AR724X_BASE_FREQ);
-
- ar724x_clk_init(ref_clk, ath79_pll_base);
-
- /* just make happy plat_time_init() from arch/mips/ath79/setup.c */
- clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL);
- clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL);
- clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL);
-
- clk_add_alias("wdt", NULL, "ahb", NULL);
- clk_add_alias("uart", NULL, "ahb", NULL);
+ ath79_set_ff_clk(ATH79_CLK_CPU, "ref", mult, div);
+ ath79_set_ff_clk(ATH79_CLK_DDR, "ref", mult, div * ddr_div);
+ ath79_set_ff_clk(ATH79_CLK_AHB, "ref", mult, div * ahb_div);
}
-static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base)
+static void __init ar933x_clocks_init(void __iomem *pll_base)
{
+ unsigned long ref_rate;
u32 clock_ctrl;
u32 ref_div;
u32 ninit_mul;
@@ -146,6 +154,15 @@ static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base)
u32 cpu_div;
u32 ddr_div;
u32 ahb_div;
+ u32 t;
+
+ t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
+ if (t & AR933X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = (40 * 1000 * 1000);
+ else
+ ref_rate = (25 * 1000 * 1000);
+
+ ath79_setup_ref_clk(ref_rate);
clock_ctrl = __raw_readl(pll_base + AR933X_PLL_CLOCK_CTRL_REG);
if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) {
@@ -186,37 +203,12 @@ static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base)
AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1;
}
- clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref",
- ninit_mul, ref_div * out_div * cpu_div);
- clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref",
- ninit_mul, ref_div * out_div * ddr_div);
- clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref",
- ninit_mul, ref_div * out_div * ahb_div);
-}
-
-static void __init ar933x_clocks_init(void)
-{
- struct clk *ref_clk;
- unsigned long ref_rate;
- u32 t;
-
- t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
- if (t & AR933X_BOOTSTRAP_REF_CLK_40)
- ref_rate = (40 * 1000 * 1000);
- else
- ref_rate = (25 * 1000 * 1000);
-
- ref_clk = ath79_add_sys_clkdev("ref", ref_rate);
-
- ar9330_clk_init(ref_clk, ath79_pll_base);
-
- /* just make happy plat_time_init() from arch/mips/ath79/setup.c */
- clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL);
- clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL);
- clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL);
-
- clk_add_alias("wdt", NULL, "ahb", NULL);
- clk_add_alias("uart", NULL, "ref", NULL);
+ ath79_set_ff_clk(ATH79_CLK_CPU, "ref", ninit_mul,
+ ref_div * out_div * cpu_div);
+ ath79_set_ff_clk(ATH79_CLK_DDR, "ref", ninit_mul,
+ ref_div * out_div * ddr_div);
+ ath79_set_ff_clk(ATH79_CLK_AHB, "ref", ninit_mul,
+ ref_div * out_div * ahb_div);
}
static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
@@ -239,7 +231,7 @@ static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
return ret;
}
-static void __init ar934x_clocks_init(void)
+static void __init ar934x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -258,6 +250,8 @@ static void __init ar934x_clocks_init(void)
else
ref_rate = 25 * 1000 * 1000;
+ ref_rate = ath79_setup_ref_clk(ref_rate);
+
pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG);
if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
@@ -270,7 +264,7 @@ static void __init ar934x_clocks_init(void)
AR934X_SRIF_DPLL1_REFDIV_MASK;
frac = 1 << 18;
} else {
- pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
+ pll = __raw_readl(pll_base + AR934X_PLL_CPU_CONFIG_REG);
out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
@@ -297,7 +291,7 @@ static void __init ar934x_clocks_init(void)
AR934X_SRIF_DPLL1_REFDIV_MASK;
frac = 1 << 18;
} else {
- pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
+ pll = __raw_readl(pll_base + AR934X_PLL_DDR_CONFIG_REG);
out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
@@ -312,7 +306,7 @@ static void __init ar934x_clocks_init(void)
ddr_pll = ar934x_get_pll_freq(ref_rate, ref_div, nint,
nfrac, frac, out_div);
- clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+ clk_ctrl = __raw_readl(pll_base + AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
@@ -344,18 +338,18 @@ static void __init ar934x_clocks_init(void)
else
ahb_rate = cpu_pll / (postdiv + 1);
- ath79_add_sys_clkdev("ref", ref_rate);
- clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
- clk_add_alias("wdt", NULL, "ref", NULL);
- clk_add_alias("uart", NULL, "ref", NULL);
+ clk_ctrl = __raw_readl(pll_base + AR934X_PLL_SWITCH_CLOCK_CONTROL_REG);
+ if (clk_ctrl & AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL)
+ ath79_set_clk(ATH79_CLK_MDIO, 100 * 1000 * 1000);
iounmap(dpll_base);
}
-static void __init qca953x_clocks_init(void)
+static void __init qca953x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -371,7 +365,9 @@ static void __init qca953x_clocks_init(void)
else
ref_rate = 25 * 1000 * 1000;
- pll = ath79_pll_rr(QCA953X_PLL_CPU_CONFIG_REG);
+ ref_rate = ath79_setup_ref_clk(ref_rate);
+
+ pll = __raw_readl(pll_base + QCA953X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
@@ -385,7 +381,7 @@ static void __init qca953x_clocks_init(void)
cpu_pll += frac * (ref_rate >> 6) / ref_div;
cpu_pll /= (1 << out_div);
- pll = ath79_pll_rr(QCA953X_PLL_DDR_CONFIG_REG);
+ pll = __raw_readl(pll_base + QCA953X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
@@ -399,7 +395,7 @@ static void __init qca953x_clocks_init(void)
ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4);
ddr_pll /= (1 << out_div);
- clk_ctrl = ath79_pll_rr(QCA953X_PLL_CLK_CTRL_REG);
+ clk_ctrl = __raw_readl(pll_base + QCA953X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
@@ -431,16 +427,12 @@ static void __init qca953x_clocks_init(void)
else
ahb_rate = cpu_pll / (postdiv + 1);
- ath79_add_sys_clkdev("ref", ref_rate);
- ath79_add_sys_clkdev("cpu", cpu_rate);
- ath79_add_sys_clkdev("ddr", ddr_rate);
- ath79_add_sys_clkdev("ahb", ahb_rate);
-
- clk_add_alias("wdt", NULL, "ref", NULL);
- clk_add_alias("uart", NULL, "ref", NULL);
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
-static void __init qca955x_clocks_init(void)
+static void __init qca955x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -456,7 +448,9 @@ static void __init qca955x_clocks_init(void)
else
ref_rate = 25 * 1000 * 1000;
- pll = ath79_pll_rr(QCA955X_PLL_CPU_CONFIG_REG);
+ ref_rate = ath79_setup_ref_clk(ref_rate);
+
+ pll = __raw_readl(pll_base + QCA955X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
@@ -470,7 +464,7 @@ static void __init qca955x_clocks_init(void)
cpu_pll += frac * ref_rate / (ref_div * (1 << 6));
cpu_pll /= (1 << out_div);
- pll = ath79_pll_rr(QCA955X_PLL_DDR_CONFIG_REG);
+ pll = __raw_readl(pll_base + QCA955X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
@@ -484,7 +478,7 @@ static void __init qca955x_clocks_init(void)
ddr_pll += frac * ref_rate / (ref_div * (1 << 10));
ddr_pll /= (1 << out_div);
- clk_ctrl = ath79_pll_rr(QCA955X_PLL_CLK_CTRL_REG);
+ clk_ctrl = __raw_readl(pll_base + QCA955X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
@@ -516,16 +510,12 @@ static void __init qca955x_clocks_init(void)
else
ahb_rate = cpu_pll / (postdiv + 1);
- ath79_add_sys_clkdev("ref", ref_rate);
- clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
-
- clk_add_alias("wdt", NULL, "ref", NULL);
- clk_add_alias("uart", NULL, "ref", NULL);
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
-static void __init qca956x_clocks_init(void)
+static void __init qca956x_clocks_init(void __iomem *pll_base)
{
unsigned long ref_rate;
unsigned long cpu_rate;
@@ -551,13 +541,15 @@ static void __init qca956x_clocks_init(void)
else
ref_rate = 25 * 1000 * 1000;
- pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG_REG);
+ ref_rate = ath79_setup_ref_clk(ref_rate);
+
+ pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG_REG);
out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
QCA956X_PLL_CPU_CONFIG_REFDIV_MASK;
- pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG1_REG);
+ pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG1_REG);
nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) &
QCA956X_PLL_CPU_CONFIG1_NINT_MASK;
hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) &
@@ -570,12 +562,12 @@ static void __init qca956x_clocks_init(void)
cpu_pll += (hfrac >> 13) * ref_rate / ref_div;
cpu_pll /= (1 << out_div);
- pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG_REG);
+ pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG_REG);
out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK;
ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
QCA956X_PLL_DDR_CONFIG_REFDIV_MASK;
- pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG1_REG);
+ pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG1_REG);
nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) &
QCA956X_PLL_DDR_CONFIG1_NINT_MASK;
hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) &
@@ -588,7 +580,7 @@ static void __init qca956x_clocks_init(void)
ddr_pll += (hfrac >> 13) * ref_rate / ref_div;
ddr_pll /= (1 << out_div);
- clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG);
+ clk_ctrl = __raw_readl(pll_base + QCA956X_PLL_CLK_CTRL_REG);
postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
@@ -620,72 +612,19 @@ static void __init qca956x_clocks_init(void)
else
ahb_rate = cpu_pll / (postdiv + 1);
- ath79_add_sys_clkdev("ref", ref_rate);
- ath79_add_sys_clkdev("cpu", cpu_rate);
- ath79_add_sys_clkdev("ddr", ddr_rate);
- ath79_add_sys_clkdev("ahb", ahb_rate);
-
- clk_add_alias("wdt", NULL, "ref", NULL);
- clk_add_alias("uart", NULL, "ref", NULL);
-}
-
-void __init ath79_clocks_init(void)
-{
- if (soc_is_ar71xx())
- ar71xx_clocks_init();
- else if (soc_is_ar724x() || soc_is_ar913x())
- ar724x_clocks_init();
- else if (soc_is_ar933x())
- ar933x_clocks_init();
- else if (soc_is_ar934x())
- ar934x_clocks_init();
- else if (soc_is_qca953x())
- qca953x_clocks_init();
- else if (soc_is_qca955x())
- qca955x_clocks_init();
- else if (soc_is_qca956x() || soc_is_tp9343())
- qca956x_clocks_init();
- else
- BUG();
-}
-
-unsigned long __init
-ath79_get_sys_clk_rate(const char *id)
-{
- struct clk *clk;
- unsigned long rate;
-
- clk = clk_get(NULL, id);
- if (IS_ERR(clk))
- panic("unable to get %s clock, err=%d", id, (int) PTR_ERR(clk));
-
- rate = clk_get_rate(clk);
- clk_put(clk);
-
- return rate;
+ ath79_set_clk(ATH79_CLK_CPU, cpu_rate);
+ ath79_set_clk(ATH79_CLK_DDR, ddr_rate);
+ ath79_set_clk(ATH79_CLK_AHB, ahb_rate);
}
-#ifdef CONFIG_OF
static void __init ath79_clocks_init_dt(struct device_node *np)
{
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-}
-
-CLK_OF_DECLARE(ar7100, "qca,ar7100-pll", ath79_clocks_init_dt);
-CLK_OF_DECLARE(ar7240, "qca,ar7240-pll", ath79_clocks_init_dt);
-CLK_OF_DECLARE(ar9340, "qca,ar9340-pll", ath79_clocks_init_dt);
-CLK_OF_DECLARE(ar9550, "qca,qca9550-pll", ath79_clocks_init_dt);
-
-static void __init ath79_clocks_init_dt_ng(struct device_node *np)
-{
struct clk *ref_clk;
void __iomem *pll_base;
ref_clk = of_clk_get(np, 0);
- if (IS_ERR(ref_clk)) {
- pr_err("%pOF: of_clk_get failed\n", np);
- goto err;
- }
+ if (!IS_ERR(ref_clk))
+ clks[ATH79_CLK_REF] = ref_clk;
pll_base = of_iomap(np, 0);
if (!pll_base) {
@@ -693,14 +632,24 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np)
goto err_clk;
}
- if (of_device_is_compatible(np, "qca,ar9130-pll"))
- ar724x_clk_init(ref_clk, pll_base);
+ if (of_device_is_compatible(np, "qca,ar7100-pll"))
+ ar71xx_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,ar7240-pll") ||
+ of_device_is_compatible(np, "qca,ar9130-pll"))
+ ar724x_clocks_init(pll_base);
else if (of_device_is_compatible(np, "qca,ar9330-pll"))
- ar9330_clk_init(ref_clk, pll_base);
- else {
- pr_err("%pOF: could not find any appropriate clk_init()\n", np);
- goto err_iounmap;
- }
+ ar933x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,ar9340-pll"))
+ ar934x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,qca9530-pll"))
+ qca953x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,qca9550-pll"))
+ qca955x_clocks_init(pll_base);
+ else if (of_device_is_compatible(np, "qca,qca9560-pll"))
+ qca956x_clocks_init(pll_base);
+
+ if (!clks[ATH79_CLK_MDIO])
+ clks[ATH79_CLK_MDIO] = clks[ATH79_CLK_REF];
if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) {
pr_err("%pOF: could not register clk provider\n", np);
@@ -714,10 +663,13 @@ err_iounmap:
err_clk:
clk_put(ref_clk);
-
-err:
- return;
}
-CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt_ng);
-CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt_ng);
-#endif
+
+CLK_OF_DECLARE(ar7100_clk, "qca,ar7100-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar7240_clk, "qca,ar7240-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9340_clk, "qca,ar9340-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9530_clk, "qca,qca9530-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9550_clk, "qca,qca9550-pll", ath79_clocks_init_dt);
+CLK_OF_DECLARE(ar9560_clk, "qca,qca9560-pll", ath79_clocks_init_dt);
diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h
index 870c6b2e97e8..25b96f59e8e8 100644
--- a/arch/mips/ath79/common.h
+++ b/arch/mips/ath79/common.h
@@ -19,11 +19,6 @@
#define ATH79_MEM_SIZE_MIN (2 * 1024 * 1024)
#define ATH79_MEM_SIZE_MAX (256 * 1024 * 1024)
-void ath79_clocks_init(void);
-unsigned long ath79_get_sys_clk_rate(const char *id);
-
void ath79_ddr_ctrl_init(void);
-void ath79_gpio_init(void);
-
#endif /* __ATH79_COMMON_H */
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
deleted file mode 100644
index 9d0172a4dc69..000000000000
--- a/arch/mips/ath79/dev-common.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X common devices
- *
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * Parts of this file are based on Atheros' 2.6.15 BSP
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/gpio-ath79.h>
-#include <linux/serial_8250.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include "common.h"
-#include "dev-common.h"
-
-static struct resource ath79_uart_resources[] = {
- {
- .start = AR71XX_UART_BASE,
- .end = AR71XX_UART_BASE + AR71XX_UART_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-#define AR71XX_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP)
-static struct plat_serial8250_port ath79_uart_data[] = {
- {
- .mapbase = AR71XX_UART_BASE,
- .irq = ATH79_MISC_IRQ(3),
- .flags = AR71XX_UART_FLAGS,
- .iotype = UPIO_MEM32,
- .regshift = 2,
- }, {
- /* terminating entry */
- }
-};
-
-static struct platform_device ath79_uart_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .resource = ath79_uart_resources,
- .num_resources = ARRAY_SIZE(ath79_uart_resources),
- .dev = {
- .platform_data = ath79_uart_data
- },
-};
-
-static struct resource ar933x_uart_resources[] = {
- {
- .start = AR933X_UART_BASE,
- .end = AR933X_UART_BASE + AR71XX_UART_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = ATH79_MISC_IRQ(3),
- .end = ATH79_MISC_IRQ(3),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device ar933x_uart_device = {
- .name = "ar933x-uart",
- .id = -1,
- .resource = ar933x_uart_resources,
- .num_resources = ARRAY_SIZE(ar933x_uart_resources),
-};
-
-void __init ath79_register_uart(void)
-{
- unsigned long uart_clk_rate;
-
- uart_clk_rate = ath79_get_sys_clk_rate("uart");
-
- if (soc_is_ar71xx() ||
- soc_is_ar724x() ||
- soc_is_ar913x() ||
- soc_is_ar934x() ||
- soc_is_qca955x()) {
- ath79_uart_data[0].uartclk = uart_clk_rate;
- platform_device_register(&ath79_uart_device);
- } else if (soc_is_ar933x()) {
- platform_device_register(&ar933x_uart_device);
- } else {
- BUG();
- }
-}
-
-void __init ath79_register_wdt(void)
-{
- struct resource res;
-
- memset(&res, 0, sizeof(res));
-
- res.flags = IORESOURCE_MEM;
- res.start = AR71XX_RESET_BASE + AR71XX_RESET_REG_WDOG_CTRL;
- res.end = res.start + 0x8 - 1;
-
- platform_device_register_simple("ath79-wdt", -1, &res, 1);
-}
-
-static struct ath79_gpio_platform_data ath79_gpio_pdata;
-
-static struct resource ath79_gpio_resources[] = {
- {
- .flags = IORESOURCE_MEM,
- .start = AR71XX_GPIO_BASE,
- .end = AR71XX_GPIO_BASE + AR71XX_GPIO_SIZE - 1,
- },
- {
- .start = ATH79_MISC_IRQ(2),
- .end = ATH79_MISC_IRQ(2),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device ath79_gpio_device = {
- .name = "ath79-gpio",
- .id = -1,
- .resource = ath79_gpio_resources,
- .num_resources = ARRAY_SIZE(ath79_gpio_resources),
- .dev = {
- .platform_data = &ath79_gpio_pdata
- },
-};
-
-void __init ath79_gpio_init(void)
-{
- if (soc_is_ar71xx()) {
- ath79_gpio_pdata.ngpios = AR71XX_GPIO_COUNT;
- } else if (soc_is_ar7240()) {
- ath79_gpio_pdata.ngpios = AR7240_GPIO_COUNT;
- } else if (soc_is_ar7241() || soc_is_ar7242()) {
- ath79_gpio_pdata.ngpios = AR7241_GPIO_COUNT;
- } else if (soc_is_ar913x()) {
- ath79_gpio_pdata.ngpios = AR913X_GPIO_COUNT;
- } else if (soc_is_ar933x()) {
- ath79_gpio_pdata.ngpios = AR933X_GPIO_COUNT;
- } else if (soc_is_ar934x()) {
- ath79_gpio_pdata.ngpios = AR934X_GPIO_COUNT;
- ath79_gpio_pdata.oe_inverted = 1;
- } else if (soc_is_qca955x()) {
- ath79_gpio_pdata.ngpios = QCA955X_GPIO_COUNT;
- ath79_gpio_pdata.oe_inverted = 1;
- } else {
- BUG();
- }
-
- platform_device_register(&ath79_gpio_device);
-}
diff --git a/arch/mips/ath79/dev-common.h b/arch/mips/ath79/dev-common.h
deleted file mode 100644
index 0f514e1affce..000000000000
--- a/arch/mips/ath79/dev-common.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X common devices
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _ATH79_DEV_COMMON_H
-#define _ATH79_DEV_COMMON_H
-
-void ath79_register_uart(void);
-void ath79_register_wdt(void);
-
-#endif /* _ATH79_DEV_COMMON_H */
diff --git a/arch/mips/ath79/dev-gpio-buttons.c b/arch/mips/ath79/dev-gpio-buttons.c
deleted file mode 100644
index 366b35fb164d..000000000000
--- a/arch/mips/ath79/dev-gpio-buttons.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X GPIO button support
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include "linux/init.h"
-#include "linux/slab.h"
-#include <linux/platform_device.h>
-
-#include "dev-gpio-buttons.h"
-
-void __init ath79_register_gpio_keys_polled(int id,
- unsigned poll_interval,
- unsigned nbuttons,
- struct gpio_keys_button *buttons)
-{
- struct platform_device *pdev;
- struct gpio_keys_platform_data pdata;
- struct gpio_keys_button *p;
- int err;
-
- p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
-
- pdev = platform_device_alloc("gpio-keys-polled", id);
- if (!pdev)
- goto err_free_buttons;
-
- memset(&pdata, 0, sizeof(pdata));
- pdata.poll_interval = poll_interval;
- pdata.nbuttons = nbuttons;
- pdata.buttons = p;
-
- err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
- if (err)
- goto err_put_pdev;
-
- err = platform_device_add(pdev);
- if (err)
- goto err_put_pdev;
-
- return;
-
-err_put_pdev:
- platform_device_put(pdev);
-
-err_free_buttons:
- kfree(p);
-}
diff --git a/arch/mips/ath79/dev-gpio-buttons.h b/arch/mips/ath79/dev-gpio-buttons.h
deleted file mode 100644
index 481847ac1cba..000000000000
--- a/arch/mips/ath79/dev-gpio-buttons.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X GPIO button support
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _ATH79_DEV_GPIO_BUTTONS_H
-#define _ATH79_DEV_GPIO_BUTTONS_H
-
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-
-void ath79_register_gpio_keys_polled(int id,
- unsigned poll_interval,
- unsigned nbuttons,
- struct gpio_keys_button *buttons);
-
-#endif /* _ATH79_DEV_GPIO_BUTTONS_H */
diff --git a/arch/mips/ath79/dev-leds-gpio.c b/arch/mips/ath79/dev-leds-gpio.c
deleted file mode 100644
index dcb1debcefb8..000000000000
--- a/arch/mips/ath79/dev-leds-gpio.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X common GPIO LEDs support
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-
-#include "dev-leds-gpio.h"
-
-void __init ath79_register_leds_gpio(int id,
- unsigned num_leds,
- struct gpio_led *leds)
-{
- struct platform_device *pdev;
- struct gpio_led_platform_data pdata;
- struct gpio_led *p;
- int err;
-
- p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
-
- pdev = platform_device_alloc("leds-gpio", id);
- if (!pdev)
- goto err_free_leds;
-
- memset(&pdata, 0, sizeof(pdata));
- pdata.num_leds = num_leds;
- pdata.leds = p;
-
- err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
- if (err)
- goto err_put_pdev;
-
- err = platform_device_add(pdev);
- if (err)
- goto err_put_pdev;
-
- return;
-
-err_put_pdev:
- platform_device_put(pdev);
-
-err_free_leds:
- kfree(p);
-}
diff --git a/arch/mips/ath79/dev-leds-gpio.h b/arch/mips/ath79/dev-leds-gpio.h
deleted file mode 100644
index 6e5d8851ebcf..000000000000
--- a/arch/mips/ath79/dev-leds-gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X common GPIO LEDs support
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _ATH79_DEV_LEDS_GPIO_H
-#define _ATH79_DEV_LEDS_GPIO_H
-
-#include <linux/leds.h>
-
-void ath79_register_leds_gpio(int id,
- unsigned num_leds,
- struct gpio_led *leds);
-
-#endif /* _ATH79_DEV_LEDS_GPIO_H */
diff --git a/arch/mips/ath79/dev-spi.c b/arch/mips/ath79/dev-spi.c
deleted file mode 100644
index aa30163efbfd..000000000000
--- a/arch/mips/ath79/dev-spi.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X SPI controller device
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include "dev-spi.h"
-
-static struct resource ath79_spi_resources[] = {
- {
- .start = AR71XX_SPI_BASE,
- .end = AR71XX_SPI_BASE + AR71XX_SPI_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device ath79_spi_device = {
- .name = "ath79-spi",
- .id = -1,
- .resource = ath79_spi_resources,
- .num_resources = ARRAY_SIZE(ath79_spi_resources),
-};
-
-void __init ath79_register_spi(struct ath79_spi_platform_data *pdata,
- struct spi_board_info const *info,
- unsigned n)
-{
- spi_register_board_info(info, n);
- ath79_spi_device.dev.platform_data = pdata;
- platform_device_register(&ath79_spi_device);
-}
diff --git a/arch/mips/ath79/dev-spi.h b/arch/mips/ath79/dev-spi.h
deleted file mode 100644
index d732565ca736..000000000000
--- a/arch/mips/ath79/dev-spi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X SPI controller device
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _ATH79_DEV_SPI_H
-#define _ATH79_DEV_SPI_H
-
-#include <linux/spi/spi.h>
-#include <asm/mach-ath79/ath79_spi_platform.h>
-
-void ath79_register_spi(struct ath79_spi_platform_data *pdata,
- struct spi_board_info const *info,
- unsigned n);
-
-#endif /* _ATH79_DEV_SPI_H */
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c
deleted file mode 100644
index 8227265bcc2d..000000000000
--- a/arch/mips/ath79/dev-usb.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Atheros AR7XXX/AR9XXX USB Host Controller device
- *
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * Parts of this file are based on Atheros' 2.6.15 BSP
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/usb/ehci_pdriver.h>
-#include <linux/usb/ohci_pdriver.h>
-
-#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include "common.h"
-#include "dev-usb.h"
-
-static u64 ath79_usb_dmamask = DMA_BIT_MASK(32);
-
-static struct usb_ohci_pdata ath79_ohci_pdata = {
-};
-
-static struct usb_ehci_pdata ath79_ehci_pdata_v1 = {
- .has_synopsys_hc_bug = 1,
-};
-
-static struct usb_ehci_pdata ath79_ehci_pdata_v2 = {
- .caps_offset = 0x100,
- .has_tt = 1,
-};
-
-static void __init ath79_usb_register(const char *name, int id,
- unsigned long base, unsigned long size,
- int irq, const void *data,
- size_t data_size)
-{
- struct resource res[2];
- struct platform_device *pdev;
-
- memset(res, 0, sizeof(res));
-
- res[0].flags = IORESOURCE_MEM;
- res[0].start = base;
- res[0].end = base + size - 1;
-
- res[1].flags = IORESOURCE_IRQ;
- res[1].start = irq;
- res[1].end = irq;
-
- pdev = platform_device_register_resndata(NULL, name, id,
- res, ARRAY_SIZE(res),
- data, data_size);
-
- if (IS_ERR(pdev)) {
- pr_err("ath79: unable to register USB at %08lx, err=%d\n",
- base, (int) PTR_ERR(pdev));
- return;
- }
-
- pdev->dev.dma_mask = &ath79_usb_dmamask;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-}
-
-#define AR71XX_USB_RESET_MASK (AR71XX_RESET_USB_HOST | \
- AR71XX_RESET_USB_PHY | \
- AR71XX_RESET_USB_OHCI_DLL)
-
-static void __init ath79_usb_setup(void)
-{
- void __iomem *usb_ctrl_base;
-
- ath79_device_reset_set(AR71XX_USB_RESET_MASK);
- mdelay(1000);
- ath79_device_reset_clear(AR71XX_USB_RESET_MASK);
-
- usb_ctrl_base = ioremap(AR71XX_USB_CTRL_BASE, AR71XX_USB_CTRL_SIZE);
-
- /* Turning on the Buff and Desc swap bits */
- __raw_writel(0xf0000, usb_ctrl_base + AR71XX_USB_CTRL_REG_CONFIG);
-
- /* WAR for HW bug. Here it adjusts the duration between two SOFS */
- __raw_writel(0x20c00, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ);
-
- iounmap(usb_ctrl_base);
-
- mdelay(900);
-
- ath79_usb_register("ohci-platform", -1,
- AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE,
- ATH79_MISC_IRQ(6),
- &ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
-
- ath79_usb_register("ehci-platform", -1,
- AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE,
- ATH79_CPU_IRQ(3),
- &ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1));
-}
-
-static void __init ar7240_usb_setup(void)
-{
- void __iomem *usb_ctrl_base;
-
- ath79_device_reset_clear(AR7240_RESET_OHCI_DLL);
- ath79_device_reset_set(AR7240_RESET_USB_HOST);
-
- mdelay(1000);
-
- ath79_device_reset_set(AR7240_RESET_OHCI_DLL);
- ath79_device_reset_clear(AR7240_RESET_USB_HOST);
-
- usb_ctrl_base = ioremap(AR7240_USB_CTRL_BASE, AR7240_USB_CTRL_SIZE);
-
- /* WAR for HW bug. Here it adjusts the duration between two SOFS */
- __raw_writel(0x3, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ);
-
- iounmap(usb_ctrl_base);
-
- ath79_usb_register("ohci-platform", -1,
- AR7240_OHCI_BASE, AR7240_OHCI_SIZE,
- ATH79_CPU_IRQ(3),
- &ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
-}
-
-static void __init ar724x_usb_setup(void)
-{
- ath79_device_reset_set(AR724X_RESET_USBSUS_OVERRIDE);
- mdelay(10);
-
- ath79_device_reset_clear(AR724X_RESET_USB_HOST);
- mdelay(10);
-
- ath79_device_reset_clear(AR724X_RESET_USB_PHY);
- mdelay(10);
-
- ath79_usb_register("ehci-platform", -1,
- AR724X_EHCI_BASE, AR724X_EHCI_SIZE,
- ATH79_CPU_IRQ(3),
- &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
-}
-
-static void __init ar913x_usb_setup(void)
-{
- ath79_device_reset_set(AR913X_RESET_USBSUS_OVERRIDE);
- mdelay(10);
-
- ath79_device_reset_clear(AR913X_RESET_USB_HOST);
- mdelay(10);
-
- ath79_device_reset_clear(AR913X_RESET_USB_PHY);
- mdelay(10);
-
- ath79_usb_register("ehci-platform", -1,
- AR913X_EHCI_BASE, AR913X_EHCI_SIZE,
- ATH79_CPU_IRQ(3),
- &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
-}
-
-static void __init ar933x_usb_setup(void)
-{
- ath79_device_reset_set(AR933X_RESET_USBSUS_OVERRIDE);
- mdelay(10);
-
- ath79_device_reset_clear(AR933X_RESET_USB_HOST);
- mdelay(10);
-
- ath79_device_reset_clear(AR933X_RESET_USB_PHY);
- mdelay(10);
-
- ath79_usb_register("ehci-platform", -1,
- AR933X_EHCI_BASE, AR933X_EHCI_SIZE,
- ATH79_CPU_IRQ(3),
- &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
-}
-
-static void __init ar934x_usb_setup(void)
-{
- u32 bootstrap;
-
- bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
- if (bootstrap & AR934X_BOOTSTRAP_USB_MODE_DEVICE)
- return;
-
- ath79_device_reset_set(AR934X_RESET_USBSUS_OVERRIDE);
- udelay(1000);
-
- ath79_device_reset_clear(AR934X_RESET_USB_PHY);
- udelay(1000);
-
- ath79_device_reset_clear(AR934X_RESET_USB_PHY_ANALOG);
- udelay(1000);
-
- ath79_device_reset_clear(AR934X_RESET_USB_HOST);
- udelay(1000);
-
- ath79_usb_register("ehci-platform", -1,
- AR934X_EHCI_BASE, AR934X_EHCI_SIZE,
- ATH79_CPU_IRQ(3),
- &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
-}
-
-static void __init qca955x_usb_setup(void)
-{
- ath79_usb_register("ehci-platform", 0,
- QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE,
- ATH79_IP3_IRQ(0),
- &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
-
- ath79_usb_register("ehci-platform", 1,
- QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE,
- ATH79_IP3_IRQ(1),
- &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
-}
-
-void __init ath79_register_usb(void)
-{
- if (soc_is_ar71xx())
- ath79_usb_setup();
- else if (soc_is_ar7240())
- ar7240_usb_setup();
- else if (soc_is_ar7241() || soc_is_ar7242())
- ar724x_usb_setup();
- else if (soc_is_ar913x())
- ar913x_usb_setup();
- else if (soc_is_ar933x())
- ar933x_usb_setup();
- else if (soc_is_ar934x())
- ar934x_usb_setup();
- else if (soc_is_qca955x())
- qca955x_usb_setup();
- else
- BUG();
-}
diff --git a/arch/mips/ath79/dev-usb.h b/arch/mips/ath79/dev-usb.h
deleted file mode 100644
index 4b86a69ca080..000000000000
--- a/arch/mips/ath79/dev-usb.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X USB Host Controller support
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _ATH79_DEV_USB_H
-#define _ATH79_DEV_USB_H
-
-void ath79_register_usb(void);
-
-#endif /* _ATH79_DEV_USB_H */
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
deleted file mode 100644
index da190b1b87ce..000000000000
--- a/arch/mips/ath79/dev-wmac.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Atheros AR913X/AR933X SoC built-in WMAC device support
- *
- * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * Parts of this file are based on Atheros 2.6.15/2.6.31 BSP
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/ath9k_platform.h>
-
-#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include "dev-wmac.h"
-
-static struct ath9k_platform_data ath79_wmac_data;
-
-static struct resource ath79_wmac_resources[] = {
- {
- /* .start and .end fields are filled dynamically */
- .flags = IORESOURCE_MEM,
- }, {
- /* .start and .end fields are filled dynamically */
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device ath79_wmac_device = {
- .name = "ath9k",
- .id = -1,
- .resource = ath79_wmac_resources,
- .num_resources = ARRAY_SIZE(ath79_wmac_resources),
- .dev = {
- .platform_data = &ath79_wmac_data,
- },
-};
-
-static void __init ar913x_wmac_setup(void)
-{
- /* reset the WMAC */
- ath79_device_reset_set(AR913X_RESET_AMBA2WMAC);
- mdelay(10);
-
- ath79_device_reset_clear(AR913X_RESET_AMBA2WMAC);
- mdelay(10);
-
- ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
- ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
- ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
- ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
-}
-
-
-static int ar933x_wmac_reset(void)
-{
- ath79_device_reset_set(AR933X_RESET_WMAC);
- ath79_device_reset_clear(AR933X_RESET_WMAC);
-
- return 0;
-}
-
-static int ar933x_r1_get_wmac_revision(void)
-{
- return ath79_soc_rev;
-}
-
-static void __init ar933x_wmac_setup(void)
-{
- u32 t;
-
- ar933x_wmac_reset();
-
- ath79_wmac_device.name = "ar933x_wmac";
-
- ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
- ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
- ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
- ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
-
- t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
- if (t & AR933X_BOOTSTRAP_REF_CLK_40)
- ath79_wmac_data.is_clk_25mhz = false;
- else
- ath79_wmac_data.is_clk_25mhz = true;
-
- if (ath79_soc_rev == 1)
- ath79_wmac_data.get_mac_revision = ar933x_r1_get_wmac_revision;
-
- ath79_wmac_data.external_reset = ar933x_wmac_reset;
-}
-
-static void ar934x_wmac_setup(void)
-{
- u32 t;
-
- ath79_wmac_device.name = "ar934x_wmac";
-
- ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
- ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
- ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
- ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
-
- t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
- if (t & AR934X_BOOTSTRAP_REF_CLK_40)
- ath79_wmac_data.is_clk_25mhz = false;
- else
- ath79_wmac_data.is_clk_25mhz = true;
-}
-
-static void qca955x_wmac_setup(void)
-{
- u32 t;
-
- ath79_wmac_device.name = "qca955x_wmac";
-
- ath79_wmac_resources[0].start = QCA955X_WMAC_BASE;
- ath79_wmac_resources[0].end = QCA955X_WMAC_BASE + QCA955X_WMAC_SIZE - 1;
- ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
- ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
-
- t = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
- if (t & QCA955X_BOOTSTRAP_REF_CLK_40)
- ath79_wmac_data.is_clk_25mhz = false;
- else
- ath79_wmac_data.is_clk_25mhz = true;
-}
-
-void __init ath79_register_wmac(u8 *cal_data)
-{
- if (soc_is_ar913x())
- ar913x_wmac_setup();
- else if (soc_is_ar933x())
- ar933x_wmac_setup();
- else if (soc_is_ar934x())
- ar934x_wmac_setup();
- else if (soc_is_qca955x())
- qca955x_wmac_setup();
- else
- BUG();
-
- if (cal_data)
- memcpy(ath79_wmac_data.eeprom_data, cal_data,
- sizeof(ath79_wmac_data.eeprom_data));
-
- platform_device_register(&ath79_wmac_device);
-}
diff --git a/arch/mips/ath79/dev-wmac.h b/arch/mips/ath79/dev-wmac.h
deleted file mode 100644
index c9cd8709f090..000000000000
--- a/arch/mips/ath79/dev-wmac.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Atheros AR913X/AR933X SoC built-in WMAC device support
- *
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _ATH79_DEV_WMAC_H
-#define _ATH79_DEV_WMAC_H
-
-void ath79_register_wmac(u8 *cal_data);
-
-#endif /* _ATH79_DEV_WMAC_H */
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
deleted file mode 100644
index 2dfff1f19004..000000000000
--- a/arch/mips/ath79/irq.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Atheros AR71xx/AR724x/AR913x specific interrupt handling
- *
- * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irqchip.h>
-#include <linux/of_irq.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-
-#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include "common.h"
-#include "machtypes.h"
-
-
-static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
-{
- u32 status;
-
- status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
-
- if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
- ath79_ddr_wb_flush(3);
- generic_handle_irq(ATH79_IP2_IRQ(0));
- } else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
- ath79_ddr_wb_flush(4);
- generic_handle_irq(ATH79_IP2_IRQ(1));
- } else {
- spurious_interrupt();
- }
-}
-
-static void ar934x_ip2_irq_init(void)
-{
- int i;
-
- for (i = ATH79_IP2_IRQ_BASE;
- i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
- irq_set_chip_and_handler(i, &dummy_irq_chip,
- handle_level_irq);
-
- irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
-}
-
-static void qca955x_ip2_irq_dispatch(struct irq_desc *desc)
-{
- u32 status;
-
- status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
- status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
-
- if (status == 0) {
- spurious_interrupt();
- return;
- }
-
- if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
- /* TODO: flush DDR? */
- generic_handle_irq(ATH79_IP2_IRQ(0));
- }
-
- if (status & QCA955X_EXT_INT_WMAC_ALL) {
- /* TODO: flush DDR? */
- generic_handle_irq(ATH79_IP2_IRQ(1));
- }
-}
-
-static void qca955x_ip3_irq_dispatch(struct irq_desc *desc)
-{
- u32 status;
-
- status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
- status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
- QCA955X_EXT_INT_USB1 |
- QCA955X_EXT_INT_USB2;
-
- if (status == 0) {
- spurious_interrupt();
- return;
- }
-
- if (status & QCA955X_EXT_INT_USB1) {
- /* TODO: flush DDR? */
- generic_handle_irq(ATH79_IP3_IRQ(0));
- }
-
- if (status & QCA955X_EXT_INT_USB2) {
- /* TODO: flush DDR? */
- generic_handle_irq(ATH79_IP3_IRQ(1));
- }
-
- if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) {
- /* TODO: flush DDR? */
- generic_handle_irq(ATH79_IP3_IRQ(2));
- }
-}
-
-static void qca955x_irq_init(void)
-{
- int i;
-
- for (i = ATH79_IP2_IRQ_BASE;
- i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
- irq_set_chip_and_handler(i, &dummy_irq_chip,
- handle_level_irq);
-
- irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch);
-
- for (i = ATH79_IP3_IRQ_BASE;
- i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++)
- irq_set_chip_and_handler(i, &dummy_irq_chip,
- handle_level_irq);
-
- irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
-}
-
-void __init arch_init_irq(void)
-{
- unsigned irq_wb_chan2 = -1;
- unsigned irq_wb_chan3 = -1;
- bool misc_is_ar71xx;
-
- if (mips_machtype == ATH79_MACH_GENERIC_OF) {
- irqchip_init();
- return;
- }
-
- if (soc_is_ar71xx() || soc_is_ar724x() ||
- soc_is_ar913x() || soc_is_ar933x()) {
- irq_wb_chan2 = 3;
- irq_wb_chan3 = 2;
- } else if (soc_is_ar934x()) {
- irq_wb_chan3 = 2;
- }
-
- ath79_cpu_irq_init(irq_wb_chan2, irq_wb_chan3);
-
- if (soc_is_ar71xx() || soc_is_ar913x())
- misc_is_ar71xx = true;
- else if (soc_is_ar724x() ||
- soc_is_ar933x() ||
- soc_is_ar934x() ||
- soc_is_qca955x())
- misc_is_ar71xx = false;
- else
- BUG();
- ath79_misc_irq_init(
- ath79_reset_base + AR71XX_RESET_REG_MISC_INT_STATUS,
- ATH79_CPU_IRQ(6), ATH79_MISC_IRQ_BASE, misc_is_ar71xx);
-
- if (soc_is_ar934x())
- ar934x_ip2_irq_init();
- else if (soc_is_qca955x())
- qca955x_irq_init();
-}
diff --git a/arch/mips/ath79/mach-ap121.c b/arch/mips/ath79/mach-ap121.c
deleted file mode 100644
index 1bf73f2a069d..000000000000
--- a/arch/mips/ath79/mach-ap121.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Atheros AP121 board support
- *
- * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include "machtypes.h"
-#include "dev-gpio-buttons.h"
-#include "dev-leds-gpio.h"
-#include "dev-spi.h"
-#include "dev-usb.h"
-#include "dev-wmac.h"
-
-#define AP121_GPIO_LED_WLAN 0
-#define AP121_GPIO_LED_USB 1
-
-#define AP121_GPIO_BTN_JUMPSTART 11
-#define AP121_GPIO_BTN_RESET 12
-
-#define AP121_KEYS_POLL_INTERVAL 20 /* msecs */
-#define AP121_KEYS_DEBOUNCE_INTERVAL (3 * AP121_KEYS_POLL_INTERVAL)
-
-#define AP121_CAL_DATA_ADDR 0x1fff1000
-
-static struct gpio_led ap121_leds_gpio[] __initdata = {
- {
- .name = "ap121:green:usb",
- .gpio = AP121_GPIO_LED_USB,
- .active_low = 0,
- },
- {
- .name = "ap121:green:wlan",
- .gpio = AP121_GPIO_LED_WLAN,
- .active_low = 0,
- },
-};
-
-static struct gpio_keys_button ap121_gpio_keys[] __initdata = {
- {
- .desc = "jumpstart button",
- .type = EV_KEY,
- .code = KEY_WPS_BUTTON,
- .debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL,
- .gpio = AP121_GPIO_BTN_JUMPSTART,
- .active_low = 1,
- },
- {
- .desc = "reset button",
- .type = EV_KEY,
- .code = KEY_RESTART,
- .debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL,
- .gpio = AP121_GPIO_BTN_RESET,
- .active_low = 1,
- }
-};
-
-static struct spi_board_info ap121_spi_info[] = {
- {
- .bus_num = 0,
- .chip_select = 0,
- .max_speed_hz = 25000000,
- .modalias = "mx25l1606e",
- }
-};
-
-static struct ath79_spi_platform_data ap121_spi_data = {
- .bus_num = 0,
- .num_chipselect = 1,
-};
-
-static void __init ap121_setup(void)
-{
- u8 *cal_data = (u8 *) KSEG1ADDR(AP121_CAL_DATA_ADDR);
-
- ath79_register_leds_gpio(-1, ARRAY_SIZE(ap121_leds_gpio),
- ap121_leds_gpio);
- ath79_register_gpio_keys_polled(-1, AP121_KEYS_POLL_INTERVAL,
- ARRAY_SIZE(ap121_gpio_keys),
- ap121_gpio_keys);
-
- ath79_register_spi(&ap121_spi_data, ap121_spi_info,
- ARRAY_SIZE(ap121_spi_info));
- ath79_register_usb();
- ath79_register_wmac(cal_data);
-}
-
-MIPS_MACHINE(ATH79_MACH_AP121, "AP121", "Atheros AP121 reference board",
- ap121_setup);
diff --git a/arch/mips/ath79/mach-ap136.c b/arch/mips/ath79/mach-ap136.c
deleted file mode 100644
index 07eac58c3641..000000000000
--- a/arch/mips/ath79/mach-ap136.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Qualcomm Atheros AP136 reference board support
- *
- * Copyright (c) 2012 Qualcomm Atheros
- * Copyright (c) 2012-2013 Gabor Juhos <juhosg@openwrt.org>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- */
-
-#include <linux/pci.h>
-#include <linux/ath9k_platform.h>
-
-#include "machtypes.h"
-#include "dev-gpio-buttons.h"
-#include "dev-leds-gpio.h"
-#include "dev-spi.h"
-#include "dev-usb.h"
-#include "dev-wmac.h"
-#include "pci.h"
-
-#define AP136_GPIO_LED_STATUS_RED 14
-#define AP136_GPIO_LED_STATUS_GREEN 19
-#define AP136_GPIO_LED_USB 4
-#define AP136_GPIO_LED_WLAN_2G 13
-#define AP136_GPIO_LED_WLAN_5G 12
-#define AP136_GPIO_LED_WPS_RED 15
-#define AP136_GPIO_LED_WPS_GREEN 20
-
-#define AP136_GPIO_BTN_WPS 16
-#define AP136_GPIO_BTN_RFKILL 21
-
-#define AP136_KEYS_POLL_INTERVAL 20 /* msecs */
-#define AP136_KEYS_DEBOUNCE_INTERVAL (3 * AP136_KEYS_POLL_INTERVAL)
-
-#define AP136_WMAC_CALDATA_OFFSET 0x1000
-#define AP136_PCIE_CALDATA_OFFSET 0x5000
-
-static struct gpio_led ap136_leds_gpio[] __initdata = {
- {
- .name = "qca:green:status",
- .gpio = AP136_GPIO_LED_STATUS_GREEN,
- .active_low = 1,
- },
- {
- .name = "qca:red:status",
- .gpio = AP136_GPIO_LED_STATUS_RED,
- .active_low = 1,
- },
- {
- .name = "qca:green:wps",
- .gpio = AP136_GPIO_LED_WPS_GREEN,
- .active_low = 1,
- },
- {
- .name = "qca:red:wps",
- .gpio = AP136_GPIO_LED_WPS_RED,
- .active_low = 1,
- },
- {
- .name = "qca:red:wlan-2g",
- .gpio = AP136_GPIO_LED_WLAN_2G,
- .active_low = 1,
- },
- {
- .name = "qca:red:usb",
- .gpio = AP136_GPIO_LED_USB,
- .active_low = 1,
- }
-};
-
-static struct gpio_keys_button ap136_gpio_keys[] __initdata = {
- {
- .desc = "WPS button",
- .type = EV_KEY,
- .code = KEY_WPS_BUTTON,
- .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
- .gpio = AP136_GPIO_BTN_WPS,
- .active_low = 1,
- },
- {
- .desc = "RFKILL button",
- .type = EV_KEY,
- .code = KEY_RFKILL,
- .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
- .gpio = AP136_GPIO_BTN_RFKILL,
- .active_low = 1,
- },
-};
-
-static struct spi_board_info ap136_spi_info[] = {
- {
- .bus_num = 0,
- .chip_select = 0,
- .max_speed_hz = 25000000,
- .modalias = "mx25l6405d",
- }
-};
-
-static struct ath79_spi_platform_data ap136_spi_data = {
- .bus_num = 0,
- .num_chipselect = 1,
-};
-
-#ifdef CONFIG_PCI
-static struct ath9k_platform_data ap136_ath9k_data;
-
-static int ap136_pci_plat_dev_init(struct pci_dev *dev)
-{
- if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0)
- dev->dev.platform_data = &ap136_ath9k_data;
-
- return 0;
-}
-
-static void __init ap136_pci_init(u8 *eeprom)
-{
- memcpy(ap136_ath9k_data.eeprom_data, eeprom,
- sizeof(ap136_ath9k_data.eeprom_data));
-
- ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init);
- ath79_register_pci();
-}
-#else
-static inline void ap136_pci_init(u8 *eeprom) {}
-#endif /* CONFIG_PCI */
-
-static void __init ap136_setup(void)
-{
- u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
-
- ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio),
- ap136_leds_gpio);
- ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL,
- ARRAY_SIZE(ap136_gpio_keys),
- ap136_gpio_keys);
- ath79_register_spi(&ap136_spi_data, ap136_spi_info,
- ARRAY_SIZE(ap136_spi_info));
- ath79_register_usb();
- ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET);
- ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET);
-}
-
-MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010",
- "Atheros AP136-010 reference board",
- ap136_setup);
diff --git a/arch/mips/ath79/mach-ap81.c b/arch/mips/ath79/mach-ap81.c
deleted file mode 100644
index 1c78d497f930..000000000000
--- a/arch/mips/ath79/mach-ap81.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Atheros AP81 board support
- *
- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include "machtypes.h"
-#include "dev-wmac.h"
-#include "dev-gpio-buttons.h"
-#include "dev-leds-gpio.h"
-#include "dev-spi.h"
-#include "dev-usb.h"
-
-#define AP81_GPIO_LED_STATUS 1
-#define AP81_GPIO_LED_AOSS 3
-#define AP81_GPIO_LED_WLAN 6
-#define AP81_GPIO_LED_POWER 14
-
-#define AP81_GPIO_BTN_SW4 12
-#define AP81_GPIO_BTN_SW1 21
-
-#define AP81_KEYS_POLL_INTERVAL 20 /* msecs */
-#define AP81_KEYS_DEBOUNCE_INTERVAL (3 * AP81_KEYS_POLL_INTERVAL)
-
-#define AP81_CAL_DATA_ADDR 0x1fff1000
-
-static struct gpio_led ap81_leds_gpio[] __initdata = {
- {
- .name = "ap81:green:status",
- .gpio = AP81_GPIO_LED_STATUS,
- .active_low = 1,
- }, {
- .name = "ap81:amber:aoss",
- .gpio = AP81_GPIO_LED_AOSS,
- .active_low = 1,
- }, {
- .name = "ap81:green:wlan",
- .gpio = AP81_GPIO_LED_WLAN,
- .active_low = 1,
- }, {
- .name = "ap81:green:power",
- .gpio = AP81_GPIO_LED_POWER,
- .active_low = 1,
- }
-};
-
-static struct gpio_keys_button ap81_gpio_keys[] __initdata = {
- {
- .desc = "sw1",
- .type = EV_KEY,
- .code = BTN_0,
- .debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL,
- .gpio = AP81_GPIO_BTN_SW1,
- .active_low = 1,
- } , {
- .desc = "sw4",
- .type = EV_KEY,
- .code = BTN_1,
- .debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL,
- .gpio = AP81_GPIO_BTN_SW4,
- .active_low = 1,
- }
-};
-
-static struct spi_board_info ap81_spi_info[] = {
- {
- .bus_num = 0,
- .chip_select = 0,
- .max_speed_hz = 25000000,
- .modalias = "m25p64",
- }
-};
-
-static struct ath79_spi_platform_data ap81_spi_data = {
- .bus_num = 0,
- .num_chipselect = 1,
-};
-
-static void __init ap81_setup(void)
-{
- u8 *cal_data = (u8 *) KSEG1ADDR(AP81_CAL_DATA_ADDR);
-
- ath79_register_leds_gpio(-1, ARRAY_SIZE(ap81_leds_gpio),
- ap81_leds_gpio);
- ath79_register_gpio_keys_polled(-1, AP81_KEYS_POLL_INTERVAL,
- ARRAY_SIZE(ap81_gpio_keys),
- ap81_gpio_keys);
- ath79_register_spi(&ap81_spi_data, ap81_spi_info,
- ARRAY_SIZE(ap81_spi_info));
- ath79_register_wmac(cal_data);
- ath79_register_usb();
-}
-
-MIPS_MACHINE(ATH79_MACH_AP81, "AP81", "Atheros AP81 reference board",
- ap81_setup);
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
deleted file mode 100644
index 9423f5aed287..000000000000
--- a/arch/mips/ath79/mach-db120.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Atheros DB120 reference board support
- *
- * Copyright (c) 2011 Qualcomm Atheros
- * Copyright (c) 2011 Gabor Juhos <juhosg@openwrt.org>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- */
-
-#include <linux/pci.h>
-#include <linux/ath9k_platform.h>
-
-#include "machtypes.h"
-#include "dev-gpio-buttons.h"
-#include "dev-leds-gpio.h"
-#include "dev-spi.h"
-#include "dev-usb.h"
-#include "dev-wmac.h"
-#include "pci.h"
-
-#define DB120_GPIO_LED_WLAN_5G 12
-#define DB120_GPIO_LED_WLAN_2G 13
-#define DB120_GPIO_LED_STATUS 14
-#define DB120_GPIO_LED_WPS 15
-
-#define DB120_GPIO_BTN_WPS 16
-
-#define DB120_KEYS_POLL_INTERVAL 20 /* msecs */
-#define DB120_KEYS_DEBOUNCE_INTERVAL (3 * DB120_KEYS_POLL_INTERVAL)
-
-#define DB120_WMAC_CALDATA_OFFSET 0x1000
-#define DB120_PCIE_CALDATA_OFFSET 0x5000
-
-static struct gpio_led db120_leds_gpio[] __initdata = {
- {
- .name = "db120:green:status",
- .gpio = DB120_GPIO_LED_STATUS,
- .active_low = 1,
- },
- {
- .name = "db120:green:wps",
- .gpio = DB120_GPIO_LED_WPS,
- .active_low = 1,
- },
- {
- .name = "db120:green:wlan-5g",
- .gpio = DB120_GPIO_LED_WLAN_5G,
- .active_low = 1,
- },
- {
- .name = "db120:green:wlan-2g",
- .gpio = DB120_GPIO_LED_WLAN_2G,
- .active_low = 1,
- },
-};
-
-static struct gpio_keys_button db120_gpio_keys[] __initdata = {
- {
- .desc = "WPS button",
- .type = EV_KEY,
- .code = KEY_WPS_BUTTON,
- .debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL,
- .gpio = DB120_GPIO_BTN_WPS,
- .active_low = 1,
- },
-};
-
-static struct spi_board_info db120_spi_info[] = {
- {
- .bus_num = 0,
- .chip_select = 0,
- .max_speed_hz = 25000000,
- .modalias = "s25sl064a",
- }
-};
-
-static struct ath79_spi_platform_data db120_spi_data = {
- .bus_num = 0,
- .num_chipselect = 1,
-};
-
-#ifdef CONFIG_PCI
-static struct ath9k_platform_data db120_ath9k_data;
-
-static int db120_pci_plat_dev_init(struct pci_dev *dev)
-{
- switch (PCI_SLOT(dev->devfn)) {
- case 0:
- dev->dev.platform_data = &db120_ath9k_data;
- break;
- }
-
- return 0;
-}
-
-static void __init db120_pci_init(u8 *eeprom)
-{
- memcpy(db120_ath9k_data.eeprom_data, eeprom,
- sizeof(db120_ath9k_data.eeprom_data));
-
- ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init);
- ath79_register_pci();
-}
-#else
-static inline void db120_pci_init(u8 *eeprom) {}
-#endif /* CONFIG_PCI */
-
-static void __init db120_setup(void)
-{
- u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
-
- ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio),
- db120_leds_gpio);
- ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL,
- ARRAY_SIZE(db120_gpio_keys),
- db120_gpio_keys);
- ath79_register_spi(&db120_spi_data, db120_spi_info,
- ARRAY_SIZE(db120_spi_info));
- ath79_register_usb();
- ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET);
- db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET);
-}
-
-MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board",
- db120_setup);
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
deleted file mode 100644
index 75fb96ca61db..000000000000
--- a/arch/mips/ath79/mach-pb44.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Atheros PB44 reference board support
- *
- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/gpio/machine.h>
-#include <linux/platform_data/pcf857x.h>
-
-#include "machtypes.h"
-#include "dev-gpio-buttons.h"
-#include "dev-leds-gpio.h"
-#include "dev-spi.h"
-#include "dev-usb.h"
-#include "pci.h"
-
-#define PB44_GPIO_I2C_SCL 0
-#define PB44_GPIO_I2C_SDA 1
-
-#define PB44_GPIO_EXP_BASE 16
-#define PB44_GPIO_SW_RESET (PB44_GPIO_EXP_BASE + 6)
-#define PB44_GPIO_SW_JUMP (PB44_GPIO_EXP_BASE + 8)
-#define PB44_GPIO_LED_JUMP1 (PB44_GPIO_EXP_BASE + 9)
-#define PB44_GPIO_LED_JUMP2 (PB44_GPIO_EXP_BASE + 10)
-
-#define PB44_KEYS_POLL_INTERVAL 20 /* msecs */
-#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
-
-static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
- .dev_id = "i2c-gpio.0",
- .table = {
- GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
- NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SCL,
- NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- },
-};
-
-static struct platform_device pb44_i2c_gpio_device = {
- .name = "i2c-gpio",
- .id = 0,
- .dev = {
- .platform_data = NULL,
- }
-};
-
-static struct pcf857x_platform_data pb44_pcf857x_data = {
- .gpio_base = PB44_GPIO_EXP_BASE,
-};
-
-static struct i2c_board_info pb44_i2c_board_info[] __initdata = {
- {
- I2C_BOARD_INFO("pcf8575", 0x20),
- .platform_data = &pb44_pcf857x_data,
- },
-};
-
-static struct gpio_led pb44_leds_gpio[] __initdata = {
- {
- .name = "pb44:amber:jump1",
- .gpio = PB44_GPIO_LED_JUMP1,
- .active_low = 1,
- }, {
- .name = "pb44:green:jump2",
- .gpio = PB44_GPIO_LED_JUMP2,
- .active_low = 1,
- },
-};
-
-static struct gpio_keys_button pb44_gpio_keys[] __initdata = {
- {
- .desc = "soft_reset",
- .type = EV_KEY,
- .code = KEY_RESTART,
- .debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL,
- .gpio = PB44_GPIO_SW_RESET,
- .active_low = 1,
- } , {
- .desc = "jumpstart",
- .type = EV_KEY,
- .code = KEY_WPS_BUTTON,
- .debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL,
- .gpio = PB44_GPIO_SW_JUMP,
- .active_low = 1,
- }
-};
-
-static struct spi_board_info pb44_spi_info[] = {
- {
- .bus_num = 0,
- .chip_select = 0,
- .max_speed_hz = 25000000,
- .modalias = "m25p64",
- },
-};
-
-static struct ath79_spi_platform_data pb44_spi_data = {
- .bus_num = 0,
- .num_chipselect = 1,
-};
-
-static void __init pb44_init(void)
-{
- gpiod_add_lookup_table(&pb44_i2c_gpiod_table);
- i2c_register_board_info(0, pb44_i2c_board_info,
- ARRAY_SIZE(pb44_i2c_board_info));
- platform_device_register(&pb44_i2c_gpio_device);
-
- ath79_register_leds_gpio(-1, ARRAY_SIZE(pb44_leds_gpio),
- pb44_leds_gpio);
- ath79_register_gpio_keys_polled(-1, PB44_KEYS_POLL_INTERVAL,
- ARRAY_SIZE(pb44_gpio_keys),
- pb44_gpio_keys);
- ath79_register_spi(&pb44_spi_data, pb44_spi_info,
- ARRAY_SIZE(pb44_spi_info));
- ath79_register_usb();
- ath79_register_pci();
-}
-
-MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
- pb44_init);
diff --git a/arch/mips/ath79/mach-ubnt-xm.c b/arch/mips/ath79/mach-ubnt-xm.c
deleted file mode 100644
index 4a3c60694c75..000000000000
--- a/arch/mips/ath79/mach-ubnt-xm.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Ubiquiti Networks XM (rev 1.0) board support
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- * Derived from: mach-pb44.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/ath9k_platform.h>
-
-#include <asm/mach-ath79/irq.h>
-
-#include "machtypes.h"
-#include "dev-gpio-buttons.h"
-#include "dev-leds-gpio.h"
-#include "dev-spi.h"
-#include "pci.h"
-
-#define UBNT_XM_GPIO_LED_L1 0
-#define UBNT_XM_GPIO_LED_L2 1
-#define UBNT_XM_GPIO_LED_L3 11
-#define UBNT_XM_GPIO_LED_L4 7
-
-#define UBNT_XM_GPIO_BTN_RESET 12
-
-#define UBNT_XM_KEYS_POLL_INTERVAL 20
-#define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL)
-
-#define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000)
-
-static struct gpio_led ubnt_xm_leds_gpio[] __initdata = {
- {
- .name = "ubnt-xm:red:link1",
- .gpio = UBNT_XM_GPIO_LED_L1,
- .active_low = 0,
- }, {
- .name = "ubnt-xm:orange:link2",
- .gpio = UBNT_XM_GPIO_LED_L2,
- .active_low = 0,
- }, {
- .name = "ubnt-xm:green:link3",
- .gpio = UBNT_XM_GPIO_LED_L3,
- .active_low = 0,
- }, {
- .name = "ubnt-xm:green:link4",
- .gpio = UBNT_XM_GPIO_LED_L4,
- .active_low = 0,
- },
-};
-
-static struct gpio_keys_button ubnt_xm_gpio_keys[] __initdata = {
- {
- .desc = "reset",
- .type = EV_KEY,
- .code = KEY_RESTART,
- .debounce_interval = UBNT_XM_KEYS_DEBOUNCE_INTERVAL,
- .gpio = UBNT_XM_GPIO_BTN_RESET,
- .active_low = 1,
- }
-};
-
-static struct spi_board_info ubnt_xm_spi_info[] = {
- {
- .bus_num = 0,
- .chip_select = 0,
- .max_speed_hz = 25000000,
- .modalias = "mx25l6405d",
- }
-};
-
-static struct ath79_spi_platform_data ubnt_xm_spi_data = {
- .bus_num = 0,
- .num_chipselect = 1,
-};
-
-#ifdef CONFIG_PCI
-static struct ath9k_platform_data ubnt_xm_eeprom_data;
-
-static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev)
-{
- switch (PCI_SLOT(dev->devfn)) {
- case 0:
- dev->dev.platform_data = &ubnt_xm_eeprom_data;
- break;
- }
-
- return 0;
-}
-
-static void __init ubnt_xm_pci_init(void)
-{
- memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
- sizeof(ubnt_xm_eeprom_data.eeprom_data));
-
- ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init);
- ath79_register_pci();
-}
-#else
-static inline void ubnt_xm_pci_init(void) {}
-#endif /* CONFIG_PCI */
-
-static void __init ubnt_xm_init(void)
-{
- ath79_register_leds_gpio(-1, ARRAY_SIZE(ubnt_xm_leds_gpio),
- ubnt_xm_leds_gpio);
-
- ath79_register_gpio_keys_polled(-1, UBNT_XM_KEYS_POLL_INTERVAL,
- ARRAY_SIZE(ubnt_xm_gpio_keys),
- ubnt_xm_gpio_keys);
-
- ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info,
- ARRAY_SIZE(ubnt_xm_spi_info));
-
- ubnt_xm_pci_init();
-}
-
-MIPS_MACHINE(ATH79_MACH_UBNT_XM,
- "UBNT-XM",
- "Ubiquiti Networks XM (rev 1.0) board",
- ubnt_xm_init);
diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h
deleted file mode 100644
index a13db3d15c8f..000000000000
--- a/arch/mips/ath79/machtypes.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Atheros AR71XX/AR724X/AR913X machine type definitions
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _ATH79_MACHTYPE_H
-#define _ATH79_MACHTYPE_H
-
-#include <asm/mips_machine.h>
-
-enum ath79_mach_type {
- ATH79_MACH_GENERIC_OF = -1, /* Device tree board */
- ATH79_MACH_GENERIC = 0,
- ATH79_MACH_AP121, /* Atheros AP121 reference board */
- ATH79_MACH_AP136_010, /* Atheros AP136-010 reference board */
- ATH79_MACH_AP81, /* Atheros AP81 reference board */
- ATH79_MACH_DB120, /* Atheros DB120 reference board */
- ATH79_MACH_PB44, /* Atheros PB44 reference board */
- ATH79_MACH_UBNT_XM, /* Ubiquiti Networks XM board rev 1.0 */
-};
-
-#endif /* _ATH79_MACHTYPE_H */
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c
deleted file mode 100644
index b816cb4a25ff..000000000000
--- a/arch/mips/ath79/pci.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Atheros AR71XX/AR724X specific PCI setup code
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * Parts of this file are based on Atheros' 2.6.15 BSP
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/resource.h>
-#include <linux/platform_device.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/irq.h>
-#include "pci.h"
-
-static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
-static const struct ath79_pci_irq *ath79_pci_irq_map;
-static unsigned ath79_pci_nr_irqs;
-
-static const struct ath79_pci_irq ar71xx_pci_irq_map[] = {
- {
- .slot = 17,
- .pin = 1,
- .irq = ATH79_PCI_IRQ(0),
- }, {
- .slot = 18,
- .pin = 1,
- .irq = ATH79_PCI_IRQ(1),
- }, {
- .slot = 19,
- .pin = 1,
- .irq = ATH79_PCI_IRQ(2),
- }
-};
-
-static const struct ath79_pci_irq ar724x_pci_irq_map[] = {
- {
- .slot = 0,
- .pin = 1,
- .irq = ATH79_PCI_IRQ(0),
- }
-};
-
-static const struct ath79_pci_irq qca955x_pci_irq_map[] = {
- {
- .bus = 0,
- .slot = 0,
- .pin = 1,
- .irq = ATH79_PCI_IRQ(0),
- },
- {
- .bus = 1,
- .slot = 0,
- .pin = 1,
- .irq = ATH79_PCI_IRQ(1),
- },
-};
-
-int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
-{
- int irq = -1;
- int i;
-
- if (ath79_pci_nr_irqs == 0 ||
- ath79_pci_irq_map == NULL) {
- if (soc_is_ar71xx()) {
- ath79_pci_irq_map = ar71xx_pci_irq_map;
- ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map);
- } else if (soc_is_ar724x() ||
- soc_is_ar9342() ||
- soc_is_ar9344()) {
- ath79_pci_irq_map = ar724x_pci_irq_map;
- ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
- } else if (soc_is_qca955x()) {
- ath79_pci_irq_map = qca955x_pci_irq_map;
- ath79_pci_nr_irqs = ARRAY_SIZE(qca955x_pci_irq_map);
- } else {
- pr_crit("pci %s: invalid irq map\n",
- pci_name((struct pci_dev *) dev));
- return irq;
- }
- }
-
- for (i = 0; i < ath79_pci_nr_irqs; i++) {
- const struct ath79_pci_irq *entry;
-
- entry = &ath79_pci_irq_map[i];
- if (entry->bus == dev->bus->number &&
- entry->slot == slot &&
- entry->pin == pin) {
- irq = entry->irq;
- break;
- }
- }
-
- if (irq < 0)
- pr_crit("pci %s: no irq found for pin %u\n",
- pci_name((struct pci_dev *) dev), pin);
- else
- pr_info("pci %s: using irq %d for pin %u\n",
- pci_name((struct pci_dev *) dev), irq, pin);
-
- return irq;
-}
-
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- if (ath79_pci_plat_dev_init)
- return ath79_pci_plat_dev_init(dev);
-
- return 0;
-}
-
-void __init ath79_pci_set_irq_map(unsigned nr_irqs,
- const struct ath79_pci_irq *map)
-{
- ath79_pci_nr_irqs = nr_irqs;
- ath79_pci_irq_map = map;
-}
-
-void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
-{
- ath79_pci_plat_dev_init = func;
-}
-
-static struct platform_device *
-ath79_register_pci_ar71xx(void)
-{
- struct platform_device *pdev;
- struct resource res[4];
-
- memset(res, 0, sizeof(res));
-
- res[0].name = "cfg_base";
- res[0].flags = IORESOURCE_MEM;
- res[0].start = AR71XX_PCI_CFG_BASE;
- res[0].end = AR71XX_PCI_CFG_BASE + AR71XX_PCI_CFG_SIZE - 1;
-
- res[1].flags = IORESOURCE_IRQ;
- res[1].start = ATH79_CPU_IRQ(2);
- res[1].end = ATH79_CPU_IRQ(2);
-
- res[2].name = "io_base";
- res[2].flags = IORESOURCE_IO;
- res[2].start = 0;
- res[2].end = 0;
-
- res[3].name = "mem_base";
- res[3].flags = IORESOURCE_MEM;
- res[3].start = AR71XX_PCI_MEM_BASE;
- res[3].end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1;
-
- pdev = platform_device_register_simple("ar71xx-pci", -1,
- res, ARRAY_SIZE(res));
- return pdev;
-}
-
-static struct platform_device *
-ath79_register_pci_ar724x(int id,
- unsigned long cfg_base,
- unsigned long ctrl_base,
- unsigned long crp_base,
- unsigned long mem_base,
- unsigned long mem_size,
- unsigned long io_base,
- int irq)
-{
- struct platform_device *pdev;
- struct resource res[6];
-
- memset(res, 0, sizeof(res));
-
- res[0].name = "cfg_base";
- res[0].flags = IORESOURCE_MEM;
- res[0].start = cfg_base;
- res[0].end = cfg_base + AR724X_PCI_CFG_SIZE - 1;
-
- res[1].name = "ctrl_base";
- res[1].flags = IORESOURCE_MEM;
- res[1].start = ctrl_base;
- res[1].end = ctrl_base + AR724X_PCI_CTRL_SIZE - 1;
-
- res[2].flags = IORESOURCE_IRQ;
- res[2].start = irq;
- res[2].end = irq;
-
- res[3].name = "mem_base";
- res[3].flags = IORESOURCE_MEM;
- res[3].start = mem_base;
- res[3].end = mem_base + mem_size - 1;
-
- res[4].name = "io_base";
- res[4].flags = IORESOURCE_IO;
- res[4].start = io_base;
- res[4].end = io_base;
-
- res[5].name = "crp_base";
- res[5].flags = IORESOURCE_MEM;
- res[5].start = crp_base;
- res[5].end = crp_base + AR724X_PCI_CRP_SIZE - 1;
-
- pdev = platform_device_register_simple("ar724x-pci", id,
- res, ARRAY_SIZE(res));
- return pdev;
-}
-
-int __init ath79_register_pci(void)
-{
- struct platform_device *pdev = NULL;
-
- if (soc_is_ar71xx()) {
- pdev = ath79_register_pci_ar71xx();
- } else if (soc_is_ar724x()) {
- pdev = ath79_register_pci_ar724x(-1,
- AR724X_PCI_CFG_BASE,
- AR724X_PCI_CTRL_BASE,
- AR724X_PCI_CRP_BASE,
- AR724X_PCI_MEM_BASE,
- AR724X_PCI_MEM_SIZE,
- 0,
- ATH79_CPU_IRQ(2));
- } else if (soc_is_ar9342() ||
- soc_is_ar9344()) {
- u32 bootstrap;
-
- bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
- if ((bootstrap & AR934X_BOOTSTRAP_PCIE_RC) == 0)
- return -ENODEV;
-
- pdev = ath79_register_pci_ar724x(-1,
- AR724X_PCI_CFG_BASE,
- AR724X_PCI_CTRL_BASE,
- AR724X_PCI_CRP_BASE,
- AR724X_PCI_MEM_BASE,
- AR724X_PCI_MEM_SIZE,
- 0,
- ATH79_IP2_IRQ(0));
- } else if (soc_is_qca9558()) {
- pdev = ath79_register_pci_ar724x(0,
- QCA955X_PCI_CFG_BASE0,
- QCA955X_PCI_CTRL_BASE0,
- QCA955X_PCI_CRP_BASE0,
- QCA955X_PCI_MEM_BASE0,
- QCA955X_PCI_MEM_SIZE,
- 0,
- ATH79_IP2_IRQ(0));
-
- pdev = ath79_register_pci_ar724x(1,
- QCA955X_PCI_CFG_BASE1,
- QCA955X_PCI_CTRL_BASE1,
- QCA955X_PCI_CRP_BASE1,
- QCA955X_PCI_MEM_BASE1,
- QCA955X_PCI_MEM_SIZE,
- 1,
- ATH79_IP3_IRQ(2));
- } else {
- /* No PCI support */
- return -ENODEV;
- }
-
- if (!pdev)
- pr_err("unable to register PCI controller device\n");
-
- return pdev ? 0 : -ENODEV;
-}
diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h
deleted file mode 100644
index 1d00a3803c37..000000000000
--- a/arch/mips/ath79/pci.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Atheros AR71XX/AR724X PCI support
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef _ATH79_PCI_H
-#define _ATH79_PCI_H
-
-struct ath79_pci_irq {
- int bus;
- u8 slot;
- u8 pin;
- int irq;
-};
-
-#ifdef CONFIG_PCI
-void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map);
-void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev));
-int ath79_register_pci(void);
-#else
-static inline void
-ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {}
-static inline void
-ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {}
-static inline int ath79_register_pci(void) { return 0; }
-#endif
-
-#endif /* _ATH79_PCI_H */
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 9728abcb18fa..4a70c5de8c92 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_fdt.h>
+#include <linux/irqchip.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
@@ -31,8 +32,6 @@
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
-#include "dev-common.h"
-#include "machtypes.h"
#define ATH79_SYS_TYPE_LEN 64
@@ -235,25 +234,21 @@ void __init plat_mem_setup(void)
else if (fw_passed_dtb)
__dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
- if (mips_machtype != ATH79_MACH_GENERIC_OF) {
- ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
- AR71XX_RESET_SIZE);
- ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
- AR71XX_PLL_SIZE);
- ath79_detect_sys_type();
- ath79_ddr_ctrl_init();
+ ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
+ AR71XX_RESET_SIZE);
+ ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
+ AR71XX_PLL_SIZE);
+ ath79_detect_sys_type();
+ ath79_ddr_ctrl_init();
- detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
-
- /* OF machines should use the reset driver */
- _machine_restart = ath79_restart;
- }
+ detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
+ _machine_restart = ath79_restart;
_machine_halt = ath79_halt;
pm_power_off = ath79_halt;
}
-static void __init ath79_of_plat_time_init(void)
+void __init plat_time_init(void)
{
struct device_node *np;
struct clk *clk;
@@ -283,61 +278,12 @@ static void __init ath79_of_plat_time_init(void)
clk_put(clk);
}
-void __init plat_time_init(void)
-{
- unsigned long cpu_clk_rate;
- unsigned long ahb_clk_rate;
- unsigned long ddr_clk_rate;
- unsigned long ref_clk_rate;
-
- if (IS_ENABLED(CONFIG_OF) && mips_machtype == ATH79_MACH_GENERIC_OF) {
- ath79_of_plat_time_init();
- return;
- }
-
- ath79_clocks_init();
-
- cpu_clk_rate = ath79_get_sys_clk_rate("cpu");
- ahb_clk_rate = ath79_get_sys_clk_rate("ahb");
- ddr_clk_rate = ath79_get_sys_clk_rate("ddr");
- ref_clk_rate = ath79_get_sys_clk_rate("ref");
-
- pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n",
- cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000,
- ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000,
- ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000,
- ref_clk_rate / 1000000, (ref_clk_rate / 1000) % 1000);
-
- mips_hpt_frequency = cpu_clk_rate / 2;
-}
-
-static int __init ath79_setup(void)
+void __init arch_init_irq(void)
{
- if (mips_machtype == ATH79_MACH_GENERIC_OF)
- return 0;
-
- ath79_gpio_init();
- ath79_register_uart();
- ath79_register_wdt();
-
- mips_machine_setup();
-
- return 0;
+ irqchip_init();
}
-arch_initcall(ath79_setup);
-
void __init device_tree_init(void)
{
unflatten_and_copy_device_tree();
}
-
-MIPS_MACHINE(ATH79_MACH_GENERIC,
- "Generic",
- "Generic AR71XX/AR724X/AR913X based board",
- NULL);
-
-MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
- "DTB",
- "Generic AR71XX/AR724X/AR913X based board (DT)",
- NULL);
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 977990a609ba..67b6a78d670b 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -147,7 +147,7 @@ bcm47xx_buttons_buffalo_whr_g125[] __initconst = {
static const struct gpio_keys_button
bcm47xx_buttons_buffalo_whr_g54s[] __initconst = {
BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
- BCM47XX_GPIO_KEY(4, KEY_RESTART),
+ BCM47XX_GPIO_KEY_H(4, KEY_RESTART),
BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
};
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index d85fcdac8bf0..167c42c71e79 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -152,11 +152,11 @@ bcm47xx_leds_buffalo_whr_g125[] __initconst = {
static const struct gpio_led
bcm47xx_leds_buffalo_whr_g54s[] __initconst = {
- BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
- BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
- BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
- BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
- BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "green", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "green", "internal", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(7, "red", "diag", 1, LEDS_GPIO_DEFSTATE_OFF),
};
static const struct gpio_led
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49e608e..82627c264964 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
pm_power_off = bcm47xx_machine_halt;
}
+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+ struct device *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ err = dev_set_name(dev, "bcm47xx_soc");
+ if (err) {
+ pr_err("Failed to set SoC device name: %d\n", err);
+ kfree(dev);
+ return NULL;
+ }
+
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err)
+ pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+ return dev;
+}
+#endif
+
/*
* This finishes bus initialization doing things that were not possible without
* kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
int err;
+ bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+ if (!bcm47xx_bus.bcma.dev)
+ panic("Failed to setup SoC device\n");
+
err = bcma_host_soc_init(&bcm47xx_bus.bcma);
if (err)
panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
+ if (device_register(bcm47xx_bus.bcma.dev))
+ pr_err("Failed to register SoC device\n");
bcma_bus_register(&bcm47xx_bus.bcma.bus);
break;
#endif
@@ -243,7 +274,7 @@ static int __init bcm47xx_register_bus_complete(void)
bcm47xx_leds_register();
bcm47xx_workarounds();
- fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status, -1);
+ fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
return 0;
}
device_initcall(bcm47xx_register_bus_complete);
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index 07b4c65a88a4..8e73d65f3480 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = {
static int shared_device_registered;
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
static struct resource enet0_res[] = {
{
.start = -1, /* filled at runtime */
@@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = {
.resource = enet0_res,
.dev = {
.platform_data = &enet0_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = {
.resource = enet1_res,
.dev = {
.platform_data = &enet1_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = {
.resource = enetsw_res,
.dev = {
.platform_data = &enetsw_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
index 0fa3dd1819ff..dda0559cef50 100644
--- a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
@@ -180,14 +180,28 @@
ethernet@0 {
phy-handle = <&phy2>;
cavium,alt-phy-handle = <&phy100>;
+ rx-delay = <0>;
+ tx-delay = <0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
};
ethernet@1 {
phy-handle = <&phy3>;
cavium,alt-phy-handle = <&phy101>;
+ rx-delay = <0>;
+ tx-delay = <0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
};
ethernet@2 {
phy-handle = <&phy4>;
cavium,alt-phy-handle = <&phy102>;
+ rx-delay = <0>;
+ tx-delay = <0>;
};
ethernet@3 {
compatible = "cavium,octeon-3860-pip-port";
diff --git a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
index 243e5dc444fb..962f37fbc7db 100644
--- a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
+++ b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
@@ -33,12 +33,18 @@
interface@0 {
ethernet@0 {
phy-handle = <&phy7>;
+ rx-delay = <0>;
+ tx-delay = <0x10>;
};
ethernet@1 {
phy-handle = <&phy6>;
+ rx-delay = <0>;
+ tx-delay = <0x10>;
};
ethernet@2 {
phy-handle = <&phy5>;
+ rx-delay = <0>;
+ tx-delay = <0x10>;
};
};
};
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 50cff3cbcc6d..4f7b1fa31cf5 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -76,7 +76,7 @@
status = "okay";
pinctrl-names = "default";
- pinctrl-0 = <&pins_uart2>;
+ pinctrl-0 = <&pins_uart3>;
};
&uart4 {
@@ -196,9 +196,9 @@
bias-disable;
};
- pins_uart2: uart2 {
- function = "uart2";
- groups = "uart2-data", "uart2-hwflow";
+ pins_uart3: uart3 {
+ function = "uart3";
+ groups = "uart3-data", "uart3-hwflow";
bias-disable;
};
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 6fb16fd24035..2beb78a62b7d 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -161,7 +161,7 @@
#dma-cells = <2>;
interrupt-parent = <&intc>;
- interrupts = <29>;
+ interrupts = <20>;
clocks = <&cgu JZ4740_CLK_DMA>;
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
index 2152b7ba65fb..cc8dbea0911f 100644
--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
@@ -90,11 +90,11 @@
interrupts = <0>;
};
- axi_i2c: i2c@10A00000 {
+ axi_i2c: i2c@10a00000 {
compatible = "xlnx,xps-iic-2.00.a";
interrupt-parent = <&axi_intc>;
interrupts = <4>;
- reg = < 0x10A00000 0x10000 >;
+ reg = < 0x10a00000 0x10000 >;
clocks = <&ext>;
xlnx,clk-freq = <0x5f5e100>;
xlnx,family = "Artix7";
@@ -106,9 +106,9 @@
#address-cells = <1>;
#size-cells = <0>;
- ad7420@4B {
+ ad7420@4b {
compatible = "adi,adt7420";
- reg = <0x4B>;
+ reg = <0x4b>;
};
} ;
};
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index ab8362e04461..2e2d45bc850d 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -31,6 +31,7 @@
* network ports from the rest of the cvmx-helper files.
*/
+#include <linux/bug.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-bootinfo.h>
@@ -210,56 +211,18 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
{
cvmx_helper_link_info_t result;
+ WARN(!octeon_is_simulation(),
+ "Using deprecated link status - please update your DT");
+
/* Unless we fix it later, all links are defaulted to down */
result.u64 = 0;
- /*
- * This switch statement should handle all ports that either don't use
- * Marvell PHYS, or don't support in-band status.
- */
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_SIM:
+ if (octeon_is_simulation()) {
/* The simulator gives you a simulated 1Gbps full duplex link */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
- case CVMX_BOARD_TYPE_EBH3100:
- case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
- /* Port 1 on these boards is always Gigabit */
- if (ipd_port == 1) {
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
- /* Fall through to the generic code below */
- break;
- case CVMX_BOARD_TYPE_CUST_NB5:
- /* Port 1 on these boards is always Gigabit */
- if (ipd_port == 1) {
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
- break;
- case CVMX_BOARD_TYPE_BBGW_REF:
- /* Port 1 on these boards is always Gigabit */
- if (ipd_port == 2) {
- /* Port 2 is not hooked up */
- result.u64 = 0;
- return result;
- } else {
- /* Ports 0 and 1 connect to the switch */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
- break;
}
if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
@@ -358,45 +321,6 @@ int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
}
/**
- * Enable packet input/output from the hardware. This function is
- * called after by cvmx_helper_packet_hardware_enable() to
- * perform board specific initialization. For most boards
- * nothing is needed.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_board_hardware_enable(int interface)
-{
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) {
- if (interface == 0) {
- /* Different config for switch port */
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
- /*
- * Boards with gigabit WAN ports need a
- * different setting that is compatible with
- * 100 Mbit settings
- */
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface),
- 0xc);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface),
- 0xc);
- }
- } else if (cvmx_sysinfo_get()->board_type ==
- CVMX_BOARD_TYPE_UBNT_E100) {
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0);
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0x10);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(2, interface), 0);
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(2, interface), 0x10);
- }
- return 0;
-}
-
-/**
* Get the clock type used for the USB block based on board type.
* Used by the USB code for auto configuration of clock type.
*
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 38e0444e57e8..de391541d6f7 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -30,6 +30,7 @@
* Helper functions for common, but complicated tasks.
*
*/
+#include <linux/bug.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
@@ -43,7 +44,6 @@
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-pip-defs.h>
-#include <asm/octeon/cvmx-smix-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
/* Port count per interface */
@@ -317,22 +317,6 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
- if (interface == 0
- && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
- && cvmx_sysinfo_get()->board_rev_major == 1) {
- /*
- * Lie about interface type of CN3005 board. This
- * board has a switch on port 1 like the other
- * evaluation boards, but it is connected over RGMII
- * instead of GMII. Report GMII mode so that the
- * speed is forced to 1 Gbit full duplex. Other than
- * some initial configuration (which does not use the
- * output of this function) there is no difference in
- * setup between GMII and RGMII modes.
- */
- return CVMX_HELPER_INTERFACE_MODE_GMII;
- }
-
/* Interface 1 is always disabled on CN31XX and CN30XX */
if ((interface == 1)
&& (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
@@ -778,7 +762,6 @@ static int __cvmx_helper_packet_hardware_enable(int interface)
result = __cvmx_helper_loop_enable(interface);
break;
}
- result |= __cvmx_helper_board_hardware_enable(interface);
return result;
}
@@ -1026,7 +1009,6 @@ int cvmx_helper_initialize_packet_io_global(void)
int result = 0;
int interface;
union cvmx_l2c_cfg l2c_cfg;
- union cvmx_smix_en smix_en;
const int num_interfaces = cvmx_helper_get_number_of_interfaces();
/*
@@ -1046,24 +1028,6 @@ int cvmx_helper_initialize_packet_io_global(void)
l2c_cfg.s.rfb_arb_mode = 0;
cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
- /* Make sure SMI/MDIO is enabled so we can query PHYs */
- smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0));
- if (!smix_en.s.en) {
- smix_en.s.en = 1;
- cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64);
- }
-
- /* Newer chips actually have two SMI/MDIO interfaces */
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
- !OCTEON_IS_MODEL(OCTEON_CN58XX) &&
- !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1));
- if (!smix_en.s.en) {
- smix_en.s.en = 1;
- cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64);
- }
- }
-
cvmx_pko_initialize_global();
for (interface = 0; interface < num_interfaces; interface++) {
result |= cvmx_helper_interface_probe(interface);
@@ -1136,6 +1100,7 @@ cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
if (index == 0)
result = __cvmx_helper_rgmii_link_get(ipd_port);
else {
+ WARN(1, "Using deprecated link status - please update your DT");
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c
index 2d68a39f1443..13f6c7716b1e 100644
--- a/arch/mips/cavium-octeon/oct_ilm.c
+++ b/arch/mips/cavium-octeon/oct_ilm.c
@@ -63,31 +63,11 @@ static int reset_statistics(void *data, u64 value)
DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
-static int init_debufs(void)
+static void init_debugfs(void)
{
- struct dentry *show_dentry;
dir = debugfs_create_dir("oct_ilm", 0);
- if (!dir) {
- pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n");
- return -1;
- }
-
- show_dentry = debugfs_create_file("statistics", 0222, dir, NULL,
- &oct_ilm_ops);
- if (!show_dentry) {
- pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n");
- return -1;
- }
-
- show_dentry = debugfs_create_file("reset", 0222, dir, NULL,
- &reset_statistics_ops);
- if (!show_dentry) {
- pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n");
- return -1;
- }
-
- return 0;
-
+ debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_ops);
+ debugfs_create_file("reset", 0222, dir, NULL, &reset_statistics_ops);
}
static void init_latency_info(struct latency_info *li, int startup)
@@ -169,11 +149,7 @@ static __init int oct_ilm_module_init(void)
int rc;
int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM;
- rc = init_debufs();
- if (rc) {
- WARN(1, "Could not create debugfs entries");
- return rc;
- }
+ init_debugfs();
rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD,
"oct_ilm", 0);
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 1f9ba60f7375..51685f893eab 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -458,6 +458,23 @@ static bool __init octeon_has_88e1145(void)
!OCTEON_IS_MODEL(OCTEON_CN56XX);
}
+static bool __init octeon_has_fixed_link(int ipd_port)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+ case CVMX_BOARD_TYPE_CUST_NB5:
+ case CVMX_BOARD_TYPE_EBH3100:
+ /* Port 1 on these boards is always gigabit. */
+ return ipd_port == 1;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ /* Ports 0 and 1 connect to the switch. */
+ return ipd_port == 0 || ipd_port == 1;
+ }
+ return false;
+}
+
static void __init octeon_fdt_set_phy(int eth, int phy_addr)
{
const __be32 *phy_handle;
@@ -586,12 +603,52 @@ static void __init octeon_fdt_rm_ethernet(int node)
fdt_nop_node(initial_boot_params, node);
}
+static void __init _octeon_rx_tx_delay(int eth, int rx_delay, int tx_delay)
+{
+ fdt_setprop_inplace_cell(initial_boot_params, eth, "rx-delay",
+ rx_delay);
+ fdt_setprop_inplace_cell(initial_boot_params, eth, "tx-delay",
+ tx_delay);
+}
+
+static void __init octeon_rx_tx_delay(int eth, int iface, int port)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ if (iface == 0) {
+ if (port == 0) {
+ /*
+ * Boards with gigabit WAN ports need a
+ * different setting that is compatible with
+ * 100 Mbit settings
+ */
+ _octeon_rx_tx_delay(eth, 0xc, 0x0c);
+ return;
+ } else if (port == 1) {
+ /* Different config for switch port. */
+ _octeon_rx_tx_delay(eth, 0x0, 0x0);
+ return;
+ }
+ }
+ break;
+ case CVMX_BOARD_TYPE_UBNT_E100:
+ if (iface == 0 && port <= 2) {
+ _octeon_rx_tx_delay(eth, 0x0, 0x10);
+ return;
+ }
+ break;
+ }
+ fdt_nop_property(initial_boot_params, eth, "rx-delay");
+ fdt_nop_property(initial_boot_params, eth, "tx-delay");
+}
+
static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
{
char name_buffer[20];
int eth;
int phy_addr;
int ipd_port;
+ int fixed_link;
snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p);
eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer);
@@ -609,6 +666,13 @@ static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
octeon_fdt_set_phy(eth, phy_addr);
+
+ fixed_link = fdt_subnode_offset(initial_boot_params, eth, "fixed-link");
+ if (fixed_link < 0)
+ WARN_ON(octeon_has_fixed_link(ipd_port));
+ else if (!octeon_has_fixed_link(ipd_port))
+ fdt_nop_node(initial_boot_params, fixed_link);
+ octeon_rx_tx_delay(eth, i, p);
}
static void __init octeon_fdt_pip_iface(int pip, int idx)
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 2c79ab52977a..8bf43c5a7bc7 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored)
" sync \n"
" synci ($0) \n");
- relocated_kexec_smp_wait(NULL);
+ kexec_reboot();
}
#endif
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 4e4ec779f182..6f981af67826 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index c3cac29e8414..2bb02ea9fb4e 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_LANTIQ=y
CONFIG_PCI_LANTIQ=y
-CONFIG_XRX200_PHY_FW=y
CONFIG_CPU_MIPS32_R2=y
CONFIG_MIPS_VPE_LOADER=y
CONFIG_NR_CPUS=2
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index f15d5db5dd67..87b86cdf126a 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -3,7 +3,6 @@ generated-y += syscall_table_32_o32.h
generated-y += syscall_table_64_n32.h
generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
-generic-$(CONFIG_GENERIC_CSUM) += checksum.h
generic-y += current.h
generic-y += device.h
generic-y += dma-contiguous.h
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 43fcd35e2957..94096299fc56 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
if (kernel_uses_llsc) { \
int temp; \
\
+ loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
if (kernel_uses_llsc) { \
int temp; \
\
+ loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
if (kernel_uses_llsc) { \
int temp; \
\
+ loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
if (kernel_uses_llsc) { \
long temp; \
\
+ loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
if (kernel_uses_llsc) { \
long temp; \
\
+ loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
@@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
if (kernel_uses_llsc) { \
long temp; \
\
+ loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index a5eb1bb199a7..b865e317a14f 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -105,6 +105,20 @@
*/
#define STYPE_SYNC_MB 0x10
+/*
+ * stype 0x14 - A completion barrier specific to global invalidations
+ *
+ * When a sync instruction of this type completes any preceding GINVI or GINVT
+ * operation has been globalized & completed on all coherent CPUs. Anything
+ * that the GINV* instruction should invalidate will have been invalidated on
+ * all coherent CPUs when this instruction completes. It is implementation
+ * specific whether the GINV* instructions themselves will ensure completion,
+ * or this sync type will.
+ *
+ * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3)
+ * this sync type also requires that previous SYNCI operations have completed.
+ */
+#define STYPE_GINV 0x14
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
@@ -222,6 +236,47 @@
#define __smp_mb__before_atomic() __smp_mb__before_llsc()
#define __smp_mb__after_atomic() smp_llsc_mb()
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or pref) in between an ll & sc can cause the sc instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the ll in program order may actually
+ * be executed after the ll - this is the reordering case.
+ *
+ * In order to avoid this we need to place a memory barrier (ie. a sync
+ * instruction) prior to every ll instruction, in between it & any earlier
+ * memory access instructions. Many of these cases are already covered by
+ * smp_mb__before_llsc() but for the remaining cases, typically ones in
+ * which multiple CPUs may operate on a memory location but ordering is not
+ * usually guaranteed, we use loongson_llsc_mb() below.
+ *
+ * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an ll & sc with a target outside
+ * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
+ * or similar, then misprediction of the branch may allow speculative
+ * execution of memory accesses from outside of the ll-sc loop.
+ *
+ * In order to avoid this we need a memory barrier (ie. a sync instruction)
+ * at each affected branch target, for which we also use loongson_llsc_mb()
+ * defined below.
+ *
+ * This case affects all current Loongson 3 CPUs.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#else
+#define loongson_llsc_mb() do { } while (0)
+#endif
+
+static inline void sync_ginv(void)
+{
+ asm volatile("sync\t%0" :: "i"(STYPE_GINV));
+}
+
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
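
The comment block above gives two placement rules for loongson_llsc_mb(). As an illustrative sketch only, not part of the patch, the case-1 placement looks like the following; the function name and the abbreviated asm body are invented for the example, and the real sequences are in the asm/atomic.h, asm/bitops.h and asm/futex.h hunks below:

/*
 * Illustrative sketch only: where loongson_llsc_mb() sits for case 1
 * above -- a barrier between earlier memory accesses and the ll of an
 * LL/SC retry loop. The asm body is abbreviated; see the asm/atomic.h
 * hunk below for the real macro this mirrors.
 */
static inline void example_llsc_add(int i, atomic_t *v)
{
	int temp;

	loongson_llsc_mb();	/* case 1: order prior accesses before ll */
	__asm__ __volatile__(
	"1:	ll	%0, %1		# load-linked		\n"
	"	addu	%0, %2					\n"
	"	sc	%0, %1		# store-conditional	\n"
	"	beqz	%0, 1b		# retry if sc failed	\n"
	: "=&r" (temp), "+m" (v->counter)
	: "Ir" (i));
}

For case 2, a branch out of the LL/SC loop, the barrier instead lands at the branch target; the futex_atomic_cmpxchg_inatomic() hunk below shows this as the loongson_llsc_mb() call added after the asm sequence.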
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index c4675957b21b..830c93a010c3 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+ loongson_llsc_mb();
do {
__asm__ __volatile__(
" " __LL "%0, %1 # set_bit \n"
@@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
} else if (kernel_uses_llsc) {
+ loongson_llsc_mb();
do {
__asm__ __volatile__(
" .set push \n"
@@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+ loongson_llsc_mb();
do {
__asm__ __volatile__(
" " __LL "%0, %1 # clear_bit \n"
@@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
} else if (kernel_uses_llsc) {
+ loongson_llsc_mb();
do {
__asm__ __volatile__(
" .set push \n"
@@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+ loongson_llsc_mb();
do {
__asm__ __volatile__(
" .set push \n"
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 4812d1fed0c2..d687b40b9fbb 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -25,7 +25,6 @@
*
* MIPS specific flush operations:
*
- * - flush_cache_sigtramp() flush signal trampoline
* - flush_icache_all() flush the entire instruction cache
* - flush_data_cache_page() flushes a page from the data cache
* - __flush_icache_user_range(start, end) flushes range of user instructions
@@ -110,7 +109,6 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
-extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 638de0c25249..f345a873742d 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -36,6 +36,8 @@
*/
extern unsigned long __cmpxchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for cmpxchg");
+extern unsigned long __cmpxchg64_unsupported(void)
+ __compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false");
extern unsigned long __xchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for xchg");
@@ -204,12 +206,102 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
cmpxchg((ptr), (o), (n)); \
})
#else
-#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#ifndef CONFIG_SMP
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
-#endif
-#endif
+
+# include <asm-generic/cmpxchg-local.h>
+# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+# ifdef CONFIG_SMP
+
+static inline unsigned long __cmpxchg64(volatile void *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long tmp, ret;
+ unsigned long flags;
+
+ /*
+ * The assembly below has to combine 32 bit values into a 64 bit
+ * register, and split 64 bit values from one register into two. If we
+ * were to take an interrupt in the middle of this we'd only save the
+ * least significant 32 bits of each register & probably clobber the
+ * most significant 32 bits of the 64 bit values we're using. In order
+ * to avoid this we must disable interrupts.
+ */
+ local_irq_save(flags);
+
+ asm volatile(
+ " .set push \n"
+ " .set " MIPS_ISA_ARCH_LEVEL " \n"
+ /* Load 64 bits from ptr */
+ "1: lld %L0, %3 # __cmpxchg64 \n"
+ /*
+ * Split the 64 bit value we loaded into the 2 registers that hold the
+ * ret variable.
+ */
+ " dsra %M0, %L0, 32 \n"
+ " sll %L0, %L0, 0 \n"
+ /*
+ * Compare ret against old, breaking out of the loop if they don't
+ * match.
+ */
+ " bne %M0, %M4, 2f \n"
+ " bne %L0, %L4, 2f \n"
+ /*
+ * Combine the 32 bit halves from the 2 registers that hold the new
+ * variable into a single 64 bit register.
+ */
+# if MIPS_ISA_REV >= 2
+ " move %L1, %L5 \n"
+ " dins %L1, %M5, 32, 32 \n"
+# else
+ " dsll %L1, %L5, 32 \n"
+ " dsrl %L1, %L1, 32 \n"
+ " .set noat \n"
+ " dsll $at, %M5, 32 \n"
+ " or %L1, %L1, $at \n"
+ " .set at \n"
+# endif
+ /* Attempt to store new at ptr */
+ " scd %L1, %2 \n"
+ /* If we failed, loop! */
+ "\t" __scbeqz " %L1, 1b \n"
+ " .set pop \n"
+ "2: \n"
+ : "=&r"(ret),
+ "=&r"(tmp),
+ "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
+ : GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr),
+ "r" (old),
+ "r" (new)
+ : "memory");
+
+ local_irq_restore(flags);
+ return ret;
+}
+
+# define cmpxchg64(ptr, o, n) ({ \
+ unsigned long long __old = (__typeof__(*(ptr)))(o); \
+ unsigned long long __new = (__typeof__(*(ptr)))(n); \
+ __typeof__(*(ptr)) __res; \
+ \
+ /* \
+ * We can only use cmpxchg64 if we know that the CPU supports \
+ * 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported \
+ * will cause a build error unless cpu_has_64bits is a \
+ * compile-time constant 1. \
+ */ \
+ if (cpu_has_64bits && kernel_uses_llsc) \
+ __res = __cmpxchg64((ptr), __old, __new); \
+ else \
+ __res = __cmpxchg64_unsupported(); \
+ \
+ __res; \
+})
+
+# else /* !CONFIG_SMP */
+# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# endif /* !CONFIG_SMP */
+#endif /* !CONFIG_64BIT */
#undef __scbeqz
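
As a minimal usage sketch, not part of the patch, the 32-bit SMP cmpxchg64() defined above slots into the usual compare-and-swap retry loop; the function and variable names here are invented for illustration:

/*
 * Illustrative sketch only: a 64-bit add built on cmpxchg64(). The
 * loop re-reads the current value and retries until the swap lands.
 */
static inline void example_add64(u64 *counter, u64 delta)
{
	u64 old;

	do {
		old = READ_ONCE(*counter);
	} while (cmpxchg64(counter, old, old + delta) != old);
}

Because the macro falls back to __cmpxchg64_unsupported() unless cpu_has_64bits is a compile-time 1, a caller like this only builds on configurations whose CPUs actually provide lld/scd.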
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 701e525641b8..6998a9796499 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -591,6 +591,19 @@
#endif /* CONFIG_MIPS_MT_SMP */
/*
+ * We only enable MMID support for configurations which natively support 64 bit
+ * atomics because getting good performance from the allocator relies upon
+ * efficient atomic64_*() functions.
+ */
+#ifndef cpu_has_mmid
+# ifdef CONFIG_GENERIC_ATOMIC64
+# define cpu_has_mmid 0
+# else
+# define cpu_has_mmid __isa_ge_and_opt(6, MIPS_CPU_MMID)
+# endif
+#endif
+
+/*
* Guest capabilities
*/
#ifndef cpu_guest_has_conf1
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 532b49b1dbb3..6ad7d3cabd91 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -422,6 +422,7 @@ enum cpu_type_enum {
MBIT_ULL(55) /* CPU shares FTLB entries with another */
#define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \
MBIT_ULL(56) /* CPU has perf counters implemented per TC (MIPSMT ASE) */
+#define MIPS_CPU_MMID MBIT_ULL(57) /* CPU supports MemoryMapIDs */
/*
* CPU ASE encodings
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index c14d798f3888..b83b0397462d 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -50,6 +50,7 @@
"i" (-EFAULT) \
: "memory"); \
} else if (cpu_has_llsc) { \
+ loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
@@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
+ loongson_llsc_mb();
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
@@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
"i" (-EFAULT)
: "memory");
+ loongson_llsc_mb();
} else
return -ENOSYS;
diff --git a/arch/mips/include/asm/ginvt.h b/arch/mips/include/asm/ginvt.h
new file mode 100644
index 000000000000..49c6dbe37338
--- /dev/null
+++ b/arch/mips/include/asm/ginvt.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MIPS_ASM_GINVT_H__
+#define __MIPS_ASM_GINVT_H__
+
+#include <asm/mipsregs.h>
+
+enum ginvt_type {
+ GINVT_FULL,
+ GINVT_VA,
+ GINVT_MMID,
+};
+
+#ifdef TOOLCHAIN_SUPPORTS_GINV
+# define _ASM_SET_GINV ".set ginv\n"
+#else
+_ASM_MACRO_1R1I(ginvt, rs, type,
+ _ASM_INSN_IF_MIPS(0x7c0000bd | (__rs << 21) | (\\type << 8))
+ _ASM_INSN32_IF_MM(0x0000717c | (__rs << 16) | (\\type << 9)));
+# define _ASM_SET_GINV
+#endif
+
+static inline void ginvt(unsigned long addr, enum ginvt_type type)
+{
+ asm volatile(
+ ".set push\n"
+ _ASM_SET_GINV
+ " ginvt %0, %1\n"
+ ".set pop"
+ : /* no outputs */
+ : "r"(addr), "i"(type)
+ : "memory");
+}
+
+static inline void ginvt_full(void)
+{
+ ginvt(0, GINVT_FULL);
+}
+
+static inline void ginvt_va(unsigned long addr)
+{
+ addr &= PAGE_MASK << 1;
+ ginvt(addr, GINVT_VA);
+}
+
+static inline void ginvt_mmid(void)
+{
+ ginvt(0, GINVT_MMID);
+}
+
+static inline void ginvt_va_mmid(unsigned long addr)
+{
+ addr &= PAGE_MASK << 1;
+ ginvt(addr, GINVT_VA | GINVT_MMID);
+}
+
+#endif /* __MIPS_ASM_GINVT_H__ */
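
The sketch below, which is not part of the patch, shows how these helpers pair with the STYPE_GINV barrier added in the asm/barrier.h hunk above; the function name and call site are hypothetical, and real users are expected in later TLB-flush changes:

/*
 * Illustrative sketch only: globally invalidate the TLB entries for
 * one page under the current MMID, then use the new STYPE_GINV sync
 * to wait for the invalidation to complete on all coherent CPUs.
 */
static inline void example_flush_tlb_page_globally(unsigned long addr)
{
	ginvt_va_mmid(addr);	/* broadcast the invalidate */
	sync_ginv();		/* wait for global completion */
}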
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9d3610be2323..f0b862a83816 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -41,7 +41,7 @@ static inline unsigned long arch_local_irq_save(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_LOONGSON3)
+#if defined(CONFIG_CPU_LOONGSON3) || defined (CONFIG_CPU_LOONGSON1)
" mfc0 %[flags], $12 \n"
" di \n"
#else
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 73dcd63b8243..47e8827e9564 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -178,8 +178,4 @@ static inline u32 ath79_reset_rr(unsigned reg)
void ath79_device_reset_set(u32 mask);
void ath79_device_reset_clear(u32 mask);
-void ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3);
-void ath79_misc_irq_init(void __iomem *regs, int irq,
- int irq_base, bool is_ar71xx);
-
#endif /* __ASM_MACH_ATH79_H */
diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h
index b0b7261ff3ad..fd91c58aaf7d 100644
--- a/arch/mips/include/asm/mach-ip27/irq.h
+++ b/arch/mips/include/asm/mach-ip27/irq.h
@@ -10,13 +10,15 @@
#ifndef __ASM_MACH_IP27_IRQ_H
#define __ASM_MACH_IP27_IRQ_H
-/*
- * A hardwired interrupt number is completely stupid for this system - a
- * large configuration might have thousands if not tenthousands of
- * interrupts.
- */
#define NR_IRQS 256
#include_next <irq.h>
+#define IP27_HUB_PEND0_IRQ (MIPS_CPU_IRQ_BASE + 2)
+#define IP27_HUB_PEND1_IRQ (MIPS_CPU_IRQ_BASE + 3)
+#define IP27_RT_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 4)
+
+#define IP27_HUB_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
+#define IP27_HUB_IRQ_COUNT 128
+
#endif /* __ASM_MACH_IP27_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index 2ed3094dee07..1cd6a23a84f2 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -8,20 +8,11 @@
#define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr))
-#define LEVELS_PER_SLICE 128
-
-struct slice_data {
- unsigned long irq_enable_mask[2];
- int level_to_irq[LEVELS_PER_SLICE];
-};
-
struct hub_data {
kern_vars_t kern_vars;
DECLARE_BITMAP(h_bigwin_used, HUB_NUM_BIG_WINDOW);
cpumask_t h_cpus;
unsigned long slice_map;
- unsigned long irq_alloc_mask[2];
- struct slice_data slice[2];
};
struct node_data {
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
index c6b63a409641..6dd8ad2409dc 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -18,8 +18,6 @@
#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
-#define MIPS_CPU_TIMER_IRQ 7
-
#define MAX_IM 5
#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 141076325307..0b424214a5e9 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -19,8 +19,6 @@
#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
-#define MIPS_CPU_TIMER_IRQ 7
-
#define MAX_IM 5
#endif
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
index 8f8fa43ba095..15d1de2300fe 100644
--- a/arch/mips/include/asm/mach-loongson32/platform.h
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -17,19 +17,15 @@
extern struct platform_device ls1x_uart_pdev;
extern struct platform_device ls1x_cpufreq_pdev;
-extern struct platform_device ls1x_dma_pdev;
extern struct platform_device ls1x_eth0_pdev;
extern struct platform_device ls1x_eth1_pdev;
extern struct platform_device ls1x_ehci_pdev;
extern struct platform_device ls1x_gpio0_pdev;
extern struct platform_device ls1x_gpio1_pdev;
-extern struct platform_device ls1x_nand_pdev;
extern struct platform_device ls1x_rtc_pdev;
extern struct platform_device ls1x_wdt_pdev;
void __init ls1x_clk_init(void);
-void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata);
-void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata);
void __init ls1x_rtc_set_extclk(struct platform_device *pdev);
void __init ls1x_serial_set_uartclk(struct platform_device *pdev);
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 402b80af91aa..1e6966e8527e 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -667,6 +667,7 @@
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
#define MIPS_CONF5_CA2 (_ULCAST_(1) << 14)
+#define MIPS_CONF5_MI (_ULCAST_(1) << 17)
#define MIPS_CONF5_CRCP (_ULCAST_(1) << 18)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
@@ -1247,6 +1248,13 @@ __asm__(".macro parse_r var r\n\t"
ENC \
".endm")
+/* Instructions with 1 register operand & 1 immediate operand */
+#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \
+ __asm__(".macro " #OP " " #R1 ", " #I2 "\n\t" \
+ "parse_r __" #R1 ", \\" #R1 "\n\t" \
+ ENC \
+ ".endm")
+
/* Instructions with 2 register operands */
#define _ASM_MACRO_2R(OP, R1, R2, ENC) \
__asm__(".macro " #OP " " #R1 ", " #R2 "\n\t" \
@@ -1603,6 +1611,9 @@ do { \
#define read_c0_xcontextconfig() __read_ulong_c0_register($4, 3)
#define write_c0_xcontextconfig(val) __write_ulong_c0_register($4, 3, val)
+#define read_c0_memorymapid() __read_32bit_c0_register($4, 5)
+#define write_c0_memorymapid(val) __write_32bit_c0_register($4, 5, val)
+
#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 88a108ce62c1..5df0238f639b 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -7,7 +7,11 @@
#include <linux/wait.h>
typedef struct {
- u64 asid[NR_CPUS];
+ union {
+ u64 asid[NR_CPUS];
+ atomic64_t mmid;
+ };
+
void *vdso;
/* lock to be held whilst modifying fp_bd_emupage_allocmap */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index a589585be21b..cddead91acd4 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -17,8 +17,10 @@
#include <linux/smp.h>
#include <linux/slab.h>
+#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/dsemul.h>
+#include <asm/ginvt.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
@@ -73,6 +75,19 @@ extern unsigned long pgd_current[];
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
/*
+ * The ginvt instruction will invalidate wired entries when its type field
+ * targets anything other than the entire TLB. That means that if we were to
+ * allow the kernel to create wired entries with the MMID of current->active_mm
+ * then those wired entries could be invalidated when we later use ginvt to
+ * invalidate TLB entries with that MMID.
+ *
+ * In order to prevent ginvt from trashing wired entries, we reserve one MMID
+ * for use by the kernel when creating wired entries. This MMID will never be
+ * assigned to a struct mm, and we'll never target it with a ginvt instruction.
+ */
+#define MMID_KERNEL_WIRED 0
+
+/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
*/
@@ -88,7 +103,23 @@ static inline u64 asid_first_version(unsigned int cpu)
return ~asid_version_mask(cpu) + 1;
}
-#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
+static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
+{
+ if (cpu_has_mmid)
+ return atomic64_read(&mm->context.mmid);
+
+ return mm->context.asid[cpu];
+}
+
+static inline void set_cpu_context(unsigned int cpu,
+ struct mm_struct *mm, u64 ctx)
+{
+ if (cpu_has_mmid)
+ atomic64_set(&mm->context.mmid, ctx);
+ else
+ mm->context.asid[cpu] = ctx;
+}
+
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm) \
(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
@@ -97,21 +128,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-
-/* Normal, classic MIPS get_new_mmu_context */
-static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
-{
- u64 asid = asid_cache(cpu);
-
- if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
- local_flush_tlb_all(); /* start new asid cycle */
- }
-
- cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
+extern void get_new_mmu_context(struct mm_struct *mm);
+extern void check_mmu_context(struct mm_struct *mm);
+extern void check_switch_mmu_context(struct mm_struct *mm);
/*
* Initialize the context related info for a new mm_struct
@@ -122,8 +141,12 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;
- for_each_possible_cpu(i)
- cpu_context(i, mm) = 0;
+ if (cpu_has_mmid) {
+ set_cpu_context(0, mm, 0);
+ } else {
+ for_each_possible_cpu(i)
+ set_cpu_context(i, mm, 0);
+ }
mm->context.bd_emupage_allocmap = NULL;
spin_lock_init(&mm->context.bd_emupage_lock);
@@ -140,11 +163,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_save(flags);
htw_stop();
- /* Check if our ASID is of an older version and thus invalid */
- if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
- get_new_mmu_context(next, cpu);
- write_c0_entryhi(cpu_asid(cpu, next));
- TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+ check_switch_mmu_context(next);
/*
* Mark current->active_mm as not "active" anymore.
@@ -166,55 +185,55 @@ static inline void destroy_context(struct mm_struct *mm)
dsemul_mm_cleanup(mm);
}
+#define activate_mm(prev, next) switch_mm(prev, next, current)
#define deactivate_mm(tsk, mm) do { } while (0)
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
- unsigned long flags;
- unsigned int cpu = smp_processor_id();
-
- local_irq_save(flags);
-
- htw_stop();
- /* Unconditionally get a new ASID. */
- get_new_mmu_context(next, cpu);
-
- write_c0_entryhi(cpu_asid(cpu, next));
- TLBMISS_HANDLER_SETUP_PGD(next->pgd);
-
- /* mark mmu ownership change */
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
- htw_start();
-
- local_irq_restore(flags);
-}
-
-/*
- * If mm is currently active_mm, we can't really drop it. Instead,
- * we will get a new one for it.
- */
static inline void
-drop_mmu_context(struct mm_struct *mm, unsigned cpu)
+drop_mmu_context(struct mm_struct *mm)
{
unsigned long flags;
+ unsigned int cpu;
+ u32 old_mmid;
+ u64 ctx;
local_irq_save(flags);
- htw_stop();
- if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
- get_new_mmu_context(mm, cpu);
+ cpu = smp_processor_id();
+ ctx = cpu_context(cpu, mm);
+
+ if (!ctx) {
+ /* no-op */
+ } else if (cpu_has_mmid) {
+ /*
+ * Globally invalidating TLB entries associated with the MMID
+ * is pretty cheap using the GINVT instruction, so we'll do
+ * that rather than incur the overhead of allocating a new
+ * MMID. The latter would be especially difficult since MMIDs
+ * are global & other CPUs may be actively using ctx.
+ */
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
+ mtc0_tlbw_hazard();
+ ginvt_mmid();
+ sync_ginv();
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ /*
+ * mm is currently active, so we can't really drop it.
+ * Instead we bump the ASID.
+ */
+ htw_stop();
+ get_new_mmu_context(mm);
write_c0_entryhi(cpu_asid(cpu, mm));
+ htw_start();
} else {
/* will get a new context next time */
- cpu_context(cpu, mm) = 0;
+ set_cpu_context(cpu, mm, 0);
}
- htw_start();
+
local_irq_restore(flags);
}
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index b4d19c21b62c..d7fdcf0a0088 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -119,18 +119,6 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
extern int __cvmx_helper_board_interface_probe(int interface,
int supported_ports);
-/**
- * Enable packet input/output from the hardware. This function is
- * called after by cvmx_helper_packet_hardware_enable() to
- * perform board specific initialization. For most boards
- * nothing is needed.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_board_hardware_enable(int interface);
-
enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void);
#endif /* __CVMX_HELPER_BOARD_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
deleted file mode 100644
index 7a928230b0c0..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-smix-defs.h
+++ /dev/null
@@ -1,276 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_SMIX_DEFS_H__
-#define __CVMX_SMIX_DEFS_H__
-
-static inline uint64_t CVMX_SMIX_CLK(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003818ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
-}
-
-static inline uint64_t CVMX_SMIX_CMD(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003800ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
-}
-
-static inline uint64_t CVMX_SMIX_EN(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003820ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
-}
-
-static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003810ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
-}
-
-static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180000003808ull) + (offset) * 128;
- }
- return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
-}
-
-union cvmx_smix_clk {
- uint64_t u64;
- struct cvmx_smix_clk_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_25_63:39;
- uint64_t mode:1;
- uint64_t reserved_21_23:3;
- uint64_t sample_hi:5;
- uint64_t sample_mode:1;
- uint64_t reserved_14_14:1;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
-#else
- uint64_t phase:8;
- uint64_t sample:4;
- uint64_t preamble:1;
- uint64_t clk_idle:1;
- uint64_t reserved_14_14:1;
- uint64_t sample_mode:1;
- uint64_t sample_hi:5;
- uint64_t reserved_21_23:3;
- uint64_t mode:1;
- uint64_t reserved_25_63:39;
-#endif
- } s;
- struct cvmx_smix_clk_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_21_63:43;
- uint64_t sample_hi:5;
- uint64_t sample_mode:1;
- uint64_t reserved_14_14:1;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
-#else
- uint64_t phase:8;
- uint64_t sample:4;
- uint64_t preamble:1;
- uint64_t clk_idle:1;
- uint64_t reserved_14_14:1;
- uint64_t sample_mode:1;
- uint64_t sample_hi:5;
- uint64_t reserved_21_63:43;
-#endif
- } cn30xx;
-};
-
-union cvmx_smix_cmd {
- uint64_t u64;
- struct cvmx_smix_cmd_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t phy_op:2;
- uint64_t reserved_13_15:3;
- uint64_t phy_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t reg_adr:5;
-#else
- uint64_t reg_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t phy_adr:5;
- uint64_t reserved_13_15:3;
- uint64_t phy_op:2;
- uint64_t reserved_18_63:46;
-#endif
- } s;
- struct cvmx_smix_cmd_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t phy_op:1;
- uint64_t reserved_13_15:3;
- uint64_t phy_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t reg_adr:5;
-#else
- uint64_t reg_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t phy_adr:5;
- uint64_t reserved_13_15:3;
- uint64_t phy_op:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn30xx;
-};
-
-union cvmx_smix_en {
- uint64_t u64;
- struct cvmx_smix_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t en:1;
-#else
- uint64_t en:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
-};
-
-union cvmx_smix_rd_dat {
- uint64_t u64;
- struct cvmx_smix_rd_dat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t pending:1;
- uint64_t val:1;
- uint64_t dat:16;
-#else
- uint64_t dat:16;
- uint64_t val:1;
- uint64_t pending:1;
- uint64_t reserved_18_63:46;
-#endif
- } s;
-};
-
-union cvmx_smix_wr_dat {
- uint64_t u64;
- struct cvmx_smix_wr_dat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t pending:1;
- uint64_t val:1;
- uint64_t dat:16;
-#else
- uint64_t dat:16;
- uint64_t val:1;
- uint64_t pending:1;
- uint64_t reserved_18_63:46;
-#endif
- } s;
-};
-
-#endif
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 3206245d1ed6..23574c27eb40 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -45,18 +45,21 @@
#ifndef __ASSEMBLY__
-/*
- * All accesses to bridge hardware registers must be done
- * using 32-bit loads and stores.
- */
-typedef u32 bridgereg_t;
+#define ATE_V 0x01
+#define ATE_CO 0x02
+#define ATE_PREC 0x04
+#define ATE_PREF 0x08
+#define ATE_BAR 0x10
+
+#define ATE_PFNSHIFT 12
+#define ATE_TIDSHIFT 8
+#define ATE_RMFSHIFT 48
-typedef u64 bridge_ate_t;
+#define mkate(xaddr, xid, attr) (((xaddr) & 0x0000fffffffff000ULL) | \
+ ((xid)<<ATE_TIDSHIFT) | \
+ (attr))
-/* pointers to bridge ATEs
- * are always "pointer to volatile"
- */
-typedef volatile bridge_ate_t *bridge_ate_p;
+#define BRIDGE_INTERNAL_ATES 128
/*
* It is generally preferred that hardware registers on the bridge
@@ -65,7 +68,7 @@ typedef volatile bridge_ate_t *bridge_ate_p;
* Generated from Bridge spec dated 04oct95
*/
-typedef volatile struct bridge_s {
+struct bridge_regs {
/* Local Registers 0x000000-0x00FFFF */
/* standard widget configuration 0x000000-0x000057 */
@@ -86,105 +89,105 @@ typedef volatile struct bridge_s {
#define b_wid_tflush b_widget.w_tflush
/* bridge-specific widget configuration 0x000058-0x00007F */
- bridgereg_t _pad_000058;
- bridgereg_t b_wid_aux_err; /* 0x00005C */
- bridgereg_t _pad_000060;
- bridgereg_t b_wid_resp_upper; /* 0x000064 */
- bridgereg_t _pad_000068;
- bridgereg_t b_wid_resp_lower; /* 0x00006C */
- bridgereg_t _pad_000070;
- bridgereg_t b_wid_tst_pin_ctrl; /* 0x000074 */
- bridgereg_t _pad_000078[2];
+ u32 _pad_000058;
+ u32 b_wid_aux_err; /* 0x00005C */
+ u32 _pad_000060;
+ u32 b_wid_resp_upper; /* 0x000064 */
+ u32 _pad_000068;
+ u32 b_wid_resp_lower; /* 0x00006C */
+ u32 _pad_000070;
+ u32 b_wid_tst_pin_ctrl; /* 0x000074 */
+ u32 _pad_000078[2];
/* PMU & Map 0x000080-0x00008F */
- bridgereg_t _pad_000080;
- bridgereg_t b_dir_map; /* 0x000084 */
- bridgereg_t _pad_000088[2];
+ u32 _pad_000080;
+ u32 b_dir_map; /* 0x000084 */
+ u32 _pad_000088[2];
/* SSRAM 0x000090-0x00009F */
- bridgereg_t _pad_000090;
- bridgereg_t b_ram_perr; /* 0x000094 */
- bridgereg_t _pad_000098[2];
+ u32 _pad_000090;
+ u32 b_ram_perr; /* 0x000094 */
+ u32 _pad_000098[2];
/* Arbitration 0x0000A0-0x0000AF */
- bridgereg_t _pad_0000A0;
- bridgereg_t b_arb; /* 0x0000A4 */
- bridgereg_t _pad_0000A8[2];
+ u32 _pad_0000A0;
+ u32 b_arb; /* 0x0000A4 */
+ u32 _pad_0000A8[2];
/* Number In A Can 0x0000B0-0x0000BF */
- bridgereg_t _pad_0000B0;
- bridgereg_t b_nic; /* 0x0000B4 */
- bridgereg_t _pad_0000B8[2];
+ u32 _pad_0000B0;
+ u32 b_nic; /* 0x0000B4 */
+ u32 _pad_0000B8[2];
/* PCI/GIO 0x0000C0-0x0000FF */
- bridgereg_t _pad_0000C0;
- bridgereg_t b_bus_timeout; /* 0x0000C4 */
+ u32 _pad_0000C0;
+ u32 b_bus_timeout; /* 0x0000C4 */
#define b_pci_bus_timeout b_bus_timeout
- bridgereg_t _pad_0000C8;
- bridgereg_t b_pci_cfg; /* 0x0000CC */
- bridgereg_t _pad_0000D0;
- bridgereg_t b_pci_err_upper; /* 0x0000D4 */
- bridgereg_t _pad_0000D8;
- bridgereg_t b_pci_err_lower; /* 0x0000DC */
- bridgereg_t _pad_0000E0[8];
+ u32 _pad_0000C8;
+ u32 b_pci_cfg; /* 0x0000CC */
+ u32 _pad_0000D0;
+ u32 b_pci_err_upper; /* 0x0000D4 */
+ u32 _pad_0000D8;
+ u32 b_pci_err_lower; /* 0x0000DC */
+ u32 _pad_0000E0[8];
#define b_gio_err_lower b_pci_err_lower
#define b_gio_err_upper b_pci_err_upper
/* Interrupt 0x000100-0x0001FF */
- bridgereg_t _pad_000100;
- bridgereg_t b_int_status; /* 0x000104 */
- bridgereg_t _pad_000108;
- bridgereg_t b_int_enable; /* 0x00010C */
- bridgereg_t _pad_000110;
- bridgereg_t b_int_rst_stat; /* 0x000114 */
- bridgereg_t _pad_000118;
- bridgereg_t b_int_mode; /* 0x00011C */
- bridgereg_t _pad_000120;
- bridgereg_t b_int_device; /* 0x000124 */
- bridgereg_t _pad_000128;
- bridgereg_t b_int_host_err; /* 0x00012C */
+ u32 _pad_000100;
+ u32 b_int_status; /* 0x000104 */
+ u32 _pad_000108;
+ u32 b_int_enable; /* 0x00010C */
+ u32 _pad_000110;
+ u32 b_int_rst_stat; /* 0x000114 */
+ u32 _pad_000118;
+ u32 b_int_mode; /* 0x00011C */
+ u32 _pad_000120;
+ u32 b_int_device; /* 0x000124 */
+ u32 _pad_000128;
+ u32 b_int_host_err; /* 0x00012C */
struct {
- bridgereg_t __pad; /* 0x0001{30,,,68} */
- bridgereg_t addr; /* 0x0001{34,,,6C} */
+ u32 __pad; /* 0x0001{30,,,68} */
+ u32 addr; /* 0x0001{34,,,6C} */
} b_int_addr[8]; /* 0x000130 */
- bridgereg_t _pad_000170[36];
+ u32 _pad_000170[36];
/* Device 0x000200-0x0003FF */
struct {
- bridgereg_t __pad; /* 0x0002{00,,,38} */
- bridgereg_t reg; /* 0x0002{04,,,3C} */
+ u32 __pad; /* 0x0002{00,,,38} */
+ u32 reg; /* 0x0002{04,,,3C} */
} b_device[8]; /* 0x000200 */
struct {
- bridgereg_t __pad; /* 0x0002{40,,,78} */
- bridgereg_t reg; /* 0x0002{44,,,7C} */
+ u32 __pad; /* 0x0002{40,,,78} */
+ u32 reg; /* 0x0002{44,,,7C} */
} b_wr_req_buf[8]; /* 0x000240 */
struct {
- bridgereg_t __pad; /* 0x0002{80,,,88} */
- bridgereg_t reg; /* 0x0002{84,,,8C} */
+ u32 __pad; /* 0x0002{80,,,88} */
+ u32 reg; /* 0x0002{84,,,8C} */
} b_rrb_map[2]; /* 0x000280 */
#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
- bridgereg_t _pad_000290;
- bridgereg_t b_resp_status; /* 0x000294 */
- bridgereg_t _pad_000298;
- bridgereg_t b_resp_clear; /* 0x00029C */
+ u32 _pad_000290;
+ u32 b_resp_status; /* 0x000294 */
+ u32 _pad_000298;
+ u32 b_resp_clear; /* 0x00029C */
- bridgereg_t _pad_0002A0[24];
+ u32 _pad_0002A0[24];
char _pad_000300[0x10000 - 0x000300];
/* Internal Address Translation Entry RAM 0x010000-0x0103FF */
union {
- bridge_ate_t wr; /* write-only */
+ u64 wr; /* write-only */
struct {
- bridgereg_t _p_pad;
- bridgereg_t rd; /* read-only */
+ u32 _p_pad;
+ u32 rd; /* read-only */
} hi;
} b_int_ate_ram[128];
@@ -192,8 +195,8 @@ typedef volatile struct bridge_s {
/* Internal Address Translation Entry RAM LOW 0x011000-0x0113FF */
struct {
- bridgereg_t _p_pad;
- bridgereg_t rd; /* read-only */
+ u32 _p_pad;
+ u32 rd; /* read-only */
} b_int_ate_ram_lo[128];
char _pad_011400[0x20000 - 0x011400];
@@ -212,7 +215,7 @@ typedef volatile struct bridge_s {
} f[8];
} b_type0_cfg_dev[8]; /* 0x020000 */
- /* PCI Type 1 Configuration Space 0x028000-0x028FFF */
+ /* PCI Type 1 Configuration Space 0x028000-0x028FFF */
union { /* make all access sizes available. */
u8 c[0x1000 / 1];
u16 s[0x1000 / 2];
@@ -233,7 +236,7 @@ typedef volatile struct bridge_s {
u8 _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */
/* External Address Translation Entry RAM 0x080000-0x0FFFFF */
- bridge_ate_t b_ext_ate_ram[0x10000];
+ u64 b_ext_ate_ram[0x10000];
/* Reserved 0x100000-0x1FFFFF */
char _pad_100000[0x200000-0x100000];
@@ -259,13 +262,13 @@ typedef volatile struct bridge_s {
u32 l[0x400000 / 4]; /* read-only */
u64 d[0x400000 / 8]; /* read-only */
} b_external_flash; /* 0xC00000 */
-} bridge_t;
+};
/*
* Field formats for Error Command Word and Auxiliary Error Command Word
* of bridge.
*/
-typedef struct bridge_err_cmdword_s {
+struct bridge_err_cmdword {
union {
u32 cmd_word;
struct {
@@ -282,7 +285,7 @@ typedef struct bridge_err_cmdword_s {
rsvd:8;
} berr_st;
} berr_un;
-} bridge_err_cmdword_t;
+};
#define berr_field berr_un.berr_st
#endif /* !__ASSEMBLY__ */
@@ -290,7 +293,7 @@ typedef struct bridge_err_cmdword_s {
/*
* The values of these macros can and should be crosschecked
* regularly against the offsets of the like-named fields
- * within the "bridge_t" structure above.
+ * within the bridge_regs structure above.
*/
/* Byte offset macros for Bridge internal registers */
@@ -797,49 +800,14 @@ typedef struct bridge_err_cmdword_s {
#define PCI64_ATTR_RMF_MASK 0x00ff000000000000
#define PCI64_ATTR_RMF_SHFT 48
-#ifndef __ASSEMBLY__
-/* Address translation entry for mapped pci32 accesses */
-typedef union ate_u {
- u64 ent;
- struct ate_s {
- u64 rmf:16;
- u64 addr:36;
- u64 targ:4;
- u64 reserved:3;
- u64 barrier:1;
- u64 prefetch:1;
- u64 precise:1;
- u64 coherent:1;
- u64 valid:1;
- } field;
-} ate_t;
-#endif /* !__ASSEMBLY__ */
-
-#define ATE_V 0x01
-#define ATE_CO 0x02
-#define ATE_PREC 0x04
-#define ATE_PREF 0x08
-#define ATE_BAR 0x10
-
-#define ATE_PFNSHIFT 12
-#define ATE_TIDSHIFT 8
-#define ATE_RMFSHIFT 48
-
-#define mkate(xaddr, xid, attr) ((xaddr) & 0x0000fffffffff000ULL) | \
- ((xid)<<ATE_TIDSHIFT) | \
- (attr)
-
-#define BRIDGE_INTERNAL_ATES 128
-
struct bridge_controller {
struct pci_controller pc;
struct resource mem;
struct resource io;
struct resource busn;
- bridge_t *base;
+ struct bridge_regs *base;
nasid_t nasid;
unsigned int widget_id;
- unsigned int irq_cpu;
u64 baddr;
unsigned int pci_int[8];
};
@@ -847,8 +815,14 @@ struct bridge_controller {
#define BRIDGE_CONTROLLER(bus) \
((struct bridge_controller *)((bus)->sysdata))
-extern void register_bridge_irq(unsigned int irq);
-extern int request_bridge_irq(struct bridge_controller *bc);
+#define bridge_read(bc, reg) __raw_readl(&bc->base->reg)
+#define bridge_write(bc, reg, val) __raw_writel(val, &bc->base->reg)
+#define bridge_set(bc, reg, val) \
+ __raw_writel(__raw_readl(&bc->base->reg) | (val), &bc->base->reg)
+#define bridge_clr(bc, reg, val) \
+ __raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg)
+
+extern int request_bridge_irq(struct bridge_controller *bc, int pin);
extern struct pci_ops bridge_pci_ops;
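A hypothetical caller of the new bridge accessors above, sketched under the assumption that reading b_wid_tflush is still the idiom for draining posted writes, as elsewhere in the IP27 code:

/* Illustrative only: unmask one bridge interrupt pin via the new accessors. */
static void bridge_unmask_pin(struct bridge_controller *bc, int pin)
{
	bridge_set(bc, b_int_enable, 1 << pin);	/* read-modify-write via __raw_*l */
	bridge_read(bc, b_wid_tflush);		/* flush the posted write */
}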
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 57933fc8fd98..4ccb465ef3f2 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -17,6 +17,7 @@
#include <asm/pgtable-64.h>
#endif
+#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
@@ -204,49 +205,11 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
-#ifdef CONFIG_SMP
- /*
- * For SMP, multiple CPUs can race, so we need to do
- * this atomically.
- */
- unsigned long page_global = _PAGE_GLOBAL;
- unsigned long tmp;
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- __asm__ __volatile__ (
- " .set push \n"
- " .set arch=r4000 \n"
- " .set noreorder \n"
- "1:" __LL "%[tmp], %[buddy] \n"
- " bnez %[tmp], 2f \n"
- " or %[tmp], %[tmp], %[global] \n"
- __SC "%[tmp], %[buddy] \n"
- " beqzl %[tmp], 1b \n"
- " nop \n"
- "2: \n"
- " .set pop \n"
- : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
- : [global] "r" (page_global));
- } else if (kernel_uses_llsc) {
- __asm__ __volatile__ (
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " .set noreorder \n"
- "1:" __LL "%[tmp], %[buddy] \n"
- " bnez %[tmp], 2f \n"
- " or %[tmp], %[tmp], %[global] \n"
- __SC "%[tmp], %[buddy] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- "2: \n"
- " .set pop \n"
- : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
- : [global] "r" (page_global));
- }
-#else /* !CONFIG_SMP */
- if (pte_none(*buddy))
- pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
-#endif /* CONFIG_SMP */
+# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
+ cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
+# else
+ cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
+# endif
}
#endif
}
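The pgtable.h hunk above replaces an open-coded LL/SC loop with a single compare-and-exchange: the buddy PTE only receives _PAGE_GLOBAL while it is still none (all zeroes), and concurrent writers cannot lose the bit. A stand-alone model of that semantic, using C11 atomics on a plain integer rather than the kernel's pte_t:

#include <stdatomic.h>
#include <stdint.h>

#define PAGE_GLOBAL_BIT	0x1ULL	/* stand-in for _PAGE_GLOBAL */

static void mark_buddy_global(_Atomic uint64_t *buddy)
{
	uint64_t none = 0;

	/* Succeeds only while *buddy is still zero, i.e. pte_none(). */
	atomic_compare_exchange_strong(buddy, &none, PAGE_GLOBAL_BIT);
}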
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index b7123f9c0785..65618ff1280c 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -29,6 +29,7 @@ struct plat_smp_ops {
int (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus);
+ void (*prepare_boot_cpu)(void);
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h
index 66814f8ba8e8..837d23e24976 100644
--- a/arch/mips/include/asm/sn/addrs.h
+++ b/arch/mips/include/asm/sn/addrs.h
@@ -27,16 +27,11 @@
#ifndef __ASSEMBLY__
-#define PS_UINT_CAST (unsigned long)
#define UINT64_CAST (unsigned long)
-#define HUBREG_CAST (volatile hubreg_t *)
-
#else /* __ASSEMBLY__ */
-#define PS_UINT_CAST
#define UINT64_CAST
-#define HUBREG_CAST
#endif /* __ASSEMBLY__ */
@@ -256,42 +251,23 @@
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
-#define LOCAL_HUB_ADDR(_x) (HUBREG_CAST (IALIAS_BASE + (_x)))
-#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
- 0x800000 + (_x)))
-#ifdef CONFIG_SGI_IP27
-#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
- 0x800000 + (_x)))
-#endif /* CONFIG_SGI_IP27 */
+#define LOCAL_HUB_ADDR(_x) (IALIAS_BASE + (_x))
+#define REMOTE_HUB_ADDR(_n, _x) ((NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x)))
#ifndef __ASSEMBLY__
-#define HUB_L(_a) *(_a)
-#define HUB_S(_a, _d) *(_a) = (_d)
+#define LOCAL_HUB_PTR(_x) ((u64 *)LOCAL_HUB_ADDR((_x)))
+#define REMOTE_HUB_PTR(_n, _x) ((u64 *)REMOTE_HUB_ADDR((_n), (_x)))
-#define LOCAL_HUB_L(_r) HUB_L(LOCAL_HUB_ADDR(_r))
-#define LOCAL_HUB_S(_r, _d) HUB_S(LOCAL_HUB_ADDR(_r), (_d))
-#define REMOTE_HUB_L(_n, _r) HUB_L(REMOTE_HUB_ADDR((_n), (_r)))
-#define REMOTE_HUB_S(_n, _r, _d) HUB_S(REMOTE_HUB_ADDR((_n), (_r)), (_d))
-#define REMOTE_HUB_PI_L(_n, _sn, _r) HUB_L(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)))
-#define REMOTE_HUB_PI_S(_n, _sn, _r, _d) HUB_S(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)), (_d))
+#define LOCAL_HUB_L(_r) __raw_readq(LOCAL_HUB_PTR(_r))
+#define LOCAL_HUB_S(_r, _d) __raw_writeq((_d), LOCAL_HUB_PTR(_r))
+#define REMOTE_HUB_L(_n, _r) __raw_readq(REMOTE_HUB_PTR((_n), (_r)))
+#define REMOTE_HUB_S(_n, _r, _d) __raw_writeq((_d), \
+ REMOTE_HUB_PTR((_n), (_r)))
#endif /* !__ASSEMBLY__ */
/*
- * The following macros are used to get to a hub/bridge register, given
- * the base of the register space.
- */
-#define HUB_REG_PTR(_base, _off) \
- (HUBREG_CAST((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
-
-#define HUB_REG_PTR_L(_base, _off) \
- HUB_L(HUB_REG_PTR((_base), (_off)))
-
-#define HUB_REG_PTR_S(_base, _off, _data) \
- HUB_S(HUB_REG_PTR((_base), (_off)), (_data))
-
-/*
* Software structure locations -- permanently fixed
* See diagram in kldir.h
*/
@@ -387,44 +363,14 @@
#define SYMMON_STK_END(nasid) (SYMMON_STK_ADDR(nasid, 0) + KLD_SYMMON_STK(nasid)->size)
-/* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a
- * relocatable program
- */
-#define UNIX_DEBUG_LOADADDR 0x300000
-#define SYMMON_LOADADDR(nasid) \
- TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000))
-
-#define FREEMEM_OFFSET(nasid) KLD_FREEMEM(nasid)->offset
-#define FREEMEM_ADDR(nasid) SYMMON_STK_END(nasid)
-/*
- * XXX
- * Fix this. FREEMEM_ADDR should be aware of if symmon is loaded.
- * Also, it should take into account what prom thinks to be a safe
- * address
- PHYS_TO_K0(NODE_OFFSET(nasid) + FREEMEM_OFFSET(nasid))
- */
-#define FREEMEM_SIZE(nasid) KLD_FREEMEM(nasid)->size
-
-#define PI_ERROR_OFFSET(nasid) KLD_PI_ERROR(nasid)->offset
-#define PI_ERROR_ADDR(nasid) \
- TO_NODE_UNCAC((nasid), PI_ERROR_OFFSET(nasid))
-#define PI_ERROR_SIZE(nasid) KLD_PI_ERROR(nasid)->size
-
#define NODE_OFFSET_TO_K0(_nasid, _off) \
PHYS_TO_K0((NODE_OFFSET(_nasid) + (_off)) | CAC_BASE)
#define NODE_OFFSET_TO_K1(_nasid, _off) \
TO_UNCAC((NODE_OFFSET(_nasid) + (_off)) | UNCAC_BASE)
-#define K0_TO_NODE_OFFSET(_k0addr) \
- ((__psunsigned_t)(_k0addr) & NODE_ADDRSPACE_MASK)
#define KERN_VARS_ADDR(nasid) KLD_KERN_VARS(nasid)->pointer
#define KERN_VARS_SIZE(nasid) KLD_KERN_VARS(nasid)->size
-#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer
-#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size
-
-#define GPDA_ADDR(nasid) TO_NODE_CAC(nasid, GPDA_OFFSET)
-
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h
index 471e6870d876..3f1fb1454749 100644
--- a/arch/mips/include/asm/sn/arch.h
+++ b/arch/mips/include/asm/sn/arch.h
@@ -17,8 +17,6 @@
#include <asm/sn/sn0/arch.h>
#endif
-typedef u64 hubreg_t;
-
#define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid)
#define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice)
#define makespnum(_nasid, _slice) \
diff --git a/arch/mips/include/asm/sn/io.h b/arch/mips/include/asm/sn/io.h
index d5174d04538c..211f1e83b523 100644
--- a/arch/mips/include/asm/sn/io.h
+++ b/arch/mips/include/asm/sn/io.h
@@ -44,7 +44,7 @@
IIO_ITTE_PUT((nasid), HUB_PIO_MAP_TO_MEM, \
(bigwin), IIO_ITTE_INVALID_WIDGET, 0)
-#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_ADDR((nasid), IIO_ITTE(bigwin))
+#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_PTR((nasid), IIO_ITTE(bigwin))
/*
* Macro which takes the widget number, and returns the
diff --git a/arch/mips/include/asm/sn/sn0/addrs.h b/arch/mips/include/asm/sn/sn0/addrs.h
index 6b53070f400f..f13df84edfdd 100644
--- a/arch/mips/include/asm/sn/sn0/addrs.h
+++ b/arch/mips/include/asm/sn/sn0/addrs.h
@@ -134,11 +134,6 @@
#define CALIAS_BASE CAC_BASE
-
-
-#define BRIDGE_REG_PTR(_base, _off) ((volatile bridgereg_t *) \
- ((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
-
#define SN0_WIDGET_BASE(_nasid, _wid) (NODE_SWIN_BASE((_nasid), (_wid)))
/* Turn on sable logging for the processors whose bits are set. */
diff --git a/arch/mips/include/asm/tlbflush.h b/arch/mips/include/asm/tlbflush.h
index 40a361092491..9789e7a32def 100644
--- a/arch/mips/include/asm/tlbflush.h
+++ b/arch/mips/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*/
extern void local_flush_tlb_all(void);
-extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void local_flush_tlb_kernel_range(unsigned long start,
@@ -23,6 +22,8 @@ extern void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long page);
extern void local_flush_tlb_one(unsigned long vaddr);
+#include <asm/mmu_context.h>
+
#ifdef CONFIG_SMP
extern void flush_tlb_all(void);
@@ -36,7 +37,7 @@ extern void flush_tlb_one(unsigned long vaddr);
#else /* CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_mm(mm) drop_mmu_context(mm)
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
#define flush_tlb_kernel_range(vmaddr,end) \
local_flush_tlb_kernel_range(vmaddr, end)
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index d43c1dc6ef15..62b298c50905 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -69,7 +69,6 @@ extern u64 __ua_limit;
#define USER_DS ((mm_segment_t) { __UA_LIMIT })
#endif
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index b23d74a601b3..071053ece677 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -45,29 +45,16 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
# ifdef CONFIG_32BIT
# define __ARCH_WANT_STAT64
-# define __ARCH_WANT_SYS_TIME
+# define __ARCH_WANT_SYS_TIME32
# endif
# ifdef CONFIG_MIPS32_O32
-# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_SYS_TIME32
# endif
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE
/* whitelists for checksyscalls */
-#define __IGNORE_select
-#define __IGNORE_vfork
-#define __IGNORE_time
-#define __IGNORE_uselib
#define __IGNORE_fadvise64_64
-#define __IGNORE_getdents64
-#if _MIPS_SIM == _MIPS_SIM_NABI32
-#define __IGNORE_truncate64
-#define __IGNORE_ftruncate64
-#define __IGNORE_stat64
-#define __IGNORE_lstat64
-#define __IGNORE_fstat64
-#define __IGNORE_fstatat64
-#endif
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 3035ca499cd8..c2b40969eb1f 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -27,9 +27,7 @@
/*
* Flags for mmap
*/
-#define MAP_SHARED 0x001 /* Share changes */
-#define MAP_PRIVATE 0x002 /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x003 /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
#define MAP_TYPE 0x00f /* Mask for type of mapping */
#define MAP_FIXED 0x010 /* Interpret addr exactly */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 71370fb3ceef..eb9f33f8a8b3 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -11,6 +11,7 @@
#define _UAPI_ASM_SOCKET_H
#include <asm/sockios.h>
+#include <asm/bitsperlong.h>
/*
* For setsockopt(2)
@@ -38,8 +39,8 @@
#define SO_RCVBUF 0x1002 /* Receive buffer. */
#define SO_SNDLOWAT 0x1003 /* send low-water mark */
#define SO_RCVLOWAT 0x1004 /* receive low-water mark */
-#define SO_SNDTIMEO 0x1005 /* send timeout */
-#define SO_RCVTIMEO 0x1006 /* receive timeout */
+#define SO_SNDTIMEO_OLD 0x1005 /* send timeout */
+#define SO_RCVTIMEO_OLD 0x1006 /* receive timeout */
#define SO_ACCEPTCONN 0x1009
#define SO_PROTOCOL 0x1028 /* protocol type */
#define SO_DOMAIN 0x1029 /* domain/socket family */
@@ -65,21 +66,14 @@
#define SO_GET_FILTER SO_ATTACH_FILTER
#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
#define SO_PEERSEC 30
#define SO_SNDBUFFORCE 31
#define SO_RCVBUFFORCE 33
#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
#define SO_MARK 36
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
#define SO_RXQ_OVFL 40
#define SO_WIFI_STATUS 41
@@ -126,4 +120,41 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
+#define SO_TIMESTAMP_OLD 29
+#define SO_TIMESTAMPNS_OLD 35
+#define SO_TIMESTAMPING_OLD 37
+
+#define SO_TIMESTAMP_NEW 63
+#define SO_TIMESTAMPNS_NEW 64
+#define SO_TIMESTAMPING_NEW 65
+
+#define SO_RCVTIMEO_NEW 66
+#define SO_SNDTIMEO_NEW 67
+
+#if !defined(__KERNEL__)
+
+#if __BITS_PER_LONG == 64
+#define SO_TIMESTAMP SO_TIMESTAMP_OLD
+#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD
+#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD
+
+#define SO_RCVTIMEO SO_RCVTIMEO_OLD
+#define SO_SNDTIMEO SO_SNDTIMEO_OLD
+#else
+#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW)
+#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW)
+#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW)
+
+#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW)
+#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW)
+#endif
+
+#define SCM_TIMESTAMP SO_TIMESTAMP
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#endif
+
#endif /* _UAPI_ASM_SOCKET_H */
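The userspace-only block above picks the old or new socket option values based on whether time_t still matches the kernel's long. A small stand-alone illustration of that selection; the constants and the sizeof(long) comparison are stand-ins for the uapi values and __kernel_long_t above:

#include <stdio.h>
#include <time.h>

#define SO_RCVTIMEO_OLD	0x1006	/* stand-ins for the uapi values above */
#define SO_RCVTIMEO_NEW	66

int main(void)
{
	/* Mirrors the uapi selection; sizeof(long) stands in for __kernel_long_t. */
	int so_rcvtimeo = (sizeof(time_t) == sizeof(long))
				? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW;

	printf("this build resolves SO_RCVTIMEO to 0x%x\n", so_rcvtimeo);
	return 0;
}

A 64-bit build, or a 32-bit build whose time_t is still long-sized, keeps the old numbers so existing binaries see no change; only 32-bit builds with a wider time_t switch to the new ones.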
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 6256d35dbf4d..bedb5047aff3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+ CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index afb40f8bce96..7e63c54eb8d2 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -31,7 +31,6 @@
#define JZ4740_EMC_SDRAM_CTRL 0x80
-
static void __init jz4740_detect_mem(void)
{
void __iomem *jz_emc_base;
@@ -66,15 +65,22 @@ static unsigned long __init get_board_mach_type(const void *fdt)
void __init plat_mem_setup(void)
{
int offset;
+ void *dtb;
jz4740_reset_init();
- __dt_setup_arch(__dtb_start);
- offset = fdt_path_offset(__dtb_start, "/memory");
+ if (__dtb_start != __dtb_end)
+ dtb = __dtb_start;
+ else
+ dtb = (void *)fw_passed_dtb;
+
+ __dt_setup_arch(dtb);
+
+ offset = fdt_path_offset(dtb, "/memory");
if (offset < 0)
jz4740_detect_mem();
- mips_machtype = get_board_mach_type(__dtb_start);
+ mips_machtype = get_board_mach_type(dtb);
}
void __init device_tree_init(void)
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 0b9535bc2c53..6b2a4a902a98 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
- u32 mask, old32, new32, load32;
+ u32 mask, old32, new32, load32, load;
volatile u32 *ptr32;
unsigned int shift;
- u8 load;
/* Check that ptr is naturally aligned */
WARN_ON((unsigned long)ptr & (size - 1));
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 95b18a194f53..d5e335e6846a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -872,10 +872,19 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
static inline unsigned int decode_config5(struct cpuinfo_mips *c)
{
- unsigned int config5;
+ unsigned int config5, max_mmid_width;
+ unsigned long asid_mask;
config5 = read_c0_config5();
config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
+
+ if (cpu_has_mips_r6) {
+ if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)
+ config5 |= MIPS_CONF5_MI;
+ else
+ config5 &= ~MIPS_CONF5_MI;
+ }
+
write_c0_config5(config5);
if (config5 & MIPS_CONF5_EVA)
@@ -894,6 +903,50 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
if (config5 & MIPS_CONF5_CRCP)
elf_hwcap |= HWCAP_MIPS_CRC32;
+ if (cpu_has_mips_r6) {
+ /* Ensure the write to config5 above takes effect */
+ back_to_back_c0_hazard();
+
+ /* Check whether we successfully enabled MMID support */
+ config5 = read_c0_config5();
+ if (config5 & MIPS_CONF5_MI)
+ c->options |= MIPS_CPU_MMID;
+
+ /*
+ * Warn if we've hardcoded cpu_has_mmid to a value unsuitable
+ * for the CPU we're running on, or if CPUs in an SMP system
+ * have inconsistent MMID support.
+ */
+ WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI));
+
+ if (cpu_has_mmid) {
+ write_c0_memorymapid(~0ul);
+ back_to_back_c0_hazard();
+ asid_mask = read_c0_memorymapid();
+
+ /*
+ * We maintain a bitmap to track MMID allocation, and
+ * need a sensible upper bound on the size of that
+ * bitmap. The initial CPU with MMID support (I6500)
+ * supports 16 bit MMIDs, which gives us an 8KiB
+ * bitmap. The architecture recommends that hardware
+ * support 32 bit MMIDs, which would give us a 512MiB
+ * bitmap - that's too big in most cases.
+ *
+ * Cap MMID width at 16 bits for now & we can revisit
+ * this if & when hardware supports anything wider.
+ */
+ max_mmid_width = 16;
+ if (asid_mask > GENMASK(max_mmid_width - 1, 0)) {
+ pr_info("Capping MMID width at %d bits",
+ max_mmid_width);
+ asid_mask = GENMASK(max_mmid_width - 1, 0);
+ }
+
+ set_cpu_asid_mask(c, asid_mask);
+ }
+ }
+
return config5 & MIPS_CONF_M;
}
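The 8 KiB and 512 MiB figures in the comment above follow directly from one allocation-bitmap bit per MMID value; a quick stand-alone check:

#include <stdio.h>

int main(void)
{
	unsigned int widths[] = { 16, 32 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned long long entries = 1ULL << widths[i];
		unsigned long long kib = entries / 8 / 1024;	/* one bit per MMID */

		printf("%u-bit MMIDs -> %llu KiB allocation bitmap\n",
		       widths[i], kib);
	}
	return 0;
}

524288 KiB for the 32-bit case is the 512 MiB the comment warns about, which is why the width is capped at 16 bits for now.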
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 2ea0ec95efe9..4b5e1f2bfbce 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -86,7 +86,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
return -EFAULT;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
flush_icache_range(ip, ip + 8);
set_fs(old_fs);
@@ -111,7 +111,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
ip -= 4;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
flush_icache_range(ip, ip + 8);
set_fs(old_fs);
@@ -135,7 +135,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
return -EFAULT;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
flush_icache_range(ip, ip + 8);
set_fs(old_fs);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index ba150c755fcc..85b6c60f285d 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
void __init init_IRQ(void)
{
int i;
+ unsigned int order = get_order(IRQ_STACK_SIZE);
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
@@ -62,8 +63,7 @@ void __init init_IRQ(void)
arch_init_irq();
for_each_possible_cpu(i) {
- int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
- void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+ void *s = (void *)__get_free_pages(GFP_KERNEL, order);
irq_stack[i] = s;
pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 149100e1bc7c..6e574c02e4c3 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -212,7 +212,7 @@ void kgdb_call_nmi_hook(void *ignored)
mm_segment_t old_fs;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
kgdb_nmicallback(raw_smp_processor_id(), NULL);
@@ -318,7 +318,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
/* Kernel mode. Set correct address limit */
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 8f5bd04f320a..537e8d091874 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -382,8 +382,8 @@ void mips_cm_error_report(void)
sc_bit ? "True" : "False",
cm2_cmd[cmd_bits], sport_bits);
}
- pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
- cm2_causes[cause], buf);
+ pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
+ cm2_causes[cause], buf);
pr_err("CM_ADDR =%08llx\n", cm_addr);
pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]);
} else { /* CM3 */
@@ -457,5 +457,5 @@ void mips_cm_error_report(void)
}
/* reprime cause register */
- write_gcr_error_cause(0);
+ write_gcr_error_cause(cm_error);
}
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index c50c89a978f1..b4d210bfcdae 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -2351,23 +2351,10 @@ DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);
static int __init mipsr2_init_debugfs(void)
{
- struct dentry *mipsr2_emul;
-
- if (!mips_debugfs_dir)
- return -ENODEV;
-
- mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
- mips_debugfs_dir, NULL,
- &mipsr2_emul_fops);
- if (!mipsr2_emul)
- return -ENOMEM;
-
- mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
- mips_debugfs_dir, NULL,
- &mipsr2_clear_fops);
- if (!mipsr2_emul)
- return -ENOMEM;
-
+ debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL,
+ &mipsr2_emul_fops);
+ debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir,
+ NULL, &mipsr2_clear_fops);
return 0;
}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 6829a064aac8..339870ed92f7 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
- union mips_instruction insn, *ip, *ip_end;
+ union mips_instruction insn, *ip;
const unsigned int max_insns = 128;
unsigned int last_insn_size = 0;
unsigned int i;
@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
- ip_end = (void *)ip + info->func_size;
-
- for (i = 0; i < max_insns && ip < ip_end; i++) {
+ for (i = 0; i < max_insns; i++) {
ip = (void *)ip + last_insn_size;
+
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.word = ip->halfword[0] << 16;
last_insn_size = 2;
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
index 2703f218202e..0a9bd7b0983b 100644
--- a/arch/mips/kernel/segment.c
+++ b/arch/mips/kernel/segment.c
@@ -95,18 +95,9 @@ static const struct file_operations segments_fops = {
static int __init segments_info(void)
{
- struct dentry *segments;
-
- if (cpu_has_segments) {
- if (!mips_debugfs_dir)
- return -ENODEV;
-
- segments = debugfs_create_file("segments", S_IRUGO,
- mips_debugfs_dir, NULL,
- &segments_fops);
- if (!segments)
- return -ENOMEM;
- }
+ if (cpu_has_segments)
+ debugfs_create_file("segments", S_IRUGO, mips_debugfs_dir, NULL,
+ &segments_fops);
return 0;
}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8c6c48ed786a..5151532ad959 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -384,7 +384,8 @@ static void __init bootmem_init(void)
init_initrd();
reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
- memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
+ memblock_reserve(PHYS_OFFSET,
+ (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
/*
* max_low_pfn is not a number of pages. The number of pages
@@ -1010,12 +1011,7 @@ unsigned long fw_passed_dtb;
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
- struct dentry *d;
-
- d = debugfs_create_dir("mips", NULL);
- if (!d)
- return -ENOMEM;
- mips_debugfs_dir = d;
+ mips_debugfs_dir = debugfs_create_dir("mips", NULL);
return 0;
}
arch_initcall(debugfs_mips);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d84b9066b465..bc4bb3c6bd00 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -39,6 +39,7 @@
#include <linux/atomic.h>
#include <asm/cpu.h>
+#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
@@ -443,6 +444,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
+ if (mp_ops->prepare_boot_cpu)
+ mp_ops->prepare_boot_cpu();
set_cpu_possible(0, true);
set_cpu_online(0, true);
}
@@ -482,12 +485,21 @@ static void flush_tlb_all_ipi(void *info)
void flush_tlb_all(void)
{
+ if (cpu_has_mmid) {
+ htw_stop();
+ ginvt_full();
+ sync_ginv();
+ instruction_hazard();
+ htw_start();
+ return;
+ }
+
on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
- local_flush_tlb_mm((struct mm_struct *)mm);
+ drop_mmu_context((struct mm_struct *)mm);
}
/*
@@ -530,17 +542,22 @@ void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
- if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ if (cpu_has_mmid) {
+ /*
+ * No need to worry about other CPUs - the ginvt in
+ * drop_mmu_context() will be globalized.
+ */
+ } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
- cpu_context(cpu, mm) = 0;
+ set_cpu_context(cpu, mm, 0);
}
}
- local_flush_tlb_mm(mm);
+ drop_mmu_context(mm);
preempt_enable();
}
@@ -561,9 +578,26 @@ static void flush_tlb_range_ipi(void *info)
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr;
+ u32 old_mmid;
preempt_disable();
- if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ if (cpu_has_mmid) {
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(cpu_asid(0, mm));
+ mtc0_tlbw_hazard();
+ addr = round_down(start, PAGE_SIZE * 2);
+ end = round_up(end, PAGE_SIZE * 2);
+ do {
+ ginvt_va_mmid(addr);
+ sync_ginv();
+ addr += PAGE_SIZE * 2;
+ } while (addr < end);
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = start,
@@ -571,6 +605,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
};
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
+ local_flush_tlb_range(vma, start, end);
} else {
unsigned int cpu;
int exec = vma->vm_flags & VM_EXEC;
@@ -583,10 +618,10 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
* mm has been completely unused by that CPU.
*/
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
- cpu_context(cpu, mm) = !exec;
+ set_cpu_context(cpu, mm, !exec);
}
+ local_flush_tlb_range(vma, start, end);
}
- local_flush_tlb_range(vma, start, end);
preempt_enable();
}
@@ -616,14 +651,28 @@ static void flush_tlb_page_ipi(void *info)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ u32 old_mmid;
+
preempt_disable();
- if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+ if (cpu_has_mmid) {
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
+ mtc0_tlbw_hazard();
+ ginvt_va_mmid(page);
+ sync_ginv();
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+ (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = page,
};
smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
+ local_flush_tlb_page(vma, page);
} else {
unsigned int cpu;
@@ -635,10 +684,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
* by that CPU.
*/
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
- cpu_context(cpu, vma->vm_mm) = 1;
+ set_cpu_context(cpu, vma->vm_mm, 1);
}
+ local_flush_tlb_page(vma, page);
}
- local_flush_tlb_page(vma, page);
preempt_enable();
}
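The flush_tlb_range() and flush_tlb_page() hunks above share one GINVT pattern: temporarily load the target address space's MMID into the MemoryMapID register, issue globalized invalidates, then restore the previous MMID. A condensed sketch of that pattern, illustrative only and reusing the helpers introduced by this series; the loop steps in double-page units because each MIPS TLB entry maps an even/odd page pair:

static void ginvt_flush_range_sketch(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	u32 old_mmid;
	unsigned long addr;

	htw_stop();				/* pause the HW page table walker */
	old_mmid = read_c0_memorymapid();
	write_c0_memorymapid(cpu_asid(0, mm));	/* target address space */
	mtc0_tlbw_hazard();

	end = round_up(end, PAGE_SIZE * 2);
	for (addr = round_down(start, PAGE_SIZE * 2); addr < end;
	     addr += PAGE_SIZE * 2) {
		ginvt_va_mmid(addr);		/* globalized invalidate by VA + MMID */
		sync_ginv();			/* wait for it to complete */
	}

	write_c0_memorymapid(old_mmid);
	instruction_hazard();
	htw_start();
}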
diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c
index eaed550e79a2..ab4e3e1b138d 100644
--- a/arch/mips/kernel/spinlock_test.c
+++ b/arch/mips/kernel/spinlock_test.c
@@ -118,23 +118,10 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
static int __init spinlock_test(void)
{
- struct dentry *d;
-
- if (!mips_debugfs_dir)
- return -ENODEV;
-
- d = debugfs_create_file("spin_single", S_IRUGO,
- mips_debugfs_dir, NULL,
- &fops_ss);
- if (!d)
- return -ENOMEM;
-
- d = debugfs_create_file("spin_multi", S_IRUGO,
- mips_debugfs_dir, NULL,
- &fops_multi);
- if (!d)
- return -ENOMEM;
-
+ debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL,
+ &fops_ss);
+ debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL,
+ &fops_multi);
return 0;
}
device_initcall(spinlock_test);
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 53d5862649ae..15f4117900ee 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -37,11 +37,11 @@
27 n32 madvise sys_madvise
28 n32 shmget sys_shmget
29 n32 shmat sys_shmat
-30 n32 shmctl compat_sys_shmctl
+30 n32 shmctl compat_sys_old_shmctl
31 n32 dup sys_dup
32 n32 dup2 sys_dup2
33 n32 pause sys_pause
-34 n32 nanosleep compat_sys_nanosleep
+34 n32 nanosleep sys_nanosleep_time32
35 n32 getitimer compat_sys_getitimer
36 n32 setitimer compat_sys_setitimer
37 n32 alarm sys_alarm
@@ -71,12 +71,12 @@
61 n32 uname sys_newuname
62 n32 semget sys_semget
63 n32 semop sys_semop
-64 n32 semctl compat_sys_semctl
+64 n32 semctl compat_sys_old_semctl
65 n32 shmdt sys_shmdt
66 n32 msgget sys_msgget
67 n32 msgsnd compat_sys_msgsnd
68 n32 msgrcv compat_sys_msgrcv
-69 n32 msgctl compat_sys_msgctl
+69 n32 msgctl compat_sys_old_msgctl
70 n32 fcntl compat_sys_fcntl
71 n32 flock sys_flock
72 n32 fsync sys_fsync
@@ -133,11 +133,11 @@
123 n32 capget sys_capget
124 n32 capset sys_capset
125 n32 rt_sigpending compat_sys_rt_sigpending
-126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait
+126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait_time32
127 n32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo
128 n32 rt_sigsuspend compat_sys_rt_sigsuspend
129 n32 sigaltstack compat_sys_sigaltstack
-130 n32 utime compat_sys_utime
+130 n32 utime sys_utime32
131 n32 mknod sys_mknod
132 n32 personality sys_32_personality
133 n32 ustat compat_sys_ustat
@@ -152,7 +152,7 @@
142 n32 sched_getscheduler sys_sched_getscheduler
143 n32 sched_get_priority_max sys_sched_get_priority_max
144 n32 sched_get_priority_min sys_sched_get_priority_min
-145 n32 sched_rr_get_interval compat_sys_sched_rr_get_interval
+145 n32 sched_rr_get_interval sys_sched_rr_get_interval_time32
146 n32 mlock sys_mlock
147 n32 munlock sys_munlock
148 n32 mlockall sys_mlockall
@@ -161,7 +161,7 @@
151 n32 pivot_root sys_pivot_root
152 n32 _sysctl compat_sys_sysctl
153 n32 prctl sys_prctl
-154 n32 adjtimex compat_sys_adjtimex
+154 n32 adjtimex sys_adjtimex_time32
155 n32 setrlimit compat_sys_setrlimit
156 n32 chroot sys_chroot
157 n32 sync sys_sync
@@ -202,7 +202,7 @@
191 n32 fremovexattr sys_fremovexattr
192 n32 tkill sys_tkill
193 n32 reserved193 sys_ni_syscall
-194 n32 futex compat_sys_futex
+194 n32 futex sys_futex_time32
195 n32 sched_setaffinity compat_sys_sched_setaffinity
196 n32 sched_getaffinity compat_sys_sched_getaffinity
197 n32 cacheflush sys_cacheflush
@@ -210,7 +210,7 @@
199 n32 sysmips __sys_sysmips
200 n32 io_setup compat_sys_io_setup
201 n32 io_destroy sys_io_destroy
-202 n32 io_getevents compat_sys_io_getevents
+202 n32 io_getevents sys_io_getevents_time32
203 n32 io_submit compat_sys_io_submit
204 n32 io_cancel sys_io_cancel
205 n32 exit_group sys_exit_group
@@ -223,29 +223,29 @@
212 n32 fcntl64 compat_sys_fcntl64
213 n32 set_tid_address sys_set_tid_address
214 n32 restart_syscall sys_restart_syscall
-215 n32 semtimedop compat_sys_semtimedop
+215 n32 semtimedop sys_semtimedop_time32
216 n32 fadvise64 sys_fadvise64_64
217 n32 statfs64 compat_sys_statfs64
218 n32 fstatfs64 compat_sys_fstatfs64
219 n32 sendfile64 sys_sendfile64
220 n32 timer_create compat_sys_timer_create
-221 n32 timer_settime compat_sys_timer_settime
-222 n32 timer_gettime compat_sys_timer_gettime
+221 n32 timer_settime sys_timer_settime32
+222 n32 timer_gettime sys_timer_gettime32
223 n32 timer_getoverrun sys_timer_getoverrun
224 n32 timer_delete sys_timer_delete
-225 n32 clock_settime compat_sys_clock_settime
-226 n32 clock_gettime compat_sys_clock_gettime
-227 n32 clock_getres compat_sys_clock_getres
-228 n32 clock_nanosleep compat_sys_clock_nanosleep
+225 n32 clock_settime sys_clock_settime32
+226 n32 clock_gettime sys_clock_gettime32
+227 n32 clock_getres sys_clock_getres_time32
+228 n32 clock_nanosleep sys_clock_nanosleep_time32
229 n32 tgkill sys_tgkill
-230 n32 utimes compat_sys_utimes
+230 n32 utimes sys_utimes_time32
231 n32 mbind compat_sys_mbind
232 n32 get_mempolicy compat_sys_get_mempolicy
233 n32 set_mempolicy compat_sys_set_mempolicy
234 n32 mq_open compat_sys_mq_open
235 n32 mq_unlink sys_mq_unlink
-236 n32 mq_timedsend compat_sys_mq_timedsend
-237 n32 mq_timedreceive compat_sys_mq_timedreceive
+236 n32 mq_timedsend sys_mq_timedsend_time32
+237 n32 mq_timedreceive sys_mq_timedreceive_time32
238 n32 mq_notify compat_sys_mq_notify
239 n32 mq_getsetattr compat_sys_mq_getsetattr
240 n32 vserver sys_ni_syscall
@@ -263,7 +263,7 @@
252 n32 mkdirat sys_mkdirat
253 n32 mknodat sys_mknodat
254 n32 fchownat sys_fchownat
-255 n32 futimesat compat_sys_futimesat
+255 n32 futimesat sys_futimesat_time32
256 n32 newfstatat sys_newfstatat
257 n32 unlinkat sys_unlinkat
258 n32 renameat sys_renameat
@@ -272,8 +272,8 @@
261 n32 readlinkat sys_readlinkat
262 n32 fchmodat sys_fchmodat
263 n32 faccessat sys_faccessat
-264 n32 pselect6 compat_sys_pselect6
-265 n32 ppoll compat_sys_ppoll
+264 n32 pselect6 compat_sys_pselect6_time32
+265 n32 ppoll compat_sys_ppoll_time32
266 n32 unshare sys_unshare
267 n32 splice sys_splice
268 n32 sync_file_range sys_sync_file_range
@@ -287,14 +287,14 @@
276 n32 epoll_pwait compat_sys_epoll_pwait
277 n32 ioprio_set sys_ioprio_set
278 n32 ioprio_get sys_ioprio_get
-279 n32 utimensat compat_sys_utimensat
+279 n32 utimensat sys_utimensat_time32
280 n32 signalfd compat_sys_signalfd
281 n32 timerfd sys_ni_syscall
282 n32 eventfd sys_eventfd
283 n32 fallocate sys_fallocate
284 n32 timerfd_create sys_timerfd_create
-285 n32 timerfd_gettime compat_sys_timerfd_gettime
-286 n32 timerfd_settime compat_sys_timerfd_settime
+285 n32 timerfd_gettime sys_timerfd_gettime32
+286 n32 timerfd_settime sys_timerfd_settime32
287 n32 signalfd4 compat_sys_signalfd4
288 n32 eventfd2 sys_eventfd2
289 n32 epoll_create1 sys_epoll_create1
@@ -306,14 +306,14 @@
295 n32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
296 n32 perf_event_open sys_perf_event_open
297 n32 accept4 sys_accept4
-298 n32 recvmmsg compat_sys_recvmmsg
+298 n32 recvmmsg compat_sys_recvmmsg_time32
299 n32 getdents64 sys_getdents64
300 n32 fanotify_init sys_fanotify_init
301 n32 fanotify_mark sys_fanotify_mark
302 n32 prlimit64 sys_prlimit64
303 n32 name_to_handle_at sys_name_to_handle_at
304 n32 open_by_handle_at sys_open_by_handle_at
-305 n32 clock_adjtime compat_sys_clock_adjtime
+305 n32 clock_adjtime sys_clock_adjtime32
306 n32 syncfs sys_syncfs
307 n32 sendmmsg compat_sys_sendmmsg
308 n32 setns sys_setns
@@ -341,3 +341,24 @@
330 n32 statx sys_statx
331 n32 rseq sys_rseq
332 n32 io_pgetevents compat_sys_io_pgetevents
+# 333 through 402 are unassigned to sync up with generic numbers
+403 n32 clock_gettime64 sys_clock_gettime
+404 n32 clock_settime64 sys_clock_settime
+405 n32 clock_adjtime64 sys_clock_adjtime
+406 n32 clock_getres_time64 sys_clock_getres
+407 n32 clock_nanosleep_time64 sys_clock_nanosleep
+408 n32 timer_gettime64 sys_timer_gettime
+409 n32 timer_settime64 sys_timer_settime
+410 n32 timerfd_gettime64 sys_timerfd_gettime
+411 n32 timerfd_settime64 sys_timerfd_settime
+412 n32 utimensat_time64 sys_utimensat
+413 n32 pselect6_time64 compat_sys_pselect6_time64
+414 n32 ppoll_time64 compat_sys_ppoll_time64
+416 n32 io_pgetevents_time64 sys_io_pgetevents
+417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
+418 n32 mq_timedsend_time64 sys_mq_timedsend
+419 n32 mq_timedreceive_time64 sys_mq_timedreceive
+420 n32 semtimedop_time64 sys_semtimedop
+421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64
+422 n32 futex_time64 sys_futex
+423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
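The new 403-423 entries above wire the year-2038-safe time64 syscalls into the 32-bit n32 ABI, matching the generic syscall numbers. A hypothetical userspace sketch, not part of the patch, calling the new clock_gettime64 entry directly; note that MIPS adds a per-ABI base to the table numbers (6000 for n32), so the libc-provided __NR_clock_gettime64 is used rather than the raw index, and the local struct mirrors the kernel's __kernel_timespec layout:

#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct ts64 {
	int64_t tv_sec;
	int64_t tv_nsec;
};

int main(void)
{
#ifdef __NR_clock_gettime64
	struct ts64 ts;

	if (syscall(__NR_clock_gettime64, 0 /* CLOCK_REALTIME */, &ts) == 0)
		printf("%lld.%09lld\n", (long long)ts.tv_sec,
		       (long long)ts.tv_nsec);
#endif
	return 0;
}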
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index a8286ccbb66c..c85502e67b44 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -37,7 +37,7 @@
27 n64 madvise sys_madvise
28 n64 shmget sys_shmget
29 n64 shmat sys_shmat
-30 n64 shmctl sys_shmctl
+30 n64 shmctl sys_old_shmctl
31 n64 dup sys_dup
32 n64 dup2 sys_dup2
33 n64 pause sys_pause
@@ -71,12 +71,12 @@
61 n64 uname sys_newuname
62 n64 semget sys_semget
63 n64 semop sys_semop
-64 n64 semctl sys_semctl
+64 n64 semctl sys_old_semctl
65 n64 shmdt sys_shmdt
66 n64 msgget sys_msgget
67 n64 msgsnd sys_msgsnd
68 n64 msgrcv sys_msgrcv
-69 n64 msgctl sys_msgctl
+69 n64 msgctl sys_old_msgctl
70 n64 fcntl sys_fcntl
71 n64 flock sys_flock
72 n64 fsync sys_fsync
@@ -337,3 +337,4 @@
326 n64 statx sys_statx
327 n64 rseq sys_rseq
328 n64 io_pgetevents sys_io_pgetevents
+# 329 through 423 are reserved to sync up with other architectures
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 3d5a47b80d2b..2e063d0f837e 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -20,7 +20,7 @@
10 o32 unlink sys_unlink
11 o32 execve sys_execve compat_sys_execve
12 o32 chdir sys_chdir
-13 o32 time sys_time compat_sys_time
+13 o32 time sys_time32
14 o32 mknod sys_mknod
15 o32 chmod sys_chmod
16 o32 lchown sys_lchown
@@ -33,13 +33,13 @@
22 o32 umount sys_oldumount
23 o32 setuid sys_setuid
24 o32 getuid sys_getuid
-25 o32 stime sys_stime compat_sys_stime
+25 o32 stime sys_stime32
26 o32 ptrace sys_ptrace compat_sys_ptrace
27 o32 alarm sys_alarm
# 28 was sys_fstat
28 o32 unused28 sys_ni_syscall
29 o32 pause sys_pause
-30 o32 utime sys_utime compat_sys_utime
+30 o32 utime sys_utime32
31 o32 stty sys_ni_syscall
32 o32 gtty sys_ni_syscall
33 o32 access sys_access
@@ -135,7 +135,7 @@
121 o32 setdomainname sys_setdomainname
122 o32 uname sys_newuname
123 o32 modify_ldt sys_ni_syscall
-124 o32 adjtimex sys_adjtimex compat_sys_adjtimex
+124 o32 adjtimex sys_adjtimex_time32
125 o32 mprotect sys_mprotect
126 o32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
127 o32 create_module sys_ni_syscall
@@ -176,8 +176,8 @@
162 o32 sched_yield sys_sched_yield
163 o32 sched_get_priority_max sys_sched_get_priority_max
164 o32 sched_get_priority_min sys_sched_get_priority_min
-165 o32 sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
-166 o32 nanosleep sys_nanosleep compat_sys_nanosleep
+165 o32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+166 o32 nanosleep sys_nanosleep_time32
167 o32 mremap sys_mremap
168 o32 accept sys_accept
169 o32 bind sys_bind
@@ -208,7 +208,7 @@
194 o32 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
195 o32 rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
196 o32 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-197 o32 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+197 o32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
198 o32 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
199 o32 rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
200 o32 pread64 sys_pread64 sys_32_pread
@@ -249,12 +249,12 @@
235 o32 fremovexattr sys_fremovexattr
236 o32 tkill sys_tkill
237 o32 sendfile64 sys_sendfile64
-238 o32 futex sys_futex compat_sys_futex
+238 o32 futex sys_futex_time32
239 o32 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
240 o32 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
241 o32 io_setup sys_io_setup compat_sys_io_setup
242 o32 io_destroy sys_io_destroy
-243 o32 io_getevents sys_io_getevents compat_sys_io_getevents
+243 o32 io_getevents sys_io_getevents_time32
244 o32 io_submit sys_io_submit compat_sys_io_submit
245 o32 io_cancel sys_io_cancel
246 o32 exit_group sys_exit_group
@@ -269,23 +269,23 @@
255 o32 statfs64 sys_statfs64 compat_sys_statfs64
256 o32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
257 o32 timer_create sys_timer_create compat_sys_timer_create
-258 o32 timer_settime sys_timer_settime compat_sys_timer_settime
-259 o32 timer_gettime sys_timer_gettime compat_sys_timer_gettime
+258 o32 timer_settime sys_timer_settime32
+259 o32 timer_gettime sys_timer_gettime32
260 o32 timer_getoverrun sys_timer_getoverrun
261 o32 timer_delete sys_timer_delete
-262 o32 clock_settime sys_clock_settime compat_sys_clock_settime
-263 o32 clock_gettime sys_clock_gettime compat_sys_clock_gettime
-264 o32 clock_getres sys_clock_getres compat_sys_clock_getres
-265 o32 clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+262 o32 clock_settime sys_clock_settime32
+263 o32 clock_gettime sys_clock_gettime32
+264 o32 clock_getres sys_clock_getres_time32
+265 o32 clock_nanosleep sys_clock_nanosleep_time32
266 o32 tgkill sys_tgkill
-267 o32 utimes sys_utimes compat_sys_utimes
+267 o32 utimes sys_utimes_time32
268 o32 mbind sys_mbind compat_sys_mbind
269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
271 o32 mq_open sys_mq_open compat_sys_mq_open
272 o32 mq_unlink sys_mq_unlink
-273 o32 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
-274 o32 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+273 o32 mq_timedsend sys_mq_timedsend_time32
+274 o32 mq_timedreceive sys_mq_timedreceive_time32
275 o32 mq_notify sys_mq_notify compat_sys_mq_notify
276 o32 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
277 o32 vserver sys_ni_syscall
@@ -303,7 +303,7 @@
289 o32 mkdirat sys_mkdirat
290 o32 mknodat sys_mknodat
291 o32 fchownat sys_fchownat
-292 o32 futimesat sys_futimesat compat_sys_futimesat
+292 o32 futimesat sys_futimesat_time32
293 o32 fstatat64 sys_fstatat64 sys_newfstatat
294 o32 unlinkat sys_unlinkat
295 o32 renameat sys_renameat
@@ -312,8 +312,8 @@
298 o32 readlinkat sys_readlinkat
299 o32 fchmodat sys_fchmodat
300 o32 faccessat sys_faccessat
-301 o32 pselect6 sys_pselect6 compat_sys_pselect6
-302 o32 ppoll sys_ppoll compat_sys_ppoll
+301 o32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+302 o32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
303 o32 unshare sys_unshare
304 o32 splice sys_splice
305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range
@@ -327,14 +327,14 @@
313 o32 epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
314 o32 ioprio_set sys_ioprio_set
315 o32 ioprio_get sys_ioprio_get
-316 o32 utimensat sys_utimensat compat_sys_utimensat
+316 o32 utimensat sys_utimensat_time32
317 o32 signalfd sys_signalfd compat_sys_signalfd
318 o32 timerfd sys_ni_syscall
319 o32 eventfd sys_eventfd
320 o32 fallocate sys_fallocate sys32_fallocate
321 o32 timerfd_create sys_timerfd_create
-322 o32 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
-323 o32 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+322 o32 timerfd_gettime sys_timerfd_gettime32
+323 o32 timerfd_settime sys_timerfd_settime32
324 o32 signalfd4 sys_signalfd4 compat_sys_signalfd4
325 o32 eventfd2 sys_eventfd2
326 o32 epoll_create1 sys_epoll_create1
@@ -346,13 +346,13 @@
332 o32 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
333 o32 perf_event_open sys_perf_event_open
334 o32 accept4 sys_accept4
-335 o32 recvmmsg sys_recvmmsg compat_sys_recvmmsg
+335 o32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
336 o32 fanotify_init sys_fanotify_init
337 o32 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
338 o32 prlimit64 sys_prlimit64
339 o32 name_to_handle_at sys_name_to_handle_at
340 o32 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
-341 o32 clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+341 o32 clock_adjtime sys_clock_adjtime32
342 o32 syncfs sys_syncfs
343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg
344 o32 setns sys_setns
@@ -379,4 +379,35 @@
365 o32 pkey_free sys_pkey_free
366 o32 statx sys_statx
367 o32 rseq sys_rseq
-368 o32 io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
+368 o32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+# room for arch specific calls
+393 o32 semget sys_semget
+394 o32 semctl sys_semctl compat_sys_semctl
+395 o32 shmget sys_shmget
+396 o32 shmctl sys_shmctl compat_sys_shmctl
+397 o32 shmat sys_shmat compat_sys_shmat
+398 o32 shmdt sys_shmdt
+399 o32 msgget sys_msgget
+400 o32 msgsnd sys_msgsnd compat_sys_msgsnd
+401 o32 msgrcv sys_msgrcv compat_sys_msgrcv
+402 o32 msgctl sys_msgctl compat_sys_msgctl
+403 o32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 o32 clock_settime64 sys_clock_settime sys_clock_settime
+405 o32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 o32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 o32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 o32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 o32 timer_settime64 sys_timer_settime sys_timer_settime
+410 o32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 o32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 o32 utimensat_time64 sys_utimensat sys_utimensat
+413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 o32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 o32 futex_time64 sys_futex sys_futex
+423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c91097f7b32f..42d411125690 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1077,7 +1077,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
seg = get_fs();
if (!user_mode(regs))
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
prev_state = exception_enter();
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
@@ -2223,7 +2223,9 @@ void per_cpu_trap_init(bool is_boot_cpu)
cp0_fdc_irq = -1;
}
- if (!cpu_data[cpu].asid_cache)
+ if (cpu_has_mmid)
+ cpu_data[cpu].asid_cache = 0;
+ else if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = asid_first_version(cpu);
mmgrab(&init_mm);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 595ca9c85111..76e33f940971 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -89,6 +89,7 @@
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
+#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#define STR(x) __STR(x)
@@ -2374,18 +2375,10 @@ sigbus:
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
- struct dentry *d;
-
- if (!mips_debugfs_dir)
- return -ENODEV;
- d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
- mips_debugfs_dir, &unaligned_instructions);
- if (!d)
- return -ENOMEM;
- d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
- mips_debugfs_dir, &unaligned_action);
- if (!d)
- return -ENOMEM;
+ debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
+ &unaligned_instructions);
+ debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
+ mips_debugfs_dir, &unaligned_action);
return 0;
}
arch_initcall(debugfs_unaligned);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index ec9ed23bca7f..0074427b04fb 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1016,10 +1016,10 @@ static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
*/
preempt_disable();
cpu = smp_processor_id();
- get_new_mmu_context(kern_mm, cpu);
+ get_new_mmu_context(kern_mm);
for_each_possible_cpu(i)
if (i != cpu)
- cpu_context(i, kern_mm) = 0;
+ set_cpu_context(i, kern_mm, 0);
preempt_enable();
}
kvm_write_c0_guest_entryhi(cop0, entryhi);
@@ -1090,8 +1090,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
if (i == cpu)
continue;
if (user)
- cpu_context(i, user_mm) = 0;
- cpu_context(i, kern_mm) = 0;
+ set_cpu_context(i, user_mm, 0);
+ set_cpu_context(i, kern_mm, 0);
}
preempt_enable();
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 3734cd58895e..6d0517ac18e5 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1723,6 +1723,11 @@ static int __init kvm_mips_init(void)
{
int ret;
+ if (cpu_has_mmid) {
+ pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
+ return -EOPNOTSUPP;
+ }
+
ret = kvm_mips_entry_setup();
if (ret)
return ret;
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 6a0d7040d882..73daa6ad33af 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1056,11 +1056,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
*/
if (current->flags & PF_VCPU) {
mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
- if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
- asid_version_mask(cpu))
- get_new_mmu_context(mm, cpu);
- write_c0_entryhi(cpu_asid(cpu, mm));
- TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+ check_switch_mmu_context(mm);
kvm_mips_suspend_mm(cpu);
ehb();
}
@@ -1074,11 +1070,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
if (current->flags & PF_VCPU) {
/* Restore normal Linux process memory map */
- if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
- asid_version_mask(cpu)))
- get_new_mmu_context(current->mm, cpu);
- write_c0_entryhi(cpu_asid(cpu, current->mm));
- TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+ check_switch_mmu_context(current->mm);
kvm_mips_resume_mm(cpu);
ehb();
}
@@ -1106,14 +1098,14 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
for_each_possible_cpu(i) {
- cpu_context(i, kern_mm) = 0;
- cpu_context(i, user_mm) = 0;
+ set_cpu_context(i, kern_mm, 0);
+ set_cpu_context(i, user_mm, 0);
}
/* Generate new ASID for current mode */
if (reload_asid) {
mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
- get_new_mmu_context(mm, cpu);
+ get_new_mmu_context(mm);
htw_stop();
write_c0_entryhi(cpu_asid(cpu, mm));
TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
@@ -1219,7 +1211,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
if (gasid != vcpu->arch.last_user_gasid) {
kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
for_each_possible_cpu(i)
- cpu_context(i, user_mm) = 0;
+ set_cpu_context(i, user_mm, 0);
vcpu->arch.last_user_gasid = gasid;
}
}
@@ -1228,9 +1220,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
* Check if ASID is stale. This may happen due to a TLB flush request or
* a lazy user MM invalidation.
*/
- if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
- asid_version_mask(cpu))
- get_new_mmu_context(mm, cpu);
+ check_mmu_context(mm);
}
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
@@ -1266,11 +1256,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
cpu = smp_processor_id();
/* Restore normal Linux process memory map */
- if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
- asid_version_mask(cpu)))
- get_new_mmu_context(current->mm, cpu);
- write_c0_entryhi(cpu_asid(cpu, current->mm));
- TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+ check_switch_mmu_context(current->mm);
kvm_mips_resume_mm(cpu);
htw_start();
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 74805035edc8..dde20887a70d 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -2454,10 +2454,10 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
* Root ASID dealiases guest GPA mappings in the root TLB.
* Allocate new root ASID if needed.
*/
- if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
- || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
- asid_version_mask(cpu))
- get_new_mmu_context(gpa_mm, cpu);
+ if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
+ get_new_mmu_context(gpa_mm);
+ else
+ check_mmu_context(gpa_mm);
}
}
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index 188de95d6dbd..6c6802e482c9 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -52,8 +52,4 @@ config PCI_LANTIQ
bool "PCI Support"
depends on SOC_XWAY && PCI
-config XRX200_PHY_FW
- bool "XRX200 PHY firmware loader"
- depends on SOC_XWAY
-
endif
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc3312ed11..6549499eb202 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
.irq_set_type = ltq_eiu_settype,
};
-static void ltq_hw_irqdispatch(int module)
+static void ltq_hw_irq_handler(struct irq_desc *desc)
{
+ int module = irq_desc_get_irq(desc) - 2;
u32 irq;
+ int hwirq;
irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
if (irq == 0)
@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
* other bits might be bogus
*/
irq = __fls(irq);
- do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
+ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is an EBU irq, we need to ack it or get a deadlock */
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
LTQ_EBU_PCC_ISTAT);
}
-#define DEFINE_HWx_IRQDISPATCH(x) \
- static void ltq_hw ## x ## _irqdispatch(void) \
- { \
- ltq_hw_irqdispatch(x); \
- }
-DEFINE_HWx_IRQDISPATCH(0)
-DEFINE_HWx_IRQDISPATCH(1)
-DEFINE_HWx_IRQDISPATCH(2)
-DEFINE_HWx_IRQDISPATCH(3)
-DEFINE_HWx_IRQDISPATCH(4)
-
-#if MIPS_CPU_TIMER_IRQ == 7
-static void ltq_hw5_irqdispatch(void)
-{
- do_IRQ(MIPS_CPU_TIMER_IRQ);
-}
-#else
-DEFINE_HWx_IRQDISPATCH(5)
-#endif
-
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
- int irq;
-
- if (!pending) {
- spurious_interrupt();
- return;
- }
-
- pending >>= CAUSEB_IP;
- while (pending) {
- irq = fls(pending) - 1;
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
- pending &= ~BIT(irq);
- }
-}
-
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct irq_chip *chip = &ltq_irq_type;
@@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
for (i = 0; i < MAX_IM; i++)
irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
- if (cpu_has_vint) {
- pr_info("Setting up vectored interrupts\n");
- set_vi_handler(2, ltq_hw0_irqdispatch);
- set_vi_handler(3, ltq_hw1_irqdispatch);
- set_vi_handler(4, ltq_hw2_irqdispatch);
- set_vi_handler(5, ltq_hw3_irqdispatch);
- set_vi_handler(6, ltq_hw4_irqdispatch);
- set_vi_handler(7, ltq_hw5_irqdispatch);
- }
-
ltq_domain = irq_domain_add_linear(node,
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
-#ifndef CONFIG_MIPS_MT_SMP
- set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
- IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#else
- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
- IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#endif
-
/* tell oprofile which irq to use */
ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
- /*
- * if the timer irq is not one of the mips irqs we need to
- * create a mapping
- */
- if (MIPS_CPU_TIMER_IRQ != 7)
- irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
-
/* the external interrupts are optional and xway only */
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
@@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
- return MIPS_CPU_TIMER_IRQ;
+ return CP0_LEGACY_COMPARE_IRQ;
}
static struct of_device_id __initdata of_irq_ids[] = {
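The irq.c changes above drop the vectored plat_irq_dispatch() path in favour of a single chained handler. Consolidated into one function for readability, the flow that the split hunks implement looks roughly like this (sketch only, reusing the identifiers from the patch):

static void example_chained_handler(struct irq_desc *desc)
{
	int module = irq_desc_get_irq(desc) - 2;	/* parent line -> ICU module */
	u32 pending = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
	int hwirq;

	if (!pending)
		return;

	/* only the highest pending bit is valid */
	hwirq = __fls(pending) + MIPS_CPU_IRQ_CASCADE +
		(INT_NUM_IM_OFFSET * module);
	generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
}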
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 982859f2b2a3..5e6a1a45cbd2 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -129,9 +129,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
unsigned long flags;
ch->desc = 0;
- ch->desc_base = dma_zalloc_coherent(ch->dev,
- LTQ_DESC_NUM * LTQ_DESC_SIZE,
- &ch->phys, GFP_ATOMIC);
+ ch->desc_base = dma_alloc_coherent(ch->dev,
+ LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ &ch->phys, GFP_ATOMIC);
spin_lock_irqsave(&ltq_dma_lock, flags);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index 577ec81b557d..3deab9a77718 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev)
dma_addr_t dma;
cp1_base =
- (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE,
- &dma, GFP_ATOMIC));
+ (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
+ &dma, GFP_KERNEL));
gpio_count = of_gpio_count(pdev->dev.of_node);
while (gpio_count > 0) {
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 781ad96b78c4..83ed37298e66 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -10,6 +10,7 @@
#include <asm/hazards.h>
#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
@@ -73,12 +74,13 @@ static inline const char *msk2str(unsigned int mask)
static void dump_tlb(int first, int last)
{
- unsigned long s_entryhi, entryhi, asid;
+ unsigned long s_entryhi, entryhi, asid, mmid;
unsigned long long entrylo0, entrylo1, pa;
unsigned int s_index, s_pagemask, s_guestctl1 = 0;
unsigned int pagemask, guestctl1 = 0, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
+ unsigned long uninitialized_var(s_mmid);
#ifdef CONFIG_32BIT
bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
int pwidth = xpa ? 11 : 8;
@@ -92,7 +94,12 @@ static void dump_tlb(int first, int last)
s_pagemask = read_c0_pagemask();
s_entryhi = read_c0_entryhi();
s_index = read_c0_index();
- asid = s_entryhi & asidmask;
+
+ if (cpu_has_mmid)
+ asid = s_mmid = read_c0_memorymapid();
+ else
+ asid = s_entryhi & asidmask;
+
if (cpu_has_guestid)
s_guestctl1 = read_c0_guestctl1();
@@ -105,6 +112,12 @@ static void dump_tlb(int first, int last)
entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
entrylo1 = read_c0_entrylo1();
+
+ if (cpu_has_mmid)
+ mmid = read_c0_memorymapid();
+ else
+ mmid = entryhi & asidmask;
+
if (cpu_has_guestid)
guestctl1 = read_c0_guestctl1();
@@ -124,8 +137,7 @@ static void dump_tlb(int first, int last)
* leave only a single G bit set after a machine check exception
* due to duplicate TLB entry.
*/
- if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
- (entryhi & asidmask) != asid)
+ if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
continue;
/*
@@ -138,7 +150,7 @@ static void dump_tlb(int first, int last)
pr_cont("va=%0*lx asid=%0*lx",
vwidth, (entryhi & ~0x1fffUL),
- asidwidth, entryhi & asidmask);
+ asidwidth, mmid);
if (cpu_has_guestid)
pr_cont(" gid=%02lx",
(guestctl1 & MIPS_GCTL1_RID)
diff --git a/arch/mips/loongson32/Kconfig b/arch/mips/loongson32/Kconfig
index 462b126f45aa..6dacc1438906 100644
--- a/arch/mips/loongson32/Kconfig
+++ b/arch/mips/loongson32/Kconfig
@@ -15,7 +15,6 @@ config LOONGSON1_LS1B
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
- select SYS_SUPPORTS_MIPS16
select SYS_HAS_EARLY_PRINTK
select USE_GENERIC_EARLY_PRINTK_8250
select COMMON_CLK
@@ -31,7 +30,6 @@ config LOONGSON1_LS1C
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
- select SYS_SUPPORTS_MIPS16
select SYS_HAS_EARLY_PRINTK
select USE_GENERIC_EARLY_PRINTK_8250
select COMMON_CLK
diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform
index a0dbb3b2f2de..333215593092 100644
--- a/arch/mips/loongson32/Platform
+++ b/arch/mips/loongson32/Platform
@@ -1,4 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32 -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32r2 -Wa,--trap
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32
-load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80100000
+load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80200000
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index ac584c5823d0..0bf355c8bcb2 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -81,42 +81,6 @@ struct platform_device ls1x_cpufreq_pdev = {
},
};
-/* DMA */
-static struct resource ls1x_dma_resources[] = {
- [0] = {
- .start = LS1X_DMAC_BASE,
- .end = LS1X_DMAC_BASE + SZ_4 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = LS1X_DMA0_IRQ,
- .end = LS1X_DMA0_IRQ,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = LS1X_DMA1_IRQ,
- .end = LS1X_DMA1_IRQ,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = LS1X_DMA2_IRQ,
- .end = LS1X_DMA2_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device ls1x_dma_pdev = {
- .name = "ls1x-dma",
- .id = -1,
- .num_resources = ARRAY_SIZE(ls1x_dma_resources),
- .resource = ls1x_dma_resources,
-};
-
-void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata)
-{
- ls1x_dma_pdev.dev.platform_data = pdata;
-}
-
/* Synopsys Ethernet GMAC */
static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
.phy_mask = 0,
@@ -291,33 +255,6 @@ struct platform_device ls1x_gpio1_pdev = {
.resource = ls1x_gpio1_resources,
};
-/* NAND Flash */
-static struct resource ls1x_nand_resources[] = {
- [0] = {
- .start = LS1X_NAND_BASE,
- .end = LS1X_NAND_BASE + SZ_32 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- /* DMA channel 0 is dedicated to NAND */
- .start = LS1X_DMA_CHANNEL0,
- .end = LS1X_DMA_CHANNEL0,
- .flags = IORESOURCE_DMA,
- },
-};
-
-struct platform_device ls1x_nand_pdev = {
- .name = "ls1x-nand",
- .id = -1,
- .num_resources = ARRAY_SIZE(ls1x_nand_resources),
- .resource = ls1x_nand_resources,
-};
-
-void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata)
-{
- ls1x_nand_pdev.dev.platform_data = pdata;
-}
-
/* USB EHCI */
static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/mips/loongson32/ls1b/board.c b/arch/mips/loongson32/ls1b/board.c
index 01aceaace314..447b15fc0a2b 100644
--- a/arch/mips/loongson32/ls1b/board.c
+++ b/arch/mips/loongson32/ls1b/board.c
@@ -16,30 +16,6 @@
#include <nand.h>
#include <platform.h>
-struct plat_ls1x_dma ls1x_dma_pdata = {
- .nr_channels = 3,
-};
-
-static struct mtd_partition ls1x_nand_parts[] = {
- {
- .name = "kernel",
- .offset = 0,
- .size = SZ_16M,
- },
- {
- .name = "rootfs",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-struct plat_ls1x_nand ls1x_nand_pdata = {
- .parts = ls1x_nand_parts,
- .nr_parts = ARRAY_SIZE(ls1x_nand_parts),
- .hold_cycle = 0x2,
- .wait_cycle = 0xc,
-};
-
static const struct gpio_led ls1x_gpio_leds[] __initconst = {
{
.name = "LED9",
@@ -64,13 +40,11 @@ static const struct gpio_led_platform_data ls1x_led_pdata __initconst = {
static struct platform_device *ls1b_platform_devices[] __initdata = {
&ls1x_uart_pdev,
&ls1x_cpufreq_pdev,
- &ls1x_dma_pdev,
&ls1x_eth0_pdev,
&ls1x_eth1_pdev,
&ls1x_ehci_pdev,
&ls1x_gpio0_pdev,
&ls1x_gpio1_pdev,
- &ls1x_nand_pdev,
&ls1x_rtc_pdev,
&ls1x_wdt_pdev,
};
@@ -78,8 +52,6 @@ static struct platform_device *ls1b_platform_devices[] __initdata = {
static int __init ls1b_platform_init(void)
{
ls1x_serial_set_uartclk(&ls1x_uart_pdev);
- ls1x_dma_set_platdata(&ls1x_dma_pdata);
- ls1x_nand_set_platdata(&ls1x_nand_pdata);
gpio_led_register_device(-1, &ls1x_led_pdata);
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index 0fce4608aa88..c1a4d4dc4665 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
endif
cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap
+
+#
+# Some versions of binutils, not currently mainline as of 2019/02/04, support
+# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
+# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a
+# description).
+#
+# We disable this in order to prevent the assembler meddling with the
+# instruction that labels refer to, ie. if we label an ll instruction:
+#
+# 1: ll v0, 0(a0)
+#
+# ...then with the assembler fix applied the label may actually point at a sync
+# instruction inserted by the assembler, and if we were using the label in an
+# exception table the table would no longer contain the address of the ll
+# instruction.
+#
+# Avoid this by explicitly disabling that assembler behaviour. If upstream
+# binutils does not merge support for the flag then we can revisit & remove
+# this later - for now it ensures vendor toolchains don't cause problems.
+#
+cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+
#
# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
# as MIPS64 R2; older versions as just R1. This leaves the possibility open
diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c
index a60715e11306..b26892ce871c 100644
--- a/arch/mips/loongson64/common/reset.c
+++ b/arch/mips/loongson64/common/reset.c
@@ -59,7 +59,12 @@ static void loongson_poweroff(void)
{
#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
mach_prepare_shutdown();
- unreachable();
+
+ /*
+ * It needs a wait loop here, but mips/kernel/reset.c already calls
+ * a generic delay loop, machine_hang(), so simply return.
+ */
+ return;
#else
void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
index 58798f527356..387724860fa6 100644
--- a/arch/mips/math-emu/me-debugfs.c
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -189,32 +189,21 @@ static int __init debugfs_fpuemu(void)
{
struct dentry *fpuemu_debugfs_base_dir;
struct dentry *fpuemu_debugfs_inst_dir;
- struct dentry *d, *reset_file;
-
- if (!mips_debugfs_dir)
- return -ENODEV;
fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats",
mips_debugfs_dir);
- if (!fpuemu_debugfs_base_dir)
- return -ENOMEM;
- reset_file = debugfs_create_file("fpuemustats_clear", 0444,
- mips_debugfs_dir, NULL,
- &fpuemustats_clear_fops);
- if (!reset_file)
- return -ENOMEM;
+ debugfs_create_file("fpuemustats_clear", 0444, mips_debugfs_dir, NULL,
+ &fpuemustats_clear_fops);
#define FPU_EMU_STAT_OFFSET(m) \
offsetof(struct mips_fpu_emulator_stats, m)
#define FPU_STAT_CREATE(m) \
do { \
- d = debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \
+ debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \
(void *)FPU_EMU_STAT_OFFSET(m), \
&fops_fpuemu_stat); \
- if (!d) \
- return -ENOMEM; \
} while (0)
FPU_STAT_CREATE(emulated);
@@ -233,8 +222,6 @@ do { \
fpuemu_debugfs_inst_dir = debugfs_create_dir("instructions",
fpuemu_debugfs_base_dir);
- if (!fpuemu_debugfs_inst_dir)
- return -ENOMEM;
#define FPU_STAT_CREATE_EX(m) \
do { \
@@ -242,11 +229,9 @@ do { \
\
adjust_instruction_counter_name(name, #m); \
\
- d = debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \
+ debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \
(void *)FPU_EMU_STAT_OFFSET(m), \
&fops_fpuemu_stat); \
- if (!d) \
- return -ENOMEM; \
} while (0)
FPU_STAT_CREATE_EX(abs_s);
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 3e5bb203c95a..f34d7ff5eb60 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,9 +3,19 @@
# Makefile for the Linux/MIPS-specific parts of the memory manager.
#
-obj-y += cache.o extable.o fault.o \
- gup.o init.o mmap.o page.o page-funcs.o \
- pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
+obj-y += cache.o
+obj-y += context.o
+obj-y += extable.o
+obj-y += fault.o
+obj-y += gup.o
+obj-y += init.o
+obj-y += mmap.o
+obj-y += page.o
+obj-y += page-funcs.o
+obj-y += pgtable.o
+obj-y += tlbex.o
+obj-y += tlbex-fault.o
+obj-y += tlb-funcs.o
ifdef CONFIG_CPU_MICROMIPS
obj-y += uasm-micromips.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 0e45b061e514..8064821e9805 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -128,23 +128,6 @@ static void octeon_flush_icache_range(unsigned long start, unsigned long end)
/**
- * Flush the icache for a trampoline. These are used for interrupt
- * and exception hooking.
- *
- * @addr: Address to flush
- */
-static void octeon_flush_cache_sigtramp(unsigned long addr)
-{
- struct vm_area_struct *vma;
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, addr);
- octeon_flush_icache_all_cores(vma);
- up_read(&current->mm->mmap_sem);
-}
-
-
-/**
* Flush a range out of a vma
*
* @vma: VMA to flush
@@ -289,7 +272,6 @@ void octeon_cache_init(void)
flush_cache_mm = octeon_flush_cache_mm;
flush_cache_page = octeon_flush_cache_page;
flush_cache_range = octeon_flush_cache_range;
- flush_cache_sigtramp = octeon_flush_cache_sigtramp;
flush_icache_all = octeon_flush_icache_all;
flush_data_cache_page = octeon_flush_data_cache_page;
flush_icache_range = octeon_flush_icache_range;
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 01848cdf2074..0ca401ddf3b7 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -274,30 +274,6 @@ static void r3k_flush_data_cache_page(unsigned long addr)
{
}
-static void r3k_flush_cache_sigtramp(unsigned long addr)
-{
- unsigned long flags;
-
- pr_debug("csigtramp[%08lx]\n", addr);
-
- flags = read_c0_status();
-
- write_c0_status(flags&~ST0_IEC);
-
- /* Fill the TLB to avoid an exception with caches isolated. */
- asm( "lw\t$0, 0x000(%0)\n\t"
- "lw\t$0, 0x004(%0)\n\t"
- : : "r" (addr) );
-
- write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
-
- asm( "sb\t$0, 0x000(%0)\n\t"
- "sb\t$0, 0x004(%0)\n\t"
- : : "r" (addr) );
-
- write_c0_status(flags);
-}
-
static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
BUG();
@@ -331,7 +307,6 @@ void r3k_cache_init(void)
__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
- flush_cache_sigtramp = r3k_flush_cache_sigtramp;
local_flush_data_cache_page = local_r3k_flush_data_cache_page;
flush_data_cache_page = r3k_flush_data_cache_page;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index d0b64df51eb2..5166e38cd1c6 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -540,6 +540,9 @@ static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
unsigned int i;
const cpumask_t *mask = cpu_present_mask;
+ if (cpu_has_mmid)
+ return cpu_context(0, mm) != 0;
+
/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
/*
@@ -697,10 +700,7 @@ static inline void local_r4k_flush_cache_page(void *args)
}
if (exec) {
if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0)
- drop_mmu_context(mm, cpu);
+ drop_mmu_context(mm);
} else
vaddr ? r4k_blast_icache_page(addr) :
r4k_blast_icache_user_page(addr);
@@ -937,119 +937,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
}
#endif /* CONFIG_DMA_NONCOHERENT */
-struct flush_cache_sigtramp_args {
- struct mm_struct *mm;
- struct page *page;
- unsigned long addr;
-};
-
-/*
- * While we're protected against bad userland addresses we don't care
- * very much about what happens in that case. Usually a segmentation
- * fault will dump the process later on anyway ...
- */
-static void local_r4k_flush_cache_sigtramp(void *args)
-{
- struct flush_cache_sigtramp_args *fcs_args = args;
- unsigned long addr = fcs_args->addr;
- struct page *page = fcs_args->page;
- struct mm_struct *mm = fcs_args->mm;
- int map_coherent = 0;
- void *vaddr;
-
- unsigned long ic_lsize = cpu_icache_line_size();
- unsigned long dc_lsize = cpu_dcache_line_size();
- unsigned long sc_lsize = cpu_scache_line_size();
-
- /*
- * If owns no valid ASID yet, cannot possibly have gotten
- * this page into the cache.
- */
- if (!has_valid_asid(mm, R4K_HIT))
- return;
-
- if (mm == current->active_mm) {
- vaddr = NULL;
- } else {
- /*
- * Use kmap_coherent or kmap_atomic to do flushes for
- * another ASID than the current one.
- */
- map_coherent = (cpu_has_dc_aliases &&
- page_mapcount(page) &&
- !Page_dcache_dirty(page));
- if (map_coherent)
- vaddr = kmap_coherent(page, addr);
- else
- vaddr = kmap_atomic(page);
- addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
- }
-
- R4600_HIT_CACHEOP_WAR_IMPL;
- if (!cpu_has_ic_fills_f_dc) {
- if (dc_lsize)
- vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
- : protected_writeback_dcache_line(
- addr & ~(dc_lsize - 1));
- if (!cpu_icache_snoops_remote_store && scache_size)
- vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
- : protected_writeback_scache_line(
- addr & ~(sc_lsize - 1));
- }
- if (ic_lsize)
- vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
- : protected_flush_icache_line(addr & ~(ic_lsize - 1));
-
- if (vaddr) {
- if (map_coherent)
- kunmap_coherent();
- else
- kunmap_atomic(vaddr);
- }
-
- if (MIPS4K_ICACHE_REFILL_WAR) {
- __asm__ __volatile__ (
- ".set push\n\t"
- ".set noat\n\t"
- ".set "MIPS_ISA_LEVEL"\n\t"
-#ifdef CONFIG_32BIT
- "la $at,1f\n\t"
-#endif
-#ifdef CONFIG_64BIT
- "dla $at,1f\n\t"
-#endif
- "cache %0,($at)\n\t"
- "nop; nop; nop\n"
- "1:\n\t"
- ".set pop"
- :
- : "i" (Hit_Invalidate_I));
- }
- if (MIPS_CACHE_SYNC_WAR)
- __asm__ __volatile__ ("sync");
-}
-
-static void r4k_flush_cache_sigtramp(unsigned long addr)
-{
- struct flush_cache_sigtramp_args args;
- int npages;
-
- down_read(&current->mm->mmap_sem);
-
- npages = get_user_pages_fast(addr, 1, 0, &args.page);
- if (npages < 1)
- goto out;
-
- args.mm = current->mm;
- args.addr = addr;
-
- r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);
-
- put_page(args.page);
-out:
- up_read(&current->mm->mmap_sem);
-}
-
static void r4k_flush_icache_all(void)
{
if (cpu_has_vtag_icache)
@@ -1978,7 +1865,6 @@ void r4k_cache_init(void)
__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
- flush_cache_sigtramp = r4k_flush_cache_sigtramp;
flush_icache_all = r4k_flush_icache_all;
local_flush_data_cache_page = local_r4k_flush_data_cache_page;
flush_data_cache_page = r4k_flush_data_cache_page;
@@ -2033,7 +1919,6 @@ void r4k_cache_init(void)
/* I$ fills from D$ just by emptying the write buffers */
flush_cache_page = (void *)b5k_instruction_hazard;
flush_cache_range = (void *)b5k_instruction_hazard;
- flush_cache_sigtramp = (void *)b5k_instruction_hazard;
local_flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_icache_range = (void *)b5k_instruction_hazard;
@@ -2052,7 +1937,6 @@ void r4k_cache_init(void)
flush_cache_mm = (void *)cache_noop;
flush_cache_page = (void *)cache_noop;
flush_cache_range = (void *)cache_noop;
- flush_cache_sigtramp = (void *)cache_noop;
flush_icache_all = (void *)cache_noop;
flush_data_cache_page = (void *)cache_noop;
local_flush_data_cache_page = (void *)cache_noop;
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 5f6c099a9457..b7c8a9d79c35 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -290,25 +290,6 @@ static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
}
}
-static void tx39_flush_cache_sigtramp(unsigned long addr)
-{
- unsigned long ic_lsize = current_cpu_data.icache.linesz;
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- unsigned long config;
- unsigned long flags;
-
- protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
-
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- protected_flush_icache_line(addr & ~(ic_lsize - 1));
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
static __init void tx39_probe_cache(void)
{
unsigned long config;
@@ -368,7 +349,6 @@ void tx39_cache_init(void)
flush_icache_range = (void *) tx39h_flush_icache_all;
local_flush_icache_range = (void *) tx39h_flush_icache_all;
- flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
flush_data_cache_page = (void *) tx39h_flush_icache_all;
@@ -397,7 +377,6 @@ void tx39_cache_init(void)
__flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
- flush_cache_sigtramp = tx39_flush_cache_sigtramp;
local_flush_data_cache_page = local_tx39_flush_data_cache_page;
flush_data_cache_page = tx39_flush_data_cache_page;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 55099fbff4e6..3da216988672 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -47,7 +47,6 @@ void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
/* MIPS specific cache operations */
-void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);
diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c
new file mode 100644
index 000000000000..b25564090939
--- /dev/null
+++ b/arch/mips/mm/context.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/atomic.h>
+#include <linux/mmu_context.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);
+
+static atomic64_t mmid_version;
+static unsigned int num_mmids;
+static unsigned long *mmid_map;
+
+static DEFINE_PER_CPU(u64, reserved_mmids);
+static cpumask_t tlb_flush_pending;
+
+static bool asid_versions_eq(int cpu, u64 a, u64 b)
+{
+ return ((a ^ b) & asid_version_mask(cpu)) == 0;
+}
+
+void get_new_mmu_context(struct mm_struct *mm)
+{
+ unsigned int cpu;
+ u64 asid;
+
+ /*
+ * This function is specific to ASIDs, and should not be called when
+ * MMIDs are in use.
+ */
+ if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
+ return;
+
+ cpu = smp_processor_id();
+ asid = asid_cache(cpu);
+
+ if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+ local_flush_tlb_all(); /* start new asid cycle */
+ }
+
+ set_cpu_context(cpu, mm, asid);
+ asid_cache(cpu) = asid;
+}
+EXPORT_SYMBOL_GPL(get_new_mmu_context);
+
+void check_mmu_context(struct mm_struct *mm)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * This function is specific to ASIDs, and should not be called when
+ * MMIDs are in use.
+ */
+ if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
+ return;
+
+ /* Check if our ASID is of an older version and thus invalid */
+ if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
+ get_new_mmu_context(mm);
+}
+EXPORT_SYMBOL_GPL(check_mmu_context);
+
+static void flush_context(void)
+{
+ u64 mmid;
+ int cpu;
+
+ /* Update the list of reserved MMIDs and the MMID bitmap */
+ bitmap_clear(mmid_map, 0, num_mmids);
+
+ /* Reserve an MMID for kmap/wired entries */
+ __set_bit(MMID_KERNEL_WIRED, mmid_map);
+
+ for_each_possible_cpu(cpu) {
+ mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);
+
+ /*
+ * If this CPU has already been through a
+ * rollover, but hasn't run another task in
+ * the meantime, we must preserve its reserved
+ * MMID, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (mmid == 0)
+ mmid = per_cpu(reserved_mmids, cpu);
+
+ __set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
+ per_cpu(reserved_mmids, cpu) = mmid;
+ }
+
+ /*
+ * Queue a TLB invalidation for each CPU to perform on next
+ * context-switch
+ */
+ cpumask_setall(&tlb_flush_pending);
+}
+
+static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
+{
+ bool hit;
+ int cpu;
+
+ /*
+ * Iterate over the set of reserved MMIDs looking for a match.
+ * If we find one, then we can update our mm to use newmmid
+ * (i.e. the same MMID in the current generation) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old MMID are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved MMID in a future
+ * generation.
+ */
+ hit = false;
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(reserved_mmids, cpu) == mmid) {
+ hit = true;
+ per_cpu(reserved_mmids, cpu) = newmmid;
+ }
+ }
+
+ return hit;
+}
+
+static u64 get_new_mmid(struct mm_struct *mm)
+{
+ static u32 cur_idx = MMID_KERNEL_WIRED + 1;
+ u64 mmid, version, mmid_mask;
+
+ mmid = cpu_context(0, mm);
+ version = atomic64_read(&mmid_version);
+ mmid_mask = cpu_asid_mask(&boot_cpu_data);
+
+ if (!asid_versions_eq(0, mmid, 0)) {
+ u64 newmmid = version | (mmid & mmid_mask);
+
+ /*
+ * If our current MMID was active during a rollover, we
+ * can continue to use it and this was just a false alarm.
+ */
+ if (check_update_reserved_mmid(mmid, newmmid)) {
+ mmid = newmmid;
+ goto set_context;
+ }
+
+ /*
+ * We had a valid MMID in a previous life, so try to re-use
+ * it if possible.
+ */
+ if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
+ mmid = newmmid;
+ goto set_context;
+ }
+ }
+
+ /* Allocate a free MMID */
+ mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
+ if (mmid != num_mmids)
+ goto reserve_mmid;
+
+ /* We're out of MMIDs, so increment the global version */
+ version = atomic64_add_return_relaxed(asid_first_version(0),
+ &mmid_version);
+
+ /* Note currently active MMIDs & mark TLBs as requiring flushes */
+ flush_context();
+
+ /* We have more MMIDs than CPUs, so this will always succeed */
+ mmid = find_first_zero_bit(mmid_map, num_mmids);
+
+reserve_mmid:
+ __set_bit(mmid, mmid_map);
+ cur_idx = mmid;
+ mmid |= version;
+set_context:
+ set_cpu_context(0, mm, mmid);
+ return mmid;
+}
+
+void check_switch_mmu_context(struct mm_struct *mm)
+{
+ unsigned int cpu = smp_processor_id();
+ u64 ctx, old_active_mmid;
+ unsigned long flags;
+
+ if (!cpu_has_mmid) {
+ check_mmu_context(mm);
+ write_c0_entryhi(cpu_asid(cpu, mm));
+ goto setup_pgd;
+ }
+
+ /*
+ * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
+ * unnecessary.
+ *
+ * The memory ordering here is subtle. If our active_mmids is non-zero
+ * and the MMID matches the current version, then we update the CPU's
+ * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
+ * means that either:
+ *
+ * - We get a zero back from the cmpxchg and end up waiting on
+ * cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
+ * with the rollover and so we are forced to see the updated
+ * generation.
+ *
+ * - We get a valid MMID back from the cmpxchg, which means the
+ * relaxed xchg in flush_context will treat us as reserved
+ * because atomic RmWs are totally ordered for a given location.
+ */
+ ctx = cpu_context(cpu, mm);
+ old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
+ if (!old_active_mmid ||
+ !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
+ !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
+ raw_spin_lock_irqsave(&cpu_mmid_lock, flags);
+
+ ctx = cpu_context(cpu, mm);
+ if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
+ ctx = get_new_mmid(mm);
+
+ WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
+ raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
+ }
+
+ /*
+ * Invalidate the local TLB if needed. Note that we must only clear our
+ * bit in tlb_flush_pending after this is complete, so that the
+ * cpu_has_shared_ftlb_entries case below isn't misled.
+ */
+ if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+ local_flush_tlb_all();
+ cpumask_clear_cpu(cpu, &tlb_flush_pending);
+ }
+
+ write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));
+
+ /*
+ * If this CPU shares FTLB entries with its siblings and one or more of
+ * those siblings hasn't yet invalidated its TLB following a version
+ * increase then we need to invalidate any TLB entries for our MMID
+ * that we might otherwise pick up from a sibling.
+ *
+ * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
+ * CONFIG_SMP=n kernels.
+ */
+#ifdef CONFIG_SMP
+ if (cpu_has_shared_ftlb_entries &&
+ cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
+ /* Ensure we operate on the new MMID */
+ mtc0_tlbw_hazard();
+
+ /*
+ * Invalidate all TLB entries associated with the new
+ * MMID, and wait for the invalidation to complete.
+ */
+ ginvt_mmid();
+ sync_ginv();
+ }
+#endif
+
+setup_pgd:
+ TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+}
+EXPORT_SYMBOL_GPL(check_switch_mmu_context);
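
The fast path above avoids cpu_mmid_lock by publishing the context into the per-CPU asid_cache with a relaxed cmpxchg, falling back to the locked slow path whenever the cached value is zero, the generation is stale, or the cmpxchg loses a race with a rollover. The C11-atomics sketch below shows only that shape; every example_* name is hypothetical and it merely stands in for the kernel's READ_ONCE()/cmpxchg_relaxed() primitives.

/*
 * Sketch of the lock-free fast-path shape; the single static variables
 * stand in for the kernel's per-CPU asid_cache and atomic64_t mmid_version.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MMID_MASK	((UINT64_C(1) << 16) - 1)	/* assumed width */

static _Atomic uint64_t example_version;	/* global generation counter */
static _Atomic uint64_t example_active_ctx;	/* this CPU's cached context */

static bool example_fast_path(uint64_t ctx)
{
	uint64_t old = atomic_load_explicit(&example_active_ctx,
					    memory_order_relaxed);

	/* Zero means a rollover flagged this CPU: take the locked slow path. */
	if (!old)
		return false;

	/* A stale generation also forces the slow path. */
	if ((ctx & ~EXAMPLE_MMID_MASK) !=
	    atomic_load_explicit(&example_version, memory_order_relaxed))
		return false;

	/* Publish ctx; a failed exchange means a rollover raced us. */
	return atomic_compare_exchange_strong_explicit(&example_active_ctx,
			&old, ctx, memory_order_relaxed,
			memory_order_relaxed);
}
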
+
+static int mmid_init(void)
+{
+ if (!cpu_has_mmid)
+ return 0;
+
+ /*
+ * Expect allocation after rollover to fail if we don't have at least
+ * one more MMID than CPUs.
+ */
+ num_mmids = asid_first_version(0);
+ WARN_ON(num_mmids <= num_possible_cpus());
+
+ atomic64_set(&mmid_version, asid_first_version(0));
+ mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
+ GFP_KERNEL);
+ if (!mmid_map)
+ panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);
+
+ /* Reserve an MMID for kmap/wired entries */
+ __set_bit(MMID_KERNEL_WIRED, mmid_map);
+
+ pr_info("MMID allocator initialised with %u entries\n", num_mmids);
+ return 0;
+}
+early_initcall(mmid_init);
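
The WARN_ON() in mmid_init() captures the invariant that keeps the post-rollover find_first_zero_bit() in get_new_mmid() safe: after flush_context() reserves each CPU's active MMID (plus MMID_KERNEL_WIRED), a free ID is only guaranteed if num_mmids exceeds the CPU count. As a purely hypothetical illustration, a 16-bit MemoryMapID field would give asid_first_version(0) = 2^16 = 65536 MMIDs, so even a machine with a few hundred possible CPUs leaves tens of thousands of IDs free after every rollover; the warning would only trigger on an implementation with a pathologically narrow MMID field.
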
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index cb38461391cb..b57465733e87 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -120,13 +120,8 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
if (PageHighMem(page)) {
void *addr;
- if (offset + len > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
- }
+ if (offset + len > PAGE_SIZE)
len = PAGE_SIZE - offset;
- }
addr = kmap_atomic(page);
dma_sync_virt(addr + offset, len, dir);
@@ -145,12 +140,14 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
dma_sync_phys(paddr, size, dir);
}
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
if (cpu_needs_post_dma_flush(dev))
dma_sync_phys(paddr, size, dir);
}
+#endif
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index b521d8e2d359..c3b45e248806 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -84,6 +84,7 @@ void setup_zero_pages(void)
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
enum fixed_addresses idx;
+ unsigned int uninitialized_var(old_mmid);
unsigned long vaddr, flags, entrylo;
unsigned long old_ctx;
pte_t pte;
@@ -110,6 +111,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(MMID_KERNEL_WIRED);
+ }
#ifdef CONFIG_XPA
if (cpu_has_xpa) {
entrylo = (pte.pte_low & _PFNX_MASK);
@@ -124,6 +129,8 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
local_irq_restore(flags);
return (void*) vaddr;
diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c
index 2a116084216f..9507421de335 100644
--- a/arch/mips/mm/sc-debugfs.c
+++ b/arch/mips/mm/sc-debugfs.c
@@ -55,20 +55,11 @@ static const struct file_operations sc_prefetch_fops = {
static int __init sc_debugfs_init(void)
{
- struct dentry *dir, *file;
-
- if (!mips_debugfs_dir)
- return -ENODEV;
+ struct dentry *dir;
dir = debugfs_create_dir("l2cache", mips_debugfs_dir);
- if (IS_ERR(dir))
- return PTR_ERR(dir);
-
- file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir,
- NULL, &sc_prefetch_fops);
- if (!file)
- return -ENOMEM;
-
+ debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, NULL,
+ &sc_prefetch_fops);
return 0;
}
late_initcall(sc_debugfs_init);
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 6f589e0112ce..50f207591b6d 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -67,18 +67,6 @@ void local_flush_tlb_all(void)
local_irq_restore(flags);
}
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0) {
-#ifdef DEBUG_TLB
- printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
-#endif
- drop_mmu_context(mm, cpu);
- }
-}
-
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
@@ -117,7 +105,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
write_c0_entryhi(oldpid);
} else {
- drop_mmu_context(mm, cpu);
+ drop_mmu_context(mm);
}
local_irq_restore(flags);
}
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 0596505770db..c13e46ced425 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -104,23 +104,6 @@ void local_flush_tlb_all(void)
}
EXPORT_SYMBOL(local_flush_tlb_all);
-/* All entries common to a mm share an asid. To effectively flush
- these entries, we just bump the asid. */
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- int cpu;
-
- preempt_disable();
-
- cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0) {
- drop_mmu_context(mm, cpu);
- }
-
- preempt_enable();
-}
-
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
@@ -137,14 +120,23 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (size <= (current_cpu_data.tlbsizeftlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
- int oldpid = read_c0_entryhi();
+ unsigned long old_entryhi, uninitialized_var(old_mmid);
int newpid = cpu_asid(cpu, mm);
+ old_entryhi = read_c0_entryhi();
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(newpid);
+ }
+
htw_stop();
while (start < end) {
int idx;
- write_c0_entryhi(start | newpid);
+ if (cpu_has_mmid)
+ write_c0_entryhi(start);
+ else
+ write_c0_entryhi(start | newpid);
start += (PAGE_SIZE << 1);
mtc0_tlbw_hazard();
tlb_probe();
@@ -160,10 +152,12 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
tlb_write_indexed();
}
tlbw_use_hazard();
- write_c0_entryhi(oldpid);
+ write_c0_entryhi(old_entryhi);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
htw_start();
} else {
- drop_mmu_context(mm, cpu);
+ drop_mmu_context(mm);
}
flush_micro_tlb();
local_irq_restore(flags);
@@ -220,15 +214,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
int cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
- unsigned long flags;
- int oldpid, newpid, idx;
+ unsigned long uninitialized_var(old_mmid);
+ unsigned long flags, old_entryhi;
+ int idx;
- newpid = cpu_asid(cpu, vma->vm_mm);
page &= (PAGE_MASK << 1);
local_irq_save(flags);
- oldpid = read_c0_entryhi();
+ old_entryhi = read_c0_entryhi();
htw_stop();
- write_c0_entryhi(page | newpid);
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_entryhi(page);
+ write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
+ } else {
+ write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
+ }
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
@@ -244,7 +244,9 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
tlbw_use_hazard();
finish:
- write_c0_entryhi(oldpid);
+ write_c0_entryhi(old_entryhi);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
htw_start();
flush_micro_tlb_vm(vma);
local_irq_restore(flags);
@@ -307,9 +309,13 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_save(flags);
htw_stop();
- pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
address &= (PAGE_MASK << 1);
- write_c0_entryhi(address | pid);
+ if (cpu_has_mmid) {
+ write_c0_entryhi(address);
+ } else {
+ pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
+ write_c0_entryhi(address | pid);
+ }
pgdp = pgd_offset(vma->vm_mm, address);
mtc0_tlbw_hazard();
tlb_probe();
@@ -375,12 +381,17 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#ifdef CONFIG_XPA
panic("Broken for XPA kernels");
#else
+ unsigned int uninitialized_var(old_mmid);
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
unsigned long old_ctx;
local_irq_save(flags);
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(MMID_KERNEL_WIRED);
+ }
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
htw_stop();
@@ -398,6 +409,8 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
tlbw_use_hazard(); /* What is the hazard here? */
htw_start();
write_c0_pagemask(old_pagemask);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index e86e2e55ad3e..c1e9e144007e 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -50,14 +50,6 @@ void local_flush_tlb_all(void)
local_irq_restore(flags);
}
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0)
- drop_mmu_context(mm, cpu);
-}
-
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
@@ -75,7 +67,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
local_irq_save(flags);
if (size > TFP_TLB_SIZE / 2) {
- drop_mmu_context(mm, cpu);
+ drop_mmu_context(mm);
goto out_restore;
}
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 37b1cb246332..65b6e85447b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* to mimic that here by taking a load/istream page
* fault.
*/
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(p, 0);
UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
uasm_i_jr(p, ptr);
@@ -1646,6 +1648,8 @@ static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(p, 0);
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
uasm_i_lld(p, pte, 0, ptr);
@@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void)
#endif
uasm_l_nopage_tlbl(&l, p);
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_0 & 1) {
@@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void)
#endif
uasm_l_nopage_tlbs(&l, p);
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
@@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void)
#endif
uasm_l_nopage_tlbm(&l, p);
+ if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+ uasm_i_sync(&p, 0);
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index b16710a8a9e7..0effd3cba9a7 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -79,8 +79,6 @@ enum reg_val_type {
REG_64BIT_32BIT,
/* 32-bit compatible, need truncation for 64-bit ops. */
REG_32BIT,
- /* 32-bit zero extended. */
- REG_32BIT_ZERO_EX,
/* 32-bit no sign/zero extension needed. */
REG_32BIT_POS
};
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
const struct bpf_prog *prog = ctx->skf;
int stack_adjust = ctx->stack_size;
int store_offset = stack_adjust - 8;
+ enum reg_val_type td;
int r0 = MIPS_R_V0;
- if (dest_reg == MIPS_R_RA &&
- get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+ if (dest_reg == MIPS_R_RA) {
/* Don't let zero extended value escape. */
- emit_instr(ctx, sll, r0, r0, 0);
+ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+ if (td == REG_64BIT)
+ emit_instr(ctx, sll, r0, r0, 0);
+ }
if (ctx->flags & EBPF_SAVE_RA) {
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (dst < 0)
return dst;
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
- if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ if (td == REG_64BIT) {
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
}
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (dst < 0)
return dst;
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
- if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ if (td == REG_64BIT) {
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
}
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (dst < 0)
return dst;
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
- if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+ if (td == REG_64BIT)
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
if (insn->imm == 1) {
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (src < 0 || dst < 0)
return -EINVAL;
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
- if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ if (td == REG_64BIT) {
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
}
did_move = false;
ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
- if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+ if (ts == REG_64BIT) {
int tmp_reg = MIPS_R_AT;
if (bpf_op == BPF_MOV) {
@@ -1254,8 +1255,7 @@ jeq_common:
if (insn->imm == 64 && td == REG_32BIT)
emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
- if (insn->imm != 64 &&
- (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+ if (insn->imm != 64 && td == REG_64BIT) {
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
}
@@ -1819,7 +1819,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* Update the icache */
flush_icache_range((unsigned long)ctx.target,
- (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+ (unsigned long)&ctx.target[ctx.idx]);
if (bpf_jit_enable > 1)
/* Dump JIT code */
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 8185a2bfaf09..c4f976593061 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o
#
# These are still pretty much in the old state, watch, go blind.
#
+obj-$(CONFIG_ATH79) += fixup-ath79.o
obj-$(CONFIG_LASAT) += pci-lasat.o
obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o
obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
diff --git a/arch/mips/pci/fixup-ath79.c b/arch/mips/pci/fixup-ath79.c
new file mode 100644
index 000000000000..9e651a4af05e
--- /dev/null
+++ b/arch/mips/pci/fixup-ath79.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2018 John Crispin <john@phrozen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/of_pci.h>
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10e..288b58b00dc8 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
int irq;
struct irq_chip *msi;
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+ return 0;
+ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index a1d2c4ae0d1b..df95b0da08f2 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -44,7 +44,7 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 * value)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- bridge_t *bridge = bc->base;
+ struct bridge_regs *bridge = bc->base;
int slot = PCI_SLOT(devfn);
int fn = PCI_FUNC(devfn);
volatile void *addr;
@@ -56,11 +56,11 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * IOC3 is fucking fucked beyond belief ... Don't even give the
+ * IOC3 is broken beyond belief ... Don't even give the
* generic PCI code a chance to look at it for real ...
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto oh_my_gawd;
+ goto is_ioc3;
addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
@@ -73,21 +73,16 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-oh_my_gawd:
+is_ioc3:
/*
- * IOC3 is fucking fucked beyond belief ... Don't even give the
- * generic PCI code a chance to look at the wrong register.
+ * IOC3 special handling
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
*value = emulate_ioc3_cfg(where, size);
return PCIBIOS_SUCCESSFUL;
}
- /*
- * IOC3 is fucking fucked beyond belief ... Don't try to access
- * anything but 32-bit words ...
- */
addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
if (get_dbe(cf, (u32 *) addr))
@@ -104,7 +99,7 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 * value)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- bridge_t *bridge = bc->base;
+ struct bridge_regs *bridge = bc->base;
int busno = bus->number;
int slot = PCI_SLOT(devfn);
int fn = PCI_FUNC(devfn);
@@ -112,19 +107,19 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
u32 cf, shift, mask;
int res;
- bridge->b_pci_cfg = (busno << 16) | (slot << 11);
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
if (get_dbe(cf, (u32 *) addr))
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * IOC3 is fucking fucked beyond belief ... Don't even give the
+ * IOC3 is broken beyond belief ... Don't even give the
* generic PCI code a chance to look at it for real ...
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto oh_my_gawd;
+ goto is_ioc3;
- bridge->b_pci_cfg = (busno << 16) | (slot << 11);
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
if (size == 1)
@@ -136,22 +131,17 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-oh_my_gawd:
+is_ioc3:
/*
- * IOC3 is fucking fucked beyond belief ... Don't even give the
- * generic PCI code a chance to look at the wrong register.
+ * IOC3 special handling
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
*value = emulate_ioc3_cfg(where, size);
return PCIBIOS_SUCCESSFUL;
}
- /*
- * IOC3 is fucking fucked beyond belief ... Don't try to access
- * anything but 32-bit words ...
- */
- bridge->b_pci_cfg = (busno << 16) | (slot << 11);
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
addr = &bridge->b_type1_cfg.c[(fn << 8) | where];
if (get_dbe(cf, (u32 *) addr))
@@ -177,7 +167,7 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- bridge_t *bridge = bc->base;
+ struct bridge_regs *bridge = bc->base;
int slot = PCI_SLOT(devfn);
int fn = PCI_FUNC(devfn);
volatile void *addr;
@@ -189,11 +179,11 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * IOC3 is fucking fucked beyond belief ... Don't even give the
+ * IOC3 is broken beyond belief ... Don't even give the
* generic PCI code a chance to look at it for real ...
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto oh_my_gawd;
+ goto is_ioc3;
addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
@@ -210,19 +200,14 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
-oh_my_gawd:
+is_ioc3:
/*
- * IOC3 is fucking fucked beyond belief ... Don't even give the
- * generic PCI code a chance to touch the wrong register.
+ * IOC3 special handling
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
return PCIBIOS_SUCCESSFUL;
- /*
- * IOC3 is fucking fucked beyond belief ... Don't try to access
- * anything but 32-bit words ...
- */
addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
if (get_dbe(cf, (u32 *) addr))
@@ -243,7 +228,7 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- bridge_t *bridge = bc->base;
+ struct bridge_regs *bridge = bc->base;
int slot = PCI_SLOT(devfn);
int fn = PCI_FUNC(devfn);
int busno = bus->number;
@@ -251,17 +236,17 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
u32 cf, shift, mask, smask;
int res;
- bridge->b_pci_cfg = (busno << 16) | (slot << 11);
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
if (get_dbe(cf, (u32 *) addr))
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * IOC3 is fucking fucked beyond belief ... Don't even give the
+ * IOC3 is broken beyond belief ... Don't even give the
* generic PCI code a chance to look at it for real ...
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto oh_my_gawd;
+ goto is_ioc3;
addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
@@ -278,19 +263,14 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
-oh_my_gawd:
+is_ioc3:
/*
- * IOC3 is fucking fucked beyond belief ... Don't even give the
- * generic PCI code a chance to touch the wrong register.
+ * IOC3 special handling
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
return PCIBIOS_SUCCESSFUL;
- /*
- * IOC3 is fucking fucked beyond belief ... Don't try to access
- * anything but 32-bit words ...
- */
addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
if (get_dbe(cf, (u32 *) addr))
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index c94a66070a60..3c177b4d0609 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -24,22 +24,11 @@
#define MAX_PCI_BUSSES 40
/*
- * Max #PCI devices (like scsi controllers) we handle on a bus.
- */
-#define MAX_DEVICES_PER_PCIBUS 8
-
-/*
* XXX: No kmalloc available when we do our crosstalk scan,
* we should try to move it later in the boot process.
*/
static struct bridge_controller bridges[MAX_PCI_BUSSES];
-/*
- * Translate from irq to software PCI bus number and PCI slot.
- */
-struct bridge_controller *irq_to_bridge[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
-int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
-
extern struct pci_ops bridge_pci_ops;
int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
@@ -47,7 +36,6 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
unsigned long offset = NODE_OFFSET(nasid);
struct bridge_controller *bc;
static int num_bridges = 0;
- bridge_t *bridge;
int slot;
pci_set_flags(PCI_PROBE_ONLY);
@@ -78,7 +66,6 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
bc->io.end = ~0UL;
bc->io.flags = IORESOURCE_IO;
- bc->irq_cpu = smp_processor_id();
bc->widget_id = widget_id;
bc->nasid = nasid;
@@ -87,45 +74,43 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
/*
* point to this bridge
*/
- bridge = (bridge_t *) RAW_NODE_SWIN_BASE(nasid, widget_id);
+ bc->base = (struct bridge_regs *)RAW_NODE_SWIN_BASE(nasid, widget_id);
/*
* Clear all pending interrupts.
*/
- bridge->b_int_rst_stat = BRIDGE_IRR_ALL_CLR;
+ bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);
/*
* Until otherwise set up, assume all interrupts are from slot 0
*/
- bridge->b_int_device = 0x0;
+ bridge_write(bc, b_int_device, 0x0);
/*
* swap pio's to pci mem and io space (big windows)
*/
- bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP |
- BRIDGE_CTRL_MEM_SWAP;
+ bridge_set(bc, b_wid_control, BRIDGE_CTRL_IO_SWAP |
+ BRIDGE_CTRL_MEM_SWAP);
#ifdef CONFIG_PAGE_SIZE_4KB
- bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
+ bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
#else /* 16kB or larger */
- bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
+ bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
#endif
/*
* Hmm... IRIX sets additional bits in the address which
* are documented as reserved in the bridge docs.
*/
- bridge->b_wid_int_upper = 0x8000 | (masterwid << 16);
- bridge->b_wid_int_lower = 0x01800090; /* PI_INT_PEND_MOD off*/
- bridge->b_dir_map = (masterwid << 20); /* DMA */
- bridge->b_int_enable = 0;
+ bridge_write(bc, b_wid_int_upper, 0x8000 | (masterwid << 16));
+ bridge_write(bc, b_wid_int_lower, 0x01800090); /* PI_INT_PEND_MOD off*/
+ bridge_write(bc, b_dir_map, (masterwid << 20)); /* DMA */
+ bridge_write(bc, b_int_enable, 0);
for (slot = 0; slot < 8; slot ++) {
- bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
+ bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
bc->pci_int[slot] = -1;
}
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- bc->base = bridge;
+ bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */
register_pci_controller(&bc->pc);
@@ -168,16 +153,12 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
irq = bc->pci_int[slot];
if (irq == -1) {
- irq = request_bridge_irq(bc);
+ irq = request_bridge_irq(bc, slot);
if (irq < 0)
return irq;
bc->pci_int[slot] = irq;
}
-
- irq_to_bridge[irq] = bc;
- irq_to_slot[irq] = slot;
-
dev->irq = irq;
return 0;
@@ -206,7 +187,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
static inline void pci_disable_swapping(struct pci_dev *dev)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- bridge_t *bridge = bc->base;
+ struct bridge_regs *bridge = bc->base;
int slot = PCI_SLOT(dev->devfn);
/* Turn off byte swapping */
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 5017d5843c5a..fc29b85cfa92 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
if (octeon_has_feature(OCTEON_FEATURE_PCIE))
return 0;
+ if (!octeon_is_pci_host()) {
+ pr_notice("Not in host mode, PCI Controller not initialized\n");
+ return 0;
+ }
+
/* Point pcibios_map_irq() to the PCI version of it */
octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
else
octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
- if (!octeon_is_pci_host()) {
- pr_notice("Not in host mode, PCI Controller not initialized\n");
- return 0;
- }
-
/* PCI I/O and PCI MEM values */
set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
ioport_resource.start = 0;
diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c
index e1fa5972a81d..648f5eb2ba68 100644
--- a/arch/mips/ralink/bootrom.c
+++ b/arch/mips/ralink/bootrom.c
@@ -35,13 +35,7 @@ static const struct file_operations bootrom_file_ops = {
static int bootrom_setup(void)
{
- if (!debugfs_create_file("bootrom", 0444,
- NULL, NULL, &bootrom_file_ops)) {
- pr_err("Failed to create bootrom debugfs file\n");
-
- return -EINVAL;
- }
-
+ debugfs_create_file("bootrom", 0444, NULL, NULL, &bootrom_file_ops);
return 0;
}
diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile
index 73502fda13ee..27c14ede191e 100644
--- a/arch/mips/sgi-ip27/Makefile
+++ b/arch/mips/sgi-ip27/Makefile
@@ -3,10 +3,9 @@
# Makefile for the IP27 specific kernel interface routines under Linux.
#
-obj-y := ip27-berr.o ip27-irq.o ip27-irqno.o ip27-init.o ip27-klconfig.o \
+obj-y := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o \
ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \
ip27-hubio.o ip27-xtalk.o
obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o
-obj-$(CONFIG_PCI) += ip27-irq-pci.o
obj-$(CONFIG_SMP) += ip27-smp.o
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index 2abe016a0ffc..68fb0cb89d9d 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -63,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
* after we write it.
*/
IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
- (void) HUB_L(IIO_ITTE_GET(nasid, i));
+ __raw_readq(IIO_ITTE_GET(nasid, i));
return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
}
@@ -135,7 +135,7 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
**/
static void hub_set_piomode(nasid_t nasid)
{
- hubreg_t ii_iowa;
+ u64 ii_iowa;
hubii_wcr_t ii_wcr;
unsigned i;
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index e501c43c02db..6074efeff894 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -52,13 +52,10 @@ EXPORT_SYMBOL_GPL(sn_cpu_info);
extern void pcibr_setup(cnodeid_t);
-extern void xtalk_probe_node(cnodeid_t nid);
-
static void per_hub_init(cnodeid_t cnode)
{
struct hub_data *hub = hub_data(cnode);
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
- int i;
cpumask_set_cpu(smp_processor_id(), &hub->h_cpus);
@@ -71,7 +68,6 @@ static void per_hub_init(cnodeid_t cnode)
REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
hub_rtc_init(cnode);
- xtalk_probe_node(cnode);
#ifdef CONFIG_REPLICATE_EXHANDLERS
/*
@@ -90,24 +86,6 @@ static void per_hub_init(cnodeid_t cnode)
__flush_cache_all();
}
#endif
-
- /*
- * Some interrupts are reserved by hardware or by software convention.
- * Mark these as reserved right away so they won't be used accidentally
- * later.
- */
- for (i = 0; i <= BASE_PCI_IRQ; i++) {
- __set_bit(i, hub->irq_alloc_mask);
- LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i);
- }
-
- __set_bit(IP_PEND0_6_63, hub->irq_alloc_mask);
- LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
-
- for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
- __set_bit(i, hub->irq_alloc_mask);
- LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i);
- }
}
void per_cpu_init(void)
@@ -116,8 +94,6 @@ void per_cpu_init(void)
int slice = LOCAL_HUB_L(PI_CPU_NUM);
cnodeid_t cnode = get_compact_nodeid();
struct hub_data *hub = hub_data(cnode);
- struct slice_data *si = hub->slice + slice;
- int i;
if (test_and_set_bit(slice, &hub->slice_map))
return;
@@ -126,22 +102,14 @@ void per_cpu_init(void)
per_hub_init(cnode);
- for (i = 0; i < LEVELS_PER_SLICE; i++)
- si->level_to_irq[i] = -1;
-
- /*
- * We use this so we can find the local hub's data as fast as only
- * possible.
- */
- cpu_data[cpu].data = si;
-
cpu_time_init();
install_ipi();
/* Install our NMI handler if symmon hasn't installed one. */
install_cpu_nmi_handler(cputoslice(cpu));
- set_c0_status(SRB_DEV0 | SRB_DEV1);
+ enable_percpu_irq(IP27_HUB_PEND0_IRQ, IRQ_TYPE_NONE);
+ enable_percpu_irq(IP27_HUB_PEND1_IRQ, IRQ_TYPE_NONE);
}
/*
@@ -177,7 +145,7 @@ extern void ip27_reboot_setup(void);
void __init plat_mem_setup(void)
{
- hubreg_t p, e, n_mode;
+ u64 p, e, n_mode;
nasid_t nid;
ip27_reboot_setup();
@@ -215,7 +183,6 @@ void __init plat_mem_setup(void)
#endif
ioc3_eth_init();
- per_cpu_init();
set_io_port_base(IO_BASE);
}
diff --git a/arch/mips/sgi-ip27/ip27-irq-pci.c b/arch/mips/sgi-ip27/ip27-irq-pci.c
deleted file mode 100644
index cd449e90b917..000000000000
--- a/arch/mips/sgi-ip27/ip27-irq-pci.c
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
- *
- * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 1999 - 2001 Kanoj Sarcar
- */
-
-#undef DEBUG
-
-#include <linux/irq.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/smp.h>
-#include <linux/random.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-
-#include <asm/processor.h>
-#include <asm/pci/bridge.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/agent.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/hub.h>
-#include <asm/sn/intr.h>
-
-/*
- * Linux has a controller-independent x86 interrupt architecture.
- * every controller has a 'controller-template', that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
- * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
- * (IO-APICs assumed to be messaging to Pentium local-APICs)
- *
- * the code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic.
- */
-
-extern struct bridge_controller *irq_to_bridge[];
-extern int irq_to_slot[];
-
-/*
- * use these macros to get the encoded nasid and widget id
- * from the irq value
- */
-#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)]
-#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
-
-static inline int alloc_level(int cpu, int irq)
-{
- struct hub_data *hub = hub_data(cpu_to_node(cpu));
- struct slice_data *si = cpu_data[cpu].data;
- int level;
-
- level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
- if (level >= LEVELS_PER_SLICE)
- panic("Cpu %d flooded with devices", cpu);
-
- __set_bit(level, hub->irq_alloc_mask);
- si->level_to_irq[level] = irq;
-
- return level;
-}
-
-static inline int find_level(cpuid_t *cpunum, int irq)
-{
- int cpu, i;
-
- for_each_online_cpu(cpu) {
- struct slice_data *si = cpu_data[cpu].data;
-
- for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
- if (si->level_to_irq[i] == irq) {
- *cpunum = cpu;
-
- return i;
- }
- }
-
- panic("Could not identify cpu/level for irq %d", irq);
-}
-
-static int intr_connect_level(int cpu, int bit)
-{
- nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
- struct slice_data *si = cpu_data[cpu].data;
-
- set_bit(bit, si->irq_enable_mask);
-
- if (!cputoslice(cpu)) {
- REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
- REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
- } else {
- REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
- REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
- }
-
- return 0;
-}
-
-static int intr_disconnect_level(int cpu, int bit)
-{
- nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
- struct slice_data *si = cpu_data[cpu].data;
-
- clear_bit(bit, si->irq_enable_mask);
-
- if (!cputoslice(cpu)) {
- REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
- REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
- } else {
- REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
- REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
- }
-
- return 0;
-}
-
-/* Startup one of the (PCI ...) IRQs routes over a bridge. */
-static unsigned int startup_bridge_irq(struct irq_data *d)
-{
- struct bridge_controller *bc;
- bridgereg_t device;
- bridge_t *bridge;
- int pin, swlevel;
- cpuid_t cpu;
-
- pin = SLOT_FROM_PCI_IRQ(d->irq);
- bc = IRQ_TO_BRIDGE(d->irq);
- bridge = bc->base;
-
- pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin);
- /*
- * "map" irq to a swlevel greater than 6 since the first 6 bits
- * of INT_PEND0 are taken
- */
- swlevel = find_level(&cpu, d->irq);
- bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
- bridge->b_int_enable |= (1 << pin);
- bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */
-
- /*
- * Enable sending of an interrupt clear packt to the hub on a high to
- * low transition of the interrupt pin.
- *
- * IRIX sets additional bits in the address which are documented as
- * reserved in the bridge docs.
- */
- bridge->b_int_mode |= (1UL << pin);
-
- /*
- * We assume the bridge to have a 1:1 mapping between devices
- * (slots) and intr pins.
- */
- device = bridge->b_int_device;
- device &= ~(7 << (pin*3));
- device |= (pin << (pin*3));
- bridge->b_int_device = device;
-
- bridge->b_wid_tflush;
-
- intr_connect_level(cpu, swlevel);
-
- return 0; /* Never anything pending. */
-}
-
-/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */
-static void shutdown_bridge_irq(struct irq_data *d)
-{
- struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq);
- bridge_t *bridge = bc->base;
- int pin, swlevel;
- cpuid_t cpu;
-
- pr_debug("bridge_shutdown: irq 0x%x\n", d->irq);
- pin = SLOT_FROM_PCI_IRQ(d->irq);
-
- /*
- * map irq to a swlevel greater than 6 since the first 6 bits
- * of INT_PEND0 are taken
- */
- swlevel = find_level(&cpu, d->irq);
- intr_disconnect_level(cpu, swlevel);
-
- bridge->b_int_enable &= ~(1 << pin);
- bridge->b_wid_tflush;
-}
-
-static inline void enable_bridge_irq(struct irq_data *d)
-{
- cpuid_t cpu;
- int swlevel;
-
- swlevel = find_level(&cpu, d->irq); /* Criminal offence */
- intr_connect_level(cpu, swlevel);
-}
-
-static inline void disable_bridge_irq(struct irq_data *d)
-{
- cpuid_t cpu;
- int swlevel;
-
- swlevel = find_level(&cpu, d->irq); /* Criminal offence */
- intr_disconnect_level(cpu, swlevel);
-}
-
-static struct irq_chip bridge_irq_type = {
- .name = "bridge",
- .irq_startup = startup_bridge_irq,
- .irq_shutdown = shutdown_bridge_irq,
- .irq_mask = disable_bridge_irq,
- .irq_unmask = enable_bridge_irq,
-};
-
-void register_bridge_irq(unsigned int irq)
-{
- irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
-}
-
-int request_bridge_irq(struct bridge_controller *bc)
-{
- int irq = allocate_irqno();
- int swlevel, cpu;
- nasid_t nasid;
-
- if (irq < 0)
- return irq;
-
- /*
- * "map" irq to a swlevel greater than 6 since the first 6 bits
- * of INT_PEND0 are taken
- */
- cpu = bc->irq_cpu;
- swlevel = alloc_level(cpu, irq);
- if (unlikely(swlevel < 0)) {
- free_irqno(irq);
-
- return -EAGAIN;
- }
-
- /* Make sure it's not already pending when we connect it. */
- nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
- REMOTE_HUB_CLR_INTR(nasid, swlevel);
-
- intr_connect_level(cpu, swlevel);
-
- register_bridge_irq(irq);
-
- return irq;
-}
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 0dde6164a06f..710a59764b01 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -7,67 +7,234 @@
* Copyright (C) 1999 - 2001 Kanoj Sarcar
*/
-#undef DEBUG
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/smp.h>
-#include <linux/random.h>
#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/delay.h>
#include <linux/bitops.h>
-#include <asm/bootinfo.h>
#include <asm/io.h>
-#include <asm/mipsregs.h>
-
-#include <asm/processor.h>
+#include <asm/irq_cpu.h>
+#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>
-/*
- * Linux has a controller-independent x86 interrupt architecture.
- * every controller has a 'controller-template', that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
- * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
- * (IO-APICs assumed to be messaging to Pentium local-APICs)
- *
- * the code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic.
- */
+struct hub_irq_data {
+ struct bridge_controller *bc;
+ u64 *irq_mask[2];
+ cpuid_t cpu;
+ int bit;
+ int pin;
+};
-extern asmlinkage void ip27_irq(void);
+static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);
-/*
- * Find first bit set
- */
-static int ms1bit(unsigned long x)
+static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);
+
+static inline int alloc_level(void)
+{
+ int level;
+
+again:
+ level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
+ if (level >= IP27_HUB_IRQ_COUNT)
+ return -ENOSPC;
+
+ if (test_and_set_bit(level, hub_irq_map))
+ goto again;
+
+ return level;
+}
+
+static void enable_hub_irq(struct irq_data *d)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
+
+ set_bit(hd->bit, mask);
+ __raw_writeq(mask[0], hd->irq_mask[0]);
+ __raw_writeq(mask[1], hd->irq_mask[1]);
+}
+
+static void disable_hub_irq(struct irq_data *d)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+ unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
+
+ clear_bit(hd->bit, mask);
+ __raw_writeq(mask[0], hd->irq_mask[0]);
+ __raw_writeq(mask[1], hd->irq_mask[1]);
+}
+
+static unsigned int startup_bridge_irq(struct irq_data *d)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+ struct bridge_controller *bc;
+ nasid_t nasid;
+ u32 device;
+ int pin;
+
+ if (!hd)
+ return -EINVAL;
+
+ pin = hd->pin;
+ bc = hd->bc;
+
+ nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(hd->cpu));
+ bridge_write(bc, b_int_addr[pin].addr,
+ (0x20000 | hd->bit | (nasid << 8)));
+ bridge_set(bc, b_int_enable, (1 << pin));
+ bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
+
+ /*
+	 * Enable sending of an interrupt clear packet to the hub on a high to
+ * low transition of the interrupt pin.
+ *
+ * IRIX sets additional bits in the address which are documented as
+ * reserved in the bridge docs.
+ */
+ bridge_set(bc, b_int_mode, (1UL << pin));
+
+ /*
+ * We assume the bridge to have a 1:1 mapping between devices
+ * (slots) and intr pins.
+ */
+ device = bridge_read(bc, b_int_device);
+ device &= ~(7 << (pin*3));
+ device |= (pin << (pin*3));
+ bridge_write(bc, b_int_device, device);
+
+ bridge_read(bc, b_wid_tflush);
+
+ enable_hub_irq(d);
+
+ return 0; /* Never anything pending. */
+}
+
+static void shutdown_bridge_irq(struct irq_data *d)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+ struct bridge_controller *bc;
+	int pin;
+
+	if (!hd)
+		return;
+
+	disable_hub_irq(d);
+
+	bc = hd->bc;
+	pin = hd->pin;
+	bridge_clr(bc, b_int_enable, (1 << pin));
+ bridge_read(bc, b_wid_tflush);
+}
+
+static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
+{
+ nasid_t nasid;
+ int cpu;
+
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ hd->cpu = cpu;
+ if (!cputoslice(cpu)) {
+ hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
+ hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
+ } else {
+ hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
+ hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
+ }
+
+ /* Make sure it's not already pending when we connect it. */
+ REMOTE_HUB_CLR_INTR(nasid, hd->bit);
+}
+
+static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+ struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
+
+ if (!hd)
+ return -EINVAL;
+
+ if (irqd_is_started(d))
+ disable_hub_irq(d);
+
+ setup_hub_mask(hd, mask);
+
+ if (irqd_is_started(d))
+ startup_bridge_irq(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
+
+ return 0;
+}
+
+static struct irq_chip hub_irq_type = {
+ .name = "HUB",
+ .irq_startup = startup_bridge_irq,
+ .irq_shutdown = shutdown_bridge_irq,
+ .irq_mask = disable_hub_irq,
+ .irq_unmask = enable_hub_irq,
+ .irq_set_affinity = set_affinity_hub_irq,
+};
+
+int request_bridge_irq(struct bridge_controller *bc, int pin)
{
- int b = 0, s;
+ struct hub_irq_data *hd;
+ struct hub_data *hub;
+ struct irq_desc *desc;
+ int swlevel;
+ int irq;
+
+ hd = kzalloc(sizeof(*hd), GFP_KERNEL);
+ if (!hd)
+ return -ENOMEM;
+
+ swlevel = alloc_level();
+ if (unlikely(swlevel < 0)) {
+ kfree(hd);
+ return -EAGAIN;
+ }
+ irq = swlevel + IP27_HUB_IRQ_BASE;
+
+ hd->bc = bc;
+ hd->bit = swlevel;
+ hd->pin = pin;
+ irq_set_chip_data(irq, hd);
+
+ /* use CPU connected to nearest hub */
+ hub = hub_data(NASID_TO_COMPACT_NODEID(bc->nasid));
+ setup_hub_mask(hd, &hub->h_cpus);
- s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
- s = 8; if (x >> 8 == 0) s = 0; b += s; x >>= s;
- s = 4; if (x >> 4 == 0) s = 0; b += s; x >>= s;
- s = 2; if (x >> 2 == 0) s = 0; b += s; x >>= s;
- s = 1; if (x >> 1 == 0) s = 0; b += s;
+ desc = irq_to_desc(irq);
+ desc->irq_common_data.node = bc->nasid;
+ cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);
- return b;
+ return irq;
+}
+
+void ip27_hub_irq_init(void)
+{
+ int i;
+
+ for (i = IP27_HUB_IRQ_BASE;
+ i < (IP27_HUB_IRQ_BASE + IP27_HUB_IRQ_COUNT); i++)
+ irq_set_chip_and_handler(i, &hub_irq_type, handle_level_irq);
+
+ /*
+ * Some interrupts are reserved by hardware or by software convention.
+ * Mark these as reserved right away so they won't be used accidentally
+ * later.
+ */
+ for (i = 0; i <= BASE_PCI_IRQ; i++)
+ set_bit(i, hub_irq_map);
+
+ set_bit(IP_PEND0_6_63, hub_irq_map);
+
+ for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
+ set_bit(i, hub_irq_map);
}
/*
@@ -82,23 +249,19 @@ static int ms1bit(unsigned long x)
* Kanoj 05.13.00
*/
-static void ip27_do_irq_mask0(void)
+static void ip27_do_irq_mask0(struct irq_desc *desc)
{
- int irq, swlevel;
- hubreg_t pend0, mask0;
cpuid_t cpu = smp_processor_id();
- int pi_int_mask0 =
- (cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ u64 pend0;
/* copied from Irix intpend0() */
pend0 = LOCAL_HUB_L(PI_INT_PEND0);
- mask0 = LOCAL_HUB_L(pi_int_mask0);
- pend0 &= mask0; /* Pick intrs we should look at */
+ pend0 &= mask[0]; /* Pick intrs we should look at */
if (!pend0)
return;
- swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
@@ -108,106 +271,66 @@ static void ip27_do_irq_mask0(void)
scheduler_ipi();
} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
} else
#endif
- {
- /* "map" swlevel to irq */
- struct slice_data *si = cpu_data[cpu].data;
-
- irq = si->level_to_irq[swlevel];
- do_IRQ(irq);
- }
+ generic_handle_irq(__ffs(pend0) + IP27_HUB_IRQ_BASE);
LOCAL_HUB_L(PI_INT_PEND0);
}
-static void ip27_do_irq_mask1(void)
+static void ip27_do_irq_mask1(struct irq_desc *desc)
{
- int irq, swlevel;
- hubreg_t pend1, mask1;
cpuid_t cpu = smp_processor_id();
- int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B;
- struct slice_data *si = cpu_data[cpu].data;
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ u64 pend1;
/* copied from Irix intpend0() */
pend1 = LOCAL_HUB_L(PI_INT_PEND1);
- mask1 = LOCAL_HUB_L(pi_int_mask1);
- pend1 &= mask1; /* Pick intrs we should look at */
+ pend1 &= mask[1]; /* Pick intrs we should look at */
if (!pend1)
return;
- swlevel = ms1bit(pend1);
- /* "map" swlevel to irq */
- irq = si->level_to_irq[swlevel];
- LOCAL_HUB_CLR_INTR(swlevel);
- do_IRQ(irq);
+ generic_handle_irq(__ffs(pend1) + IP27_HUB_IRQ_BASE + 64);
LOCAL_HUB_L(PI_INT_PEND1);
}
-static void ip27_prof_timer(void)
-{
- panic("CPU %d got a profiling interrupt", smp_processor_id());
-}
-
-static void ip27_hub_error(void)
-{
- panic("CPU %d got a hub error interrupt", smp_processor_id());
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned long pending = read_c0_cause() & read_c0_status();
- extern unsigned int rt_timer_irq;
-
- if (pending & CAUSEF_IP4)
- do_IRQ(rt_timer_irq);
- else if (pending & CAUSEF_IP2) /* PI_INT_PEND_0 or CC_PEND_{A|B} */
- ip27_do_irq_mask0();
- else if (pending & CAUSEF_IP3) /* PI_INT_PEND_1 */
- ip27_do_irq_mask1();
- else if (pending & CAUSEF_IP5)
- ip27_prof_timer();
- else if (pending & CAUSEF_IP6)
- ip27_hub_error();
-}
-
-void __init arch_init_irq(void)
-{
-}
-
void install_ipi(void)
{
- int slice = LOCAL_HUB_L(PI_CPU_NUM);
int cpu = smp_processor_id();
- struct slice_data *si = cpu_data[cpu].data;
- struct hub_data *hub = hub_data(cpu_to_node(cpu));
+ unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ int slice = LOCAL_HUB_L(PI_CPU_NUM);
int resched, call;
resched = CPU_RESCHED_A_IRQ + slice;
- __set_bit(resched, hub->irq_alloc_mask);
- __set_bit(resched, si->irq_enable_mask);
+ set_bit(resched, mask);
LOCAL_HUB_CLR_INTR(resched);
call = CPU_CALL_A_IRQ + slice;
- __set_bit(call, hub->irq_alloc_mask);
- __set_bit(call, si->irq_enable_mask);
+ set_bit(call, mask);
LOCAL_HUB_CLR_INTR(call);
if (slice == 0) {
- LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
- LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
+ LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
+ LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
} else {
- LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
- LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
+ LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
+ LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
}
}
+
+void __init arch_init_irq(void)
+{
+ mips_cpu_irq_init();
+ ip27_hub_irq_init();
+
+ irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
+ irq_set_chained_handler(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0);
+ irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
+ irq_set_chained_handler(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1);
+}
diff --git a/arch/mips/sgi-ip27/ip27-irqno.c b/arch/mips/sgi-ip27/ip27-irqno.c
deleted file mode 100644
index 957ab58e1c00..000000000000
--- a/arch/mips/sgi-ip27/ip27-irqno.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/types.h>
-
-#include <asm/barrier.h>
-
-static DECLARE_BITMAP(irq_map, NR_IRQS);
-
-int allocate_irqno(void)
-{
- int irq;
-
-again:
- irq = find_first_zero_bit(irq_map, NR_IRQS);
-
- if (irq >= NR_IRQS)
- return -ENOSPC;
-
- if (test_and_set_bit(irq, irq_map))
- goto again;
-
- return irq;
-}
-
-/*
- * Allocate the 16 legacy interrupts for i8259 devices. This happens early
- * in the kernel initialization so treating allocation failure as BUG() is
- * ok.
- */
-void __init alloc_legacy_irqno(void)
-{
- int i;
-
- for (i = 0; i <= 16; i++)
- BUG_ON(test_and_set_bit(i, irq_map));
-}
-
-void free_irqno(unsigned int irq)
-{
- smp_mb__before_atomic();
- clear_bit(irq, irq_map);
- smp_mb__after_atomic();
-}
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 813d13f92957..fb077a947575 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -44,7 +44,7 @@ static int is_fine_dirmode(void)
return ((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE;
}
-static hubreg_t get_region(cnodeid_t cnode)
+static u64 get_region(cnodeid_t cnode)
{
if (fine_mode)
return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
@@ -52,9 +52,9 @@ static hubreg_t get_region(cnodeid_t cnode)
return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
}
-static hubreg_t region_mask;
+static u64 region_mask;
-static void gen_region_mask(hubreg_t *region_mask)
+static void gen_region_mask(u64 *region_mask)
{
cnodeid_t cnode;
@@ -154,11 +154,11 @@ static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
}
if (router_a == NULL) {
- printk("node_distance: router_a NULL\n");
+ pr_info("node_distance: router_a NULL\n");
return -1;
}
if (router_b == NULL) {
- printk("node_distance: router_b NULL\n");
+ pr_info("node_distance: router_b NULL\n");
return -1;
}
@@ -203,17 +203,17 @@ static void __init dump_topology(void)
klrou_t *router;
cnodeid_t row, col;
- printk("************** Topology ********************\n");
+ pr_info("************** Topology ********************\n");
- printk(" ");
+ pr_info(" ");
for_each_online_node(col)
- printk("%02d ", col);
- printk("\n");
+ pr_cont("%02d ", col);
+ pr_cont("\n");
for_each_online_node(row) {
- printk("%02d ", row);
+ pr_info("%02d ", row);
for_each_online_node(col)
- printk("%2d ", node_distance(row, col));
- printk("\n");
+ pr_cont("%2d ", node_distance(row, col));
+ pr_cont("\n");
}
for_each_online_node(cnode) {
@@ -230,7 +230,7 @@ static void __init dump_topology(void)
do {
if (brd->brd_flags & DUPLICATE_BOARD)
continue;
- printk("Router %d:", router_num);
+ pr_cont("Router %d:", router_num);
router_num++;
router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
@@ -244,11 +244,11 @@ static void __init dump_topology(void)
router->rou_port[port].port_offset);
if (dest_brd->brd_type == KLTYPE_IP27)
- printk(" %d", dest_brd->brd_nasid);
+ pr_cont(" %d", dest_brd->brd_nasid);
if (dest_brd->brd_type == KLTYPE_ROUTER)
- printk(" r");
+ pr_cont(" r");
}
- printk("\n");
+ pr_cont("\n");
} while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
}
@@ -373,7 +373,7 @@ static void __init szmem(void)
if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
(slot0sz << PAGE_SHIFT)) {
- printk("Ignoring slot %d onwards on node %d\n",
+ pr_info("Ignoring slot %d onwards on node %d\n",
slot, node);
slot = MAX_MEM_SLOTS;
continue;
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index 8ac2bfa35fb6..3aae388561d9 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -62,75 +62,75 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice)
(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
slice * IP27_NMI_KREGS_CPU_SIZE);
- printk("NMI nasid %d: slice %d\n", nasid, slice);
+ pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);
/*
* Saved main processor registers
*/
for (i = 0; i < 32; ) {
if ((i % 4) == 0)
- printk("$%2d :", i);
- printk(" %016lx", nr->gpr[i]);
+ pr_emerg("$%2d :", i);
+ pr_cont(" %016lx", nr->gpr[i]);
i++;
if ((i % 4) == 0)
- printk("\n");
+ pr_cont("\n");
}
- printk("Hi : (value lost)\n");
- printk("Lo : (value lost)\n");
+ pr_emerg("Hi : (value lost)\n");
+ pr_emerg("Lo : (value lost)\n");
/*
* Saved cp0 registers
*/
- printk("epc : %016lx %pS\n", nr->epc, (void *) nr->epc);
- printk("%s\n", print_tainted());
- printk("ErrEPC: %016lx %pS\n", nr->error_epc, (void *) nr->error_epc);
- printk("ra : %016lx %pS\n", nr->gpr[31], (void *) nr->gpr[31]);
- printk("Status: %08lx ", nr->sr);
+ pr_emerg("epc : %016lx %pS\n", nr->epc, (void *)nr->epc);
+ pr_emerg("%s\n", print_tainted());
+ pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
+ pr_emerg("ra : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
+ pr_emerg("Status: %08lx ", nr->sr);
if (nr->sr & ST0_KX)
- printk("KX ");
+ pr_cont("KX ");
if (nr->sr & ST0_SX)
- printk("SX ");
+ pr_cont("SX ");
if (nr->sr & ST0_UX)
- printk("UX ");
+ pr_cont("UX ");
switch (nr->sr & ST0_KSU) {
case KSU_USER:
- printk("USER ");
+ pr_cont("USER ");
break;
case KSU_SUPERVISOR:
- printk("SUPERVISOR ");
+ pr_cont("SUPERVISOR ");
break;
case KSU_KERNEL:
- printk("KERNEL ");
+ pr_cont("KERNEL ");
break;
default:
- printk("BAD_MODE ");
+ pr_cont("BAD_MODE ");
break;
}
if (nr->sr & ST0_ERL)
- printk("ERL ");
+ pr_cont("ERL ");
if (nr->sr & ST0_EXL)
- printk("EXL ");
+ pr_cont("EXL ");
if (nr->sr & ST0_IE)
- printk("IE ");
- printk("\n");
+ pr_cont("IE ");
+ pr_cont("\n");
- printk("Cause : %08lx\n", nr->cause);
- printk("PrId : %08x\n", read_c0_prid());
- printk("BadVA : %016lx\n", nr->badva);
- printk("CErr : %016lx\n", nr->cache_err);
- printk("NMI_SR: %016lx\n", nr->nmi_sr);
+ pr_emerg("Cause : %08lx\n", nr->cause);
+ pr_emerg("PrId : %08x\n", read_c0_prid());
+ pr_emerg("BadVA : %016lx\n", nr->badva);
+ pr_emerg("CErr : %016lx\n", nr->cache_err);
+ pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);
- printk("\n");
+ pr_emerg("\n");
}
void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
- hubreg_t mask0, mask1, pend0, pend1;
+ u64 mask0, mask1, pend0, pend1;
if (slice == 0) { /* Slice A */
mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
@@ -143,9 +143,9 @@ void nmi_dump_hub_irq(nasid_t nasid, int slice)
pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
- printk("PI_INT_MASK0: %16Lx PI_INT_MASK1: %16Lx\n", mask0, mask1);
- printk("PI_INT_PEND0: %16Lx PI_INT_PEND1: %16Lx\n", pend0, pend1);
- printk("\n\n");
+ pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
+ pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
+ pr_emerg("\n\n");
}
/*
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 545446dfe7fa..20b81209c6b8 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -177,7 +177,7 @@ static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
ip27_send_ipi_single(i, action);
}
-static void ip27_init_secondary(void)
+static void ip27_init_cpu(void)
{
per_cpu_init();
}
@@ -235,9 +235,10 @@ static void __init ip27_prepare_cpus(unsigned int max_cpus)
const struct plat_smp_ops ip27_smp_ops = {
.send_ipi_single = ip27_send_ipi_single,
.send_ipi_mask = ip27_send_ipi_mask,
- .init_secondary = ip27_init_secondary,
+ .init_secondary = ip27_init_cpu,
.smp_finish = ip27_smp_finish,
.boot_secondary = ip27_boot_secondary,
.smp_setup = ip27_smp_setup,
.prepare_cpus = ip27_prepare_cpus,
+ .prepare_boot_cpu = ip27_init_cpu,
};
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 9d55247533a5..9b4b9ac621a3 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -38,20 +38,6 @@
#include <asm/sn/sn0/hubio.h>
#include <asm/pci/bridge.h>
-static void enable_rt_irq(struct irq_data *d)
-{
-}
-
-static void disable_rt_irq(struct irq_data *d)
-{
-}
-
-static struct irq_chip rt_irq_type = {
- .name = "SN HUB RT timer",
- .irq_mask = disable_rt_irq,
- .irq_unmask = enable_rt_irq,
-};
-
static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
@@ -65,8 +51,6 @@ static int rt_next_event(unsigned long delta, struct clock_event_device *evt)
return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
}
-unsigned int rt_timer_irq;
-
static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
static DEFINE_PER_CPU(char [11], hub_rt_name);
@@ -87,6 +71,7 @@ static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
struct irqaction hub_rt_irqaction = {
.handler = hub_rt_counter_handler,
+ .percpu_dev_id = &hub_rt_clockevent,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "hub-rt",
};
@@ -107,7 +92,6 @@ void hub_rt_clock_event_init(void)
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
unsigned char *name = per_cpu(hub_rt_name, cpu);
- int irq = rt_timer_irq;
sprintf(name, "hub-rt %d", cpu);
cd->name = name;
@@ -118,29 +102,19 @@ void hub_rt_clock_event_init(void)
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->min_delta_ticks = 0x300;
cd->rating = 200;
- cd->irq = irq;
+ cd->irq = IP27_RT_TIMER_IRQ;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = rt_next_event;
clockevents_register_device(cd);
+
+ enable_percpu_irq(IP27_RT_TIMER_IRQ, IRQ_TYPE_NONE);
}
static void __init hub_rt_clock_event_global_init(void)
{
- int irq;
-
- do {
- smp_wmb();
- irq = rt_timer_irq;
- if (irq)
- break;
-
- irq = allocate_irqno();
- if (irq < 0)
- panic("Allocation of irq number for timer failed");
- } while (xchg(&rt_timer_irq, irq));
-
- irq_set_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
- setup_irq(irq, &hub_rt_irqaction);
+ irq_set_handler(IP27_RT_TIMER_IRQ, handle_percpu_devid_irq);
+ irq_set_percpu_devid(IP27_RT_TIMER_IRQ);
+ setup_percpu_irq(IP27_RT_TIMER_IRQ, &hub_rt_irqaction);
}
static u64 hub_rt_read(struct clocksource *cs)
@@ -194,8 +168,6 @@ void cpu_time_init(void)
panic("No information about myself?");
printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
-
- set_c0_status(SRB_TIMOCLK);
}
void hub_rtc_init(cnodeid_t cnode)
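
The ip27-timer.c hunks above drop the dynamically allocated timer IRQ and its dummy irq_chip in favour of a fixed IP27_RT_TIMER_IRQ that is registered once as a per-CPU (percpu-devid) interrupt and then enabled from hub_rt_clock_event_init() on each CPU. The general shape of that pattern, as a sketch only — MY_TIMER_IRQ, my_clockevent and the handler are placeholder names, and request_percpu_irq() stands in for the setup_percpu_irq()/irqaction pair the patch uses:

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/printk.h>

static DEFINE_PER_CPU(struct clock_event_device, my_clockevent);

static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *cd = dev_id;	/* this CPU's instance */

	cd->event_handler(cd);
	return IRQ_HANDLED;
}

static void __init my_timer_global_init(void)	/* once, on the boot CPU */
{
	irq_set_percpu_devid(MY_TIMER_IRQ);
	irq_set_handler(MY_TIMER_IRQ, handle_percpu_devid_irq);
	if (request_percpu_irq(MY_TIMER_IRQ, my_timer_interrupt,
			       "my-timer", &my_clockevent))
		pr_err("my-timer: failed to request per-CPU IRQ\n");
}

static void my_timer_cpu_init(void)	/* on every CPU that comes online */
{
	enable_percpu_irq(MY_TIMER_IRQ, IRQ_TYPE_NONE);
}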
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 4fe5678ba74d..ce06aaa115ae 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -99,7 +99,7 @@ static int xbow_probe(nasid_t nasid)
return 0;
}
-void xtalk_probe_node(cnodeid_t nid)
+static void xtalk_probe_node(cnodeid_t nid)
{
volatile u64 hubreg;
nasid_t nasid;
@@ -133,3 +133,14 @@ void xtalk_probe_node(cnodeid_t nid)
break;
}
}
+
+static int __init xtalk_init(void)
+{
+ cnodeid_t cnode;
+
+ for_each_online_node(cnode)
+ xtalk_probe_node(cnode);
+
+ return 0;
+}
+arch_initcall(xtalk_init);
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index f6fd340e39c2..0ede4deb8181 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -8,6 +8,7 @@ ccflags-vdso := \
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
$(filter -march=%,$(KBUILD_CFLAGS)) \
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
-D__VDSO__
ifdef CONFIG_CC_IS_CLANG
@@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
@@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index dda1906bba11..addb7f5f5264 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -5,6 +5,7 @@
config NDS32
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_WANT_FRAME_POINTERS if FTRACE
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 0a935c136ec2..ac3482882cf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
KBUILD_DEFCONFIG := defconfig
-comma = ,
-
-
ifdef CONFIG_FUNCTION_TRACER
arch-y += -malways-save-lp -mno-relax
endif
@@ -54,8 +51,6 @@ endif
boot := arch/nds32/boot
core-y += $(boot)/dts/
-.PHONY: FORCE
-
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@@ -68,9 +63,6 @@ prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
-CLEAN_FILES += include/asm-nds32/constants.h*
-
-# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h
index 53dcb49b0b12..116598b47c4d 100644
--- a/arch/nds32/include/asm/uaccess.h
+++ b/arch/nds32/include/asm/uaccess.h
@@ -37,7 +37,6 @@ extern int fixup_exception(struct pt_regs *regs);
#define KERNEL_DS ((mm_segment_t) { ~0UL })
#define USER_DS ((mm_segment_t) {TASK_SIZE - 1})
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define user_addr_max get_fs
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index c2c3a3e34083..4ec8f543103f 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -3,6 +3,8 @@
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYNC_FILE_RANGE2
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
/* Use the standard ABI for syscalls */
#include <asm-generic/unistd.h>
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
index ab7ab46234b1..9712fd474f2c 100644
--- a/arch/nds32/kernel/process.c
+++ b/arch/nds32/kernel/process.c
@@ -121,7 +121,7 @@ void show_regs(struct pt_regs *regs)
regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
pr_info(" IRQs o%s Segment %s\n",
interrupts_enabled(regs) ? "n" : "ff",
- segment_eq(get_fs(), get_ds())? "kernel" : "user");
+ segment_eq(get_fs(), KERNEL_DS)? "kernel" : "user");
}
EXPORT_SYMBOL(show_regs);
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 532343eebf89..c3e913ef4f0c 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config NIOS2
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_SWAP
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index e0ea10806491..e83f831a76f9 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -26,7 +26,6 @@
#define USER_DS MAKE_MM_SEG(0x80000000UL)
#define KERNEL_DS MAKE_MM_SEG(0)
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(seg) (current_thread_info()->addr_limit = (seg))
diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h
index d9948d88790b..0b4bb1d41b28 100644
--- a/arch/nios2/include/uapi/asm/unistd.h
+++ b/arch/nios2/include/uapi/asm/unistd.h
@@ -20,6 +20,8 @@
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
/* Use the standard ABI for syscalls */
#include <asm-generic/unistd.h>
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 09ab59e942ae..a5e361fbb75a 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -6,6 +6,7 @@
config OPENRISC
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 70e06d34006c..bf10141c7426 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -20,7 +20,6 @@
KBUILD_DEFCONFIG := or1ksim_defconfig
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
-LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
@@ -50,5 +49,3 @@ else
BUILTIN_DTB := n
endif
core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
-
-all: vmlinux
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index eb87cd8327c8..1f04844b6b82 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -34,6 +34,7 @@ generic-y += qrwlock_types.h
generic-y += qrwlock.h
generic-y += sections.h
generic-y += segment.h
+generic-y += shmparam.h
generic-y += string.h
generic-y += switch_to.h
generic-y += topology.h
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index bc8191a34db7..45afd9ab78c1 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -42,7 +42,6 @@
*/
#define KERNEL_DS (~0UL)
-#define get_ds() (KERNEL_DS)
#define USER_DS (TASK_SIZE)
#define get_fs() (current_thread_info()->addr_limit)
@@ -58,8 +57,12 @@
/* Ensure that addr is below task's addr_limit */
#define __addr_ok(addr) ((unsigned long) addr < get_fs())
-#define access_ok(addr, size) \
- __range_ok((unsigned long)addr, (unsigned long)size)
+#define access_ok(addr, size) \
+({ \
+ unsigned long __ao_addr = (unsigned long)(addr); \
+ unsigned long __ao_size = (unsigned long)(size); \
+ __range_ok(__ao_addr, __ao_size); \
+})
/*
* These are the main single-value transfer routines. They automatically
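
For context on the access_ok() hunk above: the old one-line expansion handed its arguments to __range_ok() without surrounding parentheses, and __range_ok() mentions them more than once, so a composite or side-effecting argument could misbehave. The statement-expression form parenthesises each argument and evaluates it exactly once. Illustration only, not part of the patch:

/*
 * For an int __user *buf:
 *
 *   old:  __range_ok((unsigned long)buf + off, ...)
 *           - the cast binds to "buf" alone, so "off" is added without
 *             pointer scaling, and a side effect in an argument can run
 *             once per mention inside __range_ok()
 *   new:  __ao_addr = (unsigned long)(buf + off);
 *           - fully parenthesised and evaluated a single time
 */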
diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h
index ec37df18d8ed..566f8c4f8047 100644
--- a/arch/openrisc/include/uapi/asm/unistd.h
+++ b/arch/openrisc/include/uapi/asm/unistd.h
@@ -21,8 +21,10 @@
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7ca2c3ebad64..c8e621296092 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config PARISC
def_bool y
+ select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE
select HAVE_OPROFILE
diff --git a/arch/parisc/boot/Makefile b/arch/parisc/boot/Makefile
index cad68a584884..41cce0706f80 100644
--- a/arch/parisc/boot/Makefile
+++ b/arch/parisc/boot/Makefile
@@ -2,12 +2,6 @@
# Makefile for the linux parisc-specific parts of the boot image creator.
#
-COMPILE_VERSION := __linux_compile_version_id__`hostname | \
- tr -c '[0-9A-Za-z]' '_'`__`date | \
- tr -c '[0-9A-Za-z]' '_'`_t
-
-ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
-
targets := image
targets += bzImage
subdir- := compressed
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 44a9f97194aa..d5bd94247371 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -2,8 +2,6 @@
#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H
-#include <asm/cacheflush.h>
-
/*
** We need to support 4 different coherent dma models with one binary:
**
@@ -28,48 +26,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return hppa_dma_ops;
}
-static inline void *
-parisc_walk_tree(struct device *dev)
-{
- struct device *otherdev;
- if(likely(dev->platform_data != NULL))
- return dev->platform_data;
- /* OK, just traverse the bus to find it */
- for(otherdev = dev->parent; otherdev;
- otherdev = otherdev->parent) {
- if(otherdev->platform_data) {
- dev->platform_data = otherdev->platform_data;
- break;
- }
- }
- return dev->platform_data;
-}
-
-#define GET_IOC(dev) ({ \
- void *__pdata = parisc_walk_tree(dev); \
- __pdata ? HBA_DATA(__pdata)->iommu : NULL; \
-})
-
-#ifdef CONFIG_IOMMU_CCIO
-struct parisc_device;
-struct ioc;
-void * ccio_get_iommu(const struct parisc_device *dev);
-int ccio_request_resource(const struct parisc_device *dev,
- struct resource *res);
-int ccio_allocate_resource(const struct parisc_device *dev,
- struct resource *res, unsigned long size,
- unsigned long min, unsigned long max, unsigned long align);
-#else /* !CONFIG_IOMMU_CCIO */
-#define ccio_get_iommu(dev) NULL
-#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
-#define ccio_allocate_resource(dev, res, size, min, max, align) \
- allocate_resource(&iomem_resource, res, size, min, max, \
- align, NULL, NULL)
-#endif /* !CONFIG_IOMMU_CCIO */
-
-#ifdef CONFIG_IOMMU_SBA
-struct parisc_device;
-void * sba_get_iommu(struct parisc_device *dev);
-#endif
-
#endif
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 1a1235a9d533..7f7039516e53 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -22,6 +22,7 @@ typedef struct {
unsigned int irq_stack_usage;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
+ unsigned int irq_call_count;
#endif
unsigned int irq_unaligned_count;
unsigned int irq_fpassist_count;
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index afe493b23d04..30a8315d5c07 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -311,6 +311,15 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
* value for either 32 or 64 bit mode */
#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
+#define ioread64 ioread64
+#define ioread64be ioread64be
+#define iowrite64 iowrite64
+#define iowrite64be iowrite64be
+extern u64 ioread64(void __iomem *addr);
+extern u64 ioread64be(void __iomem *addr);
+extern void iowrite64(u64 val, void __iomem *addr);
+extern void iowrite64be(u64 val, void __iomem *addr);
+
#include <asm-generic/iomap.h>
/*
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index 3328fd17c19d..f14465b84de4 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -56,7 +56,7 @@ struct pci_hba_data {
#define DINO_MAX_LMMIO_RESOURCES 3
unsigned long lmmio_space_offset; /* CPU view - PCI view */
- void * iommu; /* IOMMU this device is under */
+ struct ioc *iommu; /* IOMMU this device is under */
/* REVISIT - spinlock to protect resources? */
#define HBA_NAME_SIZE 16
@@ -66,8 +66,6 @@ struct pci_hba_data {
char gmmio_name[HBA_NAME_SIZE];
};
-#define HBA_DATA(d) ((struct pci_hba_data *) (d))
-
/*
** We support 2^16 I/O ports per HBA. These are set up in the form
** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 5b187d40d604..19bb2e46cd36 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -44,6 +44,7 @@ int pdc_model_sysmodel(char *name);
int pdc_model_cpuid(unsigned long *cpu_id);
int pdc_model_versions(unsigned long *versions, int id);
int pdc_model_capabilities(unsigned long *capabilities);
+int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
#ifndef CONFIG_PA20
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
index bce9ee1c1c99..24355ed1453a 100644
--- a/arch/parisc/include/asm/pdcpat.h
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -67,6 +67,10 @@
#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
+/* PDC PAT COMPLEX */
+
+#define PDC_PAT_COMPLEX 66L
+
/* PDC PAT CPU -- CPU configuration within the protection domain */
#define PDC_PAT_CPU 67L
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 30ac2865ea73..ebbb9ffe038c 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -16,7 +16,6 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index c2c2afb28941..b0838dc4dfee 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -10,11 +10,7 @@
#define SYS_ify(syscall_name) __NR_##syscall_name
-#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
-#define __IGNORE_pkey_mprotect
-#define __IGNORE_pkey_alloc
-#define __IGNORE_pkey_free
#ifndef ASM_LINE_SEP
# define ASM_LINE_SEP ;
@@ -156,10 +152,8 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_SYS_TIME32
#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
-#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
@@ -174,6 +168,11 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#ifdef CONFIG_64BIT
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#endif
+
#endif /* __ASSEMBLY__ */
#undef STR
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index 870fbf8c7088..c98162f494db 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -10,9 +10,7 @@
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
#define MAP_TYPE 0x2b /* Mask for type of mapping, includes bits 0x08 and 0x20 */
#define MAP_FIXED 0x04 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x10 /* don't use a file */
diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
index 593eeb573138..15211723ebf5 100644
--- a/arch/parisc/include/uapi/asm/pdc.h
+++ b/arch/parisc/include/uapi/asm/pdc.h
@@ -60,6 +60,8 @@
#define PDC_MODEL_NVA_UNSUPPORTED (3 << 4)
#define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */
#define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */
+#define PDC_MODEL_GET_PLATFORM_INFO 10 /* returns platform info */
+#define PDC_MODEL_GET_INSTALL_KERNEL 11 /* returns kernel for installation */
#define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */
#define PA90_INSTRUCTION_SET 0x8
@@ -99,7 +101,7 @@
#define PDC_TOD 9 /* time-of-day clock (TOD) */
#define PDC_TOD_READ 0 /* read TOD */
#define PDC_TOD_WRITE 1 /* write TOD */
-
+#define PDC_TOD_CALIBRATE 2 /* calibrate timers */
#define PDC_STABLE 10 /* stable storage (sprockets) */
#define PDC_STABLE_READ 0
@@ -109,15 +111,22 @@
#define PDC_STABLE_INITIALIZE 4
#define PDC_NVOLATILE 11 /* often not implemented */
+#define PDC_NVOLATILE_READ 0
+#define PDC_NVOLATILE_WRITE 1
+#define PDC_NVOLATILE_RETURN_SIZE 2
+#define PDC_NVOLATILE_VERIFY_CONTENTS 3
+#define PDC_NVOLATILE_INITIALIZE 4
#define PDC_ADD_VALID 12 /* Memory validation PDC call */
#define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */
+#define PDC_DEBUG 14 /* Obsolete */
+
#define PDC_INSTR 15 /* get instr to invoke PDCE_CHECK() */
#define PDC_PROC 16 /* (sprockets) */
-#define PDC_CONFIG 16 /* (sprockets) */
+#define PDC_CONFIG 17 /* (sprockets) */
#define PDC_CONFIG_DECONFIG 0
#define PDC_CONFIG_DRECONFIG 1
#define PDC_CONFIG_DRETURN_CONFIG 2
@@ -167,6 +176,15 @@
#define PDC_SOFT_POWER_INFO 0 /* return info about the soft power switch */
#define PDC_SOFT_POWER_ENABLE 1 /* enable/disable soft power switch */
+#define PDC_ALLOC 24 /* allocate static storage for PDC & IODC */
+
+#define PDC_CRASH_PREP 25 /* Prepare system for crash dump */
+#define PDC_CRASH_DUMP 0 /* Do platform specific preparations for dump */
+#define PDC_CRASH_LOG_CEC_ERROR 1 /* Dump hardware registers */
+
+#define PDC_SCSI_PARMS 26 /* Get and set SCSI parameters */
+#define PDC_SCSI_GET_PARMS 0 /* Get SCSI parameters for I/O device */
+#define PDC_SCSI_SET_PARMS 1 /* Set SCSI parameters for I/O device */
/* HVERSION dependent */
@@ -260,6 +278,10 @@
#define PDC_PCI_READ_MON_TYPE 15
#define PDC_PCI_WRITE_MON_TYPE 16
+#define PDC_RELOCATE 149 /* (sprockets) */
+#define PDC_RELOCATE_GET_RELOCINFO 0
+#define PDC_RELOCATE_CHECKSUM 1
+#define PDC_RELOCATE_RELOCATE 2
/* Get SCSI Interface Card info: SDTR, SCSI ID, mode (SE vs LVD) */
#define PDC_INITIATOR 163
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 061b9cf2a779..16e428f03526 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -3,6 +3,7 @@
#define _UAPI_ASM_SOCKET_H
#include <asm/sockios.h>
+#include <asm/bitsperlong.h>
/* For setsockopt(2) */
#define SOL_SOCKET 0xffff
@@ -21,8 +22,8 @@
#define SO_RCVBUFFORCE 0x100b
#define SO_SNDLOWAT 0x1003
#define SO_RCVLOWAT 0x1004
-#define SO_SNDTIMEO 0x1005
-#define SO_RCVTIMEO 0x1006
+#define SO_SNDTIMEO_OLD 0x1005
+#define SO_RCVTIMEO_OLD 0x1006
#define SO_ERROR 0x1007
#define SO_TYPE 0x1008
#define SO_PROTOCOL 0x1028
@@ -34,10 +35,6 @@
#define SO_BSDCOMPAT 0x400e
#define SO_PASSCRED 0x4010
#define SO_PEERCRED 0x4011
-#define SO_TIMESTAMP 0x4012
-#define SCM_TIMESTAMP SO_TIMESTAMP
-#define SO_TIMESTAMPNS 0x4013
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x4016
@@ -58,9 +55,6 @@
#define SO_MARK 0x401f
-#define SO_TIMESTAMPING 0x4020
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
#define SO_RXQ_OVFL 0x4021
#define SO_WIFI_STATUS 0x4022
@@ -107,4 +101,40 @@
#define SO_TXTIME 0x4036
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 0x4037
+
+#define SO_TIMESTAMP_OLD 0x4012
+#define SO_TIMESTAMPNS_OLD 0x4013
+#define SO_TIMESTAMPING_OLD 0x4020
+
+#define SO_TIMESTAMP_NEW 0x4038
+#define SO_TIMESTAMPNS_NEW 0x4039
+#define SO_TIMESTAMPING_NEW 0x403A
+
+#define SO_RCVTIMEO_NEW 0x4040
+#define SO_SNDTIMEO_NEW 0x4041
+
+#if !defined(__KERNEL__)
+
+#if __BITS_PER_LONG == 64
+#define SO_TIMESTAMP SO_TIMESTAMP_OLD
+#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD
+#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD
+#define SO_RCVTIMEO SO_RCVTIMEO_OLD
+#define SO_SNDTIMEO SO_SNDTIMEO_OLD
+#else
+#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW)
+#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW)
+#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW)
+
+#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW)
+#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW)
+#endif
+
+#define SCM_TIMESTAMP SO_TIMESTAMP
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#endif
+
#endif /* _UAPI_ASM_SOCKET_H */
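
The _OLD/_NEW split above lets 32-bit parisc userspace built with a 64-bit time_t pick the time64-capable socket options, while 64-bit and legacy 32-bit builds keep the historical values. A userspace sketch of how the selection resolves — illustrative only, and it assumes the exported kernel uapi headers are on the include path:

#include <stdio.h>
#include <time.h>
#include <linux/posix_types.h>	/* __kernel_long_t, used by the selector */
#include <asm/socket.h>		/* SO_RCVTIMEO{,_OLD,_NEW} */

int main(void)
{
	printf("SO_RCVTIMEO resolves to %#x (%s constant)\n", SO_RCVTIMEO,
	       SO_RCVTIMEO == SO_RCVTIMEO_OLD ? "old" : "new time64");
	return 0;
}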
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index e6f3b49f2fd7..7a17551ea31e 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -569,6 +569,30 @@ int pdc_model_capabilities(unsigned long *capabilities)
}
/**
+ * pdc_model_platform_info - Returns machine product and serial number.
+ * @orig_prod_num: Return buffer for original product number.
+ * @current_prod_num: Return buffer for current product number.
+ * @serial_no: Return buffer for serial number.
+ *
+ * Returns strings containing the original and current product numbers and the
+ * serial number of the system.
+ */
+int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num,
+ char *serial_no)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_GET_PLATFORM_INFO,
+ __pa(orig_prod_num), __pa(current_prod_num), __pa(serial_no));
+ convert_to_wide(pdc_result);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
* pdc_cache_info - Return cache and TLB information.
* @cache_info: The return buffer.
*
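
The wrapper above hands PDC the physical addresses of its three buffers via __pa(), so callers pass ordinary directly-mapped kernel buffers and read the product and serial strings back from them. A minimal caller sketch — essentially what the collect_boot_cpu_data() hunk further down does; the 64-byte buffer size is taken from that hunk, not mandated by PDC:

char orig_prod[64], cur_prod[64], serial[64];

if (pdc_model_platform_info(orig_prod, cur_prod, serial) == PDC_OK)
	pr_info("product %s (originally %s), S/N %s\n",
		cur_prod, orig_prod, serial);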
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 0ca254085a66..23040a67583e 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -117,7 +117,10 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
return -EINVAL;
/* whatever mask they set, we just allow one CPU */
- cpu_dest = cpumask_first_and(dest, cpu_online_mask);
+ cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1),
+ dest, cpu_online_mask);
+ if (cpu_dest >= nr_cpu_ids)
+ cpu_dest = cpumask_first_and(dest, cpu_online_mask);
return cpu_dest;
}
@@ -175,10 +178,16 @@ int arch_show_interrupts(struct seq_file *p, int prec)
# endif
#endif
#ifdef CONFIG_SMP
- seq_printf(p, "%*s: ", prec, "RES");
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
- seq_puts(p, " Rescheduling interrupts\n");
+ if (num_online_cpus() > 1) {
+ seq_printf(p, "%*s: ", prec, "RES");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+ seq_puts(p, " Rescheduling interrupts\n");
+ seq_printf(p, "%*s: ", prec, "CAL");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+ seq_puts(p, " Function call interrupts\n");
+ }
#endif
seq_printf(p, "%*s: ", prec, "UAH");
for_each_online_cpu(j)
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 82bd0d0927ce..7f4d042856b5 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -242,6 +242,7 @@ static int __init processor_probe(struct parisc_device *dev)
void __init collect_boot_cpu_data(void)
{
unsigned long cr16_seed;
+ char orig_prod_num[64], current_prod_num[64], serial_no[64];
memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
@@ -301,6 +302,14 @@ void __init collect_boot_cpu_data(void)
_parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) ||
(boot_cpu_data.cpu_type == mako2);
#endif
+
+ if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) {
+ printk(KERN_INFO "product %s, original product %s, S/N: %s\n",
+ current_prod_num, orig_prod_num, serial_no);
+ add_device_randomness(orig_prod_num, strlen(orig_prod_num));
+ add_device_randomness(current_prod_num, strlen(current_prod_num));
+ add_device_randomness(serial_no, strlen(serial_no));
+ }
}
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 2582df1c529b..0964c236e3e5 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs)) {
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ int rc = tracehook_report_syscall_entry(regs);
+
/*
- * Tracing decided this syscall should not happen or the
- * debugger stored an invalid system call number. Skip
- * the system call and the system call restart handling.
+ * As tracesys_next does not set %r28 to -ENOSYS
+ * when %r20 is set to -1, initialize it here.
*/
- regs->gr[20] = -1UL;
- goto out;
+ regs->gr[28] = -ENOSYS;
+
+ if (rc) {
+ /*
+ * A nonzero return code from
+ * tracehook_report_syscall_entry() tells us
+ * to prevent the syscall execution. Skip
+ * the syscall and the syscall restart handling.
+ *
+ * Note that the tracer may also just change
+ * regs->gr[20] to an invalid syscall number;
+ * that case is handled by tracesys_next.
+ */
+ regs->gr[20] = -1UL;
+ return -1;
+ }
}
/* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
-out:
/*
* Sign extend the syscall number to 64bit since it may have been
* modified by a compat ptrace call
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index f2cf86ac279b..15dd9e21be7e 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -40,6 +40,7 @@
#include <linux/sched/clock.h>
#include <linux/start_kernel.h>
+#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/pdc.h>
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 5e26dbede5fc..d9e2d69c9e48 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -155,6 +155,7 @@ ipi_interrupt(int irq, void *dev_id)
case IPI_CALL_FUNC:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
+ inc_irq_stat(irq_call_count);
generic_smp_call_function_interrupt();
break;
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 9bbd2f9f56c8..b26766c6647d 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -20,7 +20,8 @@
10 common unlink sys_unlink
11 common execve sys_execve compat_sys_execve
12 common chdir sys_chdir
-13 common time sys_time compat_sys_time
+13 32 time sys_time32
+13 64 time sys_time
14 common mknod sys_mknod
15 common chmod sys_chmod
16 common lchown sys_lchown
@@ -32,12 +33,14 @@
22 common bind sys_bind
23 common setuid sys_setuid
24 common getuid sys_getuid
-25 common stime sys_stime compat_sys_stime
+25 32 stime sys_stime32
+25 64 stime sys_stime
26 common ptrace sys_ptrace compat_sys_ptrace
27 common alarm sys_alarm
28 common fstat sys_newfstat compat_sys_newfstat
29 common pause sys_pause
-30 common utime sys_utime compat_sys_utime
+30 32 utime sys_utime32
+30 64 utime sys_utime
31 common connect sys_connect
32 common listen sys_listen
33 common access sys_access
@@ -133,7 +136,8 @@
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
123 common recvfrom sys_recvfrom
-124 common adjtimex sys_adjtimex compat_sys_adjtimex
+124 32 adjtimex sys_adjtimex_time32
+124 64 adjtimex sys_adjtimex
125 common mprotect sys_mprotect
126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
# 127 was create_module
@@ -171,8 +175,10 @@
158 common sched_yield sys_sched_yield
159 common sched_get_priority_max sys_sched_get_priority_max
160 common sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
-162 common nanosleep sys_nanosleep compat_sys_nanosleep
+161 32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+161 64 sched_rr_get_interval sys_sched_rr_get_interval
+162 32 nanosleep sys_nanosleep_time32
+162 64 nanosleep sys_nanosleep
163 common mremap sys_mremap
164 common setresuid sys_setresuid
165 common getresuid sys_getresuid
@@ -187,7 +193,8 @@
174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+177 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+177 64 rt_sigtimedwait sys_rt_sigtimedwait
178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
180 common chown sys_chown
@@ -223,14 +230,16 @@
207 64 readahead sys_readahead
208 common tkill sys_tkill
209 common sendfile64 sys_sendfile64 compat_sys_sendfile64
-210 common futex sys_futex compat_sys_futex
+210 32 futex sys_futex_time32
+210 64 futex sys_futex
211 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
212 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
# 213 was set_thread_area
# 214 was get_thread_area
215 common io_setup sys_io_setup compat_sys_io_setup
216 common io_destroy sys_io_destroy
-217 common io_getevents sys_io_getevents compat_sys_io_getevents
+217 32 io_getevents sys_io_getevents_time32
+217 64 io_getevents sys_io_getevents
218 common io_submit sys_io_submit compat_sys_io_submit
219 common io_cancel sys_io_cancel
# 220 was alloc_hugepages
@@ -241,11 +250,14 @@
225 common epoll_ctl sys_epoll_ctl
226 common epoll_wait sys_epoll_wait
227 common remap_file_pages sys_remap_file_pages
-228 common semtimedop sys_semtimedop compat_sys_semtimedop
+228 32 semtimedop sys_semtimedop_time32
+228 64 semtimedop sys_semtimedop
229 common mq_open sys_mq_open compat_sys_mq_open
230 common mq_unlink sys_mq_unlink
-231 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
-232 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+231 32 mq_timedsend sys_mq_timedsend_time32
+231 64 mq_timedsend sys_mq_timedsend
+232 32 mq_timedreceive sys_mq_timedreceive_time32
+232 64 mq_timedreceive sys_mq_timedreceive
233 common mq_notify sys_mq_notify compat_sys_mq_notify
234 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
235 common waitid sys_waitid compat_sys_waitid
@@ -265,14 +277,20 @@
248 common lremovexattr sys_lremovexattr
249 common fremovexattr sys_fremovexattr
250 common timer_create sys_timer_create compat_sys_timer_create
-251 common timer_settime sys_timer_settime compat_sys_timer_settime
-252 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+251 32 timer_settime sys_timer_settime32
+251 64 timer_settime sys_timer_settime
+252 32 timer_gettime sys_timer_gettime32
+252 64 timer_gettime sys_timer_gettime
253 common timer_getoverrun sys_timer_getoverrun
254 common timer_delete sys_timer_delete
-255 common clock_settime sys_clock_settime compat_sys_clock_settime
-256 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
-257 common clock_getres sys_clock_getres compat_sys_clock_getres
-258 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+255 32 clock_settime sys_clock_settime32
+255 64 clock_settime sys_clock_settime
+256 32 clock_gettime sys_clock_gettime32
+256 64 clock_gettime sys_clock_gettime
+257 32 clock_getres sys_clock_getres_time32
+257 64 clock_getres sys_clock_getres
+258 32 clock_nanosleep sys_clock_nanosleep_time32
+258 64 clock_nanosleep sys_clock_nanosleep
259 common tgkill sys_tgkill
260 common mbind sys_mbind compat_sys_mbind
261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
@@ -287,13 +305,16 @@
270 common inotify_add_watch sys_inotify_add_watch
271 common inotify_rm_watch sys_inotify_rm_watch
272 common migrate_pages sys_migrate_pages
-273 common pselect6 sys_pselect6 compat_sys_pselect6
-274 common ppoll sys_ppoll compat_sys_ppoll
+273 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+273 64 pselect6 sys_pselect6
+274 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+274 64 ppoll sys_ppoll
275 common openat sys_openat compat_sys_openat
276 common mkdirat sys_mkdirat
277 common mknodat sys_mknodat
278 common fchownat sys_fchownat
-279 common futimesat sys_futimesat compat_sys_futimesat
+279 32 futimesat sys_futimesat_time32
+279 64 futimesat sys_futimesat
280 common fstatat64 sys_fstatat64
281 common unlinkat sys_unlinkat
282 common renameat sys_renameat
@@ -316,15 +337,18 @@
298 common statfs64 sys_statfs64 compat_sys_statfs64
299 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
300 common kexec_load sys_kexec_load compat_sys_kexec_load
-301 common utimensat sys_utimensat compat_sys_utimensat
+301 32 utimensat sys_utimensat_time32
+301 64 utimensat sys_utimensat
302 common signalfd sys_signalfd compat_sys_signalfd
# 303 was timerfd
304 common eventfd sys_eventfd
305 32 fallocate parisc_fallocate
305 64 fallocate sys_fallocate
306 common timerfd_create sys_timerfd_create
-307 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
-308 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+307 32 timerfd_settime sys_timerfd_settime32
+307 64 timerfd_settime sys_timerfd_settime
+308 32 timerfd_gettime sys_timerfd_gettime32
+308 64 timerfd_gettime sys_timerfd_gettime
309 common signalfd4 sys_signalfd4 compat_sys_signalfd4
310 common eventfd2 sys_eventfd2
311 common epoll_create1 sys_epoll_create1
@@ -335,12 +359,14 @@
316 common pwritev sys_pwritev compat_sys_pwritev
317 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
318 common perf_event_open sys_perf_event_open
-319 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+319 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+319 64 recvmmsg sys_recvmmsg
320 common accept4 sys_accept4
321 common prlimit64 sys_prlimit64
322 common fanotify_init sys_fanotify_init
323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
-324 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+324 32 clock_adjtime sys_clock_adjtime32
+324 64 clock_adjtime sys_clock_adjtime
325 common name_to_handle_at sys_name_to_handle_at
326 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
327 common syncfs sys_syncfs
@@ -352,7 +378,8 @@
333 common finit_module sys_finit_module
334 common sched_setattr sys_sched_setattr
335 common sched_getattr sys_sched_getattr
-336 common utimes sys_utimes compat_sys_utimes
+336 32 utimes sys_utimes_time32
+336 64 utimes sys_utimes
337 common renameat2 sys_renameat2
338 common seccomp sys_seccomp
339 common getrandom sys_getrandom
@@ -366,4 +393,30 @@
347 common preadv2 sys_preadv2 compat_sys_preadv2
348 common pwritev2 sys_pwritev2 compat_sys_pwritev2
349 common statx sys_statx
-350 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
+350 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+350 64 io_pgetevents sys_io_pgetevents
+351 common pkey_mprotect sys_pkey_mprotect
+352 common pkey_alloc sys_pkey_alloc
+353 common pkey_free sys_pkey_free
+354 common rseq sys_rseq
+# 355 through 402 are unassigned to sync up with generic numbers
+403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
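
For readers less familiar with syscall.tbl: each row is "<number> <abi> <name> <native entry point> [<compat entry point>]". The 32/64 splits introduced above route one syscall number to the time32 implementation on 32-bit kernels and to the regular one on 64-bit kernels, for example:

162	32	nanosleep	sys_nanosleep_time32
162	64	nanosleep	sys_nanosleep

while the new 403+ entries give 32-bit userspace explicit *_time64 entry points alongside the old numbers.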
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 472a818e8c17..7e1ccafadf57 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -218,7 +218,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
return;
}
- oops_in_progress = 1;
+ bust_spinlocks(1);
oops_enter();
@@ -396,7 +396,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
{
static DEFINE_SPINLOCK(terminate_lock);
- oops_in_progress = 1;
+ bust_spinlocks(1);
set_eiem(0);
local_irq_disable();
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 4b19e6e64fb7..0195aec657e2 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -48,11 +48,15 @@ struct iomap_ops {
unsigned int (*read16be)(void __iomem *);
unsigned int (*read32)(void __iomem *);
unsigned int (*read32be)(void __iomem *);
+ u64 (*read64)(void __iomem *);
+ u64 (*read64be)(void __iomem *);
void (*write8)(u8, void __iomem *);
void (*write16)(u16, void __iomem *);
void (*write16be)(u16, void __iomem *);
void (*write32)(u32, void __iomem *);
void (*write32be)(u32, void __iomem *);
+ void (*write64)(u64, void __iomem *);
+ void (*write64be)(u64, void __iomem *);
void (*read8r)(void __iomem *, void *, unsigned long);
void (*read16r)(void __iomem *, void *, unsigned long);
void (*read32r)(void __iomem *, void *, unsigned long);
@@ -171,6 +175,16 @@ static unsigned int iomem_read32be(void __iomem *addr)
return __raw_readl(addr);
}
+static u64 iomem_read64(void __iomem *addr)
+{
+ return readq(addr);
+}
+
+static u64 iomem_read64be(void __iomem *addr)
+{
+ return __raw_readq(addr);
+}
+
static void iomem_write8(u8 datum, void __iomem *addr)
{
writeb(datum, addr);
@@ -196,6 +210,16 @@ static void iomem_write32be(u32 datum, void __iomem *addr)
__raw_writel(datum, addr);
}
+static void iomem_write64(u64 datum, void __iomem *addr)
+{
+ writeq(datum, addr);
+}
+
+static void iomem_write64be(u64 datum, void __iomem *addr)
+{
+ __raw_writeq(datum, addr);
+}
+
static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
{
while (count--) {
@@ -250,11 +274,15 @@ static const struct iomap_ops iomem_ops = {
.read16be = iomem_read16be,
.read32 = iomem_read32,
.read32be = iomem_read32be,
+ .read64 = iomem_read64,
+ .read64be = iomem_read64be,
.write8 = iomem_write8,
.write16 = iomem_write16,
.write16be = iomem_write16be,
.write32 = iomem_write32,
.write32be = iomem_write32be,
+ .write64 = iomem_write64,
+ .write64be = iomem_write64be,
.read8r = iomem_read8r,
.read16r = iomem_read16r,
.read32r = iomem_read32r,
@@ -304,6 +332,20 @@ unsigned int ioread32be(void __iomem *addr)
return *((u32 *)addr);
}
+u64 ioread64(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr);
+ return le64_to_cpup((u64 *)addr);
+}
+
+u64 ioread64be(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr);
+ return *((u64 *)addr);
+}
+
void iowrite8(u8 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
@@ -349,6 +391,24 @@ void iowrite32be(u32 datum, void __iomem *addr)
}
}
+void iowrite64(u64 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write64(datum, addr);
+ } else {
+ *((u64 *)addr) = cpu_to_le64(datum);
+ }
+}
+
+void iowrite64be(u64 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write64be(datum, addr);
+ } else {
+ *((u64 *)addr) = datum;
+ }
+}
+
/* Repeating interfaces */
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
@@ -449,11 +509,15 @@ EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
+EXPORT_SYMBOL(ioread64);
+EXPORT_SYMBOL(ioread64be);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
+EXPORT_SYMBOL(iowrite64);
+EXPORT_SYMBOL(iowrite64be);
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
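
With the 64-bit accessors wired into the parisc iomap dispatch table above, drivers can use the generic ioread64()/iowrite64() helpers (and their big-endian variants) on this architecture as well. A hypothetical driver fragment — the device, BAR and register offsets are invented:

#include <linux/io.h>
#include <linux/pci.h>

static int my_device_init(struct pci_dev *pdev)
{
	void __iomem *regs = pci_iomap(pdev, 0, 0);
	u64 status;

	if (!regs)
		return -ENOMEM;

	status = ioread64(regs + 0x10);		/* little-endian register */
	iowrite64be(status | 0x1, regs + 0x18);	/* big-endian register */

	pci_iounmap(pdev, regs);
	return 0;
}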
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 059187a3ded7..d0b166256f1a 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -79,36 +79,6 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;
-/*
- * get_memblock() allocates pages via memblock.
- * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
- * doesn't allocate from bottom to top which is needed because we only created
- * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
- */
-static void * __init get_memblock(unsigned long size)
-{
- static phys_addr_t search_addr __initdata;
- phys_addr_t phys;
-
- if (!search_addr)
- search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
- search_addr = ALIGN(search_addr, size);
- while (!memblock_is_region_memory(search_addr, size) ||
- memblock_is_region_reserved(search_addr, size)) {
- search_addr += size;
- }
- phys = search_addr;
-
- if (phys)
- memblock_reserve(phys, size);
- else
- panic("get_memblock() failed.\n");
-
- memset(__va(phys), 0, size);
-
- return __va(phys);
-}
-
#ifdef CONFIG_64BIT
#define MAX_MEM (~0UL)
#else /* !CONFIG_64BIT */
@@ -321,6 +291,13 @@ static void __init setup_bootmem(void)
max_pfn = start_pfn + npages;
}
+ /*
+ * We can't use memblock top-down allocations because we only
+ * created the initial mapping up to KERNEL_INITIAL_SIZE in
+ * the assembly bootup code.
+ */
+ memblock_set_bottom_up(true);
+
/* IOMMU is always used to access "high mem" on those boxes
* that can support enough mem that a PCI device couldn't
* directly DMA to any physical addresses.
@@ -442,7 +419,10 @@ static void __init map_pages(unsigned long start_vaddr,
*/
if (!pmd) {
- pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
+ pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
+ PAGE_SIZE << PMD_ORDER);
+ if (!pmd)
+ panic("pmd allocation failed.\n");
pmd = (pmd_t *) __pa(pmd);
}
@@ -461,7 +441,10 @@ static void __init map_pages(unsigned long start_vaddr,
pg_table = (pte_t *)pmd_address(*pmd);
if (!pg_table) {
- pg_table = (pte_t *) get_memblock(PAGE_SIZE);
+ pg_table = memblock_alloc(PAGE_SIZE,
+ PAGE_SIZE);
+ if (!pg_table)
+ panic("page table allocation failed\n");
pg_table = (pte_t *) __pa(pg_table);
}
@@ -700,7 +683,10 @@ static void __init pagetable_init(void)
}
#endif
- empty_zero_page = get_memblock(PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("zero page allocation failed.\n");
+
}
static void __init gateway_init(void)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2890d36eb531..7deb3ea2dd3f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -128,6 +128,7 @@ config PPC
#
# Please keep this list sorted alphabetically.
#
+ select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_SET_COHERENT_MASK
@@ -178,6 +179,7 @@ config PPC
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+ select HAVE_ARCH_NVRAM_OPS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CBPF_JIT if !PPC64
@@ -274,11 +276,6 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-# All PPC32s use generic nvram driver through ppc_md
-config GENERIC_NVRAM
- bool
- default y if PPC32
-
config SCHED_OMIT_FRAME_POINTER
bool
default y
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 5b0177733994..66c1e4f88d65 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -13,6 +13,10 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
+extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte);
+
static inline int hstate_get_psize(struct hstate *hstate)
{
unsigned long shift;
@@ -42,4 +46,12 @@ static inline bool gigantic_page_supported(void)
/* hugepd entry valid bit */
#define HUGEPD_VAL_BITS (0x8000000000000000UL)
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t new_pte);
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 2e6ada28da64..868fcaf56f6b 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud)
static inline int pud_present(pud_t pud)
{
- return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
}
extern struct page *pud_page(pud_t pud);
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd)
static inline int pgd_present(pgd_t pgd)
{
- return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
+ return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
}
static inline pte_t pgd_pte(pgd_t pgd)
@@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
- struct spinlock *old_pmd_ptl,
- struct vm_area_struct *vma)
-{
- if (radix_enabled())
- return false;
- /*
- * Archs like ppc64 use pgtable to store per pmd
- * specific information. So when we switch the pmd,
- * we should also withdraw and deposit the pgtable
- */
- return true;
-}
-
-
+extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+ struct spinlock *old_pmd_ptl,
+ struct vm_area_struct *vma);
+/*
+ * Hash translation mode uses the deposited table to store hash pte
+ * slot information.
+ */
#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
static inline bool arch_needs_pgtable_deposit(void)
{
@@ -1314,6 +1306,24 @@ static inline int pud_pfn(pud_t pud)
BUILD_BUG();
return 0;
}
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
+ pte_t *, pte_t, pte_t);
+
+/*
+ * Returns true for an R -> RW upgrade of pte
+ */
+static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_val)
+{
+ if (!(old_val & _PAGE_READ))
+ return false;
+
+ if ((!(old_val & _PAGE_WRITE)) && (new_val & _PAGE_WRITE))
+ return true;
+
+ return false;
+}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 7d1a3d1543fc..5ab134eeed20 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -127,6 +127,10 @@ extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep
pte_t entry, unsigned long address,
int psize);
+extern void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte);
+
static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
unsigned long set)
{
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 7f19fbd3ba55..4b73847e9b95 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -783,8 +783,10 @@ extern void __iounmap_at(void *ea, unsigned long size);
#define mmio_read16be(addr) readw_be(addr)
#define mmio_read32be(addr) readl_be(addr)
+#define mmio_read64be(addr) readq_be(addr)
#define mmio_write16be(val, addr) writew_be(val, addr)
#define mmio_write32be(val, addr) writel_be(val, addr)
+#define mmio_write64be(val, addr) writeq_be(val, addr)
#define mmio_insb(addr, dst, count) readsb(addr, dst, count)
#define mmio_insw(addr, dst, count) readsw(addr, dst, count)
#define mmio_insl(addr, dst, count) readsl(addr, dst, count)
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 09a518bb7c03..629a5cdcc865 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -78,9 +78,6 @@ extern int pmac_get_partition(int partition);
extern u8 pmac_xpram_read(int xpaddr);
extern void pmac_xpram_write(int xpaddr, u8 data);
-/* Synchronize NVRAM */
-extern void nvram_sync(void);
-
/* Initialize NVRAM OS partition */
extern int __init nvram_init_os_partition(struct nvram_os_partition *part);
@@ -98,10 +95,4 @@ extern int nvram_write_os_partition(struct nvram_os_partition *part,
unsigned int err_type,
unsigned int error_log_cnt);
-/* Determine NVRAM size */
-extern ssize_t nvram_get_size(void);
-
-/* Normal access to NVRAM */
-extern unsigned char nvram_read_byte(int i);
-extern void nvram_write_byte(unsigned char c, int i);
#endif /* _ASM_POWERPC_NVRAM_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index aee4fcc24990..77fc21278fa2 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/ioport.h>
+#include <linux/numa.h>
struct device_node;
@@ -265,7 +266,7 @@ extern int pcibios_map_io_space(struct pci_bus *bus);
#ifdef CONFIG_NUMA
#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE))
#else
-#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = -1)
+#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = NUMA_NO_NODE)
#endif
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 19a8834e0398..f9513ad38fa6 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -337,6 +337,7 @@
#define PPC_INST_DIVWU 0x7c000396
#define PPC_INST_DIVD 0x7c0003d2
#define PPC_INST_RLWINM 0x54000000
+#define PPC_INST_RLWINM_DOT 0x54000001
#define PPC_INST_RLWIMI 0x50000000
#define PPC_INST_RLDICL 0x78000000
#define PPC_INST_RLDICR 0x78000004
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index e3a731793ea2..4d6d905e9138 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -28,7 +28,6 @@
#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
#endif
-#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index a3c35e6d6ffb..f44dbc65e38e 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -29,8 +29,8 @@
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_TIME32
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
@@ -45,8 +45,8 @@
#define __ARCH_WANT_OLD_STAT
#endif
#ifdef CONFIG_PPC64
-#define __ARCH_WANT_COMPAT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME32
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_NEWFSTATAT
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
+ PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 94de465e0920..12aa0c43e775 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -11,8 +11,8 @@
#define SO_RCVLOWAT 16
#define SO_SNDLOWAT 17
-#define SO_RCVTIMEO 18
-#define SO_SNDTIMEO 19
+#define SO_RCVTIMEO_OLD 18
+#define SO_SNDTIMEO_OLD 19
#define SO_PASSCRED 20
#define SO_PEERCRED 21
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 57deb1e9ffea..20cc816b3508 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -852,11 +852,12 @@ start_here:
/* set up the PTE pointers for the Abatron bdiGDB.
*/
- tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
+ lis r6, swapper_pg_dir@h
+ ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5)
/* Now turn on the MMU for real! */
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 38b03a330cd2..244d2462e781 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -7,12 +7,6 @@
* 2 of the License, or (at your option) any later version.
*
* /dev/nvram driver for PPC64
- *
- * This perhaps should live in drivers/char
- *
- * TODO: Split the /dev/nvram part (that one can use
- * drivers/char/generic_nvram.c) from the arch & partition
- * parsing code.
*/
#include <linux/types.h>
@@ -714,137 +708,6 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
spin_unlock_irqrestore(&lock, flags);
}
-static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
-{
- if (ppc_md.nvram_size == NULL)
- return -ENODEV;
- return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
- ppc_md.nvram_size());
-}
-
-
-static ssize_t dev_nvram_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t ret;
- char *tmp = NULL;
- ssize_t size;
-
- if (!ppc_md.nvram_size) {
- ret = -ENODEV;
- goto out;
- }
-
- size = ppc_md.nvram_size();
- if (size < 0) {
- ret = size;
- goto out;
- }
-
- if (*ppos >= size) {
- ret = 0;
- goto out;
- }
-
- count = min_t(size_t, count, size - *ppos);
- count = min(count, PAGE_SIZE);
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = ppc_md.nvram_read(tmp, count, ppos);
- if (ret <= 0)
- goto out;
-
- if (copy_to_user(buf, tmp, ret))
- ret = -EFAULT;
-
-out:
- kfree(tmp);
- return ret;
-
-}
-
-static ssize_t dev_nvram_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t ret;
- char *tmp = NULL;
- ssize_t size;
-
- ret = -ENODEV;
- if (!ppc_md.nvram_size)
- goto out;
-
- ret = 0;
- size = ppc_md.nvram_size();
- if (*ppos >= size || size < 0)
- goto out;
-
- count = min_t(size_t, count, size - *ppos);
- count = min(count, PAGE_SIZE);
-
- tmp = memdup_user(buf, count);
- if (IS_ERR(tmp)) {
- ret = PTR_ERR(tmp);
- goto out;
- }
-
- ret = ppc_md.nvram_write(tmp, count, ppos);
-
- kfree(tmp);
-out:
- return ret;
-}
-
-static long dev_nvram_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- switch(cmd) {
-#ifdef CONFIG_PPC_PMAC
- case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
- printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
- /* fall through */
- case IOC_NVRAM_GET_OFFSET: {
- int part, offset;
-
- if (!machine_is(powermac))
- return -EINVAL;
- if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
- return -EFAULT;
- if (part < pmac_nvram_OF || part > pmac_nvram_NR)
- return -EINVAL;
- offset = pmac_get_partition(part);
- if (offset < 0)
- return offset;
- if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
- return -EFAULT;
- return 0;
- }
-#endif /* CONFIG_PPC_PMAC */
- default:
- return -EINVAL;
- }
-}
-
-static const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = dev_nvram_llseek,
- .read = dev_nvram_read,
- .write = dev_nvram_write,
- .unlocked_ioctl = dev_nvram_ioctl,
-};
-
-static struct miscdevice nvram_dev = {
- NVRAM_MINOR,
- "nvram",
- &nvram_fops
-};
-
-
#ifdef DEBUG_NVRAM
static void __init nvram_print_partitions(char * label)
{
@@ -992,6 +855,8 @@ loff_t __init nvram_create_partition(const char *name, int sig,
long size = 0;
int rc;
+ BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
+
/* Convert sizes from bytes to blocks */
req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
@@ -1192,22 +1057,3 @@ int __init nvram_scan_partitions(void)
kfree(header);
return err;
}
-
-static int __init nvram_init(void)
-{
- int rc;
-
- BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
-
- if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
- return -ENODEV;
-
- rc = misc_register(&nvram_dev);
- if (rc != 0) {
- printk(KERN_ERR "nvram_init: failed to register device\n");
- return rc;
- }
-
- return rc;
-}
-device_initcall(nvram_init);
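
The removal above drops the arch-specific /dev/nvram misc device; the character-device side is expected to be served by the generic nvram driver through the ppc_md hooks wired up elsewhere in this diff, so the userspace interface stays the same. A minimal reader, for illustration only, with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/nvram", O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return 1;
	printf("first byte: 0x%02x\n", buf[0]);
	close(fd);
	return 0;
}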
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 913bfca09c4f..b8480127793d 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
+#include <linux/numa.h>
#include <asm/lppaca.h>
#include <asm/paca.h>
@@ -36,7 +37,7 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align,
* which will put its paca in the right place.
*/
if (cpu == boot_cpuid) {
- nid = -1;
+ nid = NUMA_NO_NODE;
memblock_set_bottom_up(true);
} else {
nid = early_cpu_to_node(cpu);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 88e4f69a09e5..4538e8ddde80 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -32,6 +32,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
+#include <linux/numa.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -132,7 +133,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
int nid = of_node_to_nid(dev);
if (nid < 0 || !node_online(nid))
- nid = -1;
+ nid = NUMA_NO_NODE;
PHB_SET_NODE(phb, nid);
}
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 947f904688b0..c31082233a25 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -17,6 +17,7 @@
#include <linux/console.h>
#include <linux/memblock.h>
#include <linux/export.h>
+#include <linux/nvram.h>
#include <asm/io.h>
#include <asm/prom.h>
@@ -147,41 +148,6 @@ static int __init ppc_setup_l3cr(char *str)
}
__setup("l3cr=", ppc_setup_l3cr);
-#ifdef CONFIG_GENERIC_NVRAM
-
-/* Generic nvram hooks used by drivers/char/gen_nvram.c */
-unsigned char nvram_read_byte(int addr)
-{
- if (ppc_md.nvram_read_val)
- return ppc_md.nvram_read_val(addr);
- return 0xff;
-}
-EXPORT_SYMBOL(nvram_read_byte);
-
-void nvram_write_byte(unsigned char val, int addr)
-{
- if (ppc_md.nvram_write_val)
- ppc_md.nvram_write_val(addr, val);
-}
-EXPORT_SYMBOL(nvram_write_byte);
-
-ssize_t nvram_get_size(void)
-{
- if (ppc_md.nvram_size)
- return ppc_md.nvram_size();
- return -1;
-}
-EXPORT_SYMBOL(nvram_get_size);
-
-void nvram_sync(void)
-{
- if (ppc_md.nvram_sync)
- ppc_md.nvram_sync();
-}
-EXPORT_SYMBOL(nvram_sync);
-
-#endif /* CONFIG_NVRAM */
-
static int __init ppc_init(void)
{
/* clear the progress line */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index bd5e6834ca69..6794466f6420 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
- }
+ } else
#endif
- /* Fall through, for non-TM restore */
- if (!MSR_TM_ACTIVE(msr)) {
+ {
/*
+ * Fall through, for non-TM restore
+ *
* Unset MSR[TS] on the thread regs since MSR from user
* context does not have MSR active, and recheckpoint was
* not called since restore_tm_sigcontexts() was not called
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index db3bbb8744af..b18abb0c3dae 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -20,7 +20,9 @@
10 common unlink sys_unlink
11 nospu execve sys_execve compat_sys_execve
12 common chdir sys_chdir
-13 common time sys_time compat_sys_time
+13 32 time sys_time32
+13 64 time sys_time
+13 spu time sys_time
14 common mknod sys_mknod
15 common chmod sys_chmod
16 common lchown sys_lchown
@@ -36,14 +38,17 @@
22 spu umount sys_ni_syscall
23 common setuid sys_setuid
24 common getuid sys_getuid
-25 common stime sys_stime compat_sys_stime
+25 32 stime sys_stime32
+25 64 stime sys_stime
+25 spu stime sys_stime
26 nospu ptrace sys_ptrace compat_sys_ptrace
27 common alarm sys_alarm
28 32 oldfstat sys_fstat sys_ni_syscall
28 64 oldfstat sys_ni_syscall
28 spu oldfstat sys_ni_syscall
29 nospu pause sys_pause
-30 nospu utime sys_utime compat_sys_utime
+30 32 utime sys_utime32
+30 64 utime sys_utime
31 common stty sys_ni_syscall
32 common gtty sys_ni_syscall
33 common access sys_access
@@ -157,7 +162,9 @@
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
123 common modify_ldt sys_ni_syscall
-124 common adjtimex sys_adjtimex compat_sys_adjtimex
+124 32 adjtimex sys_adjtimex_time32
+124 64 adjtimex sys_adjtimex
+124 spu adjtimex sys_adjtimex
125 common mprotect sys_mprotect
126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
126 64 sigprocmask sys_ni_syscall
@@ -198,8 +205,12 @@
158 common sched_yield sys_sched_yield
159 common sched_get_priority_max sys_sched_get_priority_max
160 common sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
-162 common nanosleep sys_nanosleep compat_sys_nanosleep
+161 32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+161 64 sched_rr_get_interval sys_sched_rr_get_interval
+161 spu sched_rr_get_interval sys_sched_rr_get_interval
+162 32 nanosleep sys_nanosleep_time32
+162 64 nanosleep sys_nanosleep
+162 spu nanosleep sys_nanosleep
163 common mremap sys_mremap
164 common setresuid sys_setresuid
165 common getresuid sys_getresuid
@@ -213,7 +224,8 @@
173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+176 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+176 64 rt_sigtimedwait sys_rt_sigtimedwait
177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
179 common pread64 sys_pread64 compat_sys_pread64
@@ -260,7 +272,9 @@
218 common removexattr sys_removexattr
219 common lremovexattr sys_lremovexattr
220 common fremovexattr sys_fremovexattr
-221 common futex sys_futex compat_sys_futex
+221 32 futex sys_futex_time32
+221 64 futex sys_futex
+221 spu futex sys_futex
222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
# 224 unused
@@ -268,7 +282,9 @@
226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64
227 common io_setup sys_io_setup compat_sys_io_setup
228 common io_destroy sys_io_destroy
-229 common io_getevents sys_io_getevents compat_sys_io_getevents
+229 32 io_getevents sys_io_getevents_time32
+229 64 io_getevents sys_io_getevents
+229 spu io_getevents sys_io_getevents
230 common io_submit sys_io_submit compat_sys_io_submit
231 common io_cancel sys_io_cancel
232 nospu set_tid_address sys_set_tid_address
@@ -280,19 +296,33 @@
238 common epoll_wait sys_epoll_wait
239 common remap_file_pages sys_remap_file_pages
240 common timer_create sys_timer_create compat_sys_timer_create
-241 common timer_settime sys_timer_settime compat_sys_timer_settime
-242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+241 32 timer_settime sys_timer_settime32
+241 64 timer_settime sys_timer_settime
+241 spu timer_settime sys_timer_settime
+242 32 timer_gettime sys_timer_gettime32
+242 64 timer_gettime sys_timer_gettime
+242 spu timer_gettime sys_timer_gettime
243 common timer_getoverrun sys_timer_getoverrun
244 common timer_delete sys_timer_delete
-245 common clock_settime sys_clock_settime compat_sys_clock_settime
-246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
-247 common clock_getres sys_clock_getres compat_sys_clock_getres
-248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+245 32 clock_settime sys_clock_settime32
+245 64 clock_settime sys_clock_settime
+245 spu clock_settime sys_clock_settime
+246 32 clock_gettime sys_clock_gettime32
+246 64 clock_gettime sys_clock_gettime
+246 spu clock_gettime sys_clock_gettime
+247 32 clock_getres sys_clock_getres_time32
+247 64 clock_getres sys_clock_getres
+247 spu clock_getres sys_clock_getres
+248 32 clock_nanosleep sys_clock_nanosleep_time32
+248 64 clock_nanosleep sys_clock_nanosleep
+248 spu clock_nanosleep sys_clock_nanosleep
249 32 swapcontext ppc_swapcontext ppc32_swapcontext
249 64 swapcontext ppc64_swapcontext
249 spu swapcontext sys_ni_syscall
250 common tgkill sys_tgkill
-251 common utimes sys_utimes compat_sys_utimes
+251 32 utimes sys_utimes_time32
+251 64 utimes sys_utimes
+251 spu utimes sys_utimes
252 common statfs64 sys_statfs64 compat_sys_statfs64
253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
254 32 fadvise64_64 ppc_fadvise64_64
@@ -308,8 +338,10 @@
261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
262 nospu mq_open sys_mq_open compat_sys_mq_open
263 nospu mq_unlink sys_mq_unlink
-264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
-265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+264 32 mq_timedsend sys_mq_timedsend_time32
+264 64 mq_timedsend sys_mq_timedsend
+265 32 mq_timedreceive sys_mq_timedreceive_time32
+265 64 mq_timedreceive sys_mq_timedreceive
266 nospu mq_notify sys_mq_notify compat_sys_mq_notify
267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
268 nospu kexec_load sys_kexec_load compat_sys_kexec_load
@@ -324,8 +356,10 @@
277 nospu inotify_rm_watch sys_inotify_rm_watch
278 nospu spu_run sys_spu_run
279 nospu spu_create sys_spu_create
-280 nospu pselect6 sys_pselect6 compat_sys_pselect6
-281 nospu ppoll sys_ppoll compat_sys_ppoll
+280 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+280 64 pselect6 sys_pselect6
+281 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+281 64 ppoll sys_ppoll
282 common unshare sys_unshare
283 common splice sys_splice
284 common tee sys_tee
@@ -334,7 +368,9 @@
287 common mkdirat sys_mkdirat
288 common mknodat sys_mknodat
289 common fchownat sys_fchownat
-290 common futimesat sys_futimesat compat_sys_futimesat
+290 32 futimesat sys_futimesat_time32
+290 64 futimesat sys_futimesat
+290 spu utimesat sys_futimesat
291 32 fstatat64 sys_fstatat64
291 64 newfstatat sys_newfstatat
291 spu newfstatat sys_newfstatat
@@ -350,15 +386,21 @@
301 common move_pages sys_move_pages compat_sys_move_pages
302 common getcpu sys_getcpu
303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
-304 common utimensat sys_utimensat compat_sys_utimensat
+304 32 utimensat sys_utimensat_time32
+304 64 utimensat sys_utimensat
+304 spu utimensat sys_utimensat
305 common signalfd sys_signalfd compat_sys_signalfd
306 common timerfd_create sys_timerfd_create
307 common eventfd sys_eventfd
308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
309 nospu fallocate sys_fallocate compat_sys_fallocate
310 nospu subpage_prot sys_subpage_prot
-311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
-312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+311 32 timerfd_settime sys_timerfd_settime32
+311 64 timerfd_settime sys_timerfd_settime
+311 spu timerfd_settime sys_timerfd_settime
+312 32 timerfd_gettime sys_timerfd_gettime32
+312 64 timerfd_gettime sys_timerfd_gettime
+312 spu timerfd_gettime sys_timerfd_gettime
313 common signalfd4 sys_signalfd4 compat_sys_signalfd4
314 common eventfd2 sys_eventfd2
315 common epoll_create1 sys_epoll_create1
@@ -389,11 +431,15 @@
340 common getsockopt sys_getsockopt compat_sys_getsockopt
341 common sendmsg sys_sendmsg compat_sys_sendmsg
342 common recvmsg sys_recvmsg compat_sys_recvmsg
-343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+343 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+343 64 recvmmsg sys_recvmmsg
+343 spu recvmmsg sys_recvmmsg
344 common accept4 sys_accept4
345 common name_to_handle_at sys_name_to_handle_at
346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
-347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+347 32 clock_adjtime sys_clock_adjtime32
+347 64 clock_adjtime sys_clock_adjtime
+347 spu clock_adjtime sys_clock_adjtime
348 common syncfs sys_syncfs
349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
350 common setns sys_setns
@@ -414,6 +460,7 @@
363 spu switch_endian sys_ni_syscall
364 common userfaultfd sys_userfaultfd
365 common membarrier sys_membarrier
+# 366-377 originally left for IPC, now unused
378 nospu mlock2 sys_mlock2
379 nospu copy_file_range sys_copy_file_range
380 common preadv2 sys_preadv2 compat_sys_preadv2
@@ -424,4 +471,37 @@
385 nospu pkey_free sys_pkey_free
386 nospu pkey_mprotect sys_pkey_mprotect
387 nospu rseq sys_rseq
-388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
+388 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+388 64 io_pgetevents sys_io_pgetevents
+# room for arch specific syscalls
+392 64 semtimedop sys_semtimedop
+393 common semget sys_semget
+394 common semctl sys_semctl compat_sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl compat_sys_shmctl
+397 common shmat sys_shmat compat_sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd compat_sys_msgsnd
+401 common msgrcv sys_msgrcv compat_sys_msgrcv
+402 common msgctl sys_msgctl compat_sys_msgctl
+403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
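
The table above splits the time-related entries into time32 variants (32-bit ABI, old 32-bit time_t layout) and plain variants (64-bit and spu), and appends the new *_time64 entry points at 403 and up for 32-bit tasks. A hedged userspace sketch of calling one of them directly on 32-bit powerpc; the number 403 comes from the table, struct __kernel_timespec from the uapi headers, and the fallback define exists only so the example stands alone:

#include <stdio.h>
#include <time.h>			/* CLOCK_MONOTONIC */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/time_types.h>		/* struct __kernel_timespec, 64-bit time_t */

#ifndef __NR_clock_gettime64
#define __NR_clock_gettime64 403	/* from the powerpc table above, 32-bit only */
#endif

int main(void)
{
	struct __kernel_timespec ts;

	if (syscall(__NR_clock_gettime64, CLOCK_MONOTONIC, &ts) != 0)
		return 1;
	printf("secs=%lld nsecs=%lld\n",
	       (long long)ts.tv_sec, (long long)ts.tv_nsec);
	return 0;
}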
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 29746dc28df5..517662a56bdc 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -967,13 +967,6 @@ out:
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
-unsigned long __init arch_syscall_addr(int nr)
-{
- return sys_call_table[nr*2];
-}
-#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
-
#ifdef PPC64_ELF_ABI_v1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 7725a9714736..a31b6234fcd7 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -798,7 +798,6 @@ static int __init vdso_init(void)
BUG_ON(vdso32_pagelist == NULL);
for (i = 0; i < vdso32_pages; i++) {
struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
- ClearPageReserved(pg);
get_page(pg);
vdso32_pagelist[i] = pg;
}
@@ -812,7 +811,6 @@ static int __init vdso_init(void)
BUG_ON(vdso64_pagelist == NULL);
for (i = 0; i < vdso64_pages; i++) {
struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
- ClearPageReserved(pg);
get_page(pg);
vdso64_pagelist[i] = pg;
}
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 2e6a8f9345d3..367ce3a4a503 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -121,3 +121,28 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
return 0;
}
+
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long pte_val;
+ /*
+ * Clear the _PAGE_PRESENT so that no hardware parallel update is
+ * possible. Also keep the pte_present true so that we don't take
+ * wrong fault.
+ */
+ pte_val = pte_update(vma->vm_mm, addr, ptep,
+ _PAGE_PRESENT, _PAGE_INVALID, 1);
+
+ return __pte(pte_val);
+}
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+
+ if (radix_enabled())
+ return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
+ old_pte, pte);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 2486bee0f93e..11d9ea28a816 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -90,3 +90,20 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return vm_unmapped_area(&info);
}
+
+void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ /*
+ * To avoid NMMU hang while relaxing access we need to flush the tlb before
+ * we set the new value.
+ */
+ if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
+ (atomic_read(&mm->context.copros) > 0))
+ radix__flush_hugetlb_page(vma, addr);
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index a712a650a8b6..e7a9c4f6bfca 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -21,6 +21,7 @@
#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
+#include <linux/mm_inline.h>
static DEFINE_MUTEX(mem_list_mutex);
@@ -34,8 +35,18 @@ struct mm_iommu_table_group_mem_t {
atomic64_t mapped;
unsigned int pageshift;
u64 ua; /* userspace address */
- u64 entries; /* number of entries in hpas[] */
- u64 *hpas; /* vmalloc'ed */
+ u64 entries; /* number of entries in hpas/hpages[] */
+ /*
+ * in mm_iommu_get we temporarily use this to store
+ * struct page address.
+ *
+ * We need to convert ua to hpa in real mode. Make it
+ * simpler by storing physical address.
+ */
+ union {
+ struct page **hpages; /* vmalloc'ed */
+ phys_addr_t *hpas;
+ };
#define MM_IOMMU_TABLE_INVALID_HPA ((uint64_t)-1)
u64 dev_hpa; /* Device memory base address */
};
@@ -80,64 +91,13 @@ bool mm_iommu_preregistered(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
-/*
- * Taken from alloc_migrate_target with changes to remove CMA allocations
- */
-struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
-{
- gfp_t gfp_mask = GFP_USER;
- struct page *new_page;
-
- if (PageCompound(page))
- return NULL;
-
- if (PageHighMem(page))
- gfp_mask |= __GFP_HIGHMEM;
-
- /*
- * We don't want the allocation to force an OOM if possibe
- */
- new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
- return new_page;
-}
-
-static int mm_iommu_move_page_from_cma(struct page *page)
-{
- int ret = 0;
- LIST_HEAD(cma_migrate_pages);
-
- /* Ignore huge pages for now */
- if (PageCompound(page))
- return -EBUSY;
-
- lru_add_drain();
- ret = isolate_lru_page(page);
- if (ret)
- return ret;
-
- list_add(&page->lru, &cma_migrate_pages);
- put_page(page); /* Drop the gup reference */
-
- ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
- NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
- if (ret) {
- if (!list_empty(&cma_migrate_pages))
- putback_movable_pages(&cma_migrate_pages);
- }
-
- return 0;
-}
-
static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
- unsigned long entries, unsigned long dev_hpa,
- struct mm_iommu_table_group_mem_t **pmem)
+ unsigned long entries, unsigned long dev_hpa,
+ struct mm_iommu_table_group_mem_t **pmem)
{
struct mm_iommu_table_group_mem_t *mem;
- long i, j, ret = 0, locked_entries = 0;
+ long i, ret, locked_entries = 0;
unsigned int pageshift;
- unsigned long flags;
- unsigned long cur_ua;
- struct page *page = NULL;
mutex_lock(&mem_list_mutex);
@@ -187,62 +147,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
goto unlock_exit;
}
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
+ up_read(&mm->mmap_sem);
+ if (ret != entries) {
+ /* free the reference taken */
+ for (i = 0; i < ret; i++)
+ put_page(mem->hpages[i]);
+
+ vfree(mem->hpas);
+ kfree(mem);
+ ret = -EFAULT;
+ goto unlock_exit;
+ }
+
+ pageshift = PAGE_SHIFT;
for (i = 0; i < entries; ++i) {
- cur_ua = ua + (i << PAGE_SHIFT);
- if (1 != get_user_pages_fast(cur_ua,
- 1/* pages */, 1/* iswrite */, &page)) {
- ret = -EFAULT;
- for (j = 0; j < i; ++j)
- put_page(pfn_to_page(mem->hpas[j] >>
- PAGE_SHIFT));
- vfree(mem->hpas);
- kfree(mem);
- goto unlock_exit;
- }
+ struct page *page = mem->hpages[i];
+
/*
- * If we get a page from the CMA zone, since we are going to
- * be pinning these entries, we might as well move them out
- * of the CMA zone if possible. NOTE: faulting in + migration
- * can be expensive. Batching can be considered later
+ * Allow to use larger than 64k IOMMU pages. Only do that
+ * if we are backed by hugetlb.
*/
- if (is_migrate_cma_page(page)) {
- if (mm_iommu_move_page_from_cma(page))
- goto populate;
- if (1 != get_user_pages_fast(cur_ua,
- 1/* pages */, 1/* iswrite */,
- &page)) {
- ret = -EFAULT;
- for (j = 0; j < i; ++j)
- put_page(pfn_to_page(mem->hpas[j] >>
- PAGE_SHIFT));
- vfree(mem->hpas);
- kfree(mem);
- goto unlock_exit;
- }
- }
-populate:
- pageshift = PAGE_SHIFT;
- if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
- pte_t *pte;
+ if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
struct page *head = compound_head(page);
- unsigned int compshift = compound_order(head);
- unsigned int pteshift;
-
- local_irq_save(flags); /* disables as well */
- pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
-
- /* Double check it is still the same pinned page */
- if (pte && pte_page(*pte) == head &&
- pteshift == compshift + PAGE_SHIFT)
- pageshift = max_t(unsigned int, pteshift,
- PAGE_SHIFT);
- local_irq_restore(flags);
+
+ pageshift = compound_order(head) + PAGE_SHIFT;
}
mem->pageshift = min(mem->pageshift, pageshift);
+ /*
+ * We don't need struct page reference any more, switch
+ * to physical address.
+ */
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
}
good_exit:
+ ret = 0;
atomic64_set(&mem->mapped, 1);
mem->used = 1;
mem->ua = ua;
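
The rework above replaces the page-at-a-time get_user_pages_fast() loop and manual CMA migration with one get_user_pages_longterm() call, then reuses the same vmalloc'ed array for physical addresses (hence the hpages/hpas union). A condensed sketch of that aliasing, with illustrative names, assuming the caller holds mmap_sem for read and that struct page pointers and phys_addr_t have the same width (true on ppc64):

#include <linux/mm.h>

static long pin_and_flatten(unsigned long ua, unsigned long entries,
			    struct page **hpages)
{
	phys_addr_t *hpas = (phys_addr_t *)hpages;	/* same storage, reused */
	long pinned, i;

	pinned = get_user_pages_longterm(ua, entries, FOLL_WRITE, hpages, NULL);
	if (pinned < 0)
		return pinned;
	if (pinned != entries) {
		/* drop the references we did take, as the hunk above does */
		for (i = 0; i < pinned; i++)
			put_page(hpages[i]);
		return -EFAULT;
	}

	/* overwrite each slot in place with the physical address */
	for (i = 0; i < pinned; i++)
		hpas[i] = page_to_pfn(hpages[i]) << PAGE_SHIFT;

	return 0;
}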
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 87f0dd004295..df1e11ebbabb 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -84,7 +84,7 @@ static void __init setup_node_to_cpumask_map(void)
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
/* cpumask_of_node() will now work */
- dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
+ dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
}
static int __init fake_numa_create_new_node(unsigned long end_pfn,
@@ -215,7 +215,7 @@ static void initialize_distance_lookup_table(int nid,
*/
static int associativity_to_nid(const __be32 *associativity)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
if (min_common_depth == -1)
goto out;
@@ -225,7 +225,7 @@ static int associativity_to_nid(const __be32 *associativity)
/* POWER4 LPAR uses 0xffff as invalid node */
if (nid == 0xffff || nid >= MAX_NUMNODES)
- nid = -1;
+ nid = NUMA_NO_NODE;
if (nid > 0 &&
of_read_number(associativity, 1) >= distance_ref_points_depth) {
@@ -244,7 +244,7 @@ out:
*/
static int of_node_to_nid_single(struct device_node *device)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
const __be32 *tmp;
tmp = of_get_associativity(device);
@@ -256,7 +256,7 @@ static int of_node_to_nid_single(struct device_node *device)
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
of_node_get(device);
while (device) {
@@ -454,7 +454,7 @@ static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
*/
static int numa_setup_cpu(unsigned long lcpu)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
struct device_node *cpu;
/*
@@ -930,7 +930,7 @@ static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
struct drmem_lmb *lmb;
unsigned long lmb_size;
- int nid = -1;
+ int nid = NUMA_NO_NODE;
lmb_size = drmem_lmb_size();
@@ -960,7 +960,7 @@ static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
struct device_node *memory;
- int nid = -1;
+ int nid = NUMA_NO_NODE;
for_each_node_by_type(memory, "memory") {
unsigned long start, size;
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index f3c31f5e1026..e7da590c7a78 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -400,3 +400,50 @@ void arch_report_meminfo(struct seq_file *m)
atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ unsigned long pte_val;
+
+ /*
+ * Clear the _PAGE_PRESENT so that no hardware parallel update is
+ * possible. Also keep the pte_present true so that we don't take
+ * wrong fault.
+ */
+ pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);
+
+ return __pte(pte_val);
+
+}
+
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+ if (radix_enabled())
+ return radix__ptep_modify_prot_commit(vma, addr,
+ ptep, old_pte, pte);
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+
+/*
+ * For hash translation mode, we use the deposited table to store hash slot
+ * information and they are stored at PTRS_PER_PMD offset from related pmd
+ * location. Hence a pmd move requires deposit and withdraw.
+ *
+ * For radix translation with split pmd ptl, we store the deposited table in the
+ * pmd page. Hence if we have different pmd page we need to withdraw during pmd
+ * move.
+ *
+ * With hash we use deposited table always irrespective of anon or not.
+ * With radix we use deposited table only for anonymous mapping.
+ */
+int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+ struct spinlock *old_pmd_ptl,
+ struct vm_area_struct *vma)
+{
+ if (radix_enabled())
+ return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
+
+ return true;
+}
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 931156069a81..dced3cd241c2 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -1063,3 +1063,21 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
}
/* See ptesync comment in radix__set_pte_at */
}
+
+void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ /*
+ * To avoid NMMU hang while relaxing access we need to flush the tlb before
+ * we set the new value. We need to do this only for radix, because hash
+ * translation does flush when updating the linux pte.
+ */
+ if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
+ (atomic_read(&mm->context.copros) > 0))
+ radix__flush_tlb_page(vma, addr);
+
+ set_pte_at(mm, addr, ptep, pte);
+}
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index c2d5192ed64f..549e9490ff2a 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -165,6 +165,10 @@
#define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
___PPC_RS(a) | __PPC_SH(i) | \
__PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RLWINM_DOT(d, a, i, mb, me) EMIT(PPC_INST_RLWINM_DOT | \
+ ___PPC_RA(d) | ___PPC_RS(a) | \
+ __PPC_SH(i) | __PPC_MB(mb) | \
+ __PPC_ME(me))
#define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \
___PPC_RS(a) | __PPC_SH(i) | \
__PPC_MB(mb) | __PPC_ME(me))
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 7ce57657d3b8..4194d3cfb60c 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -768,36 +768,58 @@ emit_clear:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
true_cond = COND_GT;
goto cond_branch;
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
true_cond = COND_LT;
goto cond_branch;
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
true_cond = COND_GE;
goto cond_branch;
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
true_cond = COND_LE;
goto cond_branch;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
true_cond = COND_EQ;
goto cond_branch;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
true_cond = COND_NE;
goto cond_branch;
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
true_cond = COND_NE;
/* Fall through */
@@ -809,18 +831,44 @@ cond_branch:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
/* unsigned comparison */
- PPC_CMPLD(dst_reg, src_reg);
+ if (BPF_CLASS(code) == BPF_JMP32)
+ PPC_CMPLW(dst_reg, src_reg);
+ else
+ PPC_CMPLD(dst_reg, src_reg);
break;
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
/* signed comparison */
- PPC_CMPD(dst_reg, src_reg);
+ if (BPF_CLASS(code) == BPF_JMP32)
+ PPC_CMPW(dst_reg, src_reg);
+ else
+ PPC_CMPD(dst_reg, src_reg);
break;
case BPF_JMP | BPF_JSET | BPF_X:
- PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ if (BPF_CLASS(code) == BPF_JMP) {
+ PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
+ src_reg);
+ } else {
+ int tmp_reg = b2p[TMP_REG_1];
+
+ PPC_AND(tmp_reg, dst_reg, src_reg);
+ PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
+ 31);
+ }
break;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -828,43 +876,87 @@ cond_branch:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ {
+ bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
+
/*
* Need sign-extended load, so only positive
* values can be used as imm in cmpldi
*/
- if (imm >= 0 && imm < 32768)
- PPC_CMPLDI(dst_reg, imm);
- else {
+ if (imm >= 0 && imm < 32768) {
+ if (is_jmp32)
+ PPC_CMPLWI(dst_reg, imm);
+ else
+ PPC_CMPLDI(dst_reg, imm);
+ } else {
/* sign-extending load */
PPC_LI32(b2p[TMP_REG_1], imm);
/* ... but unsigned comparison */
- PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
+ if (is_jmp32)
+ PPC_CMPLW(dst_reg,
+ b2p[TMP_REG_1]);
+ else
+ PPC_CMPLD(dst_reg,
+ b2p[TMP_REG_1]);
}
break;
+ }
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ {
+ bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
+
/*
* signed comparison, so any 16-bit value
* can be used in cmpdi
*/
- if (imm >= -32768 && imm < 32768)
- PPC_CMPDI(dst_reg, imm);
- else {
+ if (imm >= -32768 && imm < 32768) {
+ if (is_jmp32)
+ PPC_CMPWI(dst_reg, imm);
+ else
+ PPC_CMPDI(dst_reg, imm);
+ } else {
PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
+ if (is_jmp32)
+ PPC_CMPW(dst_reg,
+ b2p[TMP_REG_1]);
+ else
+ PPC_CMPD(dst_reg,
+ b2p[TMP_REG_1]);
}
break;
+ }
case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768)
/* PPC_ANDI is _only/always_ dot-form */
PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
else {
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
- b2p[TMP_REG_1]);
+ int tmp_reg = b2p[TMP_REG_1];
+
+ PPC_LI32(tmp_reg, imm);
+ if (BPF_CLASS(code) == BPF_JMP) {
+ PPC_AND_DOT(tmp_reg, dst_reg,
+ tmp_reg);
+ } else {
+ PPC_AND(tmp_reg, dst_reg,
+ tmp_reg);
+ PPC_RLWINM_DOT(tmp_reg, tmp_reg,
+ 0, 0, 31);
+ }
}
break;
}
@@ -1093,6 +1185,7 @@ skip_codegen_passes:
bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
if (!fp->is_func || extra_pass) {
+ bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
kfree(addrs);
kfree(jit_data);
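
The JIT additions above wire up the BPF_JMP32 class: comparisons look only at the low 32 bits of the operands, which is why the 64-bit cmpld/cmpd become cmplw/cmpw and why the JSET case truncates the AND result to 32 bits with the new rlwinm. encoding while setting CR0. The expected semantics, written as plain C purely for reference:

#include <stdbool.h>
#include <stdint.h>

static bool jmp32_jgt(uint64_t dst, uint64_t src)	/* BPF_JGT, unsigned */
{
	return (uint32_t)dst > (uint32_t)src;
}

static bool jmp32_jsgt(uint64_t dst, uint64_t src)	/* BPF_JSGT, signed */
{
	return (int32_t)dst > (int32_t)src;
}

static bool jmp32_jset(uint64_t dst, uint64_t src)	/* BPF_JSET */
{
	return ((uint32_t)dst & (uint32_t)src) != 0;
}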
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 72238eedc360..d2b8e6061933 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1306,15 +1306,6 @@ static int h_24x7_event_init(struct perf_event *event)
return -EINVAL;
}
- /* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
/* no branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
@@ -1577,6 +1568,7 @@ static struct pmu h_24x7_pmu = {
.start_txn = h_24x7_event_start_txn,
.commit_txn = h_24x7_event_commit_txn,
.cancel_txn = h_24x7_event_cancel_txn,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
static int hv_24x7_init(void)
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 43fabb3cae0f..735e77b09cdb 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -232,15 +232,6 @@ static int h_gpci_event_init(struct perf_event *event)
return -EINVAL;
}
- /* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
/* no branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
@@ -285,6 +276,7 @@ static struct pmu h_gpci_pmu = {
.start = h_gpci_event_start,
.stop = h_gpci_event_stop,
.read = h_gpci_event_update,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
static int hv_gpci_init(void)
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index f292a3f284f1..b1c37cc3fa98 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -473,15 +473,6 @@ static int nest_imc_event_init(struct perf_event *event)
if (event->hw.sample_period)
return -EINVAL;
- /* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
if (event->cpu < 0)
return -EINVAL;
@@ -748,15 +739,6 @@ static int core_imc_event_init(struct perf_event *event)
if (event->hw.sample_period)
return -EINVAL;
- /* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
if (event->cpu < 0)
return -EINVAL;
@@ -1069,6 +1051,7 @@ static int update_pmu_ops(struct imc_pmu *pmu)
pmu->pmu.stop = imc_event_stop;
pmu->pmu.read = imc_event_update;
pmu->pmu.attr_groups = pmu->attr_groups;
+ pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;
switch (pmu->domain) {
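
The three PMU drivers above stop open-coding the exclude_* checks and instead advertise PERF_PMU_CAP_NO_EXCLUDE, letting the perf core reject such attributes before the driver sees them. Observable behaviour from userspace should be unchanged; a sketch, with the sysfs lookup and error handling elided and the type value read at runtime because these PMUs get dynamic ids:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_hv_event(int type, __u64 config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;		/* e.g. /sys/bus/event_source/devices/hv_24x7/type */
	attr.config = config;
	attr.exclude_kernel = 1;	/* still refused with EINVAL, now by perf core */

	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}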
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 5c36b3a8d47a..3349f3f8fe84 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
+ PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
u64 perf_reg_value(struct pt_regs *regs, int idx)
@@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
!is_sier_available()))
return 0;
+ if (idx == PERF_REG_POWERPC_MMCRA &&
+ (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
+ IS_ENABLED(CONFIG_PPC32)))
+ return 0;
+
return regs_get_register(regs, pt_regs_offset[idx]);
}
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index a1aaa1569d7c..f0e488d97567 100644
--- a/arch/powerpc/platforms/4xx/ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
@@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
continue;
seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
- seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys));
+ seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys));
seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal);
- seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys));
+ seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys));
seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
@@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
blk->size, blk->owner);
}
- seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys));
+ seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys));
seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
diff --git a/arch/powerpc/platforms/chrp/Makefile b/arch/powerpc/platforms/chrp/Makefile
index 4b3bfadc70fa..dc3465cc8bc6 100644
--- a/arch/powerpc/platforms/chrp/Makefile
+++ b/arch/powerpc/platforms/chrp/Makefile
@@ -1,3 +1,3 @@
obj-y += setup.o time.o pegasos_eth.o pci.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_NVRAM) += nvram.o
+obj-$(CONFIG_NVRAM:m=y) += nvram.o
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index 791b86398e1d..37ac20ccbb19 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -24,7 +24,7 @@ static unsigned int nvram_size;
static unsigned char nvram_buf[4];
static DEFINE_SPINLOCK(nvram_lock);
-static unsigned char chrp_nvram_read(int addr)
+static unsigned char chrp_nvram_read_val(int addr)
{
unsigned int done;
unsigned long flags;
@@ -46,7 +46,7 @@ static unsigned char chrp_nvram_read(int addr)
return ret;
}
-static void chrp_nvram_write(int addr, unsigned char val)
+static void chrp_nvram_write_val(int addr, unsigned char val)
{
unsigned int done;
unsigned long flags;
@@ -64,6 +64,11 @@ static void chrp_nvram_write(int addr, unsigned char val)
spin_unlock_irqrestore(&nvram_lock, flags);
}
+static ssize_t chrp_nvram_size(void)
+{
+ return nvram_size;
+}
+
void __init chrp_nvram_init(void)
{
struct device_node *nvram;
@@ -85,8 +90,9 @@ void __init chrp_nvram_init(void)
printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
of_node_put(nvram);
- ppc_md.nvram_read_val = chrp_nvram_read;
- ppc_md.nvram_write_val = chrp_nvram_write;
+ ppc_md.nvram_read_val = chrp_nvram_read_val;
+ ppc_md.nvram_write_val = chrp_nvram_write_val;
+ ppc_md.nvram_size = chrp_nvram_size;
return;
}
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index e66644e0fb40..fcf6f2342ef4 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void)
/* see if there is a keyboard in the device tree
with a parent of type "adb" */
for_each_node_by_name(kbd, "keyboard")
- if (kbd->parent && kbd->parent->type
- && strcmp(kbd->parent->type, "adb") == 0)
+ if (of_node_is_type(kbd->parent, "adb"))
break;
of_node_put(kbd);
if (kbd)
@@ -550,7 +549,7 @@ static void __init chrp_init_IRQ(void)
static void __init
chrp_init2(void)
{
-#ifdef CONFIG_NVRAM
+#if IS_ENABLED(CONFIG_NVRAM)
chrp_nvram_init();
#endif
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index d18d16489a15..bdf9b716e848 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -255,7 +255,7 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
chan->ring_size = ring_size;
- chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev,
+ chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
ring_size * sizeof(u64),
&chan->ring_dma, GFP_KERNEL);
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 923bfb340433..20ebf35d7913 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -15,7 +15,5 @@ obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really
# CONFIG_NVRAM=y
obj-$(CONFIG_NVRAM:m=y) += nvram.o
-# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
-obj-$(CONFIG_PPC64) += nvram.o
obj-$(CONFIG_PPC32) += bootx_init.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index ae54d7fe68f3..9360cdc408c1 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -147,6 +147,11 @@ static ssize_t core99_nvram_size(void)
static volatile unsigned char __iomem *nvram_addr;
static int nvram_mult;
+static ssize_t ppc32_nvram_size(void)
+{
+ return NVRAM_SIZE;
+}
+
static unsigned char direct_nvram_read_byte(int addr)
{
return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
@@ -590,21 +595,25 @@ int __init pmac_nvram_init(void)
nvram_mult = 1;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
+ ppc_md.nvram_size = ppc32_nvram_size;
} else if (nvram_naddrs == 1) {
nvram_data = ioremap(r1.start, s1);
nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
+ ppc_md.nvram_size = ppc32_nvram_size;
} else if (nvram_naddrs == 2) {
nvram_addr = ioremap(r1.start, s1);
nvram_data = ioremap(r2.start, s2);
ppc_md.nvram_read_val = indirect_nvram_read_byte;
ppc_md.nvram_write_val = indirect_nvram_write_byte;
+ ppc_md.nvram_size = ppc32_nvram_size;
} else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
#ifdef CONFIG_ADB_PMU
nvram_naddrs = -1;
ppc_md.nvram_read_val = pmu_nvram_read_byte;
ppc_md.nvram_write_val = pmu_nvram_write_byte;
+ ppc_md.nvram_size = ppc32_nvram_size;
#endif /* CONFIG_ADB_PMU */
} else {
printk(KERN_ERR "Incompatible type of NVRAM\n");
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 2e8221e20ee8..b7efcf336589 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -316,8 +316,7 @@ static void __init pmac_setup_arch(void)
find_via_pmu();
smu_init();
-#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \
- defined(CONFIG_PPC64)
+#if IS_ENABLED(CONFIG_NVRAM)
pmac_nvram_init();
#endif
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index f157e3d071f2..b36ddee17c87 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -68,7 +68,7 @@
long __init pmac_time_init(void)
{
s32 delta = 0;
-#ifdef CONFIG_NVRAM
+#if defined(CONFIG_NVRAM) && defined(CONFIG_PPC32)
int dst;
delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index 84d038ed3882..248a38ad25c7 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
+#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/debugfs.h>
@@ -223,7 +224,7 @@ static int memtrace_online(void)
ent = &memtrace_array[i];
/* We have onlined this chunk previously */
- if (ent->nid == -1)
+ if (ent->nid == NUMA_NO_NODE)
continue;
/* Remove from io mappings */
@@ -257,7 +258,7 @@ static int memtrace_online(void)
*/
debugfs_remove_recursive(ent->dir);
pr_info("Added trace memory back to node %d\n", ent->nid);
- ent->size = ent->start = ent->nid = -1;
+ ent->size = ent->start = ent->nid = NUMA_NO_NODE;
}
if (ret)
return ret;
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index d7f742ed48ba..3f58c7dbd581 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
}
} else {
/* Create a group for 1 GPU and attached NPUs for POWER8 */
- pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL);
+ pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
table_group = &pe->npucomp->table_group;
table_group->ops = &pnv_npu_peers_ops;
iommu_register_group(table_group, hose->global_number,
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1d6406a051f1..145373f0e5dc 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
pnv_pci_ioda2_setup_dma_pe(phb, pe);
#ifdef CONFIG_IOMMU_API
+ iommu_register_group(&pe->table_group,
+ pe->phb->hose->global_number, pe->pe_number);
pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
#endif
}
@@ -2681,7 +2683,8 @@ static void pnv_pci_ioda_setup_iommu_api(void)
list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
- if (phb->type == PNV_PHB_NPU_NVLINK)
+ if (phb->type == PNV_PHB_NPU_NVLINK ||
+ phb->type == PNV_PHB_NPU_OCAPI)
continue;
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 45fb70b4bfa7..ef9448a907c6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
return 0;
pe = &phb->ioda.pe_array[pdn->pe_number];
+ if (!pe->table_group.group)
+ return 0;
iommu_add_device(&pe->table_group, dev);
return 0;
case BUS_NOTIFY_DEL_DEVICE:
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index 5b4a56131904..84e8ec4011ba 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -261,8 +261,7 @@ static char *ibmebus_chomp(const char *in, size_t count)
return out;
}
-static ssize_t ibmebus_store_probe(struct bus_type *bus,
- const char *buf, size_t count)
+static ssize_t probe_store(struct bus_type *bus, const char *buf, size_t count)
{
struct device_node *dn = NULL;
struct device *dev;
@@ -298,10 +297,9 @@ out:
return rc;
return count;
}
-static BUS_ATTR(probe, 0200, NULL, ibmebus_store_probe);
+static BUS_ATTR_WO(probe);
-static ssize_t ibmebus_store_remove(struct bus_type *bus,
- const char *buf, size_t count)
+static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
{
struct device *dev;
char *path;
@@ -325,7 +323,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
return -ENODEV;
}
}
-static BUS_ATTR(remove, 0200, NULL, ibmebus_store_remove);
+static BUS_ATTR_WO(remove);
static struct attribute *ibmbus_bus_attrs[] = {
&bus_attr_probe.attr,
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 69cedc1b3b8a..1136a38ff039 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -7,8 +7,6 @@
* 2 of the License, or (at your option) any later version.
*
* /dev/nvram driver for PPC64
- *
- * This perhaps should live in drivers/char
*/
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 7d6457ab5d34..bba281b1fe1b 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
uint64_t rc, token;
+ uint64_t saved = 0;
/*
* When the hypervisor cannot map all the requested memory in a single
@@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
p->blocks, BIND_ANY_ADDR, token);
token = ret[0];
+ if (!saved)
+ saved = ret[1];
cond_resched();
} while (rc == H_BUSY);
@@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
return -ENXIO;
}
- p->bound_addr = ret[1];
+ p->bound_addr = saved;
dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
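The papr_scm hunk above keeps ret[1] from the first H_SCM_BIND_MEM pass: the diff suggests that when the hypervisor answers H_BUSY and the bind is continued with the token, later passes reuse the return buffer, so only the first pass carries the bound address. A minimal user-space sketch of this "save the first continuation result" pattern; mock_hcall is a made-up stand-in, not the real plpar_hcall() API:

    #include <stdint.h>
    #include <stdio.h>

    #define H_SUCCESS 0
    #define H_BUSY    1

    /* Hypothetical stand-in for plpar_hcall(H_SCM_BIND_MEM, ...). */
    static int mock_hcall(uint64_t ret[2], uint64_t token)
    {
            static int calls;

            ret[0] = token + 1;                       /* continue token        */
            ret[1] = calls ? 0 : 0x2000000000ull;     /* address, first pass only */
            return ++calls < 3 ? H_BUSY : H_SUCCESS;
    }

    int main(void)
    {
            uint64_t ret[2], rc, token = 0, saved = 0;

            do {
                    rc = mock_hcall(ret, token);
                    token = ret[0];
                    if (!saved)
                            saved = ret[1];   /* keep the first reported address */
            } while (rc == H_BUSY);

            printf("bound_addr = 0x%llx\n", (unsigned long long)saved);
            return 0;
    }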
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 7725825d887d..37a77e57893e 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void)
if (!of_device_is_compatible(nvdn->parent,
"ibm,power9-npu"))
continue;
+#ifdef CONFIG_PPC_POWERNV
WARN_ON_ONCE(pnv_npu2_init(hose));
+#endif
break;
}
}
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index 8b0ebf3940d2..ebed46f80254 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -756,9 +756,10 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
}
/* Initialize outbound message descriptor ring */
- rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev,
- rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
- &rmu->msg_tx_ring.phys, GFP_KERNEL);
+ rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
+ rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+ &rmu->msg_tx_ring.phys,
+ GFP_KERNEL);
if (!rmu->msg_tx_ring.virt) {
rc = -ENOMEM;
goto out_dma;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e0d7d61779a6..bd149905a5b5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -28,11 +28,13 @@ config RISCV
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
+ select HAVE_ARCH_AUDITSYSCALL
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GENERIC_DMA_COHERENT
select HAVE_PERF_EVENTS
+ select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
select RISCV_ISA_A if SMP
select SPARSE_IRQ
@@ -40,12 +42,14 @@ config RISCV
select HAVE_ARCH_TRACEHOOK
select HAVE_PCI
select MODULES_USE_ELF_RELA if MODULES
+ select MODULE_SECTIONS if MODULES
select THREAD_INFO_IN_TASK
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
select RISCV_TIMER
select GENERIC_IRQ_MULTI_HANDLER
select ARCH_HAS_PTE_SPECIAL
+ select HAVE_EBPF_JIT if 64BIT
config MMU
def_bool y
@@ -100,7 +104,7 @@ choice
prompt "Base ISA"
default ARCH_RV64I
help
- This selects the base ISA that this kernel will traget and must match
+ This selects the base ISA that this kernel will target and must match
the target platform.
config ARCH_RV32I
@@ -152,7 +156,6 @@ choice
bool "2GiB"
config MAXPHYSMEM_128GB
depends on 64BIT && CMODEL_MEDANY
- select MODULE_SECTIONS if MODULES
bool "128GiB"
endchoice
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 4b594f2e4f7e..c6342e638ef7 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -77,7 +77,7 @@ KBUILD_IMAGE := $(boot)/Image.gz
head-y := arch/riscv/kernel/head.o
-core-y += arch/riscv/kernel/ arch/riscv/mm/
+core-y += arch/riscv/kernel/ arch/riscv/mm/ arch/riscv/net/
libs-y += arch/riscv/lib/
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index f399659d3b8d..2fd3461e50ab 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_SMP=y
-CONFIG_PCI=y
-CONFIG_PCIE_XILINX=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_NET=y
@@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETLINK_DIAG=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
@@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_VIRTIO_MMIO=y
CONFIG_SIFIVE_PLIC=y
-CONFIG_RAS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
@@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
index cd2af4b013e3..46202dad365d 100644
--- a/arch/riscv/include/asm/module.h
+++ b/arch/riscv/include/asm/module.h
@@ -9,12 +9,12 @@
#define MODULE_ARCH_VERMAGIC "riscv"
struct module;
-u64 module_emit_got_entry(struct module *mod, u64 val);
-u64 module_emit_plt_entry(struct module *mod, u64 val);
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
#ifdef CONFIG_MODULE_SECTIONS
struct mod_section {
- struct elf64_shdr *shdr;
+ Elf_Shdr *shdr;
int num_entries;
int max_entries;
};
@@ -26,18 +26,18 @@ struct mod_arch_specific {
};
struct got_entry {
- u64 symbol_addr; /* the real variable address */
+ unsigned long symbol_addr; /* the real variable address */
};
-static inline struct got_entry emit_got_entry(u64 val)
+static inline struct got_entry emit_got_entry(unsigned long val)
{
return (struct got_entry) {val};
}
-static inline struct got_entry *get_got_entry(u64 val,
+static inline struct got_entry *get_got_entry(unsigned long val,
const struct mod_section *sec)
{
- struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr;
+ struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
int i;
for (i = 0; i < sec->num_entries; i++) {
if (got[i].symbol_addr == val)
@@ -62,7 +62,9 @@ struct plt_entry {
#define REG_T0 0x5
#define REG_T1 0x6
-static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
+static inline struct plt_entry emit_plt_entry(unsigned long val,
+ unsigned long plt,
+ unsigned long got_plt)
{
/*
* U-Type encoding:
@@ -76,7 +78,7 @@ static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
* +------------+------------+--------+----------+----------+
*
*/
- u64 offset = got_plt - plt;
+ unsigned long offset = got_plt - plt;
u32 hi20 = (offset + 0x800) & 0xfffff000;
u32 lo12 = (offset - hi20);
return (struct plt_entry) {
@@ -86,7 +88,7 @@ static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
};
}
-static inline int get_got_plt_idx(u64 val, const struct mod_section *sec)
+static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec)
{
struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
int i;
@@ -97,9 +99,9 @@ static inline int get_got_plt_idx(u64 val, const struct mod_section *sec)
return -1;
}
-static inline struct plt_entry *get_plt_entry(u64 val,
- const struct mod_section *sec_plt,
- const struct mod_section *sec_got_plt)
+static inline struct plt_entry *get_plt_entry(unsigned long val,
+ const struct mod_section *sec_plt,
+ const struct mod_section *sec_got_plt)
{
struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 06cfbb3aacbb..2a546a52f02a 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -80,7 +80,7 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
-#ifdef CONFIG_64BITS
+#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index 2fa2942be221..470755cb7558 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -35,6 +35,12 @@
#define _PAGE_SPECIAL _PAGE_SOFT
#define _PAGE_TABLE _PAGE_PRESENT
+/*
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
+ * distinguish them from swapped out pages
+ */
+#define _PAGE_PROT_NONE _PAGE_READ
+
#define _PAGE_PFN_SHIFT 10
/* Set of bits to preserve across pte_modify() */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 16301966d65b..a8179a8c1491 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -44,7 +44,7 @@
/* Page protection bits */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
-#define PAGE_NONE __pgprot(0)
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
static inline int pmd_present(pmd_t pmd)
{
- return (pmd_val(pmd) & _PAGE_PRESENT);
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
static inline int pmd_none(pmd_t pmd)
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
static inline int pte_present(pte_t pte)
{
- return (pte_val(pte) & _PAGE_PRESENT);
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
static inline int pte_none(pte_t pte)
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
*
* Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero)
- * bit 1: reserved for future use (zero)
+ * bit 1: _PAGE_PROT_NONE (zero)
* bits 2 to 6: swap type
* bits 7 to XLEN-1: swap offset
*/
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 0531f49af5c3..ce70bceb8872 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -22,7 +22,7 @@
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index bbe1862e8f80..d35ec2f41381 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -113,6 +113,11 @@ static inline void frame_pointer_set(struct pt_regs *regs,
SET_FP(regs, val);
}
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 8d25f8904c00..bba3da6ef157 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -18,6 +18,7 @@
#ifndef _ASM_RISCV_SYSCALL_H
#define _ASM_RISCV_SYSCALL_H
+#include <uapi/linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
@@ -99,4 +100,13 @@ static inline void syscall_set_arguments(struct task_struct *task,
memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
}
+static inline int syscall_get_arch(void)
+{
+#ifdef CONFIG_64BIT
+ return AUDIT_ARCH_RISCV64;
+#else
+ return AUDIT_ARCH_RISCV32;
+#endif
+}
+
#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index f8fa1cd2dad9..1c9cc8389928 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -80,13 +80,19 @@ struct thread_info {
#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_WORK \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
+
#endif /* _ASM_RISCV_THREAD_INFO_H */
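A quick check of the mask arithmetic above, assuming TIF_SYSCALL_TRACE is bit 0 as in the unshown earlier part of this header: _TIF_SYSCALL_WORK works out to 0xc1, which is the value entry.S now ANDs against the thread flags on syscall entry and exit.

    #include <stdio.h>

    #define TIF_SYSCALL_TRACE      0   /* assumed; defined outside this hunk */
    #define TIF_SYSCALL_TRACEPOINT 6
    #define TIF_SYSCALL_AUDIT      7

    #define _TIF_SYSCALL_WORK \
            ((1 << TIF_SYSCALL_TRACE) | (1 << TIF_SYSCALL_TRACEPOINT) | \
             (1 << TIF_SYSCALL_AUDIT))

    int main(void)
    {
            /* entry.S: andi t0, t0, _TIF_SYSCALL_WORK */
            printf("_TIF_SYSCALL_WORK = 0x%x\n", _TIF_SYSCALL_WORK);  /* 0xc1 */
            return 0;
    }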
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 637b896894fc..a00168b980d2 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -41,7 +41,6 @@
#define KERNEL_DS (~0UL)
#define USER_DS (TASK_SIZE)
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index fef96f117b4d..073ee80fdf74 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -19,3 +19,5 @@
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 1f3bd3ebbb0d..0e2eeeb1fd27 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -17,6 +17,7 @@
#ifdef __LP64__
#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
#endif /* __LP64__ */
#include <asm-generic/unistd.h>
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 6a92a2fe198e..dac98348c6a3 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ void asm_offsets(void)
OFFSET(TASK_STACK, task_struct, stack);
OFFSET(TASK_TI, task_struct, thread_info);
OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+ OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 13d4826ab2a1..fd9b57c8b4ce 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -144,6 +144,10 @@ _save_context:
REG_L x2, PT_SP(sp)
.endm
+#if !IS_ENABLED(CONFIG_PREEMPT)
+.set resume_kernel, restore_all
+#endif
+
ENTRY(handle_exception)
SAVE_ALL
@@ -201,7 +205,7 @@ handle_syscall:
REG_S s2, PT_SEPC(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
- andi t0, t0, _TIF_SYSCALL_TRACE
+ andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_enter
check_syscall_nr:
/* Check to make sure we don't jump to a bogus syscall number. */
@@ -221,14 +225,14 @@ ret_from_syscall:
REG_S a0, PT_A0(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
- andi t0, t0, _TIF_SYSCALL_TRACE
+ andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_exit
ret_from_exception:
REG_L s0, PT_SSTATUS(sp)
csrc sstatus, SR_SIE
andi s0, s0, SR_SPP
- bnez s0, restore_all
+ bnez s0, resume_kernel
resume_userspace:
/* Interrupts must be disabled here so flags are checked atomically */
@@ -250,6 +254,18 @@ restore_all:
RESTORE_ALL
sret
+#if IS_ENABLED(CONFIG_PREEMPT)
+resume_kernel:
+ REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+ bnez s0, restore_all
+need_resched:
+ REG_L s0, TASK_TI_FLAGS(tp)
+ andi s0, s0, _TIF_NEED_RESCHED
+ beqz s0, restore_all
+ call preempt_schedule_irq
+ j need_resched
+#endif
+
work_pending:
/* Enter slow path for supplementary processing */
la ra, ret_from_exception
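The new resume_kernel path in entry.S is the usual kernel-preemption check: bail out to restore_all if preemption is disabled (non-zero preempt count), otherwise reschedule as long as _TIF_NEED_RESCHED is set; without CONFIG_PREEMPT the label simply aliases restore_all. A compilable C rendering of that control flow, with stub variables standing in for the thread_info fields and a stubbed preempt_schedule_irq():

    #include <stdbool.h>
    #include <stdio.h>

    static int preempt_count = 0;            /* TASK_TI_PREEMPT_COUNT(tp) */
    static bool need_resched_flag = true;    /* _TIF_NEED_RESCHED         */

    static void preempt_schedule_irq(void)
    {
            puts("preempt_schedule_irq()");
            need_resched_flag = false;       /* pretend the reschedule ran */
    }

    static void restore_all(void) { puts("restore_all -> sret"); }

    /* Sketch of the resume_kernel/need_resched loop added above. */
    static void resume_kernel(void)
    {
            if (preempt_count) {
                    restore_all();
                    return;
            }
            while (need_resched_flag)
                    preempt_schedule_irq();
            restore_all();
    }

    int main(void) { resume_kernel(); return 0; }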
diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
index bbbd26e19bfd..c9ae48333114 100644
--- a/arch/riscv/kernel/module-sections.c
+++ b/arch/riscv/kernel/module-sections.c
@@ -9,14 +9,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
-u64 module_emit_got_entry(struct module *mod, u64 val)
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
{
struct mod_section *got_sec = &mod->arch.got;
int i = got_sec->num_entries;
struct got_entry *got = get_got_entry(val, got_sec);
if (got)
- return (u64)got;
+ return (unsigned long)got;
/* There is no duplicate entry, create a new one */
got = (struct got_entry *)got_sec->shdr->sh_addr;
@@ -25,10 +25,10 @@ u64 module_emit_got_entry(struct module *mod, u64 val)
got_sec->num_entries++;
BUG_ON(got_sec->num_entries > got_sec->max_entries);
- return (u64)&got[i];
+ return (unsigned long)&got[i];
}
-u64 module_emit_plt_entry(struct module *mod, u64 val)
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val)
{
struct mod_section *got_plt_sec = &mod->arch.got_plt;
struct got_entry *got_plt;
@@ -37,27 +37,29 @@ u64 module_emit_plt_entry(struct module *mod, u64 val)
int i = plt_sec->num_entries;
if (plt)
- return (u64)plt;
+ return (unsigned long)plt;
/* There is no duplicate entry, create a new one */
got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr;
got_plt[i] = emit_got_entry(val);
plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
- plt[i] = emit_plt_entry(val, (u64)&plt[i], (u64)&got_plt[i]);
+ plt[i] = emit_plt_entry(val,
+ (unsigned long)&plt[i],
+ (unsigned long)&got_plt[i]);
plt_sec->num_entries++;
got_plt_sec->num_entries++;
BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
- return (u64)&plt[i];
+ return (unsigned long)&plt[i];
}
-static int is_rela_equal(const Elf64_Rela *x, const Elf64_Rela *y)
+static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
{
return x->r_info == y->r_info && x->r_addend == y->r_addend;
}
-static bool duplicate_rela(const Elf64_Rela *rela, int idx)
+static bool duplicate_rela(const Elf_Rela *rela, int idx)
{
int i;
for (i = 0; i < idx; i++) {
@@ -67,13 +69,13 @@ static bool duplicate_rela(const Elf64_Rela *rela, int idx)
return false;
}
-static void count_max_entries(Elf64_Rela *relas, int num,
+static void count_max_entries(Elf_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
unsigned int type, i;
for (i = 0; i < num; i++) {
- type = ELF64_R_TYPE(relas[i].r_info);
+ type = ELF_RISCV_R_TYPE(relas[i].r_info);
if (type == R_RISCV_CALL_PLT) {
if (!duplicate_rela(relas, i))
(*plts)++;
@@ -118,9 +120,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
/* Calculate the maximum number of entries */
for (i = 0; i < ehdr->e_shnum; i++) {
- Elf64_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
- int num_rela = sechdrs[i].sh_size / sizeof(Elf64_Rela);
- Elf64_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
+ Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
+ int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+ Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
if (sechdrs[i].sh_type != SHT_RELA)
continue;
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 60f1e02eed36..2ae5e0284f56 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -18,12 +18,15 @@
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
+#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/tracehook.h>
+
+#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
enum riscv_regset {
@@ -163,15 +166,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
#endif
+
+ audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
}
void do_syscall_trace_exit(struct pt_regs *regs)
{
+ audit_syscall_exit(regs);
+
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- trace_sys_exit(regs, regs->regs[0]);
+ trace_sys_exit(regs, regs_return_value(regs));
#endif
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index fc8006a042eb..77564310235f 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -149,7 +149,14 @@ asmlinkage void __init setup_vm(void)
void __init parse_dtb(unsigned int hartid, void *dtb)
{
- early_init_dt_scan(__va(dtb));
+ if (!early_init_dt_scan(__va(dtb)))
+ return;
+
+ pr_err("No DTB passed to the kernel\n");
+#ifdef CONFIG_CMDLINE_FORCE
+ strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ pr_info("Forcing kernel command line to: %s\n", boot_command_line);
+#endif
}
static void __init setup_bootmem(void)
@@ -174,7 +181,7 @@ static void __init setup_bootmem(void)
BUG_ON(mem_size == 0);
set_max_mapnr(PFN_DOWN(mem_size));
- max_low_pfn = memblock_end_of_DRAM();
+ max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 57b1383e5ef7..246635eac7bb 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -23,6 +23,7 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <linux/delay.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
@@ -31,6 +32,7 @@
enum ipi_message_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
+ IPI_CPU_STOP,
IPI_MAX
};
@@ -66,6 +68,13 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
}
+static void ipi_stop(void)
+{
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ wait_for_interrupt();
+}
+
void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
@@ -94,6 +103,11 @@ void riscv_software_interrupt(void)
generic_smp_call_function_interrupt();
}
+ if (ops & (1 << IPI_CPU_STOP)) {
+ stats[IPI_CPU_STOP]++;
+ ipi_stop();
+ }
+
BUG_ON((ops >> IPI_MAX) != 0);
/* Order data access and bit testing. */
@@ -121,6 +135,7 @@ send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
@@ -146,15 +161,29 @@ void arch_send_call_function_single_ipi(int cpu)
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}
-static void ipi_stop(void *unused)
-{
- while (1)
- wait_for_interrupt();
-}
-
void smp_send_stop(void)
{
- on_each_cpu(ipi_stop, NULL, 1);
+ unsigned long timeout;
+
+ if (num_online_cpus() > 1) {
+ cpumask_t mask;
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+ if (system_state <= SYSTEM_RUNNING)
+ pr_crit("SMP: stopping secondary CPUs\n");
+ send_ipi_message(&mask, IPI_CPU_STOP);
+ }
+
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && timeout--)
+ udelay(1);
+
+ if (num_online_cpus() > 1)
+ pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(cpu_online_mask));
}
void smp_send_reschedule(int cpu)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index fc185ecabb0a..18cda0e8cf94 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -57,15 +57,12 @@ void __init setup_smp(void)
while ((dn = of_find_node_by_type(dn, "cpu"))) {
hart = riscv_of_processor_hartid(dn);
- if (hart < 0) {
- of_node_put(dn);
+ if (hart < 0)
continue;
- }
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = 1;
- of_node_put(dn);
continue;
}
@@ -73,7 +70,6 @@ void __init setup_smp(void)
set_cpu_possible(cpuid, true);
set_cpu_present(cpuid, true);
cpuid++;
- of_node_put(dn);
}
BUG_ON(!found_boot_cpu);
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 582cb153eb24..0cd044122234 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -54,7 +54,6 @@ static int __init vdso_init(void)
struct page *pg;
pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
- ClearPageReserved(pg);
vdso_pagelist[i] = pg;
}
vdso_pagelist[i] = virt_to_page(vdso_data);
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index eed1c137f618..fec62b24df89 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -2,9 +2,11 @@
# Symbols present in the vdso
vdso-syms = rt_sigreturn
+ifdef CONFIG_64BIT
vdso-syms += gettimeofday
vdso-syms += clock_gettime
vdso-syms += clock_getres
+endif
vdso-syms += getcpu
vdso-syms += flush_icache
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 1d9bfaff60bc..658ebf645f42 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -28,7 +28,8 @@ static void __init zone_sizes_init(void)
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
+ (unsigned long) PFN_PHYS(max_low_pfn)));
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
diff --git a/arch/riscv/net/Makefile b/arch/riscv/net/Makefile
new file mode 100644
index 000000000000..a132220cc582
--- /dev/null
+++ b/arch/riscv/net/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..80b12aa5e10d
--- /dev/null
+++ b/arch/riscv/net/bpf_jit_comp.c
@@ -0,0 +1,1602 @@
+// SPDX-License-Identifier: GPL-2.0
+/* BPF JIT compiler for RV64G
+ *
+ * Copyright(c) 2019 Björn Töpel <bjorn.topel@gmail.com>
+ *
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <asm/cacheflush.h>
+
+enum {
+ RV_REG_ZERO = 0, /* The constant value 0 */
+ RV_REG_RA = 1, /* Return address */
+ RV_REG_SP = 2, /* Stack pointer */
+ RV_REG_GP = 3, /* Global pointer */
+ RV_REG_TP = 4, /* Thread pointer */
+ RV_REG_T0 = 5, /* Temporaries */
+ RV_REG_T1 = 6,
+ RV_REG_T2 = 7,
+ RV_REG_FP = 8,
+ RV_REG_S1 = 9, /* Saved registers */
+ RV_REG_A0 = 10, /* Function argument/return values */
+ RV_REG_A1 = 11, /* Function arguments */
+ RV_REG_A2 = 12,
+ RV_REG_A3 = 13,
+ RV_REG_A4 = 14,
+ RV_REG_A5 = 15,
+ RV_REG_A6 = 16,
+ RV_REG_A7 = 17,
+ RV_REG_S2 = 18, /* Saved registers */
+ RV_REG_S3 = 19,
+ RV_REG_S4 = 20,
+ RV_REG_S5 = 21,
+ RV_REG_S6 = 22,
+ RV_REG_S7 = 23,
+ RV_REG_S8 = 24,
+ RV_REG_S9 = 25,
+ RV_REG_S10 = 26,
+ RV_REG_S11 = 27,
+ RV_REG_T3 = 28, /* Temporaries */
+ RV_REG_T4 = 29,
+ RV_REG_T5 = 30,
+ RV_REG_T6 = 31,
+};
+
+#define RV_REG_TCC RV_REG_A6
+#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if the program makes calls */
+
+static const int regmap[] = {
+ [BPF_REG_0] = RV_REG_A5,
+ [BPF_REG_1] = RV_REG_A0,
+ [BPF_REG_2] = RV_REG_A1,
+ [BPF_REG_3] = RV_REG_A2,
+ [BPF_REG_4] = RV_REG_A3,
+ [BPF_REG_5] = RV_REG_A4,
+ [BPF_REG_6] = RV_REG_S1,
+ [BPF_REG_7] = RV_REG_S2,
+ [BPF_REG_8] = RV_REG_S3,
+ [BPF_REG_9] = RV_REG_S4,
+ [BPF_REG_FP] = RV_REG_S5,
+ [BPF_REG_AX] = RV_REG_T0,
+};
+
+enum {
+ RV_CTX_F_SEEN_TAIL_CALL = 0,
+ RV_CTX_F_SEEN_CALL = RV_REG_RA,
+ RV_CTX_F_SEEN_S1 = RV_REG_S1,
+ RV_CTX_F_SEEN_S2 = RV_REG_S2,
+ RV_CTX_F_SEEN_S3 = RV_REG_S3,
+ RV_CTX_F_SEEN_S4 = RV_REG_S4,
+ RV_CTX_F_SEEN_S5 = RV_REG_S5,
+ RV_CTX_F_SEEN_S6 = RV_REG_S6,
+};
+
+struct rv_jit_context {
+ struct bpf_prog *prog;
+ u32 *insns; /* RV insns */
+ int ninsns;
+ int epilogue_offset;
+ int *offset; /* BPF to RV */
+ unsigned long flags;
+ int stack_size;
+};
+
+struct rv_jit_data {
+ struct bpf_binary_header *header;
+ u8 *image;
+ struct rv_jit_context ctx;
+};
+
+static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
+{
+ u8 reg = regmap[bpf_reg];
+
+ switch (reg) {
+ case RV_CTX_F_SEEN_S1:
+ case RV_CTX_F_SEEN_S2:
+ case RV_CTX_F_SEEN_S3:
+ case RV_CTX_F_SEEN_S4:
+ case RV_CTX_F_SEEN_S5:
+ case RV_CTX_F_SEEN_S6:
+ __set_bit(reg, &ctx->flags);
+ }
+ return reg;
+};
+
+static bool seen_reg(int reg, struct rv_jit_context *ctx)
+{
+ switch (reg) {
+ case RV_CTX_F_SEEN_CALL:
+ case RV_CTX_F_SEEN_S1:
+ case RV_CTX_F_SEEN_S2:
+ case RV_CTX_F_SEEN_S3:
+ case RV_CTX_F_SEEN_S4:
+ case RV_CTX_F_SEEN_S5:
+ case RV_CTX_F_SEEN_S6:
+ return test_bit(reg, &ctx->flags);
+ }
+ return false;
+}
+
+static void mark_call(struct rv_jit_context *ctx)
+{
+ __set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
+}
+
+static bool seen_call(struct rv_jit_context *ctx)
+{
+ return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
+}
+
+static void mark_tail_call(struct rv_jit_context *ctx)
+{
+ __set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
+}
+
+static bool seen_tail_call(struct rv_jit_context *ctx)
+{
+ return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
+}
+
+static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
+{
+ mark_tail_call(ctx);
+
+ if (seen_call(ctx)) {
+ __set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
+ return RV_REG_S6;
+ }
+ return RV_REG_A6;
+}
+
+static void emit(const u32 insn, struct rv_jit_context *ctx)
+{
+ if (ctx->insns)
+ ctx->insns[ctx->ninsns] = insn;
+
+ ctx->ninsns++;
+}
+
+static u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, u8 opcode)
+{
+ return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (rd << 7) | opcode;
+}
+
+static u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode)
+{
+ return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) |
+ opcode;
+}
+
+static u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
+{
+ u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f;
+
+ return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (imm4_0 << 7) | opcode;
+}
+
+static u32 rv_sb_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
+{
+ u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4);
+ u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10);
+
+ return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (imm4_1 << 7) | opcode;
+}
+
+static u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode)
+{
+ return (imm31_12 << 12) | (rd << 7) | opcode;
+}
+
+static u32 rv_uj_insn(u32 imm20_1, u8 rd, u8 opcode)
+{
+ u32 imm;
+
+ imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) |
+ ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11);
+
+ return (imm << 12) | (rd << 7) | opcode;
+}
+
+static u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
+ u8 funct3, u8 rd, u8 opcode)
+{
+ u8 funct7 = (funct5 << 2) | (aq << 1) | rl;
+
+ return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
+}
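+
+/* These helpers pack the standard RV64 R/I/S/SB/U/UJ instruction formats.
+ * As a sanity check outside the kernel, the same I-type packing reproduces
+ * the canonical encoding of "addi a0, zero, 1" (the expansion of "li a0, 1"),
+ * which assembles to 0x00100513:
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *
+ *     static uint32_t i_insn(uint32_t imm11_0, uint32_t rs1, uint32_t funct3,
+ *                            uint32_t rd, uint32_t opcode)
+ *     {
+ *             return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) |
+ *                    (rd << 7) | opcode;
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             // addi a0, zero, 1: rd = x10 (a0), rs1 = x0, opcode 0x13
+ *             printf("0x%08x\n", i_insn(1, 0, 0, 10, 0x13)); // 0x00100513
+ *             return 0;
+ *     }
+ */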
+
+static u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b);
+}
+
+static u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x13);
+}
+
+static u32 rv_addw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b);
+}
+
+static u32 rv_add(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 0, rd, 0x33);
+}
+
+static u32 rv_subw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b);
+}
+
+static u32 rv_sub(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33);
+}
+
+static u32 rv_and(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 7, rd, 0x33);
+}
+
+static u32 rv_or(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 6, rd, 0x33);
+}
+
+static u32 rv_xor(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 4, rd, 0x33);
+}
+
+static u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
+}
+
+static u32 rv_mul(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 0, rd, 0x33);
+}
+
+static u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
+}
+
+static u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
+}
+
+static u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);
+}
+
+static u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
+}
+
+static u32 rv_sllw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b);
+}
+
+static u32 rv_sll(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 1, rd, 0x33);
+}
+
+static u32 rv_srlw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b);
+}
+
+static u32 rv_srl(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 5, rd, 0x33);
+}
+
+static u32 rv_sraw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b);
+}
+
+static u32 rv_sra(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33);
+}
+
+static u32 rv_lui(u8 rd, u32 imm31_12)
+{
+ return rv_u_insn(imm31_12, rd, 0x37);
+}
+
+static u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 1, rd, 0x13);
+}
+
+static u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 7, rd, 0x13);
+}
+
+static u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 6, rd, 0x13);
+}
+
+static u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 4, rd, 0x13);
+}
+
+static u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b);
+}
+
+static u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b);
+}
+
+static u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x13);
+}
+
+static u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b);
+}
+
+static u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13);
+}
+
+static u32 rv_jal(u8 rd, u32 imm20_1)
+{
+ return rv_uj_insn(imm20_1, rd, 0x6f);
+}
+
+static u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x67);
+}
+
+static u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_sb_insn(imm12_1, rs2, rs1, 0, 0x63);
+}
+
+static u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_sb_insn(imm12_1, rs2, rs1, 6, 0x63);
+}
+
+static u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_sb_insn(imm12_1, rs2, rs1, 7, 0x63);
+}
+
+static u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_sb_insn(imm12_1, rs2, rs1, 1, 0x63);
+}
+
+static u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_sb_insn(imm12_1, rs2, rs1, 4, 0x63);
+}
+
+static u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_sb_insn(imm12_1, rs2, rs1, 5, 0x63);
+}
+
+static u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23);
+}
+
+static u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23);
+}
+
+static u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23);
+}
+
+static u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23);
+}
+
+static u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 4, rd, 0x03);
+}
+
+static u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x03);
+}
+
+static u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 6, rd, 0x03);
+}
+
+static u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 3, rd, 0x03);
+}
+
+static u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static bool is_12b_int(s64 val)
+{
+ return -(1 << 11) <= val && val < (1 << 11);
+}
+
+static bool is_13b_int(s64 val)
+{
+ return -(1 << 12) <= val && val < (1 << 12);
+}
+
+static bool is_21b_int(s64 val)
+{
+ return -(1L << 20) <= val && val < (1L << 20);
+}
+
+static bool is_32b_int(s64 val)
+{
+ return -(1L << 31) <= val && val < (1L << 31);
+}
+
+static int is_12b_check(int off, int insn)
+{
+ if (!is_12b_int(off)) {
+ pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n",
+ insn, (int)off);
+ return -1;
+ }
+ return 0;
+}
+
+static int is_13b_check(int off, int insn)
+{
+ if (!is_13b_int(off)) {
+ pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n",
+ insn, (int)off);
+ return -1;
+ }
+ return 0;
+}
+
+static int is_21b_check(int off, int insn)
+{
+ if (!is_21b_int(off)) {
+ pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n",
+ insn, (int)off);
+ return -1;
+ }
+ return 0;
+}
+
+static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
+{
+ /* Note that the immediate from the add is sign-extended,
+ * which means that we need to compensate this by adding 2^12,
+ * when the 12th bit is set. A simpler way of doing this, and
+ * getting rid of the check, is to just add 2**11 before the
+ * shift. The "Loading a 32-Bit constant" example from the
+ * "Computer Organization and Design, RISC-V edition" book by
+ * Patterson/Hennessy highlights this fact.
+ *
+ * This also means that we need to process LSB to MSB.
+ */
+ s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff;
+ int shift;
+
+ if (is_32b_int(val)) {
+ if (upper)
+ emit(rv_lui(rd, upper), ctx);
+
+ if (!upper) {
+ emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
+ return;
+ }
+
+ emit(rv_addiw(rd, rd, lower), ctx);
+ return;
+ }
+
+ shift = __ffs(upper);
+ upper >>= shift;
+ shift += 12;
+
+ emit_imm(rd, upper, ctx);
+
+ emit(rv_slli(rd, rd, shift), ctx);
+ if (lower)
+ emit(rv_addi(rd, rd, lower), ctx);
+}
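+
+/* Worked example of emit_imm()'s 32-bit path, written as a standalone sketch
+ * (mirrors the split above; not a kernel helper). For val = 0x12345678 the
+ * split gives upper = (val + 0x800) >> 12 = 0x12345 and lower = 0x678, so the
+ * emitted pair is "lui rd, 0x12345 ; addiw rd, rd, 0x678". The +0x800 matters
+ * when bit 11 of lower is set: for 0x12345fff, upper becomes 0x12346 and the
+ * sign-extended addiw of 0xfff (-1) lands back on 0x12345fff.
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *
+ *     static int32_t materialize32(int32_t val)
+ *     {
+ *             int32_t upper = (int32_t)(((int64_t)val + (1 << 11)) >> 12);
+ *             int32_t lower = val & 0xfff;
+ *
+ *             if (lower >= 0x800)
+ *                     lower -= 0x1000;   // addiw sign-extends its immediate
+ *             return (upper << 12) + lower;  // lui ; addiw
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             printf("0x%08x\n", (uint32_t)materialize32(0x12345678)); // 0x12345678
+ *             printf("0x%08x\n", (uint32_t)materialize32(0x12345fff)); // 0x12345fff
+ *             return 0;
+ *     }
+ *
+ * Values that do not fit in 32 bits take the recursive path: build the upper
+ * part first, shift it left, then add the low 12 bits.
+ */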
+
+static int rv_offset(int bpf_to, int bpf_from, struct rv_jit_context *ctx)
+{
+ int from = ctx->offset[bpf_from] - 1, to = ctx->offset[bpf_to];
+
+ return (to - from) << 2;
+}
+
+static int epilogue_offset(struct rv_jit_context *ctx)
+{
+ int to = ctx->epilogue_offset, from = ctx->ninsns;
+
+ return (to - from) << 2;
+}
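+
+/* Both offset helpers count in 32-bit RISC-V instructions and shift left by 2
+ * to get a byte offset; the branch/jump emitters then pass rvoff >> 1 because
+ * the SB/UJ immediates are expressed in 2-byte units (imm[12:1], imm[20:1]).
+ * A tiny check of that arithmetic (standalone sketch):
+ *
+ *     #include <stdio.h>
+ *
+ *     int main(void)
+ *     {
+ *             int ninsns_ahead = 3;            // branch target, in RV insns
+ *             int rvoff = ninsns_ahead << 2;   // byte offset, as rv_offset()
+ *
+ *             // rv_beq()/rv_jal() take the offset in halfwords:
+ *             printf("rvoff=%d bytes, encoded imm=%d\n", rvoff, rvoff >> 1);
+ *             return 0;
+ *     }
+ */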
+
+static void __build_epilogue(u8 reg, struct rv_jit_context *ctx)
+{
+ int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;
+
+ if (seen_reg(RV_REG_RA, ctx)) {
+ emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx);
+ store_offset -= 8;
+ }
+ emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx);
+ store_offset -= 8;
+ if (seen_reg(RV_REG_S1, ctx)) {
+ emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S2, ctx)) {
+ emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S3, ctx)) {
+ emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S4, ctx)) {
+ emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S5, ctx)) {
+ emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S6, ctx)) {
+ emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx);
+ store_offset -= 8;
+ }
+
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
+ /* Set return value. */
+ emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx);
+ emit(rv_jalr(RV_REG_ZERO, reg, 0), ctx);
+}
+
+static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
+{
+ emit(rv_slli(reg, reg, 32), ctx);
+ emit(rv_srli(reg, reg, 32), ctx);
+}
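+
+/* Zero-extending the low 32 bits is done with a shift pair rather than a mask,
+ * presumably because andi only takes a 12-bit immediate, so an AND with
+ * 0xffffffff would need an extra constant load. The C equivalent, as a
+ * standalone sketch:
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *
+ *     static uint64_t zext_32(uint64_t reg)
+ *     {
+ *             reg <<= 32;   // rv_slli(reg, reg, 32)
+ *             reg >>= 32;   // rv_srli(reg, reg, 32)
+ *             return reg;
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             printf("0x%016llx\n", (unsigned long long)
+ *                    zext_32(0xdeadbeefcafef00dull));  // 0x00000000cafef00d
+ *             return 0;
+ *     }
+ */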
+
+static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
+{
+ int tc_ninsn, off, start_insn = ctx->ninsns;
+ u8 tcc = rv_tail_call_reg(ctx);
+
+ /* a0: &ctx
+ * a1: &array
+ * a2: index
+ *
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
+ ctx->offset[0];
+ emit_zext_32(RV_REG_A2, ctx);
+
+ off = offsetof(struct bpf_array, map.max_entries);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
+ off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ if (is_13b_check(off, insn))
+ return -1;
+ emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx);
+
+ /* if (--TCC < 0)
+ * goto out;
+ */
+ emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
+ off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ if (is_13b_check(off, insn))
+ return -1;
+ emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx);
+
+ /* prog = array->ptrs[index];
+ * if (!prog)
+ * goto out;
+ */
+ emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx);
+ off = offsetof(struct bpf_array, ptrs);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx);
+ off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ if (is_13b_check(off, insn))
+ return -1;
+ emit(rv_beq(RV_REG_T2, RV_REG_ZERO, off >> 1), ctx);
+
+ /* goto *(prog->bpf_func + 4); */
+ off = offsetof(struct bpf_prog, bpf_func);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx);
+ emit(rv_addi(RV_REG_T3, RV_REG_T3, 4), ctx);
+ emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx);
+ __build_epilogue(RV_REG_T3, ctx);
+ return 0;
+}
+
+static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
+ struct rv_jit_context *ctx)
+{
+ u8 code = insn->code;
+
+ switch (code) {
+ case BPF_JMP | BPF_JA:
+ case BPF_JMP | BPF_CALL:
+ case BPF_JMP | BPF_EXIT:
+ case BPF_JMP | BPF_TAIL_CALL:
+ break;
+ default:
+ *rd = bpf_to_rv_reg(insn->dst_reg, ctx);
+ }
+
+ if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
+ code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
+ code & BPF_LDX || code & BPF_STX)
+ *rs = bpf_to_rv_reg(insn->src_reg, ctx);
+}
+
+static int rv_offset_check(int *rvoff, s16 off, int insn,
+ struct rv_jit_context *ctx)
+{
+ *rvoff = rv_offset(insn + off, insn, ctx);
+ return is_13b_check(*rvoff, insn);
+}
+
+static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
+{
+ emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
+ emit_zext_32(RV_REG_T2, ctx);
+ emit(rv_addi(RV_REG_T1, *rs, 0), ctx);
+ emit_zext_32(RV_REG_T1, ctx);
+ *rd = RV_REG_T2;
+ *rs = RV_REG_T1;
+}
+
+static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
+{
+ emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
+ emit(rv_addiw(RV_REG_T1, *rs, 0), ctx);
+ *rd = RV_REG_T2;
+ *rs = RV_REG_T1;
+}
+
+static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
+{
+ emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
+ emit_zext_32(RV_REG_T2, ctx);
+ emit_zext_32(RV_REG_T1, ctx);
+ *rd = RV_REG_T2;
+}
+
+static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
+{
+ emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
+ *rd = RV_REG_T2;
+}
+
+static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ bool extra_pass)
+{
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
+ BPF_CLASS(insn->code) == BPF_JMP;
+ int rvoff, i = insn - ctx->prog->insnsi;
+ u8 rd = -1, rs = -1, code = insn->code;
+ s16 off = insn->off;
+ s32 imm = insn->imm;
+
+ init_regs(&rd, &rs, insn, ctx);
+
+ switch (code) {
+ /* dst = src */
+ case BPF_ALU | BPF_MOV | BPF_X:
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ emit(is64 ? rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx);
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = dst OP src */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ emit(rv_and(rd, rd, rs), ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ emit(rv_or(rd, rd, rs), ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ emit(rv_xor(rd, rd, rs), ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
+ break;
+
+ /* dst = -dst */
+ case BPF_ALU | BPF_NEG:
+ case BPF_ALU64 | BPF_NEG:
+ emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
+ rv_subw(rd, RV_REG_ZERO, rd), ctx);
+ break;
+
+ /* dst = BSWAP##imm(dst) */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ {
+ int shift = 64 - imm;
+
+ emit(rv_slli(rd, rd, shift), ctx);
+ emit(rv_srli(rd, rd, shift), ctx);
+ break;
+ }
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);
+
+ emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+ emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ if (imm == 16)
+ goto out_be;
+
+ emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+ emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+
+ emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+ emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ if (imm == 32)
+ goto out_be;
+
+ emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+ emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+
+ emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+ emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+
+ emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+ emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+
+ emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+ emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+out_be:
+ emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+
+ emit(rv_addi(rd, RV_REG_T2, 0), ctx);
+ break;
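+
+/* The BPF_FROM_BE path above builds the swapped value in T2 one byte at a
+ * time: shift the accumulator left by 8, pull the next low byte out of rd,
+ * shift rd right by 8, and fall through early for the 16- and 32-bit widths.
+ * The same algorithm in portable C (a sketch, not a kernel helper); note that
+ * only the low `bits` bits survive and the rest of the register is cleared:
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *
+ *     static uint64_t bswap_low(uint64_t rd, int bits)
+ *     {
+ *             uint64_t t2 = 0;
+ *             int i;
+ *
+ *             for (i = 0; i < bits / 8; i++) {
+ *                     t2 = (t2 << 8) | (rd & 0xff);
+ *                     rd >>= 8;
+ *             }
+ *             return t2;
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             uint64_t v = 0x1122334455667788ull;
+ *
+ *             printf("0x%llx\n", (unsigned long long)bswap_low(v, 16)); // 0x8877
+ *             printf("0x%llx\n", (unsigned long long)bswap_low(v, 32)); // 0x88776655
+ *             printf("0x%llx\n", (unsigned long long)bswap_low(v, 64)); // 0x8877665544332211
+ *             return 0;
+ *     }
+ */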
+
+ /* dst = imm */
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ emit_imm(rd, imm, ctx);
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = dst OP imm */
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ if (is_12b_int(imm)) {
+ emit(is64 ? rv_addi(rd, rd, imm) :
+ rv_addiw(rd, rd, imm), ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(is64 ? rv_add(rd, rd, RV_REG_T1) :
+ rv_addw(rd, rd, RV_REG_T1), ctx);
+ }
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ if (is_12b_int(-imm)) {
+ emit(is64 ? rv_addi(rd, rd, -imm) :
+ rv_addiw(rd, rd, -imm), ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(is64 ? rv_sub(rd, rd, RV_REG_T1) :
+ rv_subw(rd, rd, RV_REG_T1), ctx);
+ }
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ if (is_12b_int(imm)) {
+ emit(rv_andi(rd, rd, imm), ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(rv_and(rd, rd, RV_REG_T1), ctx);
+ }
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ if (is_12b_int(imm)) {
+ emit(rv_ori(rd, rd, imm), ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(rv_or(rd, rd, RV_REG_T1), ctx);
+ }
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ if (is_12b_int(imm)) {
+ emit(rv_xori(rd, rd, imm), ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(rv_xor(rd, rd, RV_REG_T1), ctx);
+ }
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
+ rv_mulw(rd, rd, RV_REG_T1), ctx);
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
+ rv_divuw(rd, rd, RV_REG_T1), ctx);
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
+ rv_remuw(rd, rd, RV_REG_T1), ctx);
+ if (!is64)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
+ break;
+
+ /* JUMP off */
+ case BPF_JMP | BPF_JA:
+ rvoff = rv_offset(i + off, i, ctx);
+ if (!is_21b_int(rvoff)) {
+ pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n",
+ i, rvoff);
+ return -1;
+ }
+
+ emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
+ break;
+
+ /* IF (dst COND src) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_beq(rd, rs, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_bltu(rs, rd, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_bltu(rd, rs, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_bgeu(rd, rs, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_bgeu(rs, rd, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_bne(rd, rs, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_sext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_blt(rs, rd, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_sext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_blt(rd, rs, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_sext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_bge(rd, rs, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_sext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_bge(rs, rd, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ if (!is64)
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ emit(rv_and(RV_REG_T1, rd, rs), ctx);
+ emit(rv_bne(RV_REG_T1, RV_REG_ZERO, rvoff >> 1), ctx);
+ break;
+
+ /* IF (dst COND imm) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_zext_32_rd_t1(&rd, ctx);
+ emit(rv_beq(rd, RV_REG_T1, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_zext_32_rd_t1(&rd, ctx);
+ emit(rv_bltu(RV_REG_T1, rd, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_zext_32_rd_t1(&rd, ctx);
+ emit(rv_bltu(rd, RV_REG_T1, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_zext_32_rd_t1(&rd, ctx);
+ emit(rv_bgeu(rd, RV_REG_T1, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_zext_32_rd_t1(&rd, ctx);
+ emit(rv_bgeu(RV_REG_T1, rd, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_zext_32_rd_t1(&rd, ctx);
+ emit(rv_bne(rd, RV_REG_T1, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_sext_32_rd(&rd, ctx);
+ emit(rv_blt(RV_REG_T1, rd, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_sext_32_rd(&rd, ctx);
+ emit(rv_blt(rd, RV_REG_T1, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_sext_32_rd(&rd, ctx);
+ emit(rv_bge(rd, RV_REG_T1, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_sext_32_rd(&rd, ctx);
+ emit(rv_bge(RV_REG_T1, rd, rvoff >> 1), ctx);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ if (rv_offset_check(&rvoff, off, i, ctx))
+ return -1;
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (!is64)
+ emit_zext_32_rd_t1(&rd, ctx);
+ emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
+ emit(rv_bne(RV_REG_T1, RV_REG_ZERO, rvoff >> 1), ctx);
+ break;
+
+ /* function call */
+ case BPF_JMP | BPF_CALL:
+ {
+ bool fixed;
+ int i, ret;
+ u64 addr;
+
+ mark_call(ctx);
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
+ &fixed);
+ if (ret < 0)
+ return ret;
+ if (fixed) {
+ emit_imm(RV_REG_T1, addr, ctx);
+ } else {
+ i = ctx->ninsns;
+ emit_imm(RV_REG_T1, addr, ctx);
+ for (i = ctx->ninsns - i; i < 8; i++) {
+ /* nop */
+ emit(rv_addi(RV_REG_ZERO, RV_REG_ZERO, 0),
+ ctx);
+ }
+ }
+ emit(rv_jalr(RV_REG_RA, RV_REG_T1, 0), ctx);
+ rd = bpf_to_rv_reg(BPF_REG_0, ctx);
+ emit(rv_addi(rd, RV_REG_A0, 0), ctx);
+ break;
+ }
+ /* tail call */
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(i, ctx))
+ return -1;
+ break;
+
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
+ if (i == ctx->prog->len - 1)
+ break;
+
+ rvoff = epilogue_offset(ctx);
+ if (is_21b_check(rvoff, i))
+ return -1;
+ emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
+ break;
+
+ /* dst = imm64 */
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ struct bpf_insn insn1 = insn[1];
+ u64 imm64;
+
+ imm64 = (u64)insn1.imm << 32 | (u32)imm;
+ emit_imm(rd, imm64, ctx);
+ return 1;
+ }
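+
+/* BPF_LD | BPF_IMM | BPF_DW is the only two-slot BPF instruction: the 64-bit
+ * constant is split across insn[0].imm and insn[1].imm, so the case returns 1
+ * to make the caller skip the second slot. The cast to u32 before OR-ing
+ * avoids sign-extending the low half, as this standalone sketch shows:
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *
+ *     int main(void)
+ *     {
+ *             int32_t imm_lo = (int32_t)0x89abcdefu;  // insn[0].imm, negative as s32
+ *             int32_t imm_hi = 0x01234567;            // insn[1].imm
+ *
+ *             uint64_t good = ((uint64_t)imm_hi << 32) | (uint32_t)imm_lo;
+ *             uint64_t bad  = ((uint64_t)imm_hi << 32) | (uint64_t)imm_lo;
+ *
+ *             printf("0x%016llx\n", (unsigned long long)good); // 0x0123456789abcdef
+ *             printf("0x%016llx\n", (unsigned long long)bad);  // 0xffffffff89abcdef
+ *             return 0;
+ *     }
+ */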
+
+ /* LDX: dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_B:
+ if (is_12b_int(off)) {
+ emit(rv_lbu(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+ emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
+ break;
+ case BPF_LDX | BPF_MEM | BPF_H:
+ if (is_12b_int(off)) {
+ emit(rv_lhu(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+ emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
+ break;
+ case BPF_LDX | BPF_MEM | BPF_W:
+ if (is_12b_int(off)) {
+ emit(rv_lwu(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+ emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
+ break;
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ if (is_12b_int(off)) {
+ emit(rv_ld(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+ emit(rv_ld(rd, 0, RV_REG_T1), ctx);
+ break;
+
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_B:
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (is_12b_int(off)) {
+ emit(rv_sb(rd, off, RV_REG_T1), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
+ emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
+ break;
+
+ case BPF_ST | BPF_MEM | BPF_H:
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (is_12b_int(off)) {
+ emit(rv_sh(rd, off, RV_REG_T1), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
+ emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
+ break;
+ case BPF_ST | BPF_MEM | BPF_W:
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (is_12b_int(off)) {
+ emit(rv_sw(rd, off, RV_REG_T1), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
+ emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx);
+ break;
+ case BPF_ST | BPF_MEM | BPF_DW:
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (is_12b_int(off)) {
+ emit(rv_sd(rd, off, RV_REG_T1), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
+ emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx);
+ break;
+
+ /* STX: *(size *)(dst + off) = src */
+ case BPF_STX | BPF_MEM | BPF_B:
+ if (is_12b_int(off)) {
+ emit(rv_sb(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+ emit(rv_sb(RV_REG_T1, 0, rs), ctx);
+ break;
+ case BPF_STX | BPF_MEM | BPF_H:
+ if (is_12b_int(off)) {
+ emit(rv_sh(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+ emit(rv_sh(RV_REG_T1, 0, rs), ctx);
+ break;
+ case BPF_STX | BPF_MEM | BPF_W:
+ if (is_12b_int(off)) {
+ emit(rv_sw(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+ emit(rv_sw(RV_REG_T1, 0, rs), ctx);
+ break;
+ case BPF_STX | BPF_MEM | BPF_DW:
+ if (is_12b_int(off)) {
+ emit(rv_sd(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+ emit(rv_sd(RV_REG_T1, 0, rs), ctx);
+ break;
+ /* STX XADD: lock *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* STX XADD: lock *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ if (off) {
+ if (is_12b_int(off)) {
+ emit(rv_addi(RV_REG_T1, rd, off), ctx);
+ } else {
+ emit_imm(RV_REG_T1, off, ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+ }
+
+ rd = RV_REG_T1;
+ }
+
+ emit(BPF_SIZE(code) == BPF_W ?
+ rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) :
+ rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+ break;
+ default:
+ pr_err("bpf-jit: unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void build_prologue(struct rv_jit_context *ctx)
+{
+ int stack_adjust = 0, store_offset, bpf_stack_adjust;
+
+ if (seen_reg(RV_REG_RA, ctx))
+ stack_adjust += 8;
+ stack_adjust += 8; /* RV_REG_FP */
+ if (seen_reg(RV_REG_S1, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S2, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S3, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S4, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S5, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S6, ctx))
+ stack_adjust += 8;
+
+ stack_adjust = round_up(stack_adjust, 16);
+ bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+ stack_adjust += bpf_stack_adjust;
+
+ store_offset = stack_adjust - 8;
+
+ /* The first instruction always sets the tail-call counter
+ * (TCC) register. This instruction is skipped for tail calls.
+ */
+ emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
+
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
+
+ if (seen_reg(RV_REG_RA, ctx)) {
+ emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx);
+ store_offset -= 8;
+ }
+ emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx);
+ store_offset -= 8;
+ if (seen_reg(RV_REG_S1, ctx)) {
+ emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S2, ctx)) {
+ emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S3, ctx)) {
+ emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S4, ctx)) {
+ emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S5, ctx)) {
+ emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S6, ctx)) {
+ emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx);
+ store_offset -= 8;
+ }
+
+ emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
+
+ if (bpf_stack_adjust)
+ emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx);
+
+ /* The program contains calls and tail calls, so RV_REG_TCC needs
+ * to be saved across calls.
+ */
+ if (seen_tail_call(ctx) && seen_call(ctx))
+ emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);
+
+ ctx->stack_size = stack_adjust;
+}
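
As an aside on the frame sizing in build_prologue() above, here is a small worked example of how stack_adjust comes together. The chosen set of "seen" registers and the BPF stack depth of 24 bytes are hypothetical; only the accounting and the round-up to 16 mirror the code.

#include <stdio.h>

/* Equivalent of the kernel's round_up() for power-of-two alignment. */
#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	int stack_adjust = 0;

	stack_adjust += 8;	/* RV_REG_RA, assumed seen  */
	stack_adjust += 8;	/* RV_REG_FP, always saved  */
	stack_adjust += 8;	/* RV_REG_S1, assumed seen  */
	stack_adjust += 8;	/* RV_REG_S2, assumed seen  */
	stack_adjust = round_up(stack_adjust, 16);	/* 32 stays 32         */
	stack_adjust += round_up(24, 16);		/* BPF stack: 24 -> 32 */
	printf("frame size: %d\n", stack_adjust);	/* prints 64           */
	return 0;
}

The register saves then start at offset stack_adjust - 8 and walk downwards, exactly as the store_offset bookkeeping above does.
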
+
+static void build_epilogue(struct rv_jit_context *ctx)
+{
+ __build_epilogue(RV_REG_RA, ctx);
+}
+
+static int build_body(struct rv_jit_context *ctx, bool extra_pass)
+{
+ const struct bpf_prog *prog = ctx->prog;
+ int i;
+
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ int ret;
+
+ ret = emit_insn(insn, ctx, extra_pass);
+ if (ret > 0) {
+ i++;
+ if (ctx->insns == NULL)
+ ctx->offset[i] = ctx->ninsns;
+ continue;
+ }
+ if (ctx->insns == NULL)
+ ctx->offset[i] = ctx->ninsns;
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void bpf_fill_ill_insns(void *area, unsigned int size)
+{
+ memset(area, 0, size);
+}
+
+static void bpf_flush_icache(void *start, void *end)
+{
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ bool tmp_blinded = false, extra_pass = false;
+ struct bpf_prog *tmp, *orig_prog = prog;
+ struct rv_jit_data *jit_data;
+ struct rv_jit_context *ctx;
+ unsigned int image_size;
+
+ if (!prog->jit_requested)
+ return orig_prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ jit_data = prog->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ prog = orig_prog;
+ goto out;
+ }
+ prog->aux->jit_data = jit_data;
+ }
+
+ ctx = &jit_data->ctx;
+
+ if (ctx->offset) {
+ extra_pass = true;
+ image_size = sizeof(u32) * ctx->ninsns;
+ goto skip_init_ctx;
+ }
+
+ ctx->prog = prog;
+ ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ if (!ctx->offset) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+ /* First pass generates the ctx->offset, but does not emit an image. */
+ if (build_body(ctx, extra_pass)) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+ build_prologue(ctx);
+ ctx->epilogue_offset = ctx->ninsns;
+ build_epilogue(ctx);
+
+ /* Allocate image, now that we know the size. */
+ image_size = sizeof(u32) * ctx->ninsns;
+ jit_data->header = bpf_jit_binary_alloc(image_size, &jit_data->image,
+ sizeof(u32),
+ bpf_fill_ill_insns);
+ if (!jit_data->header) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+ /* Second, real pass that actually emits the image. */
+ ctx->insns = (u32 *)jit_data->image;
+skip_init_ctx:
+ ctx->ninsns = 0;
+
+ build_prologue(ctx);
+ if (build_body(ctx, extra_pass)) {
+ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
+ build_epilogue(ctx);
+
+ if (bpf_jit_enable > 1)
+ bpf_jit_dump(prog->len, image_size, 2, ctx->insns);
+
+ prog->bpf_func = (void *)ctx->insns;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+
+ bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
+
+ if (!prog->is_func || extra_pass) {
+out_offset:
+ kfree(ctx->offset);
+ kfree(jit_data);
+ prog->aux->jit_data = NULL;
+ }
+out:
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ return prog;
+}
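
One structural point in bpf_int_jit_compile() above that is easy to miss: build_prologue()/build_body()/build_epilogue() run twice, first with ctx->insns == NULL purely to count instructions and record per-instruction offsets, then again into the allocated image. Below is a minimal, self-contained sketch of that two-pass pattern; the jit_ctx structure and emit() helper are simplified stand-ins, not the JIT's real types.

#include <stdlib.h>

struct jit_ctx {
	unsigned int ninsns;	/* 32-bit words emitted so far */
	unsigned int *insns;	/* NULL on the sizing pass, image on pass 2 */
};

/* Count on pass 1, write on pass 2 - same call sites for both passes. */
static void emit(struct jit_ctx *ctx, unsigned int insn)
{
	if (ctx->insns)
		ctx->insns[ctx->ninsns] = insn;
	ctx->ninsns++;
}

static void generate(struct jit_ctx *ctx)
{
	emit(ctx, 0x00000013);	/* addi x0,x0,0 (nop), stands in for real code gen */
	emit(ctx, 0x00008067);	/* jalr x0,0(x1) (ret) */
}

int main(void)
{
	struct jit_ctx ctx = { 0, NULL };

	generate(&ctx);		/* pass 1: only sizes the image */
	ctx.insns = calloc(ctx.ninsns, sizeof(*ctx.insns));
	if (!ctx.insns)
		return 1;
	ctx.ninsns = 0;
	generate(&ctx);		/* pass 2: emits into the buffer */
	free(ctx.insns);
	return 0;
}

Because the same generation code runs in both passes, any branch that consults ctx->offset on the second pass sees offsets that were fully populated by the first one.
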
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ed554b09eb3f..b6e3d0653002 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -75,6 +75,7 @@ config S390
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
+ select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
@@ -379,6 +380,7 @@ config COMPAT
select COMPAT_BINFMT_ELF if BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
+ select HAVE_UID16
depends on MULTIUSER
help
Select this option if you want to enable your system kernel to
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index d592e0d90d9f..f902215e9cd9 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -7,6 +7,7 @@
#include <asm/facility.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
+#include "boot.h"
/*
* The code within this file will be called very early. It may _not_
@@ -58,7 +59,7 @@ static void u16_to_decimal(char *str, u16 val)
*str = '\0';
}
-static void print_missing_facilities(void)
+void print_missing_facilities(void)
{
static char als_str[80] = "Missing facilities: ";
unsigned long val;
@@ -90,7 +91,6 @@ static void print_missing_facilities(void)
}
strcat(als_str, "\n");
sclp_early_printk(als_str);
- sclp_early_printk("See Principles of Operations for facility bits\n");
}
static void facility_mismatch(void)
@@ -98,6 +98,7 @@ static void facility_mismatch(void)
sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
print_machine_type();
print_missing_facilities();
+ sclp_early_printk("See Principles of Operations for facility bits\n");
disabled_wait(0x8badcccc);
}
@@ -105,20 +106,7 @@ void verify_facilities(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(S390_lowcore.stfle_fac_list); i++)
- S390_lowcore.stfle_fac_list[i] = 0;
- asm volatile(
- " stfl 0(0)\n"
- : "=m" (S390_lowcore.stfl_fac_list));
- S390_lowcore.stfle_fac_list[0] = (u64)S390_lowcore.stfl_fac_list << 32;
- if (S390_lowcore.stfl_fac_list & 0x01000000) {
- register unsigned long reg0 asm("0") = ARRAY_SIZE(als) - 1;
-
- asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
- : "+d" (reg0)
- : "a" (&S390_lowcore.stfle_fac_list)
- : "memory", "cc");
- }
+ __stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list));
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i])
facility_mismatch();
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index fc41e2277ea8..82bc06346e05 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -6,6 +6,8 @@ void startup_kernel(void);
void detect_memory(void);
void store_ipl_parmblock(void);
void setup_boot_command_line(void);
+void parse_boot_command_line(void);
void setup_memory_end(void);
+void print_missing_facilities(void);
#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 9dab596be98e..36beb56de021 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
+#include <asm/facility.h>
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
@@ -143,8 +145,66 @@ void setup_boot_command_line(void)
append_ipl_block_parm();
}
+static void modify_facility(unsigned long nr, bool clear)
+{
+ if (clear)
+ __clear_facility(nr, S390_lowcore.stfle_fac_list);
+ else
+ __set_facility(nr, S390_lowcore.stfle_fac_list);
+}
+
+static void check_cleared_facilities(void)
+{
+ unsigned long als[] = { FACILITIES_ALS };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(als); i++) {
+ if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) {
+ sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
+ print_missing_facilities();
+ break;
+ }
+ }
+}
+
+static void modify_fac_list(char *str)
+{
+ unsigned long val, endval;
+ char *endp;
+ bool clear;
+
+ while (*str) {
+ clear = false;
+ if (*str == '!') {
+ clear = true;
+ str++;
+ }
+ val = simple_strtoull(str, &endp, 0);
+ if (str == endp)
+ break;
+ str = endp;
+ if (*str == '-') {
+ str++;
+ endval = simple_strtoull(str, &endp, 0);
+ if (str == endp)
+ break;
+ str = endp;
+ while (val <= endval) {
+ modify_facility(val, clear);
+ val++;
+ }
+ } else {
+ modify_facility(val, clear);
+ }
+ if (*str != ',')
+ break;
+ str++;
+ }
+ check_cleared_facilities();
+}
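
For reference, modify_fac_list() above accepts a comma-separated list of facility bit numbers, where a leading '!' clears a bit instead of setting it and 'a-b' denotes a range. Below is a hedged user-space rendering of the same grammar; strtoul stands in for the kernel's simple_strtoull, and toggle_bit() is a made-up stand-in for __set_facility()/__clear_facility().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for __set_facility()/__clear_facility(); just prints. */
static void toggle_bit(unsigned long nr, bool clear)
{
	printf("%s facility %lu\n", clear ? "clear" : "set", nr);
}

static void parse_fac_list(const char *str)
{
	char *end;

	while (*str) {
		bool clear = (*str == '!');
		unsigned long val, last;

		if (clear)
			str++;
		val = strtoul(str, &end, 0);
		if (str == end)
			break;
		str = end;
		last = val;
		if (*str == '-') {
			last = strtoul(++str, &end, 0);
			if (str == end)
				break;
			str = end;
		}
		for (; val <= last; val++)
			toggle_bit(val, clear);
		if (*str++ != ',')
			break;
	}
}

int main(void)
{
	parse_fac_list("10,!11,130-133");	/* e.g. facilities=10,!11,130-133 */
	return 0;
}

With a command line such as facilities=10,!11,130-133 the parser would set bits 10 and 130-133 and clear bit 11 before check_cleared_facilities() runs.
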
+
static char command_line_buf[COMMAND_LINE_SIZE] __section(.data);
-static void parse_mem_opt(void)
+void parse_boot_command_line(void)
{
char *param, *val;
bool enabled;
@@ -165,12 +225,14 @@ static void parse_mem_opt(void)
if (!rc && !enabled)
noexec_disabled = 1;
}
+
+ if (!strcmp(param, "facilities"))
+ modify_fac_list(val);
}
}
void setup_memory_end(void)
{
- parse_mem_opt();
#ifdef CONFIG_CRASH_DUMP
if (!OLDMEM_BASE && early_ipl_block_valid &&
early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP &&
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 4d441317cdeb..bdfc5549a299 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -53,6 +53,7 @@ void startup_kernel(void)
sclp_early_read_info();
store_ipl_parmblock();
setup_boot_command_line();
+ parse_boot_command_line();
setup_memory_end();
detect_memory();
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c
index 25aca07898ba..b11e8108773a 100644
--- a/arch/s390/boot/string.c
+++ b/arch/s390/boot/string.c
@@ -2,6 +2,7 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#undef CONFIG_KASAN
#include "../lib/string.c"
int strncmp(const char *cs, const char *ct, size_t count)
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 5346b5a80bb6..0d15383d0ff1 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -38,7 +38,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
/* check for weak keys */
if (!des_ekey(tmp, key) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
@@ -228,7 +228,7 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
DES_KEY_SIZE)) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index 52348e0a812e..05f3f9aee5fc 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -43,7 +43,7 @@ int hypfs_diag0c_init(void);
void hypfs_diag0c_exit(void);
/* Set Partition-Resource Parameter */
-int hypfs_sprp_init(void);
+void hypfs_sprp_init(void);
void hypfs_sprp_exit(void);
/* debugfs interface */
@@ -69,9 +69,9 @@ struct hypfs_dbfs_file {
struct dentry *dentry;
};
-extern int hypfs_dbfs_init(void);
+extern void hypfs_dbfs_init(void);
extern void hypfs_dbfs_exit(void);
-extern int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
+extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
#endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index b9bdf5c1918e..f4c7dbfaf8ee 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -78,14 +78,11 @@ static const struct file_operations dbfs_ops = {
.unlocked_ioctl = dbfs_ioctl,
};
-int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
{
df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
&dbfs_ops);
- if (IS_ERR(df->dentry))
- return PTR_ERR(df->dentry);
mutex_init(&df->lock);
- return 0;
}
void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
@@ -93,10 +90,9 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
debugfs_remove(df->dentry);
}
-int hypfs_dbfs_init(void)
+void hypfs_dbfs_init(void)
{
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
- return PTR_ERR_OR_ZERO(dbfs_dir);
}
void hypfs_dbfs_exit(void)
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 3452e18bb1ca..f0bc4dc3e9bf 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -440,11 +440,10 @@ __init int hypfs_diag_init(void)
pr_err("The hardware system does not support hypfs\n");
return -ENODATA;
}
- if (diag204_info_type == DIAG204_INFO_EXT) {
- rc = hypfs_dbfs_create_file(&dbfs_file_d204);
- if (rc)
- return rc;
- }
+
+ if (diag204_info_type == DIAG204_INFO_EXT)
+ hypfs_dbfs_create_file(&dbfs_file_d204);
+
if (MACHINE_IS_LPAR) {
rc = diag224_get_name_table();
if (rc) {
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index cebf05150cc1..72e3140fafb5 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -54,8 +54,7 @@ static void *diag0c_store(unsigned int *count)
if (!cpu_vec)
goto fail_put_online_cpus;
/* Note: Diag 0c needs 8 byte alignment and real storage */
- diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) +
- cpu_count * sizeof(struct hypfs_diag0c_entry),
+ diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),
GFP_KERNEL | GFP_DMA);
if (!diag0c_data)
goto fail_kfree_cpu_vec;
@@ -125,7 +124,8 @@ int __init hypfs_diag0c_init(void)
{
if (!MACHINE_IS_VM)
return 0;
- return hypfs_dbfs_create_file(&dbfs_file_0c);
+ hypfs_dbfs_create_file(&dbfs_file_0c);
+ return 0;
}
/*
diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c
index 601b70786dc8..7d9fb496d155 100644
--- a/arch/s390/hypfs/hypfs_sprp.c
+++ b/arch/s390/hypfs/hypfs_sprp.c
@@ -137,11 +137,11 @@ static struct hypfs_dbfs_file hypfs_sprp_file = {
.unlocked_ioctl = hypfs_sprp_ioctl,
};
-int hypfs_sprp_init(void)
+void hypfs_sprp_init(void)
{
if (!sclp.has_sprp)
- return 0;
- return hypfs_dbfs_create_file(&hypfs_sprp_file);
+ return;
+ hypfs_dbfs_create_file(&hypfs_sprp_file);
}
void hypfs_sprp_exit(void)
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index c4b7b681e055..42f2375c203e 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -279,7 +279,8 @@ int hypfs_vm_init(void)
guest_query = local_guest;
else
return -EACCES;
- return hypfs_dbfs_create_file(&dbfs_file_2fc);
+ hypfs_dbfs_create_file(&dbfs_file_2fc);
+ return 0;
}
void hypfs_vm_exit(void)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c681329fdeec..ccad1398abd4 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -456,9 +456,8 @@ static int __init hypfs_init(void)
{
int rc;
- rc = hypfs_dbfs_init();
- if (rc)
- return rc;
+ hypfs_dbfs_init();
+
if (hypfs_diag_init()) {
rc = -ENODATA;
goto fail_dbfs_exit;
@@ -467,10 +466,7 @@ static int __init hypfs_init(void)
rc = -ENODATA;
goto fail_hypfs_diag_exit;
}
- if (hypfs_sprp_init()) {
- rc = -ENODATA;
- goto fail_hypfs_vm_exit;
- }
+ hypfs_sprp_init();
if (hypfs_diag0c_init()) {
rc = -ENODATA;
goto fail_hypfs_sprp_exit;
@@ -489,7 +485,6 @@ fail_hypfs_diag0c_exit:
hypfs_diag0c_exit();
fail_hypfs_sprp_exit:
hypfs_sprp_exit();
-fail_hypfs_vm_exit:
hypfs_vm_exit();
fail_hypfs_diag_exit:
hypfs_diag_exit();
diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h
new file mode 100644
index 000000000000..649b9fc60685
--- /dev/null
+++ b/arch/s390/include/asm/cpu_mcf.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter facility support definitions for the Linux perf subsystem
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ */
+#ifndef _ASM_S390_CPU_MCF_H
+#define _ASM_S390_CPU_MCF_H
+
+#include <linux/perf_event.h>
+#include <asm/cpu_mf.h>
+
+enum cpumf_ctr_set {
+ CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
+ CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
+ CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */
+ CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */
+ CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */
+
+ /* Maximum number of counter sets */
+ CPUMF_CTR_SET_MAX,
+};
+
+#define CPUMF_LCCTL_ENABLE_SHIFT 16
+#define CPUMF_LCCTL_ACTCTL_SHIFT 0
+static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
+ [CPUMF_CTR_SET_BASIC] = 0x02,
+ [CPUMF_CTR_SET_USER] = 0x04,
+ [CPUMF_CTR_SET_CRYPTO] = 0x08,
+ [CPUMF_CTR_SET_EXT] = 0x01,
+ [CPUMF_CTR_SET_MT_DIAG] = 0x20,
+};
+
+static inline void ctr_set_enable(u64 *state, int ctr_set)
+{
+ *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
+}
+static inline void ctr_set_disable(u64 *state, int ctr_set)
+{
+ *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
+}
+static inline void ctr_set_start(u64 *state, int ctr_set)
+{
+ *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
+}
+static inline void ctr_set_stop(u64 *state, int ctr_set)
+{
+ *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
+}
+
+static inline void ctr_set_multiple_enable(u64 *state, u64 ctrsets)
+{
+ *state |= ctrsets << CPUMF_LCCTL_ENABLE_SHIFT;
+}
+
+static inline void ctr_set_multiple_disable(u64 *state, u64 ctrsets)
+{
+ *state &= ~(ctrsets << CPUMF_LCCTL_ENABLE_SHIFT);
+}
+
+static inline void ctr_set_multiple_start(u64 *state, u64 ctrsets)
+{
+ *state |= ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT;
+}
+
+static inline void ctr_set_multiple_stop(u64 *state, u64 ctrsets)
+{
+ *state &= ~(ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT);
+}
+
+static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
+{
+ switch (set) {
+ case CPUMF_CTR_SET_BASIC:
+ return stcctm(BASIC, range, dest);
+ case CPUMF_CTR_SET_USER:
+ return stcctm(PROBLEM_STATE, range, dest);
+ case CPUMF_CTR_SET_CRYPTO:
+ return stcctm(CRYPTO_ACTIVITY, range, dest);
+ case CPUMF_CTR_SET_EXT:
+ return stcctm(EXTENDED, range, dest);
+ case CPUMF_CTR_SET_MT_DIAG:
+ return stcctm(MT_DIAG_CLEARING, range, dest);
+ case CPUMF_CTR_SET_MAX:
+ return 3;
+ }
+ return 3;
+}
+
+struct cpu_cf_events {
+ struct cpumf_ctr_info info;
+ atomic_t ctr_set[CPUMF_CTR_SET_MAX];
+ atomic64_t alert;
+ u64 state, tx_state;
+ unsigned int flags;
+ unsigned int txn_flags;
+};
+DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
+
+bool kernel_cpumcf_avail(void);
+int __kernel_cpumcf_begin(void);
+unsigned long kernel_cpumcf_alert(int clear);
+void __kernel_cpumcf_end(void);
+
+static inline int kernel_cpumcf_begin(void)
+{
+ if (!cpum_cf_avail())
+ return -ENODEV;
+
+ preempt_disable();
+ return __kernel_cpumcf_begin();
+}
+static inline void kernel_cpumcf_end(void)
+{
+ __kernel_cpumcf_end();
+ preempt_enable();
+}
+
+/* Return true if the store counter set multiple instruction is available */
+static inline int stccm_avail(void)
+{
+ return test_facility(142);
+}
+
+#endif /* _ASM_S390_CPU_MCF_H */
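
To make the bit layout behind the ctr_set_*() helpers above concrete: a counter set's enable bit sits at bit 16 and up of the control operand, its activation bit at bit 0 and up. A tiny user-space illustration follows; nothing s390-specific is executed, the printed values are just the expected bit patterns.

#include <stdio.h>

#define CPUMF_LCCTL_ENABLE_SHIFT	16
#define CPUMF_LCCTL_ACTCTL_SHIFT	0

int main(void)
{
	unsigned long long state = 0;
	unsigned long long basic = 0x02;	/* cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] */

	state |= basic << CPUMF_LCCTL_ENABLE_SHIFT;	/* ctr_set_enable() */
	state |= basic << CPUMF_LCCTL_ACTCTL_SHIFT;	/* ctr_set_start()  */
	printf("lcctl operand: 0x%llx\n", state);	/* prints 0x20002   */

	state &= ~(basic << CPUMF_LCCTL_ACTCTL_SHIFT);	/* ctr_set_stop()   */
	printf("after stop:    0x%llx\n", state);	/* prints 0x20000   */
	return 0;
}

The ctr_set_multiple_*() variants do the same shifts with an OR of several set masks at once.
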
diff --git a/arch/s390/include/asm/cpu_mf-insn.h b/arch/s390/include/asm/cpu_mf-insn.h
new file mode 100644
index 000000000000..a68b362e0964
--- /dev/null
+++ b/arch/s390/include/asm/cpu_mf-insn.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for CPU-MF instructions
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+#ifndef _ASM_S390_CPU_MF_INSN_H
+#define _ASM_S390_CPU_MF_INSN_H
+
+#ifdef __ASSEMBLY__
+
+/* Macro to generate the STCCTM instruction with a customized
+ * M3 field designating the counter set.
+ */
+.macro STCCTM r1 m3 db2
+ .insn rsy,0xeb0000000017,\r1,\m3 & 0xf,\db2
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index bf2cbff926ef..ae3e3221d4b5 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -12,6 +12,8 @@
#include <linux/errno.h>
#include <asm/facility.h>
+asm(".include \"asm/cpu_mf-insn.h\"\n");
+
#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
#define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */
#define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */
@@ -209,17 +211,25 @@ static inline int ecctr(u64 ctr, u64 *val)
return cc;
}
-/* Store CPU counter multiple for the MT utilization counter set */
-static inline int stcctm5(u64 num, u64 *val)
+/* Store CPU counter multiple for a particular counter set */
+enum stcctm_ctr_set {
+ EXTENDED = 0,
+ BASIC = 1,
+ PROBLEM_STATE = 2,
+ CRYPTO_ACTIVITY = 3,
+ MT_DIAG = 5,
+ MT_DIAG_CLEARING = 9, /* clears loss-of-MT-ctr-data alert */
+};
+static inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
{
int cc;
asm volatile (
- " .insn rsy,0xeb0000000017,%2,5,%1\n"
+ " STCCTM %2,%3,%1\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
- : "Q" (*val), "d" (num)
+ : "Q" (*dest), "d" (range), "i" (set)
: "cc", "memory");
return cc;
}
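
A hedged usage sketch of the reworked stcctm() above, in kernel context (not standalone): the caller now picks the counter set via the new enum, passes the number of counters to store plus a destination buffer, and checks the returned condition code. The helper name and error mapping below are invented for illustration.

#include <linux/errno.h>
#include <asm/cpu_mcf.h>

/* Hypothetical helper: store the first "num" basic-set counters. */
static int read_basic_counters(u64 *buf, u64 num)
{
	int cc;

	cc = stcctm(BASIC, num, buf);	/* the old stcctm5() covered only the MT set */
	return cc ? -EIO : 0;		/* cc != 0: counters were not stored */
}

The ctr_stcctm() wrapper in cpu_mcf.h does the same thing keyed off enum cpumf_ctr_set instead of the raw instruction encoding.
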
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index cdbaad50c7c7..19562be22b7e 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -32,6 +32,7 @@ enum diag_stat_enum {
DIAG_STAT_X2FC,
DIAG_STAT_X304,
DIAG_STAT_X308,
+ DIAG_STAT_X318,
DIAG_STAT_X500,
NR_DIAG_STAT
};
@@ -293,6 +294,17 @@ struct diag26c_mac_resp {
u8 res[2];
} __aligned(8);
+#define CPNC_LINUX 0x4
+union diag318_info {
+ unsigned long val;
+ struct {
+ unsigned int cpnc : 8;
+ unsigned int cpvc_linux : 24;
+ unsigned char cpvc_distro[3];
+ unsigned char zero;
+ };
+};
+
int diag204(unsigned long subcode, unsigned long size, void *addr);
int diag224(void *ptr);
int diag26c(void *req, void *resp, enum diag26c_sc subcode);
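
The new union diag318_info above packs an 8-bit control-program name code (CPNC) and a 24-bit Linux control-program version code at the top of an 8-byte value; on big-endian s390 the cpnc bitfield is the most significant byte. Below is a small, endianness-naive sketch of assembling such a value by shifting; the version code is hypothetical and only the CPNC_LINUX value mirrors the header.

#include <stdio.h>

#define CPNC_LINUX 0x4ULL	/* same value as in asm/diag.h */

int main(void)
{
	unsigned long long cpvc_linux = 0x050000;	/* hypothetical version code */
	unsigned long long val;

	val = (CPNC_LINUX << 56) | (cpvc_linux << 32);
	printf("diag 318 operand: 0x%016llx\n", val);	/* 0x0405000000000000 */
	return 0;
}
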
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 8ea270fdc7fb..5a3c95b11952 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -81,5 +81,30 @@ static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
#endif
}
+/*
+ * Even though the system call numbers are identical for s390/s390x, a
+ * different system call table is used for compat tasks. This may lead
+ * to e.g. incorrect or missing trace event sysfs files.
+ * Therefore simply do not trace compat system calls at all.
+ * See kernel/trace/trace_syscalls.c.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ return is_compat_task();
+}
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ /*
+ * Skip the __s390_ and __s390x_ prefixes - due to compat wrappers
+ * and aliasing, some symbols of 64-bit system call functions
+ * may get the __s390_ prefix instead of the __s390x_ prefix.
+ */
+ return !strcmp(sym + 7, name) || !strcmp(sym + 8, name);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_FTRACE_H */
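
The sym + 7 / sym + 8 arithmetic in arch_syscall_match_sym_name() above simply skips a "__s390_" (7 characters) or "__s390x_" (8 characters) prefix before comparing against the generic name. A quick user-space check of that logic, with invented symbol names:

#include <stdio.h>
#include <string.h>

static int match_sym_name(const char *sym, const char *name)
{
	/* Skip a "__s390_" (7 chars) or "__s390x_" (8 chars) prefix. */
	return !strcmp(sym + 7, name) || !strcmp(sym + 8, name);
}

int main(void)
{
	printf("%d\n", match_sym_name("__s390x_sys_read", "sys_read"));	/* 1 */
	printf("%d\n", match_sym_name("__s390_sys_read", "sys_read"));		/* 1 */
	printf("%d\n", match_sym_name("__s390x_sys_read", "sys_write"));	/* 0 */
	return 0;
}
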
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index e2d3e6c43395..e548ec1ec12c 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -10,6 +10,12 @@
#define JUMP_LABEL_NOP_SIZE 6
#define JUMP_LABEL_NOP_OFFSET 2
+#if __GNUC__ < 9
+#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "X"
+#else
+#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "jdd"
+#endif
+
/*
* We use a brcl 0,2 instruction for jump labels at compile time so it
* can be easily distinguished from a hotpatch generated instruction.
@@ -20,9 +26,9 @@ static inline bool arch_static_branch(struct static_key *key, bool branch)
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"
- ".quad %0-.\n"
+ ".quad %0+%1-.\n"
".popsection\n"
- : : "X" (&((char *)key)[branch]) : : label);
+ : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label);
return false;
label:
return true;
@@ -34,9 +40,9 @@ static inline bool arch_static_branch_jump(struct static_key *key, bool branch)
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"
- ".quad %0-.\n"
+ ".quad %0+%1-.\n"
".popsection\n"
- : : "X" (&((char *)key)[branch]) : : label);
+ : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label);
return false;
label:
return true;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index ccbb53e22024..8d04e6f3f796 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
atomic_set(&mm->context.flush_count, 0);
mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
- mm->context.compat_mm = 0;
+ mm->context.compat_mm = test_thread_flag(TIF_31BIT);
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste ||
test_thread_flag(TIF_PGSTE) ||
@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
- if (prev == next)
- return;
S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR1 and CR7 */
@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
__ctl_load(S390_lowcore.vdso_asce, 7, 7);
clear_cpu_flag(CIF_ASCE_SECONDARY);
}
- cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+ if (prev != next)
+ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 10fe982f2b4b..4e0efebc56a9 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -148,7 +148,6 @@ struct zpci_dev {
enum pci_bus_speed max_bus_speed;
struct dentry *debugfs_dev;
- struct dentry *debugfs_perf;
struct s390_domain *s390_domain; /* s390 IOMMU domain data */
};
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index b9c0e361748b..560d8f766ddf 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -12,7 +12,6 @@
#include <linux/perf_event.h>
#include <linux/device.h>
-#include <asm/cpu_mf.h>
/* Per-CPU flags for PMU states */
#define PMU_F_RESERVED 0x1000
@@ -55,6 +54,7 @@ struct perf_sf_sde_regs {
#define PERF_CPUM_SF_MAX_CTR 2
#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
+#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 063732414dfb..76dc344edb8c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1069,8 +1069,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
-void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);
+pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
+ pte_t *, pte_t, pte_t);
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/s390/include/asm/pnet.h b/arch/s390/include/asm/pnet.h
index 6e278584f8f1..5739276b458d 100644
--- a/arch/s390/include/asm/pnet.h
+++ b/arch/s390/include/asm/pnet.h
@@ -11,13 +11,5 @@
#include <linux/device.h>
#include <linux/types.h>
-#define PNETIDS_LEN 64 /* Total utility string length in bytes
- * to cover up to 4 PNETIDs of 16 bytes
- * for up to 4 device ports
- */
-#define MAX_PNETID_LEN 16 /* Max.length of a single port PNETID */
-#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN)
- /* Max. # of ports with a PNETID */
-
int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid);
#endif /* _ASM_S390_PNET_H */
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index d46edde7e458..db5ef22c46e4 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -361,8 +361,8 @@ struct qdio_initialize {
unsigned long);
int scan_threshold;
unsigned long int_parm;
- void **input_sbal_addr_array;
- void **output_sbal_addr_array;
+ struct qdio_buffer **input_sbal_addr_array;
+ struct qdio_buffer **output_sbal_addr_array;
struct qdio_outbuf_state *output_sbal_state_array;
};
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 0cd4bda85eb1..ef4c9dec06a4 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -78,6 +78,7 @@ struct sclp_info {
unsigned char has_skey : 1;
unsigned char has_kss : 1;
unsigned char has_gisaf : 1;
+ unsigned char has_diag318 : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 116cc15a4b8a..70d87db54e62 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -12,15 +12,21 @@
#include <linux/types.h>
#endif
-#define __HAVE_ARCH_MEMCHR /* inline & arch function */
-#define __HAVE_ARCH_MEMCMP /* arch function */
#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
-#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSET16 /* arch function */
#define __HAVE_ARCH_MEMSET32 /* arch function */
#define __HAVE_ARCH_MEMSET64 /* arch function */
+
+void *memcpy(void *dest, const void *src, size_t n);
+void *memset(void *s, int c, size_t n);
+void *memmove(void *dest, const void *src, size_t n);
+
+#ifndef CONFIG_KASAN
+#define __HAVE_ARCH_MEMCHR /* inline & arch function */
+#define __HAVE_ARCH_MEMCMP /* arch function */
+#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
#define __HAVE_ARCH_STRCMP /* arch function */
#define __HAVE_ARCH_STRCPY /* inline & arch function */
@@ -35,9 +41,6 @@
/* Prototypes for non-inlined arch strings functions. */
int memcmp(const void *s1, const void *s2, size_t n);
-void *memcpy(void *dest, const void *src, size_t n);
-void *memset(void *s, int c, size_t n);
-void *memmove(void *dest, const void *src, size_t n);
int strcmp(const char *s1, const char *s2);
size_t strlcat(char *dest, const char *src, size_t n);
size_t strlcpy(char *dest, const char *src, size_t size);
@@ -45,6 +48,7 @@ char *strncat(char *dest, const char *src, size_t n);
char *strncpy(char *dest, const char *src, size_t n);
char *strrchr(const char *s, int c);
char *strstr(const char *s1, const char *s2);
+#endif /* !CONFIG_KASAN */
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
@@ -95,6 +99,7 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
#if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY))
+#ifdef __HAVE_ARCH_MEMCHR
static inline void *memchr(const void * s, int c, size_t n)
{
register int r0 asm("0") = (char) c;
@@ -109,7 +114,9 @@ static inline void *memchr(const void * s, int c, size_t n)
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
+#endif
+#ifdef __HAVE_ARCH_MEMSCAN
static inline void *memscan(void *s, int c, size_t n)
{
register int r0 asm("0") = (char) c;
@@ -121,7 +128,9 @@ static inline void *memscan(void *s, int c, size_t n)
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
return (void *) ret;
}
+#endif
+#ifdef __HAVE_ARCH_STRCAT
static inline char *strcat(char *dst, const char *src)
{
register int r0 asm("0") = 0;
@@ -137,7 +146,9 @@ static inline char *strcat(char *dst, const char *src)
: "d" (r0), "0" (0) : "cc", "memory" );
return ret;
}
+#endif
+#ifdef __HAVE_ARCH_STRCPY
static inline char *strcpy(char *dst, const char *src)
{
register int r0 asm("0") = 0;
@@ -150,7 +161,9 @@ static inline char *strcpy(char *dst, const char *src)
: "cc", "memory");
return ret;
}
+#endif
+#ifdef __HAVE_ARCH_STRLEN
static inline size_t strlen(const char *s)
{
register unsigned long r0 asm("0") = 0;
@@ -162,7 +175,9 @@ static inline size_t strlen(const char *s)
: "+d" (r0), "+a" (tmp) : : "cc", "memory");
return r0 - (unsigned long) s;
}
+#endif
+#ifdef __HAVE_ARCH_STRNLEN
static inline size_t strnlen(const char * s, size_t n)
{
register int r0 asm("0") = 0;
@@ -175,6 +190,7 @@ static inline size_t strnlen(const char * s, size_t n)
: "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory");
return end - s;
}
+#endif
#else /* IN_ARCH_STRING_C */
void *memchr(const void * s, int c, size_t n);
void *memscan(void *s, int c, size_t n);
diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..5596c5c625d2
--- /dev/null
+++ b/arch/s390/include/asm/syscall_wrapper.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - s390 specific wrappers to syscall definitions
+ *
+ */
+
+#ifndef _ASM_S390_SYSCALL_WRAPPER_H
+#define _ASM_S390_SYSCALL_WRAPPER_H
+
+#ifdef CONFIG_COMPAT
+#define __SC_COMPAT_TYPE(t, a) \
+ __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a
+
+#define __SC_COMPAT_CAST(t, a) \
+({ \
+ long __ReS = a; \
+ \
+ BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \
+ !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t) && \
+ !__TYPE_IS_LL(t)); \
+ if (__TYPE_IS_L(t)) \
+ __ReS = (s32)a; \
+ if (__TYPE_IS_UL(t)) \
+ __ReS = (u32)a; \
+ if (__TYPE_IS_PTR(t)) \
+ __ReS = a & 0x7fffffff; \
+ if (__TYPE_IS_LL(t)) \
+ return -ENOSYS; \
+ (t)__ReS; \
+})
+
+#define __S390_SYS_STUBx(x, name, ...) \
+ asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
+ ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
+ asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
+ { \
+ long ret = __s390x_sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
+ }
+
+/*
+ * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
+ * named __s390x_sys_*()
+ */
+#define COMPAT_SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __s390_compat_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__s390_compat_sys_##sname, ERRNO); \
+ asmlinkage long __s390_compat_sys_##sname(void)
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __s390x_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \
+ asmlinkage long __s390_sys_##sname(void) \
+ __attribute__((alias(__stringify(__s390x_sys_##sname)))); \
+ asmlinkage long __s390x_sys_##sname(void)
+
+#define COND_SYSCALL(name) \
+ cond_syscall(__s390x_sys_##name); \
+ cond_syscall(__s390_sys_##name)
+
+#define SYS_NI(name) \
+ SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
+ SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
+ asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
+ __attribute__((alias(__stringify(__se_compat_sys##name)))); \
+ ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
+ } \
+ __diag_pop(); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+/*
+ * As some compat syscalls may not be implemented, we need to expand
+ * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
+ * kernel/time/posix-stubs.c to cover this case as well.
+ */
+#define COND_SYSCALL_COMPAT(name) \
+ cond_syscall(__s390_compat_sys_##name)
+
+#define COMPAT_SYS_NI(name) \
+ SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers)
+
+#else /* CONFIG_COMPAT */
+
+#define __S390_SYS_STUBx(x, fullname, name, ...)
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __s390x_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \
+ asmlinkage long __s390x_sys_##sname(void)
+
+#define COND_SYSCALL(name) \
+ cond_syscall(__s390x_sys_##name)
+
+#define SYS_NI(name) \
+ SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers);
+
+#endif /* CONFIG_COMPAT */
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
+ asmlinkage long __s390x_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
+ __attribute__((alias(__stringify(__se_sys##name)))); \
+ ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ __S390_SYS_STUBx(x, name, __VA_ARGS__) \
+ asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
+ } \
+ __diag_pop(); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#endif /* _ASM_S390_SYSCALL_WRAPPER_H */
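
For a feel of what __SC_COMPAT_CAST() above does with a 64-bit register handed in by a 31-bit compat task: long arguments are sign-extended from their low 32 bits, unsigned long arguments are zero-extended, and pointers are masked down to the 31-bit address space. A stand-alone illustration of just those three conversions on a 64-bit build (the register value is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long long reg = 0xdeadbeefcafef00dULL;	/* junk in the upper half */

	long as_long = (int)reg;			/* sign-extend low 32 bits */
	unsigned long as_ulong = (unsigned int)reg;	/* zero-extend low 32 bits */
	unsigned long as_ptr = reg & 0x7fffffff;	/* 31-bit address space    */

	printf("long:  %lx\n", (unsigned long)as_long);	/* ffffffffcafef00d */
	printf("ulong: %lx\n", as_ulong);		/* cafef00d         */
	printf("ptr:   %lx\n", as_ptr);			/* 4afef00d         */
	return 0;
}

The real macro additionally rejects plain 64-bit (long long) arguments at build time, since those cannot be passed in a single 32-bit compat register.
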
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index bd2545977ad3..007fcb9aeeb8 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -31,7 +31,6 @@
#define USER_DS (2)
#define USER_DS_SACF (3)
-#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
#define segment_eq(a,b) (((a) & 2) == ((b) & 2))
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index a1fbf15d53aa..b6755685c7b8 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -10,11 +10,6 @@
#include <uapi/asm/unistd.h>
#include <asm/unistd_nr.h>
-#define __IGNORE_time
-#define __IGNORE_pkey_mprotect
-#define __IGNORE_pkey_alloc
-#define __IGNORE_pkey_free
-
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
@@ -33,7 +28,7 @@
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
# ifdef CONFIG_COMPAT
-# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_SYS_TIME32
# define __ARCH_WANT_SYS_UTIME32
# endif
#define __ARCH_WANT_SYS_FORK
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
index 266a72320e05..0c05a673811c 100644
--- a/arch/s390/include/asm/vx-insn.h
+++ b/arch/s390/include/asm/vx-insn.h
@@ -363,23 +363,23 @@
.endm
/* VECTOR LOAD MULTIPLE */
-.macro VLM vfrom, vto, disp, base
+.macro VLM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
VX_NUM v3, \vto
GR_NUM b2, \base /* Base register */
.word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
- MRXBOPC 0, 0x36, v1, v3
+ MRXBOPC \hint, 0x36, v1, v3
.endm
/* VECTOR STORE MULTIPLE */
-.macro VSTM vfrom, vto, disp, base
+.macro VSTM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
VX_NUM v3, \vto
GR_NUM b2, \base /* Base register */
.word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
- MRXBOPC 0, 0x3E, v1, v3
+ MRXBOPC \hint, 0x3E, v1, v3
.endm
/* VECTOR PERMUTE */
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index da3e0d48abbc..6b0f30b14642 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -3,3 +3,4 @@ include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generated-y += unistd_64.h
+generic-y += socket.h
diff --git a/arch/s390/include/uapi/asm/posix_types.h b/arch/s390/include/uapi/asm/posix_types.h
index 2a3fc638414b..1913613e71b6 100644
--- a/arch/s390/include/uapi/asm/posix_types.h
+++ b/arch/s390/include/uapi/asm/posix_types.h
@@ -20,6 +20,12 @@ typedef long __kernel_ssize_t;
typedef unsigned short __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
+#ifdef __KERNEL__
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
+#endif
+
#ifndef __s390x__
typedef unsigned long __kernel_ino_t;
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
deleted file mode 100644
index 39d901476ee5..000000000000
--- a/arch/s390/include/uapi/asm/socket.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * S390 version
- *
- * Derived from "include/asm-i386/socket.h"
- */
-
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-#define SO_REUSEPORT 15
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-#define SO_GET_FILTER SO_ATTACH_FILTER
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_PROTOCOL 38
-#define SO_DOMAIN 39
-
-#define SO_RXQ_OVFL 40
-
-#define SO_WIFI_STATUS 41
-#define SCM_WIFI_STATUS SO_WIFI_STATUS
-#define SO_PEEK_OFF 42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS 43
-
-#define SO_LOCK_FILTER 44
-
-#define SO_SELECT_ERR_QUEUE 45
-
-#define SO_BUSY_POLL 46
-
-#define SO_MAX_PACING_RATE 47
-
-#define SO_BPF_EXTENSIONS 48
-
-#define SO_INCOMING_CPU 49
-
-#define SO_ATTACH_BPF 50
-#define SO_DETACH_BPF SO_DETACH_FILTER
-
-#define SO_ATTACH_REUSEPORT_CBPF 51
-#define SO_ATTACH_REUSEPORT_EBPF 52
-
-#define SO_CNX_ADVICE 53
-
-#define SCM_TIMESTAMPING_OPT_STATS 54
-
-#define SO_MEMINFO 55
-
-#define SO_INCOMING_NAPI_ID 56
-
-#define SO_COOKIE 57
-
-#define SCM_TIMESTAMPING_PKTINFO 58
-
-#define SO_PEERGROUPS 59
-
-#define SO_ZEROCOPY 60
-
-#define SO_TXTIME 61
-#define SCM_TXTIME SO_TXTIME
-
-#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index e216e116a9a9..8a62c7f72e1b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -65,7 +65,7 @@ obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
-obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y)
+obj-$(CONFIG_COMPAT) += $(compat-obj-y)
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
@@ -77,8 +77,10 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o
+obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
+obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_diag.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 8ac38d51ed7d..f9d418d1b619 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -34,7 +34,6 @@
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
-#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/in.h>
@@ -58,245 +57,13 @@
#include "compat_linux.h"
-/* For this source file, we want overflow handling. */
-
-#undef high2lowuid
-#undef high2lowgid
-#undef low2highuid
-#undef low2highgid
-#undef SET_UID16
-#undef SET_GID16
-#undef NEW_TO_OLD_UID
-#undef NEW_TO_OLD_GID
-#undef SET_OLDSTAT_UID
-#undef SET_OLDSTAT_GID
-#undef SET_STAT_UID
-#undef SET_STAT_GID
-
-#define high2lowuid(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
-#define high2lowgid(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
-#define low2highuid(uid) ((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid)
-#define low2highgid(gid) ((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid)
-#define SET_UID16(var, uid) var = high2lowuid(uid)
-#define SET_GID16(var, gid) var = high2lowgid(gid)
-#define NEW_TO_OLD_UID(uid) high2lowuid(uid)
-#define NEW_TO_OLD_GID(gid) high2lowgid(gid)
-#define SET_OLDSTAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid)
-#define SET_OLDSTAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid)
-#define SET_STAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid)
-#define SET_STAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid)
-
-COMPAT_SYSCALL_DEFINE3(s390_chown16, const char __user *, filename,
- u16, user, u16, group)
-{
- return ksys_chown(filename, low2highuid(user), low2highgid(group));
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_lchown16, const char __user *,
- filename, u16, user, u16, group)
-{
- return ksys_lchown(filename, low2highuid(user), low2highgid(group));
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_fchown16, unsigned int, fd, u16, user, u16, group)
-{
- return ksys_fchown(fd, low2highuid(user), low2highgid(group));
-}
-
-COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
-{
- return sys_setregid(low2highgid(rgid), low2highgid(egid));
-}
-
-COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
-{
- return sys_setgid(low2highgid(gid));
-}
-
-COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
-{
- return sys_setreuid(low2highuid(ruid), low2highuid(euid));
-}
-
-COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
-{
- return sys_setuid(low2highuid(uid));
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
-{
- return sys_setresuid(low2highuid(ruid), low2highuid(euid),
- low2highuid(suid));
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_getresuid16, u16 __user *, ruidp,
- u16 __user *, euidp, u16 __user *, suidp)
-{
- const struct cred *cred = current_cred();
- int retval;
- u16 ruid, euid, suid;
-
- ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid));
- euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid));
- suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid));
-
- if (!(retval = put_user(ruid, ruidp)) &&
- !(retval = put_user(euid, euidp)))
- retval = put_user(suid, suidp);
-
- return retval;
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_setresgid16, u16, rgid, u16, egid, u16, sgid)
-{
- return sys_setresgid(low2highgid(rgid), low2highgid(egid),
- low2highgid(sgid));
-}
-
-COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
- u16 __user *, egidp, u16 __user *, sgidp)
-{
- const struct cred *cred = current_cred();
- int retval;
- u16 rgid, egid, sgid;
-
- rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid));
- egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid));
- sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid));
-
- if (!(retval = put_user(rgid, rgidp)) &&
- !(retval = put_user(egid, egidp)))
- retval = put_user(sgid, sgidp);
-
- return retval;
-}
-
-COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
-{
- return sys_setfsuid(low2highuid(uid));
-}
-
-COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
-{
- return sys_setfsgid(low2highgid(gid));
-}
-
-static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
-{
- struct user_namespace *user_ns = current_user_ns();
- int i;
- u16 group;
- kgid_t kgid;
-
- for (i = 0; i < group_info->ngroups; i++) {
- kgid = group_info->gid[i];
- group = (u16)from_kgid_munged(user_ns, kgid);
- if (put_user(group, grouplist+i))
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
-{
- struct user_namespace *user_ns = current_user_ns();
- int i;
- u16 group;
- kgid_t kgid;
-
- for (i = 0; i < group_info->ngroups; i++) {
- if (get_user(group, grouplist+i))
- return -EFAULT;
-
- kgid = make_kgid(user_ns, (gid_t)group);
- if (!gid_valid(kgid))
- return -EINVAL;
-
- group_info->gid[i] = kgid;
- }
-
- return 0;
-}
-
-COMPAT_SYSCALL_DEFINE2(s390_getgroups16, int, gidsetsize, u16 __user *, grouplist)
-{
- const struct cred *cred = current_cred();
- int i;
-
- if (gidsetsize < 0)
- return -EINVAL;
-
- get_group_info(cred->group_info);
- i = cred->group_info->ngroups;
- if (gidsetsize) {
- if (i > gidsetsize) {
- i = -EINVAL;
- goto out;
- }
- if (groups16_to_user(grouplist, cred->group_info)) {
- i = -EFAULT;
- goto out;
- }
- }
-out:
- put_group_info(cred->group_info);
- return i;
-}
-
-COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplist)
-{
- struct group_info *group_info;
- int retval;
-
- if (!may_setgroups())
- return -EPERM;
- if ((unsigned)gidsetsize > NGROUPS_MAX)
- return -EINVAL;
-
- group_info = groups_alloc(gidsetsize);
- if (!group_info)
- return -ENOMEM;
- retval = groups16_from_user(group_info, grouplist);
- if (retval) {
- put_group_info(group_info);
- return retval;
- }
-
- groups_sort(group_info);
- retval = set_current_groups(group_info);
- put_group_info(group_info);
-
- return retval;
-}
-
-COMPAT_SYSCALL_DEFINE0(s390_getuid16)
-{
- return high2lowuid(from_kuid_munged(current_user_ns(), current_uid()));
-}
-
-COMPAT_SYSCALL_DEFINE0(s390_geteuid16)
-{
- return high2lowuid(from_kuid_munged(current_user_ns(), current_euid()));
-}
-
-COMPAT_SYSCALL_DEFINE0(s390_getgid16)
-{
- return high2lowgid(from_kgid_munged(current_user_ns(), current_gid()));
-}
-
-COMPAT_SYSCALL_DEFINE0(s390_getegid16)
-{
- return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
-}
-
#ifdef CONFIG_SYSVIPC
COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second,
compat_ulong_t, third, compat_uptr_t, ptr)
{
if (call >> 16) /* hack for backward compatibility */
return -EINVAL;
- return compat_sys_ipc(call, first, second, third, ptr, third);
+ return compat_ksys_ipc(call, first, second, third, ptr, third);
}
#endif
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
deleted file mode 100644
index 48c4ce668244..000000000000
--- a/arch/s390/kernel/compat_wrapper.c
+++ /dev/null
@@ -1,186 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Compat system call wrappers.
- *
- * Copyright IBM Corp. 2014
- */
-
-#include <linux/syscalls.h>
-#include <linux/compat.h>
-#include "entry.h"
-
-#define COMPAT_SYSCALL_WRAP1(name, ...) \
- COMPAT_SYSCALL_WRAPx(1, _##name, __VA_ARGS__)
-#define COMPAT_SYSCALL_WRAP2(name, ...) \
- COMPAT_SYSCALL_WRAPx(2, _##name, __VA_ARGS__)
-#define COMPAT_SYSCALL_WRAP3(name, ...) \
- COMPAT_SYSCALL_WRAPx(3, _##name, __VA_ARGS__)
-#define COMPAT_SYSCALL_WRAP4(name, ...) \
- COMPAT_SYSCALL_WRAPx(4, _##name, __VA_ARGS__)
-#define COMPAT_SYSCALL_WRAP5(name, ...) \
- COMPAT_SYSCALL_WRAPx(5, _##name, __VA_ARGS__)
-#define COMPAT_SYSCALL_WRAP6(name, ...) \
- COMPAT_SYSCALL_WRAPx(6, _##name, __VA_ARGS__)
-
-#define __SC_COMPAT_TYPE(t, a) \
- __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a
-
-#define __SC_COMPAT_CAST(t, a) \
-({ \
- long __ReS = a; \
- \
- BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \
- !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t)); \
- if (__TYPE_IS_L(t)) \
- __ReS = (s32)a; \
- if (__TYPE_IS_UL(t)) \
- __ReS = (u32)a; \
- if (__TYPE_IS_PTR(t)) \
- __ReS = a & 0x7fffffff; \
- (t)__ReS; \
-})
-
-/*
- * The COMPAT_SYSCALL_WRAP macro generates system call wrappers to be used by
- * compat tasks. These wrappers will only be used for system calls where only
- * the system call arguments need sign or zero extension or zeroing of the upper
- * 33 bits of pointers.
- * Note: since the wrapper function will afterwards call a system call which
- * again performs zero and sign extension for all system call arguments with
- * a size of less than eight bytes, these compat wrappers only touch those
- * system call arguments with a size of eight bytes ((unsigned) long and
- * pointers). Zero and sign extension for e.g. int parameters will be done by
- * the regular system call wrappers.
- */
-#define COMPAT_SYSCALL_WRAPx(x, name, ...) \
-asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
-asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\
-asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \
-{ \
- return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \
-}
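For illustration, a rough sketch of what COMPAT_SYSCALL_WRAP2(creat, ...) expands to under the macros above (a hedged approximation of the preprocessor output; the __MAP plumbing is simplified):

asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
asmlinkage long notrace compat_sys_creat(long pathname, umode_t mode);
asmlinkage long notrace compat_sys_creat(long pathname, umode_t mode)
{
	/* pointer argument: clear the upper 33 bits; the int-sized mode is
	 * passed through and extended by the regular syscall wrapper */
	return sys_creat((const char __user *)(pathname & 0x7fffffff), mode);
}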
-
-COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode);
-COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname);
-COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname);
-COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename);
-COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev);
-COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode);
-COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name);
-COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode);
-COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname);
-COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode);
-COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname);
-COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes);
-COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk);
-COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler);
-COMPAT_SYSCALL_WRAP1(acct, const char __user *, name);
-COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags);
-COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename);
-COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask);
-COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len);
-COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new);
-COMPAT_SYSCALL_WRAP3(readlink, const char __user *, path, char __user *, buf, int, bufsiz);
-COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library);
-COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags);
-COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg);
-COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len);
-COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len);
-COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile);
-COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len);
-COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name);
-COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot);
-COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs);
-COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags);
-COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr);
-COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data);
-COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2);
-COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence);
-COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags);
-COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len);
-COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len);
-COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param);
-COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param);
-COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param);
-COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr);
-COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout);
-COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5);
-COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size);
-COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr);
-COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data);
-COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group);
-COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist);
-COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist);
-COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid);
-COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid);
-COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group);
-COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old);
-COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec);
-COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior);
-COMPAT_SYSCALL_WRAP5(setxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags);
-COMPAT_SYSCALL_WRAP5(lsetxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags);
-COMPAT_SYSCALL_WRAP5(fsetxattr, int, fd, const char __user *, name, const void __user *, value, size_t, size, int, flags);
-COMPAT_SYSCALL_WRAP3(getdents64, unsigned int, fd, struct linux_dirent64 __user *, dirent, unsigned int, count);
-COMPAT_SYSCALL_WRAP4(getxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size);
-COMPAT_SYSCALL_WRAP4(lgetxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size);
-COMPAT_SYSCALL_WRAP4(fgetxattr, int, fd, const char __user *, name, void __user *, value, size_t, size);
-COMPAT_SYSCALL_WRAP3(listxattr, const char __user *, path, char __user *, list, size_t, size);
-COMPAT_SYSCALL_WRAP3(llistxattr, const char __user *, path, char __user *, list, size_t, size);
-COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size);
-COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name);
-COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name);
-COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name);
-COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr);
-COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event);
-COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout);
-COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx);
-COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result);
-COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name);
-COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id);
-COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id);
-COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags);
-COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask);
-COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode);
-COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev);
-COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag);
-COMPAT_SYSCALL_WRAP3(unlinkat, int, dfd, const char __user *, pathname, int, flag);
-COMPAT_SYSCALL_WRAP4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname);
-COMPAT_SYSCALL_WRAP5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags);
-COMPAT_SYSCALL_WRAP3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname);
-COMPAT_SYSCALL_WRAP4(readlinkat, int, dfd, const char __user *, path, char __user *, buf, int, bufsiz);
-COMPAT_SYSCALL_WRAP3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode);
-COMPAT_SYSCALL_WRAP3(faccessat, int, dfd, const char __user *, filename, int, mode);
-COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags);
-COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
-COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags);
-COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache);
-COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags);
-COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags);
-COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls);
-COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim);
-COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag);
-COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2);
-COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags);
-COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
-COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags);
-COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags);
-COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, void __user *, uargs)
-COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
-COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
-COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
-COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
-COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);
-COMPAT_SYSCALL_WRAP4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec);
-COMPAT_SYSCALL_WRAP3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen);
-COMPAT_SYSCALL_WRAP3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen);
-COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags);
-COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
-COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
-COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
-COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
-COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
-COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
-COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
-COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
-COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
-COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index d374f9b218b4..0ebf08c3b35e 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1056,12 +1056,6 @@ int debug_register_view(debug_info_t *id, struct debug_view *view)
mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
id, &debug_file_ops);
- if (!pde) {
- pr_err("Registering view %s/%s failed due to out of "
- "memory\n", id->name, view->name);
- rc = -1;
- goto out;
- }
spin_lock_irqsave(&id->lock, flags);
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!id->views[i])
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 53a5316cc4b7..7edaa733a77f 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -45,6 +45,7 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" },
[DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" },
[DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
+ [DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" },
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index af5c2b3f7065..d6edf45f93b9 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
if (stsi(vmms, 3, 2, 2) || !vmms->count)
return;
- /* Running under KVM? If not we assume z/VM */
+ /* Detect known hypervisors */
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
- else
+ else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
@@ -164,8 +164,6 @@ static noinline __init void setup_lowcore_early(void)
static noinline __init void setup_facility_list(void)
{
- stfle(S390_lowcore.stfle_fac_list,
- ARRAY_SIZE(S390_lowcore.stfle_fac_list));
memcpy(S390_lowcore.alt_stfle_fac_list,
S390_lowcore.stfle_fac_list,
sizeof(S390_lowcore.alt_stfle_fac_list));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 39191a0feed1..583d65ef5007 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1512,7 +1512,7 @@ cleanup_critical:
.quad .Lsie_skip - .Lsie_entry
#endif
.section .rodata, "a"
-#define SYSCALL(esame,emu) .long esame
+#define SYSCALL(esame,emu) .long __s390x_ ## esame
.globl sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
@@ -1520,7 +1520,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
-#define SYSCALL(esame,emu) .long emu
+#define SYSCALL(esame,emu) .long __s390_ ## emu
.globl sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 57bba24b1c27..56491e636eab 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -27,8 +27,6 @@ ENTRY(startup_continue)
mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
- lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
- # move IPL device to lowcore
larl %r0,boot_vdso_data
stg %r0,__LC_VDSO_PER_CPU
#
diff --git a/arch/s390/kernel/kdebugfs.c b/arch/s390/kernel/kdebugfs.c
index 2c46bd6c6fd2..33130c7daf55 100644
--- a/arch/s390/kernel/kdebugfs.c
+++ b/arch/s390/kernel/kdebugfs.c
@@ -9,8 +9,6 @@ EXPORT_SYMBOL(arch_debugfs_dir);
static int __init arch_kdebugfs_init(void)
{
arch_debugfs_dir = debugfs_create_dir("s390", NULL);
- if (IS_ERR(arch_debugfs_dir))
- arch_debugfs_dir = NULL;
return 0;
}
postcore_initcall(arch_kdebugfs_init);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index d5523adeddbf..e1c54d28713a 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -10,73 +10,11 @@
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
-#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
-#include <asm/ctl_reg.h>
-#include <asm/irq.h>
-#include <asm/cpu_mf.h>
-
-enum cpumf_ctr_set {
- CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
- CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
- CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */
- CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */
- CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */
-
- /* Maximum number of counter sets */
- CPUMF_CTR_SET_MAX,
-};
-
-#define CPUMF_LCCTL_ENABLE_SHIFT 16
-#define CPUMF_LCCTL_ACTCTL_SHIFT 0
-static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
- [CPUMF_CTR_SET_BASIC] = 0x02,
- [CPUMF_CTR_SET_USER] = 0x04,
- [CPUMF_CTR_SET_CRYPTO] = 0x08,
- [CPUMF_CTR_SET_EXT] = 0x01,
- [CPUMF_CTR_SET_MT_DIAG] = 0x20,
-};
-
-static void ctr_set_enable(u64 *state, int ctr_set)
-{
- *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
-}
-static void ctr_set_disable(u64 *state, int ctr_set)
-{
- *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
-}
-static void ctr_set_start(u64 *state, int ctr_set)
-{
- *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
-}
-static void ctr_set_stop(u64 *state, int ctr_set)
-{
- *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
-}
-
-/* Local CPUMF event structure */
-struct cpu_hw_events {
- struct cpumf_ctr_info info;
- atomic_t ctr_set[CPUMF_CTR_SET_MAX];
- u64 state, tx_state;
- unsigned int flags;
- unsigned int txn_flags;
-};
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
- .ctr_set = {
- [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
- [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
- [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
- [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0),
- [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
- },
- .state = 0,
- .flags = 0,
- .txn_flags = 0,
-};
+#include <asm/cpu_mcf.h>
static enum cpumf_ctr_set get_counter_set(u64 event)
{
@@ -98,11 +36,11 @@ static enum cpumf_ctr_set get_counter_set(u64 event)
static int validate_ctr_version(const struct hw_perf_event *hwc)
{
- struct cpu_hw_events *cpuhw;
+ struct cpu_cf_events *cpuhw;
int err = 0;
u16 mtdiag_ctl;
- cpuhw = &get_cpu_var(cpu_hw_events);
+ cpuhw = &get_cpu_var(cpu_cf_events);
/* check required version for counter sets */
switch (hwc->config_base) {
@@ -135,7 +73,7 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
* Thus, the counters can only be used if SMT is on and the
* counter set is enabled and active.
*/
- mtdiag_ctl = cpumf_state_ctl[CPUMF_CTR_SET_MT_DIAG];
+ mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
(cpuhw->info.enable_ctl & mtdiag_ctl) &&
(cpuhw->info.act_ctl & mtdiag_ctl)))
@@ -143,28 +81,28 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
break;
}
- put_cpu_var(cpu_hw_events);
+ put_cpu_var(cpu_cf_events);
return err;
}
static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
- struct cpu_hw_events *cpuhw;
+ struct cpu_cf_events *cpuhw;
u64 ctrs_state;
int err = 0;
- cpuhw = &get_cpu_var(cpu_hw_events);
+ cpuhw = &get_cpu_var(cpu_cf_events);
/* Check authorization for cpu counter sets.
* If the particular CPU counter set is not authorized,
* return with -ENOENT in order to fall back to other
* PMUs that might suffice the event request.
*/
- ctrs_state = cpumf_state_ctl[hwc->config_base];
+ ctrs_state = cpumf_ctr_ctl[hwc->config_base];
if (!(ctrs_state & cpuhw->info.auth_ctl))
err = -ENOENT;
- put_cpu_var(cpu_hw_events);
+ put_cpu_var(cpu_cf_events);
return err;
}
@@ -175,7 +113,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc)
*/
static void cpumf_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
int err;
if (cpuhw->flags & PMU_F_ENABLED)
@@ -198,7 +136,7 @@ static void cpumf_pmu_enable(struct pmu *pmu)
*/
static void cpumf_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
int err;
u64 inactive;
@@ -222,86 +160,13 @@ static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
-/* CPU-measurement alerts for the counter facility */
-static void cpumf_measurement_alert(struct ext_code ext_code,
- unsigned int alert, unsigned long unused)
-{
- struct cpu_hw_events *cpuhw;
-
- if (!(alert & CPU_MF_INT_CF_MASK))
- return;
-
- inc_irq_stat(IRQEXT_CMC);
- cpuhw = this_cpu_ptr(&cpu_hw_events);
-
- /* Measurement alerts are shared and might happen when the PMU
- * is not reserved. Ignore these alerts in this case. */
- if (!(cpuhw->flags & PMU_F_RESERVED))
- return;
-
- /* counter authorization change alert */
- if (alert & CPU_MF_INT_CF_CACA)
- qctri(&cpuhw->info);
-
- /* loss of counter data alert */
- if (alert & CPU_MF_INT_CF_LCDA)
- pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
-
- /* loss of MT counter data alert */
- if (alert & CPU_MF_INT_CF_MTDA)
- pr_warn("CPU[%i] MT counter data was lost\n",
- smp_processor_id());
-}
-
-#define PMC_INIT 0
-#define PMC_RELEASE 1
-static void setup_pmc_cpu(void *flags)
-{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
-
- switch (*((int *) flags)) {
- case PMC_INIT:
- memset(&cpuhw->info, 0, sizeof(cpuhw->info));
- qctri(&cpuhw->info);
- cpuhw->flags |= PMU_F_RESERVED;
- break;
-
- case PMC_RELEASE:
- cpuhw->flags &= ~PMU_F_RESERVED;
- break;
- }
-
- /* Disable CPU counter sets */
- lcctl(0);
-}
-
-/* Initialize the CPU-measurement facility */
-static int reserve_pmc_hardware(void)
-{
- int flags = PMC_INIT;
-
- on_each_cpu(setup_pmc_cpu, &flags, 1);
- irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-
- return 0;
-}
-
-/* Release the CPU-measurement facility */
-static void release_pmc_hardware(void)
-{
- int flags = PMC_RELEASE;
-
- on_each_cpu(setup_pmc_cpu, &flags, 1);
- irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-}
-
/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
- release_pmc_hardware();
+ __kernel_cpumcf_end();
mutex_unlock(&pmc_reserve_mutex);
}
}
@@ -332,7 +197,7 @@ static int __hw_perf_event_init(struct perf_event *event)
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
enum cpumf_ctr_set set;
- int err;
+ int err = 0;
u64 ev;
switch (attr->type) {
@@ -402,12 +267,14 @@ static int __hw_perf_event_init(struct perf_event *event)
/* Initialize for using the CPU-measurement counter facility */
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
+ if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
+ if (err)
+ return err;
event->destroy = hw_perf_event_destroy;
/* Finally, validate version and authorization of the counter set */
@@ -488,7 +355,7 @@ static void cpumf_pmu_read(struct perf_event *event)
static void cpumf_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct hw_perf_event *hwc = &event->hw;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
@@ -519,7 +386,7 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct hw_perf_event *hwc = &event->hw;
if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -540,7 +407,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
static int cpumf_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
/* Check authorization for the counter set to which this
* counter belongs.
@@ -564,7 +431,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
static void cpumf_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
cpumf_pmu_stop(event, PERF_EF_UPDATE);
@@ -592,7 +459,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
*/
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
@@ -612,7 +479,7 @@ static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
unsigned int txn_flags;
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
@@ -633,7 +500,7 @@ static void cpumf_pmu_cancel_txn(struct pmu *pmu)
*/
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
u64 state;
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
@@ -671,54 +538,17 @@ static struct pmu cpumf_pmu = {
.cancel_txn = cpumf_pmu_cancel_txn,
};
-static int cpumf_pmf_setup(unsigned int cpu, int flags)
-{
- local_irq_disable();
- setup_pmc_cpu(&flags);
- local_irq_enable();
- return 0;
-}
-
-static int s390_pmu_online_cpu(unsigned int cpu)
-{
- return cpumf_pmf_setup(cpu, PMC_INIT);
-}
-
-static int s390_pmu_offline_cpu(unsigned int cpu)
-{
- return cpumf_pmf_setup(cpu, PMC_RELEASE);
-}
-
static int __init cpumf_pmu_init(void)
{
int rc;
- if (!cpum_cf_avail())
+ if (!kernel_cpumcf_avail())
return -ENODEV;
- /* clear bit 15 of cr0 to unauthorize problem-state to
- * extract measurement counters */
- ctl_clear_bit(0, 48);
-
- /* register handler for measurement-alert interruptions */
- rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
- cpumf_measurement_alert);
- if (rc) {
- pr_err("Registering for CPU-measurement alerts "
- "failed with rc=%i\n", rc);
- return rc;
- }
-
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
- if (rc) {
+ if (rc)
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
- unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
- cpumf_measurement_alert);
- return rc;
- }
- return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
- "perf/s390/cf:online",
- s390_pmu_online_cpu, s390_pmu_offline_cpu);
+ return rc;
}
-early_initcall(cpumf_pmu_init);
+subsys_initcall(cpumf_pmu_init);
diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c
new file mode 100644
index 000000000000..3bced89caffb
--- /dev/null
+++ b/arch/s390/kernel/perf_cpum_cf_common.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CPU-Measurement Counter Facility Support - Common Layer
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ */
+#define KMSG_COMPONENT "cpum_cf_common"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <asm/ctl_reg.h>
+#include <asm/irq.h>
+#include <asm/cpu_mcf.h>
+
+/* Per-CPU event structure for the counter facility */
+DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
+ .ctr_set = {
+ [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
+ },
+ .alert = ATOMIC64_INIT(0),
+ .state = 0,
+ .flags = 0,
+ .txn_flags = 0,
+};
+/* Indicator whether the CPU-Measurement Counter Facility Support is ready */
+static bool cpum_cf_initalized;
+
+/* CPU-measurement alerts for the counter facility */
+static void cpumf_measurement_alert(struct ext_code ext_code,
+ unsigned int alert, unsigned long unused)
+{
+ struct cpu_cf_events *cpuhw;
+
+ if (!(alert & CPU_MF_INT_CF_MASK))
+ return;
+
+ inc_irq_stat(IRQEXT_CMC);
+ cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ /* Measurement alerts are shared and might happen when the PMU
+ * is not reserved. Ignore these alerts in this case. */
+ if (!(cpuhw->flags & PMU_F_RESERVED))
+ return;
+
+ /* counter authorization change alert */
+ if (alert & CPU_MF_INT_CF_CACA)
+ qctri(&cpuhw->info);
+
+ /* loss of counter data alert */
+ if (alert & CPU_MF_INT_CF_LCDA)
+ pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
+
+ /* loss of MT counter data alert */
+ if (alert & CPU_MF_INT_CF_MTDA)
+ pr_warn("CPU[%i] MT counter data was lost\n",
+ smp_processor_id());
+
+ /* store alert for special handling by in-kernel users */
+ atomic64_or(alert, &cpuhw->alert);
+}
+
+#define PMC_INIT 0
+#define PMC_RELEASE 1
+static void cpum_cf_setup_cpu(void *flags)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ switch (*((int *) flags)) {
+ case PMC_INIT:
+ memset(&cpuhw->info, 0, sizeof(cpuhw->info));
+ qctri(&cpuhw->info);
+ cpuhw->flags |= PMU_F_RESERVED;
+ break;
+
+ case PMC_RELEASE:
+ cpuhw->flags &= ~PMU_F_RESERVED;
+ break;
+ }
+
+ /* Disable CPU counter sets */
+ lcctl(0);
+}
+
+bool kernel_cpumcf_avail(void)
+{
+ return cpum_cf_initalized;
+}
+EXPORT_SYMBOL(kernel_cpumcf_avail);
+
+
+/* Reserve/release functions for sharing perf hardware */
+static DEFINE_SPINLOCK(cpumcf_owner_lock);
+static void *cpumcf_owner;
+
+/* Initialize the CPU-measurement counter facility */
+int __kernel_cpumcf_begin(void)
+{
+ int flags = PMC_INIT;
+ int err = 0;
+
+ spin_lock(&cpumcf_owner_lock);
+ if (cpumcf_owner)
+ err = -EBUSY;
+ else
+ cpumcf_owner = __builtin_return_address(0);
+ spin_unlock(&cpumcf_owner_lock);
+ if (err)
+ return err;
+
+ on_each_cpu(cpum_cf_setup_cpu, &flags, 1);
+ irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+
+ return 0;
+}
+EXPORT_SYMBOL(__kernel_cpumcf_begin);
+
+/* Obtain the CPU-measurement alerts for the counter facility */
+unsigned long kernel_cpumcf_alert(int clear)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ unsigned long alert;
+
+ alert = atomic64_read(&cpuhw->alert);
+ if (clear)
+ atomic64_set(&cpuhw->alert, 0);
+
+ return alert;
+}
+EXPORT_SYMBOL(kernel_cpumcf_alert);
+
+/* Release the CPU-measurement counter facility */
+void __kernel_cpumcf_end(void)
+{
+ int flags = PMC_RELEASE;
+
+ on_each_cpu(cpum_cf_setup_cpu, &flags, 1);
+ irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+
+ spin_lock(&cpumcf_owner_lock);
+ cpumcf_owner = NULL;
+ spin_unlock(&cpumcf_owner_lock);
+}
+EXPORT_SYMBOL(__kernel_cpumcf_end);
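A minimal sketch of how an in-kernel user of this new interface might reserve the counter facility, pick up pending measurement alerts and release it again (hypothetical caller, not part of the patch; only the symbols exported above and existing helpers such as pr_info() are used):

static int cpumcf_example_user(void)
{
	unsigned long alert;
	int rc;

	if (!kernel_cpumcf_avail())
		return -ENODEV;

	rc = __kernel_cpumcf_begin();	/* -EBUSY if someone else owns it */
	if (rc)
		return rc;

	/* ... program and read counter sets here ... */

	alert = kernel_cpumcf_alert(1);	/* read and clear pending alerts */
	if (alert & CPU_MF_INT_CF_LCDA)
		pr_info("counter data was lost on this CPU\n");

	__kernel_cpumcf_end();
	return 0;
}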
+
+static int cpum_cf_setup(unsigned int cpu, int flags)
+{
+ local_irq_disable();
+ cpum_cf_setup_cpu(&flags);
+ local_irq_enable();
+ return 0;
+}
+
+static int cpum_cf_online_cpu(unsigned int cpu)
+{
+ return cpum_cf_setup(cpu, PMC_INIT);
+}
+
+static int cpum_cf_offline_cpu(unsigned int cpu)
+{
+ return cpum_cf_setup(cpu, PMC_RELEASE);
+}
+
+static int __init cpum_cf_init(void)
+{
+ int rc;
+
+ if (!cpum_cf_avail())
+ return -ENODEV;
+
+ /* clear bit 15 of cr0 to unauthorize problem-state to
+ * extract measurement counters */
+ ctl_clear_bit(0, 48);
+
+ /* register handler for measurement-alert interruptions */
+ rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
+ cpumf_measurement_alert);
+ if (rc) {
+ pr_err("Registering for CPU-measurement alerts "
+ "failed with rc=%i\n", rc);
+ return rc;
+ }
+
+ rc = cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
+ "perf/s390/cf:online",
+ cpum_cf_online_cpu, cpum_cf_offline_cpu);
+ if (!rc)
+ cpum_cf_initalized = true;
+
+ return rc;
+}
+early_initcall(cpum_cf_init);
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
new file mode 100644
index 000000000000..c6fad208c2fa
--- /dev/null
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Performance event support for s390x - CPU-measurement Counter Sets
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ * Thomas Richter <tmricht@linux.ibm.com>
+ */
+#define KMSG_COMPONENT "cpum_cf_diag"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/processor.h>
+
+#include <asm/ctl_reg.h>
+#include <asm/irq.h>
+#include <asm/cpu_mcf.h>
+#include <asm/timex.h>
+#include <asm/debug.h>
+
+#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
+
+static unsigned int cf_diag_cpu_speed;
+static debug_info_t *cf_diag_dbg;
+
+struct cf_diag_csd { /* Counter set data per CPU */
+ size_t used; /* Bytes used in data/start */
+ unsigned char start[PAGE_SIZE]; /* Counter set at event start */
+ unsigned char data[PAGE_SIZE]; /* Counter set at event delete */
+};
+DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
+
+/* Counter sets are stored as a data stream in a page sized memory buffer and
+ * exported to user space via raw data attached to the event sample data.
+ * Each counter set starts with an eight byte header consisting of:
+ * - a two byte eye catcher (0xfeef)
+ * - a one byte counter set number
+ * - a two byte counter set size (indicates the number of counters in this set)
+ * - a three byte reserved value (must be zero) to make the header the same
+ * size as a counter value.
+ * All counter values are eight bytes in size.
+ *
+ * All counter sets are followed by a 64 byte trailer.
+ * The trailer consists of:
+ * - a flag field indicating valid fields when the corresponding bit is set
+ * - the counter facility first and second version numbers
+ * - the CPU speed if nonzero
+ * - the time stamp the counter sets have been collected
+ * - the time of day (TOD) base value
+ * - the machine type.
+ *
+ * The counter sets are saved when the process is prepared to be executed on a
+ * CPU and saved again when the process is going to be removed from a CPU.
+ * The difference of both counter sets is calculated and stored in the event
+ * sample data area.
+ */
+
+struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int set:16; /* 16-31 Counter set identifier */
+ unsigned int ctr:16; /* 32-47 Number of stored counters */
+ unsigned int res1:16; /* 48-63 Reserved */
+};
+
+struct cf_trailer_entry { /* CPU-M CF_DIAG trailer (64 byte) */
+ /* 0 - 7 */
+ union {
+ struct {
+ unsigned int clock_base:1; /* TOD clock base set */
+ unsigned int speed:1; /* CPU speed set */
+ /* Measurement alerts */
+ unsigned int mtda:1; /* Loss of MT ctr. data alert */
+ unsigned int caca:1; /* Counter auth. change alert */
+ unsigned int lcda:1; /* Loss of counter data alert */
+ };
+ unsigned long flags; /* 0-63 All indicators */
+ };
+ /* 8 - 15 */
+ unsigned int cfvn:16; /* 64-79 Ctr First Version */
+ unsigned int csvn:16; /* 80-95 Ctr Second Version */
+ unsigned int cpu_speed:32; /* 96-127 CPU speed */
+ /* 16 - 23 */
+ unsigned long timestamp; /* 128-191 Timestamp (TOD) */
+ /* 24 - 55 */
+ union {
+ struct {
+ unsigned long progusage1;
+ unsigned long progusage2;
+ unsigned long progusage3;
+ unsigned long tod_base;
+ };
+ unsigned long progusage[4];
+ };
+ /* 56 - 63 */
+ unsigned int mach_type:16; /* Machine type */
+ unsigned int res1:16; /* Reserved */
+ unsigned int res2:32; /* Reserved */
+};
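To illustrate the stream layout described above, a hedged sketch of a walker (hypothetical helper, not part of the patch) that steps from counter set to counter set and stops at the trailer, using only the definitions introduced in this file:

static void cf_diag_walk_example(const void *data, size_t size)
{
	size_t offset = 0;

	while (offset + sizeof(struct cf_ctrset_entry) <= size) {
		const struct cf_ctrset_entry *hdr = data + offset;
		const u64 *ctrs = (const u64 *)(hdr + 1);
		unsigned int i;

		if (hdr->def != CF_DIAG_CTRSET_DEF)	/* trailer reached */
			break;
		for (i = 0; i < hdr->ctr; i++)
			pr_debug("set %u counter %u: %llu\n",
				 hdr->set, i, (unsigned long long)ctrs[i]);
		offset += sizeof(*hdr) + hdr->ctr * sizeof(u64);
	}
}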
+
+/* Create the trailer data at the end of a page. */
+static void cf_diag_trailer(struct cf_trailer_entry *te)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cpuid cpuid;
+
+ te->cfvn = cpuhw->info.cfvn; /* Counter version numbers */
+ te->csvn = cpuhw->info.csvn;
+
+ get_cpu_id(&cpuid); /* Machine type */
+ te->mach_type = cpuid.machine;
+ te->cpu_speed = cf_diag_cpu_speed;
+ if (te->cpu_speed)
+ te->speed = 1;
+ te->clock_base = 1; /* Save clock base */
+ memcpy(&te->tod_base, &tod_clock_base[1], 8);
+ store_tod_clock((__u64 *)&te->timestamp);
+}
+
+/*
+ * Change the CPUMF state to active.
+ * Enable and activate the CPU-counter sets according
+ * to the per-cpu control state.
+ */
+static void cf_diag_enable(struct pmu *pmu)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ int err;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s pmu %p cpu %d flags %#x state %#llx\n",
+ __func__, pmu, smp_processor_id(), cpuhw->flags,
+ cpuhw->state);
+ if (cpuhw->flags & PMU_F_ENABLED)
+ return;
+
+ err = lcctl(cpuhw->state);
+ if (err) {
+ pr_err("Enabling the performance measuring unit "
+ "failed with rc=%x\n", err);
+ return;
+ }
+ cpuhw->flags |= PMU_F_ENABLED;
+}
+
+/*
+ * Change the CPUMF state to inactive.
+ * Keep the CPU-counter sets enabled but deactivate them according
+ * to the per-cpu control state.
+ */
+static void cf_diag_disable(struct pmu *pmu)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ u64 inactive;
+ int err;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s pmu %p cpu %d flags %#x state %#llx\n",
+ __func__, pmu, smp_processor_id(), cpuhw->flags,
+ cpuhw->state);
+ if (!(cpuhw->flags & PMU_F_ENABLED))
+ return;
+
+ inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
+ err = lcctl(inactive);
+ if (err) {
+ pr_err("Disabling the performance measuring unit "
+ "failed with rc=%x\n", err);
+ return;
+ }
+ cpuhw->flags &= ~PMU_F_ENABLED;
+}
+
+/* Number of perf events counting hardware events */
+static atomic_t cf_diag_events = ATOMIC_INIT(0);
+
+/* Release the PMU if event is the last perf event */
+static void cf_diag_perf_event_destroy(struct perf_event *event)
+{
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s event %p cpu %d cf_diag_events %d\n",
+ __func__, event, event->cpu,
+ atomic_read(&cf_diag_events));
+ if (atomic_dec_return(&cf_diag_events) == 0)
+ __kernel_cpumcf_end();
+}
+
+/* Setup the event. Test for authorized counter sets and only include counter
+ * sets which are authorized at the time of the setup. Including unauthorized
+ * counter sets results in a specification exception (and panic).
+ */
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct perf_event_attr *attr = &event->attr;
+ enum cpumf_ctr_set i;
+ int err = 0;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s event %p cpu %d authorized %#x\n", __func__,
+ event, event->cpu, cpuhw->info.auth_ctl);
+
+ event->hw.config = attr->config;
+ event->hw.config_base = 0;
+ local64_set(&event->count, 0);
+
+ /* Add all authorized counter sets to config_base */
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
+ if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
+ event->hw.config_base |= cpumf_ctr_ctl[i];
+
+ /* No authorized counter sets, nothing to count/sample */
+ if (!event->hw.config_base) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Set sample_period to indicate sampling */
+ event->hw.sample_period = attr->sample_period;
+ local64_set(&event->hw.period_left, event->hw.sample_period);
+ event->hw.last_period = event->hw.sample_period;
+out:
+ debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n",
+ __func__, err, event->hw.config_base);
+ return err;
+}
+
+static int cf_diag_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ int err = -ENOENT;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s event %p cpu %d config %#llx "
+ "sample_type %#llx cf_diag_events %d\n", __func__,
+ event, event->cpu, attr->config, attr->sample_type,
+ atomic_read(&cf_diag_events));
+
+ if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
+ event->attr.type != PERF_TYPE_RAW)
+ goto out;
+
+ /* Raw events are used to access counters directly,
+ * hence do not permit excludes.
+ * This event is useless without PERF_SAMPLE_RAW to return counter set
+ * values as raw data.
+ */
+ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv ||
+ !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Initialize for using the CPU-measurement counter facility */
+ if (atomic_inc_return(&cf_diag_events) == 1) {
+ if (__kernel_cpumcf_begin()) {
+ atomic_dec(&cf_diag_events);
+ err = -EBUSY;
+ goto out;
+ }
+ }
+ event->destroy = cf_diag_perf_event_destroy;
+
+ err = __hw_perf_event_init(event);
+ if (unlikely(err))
+ event->destroy(event);
+out:
+ debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
+ return err;
+}
+
+static void cf_diag_read(struct perf_event *event)
+{
+ debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event);
+}
+
+/* Return the maximum possible counter set size (in number of 8 byte counters)
+ * depending on type and model number.
+ */
+static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
+ struct cpumf_ctr_info *info)
+{
+ size_t ctrset_size = 0;
+
+ switch (ctrset) {
+ case CPUMF_CTR_SET_BASIC:
+ if (info->cfvn >= 1)
+ ctrset_size = 6;
+ break;
+ case CPUMF_CTR_SET_USER:
+ if (info->cfvn == 1)
+ ctrset_size = 6;
+ else if (info->cfvn >= 3)
+ ctrset_size = 2;
+ break;
+ case CPUMF_CTR_SET_CRYPTO:
+ ctrset_size = 16;
+ break;
+ case CPUMF_CTR_SET_EXT:
+ if (info->csvn == 1)
+ ctrset_size = 32;
+ else if (info->csvn == 2)
+ ctrset_size = 48;
+ else if (info->csvn >= 3)
+ ctrset_size = 128;
+ break;
+ case CPUMF_CTR_SET_MT_DIAG:
+ if (info->csvn > 3)
+ ctrset_size = 48;
+ break;
+ case CPUMF_CTR_SET_MAX:
+ break;
+ }
+
+ return ctrset_size;
+}
+
+/* Calculate memory needed to store all counter sets together with header and
+ * trailer data. This is independent of the counter set authorization which
+ * can vary depending on the configuration.
+ */
+static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info)
+{
+ size_t max_size = sizeof(struct cf_trailer_entry);
+ enum cpumf_ctr_set i;
+
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ size_t size = cf_diag_ctrset_size(i, info);
+
+ if (size)
+ max_size += size * sizeof(u64) +
+ sizeof(struct cf_ctrset_entry);
+ }
+ debug_sprintf_event(cf_diag_dbg, 5, "%s max_size %zu\n", __func__,
+ max_size);
+
+ return max_size;
+}
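As a hedged worked example of this sizing (assuming a machine reporting cfvn >= 3 and csvn > 3): the counter sets hold 6 + 2 + 16 + 128 + 48 = 200 counters, so the buffer needs 200 * 8 bytes of counter data, five 8-byte set headers and the 64-byte trailer, i.e. 1600 + 40 + 64 = 1704 bytes, which comfortably fits the PAGE_SIZE start/data areas of struct cf_diag_csd.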
+
+/* Read a counter set. The counter set number determines which counter set and
+ * the CPUM-CF first and second version numbers determine the number of
+ * available counters in this counter set.
+ * Each counter set starts with a header containing the counter set number and
+ * the number of 8 byte counters.
+ *
+ * The function returns the number of bytes occupied by this counter set
+ * including the header.
+ * If there is no counter in the counter set, this counter set is useless and
+ * zero is returned in this case.
+ */
+static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
+ size_t room)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ size_t ctrset_size, need = 0;
+ int rc = 3; /* Assume write failure */
+
+ ctrdata->def = CF_DIAG_CTRSET_DEF;
+ ctrdata->set = ctrset;
+ ctrdata->res1 = 0;
+ ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info);
+
+ if (ctrset_size) { /* Save data */
+ need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
+ if (need <= room)
+ rc = ctr_stcctm(ctrset, ctrset_size,
+ (u64 *)(ctrdata + 1));
+ if (rc != 3)
+ ctrdata->ctr = ctrset_size;
+ else
+ need = 0;
+ }
+
+ debug_sprintf_event(cf_diag_dbg, 6,
+ "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
+ " need %zd rc:%d\n",
+ __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
+ cpuhw->info.csvn, need, rc);
+ return need;
+}
+
+/* Read out all counter sets and save them in the provided data buffer.
+ * The last 64 bytes hold an artificial trailer entry.
+ */
+static size_t cf_diag_getctr(void *data, size_t sz, unsigned long auth)
+{
+ struct cf_trailer_entry *trailer;
+ size_t offset = 0, done;
+ int i;
+
+ memset(data, 0, sz);
+ sz -= sizeof(*trailer); /* Always room for trailer */
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ struct cf_ctrset_entry *ctrdata = data + offset;
+
+ if (!(auth & cpumf_ctr_ctl[i]))
+ continue; /* Counter set not authorized */
+
+ done = cf_diag_getctrset(ctrdata, i, sz - offset);
+ offset += done;
+ debug_sprintf_event(cf_diag_dbg, 6,
+ "%s ctrset %d offset %zu done %zu\n",
+ __func__, i, offset, done);
+ }
+ trailer = data + offset;
+ cf_diag_trailer(trailer);
+ return offset + sizeof(*trailer);
+}
+
+/* Calculate the difference for each counter in a counter set. */
+static void cf_diag_diffctrset(u64 *pstart, u64 *pstop, int counters)
+{
+ for (; --counters >= 0; ++pstart, ++pstop)
+ if (*pstop >= *pstart)
+ *pstop -= *pstart;
+ else
+ *pstop = *pstart - *pstop;
+}
+
+/* Scan the counter sets and calculate the difference of each counter
+ * in each set. The result is the increment of each counter during the
+ * period the counter set has been activated.
+ *
+ * Return true on success.
+ */
+static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth)
+{
+ struct cf_trailer_entry *trailer_start, *trailer_stop;
+ struct cf_ctrset_entry *ctrstart, *ctrstop;
+ size_t offset = 0;
+
+ auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
+ do {
+ ctrstart = (struct cf_ctrset_entry *)(csd->start + offset);
+ ctrstop = (struct cf_ctrset_entry *)(csd->data + offset);
+
+ if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
+ pr_err("cpum_cf_diag counter set compare error "
+ "in set %i\n", ctrstart->set);
+ return 0;
+ }
+ auth &= ~cpumf_ctr_ctl[ctrstart->set];
+ if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
+ cf_diag_diffctrset((u64 *)(ctrstart + 1),
+ (u64 *)(ctrstop + 1), ctrstart->ctr);
+ offset += ctrstart->ctr * sizeof(u64) +
+ sizeof(*ctrstart);
+ }
+ debug_sprintf_event(cf_diag_dbg, 6,
+ "%s set %d ctr %d offset %zu auth %lx\n",
+ __func__, ctrstart->set, ctrstart->ctr,
+ offset, auth);
+ } while (ctrstart->def && auth);
+
+ /* Save time_stamp from start of event in stop's trailer */
+ trailer_start = (struct cf_trailer_entry *)(csd->start + offset);
+ trailer_stop = (struct cf_trailer_entry *)(csd->data + offset);
+ trailer_stop->progusage[0] = trailer_start->timestamp;
+
+ return 1;
+}
+
+/* Create perf event sample with the counter sets as raw data. The sample
+ * is then pushed to the event subsystem and the function checks for
+ * possible event overflows. If an event overflow occurs, the PMU is
+ * stopped.
+ *
+ * Return non-zero if an event overflow occurred.
+ */
+static int cf_diag_push_sample(struct perf_event *event,
+ struct cf_diag_csd *csd)
+{
+ struct perf_sample_data data;
+ struct perf_raw_record raw;
+ struct pt_regs regs;
+ int overflow;
+
+ /* Setup perf sample */
+ perf_sample_data_init(&data, 0, event->hw.last_period);
+ memset(&regs, 0, sizeof(regs));
+ memset(&raw, 0, sizeof(raw));
+
+ if (event->attr.sample_type & PERF_SAMPLE_CPU)
+ data.cpu_entry.cpu = event->cpu;
+ if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ raw.frag.size = csd->used;
+ raw.frag.data = csd->data;
+ raw.size = csd->used;
+ data.raw = &raw;
+ }
+
+ overflow = perf_event_overflow(event, &data, &regs);
+ debug_sprintf_event(cf_diag_dbg, 6,
+ "%s event %p cpu %d sample_type %#llx raw %d "
+ "ov %d\n", __func__, event, event->cpu,
+ event->attr.sample_type, raw.size, overflow);
+ if (overflow)
+ event->pmu->stop(event, 0);
+
+ perf_event_update_userpage(event);
+ return overflow;
+}
+
+static void cf_diag_start(struct perf_event *event, int flags)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
+ struct hw_perf_event *hwc = &event->hw;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s event %p cpu %d flags %#x hwc-state %#x\n",
+ __func__, event, event->cpu, flags, hwc->state);
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ /* (Re-)enable and activate all counter sets */
+ lcctl(0); /* Reset counter sets */
+ hwc->state = 0;
+ ctr_set_multiple_enable(&cpuhw->state, hwc->config_base);
+ lcctl(cpuhw->state); /* Enable counter sets */
+ csd->used = cf_diag_getctr(csd->start, sizeof(csd->start),
+ event->hw.config_base);
+ ctr_set_multiple_start(&cpuhw->state, hwc->config_base);
+ /* Function cf_diag_enable() starts the counter sets. */
+}
+
+static void cf_diag_stop(struct perf_event *event, int flags)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
+ struct hw_perf_event *hwc = &event->hw;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s event %p cpu %d flags %#x hwc-state %#x\n",
+ __func__, event, event->cpu, flags, hwc->state);
+
+ /* Deactivate all counter sets */
+ ctr_set_multiple_stop(&cpuhw->state, hwc->config_base);
+ local64_inc(&event->count);
+ csd->used = cf_diag_getctr(csd->data, sizeof(csd->data),
+ event->hw.config_base);
+ if (cf_diag_diffctr(csd, event->hw.config_base))
+ cf_diag_push_sample(event, csd);
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static int cf_diag_add(struct perf_event *event, int flags)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ int err = 0;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s event %p cpu %d flags %#x cpuhw:%p\n",
+ __func__, event, event->cpu, flags, cpuhw);
+
+ if (cpuhw->flags & PMU_F_IN_USE) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ cpuhw->flags |= PMU_F_IN_USE;
+ if (flags & PERF_EF_START)
+ cf_diag_start(event, PERF_EF_RELOAD);
+out:
+ debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
+ return err;
+}
+
+static void cf_diag_del(struct perf_event *event, int flags)
+{
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+ "%s event %p cpu %d flags %#x\n",
+ __func__, event, event->cpu, flags);
+
+ cf_diag_stop(event, PERF_EF_UPDATE);
+ ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base);
+ ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base);
+ cpuhw->flags &= ~PMU_F_IN_USE;
+}
+
+CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);
+
+static struct attribute *cf_diag_events_attr[] = {
+ CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG),
+ NULL,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *cf_diag_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group cf_diag_events_group = {
+ .name = "events",
+ .attrs = cf_diag_events_attr,
+};
+static struct attribute_group cf_diag_format_group = {
+ .name = "format",
+ .attrs = cf_diag_format_attr,
+};
+static const struct attribute_group *cf_diag_attr_groups[] = {
+ &cf_diag_events_group,
+ &cf_diag_format_group,
+ NULL,
+};
+
+/* Performance monitoring unit for s390x */
+static struct pmu cf_diag = {
+ .task_ctx_nr = perf_sw_context,
+ .pmu_enable = cf_diag_enable,
+ .pmu_disable = cf_diag_disable,
+ .event_init = cf_diag_event_init,
+ .add = cf_diag_add,
+ .del = cf_diag_del,
+ .start = cf_diag_start,
+ .stop = cf_diag_stop,
+ .read = cf_diag_read,
+
+ .attr_groups = cf_diag_attr_groups
+};
+
+/* Get the CPU speed, try sampling facility first and CPU attributes second. */
+static void cf_diag_get_cpu_speed(void)
+{
+ if (cpum_sf_avail()) { /* Sampling facility first */
+ struct hws_qsi_info_block si;
+
+ memset(&si, 0, sizeof(si));
+ if (!qsi(&si)) {
+ cf_diag_cpu_speed = si.cpu_speed;
+ return;
+ }
+ }
+
+ if (test_facility(34)) { /* CPU speed extract static part */
+ unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
+
+ if (mhz != -1UL)
+ cf_diag_cpu_speed = mhz & 0xffffffff;
+ }
+}
+
+/* Initialize the counter set PMU to generate complete counter set data as
+ * event raw data. This relies on the CPU Measurement Counter Facility device
+ * already being loaded and initialized.
+ */
+static int __init cf_diag_init(void)
+{
+ struct cpumf_ctr_info info;
+ size_t need;
+ int rc;
+
+ if (!kernel_cpumcf_avail() || !stccm_avail() || qctri(&info))
+ return -ENODEV;
+ cf_diag_get_cpu_speed();
+
+ /* Make sure the counter set data fits into the predefined buffer. */
+ need = cf_diag_ctrset_maxsize(&info);
+ if (need > sizeof(((struct cf_diag_csd *)0)->start)) {
+ pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
+ need);
+ return -ENOMEM;
+ }
+
+ /* Setup s390dbf facility */
+ cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
+ if (!cf_diag_dbg) {
+ pr_err("Registration of s390dbf(cpum_cf_diag) failed\n");
+ return -ENOMEM;
+ }
+ debug_register_view(cf_diag_dbg, &debug_sprintf_view);
+
+ rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW);
+ if (rc) {
+ debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
+ debug_unregister(cf_diag_dbg);
+ pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
+ rc);
+ }
+ return rc;
+}
+arch_initcall(cf_diag_init);
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index d63fb3c56b8a..b45238c89728 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/perf_event.h>
+#include <asm/cpu_mf.h>
/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index bfabeb1889cc..1266194afb02 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1600,7 +1600,7 @@ static void aux_sdb_init(unsigned long sdb)
/*
* aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
- * @cpu: On which to allocate, -1 means current
+ * @event: Event the buffer is setup for, event->cpu == -1 means current
* @pages: Array of pointers to buffer pages passed from perf core
* @nr_pages: Total pages
* @snapshot: Flag for snapshot mode
@@ -1612,8 +1612,8 @@ static void aux_sdb_init(unsigned long sdb)
*
* Return the private AUX buffer structure if success or NULL if fails.
*/
-static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
- bool snapshot)
+static void *aux_buffer_setup(struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
{
struct sf_buffer *sfb;
struct aux_buffer *aux;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 72dd23ef771b..12934e8fbb91 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -369,7 +369,7 @@ void __init arch_call_rest_init(void)
: : [_frame] "a" (frame));
}
-static void __init setup_lowcore(void)
+static void __init setup_lowcore_dat_off(void)
{
struct lowcore *lc;
@@ -380,19 +380,16 @@ static void __init setup_lowcore(void)
lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
- lc->external_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_DAT | PSW_MASK_MCHECK;
+ lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->external_new_psw.addr = (unsigned long) ext_int_handler;
lc->svc_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
lc->svc_new_psw.addr = (unsigned long) system_call;
- lc->program_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_DAT | PSW_MASK_MCHECK;
+ lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
- lc->io_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_DAT | PSW_MASK_MCHECK;
+ lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max;
lc->nodat_stack = ((unsigned long) &init_thread_union)
@@ -452,6 +449,16 @@ static void __init setup_lowcore(void)
lowcore_ptr[0] = lc;
}
+static void __init setup_lowcore_dat_on(void)
+{
+ __ctl_clear_bit(0, 28);
+ S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
+ S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
+ S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
+ S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
+ __ctl_set_bit(0, 28);
+}
+
static struct resource code_resource = {
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
@@ -794,18 +801,9 @@ static void __init reserve_kernel(void)
{
unsigned long start_pfn = PFN_UP(__pa(_end));
-#ifdef CONFIG_DMA_API_DEBUG
- /*
- * DMA_API_DEBUG code stumbles over addresses from the
- * range [PARMAREA_END, _stext]. Mark the memory as reserved
- * so it is not used for CONFIG_DMA_API_DEBUG=y.
- */
- memblock_reserve(0, PFN_PHYS(start_pfn));
-#else
memblock_reserve(0, PARMAREA_END);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
-#endif
}
static void __init setup_memory(void)
@@ -990,6 +988,25 @@ static void __init setup_task_size(void)
}
/*
+ * Issue diagnose 318 to set the control program name and
+ * version codes.
+ */
+static void __init setup_control_program_code(void)
+{
+ union diag318_info diag318_info = {
+ .cpnc = CPNC_LINUX,
+ .cpvc_linux = 0,
+ .cpvc_distro = {0},
+ };
+
+ if (!sclp.has_diag318)
+ return;
+
+ diag_stat_inc(DIAG_STAT_X318);
+ asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
+}
+
+/*
* Setup function called from init/main.c just after the banner
* was printed.
*/
@@ -1006,6 +1023,8 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
+ else
+ pr_info("Linux is running as a guest in 64-bit mode\n");
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
@@ -1031,6 +1050,7 @@ void __init setup_arch(char **cmdline_p)
os_info_init();
setup_ipl();
setup_task_size();
+ setup_control_program_code();
/* Do some memory reservations *before* memory is added to memblock */
reserve_memory_end();
@@ -1070,7 +1090,7 @@ void __init setup_arch(char **cmdline_p)
#endif
setup_resources();
- setup_lowcore();
+ setup_lowcore_dat_off();
smp_fill_possible_mask();
cpu_detect_mhz_feature();
cpu_init();
@@ -1083,6 +1103,12 @@ void __init setup_arch(char **cmdline_p)
*/
paging_init();
+ /*
+ * After paging_init created the kernel page table, the new PSWs
+ * in lowcore can now run with DAT enabled.
+ */
+ setup_lowcore_dat_on();
+
/* Setup default console */
conmode_default();
set_preferred_console();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f82b3d3c36e2..b198ece2aad6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
+ struct lowcore *lc = pcpu_devices->lowcore;
+
+ if (pcpu_devices[0].address == stap())
+ lc = &S390_lowcore;
+
pcpu_delegate(&pcpu_devices[0], func, data,
- pcpu_devices->lowcore->nodat_stack);
+ lc->nodat_stack);
}
int smp_find_processor_id(u16 address)
@@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
{
int rc;
+ rc = lock_device_hotplug_sysfs();
+ if (rc)
+ return rc;
rc = smp_rescan_cpus();
+ unlock_device_hotplug();
return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
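Note on the rescan_store() hunk above: the rescan is now bracketed by the device
hotplug lock, and a lock failure is returned to the sysfs writer as-is. A minimal
stand-alone sketch of that control flow; lock_demo(), unlock_demo() and
rescan_demo() are placeholders for lock_device_hotplug_sysfs(),
unlock_device_hotplug() and smp_rescan_cpus(), not kernel APIs:

#include <stdio.h>

static int lock_demo(void) { return 0; }   /* 0 on success, else -errno */
static void unlock_demo(void) { }
static int rescan_demo(void) { return 0; } /* 0 on success, else -errno */

static long store_demo(unsigned long count)
{
	int rc;

	rc = lock_demo();
	if (rc)
		return rc;              /* e.g. -ERESTARTSYS from the lock */
	rc = rescan_demo();
	unlock_demo();
	return rc ? rc : (long)count;   /* bytes consumed on success */
}

int main(void)
{
	printf("%ld\n", store_demo(4));
	return 0;
}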
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 537f97fde37f..993100c31d65 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -30,10 +30,10 @@
.section .text
ENTRY(swsusp_arch_suspend)
lg %r1,__LC_NODAT_STACK
- aghi %r1,-STACK_FRAME_OVERHEAD
stmg %r6,%r15,__SF_GPRS(%r1)
+ aghi %r1,-STACK_FRAME_OVERHEAD
stg %r15,__SF_BACKCHAIN(%r1)
- lgr %r1,%r15
+ lgr %r15,%r1
/* Store FPU registers */
brasl %r14,save_fpu_regs
@@ -124,13 +124,13 @@ ENTRY(swsusp_arch_resume)
lghi %r2,1
brasl %r14,arch_set_page_states
- /* Deactivate DAT */
- stnsm __SF_EMPTY(%r15),0xfb
-
/* Set prefix page to zero */
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
/* Restore saved image */
larl %r1,restore_pblist
lg %r1,0(%r1)
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 31cefe0c28c0..202fa73ac167 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -58,6 +58,7 @@ out:
return error;
}
+#ifdef CONFIG_SYSVIPC
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.
*/
@@ -74,19 +75,28 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
* Therefore we can call the generic variant by simply passing the
* third parameter also as fifth parameter.
*/
- return sys_ipc(call, first, second, third, ptr, third);
+ return ksys_ipc(call, first, second, third, ptr, third);
}
+#endif /* CONFIG_SYSVIPC */
SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
- unsigned int ret;
+ unsigned int ret = current->personality;
if (personality(current->personality) == PER_LINUX32 &&
personality(personality) == PER_LINUX)
personality |= PER_LINUX32;
- ret = sys_personality(personality);
+
+ if (personality != 0xffffffff)
+ set_personality(personality);
+
if (personality(ret) == PER_LINUX32)
ret &= ~PER_LINUX32;
return ret;
}
+
+SYSCALL_DEFINE0(ni_syscall)
+{
+ return -ENOSYS;
+}
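For reference, the rewritten s390_personality above handles the PER_LINUX32 bit
locally instead of calling sys_personality(). A minimal userspace sketch of that
logic; the PER_* values mirror <linux/personality.h>, and current_personality
stands in for current->personality:

#include <stdio.h>

#define PER_LINUX    0x0000
#define PER_LINUX32  0x0008
#define PER_MASK     0x00ff
#define personality(pers) ((pers) & PER_MASK)

static unsigned int current_personality = PER_LINUX32;

static unsigned int s390_personality_sketch(unsigned int pers)
{
	unsigned int ret = current_personality;   /* value to report back */

	/* A 31-bit task asking for PER_LINUX keeps PER_LINUX32 internally */
	if (personality(current_personality) == PER_LINUX32 &&
	    personality(pers) == PER_LINUX)
		pers |= PER_LINUX32;

	if (pers != 0xffffffff)
		current_personality = pers;       /* set_personality() stand-in */

	/* ... but the reported old value has the bit masked off again */
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;
	return ret;
}

int main(void)
{
	printf("reported old personality: %#x\n",
	       s390_personality_sketch(PER_LINUX));
	printf("stored personality:       %#x\n", current_personality);
	return 0;
}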
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 022fc099b628..02579f95f391 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -15,86 +15,86 @@
5 common open sys_open compat_sys_open
6 common close sys_close sys_close
7 common restart_syscall sys_restart_syscall sys_restart_syscall
-8 common creat sys_creat compat_sys_creat
-9 common link sys_link compat_sys_link
-10 common unlink sys_unlink compat_sys_unlink
+8 common creat sys_creat sys_creat
+9 common link sys_link sys_link
+10 common unlink sys_unlink sys_unlink
11 common execve sys_execve compat_sys_execve
-12 common chdir sys_chdir compat_sys_chdir
-13 32 time - compat_sys_time
-14 common mknod sys_mknod compat_sys_mknod
-15 common chmod sys_chmod compat_sys_chmod
-16 32 lchown - compat_sys_s390_lchown16
+12 common chdir sys_chdir sys_chdir
+13 32 time - sys_time32
+14 common mknod sys_mknod sys_mknod
+15 common chmod sys_chmod sys_chmod
+16 32 lchown - sys_lchown16
19 common lseek sys_lseek compat_sys_lseek
20 common getpid sys_getpid sys_getpid
21 common mount sys_mount compat_sys_mount
-22 common umount sys_oldumount compat_sys_oldumount
-23 32 setuid - compat_sys_s390_setuid16
-24 32 getuid - compat_sys_s390_getuid16
-25 32 stime - compat_sys_stime
+22 common umount sys_oldumount sys_oldumount
+23 32 setuid - sys_setuid16
+24 32 getuid - sys_getuid16
+25 32 stime - sys_stime32
26 common ptrace sys_ptrace compat_sys_ptrace
27 common alarm sys_alarm sys_alarm
29 common pause sys_pause sys_pause
-30 common utime sys_utime compat_sys_utime
-33 common access sys_access compat_sys_access
+30 common utime sys_utime sys_utime32
+33 common access sys_access sys_access
34 common nice sys_nice sys_nice
36 common sync sys_sync sys_sync
37 common kill sys_kill sys_kill
-38 common rename sys_rename compat_sys_rename
-39 common mkdir sys_mkdir compat_sys_mkdir
-40 common rmdir sys_rmdir compat_sys_rmdir
+38 common rename sys_rename sys_rename
+39 common mkdir sys_mkdir sys_mkdir
+40 common rmdir sys_rmdir sys_rmdir
41 common dup sys_dup sys_dup
-42 common pipe sys_pipe compat_sys_pipe
+42 common pipe sys_pipe sys_pipe
43 common times sys_times compat_sys_times
-45 common brk sys_brk compat_sys_brk
-46 32 setgid - compat_sys_s390_setgid16
-47 32 getgid - compat_sys_s390_getgid16
-48 common signal sys_signal compat_sys_signal
-49 32 geteuid - compat_sys_s390_geteuid16
-50 32 getegid - compat_sys_s390_getegid16
-51 common acct sys_acct compat_sys_acct
-52 common umount2 sys_umount compat_sys_umount
+45 common brk sys_brk sys_brk
+46 32 setgid - sys_setgid16
+47 32 getgid - sys_getgid16
+48 common signal sys_signal sys_signal
+49 32 geteuid - sys_geteuid16
+50 32 getegid - sys_getegid16
+51 common acct sys_acct sys_acct
+52 common umount2 sys_umount sys_umount
54 common ioctl sys_ioctl compat_sys_ioctl
55 common fcntl sys_fcntl compat_sys_fcntl
57 common setpgid sys_setpgid sys_setpgid
60 common umask sys_umask sys_umask
-61 common chroot sys_chroot compat_sys_chroot
+61 common chroot sys_chroot sys_chroot
62 common ustat sys_ustat compat_sys_ustat
63 common dup2 sys_dup2 sys_dup2
64 common getppid sys_getppid sys_getppid
65 common getpgrp sys_getpgrp sys_getpgrp
66 common setsid sys_setsid sys_setsid
67 common sigaction sys_sigaction compat_sys_sigaction
-70 32 setreuid - compat_sys_s390_setreuid16
-71 32 setregid - compat_sys_s390_setregid16
-72 common sigsuspend sys_sigsuspend compat_sys_sigsuspend
+70 32 setreuid - sys_setreuid16
+71 32 setregid - sys_setregid16
+72 common sigsuspend sys_sigsuspend sys_sigsuspend
73 common sigpending sys_sigpending compat_sys_sigpending
-74 common sethostname sys_sethostname compat_sys_sethostname
+74 common sethostname sys_sethostname sys_sethostname
75 common setrlimit sys_setrlimit compat_sys_setrlimit
76 32 getrlimit - compat_sys_old_getrlimit
77 common getrusage sys_getrusage compat_sys_getrusage
78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
79 common settimeofday sys_settimeofday compat_sys_settimeofday
-80 32 getgroups - compat_sys_s390_getgroups16
-81 32 setgroups - compat_sys_s390_setgroups16
-83 common symlink sys_symlink compat_sys_symlink
-85 common readlink sys_readlink compat_sys_readlink
-86 common uselib sys_uselib compat_sys_uselib
-87 common swapon sys_swapon compat_sys_swapon
-88 common reboot sys_reboot compat_sys_reboot
+80 32 getgroups - sys_getgroups16
+81 32 setgroups - sys_setgroups16
+83 common symlink sys_symlink sys_symlink
+85 common readlink sys_readlink sys_readlink
+86 common uselib sys_uselib sys_uselib
+87 common swapon sys_swapon sys_swapon
+88 common reboot sys_reboot sys_reboot
89 common readdir - compat_sys_old_readdir
90 common mmap sys_old_mmap compat_sys_s390_old_mmap
-91 common munmap sys_munmap compat_sys_munmap
+91 common munmap sys_munmap sys_munmap
92 common truncate sys_truncate compat_sys_truncate
93 common ftruncate sys_ftruncate compat_sys_ftruncate
94 common fchmod sys_fchmod sys_fchmod
-95 32 fchown - compat_sys_s390_fchown16
+95 32 fchown - sys_fchown16
96 common getpriority sys_getpriority sys_getpriority
97 common setpriority sys_setpriority sys_setpriority
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 32 ioperm - -
102 common socketcall sys_socketcall compat_sys_socketcall
-103 common syslog sys_syslog compat_sys_syslog
+103 common syslog sys_syslog sys_syslog
104 common setitimer sys_setitimer compat_sys_setitimer
105 common getitimer sys_getitimer compat_sys_getitimer
106 common stat sys_newstat compat_sys_newstat
@@ -104,76 +104,76 @@
111 common vhangup sys_vhangup sys_vhangup
112 common idle - -
114 common wait4 sys_wait4 compat_sys_wait4
-115 common swapoff sys_swapoff compat_sys_swapoff
+115 common swapoff sys_swapoff sys_swapoff
116 common sysinfo sys_sysinfo compat_sys_sysinfo
117 common ipc sys_s390_ipc compat_sys_s390_ipc
118 common fsync sys_fsync sys_fsync
119 common sigreturn sys_sigreturn compat_sys_sigreturn
-120 common clone sys_clone compat_sys_clone
-121 common setdomainname sys_setdomainname compat_sys_setdomainname
-122 common uname sys_newuname compat_sys_newuname
-124 common adjtimex sys_adjtimex compat_sys_adjtimex
-125 common mprotect sys_mprotect compat_sys_mprotect
+120 common clone sys_clone sys_clone
+121 common setdomainname sys_setdomainname sys_setdomainname
+122 common uname sys_newuname sys_newuname
+124 common adjtimex sys_adjtimex sys_adjtimex_time32
+125 common mprotect sys_mprotect sys_mprotect
126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
127 common create_module - -
-128 common init_module sys_init_module compat_sys_init_module
-129 common delete_module sys_delete_module compat_sys_delete_module
+128 common init_module sys_init_module sys_init_module
+129 common delete_module sys_delete_module sys_delete_module
130 common get_kernel_syms - -
-131 common quotactl sys_quotactl compat_sys_quotactl
+131 common quotactl sys_quotactl sys_quotactl
132 common getpgid sys_getpgid sys_getpgid
133 common fchdir sys_fchdir sys_fchdir
-134 common bdflush sys_bdflush compat_sys_bdflush
-135 common sysfs sys_sysfs compat_sys_sysfs
+134 common bdflush sys_bdflush sys_bdflush
+135 common sysfs sys_sysfs sys_sysfs
136 common personality sys_s390_personality sys_s390_personality
137 common afs_syscall - -
-138 32 setfsuid - compat_sys_s390_setfsuid16
-139 32 setfsgid - compat_sys_s390_setfsgid16
-140 32 _llseek - compat_sys_llseek
+138 32 setfsuid - sys_setfsuid16
+139 32 setfsgid - sys_setfsgid16
+140 32 _llseek - sys_llseek
141 common getdents sys_getdents compat_sys_getdents
142 32 _newselect - compat_sys_select
142 64 select sys_select -
143 common flock sys_flock sys_flock
-144 common msync sys_msync compat_sys_msync
+144 common msync sys_msync sys_msync
145 common readv sys_readv compat_sys_readv
146 common writev sys_writev compat_sys_writev
147 common getsid sys_getsid sys_getsid
148 common fdatasync sys_fdatasync sys_fdatasync
149 common _sysctl sys_sysctl compat_sys_sysctl
-150 common mlock sys_mlock compat_sys_mlock
-151 common munlock sys_munlock compat_sys_munlock
+150 common mlock sys_mlock sys_mlock
+151 common munlock sys_munlock sys_munlock
152 common mlockall sys_mlockall sys_mlockall
153 common munlockall sys_munlockall sys_munlockall
-154 common sched_setparam sys_sched_setparam compat_sys_sched_setparam
-155 common sched_getparam sys_sched_getparam compat_sys_sched_getparam
-156 common sched_setscheduler sys_sched_setscheduler compat_sys_sched_setscheduler
+154 common sched_setparam sys_sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler sys_sched_setscheduler
157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler
158 common sched_yield sys_sched_yield sys_sched_yield
159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max
160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
-162 common nanosleep sys_nanosleep compat_sys_nanosleep
-163 common mremap sys_mremap compat_sys_mremap
-164 32 setresuid - compat_sys_s390_setresuid16
-165 32 getresuid - compat_sys_s390_getresuid16
+161 common sched_rr_get_interval sys_sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep sys_nanosleep_time32
+163 common mremap sys_mremap sys_mremap
+164 32 setresuid - sys_setresuid16
+165 32 getresuid - sys_getresuid16
167 common query_module - -
-168 common poll sys_poll compat_sys_poll
+168 common poll sys_poll sys_poll
169 common nfsservctl - -
-170 32 setresgid - compat_sys_s390_setresgid16
-171 32 getresgid - compat_sys_s390_getresgid16
-172 common prctl sys_prctl compat_sys_prctl
+170 32 setresgid - sys_setresgid16
+171 32 getresgid - sys_getresgid16
+172 common prctl sys_prctl sys_prctl
173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time32
178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
180 common pread64 sys_pread64 compat_sys_s390_pread64
181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64
-182 32 chown - compat_sys_s390_chown16
-183 common getcwd sys_getcwd compat_sys_getcwd
-184 common capget sys_capget compat_sys_capget
-185 common capset sys_capset compat_sys_capset
+182 32 chown - sys_chown16
+183 common getcwd sys_getcwd sys_getcwd
+184 common capget sys_capget sys_capget
+185 common capset sys_capset sys_capset
186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
187 common sendfile sys_sendfile64 compat_sys_sendfile
188 common getpmsg - -
@@ -187,7 +187,7 @@
195 32 stat64 - compat_sys_s390_stat64
196 32 lstat64 - compat_sys_s390_lstat64
197 32 fstat64 - compat_sys_s390_fstat64
-198 32 lchown32 - compat_sys_lchown
+198 32 lchown32 - sys_lchown
198 64 lchown sys_lchown -
199 32 getuid32 - sys_getuid
199 64 getuid sys_getuid -
@@ -201,21 +201,21 @@
203 64 setreuid sys_setreuid -
204 32 setregid32 - sys_setregid
204 64 setregid sys_setregid -
-205 32 getgroups32 - compat_sys_getgroups
+205 32 getgroups32 - sys_getgroups
205 64 getgroups sys_getgroups -
-206 32 setgroups32 - compat_sys_setgroups
+206 32 setgroups32 - sys_setgroups
206 64 setgroups sys_setgroups -
207 32 fchown32 - sys_fchown
207 64 fchown sys_fchown -
208 32 setresuid32 - sys_setresuid
208 64 setresuid sys_setresuid -
-209 32 getresuid32 - compat_sys_getresuid
+209 32 getresuid32 - sys_getresuid
209 64 getresuid sys_getresuid -
210 32 setresgid32 - sys_setresgid
210 64 setresgid sys_setresgid -
-211 32 getresgid32 - compat_sys_getresgid
+211 32 getresgid32 - sys_getresgid
211 64 getresgid sys_getresgid -
-212 32 chown32 - compat_sys_chown
+212 32 chown32 - sys_chown
212 64 chown sys_chown -
213 32 setuid32 - sys_setuid
213 64 setuid sys_setuid -
@@ -225,169 +225,204 @@
215 64 setfsuid sys_setfsuid -
216 32 setfsgid32 - sys_setfsgid
216 64 setfsgid sys_setfsgid -
-217 common pivot_root sys_pivot_root compat_sys_pivot_root
-218 common mincore sys_mincore compat_sys_mincore
-219 common madvise sys_madvise compat_sys_madvise
-220 common getdents64 sys_getdents64 compat_sys_getdents64
+217 common pivot_root sys_pivot_root sys_pivot_root
+218 common mincore sys_mincore sys_mincore
+219 common madvise sys_madvise sys_madvise
+220 common getdents64 sys_getdents64 sys_getdents64
221 32 fcntl64 - compat_sys_fcntl64
222 common readahead sys_readahead compat_sys_s390_readahead
223 32 sendfile64 - compat_sys_sendfile64
-224 common setxattr sys_setxattr compat_sys_setxattr
-225 common lsetxattr sys_lsetxattr compat_sys_lsetxattr
-226 common fsetxattr sys_fsetxattr compat_sys_fsetxattr
-227 common getxattr sys_getxattr compat_sys_getxattr
-228 common lgetxattr sys_lgetxattr compat_sys_lgetxattr
-229 common fgetxattr sys_fgetxattr compat_sys_fgetxattr
-230 common listxattr sys_listxattr compat_sys_listxattr
-231 common llistxattr sys_llistxattr compat_sys_llistxattr
-232 common flistxattr sys_flistxattr compat_sys_flistxattr
-233 common removexattr sys_removexattr compat_sys_removexattr
-234 common lremovexattr sys_lremovexattr compat_sys_lremovexattr
-235 common fremovexattr sys_fremovexattr compat_sys_fremovexattr
+224 common setxattr sys_setxattr sys_setxattr
+225 common lsetxattr sys_lsetxattr sys_lsetxattr
+226 common fsetxattr sys_fsetxattr sys_fsetxattr
+227 common getxattr sys_getxattr sys_getxattr
+228 common lgetxattr sys_lgetxattr sys_lgetxattr
+229 common fgetxattr sys_fgetxattr sys_fgetxattr
+230 common listxattr sys_listxattr sys_listxattr
+231 common llistxattr sys_llistxattr sys_llistxattr
+232 common flistxattr sys_flistxattr sys_flistxattr
+233 common removexattr sys_removexattr sys_removexattr
+234 common lremovexattr sys_lremovexattr sys_lremovexattr
+235 common fremovexattr sys_fremovexattr sys_fremovexattr
236 common gettid sys_gettid sys_gettid
237 common tkill sys_tkill sys_tkill
-238 common futex sys_futex compat_sys_futex
+238 common futex sys_futex sys_futex_time32
239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
241 common tgkill sys_tgkill sys_tgkill
243 common io_setup sys_io_setup compat_sys_io_setup
-244 common io_destroy sys_io_destroy compat_sys_io_destroy
-245 common io_getevents sys_io_getevents compat_sys_io_getevents
+244 common io_destroy sys_io_destroy sys_io_destroy
+245 common io_getevents sys_io_getevents sys_io_getevents_time32
246 common io_submit sys_io_submit compat_sys_io_submit
-247 common io_cancel sys_io_cancel compat_sys_io_cancel
+247 common io_cancel sys_io_cancel sys_io_cancel
248 common exit_group sys_exit_group sys_exit_group
249 common epoll_create sys_epoll_create sys_epoll_create
-250 common epoll_ctl sys_epoll_ctl compat_sys_epoll_ctl
-251 common epoll_wait sys_epoll_wait compat_sys_epoll_wait
-252 common set_tid_address sys_set_tid_address compat_sys_set_tid_address
+250 common epoll_ctl sys_epoll_ctl sys_epoll_ctl
+251 common epoll_wait sys_epoll_wait sys_epoll_wait
+252 common set_tid_address sys_set_tid_address sys_set_tid_address
253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64
254 common timer_create sys_timer_create compat_sys_timer_create
-255 common timer_settime sys_timer_settime compat_sys_timer_settime
-256 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+255 common timer_settime sys_timer_settime sys_timer_settime32
+256 common timer_gettime sys_timer_gettime sys_timer_gettime32
257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun
258 common timer_delete sys_timer_delete sys_timer_delete
-259 common clock_settime sys_clock_settime compat_sys_clock_settime
-260 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
-261 common clock_getres sys_clock_getres compat_sys_clock_getres
-262 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+259 common clock_settime sys_clock_settime sys_clock_settime32
+260 common clock_gettime sys_clock_gettime sys_clock_gettime32
+261 common clock_getres sys_clock_getres sys_clock_getres_time32
+262 common clock_nanosleep sys_clock_nanosleep sys_clock_nanosleep_time32
264 32 fadvise64_64 - compat_sys_s390_fadvise64_64
265 common statfs64 sys_statfs64 compat_sys_statfs64
266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
-267 common remap_file_pages sys_remap_file_pages compat_sys_remap_file_pages
+267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages
268 common mbind sys_mbind compat_sys_mbind
269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
271 common mq_open sys_mq_open compat_sys_mq_open
-272 common mq_unlink sys_mq_unlink compat_sys_mq_unlink
-273 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
-274 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+272 common mq_unlink sys_mq_unlink sys_mq_unlink
+273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32
+274 common mq_timedreceive sys_mq_timedreceive sys_mq_timedreceive_time32
275 common mq_notify sys_mq_notify compat_sys_mq_notify
276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
277 common kexec_load sys_kexec_load compat_sys_kexec_load
-278 common add_key sys_add_key compat_sys_add_key
-279 common request_key sys_request_key compat_sys_request_key
+278 common add_key sys_add_key sys_add_key
+279 common request_key sys_request_key sys_request_key
280 common keyctl sys_keyctl compat_sys_keyctl
281 common waitid sys_waitid compat_sys_waitid
282 common ioprio_set sys_ioprio_set sys_ioprio_set
283 common ioprio_get sys_ioprio_get sys_ioprio_get
284 common inotify_init sys_inotify_init sys_inotify_init
-285 common inotify_add_watch sys_inotify_add_watch compat_sys_inotify_add_watch
+285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch
286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
288 common openat sys_openat compat_sys_openat
-289 common mkdirat sys_mkdirat compat_sys_mkdirat
-290 common mknodat sys_mknodat compat_sys_mknodat
-291 common fchownat sys_fchownat compat_sys_fchownat
-292 common futimesat sys_futimesat compat_sys_futimesat
+289 common mkdirat sys_mkdirat sys_mkdirat
+290 common mknodat sys_mknodat sys_mknodat
+291 common fchownat sys_fchownat sys_fchownat
+292 common futimesat sys_futimesat sys_futimesat_time32
293 32 fstatat64 - compat_sys_s390_fstatat64
293 64 newfstatat sys_newfstatat -
-294 common unlinkat sys_unlinkat compat_sys_unlinkat
-295 common renameat sys_renameat compat_sys_renameat
-296 common linkat sys_linkat compat_sys_linkat
-297 common symlinkat sys_symlinkat compat_sys_symlinkat
-298 common readlinkat sys_readlinkat compat_sys_readlinkat
-299 common fchmodat sys_fchmodat compat_sys_fchmodat
-300 common faccessat sys_faccessat compat_sys_faccessat
-301 common pselect6 sys_pselect6 compat_sys_pselect6
-302 common ppoll sys_ppoll compat_sys_ppoll
-303 common unshare sys_unshare compat_sys_unshare
+294 common unlinkat sys_unlinkat sys_unlinkat
+295 common renameat sys_renameat sys_renameat
+296 common linkat sys_linkat sys_linkat
+297 common symlinkat sys_symlinkat sys_symlinkat
+298 common readlinkat sys_readlinkat sys_readlinkat
+299 common fchmodat sys_fchmodat sys_fchmodat
+300 common faccessat sys_faccessat sys_faccessat
+301 common pselect6 sys_pselect6 compat_sys_pselect6_time32
+302 common ppoll sys_ppoll compat_sys_ppoll_time32
+303 common unshare sys_unshare sys_unshare
304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
-306 common splice sys_splice compat_sys_splice
+306 common splice sys_splice sys_splice
307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
-308 common tee sys_tee compat_sys_tee
+308 common tee sys_tee sys_tee
309 common vmsplice sys_vmsplice compat_sys_vmsplice
310 common move_pages sys_move_pages compat_sys_move_pages
-311 common getcpu sys_getcpu compat_sys_getcpu
+311 common getcpu sys_getcpu sys_getcpu
312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
-313 common utimes sys_utimes compat_sys_utimes
+313 common utimes sys_utimes sys_utimes_time32
314 common fallocate sys_fallocate compat_sys_s390_fallocate
-315 common utimensat sys_utimensat compat_sys_utimensat
+315 common utimensat sys_utimensat sys_utimensat_time32
316 common signalfd sys_signalfd compat_sys_signalfd
317 common timerfd - -
318 common eventfd sys_eventfd sys_eventfd
319 common timerfd_create sys_timerfd_create sys_timerfd_create
-320 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
-321 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+320 common timerfd_settime sys_timerfd_settime sys_timerfd_settime32
+321 common timerfd_gettime sys_timerfd_gettime sys_timerfd_gettime32
322 common signalfd4 sys_signalfd4 compat_sys_signalfd4
323 common eventfd2 sys_eventfd2 sys_eventfd2
324 common inotify_init1 sys_inotify_init1 sys_inotify_init1
-325 common pipe2 sys_pipe2 compat_sys_pipe2
+325 common pipe2 sys_pipe2 sys_pipe2
326 common dup3 sys_dup3 sys_dup3
327 common epoll_create1 sys_epoll_create1 sys_epoll_create1
328 common preadv sys_preadv compat_sys_preadv
329 common pwritev sys_pwritev compat_sys_pwritev
330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
-331 common perf_event_open sys_perf_event_open compat_sys_perf_event_open
+331 common perf_event_open sys_perf_event_open sys_perf_event_open
332 common fanotify_init sys_fanotify_init sys_fanotify_init
333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
-334 common prlimit64 sys_prlimit64 compat_sys_prlimit64
-335 common name_to_handle_at sys_name_to_handle_at compat_sys_name_to_handle_at
+334 common prlimit64 sys_prlimit64 sys_prlimit64
+335 common name_to_handle_at sys_name_to_handle_at sys_name_to_handle_at
336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
-337 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32
338 common syncfs sys_syncfs sys_syncfs
339 common setns sys_setns sys_setns
340 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
341 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr
-343 common kcmp sys_kcmp compat_sys_kcmp
-344 common finit_module sys_finit_module compat_sys_finit_module
-345 common sched_setattr sys_sched_setattr compat_sys_sched_setattr
-346 common sched_getattr sys_sched_getattr compat_sys_sched_getattr
-347 common renameat2 sys_renameat2 compat_sys_renameat2
-348 common seccomp sys_seccomp compat_sys_seccomp
-349 common getrandom sys_getrandom compat_sys_getrandom
-350 common memfd_create sys_memfd_create compat_sys_memfd_create
-351 common bpf sys_bpf compat_sys_bpf
-352 common s390_pci_mmio_write sys_s390_pci_mmio_write compat_sys_s390_pci_mmio_write
-353 common s390_pci_mmio_read sys_s390_pci_mmio_read compat_sys_s390_pci_mmio_read
+343 common kcmp sys_kcmp sys_kcmp
+344 common finit_module sys_finit_module sys_finit_module
+345 common sched_setattr sys_sched_setattr sys_sched_setattr
+346 common sched_getattr sys_sched_getattr sys_sched_getattr
+347 common renameat2 sys_renameat2 sys_renameat2
+348 common seccomp sys_seccomp sys_seccomp
+349 common getrandom sys_getrandom sys_getrandom
+350 common memfd_create sys_memfd_create sys_memfd_create
+351 common bpf sys_bpf sys_bpf
+352 common s390_pci_mmio_write sys_s390_pci_mmio_write sys_s390_pci_mmio_write
+353 common s390_pci_mmio_read sys_s390_pci_mmio_read sys_s390_pci_mmio_read
354 common execveat sys_execveat compat_sys_execveat
355 common userfaultfd sys_userfaultfd sys_userfaultfd
356 common membarrier sys_membarrier sys_membarrier
-357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg_time32
358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
359 common socket sys_socket sys_socket
-360 common socketpair sys_socketpair compat_sys_socketpair
-361 common bind sys_bind compat_sys_bind
-362 common connect sys_connect compat_sys_connect
+360 common socketpair sys_socketpair sys_socketpair
+361 common bind sys_bind sys_bind
+362 common connect sys_connect sys_connect
363 common listen sys_listen sys_listen
-364 common accept4 sys_accept4 compat_sys_accept4
+364 common accept4 sys_accept4 sys_accept4
365 common getsockopt sys_getsockopt compat_sys_getsockopt
366 common setsockopt sys_setsockopt compat_sys_setsockopt
-367 common getsockname sys_getsockname compat_sys_getsockname
-368 common getpeername sys_getpeername compat_sys_getpeername
-369 common sendto sys_sendto compat_sys_sendto
+367 common getsockname sys_getsockname sys_getsockname
+368 common getpeername sys_getpeername sys_getpeername
+369 common sendto sys_sendto sys_sendto
370 common sendmsg sys_sendmsg compat_sys_sendmsg
371 common recvfrom sys_recvfrom compat_sys_recvfrom
372 common recvmsg sys_recvmsg compat_sys_recvmsg
373 common shutdown sys_shutdown sys_shutdown
-374 common mlock2 sys_mlock2 compat_sys_mlock2
-375 common copy_file_range sys_copy_file_range compat_sys_copy_file_range
+374 common mlock2 sys_mlock2 sys_mlock2
+375 common copy_file_range sys_copy_file_range sys_copy_file_range
376 common preadv2 sys_preadv2 compat_sys_preadv2
377 common pwritev2 sys_pwritev2 compat_sys_pwritev2
-378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage
-379 common statx sys_statx compat_sys_statx
-380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
-381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load
+378 common s390_guarded_storage sys_s390_guarded_storage sys_s390_guarded_storage
+379 common statx sys_statx sys_statx
+380 common s390_sthyi sys_s390_sthyi sys_s390_sthyi
+381 common kexec_file_load sys_kexec_file_load sys_kexec_file_load
382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
-383 common rseq sys_rseq compat_sys_rseq
+383 common rseq sys_rseq sys_rseq
+384 common pkey_mprotect sys_pkey_mprotect sys_pkey_mprotect
+385 common pkey_alloc sys_pkey_alloc sys_pkey_alloc
+386 common pkey_free sys_pkey_free sys_pkey_free
+# room for arch specific syscalls
+392 64 semtimedop sys_semtimedop -
+393 common semget sys_semget sys_semget
+394 common semctl sys_semctl compat_sys_semctl
+395 common shmget sys_shmget sys_shmget
+396 common shmctl sys_shmctl compat_sys_shmctl
+397 common shmat sys_shmat compat_sys_shmat
+398 common shmdt sys_shmdt sys_shmdt
+399 common msgget sys_msgget sys_msgget
+400 common msgsnd sys_msgsnd compat_sys_msgsnd
+401 common msgrcv sys_msgrcv compat_sys_msgrcv
+402 common msgctl sys_msgctl compat_sys_msgctl
+403 32 clock_gettime64 - sys_clock_gettime
+404 32 clock_settime64 - sys_clock_settime
+405 32 clock_adjtime64 - sys_clock_adjtime
+406 32 clock_getres_time64 - sys_clock_getres
+407 32 clock_nanosleep_time64 - sys_clock_nanosleep
+408 32 timer_gettime64 - sys_timer_gettime
+409 32 timer_settime64 - sys_timer_settime
+410 32 timerfd_gettime64 - sys_timerfd_gettime
+411 32 timerfd_settime64 - sys_timerfd_settime
+412 32 utimensat_time64 - sys_utimensat
+413 32 pselect6_time64 - compat_sys_pselect6_time64
+414 32 ppoll_time64 - compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 - sys_io_pgetevents
+417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 - sys_mq_timedsend
+419 32 mq_timedreceive_time64 - sys_mq_timedreceive
+420 32 semtimedop_time64 - sys_semtimedop
+421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 - sys_futex
+423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval
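Reading aid for the table above: each row is <number> <abi> <name> <64-bit entry
point> <compat entry point>. Where the compat column now names a plain sys_*
function, the native entry point serves 31-bit callers directly because no
argument translation is needed; the *_time32 entries and the new 403-423 block
cover the 64-bit time_t conversion. A purely hypothetical row in the same
format, for illustration only:

999  common	example			sys_example			sys_example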
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 12f80d1f0415..2ac3c9b56a13 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -545,8 +545,6 @@ static __init int stsi_init_debugfs(void)
int lvl, i;
stsi_root = debugfs_create_dir("stsi", arch_debugfs_dir);
- if (IS_ERR_OR_NULL(stsi_root))
- return 0;
lvl = stsi(NULL, 0, 0, 0);
if (lvl > 0)
stsi_0_0_0 = lvl;
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ebe748a9f472..e7920a68a12e 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
+ mm->context.compat_mm = is_compat_task();
+ if (mm->context.compat_mm)
vdso_pages = vdso32_pages;
- mm->context.compat_mm = 1;
- }
#endif
/*
* vDSO has a problem and was disabled, just don't "enable" it for
@@ -292,7 +291,6 @@ static int __init vdso_init(void)
BUG_ON(vdso32_pagelist == NULL);
for (i = 0; i < vdso32_pages - 1; i++) {
struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
- ClearPageReserved(pg);
get_page(pg);
vdso32_pagelist[i] = pg;
}
@@ -310,7 +308,6 @@ static int __init vdso_init(void)
BUG_ON(vdso64_pagelist == NULL);
for (i = 0; i < vdso64_pages - 1; i++) {
struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
- ClearPageReserved(pg);
get_page(pg);
vdso64_pagelist[i] = pg;
}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index f24395a01918..98f850e00008 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -69,7 +69,7 @@ static void update_mt_scaling(void)
u64 delta, fac, mult, div;
int i;
- stcctm5(smp_cpu_mtid + 1, cycles_new);
+ stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new);
cycles_old = this_cpu_ptr(mt_cycles);
fac = 1;
mult = div = 0;
@@ -432,6 +432,6 @@ void vtime_init(void)
__this_cpu_write(mt_scaling_jiffies, jiffies);
__this_cpu_write(mt_scaling_mult, 1);
__this_cpu_write(mt_scaling_div, 1);
- stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
+ stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
}
}
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a153257bf7d9..d62fa148558b 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->crycbd = 0;
apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
- if (!apie_h && !key_msk)
+ if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
return 0;
if (!crycb_addr)
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index a10e11f7a5f7..0e30e6e43b0c 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -43,11 +43,13 @@ static inline char *__strnend(const char *s, size_t n)
*
* returns the length of @s
*/
+#ifdef __HAVE_ARCH_STRLEN
size_t strlen(const char *s)
{
return __strend(s) - s;
}
EXPORT_SYMBOL(strlen);
+#endif
/**
* strnlen - Find the length of a length-limited string
@@ -56,11 +58,13 @@ EXPORT_SYMBOL(strlen);
*
* returns the minimum of the length of @s and @n
*/
+#ifdef __HAVE_ARCH_STRNLEN
size_t strnlen(const char *s, size_t n)
{
return __strnend(s, n) - s;
}
EXPORT_SYMBOL(strnlen);
+#endif
/**
* strcpy - Copy a %NUL terminated string
@@ -69,6 +73,7 @@ EXPORT_SYMBOL(strnlen);
*
* returns a pointer to @dest
*/
+#ifdef __HAVE_ARCH_STRCPY
char *strcpy(char *dest, const char *src)
{
register int r0 asm("0") = 0;
@@ -81,6 +86,7 @@ char *strcpy(char *dest, const char *src)
return ret;
}
EXPORT_SYMBOL(strcpy);
+#endif
/**
* strlcpy - Copy a %NUL terminated string into a sized buffer
@@ -93,6 +99,7 @@ EXPORT_SYMBOL(strcpy);
* of course, the buffer size is zero). It does not pad
* out the result like strncpy() does.
*/
+#ifdef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = __strend(src) - src;
@@ -105,6 +112,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
return ret;
}
EXPORT_SYMBOL(strlcpy);
+#endif
/**
* strncpy - Copy a length-limited, %NUL-terminated string
@@ -115,6 +123,7 @@ EXPORT_SYMBOL(strlcpy);
* The result is not %NUL-terminated if the source exceeds
* @n bytes.
*/
+#ifdef __HAVE_ARCH_STRNCPY
char *strncpy(char *dest, const char *src, size_t n)
{
size_t len = __strnend(src, n) - src;
@@ -123,6 +132,7 @@ char *strncpy(char *dest, const char *src, size_t n)
return dest;
}
EXPORT_SYMBOL(strncpy);
+#endif
/**
* strcat - Append one %NUL-terminated string to another
@@ -131,6 +141,7 @@ EXPORT_SYMBOL(strncpy);
*
* returns a pointer to @dest
*/
+#ifdef __HAVE_ARCH_STRCAT
char *strcat(char *dest, const char *src)
{
register int r0 asm("0") = 0;
@@ -146,6 +157,7 @@ char *strcat(char *dest, const char *src)
return ret;
}
EXPORT_SYMBOL(strcat);
+#endif
/**
* strlcat - Append a length-limited, %NUL-terminated string to another
@@ -153,6 +165,7 @@ EXPORT_SYMBOL(strcat);
* @src: The string to append to it
* @n: The size of the destination buffer.
*/
+#ifdef __HAVE_ARCH_STRLCAT
size_t strlcat(char *dest, const char *src, size_t n)
{
size_t dsize = __strend(dest) - dest;
@@ -170,6 +183,7 @@ size_t strlcat(char *dest, const char *src, size_t n)
return res;
}
EXPORT_SYMBOL(strlcat);
+#endif
/**
* strncat - Append a length-limited, %NUL-terminated string to another
@@ -182,6 +196,7 @@ EXPORT_SYMBOL(strlcat);
* Note that in contrast to strncpy, strncat ensures the result is
* terminated.
*/
+#ifdef __HAVE_ARCH_STRNCAT
char *strncat(char *dest, const char *src, size_t n)
{
size_t len = __strnend(src, n) - src;
@@ -192,6 +207,7 @@ char *strncat(char *dest, const char *src, size_t n)
return dest;
}
EXPORT_SYMBOL(strncat);
+#endif
/**
* strcmp - Compare two strings
@@ -202,6 +218,7 @@ EXPORT_SYMBOL(strncat);
* < 0 if @s1 is less than @s2
* > 0 if @s1 is greater than @s2
*/
+#ifdef __HAVE_ARCH_STRCMP
int strcmp(const char *s1, const char *s2)
{
register int r0 asm("0") = 0;
@@ -219,12 +236,14 @@ int strcmp(const char *s1, const char *s2)
return ret;
}
EXPORT_SYMBOL(strcmp);
+#endif
/**
* strrchr - Find the last occurrence of a character in a string
* @s: The string to be searched
* @c: The character to search for
*/
+#ifdef __HAVE_ARCH_STRRCHR
char *strrchr(const char *s, int c)
{
size_t len = __strend(s) - s;
@@ -237,6 +256,7 @@ char *strrchr(const char *s, int c)
return NULL;
}
EXPORT_SYMBOL(strrchr);
+#endif
static inline int clcle(const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
@@ -261,6 +281,7 @@ static inline int clcle(const char *s1, unsigned long l1,
* @s1: The string to be searched
* @s2: The string to search for
*/
+#ifdef __HAVE_ARCH_STRSTR
char *strstr(const char *s1, const char *s2)
{
int l1, l2;
@@ -280,6 +301,7 @@ char *strstr(const char *s1, const char *s2)
return NULL;
}
EXPORT_SYMBOL(strstr);
+#endif
/**
* memchr - Find a character in an area of memory.
@@ -290,6 +312,7 @@ EXPORT_SYMBOL(strstr);
* returns the address of the first occurrence of @c, or %NULL
* if @c is not found
*/
+#ifdef __HAVE_ARCH_MEMCHR
void *memchr(const void *s, int c, size_t n)
{
register int r0 asm("0") = (char) c;
@@ -304,6 +327,7 @@ void *memchr(const void *s, int c, size_t n)
return (void *) ret;
}
EXPORT_SYMBOL(memchr);
+#endif
/**
* memcmp - Compare two areas of memory
@@ -311,6 +335,7 @@ EXPORT_SYMBOL(memchr);
* @s2: Another area of memory
* @count: The size of the area.
*/
+#ifdef __HAVE_ARCH_MEMCMP
int memcmp(const void *s1, const void *s2, size_t n)
{
int ret;
@@ -321,6 +346,7 @@ int memcmp(const void *s1, const void *s2, size_t n)
return ret;
}
EXPORT_SYMBOL(memcmp);
+#endif
/**
* memscan - Find a character in an area of memory.
@@ -331,6 +357,7 @@ EXPORT_SYMBOL(memcmp);
* returns the address of the first occurrence of @c, or 1 byte past
* the area if @c is not found
*/
+#ifdef __HAVE_ARCH_MEMSCAN
void *memscan(void *s, int c, size_t n)
{
register int r0 asm("0") = (char) c;
@@ -342,3 +369,4 @@ void *memscan(void *s, int c, size_t n)
return (void *) ret;
}
EXPORT_SYMBOL(memscan);
+#endif
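The #ifdef guards added throughout lib/string.c above all follow one pattern: an
arch-optimized routine is compiled only when its __HAVE_ARCH_* macro is defined,
so a build that wants the generic implementation instead can simply leave the
macro unset. A stand-alone illustration of the pattern; the macro and function
names below are demo stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define __HAVE_ARCH_STRLEN_DEMO   /* comment out to fall back to the generic */

#ifdef __HAVE_ARCH_STRLEN_DEMO
static size_t strlen_demo(const char *s)
{
	const char *e = s;

	while (*e)                 /* stand-in for the arch-specific version */
		e++;
	return (size_t)(e - s);
}
#else
#include <string.h>
#define strlen_demo strlen         /* generic implementation */
#endif

int main(void)
{
	printf("%zu\n", strlen_demo("s390"));
	return 0;
}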
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index eba2def3414d..0b5622714c12 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -28,12 +28,7 @@
#include <asm/cpcmd.h>
#include <asm/setup.h>
-#define DCSS_LOADSHR 0x00
-#define DCSS_LOADNSR 0x04
#define DCSS_PURGESEG 0x08
-#define DCSS_FINDSEG 0x0c
-#define DCSS_LOADNOLY 0x10
-#define DCSS_SEGEXT 0x18
#define DCSS_LOADSHRX 0x20
#define DCSS_LOADNSRX 0x24
#define DCSS_FINDSEGX 0x2c
@@ -53,20 +48,6 @@ struct qout64 {
struct qrange range[6];
};
-struct qrange_old {
- unsigned int start; /* last byte type */
- unsigned int end; /* last byte reserved */
-};
-
-/* output area format for the Diag x'64' old subcode x'18' */
-struct qout64_old {
- int segstart;
- int segend;
- int segcnt;
- int segrcnt;
- struct qrange_old range[6];
-};
-
struct qin64 {
char qopcode;
char rsrv1[3];
@@ -95,52 +76,10 @@ static DEFINE_MUTEX(dcss_lock);
static LIST_HEAD(dcss_list);
static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
"EW/EN-MIXED" };
-static int loadshr_scode, loadnsr_scode;
-static int segext_scode, purgeseg_scode;
-static int scode_set;
-
-/* set correct Diag x'64' subcodes. */
-static int
-dcss_set_subcodes(void)
-{
- char *name = kmalloc(8, GFP_KERNEL | GFP_DMA);
- unsigned long rx, ry;
- int rc;
-
- if (name == NULL)
- return -ENOMEM;
-
- rx = (unsigned long) name;
- ry = DCSS_FINDSEGX;
-
- strcpy(name, "dummy");
- diag_stat_inc(DIAG_STAT_X064);
- asm volatile(
- " diag %0,%1,0x64\n"
- "0: ipm %2\n"
- " srl %2,28\n"
- " j 2f\n"
- "1: la %2,3\n"
- "2:\n"
- EX_TABLE(0b, 1b)
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory");
-
- kfree(name);
- /* Diag x'64' new subcodes are supported, set to new subcodes */
- if (rc != 3) {
- loadshr_scode = DCSS_LOADSHRX;
- loadnsr_scode = DCSS_LOADNSRX;
- purgeseg_scode = DCSS_PURGESEG;
- segext_scode = DCSS_SEGEXTX;
- return 0;
- }
- /* Diag x'64' new subcodes are not supported, set to old subcodes */
- loadshr_scode = DCSS_LOADNOLY;
- loadnsr_scode = DCSS_LOADNSR;
- purgeseg_scode = DCSS_PURGESEG;
- segext_scode = DCSS_SEGEXT;
- return 0;
-}
+static int loadshr_scode = DCSS_LOADSHRX;
+static int loadnsr_scode = DCSS_LOADNSRX;
+static int purgeseg_scode = DCSS_PURGESEG;
+static int segext_scode = DCSS_SEGEXTX;
/*
* Create the 8 bytes, ebcdic VM segment name from
@@ -196,32 +135,15 @@ dcss_diag(int *func, void *parameter,
unsigned long rx, ry;
int rc;
- if (scode_set == 0) {
- rc = dcss_set_subcodes();
- if (rc < 0)
- return rc;
- scode_set = 1;
- }
rx = (unsigned long) parameter;
ry = (unsigned long) *func;
- /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
diag_stat_inc(DIAG_STAT_X064);
- if (*func > DCSS_SEGEXT)
- asm volatile(
- " diag %0,%1,0x64\n"
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
- /* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */
- else
- asm volatile(
- " sam31\n"
- " diag %0,%1,0x64\n"
- " sam64\n"
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+ asm volatile(
+ " diag %0,%1,0x64\n"
+ " ipm %2\n"
+ " srl %2,28\n"
+ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
*ret1 = rx;
*ret2 = ry;
return rc;
@@ -271,31 +193,6 @@ query_segment_type (struct dcss_segment *seg)
goto out_free;
}
- /* Only old format of output area of Diagnose x'64' is supported,
- copy data for the new format. */
- if (segext_scode == DCSS_SEGEXT) {
- struct qout64_old *qout_old;
- qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA);
- if (qout_old == NULL) {
- rc = -ENOMEM;
- goto out_free;
- }
- memcpy(qout_old, qout, sizeof(struct qout64_old));
- qout->segstart = (unsigned long) qout_old->segstart;
- qout->segend = (unsigned long) qout_old->segend;
- qout->segcnt = qout_old->segcnt;
- qout->segrcnt = qout_old->segrcnt;
-
- if (qout->segcnt > 6)
- qout->segrcnt = 6;
- for (i = 0; i < qout->segrcnt; i++) {
- qout->range[i].start =
- (unsigned long) qout_old->range[i].start;
- qout->range[i].end =
- (unsigned long) qout_old->range[i].end;
- }
- kfree(qout_old);
- }
if (qout->segcnt > 6) {
rc = -EOPNOTSUPP;
goto out_free;
@@ -410,11 +307,9 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
if (rc < 0)
goto out_free;
- if (loadshr_scode == DCSS_LOADSHRX) {
- if (segment_overlaps_others(seg)) {
- rc = -EBUSY;
- goto out_free;
- }
+ if (segment_overlaps_others(seg)) {
+ rc = -EBUSY;
+ goto out_free;
}
rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
@@ -472,11 +367,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
*addr = seg->start_addr;
*end = seg->end;
if (do_nonshared)
- pr_info("DCSS %s of range %p to %p and type %s loaded as "
+ pr_info("DCSS %s of range %px to %px and type %s loaded as "
"exclusive-writable\n", name, (void*) seg->start_addr,
(void*) seg->end, segtype_string[seg->vm_segtype]);
else {
- pr_info("DCSS %s of range %p to %p and type %s loaded in "
+ pr_info("DCSS %s of range %px to %px and type %s loaded in "
"shared access mode\n", name, (void*) seg->start_addr,
(void*) seg->end, segtype_string[seg->vm_segtype]);
}
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index bac5c27d11fc..01892dcf4029 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -226,8 +226,6 @@ static void __init kasan_enable_dat(void)
static void __init kasan_early_detect_facilities(void)
{
- __stfle(S390_lowcore.stfle_fac_list,
- ARRAY_SIZE(S390_lowcore.stfle_fac_list));
if (test_facility(8)) {
has_edat = true;
__ctl_set_bit(0, 23);
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 0a7627cdb34e..687f2a4d3459 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -29,14 +29,6 @@ static unsigned long stack_maxrandom_size(void)
return STACK_RND_MASK << PAGE_SHIFT;
}
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave at least a ~32 MB hole.
- */
-#define MIN_GAP (32*1024*1024)
-#define MAX_GAP (STACK_TOP/6*5)
-
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
@@ -60,13 +52,26 @@ static inline unsigned long mmap_base(unsigned long rnd,
struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
+ unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
+ unsigned long gap_min, gap_max;
+
+ /* Values close to RLIM_INFINITY can overflow. */
+ if (gap + pad > gap)
+ gap += pad;
+
+ /*
+ * Top of mmap area (just below the process stack).
+ * Leave at least a ~32 MB hole.
+ */
+ gap_min = 32 * 1024 * 1024UL;
+ gap_max = (STACK_TOP / 6) * 5;
+
+ if (gap < gap_min)
+ gap = gap_min;
+ else if (gap > gap_max)
+ gap = gap_max;
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
- gap &= PAGE_MASK;
- return STACK_TOP - stack_maxrandom_size() - rnd - gap;
+ return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
unsigned long
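The reworked mmap_base() above folds the old MIN_GAP/MAX_GAP clamping together
with an overflow check on the stack pad. A compilable userspace sketch of the
arithmetic; STACK_TOP_DEMO, the page size and the sample inputs are stand-in
values chosen for illustration, not the kernel's:

#include <stdio.h>

#define STACK_TOP_DEMO   (1UL << 42)
#define GAP_MIN          (32UL * 1024 * 1024)        /* ~32 MB hole */
#define GAP_MAX          ((STACK_TOP_DEMO / 6) * 5)
#define PAGE_SIZE_DEMO   4096UL
#define PAGE_ALIGN_DEMO(x) (((x) + PAGE_SIZE_DEMO - 1) & ~(PAGE_SIZE_DEMO - 1))

static unsigned long mmap_base_sketch(unsigned long rlim_cur, /* stack rlimit */
				      unsigned long rnd,      /* mmap randomization */
				      unsigned long pad)      /* guard gap + stack rnd */
{
	unsigned long gap = rlim_cur;

	/* Values close to RLIM_INFINITY can overflow: only add the pad
	 * when the sum does not wrap around. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < GAP_MIN)
		gap = GAP_MIN;
	else if (gap > GAP_MAX)
		gap = GAP_MAX;

	return PAGE_ALIGN_DEMO(STACK_TOP_DEMO - gap - rnd);
}

int main(void)
{
	printf("normal rlimit: %#lx\n", mmap_base_sketch(8UL << 20, 0, 1UL << 20));
	printf("RLIM_INFINITY: %#lx\n", mmap_base_sketch(~0UL, 0, 1UL << 20));
	return 0;
}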
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index f2cc7da473e4..8485d6dc2754 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -301,12 +301,13 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL(ptep_xchg_lazy);
-pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
pgste_t pgste;
pte_t old;
int nodat;
+ struct mm_struct *mm = vma->vm_mm;
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
@@ -318,12 +319,12 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
}
return old;
}
-EXPORT_SYMBOL(ptep_modify_prot_start);
-void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
{
pgste_t pgste;
+ struct mm_struct *mm = vma->vm_mm;
if (!MACHINE_HAS_NX)
pte_val(pte) &= ~_PAGE_NOEXEC;
@@ -337,7 +338,6 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
}
preempt_enable();
}
-EXPORT_SYMBOL(ptep_modify_prot_commit);
static inline void pmdp_idte_local(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 3ff758eeb71d..51dd0267d014 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1110,103 +1110,145 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
mask = 0xf000; /* j */
goto branch_oc;
case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
+ case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
mask = 0x2000; /* jh */
goto branch_ks;
case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
+ case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
mask = 0x4000; /* jl */
goto branch_ks;
case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
+ case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
mask = 0xa000; /* jhe */
goto branch_ks;
case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
+ case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
mask = 0xc000; /* jle */
goto branch_ks;
case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
+ case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
mask = 0x2000; /* jh */
goto branch_ku;
case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
+ case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
mask = 0x4000; /* jl */
goto branch_ku;
case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
+ case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
mask = 0xa000; /* jhe */
goto branch_ku;
case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
+ case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
mask = 0xc000; /* jle */
goto branch_ku;
case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
+ case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
mask = 0x7000; /* jne */
goto branch_ku;
case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
+ case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
mask = 0x8000; /* je */
goto branch_ku;
case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
+ case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
mask = 0x7000; /* jnz */
- /* lgfi %w1,imm (load sign extend imm) */
- EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* ngr %w1,%dst */
- EMIT4(0xb9800000, REG_W1, dst_reg);
+ if (BPF_CLASS(insn->code) == BPF_JMP32) {
+ /* llilf %w1,imm (load zero extend imm) */
+ EMIT6_IMM(0xc00f0000, REG_W1, imm);
+ /* nr %w1,%dst */
+ EMIT2(0x1400, REG_W1, dst_reg);
+ } else {
+ /* lgfi %w1,imm (load sign extend imm) */
+ EMIT6_IMM(0xc0010000, REG_W1, imm);
+ /* ngr %w1,%dst */
+ EMIT4(0xb9800000, REG_W1, dst_reg);
+ }
goto branch_oc;
case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
+ case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
mask = 0x2000; /* jh */
goto branch_xs;
case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
+ case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
mask = 0x4000; /* jl */
goto branch_xs;
case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
+ case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
mask = 0xa000; /* jhe */
goto branch_xs;
case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
+ case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
mask = 0xc000; /* jle */
goto branch_xs;
case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
+ case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
mask = 0x2000; /* jh */
goto branch_xu;
case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
+ case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
mask = 0x4000; /* jl */
goto branch_xu;
case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
+ case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
mask = 0xa000; /* jhe */
goto branch_xu;
case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
+ case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
mask = 0xc000; /* jle */
goto branch_xu;
case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
+ case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
mask = 0x7000; /* jne */
goto branch_xu;
case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
+ case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
mask = 0x8000; /* je */
goto branch_xu;
case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
+ case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
+ {
+ bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
+
mask = 0x7000; /* jnz */
- /* ngrk %w1,%dst,%src */
- EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg);
+ /* nrk or ngrk %w1,%dst,%src */
+ EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
+ REG_W1, dst_reg, src_reg);
goto branch_oc;
branch_ks:
+ is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
/* lgfi %w1,imm (load sign extend imm) */
EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* cgrj %dst,%w1,mask,off */
- EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask);
+ /* crj or cgrj %dst,%w1,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
+ dst_reg, REG_W1, i, off, mask);
break;
branch_ku:
+ is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
/* lgfi %w1,imm (load sign extend imm) */
EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* clgrj %dst,%w1,mask,off */
- EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask);
+ /* clrj or clgrj %dst,%w1,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
+ dst_reg, REG_W1, i, off, mask);
break;
branch_xs:
- /* cgrj %dst,%src,mask,off */
- EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask);
+ is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
+ /* crj or cgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
+ dst_reg, src_reg, i, off, mask);
break;
branch_xu:
- /* clgrj %dst,%src,mask,off */
- EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask);
+ is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
+ /* clrj or clgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
+ dst_reg, src_reg, i, off, mask);
break;
branch_oc:
/* brc mask,jmp_off (branch instruction needs 4 bytes) */
jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
break;
+ }
default: /* too complex, give up */
pr_err("Unknown opcode %02x\n", insn->code);
return -1;
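The BPF_JMP32 cases added above all reduce to one choice: use the 32-bit
compare-and-branch forms (crj/clrj, extensions 0x0076/0x0077) instead of the
64-bit ones (cgrj/clgrj, 0x0064/0x0065). A tiny stand-alone sketch of that
selection; the constants are taken from the hunk, the function itself is a
demonstration aid rather than JIT code:

#include <stdbool.h>
#include <stdio.h>

static unsigned int compare_branch_opext(bool is_jmp32, bool is_signed)
{
	if (is_signed)
		return is_jmp32 ? 0x0076 : 0x0064;   /* crj  : cgrj  */
	return is_jmp32 ? 0x0077 : 0x0065;           /* clrj : clgrj */
}

int main(void)
{
	printf("JMP32 signed   -> %#06x\n", compare_branch_opext(true, true));
	printf("JMP   unsigned -> %#06x\n", compare_branch_opext(false, false));
	return 0;
}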
diff --git a/arch/s390/net/pnet.c b/arch/s390/net/pnet.c
index e22f1b10a6c7..79211bec0fc8 100644
--- a/arch/s390/net/pnet.c
+++ b/arch/s390/net/pnet.c
@@ -12,6 +12,15 @@
#include <asm/ccwgroup.h>
#include <asm/ccwdev.h>
#include <asm/pnet.h>
+#include <asm/ebcdic.h>
+
+#define PNETIDS_LEN 64 /* Total utility string length in bytes
+ * to cover up to 4 PNETIDs of 16 bytes
+ * for up to 4 device ports
+ */
+#define MAX_PNETID_LEN 16 /* Max. length of a single port PNETID */
+#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN)
+ /* Max. # of ports with a PNETID */
/*
* Get the PNETIDs from a device.
@@ -40,6 +49,7 @@ static int pnet_ids_by_device(struct device *dev, u8 *pnetids)
if (!util_str)
return -ENOMEM;
memcpy(pnetids, util_str, PNETIDS_LEN);
+ EBCASC(pnetids, PNETIDS_LEN);
kfree(util_str);
return 0;
}
@@ -47,6 +57,7 @@ static int pnet_ids_by_device(struct device *dev, u8 *pnetids)
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
memcpy(pnetids, zdev->util_str, sizeof(zdev->util_str));
+ EBCASC(pnetids, sizeof(zdev->util_str));
return 0;
}
return -EOPNOTSUPP;
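The EBCASC() calls added above exist because PNETIDs arrive in EBCDIC in the
CCW/PCI utility string and must be converted before they can be compared as
ASCII. A minimal userspace illustration of that conversion; the table below
covers only uppercase letters and digits and is a demonstration aid, not the
kernel's conversion table:

#include <stdio.h>

static char ebcdic_to_ascii(unsigned char c)
{
	if (c >= 0xC1 && c <= 0xC9) return 'A' + (c - 0xC1); /* A-I */
	if (c >= 0xD1 && c <= 0xD9) return 'J' + (c - 0xD1); /* J-R */
	if (c >= 0xE2 && c <= 0xE9) return 'S' + (c - 0xE2); /* S-Z */
	if (c >= 0xF0 && c <= 0xF9) return '0' + (c - 0xF0); /* 0-9 */
	return ' ';
}

int main(void)
{
	unsigned char pnetid[] = { 0xD5, 0xC5, 0xE3, 0xF1 }; /* "NET1" in EBCDIC */
	unsigned int i;

	for (i = 0; i < sizeof(pnetid); i++)
		putchar(ebcdic_to_ascii(pnetid[i]));
	putchar('\n');
	return 0;
}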
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index a966d7bfac57..dc9bc82c072c 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -285,7 +285,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
struct zpci_dev *zdev = to_zpci(pdev);
int idx;
- if (!pci_resource_len(pdev, bar))
+ if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
return NULL;
idx = zdev->bars[bar].map_idx;
@@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq)
if (ai == -1UL)
break;
inc_irq_stat(IRQIO_MSI);
+ airq_iv_lock(aibv, ai);
generic_handle_irq(airq_iv_get_data(aibv, ai));
+ airq_iv_unlock(aibv, ai);
}
}
}
@@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
zdev->aisb = aisb;
/* Create adapter interrupt vector */
- zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA);
+ zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
if (!zdev->aibv)
return -ENOMEM;
@@ -482,6 +484,15 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
}
}
+#ifdef CONFIG_PCI_IOV
+static struct resource iov_res = {
+ .name = "PCI IOV res",
+ .start = 0,
+ .end = -1,
+ .flags = IORESOURCE_MEM,
+};
+#endif
+
static void zpci_map_resources(struct pci_dev *pdev)
{
resource_size_t len;
@@ -495,6 +506,17 @@ static void zpci_map_resources(struct pci_dev *pdev)
(resource_size_t __force) pci_iomap(pdev, i, 0);
pdev->resource[i].end = pdev->resource[i].start + len - 1;
}
+
+#ifdef CONFIG_PCI_IOV
+ i = PCI_IOV_RESOURCES;
+
+ for (; i < PCI_SRIOV_NUM_BARS + PCI_IOV_RESOURCES; i++) {
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ continue;
+ pdev->resource[i].parent = &iov_res;
+ }
+#endif
}
static void zpci_unmap_resources(struct pci_dev *pdev)
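
Creating the vector with AIRQ_IV_BITLOCK gives every MSI bit its own lock, and the interrupt handler now brackets generic_handle_irq() with airq_iv_lock()/airq_iv_unlock() so the per-bit data cannot be torn down while it is being dispatched. A minimal sketch of that locked dispatch, assuming an already-created vector; the helper is hypothetical:

#include <linux/irq.h>
#include <asm/airq.h>

/* Hypothetical helper showing the locked per-bit dispatch pattern above. */
static void handle_one_msi(struct airq_iv *aibv, unsigned long bit)
{
	airq_iv_lock(aibv, bit);
	generic_handle_irq(airq_iv_get_data(aibv, bit));
	airq_iv_unlock(aibv, bit);
}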
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 04388a254ffb..6b48ca7760a7 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -172,21 +172,14 @@ static const struct file_operations debugfs_pci_perf_fops = {
void zpci_debug_init_device(struct zpci_dev *zdev, const char *name)
{
zdev->debugfs_dev = debugfs_create_dir(name, debugfs_root);
- if (IS_ERR(zdev->debugfs_dev))
- zdev->debugfs_dev = NULL;
-
- zdev->debugfs_perf = debugfs_create_file("statistics",
- S_IFREG | S_IRUGO | S_IWUSR,
- zdev->debugfs_dev, zdev,
- &debugfs_pci_perf_fops);
- if (IS_ERR(zdev->debugfs_perf))
- zdev->debugfs_perf = NULL;
+
+ debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
+ zdev->debugfs_dev, zdev, &debugfs_pci_perf_fops);
}
void zpci_debug_exit_device(struct zpci_dev *zdev)
{
- debugfs_remove(zdev->debugfs_perf);
- debugfs_remove(zdev->debugfs_dev);
+ debugfs_remove_recursive(zdev->debugfs_dev);
}
int __init zpci_debug_init(void)
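
The debugfs cleanup relies on two properties of the API: the creation helpers return error pointers that later debugfs calls accept silently, so return values do not need to be checked or stored per file, and debugfs_remove_recursive() removes the directory together with everything inside it. A minimal sketch of the same pattern for a hypothetical driver (names and fops are illustrative):

#include <linux/debugfs.h>

static struct dentry *demo_dir;		/* hypothetical driver state */

static void demo_debug_init(void *priv, const struct file_operations *fops)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	/* No error checks: debugfs copes with error pointers internally. */
	debugfs_create_file("statistics", 0644, demo_dir, priv, fops);
}

static void demo_debug_exit(void)
{
	debugfs_remove_recursive(demo_dir);
	demo_dir = NULL;
}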
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index a9c36f95744a..d9a9144dec35 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -62,6 +62,7 @@ config SUPERH
config SUPERH32
def_bool "$(ARCH)" = "sh"
+ select ARCH_32BIT_OFF_T
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_IOREMAP_PROT if MMU && !X2TLB
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 22b4106b8084..5495efa07335 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -630,7 +630,6 @@ static struct regulator_init_data cn12_power_init_data = {
static struct fixed_voltage_config cn12_power_info = {
.supply_name = "CN12 SD/MMC Vdd",
.microvolts = 3300000,
- .enable_high = 1,
.init_data = &cn12_power_init_data,
};
@@ -671,7 +670,6 @@ static struct regulator_init_data sdhi0_power_init_data = {
static struct fixed_voltage_config sdhi0_power_info = {
.supply_name = "CN11 SD/MMC Vdd",
.microvolts = 3300000,
- .enable_high = 1,
.init_data = &sdhi0_power_init_data,
};
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile
index 01d0f7fb14cc..2563d1e532e2 100644
--- a/arch/sh/boot/dts/Makefile
+++ b/arch/sh/boot/dts/Makefile
@@ -1,3 +1,3 @@
ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
-obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
endif
diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h
index 101c13c0c6ad..33d1d28057cb 100644
--- a/arch/sh/include/asm/segment.h
+++ b/arch/sh/include/asm/segment.h
@@ -26,7 +26,6 @@ typedef struct {
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index a97f93ca3bd7..9c7d9d9999c6 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -16,8 +16,8 @@
# define __ARCH_WANT_SYS_IPC
# define __ARCH_WANT_SYS_PAUSE
# define __ARCH_WANT_SYS_SIGNAL
-# define __ARCH_WANT_SYS_TIME
-# define __ARCH_WANT_SYS_UTIME
+# define __ARCH_WANT_SYS_TIME32
+# define __ARCH_WANT_SYS_UTIME32
# define __ARCH_WANT_SYS_WAITPID
# define __ARCH_WANT_SYS_SOCKETCALL
# define __ARCH_WANT_SYS_FADVISE64
diff --git a/arch/sh/include/uapi/asm/unistd_32.h b/arch/sh/include/uapi/asm/unistd_32.h
deleted file mode 100644
index 31c85aa251ab..000000000000
--- a/arch/sh/include/uapi/asm/unistd_32.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ASM_SH_UNISTD_32_H
-#define __ASM_SH_UNISTD_32_H
-
-/*
- * Copyright (C) 1999 Niibe Yutaka
- */
-
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
- /* 17 was sys_break */
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
- /* 31 was sys_stty */
- /* 32 was sys_gtty */
-#define __NR_access 33
-#define __NR_nice 34
- /* 35 was sys_ftime */
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
- /* 44 was sys_prof */
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
- /* 53 was sys_lock */
-#define __NR_ioctl 54
-#define __NR_fcntl 55
- /* 56 was sys_mpx */
-#define __NR_setpgid 57
- /* 58 was sys_ulimit */
- /* 59 was sys_olduname */
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
- /* 82 was sys_oldselect */
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
- /* 98 was sys_profil */
-#define __NR_statfs 99
-#define __NR_fstatfs 100
- /* 101 was sys_ioperm */
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
- /* 110 was sys_iopl */
-#define __NR_vhangup 111
- /* 112 was sys_idle */
- /* 113 was sys_vm86old */
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_cacheflush 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
- /* 127 was sys_create_module */
-#define __NR_init_module 128
-#define __NR_delete_module 129
- /* 130 was sys_get_kernel_syms */
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
- /* 137 was sys_afs_syscall */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
- /* 166 was sys_vm86 */
- /* 167 was sys_query_module */
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_chown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
- /* 188 reserved for sys_getpmsg */
- /* 189 reserved for sys_putpmsg */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-#define __NR_getdents64 220
-#define __NR_fcntl64 221
- /* 222 is reserved for tux */
- /* 223 is unused */
-#define __NR_gettid 224
-#define __NR_readahead 225
-#define __NR_setxattr 226
-#define __NR_lsetxattr 227
-#define __NR_fsetxattr 228
-#define __NR_getxattr 229
-#define __NR_lgetxattr 230
-#define __NR_fgetxattr 231
-#define __NR_listxattr 232
-#define __NR_llistxattr 233
-#define __NR_flistxattr 234
-#define __NR_removexattr 235
-#define __NR_lremovexattr 236
-#define __NR_fremovexattr 237
-#define __NR_tkill 238
-#define __NR_sendfile64 239
-#define __NR_futex 240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
- /* 243 is reserved for set_thread_area */
- /* 244 is reserved for get_thread_area */
-#define __NR_io_setup 245
-#define __NR_io_destroy 246
-#define __NR_io_getevents 247
-#define __NR_io_submit 248
-#define __NR_io_cancel 249
-#define __NR_fadvise64 250
- /* 251 is unused */
-#define __NR_exit_group 252
-#define __NR_lookup_dcookie 253
-#define __NR_epoll_create 254
-#define __NR_epoll_ctl 255
-#define __NR_epoll_wait 256
-#define __NR_remap_file_pages 257
-#define __NR_set_tid_address 258
-#define __NR_timer_create 259
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
-#define __NR_statfs64 268
-#define __NR_fstatfs64 269
-#define __NR_tgkill 270
-#define __NR_utimes 271
-#define __NR_fadvise64_64 272
- /* 273 is reserved for vserver */
-#define __NR_mbind 274
-#define __NR_get_mempolicy 275
-#define __NR_set_mempolicy 276
-#define __NR_mq_open 277
-#define __NR_mq_unlink (__NR_mq_open+1)
-#define __NR_mq_timedsend (__NR_mq_open+2)
-#define __NR_mq_timedreceive (__NR_mq_open+3)
-#define __NR_mq_notify (__NR_mq_open+4)
-#define __NR_mq_getsetattr (__NR_mq_open+5)
-#define __NR_kexec_load 283
-#define __NR_waitid 284
-#define __NR_add_key 285
-#define __NR_request_key 286
-#define __NR_keyctl 287
-#define __NR_ioprio_set 288
-#define __NR_ioprio_get 289
-#define __NR_inotify_init 290
-#define __NR_inotify_add_watch 291
-#define __NR_inotify_rm_watch 292
- /* 293 is unused */
-#define __NR_migrate_pages 294
-#define __NR_openat 295
-#define __NR_mkdirat 296
-#define __NR_mknodat 297
-#define __NR_fchownat 298
-#define __NR_futimesat 299
-#define __NR_fstatat64 300
-#define __NR_unlinkat 301
-#define __NR_renameat 302
-#define __NR_linkat 303
-#define __NR_symlinkat 304
-#define __NR_readlinkat 305
-#define __NR_fchmodat 306
-#define __NR_faccessat 307
-#define __NR_pselect6 308
-#define __NR_ppoll 309
-#define __NR_unshare 310
-#define __NR_set_robust_list 311
-#define __NR_get_robust_list 312
-#define __NR_splice 313
-#define __NR_sync_file_range 314
-#define __NR_tee 315
-#define __NR_vmsplice 316
-#define __NR_move_pages 317
-#define __NR_getcpu 318
-#define __NR_epoll_pwait 319
-#define __NR_utimensat 320
-#define __NR_signalfd 321
-#define __NR_timerfd_create 322
-#define __NR_eventfd 323
-#define __NR_fallocate 324
-#define __NR_timerfd_settime 325
-#define __NR_timerfd_gettime 326
-#define __NR_signalfd4 327
-#define __NR_eventfd2 328
-#define __NR_epoll_create1 329
-#define __NR_dup3 330
-#define __NR_pipe2 331
-#define __NR_inotify_init1 332
-#define __NR_preadv 333
-#define __NR_pwritev 334
-#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_event_open 336
-#define __NR_fanotify_init 337
-#define __NR_fanotify_mark 338
-#define __NR_prlimit64 339
-
-/* Non-multiplexed socket family */
-#define __NR_socket 340
-#define __NR_bind 341
-#define __NR_connect 342
-#define __NR_listen 343
-#define __NR_accept 344
-#define __NR_getsockname 345
-#define __NR_getpeername 346
-#define __NR_socketpair 347
-#define __NR_send 348
-#define __NR_sendto 349
-#define __NR_recv 350
-#define __NR_recvfrom 351
-#define __NR_shutdown 352
-#define __NR_setsockopt 353
-#define __NR_getsockopt 354
-#define __NR_sendmsg 355
-#define __NR_recvmsg 356
-#define __NR_recvmmsg 357
-#define __NR_accept4 358
-#define __NR_name_to_handle_at 359
-#define __NR_open_by_handle_at 360
-#define __NR_clock_adjtime 361
-#define __NR_syncfs 362
-#define __NR_sendmmsg 363
-#define __NR_setns 364
-#define __NR_process_vm_readv 365
-#define __NR_process_vm_writev 366
-#define __NR_kcmp 367
-#define __NR_finit_module 368
-#define __NR_sched_getattr 369
-#define __NR_sched_setattr 370
-#define __NR_renameat2 371
-#define __NR_seccomp 372
-#define __NR_getrandom 373
-#define __NR_memfd_create 374
-#define __NR_bpf 375
-#define __NR_execveat 376
-#define __NR_userfaultfd 377
-#define __NR_membarrier 378
-#define __NR_mlock2 379
-#define __NR_copy_file_range 380
-#define __NR_preadv2 381
-#define __NR_pwritev2 382
-
-#ifdef __KERNEL__
-#define __NR_syscalls 383
-#endif
-
-#endif /* __ASM_SH_UNISTD_32_H */
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 21ec75288562..bfda678576e4 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -20,7 +20,7 @@
10 common unlink sys_unlink
11 common execve sys_execve
12 common chdir sys_chdir
-13 common time sys_time
+13 common time sys_time32
14 common mknod sys_mknod
15 common chmod sys_chmod
16 common lchown sys_lchown16
@@ -32,12 +32,12 @@
22 common umount sys_oldumount
23 common setuid sys_setuid16
24 common getuid sys_getuid16
-25 common stime sys_stime
+25 common stime sys_stime32
26 common ptrace sys_ptrace
27 common alarm sys_alarm
28 common oldfstat sys_fstat
29 common pause sys_pause
-30 common utime sys_utime
+30 common utime sys_utime32
# 31 was stty
# 32 was gtty
33 common access sys_access
@@ -131,7 +131,7 @@
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
123 common cacheflush sys_cacheflush
-124 common adjtimex sys_adjtimex
+124 common adjtimex sys_adjtimex_time32
125 common mprotect sys_mprotect
126 common sigprocmask sys_sigprocmask
# 127 was create_module
@@ -168,8 +168,8 @@
158 common sched_yield sys_sched_yield
159 common sched_get_priority_max sys_sched_get_priority_max
160 common sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval
-162 common nanosleep sys_nanosleep
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
163 common mremap sys_mremap
164 common setresuid sys_setresuid16
165 common getresuid sys_getresuid16
@@ -184,7 +184,7 @@
174 common rt_sigaction sys_rt_sigaction
175 common rt_sigprocmask sys_rt_sigprocmask
176 common rt_sigpending sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32
178 common rt_sigqueueinfo sys_rt_sigqueueinfo
179 common rt_sigsuspend sys_rt_sigsuspend
180 common pread64 sys_pread_wrapper
@@ -247,14 +247,14 @@
237 common fremovexattr sys_fremovexattr
238 common tkill sys_tkill
239 common sendfile64 sys_sendfile64
-240 common futex sys_futex
+240 common futex sys_futex_time32
241 common sched_setaffinity sys_sched_setaffinity
242 common sched_getaffinity sys_sched_getaffinity
# 243 is reserved for set_thread_area
# 244 is reserved for get_thread_area
245 common io_setup sys_io_setup
246 common io_destroy sys_io_destroy
-247 common io_getevents sys_io_getevents
+247 common io_getevents sys_io_getevents_time32
248 common io_submit sys_io_submit
249 common io_cancel sys_io_cancel
250 common fadvise64 sys_fadvise64
@@ -267,18 +267,18 @@
257 common remap_file_pages sys_remap_file_pages
258 common set_tid_address sys_set_tid_address
259 common timer_create sys_timer_create
-260 common timer_settime sys_timer_settime
-261 common timer_gettime sys_timer_gettime
+260 common timer_settime sys_timer_settime32
+261 common timer_gettime sys_timer_gettime32
262 common timer_getoverrun sys_timer_getoverrun
263 common timer_delete sys_timer_delete
-264 common clock_settime sys_clock_settime
-265 common clock_gettime sys_clock_gettime
-266 common clock_getres sys_clock_getres
-267 common clock_nanosleep sys_clock_nanosleep
+264 common clock_settime sys_clock_settime32
+265 common clock_gettime sys_clock_gettime32
+266 common clock_getres sys_clock_getres_time32
+267 common clock_nanosleep sys_clock_nanosleep_time32
268 common statfs64 sys_statfs64
269 common fstatfs64 sys_fstatfs64
270 common tgkill sys_tgkill
-271 common utimes sys_utimes
+271 common utimes sys_utimes_time32
272 common fadvise64_64 sys_fadvise64_64_wrapper
# 273 is reserved for vserver
274 common mbind sys_mbind
@@ -286,8 +286,8 @@
276 common set_mempolicy sys_set_mempolicy
277 common mq_open sys_mq_open
278 common mq_unlink sys_mq_unlink
-279 common mq_timedsend sys_mq_timedsend
-280 common mq_timedreceive sys_mq_timedreceive
+279 common mq_timedsend sys_mq_timedsend_time32
+280 common mq_timedreceive sys_mq_timedreceive_time32
281 common mq_notify sys_mq_notify
282 common mq_getsetattr sys_mq_getsetattr
283 common kexec_load sys_kexec_load
@@ -306,7 +306,7 @@
296 common mkdirat sys_mkdirat
297 common mknodat sys_mknodat
298 common fchownat sys_fchownat
-299 common futimesat sys_futimesat
+299 common futimesat sys_futimesat_time32
300 common fstatat64 sys_fstatat64
301 common unlinkat sys_unlinkat
302 common renameat sys_renameat
@@ -315,8 +315,8 @@
305 common readlinkat sys_readlinkat
306 common fchmodat sys_fchmodat
307 common faccessat sys_faccessat
-308 common pselect6 sys_pselect6
-309 common ppoll sys_ppoll
+308 common pselect6 sys_pselect6_time32
+309 common ppoll sys_ppoll_time32
310 common unshare sys_unshare
311 common set_robust_list sys_set_robust_list
312 common get_robust_list sys_get_robust_list
@@ -327,13 +327,13 @@
317 common move_pages sys_move_pages
318 common getcpu sys_getcpu
319 common epoll_pwait sys_epoll_pwait
-320 common utimensat sys_utimensat
+320 common utimensat sys_utimensat_time32
321 common signalfd sys_signalfd
322 common timerfd_create sys_timerfd_create
323 common eventfd sys_eventfd
324 common fallocate sys_fallocate
-325 common timerfd_settime sys_timerfd_settime
-326 common timerfd_gettime sys_timerfd_gettime
+325 common timerfd_settime sys_timerfd_settime32
+326 common timerfd_gettime sys_timerfd_gettime32
327 common signalfd4 sys_signalfd4
328 common eventfd2 sys_eventfd2
329 common epoll_create1 sys_epoll_create1
@@ -364,11 +364,11 @@
354 common getsockopt sys_getsockopt
355 common sendmsg sys_sendmsg
356 common recvmsg sys_recvmsg
-357 common recvmmsg sys_recvmmsg
+357 common recvmmsg sys_recvmmsg_time32
358 common accept4 sys_accept4
359 common name_to_handle_at sys_name_to_handle_at
360 common open_by_handle_at sys_open_by_handle_at
-361 common clock_adjtime sys_clock_adjtime
+361 common clock_adjtime sys_clock_adjtime32
362 common syncfs sys_syncfs
363 common sendmmsg sys_sendmmsg
364 common setns sys_setns
@@ -390,3 +390,39 @@
380 common copy_file_range sys_copy_file_range
381 common preadv2 sys_preadv2
382 common pwritev2 sys_pwritev2
+383 common statx sys_statx
+384 common pkey_mprotect sys_pkey_mprotect
+385 common pkey_alloc sys_pkey_alloc
+386 common pkey_free sys_pkey_free
+387 common rseq sys_rseq
+# room for arch specific syscalls
+393 common semget sys_semget
+394 common semctl sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl
+397 common shmat sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd
+401 common msgrcv sys_msgrcv
+402 common msgctl sys_msgctl
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
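
The 403+ entries give 32-bit sh a parallel set of time64 syscalls that take 64-bit time_t arguments, while the original numbers keep their 32-bit layouts under the *_time32 handlers. A hedged userspace sketch of invoking the new clock_gettime64 entry directly; the struct layout and the hard-coded number 403 are taken from the table above, everything else is illustrative:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* 64-bit timespec as expected by the *_time64 syscalls on 32-bit ABIs. */
struct kernel_timespec64 {
	long long tv_sec;
	long long tv_nsec;
};

int main(void)
{
	struct kernel_timespec64 ts;

	/* 403 == clock_gettime64 on sh per the table above; 0 == CLOCK_REALTIME. */
	if (syscall(403, 0, &ts) == 0)
		printf("%lld.%09lld\n", ts.tv_sec, ts.tv_nsec);
	return 0;
}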
diff --git a/arch/sh/kernel/syscalls/syscalltbl.sh b/arch/sh/kernel/syscalls/syscalltbl.sh
index 85d78d9309ad..904b8e6e625d 100644
--- a/arch/sh/kernel/syscalls/syscalltbl.sh
+++ b/arch/sh/kernel/syscalls/syscalltbl.sh
@@ -13,10 +13,10 @@ emit() {
t_entry="$3"
while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
t_nxt=$((t_nxt+1))
done
- printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+ printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
}
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 96e9c54a07f5..bd1a9c544767 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -10,7 +10,7 @@
#include <linux/sys.h>
#include <linux/linkage.h>
-#define __SYSCALL(nr, entry, nargs) .long entry
+#define __SYSCALL(nr, entry) .long entry
.data
ENTRY(sys_call_table)
#include <asm/syscall_table.h>
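
Dropping the unused nargs column means syscalltbl.sh now prints two-argument __SYSCALL(nr, entry) lines and syscalls_32.S turns each into one table slot. An illustrative C view of what the generated table amounts to; the real sh code expands the macro to a .long directive in assembly and the script fills numbering gaps with sys_ni_syscall, and all names below are hypothetical:

#include <stdio.h>

typedef long (*sys_call_ptr_t)(void);

static long demo_ni_syscall(void) { return -38; /* -ENOSYS */ }
static long demo_read(void)       { return 0; }

#define __SYSCALL(nr, entry) [nr] = (sys_call_ptr_t)entry,

static const sys_call_ptr_t demo_call_table[] = {
	__SYSCALL(0, demo_ni_syscall)
	__SYSCALL(3, demo_read)	/* in the real table, gaps are emitted as ni_syscall slots */
};

int main(void)
{
	printf("table has %zu slots\n",
	       sizeof(demo_call_table) / sizeof(demo_call_table[0]));
	return 0;
}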
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index d5dd652fb8cc..40f8f4f73fe8 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -49,6 +49,7 @@ config SPARC
config SPARC32
def_bool !64BIT
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU
select GENERIC_ATOMIC64
select CLZ_TAB
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 56499ea39fd3..4884315daff4 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -53,7 +53,7 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key,
* weak key detection code.
*/
ret = des_ekey(tmp, key);
- if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
@@ -209,7 +209,7 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 5153798051fb..d6d8413eca83 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -25,7 +25,6 @@
#define KERNEL_DS ((mm_segment_t) { 0 })
#define USER_DS ((mm_segment_t) { -1 })
-#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.current_ds)
#define set_fs(val) ((current->thread.current_ds) = (val))
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 87ae9ffb1521..bf9d330073b2 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -31,7 +31,6 @@
#define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */
#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
-#define get_ds() (KERNEL_DS)
#define segment_eq(a, b) ((a).seg == (b).seg)
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 5194d86ef72d..1e66278ba4a5 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -30,8 +30,8 @@
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_TIME32
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
@@ -43,8 +43,8 @@
#ifdef __32bit_syscall_numbers__
#define __ARCH_WANT_SYS_IPC
#else
-#define __ARCH_WANT_COMPAT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME32
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
@@ -59,9 +59,4 @@
#define __IGNORE_getresgid
#endif
-/* Sparc doesn't have protection keys. */
-#define __IGNORE_pkey_mprotect
-#define __IGNORE_pkey_alloc
-#define __IGNORE_pkey_free
-
#endif /* _SPARC_UNISTD_H */
diff --git a/arch/sparc/include/uapi/asm/posix_types.h b/arch/sparc/include/uapi/asm/posix_types.h
index fec499d6efb7..f139e0048628 100644
--- a/arch/sparc/include/uapi/asm/posix_types.h
+++ b/arch/sparc/include/uapi/asm/posix_types.h
@@ -19,6 +19,16 @@ typedef unsigned short __kernel_old_gid_t;
typedef int __kernel_suseconds_t;
#define __kernel_suseconds_t __kernel_suseconds_t
+typedef long __kernel_long_t;
+typedef unsigned long __kernel_ulong_t;
+#define __kernel_long_t __kernel_long_t
+
+struct __kernel_old_timeval {
+ __kernel_long_t tv_sec;
+ __kernel_suseconds_t tv_usec;
+};
+#define __kernel_old_timeval __kernel_old_timeval
+
#else
/* sparc 32 bit */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 7ea35e5601b6..88fe4f978aca 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -3,6 +3,7 @@
#define _ASM_SOCKET_H
#include <asm/sockios.h>
+#include <asm/bitsperlong.h>
/* For setsockopt(2) */
#define SOL_SOCKET 0xffff
@@ -20,8 +21,8 @@
#define SO_BSDCOMPAT 0x0400
#define SO_RCVLOWAT 0x0800
#define SO_SNDLOWAT 0x1000
-#define SO_RCVTIMEO 0x2000
-#define SO_SNDTIMEO 0x4000
+#define SO_RCVTIMEO_OLD 0x2000
+#define SO_SNDTIMEO_OLD 0x4000
#define SO_ACCEPTCONN 0x8000
#define SO_SNDBUF 0x1001
@@ -33,7 +34,6 @@
#define SO_PROTOCOL 0x1028
#define SO_DOMAIN 0x1029
-
/* Linux specific, keep the same. */
#define SO_NO_CHECK 0x000b
#define SO_PRIORITY 0x000c
@@ -45,19 +45,12 @@
#define SO_GET_FILTER SO_ATTACH_FILTER
#define SO_PEERNAME 0x001c
-#define SO_TIMESTAMP 0x001d
-#define SCM_TIMESTAMP SO_TIMESTAMP
#define SO_PEERSEC 0x001e
#define SO_PASSSEC 0x001f
-#define SO_TIMESTAMPNS 0x0021
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
#define SO_MARK 0x0022
-#define SO_TIMESTAMPING 0x0023
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
#define SO_RXQ_OVFL 0x0024
#define SO_WIFI_STATUS 0x0025
@@ -104,9 +97,47 @@
#define SO_TXTIME 0x003f
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 0x0041
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
#define SO_SECURITY_ENCRYPTION_NETWORK 0x5004
+#define SO_TIMESTAMP_OLD 0x001d
+#define SO_TIMESTAMPNS_OLD 0x0021
+#define SO_TIMESTAMPING_OLD 0x0023
+
+#define SO_TIMESTAMP_NEW 0x0046
+#define SO_TIMESTAMPNS_NEW 0x0042
+#define SO_TIMESTAMPING_NEW 0x0043
+
+#define SO_RCVTIMEO_NEW 0x0044
+#define SO_SNDTIMEO_NEW 0x0045
+
+#if !defined(__KERNEL__)
+
+
+#if __BITS_PER_LONG == 64
+#define SO_TIMESTAMP SO_TIMESTAMP_OLD
+#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD
+#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD
+
+#define SO_RCVTIMEO SO_RCVTIMEO_OLD
+#define SO_SNDTIMEO SO_SNDTIMEO_OLD
+#else
+#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW)
+#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW)
+#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW)
+
+#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW)
+#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW)
+#endif
+
+#define SCM_TIMESTAMP SO_TIMESTAMP
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#endif
+
#endif /* _ASM_SOCKET_H */
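
For 64-bit sparc the header keeps the original option numbers; for 32-bit userspace it now selects the _OLD or _NEW value at compile time, depending on whether the libc's time_t still has the width of __kernel_long_t. A hedged userspace sketch showing that the same source picks up whichever value matches its time_t; the socket setup is purely illustrative:

#include <stdio.h>
#include <sys/socket.h>
#include <time.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;

	/*
	 * On 32-bit sparc this resolves to SO_TIMESTAMP_OLD (0x001d) with a
	 * 32-bit time_t and to SO_TIMESTAMP_NEW (0x0046) with a 64-bit time_t;
	 * the kernel accepts both and replies with the matching cmsg layout.
	 */
	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on)) < 0)
		perror("socket/setsockopt");
	printf("SO_TIMESTAMP = %#x\n", SO_TIMESTAMP);
	return 0;
}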
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index be71ae086622..0ca08d455e80 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/of_device.h>
+#include <linux/numa.h>
#include <asm/prom.h>
#include <asm/irq.h>
@@ -416,7 +417,7 @@ static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
struct device_node *dp = op->dev.of_node;
int err;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 12;
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 934b97c72f7c..421aba00e6b0 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
+#include <linux/numa.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -1347,7 +1348,7 @@ static int schizo_pbm_init(struct pci_pbm_info *pbm,
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 8;
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index 81aa91e5c0e6..e90bcb6bad7f 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/numa.h>
#include <asm/upa.h>
@@ -454,7 +455,7 @@ void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct platform_device *op
struct device_node *dp = op->dev.of_node;
pbm->name = dp->full_name;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->chip_type = chip_type;
pbm->chip_version = of_getintprop_default(dp, "version#", 0);
pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0);
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 41c5deb581b8..32141e1006c4 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/numa.h>
#include <asm/page.h>
#include <asm/io.h>
@@ -561,7 +562,7 @@ static void __init sbus_iommu_init(struct platform_device *op)
op->dev.archdata.iommu = iommu;
op->dev.archdata.stc = strbuf;
- op->dev.archdata.numa_node = -1;
+ op->dev.archdata.numa_node = NUMA_NO_NODE;
reg_base = regs + SYSIO_IOMMUREG_BASE;
iommu->iommu_control = reg_base + IOMMU_CONTROL;
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 274ed0b9b3e0..9825ca6a6020 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -28,8 +28,9 @@
#include <linux/random.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
-
+#include <linux/timex.h>
#include <linux/uaccess.h>
+
#include <asm/utrap.h>
#include <asm/unistd.h>
@@ -344,7 +345,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
goto out;
case SEMTIMEDOP:
err = sys_semtimedop(first, ptr, (unsigned int)second,
- (const struct timespec __user *)
+ (const struct __kernel_timespec __user *)
(unsigned long) fifth);
goto out;
case SEMGET:
@@ -544,6 +545,62 @@ out_unlock:
return err;
}
+SYSCALL_DEFINE1(sparc_adjtimex, struct timex __user *, txc_p)
+{
+ struct timex txc; /* Local copy of parameter */
+ struct __kernel_timex *kt = (void *)&txc;
+ int ret;
+
+ /* Copy the user data into the kernel copy of the
+ * structure, but bear in mind that the structures
+ * may change.
+ */
+ if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
+ return -EFAULT;
+
+ /*
+ * Override for the sparc64-specific timeval type: tv_usec
+ * is 32 bits wide here instead of 64 bits in __kernel_timex
+ */
+ kt->time.tv_usec = txc.time.tv_usec;
+ ret = do_adjtimex(kt);
+ txc.time.tv_usec = kt->time.tv_usec;
+
+ return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
+}
+
+SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock, struct timex __user *, txc_p)
+{
+ struct timex txc; /* Local copy of parameter */
+ struct __kernel_timex *kt = (void *)&txc;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
+ pr_err_once("process %d (%s) attempted a POSIX timer syscall "
+ "while CONFIG_POSIX_TIMERS is not set\n",
+ current->pid, current->comm);
+
+ return -ENOSYS;
+ }
+
+ /* Copy the user data into the kernel copy of the
+ * structure, but bear in mind that the structures
+ * may change.
+ */
+ if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
+ return -EFAULT;
+
+ /*
+ * Override for the sparc64-specific timeval type: tv_usec
+ * is 32 bits wide here instead of 64 bits in __kernel_timex
+ */
+ kt->time.tv_usec = txc.time.tv_usec;
+ ret = do_clock_adjtime(which_clock, kt);
+ txc.time.tv_usec = kt->time.tv_usec;
+
+ return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
+}
+
SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
utrap_handler_t, new_p, utrap_handler_t, new_d,
utrap_handler_t __user *, old_p,
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index c8c77c05ea97..b9a5a04b2d2c 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -44,7 +44,8 @@
28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
29 32 pause sys_pause
29 64 pause sys_nis_syscall
-30 common utime sys_utime compat_sys_utime
+30 32 utime sys_utime32
+30 64 utime sys_utime
31 32 lchown32 sys_lchown
32 32 fchown32 sys_fchown
33 common access sys_access
@@ -128,7 +129,8 @@
102 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
103 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
104 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-105 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+105 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+105 64 rt_sigtimedwait sys_rt_sigtimedwait
106 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
107 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
108 32 setresuid32 sys_setresuid
@@ -168,11 +170,13 @@
135 common socketpair sys_socketpair
136 common mkdir sys_mkdir
137 common rmdir sys_rmdir
-138 common utimes sys_utimes compat_sys_utimes
+138 32 utimes sys_utimes_time32
+138 64 utimes sys_utimes
139 common stat64 sys_stat64 compat_sys_stat64
140 common sendfile64 sys_sendfile64
141 common getpeername sys_getpeername
-142 common futex sys_futex compat_sys_futex
+142 32 futex sys_futex_time32
+142 64 futex sys_futex
143 common gettid sys_gettid
144 common getrlimit sys_getrlimit compat_sys_getrlimit
145 common setrlimit sys_setrlimit compat_sys_setrlimit
@@ -258,7 +262,8 @@
216 64 sigreturn sys_nis_syscall
217 common clone sys_clone
218 common ioprio_get sys_ioprio_get
-219 common adjtimex sys_adjtimex compat_sys_adjtimex
+219 32 adjtimex sys_adjtimex_time32
+219 64 adjtimex sys_sparc_adjtimex
220 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
220 64 sigprocmask sys_nis_syscall
221 common create_module sys_ni_syscall
@@ -271,9 +276,10 @@
228 common setfsuid sys_setfsuid16
229 common setfsgid sys_setfsgid16
230 common _newselect sys_select compat_sys_select
-231 32 time sys_time compat_sys_time
+231 32 time sys_time32
232 common splice sys_splice
-233 common stime sys_stime compat_sys_stime
+233 32 stime sys_stime32
+233 64 stime sys_stime
234 common statfs64 sys_statfs64 compat_sys_statfs64
235 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
236 common _llseek sys_llseek
@@ -288,8 +294,10 @@
245 common sched_yield sys_sched_yield
246 common sched_get_priority_max sys_sched_get_priority_max
247 common sched_get_priority_min sys_sched_get_priority_min
-248 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
-249 common nanosleep sys_nanosleep compat_sys_nanosleep
+248 32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+248 64 sched_rr_get_interval sys_sched_rr_get_interval
+249 32 nanosleep sys_nanosleep_time32
+249 64 nanosleep sys_nanosleep
250 32 mremap sys_mremap
250 64 mremap sys_64_mremap
251 common _sysctl sys_sysctl compat_sys_sysctl
@@ -298,14 +306,20 @@
254 32 nfsservctl sys_ni_syscall sys_nis_syscall
254 64 nfsservctl sys_nis_syscall
255 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
-256 common clock_settime sys_clock_settime compat_sys_clock_settime
-257 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
-258 common clock_getres sys_clock_getres compat_sys_clock_getres
-259 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+256 32 clock_settime sys_clock_settime32
+256 64 clock_settime sys_clock_settime
+257 32 clock_gettime sys_clock_gettime32
+257 64 clock_gettime sys_clock_gettime
+258 32 clock_getres sys_clock_getres_time32
+258 64 clock_getres sys_clock_getres
+259 32 clock_nanosleep sys_clock_nanosleep_time32
+259 64 clock_nanosleep sys_clock_nanosleep
260 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
261 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
-262 common timer_settime sys_timer_settime compat_sys_timer_settime
-263 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+262 32 timer_settime sys_timer_settime32
+262 64 timer_settime sys_timer_settime
+263 32 timer_gettime sys_timer_gettime32
+263 64 timer_gettime sys_timer_gettime
264 common timer_getoverrun sys_timer_getoverrun
265 common timer_delete sys_timer_delete
266 common timer_create sys_timer_create compat_sys_timer_create
@@ -315,11 +329,14 @@
269 common io_destroy sys_io_destroy
270 common io_submit sys_io_submit compat_sys_io_submit
271 common io_cancel sys_io_cancel
-272 common io_getevents sys_io_getevents compat_sys_io_getevents
+272 32 io_getevents sys_io_getevents_time32
+272 64 io_getevents sys_io_getevents
273 common mq_open sys_mq_open compat_sys_mq_open
274 common mq_unlink sys_mq_unlink
-275 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
-276 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+275 32 mq_timedsend sys_mq_timedsend_time32
+275 64 mq_timedsend sys_mq_timedsend
+276 32 mq_timedreceive sys_mq_timedreceive_time32
+276 64 mq_timedreceive sys_mq_timedreceive
277 common mq_notify sys_mq_notify compat_sys_mq_notify
278 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
279 common waitid sys_waitid compat_sys_waitid
@@ -331,7 +348,8 @@
285 common mkdirat sys_mkdirat
286 common mknodat sys_mknodat
287 common fchownat sys_fchownat
-288 common futimesat sys_futimesat compat_sys_futimesat
+288 32 futimesat sys_futimesat_time32
+288 64 futimesat sys_futimesat
289 common fstatat64 sys_fstatat64 compat_sys_fstatat64
290 common unlinkat sys_unlinkat
291 common renameat sys_renameat
@@ -340,8 +358,10 @@
294 common readlinkat sys_readlinkat
295 common fchmodat sys_fchmodat
296 common faccessat sys_faccessat
-297 common pselect6 sys_pselect6 compat_sys_pselect6
-298 common ppoll sys_ppoll compat_sys_ppoll
+297 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+297 64 pselect6 sys_pselect6
+298 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+298 64 ppoll sys_ppoll
299 common unshare sys_unshare
300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
@@ -353,13 +373,16 @@
307 common move_pages sys_move_pages compat_sys_move_pages
308 common getcpu sys_getcpu
309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
-310 common utimensat sys_utimensat compat_sys_utimensat
+310 32 utimensat sys_utimensat_time32
+310 64 utimensat sys_utimensat
311 common signalfd sys_signalfd compat_sys_signalfd
312 common timerfd_create sys_timerfd_create
313 common eventfd sys_eventfd
314 common fallocate sys_fallocate compat_sys_fallocate
-315 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
-316 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+315 32 timerfd_settime sys_timerfd_settime32
+315 64 timerfd_settime sys_timerfd_settime
+316 32 timerfd_gettime sys_timerfd_gettime32
+316 64 timerfd_gettime sys_timerfd_gettime
317 common signalfd4 sys_signalfd4 compat_sys_signalfd4
318 common eventfd2 sys_eventfd2
319 common epoll_create1 sys_epoll_create1
@@ -371,13 +394,15 @@
325 common pwritev sys_pwritev compat_sys_pwritev
326 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
327 common perf_event_open sys_perf_event_open
-328 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+328 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+328 64 recvmmsg sys_recvmmsg
329 common fanotify_init sys_fanotify_init
330 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
331 common prlimit64 sys_prlimit64
332 common name_to_handle_at sys_name_to_handle_at
333 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
-334 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+334 32 clock_adjtime sys_clock_adjtime32
+334 64 clock_adjtime sys_sparc_clock_adjtime
335 common syncfs sys_syncfs
336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
337 common setns sys_setns
@@ -406,4 +431,41 @@
358 common preadv2 sys_preadv2 compat_sys_preadv2
359 common pwritev2 sys_pwritev2 compat_sys_pwritev2
360 common statx sys_statx
-361 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
+361 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+361 64 io_pgetevents sys_io_pgetevents
+362 common pkey_mprotect sys_pkey_mprotect
+363 common pkey_alloc sys_pkey_alloc
+364 common pkey_free sys_pkey_free
+365 common rseq sys_rseq
+# room for arch specific syscalls
+392 64 semtimedop sys_semtimedop
+393 common semget sys_semget
+394 common semctl sys_semctl compat_sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl compat_sys_shmctl
+397 common shmat sys_shmat compat_sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd compat_sys_msgsnd
+401 common msgrcv sys_msgrcv compat_sys_msgrcv
+402 common msgctl sys_msgctl compat_sys_msgctl
+403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 32 clock_settime64 sys_clock_settime sys_clock_settime
+405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 32 timer_settime64 sys_timer_settime sys_timer_settime
+410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 32 utimensat_time64 sys_utimensat sys_utimensat
+413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 sys_futex sys_futex
+423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index b4221d3727d0..9e6bd868ba6f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -976,13 +976,13 @@ static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
{
int prev_nid, new_nid;
- prev_nid = -1;
+ prev_nid = NUMA_NO_NODE;
for ( ; start < end; start += PAGE_SIZE) {
for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
struct node_mem_mask *p = &node_masks[new_nid];
if ((start & p->mask) == p->match) {
- if (prev_nid == -1)
+ if (prev_nid == NUMA_NO_NODE)
prev_nid = new_nid;
break;
}
@@ -1208,7 +1208,7 @@ int of_node_to_nid(struct device_node *dp)
md = mdesc_grab();
count = 0;
- nid = -1;
+ nid = NUMA_NO_NODE;
mdesc_for_each_node_by_name(md, grp, "group") {
if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
nid = count;
diff --git a/arch/um/include/asm/a.out-core.h b/arch/um/include/asm/a.out-core.h
deleted file mode 100644
index 995643b18309..000000000000
--- a/arch/um/include/asm/a.out-core.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef __UM_A_OUT_CORE_H
-#define __UM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
-{
-}
-
-#endif /* __KERNEL__ */
-#endif /* __UM_A_OUT_CORE_H */
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index c3a41bfe161b..a7f1ae58d211 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config UNICORE32
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1372553dc0a9..1d1544b6ca74 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += preempt.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h
+generic-y += shmparam.h
generic-y += sizes.h
generic-y += syscalls.h
generic-y += topology.h
diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h
index 1e8fe5941b8a..54a7378a70b1 100644
--- a/arch/unicore32/include/uapi/asm/unistd.h
+++ b/arch/unicore32/include/uapi/asm/unistd.h
@@ -12,8 +12,10 @@
*/
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_TIME32_SYSCALLS
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
-#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6185d4f33296..90b562a34d65 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -14,7 +14,6 @@ config X86_32
select ARCH_WANT_IPC_PARSE_VERSION
select CLKSRC_I8253
select CLONE_BACKWARDS
- select HAVE_AOUT
select HAVE_GENERIC_DMA_COHERENT
select MODULES_USE_ELF_REL
select OLD_SIGACTION
@@ -47,6 +46,7 @@ config X86
select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ANON_INODES
+ select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_DATA
select ARCH_CLOCKSOURCE_INIT
select ARCH_DISCARD_MEMBLOCK
@@ -198,7 +198,7 @@ config X86
select IRQ_FORCED_THREADING
select NEED_SG_DMA_LENGTH
select PCI_DOMAINS if PCI
- select PCI_LOCKLESS_CONFIG
+ select PCI_LOCKLESS_CONFIG if PCI
select PERF_EVENTS
select RTC_LIB
select RTC_MC146818_LIB
@@ -446,12 +446,12 @@ config RETPOLINE
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
-config RESCTRL
- bool "Resource Control support"
+config X86_CPU_RESCTRL
+ bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
select KERNFS
help
- Enable Resource Control support.
+ Enable x86 CPU resource control support.
Provide support for the allocation and monitoring of system resource
usage by the CPU.
@@ -617,7 +617,7 @@ config X86_INTEL_QUARK
config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
- depends on X86 && ACPI
+ depends on X86 && ACPI && PCI
select COMMON_CLK
select PINCTRL
select IOSF_MBI
@@ -1510,6 +1510,7 @@ config AMD_MEM_ENCRYPT
bool "AMD Secure Memory Encryption (SME) support"
depends on X86_64 && CPU_SUP_AMD
select DYNAMIC_PHYSICAL_MASK
+ select ARCH_USE_MEMREMAP_PROT
---help---
Say yes to enable support for the encryption of system memory.
This requires an AMD processor that supports Secure Memory
@@ -1529,10 +1530,6 @@ config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
If set to N, then the encryption of system memory can be
activated with the mem_encrypt=on command line option.
-config ARCH_USE_MEMREMAP_PROT
- def_bool y
- depends on AMD_MEM_ENCRYPT
-
# Common NUMA Features
config NUMA
bool "Numa Memory Allocation and Scheduler Support"
@@ -2843,6 +2840,7 @@ config IA32_EMULATION
config IA32_AOUT
tristate "IA32 a.out support"
depends on IA32_EMULATION
+ depends on BROKEN
---help---
Support old a.out binaries in the 32bit emulation.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0723dff17e6c..15d0fbe27872 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -40,16 +40,6 @@ config EARLY_PRINTK_DBGP
with klogd/syslogd or the X server. You should normally say N here,
unless you want to debug such a crash. You need a USB debug device.
-config EARLY_PRINTK_EFI
- bool "Early printk via the EFI framebuffer"
- depends on EFI && EARLY_PRINTK
- select FONT_SUPPORT
- ---help---
- Write kernel log output directly into the EFI framebuffer.
-
- This is useful for kernel debugging when your machine crashes very
- early before the console code is initialized.
-
config EARLY_PRINTK_USB_XDBC
bool "Early printk via the xHCI debug port"
depends on EARLY_PRINTK && PCI
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 64037895b085..f62e347862cc 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -600,6 +600,16 @@ ENTRY(trampoline_32bit_src)
leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
movl %eax, %cr3
3:
+ /* Set EFER.LME=1 as a precaution in case the hypervisor pulls the rug */
+ pushl %ecx
+ pushl %edx
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+ popl %edx
+ popl %ecx
+
/* Enable PAE and LA57 (if required) paging modes */
movl $X86_CR4_PAE, %eax
cmpl $0, %edx
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91f75638f6e6..6ff7e81b5628 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -6,7 +6,7 @@
#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
+#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 2a356b948720..3ea71b871813 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
}
static void crypto_aegis128_aesni_process_crypt(
- struct aegis_state *state, struct aead_request *req,
+ struct aegis_state *state, struct skcipher_walk *walk,
const struct aegis_crypt_ops *ops)
{
- struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize, base;
-
- ops->skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops->crypt_blocks(state, chunksize, src, dst);
-
- base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
- src += base;
- dst += base;
- chunksize &= AEGIS128_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops->crypt_tail(state, chunksize, src, dst);
+ while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+ ops->crypt_blocks(state,
+ round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
+ walk->src.virt.addr, walk->dst.virt.addr);
+ skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+ walk->dst.virt.addr);
+ skcipher_walk_done(walk, 0);
}
}
@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
+ struct skcipher_walk walk;
struct aegis_state state;
+ ops->skcipher_walk_init(&walk, req, true);
+
kernel_fpu_begin();
crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
- crypto_aegis128_aesni_process_crypt(&state, req, ops);
+ crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
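
The aegis glue changes all follow the same shape: the skcipher walk is now initialised before kernel_fpu_begin() in atomic mode, full blocks are consumed with round_down(), skcipher_walk_done() is told how many bytes remain after each pass, and anything shorter than a block goes to crypt_tail(). A minimal sketch of that loop with a hypothetical 16-byte block size; the callback signatures are illustrative, not the driver's real ops:

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>	/* round_down() */

#define DEMO_BLOCK_SIZE 16	/* hypothetical block size */

static void demo_process_crypt(struct skcipher_walk *walk,
			       void (*crypt_blocks)(unsigned int len,
						    const u8 *src, u8 *dst),
			       void (*crypt_tail)(unsigned int len,
						  const u8 *src, u8 *dst))
{
	/* Consume as many whole blocks as each walk step provides. */
	while (walk->nbytes >= DEMO_BLOCK_SIZE) {
		crypt_blocks(round_down(walk->nbytes, DEMO_BLOCK_SIZE),
			     walk->src.virt.addr, walk->dst.virt.addr);
		skcipher_walk_done(walk, walk->nbytes % DEMO_BLOCK_SIZE);
	}

	/* Whatever is left is shorter than one block. */
	if (walk->nbytes) {
		crypt_tail(walk->nbytes, walk->src.virt.addr,
			   walk->dst.virt.addr);
		skcipher_walk_done(walk, 0);
	}
}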
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index dbe8bb980da1..1b1b39c66c5e 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
}
static void crypto_aegis128l_aesni_process_crypt(
- struct aegis_state *state, struct aead_request *req,
+ struct aegis_state *state, struct skcipher_walk *walk,
const struct aegis_crypt_ops *ops)
{
- struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize, base;
-
- ops->skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops->crypt_blocks(state, chunksize, src, dst);
-
- base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
- src += base;
- dst += base;
- chunksize &= AEGIS128L_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops->crypt_tail(state, chunksize, src, dst);
+ while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
+ ops->crypt_blocks(state, round_down(walk->nbytes,
+ AEGIS128L_BLOCK_SIZE),
+ walk->src.virt.addr, walk->dst.virt.addr);
+ skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+ walk->dst.virt.addr);
+ skcipher_walk_done(walk, 0);
}
}
@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
+ struct skcipher_walk walk;
struct aegis_state state;
+ ops->skcipher_walk_init(&walk, req, true);
+
kernel_fpu_begin();
crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
- crypto_aegis128l_aesni_process_crypt(&state, req, ops);
+ crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 8bebda2de92f..6227ca3220a0 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
}
static void crypto_aegis256_aesni_process_crypt(
- struct aegis_state *state, struct aead_request *req,
+ struct aegis_state *state, struct skcipher_walk *walk,
const struct aegis_crypt_ops *ops)
{
- struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize, base;
-
- ops->skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops->crypt_blocks(state, chunksize, src, dst);
-
- base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
- src += base;
- dst += base;
- chunksize &= AEGIS256_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops->crypt_tail(state, chunksize, src, dst);
+ while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
+ ops->crypt_blocks(state,
+ round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
+ walk->src.virt.addr, walk->dst.virt.addr);
+ skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+ walk->dst.virt.addr);
+ skcipher_walk_done(walk, 0);
}
}
@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
+ struct skcipher_walk walk;
struct aegis_state state;
+ ops->skcipher_walk_init(&walk, req, true);
+
kernel_fpu_begin();
crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
- crypto_aegis256_aesni_process_crypt(&state, req, ops);
+ crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1321700d6647..1e3d2102033a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -175,26 +175,18 @@ asmlinkage void aesni_gcm_finalize(void *ctx,
struct gcm_context_data *gdata,
u8 *auth_tag, unsigned long auth_tag_len);
-static struct aesni_gcm_tfm_s {
-void (*init)(void *ctx,
- struct gcm_context_data *gdata,
- u8 *iv,
- u8 *hash_subkey, const u8 *aad,
- unsigned long aad_len);
-void (*enc_update)(void *ctx,
- struct gcm_context_data *gdata, u8 *out,
- const u8 *in,
- unsigned long plaintext_len);
-void (*dec_update)(void *ctx,
- struct gcm_context_data *gdata, u8 *out,
- const u8 *in,
- unsigned long ciphertext_len);
-void (*finalize)(void *ctx,
- struct gcm_context_data *gdata,
- u8 *auth_tag, unsigned long auth_tag_len);
+static const struct aesni_gcm_tfm_s {
+ void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
+ void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long plaintext_len);
+ void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long ciphertext_len);
+ void (*finalize)(void *ctx, struct gcm_context_data *gdata,
+ u8 *auth_tag, unsigned long auth_tag_len);
} *aesni_gcm_tfm;
-struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
.init = &aesni_gcm_init,
.enc_update = &aesni_gcm_enc_update,
.dec_update = &aesni_gcm_dec_update,
@@ -243,7 +235,7 @@ asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
.init = &aesni_gcm_init_avx_gen2,
.enc_update = &aesni_gcm_enc_update_avx_gen2,
.dec_update = &aesni_gcm_dec_update_avx_gen2,
@@ -288,7 +280,7 @@ asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
+static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
.init = &aesni_gcm_init_avx_gen4,
.enc_update = &aesni_gcm_enc_update_avx_gen4,
.dec_update = &aesni_gcm_dec_update_avx_gen4,
@@ -778,7 +770,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
+ const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
struct gcm_context_data data AESNI_ALIGN_ATTR;
struct scatter_walk dst_sg_walk = {};
unsigned long left = req->cryptlen;
@@ -821,11 +813,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
}
- src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
- scatterwalk_start(&src_sg_walk, src_sg);
- if (req->src != req->dst) {
- dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
- scatterwalk_start(&dst_sg_walk, dst_sg);
+ if (left) {
+ src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
+ scatterwalk_start(&src_sg_walk, src_sg);
+ if (req->src != req->dst) {
+ dst_sg = scatterwalk_ffwd(dst_start, req->dst,
+ req->assoclen);
+ scatterwalk_start(&dst_sg_walk, dst_sg);
+ }
}
kernel_fpu_begin();
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
index de04d3e98d8d..3d873e67749d 100644
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S
@@ -43,609 +43,291 @@
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-########################################################################
-# Function API:
-# UINT16 crc_t10dif_pcl(
-# UINT16 init_crc, //initial CRC value, 16 bits
-# const unsigned char *buf, //buffer pointer to calculate CRC on
-# UINT64 len //buffer length in bytes (64-bit data)
-# );
#
# Reference paper titled "Fast CRC Computation for Generic
# Polynomials Using PCLMULQDQ Instruction"
# URL: http://www.intel.com/content/dam/www/public/us/en/documents
# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
#
-#
#include <linux/linkage.h>
.text
-#define arg1 %rdi
-#define arg2 %rsi
-#define arg3 %rdx
-
-#define arg1_low32 %edi
+#define init_crc %edi
+#define buf %rsi
+#define len %rdx
+
+#define FOLD_CONSTS %xmm10
+#define BSWAP_MASK %xmm11
+
+# Fold reg1, reg2 into the next 32 data bytes, storing the result back into
+# reg1, reg2.
+.macro fold_32_bytes offset, reg1, reg2
+ movdqu \offset(buf), %xmm9
+ movdqu \offset+16(buf), %xmm12
+ pshufb BSWAP_MASK, %xmm9
+ pshufb BSWAP_MASK, %xmm12
+ movdqa \reg1, %xmm8
+ movdqa \reg2, %xmm13
+ pclmulqdq $0x00, FOLD_CONSTS, \reg1
+ pclmulqdq $0x11, FOLD_CONSTS, %xmm8
+ pclmulqdq $0x00, FOLD_CONSTS, \reg2
+ pclmulqdq $0x11, FOLD_CONSTS, %xmm13
+ pxor %xmm9 , \reg1
+ xorps %xmm8 , \reg1
+ pxor %xmm12, \reg2
+ xorps %xmm13, \reg2
+.endm
+
+# Fold src_reg into dst_reg.
+.macro fold_16_bytes src_reg, dst_reg
+ movdqa \src_reg, %xmm8
+ pclmulqdq $0x11, FOLD_CONSTS, \src_reg
+ pclmulqdq $0x00, FOLD_CONSTS, %xmm8
+ pxor %xmm8, \dst_reg
+ xorps \src_reg, \dst_reg
+.endm
-ENTRY(crc_t10dif_pcl)
+#
+# u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
+#
+# Assumes len >= 16.
+#
.align 16
+ENTRY(crc_t10dif_pcl)
- # adjust the 16-bit initial_crc value, scale it to 32 bits
- shl $16, arg1_low32
-
- # Allocate Stack Space
- mov %rsp, %rcx
- sub $16*2, %rsp
- # align stack to 16 byte boundary
- and $~(0x10 - 1), %rsp
-
- # check if smaller than 256
- cmp $256, arg3
-
- # for sizes less than 128, we can't fold 64B at a time...
- jl _less_than_128
-
-
- # load the initial crc value
- movd arg1_low32, %xmm10 # initial crc
-
- # crc value does not need to be byte-reflected, but it needs
- # to be moved to the high part of the register.
- # because data will be byte-reflected and will align with
- # initial crc at correct place.
- pslldq $12, %xmm10
-
- movdqa SHUF_MASK(%rip), %xmm11
- # receive the initial 64B data, xor the initial crc value
- movdqu 16*0(arg2), %xmm0
- movdqu 16*1(arg2), %xmm1
- movdqu 16*2(arg2), %xmm2
- movdqu 16*3(arg2), %xmm3
- movdqu 16*4(arg2), %xmm4
- movdqu 16*5(arg2), %xmm5
- movdqu 16*6(arg2), %xmm6
- movdqu 16*7(arg2), %xmm7
-
- pshufb %xmm11, %xmm0
- # XOR the initial_crc value
- pxor %xmm10, %xmm0
- pshufb %xmm11, %xmm1
- pshufb %xmm11, %xmm2
- pshufb %xmm11, %xmm3
- pshufb %xmm11, %xmm4
- pshufb %xmm11, %xmm5
- pshufb %xmm11, %xmm6
- pshufb %xmm11, %xmm7
-
- movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4
- #imm value of pclmulqdq instruction
- #will determine which constant to use
-
- #################################################################
- # we subtract 256 instead of 128 to save one instruction from the loop
- sub $256, arg3
-
- # at this section of the code, there is 64*x+y (0<=y<64) bytes of
- # buffer. The _fold_64_B_loop will fold 64B at a time
- # until we have 64+y Bytes of buffer
-
-
- # fold 64B at a time. This section of the code folds 4 xmm
- # registers in parallel
-_fold_64_B_loop:
-
- # update the buffer pointer
- add $128, arg2 # buf += 64#
-
- movdqu 16*0(arg2), %xmm9
- movdqu 16*1(arg2), %xmm12
- pshufb %xmm11, %xmm9
- pshufb %xmm11, %xmm12
- movdqa %xmm0, %xmm8
- movdqa %xmm1, %xmm13
- pclmulqdq $0x0 , %xmm10, %xmm0
- pclmulqdq $0x11, %xmm10, %xmm8
- pclmulqdq $0x0 , %xmm10, %xmm1
- pclmulqdq $0x11, %xmm10, %xmm13
- pxor %xmm9 , %xmm0
- xorps %xmm8 , %xmm0
- pxor %xmm12, %xmm1
- xorps %xmm13, %xmm1
-
- movdqu 16*2(arg2), %xmm9
- movdqu 16*3(arg2), %xmm12
- pshufb %xmm11, %xmm9
- pshufb %xmm11, %xmm12
- movdqa %xmm2, %xmm8
- movdqa %xmm3, %xmm13
- pclmulqdq $0x0, %xmm10, %xmm2
- pclmulqdq $0x11, %xmm10, %xmm8
- pclmulqdq $0x0, %xmm10, %xmm3
- pclmulqdq $0x11, %xmm10, %xmm13
- pxor %xmm9 , %xmm2
- xorps %xmm8 , %xmm2
- pxor %xmm12, %xmm3
- xorps %xmm13, %xmm3
-
- movdqu 16*4(arg2), %xmm9
- movdqu 16*5(arg2), %xmm12
- pshufb %xmm11, %xmm9
- pshufb %xmm11, %xmm12
- movdqa %xmm4, %xmm8
- movdqa %xmm5, %xmm13
- pclmulqdq $0x0, %xmm10, %xmm4
- pclmulqdq $0x11, %xmm10, %xmm8
- pclmulqdq $0x0, %xmm10, %xmm5
- pclmulqdq $0x11, %xmm10, %xmm13
- pxor %xmm9 , %xmm4
- xorps %xmm8 , %xmm4
- pxor %xmm12, %xmm5
- xorps %xmm13, %xmm5
-
- movdqu 16*6(arg2), %xmm9
- movdqu 16*7(arg2), %xmm12
- pshufb %xmm11, %xmm9
- pshufb %xmm11, %xmm12
- movdqa %xmm6 , %xmm8
- movdqa %xmm7 , %xmm13
- pclmulqdq $0x0 , %xmm10, %xmm6
- pclmulqdq $0x11, %xmm10, %xmm8
- pclmulqdq $0x0 , %xmm10, %xmm7
- pclmulqdq $0x11, %xmm10, %xmm13
- pxor %xmm9 , %xmm6
- xorps %xmm8 , %xmm6
- pxor %xmm12, %xmm7
- xorps %xmm13, %xmm7
-
- sub $128, arg3
-
- # check if there is another 64B in the buffer to be able to fold
- jge _fold_64_B_loop
- ##################################################################
-
-
- add $128, arg2
- # at this point, the buffer pointer is pointing at the last y Bytes
- # of the buffer the 64B of folded data is in 4 of the xmm
- # registers: xmm0, xmm1, xmm2, xmm3
-
-
- # fold the 8 xmm registers to 1 xmm register with different constants
-
- movdqa rk9(%rip), %xmm10
- movdqa %xmm0, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm0
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- xorps %xmm0, %xmm7
-
- movdqa rk11(%rip), %xmm10
- movdqa %xmm1, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm1
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- xorps %xmm1, %xmm7
-
- movdqa rk13(%rip), %xmm10
- movdqa %xmm2, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm2
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- pxor %xmm2, %xmm7
-
- movdqa rk15(%rip), %xmm10
- movdqa %xmm3, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm3
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- xorps %xmm3, %xmm7
-
- movdqa rk17(%rip), %xmm10
- movdqa %xmm4, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm4
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- pxor %xmm4, %xmm7
-
- movdqa rk19(%rip), %xmm10
- movdqa %xmm5, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm5
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- xorps %xmm5, %xmm7
-
- movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2
- #imm value of pclmulqdq instruction
- #will determine which constant to use
- movdqa %xmm6, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm6
- pclmulqdq $0x0 , %xmm10, %xmm8
- pxor %xmm8, %xmm7
- pxor %xmm6, %xmm7
-
-
- # instead of 64, we add 48 to the loop counter to save 1 instruction
- # from the loop instead of a cmp instruction, we use the negative
- # flag with the jl instruction
- add $128-16, arg3
- jl _final_reduction_for_128
-
- # now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7
- # and the rest is in memory. We can fold 16 bytes at a time if y>=16
- # continue folding 16B at a time
-
-_16B_reduction_loop:
+ movdqa .Lbswap_mask(%rip), BSWAP_MASK
+
+ # For sizes less than 256 bytes, we can't fold 128 bytes at a time.
+ cmp $256, len
+ jl .Lless_than_256_bytes
+
+ # Load the first 128 data bytes. Byte swapping is necessary to make the
+ # bit order match the polynomial coefficient order.
+ movdqu 16*0(buf), %xmm0
+ movdqu 16*1(buf), %xmm1
+ movdqu 16*2(buf), %xmm2
+ movdqu 16*3(buf), %xmm3
+ movdqu 16*4(buf), %xmm4
+ movdqu 16*5(buf), %xmm5
+ movdqu 16*6(buf), %xmm6
+ movdqu 16*7(buf), %xmm7
+ add $128, buf
+ pshufb BSWAP_MASK, %xmm0
+ pshufb BSWAP_MASK, %xmm1
+ pshufb BSWAP_MASK, %xmm2
+ pshufb BSWAP_MASK, %xmm3
+ pshufb BSWAP_MASK, %xmm4
+ pshufb BSWAP_MASK, %xmm5
+ pshufb BSWAP_MASK, %xmm6
+ pshufb BSWAP_MASK, %xmm7
+
+ # XOR the first 16 data *bits* with the initial CRC value.
+ pxor %xmm8, %xmm8
+ pinsrw $7, init_crc, %xmm8
+ pxor %xmm8, %xmm0
+
+ movdqa .Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS
+
+ # Subtract 128 for the 128 data bytes just consumed. Subtract another
+ # 128 to simplify the termination condition of the following loop.
+ sub $256, len
+
+ # While >= 128 data bytes remain (not counting xmm0-7), fold the 128
+ # bytes xmm0-7 into them, storing the result back into xmm0-7.
+.Lfold_128_bytes_loop:
+ fold_32_bytes 0, %xmm0, %xmm1
+ fold_32_bytes 32, %xmm2, %xmm3
+ fold_32_bytes 64, %xmm4, %xmm5
+ fold_32_bytes 96, %xmm6, %xmm7
+ add $128, buf
+ sub $128, len
+ jge .Lfold_128_bytes_loop
+
+ # Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7.
+
+ # Fold across 64 bytes.
+ movdqa .Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS
+ fold_16_bytes %xmm0, %xmm4
+ fold_16_bytes %xmm1, %xmm5
+ fold_16_bytes %xmm2, %xmm6
+ fold_16_bytes %xmm3, %xmm7
+ # Fold across 32 bytes.
+ movdqa .Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS
+ fold_16_bytes %xmm4, %xmm6
+ fold_16_bytes %xmm5, %xmm7
+ # Fold across 16 bytes.
+ movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
+ fold_16_bytes %xmm6, %xmm7
+
+ # Add 128 to get the correct number of data bytes remaining in 0...127
+ # (not counting xmm7), following the previous extra subtraction by 128.
+ # Then subtract 16 to simplify the termination condition of the
+ # following loop.
+ add $128-16, len
+
+ # While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes
+ # xmm7 into them, storing the result back into xmm7.
+ jl .Lfold_16_bytes_loop_done
+.Lfold_16_bytes_loop:
movdqa %xmm7, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm7
- pclmulqdq $0x0 , %xmm10, %xmm8
+ pclmulqdq $0x11, FOLD_CONSTS, %xmm7
+ pclmulqdq $0x00, FOLD_CONSTS, %xmm8
pxor %xmm8, %xmm7
- movdqu (arg2), %xmm0
- pshufb %xmm11, %xmm0
+ movdqu (buf), %xmm0
+ pshufb BSWAP_MASK, %xmm0
pxor %xmm0 , %xmm7
- add $16, arg2
- sub $16, arg3
- # instead of a cmp instruction, we utilize the flags with the
- # jge instruction equivalent of: cmp arg3, 16-16
- # check if there is any more 16B in the buffer to be able to fold
- jge _16B_reduction_loop
-
- #now we have 16+z bytes left to reduce, where 0<= z < 16.
- #first, we reduce the data in the xmm7 register
-
-
-_final_reduction_for_128:
- # check if any more data to fold. If not, compute the CRC of
- # the final 128 bits
- add $16, arg3
- je _128_done
-
- # here we are getting data that is less than 16 bytes.
- # since we know that there was data before the pointer, we can
- # offset the input pointer before the actual point, to receive
- # exactly 16 bytes. after that the registers need to be adjusted.
-_get_last_two_xmms:
+ add $16, buf
+ sub $16, len
+ jge .Lfold_16_bytes_loop
+
+.Lfold_16_bytes_loop_done:
+ # Add 16 to get the correct number of data bytes remaining in 0...15
+ # (not counting xmm7), following the previous extra subtraction by 16.
+ add $16, len
+ je .Lreduce_final_16_bytes
+
+.Lhandle_partial_segment:
+ # Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16
+ # bytes are in xmm7 and the rest are the remaining data in 'buf'. To do
+ # this without needing a fold constant for each possible 'len', redivide
+ # the bytes into a first chunk of 'len' bytes and a second chunk of 16
+ # bytes, then fold the first chunk into the second.
+
movdqa %xmm7, %xmm2
- movdqu -16(arg2, arg3), %xmm1
- pshufb %xmm11, %xmm1
+ # xmm1 = last 16 original data bytes
+ movdqu -16(buf, len), %xmm1
+ pshufb BSWAP_MASK, %xmm1
- # get rid of the extra data that was loaded before
- # load the shift constant
- lea pshufb_shf_table+16(%rip), %rax
- sub arg3, %rax
+ # xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes.
+ lea .Lbyteshift_table+16(%rip), %rax
+ sub len, %rax
movdqu (%rax), %xmm0
-
- # shift xmm2 to the left by arg3 bytes
pshufb %xmm0, %xmm2
- # shift xmm7 to the right by 16-arg3 bytes
- pxor mask1(%rip), %xmm0
+ # xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes.
+ pxor .Lmask1(%rip), %xmm0
pshufb %xmm0, %xmm7
+
+ # xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes),
+ # then '16-len' bytes from xmm2 (high-order bytes).
pblendvb %xmm2, %xmm1 #xmm0 is implicit
- # fold 16 Bytes
- movdqa %xmm1, %xmm2
+ # Fold the first chunk into the second chunk, storing the result in xmm7.
movdqa %xmm7, %xmm8
- pclmulqdq $0x11, %xmm10, %xmm7
- pclmulqdq $0x0 , %xmm10, %xmm8
+ pclmulqdq $0x11, FOLD_CONSTS, %xmm7
+ pclmulqdq $0x00, FOLD_CONSTS, %xmm8
pxor %xmm8, %xmm7
- pxor %xmm2, %xmm7
+ pxor %xmm1, %xmm7
-_128_done:
- # compute crc of a 128-bit value
- movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10
- movdqa %xmm7, %xmm0
+.Lreduce_final_16_bytes:
+ # Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC
- #64b fold
- pclmulqdq $0x1, %xmm10, %xmm7
- pslldq $8 , %xmm0
- pxor %xmm0, %xmm7
+ # Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
+ movdqa .Lfinal_fold_consts(%rip), FOLD_CONSTS
- #32b fold
+ # Fold the high 64 bits into the low 64 bits, while also multiplying by
+ # x^64. This produces a 128-bit value congruent to x^64 * M(x) and
+ # whose low 48 bits are 0.
movdqa %xmm7, %xmm0
+ pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x))
+ pslldq $8, %xmm0
+ pxor %xmm0, %xmm7 # + low bits * x^64
- pand mask2(%rip), %xmm0
-
- psrldq $12, %xmm7
- pclmulqdq $0x10, %xmm10, %xmm7
- pxor %xmm0, %xmm7
-
- #barrett reduction
-_barrett:
- movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10
+ # Fold the high 32 bits into the low 96 bits. This produces a 96-bit
+ # value congruent to x^64 * M(x) and whose low 48 bits are 0.
movdqa %xmm7, %xmm0
- pclmulqdq $0x01, %xmm10, %xmm7
- pslldq $4, %xmm7
- pclmulqdq $0x11, %xmm10, %xmm7
+ pand .Lmask2(%rip), %xmm0 # zero high 32 bits
+ psrldq $12, %xmm7 # extract high 32 bits
+ pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x))
+ pxor %xmm0, %xmm7 # + low bits
- pslldq $4, %xmm7
- pxor %xmm0, %xmm7
- pextrd $1, %xmm7, %eax
+ # Load G(x) and floor(x^48 / G(x)).
+ movdqa .Lbarrett_reduction_consts(%rip), FOLD_CONSTS
-_cleanup:
- # scale the result back to 16 bits
- shr $16, %eax
- mov %rcx, %rsp
+ # Use Barrett reduction to compute the final CRC value.
+ movdqa %xmm7, %xmm0
+ pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x))
+ psrlq $32, %xmm7 # /= x^32
+ pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # *= G(x)
+ psrlq $48, %xmm0
+ pxor %xmm7, %xmm0 # + low 16 nonzero bits
+ # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
+
+ pextrw $0, %xmm0, %eax
ret
-########################################################################
-
.align 16
-_less_than_128:
-
- # check if there is enough buffer to be able to fold 16B at a time
- cmp $32, arg3
- jl _less_than_32
- movdqa SHUF_MASK(%rip), %xmm11
+.Lless_than_256_bytes:
+ # Checksumming a buffer of length 16...255 bytes
- # now if there is, load the constants
- movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
+ # Load the first 16 data bytes.
+ movdqu (buf), %xmm7
+ pshufb BSWAP_MASK, %xmm7
+ add $16, buf
- movd arg1_low32, %xmm0 # get the initial crc value
- pslldq $12, %xmm0 # align it to its correct place
- movdqu (arg2), %xmm7 # load the plaintext
- pshufb %xmm11, %xmm7 # byte-reflect the plaintext
+ # XOR the first 16 data *bits* with the initial CRC value.
+ pxor %xmm0, %xmm0
+ pinsrw $7, init_crc, %xmm0
pxor %xmm0, %xmm7
-
- # update the buffer pointer
- add $16, arg2
-
- # update the counter. subtract 32 instead of 16 to save one
- # instruction from the loop
- sub $32, arg3
-
- jmp _16B_reduction_loop
-
-
-.align 16
-_less_than_32:
- # mov initial crc to the return value. this is necessary for
- # zero-length buffers.
- mov arg1_low32, %eax
- test arg3, arg3
- je _cleanup
-
- movdqa SHUF_MASK(%rip), %xmm11
-
- movd arg1_low32, %xmm0 # get the initial crc value
- pslldq $12, %xmm0 # align it to its correct place
-
- cmp $16, arg3
- je _exact_16_left
- jl _less_than_16_left
-
- movdqu (arg2), %xmm7 # load the plaintext
- pshufb %xmm11, %xmm7 # byte-reflect the plaintext
- pxor %xmm0 , %xmm7 # xor the initial crc value
- add $16, arg2
- sub $16, arg3
- movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
- jmp _get_last_two_xmms
-
-
-.align 16
-_less_than_16_left:
- # use stack space to load data less than 16 bytes, zero-out
- # the 16B in memory first.
-
- pxor %xmm1, %xmm1
- mov %rsp, %r11
- movdqa %xmm1, (%r11)
-
- cmp $4, arg3
- jl _only_less_than_4
-
- # backup the counter value
- mov arg3, %r9
- cmp $8, arg3
- jl _less_than_8_left
-
- # load 8 Bytes
- mov (arg2), %rax
- mov %rax, (%r11)
- add $8, %r11
- sub $8, arg3
- add $8, arg2
-_less_than_8_left:
-
- cmp $4, arg3
- jl _less_than_4_left
-
- # load 4 Bytes
- mov (arg2), %eax
- mov %eax, (%r11)
- add $4, %r11
- sub $4, arg3
- add $4, arg2
-_less_than_4_left:
-
- cmp $2, arg3
- jl _less_than_2_left
-
- # load 2 Bytes
- mov (arg2), %ax
- mov %ax, (%r11)
- add $2, %r11
- sub $2, arg3
- add $2, arg2
-_less_than_2_left:
- cmp $1, arg3
- jl _zero_left
-
- # load 1 Byte
- mov (arg2), %al
- mov %al, (%r11)
-_zero_left:
- movdqa (%rsp), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- # shl r9, 4
- lea pshufb_shf_table+16(%rip), %rax
- sub %r9, %rax
- movdqu (%rax), %xmm0
- pxor mask1(%rip), %xmm0
-
- pshufb %xmm0, %xmm7
- jmp _128_done
-
-.align 16
-_exact_16_left:
- movdqu (arg2), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- jmp _128_done
-
-_only_less_than_4:
- cmp $3, arg3
- jl _only_less_than_3
-
- # load 3 Bytes
- mov (arg2), %al
- mov %al, (%r11)
-
- mov 1(arg2), %al
- mov %al, 1(%r11)
-
- mov 2(arg2), %al
- mov %al, 2(%r11)
-
- movdqa (%rsp), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- psrldq $5, %xmm7
-
- jmp _barrett
-_only_less_than_3:
- cmp $2, arg3
- jl _only_less_than_2
-
- # load 2 Bytes
- mov (arg2), %al
- mov %al, (%r11)
-
- mov 1(arg2), %al
- mov %al, 1(%r11)
-
- movdqa (%rsp), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- psrldq $6, %xmm7
-
- jmp _barrett
-_only_less_than_2:
-
- # load 1 Byte
- mov (arg2), %al
- mov %al, (%r11)
-
- movdqa (%rsp), %xmm7
- pshufb %xmm11, %xmm7
- pxor %xmm0 , %xmm7 # xor the initial crc value
-
- psrldq $7, %xmm7
-
- jmp _barrett
-
+ movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
+ cmp $16, len
+ je .Lreduce_final_16_bytes # len == 16
+ sub $32, len
+ jge .Lfold_16_bytes_loop # 32 <= len <= 255
+ add $16, len
+ jmp .Lhandle_partial_segment # 17 <= len <= 31
ENDPROC(crc_t10dif_pcl)
.section .rodata, "a", @progbits
.align 16
-# precomputed constants
-# these constants are precomputed from the poly:
-# 0x8bb70000 (0x8bb7 scaled to 32 bits)
-# Q = 0x18BB70000
-# rk1 = 2^(32*3) mod Q << 32
-# rk2 = 2^(32*5) mod Q << 32
-# rk3 = 2^(32*15) mod Q << 32
-# rk4 = 2^(32*17) mod Q << 32
-# rk5 = 2^(32*3) mod Q << 32
-# rk6 = 2^(32*2) mod Q << 32
-# rk7 = floor(2^64/Q)
-# rk8 = Q
-rk1:
-.quad 0x2d56000000000000
-rk2:
-.quad 0x06df000000000000
-rk3:
-.quad 0x9d9d000000000000
-rk4:
-.quad 0x7cf5000000000000
-rk5:
-.quad 0x2d56000000000000
-rk6:
-.quad 0x1368000000000000
-rk7:
-.quad 0x00000001f65a57f8
-rk8:
-.quad 0x000000018bb70000
-
-rk9:
-.quad 0xceae000000000000
-rk10:
-.quad 0xbfd6000000000000
-rk11:
-.quad 0x1e16000000000000
-rk12:
-.quad 0x713c000000000000
-rk13:
-.quad 0xf7f9000000000000
-rk14:
-.quad 0x80a6000000000000
-rk15:
-.quad 0x044c000000000000
-rk16:
-.quad 0xe658000000000000
-rk17:
-.quad 0xad18000000000000
-rk18:
-.quad 0xa497000000000000
-rk19:
-.quad 0x6ee3000000000000
-rk20:
-.quad 0xe7b5000000000000
-
+# Fold constants precomputed from the polynomial 0x18bb7
+# G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+.Lfold_across_128_bytes_consts:
+ .quad 0x0000000000006123 # x^(8*128) mod G(x)
+ .quad 0x0000000000002295 # x^(8*128+64) mod G(x)
+.Lfold_across_64_bytes_consts:
+ .quad 0x0000000000001069 # x^(4*128) mod G(x)
+ .quad 0x000000000000dd31 # x^(4*128+64) mod G(x)
+.Lfold_across_32_bytes_consts:
+ .quad 0x000000000000857d # x^(2*128) mod G(x)
+ .quad 0x0000000000007acc # x^(2*128+64) mod G(x)
+.Lfold_across_16_bytes_consts:
+ .quad 0x000000000000a010 # x^(1*128) mod G(x)
+ .quad 0x0000000000001faa # x^(1*128+64) mod G(x)
+.Lfinal_fold_consts:
+ .quad 0x1368000000000000 # x^48 * (x^48 mod G(x))
+ .quad 0x2d56000000000000 # x^48 * (x^80 mod G(x))
+.Lbarrett_reduction_consts:
+ .quad 0x0000000000018bb7 # G(x)
+ .quad 0x00000001f65a57f8 # floor(x^48 / G(x))
.section .rodata.cst16.mask1, "aM", @progbits, 16
.align 16
-mask1:
-.octa 0x80808080808080808080808080808080
+.Lmask1:
+ .octa 0x80808080808080808080808080808080
.section .rodata.cst16.mask2, "aM", @progbits, 16
.align 16
-mask2:
-.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
+.Lmask2:
+ .octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
+
+.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
+.align 16
+.Lbswap_mask:
+ .octa 0x000102030405060708090A0B0C0D0E0F
-.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16
+.section .rodata.cst32.byteshift_table, "aM", @progbits, 32
.align 16
-SHUF_MASK:
-.octa 0x000102030405060708090A0B0C0D0E0F
-
-.section .rodata.cst32.pshufb_shf_table, "aM", @progbits, 32
-.align 32
-pshufb_shf_table:
-# use these values for shift constants for the pshufb instruction
-# different alignments result in values as shown:
-# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
-# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
-# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
-# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
-# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
-# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
-# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
-# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
-# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
-# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
-# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
-# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
-# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
-# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
-# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
-.octa 0x8f8e8d8c8b8a89888786858483828100
-.octa 0x000e0d0c0b0a09080706050403020100
+# For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len]
+# is the index vector to shift left by 'len' bytes, and is also {0x80, ...,
+# 0x80} XOR the index vector to shift right by '16 - len' bytes.
+.Lbyteshift_table:
+ .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
+ .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0
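The rewritten assembly computes the same checksum as before, just with named registers, .L-local labels, and fold constants expressed directly in terms of the generator polynomial G(x) = 0x18bb7 quoted in the comments. For reference, a bitwise C model of that CRC (non-reflected, zero initial value, no final XOR); if this matches the CRC-16/T10-DIF definition, the printed check value for "123456789" should be 0xd0db:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Bitwise reference for the CRC computed by crc_t10dif_pcl(): CRC16 with
     * generator polynomial 0x18bb7, no input/output reflection, no final XOR.
     */
    static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
    {
            size_t i;
            int bit;

            for (i = 0; i < len; i++) {
                    crc ^= (uint16_t)buf[i] << 8;
                    for (bit = 0; bit < 8; bit++)
                            crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
            }
            return crc;
    }

    int main(void)
    {
            const char *msg = "123456789";

            printf("crc = %#06x\n",
                   crc_t10dif_ref(0, (const uint8_t *)msg, strlen(msg)));
            return 0;
    }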
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index cd4df9322501..0e785c0b2354 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -33,18 +33,12 @@
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
-asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
- size_t len);
+asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
struct chksum_desc_ctx {
__u16 crc;
};
-/*
- * Steps through buffer one byte at at time, calculates reflected
- * crc using table.
- */
-
static int chksum_init(struct shash_desc *desc)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
@@ -59,7 +53,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- if (irq_fpu_usable()) {
+ if (length >= 16 && irq_fpu_usable()) {
kernel_fpu_begin();
ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
kernel_fpu_end();
@@ -79,7 +73,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (irq_fpu_usable()) {
+ if (len >= 16 && irq_fpu_usable()) {
kernel_fpu_begin();
*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
kernel_fpu_end();
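Because the rewritten assembly assumes len >= 16, the glue now takes the PCLMULQDQ path only for buffers of at least 16 bytes and only when the FPU is usable; shorter updates stay on the generic table-driven fallback (that branch is outside the hunk, presumably crc_t10dif_generic(), which the tree provides for exactly this). A userspace sketch of the dispatch policy, with hypothetical stand-ins for both CRC back ends and for irq_fpu_usable():

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for crc_t10dif_pcl() and the generic fallback. */
    static uint16_t crc_pclmul(uint16_t crc, const uint8_t *buf, size_t len)
    {
            (void)buf;
            printf("PCLMULQDQ path: %zu bytes\n", len);
            return crc;     /* the real code folds 16-byte chunks with PCLMULQDQ */
    }

    static uint16_t crc_generic(uint16_t crc, const uint8_t *buf, size_t len)
    {
            (void)buf;
            printf("table-driven path: %zu bytes\n", len);
            return crc;
    }

    static bool fpu_usable = true;  /* stand-in for irq_fpu_usable() */

    /* Mirror of the new dispatch: SIMD only when len >= 16 and the FPU is usable. */
    static uint16_t crc_update(uint16_t crc, const uint8_t *buf, size_t len)
    {
            if (len >= 16 && fpu_usable)
                    return crc_pclmul(crc, buf, len);
            return crc_generic(crc, buf, len);
    }

    int main(void)
    {
            uint8_t buf[64] = { 0 };

            crc_update(0, buf, 8);           /* too short: generic path */
            crc_update(0, buf, sizeof(buf)); /* long enough: PCLMULQDQ path */
            return 0;
    }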
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
index 0dccdda1eb3a..7e600f8bcdad 100644
--- a/arch/x86/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad(
static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
struct morus1280_ops ops,
- struct aead_request *req)
+ struct skcipher_walk *walk)
{
- struct skcipher_walk walk;
- u8 *cursor_src, *cursor_dst;
- unsigned int chunksize, base;
-
- ops.skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- cursor_src = walk.src.virt.addr;
- cursor_dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
-
- base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
- cursor_src += base;
- cursor_dst += base;
- chunksize &= MORUS1280_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops.crypt_tail(state, cursor_src, cursor_dst,
- chunksize);
+ while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
+ ops.crypt_blocks(state, walk->src.virt.addr,
+ walk->dst.virt.addr,
+ round_down(walk->nbytes,
+ MORUS1280_BLOCK_SIZE));
+ skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
+ walk->nbytes);
+ skcipher_walk_done(walk, 0);
}
}
@@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
struct morus1280_state state;
+ struct skcipher_walk walk;
+
+ ops.skcipher_walk_init(&walk, req, true);
kernel_fpu_begin();
ctx->ops->init(&state, &ctx->key, req->iv);
crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
- crypto_morus1280_glue_process_crypt(&state, ops, req);
+ crypto_morus1280_glue_process_crypt(&state, ops, &walk);
ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
index 7b58fe4d9bd1..cb3a81732016 100644
--- a/arch/x86/crypto/morus640_glue.c
+++ b/arch/x86/crypto/morus640_glue.c
@@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad(
static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
struct morus640_ops ops,
- struct aead_request *req)
+ struct skcipher_walk *walk)
{
- struct skcipher_walk walk;
- u8 *cursor_src, *cursor_dst;
- unsigned int chunksize, base;
-
- ops.skcipher_walk_init(&walk, req, false);
-
- while (walk.nbytes) {
- cursor_src = walk.src.virt.addr;
- cursor_dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
-
- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
-
- base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
- cursor_src += base;
- cursor_dst += base;
- chunksize &= MORUS640_BLOCK_SIZE - 1;
-
- if (chunksize > 0)
- ops.crypt_tail(state, cursor_src, cursor_dst,
- chunksize);
+ while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
+ ops.crypt_blocks(state, walk->src.virt.addr,
+ walk->dst.virt.addr,
+ round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
+ skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
+ }
- skcipher_walk_done(&walk, 0);
+ if (walk->nbytes) {
+ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
+ walk->nbytes);
+ skcipher_walk_done(walk, 0);
}
}
@@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
struct morus640_state state;
+ struct skcipher_walk walk;
+
+ ops.skcipher_walk_init(&walk, req, true);
kernel_fpu_begin();
ctx->ops->init(&state, &ctx->key, req->iv);
crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
- crypto_morus640_glue_process_crypt(&state, ops, req);
+ crypto_morus640_glue_process_crypt(&state, ops, &walk);
ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
index c88c670cb5fc..e6add74d78a5 100644
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -272,6 +272,10 @@ ENTRY(poly1305_block_sse2)
dec %rcx
jnz .Ldoblock
+ # Zeroing of key material
+ mov %rcx,0x00(%rsp)
+ mov %rcx,0x08(%rsp)
+
add $0x10,%rsp
pop %r12
pop %rbx
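The two added stores reuse %rcx, which is zero once the block loop falls through, to wipe the 16 bytes of stack scratch (identified by the comment as key material) before the frame is released. The usual C-level counterpart of this idiom in the kernel is memzero_explicit(); a small userspace analogue, where the empty asm with a "memory" clobber plays the role of the kernel's barrier and keeps the compiler from dropping the memset() as a dead store:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Userspace analogue of the kernel's memzero_explicit(). */
    static void wipe(void *s, size_t n)
    {
            memset(s, 0, n);
            __asm__ __volatile__("" : : "r" (s) : "memory");
    }

    int main(void)
    {
            unsigned char scratch[16] = "sensitive bytes";

            /* ... the secret-dependent computation would happen here ... */
            wipe(scratch, sizeof(scratch));
            printf("scratch wiped: first byte = %d\n", scratch[0]);
            return 0;
    }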
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 8eaf8952c408..39913770a44d 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
/* Need to switch before accessing the thread stack. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
- movq %rsp, %rdi
+ /* In the Xen PV case we already run on the thread stack. */
+ ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
pushq 6*8(%rdi) /* regs->ss */
@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
pushq 3*8(%rdi) /* regs->cs */
pushq 2*8(%rdi) /* regs->ip */
pushq 1*8(%rdi) /* regs->orig_ax */
-
pushq (%rdi) /* pt_regs->di */
+.Lint80_keep_stack:
+
pushq %rsi /* pt_regs->si */
xorl %esi, %esi /* nospec si */
pushq %rdx /* pt_regs->dx */
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 3cf7b533b3d1..955ab6a3b61f 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -24,7 +24,7 @@
10 i386 unlink sys_unlink __ia32_sys_unlink
11 i386 execve sys_execve __ia32_compat_sys_execve
12 i386 chdir sys_chdir __ia32_sys_chdir
-13 i386 time sys_time __ia32_compat_sys_time
+13 i386 time sys_time32 __ia32_sys_time32
14 i386 mknod sys_mknod __ia32_sys_mknod
15 i386 chmod sys_chmod __ia32_sys_chmod
16 i386 lchown sys_lchown16 __ia32_sys_lchown16
@@ -36,12 +36,12 @@
22 i386 umount sys_oldumount __ia32_sys_oldumount
23 i386 setuid sys_setuid16 __ia32_sys_setuid16
24 i386 getuid sys_getuid16 __ia32_sys_getuid16
-25 i386 stime sys_stime __ia32_compat_sys_stime
+25 i386 stime sys_stime32 __ia32_sys_stime32
26 i386 ptrace sys_ptrace __ia32_compat_sys_ptrace
27 i386 alarm sys_alarm __ia32_sys_alarm
28 i386 oldfstat sys_fstat __ia32_sys_fstat
29 i386 pause sys_pause __ia32_sys_pause
-30 i386 utime sys_utime __ia32_compat_sys_utime
+30 i386 utime sys_utime32 __ia32_sys_utime32
31 i386 stty
32 i386 gtty
33 i386 access sys_access __ia32_sys_access
@@ -135,7 +135,7 @@
121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname
122 i386 uname sys_newuname __ia32_sys_newuname
123 i386 modify_ldt sys_modify_ldt __ia32_sys_modify_ldt
-124 i386 adjtimex sys_adjtimex __ia32_compat_sys_adjtimex
+124 i386 adjtimex sys_adjtimex_time32 __ia32_sys_adjtimex_time32
125 i386 mprotect sys_mprotect __ia32_sys_mprotect
126 i386 sigprocmask sys_sigprocmask __ia32_compat_sys_sigprocmask
127 i386 create_module
@@ -172,8 +172,8 @@
158 i386 sched_yield sys_sched_yield __ia32_sys_sched_yield
159 i386 sched_get_priority_max sys_sched_get_priority_max __ia32_sys_sched_get_priority_max
160 i386 sched_get_priority_min sys_sched_get_priority_min __ia32_sys_sched_get_priority_min
-161 i386 sched_rr_get_interval sys_sched_rr_get_interval __ia32_compat_sys_sched_rr_get_interval
-162 i386 nanosleep sys_nanosleep __ia32_compat_sys_nanosleep
+161 i386 sched_rr_get_interval sys_sched_rr_get_interval_time32 __ia32_sys_sched_rr_get_interval_time32
+162 i386 nanosleep sys_nanosleep_time32 __ia32_sys_nanosleep_time32
163 i386 mremap sys_mremap __ia32_sys_mremap
164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16
165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16
@@ -188,7 +188,7 @@
174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction
175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_sys_rt_sigprocmask
176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending
-177 i386 rt_sigtimedwait sys_rt_sigtimedwait __ia32_compat_sys_rt_sigtimedwait
+177 i386 rt_sigtimedwait sys_rt_sigtimedwait_time32 __ia32_compat_sys_rt_sigtimedwait_time32
178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo __ia32_compat_sys_rt_sigqueueinfo
179 i386 rt_sigsuspend sys_rt_sigsuspend __ia32_sys_rt_sigsuspend
180 i386 pread64 sys_pread64 __ia32_compat_sys_x86_pread
@@ -251,14 +251,14 @@
237 i386 fremovexattr sys_fremovexattr __ia32_sys_fremovexattr
238 i386 tkill sys_tkill __ia32_sys_tkill
239 i386 sendfile64 sys_sendfile64 __ia32_sys_sendfile64
-240 i386 futex sys_futex __ia32_compat_sys_futex
+240 i386 futex sys_futex_time32 __ia32_sys_futex_time32
241 i386 sched_setaffinity sys_sched_setaffinity __ia32_compat_sys_sched_setaffinity
242 i386 sched_getaffinity sys_sched_getaffinity __ia32_compat_sys_sched_getaffinity
243 i386 set_thread_area sys_set_thread_area __ia32_sys_set_thread_area
244 i386 get_thread_area sys_get_thread_area __ia32_sys_get_thread_area
245 i386 io_setup sys_io_setup __ia32_compat_sys_io_setup
246 i386 io_destroy sys_io_destroy __ia32_sys_io_destroy
-247 i386 io_getevents sys_io_getevents __ia32_compat_sys_io_getevents
+247 i386 io_getevents sys_io_getevents_time32 __ia32_sys_io_getevents_time32
248 i386 io_submit sys_io_submit __ia32_compat_sys_io_submit
249 i386 io_cancel sys_io_cancel __ia32_sys_io_cancel
250 i386 fadvise64 sys_fadvise64 __ia32_compat_sys_x86_fadvise64
@@ -271,18 +271,18 @@
257 i386 remap_file_pages sys_remap_file_pages __ia32_sys_remap_file_pages
258 i386 set_tid_address sys_set_tid_address __ia32_sys_set_tid_address
259 i386 timer_create sys_timer_create __ia32_compat_sys_timer_create
-260 i386 timer_settime sys_timer_settime __ia32_compat_sys_timer_settime
-261 i386 timer_gettime sys_timer_gettime __ia32_compat_sys_timer_gettime
+260 i386 timer_settime sys_timer_settime32 __ia32_sys_timer_settime32
+261 i386 timer_gettime sys_timer_gettime32 __ia32_sys_timer_gettime32
262 i386 timer_getoverrun sys_timer_getoverrun __ia32_sys_timer_getoverrun
263 i386 timer_delete sys_timer_delete __ia32_sys_timer_delete
-264 i386 clock_settime sys_clock_settime __ia32_compat_sys_clock_settime
-265 i386 clock_gettime sys_clock_gettime __ia32_compat_sys_clock_gettime
-266 i386 clock_getres sys_clock_getres __ia32_compat_sys_clock_getres
-267 i386 clock_nanosleep sys_clock_nanosleep __ia32_compat_sys_clock_nanosleep
+264 i386 clock_settime sys_clock_settime32 __ia32_sys_clock_settime32
+265 i386 clock_gettime sys_clock_gettime32 __ia32_sys_clock_gettime32
+266 i386 clock_getres sys_clock_getres_time32 __ia32_sys_clock_getres_time32
+267 i386 clock_nanosleep sys_clock_nanosleep_time32 __ia32_sys_clock_nanosleep_time32
268 i386 statfs64 sys_statfs64 __ia32_compat_sys_statfs64
269 i386 fstatfs64 sys_fstatfs64 __ia32_compat_sys_fstatfs64
270 i386 tgkill sys_tgkill __ia32_sys_tgkill
-271 i386 utimes sys_utimes __ia32_compat_sys_utimes
+271 i386 utimes sys_utimes_time32 __ia32_sys_utimes_time32
272 i386 fadvise64_64 sys_fadvise64_64 __ia32_compat_sys_x86_fadvise64_64
273 i386 vserver
274 i386 mbind sys_mbind __ia32_sys_mbind
@@ -290,8 +290,8 @@
276 i386 set_mempolicy sys_set_mempolicy __ia32_sys_set_mempolicy
277 i386 mq_open sys_mq_open __ia32_compat_sys_mq_open
278 i386 mq_unlink sys_mq_unlink __ia32_sys_mq_unlink
-279 i386 mq_timedsend sys_mq_timedsend __ia32_compat_sys_mq_timedsend
-280 i386 mq_timedreceive sys_mq_timedreceive __ia32_compat_sys_mq_timedreceive
+279 i386 mq_timedsend sys_mq_timedsend_time32 __ia32_sys_mq_timedsend_time32
+280 i386 mq_timedreceive sys_mq_timedreceive_time32 __ia32_sys_mq_timedreceive_time32
281 i386 mq_notify sys_mq_notify __ia32_compat_sys_mq_notify
282 i386 mq_getsetattr sys_mq_getsetattr __ia32_compat_sys_mq_getsetattr
283 i386 kexec_load sys_kexec_load __ia32_compat_sys_kexec_load
@@ -310,7 +310,7 @@
296 i386 mkdirat sys_mkdirat __ia32_sys_mkdirat
297 i386 mknodat sys_mknodat __ia32_sys_mknodat
298 i386 fchownat sys_fchownat __ia32_sys_fchownat
-299 i386 futimesat sys_futimesat __ia32_compat_sys_futimesat
+299 i386 futimesat sys_futimesat_time32 __ia32_sys_futimesat_time32
300 i386 fstatat64 sys_fstatat64 __ia32_compat_sys_x86_fstatat
301 i386 unlinkat sys_unlinkat __ia32_sys_unlinkat
302 i386 renameat sys_renameat __ia32_sys_renameat
@@ -319,8 +319,8 @@
305 i386 readlinkat sys_readlinkat __ia32_sys_readlinkat
306 i386 fchmodat sys_fchmodat __ia32_sys_fchmodat
307 i386 faccessat sys_faccessat __ia32_sys_faccessat
-308 i386 pselect6 sys_pselect6 __ia32_compat_sys_pselect6
-309 i386 ppoll sys_ppoll __ia32_compat_sys_ppoll
+308 i386 pselect6 sys_pselect6_time32 __ia32_compat_sys_pselect6_time32
+309 i386 ppoll sys_ppoll_time32 __ia32_compat_sys_ppoll_time32
310 i386 unshare sys_unshare __ia32_sys_unshare
311 i386 set_robust_list sys_set_robust_list __ia32_compat_sys_set_robust_list
312 i386 get_robust_list sys_get_robust_list __ia32_compat_sys_get_robust_list
@@ -331,13 +331,13 @@
317 i386 move_pages sys_move_pages __ia32_compat_sys_move_pages
318 i386 getcpu sys_getcpu __ia32_sys_getcpu
319 i386 epoll_pwait sys_epoll_pwait __ia32_sys_epoll_pwait
-320 i386 utimensat sys_utimensat __ia32_compat_sys_utimensat
+320 i386 utimensat sys_utimensat_time32 __ia32_sys_utimensat_time32
321 i386 signalfd sys_signalfd __ia32_compat_sys_signalfd
322 i386 timerfd_create sys_timerfd_create __ia32_sys_timerfd_create
323 i386 eventfd sys_eventfd __ia32_sys_eventfd
324 i386 fallocate sys_fallocate __ia32_compat_sys_x86_fallocate
-325 i386 timerfd_settime sys_timerfd_settime __ia32_compat_sys_timerfd_settime
-326 i386 timerfd_gettime sys_timerfd_gettime __ia32_compat_sys_timerfd_gettime
+325 i386 timerfd_settime sys_timerfd_settime32 __ia32_sys_timerfd_settime32
+326 i386 timerfd_gettime sys_timerfd_gettime32 __ia32_sys_timerfd_gettime32
327 i386 signalfd4 sys_signalfd4 __ia32_compat_sys_signalfd4
328 i386 eventfd2 sys_eventfd2 __ia32_sys_eventfd2
329 i386 epoll_create1 sys_epoll_create1 __ia32_sys_epoll_create1
@@ -348,13 +348,13 @@
334 i386 pwritev sys_pwritev __ia32_compat_sys_pwritev
335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo __ia32_compat_sys_rt_tgsigqueueinfo
336 i386 perf_event_open sys_perf_event_open __ia32_sys_perf_event_open
-337 i386 recvmmsg sys_recvmmsg __ia32_compat_sys_recvmmsg
+337 i386 recvmmsg sys_recvmmsg_time32 __ia32_compat_sys_recvmmsg_time32
338 i386 fanotify_init sys_fanotify_init __ia32_sys_fanotify_init
339 i386 fanotify_mark sys_fanotify_mark __ia32_compat_sys_fanotify_mark
340 i386 prlimit64 sys_prlimit64 __ia32_sys_prlimit64
341 i386 name_to_handle_at sys_name_to_handle_at __ia32_sys_name_to_handle_at
342 i386 open_by_handle_at sys_open_by_handle_at __ia32_compat_sys_open_by_handle_at
-343 i386 clock_adjtime sys_clock_adjtime __ia32_compat_sys_clock_adjtime
+343 i386 clock_adjtime sys_clock_adjtime32 __ia32_sys_clock_adjtime32
344 i386 syncfs sys_syncfs __ia32_sys_syncfs
345 i386 sendmmsg sys_sendmmsg __ia32_compat_sys_sendmmsg
346 i386 setns sys_setns __ia32_sys_setns
@@ -396,5 +396,36 @@
382 i386 pkey_free sys_pkey_free __ia32_sys_pkey_free
383 i386 statx sys_statx __ia32_sys_statx
384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl
-385 i386 io_pgetevents sys_io_pgetevents __ia32_compat_sys_io_pgetevents
+385 i386 io_pgetevents sys_io_pgetevents_time32 __ia32_compat_sys_io_pgetevents
386 i386 rseq sys_rseq __ia32_sys_rseq
+# don't use numbers 387 through 392, add new calls at the end
+393 i386 semget sys_semget __ia32_sys_semget
+394 i386 semctl sys_semctl __ia32_compat_sys_semctl
+395 i386 shmget sys_shmget __ia32_sys_shmget
+396 i386 shmctl sys_shmctl __ia32_compat_sys_shmctl
+397 i386 shmat sys_shmat __ia32_compat_sys_shmat
+398 i386 shmdt sys_shmdt __ia32_sys_shmdt
+399 i386 msgget sys_msgget __ia32_sys_msgget
+400 i386 msgsnd sys_msgsnd __ia32_compat_sys_msgsnd
+401 i386 msgrcv sys_msgrcv __ia32_compat_sys_msgrcv
+402 i386 msgctl sys_msgctl __ia32_compat_sys_msgctl
+403 i386 clock_gettime64 sys_clock_gettime __ia32_sys_clock_gettime
+404 i386 clock_settime64 sys_clock_settime __ia32_sys_clock_settime
+405 i386 clock_adjtime64 sys_clock_adjtime __ia32_sys_clock_adjtime
+406 i386 clock_getres_time64 sys_clock_getres __ia32_sys_clock_getres
+407 i386 clock_nanosleep_time64 sys_clock_nanosleep __ia32_sys_clock_nanosleep
+408 i386 timer_gettime64 sys_timer_gettime __ia32_sys_timer_gettime
+409 i386 timer_settime64 sys_timer_settime __ia32_sys_timer_settime
+410 i386 timerfd_gettime64 sys_timerfd_gettime __ia32_sys_timerfd_gettime
+411 i386 timerfd_settime64 sys_timerfd_settime __ia32_sys_timerfd_settime
+412 i386 utimensat_time64 sys_utimensat __ia32_sys_utimensat
+413 i386 pselect6_time64 sys_pselect6 __ia32_compat_sys_pselect6_time64
+414 i386 ppoll_time64 sys_ppoll __ia32_compat_sys_ppoll_time64
+416 i386 io_pgetevents_time64 sys_io_pgetevents __ia32_sys_io_pgetevents
+417 i386 recvmmsg_time64 sys_recvmmsg __ia32_compat_sys_recvmmsg_time64
+418 i386 mq_timedsend_time64 sys_mq_timedsend __ia32_sys_mq_timedsend
+419 i386 mq_timedreceive_time64 sys_mq_timedreceive __ia32_sys_mq_timedreceive
+420 i386 semtimedop_time64 sys_semtimedop __ia32_sys_semtimedop
+421 i386 rt_sigtimedwait_time64 sys_rt_sigtimedwait __ia32_compat_sys_rt_sigtimedwait_time64
+422 i386 futex_time64 sys_futex __ia32_sys_futex
+423 i386 sched_rr_get_interval_time64 sys_sched_rr_get_interval __ia32_sys_sched_rr_get_interval
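The bulk of these table changes is the y2038 split: the existing i386 numbers are rebound to *_time32 entry points that keep taking 32-bit time_t arguments, while the new numbers from 403 upward expose the 64-bit time variants to 32-bit userspace. The underlying reason is simply that a signed 32-bit time_t runs out on 2038-01-19; a small userspace illustration, assuming the host C library already uses a 64-bit time_t so gmtime() can represent the post-overflow second:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            /* Largest value a signed 32-bit time_t can hold: 2038-01-19 03:14:07 UTC. */
            int64_t last32 = INT32_MAX;
            int64_t next = last32 + 1;      /* not representable in 32 bits */
            time_t t = (time_t)next;
            struct tm *tm = gmtime(&t);

            printf("last 32-bit second: %lld\n", (long long)last32);
            if (tm)
                    printf("one second later:   %s", asctime(tm));
            else
                    printf("gmtime() cannot represent %lld on this libc\n",
                           (long long)next);
            return 0;
    }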
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index f0b1709a5ffb..2ae92fddb6d5 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -343,6 +343,8 @@
332 common statx __x64_sys_statx
333 common io_pgetevents __x64_sys_io_pgetevents
334 common rseq __x64_sys_rseq
+# don't use numbers 387 through 423, add new calls after the last
+# 'common' entry
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -361,7 +363,7 @@
520 x32 execve __x32_compat_sys_execve/ptregs
521 x32 ptrace __x32_compat_sys_ptrace
522 x32 rt_sigpending __x32_compat_sys_rt_sigpending
-523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait
+523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait_time64
524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo
525 x32 sigaltstack __x32_compat_sys_sigaltstack
526 x32 timer_create __x32_compat_sys_timer_create
@@ -375,7 +377,7 @@
534 x32 preadv __x32_compat_sys_preadv64
535 x32 pwritev __x32_compat_sys_pwritev64
536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo
-537 x32 recvmmsg __x32_compat_sys_recvmmsg
+537 x32 recvmmsg __x32_compat_sys_recvmmsg_time64
538 x32 sendmmsg __x32_compat_sys_sendmmsg
539 x32 process_vm_readv __x32_compat_sys_process_vm_readv
540 x32 process_vm_writev __x32_compat_sys_process_vm_writev
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index d50bb4dc0650..62f317c9113a 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -253,15 +253,6 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
return -EOPNOTSUPP;
}
-static const struct perf_event_attr ibs_notsupp = {
- .exclude_user = 1,
- .exclude_kernel = 1,
- .exclude_hv = 1,
- .exclude_idle = 1,
- .exclude_host = 1,
- .exclude_guest = 1,
-};
-
static int perf_ibs_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -282,9 +273,6 @@ static int perf_ibs_init(struct perf_event *event)
if (event->pmu != &perf_ibs->pmu)
return -ENOENT;
- if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
- return -EINVAL;
-
if (config & ~perf_ibs->config_mask)
return -EINVAL;
@@ -537,6 +525,7 @@ static struct perf_ibs perf_ibs_fetch = {
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
},
.msr = MSR_AMD64_IBSFETCHCTL,
.config_mask = IBS_FETCH_CONFIG_MASK,
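This and the following perf/amd changes drop the per-driver rejection of the exclude_* attribute bits and instead advertise PERF_PMU_CAP_NO_EXCLUDE, so the perf core can refuse such events in one place. A standalone sketch of what that centralized check presumably amounts to; the struct fields mirror the flags the removed code tested, and the capability bit value used here is made up for the sketch:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for struct perf_event_attr / struct pmu fields. */
    struct attr {
            bool exclude_user, exclude_kernel, exclude_hv;
            bool exclude_idle, exclude_host, exclude_guest;
    };

    #define PERF_PMU_CAP_NO_EXCLUDE 0x80    /* hypothetical bit value for the sketch */

    static bool has_any_exclude_flag(const struct attr *a)
    {
            return a->exclude_user || a->exclude_kernel || a->exclude_hv ||
                   a->exclude_idle || a->exclude_host || a->exclude_guest;
    }

    /* One generic test in the core replaces the per-driver -EINVAL checks. */
    static int try_init_event(int pmu_capabilities, const struct attr *a)
    {
            if ((pmu_capabilities & PERF_PMU_CAP_NO_EXCLUDE) &&
                has_any_exclude_flag(a))
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            struct attr a = { .exclude_kernel = true };

            printf("exclude_kernel on a NO_EXCLUDE pmu -> %d\n",
                   try_init_event(PERF_PMU_CAP_NO_EXCLUDE, &a));
            printf("exclude_kernel on a capable pmu    -> %d\n",
                   try_init_event(0, &a));
            return 0;
    }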
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index 3210fee27e7f..7635c23f7d82 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -223,11 +223,6 @@ static int perf_iommu_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- /* IOMMU counters do not have usr/os/guest/host bits */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_host || event->attr.exclude_guest)
- return -EINVAL;
-
if (event->cpu < 0)
return -EINVAL;
@@ -414,6 +409,7 @@ static const struct pmu iommu_pmu __initconst = {
.read = perf_iommu_read,
.task_ctx_nr = perf_invalid_context,
.attr_groups = amd_iommu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
static __init int init_one_iommu(unsigned int idx)
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 2aefacf5c5b2..c5ff084551c6 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -136,14 +136,7 @@ static int pmu_event_init(struct perf_event *event)
return -ENOENT;
/* Unsupported modes and filters. */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- /* no sampling */
- event->attr.sample_period)
+ if (event->attr.sample_period)
return -EINVAL;
if (cfg != AMD_POWER_EVENTSEL_PKG)
@@ -226,6 +219,7 @@ static struct pmu pmu_class = {
.start = pmu_event_start,
.stop = pmu_event_stop,
.read = pmu_event_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
static int power_cpu_exit(unsigned int cpu)
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 398df6eaa109..79cfd3b30ceb 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -201,11 +201,6 @@ static int amd_uncore_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- /* NB and Last level cache counters do not have usr/os/guest/host bits */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_host || event->attr.exclude_guest)
- return -EINVAL;
-
/* and we do not enable counter overflow interrupts */
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
@@ -307,6 +302,7 @@ static struct pmu amd_nb_pmu = {
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
static struct pmu amd_llc_pmu = {
@@ -317,6 +313,7 @@ static struct pmu amd_llc_pmu = {
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 374a19712e20..b684f0294f35 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
x86_pmu.check_microcode();
}
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+ if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+ return -EINVAL;
+
+ if (value && x86_pmu.limit_period) {
+ if (x86_pmu.limit_period(event, value) > value)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
.event_idx = x86_pmu_event_idx,
.sched_task = x86_pmu_sched_task,
.task_ctx_size = sizeof(struct x86_perf_task_context),
+ .check_period = x86_pmu_check_period,
};
void arch_perf_update_userpage(struct perf_event *event,
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index a01ef1b0f883..7cdd7b13bbda 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -77,10 +77,12 @@ static size_t buf_size(struct page *page)
}
static void *
-bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
+bts_buffer_setup_aux(struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
{
struct bts_buffer *buf;
struct page *page;
+ int cpu = event->cpu;
int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
unsigned long offset;
size_t size = nr_pages << PAGE_SHIFT;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 40e12cfc87f6..17096d3cd616 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -18,6 +18,7 @@
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/apic.h>
+#include <asm/cpu_device_id.h>
#include "../perf_event.h"
@@ -3206,16 +3207,27 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
- /*
- * If PMU counter has PEBS enabled it is not enough to disable counter
- * on a guest entry since PEBS memory write can overshoot guest entry
- * and corrupt guest memory. Disabling PEBS solves the problem.
- */
- arr[1].msr = MSR_IA32_PEBS_ENABLE;
- arr[1].host = cpuc->pebs_enabled;
- arr[1].guest = 0;
+ if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+ arr[0].guest &= ~cpuc->pebs_enabled;
+ else
+ arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
+ *nr = 1;
+
+ if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
+ /*
+ * If PMU counter has PEBS enabled it is not enough to
+ * disable counter on a guest entry since PEBS memory
+ * write can overshoot guest entry and corrupt guest
+ * memory. Disabling PEBS solves the problem.
+ *
+ * Don't do this if the CPU already enforces it.
+ */
+ arr[1].msr = MSR_IA32_PEBS_ENABLE;
+ arr[1].host = cpuc->pebs_enabled;
+ arr[1].guest = 0;
+ *nr = 2;
+ }
- *nr = 2;
return arr;
}
@@ -3559,6 +3571,14 @@ static void free_excl_cntrs(int cpu)
static void intel_pmu_cpu_dying(int cpu)
{
+ fini_debug_store_on_cpu(cpu);
+
+ if (x86_pmu.counter_freezing)
+ disable_counter_freeze();
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_shared_regs *pc;
@@ -3570,11 +3590,6 @@ static void intel_pmu_cpu_dying(int cpu)
}
free_excl_cntrs(cpu);
-
- fini_debug_store_on_cpu(cpu);
-
- if (x86_pmu.counter_freezing)
- disable_counter_freeze();
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3584,6 +3599,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
intel_pmu_lbr_sched_task(ctx, sched_in);
}
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+ return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3663,6 +3683,9 @@ static __initconst const struct x86_pmu core_pmu = {
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
+ .cpu_dead = intel_pmu_cpu_dead,
+
+ .check_period = intel_pmu_check_period,
};
static struct attribute *intel_pmu_attrs[];
@@ -3703,8 +3726,12 @@ static __initconst const struct x86_pmu intel_pmu = {
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
+ .cpu_dead = intel_pmu_cpu_dead,
+
.guest_get_msrs = intel_guest_get_msrs,
.sched_task = intel_pmu_sched_task,
+
+ .check_period = intel_pmu_check_period,
};
static __init void intel_clovertown_quirk(void)
@@ -3733,36 +3760,62 @@ static __init void intel_clovertown_quirk(void)
x86_pmu.pebs_constraints = NULL;
}
-static int intel_snb_pebs_broken(int cpu)
+static const struct x86_cpu_desc isolation_ucodes[] = {
+ INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f),
+ INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e),
+ INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015),
+ INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
+ INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c),
+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e),
+ INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e),
+ {}
+};
+
+static void intel_check_pebs_isolation(void)
{
- u32 rev = UINT_MAX; /* default to broken for unknown models */
+ x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
+}
- switch (cpu_data(cpu).x86_model) {
- case INTEL_FAM6_SANDYBRIDGE:
- rev = 0x28;
- break;
+static __init void intel_pebs_isolation_quirk(void)
+{
+ WARN_ON_ONCE(x86_pmu.check_microcode);
+ x86_pmu.check_microcode = intel_check_pebs_isolation;
+ intel_check_pebs_isolation();
+}
- case INTEL_FAM6_SANDYBRIDGE_X:
- switch (cpu_data(cpu).x86_stepping) {
- case 6: rev = 0x618; break;
- case 7: rev = 0x70c; break;
- }
- }
+static const struct x86_cpu_desc pebs_ucodes[] = {
+ INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
+ INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
+ INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
+ {}
+};
- return (cpu_data(cpu).microcode < rev);
+static bool intel_snb_pebs_broken(void)
+{
+ return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
}
static void intel_snb_check_microcode(void)
{
- int pebs_broken = 0;
- int cpu;
-
- for_each_online_cpu(cpu) {
- if ((pebs_broken = intel_snb_pebs_broken(cpu)))
- break;
- }
-
- if (pebs_broken == x86_pmu.pebs_broken)
+ if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
return;
/*
@@ -3879,23 +3932,22 @@ static __init void intel_nehalem_quirk(void)
}
}
-static bool intel_glp_counter_freezing_broken(int cpu)
-{
- u32 rev = UINT_MAX; /* default to broken for unknown stepping */
-
- switch (cpu_data(cpu).x86_stepping) {
- case 1:
- rev = 0x28;
- break;
- case 8:
- rev = 0x6;
- break;
- }
+static const struct x86_cpu_desc counter_freezing_ucodes[] = {
+ INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
+ INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
+ INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
+ INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028),
+ INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
+ INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
+ {}
+};
- return (cpu_data(cpu).microcode < rev);
+static bool intel_counter_freezing_broken(void)
+{
+ return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
}
-static __init void intel_glp_counter_freezing_quirk(void)
+static __init void intel_counter_freezing_quirk(void)
{
/* Check if it's already disabled */
if (disable_counter_freezing)
@@ -3905,7 +3957,7 @@ static __init void intel_glp_counter_freezing_quirk(void)
* If the system starts with the wrong ucode, leave the
* counter-freezing feature permanently disabled.
*/
- if (intel_glp_counter_freezing_broken(raw_smp_processor_id())) {
+ if (intel_counter_freezing_broken()) {
pr_info("PMU counter freezing disabled due to CPU errata,"
"please upgrade microcode\n");
x86_pmu.counter_freezing = false;
@@ -4256,6 +4308,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GOLDMONT_X:
+ x86_add_quirk(intel_counter_freezing_quirk);
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -4282,7 +4335,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
- x86_add_quirk(intel_glp_counter_freezing_quirk);
+ x86_add_quirk(intel_counter_freezing_quirk);
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
@@ -4425,6 +4478,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_HASWELL_ULT:
case INTEL_FAM6_HASWELL_GT3E:
x86_add_quirk(intel_ht_bug);
+ x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -4456,6 +4510,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_BROADWELL_XEON_D:
case INTEL_FAM6_BROADWELL_GT3E:
case INTEL_FAM6_BROADWELL_X:
+ x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -4518,6 +4573,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
+ x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index d2e780705c5a..94a4b7fc75d0 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -280,13 +280,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
return -ENOENT;
/* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.sample_period) /* no sampling */
+ if (event->attr.sample_period) /* no sampling */
return -EINVAL;
if (event->cpu < 0)
@@ -437,7 +431,7 @@ static struct pmu cstate_core_pmu = {
.start = cstate_pmu_event_start,
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
- .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
.module = THIS_MODULE,
};
@@ -451,7 +445,7 @@ static struct pmu cstate_pkg_pmu = {
.start = cstate_pmu_event_start,
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
- .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
.module = THIS_MODULE,
};
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index e9acf1d2e7b2..10c99ce1fead 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1628,6 +1628,8 @@ void __init intel_ds_init(void)
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
+ if (x86_pmu.version <= 4)
+ x86_pmu.pebs_no_isolation = 1;
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
int format = x86_pmu.intel_cap.pebs_format;
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 9494ca68fd9d..fb3a2f13fc70 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1114,10 +1114,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
* Return: Our private PT buffer structure.
*/
static void *
-pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
+pt_buffer_setup_aux(struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
{
struct pt_buffer *buf;
- int node, ret;
+ int node, ret, cpu = event->cpu;
if (!nr_pages)
return NULL;
@@ -1222,7 +1223,8 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
static void pt_event_addr_filters_sync(struct perf_event *event)
{
struct perf_addr_filters_head *head = perf_event_addr_filters(event);
- unsigned long msr_a, msr_b, *offs = event->addr_filters_offs;
+ unsigned long msr_a, msr_b;
+ struct perf_addr_filter_range *fr = event->addr_filter_ranges;
struct pt_filters *filters = event->hw.addr_filters;
struct perf_addr_filter *filter;
int range = 0;
@@ -1231,12 +1233,12 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
return;
list_for_each_entry(filter, &head->list, entry) {
- if (filter->path.dentry && !offs[range]) {
+ if (filter->path.dentry && !fr[range].start) {
msr_a = msr_b = 0;
} else {
/* apply the offset */
- msr_a = filter->offset + offs[range];
- msr_b = filter->size + msr_a - 1;
+ msr_a = fr[range].start;
+ msr_b = msr_a + fr[range].size - 1;
}
filters->filter[range].msr_a = msr_a;
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 91039ffed633..94dc564146ca 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -397,13 +397,7 @@ static int rapl_pmu_event_init(struct perf_event *event)
return -EINVAL;
/* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.sample_period) /* no sampling */
+ if (event->attr.sample_period) /* no sampling */
return -EINVAL;
/* must be done before validate_group */
@@ -699,6 +693,7 @@ static int __init init_rapl_pmus(void)
rapl_pmus->pmu.stop = rapl_pmu_event_stop;
rapl_pmus->pmu.read = rapl_pmu_event_read;
rapl_pmus->pmu.module = THIS_MODULE;
+ rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
return 0;
}
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 27a461414b30..d516161c00c4 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -695,14 +695,6 @@ static int uncore_pmu_event_init(struct perf_event *event)
if (pmu->func_id < 0)
return -ENOENT;
- /*
- * Uncore PMU does measure at all privilege level all the time.
- * So it doesn't make sense to specify any exclude bits.
- */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_hv || event->attr.exclude_idle)
- return -EINVAL;
-
/* Sampling not supported yet */
if (hwc->sample_period)
return -EINVAL;
@@ -800,6 +792,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
.stop = uncore_pmu_event_stop,
.read = uncore_pmu_event_read,
.module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
} else {
pmu->pmu = *pmu->type->pmu;
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 2593b0d7aeee..b12517fae77a 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -397,13 +397,7 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
return -EINVAL;
/* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.sample_period) /* no sampling */
+ if (event->attr.sample_period) /* no sampling */
return -EINVAL;
/*
@@ -497,6 +491,7 @@ static struct pmu snb_uncore_imc_pmu = {
.start = uncore_pmu_event_start,
.stop = uncore_pmu_event_stop,
.read = uncore_pmu_event_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
static struct intel_uncore_ops snb_uncore_imc_ops = {
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index c07bee31abe8..b10e04387f38 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
.id_table = snbep_uncore_pci_ids,
};
+#define NODE_ID_MASK 0x7
+
/*
* build pci bus to socket mapping
*/
@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
if (err)
break;
- nodeid = config;
+ nodeid = config & NODE_ID_MASK;
/* get the Node ID mapping */
err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
if (err)
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 1b9f85abf9bc..a878e6286e4a 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -160,13 +160,7 @@ static int msr_event_init(struct perf_event *event)
return -ENOENT;
/* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.sample_period) /* no sampling */
+ if (event->attr.sample_period) /* no sampling */
return -EINVAL;
if (cfg >= PERF_MSR_EVENT_MAX)
@@ -256,7 +250,7 @@ static struct pmu pmu_msr = {
.start = msr_event_start,
.stop = msr_event_stop,
.read = msr_event_update,
- .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
};
static int __init msr_init(void)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 78d7b7031bfc..7e75f474b076 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -601,13 +601,14 @@ struct x86_pmu {
/*
* Intel DebugStore bits
*/
- unsigned int bts :1,
- bts_active :1,
- pebs :1,
- pebs_active :1,
- pebs_broken :1,
- pebs_prec_dist :1,
- pebs_no_tlb :1;
+ unsigned int bts :1,
+ bts_active :1,
+ pebs :1,
+ pebs_active :1,
+ pebs_broken :1,
+ pebs_prec_dist :1,
+ pebs_no_tlb :1,
+ pebs_no_isolation :1;
int pebs_record_size;
int pebs_buffer_size;
void (*drain_pebs)(struct pt_regs *regs);
@@ -646,6 +647,11 @@ struct x86_pmu {
* Intel host/guest support (KVM)
*/
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+ /*
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+ */
+ int (*check_period) (struct perf_event *event, u64 period);
};
struct x86_perf_task_context {
@@ -857,7 +863,7 @@ static inline int amd_pmu_init(void)
#ifdef CONFIG_CPU_SUP_INTEL
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
struct hw_perf_event *hwc = &event->hw;
unsigned int hw_event, bts_event;
@@ -868,7 +874,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
- return hw_event == bts_event && hwc->sample_period == 1;
+ return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ return intel_pmu_has_bts_period(event, hwc->sample_period);
}
int intel_pmu_save_and_restart(struct perf_event *event);
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index f65b78d32f5e..3c135084e1eb 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -39,82 +39,10 @@
static int load_aout_binary(struct linux_binprm *);
static int load_aout_library(struct file *);
-#ifdef CONFIG_COREDUMP
-static int aout_core_dump(struct coredump_params *);
-
-static unsigned long get_dr(int n)
-{
- struct perf_event *bp = current->thread.ptrace_bps[n];
- return bp ? bp->hw.info.address : 0;
-}
-
-/*
- * fill in the user structure for a core dump..
- */
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
-{
- u32 fs, gs;
- memset(dump, 0, sizeof(*dump));
-
-/* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long)
- (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_debugreg[0] = get_dr(0);
- dump->u_debugreg[1] = get_dr(1);
- dump->u_debugreg[2] = get_dr(2);
- dump->u_debugreg[3] = get_dr(3);
- dump->u_debugreg[6] = current->thread.debugreg6;
- dump->u_debugreg[7] = current->thread.ptrace_dr7;
-
- if (dump->start_stack < 0xc0000000) {
- unsigned long tmp;
-
- tmp = (unsigned long) (0xc0000000 - dump->start_stack);
- dump->u_ssize = tmp >> PAGE_SHIFT;
- }
-
- dump->regs.ebx = regs->bx;
- dump->regs.ecx = regs->cx;
- dump->regs.edx = regs->dx;
- dump->regs.esi = regs->si;
- dump->regs.edi = regs->di;
- dump->regs.ebp = regs->bp;
- dump->regs.eax = regs->ax;
- dump->regs.ds = current->thread.ds;
- dump->regs.es = current->thread.es;
- savesegment(fs, fs);
- dump->regs.fs = fs;
- savesegment(gs, gs);
- dump->regs.gs = gs;
- dump->regs.orig_eax = regs->orig_ax;
- dump->regs.eip = regs->ip;
- dump->regs.cs = regs->cs;
- dump->regs.eflags = regs->flags;
- dump->regs.esp = regs->sp;
- dump->regs.ss = regs->ss;
-
-#if 1 /* FIXME */
- dump->u_fpvalid = 0;
-#else
- dump->u_fpvalid = dump_fpu(regs, &dump->i387);
-#endif
-}
-
-#endif
-
static struct linux_binfmt aout_format = {
.module = THIS_MODULE,
.load_binary = load_aout_binary,
.load_shlib = load_aout_library,
-#ifdef CONFIG_COREDUMP
- .core_dump = aout_core_dump,
-#endif
- .min_coredump = PAGE_SIZE
};
static int set_brk(unsigned long start, unsigned long end)
@@ -126,91 +54,6 @@ static int set_brk(unsigned long start, unsigned long end)
return vm_brk(start, end - start);
}
-#ifdef CONFIG_COREDUMP
-/*
- * These are the only things you should do on a core-file: use only these
- * macros to write out all the necessary info.
- */
-
-#include <linux/coredump.h>
-
-#define START_DATA(u) (u.u_tsize << PAGE_SHIFT)
-#define START_STACK(u) (u.start_stack)
-
-/*
- * Routine writes a core dump image in the current directory.
- * Currently only a stub-function.
- *
- * Note that setuid/setgid files won't make a core-dump if the uid/gid
- * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
- * field, which also makes sure the core-dumps won't be recursive if the
- * dumping of the process results in another error..
- */
-
-static int aout_core_dump(struct coredump_params *cprm)
-{
- mm_segment_t fs;
- int has_dumped = 0;
- unsigned long dump_start, dump_size;
- struct user32 dump;
-
- fs = get_fs();
- set_fs(KERNEL_DS);
- has_dumped = 1;
- strncpy(dump.u_comm, current->comm, sizeof(current->comm));
- dump.u_ar0 = offsetof(struct user32, regs);
- dump.signal = cprm->siginfo->si_signo;
- dump_thread32(cprm->regs, &dump);
-
- /*
- * If the size of the dump file exceeds the rlimit, then see
- * what would happen if we wrote the stack, but not the data
- * area.
- */
- if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
- dump.u_dsize = 0;
-
- /* Make sure we have enough room to write the stack and data areas. */
- if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
- dump.u_ssize = 0;
-
- /* make sure we actually have a data and stack area to dump */
- set_fs(USER_DS);
- if (!access_ok((void *) (unsigned long)START_DATA(dump),
- dump.u_dsize << PAGE_SHIFT))
- dump.u_dsize = 0;
- if (!access_ok((void *) (unsigned long)START_STACK(dump),
- dump.u_ssize << PAGE_SHIFT))
- dump.u_ssize = 0;
-
- set_fs(KERNEL_DS);
- /* struct user */
- if (!dump_emit(cprm, &dump, sizeof(dump)))
- goto end_coredump;
- /* Now dump all of the user data. Include malloced stuff as well */
- if (!dump_skip(cprm, PAGE_SIZE - sizeof(dump)))
- goto end_coredump;
- /* now we start writing out the user space info */
- set_fs(USER_DS);
- /* Dump the data area */
- if (dump.u_dsize != 0) {
- dump_start = START_DATA(dump);
- dump_size = dump.u_dsize << PAGE_SHIFT;
- if (!dump_emit(cprm, (void *)dump_start, dump_size))
- goto end_coredump;
- }
- /* Now prepare to dump the stack area */
- if (dump.u_ssize != 0) {
- dump_start = START_STACK(dump);
- dump_size = dump.u_ssize << PAGE_SHIFT;
- if (!dump_emit(cprm, (void *)dump_start, dump_size))
- goto end_coredump;
- }
-end_coredump:
- set_fs(fs);
- return has_dumped;
-}
-#endif
/*
* create_aout_tables() parses the env- and arg-strings in new user
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h
deleted file mode 100644
index 7d3ece8bfb61..000000000000
--- a/arch/x86/include/asm/a.out-core.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_X86_A_OUT_CORE_H
-#define _ASM_X86_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_X86_32
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/mm_types.h>
-
-#include <asm/debugreg.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
-/* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1)))
- >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
- aout_dump_debugregs(dump);
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
- >> PAGE_SHIFT;
-
- dump->regs.bx = regs->bx;
- dump->regs.cx = regs->cx;
- dump->regs.dx = regs->dx;
- dump->regs.si = regs->si;
- dump->regs.di = regs->di;
- dump->regs.bp = regs->bp;
- dump->regs.ax = regs->ax;
- dump->regs.ds = (u16)regs->ds;
- dump->regs.es = (u16)regs->es;
- dump->regs.fs = (u16)regs->fs;
- dump->regs.gs = get_user_gs(regs);
- dump->regs.orig_ax = regs->orig_ax;
- dump->regs.ip = regs->ip;
- dump->regs.cs = (u16)regs->cs;
- dump->regs.flags = regs->flags;
- dump->regs.sp = regs->sp;
- dump->regs.ss = (u16)regs->ss;
-
- dump->u_fpvalid = dump_fpu(regs, &dump->i387);
-}
-
-#endif /* CONFIG_X86_32 */
-#endif /* __KERNEL__ */
-#endif /* _ASM_X86_A_OUT_CORE_H */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 0660e14690c8..4c74073a19cc 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -94,13 +94,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
#define alt_total_slen alt_end_marker"b-661b"
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
-#define __OLDINSTR(oldinstr, num) \
+#define OLDINSTR(oldinstr, num) \
+ "# ALT: oldnstr\n" \
"661:\n\t" oldinstr "\n662:\n" \
+ "# ALT: padding\n" \
".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
- "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n"
-
-#define OLDINSTR(oldinstr, num) \
- __OLDINSTR(oldinstr, num) \
+ "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" \
alt_end_marker ":\n"
/*
@@ -116,11 +115,23 @@ static inline int alternatives_text_reserved(void *start, void *end)
* additionally longer than the first replacement alternative.
*/
#define OLDINSTR_2(oldinstr, num1, num2) \
+ "# ALT: oldinstr2\n" \
"661:\n\t" oldinstr "\n662:\n" \
+ "# ALT: padding2\n" \
".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \
alt_end_marker ":\n"
+#define OLDINSTR_3(oldinsn, n1, n2, n3) \
+ "# ALT: oldinstr3\n" \
+ "661:\n\t" oldinsn "\n662:\n" \
+ "# ALT: padding3\n" \
+ ".skip -((" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \
+ " - (" alt_slen ")) > 0) * " \
+ "(" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \
+ " - (" alt_slen ")), 0x90\n" \
+ alt_end_marker ":\n"
+
#define ALTINSTR_ENTRY(feature, num) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(num)"f - .\n" /* new instruction */ \
@@ -129,8 +140,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
" .byte " alt_rlen(num) "\n" /* replacement len */ \
" .byte " alt_pad_len "\n" /* pad len */
-#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
- b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
+#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
+ "# ALT: replacement " #num "\n" \
+ b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n"
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
@@ -153,6 +165,19 @@ static inline int alternatives_text_reserved(void *start, void *end)
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection\n"
+#define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \
+ OLDINSTR_3(oldinsn, 1, 2, 3) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feat1, 1) \
+ ALTINSTR_ENTRY(feat2, 2) \
+ ALTINSTR_ENTRY(feat3, 3) \
+ ".popsection\n" \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinsn1, feat1, 1) \
+ ALTINSTR_REPLACEMENT(newinsn2, feat2, 2) \
+ ALTINSTR_REPLACEMENT(newinsn3, feat3, 3) \
+ ".popsection\n"
+
/*
* Alternative instructions for different CPU types or capabilities.
*
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index baeba0567126..3417110574c1 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -11,4 +11,32 @@
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
+/*
+ * Match specific microcode revisions.
+ *
+ * vendor/family/model/stepping must be all set.
+ *
+ * Only checks against the boot CPU. When mixed-stepping configs are
+ * valid for a CPU model, add a quirk for every valid stepping and
+ * do the fine-tuning in the quirk handler.
+ */
+
+struct x86_cpu_desc {
+ __u8 x86_family;
+ __u8 x86_vendor;
+ __u8 x86_model;
+ __u8 x86_stepping;
+ __u32 x86_microcode_rev;
+};
+
+#define INTEL_CPU_DESC(mod, step, rev) { \
+ .x86_family = 6, \
+ .x86_vendor = X86_VENDOR_INTEL, \
+ .x86_model = mod, \
+ .x86_stepping = step, \
+ .x86_microcode_rev = rev, \
+}
+
+extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
+
#endif
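For reference, a minimal and entirely hypothetical user of the new interface, mirroring the microcode tables added to the perf code earlier in this diff; the stepping and revision numbers below are made up:
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
/* Hypothetical: feature X is only trusted from these microcode revisions on. */
static const struct x86_cpu_desc feature_x_ucodes[] = {
	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,      4, 0x02000065),
	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000028),
	{}
};
static bool feature_x_supported(void)
{
	/*
	 * True only when the boot CPU's family/model/stepping is listed
	 * and its microcode revision is at least the listed value;
	 * unlisted CPUs get false.
	 */
	return x86_cpu_has_min_microcode_rev(feature_x_ucodes);
}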
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 107283b1eb1e..606a4b6a9812 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -170,7 +170,6 @@ static inline bool efi_runtime_supported(void)
return false;
}
-extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 705dafc2d11a..2bdbbbcfa393 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -841,7 +841,7 @@ union hv_gpa_page_range {
* count is equal with how many entries of union hv_gpa_page_range can
* be populated into the input parameter page.
*/
-#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) / \
+#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) / \
sizeof(union hv_gpa_page_range))
struct hv_guest_mapping_flush_list {
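The HV_MAX_FLUSH_REP_COUNT change above is purely an operator-precedence repair: without the added parentheses the division binds tighter than the subtraction, so only the two-u64 header was divided by the entry size. Rough numbers, assuming 4 KiB pages and an 8-byte union hv_gpa_page_range:
/* old: PAGE_SIZE - ((2 * sizeof(u64)) / sizeof(union hv_gpa_page_range))
 *        = 4096 - 16 / 8   = 4094  entries, far more than the page holds
 * new: (PAGE_SIZE - 2 * sizeof(u64)) / sizeof(union hv_gpa_page_range)
 *        = (4096 - 16) / 8 = 510   entries, what actually fits after the header
 */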
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0dd6b0f4000e..9f15384c504a 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -6,7 +6,7 @@
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
- * "Extreme" ones, like Broadwell-E.
+ * "Extreme" ones, like Broadwell-E, or Atom microserver.
*
* While adding a new CPUID for a new microarchitecture, add a new
* group to keep logically sorted out in chronological order. Within
@@ -52,6 +52,8 @@
#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
+#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
@@ -71,6 +73,7 @@
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
+#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
/* Xeon Phi */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4660ce90de7f..180373360e34 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
unsigned int cr4_la57:1;
+ unsigned int maxphyaddr:6;
};
};
@@ -397,6 +398,7 @@ struct kvm_mmu {
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
+ gpa_t root_cr3;
union kvm_mmu_role mmu_role;
u8 root_level;
u8 shadow_root_level;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 0ca50611e8ce..19d18fae6ec6 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+/*
+ * Init a new mm. Used on mm copies, like at fork()
+ * and on mm's that are brand-new, like at execve().
+ */
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -228,8 +232,22 @@ do { \
} while (0)
#endif
+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return;
+
+ /* Duplicate the oldmm pkey state in mm: */
+ mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+ mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
+#endif
+}
+
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
+ arch_dup_pkeys(oldmm, mm);
paravirt_arch_dup_mmap(oldmm, mm);
return ldt_dup_context(oldmm, mm);
}
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 91e4cf189914..5cc3930cb465 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -217,6 +217,8 @@ static __always_inline unsigned long long rdtsc(void)
*/
static __always_inline unsigned long long rdtsc_ordered(void)
{
+ DECLARE_ARGS(val, low, high);
+
/*
* The RDTSC instruction is not ordered relative to memory
* access. The Intel SDM and the AMD APM are both vague on this
@@ -227,9 +229,19 @@ static __always_inline unsigned long long rdtsc_ordered(void)
* ordering guarantees as reading from a global memory location
* that some other imaginary CPU is updating continuously with a
* time stamp.
+ *
+ * Thus, use the preferred barrier on the respective CPU, aiming for
+ * RDTSCP as the default.
*/
- barrier_nospec();
- return rdtsc();
+ asm volatile(ALTERNATIVE_3("rdtsc",
+ "mfence; rdtsc", X86_FEATURE_MFENCE_RDTSC,
+ "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
+ "rdtscp", X86_FEATURE_RDTSCP)
+ : EAX_EDX_RET(val, low, high)
+ /* RDTSCP clobbers ECX with MSR_TSC_AUX. */
+ :: "ecx");
+
+ return EAX_EDX_VAL(val, low, high);
}
static inline unsigned long long native_read_pmc(int counter)
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8f657286d599..0ce558a8150d 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,7 +7,11 @@
#endif
#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_STACK_ORDER 2
+#else
#define KASAN_STACK_ORDER 1
+#endif
#else
#define KASAN_STACK_ORDER 0
#endif
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index a97f28d914d5..c25c38a05c1c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -422,25 +422,26 @@ static inline pgdval_t pgd_val(pgd_t pgd)
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
pteval_t ret;
- ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, mm, addr, ptep);
+ ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);
return (pte_t) { .pte = ret };
}
-static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
{
+
if (sizeof(pteval_t) > sizeof(long))
/* 5 arg words */
- pv_ops.mmu.ptep_modify_prot_commit(mm, addr, ptep, pte);
+ pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
else
PVOP_VCALL4(mmu.ptep_modify_prot_commit,
- mm, addr, ptep, pte.pte);
+ vma, addr, ptep, pte.pte);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 488c59686a73..2474e434a6f7 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -55,6 +55,7 @@ struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
+struct vm_area_struct;
/*
* Wrapper type for pointers to code which uses the non-standard
@@ -254,9 +255,9 @@ struct pv_mmu_ops {
pte_t *ptep, pte_t pteval);
void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
- pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
+ pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep);
- void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+ void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t pte);
struct paravirt_callee_save pte_val;
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 662963681ea6..e662f987dfa2 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
+#include <linux/numa.h>
#include <asm/io.h>
#include <asm/pat.h>
#include <asm/x86_init.h>
@@ -141,7 +142,7 @@ cpumask_of_pcibus(const struct pci_bus *bus)
int node;
node = __pcibus_to_node(bus);
- return (node == -1) ? cpu_online_mask :
+ return (node == NUMA_NO_NODE) ? cpu_online_mask :
cpumask_of_node(node);
}
#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 40616e805292..2779ace16d23 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- native_set_pmd(pmdp, pmd);
+ set_pmd(pmdp, pmd);
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index dbaed55c1c24..232f856e0db0 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -67,16 +67,30 @@ static __always_inline void refcount_dec(refcount_t *r)
static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
- return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
+ bool ret = GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
REFCOUNT_CHECK_LT_ZERO,
r->refs.counter, e, "er", i, "cx");
+
+ if (ret) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ return false;
}
static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
- REFCOUNT_CHECK_LT_ZERO,
- r->refs.counter, e, "cx");
+ bool ret = GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
+ REFCOUNT_CHECK_LT_ZERO,
+ r->refs.counter, e, "cx");
+
+ if (ret) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ return false;
}
static __always_inline __must_check
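The refcount_sub_and_test()/refcount_dec_and_test() change adds smp_acquire__after_ctrl_dep() on the branch where the count reached zero, so the CPU performing the final put is ordered against everything the other holders wrote before dropping their references. A rough sketch of the pattern that depends on this, with hypothetical names:
#include <linux/refcount.h>
#include <linux/slab.h>
struct obj {
	refcount_t ref;
	int payload;	/* written by holders before their final put */
};
static void obj_put(struct obj *o)
{
	/*
	 * A true return now carries acquire semantics, so payload stores
	 * made on other CPUs before their put are visible here, before
	 * the object is freed.
	 */
	if (refcount_dec_and_test(&o->ref))
		kfree(o);
}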
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
index 54990fe2a3ae..f6b7fe2833cc 100644
--- a/arch/x86/include/asm/resctrl_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_RESCTRL_SCHED_H
#define _ASM_X86_RESCTRL_SCHED_H
-#ifdef CONFIG_RESCTRL
+#ifdef CONFIG_X86_CPU_RESCTRL
#include <linux/sched.h>
#include <linux/jump_label.h>
@@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void)
static inline void resctrl_sched_in(void) {}
-#endif /* CONFIG_RESCTRL */
+#endif /* CONFIG_X86_CPU_RESCTRL */
#endif /* _ASM_X86_RESCTRL_SCHED_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a77445d1b034..62004d22524a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -25,7 +25,6 @@
#define KERNEL_DS MAKE_MM_SEG(-1UL)
#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
-#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
@@ -76,7 +75,7 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
#endif
/**
- * access_ok: - Checks if a user space pointer is valid
+ * access_ok - Checks if a user space pointer is valid
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
@@ -85,12 +84,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
*
* Checks if a pointer to a block of memory in user space is valid.
*
- * Returns true (nonzero) if the memory block may be valid, false (zero)
- * if it is definitely invalid.
- *
* Note that, depending on architecture, this function probably just
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
+ *
+ * Return: true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
*/
#define access_ok(addr, size) \
({ \
@@ -135,7 +134,7 @@ extern int __get_user_bad(void);
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
/**
- * get_user: - Get a simple variable from user space.
+ * get_user - Get a simple variable from user space.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -149,7 +148,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
- * Returns zero on success, or -EFAULT on error.
+ * Return: zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
/*
@@ -227,7 +226,7 @@ extern void __put_user_4(void);
extern void __put_user_8(void);
/**
- * put_user: - Write a simple value into user space.
+ * put_user - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -241,7 +240,7 @@ extern void __put_user_8(void);
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
- * Returns zero on success, or -EFAULT on error.
+ * Return: zero on success, or -EFAULT on error.
*/
#define put_user(x, ptr) \
({ \
@@ -284,7 +283,7 @@ do { \
__put_user_goto(x, ptr, "l", "k", "ir", label); \
break; \
case 8: \
- __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
+ __put_user_goto_u64(x, ptr, label); \
break; \
default: \
__put_user_bad(); \
@@ -431,8 +430,10 @@ do { \
({ \
__label__ __pu_label; \
int __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __pu_val; \
+ __pu_val = x; \
__uaccess_begin(); \
- __put_user_size((x), (ptr), (size), __pu_label); \
+ __put_user_size(__pu_val, (ptr), (size), __pu_label); \
__pu_err = 0; \
__pu_label: \
__uaccess_end(); \
@@ -501,7 +502,7 @@ struct __large_struct { unsigned long buf[100]; };
} while (0)
/**
- * __get_user: - Get a simple variable from user space, with less checking.
+ * __get_user - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -518,7 +519,7 @@ struct __large_struct { unsigned long buf[100]; };
* Caller must check the pointer with access_ok() before calling this
* function.
*
- * Returns zero on success, or -EFAULT on error.
+ * Return: zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
@@ -526,7 +527,7 @@ struct __large_struct { unsigned long buf[100]; };
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/**
- * __put_user: - Write a simple value into user space, with less checking.
+ * __put_user - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -543,7 +544,7 @@ struct __large_struct { unsigned long buf[100]; };
* Caller must check the pointer with access_ok() before calling this
* function.
*
- * Returns zero on success, or -EFAULT on error.
+ * Return: zero on success, or -EFAULT on error.
*/
#define __put_user(x, ptr) \
@@ -711,7 +712,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
{
if (unlikely(!access_ok(ptr,len)))
return 0;
- __uaccess_begin();
+ __uaccess_begin_nospec();
return 1;
}
#define user_access_begin(a,b) user_access_begin(a,b)
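The hunk above that introduces __pu_val snapshots the value before __uaccess_begin(); the likely intent is that evaluating an arbitrary value expression, which may call other functions, no longer happens inside the STAC/CLAC user-access window. A hypothetical caller where that matters:
/* compute_state() stands in for real work that should not run with
 * user access open; in this sketch it is trivial. */
static u32 compute_state(void)
{
	return 42;
}
static int report_state(u32 __user *up)
{
	/*
	 * compute_state() is now evaluated before __uaccess_begin(),
	 * i.e. while SMAP protection is still in effect, rather than
	 * inside the user-access window.
	 */
	return __put_user(compute_state(), up);
}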
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index dc4ed8bc2382..146859efd83c 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -23,8 +23,8 @@
# include <asm/unistd_64.h>
# include <asm/unistd_64_x32.h>
-# define __ARCH_WANT_COMPAT_SYS_TIME
-# define __ARCH_WANT_SYS_UTIME32
+# define __ARCH_WANT_SYS_TIME
+# define __ARCH_WANT_SYS_UTIME
# define __ARCH_WANT_COMPAT_SYS_PREADV64
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
@@ -48,8 +48,8 @@
# define __ARCH_WANT_SYS_SIGPENDING
# define __ARCH_WANT_SYS_SIGPROCMASK
# define __ARCH_WANT_SYS_SOCKETCALL
-# define __ARCH_WANT_SYS_TIME
-# define __ARCH_WANT_SYS_UTIME
+# define __ARCH_WANT_SYS_TIME32
+# define __ARCH_WANT_SYS_UTIME32
# define __ARCH_WANT_SYS_WAITPID
# define __ARCH_WANT_SYS_FORK
# define __ARCH_WANT_SYS_VFORK
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index e652a7cc6186..3f697a9e3f59 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -48,7 +48,8 @@ enum {
BIOS_STATUS_SUCCESS = 0,
BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
BIOS_STATUS_EINVAL = -EINVAL,
- BIOS_STATUS_UNAVAIL = -EBUSY
+ BIOS_STATUS_UNAVAIL = -EBUSY,
+ BIOS_STATUS_ABORT = -EINTR,
};
/* Address map parameters */
@@ -167,4 +168,9 @@ extern long system_serial_number;
extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
+/*
+ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
+ */
+extern struct semaphore __efi_uv_runtime_lock;
+
#endif /* _ASM_X86_UV_BIOS_H */
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index f6648e9928b3..efe701b7c6ce 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -3,3 +3,4 @@ include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generated-y += unistd_64.h
generated-y += unistd_x32.h
+generic-y += socket.h
diff --git a/arch/x86/include/uapi/asm/socket.h b/arch/x86/include/uapi/asm/socket.h
deleted file mode 100644
index 6b71384b9d8b..000000000000
--- a/arch/x86/include/uapi/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ebeac487a20c..9a79c7808f9c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -11,6 +11,7 @@
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
@@ -393,10 +394,10 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
continue;
}
- DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
+ DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
a->cpuid >> 5,
a->cpuid & 0x1f,
- instr, a->instrlen,
+ instr, instr, a->instrlen,
replacement, a->replacementlen, a->padlen);
DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
@@ -764,8 +765,8 @@ int poke_int3_handler(struct pt_regs *regs)
regs->ip = (unsigned long) bp_int3_handler;
return 1;
-
}
+NOKPROBE_SYMBOL(poke_int3_handler);
/**
* text_poke_bp() -- update instructions on live kernel on SMP
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index a555da094157..1e225528f0d7 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -27,6 +27,7 @@
#include <linux/crash_dump.h>
#include <linux/reboot.h>
#include <linux/memory.h>
+#include <linux/numa.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
@@ -1390,7 +1391,7 @@ static void __init build_socket_tables(void)
}
/* Set socket -> node values: */
- lnid = -1;
+ lnid = NUMA_NO_NODE;
for_each_present_cpu(cpu) {
int nid = cpu_to_node(cpu);
int apicid, sockid;
@@ -1521,7 +1522,7 @@ static void __init uv_system_init_hub(void)
new_hub->pnode = 0xffff;
new_hub->numa_blade_id = uv_node_to_blade_id(nodeid);
- new_hub->memory_nid = -1;
+ new_hub->memory_nid = NUMA_NO_NODE;
new_hub->nr_possible_cpus = 0;
new_hub->nr_online_cpus = 0;
}
@@ -1538,7 +1539,7 @@ static void __init uv_system_init_hub(void)
uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++;
- if (uv_cpu_hub_info(cpu)->memory_nid == -1)
+ if (uv_cpu_hub_info(cpu)->memory_nid == NUMA_NO_NODE)
uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);
/* Init memoryless node: */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ac78f90aea56..cfd24f9f7614 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
obj-$(CONFIG_X86_MCE) += mce/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
-obj-$(CONFIG_RESCTRL) += resctrl/
+obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 69f6bbb41be0..01004bfb1a1b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -819,11 +819,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
- /*
- * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
- * all up to and including B1.
- */
- if (c->x86_model <= 1 && c->x86_stepping <= 1)
+
+ /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+ if (!cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 8654b8b0c848..2da82eff0eb4 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -71,7 +71,7 @@ void __init check_bugs(void)
* identify_boot_cpu() initialized SMT support information, let the
* core code know.
*/
- cpu_smt_check_topology_early();
+ cpu_smt_check_topology();
if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: ");
@@ -215,7 +215,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
SPECTRE_V2_USER_NONE;
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;
bool retpoline_module_ok(bool has_retpoline)
@@ -798,15 +798,25 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
if (task_spec_ssb_force_disable(task))
return -EPERM;
task_clear_spec_ssb_disable(task);
+ task_clear_spec_ssb_noexec(task);
task_update_spec_tif(task);
break;
case PR_SPEC_DISABLE:
task_set_spec_ssb_disable(task);
+ task_clear_spec_ssb_noexec(task);
task_update_spec_tif(task);
break;
case PR_SPEC_FORCE_DISABLE:
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
+ task_clear_spec_ssb_noexec(task);
+ task_update_spec_tif(task);
+ break;
+ case PR_SPEC_DISABLE_NOEXEC:
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_noexec(task);
task_update_spec_tif(task);
break;
default:
@@ -885,6 +895,8 @@ static int ssb_prctl_get(struct task_struct *task)
case SPEC_STORE_BYPASS_PRCTL:
if (task_spec_ssb_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ if (task_spec_ssb_noexec(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
if (task_spec_ssb_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
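PR_SPEC_DISABLE_NOEXEC behaves like PR_SPEC_DISABLE, except that, as the process.c hunk later in this diff shows, the forced SSBD state is dropped again when the task calls execve(). A user-space sketch; it assumes the uapi constant added elsewhere in this series has the value 1UL << 4:
#include <linux/prctl.h>
#include <sys/prctl.h>
#include <unistd.h>
#include <stdio.h>
#ifndef PR_SPEC_DISABLE_NOEXEC
#define PR_SPEC_DISABLE_NOEXEC (1UL << 4)
#endif
int main(int argc, char **argv)
{
	/* Force store-bypass disable for this task only. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE_NOEXEC, 0, 0))
		perror("prctl");
	printf("ssb state: 0x%x\n",
	       (int)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  0, 0, 0));
	/* The exec'ed image starts without the forced mitigation. */
	execv("/bin/true", argv);
	return 1;
}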
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index 3fed38812eea..6dd78d8235e4 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -48,3 +48,34 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
return NULL;
}
EXPORT_SYMBOL(x86_match_cpu);
+
+static const struct x86_cpu_desc *
+x86_match_cpu_with_stepping(const struct x86_cpu_desc *match)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ const struct x86_cpu_desc *m;
+
+ for (m = match; m->x86_family | m->x86_model; m++) {
+ if (c->x86_vendor != m->x86_vendor)
+ continue;
+ if (c->x86 != m->x86_family)
+ continue;
+ if (c->x86_model != m->x86_model)
+ continue;
+ if (c->x86_stepping != m->x86_stepping)
+ continue;
+ return m;
+ }
+ return NULL;
+}
+
+bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table)
+{
+ const struct x86_cpu_desc *res = x86_match_cpu_with_stepping(table);
+
+ if (!res || res->x86_microcode_rev > boot_cpu_data.microcode)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev);
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 672c7225cb1b..6ce290c506d9 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
quirk_no_way_out(i, m, regs);
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ m->bank = i;
mce_read_aux(m, i);
*msg = tmp;
return 1;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 51adde0a0f1a..e1f3ba19ba54 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
if (!p) {
return ret;
} else {
- if (boot_cpu_data.microcode == p->patch_id)
+ if (boot_cpu_data.microcode >= p->patch_id)
return ret;
ret = UCODE_NEW;
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
index 6895049ceef7..4a06c37b9cf1 100644
--- a/arch/x86/kernel/cpu/resctrl/Makefile
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o
-obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o
+obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
+obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o
CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c8b07d8ea5a2..17ffc869cab8 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image)
kbuf.memsz = kbuf.bufsz;
kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret) {
vfree((void *)image->arch.elf_headers);
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 374a52fa5296..9b33904251a9 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -388,10 +388,6 @@ static int __init setup_early_printk(char *buf)
if (!strncmp(buf, "xen", 3))
early_console_register(&xenboot_console, keep);
#endif
-#ifdef CONFIG_EARLY_PRINTK_EFI
- if (!strncmp(buf, "efi", 3))
- early_console_register(&early_efi_console, keep);
-#endif
#ifdef CONFIG_EARLY_PRINTK_USB_XDBC
if (!strncmp(buf, "xdbc", 4))
early_xdbc_parse_parameter(buf + 4);
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8257a59704ae..3e3789c8f8e1 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -269,7 +269,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ret;
}
-static int is_ftrace_caller(unsigned long ip)
+static nokprobe_inline int is_ftrace_caller(unsigned long ip)
{
if (ip == ftrace_update_func)
return 1;
@@ -299,6 +299,7 @@ int ftrace_int3_handler(struct pt_regs *regs)
return 1;
}
+NOKPROBE_SYMBOL(ftrace_int3_handler);
static int ftrace_write(unsigned long ip, const char *val, int size)
{
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b0acb22e5a46..dfd3aca82c61 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -21,10 +21,6 @@
#define HPET_MASK CLOCKSOURCE_MASK(32)
-/* FSEC = 10^-15
- NSEC = 10^-9 */
-#define FSEC_PER_NSEC 1000000L
-
#define HPET_DEV_USED_BIT 2
#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID 0x8
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 278cd07228dd..53917a3ebf94 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
struct efi_info *current_ei = &boot_params.efi_info;
struct efi_info *ei = &params->efi_info;
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
if (!current_ei->efi_memmap_size)
return 0;
@@ -434,6 +437,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
kbuf.memsz = PAGE_ALIGN(header->init_size);
kbuf.buf_align = header->kernel_alignment;
kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out_free_params;
@@ -448,6 +452,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
kbuf.bufsz = kbuf.memsz = initrd_len;
kbuf.buf_align = PAGE_SIZE;
kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out_free_params;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4ba75afba527..a034cb808e7e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1028,6 +1028,13 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
int __init arch_populate_kprobe_blacklist(void)
{
+ int ret;
+
+ ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+ if (ret)
+ return ret;
+
return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
(unsigned long)__entry_text_end);
}
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 6adf6e6c2933..f14262952015 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -97,6 +97,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
}
asm (
+ ".pushsection .rodata\n"
"optprobe_template_func:\n"
".global optprobe_template_entry\n"
"optprobe_template_entry:\n"
@@ -136,8 +137,7 @@ asm (
#endif
".global optprobe_template_end\n"
"optprobe_template_end:\n"
- ".type optprobe_template_func, @function\n"
- ".size optprobe_template_func, .-optprobe_template_func\n");
+ ".popsection\n");
void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index ba4bfb7f6a36..5c93a65ee1e5 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
#else
u64 ipi_bitmap = 0;
#endif
+ long ret;
if (cpumask_empty(mask))
return;
@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
max = apic_id < max ? max : apic_id;
} else {
- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
min = max = apic_id;
ipi_bitmap = 0;
}
@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
}
if (ipi_bitmap) {
- kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+ ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+ WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
}
local_irq_restore(flags);
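
[Editor's aside] The __send_ipi_mask() hunks stop discarding the PV-IPI hypercall's return value and warn once when the host rejects it. A hedged user-space sketch of the same check-and-warn-once pattern; the hypercall stub and its error value are invented for illustration:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the hypercall; a negative value means the host refused. */
static long fake_send_ipi_hypercall(unsigned long bitmap_lo,
                                    unsigned long bitmap_hi,
                                    unsigned long min_apic_id,
                                    unsigned long icr)
{
        (void)bitmap_lo; (void)bitmap_hi; (void)min_apic_id; (void)icr;
        return -38; /* pretend the hypercall is unsupported */
}

static void send_ipi_chunk(unsigned long lo, unsigned long hi,
                           unsigned long min, unsigned long icr)
{
        static bool warned;
        long ret = fake_send_ipi_hypercall(lo, hi, min, icr);

        /* Mirror WARN_ONCE(): report the first failure, then stay quiet. */
        if (ret < 0 && !warned) {
                warned = true;
                fprintf(stderr, "failed to send PV IPI: %ld\n", ret);
        }
}

int main(void)
{
        send_ipi_chunk(0x3, 0x0, 0, 0xfe);
        send_ipi_chunk(0x3, 0x0, 0, 0xfe); /* second failure is silent */
        return 0;
}
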
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 90ae0ca51083..58ac7be52c7a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -255,6 +255,18 @@ void arch_setup_new_exec(void)
/* If cpuid was previously disabled for this task, re-enable it. */
if (test_thread_flag(TIF_NOCPUID))
enable_cpuid();
+
+ /*
+ * Don't inherit TIF_SSBD across exec boundary when
+ * PR_SPEC_DISABLE_NOEXEC is used.
+ */
+ if (test_thread_flag(TIF_SSBD) &&
+ task_spec_ssb_noexec(current)) {
+ clear_thread_flag(TIF_SSBD);
+ task_clear_spec_ssb_disable(current);
+ task_clear_spec_ssb_noexec(current);
+ speculation_ctrl_update(task_thread_info(current)->flags);
+ }
}
static inline void switch_to_bitmap(struct thread_struct *prev,
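
[Editor's aside] The new block in arch_setup_new_exec() drops the SSB-disable state across exec when the task asked for PR_SPEC_DISABLE_NOEXEC. A simplified sketch of that flag bookkeeping with stand-in flag names, not the kernel's TIF/task bits:

#include <stdio.h>

#define TIF_SSBD        (1u << 0)  /* mitigation currently armed             */
#define SPEC_SSB_NOEXEC (1u << 1)  /* policy: do not carry it across exec    */

struct task {
        unsigned int tif;   /* per-thread work flags */
        unsigned int spec;  /* speculation-control policy bits */
};

/* Called at exec time: keep SSBD only if the policy allows inheriting it. */
static void setup_new_exec(struct task *t)
{
        if ((t->tif & TIF_SSBD) && (t->spec & SPEC_SSB_NOEXEC)) {
                t->tif  &= ~TIF_SSBD;
                t->spec &= ~SPEC_SSB_NOEXEC;
                /* the real kernel also reprograms the speculation MSRs here */
        }
}

int main(void)
{
        struct task t = { .tif = TIF_SSBD, .spec = SPEC_SSB_NOEXEC };

        setup_new_exec(&t);
        printf("tif=%#x spec=%#x\n", t.tif, t.spec); /* both cleared */
        return 0;
}
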
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e8796fcd7e5a..13af08827eef 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -171,7 +171,7 @@ void __init setup_per_cpu_areas(void)
unsigned long delta;
int rc;
- pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
+ pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ccd1f2a8e557..c91ff9f9fe8a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -56,6 +56,7 @@
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
+#include <linux/numa.h>
#include <asm/acpi.h>
#include <asm/desc.h>
@@ -841,7 +842,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
- static int current_node = -1;
+ static int current_node = NUMA_NO_NODE;
int node = early_cpu_to_node(cpu);
static int width, node_width;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9b7c4ca8f0a7..e289ce1332ab 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -111,6 +111,7 @@ void ist_enter(struct pt_regs *regs)
/* This code is a bit fragile. Test it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
}
+NOKPROBE_SYMBOL(ist_enter);
void ist_exit(struct pt_regs *regs)
{
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e9f777bfed40..3fae23834069 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -297,15 +297,16 @@ static int __init tsc_setup(char *str)
__setup("tsc=", tsc_setup);
-#define MAX_RETRIES 5
-#define SMI_TRESHOLD 50000
+#define MAX_RETRIES 5
+#define TSC_DEFAULT_THRESHOLD 0x20000
/*
- * Read TSC and the reference counters. Take care of SMI disturbance
+ * Read TSC and the reference counters. Take care of any disturbances
*/
static u64 tsc_read_refs(u64 *p, int hpet)
{
u64 t1, t2;
+ u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
int i;
for (i = 0; i < MAX_RETRIES; i++) {
@@ -315,7 +316,7 @@ static u64 tsc_read_refs(u64 *p, int hpet)
else
*p = acpi_pm_read_early();
t2 = get_cycles();
- if ((t2 - t1) < SMI_TRESHOLD)
+ if ((t2 - t1) < thresh)
return t2;
}
return ULLONG_MAX;
@@ -703,15 +704,15 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
* zero. In each wait loop iteration we read the TSC and check
* the delta to the previous read. We keep track of the min
* and max values of that delta. The delta is mostly defined
- * by the IO time of the PIT access, so we can detect when a
- * SMI/SMM disturbance happened between the two reads. If the
+ * by the IO time of the PIT access, so we can detect when
+ * any disturbance happened between the two reads. If the
* maximum time is significantly larger than the minimum time,
* then we discard the result and have another try.
*
* 2) Reference counter. If available we use the HPET or the
* PMTIMER as a reference to check the sanity of that value.
* We use separate TSC readouts and check inside of the
- * reference read for a SMI/SMM disturbance. We dicard
+ * reference read for any possible disturbance. We discard
* disturbed values here as well. We do that around the PIT
* calibration delay loop as we have to wait for a certain
* amount of time anyway.
@@ -744,7 +745,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
if (ref1 == ref2)
continue;
- /* Check, whether the sampling was disturbed by an SMI */
+ /* Check, whether the sampling was disturbed */
if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
continue;
@@ -1268,7 +1269,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
*/
static void tsc_refine_calibration_work(struct work_struct *work)
{
- static u64 tsc_start = -1, ref_start;
+ static u64 tsc_start = ULLONG_MAX, ref_start;
static int hpet;
u64 tsc_stop, ref_stop, delta;
unsigned long freq;
@@ -1283,14 +1284,15 @@ static void tsc_refine_calibration_work(struct work_struct *work)
* delayed the first time we expire. So set the workqueue
* again once we know timers are working.
*/
- if (tsc_start == -1) {
+ if (tsc_start == ULLONG_MAX) {
+restart:
/*
* Only set hpet once, to avoid mixing hardware
* if the hpet becomes enabled later.
*/
hpet = is_hpet_enabled();
- schedule_delayed_work(&tsc_irqwork, HZ);
tsc_start = tsc_read_refs(&ref_start, hpet);
+ schedule_delayed_work(&tsc_irqwork, HZ);
return;
}
@@ -1300,9 +1302,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
if (ref_start == ref_stop)
goto out;
- /* Check, whether the sampling was disturbed by an SMI */
- if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
- goto out;
+ /* Check, whether the sampling was disturbed */
+ if (tsc_stop == ULLONG_MAX)
+ goto restart;
delta = tsc_stop - tsc_start;
delta *= 1000000LL;
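
[Editor's aside] The tsc.c hunks replace the fixed SMI_TRESHOLD cycle budget with one derived from the measured TSC frequency, roughly 1/32 of the cycles per millisecond, with a conservative default before calibration. A small sketch of that arithmetic:

#include <stdio.h>

#define TSC_DEFAULT_THRESHOLD 0x20000ULL

/*
 * Pick the "this read was undisturbed" cutoff in TSC cycles: about 1/32
 * of the cycles per millisecond once the frequency is known, otherwise
 * a conservative default.
 */
static unsigned long long read_threshold(unsigned long tsc_khz)
{
        return tsc_khz ? (unsigned long long)tsc_khz >> 5
                       : TSC_DEFAULT_THRESHOLD;
}

int main(void)
{
        printf("%llu\n", read_threshold(0));        /* 131072 (default)  */
        printf("%llu\n", read_threshold(2900000));  /* 90625 cycles      */
        return 0;
}
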
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 69b3a7c30013..31ecf7a76d5a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -2,10 +2,6 @@
ccflags-y += -Iarch/x86/kvm
-CFLAGS_x86.o := -I.
-CFLAGS_svm.o := -I.
-CFLAGS_vmx.o := -I.
-
KVM := ../../../virt/kvm
kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index bbffa6c54697..c07958b59f50 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
+ unsigned f_la57 = 0;
/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
@@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+ f_la57 = entry->ecx & F(LA57);
cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ /* Set LA57 based on hardware capability. */
+ entry->ecx |= f_la57;
entry->ecx |= f_umip;
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c90a5352d158..89d20ed1d2e8 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
if (ret != HV_STATUS_INVALID_PORT_ID)
break;
- /* maybe userspace knows this conn_id: fall through */
+ /* fall through - maybe userspace knows this conn_id. */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
- ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
@@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
case HYPERV_CPUID_ENLIGHTMENT_INFO:
ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
- ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
- ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
+ if (evmcs_ver)
+ ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
/*
* Default number of spinlock retry attempts, matches
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9f089e2e09d0..4b6c2da7265c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
switch (delivery_mode) {
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
+ /* fall through */
case APIC_DM_FIXED:
if (unlikely(trig_mode && !level))
break;
@@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val);
+ /* fall through */
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ce770b446238..f2d1d230d5b8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
&invalid_list);
mmu->root_hpa = INVALID_PAGE;
}
+ mmu->root_cr3 = 0;
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
} else
BUG();
+ vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
return 0;
}
@@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
u64 pdptr, pm_mask;
- gfn_t root_gfn;
+ gfn_t root_gfn, root_cr3;
int i;
- root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
+ root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+ root_gfn = root_cr3 >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
return 1;
@@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu->root_hpa = root;
- return 0;
+ goto set_root_cr3;
}
/*
@@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
}
+set_root_cr3:
+ vcpu->arch.mmu->root_cr3 = root_cr3;
+
return 0;
}
@@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
struct kvm_mmu_root_info root;
struct kvm_mmu *mmu = vcpu->arch.mmu;
- root.cr3 = mmu->get_cr3(vcpu);
+ root.cr3 = mmu->root_cr3;
root.hpa = mmu->root_hpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
@@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
}
mmu->root_hpa = root.hpa;
+ mmu->root_cr3 = root.cr3;
return i < KVM_MMU_NUM_PREV_ROOTS;
}
@@ -4371,6 +4378,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[1][4] =
rsvd_check->rsvd_bits_mask[0][4];
+ /* fall through */
case PT64_ROOT_4LEVEL:
rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
@@ -4769,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
ext.cr4_pse = !!is_pse(vcpu);
ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+ ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
ext.valid = 1;
@@ -5515,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.root_mmu.root_cr3 = 0;
vcpu->arch.root_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.guest_mmu.root_cr3 = 0;
vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 307e5bddb6d9..f13a3a24d360 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
+ /*
+ * Drop what we picked up for L2 via svm_complete_interrupts() so it
+ * doesn't end up in L1.
+ */
+ svm->vcpu.arch.nmi_injected = false;
+ kvm_clear_exception_queue(&svm->vcpu);
+ kvm_clear_interrupt_queue(&svm->vcpu);
+
return 0;
}
@@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_IA32_APICBASE:
if (kvm_vcpu_apicv_active(vcpu))
avic_update_vapic_bar(to_svm(vcpu), data);
- /* Follow through */
+ /* Fall through */
default:
return kvm_set_msr_common(vcpu, msr);
}
@@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
- int i;
- struct kvm_vcpu *vcpu;
- struct kvm *kvm = svm->vcpu.kvm;
struct kvm_lapic *apic = svm->vcpu.arch.apic;
/*
- * At this point, we expect that the AVIC HW has already
- * set the appropriate IRR bits on the valid target
- * vcpus. So, we just need to kick the appropriate vcpu.
+ * Update ICR high and low, then emulate sending IPI,
+ * which is handled when writing APIC_ICR.
*/
- kvm_for_each_vcpu(i, vcpu, kvm) {
- bool m = kvm_apic_match_dest(vcpu, apic,
- icrl & KVM_APIC_SHORT_MASK,
- GET_APIC_DEST_FIELD(icrh),
- icrl & KVM_APIC_DEST_MASK);
-
- if (m && !avic_vcpu_is_running(vcpu))
- kvm_vcpu_wake_up(vcpu);
- }
+ kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+ kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
}
case AVIC_IPI_FAILURE_INVALID_TARGET:
+ WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
+ index, svm->vcpu.vcpu_id, icrh, icrl);
break;
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
@@ -6278,6 +6277,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
int asid, ret;
ret = -EBUSY;
+ if (unlikely(sev->active))
+ return ret;
+
asid = sev_asid_new();
if (asid < 0)
return ret;
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 705f40ae2532..6432d08c7de7 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex,
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/x86/kvm
+#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 95bc2247478d..5466c6d85cf3 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
+
+ vmx->nested.enlightened_vmcs_enabled = true;
if (vmcs_version)
*vmcs_version = nested_get_evmcs_version(vcpu);
/* We don't support disabling the feature for simplicity. */
- if (vmx->nested.enlightened_vmcs_enabled)
+ if (evmcs_already_enabled)
return 0;
- vmx->nested.enlightened_vmcs_enabled = true;
-
vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 3170e291215d..d737a51a53ca 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = {
static int max_shadow_read_write_fields =
ARRAY_SIZE(shadow_read_write_fields);
-void init_vmcs_shadow_fields(void)
+static void init_vmcs_shadow_fields(void)
{
int i, j;
@@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;
+ hrtimer_cancel(&vmx->nested.preemption_timer);
vmx->nested.vmxon = false;
vmx->nested.smm.vmxon = false;
free_vpid(vmx->nested.vpid02);
@@ -2472,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
return -EINVAL;
+ if (!nested_cpu_has_preemption_timer(vmcs12) &&
+ nested_cpu_has_save_preemption_timer(vmcs12))
+ return -EINVAL;
+
if (nested_cpu_has_ept(vmcs12) &&
!valid_ept_address(vcpu, vmcs12->ept_pointer))
return -EINVAL;
@@ -4140,11 +4145,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (r < 0)
goto out_vmcs02;
- vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_vmcs12)
goto out_cached_vmcs12;
- vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_shadow_vmcs12)
goto out_cached_shadow_vmcs12;
@@ -4540,9 +4545,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
* given physical address won't match the required
* VMCS12_REVISION identifier.
*/
- nested_vmx_failValid(vcpu,
+ return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
- return kvm_skip_emulated_instruction(vcpu);
}
new_vmcs12 = kmap(page);
if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
@@ -5264,13 +5268,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
copy_shadow_to_vmcs12(vmx);
}
- if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+ /*
+ * Copy over the full allocated size of vmcs12 rather than just the size
+ * of the struct.
+ */
+ if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
return -EFAULT;
if (nested_cpu_has_shadow_vmcs(vmcs12) &&
vmcs12->vmcs_link_pointer != -1ull) {
if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
- get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+ get_shadow_vmcs12(vcpu), VMCS12_SIZE))
return -EFAULT;
}
@@ -5553,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
* secondary cpu-based controls. Do not include those that
* depend on CPUID bits, they are added later by vmx_cpuid_update.
*/
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
- msrs->secondary_ctls_low,
- msrs->secondary_ctls_high);
+ if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+ msrs->secondary_ctls_low,
+ msrs->secondary_ctls_high);
+
msrs->secondary_ctls_low = 0;
msrs->secondary_ctls_high &=
SECONDARY_EXEC_DESC |
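
[Editor's aside] Two related fixes in nested.c: the cached vmcs12 buffers are now zero-filled at allocation, and the full VMCS12_SIZE is copied out to user space instead of sizeof(*vmcs12), so the trailing bytes of the fixed-size blob are always defined. A hedged user-space sketch of that pattern (calloc stands in for kzalloc, the sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 4096              /* fixed ABI size handed to the consumer */

struct state { int a, b; };        /* logically smaller than BUF_SIZE */

int main(void)
{
        /* calloc (like kzalloc) guarantees the bytes past the struct are 0 */
        unsigned char *buf = calloc(1, BUF_SIZE);
        if (!buf)
                return 1;

        struct state *s = (struct state *)buf;
        s->a = 1;
        s->b = 2;

        /* Export the full fixed-size blob, not just sizeof(*s): every byte
         * the consumer sees is now well defined. */
        unsigned char out[BUF_SIZE];
        memcpy(out, buf, BUF_SIZE);

        printf("last byte: %d\n", out[BUF_SIZE - 1]); /* 0, never garbage */
        free(buf);
        return 0;
}
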
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4d39f731bc33..30a6bcd735ec 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -26,6 +26,7 @@
#include <linux/mod_devicetable.h>
#include <linux/mm.h>
#include <linux/sched.h>
+#include <linux/sched/smt.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
@@ -423,7 +424,7 @@ static void check_ept_pointer_match(struct kvm *kvm)
to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
}
-int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
void *data)
{
struct kvm_tlb_range *range = data;
@@ -453,7 +454,7 @@ static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
struct kvm_tlb_range *range)
{
struct kvm_vcpu *vcpu;
- int ret = -ENOTSUPP, i;
+ int ret = 0, i;
spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
@@ -862,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
if (!entry_only)
j = find_msr(&m->host, msr);
- if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+ if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
+ (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
return;
@@ -1192,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
return;
- /*
- * First handle the simple case where no cmpxchg is necessary; just
- * allow posting non-urgent interrupts.
- *
- * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
- * PI.NDST: pi_post_block will do it for us and the wakeup_handler
- * expects the VCPU to be on the blocked_vcpu_list that matches
- * PI.NDST.
- */
- if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
- vcpu->cpu == cpu) {
- pi_clear_sn(pi_desc);
- return;
- }
-
/* The full case. */
do {
old.control = new.control = pi_desc->control;
@@ -1221,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
new.sn = 0;
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
+
+ /*
+ * Clear SN before reading the bitmap. The VT-d firmware
+ * writes the bitmap and reads SN atomically (5.2.3 in the
+ * spec), so it doesn't really have a memory barrier that
+ * pairs with this, but we cannot do that and we need one.
+ */
+ smp_mb__after_atomic();
+
+ if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS))
+ pi_set_on(pi_desc);
}
/*
@@ -1773,7 +1771,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
return 1;
- /* Otherwise falls through */
+ /* Else, falls through */
default:
msr = find_msr_entry(vmx, msr_info->index);
if (msr) {
@@ -2014,7 +2012,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* Check reserved bit, higher 32 bits should be zero */
if ((data >> 32) != 0)
return 1;
- /* Otherwise falls through */
+ /* Else, falls through */
default:
msr = find_msr_entry(vmx, msr_index);
if (msr) {
@@ -2344,7 +2342,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
case 37: /* AAT100 */
case 44: /* BC86,AAY89,BD102 */
case 46: /* BA97 */
- _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
_vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
"does not work properly. Using workaround\n");
@@ -6362,72 +6360,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->hv_timer_armed = false;
}
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long cr3, cr4, evmcs_rsp;
-
- /* Record the guest's net vcpu time for enforced NMI injections. */
- if (unlikely(!enable_vnmi &&
- vmx->loaded_vmcs->soft_vnmi_blocked))
- vmx->loaded_vmcs->entry_time = ktime_get();
-
- /* Don't enter VMX if guest state is invalid, let the exit handler
- start emulation until we arrive back to a valid state */
- if (vmx->emulation_required)
- return;
-
- if (vmx->ple_window_dirty) {
- vmx->ple_window_dirty = false;
- vmcs_write32(PLE_WINDOW, vmx->ple_window);
- }
-
- if (vmx->nested.need_vmcs12_sync)
- nested_sync_from_vmcs12(vcpu);
-
- if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
- vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
- if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
- vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
- cr3 = __get_current_cr3_fast();
- if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
- vmcs_writel(HOST_CR3, cr3);
- vmx->loaded_vmcs->host_state.cr3 = cr3;
- }
-
- cr4 = cr4_read_shadow();
- if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
- vmcs_writel(HOST_CR4, cr4);
- vmx->loaded_vmcs->host_state.cr4 = cr4;
- }
-
- /* When single-stepping over STI and MOV SS, we must clear the
- * corresponding interruptibility bits in the guest state. Otherwise
- * vmentry fails as it then expects bit 14 (BS) in pending debug
- * exceptions being set, but that's not correct for the guest debugging
- * case. */
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- vmx_set_interrupt_shadow(vcpu, 0);
-
- if (static_cpu_has(X86_FEATURE_PKU) &&
- kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
- vcpu->arch.pkru != vmx->host_pkru)
- __write_pkru(vcpu->arch.pkru);
-
- pt_guest_enter(vmx);
-
- atomic_switch_perf_msrs(vmx);
-
- vmx_update_hv_timer(vcpu);
-
- /*
- * If this vCPU has touched SPEC_CTRL, restore the guest's value if
- * it's non-zero. Since vmentry is serialising on affected CPUs, there
- * is no need to worry about the conditional branch over the wrmsr
- * being speculatively taken.
- */
- x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+ unsigned long evmcs_rsp;
vmx->__launched = vmx->loaded_vmcs->launched;
@@ -6567,6 +6502,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
, "eax", "ebx", "edi"
#endif
);
+}
+STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
+
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long cr3, cr4;
+
+ /* Record the guest's net vcpu time for enforced NMI injections. */
+ if (unlikely(!enable_vnmi &&
+ vmx->loaded_vmcs->soft_vnmi_blocked))
+ vmx->loaded_vmcs->entry_time = ktime_get();
+
+ /* Don't enter VMX if guest state is invalid, let the exit handler
+ start emulation until we arrive back to a valid state */
+ if (vmx->emulation_required)
+ return;
+
+ if (vmx->ple_window_dirty) {
+ vmx->ple_window_dirty = false;
+ vmcs_write32(PLE_WINDOW, vmx->ple_window);
+ }
+
+ if (vmx->nested.need_vmcs12_sync)
+ nested_sync_from_vmcs12(vcpu);
+
+ if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+ vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+ if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+ vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
+ cr3 = __get_current_cr3_fast();
+ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
+ }
+
+ cr4 = cr4_read_shadow();
+ if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+ vmcs_writel(HOST_CR4, cr4);
+ vmx->loaded_vmcs->host_state.cr4 = cr4;
+ }
+
+ /* When single-stepping over STI and MOV SS, we must clear the
+ * corresponding interruptibility bits in the guest state. Otherwise
+ * vmentry fails as it then expects bit 14 (BS) in pending debug
+ * exceptions being set, but that's not correct for the guest debugging
+ * case. */
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vmx_set_interrupt_shadow(vcpu, 0);
+
+ if (static_cpu_has(X86_FEATURE_PKU) &&
+ kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+ vcpu->arch.pkru != vmx->host_pkru)
+ __write_pkru(vcpu->arch.pkru);
+
+ pt_guest_enter(vmx);
+
+ atomic_switch_perf_msrs(vmx);
+
+ vmx_update_hv_timer(vcpu);
+
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+ __vmx_vcpu_run(vcpu, vmx);
/*
* We do not use IBRS in the kernel. If this vCPU has used the
@@ -6648,7 +6654,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
}
-STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
static struct kvm *vmx_vm_alloc(void)
{
@@ -6816,7 +6821,7 @@ static int vmx_vm_init(struct kvm *kvm)
* Warn upon starting the first VM in a potentially
* insecure environment.
*/
- if (cpu_smt_control == CPU_SMT_ENABLED)
+ if (sched_smt_active())
pr_warn_once(L1TF_MSG_SMT);
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
pr_warn_once(L1TF_MSG_L1D);
@@ -7044,7 +7049,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
/* unmask address range configure area */
for (i = 0; i < vmx->pt_desc.addr_range; i++)
- vmx->pt_desc.ctl_bitmask &= ~(0xf << (32 + i * 4));
+ vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
}
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
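
[Editor's aside] Among the vmx.c changes, the add_atomic_switch_msr() hunk fixes the capacity test: running out of slots is an error only when the MSR is not already present and the list is full. A simplified find-or-append sketch of that logic, with invented slot counts:

#include <stdio.h>

#define NR_SLOTS 4

struct msr_list {
        int nr;
        unsigned int index[NR_SLOTS];
};

static int find_msr(const struct msr_list *l, unsigned int msr)
{
        for (int i = 0; i < l->nr; i++)
                if (l->index[i] == msr)
                        return i;
        return -1; /* not present */
}

/* Add or update an entry; fail only if a new entry is needed and we're full. */
static int add_or_update(struct msr_list *l, unsigned int msr)
{
        int i = find_msr(l, msr);

        if (i < 0 && l->nr == NR_SLOTS)
                return -1;              /* genuinely out of slots */
        if (i < 0)
                i = l->nr++;            /* append a fresh entry   */
        l->index[i] = msr;              /* (re)write the slot     */
        return 0;
}

int main(void)
{
        struct msr_list l = { .nr = 0 };

        for (unsigned int m = 0x10; m < 0x14; m++)
                add_or_update(&l, m);
        /* updating an existing MSR still succeeds on a full list */
        printf("%d\n", add_or_update(&l, 0x10));  /* 0  */
        printf("%d\n", add_or_update(&l, 0x99));  /* -1 */
        return 0;
}
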
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 99328954c2fc..0ac0a64c7790 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
-static inline void pi_clear_sn(struct pi_desc *pi_desc)
+static inline void pi_set_sn(struct pi_desc *pi_desc)
{
- return clear_bit(POSTED_INTR_SN,
+ return set_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
-static inline void pi_set_sn(struct pi_desc *pi_desc)
+static inline void pi_set_on(struct pi_desc *pi_desc)
{
- return set_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
+ set_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->control);
}
static inline void pi_clear_on(struct pi_desc *pi_desc)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02c8e095a239..941f932373d0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
case KVM_CAP_HYPERV_SYNIC2:
if (cap->args[0])
return -EINVAL;
+ /* fall through */
+
case KVM_CAP_HYPERV_SYNIC:
if (!irqchip_in_kernel(vcpu->kvm))
return -EINVAL;
@@ -5114,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ /*
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+ * is returned, but our callers are not ready for that and they blindly
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
+ * uninitialized kernel stack memory into cr2 and error code.
+ */
+ memset(exception, 0, sizeof(*exception));
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
@@ -6480,8 +6489,7 @@ restart:
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE &&
- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ if (r == EMULATE_DONE && ctxt->tf)
kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -7093,10 +7101,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
case KVM_HC_CLOCK_PAIRING:
ret = kvm_pv_clock_pairing(vcpu, a0, a1);
break;
+#endif
case KVM_HC_SEND_IPI:
ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
break;
-#endif
default:
ret = -KVM_ENOSYS;
break;
@@ -7793,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* 1) We should set ->mode before checking ->requests. Please see
* the comment in kvm_vcpu_exiting_guest_mode().
*
- * 2) For APICv, we should set ->mode before checking PIR.ON. This
+ * 2) For APICv, we should set ->mode before checking PID.ON. This
* pairs with the memory barrier implicit in pi_test_and_set_on
* (see vmx_deliver_posted_interrupt).
*
@@ -7937,6 +7945,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
+ /* fall through */
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
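
[Editor's aside] The kvm_read_guest_virt() hunk zeroes the exception out-parameter up front so callers that consume it unconditionally never see uninitialized stack bytes. A minimal sketch of the defensive pattern with illustrative names:

#include <stdio.h>
#include <string.h>

struct fault_info {
        unsigned int vector;
        unsigned long cr2;
        unsigned int error_code;
};

/* Out-parameters that the caller may read even on success should be fully
 * initialized; otherwise stack garbage can leak to the caller. */
static int read_guest(void *dst, size_t len, struct fault_info *fault)
{
        memset(fault, 0, sizeof(*fault));   /* defined contents, always */
        memset(dst, 0xab, len);             /* pretend the read worked  */
        return 0;
}

int main(void)
{
        unsigned char buf[8];
        struct fault_info fi;

        read_guest(buf, sizeof(buf), &fi);
        printf("vector=%u cr2=%#lx ec=%u\n", fi.vector, fi.cr2, fi.error_code);
        return 0;
}
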
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 66894675f3c8..df50451d94ef 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -2,8 +2,11 @@
#include <linux/module.h>
#include <linux/io.h>
+#define movs(type,to,from) \
+ asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
+
/* Originally from i386/string.h */
-static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
+static __always_inline void rep_movs(void *to, const void *from, size_t n)
{
unsigned long d0, d1, d2;
asm volatile("rep ; movsl\n\t"
@@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
{
- __iomem_memcpy(to, (const void *)from, n);
+ if (unlikely(!n))
+ return;
+
+ /* Align any unaligned source IO */
+ if (unlikely(1 & (unsigned long)from)) {
+ movs("b", to, from);
+ n--;
+ }
+ if (n > 1 && unlikely(2 & (unsigned long)from)) {
+ movs("w", to, from);
+ n-=2;
+ }
+ rep_movs(to, (const void *)from, n);
}
EXPORT_SYMBOL(memcpy_fromio);
void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
{
- __iomem_memcpy((void *)to, (const void *) from, n);
+ if (unlikely(!n))
+ return;
+
+ /* Align any unaligned destination IO */
+ if (unlikely(1 & (unsigned long)to)) {
+ movs("b", to, from);
+ n--;
+ }
+ if (n > 1 && unlikely(2 & (unsigned long)to)) {
+ movs("w", to, from);
+ n-=2;
+ }
+ rep_movs((void *)to, (const void *) from, n);
}
EXPORT_SYMBOL(memcpy_toio);
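
[Editor's aside] The iomem.c change peels off a leading byte and/or 16-bit word so the bulk copy starts on a 4-byte-aligned I/O address. A plain-memory sketch of the same head-alignment logic (no real MMIO; memcpy stands in for the string moves):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Align the source to 4 bytes before the bulk copy, mirroring the
 * byte/word peeling the patch adds. */
static void copy_aligned_head(void *to, const void *from, size_t n)
{
        unsigned char *d = to;
        const unsigned char *s = from;

        if (!n)
                return;
        if ((uintptr_t)s & 1) {               /* odd address: one byte    */
                *d++ = *s++;
                n--;
        }
        if (n > 1 && ((uintptr_t)s & 2)) {    /* 2-aligned: one 16-bit    */
                memcpy(d, s, 2);
                d += 2; s += 2; n -= 2;
        }
        memcpy(d, s, n);                      /* bulk copy, now 4-aligned */
}

int main(void)
{
        char src[16] = "abcdefghijklmno";
        char dst[16] = { 0 };

        copy_aligned_head(dst, src + 1, 10);  /* deliberately misaligned */
        printf("%.10s\n", dst);               /* bcdefghijk */
        return 0;
}
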
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 79778ab200e4..a53665116458 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
u16 status, timer;
do {
- outb(I8254_PORT_CONTROL,
- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+ I8254_PORT_CONTROL);
status = inb(I8254_PORT_COUNTER0);
timer = inb(I8254_PORT_COUNTER0);
timer |= inb(I8254_PORT_COUNTER0) << 8;
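
[Editor's aside] The kaslr.c fix swaps the outb() operands: on x86 the convention is outb(value, port), so the old call wrote the port number as if it were the command byte. A small sketch with a mock outb() showing the corrected call; the printed output is only illustrative:

#include <stdio.h>
#include <stdint.h>

#define I8254_PORT_CONTROL     0x43
#define I8254_CMD_READBACK     0xc0
#define I8254_SELECT_COUNTER0  0x02

/* Mock of the x86 convention: outb(value, port). */
static void outb(uint8_t value, uint16_t port)
{
        printf("write %#04x to port %#04x\n", value, port);
}

int main(void)
{
        /* Correct order per the fix: command byte goes to the control port. */
        outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);

        /* The pre-fix call had the operands swapped, so the port number
         * would have been written as the command, and to the wrong port. */
        return 0;
}
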
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index bfd94e7812fc..7d290777246d 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -54,13 +54,13 @@ do { \
} while (0)
/**
- * clear_user: - Zero a block of memory in user space.
+ * clear_user - Zero a block of memory in user space.
* @to: Destination address, in user space.
* @n: Number of bytes to zero.
*
* Zero a block of memory in user space.
*
- * Returns number of bytes that could not be cleared.
+ * Return: number of bytes that could not be cleared.
* On success, this will be zero.
*/
unsigned long
@@ -74,14 +74,14 @@ clear_user(void __user *to, unsigned long n)
EXPORT_SYMBOL(clear_user);
/**
- * __clear_user: - Zero a block of memory in user space, with less checking.
+ * __clear_user - Zero a block of memory in user space, with less checking.
* @to: Destination address, in user space.
* @n: Number of bytes to zero.
*
* Zero a block of memory in user space. Caller must check
* the specified block with access_ok() before calling this function.
*
- * Returns number of bytes that could not be cleared.
+ * Return: number of bytes that could not be cleared.
* On success, this will be zero.
*/
unsigned long
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 6521134057e8..3c4568f8fb28 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -117,67 +117,12 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
-/* Helper to check whether a uaccess fault indicates a kernel bug. */
-static bool bogus_uaccess(struct pt_regs *regs, int trapnr,
- unsigned long fault_addr)
-{
- /* This is the normal case: #PF with a fault address in userspace. */
- if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX)
- return false;
-
- /*
- * This code can be reached for machine checks, but only if the #MC
- * handler has already decided that it looks like a candidate for fixup.
- * This e.g. happens when attempting to access userspace memory which
- * the CPU can't access because of uncorrectable bad memory.
- */
- if (trapnr == X86_TRAP_MC)
- return false;
-
- /*
- * There are two remaining exception types we might encounter here:
- * - #PF for faulting accesses to kernel addresses
- * - #GP for faulting accesses to noncanonical addresses
- * Complain about anything else.
- */
- if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) {
- WARN(1, "unexpected trap %d in uaccess\n", trapnr);
- return false;
- }
-
- /*
- * This is a faulting memory access in kernel space, on a kernel
- * address, in a usercopy function. This can e.g. be caused by improper
- * use of helpers like __put_user and by improper attempts to access
- * userspace addresses in KERNEL_DS regions.
- * The one (semi-)legitimate exception are probe_kernel_{read,write}(),
- * which can be invoked from places like kgdb, /dev/mem (for reading)
- * and privileged BPF code (for reading).
- * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag
- * to tell us that faulting on kernel addresses, and even noncanonical
- * addresses, in a userspace accessor does not necessarily imply a
- * kernel bug, root might just be doing weird stuff.
- */
- if (current->kernel_uaccess_faults_ok)
- return false;
-
- /* This is bad. Refuse the fixup so that we go into die(). */
- if (trapnr == X86_TRAP_PF) {
- pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n",
- fault_addr);
- } else {
- pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n");
- }
- return true;
-}
-
__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
unsigned long fault_addr)
{
- if (bogus_uaccess(regs, trapnr, fault_addr))
- return false;
+ WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
regs->ip = ex_fixup_addr(fixup);
return true;
}
@@ -188,8 +133,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup,
unsigned long error_code,
unsigned long fault_addr)
{
- if (bogus_uaccess(regs, trapnr, fault_addr))
- return false;
/* Special hack for uaccess_err */
current->thread.uaccess_err = 1;
regs->ip = ex_fixup_addr(fixup);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2ff25ad33233..9d5c75f02295 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
return;
}
- addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24);
+ addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
#ifdef CONFIG_X86_64
addr |= ((u64)desc.base3 << 32);
#endif
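
[Editor's aside] The show_ldttss() fix widens desc.base2 before shifting: the 8-bit field is promoted to signed int, a shift by 24 can set the sign bit, and the subsequent widening sign-extends into the upper half of the address. A standalone sketch of that promotion pitfall:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t base2 = 0xff;           /* top byte of a descriptor base */

        /*
         * What the old expression effectively did: the 8-bit field is
         * promoted to (signed) int before the shift, the result has the
         * sign bit set, and widening it to unsigned long sign-extends.
         */
        int promoted = (int)((unsigned int)base2 << 24);
        unsigned long bad = (unsigned long)promoted;

        /* The fix: widen to unsigned long *before* shifting. */
        unsigned long good = (unsigned long)base2 << 24;

        printf("bad  = %#lx\n", bad);   /* 0xffffffffff000000 on LP64 */
        printf("good = %#lx\n", good);  /* 0xff000000                 */
        return 0;
}
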
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 5378d10f1d31..0029604af8a4 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -705,7 +705,7 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
return arch_memremap_can_ram_remap(phys_addr, size, 0);
}
-#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
+#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
unsigned long size)
@@ -747,7 +747,7 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
-#endif /* CONFIG_ARCH_USE_MEMREMAP_PROT */
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index a19ef1a416ff..4aa9b1480866 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
pmd = pmd_offset(pud, ppd->vaddr);
if (pmd_none(*pmd)) {
pte = ppd->pgtable_area;
- memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
- ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+ memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+ ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
}
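
[Editor's aside] The sme_populate_pgd() fix uses sizeof(*pte) rather than sizeof(pte); the two differ whenever the entry type is wider than a pointer (as with 32-bit PAE page-table entries). A sketch with a deliberately oversized entry type to make the difference visible:

#include <stdio.h>
#include <string.h>

/* An entry type larger than a pointer (as pte_t is on 32-bit PAE). */
typedef struct { unsigned long long lo, hi; } entry_t;   /* 16 bytes */
#define NENTRIES 4

int main(void)
{
        entry_t table[NENTRIES];
        entry_t *e = table;

        memset(table, 0xff, sizeof(table));

        /* Buggy form: sizeof(e) is the pointer size, so only part of the
         * table gets cleared. */
        memset(e, 0, sizeof(e) * NENTRIES);
        printf("cleared %zu of %zu bytes\n",
               sizeof(e) * NENTRIES, sizeof(table));

        /* Fixed form: sizeof(*e) is the element size. */
        memset(e, 0, sizeof(*e) * NENTRIES);
        printf("cleared %zu of %zu bytes\n",
               sizeof(*e) * NENTRIES, sizeof(table));
        return 0;
}
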
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index de1851d15699..c805db6236b4 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -9,12 +9,12 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
-#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 1308f5408bf7..12c1b7a83ed7 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -123,7 +123,7 @@ void __init setup_node_to_cpumask_map(void)
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
/* cpumask_of_node() will now work */
- pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
+ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
@@ -866,7 +866,7 @@ const struct cpumask *cpumask_of_node(int node)
{
if (node >= nr_node_ids) {
printk(KERN_WARNING
- "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
+ "cpumask_of_node(%d): node > nr_node_ids(%u)\n",
node, nr_node_ids);
dump_stack();
return cpu_none_mask;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4f8972311a77..14e6119838a6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
#endif
+/*
+ * See set_mce_nospec().
+ *
+ * Machine check recovery code needs to change cache mode of poisoned pages to
+ * UC to avoid speculative access logging another error. But passing the
+ * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
+ * speculative access. So we cheat and flip the top bit of the address. This
+ * works fine for the code that updates the page tables. But at the end of the
+ * process we need to flush the TLB and cache and the non-canonical address
+ * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long fix_addr(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+ return (long)(addr << 1) >> 1;
+#else
+ return addr;
+#endif
+}
+
static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
if (cpa->flags & CPA_PAGES_ARRAY) {
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
unsigned int i;
for (i = 0; i < cpa->numpages; i++)
- __flush_tlb_one_kernel(__cpa_addr(cpa, i));
+ __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}
static void cpa_flush(struct cpa_data *data, int cache)
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
* Only flush present addresses:
*/
if (pte && (pte_val(*pte) & _PAGE_PRESENT))
- clflush_cache_range_opt((void *)addr, PAGE_SIZE);
+ clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
}
mb();
}
@@ -1627,29 +1650,6 @@ out:
return ret;
}
-/*
- * Machine check recovery code needs to change cache mode of poisoned
- * pages to UC to avoid speculative access logging another error. But
- * passing the address of the 1:1 mapping to set_memory_uc() is a fine
- * way to encourage a speculative access. So we cheat and flip the top
- * bit of the address. This works fine for the code that updates the
- * page tables. But at the end of the process we need to flush the cache
- * and the non-canonical address causes a #GP fault when used by the
- * CLFLUSH instruction.
- *
- * But in the common case we already have a canonical address. This code
- * will fix the top bit if needed and is a no-op otherwise.
- */
-static inline unsigned long make_addr_canonical_again(unsigned long addr)
-{
-#ifdef CONFIG_X86_64
- return (long)(addr << 1) >> 1;
-#else
- return addr;
-#endif
-}
-
-
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
int force_split, int in_flag,
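
[Editor's aside] The pageattr.c change moves the top-bit trick into fix_addr() and applies it to both the TLB flush and the cache flush. The helper itself is shown in the hunk above; here is a small user-space harness around the same expression, with an invented direct-map address, relying on the arithmetic right shift GCC provides for negative values:

#include <stdio.h>

/*
 * Shifting the top bit out and arithmetic-shifting back replicates bit 62
 * into bit 63, turning the "flipped top bit" alias back into a canonical
 * address while leaving already-canonical addresses untouched.
 */
static unsigned long fix_addr(unsigned long addr)
{
        return (unsigned long)((long)(addr << 1) >> 1);
}

int main(void)
{
        unsigned long canonical = 0xffff888000001000UL;     /* illustrative */
        unsigned long decoy     = canonical & ~(1UL << 63); /* top bit flipped */

        printf("%#lx -> %#lx\n", decoy,     fix_addr(decoy));
        printf("%#lx -> %#lx\n", canonical, fix_addr(canonical));
        return 0;
}
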
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5542303c43d9..afabf597c855 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -881,20 +881,41 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
/* cmp dst_reg, src_reg */
- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
- add_2reg(0xC0, dst_reg, src_reg));
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_2mod(0x48, dst_reg, src_reg));
+ else if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
/* test dst_reg, src_reg */
- EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
- add_2reg(0xC0, dst_reg, src_reg));
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_2mod(0x48, dst_reg, src_reg));
+ else if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
goto emit_cond_jmp;
case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
/* test dst_reg, imm32 */
- EMIT1(add_1mod(0x48, dst_reg));
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_1mod(0x48, dst_reg));
+ else if (is_ereg(dst_reg))
+ EMIT1(add_1mod(0x40, dst_reg));
EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
goto emit_cond_jmp;
@@ -908,8 +929,21 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
/* cmp dst_reg, imm8/32 */
- EMIT1(add_1mod(0x48, dst_reg));
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ EMIT1(add_1mod(0x48, dst_reg));
+ else if (is_ereg(dst_reg))
+ EMIT1(add_1mod(0x40, dst_reg));
if (is_imm8(imm32))
EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
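
[Editor's aside] The JMP32 additions emit REX.W only for 64-bit compares and fall back to a plain REX byte only when an extended register is involved. A simplified sketch of that prefix choice; the register numbering and helpers are illustrative, not the JIT's actual mapping:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative helpers; the real JIT maps BPF registers to x86 encodings. */
static bool is_ereg(int reg) { return reg >= 8; }       /* r8..r15 */

static int add_2mod(int byte, int dst, int src)
{
        if (is_ereg(dst))
                byte |= 1;      /* REX.B */
        if (is_ereg(src))
                byte |= 4;      /* REX.R */
        return byte;
}

/*
 * Prefix for a register-register compare: 64-bit compares always need
 * REX.W; 32-bit compares need a REX byte only for extended registers.
 */
static void emit_cmp_prefix(bool jmp64, int dst, int src)
{
        if (jmp64)
                printf("prefix %#04x\n", add_2mod(0x48, dst, src));
        else if (is_ereg(dst) || is_ereg(src))
                printf("prefix %#04x\n", add_2mod(0x40, dst, src));
        else
                printf("no prefix\n");
}

int main(void)
{
        emit_cmp_prefix(true, 0, 1);   /* 0x48 */
        emit_cmp_prefix(false, 0, 1);  /* none */
        emit_cmp_prefix(false, 9, 1);  /* 0x41 */
        return 0;
}
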
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 8f6cc71e0848..0d9cdffce6ac 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2072,7 +2072,18 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
- case BPF_JMP | BPF_JSGE | BPF_X: {
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
@@ -2081,25 +2092,35 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
- STACK_VAR(dst_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
}
if (sstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX),
- STACK_VAR(src_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EBX),
+ STACK_VAR(src_hi));
}
- /* cmp dreg_hi,sreg_hi */
- EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
- EMIT2(IA32_JNE, 2);
+ if (is_jmp64) {
+ /* cmp dreg_hi,sreg_hi */
+ EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+ EMIT2(IA32_JNE, 2);
+ }
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
goto emit_cond_jmp;
}
- case BPF_JMP | BPF_JSET | BPF_X: {
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
@@ -2108,15 +2129,21 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
- STACK_VAR(dst_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
}
if (sstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX),
- STACK_VAR(src_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EBX),
+ STACK_VAR(src_hi));
}
/* and dreg_lo,sreg_lo */
EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
@@ -2126,32 +2153,39 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
goto emit_cond_jmp;
}
- case BPF_JMP | BPF_JSET | BPF_K: {
- u32 hi;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
+ u32 hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
- STACK_VAR(dst_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
}
- hi = imm32 & (1<<31) ? (u32)~0 : 0;
/* mov ecx,imm32 */
- EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
- /* mov ebx,imm32 */
- EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
+ EMIT2_off32(0xC7, add_1reg(0xC0, sreg_lo), imm32);
/* and dreg_lo,sreg_lo */
EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
- /* and dreg_hi,sreg_hi */
- EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
- /* or dreg_lo,dreg_hi */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+ if (is_jmp64) {
+ hi = imm32 & (1 << 31) ? (u32)~0 : 0;
+ /* mov ebx,imm32 */
+ EMIT2_off32(0xC7, add_1reg(0xC0, sreg_hi), hi);
+ /* and dreg_hi,sreg_hi */
+ EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
+ /* or dreg_lo,dreg_hi */
+ EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+ }
goto emit_cond_jmp;
}
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -2163,29 +2197,44 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
- case BPF_JMP | BPF_JSGE | BPF_K: {
- u32 hi;
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
+ u32 hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
- EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
- STACK_VAR(dst_hi));
+ if (is_jmp64)
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
}
- hi = imm32 & (1<<31) ? (u32)~0 : 0;
/* mov ecx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
- /* mov ebx,imm32 */
- EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
-
- /* cmp dreg_hi,sreg_hi */
- EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
- EMIT2(IA32_JNE, 2);
+ if (is_jmp64) {
+ hi = imm32 & (1 << 31) ? (u32)~0 : 0;
+ /* mov ebx,imm32 */
+ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
+ /* cmp dreg_hi,sreg_hi */
+ EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+ EMIT2(IA32_JNE, 2);
+ }
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index e4dc3862d423..fe29f3f5d384 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -3,5 +3,4 @@ OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y
obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
-obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o
obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
deleted file mode 100644
index 7138bc7a265c..000000000000
--- a/arch/x86/platform/efi/early_printk.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2013 Intel Corporation; author Matt Fleming
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
- */
-
-#include <linux/console.h>
-#include <linux/efi.h>
-#include <linux/font.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <asm/setup.h>
-
-static const struct font_desc *font;
-static u32 efi_x, efi_y;
-static void *efi_fb;
-static bool early_efi_keep;
-
-/*
- * efi earlyprintk need use early_ioremap to map the framebuffer.
- * But early_ioremap is not usable for earlyprintk=efi,keep, ioremap should
- * be used instead. ioremap will be available after paging_init() which is
- * earlier than initcall callbacks. Thus adding this early initcall function
- * early_efi_map_fb to map the whole efi framebuffer.
- */
-static __init int early_efi_map_fb(void)
-{
- u64 base, size;
-
- if (!early_efi_keep)
- return 0;
-
- base = boot_params.screen_info.lfb_base;
- if (boot_params.screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- base |= (u64)boot_params.screen_info.ext_lfb_base << 32;
- size = boot_params.screen_info.lfb_size;
- efi_fb = ioremap(base, size);
-
- return efi_fb ? 0 : -ENOMEM;
-}
-early_initcall(early_efi_map_fb);
-
-/*
- * early_efi_map maps efi framebuffer region [start, start + len -1]
- * In case earlyprintk=efi,keep we have the whole framebuffer mapped already
- * so just return the offset efi_fb + start.
- */
-static __ref void *early_efi_map(unsigned long start, unsigned long len)
-{
- u64 base;
-
- base = boot_params.screen_info.lfb_base;
- if (boot_params.screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- base |= (u64)boot_params.screen_info.ext_lfb_base << 32;
-
- if (efi_fb)
- return (efi_fb + start);
- else
- return early_ioremap(base + start, len);
-}
-
-static __ref void early_efi_unmap(void *addr, unsigned long len)
-{
- if (!efi_fb)
- early_iounmap(addr, len);
-}
-
-static void early_efi_clear_scanline(unsigned int y)
-{
- unsigned long *dst;
- u16 len;
-
- len = boot_params.screen_info.lfb_linelength;
- dst = early_efi_map(y*len, len);
- if (!dst)
- return;
-
- memset(dst, 0, len);
- early_efi_unmap(dst, len);
-}
-
-static void early_efi_scroll_up(void)
-{
- unsigned long *dst, *src;
- u16 len;
- u32 i, height;
-
- len = boot_params.screen_info.lfb_linelength;
- height = boot_params.screen_info.lfb_height;
-
- for (i = 0; i < height - font->height; i++) {
- dst = early_efi_map(i*len, len);
- if (!dst)
- return;
-
- src = early_efi_map((i + font->height) * len, len);
- if (!src) {
- early_efi_unmap(dst, len);
- return;
- }
-
- memmove(dst, src, len);
-
- early_efi_unmap(src, len);
- early_efi_unmap(dst, len);
- }
-}
-
-static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h)
-{
- const u32 color_black = 0x00000000;
- const u32 color_white = 0x00ffffff;
- const u8 *src;
- u8 s8;
- int m;
-
- src = font->data + c * font->height;
- s8 = *(src + h);
-
- for (m = 0; m < 8; m++) {
- if ((s8 >> (7 - m)) & 1)
- *dst = color_white;
- else
- *dst = color_black;
- dst++;
- }
-}
-
-static void
-early_efi_write(struct console *con, const char *str, unsigned int num)
-{
- struct screen_info *si;
- unsigned int len;
- const char *s;
- void *dst;
-
- si = &boot_params.screen_info;
- len = si->lfb_linelength;
-
- while (num) {
- unsigned int linemax;
- unsigned int h, count = 0;
-
- for (s = str; *s && *s != '\n'; s++) {
- if (count == num)
- break;
- count++;
- }
-
- linemax = (si->lfb_width - efi_x) / font->width;
- if (count > linemax)
- count = linemax;
-
- for (h = 0; h < font->height; h++) {
- unsigned int n, x;
-
- dst = early_efi_map((efi_y + h) * len, len);
- if (!dst)
- return;
-
- s = str;
- n = count;
- x = efi_x;
-
- while (n-- > 0) {
- early_efi_write_char(dst + x*4, *s, h);
- x += font->width;
- s++;
- }
-
- early_efi_unmap(dst, len);
- }
-
- num -= count;
- efi_x += count * font->width;
- str += count;
-
- if (num > 0 && *s == '\n') {
- efi_x = 0;
- efi_y += font->height;
- str++;
- num--;
- }
-
- if (efi_x + font->width > si->lfb_width) {
- efi_x = 0;
- efi_y += font->height;
- }
-
- if (efi_y + font->height > si->lfb_height) {
- u32 i;
-
- efi_y -= font->height;
- early_efi_scroll_up();
-
- for (i = 0; i < font->height; i++)
- early_efi_clear_scanline(efi_y + i);
- }
- }
-}
-
-static __init int early_efi_setup(struct console *con, char *options)
-{
- struct screen_info *si;
- u16 xres, yres;
- u32 i;
-
- si = &boot_params.screen_info;
- xres = si->lfb_width;
- yres = si->lfb_height;
-
- /*
- * early_efi_write_char() implicitly assumes a framebuffer with
- * 32-bits per pixel.
- */
- if (si->lfb_depth != 32)
- return -ENODEV;
-
- font = get_default_font(xres, yres, -1, -1);
- if (!font)
- return -ENODEV;
-
- efi_y = rounddown(yres, font->height) - font->height;
- for (i = 0; i < (yres - efi_y) / font->height; i++)
- early_efi_scroll_up();
-
- /* early_console_register will unset CON_BOOT in case ,keep */
- if (!(con->flags & CON_BOOT))
- early_efi_keep = true;
- return 0;
-}
-
-struct console early_efi_console = {
- .name = "earlyefi",
- .write = early_efi_write,
- .setup = early_efi_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 17456a1d3f04..458a0e2bcc57 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -304,7 +304,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
* - Not within any part of the kernel
* - Not the BIOS reserved area (E820_TYPE_RESERVED, E820_TYPE_NVS, etc)
*/
-static bool can_free_region(u64 start, u64 size)
+static __init bool can_free_region(u64 start, u64 size)
{
if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end))
return false;
@@ -717,7 +717,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
* "efi_mm" cannot be used to check if the page fault had occurred
* in the firmware context because efi=old_map doesn't use efi_pgd.
*/
- if (efi_rts_work.efi_rts_id == NONE)
+ if (efi_rts_work.efi_rts_id == EFI_NONE)
return;
/*
@@ -742,7 +742,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
* because this case occurs *very* rarely and hence could be improved
* on a need by basis.
*/
- if (efi_rts_work.efi_rts_id == RESET_SYSTEM) {
+ if (efi_rts_work.efi_rts_id == EFI_RESET_SYSTEM) {
pr_info("efi_reset_system() buggy! Reboot through BIOS\n");
machine_real_restart(MRR_BIOS);
return;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
index 96f438d4b026..1421d5330b2c 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
@@ -44,7 +44,6 @@ static struct fixed_voltage_config bcm43xx_vmmc = {
*/
.microvolts = 2000000, /* 1.8V */
.startup_delay = 250 * 1000, /* 250ms */
- .enable_high = 1, /* active high */
.enabled_at_boot = 0, /* disabled at boot */
.init_data = &bcm43xx_vmmc_data,
};
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 4a6a5a26c582..eb33432f2f24 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -29,7 +29,8 @@
struct uv_systab *uv_systab;
-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ u64 a4, u64 a5)
{
struct uv_systab *tab = uv_systab;
s64 ret;
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
return ret;
}
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+{
+ s64 ret;
+
+ if (down_interruptible(&__efi_uv_runtime_lock))
+ return BIOS_STATUS_ABORT;
+
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+ up(&__efi_uv_runtime_lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(uv_bios_call);
s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
unsigned long bios_flags;
s64 ret;
+ if (down_interruptible(&__efi_uv_runtime_lock))
+ return BIOS_STATUS_ABORT;
+
local_irq_save(bios_flags);
- ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
local_irq_restore(bios_flags);
+ up(&__efi_uv_runtime_lock);
+
return ret;
}
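
Both UV BIOS entry points now funnel through __uv_bios_call() while holding __efi_uv_runtime_lock, so a UV BIOS call can no longer run concurrently with the EFI runtime services that share the same firmware context. A sketch of the locking shape, assuming names other than down_interruptible()/up() are placeholders:

#include <linux/semaphore.h>
#include <linux/types.h>

static DEFINE_SEMAPHORE(fw_lock);	/* plays the role of __efi_uv_runtime_lock */

/* raw_call() stands in for the unlocked __uv_bios_call() */
static s64 fw_call_serialized(s64 (*raw_call)(u64), u64 arg)
{
	s64 ret;

	if (down_interruptible(&fw_lock))
		return -EINTR;	/* the real code returns BIOS_STATUS_ABORT here */

	ret = raw_call(arg);
	up(&fw_lock);

	return ret;
}
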
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index f518b4744ff8..a9e80e44178c 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -16,7 +16,7 @@ config 64BIT
config X86_32
def_bool !64BIT
- select HAVE_AOUT
+ select ARCH_32BIT_OFF_T
select ARCH_WANT_IPC_PARSE_VERSION
select MODULES_USE_ELF_REL
select CLONE_BACKWARDS
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 2f6787fc7106..c54a493e139a 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
-#ifdef CONFIG_X86_X2APIC
- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
-#endif
- val &= ~X2APIC_ENABLE;
+ val &= ~X2APIC_ENABLE;
break;
}
return val;
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index a7e47cf7ec6c..6e4c6bd62203 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -17,8 +17,8 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t pte);
unsigned long xen_read_cr2_direct(void);
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 0f4fe206dcc2..856a85814f00 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -306,20 +306,20 @@ static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
__xen_set_pte(ptep, pteval);
}
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
+pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
/* Just return the pte as-is. We preserve the bits on commit */
- trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
+ trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
return *ptep;
}
-void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t pte)
{
struct mmu_update u;
- trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
+ trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 72bf446c3fee..6e29794573b7 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -361,8 +361,6 @@ void xen_timer_resume(void)
{
int cpu;
- pvclock_resume();
-
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
};
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
+static u64 xen_clock_value_saved;
void xen_save_time_memory_area(void)
{
struct vcpu_register_time_memory_area t;
int ret;
+ xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
+
if (!xen_clock)
return;
@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
int ret;
if (!xen_clock)
- return;
+ goto out;
t.addr.v = &xen_clock->pvti;
@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
if (ret != 0)
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
ret);
+
+out:
+ /* Need pvclock_resume() before using xen_clocksource_read(). */
+ pvclock_resume();
+ xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
}
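
The time.c change above moves pvclock_resume() into xen_restore_time_memory_area() and captures the clocksource value at save time so the sched_clock offset stays continuous across save/restore. The pattern in isolation, a sketch where read_raw_clock() stands in for xen_clocksource_read() and "offset" for xen_sched_clock_offset:

#include <linux/types.h>

static u64 offset, saved;

static u64 read_raw_clock(void)
{
	return 0;	/* placeholder for xen_clocksource_read() */
}

static void clock_save(void)
{
	saved = read_raw_clock() - offset;
}

static void clock_restore(void)
{
	/* the underlying source must be re-enabled first (pvclock_resume()) */
	offset = read_raw_clock() - saved;
}
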
static void xen_setup_vsyscall_time_info(void)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 20a0756f27ef..963986a48c62 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config XTENSA
def_bool y
+ select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
@@ -164,7 +165,7 @@ config XTENSA_FAKE_NMI
If unsure, say N.
config XTENSA_UNALIGNED_USER
- bool "Unaligned memory access in use space"
+ bool "Unaligned memory access in user space"
help
The Xtensa architecture currently does not handle unaligned
memory accesses in hardware but through an exception handler.
@@ -451,7 +452,7 @@ config USE_OF
help
Include support for flattened device tree machine descriptions.
-config BUILTIN_DTB
+config BUILTIN_DTB_SOURCE
string "DTB to build into the kernel image"
depends on OF
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index f8052ba5aea8..0b8d00cdae7c 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -7,9 +7,9 @@
#
#
-BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
-ifneq ($(CONFIG_BUILTIN_DTB),"")
-obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
+obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE)
endif
# for CONFIG_OF_ALL_DTBS test
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index 2bf964df37ba..f378e56f9ce6 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="kc705"
+CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_PM=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index 3221b7053fa3..62f32a902568 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="csp"
+CONFIG_BUILTIN_DTB_SOURCE="csp"
# CONFIG_COMPACTION is not set
CONFIG_XTFPGA_LCD=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 985fa8546e4e..8bebe07f1060 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="kc705"
+CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index f3fc4f970ca8..933ab2adf434 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="kc705_nommu"
+CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu"
CONFIG_BINFMT_FLAT=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 11fed6c06a7c..e29c5b179a5b 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -33,11 +33,12 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
# CONFIG_PCI is not set
+CONFIG_VECTORS_OFFSET=0x00002000
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
CONFIG_USE_OF=y
-CONFIG_BUILTIN_DTB="lx200mx"
+CONFIG_BUILTIN_DTB_SOURCE="lx200mx"
# CONFIG_COMPACTION is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index e255683cd520..809f39ce08c0 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += percpu.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += sections.h
+generic-y += socket.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
index dfdf9fae1f84..7f6cf4151843 100644
--- a/arch/xtensa/include/asm/asm-uaccess.h
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -32,8 +32,6 @@
#define KERNEL_DS 0
#define USER_DS 1
-#define get_ds (KERNEL_DS)
-
/*
* get_fs reads current->thread.current_ds into a register.
* On Entry:
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 4b2480304bc3..6792928ba84a 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -32,7 +32,6 @@
#define KERNEL_DS ((mm_segment_t) { 0 })
#define USER_DS ((mm_segment_t) { 1 })
-#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.current_ds)
#define set_fs(val) (current->thread.current_ds = (val))
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 0d34629dafc5..30af4dc3ce7b 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -7,21 +7,9 @@
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_GETPGRP
-/*
- * Ignore legacy system calls in the checksyscalls.sh script
- */
-
-#define __IGNORE_fork /* use clone */
-#define __IGNORE_time
-#define __IGNORE_alarm /* use setitimer */
-#define __IGNORE_pause
-#define __IGNORE_mmap /* use mmap2 */
-#define __IGNORE_vfork /* use clone */
-#define __IGNORE_fadvise64 /* use fadvise64_64 */
-
#define NR_syscalls __NR_syscalls
#endif /* _XTENSA_UNISTD_H */
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index 960bf1e4be53..6b43e5049ff7 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -2,3 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm
generated-y += unistd_32.h
generic-y += kvm_para.h
+generic-y += socket.h
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 58f29a9d895d..be726062412b 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -34,9 +34,7 @@
/*
* Flags for mmap
*/
-#define MAP_SHARED 0x001 /* Share changes */
-#define MAP_PRIVATE 0x002 /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x003 /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
#define MAP_TYPE 0x00f /* Mask for type of mapping */
#define MAP_FIXED 0x010 /* Interpret addr exactly */
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
deleted file mode 100644
index 1de07a7f7680..000000000000
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * include/asm-xtensa/socket.h
- *
- * Copied from i386.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _XTENSA_SOCKET_H
-#define _XTENSA_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockoptions(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-#define SO_REUSEPORT 15
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-#define SO_GET_FILTER SO_ATTACH_FILTER
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_PROTOCOL 38
-#define SO_DOMAIN 39
-
-#define SO_RXQ_OVFL 40
-
-#define SO_WIFI_STATUS 41
-#define SCM_WIFI_STATUS SO_WIFI_STATUS
-#define SO_PEEK_OFF 42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS 43
-
-#define SO_LOCK_FILTER 44
-
-#define SO_SELECT_ERR_QUEUE 45
-
-#define SO_BUSY_POLL 46
-
-#define SO_MAX_PACING_RATE 47
-
-#define SO_BPF_EXTENSIONS 48
-
-#define SO_INCOMING_CPU 49
-
-#define SO_ATTACH_BPF 50
-#define SO_DETACH_BPF SO_DETACH_FILTER
-
-#define SO_ATTACH_REUSEPORT_CBPF 51
-#define SO_ATTACH_REUSEPORT_EBPF 52
-
-#define SO_CNX_ADVICE 53
-
-#define SCM_TIMESTAMPING_OPT_STATS 54
-
-#define SO_MEMINFO 55
-
-#define SO_INCOMING_NAPI_ID 56
-
-#define SO_COOKIE 57
-
-#define SCM_TIMESTAMPING_PKTINFO 58
-
-#define SO_PEERGROUPS 59
-
-#define SO_ZEROCOPY 60
-
-#define SO_TXTIME 61
-#define SCM_TXTIME SO_TXTIME
-
-#endif /* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index da08e75100ab..7f009719304e 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -276,12 +276,13 @@ should_never_return:
movi a2, cpu_start_ccount
1:
+ memw
l32i a3, a2, 0
beqi a3, 0, 1b
movi a3, 0
s32i a3, a2, 0
- memw
1:
+ memw
l32i a3, a2, 0
beqi a3, 0, 1b
wsr a3, ccount
@@ -317,11 +318,13 @@ ENTRY(cpu_restart)
rsr a0, prid
neg a2, a0
movi a3, cpu_start_id
+ memw
s32i a2, a3, 0
#if XCHAL_DCACHE_IS_WRITEBACK
dhwbi a3, 0
#endif
1:
+ memw
l32i a2, a3, 0
dhi a3, 0
bne a2, a0, 1b
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 932d64689bac..be1f280c322c 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned i;
- for (i = 0; i < max_cpus; ++i)
+ for_each_possible_cpu(i)
set_cpu_present(i, true);
}
@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
pr_info("%s: Core Count = %d\n", __func__, ncpus);
pr_info("%s: Core Id = %d\n", __func__, core_id);
+ if (ncpus > NR_CPUS) {
+ ncpus = NR_CPUS;
+ pr_info("%s: limiting core count by %d\n", __func__, ncpus);
+ }
+
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
}
@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
int i;
#ifdef CONFIG_HOTPLUG_CPU
- cpu_start_id = cpu;
- system_flush_invalidate_dcache_range(
- (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+ WRITE_ONCE(cpu_start_id, cpu);
+ /* Pairs with the third memw in the cpu_restart */
+ mb();
+ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
#endif
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
ccount = get_ccount();
while (!ccount);
- cpu_start_ccount = ccount;
+ WRITE_ONCE(cpu_start_ccount, ccount);
- while (time_before(jiffies, timeout)) {
+ do {
+ /*
+ * Pairs with the first two memws in the
+ * .Lboot_secondary.
+ */
mb();
- if (!cpu_start_ccount)
- break;
- }
+ ccount = READ_ONCE(cpu_start_ccount);
+ } while (ccount && time_before(jiffies, timeout));
- if (cpu_start_ccount) {
+ if (ccount) {
smp_call_function_single(0, mx_cpu_stop,
- (void *)cpu, 1);
- cpu_start_ccount = 0;
+ (void *)cpu, 1);
+ WRITE_ONCE(cpu_start_ccount, 0);
return -EIO;
}
}
@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
__func__, cpu, idle, start_info.stack);
+ init_completion(&cpu_running);
ret = boot_secondary(cpu, idle);
if (ret == 0) {
wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
- sizeof(cpu_start_id));
- if (cpu_start_id == -cpu) {
+ sizeof(cpu_start_id));
+ /* Pairs with the second memw in the cpu_restart */
+ mb();
+ if (READ_ONCE(cpu_start_id) == -cpu) {
platform_cpu_kill(cpu);
return;
}
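
Together with the memw instructions added to head.S, the smp.c changes turn the boot/secondary handshake into explicit WRITE_ONCE()/READ_ONCE() accesses with paired barriers. The pattern in isolation, a sketch where start_flag stands in for cpu_start_ccount:

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <asm/barrier.h>

static unsigned long start_flag;	/* stands in for cpu_start_ccount */

static void publish(unsigned long v)
{
	WRITE_ONCE(start_flag, v);
	mb();				/* pairs with memw on the secondary CPU */
}

static unsigned long poll_until_consumed(unsigned long timeout)
{
	unsigned long v;

	do {
		mb();			/* order the read against the peer's store */
		v = READ_ONCE(start_flag);
	} while (v && time_before(jiffies, timeout));

	return v;			/* non-zero: the secondary never picked it up */
}
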
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 69cf91b03b26..6af49929de85 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -72,8 +72,8 @@
61 common fcntl64 sys_fcntl64
62 common fallocate sys_fallocate
63 common fadvise64_64 xtensa_fadvise64_64
-64 common utime sys_utime
-65 common utimes sys_utimes
+64 common utime sys_utime32
+65 common utimes sys_utimes_time32
66 common ioctl sys_ioctl
67 common fcntl sys_fcntl
68 common setxattr sys_setxattr
@@ -103,7 +103,7 @@
91 common madvise sys_madvise
92 common shmget sys_shmget
93 common shmat xtensa_shmat
-94 common shmctl sys_shmctl
+94 common shmctl sys_old_shmctl
95 common shmdt sys_shmdt
# Socket Operations
96 common socket sys_socket
@@ -174,15 +174,15 @@
158 common capget sys_capget
159 common capset sys_capset
160 common ptrace sys_ptrace
-161 common semtimedop sys_semtimedop
+161 common semtimedop sys_semtimedop_time32
162 common semget sys_semget
163 common semop sys_semop
-164 common semctl sys_semctl
+164 common semctl sys_old_semctl
165 common available165 sys_ni_syscall
166 common msgget sys_msgget
167 common msgsnd sys_msgsnd
168 common msgrcv sys_msgrcv
-169 common msgctl sys_msgctl
+169 common msgctl sys_old_msgctl
170 common available170 sys_ni_syscall
# File System
171 common umount2 sys_umount
@@ -206,11 +206,11 @@
188 common setrlimit sys_setrlimit
189 common getrlimit sys_getrlimit
190 common getrusage sys_getrusage
-191 common futex sys_futex
+191 common futex sys_futex_time32
192 common gettimeofday sys_gettimeofday
193 common settimeofday sys_settimeofday
-194 common adjtimex sys_adjtimex
-195 common nanosleep sys_nanosleep
+194 common adjtimex sys_adjtimex_time32
+195 common nanosleep sys_nanosleep_time32
196 common getgroups sys_getgroups
197 common setgroups sys_setgroups
198 common sethostname sys_sethostname
@@ -234,7 +234,7 @@
215 common sched_getscheduler sys_sched_getscheduler
216 common sched_get_priority_max sys_sched_get_priority_max
217 common sched_get_priority_min sys_sched_get_priority_min
-218 common sched_rr_get_interval sys_sched_rr_get_interval
+218 common sched_rr_get_interval sys_sched_rr_get_interval_time32
219 common sched_yield sys_sched_yield
222 common available222 sys_ni_syscall
# Signal Handling
@@ -244,14 +244,14 @@
226 common rt_sigaction sys_rt_sigaction
227 common rt_sigprocmask sys_rt_sigprocmask
228 common rt_sigpending sys_rt_sigpending
-229 common rt_sigtimedwait sys_rt_sigtimedwait
+229 common rt_sigtimedwait sys_rt_sigtimedwait_time32
230 common rt_sigqueueinfo sys_rt_sigqueueinfo
231 common rt_sigsuspend sys_rt_sigsuspend
# Message
232 common mq_open sys_mq_open
233 common mq_unlink sys_mq_unlink
-234 common mq_timedsend sys_mq_timedsend
-235 common mq_timedreceive sys_mq_timedreceive
+234 common mq_timedsend sys_mq_timedsend_time32
+235 common mq_timedreceive sys_mq_timedreceive_time32
236 common mq_notify sys_mq_notify
237 common mq_getsetattr sys_mq_getsetattr
238 common available238 sys_ni_syscall
@@ -259,17 +259,17 @@
# IO
240 common io_destroy sys_io_destroy
241 common io_submit sys_io_submit
-242 common io_getevents sys_io_getevents
+242 common io_getevents sys_io_getevents_time32
243 common io_cancel sys_io_cancel
-244 common clock_settime sys_clock_settime
-245 common clock_gettime sys_clock_gettime
-246 common clock_getres sys_clock_getres
-247 common clock_nanosleep sys_clock_nanosleep
+244 common clock_settime sys_clock_settime32
+245 common clock_gettime sys_clock_gettime32
+246 common clock_getres sys_clock_getres_time32
+247 common clock_nanosleep sys_clock_nanosleep_time32
# Timer
248 common timer_create sys_timer_create
249 common timer_delete sys_timer_delete
-250 common timer_settime sys_timer_settime
-251 common timer_gettime sys_timer_gettime
+250 common timer_settime sys_timer_settime32
+251 common timer_gettime sys_timer_gettime32
252 common timer_getoverrun sys_timer_getoverrun
# System
253 common reserved253 sys_ni_syscall
@@ -291,8 +291,8 @@
269 common tee sys_tee
270 common vmsplice sys_vmsplice
271 common available271 sys_ni_syscall
-272 common pselect6 sys_pselect6
-273 common ppoll sys_ppoll
+272 common pselect6 sys_pselect6_time32
+273 common ppoll sys_ppoll_time32
274 common epoll_pwait sys_epoll_pwait
275 common epoll_create1 sys_epoll_create1
276 common inotify_init sys_inotify_init
@@ -316,9 +316,9 @@
293 common linkat sys_linkat
294 common symlinkat sys_symlinkat
295 common readlinkat sys_readlinkat
-296 common utimensat sys_utimensat
+296 common utimensat sys_utimensat_time32
297 common fchownat sys_fchownat
-298 common futimesat sys_futimesat
+298 common futimesat sys_futimesat_time32
299 common fstatat64 sys_fstatat64
300 common fchmodat sys_fchmodat
301 common faccessat sys_faccessat
@@ -327,14 +327,14 @@
304 common signalfd sys_signalfd
# 305 was timerfd
306 common eventfd sys_eventfd
-307 common recvmmsg sys_recvmmsg
+307 common recvmmsg sys_recvmmsg_time32
308 common setns sys_setns
309 common signalfd4 sys_signalfd4
310 common dup3 sys_dup3
311 common pipe2 sys_pipe2
312 common timerfd_create sys_timerfd_create
-313 common timerfd_settime sys_timerfd_settime
-314 common timerfd_gettime sys_timerfd_gettime
+313 common timerfd_settime sys_timerfd_settime32
+314 common timerfd_gettime sys_timerfd_gettime32
315 common available315 sys_ni_syscall
316 common eventfd2 sys_eventfd2
317 common preadv sys_preadv
@@ -349,7 +349,7 @@
326 common sync_file_range2 sys_sync_file_range2
327 common perf_event_open sys_perf_event_open
328 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
-329 common clock_adjtime sys_clock_adjtime
+329 common clock_adjtime sys_clock_adjtime32
330 common prlimit64 sys_prlimit64
331 common kcmp sys_kcmp
332 common finit_module sys_finit_module
@@ -372,3 +372,25 @@
349 common pkey_alloc sys_pkey_alloc
350 common pkey_free sys_pkey_free
351 common statx sys_statx
+352 common rseq sys_rseq
+# 353 through 402 are unassigned to sync up with generic numbers
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
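
The new 403-423 slots mirror the generic 64-bit-time_t syscall numbers while the legacy slots are rerouted to their *_time32 handlers. For illustration only (not part of the patch), a 32-bit user-space program can reach the new entry point directly; the structure layout follows struct __kernel_timespec:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct kts {			/* layout of struct __kernel_timespec */
	long long tv_sec;
	long long tv_nsec;
};

int main(void)
{
	struct kts ts;

	/* 403 == clock_gettime64 in the table above, 1 == CLOCK_MONOTONIC */
	if (syscall(403, 1, &ts) == 0)
		printf("monotonic: %lld.%09lld\n", ts.tv_sec, ts.tv_nsec);

	return 0;
}
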
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index fd524a54d2ab..378186b5eb40 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
container_of(evt, struct ccount_timer, evt);
if (timer->irq_enabled) {
- disable_irq(evt->irq);
+ disable_irq_nosync(evt->irq);
timer->irq_enabled = 0;
}
return 0;
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 63e0f12be7c9..72adbbe975d5 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
}
/**
- * __bfq_deactivate_entity - deactivate an entity from its service tree.
- * @entity: the entity to deactivate.
+ * __bfq_deactivate_entity - update sched_data and service trees for
+ * entity, so as to represent entity as inactive
+ * @entity: the entity being deactivated.
* @ins_into_idle_tree: if false, the entity will not be put into the
* idle tree.
*
- * Deactivates an entity, independently of its previous state. Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it into the idle
- * tree.
+ * If necessary and allowed, puts entity into the idle tree. NOTE:
+ * entity may be on no tree if in service.
*/
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{
diff --git a/block/blk-core.c b/block/blk-core.c
index c78042975737..6b78ec56a4f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
kblockd_schedule_work(&q->timeout_work);
}
+static void blk_timeout_work(struct work_struct *work)
+{
+}
+
/**
* blk_alloc_queue_node - allocate a request queue
* @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
- INIT_WORK(&q->timeout_work, NULL);
+ INIT_WORK(&q->timeout_work, blk_timeout_work);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
@@ -661,7 +665,6 @@ no_merge:
* blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
- * @request_count: out parameter for number of traversed plugged requests
* @same_queue_rq: pointer to &struct request that gets filled in when
* another request associated with @q is found on the plug list
* (optional, may be %NULL)
@@ -1683,6 +1686,15 @@ EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
* @plug: The &struct blk_plug that needs to be initialized
*
* Description:
+ * blk_start_plug() indicates to the block layer an intent by the caller
+ * to submit multiple I/O requests in a batch. The block layer may use
+ * this hint to defer submitting I/Os from the caller until blk_finish_plug()
+ * is called. However, the block layer may choose to submit requests
+ * before a call to blk_finish_plug() if the number of queued I/Os
+ * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
+ * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
+ * the task schedules (see below).
+ *
* Tracking blk_plug inside the task_struct will help with auto-flushing the
* pending I/O should the task end up blocking between blk_start_plug() and
* blk_finish_plug(). This is important from a performance perspective, but
@@ -1765,6 +1777,16 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
blk_mq_flush_plug_list(plug, from_schedule);
}
+/**
+ * blk_finish_plug - mark the end of a batch of submitted I/O
+ * @plug: The &struct blk_plug passed to blk_start_plug()
+ *
+ * Description:
+ * Indicate that a batch of I/O submissions is complete. This function
+ * must be paired with an initial call to blk_start_plug(). The intent
+ * is to allow the block layer to optimize I/O submission. See the
+ * documentation for blk_start_plug() for more information.
+ */
void blk_finish_plug(struct blk_plug *plug)
{
if (plug != current->plug)
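
The kerneldoc added above describes the plug/unplug contract; a typical caller batches submissions like this (a sketch, assuming an array of already-prepared bios):

#include <linux/blkdev.h>

static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* may sit on the plug list for merging */
	blk_finish_plug(&plug);		/* flushes whatever is still queued */
}
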
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a3fc7191c694..6e0f2d97fc6d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
- blk_mq_run_hw_queue(hctx, true);
+ blk_mq_sched_restart(hctx);
}
/**
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index fc714ef402a6..2620baa1f699 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -72,6 +72,7 @@
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
+#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
@@ -591,6 +592,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
u64 now = ktime_to_ns(ktime_get());
bool issue_as_root = bio_issue_as_root_blkg(bio);
bool enabled = false;
+ int inflight = 0;
blkg = bio->bi_blkg;
if (!blkg || !bio_flagged(bio, BIO_TRACKED))
@@ -601,6 +603,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
return;
enabled = blk_iolatency_enabled(iolat->blkiolat);
+ if (!enabled)
+ return;
+
while (blkg && blkg->parent) {
iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -609,8 +614,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
}
rqw = &iolat->rq_wait;
- atomic_dec(&rqw->inflight);
- if (!enabled || iolat->min_lat_nsec == 0)
+ inflight = atomic_dec_return(&rqw->inflight);
+ WARN_ON_ONCE(inflight < 0);
+ if (iolat->min_lat_nsec == 0)
goto next;
iolatency_record_time(iolat, &bio->bi_issue, now,
issue_as_root);
@@ -754,10 +760,13 @@ int blk_iolatency_init(struct request_queue *q)
return 0;
}
-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+/*
+ * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
+ * return 0.
+ */
+static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
- struct blk_iolatency *blkiolat = iolat->blkiolat;
u64 oldval = iolat->min_lat_nsec;
iolat->min_lat_nsec = val;
@@ -766,9 +775,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
BLKIOLATENCY_MAX_WIN_SIZE);
if (!oldval && val)
- atomic_inc(&blkiolat->enabled);
+ return 1;
if (oldval && !val)
- atomic_dec(&blkiolat->enabled);
+ return -1;
+ return 0;
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -800,6 +810,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
u64 lat_val = 0;
u64 oldval;
int ret;
+ int enable = 0;
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
if (ret)
@@ -834,7 +845,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
blkg = ctx.blkg;
oldval = iolat->min_lat_nsec;
- iolatency_set_min_lat_nsec(blkg, lat_val);
+ enable = iolatency_set_min_lat_nsec(blkg, lat_val);
+ if (enable) {
+ WARN_ON_ONCE(!blk_get_queue(blkg->q));
+ blkg_get(blkg);
+ }
+
if (oldval != iolat->min_lat_nsec) {
iolatency_clear_scaling(blkg);
}
@@ -842,6 +858,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
ret = 0;
out:
blkg_conf_finish(&ctx);
+ if (ret == 0 && enable) {
+ struct iolatency_grp *tmp = blkg_to_lat(blkg);
+ struct blk_iolatency *blkiolat = tmp->blkiolat;
+
+ blk_mq_freeze_queue(blkg->q);
+
+ if (enable == 1)
+ atomic_inc(&blkiolat->enabled);
+ else if (enable == -1)
+ atomic_dec(&blkiolat->enabled);
+ else
+ WARN_ON_ONCE(1);
+
+ blk_mq_unfreeze_queue(blkg->q);
+
+ blkg_put(blkg);
+ blk_put_queue(blkg->q);
+ }
return ret ?: nbytes;
}
@@ -977,8 +1011,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ struct blk_iolatency *blkiolat = iolat->blkiolat;
+ int ret;
- iolatency_set_min_lat_nsec(blkg, 0);
+ ret = iolatency_set_min_lat_nsec(blkg, 0);
+ if (ret == 1)
+ atomic_inc(&blkiolat->enabled);
+ if (ret == -1)
+ atomic_dec(&blkiolat->enabled);
iolatency_clear_scaling(blkg);
}
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
index fb2c82c351e4..038cb627c868 100644
--- a/block/blk-mq-debugfs-zoned.c
+++ b/block/blk-mq-debugfs-zoned.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
- *
- * This file is released under the GPL.
*/
#include <linux/blkdev.h>
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 90d68760af08..7921573aebbc 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = {
CMD_FLAG_NAME(PREFLUSH),
CMD_FLAG_NAME(RAHEAD),
CMD_FLAG_NAME(BACKGROUND),
- CMD_FLAG_NAME(NOUNMAP),
CMD_FLAG_NAME(NOWAIT),
+ CMD_FLAG_NAME(NOUNMAP),
+ CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME
@@ -838,6 +839,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
static bool debugfs_create_files(struct dentry *parent, void *data,
const struct blk_mq_debugfs_attr *attr)
{
+ if (IS_ERR_OR_NULL(parent))
+ return false;
+
d_inode(parent)->i_private = data;
for (; attr->name; attr++) {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ba37b9e15e9..9437a5eb07cf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
spin_unlock_irq(&q->requeue_lock);
list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
- if (!(rq->rq_flags & RQF_SOFTBARRIER))
+ if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
continue;
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
- blk_mq_sched_insert_request(rq, true, false, false);
+ /*
+ * If RQF_DONTPREP, rq has contained some driver specific
+ * data, so insert it to hctx dispatch list to avoid any
+ * merge.
+ */
+ if (rq->rq_flags & RQF_DONTPREP)
+ blk_mq_request_bypass_insert(rq, false);
+ else
+ blk_mq_sched_insert_request(rq, true, false, false);
}
while (!list_empty(&rq_list)) {
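
The requeue path above now sends RQF_DONTPREP requests straight to the hctx dispatch list so driver-private preparation is never invalidated by a merge. From the driver side the flag is used roughly as below; my_prep() and my_submit() are hypothetical helpers, not real APIs:

#include <linux/blk-mq.h>

static int my_prep(struct request *rq);		/* driver-private setup */
static int my_submit(struct request *rq);	/* hands rq to the hardware */

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	if (my_prep(rq) < 0)
		return BLK_STS_RESOURCE;

	rq->rq_flags |= RQF_DONTPREP;		/* prep must survive a requeue */

	if (my_submit(rq) == -EBUSY) {
		blk_mq_requeue_request(rq, true);	/* kicks the requeue work */
		return BLK_STS_OK;
	}

	return BLK_STS_OK;
}
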
@@ -1906,7 +1914,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
- struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
+ struct blk_mq_alloc_data data = { .flags = 0};
struct request *rq;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
@@ -1928,6 +1936,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq_qos_throttle(q, bio);
+ data.cmd_flags = bio->bi_opf;
rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d943d46b0785..d0b3dd54ef8d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,7 +36,6 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;
-void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index f0c56649775f..fd166fbb0f65 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}
-void wbt_issue(struct rq_qos *rqos, struct request *rq)
+static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
struct rq_wb *rwb = RQWB(rqos);
@@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq)
}
}
-void wbt_requeue(struct rq_qos *rqos, struct request *rq)
+static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
struct rq_wb *rwb = RQWB(rqos);
if (!rwb_enabled(rwb))
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 9511144ac7b5..bbab6bf33519 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -168,6 +168,16 @@ config CRYPTO_MANAGER_DISABLE_TESTS
Disable run-time self tests that normally take place at
algorithm registration.
+config CRYPTO_MANAGER_EXTRA_TESTS
+ bool "Enable extra run-time crypto self tests"
+ depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS
+ help
+ Enable extra run-time self tests of registered crypto algorithms,
+ including randomized fuzz tests.
+
+ This is intended for developer use only, as these tests take much
+ longer to run than the normal self tests.
+
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions"
help
@@ -642,7 +652,7 @@ config CRYPTO_CRC32_PCLMUL
From Intel Westmere and AMD Bulldozer processor with SSE4.2
and PCLMULQDQ supported, the processor will support
CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ
- instruction. This option will create 'crc32-plcmul' module,
+ instruction. This option will create 'crc32-pclmul' module,
which will enable any routine to use the CRC-32-IEEE 802.3 checksum
and gain better performance as compared with the table implementation.
@@ -671,7 +681,7 @@ config CRYPTO_CRCT10DIF_PCLMUL
For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
CRC T10 DIF PCLMULQDQ computation can be hardware
accelerated PCLMULQDQ instruction. This option will create
- 'crct10dif-plcmul' module, which is faster when computing the
+ 'crct10dif-pclmul' module, which is faster when computing the
crct10dif checksum as compared with the generic table implementation.
config CRYPTO_CRCT10DIF_VPMSUM
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 6651e713c45d..5564e73266a6 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
ictx = skcipher_instance_ctx(inst);
/* Stream cipher, e.g. "xchacha12" */
+ crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
+ skcipher_crypto_instance(inst));
err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
0, crypto_requires_sync(algt->type,
algt->mask));
@@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
+ crypto_set_spawn(&ictx->blockcipher_spawn,
+ skcipher_crypto_instance(inst));
err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
if (err)
diff --git a/crypto/aead.c b/crypto/aead.c
index 189c52d1f63a..4908b5e846f0 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
else
err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
- if (err)
+ if (unlikely(err)) {
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
return err;
+ }
crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
diff --git a/crypto/aegis.h b/crypto/aegis.h
index 405e025fc906..41a3090cda8e 100644
--- a/crypto/aegis.h
+++ b/crypto/aegis.h
@@ -1,14 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* AEGIS common definitions
*
* Copyright (c) 2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (c) 2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#ifndef _CRYPTO_AEGIS_H
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index c22f4414856d..3718a8341303 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The AEGIS-128 Authenticated-Encryption Algorithm
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#include <crypto/algapi.h>
@@ -290,19 +286,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
const struct aegis128_ops *ops)
{
struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- ops->crypt_chunk(state, dst, src, chunksize);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
- skcipher_walk_done(&walk, 0);
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index b6fb21ebdc3e..275a8616d71b 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The AEGIS-128L Authenticated-Encryption Algorithm
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#include <crypto/algapi.h>
@@ -353,19 +349,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
const struct aegis128l_ops *ops)
{
struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- ops->crypt_chunk(state, dst, src, chunksize);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
- skcipher_walk_done(&walk, 0);
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index 11f0f8ec9c7c..ecd6b7f34a2d 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The AEGIS-256 Authenticated-Encryption Algorithm
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#include <crypto/algapi.h>
@@ -303,19 +299,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
const struct aegis256_ops *ops)
{
struct skcipher_walk walk;
- u8 *src, *dst;
- unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
- chunksize = walk.nbytes;
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- ops->crypt_chunk(state, dst, src, chunksize);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
- skcipher_walk_done(&walk, 0);
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 17eb09d222ff..edca0998b2a4 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
int af_alg_release(struct socket *sock)
{
- if (sock->sk)
+ if (sock->sk) {
sock_put(sock->sk);
+ sock->sk = NULL;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(af_alg_release);
@@ -302,8 +304,6 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
if (err)
goto unlock;
- sk2->sk_family = PF_ALG;
-
if (nokey || !ask->refcnt++)
sock_hold(sk);
ask->nokey_refcnt += nokey;
@@ -380,7 +380,6 @@ static int alg_create(struct net *net, struct socket *sock, int protocol,
sock->ops = &alg_proto_ops;
sock_init_data(sock, sk);
- sk->sk_family = PF_ALG;
sk->sk_destruct = alg_sock_destruct;
return 0;
@@ -425,12 +424,12 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);
-void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
+static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
+ struct af_alg_sgl *sgl_new)
{
sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
}
-EXPORT_SYMBOL_GPL(af_alg_link_sg);
void af_alg_free_sg(struct af_alg_sgl *sgl)
{
@@ -441,7 +440,7 @@ void af_alg_free_sg(struct af_alg_sgl *sgl)
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);
-int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
+static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
{
struct cmsghdr *cmsg;
@@ -480,7 +479,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
return 0;
}
-EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
/**
* af_alg_alloc_tsgl - allocate the TX SGL
@@ -488,7 +486,7 @@ EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
* @sk socket of connection to user space
* @return: 0 upon success, < 0 upon error
*/
-int af_alg_alloc_tsgl(struct sock *sk)
+static int af_alg_alloc_tsgl(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -517,7 +515,6 @@ int af_alg_alloc_tsgl(struct sock *sk)
return 0;
}
-EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
/**
* aead_count_tsgl - Count number of TX SG entries
@@ -532,17 +529,17 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
*/
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
{
- struct alg_sock *ask = alg_sk(sk);
- struct af_alg_ctx *ctx = ask->private;
- struct af_alg_tsgl *sgl, *tmp;
+ const struct alg_sock *ask = alg_sk(sk);
+ const struct af_alg_ctx *ctx = ask->private;
+ const struct af_alg_tsgl *sgl;
unsigned int i;
unsigned int sgl_count = 0;
if (!bytes)
return 0;
- list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
- struct scatterlist *sg = sgl->sg;
+ list_for_each_entry(sgl, &ctx->tsgl_list, list) {
+ const struct scatterlist *sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
size_t bytes_count;
@@ -640,8 +637,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
}
list_del(&sgl->list);
- sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
- (MAX_SGL_ENTS + 1));
+ sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1));
}
if (!ctx->used)
@@ -654,7 +650,7 @@ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
*
* @areq Request holding the TX and RX SGL
*/
-void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
+static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
{
struct sock *sk = areq->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -683,7 +679,6 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}
}
-EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
/**
* af_alg_wait_for_wmem - wait for availability of writable memory
@@ -692,7 +687,7 @@ EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
* @flags If MSG_DONTWAIT is set, then only report if function would sleep
* @return 0 when writable memory is available, < 0 upon error
*/
-int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
+static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err = -ERESTARTSYS;
@@ -717,7 +712,6 @@ int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
return err;
}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);
/**
* af_alg_wmem_wakeup - wakeup caller when writable memory is available
@@ -786,8 +780,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
*
* @sk socket of connection to user space
*/
-
-void af_alg_data_wakeup(struct sock *sk)
+static void af_alg_data_wakeup(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -805,7 +798,6 @@ void af_alg_data_wakeup(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(af_alg_data_wakeup);
/**
* af_alg_sendmsg - implementation of sendmsg system call handler
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 5d320a811f75..81e2767e2164 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
- unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
- if (nbytes && walk->offset & alignmask && !err) {
- walk->offset = ALIGN(walk->offset, alignmask + 1);
- nbytes = min(nbytes,
- ((unsigned int)(PAGE_SIZE)) - walk->offset);
- walk->entrylen -= nbytes;
+ if (walk->entrylen && (walk->offset & alignmask) && !err) {
+ unsigned int nbytes;
+ walk->offset = ALIGN(walk->offset, alignmask + 1);
+ nbytes = min(walk->entrylen,
+ (unsigned int)(PAGE_SIZE - walk->offset));
if (nbytes) {
+ walk->entrylen -= nbytes;
walk->data += walk->offset;
return nbytes;
}
@@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (err)
return err;
- if (nbytes) {
+ if (walk->entrylen) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);
@@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
return ret;
}
+static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return -ENOSYS;
+}
+
+static void ahash_set_needkey(struct crypto_ahash *tfm)
+{
+ const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+ if (tfm->setkey != ahash_nosetkey &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+}
+
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
else
err = tfm->setkey(tfm, key, keylen);
- if (err)
+ if (unlikely(err)) {
+ ahash_set_needkey(tfm);
return err;
+ }
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
-static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- return -ENOSYS;
-}
-
static inline unsigned int ahash_align_buffer_size(unsigned len,
unsigned long mask)
{
@@ -489,8 +500,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
if (alg->setkey) {
hash->setkey = alg->setkey;
- if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
- crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
+ ahash_set_needkey(hash);
}
return 0;
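
A minimal caller-side sketch (not part of this patch) of what the reworked crypto_ahash_setkey() error path means in practice: on a failed ->setkey() the tfm is re-marked CRYPTO_TFM_NEED_KEY, so later requests fail with -ENOKEY instead of running with a half-configured key. The algorithm name and helper below are illustrative only.

#include <crypto/hash.h>
#include <linux/err.h>

static int example_hmac_setkey(const u8 *key, unsigned int keylen)
{
	struct crypto_ahash *tfm;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);	/* example algorithm */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err) {
		/*
		 * With this patch the NEED_KEY flag is set again on failure,
		 * so a subsequent ahash request would return -ENOKEY rather
		 * than silently reusing stale key material.
		 */
		crypto_free_ahash(tfm);
		return err;
	}

	/* ... issue ahash requests here ... */

	crypto_free_ahash(tfm);
	return 0;
}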
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 8b65ada33e5d..4c9c86b55738 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -494,6 +494,24 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_register_template);
+int crypto_register_templates(struct crypto_template *tmpls, int count)
+{
+ int i, err;
+
+ for (i = 0; i < count; i++) {
+ err = crypto_register_template(&tmpls[i]);
+ if (err)
+ goto out;
+ }
+ return 0;
+
+out:
+ for (--i; i >= 0; --i)
+ crypto_unregister_template(&tmpls[i]);
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_register_templates);
+
void crypto_unregister_template(struct crypto_template *tmpl)
{
struct crypto_instance *inst;
@@ -523,6 +541,15 @@ void crypto_unregister_template(struct crypto_template *tmpl)
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
+void crypto_unregister_templates(struct crypto_template *tmpls, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_template(&tmpls[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_templates);
+
static struct crypto_template *__crypto_lookup_template(const char *name)
{
struct crypto_template *q, *tmpl = NULL;
@@ -608,6 +635,9 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
{
int err = -EAGAIN;
+ if (WARN_ON_ONCE(inst == NULL))
+ return -EINVAL;
+
spawn->inst = inst;
spawn->mask = mask;
@@ -845,8 +875,8 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name,
}
EXPORT_SYMBOL_GPL(crypto_inst_setname);
-void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
- unsigned int head)
+void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
+ unsigned int head)
{
struct crypto_instance *inst;
char *p;
@@ -869,35 +899,6 @@ err_free_inst:
kfree(p);
return ERR_PTR(err);
}
-EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
-
-struct crypto_instance *crypto_alloc_instance(const char *name,
- struct crypto_alg *alg)
-{
- struct crypto_instance *inst;
- struct crypto_spawn *spawn;
- int err;
-
- inst = crypto_alloc_instance2(name, alg, 0);
- if (IS_ERR(inst))
- goto out;
-
- spawn = crypto_instance_ctx(inst);
- err = crypto_init_spawn(spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
-
- if (err)
- goto err_free_inst;
-
- return inst;
-
-err_free_inst:
- kfree(inst);
- inst = ERR_PTR(err);
-
-out:
- return inst;
-}
EXPORT_SYMBOL_GPL(crypto_alloc_instance);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
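
For reference, a sketch of how a mode module is expected to use the new crypto_register_templates()/crypto_unregister_templates() helpers (the ccm, gcm and ctr conversions later in this diff follow exactly this shape); the template names and create callbacks here are placeholders.

static struct crypto_template example_tmpls[] = {
	{
		.name = "foo",
		.create = example_foo_create,	/* hypothetical */
		.module = THIS_MODULE,
	}, {
		.name = "bar",
		.create = example_bar_create,	/* hypothetical */
		.module = THIS_MODULE,
	},
};

static int __init example_module_init(void)
{
	/* Registers all templates and unwinds automatically on failure. */
	return crypto_register_templates(example_tmpls,
					 ARRAY_SIZE(example_tmpls));
}

static void __exit example_module_exit(void)
{
	crypto_unregister_templates(example_tmpls,
				    ARRAY_SIZE(example_tmpls));
}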
diff --git a/crypto/arc4.c b/crypto/arc4.c
index f1a81925558f..6c93342e3405 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -12,14 +12,11 @@
*
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/crypto.h>
#include <crypto/algapi.h>
-
-#define ARC4_MIN_KEY_SIZE 1
-#define ARC4_MAX_KEY_SIZE 256
-#define ARC4_BLOCK_SIZE 1
+#include <crypto/arc4.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/init.h>
+#include <linux/module.h>
struct arc4_ctx {
u32 S[256];
@@ -50,6 +47,12 @@ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
+static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return arc4_set_key(&tfm->base, in_key, key_len);
+}
+
static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in,
unsigned int len)
{
@@ -92,30 +95,25 @@ static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in)
arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1);
}
-static int ecb_arc4_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_arc4_crypt(struct skcipher_request *req)
{
- struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
-
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes > 0) {
- u8 *wsrc = walk.src.virt.addr;
- u8 *wdst = walk.dst.virt.addr;
-
- arc4_crypt(ctx, wdst, wsrc, walk.nbytes);
-
- err = blkcipher_walk_done(desc, &walk, 0);
+ arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
-static struct crypto_alg arc4_algs[2] = { {
+static struct crypto_alg arc4_cipher = {
.cra_name = "arc4",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARC4_BLOCK_SIZE,
@@ -130,34 +128,39 @@ static struct crypto_alg arc4_algs[2] = { {
.cia_decrypt = arc4_crypt_one,
},
},
-}, {
- .cra_name = "ecb(arc4)",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = ARC4_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct arc4_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .setkey = arc4_set_key,
- .encrypt = ecb_arc4_crypt,
- .decrypt = ecb_arc4_crypt,
- },
- },
-} };
+};
+
+static struct skcipher_alg arc4_skcipher = {
+ .base.cra_name = "ecb(arc4)",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = ARC4_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct arc4_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ .setkey = arc4_set_key_skcipher,
+ .encrypt = ecb_arc4_crypt,
+ .decrypt = ecb_arc4_crypt,
+};
static int __init arc4_init(void)
{
- return crypto_register_algs(arc4_algs, ARRAY_SIZE(arc4_algs));
+ int err;
+
+ err = crypto_register_alg(&arc4_cipher);
+ if (err)
+ return err;
+
+ err = crypto_register_skcipher(&arc4_skcipher);
+ if (err)
+ crypto_unregister_alg(&arc4_cipher);
+ return err;
}
static void __exit arc4_exit(void)
{
- crypto_unregister_algs(arc4_algs, ARRAY_SIZE(arc4_algs));
+ crypto_unregister_alg(&arc4_cipher);
+ crypto_unregister_skcipher(&arc4_skcipher);
}
module_init(arc4_init);
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 37f54d1b2f66..4be293a4b5f0 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+ /*
+ * RTA_OK() didn't align the rtattr's payload when validating that it
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
+ * aligned boundary. To avoid confusion, require that the rtattr
+ * payload be exactly the param struct, which has a 4-byte aligned size.
+ */
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
param = RTA_DATA(rta);
keys->enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
if (keylen < keys->enckeylen)
return -EINVAL;
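
To make the stricter check concrete, here is a hedged sketch of how a caller (for instance the IPsec key setup path) could lay out an authenc() key blob that crypto_authenc_extractkeys() now accepts: an rtattr whose payload is exactly struct crypto_authenc_key_param, followed immediately by the authentication key and then the encryption key. The helper name and bounds handling are illustrative, not part of the patch.

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static int example_build_authenc_key(u8 *buf, unsigned int buflen,
				     const u8 *authkey, unsigned int authkeylen,
				     const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	if (buflen < RTA_LENGTH(sizeof(*param)) + authkeylen + enckeylen)
		return -EINVAL;

	/* RTA_PAYLOAD(rta) == sizeof(*param), as required by the new check. */
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* Keys start right after the rtattr: auth key first, then enc key. */
	memcpy(buf + rta->rta_len, authkey, authkeylen);
	memcpy(buf + rta->rta_len + authkeylen, enckey, enckeylen);
	return 0;
}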
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 80a25cc04aec..4741fe89ba2c 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
struct aead_request *req = areq->data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
- aead_request_complete(req, err);
+ authenc_esn_request_complete(req, err);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)
diff --git a/crypto/cbc.c b/crypto/cbc.c
index dd5f332fd566..d12efaac9230 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -18,34 +18,11 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/slab.h>
-
-struct crypto_cbc_ctx {
- struct crypto_cipher *child;
-};
-
-static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_cipher *child = ctx->child;
- int err;
-
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
const u8 *src, u8 *dst)
{
- struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_cipher_encrypt_one(ctx->child, dst, src);
+ crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}
static int crypto_cbc_encrypt(struct skcipher_request *req)
@@ -56,9 +33,7 @@ static int crypto_cbc_encrypt(struct skcipher_request *req)
static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
const u8 *src, u8 *dst)
{
- struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_cipher_decrypt_one(ctx->child, dst, src);
+ crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src);
}
static int crypto_cbc_decrypt(struct skcipher_request *req)
@@ -78,113 +53,33 @@ static int crypto_cbc_decrypt(struct skcipher_request *req)
return err;
}
-static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm)
-{
- struct skcipher_instance *inst = skcipher_alg_instance(tfm);
- struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
- struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
- return 0;
-}
-
-static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm)
-{
- struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_cipher(ctx->child);
-}
-
-static void crypto_cbc_free(struct skcipher_instance *inst)
-{
- crypto_drop_skcipher(skcipher_instance_ctx(inst));
- kfree(inst);
-}
-
static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
- struct crypto_spawn *spawn;
struct crypto_alg *alg;
- u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
- if (err)
- return err;
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
-
- algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
- if (IS_ERR(algt))
- goto err_free_inst;
-
- mask = CRYPTO_ALG_TYPE_MASK |
- crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
- err = PTR_ERR(alg);
- if (IS_ERR(alg))
- goto err_free_inst;
-
- spawn = skcipher_instance_ctx(inst);
- err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
- if (err)
- goto err_put_alg;
-
- err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
- if (err)
- goto err_drop_spawn;
+ inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ if (IS_ERR(inst))
+ return PTR_ERR(inst);
err = -EINVAL;
if (!is_power_of_2(alg->cra_blocksize))
- goto err_drop_spawn;
-
- inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = alg->cra_blocksize;
- inst->alg.base.cra_alignmask = alg->cra_alignmask;
-
- inst->alg.ivsize = alg->cra_blocksize;
- inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
-
- inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
-
- inst->alg.init = crypto_cbc_init_tfm;
- inst->alg.exit = crypto_cbc_exit_tfm;
+ goto out_free_inst;
- inst->alg.setkey = crypto_cbc_setkey;
inst->alg.encrypt = crypto_cbc_encrypt;
inst->alg.decrypt = crypto_cbc_decrypt;
- inst->free = crypto_cbc_free;
-
err = skcipher_register_instance(tmpl, inst);
if (err)
- goto err_drop_spawn;
- crypto_mod_put(alg);
+ goto out_free_inst;
+ goto out_put_alg;
-out:
- return err;
-
-err_drop_spawn:
- crypto_drop_spawn(spawn);
-err_put_alg:
+out_free_inst:
+ inst->free(inst);
+out_put_alg:
crypto_mod_put(alg);
-err_free_inst:
- kfree(inst);
- goto out;
+ return err;
}
static struct crypto_template crypto_cbc_tmpl = {
@@ -207,5 +102,5 @@ module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CBC block cipher algorithm");
+MODULE_DESCRIPTION("CBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cbc");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index b242fd0d3262..50df8f001c1c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -589,12 +589,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
mac_name);
}
-static struct crypto_template crypto_ccm_tmpl = {
- .name = "ccm",
- .create = crypto_ccm_create,
- .module = THIS_MODULE,
-};
-
static int crypto_ccm_base_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
@@ -618,12 +612,6 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl,
cipher_name);
}
-static struct crypto_template crypto_ccm_base_tmpl = {
- .name = "ccm_base",
- .create = crypto_ccm_base_create,
- .module = THIS_MODULE,
-};
-
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
@@ -854,12 +842,6 @@ out_free_inst:
goto out;
}
-static struct crypto_template crypto_rfc4309_tmpl = {
- .name = "rfc4309",
- .create = crypto_rfc4309_create,
- .module = THIS_MODULE,
-};
-
static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
@@ -999,51 +981,37 @@ out_put_alg:
return err;
}
-static struct crypto_template crypto_cbcmac_tmpl = {
- .name = "cbcmac",
- .create = cbcmac_create,
- .free = shash_free_instance,
- .module = THIS_MODULE,
+static struct crypto_template crypto_ccm_tmpls[] = {
+ {
+ .name = "cbcmac",
+ .create = cbcmac_create,
+ .free = shash_free_instance,
+ .module = THIS_MODULE,
+ }, {
+ .name = "ccm_base",
+ .create = crypto_ccm_base_create,
+ .module = THIS_MODULE,
+ }, {
+ .name = "ccm",
+ .create = crypto_ccm_create,
+ .module = THIS_MODULE,
+ }, {
+ .name = "rfc4309",
+ .create = crypto_rfc4309_create,
+ .module = THIS_MODULE,
+ },
};
static int __init crypto_ccm_module_init(void)
{
- int err;
-
- err = crypto_register_template(&crypto_cbcmac_tmpl);
- if (err)
- goto out;
-
- err = crypto_register_template(&crypto_ccm_base_tmpl);
- if (err)
- goto out_undo_cbcmac;
-
- err = crypto_register_template(&crypto_ccm_tmpl);
- if (err)
- goto out_undo_base;
-
- err = crypto_register_template(&crypto_rfc4309_tmpl);
- if (err)
- goto out_undo_ccm;
-
-out:
- return err;
-
-out_undo_ccm:
- crypto_unregister_template(&crypto_ccm_tmpl);
-out_undo_base:
- crypto_unregister_template(&crypto_ccm_base_tmpl);
-out_undo_cbcmac:
- crypto_register_template(&crypto_cbcmac_tmpl);
- goto out;
+ return crypto_register_templates(crypto_ccm_tmpls,
+ ARRAY_SIZE(crypto_ccm_tmpls));
}
static void __exit crypto_ccm_module_exit(void)
{
- crypto_unregister_template(&crypto_rfc4309_tmpl);
- crypto_unregister_template(&crypto_ccm_tmpl);
- crypto_unregister_template(&crypto_ccm_base_tmpl);
- crypto_unregister_template(&crypto_cbcmac_tmpl);
+ crypto_unregister_templates(crypto_ccm_tmpls,
+ ARRAY_SIZE(crypto_ccm_tmpls));
}
module_init(crypto_ccm_module_init);
diff --git a/crypto/cfb.c b/crypto/cfb.c
index e81e45673498..03ac847f6d6a 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -25,28 +25,17 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/types.h>
-
-struct crypto_cfb_ctx {
- struct crypto_cipher *child;
-};
static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
{
- struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
-
- return crypto_cipher_blocksize(child);
+ return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
}
static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
const u8 *src, u8 *dst)
{
- struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_cipher_encrypt_one(ctx->child, dst, src);
+ crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}
/* final encrypt and decrypt is the same */
@@ -77,12 +66,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
do {
crypto_cfb_encrypt_one(tfm, iv, dst);
crypto_xor(dst, src, bsize);
- memcpy(iv, dst, bsize);
+ iv = dst;
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
+ memcpy(walk->iv, iv, bsize);
+
return nbytes;
}
@@ -162,7 +153,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
const unsigned int bsize = crypto_cfb_bsize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE];
do {
@@ -172,8 +163,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
-
return nbytes;
}
@@ -186,22 +175,6 @@ static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
return crypto_cfb_decrypt_segment(walk, tfm);
}
-static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_cipher *child = ctx->child;
- int err;
-
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
static int crypto_cfb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -224,110 +197,34 @@ static int crypto_cfb_decrypt(struct skcipher_request *req)
return err;
}
-static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
-{
- struct skcipher_instance *inst = skcipher_alg_instance(tfm);
- struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
- struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
- return 0;
-}
-
-static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
-{
- struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_cipher(ctx->child);
-}
-
-static void crypto_cfb_free(struct skcipher_instance *inst)
-{
- crypto_drop_skcipher(skcipher_instance_ctx(inst));
- kfree(inst);
-}
-
static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
- struct crypto_spawn *spawn;
struct crypto_alg *alg;
- u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
- if (err)
- return err;
+ inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ if (IS_ERR(inst))
+ return PTR_ERR(inst);
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
-
- algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
- if (IS_ERR(algt))
- goto err_free_inst;
-
- mask = CRYPTO_ALG_TYPE_MASK |
- crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
- err = PTR_ERR(alg);
- if (IS_ERR(alg))
- goto err_free_inst;
-
- spawn = skcipher_instance_ctx(inst);
- err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
- if (err)
- goto err_put_alg;
-
- err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
- if (err)
- goto err_drop_spawn;
-
- inst->alg.base.cra_priority = alg->cra_priority;
- /* we're a stream cipher independend of the crypto cra_blocksize */
+ /* CFB mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
- inst->alg.base.cra_alignmask = alg->cra_alignmask;
-
- inst->alg.ivsize = alg->cra_blocksize;
- inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
-
- inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);
- inst->alg.init = crypto_cfb_init_tfm;
- inst->alg.exit = crypto_cfb_exit_tfm;
+ /*
+ * To simplify the implementation, configure the skcipher walk to only
+ * give a partial block at the very end, never earlier.
+ */
+ inst->alg.chunksize = alg->cra_blocksize;
- inst->alg.setkey = crypto_cfb_setkey;
inst->alg.encrypt = crypto_cfb_encrypt;
inst->alg.decrypt = crypto_cfb_decrypt;
- inst->free = crypto_cfb_free;
-
err = skcipher_register_instance(tmpl, inst);
if (err)
- goto err_drop_spawn;
- crypto_mod_put(alg);
+ inst->free(inst);
-out:
- return err;
-
-err_drop_spawn:
- crypto_drop_spawn(spawn);
-err_put_alg:
crypto_mod_put(alg);
-err_free_inst:
- kfree(inst);
- goto out;
+ return err;
}
static struct crypto_template crypto_cfb_tmpl = {
@@ -350,5 +247,5 @@ module_init(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CFB block cipher algorithm");
+MODULE_DESCRIPTION("CFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cfb");
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index fef11446ab1b..ed2e12e26dd8 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -701,37 +701,28 @@ static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
}
-static struct crypto_template rfc7539_tmpl = {
- .name = "rfc7539",
- .create = rfc7539_create,
- .module = THIS_MODULE,
-};
-
-static struct crypto_template rfc7539esp_tmpl = {
- .name = "rfc7539esp",
- .create = rfc7539esp_create,
- .module = THIS_MODULE,
+static struct crypto_template rfc7539_tmpls[] = {
+ {
+ .name = "rfc7539",
+ .create = rfc7539_create,
+ .module = THIS_MODULE,
+ }, {
+ .name = "rfc7539esp",
+ .create = rfc7539esp_create,
+ .module = THIS_MODULE,
+ },
};
static int __init chacha20poly1305_module_init(void)
{
- int err;
-
- err = crypto_register_template(&rfc7539_tmpl);
- if (err)
- return err;
-
- err = crypto_register_template(&rfc7539esp_tmpl);
- if (err)
- crypto_unregister_template(&rfc7539_tmpl);
-
- return err;
+ return crypto_register_templates(rfc7539_tmpls,
+ ARRAY_SIZE(rfc7539_tmpls));
}
static void __exit chacha20poly1305_module_exit(void)
{
- crypto_unregister_template(&rfc7539esp_tmpl);
- crypto_unregister_template(&rfc7539_tmpl);
+ crypto_unregister_templates(rfc7539_tmpls,
+ ARRAY_SIZE(rfc7539_tmpls));
}
module_init(chacha20poly1305_module_init);
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 0bae59922a80..01630a9c7e01 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -65,6 +65,10 @@ static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
+static int null_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{ return 0; }
+
static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
@@ -74,21 +78,18 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
memcpy(dst, src, NULL_BLOCK_SIZE);
}
-static int skcipher_null_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int null_skcipher_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
if (walk.src.virt.addr != walk.dst.virt.addr)
memcpy(walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
@@ -109,7 +110,22 @@ static struct shash_alg digest_null = {
}
};
-static struct crypto_alg null_algs[3] = { {
+static struct skcipher_alg skcipher_null = {
+ .base.cra_name = "ecb(cipher_null)",
+ .base.cra_driver_name = "ecb-cipher_null",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = NULL_BLOCK_SIZE,
+ .base.cra_ctxsize = 0,
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = NULL_KEY_SIZE,
+ .max_keysize = NULL_KEY_SIZE,
+ .ivsize = NULL_IV_SIZE,
+ .setkey = null_skcipher_setkey,
+ .encrypt = null_skcipher_crypt,
+ .decrypt = null_skcipher_crypt,
+};
+
+static struct crypto_alg null_algs[] = { {
.cra_name = "cipher_null",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = NULL_BLOCK_SIZE,
@@ -122,22 +138,6 @@ static struct crypto_alg null_algs[3] = { {
.cia_encrypt = null_crypt,
.cia_decrypt = null_crypt } }
}, {
- .cra_name = "ecb(cipher_null)",
- .cra_driver_name = "ecb-cipher_null",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = NULL_BLOCK_SIZE,
- .cra_type = &crypto_blkcipher_type,
- .cra_ctxsize = 0,
- .cra_module = THIS_MODULE,
- .cra_u = { .blkcipher = {
- .min_keysize = NULL_KEY_SIZE,
- .max_keysize = NULL_KEY_SIZE,
- .ivsize = NULL_IV_SIZE,
- .setkey = null_setkey,
- .encrypt = skcipher_null_crypt,
- .decrypt = skcipher_null_crypt } }
-}, {
.cra_name = "compress_null",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_blocksize = NULL_BLOCK_SIZE,
@@ -199,8 +199,14 @@ static int __init crypto_null_mod_init(void)
if (ret < 0)
goto out_unregister_algs;
+ ret = crypto_register_skcipher(&skcipher_null);
+ if (ret < 0)
+ goto out_unregister_shash;
+
return 0;
+out_unregister_shash:
+ crypto_unregister_shash(&digest_null);
out_unregister_algs:
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
out:
@@ -209,8 +215,9 @@ out:
static void __exit crypto_null_mod_fini(void)
{
- crypto_unregister_shash(&digest_null);
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
+ crypto_unregister_shash(&digest_null);
+ crypto_unregister_skcipher(&skcipher_null);
}
module_init(crypto_null_mod_init);
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index 3e9a53233d80..a03f326a63d3 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -20,10 +20,6 @@
#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
-static DEFINE_MUTEX(crypto_cfg_mutex);
-
-extern struct sock *crypto_nlsk;
-
struct crypto_dump_info {
struct sk_buff *in_skb;
struct sk_buff *out_skb;
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 30f3946efc6d..ec8f8b67473a 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -17,14 +17,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
#include <linux/slab.h>
-struct crypto_ctr_ctx {
- struct crypto_cipher *child;
-};
-
struct crypto_rfc3686_ctx {
struct crypto_skcipher *child;
u8 nonce[CTR_RFC3686_NONCE_SIZE];
@@ -35,24 +29,7 @@ struct crypto_rfc3686_req_ctx {
struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
};
-static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
- struct crypto_cipher *child = ctx->child;
- int err;
-
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
-
- return err;
-}
-
-static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
+static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
unsigned int bsize = crypto_cipher_blocksize(tfm);
@@ -70,7 +47,7 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
crypto_inc(ctrblk, bsize);
}
-static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
+static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
@@ -96,7 +73,7 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
return nbytes;
}
-static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
+static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
@@ -123,138 +100,77 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
return nbytes;
}
-static int crypto_ctr_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_ctr_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
- unsigned int bsize = crypto_cipher_blocksize(child);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+ const unsigned int bsize = crypto_cipher_blocksize(cipher);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, bsize);
+ err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= bsize) {
if (walk.src.virt.addr == walk.dst.virt.addr)
- nbytes = crypto_ctr_crypt_inplace(&walk, child);
+ nbytes = crypto_ctr_crypt_inplace(&walk, cipher);
else
- nbytes = crypto_ctr_crypt_segment(&walk, child);
+ nbytes = crypto_ctr_crypt_segment(&walk, cipher);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
if (walk.nbytes) {
- crypto_ctr_crypt_final(&walk, child);
- err = blkcipher_walk_done(desc, &walk, 0);
+ crypto_ctr_crypt_final(&walk, cipher);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
-static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
-
- return 0;
-}
-
-static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(ctx->child);
-}
-
-static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
+static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
- struct crypto_attr_type *algt;
+ struct skcipher_instance *inst;
struct crypto_alg *alg;
- u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
- if (err)
- return ERR_PTR(err);
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- mask = CRYPTO_ALG_TYPE_MASK |
- crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
-
- alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, mask);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ if (IS_ERR(inst))
+ return PTR_ERR(inst);
/* Block size must be >= 4 bytes. */
err = -EINVAL;
if (alg->cra_blocksize < 4)
- goto out_put_alg;
+ goto out_free_inst;
/* If this is false we'd fail the alignment of crypto_inc. */
if (alg->cra_blocksize % 4)
- goto out_put_alg;
-
- inst = crypto_alloc_instance("ctr", alg);
- if (IS_ERR(inst))
- goto out;
-
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = 1;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
-
- inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+ goto out_free_inst;
- inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);
+ /* CTR mode is a stream cipher. */
+ inst->alg.base.cra_blocksize = 1;
- inst->alg.cra_init = crypto_ctr_init_tfm;
- inst->alg.cra_exit = crypto_ctr_exit_tfm;
+ /*
+ * To simplify the implementation, configure the skcipher walk to only
+ * give a partial block at the very end, never earlier.
+ */
+ inst->alg.chunksize = alg->cra_blocksize;
- inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
- inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
+ inst->alg.encrypt = crypto_ctr_crypt;
+ inst->alg.decrypt = crypto_ctr_crypt;
-out:
- crypto_mod_put(alg);
- return inst;
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto out_free_inst;
+ goto out_put_alg;
+out_free_inst:
+ inst->free(inst);
out_put_alg:
- inst = ERR_PTR(err);
- goto out;
-}
-
-static void crypto_ctr_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
+ crypto_mod_put(alg);
+ return err;
}
-static struct crypto_template crypto_ctr_tmpl = {
- .name = "ctr",
- .alloc = crypto_ctr_alloc,
- .free = crypto_ctr_free,
- .module = THIS_MODULE,
-};
-
static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
@@ -444,42 +360,34 @@ err_free_inst:
goto out;
}
-static struct crypto_template crypto_rfc3686_tmpl = {
- .name = "rfc3686",
- .create = crypto_rfc3686_create,
- .module = THIS_MODULE,
+static struct crypto_template crypto_ctr_tmpls[] = {
+ {
+ .name = "ctr",
+ .create = crypto_ctr_create,
+ .module = THIS_MODULE,
+ }, {
+ .name = "rfc3686",
+ .create = crypto_rfc3686_create,
+ .module = THIS_MODULE,
+ },
};
static int __init crypto_ctr_module_init(void)
{
- int err;
-
- err = crypto_register_template(&crypto_ctr_tmpl);
- if (err)
- goto out;
-
- err = crypto_register_template(&crypto_rfc3686_tmpl);
- if (err)
- goto out_drop_ctr;
-
-out:
- return err;
-
-out_drop_ctr:
- crypto_unregister_template(&crypto_ctr_tmpl);
- goto out;
+ return crypto_register_templates(crypto_ctr_tmpls,
+ ARRAY_SIZE(crypto_ctr_tmpls));
}
static void __exit crypto_ctr_module_exit(void)
{
- crypto_unregister_template(&crypto_rfc3686_tmpl);
- crypto_unregister_template(&crypto_ctr_tmpl);
+ crypto_unregister_templates(crypto_ctr_tmpls,
+ ARRAY_SIZE(crypto_ctr_tmpls));
}
module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CTR Counter block mode");
+MODULE_DESCRIPTION("CTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index a71720544d11..1e6621665dd9 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -789,7 +789,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
/* Expand to tmp */
ret = des_ekey(tmp, key);
- if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
@@ -866,7 +866,7 @@ int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
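
For illustration only: with the renamed CRYPTO_TFM_REQ_FORBID_WEAK_KEYS flag, weak-key rejection remains opt-in, so a caller that wants des_setkey() to refuse the known weak and semi-weak keys sets the flag before setkey. The helper name below is an assumption.

static int example_des_setkey_strict(struct crypto_cipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	/* Ask the DES implementation to reject weak keys. */
	crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);

	/* Returns -EINVAL and sets CRYPTO_TFM_RES_WEAK_KEY for a weak key. */
	return crypto_cipher_setkey(tfm, key, keylen);
}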
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 12011aff0971..0732715c8d91 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -11,162 +11,83 @@
*/
#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-struct crypto_ecb_ctx {
- struct crypto_cipher *child;
-};
-
-static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent);
- struct crypto_cipher *child = ctx->child;
- int err;
-
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int crypto_ecb_crypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_cipher *tfm,
+static int crypto_ecb_crypt(struct skcipher_request *req,
+ struct crypto_cipher *cipher,
void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
- int bsize = crypto_cipher_blocksize(tfm);
+ const unsigned int bsize = crypto_cipher_blocksize(cipher);
+ struct skcipher_walk walk;
unsigned int nbytes;
int err;
- err = blkcipher_walk_virt(desc, walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes) != 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
do {
- fn(crypto_cipher_tfm(tfm), wdst, wsrc);
+ fn(crypto_cipher_tfm(cipher), dst, src);
- wsrc += bsize;
- wdst += bsize;
+ src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int crypto_ecb_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_ecb_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return crypto_ecb_crypt(desc, &walk, child,
- crypto_cipher_alg(child)->cia_encrypt);
-}
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
-static int crypto_ecb_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return crypto_ecb_crypt(desc, &walk, child,
- crypto_cipher_alg(child)->cia_decrypt);
+ return crypto_ecb_crypt(req, cipher,
+ crypto_cipher_alg(cipher)->cia_encrypt);
}
-static int crypto_ecb_init_tfm(struct crypto_tfm *tfm)
+static int crypto_ecb_decrypt(struct skcipher_request *req)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_cipher *cipher;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
- return 0;
+ return crypto_ecb_crypt(req, cipher,
+ crypto_cipher_alg(cipher)->cia_decrypt);
}
-static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm)
+static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->child);
-}
-
-static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb)
-{
- struct crypto_instance *inst;
+ struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
- if (err)
- return ERR_PTR(err);
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
-
- inst = crypto_alloc_instance("ecb", alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
if (IS_ERR(inst))
- goto out_put_alg;
-
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_blkcipher_type;
-
- inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+ return PTR_ERR(inst);
- inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx);
+ inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */
- inst->alg.cra_init = crypto_ecb_init_tfm;
- inst->alg.cra_exit = crypto_ecb_exit_tfm;
+ inst->alg.encrypt = crypto_ecb_encrypt;
+ inst->alg.decrypt = crypto_ecb_decrypt;
- inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt;
- inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt;
-
-out_put_alg:
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ inst->free(inst);
crypto_mod_put(alg);
- return inst;
-}
-
-static void crypto_ecb_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
+ return err;
}
static struct crypto_template crypto_ecb_tmpl = {
.name = "ecb",
- .alloc = crypto_ecb_alloc,
- .free = crypto_ecb_free,
+ .create = crypto_ecb_create,
.module = THIS_MODULE,
};
@@ -184,5 +105,5 @@ module_init(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ECB block cipher algorithm");
+MODULE_DESCRIPTION("ECB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ecb");
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e438492db2ca..e1a11f529d25 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -247,7 +247,7 @@ static int gcm_hash_len(struct aead_request *req, u32 flags)
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
- u128 lengths;
+ be128 lengths;
lengths.a = cpu_to_be64(req->assoclen * 8);
lengths.b = cpu_to_be64(gctx->cryptlen * 8);
@@ -727,12 +727,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
ctr_name, "ghash");
}
-static struct crypto_template crypto_gcm_tmpl = {
- .name = "gcm",
- .create = crypto_gcm_create,
- .module = THIS_MODULE,
-};
-
static int crypto_gcm_base_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
@@ -756,12 +750,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
ctr_name, ghash_name);
}
-static struct crypto_template crypto_gcm_base_tmpl = {
- .name = "gcm_base",
- .create = crypto_gcm_base_create,
- .module = THIS_MODULE,
-};
-
static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
@@ -989,12 +977,6 @@ out_free_inst:
goto out;
}
-static struct crypto_template crypto_rfc4106_tmpl = {
- .name = "rfc4106",
- .create = crypto_rfc4106_create,
- .module = THIS_MODULE,
-};
-
static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
@@ -1231,10 +1213,24 @@ out_free_inst:
goto out;
}
-static struct crypto_template crypto_rfc4543_tmpl = {
- .name = "rfc4543",
- .create = crypto_rfc4543_create,
- .module = THIS_MODULE,
+static struct crypto_template crypto_gcm_tmpls[] = {
+ {
+ .name = "gcm_base",
+ .create = crypto_gcm_base_create,
+ .module = THIS_MODULE,
+ }, {
+ .name = "gcm",
+ .create = crypto_gcm_create,
+ .module = THIS_MODULE,
+ }, {
+ .name = "rfc4106",
+ .create = crypto_rfc4106_create,
+ .module = THIS_MODULE,
+ }, {
+ .name = "rfc4543",
+ .create = crypto_rfc4543_create,
+ .module = THIS_MODULE,
+ },
};
static int __init crypto_gcm_module_init(void)
@@ -1247,42 +1243,19 @@ static int __init crypto_gcm_module_init(void)
sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf));
- err = crypto_register_template(&crypto_gcm_base_tmpl);
- if (err)
- goto out;
-
- err = crypto_register_template(&crypto_gcm_tmpl);
+ err = crypto_register_templates(crypto_gcm_tmpls,
+ ARRAY_SIZE(crypto_gcm_tmpls));
if (err)
- goto out_undo_base;
+ kfree(gcm_zeroes);
- err = crypto_register_template(&crypto_rfc4106_tmpl);
- if (err)
- goto out_undo_gcm;
-
- err = crypto_register_template(&crypto_rfc4543_tmpl);
- if (err)
- goto out_undo_rfc4106;
-
- return 0;
-
-out_undo_rfc4106:
- crypto_unregister_template(&crypto_rfc4106_tmpl);
-out_undo_gcm:
- crypto_unregister_template(&crypto_gcm_tmpl);
-out_undo_base:
- crypto_unregister_template(&crypto_gcm_base_tmpl);
-out:
- kfree(gcm_zeroes);
return err;
}
static void __exit crypto_gcm_module_exit(void)
{
kfree(gcm_zeroes);
- crypto_unregister_template(&crypto_rfc4543_tmpl);
- crypto_unregister_template(&crypto_rfc4106_tmpl);
- crypto_unregister_template(&crypto_gcm_tmpl);
- crypto_unregister_template(&crypto_gcm_base_tmpl);
+ crypto_unregister_templates(crypto_gcm_tmpls,
+ ARRAY_SIZE(crypto_gcm_tmpls));
}
module_init(crypto_gcm_module_init);
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index ec5c6a087c90..a5cfe610d8f4 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -56,7 +56,7 @@
* u8 *iv = data;
* u8 *pt = data + crypto_skcipher_ivsize(tfm);
* <ensure that pt contains the plaintext of size ptlen>
- * sg_init_one(&sg, ptdata, ptlen);
+ * sg_init_one(&sg, pt, ptlen);
* skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
*
* ==> After encryption, data now contains full KW result as per SP800-38F.
@@ -70,8 +70,8 @@
* u8 *iv = data;
* u8 *ct = data + crypto_skcipher_ivsize(tfm);
* unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
- * sg_init_one(&sg, ctdata, ctlen);
- * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
+ * sg_init_one(&sg, ct, ctlen);
+ * skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
*
* ==> After decryption (which hopefully does not return EBADMSG), the ct
* pointer now points to the plaintext of size ctlen.
@@ -87,10 +87,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
-struct crypto_kw_ctx {
- struct crypto_cipher *child;
-};
-
struct crypto_kw_block {
#define SEMIBSIZE 8
__be64 A;
@@ -124,16 +120,13 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
}
}
-static int crypto_kw_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_kw_decrypt(struct skcipher_request *req)
{
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct crypto_kw_block block;
- struct scatterlist *lsrc, *ldst;
- u64 t = 6 * ((nbytes) >> 3);
+ struct scatterlist *src, *dst;
+ u64 t = 6 * ((req->cryptlen) >> 3);
unsigned int i;
int ret = 0;
@@ -141,27 +134,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
* Require at least 2 semiblocks (note, the 3rd semiblock that is
	 * required by SP800-38F is the IV).
*/
- if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
+ if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
return -EINVAL;
/* Place the IV into block A */
- memcpy(&block.A, desc->info, SEMIBSIZE);
+ memcpy(&block.A, req->iv, SEMIBSIZE);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
- * first loop, lsrc points to src and ldst to dst. For any
- * subsequent round, the code operates on dst only.
+ * first loop, src points to req->src and dst to req->dst. For any
+ * subsequent round, the code operates on req->dst only.
*/
- lsrc = src;
- ldst = dst;
+ src = req->src;
+ dst = req->dst;
for (i = 0; i < 6; i++) {
struct scatter_walk src_walk, dst_walk;
- unsigned int tmp_nbytes = nbytes;
+ unsigned int nbytes = req->cryptlen;
- while (tmp_nbytes) {
- /* move pointer by tmp_nbytes in the SGL */
- crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
+ while (nbytes) {
+ /* move pointer by nbytes in the SGL */
+ crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
/* get the source block */
scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
@@ -170,21 +163,21 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
block.A ^= cpu_to_be64(t);
t--;
/* perform KW operation: decrypt block */
- crypto_cipher_decrypt_one(child, (u8*)&block,
- (u8*)&block);
+ crypto_cipher_decrypt_one(cipher, (u8 *)&block,
+ (u8 *)&block);
- /* move pointer by tmp_nbytes in the SGL */
- crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
+ /* move pointer by nbytes in the SGL */
+ crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
/* Copy block->R into place */
scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
- tmp_nbytes -= SEMIBSIZE;
+ nbytes -= SEMIBSIZE;
}
/* we now start to operate on the dst SGL only */
- lsrc = dst;
- ldst = dst;
+ src = req->dst;
+ dst = req->dst;
}
/* Perform authentication check */
@@ -196,15 +189,12 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
return ret;
}
-static int crypto_kw_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int crypto_kw_encrypt(struct skcipher_request *req)
{
- struct crypto_blkcipher *tfm = desc->tfm;
- struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct crypto_kw_block block;
- struct scatterlist *lsrc, *ldst;
+ struct scatterlist *src, *dst;
u64 t = 1;
unsigned int i;
@@ -214,7 +204,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
* This means that the dst memory must be one semiblock larger than src.
* Also ensure that the given data is aligned to semiblock.
*/
- if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
+ if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
return -EINVAL;
/*
@@ -225,26 +215,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
- * first loop, lsrc points to src and ldst to dst. For any
- * subsequent round, the code operates on dst only.
+ * first loop, src points to req->src and dst to req->dst. For any
+ * subsequent round, the code operates on req->dst only.
*/
- lsrc = src;
- ldst = dst;
+ src = req->src;
+ dst = req->dst;
for (i = 0; i < 6; i++) {
struct scatter_walk src_walk, dst_walk;
- unsigned int tmp_nbytes = nbytes;
+ unsigned int nbytes = req->cryptlen;
- scatterwalk_start(&src_walk, lsrc);
- scatterwalk_start(&dst_walk, ldst);
+ scatterwalk_start(&src_walk, src);
+ scatterwalk_start(&dst_walk, dst);
- while (tmp_nbytes) {
+ while (nbytes) {
/* get the source block */
scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
/* perform KW operation: encrypt block */
- crypto_cipher_encrypt_one(child, (u8 *)&block,
+ crypto_cipher_encrypt_one(cipher, (u8 *)&block,
(u8 *)&block);
/* perform KW operation: modify IV with counter */
block.A ^= cpu_to_be64(t);
@@ -254,117 +244,59 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
- tmp_nbytes -= SEMIBSIZE;
+ nbytes -= SEMIBSIZE;
}
/* we now start to operate on the dst SGL only */
- lsrc = dst;
- ldst = dst;
+ src = req->dst;
+ dst = req->dst;
}
/* establish the IV for the caller to pick up */
- memcpy(desc->info, &block.A, SEMIBSIZE);
+ memcpy(req->iv, &block.A, SEMIBSIZE);
memzero_explicit(&block, sizeof(struct crypto_kw_block));
return 0;
}
-static int crypto_kw_setkey(struct crypto_tfm *parent, const u8 *key,
- unsigned int keylen)
+static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_kw_ctx *ctx = crypto_tfm_ctx(parent);
- struct crypto_cipher *child = ctx->child;
+ struct skcipher_instance *inst;
+ struct crypto_alg *alg;
int err;
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen);
- crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int crypto_kw_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
- return 0;
-}
-
-static void crypto_kw_exit_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(ctx->child);
-}
-
-static struct crypto_instance *crypto_kw_alloc(struct rtattr **tb)
-{
- struct crypto_instance *inst = NULL;
- struct crypto_alg *alg = NULL;
- int err;
-
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
- if (err)
- return ERR_PTR(err);
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
- CRYPTO_ALG_TYPE_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
+ inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ if (IS_ERR(inst))
+ return PTR_ERR(inst);
- inst = ERR_PTR(-EINVAL);
+ err = -EINVAL;
/* Section 5.1 requirement for KW */
if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
- goto err;
+ goto out_free_inst;
- inst = crypto_alloc_instance("kw", alg);
- if (IS_ERR(inst))
- goto err;
+ inst->alg.base.cra_blocksize = SEMIBSIZE;
+ inst->alg.base.cra_alignmask = 0;
+ inst->alg.ivsize = SEMIBSIZE;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = SEMIBSIZE;
- inst->alg.cra_alignmask = 0;
- inst->alg.cra_type = &crypto_blkcipher_type;
- inst->alg.cra_blkcipher.ivsize = SEMIBSIZE;
- inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+ inst->alg.encrypt = crypto_kw_encrypt;
+ inst->alg.decrypt = crypto_kw_decrypt;
- inst->alg.cra_ctxsize = sizeof(struct crypto_kw_ctx);
-
- inst->alg.cra_init = crypto_kw_init_tfm;
- inst->alg.cra_exit = crypto_kw_exit_tfm;
-
- inst->alg.cra_blkcipher.setkey = crypto_kw_setkey;
- inst->alg.cra_blkcipher.encrypt = crypto_kw_encrypt;
- inst->alg.cra_blkcipher.decrypt = crypto_kw_decrypt;
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto out_free_inst;
+ goto out_put_alg;
-err:
+out_free_inst:
+ inst->free(inst);
+out_put_alg:
crypto_mod_put(alg);
- return inst;
-}
-
-static void crypto_kw_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
+ return err;
}
static struct crypto_template crypto_kw_tmpl = {
.name = "kw",
- .alloc = crypto_kw_alloc,
- .free = crypto_kw_free,
+ .create = crypto_kw_create,
.module = THIS_MODULE,
};
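
Complementing the corrected usage comment at the top of keywrap.c, here is a hedged end-to-end sketch of KW encryption through the skcipher API after the conversion. "kw(aes)" is the instantiated template, the buffer layout follows that comment (IV semiblock followed by the plaintext), and synchronous completion is assumed for brevity; the helper name is illustrative.

static int example_kw_wrap(const u8 *key, unsigned int keylen,
			   u8 *data, unsigned int datalen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	u8 *iv, *pt;
	unsigned int ptlen;
	int err;

	tfm = crypto_alloc_skcipher("kw(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* Layout per the comment above: IV (one semiblock), then plaintext. */
	iv = data;
	pt = data + crypto_skcipher_ivsize(tfm);
	ptlen = datalen - crypto_skcipher_ivsize(tfm);

	sg_init_one(&sg, pt, ptlen);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);

	/* Generic kw is synchronous; an async backend would need a wait. */
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}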
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 3889c188f266..0747732d5b78 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The MORUS-1280 Authenticated-Encryption Algorithm
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#include <asm/unaligned.h>
@@ -366,18 +362,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
const struct morus1280_ops *ops)
{
struct skcipher_walk walk;
- u8 *dst;
- const u8 *src;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- ops->crypt_chunk(state, dst, src, walk.nbytes);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
- skcipher_walk_done(&walk, 0);
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
diff --git a/crypto/morus640.c b/crypto/morus640.c
index da06ec2f6a80..1617a1eb8be1 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The MORUS-640 Authenticated-Encryption Algorithm
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#include <asm/unaligned.h>
@@ -365,18 +361,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
const struct morus640_ops *ops)
{
struct skcipher_walk walk;
- u8 *dst;
- const u8 *src;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
- src = walk.src.virt.addr;
- dst = walk.dst.virt.addr;
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
- ops->crypt_chunk(state, dst, src, walk.nbytes);
+ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
- skcipher_walk_done(&walk, 0);
+ skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
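
The two MORUS hunks above apply the same fix: when the walk chunk is not the final one, the byte count is rounded down to the walk stride before crypt_chunk() runs, and the unprocessed remainder is passed to skcipher_walk_done() so the walker re-presents it in the next step, rather than the code operating on a partial block mid-request. A minimal stand-alone sketch of that bookkeeping in plain C, with made-up sizes; round_down_pow2() mirrors the kernel's round_down() for a power-of-two stride, and nothing here is kernel API:

#include <stdio.h>

/* Mirrors round_down() for a power-of-two stride. */
static unsigned int round_down_pow2(unsigned int n, unsigned int stride)
{
        return n & ~(stride - 1);
}

int main(void)
{
        unsigned int walk_nbytes = 100; /* bytes mapped in this walk step */
        unsigned int walk_total = 250;  /* bytes still left in the request */
        unsigned int stride = 32;       /* hypothetical cipher block size */
        unsigned int nbytes = walk_nbytes;

        /* Not the final chunk: only process whole stride-sized blocks. */
        if (nbytes < walk_total)
                nbytes = round_down_pow2(nbytes, stride);

        printf("process %u bytes now, hand %u back to the walker\n",
               nbytes, walk_nbytes - nbytes);   /* prints 96 and 4 */
        return 0;
}

The bytes handed back are not lost: as the hunks read, skcipher_walk_done() re-presents them in the following iteration, so crypt_chunk() only ever sees a short block at the true end of the request.
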
diff --git a/crypto/ofb.c b/crypto/ofb.c
index 886631708c5e..34b6e1f426f7 100644
--- a/crypto/ofb.c
+++ b/crypto/ofb.c
@@ -5,9 +5,6 @@
*
* Copyright (C) 2018 ARM Limited or its affiliates.
* All rights reserved.
- *
- * Based loosely on public domain code gleaned from libtomcrypt
- * (https://github.com/libtom/libtomcrypt).
*/
#include <crypto/algapi.h>
@@ -16,189 +13,70 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-struct crypto_ofb_ctx {
- struct crypto_cipher *child;
- int cnt;
-};
-
-static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
+static int crypto_ofb_crypt(struct skcipher_request *req)
{
- struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_cipher *child = ctx->child;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+ const unsigned int bsize = crypto_cipher_blocksize(cipher);
+ struct skcipher_walk walk;
int err;
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx,
- struct skcipher_walk *walk,
- struct crypto_cipher *tfm)
-{
- int bsize = crypto_cipher_blocksize(tfm);
- int nbytes = walk->nbytes;
-
- u8 *src = walk->src.virt.addr;
- u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
-
- do {
- if (ctx->cnt == bsize) {
- if (nbytes < bsize)
- break;
- crypto_cipher_encrypt_one(tfm, iv, iv);
- ctx->cnt = 0;
- }
- *dst = *src ^ iv[ctx->cnt];
- src++;
- dst++;
- ctx->cnt++;
- } while (--nbytes);
- return nbytes;
-}
-
-static int crypto_ofb_encrypt(struct skcipher_request *req)
-{
- struct skcipher_walk walk;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned int bsize;
- struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
- int ret = 0;
+ err = skcipher_walk_virt(&walk, req, false);
- bsize = crypto_cipher_blocksize(child);
- ctx->cnt = bsize;
+ while (walk.nbytes >= bsize) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 * const iv = walk.iv;
+ unsigned int nbytes = walk.nbytes;
- ret = skcipher_walk_virt(&walk, req, false);
+ do {
+ crypto_cipher_encrypt_one(cipher, iv, iv);
+ crypto_xor_cpy(dst, src, iv, bsize);
+ dst += bsize;
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
- while (walk.nbytes) {
- ret = crypto_ofb_encrypt_segment(ctx, &walk, child);
- ret = skcipher_walk_done(&walk, ret);
+ err = skcipher_walk_done(&walk, nbytes);
}
- return ret;
-}
-
-/* OFB encrypt and decrypt are identical */
-static int crypto_ofb_decrypt(struct skcipher_request *req)
-{
- return crypto_ofb_encrypt(req);
-}
-
-static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm)
-{
- struct skcipher_instance *inst = skcipher_alg_instance(tfm);
- struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
- struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
- return 0;
-}
-
-static void crypto_ofb_exit_tfm(struct crypto_skcipher *tfm)
-{
- struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_cipher(ctx->child);
-}
-
-static void crypto_ofb_free(struct skcipher_instance *inst)
-{
- crypto_drop_skcipher(skcipher_instance_ctx(inst));
- kfree(inst);
+ if (walk.nbytes) {
+ crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv);
+ crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv,
+ walk.nbytes);
+ err = skcipher_walk_done(&walk, 0);
+ }
+ return err;
}
static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
- struct crypto_spawn *spawn;
struct crypto_alg *alg;
- u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
- if (err)
- return err;
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
-
- algt = crypto_get_attr_type(tb);
- err = PTR_ERR(algt);
- if (IS_ERR(algt))
- goto err_free_inst;
-
- mask = CRYPTO_ALG_TYPE_MASK |
- crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
+ inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ if (IS_ERR(inst))
+ return PTR_ERR(inst);
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
- err = PTR_ERR(alg);
- if (IS_ERR(alg))
- goto err_free_inst;
+ /* OFB mode is a stream cipher. */
+ inst->alg.base.cra_blocksize = 1;
- spawn = skcipher_instance_ctx(inst);
- err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
- crypto_mod_put(alg);
- if (err)
- goto err_free_inst;
-
- err = crypto_inst_setname(skcipher_crypto_instance(inst), "ofb", alg);
- if (err)
- goto err_drop_spawn;
-
- inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = alg->cra_blocksize;
- inst->alg.base.cra_alignmask = alg->cra_alignmask;
-
- /* We access the data as u32s when xoring. */
- inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+ /*
+ * To simplify the implementation, configure the skcipher walk to only
+ * give a partial block at the very end, never earlier.
+ */
+ inst->alg.chunksize = alg->cra_blocksize;
- inst->alg.ivsize = alg->cra_blocksize;
- inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
-
- inst->alg.base.cra_ctxsize = sizeof(struct crypto_ofb_ctx);
-
- inst->alg.init = crypto_ofb_init_tfm;
- inst->alg.exit = crypto_ofb_exit_tfm;
-
- inst->alg.setkey = crypto_ofb_setkey;
- inst->alg.encrypt = crypto_ofb_encrypt;
- inst->alg.decrypt = crypto_ofb_decrypt;
-
- inst->free = crypto_ofb_free;
+ inst->alg.encrypt = crypto_ofb_crypt;
+ inst->alg.decrypt = crypto_ofb_crypt;
err = skcipher_register_instance(tmpl, inst);
if (err)
- goto err_drop_spawn;
+ inst->free(inst);
-out:
+ crypto_mod_put(alg);
return err;
-
-err_drop_spawn:
- crypto_drop_spawn(spawn);
-err_free_inst:
- kfree(inst);
- goto out;
}
static struct crypto_template crypto_ofb_tmpl = {
@@ -221,5 +99,5 @@ module_init(crypto_ofb_module_init);
module_exit(crypto_ofb_module_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("OFB block cipher algorithm");
+MODULE_DESCRIPTION("OFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ofb");
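
The rewritten crypto_ofb_crypt() above treats OFB as a pure keystream mode: each step re-encrypts the IV in place and XORs it into the data, full blocks first and at most one short block at the very end, which is why the instance is now registered with cra_blocksize = 1 and chunksize equal to the underlying cipher's block size. Below is a stand-alone sketch of that structure in plain C; toy_block_encrypt() is an arbitrary byte mixer standing in for the real block cipher, and ofb_crypt() is a hypothetical name, not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 16

/* Arbitrary byte mixer standing in for a block cipher; NOT a real cipher. */
static void toy_block_encrypt(uint8_t out[BSIZE], const uint8_t in[BSIZE])
{
        for (int i = 0; i < BSIZE; i++)
                out[i] = (uint8_t)(in[i] * 5 + 0x3a + i);
}

/* OFB: keystream block = E(previous keystream block); data is XORed with it. */
static void ofb_crypt(uint8_t *dst, const uint8_t *src, size_t len,
                      uint8_t iv[BSIZE])
{
        while (len >= BSIZE) {
                toy_block_encrypt(iv, iv);              /* advance keystream */
                for (int i = 0; i < BSIZE; i++)
                        dst[i] = src[i] ^ iv[i];
                src += BSIZE;
                dst += BSIZE;
                len -= BSIZE;
        }
        if (len) {                                      /* short final block */
                toy_block_encrypt(iv, iv);
                for (size_t i = 0; i < len; i++)
                        dst[i] = src[i] ^ iv[i];
        }
}

int main(void)
{
        uint8_t iv_enc[BSIZE] = { 0 }, iv_dec[BSIZE] = { 0 };
        uint8_t pt[37], ct[37], rt[37];         /* 37 = 2 full blocks + 5 */

        for (size_t i = 0; i < sizeof(pt); i++)
                pt[i] = (uint8_t)(i * 3 + 1);

        ofb_crypt(ct, pt, sizeof(pt), iv_enc);  /* encrypt */
        ofb_crypt(rt, ct, sizeof(ct), iv_dec);  /* decrypt: same operation */
        printf("%s\n", memcmp(pt, rt, sizeof(pt)) ? "mismatch" : "round-trip ok");
        return 0;
}

Because the keystream never depends on the ciphertext, decryption is the identical operation, which matches the patch wiring both .encrypt and .decrypt to crypto_ofb_crypt().
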
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 8aa10144407c..2fa03fc576fe 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -20,28 +20,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/compiler.h>
-
-struct crypto_pcbc_ctx {
- struct crypto_cipher *child;
-};
-
-static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_cipher *child = ctx->child;
- int err;
-
- crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_cipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
struct skcipher_walk *walk,
@@ -51,7 +29,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
do {
crypto_xor(iv, src, bsize);
@@ -72,7 +50,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
do {
@@ -84,16 +62,13 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
-
return nbytes;
}
static int crypto_pcbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
@@ -103,10 +78,10 @@ static int crypto_pcbc_encrypt(struct skcipher_request *req)
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
- child);
+ cipher);
else
nbytes = crypto_pcbc_encrypt_segment(req, &walk,
- child);
+ cipher);
err = skcipher_walk_done(&walk, nbytes);
}
@@ -121,7 +96,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
do {
crypto_cipher_decrypt_one(tfm, dst, src);
@@ -132,8 +107,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
-
return nbytes;
}
@@ -144,7 +117,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
do {
@@ -156,16 +129,13 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
-
return nbytes;
}
static int crypto_pcbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_cipher *child = ctx->child;
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
@@ -175,117 +145,34 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req)
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_decrypt_inplace(req, &walk,
- child);
+ cipher);
else
nbytes = crypto_pcbc_decrypt_segment(req, &walk,
- child);
+ cipher);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm)
-{
- struct skcipher_instance *inst = skcipher_alg_instance(tfm);
- struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
- struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_cipher *cipher;
-
- cipher = crypto_spawn_cipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
- return 0;
-}
-
-static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm)
-{
- struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_cipher(ctx->child);
-}
-
-static void crypto_pcbc_free(struct skcipher_instance *inst)
-{
- crypto_drop_skcipher(skcipher_instance_ctx(inst));
- kfree(inst);
-}
-
static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
- struct crypto_spawn *spawn;
struct crypto_alg *alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) &
- ~CRYPTO_ALG_INTERNAL)
- return -EINVAL;
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER |
- (algt->type & CRYPTO_ALG_INTERNAL),
- CRYPTO_ALG_TYPE_MASK |
- (algt->mask & CRYPTO_ALG_INTERNAL));
- err = PTR_ERR(alg);
- if (IS_ERR(alg))
- goto err_free_inst;
-
- spawn = skcipher_instance_ctx(inst);
- err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
- CRYPTO_ALG_TYPE_MASK);
- if (err)
- goto err_put_alg;
-
- err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
- if (err)
- goto err_drop_spawn;
+ inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
+ if (IS_ERR(inst))
+ return PTR_ERR(inst);
- inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL;
- inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = alg->cra_blocksize;
- inst->alg.base.cra_alignmask = alg->cra_alignmask;
-
- inst->alg.ivsize = alg->cra_blocksize;
- inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
- inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
-
- inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
-
- inst->alg.init = crypto_pcbc_init_tfm;
- inst->alg.exit = crypto_pcbc_exit_tfm;
-
- inst->alg.setkey = crypto_pcbc_setkey;
inst->alg.encrypt = crypto_pcbc_encrypt;
inst->alg.decrypt = crypto_pcbc_decrypt;
- inst->free = crypto_pcbc_free;
-
err = skcipher_register_instance(tmpl, inst);
if (err)
- goto err_drop_spawn;
+ inst->free(inst);
crypto_mod_put(alg);
-
-out:
return err;
-
-err_drop_spawn:
- crypto_drop_spawn(spawn);
-err_put_alg:
- crypto_mod_put(alg);
-err_free_inst:
- kfree(inst);
- goto out;
}
static struct crypto_template crypto_pcbc_tmpl = {
@@ -308,5 +195,5 @@ module_init(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("PCBC block cipher algorithm");
+MODULE_DESCRIPTION("PCBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("pcbc");
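
In the pcbc.c hunks the iv local now aliases walk->iv directly as a const pointer, so the trailing memcpy() back into walk->iv was redundant and is dropped, and the private child-cipher context is replaced by the common skcipher_ctx_simple helpers. The chaining rule itself is unchanged: PCBC feeds P_i XOR IV into the cipher and then sets the next IV to P_i XOR C_i. A stand-alone sketch of that rule in plain C, with invertible toy_encrypt()/toy_decrypt() stand-ins rather than a real cipher:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 16

/* Invertible toy stand-ins for the block cipher; NOT real ciphers. */
static void toy_encrypt(uint8_t b[BSIZE])
{
        for (int i = 0; i < BSIZE; i++)
                b[i] = (uint8_t)(b[i] + 77 + i);
}

static void toy_decrypt(uint8_t b[BSIZE])
{
        for (int i = 0; i < BSIZE; i++)
                b[i] = (uint8_t)(b[i] - 77 - i);
}

static void xor_block(uint8_t *dst, const uint8_t *a, const uint8_t *b)
{
        for (int i = 0; i < BSIZE; i++)
                dst[i] = a[i] ^ b[i];
}

/* PCBC encrypt: C_i = E(P_i ^ IV), then IV = P_i ^ C_i. */
static void pcbc_encrypt(uint8_t *dst, const uint8_t *src, size_t nblocks,
                         uint8_t iv[BSIZE])
{
        for (size_t n = 0; n < nblocks; n++, src += BSIZE, dst += BSIZE) {
                xor_block(dst, src, iv);
                toy_encrypt(dst);
                xor_block(iv, src, dst);        /* chain P_i ^ C_i */
        }
}

/* PCBC decrypt: P_i = D(C_i) ^ IV, then IV = P_i ^ C_i. */
static void pcbc_decrypt(uint8_t *dst, const uint8_t *src, size_t nblocks,
                         uint8_t iv[BSIZE])
{
        for (size_t n = 0; n < nblocks; n++, src += BSIZE, dst += BSIZE) {
                memcpy(dst, src, BSIZE);
                toy_decrypt(dst);
                for (int i = 0; i < BSIZE; i++)
                        dst[i] ^= iv[i];        /* recover P_i */
                xor_block(iv, dst, src);        /* chain P_i ^ C_i */
        }
}

int main(void)
{
        uint8_t iv_enc[BSIZE] = { 1 }, iv_dec[BSIZE] = { 1 };
        uint8_t pt[3 * BSIZE], ct[3 * BSIZE], rt[3 * BSIZE];

        for (size_t i = 0; i < sizeof(pt); i++)
                pt[i] = (uint8_t)(i * 7);

        pcbc_encrypt(ct, pt, 3, iv_enc);
        pcbc_decrypt(rt, ct, 3, iv_dec);
        printf("%s\n", memcmp(pt, rt, sizeof(pt)) ? "mismatch" : "round-trip ok");
        return 0;
}
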
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index cfc04e15fd97..0a6680ca8cb6 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -12,6 +12,7 @@
#include <crypto/algapi.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 64a412be255e..ed1b0e9f2436 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -89,13 +89,12 @@ static int seqiv_aead_encrypt(struct aead_request *req)
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
- info = kmalloc(ivsize, req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
- GFP_ATOMIC);
+ info = kmemdup(req->iv, ivsize, req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC);
if (!info)
return -ENOMEM;
- memcpy(info, req->iv, ivsize);
compl = seqiv_aead_encrypt_complete;
data = req;
}
diff --git a/crypto/shash.c b/crypto/shash.c
index 44d297b82a8f..15b369c4745f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
return err;
}
+static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
+{
+ if (crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+}
+
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
else
err = shash->setkey(tfm, key, keylen);
- if (err)
+ if (unlikely(err)) {
+ shash_set_needkey(tfm, shash);
return err;
+ }
crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
@@ -373,15 +382,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
- crt->setkey = shash_async_setkey;
+ if (crypto_shash_alg_has_setkey(alg))
+ crt->setkey = shash_async_setkey;
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
CRYPTO_TFM_NEED_KEY);
- if (alg->export)
- crt->export = shash_async_export;
- if (alg->import)
- crt->import = shash_async_import;
+ crt->export = shash_async_export;
+ crt->import = shash_async_import;
crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
@@ -395,9 +403,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
hash->descsize = alg->descsize;
- if (crypto_shash_alg_has_setkey(alg) &&
- !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
- crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
+ shash_set_needkey(hash, alg);
return 0;
}
@@ -464,6 +470,9 @@ static int shash_prepare_alg(struct shash_alg *alg)
alg->statesize > HASH_MAX_STATESIZE)
return -EINVAL;
+ if ((alg->export && !alg->import) || (alg->import && !alg->export))
+ return -EINVAL;
+
base->cra_type = &crypto_shash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
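
In shash.c, the new shash_set_needkey() helper is also called when ->setkey() fails, so CRYPTO_TFM_NEED_KEY is raised again and the tfm cannot be used until a later setkey succeeds, presumably because a failed key change can leave the internal key state inconsistent; the same hunk set also rejects algorithms that supply only one of export/import. A tiny generic-C sketch of that flag discipline (toy_tfm, toy_setkey() and toy_can_hash() are illustrative names only, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

#define NEED_KEY 0x1u

struct toy_tfm {
        unsigned int flags;
        unsigned char key;      /* stand-in for real key material */
};

/* Pretend keys of value 0 are rejected by the algorithm. */
static int toy_setkey(struct toy_tfm *tfm, unsigned char key)
{
        if (key == 0) {
                /* Key state may be half-written: force a fresh setkey. */
                tfm->flags |= NEED_KEY;
                return -1;
        }
        tfm->key = key;
        tfm->flags &= ~NEED_KEY;
        return 0;
}

static bool toy_can_hash(const struct toy_tfm *tfm)
{
        return !(tfm->flags & NEED_KEY);
}

int main(void)
{
        struct toy_tfm tfm = { .flags = NEED_KEY };

        toy_setkey(&tfm, 42);
        printf("after good setkey:   usable=%d\n", toy_can_hash(&tfm)); /* 1 */
        toy_setkey(&tfm, 0);
        printf("after failed setkey: usable=%d\n", toy_can_hash(&tfm)); /* 0 */
        return 0;
}
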
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 2a969296bc24..bcf13d95f54a 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -585,6 +585,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
return crypto_alg_extsize(alg);
}
+static void skcipher_set_needkey(struct crypto_skcipher *tfm)
+{
+ if (tfm->keysize)
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+}
+
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -598,8 +604,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
CRYPTO_TFM_RES_MASK);
- if (err)
+ if (unlikely(err)) {
+ skcipher_set_needkey(tfm);
return err;
+ }
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
@@ -677,8 +685,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
skcipher->keysize = calg->cra_blkcipher.max_keysize;
- if (skcipher->keysize)
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+ skcipher_set_needkey(skcipher);
return 0;
}
@@ -698,8 +705,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
crypto_skcipher_set_flags(tfm,
crypto_ablkcipher_get_flags(ablkcipher) &
CRYPTO_TFM_RES_MASK);
- if (err)
+ if (unlikely(err)) {
+ skcipher_set_needkey(tfm);
return err;
+ }
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
@@ -776,8 +785,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
sizeof(struct ablkcipher_request);
skcipher->keysize = calg->cra_ablkcipher.max_keysize;
- if (skcipher->keysize)
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+ skcipher_set_needkey(skcipher);
return 0;
}
@@ -820,8 +828,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
else
err = cipher->setkey(tfm, key, keylen);
- if (err)
+ if (unlikely(err)) {
+ skcipher_set_needkey(tfm);
return err;
+ }
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
@@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
- if (skcipher->keysize)
- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+ skcipher_set_needkey(skcipher);
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
@@ -1058,5 +1067,136 @@ int skcipher_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);
+static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
+ int err;
+
+ crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_cipher_setkey(cipher, key, keylen);
+ crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
+{
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *cipher;
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->cipher = cipher;
+ return 0;
+}
+
+static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
+{
+ struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_cipher(ctx->cipher);
+}
+
+static void skcipher_free_instance_simple(struct skcipher_instance *inst)
+{
+ crypto_drop_spawn(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+/**
+ * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
+ *
+ * Allocate an skcipher_instance for a simple block cipher mode of operation,
+ * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
+ * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
+ * alignmask, and priority are set from the underlying cipher but can be
+ * overridden if needed. The tfm context defaults to skcipher_ctx_simple, and
+ * default ->setkey(), ->init(), and ->exit() methods are installed.
+ *
+ * @tmpl: the template being instantiated
+ * @tb: the template parameters
+ * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
+ * returned here. It must be dropped with crypto_mod_put().
+ *
+ * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
+ * needs to register the instance.
+ */
+struct skcipher_instance *
+skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
+ struct crypto_alg **cipher_alg_ret)
+{
+ struct crypto_attr_type *algt;
+ struct crypto_alg *cipher_alg;
+ struct skcipher_instance *inst;
+ struct crypto_spawn *spawn;
+ u32 mask;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return ERR_CAST(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+ return ERR_PTR(-EINVAL);
+
+ mask = CRYPTO_ALG_TYPE_MASK |
+ crypto_requires_off(algt->type, algt->mask,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
+ if (IS_ERR(cipher_alg))
+ return ERR_CAST(cipher_alg);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst) {
+ err = -ENOMEM;
+ goto err_put_cipher_alg;
+ }
+ spawn = skcipher_instance_ctx(inst);
+
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
+ cipher_alg);
+ if (err)
+ goto err_free_inst;
+
+ err = crypto_init_spawn(spawn, cipher_alg,
+ skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto err_free_inst;
+ inst->free = skcipher_free_instance_simple;
+
+ /* Default algorithm properties, can be overridden */
+ inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
+ inst->alg.base.cra_priority = cipher_alg->cra_priority;
+ inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
+ inst->alg.ivsize = cipher_alg->cra_blocksize;
+
+ /* Use skcipher_ctx_simple by default, can be overridden */
+ inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
+ inst->alg.setkey = skcipher_setkey_simple;
+ inst->alg.init = skcipher_init_tfm_simple;
+ inst->alg.exit = skcipher_exit_tfm_simple;
+
+ *cipher_alg_ret = cipher_alg;
+ return inst;
+
+err_free_inst:
+ kfree(inst);
+err_put_cipher_alg:
+ crypto_mod_put(cipher_alg);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
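
skcipher_alloc_instance_simple() changes how these templates are implemented, not how they are consumed: callers still instantiate them by name through the normal skcipher API. The following is a rough sketch of such a caller; example_ofb_encrypt() is a hypothetical function, "ofb(aes)" is just an example instance name, the buffer is assumed to live in linearly mapped (non-stack) memory, and error handling is abbreviated — treat it as an illustration of the API shape, not a reference implementation:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* @buf must be in linearly mapped memory (e.g. kmalloc'd), not on the stack;
 * @iv must be crypto_skcipher_ivsize(tfm) bytes (16 for AES). */
static int example_ofb_encrypt(u8 *buf, unsigned int len,
                               const u8 *key, unsigned int keylen, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        /* The "ofb" template is instantiated around the chosen cipher. */
        tfm = crypto_alloc_skcipher("ofb(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len);             /* in place: src == dst */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}
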
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9a5c60f08aad..c0cf87ae7ef6 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
for (i = 0; i <= 63; i++) {
- ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
ss2 = ss1 ^ rol32(a, 12);
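
The sm3_generic.c change masks the rotate count with 31: the round index runs to 63, rotation of a 32-bit word is mathematically modulo 32 anyway, and with the usual shift-based rotate a count of 32 or more would shift a u32 by its full width, which is undefined behaviour in C. A stand-alone illustration of a rotate helper that stays well-defined for any count (rol32_safe() is a local name here, not the kernel's rol32()):

#include <stdint.h>
#include <stdio.h>

/* 32-bit left rotate that is well-defined for any count: the count is
 * reduced modulo 32, and neither shift can reach the full word width. */
static inline uint32_t rol32_safe(uint32_t x, unsigned int s)
{
        s &= 31;
        return (x << s) | (x >> ((32 - s) & 31));
}

int main(void)
{
        uint32_t t = 0x79cc4519;        /* one of the SM3 round constants */

        /* Rotating by 33 is the same as rotating by 33 % 32 == 1. */
        printf("%08x %08x\n", rol32_safe(t, 33), rol32_safe(t, 1));
        return 0;
}
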
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index 03272a22afce..5a2eafed9c29 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -960,7 +960,7 @@ static int streebog_init(struct shash_desc *desc)
memset(ctx, 0, sizeof(struct streebog_state));
for (i = 0; i < 8; i++) {
if (digest_size == STREEBOG256_DIGEST_SIZE)
- ctx->h.qword[i] = 0x0101010101010101ULL;
+ ctx->h.qword[i] = cpu_to_le64(0x0101010101010101ULL);
}
return 0;
}
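
The testmgr.c rework that follows drops the fixed IDX[] cross-page offsets in favor of testvec_config descriptions, where each scatterlist entry is given a proportion_of_total out of TEST_SG_TOTAL (10000) and build_test_sglist() turns the proportions into byte counts, rounding to nearest and letting the final entry absorb any slack. A small stand-alone sketch of that conversion in plain C, for the proportions used by the "uneven misaligned splits, may sleep" config (zero-length entries, which the real code drops, are ignored here):

#include <stdio.h>

#define TEST_SG_TOTAL 10000u

int main(void)
{
        /* Proportions from the "uneven misaligned splits" config: 19/33/48%. */
        const unsigned int props[] = { 1900, 3300, 4800 };
        const unsigned int total_len = 100;     /* bytes in the test vector */
        unsigned int remaining = total_len;
        unsigned int lens[3];

        for (int i = 0; i < 3; i++) {
                /* Round to nearest, never exceeding what is left. */
                unsigned int len = (total_len * props[i] + TEST_SG_TOTAL / 2) /
                                   TEST_SG_TOTAL;
                if (len > remaining)
                        len = remaining;
                lens[i] = len;
                remaining -= len;
        }
        lens[2] += remaining;   /* last entry absorbs the rounding slack */

        printf("%u + %u + %u = %u\n", lens[0], lens[1], lens[2],
               lens[0] + lens[1] + lens[2]);    /* 19 + 33 + 48 = 100 */
        return 0;
}
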
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 0f684a414acb..8386038d67c7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -5,6 +5,7 @@
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) 2007 Nokia Siemens Networks
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2019 Google LLC
*
* Updated RFC4106 AES-GCM testing.
* Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
@@ -26,6 +27,8 @@
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/module.h>
+#include <linux/once.h>
+#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -41,6 +44,16 @@ static bool notests;
module_param(notests, bool, 0644);
MODULE_PARM_DESC(notests, "disable crypto self-tests");
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+static bool noextratests;
+module_param(noextratests, bool, 0644);
+MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
+
+static unsigned int fuzz_iterations = 100;
+module_param(fuzz_iterations, uint, 0644);
+MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
+#endif
+
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
/* a perfect nop */
@@ -59,28 +72,14 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
#define XBUFSIZE 8
/*
- * Indexes into the xbuf to simulate cross-page access.
- */
-#define IDX1 32
-#define IDX2 32400
-#define IDX3 1511
-#define IDX4 8193
-#define IDX5 22222
-#define IDX6 17101
-#define IDX7 27333
-#define IDX8 3000
-
-/*
* Used by test_cipher()
*/
#define ENCRYPT 1
#define DECRYPT 0
struct aead_test_suite {
- struct {
- const struct aead_testvec *vecs;
- unsigned int count;
- } enc, dec;
+ const struct aead_testvec *vecs;
+ unsigned int count;
};
struct cipher_test_suite {
@@ -138,9 +137,6 @@ struct alg_test_desc {
} suite;
};
-static const unsigned int IDX[8] = {
- IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
-
static void hexdump(unsigned char *buf, unsigned int len)
{
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
@@ -148,12 +144,12 @@ static void hexdump(unsigned char *buf, unsigned int len)
buf, len, false);
}
-static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+static int __testmgr_alloc_buf(char *buf[XBUFSIZE], int order)
{
int i;
for (i = 0; i < XBUFSIZE; i++) {
- buf[i] = (void *)__get_free_page(GFP_KERNEL);
+ buf[i] = (char *)__get_free_pages(GFP_KERNEL, order);
if (!buf[i])
goto err_free_buf;
}
@@ -162,856 +158,1264 @@ static int testmgr_alloc_buf(char *buf[XBUFSIZE])
err_free_buf:
while (i-- > 0)
- free_page((unsigned long)buf[i]);
+ free_pages((unsigned long)buf[i], order);
return -ENOMEM;
}
-static void testmgr_free_buf(char *buf[XBUFSIZE])
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+ return __testmgr_alloc_buf(buf, 0);
+}
+
+static void __testmgr_free_buf(char *buf[XBUFSIZE], int order)
{
int i;
for (i = 0; i < XBUFSIZE; i++)
- free_page((unsigned long)buf[i]);
+ free_pages((unsigned long)buf[i], order);
}
-static int ahash_guard_result(char *result, char c, int size)
+static void testmgr_free_buf(char *buf[XBUFSIZE])
{
- int i;
+ __testmgr_free_buf(buf, 0);
+}
- for (i = 0; i < size; i++) {
- if (result[i] != c)
- return -EINVAL;
- }
+#define TESTMGR_POISON_BYTE 0xfe
+#define TESTMGR_POISON_LEN 16
- return 0;
+static inline void testmgr_poison(void *addr, size_t len)
+{
+ memset(addr, TESTMGR_POISON_BYTE, len);
}
-static int ahash_partial_update(struct ahash_request **preq,
- struct crypto_ahash *tfm, const struct hash_testvec *template,
- void *hash_buff, int k, int temp, struct scatterlist *sg,
- const char *algo, char *result, struct crypto_wait *wait)
+/* Is the memory region still fully poisoned? */
+static inline bool testmgr_is_poison(const void *addr, size_t len)
{
- char *state;
- struct ahash_request *req;
- int statesize, ret = -EINVAL;
- static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 };
- int digestsize = crypto_ahash_digestsize(tfm);
-
- req = *preq;
- statesize = crypto_ahash_statesize(
- crypto_ahash_reqtfm(req));
- state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
- if (!state) {
- pr_err("alg: hash: Failed to alloc state for %s\n", algo);
- goto out_nostate;
- }
- memcpy(state + statesize, guard, sizeof(guard));
- memset(result, 1, digestsize);
- ret = crypto_ahash_export(req, state);
- WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
- if (ret) {
- pr_err("alg: hash: Failed to export() for %s\n", algo);
- goto out;
- }
- ret = ahash_guard_result(result, 1, digestsize);
- if (ret) {
- pr_err("alg: hash: Failed, export used req->result for %s\n",
- algo);
- goto out;
- }
- ahash_request_free(req);
- req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- pr_err("alg: hash: Failed to alloc request for %s\n", algo);
- goto out_noreq;
- }
- ahash_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, wait);
-
- memcpy(hash_buff, template->plaintext + temp,
- template->tap[k]);
- sg_init_one(&sg[0], hash_buff, template->tap[k]);
- ahash_request_set_crypt(req, sg, result, template->tap[k]);
- ret = crypto_ahash_import(req, state);
- if (ret) {
- pr_err("alg: hash: Failed to import() for %s\n", algo);
- goto out;
+ return memchr_inv(addr, TESTMGR_POISON_BYTE, len) == NULL;
+}
+
+/* flush type for hash algorithms */
+enum flush_type {
+ /* merge with update of previous buffer(s) */
+ FLUSH_TYPE_NONE = 0,
+
+ /* update with previous buffer(s) before doing this one */
+ FLUSH_TYPE_FLUSH,
+
+ /* likewise, but also export and re-import the intermediate state */
+ FLUSH_TYPE_REIMPORT,
+};
+
+/* finalization function for hash algorithms */
+enum finalization_type {
+ FINALIZATION_TYPE_FINAL, /* use final() */
+ FINALIZATION_TYPE_FINUP, /* use finup() */
+ FINALIZATION_TYPE_DIGEST, /* use digest() */
+};
+
+#define TEST_SG_TOTAL 10000
+
+/**
+ * struct test_sg_division - description of a scatterlist entry
+ *
+ * This struct describes one entry of a scatterlist being constructed to check a
+ * crypto test vector.
+ *
+ * @proportion_of_total: length of this chunk relative to the total length,
+ * given as a proportion out of TEST_SG_TOTAL so that it
+ * scales to fit any test vector
+ * @offset: byte offset into a 2-page buffer at which this chunk will start
+ * @offset_relative_to_alignmask: if true, add the algorithm's alignmask to the
+ * @offset
+ * @flush_type: for hashes, whether an update() should be done now vs.
+ * continuing to accumulate data
+ */
+struct test_sg_division {
+ unsigned int proportion_of_total;
+ unsigned int offset;
+ bool offset_relative_to_alignmask;
+ enum flush_type flush_type;
+};
+
+/**
+ * struct testvec_config - configuration for testing a crypto test vector
+ *
+ * This struct describes the data layout and other parameters with which each
+ * crypto test vector can be tested.
+ *
+ * @name: name of this config, logged for debugging purposes if a test fails
+ * @inplace: operate on the data in-place, if applicable for the algorithm type?
+ * @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP
+ * @src_divs: description of how to arrange the source scatterlist
+ * @dst_divs: description of how to arrange the dst scatterlist, if applicable
+ * for the algorithm type. Defaults to @src_divs if unset.
+ * @iv_offset: misalignment of the IV in the range [0..MAX_ALGAPI_ALIGNMASK+1],
+ * where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary
+ * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
+ * the @iv_offset
+ * @finalization_type: what finalization function to use for hashes
+ */
+struct testvec_config {
+ const char *name;
+ bool inplace;
+ u32 req_flags;
+ struct test_sg_division src_divs[XBUFSIZE];
+ struct test_sg_division dst_divs[XBUFSIZE];
+ unsigned int iv_offset;
+ bool iv_offset_relative_to_alignmask;
+ enum finalization_type finalization_type;
+};
+
+#define TESTVEC_CONFIG_NAMELEN 192
+
+/*
+ * The following are the lists of testvec_configs to test for each algorithm
+ * type when the basic crypto self-tests are enabled, i.e. when
+ * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test
+ * coverage, while keeping the test time much shorter than the full fuzz tests
+ * so that the basic tests can be enabled in a wider range of circumstances.
+ */
+
+/* Configs for skciphers and aeads */
+static const struct testvec_config default_cipher_testvec_configs[] = {
+ {
+ .name = "in-place",
+ .inplace = true,
+ .src_divs = { { .proportion_of_total = 10000 } },
+ }, {
+ .name = "out-of-place",
+ .src_divs = { { .proportion_of_total = 10000 } },
+ }, {
+ .name = "unaligned buffer, offset=1",
+ .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
+ .iv_offset = 1,
+ }, {
+ .name = "buffer aligned only to alignmask",
+ .src_divs = {
+ {
+ .proportion_of_total = 10000,
+ .offset = 1,
+ .offset_relative_to_alignmask = true,
+ },
+ },
+ .iv_offset = 1,
+ .iv_offset_relative_to_alignmask = true,
+ }, {
+ .name = "two even aligned splits",
+ .src_divs = {
+ { .proportion_of_total = 5000 },
+ { .proportion_of_total = 5000 },
+ },
+ }, {
+ .name = "uneven misaligned splits, may sleep",
+ .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+ .src_divs = {
+ { .proportion_of_total = 1900, .offset = 33 },
+ { .proportion_of_total = 3300, .offset = 7 },
+ { .proportion_of_total = 4800, .offset = 18 },
+ },
+ .iv_offset = 3,
+ }, {
+ .name = "misaligned splits crossing pages, inplace",
+ .inplace = true,
+ .src_divs = {
+ {
+ .proportion_of_total = 7500,
+ .offset = PAGE_SIZE - 32
+ }, {
+ .proportion_of_total = 2500,
+ .offset = PAGE_SIZE - 7
+ },
+ },
}
- ret = ahash_guard_result(result, 1, digestsize);
- if (ret) {
- pr_err("alg: hash: Failed, import used req->result for %s\n",
- algo);
- goto out;
+};
+
+static const struct testvec_config default_hash_testvec_configs[] = {
+ {
+ .name = "init+update+final aligned buffer",
+ .src_divs = { { .proportion_of_total = 10000 } },
+ .finalization_type = FINALIZATION_TYPE_FINAL,
+ }, {
+ .name = "init+finup aligned buffer",
+ .src_divs = { { .proportion_of_total = 10000 } },
+ .finalization_type = FINALIZATION_TYPE_FINUP,
+ }, {
+ .name = "digest aligned buffer",
+ .src_divs = { { .proportion_of_total = 10000 } },
+ .finalization_type = FINALIZATION_TYPE_DIGEST,
+ }, {
+ .name = "init+update+final misaligned buffer",
+ .src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
+ .finalization_type = FINALIZATION_TYPE_FINAL,
+ }, {
+ .name = "digest buffer aligned only to alignmask",
+ .src_divs = {
+ {
+ .proportion_of_total = 10000,
+ .offset = 1,
+ .offset_relative_to_alignmask = true,
+ },
+ },
+ .finalization_type = FINALIZATION_TYPE_DIGEST,
+ }, {
+ .name = "init+update+update+final two even splits",
+ .src_divs = {
+ { .proportion_of_total = 5000 },
+ {
+ .proportion_of_total = 5000,
+ .flush_type = FLUSH_TYPE_FLUSH,
+ },
+ },
+ .finalization_type = FINALIZATION_TYPE_FINAL,
+ }, {
+ .name = "digest uneven misaligned splits, may sleep",
+ .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+ .src_divs = {
+ { .proportion_of_total = 1900, .offset = 33 },
+ { .proportion_of_total = 3300, .offset = 7 },
+ { .proportion_of_total = 4800, .offset = 18 },
+ },
+ .finalization_type = FINALIZATION_TYPE_DIGEST,
+ }, {
+ .name = "digest misaligned splits crossing pages",
+ .src_divs = {
+ {
+ .proportion_of_total = 7500,
+ .offset = PAGE_SIZE - 32,
+ }, {
+ .proportion_of_total = 2500,
+ .offset = PAGE_SIZE - 7,
+ },
+ },
+ .finalization_type = FINALIZATION_TYPE_DIGEST,
+ }, {
+ .name = "import/export",
+ .src_divs = {
+ {
+ .proportion_of_total = 6500,
+ .flush_type = FLUSH_TYPE_REIMPORT,
+ }, {
+ .proportion_of_total = 3500,
+ .flush_type = FLUSH_TYPE_REIMPORT,
+ },
+ },
+ .finalization_type = FINALIZATION_TYPE_FINAL,
}
- ret = crypto_wait_req(crypto_ahash_update(req), wait);
- if (ret)
- goto out;
- *preq = req;
- ret = 0;
- goto out_noreq;
-out:
- ahash_request_free(req);
-out_noreq:
- kfree(state);
-out_nostate:
- return ret;
+};
+
+static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
+{
+ unsigned int remaining = TEST_SG_TOTAL;
+ unsigned int ndivs = 0;
+
+ do {
+ remaining -= divs[ndivs++].proportion_of_total;
+ } while (remaining);
+
+ return ndivs;
}
-enum hash_test {
- HASH_TEST_DIGEST,
- HASH_TEST_FINAL,
- HASH_TEST_FINUP
-};
+static bool valid_sg_divisions(const struct test_sg_division *divs,
+ unsigned int count, bool *any_flushes_ret)
+{
+ unsigned int total = 0;
+ unsigned int i;
+
+ for (i = 0; i < count && total != TEST_SG_TOTAL; i++) {
+ if (divs[i].proportion_of_total <= 0 ||
+ divs[i].proportion_of_total > TEST_SG_TOTAL - total)
+ return false;
+ total += divs[i].proportion_of_total;
+ if (divs[i].flush_type != FLUSH_TYPE_NONE)
+ *any_flushes_ret = true;
+ }
+ return total == TEST_SG_TOTAL &&
+ memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
+}
-static int __test_hash(struct crypto_ahash *tfm,
- const struct hash_testvec *template, unsigned int tcount,
- enum hash_test test_type, const int align_offset)
+/*
+ * Check whether the given testvec_config is valid. This isn't strictly needed
+ * since every testvec_config should be valid, but check anyway so that people
+ * don't unknowingly add broken configs that don't do what they wanted.
+ */
+static bool valid_testvec_config(const struct testvec_config *cfg)
{
- const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
- size_t digest_size = crypto_ahash_digestsize(tfm);
- unsigned int i, j, k, temp;
- struct scatterlist sg[8];
- char *result;
- char *key;
- struct ahash_request *req;
- struct crypto_wait wait;
- void *hash_buff;
- char *xbuf[XBUFSIZE];
- int ret = -ENOMEM;
+ bool any_flushes = false;
- result = kmalloc(digest_size, GFP_KERNEL);
- if (!result)
- return ret;
- key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
- if (!key)
- goto out_nobuf;
- if (testmgr_alloc_buf(xbuf))
- goto out_nobuf;
+ if (cfg->name == NULL)
+ return false;
- crypto_init_wait(&wait);
+ if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
+ &any_flushes))
+ return false;
- req = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- printk(KERN_ERR "alg: hash: Failed to allocate request for "
- "%s\n", algo);
- goto out_noreq;
+ if (cfg->dst_divs[0].proportion_of_total) {
+ if (!valid_sg_divisions(cfg->dst_divs,
+ ARRAY_SIZE(cfg->dst_divs),
+ &any_flushes))
+ return false;
+ } else {
+ if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
+ return false;
+ /* defaults to dst_divs=src_divs */
}
- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
- j = 0;
- for (i = 0; i < tcount; i++) {
- if (template[i].np)
- continue;
+ if (cfg->iv_offset +
+ (cfg->iv_offset_relative_to_alignmask ? MAX_ALGAPI_ALIGNMASK : 0) >
+ MAX_ALGAPI_ALIGNMASK + 1)
+ return false;
- ret = -EINVAL;
- if (WARN_ON(align_offset + template[i].psize > PAGE_SIZE))
- goto out;
+ if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+ return false;
- j++;
- memset(result, 0, digest_size);
+ return true;
+}
- hash_buff = xbuf[0];
- hash_buff += align_offset;
+struct test_sglist {
+ char *bufs[XBUFSIZE];
+ struct scatterlist sgl[XBUFSIZE];
+ struct scatterlist sgl_saved[XBUFSIZE];
+ struct scatterlist *sgl_ptr;
+ unsigned int nents;
+};
- memcpy(hash_buff, template[i].plaintext, template[i].psize);
- sg_init_one(&sg[0], hash_buff, template[i].psize);
+static int init_test_sglist(struct test_sglist *tsgl)
+{
+ return __testmgr_alloc_buf(tsgl->bufs, 1 /* two pages per buffer */);
+}
- if (template[i].ksize) {
- crypto_ahash_clear_flags(tfm, ~0);
- if (template[i].ksize > MAX_KEYLEN) {
- pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
- j, algo, template[i].ksize, MAX_KEYLEN);
- ret = -EINVAL;
- goto out;
- }
- memcpy(key, template[i].key, template[i].ksize);
- ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
- if (ret) {
- printk(KERN_ERR "alg: hash: setkey failed on "
- "test %d for %s: ret=%d\n", j, algo,
- -ret);
- goto out;
- }
- }
+static void destroy_test_sglist(struct test_sglist *tsgl)
+{
+ return __testmgr_free_buf(tsgl->bufs, 1 /* two pages per buffer */);
+}
- ahash_request_set_crypt(req, sg, result, template[i].psize);
- switch (test_type) {
- case HASH_TEST_DIGEST:
- ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret) {
- pr_err("alg: hash: digest failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
- }
- break;
+/**
+ * build_test_sglist() - build a scatterlist for a crypto test
+ *
+ * @tsgl: the scatterlist to build. @tsgl->bufs[] contains an array of 2-page
+ * buffers which the scatterlist @tsgl->sgl[] will be made to point into.
+ * @divs: the layout specification on which the scatterlist will be based
+ * @alignmask: the algorithm's alignmask
+ * @total_len: the total length of the scatterlist to build in bytes
+ * @data: if non-NULL, the buffers will be filled with this data until it ends.
+ * Otherwise the buffers will be poisoned. In both cases, some bytes
+ * past the end of each buffer will be poisoned to help detect overruns.
+ * @out_divs: if non-NULL, the test_sg_division to which each scatterlist entry
+ * corresponds will be returned here. This will match @divs except
+ * that divisions resolving to a length of 0 are omitted as they are
+ * not included in the scatterlist.
+ *
+ * Return: 0 or a -errno value
+ */
+static int build_test_sglist(struct test_sglist *tsgl,
+ const struct test_sg_division *divs,
+ const unsigned int alignmask,
+ const unsigned int total_len,
+ struct iov_iter *data,
+ const struct test_sg_division *out_divs[XBUFSIZE])
+{
+ struct {
+ const struct test_sg_division *div;
+ size_t length;
+ } partitions[XBUFSIZE];
+ const unsigned int ndivs = count_test_sg_divisions(divs);
+ unsigned int len_remaining = total_len;
+ unsigned int i;
- case HASH_TEST_FINAL:
- memset(result, 1, digest_size);
- ret = crypto_wait_req(crypto_ahash_init(req), &wait);
- if (ret) {
- pr_err("alg: hash: init failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
- }
- ret = ahash_guard_result(result, 1, digest_size);
- if (ret) {
- pr_err("alg: hash: init failed on test %d "
- "for %s: used req->result\n", j, algo);
- goto out;
- }
- ret = crypto_wait_req(crypto_ahash_update(req), &wait);
- if (ret) {
- pr_err("alg: hash: update failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
- }
- ret = ahash_guard_result(result, 1, digest_size);
- if (ret) {
- pr_err("alg: hash: update failed on test %d "
- "for %s: used req->result\n", j, algo);
- goto out;
- }
- ret = crypto_wait_req(crypto_ahash_final(req), &wait);
- if (ret) {
- pr_err("alg: hash: final failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
- }
- break;
+ BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl));
+ if (WARN_ON(ndivs > ARRAY_SIZE(partitions)))
+ return -EINVAL;
- case HASH_TEST_FINUP:
- memset(result, 1, digest_size);
- ret = crypto_wait_req(crypto_ahash_init(req), &wait);
- if (ret) {
- pr_err("alg: hash: init failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
- }
- ret = ahash_guard_result(result, 1, digest_size);
- if (ret) {
- pr_err("alg: hash: init failed on test %d "
- "for %s: used req->result\n", j, algo);
- goto out;
- }
- ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
- if (ret) {
- pr_err("alg: hash: final failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
- }
- break;
- }
+ /* Calculate the (div, length) pairs */
+ tsgl->nents = 0;
+ for (i = 0; i < ndivs; i++) {
+ unsigned int len_this_sg =
+ min(len_remaining,
+ (total_len * divs[i].proportion_of_total +
+ TEST_SG_TOTAL / 2) / TEST_SG_TOTAL);
- if (memcmp(result, template[i].digest,
- crypto_ahash_digestsize(tfm))) {
- printk(KERN_ERR "alg: hash: Test %d failed for %s\n",
- j, algo);
- hexdump(result, crypto_ahash_digestsize(tfm));
- ret = -EINVAL;
- goto out;
+ if (len_this_sg != 0) {
+ partitions[tsgl->nents].div = &divs[i];
+ partitions[tsgl->nents].length = len_this_sg;
+ tsgl->nents++;
+ len_remaining -= len_this_sg;
}
}
+ if (tsgl->nents == 0) {
+ partitions[tsgl->nents].div = &divs[0];
+ partitions[tsgl->nents].length = 0;
+ tsgl->nents++;
+ }
+ partitions[tsgl->nents - 1].length += len_remaining;
- if (test_type)
- goto out;
+ /* Set up the sgl entries and fill the data or poison */
+ sg_init_table(tsgl->sgl, tsgl->nents);
+ for (i = 0; i < tsgl->nents; i++) {
+ unsigned int offset = partitions[i].div->offset;
+ void *addr;
- j = 0;
- for (i = 0; i < tcount; i++) {
- /* alignment tests are only done with continuous buffers */
- if (align_offset != 0)
- break;
+ if (partitions[i].div->offset_relative_to_alignmask)
+ offset += alignmask;
- if (!template[i].np)
- continue;
+ while (offset + partitions[i].length + TESTMGR_POISON_LEN >
+ 2 * PAGE_SIZE) {
+ if (WARN_ON(offset <= 0))
+ return -EINVAL;
+ offset /= 2;
+ }
- j++;
- memset(result, 0, digest_size);
+ addr = &tsgl->bufs[i][offset];
+ sg_set_buf(&tsgl->sgl[i], addr, partitions[i].length);
- temp = 0;
- sg_init_table(sg, template[i].np);
- ret = -EINVAL;
- for (k = 0; k < template[i].np; k++) {
- if (WARN_ON(offset_in_page(IDX[k]) +
- template[i].tap[k] > PAGE_SIZE))
- goto out;
- sg_set_buf(&sg[k],
- memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]),
- template[i].plaintext + temp,
- template[i].tap[k]),
- template[i].tap[k]);
- temp += template[i].tap[k];
- }
-
- if (template[i].ksize) {
- if (template[i].ksize > MAX_KEYLEN) {
- pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
- j, algo, template[i].ksize, MAX_KEYLEN);
- ret = -EINVAL;
- goto out;
- }
- crypto_ahash_clear_flags(tfm, ~0);
- memcpy(key, template[i].key, template[i].ksize);
- ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
-
- if (ret) {
- printk(KERN_ERR "alg: hash: setkey "
- "failed on chunking test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
- }
- }
+ if (out_divs)
+ out_divs[i] = partitions[i].div;
- ahash_request_set_crypt(req, sg, result, template[i].psize);
- ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret) {
- pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n",
- j, algo, -ret);
- goto out;
- }
+ if (data) {
+ size_t copy_len, copied;
- if (memcmp(result, template[i].digest,
- crypto_ahash_digestsize(tfm))) {
- printk(KERN_ERR "alg: hash: Chunking test %d "
- "failed for %s\n", j, algo);
- hexdump(result, crypto_ahash_digestsize(tfm));
- ret = -EINVAL;
- goto out;
+ copy_len = min(partitions[i].length, data->count);
+ copied = copy_from_iter(addr, copy_len, data);
+ if (WARN_ON(copied != copy_len))
+ return -EINVAL;
+ testmgr_poison(addr + copy_len, partitions[i].length +
+ TESTMGR_POISON_LEN - copy_len);
+ } else {
+ testmgr_poison(addr, partitions[i].length +
+ TESTMGR_POISON_LEN);
}
}
- /* partial update exercise */
- j = 0;
- for (i = 0; i < tcount; i++) {
- /* alignment tests are only done with continuous buffers */
- if (align_offset != 0)
- break;
+ sg_mark_end(&tsgl->sgl[tsgl->nents - 1]);
+ tsgl->sgl_ptr = tsgl->sgl;
+ memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0]));
+ return 0;
+}
- if (template[i].np < 2)
- continue;
+/*
+ * Verify that a scatterlist crypto operation produced the correct output.
+ *
+ * @tsgl: scatterlist containing the actual output
+ * @expected_output: buffer containing the expected output
+ * @len_to_check: length of @expected_output in bytes
+ * @unchecked_prefix_len: number of ignored bytes in @tsgl prior to real result
+ * @check_poison: verify that the poison bytes after each chunk are intact?
+ *
+ * Return: 0 if correct, -EINVAL if incorrect, -EOVERFLOW if buffer overrun.
+ */
+static int verify_correct_output(const struct test_sglist *tsgl,
+ const char *expected_output,
+ unsigned int len_to_check,
+ unsigned int unchecked_prefix_len,
+ bool check_poison)
+{
+ unsigned int i;
- j++;
- memset(result, 0, digest_size);
+ for (i = 0; i < tsgl->nents; i++) {
+ struct scatterlist *sg = &tsgl->sgl_ptr[i];
+ unsigned int len = sg->length;
+ unsigned int offset = sg->offset;
+ const char *actual_output;
- ret = -EINVAL;
- hash_buff = xbuf[0];
- memcpy(hash_buff, template[i].plaintext,
- template[i].tap[0]);
- sg_init_one(&sg[0], hash_buff, template[i].tap[0]);
-
- if (template[i].ksize) {
- crypto_ahash_clear_flags(tfm, ~0);
- if (template[i].ksize > MAX_KEYLEN) {
- pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
- j, algo, template[i].ksize, MAX_KEYLEN);
- ret = -EINVAL;
- goto out;
- }
- memcpy(key, template[i].key, template[i].ksize);
- ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
- if (ret) {
- pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n",
- j, algo, -ret);
- goto out;
+ if (unchecked_prefix_len) {
+ if (unchecked_prefix_len >= len) {
+ unchecked_prefix_len -= len;
+ continue;
}
+ offset += unchecked_prefix_len;
+ len -= unchecked_prefix_len;
+ unchecked_prefix_len = 0;
}
+ len = min(len, len_to_check);
+ actual_output = page_address(sg_page(sg)) + offset;
+ if (memcmp(expected_output, actual_output, len) != 0)
+ return -EINVAL;
+ if (check_poison &&
+ !testmgr_is_poison(actual_output + len, TESTMGR_POISON_LEN))
+ return -EOVERFLOW;
+ len_to_check -= len;
+ expected_output += len;
+ }
+ if (WARN_ON(len_to_check != 0))
+ return -EINVAL;
+ return 0;
+}
- ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
- ret = crypto_wait_req(crypto_ahash_init(req), &wait);
- if (ret) {
- pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
- j, algo, -ret);
- goto out;
- }
- ret = crypto_wait_req(crypto_ahash_update(req), &wait);
- if (ret) {
- pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
- j, algo, -ret);
- goto out;
- }
+static bool is_test_sglist_corrupted(const struct test_sglist *tsgl)
+{
+ unsigned int i;
- temp = template[i].tap[0];
- for (k = 1; k < template[i].np; k++) {
- ret = ahash_partial_update(&req, tfm, &template[i],
- hash_buff, k, temp, &sg[0], algo, result,
- &wait);
- if (ret) {
- pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
- j, algo, -ret);
- goto out_noreq;
- }
- temp += template[i].tap[k];
- }
- ret = crypto_wait_req(crypto_ahash_final(req), &wait);
- if (ret) {
- pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
- j, algo, -ret);
- goto out;
- }
- if (memcmp(result, template[i].digest,
- crypto_ahash_digestsize(tfm))) {
- pr_err("alg: hash: Partial Test %d failed for %s\n",
- j, algo);
- hexdump(result, crypto_ahash_digestsize(tfm));
- ret = -EINVAL;
- goto out;
- }
+ for (i = 0; i < tsgl->nents; i++) {
+ if (tsgl->sgl[i].page_link != tsgl->sgl_saved[i].page_link)
+ return true;
+ if (tsgl->sgl[i].offset != tsgl->sgl_saved[i].offset)
+ return true;
+ if (tsgl->sgl[i].length != tsgl->sgl_saved[i].length)
+ return true;
}
+ return false;
+}
- ret = 0;
+struct cipher_test_sglists {
+ struct test_sglist src;
+ struct test_sglist dst;
+};
-out:
- ahash_request_free(req);
-out_noreq:
- testmgr_free_buf(xbuf);
-out_nobuf:
- kfree(key);
- kfree(result);
- return ret;
+static struct cipher_test_sglists *alloc_cipher_test_sglists(void)
+{
+ struct cipher_test_sglists *tsgls;
+
+ tsgls = kmalloc(sizeof(*tsgls), GFP_KERNEL);
+ if (!tsgls)
+ return NULL;
+
+ if (init_test_sglist(&tsgls->src) != 0)
+ goto fail_kfree;
+ if (init_test_sglist(&tsgls->dst) != 0)
+ goto fail_destroy_src;
+
+ return tsgls;
+
+fail_destroy_src:
+ destroy_test_sglist(&tsgls->src);
+fail_kfree:
+ kfree(tsgls);
+ return NULL;
}
-static int test_hash(struct crypto_ahash *tfm,
- const struct hash_testvec *template,
- unsigned int tcount, enum hash_test test_type)
+static void free_cipher_test_sglists(struct cipher_test_sglists *tsgls)
{
- unsigned int alignmask;
- int ret;
+ if (tsgls) {
+ destroy_test_sglist(&tsgls->src);
+ destroy_test_sglist(&tsgls->dst);
+ kfree(tsgls);
+ }
+}
- ret = __test_hash(tfm, template, tcount, test_type, 0);
- if (ret)
- return ret;
+/* Build the src and dst scatterlists for an skcipher or AEAD test */
+static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
+ const struct testvec_config *cfg,
+ unsigned int alignmask,
+ unsigned int src_total_len,
+ unsigned int dst_total_len,
+ const struct kvec *inputs,
+ unsigned int nr_inputs)
+{
+ struct iov_iter input;
+ int err;
- /* test unaligned buffers, check with one byte offset */
- ret = __test_hash(tfm, template, tcount, test_type, 1);
- if (ret)
- return ret;
+ iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len);
+ err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
+ cfg->inplace ?
+ max(dst_total_len, src_total_len) :
+ src_total_len,
+ &input, NULL);
+ if (err)
+ return err;
- alignmask = crypto_tfm_alg_alignmask(&tfm->base);
- if (alignmask) {
- /* Check if alignment mask for tfm is correctly set. */
- ret = __test_hash(tfm, template, tcount, test_type,
- alignmask + 1);
- if (ret)
- return ret;
+ if (cfg->inplace) {
+ tsgls->dst.sgl_ptr = tsgls->src.sgl;
+ tsgls->dst.nents = tsgls->src.nents;
+ return 0;
}
+ return build_test_sglist(&tsgls->dst,
+ cfg->dst_divs[0].proportion_of_total ?
+ cfg->dst_divs : cfg->src_divs,
+ alignmask, dst_total_len, NULL, NULL);
+}
- return 0;
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+ size_t max_divs, char *p, char *end,
+ bool gen_flushes)
+{
+ struct test_sg_division *div = divs;
+ unsigned int remaining = TEST_SG_TOTAL;
+
+ do {
+ unsigned int this_len;
+
+ if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
+ this_len = remaining;
+ else
+ this_len = 1 + (prandom_u32() % remaining);
+ div->proportion_of_total = this_len;
+
+ if (prandom_u32() % 4 == 0)
+ div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128);
+ else if (prandom_u32() % 2 == 0)
+ div->offset = prandom_u32() % 32;
+ else
+ div->offset = prandom_u32() % PAGE_SIZE;
+ if (prandom_u32() % 8 == 0)
+ div->offset_relative_to_alignmask = true;
+
+ div->flush_type = FLUSH_TYPE_NONE;
+ if (gen_flushes) {
+ switch (prandom_u32() % 4) {
+ case 0:
+ div->flush_type = FLUSH_TYPE_REIMPORT;
+ break;
+ case 1:
+ div->flush_type = FLUSH_TYPE_FLUSH;
+ break;
+ }
+ }
+
+ BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */
+ p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s",
+ div->flush_type == FLUSH_TYPE_NONE ? "" :
+ div->flush_type == FLUSH_TYPE_FLUSH ?
+ "<flush> " : "<reimport> ",
+ this_len / 100, this_len % 100,
+ div->offset_relative_to_alignmask ?
+ "alignmask" : "",
+ div->offset, this_len == remaining ? "" : ", ");
+ remaining -= this_len;
+ div++;
+ } while (remaining);
+
+ return p;
}
-static int __test_aead(struct crypto_aead *tfm, int enc,
- const struct aead_testvec *template, unsigned int tcount,
- const bool diff_dst, const int align_offset)
+/* Generate a random testvec_config for fuzz testing */
+static void generate_random_testvec_config(struct testvec_config *cfg,
+ char *name, size_t max_namelen)
{
- const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
- unsigned int i, j, k, n, temp;
- int ret = -ENOMEM;
- char *q;
- char *key;
- struct aead_request *req;
- struct scatterlist *sg;
- struct scatterlist *sgout;
- const char *e, *d;
- struct crypto_wait wait;
- unsigned int authsize, iv_len;
- void *input;
- void *output;
- void *assoc;
- char *iv;
- char *xbuf[XBUFSIZE];
- char *xoutbuf[XBUFSIZE];
- char *axbuf[XBUFSIZE];
+ char *p = name;
+ char * const end = name + max_namelen;
- iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
- if (!iv)
- return ret;
- key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
- if (!key)
- goto out_noxbuf;
- if (testmgr_alloc_buf(xbuf))
- goto out_noxbuf;
- if (testmgr_alloc_buf(axbuf))
- goto out_noaxbuf;
- if (diff_dst && testmgr_alloc_buf(xoutbuf))
- goto out_nooutbuf;
-
- /* avoid "the frame size is larger than 1024 bytes" compiler warning */
- sg = kmalloc(array3_size(sizeof(*sg), 8, (diff_dst ? 4 : 2)),
- GFP_KERNEL);
- if (!sg)
- goto out_nosg;
- sgout = &sg[16];
-
- if (diff_dst)
- d = "-ddst";
- else
- d = "";
+ memset(cfg, 0, sizeof(*cfg));
- if (enc == ENCRYPT)
- e = "encryption";
- else
- e = "decryption";
+ cfg->name = name;
- crypto_init_wait(&wait);
+ p += scnprintf(p, end - p, "random:");
- req = aead_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- pr_err("alg: aead%s: Failed to allocate request for %s\n",
- d, algo);
- goto out;
+ if (prandom_u32() % 2 == 0) {
+ cfg->inplace = true;
+ p += scnprintf(p, end - p, " inplace");
}
- aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
+ if (prandom_u32() % 2 == 0) {
+ cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
+ p += scnprintf(p, end - p, " may_sleep");
+ }
- iv_len = crypto_aead_ivsize(tfm);
+ switch (prandom_u32() % 4) {
+ case 0:
+ cfg->finalization_type = FINALIZATION_TYPE_FINAL;
+ p += scnprintf(p, end - p, " use_final");
+ break;
+ case 1:
+ cfg->finalization_type = FINALIZATION_TYPE_FINUP;
+ p += scnprintf(p, end - p, " use_finup");
+ break;
+ default:
+ cfg->finalization_type = FINALIZATION_TYPE_DIGEST;
+ p += scnprintf(p, end - p, " use_digest");
+ break;
+ }
- for (i = 0, j = 0; i < tcount; i++) {
- if (template[i].np)
- continue;
+ p += scnprintf(p, end - p, " src_divs=[");
+ p = generate_random_sgl_divisions(cfg->src_divs,
+ ARRAY_SIZE(cfg->src_divs), p, end,
+ (cfg->finalization_type !=
+ FINALIZATION_TYPE_DIGEST));
+ p += scnprintf(p, end - p, "]");
- j++;
+ if (!cfg->inplace && prandom_u32() % 2 == 0) {
+ p += scnprintf(p, end - p, " dst_divs=[");
+ p = generate_random_sgl_divisions(cfg->dst_divs,
+ ARRAY_SIZE(cfg->dst_divs),
+ p, end, false);
+ p += scnprintf(p, end - p, "]");
+ }
- /* some templates have no input data but they will
- * touch input
- */
- input = xbuf[0];
- input += align_offset;
- assoc = axbuf[0];
+ if (prandom_u32() % 2 == 0) {
+ cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
+ p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
+ }
- ret = -EINVAL;
- if (WARN_ON(align_offset + template[i].ilen >
- PAGE_SIZE || template[i].alen > PAGE_SIZE))
- goto out;
+ WARN_ON_ONCE(!valid_testvec_config(cfg));
+}
+#endif /* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
- memcpy(input, template[i].input, template[i].ilen);
- memcpy(assoc, template[i].assoc, template[i].alen);
- if (template[i].iv)
- memcpy(iv, template[i].iv, iv_len);
- else
- memset(iv, 0, iv_len);
+static int check_nonfinal_hash_op(const char *op, int err,
+ u8 *result, unsigned int digestsize,
+ const char *driver, unsigned int vec_num,
+ const struct testvec_config *cfg)
+{
+ if (err) {
+ pr_err("alg: hash: %s %s() failed with err %d on test vector %u, cfg=\"%s\"\n",
+ driver, op, err, vec_num, cfg->name);
+ return err;
+ }
+ if (!testmgr_is_poison(result, digestsize)) {
+ pr_err("alg: hash: %s %s() used result buffer on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return -EINVAL;
+ }
+ return 0;
+}
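
check_nonfinal_hash_op() relies on the result buffer having been pre-filled with poison bytes, so any premature write by init()/update()/export()/import() is detectable. testmgr_poison() and testmgr_is_poison() are defined elsewhere in testmgr.c; the sketch below only shows the canary idea in plain userspace C, with an arbitrary poison byte:

#include <stdbool.h>
#include <string.h>

#define POISON_BYTE 0xfe	/* illustrative value */

static void poison(void *buf, size_t len)
{
	memset(buf, POISON_BYTE, len);
}

static bool is_poison(const void *buf, size_t len)
{
	const unsigned char *p = buf;

	while (len--)
		if (*p++ != POISON_BYTE)
			return false;	/* something wrote here */
	return true;
}

int main(void)
{
	unsigned char result[16 + 4];	/* digest area + overrun canary */

	poison(result, sizeof(result));
	/* ...run a non-final operation that must not touch 'result'... */
	return is_poison(result, sizeof(result)) ? 0 : 1;
}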
- crypto_aead_clear_flags(tfm, ~0);
- if (template[i].wk)
- crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+static int test_hash_vec_cfg(const char *driver,
+ const struct hash_testvec *vec,
+ unsigned int vec_num,
+ const struct testvec_config *cfg,
+ struct ahash_request *req,
+ struct test_sglist *tsgl,
+ u8 *hashstate)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ const unsigned int alignmask = crypto_ahash_alignmask(tfm);
+ const unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ const unsigned int statesize = crypto_ahash_statesize(tfm);
+ const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
+ const struct test_sg_division *divs[XBUFSIZE];
+ DECLARE_CRYPTO_WAIT(wait);
+ struct kvec _input;
+ struct iov_iter input;
+ unsigned int i;
+ struct scatterlist *pending_sgl;
+ unsigned int pending_len;
+ u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
+ int err;
- if (template[i].klen > MAX_KEYLEN) {
- pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
- d, j, algo, template[i].klen,
- MAX_KEYLEN);
- ret = -EINVAL;
- goto out;
+ /* Set the key, if specified */
+ if (vec->ksize) {
+ err = crypto_ahash_setkey(tfm, vec->key, vec->ksize);
+ if (err) {
+ pr_err("alg: hash: %s setkey failed with err %d on test vector %u; flags=%#x\n",
+ driver, err, vec_num,
+ crypto_ahash_get_flags(tfm));
+ return err;
}
- memcpy(key, template[i].key, template[i].klen);
-
- ret = crypto_aead_setkey(tfm, key, template[i].klen);
- if (template[i].fail == !ret) {
- pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
- d, j, algo, crypto_aead_get_flags(tfm));
- goto out;
- } else if (ret)
- continue;
+ }
- authsize = abs(template[i].rlen - template[i].ilen);
- ret = crypto_aead_setauthsize(tfm, authsize);
- if (ret) {
- pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
- d, authsize, j, algo);
- goto out;
- }
+ /* Build the scatterlist for the source data */
+ _input.iov_base = (void *)vec->plaintext;
+ _input.iov_len = vec->psize;
+ iov_iter_kvec(&input, WRITE, &_input, 1, vec->psize);
+ err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
+ &input, divs);
+ if (err) {
+ pr_err("alg: hash: %s: error preparing scatterlist for test vector %u, cfg=\"%s\"\n",
+ driver, vec_num, cfg->name);
+ return err;
+ }
- k = !!template[i].alen;
- sg_init_table(sg, k + 1);
- sg_set_buf(&sg[0], assoc, template[i].alen);
- sg_set_buf(&sg[k], input,
- template[i].ilen + (enc ? authsize : 0));
- output = input;
+ /* Do the actual hashing */
- if (diff_dst) {
- sg_init_table(sgout, k + 1);
- sg_set_buf(&sgout[0], assoc, template[i].alen);
+ testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
+ testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
- output = xoutbuf[0];
- output += align_offset;
- sg_set_buf(&sgout[k], output,
- template[i].rlen + (enc ? 0 : authsize));
+ if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST) {
+ /* Just using digest() */
+ ahash_request_set_callback(req, req_flags, crypto_req_done,
+ &wait);
+ ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
+ err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (err) {
+ pr_err("alg: hash: %s digest() failed with err %d on test vector %u, cfg=\"%s\"\n",
+ driver, err, vec_num, cfg->name);
+ return err;
}
+ goto result_ready;
+ }
- aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
- template[i].ilen, iv);
-
- aead_request_set_ad(req, template[i].alen);
+ /* Using init(), zero or more update(), then final() or finup() */
- ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
- : crypto_aead_decrypt(req), &wait);
+ ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
+ ahash_request_set_crypt(req, NULL, result, 0);
+ err = crypto_wait_req(crypto_ahash_init(req), &wait);
+ err = check_nonfinal_hash_op("init", err, result, digestsize,
+ driver, vec_num, cfg);
+ if (err)
+ return err;
- switch (ret) {
- case 0:
- if (template[i].novrfy) {
- /* verification was supposed to fail */
- pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
- d, e, j, algo);
- /* so really, we got a bad message */
- ret = -EBADMSG;
- goto out;
+ pending_sgl = NULL;
+ pending_len = 0;
+ for (i = 0; i < tsgl->nents; i++) {
+ if (divs[i]->flush_type != FLUSH_TYPE_NONE &&
+ pending_sgl != NULL) {
+ /* update() with the pending data */
+ ahash_request_set_callback(req, req_flags,
+ crypto_req_done, &wait);
+ ahash_request_set_crypt(req, pending_sgl, result,
+ pending_len);
+ err = crypto_wait_req(crypto_ahash_update(req), &wait);
+ err = check_nonfinal_hash_op("update", err,
+ result, digestsize,
+ driver, vec_num, cfg);
+ if (err)
+ return err;
+ pending_sgl = NULL;
+ pending_len = 0;
+ }
+ if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) {
+ /* Test ->export() and ->import() */
+ testmgr_poison(hashstate + statesize,
+ TESTMGR_POISON_LEN);
+ err = crypto_ahash_export(req, hashstate);
+ err = check_nonfinal_hash_op("export", err,
+ result, digestsize,
+ driver, vec_num, cfg);
+ if (err)
+ return err;
+ if (!testmgr_is_poison(hashstate + statesize,
+ TESTMGR_POISON_LEN)) {
+ pr_err("alg: hash: %s export() overran state buffer on test vector %u, cfg=\"%s\"\n",
+ driver, vec_num, cfg->name);
+ return -EOVERFLOW;
}
- break;
- case -EBADMSG:
- if (template[i].novrfy)
- /* verification failure was expected */
- continue;
- /* fall through */
- default:
- pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n",
- d, e, j, algo, -ret);
- goto out;
- }
- q = output;
- if (memcmp(q, template[i].result, template[i].rlen)) {
- pr_err("alg: aead%s: Test %d failed on %s for %s\n",
- d, j, e, algo);
- hexdump(q, template[i].rlen);
- ret = -EINVAL;
- goto out;
+ testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
+ err = crypto_ahash_import(req, hashstate);
+ err = check_nonfinal_hash_op("import", err,
+ result, digestsize,
+ driver, vec_num, cfg);
+ if (err)
+ return err;
+ }
+ if (pending_sgl == NULL)
+ pending_sgl = &tsgl->sgl[i];
+ pending_len += tsgl->sgl[i].length;
+ }
+
+ ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
+ ahash_request_set_crypt(req, pending_sgl, result, pending_len);
+ if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
+ /* finish with update() and final() */
+ err = crypto_wait_req(crypto_ahash_update(req), &wait);
+ err = check_nonfinal_hash_op("update", err, result, digestsize,
+ driver, vec_num, cfg);
+ if (err)
+ return err;
+ err = crypto_wait_req(crypto_ahash_final(req), &wait);
+ if (err) {
+ pr_err("alg: hash: %s final() failed with err %d on test vector %u, cfg=\"%s\"\n",
+ driver, err, vec_num, cfg->name);
+ return err;
+ }
+ } else {
+ /* finish with finup() */
+ err = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (err) {
+ pr_err("alg: hash: %s finup() failed with err %d on test vector %u, cfg=\"%s\"\n",
+ driver, err, vec_num, cfg->name);
+ return err;
}
}
- for (i = 0, j = 0; i < tcount; i++) {
- /* alignment tests are only done with continuous buffers */
- if (align_offset != 0)
- break;
+result_ready:
+ /* Check that the algorithm produced the correct digest */
+ if (memcmp(result, vec->digest, digestsize) != 0) {
+ pr_err("alg: hash: %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
+ driver, vec_num, cfg->name);
+ return -EINVAL;
+ }
+ if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
+ pr_err("alg: hash: %s overran result buffer on test vector %u, cfg=\"%s\"\n",
+ driver, vec_num, cfg->name);
+ return -EOVERFLOW;
+ }
- if (!template[i].np)
- continue;
+ return 0;
+}
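
The three finalization types exercised above (one-shot digest(), update()+final(), update()+finup()) must all yield the same digest for the same data, however the scatterlist is split. A toy userspace model of that equivalence, using a trivial byte-sum in place of a real hash:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct toy_hash { uint32_t sum; };

static void toy_init(struct toy_hash *h)	{ h->sum = 0; }
static void toy_update(struct toy_hash *h, const uint8_t *p, size_t n)
{
	while (n--)
		h->sum += *p++;
}
static uint32_t toy_final(struct toy_hash *h)	{ return h->sum; }

static uint32_t toy_digest(const uint8_t *p, size_t n)
{
	struct toy_hash h;

	toy_init(&h);
	toy_update(&h, p, n);
	return toy_final(&h);
}

int main(void)
{
	static const uint8_t msg[] = "abcdefgh";
	struct toy_hash h;

	toy_init(&h);
	toy_update(&h, msg, 3);		/* split across two updates ... */
	toy_update(&h, msg + 3, 5);
	assert(toy_final(&h) == toy_digest(msg, 8));	/* ... same digest */
	return 0;
}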
- j++;
+static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
+ unsigned int vec_num, struct ahash_request *req,
+ struct test_sglist *tsgl, u8 *hashstate)
+{
+ unsigned int i;
+ int err;
- if (template[i].iv)
- memcpy(iv, template[i].iv, iv_len);
- else
- memset(iv, 0, MAX_IVLEN);
+ for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
+ err = test_hash_vec_cfg(driver, vec, vec_num,
+ &default_hash_testvec_configs[i],
+ req, tsgl, hashstate);
+ if (err)
+ return err;
+ }
- crypto_aead_clear_flags(tfm, ~0);
- if (template[i].wk)
- crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
- if (template[i].klen > MAX_KEYLEN) {
- pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
- d, j, algo, template[i].klen, MAX_KEYLEN);
- ret = -EINVAL;
- goto out;
- }
- memcpy(key, template[i].key, template[i].klen);
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ if (!noextratests) {
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
- ret = crypto_aead_setkey(tfm, key, template[i].klen);
- if (template[i].fail == !ret) {
- pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
- d, j, algo, crypto_aead_get_flags(tfm));
- goto out;
- } else if (ret)
- continue;
+ for (i = 0; i < fuzz_iterations; i++) {
+ generate_random_testvec_config(&cfg, cfgname,
+ sizeof(cfgname));
+ err = test_hash_vec_cfg(driver, vec, vec_num, &cfg,
+ req, tsgl, hashstate);
+ if (err)
+ return err;
+ }
+ }
+#endif
+ return 0;
+}
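
test_hash_vec() (and the AEAD/skcipher equivalents below) always runs the fixed table of default configurations and, unless noextratests is set, a further fuzz_iterations randomly generated ones. A userspace sketch of that loop shape; the configuration names here are made up:

#include <stdio.h>
#include <stdlib.h>

static int run_one(const char *cfg_name)
{
	printf("running cfg \"%s\"\n", cfg_name);
	return 0;			/* 0 = pass */
}

int main(void)
{
	static const char *default_cfgs[] = { "in-place", "two sglist entries" };
	int noextratests = 0;
	unsigned int fuzz_iterations = 3;

	for (size_t i = 0; i < sizeof(default_cfgs) / sizeof(default_cfgs[0]); i++)
		if (run_one(default_cfgs[i]))
			return 1;

	if (!noextratests) {
		for (unsigned int i = 0; i < fuzz_iterations; i++) {
			char name[32];

			snprintf(name, sizeof(name), "random:%u", (unsigned int)rand());
			if (run_one(name))
				return 1;
		}
	}
	return 0;
}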
- authsize = abs(template[i].rlen - template[i].ilen);
+static int __alg_test_hash(const struct hash_testvec *vecs,
+ unsigned int num_vecs, const char *driver,
+ u32 type, u32 mask)
+{
+ struct crypto_ahash *tfm;
+ struct ahash_request *req = NULL;
+ struct test_sglist *tsgl = NULL;
+ u8 *hashstate = NULL;
+ unsigned int i;
+ int err;
- ret = -EINVAL;
- sg_init_table(sg, template[i].anp + template[i].np);
- if (diff_dst)
- sg_init_table(sgout, template[i].anp + template[i].np);
+ tfm = crypto_alloc_ahash(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
+ driver, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
- ret = -EINVAL;
- for (k = 0, temp = 0; k < template[i].anp; k++) {
- if (WARN_ON(offset_in_page(IDX[k]) +
- template[i].atap[k] > PAGE_SIZE))
- goto out;
- sg_set_buf(&sg[k],
- memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]),
- template[i].assoc + temp,
- template[i].atap[k]),
- template[i].atap[k]);
- if (diff_dst)
- sg_set_buf(&sgout[k],
- axbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]),
- template[i].atap[k]);
- temp += template[i].atap[k];
- }
-
- for (k = 0, temp = 0; k < template[i].np; k++) {
- if (WARN_ON(offset_in_page(IDX[k]) +
- template[i].tap[k] > PAGE_SIZE))
- goto out;
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("alg: hash: failed to allocate request for %s\n",
+ driver);
+ err = -ENOMEM;
+ goto out;
+ }
- q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]);
- memcpy(q, template[i].input + temp, template[i].tap[k]);
- sg_set_buf(&sg[template[i].anp + k],
- q, template[i].tap[k]);
+ tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL);
+ if (!tsgl || init_test_sglist(tsgl) != 0) {
+ pr_err("alg: hash: failed to allocate test buffers for %s\n",
+ driver);
+ kfree(tsgl);
+ tsgl = NULL;
+ err = -ENOMEM;
+ goto out;
+ }
- if (diff_dst) {
- q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
+ hashstate = kmalloc(crypto_ahash_statesize(tfm) + TESTMGR_POISON_LEN,
+ GFP_KERNEL);
+ if (!hashstate) {
+ pr_err("alg: hash: failed to allocate hash state buffer for %s\n",
+ driver);
+ err = -ENOMEM;
+ goto out;
+ }
- memset(q, 0, template[i].tap[k]);
+ for (i = 0; i < num_vecs; i++) {
+ err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate);
+ if (err)
+ goto out;
+ }
+ err = 0;
+out:
+ kfree(hashstate);
+ if (tsgl) {
+ destroy_test_sglist(tsgl);
+ kfree(tsgl);
+ }
+ ahash_request_free(req);
+ crypto_free_ahash(tfm);
+ return err;
+}
- sg_set_buf(&sgout[template[i].anp + k],
- q, template[i].tap[k]);
- }
+static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
+ u32 type, u32 mask)
+{
+ const struct hash_testvec *template = desc->suite.hash.vecs;
+ unsigned int tcount = desc->suite.hash.count;
+ unsigned int nr_unkeyed, nr_keyed;
+ int err;
- n = template[i].tap[k];
- if (k == template[i].np - 1 && enc)
- n += authsize;
- if (offset_in_page(q) + n < PAGE_SIZE)
- q[n] = 0;
+ /*
+ * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests
+ * first, before setting a key on the tfm. To make this easier, we
+ * require that the unkeyed test vectors (if any) are listed first.
+ */
- temp += template[i].tap[k];
+ for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) {
+ if (template[nr_unkeyed].ksize)
+ break;
+ }
+ for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) {
+ if (!template[nr_unkeyed + nr_keyed].ksize) {
+ pr_err("alg: hash: test vectors for %s out of order, "
+ "unkeyed ones must come first\n", desc->alg);
+ return -EINVAL;
}
+ }
- ret = crypto_aead_setauthsize(tfm, authsize);
- if (ret) {
- pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n",
- d, authsize, j, algo);
- goto out;
- }
+ err = 0;
+ if (nr_unkeyed) {
+ err = __alg_test_hash(template, nr_unkeyed, driver, type, mask);
+ template += nr_unkeyed;
+ }
- if (enc) {
- if (WARN_ON(sg[template[i].anp + k - 1].offset +
- sg[template[i].anp + k - 1].length +
- authsize > PAGE_SIZE)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!err && nr_keyed)
+ err = __alg_test_hash(template, nr_keyed, driver, type, mask);
- if (diff_dst)
- sgout[template[i].anp + k - 1].length +=
- authsize;
- sg[template[i].anp + k - 1].length += authsize;
- }
+ return err;
+}
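
The ordering rule enforced above, unkeyed vectors strictly before keyed ones, can be checked over a plain array of key sizes with the same two loops:

#include <stdio.h>

static int check_order(const unsigned int *ksize, unsigned int count)
{
	unsigned int nr_unkeyed, nr_keyed;

	for (nr_unkeyed = 0; nr_unkeyed < count; nr_unkeyed++)
		if (ksize[nr_unkeyed])
			break;
	for (nr_keyed = 0; nr_unkeyed + nr_keyed < count; nr_keyed++)
		if (!ksize[nr_unkeyed + nr_keyed])
			return -1;	/* unkeyed vector after a keyed one */
	return 0;
}

int main(void)
{
	unsigned int ok[]  = { 0, 0, 16, 32 };
	unsigned int bad[] = { 0, 16, 0, 32 };

	printf("ok:  %d\n", check_order(ok, 4));	/* 0  */
	printf("bad: %d\n", check_order(bad, 4));	/* -1 */
	return 0;
}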
+
+static int test_aead_vec_cfg(const char *driver, int enc,
+ const struct aead_testvec *vec,
+ unsigned int vec_num,
+ const struct testvec_config *cfg,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ const unsigned int alignmask = crypto_aead_alignmask(tfm);
+ const unsigned int ivsize = crypto_aead_ivsize(tfm);
+ const unsigned int authsize = vec->clen - vec->plen;
+ const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
+ const char *op = enc ? "encryption" : "decryption";
+ DECLARE_CRYPTO_WAIT(wait);
+ u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN];
+ u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) +
+ cfg->iv_offset +
+ (cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
+ struct kvec input[2];
+ int err;
+
+ /* Set the key */
+ if (vec->wk)
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+ else
+ crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+ err = crypto_aead_setkey(tfm, vec->key, vec->klen);
+ if (err) {
+ if (vec->fail) /* expectedly failed to set key? */
+ return 0;
+ pr_err("alg: aead: %s setkey failed with err %d on test vector %u; flags=%#x\n",
+ driver, err, vec_num, crypto_aead_get_flags(tfm));
+ return err;
+ }
+ if (vec->fail) {
+ pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %u\n",
+ driver, vec_num);
+ return -EINVAL;
+ }
+
+ /* Set the authentication tag size */
+ err = crypto_aead_setauthsize(tfm, authsize);
+ if (err) {
+ pr_err("alg: aead: %s setauthsize failed with err %d on test vector %u\n",
+ driver, err, vec_num);
+ return err;
+ }
- aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
- template[i].ilen,
- iv);
+ /* The IV must be copied to a buffer, as the algorithm may modify it */
+ if (WARN_ON(ivsize > MAX_IVLEN))
+ return -EINVAL;
+ if (vec->iv)
+ memcpy(iv, vec->iv, ivsize);
+ else
+ memset(iv, 0, ivsize);
+
+ /* Build the src/dst scatterlists */
+ input[0].iov_base = (void *)vec->assoc;
+ input[0].iov_len = vec->alen;
+ input[1].iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext;
+ input[1].iov_len = enc ? vec->plen : vec->clen;
+ err = build_cipher_test_sglists(tsgls, cfg, alignmask,
+ vec->alen + (enc ? vec->plen :
+ vec->clen),
+ vec->alen + (enc ? vec->clen :
+ vec->plen),
+ input, 2);
+ if (err) {
+ pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return err;
+ }
- aead_request_set_ad(req, template[i].alen);
+ /* Do the actual encryption or decryption */
+ testmgr_poison(req->__ctx, crypto_aead_reqsize(tfm));
+ aead_request_set_callback(req, req_flags, crypto_req_done, &wait);
+ aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
+ enc ? vec->plen : vec->clen, iv);
+ aead_request_set_ad(req, vec->alen);
+ err = crypto_wait_req(enc ? crypto_aead_encrypt(req) :
+ crypto_aead_decrypt(req), &wait);
- ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
- : crypto_aead_decrypt(req), &wait);
+ aead_request_set_tfm(req, tfm); /* TODO: get rid of this */
- switch (ret) {
- case 0:
- if (template[i].novrfy) {
- /* verification was supposed to fail */
- pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n",
- d, e, j, algo);
- /* so really, we got a bad message */
- ret = -EBADMSG;
- goto out;
- }
- break;
- case -EBADMSG:
- if (template[i].novrfy)
- /* verification failure was expected */
- continue;
- /* fall through */
- default:
- pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n",
- d, e, j, algo, -ret);
- goto out;
- }
+ if (err) {
+ if (err == -EBADMSG && vec->novrfy)
+ return 0;
+ pr_err("alg: aead: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
+ driver, op, err, vec_num, cfg->name);
+ return err;
+ }
+ if (vec->novrfy) {
+ pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return -EINVAL;
+ }
+
+ /* Check that the algorithm didn't overwrite things it shouldn't have */
+ if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
+ req->assoclen != vec->alen ||
+ req->iv != iv ||
+ req->src != tsgls->src.sgl_ptr ||
+ req->dst != tsgls->dst.sgl_ptr ||
+ crypto_aead_reqtfm(req) != tfm ||
+ req->base.complete != crypto_req_done ||
+ req->base.flags != req_flags ||
+ req->base.data != &wait) {
+ pr_err("alg: aead: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ if (req->cryptlen != (enc ? vec->plen : vec->clen))
+ pr_err("alg: aead: changed 'req->cryptlen'\n");
+ if (req->assoclen != vec->alen)
+ pr_err("alg: aead: changed 'req->assoclen'\n");
+ if (req->iv != iv)
+ pr_err("alg: aead: changed 'req->iv'\n");
+ if (req->src != tsgls->src.sgl_ptr)
+ pr_err("alg: aead: changed 'req->src'\n");
+ if (req->dst != tsgls->dst.sgl_ptr)
+ pr_err("alg: aead: changed 'req->dst'\n");
+ if (crypto_aead_reqtfm(req) != tfm)
+ pr_err("alg: aead: changed 'req->base.tfm'\n");
+ if (req->base.complete != crypto_req_done)
+ pr_err("alg: aead: changed 'req->base.complete'\n");
+ if (req->base.flags != req_flags)
+ pr_err("alg: aead: changed 'req->base.flags'\n");
+ if (req->base.data != &wait)
+ pr_err("alg: aead: changed 'req->base.data'\n");
+ return -EINVAL;
+ }
+ if (is_test_sglist_corrupted(&tsgls->src)) {
+ pr_err("alg: aead: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return -EINVAL;
+ }
+ if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
+ is_test_sglist_corrupted(&tsgls->dst)) {
+ pr_err("alg: aead: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return -EINVAL;
+ }
+
+ /* Check for the correct output (ciphertext or plaintext) */
+ err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
+ enc ? vec->clen : vec->plen,
+ vec->alen, enc || !cfg->inplace);
+ if (err == -EOVERFLOW) {
+ pr_err("alg: aead: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return err;
+ }
+ if (err) {
+ pr_err("alg: aead: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return err;
+ }
- ret = -EINVAL;
- for (k = 0, temp = 0; k < template[i].np; k++) {
- if (diff_dst)
- q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
- else
- q = xbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
+ return 0;
+}
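
The scatterlist totals computed in test_aead_vec_cfg() encode the AEAD size relationship: the tag adds authsize = clen - plen, so the source and destination totals swap plen and clen depending on the direction. A tiny userspace check with example lengths:

#include <stdio.h>

int main(void)
{
	unsigned int alen = 16, plen = 64, clen = 80;	/* example vector */
	unsigned int authsize = clen - plen;		/* 16-byte tag */
	int enc = 1;

	unsigned int src_total = alen + (enc ? plen : clen);
	unsigned int dst_total = alen + (enc ? clen : plen);

	printf("authsize=%u src_total=%u dst_total=%u\n",
	       authsize, src_total, dst_total);
	return 0;
}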
- n = template[i].tap[k];
- if (k == template[i].np - 1)
- n += enc ? authsize : -authsize;
+static int test_aead_vec(const char *driver, int enc,
+ const struct aead_testvec *vec, unsigned int vec_num,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ unsigned int i;
+ int err;
- if (memcmp(q, template[i].result + temp, n)) {
- pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n",
- d, j, e, k, algo);
- hexdump(q, n);
- goto out;
- }
+ if (enc && vec->novrfy)
+ return 0;
- q += n;
- if (k == template[i].np - 1 && !enc) {
- if (!diff_dst &&
- memcmp(q, template[i].input +
- temp + n, authsize))
- n = authsize;
- else
- n = 0;
- } else {
- for (n = 0; offset_in_page(q + n) && q[n]; n++)
- ;
- }
- if (n) {
- pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
- d, j, e, k, algo, n);
- hexdump(q, n);
- goto out;
- }
+ for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
+ err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+ &default_cipher_testvec_configs[i],
+ req, tsgls);
+ if (err)
+ return err;
+ }
+
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ if (!noextratests) {
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
- temp += template[i].tap[k];
+ for (i = 0; i < fuzz_iterations; i++) {
+ generate_random_testvec_config(&cfg, cfgname,
+ sizeof(cfgname));
+ err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+ &cfg, req, tsgls);
+ if (err)
+ return err;
}
}
+#endif
+ return 0;
+}
- ret = 0;
+static int test_aead(const char *driver, int enc,
+ const struct aead_test_suite *suite,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ unsigned int i;
+ int err;
-out:
- aead_request_free(req);
- kfree(sg);
-out_nosg:
- if (diff_dst)
- testmgr_free_buf(xoutbuf);
-out_nooutbuf:
- testmgr_free_buf(axbuf);
-out_noaxbuf:
- testmgr_free_buf(xbuf);
-out_noxbuf:
- kfree(key);
- kfree(iv);
- return ret;
+ for (i = 0; i < suite->count; i++) {
+ err = test_aead_vec(driver, enc, &suite->vecs[i], i, req,
+ tsgls);
+ if (err)
+ return err;
+ }
+ return 0;
}
-static int test_aead(struct crypto_aead *tfm, int enc,
- const struct aead_testvec *template, unsigned int tcount)
+static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
+ u32 type, u32 mask)
{
- unsigned int alignmask;
- int ret;
+ const struct aead_test_suite *suite = &desc->suite.aead;
+ struct crypto_aead *tfm;
+ struct aead_request *req = NULL;
+ struct cipher_test_sglists *tsgls = NULL;
+ int err;
- /* test 'dst == src' case */
- ret = __test_aead(tfm, enc, template, tcount, false, 0);
- if (ret)
- return ret;
+ if (suite->count <= 0) {
+ pr_err("alg: aead: empty test suite for %s\n", driver);
+ return -EINVAL;
+ }
- /* test 'dst != src' case */
- ret = __test_aead(tfm, enc, template, tcount, true, 0);
- if (ret)
- return ret;
+ tfm = crypto_alloc_aead(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ pr_err("alg: aead: failed to allocate transform for %s: %ld\n",
+ driver, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
- /* test unaligned buffers, check with one byte offset */
- ret = __test_aead(tfm, enc, template, tcount, true, 1);
- if (ret)
- return ret;
+ req = aead_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("alg: aead: failed to allocate request for %s\n",
+ driver);
+ err = -ENOMEM;
+ goto out;
+ }
- alignmask = crypto_tfm_alg_alignmask(&tfm->base);
- if (alignmask) {
- /* Check if alignment mask for tfm is correctly set. */
- ret = __test_aead(tfm, enc, template, tcount, true,
- alignmask + 1);
- if (ret)
- return ret;
+ tsgls = alloc_cipher_test_sglists();
+ if (!tsgls) {
+ pr_err("alg: aead: failed to allocate test buffers for %s\n",
+ driver);
+ err = -ENOMEM;
+ goto out;
}
- return 0;
+ err = test_aead(driver, ENCRYPT, suite, req, tsgls);
+ if (err)
+ goto out;
+
+ err = test_aead(driver, DECRYPT, suite, req, tsgls);
+out:
+ free_cipher_test_sglists(tsgls);
+ aead_request_free(req);
+ crypto_free_aead(tfm);
+ return err;
}
static int test_cipher(struct crypto_cipher *tfm, int enc,
@@ -1037,8 +1441,6 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
j = 0;
for (i = 0; i < tcount; i++) {
- if (template[i].np)
- continue;
if (fips_enabled && template[i].fips_skip)
continue;
@@ -1056,7 +1458,7 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
crypto_cipher_clear_flags(tfm, ~0);
if (template[i].wk)
- crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
ret = crypto_cipher_setkey(tfm, template[i].key,
template[i].klen);
@@ -1096,284 +1498,257 @@ out_nobuf:
return ret;
}
-static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
- const struct cipher_testvec *template,
- unsigned int tcount,
- const bool diff_dst, const int align_offset)
+static int test_skcipher_vec_cfg(const char *driver, int enc,
+ const struct cipher_testvec *vec,
+ unsigned int vec_num,
+ const struct testvec_config *cfg,
+ struct skcipher_request *req,
+ struct cipher_test_sglists *tsgls)
{
- const char *algo =
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
- unsigned int i, j, k, n, temp;
- char *q;
- struct skcipher_request *req;
- struct scatterlist sg[8];
- struct scatterlist sgout[8];
- const char *e, *d;
- struct crypto_wait wait;
- const char *input, *result;
- void *data;
- char iv[MAX_IVLEN];
- char *xbuf[XBUFSIZE];
- char *xoutbuf[XBUFSIZE];
- int ret = -ENOMEM;
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-
- if (testmgr_alloc_buf(xbuf))
- goto out_nobuf;
-
- if (diff_dst && testmgr_alloc_buf(xoutbuf))
- goto out_nooutbuf;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const unsigned int alignmask = crypto_skcipher_alignmask(tfm);
+ const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
+ const char *op = enc ? "encryption" : "decryption";
+ DECLARE_CRYPTO_WAIT(wait);
+ u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN];
+ u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) +
+ cfg->iv_offset +
+ (cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
+ struct kvec input;
+ int err;
- if (diff_dst)
- d = "-ddst";
+ /* Set the key */
+ if (vec->wk)
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
else
- d = "";
-
- if (enc == ENCRYPT)
- e = "encryption";
- else
- e = "decryption";
-
- crypto_init_wait(&wait);
-
- req = skcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- pr_err("alg: skcipher%s: Failed to allocate request for %s\n",
- d, algo);
- goto out;
+ crypto_skcipher_clear_flags(tfm,
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+ err = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
+ if (err) {
+ if (vec->fail) /* expectedly failed to set key? */
+ return 0;
+ pr_err("alg: skcipher: %s setkey failed with err %d on test vector %u; flags=%#x\n",
+ driver, err, vec_num, crypto_skcipher_get_flags(tfm));
+ return err;
+ }
+ if (vec->fail) {
+ pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %u\n",
+ driver, vec_num);
+ return -EINVAL;
}
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
-
- j = 0;
- for (i = 0; i < tcount; i++) {
- if (template[i].np && !template[i].also_non_np)
- continue;
-
- if (fips_enabled && template[i].fips_skip)
- continue;
-
- if (template[i].iv && !(template[i].generates_iv && enc))
- memcpy(iv, template[i].iv, ivsize);
+ /* The IV must be copied to a buffer, as the algorithm may modify it */
+ if (ivsize) {
+ if (WARN_ON(ivsize > MAX_IVLEN))
+ return -EINVAL;
+ if (vec->generates_iv && !enc)
+ memcpy(iv, vec->iv_out, ivsize);
+ else if (vec->iv)
+ memcpy(iv, vec->iv, ivsize);
else
- memset(iv, 0, MAX_IVLEN);
-
- input = enc ? template[i].ptext : template[i].ctext;
- result = enc ? template[i].ctext : template[i].ptext;
- j++;
- ret = -EINVAL;
- if (WARN_ON(align_offset + template[i].len > PAGE_SIZE))
- goto out;
-
- data = xbuf[0];
- data += align_offset;
- memcpy(data, input, template[i].len);
-
- crypto_skcipher_clear_flags(tfm, ~0);
- if (template[i].wk)
- crypto_skcipher_set_flags(tfm,
- CRYPTO_TFM_REQ_WEAK_KEY);
-
- ret = crypto_skcipher_setkey(tfm, template[i].key,
- template[i].klen);
- if (template[i].fail == !ret) {
- pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
- d, j, algo, crypto_skcipher_get_flags(tfm));
- goto out;
- } else if (ret)
- continue;
-
- sg_init_one(&sg[0], data, template[i].len);
- if (diff_dst) {
- data = xoutbuf[0];
- data += align_offset;
- sg_init_one(&sgout[0], data, template[i].len);
- }
-
- skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
- template[i].len, iv);
- ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req), &wait);
-
- if (ret) {
- pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
- d, e, j, algo, -ret);
- goto out;
- }
-
- q = data;
- if (memcmp(q, result, template[i].len)) {
- pr_err("alg: skcipher%s: Test %d failed (invalid result) on %s for %s\n",
- d, j, e, algo);
- hexdump(q, template[i].len);
- ret = -EINVAL;
- goto out;
- }
-
- if (template[i].generates_iv && enc &&
- memcmp(iv, template[i].iv, crypto_skcipher_ivsize(tfm))) {
- pr_err("alg: skcipher%s: Test %d failed (invalid output IV) on %s for %s\n",
- d, j, e, algo);
- hexdump(iv, crypto_skcipher_ivsize(tfm));
- ret = -EINVAL;
- goto out;
+ memset(iv, 0, ivsize);
+ } else {
+ if (vec->generates_iv) {
+ pr_err("alg: skcipher: %s has ivsize=0 but test vector %u generates IV!\n",
+ driver, vec_num);
+ return -EINVAL;
}
+ iv = NULL;
}
- j = 0;
- for (i = 0; i < tcount; i++) {
- /* alignment tests are only done with continuous buffers */
- if (align_offset != 0)
- break;
-
- if (!template[i].np)
- continue;
-
- if (fips_enabled && template[i].fips_skip)
- continue;
-
- if (template[i].iv && !(template[i].generates_iv && enc))
- memcpy(iv, template[i].iv, ivsize);
- else
- memset(iv, 0, MAX_IVLEN);
-
- input = enc ? template[i].ptext : template[i].ctext;
- result = enc ? template[i].ctext : template[i].ptext;
- j++;
- crypto_skcipher_clear_flags(tfm, ~0);
- if (template[i].wk)
- crypto_skcipher_set_flags(tfm,
- CRYPTO_TFM_REQ_WEAK_KEY);
-
- ret = crypto_skcipher_setkey(tfm, template[i].key,
- template[i].klen);
- if (template[i].fail == !ret) {
- pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
- d, j, algo, crypto_skcipher_get_flags(tfm));
- goto out;
- } else if (ret)
- continue;
-
- temp = 0;
- ret = -EINVAL;
- sg_init_table(sg, template[i].np);
- if (diff_dst)
- sg_init_table(sgout, template[i].np);
- for (k = 0; k < template[i].np; k++) {
- if (WARN_ON(offset_in_page(IDX[k]) +
- template[i].tap[k] > PAGE_SIZE))
- goto out;
-
- q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]);
-
- memcpy(q, input + temp, template[i].tap[k]);
-
- if (offset_in_page(q) + template[i].tap[k] < PAGE_SIZE)
- q[template[i].tap[k]] = 0;
-
- sg_set_buf(&sg[k], q, template[i].tap[k]);
- if (diff_dst) {
- q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
+ /* Build the src/dst scatterlists */
+ input.iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext;
+ input.iov_len = vec->len;
+ err = build_cipher_test_sglists(tsgls, cfg, alignmask,
+ vec->len, vec->len, &input, 1);
+ if (err) {
+ pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return err;
+ }
- sg_set_buf(&sgout[k], q, template[i].tap[k]);
+ /* Do the actual encryption or decryption */
+ testmgr_poison(req->__ctx, crypto_skcipher_reqsize(tfm));
+ skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
+ skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
+ vec->len, iv);
+ err = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
+ if (err) {
+ pr_err("alg: skcipher: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
+ driver, op, err, vec_num, cfg->name);
+ return err;
+ }
- memset(q, 0, template[i].tap[k]);
- if (offset_in_page(q) +
- template[i].tap[k] < PAGE_SIZE)
- q[template[i].tap[k]] = 0;
- }
+ /* Check that the algorithm didn't overwrite things it shouldn't have */
+ if (req->cryptlen != vec->len ||
+ req->iv != iv ||
+ req->src != tsgls->src.sgl_ptr ||
+ req->dst != tsgls->dst.sgl_ptr ||
+ crypto_skcipher_reqtfm(req) != tfm ||
+ req->base.complete != crypto_req_done ||
+ req->base.flags != req_flags ||
+ req->base.data != &wait) {
+ pr_err("alg: skcipher: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ if (req->cryptlen != vec->len)
+ pr_err("alg: skcipher: changed 'req->cryptlen'\n");
+ if (req->iv != iv)
+ pr_err("alg: skcipher: changed 'req->iv'\n");
+ if (req->src != tsgls->src.sgl_ptr)
+ pr_err("alg: skcipher: changed 'req->src'\n");
+ if (req->dst != tsgls->dst.sgl_ptr)
+ pr_err("alg: skcipher: changed 'req->dst'\n");
+ if (crypto_skcipher_reqtfm(req) != tfm)
+ pr_err("alg: skcipher: changed 'req->base.tfm'\n");
+ if (req->base.complete != crypto_req_done)
+ pr_err("alg: skcipher: changed 'req->base.complete'\n");
+ if (req->base.flags != req_flags)
+ pr_err("alg: skcipher: changed 'req->base.flags'\n");
+ if (req->base.data != &wait)
+ pr_err("alg: skcipher: changed 'req->base.data'\n");
+ return -EINVAL;
+ }
+ if (is_test_sglist_corrupted(&tsgls->src)) {
+ pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return -EINVAL;
+ }
+ if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
+ is_test_sglist_corrupted(&tsgls->dst)) {
+ pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return -EINVAL;
+ }
+
+ /* Check for the correct output (ciphertext or plaintext) */
+ err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
+ vec->len, 0, true);
+ if (err == -EOVERFLOW) {
+ pr_err("alg: skcipher: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return err;
+ }
+ if (err) {
+ pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ return err;
+ }
- temp += template[i].tap[k];
- }
+ /* If applicable, check that the algorithm generated the correct IV */
+ if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) {
+ pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %u, cfg=\"%s\"\n",
+ driver, op, vec_num, cfg->name);
+ hexdump(iv, ivsize);
+ return -EINVAL;
+ }
- skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
- template[i].len, iv);
+ return 0;
+}
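
Both *_vec_cfg() helpers place the IV in an over-sized stack buffer, align it, then add cfg->iv_offset (optionally plus the alignmask) to deliberately misalign it. A userspace sketch of that pointer arithmetic; ALIGN_UP_PTR is a local stand-in for the kernel's PTR_ALIGN:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP_PTR(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

int main(void)
{
	enum { ALIGNMENT = 64, MAX_IVLEN = 32 };
	uint8_t _iv[3 * ALIGNMENT + MAX_IVLEN];
	unsigned int iv_offset = 5;		/* deliberate misalignment */
	uint8_t *iv = (uint8_t *)ALIGN_UP_PTR(&_iv[0], 2 * ALIGNMENT) + iv_offset;

	printf("iv sits %u bytes past an aligned boundary\n",
	       (unsigned int)((uintptr_t)iv % ALIGNMENT));
	return 0;
}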
- ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req), &wait);
+static int test_skcipher_vec(const char *driver, int enc,
+ const struct cipher_testvec *vec,
+ unsigned int vec_num,
+ struct skcipher_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ unsigned int i;
+ int err;
- if (ret) {
- pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
- d, e, j, algo, -ret);
- goto out;
- }
+ if (fips_enabled && vec->fips_skip)
+ return 0;
- temp = 0;
- ret = -EINVAL;
- for (k = 0; k < template[i].np; k++) {
- if (diff_dst)
- q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
- else
- q = xbuf[IDX[k] >> PAGE_SHIFT] +
- offset_in_page(IDX[k]);
+ for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
+ err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+ &default_cipher_testvec_configs[i],
+ req, tsgls);
+ if (err)
+ return err;
+ }
- if (memcmp(q, result + temp, template[i].tap[k])) {
- pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n",
- d, j, e, k, algo);
- hexdump(q, template[i].tap[k]);
- goto out;
- }
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ if (!noextratests) {
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
- q += template[i].tap[k];
- for (n = 0; offset_in_page(q + n) && q[n]; n++)
- ;
- if (n) {
- pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
- d, j, e, k, algo, n);
- hexdump(q, n);
- goto out;
- }
- temp += template[i].tap[k];
+ for (i = 0; i < fuzz_iterations; i++) {
+ generate_random_testvec_config(&cfg, cfgname,
+ sizeof(cfgname));
+ err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+ &cfg, req, tsgls);
+ if (err)
+ return err;
}
}
+#endif
+ return 0;
+}
- ret = 0;
+static int test_skcipher(const char *driver, int enc,
+ const struct cipher_test_suite *suite,
+ struct skcipher_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ unsigned int i;
+ int err;
-out:
- skcipher_request_free(req);
- if (diff_dst)
- testmgr_free_buf(xoutbuf);
-out_nooutbuf:
- testmgr_free_buf(xbuf);
-out_nobuf:
- return ret;
+ for (i = 0; i < suite->count; i++) {
+ err = test_skcipher_vec(driver, enc, &suite->vecs[i], i, req,
+ tsgls);
+ if (err)
+ return err;
+ }
+ return 0;
}
-static int test_skcipher(struct crypto_skcipher *tfm, int enc,
- const struct cipher_testvec *template,
- unsigned int tcount)
+static int alg_test_skcipher(const struct alg_test_desc *desc,
+ const char *driver, u32 type, u32 mask)
{
- unsigned int alignmask;
- int ret;
+ const struct cipher_test_suite *suite = &desc->suite.cipher;
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req = NULL;
+ struct cipher_test_sglists *tsgls = NULL;
+ int err;
- /* test 'dst == src' case */
- ret = __test_skcipher(tfm, enc, template, tcount, false, 0);
- if (ret)
- return ret;
+ if (suite->count <= 0) {
+ pr_err("alg: skcipher: empty test suite for %s\n", driver);
+ return -EINVAL;
+ }
- /* test 'dst != src' case */
- ret = __test_skcipher(tfm, enc, template, tcount, true, 0);
- if (ret)
- return ret;
+ tfm = crypto_alloc_skcipher(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n",
+ driver, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
- /* test unaligned buffers, check with one byte offset */
- ret = __test_skcipher(tfm, enc, template, tcount, true, 1);
- if (ret)
- return ret;
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("alg: skcipher: failed to allocate request for %s\n",
+ driver);
+ err = -ENOMEM;
+ goto out;
+ }
- alignmask = crypto_tfm_alg_alignmask(&tfm->base);
- if (alignmask) {
- /* Check if alignment mask for tfm is correctly set. */
- ret = __test_skcipher(tfm, enc, template, tcount, true,
- alignmask + 1);
- if (ret)
- return ret;
+ tsgls = alloc_cipher_test_sglists();
+ if (!tsgls) {
+ pr_err("alg: skcipher: failed to allocate test buffers for %s\n",
+ driver);
+ err = -ENOMEM;
+ goto out;
}
- return 0;
+ err = test_skcipher(driver, ENCRYPT, suite, req, tsgls);
+ if (err)
+ goto out;
+
+ err = test_skcipher(driver, DECRYPT, suite, req, tsgls);
+out:
+ free_cipher_test_sglists(tsgls);
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return err;
}
static int test_comp(struct crypto_comp *tfm,
@@ -1713,35 +2088,6 @@ out:
return err;
}
-static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
- u32 type, u32 mask)
-{
- struct crypto_aead *tfm;
- int err = 0;
-
- tfm = crypto_alloc_aead(driver, type, mask);
- if (IS_ERR(tfm)) {
- printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- if (desc->suite.aead.enc.vecs) {
- err = test_aead(tfm, ENCRYPT, desc->suite.aead.enc.vecs,
- desc->suite.aead.enc.count);
- if (err)
- goto out;
- }
-
- if (!err && desc->suite.aead.dec.vecs)
- err = test_aead(tfm, DECRYPT, desc->suite.aead.dec.vecs,
- desc->suite.aead.dec.count);
-
-out:
- crypto_free_aead(tfm);
- return err;
-}
-
static int alg_test_cipher(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
@@ -1764,28 +2110,6 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
return err;
}
-static int alg_test_skcipher(const struct alg_test_desc *desc,
- const char *driver, u32 type, u32 mask)
-{
- const struct cipher_test_suite *suite = &desc->suite.cipher;
- struct crypto_skcipher *tfm;
- int err;
-
- tfm = crypto_alloc_skcipher(driver, type, mask);
- if (IS_ERR(tfm)) {
- printk(KERN_ERR "alg: skcipher: Failed to load transform for "
- "%s: %ld\n", driver, PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- err = test_skcipher(tfm, ENCRYPT, suite->vecs, suite->count);
- if (!err)
- err = test_skcipher(tfm, DECRYPT, suite->vecs, suite->count);
-
- crypto_free_skcipher(tfm);
- return err;
-}
-
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
@@ -1824,84 +2148,30 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
return err;
}
-static int __alg_test_hash(const struct hash_testvec *template,
- unsigned int tcount, const char *driver,
- u32 type, u32 mask)
-{
- struct crypto_ahash *tfm;
- int err;
-
- tfm = crypto_alloc_ahash(driver, type, mask);
- if (IS_ERR(tfm)) {
- printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
- if (!err)
- err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
- if (!err)
- err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
- crypto_free_ahash(tfm);
- return err;
-}
-
-static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
- u32 type, u32 mask)
-{
- const struct hash_testvec *template = desc->suite.hash.vecs;
- unsigned int tcount = desc->suite.hash.count;
- unsigned int nr_unkeyed, nr_keyed;
- int err;
-
- /*
- * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests
- * first, before setting a key on the tfm. To make this easier, we
- * require that the unkeyed test vectors (if any) are listed first.
- */
-
- for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) {
- if (template[nr_unkeyed].ksize)
- break;
- }
- for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) {
- if (!template[nr_unkeyed + nr_keyed].ksize) {
- pr_err("alg: hash: test vectors for %s out of order, "
- "unkeyed ones must come first\n", desc->alg);
- return -EINVAL;
- }
- }
-
- err = 0;
- if (nr_unkeyed) {
- err = __alg_test_hash(template, nr_unkeyed, driver, type, mask);
- template += nr_unkeyed;
- }
-
- if (!err && nr_keyed)
- err = __alg_test_hash(template, nr_keyed, driver, type, mask);
-
- return err;
-}
-
static int alg_test_crc32c(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
struct crypto_shash *tfm;
- u32 val;
+ __le32 val;
int err;
err = alg_test_hash(desc, driver, type, mask);
if (err)
- goto out;
+ return err;
tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT) {
+ /*
+ * This crc32c implementation is only available through
+ * the ahash API, not the shash API, so the remaining part
+ * of the test is not applicable to it.
+ */
+ return 0;
+ }
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
- err = PTR_ERR(tfm);
- goto out;
+ return PTR_ERR(tfm);
}
do {
@@ -1911,7 +2181,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
shash->tfm = tfm;
shash->flags = 0;
- *ctx = le32_to_cpu(420553207);
+ *ctx = 420553207;
err = crypto_shash_final(shash, (u8 *)&val);
if (err) {
printk(KERN_ERR "alg: crc32c: Operation failed for "
@@ -1919,16 +2189,15 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
break;
}
- if (val != ~420553207) {
- printk(KERN_ERR "alg: crc32c: Test failed for %s: "
- "%d\n", driver, val);
+ if (val != cpu_to_le32(~420553207)) {
+ pr_err("alg: crc32c: Test failed for %s: %u\n",
+ driver, le32_to_cpu(val));
err = -EINVAL;
}
} while (0);
crypto_free_shash(tfm);
-out:
return err;
}
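
The crc32c change above declares the result as __le32 and compares it against cpu_to_le32(~seed), making the check byte-order independent: the digest is defined as a little-endian encoding, and for empty input the test expects the complement of the seeded state. A userspace stand-in for the little-endian serialization:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for cpu_to_le32(): serialize LSB first. */
static void put_le32(uint32_t v, uint8_t out[4])
{
	out[0] = (uint8_t)v;
	out[1] = (uint8_t)(v >> 8);
	out[2] = (uint8_t)(v >> 16);
	out[3] = (uint8_t)(v >> 24);
}

int main(void)
{
	uint8_t expected[4];

	/* seed the state with 420553207; for empty input the test expects
	 * the complement of the seed as a little-endian digest */
	put_le32(~(uint32_t)420553207, expected);
	printf("%02x %02x %02x %02x\n",
	       expected[0], expected[1], expected[2], expected[3]);
	return 0;
}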
@@ -2094,12 +2363,11 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
if (vec->genkey) {
/* Save party A's public key */
- a_public = kzalloc(out_len_max, GFP_KERNEL);
+ a_public = kmemdup(sg_virt(req->dst), out_len_max, GFP_KERNEL);
if (!a_public) {
err = -ENOMEM;
goto free_output;
}
- memcpy(a_public, sg_virt(req->dst), out_len_max);
} else {
/* Verify calculated public key */
if (memcmp(vec->expected_a_public, sg_virt(req->dst),
@@ -2112,13 +2380,12 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
}
/* Calculate shared secret key by using counter part (b) public key. */
- input_buf = kzalloc(vec->b_public_size, GFP_KERNEL);
+ input_buf = kmemdup(vec->b_public, vec->b_public_size, GFP_KERNEL);
if (!input_buf) {
err = -ENOMEM;
goto free_output;
}
- memcpy(input_buf, vec->b_public, vec->b_public_size);
sg_init_one(&src, input_buf, vec->b_public_size);
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_input(req, &src, vec->b_public_size);
@@ -2134,12 +2401,11 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
if (vec->genkey) {
/* Save the shared secret obtained by party A */
- a_ss = kzalloc(vec->expected_ss_size, GFP_KERNEL);
+ a_ss = kmemdup(sg_virt(req->dst), vec->expected_ss_size, GFP_KERNEL);
if (!a_ss) {
err = -ENOMEM;
goto free_all;
}
- memcpy(a_ss, sg_virt(req->dst), vec->expected_ss_size);
/*
* Calculate party B's shared secret by using party A's
@@ -2238,6 +2504,9 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2];
+ const char *m, *c;
+ unsigned int m_size, c_size;
+ const char *op;
if (testmgr_alloc_buf(xbuf))
return err;
@@ -2259,46 +2528,72 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
err = -ENOMEM;
out_len_max = crypto_akcipher_maxsize(tfm);
+
+ /*
+ * First run the tests that do not require a private key, such as
+ * encrypt or verify.
+ */
outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_enc)
goto free_req;
- if (WARN_ON(vecs->m_size > PAGE_SIZE))
- goto free_all;
+ if (!vecs->siggen_sigver_test) {
+ m = vecs->m;
+ m_size = vecs->m_size;
+ c = vecs->c;
+ c_size = vecs->c_size;
+ op = "encrypt";
+ } else {
+ /* Swap args so we can keep plaintext (digest)
+ * in vecs->m, and cooked signature in vecs->c.
+ */
+ m = vecs->c; /* signature */
+ m_size = vecs->c_size;
+ c = vecs->m; /* digest */
+ c_size = vecs->m_size;
+ op = "verify";
+ }
- memcpy(xbuf[0], vecs->m, vecs->m_size);
+ if (WARN_ON(m_size > PAGE_SIZE))
+ goto free_all;
+ memcpy(xbuf[0], m, m_size);
sg_init_table(src_tab, 2);
sg_set_buf(&src_tab[0], xbuf[0], 8);
- sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
+ sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
sg_init_one(&dst, outbuf_enc, out_len_max);
- akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
+ akcipher_request_set_crypt(req, src_tab, &dst, m_size,
out_len_max);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
err = crypto_wait_req(vecs->siggen_sigver_test ?
- /* Run asymmetric signature generation */
- crypto_akcipher_sign(req) :
+ /* Run asymmetric signature verification */
+ crypto_akcipher_verify(req) :
/* Run asymmetric encrypt */
crypto_akcipher_encrypt(req), &wait);
if (err) {
- pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
+ pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
goto free_all;
}
- if (req->dst_len != vecs->c_size) {
- pr_err("alg: akcipher: encrypt test failed. Invalid output len\n");
+ if (req->dst_len != c_size) {
+ pr_err("alg: akcipher: %s test failed. Invalid output len\n",
+ op);
err = -EINVAL;
goto free_all;
}
/* verify that encrypted message is equal to expected */
- if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
- pr_err("alg: akcipher: encrypt test failed. Invalid output\n");
- hexdump(outbuf_enc, vecs->c_size);
+ if (memcmp(c, outbuf_enc, c_size)) {
+ pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
+ hexdump(outbuf_enc, c_size);
err = -EINVAL;
goto free_all;
}
- /* Don't invoke decrypt for vectors with public key */
+
+ /*
+ * Don't invoke the (decrypt or sign) tests, which require a private key,
+ * for vectors with only a public key.
+ */
if (vecs->public_key_vec) {
err = 0;
goto free_all;
@@ -2309,37 +2604,36 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
goto free_all;
}
- if (WARN_ON(vecs->c_size > PAGE_SIZE))
+ op = vecs->siggen_sigver_test ? "sign" : "decrypt";
+ if (WARN_ON(c_size > PAGE_SIZE))
goto free_all;
+ memcpy(xbuf[0], c, c_size);
- memcpy(xbuf[0], vecs->c, vecs->c_size);
-
- sg_init_one(&src, xbuf[0], vecs->c_size);
+ sg_init_one(&src, xbuf[0], c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
crypto_init_wait(&wait);
- akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
+ akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max);
err = crypto_wait_req(vecs->siggen_sigver_test ?
- /* Run asymmetric signature verification */
- crypto_akcipher_verify(req) :
+ /* Run asymmetric signature generation */
+ crypto_akcipher_sign(req) :
/* Run asymmetric decrypt */
crypto_akcipher_decrypt(req), &wait);
if (err) {
- pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
+ pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
goto free_all;
}
out_len = req->dst_len;
- if (out_len < vecs->m_size) {
- pr_err("alg: akcipher: decrypt test failed. "
- "Invalid output len %u\n", out_len);
+ if (out_len < m_size) {
+ pr_err("alg: akcipher: %s test failed. Invalid output len %u\n",
+ op, out_len);
err = -EINVAL;
goto free_all;
}
/* verify that decrypted message is equal to the original msg */
- if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) ||
- memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size,
- vecs->m_size)) {
- pr_err("alg: akcipher: decrypt test failed. Invalid output\n");
+ if (memchr_inv(outbuf_dec, 0, out_len - m_size) ||
+ memcmp(m, outbuf_dec + out_len - m_size, m_size)) {
+ pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
hexdump(outbuf_dec, out_len);
err = -EINVAL;
}
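
The decrypt/sign output check keeps its shape: the result may be left-padded with zeros up to out_len, so the test requires an all-zero prefix followed by the original message at the tail (memchr_inv() plus memcmp() in the kernel). A userspace sketch of the same check:

#include <stdio.h>
#include <string.h>

static int check_padded_output(const unsigned char *out, size_t out_len,
			       const unsigned char *msg, size_t m_size)
{
	if (out_len < m_size)
		return -1;			/* too short */
	for (size_t i = 0; i < out_len - m_size; i++)
		if (out[i] != 0)
			return -1;		/* nonzero padding byte */
	return memcmp(msg, out + out_len - m_size, m_size) ? -1 : 0;
}

int main(void)
{
	unsigned char out[8] = { 0, 0, 0, 'a', 'b', 'c', 'd', 'e' };

	printf("%d\n", check_padded_output(out, 8, (const unsigned char *)"abcde", 5));
	return 0;
}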
@@ -2419,28 +2713,19 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "aegis128",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(aegis128_enc_tv_template),
- .dec = __VECS(aegis128_dec_tv_template),
- }
+ .aead = __VECS(aegis128_tv_template)
}
}, {
.alg = "aegis128l",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(aegis128l_enc_tv_template),
- .dec = __VECS(aegis128l_dec_tv_template),
- }
+ .aead = __VECS(aegis128l_tv_template)
}
}, {
.alg = "aegis256",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(aegis256_enc_tv_template),
- .dec = __VECS(aegis256_dec_tv_template),
- }
+ .aead = __VECS(aegis256_tv_template)
}
}, {
.alg = "ansi_cprng",
@@ -2452,36 +2737,27 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "authenc(hmac(md5),ecb(cipher_null))",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
- .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
- }
+ .aead = __VECS(hmac_md5_ecb_cipher_null_tv_template)
}
}, {
.alg = "authenc(hmac(sha1),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha1_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),cbc(des))",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha1_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha1_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),ctr(aes))",
@@ -2491,10 +2767,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "authenc(hmac(sha1),ecb(cipher_null))",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
- .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
- }
+ .aead = __VECS(hmac_sha1_ecb_cipher_null_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
@@ -2504,44 +2777,34 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "authenc(hmac(sha224),cbc(des))",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha224_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha224),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha224_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha256_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(des))",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha256_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha256_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),ctr(aes))",
@@ -2555,18 +2818,14 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "authenc(hmac(sha384),cbc(des))",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha384_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha384),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha384_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha384),ctr(aes))",
@@ -2581,26 +2840,20 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha512_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha512),cbc(des))",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha512_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha512),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
- }
+ .aead = __VECS(hmac_sha512_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha512),ctr(aes))",
@@ -2697,10 +2950,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(aes_ccm_enc_tv_template),
- .dec = __VECS(aes_ccm_dec_tv_template)
- }
+ .aead = __VECS(aes_ccm_tv_template)
}
}, {
.alg = "cfb(aes)",
@@ -2735,6 +2985,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "crc32",
.test = alg_test_hash,
+ .fips_allowed = 1,
.suite = {
.hash = __VECS(crc32_tv_template)
}
@@ -3111,10 +3362,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(aes_gcm_enc_tv_template),
- .dec = __VECS(aes_gcm_dec_tv_template)
- }
+ .aead = __VECS(aes_gcm_tv_template)
}
}, {
.alg = "ghash",
@@ -3309,19 +3557,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "morus1280",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(morus1280_enc_tv_template),
- .dec = __VECS(morus1280_dec_tv_template),
- }
+ .aead = __VECS(morus1280_tv_template)
}
}, {
.alg = "morus640",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(morus640_enc_tv_template),
- .dec = __VECS(morus640_dec_tv_template),
- }
+ .aead = __VECS(morus640_tv_template)
}
}, {
.alg = "nhpoly1305",
@@ -3386,47 +3628,32 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
- .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
- }
+ .aead = __VECS(aes_gcm_rfc4106_tv_template)
}
}, {
.alg = "rfc4309(ccm(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
- .aead = {
- .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
- .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
- }
+ .aead = __VECS(aes_ccm_rfc4309_tv_template)
}
}, {
.alg = "rfc4543(gcm(aes))",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
- .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
- }
+ .aead = __VECS(aes_gcm_rfc4543_tv_template)
}
}, {
.alg = "rfc7539(chacha20,poly1305)",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(rfc7539_enc_tv_template),
- .dec = __VECS(rfc7539_dec_tv_template),
- }
+ .aead = __VECS(rfc7539_tv_template)
}
}, {
.alg = "rfc7539esp(chacha20,poly1305)",
.test = alg_test_aead,
.suite = {
- .aead = {
- .enc = __VECS(rfc7539esp_enc_tv_template),
- .dec = __VECS(rfc7539esp_dec_tv_template),
- }
+ .aead = __VECS(rfc7539esp_tv_template)
}
}, {
.alg = "rmd128",
@@ -3675,18 +3902,10 @@ static const struct alg_test_desc alg_test_descs[] = {
}
};
-static bool alg_test_descs_checked;
-
-static void alg_test_descs_check_order(void)
+static void alg_check_test_descs_order(void)
{
int i;
- /* only check once */
- if (alg_test_descs_checked)
- return;
-
- alg_test_descs_checked = true;
-
for (i = 1; i < ARRAY_SIZE(alg_test_descs); i++) {
int diff = strcmp(alg_test_descs[i - 1].alg,
alg_test_descs[i].alg);
@@ -3704,6 +3923,29 @@ static void alg_test_descs_check_order(void)
}
}
+static void alg_check_testvec_configs(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++)
+ WARN_ON(!valid_testvec_config(
+ &default_cipher_testvec_configs[i]));
+
+ for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++)
+ WARN_ON(!valid_testvec_config(
+ &default_hash_testvec_configs[i]));
+}
+
+static void testmgr_onetime_init(void)
+{
+ alg_check_test_descs_order();
+ alg_check_testvec_configs();
+
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n");
+#endif
+}
+
static int alg_find_test(const char *alg)
{
int start = 0;
@@ -3740,7 +3982,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
return 0;
}
- alg_test_descs_check_order();
+ DO_ONCE(testmgr_onetime_init);
if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
char nalg[CRYPTO_MAX_ALG_NAME];
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index e8f47d7b92cd..f267633cf13a 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -5,6 +5,7 @@
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) 2007 Nokia Siemens Networks
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2019 Google LLC
*
* Updated RFC4106 AES-GCM testing. Some test vectors were taken from
* http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/
@@ -24,19 +25,20 @@
#ifndef _CRYPTO_TESTMGR_H
#define _CRYPTO_TESTMGR_H
-#define MAX_DIGEST_SIZE 64
-#define MAX_TAP 8
-
-#define MAX_KEYLEN 1088
#define MAX_IVLEN 32
+/*
+ * hash_testvec: structure to describe a hash (message digest) test
+ * @key: Pointer to key (NULL if none)
+ * @plaintext: Pointer to source data
+ * @digest: Pointer to expected digest
+ * @psize: Length of source data in bytes
+ * @ksize: Length of @key in bytes (0 if no key)
+ */
struct hash_testvec {
- /* only used with keyed hash algorithms */
const char *key;
const char *plaintext;
const char *digest;
- unsigned short tap[MAX_TAP];
- unsigned short np;
unsigned short psize;
unsigned short ksize;
};
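
Editor's note, for orientation only and not part of the patch: with the .np/.tap scatter-gather fields gone, a hash vector carries just the data, the optional key, and the expected digest. A minimal hypothetical sketch (all byte values are placeholders, and the template name is invented):

/*
 * Illustrative sketch only -- not from this patch.  A keyed-hash vector
 * using the trimmed hash_testvec layout; the key and digest bytes below
 * are placeholders, not a real test vector.
 */
static const struct hash_testvec example_keyed_hash_tv_template[] = {
	{
		.key	= "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
		.ksize	= 8,
		.plaintext = "Hi There",
		.psize	= 8,
		.digest	= "\x00\x11\x22\x33\x44\x55\x66\x77"
			  "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
	},
};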
@@ -45,29 +47,24 @@ struct hash_testvec {
* cipher_testvec: structure to describe a symmetric cipher test
* @key: Pointer to key
* @klen: Length of @key in bytes
- * @iv: Pointer to IV (optional for some ciphers)
+ * @iv: Pointer to IV. If NULL, an all-zeroes IV is used.
+ * @iv_out: Pointer to output IV, if applicable for the cipher.
* @ptext: Pointer to plaintext
* @ctext: Pointer to ciphertext
* @len: Length of @ptext and @ctext in bytes
* @fail: If set to one, the test needs to fail
- * @wk: Does the test need CRYPTO_TFM_REQ_WEAK_KEY
+ * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* ( e.g. test needs to fail due to a weak key )
- * @np: numbers of SG to distribute data in (from 1 to MAX_TAP)
- * @tap: How to distribute data in @np SGs
- * @also_non_np: if set to 1, the test will be also done without
- * splitting data in @np SGs
* @fips_skip: Skip the test vector in FIPS mode
- * @generates_iv: Encryption should ignore the given IV, and output @iv.
- * Decryption takes @iv. Needed for AES Keywrap ("kw(aes)").
+ * @generates_iv: Encryption should ignore the given IV, and output @iv_out.
+ * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)").
*/
struct cipher_testvec {
const char *key;
const char *iv;
+ const char *iv_out;
const char *ptext;
const char *ctext;
- unsigned short tap[MAX_TAP];
- int np;
- unsigned char also_non_np;
bool fail;
unsigned char wk; /* weak key flag */
unsigned char klen;
@@ -76,23 +73,37 @@ struct cipher_testvec {
bool generates_iv;
};
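
Editor's note, for orientation only and not part of the patch: a hypothetical sketch of how a CBC vector carries the new @iv_out field. For CBC the output IV is simply the last ciphertext block, which is why @iv_out below repeats the tail of @ctext; every byte value is a placeholder.

/*
 * Illustrative sketch only -- not from this patch.  A single-block CBC
 * vector with the new @iv_out field; all byte values are placeholders.
 */
static const struct cipher_testvec example_cbc_tv_template[] = {
	{
		.key	= "\x00\x11\x22\x33\x44\x55\x66\x77"
			  "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
		.klen	= 16,
		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08"
			  "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10",
		/* for CBC, the output IV equals the last ciphertext block */
		.iv_out	= "\xde\xad\xbe\xef\xde\xad\xbe\xef"
			  "\xde\xad\xbe\xef\xde\xad\xbe\xef",
		.ptext	= "Single block msg",
		.ctext	= "\xde\xad\xbe\xef\xde\xad\xbe\xef"
			  "\xde\xad\xbe\xef\xde\xad\xbe\xef",
		.len	= 16,
	},
};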
+/*
+ * aead_testvec: structure to describe an AEAD test
+ * @key: Pointer to key
+ * @iv: Pointer to IV. If NULL, an all-zeroes IV is used.
+ * @ptext: Pointer to plaintext
+ * @assoc: Pointer to associated data
+ * @ctext: Pointer to the full authenticated ciphertext. For AEADs that
+ * produce a separate "ciphertext" and "authentication tag", these
+ * two parts are concatenated: ciphertext || tag.
+ * @fail: setkey() failure expected?
+ * @novrfy: Decryption verification failure expected?
+ * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
+ * (e.g. setkey() needs to fail due to a weak key)
+ * @klen: Length of @key in bytes
+ * @plen: Length of @ptext in bytes
+ * @alen: Length of @assoc in bytes
+ * @clen: Length of @ctext in bytes
+ */
struct aead_testvec {
const char *key;
const char *iv;
- const char *input;
+ const char *ptext;
const char *assoc;
- const char *result;
- unsigned char tap[MAX_TAP];
- unsigned char atap[MAX_TAP];
- int np;
- int anp;
+ const char *ctext;
bool fail;
- unsigned char novrfy; /* ccm dec verification failure expected */
- unsigned char wk; /* weak key flag */
+ unsigned char novrfy;
+ unsigned char wk;
unsigned char klen;
- unsigned short ilen;
+ unsigned short plen;
+ unsigned short clen;
unsigned short alen;
- unsigned short rlen;
};
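
Editor's note, for orientation only and not part of the patch: one combined AEAD vector now serves both the encryption and decryption tests. A hypothetical sketch of the new layout, where @ctext holds ciphertext || tag and @clen equals @plen plus the tag length; all byte values and the template name are placeholders.

/*
 * Illustrative sketch only -- not from this patch.  A combined AEAD vector
 * in the new ptext/ctext layout; the bytes below are placeholders.
 */
static const struct aead_testvec example_aead_tv_template[] = {
	{
		.key	= "\x00\x11\x22\x33\x44\x55\x66\x77"
			  "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
		.klen	= 16,
		.iv	= "\x01\x02\x03\x04\x05\x06"
			  "\x07\x08\x09\x0a\x0b\x0c",
		.assoc	= "\xf0\xf1\xf2\xf3",
		.alen	= 4,
		.ptext	= "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7",
		.plen	= 8,
		/* ciphertext followed by a 16-byte authentication tag */
		.ctext	= "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
		.clen	= 8 + 16,
	},
};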
struct cprng_testvec {
@@ -1015,8 +1026,6 @@ static const struct hash_testvec md4_tv_template[] = {
.psize = 26,
.digest = "\xd7\x9e\x1c\x30\x8a\xa5\xbb\xcd"
"\xee\xa8\xed\x63\xdf\x41\x2d\xa9",
- .np = 2,
- .tap = { 13, 13 },
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
.psize = 62,
@@ -1053,8 +1062,6 @@ static const struct hash_testvec sha3_224_tv_template[] = {
"\xc9\xfd\x55\x74\x49\x44\x79\xba"
"\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea"
"\xd0\xfc\xce\x33",
- .np = 2,
- .tap = { 28, 28 },
}, {
.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
"\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
@@ -1214,8 +1221,6 @@ static const struct hash_testvec sha3_256_tv_template[] = {
"\x49\x10\x03\x76\xa8\x23\x5e\x2c"
"\x82\xe1\xb9\x99\x8a\x99\x9e\x21"
"\xdb\x32\xdd\x97\x49\x6d\x33\x76",
- .np = 2,
- .tap = { 28, 28 },
}, {
.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
"\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
@@ -1382,8 +1387,6 @@ static const struct hash_testvec sha3_384_tv_template[] = {
"\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a"
"\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1"
"\x9e\xef\x51\xac\xd0\x65\x7c\x22",
- .np = 2,
- .tap = { 28, 28 },
}, {
.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
"\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
@@ -1558,8 +1561,6 @@ static const struct hash_testvec sha3_512_tv_template[] = {
"\xba\x1b\x0d\x8d\xc7\x8c\x08\x63"
"\x46\xb5\x33\xb4\x9c\x03\x0d\x99"
"\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e",
- .np = 2,
- .tap = { 28, 28 },
}, {
.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
"\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
@@ -1729,8 +1730,6 @@ static const struct hash_testvec md5_tv_template[] = {
.psize = 26,
.digest = "\xc3\xfc\xd3\xd7\x61\x92\xe4\x00"
"\x7d\xfb\x49\x6c\xca\x67\xe1\x3b",
- .np = 2,
- .tap = {13, 13}
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
.psize = 62,
@@ -1791,8 +1790,6 @@ static const struct hash_testvec rmd128_tv_template[] = {
.psize = 56,
.digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d"
"\xdc\x22\xe8\x8b\x49\x13\x3a\x06",
- .np = 2,
- .tap = { 28, 28 },
}, {
.plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
"jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
@@ -1853,8 +1850,6 @@ static const struct hash_testvec rmd160_tv_template[] = {
.psize = 56,
.digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05"
"\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b",
- .np = 2,
- .tap = { 28, 28 },
}, {
.plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
"jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
@@ -1931,8 +1926,6 @@ static const struct hash_testvec rmd256_tv_template[] = {
"\xc8\xd9\x12\x85\x73\xe7\xa9\x80"
"\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e"
"\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f",
- .np = 2,
- .tap = { 28, 28 },
}
};
@@ -1997,8 +1990,6 @@ static const struct hash_testvec rmd320_tv_template[] = {
"\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59"
"\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b"
"\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac",
- .np = 2,
- .tap = { 28, 28 },
}
};
@@ -2012,15 +2003,11 @@ static const struct hash_testvec crct10dif_tv_template[] = {
"123456789012345678901234567890123456789",
.psize = 79,
.digest = (u8 *)(u16 []){ 0x4b70 },
- .np = 2,
- .tap = { 63, 16 },
}, {
.plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
"ddddddddddddd",
.psize = 56,
.digest = (u8 *)(u16 []){ 0x9ce3 },
- .np = 8,
- .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890"
@@ -2033,19 +2020,6 @@ static const struct hash_testvec crct10dif_tv_template[] = {
.psize = 319,
.digest = (u8 *)(u16 []){ 0x44c6 },
}, {
- .plaintext = "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 319,
- .digest = (u8 *)(u16 []){ 0x44c6 },
- .np = 4,
- .tap = { 1, 255, 57, 6 },
- }, {
.plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
"\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
"\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
@@ -2510,8 +2484,6 @@ static const struct hash_testvec sha1_tv_template[] = {
.psize = 56,
.digest = "\x84\x98\x3e\x44\x1c\x3b\xd2\x6e\xba\xae"
"\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1",
- .np = 2,
- .tap = { 28, 28 }
}, {
.plaintext = "\xec\x29\x56\x12\x44\xed\xe7\x06"
"\xb6\xeb\x30\xa1\xc3\x71\xd7\x44"
@@ -2537,8 +2509,6 @@ static const struct hash_testvec sha1_tv_template[] = {
.psize = 163,
.digest = "\x97\x01\x11\xc4\xe7\x7b\xcc\x88\xcc\x20"
"\x45\x9c\x02\xb6\x9b\x4a\xa8\xf5\x82\x17",
- .np = 4,
- .tap = { 63, 64, 31, 5 }
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
.psize = 64,
@@ -2707,8 +2677,6 @@ static const struct hash_testvec sha224_tv_template[] = {
"\x5D\xBA\x5D\xA1\xFD\x89\x01\x50"
"\xB0\xC6\x45\x5C\xB4\xF5\x8B\x19"
"\x52\x52\x25\x25",
- .np = 2,
- .tap = { 28, 28 }
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
.psize = 64,
@@ -2878,8 +2846,6 @@ static const struct hash_testvec sha256_tv_template[] = {
"\xe5\xc0\x26\x93\x0c\x3e\x60\x39"
"\xa3\x3c\xe4\x59\x64\xff\x21\x67"
"\xf6\xec\xed\xd4\x19\xdb\x06\xc1",
- .np = 2,
- .tap = { 28, 28 }
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
.psize = 64,
@@ -3075,8 +3041,6 @@ static const struct hash_testvec sha384_tv_template[] = {
"\x4d\x8f\xd0\x14\xe5\x82\x82\x3a"
"\x89\xe1\x6f\x9b\x2a\x7b\xbc\x1a"
"\xc9\x38\xe2\xd1\x99\xe8\xbe\xa4",
- .np = 4,
- .tap = { 26, 26, 26, 26 }
}, {
.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
"\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
@@ -3277,8 +3241,6 @@ static const struct hash_testvec sha512_tv_template[] = {
"\xb2\x78\xe6\x6d\xff\x8b\x84\xfe"
"\x2b\x28\x70\xf7\x42\xa5\x80\xd8"
"\xed\xb4\x19\x87\x23\x28\x50\xc9",
- .np = 4,
- .tap = { 26, 26, 26, 26 }
}, {
.plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
"\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
@@ -3811,8 +3773,6 @@ static const struct hash_testvec ghash_tv_template[] =
.psize = 28,
.digest = "\x3e\x1f\x5c\x4d\x65\xf0\xef\xce"
"\x0d\x61\x06\x27\x66\x51\xd5\xe2",
- .np = 2,
- .tap = {14, 14}
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
@@ -3923,8 +3883,6 @@ static const struct hash_testvec hmac_md5_tv_template[] =
.psize = 28,
.digest = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03"
"\xea\xa8\x6e\x31\x0a\x5d\xb7\x38",
- .np = 2,
- .tap = {14, 14}
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 16,
@@ -4002,8 +3960,6 @@ static const struct hash_testvec hmac_rmd128_tv_template[] = {
.psize = 28,
.digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34"
"\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b",
- .np = 2,
- .tap = { 14, 14 },
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 16,
@@ -4081,8 +4037,6 @@ static const struct hash_testvec hmac_rmd160_tv_template[] = {
.psize = 28,
.digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4"
"\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69",
- .np = 2,
- .tap = { 14, 14 },
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 20,
@@ -4161,8 +4115,6 @@ static const struct hash_testvec hmac_sha1_tv_template[] = {
.psize = 28,
.digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74"
"\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79",
- .np = 2,
- .tap = { 14, 14 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 20,
@@ -4252,8 +4204,6 @@ static const struct hash_testvec hmac_sha224_tv_template[] = {
"\x45\x69\x0f\x3a\x7e\x9e\x6d\x0f"
"\x8b\xbe\xa2\xa3\x9e\x61\x48\x00"
"\x8f\xd0\x5e\x44",
- .np = 4,
- .tap = { 7, 7, 7, 7 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -4397,8 +4347,6 @@ static const struct hash_testvec hmac_sha256_tv_template[] = {
"\x6a\x04\x24\x26\x08\x95\x75\xc7"
"\x5a\x00\x3f\x08\x9d\x27\x39\x83"
"\x9d\xec\x58\xb9\x64\xec\x38\x43",
- .np = 2,
- .tap = { 14, 14 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -4571,8 +4519,6 @@ static const struct hash_testvec aes_cbcmac_tv_template[] = {
"\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
.psize = 33,
.ksize = 16,
- .np = 2,
- .tap = { 7, 26 },
}, {
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
@@ -4689,9 +4635,7 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
"\x10\x11\x12\x13",
.digest = "\x47\xf5\x1b\x45\x64\x96\x62\x15"
"\xb8\x98\x5c\x63\x05\x5e\xd3\x08",
- .tap = { 10, 10 },
.psize = 20,
- .np = 2,
.ksize = 16,
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
@@ -4714,9 +4658,7 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
"\x20\x21",
.digest = "\xbe\xcb\xb3\xbc\xcd\xb5\x18\xa3"
"\x06\x77\xd5\x48\x1f\xb6\xb4\xd8",
- .tap = { 17, 17 },
.psize = 34,
- .np = 2,
.ksize = 16,
}
};
@@ -4799,8 +4741,6 @@ static const struct hash_testvec vmac64_aes_tv_template[] = {
"abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
.psize = 316,
.digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
- .tap = { 1, 100, 200, 15 },
- .np = 4,
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
@@ -4905,8 +4845,6 @@ static const struct hash_testvec hmac_sha384_tv_template[] = {
"\xe4\x2e\xc3\x73\x63\x22\x44\x5e"
"\x8e\x22\x40\xca\x5e\x69\xe2\xc7"
"\x8b\x32\x39\xec\xfa\xb2\x16\x49",
- .np = 4,
- .tap = { 7, 7, 7, 7 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -5007,8 +4945,6 @@ static const struct hash_testvec hmac_sha512_tv_template[] = {
"\x6d\x03\x4f\x65\xf8\xf0\xe6\xfd"
"\xca\xea\xb1\xa3\x4d\x4a\x6b\x4b"
"\x63\x6e\x07\x0a\x38\xbc\xe7\x37",
- .np = 4,
- .tap = { 7, 7, 7, 7 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -5104,8 +5040,6 @@ static const struct hash_testvec hmac_sha3_224_tv_template[] = {
"\x1b\x79\x86\x34\xad\x38\x68\x11"
"\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b"
"\xba\xce\x5e\x66",
- .np = 4,
- .tap = { 7, 7, 7, 7 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -5193,8 +5127,6 @@ static const struct hash_testvec hmac_sha3_256_tv_template[] = {
"\x35\x96\xbb\xb0\xda\x73\xb8\x87"
"\xc9\x17\x1f\x93\x09\x5b\x29\x4a"
"\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5",
- .np = 4,
- .tap = { 7, 7, 7, 7 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -5286,8 +5218,6 @@ static const struct hash_testvec hmac_sha3_384_tv_template[] = {
"\x3c\xa1\x35\x08\xa9\x32\x43\xce"
"\x48\xc0\x45\xdc\x00\x7f\x26\xa2"
"\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a",
- .np = 4,
- .tap = { 7, 7, 7, 7 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -5387,8 +5317,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = {
"\xee\x7a\x0c\x31\xd0\x22\xa9\x5e"
"\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83"
"\x96\x02\x75\xbe\xb4\xe6\x20\x24",
- .np = 4,
- .tap = { 7, 7, 7, 7 }
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
@@ -5996,8 +5924,150 @@ static const struct hash_testvec nhpoly1305_tv_template[] = {
.psize = 16,
.digest = "\x04\xbf\x7f\x6a\xce\x72\xea\x6a"
"\x79\xdb\xb0\xc9\x60\xf6\x12\xcc",
- .np = 6,
- .tap = { 4, 4, 1, 1, 1, 5 },
+ }, {
+ .key = "\x2e\x77\x1e\x2c\x63\x76\x34\x3f"
+ "\x71\x08\x4f\x5a\xe3\x3d\x74\x56"
+ "\xc7\x98\x46\x52\xe5\x8a\xba\x0d"
+ "\x72\x41\x11\x15\x14\x72\x50\x8a"
+ "\xd5\xec\x60\x09\xdd\x71\xcc\xb9"
+ "\x59\x81\x65\x2d\x9e\x50\x18\xf3"
+ "\x32\xf3\xf1\xe7\x01\x82\x1c\xad"
+ "\x88\xa0\x21\x0c\x4b\x80\x5e\x62"
+ "\xfc\x81\xec\x52\xaa\xe4\xa5\x86"
+ "\xc2\xe6\x03\x11\xdc\x66\x09\x86"
+ "\x3c\x3b\xf0\x59\x0f\xb3\xf7\x44"
+ "\x24\xb7\x88\xc5\xfc\xc8\x77\x9f"
+ "\x8c\x44\xc4\x11\x55\xce\x7a\xa3"
+ "\xe0\xa2\xb8\xbf\xb5\x3d\x07\x2c"
+ "\x32\xb6\x6c\xfc\xb4\x42\x95\x95"
+ "\x98\x32\x81\xc4\xe7\xe2\xd9\x6a"
+ "\x87\xf4\xf4\x1e\x74\x7c\xb5\xcd"
+ "\x51\x45\x68\x38\x51\xdb\x30\x74"
+ "\x11\xe0\xaa\xae\x19\x8f\x15\x55"
+ "\xdd\x47\x4a\x35\xb9\x0c\xb4\x4e"
+ "\xa9\xce\x2f\xfa\x8f\xc1\x8a\x5e"
+ "\x5b\xec\xa5\x81\x3b\xb3\x43\x06"
+ "\x24\x81\xf4\x24\xe2\x21\xfa\xcb"
+ "\x49\xa8\xf8\xbd\x31\x4a\x5b\x2d"
+ "\x64\x0a\x07\xf0\x80\xc9\x0d\x81"
+ "\x14\x58\x54\x2b\xba\x22\x31\xba"
+ "\xef\x66\xc9\x49\x69\x69\x83\x0d"
+ "\xf2\xf9\x80\x9d\x30\x36\xfb\xe3"
+ "\xc0\x72\x2b\xcc\x5a\x81\x2c\x5d"
+ "\x3b\x5e\xf8\x2b\xd3\x14\x28\x73"
+ "\xf9\x1c\x70\xe6\xd8\xbb\xac\x30"
+ "\xf9\xd9\xa0\xe2\x33\x7c\x33\x34"
+ "\xa5\x6a\x77\x6d\xd5\xaf\xf4\xf3"
+ "\xc7\xb3\x0e\x83\x3d\xcb\x01\xcc"
+ "\x81\xc0\xf9\x4a\xae\x36\x92\xf7"
+ "\x69\x7b\x65\x01\xc3\xc8\xb8\xae"
+ "\x16\xd8\x30\xbb\xba\x6d\x78\x6e"
+ "\x0d\xf0\x7d\x84\xb7\x87\xda\x28"
+ "\x7a\x18\x10\x0b\x29\xec\x29\xf3"
+ "\xb0\x7b\xa1\x28\xbf\xbc\x2b\x2c"
+ "\x92\x2c\x16\xfb\x02\x39\xf9\xa6"
+ "\xa2\x15\x05\xa6\x72\x10\xbc\x62"
+ "\x4a\x6e\xb8\xb5\x5d\x59\xae\x3c"
+ "\x32\xd3\x68\xd7\x8e\x5a\xcd\x1b"
+ "\xef\xf6\xa7\x5e\x10\x51\x15\x4b"
+ "\x2c\xe3\xba\x70\x4f\x2c\xa0\x1c"
+ "\x7b\x97\xd7\xb2\xa5\x05\x17\xcc"
+ "\xf7\x3a\x29\x6f\xd5\x4b\xb8\x24"
+ "\xf4\x65\x95\x12\xc0\x86\xd1\x64"
+ "\x81\xdf\x46\x55\x0d\x22\x06\x77"
+ "\xd8\xca\x8d\xc8\x87\xc3\xfa\xb9"
+ "\xe1\x98\x94\xe6\x7b\xed\x65\x66"
+ "\x0e\xc7\x25\x15\xee\x4a\xe6\x7e"
+ "\xea\x1b\x58\xee\x96\xa0\x75\x9a"
+ "\xa3\x00\x9e\x42\xc2\x26\x20\x8c"
+ "\x3d\x22\x1f\x94\x3e\x74\x43\x72"
+ "\xe9\x1d\xa6\xa1\x6c\xa7\xb8\x03"
+ "\xdf\xb9\x7a\xaf\xe9\xe9\x3b\xfe"
+ "\xdf\x91\xc1\x01\xa8\xba\x5d\x29"
+ "\xa5\xe0\x98\x9b\x13\xe5\x13\x11"
+ "\x7c\x04\x3a\xe8\x44\x7e\x78\xfc"
+ "\xd6\x96\xa8\xbc\x7d\xc1\x89\x3d"
+ "\x75\x64\xa9\x0e\x86\x33\xfb\x73"
+ "\xf7\x15\xbc\x2c\x9a\x3f\x29\xce"
+ "\x1c\x9d\x10\x4e\x85\xe1\x77\x41"
+ "\x01\xe2\xbc\x88\xec\x81\xef\xc2"
+ "\x6a\xed\x4f\xf7\xdf\xac\x10\x71"
+ "\x94\xed\x71\xa4\x01\xd4\xd6\xbe"
+ "\xfe\x3e\xc3\x92\x6a\xf2\x2b\xb5"
+ "\xab\x15\x96\xb7\x88\x2c\xc2\xe1"
+ "\xb0\x04\x22\xe7\x3d\xa9\xc9\x7d"
+ "\x2c\x7c\x21\xff\x97\x86\x6b\x0c"
+ "\x2b\x5b\xe0\xb6\x48\x74\x8f\x24"
+ "\xef\x8e\xdd\x0f\x2a\x5f\xff\x33"
+ "\xf4\x8e\xc5\xeb\x9c\xd7\x2a\x45"
+ "\xf3\x50\xf1\xc0\x91\x8f\xc7\xf9"
+ "\x97\xc1\x3c\x9c\xf4\xed\x8a\x23"
+ "\x61\x5b\x40\x1a\x09\xee\x23\xa8"
+ "\x7c\x7a\x96\xe1\x31\x55\x3d\x12"
+ "\x04\x1f\x21\x78\x72\xf0\x0f\xa5"
+ "\x80\x58\x7c\x2f\x37\xb5\x67\x24"
+ "\x2f\xce\xf9\xf6\x86\x9f\xb3\x34"
+ "\x0c\xfe\x0a\xaf\x27\xe6\x5e\x0a"
+ "\x21\x44\x68\xe1\x5d\x84\x25\xae"
+ "\x2c\x5a\x94\x66\x9a\x3f\x0e\x5a"
+ "\xd0\x60\x2a\xd5\x3a\x4e\x2f\x40"
+ "\x87\xe9\x27\x3e\xee\x92\xe1\x07"
+ "\x22\x43\x52\xed\x67\x49\x13\xdd"
+ "\x68\xd7\x54\xc2\x76\x72\x7e\x75"
+ "\xaf\x24\x98\x5c\xe8\x22\xaa\x35"
+ "\x0f\x9a\x1c\x4c\x0b\x43\x68\x99"
+ "\x45\xdd\xbf\x82\xa5\x6f\x0a\xef"
+ "\x44\x90\x85\xe7\x57\x23\x22\x41"
+ "\x2e\xda\x24\x28\x65\x7f\x96\x85"
+ "\x9f\x4b\x0d\x43\xb9\xa8\xbd\x84"
+ "\xad\x0b\x09\xcc\x2c\x4a\x0c\xec"
+ "\x71\x58\xba\xf1\xfc\x49\x4c\xca"
+ "\x5c\x5d\xb2\x77\x0c\x99\xae\x1c"
+ "\xce\x70\x05\x5b\x73\x6b\x7c\x28"
+ "\x3b\xeb\x21\x3f\xa3\x71\xe1\x6a"
+ "\xf4\x87\xd0\xbf\x73\xaa\x0b\x0b"
+ "\xed\x70\xb3\xd4\xa3\xca\x76\x3a"
+ "\xdb\xfa\xd8\x08\x95\xec\xac\x59"
+ "\xd0\x79\x90\xc2\x33\x7b\xcc\x28"
+ "\x65\xb6\x5f\x92\xc4\xac\x23\x40"
+ "\xd1\x20\x44\x1f\xd7\x29\xab\x46"
+ "\x79\x32\xc6\x8f\x79\xe5\xaa\x2c"
+ "\xa6\x76\x70\x3a\x9e\x46\x3f\x8c"
+ "\x1a\x89\x32\x28\x61\x5c\xcf\x93"
+ "\x1e\xde\x9e\x98\xbe\x06\x30\x23"
+ "\xc4\x8b\xda\x1c\xd1\x67\x46\x93"
+ "\x9d\x41\xa2\x8c\x03\x22\xbd\x55"
+ "\x7e\x91\x51\x13\xdc\xcf\x5c\x1e"
+ "\xcb\x5d\xfb\x14\x16\x1a\x44\x56"
+ "\x27\x77\xfd\xed\x7d\xbd\xd1\x49"
+ "\x7f\x0d\xc3\x59\x48\x6b\x3c\x02"
+ "\x6b\xb5\xd0\x83\xd5\x81\x29\xe7"
+ "\xe0\xc9\x36\x23\x8d\x41\x33\x77"
+ "\xff\x5f\x54\xde\x4d\x3f\xd2\x4e"
+ "\xb6\x4d\xdd\x85\xf8\x9b\x20\x7d"
+ "\x39\x27\x68\x63\xd3\x8e\x61\x39"
+ "\xfa\xe1\xc3\x04\x74\x27\x5a\x34"
+ "\x7f\xec\x59\x2d\xc5\x6e\x54\x23"
+ "\xf5\x7b\x4b\xbe\x58\x2b\xf2\x81"
+ "\x93\x63\xcc\x13\xd9\x90\xbb\x6a"
+ "\x41\x03\x8d\x95\xeb\xbb\x5d\x06"
+ "\x38\x4c\x0e\xd6\xa9\x5b\x84\x97"
+ "\x3e\x64\x72\xe9\x96\x07\x0f\x73"
+ "\x6e\xc6\x3b\x32\xbe\xac\x13\x14"
+ "\xd0\x0a\x17\x5f\xb9\x9c\x3e\x34"
+ "\xd9\xec\xd6\x8f\x89\xbf\x1e\xd3"
+ "\xda\x80\xb2\x29\xff\x28\x96\xb3"
+ "\x46\x50\x5b\x15\x80\x97\xee\x1f"
+ "\x6c\xd8\xe8\xe0\xbd\x09\xe7\x20"
+ "\x8c\x23\x8e\xd9\xbb\x92\xfa\x82"
+ "\xaa\x0f\xb5\xf8\x78\x60\x11\xf0",
+ .ksize = 1088,
+ .plaintext = "\x0b\xb2\x31\x2d\xad\xfe\xce\xf9"
+ "\xec\x5d\x3d\x64\x5f\x3f\x75\x43"
+ "\x05\x5b\x97",
+ .psize = 19,
+ .digest = "\x5f\x02\xae\x65\x6c\x13\x21\x67"
+ "\x77\x9e\xc4\x43\x58\x68\xde\x8f",
}, {
.key = "\x65\x4d\xe3\xf8\xd2\x4c\xac\x28"
"\x68\xf5\xb3\x81\x71\x4b\xa1\xfa"
@@ -6267,8 +6337,6 @@ static const struct hash_testvec nhpoly1305_tv_template[] = {
.psize = 1024,
.digest = "\x64\x3a\xbc\xc3\x3f\x74\x40\x51"
"\x6e\x56\x01\x1a\x51\xec\x36\xde",
- .np = 8,
- .tap = { 64, 203, 267, 28, 263, 62, 54, 83 },
}, {
.key = "\x1b\x82\x2e\x1b\x17\x23\xb9\x6d"
"\xdc\x9c\xda\x99\x07\xe3\x5f\xd8"
@@ -6989,18 +7057,6 @@ static const struct cipher_testvec des_tv_template[] = {
.ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
"\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
.len = 16,
- .np = 2,
- .tap = { 8, 8 }
- }, {
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .klen = 8,
- .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
- "\xa3\x99\x7b\xca\xaf\x69\xa0\xf5",
- .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
- "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
- .len = 16,
- .np = 2,
- .tap = { 8, 8 }
}, {
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
@@ -7009,8 +7065,6 @@ static const struct cipher_testvec des_tv_template[] = {
.ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
"\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
.len = 16,
- .np = 3,
- .tap = { 3, 12, 1 }
}, { /* Four blocks -- for testing encryption with chunking */
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
@@ -7023,38 +7077,6 @@ static const struct cipher_testvec des_tv_template[] = {
"\xb4\x99\x26\xf7\x1f\xe1\xd4\x90"
"\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
.len = 32,
- .np = 3,
- .tap = { 14, 10, 8 }
- }, {
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .klen = 8,
- .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
- "\x22\x33\x44\x55\x66\x77\x88\x99"
- "\xca\xfe\xba\xbe\xfe\xed\xbe\xef",
- .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
- "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b"
- "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
- .len = 24,
- .np = 4,
- .tap = { 2, 1, 3, 18 }
- }, {
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .klen = 8,
- .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
- "\x22\x33\x44\x55\x66\x77\x88\x99",
- .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
- "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
- .len = 16,
- .np = 5,
- .tap = { 2, 2, 2, 2, 8 }
- }, {
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .klen = 8,
- .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
- .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
- .len = 8,
- .np = 8,
- .tap = { 1, 1, 1, 1, 1, 1, 1, 1 }
}, { /* Generated with Crypto++ */
.key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
.klen = 8,
@@ -7121,9 +7143,6 @@ static const struct cipher_testvec des_tv_template[] = {
"\xE1\x58\x39\x09\xB4\x8B\x40\xAC"
"\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A",
.len = 248,
- .also_non_np = 1,
- .np = 3,
- .tap = { 248 - 10, 2, 8 },
},
};
@@ -7132,6 +7151,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = {
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
.iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .iv_out = "\x46\x8e\x91\x15\x78\x88\xba\x68",
.ptext = "\x37\x36\x35\x34\x33\x32\x31\x20"
"\x4e\x6f\x77\x20\x69\x73\x20\x74"
"\x68\x65\x20\x74\x69\x6d\x65\x20",
@@ -7143,6 +7163,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = {
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
.iv = "\x12\x34\x56\x78\x90\xab\xcd\xef",
+ .iv_out = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
.ptext = "\x4e\x6f\x77\x20\x69\x73\x20\x74",
.ctext = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
.len = 8,
@@ -7150,6 +7171,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = {
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
.iv = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
+ .iv_out = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
.ptext = "\x68\x65\x20\x74\x69\x6d\x65\x20",
.ctext = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
.len = 8,
@@ -7157,30 +7179,15 @@ static const struct cipher_testvec des_cbc_tv_template[] = {
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
.iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
+ .iv_out = "\x68\x37\x88\x49\x9a\x7c\x05\xf6",
.ptext = "\x66\x6f\x72\x20\x61\x6c\x6c\x20",
.ctext = "\x68\x37\x88\x49\x9a\x7c\x05\xf6",
.len = 8,
- .np = 2,
- .tap = { 4, 4 },
- .also_non_np = 1,
- }, { /* Copy of openssl vector for chunk testing */
- /* From OpenSSL */
- .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .klen = 8,
- .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
- .ptext = "\x37\x36\x35\x34\x33\x32\x31\x20"
- "\x4e\x6f\x77\x20\x69\x73\x20\x74"
- "\x68\x65\x20\x74\x69\x6d\x65\x20",
- .ctext = "\xcc\xd1\x73\xff\xab\x20\x39\xf4"
- "\xac\xd8\xae\xfd\xdf\xd8\xa1\xeb"
- "\x46\x8e\x91\x15\x78\x88\xba\x68",
- .len = 24,
- .np = 2,
- .tap = { 13, 11 }
}, { /* Generated with Crypto++ */
.key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
.klen = 8,
.iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47",
+ .iv_out = "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63",
.ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
"\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
"\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
@@ -7244,9 +7251,6 @@ static const struct cipher_testvec des_cbc_tv_template[] = {
"\x82\xA9\xBD\x6A\x31\x91\x39\x11"
"\xC6\x4A\xF3\x55\xC7\x29\x2E\x63",
.len = 248,
- .also_non_np = 1,
- .np = 3,
- .tap = { 248 - 10, 2, 8 },
},
};
@@ -7255,6 +7259,7 @@ static const struct cipher_testvec des_ctr_tv_template[] = {
.key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
.klen = 8,
.iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x1C",
.ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
"\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
"\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
@@ -7318,13 +7323,11 @@ static const struct cipher_testvec des_ctr_tv_template[] = {
"\x19\x7F\x99\x19\x53\xCE\x1D\x14"
"\x69\x74\xA1\x06\x46\x0F\x4E\x75",
.len = 248,
- .also_non_np = 1,
- .np = 3,
- .tap = { 248 - 10, 2, 8 },
}, { /* Generated with Crypto++ */
.key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
.klen = 8,
.iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47",
+ .iv_out = "\xE7\x82\x1D\xB8\x53\x11\xAC\x66",
.ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
"\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
"\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
@@ -7388,9 +7391,6 @@ static const struct cipher_testvec des_ctr_tv_template[] = {
"\xA5\xA6\xE7\xB0\x51\x36\x52\x37"
"\x91\x45\x05\x3E\x58\xBF\x32",
.len = 247,
- .also_non_np = 1,
- .np = 2,
- .tap = { 247 - 8, 8 },
},
};
@@ -7549,9 +7549,6 @@ static const struct cipher_testvec des3_ede_tv_template[] = {
"\x93\x03\xD7\x51\x09\xFA\xBE\x68"
"\xD8\x45\xFF\x33\xBA\xBB\x2B\x63",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -7562,6 +7559,7 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = {
"\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
.klen = 24,
.iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+ .iv_out = "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
.ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -7602,6 +7600,7 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = {
.klen = 24,
.iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12"
"\xB7\x28\x4D\x83\x24\x59\xF2\x17",
+ .iv_out = "\x95\x63\x73\xA2\x44\xAC\xF8\xA5",
.ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
"\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
"\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
@@ -7727,9 +7726,6 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = {
"\x83\x70\xFF\x86\xE6\xAA\x0F\x1F"
"\x95\x63\x73\xA2\x44\xAC\xF8\xA5",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -7739,8 +7735,8 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = {
"\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
"\xEB\xB4\x51\x72\xB4\x51\x72\x1F",
.klen = 24,
- .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
- "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF",
+ .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x3D",
.ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
"\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
"\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
@@ -7866,16 +7862,13 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = {
"\xFD\x51\xB0\xC6\x2C\x63\x13\x78"
"\x5C\xEE\xFC\xCF\xC4\x70\x00\x34",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
}, { /* Generated with Crypto++ */
.key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
"\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
"\xEB\xB4\x51\x72\xB4\x51\x72\x1F",
.klen = 24,
- .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12"
- "\xB7\x28\x4D\x83\x24\x59\xF2\x17",
+ .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12",
+ .iv_out = "\xB2\xD7\x48\xED\x06\x44\xF9\x51",
.ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
"\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
"\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
@@ -8003,9 +7996,6 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = {
"\x32\x0F\x05\x2F\xF2\x4C\x95\x3B"
"\xF2\x79\xD9",
.len = 499,
- .also_non_np = 1,
- .np = 2,
- .tap = { 499 - 16, 16 },
},
};
@@ -8191,9 +8181,6 @@ static const struct cipher_testvec bf_tv_template[] = {
"\x56\xEB\x36\x77\x3D\xAA\xB8\xF5"
"\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4",
.len = 504,
- .also_non_np = 1,
- .np = 3,
- .tap = { 504 - 10, 2, 8 },
},
};
@@ -8203,6 +8190,7 @@ static const struct cipher_testvec bf_cbc_tv_template[] = {
"\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
.klen = 16,
.iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+ .iv_out = "\x59\xf1\x65\x2b\xd5\xff\x92\xcc",
.ptext = "\x37\x36\x35\x34\x33\x32\x31\x20"
"\x4e\x6f\x77\x20\x69\x73\x20\x74"
"\x68\x65\x20\x74\x69\x6d\x65\x20"
@@ -8219,6 +8207,7 @@ static const struct cipher_testvec bf_cbc_tv_template[] = {
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
+ .iv_out = "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -8346,9 +8335,6 @@ static const struct cipher_testvec bf_cbc_tv_template[] = {
"\x93\x9B\xEE\xB5\x97\x41\xD2\xA0"
"\xB4\x98\xD8\x6B\x74\xE7\x65\xF4",
.len = 504,
- .also_non_np = 1,
- .np = 3,
- .tap = { 504 - 10, 2, 8 },
},
};
@@ -8360,6 +8346,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = {
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x9E",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -8494,6 +8481,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = {
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x9E",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -8621,9 +8609,6 @@ static const struct cipher_testvec bf_ctr_tv_template[] = {
"\x32\x44\x96\x1C\xD8\xEB\x95\xD2"
"\xF3\x71\xEF\xEB\x4E\xBB\x4D",
.len = 503,
- .also_non_np = 1,
- .np = 2,
- .tap = { 503 - 8, 8 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -8631,6 +8616,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = {
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x3C",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -8922,9 +8908,6 @@ static const struct cipher_testvec tf_tv_template[] = {
"\x58\x33\x9B\x78\xC7\x58\x48\x6B"
"\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -8933,6 +8916,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = {
.key = zeroed_string,
.klen = 16,
.iv = zeroed_string,
+ .iv_out = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
+ "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
.ptext = zeroed_string,
.ctext = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
"\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
@@ -8942,6 +8927,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = {
.klen = 16,
.iv = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
"\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
+ .iv_out = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
+ "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
.ptext = zeroed_string,
.ctext = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
"\x86\xcb\x08\x6b\x78\x9f\x54\x19",
@@ -8951,6 +8938,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = {
.klen = 16,
.iv = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
"\x86\xcb\x08\x6b\x78\x9f\x54\x19",
+ .iv_out = "\x05\xef\x8c\x61\xa8\x11\x58\x26"
+ "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
.ptext = zeroed_string,
.ctext = "\x05\xef\x8c\x61\xa8\x11\x58\x26"
"\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
@@ -8959,6 +8948,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = {
.key = zeroed_string,
.klen = 16,
.iv = zeroed_string,
+ .iv_out = "\x05\xef\x8c\x61\xa8\x11\x58\x26"
+ "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
.ptext = zeroed_string,
.ctext = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
"\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a"
@@ -8975,6 +8966,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\x30\x70\x56\xA4\x37\xDD\x7C\xC0"
+ "\x0A\xA3\x30\x10\x26\x25\x41\x2C",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -9100,9 +9093,6 @@ static const struct cipher_testvec tf_cbc_tv_template[] = {
"\x30\x70\x56\xA4\x37\xDD\x7C\xC0"
"\x0A\xA3\x30\x10\x26\x25\x41\x2C",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -9115,6 +9105,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+ "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -9248,6 +9240,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = {
.klen = 32,
.iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x1C",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -9381,6 +9375,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+ "\xC4\x29\x8E\xF3\x35\x9A\xFF\x84",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -9508,9 +9504,6 @@ static const struct cipher_testvec tf_ctr_tv_template[] = {
"\xC5\xC9\x7F\x9E\xCF\x33\x7A\xDF"
"\x6C\x82\x9D",
.len = 499,
- .also_non_np = 1,
- .np = 2,
- .tap = { 499 - 16, 16 },
},
};
@@ -9752,9 +9745,6 @@ static const struct cipher_testvec tf_lrw_tv_template[] = {
"\x80\x18\xc4\x6c\x03\xd3\xb7\xba"
"\x11\xd7\xb8\x6e\xea\xe1\x80\x30",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
},
};
@@ -10089,9 +10079,6 @@ static const struct cipher_testvec tf_xts_tv_template[] = {
"\xa4\x05\x0b\xb2\xb3\xa8\x30\x97"
"\x37\x30\xe1\x91\x8d\xb3\x2a\xff",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
},
};
@@ -10264,9 +10251,6 @@ static const struct cipher_testvec serpent_tv_template[] = {
"\x75\x55\x9B\xFF\x36\x73\xAB\x7C"
"\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -10358,6 +10342,8 @@ static const struct cipher_testvec serpent_cbc_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xFC\x66\xAA\x37\xF2\x37\x39\x6B"
+ "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -10483,9 +10469,6 @@ static const struct cipher_testvec serpent_cbc_tv_template[] = {
"\xFC\x66\xAA\x37\xF2\x37\x39\x6B"
"\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -10498,6 +10481,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+ "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -10631,6 +10616,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+ "\xC4\x29\x8E\xF3\x35\x9A\xFF\x84",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -10758,9 +10745,6 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = {
"\x40\x53\x77\x8C\x15\xF8\x8D\x13"
"\x38\xE2\xE5",
.len = 499,
- .also_non_np = 1,
- .np = 2,
- .tap = { 499 - 16, 16 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -10769,6 +10753,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = {
.klen = 32,
.iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x1C",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -11135,9 +11121,6 @@ static const struct cipher_testvec serpent_lrw_tv_template[] = {
"\x5c\xc6\x84\xfe\x7c\xcb\x26\xfd"
"\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
},
};
@@ -11472,9 +11455,6 @@ static const struct cipher_testvec serpent_xts_tv_template[] = {
"\xaf\x43\x0b\xc5\x20\x41\x92\x20"
"\xd4\xa0\x91\x98\x11\x5f\x4d\xb1",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
},
};
@@ -11579,6 +11559,8 @@ static const struct cipher_testvec sm4_cbc_tv_template[] = {
"\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .iv_out = "\x4C\xB7\x01\x69\x51\x90\x92\x26"
+ "\x97\x9B\x0D\x15\xDC\x6A\x8F\x6D",
.ctext = "\x78\xEB\xB1\x1C\xC4\x0B\x0A\x48"
"\x31\x2A\xAE\xB2\x04\x02\x44\xCB"
"\x4C\xB7\x01\x69\x51\x90\x92\x26"
@@ -11594,6 +11576,8 @@ static const struct cipher_testvec sm4_cbc_tv_template[] = {
"\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .iv_out = "\x91\xf2\xc1\x47\x91\x1a\x41\x44"
+ "\x66\x5e\x1f\xa1\xd4\x0b\xae\x38",
.ctext = "\x0d\x3a\x6d\xdc\x2d\x21\xc6\x98"
"\x85\x72\x15\x58\x7b\x7b\xb5\x9a"
"\x91\xf2\xc1\x47\x91\x1a\x41\x44"
@@ -11617,6 +11601,8 @@ static const struct cipher_testvec sm4_ctr_tv_template[] = {
"\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .iv_out = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x13",
.ctext = "\xac\x32\x36\xcb\x97\x0c\xc2\x07"
"\x91\x36\x4c\x39\x5a\x13\x42\xd1"
"\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd"
@@ -11640,6 +11626,8 @@ static const struct cipher_testvec sm4_ctr_tv_template[] = {
"\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .iv_out = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x13",
.ctext = "\x5d\xcc\xcd\x25\xb9\x5a\xb0\x74"
"\x17\xa0\x85\x12\xee\x16\x0e\x2f"
"\x8f\x66\x15\x21\xcb\xba\xb4\x4c"
@@ -11814,9 +11802,6 @@ static const struct cipher_testvec cast6_tv_template[] = {
"\x84\x52\x6D\x68\xDE\xC6\x64\xB2"
"\x11\x74\x93\x57\xB4\x7E\xC6\x00",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -11829,6 +11814,8 @@ static const struct cipher_testvec cast6_cbc_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\x4D\x59\x7D\xC5\x28\x69\xFA\x92"
+ "\x22\x46\x89\x2D\x0F\x2B\x08\x24",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -11954,9 +11941,6 @@ static const struct cipher_testvec cast6_cbc_tv_template[] = {
"\x4D\x59\x7D\xC5\x28\x69\xFA\x92"
"\x22\x46\x89\x2D\x0F\x2B\x08\x24",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -11969,6 +11953,8 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+ "\xC4\x29\x8E\xF3\x35\x9A\xFF\x66",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A",
@@ -11984,6 +11970,8 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+ "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -12109,9 +12097,6 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = {
"\x0E\x74\x33\x30\x62\xB9\x89\xDF"
"\xF9\xC5\xDD\x27\xB3\x39\xCB\xCB",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -12255,9 +12240,6 @@ static const struct cipher_testvec cast6_lrw_tv_template[] = {
"\x8D\xD9\xCD\x3B\x22\x67\x18\xC7"
"\xC4\xF5\x99\x61\xBC\xBB\x5B\x46",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
},
};
@@ -12403,9 +12385,6 @@ static const struct cipher_testvec cast6_xts_tv_template[] = {
"\xA1\xAC\xE8\xCF\xC6\x74\xCF\xDC"
"\x22\x60\x4E\xE8\xA4\x5D\x85\xB9",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
},
};
@@ -12574,9 +12553,6 @@ static const struct cipher_testvec aes_tv_template[] = {
"\x09\x79\xA0\x43\x5C\x0D\x08\x58"
"\x17\xBB\xC0\x6B\x62\x3F\x56\xE9",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -12587,19 +12563,20 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
.klen = 16,
.iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+ .iv_out = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
+ "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
.ptext = "Single block msg",
.ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
"\x27\x08\x94\x2d\xbe\x77\x18\x1a",
.len = 16,
- .also_non_np = 1,
- .np = 8,
- .tap = { 3, 2, 3, 2, 3, 1, 1, 1 },
}, {
.key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
"\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
.klen = 16,
.iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+ .iv_out = "\x75\x86\x60\x2d\x25\x3c\xff\xf9"
+ "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1",
.ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -12616,6 +12593,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
.klen = 24,
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .iv_out = "\x08\xb0\xe2\x79\x88\x59\x88\x81"
+ "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd",
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -12641,6 +12620,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
.klen = 32,
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .iv_out = "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
+ "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -12666,6 +12647,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
.klen = 32,
.iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47"
"\xE2\x7D\x18\xD6\x71\x0C\xA7\x42",
+ .iv_out = "\xE0\x1F\x91\xF8\x82\x96\x2D\x65"
+ "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02",
.ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
"\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
"\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
@@ -12791,9 +12774,6 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
"\xE0\x1F\x91\xF8\x82\x96\x2D\x65"
"\xA3\xAA\x13\xCC\x50\xFF\x7B\x02",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -12870,10 +12850,32 @@ static const struct cipher_testvec aes_cfb_tv_template[] = {
"\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
"\x20\x31\x62\x3d\x55\xb1\xe4\x71",
.len = 64,
+ }, { /* > 16 bytes, not a multiple of 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8",
+ .len = 17,
+ }, { /* < 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
+ .len = 7,
},
};
-static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
+static const struct aead_testvec hmac_md5_ecb_cipher_null_tv_template[] = {
{ /* Input data from RFC 2410 Case 1 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -12887,12 +12889,12 @@ static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
"\x00\x00\x00\x00\x00\x00\x00\x00",
.klen = 8 + 16 + 0,
.iv = "",
- .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .ilen = 8,
- .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .plen = 8,
+ .ctext = "\x01\x23\x45\x67\x89\xab\xcd\xef"
"\xaa\x42\xfe\x43\x8d\xea\xa3\x5a"
"\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae",
- .rlen = 8 + 16,
+ .clen = 8 + 16,
}, { /* Input data from RFC 2410 Case 2 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -12906,58 +12908,16 @@ static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
"\x00\x00\x00\x00\x00\x00\x00\x00",
.klen = 8 + 16 + 0,
.iv = "",
- .input = "Network Security People Have A Strange Sense Of Humor",
- .ilen = 53,
- .result = "Network Security People Have A Strange Sense Of Humor"
+ .ptext = "Network Security People Have A Strange Sense Of Humor",
+ .plen = 53,
+ .ctext = "Network Security People Have A Strange Sense Of Humor"
"\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a"
"\x8e\xb5\x5f\x90\x8e\xfe\x13\x23",
- .rlen = 53 + 16,
+ .clen = 53 + 16,
},
};
-static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
- {
-#ifdef __LITTLE_ENDIAN
- .key = "\x08\x00" /* rta length */
- "\x01\x00" /* rta type */
-#else
- .key = "\x00\x08" /* rta length */
- "\x00\x01" /* rta type */
-#endif
- "\x00\x00\x00\x00" /* enc key length */
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 8 + 16 + 0,
- .iv = "",
- .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a"
- "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae",
- .ilen = 8 + 16,
- .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .rlen = 8,
- }, {
-#ifdef __LITTLE_ENDIAN
- .key = "\x08\x00" /* rta length */
- "\x01\x00" /* rta type */
-#else
- .key = "\x00\x08" /* rta length */
- "\x00\x01" /* rta type */
-#endif
- "\x00\x00\x00\x00" /* enc key length */
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 8 + 16 + 0,
- .iv = "",
- .input = "Network Security People Have A Strange Sense Of Humor"
- "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a"
- "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23",
- .ilen = 53 + 16,
- .result = "Network Security People Have A Strange Sense Of Humor",
- .rlen = 53,
- },
-};
-
-static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -12978,14 +12938,14 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
.assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
.alen = 16,
- .input = "Single block msg",
- .ilen = 16,
- .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
"\x27\x08\x94\x2d\xbe\x77\x18\x1a"
"\x1b\x13\xcb\xaf\x89\x5e\xe1\x2c"
"\x13\xc5\x2e\xa3\xcc\xed\xdc\xb5"
"\x03\x71\xa2\x06",
- .rlen = 16 + 20,
+ .clen = 16 + 20,
}, { /* RFC 3602 Case 2 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13006,19 +12966,19 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
.assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
.alen = 16,
- .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .ilen = 32,
- .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
+ .plen = 32,
+ .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
"\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
"\x75\x86\x60\x2d\x25\x3c\xff\xf9"
"\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1"
"\xad\x9b\x4c\x5c\x85\xe1\xda\xae"
"\xee\x81\x4e\xd7\xdb\x74\xcf\x58"
"\x65\x39\xf8\xde",
- .rlen = 32 + 20,
+ .clen = 32 + 20,
}, { /* RFC 3602 Case 3 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13039,9 +12999,9 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
.assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
"\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
.alen = 16,
- .input = "This is a 48-byte message (exactly 3 AES blocks)",
- .ilen = 48,
- .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
+ .ptext = "This is a 48-byte message (exactly 3 AES blocks)",
+ .plen = 48,
+ .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
"\xd4\x93\x66\x5d\x33\xf0\xe8\x86"
"\x2d\xea\x54\xcd\xb2\x93\xab\xc7"
"\x50\x69\x39\x27\x67\x72\xf8\xd5"
@@ -13050,7 +13010,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\xc2\xec\x0c\xf8\x7f\x05\xba\xca"
"\xff\xee\x4c\xd0\x93\xe6\x36\x7f"
"\x8d\x62\xf2\x1e",
- .rlen = 48 + 20,
+ .clen = 48 + 20,
}, { /* RFC 3602 Case 4 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13071,7 +13031,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
.assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
"\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
.alen = 16,
- .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
"\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
"\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
@@ -13079,8 +13039,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
"\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf",
- .ilen = 64,
- .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
+ .plen = 64,
+ .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
"\x6a\xff\x6a\xf0\x86\x9f\x71\xaa"
"\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6"
"\x84\xdb\x20\x7e\xb0\xef\x8e\x4e"
@@ -13091,7 +13051,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\x1c\x45\x57\xa9\x56\xcb\xa9\x2d"
"\x18\xac\xf1\xc7\x5d\xd1\xcd\x0d"
"\x1d\xbe\xc6\xe9",
- .rlen = 64 + 20,
+ .clen = 64 + 20,
}, { /* RFC 3602 Case 5 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13113,7 +13073,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\xe9\x6e\x8c\x08\xab\x46\x57\x63"
"\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
.alen = 24,
- .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
+ .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
"\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -13123,8 +13083,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\x30\x31\x32\x33\x34\x35\x36\x37"
"\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01",
- .ilen = 80,
- .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
+ .plen = 80,
+ .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
"\xa9\x45\x3e\x19\x4e\x12\x08\x49"
"\xa4\x87\x0b\x66\xcc\x6b\x99\x65"
"\x33\x00\x13\xb4\x89\x8d\xc8\x56"
@@ -13137,7 +13097,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\x58\xc6\x84\x75\xe4\xe9\x6b\x0c"
"\xe1\xc5\x0b\x73\x4d\x82\x55\xa8"
"\x85\xe1\x59\xf7",
- .rlen = 80 + 20,
+ .clen = 80 + 20,
}, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13159,7 +13119,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
- .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
@@ -13167,8 +13127,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ilen = 64,
- .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
+ .plen = 64,
+ .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
"\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
"\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
"\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
@@ -13179,7 +13139,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\x73\xe3\x19\x3f\x8b\xc9\xc6\xf4"
"\x5a\xf1\x5b\xa8\x98\x07\xc5\x36"
"\x47\x4c\xfc\x36",
- .rlen = 64 + 20,
+ .clen = 64 + 20,
}, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13202,7 +13162,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
- .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
@@ -13210,8 +13170,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ilen = 64,
- .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
+ .plen = 64,
+ .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
"\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
"\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
"\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
@@ -13222,11 +13182,11 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
"\xa3\xe8\x9b\x17\xe3\xf4\x7f\xde"
"\x1b\x9f\xc6\x81\x26\x43\x4a\x87"
"\x51\xee\xd6\x4e",
- .rlen = 64 + 20,
+ .clen = 64 + 20,
},
};
-static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_ecb_cipher_null_tv_temp[] = {
{ /* Input data from RFC 2410 Case 1 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13241,13 +13201,13 @@ static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
"\x00\x00\x00\x00",
.klen = 8 + 20 + 0,
.iv = "",
- .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .ilen = 8,
- .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .plen = 8,
+ .ctext = "\x01\x23\x45\x67\x89\xab\xcd\xef"
"\x40\xc3\x0a\xa1\xc9\xa0\x28\xab"
"\x99\x5e\x19\x04\xd1\x72\xef\xb8"
"\x8c\x5e\xe4\x08",
- .rlen = 8 + 20,
+ .clen = 8 + 20,
}, { /* Input data from RFC 2410 Case 2 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13262,63 +13222,17 @@ static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
"\x00\x00\x00\x00",
.klen = 8 + 20 + 0,
.iv = "",
- .input = "Network Security People Have A Strange Sense Of Humor",
- .ilen = 53,
- .result = "Network Security People Have A Strange Sense Of Humor"
+ .ptext = "Network Security People Have A Strange Sense Of Humor",
+ .plen = 53,
+ .ctext = "Network Security People Have A Strange Sense Of Humor"
"\x75\x6f\x42\x1e\xf8\x50\x21\xd2"
"\x65\x47\xee\x8e\x1a\xef\x16\xf6"
"\x91\x56\xe4\xd6",
- .rlen = 53 + 20,
+ .clen = 53 + 20,
},
};
-static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
- {
-#ifdef __LITTLE_ENDIAN
- .key = "\x08\x00" /* rta length */
- "\x01\x00" /* rta type */
-#else
- .key = "\x00\x08" /* rta length */
- "\x00\x01" /* rta type */
-#endif
- "\x00\x00\x00\x00" /* enc key length */
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00",
- .klen = 8 + 20 + 0,
- .iv = "",
- .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
- "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab"
- "\x99\x5e\x19\x04\xd1\x72\xef\xb8"
- "\x8c\x5e\xe4\x08",
- .ilen = 8 + 20,
- .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
- .rlen = 8,
- }, {
-#ifdef __LITTLE_ENDIAN
- .key = "\x08\x00" /* rta length */
- "\x01\x00" /* rta type */
-#else
- .key = "\x00\x08" /* rta length */
- "\x00\x01" /* rta type */
-#endif
- "\x00\x00\x00\x00" /* enc key length */
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00",
- .klen = 8 + 20 + 0,
- .iv = "",
- .input = "Network Security People Have A Strange Sense Of Humor"
- "\x75\x6f\x42\x1e\xf8\x50\x21\xd2"
- "\x65\x47\xee\x8e\x1a\xef\x16\xf6"
- "\x91\x56\xe4\xd6",
- .ilen = 53 + 20,
- .result = "Network Security People Have A Strange Sense Of Humor",
- .rlen = 53,
- },
-};
-
-static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13340,15 +13254,15 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
.assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
.alen = 16,
- .input = "Single block msg",
- .ilen = 16,
- .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
"\x27\x08\x94\x2d\xbe\x77\x18\x1a"
"\xcc\xde\x2d\x6a\xae\xf1\x0b\xcc"
"\x38\x06\x38\x51\xb4\xb8\xf3\x5b"
"\x5c\x34\xa6\xa3\x6e\x0b\x05\xe5"
"\x6a\x6d\x44\xaa\x26\xa8\x44\xa5",
- .rlen = 16 + 32,
+ .clen = 16 + 32,
}, { /* RFC 3602 Case 2 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13370,12 +13284,12 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
.assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
.alen = 16,
- .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .ilen = 32,
- .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
+ .plen = 32,
+ .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
"\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
"\x75\x86\x60\x2d\x25\x3c\xff\xf9"
"\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1"
@@ -13383,7 +13297,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\x0e\x06\x58\x8f\xba\xf6\x06\xda"
"\x49\x69\x0d\x5b\xd4\x36\x06\x62"
"\x35\x5e\x54\x58\x53\x4d\xdf\xbf",
- .rlen = 32 + 32,
+ .clen = 32 + 32,
}, { /* RFC 3602 Case 3 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13405,9 +13319,9 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
.assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
"\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
.alen = 16,
- .input = "This is a 48-byte message (exactly 3 AES blocks)",
- .ilen = 48,
- .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
+ .ptext = "This is a 48-byte message (exactly 3 AES blocks)",
+ .plen = 48,
+ .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
"\xd4\x93\x66\x5d\x33\xf0\xe8\x86"
"\x2d\xea\x54\xcd\xb2\x93\xab\xc7"
"\x50\x69\x39\x27\x67\x72\xf8\xd5"
@@ -13417,7 +13331,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\xe7\xc6\xce\x10\x31\x2f\x9b\x1d"
"\x24\x78\xfb\xbe\x02\xe0\x4f\x40"
"\x10\xbd\xaa\xc6\xa7\x79\xe0\x1a",
- .rlen = 48 + 32,
+ .clen = 48 + 32,
}, { /* RFC 3602 Case 4 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13439,7 +13353,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
.assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
"\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
.alen = 16,
- .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
"\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
"\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
@@ -13447,8 +13361,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
"\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf",
- .ilen = 64,
- .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
+ .plen = 64,
+ .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
"\x6a\xff\x6a\xf0\x86\x9f\x71\xaa"
"\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6"
"\x84\xdb\x20\x7e\xb0\xef\x8e\x4e"
@@ -13460,7 +13374,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\xe0\x93\xec\xc9\x9f\xf7\xce\xd8"
"\x3f\x54\xe2\x49\x39\xe3\x71\x25"
"\x2b\x6c\xe9\x5d\xec\xec\x2b\x64",
- .rlen = 64 + 32,
+ .clen = 64 + 32,
}, { /* RFC 3602 Case 5 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13483,7 +13397,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\xe9\x6e\x8c\x08\xab\x46\x57\x63"
"\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
.alen = 24,
- .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
+ .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
"\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -13493,8 +13407,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\x30\x31\x32\x33\x34\x35\x36\x37"
"\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01",
- .ilen = 80,
- .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
+ .plen = 80,
+ .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
"\xa9\x45\x3e\x19\x4e\x12\x08\x49"
"\xa4\x87\x0b\x66\xcc\x6b\x99\x65"
"\x33\x00\x13\xb4\x89\x8d\xc8\x56"
@@ -13508,7 +13422,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\x3a\xd2\xe1\x03\x86\xa5\x59\xb7"
"\x73\xc3\x46\x20\x2c\xb1\xef\x68"
"\xbb\x8a\x32\x7e\x12\x8c\x69\xcf",
- .rlen = 80 + 32,
+ .clen = 80 + 32,
}, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13531,7 +13445,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
- .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
@@ -13539,8 +13453,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ilen = 64,
- .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
+ .plen = 64,
+ .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
"\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
"\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
"\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
@@ -13552,7 +13466,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\x61\x81\x31\xea\x5b\x3d\x8e\xfb"
"\xca\x71\x85\x93\xf7\x85\x55\x8b"
"\x7a\xe4\x94\xca\x8b\xba\x19\x33",
- .rlen = 64 + 32,
+ .clen = 64 + 32,
}, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13576,7 +13490,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
- .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
@@ -13584,8 +13498,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ilen = 64,
- .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
+ .plen = 64,
+ .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
"\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
"\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
"\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
@@ -13597,11 +13511,11 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
"\x8f\x74\xbd\x17\x92\x03\xbe\x8f"
"\xf3\x61\xde\x1c\xe9\xdb\xcd\xd0"
"\xcc\xce\xe9\x85\x57\xcf\x6f\x5f",
- .rlen = 64 + 32,
+ .clen = 64 + 32,
},
};
-static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha512_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13627,9 +13541,9 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
.assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
.alen = 16,
- .input = "Single block msg",
- .ilen = 16,
- .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
"\x27\x08\x94\x2d\xbe\x77\x18\x1a"
"\x3f\xdc\xad\x90\x03\x63\x5e\x68"
"\xc3\x13\xdd\xa4\x5c\x4d\x54\xa7"
@@ -13639,7 +13553,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\xe8\x9a\x7c\x06\x3d\xcb\xff\xb2"
"\xfa\x20\x89\xdd\x9c\xac\x9e\x16"
"\x18\x8a\xa0\x6d\x01\x6c\xa3\x3a",
- .rlen = 16 + 64,
+ .clen = 16 + 64,
}, { /* RFC 3602 Case 2 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13665,12 +13579,12 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
.assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
.alen = 16,
- .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .ilen = 32,
- .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
+ .plen = 32,
+ .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
"\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
"\x75\x86\x60\x2d\x25\x3c\xff\xf9"
"\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1"
@@ -13682,7 +13596,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\x46\x32\x7c\x41\x9c\x59\x3e\xe9"
"\x8f\x9f\xd4\x31\xd6\x22\xbd\xf8"
"\xf7\x0a\x94\xe5\xa9\xc3\xf6\x9d",
- .rlen = 32 + 64,
+ .clen = 32 + 64,
}, { /* RFC 3602 Case 3 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13708,9 +13622,9 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
.assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
"\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
.alen = 16,
- .input = "This is a 48-byte message (exactly 3 AES blocks)",
- .ilen = 48,
- .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
+ .ptext = "This is a 48-byte message (exactly 3 AES blocks)",
+ .plen = 48,
+ .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
"\xd4\x93\x66\x5d\x33\xf0\xe8\x86"
"\x2d\xea\x54\xcd\xb2\x93\xab\xc7"
"\x50\x69\x39\x27\x67\x72\xf8\xd5"
@@ -13724,7 +13638,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\x08\xea\x29\x6c\x74\x67\x3f\xb0"
"\xac\x7f\x5c\x1d\xf5\xee\x22\x66"
"\x27\xa6\xb6\x13\xba\xba\xf0\xc2",
- .rlen = 48 + 64,
+ .clen = 48 + 64,
}, { /* RFC 3602 Case 4 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13750,7 +13664,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
.assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
"\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
.alen = 16,
- .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
"\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
"\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
@@ -13758,8 +13672,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
"\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf",
- .ilen = 64,
- .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
+ .plen = 64,
+ .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
"\x6a\xff\x6a\xf0\x86\x9f\x71\xaa"
"\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6"
"\x84\xdb\x20\x7e\xb0\xef\x8e\x4e"
@@ -13775,7 +13689,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\xbc\x6f\xed\xd5\x8d\xde\x23\x7c"
"\x62\x98\x14\xd7\x2f\x37\x8d\xdf"
"\xf4\x33\x80\xeb\x8e\xb4\xa4\xda",
- .rlen = 64 + 64,
+ .clen = 64 + 64,
}, { /* RFC 3602 Case 5 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13802,7 +13716,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\xe9\x6e\x8c\x08\xab\x46\x57\x63"
"\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
.alen = 24,
- .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
+ .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
"\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -13812,8 +13726,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\x30\x31\x32\x33\x34\x35\x36\x37"
"\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01",
- .ilen = 80,
- .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
+ .plen = 80,
+ .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
"\xa9\x45\x3e\x19\x4e\x12\x08\x49"
"\xa4\x87\x0b\x66\xcc\x6b\x99\x65"
"\x33\x00\x13\xb4\x89\x8d\xc8\x56"
@@ -13831,7 +13745,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\x92\x26\xc1\x76\x20\x11\xeb\xba"
"\x62\x4f\x9a\x62\x25\xc3\x75\x80"
"\xb7\x0a\x17\xf5\xd7\x94\xb4\x14",
- .rlen = 80 + 64,
+ .clen = 80 + 64,
}, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13858,7 +13772,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
- .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
@@ -13866,8 +13780,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ilen = 64,
- .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
+ .plen = 64,
+ .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
"\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
"\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
"\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
@@ -13883,7 +13797,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\xba\x03\xd5\x32\xfa\x5f\x41\x58"
"\x8d\x43\x98\xa7\x94\x16\x07\x02"
"\x0f\xb6\x81\x50\x28\x95\x2e\x75",
- .rlen = 64 + 64,
+ .clen = 64 + 64,
}, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13911,7 +13825,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
- .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
@@ -13919,8 +13833,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
- .ilen = 64,
- .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
+ .plen = 64,
+ .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
"\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
"\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
"\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
@@ -13936,11 +13850,11 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
"\xe3\x8d\x64\xc3\x8d\xff\x7c\x8c"
"\xdb\xbf\xa0\xb4\x01\xa2\xa8\xa2"
"\x2c\xb1\x62\x2c\x10\xca\xf1\x21",
- .rlen = 64 + 64,
+ .clen = 64 + 64,
},
};
-static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_des_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -13959,7 +13873,7 @@ static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -13975,8 +13889,8 @@ static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
+ .plen = 128,
+ .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
"\x54\x31\x85\x37\xed\x6b\x01\x8d"
"\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1"
"\x41\xaa\x33\x91\xa7\x7d\x99\x88"
@@ -13995,11 +13909,11 @@ static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
"\x95\x16\x20\x09\xf5\x95\x19\xfd"
"\x3c\xc7\xe0\x42\xc0\x14\x69\xfa"
"\x5c\x44\xa9\x37",
- .rlen = 128 + 20,
+ .clen = 128 + 20,
},
};
-static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha224_des_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14018,7 +13932,7 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14034,8 +13948,8 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
+ .plen = 128,
+ .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
"\x54\x31\x85\x37\xed\x6b\x01\x8d"
"\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1"
"\x41\xaa\x33\x91\xa7\x7d\x99\x88"
@@ -14054,11 +13968,11 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
"\x9c\x2d\x7e\xee\x20\x34\x55\x0a"
"\xce\xb5\x4e\x64\x53\xe7\xbf\x91"
"\xab\xd4\xd9\xda\xc9\x12\xae\xf7",
- .rlen = 128 + 24,
+ .clen = 128 + 24,
},
};
-static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha256_des_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14078,7 +13992,7 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14094,8 +14008,8 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
+ .plen = 128,
+ .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
"\x54\x31\x85\x37\xed\x6b\x01\x8d"
"\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1"
"\x41\xaa\x33\x91\xa7\x7d\x99\x88"
@@ -14115,11 +14029,11 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
"\x50\xf6\x5d\xab\x4b\x51\x4e\x5e"
"\xde\x63\xde\x76\x52\xde\x9f\xba"
"\x90\xcf\x15\xf2\xbb\x6e\x84\x00",
- .rlen = 128 + 32,
+ .clen = 128 + 32,
},
};
-static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha384_des_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14141,7 +14055,7 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14157,8 +14071,8 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
+ .plen = 128,
+ .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
"\x54\x31\x85\x37\xed\x6b\x01\x8d"
"\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1"
"\x41\xaa\x33\x91\xa7\x7d\x99\x88"
@@ -14180,11 +14094,11 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
"\x5e\x67\xb5\x74\xe7\xe7\x85\x61"
"\x6a\x95\x26\x75\xcc\x53\x89\xf3"
"\x74\xc9\x2a\x76\x20\xa2\x64\x62",
- .rlen = 128 + 48,
+ .clen = 128 + 48,
},
};
-static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha512_des_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14208,7 +14122,7 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14224,8 +14138,8 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
+ .plen = 128,
+ .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
"\x54\x31\x85\x37\xed\x6b\x01\x8d"
"\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1"
"\x41\xaa\x33\x91\xa7\x7d\x99\x88"
@@ -14249,11 +14163,11 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
"\x97\xe2\xe3\xb8\xaa\x48\x85\xee"
"\x8c\xf6\x07\x95\x1f\xa6\x6c\x96"
"\x99\xc7\x5c\x8d\xd8\xb5\x68\x7b",
- .rlen = 128 + 64,
+ .clen = 128 + 64,
},
};
-static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_des3_ede_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14274,7 +14188,7 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14290,8 +14204,8 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
+ .plen = 128,
+ .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
@@ -14310,11 +14224,11 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
"\x67\x6d\xb1\xf5\xb8\x10\xdc\xc6"
"\x75\x86\x96\x6b\xb1\xc5\xe4\xcf"
"\xd1\x60\x91\xb3",
- .rlen = 128 + 20,
+ .clen = 128 + 20,
},
};
-static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha224_des3_ede_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14335,7 +14249,7 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14351,8 +14265,8 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
+ .plen = 128,
+ .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
@@ -14371,11 +14285,11 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
"\x15\x24\x7f\x5a\x45\x4a\x66\xce"
"\x2b\x0b\x93\x99\x2f\x9d\x0c\x6c"
"\x56\x1f\xe1\xa6\x41\xb2\x4c\xd0",
- .rlen = 128 + 24,
+ .clen = 128 + 24,
},
};
-static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha256_des3_ede_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14397,7 +14311,7 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14413,8 +14327,8 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
+ .plen = 128,
+ .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
@@ -14434,11 +14348,11 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
"\x56\x38\x44\xc0\xdb\xe3\x4f\x71"
"\xf7\xce\xd1\xd3\xf8\xbd\x3e\x4f"
"\xca\x43\x95\xdf\x80\x61\x81\xa9",
- .rlen = 128 + 32,
+ .clen = 128 + 32,
},
};
-static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha384_des3_ede_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14462,7 +14376,7 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14478,8 +14392,8 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
+ .plen = 128,
+ .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
@@ -14501,11 +14415,11 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
"\xa4\x32\x8a\x0b\x46\xd7\xf0\x39"
"\x36\x5d\x13\x2f\x86\x10\x78\xd6"
"\xd6\xbe\x5c\xb9\x15\x89\xf9\x1b",
- .rlen = 128 + 48,
+ .clen = 128 + 48,
},
};
-static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha512_des3_ede_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
@@ -14531,7 +14445,7 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
- .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
@@ -14547,8 +14461,8 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
- .ilen = 128,
- .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
+ .plen = 128,
+ .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
@@ -14572,7 +14486,7 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
"\x2a\x74\xd4\x65\x12\xcb\x55\xf2"
"\xd5\x02\x6d\xe6\xaf\xc9\x2f\xf2"
"\x57\xaa\x85\xf7\xf3\x6a\xcb\xdb",
- .rlen = 128 + 64,
+ .clen = 128 + 64,
},
};
@@ -14836,9 +14750,6 @@ static const struct cipher_testvec aes_lrw_tv_template[] = {
"\xcd\x7e\x2b\x5d\x43\xea\x42\xe7"
"\x74\x3f\x7d\x58\x88\x75\xde\x3e",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
}
};
@@ -15174,9 +15085,6 @@ static const struct cipher_testvec aes_xts_tv_template[] = {
"\xc4\xf3\x6f\xfd\xa9\xfc\xea\x70"
"\xb9\xc6\xe6\x93\xe1\x48\xc1\x51",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
}
};
@@ -15187,6 +15095,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = {
.klen = 16,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03",
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15211,6 +15121,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = {
.klen = 24,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03",
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15236,6 +15148,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = {
.klen = 32,
.iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03",
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15261,6 +15175,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = {
.klen = 32,
.iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x1C",
.ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
"\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
"\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
@@ -15386,9 +15302,6 @@ static const struct cipher_testvec aes_ctr_tv_template[] = {
"\xFA\x3A\x05\x4C\xFA\xD1\xFF\xFE"
"\xF1\x4C\xE5\xB2\x91\x64\x0C\x51",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
}, { /* Generated with Crypto++ */
.key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
"\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
@@ -15397,6 +15310,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47"
"\xE2\x7D\x18\xD6\x71\x0C\xA7\x42",
+ .iv_out = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47"
+ "\xE2\x7D\x18\xD6\x71\x0C\xA7\x62",
.ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
"\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
"\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
@@ -15524,9 +15439,6 @@ static const struct cipher_testvec aes_ctr_tv_template[] = {
"\xD8\xFE\xC9\x5B\x5C\x25\xE5\x76"
"\xFB\xF2\x3F",
.len = 499,
- .also_non_np = 1,
- .np = 2,
- .tap = { 499 - 16, 16 },
},
};
@@ -16650,14 +16562,11 @@ static const struct cipher_testvec aes_ctr_rfc3686_tv_template[] = {
"\x4b\xef\x31\x18\xea\xac\xb1\x84"
"\x21\xed\xda\x86",
.len = 4100,
- .np = 2,
- .tap = { 4064, 36 },
},
};
static const struct cipher_testvec aes_ofb_tv_template[] = {
- /* From NIST Special Publication 800-38A, Appendix F.5 */
- {
+ { /* From NIST Special Publication 800-38A, Appendix F.5 */
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.klen = 16,
@@ -16680,33 +16589,55 @@ static const struct cipher_testvec aes_ofb_tv_template[] = {
"\x30\x4c\x65\x28\xf6\x59\xc7\x78"
"\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
.len = 64,
+ }, { /* > 16 bytes, not a multiple of 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\x77",
+ .len = 17,
+ }, { /* < 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
+ .len = 7,
}
};
-static const struct aead_testvec aes_gcm_enc_tv_template[] = {
+static const struct aead_testvec aes_gcm_tv_template[] = {
{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
.key = zeroed_string,
.klen = 16,
- .result = "\x58\xe2\xfc\xce\xfa\x7e\x30\x61"
+ .ctext = "\x58\xe2\xfc\xce\xfa\x7e\x30\x61"
"\x36\x7f\x1d\x57\xa4\xe7\x45\x5a",
- .rlen = 16,
+ .clen = 16,
}, {
.key = zeroed_string,
.klen = 16,
- .input = zeroed_string,
- .ilen = 16,
- .result = "\x03\x88\xda\xce\x60\xb6\xa3\x92"
+ .ptext = zeroed_string,
+ .plen = 16,
+ .ctext = "\x03\x88\xda\xce\x60\xb6\xa3\x92"
"\xf3\x28\xc2\xb9\x71\xb2\xfe\x78"
"\xab\x6e\x47\xd4\x2c\xec\x13\xbd"
"\xf5\x3a\x67\xb2\x12\x57\xbd\xdf",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08",
.klen = 16,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
- .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
@@ -16714,8 +16645,8 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = {
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
- .ilen = 64,
- .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
+ .plen = 64,
+ .ctext = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
"\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
"\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
"\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
@@ -16725,14 +16656,14 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = {
"\x3d\x58\xe0\x91\x47\x3f\x59\x85"
"\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
"\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08",
.klen = 16,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
- .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
@@ -16740,12 +16671,12 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = {
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39",
- .ilen = 60,
+ .plen = 60,
.assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xab\xad\xda\xd2",
.alen = 20,
- .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
+ .ctext = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
"\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
"\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
"\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
@@ -16755,23 +16686,23 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = {
"\x3d\x58\xe0\x91"
"\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
"\x94\xfa\xe9\x5a\xe7\x12\x1a\x47",
- .rlen = 76,
+ .clen = 76,
}, {
.key = zeroed_string,
.klen = 24,
- .result = "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b"
+ .ctext = "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b"
"\xa0\x0e\xd1\xf3\x12\x57\x24\x35",
- .rlen = 16,
+ .clen = 16,
}, {
.key = zeroed_string,
.klen = 24,
- .input = zeroed_string,
- .ilen = 16,
- .result = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
+ .ptext = zeroed_string,
+ .plen = 16,
+ .ctext = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
"\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
"\x2f\xf5\x8d\x80\x03\x39\x27\xab"
"\x8e\xf4\xd4\x58\x75\x14\xf0\xfb",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
@@ -16779,7 +16710,7 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = {
.klen = 24,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
- .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
@@ -16787,8 +16718,8 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = {
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
- .ilen = 64,
- .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
+ .plen = 64,
+ .ctext = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
"\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
"\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
"\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
@@ -16798,62 +16729,23 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = {
"\xcc\xda\x27\x10\xac\xad\xe2\x56"
"\x99\x24\xa7\xc8\x58\x73\x36\xbf"
"\xb1\x18\x02\x4d\xb8\x67\x4a\x14",
- .rlen = 80,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
- .klen = 24,
- .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
- "\xde\xca\xf8\x88",
- .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
- "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
- "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
- "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
- "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
- "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
- "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
- "\xba\x63\x7b\x39",
- .ilen = 60,
- .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
- "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
- "\xab\xad\xda\xd2",
- .alen = 20,
- .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
- "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
- "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
- "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
- "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
- "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
- "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
- "\xcc\xda\x27\x10"
- "\x25\x19\x49\x8e\x80\xf1\x47\x8f"
- "\x37\xba\x55\xbd\x6d\x27\x61\x8c",
- .rlen = 76,
- .np = 2,
- .tap = { 32, 28 },
- .anp = 2,
- .atap = { 8, 12 }
+ .clen = 80,
}, {
.key = zeroed_string,
.klen = 32,
- .result = "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9"
+ .ctext = "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9"
"\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b",
- .rlen = 16,
- }
-};
-
-static const struct aead_testvec aes_gcm_dec_tv_template[] = {
- { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
+ .clen = 16,
+ }, {
.key = zeroed_string,
.klen = 32,
- .input = "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e"
+ .ptext = zeroed_string,
+ .plen = 16,
+ .ctext = "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e"
"\x07\x4e\xc5\xd3\xba\xf3\x9d\x18"
"\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0"
"\x26\x5b\x98\xb5\xd4\x8a\xb9\x19",
- .ilen = 32,
- .result = zeroed_string,
- .rlen = 16,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
@@ -16862,18 +16754,7 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = {
.klen = 32,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
- .input = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
- "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
- "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
- "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
- "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
- "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
- "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
- "\xbc\xc9\xf6\x62\x89\x80\x15\xad"
- "\xb0\x94\xda\xc5\xd9\x34\x71\xbd"
- "\xec\x1a\x50\x22\x70\xe3\xcc\x6c",
- .ilen = 80,
- .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
@@ -16881,109 +16762,50 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = {
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
- .rlen = 64,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
- .klen = 32,
- .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
- "\xde\xca\xf8\x88",
- .input = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
+ .plen = 64,
+ .ctext = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
"\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
"\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
"\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
"\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
"\xa7\xb0\x8b\x10\x56\x82\x88\x38"
"\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
- "\xbc\xc9\xf6\x62"
- "\x76\xfc\x6e\xce\x0f\x4e\x17\x68"
- "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b",
- .ilen = 76,
- .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
- "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
- "\xab\xad\xda\xd2",
- .alen = 20,
- .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
- "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
- "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
- "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
- "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
- "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
- "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
- "\xba\x63\x7b\x39",
- .rlen = 60,
- .np = 2,
- .tap = { 48, 28 },
- .anp = 3,
- .atap = { 8, 8, 4 }
+ "\xbc\xc9\xf6\x62\x89\x80\x15\xad"
+ "\xb0\x94\xda\xc5\xd9\x34\x71\xbd"
+ "\xec\x1a\x50\x22\x70\xe3\xcc\x6c",
+ .clen = 80,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08",
- .klen = 16,
+ .klen = 32,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
- .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
- "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
- "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
- "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
- "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
- "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
- "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
- "\x3d\x58\xe0\x91\x47\x3f\x59\x85"
- "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
- "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4",
- .ilen = 80,
- .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
"\x1c\x3c\x0c\x95\x95\x68\x09\x53"
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
- "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
- .rlen = 64,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
- .klen = 16,
- .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
- "\xde\xca\xf8\x88",
- .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
- "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
- "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
- "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
- "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
- "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
- "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
- "\x3d\x58\xe0\x91"
- "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
- "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47",
- .ilen = 76,
+ "\xba\x63\x7b\x39",
+ .plen = 60,
.assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xab\xad\xda\xd2",
.alen = 20,
- .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
- "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
- "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
- "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
- "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
- "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
- "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
- "\xba\x63\x7b\x39",
- .rlen = 60,
- }, {
- .key = zeroed_string,
- .klen = 24,
- .input = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
- "\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
- "\x2f\xf5\x8d\x80\x03\x39\x27\xab"
- "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb",
- .ilen = 32,
- .result = zeroed_string,
- .rlen = 16,
+ .ctext = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
+ "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
+ "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
+ "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
+ "\xbc\xc9\xf6\x62"
+ "\x76\xfc\x6e\xce\x0f\x4e\x17\x68"
+ "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b",
+ .clen = 76,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
@@ -16991,34 +16813,20 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = {
.klen = 24,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
- .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
- "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
- "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
- "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
- "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
- "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
- "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
- "\xcc\xda\x27\x10\xac\xad\xe2\x56"
- "\x99\x24\xa7\xc8\x58\x73\x36\xbf"
- "\xb1\x18\x02\x4d\xb8\x67\x4a\x14",
- .ilen = 80,
- .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
"\x1c\x3c\x0c\x95\x95\x68\x09\x53"
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
- "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
- .rlen = 64,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
- .klen = 24,
- .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
- "\xde\xca\xf8\x88",
- .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
+ "\xba\x63\x7b\x39",
+ .plen = 60,
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
"\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
"\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
"\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
@@ -17028,53 +16836,40 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = {
"\xcc\xda\x27\x10"
"\x25\x19\x49\x8e\x80\xf1\x47\x8f"
"\x37\xba\x55\xbd\x6d\x27\x61\x8c",
- .ilen = 76,
- .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
- "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
- "\xab\xad\xda\xd2",
- .alen = 20,
- .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
- "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
- "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
- "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
- "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
- "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
- "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
- "\xba\x63\x7b\x39",
- .rlen = 60,
+ .clen = 76,
}
};
-static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
+static const struct aead_testvec aes_gcm_rfc4106_tv_template[] = {
{ /* Generated using Crypto++ */
.key = zeroed_string,
.klen = 20,
.iv = zeroed_string,
- .input = zeroed_string,
- .ilen = 16,
+ .ptext = zeroed_string,
+ .plen = 16,
.assoc = zeroed_string,
.alen = 16,
- .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+ .ctext = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
"\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
"\x97\xFE\x4C\x23\x37\x42\x01\xE0"
"\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
- .rlen = 32,
+ .clen = 32,
},{
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\x00\x00\x00\x00",
.klen = 20,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = zeroed_string,
- .ilen = 16,
+ .ptext = zeroed_string,
+ .plen = 16,
.assoc = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x01",
.alen = 16,
- .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+ .ctext = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
"\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
"\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
"\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
@@ -17082,57 +16877,57 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x00\x00\x00\x00",
.klen = 20,
.iv = zeroed_string,
- .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01",
- .ilen = 16,
+ .plen = 16,
.assoc = zeroed_string,
.alen = 16,
- .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+ .ctext = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
"\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
"\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
"\xB1\x68\xFD\x14\x52\x64\x61\xB2",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\x00\x00\x00\x00",
.klen = 20,
.iv = zeroed_string,
- .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01",
- .ilen = 16,
+ .plen = 16,
.assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.alen = 16,
- .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+ .ctext = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
"\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
"\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
"\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\x00\x00\x00\x00",
.klen = 20,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01",
- .ilen = 16,
+ .plen = 16,
.assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x00\x00\x00\x00\x00\x00\x00\x01",
.alen = 16,
- .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+ .ctext = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
"\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
"\x64\x50\xF9\x32\x13\xFB\x74\x61"
"\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\x00\x00\x00\x00",
.klen = 20,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01"
@@ -17140,11 +16935,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01",
- .ilen = 64,
+ .plen = 64,
.assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x00\x00\x00\x00\x00\x00\x00\x01",
.alen = 16,
- .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+ .ctext = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
"\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
"\x98\x14\xA1\x42\x37\x80\xFD\x90"
"\x68\x12\x01\xA8\x91\x89\xB9\x83"
@@ -17154,14 +16949,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
"\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
"\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x00\x00\x00\x00",
.klen = 20,
.iv = "\x00\x00\x45\x67\x89\xab\xcd\xef",
- .input = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ .ptext = "\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff"
@@ -17185,12 +16980,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .ilen = 192,
+ .plen = 192,
.assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
"\x89\xab\xcd\xef",
.alen = 20,
- .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+ .ctext = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
"\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
"\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
"\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
@@ -17216,14 +17011,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
"\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
"\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
- .rlen = 208,
+ .clen = 208,
}, { /* From draft-mcgrew-gcm-test-01 */
.key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
"\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
"\x2E\x44\x3B\x68",
.klen = 20,
.iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
- .input = "\x45\x00\x00\x48\x69\x9A\x00\x00"
+ .ptext = "\x45\x00\x00\x48\x69\x9A\x00\x00"
"\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
"\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
"\x38\xD3\x01\x00\x00\x01\x00\x00"
@@ -17232,12 +17027,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x69\x70\x09\x63\x79\x62\x65\x72"
"\x63\x69\x74\x79\x02\x64\x6B\x00"
"\x00\x21\x00\x01\x01\x02\x02\x01",
- .ilen = 72,
+ .plen = 72,
.assoc = "\x00\x00\x43\x21\x87\x65\x43\x21"
"\x00\x00\x00\x00\x49\x56\xED\x7E"
"\x3B\x24\x4C\xFE",
.alen = 20,
- .result = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
+ .ctext = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
"\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
"\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
"\x34\x85\x09\xFA\x13\xCE\xAC\x34"
@@ -17248,14 +17043,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x61\xBC\x17\xD7\x68\xFD\x97\x32"
"\x45\x90\x18\x14\x8F\x6C\xBE\x72"
"\x2F\xD0\x47\x96\x56\x2D\xFD\xB4",
- .rlen = 88,
+ .clen = 88,
}, {
.key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
"\x6D\x6A\x8F\x94\x67\x30\x83\x08"
"\xCA\xFE\xBA\xBE",
.klen = 20,
.iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .input = "\x45\x00\x00\x3E\x69\x8F\x00\x00"
+ .ptext = "\x45\x00\x00\x3E\x69\x8F\x00\x00"
"\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
"\xC0\xA8\x01\x01\x0A\x98\x00\x35"
"\x00\x2A\x23\x43\xB2\xD0\x01\x00"
@@ -17263,11 +17058,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x03\x73\x69\x70\x09\x63\x79\x62"
"\x65\x72\x63\x69\x74\x79\x02\x64"
"\x6B\x00\x00\x01\x00\x01\x00\x01",
- .ilen = 64,
+ .plen = 64,
.assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
"\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
.alen = 16,
- .result = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
+ .ctext = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
"\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
"\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
"\x1B\xA7\x6D\x5D\x11\x4D\x2A\x5C"
@@ -17277,7 +17072,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\xEC\x3B\x9B\xA9\x5D\x91\x8B\xD1"
"\x83\xB7\x0D\x3A\xA8\xBC\x6E\xE4"
"\xC3\x09\xE9\xD8\x5A\x41\xAD\x4A",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
@@ -17286,18 +17081,18 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x11\x22\x33\x44",
.klen = 36,
.iv = "\x01\x02\x03\x04\x05\x06\x07\x08",
- .input = "\x45\x00\x00\x30\x69\xA6\x40\x00"
+ .ptext = "\x45\x00\x00\x30\x69\xA6\x40\x00"
"\x80\x06\x26\x90\xC0\xA8\x01\x02"
"\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
"\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
"\x70\x02\x40\x00\x20\xBF\x00\x00"
"\x02\x04\x05\xB4\x01\x01\x04\x02"
"\x01\x02\x02\x01",
- .ilen = 52,
+ .plen = 52,
.assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
"\x01\x02\x03\x04\x05\x06\x07\x08",
.alen = 16,
- .result = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
+ .ctext = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
"\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
"\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
"\x06\xEF\xAE\x9D\x65\xA5\xD7\x63"
@@ -17306,14 +17101,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\xEF\x84\x2D\x8E\xB3\x35\xF4\xEE"
"\xCF\xDB\xF8\x31\x82\x4B\x4C\x49"
"\x15\x95\x6C\x96",
- .rlen = 68,
+ .clen = 68,
}, {
.key = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00",
.klen = 20,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x45\x00\x00\x3C\x99\xC5\x00\x00"
+ .ptext = "\x45\x00\x00\x3C\x99\xC5\x00\x00"
"\x80\x01\xCB\x7A\x40\x67\x93\x18"
"\x01\x01\x01\x01\x08\x00\x07\x5C"
"\x02\x00\x44\x00\x61\x62\x63\x64"
@@ -17321,11 +17116,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x6D\x6E\x6F\x70\x71\x72\x73\x74"
"\x75\x76\x77\x61\x62\x63\x64\x65"
"\x66\x67\x68\x69\x01\x02\x02\x01",
- .ilen = 64,
+ .plen = 64,
.assoc = "\x00\x00\x00\x00\x00\x00\x00\x01"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.alen = 16,
- .result = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
+ .ctext = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
"\x73\x29\x09\xC3\x31\xD5\x6D\x60"
"\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
"\xF5\xFD\xCD\xFF\xF5\xE9\xA2\x84"
@@ -17335,14 +17130,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x1D\x19\xD4\xD5\xC8\xC1\x8A\xF3"
"\xF8\x21\xD4\x96\xEE\xB0\x96\xE9"
"\x8A\xD2\xB6\x9E\x47\x99\xC7\x1D",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
"\x57\x69\x0E\x43",
.klen = 20,
.iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .input = "\x45\x00\x00\x3C\x99\xC3\x00\x00"
+ .ptext = "\x45\x00\x00\x3C\x99\xC3\x00\x00"
"\x80\x01\xCB\x7C\x40\x67\x93\x18"
"\x01\x01\x01\x01\x08\x00\x08\x5C"
"\x02\x00\x43\x00\x61\x62\x63\x64"
@@ -17350,12 +17145,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x6D\x6E\x6F\x70\x71\x72\x73\x74"
"\x75\x76\x77\x61\x62\x63\x64\x65"
"\x66\x67\x68\x69\x01\x02\x02\x01",
- .ilen = 64,
+ .plen = 64,
.assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
"\x10\x10\x10\x10\x4E\x28\x00\x00"
"\xA2\xFC\xA1\xA3",
.alen = 20,
- .result = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
+ .ctext = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
"\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
"\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
"\x0D\x11\x38\xEC\x9C\x35\x79\x17"
@@ -17365,29 +17160,29 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x1F\x5E\x22\x73\x95\x30\x32\x0A"
"\xE0\xD7\x31\xCC\x97\x8E\xCA\xFA"
"\xEA\xE8\x8F\x00\xE8\x0D\x6E\x48",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
"\x57\x69\x0E\x43",
.klen = 20,
.iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .input = "\x45\x00\x00\x1C\x42\xA2\x00\x00"
+ .ptext = "\x45\x00\x00\x1C\x42\xA2\x00\x00"
"\x80\x01\x44\x1F\x40\x67\x93\xB6"
"\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
"\x01\x02\x02\x01",
- .ilen = 28,
+ .plen = 28,
.assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
"\x10\x10\x10\x10\x4E\x28\x00\x00"
"\xA2\xFC\xA1\xA3",
.alen = 20,
- .result = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
+ .ctext = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
"\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
"\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
"\x0E\x13\x79\xED\x36\x9F\x07\x1F"
"\x35\xE0\x34\xBE\x95\xF1\x12\xE4"
"\xE7\xD0\x5D\x35",
- .rlen = 44,
+ .clen = 44,
}, {
.key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
"\x6D\x6A\x8F\x94\x67\x30\x83\x08"
@@ -17395,30 +17190,30 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\xCA\xFE\xBA\xBE",
.klen = 28,
.iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .input = "\x45\x00\x00\x28\xA4\xAD\x40\x00"
+ .ptext = "\x45\x00\x00\x28\xA4\xAD\x40\x00"
"\x40\x06\x78\x80\x0A\x01\x03\x8F"
"\x0A\x01\x06\x12\x80\x23\x06\xB8"
"\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
"\x50\x10\x16\xD0\x75\x68\x00\x01",
- .ilen = 40,
+ .plen = 40,
.assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
"\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
.alen = 16,
- .result = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
+ .ctext = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
"\x0E\x59\x8B\x81\x22\xDE\x02\x42"
"\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
"\x87\xB8\x85\x8B\x5B\xFB\xDB\xD0"
"\x31\x5B\x27\x45\x21\x44\xCC\x77"
"\x95\x45\x7B\x96\x52\x03\x7F\x53"
"\x18\x02\x7B\x5B\x4C\xD7\xA6\x36",
- .rlen = 56,
+ .clen = 56,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
"\xDE\xCA\xF8\x88",
.klen = 20,
.iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
- .input = "\x45\x00\x00\x49\x33\xBA\x00\x00"
+ .ptext = "\x45\x00\x00\x49\x33\xBA\x00\x00"
"\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
"\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
"\x00\x35\xDD\x7B\x80\x03\x02\xD5"
@@ -17428,12 +17223,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
"\x9B\x62\x66\xC0\x47\x22\xB1\x49"
"\x23\x01\x01\x01",
- .ilen = 76,
+ .plen = 76,
.assoc = "\x00\x00\x01\x00\x00\x00\x00\x00"
"\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
"\xCE\xFA\xCE\x74",
.alen = 20,
- .result = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
+ .ctext = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
"\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
"\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
"\xC3\x49\xC1\xD2\xFB\xEC\x16\x8F"
@@ -17445,7 +17240,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\xE7\x84\x5D\x68\x65\x1F\x57\xE6"
"\x5F\x35\x4F\x75\xFF\x17\x01\x57"
"\x69\x62\x34\x36",
- .rlen = 92,
+ .clen = 92,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
@@ -17454,31 +17249,31 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x73\x61\x6C\x74",
.klen = 36,
.iv = "\x61\x6E\x64\x01\x69\x76\x65\x63",
- .input = "\x45\x08\x00\x28\x73\x2C\x00\x00"
+ .ptext = "\x45\x08\x00\x28\x73\x2C\x00\x00"
"\x40\x06\xE9\xF9\x0A\x01\x06\x12"
"\x0A\x01\x03\x8F\x06\xB8\x80\x23"
"\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
"\x50\x10\x1F\x64\x6D\x54\x00\x01",
- .ilen = 40,
+ .plen = 40,
.assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
"\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
"\x69\x76\x65\x63",
.alen = 20,
- .result = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
+ .ctext = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
"\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
"\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
"\x9D\x52\x4A\x30\x18\xD9\xA5\x7F"
"\xF4\xD3\xA3\x1C\xE6\x73\x11\x9E"
"\x45\x16\x26\xC2\x41\x57\x71\xE3"
"\xB7\xEE\xBC\xA6\x14\xC8\x9B\x35",
- .rlen = 56,
+ .clen = 56,
}, {
.key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
"\x57\x69\x0E\x43",
.klen = 20,
.iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .input = "\x45\x00\x00\x49\x33\x3E\x00\x00"
+ .ptext = "\x45\x00\x00\x49\x33\x3E\x00\x00"
"\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
"\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
"\x00\x35\xCB\x45\x80\x03\x02\x5B"
@@ -17488,12 +17283,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
"\x5A\xE2\x70\xC0\x38\x99\x49\x39"
"\x15\x01\x01\x01",
- .ilen = 76,
+ .plen = 76,
.assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
"\x10\x10\x10\x10\x4E\x28\x00\x00"
"\xA2\xFC\xA1\xA3",
.alen = 20,
- .result = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
+ .ctext = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
"\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
"\x3D\x77\x84\xB6\x07\x32\x3D\x22"
"\x0F\x24\xB0\xA9\x7D\x54\x18\x28"
@@ -17505,7 +17300,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\xE5\x16\x09\x75\xCD\xB6\x08\xC5"
"\x76\x91\x89\x60\x97\x63\xB8\xE1"
"\x8C\xAA\x81\xE2",
- .rlen = 92,
+ .clen = 92,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
@@ -17514,7 +17309,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x73\x61\x6C\x74",
.klen = 36,
.iv = "\x61\x6E\x64\x01\x69\x76\x65\x63",
- .input = "\x63\x69\x73\x63\x6F\x01\x72\x75"
+ .ptext = "\x63\x69\x73\x63\x6F\x01\x72\x75"
"\x6C\x65\x73\x01\x74\x68\x65\x01"
"\x6E\x65\x74\x77\x65\x01\x64\x65"
"\x66\x69\x6E\x65\x01\x74\x68\x65"
@@ -17523,12 +17318,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x74\x77\x69\x6C\x6C\x01\x64\x65"
"\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
"\x72\x72\x6F\x77\x01\x02\x02\x01",
- .ilen = 72,
+ .plen = 72,
.assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
"\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
"\x69\x76\x65\x63",
.alen = 20,
- .result = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
+ .ctext = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
"\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
"\x7B\x43\xF8\x26\xFB\x56\x83\x12"
"\x26\x50\x8B\xEB\xD2\xDC\xEB\x18"
@@ -17539,42 +17334,42 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x12\xA4\x93\x63\x41\x23\x64\xF8"
"\xC0\xCA\xC5\x87\xF2\x49\xE5\x6B"
"\x11\xE2\x4F\x30\xE4\x4C\xCC\x76",
- .rlen = 88,
+ .clen = 88,
}, {
.key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
"\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
"\xD9\x66\x42\x67",
.klen = 20,
.iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
- .input = "\x01\x02\x02\x01",
- .ilen = 4,
+ .ptext = "\x01\x02\x02\x01",
+ .plen = 4,
.assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
"\x43\x45\x7E\x91\x82\x44\x3B\xC6",
.alen = 16,
- .result = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
+ .ctext = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
"\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
"\x04\xBE\xF2\x70",
- .rlen = 20,
+ .clen = 20,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
"\xDE\xCA\xF8\x88",
.klen = 20,
.iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
- .input = "\x74\x6F\x01\x62\x65\x01\x6F\x72"
+ .ptext = "\x74\x6F\x01\x62\x65\x01\x6F\x72"
"\x01\x6E\x6F\x74\x01\x74\x6F\x01"
"\x62\x65\x00\x01",
- .ilen = 20,
+ .plen = 20,
.assoc = "\x00\x00\x01\x00\x00\x00\x00\x00"
"\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
"\xCE\xFA\xCE\x74",
.alen = 20,
- .result = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
+ .ctext = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
"\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
"\x43\x33\x21\x64\x41\x25\x03\x52"
"\x43\x03\xED\x3C\x6C\x5F\x28\x38"
"\x43\xAF\x8C\x3E",
- .rlen = 36,
+ .clen = 36,
}, {
.key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
"\x6D\x61\x72\x69\x6A\x75\x61\x6E"
@@ -17583,19 +17378,19 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x74\x75\x72\x6E",
.klen = 36,
.iv = "\x33\x30\x21\x69\x67\x65\x74\x6D",
- .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+ .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
"\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
"\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
"\x02\x00\x07\x00\x61\x62\x63\x64"
"\x65\x66\x67\x68\x69\x6A\x6B\x6C"
"\x6D\x6E\x6F\x70\x71\x72\x73\x74"
"\x01\x02\x02\x01",
- .ilen = 52,
+ .plen = 52,
.assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
"\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
"\x67\x65\x74\x6D",
.alen = 20,
- .result = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
+ .ctext = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
"\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
"\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
"\x4A\x27\x4B\x39\xB4\x9C\x3A\x86"
@@ -17604,26 +17399,26 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x1D\xCC\x63\xB9\xD0\x93\x7B\xA2"
"\x94\x5F\x66\x93\x68\x66\x1A\x32"
"\x9F\xB4\xC0\x53",
- .rlen = 68,
+ .clen = 68,
}, {
.key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
"\x57\x69\x0E\x43",
.klen = 20,
.iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+ .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
"\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
"\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
"\x02\x00\x07\x00\x61\x62\x63\x64"
"\x65\x66\x67\x68\x69\x6A\x6B\x6C"
"\x6D\x6E\x6F\x70\x71\x72\x73\x74"
"\x01\x02\x02\x01",
- .ilen = 52,
+ .plen = 52,
.assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
"\x10\x10\x10\x10\x4E\x28\x00\x00"
"\xA2\xFC\xA1\xA3",
.alen = 20,
- .result = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
+ .ctext = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
"\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
"\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
"\x0D\x11\x7C\xEC\x9C\x35\x79\x17"
@@ -17632,647 +17427,33 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
"\x63\x21\x93\x06\x84\xEE\xCA\xDB"
"\x56\x91\x25\x46\xE7\xA9\x5C\x97"
"\x40\xD7\xCB\x05",
- .rlen = 68,
+ .clen = 68,
}, {
.key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
"\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
"\x22\x43\x3C\x64",
.klen = 20,
.iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
- .input = "\x08\x00\xC6\xCD\x02\x00\x07\x00"
+ .ptext = "\x08\x00\xC6\xCD\x02\x00\x07\x00"
"\x61\x62\x63\x64\x65\x66\x67\x68"
"\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
"\x71\x72\x73\x74\x01\x02\x02\x01",
- .ilen = 32,
+ .plen = 32,
.assoc = "\x00\x00\x43\x21\x87\x65\x43\x21"
"\x00\x00\x00\x07\x48\x55\xEC\x7D"
"\x3A\x23\x4B\xFD",
.alen = 20,
- .result = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
+ .ctext = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
"\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
"\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
"\xAC\xDA\x68\x94\xBC\x61\x90\x69"
"\xEF\x9C\xBC\x28\xFE\x1B\x56\xA7"
"\xC4\xE0\xD5\x8C\x86\xCD\x2B\xC0",
- .rlen = 48,
+ .clen = 48,
}
};
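The conversion applied throughout this template is mechanical: each old encryption-only vector keeps its key/IV/assoc data, .input/.ilen and .result/.rlen become .ptext/.plen and .ctext/.clen, and the separate decryption array that follows is deleted, since one combined entry can be replayed in both directions. A cut-down, hypothetical sketch of the fields these vectors exercise (illustrative only; the real struct aead_testvec in crypto/testmgr.h carries further members):

/*
 * Hypothetical, cut-down view of the fields used by the combined
 * AEAD vectors in this file; not the actual kernel definition.
 */
struct aead_testvec_sketch {
	const char *key;
	const char *iv;
	const char *assoc;
	const char *ptext;	/* was .input in the *_enc_tv_template arrays  */
	const char *ctext;	/* was .result in the *_enc_tv_template arrays */
	unsigned int klen;
	unsigned int alen;
	unsigned int plen;	/* was .ilen */
	unsigned int clen;	/* was .rlen */
	int novrfy;		/* tag verification is expected to fail */
};

An encryption test checks that ptext plus assoc produces ctext; a decryption test runs the same entry in reverse, so no second array is needed.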
-static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
- { /* Generated using Crypto++ */
- .key = zeroed_string,
- .klen = 20,
- .iv = zeroed_string,
- .input = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
- "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
- "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
- "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
- .ilen = 32,
- .assoc = zeroed_string,
- .alen = 16,
- .result = zeroed_string,
- .rlen = 16,
-
- },{
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00\x00",
- .klen = 20,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
- "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
- "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
- "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
- .ilen = 32,
- .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x01",
- .alen = 16,
- .result = zeroed_string,
- .rlen = 16,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00\x00",
- .klen = 20,
- .iv = zeroed_string,
- .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
- "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
- "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
- "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
- .ilen = 32,
- .assoc = zeroed_string,
- .alen = 16,
- .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .rlen = 16,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00\x00",
- .klen = 20,
- .iv = zeroed_string,
- .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
- "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
- "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
- "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
- .ilen = 32,
- .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .alen = 16,
- .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .rlen = 16,
-
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00\x00",
- .klen = 20,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
- "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
- "\x64\x50\xF9\x32\x13\xFB\x74\x61"
- "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
- .ilen = 32,
- .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x00\x00\x00\x00\x00\x00\x00\x01",
- .alen = 16,
- .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .rlen = 16,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00\x00",
- .klen = 20,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
- "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
- "\x98\x14\xA1\x42\x37\x80\xFD\x90"
- "\x68\x12\x01\xA8\x91\x89\xB9\x83"
- "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
- "\x94\x5F\x18\x12\xBA\x27\x09\x39"
- "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
- "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
- "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
- "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
- .ilen = 80,
- .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x00\x00\x00\x00\x00\x00\x00\x01",
- .alen = 16,
- .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .rlen = 64,
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x00\x00\x00\x00",
- .klen = 20,
- .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef",
- .input = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
- "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
- "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
- "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
- "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
- "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
- "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
- "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
- "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
- "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
- "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
- "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
- "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
- "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
- "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
- "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
- "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
- "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
- "\x7E\x13\x06\x82\x08\x17\xA4\x35"
- "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
- "\xA3\x05\x38\x95\x20\x1A\x47\x04"
- "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
- "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
- "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
- "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
- "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
- .ilen = 208,
- .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
- "\x89\xab\xcd\xef",
- .alen = 20,
- .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .rlen = 192,
- }, {
- .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
- "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
- "\x2E\x44\x3B\x68",
- .klen = 20,
- .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
- .result = "\x45\x00\x00\x48\x69\x9A\x00\x00"
- "\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
- "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
- "\x38\xD3\x01\x00\x00\x01\x00\x00"
- "\x00\x00\x00\x00\x04\x5F\x73\x69"
- "\x70\x04\x5F\x75\x64\x70\x03\x73"
- "\x69\x70\x09\x63\x79\x62\x65\x72"
- "\x63\x69\x74\x79\x02\x64\x6B\x00"
- "\x00\x21\x00\x01\x01\x02\x02\x01",
- .rlen = 72,
- .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21"
- "\x00\x00\x00\x00\x49\x56\xED\x7E"
- "\x3B\x24\x4C\xFE",
- .alen = 20,
- .input = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
- "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
- "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
- "\x34\x85\x09\xFA\x13\xCE\xAC\x34"
- "\xCF\xA2\x43\x6F\x14\xA3\xF3\xCF"
- "\x65\x92\x5B\xF1\xF4\xA1\x3C\x5D"
- "\x15\xB2\x1E\x18\x84\xF5\xFF\x62"
- "\x47\xAE\xAB\xB7\x86\xB9\x3B\xCE"
- "\x61\xBC\x17\xD7\x68\xFD\x97\x32"
- "\x45\x90\x18\x14\x8F\x6C\xBE\x72"
- "\x2F\xD0\x47\x96\x56\x2D\xFD\xB4",
- .ilen = 88,
- }, {
- .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
- "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
- "\xCA\xFE\xBA\xBE",
- .klen = 20,
- .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .result = "\x45\x00\x00\x3E\x69\x8F\x00\x00"
- "\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
- "\xC0\xA8\x01\x01\x0A\x98\x00\x35"
- "\x00\x2A\x23\x43\xB2\xD0\x01\x00"
- "\x00\x01\x00\x00\x00\x00\x00\x00"
- "\x03\x73\x69\x70\x09\x63\x79\x62"
- "\x65\x72\x63\x69\x74\x79\x02\x64"
- "\x6B\x00\x00\x01\x00\x01\x00\x01",
- .rlen = 64,
- .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
- "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .alen = 16,
- .input = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
- "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
- "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
- "\x1B\xA7\x6D\x5D\x11\x4D\x2A\x5C"
- "\x3D\xE8\x18\x27\xC1\x0E\x9A\x4F"
- "\x51\x33\x0D\x0E\xEC\x41\x66\x42"
- "\xCF\xBB\x85\xA5\xB4\x7E\x48\xA4"
- "\xEC\x3B\x9B\xA9\x5D\x91\x8B\xD1"
- "\x83\xB7\x0D\x3A\xA8\xBC\x6E\xE4"
- "\xC3\x09\xE9\xD8\x5A\x41\xAD\x4A",
- .ilen = 80,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\x11\x22\x33\x44",
- .klen = 36,
- .iv = "\x01\x02\x03\x04\x05\x06\x07\x08",
- .result = "\x45\x00\x00\x30\x69\xA6\x40\x00"
- "\x80\x06\x26\x90\xC0\xA8\x01\x02"
- "\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
- "\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
- "\x70\x02\x40\x00\x20\xBF\x00\x00"
- "\x02\x04\x05\xB4\x01\x01\x04\x02"
- "\x01\x02\x02\x01",
- .rlen = 52,
- .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
- "\x01\x02\x03\x04\x05\x06\x07\x08",
- .alen = 16,
- .input = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
- "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
- "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
- "\x06\xEF\xAE\x9D\x65\xA5\xD7\x63"
- "\x74\x8A\x63\x79\x85\x77\x1D\x34"
- "\x7F\x05\x45\x65\x9F\x14\xE9\x9D"
- "\xEF\x84\x2D\x8E\xB3\x35\xF4\xEE"
- "\xCF\xDB\xF8\x31\x82\x4B\x4C\x49"
- "\x15\x95\x6C\x96",
- .ilen = 68,
- }, {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00",
- .klen = 20,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
- .result = "\x45\x00\x00\x3C\x99\xC5\x00\x00"
- "\x80\x01\xCB\x7A\x40\x67\x93\x18"
- "\x01\x01\x01\x01\x08\x00\x07\x5C"
- "\x02\x00\x44\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
- "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
- "\x75\x76\x77\x61\x62\x63\x64\x65"
- "\x66\x67\x68\x69\x01\x02\x02\x01",
- .rlen = 64,
- .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .alen = 16,
- .input = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
- "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
- "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
- "\xF5\xFD\xCD\xFF\xF5\xE9\xA2\x84"
- "\x45\x64\x76\x49\x27\x19\xFF\xB6"
- "\x4D\xE7\xD9\xDC\xA1\xE1\xD8\x94"
- "\xBC\x3B\xD5\x78\x73\xED\x4D\x18"
- "\x1D\x19\xD4\xD5\xC8\xC1\x8A\xF3"
- "\xF8\x21\xD4\x96\xEE\xB0\x96\xE9"
- "\x8A\xD2\xB6\x9E\x47\x99\xC7\x1D",
- .ilen = 80,
- }, {
- .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
- "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
- "\x57\x69\x0E\x43",
- .klen = 20,
- .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .result = "\x45\x00\x00\x3C\x99\xC3\x00\x00"
- "\x80\x01\xCB\x7C\x40\x67\x93\x18"
- "\x01\x01\x01\x01\x08\x00\x08\x5C"
- "\x02\x00\x43\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
- "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
- "\x75\x76\x77\x61\x62\x63\x64\x65"
- "\x66\x67\x68\x69\x01\x02\x02\x01",
- .rlen = 64,
- .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
- "\x10\x10\x10\x10\x4E\x28\x00\x00"
- "\xA2\xFC\xA1\xA3",
- .alen = 20,
- .input = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
- "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
- "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
- "\x0D\x11\x38\xEC\x9C\x35\x79\x17"
- "\x65\xAC\xBD\x87\x01\xAD\x79\x84"
- "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9"
- "\x17\x55\xE6\x66\x2B\x4C\x8D\x0D"
- "\x1F\x5E\x22\x73\x95\x30\x32\x0A"
- "\xE0\xD7\x31\xCC\x97\x8E\xCA\xFA"
- "\xEA\xE8\x8F\x00\xE8\x0D\x6E\x48",
- .ilen = 80,
- }, {
- .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
- "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
- "\x57\x69\x0E\x43",
- .klen = 20,
- .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .result = "\x45\x00\x00\x1C\x42\xA2\x00\x00"
- "\x80\x01\x44\x1F\x40\x67\x93\xB6"
- "\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
- "\x01\x02\x02\x01",
- .rlen = 28,
- .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
- "\x10\x10\x10\x10\x4E\x28\x00\x00"
- "\xA2\xFC\xA1\xA3",
- .alen = 20,
- .input = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
- "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
- "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
- "\x0E\x13\x79\xED\x36\x9F\x07\x1F"
- "\x35\xE0\x34\xBE\x95\xF1\x12\xE4"
- "\xE7\xD0\x5D\x35",
- .ilen = 44,
- }, {
- .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
- "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
- "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
- "\xCA\xFE\xBA\xBE",
- .klen = 28,
- .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .result = "\x45\x00\x00\x28\xA4\xAD\x40\x00"
- "\x40\x06\x78\x80\x0A\x01\x03\x8F"
- "\x0A\x01\x06\x12\x80\x23\x06\xB8"
- "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
- "\x50\x10\x16\xD0\x75\x68\x00\x01",
- .rlen = 40,
- .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
- "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .alen = 16,
- .input = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
- "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
- "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
- "\x87\xB8\x85\x8B\x5B\xFB\xDB\xD0"
- "\x31\x5B\x27\x45\x21\x44\xCC\x77"
- "\x95\x45\x7B\x96\x52\x03\x7F\x53"
- "\x18\x02\x7B\x5B\x4C\xD7\xA6\x36",
- .ilen = 56,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xDE\xCA\xF8\x88",
- .klen = 20,
- .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
- .result = "\x45\x00\x00\x49\x33\xBA\x00\x00"
- "\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
- "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
- "\x00\x35\xDD\x7B\x80\x03\x02\xD5"
- "\x00\x00\x4E\x20\x00\x1E\x8C\x18"
- "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47"
- "\x6B\x91\xB9\x24\xB2\x80\x38\x9D"
- "\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
- "\x9B\x62\x66\xC0\x47\x22\xB1\x49"
- "\x23\x01\x01\x01",
- .rlen = 76,
- .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
- "\xCE\xFA\xCE\x74",
- .alen = 20,
- .input = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
- "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
- "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
- "\xC3\x49\xC1\xD2\xFB\xEC\x16\x8F"
- "\x91\x90\xFE\xEB\xAF\x2C\xB0\x19"
- "\x84\xE6\x58\x63\x96\x5D\x74\x72"
- "\xB7\x9D\xA3\x45\xE0\xE7\x80\x19"
- "\x1F\x0D\x2F\x0E\x0F\x49\x6C\x22"
- "\x6F\x21\x27\xB2\x7D\xB3\x57\x24"
- "\xE7\x84\x5D\x68\x65\x1F\x57\xE6"
- "\x5F\x35\x4F\x75\xFF\x17\x01\x57"
- "\x69\x62\x34\x36",
- .ilen = 92,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\x73\x61\x6C\x74",
- .klen = 36,
- .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63",
- .result = "\x45\x08\x00\x28\x73\x2C\x00\x00"
- "\x40\x06\xE9\xF9\x0A\x01\x06\x12"
- "\x0A\x01\x03\x8F\x06\xB8\x80\x23"
- "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
- "\x50\x10\x1F\x64\x6D\x54\x00\x01",
- .rlen = 40,
- .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
- "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
- "\x69\x76\x65\x63",
- .alen = 20,
- .input = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
- "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
- "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
- "\x9D\x52\x4A\x30\x18\xD9\xA5\x7F"
- "\xF4\xD3\xA3\x1C\xE6\x73\x11\x9E"
- "\x45\x16\x26\xC2\x41\x57\x71\xE3"
- "\xB7\xEE\xBC\xA6\x14\xC8\x9B\x35",
- .ilen = 56,
- }, {
- .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
- "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
- "\x57\x69\x0E\x43",
- .klen = 20,
- .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .result = "\x45\x00\x00\x49\x33\x3E\x00\x00"
- "\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
- "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
- "\x00\x35\xCB\x45\x80\x03\x02\x5B"
- "\x00\x00\x01\xE0\x00\x1E\x8C\x18"
- "\xD6\x57\x59\xD5\x22\x84\xA0\x35"
- "\x2C\x71\x47\x5C\x88\x80\x39\x1C"
- "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
- "\x5A\xE2\x70\xC0\x38\x99\x49\x39"
- "\x15\x01\x01\x01",
- .rlen = 76,
- .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
- "\x10\x10\x10\x10\x4E\x28\x00\x00"
- "\xA2\xFC\xA1\xA3",
- .alen = 20,
- .input = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
- "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
- "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
- "\x0F\x24\xB0\xA9\x7D\x54\x18\x28"
- "\x00\xCA\xDB\x0F\x68\xD9\x9E\xF0"
- "\xE0\xC0\xC8\x9A\xE9\xBE\xA8\x88"
- "\x4E\x52\xD6\x5B\xC1\xAF\xD0\x74"
- "\x0F\x74\x24\x44\x74\x7B\x5B\x39"
- "\xAB\x53\x31\x63\xAA\xD4\x55\x0E"
- "\xE5\x16\x09\x75\xCD\xB6\x08\xC5"
- "\x76\x91\x89\x60\x97\x63\xB8\xE1"
- "\x8C\xAA\x81\xE2",
- .ilen = 92,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\x73\x61\x6C\x74",
- .klen = 36,
- .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63",
- .result = "\x63\x69\x73\x63\x6F\x01\x72\x75"
- "\x6C\x65\x73\x01\x74\x68\x65\x01"
- "\x6E\x65\x74\x77\x65\x01\x64\x65"
- "\x66\x69\x6E\x65\x01\x74\x68\x65"
- "\x74\x65\x63\x68\x6E\x6F\x6C\x6F"
- "\x67\x69\x65\x73\x01\x74\x68\x61"
- "\x74\x77\x69\x6C\x6C\x01\x64\x65"
- "\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
- "\x72\x72\x6F\x77\x01\x02\x02\x01",
- .rlen = 72,
- .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
- "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
- "\x69\x76\x65\x63",
- .alen = 20,
- .input = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
- "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
- "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
- "\x26\x50\x8B\xEB\xD2\xDC\xEB\x18"
- "\xD0\xA6\xDF\x10\xE5\x48\x7D\xF0"
- "\x74\x11\x3E\x14\xC6\x41\x02\x4E"
- "\x3E\x67\x73\xD9\x1A\x62\xEE\x42"
- "\x9B\x04\x3A\x10\xE3\xEF\xE6\xB0"
- "\x12\xA4\x93\x63\x41\x23\x64\xF8"
- "\xC0\xCA\xC5\x87\xF2\x49\xE5\x6B"
- "\x11\xE2\x4F\x30\xE4\x4C\xCC\x76",
- .ilen = 88,
- }, {
- .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
- "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
- "\xD9\x66\x42\x67",
- .klen = 20,
- .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
- .result = "\x01\x02\x02\x01",
- .rlen = 4,
- .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
- "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
- .alen = 16,
- .input = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
- "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
- "\x04\xBE\xF2\x70",
- .ilen = 20,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xDE\xCA\xF8\x88",
- .klen = 20,
- .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
- .result = "\x74\x6F\x01\x62\x65\x01\x6F\x72"
- "\x01\x6E\x6F\x74\x01\x74\x6F\x01"
- "\x62\x65\x00\x01",
- .rlen = 20,
- .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
- "\xCE\xFA\xCE\x74",
- .alen = 20,
- .input = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
- "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
- "\x43\x33\x21\x64\x41\x25\x03\x52"
- "\x43\x03\xED\x3C\x6C\x5F\x28\x38"
- "\x43\xAF\x8C\x3E",
- .ilen = 36,
- }, {
- .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
- "\x6D\x61\x72\x69\x6A\x75\x61\x6E"
- "\x61\x61\x6E\x64\x64\x6F\x69\x74"
- "\x62\x65\x66\x6F\x72\x65\x69\x61"
- "\x74\x75\x72\x6E",
- .klen = 36,
- .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D",
- .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
- "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
- "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
- "\x02\x00\x07\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
- "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
- "\x01\x02\x02\x01",
- .rlen = 52,
- .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
- "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
- "\x67\x65\x74\x6D",
- .alen = 20,
- .input = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
- "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
- "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
- "\x4A\x27\x4B\x39\xB4\x9C\x3A\x86"
- "\x4C\xD3\xD7\x8C\xA4\xAE\x68\xA3"
- "\x2B\x42\x45\x8F\xB5\x7D\xBE\x82"
- "\x1D\xCC\x63\xB9\xD0\x93\x7B\xA2"
- "\x94\x5F\x66\x93\x68\x66\x1A\x32"
- "\x9F\xB4\xC0\x53",
- .ilen = 68,
- }, {
- .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
- "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
- "\x57\x69\x0E\x43",
- .klen = 20,
- .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
- "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
- "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
- "\x02\x00\x07\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
- "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
- "\x01\x02\x02\x01",
- .rlen = 52,
- .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
- "\x10\x10\x10\x10\x4E\x28\x00\x00"
- "\xA2\xFC\xA1\xA3",
- .alen = 20,
- .input = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
- "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
- "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
- "\x0D\x11\x7C\xEC\x9C\x35\x79\x17"
- "\x65\xAC\xBD\x87\x01\xAD\x79\x84"
- "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9"
- "\x63\x21\x93\x06\x84\xEE\xCA\xDB"
- "\x56\x91\x25\x46\xE7\xA9\x5C\x97"
- "\x40\xD7\xCB\x05",
- .ilen = 68,
- }, {
- .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
- "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
- "\x22\x43\x3C\x64",
- .klen = 20,
- .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
- .result = "\x08\x00\xC6\xCD\x02\x00\x07\x00"
- "\x61\x62\x63\x64\x65\x66\x67\x68"
- "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
- "\x71\x72\x73\x74\x01\x02\x02\x01",
- .rlen = 32,
- .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21"
- "\x00\x00\x00\x07\x48\x55\xEC\x7D"
- "\x3A\x23\x4B\xFD",
- .alen = 20,
- .input = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
- "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
- "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
- "\xAC\xDA\x68\x94\xBC\x61\x90\x69"
- "\xEF\x9C\xBC\x28\xFE\x1B\x56\xA7"
- "\xC4\xE0\xD5\x8C\x86\xCD\x2B\xC0",
- .ilen = 48,
- }
-};
-
-static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
+static const struct aead_testvec aes_gcm_rfc4543_tv_template[] = {
{ /* From draft-mcgrew-gcm-test-01 */
.key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
"\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
@@ -18282,15 +17463,15 @@ static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x07"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.alen = 16,
- .input = "\x45\x00\x00\x30\xda\x3a\x00\x00"
+ .ptext = "\x45\x00\x00\x30\xda\x3a\x00\x00"
"\x80\x01\xdf\x3b\xc0\xa8\x00\x05"
"\xc0\xa8\x00\x01\x08\x00\xc6\xcd"
"\x02\x00\x07\x00\x61\x62\x63\x64"
"\x65\x66\x67\x68\x69\x6a\x6b\x6c"
"\x6d\x6e\x6f\x70\x71\x72\x73\x74"
"\x01\x02\x02\x01",
- .ilen = 52,
- .result = "\x45\x00\x00\x30\xda\x3a\x00\x00"
+ .plen = 52,
+ .ctext = "\x45\x00\x00\x30\xda\x3a\x00\x00"
"\x80\x01\xdf\x3b\xc0\xa8\x00\x05"
"\xc0\xa8\x00\x01\x08\x00\xc6\xcd"
"\x02\x00\x07\x00\x61\x62\x63\x64"
@@ -18299,12 +17480,8 @@ static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
"\x01\x02\x02\x01\xf2\xa9\xa8\x36"
"\xe1\x55\x10\x6a\xa8\xdc\xd6\x18"
"\xe4\x09\x9a\xaa",
- .rlen = 68,
- }
-};
-
-static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
- { /* From draft-mcgrew-gcm-test-01 */
+ .clen = 68,
+ }, { /* nearly same as previous, but should fail */
.key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
"\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
"\x22\x43\x3c\x64",
@@ -18313,34 +17490,16 @@ static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x07"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.alen = 16,
- .input = "\x45\x00\x00\x30\xda\x3a\x00\x00"
- "\x80\x01\xdf\x3b\xc0\xa8\x00\x05"
- "\xc0\xa8\x00\x01\x08\x00\xc6\xcd"
- "\x02\x00\x07\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6a\x6b\x6c"
- "\x6d\x6e\x6f\x70\x71\x72\x73\x74"
- "\x01\x02\x02\x01\xf2\xa9\xa8\x36"
- "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18"
- "\xe4\x09\x9a\xaa",
- .ilen = 68,
- .result = "\x45\x00\x00\x30\xda\x3a\x00\x00"
+ .ptext = "\x45\x00\x00\x30\xda\x3a\x00\x00"
"\x80\x01\xdf\x3b\xc0\xa8\x00\x05"
"\xc0\xa8\x00\x01\x08\x00\xc6\xcd"
"\x02\x00\x07\x00\x61\x62\x63\x64"
"\x65\x66\x67\x68\x69\x6a\x6b\x6c"
"\x6d\x6e\x6f\x70\x71\x72\x73\x74"
"\x01\x02\x02\x01",
- .rlen = 52,
- }, { /* nearly same as previous, but should fail */
- .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
- "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
- "\x22\x43\x3c\x64",
- .klen = 20,
- .iv = zeroed_string,
- .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .alen = 16,
- .input = "\x45\x00\x00\x30\xda\x3a\x00\x00"
+ .plen = 52,
+ .novrfy = 1,
+ .ctext = "\x45\x00\x00\x30\xda\x3a\x00\x00"
"\x80\x01\xdf\x3b\xc0\xa8\x00\x05"
"\xc0\xa8\x00\x01\x08\x00\xc6\xcd"
"\x02\x00\x07\x00\x61\x62\x63\x64"
@@ -18349,20 +17508,11 @@ static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
"\x01\x02\x02\x01\xf2\xa9\xa8\x36"
"\xe1\x55\x10\x6a\xa8\xdc\xd6\x18"
"\x00\x00\x00\x00",
- .ilen = 68,
- .novrfy = 1,
- .result = "\x45\x00\x00\x30\xda\x3a\x00\x00"
- "\x80\x01\xdf\x3b\xc0\xa8\x00\x05"
- "\xc0\xa8\x00\x01\x08\x00\xc6\xcd"
- "\x02\x00\x07\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6a\x6b\x6c"
- "\x6d\x6e\x6f\x70\x71\x72\x73\x74"
- "\x01\x02\x02\x01",
- .rlen = 52,
+ .clen = 68,
},
};
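The second rfc4543 entry above is flagged .novrfy = 1 and carries a deliberately corrupted tag ("nearly same as previous, but should fail"), so it is intended for the decrypt direction only. A rough sketch of how such a flag might be interpreted when a combined vector is replayed for decryption, assuming the usual -EBADMSG convention for AEAD authentication failure; the actual handling lives in crypto/testmgr.c and is more involved:

#include <errno.h>
#include <string.h>

/*
 * Illustrative helper, not the real testmgr logic: given the outcome of
 * a decryption attempt, decide whether the vector passed.
 */
static int check_decrypt_result(int err, int novrfy,
				const unsigned char *out, size_t outlen,
				const unsigned char *ptext, size_t plen)
{
	if (novrfy)
		/* The corrupted tag must not verify. */
		return (err == -EBADMSG) ? 0 : -EINVAL;

	if (err)
		return -EINVAL;

	/* Tag verified: the output must equal the original plaintext. */
	if (outlen != plen || memcmp(out, ptext, plen))
		return -EINVAL;

	return 0;
}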
-static const struct aead_testvec aes_ccm_enc_tv_template[] = {
+static const struct aead_testvec aes_ccm_tv_template[] = {
{ /* From RFC 3610 */
.key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
@@ -18371,15 +17521,15 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
.alen = 8,
- .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e",
- .ilen = 23,
- .result = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
+ .plen = 23,
+ .ctext = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
"\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
"\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
"\xe8\xd1\x2c\xfd\xf9\x26\xe0",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
@@ -18389,15 +17539,15 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b",
.alen = 12,
- .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
+ .ptext = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
"\x14\x15\x16\x17\x18\x19\x1a\x1b"
"\x1c\x1d\x1e\x1f",
- .ilen = 20,
- .result = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
+ .plen = 20,
+ .ctext = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
"\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
"\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
"\x7d\x9c\x2d\x93",
- .rlen = 28,
+ .clen = 28,
}, {
.key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
@@ -18406,17 +17556,17 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
.alen = 8,
- .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20",
- .ilen = 25,
- .result = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
+ .plen = 25,
+ .ctext = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
"\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
"\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
"\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
"\x7e\x5f\x4e",
- .rlen = 35,
+ .clen = 35,
}, {
.key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
@@ -18426,15 +17576,15 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b",
.alen = 12,
- .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
+ .ptext = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
"\x14\x15\x16\x17\x18\x19\x1a\x1b"
"\x1c\x1d\x1e",
- .ilen = 19,
- .result = "\x07\x34\x25\x94\x15\x77\x85\x15"
+ .plen = 19,
+ .ctext = "\x07\x34\x25\x94\x15\x77\x85\x15"
"\x2b\x07\x40\x98\x33\x0a\xbb\x14"
"\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
"\x4d\x99\x99\x88\xdd",
- .rlen = 29,
+ .clen = 29,
}, {
.key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
"\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
@@ -18443,15 +17593,15 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\x3c\x96\x96\x76\x6c\xfa\x00\x00",
.assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
.alen = 8,
- .input = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
+ .ptext = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
"\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
"\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
- .ilen = 24,
- .result = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
+ .plen = 24,
+ .ctext = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
"\xa0\x72\x6c\x55\xd3\x78\x06\x12"
"\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
"\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
"\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
@@ -18461,15 +17611,15 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
.assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
"\x20\xea\x60\xc0",
.alen = 12,
- .input = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
+ .ptext = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
"\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
"\x3a\x80\x3b\xa8\x7f",
- .ilen = 21,
- .result = "\x00\x97\x69\xec\xab\xdf\x48\x62"
+ .plen = 21,
+ .ctext = "\x00\x97\x69\xec\xab\xdf\x48\x62"
"\x55\x94\xc5\x92\x51\xe6\x03\x57"
"\x22\x67\x5e\x04\xc8\x47\x09\x9e"
"\x5a\xe0\x70\x45\x51",
- .rlen = 29,
+ .clen = 29,
}, {
.key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
"\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
@@ -18478,16 +17628,16 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\x3c\x96\x96\x76\x6c\xfa\x00\x00",
.assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
.alen = 8,
- .input = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
+ .ptext = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
"\x8e\x5e\x67\x01\xc9\x17\x87\x65"
"\x98\x09\xd6\x7d\xbe\xdd\x18",
- .ilen = 23,
- .result = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
+ .plen = 23,
+ .ctext = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
"\xdb\x38\x6a\x99\xac\x1a\xef\x23"
"\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
"\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
"\xba",
- .rlen = 33,
+ .clen = 33,
}, {
/* This is taken from FIPS CAVS. */
.key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
@@ -18495,18 +17645,18 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
.klen = 16,
.iv = "\x03\x96\xac\x59\x30\x07\xa1\xe2\xa2\xc7\x55\x24\0\0\0\0",
.alen = 0,
- .input = "\x19\xc8\x81\xf6\xe9\x86\xff\x93"
+ .ptext = "\x19\xc8\x81\xf6\xe9\x86\xff\x93"
"\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e"
"\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c"
"\x35\x2e\xad\xe0\x62\xf9\x91\xa1",
- .ilen = 32,
- .result = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8"
+ .plen = 32,
+ .ctext = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8"
"\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1"
"\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a"
"\x57\x2b\xbe\x5d\x98\xa6\xb1\x32"
"\xda\x24\xea\xd9\xa1\x39\x98\xfd"
"\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
"\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3",
@@ -18518,18 +17668,18 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1"
"\x0a\x63\x09\x78\xbc\x2c\x55\xde",
.alen = 32,
- .input = "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
+ .ptext = "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
"\xa9\x28\x63\xba\x12\xa3\x14\x85"
"\x57\x1e\x06\xc9\x7b\x21\xef\x76"
"\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e",
- .ilen = 32,
- .result = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
+ .plen = 32,
+ .ctext = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
"\xfc\x70\xc4\x6d\x8e\xb7\x99\xab"
"\xc5\x4b\xa2\xac\xd3\xf3\x48\xff"
"\x3b\xb5\xce\x53\xef\xde\xbb\x02"
"\xa9\x86\x15\x6c\x13\xfe\xda\x0a"
"\x22\xb8\x29\x3d\xd8\x39\x9a\x23",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
"\xa3\xf0\xff\xdd\x4e\x4b\x12\x75"
@@ -18542,9 +17692,9 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
"\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
.alen = 32,
- .result = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
+ .ctext = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
"\xad\x0c\x7a\x63\xf6\x61\xfd\x9b",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42"
"\xef\x7a\xd3\xce\xfc\x84\x60\x62"
@@ -18557,18 +17707,18 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\xf2\x88\x32\xa3\xf2\x50\xcb\x4c"
"\xe3\x00\x73\x69\x84\x69\x87\x79",
.alen = 32,
- .input = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c"
+ .ptext = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c"
"\x43\x69\x3a\x2d\x8e\x70\xad\x7e"
"\xe0\xe5\x46\x09\x80\x89\x13\xb2"
"\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b",
- .ilen = 32,
- .result = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62"
+ .plen = 32,
+ .ctext = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62"
"\x5d\x51\xc2\x16\xd8\xbd\x06\x9f"
"\x9b\x6a\x09\x70\xc1\x51\x83\xc2"
"\x66\x88\x1d\x4f\x9a\xda\xe0\x1e"
"\xc7\x79\x11\x58\xe5\x6b\x20\x40"
"\x7a\xea\x46\x42\x8b\xe4\x6f\xe1",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a"
"\xbd\x01\x99\xd5\x8a\xdf\x71\x3a"
@@ -18582,17 +17732,17 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\x6b\x75\xcb\x98\x34\x08\x7e\x79"
"\xe4\x3e\x49\x0d\x84\x8b\x22\x87",
.alen = 32,
- .input = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f"
+ .ptext = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f"
"\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66"
"\xbf\x17\x99\x62\x4a\x39\x27\x1f"
"\x1d\xdc\x24\xae\x19\x2f\x98\x4c",
- .ilen = 32,
- .result = "\x19\xb8\x61\x33\x45\x2b\x43\x96"
+ .plen = 32,
+ .ctext = "\x19\xb8\x61\x33\x45\x2b\x43\x96"
"\x6f\x51\xd0\x20\x30\x7d\x9b\xc6"
"\x26\x3d\xf8\xc9\x65\x16\xa8\x9f"
"\xf0\x62\x17\x34\xf2\x1e\x8d\x75"
"\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d",
- .rlen = 40,
+ .clen = 40,
}, {
.key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c"
"\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
@@ -18606,18 +17756,18 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
"\xb0\x39\x36\xe6\x8f\x57\xe0\x13",
.alen = 32,
- .input = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6"
+ .ptext = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6"
"\x83\x72\x07\x4f\xcf\xfa\x66\x89"
"\x5f\xca\xb1\xba\xd5\x8f\x2c\x27"
"\x30\xdb\x75\x09\x93\xd4\x65\xe4",
- .ilen = 32,
- .result = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d"
+ .plen = 32,
+ .ctext = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d"
"\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d"
"\xcc\x63\x44\x25\x07\x78\x4f\x9e"
"\x96\xb8\x88\xeb\xbc\x48\x1f\x06"
"\x39\xaf\x39\xac\xd8\x4a\x80\x39"
"\x7b\x72\x8a\xf7",
- .rlen = 44,
+ .clen = 44,
}, {
.key = "\xab\xd0\xe9\x33\x07\x26\xe5\x83"
"\x8c\x76\x95\xd4\xb6\xdc\xf3\x46"
@@ -18631,147 +17781,18 @@ static const struct aead_testvec aes_ccm_enc_tv_template[] = {
"\xab\x90\x65\x8d\x8e\xca\x4d\x4f"
"\x16\x0c\x40\x90\x4b\xc7\x36\x73",
.alen = 32,
- .input = "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92"
+ .ptext = "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92"
"\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d"
"\x6c\xde\xbc\xf1\x90\xea\x6a\xb2"
"\x35\x86\x36\xaf\x5c\xfe\x4b\x3a",
- .ilen = 32,
- .result = "\x83\x6f\x40\x87\x72\xcf\xc1\x13"
+ .plen = 32,
+ .ctext = "\x83\x6f\x40\x87\x72\xcf\xc1\x13"
"\xef\xbb\x80\x21\x04\x6c\x58\x09"
"\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7"
"\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf"
"\x5c\xda\xb2\x33\xe5\x13\xe2\x0d"
"\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8",
- .rlen = 48,
- }
-};
-
-static const struct aead_testvec aes_ccm_dec_tv_template[] = {
- { /* From RFC 3610 */
- .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
- .klen = 16,
- .iv = "\x01\x00\x00\x00\x03\x02\x01\x00"
- "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
- .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
- .alen = 8,
- .input = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
- "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
- "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
- "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
- .ilen = 31,
- .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e",
- .rlen = 23,
- }, {
- .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
- .klen = 16,
- .iv = "\x01\x00\x00\x00\x07\x06\x05\x04"
- "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
- .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b",
- .alen = 12,
- .input = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
- "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
- "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
- "\x7d\x9c\x2d\x93",
- .ilen = 28,
- .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
- "\x14\x15\x16\x17\x18\x19\x1a\x1b"
- "\x1c\x1d\x1e\x1f",
- .rlen = 20,
- }, {
- .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
- .klen = 16,
- .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08"
- "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
- .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
- .alen = 8,
- .input = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
- "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
- "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
- "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
- "\x7e\x5f\x4e",
- .ilen = 35,
- .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20",
- .rlen = 25,
- }, {
- .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
- .klen = 16,
- .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
- "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
- .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b",
- .alen = 12,
- .input = "\x07\x34\x25\x94\x15\x77\x85\x15"
- "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
- "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
- "\x4d\x99\x99\x88\xdd",
- .ilen = 29,
- .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
- "\x14\x15\x16\x17\x18\x19\x1a\x1b"
- "\x1c\x1d\x1e",
- .rlen = 19,
- }, {
- .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
- "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
- .klen = 16,
- .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
- "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
- .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
- .alen = 8,
- .input = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
- "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
- "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
- "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
- .ilen = 32,
- .result = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
- "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
- "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
- .rlen = 24,
- }, {
- .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
- "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
- .klen = 16,
- .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
- "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
- .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
- "\x20\xea\x60\xc0",
- .alen = 12,
- .input = "\x00\x97\x69\xec\xab\xdf\x48\x62"
- "\x55\x94\xc5\x92\x51\xe6\x03\x57"
- "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
- "\x5a\xe0\x70\x45\x51",
- .ilen = 29,
- .result = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
- "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
- "\x3a\x80\x3b\xa8\x7f",
- .rlen = 21,
- }, {
- .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
- "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
- .klen = 16,
- .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
- "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
- .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
- .alen = 8,
- .input = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
- "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
- "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
- "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
- "\xba",
- .ilen = 33,
- .result = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
- "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
- "\x98\x09\xd6\x7d\xbe\xdd\x18",
- .rlen = 23,
+ .clen = 48,
}, {
/* This is taken from FIPS CAVS. */
.key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
@@ -18780,10 +17801,10 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
.iv = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab"
"\xd8\xa6\xb2\xd8\x00\x00\x00\x00",
.alen = 0,
- .input = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b",
- .ilen = 8,
- .result = "\x00",
- .rlen = 0,
+ .ptext = "\x00",
+ .plen = 0,
+ .ctext = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b",
+ .clen = 8,
.novrfy = 1,
}, {
.key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
@@ -18792,10 +17813,10 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
.iv = "\x03\xaf\x94\x87\x78\x35\x82\x81"
"\x7f\x88\x94\x68\x00\x00\x00\x00",
.alen = 0,
- .input = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3",
- .ilen = 8,
- .result = "\x00",
- .rlen = 0,
+ .ptext = "\x00",
+ .plen = 0,
+ .ctext = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3",
+ .clen = 8,
}, {
.key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
"\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8",
@@ -18807,18 +17828,18 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
"\x04\x1f\x4e\xed\x78\xd5\x33\x66"
"\xd8\x94\x99\x91\x81\x54\x62\x57",
.alen = 32,
- .input = "\xf0\x7c\x29\x02\xae\x1c\x2f\x55"
+ .ptext = "\x50\x82\x3e\x07\xe2\x1e\xb6\xfb"
+ "\x33\xe4\x73\xce\xd2\xfb\x95\x79"
+ "\xe8\xb4\xb5\x77\x11\x10\x62\x6f"
+ "\x6a\x82\xd1\x13\xec\xf5\xd0\x48",
+ .plen = 32,
+ .ctext = "\xf0\x7c\x29\x02\xae\x1c\x2f\x55"
"\xd0\xd1\x3d\x1a\xa3\x6d\xe4\x0a"
"\x86\xb0\x87\x6b\x62\x33\x8c\x34"
"\xce\xab\x57\xcc\x79\x0b\xe0\x6f"
"\x5c\x3e\x48\x1f\x6c\x46\xf7\x51"
"\x8b\x84\x83\x2a\xc1\x05\xb8\xc5",
- .ilen = 48,
- .result = "\x50\x82\x3e\x07\xe2\x1e\xb6\xfb"
- "\x33\xe4\x73\xce\xd2\xfb\x95\x79"
- "\xe8\xb4\xb5\x77\x11\x10\x62\x6f"
- "\x6a\x82\xd1\x13\xec\xf5\xd0\x48",
- .rlen = 32,
+ .clen = 48,
.novrfy = 1,
}, {
.key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
@@ -18831,18 +17852,18 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
"\xd8\x5c\x42\x68\xe0\x6c\xda\x89"
"\x05\xac\x56\xac\x1b\x2a\xd3\x86",
.alen = 32,
- .input = "\x39\xbe\x7d\x15\x62\x77\xf3\x3c"
+ .ptext = "\x75\x05\xbe\xc2\xd9\x1e\xde\x60"
+ "\x47\x3d\x8c\x7d\xbd\xb5\xd9\xb7"
+ "\xf2\xae\x61\x05\x8f\x82\x24\x3f"
+ "\x9c\x67\x91\xe1\x38\x4f\xe4\x0c",
+ .plen = 32,
+ .ctext = "\x39\xbe\x7d\x15\x62\x77\xf3\x3c"
"\xad\x83\x52\x6d\x71\x03\x25\x1c"
"\xed\x81\x3a\x9a\x16\x7d\x19\x80"
"\x72\x04\x72\xd0\xf6\xff\x05\x0f"
"\xb7\x14\x30\x00\x32\x9e\xa0\xa6"
"\x9e\x5a\x18\xa1\xb8\xfe\xdb\xd3",
- .ilen = 48,
- .result = "\x75\x05\xbe\xc2\xd9\x1e\xde\x60"
- "\x47\x3d\x8c\x7d\xbd\xb5\xd9\xb7"
- "\xf2\xae\x61\x05\x8f\x82\x24\x3f"
- "\x9c\x67\x91\xe1\x38\x4f\xe4\x0c",
- .rlen = 32,
+ .clen = 48,
}, {
.key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
"\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
@@ -18855,10 +17876,10 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
"\xa4\xf0\x13\x05\xd1\x77\x99\x67"
"\x11\xc4\xc6\xdb\x00\x56\x36\x61",
.alen = 32,
- .input = "\x71\x99\xfa\xf4\x44\x12\x68\x9b",
- .ilen = 8,
- .result = "\x00",
- .rlen = 0,
+ .ptext = "\x00",
+ .plen = 0,
+ .ctext = "\x71\x99\xfa\xf4\x44\x12\x68\x9b",
+ .clen = 8,
}, {
.key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
"\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
@@ -18871,17 +17892,17 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
"\xa4\xf0\x13\x05\xd1\x77\x99\x67"
"\x11\xc4\xc6\xdb\x00\x56\x36\x61",
.alen = 32,
- .input = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7"
+ .ptext = "\x85\x34\x66\x42\xc8\x92\x0f\x36"
+ "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
+ "\x0a\x85\xcc\x02\xad\x7a\x96\xe9"
+ "\x65\x43\xa4\xc3\x0f\xdc\x55\x81",
+ .plen = 32,
+ .ctext = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7"
"\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
"\x66\xca\x61\x1e\x96\x7a\x61\xb3"
"\x1c\x16\x45\x52\xba\x04\x9c\x9f"
"\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1",
- .ilen = 40,
- .result = "\x85\x34\x66\x42\xc8\x92\x0f\x36"
- "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
- "\x0a\x85\xcc\x02\xad\x7a\x96\xe9"
- "\x65\x43\xa4\xc3\x0f\xdc\x55\x81",
- .rlen = 32,
+ .clen = 40,
}, {
.key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
"\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
@@ -18894,18 +17915,18 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
"\xff\xb6\x81\xbd\xe2\xd5\x06\xc7"
"\x3c\xa1\x52\x13\x03\x8a\x23\x3a",
.alen = 32,
- .input = "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00"
+ .ptext = "\x02\x87\x4d\x28\x80\x6e\xb2\xed"
+ "\x99\x2a\xa8\xca\x04\x25\x45\x90"
+ "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c"
+ "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b",
+ .plen = 32,
+ .ctext = "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00"
"\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37"
"\x8c\x26\x33\xc6\xb2\xa2\x17\xfa"
"\x64\x19\xc0\x30\xd7\xfc\x14\x6b"
"\xe3\x33\xc2\x04\xb0\x37\xbe\x3f"
"\xa9\xb4\x2d\x68\x03\xa3\x44\xef",
- .ilen = 48,
- .result = "\x02\x87\x4d\x28\x80\x6e\xb2\xed"
- "\x99\x2a\xa8\xca\x04\x25\x45\x90"
- "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c"
- "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b",
- .rlen = 32,
+ .clen = 48,
.novrfy = 1,
}, {
.key = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01"
@@ -18916,10 +17937,10 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
.iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
"\x57\xba\xfd\x9e\x00\x00\x00\x00",
.alen = 0,
- .input = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2",
- .ilen = 8,
- .result = "\x00",
- .rlen = 0,
+ .ptext = "\x00",
+ .plen = 0,
+ .ctext = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2",
+ .clen = 8,
}, {
.key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
"\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
@@ -18929,18 +17950,18 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
.iv = "\x03\x85\x34\x66\x42\xc8\x92\x0f"
"\x36\x58\xe0\x6b\x00\x00\x00\x00",
.alen = 0,
- .input = "\x48\x01\x5e\x02\x24\x04\x66\x47"
+ .ptext = "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c"
+ "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99"
+ "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39"
+ "\x89\xd4\x75\x7a\x63\xb1\xda\x93",
+ .plen = 32,
+ .ctext = "\x48\x01\x5e\x02\x24\x04\x66\x47"
"\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd"
"\xa5\xa9\x87\x8d\x84\xee\x2e\x77"
"\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6"
"\x72\xc3\x8e\xf7\x70\xb1\xb2\x07"
"\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a",
- .ilen = 48,
- .result = "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c"
- "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99"
- "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39"
- "\x89\xd4\x75\x7a\x63\xb1\xda\x93",
- .rlen = 32,
+ .clen = 48,
.novrfy = 1,
}, {
.key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
@@ -18955,18 +17976,18 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
"\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
"\xfb\x36\x03\x11\xb4\xd9\xf2\xfe",
.alen = 32,
- .input = "\x48\x58\xd6\xf3\xad\x63\x58\xbf"
+ .ptext = "\xc2\x54\xc8\xde\x78\x87\x77\x40"
+ "\x49\x71\xe4\xb7\xe7\xcb\x76\x61"
+ "\x0a\x41\xb9\xe9\xc0\x76\x54\xab"
+ "\x04\x49\x3b\x19\x93\x57\x25\x5d",
+ .plen = 32,
+ .ctext = "\x48\x58\xd6\xf3\xad\x63\x58\xbf"
"\xae\xc7\x5e\xae\x83\x8f\x7b\xe4"
"\x78\x5c\x4c\x67\x71\x89\x94\xbf"
"\x47\xf1\x63\x7e\x1c\x59\xbd\xc5"
"\x7f\x44\x0a\x0c\x01\x18\x07\x92"
"\xe1\xd3\x51\xce\x32\x6d\x0c\x5b",
- .ilen = 48,
- .result = "\xc2\x54\xc8\xde\x78\x87\x77\x40"
- "\x49\x71\xe4\xb7\xe7\xcb\x76\x61"
- "\x0a\x41\xb9\xe9\xc0\x76\x54\xab"
- "\x04\x49\x3b\x19\x93\x57\x25\x5d",
- .rlen = 32,
+ .clen = 48,
},
};
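The conversion pattern visible above repeats for every AEAD template in this patch: the separate *_enc_tv_template and *_dec_tv_template arrays collapse into a single *_tv_template, .input/.ilen and .result/.rlen become .ptext/.plen and .ctext/.clen, and .novrfy remains only on vectors whose ciphertext or tag is deliberately corrupted, so they only exercise the decrypt-and-verify direction. The stand-alone sketch below is not the kernel's testmgr code; it is a minimal illustration, under that reading of the diff, of how one combined vector can drive both directions (the trimmed struct and the printed expectations are assumptions for the example).

#include <stdio.h>

/* Illustrative subset of a combined AEAD test vector; the real
 * struct aead_testvec in crypto/testmgr.h also carries the key, IV,
 * associated data and the full plaintext/ciphertext byte strings. */
struct aead_tv {
	unsigned int plen;	/* length of .ptext */
	unsigned int clen;	/* length of .ctext (ciphertext + tag) */
	int novrfy;		/* corrupted ctext: decryption must fail */
};

int main(void)
{
	/* Stand-ins for two of the AES-CCM vectors above: a normal one
	 * and a .novrfy one (32-byte plaintext, 16-byte tag). */
	const struct aead_tv tvs[] = {
		{ .plen = 32, .clen = 48, .novrfy = 0 },
		{ .plen = 32, .clen = 48, .novrfy = 1 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(tvs) / sizeof(tvs[0]); i++) {
		if (tvs[i].novrfy)
			printf("vector %u: decrypt only, expect verification failure\n", i);
		else
			printf("vector %u: encrypt %u -> %u bytes, decrypt %u -> %u bytes\n",
			       i, tvs[i].plen, tvs[i].clen,
			       tvs[i].clen, tvs[i].plen);
	}
	return 0;
}

Compiled with any C compiler, this just prints which direction(s) each vector is expected to exercise; the real test manager runs the actual cipher over the full byte strings.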
@@ -18978,649 +17999,36 @@ static const struct aead_testvec aes_ccm_dec_tv_template[] = {
* These vectors are copied/generated from the ones for rfc4106 with
 * the key truncated by one byte.
*/
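Concretely, both templates keep a nonce salt at the tail of .key: rfc4106(gcm(aes)) vectors use a 4-byte salt (klen 20/28/36), while rfc4309(ccm(aes)) uses a 3-byte salt (klen 19/27/35), so dropping the final key byte of an rfc4106 vector yields a usable rfc4309 key, which is all "truncated by one byte" means here. A minimal sketch of that truncation follows; the salt sizes are the RFC 4106/4309 values, and the example key mirrors the second vector in this template with an assumed fourth zero salt byte on the GCM side.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* rfc4106(gcm(aes)) layout: 16-byte AES key followed by a
	 * 4-byte nonce salt (klen = 20). */
	const unsigned char gcm_key[20] = {
		0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
		0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
		0x00, 0x00, 0x00, 0x00,
	};
	/* rfc4309(ccm(aes)) layout: same AES key, 3-byte salt (klen = 19). */
	unsigned char ccm_key[19];

	/* "Truncated by one byte": copy everything except the last salt byte. */
	memcpy(ccm_key, gcm_key, sizeof(ccm_key));

	printf("rfc4106 klen = %zu, rfc4309 klen = %zu\n",
	       sizeof(gcm_key), sizeof(ccm_key));
	return 0;
}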
-static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
+static const struct aead_testvec aes_ccm_rfc4309_tv_template[] = {
{ /* Generated using Crypto++ */
.key = zeroed_string,
.klen = 19,
.iv = zeroed_string,
- .input = zeroed_string,
- .ilen = 16,
- .assoc = zeroed_string,
- .alen = 16,
- .result = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
- "\x12\x50\xE8\xDE\x81\x3C\x63\x08"
- "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5"
- "\x27\x50\x01\xAC\x03\x33\x39\xFB",
- .rlen = 32,
- },{
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00",
- .klen = 19,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = zeroed_string,
- .ilen = 16,
- .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x01",
- .alen = 16,
- .result = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
- "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17"
- "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA"
- "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0",
- .rlen = 32,
-
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00",
- .klen = 19,
- .iv = zeroed_string,
- .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .ilen = 16,
- .assoc = zeroed_string,
- .alen = 16,
- .result = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
- "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
- "\xA1\xE2\xC2\x42\x2B\x81\x70\x40"
- "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C",
- .rlen = 32,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00",
- .klen = 19,
- .iv = zeroed_string,
- .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .ilen = 16,
- .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .alen = 16,
- .result = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
- "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
- "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9"
- "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E",
- .rlen = 32,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00",
- .klen = 19,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .ilen = 16,
- .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x00\x00\x00\x00\x00\x00\x00\x01",
- .alen = 16,
- .result = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
- "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
- "\x43\x8E\x76\x57\x3B\xB4\x05\xE8"
- "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED",
- .rlen = 32,
- }, {
- .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
- "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
- "\x00\x00\x00",
- .klen = 19,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .ilen = 64,
- .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x00\x00\x00\x00\x00\x00\x00\x01",
- .alen = 16,
- .result = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
- "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
- "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4"
- "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02"
- "\x9B\xAB\x39\x18\xEB\x94\x34\x36"
- "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49"
- "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E"
- "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB"
- "\x02\x73\xDD\xE7\x30\x4A\x30\x54"
- "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F",
- .rlen = 80,
- }, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x00\x00\x00",
- .klen = 19,
- .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef",
- .input = "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .ilen = 192,
- .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
- "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
- "\x89\xab\xcd\xef",
- .alen = 20,
- .result = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
- "\x7C\x64\x6D\x33\x46\x77\xAC\xB1"
- "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95"
- "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C"
- "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB"
- "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01"
- "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04"
- "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B"
- "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09"
- "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79"
- "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D"
- "\x22\xEB\xD1\x09\x80\x3F\x5A\x70"
- "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B"
- "\x36\x12\x00\x89\xAA\x5D\x55\xDA"
- "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6"
- "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F"
- "\x2B\x50\x44\x52\xC2\x10\x7D\x38"
- "\x82\x64\x83\x0C\xAE\x49\xD0\xE5"
- "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43"
- "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC"
- "\x19\x6D\x60\x66\x61\xF9\x9A\x3F"
- "\x75\xFC\x38\x53\x5B\xB5\xCD\x52"
- "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98"
- "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E"
- "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC"
- "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A",
- .rlen = 208,
- }, { /* From draft-mcgrew-gcm-test-01 */
- .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
- "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
- "\x2E\x44\x3B",
- .klen = 19,
- .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
- .input = "\x45\x00\x00\x48\x69\x9A\x00\x00"
- "\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
- "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
- "\x38\xD3\x01\x00\x00\x01\x00\x00"
- "\x00\x00\x00\x00\x04\x5F\x73\x69"
- "\x70\x04\x5F\x75\x64\x70\x03\x73"
- "\x69\x70\x09\x63\x79\x62\x65\x72"
- "\x63\x69\x74\x79\x02\x64\x6B\x00"
- "\x00\x21\x00\x01\x01\x02\x02\x01",
- .ilen = 72,
- .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21"
- "\x00\x00\x00\x00\x49\x56\xED\x7E"
- "\x3B\x24\x4C\xFE",
- .alen = 20,
- .result = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
- "\x83\x60\xF5\xBA\x3A\x56\x79\xE6"
- "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E"
- "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF"
- "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0"
- "\x39\xF8\x53\x6D\x31\x22\x2B\xBF"
- "\x98\x81\xFC\x34\xEE\x85\x36\xCD"
- "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35"
- "\x18\x85\x54\xB2\xBC\xDD\x3F\x43"
- "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC"
- "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF",
- .rlen = 88,
- }, {
- .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
- "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
- "\xCA\xFE\xBA",
- .klen = 19,
- .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .input = "\x45\x00\x00\x3E\x69\x8F\x00\x00"
- "\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
- "\xC0\xA8\x01\x01\x0A\x98\x00\x35"
- "\x00\x2A\x23\x43\xB2\xD0\x01\x00"
- "\x00\x01\x00\x00\x00\x00\x00\x00"
- "\x03\x73\x69\x70\x09\x63\x79\x62"
- "\x65\x72\x63\x69\x74\x79\x02\x64"
- "\x6B\x00\x00\x01\x00\x01\x00\x01",
- .ilen = 64,
- .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
- "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .alen = 16,
- .result = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
- "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16"
- "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF"
- "\xF5\x71\xB6\x37\x84\xA7\xB1\x99"
- "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D"
- "\xEF\x25\x88\x1F\x1D\x77\x11\xFF"
- "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B"
- "\x00\x62\x1F\x68\x4E\x7C\xA0\x97"
- "\x10\x72\x7E\x53\x13\x3B\x68\xE4"
- "\x30\x99\x91\x79\x09\xEA\xFF\x6A",
- .rlen = 80,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\x11\x22\x33",
- .klen = 35,
- .iv = "\x01\x02\x03\x04\x05\x06\x07\x08",
- .input = "\x45\x00\x00\x30\x69\xA6\x40\x00"
- "\x80\x06\x26\x90\xC0\xA8\x01\x02"
- "\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
- "\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
- "\x70\x02\x40\x00\x20\xBF\x00\x00"
- "\x02\x04\x05\xB4\x01\x01\x04\x02"
- "\x01\x02\x02\x01",
- .ilen = 52,
- .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
- "\x01\x02\x03\x04\x05\x06\x07\x08",
- .alen = 16,
- .result = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
- "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4"
- "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD"
- "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55"
- "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10"
- "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B"
- "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92"
- "\x23\xA6\x10\xB0\x26\xD6\xD9\x26"
- "\x5A\x48\x6A\x3E",
- .rlen = 68,
- }, {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00",
- .klen = 19,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x45\x00\x00\x3C\x99\xC5\x00\x00"
- "\x80\x01\xCB\x7A\x40\x67\x93\x18"
- "\x01\x01\x01\x01\x08\x00\x07\x5C"
- "\x02\x00\x44\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
- "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
- "\x75\x76\x77\x61\x62\x63\x64\x65"
- "\x66\x67\x68\x69\x01\x02\x02\x01",
- .ilen = 64,
- .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .alen = 16,
- .result = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
- "\x92\x51\x23\xA4\xC1\x5B\xF0\x10"
- "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC"
- "\x89\xC8\xF8\x42\x62\x95\xB7\xCB"
- "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7"
- "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C"
- "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9"
- "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE"
- "\x36\x25\xC1\x10\x12\x1C\xCA\x82"
- "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A",
- .rlen = 80,
- }, {
- .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
- "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
- "\x57\x69\x0E",
- .klen = 19,
- .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .input = "\x45\x00\x00\x3C\x99\xC3\x00\x00"
- "\x80\x01\xCB\x7C\x40\x67\x93\x18"
- "\x01\x01\x01\x01\x08\x00\x08\x5C"
- "\x02\x00\x43\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
- "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
- "\x75\x76\x77\x61\x62\x63\x64\x65"
- "\x66\x67\x68\x69\x01\x02\x02\x01",
- .ilen = 64,
- .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
- "\x10\x10\x10\x10\x4E\x28\x00\x00"
- "\xA2\xFC\xA1\xA3",
- .alen = 20,
- .result = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
- "\x10\x60\x40\x62\x6B\x4F\x97\x8E"
- "\x0B\xB2\x22\x97\xCB\x21\xE0\x90"
- "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B"
- "\x79\x01\x58\x50\x01\x06\xE1\xE0"
- "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
- "\x30\xB8\xE5\xDF\xD7\x12\x56\x75"
- "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD"
- "\x97\x57\xCA\xC1\x20\xD0\x86\xB9"
- "\x66\x9D\xB4\x2B\x96\x22\xAC\x67",
- .rlen = 80,
- }, {
- .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
- "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
- "\x57\x69\x0E",
- .klen = 19,
- .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .input = "\x45\x00\x00\x1C\x42\xA2\x00\x00"
- "\x80\x01\x44\x1F\x40\x67\x93\xB6"
- "\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
- "\x01\x02\x02\x01",
- .ilen = 28,
- .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
- "\x10\x10\x10\x10\x4E\x28\x00\x00"
- "\xA2\xFC\xA1\xA3",
- .alen = 20,
- .result = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
- "\x10\x60\xCF\x01\x6B\x4F\x97\x20"
- "\xEA\xB3\x23\x94\xC9\x21\x1D\x33"
- "\xA1\xE5\x90\x40\x05\x37\x45\x70"
- "\xB5\xD6\x09\x0A\x23\x73\x33\xF9"
- "\x08\xB4\x22\xE4",
- .rlen = 44,
- }, {
- .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
- "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
- "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
- "\xCA\xFE\xBA",
- .klen = 27,
- .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .input = "\x45\x00\x00\x28\xA4\xAD\x40\x00"
- "\x40\x06\x78\x80\x0A\x01\x03\x8F"
- "\x0A\x01\x06\x12\x80\x23\x06\xB8"
- "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
- "\x50\x10\x16\xD0\x75\x68\x00\x01",
- .ilen = 40,
- .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
- "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .alen = 16,
- .result = "\x05\x22\x15\xD1\x52\x56\x85\x04"
- "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA"
- "\xEA\x16\x37\x50\xF3\xDF\x84\x3B"
- "\x2F\x32\x18\x57\x34\x2A\x8C\x23"
- "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB"
- "\x34\xA5\x9F\x6C\x48\x30\x1E\x22"
- "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B",
- .rlen = 56,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xDE\xCA\xF8",
- .klen = 19,
- .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
- .input = "\x45\x00\x00\x49\x33\xBA\x00\x00"
- "\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
- "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
- "\x00\x35\xDD\x7B\x80\x03\x02\xD5"
- "\x00\x00\x4E\x20\x00\x1E\x8C\x18"
- "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47"
- "\x6B\x91\xB9\x24\xB2\x80\x38\x9D"
- "\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
- "\x9B\x62\x66\xC0\x47\x22\xB1\x49"
- "\x23\x01\x01\x01",
- .ilen = 76,
- .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
- "\xCE\xFA\xCE\x74",
- .alen = 20,
- .result = "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
- "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90"
- "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76"
- "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00"
- "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA"
- "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7"
- "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6"
- "\x63\xC9\xDB\x05\x69\x51\x12\xAD"
- "\x3E\x00\x32\x73\x86\xF2\xEE\xF5"
- "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D"
- "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7"
- "\x12\x25\x0B\xF9",
- .rlen = 92,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\x73\x61\x6C",
- .klen = 35,
- .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63",
- .input = "\x45\x08\x00\x28\x73\x2C\x00\x00"
- "\x40\x06\xE9\xF9\x0A\x01\x06\x12"
- "\x0A\x01\x03\x8F\x06\xB8\x80\x23"
- "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
- "\x50\x10\x1F\x64\x6D\x54\x00\x01",
- .ilen = 40,
- .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
- "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
- "\x69\x76\x65\x63",
- .alen = 20,
- .result = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
- "\x2C\x64\x87\x46\x1E\x34\x10\x05"
- "\x29\x6B\xBB\x36\xE9\x69\xAD\x92"
- "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D"
- "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA"
- "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7"
- "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D",
- .rlen = 56,
- }, {
- .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
- "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
- "\x57\x69\x0E",
- .klen = 19,
- .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .input = "\x45\x00\x00\x49\x33\x3E\x00\x00"
- "\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
- "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
- "\x00\x35\xCB\x45\x80\x03\x02\x5B"
- "\x00\x00\x01\xE0\x00\x1E\x8C\x18"
- "\xD6\x57\x59\xD5\x22\x84\xA0\x35"
- "\x2C\x71\x47\x5C\x88\x80\x39\x1C"
- "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
- "\x5A\xE2\x70\xC0\x38\x99\x49\x39"
- "\x15\x01\x01\x01",
- .ilen = 76,
- .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
- "\x10\x10\x10\x10\x4E\x28\x00\x00"
- "\xA2\xFC\xA1\xA3",
- .alen = 20,
- .result = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
- "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86"
- "\xC8\x02\xF0\xB0\x03\x09\xD9\x02"
- "\xA0\xD2\x59\x04\xD1\x85\x2A\x24"
- "\x1C\x67\x3E\xD8\x68\x72\x06\x94"
- "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B"
- "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C"
- "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE"
- "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5"
- "\x23\xF4\x84\x40\x74\x14\x8A\x6B"
- "\xDB\xD7\x67\xED\xA4\x93\xF3\x47"
- "\xCC\xF7\x46\x6F",
- .rlen = 92,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\x73\x61\x6C",
- .klen = 35,
- .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63",
- .input = "\x63\x69\x73\x63\x6F\x01\x72\x75"
- "\x6C\x65\x73\x01\x74\x68\x65\x01"
- "\x6E\x65\x74\x77\x65\x01\x64\x65"
- "\x66\x69\x6E\x65\x01\x74\x68\x65"
- "\x74\x65\x63\x68\x6E\x6F\x6C\x6F"
- "\x67\x69\x65\x73\x01\x74\x68\x61"
- "\x74\x77\x69\x6C\x6C\x01\x64\x65"
- "\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
- "\x72\x72\x6F\x77\x01\x02\x02\x01",
- .ilen = 72,
- .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
- "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
- "\x69\x76\x65\x63",
- .alen = 20,
- .result = "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
- "\x00\x07\x1D\xBE\x60\x5D\x73\x16"
- "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4"
- "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A"
- "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4"
- "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5"
- "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6"
- "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C"
- "\x73\xF4\xE7\x12\x84\x4C\x37\x0A"
- "\x4A\x8F\x06\x37\x48\xF9\xF9\x05"
- "\x55\x13\x40\xC3\xD5\x55\x3A\x3D",
- .rlen = 88,
- }, {
- .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
- "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
- "\xD9\x66\x42",
- .klen = 19,
- .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
- .input = "\x01\x02\x02\x01",
- .ilen = 4,
- .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
- "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
- .alen = 16,
- .result = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
- "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90"
- "\xF7\x61\x24\x62",
- .rlen = 20,
- }, {
- .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
- "\x34\x45\x56\x67\x78\x89\x9A\xAB"
- "\xDE\xCA\xF8",
- .klen = 19,
- .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
- .input = "\x74\x6F\x01\x62\x65\x01\x6F\x72"
- "\x01\x6E\x6F\x74\x01\x74\x6F\x01"
- "\x62\x65\x00\x01",
- .ilen = 20,
- .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
- "\xCE\xFA\xCE\x74",
- .alen = 20,
- .result = "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
- "\x03\x9B\x84\xFC\x44\x8C\xBB\x81"
- "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0"
- "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC"
- "\x17\x17\x65\xAD",
- .rlen = 36,
- }, {
- .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
- "\x6D\x61\x72\x69\x6A\x75\x61\x6E"
- "\x61\x61\x6E\x64\x64\x6F\x69\x74"
- "\x62\x65\x66\x6F\x72\x65\x69\x61"
- "\x74\x75\x72",
- .klen = 35,
- .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D",
- .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
- "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
- "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
- "\x02\x00\x07\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
- "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
- "\x01\x02\x02\x01",
- .ilen = 52,
- .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
- "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
- "\x67\x65\x74\x6D",
- .alen = 20,
- .result = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
- "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A"
- "\x48\x59\x80\x18\x1A\x18\x1A\x04"
- "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75"
- "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF"
- "\x16\xC3\x35\xA8\xE7\xCE\x84\x04"
- "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42"
- "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B"
- "\x39\xDB\xC8\xDC",
- .rlen = 68,
- }, {
- .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
- "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
- "\x57\x69\x0E",
- .klen = 19,
- .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
- "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
- "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
- "\x02\x00\x07\x00\x61\x62\x63\x64"
- "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
- "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
- "\x01\x02\x02\x01",
- .ilen = 52,
- .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
- "\x10\x10\x10\x10\x4E\x28\x00\x00"
- "\xA2\xFC\xA1\xA3",
- .alen = 20,
- .result = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
- "\x10\x60\x54\x25\xEB\x80\x04\x93"
- "\xCA\x1B\x23\x97\xCB\x21\x2E\x01"
- "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B"
- "\x79\x01\x58\x50\x01\x06\xE1\xE0"
- "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
- "\x44\xCC\x90\xBF\x00\x94\x94\x92"
- "\x20\x17\x0C\x1B\x55\xDE\x7E\x68"
- "\xF4\x95\x5D\x4F",
- .rlen = 68,
- }, {
- .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
- "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
- "\x22\x43\x3C",
- .klen = 19,
- .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
- .input = "\x08\x00\xC6\xCD\x02\x00\x07\x00"
- "\x61\x62\x63\x64\x65\x66\x67\x68"
- "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
- "\x71\x72\x73\x74\x01\x02\x02\x01",
- .ilen = 32,
- .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21"
- "\x00\x00\x00\x07\x48\x55\xEC\x7D"
- "\x3A\x23\x4B\xFD",
- .alen = 20,
- .result = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
- "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F"
- "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF"
- "\x58\x7D\xCF\x29\xA3\x41\x68\x6B"
- "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F"
- "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6",
- .rlen = 48,
- }
-};
-
-static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
- { /* Generated using Crypto++ */
- .key = zeroed_string,
- .klen = 19,
- .iv = zeroed_string,
- .result = zeroed_string,
- .rlen = 16,
+ .ptext = zeroed_string,
+ .plen = 16,
.assoc = zeroed_string,
.alen = 16,
- .input = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
+ .ctext = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
"\x12\x50\xE8\xDE\x81\x3C\x63\x08"
"\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5"
"\x27\x50\x01\xAC\x03\x33\x39\xFB",
- .ilen = 32,
+ .clen = 32,
},{
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\x00\x00\x00",
.klen = 19,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .result = zeroed_string,
- .rlen = 16,
+ .ptext = zeroed_string,
+ .plen = 16,
.assoc = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x01",
.alen = 16,
- .input = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
+ .ctext = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
"\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17"
"\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA"
"\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0",
- .ilen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
@@ -19628,57 +18036,57 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x00\x00\x00",
.klen = 19,
.iv = zeroed_string,
- .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01",
- .rlen = 16,
+ .plen = 16,
.assoc = zeroed_string,
.alen = 16,
- .input = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+ .ctext = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
"\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
"\xA1\xE2\xC2\x42\x2B\x81\x70\x40"
"\xFD\x7F\x76\xD1\x03\x07\xBB\x0C",
- .ilen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\x00\x00\x00",
.klen = 19,
.iv = zeroed_string,
- .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01",
- .rlen = 16,
+ .plen = 16,
.assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.alen = 16,
- .input = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+ .ctext = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
"\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
"\x5B\xC0\x73\xE0\x2B\x73\x68\xC9"
"\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E",
- .ilen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\x00\x00\x00",
.klen = 19,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01",
- .rlen = 16,
+ .plen = 16,
.assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x00\x00\x00\x00\x00\x00\x00\x01",
.alen = 16,
- .input = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+ .ctext = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
"\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
"\x43\x8E\x76\x57\x3B\xB4\x05\xE8"
"\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED",
- .ilen = 32,
+ .clen = 32,
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\x00\x00\x00",
.klen = 19,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x01",
- .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01"
@@ -19686,11 +18094,11 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01"
"\x01\x01\x01\x01\x01\x01\x01\x01",
- .rlen = 64,
+ .plen = 64,
.assoc = "\x01\x01\x01\x01\x01\x01\x01\x01"
"\x00\x00\x00\x00\x00\x00\x00\x01",
.alen = 16,
- .input = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+ .ctext = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
"\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
"\x9C\xA4\x97\x83\x3F\x01\xA5\xF4"
"\x43\x09\xE7\xB8\xE9\xD1\xD7\x02"
@@ -19700,14 +18108,14 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB"
"\x02\x73\xDD\xE7\x30\x4A\x30\x54"
"\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F",
- .ilen = 80,
+ .clen = 80,
}, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x00\x00\x00",
.klen = 19,
.iv = "\x00\x00\x45\x67\x89\xab\xcd\xef",
- .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ .ptext = "\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff"
@@ -19731,12 +18139,12 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
- .rlen = 192,
+ .plen = 192,
.assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
"\x89\xab\xcd\xef",
.alen = 20,
- .input = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
+ .ctext = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
"\x7C\x64\x6D\x33\x46\x77\xAC\xB1"
"\x5C\x9E\xE2\xC7\x27\x11\x3E\x95"
"\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C"
@@ -19762,14 +18170,14 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E"
"\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC"
"\x39\xAB\x63\xA5\x3D\x9C\x51\x8A",
- .ilen = 208,
+ .clen = 208,
}, { /* From draft-mcgrew-gcm-test-01 */
.key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
"\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
"\x2E\x44\x3B",
.klen = 19,
.iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
- .result = "\x45\x00\x00\x48\x69\x9A\x00\x00"
+ .ptext = "\x45\x00\x00\x48\x69\x9A\x00\x00"
"\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
"\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
"\x38\xD3\x01\x00\x00\x01\x00\x00"
@@ -19778,12 +18186,12 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x69\x70\x09\x63\x79\x62\x65\x72"
"\x63\x69\x74\x79\x02\x64\x6B\x00"
"\x00\x21\x00\x01\x01\x02\x02\x01",
- .rlen = 72,
+ .plen = 72,
.assoc = "\x00\x00\x43\x21\x87\x65\x43\x21"
"\x00\x00\x00\x00\x49\x56\xED\x7E"
"\x3B\x24\x4C\xFE",
.alen = 20,
- .input = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
+ .ctext = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
"\x83\x60\xF5\xBA\x3A\x56\x79\xE6"
"\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E"
"\x1A\x26\x01\x24\xC7\x2E\x3D\xBF"
@@ -19794,14 +18202,14 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x18\x85\x54\xB2\xBC\xDD\x3F\x43"
"\x61\x06\x8A\xDF\x86\x3F\xB4\xAC"
"\x97\xDC\xBD\xFD\x92\x10\xC5\xFF",
- .ilen = 88,
+ .clen = 88,
}, {
.key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
"\x6D\x6A\x8F\x94\x67\x30\x83\x08"
"\xCA\xFE\xBA",
.klen = 19,
.iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .result = "\x45\x00\x00\x3E\x69\x8F\x00\x00"
+ .ptext = "\x45\x00\x00\x3E\x69\x8F\x00\x00"
"\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
"\xC0\xA8\x01\x01\x0A\x98\x00\x35"
"\x00\x2A\x23\x43\xB2\xD0\x01\x00"
@@ -19809,11 +18217,11 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x03\x73\x69\x70\x09\x63\x79\x62"
"\x65\x72\x63\x69\x74\x79\x02\x64"
"\x6B\x00\x00\x01\x00\x01\x00\x01",
- .rlen = 64,
+ .plen = 64,
.assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
"\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
.alen = 16,
- .input = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
+ .ctext = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
"\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16"
"\x56\xC7\xD2\x88\xBA\x8D\x58\xAF"
"\xF5\x71\xB6\x37\x84\xA7\xB1\x99"
@@ -19823,7 +18231,7 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x00\x62\x1F\x68\x4E\x7C\xA0\x97"
"\x10\x72\x7E\x53\x13\x3B\x68\xE4"
"\x30\x99\x91\x79\x09\xEA\xFF\x6A",
- .ilen = 80,
+ .clen = 80,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
@@ -19832,18 +18240,18 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x11\x22\x33",
.klen = 35,
.iv = "\x01\x02\x03\x04\x05\x06\x07\x08",
- .result = "\x45\x00\x00\x30\x69\xA6\x40\x00"
+ .ptext = "\x45\x00\x00\x30\x69\xA6\x40\x00"
"\x80\x06\x26\x90\xC0\xA8\x01\x02"
"\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
"\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
"\x70\x02\x40\x00\x20\xBF\x00\x00"
"\x02\x04\x05\xB4\x01\x01\x04\x02"
"\x01\x02\x02\x01",
- .rlen = 52,
+ .plen = 52,
.assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
"\x01\x02\x03\x04\x05\x06\x07\x08",
.alen = 16,
- .input = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
+ .ctext = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
"\x58\x41\x7E\xFF\x9A\x9E\x09\xB4"
"\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD"
"\x27\x0E\x2C\xF2\xDB\x10\xDF\x55"
@@ -19852,14 +18260,14 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x11\xD4\x0B\x12\xEC\xB4\xB1\x92"
"\x23\xA6\x10\xB0\x26\xD6\xD9\x26"
"\x5A\x48\x6A\x3E",
- .ilen = 68,
+ .clen = 68,
}, {
.key = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00",
.klen = 19,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
- .result = "\x45\x00\x00\x3C\x99\xC5\x00\x00"
+ .ptext = "\x45\x00\x00\x3C\x99\xC5\x00\x00"
"\x80\x01\xCB\x7A\x40\x67\x93\x18"
"\x01\x01\x01\x01\x08\x00\x07\x5C"
"\x02\x00\x44\x00\x61\x62\x63\x64"
@@ -19867,11 +18275,11 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x6D\x6E\x6F\x70\x71\x72\x73\x74"
"\x75\x76\x77\x61\x62\x63\x64\x65"
"\x66\x67\x68\x69\x01\x02\x02\x01",
- .rlen = 64,
+ .plen = 64,
.assoc = "\x00\x00\x00\x00\x00\x00\x00\x01"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.alen = 16,
- .input = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
+ .ctext = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
"\x92\x51\x23\xA4\xC1\x5B\xF0\x10"
"\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC"
"\x89\xC8\xF8\x42\x62\x95\xB7\xCB"
@@ -19881,14 +18289,14 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE"
"\x36\x25\xC1\x10\x12\x1C\xCA\x82"
"\xEA\xE6\x63\x5A\x57\x28\xA9\x9A",
- .ilen = 80,
+ .clen = 80,
}, {
.key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
"\x57\x69\x0E",
.klen = 19,
.iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .result = "\x45\x00\x00\x3C\x99\xC3\x00\x00"
+ .ptext = "\x45\x00\x00\x3C\x99\xC3\x00\x00"
"\x80\x01\xCB\x7C\x40\x67\x93\x18"
"\x01\x01\x01\x01\x08\x00\x08\x5C"
"\x02\x00\x43\x00\x61\x62\x63\x64"
@@ -19896,12 +18304,12 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x6D\x6E\x6F\x70\x71\x72\x73\x74"
"\x75\x76\x77\x61\x62\x63\x64\x65"
"\x66\x67\x68\x69\x01\x02\x02\x01",
- .rlen = 64,
+ .plen = 64,
.assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
"\x10\x10\x10\x10\x4E\x28\x00\x00"
"\xA2\xFC\xA1\xA3",
.alen = 20,
- .input = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
+ .ctext = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
"\x10\x60\x40\x62\x6B\x4F\x97\x8E"
"\x0B\xB2\x22\x97\xCB\x21\xE0\x90"
"\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B"
@@ -19911,29 +18319,29 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\xD0\x95\xB7\xB8\x91\x42\xF7\xFD"
"\x97\x57\xCA\xC1\x20\xD0\x86\xB9"
"\x66\x9D\xB4\x2B\x96\x22\xAC\x67",
- .ilen = 80,
+ .clen = 80,
}, {
.key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
"\x57\x69\x0E",
.klen = 19,
.iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .result = "\x45\x00\x00\x1C\x42\xA2\x00\x00"
+ .ptext = "\x45\x00\x00\x1C\x42\xA2\x00\x00"
"\x80\x01\x44\x1F\x40\x67\x93\xB6"
"\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
"\x01\x02\x02\x01",
- .rlen = 28,
+ .plen = 28,
.assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
"\x10\x10\x10\x10\x4E\x28\x00\x00"
"\xA2\xFC\xA1\xA3",
.alen = 20,
- .input = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
+ .ctext = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
"\x10\x60\xCF\x01\x6B\x4F\x97\x20"
"\xEA\xB3\x23\x94\xC9\x21\x1D\x33"
"\xA1\xE5\x90\x40\x05\x37\x45\x70"
"\xB5\xD6\x09\x0A\x23\x73\x33\xF9"
"\x08\xB4\x22\xE4",
- .ilen = 44,
+ .clen = 44,
}, {
.key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
"\x6D\x6A\x8F\x94\x67\x30\x83\x08"
@@ -19941,30 +18349,30 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\xCA\xFE\xBA",
.klen = 27,
.iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
- .result = "\x45\x00\x00\x28\xA4\xAD\x40\x00"
+ .ptext = "\x45\x00\x00\x28\xA4\xAD\x40\x00"
"\x40\x06\x78\x80\x0A\x01\x03\x8F"
"\x0A\x01\x06\x12\x80\x23\x06\xB8"
"\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
"\x50\x10\x16\xD0\x75\x68\x00\x01",
- .rlen = 40,
+ .plen = 40,
.assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
"\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
.alen = 16,
- .input = "\x05\x22\x15\xD1\x52\x56\x85\x04"
+ .ctext = "\x05\x22\x15\xD1\x52\x56\x85\x04"
"\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA"
"\xEA\x16\x37\x50\xF3\xDF\x84\x3B"
"\x2F\x32\x18\x57\x34\x2A\x8C\x23"
"\x67\xDF\x6D\x35\x7B\x54\x0D\xFB"
"\x34\xA5\x9F\x6C\x48\x30\x1E\x22"
"\xFE\xB1\x22\x17\x17\x8A\xB9\x5B",
- .ilen = 56,
+ .clen = 56,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
"\xDE\xCA\xF8",
.klen = 19,
.iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
- .result = "\x45\x00\x00\x49\x33\xBA\x00\x00"
+ .ptext = "\x45\x00\x00\x49\x33\xBA\x00\x00"
"\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
"\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
"\x00\x35\xDD\x7B\x80\x03\x02\xD5"
@@ -19974,12 +18382,12 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
"\x9B\x62\x66\xC0\x47\x22\xB1\x49"
"\x23\x01\x01\x01",
- .rlen = 76,
+ .plen = 76,
.assoc = "\x00\x00\x01\x00\x00\x00\x00\x00"
"\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
"\xCE\xFA\xCE\x74",
.alen = 20,
- .input = "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
+ .ctext = "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
"\x7D\xE4\x7A\x8E\x86\x03\xC9\x90"
"\x96\x35\xAB\x9C\xFB\xE8\xA3\x76"
"\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00"
@@ -19991,7 +18399,7 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D"
"\x76\xD6\x55\xC6\xB4\xC2\x34\xC7"
"\x12\x25\x0B\xF9",
- .ilen = 92,
+ .clen = 92,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
@@ -20000,31 +18408,31 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x73\x61\x6C",
.klen = 35,
.iv = "\x61\x6E\x64\x01\x69\x76\x65\x63",
- .result = "\x45\x08\x00\x28\x73\x2C\x00\x00"
+ .ptext = "\x45\x08\x00\x28\x73\x2C\x00\x00"
"\x40\x06\xE9\xF9\x0A\x01\x06\x12"
"\x0A\x01\x03\x8F\x06\xB8\x80\x23"
"\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
"\x50\x10\x1F\x64\x6D\x54\x00\x01",
- .rlen = 40,
+ .plen = 40,
.assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
"\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
"\x69\x76\x65\x63",
.alen = 20,
- .input = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
+ .ctext = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
"\x2C\x64\x87\x46\x1E\x34\x10\x05"
"\x29\x6B\xBB\x36\xE9\x69\xAD\x92"
"\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D"
"\x08\xBA\xF3\x91\xCA\xAA\x61\xDA"
"\x62\xF4\x14\x61\x5C\x9D\xB5\xA7"
"\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D",
- .ilen = 56,
+ .clen = 56,
}, {
.key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
"\x57\x69\x0E",
.klen = 19,
.iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .result = "\x45\x00\x00\x49\x33\x3E\x00\x00"
+ .ptext = "\x45\x00\x00\x49\x33\x3E\x00\x00"
"\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
"\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
"\x00\x35\xCB\x45\x80\x03\x02\x5B"
@@ -20034,12 +18442,12 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
"\x5A\xE2\x70\xC0\x38\x99\x49\x39"
"\x15\x01\x01\x01",
- .rlen = 76,
+ .plen = 76,
.assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
"\x10\x10\x10\x10\x4E\x28\x00\x00"
"\xA2\xFC\xA1\xA3",
.alen = 20,
- .input = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
+ .ctext = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
"\xEF\x70\x1A\x9C\xE8\xD3\x19\x86"
"\xC8\x02\xF0\xB0\x03\x09\xD9\x02"
"\xA0\xD2\x59\x04\xD1\x85\x2A\x24"
@@ -20051,7 +18459,7 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x23\xF4\x84\x40\x74\x14\x8A\x6B"
"\xDB\xD7\x67\xED\xA4\x93\xF3\x47"
"\xCC\xF7\x46\x6F",
- .ilen = 92,
+ .clen = 92,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
@@ -20060,7 +18468,7 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x73\x61\x6C",
.klen = 35,
.iv = "\x61\x6E\x64\x01\x69\x76\x65\x63",
- .result = "\x63\x69\x73\x63\x6F\x01\x72\x75"
+ .ptext = "\x63\x69\x73\x63\x6F\x01\x72\x75"
"\x6C\x65\x73\x01\x74\x68\x65\x01"
"\x6E\x65\x74\x77\x65\x01\x64\x65"
"\x66\x69\x6E\x65\x01\x74\x68\x65"
@@ -20069,12 +18477,12 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x74\x77\x69\x6C\x6C\x01\x64\x65"
"\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
"\x72\x72\x6F\x77\x01\x02\x02\x01",
- .rlen = 72,
+ .plen = 72,
.assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26"
"\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
"\x69\x76\x65\x63",
.alen = 20,
- .input = "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
+ .ctext = "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
"\x00\x07\x1D\xBE\x60\x5D\x73\x16"
"\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4"
"\x39\xA3\xD1\xB1\x21\x0A\x92\x1A"
@@ -20085,42 +18493,42 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x73\xF4\xE7\x12\x84\x4C\x37\x0A"
"\x4A\x8F\x06\x37\x48\xF9\xF9\x05"
"\x55\x13\x40\xC3\xD5\x55\x3A\x3D",
- .ilen = 88,
+ .clen = 88,
}, {
.key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
"\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
"\xD9\x66\x42",
.klen = 19,
.iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
- .result = "\x01\x02\x02\x01",
- .rlen = 4,
+ .ptext = "\x01\x02\x02\x01",
+ .plen = 4,
.assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
"\x43\x45\x7E\x91\x82\x44\x3B\xC6",
.alen = 16,
- .input = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
+ .ctext = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
"\xD0\xD8\x60\x9D\x8B\xEF\x85\x90"
"\xF7\x61\x24\x62",
- .ilen = 20,
+ .clen = 20,
}, {
.key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
"\x34\x45\x56\x67\x78\x89\x9A\xAB"
"\xDE\xCA\xF8",
.klen = 19,
.iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
- .result = "\x74\x6F\x01\x62\x65\x01\x6F\x72"
+ .ptext = "\x74\x6F\x01\x62\x65\x01\x6F\x72"
"\x01\x6E\x6F\x74\x01\x74\x6F\x01"
"\x62\x65\x00\x01",
- .rlen = 20,
+ .plen = 20,
.assoc = "\x00\x00\x01\x00\x00\x00\x00\x00"
"\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
"\xCE\xFA\xCE\x74",
.alen = 20,
- .input = "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
+ .ctext = "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
"\x03\x9B\x84\xFC\x44\x8C\xBB\x81"
"\x36\xE1\x78\xBB\xA5\x49\x3A\xD0"
"\xF0\x6B\x21\xAF\x98\xC0\x34\xDC"
"\x17\x17\x65\xAD",
- .ilen = 36,
+ .clen = 36,
}, {
.key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
"\x6D\x61\x72\x69\x6A\x75\x61\x6E"
@@ -20129,19 +18537,19 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x74\x75\x72",
.klen = 35,
.iv = "\x33\x30\x21\x69\x67\x65\x74\x6D",
- .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+ .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
"\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
"\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
"\x02\x00\x07\x00\x61\x62\x63\x64"
"\x65\x66\x67\x68\x69\x6A\x6B\x6C"
"\x6D\x6E\x6F\x70\x71\x72\x73\x74"
"\x01\x02\x02\x01",
- .rlen = 52,
+ .plen = 52,
.assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
"\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
"\x67\x65\x74\x6D",
.alen = 20,
- .input = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
+ .ctext = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
"\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A"
"\x48\x59\x80\x18\x1A\x18\x1A\x04"
"\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75"
@@ -20150,26 +18558,26 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\xEB\x40\x6B\x7A\x8E\x75\xBB\x42"
"\xE0\x63\x4B\x21\x44\xA2\x2B\x2B"
"\x39\xDB\xC8\xDC",
- .ilen = 68,
+ .clen = 68,
}, {
.key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
"\x57\x69\x0E",
.klen = 19,
.iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
- .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+ .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00"
"\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
"\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
"\x02\x00\x07\x00\x61\x62\x63\x64"
"\x65\x66\x67\x68\x69\x6A\x6B\x6C"
"\x6D\x6E\x6F\x70\x71\x72\x73\x74"
"\x01\x02\x02\x01",
- .rlen = 52,
+ .plen = 52,
.assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
"\x10\x10\x10\x10\x4E\x28\x00\x00"
"\xA2\xFC\xA1\xA3",
.alen = 20,
- .input = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
+ .ctext = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
"\x10\x60\x54\x25\xEB\x80\x04\x93"
"\xCA\x1B\x23\x97\xCB\x21\x2E\x01"
"\xA2\xE7\x95\x41\x30\xE4\x4B\x1B"
@@ -20178,36 +18586,36 @@ static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
"\x44\xCC\x90\xBF\x00\x94\x94\x92"
"\x20\x17\x0C\x1B\x55\xDE\x7E\x68"
"\xF4\x95\x5D\x4F",
- .ilen = 68,
+ .clen = 68,
}, {
.key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
"\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
"\x22\x43\x3C",
.klen = 19,
.iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
- .result = "\x08\x00\xC6\xCD\x02\x00\x07\x00"
+ .ptext = "\x08\x00\xC6\xCD\x02\x00\x07\x00"
"\x61\x62\x63\x64\x65\x66\x67\x68"
"\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
"\x71\x72\x73\x74\x01\x02\x02\x01",
- .rlen = 32,
+ .plen = 32,
.assoc = "\x00\x00\x43\x21\x87\x65\x43\x21"
"\x00\x00\x00\x07\x48\x55\xEC\x7D"
"\x3A\x23\x4B\xFD",
.alen = 20,
- .input = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
+ .ctext = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
"\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F"
"\xF8\xA3\x4C\x53\xB8\x12\x09\xBF"
"\x58\x7D\xCF\x29\xA3\x41\x68\x6B"
"\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F"
"\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6",
- .ilen = 48,
+ .clen = 48,
}
};
/*
 * ChaCha20-Poly1305 AEAD test vectors from RFC 7539, sections 2.8.2 and A.5.
*/
-static const struct aead_testvec rfc7539_enc_tv_template[] = {
+static const struct aead_testvec rfc7539_tv_template[] = {
{
.key = "\x80\x81\x82\x83\x84\x85\x86\x87"
"\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
@@ -20219,7 +18627,7 @@ static const struct aead_testvec rfc7539_enc_tv_template[] = {
.assoc = "\x50\x51\x52\x53\xc0\xc1\xc2\xc3"
"\xc4\xc5\xc6\xc7",
.alen = 12,
- .input = "\x4c\x61\x64\x69\x65\x73\x20\x61"
+ .ptext = "\x4c\x61\x64\x69\x65\x73\x20\x61"
"\x6e\x64\x20\x47\x65\x6e\x74\x6c"
"\x65\x6d\x65\x6e\x20\x6f\x66\x20"
"\x74\x68\x65\x20\x63\x6c\x61\x73"
@@ -20234,8 +18642,8 @@ static const struct aead_testvec rfc7539_enc_tv_template[] = {
"\x63\x72\x65\x65\x6e\x20\x77\x6f"
"\x75\x6c\x64\x20\x62\x65\x20\x69"
"\x74\x2e",
- .ilen = 114,
- .result = "\xd3\x1a\x8d\x34\x64\x8e\x60\xdb"
+ .plen = 114,
+ .ctext = "\xd3\x1a\x8d\x34\x64\x8e\x60\xdb"
"\x7b\x86\xaf\xbc\x53\xef\x7e\xc2"
"\xa4\xad\xed\x51\x29\x6e\x08\xfe"
"\xa9\xe2\xb5\xa7\x36\xee\x62\xd6"
@@ -20252,7 +18660,7 @@ static const struct aead_testvec rfc7539_enc_tv_template[] = {
"\x61\x16\x1a\xe1\x0b\x59\x4f\x09"
"\xe2\x6a\x7e\x90\x2e\xcb\xd0\x60"
"\x06\x91",
- .rlen = 130,
+ .clen = 130,
}, {
.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
"\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -20264,7 +18672,7 @@ static const struct aead_testvec rfc7539_enc_tv_template[] = {
.assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00"
"\x00\x00\x4e\x91",
.alen = 12,
- .input = "\x49\x6e\x74\x65\x72\x6e\x65\x74"
+ .ptext = "\x49\x6e\x74\x65\x72\x6e\x65\x74"
"\x2d\x44\x72\x61\x66\x74\x73\x20"
"\x61\x72\x65\x20\x64\x72\x61\x66"
"\x74\x20\x64\x6f\x63\x75\x6d\x65"
@@ -20298,105 +18706,8 @@ static const struct aead_testvec rfc7539_enc_tv_template[] = {
"\x20\x69\x6e\x20\x70\x72\x6f\x67"
"\x72\x65\x73\x73\x2e\x2f\xe2\x80"
"\x9d",
- .ilen = 265,
- .result = "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
- "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd"
- "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89"
- "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2"
- "\x4c\x6c\xfc\x18\x75\x5d\x43\xee"
- "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0"
- "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00"
- "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf"
- "\x33\x2f\x83\x0e\x71\x0b\x97\xce"
- "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81"
- "\x14\xad\x17\x6e\x00\x8d\x33\xbd"
- "\x60\xf9\x82\xb1\xff\x37\xc8\x55"
- "\x97\x97\xa0\x6e\xf4\xf0\xef\x61"
- "\xc1\x86\x32\x4e\x2b\x35\x06\x38"
- "\x36\x06\x90\x7b\x6a\x7c\x02\xb0"
- "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4"
- "\xb9\x16\x6c\x76\x7b\x80\x4d\x46"
- "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9"
- "\x90\x40\xc5\xa4\x04\x33\x22\x5e"
- "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e"
- "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15"
- "\x5b\x00\x47\x71\x8c\xbc\x54\x6a"
- "\x0d\x07\x2b\x04\xb3\x56\x4e\xea"
- "\x1b\x42\x22\x73\xf5\x48\x27\x1a"
- "\x0b\xb2\x31\x60\x53\xfa\x76\x99"
- "\x19\x55\xeb\xd6\x31\x59\x43\x4e"
- "\xce\xbb\x4e\x46\x6d\xae\x5a\x10"
- "\x73\xa6\x72\x76\x27\x09\x7a\x10"
- "\x49\xe6\x17\xd9\x1d\x36\x10\x94"
- "\xfa\x68\xf0\xff\x77\x98\x71\x30"
- "\x30\x5b\xea\xba\x2e\xda\x04\xdf"
- "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29"
- "\xa6\xad\x5c\xb4\x02\x2b\x02\x70"
- "\x9b\xee\xad\x9d\x67\x89\x0c\xbb"
- "\x22\x39\x23\x36\xfe\xa1\x85\x1f"
- "\x38",
- .rlen = 281,
- },
-};
-
-static const struct aead_testvec rfc7539_dec_tv_template[] = {
- {
- .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f",
- .klen = 32,
- .iv = "\x07\x00\x00\x00\x40\x41\x42\x43"
- "\x44\x45\x46\x47",
- .assoc = "\x50\x51\x52\x53\xc0\xc1\xc2\xc3"
- "\xc4\xc5\xc6\xc7",
- .alen = 12,
- .input = "\xd3\x1a\x8d\x34\x64\x8e\x60\xdb"
- "\x7b\x86\xaf\xbc\x53\xef\x7e\xc2"
- "\xa4\xad\xed\x51\x29\x6e\x08\xfe"
- "\xa9\xe2\xb5\xa7\x36\xee\x62\xd6"
- "\x3d\xbe\xa4\x5e\x8c\xa9\x67\x12"
- "\x82\xfa\xfb\x69\xda\x92\x72\x8b"
- "\x1a\x71\xde\x0a\x9e\x06\x0b\x29"
- "\x05\xd6\xa5\xb6\x7e\xcd\x3b\x36"
- "\x92\xdd\xbd\x7f\x2d\x77\x8b\x8c"
- "\x98\x03\xae\xe3\x28\x09\x1b\x58"
- "\xfa\xb3\x24\xe4\xfa\xd6\x75\x94"
- "\x55\x85\x80\x8b\x48\x31\xd7\xbc"
- "\x3f\xf4\xde\xf0\x8e\x4b\x7a\x9d"
- "\xe5\x76\xd2\x65\x86\xce\xc6\x4b"
- "\x61\x16\x1a\xe1\x0b\x59\x4f\x09"
- "\xe2\x6a\x7e\x90\x2e\xcb\xd0\x60"
- "\x06\x91",
- .ilen = 130,
- .result = "\x4c\x61\x64\x69\x65\x73\x20\x61"
- "\x6e\x64\x20\x47\x65\x6e\x74\x6c"
- "\x65\x6d\x65\x6e\x20\x6f\x66\x20"
- "\x74\x68\x65\x20\x63\x6c\x61\x73"
- "\x73\x20\x6f\x66\x20\x27\x39\x39"
- "\x3a\x20\x49\x66\x20\x49\x20\x63"
- "\x6f\x75\x6c\x64\x20\x6f\x66\x66"
- "\x65\x72\x20\x79\x6f\x75\x20\x6f"
- "\x6e\x6c\x79\x20\x6f\x6e\x65\x20"
- "\x74\x69\x70\x20\x66\x6f\x72\x20"
- "\x74\x68\x65\x20\x66\x75\x74\x75"
- "\x72\x65\x2c\x20\x73\x75\x6e\x73"
- "\x63\x72\x65\x65\x6e\x20\x77\x6f"
- "\x75\x6c\x64\x20\x62\x65\x20\x69"
- "\x74\x2e",
- .rlen = 114,
- }, {
- .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
- "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
- "\x47\x39\x17\xc1\x40\x2b\x80\x09"
- "\x9d\xca\x5c\xbc\x20\x70\x75\xc0",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x01\x02\x03\x04"
- "\x05\x06\x07\x08",
- .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00"
- "\x00\x00\x4e\x91",
- .alen = 12,
- .input = "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
+ .plen = 265,
+ .ctext = "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
"\x60\xf0\x62\xc7\x9b\xe6\x43\xbd"
"\x5e\x80\x5c\xfd\x34\x5c\xf3\x89"
"\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2"
@@ -20432,49 +18743,14 @@ static const struct aead_testvec rfc7539_dec_tv_template[] = {
"\x9b\xee\xad\x9d\x67\x89\x0c\xbb"
"\x22\x39\x23\x36\xfe\xa1\x85\x1f"
"\x38",
- .ilen = 281,
- .result = "\x49\x6e\x74\x65\x72\x6e\x65\x74"
- "\x2d\x44\x72\x61\x66\x74\x73\x20"
- "\x61\x72\x65\x20\x64\x72\x61\x66"
- "\x74\x20\x64\x6f\x63\x75\x6d\x65"
- "\x6e\x74\x73\x20\x76\x61\x6c\x69"
- "\x64\x20\x66\x6f\x72\x20\x61\x20"
- "\x6d\x61\x78\x69\x6d\x75\x6d\x20"
- "\x6f\x66\x20\x73\x69\x78\x20\x6d"
- "\x6f\x6e\x74\x68\x73\x20\x61\x6e"
- "\x64\x20\x6d\x61\x79\x20\x62\x65"
- "\x20\x75\x70\x64\x61\x74\x65\x64"
- "\x2c\x20\x72\x65\x70\x6c\x61\x63"
- "\x65\x64\x2c\x20\x6f\x72\x20\x6f"
- "\x62\x73\x6f\x6c\x65\x74\x65\x64"
- "\x20\x62\x79\x20\x6f\x74\x68\x65"
- "\x72\x20\x64\x6f\x63\x75\x6d\x65"
- "\x6e\x74\x73\x20\x61\x74\x20\x61"
- "\x6e\x79\x20\x74\x69\x6d\x65\x2e"
- "\x20\x49\x74\x20\x69\x73\x20\x69"
- "\x6e\x61\x70\x70\x72\x6f\x70\x72"
- "\x69\x61\x74\x65\x20\x74\x6f\x20"
- "\x75\x73\x65\x20\x49\x6e\x74\x65"
- "\x72\x6e\x65\x74\x2d\x44\x72\x61"
- "\x66\x74\x73\x20\x61\x73\x20\x72"
- "\x65\x66\x65\x72\x65\x6e\x63\x65"
- "\x20\x6d\x61\x74\x65\x72\x69\x61"
- "\x6c\x20\x6f\x72\x20\x74\x6f\x20"
- "\x63\x69\x74\x65\x20\x74\x68\x65"
- "\x6d\x20\x6f\x74\x68\x65\x72\x20"
- "\x74\x68\x61\x6e\x20\x61\x73\x20"
- "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b"
- "\x20\x69\x6e\x20\x70\x72\x6f\x67"
- "\x72\x65\x73\x73\x2e\x2f\xe2\x80"
- "\x9d",
- .rlen = 265,
+ .clen = 281,
},
};
/*
* draft-irtf-cfrg-chacha20-poly1305
*/
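The ESP template that follows reuses the same RFC 7539 plaintext/ciphertext pair as above, but moves four bytes of nonce salt from the IV to the tail of the key (klen 36 with an 8-byte .iv, instead of klen 32 with a 12-byte .iv); the 12-byte ChaCha20-Poly1305 nonce is then rebuilt as salt || iv, the same construction rfc4106 uses for GCM. The helper below is an illustrative sketch of that reassembly, not a kernel function; the salt and IV bytes are taken from the first vector below, and the printed nonce should match the 12-byte .iv of the plain rfc7539 vector above.

#include <stdio.h>
#include <string.h>

/* Illustrative only: build the 12-byte ChaCha20-Poly1305 nonce from the
 * 4-byte salt stored at the end of the rfc7539esp key and the 8-byte IV. */
static void rfc7539esp_nonce(unsigned char nonce[12],
			     const unsigned char salt[4],
			     const unsigned char iv[8])
{
	memcpy(nonce, salt, 4);
	memcpy(nonce + 4, iv, 8);
}

int main(void)
{
	const unsigned char salt[4] = { 0x00, 0x00, 0x00, 0x00 };
	const unsigned char iv[8] = {
		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
	};
	unsigned char nonce[12];
	int i;

	rfc7539esp_nonce(nonce, salt, iv);

	for (i = 0; i < 12; i++)
		printf("%02x%s", nonce[i], i == 11 ? "\n" : " ");
	return 0;
}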
-static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
+static const struct aead_testvec rfc7539esp_tv_template[] = {
{
.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
"\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -20487,7 +18763,7 @@ static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
"\x00\x00\x4e\x91\x01\x02\x03\x04"
"\x05\x06\x07\x08",
.alen = 20,
- .input = "\x49\x6e\x74\x65\x72\x6e\x65\x74"
+ .ptext = "\x49\x6e\x74\x65\x72\x6e\x65\x74"
"\x2d\x44\x72\x61\x66\x74\x73\x20"
"\x61\x72\x65\x20\x64\x72\x61\x66"
"\x74\x20\x64\x6f\x63\x75\x6d\x65"
@@ -20521,8 +18797,8 @@ static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
"\x20\x69\x6e\x20\x70\x72\x6f\x67"
"\x72\x65\x73\x73\x2e\x2f\xe2\x80"
"\x9d",
- .ilen = 265,
- .result = "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
+ .plen = 265,
+ .ctext = "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
"\x60\xf0\x62\xc7\x9b\xe6\x43\xbd"
"\x5e\x80\x5c\xfd\x34\x5c\xf3\x89"
"\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2"
@@ -20558,99 +18834,18 @@ static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
"\x9b\xee\xad\x9d\x67\x89\x0c\xbb"
"\x22\x39\x23\x36\xfe\xa1\x85\x1f"
"\x38",
- .rlen = 281,
- },
-};
-
-static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
- {
- .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
- "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
- "\x47\x39\x17\xc1\x40\x2b\x80\x09"
- "\x9d\xca\x5c\xbc\x20\x70\x75\xc0"
- "\x00\x00\x00\x00",
- .klen = 36,
- .iv = "\x01\x02\x03\x04\x05\x06\x07\x08",
- .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00"
- "\x00\x00\x4e\x91\x01\x02\x03\x04"
- "\x05\x06\x07\x08",
- .alen = 20,
- .input = "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
- "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd"
- "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89"
- "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2"
- "\x4c\x6c\xfc\x18\x75\x5d\x43\xee"
- "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0"
- "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00"
- "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf"
- "\x33\x2f\x83\x0e\x71\x0b\x97\xce"
- "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81"
- "\x14\xad\x17\x6e\x00\x8d\x33\xbd"
- "\x60\xf9\x82\xb1\xff\x37\xc8\x55"
- "\x97\x97\xa0\x6e\xf4\xf0\xef\x61"
- "\xc1\x86\x32\x4e\x2b\x35\x06\x38"
- "\x36\x06\x90\x7b\x6a\x7c\x02\xb0"
- "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4"
- "\xb9\x16\x6c\x76\x7b\x80\x4d\x46"
- "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9"
- "\x90\x40\xc5\xa4\x04\x33\x22\x5e"
- "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e"
- "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15"
- "\x5b\x00\x47\x71\x8c\xbc\x54\x6a"
- "\x0d\x07\x2b\x04\xb3\x56\x4e\xea"
- "\x1b\x42\x22\x73\xf5\x48\x27\x1a"
- "\x0b\xb2\x31\x60\x53\xfa\x76\x99"
- "\x19\x55\xeb\xd6\x31\x59\x43\x4e"
- "\xce\xbb\x4e\x46\x6d\xae\x5a\x10"
- "\x73\xa6\x72\x76\x27\x09\x7a\x10"
- "\x49\xe6\x17\xd9\x1d\x36\x10\x94"
- "\xfa\x68\xf0\xff\x77\x98\x71\x30"
- "\x30\x5b\xea\xba\x2e\xda\x04\xdf"
- "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29"
- "\xa6\xad\x5c\xb4\x02\x2b\x02\x70"
- "\x9b\xee\xad\x9d\x67\x89\x0c\xbb"
- "\x22\x39\x23\x36\xfe\xa1\x85\x1f"
- "\x38",
- .ilen = 281,
- .result = "\x49\x6e\x74\x65\x72\x6e\x65\x74"
- "\x2d\x44\x72\x61\x66\x74\x73\x20"
- "\x61\x72\x65\x20\x64\x72\x61\x66"
- "\x74\x20\x64\x6f\x63\x75\x6d\x65"
- "\x6e\x74\x73\x20\x76\x61\x6c\x69"
- "\x64\x20\x66\x6f\x72\x20\x61\x20"
- "\x6d\x61\x78\x69\x6d\x75\x6d\x20"
- "\x6f\x66\x20\x73\x69\x78\x20\x6d"
- "\x6f\x6e\x74\x68\x73\x20\x61\x6e"
- "\x64\x20\x6d\x61\x79\x20\x62\x65"
- "\x20\x75\x70\x64\x61\x74\x65\x64"
- "\x2c\x20\x72\x65\x70\x6c\x61\x63"
- "\x65\x64\x2c\x20\x6f\x72\x20\x6f"
- "\x62\x73\x6f\x6c\x65\x74\x65\x64"
- "\x20\x62\x79\x20\x6f\x74\x68\x65"
- "\x72\x20\x64\x6f\x63\x75\x6d\x65"
- "\x6e\x74\x73\x20\x61\x74\x20\x61"
- "\x6e\x79\x20\x74\x69\x6d\x65\x2e"
- "\x20\x49\x74\x20\x69\x73\x20\x69"
- "\x6e\x61\x70\x70\x72\x6f\x70\x72"
- "\x69\x61\x74\x65\x20\x74\x6f\x20"
- "\x75\x73\x65\x20\x49\x6e\x74\x65"
- "\x72\x6e\x65\x74\x2d\x44\x72\x61"
- "\x66\x74\x73\x20\x61\x73\x20\x72"
- "\x65\x66\x65\x72\x65\x6e\x63\x65"
- "\x20\x6d\x61\x74\x65\x72\x69\x61"
- "\x6c\x20\x6f\x72\x20\x74\x6f\x20"
- "\x63\x69\x74\x65\x20\x74\x68\x65"
- "\x6d\x20\x6f\x74\x68\x65\x72\x20"
- "\x74\x68\x61\x6e\x20\x61\x73\x20"
- "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b"
- "\x20\x69\x6e\x20\x70\x72\x6f\x67"
- "\x72\x65\x73\x73\x2e\x2f\xe2\x80"
- "\x9d",
- .rlen = 265,
+ .clen = 281,
},
};
-static const struct aead_testvec aegis128_enc_tv_template[] = {
+/*
+ * AEGIS-128 test vectors - generated via reference implementation from
+ * SUPERCOP (https://bench.cr.yp.to/supercop.html):
+ *
+ * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
+ * (see crypto_aead/aegis128/)
+ */
+static const struct aead_testvec aegis128_tv_template[] = {
{
.key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
"\x20\x36\x2c\x24\xfe\xc9\x30\x81",
@@ -20659,11 +18854,11 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x40\x6d\x59\x48\xfc\x92\x61\x03",
.assoc = "",
.alen = 0,
- .input = "",
- .ilen = 0,
- .result = "\x07\xa5\x11\xf2\x9d\x40\xb8\x6d"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x07\xa5\x11\xf2\x9d\x40\xb8\x6d"
"\xda\xb8\x12\x34\x4c\x53\xd9\x72",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
"\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
@@ -20672,12 +18867,12 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\xc1\x47\x0b\xda\xf6\xb6\x23\x09",
.assoc = "",
.alen = 0,
- .input = "\x79",
- .ilen = 1,
- .result = "\x9e\x78\x52\xae\xcb\x9e\xe4\xd3"
+ .ptext = "\x79",
+ .plen = 1,
+ .ctext = "\x9e\x78\x52\xae\xcb\x9e\xe4\xd3"
"\x9a\xd7\x5d\xd7\xaa\x9a\xe9\x5a"
"\xcc",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
"\x22\xea\x90\x47\xf2\x11\xb5\x8e",
@@ -20686,14 +18881,14 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x42\x21\xbd\x6b\xf0\xda\xe6\x0f",
.assoc = "",
.alen = 0,
- .input = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
+ .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
"\x82\x8e\x16\xb4\xed\x6d\x47",
- .ilen = 15,
- .result = "\xc3\x80\x83\x04\x5f\xaa\x61\xc7"
+ .plen = 15,
+ .ctext = "\xc3\x80\x83\x04\x5f\xaa\x61\xc7"
"\xca\xdd\x6f\xac\x85\x08\xb5\x35"
"\x2b\xc2\x3e\x0b\x1b\x39\x37\x2b"
"\x7a\x21\x16\xb3\xe6\x67\x66",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
"\xa2\xc5\x42\xd8\xec\x36\x78\x94",
@@ -20702,14 +18897,14 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\xc3\xfb\x6f\xfd\xea\xff\xa9\x15",
.assoc = "",
.alen = 0,
- .input = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
+ .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
"\x03\x68\xc8\x45\xe7\x91\x0a\x18",
- .ilen = 16,
- .result = "\x23\x25\x30\xe5\x6a\xb6\x36\x7d"
+ .plen = 16,
+ .ctext = "\x23\x25\x30\xe5\x6a\xb6\x36\x7d"
"\x38\xfd\x3a\xd2\xc2\x58\xa9\x11"
"\x1e\xa8\x30\x9c\x16\xa4\xdb\x65"
"\x51\x10\x16\x27\x70\x9b\x64\x29",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
"\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
@@ -20718,16 +18913,16 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x44\xd5\x21\x8e\xe4\x23\x6b\x1c",
.assoc = "",
.alen = 0,
- .input = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
+ .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
"\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f"
"\xd3",
- .ilen = 17,
- .result = "\x2a\x8d\x56\x91\xc6\xf3\x56\xa5"
+ .plen = 17,
+ .ctext = "\x2a\x8d\x56\x91\xc6\xf3\x56\xa5"
"\x1f\xf0\x89\x2e\x13\xad\xe6\xf6"
"\x46\x80\xb1\x0e\x18\x30\x40\x97"
"\x03\xdf\x64\x3c\xbe\x93\x9e\xc9"
"\x3b",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
"\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
@@ -20736,18 +18931,18 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\xc5\xb0\xd3\x1f\xde\x48\x2e\x22",
.assoc = "",
.alen = 0,
- .input = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
+ .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
"\x05\x1d\x2c\x68\xdb\xda\x8f\x25"
"\xfe\x8d\x45\x19\x1e\xc0\x0b\x99"
"\x88\x11\x39\x12\x1c\x3a\xbb",
- .ilen = 31,
- .result = "\x4e\xf6\xfa\x13\xde\x43\x63\x4c"
+ .plen = 31,
+ .ctext = "\x4e\xf6\xfa\x13\xde\x43\x63\x4c"
"\xe2\x04\x3e\xe4\x85\x14\xb6\x3f"
"\xb1\x8f\x4c\xdb\x41\xa2\x14\x99"
"\xf5\x53\x0f\x73\x86\x7e\x97\xa1"
"\x4b\x56\x5b\x94\xce\xcd\x74\xcd"
"\x75\xc4\x53\x01\x89\x45\x59",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
"\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
@@ -20756,18 +18951,18 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x45\x8a\x85\xb1\xd8\x6c\xf1\x28",
.assoc = "",
.alen = 0,
- .input = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
+ .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
"\x86\xf7\xde\xfa\xd5\xfe\x52\x2b"
"\x28\x50\x51\x9d\x24\x60\x8d\xb3"
"\x49\x3e\x17\xea\xf6\x99\x5a\xdd",
- .ilen = 32,
- .result = "\xa4\x9a\xb7\xfd\xa0\xd4\xd6\x47"
+ .plen = 32,
+ .ctext = "\xa4\x9a\xb7\xfd\xa0\xd4\xd6\x47"
"\x95\xf4\x58\x38\x14\x83\x27\x01"
"\x4c\xed\x32\x2c\xf7\xd6\x31\xf7"
"\x38\x1b\x2c\xc9\xb6\x31\xce\xaa"
"\xa5\x3c\x1a\x18\x5c\xce\xb9\xdf"
"\x51\x52\x77\xf2\x5e\x85\x80\x41",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
"\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
@@ -20776,11 +18971,11 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\xc6\x64\x37\x42\xd2\x90\xb3\x2e",
.assoc = "\xd5",
.alen = 1,
- .input = "",
- .ilen = 0,
- .result = "\xfb\xd4\x83\x71\x9e\x63\xad\x60"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xfb\xd4\x83\x71\x9e\x63\xad\x60"
"\xb9\xf9\xeb\x34\x52\x49\xcf\xb7",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
"\x27\x08\xbd\xaf\xce\xec\x45\xb3",
@@ -20790,11 +18985,11 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
.assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73"
"\x68\x75\x16\xf8\xcb\x7e\xa7",
.alen = 15,
- .input = "",
- .ilen = 0,
- .result = "\x0c\xaf\x2e\x96\xf6\x97\x08\x71"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x0c\xaf\x2e\x96\xf6\x97\x08\x71"
"\x7d\x3a\x84\xc4\x44\x57\x77\x7e",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
"\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
@@ -20804,11 +18999,11 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
.assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f"
"\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc",
.alen = 16,
- .input = "",
- .ilen = 0,
- .result = "\xc7\x87\x09\x3b\xc7\x19\x74\x22"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xc7\x87\x09\x3b\xc7\x19\x74\x22"
"\x22\xa5\x67\x10\xb2\x36\xb3\x45",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
"\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
@@ -20819,11 +19014,11 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2"
"\x07",
.alen = 17,
- .input = "",
- .ilen = 0,
- .result = "\x02\xc6\x3b\x46\x65\xb2\xef\x91"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x02\xc6\x3b\x46\x65\xb2\xef\x91"
"\x31\xf0\x45\x48\x8a\x2a\xed\xe4",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
"\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
@@ -20835,11 +19030,11 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x31\x6b\x08\x12\xfc\xd8\x37\x2d"
"\xe0\x17\x3a\x2e\x83\x5c\x8f",
.alen = 31,
- .input = "",
- .ilen = 0,
- .result = "\x20\x85\xa8\xd0\x91\x48\x85\xf3"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x20\x85\xa8\xd0\x91\x48\x85\xf3"
"\x5a\x16\xc0\x57\x68\x47\xdd\xcb",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
"\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
@@ -20851,11 +19046,11 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x5c\x2d\x14\x96\x01\x78\xb9\x47"
"\xa1\x44\x19\x06\x5d\xbb\x2e\x2f",
.alen = 32,
- .input = "",
- .ilen = 0,
- .result = "\x6a\xf8\x8d\x9c\x42\x75\x35\x79"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x6a\xf8\x8d\x9c\x42\x75\x35\x79"
"\xc1\x96\xbd\x31\x6e\x69\x1b\x50",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
"\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
@@ -20864,12 +19059,12 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\xcc\x81\x63\xab\xae\x6b\x43\x54",
.assoc = "\x40",
.alen = 1,
- .input = "\x4f",
- .ilen = 1,
- .result = "\x01\x24\xb1\xba\xf6\xd3\xdf\x83"
+ .ptext = "\x4f",
+ .plen = 1,
+ .ctext = "\x01\x24\xb1\xba\xf6\xd3\xdf\x83"
"\x70\x45\xe3\x2a\x9d\x5c\x63\x98"
"\x39",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
"\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
@@ -20879,14 +19074,14 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
.assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
"\x6d\x92\x42\x61\xa7\x58\x37",
.alen = 15,
- .input = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
+ .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
"\x8d\xc8\x6e\x85\xa5\x21\x67",
- .ilen = 15,
- .result = "\x18\x78\xc2\x6e\xe1\xf7\xe6\x8a"
+ .plen = 15,
+ .ctext = "\x18\x78\xc2\x6e\xe1\xf7\xe6\x8a"
"\xca\x0e\x62\x00\xa8\x21\xb5\x21"
"\x3d\x36\xdb\xf7\xcc\x31\x94\x9c"
"\x98\xbd\x71\x7a\xef\xa4\xfa",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
"\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
@@ -20896,14 +19091,14 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
.assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
"\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
.alen = 16,
- .input = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
+ .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
"\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
- .ilen = 16,
- .result = "\xea\xd1\x81\x75\xb4\x13\x1d\x86"
+ .plen = 16,
+ .ctext = "\xea\xd1\x81\x75\xb4\x13\x1d\x86"
"\xd4\x17\x26\xe5\xd6\x89\x39\x04"
"\xa9\x6c\xca\xac\x40\x73\xb2\x4c"
"\x9c\xb9\x0e\x79\x4c\x40\x65\xc6",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
"\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
@@ -20914,16 +19109,16 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
"\x05",
.alen = 17,
- .input = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
+ .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
"\x8f\x7d\xd3\xa8\x99\x6a\xed\x69"
"\xd0",
- .ilen = 17,
- .result = "\xf4\xb2\x84\xd1\x81\xfa\x98\x1c"
+ .plen = 17,
+ .ctext = "\xf4\xb2\x84\xd1\x81\xfa\x98\x1c"
"\x38\x2d\x69\x90\x1c\x71\x38\x98"
"\x9f\xe1\x19\x3b\x63\x91\xaf\x6e"
"\x4b\x07\x2c\xac\x53\xc5\xd5\xfe"
"\x93",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
"\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
@@ -20935,18 +19130,18 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
"\x68\x28\x73\x40\x9f\x96\x4a",
.alen = 31,
- .input = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
+ .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
"\x10\x57\x85\x39\x93\x8f\xaf\x70"
"\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd"
"\x98\x34\xab\x37\x56\xae\x32",
- .ilen = 31,
- .result = "\xa0\xe7\x0a\x60\xe7\xb8\x8a\xdb"
+ .plen = 31,
+ .ctext = "\xa0\xe7\x0a\x60\xe7\xb8\x8a\xdb"
"\x94\xd3\x93\xf2\x41\x86\x16\xdd"
"\x4c\xe8\xe7\xe0\x62\x48\x89\x40"
"\xc0\x49\x9b\x63\x32\xec\x8b\xdb"
"\xdc\xa6\xea\x2c\xc2\x7f\xf5\x04"
"\xcb\xe5\x47\xbb\xa7\xd1\x9d",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
"\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
@@ -20958,18 +19153,18 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
"\x29\x56\x52\x19\x79\xf5\xe9\x37",
.alen = 32,
- .input = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
+ .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
"\x91\x31\x37\xcb\x8d\xb3\x72\x76"
"\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7"
"\x5a\x61\x8a\x0f\x30\x0d\xd1\xec",
- .ilen = 32,
- .result = "\x62\xdc\x2d\x68\x2d\x71\xbb\x33"
+ .plen = 32,
+ .ctext = "\x62\xdc\x2d\x68\x2d\x71\xbb\x33"
"\x13\xdf\xc0\x46\xf6\x61\x94\xa7"
"\x60\xd3\xd4\xca\xd9\xbe\x82\xf3"
"\xf1\x5b\xa0\xfa\x15\xba\xda\xea"
"\x87\x68\x47\x08\x5d\xdd\x83\xb0"
"\x60\xf4\x93\x20\xdf\x34\x8f\xea",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
"\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
@@ -20982,7 +19177,7 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\xeb\x83\x31\xf1\x54\x54\x89\x0d"
"\x9d",
.alen = 33,
- .input = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
+ .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
"\x12\x0b\xe9\x5c\x87\xd7\x35\x7c"
"\x4f\x2e\xe8\x55\x66\x80\x27\x00"
"\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3"
@@ -20991,8 +19186,8 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x80\x0d\x19\x98\x33\xa9\x7a\xe3"
"\x2e\x4c\xc6\xf3\x8c\x88\x42\x01"
"\xbd",
- .ilen = 65,
- .result = "\x84\xc5\x21\xab\xe1\xeb\xbb\x6d"
+ .plen = 65,
+ .ctext = "\x84\xc5\x21\xab\xe1\xeb\xbb\x6d"
"\xaa\x2a\xaf\xeb\x3b\x3b\x69\xe7"
"\x2c\x47\xef\x9d\xb7\x53\x36\xb7"
"\xb6\xf5\xe5\xa8\xc9\x9e\x02\xd7"
@@ -21003,7 +19198,7 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x61\xe6\xae\x07\xf2\xe0\xa7\x44"
"\x96\x28\x3b\xee\x6b\xc6\x16\x31"
"\x3f",
- .rlen = 81,
+ .clen = 81,
}, {
.key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
"\x32\x42\x15\x80\x85\xa1\x65\xfe",
@@ -21020,20 +19215,20 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
"\x09\x4f\x77\x62\x88\x2d\xf2\x68"
"\x54",
.alen = 65,
- .input = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
+ .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
"\x93\xe6\x9b\xee\x81\xfc\xf7\x82"
"\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a"
"\xdc\xbc\x47\xc0\xe4\xcb\x10\x99"
"\x2f",
- .ilen = 33,
- .result = "\x8f\x23\x47\xfb\xf2\xac\x23\x83"
+ .plen = 33,
+ .ctext = "\x8f\x23\x47\xfb\xf2\xac\x23\x83"
"\x77\x09\xac\x74\xef\xd2\x56\xae"
"\x20\x7b\x7b\xca\x45\x8e\xc8\xc2"
"\x50\xbd\xc7\x44\x1c\x54\x98\xd8"
"\x1f\xd0\x9a\x79\xaa\xf9\xe1\xb3"
"\xb4\x98\x5a\x9b\xe4\x4d\xbf\x4e"
"\x39",
- .rlen = 49,
+ .clen = 49,
}, {
.key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
"\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
@@ -21043,14 +19238,14 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
.assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
"\xf3\x89\x20\x5b\x7c\x57\x89\x07",
.alen = 16,
- .input = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
+ .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
"\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
- .ilen = 16,
- .result = "\x42\xc3\x58\xfb\x29\xe2\x4a\x56"
+ .plen = 16,
+ .ctext = "\x42\xc3\x58\xfb\x29\xe2\x4a\x56"
"\xf1\xf5\xe1\x51\x55\x4b\x0a\x45"
"\x46\xb5\x8d\xac\xb6\x34\xd8\x8b"
"\xde\x20\x59\x77\xc1\x74\x90",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
"\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
@@ -21060,14 +19255,14 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
.assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
"\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
.alen = 16,
- .input = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
+ .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
"\x95\x9a\xff\x10\x75\x45\x7d\x8f",
- .ilen = 16,
- .result = "\xb2\xfb\xf6\x97\x69\x7a\xe9\xec"
+ .plen = 16,
+ .ctext = "\xb2\xfb\xf6\x97\x69\x7a\xe9\xec"
"\xe2\x94\xa1\x8b\xa0\x2b\x60\x72"
"\x1d\x04\xdd\x6a\xef\x46\x8f\x68"
"\xe9\xe0\x17\x45\x70\x12",
- .rlen = 30,
+ .clen = 30,
}, {
.key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
"\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
@@ -21077,457 +19272,13 @@ static const struct aead_testvec aegis128_enc_tv_template[] = {
.assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
"\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
.alen = 16,
- .input = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
+ .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
"\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
- .ilen = 16,
- .result = "\x47\xda\x54\x42\x51\x72\xc4\x8b"
- "\xf5\x57\x0f\x2f\x49\x0e\x11\x3b"
- "\x78\x93\xec\xfc\xf4\xff\xe1\x2d",
- .rlen = 24,
- },
-};
-
-/*
- * AEGIS-128 test vectors - generated via reference implementation from
- * SUPERCOP (https://bench.cr.yp.to/supercop.html):
- *
- * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
- * (see crypto_aead/aegis128/)
- */
-static const struct aead_testvec aegis128_dec_tv_template[] = {
- {
- .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
- "\x20\x36\x2c\x24\xfe\xc9\x30\x81",
- .klen = 16,
- .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d"
- "\x40\x6d\x59\x48\xfc\x92\x61\x03",
- .assoc = "",
- .alen = 0,
- .input = "\x07\xa5\x11\xf2\x9d\x40\xb8\x6d"
- "\xda\xb8\x12\x34\x4c\x53\xd9\x72",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
- "\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
- .klen = 16,
- .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29"
- "\xc1\x47\x0b\xda\xf6\xb6\x23\x09",
- .assoc = "",
- .alen = 0,
- .input = "\x9e\x78\x52\xae\xcb\x9e\xe4\xd3"
- "\x9a\xd7\x5d\xd7\xaa\x9a\xe9\x5a"
- "\xcc",
- .ilen = 17,
- .result = "\x79",
- .rlen = 1,
- }, {
- .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
- "\x22\xea\x90\x47\xf2\x11\xb5\x8e",
- .klen = 16,
- .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45"
- "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f",
- .assoc = "",
- .alen = 0,
- .input = "\xc3\x80\x83\x04\x5f\xaa\x61\xc7"
- "\xca\xdd\x6f\xac\x85\x08\xb5\x35"
- "\x2b\xc2\x3e\x0b\x1b\x39\x37\x2b"
- "\x7a\x21\x16\xb3\xe6\x67\x66",
- .ilen = 31,
- .result = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
- "\x82\x8e\x16\xb4\xed\x6d\x47",
- .rlen = 15,
- }, {
- .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
- "\xa2\xc5\x42\xd8\xec\x36\x78\x94",
- .klen = 16,
- .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61"
- "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15",
- .assoc = "",
- .alen = 0,
- .input = "\x23\x25\x30\xe5\x6a\xb6\x36\x7d"
- "\x38\xfd\x3a\xd2\xc2\x58\xa9\x11"
- "\x1e\xa8\x30\x9c\x16\xa4\xdb\x65"
- "\x51\x10\x16\x27\x70\x9b\x64\x29",
- .ilen = 32,
- .result = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
- "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
- .rlen = 16,
- }, {
- .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
- "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
- .klen = 16,
- .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d"
- "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c",
- .assoc = "",
- .alen = 0,
- .input = "\x2a\x8d\x56\x91\xc6\xf3\x56\xa5"
- "\x1f\xf0\x89\x2e\x13\xad\xe6\xf6"
- "\x46\x80\xb1\x0e\x18\x30\x40\x97"
- "\x03\xdf\x64\x3c\xbe\x93\x9e\xc9"
- "\x3b",
- .ilen = 33,
- .result = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
- "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f"
- "\xd3",
- .rlen = 17,
- }, {
- .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
- "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
- .klen = 16,
- .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98"
- "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22",
- .assoc = "",
- .alen = 0,
- .input = "\x4e\xf6\xfa\x13\xde\x43\x63\x4c"
- "\xe2\x04\x3e\xe4\x85\x14\xb6\x3f"
- "\xb1\x8f\x4c\xdb\x41\xa2\x14\x99"
- "\xf5\x53\x0f\x73\x86\x7e\x97\xa1"
- "\x4b\x56\x5b\x94\xce\xcd\x74\xcd"
- "\x75\xc4\x53\x01\x89\x45\x59",
- .ilen = 47,
- .result = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
- "\x05\x1d\x2c\x68\xdb\xda\x8f\x25"
- "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99"
- "\x88\x11\x39\x12\x1c\x3a\xbb",
- .rlen = 31,
- }, {
- .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
- "\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
- .klen = 16,
- .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4"
- "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28",
- .assoc = "",
- .alen = 0,
- .input = "\xa4\x9a\xb7\xfd\xa0\xd4\xd6\x47"
- "\x95\xf4\x58\x38\x14\x83\x27\x01"
- "\x4c\xed\x32\x2c\xf7\xd6\x31\xf7"
- "\x38\x1b\x2c\xc9\xb6\x31\xce\xaa"
- "\xa5\x3c\x1a\x18\x5c\xce\xb9\xdf"
- "\x51\x52\x77\xf2\x5e\x85\x80\x41",
- .ilen = 48,
- .result = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
- "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b"
- "\x28\x50\x51\x9d\x24\x60\x8d\xb3"
- "\x49\x3e\x17\xea\xf6\x99\x5a\xdd",
- .rlen = 32,
- }, {
- .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
- "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
- .klen = 16,
- .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0"
- "\xc6\x64\x37\x42\xd2\x90\xb3\x2e",
- .assoc = "\xd5",
- .alen = 1,
- .input = "\xfb\xd4\x83\x71\x9e\x63\xad\x60"
- "\xb9\xf9\xeb\x34\x52\x49\xcf\xb7",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
- "\x27\x08\xbd\xaf\xce\xec\x45\xb3",
- .klen = 16,
- .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
- "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34",
- .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73"
- "\x68\x75\x16\xf8\xcb\x7e\xa7",
- .alen = 15,
- .input = "\x0c\xaf\x2e\x96\xf6\x97\x08\x71"
- "\x7d\x3a\x84\xc4\x44\x57\x77\x7e",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
- "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
- .klen = 16,
- .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
- "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b",
- .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f"
- "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc",
- .alen = 16,
- .input = "\xc7\x87\x09\x3b\xc7\x19\x74\x22"
- "\x22\xa5\x67\x10\xb2\x36\xb3\x45",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
- "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
- .klen = 16,
- .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
- "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41",
- .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab"
- "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2"
- "\x07",
- .alen = 17,
- .input = "\x02\xc6\x3b\x46\x65\xb2\xef\x91"
- "\x31\xf0\x45\x48\x8a\x2a\xed\xe4",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
- "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
- .klen = 16,
- .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
- "\xca\xcd\xff\x88\xba\x22\xbe\x47",
- .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6"
- "\xea\x03\x2c\xac\xb9\xeb\xef\xc9"
- "\x31\x6b\x08\x12\xfc\xd8\x37\x2d"
- "\xe0\x17\x3a\x2e\x83\x5c\x8f",
- .alen = 31,
- .input = "\x20\x85\xa8\xd0\x91\x48\x85\xf3"
- "\x5a\x16\xc0\x57\x68\x47\xdd\xcb",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
- "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
- .klen = 16,
- .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
- "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d",
- .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2"
- "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf"
- "\x5c\x2d\x14\x96\x01\x78\xb9\x47"
- "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f",
- .alen = 32,
- .input = "\x6a\xf8\x8d\x9c\x42\x75\x35\x79"
- "\xc1\x96\xbd\x31\x6e\x69\x1b\x50",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
- "\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
- .klen = 16,
- .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77"
- "\xcc\x81\x63\xab\xae\x6b\x43\x54",
- .assoc = "\x40",
- .alen = 1,
- .input = "\x01\x24\xb1\xba\xf6\xd3\xdf\x83"
- "\x70\x45\xe3\x2a\x9d\x5c\x63\x98"
- "\x39",
- .ilen = 17,
- .result = "\x4f",
- .rlen = 1,
- }, {
- .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
- "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
- .klen = 16,
- .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
- "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a",
- .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
- "\x6d\x92\x42\x61\xa7\x58\x37",
- .alen = 15,
- .input = "\x18\x78\xc2\x6e\xe1\xf7\xe6\x8a"
- "\xca\x0e\x62\x00\xa8\x21\xb5\x21"
- "\x3d\x36\xdb\xf7\xcc\x31\x94\x9c"
- "\x98\xbd\x71\x7a\xef\xa4\xfa",
- .ilen = 31,
- .result = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
- "\x8d\xc8\x6e\x85\xa5\x21\x67",
- .rlen = 15,
- }, {
- .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
- "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
- .klen = 16,
- .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
- "\xce\x36\xc7\xce\xa2\xb4\xc9\x60",
- .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
- "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
- .alen = 16,
- .input = "\xea\xd1\x81\x75\xb4\x13\x1d\x86"
- "\xd4\x17\x26\xe5\xd6\x89\x39\x04"
- "\xa9\x6c\xca\xac\x40\x73\xb2\x4c"
- "\x9c\xb9\x0e\x79\x4c\x40\x65\xc6",
- .ilen = 32,
- .result = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
- "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
- .rlen = 16,
- }, {
- .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
- "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
- .klen = 16,
- .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
- "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66",
- .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
- "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
- "\x05",
- .alen = 17,
- .input = "\xf4\xb2\x84\xd1\x81\xfa\x98\x1c"
- "\x38\x2d\x69\x90\x1c\x71\x38\x98"
- "\x9f\xe1\x19\x3b\x63\x91\xaf\x6e"
- "\x4b\x07\x2c\xac\x53\xc5\xd5\xfe"
- "\x93",
- .ilen = 33,
- .result = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
- "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69"
- "\xd0",
- .rlen = 17,
- }, {
- .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
- "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
- .klen = 16,
- .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
- "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d",
- .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
- "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
- "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
- "\x68\x28\x73\x40\x9f\x96\x4a",
- .alen = 31,
- .input = "\xa0\xe7\x0a\x60\xe7\xb8\x8a\xdb"
- "\x94\xd3\x93\xf2\x41\x86\x16\xdd"
- "\x4c\xe8\xe7\xe0\x62\x48\x89\x40"
- "\xc0\x49\x9b\x63\x32\xec\x8b\xdb"
- "\xdc\xa6\xea\x2c\xc2\x7f\xf5\x04"
- "\xcb\xe5\x47\xbb\xa7\xd1\x9d",
- .ilen = 47,
- .result = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
- "\x10\x57\x85\x39\x93\x8f\xaf\x70"
- "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd"
- "\x98\x34\xab\x37\x56\xae\x32",
- .rlen = 31,
- }, {
- .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
- "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
- .klen = 16,
- .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
- "\x50\xc4\xde\x82\x90\x21\x11\x73",
- .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
- "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
- "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
- "\x29\x56\x52\x19\x79\xf5\xe9\x37",
- .alen = 32,
- .input = "\x62\xdc\x2d\x68\x2d\x71\xbb\x33"
- "\x13\xdf\xc0\x46\xf6\x61\x94\xa7"
- "\x60\xd3\xd4\xca\xd9\xbe\x82\xf3"
- "\xf1\x5b\xa0\xfa\x15\xba\xda\xea"
- "\x87\x68\x47\x08\x5d\xdd\x83\xb0"
- "\x60\xf4\x93\x20\xdf\x34\x8f\xea",
- .ilen = 48,
- .result = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
- "\x91\x31\x37\xcb\x8d\xb3\x72\x76"
- "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7"
- "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec",
- .rlen = 32,
- }, {
- .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
- "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
- .klen = 16,
- .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
- "\xd1\x9e\x90\x13\x8a\x45\xd3\x79",
- .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
- "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
- "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
- "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
- "\x9d",
- .alen = 33,
- .input = "\x84\xc5\x21\xab\xe1\xeb\xbb\x6d"
- "\xaa\x2a\xaf\xeb\x3b\x3b\x69\xe7"
- "\x2c\x47\xef\x9d\xb7\x53\x36\xb7"
- "\xb6\xf5\xe5\xa8\xc9\x9e\x02\xd7"
- "\x83\x88\xc2\xbd\x2f\xf9\x10\xc0"
- "\xf5\xa1\x6e\xd3\x97\x64\x82\xa3"
- "\xfb\xda\x2c\xb1\x94\xa1\x58\x32"
- "\xe8\xd4\x39\xfc\x9e\x26\xf9\xf1"
- "\x61\xe6\xae\x07\xf2\xe0\xa7\x44"
- "\x96\x28\x3b\xee\x6b\xc6\x16\x31"
- "\x3f",
- .ilen = 81,
- .result = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
- "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c"
- "\x4f\x2e\xe8\x55\x66\x80\x27\x00"
- "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3"
- "\x21\x78\x55\x9d\x9c\x65\x7b\xcd"
- "\x0a\x34\x97\xff\x47\x37\xb0\x2a"
- "\x80\x0d\x19\x98\x33\xa9\x7a\xe3"
- "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01"
- "\xbd",
- .rlen = 65,
- }, {
- .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
- "\x32\x42\x15\x80\x85\xa1\x65\xfe",
- .klen = 16,
- .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
- "\x52\x79\x42\xa5\x84\x6a\x96\x7f",
- .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
- "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
- "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
- "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
- "\xac\xfa\x58\x7f\xe5\x73\x09\x8c"
- "\x1d\x40\x87\x8c\xd9\x75\xc0\x55"
- "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb"
- "\x09\x4f\x77\x62\x88\x2d\xf2\x68"
- "\x54",
- .alen = 65,
- .input = "\x8f\x23\x47\xfb\xf2\xac\x23\x83"
- "\x77\x09\xac\x74\xef\xd2\x56\xae"
- "\x20\x7b\x7b\xca\x45\x8e\xc8\xc2"
- "\x50\xbd\xc7\x44\x1c\x54\x98\xd8"
- "\x1f\xd0\x9a\x79\xaa\xf9\xe1\xb3"
- "\xb4\x98\x5a\x9b\xe4\x4d\xbf\x4e"
- "\x39",
- .ilen = 49,
- .result = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
- "\x93\xe6\x9b\xee\x81\xfc\xf7\x82"
- "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a"
- "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99"
- "\x2f",
- .rlen = 33,
- }, {
- .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
- "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
- .klen = 16,
- .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
- "\xd3\x53\xf4\x36\x7e\x8e\x59\x85",
- .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
- "\xf3\x89\x20\x5b\x7c\x57\x89\x07",
- .alen = 16,
- .input = "\x42\xc3\x58\xfb\x29\xe2\x4a\x56"
- "\xf1\xf5\xe1\x51\x55\x4b\x0a\x45"
- "\x46\xb5\x8d\xac\xb6\x34\xd8\x8b"
- "\xde\x20\x59\x77\xc1\x74\x90",
- .ilen = 31,
- .result = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
- "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
- .rlen = 16,
- }, {
- .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
- "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
- .klen = 16,
- .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
- "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c",
- .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
- "\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
- .alen = 16,
- .input = "\xb2\xfb\xf6\x97\x69\x7a\xe9\xec"
- "\xe2\x94\xa1\x8b\xa0\x2b\x60\x72"
- "\x1d\x04\xdd\x6a\xef\x46\x8f\x68"
- "\xe9\xe0\x17\x45\x70\x12",
- .ilen = 30,
- .result = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
- "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
- .rlen = 16,
- }, {
- .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
- "\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
- .klen = 16,
- .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
- "\xd5\x07\x58\x59\x72\xd7\xde\x92",
- .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
- "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
- .alen = 16,
- .input = "\x47\xda\x54\x42\x51\x72\xc4\x8b"
+ .plen = 16,
+ .ctext = "\x47\xda\x54\x42\x51\x72\xc4\x8b"
"\xf5\x57\x0f\x2f\x49\x0e\x11\x3b"
"\x78\x93\xec\xfc\xf4\xff\xe1\x2d",
- .ilen = 24,
- .result = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
- "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
- .rlen = 16,
+ .clen = 24,
},
};
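
The hunks above fold the separate encryption and decryption tables into one template whose entries carry both directions: .input/.ilen/.result/.rlen become .ptext/.plen/.ctext/.clen. The following is a hedged, self-contained sketch (not the kernel's testmgr code) of how a single merged entry can drive both the encrypt and decrypt comparisons; the toy XOR "cipher", the one-byte tag, and the helper name check_aead_vec() are assumptions made purely for illustration, while the field names match the diff above.

/*
 * Illustrative sketch only: a trimmed-down aead_testvec with the merged
 * .ptext/.plen/.ctext/.clen fields, checked in both directions against
 * a toy transform.  Everything except the field names is hypothetical.
 */
#include <stdio.h>
#include <string.h>

struct aead_testvec {
	const unsigned char *ptext, *ctext;   /* merged fields, as in the diff */
	unsigned int plen, clen;
};

/* Toy transform: XOR with 0x5a, "tag" = a single 0xff byte appended. */
static void toy_encrypt(const unsigned char *in, unsigned int len,
			unsigned char *out)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		out[i] = in[i] ^ 0x5a;
	out[len] = 0xff;
}

static void toy_decrypt(const unsigned char *in, unsigned int len,
			unsigned char *out)
{
	unsigned int i;

	for (i = 0; i < len; i++)             /* len excludes the tag */
		out[i] = in[i] ^ 0x5a;
}

/* One merged vector is exercised in both directions. */
static int check_aead_vec(const struct aead_testvec *v)
{
	unsigned char buf[64];

	toy_encrypt(v->ptext, v->plen, buf);
	if (memcmp(buf, v->ctext, v->clen))
		return -1;                     /* encryption mismatch */

	toy_decrypt(v->ctext, v->plen, buf);
	if (memcmp(buf, v->ptext, v->plen))
		return -1;                     /* decryption mismatch */

	return 0;
}

int main(void)
{
	static const unsigned char pt[] = { 0x01, 0x02, 0x03 };
	static const unsigned char ct[] = { 0x5b, 0x58, 0x59, 0xff };
	struct aead_testvec v = {
		.ptext = pt, .plen = sizeof(pt),
		.ctext = ct, .clen = sizeof(ct),
	};

	printf("%s\n", check_aead_vec(&v) ? "mismatch" : "ok");
	return 0;
}

The same rename and merge is applied verbatim to the aegis128l and aegis256 templates in the hunks that follow, so the sketch is not repeated there.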
@@ -21538,7 +19289,7 @@ static const struct aead_testvec aegis128_dec_tv_template[] = {
* https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
* (see crypto_aead/aegis128l/)
*/
-static const struct aead_testvec aegis128l_enc_tv_template[] = {
+static const struct aead_testvec aegis128l_tv_template[] = {
{
.key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
"\x20\x36\x2c\x24\xfe\xc9\x30\x81",
@@ -21547,11 +19298,11 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x40\x6d\x59\x48\xfc\x92\x61\x03",
.assoc = "",
.alen = 0,
- .input = "",
- .ilen = 0,
- .result = "\x30\x4f\xf3\xe9\xb1\xfa\x81\xa6"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x30\x4f\xf3\xe9\xb1\xfa\x81\xa6"
"\x20\x72\x78\xdd\x93\xc8\x57\xef",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
"\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
@@ -21560,12 +19311,12 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\xc1\x47\x0b\xda\xf6\xb6\x23\x09",
.assoc = "",
.alen = 0,
- .input = "\x79",
- .ilen = 1,
- .result = "\xa9\x24\xa0\xb6\x2d\xdd\x29\xdb"
+ .ptext = "\x79",
+ .plen = 1,
+ .ctext = "\xa9\x24\xa0\xb6\x2d\xdd\x29\xdb"
"\x40\xb3\x71\xc5\x22\x58\x31\x77"
"\x6d",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
"\x22\xea\x90\x47\xf2\x11\xb5\x8e",
@@ -21574,14 +19325,14 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x42\x21\xbd\x6b\xf0\xda\xe6\x0f",
.assoc = "",
.alen = 0,
- .input = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
+ .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
"\x82\x8e\x16\xb4\xed\x6d\x47",
- .ilen = 15,
- .result = "\xbb\x0a\x53\xc4\xaa\x7e\xa4\x03"
+ .plen = 15,
+ .ctext = "\xbb\x0a\x53\xc4\xaa\x7e\xa4\x03"
"\x2b\xee\x62\x99\x7b\x98\x13\x1f"
"\xe0\x76\x4c\x2e\x53\x99\x4f\xbe"
"\xe1\xa8\x04\x7f\xe1\x71\xbe",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
"\xa2\xc5\x42\xd8\xec\x36\x78\x94",
@@ -21590,14 +19341,14 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\xc3\xfb\x6f\xfd\xea\xff\xa9\x15",
.assoc = "",
.alen = 0,
- .input = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
+ .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
"\x03\x68\xc8\x45\xe7\x91\x0a\x18",
- .ilen = 16,
- .result = "\x66\xdf\x6e\x71\xc0\x6e\xa4\x4c"
+ .plen = 16,
+ .ctext = "\x66\xdf\x6e\x71\xc0\x6e\xa4\x4c"
"\x9d\xb7\x8c\x9a\xdb\x1f\xd2\x2e"
"\x23\xb6\xa4\xfb\xd3\x86\xdd\xbb"
"\xde\x54\x9b\xf5\x92\x8b\x93\xc5",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
"\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
@@ -21606,16 +19357,16 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x44\xd5\x21\x8e\xe4\x23\x6b\x1c",
.assoc = "",
.alen = 0,
- .input = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
+ .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
"\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f"
"\xd3",
- .ilen = 17,
- .result = "\x4f\xc3\x69\xb6\xd3\xa4\x64\x8b"
+ .plen = 17,
+ .ctext = "\x4f\xc3\x69\xb6\xd3\xa4\x64\x8b"
"\x71\xc3\x8a\x91\x22\x4f\x1b\xd2"
"\x33\x6d\x86\xbc\xf8\x2f\x06\xf9"
"\x82\x64\xc7\x72\x00\x30\xfc\xf0"
"\xf8",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
"\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
@@ -21624,18 +19375,18 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\xc5\xb0\xd3\x1f\xde\x48\x2e\x22",
.assoc = "",
.alen = 0,
- .input = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
+ .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
"\x05\x1d\x2c\x68\xdb\xda\x8f\x25"
"\xfe\x8d\x45\x19\x1e\xc0\x0b\x99"
"\x88\x11\x39\x12\x1c\x3a\xbb",
- .ilen = 31,
- .result = "\xe3\x93\x15\xae\x5f\x9d\x3c\xb5"
+ .plen = 31,
+ .ctext = "\xe3\x93\x15\xae\x5f\x9d\x3c\xb5"
"\xd6\x9d\xee\xee\xcf\xaa\xaf\xe1"
"\x45\x10\x96\xe0\xbf\x55\x0f\x4c"
"\x1a\xfd\xf4\xda\x4e\x10\xde\xc9"
"\x0e\x6f\xc7\x3c\x49\x94\x41\xfc"
"\x59\x28\x88\x3c\x79\x10\x6b",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
"\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
@@ -21644,18 +19395,18 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x45\x8a\x85\xb1\xd8\x6c\xf1\x28",
.assoc = "",
.alen = 0,
- .input = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
+ .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
"\x86\xf7\xde\xfa\xd5\xfe\x52\x2b"
"\x28\x50\x51\x9d\x24\x60\x8d\xb3"
"\x49\x3e\x17\xea\xf6\x99\x5a\xdd",
- .ilen = 32,
- .result = "\x1c\x8e\x22\x34\xfd\xab\xe6\x0d"
+ .plen = 32,
+ .ctext = "\x1c\x8e\x22\x34\xfd\xab\xe6\x0d"
"\x1c\x9f\x06\x54\x8b\x0b\xb4\x40"
"\xde\x11\x59\x3e\xfd\x74\xf6\x42"
"\x97\x17\xf7\x24\xb6\x7e\xc4\xc6"
"\x06\xa3\x94\xda\x3d\x7f\x55\x0a"
"\x92\x07\x2f\xa6\xf3\x6b\x2c\xfc",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
"\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
@@ -21664,11 +19415,11 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\xc6\x64\x37\x42\xd2\x90\xb3\x2e",
.assoc = "\xd5",
.alen = 1,
- .input = "",
- .ilen = 0,
- .result = "\xa0\x2a\xb4\x9a\x91\x00\x15\xb8"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xa0\x2a\xb4\x9a\x91\x00\x15\xb8"
"\x0f\x9a\x15\x60\x0e\x9b\x13\x8f",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
"\x27\x08\xbd\xaf\xce\xec\x45\xb3",
@@ -21678,11 +19429,11 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
.assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73"
"\x68\x75\x16\xf8\xcb\x7e\xa7",
.alen = 15,
- .input = "",
- .ilen = 0,
- .result = "\x4c\x26\xad\x9c\x14\xfd\x9c\x8c"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x4c\x26\xad\x9c\x14\xfd\x9c\x8c"
"\x84\xfb\x26\xfb\xd5\xca\x62\x39",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
"\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
@@ -21692,11 +19443,11 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
.assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f"
"\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc",
.alen = 16,
- .input = "",
- .ilen = 0,
- .result = "\x45\x85\x0e\x0f\xf4\xae\x96\xa1"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x45\x85\x0e\x0f\xf4\xae\x96\xa1"
"\x99\x4d\x6d\xb4\x67\x32\xb0\x3a",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
"\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
@@ -21707,11 +19458,11 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2"
"\x07",
.alen = 17,
- .input = "",
- .ilen = 0,
- .result = "\x33\xb1\x42\x97\x8e\x16\x7b\x63"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x33\xb1\x42\x97\x8e\x16\x7b\x63"
"\x06\xba\x5b\xcb\xae\x6d\x8b\x56",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
"\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
@@ -21723,11 +19474,11 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x31\x6b\x08\x12\xfc\xd8\x37\x2d"
"\xe0\x17\x3a\x2e\x83\x5c\x8f",
.alen = 31,
- .input = "",
- .ilen = 0,
- .result = "\xda\x44\x08\x8c\x2a\xa5\x07\x35"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xda\x44\x08\x8c\x2a\xa5\x07\x35"
"\x0b\x54\x4e\x6d\xe3\xfd\xc4\x5f",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
"\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
@@ -21739,11 +19490,11 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x5c\x2d\x14\x96\x01\x78\xb9\x47"
"\xa1\x44\x19\x06\x5d\xbb\x2e\x2f",
.alen = 32,
- .input = "",
- .ilen = 0,
- .result = "\x1b\xb1\xf1\xa8\x9e\xc2\xb2\x88"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x1b\xb1\xf1\xa8\x9e\xc2\xb2\x88"
"\x40\x7f\x7b\x19\x7a\x52\x8c\xf0",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
"\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
@@ -21752,12 +19503,12 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\xcc\x81\x63\xab\xae\x6b\x43\x54",
.assoc = "\x40",
.alen = 1,
- .input = "\x4f",
- .ilen = 1,
- .result = "\x6e\xc8\xfb\x15\x9d\x98\x49\xc9"
+ .ptext = "\x4f",
+ .plen = 1,
+ .ctext = "\x6e\xc8\xfb\x15\x9d\x98\x49\xc9"
"\xa0\x98\x09\x85\xbe\x56\x8e\x79"
"\xf4",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
"\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
@@ -21767,14 +19518,14 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
.assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
"\x6d\x92\x42\x61\xa7\x58\x37",
.alen = 15,
- .input = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
+ .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
"\x8d\xc8\x6e\x85\xa5\x21\x67",
- .ilen = 15,
- .result = "\x99\x2e\x84\x50\x64\x5c\xab\x29"
+ .plen = 15,
+ .ctext = "\x99\x2e\x84\x50\x64\x5c\xab\x29"
"\x20\xba\xb9\x2f\x62\x3a\xce\x2a"
"\x75\x25\x3b\xe3\x40\xe0\x1d\xfc"
"\x20\x63\x0b\x49\x7e\x97\x08",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
"\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
@@ -21784,14 +19535,14 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
.assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
"\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
.alen = 16,
- .input = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
+ .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
"\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
- .ilen = 16,
- .result = "\xd9\x8e\xfd\x50\x8f\x02\x9f\xee"
+ .plen = 16,
+ .ctext = "\xd9\x8e\xfd\x50\x8f\x02\x9f\xee"
"\x78\x08\x12\xec\x09\xaf\x53\x14"
"\x90\x3e\x3d\x76\xad\x71\x21\x08"
"\x77\xe5\x4b\x15\xc2\xe6\xbc\xdb",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
"\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
@@ -21802,16 +19553,16 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
"\x05",
.alen = 17,
- .input = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
+ .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
"\x8f\x7d\xd3\xa8\x99\x6a\xed\x69"
"\xd0",
- .ilen = 17,
- .result = "\xf3\xe7\x95\x86\xcf\x34\x95\x96"
+ .plen = 17,
+ .ctext = "\xf3\xe7\x95\x86\xcf\x34\x95\x96"
"\x17\xfe\x1b\xae\x1b\x31\xf2\x1a"
"\xbd\xbc\xc9\x4e\x11\x29\x09\x5c"
"\x05\xd3\xb4\x2e\x4a\x74\x59\x49"
"\x7d",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
"\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
@@ -21823,18 +19574,18 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
"\x68\x28\x73\x40\x9f\x96\x4a",
.alen = 31,
- .input = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
+ .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
"\x10\x57\x85\x39\x93\x8f\xaf\x70"
"\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd"
"\x98\x34\xab\x37\x56\xae\x32",
- .ilen = 31,
- .result = "\x06\x96\xb2\xbf\x63\xf4\x1e\x24"
+ .plen = 31,
+ .ctext = "\x06\x96\xb2\xbf\x63\xf4\x1e\x24"
"\x0d\x19\x15\x61\x65\x3b\x06\x26"
"\x71\xe8\x7e\x16\xdb\x96\x01\x01"
"\x52\xcd\x49\x5b\x07\x33\x4e\xe7"
"\xaa\x91\xf5\xd5\xc6\xfe\x41\xb5"
"\xed\x90\xce\xb9\xcd\xcc\xa1",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
"\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
@@ -21846,18 +19597,18 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
"\x29\x56\x52\x19\x79\xf5\xe9\x37",
.alen = 32,
- .input = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
+ .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
"\x91\x31\x37\xcb\x8d\xb3\x72\x76"
"\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7"
"\x5a\x61\x8a\x0f\x30\x0d\xd1\xec",
- .ilen = 32,
- .result = "\xf9\xd7\xee\x17\xfd\x24\xcd\xf1"
+ .plen = 32,
+ .ctext = "\xf9\xd7\xee\x17\xfd\x24\xcd\xf1"
"\xbc\x0f\x35\x97\x97\x0c\x4b\x18"
"\xce\x58\xc8\x3b\xd4\x85\x93\x79"
"\xcc\x9c\xea\xc1\x73\x13\x0b\x4c"
"\xcc\x6f\x28\xf8\xa4\x4e\xb8\x56"
"\x64\x4e\x47\xce\xb2\xb4\x92\xb4",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
"\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
@@ -21870,7 +19621,7 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\xeb\x83\x31\xf1\x54\x54\x89\x0d"
"\x9d",
.alen = 33,
- .input = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
+ .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
"\x12\x0b\xe9\x5c\x87\xd7\x35\x7c"
"\x4f\x2e\xe8\x55\x66\x80\x27\x00"
"\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3"
@@ -21879,8 +19630,8 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x80\x0d\x19\x98\x33\xa9\x7a\xe3"
"\x2e\x4c\xc6\xf3\x8c\x88\x42\x01"
"\xbd",
- .ilen = 65,
- .result = "\x58\xfa\x3a\x3d\xd9\x88\x63\xe8"
+ .plen = 65,
+ .ctext = "\x58\xfa\x3a\x3d\xd9\x88\x63\xe8"
"\xc5\x78\x50\x8b\x4a\xc9\xdf\x7f"
"\x4b\xfa\xc8\x2e\x67\x43\xf3\x63"
"\x42\x8e\x99\x5a\x9c\x0b\x84\x77"
@@ -21891,7 +19642,7 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x8a\xd6\x8c\x5a\xe4\x95\xd1\x8d"
"\xf7\x33\x64\xc1\xd3\xf2\xfc\x35"
"\x01",
- .rlen = 81,
+ .clen = 81,
}, {
.key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
"\x32\x42\x15\x80\x85\xa1\x65\xfe",
@@ -21908,20 +19659,20 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
"\x09\x4f\x77\x62\x88\x2d\xf2\x68"
"\x54",
.alen = 65,
- .input = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
+ .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
"\x93\xe6\x9b\xee\x81\xfc\xf7\x82"
"\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a"
"\xdc\xbc\x47\xc0\xe4\xcb\x10\x99"
"\x2f",
- .ilen = 33,
- .result = "\x4c\xa9\xac\x71\xed\x10\xa6\x24"
+ .plen = 33,
+ .ctext = "\x4c\xa9\xac\x71\xed\x10\xa6\x24"
"\xb7\xa7\xdf\x8b\xf5\xc2\x41\xcb"
"\x05\xc9\xd6\x97\xb6\x10\x7f\x17"
"\xc2\xc0\x93\xcf\xe0\x94\xfd\x99"
"\xf2\x62\x25\x28\x01\x23\x6f\x8b"
"\x04\x52\xbc\xb0\x3e\x66\x52\x90"
"\x9f",
- .rlen = 49,
+ .clen = 49,
}, {
.key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
"\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
@@ -21931,14 +19682,14 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
.assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
"\xf3\x89\x20\x5b\x7c\x57\x89\x07",
.alen = 16,
- .input = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
+ .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
"\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
- .ilen = 16,
- .result = "\x6d\xed\x04\x7a\x2f\x0c\x30\xa5"
+ .plen = 16,
+ .ctext = "\x6d\xed\x04\x7a\x2f\x0c\x30\xa5"
"\x96\xe6\x97\xe4\x10\xeb\x40\x95"
"\xc5\x9a\xdf\x31\xd5\xa5\xa6\xec"
"\x05\xa8\x31\x50\x11\x19\x44",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
"\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
@@ -21948,14 +19699,14 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
.assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
"\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
.alen = 16,
- .input = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
+ .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
"\x95\x9a\xff\x10\x75\x45\x7d\x8f",
- .ilen = 16,
- .result = "\x30\x95\x7d\xea\xdc\x62\xc0\x88"
+ .plen = 16,
+ .ctext = "\x30\x95\x7d\xea\xdc\x62\xc0\x88"
"\xa1\xe3\x8d\x8c\xac\x04\x10\xa7"
"\xfa\xfa\x07\xbd\xa0\xf0\x36\xeb"
"\x21\x93\x2e\x31\x84\x83",
- .rlen = 30,
+ .clen = 30,
}, {
.key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
"\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
@@ -21965,450 +19716,13 @@ static const struct aead_testvec aegis128l_enc_tv_template[] = {
.assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
"\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
.alen = 16,
- .input = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
+ .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
"\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
- .ilen = 16,
- .result = "\x93\xcd\xee\xd4\xcb\x9d\x8d\x16"
- "\x63\x0d\x43\xd5\x49\xca\xa8\x85"
- "\x49\xc0\xae\x13\xbc\x26\x1d\x4b",
- .rlen = 24,
- },
-};
-
-static const struct aead_testvec aegis128l_dec_tv_template[] = {
- {
- .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
- "\x20\x36\x2c\x24\xfe\xc9\x30\x81",
- .klen = 16,
- .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d"
- "\x40\x6d\x59\x48\xfc\x92\x61\x03",
- .assoc = "",
- .alen = 0,
- .input = "\x30\x4f\xf3\xe9\xb1\xfa\x81\xa6"
- "\x20\x72\x78\xdd\x93\xc8\x57\xef",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
- "\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
- .klen = 16,
- .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29"
- "\xc1\x47\x0b\xda\xf6\xb6\x23\x09",
- .assoc = "",
- .alen = 0,
- .input = "\xa9\x24\xa0\xb6\x2d\xdd\x29\xdb"
- "\x40\xb3\x71\xc5\x22\x58\x31\x77"
- "\x6d",
- .ilen = 17,
- .result = "\x79",
- .rlen = 1,
- }, {
- .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
- "\x22\xea\x90\x47\xf2\x11\xb5\x8e",
- .klen = 16,
- .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45"
- "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f",
- .assoc = "",
- .alen = 0,
- .input = "\xbb\x0a\x53\xc4\xaa\x7e\xa4\x03"
- "\x2b\xee\x62\x99\x7b\x98\x13\x1f"
- "\xe0\x76\x4c\x2e\x53\x99\x4f\xbe"
- "\xe1\xa8\x04\x7f\xe1\x71\xbe",
- .ilen = 31,
- .result = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
- "\x82\x8e\x16\xb4\xed\x6d\x47",
- .rlen = 15,
- }, {
- .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
- "\xa2\xc5\x42\xd8\xec\x36\x78\x94",
- .klen = 16,
- .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61"
- "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15",
- .assoc = "",
- .alen = 0,
- .input = "\x66\xdf\x6e\x71\xc0\x6e\xa4\x4c"
- "\x9d\xb7\x8c\x9a\xdb\x1f\xd2\x2e"
- "\x23\xb6\xa4\xfb\xd3\x86\xdd\xbb"
- "\xde\x54\x9b\xf5\x92\x8b\x93\xc5",
- .ilen = 32,
- .result = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
- "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
- .rlen = 16,
- }, {
- .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
- "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
- .klen = 16,
- .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d"
- "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c",
- .assoc = "",
- .alen = 0,
- .input = "\x4f\xc3\x69\xb6\xd3\xa4\x64\x8b"
- "\x71\xc3\x8a\x91\x22\x4f\x1b\xd2"
- "\x33\x6d\x86\xbc\xf8\x2f\x06\xf9"
- "\x82\x64\xc7\x72\x00\x30\xfc\xf0"
- "\xf8",
- .ilen = 33,
- .result = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
- "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f"
- "\xd3",
- .rlen = 17,
- }, {
- .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
- "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
- .klen = 16,
- .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98"
- "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22",
- .assoc = "",
- .alen = 0,
- .input = "\xe3\x93\x15\xae\x5f\x9d\x3c\xb5"
- "\xd6\x9d\xee\xee\xcf\xaa\xaf\xe1"
- "\x45\x10\x96\xe0\xbf\x55\x0f\x4c"
- "\x1a\xfd\xf4\xda\x4e\x10\xde\xc9"
- "\x0e\x6f\xc7\x3c\x49\x94\x41\xfc"
- "\x59\x28\x88\x3c\x79\x10\x6b",
- .ilen = 47,
- .result = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
- "\x05\x1d\x2c\x68\xdb\xda\x8f\x25"
- "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99"
- "\x88\x11\x39\x12\x1c\x3a\xbb",
- .rlen = 31,
- }, {
- .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
- "\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
- .klen = 16,
- .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4"
- "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28",
- .assoc = "",
- .alen = 0,
- .input = "\x1c\x8e\x22\x34\xfd\xab\xe6\x0d"
- "\x1c\x9f\x06\x54\x8b\x0b\xb4\x40"
- "\xde\x11\x59\x3e\xfd\x74\xf6\x42"
- "\x97\x17\xf7\x24\xb6\x7e\xc4\xc6"
- "\x06\xa3\x94\xda\x3d\x7f\x55\x0a"
- "\x92\x07\x2f\xa6\xf3\x6b\x2c\xfc",
- .ilen = 48,
- .result = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
- "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b"
- "\x28\x50\x51\x9d\x24\x60\x8d\xb3"
- "\x49\x3e\x17\xea\xf6\x99\x5a\xdd",
- .rlen = 32,
- }, {
- .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
- "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
- .klen = 16,
- .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0"
- "\xc6\x64\x37\x42\xd2\x90\xb3\x2e",
- .assoc = "\xd5",
- .alen = 1,
- .input = "\xa0\x2a\xb4\x9a\x91\x00\x15\xb8"
- "\x0f\x9a\x15\x60\x0e\x9b\x13\x8f",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
- "\x27\x08\xbd\xaf\xce\xec\x45\xb3",
- .klen = 16,
- .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
- "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34",
- .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73"
- "\x68\x75\x16\xf8\xcb\x7e\xa7",
- .alen = 15,
- .input = "\x4c\x26\xad\x9c\x14\xfd\x9c\x8c"
- "\x84\xfb\x26\xfb\xd5\xca\x62\x39",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
- "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
- .klen = 16,
- .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
- "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b",
- .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f"
- "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc",
- .alen = 16,
- .input = "\x45\x85\x0e\x0f\xf4\xae\x96\xa1"
- "\x99\x4d\x6d\xb4\x67\x32\xb0\x3a",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
- "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
- .klen = 16,
- .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
- "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41",
- .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab"
- "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2"
- "\x07",
- .alen = 17,
- .input = "\x33\xb1\x42\x97\x8e\x16\x7b\x63"
- "\x06\xba\x5b\xcb\xae\x6d\x8b\x56",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
- "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
- .klen = 16,
- .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
- "\xca\xcd\xff\x88\xba\x22\xbe\x47",
- .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6"
- "\xea\x03\x2c\xac\xb9\xeb\xef\xc9"
- "\x31\x6b\x08\x12\xfc\xd8\x37\x2d"
- "\xe0\x17\x3a\x2e\x83\x5c\x8f",
- .alen = 31,
- .input = "\xda\x44\x08\x8c\x2a\xa5\x07\x35"
- "\x0b\x54\x4e\x6d\xe3\xfd\xc4\x5f",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
- "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
- .klen = 16,
- .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
- "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d",
- .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2"
- "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf"
- "\x5c\x2d\x14\x96\x01\x78\xb9\x47"
- "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f",
- .alen = 32,
- .input = "\x1b\xb1\xf1\xa8\x9e\xc2\xb2\x88"
- "\x40\x7f\x7b\x19\x7a\x52\x8c\xf0",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
- "\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
- .klen = 16,
- .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77"
- "\xcc\x81\x63\xab\xae\x6b\x43\x54",
- .assoc = "\x40",
- .alen = 1,
- .input = "\x6e\xc8\xfb\x15\x9d\x98\x49\xc9"
- "\xa0\x98\x09\x85\xbe\x56\x8e\x79"
- "\xf4",
- .ilen = 17,
- .result = "\x4f",
- .rlen = 1,
- }, {
- .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
- "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
- .klen = 16,
- .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
- "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a",
- .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
- "\x6d\x92\x42\x61\xa7\x58\x37",
- .alen = 15,
- .input = "\x99\x2e\x84\x50\x64\x5c\xab\x29"
- "\x20\xba\xb9\x2f\x62\x3a\xce\x2a"
- "\x75\x25\x3b\xe3\x40\xe0\x1d\xfc"
- "\x20\x63\x0b\x49\x7e\x97\x08",
- .ilen = 31,
- .result = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
- "\x8d\xc8\x6e\x85\xa5\x21\x67",
- .rlen = 15,
- }, {
- .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
- "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
- .klen = 16,
- .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
- "\xce\x36\xc7\xce\xa2\xb4\xc9\x60",
- .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
- "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
- .alen = 16,
- .input = "\xd9\x8e\xfd\x50\x8f\x02\x9f\xee"
- "\x78\x08\x12\xec\x09\xaf\x53\x14"
- "\x90\x3e\x3d\x76\xad\x71\x21\x08"
- "\x77\xe5\x4b\x15\xc2\xe6\xbc\xdb",
- .ilen = 32,
- .result = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
- "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
- .rlen = 16,
- }, {
- .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
- "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
- .klen = 16,
- .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
- "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66",
- .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
- "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
- "\x05",
- .alen = 17,
- .input = "\xf3\xe7\x95\x86\xcf\x34\x95\x96"
- "\x17\xfe\x1b\xae\x1b\x31\xf2\x1a"
- "\xbd\xbc\xc9\x4e\x11\x29\x09\x5c"
- "\x05\xd3\xb4\x2e\x4a\x74\x59\x49"
- "\x7d",
- .ilen = 33,
- .result = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
- "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69"
- "\xd0",
- .rlen = 17,
- }, {
- .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
- "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
- .klen = 16,
- .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
- "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d",
- .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
- "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
- "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
- "\x68\x28\x73\x40\x9f\x96\x4a",
- .alen = 31,
- .input = "\x06\x96\xb2\xbf\x63\xf4\x1e\x24"
- "\x0d\x19\x15\x61\x65\x3b\x06\x26"
- "\x71\xe8\x7e\x16\xdb\x96\x01\x01"
- "\x52\xcd\x49\x5b\x07\x33\x4e\xe7"
- "\xaa\x91\xf5\xd5\xc6\xfe\x41\xb5"
- "\xed\x90\xce\xb9\xcd\xcc\xa1",
- .ilen = 47,
- .result = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
- "\x10\x57\x85\x39\x93\x8f\xaf\x70"
- "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd"
- "\x98\x34\xab\x37\x56\xae\x32",
- .rlen = 31,
- }, {
- .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
- "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
- .klen = 16,
- .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
- "\x50\xc4\xde\x82\x90\x21\x11\x73",
- .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
- "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
- "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
- "\x29\x56\x52\x19\x79\xf5\xe9\x37",
- .alen = 32,
- .input = "\xf9\xd7\xee\x17\xfd\x24\xcd\xf1"
- "\xbc\x0f\x35\x97\x97\x0c\x4b\x18"
- "\xce\x58\xc8\x3b\xd4\x85\x93\x79"
- "\xcc\x9c\xea\xc1\x73\x13\x0b\x4c"
- "\xcc\x6f\x28\xf8\xa4\x4e\xb8\x56"
- "\x64\x4e\x47\xce\xb2\xb4\x92\xb4",
- .ilen = 48,
- .result = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
- "\x91\x31\x37\xcb\x8d\xb3\x72\x76"
- "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7"
- "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec",
- .rlen = 32,
- }, {
- .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
- "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
- .klen = 16,
- .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
- "\xd1\x9e\x90\x13\x8a\x45\xd3\x79",
- .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
- "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
- "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
- "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
- "\x9d",
- .alen = 33,
- .input = "\x58\xfa\x3a\x3d\xd9\x88\x63\xe8"
- "\xc5\x78\x50\x8b\x4a\xc9\xdf\x7f"
- "\x4b\xfa\xc8\x2e\x67\x43\xf3\x63"
- "\x42\x8e\x99\x5a\x9c\x0b\x84\x77"
- "\xbc\x46\x76\x48\x82\xc7\x57\x96"
- "\xe1\x65\xd1\xed\x1d\xdd\x80\x24"
- "\xa6\x4d\xa9\xf1\x53\x8b\x5e\x0e"
- "\x26\xb9\xcc\x37\xe5\x43\xe1\x5a"
- "\x8a\xd6\x8c\x5a\xe4\x95\xd1\x8d"
- "\xf7\x33\x64\xc1\xd3\xf2\xfc\x35"
- "\x01",
- .ilen = 81,
- .result = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
- "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c"
- "\x4f\x2e\xe8\x55\x66\x80\x27\x00"
- "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3"
- "\x21\x78\x55\x9d\x9c\x65\x7b\xcd"
- "\x0a\x34\x97\xff\x47\x37\xb0\x2a"
- "\x80\x0d\x19\x98\x33\xa9\x7a\xe3"
- "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01"
- "\xbd",
- .rlen = 65,
- }, {
- .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
- "\x32\x42\x15\x80\x85\xa1\x65\xfe",
- .klen = 16,
- .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
- "\x52\x79\x42\xa5\x84\x6a\x96\x7f",
- .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
- "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
- "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
- "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
- "\xac\xfa\x58\x7f\xe5\x73\x09\x8c"
- "\x1d\x40\x87\x8c\xd9\x75\xc0\x55"
- "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb"
- "\x09\x4f\x77\x62\x88\x2d\xf2\x68"
- "\x54",
- .alen = 65,
- .input = "\x4c\xa9\xac\x71\xed\x10\xa6\x24"
- "\xb7\xa7\xdf\x8b\xf5\xc2\x41\xcb"
- "\x05\xc9\xd6\x97\xb6\x10\x7f\x17"
- "\xc2\xc0\x93\xcf\xe0\x94\xfd\x99"
- "\xf2\x62\x25\x28\x01\x23\x6f\x8b"
- "\x04\x52\xbc\xb0\x3e\x66\x52\x90"
- "\x9f",
- .ilen = 49,
- .result = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
- "\x93\xe6\x9b\xee\x81\xfc\xf7\x82"
- "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a"
- "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99"
- "\x2f",
- .rlen = 33,
- }, {
- .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
- "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
- .klen = 16,
- .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
- "\xd3\x53\xf4\x36\x7e\x8e\x59\x85",
- .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
- "\xf3\x89\x20\x5b\x7c\x57\x89\x07",
- .alen = 16,
- .input = "\x6d\xed\x04\x7a\x2f\x0c\x30\xa5"
- "\x96\xe6\x97\xe4\x10\xeb\x40\x95"
- "\xc5\x9a\xdf\x31\xd5\xa5\xa6\xec"
- "\x05\xa8\x31\x50\x11\x19\x44",
- .ilen = 31,
- .result = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
- "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
- .rlen = 16,
- }, {
- .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
- "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
- .klen = 16,
- .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
- "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c",
- .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
- "\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
- .alen = 16,
- .input = "\x30\x95\x7d\xea\xdc\x62\xc0\x88"
- "\xa1\xe3\x8d\x8c\xac\x04\x10\xa7"
- "\xfa\xfa\x07\xbd\xa0\xf0\x36\xeb"
- "\x21\x93\x2e\x31\x84\x83",
- .ilen = 30,
- .result = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
- "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
- .rlen = 16,
- }, {
- .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
- "\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
- .klen = 16,
- .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
- "\xd5\x07\x58\x59\x72\xd7\xde\x92",
- .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
- "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
- .alen = 16,
- .input = "\x93\xcd\xee\xd4\xcb\x9d\x8d\x16"
+ .plen = 16,
+ .ctext = "\x93\xcd\xee\xd4\xcb\x9d\x8d\x16"
"\x63\x0d\x43\xd5\x49\xca\xa8\x85"
"\x49\xc0\xae\x13\xbc\x26\x1d\x4b",
- .ilen = 24,
- .result = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
- "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
- .rlen = 16,
+ .clen = 24,
},
};
@@ -22419,7 +19733,7 @@ static const struct aead_testvec aegis128l_dec_tv_template[] = {
* https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
* (see crypto_aead/aegis256/)
*/
-static const struct aead_testvec aegis256_enc_tv_template[] = {
+static const struct aead_testvec aegis256_tv_template[] = {
{
.key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
"\x20\x36\x2c\x24\xfe\xc9\x30\x81"
@@ -22432,11 +19746,11 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x60\x16\x6f\xec\x6d\x2f\xcf\x6b",
.assoc = "",
.alen = 0,
- .input = "",
- .ilen = 0,
- .result = "\xd5\x65\x3a\xa9\x03\x51\xd7\xaa"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xd5\x65\x3a\xa9\x03\x51\xd7\xaa"
"\xfa\x4b\xd8\xa2\x41\x9b\xc1\xb2",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
"\xa1\x10\xde\xb5\xf8\xed\xf3\x87"
@@ -22449,12 +19763,12 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x22\x44\x4e\xc4\x47\x8e\x6e\x41",
.assoc = "",
.alen = 0,
- .input = "\x79",
- .ilen = 1,
- .result = "\x84\xa2\x8f\xad\xdb\x8d\x2c\x16"
+ .ptext = "\x79",
+ .plen = 1,
+ .ctext = "\x84\xa2\x8f\xad\xdb\x8d\x2c\x16"
"\x9e\x89\xd9\x06\xa6\xa8\x14\x29"
"\x8b",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
"\x22\xea\x90\x47\xf2\x11\xb5\x8e"
@@ -22467,14 +19781,14 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\xe3\x71\x2d\x9c\x21\xed\x0e\x18",
.assoc = "",
.alen = 0,
- .input = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
+ .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
"\x82\x8e\x16\xb4\xed\x6d\x47",
- .ilen = 15,
- .result = "\x09\x94\x1f\xa6\x13\xc3\x74\x75"
+ .plen = 15,
+ .ctext = "\x09\x94\x1f\xa6\x13\xc3\x74\x75"
"\x17\xad\x8a\x0e\xd8\x66\x9a\x28"
"\xd7\x30\x66\x09\x2a\xdc\xfa\x2a"
"\x9f\x3b\xd7\xdd\x66\xd1\x2b",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
"\xa2\xc5\x42\xd8\xec\x36\x78\x94"
@@ -22487,14 +19801,14 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\xa4\x9f\x0b\x75\xfb\x4c\xad\xee",
.assoc = "",
.alen = 0,
- .input = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
+ .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
"\x03\x68\xc8\x45\xe7\x91\x0a\x18",
- .ilen = 16,
- .result = "\x8a\x46\xa2\x22\x8c\x03\xab\x6f"
+ .plen = 16,
+ .ctext = "\x8a\x46\xa2\x22\x8c\x03\xab\x6f"
"\x54\x63\x4e\x7f\xc9\x8e\xfa\x70"
"\x7b\xe5\x8d\x78\xbc\xe9\xb6\xa1"
"\x29\x17\xc8\x3b\x52\xa4\x98\x72",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
"\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a"
@@ -22507,16 +19821,16 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x66\xcd\xea\x4d\xd5\xab\x4c\xc5",
.assoc = "",
.alen = 0,
- .input = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
+ .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
"\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f"
"\xd3",
- .ilen = 17,
- .result = "\x71\x6b\x37\x0b\x02\x61\x28\x12"
+ .plen = 17,
+ .ctext = "\x71\x6b\x37\x0b\x02\x61\x28\x12"
"\x83\xab\x66\x90\x84\xc7\xd1\xc5"
"\xb2\x7a\xb4\x7b\xb4\xfe\x02\xb2"
"\xc0\x00\x39\x13\xb5\x51\x68\x44"
"\xad",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
"\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0"
@@ -22529,18 +19843,18 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x27\xfa\xc9\x26\xaf\x0a\xeb\x9c",
.assoc = "",
.alen = 0,
- .input = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
+ .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
"\x05\x1d\x2c\x68\xdb\xda\x8f\x25"
"\xfe\x8d\x45\x19\x1e\xc0\x0b\x99"
"\x88\x11\x39\x12\x1c\x3a\xbb",
- .ilen = 31,
- .result = "\xaf\xa4\x34\x0d\x59\xe6\x1c\x2f"
+ .plen = 31,
+ .ctext = "\xaf\xa4\x34\x0d\x59\xe6\x1c\x2f"
"\x06\x3b\x52\x18\x49\x75\x1b\xf0"
"\x53\x09\x72\x7b\x45\x79\xe0\xbe"
"\x89\x85\x23\x15\xb8\x79\x07\x4c"
"\x53\x7a\x15\x37\x0a\xee\xb7\xfb"
"\xc4\x1f\x12\x27\xcf\x77\x90",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
"\x25\x53\x58\x8c\xda\xa3\xc0\xa6"
@@ -22553,18 +19867,18 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\xe8\x28\xa8\xfe\x89\x69\x8b\x72",
.assoc = "",
.alen = 0,
- .input = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
+ .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
"\x86\xf7\xde\xfa\xd5\xfe\x52\x2b"
"\x28\x50\x51\x9d\x24\x60\x8d\xb3"
"\x49\x3e\x17\xea\xf6\x99\x5a\xdd",
- .ilen = 32,
- .result = "\xe2\xc9\x0b\x33\x31\x02\xb3\xb4"
+ .plen = 32,
+ .ctext = "\xe2\xc9\x0b\x33\x31\x02\xb3\xb4"
"\x33\xfe\xeb\xa8\xb7\x9b\xb2\xd7"
"\xeb\x0f\x05\x2b\xba\xb3\xca\xef"
"\xf6\xd1\xb6\xc0\xb9\x9b\x85\xc5"
"\xbf\x7a\x3e\xcc\x31\x76\x09\x80"
"\x32\x5d\xbb\xe8\x38\x0e\x77\xd3",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
"\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad"
@@ -22577,11 +19891,11 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\xaa\x55\x87\xd6\x63\xc8\x2a\x49",
.assoc = "\xd5",
.alen = 1,
- .input = "",
- .ilen = 0,
- .result = "\x96\x43\x30\xca\x6c\x4f\xd7\x12"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x96\x43\x30\xca\x6c\x4f\xd7\x12"
"\xba\xd9\xb3\x18\x86\xdf\xc3\x52",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
"\x27\x08\xbd\xaf\xce\xec\x45\xb3"
@@ -22595,11 +19909,11 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
.assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73"
"\x68\x75\x16\xf8\xcb\x7e\xa7",
.alen = 15,
- .input = "",
- .ilen = 0,
- .result = "\x2f\xab\x45\xe2\xa7\x46\xc5\x83"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x2f\xab\x45\xe2\xa7\x46\xc5\x83"
"\x11\x9f\xb0\x74\xee\xc7\x03\xdd",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
"\xa8\xe2\x6f\x41\xc8\x10\x08\xb9"
@@ -22613,11 +19927,11 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
.assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f"
"\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc",
.alen = 16,
- .input = "",
- .ilen = 0,
- .result = "\x16\x44\x73\x33\x5d\xf2\xb9\x04"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x16\x44\x73\x33\x5d\xf2\xb9\x04"
"\x6b\x79\x98\xef\xdb\xd5\xc5\xf1",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
"\x29\xbc\x21\xd2\xc2\x35\xcb\xbf"
@@ -22632,11 +19946,11 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2"
"\x07",
.alen = 17,
- .input = "",
- .ilen = 0,
- .result = "\xa4\x9b\xb8\x47\xc0\xed\x7a\x45"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xa4\x9b\xb8\x47\xc0\xed\x7a\x45"
"\x98\x54\x8c\xed\x3d\x17\xf0\xdd",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
"\xaa\x96\xd3\x64\xbc\x59\x8d\xc6"
@@ -22652,11 +19966,11 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x31\x6b\x08\x12\xfc\xd8\x37\x2d"
"\xe0\x17\x3a\x2e\x83\x5c\x8f",
.alen = 31,
- .input = "",
- .ilen = 0,
- .result = "\x20\x24\xe2\x33\x5c\x60\xc9\xf0"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x20\x24\xe2\x33\x5c\x60\xc9\xf0"
"\xa4\x96\x2f\x0d\x53\xc2\xf8\xfc",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
"\x2b\x70\x85\xf5\xb6\x7d\x50\xcc"
@@ -22672,11 +19986,11 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x5c\x2d\x14\x96\x01\x78\xb9\x47"
"\xa1\x44\x19\x06\x5d\xbb\x2e\x2f",
.alen = 32,
- .input = "",
- .ilen = 0,
- .result = "\x6f\x4a\xb9\xe0\xff\x51\xa3\xf1"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x6f\x4a\xb9\xe0\xff\x51\xa3\xf1"
"\xd2\x64\x3e\x66\x6a\xb2\x03\xc0",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
"\xac\x4b\x37\x86\xb0\xa2\x13\xd2"
@@ -22689,12 +20003,12 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x32\x67\xc0\xe9\x80\x02\xe5\x50",
.assoc = "\x40",
.alen = 1,
- .input = "\x4f",
- .ilen = 1,
- .result = "\x2c\xfb\xad\x7e\xbe\xa0\x9a\x5b"
+ .ptext = "\x4f",
+ .plen = 1,
+ .ctext = "\x2c\xfb\xad\x7e\xbe\xa0\x9a\x5b"
"\x7a\x3f\x81\xf7\xfc\x1b\x79\x83"
"\xc7",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
"\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8"
@@ -22708,14 +20022,14 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
.assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
"\x6d\x92\x42\x61\xa7\x58\x37",
.alen = 15,
- .input = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
+ .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
"\x8d\xc8\x6e\x85\xa5\x21\x67",
- .ilen = 15,
- .result = "\x1f\x7f\xca\x3c\x2b\xe7\x27\xba"
+ .plen = 15,
+ .ctext = "\x1f\x7f\xca\x3c\x2b\xe7\x27\xba"
"\x7e\x98\x83\x02\x34\x23\xf7\x94"
"\xde\x35\xe6\x1d\x14\x18\xe5\x38"
"\x14\x80\x6a\xa7\x1b\xae\x1d",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
"\xad\xff\x9b\xa9\xa4\xeb\x98\xdf"
@@ -22729,14 +20043,14 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
.assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
"\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
.alen = 16,
- .input = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
+ .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
"\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
- .ilen = 16,
- .result = "\x05\x86\x9e\xd7\x2b\xa3\x97\x01"
+ .plen = 16,
+ .ctext = "\x05\x86\x9e\xd7\x2b\xa3\x97\x01"
"\xbe\x28\x98\x10\x6f\xe9\x61\x32"
"\x96\xbb\xb1\x2e\x8f\x0c\x44\xb9"
"\x46\x2d\x55\xe3\x42\x67\xf2\xaf",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
"\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5"
@@ -22751,16 +20065,16 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
"\x05",
.alen = 17,
- .input = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
+ .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
"\x8f\x7d\xd3\xa8\x99\x6a\xed\x69"
"\xd0",
- .ilen = 17,
- .result = "\x9c\xe0\x06\x7b\x86\xcf\x2e\xd8"
+ .plen = 17,
+ .ctext = "\x9c\xe0\x06\x7b\x86\xcf\x2e\xd8"
"\x45\x65\x1b\x72\x9b\xaa\xa3\x1e"
"\x87\x9d\x26\xdf\xff\x81\x11\xd2"
"\x47\x41\xb9\x24\xc1\x8a\xa3\x8b"
"\x55",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
"\xaf\xb3\xff\xcc\x98\x33\x1d\xeb"
@@ -22776,18 +20090,18 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
"\x68\x28\x73\x40\x9f\x96\x4a",
.alen = 31,
- .input = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
+ .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
"\x10\x57\x85\x39\x93\x8f\xaf\x70"
"\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd"
"\x98\x34\xab\x37\x56\xae\x32",
- .ilen = 31,
- .result = "\xa0\xc8\xde\x83\x0d\xc3\x4e\xd5"
+ .plen = 31,
+ .ctext = "\xa0\xc8\xde\x83\x0d\xc3\x4e\xd5"
"\x69\x7f\x7a\xdd\x8c\x46\xda\xba"
"\x0a\x5c\x0e\x7f\xac\xee\x02\xd2"
"\xe5\x4b\x0a\xba\xb8\xa4\x7b\x66"
"\xde\xae\xdb\xc2\xc0\x0b\xf7\x2b"
"\xdf\xb8\xea\xd8\xa9\x38\xed",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
"\x30\x8e\xb1\x5e\x92\x58\xe0\xf1"
@@ -22803,18 +20117,18 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
"\x29\x56\x52\x19\x79\xf5\xe9\x37",
.alen = 32,
- .input = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
+ .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
"\x91\x31\x37\xcb\x8d\xb3\x72\x76"
"\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7"
"\x5a\x61\x8a\x0f\x30\x0d\xd1\xec",
- .ilen = 32,
- .result = "\xd3\x68\x14\x70\x3c\x01\x43\x86"
+ .plen = 32,
+ .ctext = "\xd3\x68\x14\x70\x3c\x01\x43\x86"
"\x02\xab\xbe\x75\xaa\xe7\xf5\x53"
"\x5c\x05\xbd\x9b\x19\xbb\x2a\x61"
"\x8f\x69\x05\x75\x8e\xca\x60\x0c"
"\x5b\xa2\x48\x61\x32\x74\x11\x2b"
"\xf6\xcf\x06\x78\x6f\x78\x1a\x4a",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
"\xb1\x68\x63\xef\x8c\x7c\xa3\xf7"
@@ -22831,7 +20145,7 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\xeb\x83\x31\xf1\x54\x54\x89\x0d"
"\x9d",
.alen = 33,
- .input = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
+ .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
"\x12\x0b\xe9\x5c\x87\xd7\x35\x7c"
"\x4f\x2e\xe8\x55\x66\x80\x27\x00"
"\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3"
@@ -22840,8 +20154,8 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x80\x0d\x19\x98\x33\xa9\x7a\xe3"
"\x2e\x4c\xc6\xf3\x8c\x88\x42\x01"
"\xbd",
- .ilen = 65,
- .result = "\x07\x0a\x35\xb0\x82\x03\x5a\xd2"
+ .plen = 65,
+ .ctext = "\x07\x0a\x35\xb0\x82\x03\x5a\xd2"
"\x15\x3a\x6c\x72\x83\x9b\xb1\x75"
"\xea\xf2\xfc\xff\xc6\xf1\x13\xa4"
"\x1a\x93\x33\x79\x97\x82\x81\xc0"
@@ -22852,7 +20166,7 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\xfb\x18\x73\x31\x4f\x08\x21\x5d"
"\x20\xe9\xc3\x7e\xea\x25\x77\x3a"
"\x65",
- .rlen = 81,
+ .clen = 81,
}, {
.key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
"\x32\x42\x15\x80\x85\xa1\x65\xfe"
@@ -22873,20 +20187,20 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
"\x09\x4f\x77\x62\x88\x2d\xf2\x68"
"\x54",
.alen = 65,
- .input = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
+ .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
"\x93\xe6\x9b\xee\x81\xfc\xf7\x82"
"\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a"
"\xdc\xbc\x47\xc0\xe4\xcb\x10\x99"
"\x2f",
- .ilen = 33,
- .result = "\x33\xc1\xda\xfa\x15\x21\x07\x8e"
+ .plen = 33,
+ .ctext = "\x33\xc1\xda\xfa\x15\x21\x07\x8e"
"\x93\x68\xea\x64\x7b\x3d\x4b\x6b"
"\x71\x5e\x5e\x6b\x92\xaa\x65\xc2"
"\x7a\x2a\xc1\xa9\x0a\xa1\x24\x81"
"\x26\x3a\x5a\x09\xe8\xce\x73\x72"
"\xde\x7b\x58\x9e\x85\xb9\xa4\x28"
"\xda",
- .rlen = 49,
+ .clen = 49,
}, {
.key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
"\xb3\x1c\xc7\x12\x7f\xc5\x28\x04"
@@ -22900,14 +20214,14 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
.assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
"\xf3\x89\x20\x5b\x7c\x57\x89\x07",
.alen = 16,
- .input = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
+ .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
"\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
- .ilen = 16,
- .result = "\x3e\xf8\x86\x3d\x39\xf8\x96\x02"
+ .plen = 16,
+ .ctext = "\x3e\xf8\x86\x3d\x39\xf8\x96\x02"
"\x0f\xdf\xc9\x6e\x37\x1e\x57\x99"
"\x07\x2a\x1a\xac\xd1\xda\xfd\x3b"
"\xc7\xff\xbd\xbc\x85\x09\x0b",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
"\x34\xf6\x79\xa3\x79\xe9\xeb\x0a"
@@ -22921,14 +20235,14 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
.assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
"\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
.alen = 16,
- .input = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
+ .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
"\x95\x9a\xff\x10\x75\x45\x7d\x8f",
- .ilen = 16,
- .result = "\x2f\xc4\xd8\x0d\xa6\x07\xef\x2e"
+ .plen = 16,
+ .ctext = "\x2f\xc4\xd8\x0d\xa6\x07\xef\x2e"
"\x6c\xd9\x84\x63\x70\x97\x61\x37"
"\x08\x2f\x16\x90\x9e\x62\x30\x0d"
"\x62\xd5\xc8\xf0\x46\x1a",
- .rlen = 30,
+ .clen = 30,
}, {
.key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
"\xb5\xd1\x2b\x35\x73\x0e\xad\x10"
@@ -22942,546 +20256,13 @@ static const struct aead_testvec aegis256_enc_tv_template[] = {
.assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
"\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
.alen = 16,
- .input = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
+ .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
"\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
- .ilen = 16,
- .result = "\xce\xf3\x17\x87\x49\xc2\x00\x46"
+ .plen = 16,
+ .ctext = "\xce\xf3\x17\x87\x49\xc2\x00\x46"
"\xc6\x12\x5c\x8f\x81\x38\xaa\x55"
"\xf8\x67\x75\xf1\x75\xe3\x2a\x24",
- .rlen = 24,
- },
-};
-
-static const struct aead_testvec aegis256_dec_tv_template[] = {
- {
- .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
- "\x20\x36\x2c\x24\xfe\xc9\x30\x81"
- "\xca\xb0\x82\x21\x41\xa8\xe0\x06"
- "\x30\x0b\x37\xf6\xb6\x17\xe7\xb5",
- .klen = 32,
- .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d"
- "\x40\x6d\x59\x48\xfc\x92\x61\x03"
- "\x95\x61\x05\x42\x82\x50\xc0\x0c"
- "\x60\x16\x6f\xec\x6d\x2f\xcf\x6b",
- .assoc = "",
- .alen = 0,
- .input = "\xd5\x65\x3a\xa9\x03\x51\xd7\xaa"
- "\xfa\x4b\xd8\xa2\x41\x9b\xc1\xb2",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
- "\xa1\x10\xde\xb5\xf8\xed\xf3\x87"
- "\xf4\x72\x8e\xa5\x46\x48\x62\x20"
- "\xf1\x38\x16\xce\x90\x76\x87\x8c",
- .klen = 32,
- .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29"
- "\xc1\x47\x0b\xda\xf6\xb6\x23\x09"
- "\xbf\x23\x11\xc6\x87\xf0\x42\x26"
- "\x22\x44\x4e\xc4\x47\x8e\x6e\x41",
- .assoc = "",
- .alen = 0,
- .input = "\x84\xa2\x8f\xad\xdb\x8d\x2c\x16"
- "\x9e\x89\xd9\x06\xa6\xa8\x14\x29"
- "\x8b",
- .ilen = 17,
- .result = "\x79",
- .rlen = 1,
- }, {
- .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
- "\x22\xea\x90\x47\xf2\x11\xb5\x8e"
- "\x1f\x35\x9a\x29\x4b\xe8\xe4\x39"
- "\xb3\x66\xf5\xa6\x6a\xd5\x26\x62",
- .klen = 32,
- .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45"
- "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f"
- "\xe9\xe5\x1d\x4a\x8c\x90\xc4\x40"
- "\xe3\x71\x2d\x9c\x21\xed\x0e\x18",
- .assoc = "",
- .alen = 0,
- .input = "\x09\x94\x1f\xa6\x13\xc3\x74\x75"
- "\x17\xad\x8a\x0e\xd8\x66\x9a\x28"
- "\xd7\x30\x66\x09\x2a\xdc\xfa\x2a"
- "\x9f\x3b\xd7\xdd\x66\xd1\x2b",
- .ilen = 31,
- .result = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
- "\x82\x8e\x16\xb4\xed\x6d\x47",
- .rlen = 15,
- }, {
- .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
- "\xa2\xc5\x42\xd8\xec\x36\x78\x94"
- "\x49\xf7\xa5\xad\x50\x88\x66\x53"
- "\x74\x94\xd4\x7f\x44\x34\xc5\x39",
- .klen = 32,
- .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61"
- "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15"
- "\x14\xa8\x28\xce\x92\x30\x46\x59"
- "\xa4\x9f\x0b\x75\xfb\x4c\xad\xee",
- .assoc = "",
- .alen = 0,
- .input = "\x8a\x46\xa2\x22\x8c\x03\xab\x6f"
- "\x54\x63\x4e\x7f\xc9\x8e\xfa\x70"
- "\x7b\xe5\x8d\x78\xbc\xe9\xb6\xa1"
- "\x29\x17\xc8\x3b\x52\xa4\x98\x72",
- .ilen = 32,
- .result = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
- "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
- .rlen = 16,
- }, {
- .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
- "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a"
- "\x74\xb9\xb1\x32\x55\x28\xe8\x6d"
- "\x35\xc1\xb3\x57\x1f\x93\x64\x0f",
- .klen = 32,
- .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d"
- "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c"
- "\x3e\x6a\x34\x53\x97\xd0\xc8\x73"
- "\x66\xcd\xea\x4d\xd5\xab\x4c\xc5",
- .assoc = "",
- .alen = 0,
- .input = "\x71\x6b\x37\x0b\x02\x61\x28\x12"
- "\x83\xab\x66\x90\x84\xc7\xd1\xc5"
- "\xb2\x7a\xb4\x7b\xb4\xfe\x02\xb2"
- "\xc0\x00\x39\x13\xb5\x51\x68\x44"
- "\xad",
- .ilen = 33,
- .result = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
- "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f"
- "\xd3",
- .rlen = 17,
- }, {
- .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
- "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0"
- "\x9e\x7c\xbc\xb6\x5b\xc8\x6a\x86"
- "\xf7\xef\x91\x30\xf9\xf2\x04\xe6",
- .klen = 32,
- .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98"
- "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22"
- "\x69\x2c\x3f\xd7\x9c\x70\x4a\x8d"
- "\x27\xfa\xc9\x26\xaf\x0a\xeb\x9c",
- .assoc = "",
- .alen = 0,
- .input = "\xaf\xa4\x34\x0d\x59\xe6\x1c\x2f"
- "\x06\x3b\x52\x18\x49\x75\x1b\xf0"
- "\x53\x09\x72\x7b\x45\x79\xe0\xbe"
- "\x89\x85\x23\x15\xb8\x79\x07\x4c"
- "\x53\x7a\x15\x37\x0a\xee\xb7\xfb"
- "\xc4\x1f\x12\x27\xcf\x77\x90",
- .ilen = 47,
- .result = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
- "\x05\x1d\x2c\x68\xdb\xda\x8f\x25"
- "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99"
- "\x88\x11\x39\x12\x1c\x3a\xbb",
- .rlen = 31,
- }, {
- .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
- "\x25\x53\x58\x8c\xda\xa3\xc0\xa6"
- "\xc8\x3e\xc8\x3a\x60\x68\xec\xa0"
- "\xb8\x1c\x70\x08\xd3\x51\xa3\xbd",
- .klen = 32,
- .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4"
- "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28"
- "\x93\xef\x4b\x5b\xa1\x10\xcc\xa6"
- "\xe8\x28\xa8\xfe\x89\x69\x8b\x72",
- .assoc = "",
- .alen = 0,
- .input = "\xe2\xc9\x0b\x33\x31\x02\xb3\xb4"
- "\x33\xfe\xeb\xa8\xb7\x9b\xb2\xd7"
- "\xeb\x0f\x05\x2b\xba\xb3\xca\xef"
- "\xf6\xd1\xb6\xc0\xb9\x9b\x85\xc5"
- "\xbf\x7a\x3e\xcc\x31\x76\x09\x80"
- "\x32\x5d\xbb\xe8\x38\x0e\x77\xd3",
- .ilen = 48,
- .result = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
- "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b"
- "\x28\x50\x51\x9d\x24\x60\x8d\xb3"
- "\x49\x3e\x17\xea\xf6\x99\x5a\xdd",
- .rlen = 32,
- }, {
- .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
- "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad"
- "\xf3\x00\xd4\xbf\x65\x08\x6e\xb9"
- "\x7a\x4a\x4f\xe0\xad\xb0\x42\x93",
- .klen = 32,
- .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0"
- "\xc6\x64\x37\x42\xd2\x90\xb3\x2e"
- "\xbd\xb1\x57\xe0\xa6\xb0\x4e\xc0"
- "\xaa\x55\x87\xd6\x63\xc8\x2a\x49",
- .assoc = "\xd5",
- .alen = 1,
- .input = "\x96\x43\x30\xca\x6c\x4f\xd7\x12"
- "\xba\xd9\xb3\x18\x86\xdf\xc3\x52",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
- "\x27\x08\xbd\xaf\xce\xec\x45\xb3"
- "\x1d\xc3\xdf\x43\x6a\xa8\xf0\xd3"
- "\x3b\x77\x2e\xb9\x87\x0f\xe1\x6a",
- .klen = 32,
- .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
- "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34"
- "\xe8\x73\x62\x64\xab\x50\xd0\xda"
- "\x6b\x83\x66\xaf\x3e\x27\xc9\x1f",
- .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73"
- "\x68\x75\x16\xf8\xcb\x7e\xa7",
- .alen = 15,
- .input = "\x2f\xab\x45\xe2\xa7\x46\xc5\x83"
- "\x11\x9f\xb0\x74\xee\xc7\x03\xdd",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
- "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9"
- "\x47\x85\xeb\xc7\x6f\x48\x72\xed"
- "\xfc\xa5\x0d\x91\x61\x6e\x81\x40",
- .klen = 32,
- .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
- "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b"
- "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3"
- "\x2d\xb0\x45\x87\x18\x86\x68\xf6",
- .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f"
- "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc",
- .alen = 16,
- .input = "\x16\x44\x73\x33\x5d\xf2\xb9\x04"
- "\x6b\x79\x98\xef\xdb\xd5\xc5\xf1",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
- "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf"
- "\x72\x47\xf6\x4b\x74\xe8\xf4\x06"
- "\xbe\xd3\xec\x6a\x3b\xcd\x20\x17",
- .klen = 32,
- .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
- "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41"
- "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d"
- "\xee\xde\x23\x60\xf2\xe5\x08\xcc",
- .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab"
- "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2"
- "\x07",
- .alen = 17,
- .input = "\xa4\x9b\xb8\x47\xc0\xed\x7a\x45"
- "\x98\x54\x8c\xed\x3d\x17\xf0\xdd",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
- "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6"
- "\x9c\x0a\x02\xd0\x79\x88\x76\x20"
- "\x7f\x00\xca\x42\x15\x2c\xbf\xed",
- .klen = 32,
- .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
- "\xca\xcd\xff\x88\xba\x22\xbe\x47"
- "\x67\xba\x85\xf1\xbb\x30\x56\x26"
- "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3",
- .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6"
- "\xea\x03\x2c\xac\xb9\xeb\xef\xc9"
- "\x31\x6b\x08\x12\xfc\xd8\x37\x2d"
- "\xe0\x17\x3a\x2e\x83\x5c\x8f",
- .alen = 31,
- .input = "\x20\x24\xe2\x33\x5c\x60\xc9\xf0"
- "\xa4\x96\x2f\x0d\x53\xc2\xf8\xfc",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
- "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc"
- "\xc6\xcc\x0e\x54\x7f\x28\xf8\x3a"
- "\x40\x2e\xa9\x1a\xf0\x8b\x5e\xc4",
- .klen = 32,
- .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
- "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d"
- "\x91\x7c\x91\x75\xc0\xd0\xd8\x40"
- "\x71\x39\xe1\x10\xa6\xa3\x46\x7a",
- .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2"
- "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf"
- "\x5c\x2d\x14\x96\x01\x78\xb9\x47"
- "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f",
- .alen = 32,
- .input = "\x6f\x4a\xb9\xe0\xff\x51\xa3\xf1"
- "\xd2\x64\x3e\x66\x6a\xb2\x03\xc0",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
- "\xac\x4b\x37\x86\xb0\xa2\x13\xd2"
- "\xf1\x8e\x19\xd8\x84\xc8\x7a\x53"
- "\x02\x5b\x88\xf3\xca\xea\xfe\x9b",
- .klen = 32,
- .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77"
- "\xcc\x81\x63\xab\xae\x6b\x43\x54"
- "\xbb\x3f\x9c\xf9\xc5\x70\x5a\x5a"
- "\x32\x67\xc0\xe9\x80\x02\xe5\x50",
- .assoc = "\x40",
- .alen = 1,
- .input = "\x2c\xfb\xad\x7e\xbe\xa0\x9a\x5b"
- "\x7a\x3f\x81\xf7\xfc\x1b\x79\x83"
- "\xc7",
- .ilen = 17,
- .result = "\x4f",
- .rlen = 1,
- }, {
- .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
- "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8"
- "\x1b\x50\x25\x5d\x89\x68\xfc\x6d"
- "\xc3\x89\x67\xcb\xa4\x49\x9d\x71",
- .klen = 32,
- .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
- "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a"
- "\xe6\x01\xa8\x7e\xca\x10\xdc\x73"
- "\xf4\x94\x9f\xc1\x5a\x61\x85\x27",
- .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
- "\x6d\x92\x42\x61\xa7\x58\x37",
- .alen = 15,
- .input = "\x1f\x7f\xca\x3c\x2b\xe7\x27\xba"
- "\x7e\x98\x83\x02\x34\x23\xf7\x94"
- "\xde\x35\xe6\x1d\x14\x18\xe5\x38"
- "\x14\x80\x6a\xa7\x1b\xae\x1d",
- .ilen = 31,
- .result = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
- "\x8d\xc8\x6e\x85\xa5\x21\x67",
- .rlen = 15,
- }, {
- .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
- "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf"
- "\x46\x13\x31\xe1\x8e\x08\x7e\x87"
- "\x85\xb6\x46\xa3\x7e\xa8\x3c\x48",
- .klen = 32,
- .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
- "\xce\x36\xc7\xce\xa2\xb4\xc9\x60"
- "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d"
- "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd",
- .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
- "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
- .alen = 16,
- .input = "\x05\x86\x9e\xd7\x2b\xa3\x97\x01"
- "\xbe\x28\x98\x10\x6f\xe9\x61\x32"
- "\x96\xbb\xb1\x2e\x8f\x0c\x44\xb9"
- "\x46\x2d\x55\xe3\x42\x67\xf2\xaf",
- .ilen = 32,
- .result = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
- "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
- .rlen = 16,
- }, {
- .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
- "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5"
- "\x70\xd5\x3c\x65\x93\xa8\x00\xa0"
- "\x46\xe4\x25\x7c\x58\x08\xdb\x1e",
- .klen = 32,
- .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
- "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66"
- "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7"
- "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4",
- .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
- "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
- "\x05",
- .alen = 17,
- .input = "\x9c\xe0\x06\x7b\x86\xcf\x2e\xd8"
- "\x45\x65\x1b\x72\x9b\xaa\xa3\x1e"
- "\x87\x9d\x26\xdf\xff\x81\x11\xd2"
- "\x47\x41\xb9\x24\xc1\x8a\xa3\x8b"
- "\x55",
- .ilen = 33,
- .result = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
- "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69"
- "\xd0",
- .rlen = 17,
- }, {
- .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
- "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb"
- "\x9a\x97\x48\xe9\x98\x48\x82\xba"
- "\x07\x11\x04\x54\x32\x67\x7b\xf5",
- .klen = 32,
- .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
- "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d"
- "\x65\x48\xcb\x0a\xda\xf0\x62\xc0"
- "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa",
- .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
- "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
- "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
- "\x68\x28\x73\x40\x9f\x96\x4a",
- .alen = 31,
- .input = "\xa0\xc8\xde\x83\x0d\xc3\x4e\xd5"
- "\x69\x7f\x7a\xdd\x8c\x46\xda\xba"
- "\x0a\x5c\x0e\x7f\xac\xee\x02\xd2"
- "\xe5\x4b\x0a\xba\xb8\xa4\x7b\x66"
- "\xde\xae\xdb\xc2\xc0\x0b\xf7\x2b"
- "\xdf\xb8\xea\xd8\xa9\x38\xed",
- .ilen = 47,
- .result = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
- "\x10\x57\x85\x39\x93\x8f\xaf\x70"
- "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd"
- "\x98\x34\xab\x37\x56\xae\x32",
- .rlen = 31,
- }, {
- .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
- "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1"
- "\xc5\x5a\x53\x6e\x9d\xe8\x04\xd4"
- "\xc9\x3f\xe2\x2d\x0c\xc6\x1a\xcb",
- .klen = 32,
- .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
- "\x50\xc4\xde\x82\x90\x21\x11\x73"
- "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda"
- "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81",
- .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
- "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
- "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
- "\x29\x56\x52\x19\x79\xf5\xe9\x37",
- .alen = 32,
- .input = "\xd3\x68\x14\x70\x3c\x01\x43\x86"
- "\x02\xab\xbe\x75\xaa\xe7\xf5\x53"
- "\x5c\x05\xbd\x9b\x19\xbb\x2a\x61"
- "\x8f\x69\x05\x75\x8e\xca\x60\x0c"
- "\x5b\xa2\x48\x61\x32\x74\x11\x2b"
- "\xf6\xcf\x06\x78\x6f\x78\x1a\x4a",
- .ilen = 48,
- .result = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
- "\x91\x31\x37\xcb\x8d\xb3\x72\x76"
- "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7"
- "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec",
- .rlen = 32,
- }, {
- .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
- "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7"
- "\xef\x1c\x5f\xf2\xa3\x88\x86\xed"
- "\x8a\x6d\xc1\x05\xe7\x25\xb9\xa2",
- .klen = 32,
- .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
- "\xd1\x9e\x90\x13\x8a\x45\xd3\x79"
- "\xba\xcd\xe2\x13\xe4\x30\x66\xf4"
- "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58",
- .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
- "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
- "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
- "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
- "\x9d",
- .alen = 33,
- .input = "\x07\x0a\x35\xb0\x82\x03\x5a\xd2"
- "\x15\x3a\x6c\x72\x83\x9b\xb1\x75"
- "\xea\xf2\xfc\xff\xc6\xf1\x13\xa4"
- "\x1a\x93\x33\x79\x97\x82\x81\xc0"
- "\x96\xc2\x00\xab\x39\xae\xa1\x62"
- "\x53\xa3\x86\xc9\x07\x8c\xaf\x22"
- "\x47\x31\x29\xca\x4a\x95\xf5\xd5"
- "\x20\x63\x5a\x54\x80\x2c\x4a\x63"
- "\xfb\x18\x73\x31\x4f\x08\x21\x5d"
- "\x20\xe9\xc3\x7e\xea\x25\x77\x3a"
- "\x65",
- .ilen = 81,
- .result = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
- "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c"
- "\x4f\x2e\xe8\x55\x66\x80\x27\x00"
- "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3"
- "\x21\x78\x55\x9d\x9c\x65\x7b\xcd"
- "\x0a\x34\x97\xff\x47\x37\xb0\x2a"
- "\x80\x0d\x19\x98\x33\xa9\x7a\xe3"
- "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01"
- "\xbd",
- .rlen = 65,
- }, {
- .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
- "\x32\x42\x15\x80\x85\xa1\x65\xfe"
- "\x19\xde\x6b\x76\xa8\x28\x08\x07"
- "\x4b\x9a\xa0\xdd\xc1\x84\x58\x79",
- .klen = 32,
- .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
- "\x52\x79\x42\xa5\x84\x6a\x96\x7f"
- "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d"
- "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e",
- .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
- "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
- "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
- "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
- "\xac\xfa\x58\x7f\xe5\x73\x09\x8c"
- "\x1d\x40\x87\x8c\xd9\x75\xc0\x55"
- "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb"
- "\x09\x4f\x77\x62\x88\x2d\xf2\x68"
- "\x54",
- .alen = 65,
- .input = "\x33\xc1\xda\xfa\x15\x21\x07\x8e"
- "\x93\x68\xea\x64\x7b\x3d\x4b\x6b"
- "\x71\x5e\x5e\x6b\x92\xaa\x65\xc2"
- "\x7a\x2a\xc1\xa9\x0a\xa1\x24\x81"
- "\x26\x3a\x5a\x09\xe8\xce\x73\x72"
- "\xde\x7b\x58\x9e\x85\xb9\xa4\x28"
- "\xda",
- .ilen = 49,
- .result = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
- "\x93\xe6\x9b\xee\x81\xfc\xf7\x82"
- "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a"
- "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99"
- "\x2f",
- .rlen = 33,
- }, {
- .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
- "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04"
- "\x44\xa1\x76\xfb\xad\xc8\x8a\x21"
- "\x0d\xc8\x7f\xb6\x9b\xe3\xf8\x4f",
- .klen = 32,
- .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
- "\xd3\x53\xf4\x36\x7e\x8e\x59\x85"
- "\x0e\x51\xf9\x1c\xee\x70\x6a\x27"
- "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05",
- .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
- "\xf3\x89\x20\x5b\x7c\x57\x89\x07",
- .alen = 16,
- .input = "\x3e\xf8\x86\x3d\x39\xf8\x96\x02"
- "\x0f\xdf\xc9\x6e\x37\x1e\x57\x99"
- "\x07\x2a\x1a\xac\xd1\xda\xfd\x3b"
- "\xc7\xff\xbd\xbc\x85\x09\x0b",
- .ilen = 31,
- .result = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
- "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
- .rlen = 16,
- }, {
- .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
- "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a"
- "\x6e\x63\x82\x7f\xb2\x68\x0c\x3a"
- "\xce\xf5\x5e\x8e\x75\x42\x97\x26",
- .klen = 32,
- .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
- "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c"
- "\x39\x14\x05\xa0\xf3\x10\xec\x41"
- "\xff\x01\x95\x84\x2b\x59\x7f\xdb",
- .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
- "\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
- .alen = 16,
- .input = "\x2f\xc4\xd8\x0d\xa6\x07\xef\x2e"
- "\x6c\xd9\x84\x63\x70\x97\x61\x37"
- "\x08\x2f\x16\x90\x9e\x62\x30\x0d"
- "\x62\xd5\xc8\xf0\x46\x1a",
- .ilen = 30,
- .result = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
- "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
- .rlen = 16,
- }, {
- .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
- "\xb5\xd1\x2b\x35\x73\x0e\xad\x10"
- "\x98\x25\x8d\x03\xb7\x08\x8e\x54"
- "\x90\x23\x3d\x67\x4f\xa1\x36\xfc",
- .klen = 32,
- .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
- "\xd5\x07\x58\x59\x72\xd7\xde\x92"
- "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a"
- "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2",
- .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
- "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
- .alen = 16,
- .input = "\xce\xf3\x17\x87\x49\xc2\x00\x46"
- "\xc6\x12\x5c\x8f\x81\x38\xaa\x55"
- "\xf8\x67\x75\xf1\x75\xe3\x2a\x24",
- .ilen = 24,
- .result = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
- "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
- .rlen = 16,
+ .clen = 24,
},
};
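The aegis256_dec_tv_template entries deleted above mirror the encryption vectors exactly, with .input and .result swapped, so one combined vector per case is enough to exercise both directions. As a hedged, userspace-style illustration of that idea (not the kernel's actual testmgr logic; combined_vec, aead_op and check_both_directions are made-up names), a test loop could verify both transforms against the same pair of buffers:

/*
 * Userspace illustration only.  The aead_op callbacks stand in for
 * whatever AEAD implementation is under test; they are assumed to
 * return the number of output bytes written, or a negative value on
 * failure.
 */
#include <string.h>

struct combined_vec {
	const unsigned char *ptext;
	unsigned int plen;		/* plaintext length */
	const unsigned char *ctext;
	unsigned int clen;		/* ciphertext length, tag included */
};

typedef int (*aead_op)(unsigned char *out,
		       const unsigned char *in, unsigned int inlen);

/* Run one combined vector through both directions; 0 on success. */
static int check_both_directions(const struct combined_vec *v,
				 aead_op encrypt, aead_op decrypt,
				 unsigned char *buf /* >= v->clen bytes */)
{
	/* Encrypt: plaintext in, expected ciphertext + tag out. */
	if (encrypt(buf, v->ptext, v->plen) != (int)v->clen ||
	    memcmp(buf, v->ctext, v->clen) != 0)
		return -1;

	/* Decrypt: ciphertext + tag in, original plaintext expected out. */
	if (decrypt(buf, v->ctext, v->clen) != (int)v->plen ||
	    memcmp(buf, v->ptext, v->plen) != 0)
		return -1;

	return 0;
}

Keeping the expected plaintext and ciphertext in one record also halves the amount of hex data that has to be maintained per vector, which is why the remaining hunks in this diff are almost entirely deletions.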
@@ -23492,7 +20273,7 @@ static const struct aead_testvec aegis256_dec_tv_template[] = {
* https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
* (see crypto_aead/morus640128v2/)
*/
-static const struct aead_testvec morus640_enc_tv_template[] = {
+static const struct aead_testvec morus640_tv_template[] = {
{
.key = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
@@ -23501,11 +20282,11 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x20\x36\x2c\x24\xfe\xc9\x30\x81",
.assoc = "",
.alen = 0,
- .input = "",
- .ilen = 0,
- .result = "\x89\x62\x7d\xf3\x07\x9d\x52\x05"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x89\x62\x7d\xf3\x07\x9d\x52\x05"
"\x53\xc3\x04\x60\x93\xb4\x37\x9a",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b"
"\x80\xda\xb2\x91\xf9\x24\xc2\x06",
@@ -23514,12 +20295,12 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
.assoc = "",
.alen = 0,
- .input = "\x69",
- .ilen = 1,
- .result = "\xa8\x8d\xe4\x90\xb5\x50\x8f\x78"
+ .ptext = "\x69",
+ .plen = 1,
+ .ctext = "\xa8\x8d\xe4\x90\xb5\x50\x8f\x78"
"\xb6\x10\x9a\x59\x5f\x61\x37\x70"
"\x09",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x79\x49\x73\x3e\x20\xf7\x51\x37"
"\x01\xb4\x64\x22\xf3\x48\x85\x0c",
@@ -23528,14 +20309,14 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x22\xea\x90\x47\xf2\x11\xb5\x8e",
.assoc = "",
.alen = 0,
- .input = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc"
+ .ptext = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc"
"\x62\x58\xe9\x8f\xef\xa4\x17",
- .ilen = 15,
- .result = "\x76\xdd\xb9\x05\x3d\xce\x61\x38"
+ .plen = 15,
+ .ctext = "\x76\xdd\xb9\x05\x3d\xce\x61\x38"
"\xf3\xef\xf7\xe5\xd7\xfd\x70\xa5"
"\xcf\x9d\x64\xb8\x0a\x9f\xfd\x8b"
"\xd4\x6e\xfe\xd9\xc8\x63\x4b",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
"\x82\x8e\x16\xb4\xed\x6d\x47\x12",
@@ -23544,14 +20325,14 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\xa2\xc5\x42\xd8\xec\x36\x78\x94",
.assoc = "",
.alen = 0,
- .input = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8"
+ .ptext = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8"
"\xe3\x32\x9b\x21\xe9\xc8\xd9\x97",
- .ilen = 16,
- .result = "\xdc\x72\xe8\x14\xfb\x63\xad\x72"
+ .plen = 16,
+ .ctext = "\xdc\x72\xe8\x14\xfb\x63\xad\x72"
"\x1f\x57\x9a\x1f\x88\x81\xdb\xd6"
"\xc1\x91\x9d\xb9\x25\xc4\x99\x4c"
"\x97\xcd\x8a\x0c\x9d\x68\x00\x1c",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
"\x03\x68\xc8\x45\xe7\x91\x0a\x18",
@@ -23560,16 +20341,16 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
.assoc = "",
.alen = 0,
- .input = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04"
+ .ptext = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04"
"\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d"
"\x09",
- .ilen = 17,
- .result = "\x6b\x4f\x3b\x90\x9a\xa2\xb3\x82"
+ .plen = 17,
+ .ctext = "\x6b\x4f\x3b\x90\x9a\xa2\xb3\x82"
"\x0a\xb8\x55\xee\xeb\x73\x4d\x7f"
"\x54\x11\x3a\x8a\x31\xa3\xb5\xf2"
"\xcd\x49\xdb\xf3\xee\x26\xbd\xa2"
"\x0d",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
"\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f",
@@ -23578,18 +20359,18 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
.assoc = "",
.alen = 0,
- .input = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f"
+ .ptext = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f"
"\xe5\xe6\xff\x44\xdd\x11\x5f\xa3"
"\x33\xdd\xc2\xf8\xdd\x18\x2b\x93"
"\x57\x05\x01\x1c\x66\x22\xd3",
- .ilen = 31,
- .result = "\x59\xd1\x0f\x6b\xee\x27\x84\x92"
+ .plen = 31,
+ .ctext = "\x59\xd1\x0f\x6b\xee\x27\x84\x92"
"\xb7\xa9\xb5\xdd\x02\xa4\x12\xa5"
"\x50\x32\xb4\x9a\x2e\x35\x83\x55"
"\x36\x12\x12\xed\xa3\x31\xc5\x30"
"\xa7\xe2\x4a\x6d\x05\x59\x43\x91"
"\x75\xfa\x6c\x17\xc6\x73\xca",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
"\x05\x1d\x2c\x68\xdb\xda\x8f\x25",
@@ -23598,18 +20379,18 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
.assoc = "",
.alen = 0,
- .input = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b"
+ .ptext = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b"
"\x66\xc0\xb1\xd5\xd7\x35\x21\xaa"
"\x5d\x9f\xce\x7c\xe2\xb8\xad\xad"
"\x19\x33\xe0\xf4\x40\x81\x72\x28",
- .ilen = 32,
- .result = "\xdb\x49\x68\x0f\x91\x5b\x21\xb1"
+ .plen = 32,
+ .ctext = "\xdb\x49\x68\x0f\x91\x5b\x21\xb1"
"\xcf\x50\xb2\x4c\x32\xe1\xa6\x69"
"\xc0\xfb\x44\x1f\xa0\x9a\xeb\x39"
"\x1b\xde\x68\x38\xcc\x27\x52\xc5"
"\xf6\x3e\x74\xea\x66\x5b\x5f\x0c"
"\x65\x9e\x58\xe6\x52\xa2\xfe\x59",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
"\x86\xf7\xde\xfa\xd5\xfe\x52\x2b",
@@ -23618,11 +20399,11 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
.assoc = "\xc5",
.alen = 1,
- .input = "",
- .ilen = 0,
- .result = "\x56\xe7\x24\x52\xdd\x95\x60\x5b"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x56\xe7\x24\x52\xdd\x95\x60\x5b"
"\x09\x48\x39\x69\x9c\xb3\x62\x46",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde"
"\x07\xd1\x90\x8b\xcf\x23\x15\x31",
@@ -23632,11 +20413,11 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
.assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
"\x47\x3e\xe9\xd4\xcc\xb5\x76",
.alen = 15,
- .input = "",
- .ilen = 0,
- .result = "\xdd\xfa\x6c\x1f\x5d\x86\x87\x01"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xdd\xfa\x6c\x1f\x5d\x86\x87\x01"
"\x13\xe5\x73\x46\x46\xf2\x5c\xe1",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x20\x4a\x07\x99\x91\x58\xee\xfa"
"\x88\xab\x42\x1c\xc9\x47\xd7\x38",
@@ -23646,11 +20427,11 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
.assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
"\xc8\x18\x9b\x65\xc6\xd9\x39\x3b",
.alen = 16,
- .input = "",
- .ilen = 0,
- .result = "\xa6\x1b\xb9\xd7\x5e\x3c\xcf\xac"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xa6\x1b\xb9\xd7\x5e\x3c\xcf\xac"
"\xa9\x21\x45\x0b\x16\x52\xf7\xe1",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16"
"\x09\x85\xf4\xae\xc3\x6b\x9a\x3e",
@@ -23661,11 +20442,11 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41"
"\x3c",
.alen = 17,
- .input = "",
- .ilen = 0,
- .result = "\x15\xff\xde\x3b\x34\xfc\xf6\xf9"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x15\xff\xde\x3b\x34\xfc\xf6\xf9"
"\xbb\xa8\x62\xad\x0a\xf5\x48\x60",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31"
"\x8a\x60\xa6\x3f\xbd\x90\x5d\x44",
@@ -23677,11 +20458,11 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x67\xba\x85\xf1\xbb\x30\x56\x26"
"\xaf\x0b\x02\x38\xcc\x44\xa7",
.alen = 31,
- .input = "",
- .ilen = 0,
- .result = "\xd2\x9d\xf8\x3b\xd7\x84\xe9\x2d"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xd2\x9d\xf8\x3b\xd7\x84\xe9\x2d"
"\x4b\xef\x75\x16\x0a\x99\xae\x6b",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d"
"\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a",
@@ -23693,11 +20474,11 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x91\x7c\x91\x75\xc0\xd0\xd8\x40"
"\x71\x39\xe1\x10\xa6\xa3\x46\x7a",
.alen = 32,
- .input = "",
- .ilen = 0,
- .result = "\xe4\x8d\xa7\xa7\x45\xc1\x31\x4f"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xe4\x8d\xa7\xa7\x45\xc1\x31\x4f"
"\xce\xfb\xaf\xd6\xc2\xe6\xee\xc0",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x12\xdd\xee\x17\xd1\x47\x92\x69"
"\x8b\x14\x0a\x62\xb1\xd9\xe2\x50",
@@ -23706,12 +20487,12 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
.assoc = "\x31",
.alen = 1,
- .input = "\x40",
- .ilen = 1,
- .result = "\xe2\x67\x38\x4f\xb9\xad\x7d\x38"
+ .ptext = "\x40",
+ .plen = 1,
+ .ctext = "\xe2\x67\x38\x4f\xb9\xad\x7d\x38"
"\x01\xfe\x84\x14\x85\xf8\xd1\xe3"
"\x22",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85"
"\x0c\xee\xbc\xf4\xab\xfd\xa5\x57",
@@ -23721,14 +20502,14 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
.assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
"\x4d\x5b\x15\x3c\xa8\x8f\x06",
.alen = 15,
- .input = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
+ .ptext = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
"\x6d\x92\x42\x61\xa7\x58\x37",
- .ilen = 15,
- .result = "\x77\x32\x61\xeb\xb4\x33\x29\x92"
+ .plen = 15,
+ .ctext = "\x77\x32\x61\xeb\xb4\x33\x29\x92"
"\x29\x95\xc5\x8e\x85\x76\xab\xfc"
"\x07\x95\xa7\x44\x74\xf7\x22\xff"
"\xd8\xd8\x36\x3d\x8a\x7f\x9e",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
"\x8d\xc8\x6e\x85\xa5\x21\x67\x5d",
@@ -23738,14 +20519,14 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
.assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
"\xce\x36\xc7\xce\xa2\xb4\xc9\x60",
.alen = 16,
- .input = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
+ .ptext = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
"\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
- .ilen = 16,
- .result = "\xd8\xfd\x44\x45\xf6\x42\x12\x38"
+ .plen = 16,
+ .ctext = "\xd8\xfd\x44\x45\xf6\x42\x12\x38"
"\xf2\x0b\xea\x4f\x9e\x11\x61\x07"
"\x48\x67\x98\x18\x9b\xd0\x0c\x59"
"\x67\xa4\x11\xb3\x2b\xd6\xc1\x70",
- .rlen = 32,
+ .clen = 32,
}, {
.key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
"\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
@@ -23756,16 +20537,16 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66"
"\x3b",
.alen = 17,
- .input = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
+ .ptext = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
"\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
"\x05",
- .ilen = 17,
- .result = "\xb1\xab\x53\x4e\xc7\x40\x16\xb6"
+ .plen = 17,
+ .ctext = "\xb1\xab\x53\x4e\xc7\x40\x16\xb6"
"\x71\x3a\x00\x9f\x41\x88\xb0\xb2"
"\x71\x83\x85\x5f\xc8\x79\x0a\x99"
"\x99\xdc\x89\x1c\x88\xd2\x3e\xf9"
"\x83",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
"\x8f\x7d\xd3\xa8\x99\x6a\xed\x69",
@@ -23777,18 +20558,18 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x65\x48\xcb\x0a\xda\xf0\x62\xc0"
"\x38\x1d\x3b\x4a\xe9\x7e\x62",
.alen = 31,
- .input = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
+ .ptext = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
"\xf0\x20\x58\x15\x95\xc6\x7f\xee"
"\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
"\x68\x28\x73\x40\x9f\x96\x4a",
- .ilen = 31,
- .result = "\x29\xc4\xf0\x03\xc1\x86\xdf\x06"
+ .plen = 31,
+ .ctext = "\x29\xc4\xf0\x03\xc1\x86\xdf\x06"
"\x5c\x7b\xef\x64\x87\x00\xd1\x37"
"\xa7\x08\xbc\x7f\x8f\x41\x54\xd0"
"\x3e\xf1\xc3\xa2\x96\x84\xdd\x2a"
"\x2d\x21\x30\xf9\x02\xdb\x06\x0c"
"\xf1\x5a\x66\x69\xe0\xca\x83",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
"\x10\x57\x85\x39\x93\x8f\xaf\x70",
@@ -23800,18 +20581,18 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda"
"\xf9\x4a\x1a\x23\xc3\xdd\x02\x81",
.alen = 32,
- .input = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
+ .ptext = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
"\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
"\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
"\x29\x56\x52\x19\x79\xf5\xe9\x37",
- .ilen = 32,
- .result = "\xe2\x2e\x44\xdf\xd3\x60\x6d\xb2"
+ .plen = 32,
+ .ctext = "\xe2\x2e\x44\xdf\xd3\x60\x6d\xb2"
"\x70\x57\x37\xc5\xc2\x4f\x8d\x14"
"\xc6\xbf\x8b\xec\xf5\x62\x67\xf2"
"\x2f\xa1\xe6\xd6\xa7\xb1\x8c\x54"
"\xe5\x6b\x49\xf9\x6e\x90\xc3\xaa"
"\x7a\x00\x2e\x4d\x7f\x31\x2e\x81",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
"\x91\x31\x37\xcb\x8d\xb3\x72\x76",
@@ -23824,7 +20605,7 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\xba\x78\xf9\xfb\x9d\x3c\xa1\x58"
"\x1a",
.alen = 33,
- .input = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
+ .ptext = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
"\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
"\x84\x7d\x65\x34\x25\xd8\x47\xfa"
"\xeb\x83\x31\xf1\x54\x54\x89\x0d"
@@ -23833,8 +20614,8 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x75\x73\x20\x30\x59\x54\xb2\xf0"
"\x3a\x4b\xe0\x23\x8e\xa6\x08\x35"
"\x8a",
- .ilen = 65,
- .result = "\xc7\xca\x26\x61\x57\xee\xa2\xb9"
+ .plen = 65,
+ .ctext = "\xc7\xca\x26\x61\x57\xee\xa2\xb9"
"\xb1\x37\xde\x95\x06\x90\x11\x08"
"\x4d\x30\x9f\x24\xc0\x56\xb7\xe1"
"\x0b\x9f\xd2\x57\xe9\xd2\xb1\x76"
@@ -23845,7 +20626,7 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\xac\xc3\xb9\xcb\x61\x8f\x73\x92"
"\x2c\x7a\x6f\xda\xf9\x09\x6f\xe1"
"\xc4",
- .rlen = 81,
+ .clen = 81,
}, {
.key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
"\x12\x0b\xe9\x5c\x87\xd7\x35\x7c",
@@ -23862,20 +20643,20 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
"\x15\x4e\x91\x92\x89\x4b\xb7\x9b"
"\x21",
.alen = 65,
- .input = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
+ .ptext = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
"\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
"\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
"\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
"\xac",
- .ilen = 33,
- .result = "\x57\xcd\x3d\x46\xc5\xf9\x68\x3b"
+ .plen = 33,
+ .ctext = "\x57\xcd\x3d\x46\xc5\xf9\x68\x3b"
"\x2c\x0f\xb4\x7e\x7b\x64\x3e\x40"
"\xf3\x78\x63\x34\x89\x79\x39\x6b"
"\x61\x64\x4a\x9a\xfa\x70\xa4\xd3"
"\x54\x0b\xea\x05\xa6\x95\x64\xed"
"\x3d\x69\xa2\x0c\x27\x56\x2f\x34"
"\x66",
- .rlen = 49,
+ .clen = 49,
}, {
.key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
"\x93\xe6\x9b\xee\x81\xfc\xf7\x82",
@@ -23885,14 +20666,14 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
.assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
"\xd3\x53\xf4\x36\x7e\x8e\x59\x85",
.alen = 16,
- .input = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
+ .ptext = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
"\xf3\x89\x20\x5b\x7c\x57\x89\x07",
- .ilen = 16,
- .result = "\xfc\x85\x06\x28\x8f\xe8\x23\x1f"
+ .plen = 16,
+ .ctext = "\xfc\x85\x06\x28\x8f\xe8\x23\x1f"
"\x33\x98\x87\xde\x08\xb6\xb6\xae"
"\x3e\xa4\xf8\x19\xf1\x92\x60\x39"
"\xb9\x6b\x3f\xdf\xc8\xcb\x30",
- .rlen = 31,
+ .clen = 31,
}, {
.key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
"\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
@@ -23902,14 +20683,14 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
.assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
"\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c",
.alen = 16,
- .input = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
+ .ptext = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
"\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
- .ilen = 16,
- .result = "\x74\x7d\x70\x07\xe9\xba\x01\xee"
+ .plen = 16,
+ .ctext = "\x74\x7d\x70\x07\xe9\xba\x01\xee"
"\x6c\xc6\x6f\x50\x25\x33\xbe\x50"
"\x17\xb8\x17\x62\xed\x80\xa2\xf5"
"\x03\xde\x85\x71\x5d\x34",
- .rlen = 30,
+ .clen = 30,
}, {
.key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
"\x95\x9a\xff\x10\x75\x45\x7d\x8f",
@@ -23919,13 +20700,13 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
.assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
"\xd5\x07\x58\x59\x72\xd7\xde\x92",
.alen = 16,
- .input = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
+ .ptext = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
"\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
- .ilen = 16,
- .result = "\xf4\xb3\x85\xf9\xac\xde\xb1\x38"
+ .plen = 16,
+ .ctext = "\xf4\xb3\x85\xf9\xac\xde\xb1\x38"
"\x29\xfd\x6c\x7c\x49\xe5\x1d\xaf"
"\xba\xea\xd4\xfa\x3f\x11\x33\x98",
- .rlen = 24,
+ .clen = 24,
}, {
.key = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
"\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
@@ -23935,466 +20716,13 @@ static const struct aead_testvec morus640_enc_tv_template[] = {
.assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9"
"\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98",
.alen = 16,
- .input = "\xda\xcc\x14\x27\x4e\x74\xd1\x30"
+ .ptext = "\xda\xcc\x14\x27\x4e\x74\xd1\x30"
"\x76\x18\x37\x0f\x6a\xc4\xd1\x1a",
- .ilen = 16,
- .result = "\xe6\x5c\x49\x4f\x78\xf3\x62\x86"
+ .plen = 16,
+ .ctext = "\xe6\x5c\x49\x4f\x78\xf3\x62\x86"
"\xe1\xb7\xa5\xc3\x32\x88\x3c\x8c"
"\x6e",
- .rlen = 17,
- },
-};
-
-static const struct aead_testvec morus640_dec_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 16,
- .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
- "\x20\x36\x2c\x24\xfe\xc9\x30\x81",
- .assoc = "",
- .alen = 0,
- .input = "\x89\x62\x7d\xf3\x07\x9d\x52\x05"
- "\x53\xc3\x04\x60\x93\xb4\x37\x9a",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b"
- "\x80\xda\xb2\x91\xf9\x24\xc2\x06",
- .klen = 16,
- .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
- "\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
- .assoc = "",
- .alen = 0,
- .input = "\xa8\x8d\xe4\x90\xb5\x50\x8f\x78"
- "\xb6\x10\x9a\x59\x5f\x61\x37\x70"
- "\x09",
- .ilen = 17,
- .result = "\x69",
- .rlen = 1,
- }, {
- .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37"
- "\x01\xb4\x64\x22\xf3\x48\x85\x0c",
- .klen = 16,
- .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
- "\x22\xea\x90\x47\xf2\x11\xb5\x8e",
- .assoc = "",
- .alen = 0,
- .input = "\x76\xdd\xb9\x05\x3d\xce\x61\x38"
- "\xf3\xef\xf7\xe5\xd7\xfd\x70\xa5"
- "\xcf\x9d\x64\xb8\x0a\x9f\xfd\x8b"
- "\xd4\x6e\xfe\xd9\xc8\x63\x4b",
- .ilen = 31,
- .result = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc"
- "\x62\x58\xe9\x8f\xef\xa4\x17",
- .rlen = 15,
- }, {
- .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
- "\x82\x8e\x16\xb4\xed\x6d\x47\x12",
- .klen = 16,
- .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
- "\xa2\xc5\x42\xd8\xec\x36\x78\x94",
- .assoc = "",
- .alen = 0,
- .input = "\xdc\x72\xe8\x14\xfb\x63\xad\x72"
- "\x1f\x57\x9a\x1f\x88\x81\xdb\xd6"
- "\xc1\x91\x9d\xb9\x25\xc4\x99\x4c"
- "\x97\xcd\x8a\x0c\x9d\x68\x00\x1c",
- .ilen = 32,
- .result = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8"
- "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97",
- .rlen = 16,
- }, {
- .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
- "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
- .klen = 16,
- .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
- "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
- .assoc = "",
- .alen = 0,
- .input = "\x6b\x4f\x3b\x90\x9a\xa2\xb3\x82"
- "\x0a\xb8\x55\xee\xeb\x73\x4d\x7f"
- "\x54\x11\x3a\x8a\x31\xa3\xb5\xf2"
- "\xcd\x49\xdb\xf3\xee\x26\xbd\xa2"
- "\x0d",
- .ilen = 33,
- .result = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04"
- "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d"
- "\x09",
- .rlen = 17,
- }, {
- .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
- "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f",
- .klen = 16,
- .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
- "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
- .assoc = "",
- .alen = 0,
- .input = "\x59\xd1\x0f\x6b\xee\x27\x84\x92"
- "\xb7\xa9\xb5\xdd\x02\xa4\x12\xa5"
- "\x50\x32\xb4\x9a\x2e\x35\x83\x55"
- "\x36\x12\x12\xed\xa3\x31\xc5\x30"
- "\xa7\xe2\x4a\x6d\x05\x59\x43\x91"
- "\x75\xfa\x6c\x17\xc6\x73\xca",
- .ilen = 47,
- .result = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f"
- "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3"
- "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93"
- "\x57\x05\x01\x1c\x66\x22\xd3",
- .rlen = 31,
- }, {
- .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
- "\x05\x1d\x2c\x68\xdb\xda\x8f\x25",
- .klen = 16,
- .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
- "\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
- .assoc = "",
- .alen = 0,
- .input = "\xdb\x49\x68\x0f\x91\x5b\x21\xb1"
- "\xcf\x50\xb2\x4c\x32\xe1\xa6\x69"
- "\xc0\xfb\x44\x1f\xa0\x9a\xeb\x39"
- "\x1b\xde\x68\x38\xcc\x27\x52\xc5"
- "\xf6\x3e\x74\xea\x66\x5b\x5f\x0c"
- "\x65\x9e\x58\xe6\x52\xa2\xfe\x59",
- .ilen = 48,
- .result = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b"
- "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa"
- "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad"
- "\x19\x33\xe0\xf4\x40\x81\x72\x28",
- .rlen = 32,
- }, {
- .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
- "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b",
- .klen = 16,
- .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
- "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
- .assoc = "\xc5",
- .alen = 1,
- .input = "\x56\xe7\x24\x52\xdd\x95\x60\x5b"
- "\x09\x48\x39\x69\x9c\xb3\x62\x46",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde"
- "\x07\xd1\x90\x8b\xcf\x23\x15\x31",
- .klen = 16,
- .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
- "\x27\x08\xbd\xaf\xce\xec\x45\xb3",
- .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
- "\x47\x3e\xe9\xd4\xcc\xb5\x76",
- .alen = 15,
- .input = "\xdd\xfa\x6c\x1f\x5d\x86\x87\x01"
- "\x13\xe5\x73\x46\x46\xf2\x5c\xe1",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa"
- "\x88\xab\x42\x1c\xc9\x47\xd7\x38",
- .klen = 16,
- .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
- "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
- .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
- "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b",
- .alen = 16,
- .input = "\xa6\x1b\xb9\xd7\x5e\x3c\xcf\xac"
- "\xa9\x21\x45\x0b\x16\x52\xf7\xe1",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16"
- "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e",
- .klen = 16,
- .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
- "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
- .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
- "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41"
- "\x3c",
- .alen = 17,
- .input = "\x15\xff\xde\x3b\x34\xfc\xf6\xf9"
- "\xbb\xa8\x62\xad\x0a\xf5\x48\x60",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31"
- "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44",
- .klen = 16,
- .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
- "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
- .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
- "\xca\xcd\xff\x88\xba\x22\xbe\x47"
- "\x67\xba\x85\xf1\xbb\x30\x56\x26"
- "\xaf\x0b\x02\x38\xcc\x44\xa7",
- .alen = 31,
- .input = "\xd2\x9d\xf8\x3b\xd7\x84\xe9\x2d"
- "\x4b\xef\x75\x16\x0a\x99\xae\x6b",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d"
- "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a",
- .klen = 16,
- .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
- "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
- .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
- "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d"
- "\x91\x7c\x91\x75\xc0\xd0\xd8\x40"
- "\x71\x39\xe1\x10\xa6\xa3\x46\x7a",
- .alen = 32,
- .input = "\xe4\x8d\xa7\xa7\x45\xc1\x31\x4f"
- "\xce\xfb\xaf\xd6\xc2\xe6\xee\xc0",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69"
- "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50",
- .klen = 16,
- .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
- "\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
- .assoc = "\x31",
- .alen = 1,
- .input = "\xe2\x67\x38\x4f\xb9\xad\x7d\x38"
- "\x01\xfe\x84\x14\x85\xf8\xd1\xe3"
- "\x22",
- .ilen = 17,
- .result = "\x40",
- .rlen = 1,
- }, {
- .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85"
- "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57",
- .klen = 16,
- .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
- "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
- .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
- "\x4d\x5b\x15\x3c\xa8\x8f\x06",
- .alen = 15,
- .input = "\x77\x32\x61\xeb\xb4\x33\x29\x92"
- "\x29\x95\xc5\x8e\x85\x76\xab\xfc"
- "\x07\x95\xa7\x44\x74\xf7\x22\xff"
- "\xd8\xd8\x36\x3d\x8a\x7f\x9e",
- .ilen = 31,
- .result = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
- "\x6d\x92\x42\x61\xa7\x58\x37",
- .rlen = 15,
- }, {
- .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
- "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d",
- .klen = 16,
- .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
- "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
- .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
- "\xce\x36\xc7\xce\xa2\xb4\xc9\x60",
- .alen = 16,
- .input = "\xd8\xfd\x44\x45\xf6\x42\x12\x38"
- "\xf2\x0b\xea\x4f\x9e\x11\x61\x07"
- "\x48\x67\x98\x18\x9b\xd0\x0c\x59"
- "\x67\xa4\x11\xb3\x2b\xd6\xc1\x70",
- .ilen = 32,
- .result = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
- "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
- .rlen = 16,
- }, {
- .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
- "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
- .klen = 16,
- .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
- "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
- .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
- "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66"
- "\x3b",
- .alen = 17,
- .input = "\xb1\xab\x53\x4e\xc7\x40\x16\xb6"
- "\x71\x3a\x00\x9f\x41\x88\xb0\xb2"
- "\x71\x83\x85\x5f\xc8\x79\x0a\x99"
- "\x99\xdc\x89\x1c\x88\xd2\x3e\xf9"
- "\x83",
- .ilen = 33,
- .result = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
- "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
- "\x05",
- .rlen = 17,
- }, {
- .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
- "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69",
- .klen = 16,
- .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
- "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
- .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
- "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d"
- "\x65\x48\xcb\x0a\xda\xf0\x62\xc0"
- "\x38\x1d\x3b\x4a\xe9\x7e\x62",
- .alen = 31,
- .input = "\x29\xc4\xf0\x03\xc1\x86\xdf\x06"
- "\x5c\x7b\xef\x64\x87\x00\xd1\x37"
- "\xa7\x08\xbc\x7f\x8f\x41\x54\xd0"
- "\x3e\xf1\xc3\xa2\x96\x84\xdd\x2a"
- "\x2d\x21\x30\xf9\x02\xdb\x06\x0c"
- "\xf1\x5a\x66\x69\xe0\xca\x83",
- .ilen = 47,
- .result = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
- "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
- "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
- "\x68\x28\x73\x40\x9f\x96\x4a",
- .rlen = 31,
- }, {
- .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
- "\x10\x57\x85\x39\x93\x8f\xaf\x70",
- .klen = 16,
- .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
- "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
- .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
- "\x50\xc4\xde\x82\x90\x21\x11\x73"
- "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda"
- "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81",
- .alen = 32,
- .input = "\xe2\x2e\x44\xdf\xd3\x60\x6d\xb2"
- "\x70\x57\x37\xc5\xc2\x4f\x8d\x14"
- "\xc6\xbf\x8b\xec\xf5\x62\x67\xf2"
- "\x2f\xa1\xe6\xd6\xa7\xb1\x8c\x54"
- "\xe5\x6b\x49\xf9\x6e\x90\xc3\xaa"
- "\x7a\x00\x2e\x4d\x7f\x31\x2e\x81",
- .ilen = 48,
- .result = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
- "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
- "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
- "\x29\x56\x52\x19\x79\xf5\xe9\x37",
- .rlen = 32,
- }, {
- .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
- "\x91\x31\x37\xcb\x8d\xb3\x72\x76",
- .klen = 16,
- .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
- "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
- .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
- "\xd1\x9e\x90\x13\x8a\x45\xd3\x79"
- "\xba\xcd\xe2\x13\xe4\x30\x66\xf4"
- "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58"
- "\x1a",
- .alen = 33,
- .input = "\xc7\xca\x26\x61\x57\xee\xa2\xb9"
- "\xb1\x37\xde\x95\x06\x90\x11\x08"
- "\x4d\x30\x9f\x24\xc0\x56\xb7\xe1"
- "\x0b\x9f\xd2\x57\xe9\xd2\xb1\x76"
- "\x56\x9a\xb4\x58\xc5\x08\xfc\xb5"
- "\xf2\x31\x9b\xc9\xcd\xb3\x64\xdb"
- "\x6f\x50\xbf\xf4\x73\x9d\xfb\x6b"
- "\xef\x35\x25\x48\xed\xcf\x29\xa8"
- "\xac\xc3\xb9\xcb\x61\x8f\x73\x92"
- "\x2c\x7a\x6f\xda\xf9\x09\x6f\xe1"
- "\xc4",
- .ilen = 81,
- .result = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
- "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
- "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
- "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
- "\x9d\x4d\x54\x51\x84\x61\xf6\x8e"
- "\x03\x31\xf2\x25\x16\xcc\xaa\xc6"
- "\x75\x73\x20\x30\x59\x54\xb2\xf0"
- "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35"
- "\x8a",
- .rlen = 65,
- }, {
- .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
- "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c",
- .klen = 16,
- .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
- "\x32\x42\x15\x80\x85\xa1\x65\xfe",
- .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
- "\x52\x79\x42\xa5\x84\x6a\x96\x7f"
- "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d"
- "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e"
- "\x28\xce\x57\x34\xcd\x6e\x84\x4c"
- "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1"
- "\x96\x41\x0d\x69\xe8\x54\x0a\xc8"
- "\x15\x4e\x91\x92\x89\x4b\xb7\x9b"
- "\x21",
- .alen = 65,
- .input = "\x57\xcd\x3d\x46\xc5\xf9\x68\x3b"
- "\x2c\x0f\xb4\x7e\x7b\x64\x3e\x40"
- "\xf3\x78\x63\x34\x89\x79\x39\x6b"
- "\x61\x64\x4a\x9a\xfa\x70\xa4\xd3"
- "\x54\x0b\xea\x05\xa6\x95\x64\xed"
- "\x3d\x69\xa2\x0c\x27\x56\x2f\x34"
- "\x66",
- .ilen = 49,
- .result = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
- "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
- "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
- "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
- "\xac",
- .rlen = 33,
- }, {
- .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
- "\x93\xe6\x9b\xee\x81\xfc\xf7\x82",
- .klen = 16,
- .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
- "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
- .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
- "\xd3\x53\xf4\x36\x7e\x8e\x59\x85",
- .alen = 16,
- .input = "\xfc\x85\x06\x28\x8f\xe8\x23\x1f"
- "\x33\x98\x87\xde\x08\xb6\xb6\xae"
- "\x3e\xa4\xf8\x19\xf1\x92\x60\x39"
- "\xb9\x6b\x3f\xdf\xc8\xcb\x30",
- .ilen = 31,
- .result = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
- "\xf3\x89\x20\x5b\x7c\x57\x89\x07",
- .rlen = 16,
- }, {
- .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
- "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
- .klen = 16,
- .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
- "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
- .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
- "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c",
- .alen = 16,
- .input = "\x74\x7d\x70\x07\xe9\xba\x01\xee"
- "\x6c\xc6\x6f\x50\x25\x33\xbe\x50"
- "\x17\xb8\x17\x62\xed\x80\xa2\xf5"
- "\x03\xde\x85\x71\x5d\x34",
- .ilen = 30,
- .result = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
- "\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
- .rlen = 16,
- }, {
- .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
- "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
- .klen = 16,
- .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
- "\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
- .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
- "\xd5\x07\x58\x59\x72\xd7\xde\x92",
- .alen = 16,
- .input = "\xf4\xb3\x85\xf9\xac\xde\xb1\x38"
- "\x29\xfd\x6c\x7c\x49\xe5\x1d\xaf"
- "\xba\xea\xd4\xfa\x3f\x11\x33\x98",
- .ilen = 24,
- .result = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
- "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
- .rlen = 16,
- }, {
- .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
- "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
- .klen = 16,
- .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22"
- "\x36\xab\xde\xc6\x6d\x32\x70\x17",
- .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9"
- "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98",
- .alen = 16,
- .input = "\xe6\x5c\x49\x4f\x78\xf3\x62\x86"
- "\xe1\xb7\xa5\xc3\x32\x88\x3c\x8c"
- "\x6e",
- .ilen = 17,
- .result = "\xda\xcc\x14\x27\x4e\x74\xd1\x30"
- "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a",
- .rlen = 16,
+ .clen = 17,
},
};
@@ -24405,7 +20733,7 @@ static const struct aead_testvec morus640_dec_tv_template[] = {
* https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
* (see crypto_aead/morus1280128v2/ and crypto_aead/morus1280256v2/ )
*/
-static const struct aead_testvec morus1280_enc_tv_template[] = {
+static const struct aead_testvec morus1280_tv_template[] = {
{
.key = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
@@ -24414,11 +20742,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x20\x36\x2c\x24\xfe\xc9\x30\x81",
.assoc = "",
.alen = 0,
- .input = "",
- .ilen = 0,
- .result = "\x91\x85\x0f\xf5\x52\x9e\xce\xce"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x91\x85\x0f\xf5\x52\x9e\xce\xce"
"\x65\x99\xc7\xbf\xd3\x76\xe8\x98",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b"
"\x80\xda\xb2\x91\xf9\x24\xc2\x06",
@@ -24427,12 +20755,12 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
.assoc = "",
.alen = 0,
- .input = "\x69",
- .ilen = 1,
- .result = "\x88\xc3\x4c\xf0\x2f\x43\x76\x13"
+ .ptext = "\x69",
+ .plen = 1,
+ .ctext = "\x88\xc3\x4c\xf0\x2f\x43\x76\x13"
"\x96\xda\x76\x34\x33\x4e\xd5\x39"
"\x73",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x79\x49\x73\x3e\x20\xf7\x51\x37"
"\x01\xb4\x64\x22\xf3\x48\x85\x0c",
@@ -24441,18 +20769,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x22\xea\x90\x47\xf2\x11\xb5\x8e",
.assoc = "",
.alen = 0,
- .input = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc"
+ .ptext = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc"
"\x62\x58\xe9\x8f\xef\xa4\x17\x91"
"\xb4\x96\x9f\x6b\xce\x38\xa5\x46"
"\x13\x7d\x64\x93\xd7\x05\xf5",
- .ilen = 31,
- .result = "\x3e\x5c\x3b\x58\x3b\x7d\x2a\x22"
+ .plen = 31,
+ .ctext = "\x3e\x5c\x3b\x58\x3b\x7d\x2a\x22"
"\x75\x0b\x24\xa6\x0e\xc3\xde\x52"
"\x97\x0b\x64\xd4\xce\x90\x52\xf7"
"\xef\xdb\x6a\x38\xd2\xa8\xa1\x0d"
"\xe0\x61\x33\x24\xc6\x4d\x51\xbc"
"\xa4\x21\x74\xcf\x19\x16\x59",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
"\x82\x8e\x16\xb4\xed\x6d\x47\x12",
@@ -24461,18 +20789,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xa2\xc5\x42\xd8\xec\x36\x78\x94",
.assoc = "",
.alen = 0,
- .input = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8"
+ .ptext = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8"
"\xe3\x32\x9b\x21\xe9\xc8\xd9\x97"
"\xde\x58\xab\xf0\xd3\xd8\x27\x60"
"\xd5\xaa\x43\x6b\xb1\x64\x95\xa4",
- .ilen = 32,
- .result = "\x30\x82\x9c\x2b\x67\xcb\xf9\x1f"
+ .plen = 32,
+ .ctext = "\x30\x82\x9c\x2b\x67\xcb\xf9\x1f"
"\xde\x9f\x77\xb2\xda\x92\x61\x5c"
"\x09\x0b\x2d\x9a\x26\xaa\x1c\x06"
"\xab\x74\xb7\x2b\x95\x5f\x9f\xa1"
"\x9a\xff\x50\xa0\xa2\xff\xc5\xad"
"\x21\x8e\x84\x5c\x12\x61\xb2\xae",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
"\x03\x68\xc8\x45\xe7\x91\x0a\x18",
@@ -24481,20 +20809,20 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
.assoc = "",
.alen = 0,
- .input = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04"
+ .ptext = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04"
"\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d"
"\x09\x1a\xb7\x74\xd8\x78\xa9\x79"
"\x96\xd8\x22\x43\x8c\xc3\x34\x7b"
"\xc4",
- .ilen = 33,
- .result = "\x67\x5d\x8e\x45\xc8\x39\xf5\x17"
+ .plen = 33,
+ .ctext = "\x67\x5d\x8e\x45\xc8\x39\xf5\x17"
"\xc1\x1d\x2a\xdd\x88\x67\xda\x1f"
"\x6d\xe8\x37\x28\x5a\xc1\x5e\x9f"
"\xa6\xec\xc6\x92\x05\x4b\xc0\xa3"
"\x63\xef\x88\xa4\x9b\x0a\x5c\xed"
"\x2b\x6a\xac\x63\x52\xaa\x10\x94"
"\xd0",
- .rlen = 49,
+ .clen = 49,
}, {
.key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
"\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f",
@@ -24503,7 +20831,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
.assoc = "",
.alen = 0,
- .input = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f"
+ .ptext = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f"
"\xe5\xe6\xff\x44\xdd\x11\x5f\xa3"
"\x33\xdd\xc2\xf8\xdd\x18\x2b\x93"
"\x57\x05\x01\x1c\x66\x22\xd3\x51"
@@ -24511,8 +20839,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x96\x58\xd5\x8c\x64\x8c\x7c\xf5"
"\x01\xd0\x74\x5f\x9b\xaa\xf6\xd1"
"\xe6\x16\xa2\xac\xde\x47\x40",
- .ilen = 63,
- .result = "\x7d\x61\x1a\x35\x20\xcc\x07\x88"
+ .plen = 63,
+ .ctext = "\x7d\x61\x1a\x35\x20\xcc\x07\x88"
"\x03\x98\x87\xcf\xc0\x6e\x4d\x19"
"\xe3\xd4\x0b\xfb\x29\x8f\x49\x1a"
"\x3a\x06\x77\xce\x71\x2c\xcd\xdd"
@@ -24522,7 +20850,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xdf\x5b\x0f\xbd\x8a\x88\x78\xc9"
"\xe5\x81\x37\xde\x84\x7a\xf6\x84"
"\x99\x7a\x72\x9c\x54\x31\xa1",
- .rlen = 79,
+ .clen = 79,
}, {
.key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
"\x05\x1d\x2c\x68\xdb\xda\x8f\x25",
@@ -24531,7 +20859,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
.assoc = "",
.alen = 0,
- .input = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b"
+ .ptext = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b"
"\x66\xc0\xb1\xd5\xd7\x35\x21\xaa"
"\x5d\x9f\xce\x7c\xe2\xb8\xad\xad"
"\x19\x33\xe0\xf4\x40\x81\x72\x28"
@@ -24539,8 +20867,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xb0\x68\x69\xf2\x27\x35\x91\x84"
"\x2e\x37\x5b\x00\x04\xff\x16\x9c"
"\xb5\x19\x39\xeb\xd9\xcd\x29\x9a",
- .ilen = 64,
- .result = "\x05\xc5\xb1\xf9\x1b\xb9\xab\x2c"
+ .plen = 64,
+ .ctext = "\x05\xc5\xb1\xf9\x1b\xb9\xab\x2c"
"\xa5\x07\x12\xa7\x12\x39\x60\x66"
"\x30\x81\x4a\x03\x78\x28\x45\x52"
"\xd2\x2b\x24\xfd\x8b\xa5\xb7\x66"
@@ -24550,7 +20878,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xeb\x71\x40\xc9\x2c\x40\x45\x6d"
"\x73\x77\x01\xf3\x4f\xf3\x9d\x2a"
"\x5d\x57\xa8\xa1\x18\xa2\xad\xcb",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
"\x86\xf7\xde\xfa\xd5\xfe\x52\x2b",
@@ -24559,11 +20887,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
.assoc = "\xc5",
.alen = 1,
- .input = "",
- .ilen = 0,
- .result = "\x4d\xbf\x11\xac\x7f\x97\x0b\x2e"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x4d\xbf\x11\xac\x7f\x97\x0b\x2e"
"\x89\x3b\x9d\x0f\x83\x1c\x08\xc3",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde"
"\x07\xd1\x90\x8b\xcf\x23\x15\x31",
@@ -24575,11 +20903,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xe8\x73\x62\x64\xab\x50\xd0\xda"
"\x6b\x83\x66\xaf\x3e\x27\xc9",
.alen = 31,
- .input = "",
- .ilen = 0,
- .result = "\x5b\xc0\x8d\x54\xe4\xec\xbe\x38"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x5b\xc0\x8d\x54\xe4\xec\xbe\x38"
"\x03\x12\xf9\xcc\x9e\x46\x42\x92",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x20\x4a\x07\x99\x91\x58\xee\xfa"
"\x88\xab\x42\x1c\xc9\x47\xd7\x38",
@@ -24591,11 +20919,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x12\x35\x6e\xe8\xb0\xf0\x52\xf3"
"\x2d\xb0\x45\x87\x18\x86\x68\xf6",
.alen = 32,
- .input = "",
- .ilen = 0,
- .result = "\x48\xc5\xc3\x4c\x40\x2e\x2f\xc2"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x48\xc5\xc3\x4c\x40\x2e\x2f\xc2"
"\x6d\x65\xe0\x67\x9c\x1d\xa0\xf0",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16"
"\x09\x85\xf4\xae\xc3\x6b\x9a\x3e",
@@ -24608,11 +20936,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xee\xde\x23\x60\xf2\xe5\x08\xcc"
"\x97",
.alen = 33,
- .input = "",
- .ilen = 0,
- .result = "\x28\x64\x78\x51\x55\xd8\x56\x4a"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x28\x64\x78\x51\x55\xd8\x56\x4a"
"\x58\x3e\xf7\xbe\xee\x21\xfe\x94",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31"
"\x8a\x60\xa6\x3f\xbd\x90\x5d\x44",
@@ -24628,11 +20956,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x03\xa1\xe8\xbe\x37\x54\xec\xa2"
"\xcd\x2c\x45\x58\xbd\x8e\x80",
.alen = 63,
- .input = "",
- .ilen = 0,
- .result = "\xb3\xa6\x00\x4e\x09\x20\xac\x21"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xb3\xa6\x00\x4e\x09\x20\xac\x21"
"\x77\x72\x69\x76\x2d\x36\xe5\xc8",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d"
"\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a",
@@ -24648,11 +20976,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x30\x08\xd0\x5f\xa0\xaa\x0c\x6d"
"\x9c\x2f\xdb\x97\xb8\x15\x69\x01",
.alen = 64,
- .input = "",
- .ilen = 0,
- .result = "\x65\x33\x7b\xa1\x63\xf4\x20\xdd"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x65\x33\x7b\xa1\x63\xf4\x20\xdd"
"\xe4\xb9\x4a\xaa\x9a\x21\xaa\x14",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x12\xdd\xee\x17\xd1\x47\x92\x69"
"\x8b\x14\x0a\x62\xb1\xd9\xe2\x50",
@@ -24661,12 +20989,12 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
.assoc = "\x31",
.alen = 1,
- .input = "\x40",
- .ilen = 1,
- .result = "\x1d\x47\x17\x34\x86\xf5\x54\x1a"
+ .ptext = "\x40",
+ .plen = 1,
+ .ctext = "\x1d\x47\x17\x34\x86\xf5\x54\x1a"
"\x6d\x28\xb8\x5d\x6c\xcf\xa0\xb9"
"\xbf",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85"
"\x0c\xee\xbc\xf4\xab\xfd\xa5\x57",
@@ -24678,18 +21006,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xe6\x01\xa8\x7e\xca\x10\xdc\x73"
"\xf4\x94\x9f\xc1\x5a\x61\x85",
.alen = 31,
- .input = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
+ .ptext = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
"\x6d\x92\x42\x61\xa7\x58\x37\xdb"
"\xb0\xb2\x2b\x9f\x0b\xb8\xbd\x7a"
"\x24\xa0\xd6\xb7\x11\x79\x6c",
- .ilen = 31,
- .result = "\x78\x90\x52\xae\x0f\xf7\x2e\xef"
+ .plen = 31,
+ .ctext = "\x78\x90\x52\xae\x0f\xf7\x2e\xef"
"\x63\x09\x08\x58\xb5\x56\xbd\x72"
"\x6e\x42\xcf\x27\x04\x7c\xdb\x92"
"\x18\xe9\xa4\x33\x90\xba\x62\xb5"
"\x70\xd3\x88\x9b\x4f\x05\xa7\x51"
"\x85\x87\x17\x09\x42\xed\x4e",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
"\x8d\xc8\x6e\x85\xa5\x21\x67\x5d",
@@ -24701,18 +21029,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d"
"\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd",
.alen = 32,
- .input = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
+ .ptext = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
"\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2"
"\xdb\x74\x36\x23\x11\x58\x3f\x93"
"\xe5\xcd\xb5\x90\xeb\xd8\x0c\xb3",
- .ilen = 32,
- .result = "\x1d\x2c\x57\xe0\x50\x38\x3d\x41"
+ .plen = 32,
+ .ctext = "\x1d\x2c\x57\xe0\x50\x38\x3d\x41"
"\x2e\x71\xc8\x3b\x92\x43\x58\xaf"
"\x5a\xfb\xad\x8f\xd9\xd5\x8a\x5e"
"\xdb\xf3\xcd\x3a\x2b\xe1\x2c\x1a"
"\xb0\xed\xe3\x0c\x6e\xf9\xf2\xd6"
"\x90\xe6\xb1\x0e\xa5\x8a\xac\xb7",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
"\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
@@ -24725,20 +21053,20 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x76\xef\x5c\x72\x0f\x1f\xc3\xd4"
"\xee",
.alen = 33,
- .input = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
+ .ptext = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
"\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
"\x05\x36\x42\xa7\x16\xf8\xc1\xad"
"\xa7\xfb\x94\x68\xc5\x37\xab\x8a"
"\x72",
- .ilen = 33,
- .result = "\x59\x10\x84\x1c\x83\x4c\x8b\xfc"
+ .plen = 33,
+ .ctext = "\x59\x10\x84\x1c\x83\x4c\x8b\xfc"
"\xfd\x2e\x4b\x46\x84\xff\x78\x4e"
"\x50\xda\x5c\xb9\x61\x1d\xf5\xb9"
"\xfe\xbb\x7f\xae\x8c\xc1\x24\xbd"
"\x8c\x6f\x1f\x9b\xce\xc6\xc1\x37"
"\x08\x06\x5a\xe5\x96\x10\x95\xc2"
"\x5e",
- .rlen = 49,
+ .clen = 49,
}, {
.key = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
"\x8f\x7d\xd3\xa8\x99\x6a\xed\x69",
@@ -24754,7 +21082,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x10\x0b\x56\x85\xad\x54\xaa\x66"
"\xa8\x43\xcd\xd4\x9b\xb7\xfa",
.alen = 63,
- .input = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
+ .ptext = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
"\xf0\x20\x58\x15\x95\xc6\x7f\xee"
"\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
"\x68\x28\x73\x40\x9f\x96\x4a\x60"
@@ -24762,8 +21090,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xcf\x12\xc9\x59\x8f\x7a\x7f\xa8"
"\x1b\xa5\x50\xed\x87\xa9\x72\x59"
"\x9c\x44\xb2\xa4\x99\x98\x34",
- .ilen = 63,
- .result = "\x9a\x12\xbc\xdf\x72\xa8\x56\x22"
+ .plen = 63,
+ .ctext = "\x9a\x12\xbc\xdf\x72\xa8\x56\x22"
"\x49\x2d\x07\x92\xfc\x3d\x6d\x5f"
"\xef\x36\x19\xae\x91\xfa\xd6\x63"
"\x46\xea\x8a\x39\x14\x21\xa6\x37"
@@ -24773,7 +21101,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x58\x58\x23\x40\xfd\xa5\xc2\xe6"
"\xe9\x5a\x50\x98\x00\x58\xc9\x86"
"\x4f\x20\x37\xdb\x7b\x22\xa3",
- .rlen = 79,
+ .clen = 79,
}, {
.key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
"\x10\x57\x85\x39\x93\x8f\xaf\x70",
@@ -24789,7 +21117,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x3d\x72\x3e\x26\x16\xa9\xca\x32"
"\x77\x47\x63\x14\x95\x3d\xe4\x34",
.alen = 64,
- .input = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
+ .ptext = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
"\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
"\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
"\x29\x56\x52\x19\x79\xf5\xe9\x37"
@@ -24797,8 +21125,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xe9\x21\x5e\xbf\x52\x23\x95\x37"
"\x48\x0c\x38\x8f\xf0\xff\x92\x24"
"\x6b\x47\x49\xe3\x94\x1f\x1e\x01",
- .ilen = 64,
- .result = "\xe6\xeb\x92\x5a\x5b\xf0\x2d\xbb"
+ .plen = 64,
+ .ctext = "\xe6\xeb\x92\x5a\x5b\xf0\x2d\xbb"
"\x23\xec\x35\xe3\xae\xc9\xfb\x0b"
"\x90\x14\x46\xeb\xa8\x8d\xb0\x9b"
"\x39\xda\x8b\x48\xec\xb2\x00\x4e"
@@ -24808,7 +21136,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xec\x99\x7d\x61\xb3\x15\x93\xed"
"\x83\x1e\xd9\x48\x84\x0b\x37\xfe"
"\x95\x74\x44\xd5\x54\xa6\x27\x06",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
"\x91\x31\x37\xcb\x8d\xb3\x72\x76",
@@ -24825,7 +21153,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x46\x4a\xfa\x53\x8f\xc4\xcd\x68"
"\x58",
.alen = 65,
- .input = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
+ .ptext = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
"\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
"\x84\x7d\x65\x34\x25\xd8\x47\xfa"
"\xeb\x83\x31\xf1\x54\x54\x89\x0d"
@@ -24842,8 +21170,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x33\x16\x59\x6c\x3b\xef\x88\xad"
"\x2f\xab\xbc\x25\x76\x87\x41\x2f"
"\x36",
- .ilen = 129,
- .result = "\x89\x24\x27\x86\xdc\xd7\x6b\xd9"
+ .plen = 129,
+ .ctext = "\x89\x24\x27\x86\xdc\xd7\x6b\xd9"
"\xd1\xcd\xdc\x16\xdd\x2c\xc1\xfb"
"\x52\xb5\xb3\xab\x50\x99\x3f\xa0"
"\x38\xa4\x74\xa5\x04\x15\x63\x05"
@@ -24862,7 +21190,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xed\xed\x35\xe8\x83\xa5\xec\x25"
"\x6b\xff\x5f\x1a\x09\x96\x3d\xdc"
"\x20",
- .rlen = 145,
+ .clen = 145,
}, {
.key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
"\x12\x0b\xe9\x5c\x87\xd7\x35\x7c",
@@ -24887,7 +21215,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xd9\xd2\xaf\x8e\xd5\xd3\xa8\xa9"
"\x51",
.alen = 129,
- .input = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
+ .ptext = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
"\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
"\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
"\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
@@ -24896,8 +21224,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb"
"\x09\x4f\x77\x62\x88\x2d\xf2\x68"
"\x54",
- .ilen = 65,
- .result = "\x36\x78\xb9\x22\xde\x62\x35\x55"
+ .plen = 65,
+ .ctext = "\x36\x78\xb9\x22\xde\x62\x35\x55"
"\x1a\x7a\xf5\x45\xbc\xd7\x15\x82"
"\x01\xe9\x5a\x07\xea\x46\xaf\x91"
"\xcb\x73\xa5\xee\xe1\xb4\xbf\xc2"
@@ -24908,7 +21236,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xbf\xbc\x88\x3f\x5d\xd1\xf9\x19"
"\x0f\x9d\xb2\xaf\xb9\x6e\x17\xdf"
"\xa2",
- .rlen = 81,
+ .clen = 81,
}, {
.key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
"\x93\xe6\x9b\xee\x81\xfc\xf7\x82",
@@ -24920,18 +21248,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x0e\x51\xf9\x1c\xee\x70\x6a\x27"
"\x3d\xd3\xb7\xac\x51\xfa\xdf\x05",
.alen = 32,
- .input = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
+ .ptext = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
"\xf3\x89\x20\x5b\x7c\x57\x89\x07"
"\xd9\x02\x7c\x3d\x2f\x18\x4b\x2d"
"\x6e\xde\xee\xa2\x08\x12\xc7\xba",
- .ilen = 32,
- .result = "\x08\x1b\x95\x0e\x41\x95\x02\x4b"
+ .plen = 32,
+ .ctext = "\x08\x1b\x95\x0e\x41\x95\x02\x4b"
"\x9c\xbb\xa8\xd0\x7c\xd3\x44\x6e"
"\x89\x14\x33\x70\x0a\xbc\xea\x39"
"\x88\xaa\x2b\xd5\x73\x11\x55\xf5"
"\x33\x33\x9c\xd7\x42\x34\x49\x8e"
"\x2f\x03\x30\x05\x47\xaf\x34",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
"\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
@@ -24943,18 +21271,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x39\x14\x05\xa0\xf3\x10\xec\x41"
"\xff\x01\x95\x84\x2b\x59\x7f\xdb",
.alen = 32,
- .input = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
+ .ptext = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
"\x74\x63\xd2\xec\x76\x7c\x4c\x0d"
"\x03\xc4\x88\xc1\x35\xb8\xcd\x47"
"\x2f\x0c\xcd\x7a\xe2\x71\x66\x91",
- .ilen = 32,
- .result = "\x97\xca\xf4\xe0\x8d\x89\xbf\x68"
+ .plen = 32,
+ .ctext = "\x97\xca\xf4\xe0\x8d\x89\xbf\x68"
"\x0c\x60\xb9\x27\xdf\xaa\x41\xc6"
"\x25\xd8\xf7\x1f\x10\x15\x48\x61"
"\x4c\x95\x00\xdf\x51\x9b\x7f\xe6"
"\x24\x40\x9e\xbe\x3b\xeb\x1b\x98"
"\xb9\x9c\xe5\xef\xf2\x05",
- .rlen = 46,
+ .clen = 46,
}, {
.key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
"\x95\x9a\xff\x10\x75\x45\x7d\x8f",
@@ -24966,17 +21294,17 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x63\xd6\x10\x24\xf8\xb0\x6e\x5a"
"\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2",
.alen = 32,
- .input = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
+ .ptext = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
"\xf5\x3e\x85\x7d\x70\xa0\x0f\x13"
"\x2e\x86\x93\x45\x3a\x58\x4f\x61"
"\xf0\x3a\xac\x53\xbc\xd0\x06\x68",
- .ilen = 32,
- .result = "\x63\x4c\x2a\x8e\xb4\x6b\x63\x0d"
+ .plen = 32,
+ .ctext = "\x63\x4c\x2a\x8e\xb4\x6b\x63\x0d"
"\xb5\xec\x9b\x4e\x12\x23\xa3\xcf"
"\x1a\x5a\x70\x15\x5a\x10\x40\x51"
"\xca\x47\x4c\x9d\xc9\x97\xf4\x77"
"\xdb\xc8\x10\x2d\xdc\x65\x20\x3f",
- .rlen = 40,
+ .clen = 40,
}, {
.key = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
"\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
@@ -24988,17 +21316,17 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x8d\x98\x1c\xa8\xfe\x50\xf0\x74"
"\x81\x5c\x53\x35\xe0\x17\xbd\x88",
.alen = 32,
- .input = "\xda\xcc\x14\x27\x4e\x74\xd1\x30"
+ .ptext = "\xda\xcc\x14\x27\x4e\x74\xd1\x30"
"\x76\x18\x37\x0f\x6a\xc4\xd1\x1a"
"\x58\x49\x9f\xc9\x3f\xf8\xd1\x7a"
"\xb2\x67\x8b\x2b\x96\x2f\xa5\x3e",
- .ilen = 32,
- .result = "\xf1\x62\x44\xc7\x5f\x19\xca\x43"
+ .plen = 32,
+ .ctext = "\xf1\x62\x44\xc7\x5f\x19\xca\x43"
"\x47\x2c\xaf\x68\x82\xbd\x51\xef"
"\x3d\x65\xd8\x45\x2d\x06\x07\x78"
"\x08\x2e\xb3\x23\xcd\x81\x12\x55"
"\x1a",
- .rlen = 33,
+ .clen = 33,
}, {
.key = "\xe9\x95\xa2\x8f\x93\x13\x7b\xb7"
"\x96\x4e\x63\x33\x69\x8d\x02\x9b"
@@ -25009,11 +21337,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xb7\x85\x90\x58\x67\x57\x33\x1d",
.assoc = "",
.alen = 0,
- .input = "",
- .ilen = 0,
- .result = "\xdf\x2f\x83\xc0\x45\x4a\x2c\xcf"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xdf\x2f\x83\xc0\x45\x4a\x2c\xcf"
"\xb9\xd2\x41\xf6\x80\xa1\x52\x70",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x25\xba\xdc\x2e\xa3\x8f\x24\xd3"
"\x17\x29\x15\xc5\x63\xb2\xc5\xa1"
@@ -25024,12 +21352,12 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x38\x5f\x42\xe9\x61\x7b\xf5\x23",
.assoc = "",
.alen = 0,
- .input = "\x53",
- .ilen = 1,
- .result = "\x01\xd8\x55\x3c\xc0\x5a\x4b\xc7"
+ .ptext = "\x53",
+ .plen = 1,
+ .ctext = "\x01\xd8\x55\x3c\xc0\x5a\x4b\xc7"
"\x01\xf4\x08\xe3\x0d\xf7\xf0\x78"
"\x53",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x62\xdf\x16\xcd\xb3\x0a\xcc\xef"
"\x98\x03\xc7\x56\x5d\xd6\x87\xa8"
@@ -25040,18 +21368,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xb8\x39\xf4\x7a\x5b\x9f\xb8\x29",
.assoc = "",
.alen = 0,
- .input = "\x8f\x3a\xc1\x05\x7f\xe7\xcb\x83"
+ .ptext = "\x8f\x3a\xc1\x05\x7f\xe7\xcb\x83"
"\xf9\xa6\x4d\xc3\x58\x31\x19\x2c"
"\xd7\x90\xc2\x56\x4e\xd8\x57\xc7"
"\xf6\xf0\x27\xb4\x25\x4c\x83",
- .ilen = 31,
- .result = "\xc2\x4b\x41\x0f\x2d\xb9\x62\x07"
+ .plen = 31,
+ .ctext = "\xc2\x4b\x41\x0f\x2d\xb9\x62\x07"
"\xff\x8e\x74\xf8\xa1\xa6\xd5\x37"
"\xa5\x64\x31\x5c\xca\x73\x9b\x43"
"\xe6\x70\x63\x46\x95\xcb\xf7\xb5"
"\x20\x8c\x75\x7a\x2a\x17\x2f\xa9"
"\xb8\x4d\x11\x42\xd1\xf8\xf1",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x9e\x03\x4f\x6d\xc3\x86\x75\x0a"
"\x19\xdd\x79\xe8\x57\xfb\x4a\xae"
@@ -25062,18 +21390,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x39\x14\xa6\x0c\x55\xc4\x7b\x30",
.assoc = "",
.alen = 0,
- .input = "\xcc\x5f\xfb\xa4\x8f\x63\x74\x9f"
+ .ptext = "\xcc\x5f\xfb\xa4\x8f\x63\x74\x9f"
"\x7a\x81\xff\x55\x52\x56\xdc\x33"
"\x01\x52\xcd\xdb\x53\x78\xd9\xe1"
"\xb7\x1d\x06\x8d\xff\xab\x22\x98",
- .ilen = 32,
- .result = "\xbb\x01\x7c\xd1\x2c\x33\x7b\x37"
+ .plen = 32,
+ .ctext = "\xbb\x01\x7c\xd1\x2c\x33\x7b\x37"
"\x0a\xee\xc4\x30\x19\xd7\x3a\x6f"
"\xf8\x2b\x67\xf5\x3b\x84\x87\x2a"
"\xfb\x07\x7a\x82\xb5\xe4\x85\x26"
"\x1e\xa8\xe5\x04\x54\xce\xe5\x5f"
"\xb5\x3f\xc1\xd5\x7f\xbd\xd2\xa6",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xdb\x28\x89\x0c\xd3\x01\x1e\x26"
"\x9a\xb7\x2b\x79\x51\x1f\x0d\xb4"
@@ -25084,20 +21412,20 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xba\xee\x58\x9d\x4f\xe8\x3d\x36",
.assoc = "",
.alen = 0,
- .input = "\x08\x84\x34\x44\x9f\xde\x1c\xbb"
+ .ptext = "\x08\x84\x34\x44\x9f\xde\x1c\xbb"
"\xfb\x5b\xb1\xe6\x4c\x7a\x9f\x39"
"\x2c\x14\xd9\x5f\x59\x18\x5b\xfb"
"\x79\x4b\xe5\x65\xd9\x0a\xc1\x6f"
"\x2e",
- .ilen = 33,
- .result = "\xc2\xf4\x40\x55\xf9\x59\xff\x73"
+ .plen = 33,
+ .ctext = "\xc2\xf4\x40\x55\xf9\x59\xff\x73"
"\x08\xf5\x98\x92\x0c\x7b\x35\x9a"
"\xa8\xf4\x42\x7e\x6f\x93\xca\x22"
"\x23\x06\x1e\xf8\x89\x22\xf4\x46"
"\x7c\x7c\x67\x75\xab\xe5\x75\xaa"
"\x15\xd7\x83\x19\xfd\x31\x59\x5b"
"\x32",
- .rlen = 49,
+ .clen = 49,
}, {
.key = "\x17\x4d\xc3\xab\xe3\x7d\xc7\x42"
"\x1b\x91\xdd\x0a\x4b\x43\xcf\xba"
@@ -25108,7 +21436,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x3b\xc8\x0a\x2f\x49\x0c\x00\x3c",
.assoc = "",
.alen = 0,
- .input = "\x45\xa8\x6e\xe3\xaf\x5a\xc5\xd7"
+ .ptext = "\x45\xa8\x6e\xe3\xaf\x5a\xc5\xd7"
"\x7c\x35\x63\x77\x46\x9f\x61\x3f"
"\x56\xd7\xe4\xe3\x5e\xb8\xdc\x14"
"\x3a\x79\xc4\x3e\xb3\x69\x61\x46"
@@ -25116,8 +21444,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x22\xda\x52\x8b\x7d\x11\x98\xea"
"\x62\xe1\x14\x1e\xdc\xfe\x0f\xad"
"\x20\x76\x5a\xdc\x4e\x71\x13",
- .ilen = 63,
- .result = "\xc9\x82\x3b\x4b\x87\x84\xa5\xdb"
+ .plen = 63,
+ .ctext = "\xc9\x82\x3b\x4b\x87\x84\xa5\xdb"
"\xa0\x8c\xd3\x3e\x7f\x8d\xe8\x28"
"\x2a\xdc\xfa\x01\x84\x87\x9a\x70"
"\x81\x75\x37\x0a\xd2\x75\xa9\xb6"
@@ -25127,7 +21455,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x83\x9b\x05\x11\x72\x60\xf0\xb4"
"\x7e\x06\xab\x0a\xc0\xbb\x59\x23"
"\xaa\x2d\xfc\x4e\x35\x05\x59",
- .rlen = 79,
+ .clen = 79,
}, {
.key = "\x54\x71\xfd\x4b\xf3\xf9\x6f\x5e"
"\x9c\x6c\x8f\x9c\x45\x68\x92\xc1"
@@ -25138,7 +21466,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xbc\xa2\xbc\xc0\x43\x31\xc2\x42",
.assoc = "",
.alen = 0,
- .input = "\x81\xcd\xa8\x82\xbf\xd6\x6e\xf3"
+ .ptext = "\x81\xcd\xa8\x82\xbf\xd6\x6e\xf3"
"\xfd\x0f\x15\x09\x40\xc3\x24\x45"
"\x81\x99\xf0\x67\x63\x58\x5e\x2e"
"\xfb\xa6\xa3\x16\x8d\xc8\x00\x1c"
@@ -25146,8 +21474,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x3d\xea\xe7\xf2\x40\xba\xae\x79"
"\x8f\x48\xfc\xbf\x45\x53\x2e\x78"
"\xef\x79\xf0\x1b\x49\xf7\xfd\x9c",
- .ilen = 64,
- .result = "\x11\x7c\x7d\xef\xce\x29\x95\xec"
+ .plen = 64,
+ .ctext = "\x11\x7c\x7d\xef\xce\x29\x95\xec"
"\x7e\x9f\x42\xa6\x26\x07\xa1\x75"
"\x2f\x4e\x09\x9a\xf6\x6b\xc2\xfa"
"\x0d\xd0\x17\xdc\x25\x1e\x9b\xdc"
@@ -25157,7 +21485,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x43\x11\x26\x58\xcf\xc5\x41\xcf"
"\x13\xcc\xde\x32\x92\xfa\x86\xf2"
"\xaf\x16\xe8\x8f\xca\xb6\xfd\x54",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\x90\x96\x36\xea\x03\x74\x18\x7a"
"\x1d\x46\x42\x2d\x3f\x8c\x54\xc7"
@@ -25168,11 +21496,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x3d\x7c\x6e\x52\x3d\x55\x85\x48",
.assoc = "\xaf",
.alen = 1,
- .input = "",
- .ilen = 0,
- .result = "\x9b\xc5\x3b\x20\x0a\x88\x56\xbe"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x9b\xc5\x3b\x20\x0a\x88\x56\xbe"
"\x69\xdf\xc4\xc4\x02\x46\x3a\xf0",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xcd\xbb\x70\x89\x13\xf0\xc1\x95"
"\x9e\x20\xf4\xbf\x39\xb1\x17\xcd"
@@ -25186,11 +21514,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x0b\x6d\x84\x4f\x2c\xf0\x82\x5b"
"\x4e\xf6\x29\xd1\x8b\x6f\x56",
.alen = 31,
- .input = "",
- .ilen = 0,
- .result = "\xe0\x6d\xa1\x07\x98\x2f\x40\x2d"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xe0\x6d\xa1\x07\x98\x2f\x40\x2d"
"\x2e\x9a\xd6\x61\x43\xc0\x74\x69",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x0a\xe0\xaa\x29\x24\x6c\x6a\xb1"
"\x1f\xfa\xa6\x50\x33\xd5\xda\xd3"
@@ -25204,11 +21532,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x35\x2f\x90\xd3\x31\x90\x04\x74"
"\x0f\x23\x08\xa9\x65\xce\xf6\xea",
.alen = 32,
- .input = "",
- .ilen = 0,
- .result = "\xb9\x57\x13\x3e\x82\x31\x61\x65"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\xb9\x57\x13\x3e\x82\x31\x61\x65"
"\x0d\x7f\x6c\x96\x93\x5c\x50\xe2",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x46\x04\xe3\xc8\x34\xe7\x12\xcd"
"\xa0\xd4\x58\xe2\x2d\xf9\x9c\xda"
@@ -25223,11 +21551,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xd1\x51\xe6\x81\x3f\x2d\x95\xc1"
"\x01",
.alen = 33,
- .input = "",
- .ilen = 0,
- .result = "\x81\x96\x34\xde\xbb\x36\xdd\x3e"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x81\x96\x34\xde\xbb\x36\xdd\x3e"
"\x4e\x5e\xcb\x44\x21\xb8\x3f\xf1",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\x83\x29\x1d\x67\x44\x63\xbb\xe9"
"\x20\xaf\x0a\x73\x27\x1e\x5f\xe0"
@@ -25245,11 +21573,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x64\xb2\x89\x7d\x78\xa8\x05\x7e"
"\x07\x8c\xfc\x88\x2d\xb8\x53",
.alen = 63,
- .input = "",
- .ilen = 0,
- .result = "\x2e\x99\xb6\x79\x57\x56\x80\x36"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x2e\x99\xb6\x79\x57\x56\x80\x36"
"\x8e\xc4\x1c\x12\x7d\x71\x36\x0c",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xbf\x4e\x57\x07\x54\xdf\x64\x05"
"\xa1\x89\xbc\x04\x21\x42\x22\xe6"
@@ -25267,11 +21595,11 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x91\x19\x70\x1e\xe1\xfe\x25\x49"
"\xd6\x8f\x93\xc7\x28\x3f\x3d\x03",
.alen = 64,
- .input = "",
- .ilen = 0,
- .result = "\x7b\x25\x3d\x47\xd4\xa7\x08\xce"
+ .ptext = "",
+ .plen = 0,
+ .ctext = "\x7b\x25\x3d\x47\xd4\xa7\x08\xce"
"\x3b\x89\x40\x36\xba\x6d\x0e\xa2",
- .rlen = 16,
+ .clen = 16,
}, {
.key = "\xfc\x72\x90\xa6\x64\x5a\x0d\x21"
"\x22\x63\x6e\x96\x1b\x67\xe4\xec"
@@ -25282,12 +21610,12 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x42\x9a\x9a\xba\x19\x30\x15\x6e",
.assoc = "\x1a",
.alen = 1,
- .input = "\x29",
- .ilen = 1,
- .result = "\xe6\x09\x6f\x95\x9a\x18\xc8\xf6"
+ .ptext = "\x29",
+ .plen = 1,
+ .ctext = "\xe6\x09\x6f\x95\x9a\x18\xc8\xf6"
"\x17\x75\x81\x16\xdf\x26\xff\x67"
"\x92",
- .rlen = 17,
+ .clen = 17,
}, {
.key = "\x38\x97\xca\x45\x74\xd6\xb6\x3c"
"\xa3\x3d\x20\x27\x15\x8b\xa7\xf2"
@@ -25301,18 +21629,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x09\xfb\xca\x69\x4b\xb0\x8e\xf5"
"\xd6\x07\x62\xe3\xa8\xa9\x12",
.alen = 31,
- .input = "\x66\xf3\x75\x7d\x40\xb3\xb4\xd1"
+ .ptext = "\x66\xf3\x75\x7d\x40\xb3\xb4\xd1"
"\x04\xe1\xa6\x94\x10\xe6\x39\x77"
"\xd3\xac\x4d\x8a\x8c\x58\x6e\xfb"
"\x06\x13\x9a\xd9\x5e\xc0\xfa",
- .ilen = 31,
- .result = "\x82\xc0\x56\xf0\xd7\xc4\xc9\xfd"
+ .plen = 31,
+ .ctext = "\x82\xc0\x56\xf0\xd7\xc4\xc9\xfd"
"\x3c\xd1\x2a\xd4\x15\x86\x9d\xda"
"\xea\x6c\x6f\xa1\x33\xb0\x7a\x01"
"\x57\xe7\xf3\x7b\x73\xe7\x54\x10"
"\xc6\x91\xe2\xc6\xa0\x69\xe7\xe6"
"\x76\xc3\xf5\x3a\x76\xfd\x4a",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x75\xbc\x04\xe5\x84\x52\x5e\x58"
"\x24\x17\xd2\xb9\x0e\xaf\x6a\xf9"
@@ -25326,18 +21654,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x33\xbd\xd6\xed\x50\x50\x10\x0e"
"\x97\x35\x41\xbb\x82\x08\xb1\xf2",
.alen = 32,
- .input = "\xa2\x17\xaf\x1c\x50\x2e\x5d\xed"
+ .ptext = "\xa2\x17\xaf\x1c\x50\x2e\x5d\xed"
"\x85\xbb\x58\x26\x0a\x0b\xfc\x7d"
"\xfe\x6e\x59\x0e\x91\xf8\xf0\x15"
"\xc8\x40\x78\xb1\x38\x1f\x99\xa7",
- .ilen = 32,
- .result = "\x01\x47\x8e\x6c\xf6\x64\x89\x3a"
+ .plen = 32,
+ .ctext = "\x01\x47\x8e\x6c\xf6\x64\x89\x3a"
"\x71\xce\xe4\xaa\x45\x70\xe6\x84"
"\x62\x48\x08\x64\x86\x6a\xdf\xec"
"\xb4\xa0\xfb\x34\x03\x0c\x19\xf4"
"\x2b\x7b\x36\x73\xec\x54\xa9\x1e"
"\x30\x85\xdb\xe4\xac\xe9\x2c\xca",
- .rlen = 48,
+ .clen = 48,
}, {
.key = "\xb1\xe1\x3e\x84\x94\xcd\x07\x74"
"\xa5\xf2\x84\x4a\x08\xd4\x2c\xff"
@@ -25352,20 +21680,20 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x59\x62\x20\x94\x5c\x67\x50\xc8"
"\x58",
.alen = 33,
- .input = "\xdf\x3c\xe9\xbc\x61\xaa\x06\x09"
+ .ptext = "\xdf\x3c\xe9\xbc\x61\xaa\x06\x09"
"\x06\x95\x0a\xb7\x04\x2f\xbe\x84"
"\x28\x30\x64\x92\x96\x98\x72\x2e"
"\x89\x6e\x57\x8a\x13\x7e\x38\x7e"
"\xdb",
- .ilen = 33,
- .result = "\x85\xe0\xf8\x0f\x8e\x49\xe3\x60"
+ .plen = 33,
+ .ctext = "\x85\xe0\xf8\x0f\x8e\x49\xe3\x60"
"\xcb\x4a\x54\x94\xcf\xf5\x7e\x34"
"\xe9\xf8\x80\x65\x53\xd0\x72\x70"
"\x4f\x7d\x9d\xd1\x15\x6f\xb9\x2c"
"\xfa\xe8\xdd\xac\x2e\xe1\x3f\x67"
"\x63\x0f\x1a\x59\xb7\x89\xdb\xf4"
"\xc3",
- .rlen = 49,
+ .clen = 49,
}, {
.key = "\xee\x05\x77\x23\xa5\x49\xb0\x90"
"\x26\xcc\x36\xdc\x02\xf8\xef\x05"
@@ -25383,7 +21711,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x71\x1c\xf7\x44\xee\xa8\xc3\x42"
"\xe2\xa3\x84\x04\x0b\xe1\xce",
.alen = 63,
- .input = "\x1b\x61\x23\x5b\x71\x26\xae\x25"
+ .ptext = "\x1b\x61\x23\x5b\x71\x26\xae\x25"
"\x87\x6f\xbc\x49\xfe\x53\x81\x8a"
"\x53\xf2\x70\x17\x9b\x38\xf4\x48"
"\x4b\x9b\x36\x62\xed\xdd\xd8\x54"
@@ -25391,8 +21719,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x5c\x94\x47\x58\xa7\xff\x9c\x9e"
"\x7c\xb6\xf1\xac\xc8\xfd\x8b\x35"
"\xd5\xa4\x6a\xd4\x09\xc2\x08",
- .ilen = 63,
- .result = "\x00\xe5\x5b\x87\x5c\x20\x22\x8a"
+ .plen = 63,
+ .ctext = "\x00\xe5\x5b\x87\x5c\x20\x22\x8a"
"\xda\x1f\xd3\xff\xbb\xb2\xb0\xf8"
"\xef\xe9\xeb\x9e\x7c\x80\xf4\x2b"
"\x59\xc0\x79\xbc\x17\xa0\x15\x01"
@@ -25402,7 +21730,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xed\x0b\x23\xea\x9b\xda\x57\x2f"
"\xf6\xa9\xae\x0d\x4e\x40\x96\x45"
"\x7f\xfa\xf0\xbf\xc4\x98\x78",
- .rlen = 79,
+ .clen = 79,
}, {
.key = "\x2a\x2a\xb1\xc3\xb5\xc5\x59\xac"
"\xa7\xa6\xe8\x6d\xfc\x1d\xb2\x0b"
@@ -25420,7 +21748,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x9d\x83\xde\xe5\x57\xfd\xe3\x0e"
"\xb1\xa7\x1b\x44\x05\x67\xb7\x37",
.alen = 64,
- .input = "\x58\x85\x5c\xfa\x81\xa1\x57\x40"
+ .ptext = "\x58\x85\x5c\xfa\x81\xa1\x57\x40"
"\x08\x4a\x6e\xda\xf8\x78\x44\x90"
"\x7d\xb5\x7b\x9b\xa1\xd8\x76\x62"
"\x0c\xc9\x15\x3b\xc7\x3c\x77\x2b"
@@ -25428,8 +21756,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x76\xa3\xdc\xbe\x6b\xa8\xb1\x2d"
"\xa9\x1d\xd8\x4e\x31\x53\xab\x00"
"\xa5\xa7\x01\x13\x04\x49\xf2\x04",
- .ilen = 64,
- .result = "\x28\xdd\xb9\x4a\x12\xc7\x0a\xe1"
+ .plen = 64,
+ .ctext = "\x28\xdd\xb9\x4a\x12\xc7\x0a\xe1"
"\x58\x06\x1a\x9b\x8c\x67\xdf\xeb"
"\x35\x35\x60\x9d\x06\x40\x65\xc1"
"\x93\xe8\xb3\x82\x50\x29\xdd\xb5"
@@ -25439,7 +21767,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xf4\x77\xe4\xbf\x41\x3b\xc4\x06"
"\xce\x9e\x34\x81\xf0\x89\x11\x13"
"\x02\x65\xa1\x7c\xdf\x07\x33\x06",
- .rlen = 80,
+ .clen = 80,
}, {
.key = "\x67\x4f\xeb\x62\xc5\x40\x01\xc7"
"\x28\x80\x9a\xfe\xf6\x41\x74\x12"
@@ -25458,7 +21786,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x80\xaa\xb2\x83\xff\xee\xa1\x6a"
"\x04",
.alen = 65,
- .input = "\x94\xaa\x96\x9a\x91\x1d\x00\x5c"
+ .ptext = "\x94\xaa\x96\x9a\x91\x1d\x00\x5c"
"\x88\x24\x20\x6b\xf2\x9c\x06\x96"
"\xa7\x77\x87\x1f\xa6\x78\xf8\x7b"
"\xcd\xf6\xf4\x13\xa1\x9b\x16\x02"
@@ -25475,8 +21803,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x27\x7f\xe3\x34\x80\x02\x49\x4b"
"\x07\x68\x22\x2a\x88\x25\x53\xb2"
"\x2f",
- .ilen = 129,
- .result = "\x85\x39\x69\x35\xfb\xf9\xb0\xa6"
+ .plen = 129,
+ .ctext = "\x85\x39\x69\x35\xfb\xf9\xb0\xa6"
"\x85\x43\x88\xd0\xd7\x78\x60\x19"
"\x3e\x1f\xb1\xa4\xd6\xc5\x96\xec"
"\xf7\x84\x85\xc7\x27\x0f\x74\x57"
@@ -25495,7 +21823,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x4c\x50\x10\x62\xba\x7a\xb1\x68"
"\x37\xd7\x87\x4e\xe4\x66\x09\x1f"
"\xa5",
- .rlen = 145,
+ .clen = 145,
}, {
.key = "\xa3\x73\x24\x01\xd5\xbc\xaa\xe3"
"\xa9\x5a\x4c\x90\xf0\x65\x37\x18"
@@ -25522,7 +21850,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xb1\x8f\x15\x93\xe7\x71\xb9\x2c"
"\x4b",
.alen = 129,
- .input = "\xd1\xcf\xd0\x39\xa1\x99\xa9\x78"
+ .ptext = "\xd1\xcf\xd0\x39\xa1\x99\xa9\x78"
"\x09\xfe\xd2\xfd\xec\xc1\xc9\x9d"
"\xd2\x39\x93\xa3\xab\x18\x7a\x95"
"\x8f\x24\xd3\xeb\x7b\xfa\xb5\xd8"
@@ -25531,8 +21859,8 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x02\xeb\xa8\x90\x03\xfd\xea\x97"
"\x43\xaf\x2e\x92\xf8\x57\xc5\x6a"
"\x00",
- .ilen = 65,
- .result = "\x7d\xde\x53\x22\xe4\x23\x3b\x30"
+ .plen = 65,
+ .ctext = "\x7d\xde\x53\x22\xe4\x23\x3b\x30"
"\x78\xde\x35\x90\x7a\xd9\x0b\x93"
"\xf6\x0e\x0b\xed\x40\xee\x10\x9c"
"\x96\x3a\xd3\x34\xb2\xd0\x67\xcf"
@@ -25543,7 +21871,7 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xf2\x3b\xd8\x28\x40\xe2\x76\xf6"
"\x20\x13\x83\x46\xaf\xff\xe3\x0f"
"\x72",
- .rlen = 81,
+ .clen = 81,
}, {
.key = "\xe0\x98\x5e\xa1\xe5\x38\x53\xff"
"\x2a\x35\xfe\x21\xea\x8a\xfa\x1e"
@@ -25557,18 +21885,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x31\x4b\x1b\x07\x6f\x10\x1c\xa8"
"\x20\x46\x7a\xce\x9f\x42\x6d\xf9",
.alen = 32,
- .input = "\x0d\xf4\x09\xd8\xb1\x14\x51\x94"
+ .ptext = "\x0d\xf4\x09\xd8\xb1\x14\x51\x94"
"\x8a\xd8\x84\x8e\xe6\xe5\x8c\xa3"
"\xfc\xfc\x9e\x28\xb0\xb8\xfc\xaf"
"\x50\x52\xb1\xc4\x55\x59\x55\xaf",
- .ilen = 32,
- .result = "\x5a\xcd\x8c\x57\xf2\x6a\xb6\xbe"
+ .plen = 32,
+ .ctext = "\x5a\xcd\x8c\x57\xf2\x6a\xb6\xbe"
"\x53\xc7\xaa\x9a\x60\x74\x9c\xc4"
"\xa2\xc2\xd0\x6d\xe1\x03\x63\xdc"
"\xbb\x51\x7e\x9c\x89\x73\xde\x4e"
"\x24\xf8\x52\x7c\x15\x41\x0e\xba"
"\x69\x0e\x36\x5f\x2f\x22\x8c",
- .rlen = 47,
+ .clen = 47,
}, {
.key = "\x1c\xbd\x98\x40\xf5\xb3\xfc\x1b"
"\xaa\x0f\xb0\xb3\xe4\xae\xbc\x24"
@@ -25582,18 +21910,18 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x5c\x0d\x27\x8b\x74\xb0\x9e\xc2"
"\xe1\x74\x59\xa6\x79\xa1\x0c\xd0",
.alen = 32,
- .input = "\x4a\x18\x43\x77\xc1\x90\xfa\xb0"
+ .ptext = "\x4a\x18\x43\x77\xc1\x90\xfa\xb0"
"\x0b\xb2\x36\x20\xe0\x09\x4e\xa9"
"\x26\xbe\xaa\xac\xb5\x58\x7e\xc8"
"\x11\x7f\x90\x9c\x2f\xb8\xf4\x85",
- .ilen = 32,
- .result = "\x47\xd6\xce\x78\xd6\xbf\x4a\x51"
+ .plen = 32,
+ .ctext = "\x47\xd6\xce\x78\xd6\xbf\x4a\x51"
"\xb8\xda\x92\x3c\xfd\xda\xac\x8e"
"\x8d\x88\xd7\x4d\x90\xe5\xeb\xa1"
"\xab\xd6\x7c\x76\xad\xea\x7d\x76"
"\x53\xee\xb0\xcd\xd0\x02\xbb\x70"
"\x5b\x6f\x7b\xe2\x8c\xe8",
- .rlen = 46,
+ .clen = 46,
}, {
.key = "\x59\xe1\xd2\xdf\x05\x2f\xa4\x37"
"\x2b\xe9\x63\x44\xde\xd3\x7f\x2b"
@@ -25607,17 +21935,17 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\x86\xd0\x32\x0f\x79\x50\x20\xdb"
"\xa2\xa1\x37\x7e\x53\x00\xab\xa6",
.alen = 32,
- .input = "\x86\x3d\x7d\x17\xd1\x0c\xa3\xcc"
+ .ptext = "\x86\x3d\x7d\x17\xd1\x0c\xa3\xcc"
"\x8c\x8d\xe8\xb1\xda\x2e\x11\xaf"
"\x51\x80\xb5\x30\xba\xf8\x00\xe2"
"\xd3\xad\x6f\x75\x09\x18\x93\x5c",
- .ilen = 32,
- .result = "\x9f\xa9\x2b\xa4\x8f\x00\x05\x2b"
+ .plen = 32,
+ .ctext = "\x9f\xa9\x2b\xa4\x8f\x00\x05\x2b"
"\xe7\x68\x81\x51\xbb\xfb\xdf\x60"
"\xbb\xac\xe8\xc1\xdc\x68\xae\x68"
"\x3a\xcd\x7a\x06\x49\xfe\x80\x11"
"\xe6\x61\x99\xe2\xdd\xbe\x2c\xbf",
- .rlen = 40,
+ .clen = 40,
}, {
.key = "\x96\x06\x0b\x7f\x15\xab\x4d\x53"
"\xac\xc3\x15\xd6\xd8\xf7\x42\x31"
@@ -25631,1257 +21959,17 @@ static const struct aead_testvec morus1280_enc_tv_template[] = {
"\xb1\x92\x3e\x93\x7e\xf0\xa2\xf5"
"\x64\xcf\x16\x57\x2d\x5f\x4a\x7d",
.alen = 32,
- .input = "\xc3\x62\xb7\xb6\xe2\x87\x4c\xe7"
+ .ptext = "\xc3\x62\xb7\xb6\xe2\x87\x4c\xe7"
"\x0d\x67\x9a\x43\xd4\x52\xd4\xb5"
"\x7b\x43\xc1\xb5\xbf\x98\x82\xfc"
"\x94\xda\x4e\x4d\xe4\x77\x32\x32",
- .ilen = 32,
- .result = "\xe2\x34\xfa\x25\xfd\xfb\x89\x5e"
- "\x5b\x4e\x0b\x15\x6e\x39\xfb\x0c"
- "\x73\xc7\xd9\x6b\xbe\xce\x9b\x70"
- "\xc7\x4f\x96\x16\x03\xfc\xea\xfb"
- "\x56",
- .rlen = 33,
- },
-};
-
-static const struct aead_testvec morus1280_dec_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 16,
- .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
- "\x20\x36\x2c\x24\xfe\xc9\x30\x81",
- .assoc = "",
- .alen = 0,
- .input = "\x91\x85\x0f\xf5\x52\x9e\xce\xce"
- "\x65\x99\xc7\xbf\xd3\x76\xe8\x98",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b"
- "\x80\xda\xb2\x91\xf9\x24\xc2\x06",
- .klen = 16,
- .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
- "\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
- .assoc = "",
- .alen = 0,
- .input = "\x88\xc3\x4c\xf0\x2f\x43\x76\x13"
- "\x96\xda\x76\x34\x33\x4e\xd5\x39"
- "\x73",
- .ilen = 17,
- .result = "\x69",
- .rlen = 1,
- }, {
- .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37"
- "\x01\xb4\x64\x22\xf3\x48\x85\x0c",
- .klen = 16,
- .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
- "\x22\xea\x90\x47\xf2\x11\xb5\x8e",
- .assoc = "",
- .alen = 0,
- .input = "\x3e\x5c\x3b\x58\x3b\x7d\x2a\x22"
- "\x75\x0b\x24\xa6\x0e\xc3\xde\x52"
- "\x97\x0b\x64\xd4\xce\x90\x52\xf7"
- "\xef\xdb\x6a\x38\xd2\xa8\xa1\x0d"
- "\xe0\x61\x33\x24\xc6\x4d\x51\xbc"
- "\xa4\x21\x74\xcf\x19\x16\x59",
- .ilen = 47,
- .result = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc"
- "\x62\x58\xe9\x8f\xef\xa4\x17\x91"
- "\xb4\x96\x9f\x6b\xce\x38\xa5\x46"
- "\x13\x7d\x64\x93\xd7\x05\xf5",
- .rlen = 31,
- }, {
- .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
- "\x82\x8e\x16\xb4\xed\x6d\x47\x12",
- .klen = 16,
- .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
- "\xa2\xc5\x42\xd8\xec\x36\x78\x94",
- .assoc = "",
- .alen = 0,
- .input = "\x30\x82\x9c\x2b\x67\xcb\xf9\x1f"
- "\xde\x9f\x77\xb2\xda\x92\x61\x5c"
- "\x09\x0b\x2d\x9a\x26\xaa\x1c\x06"
- "\xab\x74\xb7\x2b\x95\x5f\x9f\xa1"
- "\x9a\xff\x50\xa0\xa2\xff\xc5\xad"
- "\x21\x8e\x84\x5c\x12\x61\xb2\xae",
- .ilen = 48,
- .result = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8"
- "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97"
- "\xde\x58\xab\xf0\xd3\xd8\x27\x60"
- "\xd5\xaa\x43\x6b\xb1\x64\x95\xa4",
- .rlen = 32,
- }, {
- .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
- "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
- .klen = 16,
- .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
- "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
- .assoc = "",
- .alen = 0,
- .input = "\x67\x5d\x8e\x45\xc8\x39\xf5\x17"
- "\xc1\x1d\x2a\xdd\x88\x67\xda\x1f"
- "\x6d\xe8\x37\x28\x5a\xc1\x5e\x9f"
- "\xa6\xec\xc6\x92\x05\x4b\xc0\xa3"
- "\x63\xef\x88\xa4\x9b\x0a\x5c\xed"
- "\x2b\x6a\xac\x63\x52\xaa\x10\x94"
- "\xd0",
- .ilen = 49,
- .result = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04"
- "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d"
- "\x09\x1a\xb7\x74\xd8\x78\xa9\x79"
- "\x96\xd8\x22\x43\x8c\xc3\x34\x7b"
- "\xc4",
- .rlen = 33,
- }, {
- .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
- "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f",
- .klen = 16,
- .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
- "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
- .assoc = "",
- .alen = 0,
- .input = "\x7d\x61\x1a\x35\x20\xcc\x07\x88"
- "\x03\x98\x87\xcf\xc0\x6e\x4d\x19"
- "\xe3\xd4\x0b\xfb\x29\x8f\x49\x1a"
- "\x3a\x06\x77\xce\x71\x2c\xcd\xdd"
- "\xed\xf6\xc9\xbe\xa6\x3b\xb8\xfc"
- "\x6c\xbe\x77\xed\x74\x0e\x20\x85"
- "\xd0\x65\xde\x24\x6f\xe3\x25\xc5"
- "\xdf\x5b\x0f\xbd\x8a\x88\x78\xc9"
- "\xe5\x81\x37\xde\x84\x7a\xf6\x84"
- "\x99\x7a\x72\x9c\x54\x31\xa1",
- .ilen = 79,
- .result = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f"
- "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3"
- "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93"
- "\x57\x05\x01\x1c\x66\x22\xd3\x51"
- "\xd3\xdf\x18\xc9\x30\x66\xed\xb1"
- "\x96\x58\xd5\x8c\x64\x8c\x7c\xf5"
- "\x01\xd0\x74\x5f\x9b\xaa\xf6\xd1"
- "\xe6\x16\xa2\xac\xde\x47\x40",
- .rlen = 63,
- }, {
- .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
- "\x05\x1d\x2c\x68\xdb\xda\x8f\x25",
- .klen = 16,
- .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
- "\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
- .assoc = "",
- .alen = 0,
- .input = "\x05\xc5\xb1\xf9\x1b\xb9\xab\x2c"
- "\xa5\x07\x12\xa7\x12\x39\x60\x66"
- "\x30\x81\x4a\x03\x78\x28\x45\x52"
- "\xd2\x2b\x24\xfd\x8b\xa5\xb7\x66"
- "\x6f\x45\xd7\x3b\x67\x6f\x51\xb9"
- "\xc0\x3d\x6c\xca\x1e\xae\xff\xb6"
- "\x79\xa9\xe4\x82\x5d\x4c\x2d\xdf"
- "\xeb\x71\x40\xc9\x2c\x40\x45\x6d"
- "\x73\x77\x01\xf3\x4f\xf3\x9d\x2a"
- "\x5d\x57\xa8\xa1\x18\xa2\xad\xcb",
- .ilen = 80,
- .result = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b"
- "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa"
- "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad"
- "\x19\x33\xe0\xf4\x40\x81\x72\x28"
- "\xe1\x8b\x1c\xf8\x91\x78\xff\xaf"
- "\xb0\x68\x69\xf2\x27\x35\x91\x84"
- "\x2e\x37\x5b\x00\x04\xff\x16\x9c"
- "\xb5\x19\x39\xeb\xd9\xcd\x29\x9a",
- .rlen = 64,
- }, {
- .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
- "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b",
- .klen = 16,
- .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
- "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
- .assoc = "\xc5",
- .alen = 1,
- .input = "\x4d\xbf\x11\xac\x7f\x97\x0b\x2e"
- "\x89\x3b\x9d\x0f\x83\x1c\x08\xc3",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde"
- "\x07\xd1\x90\x8b\xcf\x23\x15\x31",
- .klen = 16,
- .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
- "\x27\x08\xbd\xaf\xce\xec\x45\xb3",
- .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
- "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34"
- "\xe8\x73\x62\x64\xab\x50\xd0\xda"
- "\x6b\x83\x66\xaf\x3e\x27\xc9",
- .alen = 31,
- .input = "\x5b\xc0\x8d\x54\xe4\xec\xbe\x38"
- "\x03\x12\xf9\xcc\x9e\x46\x42\x92",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa"
- "\x88\xab\x42\x1c\xc9\x47\xd7\x38",
- .klen = 16,
- .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
- "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
- .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
- "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b"
- "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3"
- "\x2d\xb0\x45\x87\x18\x86\x68\xf6",
- .alen = 32,
- .input = "\x48\xc5\xc3\x4c\x40\x2e\x2f\xc2"
- "\x6d\x65\xe0\x67\x9c\x1d\xa0\xf0",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16"
- "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e",
- .klen = 16,
- .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
- "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
- .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
- "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41"
- "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d"
- "\xee\xde\x23\x60\xf2\xe5\x08\xcc"
- "\x97",
- .alen = 33,
- .input = "\x28\x64\x78\x51\x55\xd8\x56\x4a"
- "\x58\x3e\xf7\xbe\xee\x21\xfe\x94",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31"
- "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44",
- .klen = 16,
- .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
- "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
- .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
- "\xca\xcd\xff\x88\xba\x22\xbe\x47"
- "\x67\xba\x85\xf1\xbb\x30\x56\x26"
- "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3"
- "\xa6\xbf\x31\x93\x60\xcd\xda\x63"
- "\x2c\xb1\xaa\x19\xc8\x19\xf8\xeb"
- "\x03\xa1\xe8\xbe\x37\x54\xec\xa2"
- "\xcd\x2c\x45\x58\xbd\x8e\x80",
- .alen = 63,
- .input = "\xb3\xa6\x00\x4e\x09\x20\xac\x21"
- "\x77\x72\x69\x76\x2d\x36\xe5\xc8",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d"
- "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a",
- .klen = 16,
- .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
- "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
- .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
- "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d"
- "\x91\x7c\x91\x75\xc0\xd0\xd8\x40"
- "\x71\x39\xe1\x10\xa6\xa3\x46\x7a"
- "\xb4\x6b\x35\xc2\xc1\xdf\xed\x60"
- "\x46\xc1\x3e\x7f\x8c\xc2\x0e\x7a"
- "\x30\x08\xd0\x5f\xa0\xaa\x0c\x6d"
- "\x9c\x2f\xdb\x97\xb8\x15\x69\x01",
- .alen = 64,
- .input = "\x65\x33\x7b\xa1\x63\xf4\x20\xdd"
- "\xe4\xb9\x4a\xaa\x9a\x21\xaa\x14",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69"
- "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50",
- .klen = 16,
- .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
- "\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
- .assoc = "\x31",
- .alen = 1,
- .input = "\x1d\x47\x17\x34\x86\xf5\x54\x1a"
- "\x6d\x28\xb8\x5d\x6c\xcf\xa0\xb9"
- "\xbf",
- .ilen = 17,
- .result = "\x40",
- .rlen = 1,
- }, {
- .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85"
- "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57",
- .klen = 16,
- .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
- "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
- .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
- "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a"
- "\xe6\x01\xa8\x7e\xca\x10\xdc\x73"
- "\xf4\x94\x9f\xc1\x5a\x61\x85",
- .alen = 31,
- .input = "\x78\x90\x52\xae\x0f\xf7\x2e\xef"
- "\x63\x09\x08\x58\xb5\x56\xbd\x72"
- "\x6e\x42\xcf\x27\x04\x7c\xdb\x92"
- "\x18\xe9\xa4\x33\x90\xba\x62\xb5"
- "\x70\xd3\x88\x9b\x4f\x05\xa7\x51"
- "\x85\x87\x17\x09\x42\xed\x4e",
- .ilen = 47,
- .result = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
- "\x6d\x92\x42\x61\xa7\x58\x37\xdb"
- "\xb0\xb2\x2b\x9f\x0b\xb8\xbd\x7a"
- "\x24\xa0\xd6\xb7\x11\x79\x6c",
- .rlen = 31,
- }, {
- .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
- "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d",
- .klen = 16,
- .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
- "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
- .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
- "\xce\x36\xc7\xce\xa2\xb4\xc9\x60"
- "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d"
- "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd",
- .alen = 32,
- .input = "\x1d\x2c\x57\xe0\x50\x38\x3d\x41"
- "\x2e\x71\xc8\x3b\x92\x43\x58\xaf"
- "\x5a\xfb\xad\x8f\xd9\xd5\x8a\x5e"
- "\xdb\xf3\xcd\x3a\x2b\xe1\x2c\x1a"
- "\xb0\xed\xe3\x0c\x6e\xf9\xf2\xd6"
- "\x90\xe6\xb1\x0e\xa5\x8a\xac\xb7",
- .ilen = 48,
- .result = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
- "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2"
- "\xdb\x74\x36\x23\x11\x58\x3f\x93"
- "\xe5\xcd\xb5\x90\xeb\xd8\x0c\xb3",
- .rlen = 32,
- }, {
- .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
- "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
- .klen = 16,
- .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
- "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
- .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
- "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66"
- "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7"
- "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4"
- "\xee",
- .alen = 33,
- .input = "\x59\x10\x84\x1c\x83\x4c\x8b\xfc"
- "\xfd\x2e\x4b\x46\x84\xff\x78\x4e"
- "\x50\xda\x5c\xb9\x61\x1d\xf5\xb9"
- "\xfe\xbb\x7f\xae\x8c\xc1\x24\xbd"
- "\x8c\x6f\x1f\x9b\xce\xc6\xc1\x37"
- "\x08\x06\x5a\xe5\x96\x10\x95\xc2"
- "\x5e",
- .ilen = 49,
- .result = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
- "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
- "\x05\x36\x42\xa7\x16\xf8\xc1\xad"
- "\xa7\xfb\x94\x68\xc5\x37\xab\x8a"
- "\x72",
- .rlen = 33,
- }, {
- .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
- "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69",
- .klen = 16,
- .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
- "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
- .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
- "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d"
- "\x65\x48\xcb\x0a\xda\xf0\x62\xc0"
- "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa"
- "\xfd\xc9\x4a\xa9\xa9\x39\x4b\x54"
- "\xc8\x0e\x24\x7f\x5e\x10\x7a\x45"
- "\x10\x0b\x56\x85\xad\x54\xaa\x66"
- "\xa8\x43\xcd\xd4\x9b\xb7\xfa",
- .alen = 63,
- .input = "\x9a\x12\xbc\xdf\x72\xa8\x56\x22"
- "\x49\x2d\x07\x92\xfc\x3d\x6d\x5f"
- "\xef\x36\x19\xae\x91\xfa\xd6\x63"
- "\x46\xea\x8a\x39\x14\x21\xa6\x37"
- "\x18\xfc\x97\x3e\x16\xa5\x4d\x39"
- "\x45\x2e\x69\xcc\x9c\x5f\xdf\x6d"
- "\x5e\xa2\xbf\xac\x83\x32\x72\x52"
- "\x58\x58\x23\x40\xfd\xa5\xc2\xe6"
- "\xe9\x5a\x50\x98\x00\x58\xc9\x86"
- "\x4f\x20\x37\xdb\x7b\x22\xa3",
- .ilen = 79,
- .result = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
- "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
- "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
- "\x68\x28\x73\x40\x9f\x96\x4a\x60"
- "\x80\xf4\x4b\xf4\xc1\x3d\xd0\x93"
- "\xcf\x12\xc9\x59\x8f\x7a\x7f\xa8"
- "\x1b\xa5\x50\xed\x87\xa9\x72\x59"
- "\x9c\x44\xb2\xa4\x99\x98\x34",
- .rlen = 63,
- }, {
- .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
- "\x10\x57\x85\x39\x93\x8f\xaf\x70",
- .klen = 16,
- .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
- "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
- .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
- "\x50\xc4\xde\x82\x90\x21\x11\x73"
- "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda"
- "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81"
- "\x0b\x76\x4f\xd7\x0a\x4b\x5e\x51"
- "\xe3\x1d\xb9\xe5\x21\xb9\x8f\xd4"
- "\x3d\x72\x3e\x26\x16\xa9\xca\x32"
- "\x77\x47\x63\x14\x95\x3d\xe4\x34",
- .alen = 64,
- .input = "\xe6\xeb\x92\x5a\x5b\xf0\x2d\xbb"
- "\x23\xec\x35\xe3\xae\xc9\xfb\x0b"
- "\x90\x14\x46\xeb\xa8\x8d\xb0\x9b"
- "\x39\xda\x8b\x48\xec\xb2\x00\x4e"
- "\x80\x6f\x46\x4f\x9b\x1e\xbb\x35"
- "\xea\x5a\xbc\xa2\x36\xa5\x89\x45"
- "\xc2\xd6\xd7\x15\x0b\xf6\x6c\x56"
- "\xec\x99\x7d\x61\xb3\x15\x93\xed"
- "\x83\x1e\xd9\x48\x84\x0b\x37\xfe"
- "\x95\x74\x44\xd5\x54\xa6\x27\x06",
- .ilen = 80,
- .result = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
- "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
- "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
- "\x29\x56\x52\x19\x79\xf5\xe9\x37"
- "\x8f\xa1\x50\x23\x22\x4f\xe3\x91"
- "\xe9\x21\x5e\xbf\x52\x23\x95\x37"
- "\x48\x0c\x38\x8f\xf0\xff\x92\x24"
- "\x6b\x47\x49\xe3\x94\x1f\x1e\x01",
- .rlen = 64,
- }, {
- .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
- "\x91\x31\x37\xcb\x8d\xb3\x72\x76",
- .klen = 16,
- .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
- "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
- .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
- "\xd1\x9e\x90\x13\x8a\x45\xd3\x79"
- "\xba\xcd\xe2\x13\xe4\x30\x66\xf4"
- "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58"
- "\x1a\x22\x53\x05\x6b\x5c\x71\x4f"
- "\xfd\x2d\x4d\x4c\xe5\x62\xa5\x63"
- "\x6a\xda\x26\xc8\x7f\xff\xea\xfd"
- "\x46\x4a\xfa\x53\x8f\xc4\xcd\x68"
- "\x58",
- .alen = 65,
- .input = "\x89\x24\x27\x86\xdc\xd7\x6b\xd9"
- "\xd1\xcd\xdc\x16\xdd\x2c\xc1\xfb"
- "\x52\xb5\xb3\xab\x50\x99\x3f\xa0"
- "\x38\xa4\x74\xa5\x04\x15\x63\x05"
- "\x8f\x54\x81\x06\x5a\x6b\xa4\x63"
- "\x6d\xa7\x21\xcb\xff\x42\x30\x8e"
- "\x3b\xd1\xca\x3f\x4b\x1a\xb8\xc3"
- "\x42\x01\xe6\xbc\x75\x15\x87\xee"
- "\xc9\x8e\x65\x01\xd9\xd8\xb5\x9f"
- "\x48\x86\xa6\x5f\x2c\xc7\xb5\xb0"
- "\xed\x5d\x14\x7c\x3f\x40\xb1\x0b"
- "\x72\xef\x94\x8d\x7a\x85\x56\xe5"
- "\x56\x08\x15\x56\xba\xaf\xbd\xf0"
- "\x20\xef\xa0\xf6\xa9\xad\xa2\xc9"
- "\x1c\x3b\x28\x51\x7e\x77\xb2\x18"
- "\x4f\x61\x64\x37\x22\x36\x6d\x78"
- "\xed\xed\x35\xe8\x83\xa5\xec\x25"
- "\x6b\xff\x5f\x1a\x09\x96\x3d\xdc"
- "\x20",
- .ilen = 145,
- .result = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
- "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
- "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
- "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
- "\x9d\x4d\x54\x51\x84\x61\xf6\x8e"
- "\x03\x31\xf2\x25\x16\xcc\xaa\xc6"
- "\x75\x73\x20\x30\x59\x54\xb2\xf0"
- "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35"
- "\x8a\xdf\x27\xa0\xe4\x60\x99\xae"
- "\x8e\x43\xd9\x39\x7b\x10\x40\x67"
- "\x5c\x7e\xc9\x70\x63\x34\xca\x59"
- "\xfe\x86\xbc\xb7\x9c\x39\xf3\x6d"
- "\x6a\x41\x64\x6f\x16\x7f\x65\x7e"
- "\x89\x84\x68\xeb\xb0\x51\xbe\x55"
- "\x33\x16\x59\x6c\x3b\xef\x88\xad"
- "\x2f\xab\xbc\x25\x76\x87\x41\x2f"
- "\x36",
- .rlen = 129,
- }, {
- .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
- "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c",
- .klen = 16,
- .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
- "\x32\x42\x15\x80\x85\xa1\x65\xfe",
- .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
- "\x52\x79\x42\xa5\x84\x6a\x96\x7f"
- "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d"
- "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e"
- "\x28\xce\x57\x34\xcd\x6e\x84\x4c"
- "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1"
- "\x96\x41\x0d\x69\xe8\x54\x0a\xc8"
- "\x15\x4e\x91\x92\x89\x4b\xb7\x9b"
- "\x21\xf7\x42\x89\xac\x12\x2a\x54"
- "\x69\xee\x18\xc7\x8d\xed\xe8\xfd"
- "\xbb\x04\x28\xe6\x8a\x3c\x98\xc1"
- "\x04\x2d\xa9\xa1\x24\x83\xff\xe9"
- "\x55\x7a\xf0\xd1\xf6\x63\x05\xe1"
- "\xd9\x1e\x75\x72\xc1\x9f\xae\x32"
- "\xe1\x6b\xcd\x9e\x61\x19\x23\x86"
- "\xd9\xd2\xaf\x8e\xd5\xd3\xa8\xa9"
- "\x51",
- .alen = 129,
- .input = "\x36\x78\xb9\x22\xde\x62\x35\x55"
- "\x1a\x7a\xf5\x45\xbc\xd7\x15\x82"
- "\x01\xe9\x5a\x07\xea\x46\xaf\x91"
- "\xcb\x73\xa5\xee\xe1\xb4\xbf\xc2"
- "\xdb\xd2\x9d\x59\xde\xfc\x83\x00"
- "\xf5\x46\xac\x97\xd5\x57\xa9\xb9"
- "\x1f\x8c\xe8\xca\x68\x8b\x91\x0c"
- "\x01\xbe\x0a\xaf\x7c\xf6\x67\xa4"
- "\xbf\xbc\x88\x3f\x5d\xd1\xf9\x19"
- "\x0f\x9d\xb2\xaf\xb9\x6e\x17\xdf"
- "\xa2",
- .ilen = 81,
- .result = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
- "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
- "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
- "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
- "\xac\xfa\x58\x7f\xe5\x73\x09\x8c"
- "\x1d\x40\x87\x8c\xd9\x75\xc0\x55"
- "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb"
- "\x09\x4f\x77\x62\x88\x2d\xf2\x68"
- "\x54",
- .rlen = 65,
- }, {
- .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
- "\x93\xe6\x9b\xee\x81\xfc\xf7\x82",
- .klen = 16,
- .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
- "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
- .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
- "\xd3\x53\xf4\x36\x7e\x8e\x59\x85"
- "\x0e\x51\xf9\x1c\xee\x70\x6a\x27"
- "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05",
- .alen = 32,
- .input = "\x08\x1b\x95\x0e\x41\x95\x02\x4b"
- "\x9c\xbb\xa8\xd0\x7c\xd3\x44\x6e"
- "\x89\x14\x33\x70\x0a\xbc\xea\x39"
- "\x88\xaa\x2b\xd5\x73\x11\x55\xf5"
- "\x33\x33\x9c\xd7\x42\x34\x49\x8e"
- "\x2f\x03\x30\x05\x47\xaf\x34",
- .ilen = 47,
- .result = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
- "\xf3\x89\x20\x5b\x7c\x57\x89\x07"
- "\xd9\x02\x7c\x3d\x2f\x18\x4b\x2d"
- "\x6e\xde\xee\xa2\x08\x12\xc7\xba",
- .rlen = 32,
- }, {
- .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
- "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
- .klen = 16,
- .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
- "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
- .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
- "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c"
- "\x39\x14\x05\xa0\xf3\x10\xec\x41"
- "\xff\x01\x95\x84\x2b\x59\x7f\xdb",
- .alen = 32,
- .input = "\x97\xca\xf4\xe0\x8d\x89\xbf\x68"
- "\x0c\x60\xb9\x27\xdf\xaa\x41\xc6"
- "\x25\xd8\xf7\x1f\x10\x15\x48\x61"
- "\x4c\x95\x00\xdf\x51\x9b\x7f\xe6"
- "\x24\x40\x9e\xbe\x3b\xeb\x1b\x98"
- "\xb9\x9c\xe5\xef\xf2\x05",
- .ilen = 46,
- .result = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
- "\x74\x63\xd2\xec\x76\x7c\x4c\x0d"
- "\x03\xc4\x88\xc1\x35\xb8\xcd\x47"
- "\x2f\x0c\xcd\x7a\xe2\x71\x66\x91",
- .rlen = 32,
- }, {
- .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
- "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
- .klen = 16,
- .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
- "\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
- .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
- "\xd5\x07\x58\x59\x72\xd7\xde\x92"
- "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a"
- "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2",
- .alen = 32,
- .input = "\x63\x4c\x2a\x8e\xb4\x6b\x63\x0d"
- "\xb5\xec\x9b\x4e\x12\x23\xa3\xcf"
- "\x1a\x5a\x70\x15\x5a\x10\x40\x51"
- "\xca\x47\x4c\x9d\xc9\x97\xf4\x77"
- "\xdb\xc8\x10\x2d\xdc\x65\x20\x3f",
- .ilen = 40,
- .result = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
- "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13"
- "\x2e\x86\x93\x45\x3a\x58\x4f\x61"
- "\xf0\x3a\xac\x53\xbc\xd0\x06\x68",
- .rlen = 32,
- }, {
- .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
- "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
- .klen = 16,
- .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22"
- "\x36\xab\xde\xc6\x6d\x32\x70\x17",
- .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9"
- "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98"
- "\x8d\x98\x1c\xa8\xfe\x50\xf0\x74"
- "\x81\x5c\x53\x35\xe0\x17\xbd\x88",
- .alen = 32,
- .input = "\xf1\x62\x44\xc7\x5f\x19\xca\x43"
- "\x47\x2c\xaf\x68\x82\xbd\x51\xef"
- "\x3d\x65\xd8\x45\x2d\x06\x07\x78"
- "\x08\x2e\xb3\x23\xcd\x81\x12\x55"
- "\x1a",
- .ilen = 33,
- .result = "\xda\xcc\x14\x27\x4e\x74\xd1\x30"
- "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a"
- "\x58\x49\x9f\xc9\x3f\xf8\xd1\x7a"
- "\xb2\x67\x8b\x2b\x96\x2f\xa5\x3e",
- .rlen = 32,
- }, {
- .key = "\xe9\x95\xa2\x8f\x93\x13\x7b\xb7"
- "\x96\x4e\x63\x33\x69\x8d\x02\x9b"
- "\x23\xf9\x22\xeb\x80\xa0\xb1\x81"
- "\xe2\x73\xc3\x21\x4d\x47\x8d\xf4",
- .klen = 32,
- .iv = "\xf8\x5e\x31\xf7\xd7\xb2\x25\x3e"
- "\xb7\x85\x90\x58\x67\x57\x33\x1d",
- .assoc = "",
- .alen = 0,
- .input = "\xdf\x2f\x83\xc0\x45\x4a\x2c\xcf"
- "\xb9\xd2\x41\xf6\x80\xa1\x52\x70",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x25\xba\xdc\x2e\xa3\x8f\x24\xd3"
- "\x17\x29\x15\xc5\x63\xb2\xc5\xa1"
- "\x4d\xbc\x2d\x6f\x85\x40\x33\x9a"
- "\xa3\xa0\xa1\xfa\x27\xa6\x2c\xca",
- .klen = 32,
- .iv = "\x34\x83\x6a\x96\xe7\x2d\xce\x5a"
- "\x38\x5f\x42\xe9\x61\x7b\xf5\x23",
- .assoc = "",
- .alen = 0,
- .input = "\x01\xd8\x55\x3c\xc0\x5a\x4b\xc7"
- "\x01\xf4\x08\xe3\x0d\xf7\xf0\x78"
- "\x53",
- .ilen = 17,
- .result = "\x53",
- .rlen = 1,
- }, {
- .key = "\x62\xdf\x16\xcd\xb3\x0a\xcc\xef"
- "\x98\x03\xc7\x56\x5d\xd6\x87\xa8"
- "\x77\x7e\x39\xf3\x8a\xe0\xb5\xb4"
- "\x65\xce\x80\xd2\x01\x05\xcb\xa1",
- .klen = 32,
- .iv = "\x71\xa8\xa4\x35\xf7\xa9\x76\x75"
- "\xb8\x39\xf4\x7a\x5b\x9f\xb8\x29",
- .assoc = "",
- .alen = 0,
- .input = "\xc2\x4b\x41\x0f\x2d\xb9\x62\x07"
- "\xff\x8e\x74\xf8\xa1\xa6\xd5\x37"
- "\xa5\x64\x31\x5c\xca\x73\x9b\x43"
- "\xe6\x70\x63\x46\x95\xcb\xf7\xb5"
- "\x20\x8c\x75\x7a\x2a\x17\x2f\xa9"
- "\xb8\x4d\x11\x42\xd1\xf8\xf1",
- .ilen = 47,
- .result = "\x8f\x3a\xc1\x05\x7f\xe7\xcb\x83"
- "\xf9\xa6\x4d\xc3\x58\x31\x19\x2c"
- "\xd7\x90\xc2\x56\x4e\xd8\x57\xc7"
- "\xf6\xf0\x27\xb4\x25\x4c\x83",
- .rlen = 31,
- }, {
- .key = "\x9e\x03\x4f\x6d\xc3\x86\x75\x0a"
- "\x19\xdd\x79\xe8\x57\xfb\x4a\xae"
- "\xa2\x40\x45\x77\x90\x80\x37\xce"
- "\x26\xfb\x5f\xaa\xdb\x64\x6b\x77",
- .klen = 32,
- .iv = "\xae\xcc\xde\xd5\x07\x25\x1f\x91"
- "\x39\x14\xa6\x0c\x55\xc4\x7b\x30",
- .assoc = "",
- .alen = 0,
- .input = "\xbb\x01\x7c\xd1\x2c\x33\x7b\x37"
- "\x0a\xee\xc4\x30\x19\xd7\x3a\x6f"
- "\xf8\x2b\x67\xf5\x3b\x84\x87\x2a"
- "\xfb\x07\x7a\x82\xb5\xe4\x85\x26"
- "\x1e\xa8\xe5\x04\x54\xce\xe5\x5f"
- "\xb5\x3f\xc1\xd5\x7f\xbd\xd2\xa6",
- .ilen = 48,
- .result = "\xcc\x5f\xfb\xa4\x8f\x63\x74\x9f"
- "\x7a\x81\xff\x55\x52\x56\xdc\x33"
- "\x01\x52\xcd\xdb\x53\x78\xd9\xe1"
- "\xb7\x1d\x06\x8d\xff\xab\x22\x98",
- .rlen = 32,
- }, {
- .key = "\xdb\x28\x89\x0c\xd3\x01\x1e\x26"
- "\x9a\xb7\x2b\x79\x51\x1f\x0d\xb4"
- "\xcc\x03\x50\xfc\x95\x20\xb9\xe7"
- "\xe8\x29\x3e\x83\xb5\xc3\x0a\x4e",
- .klen = 32,
- .iv = "\xea\xf1\x18\x74\x17\xa0\xc8\xad"
- "\xba\xee\x58\x9d\x4f\xe8\x3d\x36",
- .assoc = "",
- .alen = 0,
- .input = "\xc2\xf4\x40\x55\xf9\x59\xff\x73"
- "\x08\xf5\x98\x92\x0c\x7b\x35\x9a"
- "\xa8\xf4\x42\x7e\x6f\x93\xca\x22"
- "\x23\x06\x1e\xf8\x89\x22\xf4\x46"
- "\x7c\x7c\x67\x75\xab\xe5\x75\xaa"
- "\x15\xd7\x83\x19\xfd\x31\x59\x5b"
- "\x32",
- .ilen = 49,
- .result = "\x08\x84\x34\x44\x9f\xde\x1c\xbb"
- "\xfb\x5b\xb1\xe6\x4c\x7a\x9f\x39"
- "\x2c\x14\xd9\x5f\x59\x18\x5b\xfb"
- "\x79\x4b\xe5\x65\xd9\x0a\xc1\x6f"
- "\x2e",
- .rlen = 33,
- }, {
- .key = "\x17\x4d\xc3\xab\xe3\x7d\xc7\x42"
- "\x1b\x91\xdd\x0a\x4b\x43\xcf\xba"
- "\xf6\xc5\x5c\x80\x9a\xc0\x3b\x01"
- "\xa9\x56\x1d\x5b\x8f\x22\xa9\x25",
- .klen = 32,
- .iv = "\x27\x16\x51\x13\x27\x1c\x71\xc9"
- "\x3b\xc8\x0a\x2f\x49\x0c\x00\x3c",
- .assoc = "",
- .alen = 0,
- .input = "\xc9\x82\x3b\x4b\x87\x84\xa5\xdb"
- "\xa0\x8c\xd3\x3e\x7f\x8d\xe8\x28"
- "\x2a\xdc\xfa\x01\x84\x87\x9a\x70"
- "\x81\x75\x37\x0a\xd2\x75\xa9\xb6"
- "\x21\x72\xee\x7e\x65\x95\xe5\xcc"
- "\x01\xb7\x39\xa6\x51\x15\xca\xff"
- "\x61\xdc\x97\x38\xcc\xf4\xca\xc7"
- "\x83\x9b\x05\x11\x72\x60\xf0\xb4"
- "\x7e\x06\xab\x0a\xc0\xbb\x59\x23"
- "\xaa\x2d\xfc\x4e\x35\x05\x59",
- .ilen = 79,
- .result = "\x45\xa8\x6e\xe3\xaf\x5a\xc5\xd7"
- "\x7c\x35\x63\x77\x46\x9f\x61\x3f"
- "\x56\xd7\xe4\xe3\x5e\xb8\xdc\x14"
- "\x3a\x79\xc4\x3e\xb3\x69\x61\x46"
- "\x3c\xb6\x83\x4e\xb4\x26\xc7\x73"
- "\x22\xda\x52\x8b\x7d\x11\x98\xea"
- "\x62\xe1\x14\x1e\xdc\xfe\x0f\xad"
- "\x20\x76\x5a\xdc\x4e\x71\x13",
- .rlen = 63,
- }, {
- .key = "\x54\x71\xfd\x4b\xf3\xf9\x6f\x5e"
- "\x9c\x6c\x8f\x9c\x45\x68\x92\xc1"
- "\x21\x87\x67\x04\x9f\x60\xbd\x1b"
- "\x6a\x84\xfc\x34\x6a\x81\x48\xfb",
- .klen = 32,
- .iv = "\x63\x3b\x8b\xb3\x37\x98\x1a\xe5"
- "\xbc\xa2\xbc\xc0\x43\x31\xc2\x42",
- .assoc = "",
- .alen = 0,
- .input = "\x11\x7c\x7d\xef\xce\x29\x95\xec"
- "\x7e\x9f\x42\xa6\x26\x07\xa1\x75"
- "\x2f\x4e\x09\x9a\xf6\x6b\xc2\xfa"
- "\x0d\xd0\x17\xdc\x25\x1e\x9b\xdc"
- "\x5f\x8c\x1c\x60\x15\x4f\x9b\x20"
- "\x7b\xff\xcd\x82\x60\x84\xf4\xa5"
- "\x20\x9a\x05\x19\x5b\x02\x0a\x72"
- "\x43\x11\x26\x58\xcf\xc5\x41\xcf"
- "\x13\xcc\xde\x32\x92\xfa\x86\xf2"
- "\xaf\x16\xe8\x8f\xca\xb6\xfd\x54",
- .ilen = 80,
- .result = "\x81\xcd\xa8\x82\xbf\xd6\x6e\xf3"
- "\xfd\x0f\x15\x09\x40\xc3\x24\x45"
- "\x81\x99\xf0\x67\x63\x58\x5e\x2e"
- "\xfb\xa6\xa3\x16\x8d\xc8\x00\x1c"
- "\x4b\x62\x87\x7c\x15\x38\xda\x70"
- "\x3d\xea\xe7\xf2\x40\xba\xae\x79"
- "\x8f\x48\xfc\xbf\x45\x53\x2e\x78"
- "\xef\x79\xf0\x1b\x49\xf7\xfd\x9c",
- .rlen = 64,
- }, {
- .key = "\x90\x96\x36\xea\x03\x74\x18\x7a"
- "\x1d\x46\x42\x2d\x3f\x8c\x54\xc7"
- "\x4b\x4a\x73\x89\xa4\x00\x3f\x34"
- "\x2c\xb1\xdb\x0c\x44\xe0\xe8\xd2",
- .klen = 32,
- .iv = "\xa0\x5f\xc5\x52\x47\x13\xc2\x01"
- "\x3d\x7c\x6e\x52\x3d\x55\x85\x48",
- .assoc = "\xaf",
- .alen = 1,
- .input = "\x9b\xc5\x3b\x20\x0a\x88\x56\xbe"
- "\x69\xdf\xc4\xc4\x02\x46\x3a\xf0",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xcd\xbb\x70\x89\x13\xf0\xc1\x95"
- "\x9e\x20\xf4\xbf\x39\xb1\x17\xcd"
- "\x76\x0c\x7f\x0d\xa9\xa0\xc1\x4e"
- "\xed\xdf\xb9\xe4\x1e\x3f\x87\xa8",
- .klen = 32,
- .iv = "\xdc\x84\xfe\xf1\x58\x8f\x6b\x1c"
- "\xbe\x57\x20\xe3\x37\x7a\x48\x4f",
- .assoc = "\xeb\x4d\x8d\x59\x9c\x2e\x15\xa3"
- "\xde\x8d\x4d\x07\x36\x43\x78\xd0"
- "\x0b\x6d\x84\x4f\x2c\xf0\x82\x5b"
- "\x4e\xf6\x29\xd1\x8b\x6f\x56",
- .alen = 31,
- .input = "\xe0\x6d\xa1\x07\x98\x2f\x40\x2d"
- "\x2e\x9a\xd6\x61\x43\xc0\x74\x69",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x0a\xe0\xaa\x29\x24\x6c\x6a\xb1"
- "\x1f\xfa\xa6\x50\x33\xd5\xda\xd3"
- "\xa0\xce\x8a\x91\xae\x40\x43\x68"
- "\xae\x0d\x98\xbd\xf8\x9e\x26\x7f",
- .klen = 32,
- .iv = "\x19\xa9\x38\x91\x68\x0b\x14\x38"
- "\x3f\x31\xd2\x74\x31\x9e\x0a\x55",
- .assoc = "\x28\x72\xc7\xf8\xac\xaa\xbe\xbf"
- "\x5f\x67\xff\x99\x30\x67\x3b\xd6"
- "\x35\x2f\x90\xd3\x31\x90\x04\x74"
- "\x0f\x23\x08\xa9\x65\xce\xf6\xea",
- .alen = 32,
- .input = "\xb9\x57\x13\x3e\x82\x31\x61\x65"
- "\x0d\x7f\x6c\x96\x93\x5c\x50\xe2",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x46\x04\xe3\xc8\x34\xe7\x12\xcd"
- "\xa0\xd4\x58\xe2\x2d\xf9\x9c\xda"
- "\xca\x91\x96\x15\xb4\xe0\xc5\x81"
- "\x70\x3a\x77\x95\xd2\xfd\xc5\x55",
- .klen = 32,
- .iv = "\x55\xcd\x72\x30\x78\x86\xbd\x54"
- "\xc0\x0b\x84\x06\x2b\xc2\xcd\x5b",
- .assoc = "\x64\x97\x00\x98\xbc\x25\x67\xdb"
- "\xe0\x41\xb1\x2a\x2a\x8c\xfe\xdd"
- "\x5f\xf2\x9c\x58\x36\x30\x86\x8e"
- "\xd1\x51\xe6\x81\x3f\x2d\x95\xc1"
- "\x01",
- .alen = 33,
- .input = "\x81\x96\x34\xde\xbb\x36\xdd\x3e"
- "\x4e\x5e\xcb\x44\x21\xb8\x3f\xf1",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\x83\x29\x1d\x67\x44\x63\xbb\xe9"
- "\x20\xaf\x0a\x73\x27\x1e\x5f\xe0"
- "\xf5\x53\xa1\x9a\xb9\x80\x47\x9b"
- "\x31\x68\x56\x6e\xac\x5c\x65\x2c",
- .klen = 32,
- .iv = "\x92\xf2\xac\xcf\x88\x02\x65\x70"
- "\x41\xe5\x36\x97\x25\xe7\x90\x61",
- .assoc = "\xa1\xbb\x3a\x37\xcc\xa1\x10\xf7"
- "\x61\x1c\x63\xbc\x24\xb0\xc0\xe3"
- "\x8a\xb4\xa7\xdc\x3b\xd0\x08\xa8"
- "\x92\x7f\xc5\x5a\x19\x8c\x34\x97"
- "\x0f\x95\x9b\x18\xe4\x8d\xb4\x24"
- "\xb9\x33\x28\x18\xe1\x9d\x14\xe0"
- "\x64\xb2\x89\x7d\x78\xa8\x05\x7e"
- "\x07\x8c\xfc\x88\x2d\xb8\x53",
- .alen = 63,
- .input = "\x2e\x99\xb6\x79\x57\x56\x80\x36"
- "\x8e\xc4\x1c\x12\x7d\x71\x36\x0c",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xbf\x4e\x57\x07\x54\xdf\x64\x05"
- "\xa1\x89\xbc\x04\x21\x42\x22\xe6"
- "\x1f\x15\xad\x1e\xbe\x20\xc9\xb4"
- "\xf3\x95\x35\x46\x86\xbb\x04\x03",
- .klen = 32,
- .iv = "\xce\x17\xe5\x6f\x98\x7e\x0e\x8c"
- "\xc2\xbf\xe8\x29\x1f\x0b\x52\x68",
- .assoc = "\xdd\xe0\x74\xd6\xdc\x1d\xb8\x13"
- "\xe2\xf6\x15\x4d\x1e\xd4\x83\xe9"
- "\xb4\x76\xb3\x60\x40\x70\x8a\xc1"
- "\x53\xac\xa4\x32\xf3\xeb\xd3\x6e"
- "\x1e\x42\xa0\x46\x45\x9f\xc7\x22"
- "\xd3\x43\xbc\x7e\xa5\x47\x2a\x6f"
- "\x91\x19\x70\x1e\xe1\xfe\x25\x49"
- "\xd6\x8f\x93\xc7\x28\x3f\x3d\x03",
- .alen = 64,
- .input = "\x7b\x25\x3d\x47\xd4\xa7\x08\xce"
- "\x3b\x89\x40\x36\xba\x6d\x0e\xa2",
- .ilen = 16,
- .result = "",
- .rlen = 0,
- }, {
- .key = "\xfc\x72\x90\xa6\x64\x5a\x0d\x21"
- "\x22\x63\x6e\x96\x1b\x67\xe4\xec"
- "\x49\xd7\xb9\xa2\xc3\xc0\x4b\xce"
- "\xb4\xc3\x14\x1e\x61\x1a\xa3\xd9",
- .klen = 32,
- .iv = "\x0b\x3c\x1f\x0e\xa8\xf9\xb7\xa7"
- "\x42\x9a\x9a\xba\x19\x30\x15\x6e",
- .assoc = "\x1a",
- .alen = 1,
- .input = "\xe6\x09\x6f\x95\x9a\x18\xc8\xf6"
- "\x17\x75\x81\x16\xdf\x26\xff\x67"
- "\x92",
- .ilen = 17,
- .result = "\x29",
- .rlen = 1,
- }, {
- .key = "\x38\x97\xca\x45\x74\xd6\xb6\x3c"
- "\xa3\x3d\x20\x27\x15\x8b\xa7\xf2"
- "\x74\x9a\xc4\x27\xc8\x60\xcd\xe8"
- "\x75\xf0\xf2\xf7\x3b\x79\x42\xb0",
- .klen = 32,
- .iv = "\x47\x60\x59\xad\xb8\x75\x60\xc3"
- "\xc3\x74\x4c\x4c\x13\x54\xd8\x74",
- .assoc = "\x56\x29\xe7\x15\xfc\x14\x0a\x4a"
- "\xe4\xaa\x79\x70\x12\x1d\x08\xf6"
- "\x09\xfb\xca\x69\x4b\xb0\x8e\xf5"
- "\xd6\x07\x62\xe3\xa8\xa9\x12",
- .alen = 31,
- .input = "\x82\xc0\x56\xf0\xd7\xc4\xc9\xfd"
- "\x3c\xd1\x2a\xd4\x15\x86\x9d\xda"
- "\xea\x6c\x6f\xa1\x33\xb0\x7a\x01"
- "\x57\xe7\xf3\x7b\x73\xe7\x54\x10"
- "\xc6\x91\xe2\xc6\xa0\x69\xe7\xe6"
- "\x76\xc3\xf5\x3a\x76\xfd\x4a",
- .ilen = 47,
- .result = "\x66\xf3\x75\x7d\x40\xb3\xb4\xd1"
- "\x04\xe1\xa6\x94\x10\xe6\x39\x77"
- "\xd3\xac\x4d\x8a\x8c\x58\x6e\xfb"
- "\x06\x13\x9a\xd9\x5e\xc0\xfa",
- .rlen = 31,
- }, {
- .key = "\x75\xbc\x04\xe5\x84\x52\x5e\x58"
- "\x24\x17\xd2\xb9\x0e\xaf\x6a\xf9"
- "\x9e\x5c\xd0\xab\xcd\x00\x4f\x01"
- "\x37\x1e\xd1\xcf\x15\xd8\xe2\x86",
- .klen = 32,
- .iv = "\x84\x85\x92\x4d\xc8\xf1\x08\xdf"
- "\x44\x4e\xff\xdd\x0d\x78\x9a\x7a",
- .assoc = "\x93\x4e\x21\xb4\x0c\x90\xb3\x66"
- "\x65\x84\x2b\x01\x0b\x42\xcb\xfc"
- "\x33\xbd\xd6\xed\x50\x50\x10\x0e"
- "\x97\x35\x41\xbb\x82\x08\xb1\xf2",
- .alen = 32,
- .input = "\x01\x47\x8e\x6c\xf6\x64\x89\x3a"
- "\x71\xce\xe4\xaa\x45\x70\xe6\x84"
- "\x62\x48\x08\x64\x86\x6a\xdf\xec"
- "\xb4\xa0\xfb\x34\x03\x0c\x19\xf4"
- "\x2b\x7b\x36\x73\xec\x54\xa9\x1e"
- "\x30\x85\xdb\xe4\xac\xe9\x2c\xca",
- .ilen = 48,
- .result = "\xa2\x17\xaf\x1c\x50\x2e\x5d\xed"
- "\x85\xbb\x58\x26\x0a\x0b\xfc\x7d"
- "\xfe\x6e\x59\x0e\x91\xf8\xf0\x15"
- "\xc8\x40\x78\xb1\x38\x1f\x99\xa7",
- .rlen = 32,
- }, {
- .key = "\xb1\xe1\x3e\x84\x94\xcd\x07\x74"
- "\xa5\xf2\x84\x4a\x08\xd4\x2c\xff"
- "\xc8\x1e\xdb\x2f\xd2\xa0\xd1\x1b"
- "\xf8\x4c\xb0\xa8\xef\x37\x81\x5d",
- .klen = 32,
- .iv = "\xc0\xaa\xcc\xec\xd8\x6c\xb1\xfb"
- "\xc5\x28\xb1\x6e\x07\x9d\x5d\x81",
- .assoc = "\xd0\x73\x5a\x54\x1d\x0b\x5b\x82"
- "\xe5\x5f\xdd\x93\x05\x66\x8e\x02"
- "\x5e\x80\xe1\x71\x55\xf0\x92\x28"
- "\x59\x62\x20\x94\x5c\x67\x50\xc8"
- "\x58",
- .alen = 33,
- .input = "\x85\xe0\xf8\x0f\x8e\x49\xe3\x60"
- "\xcb\x4a\x54\x94\xcf\xf5\x7e\x34"
- "\xe9\xf8\x80\x65\x53\xd0\x72\x70"
- "\x4f\x7d\x9d\xd1\x15\x6f\xb9\x2c"
- "\xfa\xe8\xdd\xac\x2e\xe1\x3f\x67"
- "\x63\x0f\x1a\x59\xb7\x89\xdb\xf4"
- "\xc3",
- .ilen = 49,
- .result = "\xdf\x3c\xe9\xbc\x61\xaa\x06\x09"
- "\x06\x95\x0a\xb7\x04\x2f\xbe\x84"
- "\x28\x30\x64\x92\x96\x98\x72\x2e"
- "\x89\x6e\x57\x8a\x13\x7e\x38\x7e"
- "\xdb",
- .rlen = 33,
- }, {
- .key = "\xee\x05\x77\x23\xa5\x49\xb0\x90"
- "\x26\xcc\x36\xdc\x02\xf8\xef\x05"
- "\xf3\xe1\xe7\xb3\xd8\x40\x53\x35"
- "\xb9\x79\x8f\x80\xc9\x96\x20\x33",
- .klen = 32,
- .iv = "\xfd\xce\x06\x8b\xe9\xe8\x5a\x17"
- "\x46\x02\x63\x00\x01\xc1\x20\x87",
- .assoc = "\x0c\x98\x94\xf3\x2d\x87\x04\x9e"
- "\x66\x39\x8f\x24\xff\x8a\x50\x08"
- "\x88\x42\xed\xf6\x5a\x90\x14\x42"
- "\x1a\x90\xfe\x6c\x36\xc6\xf0\x9f"
- "\x66\xa0\xb5\x2d\x2c\xf8\x25\x15"
- "\x55\x90\xa2\x7e\x77\x94\x96\x3a"
- "\x71\x1c\xf7\x44\xee\xa8\xc3\x42"
- "\xe2\xa3\x84\x04\x0b\xe1\xce",
- .alen = 63,
- .input = "\x00\xe5\x5b\x87\x5c\x20\x22\x8a"
- "\xda\x1f\xd3\xff\xbb\xb2\xb0\xf8"
- "\xef\xe9\xeb\x9e\x7c\x80\xf4\x2b"
- "\x59\xc0\x79\xbc\x17\xa0\x15\x01"
- "\xf5\x72\xfb\x5a\xe7\xaf\x07\xe3"
- "\x1b\x49\x21\x34\x23\x63\x55\x5e"
- "\xee\x4f\x34\x17\xfa\xfe\xa5\x0c"
- "\xed\x0b\x23\xea\x9b\xda\x57\x2f"
- "\xf6\xa9\xae\x0d\x4e\x40\x96\x45"
- "\x7f\xfa\xf0\xbf\xc4\x98\x78",
- .ilen = 79,
- .result = "\x1b\x61\x23\x5b\x71\x26\xae\x25"
- "\x87\x6f\xbc\x49\xfe\x53\x81\x8a"
- "\x53\xf2\x70\x17\x9b\x38\xf4\x48"
- "\x4b\x9b\x36\x62\xed\xdd\xd8\x54"
- "\xea\xcb\xb6\x79\x45\xfc\xaa\x54"
- "\x5c\x94\x47\x58\xa7\xff\x9c\x9e"
- "\x7c\xb6\xf1\xac\xc8\xfd\x8b\x35"
- "\xd5\xa4\x6a\xd4\x09\xc2\x08",
- .rlen = 63,
- }, {
- .key = "\x2a\x2a\xb1\xc3\xb5\xc5\x59\xac"
- "\xa7\xa6\xe8\x6d\xfc\x1d\xb2\x0b"
- "\x1d\xa3\xf3\x38\xdd\xe0\xd5\x4e"
- "\x7b\xa7\x6e\x58\xa3\xf5\xbf\x0a",
- .klen = 32,
- .iv = "\x39\xf3\x3f\x2b\xf9\x64\x03\x33"
- "\xc7\xdd\x15\x91\xfb\xe6\xe2\x8d",
- .assoc = "\x49\xbc\xce\x92\x3d\x02\xad\xba"
- "\xe7\x13\x41\xb6\xf9\xaf\x13\x0f"
- "\xb2\x04\xf8\x7a\x5f\x30\x96\x5b"
- "\xdc\xbd\xdd\x44\x10\x25\x8f\x75"
- "\x75\x4d\xb9\x5b\x8e\x0a\x38\x13"
- "\x6f\x9f\x36\xe4\x3a\x3e\xac\xc9"
- "\x9d\x83\xde\xe5\x57\xfd\xe3\x0e"
- "\xb1\xa7\x1b\x44\x05\x67\xb7\x37",
- .alen = 64,
- .input = "\x28\xdd\xb9\x4a\x12\xc7\x0a\xe1"
- "\x58\x06\x1a\x9b\x8c\x67\xdf\xeb"
- "\x35\x35\x60\x9d\x06\x40\x65\xc1"
- "\x93\xe8\xb3\x82\x50\x29\xdd\xb5"
- "\x2b\xcb\xde\x18\x78\x6b\x42\xbe"
- "\x6d\x24\xd0\xb2\x7d\xd7\x08\x8f"
- "\x4a\x18\x98\xad\x8c\xf2\x97\xb4"
- "\xf4\x77\xe4\xbf\x41\x3b\xc4\x06"
- "\xce\x9e\x34\x81\xf0\x89\x11\x13"
- "\x02\x65\xa1\x7c\xdf\x07\x33\x06",
- .ilen = 80,
- .result = "\x58\x85\x5c\xfa\x81\xa1\x57\x40"
- "\x08\x4a\x6e\xda\xf8\x78\x44\x90"
- "\x7d\xb5\x7b\x9b\xa1\xd8\x76\x62"
- "\x0c\xc9\x15\x3b\xc7\x3c\x77\x2b"
- "\xf8\x78\xba\xa7\xa6\x0e\xbd\x52"
- "\x76\xa3\xdc\xbe\x6b\xa8\xb1\x2d"
- "\xa9\x1d\xd8\x4e\x31\x53\xab\x00"
- "\xa5\xa7\x01\x13\x04\x49\xf2\x04",
- .rlen = 64,
- }, {
- .key = "\x67\x4f\xeb\x62\xc5\x40\x01\xc7"
- "\x28\x80\x9a\xfe\xf6\x41\x74\x12"
- "\x48\x65\xfe\xbc\xe2\x80\x57\x68"
- "\x3c\xd4\x4d\x31\x7d\x54\x5f\xe1",
- .klen = 32,
- .iv = "\x76\x18\x79\xca\x09\xdf\xac\x4e"
- "\x48\xb7\xc7\x23\xf5\x0a\xa5\x93",
- .assoc = "\x85\xe1\x08\x32\x4d\x7e\x56\xd5"
- "\x68\xed\xf3\x47\xf3\xd3\xd6\x15"
- "\xdd\xc7\x04\xfe\x64\xd0\x18\x75"
- "\x9d\xeb\xbc\x1d\xea\x84\x2e\x4c"
- "\x83\xf9\xbe\x8a\xef\x1c\x4b\x10"
- "\x89\xaf\xcb\x4b\xfe\xe7\xc1\x58"
- "\xca\xea\xc6\x87\xc0\x53\x03\xd9"
- "\x80\xaa\xb2\x83\xff\xee\xa1\x6a"
- "\x04",
- .alen = 65,
- .input = "\x85\x39\x69\x35\xfb\xf9\xb0\xa6"
- "\x85\x43\x88\xd0\xd7\x78\x60\x19"
- "\x3e\x1f\xb1\xa4\xd6\xc5\x96\xec"
- "\xf7\x84\x85\xc7\x27\x0f\x74\x57"
- "\x28\x9e\xdd\x90\x3c\x43\x12\xc5"
- "\x51\x3d\x39\x8f\xa5\xf4\xe0\x0b"
- "\x57\x04\xf1\x6d\xfe\x9b\x84\x27"
- "\xe8\xeb\x4d\xda\x02\x0a\xc5\x49"
- "\x1a\x55\x5e\x50\x56\x4d\x94\xda"
- "\x20\xf8\x12\x54\x50\xb3\x11\xda"
- "\xed\x44\x27\x67\xd5\xd1\x8b\x4b"
- "\x38\x67\x56\x65\x59\xda\xe6\x97"
- "\x81\xae\x2f\x92\x3b\xae\x22\x1c"
- "\x91\x59\x38\x18\x00\xe8\xba\x92"
- "\x04\x19\x56\xdf\xb0\x82\xeb\x6f"
- "\x2e\xdb\x54\x3c\x4b\xbb\x60\x90"
- "\x4c\x50\x10\x62\xba\x7a\xb1\x68"
- "\x37\xd7\x87\x4e\xe4\x66\x09\x1f"
- "\xa5",
- .ilen = 145,
- .result = "\x94\xaa\x96\x9a\x91\x1d\x00\x5c"
- "\x88\x24\x20\x6b\xf2\x9c\x06\x96"
- "\xa7\x77\x87\x1f\xa6\x78\xf8\x7b"
- "\xcd\xf6\xf4\x13\xa1\x9b\x16\x02"
- "\x07\x24\xbf\xd5\x08\x20\xd0\x4f"
- "\x90\xb3\x70\x24\x2f\x51\xc7\xbb"
- "\xd6\x84\xc0\xef\x9a\xa8\xca\xcc"
- "\x74\xab\x97\x53\xfe\xd0\xdb\x37"
- "\x37\x6a\x0e\x9f\x3f\xa3\x2a\xe3"
- "\x1b\x34\x6d\x51\x72\x2b\x17\xe7"
- "\x4d\xaa\x2c\x18\xda\xa3\x33\x89"
- "\x2a\x9f\xf4\xd2\xed\x76\x3d\x3f"
- "\x3c\x15\x9d\x8e\x4f\x3c\x27\xb0"
- "\x42\x3f\x2f\x8a\xd4\xc2\x10\xb2"
- "\x27\x7f\xe3\x34\x80\x02\x49\x4b"
- "\x07\x68\x22\x2a\x88\x25\x53\xb2"
- "\x2f",
- .rlen = 129,
- }, {
- .key = "\xa3\x73\x24\x01\xd5\xbc\xaa\xe3"
- "\xa9\x5a\x4c\x90\xf0\x65\x37\x18"
- "\x72\x28\x0a\x40\xe7\x20\xd9\x82"
- "\xfe\x02\x2b\x09\x57\xb3\xfe\xb7",
- .klen = 32,
- .iv = "\xb3\x3d\xb3\x69\x19\x5b\x54\x6a"
- "\xc9\x91\x79\xb4\xef\x2e\x68\x99",
- .assoc = "\xc2\x06\x41\xd1\x5d\xfa\xff\xf1"
- "\xe9\xc7\xa5\xd9\xed\xf8\x98\x1b"
- "\x07\x89\x10\x82\x6a\x70\x9a\x8f"
- "\x5e\x19\x9b\xf5\xc5\xe3\xcd\x22"
- "\x92\xa5\xc2\xb8\x51\x2e\x5e\x0e"
- "\xa4\xbe\x5f\xb1\xc1\x90\xd7\xe7"
- "\xf7\x52\xae\x28\x29\xa8\x22\xa4"
- "\x4f\xae\x48\xc2\xfa\x75\x8b\x9e"
- "\xce\x83\x2a\x88\x07\x55\xbb\x89"
- "\xf6\xdf\xac\xdf\x83\x08\xbf\x7d"
- "\xac\x30\x8b\x8e\x02\xac\x00\xf1"
- "\x30\x46\xe1\xbc\x75\xbf\x49\xbb"
- "\x26\x4e\x29\xf0\x2f\x21\xc6\x13"
- "\x92\xd9\x3d\x11\xe4\x10\x00\x8e"
- "\xd4\xd4\x58\x65\xa6\x2b\xe3\x25"
- "\xb1\x8f\x15\x93\xe7\x71\xb9\x2c"
- "\x4b",
- .alen = 129,
- .input = "\x7d\xde\x53\x22\xe4\x23\x3b\x30"
- "\x78\xde\x35\x90\x7a\xd9\x0b\x93"
- "\xf6\x0e\x0b\xed\x40\xee\x10\x9c"
- "\x96\x3a\xd3\x34\xb2\xd0\x67\xcf"
- "\x63\x7f\x2d\x0c\xcf\x96\xec\x64"
- "\x1a\x87\xcc\x7d\x2c\x5e\x81\x4b"
- "\xd2\x8f\x4c\x7c\x00\xb1\xb4\xe0"
- "\x87\x4d\xb1\xbc\xd8\x78\x2c\x17"
- "\xf2\x3b\xd8\x28\x40\xe2\x76\xf6"
- "\x20\x13\x83\x46\xaf\xff\xe3\x0f"
- "\x72",
- .ilen = 81,
- .result = "\xd1\xcf\xd0\x39\xa1\x99\xa9\x78"
- "\x09\xfe\xd2\xfd\xec\xc1\xc9\x9d"
- "\xd2\x39\x93\xa3\xab\x18\x7a\x95"
- "\x8f\x24\xd3\xeb\x7b\xfa\xb5\xd8"
- "\x15\xd1\xc3\x04\x69\x32\xe3\x4d"
- "\xaa\xc2\x04\x8b\xf2\xfa\xdc\x4a"
- "\x02\xeb\xa8\x90\x03\xfd\xea\x97"
- "\x43\xaf\x2e\x92\xf8\x57\xc5\x6a"
- "\x00",
- .rlen = 65,
- }, {
- .key = "\xe0\x98\x5e\xa1\xe5\x38\x53\xff"
- "\x2a\x35\xfe\x21\xea\x8a\xfa\x1e"
- "\x9c\xea\x15\xc5\xec\xc0\x5b\x9b"
- "\xbf\x2f\x0a\xe1\x32\x12\x9d\x8e",
- .klen = 32,
- .iv = "\xef\x61\xed\x08\x29\xd7\xfd\x86"
- "\x4a\x6b\x2b\x46\xe9\x53\x2a\xa0",
- .assoc = "\xfe\x2a\x7b\x70\x6d\x75\xa7\x0d"
- "\x6a\xa2\x57\x6a\xe7\x1c\x5b\x21"
- "\x31\x4b\x1b\x07\x6f\x10\x1c\xa8"
- "\x20\x46\x7a\xce\x9f\x42\x6d\xf9",
- .alen = 32,
- .input = "\x5a\xcd\x8c\x57\xf2\x6a\xb6\xbe"
- "\x53\xc7\xaa\x9a\x60\x74\x9c\xc4"
- "\xa2\xc2\xd0\x6d\xe1\x03\x63\xdc"
- "\xbb\x51\x7e\x9c\x89\x73\xde\x4e"
- "\x24\xf8\x52\x7c\x15\x41\x0e\xba"
- "\x69\x0e\x36\x5f\x2f\x22\x8c",
- .ilen = 47,
- .result = "\x0d\xf4\x09\xd8\xb1\x14\x51\x94"
- "\x8a\xd8\x84\x8e\xe6\xe5\x8c\xa3"
- "\xfc\xfc\x9e\x28\xb0\xb8\xfc\xaf"
- "\x50\x52\xb1\xc4\x55\x59\x55\xaf",
- .rlen = 32,
- }, {
- .key = "\x1c\xbd\x98\x40\xf5\xb3\xfc\x1b"
- "\xaa\x0f\xb0\xb3\xe4\xae\xbc\x24"
- "\xc7\xac\x21\x49\xf1\x60\xdd\xb5"
- "\x80\x5d\xe9\xba\x0c\x71\x3c\x64",
- .klen = 32,
- .iv = "\x2c\x86\x26\xa8\x39\x52\xa6\xa2"
- "\xcb\x45\xdd\xd7\xe3\x77\xed\xa6",
- .assoc = "\x3b\x4f\xb5\x10\x7d\xf1\x50\x29"
- "\xeb\x7c\x0a\xfb\xe1\x40\x1e\x27"
- "\x5c\x0d\x27\x8b\x74\xb0\x9e\xc2"
- "\xe1\x74\x59\xa6\x79\xa1\x0c\xd0",
- .alen = 32,
- .input = "\x47\xd6\xce\x78\xd6\xbf\x4a\x51"
- "\xb8\xda\x92\x3c\xfd\xda\xac\x8e"
- "\x8d\x88\xd7\x4d\x90\xe5\xeb\xa1"
- "\xab\xd6\x7c\x76\xad\xea\x7d\x76"
- "\x53\xee\xb0\xcd\xd0\x02\xbb\x70"
- "\x5b\x6f\x7b\xe2\x8c\xe8",
- .ilen = 46,
- .result = "\x4a\x18\x43\x77\xc1\x90\xfa\xb0"
- "\x0b\xb2\x36\x20\xe0\x09\x4e\xa9"
- "\x26\xbe\xaa\xac\xb5\x58\x7e\xc8"
- "\x11\x7f\x90\x9c\x2f\xb8\xf4\x85",
- .rlen = 32,
- }, {
- .key = "\x59\xe1\xd2\xdf\x05\x2f\xa4\x37"
- "\x2b\xe9\x63\x44\xde\xd3\x7f\x2b"
- "\xf1\x6f\x2d\xcd\xf6\x00\x5f\xcf"
- "\x42\x8a\xc8\x92\xe6\xd0\xdc\x3b",
- .klen = 32,
- .iv = "\x68\xab\x60\x47\x49\xce\x4f\xbe"
- "\x4c\x20\x8f\x68\xdd\x9c\xb0\xac",
- .assoc = "\x77\x74\xee\xaf\x8d\x6d\xf9\x45"
- "\x6c\x56\xbc\x8d\xdb\x65\xe0\x2e"
- "\x86\xd0\x32\x0f\x79\x50\x20\xdb"
- "\xa2\xa1\x37\x7e\x53\x00\xab\xa6",
- .alen = 32,
- .input = "\x9f\xa9\x2b\xa4\x8f\x00\x05\x2b"
- "\xe7\x68\x81\x51\xbb\xfb\xdf\x60"
- "\xbb\xac\xe8\xc1\xdc\x68\xae\x68"
- "\x3a\xcd\x7a\x06\x49\xfe\x80\x11"
- "\xe6\x61\x99\xe2\xdd\xbe\x2c\xbf",
- .ilen = 40,
- .result = "\x86\x3d\x7d\x17\xd1\x0c\xa3\xcc"
- "\x8c\x8d\xe8\xb1\xda\x2e\x11\xaf"
- "\x51\x80\xb5\x30\xba\xf8\x00\xe2"
- "\xd3\xad\x6f\x75\x09\x18\x93\x5c",
- .rlen = 32,
- }, {
- .key = "\x96\x06\x0b\x7f\x15\xab\x4d\x53"
- "\xac\xc3\x15\xd6\xd8\xf7\x42\x31"
- "\x1b\x31\x38\x51\xfc\xa0\xe1\xe8"
- "\x03\xb8\xa7\x6b\xc0\x2f\x7b\x11",
- .klen = 32,
- .iv = "\xa5\xcf\x9a\xe6\x59\x4a\xf7\xd9"
- "\xcd\xfa\x41\xfa\xd7\xc0\x72\xb2",
- .assoc = "\xb4\x99\x28\x4e\x9d\xe8\xa2\x60"
- "\xed\x30\x6e\x1e\xd5\x89\xa3\x34"
- "\xb1\x92\x3e\x93\x7e\xf0\xa2\xf5"
- "\x64\xcf\x16\x57\x2d\x5f\x4a\x7d",
- .alen = 32,
- .input = "\xe2\x34\xfa\x25\xfd\xfb\x89\x5e"
+ .plen = 32,
+ .ctext = "\xe2\x34\xfa\x25\xfd\xfb\x89\x5e"
"\x5b\x4e\x0b\x15\x6e\x39\xfb\x0c"
"\x73\xc7\xd9\x6b\xbe\xce\x9b\x70"
"\xc7\x4f\x96\x16\x03\xfc\xea\xfb"
"\x56",
- .ilen = 33,
- .result = "\xc3\x62\xb7\xb6\xe2\x87\x4c\xe7"
- "\x0d\x67\x9a\x43\xd4\x52\xd4\xb5"
- "\x7b\x43\xc1\xb5\xbf\x98\x82\xfc"
- "\x94\xda\x4e\x4d\xe4\x77\x32\x32",
- .rlen = 32,
+ .clen = 33,
},
};
@@ -26903,7 +21991,7 @@ static const struct cipher_testvec aes_kw_tv_template[] = {
.ctext = "\xf6\x85\x94\x81\x6f\x64\xca\xa3"
"\xf5\x6f\xab\xea\x25\x48\xf5\xfb",
.len = 16,
- .iv = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d",
+ .iv_out = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d",
.generates_iv = true,
}, {
.key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
@@ -26916,7 +22004,7 @@ static const struct cipher_testvec aes_kw_tv_template[] = {
.ctext = "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15"
"\x59\xf9\x9c\x8a\xcd\x29\x3d\x43",
.len = 16,
- .iv = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1",
+ .iv_out = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1",
.generates_iv = true,
},
};
@@ -27995,9 +23083,6 @@ static const struct cipher_testvec cast5_tv_template[] = {
"\x4F\xFE\x24\x9C\x9A\x02\xE5\x57"
"\xF5\xBC\x25\xD6\x02\x56\x57\x1C",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -28007,6 +23092,7 @@ static const struct cipher_testvec cast5_cbc_tv_template[] = {
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
.klen = 16,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
+ .iv_out = "\x1D\x18\x66\x44\x5B\x8F\x14\xEB",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -28132,9 +23218,6 @@ static const struct cipher_testvec cast5_cbc_tv_template[] = {
"\x15\x5F\xDB\xE9\xB1\x83\xD2\xE6"
"\x1D\x18\x66\x44\x5B\x8F\x14\xEB",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -28144,6 +23227,7 @@ static const struct cipher_testvec cast5_ctr_tv_template[] = {
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
.klen = 16,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x62",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A",
@@ -28156,6 +23240,7 @@ static const struct cipher_testvec cast5_ctr_tv_template[] = {
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
.klen = 16,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x9D",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -28281,9 +23366,6 @@ static const struct cipher_testvec cast5_ctr_tv_template[] = {
"\x8C\x98\xDB\xDE\xFC\x72\x94\xAA"
"\xC0\x0D\x96\xAA\x23\xF8\xFE\x13",
.len = 496,
- .also_non_np = 1,
- .np = 3,
- .tap = { 496 - 20, 4, 16 },
},
};
@@ -28544,6 +23626,8 @@ static const struct cipher_testvec anubis_cbc_tv_template[] = {
.key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
"\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
.klen = 16,
+ .iv_out = "\x86\xd8\xb5\x6f\x98\x5e\x8a\x66"
+ "\x4f\x1f\x78\xa1\xbb\x37\xf1\xbe",
.ptext = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
"\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
"\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
@@ -28560,6 +23644,8 @@ static const struct cipher_testvec anubis_cbc_tv_template[] = {
"\x35\x35\x35\x35\x35\x35\x35\x35"
"\x35\x35\x35\x35\x35\x35\x35\x35",
.klen = 40,
+ .iv_out = "\xa2\xbc\x06\x98\xc6\x4b\xda\x75"
+ "\x2e\xaa\xbe\x58\xce\x01\x5b\xc7",
.ptext = "\x35\x35\x35\x35\x35\x35\x35\x35"
"\x35\x35\x35\x35\x35\x35\x35\x35"
"\x35\x35\x35\x35\x35\x35\x35\x35"
@@ -28656,20 +23742,6 @@ static const struct cipher_testvec fcrypt_pcbc_tv_template[] = {
"\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
"\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
.len = 48,
- }, { /* split-page version */
- .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
- .klen = 8,
- .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
- .ptext = "The quick brown fox jumps over the lazy dogs.\0\0",
- .ctext = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
- "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
- "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
- "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
- "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
- "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
- .len = 48,
- .np = 2,
- .tap = { 20, 28 },
}
};
@@ -28966,9 +24038,6 @@ static const struct cipher_testvec camellia_tv_template[] = {
"\xF8\xB2\xAA\x7A\xD6\xFF\xFA\x55"
"\x33\x1A\xBB\xD3\xA2\x7E\x97\x66",
.len = 1008,
- .also_non_np = 1,
- .np = 3,
- .tap = { 1008 - 20, 4, 16 },
},
};
@@ -28979,6 +24048,8 @@ static const struct cipher_testvec camellia_cbc_tv_template[] = {
.klen = 16,
.iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+ .iv_out = "\xea\x32\x12\x76\x3b\x50\x10\xe7"
+ "\x18\xf6\xfd\x5d\xf6\x8f\x13\x51",
.ptext = "Single block msg",
.ctext = "\xea\x32\x12\x76\x3b\x50\x10\xe7"
"\x18\xf6\xfd\x5d\xf6\x8f\x13\x51",
@@ -28989,6 +24060,8 @@ static const struct cipher_testvec camellia_cbc_tv_template[] = {
.klen = 16,
.iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+ .iv_out = "\x19\xb4\x3e\x57\x1c\x02\x5e\xa0"
+ "\x15\x78\xe0\x5e\xf2\xcb\x87\x16",
.ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -29006,6 +24079,8 @@ static const struct cipher_testvec camellia_cbc_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\x55\x01\xD4\x58\xB2\xF2\x85\x49"
+ "\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -29259,9 +24334,6 @@ static const struct cipher_testvec camellia_cbc_tv_template[] = {
"\x55\x01\xD4\x58\xB2\xF2\x85\x49"
"\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C",
.len = 1008,
- .also_non_np = 1,
- .np = 3,
- .tap = { 1008 - 20, 4, 16 },
},
};
@@ -29274,6 +24346,8 @@ static const struct cipher_testvec camellia_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+ "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -29407,6 +24481,8 @@ static const struct cipher_testvec camellia_ctr_tv_template[] = {
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+ .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+ "\xC4\x29\x8E\xF3\x35\x9A\xFF\xA4",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -29662,9 +24738,6 @@ static const struct cipher_testvec camellia_ctr_tv_template[] = {
"\xE7\x2C\x49\x08\x8B\x72\xFA\x5C"
"\xF1\x6B\xD9",
.len = 1011,
- .also_non_np = 1,
- .np = 2,
- .tap = { 1011 - 16, 16 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -29673,6 +24746,8 @@ static const struct cipher_testvec camellia_ctr_tv_template[] = {
.klen = 32,
.iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x3C",
.ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -30167,9 +25242,6 @@ static const struct cipher_testvec camellia_lrw_tv_template[] = {
"\xb2\x1a\xd8\x4c\xbd\x1d\x10\xe9"
"\x5a\xa8\x92\x7f\xba\xe6\x0c\x95",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
},
};
@@ -30504,9 +25576,6 @@ static const struct cipher_testvec camellia_xts_tv_template[] = {
"\xb7\x16\xd8\x12\x5c\xcd\x7d\x4e"
"\xd5\xc6\x99\xcc\x4e\x6c\x94\x95",
.len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
},
};
@@ -31710,8 +26779,6 @@ static const struct cipher_testvec salsa20_stream_tv_template[] = {
"\x87\x13\xc6\x5b\x59\x8d\xf2\xc8"
"\xaf\xdf\x11\x95",
.len = 4100,
- .np = 2,
- .tap = { 4064, 36 },
},
};
@@ -31844,9 +26911,6 @@ static const struct cipher_testvec chacha20_tv_template[] = {
"\x5b\x86\x2f\x37\x30\xe3\x7c\xfd"
"\xc4\xfd\x80\x6c\x22\xf2\x21",
.len = 375,
- .also_non_np = 1,
- .np = 3,
- .tap = { 375 - 20, 4, 16 },
}, { /* RFC7539 A.2. Test Vector #3 */
.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
@@ -32220,9 +27284,6 @@ static const struct cipher_testvec chacha20_tv_template[] = {
"\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
"\x98",
.len = 1281,
- .also_non_np = 1,
- .np = 3,
- .tap = { 1200, 1, 80 },
},
};
@@ -32415,9 +27476,6 @@ static const struct cipher_testvec xchacha20_tv_template[] = {
"\xab\xff\x1f\x12\xc3\xee\xe5\x65"
"\x12\x8d\x7b\x61\xe5\x1f\x98",
.len = 375,
- .also_non_np = 1,
- .np = 3,
- .tap = { 375 - 20, 4, 16 },
}, { /* Derived from a ChaCha20 test vector, via the process above */
.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
@@ -32795,9 +27853,6 @@ static const struct cipher_testvec xchacha20_tv_template[] = {
"\xba\xd0\x34\xc9\x2d\x91\xc5\x17"
"\x11",
.len = 1281,
- .also_non_np = 1,
- .np = 3,
- .tap = { 1200, 1, 80 },
}, { /* test vector from https://tools.ietf.org/html/draft-arciszewski-xchacha-02#appendix-A.3.2 */
.key = "\x80\x81\x82\x83\x84\x85\x86\x87"
"\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
@@ -33080,9 +28135,6 @@ static const struct cipher_testvec xchacha12_tv_template[] = {
"\xda\x4e\xc9\xab\x9b\x8a\x7b",
.len = 375,
- .also_non_np = 1,
- .np = 3,
- .tap = { 375 - 20, 4, 16 },
}, {
.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
@@ -33460,9 +28512,6 @@ static const struct cipher_testvec xchacha12_tv_template[] = {
"\xf0\xfc\x5e\x1c\xf1\xf5\xf9\xf3"
"\x5b",
.len = 1281,
- .also_non_np = 1,
- .np = 3,
- .tap = { 1200, 1, 80 },
}, {
.key = "\x80\x81\x82\x83\x84\x85\x86\x87"
"\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
@@ -33570,9 +28619,6 @@ static const struct cipher_testvec adiantum_xchacha12_aes_tv_template[] = {
.ctext = "\x6d\x32\x86\x18\x67\x86\x0f\x3f"
"\x96\x7c\x9d\x28\x0d\x53\xec\x9f",
.len = 16,
- .also_non_np = 1,
- .np = 2,
- .tap = { 14, 2 },
}, {
.key = "\x36\x2b\x57\x97\xf8\x5d\xcd\x99"
"\x5f\x1a\x5a\x44\x1d\x92\x0f\x27"
@@ -33635,9 +28681,6 @@ static const struct cipher_testvec adiantum_xchacha12_aes_tv_template[] = {
"\x74\xa6\xaa\xa3\xac\xdc\xc2\xf5"
"\x8d\xde\x34\x86\x78\x60\x75\x8d",
.len = 128,
- .also_non_np = 1,
- .np = 4,
- .tap = { 104, 16, 4, 4 },
}, {
.key = "\xd3\x81\x72\x18\x23\xff\x6f\x4a"
"\x25\x74\x29\x0d\x51\x8a\x0e\x13"
@@ -33777,9 +28820,1436 @@ static const struct cipher_testvec adiantum_xchacha12_aes_tv_template[] = {
"\x21\xb0\x21\x52\xba\xa7\x37\xaa"
"\xcc\xbf\x95\xa8\xf4\xd0\x91\xf6",
.len = 512,
- .also_non_np = 1,
- .np = 2,
- .tap = { 144, 368 },
+ }, {
+ .key = "\xeb\xe5\x11\x3a\x72\xeb\x10\xbe"
+ "\x70\xcf\xe3\xea\xc2\x74\xa4\x48"
+ "\x29\x0f\x8f\x3f\xcf\x4c\x28\x2a"
+ "\x4e\x1e\x3c\xc3\x27\x9f\x16\x13",
+ .klen = 32,
+ .iv = "\x84\x3e\xa2\x7c\x06\x72\xb2\xad"
+ "\x88\x76\x65\xb4\x1a\x29\x27\x12"
+ "\x45\xb6\x8d\x0e\x4b\x87\x04\xfc"
+ "\xb5\xcd\x1c\x4d\xe8\x06\xf1\xcb",
+ .ptext = "\x8e\xb6\x07\x9b\x7c\xe4\xa4\xa2"
+ "\x41\x6c\x24\x1d\xc0\x77\x4e\xd9"
+ "\x4a\xa4\x2c\xb6\xe4\x55\x02\x7f"
+ "\xc4\xec\xab\xc2\x5c\x63\x40\x92"
+ "\x38\x24\x62\xdb\x65\x82\x10\x7f"
+ "\x21\xa5\x39\x3a\x3f\x38\x7e\xad"
+ "\x6c\x7b\xc9\x3f\x89\x8f\xa8\x08"
+ "\xbd\x31\x57\x3c\x7a\x45\x67\x30"
+ "\xa9\x27\x58\x34\xbe\xe3\xa4\xc3"
+ "\xff\xc2\x9f\x43\xf0\x04\xba\x1e"
+ "\xb6\xf3\xc4\xce\x09\x7a\x2e\x42"
+ "\x7d\xad\x97\xc9\x77\x9a\x3a\x78"
+ "\x6c\xaf\x7c\x2a\x46\xb4\x41\x86"
+ "\x1a\x20\xf2\x5b\x1a\x60\xc9\xc4"
+ "\x47\x5d\x10\xa4\xd2\x15\x6a\x19"
+ "\x4f\xd5\x51\x37\xd5\x06\x70\x1a"
+ "\x3e\x78\xf0\x2e\xaa\xb5\x2a\xbd"
+ "\x83\x09\x7c\xcb\x29\xac\xd7\x9c"
+ "\xbf\x80\xfd\x9d\xd4\xcf\x64\xca"
+ "\xf8\xc9\xf1\x77\x2e\xbb\x39\x26"
+ "\xac\xd9\xbe\xce\x24\x7f\xbb\xa2"
+ "\x82\xba\xeb\x5f\x65\xc5\xf1\x56"
+ "\x8a\x52\x02\x4d\x45\x23\x6d\xeb"
+ "\xb0\x60\x7b\xd8\x6e\xb2\x98\xd2"
+ "\xaf\x76\xf2\x33\x9b\xf3\xbb\x95"
+ "\xc0\x50\xaa\xc7\x47\xf6\xb3\xf3"
+ "\x77\x16\xcb\x14\x95\xbf\x1d\x32"
+ "\x45\x0c\x75\x52\x2c\xe8\xd7\x31"
+ "\xc0\x87\xb0\x97\x30\x30\xc5\x5e"
+ "\x50\x70\x6e\xb0\x4b\x4e\x38\x19"
+ "\x46\xca\x38\x6a\xca\x7d\xfe\x05"
+ "\xc8\x80\x7c\x14\x6c\x24\xb5\x42"
+ "\x28\x04\x4c\xff\x98\x20\x08\x10"
+ "\x90\x31\x03\x78\xd8\xa1\xe6\xf9"
+ "\x52\xc2\xfc\x3e\xa7\x68\xce\xeb"
+ "\x59\x5d\xeb\xd8\x64\x4e\xf8\x8b"
+ "\x24\x62\xcf\x17\x36\x84\xc0\x72"
+ "\x60\x4f\x3e\x47\xda\x72\x3b\x0e"
+ "\xce\x0b\xa9\x9c\x51\xdc\xa5\xb9"
+ "\x71\x73\x08\x4e\x22\x31\xfd\x88"
+ "\x29\xfc\x8d\x17\x3a\x7a\xe5\xb9"
+ "\x0b\x9c\x6d\xdb\xce\xdb\xde\x81"
+ "\x73\x5a\x16\x9d\x3c\x72\x88\x51"
+ "\x10\x16\xf3\x11\x6e\x32\x5f\x4c"
+ "\x87\xce\x88\x2c\xd2\xaf\xf5\xb7"
+ "\xd8\x22\xed\xc9\xae\x68\x7f\xc5"
+ "\x30\x62\xbe\xc9\xe0\x27\xa1\xb5"
+ "\x57\x74\x36\x60\xb8\x6b\x8c\xec"
+ "\x14\xad\xed\x69\xc9\xd8\xa5\x5b"
+ "\x38\x07\x5b\xf3\x3e\x74\x48\x90"
+ "\x61\x17\x23\xdd\x44\xbc\x9d\x12"
+ "\x0a\x3a\x63\xb2\xab\x86\xb8\x67"
+ "\x85\xd6\xb2\x5d\xde\x4a\xc1\x73"
+ "\x2a\x7c\x53\x8e\xd6\x7d\x0e\xe4"
+ "\x3b\xab\xc5\x3d\x32\x79\x18\xb7"
+ "\xd6\x50\x4d\xf0\x8a\x37\xbb\xd3"
+ "\x8d\xd8\x08\xd7\x7d\xaa\x24\x52"
+ "\xf7\x90\xe3\xaa\xd6\x49\x7a\x47"
+ "\xec\x37\xad\x74\x8b\xc1\xb7\xfe"
+ "\x4f\x70\x14\x62\x22\x8c\x63\xc2"
+ "\x1c\x4e\x38\xc3\x63\xb7\xbf\x53"
+ "\xbd\x1f\xac\xa6\x94\xc5\x81\xfa"
+ "\xe0\xeb\x81\xe9\xd9\x1d\x32\x3c"
+ "\x85\x12\xca\x61\x65\xd1\x66\xd8"
+ "\xe2\x0e\xc3\xa3\xff\x0d\xd3\xee"
+ "\xdf\xcc\x3e\x01\xf5\x9b\x45\x5c"
+ "\x33\xb5\xb0\x8d\x36\x1a\xdf\xf8"
+ "\xa3\x81\xbe\xdb\x3d\x4b\xf6\xc6"
+ "\xdf\x7f\xb0\x89\xbd\x39\x32\x50"
+ "\xbb\xb2\xe3\x5c\xbb\x4b\x18\x98"
+ "\x08\x66\x51\xe7\x4d\xfb\xfc\x4e"
+ "\x22\x42\x6f\x61\xdb\x7f\x27\x88"
+ "\x29\x3f\x02\xa9\xc6\x83\x30\xcc"
+ "\x8b\xd5\x64\x7b\x7c\x76\x16\xbe"
+ "\xb6\x8b\x26\xb8\x83\x16\xf2\x6b"
+ "\xd1\xdc\x20\x6b\x42\x5a\xef\x7a"
+ "\xa9\x60\xb8\x1a\xd3\x0d\x4e\xcb"
+ "\x75\x6b\xc5\x80\x43\x38\x7f\xad"
+ "\x9c\x56\xd9\xc4\xf1\x01\x74\xf0"
+ "\x16\x53\x8d\x69\xbe\xf2\x5d\x92"
+ "\x34\x38\xc8\x84\xf9\x1a\xfc\x26"
+ "\x16\xcb\xae\x7d\x38\x21\x67\x74"
+ "\x4c\x40\xaa\x6b\x97\xe0\xb0\x2f"
+ "\xf5\x3e\xf6\xe2\x24\xc8\x22\xa4"
+ "\xa8\x88\x27\x86\x44\x75\x5b\x29"
+ "\x34\x08\x4b\xa1\xfe\x0c\x26\xe5"
+ "\xac\x26\xf6\x21\x0c\xfb\xde\x14"
+ "\xfe\xd7\xbe\xee\x48\x93\xd6\x99"
+ "\x56\x9c\xcf\x22\xad\xa2\x53\x41"
+ "\xfd\x58\xa1\x68\xdc\xc4\xef\x20"
+ "\xa1\xee\xcf\x2b\x43\xb6\x57\xd8"
+ "\xfe\x01\x80\x25\xdf\xd2\x35\x44"
+ "\x0d\x15\x15\xc3\xfc\x49\xbf\xd0"
+ "\xbf\x2f\x95\x81\x09\xa6\xb6\xd7"
+ "\x21\x03\xfe\x52\xb7\xa8\x32\x4d"
+ "\x75\x1e\x46\x44\xbc\x2b\x61\x04"
+ "\x1b\x1c\xeb\x39\x86\x8f\xe9\x49"
+ "\xce\x78\xa5\x5e\x67\xc5\xe9\xef"
+ "\x43\xf8\xf1\x35\x22\x43\x61\xc1"
+ "\x27\xb5\x09\xb2\xb8\xe1\x5e\x26"
+ "\xcc\xf3\x6f\xb2\xb7\x55\x30\x98"
+ "\x87\xfc\xe7\xa8\xc8\x94\x86\xa1"
+ "\xd9\xa0\x3c\x74\x16\xb3\x25\x98"
+ "\xba\xc6\x84\x4a\x27\xa6\x58\xfe"
+ "\xe1\x68\x04\x30\xc8\xdb\x44\x52"
+ "\x4e\xb2\xa4\x6f\xf7\x63\xf2\xd6"
+ "\x63\x36\x17\x04\xf8\x06\xdb\xeb"
+ "\x99\x17\xa5\x1b\x61\x90\xa3\x9f"
+ "\x05\xae\x3e\xe4\xdb\xc8\x1c\x8e"
+ "\x77\x27\x88\xdf\xd3\x22\x5a\xc5"
+ "\x9c\xd6\x22\xf8\xc4\xd8\x92\x9d"
+ "\x16\xcc\x54\x25\x3b\x6f\xdb\xc0"
+ "\x78\xd8\xe3\xb3\x03\x69\xd7\x5d"
+ "\xf8\x08\x04\x63\x61\x9d\x76\xf9"
+ "\xad\x1d\xc4\x30\x9f\x75\x89\x6b"
+ "\xfb\x62\xba\xae\xcb\x1b\x6c\xe5"
+ "\x7e\xea\x58\x6b\xae\xce\x9b\x48"
+ "\x4b\x80\xd4\x5e\x71\x53\xa7\x24"
+ "\x73\xca\xf5\x3e\xbb\x5e\xd3\x1c"
+ "\x33\xe3\xec\x5b\xa0\x32\x9d\x25"
+ "\x0e\x0c\x28\x29\x39\x51\xc5\x70"
+ "\xec\x60\x8f\x77\xfc\x06\x7a\x33"
+ "\x19\xd5\x7a\x6e\x94\xea\xa3\xeb"
+ "\x13\xa4\x2e\x09\xd8\x81\x65\x83"
+ "\x03\x63\x8b\xb5\xc9\x89\x98\x73"
+ "\x69\x53\x8e\xab\xf1\xd2\x2f\x67"
+ "\xbd\xa6\x16\x6e\xd0\x8b\xc1\x25"
+ "\x93\xd2\x50\x7c\x1f\xe1\x11\xd0"
+ "\x58\x0d\x2f\x72\xe7\x5e\xdb\xa2"
+ "\x55\x9a\xe0\x09\x21\xac\x61\x85"
+ "\x4b\x20\x95\x73\x63\x26\xe3\x83"
+ "\x4b\x5b\x40\x03\x14\xb0\x44\x16"
+ "\xbd\xe0\x0e\xb7\x66\x56\xd7\x30"
+ "\xb3\xfd\x8a\xd3\xda\x6a\xa7\x3d"
+ "\x98\x09\x11\xb7\x00\x06\x24\x5a"
+ "\xf7\x42\x94\xa6\x0e\xb1\x6d\x48"
+ "\x74\xb1\xa7\xe6\x92\x0a\x15\x9a"
+ "\xf5\xfa\x55\x1a\x6c\xdd\x71\x08"
+ "\xd0\xf7\x8d\x0e\x7c\x67\x4d\xc6"
+ "\xe6\xde\x78\x88\x88\x3c\x5e\x23"
+ "\x46\xd2\x25\xa4\xfb\xa3\x26\x3f"
+ "\x2b\xfd\x9c\x20\xda\x72\xe1\x81"
+ "\x8f\xe6\xae\x08\x1d\x67\x15\xde"
+ "\x86\x69\x1d\xc6\x1e\x6d\xb7\x5c"
+ "\xdd\x43\x72\x5a\x7d\xa7\xd8\xd7"
+ "\x1e\x66\xc5\x90\xf6\x51\x76\x91"
+ "\xb3\xe3\x39\x81\x75\x08\xfa\xc5"
+ "\x06\x70\x69\x1b\x2c\x20\x74\xe0"
+ "\x53\xb0\x0c\x9d\xda\xa9\x5b\xdd"
+ "\x1c\x38\x6c\x9e\x3b\xc4\x7a\x82"
+ "\x93\x9e\xbb\x75\xfb\x19\x4a\x55"
+ "\x65\x7a\x3c\xda\xcb\x66\x5c\x13"
+ "\x17\x97\xe8\xbd\xae\x24\xd9\x76"
+ "\xfb\x8c\x73\xde\xbd\xb4\x1b\xe0"
+ "\xb9\x2c\xe8\xe0\x1d\x3f\xa8\x2c"
+ "\x1e\x81\x5b\x77\xe7\xdf\x6d\x06"
+ "\x7c\x9a\xf0\x2b\x5d\xfc\x86\xd5"
+ "\xb1\xad\xbc\xa8\x73\x48\x61\x67"
+ "\xd6\xba\xc8\xe8\xe2\xb8\xee\x40"
+ "\x36\x22\x3e\x61\xf6\xc8\x16\xe4"
+ "\x0e\x88\xad\x71\x53\x58\xe1\x6c"
+ "\x8f\x4f\x89\x4b\x3e\x9c\x7f\xe9"
+ "\xad\xc2\x28\xc2\x3a\x29\xf3\xec"
+ "\xa9\x28\x39\xba\xc2\x86\xe1\x06"
+ "\xf3\x8b\xe3\x95\x0c\x87\xb8\x1b"
+ "\x72\x35\x8e\x8f\x6d\x18\xc8\x1c"
+ "\xa5\x5d\x57\x9d\x73\x8a\xbb\x9e"
+ "\x21\x05\x12\xd7\xe0\x21\x1c\x16"
+ "\x3a\x95\x85\xbc\xb0\x71\x0b\x36"
+ "\x6c\x44\x8d\xef\x3b\xec\x3f\x8e"
+ "\x24\xa9\xe3\xa7\x63\x23\xca\x09"
+ "\x62\x96\x79\x0c\x81\x05\x41\xf2"
+ "\x07\x20\x26\xe5\x8e\x10\x54\x03"
+ "\x05\x7b\xfe\x0c\xcc\x8c\x50\xe5"
+ "\xca\x33\x4d\x48\x7a\x03\xd5\x64"
+ "\x49\x09\xf2\x5c\x5d\xfe\x2b\x30"
+ "\xbf\x29\x14\x29\x8b\x9b\x7c\x96"
+ "\x47\x07\x86\x4d\x4e\x4d\xf1\x47"
+ "\xd1\x10\x2a\xa8\xd3\x15\x8c\xf2"
+ "\x2f\xf4\x3a\xdf\xd0\xa7\xcb\x5a"
+ "\xad\x99\x39\x4a\xdf\x60\xbe\xf9"
+ "\x91\x4e\xf5\x94\xef\xc5\x56\x32"
+ "\x33\x86\x78\xa3\xd6\x4c\x29\x7c"
+ "\xe8\xac\x06\xb5\xf5\x01\x5c\x9f"
+ "\x02\xc8\xe8\xbf\x5c\x1a\x7f\x4d"
+ "\x28\xa5\xb9\xda\xa9\x5e\xe7\x4b"
+ "\xf4\x3d\xe9\x1d\x28\xaa\x1a\x8a"
+ "\x76\xc8\x6c\x19\x61\x3c\x9e\x29"
+ "\xcd\xbe\xff\xe0\x1c\xb8\x67\xb5"
+ "\xa4\x46\xf8\xb9\x8a\xa2\xf6\x7c"
+ "\xef\x23\x73\x0c\xe9\x72\x0a\x0d"
+ "\x9b\x40\xd8\xfb\x0c\x9c\xab\xa8",
+ .ctext = "\xcb\x78\x87\x9c\xc7\x13\xc1\x30"
+ "\xdd\x2c\x7d\xb2\x97\xab\x06\x69"
+ "\x47\x87\x8a\x12\x2b\x5d\x86\xd7"
+ "\x2e\xe6\x7a\x0d\x58\x5d\xe7\x01"
+ "\x78\x0e\xff\xc7\xc5\xd2\x94\xd6"
+ "\xdd\x6b\x38\x1f\xa4\xe3\x3d\xe7"
+ "\xc5\x8a\xb5\xbe\x65\x11\x2b\xe1"
+ "\x2b\x8e\x84\xe8\xe0\x00\x7f\xdd"
+ "\x15\x15\xab\xbd\x22\x94\xf7\xce"
+ "\x99\x6f\xfd\x0e\x9b\x16\xeb\xeb"
+ "\x24\xc7\xbb\xc6\xe1\x6c\x57\xba"
+ "\x84\xab\x16\xf2\x57\xd6\x42\x9d"
+ "\x56\x92\x5b\x44\x18\xd4\xa2\x1b"
+ "\x1e\xa9\xdc\x7a\x16\x88\xc4\x4f"
+ "\x6d\x77\x9a\x2e\x82\xa9\xc3\xee"
+ "\xa4\xca\x05\x1b\x0e\xdc\x48\x96"
+ "\xd0\x50\x21\x1f\x46\xc7\xc7\x70"
+ "\x53\xcd\x1e\x4e\x5f\x2d\x4b\xb2"
+ "\x86\xe5\x3a\xe6\x1d\xec\x7b\x9d"
+ "\x8f\xd6\x41\xc6\xbb\x00\x4f\xe6"
+ "\x02\x47\x07\x73\x50\x6b\xcf\xb2"
+ "\x9e\x1c\x01\xc9\x09\xcc\xc3\x52"
+ "\x27\xe6\x63\xe0\x5b\x55\x60\x4d"
+ "\x72\xd0\xda\x4b\xec\xcb\x72\x5d"
+ "\x37\x4a\xf5\xb8\xd9\xe2\x08\x10"
+ "\xf3\xb9\xdc\x07\xc0\x02\x10\x14"
+ "\x9f\xe6\x8f\xc4\xc4\xe1\x39\x7b"
+ "\x47\xea\xae\x7c\xdd\x27\xa8\x4c"
+ "\x6b\x0f\x4c\xf8\xff\x16\x4e\xcb"
+ "\xec\x88\x33\x0d\x15\x10\x82\x66"
+ "\xa7\x3d\x2c\xb6\xbc\x2e\xe4\xce"
+ "\x4c\x2f\x4b\x46\x0f\x67\x78\xa5"
+ "\xff\x6a\x7d\x0d\x5e\x6d\xab\xfb"
+ "\x59\x99\xd8\x1f\x30\xd4\x33\xe8"
+ "\x7d\x11\xae\xe3\xba\xd0\x3f\xa7"
+ "\xa5\x5e\x43\xda\xf3\x0f\x3a\x5f"
+ "\xba\xb0\x47\xb2\x08\x60\xf4\xed"
+ "\x35\x23\x0c\xe9\x4f\x81\xc4\xc5"
+ "\xa8\x35\xdc\x99\x52\x33\x19\xd4"
+ "\x00\x01\x8d\x5a\x10\x82\x39\x78"
+ "\xfc\x72\x24\x63\x4a\x38\xc5\x6f"
+ "\xfe\xec\x2f\x26\x0c\x3c\x1c\xf6"
+ "\x4d\x99\x7a\x77\x59\xfe\x10\xa5"
+ "\xa1\x35\xbf\x2f\x15\xfa\x4e\x52"
+ "\xe6\xd5\x1c\x88\x90\x75\xd5\xcc"
+ "\xdb\x2a\xb1\xf0\x70\x54\x89\xc7"
+ "\xeb\x1d\x6e\x61\x45\xa3\x50\x48"
+ "\xcd\xdb\x32\xba\x7f\x6b\xaf\xef"
+ "\x50\xcb\x0d\x36\xf7\x29\x3a\x10"
+ "\x02\x73\xca\x8f\x3f\x5d\x82\x17"
+ "\x91\x9a\xd8\x15\x15\xe3\xe1\x41"
+ "\x43\xef\x85\xa6\xb0\xc7\x3b\x0f"
+ "\xf0\xa5\xaa\x66\x77\x70\x5e\x70"
+ "\xce\x17\x84\x68\x45\x39\x2c\x25"
+ "\xc6\xc1\x5f\x7e\xe8\xfa\xe4\x3a"
+ "\x47\x51\x7b\x9d\x54\x84\x98\x04"
+ "\x5f\xf7\x5f\x3c\x34\xe7\xa3\x1d"
+ "\xea\xb7\x6d\x05\xab\x28\xe4\x2c"
+ "\xb1\x7f\x08\xa8\x5d\x07\xbf\xfe"
+ "\x39\x72\x44\x87\x51\xc5\x73\xe4"
+ "\x9a\x5f\xdd\x46\xbc\x4e\xb1\x39"
+ "\xe4\x78\xb8\xbf\xdc\x5b\x88\x9b"
+ "\xc1\x3f\xd9\xd0\xb3\x5a\xdf\xaa"
+ "\x53\x6a\x91\x6d\x2a\x09\xf0\x0b"
+ "\x5e\xe8\xb2\xa0\xb4\x73\x07\x1d"
+ "\xc8\x33\x84\xe6\xda\xe6\xad\xd6"
+ "\xad\x91\x01\x4e\x14\x42\x34\x2c"
+ "\xe5\xf9\x99\x21\x56\x1f\x6c\x2b"
+ "\x4c\xe3\xd5\x9e\x04\xdc\x9a\x16"
+ "\xd1\x54\xe9\xc2\xf7\xc0\xd5\x06"
+ "\x2f\xa1\x38\x2a\x55\x88\x23\xf8"
+ "\xb0\xdb\x87\x32\xc9\x4e\xb0\x0c"
+ "\xc5\x05\x78\x58\xa1\x2e\x75\x75"
+ "\x68\xdc\xea\xdd\x0c\x33\x16\x5e"
+ "\xe7\xdc\xfd\x42\x74\xbe\xae\x60"
+ "\x3c\x37\x4b\x27\xf5\x2c\x5f\x55"
+ "\x4a\x0b\x64\xfd\xa2\x01\x65\x9c"
+ "\x27\x9f\x5e\x87\xd5\x95\x88\x66"
+ "\x09\x84\x42\xab\x00\xe2\x58\xc3"
+ "\x97\x45\xf1\x93\xe2\x34\x37\x3d"
+ "\xfe\x93\x8c\x17\xb9\x79\x65\x06"
+ "\xf7\x58\xe5\x1b\x3b\x4e\xda\x36"
+ "\x17\xe3\x56\xec\x26\x0f\x2e\xfa"
+ "\xd1\xb9\x2b\x3e\x7f\x1d\xe3\x4b"
+ "\x67\xdf\x43\x53\x10\xba\xa3\xfb"
+ "\x5d\x5a\xd8\xc4\xab\x19\x7e\x12"
+ "\xaa\x83\xf1\xc0\xa1\xe0\xbf\x72"
+ "\x5f\xe8\x68\x39\xef\x1a\xbe\xee"
+ "\x6f\x47\x79\x19\xed\xf2\xa1\x4a"
+ "\xe5\xfc\xb5\x58\xae\x63\x82\xcb"
+ "\x16\x0b\x94\xbb\x3e\x02\x49\xc4"
+ "\x3c\x33\xf1\xec\x1b\x11\x71\x9b"
+ "\x5b\x80\xf1\x6f\x88\x1c\x05\x36"
+ "\xa8\xd8\xee\x44\xb5\x18\xc3\x14"
+ "\x62\xba\x98\xb9\xc0\x2a\x70\x93"
+ "\xb3\xd8\x11\x69\x95\x1d\x43\x7b"
+ "\x39\xc1\x91\x05\xc4\xe3\x1e\xc2"
+ "\x1e\x5d\xe7\xde\xbe\xfd\xae\x99"
+ "\x4b\x8f\x83\x1e\xf4\x9b\xb0\x2b"
+ "\x66\x6e\x62\x24\x8d\xe0\x1b\x22"
+ "\x59\xeb\xbd\x2a\x6b\x2e\x37\x17"
+ "\x9e\x1f\x66\xcb\x66\xb4\xfb\x2c"
+ "\x36\x22\x5d\x73\x56\xc1\xb0\x27"
+ "\xe0\xf0\x1b\xe4\x47\x8b\xc6\xdc"
+ "\x7c\x0c\x3d\x29\xcb\x33\x10\xfe"
+ "\xc3\xc3\x1e\xff\x4c\x9b\x27\x86"
+ "\xe2\xb0\xaf\xb7\x89\xce\x61\x69"
+ "\xe7\x00\x3e\x92\xea\x5f\x9e\xc1"
+ "\xfa\x6b\x20\xe2\x41\x23\x82\xeb"
+ "\x07\x76\x4c\x4c\x2a\x96\x33\xbe"
+ "\x89\xa9\xa8\xb9\x9a\x7d\x27\x18"
+ "\x48\x23\x70\x46\xf3\x87\xa7\x91"
+ "\x58\xb8\x74\xba\xed\xc6\xb2\xa1"
+ "\x4d\xb6\x43\x9a\xe1\xa2\x41\xa5"
+ "\x35\xd3\x90\x8a\xc7\x4d\xb7\x88"
+ "\x0b\xe3\x74\x9f\x84\xfc\xd9\x73"
+ "\xf2\x86\x0c\xad\xeb\x5d\x70\xac"
+ "\x65\x07\x14\x8e\x57\xf6\xdc\xb4"
+ "\xc2\x02\x7c\xd6\x89\xe2\x8a\x3e"
+ "\x8e\x08\x3c\x12\x37\xaf\xe1\xa8"
+ "\x04\x11\x5c\xae\x5a\x2b\x60\xa0"
+ "\x03\x3c\x7a\xa2\x38\x92\xbe\xce"
+ "\x09\xa2\x5e\x0f\xc2\xb2\xb5\x06"
+ "\xc2\x97\x97\x9b\x09\x2f\x04\xfe"
+ "\x2c\xe7\xa3\xc4\x42\xe9\xa3\x40"
+ "\xa5\x52\x07\x2c\x3b\x89\x1a\xa5"
+ "\x28\xb1\x93\x05\x98\x0c\x2f\x3d"
+ "\xc6\xf5\x83\xac\x24\x1d\x28\x9f"
+ "\x32\x66\x4d\x70\xb7\xe0\xab\xb8"
+ "\x75\xc5\xf3\xd2\x7b\x26\x3e\xec"
+ "\x64\xe6\xf7\x70\xe7\xf8\x10\x8e"
+ "\x67\xd2\xb3\x87\x69\x40\x06\x9a"
+ "\x2f\x6a\x1a\xfd\x62\x0c\xee\x31"
+ "\x2e\xbe\x58\x97\x77\xd1\x09\x08"
+ "\x1f\x8d\x42\x29\x34\xd5\xd8\xb5"
+ "\x1f\xd7\x21\x18\xe3\xe7\x2e\x4a"
+ "\x42\xfc\xdb\x19\xe9\xee\xb9\x22"
+ "\xad\x5c\x07\xe9\xc8\x07\xe5\xe9"
+ "\x95\xa2\x0d\x30\x46\xe2\x65\x51"
+ "\x01\xa5\x74\x85\xe2\x52\x6e\x07"
+ "\xc9\xf5\x33\x09\xde\x78\x62\xa9"
+ "\x30\x2a\xd3\x86\xe5\x46\x2e\x60"
+ "\xff\x74\xb0\x5f\xec\x76\xb7\xd1"
+ "\x5e\x4d\x61\x97\x3c\x9c\x99\xc3"
+ "\x41\x65\x21\x47\xf9\xb1\x06\xec"
+ "\x18\xf8\x3f\xc7\x38\xfa\x7b\x14"
+ "\x62\x79\x6a\x0b\x0c\xf5\x2c\xb7"
+ "\xab\xcf\x63\x49\x6d\x1f\x46\xa8"
+ "\xbc\x7d\x42\x53\x75\x6b\xca\x38"
+ "\xac\x8b\xe7\xa1\xa1\x92\x19\x6b"
+ "\x0d\x75\x80\x5b\x7d\x35\x86\x70"
+ "\x12\x6b\xe5\x3e\xe5\x85\xa0\xa4"
+ "\xd6\x77\x5e\x4d\x24\x57\x84\xa9"
+ "\xe5\xa4\xbf\x25\xfb\x36\x65\x3b"
+ "\x81\x39\x61\xec\x5e\x4a\x7e\x10"
+ "\x58\x19\x13\x5c\x0f\x79\xec\xcf"
+ "\xbb\x5f\x69\x21\xc3\xa7\x5a\xff"
+ "\x3b\xc7\x85\x9b\x47\xbc\x3e\xad"
+ "\xbf\x54\x60\xb6\x5b\x3f\xfc\x50"
+ "\x68\x83\x76\x24\xb0\xc3\x3f\x93"
+ "\x0d\xce\x36\x0a\x58\x9d\xcc\xe9"
+ "\x52\xbb\xd0\x0b\x65\xe5\x0f\x62"
+ "\x82\x16\xaa\xd2\xba\x5a\x4c\xd0"
+ "\x67\xb5\x4e\x84\x1c\x02\x6e\xa3"
+ "\xaa\x22\x54\x96\xc8\xd9\x9c\x58"
+ "\x15\x63\xf4\x98\x1a\xa1\xd9\x11"
+ "\x64\x25\x56\xb5\x03\x8e\x29\x85"
+ "\x75\x88\xd1\xd2\xe4\xe6\x27\x48"
+ "\x13\x9c\x2b\xaa\xfb\xd3\x6e\x2c"
+ "\xe6\xd4\xe4\x8b\xd9\xf7\x01\x16"
+ "\x46\xf9\x5c\x88\x7a\x93\x9e\x2d"
+ "\xa6\xeb\x01\x2a\x72\xe4\x7f\xb4"
+ "\x78\x0c\x50\x18\xd3\x8e\x65\xa7"
+ "\x1b\xf9\x28\x5d\x89\x70\x96\x2f"
+ "\xa1\xc2\x9b\x34\xfc\x7c\x27\x63"
+ "\x93\xe6\xe3\xa4\x9d\x17\x97\x7e"
+ "\x13\x79\x9c\x4b\x2c\x23\x91\x2c"
+ "\x4f\xb1\x1d\x4b\xb4\x61\x6e\xe8"
+ "\x32\x35\xc3\x41\x7a\x50\x60\xc8"
+ "\x3e\xd8\x3f\x38\xfc\xc2\xa2\xe0"
+ "\x3a\x21\x25\x8f\xc2\x22\xed\x04"
+ "\x31\xb8\x72\x69\xaf\x6c\x6d\xab"
+ "\x25\x16\x95\x87\x92\xc7\x46\x3f"
+ "\x47\x05\x6c\xad\xa0\xa6\x1d\xf0"
+ "\x66\x2e\x01\x1a\xc3\xbe\xe4\xf6"
+ "\x51\xec\xa3\x95\x81\xe1\xcc\xab"
+ "\xc1\x71\x65\x0a\xe6\x53\xfb\xb8"
+ "\x53\x69\xad\x8b\xab\x8b\xa7\xcd"
+ "\x8f\x15\x01\x25\xb1\x1f\x9c\x3b"
+ "\x9b\x47\xad\x38\x38\x89\x6b\x1c"
+ "\x8a\x33\xdd\x8a\x06\x23\x06\x0b"
+ "\x7f\x70\xbe\x7e\xa1\x80\xbc\x7a",
+ .len = 1536,
+ }, {
+ .key = "\x60\xd5\x36\xb0\x8e\x5d\x0e\x5f"
+ "\x70\x47\x8c\xea\x87\x30\x1d\x58"
+ "\x2a\xb2\xe8\xc6\xcb\x60\xe7\x6f"
+ "\x56\x95\x83\x98\x38\x80\x84\x8a",
+ .klen = 32,
+ .iv = "\x43\xfe\x63\x3c\xdc\x9e\x0c\xa6"
+ "\xee\x9c\x0b\x97\x65\xc2\x56\x1d"
+ "\x5d\xd0\xbf\xa3\x9f\x1e\xfb\x78"
+ "\xbf\x51\x1b\x18\x73\x27\x27\x8c",
+ .ptext = "\x0b\x77\xd8\xa3\x8c\xa6\xb2\x2d"
+ "\x3e\xdd\xcc\x7c\x4a\x3e\x61\xc4"
+ "\x9a\x7f\x73\xb0\xb3\x29\x32\x61"
+ "\x13\x25\x62\xcc\x59\x4c\xf4\xdb"
+ "\xd7\xf5\xf4\xac\x75\x51\xb2\x83"
+ "\x64\x9d\x1c\x8b\xd1\x8b\x0c\x06"
+ "\xf1\x9f\xba\x9d\xae\x62\xd4\xd8"
+ "\x96\xbe\x3c\x4c\x32\xe4\x82\x44"
+ "\x47\x5a\xec\xb8\x8a\x5b\xd5\x35"
+ "\x57\x1e\x5c\x80\x6f\x77\xa9\xb9"
+ "\xf2\x4f\x71\x1e\x48\x51\x86\x43"
+ "\x0d\xd5\x5b\x52\x30\x40\xcd\xbb"
+ "\x2c\x25\xc1\x47\x8b\xb7\x13\xc2"
+ "\x3a\x11\x40\xfc\xed\x45\xa4\xf0"
+ "\xd6\xfd\x32\x99\x13\x71\x47\x2e"
+ "\x4c\xb0\x81\xac\x95\x31\xd6\x23"
+ "\xa4\x2f\xa9\xe8\x5a\x62\xdc\x96"
+ "\xcf\x49\xa7\x17\x77\x76\x8a\x8c"
+ "\x04\x22\xaf\xaf\x6d\xd9\x16\xba"
+ "\x35\x21\x66\x78\x3d\xb6\x65\x83"
+ "\xc6\xc1\x67\x8c\x32\xd6\xc0\xc7"
+ "\xf5\x8a\xfc\x47\xd5\x87\x09\x2f"
+ "\x51\x9d\x57\x6c\x29\x0b\x1c\x32"
+ "\x47\x6e\x47\xb5\xf3\x81\xc8\x82"
+ "\xca\x5d\xe3\x61\x38\xa0\xdc\xcc"
+ "\x35\x73\xfd\xb3\x92\x5c\x72\xd2"
+ "\x2d\xad\xf6\xcd\x20\x36\xff\x49"
+ "\x48\x80\x21\xd3\x2f\x5f\xe9\xd8"
+ "\x91\x20\x6b\xb1\x38\x52\x1e\xbc"
+ "\x88\x48\xa1\xde\xc0\xa5\x46\xce"
+ "\x9f\x32\x29\xbc\x2b\x51\x0b\xae"
+ "\x7a\x44\x4e\xed\xeb\x95\x63\x99"
+ "\x96\x87\xc9\x34\x02\x26\xde\x20"
+ "\xe4\xcb\x59\x0c\xb5\x55\xbd\x55"
+ "\x3f\xa9\x15\x25\xa7\x5f\xab\x10"
+ "\xbe\x9a\x59\x6c\xd5\x27\xf3\xf0"
+ "\x73\x4a\xb3\xe4\x08\x11\x00\xeb"
+ "\xf1\xae\xc8\x0d\xef\xcd\xb5\xfc"
+ "\x0d\x7e\x03\x67\xad\x0d\xec\xf1"
+ "\x9a\xfd\x31\x60\x3e\xa2\xfa\x1c"
+ "\x93\x79\x31\x31\xd6\x66\x7a\xbd"
+ "\x85\xfd\x22\x08\x00\xae\x72\x10"
+ "\xd6\xb0\xf4\xb8\x4a\x72\x5b\x9c"
+ "\xbf\x84\xdd\xeb\x13\x05\x28\xb7"
+ "\x61\x60\xfd\x7f\xf0\xbe\x4d\x18"
+ "\x7d\xc9\xba\xb0\x01\x59\x74\x18"
+ "\xe4\xf6\xa6\x74\x5d\x3f\xdc\xa0"
+ "\x9e\x57\x93\xbf\x16\x6c\xf6\xbd"
+ "\x93\x45\x38\x95\xb9\x69\xe9\x62"
+ "\x21\x73\xbd\x81\x73\xac\x15\x74"
+ "\x9e\x68\x28\x91\x38\xb7\xd4\x47"
+ "\xc7\xab\xc9\x14\xad\x52\xe0\x4c"
+ "\x17\x1c\x42\xc1\xb4\x9f\xac\xcc"
+ "\xc8\x12\xea\xa9\x9e\x30\x21\x14"
+ "\xa8\x74\xb4\x74\xec\x8d\x40\x06"
+ "\x82\xb7\x92\xd7\x42\x5b\xf2\xf9"
+ "\x6a\x1e\x75\x6e\x44\x55\xc2\x8d"
+ "\x73\x5b\xb8\x8c\x3c\xef\x97\xde"
+ "\x24\x43\xb3\x0e\xba\xad\x63\x63"
+ "\x16\x0a\x77\x03\x48\xcf\x02\x8d"
+ "\x76\x83\xa3\xba\x73\xbe\x80\x3f"
+ "\x8f\x6e\x76\x24\xc1\xff\x2d\xb4"
+ "\x20\x06\x9b\x67\xea\x29\xb5\xe0"
+ "\x57\xda\x30\x9d\x38\xa2\x7d\x1e"
+ "\x8f\xb9\xa8\x17\x64\xea\xbe\x04"
+ "\x84\xd1\xce\x2b\xfd\x84\xf9\x26"
+ "\x1f\x26\x06\x5c\x77\x6d\xc5\x9d"
+ "\xe6\x37\x76\x60\x7d\x3e\xf9\x02"
+ "\xba\xa6\xf3\x7f\xd3\x95\xb4\x0e"
+ "\x52\x1c\x6a\x00\x8f\x3a\x0b\xce"
+ "\x30\x98\xb2\x63\x2f\xff\x2d\x3b"
+ "\x3a\x06\x65\xaf\xf4\x2c\xef\xbb"
+ "\x88\xff\x2d\x4c\xa9\xf4\xff\x69"
+ "\x9d\x46\xae\x67\x00\x3b\x40\x94"
+ "\xe9\x7a\xf7\x0b\xb7\x3c\xa2\x2f"
+ "\xc3\xde\x5e\x29\x01\xde\xca\xfa"
+ "\xc6\xda\xd7\x19\xc7\xde\x4a\x16"
+ "\x93\x6a\xb3\x9b\x47\xe9\xd2\xfc"
+ "\xa1\xc3\x95\x9c\x0b\xa0\x2b\xd4"
+ "\xd3\x1e\xd7\x21\x96\xf9\x1e\xf4"
+ "\x59\xf4\xdf\x00\xf3\x37\x72\x7e"
+ "\xd8\xfd\x49\xd4\xcd\x61\x7b\x22"
+ "\x99\x56\x94\xff\x96\xcd\x9b\xb2"
+ "\x76\xca\x9f\x56\xae\x04\x2e\x75"
+ "\x89\x4e\x1b\x60\x52\xeb\x84\xf4"
+ "\xd1\x33\xd2\x6c\x09\xb1\x1c\x43"
+ "\x08\x67\x02\x01\xe3\x64\x82\xee"
+ "\x36\xcd\xd0\x70\xf1\x93\xd5\x63"
+ "\xef\x48\xc5\x56\xdb\x0a\x35\xfe"
+ "\x85\x48\xb6\x97\x97\x02\x43\x1f"
+ "\x7d\xc9\xa8\x2e\x71\x90\x04\x83"
+ "\xe7\x46\xbd\x94\x52\xe3\xc5\xd1"
+ "\xce\x6a\x2d\x6b\x86\x9a\xf5\x31"
+ "\xcd\x07\x9c\xa2\xcd\x49\xf5\xec"
+ "\x01\x3e\xdf\xd5\xdc\x15\x12\x9b"
+ "\x0c\x99\x19\x7b\x2e\x83\xfb\xd8"
+ "\x89\x3a\x1c\x1e\xb4\xdb\xeb\x23"
+ "\xd9\x42\xae\x47\xfc\xda\x37\xe0"
+ "\xd2\xb7\x47\xd9\xe8\xb5\xf6\x20"
+ "\x42\x8a\x9d\xaf\xb9\x46\x80\xfd"
+ "\xd4\x74\x6f\x38\x64\xf3\x8b\xed"
+ "\x81\x94\x56\xe7\xf1\x1a\x64\x17"
+ "\xd4\x27\x59\x09\xdf\x9b\x74\x05"
+ "\x79\x6e\x13\x29\x2b\x9e\x1b\x86"
+ "\x73\x9f\x40\xbe\x6e\xff\x92\x4e"
+ "\xbf\xaa\xf4\xd0\x88\x8b\x6f\x73"
+ "\x9d\x8b\xbf\xe5\x8a\x85\x45\x67"
+ "\xd3\x13\x72\xc6\x2a\x63\x3d\xb1"
+ "\x35\x7c\xb4\x38\xbb\x31\xe3\x77"
+ "\x37\xad\x75\xa9\x6f\x84\x4e\x4f"
+ "\xeb\x5b\x5d\x39\x6d\xed\x0a\xad"
+ "\x6c\x1b\x8e\x1f\x57\xfa\xc7\x7c"
+ "\xbf\xcf\xf2\xd1\x72\x3b\x70\x78"
+ "\xee\x8e\xf3\x4f\xfd\x61\x30\x9f"
+ "\x56\x05\x1d\x7d\x94\x9b\x5f\x8c"
+ "\xa1\x0f\xeb\xc3\xa9\x9e\xb8\xa0"
+ "\xc6\x4e\x1e\xb1\xbc\x0a\x87\xa8"
+ "\x52\xa9\x1e\x3d\x58\x8e\xc6\x95"
+ "\x85\x58\xa3\xc3\x3a\x43\x32\x50"
+ "\x6c\xb3\x61\xe1\x0c\x7d\x02\x63"
+ "\x5f\x8b\xdf\xef\x13\xf8\x66\xea"
+ "\x89\x00\x1f\xbd\x5b\x4c\xd5\x67"
+ "\x8f\x89\x84\x33\x2d\xd3\x70\x94"
+ "\xde\x7b\xd4\xb0\xeb\x07\x96\x98"
+ "\xc5\xc0\xbf\xc8\xcf\xdc\xc6\x5c"
+ "\xd3\x7d\x78\x30\x0e\x14\xa0\x86"
+ "\xd7\x8a\xb7\x53\xa3\xec\x71\xbf"
+ "\x85\xf2\xea\xbd\x77\xa6\xd1\xfd"
+ "\x5a\x53\x0c\xc3\xff\xf5\x1d\x46"
+ "\x37\xb7\x2d\x88\x5c\xeb\x7a\x0c"
+ "\x0d\x39\xc6\x40\x08\x90\x1f\x58"
+ "\x36\x12\x35\x28\x64\x12\xe7\xbb"
+ "\x50\xac\x45\x15\x7b\x16\x23\x5e"
+ "\xd4\x11\x2a\x8e\x17\x47\xe1\xd0"
+ "\x69\xc6\xd2\x5c\x2c\x76\xe6\xbb"
+ "\xf7\xe7\x34\x61\x8e\x07\x36\xc8"
+ "\xce\xcf\x3b\xeb\x0a\x55\xbd\x4e"
+ "\x59\x95\xc9\x32\x5b\x79\x7a\x86"
+ "\x03\x74\x4b\x10\x87\xb3\x60\xf6"
+ "\x21\xa4\xa6\xa8\x9a\xc9\x3a\x6f"
+ "\xd8\x13\xc9\x18\xd4\x38\x2b\xc2"
+ "\xa5\x7e\x6a\x09\x0f\x06\xdf\x53"
+ "\x9a\x44\xd9\x69\x2d\x39\x61\xb7"
+ "\x1c\x36\x7f\x9e\xc6\x44\x9f\x42"
+ "\x18\x0b\x99\xe6\x27\xa3\x1e\xa6"
+ "\xd0\xb9\x9a\x2b\x6f\x60\x75\xbd"
+ "\x52\x4a\x91\xd4\x7b\x8f\x95\x9f"
+ "\xdd\x74\xed\x8b\x20\x00\xdd\x08"
+ "\x6e\x5b\x61\x7b\x06\x6a\x19\x84"
+ "\x1c\xf9\x86\x65\xcd\x1c\x73\x3f"
+ "\x28\x5c\x8a\x93\x1a\xf3\xa3\x6c"
+ "\x6c\xa9\x7c\xea\x3c\xd4\x15\x45"
+ "\x7f\xbc\xe3\xbb\x42\xf0\x2e\x10"
+ "\xcd\x0c\x8b\x44\x1a\x82\x83\x0c"
+ "\x58\xb1\x24\x28\xa0\x11\x2f\x63"
+ "\xa5\x82\xc5\x9f\x86\x42\xf4\x4d"
+ "\x89\xdb\x76\x4a\xc3\x7f\xc4\xb8"
+ "\xdd\x0d\x14\xde\xd2\x62\x02\xcb"
+ "\x70\xb7\xee\xf4\x6a\x09\x12\x5e"
+ "\xd1\x26\x1a\x2c\x20\x71\x31\xef"
+ "\x7d\x65\x57\x65\x98\xff\x8b\x02"
+ "\x9a\xb5\xa4\xa1\xaf\x03\xc4\x50"
+ "\x33\xcf\x1b\x25\xfa\x7a\x79\xcc"
+ "\x55\xe3\x21\x63\x0c\x6d\xeb\x5b"
+ "\x1c\xad\x61\x0b\xbd\xb0\x48\xdb"
+ "\xb3\xc8\xa0\x87\x7f\x8b\xac\xfd"
+ "\xd2\x68\x9e\xb4\x11\x3c\x6f\xb1"
+ "\xfe\x25\x7d\x84\x5a\xae\xc9\x31"
+ "\xc3\xe5\x6a\x6f\xbc\xab\x41\xd9"
+ "\xde\xce\xf9\xfa\xd5\x7c\x47\xd2"
+ "\x66\x30\xc9\x97\xf2\x67\xdf\x59"
+ "\xef\x4e\x11\xbc\x4e\x70\xe3\x46"
+ "\x53\xbe\x16\x6d\x33\xfb\x57\x98"
+ "\x4e\x34\x79\x3b\xc7\x3b\xaf\x94"
+ "\xc1\x87\x4e\x47\x11\x1b\x22\x41"
+ "\x99\x12\x61\xe0\xe0\x8c\xa9\xbd"
+ "\x79\xb6\x06\x4d\x90\x3b\x0d\x30"
+ "\x1a\x00\xaa\x0e\xed\x7c\x16\x2f"
+ "\x0d\x1a\xfb\xf8\xad\x51\x4c\xab"
+ "\x98\x4c\x80\xb6\x92\x03\xcb\xa9"
+ "\x99\x9d\x16\xab\x43\x8c\x3f\x52"
+ "\x96\x53\x63\x7e\xbb\xd2\x76\xb7"
+ "\x6b\x77\xab\x52\x80\x33\xe3\xdf"
+ "\x4b\x3c\x23\x1a\x33\xe1\x43\x40"
+ "\x39\x1a\xe8\xbd\x3c\x6a\x77\x42"
+ "\x88\x9f\xc6\xaa\x65\x28\xf2\x1e"
+ "\xb0\x7c\x8e\x10\x41\x31\xe9\xd5"
+ "\x9d\xfd\x28\x7f\xfb\x61\xd3\x39"
+ "\x5f\x7e\xb4\xfb\x9c\x7d\x98\xb7"
+ "\x37\x2f\x18\xd9\x3b\x83\xaf\x4e"
+ "\xbb\xd5\x49\x69\x46\x93\x3a\x21"
+ "\x46\x1d\xad\x84\xb5\xe7\x8c\xff"
+ "\xbf\x81\x7e\x22\xf6\x88\x8c\x82"
+ "\xf5\xde\xfe\x18\xc9\xfb\x58\x07"
+ "\xe4\x68\xff\x9c\xf4\xe0\x24\x20"
+ "\x90\x92\x01\x49\xc2\x38\xe1\x7c"
+ "\xac\x61\x0b\x96\x36\xa4\x77\xe9"
+ "\x29\xd4\x97\xae\x15\x13\x7c\x6c"
+ "\x2d\xf1\xc5\x83\x97\x02\xa8\x2e"
+ "\x0b\x0f\xaf\xb5\x42\x18\x8a\x8c"
+ "\xb8\x28\x85\x28\x1b\x2a\x12\xa5"
+ "\x4b\x0a\xaf\xd2\x72\x37\x66\x23"
+ "\x28\xe6\x71\xa0\x77\x85\x7c\xff"
+ "\xf3\x8d\x2f\x0c\x33\x30\xcd\x7f"
+ "\x61\x64\x23\xb2\xe9\x79\x05\xb8"
+ "\x61\x47\xb1\x2b\xda\xf7\x9a\x24"
+ "\x94\xf6\xcf\x07\x78\xa2\x80\xaa"
+ "\x6e\xe9\x58\x97\x19\x0c\x58\x73"
+ "\xaf\xee\x2d\x6e\x26\x67\x18\x8a"
+ "\xc6\x6d\xf6\xbc\x65\xa9\xcb\xe7"
+ "\x53\xf1\x61\x97\x63\x52\x38\x86"
+ "\x0e\xdd\x33\xa5\x30\xe9\x9f\x32"
+ "\x43\x64\xbc\x2d\xdc\x28\x43\xd8"
+ "\x6c\xcd\x00\x2c\x87\x9a\x33\x79"
+ "\xbd\x63\x6d\x4d\xf9\x8a\x91\x83"
+ "\x9a\xdb\xf7\x9a\x11\xe1\xd1\x93"
+ "\x4a\x54\x0d\x51\x38\x30\x84\x0b"
+ "\xc5\x29\x8d\x92\x18\x6c\x28\xfe"
+ "\x1b\x07\x57\xec\x94\x74\x0b\x2c"
+ "\x21\x01\xf6\x23\xf9\xb0\xa0\xaf"
+ "\xb1\x3e\x2e\xa8\x0d\xbc\x2a\x68"
+ "\x59\xde\x0b\x2d\xde\x74\x42\xa1"
+ "\xb4\xce\xaf\xd8\x42\xeb\x59\xbd"
+ "\x61\xcc\x27\x28\xc6\xf2\xde\x3e"
+ "\x68\x64\x13\xd3\xc3\xc0\x31\xe0"
+ "\x5d\xf9\xb4\xa1\x09\x20\x46\x8b"
+ "\x48\xb9\x27\x62\x00\x12\xc5\x03"
+ "\x28\xfd\x55\x27\x1c\x31\xfc\xdb"
+ "\xc1\xcb\x7e\x67\x91\x2e\x50\x0c"
+ "\x61\xf8\x9f\x31\x26\x5a\x3d\x2e"
+ "\xa0\xc7\xef\x2a\xb6\x24\x48\xc9"
+ "\xbb\x63\x99\xf4\x7c\x4e\xc5\x94"
+ "\x99\xd5\xff\x34\x93\x8f\x31\x45"
+ "\xae\x5e\x7b\xfd\xf4\x81\x84\x65"
+ "\x5b\x41\x70\x0b\xe5\xaa\xec\x95"
+ "\x6b\x3d\xe3\xdc\x12\x78\xf8\x28"
+ "\x26\xec\x3a\x64\xc4\xab\x74\x97"
+ "\x3d\xcf\x21\x7d\xcf\x59\xd3\x15"
+ "\x47\x94\xe4\xd9\x48\x4c\x02\x49"
+ "\x68\x50\x22\x16\x96\x2f\xc4\x23"
+ "\x80\x47\x27\xd1\xee\x10\x3b\xa7"
+ "\x19\xae\xe1\x40\x5f\x3a\xde\x5d"
+ "\x97\x1c\x59\xce\xe1\xe7\x32\xa7"
+ "\x20\x89\xef\x44\x22\x38\x3c\x14"
+ "\x99\x3f\x1b\xd6\x37\xfe\x93\xbf"
+ "\x34\x13\x86\xd7\x9b\xe5\x2a\x37"
+ "\x72\x16\xa4\xdf\x7f\xe4\xa4\x66"
+ "\x9d\xf2\x0b\x29\xa1\xe2\x9d\x36"
+ "\xe1\x9d\x56\x95\x73\xe1\x91\x58"
+ "\x0f\x64\xf8\x90\xbb\x0c\x48\x0f"
+ "\xf5\x52\xae\xd9\xeb\x95\xb7\xdd"
+ "\xae\x0b\x20\x55\x87\x3d\xf0\x69"
+ "\x3c\x0a\x54\x61\xea\x00\xbd\xba"
+ "\x5f\x7e\x25\x8c\x3e\x61\xee\xb2"
+ "\x1a\xc8\x0e\x0b\xa5\x18\x49\xf2"
+ "\x6e\x1d\x3f\x83\xc3\xf1\x1a\xcb"
+ "\x9f\xc9\x82\x4e\x7b\x26\xfd\x68"
+ "\x28\x25\x8d\x22\x17\xab\xf8\x4e"
+ "\x1a\xa9\x81\x48\xb0\x9f\x52\x75"
+ "\xe4\xef\xdd\xbd\x5b\xbe\xab\x3c"
+ "\x43\x76\x23\x62\xce\xb8\xc2\x5b"
+ "\xc6\x31\xe6\x81\xb4\x42\xb2\xfd"
+ "\xf3\x74\xdd\x02\x3c\xa0\xd7\x97"
+ "\xb0\xe7\xe9\xe0\xce\xef\xe9\x1c"
+ "\x09\xa2\x6d\xd3\xc4\x60\xd6\xd6"
+ "\x9e\x54\x31\x45\x76\xc9\x14\xd4"
+ "\x95\x17\xe9\xbe\x69\x92\x71\xcb"
+ "\xde\x7c\xf1\xbd\x2b\xef\x8d\xaf"
+ "\x51\xe8\x28\xec\x48\x7f\xf8\xfa"
+ "\x9f\x9f\x5e\x52\x61\xc3\xfc\x9a"
+ "\x7e\xeb\xe3\x30\xb6\xfe\xc4\x4a"
+ "\x87\x1a\xff\x54\x64\xc7\xaa\xa2"
+ "\xfa\xb7\xb2\xe7\x25\xce\x95\xb4"
+ "\x15\x93\xbd\x24\xb6\xbc\xe4\x62"
+ "\x93\x7f\x44\x40\x72\xcb\xfb\xb2"
+ "\xbf\xe8\x03\xa5\x87\x12\x27\xfd"
+ "\xc6\x21\x8a\x8f\xc2\x48\x48\xb9"
+ "\x6b\xb6\xf0\xf0\x0e\x0a\x0e\xa4"
+ "\x40\xa9\xd8\x23\x24\xd0\x7f\xe2"
+ "\xf9\xed\x76\xf0\x91\xa5\x83\x3c"
+ "\x55\xe1\x92\xb8\xb6\x32\x9e\x63"
+ "\x60\x81\x75\x29\x9e\xce\x2a\x70"
+ "\x28\x0c\x87\xe5\x46\x73\x76\x66"
+ "\xbc\x4b\x6c\x37\xc7\xd0\x1a\xa0"
+ "\x9d\xcf\x04\xd3\x8c\x42\xae\x9d"
+ "\x35\x5a\xf1\x40\x4c\x4e\x81\xaa"
+ "\xfe\xd5\x83\x4f\x29\x19\xf3\x6c"
+ "\x9e\xd0\x53\xe5\x05\x8f\x14\xfb"
+ "\x68\xec\x0a\x3a\x85\xcd\x3e\xb4"
+ "\x4a\xc2\x5b\x92\x2e\x0b\x58\x64"
+ "\xde\xca\x64\x86\x53\xdb\x7f\x4e"
+ "\x54\xc6\x5e\xaa\xe5\x82\x3b\x98"
+ "\x5b\x01\xa7\x1f\x7b\x3d\xcc\x19"
+ "\xf1\x11\x02\x64\x09\x25\x7c\x26"
+ "\xee\xad\x50\x68\x31\x26\x16\x0f"
+ "\xb6\x7b\x6f\xa2\x17\x1a\xba\xbe"
+ "\xc3\x60\xdc\xd2\x44\xe0\xb4\xc4"
+ "\xfe\xff\x69\xdb\x60\xa6\xaf\x39"
+ "\x0a\xbd\x6e\x41\xd1\x9f\x87\x71"
+ "\xcc\x43\xa8\x47\x10\xbc\x2b\x7d"
+ "\x40\x12\x43\x31\xb8\x12\xe0\x95"
+ "\x6f\x9d\xf8\x75\x51\x3d\x61\xbe"
+ "\xa0\xd1\x0b\x8d\x50\xc7\xb8\xe7"
+ "\xab\x03\xda\x41\xab\xc5\x4e\x33"
+ "\x5a\x63\x94\x90\x22\x72\x54\x26"
+ "\x93\x65\x99\x45\x55\xd3\x55\x56"
+ "\xc5\x39\xe4\xb4\xb1\xea\xd8\xf9"
+ "\xb5\x31\xf7\xeb\x80\x1a\x9e\x8d"
+ "\xd2\x40\x01\xea\x33\xb9\xf2\x7a"
+ "\x43\x41\x72\x0c\xbf\x20\xab\xf7"
+ "\xfa\x65\xec\x3e\x35\x57\x1e\xef"
+ "\x2a\x81\xfa\x10\xb2\xdb\x8e\xfa"
+ "\x7f\xe7\xaf\x73\xfc\xbb\x57\xa2"
+ "\xaf\x6f\x41\x11\x30\xd8\xaf\x94"
+ "\x53\x8d\x4c\x23\xa5\x20\x63\xcf"
+ "\x0d\x00\xe0\x94\x5e\x92\xaa\xb5"
+ "\xe0\x4e\x96\x3c\xf4\x26\x2f\xf0"
+ "\x3f\xd7\xed\x75\x2c\x63\xdf\xc8"
+ "\xfb\x20\xb5\xae\x44\x83\xc0\xab"
+ "\x05\xf9\xbb\xa7\x62\x7d\x21\x5b"
+ "\x04\x80\x93\x84\x5f\x1d\x9e\xcd"
+ "\xa2\x07\x7e\x22\x2f\x55\x94\x23"
+ "\x74\x35\xa3\x0f\x03\xbe\x07\x62"
+ "\xe9\x16\x69\x7e\xae\x38\x0e\x9b"
+ "\xad\x6e\x83\x90\x21\x10\xb8\x07"
+ "\xdc\xc1\x44\x20\xa5\x88\x00\xdc"
+ "\xe1\x82\x16\xf1\x0c\xdc\xed\x8c"
+ "\x32\xb5\x49\xab\x11\x41\xd5\xd2"
+ "\x35\x2c\x70\x73\xce\xeb\xe3\xd6"
+ "\xe4\x7d\x2c\xe8\x8c\xec\x8a\x92"
+ "\x50\x87\x51\xbd\x2d\x9d\xf2\xf0"
+ "\x3c\x7d\xb1\x87\xf5\x01\xb0\xed"
+ "\x02\x5a\x20\x4d\x43\x08\x71\x49"
+ "\x77\x72\x9b\xe6\xef\x30\xc9\xa2"
+ "\x66\x66\xb8\x68\x9d\xdf\xc6\x16"
+ "\xa5\x78\xee\x3c\x47\xa6\x7a\x31"
+ "\x07\x6d\xce\x7b\x86\xf8\xb2\x31"
+ "\xa8\xa4\x77\x3c\x63\x36\xe8\xd3"
+ "\x7d\x40\x56\xd8\x48\x56\x9e\x3e"
+ "\x56\xf6\x3d\xd2\x12\x6e\x35\x29"
+ "\xd4\x7a\xdb\xff\x97\x4c\xeb\x3c"
+ "\x28\x2a\xeb\xe9\x43\x40\x61\x06"
+ "\xb8\xa8\x6d\x18\xc8\xbc\xc7\x23"
+ "\x53\x2b\x8b\xcc\xce\x88\xdf\xf8"
+ "\xff\xf8\x94\xe4\x5c\xee\xcf\x39"
+ "\xe0\xf6\x1a\xae\xf2\xd5\x41\x6a"
+ "\x09\x5a\x50\x66\xc4\xf4\x66\xdc"
+ "\x6a\x69\xee\xc8\x47\xe6\x87\x52"
+ "\x9e\x28\xe4\x39\x02\x0d\xc4\x7e"
+ "\x18\xe6\xc6\x09\x07\x03\x30\xb9"
+ "\xd1\xb0\x48\xe6\x80\xe8\x8c\xe6"
+ "\xc7\x2c\x33\xca\x64\xe5\xc0\x6e"
+ "\xac\x14\x4b\xe1\xf6\xeb\xce\xe4"
+ "\xc1\x8c\xea\x5b\x8d\x3c\x86\x91"
+ "\xd1\xd7\x16\x9c\x09\x9c\x6a\x51"
+ "\xe5\xcd\xe3\xb0\x33\x1f\x03\xcd"
+ "\xe5\xd8\x40\x9b\xdc\x29\xbe\xfa"
+ "\x24\xcc\xf1\x55\x68\x3a\x89\x0d"
+ "\x08\x48\xfd\x9b\x47\x41\x10\xae"
+ "\x53\x3a\x83\x87\xd4\x89\xe7\x38"
+ "\x47\xee\xd7\xbe\xe2\x58\x37\xd2"
+ "\xfc\x21\x1d\x20\xa5\x2d\x69\x0c"
+ "\x36\x5b\x2f\xcd\xa1\xa6\xe4\xa1"
+ "\x00\x4d\xf7\xc8\x2d\xc7\x16\x6c"
+ "\x6d\xad\x32\x8c\x8f\x74\xf9\xfa"
+ "\x78\x1c\x9a\x0f\x6e\x93\x9c\x20"
+ "\x43\xb9\xe4\xda\xc4\xc7\x90\x47"
+ "\x86\x68\xb7\x6f\x82\x59\x4a\x30"
+ "\xf1\xfd\x31\x0f\xa1\xea\x9b\x6b"
+ "\x18\x5c\x39\xb0\xc7\x80\x64\xff"
+ "\x6d\x5b\xb4\x8b\xba\x90\xea\x4e"
+ "\x9a\x04\xd2\x68\x18\x50\xb5\x91"
+ "\x45\x4f\x58\x5a\xe5\xc6\x7c\xab"
+ "\x61\x3e\x3d\xec\x18\x87\xfc\xea"
+ "\x26\x35\x4c\x99\x8a\x3f\x00\x7b"
+ "\xf5\x89\x62\xda\xdd\xf1\x43\xef"
+ "\x2c\x1d\x92\xfa\x9a\xd0\x37\x03"
+ "\x69\x9c\xd8\x1f\x41\x44\xb7\x73"
+ "\x54\x14\x91\x12\x41\x41\x54\xa2"
+ "\x91\x55\xb6\xf7\x23\x41\xc9\xc2"
+ "\x5b\x53\xf2\x61\x63\x0d\xa9\x87"
+ "\x1a\xbb\x11\x1f\x3c\xbb\xa8\x1f"
+ "\xe2\x66\x56\x88\x06\x3c\xd2\x0f"
+ "\x3b\xc4\xd6\x8c\xbe\x54\x9f\xa8"
+ "\x9c\x89\xfb\x88\x05\xef\xcd\xe7"
+ "\xc1\xc4\x21\x36\x22\x8d\x9a\x5d"
+ "\x1b\x1e\x4a\xc0\x89\xdd\x76\x16"
+ "\x5a\xce\xcd\x1e\x6a\x1f\xa0\x2b"
+ "\x83\xf6\x5e\x28\x8e\x65\xb5\x86"
+ "\x72\x8f\xc5\xf2\x54\x81\x10\x8d"
+ "\x63\x7b\x42\x7d\x06\x08\x16\xb3"
+ "\xb0\x60\x65\x41\x49\xdb\x0d\xc1"
+ "\xe2\xef\x72\x72\x06\xe7\x60\x5c"
+ "\x95\x1c\x7d\x52\xec\x82\xee\xd3"
+ "\x5b\xab\x61\xa4\x1f\x61\x64\x0c"
+ "\x28\x32\x21\x7a\x81\xe7\x81\xf3"
+ "\xdb\xc0\x18\xd9\xae\x0b\x3c\x9a"
+ "\x58\xec\x70\x4f\x40\x25\x2b\xba"
+ "\x96\x59\xac\x34\x45\x29\xc6\x57"
+ "\xc1\xc3\x93\x60\x77\x92\xbb\x83"
+ "\x8a\xa7\x72\x45\x2a\xc9\x35\xe7"
+ "\x66\xd6\xa9\xe9\x43\x87\x20\x11"
+ "\x6a\x2f\x87\xac\xe0\x93\x82\xe5"
+ "\x6c\x57\xa9\x4c\x9e\x56\x57\x33"
+ "\x1c\xd8\x7e\x25\x27\x41\x89\x97"
+ "\xea\xa5\x56\x02\x5b\x93\x13\x46"
+ "\xdc\x53\x3d\x95\xef\xaf\x9f\xf0"
+ "\x0a\x8a\xfe\x0c\xbf\xf0\x25\x5f"
+ "\xb4\x9f\x1b\x72\x9c\x37\xba\x46"
+ "\x4e\xcc\xcc\x02\x5c\xec\x3f\x98"
+ "\xff\x56\x1a\xc2\x7a\x65\x8f\xf6"
+ "\xd2\x81\x37\x7a\x0a\xfc\x79\xb9"
+ "\xcb\x8c\xc8\x1a\xd0\xba\x5d\x55"
+ "\xbc\x6d\x2e\xb2\x2f\x75\x29\x3f"
+ "\x1a\x4b\xa8\xd7\xe8\xf6\xf4\x2a"
+ "\xa5\xa1\x68\xec\xf3\xd5\xdd\x0f"
+ "\xad\x57\xae\x98\x83\xd5\x92\x4e"
+ "\x76\x86\x8e\x5e\x4b\x87\x7b\xf7"
+ "\x2d\x79\x3f\x12\x6a\x24\x58\xc8"
+ "\xab\x9a\x65\x75\x82\x6f\xa5\x39"
+ "\x72\xb0\xdf\x93\xb5\xa2\xf3\xdd"
+ "\x1f\x32\xfa\xdb\xfe\x1b\xbf\x0a"
+ "\xd9\x95\xdd\x02\xf1\x23\x54\xb1"
+ "\xa5\xbb\x24\x04\x5c\x2a\x97\x92"
+ "\xe6\xe0\x10\x61\xe3\x46\xc7\x0c"
+ "\xcb\xbc\x51\x9a\x35\x16\xd9\x42"
+ "\x62\xb3\x5e\xa4\x3c\x84\xa0\x7f"
+ "\xb8\x7f\x70\xd1\x8b\x03\xdf\x27"
+ "\x32\x06\x3f\x12\x23\x19\x22\x82"
+ "\x2d\x37\xa5\x00\x31\x9b\xa9\x21"
+ "\x8e\x34\x8c\x8e\x4f\xe8\xd4\x63"
+ "\x6c\xb2\xa9\x6e\xf6\x7c\x96\xf1"
+ "\x0e\x64\xab\x14\x3d\x8f\x74\xb3"
+ "\x35\x79\x84\x78\x06\x68\x97\x30"
+ "\xe0\x22\x55\xd6\xc5\x5b\x38\xb2"
+ "\x75\x24\x0c\x52\xb6\x57\xcc\x0a"
+ "\xbd\x3c\xd0\x73\x47\xd1\x25\xd6"
+ "\x1c\xfd\x27\x05\x3f\x70\xe1\xa7"
+ "\x69\x3b\xee\xc9\x9f\xfd\x2a\x7e"
+ "\xab\x58\xe6\x0b\x35\x5e\x52\xf9"
+ "\xff\xac\x5b\x82\x88\xa7\x65\xbc"
+ "\x61\x29\xdc\xa1\x94\x42\xd1\xd3"
+ "\xa0\xd8\xba\x3b\x49\xc8\xa7\xce"
+ "\x01\x6c\xb7\x3f\xe3\x98\x4d\xd1"
+ "\x9f\x46\x0d\xb3\xf2\x43\x33\x49"
+ "\xb7\x27\xbd\xba\xcc\x3f\x09\x56"
+ "\xfa\x64\x18\xb8\x17\x28\xde\x0d"
+ "\x29\xfa\x1f\xad\x60\x3b\x90\xa7"
+ "\x05\x9f\x4c\xc4\xdc\x05\x3b\x17"
+ "\x58\xea\x99\xfd\x6b\x8a\x93\x77"
+ "\xa5\x44\xbd\x8d\x29\x44\x29\x89"
+ "\x52\x1d\x89\x8b\x44\x8f\xb9\x68"
+ "\xeb\x93\xfd\x92\xd9\x14\x35\x9c"
+ "\x28\x3a\x9f\x1d\xd8\xe0\x2a\x76"
+ "\x51\xc1\xf0\xa9\x1d\xb4\xf8\xb9"
+ "\xfc\x14\x78\x5a\xa2\xb1\xdb\x94"
+ "\xcb\x18\xb9\x34\xbd\x0c\x65\x1d"
+ "\x64\xde\xd0\x3a\xe4\x68\x0e\xbc"
+ "\x13\xa7\x47\x89\x62\xa3\x03\x19"
+ "\x64\xa1\x02\x27\x3a\x8d\x43\xfa"
+ "\x68\xff\xda\x8b\x40\xe9\x19\x8b"
+ "\x56\xbe\x1c\x9b\xe6\xf6\x3f\x60"
+ "\xdb\x7a\xd5\xab\x82\xd8\xd9\x99"
+ "\xe3\x5b\x0c\x0c\x69\x18\x5c\xed"
+ "\x03\xf9\xc1\x61\xc4\x7b\xd4\x90"
+ "\x43\xc3\x39\xec\xac\xcb\x1f\x4b"
+ "\x23\xf8\xa9\x98\x2f\xf6\x48\x90"
+ "\x6c\x2b\x94\xad\x14\xdd\xcc\xa2"
+ "\x3d\xc7\x86\x0f\x7f\x1c\x0b\x93"
+ "\x4b\x74\x1f\x80\x75\xb4\x91\xdf"
+ "\xa8\x26\xf9\x06\x2b\x3a\x2c\xfd"
+ "\x3c\x31\x40\x1e\x5b\xa6\x86\x01"
+ "\xc4\xa2\x80\x4f\xf5\xa2\xf4\xff"
+ "\xf6\x07\x8c\x92\xf7\x74\xbd\x42"
+ "\xb0\x3f\x6b\x05\xca\x40\xeb\x04"
+ "\x20\xa9\x37\x78\x32\x03\x60\xcc"
+ "\xf3\xec\xb2\x2d\xb5\x80\x7c\xe4"
+ "\x37\x53\x25\xd1\xe8\x91\x6a\xe5"
+ "\xdf\xdd\xb0\xab\x69\xc7\xa1\xb2"
+ "\xfc\xb3\xd1\x9e\xda\xa8\x0d\x68"
+ "\xfe\x7d\xdc\x56\x33\x65\x99\xd2"
+ "\xec\xa5\xa0\xa1\x26\xc9\xec\xbd"
+ "\x22\x20\x5e\x0d\xcb\x93\x64\x7a"
+ "\x56\x75\xed\xe5\x45\xa2\xbd\x16"
+ "\x59\xf7\x43\xd9\x5b\x2c\xdd\xb6"
+ "\x1d\xa8\x05\x89\x2f\x65\x2e\x66"
+ "\xfe\xad\x93\xeb\x85\x8f\xe8\x4c"
+ "\x00\x44\x71\x03\x0e\x26\xaf\xfd"
+ "\xfa\x56\x0f\xdc\x9c\xf3\x2e\xab"
+ "\x88\x26\x61\xc6\x13\xfe\xba\xc1"
+ "\xd8\x8a\x38\xc3\xb6\x4e\x6d\x80"
+ "\x4c\x65\x93\x2f\xf5\x54\xff\x63"
+ "\xbe\xdf\x9a\xe3\x4f\xca\xc9\x71"
+ "\x12\xab\x95\x66\xec\x09\x64\xea"
+ "\xdc\x9f\x01\x61\x24\x88\xd1\xa7"
+ "\xd0\x69\x26\xf0\x80\xb0\xec\x86"
+ "\xc2\x58\x2f\x6a\xc5\xfd\xfc\x2a"
+ "\xf6\x3e\x23\x77\x3b\x7e\xc5\xc5"
+ "\xe7\xf9\x4d\xcc\x68\x53\x11\xc8"
+ "\x5b\x44\xbd\x48\x0f\xb3\x35\x1a"
+ "\x93\x4a\x80\x16\xa3\x0d\x50\x85"
+ "\xa6\xc4\xd4\x74\x4d\x87\x59\x51"
+ "\xd7\xf7\x7d\xee\xd0\x9b\xd1\x83"
+ "\x25\x2b\xc6\x39\x27\x6a\xb3\x41"
+ "\x5f\xd2\x24\xd4\xd6\xfa\x8c\x3e"
+ "\xb2\xf9\x11\x71\x7a\x9e\x5e\x7b"
+ "\x5b\x9a\x47\x80\xca\x1c\xbe\x04"
+ "\x5d\x34\xc4\xa2\x2d\x41\xfe\x73"
+ "\x53\x15\x9f\xdb\xe7\x7d\x82\x19"
+ "\x21\x1b\x67\x2a\x74\x7a\x21\x4a"
+ "\xc4\x96\x6f\x00\x92\x69\xf1\x99"
+ "\x50\xf1\x4a\x16\x11\xf1\x16\x51",
+ .ctext = "\x57\xd1\xcf\x26\xe5\x07\x7a\x3f"
+ "\xa5\x5e\xd4\xa8\x12\xe9\x4e\x36"
+ "\x9c\x28\x65\xe0\xbd\xef\xf1\x49"
+ "\x04\xd4\xd4\x01\x4d\xf5\xfc\x2a"
+ "\x32\xd8\x19\x21\xcd\x58\x2a\x1a"
+ "\x43\x78\xa4\x57\x69\xa0\x52\xeb"
+ "\xcd\xa5\x9c\x4d\x03\x28\xef\x8b"
+ "\x54\xc6\x6c\x31\xab\x3e\xaf\x6d"
+ "\x0a\x87\x83\x3d\xb7\xea\x6b\x3d"
+ "\x11\x58\x7d\x5f\xaf\xc9\xfc\x50"
+ "\x58\x9a\x84\xa1\xcf\x76\xdc\x77"
+ "\x83\x9a\x28\x74\x69\xc9\x0c\xc2"
+ "\x7b\x1e\x4e\xe4\x25\x41\x23\x0d"
+ "\x4e\x0e\x2d\x7a\x87\xaa\x0f\x7c"
+ "\x98\xad\xf0\x6f\xbf\xcb\xd5\x1a"
+ "\x3e\xcf\x0e\xc5\xde\xbd\x8d\xf1"
+ "\xaa\x19\x16\xb8\xc5\x25\x02\x33"
+ "\xbd\x5a\x85\xe2\xc0\x77\x71\xda"
+ "\x12\x4c\xdf\x7f\xce\xc0\x32\x95"
+ "\x1a\xde\xcb\x0a\x70\xd0\x9e\x89"
+ "\xc5\x97\x18\x04\xab\x8c\x38\x56"
+ "\x69\xe5\xf6\xa5\x76\x2c\x52\x7a"
+ "\x49\xd2\x9a\x95\xa6\xa8\x82\x42"
+ "\x20\x1f\x58\x57\x4e\x22\xdb\x92"
+ "\xec\xbd\x4a\x21\x66\x9b\x7a\xcb"
+ "\x73\xcd\x6d\x15\x07\xc9\x97\xb8"
+ "\x11\x35\xee\x29\xa4\x90\xfc\x46"
+ "\x0f\x39\x56\xc6\x4a\x3a\xcf\xcc"
+ "\xb1\xbf\x62\x1c\x16\xc5\x12\x6c"
+ "\x0e\x69\x89\xce\xcf\x11\x4e\xe5"
+ "\x7e\x4e\x7c\x8f\xb4\xc9\xe6\x54"
+ "\x42\x89\x28\x27\xe6\xec\x50\xb7"
+ "\x69\x91\x44\x3e\x46\xd4\x64\xf6"
+ "\x25\x4c\x4d\x2f\x60\xd9\x9a\xd3"
+ "\x1c\x70\xf4\xd8\x24\x1e\xdb\xcf"
+ "\xa8\xc0\x22\xe6\x82\x57\xf6\xf0"
+ "\xe1\x1e\x38\x66\xec\xdc\x20\xdb"
+ "\x6a\x57\x68\xb1\x43\x61\xe1\x12"
+ "\x18\x5f\x31\x57\x39\xcb\xea\x3c"
+ "\x6e\x5d\x9a\xe0\xa6\x70\x4d\xd8"
+ "\xf9\x47\x4e\xef\x31\xa5\x66\x9b"
+ "\xb7\xf1\xd9\x59\x85\xfc\xdb\x7e"
+ "\xa2\x7a\x70\x25\x0c\xfd\x18\x0d"
+ "\x00\x42\xc9\x48\x8a\xbd\x74\xc5"
+ "\x3e\xe1\x20\x5a\x5d\x2e\xe5\x32"
+ "\x1d\x1c\x08\x65\x80\x69\xae\x24"
+ "\x80\xde\xb6\xdf\x97\xaa\x42\x8d"
+ "\xce\x39\x07\xe6\x69\x94\x5a\x75"
+ "\x39\xda\x5e\x1a\xed\x4a\x4c\x23"
+ "\x66\x1f\xf3\xb1\x6e\x8f\x21\x94"
+ "\x45\xc4\x63\xbd\x06\x93\x5e\x30"
+ "\xe7\x8f\xcb\xe0\xbb\x2a\x27\xcf"
+ "\x57\xa9\xa6\x28\xaf\xae\xcb\xa5"
+ "\x7b\x36\x61\x77\x3a\x4f\xec\x51"
+ "\x71\xfd\x52\x9e\x32\x7b\x98\x09"
+ "\xae\x27\xbc\x93\x96\xab\xb6\x02"
+ "\xf7\x21\xd3\x42\x00\x7e\x7a\x92"
+ "\x17\xfe\x1b\x3d\xcf\xb6\xfe\x1e"
+ "\x40\xc3\x10\x25\xac\x22\x9e\xcc"
+ "\xc2\x02\x61\xf5\x0a\x4b\xc3\xec"
+ "\xb1\x44\x06\x05\xb8\xd6\xcb\xd5"
+ "\xf1\xf5\xb5\x65\xbc\x1a\x19\xa2"
+ "\x7d\x60\x87\x11\x06\x83\x25\xe3"
+ "\x5e\xf0\xeb\x15\x93\xb6\x8e\xab"
+ "\x49\x52\xe8\xdb\xde\xd1\x8e\xa2"
+ "\x3a\x64\x13\x30\xaa\x20\xaf\x81"
+ "\x8d\x3c\x24\x2a\x76\x6d\xca\x32"
+ "\x63\x51\x6b\x8e\x4b\xa7\xf6\xad"
+ "\xa5\x94\x16\x82\xa6\x97\x3b\xe5"
+ "\x41\xcd\x87\x33\xdc\xc1\x48\xca"
+ "\x4e\xa2\x82\xad\x8e\x1b\xae\xcb"
+ "\x12\x93\x27\xa3\x2b\xfa\xe6\x26"
+ "\x43\xbd\xb0\x00\x01\x22\x1d\xd3"
+ "\x28\x9d\x69\xe0\xd4\xf8\x5b\x01"
+ "\x40\x7d\x54\xe5\xe2\xbd\x78\x5a"
+ "\x0e\xab\x51\xfc\xd4\xde\xba\xbc"
+ "\xa4\x7a\x74\x6d\xf8\x36\xc2\x70"
+ "\x03\x27\x36\xa2\xc0\xde\xf2\xc7"
+ "\x55\xd4\x66\xee\x9a\x9e\xaa\x99"
+ "\x2b\xeb\xa2\x6f\x17\x80\x60\x64"
+ "\xed\x73\xdb\xc1\x70\xda\xde\x67"
+ "\xcd\x6e\xc9\xfa\x3f\xef\x49\xd9"
+ "\x18\x42\xf1\x87\x6e\x2c\xac\xe1"
+ "\x12\x26\x52\xbe\x3e\xf1\xcc\x85"
+ "\x9a\xd1\x9e\xc1\x02\xd3\xca\x2b"
+ "\x99\xe7\xe8\x95\x7f\x91\x4b\xc0"
+ "\xab\xd4\x5a\xf7\x88\x1c\x7e\xea"
+ "\xd3\x15\x38\x26\xb5\xa3\xf2\xfc"
+ "\xc4\x12\x70\x5a\x37\x83\x49\xac"
+ "\xf4\x5e\x4c\xc8\x64\x03\x98\xad"
+ "\xd2\xbb\x8d\x90\x01\x80\xa1\x2a"
+ "\x23\xd1\x8d\x26\x43\x7d\x2b\xd0"
+ "\x87\xe1\x8e\x6a\xb3\x73\x9d\xc2"
+ "\x66\x75\xee\x2b\x41\x1a\xa0\x3b"
+ "\x1b\xdd\xb9\x21\x69\x5c\xef\x52"
+ "\x21\x57\xd6\x53\x31\x67\x7e\xd1"
+ "\xd0\x67\x8b\xc0\x97\x2c\x0a\x09"
+ "\x1d\xd4\x35\xc5\xd4\x11\x68\xf8"
+ "\x5e\x75\xaf\x0c\xc3\x9d\xa7\x09"
+ "\x38\xf5\x77\xb9\x80\xa9\x6b\xbd"
+ "\x0c\x98\xb4\x8d\xf0\x35\x5a\x19"
+ "\x1d\xf8\xb3\x5b\x45\xad\x4e\x4e"
+ "\xd5\x59\xf5\xd7\x53\x63\x3e\x97"
+ "\x7f\x91\x50\x65\x61\x21\xa9\xb7"
+ "\x65\x12\xdc\x01\x56\x40\xe0\xb1"
+ "\xe1\x23\xba\x9d\xb9\xc4\x8b\x1f"
+ "\xa6\xfe\x24\x19\xe9\x42\x9f\x9b"
+ "\x02\x48\xaa\x60\x0b\xf5\x7f\x8f"
+ "\x35\x70\xed\x85\xb8\xc4\xdc\xb7"
+ "\x16\xb7\x03\xe0\x2e\xa0\x25\xab"
+ "\x02\x1f\x97\x8e\x5a\x48\xb6\xdb"
+ "\x25\x7a\x16\xf6\x4c\xec\xec\xa6"
+ "\xc1\x4e\xe3\x4e\xe3\x27\x78\xc8"
+ "\xb6\xd7\x01\x61\x98\x1b\x38\xaa"
+ "\x36\x93\xac\x6d\x05\x61\x4d\x5a"
+ "\xc9\xe5\x27\xa9\x22\xf2\x38\x5e"
+ "\x9e\xe5\xf7\x4a\x64\xd2\x14\x15"
+ "\x71\x7c\x65\x6e\x90\x31\xc7\x49"
+ "\x25\xec\x9f\xf1\xb2\xd6\xbc\x20"
+ "\x6a\x13\xd5\x70\x65\xfc\x8b\x66"
+ "\x2c\xf1\x57\xc2\xe7\xb8\x89\xf7"
+ "\x17\xb2\x45\x64\xe0\xb3\x8c\x0d"
+ "\x69\x57\xf9\x5c\xff\xc2\x3c\x18"
+ "\x1e\xfd\x4b\x5e\x0d\x20\x01\x1a"
+ "\xa3\xa3\xb3\x76\x98\x9c\x92\x41"
+ "\xb4\xcd\x9f\x8f\x88\xcb\xb1\xb5"
+ "\x25\x87\x45\x4c\x07\xa7\x15\x99"
+ "\x24\x85\x15\x9e\xfc\x28\x98\x2b"
+ "\xd0\x22\x0a\xcc\x62\x12\x86\x0a"
+ "\xa8\x0e\x7d\x15\x32\x98\xae\x2d"
+ "\x95\x25\x55\x33\x41\x5b\x8d\x75"
+ "\x46\x61\x01\xa4\xfb\xf8\x6e\xe5"
+ "\xec\x24\xfe\xd2\xd2\x46\xe2\x3a"
+ "\x77\xf3\xa1\x39\xd3\x39\x32\xd8"
+ "\x2a\x6b\x44\xd7\x70\x36\x23\x89"
+ "\x4f\x75\x85\x42\x70\xd4\x2d\x4f"
+ "\xea\xfc\xc9\xfe\xb4\x86\xd8\x73"
+ "\x1d\xeb\xf7\x54\x0a\x47\x7e\x2c"
+ "\x04\x7b\x47\xea\x52\x8f\x13\x1a"
+ "\xf0\x19\x65\xe2\x0a\x1c\xae\x89"
+ "\xe1\xc5\x87\x6e\x5d\x7f\xf8\x79"
+ "\x08\xbf\xd2\x7f\x2c\x95\x22\xba"
+ "\x32\x78\xa9\xf6\x03\x98\x18\xed"
+ "\x15\xbf\x49\xb0\x6c\xa1\x4b\xb0"
+ "\xf3\x17\xd5\x35\x5d\x19\x57\x5b"
+ "\xf1\x07\x1e\xaa\x4d\xef\xd0\xd6"
+ "\x72\x12\x6b\xd9\xbc\x10\x49\xc5"
+ "\x28\xd4\xec\xe9\x8a\xb1\x6d\x50"
+ "\x4b\xf3\x44\xb8\x49\x04\x62\xe9"
+ "\xa4\xd8\x5a\xe7\x90\x02\xb7\x1e"
+ "\x66\x89\xbc\x5a\x71\x4e\xbd\xf8"
+ "\x18\xfb\x34\x2f\x67\xa2\x65\x71"
+ "\x00\x63\x22\xef\x3a\xa5\x18\x0e"
+ "\x54\x76\xaa\x58\xae\x87\x23\x93"
+ "\xb0\x3c\xa2\xa4\x07\x77\x3e\xd7"
+ "\x1a\x9c\xfe\x32\xc3\x54\x04\x4e"
+ "\xd6\x98\x44\xda\x98\xf8\xd3\xc8"
+ "\x1c\x07\x4b\xcd\x97\x5d\x96\x95"
+ "\x9a\x1d\x4a\xfc\x19\xcb\x0b\xd0"
+ "\x6d\x43\x3a\x9a\x39\x1c\xa8\x90"
+ "\x9f\x53\x8b\xc4\x41\x75\xb5\xb9"
+ "\x91\x5f\x02\x0a\x57\x6c\x8f\xc3"
+ "\x1b\x0b\x3a\x8b\x58\x3b\xbe\x2e"
+ "\xdc\x4c\x23\x71\x2e\x14\x06\x21"
+ "\x0b\x3b\x58\xb8\x97\xd1\x00\x62"
+ "\x2e\x74\x3e\x6e\x21\x8a\xcf\x60"
+ "\xda\x0c\xf8\x7c\xfd\x07\x55\x7f"
+ "\xb9\x1d\xda\x34\xc7\x27\xbf\x2a"
+ "\xd9\xba\x41\x9b\x37\xa1\xc4\x5d"
+ "\x03\x01\xce\xbb\x58\xff\xee\x74"
+ "\x08\xbd\x0b\x80\xb1\xd5\xf8\xb5"
+ "\x92\xf9\xbb\xbe\x03\xb5\xec\xbe"
+ "\x17\xee\xd7\x4e\x87\x2b\x61\x1b"
+ "\x27\xc3\x51\x50\xa0\x02\x73\x00"
+ "\x1a\xea\x2a\x2b\xf8\xf6\xe6\x96"
+ "\x75\x00\x56\xcc\xcb\x7a\x24\x29"
+ "\xe8\xdb\x95\xbf\x4e\x8f\x0a\x78"
+ "\xb8\xeb\x5a\x90\x37\xd0\x21\x94"
+ "\x6a\x89\x6b\x41\x3a\x1b\xa7\x20"
+ "\x43\x37\xda\xad\x81\xdd\xb4\xfc"
+ "\xe9\x60\x82\x77\x44\x3f\x89\x23"
+ "\x35\x04\x8f\xa1\xe8\xc0\xb6\x9f"
+ "\x56\xa7\x86\x3d\x65\x9c\x57\xbb"
+ "\x27\xdb\xe1\xb2\x13\x07\x9c\xb1"
+ "\x60\x8b\x38\x6b\x7f\x24\x28\x14"
+ "\xfe\xbf\xc0\xda\x61\x6e\xc2\xc7"
+ "\x63\x36\xa8\x02\x54\x93\xb0\xba"
+ "\xbd\x4d\x29\x14\x5a\x8b\xbc\x78"
+ "\xb3\xa6\xc5\x15\x5d\x36\x4d\x38"
+ "\x20\x9c\x1e\x98\x2e\x16\x89\x33"
+ "\x66\xa2\x54\x57\xcc\xde\x12\xa6"
+ "\x3b\x44\xf1\xac\x36\x3b\x97\xc1"
+ "\x96\x94\xf2\x67\x57\x23\x9c\x29"
+ "\xcd\xb7\x24\x2a\x8c\x86\xee\xaa"
+ "\x0f\xee\xaf\xa0\xec\x40\x8c\x08"
+ "\x18\xa1\xb4\x2c\x09\x46\x11\x7e"
+ "\x97\x84\xb1\x03\xa5\x3e\x59\x05"
+ "\x07\xc5\xf0\xcc\xb6\x71\x72\x2a"
+ "\xa2\x02\x78\x60\x0b\xc4\x47\x93"
+ "\xab\xcd\x67\x2b\xf5\xc5\x67\xa0"
+ "\xc0\x3c\x6a\xd4\x7e\xc9\x93\x0c"
+ "\x02\xdc\x15\x87\x48\x16\x26\x18"
+ "\x4e\x0b\x16\x0e\xb3\x02\x3e\x4b"
+ "\xc2\xe4\x49\x08\x9f\xb9\x8b\x1a"
+ "\xca\x10\xe8\x6c\x58\xa9\x7e\xb8"
+ "\xbe\xff\x58\x0e\x8a\xfb\x35\x93"
+ "\xcc\x76\x7d\xd9\x44\x7c\x31\x96"
+ "\xc0\x29\x73\xd3\x91\x0a\xc0\x65"
+ "\x5c\xbe\xe7\x4e\xda\x31\x85\xf2"
+ "\x72\xee\x34\xbe\x41\x90\xd4\x07"
+ "\x50\x64\x56\x81\xe3\x27\xfb\xcc"
+ "\xb7\x5c\x36\xb4\x6e\xbd\x23\xf8"
+ "\xe8\x71\xce\xa8\x73\x77\x82\x74"
+ "\xab\x8d\x0e\xe5\x93\x68\xb1\xd2"
+ "\x51\xc2\x18\x58\xd5\x3f\x29\x6b"
+ "\x2e\xd0\x88\x7f\x4a\x9d\xa2\xb8"
+ "\xae\x96\x09\xbf\x47\xae\x7d\x12"
+ "\x70\x67\xf1\xdd\xda\xdf\x47\x57"
+ "\xc9\x2c\x0f\xcb\xf3\x57\xd4\xda"
+ "\x00\x2e\x13\x48\x8f\xc0\xaa\x46"
+ "\xe1\xc1\x57\x75\x1e\xce\x74\xc2"
+ "\x82\xef\x31\x85\x8e\x38\x56\xff"
+ "\xcb\xab\xe0\x78\x40\x51\xd3\xc5"
+ "\xc3\xb1\xee\x9b\xd7\x72\x7f\x13"
+ "\x83\x7f\x45\x49\x45\xa1\x05\x8e"
+ "\xdc\x83\x81\x3c\x24\x28\x87\x08"
+ "\xa0\x70\x73\x80\x42\xcf\x5c\x26"
+ "\x39\xa5\xc5\x90\x5c\x56\xda\x58"
+ "\x93\x45\x5d\x45\x64\x59\x16\x3f"
+ "\xf1\x20\xf7\xa8\x2a\xd4\x3d\xbd"
+ "\x17\xfb\x90\x01\xcf\x1e\x71\xab"
+ "\x22\xa2\x24\xb5\x80\xac\xa2\x9a"
+ "\x9c\x2d\x85\x69\xa7\x87\x33\x55"
+ "\x65\x72\xc0\x91\x2a\x3d\x05\x33"
+ "\x25\x0d\x29\x25\x9f\x45\x4e\xfa"
+ "\x5d\x90\x3f\x34\x08\x54\xdb\x7d"
+ "\x94\x20\xa2\x3b\x10\x01\xa4\x89"
+ "\x1e\x90\x4f\x36\x3f\xc2\x40\x07"
+ "\x3f\xab\x2e\x89\xce\x80\xe1\xf5"
+ "\xac\xaf\x17\x10\x18\x0f\x4d\xe3"
+ "\xfc\x82\x2b\xbe\xe2\x91\xfa\x5b"
+ "\x9a\x9b\x2a\xd7\x99\x8d\x8f\xdc"
+ "\x54\x99\xc4\xa3\x97\xfd\xd3\xdb"
+ "\xd1\x51\x7c\xce\x13\x5c\x3b\x74"
+ "\xda\x9a\xe3\xdc\xdc\x87\x84\x98"
+ "\x16\x6d\xb0\x3d\x65\x57\x0b\xb2"
+ "\xb8\x04\xd4\xea\x49\x72\xc3\x66"
+ "\xbc\xdc\x91\x05\x2b\xa6\x5e\xeb"
+ "\x55\x72\x3e\x34\xd4\x28\x4b\x9c"
+ "\x07\x51\xf7\x30\xf3\xca\x04\xc1"
+ "\xd3\x69\x50\x2c\x27\x27\xc4\xb9"
+ "\x56\xc7\xa2\xd2\x66\x29\xea\xe0"
+ "\x25\xb8\x49\xd1\x60\xc9\x5e\xb5"
+ "\xed\x87\xb8\x74\x98\x0d\x16\x86"
+ "\x2a\x02\x24\xde\xb9\xa9\x5e\xf0"
+ "\xdd\xf7\x55\xb0\x26\x7a\x93\xd4"
+ "\xe6\x7d\xd2\x43\xb2\x8f\x7e\x9a"
+ "\x5d\x81\xe6\x28\xe5\x96\x7d\xc8"
+ "\x33\xe0\x56\x57\xe2\xa0\xf2\x1d"
+ "\x61\x78\x60\xd5\x81\x70\xa4\x11"
+ "\x43\x36\xe9\xd1\x68\x27\x21\x3c"
+ "\xb2\xa2\xad\x5f\x04\xd4\x55\x00"
+ "\x25\x71\x91\xed\x3a\xc9\x7b\x57"
+ "\x7b\xd1\x8a\xfb\x0e\xf5\x7b\x08"
+ "\xa9\x26\x4f\x24\x5f\xdd\x79\xed"
+ "\x19\xc4\xe1\xd5\xa8\x66\x60\xfc"
+ "\x5d\x48\x11\xb0\xa3\xc3\xe6\xc0"
+ "\xc6\x16\x7d\x20\x3f\x7c\x25\x52"
+ "\xdf\x05\xdd\xb5\x0b\x92\xee\xc5"
+ "\xe6\xd2\x7c\x3e\x2e\xd5\xac\xda"
+ "\xdb\x48\x31\xac\x87\x13\x8c\xfa"
+ "\xac\x18\xbc\xd1\x7f\x2d\xc6\x19"
+ "\x8a\xfa\xa0\x97\x89\x26\x50\x46"
+ "\x9c\xca\xe1\x73\x97\x26\x0a\x50"
+ "\x95\xec\x79\x19\xf6\xbd\x9a\xa1"
+ "\xcf\xc9\xab\xf7\x85\x84\xb2\xf5"
+ "\x2c\x7c\x73\xaa\xe2\xc2\xfb\xcd"
+ "\x5f\x08\x46\x2f\x8e\xd9\xff\xfd"
+ "\x19\xf6\xf4\x5d\x2b\x4b\x54\xe2"
+ "\x27\xaa\xfd\x2c\x5f\x75\x7c\xf6"
+ "\x2c\x95\x77\xcc\x90\xa2\xda\x1e"
+ "\x85\x37\x18\x34\x1d\xcf\x1b\xf2"
+ "\x86\xda\x71\xfb\x72\xab\x87\x0f"
+ "\x1e\x10\xb3\xba\x51\xea\x29\xd3"
+ "\x8c\x87\xce\x4b\x66\xbf\x60\x6d"
+ "\x81\x7c\xb8\x9c\xcc\x2e\x35\x02"
+ "\x02\x32\x4a\x7a\x24\xc4\x9f\xce"
+ "\xf0\x8a\x85\x90\xf3\x24\x95\x02"
+ "\xec\x13\xc1\xa4\xdd\x44\x01\xef"
+ "\xf6\xaa\x30\x70\xbf\x4e\x1a\xb9"
+ "\xc0\xff\x3b\x57\x5d\x12\xfe\xc3"
+ "\x1d\x5c\x3f\x74\xf9\xd9\x64\x61"
+ "\x20\xb2\x76\x79\x38\xd2\x21\xfb"
+ "\xc9\x32\xe8\xcc\x8e\x5f\xd7\x01"
+ "\x9e\x25\x76\x4d\xa7\xc1\x33\x21"
+ "\xfa\xcf\x98\x40\xd2\x1d\x48\xbd"
+ "\xd0\xc0\x38\x90\x27\x9b\x89\x4a"
+ "\x10\x1e\xaf\xa0\x78\x7d\x87\x2b"
+ "\x72\x10\x02\xf0\x5d\x22\x8b\x22"
+ "\xd7\x56\x7c\xd7\x6d\xcd\x9b\xc6"
+ "\xbc\xb2\xa6\x36\xde\xac\x87\x14"
+ "\x92\x93\x47\xca\x7d\xf4\x0b\x88"
+ "\xea\xbf\x3f\x2f\xa9\x94\x24\x13"
+ "\xa1\x52\x29\xfd\x5d\xa9\x76\x85"
+ "\x21\x62\x39\xa3\xf0\xf7\xb5\xa3"
+ "\xe0\x6c\x1b\xcb\xdb\x41\x91\xc6"
+ "\x4f\xaa\x26\x8b\x15\xd5\x84\x3a"
+ "\xda\xd6\x05\xc8\x8c\x0f\xe9\x19"
+ "\x00\x81\x38\xfb\x8f\xdf\xb0\x63"
+ "\x75\xe0\xe8\x8f\xef\x4a\xe0\x83"
+ "\x34\xe9\x4e\x06\xd7\xbb\xcd\xed"
+ "\x70\x0c\x72\x80\x64\x94\x67\xad"
+ "\x4a\xda\x82\xcf\x60\xfc\x92\x43"
+ "\xe3\x2f\xd1\x1e\x81\x1d\xdc\x62"
+ "\xec\xb1\xb0\xad\x4f\x43\x1d\x38"
+ "\x4e\x0d\x90\x40\x29\x1b\x98\xf1"
+ "\xbc\x70\x4e\x5a\x08\xbe\x88\x3a"
+ "\x55\xfb\x8c\x33\x1f\x0a\x7d\x2d"
+ "\xdc\x75\x03\xd2\x3b\xe8\xb8\x32"
+ "\x13\xab\x04\xbc\xe2\x33\x44\xa6"
+ "\xff\x6e\xba\xbd\xdc\xe2\xbf\x54"
+ "\x99\x71\x76\x59\x3b\x7a\xbc\xde"
+ "\xa1\x6e\x73\x62\x96\x73\x56\x66"
+ "\xfb\x1a\x56\x91\x2a\x8b\x12\xb0"
+ "\x82\x9f\x9b\x0c\x42\xc7\x22\x2c"
+ "\xbc\x49\xc5\x3c\x3b\xbf\x52\x64"
+ "\xd6\xd4\x03\x52\xf3\xfd\x13\x98"
+ "\xcc\xd8\xaa\x3e\x1d\x1f\x04\x8a"
+ "\x03\x41\x19\x5b\x31\xf3\x48\x83"
+ "\x49\xa3\xdd\xc9\x7c\x01\x34\x64"
+ "\xe5\xf3\xdf\xc9\x7f\x17\xa2\xf5"
+ "\x9c\x21\x79\x93\x91\x93\xbf\x9b"
+ "\xa5\xa5\xda\x1d\x55\x32\x72\x78"
+ "\xa6\x45\x2d\x21\x97\x6b\xfe\xbc"
+ "\xd0\xe7\x8e\x97\x66\x85\x9e\x41"
+ "\xfa\x2c\x8a\xee\x0d\x5a\x18\xf2"
+ "\x15\x89\x8f\xfb\xbc\xd8\xa6\x0c"
+ "\x83\xcc\x20\x08\xce\x70\xe5\xe6"
+ "\xbb\x7d\x9f\x11\x5f\x1e\x16\x68"
+ "\x18\xad\xa9\x4b\x04\x97\x8c\x18"
+ "\xed\x2a\x70\x79\x39\xcf\x36\x72"
+ "\x1e\x3e\x6d\x3c\x19\xce\x13\x19"
+ "\xb5\x13\xe7\x02\xd8\x5c\xec\x0c"
+ "\x81\xc5\xe5\x86\x10\x83\x9e\x67"
+ "\x3b\x74\x29\x63\xda\x23\xbc\x43"
+ "\xe9\x73\xa6\x2d\x25\x77\x66\xd0"
+ "\x2e\x05\x38\xae\x2e\x0e\x7f\xaf"
+ "\x82\xed\xef\x28\x39\x4c\x4b\x6f"
+ "\xdb\xa1\xb5\x79\xd0\x5b\x50\x77"
+ "\x6d\x75\x9f\x3c\xcf\xde\x41\xb8"
+ "\xa9\x13\x11\x60\x19\x23\xc7\x35"
+ "\x48\xbc\x14\x08\xf9\x57\xfe\x15"
+ "\xfd\xb2\xbb\x8c\x44\x3b\xf1\x62"
+ "\xbc\x0e\x01\x45\x39\xc0\xbb\xce"
+ "\xf5\xb7\xe1\x16\x7b\xcc\x8d\x7f"
+ "\xd3\x15\x36\xef\x8e\x4b\xaa\xee"
+ "\x49\x0c\x6e\x9b\x8c\x0e\x9f\xe0"
+ "\xd5\x7b\xdd\xbc\xb3\x67\x53\x6d"
+ "\x8b\xbe\xa3\xcd\x1e\x37\x9d\xc3"
+ "\x61\x36\xf4\x77\xec\x2b\xc7\x8b"
+ "\xd7\xad\x8d\x23\xdd\xf7\x9d\xf1"
+ "\x61\x1c\xbf\x09\xa5\x5e\xb9\x14"
+ "\xa6\x3f\x1a\xd9\x12\xb4\xef\x56"
+ "\x20\xa0\x77\x3e\xab\xf1\xb9\x91"
+ "\x5a\x92\x85\x5c\x92\x15\xb2\x1f"
+ "\xaf\xb0\x92\x23\x2d\x27\x8b\x7e"
+ "\x12\xcc\x56\xaa\x62\x85\x15\xd7"
+ "\x41\x89\x62\xd6\xd9\xd0\x6d\xbd"
+ "\x21\xa8\x49\xb6\x35\x40\x2f\x8d"
+ "\x2e\xfa\x24\x1e\x30\x12\x9c\x05"
+ "\x59\xfa\xe1\xad\xc0\x53\x09\xda"
+ "\xc0\x2e\x9d\x24\x0e\x4b\x6e\xd7"
+ "\x68\x32\x6a\xa0\x3c\x23\xb6\x5a"
+ "\x90\xb1\x1f\x62\xc8\x37\x36\x88"
+ "\xa4\x4d\x91\x12\x8d\x51\x8d\x81"
+ "\x44\x21\xfe\xd3\x61\x8d\xea\x5b"
+ "\x87\x24\xa9\xe9\x87\xde\x75\x77"
+ "\xc6\xa0\xd3\xf6\x99\x8b\x32\x56"
+ "\x47\xc6\x60\x65\xb6\x4f\xd1\x59"
+ "\x08\xb2\xe0\x15\x3e\xcb\x2c\xd6"
+ "\x8d\xc6\xbf\xda\x63\xe2\x04\x88"
+ "\x30\x9f\x37\x38\x98\x1c\x3e\x7a"
+ "\xa8\x8f\x3e\x2c\xcf\x90\x15\x6e"
+ "\x5d\xe9\x76\xd5\xdf\xc6\x2f\xf6"
+ "\xf5\x4a\x86\xbd\x36\x2a\xda\xdf"
+ "\x2f\xd8\x6e\x15\x18\x6b\xe9\xdb"
+ "\x26\x54\x6e\x60\x3b\xb8\xf9\x91"
+ "\xc1\x1d\xc0\x4f\x26\x8b\xdf\x55"
+ "\x47\x2f\xce\xdd\x4e\x93\x58\x3f"
+ "\x70\xdc\xf9\x4e\x9b\x37\x5e\x4f"
+ "\x39\xb9\x30\xe6\xce\xdb\xaf\x46"
+ "\xca\xfa\x52\xc9\x75\x3e\xd6\x96"
+ "\xe8\x97\xf1\xb1\x64\x31\x71\x1e"
+ "\x9f\xb6\xff\x69\xd6\xcd\x85\x4e"
+ "\x20\xf5\xfc\x84\x3c\xaf\xcc\x8d"
+ "\x5b\x52\xb8\xa2\x1c\x38\x47\x82"
+ "\x96\xff\x06\x4c\xaf\x8a\xf4\x8f"
+ "\xf8\x15\x97\xf6\xc3\xbc\x8c\x9e"
+ "\xc2\x06\xd9\x64\xb8\x1b\x0d\xd1"
+ "\x53\x55\x83\x7d\xcb\x8b\x7d\x20"
+ "\xa7\x70\xcb\xaa\x25\xae\x5a\x4f"
+ "\xdc\x66\xad\xe4\x54\xff\x09\xef"
+ "\x25\xcb\xac\x59\x89\x1d\x06\xcf"
+ "\xc7\x74\xe0\x5d\xa6\xd0\x04\xb4"
+ "\x41\x75\x34\x80\x6c\x4c\xc9\xd0"
+ "\x51\x0c\x0f\x84\x26\x75\x69\x23"
+ "\x81\x67\xde\xbf\x6c\x57\x8a\xc4"
+ "\xba\x91\xba\x8c\x2c\x75\xeb\x55"
+ "\xe5\x1b\x13\xbc\xaa\xec\x31\xdb"
+ "\xcc\x00\x3b\xe6\x50\xd8\xc3\xcc"
+ "\x9c\xb8\x6e\xb4\x9b\x16\xee\x74"
+ "\x26\x51\xda\x39\xe6\x31\xa1\xb2"
+ "\xd7\x6f\xcb\xae\x7d\x9f\x38\x7d"
+ "\x86\x49\x2a\x16\x5c\xc0\x08\xea"
+ "\x6b\x55\x85\x47\xbb\x90\xba\x69"
+ "\x56\xa5\x44\x62\x5b\xe6\x3b\xcc"
+ "\xe7\x6d\x1e\xca\x4b\xf3\x86\xe0"
+ "\x09\x76\x51\x83\x0a\x46\x19\x61"
+ "\xf0\xce\xe1\x06\x7d\x06\xb4\xfe"
+ "\xd9\xd3\x64\x8e\x0f\xd9\x64\x9e"
+ "\x74\x44\x97\x5d\x92\x7b\xe3\xcf"
+ "\x51\x44\xe7\xf2\xe7\xc0\x0c\xc2"
+ "\xf1\xf7\xa6\x36\x52\x2f\x7c\x09"
+ "\xfe\x8c\x59\x77\x52\x6a\x7e\xb3"
+ "\x2b\xb9\x17\x78\xe4\xf2\x82\x62"
+ "\x7f\x68\x8e\x04\xb4\x8f\x60\xd2"
+ "\xc6\x22\x1e\x0f\x3a\x8e\x3c\xb2"
+ "\x60\xbc\xa9\xb3\xda\xbd\x50\xe4"
+ "\x33\x98\xdd\x6f\xe9\x3b\x77\x57"
+ "\xeb\x7c\x8f\xbc\xfc\x34\x34\xb9"
+ "\x40\x31\x67\xcf\xfe\x22\x20\xa5"
+ "\x97\xe8\x4c\xa2\xc3\x94\xc6\x28"
+ "\xa6\x24\xe5\xa6\xb5\xd8\x24\xef"
+ "\x16\xa1\xc9\xe5\x92\xe6\x8c\x45"
+ "\x24\x24\x51\x22\x1e\xad\xef\x2f"
+ "\xb6\xbe\xfc\x92\x20\xac\x45\xe6"
+ "\xc0\xb0\xc8\xfb\x21\x34\xd4\x05"
+ "\x54\xb3\x99\xa4\xfe\xa9\xd5\xb5"
+ "\x3b\x72\x83\xf6\xe2\xf9\x88\x0e"
+ "\x20\x80\x3e\x4e\x8f\xa1\x75\x69"
+ "\x43\x5a\x7c\x38\x62\x51\xb5\xb7"
+ "\x84\x95\x3f\x6d\x24\xcc\xfd\x4b"
+ "\x4a\xaa\x97\x83\x6d\x16\xa8\xc5"
+ "\x18\xd9\xb9\xfe\xe2\x3f\xe8\xbd"
+ "\x37\x44\xdf\x79\x3b\x34\x19\x1a"
+ "\x65\x5e\xc7\x61\x1f\x17\x5e\x84"
+ "\x20\x72\x32\x98\x8c\x9e\xac\x1f"
+ "\x6e\x32\xae\x86\x46\x4f\x0f\x64"
+ "\x3f\xce\x96\xe6\x02\x41\x53\x1f"
+ "\x35\x30\x57\x7f\xfe\xb7\x47\xb9"
+ "\x0c\x2f\x14\x34\x9b\x1c\x88\x17"
+ "\xb5\xe5\x94\x17\x3e\xdc\x4d\x49"
+ "\xe1\x5d\x75\x3e\xa6\x16\x42\xd4"
+ "\x59\xb5\x24\x7c\x4c\x54\x1c\xf9"
+ "\xd6\xed\x69\x22\x5f\x74\xc9\xa9"
+ "\x7c\xb8\x09\xa7\xf9\x2b\x0d\x5f"
+ "\x42\xff\x4e\x57\xde\x0c\x67\x45"
+ "\xa4\x6e\xa0\x7e\x28\x34\xc5\xfe"
+ "\x58\x7e\xda\xec\x9f\x0b\x31\x2a"
+ "\x1f\x1b\x98\xad\x14\xcf\x9f\x96"
+ "\xf8\x87\x0e\x14\x19\x81\x23\x53"
+ "\x5f\x38\x08\xd9\xc1\xcb\xb2\xc5"
+ "\x19\x72\x75\x01\xd4\xcf\xd9\x91"
+ "\xfc\x48\xcc\xa3\x3c\xe6\x4c\xc6"
+ "\x73\xde\x5e\x90\xce\x6c\x85\x43"
+ "\x0d\xdf\xe3\x8c\x02\x62\xef\xac"
+ "\xb8\x05\x80\x81\xf6\x22\x30\xad"
+ "\x30\xa8\xcb\x55\x1e\xe6\x05\x7f"
+ "\xc5\x58\x1a\x78\xb7\x2f\x8e\x3c"
+ "\x80\x09\xca\xa2\x9a\x72\xeb\x10"
+ "\x84\x54\xaa\x98\x35\x5e\xb1\xc2"
+ "\xb7\x73\x14\x69\xef\xf8\x28\x43"
+ "\x36\xd3\x10\x0a\xd6\x69\xf8\xc8"
+ "\xbb\xe9\xe9\xf9\x29\x52\xf8\x6f"
+ "\x12\x78\xf9\xc6\xb2\x12\xfd\x39"
+ "\xa9\xeb\xe2\x47\xb9\x22\xc5\x8f"
+ "\x4d\xb1\x17\x40\x02\x84\xed\x53"
+ "\xc5\xfa\xc1\xcd\x59\x56\x93\xaa"
+ "\x3f\x23\x3f\x02\xb7\xe9\x6e\xa0"
+ "\xbc\x96\xb8\xb2\xf8\x04\x19\x87"
+ "\xe9\x4f\x29\xbf\x3a\xcb\x6d\x48"
+ "\xc9\xe7\x1f\xb7\xa8\xf8\xd4\xb4"
+ "\x6d\x0f\xb4\xf6\x44\x11\x0f\xf7"
+ "\x3d\xd2\x36\x05\x67\xa1\x46\x81"
+ "\x90\xe9\x60\x64\xfa\x52\x87\x37"
+ "\x44\x01\xbd\x58\xe1\xda\xda\x1e"
+ "\xa7\x09\xf7\x43\x31\x2b\x4b\x55"
+ "\xbd\x0d\x53\x7f\x12\x6c\xf5\x07"
+ "\xfc\x61\xda\xd6\x0a\xbd\x89\x5f"
+ "\x2c\xf5\xa8\x1f\x0d\x60\xe4\x3c"
+ "\x5d\x94\x8a\x1f\x64\xce\xd5\x16"
+ "\x73\xbc\xbe\xb1\x85\x28\xcb\x0b"
+ "\x47\x5c\x1f\x66\x25\x89\x61\x6a"
+ "\xa7\xcd\xf8\x1b\x31\x88\x42\x71"
+ "\x58\x65\x53\xd5\xc0\xa3\x56\x2e"
+ "\xb6\x86\x9e\x13\x78\x34\x36\x85"
+ "\xbb\xce\x6e\x54\x33\xb9\x97\xc5"
+ "\x72\xb8\xe0\x13\x34\x04\xbf\x83"
+ "\xbf\x78\x1d\x7c\x23\x34\x90\xe0"
+ "\x57\xd4\x3f\xc6\x61\xe3\xca\x96"
+ "\x13\xdd\x9e\x20\x51\x18\x73\x37"
+ "\x69\x37\xfb\xe5\x60\x1f\xf2\xa1"
+ "\xef\xa2\x6e\x16\x32\x8e\xc3\xb6"
+ "\x21\x5e\xc2\x1c\xb6\xc6\x96\x72"
+ "\x4f\xa6\x85\x69\xa9\x5d\xb2\x2e"
+ "\xac\xfe\x6e\xc3\xe7\xb3\x51\x08"
+ "\x66\x2a\xac\x59\xb3\x73\x86\xae"
+ "\x6d\x85\x97\x37\x68\xef\xa7\x85"
+ "\xb7\xdd\xdd\xd9\x85\xc9\x57\x01"
+ "\x10\x2b\x9a\x1e\x44\x12\x87\xa5"
+ "\x60\x1f\x88\xae\xbf\x14\x2d\x05"
+ "\x4c\x60\x85\x8a\x45\xac\x0f\xc2",
+ .len = 4096,
}
};
@@ -33801,9 +30271,6 @@ static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = {
.ctext = "\xf6\x78\x97\xd6\xaa\x94\x01\x27"
"\x2e\x4d\x83\xe0\x6e\x64\x9a\xdf",
.len = 16,
- .also_non_np = 1,
- .np = 3,
- .tap = { 5, 2, 9 },
}, {
.key = "\x36\x2b\x57\x97\xf8\x5d\xcd\x99"
"\x5f\x1a\x5a\x44\x1d\x92\x0f\x27"
@@ -33823,9 +30290,6 @@ static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = {
"\x57\x72\xb5\xfd\xb5\x5d\xb8\x28"
"\x0c\x04\x91\x14\x91\xe9\x37",
.len = 31,
- .also_non_np = 1,
- .np = 2,
- .tap = { 16, 15 },
}, {
.key = "\xa5\x28\x24\x34\x1a\x3c\xd8\xf7"
"\x05\x91\x8f\xee\x85\x1f\x35\x7f"
@@ -33869,9 +30333,6 @@ static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = {
"\x29\x62\x0d\xb2\xf6\x3c\x58\x57"
"\xc1\xd5\x5a\xbb\xd6\xa6\x2a\xe5",
.len = 128,
- .also_non_np = 1,
- .np = 4,
- .tap = { 112, 7, 8, 1 },
}, {
.key = "\xd3\x81\x72\x18\x23\xff\x6f\x4a"
"\x25\x74\x29\x0d\x51\x8a\x0e\x13"
@@ -34011,6 +30472,1436 @@ static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = {
"\xb8\x78\xd2\xa3\xc6\xf3\x79\x9c"
"\xc7\x27\xe1\x6a\x29\xad\xa4\x03",
.len = 512,
+ }, {
+ .key = "\xeb\xe5\x11\x3a\x72\xeb\x10\xbe"
+ "\x70\xcf\xe3\xea\xc2\x74\xa4\x48"
+ "\x29\x0f\x8f\x3f\xcf\x4c\x28\x2a"
+ "\x4e\x1e\x3c\xc3\x27\x9f\x16\x13",
+ .klen = 32,
+ .iv = "\x84\x3e\xa2\x7c\x06\x72\xb2\xad"
+ "\x88\x76\x65\xb4\x1a\x29\x27\x12"
+ "\x45\xb6\x8d\x0e\x4b\x87\x04\xfc"
+ "\xb5\xcd\x1c\x4d\xe8\x06\xf1\xcb",
+ .ptext = "\x8e\xb6\x07\x9b\x7c\xe4\xa4\xa2"
+ "\x41\x6c\x24\x1d\xc0\x77\x4e\xd9"
+ "\x4a\xa4\x2c\xb6\xe4\x55\x02\x7f"
+ "\xc4\xec\xab\xc2\x5c\x63\x40\x92"
+ "\x38\x24\x62\xdb\x65\x82\x10\x7f"
+ "\x21\xa5\x39\x3a\x3f\x38\x7e\xad"
+ "\x6c\x7b\xc9\x3f\x89\x8f\xa8\x08"
+ "\xbd\x31\x57\x3c\x7a\x45\x67\x30"
+ "\xa9\x27\x58\x34\xbe\xe3\xa4\xc3"
+ "\xff\xc2\x9f\x43\xf0\x04\xba\x1e"
+ "\xb6\xf3\xc4\xce\x09\x7a\x2e\x42"
+ "\x7d\xad\x97\xc9\x77\x9a\x3a\x78"
+ "\x6c\xaf\x7c\x2a\x46\xb4\x41\x86"
+ "\x1a\x20\xf2\x5b\x1a\x60\xc9\xc4"
+ "\x47\x5d\x10\xa4\xd2\x15\x6a\x19"
+ "\x4f\xd5\x51\x37\xd5\x06\x70\x1a"
+ "\x3e\x78\xf0\x2e\xaa\xb5\x2a\xbd"
+ "\x83\x09\x7c\xcb\x29\xac\xd7\x9c"
+ "\xbf\x80\xfd\x9d\xd4\xcf\x64\xca"
+ "\xf8\xc9\xf1\x77\x2e\xbb\x39\x26"
+ "\xac\xd9\xbe\xce\x24\x7f\xbb\xa2"
+ "\x82\xba\xeb\x5f\x65\xc5\xf1\x56"
+ "\x8a\x52\x02\x4d\x45\x23\x6d\xeb"
+ "\xb0\x60\x7b\xd8\x6e\xb2\x98\xd2"
+ "\xaf\x76\xf2\x33\x9b\xf3\xbb\x95"
+ "\xc0\x50\xaa\xc7\x47\xf6\xb3\xf3"
+ "\x77\x16\xcb\x14\x95\xbf\x1d\x32"
+ "\x45\x0c\x75\x52\x2c\xe8\xd7\x31"
+ "\xc0\x87\xb0\x97\x30\x30\xc5\x5e"
+ "\x50\x70\x6e\xb0\x4b\x4e\x38\x19"
+ "\x46\xca\x38\x6a\xca\x7d\xfe\x05"
+ "\xc8\x80\x7c\x14\x6c\x24\xb5\x42"
+ "\x28\x04\x4c\xff\x98\x20\x08\x10"
+ "\x90\x31\x03\x78\xd8\xa1\xe6\xf9"
+ "\x52\xc2\xfc\x3e\xa7\x68\xce\xeb"
+ "\x59\x5d\xeb\xd8\x64\x4e\xf8\x8b"
+ "\x24\x62\xcf\x17\x36\x84\xc0\x72"
+ "\x60\x4f\x3e\x47\xda\x72\x3b\x0e"
+ "\xce\x0b\xa9\x9c\x51\xdc\xa5\xb9"
+ "\x71\x73\x08\x4e\x22\x31\xfd\x88"
+ "\x29\xfc\x8d\x17\x3a\x7a\xe5\xb9"
+ "\x0b\x9c\x6d\xdb\xce\xdb\xde\x81"
+ "\x73\x5a\x16\x9d\x3c\x72\x88\x51"
+ "\x10\x16\xf3\x11\x6e\x32\x5f\x4c"
+ "\x87\xce\x88\x2c\xd2\xaf\xf5\xb7"
+ "\xd8\x22\xed\xc9\xae\x68\x7f\xc5"
+ "\x30\x62\xbe\xc9\xe0\x27\xa1\xb5"
+ "\x57\x74\x36\x60\xb8\x6b\x8c\xec"
+ "\x14\xad\xed\x69\xc9\xd8\xa5\x5b"
+ "\x38\x07\x5b\xf3\x3e\x74\x48\x90"
+ "\x61\x17\x23\xdd\x44\xbc\x9d\x12"
+ "\x0a\x3a\x63\xb2\xab\x86\xb8\x67"
+ "\x85\xd6\xb2\x5d\xde\x4a\xc1\x73"
+ "\x2a\x7c\x53\x8e\xd6\x7d\x0e\xe4"
+ "\x3b\xab\xc5\x3d\x32\x79\x18\xb7"
+ "\xd6\x50\x4d\xf0\x8a\x37\xbb\xd3"
+ "\x8d\xd8\x08\xd7\x7d\xaa\x24\x52"
+ "\xf7\x90\xe3\xaa\xd6\x49\x7a\x47"
+ "\xec\x37\xad\x74\x8b\xc1\xb7\xfe"
+ "\x4f\x70\x14\x62\x22\x8c\x63\xc2"
+ "\x1c\x4e\x38\xc3\x63\xb7\xbf\x53"
+ "\xbd\x1f\xac\xa6\x94\xc5\x81\xfa"
+ "\xe0\xeb\x81\xe9\xd9\x1d\x32\x3c"
+ "\x85\x12\xca\x61\x65\xd1\x66\xd8"
+ "\xe2\x0e\xc3\xa3\xff\x0d\xd3\xee"
+ "\xdf\xcc\x3e\x01\xf5\x9b\x45\x5c"
+ "\x33\xb5\xb0\x8d\x36\x1a\xdf\xf8"
+ "\xa3\x81\xbe\xdb\x3d\x4b\xf6\xc6"
+ "\xdf\x7f\xb0\x89\xbd\x39\x32\x50"
+ "\xbb\xb2\xe3\x5c\xbb\x4b\x18\x98"
+ "\x08\x66\x51\xe7\x4d\xfb\xfc\x4e"
+ "\x22\x42\x6f\x61\xdb\x7f\x27\x88"
+ "\x29\x3f\x02\xa9\xc6\x83\x30\xcc"
+ "\x8b\xd5\x64\x7b\x7c\x76\x16\xbe"
+ "\xb6\x8b\x26\xb8\x83\x16\xf2\x6b"
+ "\xd1\xdc\x20\x6b\x42\x5a\xef\x7a"
+ "\xa9\x60\xb8\x1a\xd3\x0d\x4e\xcb"
+ "\x75\x6b\xc5\x80\x43\x38\x7f\xad"
+ "\x9c\x56\xd9\xc4\xf1\x01\x74\xf0"
+ "\x16\x53\x8d\x69\xbe\xf2\x5d\x92"
+ "\x34\x38\xc8\x84\xf9\x1a\xfc\x26"
+ "\x16\xcb\xae\x7d\x38\x21\x67\x74"
+ "\x4c\x40\xaa\x6b\x97\xe0\xb0\x2f"
+ "\xf5\x3e\xf6\xe2\x24\xc8\x22\xa4"
+ "\xa8\x88\x27\x86\x44\x75\x5b\x29"
+ "\x34\x08\x4b\xa1\xfe\x0c\x26\xe5"
+ "\xac\x26\xf6\x21\x0c\xfb\xde\x14"
+ "\xfe\xd7\xbe\xee\x48\x93\xd6\x99"
+ "\x56\x9c\xcf\x22\xad\xa2\x53\x41"
+ "\xfd\x58\xa1\x68\xdc\xc4\xef\x20"
+ "\xa1\xee\xcf\x2b\x43\xb6\x57\xd8"
+ "\xfe\x01\x80\x25\xdf\xd2\x35\x44"
+ "\x0d\x15\x15\xc3\xfc\x49\xbf\xd0"
+ "\xbf\x2f\x95\x81\x09\xa6\xb6\xd7"
+ "\x21\x03\xfe\x52\xb7\xa8\x32\x4d"
+ "\x75\x1e\x46\x44\xbc\x2b\x61\x04"
+ "\x1b\x1c\xeb\x39\x86\x8f\xe9\x49"
+ "\xce\x78\xa5\x5e\x67\xc5\xe9\xef"
+ "\x43\xf8\xf1\x35\x22\x43\x61\xc1"
+ "\x27\xb5\x09\xb2\xb8\xe1\x5e\x26"
+ "\xcc\xf3\x6f\xb2\xb7\x55\x30\x98"
+ "\x87\xfc\xe7\xa8\xc8\x94\x86\xa1"
+ "\xd9\xa0\x3c\x74\x16\xb3\x25\x98"
+ "\xba\xc6\x84\x4a\x27\xa6\x58\xfe"
+ "\xe1\x68\x04\x30\xc8\xdb\x44\x52"
+ "\x4e\xb2\xa4\x6f\xf7\x63\xf2\xd6"
+ "\x63\x36\x17\x04\xf8\x06\xdb\xeb"
+ "\x99\x17\xa5\x1b\x61\x90\xa3\x9f"
+ "\x05\xae\x3e\xe4\xdb\xc8\x1c\x8e"
+ "\x77\x27\x88\xdf\xd3\x22\x5a\xc5"
+ "\x9c\xd6\x22\xf8\xc4\xd8\x92\x9d"
+ "\x16\xcc\x54\x25\x3b\x6f\xdb\xc0"
+ "\x78\xd8\xe3\xb3\x03\x69\xd7\x5d"
+ "\xf8\x08\x04\x63\x61\x9d\x76\xf9"
+ "\xad\x1d\xc4\x30\x9f\x75\x89\x6b"
+ "\xfb\x62\xba\xae\xcb\x1b\x6c\xe5"
+ "\x7e\xea\x58\x6b\xae\xce\x9b\x48"
+ "\x4b\x80\xd4\x5e\x71\x53\xa7\x24"
+ "\x73\xca\xf5\x3e\xbb\x5e\xd3\x1c"
+ "\x33\xe3\xec\x5b\xa0\x32\x9d\x25"
+ "\x0e\x0c\x28\x29\x39\x51\xc5\x70"
+ "\xec\x60\x8f\x77\xfc\x06\x7a\x33"
+ "\x19\xd5\x7a\x6e\x94\xea\xa3\xeb"
+ "\x13\xa4\x2e\x09\xd8\x81\x65\x83"
+ "\x03\x63\x8b\xb5\xc9\x89\x98\x73"
+ "\x69\x53\x8e\xab\xf1\xd2\x2f\x67"
+ "\xbd\xa6\x16\x6e\xd0\x8b\xc1\x25"
+ "\x93\xd2\x50\x7c\x1f\xe1\x11\xd0"
+ "\x58\x0d\x2f\x72\xe7\x5e\xdb\xa2"
+ "\x55\x9a\xe0\x09\x21\xac\x61\x85"
+ "\x4b\x20\x95\x73\x63\x26\xe3\x83"
+ "\x4b\x5b\x40\x03\x14\xb0\x44\x16"
+ "\xbd\xe0\x0e\xb7\x66\x56\xd7\x30"
+ "\xb3\xfd\x8a\xd3\xda\x6a\xa7\x3d"
+ "\x98\x09\x11\xb7\x00\x06\x24\x5a"
+ "\xf7\x42\x94\xa6\x0e\xb1\x6d\x48"
+ "\x74\xb1\xa7\xe6\x92\x0a\x15\x9a"
+ "\xf5\xfa\x55\x1a\x6c\xdd\x71\x08"
+ "\xd0\xf7\x8d\x0e\x7c\x67\x4d\xc6"
+ "\xe6\xde\x78\x88\x88\x3c\x5e\x23"
+ "\x46\xd2\x25\xa4\xfb\xa3\x26\x3f"
+ "\x2b\xfd\x9c\x20\xda\x72\xe1\x81"
+ "\x8f\xe6\xae\x08\x1d\x67\x15\xde"
+ "\x86\x69\x1d\xc6\x1e\x6d\xb7\x5c"
+ "\xdd\x43\x72\x5a\x7d\xa7\xd8\xd7"
+ "\x1e\x66\xc5\x90\xf6\x51\x76\x91"
+ "\xb3\xe3\x39\x81\x75\x08\xfa\xc5"
+ "\x06\x70\x69\x1b\x2c\x20\x74\xe0"
+ "\x53\xb0\x0c\x9d\xda\xa9\x5b\xdd"
+ "\x1c\x38\x6c\x9e\x3b\xc4\x7a\x82"
+ "\x93\x9e\xbb\x75\xfb\x19\x4a\x55"
+ "\x65\x7a\x3c\xda\xcb\x66\x5c\x13"
+ "\x17\x97\xe8\xbd\xae\x24\xd9\x76"
+ "\xfb\x8c\x73\xde\xbd\xb4\x1b\xe0"
+ "\xb9\x2c\xe8\xe0\x1d\x3f\xa8\x2c"
+ "\x1e\x81\x5b\x77\xe7\xdf\x6d\x06"
+ "\x7c\x9a\xf0\x2b\x5d\xfc\x86\xd5"
+ "\xb1\xad\xbc\xa8\x73\x48\x61\x67"
+ "\xd6\xba\xc8\xe8\xe2\xb8\xee\x40"
+ "\x36\x22\x3e\x61\xf6\xc8\x16\xe4"
+ "\x0e\x88\xad\x71\x53\x58\xe1\x6c"
+ "\x8f\x4f\x89\x4b\x3e\x9c\x7f\xe9"
+ "\xad\xc2\x28\xc2\x3a\x29\xf3\xec"
+ "\xa9\x28\x39\xba\xc2\x86\xe1\x06"
+ "\xf3\x8b\xe3\x95\x0c\x87\xb8\x1b"
+ "\x72\x35\x8e\x8f\x6d\x18\xc8\x1c"
+ "\xa5\x5d\x57\x9d\x73\x8a\xbb\x9e"
+ "\x21\x05\x12\xd7\xe0\x21\x1c\x16"
+ "\x3a\x95\x85\xbc\xb0\x71\x0b\x36"
+ "\x6c\x44\x8d\xef\x3b\xec\x3f\x8e"
+ "\x24\xa9\xe3\xa7\x63\x23\xca\x09"
+ "\x62\x96\x79\x0c\x81\x05\x41\xf2"
+ "\x07\x20\x26\xe5\x8e\x10\x54\x03"
+ "\x05\x7b\xfe\x0c\xcc\x8c\x50\xe5"
+ "\xca\x33\x4d\x48\x7a\x03\xd5\x64"
+ "\x49\x09\xf2\x5c\x5d\xfe\x2b\x30"
+ "\xbf\x29\x14\x29\x8b\x9b\x7c\x96"
+ "\x47\x07\x86\x4d\x4e\x4d\xf1\x47"
+ "\xd1\x10\x2a\xa8\xd3\x15\x8c\xf2"
+ "\x2f\xf4\x3a\xdf\xd0\xa7\xcb\x5a"
+ "\xad\x99\x39\x4a\xdf\x60\xbe\xf9"
+ "\x91\x4e\xf5\x94\xef\xc5\x56\x32"
+ "\x33\x86\x78\xa3\xd6\x4c\x29\x7c"
+ "\xe8\xac\x06\xb5\xf5\x01\x5c\x9f"
+ "\x02\xc8\xe8\xbf\x5c\x1a\x7f\x4d"
+ "\x28\xa5\xb9\xda\xa9\x5e\xe7\x4b"
+ "\xf4\x3d\xe9\x1d\x28\xaa\x1a\x8a"
+ "\x76\xc8\x6c\x19\x61\x3c\x9e\x29"
+ "\xcd\xbe\xff\xe0\x1c\xb8\x67\xb5"
+ "\xa4\x46\xf8\xb9\x8a\xa2\xf6\x7c"
+ "\xef\x23\x73\x0c\xe9\x72\x0a\x0d"
+ "\x9b\x40\xd8\xfb\x0c\x9c\xab\xa8",
+ .ctext = "\xfc\x02\x83\x13\x73\x06\x70\x3f"
+ "\x71\x28\x98\x61\xe5\x2c\x45\x49"
+ "\x18\xa2\x0e\x17\xc9\xdb\x4d\xf6"
+ "\xbe\x05\x02\x35\xc1\x18\x61\x28"
+ "\xff\x28\x0a\xd9\x00\xb8\xed\xec"
+ "\x14\x80\x88\x56\xcf\x98\x32\xcc"
+ "\xb0\xee\xb4\x5e\x2d\x61\x59\xcb"
+ "\x48\xc9\x25\xaa\x7e\x5f\xe5\x4f"
+ "\x95\x8f\x5d\x47\xe8\xc3\x09\xb4"
+ "\xce\xe7\x74\xcd\xc6\x09\x5c\xfc"
+ "\xc7\x79\xc9\x39\xe4\xe3\x9b\x59"
+ "\x67\x61\x10\xc9\xb7\x7a\xa8\x11"
+ "\x59\xf6\x7a\x67\x1c\x3a\x70\x76"
+ "\x2e\x0e\xbd\x10\x93\x01\x06\xea"
+ "\x51\xc6\x5c\xa7\xda\xd1\x7d\x06"
+ "\x8b\x1d\x5b\xb6\x87\xf0\x32\xbe"
+ "\xff\x55\xaa\x58\x5a\x28\xd1\x64"
+ "\x45\x3b\x0b\x5c\xee\xc4\x12\x2d"
+ "\x1f\xb7\xa5\x73\xf5\x20\xf5\xa8"
+ "\x10\x9d\xd8\x16\xd2\x05\x4d\x49"
+ "\x99\x4a\x71\x56\xec\xa3\xc7\x27"
+ "\xb0\x98\xcd\x59\x3c\x8a\xd1\x9e"
+ "\x33\xa5\x92\xf2\xb7\x87\x23\x5d"
+ "\x53\x9a\x8e\x7c\x63\x57\x5e\x9a"
+ "\x21\x54\x7a\x3c\x5a\xd5\x68\x69"
+ "\x35\x17\x51\x06\x19\x82\x9d\x44"
+ "\x9e\x8a\x75\xc5\x16\x55\xa4\x78"
+ "\x95\x63\xc3\xf0\x91\x73\x77\x44"
+ "\x0c\xff\xb9\xb3\xa7\x5f\xcf\x2a"
+ "\xa2\x54\x9c\xe3\x8b\x7e\x9d\x65"
+ "\xe5\x64\x8b\xbe\x06\x3a\x90\x31"
+ "\xdb\x42\x78\xe9\xe6\x8a\xae\xba"
+ "\x8f\xfb\xc9\x3d\xd9\xc2\x3e\x57"
+ "\xd5\x58\xfe\x70\x44\xe5\x2a\xd5"
+ "\x87\xcf\x9f\x6a\x02\xde\x48\xe9"
+ "\x13\xed\x8d\x2b\xf2\xa1\x56\x07"
+ "\x36\x2d\xcf\xc3\x5c\xd4\x4b\x20"
+ "\xb0\xdf\x1a\x70\xed\x0a\xe4\x2e"
+ "\x9a\xfc\x88\xa1\xc4\x2d\xd6\xb8"
+ "\xf1\x6e\x2c\x5c\xdc\x0e\xb0\x21"
+ "\x2d\x76\xb8\xc3\x05\x4c\xf5\xc5"
+ "\x9a\x14\xab\x08\xc2\x67\x59\x30"
+ "\x7a\xef\xd8\x4a\x89\x49\xd4\xf0"
+ "\x22\x39\xf2\x61\xaa\x70\x36\xcf"
+ "\x65\xee\x43\x83\x2e\x32\xe4\xc9"
+ "\xc2\xf1\xc7\x08\x28\x59\x10\x6f"
+ "\x7a\xeb\x8f\x78\x9e\xdf\x07\x0f"
+ "\xca\xc7\x02\x6a\x2e\x2a\xf0\x64"
+ "\xfa\x4c\x8c\x4c\xfc\x13\x23\x63"
+ "\x54\xeb\x1d\x41\xdf\x88\xd6\x66"
+ "\xae\x5e\x31\x74\x5d\x84\x65\xb8"
+ "\x61\x1c\x88\x1b\x8f\xb6\x14\x4e"
+ "\x73\x23\x27\x71\x85\x04\x07\x59"
+ "\x18\xa3\x2b\x69\x2a\x42\x81\xbf"
+ "\x40\xf4\x40\xdf\x04\xb8\x6c\x2e"
+ "\x21\x5b\x22\x25\x61\x01\x96\xce"
+ "\xfb\xbc\x75\x25\x2c\x03\x55\xea"
+ "\xb6\x56\x31\x03\xc8\x98\x77\xd6"
+ "\x30\x19\x9e\x45\x05\xfd\xca\xdf"
+ "\xae\x89\x30\xa3\xc1\x65\x41\x67"
+ "\x12\x8e\xa4\x61\xd0\x87\x04\x0a"
+ "\xe6\xf3\x43\x3a\x38\xce\x22\x36"
+ "\x41\xdc\xe1\x7d\xd2\xa6\xe2\x66"
+ "\x21\x8d\xc9\x59\x73\x52\x34\xd8"
+ "\x1f\xf1\x87\x00\x9b\x12\x74\xeb"
+ "\xbb\xa9\x34\x0c\x8e\x79\x74\x64"
+ "\xbf\x94\x97\xe4\x94\xda\xf0\x39"
+ "\x66\xa8\xd9\x82\xe3\x11\x3d\xe7"
+ "\xb3\x9a\x40\x7a\x6f\x71\xc7\x0f"
+ "\x7b\x6d\x59\x79\x18\x2f\x11\x60"
+ "\x1e\xe0\xae\x1b\x1b\xb4\xad\x4d"
+ "\x63\xd9\x3e\xa0\x8f\xe3\x66\x8c"
+ "\xfe\x5a\x73\x07\x95\x27\x1a\x07"
+ "\x6e\xd6\x14\x3f\xbe\xc5\x99\x94"
+ "\xcf\x40\xf4\x39\x1c\xf2\x99\x5b"
+ "\xb7\xfb\xb4\x4e\x5f\x21\x10\x04"
+ "\x24\x08\xd4\x0d\x10\x7a\x2f\x52"
+ "\x7d\x91\xc3\x38\xd3\x16\xf0\xfd"
+ "\x53\xba\xda\x88\xa5\xf6\xc7\xfd"
+ "\x63\x4a\x9f\x48\xb5\x31\xc2\xe1"
+ "\x7b\x3e\xac\x8d\xc9\x95\x02\x92"
+ "\xcc\xbd\x0e\x15\x2d\x97\x08\x82"
+ "\xa6\x99\xbc\x2c\x96\x91\xde\xa4"
+ "\x9c\xf5\x2c\xef\x12\x29\xb0\x72"
+ "\x5f\x60\x5d\x3d\xf3\x85\x59\x79"
+ "\xac\x06\x63\x74\xcc\x1a\x8d\x0e"
+ "\xa7\x5f\xd9\x3e\x84\xf7\xbb\xde"
+ "\x06\xd9\x4b\xab\xee\xb2\x03\xbe"
+ "\x68\x49\x72\x84\x8e\xf8\x45\x2b"
+ "\x59\x99\x17\xd3\xe9\x32\x79\xc3"
+ "\x83\x4c\x7a\x6c\x71\x53\x8c\x09"
+ "\x76\xfb\x3e\x80\x99\xbc\x2c\x7d"
+ "\x42\xe5\x70\x08\x80\xc7\xaf\x15"
+ "\x90\xda\x98\x98\x81\x04\x1c\x4d"
+ "\x78\xf1\xf3\xcc\x1b\x3a\x7b\xef"
+ "\xea\xe1\xee\x0e\xd2\x32\xb6\x63"
+ "\xbf\xb2\xb5\x86\x8d\x16\xd3\x23"
+ "\x04\x59\x51\xbb\x17\x03\xc0\x07"
+ "\x93\xbf\x72\x58\x30\xf2\x0a\xa2"
+ "\xbc\x60\x86\x3b\x68\x91\x67\x14"
+ "\x10\x76\xda\xa3\x98\x2d\xfc\x8a"
+ "\xb8\x95\xf7\xd2\x8b\x97\x8b\xfc"
+ "\xf2\x9e\x86\x20\xb6\xdf\x93\x41"
+ "\x06\x5e\x37\x3e\xe2\xb8\xd5\x06"
+ "\x59\xd2\x8d\x43\x91\x5a\xed\x94"
+ "\x54\xc2\x77\xbc\x0b\xb4\x29\x80"
+ "\x22\x19\xe7\x35\x1f\x29\x4f\xd8"
+ "\x02\x98\xee\x83\xca\x4c\x94\xa3"
+ "\xec\xde\x4b\xf5\xca\x57\x93\xa3"
+ "\x72\x69\xfe\x27\x7d\x39\x24\x9a"
+ "\x60\x19\x72\xbe\x24\xb2\x2d\x99"
+ "\x8c\xb7\x32\xf8\x74\x77\xfc\x8d"
+ "\xb2\xc1\x7a\x88\x28\x26\xea\xb7"
+ "\xad\xf0\x38\x49\x88\x78\x73\xcd"
+ "\x01\xef\xb9\x30\x1a\x33\xa3\x24"
+ "\x9b\x0b\xc5\x89\x64\x3f\xbe\x76"
+ "\xd5\xa5\x28\x74\xa2\xc6\xa0\xa0"
+ "\xdd\x13\x81\x64\x2f\xd1\xab\x15"
+ "\xab\x13\xb5\x68\x59\xa4\x9f\x0e"
+ "\x1e\x0a\xaf\xf7\x0b\x6e\x6b\x0b"
+ "\xf7\x95\x4c\xbc\x1d\x40\x6d\x9c"
+ "\x08\x42\xef\x07\x03\xb7\xa3\xea"
+ "\x2a\x5f\xec\x41\x3c\x72\x31\x9d"
+ "\xdc\x6b\x3a\x5e\x35\x3d\x12\x09"
+ "\x27\xe8\x63\xbe\xcf\xb3\xbc\x01"
+ "\x2d\x0c\x86\xb2\xab\x4a\x69\xe5"
+ "\xf8\x45\x97\x76\x0e\x31\xe5\xc6"
+ "\x4c\x4f\x94\xa5\x26\x19\x9f\x1b"
+ "\xe1\xf4\x79\x04\xb4\x93\x92\xdc"
+ "\xa5\x2a\x66\x25\x0d\xb2\x9e\xea"
+ "\xa8\xf6\x02\x77\x2d\xd1\x3f\x59"
+ "\x5c\x04\xe2\x36\x52\x5f\xa1\x27"
+ "\x0a\x07\x56\xb6\x2d\xd5\x90\x32"
+ "\x64\xee\x3f\x42\x8f\x61\xf8\xa0"
+ "\xc1\x8b\x1e\x0b\xa2\x73\xa9\xf3"
+ "\xc9\x0e\xb1\x96\x3a\x67\x5f\x1e"
+ "\xd1\x98\x57\xa2\xba\xb3\x23\x9d"
+ "\xa3\xc6\x3c\x7d\x5e\x3e\xb3\xe8"
+ "\x80\xae\x2d\xda\x85\x90\x69\x3c"
+ "\xf0\xe7\xdd\x9e\x20\x10\x52\xdb"
+ "\xc3\xa0\x15\x73\xee\xb1\xf1\x0f"
+ "\xf1\xf8\x3f\x40\xe5\x17\x80\x4e"
+ "\x91\x95\xc7\xec\xd1\x9c\xd9\x1a"
+ "\x8b\xac\xec\xc9\x0c\x07\xf4\xdc"
+ "\x77\x2d\xa2\xc4\xf8\x27\xb5\x41"
+ "\x2f\x85\xa6\x48\xad\x2a\x58\xc5"
+ "\xea\xfa\x1c\xdb\xfd\xb7\x70\x45"
+ "\xfc\xad\x11\xaf\x05\xed\xbf\xb6"
+ "\x3c\xe1\x57\xb8\x72\x4a\xa0\x6b"
+ "\x40\xd3\xda\xa9\xbc\xa5\x02\x95"
+ "\x8c\xf0\x4e\x67\xb2\x58\x66\xea"
+ "\x58\x0e\xc4\x88\xbc\x1d\x3b\x15"
+ "\x17\xc8\xf5\xd0\x69\x08\x0a\x01"
+ "\x80\x2e\x9e\x69\x4c\x37\x0b\xba"
+ "\xfb\x1a\xa9\xc3\x5f\xec\x93\x7c"
+ "\x4f\x72\x68\x1a\x05\xa1\x32\xe1"
+ "\x16\x57\x9e\xa6\xe0\x42\xfa\x76"
+ "\xc2\xf6\xd3\x9b\x37\x0d\xa3\x58"
+ "\x30\x27\xe7\xea\xb1\xc3\x43\xfb"
+ "\x67\x04\x70\x86\x0a\x71\x69\x34"
+ "\xca\xb1\xe3\x4a\x56\xc9\x29\xd1"
+ "\x12\x6a\xee\x89\xfd\x27\x83\xdf"
+ "\x32\x1a\xc2\xe9\x94\xcc\x44\x2e"
+ "\x0f\x3e\xc8\xc1\x70\x5b\xb0\xe8"
+ "\x6d\x47\xe3\x39\x75\xd5\x45\x8a"
+ "\x48\x4c\x64\x76\x6f\xae\x24\x6f"
+ "\xae\x77\x33\x5b\xf5\xca\x9c\x30"
+ "\x2c\x27\x15\x5e\x9c\x65\xad\x2a"
+ "\x88\xb1\x36\xf6\xcd\x5e\x73\x72"
+ "\x99\x5c\xe2\xe4\xb8\x3e\x12\xfb"
+ "\x55\x86\xfa\xab\x53\x12\xdc\x6a"
+ "\xe3\xfe\x6a\xeb\x9b\x5d\xeb\x72"
+ "\x9d\xf1\xbb\x80\x80\x76\x2d\x57"
+ "\x11\xde\xcf\xae\x46\xad\xdb\xcd"
+ "\x62\x66\x3d\x7b\x7f\xcb\xc4\x43"
+ "\x81\x0c\x7e\xb9\xb7\x47\x1a\x40"
+ "\xfd\x08\x51\xbe\x01\x1a\xd8\x31"
+ "\x43\x5e\x24\x91\xa2\x53\xa1\xc5"
+ "\x8a\xe4\xbc\x00\x8e\xf7\x0c\x30"
+ "\xdf\x03\x34\x2f\xce\xe4\x2e\xda"
+ "\x2b\x87\xfc\xf8\x9b\x50\xd5\xb0"
+ "\x5b\x08\xc6\x17\xa0\xae\x6b\x24"
+ "\xe2\x1d\xd0\x47\xbe\xc4\x8f\x62"
+ "\x1d\x12\x26\xc7\x78\xd4\xf2\xa3"
+ "\xea\x39\x8c\xcb\x54\x3e\x2b\xb9"
+ "\x9a\x8f\x97\xcf\x68\x53\x40\x02"
+ "\x56\xac\x52\xbb\x62\x3c\xc6\x3f"
+ "\x3a\x53\x3c\xe8\x21\x9a\x60\x65"
+ "\x10\x6e\x59\xc3\x4f\xc3\x07\xc8"
+ "\x61\x1c\xea\x62\x6e\xa2\x5a\x12"
+ "\xd6\x10\x91\xbe\x5e\x58\x73\xbe"
+ "\x77\xb8\xb7\x98\xc7\x7e\x78\x9a",
+ .len = 1536,
+ }, {
+ .key = "\x60\xd5\x36\xb0\x8e\x5d\x0e\x5f"
+ "\x70\x47\x8c\xea\x87\x30\x1d\x58"
+ "\x2a\xb2\xe8\xc6\xcb\x60\xe7\x6f"
+ "\x56\x95\x83\x98\x38\x80\x84\x8a",
+ .klen = 32,
+ .iv = "\x43\xfe\x63\x3c\xdc\x9e\x0c\xa6"
+ "\xee\x9c\x0b\x97\x65\xc2\x56\x1d"
+ "\x5d\xd0\xbf\xa3\x9f\x1e\xfb\x78"
+ "\xbf\x51\x1b\x18\x73\x27\x27\x8c",
+ .ptext = "\x0b\x77\xd8\xa3\x8c\xa6\xb2\x2d"
+ "\x3e\xdd\xcc\x7c\x4a\x3e\x61\xc4"
+ "\x9a\x7f\x73\xb0\xb3\x29\x32\x61"
+ "\x13\x25\x62\xcc\x59\x4c\xf4\xdb"
+ "\xd7\xf5\xf4\xac\x75\x51\xb2\x83"
+ "\x64\x9d\x1c\x8b\xd1\x8b\x0c\x06"
+ "\xf1\x9f\xba\x9d\xae\x62\xd4\xd8"
+ "\x96\xbe\x3c\x4c\x32\xe4\x82\x44"
+ "\x47\x5a\xec\xb8\x8a\x5b\xd5\x35"
+ "\x57\x1e\x5c\x80\x6f\x77\xa9\xb9"
+ "\xf2\x4f\x71\x1e\x48\x51\x86\x43"
+ "\x0d\xd5\x5b\x52\x30\x40\xcd\xbb"
+ "\x2c\x25\xc1\x47\x8b\xb7\x13\xc2"
+ "\x3a\x11\x40\xfc\xed\x45\xa4\xf0"
+ "\xd6\xfd\x32\x99\x13\x71\x47\x2e"
+ "\x4c\xb0\x81\xac\x95\x31\xd6\x23"
+ "\xa4\x2f\xa9\xe8\x5a\x62\xdc\x96"
+ "\xcf\x49\xa7\x17\x77\x76\x8a\x8c"
+ "\x04\x22\xaf\xaf\x6d\xd9\x16\xba"
+ "\x35\x21\x66\x78\x3d\xb6\x65\x83"
+ "\xc6\xc1\x67\x8c\x32\xd6\xc0\xc7"
+ "\xf5\x8a\xfc\x47\xd5\x87\x09\x2f"
+ "\x51\x9d\x57\x6c\x29\x0b\x1c\x32"
+ "\x47\x6e\x47\xb5\xf3\x81\xc8\x82"
+ "\xca\x5d\xe3\x61\x38\xa0\xdc\xcc"
+ "\x35\x73\xfd\xb3\x92\x5c\x72\xd2"
+ "\x2d\xad\xf6\xcd\x20\x36\xff\x49"
+ "\x48\x80\x21\xd3\x2f\x5f\xe9\xd8"
+ "\x91\x20\x6b\xb1\x38\x52\x1e\xbc"
+ "\x88\x48\xa1\xde\xc0\xa5\x46\xce"
+ "\x9f\x32\x29\xbc\x2b\x51\x0b\xae"
+ "\x7a\x44\x4e\xed\xeb\x95\x63\x99"
+ "\x96\x87\xc9\x34\x02\x26\xde\x20"
+ "\xe4\xcb\x59\x0c\xb5\x55\xbd\x55"
+ "\x3f\xa9\x15\x25\xa7\x5f\xab\x10"
+ "\xbe\x9a\x59\x6c\xd5\x27\xf3\xf0"
+ "\x73\x4a\xb3\xe4\x08\x11\x00\xeb"
+ "\xf1\xae\xc8\x0d\xef\xcd\xb5\xfc"
+ "\x0d\x7e\x03\x67\xad\x0d\xec\xf1"
+ "\x9a\xfd\x31\x60\x3e\xa2\xfa\x1c"
+ "\x93\x79\x31\x31\xd6\x66\x7a\xbd"
+ "\x85\xfd\x22\x08\x00\xae\x72\x10"
+ "\xd6\xb0\xf4\xb8\x4a\x72\x5b\x9c"
+ "\xbf\x84\xdd\xeb\x13\x05\x28\xb7"
+ "\x61\x60\xfd\x7f\xf0\xbe\x4d\x18"
+ "\x7d\xc9\xba\xb0\x01\x59\x74\x18"
+ "\xe4\xf6\xa6\x74\x5d\x3f\xdc\xa0"
+ "\x9e\x57\x93\xbf\x16\x6c\xf6\xbd"
+ "\x93\x45\x38\x95\xb9\x69\xe9\x62"
+ "\x21\x73\xbd\x81\x73\xac\x15\x74"
+ "\x9e\x68\x28\x91\x38\xb7\xd4\x47"
+ "\xc7\xab\xc9\x14\xad\x52\xe0\x4c"
+ "\x17\x1c\x42\xc1\xb4\x9f\xac\xcc"
+ "\xc8\x12\xea\xa9\x9e\x30\x21\x14"
+ "\xa8\x74\xb4\x74\xec\x8d\x40\x06"
+ "\x82\xb7\x92\xd7\x42\x5b\xf2\xf9"
+ "\x6a\x1e\x75\x6e\x44\x55\xc2\x8d"
+ "\x73\x5b\xb8\x8c\x3c\xef\x97\xde"
+ "\x24\x43\xb3\x0e\xba\xad\x63\x63"
+ "\x16\x0a\x77\x03\x48\xcf\x02\x8d"
+ "\x76\x83\xa3\xba\x73\xbe\x80\x3f"
+ "\x8f\x6e\x76\x24\xc1\xff\x2d\xb4"
+ "\x20\x06\x9b\x67\xea\x29\xb5\xe0"
+ "\x57\xda\x30\x9d\x38\xa2\x7d\x1e"
+ "\x8f\xb9\xa8\x17\x64\xea\xbe\x04"
+ "\x84\xd1\xce\x2b\xfd\x84\xf9\x26"
+ "\x1f\x26\x06\x5c\x77\x6d\xc5\x9d"
+ "\xe6\x37\x76\x60\x7d\x3e\xf9\x02"
+ "\xba\xa6\xf3\x7f\xd3\x95\xb4\x0e"
+ "\x52\x1c\x6a\x00\x8f\x3a\x0b\xce"
+ "\x30\x98\xb2\x63\x2f\xff\x2d\x3b"
+ "\x3a\x06\x65\xaf\xf4\x2c\xef\xbb"
+ "\x88\xff\x2d\x4c\xa9\xf4\xff\x69"
+ "\x9d\x46\xae\x67\x00\x3b\x40\x94"
+ "\xe9\x7a\xf7\x0b\xb7\x3c\xa2\x2f"
+ "\xc3\xde\x5e\x29\x01\xde\xca\xfa"
+ "\xc6\xda\xd7\x19\xc7\xde\x4a\x16"
+ "\x93\x6a\xb3\x9b\x47\xe9\xd2\xfc"
+ "\xa1\xc3\x95\x9c\x0b\xa0\x2b\xd4"
+ "\xd3\x1e\xd7\x21\x96\xf9\x1e\xf4"
+ "\x59\xf4\xdf\x00\xf3\x37\x72\x7e"
+ "\xd8\xfd\x49\xd4\xcd\x61\x7b\x22"
+ "\x99\x56\x94\xff\x96\xcd\x9b\xb2"
+ "\x76\xca\x9f\x56\xae\x04\x2e\x75"
+ "\x89\x4e\x1b\x60\x52\xeb\x84\xf4"
+ "\xd1\x33\xd2\x6c\x09\xb1\x1c\x43"
+ "\x08\x67\x02\x01\xe3\x64\x82\xee"
+ "\x36\xcd\xd0\x70\xf1\x93\xd5\x63"
+ "\xef\x48\xc5\x56\xdb\x0a\x35\xfe"
+ "\x85\x48\xb6\x97\x97\x02\x43\x1f"
+ "\x7d\xc9\xa8\x2e\x71\x90\x04\x83"
+ "\xe7\x46\xbd\x94\x52\xe3\xc5\xd1"
+ "\xce\x6a\x2d\x6b\x86\x9a\xf5\x31"
+ "\xcd\x07\x9c\xa2\xcd\x49\xf5\xec"
+ "\x01\x3e\xdf\xd5\xdc\x15\x12\x9b"
+ "\x0c\x99\x19\x7b\x2e\x83\xfb\xd8"
+ "\x89\x3a\x1c\x1e\xb4\xdb\xeb\x23"
+ "\xd9\x42\xae\x47\xfc\xda\x37\xe0"
+ "\xd2\xb7\x47\xd9\xe8\xb5\xf6\x20"
+ "\x42\x8a\x9d\xaf\xb9\x46\x80\xfd"
+ "\xd4\x74\x6f\x38\x64\xf3\x8b\xed"
+ "\x81\x94\x56\xe7\xf1\x1a\x64\x17"
+ "\xd4\x27\x59\x09\xdf\x9b\x74\x05"
+ "\x79\x6e\x13\x29\x2b\x9e\x1b\x86"
+ "\x73\x9f\x40\xbe\x6e\xff\x92\x4e"
+ "\xbf\xaa\xf4\xd0\x88\x8b\x6f\x73"
+ "\x9d\x8b\xbf\xe5\x8a\x85\x45\x67"
+ "\xd3\x13\x72\xc6\x2a\x63\x3d\xb1"
+ "\x35\x7c\xb4\x38\xbb\x31\xe3\x77"
+ "\x37\xad\x75\xa9\x6f\x84\x4e\x4f"
+ "\xeb\x5b\x5d\x39\x6d\xed\x0a\xad"
+ "\x6c\x1b\x8e\x1f\x57\xfa\xc7\x7c"
+ "\xbf\xcf\xf2\xd1\x72\x3b\x70\x78"
+ "\xee\x8e\xf3\x4f\xfd\x61\x30\x9f"
+ "\x56\x05\x1d\x7d\x94\x9b\x5f\x8c"
+ "\xa1\x0f\xeb\xc3\xa9\x9e\xb8\xa0"
+ "\xc6\x4e\x1e\xb1\xbc\x0a\x87\xa8"
+ "\x52\xa9\x1e\x3d\x58\x8e\xc6\x95"
+ "\x85\x58\xa3\xc3\x3a\x43\x32\x50"
+ "\x6c\xb3\x61\xe1\x0c\x7d\x02\x63"
+ "\x5f\x8b\xdf\xef\x13\xf8\x66\xea"
+ "\x89\x00\x1f\xbd\x5b\x4c\xd5\x67"
+ "\x8f\x89\x84\x33\x2d\xd3\x70\x94"
+ "\xde\x7b\xd4\xb0\xeb\x07\x96\x98"
+ "\xc5\xc0\xbf\xc8\xcf\xdc\xc6\x5c"
+ "\xd3\x7d\x78\x30\x0e\x14\xa0\x86"
+ "\xd7\x8a\xb7\x53\xa3\xec\x71\xbf"
+ "\x85\xf2\xea\xbd\x77\xa6\xd1\xfd"
+ "\x5a\x53\x0c\xc3\xff\xf5\x1d\x46"
+ "\x37\xb7\x2d\x88\x5c\xeb\x7a\x0c"
+ "\x0d\x39\xc6\x40\x08\x90\x1f\x58"
+ "\x36\x12\x35\x28\x64\x12\xe7\xbb"
+ "\x50\xac\x45\x15\x7b\x16\x23\x5e"
+ "\xd4\x11\x2a\x8e\x17\x47\xe1\xd0"
+ "\x69\xc6\xd2\x5c\x2c\x76\xe6\xbb"
+ "\xf7\xe7\x34\x61\x8e\x07\x36\xc8"
+ "\xce\xcf\x3b\xeb\x0a\x55\xbd\x4e"
+ "\x59\x95\xc9\x32\x5b\x79\x7a\x86"
+ "\x03\x74\x4b\x10\x87\xb3\x60\xf6"
+ "\x21\xa4\xa6\xa8\x9a\xc9\x3a\x6f"
+ "\xd8\x13\xc9\x18\xd4\x38\x2b\xc2"
+ "\xa5\x7e\x6a\x09\x0f\x06\xdf\x53"
+ "\x9a\x44\xd9\x69\x2d\x39\x61\xb7"
+ "\x1c\x36\x7f\x9e\xc6\x44\x9f\x42"
+ "\x18\x0b\x99\xe6\x27\xa3\x1e\xa6"
+ "\xd0\xb9\x9a\x2b\x6f\x60\x75\xbd"
+ "\x52\x4a\x91\xd4\x7b\x8f\x95\x9f"
+ "\xdd\x74\xed\x8b\x20\x00\xdd\x08"
+ "\x6e\x5b\x61\x7b\x06\x6a\x19\x84"
+ "\x1c\xf9\x86\x65\xcd\x1c\x73\x3f"
+ "\x28\x5c\x8a\x93\x1a\xf3\xa3\x6c"
+ "\x6c\xa9\x7c\xea\x3c\xd4\x15\x45"
+ "\x7f\xbc\xe3\xbb\x42\xf0\x2e\x10"
+ "\xcd\x0c\x8b\x44\x1a\x82\x83\x0c"
+ "\x58\xb1\x24\x28\xa0\x11\x2f\x63"
+ "\xa5\x82\xc5\x9f\x86\x42\xf4\x4d"
+ "\x89\xdb\x76\x4a\xc3\x7f\xc4\xb8"
+ "\xdd\x0d\x14\xde\xd2\x62\x02\xcb"
+ "\x70\xb7\xee\xf4\x6a\x09\x12\x5e"
+ "\xd1\x26\x1a\x2c\x20\x71\x31\xef"
+ "\x7d\x65\x57\x65\x98\xff\x8b\x02"
+ "\x9a\xb5\xa4\xa1\xaf\x03\xc4\x50"
+ "\x33\xcf\x1b\x25\xfa\x7a\x79\xcc"
+ "\x55\xe3\x21\x63\x0c\x6d\xeb\x5b"
+ "\x1c\xad\x61\x0b\xbd\xb0\x48\xdb"
+ "\xb3\xc8\xa0\x87\x7f\x8b\xac\xfd"
+ "\xd2\x68\x9e\xb4\x11\x3c\x6f\xb1"
+ "\xfe\x25\x7d\x84\x5a\xae\xc9\x31"
+ "\xc3\xe5\x6a\x6f\xbc\xab\x41\xd9"
+ "\xde\xce\xf9\xfa\xd5\x7c\x47\xd2"
+ "\x66\x30\xc9\x97\xf2\x67\xdf\x59"
+ "\xef\x4e\x11\xbc\x4e\x70\xe3\x46"
+ "\x53\xbe\x16\x6d\x33\xfb\x57\x98"
+ "\x4e\x34\x79\x3b\xc7\x3b\xaf\x94"
+ "\xc1\x87\x4e\x47\x11\x1b\x22\x41"
+ "\x99\x12\x61\xe0\xe0\x8c\xa9\xbd"
+ "\x79\xb6\x06\x4d\x90\x3b\x0d\x30"
+ "\x1a\x00\xaa\x0e\xed\x7c\x16\x2f"
+ "\x0d\x1a\xfb\xf8\xad\x51\x4c\xab"
+ "\x98\x4c\x80\xb6\x92\x03\xcb\xa9"
+ "\x99\x9d\x16\xab\x43\x8c\x3f\x52"
+ "\x96\x53\x63\x7e\xbb\xd2\x76\xb7"
+ "\x6b\x77\xab\x52\x80\x33\xe3\xdf"
+ "\x4b\x3c\x23\x1a\x33\xe1\x43\x40"
+ "\x39\x1a\xe8\xbd\x3c\x6a\x77\x42"
+ "\x88\x9f\xc6\xaa\x65\x28\xf2\x1e"
+ "\xb0\x7c\x8e\x10\x41\x31\xe9\xd5"
+ "\x9d\xfd\x28\x7f\xfb\x61\xd3\x39"
+ "\x5f\x7e\xb4\xfb\x9c\x7d\x98\xb7"
+ "\x37\x2f\x18\xd9\x3b\x83\xaf\x4e"
+ "\xbb\xd5\x49\x69\x46\x93\x3a\x21"
+ "\x46\x1d\xad\x84\xb5\xe7\x8c\xff"
+ "\xbf\x81\x7e\x22\xf6\x88\x8c\x82"
+ "\xf5\xde\xfe\x18\xc9\xfb\x58\x07"
+ "\xe4\x68\xff\x9c\xf4\xe0\x24\x20"
+ "\x90\x92\x01\x49\xc2\x38\xe1\x7c"
+ "\xac\x61\x0b\x96\x36\xa4\x77\xe9"
+ "\x29\xd4\x97\xae\x15\x13\x7c\x6c"
+ "\x2d\xf1\xc5\x83\x97\x02\xa8\x2e"
+ "\x0b\x0f\xaf\xb5\x42\x18\x8a\x8c"
+ "\xb8\x28\x85\x28\x1b\x2a\x12\xa5"
+ "\x4b\x0a\xaf\xd2\x72\x37\x66\x23"
+ "\x28\xe6\x71\xa0\x77\x85\x7c\xff"
+ "\xf3\x8d\x2f\x0c\x33\x30\xcd\x7f"
+ "\x61\x64\x23\xb2\xe9\x79\x05\xb8"
+ "\x61\x47\xb1\x2b\xda\xf7\x9a\x24"
+ "\x94\xf6\xcf\x07\x78\xa2\x80\xaa"
+ "\x6e\xe9\x58\x97\x19\x0c\x58\x73"
+ "\xaf\xee\x2d\x6e\x26\x67\x18\x8a"
+ "\xc6\x6d\xf6\xbc\x65\xa9\xcb\xe7"
+ "\x53\xf1\x61\x97\x63\x52\x38\x86"
+ "\x0e\xdd\x33\xa5\x30\xe9\x9f\x32"
+ "\x43\x64\xbc\x2d\xdc\x28\x43\xd8"
+ "\x6c\xcd\x00\x2c\x87\x9a\x33\x79"
+ "\xbd\x63\x6d\x4d\xf9\x8a\x91\x83"
+ "\x9a\xdb\xf7\x9a\x11\xe1\xd1\x93"
+ "\x4a\x54\x0d\x51\x38\x30\x84\x0b"
+ "\xc5\x29\x8d\x92\x18\x6c\x28\xfe"
+ "\x1b\x07\x57\xec\x94\x74\x0b\x2c"
+ "\x21\x01\xf6\x23\xf9\xb0\xa0\xaf"
+ "\xb1\x3e\x2e\xa8\x0d\xbc\x2a\x68"
+ "\x59\xde\x0b\x2d\xde\x74\x42\xa1"
+ "\xb4\xce\xaf\xd8\x42\xeb\x59\xbd"
+ "\x61\xcc\x27\x28\xc6\xf2\xde\x3e"
+ "\x68\x64\x13\xd3\xc3\xc0\x31\xe0"
+ "\x5d\xf9\xb4\xa1\x09\x20\x46\x8b"
+ "\x48\xb9\x27\x62\x00\x12\xc5\x03"
+ "\x28\xfd\x55\x27\x1c\x31\xfc\xdb"
+ "\xc1\xcb\x7e\x67\x91\x2e\x50\x0c"
+ "\x61\xf8\x9f\x31\x26\x5a\x3d\x2e"
+ "\xa0\xc7\xef\x2a\xb6\x24\x48\xc9"
+ "\xbb\x63\x99\xf4\x7c\x4e\xc5\x94"
+ "\x99\xd5\xff\x34\x93\x8f\x31\x45"
+ "\xae\x5e\x7b\xfd\xf4\x81\x84\x65"
+ "\x5b\x41\x70\x0b\xe5\xaa\xec\x95"
+ "\x6b\x3d\xe3\xdc\x12\x78\xf8\x28"
+ "\x26\xec\x3a\x64\xc4\xab\x74\x97"
+ "\x3d\xcf\x21\x7d\xcf\x59\xd3\x15"
+ "\x47\x94\xe4\xd9\x48\x4c\x02\x49"
+ "\x68\x50\x22\x16\x96\x2f\xc4\x23"
+ "\x80\x47\x27\xd1\xee\x10\x3b\xa7"
+ "\x19\xae\xe1\x40\x5f\x3a\xde\x5d"
+ "\x97\x1c\x59\xce\xe1\xe7\x32\xa7"
+ "\x20\x89\xef\x44\x22\x38\x3c\x14"
+ "\x99\x3f\x1b\xd6\x37\xfe\x93\xbf"
+ "\x34\x13\x86\xd7\x9b\xe5\x2a\x37"
+ "\x72\x16\xa4\xdf\x7f\xe4\xa4\x66"
+ "\x9d\xf2\x0b\x29\xa1\xe2\x9d\x36"
+ "\xe1\x9d\x56\x95\x73\xe1\x91\x58"
+ "\x0f\x64\xf8\x90\xbb\x0c\x48\x0f"
+ "\xf5\x52\xae\xd9\xeb\x95\xb7\xdd"
+ "\xae\x0b\x20\x55\x87\x3d\xf0\x69"
+ "\x3c\x0a\x54\x61\xea\x00\xbd\xba"
+ "\x5f\x7e\x25\x8c\x3e\x61\xee\xb2"
+ "\x1a\xc8\x0e\x0b\xa5\x18\x49\xf2"
+ "\x6e\x1d\x3f\x83\xc3\xf1\x1a\xcb"
+ "\x9f\xc9\x82\x4e\x7b\x26\xfd\x68"
+ "\x28\x25\x8d\x22\x17\xab\xf8\x4e"
+ "\x1a\xa9\x81\x48\xb0\x9f\x52\x75"
+ "\xe4\xef\xdd\xbd\x5b\xbe\xab\x3c"
+ "\x43\x76\x23\x62\xce\xb8\xc2\x5b"
+ "\xc6\x31\xe6\x81\xb4\x42\xb2\xfd"
+ "\xf3\x74\xdd\x02\x3c\xa0\xd7\x97"
+ "\xb0\xe7\xe9\xe0\xce\xef\xe9\x1c"
+ "\x09\xa2\x6d\xd3\xc4\x60\xd6\xd6"
+ "\x9e\x54\x31\x45\x76\xc9\x14\xd4"
+ "\x95\x17\xe9\xbe\x69\x92\x71\xcb"
+ "\xde\x7c\xf1\xbd\x2b\xef\x8d\xaf"
+ "\x51\xe8\x28\xec\x48\x7f\xf8\xfa"
+ "\x9f\x9f\x5e\x52\x61\xc3\xfc\x9a"
+ "\x7e\xeb\xe3\x30\xb6\xfe\xc4\x4a"
+ "\x87\x1a\xff\x54\x64\xc7\xaa\xa2"
+ "\xfa\xb7\xb2\xe7\x25\xce\x95\xb4"
+ "\x15\x93\xbd\x24\xb6\xbc\xe4\x62"
+ "\x93\x7f\x44\x40\x72\xcb\xfb\xb2"
+ "\xbf\xe8\x03\xa5\x87\x12\x27\xfd"
+ "\xc6\x21\x8a\x8f\xc2\x48\x48\xb9"
+ "\x6b\xb6\xf0\xf0\x0e\x0a\x0e\xa4"
+ "\x40\xa9\xd8\x23\x24\xd0\x7f\xe2"
+ "\xf9\xed\x76\xf0\x91\xa5\x83\x3c"
+ "\x55\xe1\x92\xb8\xb6\x32\x9e\x63"
+ "\x60\x81\x75\x29\x9e\xce\x2a\x70"
+ "\x28\x0c\x87\xe5\x46\x73\x76\x66"
+ "\xbc\x4b\x6c\x37\xc7\xd0\x1a\xa0"
+ "\x9d\xcf\x04\xd3\x8c\x42\xae\x9d"
+ "\x35\x5a\xf1\x40\x4c\x4e\x81\xaa"
+ "\xfe\xd5\x83\x4f\x29\x19\xf3\x6c"
+ "\x9e\xd0\x53\xe5\x05\x8f\x14\xfb"
+ "\x68\xec\x0a\x3a\x85\xcd\x3e\xb4"
+ "\x4a\xc2\x5b\x92\x2e\x0b\x58\x64"
+ "\xde\xca\x64\x86\x53\xdb\x7f\x4e"
+ "\x54\xc6\x5e\xaa\xe5\x82\x3b\x98"
+ "\x5b\x01\xa7\x1f\x7b\x3d\xcc\x19"
+ "\xf1\x11\x02\x64\x09\x25\x7c\x26"
+ "\xee\xad\x50\x68\x31\x26\x16\x0f"
+ "\xb6\x7b\x6f\xa2\x17\x1a\xba\xbe"
+ "\xc3\x60\xdc\xd2\x44\xe0\xb4\xc4"
+ "\xfe\xff\x69\xdb\x60\xa6\xaf\x39"
+ "\x0a\xbd\x6e\x41\xd1\x9f\x87\x71"
+ "\xcc\x43\xa8\x47\x10\xbc\x2b\x7d"
+ "\x40\x12\x43\x31\xb8\x12\xe0\x95"
+ "\x6f\x9d\xf8\x75\x51\x3d\x61\xbe"
+ "\xa0\xd1\x0b\x8d\x50\xc7\xb8\xe7"
+ "\xab\x03\xda\x41\xab\xc5\x4e\x33"
+ "\x5a\x63\x94\x90\x22\x72\x54\x26"
+ "\x93\x65\x99\x45\x55\xd3\x55\x56"
+ "\xc5\x39\xe4\xb4\xb1\xea\xd8\xf9"
+ "\xb5\x31\xf7\xeb\x80\x1a\x9e\x8d"
+ "\xd2\x40\x01\xea\x33\xb9\xf2\x7a"
+ "\x43\x41\x72\x0c\xbf\x20\xab\xf7"
+ "\xfa\x65\xec\x3e\x35\x57\x1e\xef"
+ "\x2a\x81\xfa\x10\xb2\xdb\x8e\xfa"
+ "\x7f\xe7\xaf\x73\xfc\xbb\x57\xa2"
+ "\xaf\x6f\x41\x11\x30\xd8\xaf\x94"
+ "\x53\x8d\x4c\x23\xa5\x20\x63\xcf"
+ "\x0d\x00\xe0\x94\x5e\x92\xaa\xb5"
+ "\xe0\x4e\x96\x3c\xf4\x26\x2f\xf0"
+ "\x3f\xd7\xed\x75\x2c\x63\xdf\xc8"
+ "\xfb\x20\xb5\xae\x44\x83\xc0\xab"
+ "\x05\xf9\xbb\xa7\x62\x7d\x21\x5b"
+ "\x04\x80\x93\x84\x5f\x1d\x9e\xcd"
+ "\xa2\x07\x7e\x22\x2f\x55\x94\x23"
+ "\x74\x35\xa3\x0f\x03\xbe\x07\x62"
+ "\xe9\x16\x69\x7e\xae\x38\x0e\x9b"
+ "\xad\x6e\x83\x90\x21\x10\xb8\x07"
+ "\xdc\xc1\x44\x20\xa5\x88\x00\xdc"
+ "\xe1\x82\x16\xf1\x0c\xdc\xed\x8c"
+ "\x32\xb5\x49\xab\x11\x41\xd5\xd2"
+ "\x35\x2c\x70\x73\xce\xeb\xe3\xd6"
+ "\xe4\x7d\x2c\xe8\x8c\xec\x8a\x92"
+ "\x50\x87\x51\xbd\x2d\x9d\xf2\xf0"
+ "\x3c\x7d\xb1\x87\xf5\x01\xb0\xed"
+ "\x02\x5a\x20\x4d\x43\x08\x71\x49"
+ "\x77\x72\x9b\xe6\xef\x30\xc9\xa2"
+ "\x66\x66\xb8\x68\x9d\xdf\xc6\x16"
+ "\xa5\x78\xee\x3c\x47\xa6\x7a\x31"
+ "\x07\x6d\xce\x7b\x86\xf8\xb2\x31"
+ "\xa8\xa4\x77\x3c\x63\x36\xe8\xd3"
+ "\x7d\x40\x56\xd8\x48\x56\x9e\x3e"
+ "\x56\xf6\x3d\xd2\x12\x6e\x35\x29"
+ "\xd4\x7a\xdb\xff\x97\x4c\xeb\x3c"
+ "\x28\x2a\xeb\xe9\x43\x40\x61\x06"
+ "\xb8\xa8\x6d\x18\xc8\xbc\xc7\x23"
+ "\x53\x2b\x8b\xcc\xce\x88\xdf\xf8"
+ "\xff\xf8\x94\xe4\x5c\xee\xcf\x39"
+ "\xe0\xf6\x1a\xae\xf2\xd5\x41\x6a"
+ "\x09\x5a\x50\x66\xc4\xf4\x66\xdc"
+ "\x6a\x69\xee\xc8\x47\xe6\x87\x52"
+ "\x9e\x28\xe4\x39\x02\x0d\xc4\x7e"
+ "\x18\xe6\xc6\x09\x07\x03\x30\xb9"
+ "\xd1\xb0\x48\xe6\x80\xe8\x8c\xe6"
+ "\xc7\x2c\x33\xca\x64\xe5\xc0\x6e"
+ "\xac\x14\x4b\xe1\xf6\xeb\xce\xe4"
+ "\xc1\x8c\xea\x5b\x8d\x3c\x86\x91"
+ "\xd1\xd7\x16\x9c\x09\x9c\x6a\x51"
+ "\xe5\xcd\xe3\xb0\x33\x1f\x03\xcd"
+ "\xe5\xd8\x40\x9b\xdc\x29\xbe\xfa"
+ "\x24\xcc\xf1\x55\x68\x3a\x89\x0d"
+ "\x08\x48\xfd\x9b\x47\x41\x10\xae"
+ "\x53\x3a\x83\x87\xd4\x89\xe7\x38"
+ "\x47\xee\xd7\xbe\xe2\x58\x37\xd2"
+ "\xfc\x21\x1d\x20\xa5\x2d\x69\x0c"
+ "\x36\x5b\x2f\xcd\xa1\xa6\xe4\xa1"
+ "\x00\x4d\xf7\xc8\x2d\xc7\x16\x6c"
+ "\x6d\xad\x32\x8c\x8f\x74\xf9\xfa"
+ "\x78\x1c\x9a\x0f\x6e\x93\x9c\x20"
+ "\x43\xb9\xe4\xda\xc4\xc7\x90\x47"
+ "\x86\x68\xb7\x6f\x82\x59\x4a\x30"
+ "\xf1\xfd\x31\x0f\xa1\xea\x9b\x6b"
+ "\x18\x5c\x39\xb0\xc7\x80\x64\xff"
+ "\x6d\x5b\xb4\x8b\xba\x90\xea\x4e"
+ "\x9a\x04\xd2\x68\x18\x50\xb5\x91"
+ "\x45\x4f\x58\x5a\xe5\xc6\x7c\xab"
+ "\x61\x3e\x3d\xec\x18\x87\xfc\xea"
+ "\x26\x35\x4c\x99\x8a\x3f\x00\x7b"
+ "\xf5\x89\x62\xda\xdd\xf1\x43\xef"
+ "\x2c\x1d\x92\xfa\x9a\xd0\x37\x03"
+ "\x69\x9c\xd8\x1f\x41\x44\xb7\x73"
+ "\x54\x14\x91\x12\x41\x41\x54\xa2"
+ "\x91\x55\xb6\xf7\x23\x41\xc9\xc2"
+ "\x5b\x53\xf2\x61\x63\x0d\xa9\x87"
+ "\x1a\xbb\x11\x1f\x3c\xbb\xa8\x1f"
+ "\xe2\x66\x56\x88\x06\x3c\xd2\x0f"
+ "\x3b\xc4\xd6\x8c\xbe\x54\x9f\xa8"
+ "\x9c\x89\xfb\x88\x05\xef\xcd\xe7"
+ "\xc1\xc4\x21\x36\x22\x8d\x9a\x5d"
+ "\x1b\x1e\x4a\xc0\x89\xdd\x76\x16"
+ "\x5a\xce\xcd\x1e\x6a\x1f\xa0\x2b"
+ "\x83\xf6\x5e\x28\x8e\x65\xb5\x86"
+ "\x72\x8f\xc5\xf2\x54\x81\x10\x8d"
+ "\x63\x7b\x42\x7d\x06\x08\x16\xb3"
+ "\xb0\x60\x65\x41\x49\xdb\x0d\xc1"
+ "\xe2\xef\x72\x72\x06\xe7\x60\x5c"
+ "\x95\x1c\x7d\x52\xec\x82\xee\xd3"
+ "\x5b\xab\x61\xa4\x1f\x61\x64\x0c"
+ "\x28\x32\x21\x7a\x81\xe7\x81\xf3"
+ "\xdb\xc0\x18\xd9\xae\x0b\x3c\x9a"
+ "\x58\xec\x70\x4f\x40\x25\x2b\xba"
+ "\x96\x59\xac\x34\x45\x29\xc6\x57"
+ "\xc1\xc3\x93\x60\x77\x92\xbb\x83"
+ "\x8a\xa7\x72\x45\x2a\xc9\x35\xe7"
+ "\x66\xd6\xa9\xe9\x43\x87\x20\x11"
+ "\x6a\x2f\x87\xac\xe0\x93\x82\xe5"
+ "\x6c\x57\xa9\x4c\x9e\x56\x57\x33"
+ "\x1c\xd8\x7e\x25\x27\x41\x89\x97"
+ "\xea\xa5\x56\x02\x5b\x93\x13\x46"
+ "\xdc\x53\x3d\x95\xef\xaf\x9f\xf0"
+ "\x0a\x8a\xfe\x0c\xbf\xf0\x25\x5f"
+ "\xb4\x9f\x1b\x72\x9c\x37\xba\x46"
+ "\x4e\xcc\xcc\x02\x5c\xec\x3f\x98"
+ "\xff\x56\x1a\xc2\x7a\x65\x8f\xf6"
+ "\xd2\x81\x37\x7a\x0a\xfc\x79\xb9"
+ "\xcb\x8c\xc8\x1a\xd0\xba\x5d\x55"
+ "\xbc\x6d\x2e\xb2\x2f\x75\x29\x3f"
+ "\x1a\x4b\xa8\xd7\xe8\xf6\xf4\x2a"
+ "\xa5\xa1\x68\xec\xf3\xd5\xdd\x0f"
+ "\xad\x57\xae\x98\x83\xd5\x92\x4e"
+ "\x76\x86\x8e\x5e\x4b\x87\x7b\xf7"
+ "\x2d\x79\x3f\x12\x6a\x24\x58\xc8"
+ "\xab\x9a\x65\x75\x82\x6f\xa5\x39"
+ "\x72\xb0\xdf\x93\xb5\xa2\xf3\xdd"
+ "\x1f\x32\xfa\xdb\xfe\x1b\xbf\x0a"
+ "\xd9\x95\xdd\x02\xf1\x23\x54\xb1"
+ "\xa5\xbb\x24\x04\x5c\x2a\x97\x92"
+ "\xe6\xe0\x10\x61\xe3\x46\xc7\x0c"
+ "\xcb\xbc\x51\x9a\x35\x16\xd9\x42"
+ "\x62\xb3\x5e\xa4\x3c\x84\xa0\x7f"
+ "\xb8\x7f\x70\xd1\x8b\x03\xdf\x27"
+ "\x32\x06\x3f\x12\x23\x19\x22\x82"
+ "\x2d\x37\xa5\x00\x31\x9b\xa9\x21"
+ "\x8e\x34\x8c\x8e\x4f\xe8\xd4\x63"
+ "\x6c\xb2\xa9\x6e\xf6\x7c\x96\xf1"
+ "\x0e\x64\xab\x14\x3d\x8f\x74\xb3"
+ "\x35\x79\x84\x78\x06\x68\x97\x30"
+ "\xe0\x22\x55\xd6\xc5\x5b\x38\xb2"
+ "\x75\x24\x0c\x52\xb6\x57\xcc\x0a"
+ "\xbd\x3c\xd0\x73\x47\xd1\x25\xd6"
+ "\x1c\xfd\x27\x05\x3f\x70\xe1\xa7"
+ "\x69\x3b\xee\xc9\x9f\xfd\x2a\x7e"
+ "\xab\x58\xe6\x0b\x35\x5e\x52\xf9"
+ "\xff\xac\x5b\x82\x88\xa7\x65\xbc"
+ "\x61\x29\xdc\xa1\x94\x42\xd1\xd3"
+ "\xa0\xd8\xba\x3b\x49\xc8\xa7\xce"
+ "\x01\x6c\xb7\x3f\xe3\x98\x4d\xd1"
+ "\x9f\x46\x0d\xb3\xf2\x43\x33\x49"
+ "\xb7\x27\xbd\xba\xcc\x3f\x09\x56"
+ "\xfa\x64\x18\xb8\x17\x28\xde\x0d"
+ "\x29\xfa\x1f\xad\x60\x3b\x90\xa7"
+ "\x05\x9f\x4c\xc4\xdc\x05\x3b\x17"
+ "\x58\xea\x99\xfd\x6b\x8a\x93\x77"
+ "\xa5\x44\xbd\x8d\x29\x44\x29\x89"
+ "\x52\x1d\x89\x8b\x44\x8f\xb9\x68"
+ "\xeb\x93\xfd\x92\xd9\x14\x35\x9c"
+ "\x28\x3a\x9f\x1d\xd8\xe0\x2a\x76"
+ "\x51\xc1\xf0\xa9\x1d\xb4\xf8\xb9"
+ "\xfc\x14\x78\x5a\xa2\xb1\xdb\x94"
+ "\xcb\x18\xb9\x34\xbd\x0c\x65\x1d"
+ "\x64\xde\xd0\x3a\xe4\x68\x0e\xbc"
+ "\x13\xa7\x47\x89\x62\xa3\x03\x19"
+ "\x64\xa1\x02\x27\x3a\x8d\x43\xfa"
+ "\x68\xff\xda\x8b\x40\xe9\x19\x8b"
+ "\x56\xbe\x1c\x9b\xe6\xf6\x3f\x60"
+ "\xdb\x7a\xd5\xab\x82\xd8\xd9\x99"
+ "\xe3\x5b\x0c\x0c\x69\x18\x5c\xed"
+ "\x03\xf9\xc1\x61\xc4\x7b\xd4\x90"
+ "\x43\xc3\x39\xec\xac\xcb\x1f\x4b"
+ "\x23\xf8\xa9\x98\x2f\xf6\x48\x90"
+ "\x6c\x2b\x94\xad\x14\xdd\xcc\xa2"
+ "\x3d\xc7\x86\x0f\x7f\x1c\x0b\x93"
+ "\x4b\x74\x1f\x80\x75\xb4\x91\xdf"
+ "\xa8\x26\xf9\x06\x2b\x3a\x2c\xfd"
+ "\x3c\x31\x40\x1e\x5b\xa6\x86\x01"
+ "\xc4\xa2\x80\x4f\xf5\xa2\xf4\xff"
+ "\xf6\x07\x8c\x92\xf7\x74\xbd\x42"
+ "\xb0\x3f\x6b\x05\xca\x40\xeb\x04"
+ "\x20\xa9\x37\x78\x32\x03\x60\xcc"
+ "\xf3\xec\xb2\x2d\xb5\x80\x7c\xe4"
+ "\x37\x53\x25\xd1\xe8\x91\x6a\xe5"
+ "\xdf\xdd\xb0\xab\x69\xc7\xa1\xb2"
+ "\xfc\xb3\xd1\x9e\xda\xa8\x0d\x68"
+ "\xfe\x7d\xdc\x56\x33\x65\x99\xd2"
+ "\xec\xa5\xa0\xa1\x26\xc9\xec\xbd"
+ "\x22\x20\x5e\x0d\xcb\x93\x64\x7a"
+ "\x56\x75\xed\xe5\x45\xa2\xbd\x16"
+ "\x59\xf7\x43\xd9\x5b\x2c\xdd\xb6"
+ "\x1d\xa8\x05\x89\x2f\x65\x2e\x66"
+ "\xfe\xad\x93\xeb\x85\x8f\xe8\x4c"
+ "\x00\x44\x71\x03\x0e\x26\xaf\xfd"
+ "\xfa\x56\x0f\xdc\x9c\xf3\x2e\xab"
+ "\x88\x26\x61\xc6\x13\xfe\xba\xc1"
+ "\xd8\x8a\x38\xc3\xb6\x4e\x6d\x80"
+ "\x4c\x65\x93\x2f\xf5\x54\xff\x63"
+ "\xbe\xdf\x9a\xe3\x4f\xca\xc9\x71"
+ "\x12\xab\x95\x66\xec\x09\x64\xea"
+ "\xdc\x9f\x01\x61\x24\x88\xd1\xa7"
+ "\xd0\x69\x26\xf0\x80\xb0\xec\x86"
+ "\xc2\x58\x2f\x6a\xc5\xfd\xfc\x2a"
+ "\xf6\x3e\x23\x77\x3b\x7e\xc5\xc5"
+ "\xe7\xf9\x4d\xcc\x68\x53\x11\xc8"
+ "\x5b\x44\xbd\x48\x0f\xb3\x35\x1a"
+ "\x93\x4a\x80\x16\xa3\x0d\x50\x85"
+ "\xa6\xc4\xd4\x74\x4d\x87\x59\x51"
+ "\xd7\xf7\x7d\xee\xd0\x9b\xd1\x83"
+ "\x25\x2b\xc6\x39\x27\x6a\xb3\x41"
+ "\x5f\xd2\x24\xd4\xd6\xfa\x8c\x3e"
+ "\xb2\xf9\x11\x71\x7a\x9e\x5e\x7b"
+ "\x5b\x9a\x47\x80\xca\x1c\xbe\x04"
+ "\x5d\x34\xc4\xa2\x2d\x41\xfe\x73"
+ "\x53\x15\x9f\xdb\xe7\x7d\x82\x19"
+ "\x21\x1b\x67\x2a\x74\x7a\x21\x4a"
+ "\xc4\x96\x6f\x00\x92\x69\xf1\x99"
+ "\x50\xf1\x4a\x16\x11\xf1\x16\x51",
+ .ctext = "\x2c\xf5\x4c\xc9\x99\x19\x83\x84"
+ "\x09\xbc\xe6\xad\xbe\xb6\x6b\x1b"
+ "\x75\x0b\x3d\x33\x10\xb4\x8b\xf7"
+ "\xa7\xc7\xba\x9f\x6e\xd7\xc7\xfd"
+ "\x58\xef\x24\xf4\xdc\x26\x3f\x35"
+ "\x02\x98\xf2\x8c\x96\xca\xfc\xca"
+ "\xca\xfa\x27\xe6\x23\x1f\xf0\xc7"
+ "\xe3\x46\xbf\xca\x7b\x4e\x24\xcd"
+ "\xd0\x13\x3f\x80\xd6\x5b\x0b\xdc"
+ "\xad\xc6\x49\x77\xd7\x58\xf5\xfd"
+ "\x58\xba\x72\x0d\x9e\x0b\x63\xc3"
+ "\x86\xac\x06\x97\x70\x42\xec\x3a"
+ "\x0d\x53\x27\x17\xbd\x3e\xcb\xe0"
+ "\xaa\x19\xb4\xfe\x5d\x1b\xcb\xd7"
+ "\x99\xc3\x19\x45\x6f\xdf\x64\x44"
+ "\x9f\xf8\x55\x1b\x72\x8d\x78\x51"
+ "\x3c\x83\x48\x8f\xaf\x05\x60\x7d"
+ "\x22\xce\x07\x53\xfd\x91\xcf\xfa"
+ "\x5f\x86\x66\x3e\x72\x67\x7f\xc1"
+ "\x49\x82\xc7\x1c\x91\x1e\x48\xcd"
+ "\x5e\xc6\x5f\xd9\xc9\x43\x88\x35"
+ "\x80\xba\x91\xe1\x54\x4b\x14\xbe"
+ "\xbd\x75\x48\xb8\xde\x22\x64\xb5"
+ "\x8c\xcb\x5e\x92\x99\x8f\x4a\xab"
+ "\x00\x6c\xb4\x2e\x03\x3b\x0e\xee"
+ "\x4d\x39\x05\xbc\x94\x80\xbb\xb2"
+ "\x36\x16\xa3\xd9\x8f\x61\xd7\x67"
+ "\xb5\x90\x46\x85\xe1\x4e\x71\x84"
+ "\xd0\x84\xc0\xc0\x8f\xad\xdb\xeb"
+ "\x44\xf4\x66\x35\x3f\x92\xa2\x05"
+ "\xa4\x9c\xb8\xdc\x77\x6c\x85\x34"
+ "\xd2\x6a\xea\x32\xb8\x08\xf6\x13"
+ "\x78\x1e\x29\xef\x12\x54\x16\x28"
+ "\x25\xf8\x32\x0e\x4f\x94\xe6\xb3"
+ "\x0b\x97\x79\x97\xb3\xb0\x37\x61"
+ "\xa4\x10\x6f\x15\x9c\x7d\x22\x41"
+ "\xe2\xd7\xa7\xa0\xfc\xc5\x62\x55"
+ "\xed\x68\x39\x7b\x09\xd2\x17\xaa"
+ "\xf2\xb8\xc9\x1d\xa2\x23\xfd\xaa"
+ "\x9c\x57\x16\x0d\xe3\x63\x3c\x2b"
+ "\x13\xdd\xa2\xf0\x8e\xd3\x02\x81"
+ "\x09\xba\x80\x02\xdb\x97\xfe\x0f"
+ "\x77\x8d\x18\xf1\xf4\x59\x27\x79"
+ "\xa3\x46\x88\xda\x51\x67\xd0\xe9"
+ "\x5d\x22\x98\xc1\xe4\xea\x08\xda"
+ "\xf7\xb9\x16\x71\x36\xbd\x43\x8a"
+ "\x4b\x6e\xf3\xaa\xb0\xba\x1a\xbc"
+ "\xaa\xca\xde\x5c\xc0\xa5\x11\x6d"
+ "\x8a\x8f\xcc\x04\xfc\x6c\x89\x75"
+ "\x4b\x2c\x29\x6f\x41\xc7\x6e\xda"
+ "\xea\xa6\xaf\xb0\xb1\x46\x9e\x30"
+ "\x5e\x11\x46\x07\x3b\xd6\xaa\x36"
+ "\xa4\x01\x84\x1d\xb9\x8e\x58\x9d"
+ "\xa9\xb6\x1c\x56\x5c\x5a\xde\xfa"
+ "\x66\x96\xe6\x29\x26\xd4\x68\xd0"
+ "\x1a\xcb\x98\xbb\xce\x19\xbb\x87"
+ "\x00\x6c\x59\x17\xe3\xd1\xe6\x5c"
+ "\xd0\x98\xe1\x91\xc4\x28\xaf\xbf"
+ "\xbb\xdf\x75\x4e\xd9\x9d\x99\x0f"
+ "\xc6\x0c\x03\x24\x3e\xb6\xd7\x3f"
+ "\xd5\x43\x4a\x47\x26\xab\xf6\x3f"
+ "\x7f\xf1\x15\x0c\xde\x68\xa0\x5f"
+ "\x63\xf9\xe2\x5e\x5d\x42\xf1\x36"
+ "\x38\x90\x06\x18\x84\xf2\xfa\x81"
+ "\x36\x33\x29\x18\xaa\x8c\x49\x0e"
+ "\xda\x27\x38\x9c\x12\x8b\x83\xfa"
+ "\x40\xd0\xb6\x0a\x72\x85\xf0\xc7"
+ "\xaa\x5f\x30\x1a\x6f\x45\xe4\x35"
+ "\x4c\xf3\x4c\xe4\x1c\xd7\x48\x77"
+ "\xdd\x3e\xe4\x73\x44\xb1\xb8\x1c"
+ "\x42\x40\x90\x61\xb1\x6d\x8b\x20"
+ "\x2d\x30\x63\x01\x26\x71\xbc\x5a"
+ "\x76\xce\xc1\xfb\x13\xf9\x4c\x6e"
+ "\x7a\x16\x8a\x53\xcb\x07\xaa\xa1"
+ "\xba\xd0\x68\x7a\x2d\x25\x48\x85"
+ "\xb7\x6b\x0a\x05\xf2\xdf\x0e\x46"
+ "\x4e\xc8\xcd\x59\x5b\x9a\x2e\x9e"
+ "\xdb\x4a\xf6\xfd\x7b\xa4\x5c\x4d"
+ "\x78\x8d\xe7\xb0\x84\x3f\xf0\xc1"
+ "\x47\x39\xbf\x1e\x8c\xc2\x11\x0d"
+ "\x90\xd1\x17\x42\xb3\x50\xeb\xaa"
+ "\xcd\xc0\x98\x36\x84\xd0\xfe\x75"
+ "\xf8\x8f\xdc\xa0\xa1\x53\xe5\x8c"
+ "\xf2\x0f\x4a\x31\x48\xae\x3d\xaf"
+ "\x19\x4b\x75\x2e\xc1\xe3\xcd\x4d"
+ "\x2c\xa4\x54\x7b\x4d\x5e\x93\xa2"
+ "\xe7\x1f\x34\x19\x9f\xb2\xbf\x22"
+ "\x65\x1a\x03\x48\x12\x66\x50\x3e"
+ "\x0e\x5d\x60\x29\x44\x69\x90\xee"
+ "\x9d\x8b\x55\x78\xdf\x63\x31\xc3"
+ "\x1b\x21\x7d\x06\x21\x86\x60\xb0"
+ "\x9d\xdb\x3d\xcc\xe2\x20\xf4\x88"
+ "\x20\x62\x2e\xe8\xa9\xea\x42\x41"
+ "\xb0\xab\x73\x61\x40\x39\xac\x11"
+ "\x55\x27\x51\x5f\x11\xef\xb1\x23"
+ "\xff\x81\x99\x86\x0c\x6f\x16\xaf"
+ "\xf6\x89\x86\xd8\xf6\x41\xc2\x80"
+ "\x21\xf4\xd5\x6d\xef\xa3\x0c\x4d"
+ "\x59\xfd\xdc\x93\x1a\x4f\xe6\x22"
+ "\x83\x40\x0c\x98\x67\xba\x7c\x93"
+ "\x0b\xa9\x89\xfc\x3e\xff\x84\x12"
+ "\x3e\x27\xa3\x8a\x48\x17\xd6\x08"
+ "\x85\x2f\xf1\xa8\x90\x90\x71\xbe"
+ "\x44\xd6\x34\xbf\x74\x52\x0a\x17"
+ "\x39\x64\x78\x1a\xbc\x81\xbe\xc8"
+ "\xea\x7f\x0b\x5a\x2c\x77\xff\xac"
+ "\xdd\x37\x35\x78\x09\x28\x29\x4a"
+ "\xd1\xd6\x6c\xc3\xd5\x70\xdd\xfc"
+ "\x21\xcd\xce\xeb\x51\x11\xf7\xbc"
+ "\x12\x43\x1e\x6c\xa1\xa3\x79\xe6"
+ "\x1d\x63\x52\xff\xf0\xbb\xcf\xec"
+ "\x56\x58\x63\xe2\x21\x0b\x2d\x5c"
+ "\x64\x09\xf3\xee\x05\x42\x34\x93"
+ "\x38\xa8\x60\xea\x1d\x95\x90\x65"
+ "\xad\x2f\xda\x1d\xdd\x21\x1a\xf1"
+ "\x94\xe0\x6a\x81\xa1\xd3\x63\x31"
+ "\x45\x73\xce\x54\x4e\xb1\x75\x26"
+ "\x59\x18\xc2\x31\x73\xe6\xf5\x7d"
+ "\x06\x5b\x65\x67\xe5\x69\x90\xdf"
+ "\x27\x6a\xbf\x81\x7d\x92\xbe\xd1"
+ "\x4e\x0b\xa8\x18\x94\x72\xe1\xd0"
+ "\xb6\x2a\x16\x08\x7a\x34\xb8\xf2"
+ "\xe1\xac\x08\x66\xe6\x78\x66\xfd"
+ "\x36\xbd\xee\xc6\x71\xa4\x09\x4e"
+ "\x3b\x09\xf2\x8e\x3a\x90\xba\xa0"
+ "\xc2\x1d\x9f\xad\x52\x0e\xc9\x10"
+ "\x99\x40\x90\xd5\x7d\x73\x56\xef"
+ "\x48\x1e\x56\x5c\x7d\x3c\xcb\x84"
+ "\x10\x0a\xcc\xda\xce\xad\xd8\xa8"
+ "\x79\xc7\x29\x95\x31\x3b\xd9\x9b"
+ "\xb6\x84\x3e\x03\x74\xc5\x76\xba"
+ "\x4b\xd9\x4f\x7c\xc4\x5f\x7f\x70"
+ "\xc5\xe3\x6e\xd0\x14\x32\xec\x60"
+ "\xb0\x69\x78\xb7\xef\xda\x5a\xe7"
+ "\x4e\x50\x97\xd4\x94\x58\x67\x57"
+ "\x4e\x7c\x75\xe0\xcf\x8d\xe1\x78"
+ "\x97\x52\xc8\x73\x81\xf9\xb6\x02"
+ "\x54\x72\x6d\xc0\x70\xff\xe2\xeb"
+ "\x6c\xe1\x30\x0a\x94\xd0\x55\xec"
+ "\xed\x61\x9c\x6d\xd9\xa0\x92\x62"
+ "\x4e\xfd\xd8\x79\x27\x02\x4e\x13"
+ "\xb2\x04\xba\x00\x9a\x77\xed\xc3"
+ "\x5b\xa4\x22\x02\xa9\xed\xaf\xac"
+ "\x4f\xe1\x74\x73\x51\x36\x78\x8b"
+ "\xdb\xf5\x32\xfd\x0d\xb9\xcb\x15"
+ "\x4c\xae\x43\x72\xeb\xbe\xc0\xf8"
+ "\x91\x67\xf1\x4f\x5a\xd4\xa4\x69"
+ "\x8f\x3e\x16\xd2\x09\x31\x72\x5a"
+ "\x5e\x0a\xc4\xbc\x44\xd4\xbb\x82"
+ "\x7a\xdf\x52\x25\x8c\x45\xdc\xe4"
+ "\xe0\x71\x84\xe4\xe0\x3d\x59\x30"
+ "\x5b\x94\x12\x33\x78\x85\x90\x84"
+ "\x52\x05\x33\xa7\xa7\x16\xe0\x4d"
+ "\x6a\xf7\xfa\x03\x98\x6c\x4f\xb0"
+ "\x06\x66\x06\xa1\xdd\x3c\xbe\xbb"
+ "\xb2\x62\xab\x64\xd3\xbf\x2c\x30"
+ "\x0e\xfc\xd9\x95\x32\x32\xf3\x3b"
+ "\x39\x7e\xda\x62\x62\x0f\xc3\xfe"
+ "\x55\x76\x09\xf5\x8a\x09\x91\x93"
+ "\x32\xea\xbc\x2b\x0b\xcf\x1d\x65"
+ "\x48\x33\xba\xeb\x0f\xd4\xf9\x3b"
+ "\x1e\x90\x74\x6d\x93\x52\x61\x81"
+ "\xa3\xf2\xb5\xea\x1d\x61\x86\x68"
+ "\x00\x40\xcc\x58\xdd\xf2\x64\x01"
+ "\xab\xfd\x94\xc0\xa3\x83\x83\x33"
+ "\xa4\xb0\xb8\xd3\x9d\x08\x3c\x7f"
+ "\x8e\xa8\xaf\x87\xa5\xe7\xcd\x36"
+ "\x92\x96\xdc\xa1\xf2\xea\xe6\xd1"
+ "\x1e\xe9\x65\xa4\xff\xda\x17\x96"
+ "\xad\x91\x4a\xc5\x26\xb4\x1d\x1c"
+ "\x2b\x50\x48\x26\xc8\x86\x3f\x05"
+ "\xb8\x87\x1b\x3f\xee\x2e\x55\x61"
+ "\x0d\xdc\xcf\x56\x0e\xe2\xcc\xda"
+ "\x87\xee\xc5\xcd\x0e\xf4\xa4\xaf"
+ "\x8a\x02\xee\x16\x0b\xc4\xdd\x6d"
+ "\x80\x3e\xf3\xfe\x95\xb4\xfe\x97"
+ "\x0d\xe2\xab\xbb\x27\x84\xee\x25"
+ "\x39\x74\xb0\xfb\xdc\x5a\x0f\x65"
+ "\x31\x2a\x89\x08\xa4\x8c\x9f\x25"
+ "\x5f\x93\x83\x39\xda\xb4\x22\x17"
+ "\xbd\xd2\x0d\xfc\xde\xf8\x00\x34"
+ "\xc2\x48\x55\x06\x4c\x8b\x79\xe5"
+ "\xba\x0c\x50\x4f\x98\xa3\x59\x3d"
+ "\xc4\xec\xd1\x85\xf3\x60\x41\x16"
+ "\x0a\xe2\xf4\x38\x33\x24\xc1\xe0"
+ "\x0d\x86\x1f\x5a\xd2\xba\x7c\x5f"
+ "\x97\x60\x54\xa3\x52\x31\x78\x57"
+ "\x7a\xc0\xc7\x1e\xd4\x11\x8f\xef"
+ "\x86\x0a\x60\x26\x4a\x8f\x06\xf7"
+ "\x1f\x47\x45\x6e\x87\x13\x15\xf3"
+ "\x91\x08\xbf\x2a\x6e\x71\x21\x8e"
+ "\x92\x90\xde\x01\x97\x81\x46\x87"
+ "\x8a\xfc\xab\x12\x0c\x60\x3e\x9d"
+ "\xbd\x40\x0a\x45\x3f\x5b\x83\x04"
+ "\xb5\x8f\x42\x78\x68\xfe\x3a\xd1"
+ "\x59\xf7\x12\xaa\x86\x86\x1c\x77"
+ "\xfc\xc6\x64\x47\x0f\x7e\xd3\xbc"
+ "\x95\x90\x23\xb3\x60\xdc\x0d\xf4"
+ "\x67\xe6\x32\xee\xad\xbf\x60\x07"
+ "\xbd\xdb\x6e\x3f\x55\x88\xdb\x93"
+ "\x62\x41\xd6\xeb\x34\xd6\xa3\x96"
+ "\xd2\xbc\x29\xaa\x75\x65\x41\x9f"
+ "\x70\x43\xbb\x6d\xd9\xa5\x95\x22"
+ "\x3e\xf9\x07\xa0\x7d\x75\xba\xb8"
+ "\xcd\x81\x3b\x94\x01\x19\xc3\x67"
+ "\x9d\xa4\x7f\xa0\x99\xcc\x4a\xc4"
+ "\xfa\x76\x3f\xab\x5c\xea\x26\xdf"
+ "\xa2\x4c\x5b\x11\x55\xa3\x6a\x70"
+ "\xcb\xbc\x93\x11\x48\x38\x73\x7a"
+ "\x40\xbf\xbc\x04\x05\xb0\x2d\x9b"
+ "\x9a\x23\x57\xa5\xf6\x63\xfa\xc7"
+ "\xd8\x4d\xc2\xc0\xf8\xbd\xfb\x7d"
+ "\xea\x20\xa2\xe0\x4d\xaa\x63\x1e"
+ "\x9a\xa2\xed\x54\xe6\x49\xaf\x52"
+ "\xaf\x7e\x94\x57\x19\x07\x06\x74"
+ "\x57\x5b\x62\x61\x99\x20\xe7\x95"
+ "\x14\x19\xcf\x42\x83\x6a\x94\xf5"
+ "\xab\xa7\xf2\x48\xf6\x0b\x40\x3d"
+ "\x93\x8d\x3d\x14\x5d\xf2\x45\x2c"
+ "\xac\x1c\x0b\x12\xc9\x56\x3f\x7c"
+ "\x17\xeb\x1d\xed\x7e\x5c\xaa\x37"
+ "\xe3\xb4\x56\xf9\x0e\xb9\x8e\xc8"
+ "\x16\x70\x3e\xff\x95\xb9\x89\x9c"
+ "\x19\x0d\x0d\x48\xbd\xb9\xe3\x73"
+ "\xdf\x4e\x67\x9d\x93\x6c\x0b\x75"
+ "\x8a\x2d\x89\x5c\x32\x9d\x75\x05"
+ "\xd9\x13\xbe\x14\x5f\xf0\xb7\xb4"
+ "\xd9\x2c\x02\x22\x41\xf2\x9c\x1f"
+ "\xc1\x8c\xf5\x6a\x8c\xd5\xa5\x6b"
+ "\x54\x47\xec\x3a\x76\x08\xf6\xf7"
+ "\xed\x7c\x7e\x3b\x55\xb8\xa9\x20"
+ "\xa6\xec\x2d\x8c\x03\x38\x9d\x74"
+ "\xe9\x36\xe7\x05\x40\xec\xf4\xa1"
+ "\xa7\x70\xa7\x6f\x1f\x93\xc2\x1d"
+ "\x2c\x4e\x5f\xe8\x04\x6d\x91\x67"
+ "\x23\xd9\x47\xb4\xf6\xbc\x35\x25"
+ "\x1b\xa8\xe1\x17\xa8\x21\x38\xd8"
+ "\x7a\x55\xd9\xc6\x6f\x0a\x1b\xcb"
+ "\xde\xf8\x1e\x20\x8c\xa1\x14\x49"
+ "\x49\x00\x00\x31\x0f\xa8\x24\x67"
+ "\x97\x7a\x1f\x04\xb9\x6b\x60\xd0"
+ "\x32\xc3\xf4\xf9\x4f\xb2\xfd\x7b"
+ "\xf9\xb3\x43\xd8\x23\xaa\x21\x37"
+ "\x9e\x91\xc5\xa4\xce\xd8\xe4\xf5"
+ "\x55\x3e\xc9\xe4\xc5\x51\xd3\x4d"
+ "\xc6\x83\xe9\x23\x8e\x3e\x21\xe0"
+ "\x40\x23\x4e\x2b\x2d\x89\xc4\x5d"
+ "\x58\xdc\x43\x03\x8e\x9a\xfb\xef"
+ "\x76\xac\x78\x57\xc3\xb8\xf7\x9f"
+ "\xf5\xb1\xc2\xa4\x0c\xee\x58\x52"
+ "\x45\xdf\x1a\xd9\x0e\xe0\x56\x1f"
+ "\x23\x79\x99\x5f\x34\xad\x9f\x41"
+ "\x67\x2a\xc7\x8b\xe7\x82\x6e\x67"
+ "\x58\xb5\xae\x18\xd7\x2f\x8f\x57"
+ "\x0e\xa4\x21\x3c\x84\x21\x05\x50"
+ "\x57\xb0\xd1\xb1\xc8\x9d\xd4\x44"
+ "\x25\x40\x6b\xd5\x6f\x18\x92\x89"
+ "\x6d\x5b\xe9\x5a\x3c\x74\xc0\x33"
+ "\x2c\x7a\xa7\x99\x71\x4e\x9d\x1b"
+ "\xe1\x1d\xcb\x62\x8b\x3c\x07\x07"
+ "\x67\xf6\xa6\x54\x10\x72\x3f\xea"
+ "\xe5\xcd\xe6\xf1\xeb\x3d\x43\x0b"
+ "\xfe\x4b\xc7\x1d\x3d\xd9\xa3\xe2"
+ "\x9b\x79\x47\xc7\xab\x28\xcc\x4d"
+ "\xa8\x77\x9c\xec\xef\x56\xf8\x92"
+ "\x07\x48\x1b\x21\x04\xa8\x24\xb0"
+ "\x82\x7d\xd1\x17\xa4\xaf\x5f\xfa"
+ "\x92\xbf\x6a\xb7\x7e\xc7\xb7\x75"
+ "\x40\x3c\x14\x09\x57\xae\xe0\x4e"
+ "\xf8\xc9\xda\x1e\x5d\x27\xc4\x8c"
+ "\x27\xe3\x4d\xe3\x55\x8c\xd2\xef"
+ "\x0c\xab\x67\x53\x96\xd3\x48\xfb"
+ "\x75\x4f\x74\x9e\xcb\x82\xa4\x96"
+ "\x91\x41\x48\xaa\x65\xdb\x34\x72"
+ "\xc9\xee\xa2\x77\x8b\x6e\x44\x12"
+ "\x4e\x51\x51\xc3\xf5\xef\x6a\x50"
+ "\x99\x26\x41\x1e\x66\xa4\x2b\xb9"
+ "\x21\x15\x38\xc2\x0b\x7f\x37\xb6"
+ "\x89\x8b\x27\x70\xae\xa1\x90\x28"
+ "\x04\xe7\xd5\x17\xcb\x60\x99\xb4"
+ "\xe2\xd7\x04\xd3\x11\x27\x86\xe4"
+ "\xd0\x0d\x36\x04\x68\xe0\xb4\x71"
+ "\xe8\x86\x4b\x9f\xa3\xd2\xda\x87"
+ "\xc2\x2c\xad\x66\xfa\x53\x18\xf8"
+ "\xec\x10\x74\xc5\xb6\x53\x09\x93"
+ "\x21\x09\xbd\x77\x2d\x2a\x12\x4c"
+ "\x86\xfe\x50\x8e\xd1\x16\xab\xb1"
+ "\xfd\xd7\x87\xde\xc3\x6f\x7c\x16"
+ "\xe2\x88\x3d\x41\xac\x36\x7e\xf8"
+ "\xc2\x3b\x46\xd5\x44\x3d\x9d\xe8"
+ "\xe9\x0c\xb7\xb3\xc6\xb9\xe5\xe7"
+ "\x27\x17\x78\x03\xd4\xda\xe4\x73"
+ "\x38\x34\xe7\x53\x29\xc4\xcb\x93"
+ "\xc9\xa1\x10\x8a\xb2\xfc\x0b\x07"
+ "\x47\xb8\xb1\x13\x49\x86\x24\x8b"
+ "\x10\xb1\xd9\x5f\xbb\xd8\x90\x37"
+ "\x06\x03\xe0\x76\xff\x19\x1a\x16"
+ "\xd8\x2d\xa7\x4a\xea\x22\x64\xbe"
+ "\xed\x1c\xc8\x33\xb4\xf4\xb1\x48"
+ "\x95\xb5\x2f\xaa\x05\xc7\x03\xa0"
+ "\xf1\xa4\xf3\x63\x4b\xbe\x79\xb9"
+ "\x4b\x67\x7e\x4e\x3e\x81\x8f\xef"
+ "\xe9\x55\x99\x30\xd0\x26\xec\x5d"
+ "\x89\xb6\x3f\x28\x38\x81\x7a\x00"
+ "\x89\x85\xb8\xff\x19\x0f\x8f\x5d"
+ "\x5c\x6d\x6a\x3d\x6c\xb9\xfb\x7c"
+ "\x0c\x4b\x7e\xbc\x0c\xc4\xad\xbb"
+ "\x0a\x8b\xc8\x48\xb7\xfa\x4d\x53"
+ "\x82\x10\xd6\x29\x58\x83\x50\x3c"
+ "\xd4\x5a\xfd\x14\xa3\xb5\x88\xfb"
+ "\x23\xee\xc9\xcc\xab\x92\x52\xb3"
+ "\x0b\x07\xf3\x1e\x9a\x2a\x2e\x35"
+ "\x32\x37\xa5\x86\xd0\xe5\x5f\xdd"
+ "\x3d\x67\x70\xb4\x9a\xc9\x93\xdc"
+ "\x31\x33\xe3\x3a\xc5\xcf\xd9\x44"
+ "\x2f\x3f\x87\xb2\x0c\x36\x55\x17"
+ "\xa9\xda\xb1\xca\x00\x09\x87\xe6"
+ "\x66\x34\xb3\x9f\x52\x37\x98\x10"
+ "\x2e\x5d\xa4\x14\x7f\x63\xa6\xcd"
+ "\x6c\x2d\x7c\x74\x4c\xae\x9c\x65"
+ "\xe0\x79\xc0\xd6\xc3\xfe\xa8\xf4"
+ "\x1a\x4f\xf5\xbc\xea\x7a\x92\x40"
+ "\x51\xa7\x05\x45\x40\xd8\x9c\x3c"
+ "\xde\x5f\x0b\x6e\x10\x5c\x1c\xdc"
+ "\xd2\x65\x60\xbb\x70\x68\x5c\xa9"
+ "\x59\x25\x0e\x4e\x93\xb8\x49\x89"
+ "\xf6\xae\xeb\x1f\x8b\x56\xc8\x56"
+ "\xb0\xb5\xc9\xee\xa5\x15\x07\x4d"
+ "\x8a\xcc\xad\x04\x4d\x99\x8c\x49"
+ "\x8d\x7c\xe0\xa5\x7d\x7f\x33\x61"
+ "\xf2\xfc\xe7\x88\x3f\x2b\x73\xab"
+ "\x2e\x38\x17\x48\xa9\x86\xdd\x81"
+ "\x21\x45\xbc\x98\x1d\xe5\xa5\xbc"
+ "\x0d\x0b\x18\x8e\x86\x1e\x76\x0a"
+ "\x30\x12\x21\xf0\x51\xed\xc1\xcd"
+ "\x9a\xf1\x7e\x7e\x64\xb2\xa3\xd6"
+ "\x37\xe7\xc6\xde\x97\xb9\x5d\x05"
+ "\xf5\x50\xe2\x0a\xaa\x68\x16\xa6"
+ "\x26\x9c\x7d\xff\x4c\x05\xce\x48"
+ "\xa7\xff\x10\x19\x5e\xef\x46\x54"
+ "\xec\xe4\x7b\xb6\x12\x23\xae\x93"
+ "\x4f\x79\xf8\x3c\x1c\x07\x15\x66"
+ "\x07\xc1\x52\xde\x7f\xda\x51\x7b"
+ "\xfe\x13\x67\xab\x8d\x56\xdc\xc1"
+ "\x70\x4b\x13\xd2\x30\x00\xc1\x97"
+ "\x22\xa7\x83\xf8\x18\xd9\x6d\x40"
+ "\x54\xe0\xc1\xdb\x3e\x83\x73\x12"
+ "\xe1\x48\x49\xb9\xd4\x20\x0c\x06"
+ "\x1c\x82\xb5\xbe\x5a\xae\x60\x5e"
+ "\xe2\x09\xba\x05\xbb\x9a\x80\x63"
+ "\xf2\xc4\x4b\x41\x39\x16\x76\x26"
+ "\xb1\x03\x06\x23\x65\x37\x33\x92"
+ "\xca\xf9\x72\xf5\xcd\x95\xc1\xc0"
+ "\x91\x5a\xfd\x28\xb9\x62\x59\x84"
+ "\x87\x9d\x82\xcb\xe0\x67\x7c\x26"
+ "\xb8\x00\x16\xd9\x5d\xb4\x74\xd4"
+ "\x75\x8c\x75\xf8\x87\x3b\xa8\x77"
+ "\xcd\x82\x3d\x7b\xb9\x63\x44\x0f"
+ "\x44\x83\x55\x5b\xc7\xdc\x18\x0b"
+ "\x8c\x36\xb3\x59\xeb\x58\x13\x38"
+ "\x4b\x8a\xb7\xa3\x9a\xa2\xf3\xeb"
+ "\xc6\x30\x84\x86\x0a\xcf\x8b\xfa"
+ "\x36\x66\x26\xbc\xd0\x96\xa3\xb4"
+ "\x8d\x6b\xf7\x5b\x75\x59\xbb\xd3"
+ "\x14\x78\x57\x2f\x27\xa8\x95\xcf"
+ "\xa2\xa5\x76\x28\xbd\xab\x8b\x59"
+ "\x04\x91\x8a\xc5\x3c\xc3\xa7\xcf"
+ "\xe0\xfb\xdd\x7a\xbb\x10\xde\x36"
+ "\x43\x1c\x59\xf7\x41\xb6\xa5\x80"
+ "\x72\x7b\xe3\x7a\xa3\x01\xc3\x8c"
+ "\x7e\xf3\xf2\x42\x1a\x0c\x7e\xf3"
+ "\xfc\x5b\x6e\x1f\x20\xf1\x32\x76"
+ "\x83\x71\x36\x3e\x7e\xa7\xf7\xdd"
+ "\x25\x2e\xe6\x04\xe2\x5b\x44\xb5"
+ "\x16\xfb\xdf\x9b\x46\x2a\xa8\x81"
+ "\x89\x15\x3e\xb5\xb0\x09\x40\x33"
+ "\x60\xc7\x37\x63\x14\x09\xc1\x6e"
+ "\x56\x52\xbe\xe4\x88\xe0\x75\xbc"
+ "\x49\x62\x8c\xf1\xdf\x62\xe6\xac"
+ "\xd5\x87\xf7\xc9\x92\x52\x36\x59"
+ "\x22\x6f\x31\x99\x76\xdb\x41\xb6"
+ "\x26\x91\x79\x7e\xd2\x78\xaf\x07"
+ "\x78\x4b\xed\x54\x30\xb2\xff\xbc"
+ "\x2c\x0a\x1a\xbe\xbf\xd5\x5a\x4d"
+ "\xd1\xbc\x30\xc2\xf4\xf1\xc1\x9e"
+ "\x9a\x96\x89\x00\x50\xfc\xf6\xaf"
+ "\xfa\x60\xbf\x1a\x32\x8f\x57\x36"
+ "\x2f\x02\xb7\x28\x50\xc3\xd3\xfd"
+ "\x6b\xc4\xe6\xbb\xc9\xec\xed\x86"
+ "\xdf\x27\x45\x2c\x0c\x6d\x65\x3b"
+ "\x6e\x63\x96\xc7\xd6\xb5\xb2\x05"
+ "\x8b\xe0\x02\x2a\xfa\x20\x0c\x82"
+ "\xa5\x45\x75\x12\x01\x40\xff\x3e"
+ "\xfd\xfc\xfb\xbc\x30\x49\xe8\x99"
+ "\x8d\x48\x8e\x49\x65\x2a\xe3\xa5"
+ "\x06\xe3\x22\x68\x3b\xd9\xa4\xcf"
+ "\x84\x6f\xfa\x2b\xb1\xd8\x8c\x30"
+ "\xd5\x5d\x0c\x63\x32\x59\x28\x6e"
+ "\x2a\x60\xa4\x57\x12\xf8\xc2\x95"
+ "\x0a\xf6\xc6\x48\x23\xce\x72\x40"
+ "\x0d\x75\xa0\xd4\x48\x03\xf5\xc4"
+ "\xcd\x26\xe7\x83\xcc\x0d\xcf\x7f"
+ "\x22\x5f\x91\xb3\x42\x02\x9a\x26"
+ "\x12\x26\x68\x12\x25\x0b\x08\x61"
+ "\xcb\x25\x86\x95\xfc\x57\x4d\xb6"
+ "\x36\x6c\xb4\xdc\xa9\x2d\x76\x7f"
+ "\x25\x06\xa2\x08\x69\x09\xd9\x09"
+ "\x3c\x40\xe1\xfd\x30\x8f\xc2\x13"
+ "\x92\xd4\xb5\x3b\x0c\xb2\x32\x4f"
+ "\x10\xc9\x1a\x41\xa6\xb2\x11\xf6"
+ "\x3b\x1b\x88\x56\xbf\x61\x3c\xb2"
+ "\xe6\xdb\x24\x9a\x55\x7e\x35\xf8"
+ "\x82\x5e\x52\xe3\xf2\xb3\x40\x1c"
+ "\xdd\xe3\x29\x37\xe0\x85\x08\x8b"
+ "\xb2\x8b\x09\x38\xac\xa9\x85\xe5"
+ "\x9e\x36\xb8\x95\x0b\x84\x9d\x10"
+ "\xcc\xae\xe2\x06\x56\x3c\x85\xce"
+ "\xc0\xdc\x36\x59\x17\xf9\x48\xf4"
+ "\x5b\x08\x8e\x86\x00\xa0\xf5\xdd"
+ "\x0c\xb6\x63\xfd\x5a\xe5\x1e\xa6"
+ "\x0a\xef\x76\xc2\xc7\x9b\x96\x2f"
+ "\x66\x2b\x7d\x50\xa6\x0c\x42\xc6"
+ "\xa5\x05\x05\x10\xeb\xd8\xda\x15"
+ "\x03\xbe\x2f\x24\x34\x8f\x84\xd8"
+ "\x58\xb8\xa3\xf2\x63\xc8\xc3\xf6"
+ "\xc2\xde\x27\x58\x69\xf9\x07\xca"
+ "\x12\x3e\xe2\xf4\xc8\x29\x60\x30"
+ "\x2f\x87\xf4\x50\xc2\x25\xcc\xfd"
+ "\xdc\x76\x4f\x56\x1c\xb2\xd9\x78"
+ "\x11\x6b\x6e\xb4\x67\xbf\x25\xc4"
+ "\xae\x7d\x50\x7f\xb2\x5c\x69\x26"
+ "\xed\x6b\xd2\x3b\x42\x64\xe3\x0c"
+ "\x15\xa6\xd1\xb6\x3e\x23\x76\x09"
+ "\x48\xd2\x08\x41\x76\xc9\x7d\x5f"
+ "\x50\x5d\x8e\xf9\x04\x96\xed\x3a"
+ "\xf8\x7c\x3b\x7d\x84\xba\xea\xe6"
+ "\x24\xd2\x0f\x7f\x5a\x0b\x6f\xd9"
+ "\x33\x14\x67\xfb\x9f\xe7\x44\x4e"
+ "\x3b\x4b\x06\xaa\xb4\x7a\x8b\x83"
+ "\x82\x74\xa6\x5e\x10\xea\xd6\x4b"
+ "\x56\x32\xd7\x79\x7c\x05\xf4\x64"
+ "\x9c\x64\x25\x9c\xc2\xda\x21\x9a"
+ "\xd8\xde\x37\x83\x3f\xd8\x83\xa2"
+ "\x1e\x3c\x1e\x41\x7e\xf2\x97\x84"
+ "\xe5\xa2\x02\x2b\x6e\xc5\xd7\x91"
+ "\x24\x66\xc1\xf0\x05\x1c\x0f\x3d"
+ "\xcf\x63\x94\x10\x2e\x0e\x89\xda"
+ "\x0d\xe9\x58\x2a\x48\x0c\xc8\x36"
+ "\xc4\x7b\xf0\xd3\xe2\x5b\xf1\xf6"
+ "\xad\x3d\xe7\x25\x6b\x83\x08\x5c"
+ "\xd9\x79\xde\x93\x37\x93\x92\x46"
+ "\xe7\xf4\x1c\x9e\x94\x91\x30\xd9"
+ "\xb6\x57\xf1\x04\xb5\x2f\xe3\xb9"
+ "\x0a\x78\xfe\xcb\xb5\x31\xc1\xc6"
+ "\x99\xb3\xaf\x73\xfb\x69\xcb\x49"
+ "\xd2\xec\xea\xd3\x0f\x45\x13\x23"
+ "\xc8\xae\x92\x29\xce\x71\xd0\xba"
+ "\xcf\xfd\xb2\x14\x61\xfd\xf6\x7b"
+ "\xdf\x05\xe5\xbb\x58\xf7\x41\x3b"
+ "\x6e\xd2\x14\x28\x7c\x15\xb7\x70"
+ "\xca\xc7\x7a\xd7\x4e\x4b\x35\x6e"
+ "\x9e\x09\x24\x33\xaf\xca\x41\x1f"
+ "\x0d\xe3\xf1\x7c\x35\xcb\xe2\x0a"
+ "\xb2\xeb\x94\x7a\xbc\x53\xd7\xe1"
+ "\x5e\xbc\xa1\x55\xef\x3c\x37\xef"
+ "\x6d\xfe\x3a\xcd\xcf\x48\x36\x26"
+ "\xdb\x3e\x44\xdd\xc8\x03\xa6\xa6"
+ "\x85\xb5\xfe\xf3\xec\x44\xb3\x22"
+ "\x9d\x21\x82\xc6\x0b\x1a\x7c\xc6"
+ "\xf7\xa9\x8e\x7e\x13\x1a\x85\x1f"
+ "\x93\x81\x38\x47\xc0\x83\x21\xa3"
+ "\xde\xec\xc0\x8f\x4c\x3b\x57\x2f"
+ "\x92\xbb\x66\xe3\x24\xeb\xae\x1e"
+ "\xb3\x18\x57\xf2\xf3\x4a\x50\x52"
+ "\xe9\x91\x08\x1f\x85\x44\xc1\x07"
+ "\xa1\xd3\x62\xe9\xe0\x82\x38\xfd"
+ "\x27\x3f\x7e\x10\x7d\xaf\xa1\x7a"
+ "\xf0\xaa\x79\xee\x6e\xa2\xc0\xbb"
+ "\x01\xda\xfb\xc4\x85\x26\x85\x31"
+ "\x15\xf4\x3c\xe0\x96\x79\x0e\xd7"
+ "\x50\x68\x37\x57\xb5\x31\xf7\x3c"
+ "\xbd\xaa\xcc\x2c\x8f\x57\x59\xa5"
+ "\xd4\x4b\xc6\x45\xc0\x32\x3d\x85"
+ "\x6d\xee\xf4\x6b\x63\xf9\x3a\xfb"
+ "\x2f\xdb\xb8\x42\x19\x8e\x88\x1f"
+ "\xfd\x7d\x0b\x69\x14\x8f\x36\xb2"
+ "\xd9\x27\x34\x53\x9c\x52\x00\x94"
+ "\xcc\x8b\x37\x82\xaf\x8e\xb3\xc0"
+ "\x8a\xcf\x44\xc6\x3a\x19\xbe\x1f"
+ "\x23\x33\x68\xc4\xb6\xbb\x13\x20"
+ "\xec\x6a\x87\x5b\xc2\x7c\xd3\x04"
+ "\x34\x97\x32\xd5\x11\x02\x06\x45"
+ "\x98\x0b\xaa\xab\xbe\xfb\xd0\x2c"
+ "\x0e\xf1\x8b\x7f\x1c\x70\x85\x67"
+ "\x60\x50\x66\x79\xbb\x45\x21\xc4"
+ "\xb5\xd3\xb9\x4f\xe5\x41\x49\x86"
+ "\x6b\x20\xef\xac\x16\x74\xe9\x23"
+ "\xa5\x2d\x5c\x2b\x85\xb2\x33\xe8"
+ "\x2a\xd1\x24\xd1\x5b\x9b\x7f\xfc"
+ "\x2f\x3b\xf7\x6a\x8b\xde\x55\x7e"
+ "\xda\x13\x1b\xd6\x90\x74\xb0\xbe"
+ "\x46\x0d\xcf\xc7\x78\x33\x31\xdc"
+ "\x6a\x6a\x50\x3e\x4c\xe2\xab\x48"
+ "\xbc\x4e\x7d\x62\xb9\xfc\xdd\x85"
+ "\x1c\x5d\x93\x15\x5e\x01\xd9\x2b"
+ "\x48\x71\x82\xd6\x44\xd6\x0e\x92"
+ "\x6e\x75\xc9\x3c\x1d\x31\x18\x6f"
+ "\x8b\xd7\x18\xf3\x09\x08\x45\xb1"
+ "\x3e\xa4\x25\xc6\x34\x48\xaf\x42"
+ "\x77\x33\x03\x65\x3e\x2f\xff\x8f"
+ "\xe9\xe1\xa0\xfe\xb2\xc3\x80\x77"
+ "\x20\x05\xe4\x9b\x47\x3b\xb2\xbd",
+ .len = 4096,
}
};
@@ -34582,8 +32473,6 @@ static const struct hash_testvec crc32_tv_template[] = {
"\xe9\xea\xeb\xec\xed\xee\xef\xf0",
.psize = 240,
.digest = "\x6c\xc6\x56\xde",
- .np = 2,
- .tap = { 31, 209 }
}, {
.key = "\xff\xff\xff\xff",
.ksize = 4,
@@ -35023,8 +32912,6 @@ static const struct hash_testvec crc32c_tv_template[] = {
"\xe9\xea\xeb\xec\xed\xee\xef\xf0",
.psize = 240,
.digest = "\x75\xd3\xc5\x24",
- .np = 2,
- .tap = { 31, 209 }
}, {
.key = "\xff\xff\xff\xff",
.ksize = 4,
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 022d3dd76c3b..f8e1d9f9938f 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -25,8 +25,9 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#define TGR192_DIGEST_SIZE 24
#define TGR160_DIGEST_SIZE 20
@@ -468,10 +469,9 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
u64 a, b, c, aa, bb, cc;
u64 x[8];
int i;
- const __le64 *ptr = (const __le64 *)data;
for (i = 0; i < 8; i++)
- x[i] = le64_to_cpu(ptr[i]);
+ x[i] = get_unaligned_le64(data + i * sizeof(__le64));
/* save */
a = aa = tctx->a;
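
The tgr192.c hunk above swaps a cast-plus-le64_to_cpu() load for get_unaligned_le64(), because the data pointer handed to the transform is not guaranteed to be 8-byte aligned. The following is a minimal, hypothetical sketch (not part of the patch) contrasting the two access patterns; the helper names load_word_aligned_only/load_word_any_alignment are illustrative only.

/*
 * Sketch, assuming a little-endian 64-bit word stored at byte offset
 * i * 8 inside an arbitrarily aligned input buffer.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static u64 load_word_aligned_only(const u8 *data, int i)
{
	/* Only safe if data is 8-byte aligned; may fault or be slow otherwise */
	const __le64 *ptr = (const __le64 *)data;

	return le64_to_cpu(ptr[i]);
}

static u64 load_word_any_alignment(const u8 *data, int i)
{
	/* Works for any alignment on all architectures */
	return get_unaligned_le64(data + i * sizeof(__le64));
}
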
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4f9f99057ff8..45f9decb9848 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -228,4 +228,6 @@ source "drivers/siox/Kconfig"
source "drivers/slimbus/Kconfig"
+source "drivers/interconnect/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e1ce029d28fd..bb15b9d0e793 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -186,3 +186,4 @@ obj-$(CONFIG_MULTIPLEXER) += mux/
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
+obj-$(CONFIG_INTERCONNECT) += interconnect/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7b65a807b3dd..4e015c77e48e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -10,6 +10,7 @@ menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on ARCH_SUPPORTS_ACPI
select PNP
+ select NLS
default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
@@ -356,6 +357,16 @@ config ACPI_TABLE_UPGRADE
initrd, therefore it's safe to say Y.
See Documentation/acpi/initrd_table_override.txt for details
+config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
+ bool "Override ACPI tables from built-in initrd"
+ depends on ACPI_TABLE_UPGRADE
+ depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION=""
+ help
+ This option provides functionality to override arbitrary ACPI tables
+ from built-in uncompressed initrd.
+
+ See Documentation/acpi/initrd_table_override.txt for details
+
config ACPI_DEBUG
bool "Debug Statements"
help
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7c6afc111d76..bb857421c2e8 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -41,7 +41,8 @@ acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
-acpi-y += acpi_lpss.o acpi_apd.o
+acpi-$(CONFIG_PCI) += acpi_lpss.o
+acpi-y += acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index a2dcd62ea32f..4a434c23a196 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -750,48 +750,36 @@ static const struct acpi_debugger_ops acpi_aml_debugger = {
int __init acpi_aml_init(void)
{
- int ret = 0;
-
- if (!acpi_debugfs_dir) {
- ret = -ENOENT;
- goto err_exit;
- }
+ int ret;
/* Initialize AML IO interface */
mutex_init(&acpi_aml_io.lock);
init_waitqueue_head(&acpi_aml_io.wait);
acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
+
acpi_aml_dentry = debugfs_create_file("acpidbg",
S_IFREG | S_IRUGO | S_IWUSR,
acpi_debugfs_dir, NULL,
&acpi_aml_operations);
- if (acpi_aml_dentry == NULL) {
- ret = -ENODEV;
- goto err_exit;
- }
- ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
- if (ret)
- goto err_fs;
- acpi_aml_initialized = true;
-err_fs:
+ ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
if (ret) {
debugfs_remove(acpi_aml_dentry);
acpi_aml_dentry = NULL;
+ return ret;
}
-err_exit:
- return ret;
+
+ acpi_aml_initialized = true;
+ return 0;
}
void __exit acpi_aml_exit(void)
{
if (acpi_aml_initialized) {
acpi_unregister_debugger(&acpi_aml_debugger);
- if (acpi_aml_dentry) {
- debugfs_remove(acpi_aml_dentry);
- acpi_aml_dentry = NULL;
- }
+ debugfs_remove(acpi_aml_dentry);
+ acpi_aml_dentry = NULL;
acpi_aml_initialized = false;
}
}
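
The acpi_dbg.c hunk above can drop the NULL checks around the debugfs calls because debugfs_remove() is a no-op when passed a NULL (or error) dentry, so an unconditional remove in the error and exit paths is safe. A small hypothetical sketch of that idiom, not taken from the patch (example_dentry, example_fops and the function names are made up for illustration):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static struct dentry *example_dentry;

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static void example_debugfs_init(void)
{
	/* A failed create just means the debug file is absent; no check needed */
	example_dentry = debugfs_create_file("example", 0600, NULL, NULL,
					     &example_fops);
}

static void example_debugfs_exit(void)
{
	debugfs_remove(example_dentry);	/* safe even if creation failed */
	example_dentry = NULL;
}
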
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index f0b52266b3ac..d73afb562ad9 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2124,21 +2124,29 @@ static int __init intel_opregion_present(void)
return opregion;
}
+/* Check if the chassis-type indicates there is no builtin LCD panel */
static bool dmi_is_desktop(void)
{
const char *chassis_type;
+ unsigned long type;
chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
if (!chassis_type)
return false;
- if (!strcmp(chassis_type, "3") || /* 3: Desktop */
- !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
- !strcmp(chassis_type, "5") || /* 5: Pizza Box */
- !strcmp(chassis_type, "6") || /* 6: Mini Tower */
- !strcmp(chassis_type, "7") || /* 7: Tower */
- !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
+ if (kstrtoul(chassis_type, 10, &type) != 0)
+ return false;
+
+ switch (type) {
+ case 0x03: /* Desktop */
+ case 0x04: /* Low Profile Desktop */
+ case 0x05: /* Pizza Box */
+ case 0x06: /* Mini Tower */
+ case 0x07: /* Tower */
+ case 0x10: /* Lunch Box */
+ case 0x11: /* Main Server Chassis */
return true;
+ }
return false;
}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 5a9c2febc0fb..863ade9add6d 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2018 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 8bc935977d8e..54f81eac7ec9 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index 4ebe18826646..d5478cd4a857 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 57d9495e5933..32f2e38c7570 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -16,7 +16,8 @@
#include "acdisasm.h"
#endif
-#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */
+#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */
+#define ACPI_DEBUG_LENGTH_FORMAT " (%.4X bits, %.3X bytes)"
struct acpi_db_command_info {
const char *name; /* Command Name */
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index e577f3a40e6a..82f81501566b 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index b412aa909907..831660179662 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 87d6eb01beaf..d056a1845613 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -164,6 +164,7 @@ ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
+ACPI_GLOBAL(u8, acpi_gbl_verbose_leak_dump);
#endif
/*****************************************************************************
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index ef99e2fc37f8..bcf8f7501db7 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index c5b2be0b6613..20706adbc148 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 99b0da899109..a2dfbf6b004e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -802,7 +802,7 @@ struct acpi_comment_addr_node {
/*
* File node - used for "Include" operator file stack and
- * depdendency tree for the -ca option
+ * dependency tree for the -ca option
*/
struct acpi_file_node {
void *file;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index de52cd6e868a..283614e82a20 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -462,7 +462,7 @@
#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
/*
- * Macors used for the ASL-/ASL+ converter utility
+ * Macros used for the ASL-/ASL+ converter utility
*/
#ifdef ACPI_ASL_COMPILER
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9bd25f36c608..39812fc4386a 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index ac992b6ebce9..b2ef703d7df8 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -239,6 +239,7 @@ struct acpi_object_region_field {
union acpi_operand_object *region_obj; /* Containing op_region object */
u8 *resource_buffer; /* resource_template for serial regions/fields */
u16 pin_number_index; /* Index relative to previous Connection/Template */
+ u8 *internal_pcc_buffer; /* Internal buffer for fields associated with PCC */
};
struct acpi_object_bank_field {
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 818eba413614..9d78134428e3 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index ab48196ae55e..6e32c97cba6c 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index d31bb04facb6..387163b962a7 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -631,6 +631,21 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_MTL", METHOD_0ARGS, /* ACPI 6.0 */
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+ {{"_NBS", METHOD_0ARGS, /* ACPI 6.3 */
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_NCH", METHOD_0ARGS, /* ACPI 6.3 */
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_NIC", METHOD_0ARGS, /* ACPI 6.3 */
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_NIG", METHOD_1ARGS(ACPI_TYPE_BUFFER), /* ACPI 6.3 */
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+ {{"_NIH", METHOD_0ARGS, /* ACPI 6.3 */
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
{{"_NTT", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 59ae8b1a6e40..422cd8f2b92e 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 14be32961b4c..8a4e6b4aaf2c 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 12fac33ce77e..dfbf1dbd4033 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 3374d41582b5..9022537567e9 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 6c05355447c1..49e412edd7c6 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index cdb590176e9d..7c3bd4ab60fc 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 9fcb8ec64681..30ab62b0fec8 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -237,7 +237,7 @@ void acpi_db_decode_and_display_object(char *target, char *output_type)
default:
- /* Is not a recognizeable object */
+ /* Is not a recognizable object */
acpi_os_printf
("Not a known ACPI internal object, descriptor type %2.2X\n",
@@ -647,7 +647,7 @@ void acpi_db_display_object_type(char *object_arg)
*
* DESCRIPTION: Display the result of an AML opcode
*
- * Note: Curently only displays the result object if we are single stepping.
+ * Note: Currently only displays the result object if we are single stepping.
* However, this output may be useful in other contexts and could be enabled
* to do so if needed.
*
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index 6abb6b834d97..bb43305cb215 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -160,12 +160,12 @@ acpi_db_execute_method(struct acpi_db_method_info *info,
}
ACPI_EXCEPTION((AE_INFO, status,
- "while executing %s from debugger",
+ "while executing %s from AML Debugger",
info->pathname));
if (status == AE_BUFFER_OVERFLOW) {
ACPI_ERROR((AE_INFO,
- "Possible overflow of internal debugger "
+ "Possible buffer overflow within AML Debugger "
"buffer (size 0x%X needed 0x%X)",
ACPI_DEBUG_BUFFER_SIZE,
(u32)return_obj->length));
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index b0b9a26c7db5..7809bd94a18d 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 992bd7b92540..004d34d9369b 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -904,7 +904,7 @@ acpi_db_bus_walk(acpi_handle obj_handle,
*
* RETURN: None
*
- * DESCRIPTION: Display info about system busses.
+ * DESCRIPTION: Display info about system buses.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index a1c76bf21122..d220168dca01 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -243,7 +243,7 @@ acpi_db_display_internal_object(union acpi_operand_object *obj_desc,
acpi_os_printf("[%s] ",
acpi_ut_get_reference_name(obj_desc));
- /* Decode the refererence */
+ /* Decode the reference */
switch (obj_desc->reference.class) {
case ACPI_REFCLASS_LOCAL:
diff --git a/drivers/acpi/acpica/dbtest.c b/drivers/acpi/acpica/dbtest.c
index 8a5462439a97..6db44a5ac786 100644
--- a/drivers/acpi/acpica/dbtest.c
+++ b/drivers/acpi/acpica/dbtest.c
@@ -10,6 +10,7 @@
#include "acdebug.h"
#include "acnamesp.h"
#include "acpredef.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbtest")
@@ -33,6 +34,9 @@ acpi_db_test_string_type(struct acpi_namespace_node *node, u32 byte_length);
static acpi_status acpi_db_test_package_type(struct acpi_namespace_node *node);
static acpi_status
+acpi_db_test_field_unit_type(union acpi_operand_object *obj_desc);
+
+static acpi_status
acpi_db_read_from_object(struct acpi_namespace_node *node,
acpi_object_type expected_type,
union acpi_object **value);
@@ -74,7 +78,7 @@ static struct acpi_db_argument_info acpi_db_test_types[] = {
static acpi_handle read_handle = NULL;
static acpi_handle write_handle = NULL;
-/* ASL Definitions of the debugger read/write control methods */
+/* ASL Definitions of the debugger read/write control methods. AML below. */
#if 0
definition_block("ssdt.aml", "SSDT", 2, "Intel", "DEBUG", 0x00000001)
@@ -227,10 +231,8 @@ static void acpi_db_test_all_objects(void)
* RETURN: Status
*
* DESCRIPTION: Test one namespace object. Supported types are Integer,
- * String, Buffer, buffer_field, and field_unit. All other object
- * types are simply ignored.
- *
- * Note: Support for Packages is not implemented.
+ * String, Buffer, Package, buffer_field, and field_unit.
+ * All other object types are simply ignored.
*
******************************************************************************/
@@ -240,7 +242,6 @@ acpi_db_test_one_object(acpi_handle obj_handle,
{
struct acpi_namespace_node *node;
union acpi_operand_object *obj_desc;
- union acpi_operand_object *region_obj;
acpi_object_type local_type;
u32 bit_length = 0;
u32 byte_length = 0;
@@ -281,18 +282,21 @@ acpi_db_test_one_object(acpi_handle obj_handle,
break;
case ACPI_TYPE_FIELD_UNIT:
- case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
+ local_type = ACPI_TYPE_FIELD_UNIT;
+ break;
+
+ case ACPI_TYPE_BUFFER_FIELD:
+ /*
+ * The returned object will be a Buffer if the field length
+ * is larger than the size of an Integer (32 or 64 bits
+ * depending on the DSDT version).
+ */
local_type = ACPI_TYPE_INTEGER;
if (obj_desc) {
- /*
- * Returned object will be a Buffer if the field length
- * is larger than the size of an Integer (32 or 64 bits
- * depending on the DSDT version).
- */
bit_length = obj_desc->common_field.bit_length;
byte_length = ACPI_ROUND_BITS_UP_TO_BYTES(bit_length);
if (bit_length > acpi_gbl_integer_bit_width) {
@@ -303,7 +307,7 @@ acpi_db_test_one_object(acpi_handle obj_handle,
default:
- /* Ignore all other types */
+ /* Ignore all non-data types - Methods, Devices, Scopes, etc. */
return (AE_OK);
}
@@ -314,40 +318,10 @@ acpi_db_test_one_object(acpi_handle obj_handle,
acpi_ut_get_type_name(node->type), node->name.ascii);
if (!obj_desc) {
- acpi_os_printf(" Ignoring, no attached object\n");
+ acpi_os_printf(" No attached sub-object, ignoring\n");
return (AE_OK);
}
- /*
- * Check for unsupported region types. Note: acpi_exec simulates
- * access to system_memory, system_IO, PCI_Config, and EC.
- */
- switch (node->type) {
- case ACPI_TYPE_LOCAL_REGION_FIELD:
-
- region_obj = obj_desc->field.region_obj;
- switch (region_obj->region.space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- case ACPI_ADR_SPACE_SYSTEM_IO:
- case ACPI_ADR_SPACE_PCI_CONFIG:
-
- break;
-
- default:
-
- acpi_os_printf
- (" %s space is not supported in this command [%4.4s]\n",
- acpi_ut_get_region_name(region_obj->region.
- space_id),
- region_obj->region.node->name.ascii);
- return (AE_OK);
- }
- break;
-
- default:
- break;
- }
-
/* At this point, we have resolved the object to one of the major types */
switch (local_type) {
@@ -371,6 +345,11 @@ acpi_db_test_one_object(acpi_handle obj_handle,
status = acpi_db_test_package_type(node);
break;
+ case ACPI_TYPE_FIELD_UNIT:
+
+ status = acpi_db_test_field_unit_type(obj_desc);
+ break;
+
default:
acpi_os_printf(" Ignoring, type not implemented (%2.2X)",
@@ -382,24 +361,8 @@ acpi_db_test_one_object(acpi_handle obj_handle,
if (ACPI_FAILURE(status)) {
status = AE_OK;
- goto exit;
- }
-
- switch (node->type) {
- case ACPI_TYPE_LOCAL_REGION_FIELD:
-
- region_obj = obj_desc->field.region_obj;
- acpi_os_printf(" (%s)",
- acpi_ut_get_region_name(region_obj->region.
- space_id));
-
- break;
-
- default:
- break;
}
-exit:
acpi_os_printf("\n");
return (status);
}
@@ -444,7 +407,7 @@ acpi_db_test_integer_type(struct acpi_namespace_node *node, u32 bit_length)
return (status);
}
- acpi_os_printf(" (%4.4X/%3.3X) %8.8X%8.8X",
+ acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " %8.8X%8.8X",
bit_length, ACPI_ROUND_BITS_UP_TO_BYTES(bit_length),
ACPI_FORMAT_UINT64(temp1->integer.value));
@@ -558,8 +521,9 @@ acpi_db_test_buffer_type(struct acpi_namespace_node *node, u32 bit_length)
/* Emit a few bytes of the buffer */
- acpi_os_printf(" (%4.4X/%3.3X)", bit_length, temp1->buffer.length);
- for (i = 0; ((i < 4) && (i < byte_length)); i++) {
+ acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT, bit_length,
+ temp1->buffer.length);
+ for (i = 0; ((i < 8) && (i < byte_length)); i++) {
acpi_os_printf(" %2.2X", temp1->buffer.pointer[i]);
}
acpi_os_printf("... ");
@@ -665,8 +629,9 @@ acpi_db_test_string_type(struct acpi_namespace_node *node, u32 byte_length)
return (status);
}
- acpi_os_printf(" (%4.4X/%3.3X) \"%s\"", (temp1->string.length * 8),
- temp1->string.length, temp1->string.pointer);
+ acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " \"%s\"",
+ (temp1->string.length * 8), temp1->string.length,
+ temp1->string.pointer);
/* Write a new value */
@@ -750,13 +715,80 @@ static acpi_status acpi_db_test_package_type(struct acpi_namespace_node *node)
return (status);
}
- acpi_os_printf(" %8.8X Elements", temp1->package.count);
+ acpi_os_printf(" %.2X Elements", temp1->package.count);
acpi_os_free(temp1);
return (status);
}
/*******************************************************************************
*
+ * FUNCTION: acpi_db_test_field_unit_type
+ *
+ * PARAMETERS: obj_desc - A field unit object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Test read/write on a named field unit.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_db_test_field_unit_type(union acpi_operand_object *obj_desc)
+{
+ union acpi_operand_object *region_obj;
+ u32 bit_length = 0;
+ u32 byte_length = 0;
+ acpi_status status = AE_OK;
+ union acpi_operand_object *ret_buffer_desc;
+
+ /* Supported spaces are memory/io/pci_config */
+
+ region_obj = obj_desc->field.region_obj;
+ switch (region_obj->region.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+
+ /* Need the interpreter to execute */
+
+ acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
+ acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+
+ /* Exercise read-then-write */
+
+ status =
+ acpi_ex_read_data_from_field(NULL, obj_desc,
+ &ret_buffer_desc);
+ if (status == AE_OK) {
+ acpi_ex_write_data_to_field(ret_buffer_desc, obj_desc,
+ NULL);
+ acpi_ut_remove_reference(ret_buffer_desc);
+ }
+
+ acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+
+ bit_length = obj_desc->common_field.bit_length;
+ byte_length = ACPI_ROUND_BITS_UP_TO_BYTES(bit_length);
+
+ acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " [%s]", bit_length,
+ byte_length,
+ acpi_ut_get_region_name(region_obj->region.
+ space_id));
+ return (status);
+
+ default:
+
+ acpi_os_printf
+ (" %s address space is not supported in this command [%4.4s]",
+ acpi_ut_get_region_name(region_obj->region.space_id),
+ region_obj->region.node->name.ascii);
+ return (AE_OK);
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_read_from_object
*
* PARAMETERS: node - Parent NS node for the object
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 6b15625e8099..85b34d02233e 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 0da96268deb5..4847f89c678c 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 9d33f0bb2885..0d3e1ced1f57 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 30fe89545d6a..cf4e061bb0f0 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -518,6 +518,13 @@ acpi_ds_create_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+ if (info.region_node->object->region.space_id ==
+ ACPI_ADR_SPACE_PLATFORM_COMM
+ && !(region_node->object->field.internal_pcc_buffer =
+ ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
+ length))) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index e8de1b0ce2f5..a4a24ffe5fae 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index c1a4d02fafd5..f59b4d944f7f 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 6a9cc613adaa..179129a2deb1 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 78f9de260d5f..10f32b62608e 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -130,8 +130,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Must have a valid (>0) bit count */
if (bit_count == 0) {
- ACPI_ERROR((AE_INFO,
- "Attempt to CreateField of length zero"));
+ ACPI_BIOS_ERROR((AE_INFO,
+ "Attempt to CreateField of length zero"));
status = AE_AML_OPERAND_VALUE;
goto cleanup;
}
@@ -194,12 +194,13 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Entire field must fit within the current length of the buffer */
if ((bit_offset + bit_count) > (8 * (u32)buffer_desc->buffer.length)) {
- ACPI_ERROR((AE_INFO,
- "Field [%4.4s] at bit offset/length %u/%u "
- "exceeds size of target Buffer (%u bits)",
- acpi_ut_get_node_name(result_desc), bit_offset,
- bit_count, 8 * (u32)buffer_desc->buffer.length));
status = AE_AML_BUFFER_LIMIT;
+ ACPI_BIOS_EXCEPTION((AE_INFO, status,
+ "Field [%4.4s] at bit offset/length %u/%u "
+ "exceeds size of target Buffer (%u bits)",
+ acpi_ut_get_node_name(result_desc),
+ bit_offset, bit_count,
+ 8 * (u32)buffer_desc->buffer.length));
goto cleanup;
}
@@ -355,6 +356,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
union acpi_operand_object *operand_desc;
struct acpi_namespace_node *node;
union acpi_parse_object *next_op;
+ acpi_adr_space_type space_id;
ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op);
@@ -367,6 +369,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
/* next_op points to the op that holds the space_ID */
next_op = op->common.value.arg;
+ space_id = (acpi_adr_space_type)next_op->common.value.integer;
/* next_op points to address op */
@@ -402,6 +405,15 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
obj_desc->region.length = (u32) operand_desc->integer.value;
acpi_ut_remove_reference(operand_desc);
+ /* A zero-length operation region is unusable. Just warn */
+
+ if (!obj_desc->region.length
+ && (space_id < ACPI_NUM_PREDEFINED_REGIONS)) {
+ ACPI_WARNING((AE_INFO,
+ "Operation Region [%4.4s] has zero length (SpaceId %X)",
+ node->name.ascii, space_id));
+ }
+
/*
* Get the address and save it
* (at top of stack - 1)
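
The dsopcode.c hunk above captures the region's space_id early and, once the length operand has been evaluated, merely warns when a predefined-space operation region has zero length rather than failing the evaluation. A hedged sketch of that warn-and-continue check (the constant and names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define NUM_PREDEFINED_REGIONS 0x0B  /* stand-in for ACPI_NUM_PREDEFINED_REGIONS */

/* Zero-length regions are unusable but not fatal: report and carry on. */
static void check_region_length(const char *name, uint8_t space_id,
				uint32_t length)
{
	if (!length && space_id < NUM_PREDEFINED_REGIONS)
		fprintf(stderr,
			"Operation Region [%.4s] has zero length (SpaceId %X)\n",
			name, space_id);
}

int main(void)
{
	check_region_length("PCC0", 0x0A, 0);  /* 0x0A = PlatformCommChannel */
	return 0;
}
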
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 584853385268..997faa10f615 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 1504b93cc5f4..d75aae304595 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index e2ef09643d50..c88fd31208a5 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 9a309f5c4de8..935a8e2623e4 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -24,7 +24,7 @@ ACPI_MODULE_NAME("dswload2")
* FUNCTION: acpi_ds_load2_begin_op
*
* PARAMETERS: walk_state - Current state of the parse tree walk
- * out_op - Wher to return op if a new one is created
+ * out_op - Where to return op if a new one is created
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 7592176a8fa2..39acf7b286da 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 4c1ec202d5ab..de79f835a373 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index b3d07cc14d75..9e2f5a05c066 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 1b8a662a14a9..5c77bee5d31f 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index e10fec99a182..62d3aa74277b 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -801,7 +801,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
dispatch.handler->
context);
- /* If requested, clear (if level-triggered) and reenable the GPE */
+ /* If requested, clear (if level-triggered) and re-enable the GPE */
if (return_value & ACPI_REENABLE_GPE) {
(void)acpi_ev_finish_gpe(gpe_event_info);
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index b253063b09d3..328d1d6123ad 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 1f686750bb1a..c92d2f6ebe01 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 0fb6c70f44ed..917892227e09 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 4ed1e67db6be..3ef4e27995f0 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index baadd635b5af..d45f7639f7ee 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 49decca4e08f..45dc797df05d 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -250,7 +250,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/*
* For handlers other than the default (supplied) handlers, we must
* exit the interpreter because the handler *might* block -- we don't
- * know what it will do, so we can't hold the lock on the intepreter.
+ * know what it will do, so we can't hold the lock on the interpreter.
*/
acpi_ex_exit_interpreter();
}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 17df5dacd43c..0b47bbcd2a23 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -516,25 +516,6 @@ acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj)
handler_obj = obj_desc->common_notify.handler;
break;
- case ACPI_TYPE_METHOD:
- /*
- * If we are executing module level code, the original
- * Node's object was replaced by this Method object and we
- * saved the handler in the method object.
- *
- * Note: Only used for the legacy MLC support. Will
- * be removed in the future.
- *
- * See acpi_ns_exec_module_code
- */
- if (!acpi_gbl_execute_tables_as_methods &&
- obj_desc->method.
- info_flags & ACPI_METHOD_MODULE_LEVEL) {
- handler_obj =
- obj_desc->method.dispatch.handler;
- }
- break;
-
default:
/* Ignore other objects */
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index febc332b00ac..3df00eb6621b 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 970e940bdb17..e528fe56b755 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index b2d5f66cc1b0..30a083902f52 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -669,9 +669,9 @@ ACPI_EXPORT_SYMBOL(acpi_dispatch_gpe)
*
* RETURN: Status
*
- * DESCRIPTION: Clear and conditionally reenable a GPE. This completes the GPE
+ * DESCRIPTION: Clear and conditionally re-enable a GPE. This completes the GPE
* processing. Intended for use by asynchronous host-installed
- * GPE handlers. The GPE is only reenabled if the enable_for_run bit
+ * GPE handlers. The GPE is only re-enabled if the enable_for_run bit
* is set in the GPE info.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 3b3a25d9f0e6..47265b073e6f 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 5e75c510ca25..c7af07566b7b 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 2373a7492151..587aeeeb5070 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 1a70b80cc406..ca2966bacb50 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -520,7 +520,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
for (i = 0; i < obj_desc->buffer.length; i++) {
if (base == 16) {
- /* Emit 0x prefix for explict/implicit hex conversion */
+ /* Emit 0x prefix for explicit/implicit hex conversion */
*new_buf++ = '0';
*new_buf++ = 'x';
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3304c6b1e8a7..f376fc00064e 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index ebbc244039ab..b1aeec8cac55 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index f71dfa1e90e1..6526b2deeaad 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index e5798f15793a..d3d2dbfba680 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -41,6 +41,17 @@ const u8 acpi_protocol_lengths[] = {
0xFF /* F - ATTRIB_RAW_PROCESS_BYTES */
};
+#define PCC_MASTER_SUBSPACE 3
+
+/*
+ * The following macros determine whether a given offset is a COMD field.
+ * According to the specification, generic subspaces (types 0-2) contain a
+ * 2-byte COMD field at offset 4 and master subspaces (type 3) contain a 4-byte
+ * COMD field starting at offset 12.
+ */
+#define GENERIC_SUBSPACE_COMMAND(a) (4 == a || a == 5)
+#define MASTER_SUBSPACE_COMMAND(a) (12 <= a && a <= 15)
+
/*******************************************************************************
*
* FUNCTION: acpi_ex_get_protocol_buffer_length
@@ -177,6 +188,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
status = acpi_ex_read_gpio(obj_desc, buffer);
goto exit;
+ } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+ (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_PLATFORM_COMM)) {
+ /*
+ * Reading from a PCC field unit does not invoke the region handler;
+ * the data is read directly from the internal_pcc_buffer.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "PCC FieldRead bits %u\n",
+ obj_desc->field.bit_length));
+
+ memcpy(buffer,
+ obj_desc->field.region_obj->field.internal_pcc_buffer +
+ obj_desc->field.base_byte_offset,
+ (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.
+ bit_length));
+
+ *ret_buffer_desc = buffer_desc;
+ return AE_OK;
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@@ -229,6 +259,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
{
acpi_status status;
u32 buffer_length;
+ u32 data_length;
void *buffer;
ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
@@ -272,6 +303,44 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
acpi_ex_write_serial_bus(source_desc, obj_desc,
result_desc);
return_ACPI_STATUS(status);
+ } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+ (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_PLATFORM_COMM)) {
+ /*
+ * According to the spec, a write to the COMD field will invoke the
+ * region handler. Otherwise, write to the internal_pcc_buffer. This
+ * implementation uses the specified offsets rather than the name
+ * of the field. This is considered safer because some firmware tools
+ * are known to obfuscate named objects.
+ */
+ data_length =
+ (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.
+ bit_length);
+ memcpy(obj_desc->field.region_obj->field.internal_pcc_buffer +
+ obj_desc->field.base_byte_offset,
+ source_desc->buffer.pointer, data_length);
+
+ if ((obj_desc->field.region_obj->region.address ==
+ PCC_MASTER_SUBSPACE
+ && MASTER_SUBSPACE_COMMAND(obj_desc->field.
+ base_byte_offset))
+ || GENERIC_SUBSPACE_COMMAND(obj_desc->field.
+ base_byte_offset)) {
+
+ /* Perform the write */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "PCC COMD field has been written. Invoking PCC handler now.\n"));
+
+ status =
+ acpi_ex_access_region(obj_desc, 0,
+ (u64 *)obj_desc->field.
+ region_obj->field.
+ internal_pcc_buffer,
+ ACPI_WRITE);
+ return_ACPI_STATUS(status);
+ }
+ return (AE_OK);
}
/* Get a pointer to the data to be written */
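
The exfield.c hunks above service PCC field reads and writes out of the region's internal_pcc_buffer; only a write that lands on the COMD field goes on to call acpi_ex_access_region() and hence the PCC region handler. The offset tests below are equivalent to the new macros (parentheses added around the arguments for macro hygiene); the small driver around them is only an illustration of which byte offsets trigger the handler:

#include <stdio.h>

#define PCC_MASTER_SUBSPACE 3  /* subspace type compared against region.address */

/* Generic subspaces (types 0-2): 2-byte COMD field at offset 4.
 * Master subspace (type 3): 4-byte COMD field at offsets 12-15. */
#define GENERIC_SUBSPACE_COMMAND(a) (4 == (a) || (a) == 5)
#define MASTER_SUBSPACE_COMMAND(a)  (12 <= (a) && (a) <= 15)

int main(void)
{
	unsigned int offset;

	for (offset = 0; offset < 16; offset++)
		printf("offset %2u: generic COMD %d, master COMD %d\n",
		       offset,
		       GENERIC_SUBSPACE_COMMAND(offset) ? 1 : 0,
		       MASTER_SUBSPACE_COMMAND(offset) ? 1 : 0);
	return 0;
}

In the diff, a write whose base_byte_offset satisfies the master test (when the region address identifies the master subspace) or the generic test first updates the staging buffer and then invokes acpi_ex_access_region() with ACPI_WRITE; writes at any other offset only touch the staging buffer.
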
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 516994133128..95a0dcb4f7b9 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index d91f15cdf3ae..60e854965af9 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index c06079774bad..775cd62af5b3 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 7eed79dcda83..bd68d66e89f0 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index ba9fbae0cf91..06e35ea09823 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 3a477566ba1b..5e4a31a11df4 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -390,10 +390,10 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
/* Failure means that the Index was beyond the end of the object */
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Index (0x%X%8.8X) is beyond end of object (length 0x%X)",
- ACPI_FORMAT_UINT64(index),
- (u32)length));
+ ACPI_BIOS_EXCEPTION((AE_INFO, status,
+ "Index (0x%X%8.8X) is beyond end of object (length 0x%X)",
+ ACPI_FORMAT_UINT64(index),
+ (u32)length));
goto cleanup;
}
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 764fa6f924ff..a4ebce417930 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 3941525f3d6b..31385a0b2dab 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 738f3c732363..728d752f7adc 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 2c58f5e00b1a..c08521194b29 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index ea4b0fe674f1..b223d01e6bf8 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 5e42c7de46fa..36da5c0ef69c 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index d94190bc5985..bdfe4d33b483 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index ec61553c4483..c5aa4b0deb70 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -21,7 +21,7 @@ ACPI_MODULE_NAME("exserial")
* FUNCTION: acpi_ex_read_gpio
*
* PARAMETERS: obj_desc - The named field to read
- * buffer - Where the return data is returnd
+ * buffer - Where the return data is returned
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 75d5665b7b2f..7f3c3571c292 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 31cba19652ed..4e43c8277f07 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 4cd82ff509bc..dc9e2b1c1ad9 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index ec8b5a22cad4..a538f7799b78 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index 9bd3fa56b51a..db7f93ca539f 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index bd22e27adf9b..75380be1c2ef 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -160,7 +160,7 @@ u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
* RETURN: None
*
* DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field
- * flags specifiy that it is to be obtained before field access.
+ * flags specify that it is to be obtained before field access.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 525e6ea5c114..926f7e080f22 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index e0ad3f11142e..dee3affaca49 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 2d2e2e41a685..565bd3f29f31 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d8b8fc2ff563..b62db8ec446f 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 5d5e27146fc2..2fb9f75d71c5 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 24f9b61aa404..cd576153257c 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 6e39a771a56e..c4fd97104024 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 3f22f7dd4556..abbf9702aa7f 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -23,33 +23,6 @@ acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs,
acpi_physical_address physical_address64);
#endif
-static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
-
-/*
- * Dispatch table used to efficiently branch to the various sleep
- * functions.
- */
-#define ACPI_SLEEP_FUNCTION_ID 0
-#define ACPI_WAKE_PREP_FUNCTION_ID 1
-#define ACPI_WAKE_FUNCTION_ID 2
-
-/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
-
-static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
- {ACPI_STRUCT_INIT(legacy_function,
- ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)),
- ACPI_STRUCT_INIT(extended_function,
- acpi_hw_extended_sleep)},
- {ACPI_STRUCT_INIT(legacy_function,
- ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)),
- ACPI_STRUCT_INIT(extended_function,
- acpi_hw_extended_wake_prep)},
- {ACPI_STRUCT_INIT(legacy_function,
- ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)),
- ACPI_STRUCT_INIT(extended_function,
- acpi_hw_extended_wake)}
-};
-
/*
* These functions are removed for the ACPI_REDUCED_HARDWARE case:
* acpi_set_firmware_waking_vector
@@ -209,53 +182,6 @@ acpi_status acpi_enter_sleep_state_s4bios(void)
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
#endif /* !ACPI_REDUCED_HARDWARE */
-/*******************************************************************************
- *
- * FUNCTION: acpi_hw_sleep_dispatch
- *
- * PARAMETERS: sleep_state - Which sleep state to enter/exit
- * function_id - Sleep, wake_prep, or Wake
- *
- * RETURN: Status from the invoked sleep handling function.
- *
- * DESCRIPTION: Dispatch a sleep/wake request to the appropriate handling
- * function.
- *
- ******************************************************************************/
-static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
-{
- acpi_status status;
- struct acpi_sleep_functions *sleep_functions =
- &acpi_sleep_dispatch[function_id];
-
-#if (!ACPI_REDUCED_HARDWARE)
- /*
- * If the Hardware Reduced flag is set (from the FADT), we must
- * use the extended sleep registers (FADT). Note: As per the ACPI
- * specification, these extended registers are to be used for HW-reduced
- * platforms only. They are not general-purpose replacements for the
- * legacy PM register sleep support.
- */
- if (acpi_gbl_reduced_hardware) {
- status = sleep_functions->extended_function(sleep_state);
- } else {
- /* Legacy sleep */
-
- status = sleep_functions->legacy_function(sleep_state);
- }
-
- return (status);
-
-#else
- /*
- * For the case where reduced-hardware-only code is being generated,
- * we know that only the extended sleep registers are available
- */
- status = sleep_functions->extended_function(sleep_state);
- return (status);
-
-#endif /* !ACPI_REDUCED_HARDWARE */
-}
/*******************************************************************************
*
@@ -362,7 +288,12 @@ acpi_status acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
- status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
+#if !ACPI_REDUCED_HARDWARE
+ if (!acpi_gbl_reduced_hardware)
+ status = acpi_hw_legacy_sleep(sleep_state);
+ else
+#endif
+ status = acpi_hw_extended_sleep(sleep_state);
return_ACPI_STATUS(status);
}
@@ -388,8 +319,12 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
- status =
- acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID);
+#if !ACPI_REDUCED_HARDWARE
+ if (!acpi_gbl_reduced_hardware)
+ status = acpi_hw_legacy_wake_prep(sleep_state);
+ else
+#endif
+ status = acpi_hw_extended_wake_prep(sleep_state);
return_ACPI_STATUS(status);
}
@@ -413,7 +348,12 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
- status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
+#if !ACPI_REDUCED_HARDWARE
+ if (!acpi_gbl_reduced_hardware)
+ status = acpi_hw_legacy_wake(sleep_state);
+ else
+#endif
+ status = acpi_hw_extended_wake(sleep_state);
return_ACPI_STATUS(status);
}
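
The hwxfsleep.c change above removes the acpi_sleep_dispatch table and picks the legacy or extended sleep/wake routine inline, guarded by ACPI_REDUCED_HARDWARE. A minimal sketch of that selection pattern, assuming stub implementations (the acpi_hw_* names match the diff; everything else here is illustrative):

typedef unsigned int acpi_status;
#define AE_OK 0u

/* Stubs standing in for the routines in hwsleep.c / hwesleep.c. */
static acpi_status acpi_hw_legacy_sleep(unsigned char state)   { (void)state; return AE_OK; }
static acpi_status acpi_hw_extended_sleep(unsigned char state) { (void)state; return AE_OK; }

/* In ACPICA this flag comes from the FADT's Hardware Reduced bit. */
static unsigned char acpi_gbl_reduced_hardware;

static acpi_status enter_sleep(unsigned char sleep_state)
{
#if !defined(ACPI_REDUCED_HARDWARE) || !ACPI_REDUCED_HARDWARE
	/* Full-hardware builds still prefer the legacy registers unless
	 * the firmware declares itself hardware-reduced. */
	if (!acpi_gbl_reduced_hardware)
		return acpi_hw_legacy_sleep(sleep_state);
#endif
	return acpi_hw_extended_sleep(sleep_state);
}

int main(void)
{
	return (int)enter_sleep(3);  /* e.g. S3 */
}

The same if/else-inside-#if shape is repeated in the diff for the wake_prep and wake paths.
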
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index b9ede797b654..0e97ed38973f 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index f9527346b0f7..14cbf63f1991 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 90ccffcd770b..15070bd0c28a 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 2b291c500fb0..73e5c83c8c9f 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index d77257d1c827..19fb8dda870f 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 04bc73e82aed..35fff5c75da1 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -75,7 +75,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
/*
* On error, delete any namespace objects created by this table.
* We cannot initialize these objects, so delete them. There are
- * a couple of expecially bad cases:
+ * a couple of especially bad cases:
* AE_ALREADY_EXISTS - namespace collision.
* AE_NOT_FOUND - the target of a Scope operator does not
* exist. This target of Scope must already exist in the
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 488ff39d86f7..c0b4f7bedfab 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -253,61 +253,19 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
ACPI_FUNCTION_TRACE(ns_parse_table);
- if (acpi_gbl_execute_tables_as_methods) {
- /*
- * This case executes the AML table as one large control method.
- * The point of this is to execute any module-level code in-place
- * as the table is parsed. Some AML code depends on this behavior.
- *
- * It is a run-time option at this time, but will eventually become
- * the default.
- *
- * Note: This causes the table to only have a single-pass parse.
- * However, this is compatible with other ACPI implementations.
- */
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE,
- "%s: **** Start table execution pass\n",
- ACPI_GET_FUNCTION_NAME));
-
- status = acpi_ns_execute_table(table_index, start_node);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- } else {
- /*
- * AML Parse, pass 1
- *
- * In this pass, we load most of the namespace. Control methods
- * are not parsed until later. A parse tree is not created.
- * Instead, each Parser Op subtree is deleted when it is finished.
- * This saves a great deal of memory, and allows a small cache of
- * parse objects to service the entire parse. The second pass of
- * the parse then performs another complete parse of the AML.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
-
- status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
- table_index, start_node);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ /*
+ * Executes the AML table as one large control method.
+ * The point of this is to execute any module-level code in-place
+ * as the table is parsed. Some AML code depends on this behavior.
+ *
+ * Note: This causes the table to only have a single-pass parse.
+ * However, this is compatible with other ACPI implementations.
+ */
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE,
+ "%s: **** Start table execution pass\n",
+ ACPI_GET_FUNCTION_NAME));
- /*
- * AML Parse, pass 2
- *
- * In this pass, we resolve forward references and other things
- * that could not be completed during the first pass.
- * Another complete parse of the AML is performed, but the
- * overhead of this is compensated for by the fact that the
- * parse objects are all cached.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
- status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
- table_index, start_node);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ status = acpi_ns_execute_table(table_index, start_node);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 29c68b15a64f..2f9d93122d0c 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 51523473e7fe..9a80e3b23496 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index ff2ab8fbec38..0aacfa48e20d 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index a3bd6280882c..d5804a6d1d65 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a2bf4b2caa6c..e5cef1edf49f 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -350,7 +350,7 @@ acpi_ns_internalize_name(const char *external_name, char **converted_name)
*
* FUNCTION: acpi_ns_externalize_name
*
- * PARAMETERS: internal_name_length - Lenth of the internal name below
+ * PARAMETERS: internal_name_length - Length of the internal name below
* internal_name - Internal representation of name
* converted_name_length - Where the length is returned
* converted_name - Where the resulting external name
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index e9a061da9bb2..ceea6af79d12 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index b2915c9cceaf..de2d3135d6a9 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 176d28d60125..9d9d442cd999 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index e00d1af6fa80..207805047bc4 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -32,10 +32,6 @@ static acpi_status
acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
u8 * aml_op_start, union acpi_parse_object *op);
-static void
-acpi_ps_link_module_code(union acpi_parse_object *parent_op,
- u8 *aml_start, u32 aml_length, acpi_owner_id owner_id);
-
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_arguments
@@ -56,7 +52,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
{
acpi_status status = AE_OK;
union acpi_parse_object *arg = NULL;
- const struct acpi_opcode_info *op_info;
ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
@@ -136,96 +131,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
walk_state->arg_count,
walk_state->pass_number));
- /*
- * This case handles the legacy option that groups all module-level
- * code blocks together and defers execution until all of the tables
- * are loaded. Execute all of these blocks at this time.
- * Execute any module-level code that was detected during the table
- * load phase.
- *
- * Note: this option is deprecated and will be eliminated in the
- * future. Use of this option can cause problems with AML code that
- * depends upon in-order immediate execution of module-level code.
- */
- if (!acpi_gbl_execute_tables_as_methods &&
- (walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2) &&
- ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
- /*
- * We want to skip If/Else/While constructs during Pass1 because we
- * want to actually conditionally execute the code during Pass2.
- *
- * Except for disassembly, where we always want to walk the
- * If/Else/While packages
- */
- switch (op->common.aml_opcode) {
- case AML_IF_OP:
- case AML_ELSE_OP:
- case AML_WHILE_OP:
- /*
- * Currently supported module-level opcodes are:
- * IF/ELSE/WHILE. These appear to be the most common,
- * and easiest to support since they open an AML
- * package.
- */
- if (walk_state->pass_number ==
- ACPI_IMODE_LOAD_PASS1) {
- acpi_ps_link_module_code(op->common.
- parent,
- aml_op_start,
- (u32)
- (walk_state->
- parser_state.
- pkg_end -
- aml_op_start),
- walk_state->
- owner_id);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Pass1: Skipping an If/Else/While body\n"));
-
- /* Skip body of if/else/while in pass 1 */
-
- walk_state->parser_state.aml =
- walk_state->parser_state.pkg_end;
- walk_state->arg_count = 0;
- break;
-
- default:
- /*
- * Check for an unsupported executable opcode at module
- * level. We must be in PASS1, the parent must be a SCOPE,
- * The opcode class must be EXECUTE, and the opcode must
- * not be an argument to another opcode.
- */
- if ((walk_state->pass_number ==
- ACPI_IMODE_LOAD_PASS1)
- && (op->common.parent->common.aml_opcode ==
- AML_SCOPE_OP)) {
- op_info =
- acpi_ps_get_opcode_info(op->common.
- aml_opcode);
- if ((op_info->class ==
- AML_CLASS_EXECUTE) && (!arg)) {
- ACPI_WARNING((AE_INFO,
- "Unsupported module-level executable opcode "
- "0x%.2X at table offset 0x%.4X",
- op->common.
- aml_opcode,
- (u32)
- (ACPI_PTR_DIFF
- (aml_op_start,
- walk_state->
- parser_state.
- aml_start) +
- sizeof(struct
- acpi_table_header))));
- }
- }
- break;
- }
- }
-
/* Special processing for certain opcodes */
switch (op->common.aml_opcode) {
@@ -302,104 +207,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
/*******************************************************************************
*
- * FUNCTION: acpi_ps_link_module_code
- *
- * PARAMETERS: parent_op - Parent parser op
- * aml_start - Pointer to the AML
- * aml_length - Length of executable AML
- * owner_id - owner_id of module level code
- *
- * RETURN: None.
- *
- * DESCRIPTION: Wrap the module-level code with a method object and link the
- * object to the global list. Note, the mutex field of the method
- * object is used to link multiple module-level code objects.
- *
- * NOTE: In this legacy option, each block of detected executable AML
- * code that is outside of any control method is wrapped with a temporary
- * control method object and placed on a global list below.
- *
- * This function executes the module-level code for all tables only after
- * all of the tables have been loaded. It is a legacy option and is
- * not compatible with other ACPI implementations. See acpi_ns_load_table.
- *
- * This function will be removed when the legacy option is removed.
- *
- ******************************************************************************/
-
-static void
-acpi_ps_link_module_code(union acpi_parse_object *parent_op,
- u8 *aml_start, u32 aml_length, acpi_owner_id owner_id)
-{
- union acpi_operand_object *prev;
- union acpi_operand_object *next;
- union acpi_operand_object *method_obj;
- struct acpi_namespace_node *parent_node;
-
- ACPI_FUNCTION_TRACE(ps_link_module_code);
-
- /* Get the tail of the list */
-
- prev = next = acpi_gbl_module_code_list;
- while (next) {
- prev = next;
- next = next->method.mutex;
- }
-
- /*
- * Insert the module level code into the list. Merge it if it is
- * adjacent to the previous element.
- */
- if (!prev ||
- ((prev->method.aml_start + prev->method.aml_length) != aml_start)) {
-
- /* Create, initialize, and link a new temporary method object */
-
- method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
- if (!method_obj) {
- return_VOID;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Create/Link new code block: %p\n",
- method_obj));
-
- if (parent_op->common.node) {
- parent_node = parent_op->common.node;
- } else {
- parent_node = acpi_gbl_root_node;
- }
-
- method_obj->method.aml_start = aml_start;
- method_obj->method.aml_length = aml_length;
- method_obj->method.owner_id = owner_id;
- method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL;
-
- /*
- * Save the parent node in next_object. This is cheating, but we
- * don't want to expand the method object.
- */
- method_obj->method.next_object =
- ACPI_CAST_PTR(union acpi_operand_object, parent_node);
-
- if (!prev) {
- acpi_gbl_module_code_list = method_obj;
- } else {
- prev->method.mutex = method_obj;
- }
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Appending to existing code block: %p\n",
- prev));
-
- prev->method.aml_length += aml_length;
- }
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ps_parse_loop
*
* PARAMETERS: walk_state - Current state
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index e1fd819a2955..98e5c7400e54 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 8d7dc98bad17..43775c5ce17c 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index f310954eea59..15e7563829f1 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 65603473b6cb..9b386530ffbe 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -523,12 +523,12 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
if (status == AE_ABORT_METHOD) {
acpi_ns_print_node_pathname(walk_state->
method_node,
- "Method aborted:");
+ "Aborting method");
acpi_os_printf("\n");
} else {
- ACPI_ERROR_METHOD
- ("Method parse/execution failed",
- walk_state->method_node, NULL, status);
+ ACPI_ERROR_METHOD("Aborting method",
+ walk_state->method_node, NULL,
+ status);
}
acpi_ex_enter_interpreter();
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 00c67bc249aa..f153ca804740 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 64a8329a17f1..22d8a2becdd0 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index ef8a5805a836..2512f584fa3c 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index bd6af8c87d48..cf91841297c2 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 5743b22399a0..ee2ee2c858f2 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 77a3263169fa..cafa8134b4c6 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -32,7 +32,7 @@ struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
acpi_gbl_he_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
acpi_gbl_ll_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.shareable), "Sharing",
acpi_gbl_shr_decode},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
"Interrupt Count", NULL},
@@ -222,7 +222,7 @@ struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
"Triggering", acpi_gbl_he_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
acpi_gbl_ll_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.shareable), "Sharing",
acpi_gbl_shr_decode},
{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
NULL},
@@ -255,7 +255,7 @@ struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
"ProducerConsumer", acpi_gbl_consume_decode},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
acpi_gbl_ppc_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharing",
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.shareable), "Sharing",
acpi_gbl_shr_decode},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
"IoRestriction", acpi_gbl_ior_decode},
@@ -285,7 +285,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_function[10] = {
"RevisionId", NULL},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_function.pin_config), "PinConfig",
acpi_gbl_ppc_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_function.sharable), "Sharing",
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_function.shareable), "Sharing",
acpi_gbl_shr_decode},
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_function.function_number),
"FunctionNumber", NULL},
@@ -308,7 +308,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_config[11] = {
NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.producer_consumer),
"ProducerConsumer", acpi_gbl_consume_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.sharable), "Sharing",
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.shareable), "Sharing",
acpi_gbl_shr_decode},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_config.pin_config_type),
"PinConfigType", NULL},
@@ -353,7 +353,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_group_function[9] = {
{ACPI_RSD_1BITFLAG,
ACPI_RSD_OFFSET(pin_group_function.producer_consumer),
"ProducerConsumer", acpi_gbl_consume_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.sharable),
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.shareable),
"Sharing", acpi_gbl_shr_decode},
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_group_function.function_number),
"FunctionNumber", NULL},
@@ -375,7 +375,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_group_config[10] = {
"RevisionId", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.producer_consumer),
"ProducerConsumer", acpi_gbl_consume_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.sharable),
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.shareable),
"Sharing", acpi_gbl_shr_decode},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_group_config.pin_config_type),
"PinConfigType", NULL},
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 134b67cd48ee..b0d970efa072 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -54,7 +54,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[9] = {
AML_OFFSET(irq.flags),
3},
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.shareable),
AML_OFFSET(irq.flags),
4},
@@ -92,7 +92,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[14] = {
AML_OFFSET(irq.flags),
3},
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.shareable),
AML_OFFSET(irq.flags),
4},
@@ -139,7 +139,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[14] = {
ACPI_ACTIVE_HIGH},
{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
- ACPI_RS_OFFSET(data.irq.sharable),
+ ACPI_RS_OFFSET(data.irq.shareable),
ACPI_EXCLUSIVE},
/* We can optimize to a 2-byte irq_no_flags() descriptor */
@@ -178,7 +178,7 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[10] = {
AML_OFFSET(extended_irq.flags),
2},
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.shareable),
AML_OFFSET(extended_irq.flags),
3},
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index d073ebb51f90..1b937d88980f 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -39,7 +39,7 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = {
AML_OFFSET(gpio.flags),
0},
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.shareable),
AML_OFFSET(gpio.int_flags),
3},
@@ -128,7 +128,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_function[13] = {
AML_OFFSET(pin_function.revision_id),
1},
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_function.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_function.shareable),
AML_OFFSET(pin_function.flags),
0},
@@ -518,7 +518,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_config[14] = {
AML_OFFSET(pin_config.revision_id),
1},
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.shareable),
AML_OFFSET(pin_config.flags),
0},
@@ -658,7 +658,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_group_function[13] = {
AML_OFFSET(pin_group_function.revision_id),
1},
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.shareable),
AML_OFFSET(pin_group_function.flags),
0},
@@ -735,7 +735,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_group_config[14] = {
AML_OFFSET(pin_group_config.revision_id),
1},
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.shareable),
AML_OFFSET(pin_group_config.flags),
0},
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 862149c8a208..0cecd0039acf 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 99d325a51816..0041bfba9abc 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -556,7 +556,7 @@ static void acpi_tb_convert_fadt(void)
* 64-bit X length field.
* Note: If the legacy length field is > 0xFF bits, ignore
* this check. (GPE registers can be larger than the
- * 64-bit GAS structure can accomodate, 0xFF bits).
+ * 64-bit GAS structure can accommodate, 0xFF bits).
*/
if ((ACPI_MUL_8(length) <= ACPI_UINT8_MAX) &&
(address64->bit_width !=
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index f00694b1d000..951bd8e1c50a 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 5f8e7b561c90..be6642bf6366 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index e303418a895b..9b5df95d881b 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index b526096560b5..2469e01310e2 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index e4d0dc8948cd..36592888f0e7 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -108,7 +108,7 @@ acpi_initialize_tables(struct acpi_table_desc *initial_table_array,
/*
* Get the root table (RSDT or XSDT) and extract all entries to the local
* Root Table Array. This array contains the information of the RSDT/XSDT
- * in a common, more useable format.
+ * in a common, more usable format.
*/
status = acpi_tb_parse_root_table(rsdp_address);
return_ACPI_STATUS(status);
@@ -169,7 +169,7 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
if (!acpi_gbl_enable_table_validation) {
/*
* Now it's safe to do full table validation. We can do deferred
- * table initilization here once the flag is set.
+ * table initialization here once the flag is set.
*/
acpi_gbl_enable_table_validation = TRUE;
for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 9011297552af..1a2592cc3245 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -69,23 +69,18 @@ acpi_status ACPI_INIT_FUNCTION acpi_load_tables(void)
"While loading namespace from ACPI tables"));
}
- if (acpi_gbl_execute_tables_as_methods) {
- /*
- * If the module-level code support is enabled, initialize the objects
- * in the namespace that remain uninitialized. This runs the executable
- * AML that may be part of the declaration of these name objects:
- * operation_regions, buffer_fields, Buffers, and Packages.
- *
- * Note: The module-level code is optional at this time, but will
- * become the default in the future.
- */
- status = acpi_ns_initialize_objects();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ /*
+ * Initialize the objects in the namespace that remain uninitialized.
+ * This runs the executable AML that may be part of the declaration of
+ * these name objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ *
+ */
+ status = acpi_ns_initialize_objects();
+ if (ACPI_SUCCESS(status)) {
+ acpi_gbl_namespace_initialized = TRUE;
}
- acpi_gbl_namespace_initialized = TRUE;
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 483d0ce5180a..e2859d09ca2e 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index dbabe680ff58..bb260376bd59 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 8cbcd7d6bd5e..d64da4d9e8d0 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 04ff61e284f5..79d7426fd7bf 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index fffa6f5ae59e..61db9967ebe4 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 97d6ec174c28..8533fce7fa93 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index a872ed7879ca..1fb8327f3c3b 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index aabdc25effd9..01b1b36c8a8e 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index dad02b821e19..ad9f77eb554f 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -284,7 +284,7 @@ const char *acpi_ut_get_node_name(void *object)
static const char *acpi_gbl_desc_type_names[] = {
/* 00 */ "Not a Descriptor",
- /* 01 */ "Cached",
+ /* 01 */ "Cached Object",
/* 02 */ "State-Generic",
/* 03 */ "State-Update",
/* 04 */ "State-Package",
@@ -295,10 +295,10 @@ static const char *acpi_gbl_desc_type_names[] = {
/* 09 */ "State-Result",
/* 10 */ "State-Notify",
/* 11 */ "State-Thread",
- /* 12 */ "Walk",
- /* 13 */ "Parser",
- /* 14 */ "Operand",
- /* 15 */ "Node"
+ /* 12 */ "Tree Walk State",
+ /* 13 */ "Parse Tree Op",
+ /* 14 */ "Operand Object",
+ /* 15 */ "Namespace Node"
};
const char *acpi_ut_get_descriptor_name(void *object)
@@ -430,8 +430,10 @@ static const char *acpi_gbl_generic_notify[ACPI_GENERIC_NOTIFY_MAX + 1] = {
/* 0C */ "Reserved (was previously Shutdown Request)",
/* Reserved in ACPI 6.0 */
/* 0D */ "System Resource Affinity Update",
- /* 0E */ "Heterogeneous Memory Attributes Update"
+ /* 0E */ "Heterogeneous Memory Attributes Update",
/* ACPI 6.2 */
+ /* 0F */ "Error Disconnect Recover"
+ /* ACPI 6.3 */
};
static const char *acpi_gbl_device_notify[5] = {
@@ -461,13 +463,13 @@ static const char *acpi_gbl_thermal_notify[5] = {
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type)
{
- /* 00 - 0D are "common to all object types" (from ACPI Spec) */
+ /* 00 - 0F are "common to all object types" (from ACPI Spec) */
if (notify_value <= ACPI_GENERIC_NOTIFY_MAX) {
return (acpi_gbl_generic_notify[notify_value]);
}
- /* 0E - 7F are reserved */
+ /* 10 - 7F are reserved */
if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
return ("Reserved");
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 8cc4392c61f3..eee263cb7beb 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -257,6 +257,10 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
acpi_ut_delete_object_desc(second_desc);
}
+ if (object->field.internal_pcc_buffer) {
+ ACPI_FREE(object->field.internal_pcc_buffer);
+ }
+
break;
case ACPI_TYPE_BUFFER_FIELD:
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index e47430272692..075457341bad 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -183,19 +183,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
case AE_ALREADY_EXISTS:
acpi_os_printf(ACPI_MSG_BIOS_ERROR);
- message = "Failure creating";
+ message = "Failure creating named object";
break;
case AE_NOT_FOUND:
acpi_os_printf(ACPI_MSG_BIOS_ERROR);
- message = "Could not resolve";
+ message = "Could not resolve symbol";
break;
default:
acpi_os_printf(ACPI_MSG_ERROR);
- message = "Failure resolving";
+ message = "Failure resolving symbol";
break;
}
@@ -317,7 +317,8 @@ acpi_ut_method_error(const char *module_name,
}
acpi_ns_print_node_pathname(node, message);
- acpi_os_printf(", %s", acpi_format_exception(method_status));
+ acpi_os_printf(" due to previous error (%s)",
+ acpi_format_exception(method_status));
ACPI_MSG_SUFFIX;
ACPI_MSG_REDIRECT_END;
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index c56ae6e058d5..558a9f3b0678 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index f8c5b49344df..b0622ec4bb85 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 3d63a9e8da4f..b6da135d5f41 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 70e6bf1107a1..e805abdd95b8 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 0646ed62b351..bc124591320e 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index d61e01bd01a3..8b4ff11d617a 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index ae6d8cc18cec..eee97a902696 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 902a47463abf..688c61a90725 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 65ca9807c2a8..a9f08f43c685 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index a98c334c3bb7..5839f2fa7400 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 016a6621cc6f..8052f7ef5025 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -588,6 +588,18 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
acpi_ut_get_descriptor_name
(descriptor));
+ /* Optional object hex dump */
+
+ if (acpi_gbl_verbose_leak_dump) {
+ acpi_os_printf("\n");
+ acpi_ut_dump_buffer((u8 *)
+ descriptor,
+ element->
+ size,
+ DB_BYTE_DISPLAY,
+ 0);
+ }
+
/* Validate the descriptor type using Type field and length */
descriptor_type = 0; /* Not a valid descriptor type */
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 59ae118092a3..0a7cf8007643 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index d2d6cc065181..f497c4b30e65 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6bb85d691fcb..a1ed7fced4db 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -187,6 +187,50 @@ ACPI_EXPORT_SYMBOL(acpi_bios_error)
/*******************************************************************************
*
+ * FUNCTION: acpi_bios_exception
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * status - Status value to be decoded/formatted
+ * format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print an "ACPI Firmware Error" message with module/line/version
+ * info as well as decoded acpi_status.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_exception(const char *module_name,
+ u32 line_number,
+ acpi_status status, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+
+ /* For AE_OK, just print the message */
+
+ if (ACPI_SUCCESS(status)) {
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+
+ } else {
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR "%s, ",
+ acpi_format_exception(status));
+ }
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_bios_exception)
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_bios_warning
*
* PARAMETERS: module_name - Caller's module name (for warning output)
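Editor's note: a minimal sketch of how the acpi_bios_exception() interface added above might be invoked. The caller name, line number, status code and message below are illustrative assumptions, not taken from this patch; real ACPICA call sites normally go through a wrapper macro rather than passing the module name and line number by hand.

#include <linux/types.h>
#include <linux/acpi.h>         /* pulls in the ACPICA interfaces (acpixf.h) */

/* Hypothetical caller -- a sketch only, not part of this patch. */
static void example_report_bad_table_field(u32 bad_length)
{
        /*
         * Emits an ACPI firmware error message carrying the decoded
         * status string followed by the formatted text.
         */
        acpi_bios_exception("exampletable", 42, AE_AML_BUFFER_LIMIT,
                            "Field length %u is larger than the region",
                            bad_length);
}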
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index e3c60f57c9f0..9f3b1e3a09de 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 52ae5438edeb..6b18f8bc7be3 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -41,19 +41,9 @@ config ACPI_APEI_PCIEAER
Turn on this option to enable the corresponding support.
config ACPI_APEI_SEA
- bool "APEI Synchronous External Abort logging/recovering support"
+ bool
depends on ARM64 && ACPI_APEI_GHES
default y
- help
- This option should be enabled if the system supports
- firmware first handling of SEA (Synchronous External Abort).
- SEA happens with certain faults of data abort or instruction
- abort synchronous exceptions on ARMv8 systems. If a system
- supports firmware first handling of SEA, the platform analyzes
- and handles hardware error notifications from SEA, and it may then
- form a HW error record for the OS to parse and handle. This
- option allows the OS to look for such hardware error record, and
- take appropriate action.
config ACPI_APEI_MEMORY_FAILURE
bool "APEI memory error recovering support"
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 12771fcf0417..0d948d0a41af 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -42,15 +42,7 @@ static void __init bert_print_all(struct acpi_bert_region *region,
int remain = region_len;
u32 estatus_len;
- if (!estatus->block_status)
- return;
-
- while (remain > sizeof(struct acpi_bert_region)) {
- if (cper_estatus_check(estatus)) {
- pr_err(FW_BUG "Invalid error record.\n");
- return;
- }
-
+ while (remain >= sizeof(struct acpi_bert_region)) {
estatus_len = cper_estatus_len(estatus);
if (remain < estatus_len) {
pr_err(FW_BUG "Truncated status block (length: %u).\n",
@@ -58,6 +50,15 @@ static void __init bert_print_all(struct acpi_bert_region *region,
return;
}
+ /* No more error records. */
+ if (!estatus->block_status)
+ return;
+
+ if (cper_estatus_check(estatus)) {
+ pr_err(FW_BUG "Invalid error record.\n");
+ return;
+ }
+
pr_info_once("Error records from previous boot:\n");
cper_estatus_print(KERN_INFO HW_ERR, estatus);
@@ -70,10 +71,6 @@ static void __init bert_print_all(struct acpi_bert_region *region,
estatus->block_status = 0;
estatus = (void *)estatus + estatus_len;
- /* No more error records. */
- if (!estatus->block_status)
- return;
-
remain -= estatus_len;
}
}
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index fcccbfdbdd1a..2d4be94f8c00 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -644,8 +644,8 @@ static int error_type_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
- error_type_set, "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(error_type_fops, error_type_get, error_type_set,
+ "0x%llx\n");
static int error_inject_set(void *data, u64 val)
{
@@ -656,8 +656,7 @@ static int error_inject_set(void *data, u64 val)
error_param3, error_param4);
}
-DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
- error_inject_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(error_inject_fops, NULL, error_inject_set, "%llu\n");
static int einj_check_table(struct acpi_table_einj *einj_tab)
{
@@ -679,7 +678,6 @@ static int __init einj_init(void)
{
int rc;
acpi_status status;
- struct dentry *fentry;
struct apei_exec_context ctx;
if (acpi_disabled) {
@@ -707,25 +705,13 @@ static int __init einj_init(void)
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
- if (!einj_debug_dir) {
- pr_err("Error creating debugfs node.\n");
- goto err_cleanup;
- }
- fentry = debugfs_create_file("available_error_type", S_IRUSR,
- einj_debug_dir, NULL,
- &available_error_type_fops);
- if (!fentry)
- goto err_cleanup;
-
- fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
- einj_debug_dir, NULL, &error_type_fops);
- if (!fentry)
- goto err_cleanup;
- fentry = debugfs_create_file("error_inject", S_IWUSR,
- einj_debug_dir, NULL, &error_inject_fops);
- if (!fentry)
- goto err_cleanup;
+ debugfs_create_file("available_error_type", S_IRUSR, einj_debug_dir,
+ NULL, &available_error_type_fops);
+ debugfs_create_file_unsafe("error_type", 0600, einj_debug_dir,
+ NULL, &error_type_fops);
+ debugfs_create_file_unsafe("error_inject", 0200, einj_debug_dir,
+ NULL, &error_inject_fops);
apei_resources_init(&einj_resources);
einj_exec_ctx_init(&ctx);
@@ -750,66 +736,37 @@ static int __init einj_init(void)
rc = -ENOMEM;
einj_param = einj_get_parameter_address();
if ((param_extension || acpi5) && einj_param) {
- fentry = debugfs_create_x32("flags", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_flags);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x64("param3", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param3);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x64("param4", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param4);
- if (!fentry)
- goto err_unmap;
-
- fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
- einj_debug_dir, &notrigger);
- if (!fentry)
- goto err_unmap;
+ debugfs_create_x32("flags", S_IRUSR | S_IWUSR, einj_debug_dir,
+ &error_flags);
+ debugfs_create_x64("param1", S_IRUSR | S_IWUSR, einj_debug_dir,
+ &error_param1);
+ debugfs_create_x64("param2", S_IRUSR | S_IWUSR, einj_debug_dir,
+ &error_param2);
+ debugfs_create_x64("param3", S_IRUSR | S_IWUSR, einj_debug_dir,
+ &error_param3);
+ debugfs_create_x64("param4", S_IRUSR | S_IWUSR, einj_debug_dir,
+ &error_param4);
+ debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &notrigger);
}
if (vendor_dev[0]) {
vendor_blob.data = vendor_dev;
vendor_blob.size = strlen(vendor_dev);
- fentry = debugfs_create_blob("vendor", S_IRUSR,
- einj_debug_dir, &vendor_blob);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
- einj_debug_dir, &vendor_flags);
- if (!fentry)
- goto err_unmap;
+ debugfs_create_blob("vendor", S_IRUSR, einj_debug_dir,
+ &vendor_blob);
+ debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &vendor_flags);
}
pr_info("Error INJection is initialized.\n");
return 0;
-err_unmap:
- if (einj_param) {
- acpi_size size = (acpi5) ?
- sizeof(struct set_error_type_with_address) :
- sizeof(struct einj_parameter);
-
- acpi_os_unmap_iomem(einj_param, size);
- pr_err("Error creating param extension debugfs nodes.\n");
- }
- apei_exec_post_unmap_gars(&ctx);
err_release:
apei_resources_release(&einj_resources);
err_fini:
apei_resources_fini(&einj_resources);
-err_cleanup:
- pr_err("Error creating primary debugfs nodes.\n");
debugfs_remove_recursive(einj_debug_dir);
return rc;
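Editor's note: the einj conversion above uses two debugfs idioms that recur in this series. DEFINE_DEBUGFS_ATTRIBUTE() generates fops intended to be registered with debugfs_create_file_unsafe(), and the return values of the debugfs_create_*() helpers are no longer checked because a failed debugfs node is not treated as fatal. A condensed, self-contained sketch of the pattern with placeholder names (example_*), assuming a kernel build context:

#include <linux/debugfs.h>

static struct dentry *example_debug_dir;
static u64 example_val;

static int example_get(void *data, u64 *val)
{
        *val = example_val;
        return 0;
}

static int example_set(void *data, u64 val)
{
        example_val = val;
        return 0;
}
/*
 * Generates example_fops; pairs with debugfs_create_file_unsafe(), which
 * expects the attribute helpers to do their own file-removal protection
 * instead of relying on the heavyweight proxy used by debugfs_create_file().
 */
DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "0x%llx\n");

static void example_debugfs_init(void)
{
        example_debug_dir = debugfs_create_dir("example", NULL);

        /* Return values intentionally ignored: debugfs failures are not fatal. */
        debugfs_create_file_unsafe("value", 0600, example_debug_dir, NULL,
                                   &example_fops);
        debugfs_create_x64("raw", 0600, example_debug_dir, &example_val);
}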
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 9953e50667ec..389d88e35ffb 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -938,17 +938,17 @@ static struct pstore_info erst_info = {
};
#define CPER_CREATOR_PSTORE \
- UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
- 0x64, 0x90, 0xb8, 0x9d)
+ GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
+ 0x64, 0x90, 0xb8, 0x9d)
#define CPER_SECTION_TYPE_DMESG \
- UUID_LE(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \
- 0x94, 0x19, 0xeb, 0x12)
+ GUID_INIT(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \
+ 0x94, 0x19, 0xeb, 0x12)
#define CPER_SECTION_TYPE_DMESG_Z \
- UUID_LE(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d, \
- 0x34, 0xdd, 0xfa, 0xc6)
+ GUID_INIT(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d, \
+ 0x34, 0xdd, 0xfa, 0xc6)
#define CPER_SECTION_TYPE_MCE \
- UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
- 0x04, 0x4a, 0x38, 0xfc)
+ GUID_INIT(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
+ 0x04, 0x4a, 0x38, 0xfc)
struct cper_pstore_record {
struct cper_record_header hdr;
@@ -1012,7 +1012,7 @@ skip:
rc = -EIO;
goto out;
}
- if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
+ if (!guid_equal(&rcd->hdr.creator_id, &CPER_CREATOR_PSTORE))
goto skip;
record->buf = kmalloc(len, GFP_KERNEL);
@@ -1024,15 +1024,12 @@ skip:
record->id = record_id;
record->compressed = false;
record->ecc_notice_size = 0;
- if (uuid_le_cmp(rcd->sec_hdr.section_type,
- CPER_SECTION_TYPE_DMESG_Z) == 0) {
+ if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG_Z)) {
record->type = PSTORE_TYPE_DMESG;
record->compressed = true;
- } else if (uuid_le_cmp(rcd->sec_hdr.section_type,
- CPER_SECTION_TYPE_DMESG) == 0)
+ } else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG))
record->type = PSTORE_TYPE_DMESG;
- else if (uuid_le_cmp(rcd->sec_hdr.section_type,
- CPER_SECTION_TYPE_MCE) == 0)
+ else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_MCE))
record->type = PSTORE_TYPE_MCE;
else
record->type = PSTORE_TYPE_MAX;
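Editor's note: the erst.c hunks above replace the legacy uuid_le type with guid_t and the open-coded uuid_le_cmp() tests with guid_equal(). A short sketch of the same idiom; the GUID value is the pstore creator ID already defined in this file, while the macro and helper names are placeholders:

#include <linux/types.h>
#include <linux/uuid.h>

#define EXAMPLE_CREATOR_PSTORE                                          \
        GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c,  \
                  0x64, 0x90, 0xb8, 0x9d)

static bool example_record_is_pstore(const guid_t *creator_id)
{
        /* guid_equal() replaces the old "uuid_le_cmp(a, b) == 0" test. */
        return guid_equal(creator_id, &EXAMPLE_CREATOR_PSTORE);
}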
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f008ba7c9ced..0b5ae91fd0fb 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -25,6 +25,7 @@
* GNU General Public License for more details.
*/
+#include <linux/arm_sdei.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -33,7 +34,6 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
-#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -42,6 +42,7 @@
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
+#include <linux/pfn.h>
#include <linux/aer.h>
#include <linux/nmi.h>
#include <linux/sched/clock.h>
@@ -85,6 +86,15 @@
((struct acpi_hest_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
+/*
+ * NMI-like notifications vary by architecture, before the compiler can prune
+ * unused static functions it needs a value for these enums.
+ */
+#ifndef CONFIG_ARM_SDE_INTERFACE
+#define FIX_APEI_GHES_SDEI_NORMAL __end_of_fixed_addresses
+#define FIX_APEI_GHES_SDEI_CRITICAL __end_of_fixed_addresses
+#endif
+
static inline bool is_hest_type_generic_v2(struct ghes *ghes)
{
return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
@@ -115,11 +125,10 @@ static DEFINE_MUTEX(ghes_list_mutex);
* handler, but general ioremap can not be used in atomic context, so
* the fixmap is used instead.
*
- * These 2 spinlocks are used to prevent the fixmap entries from being used
+ * This spinlock is used to prevent the fixmap entry from being used
* simultaneously.
*/
-static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
-static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+static DEFINE_SPINLOCK(ghes_notify_lock_irq);
static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;
@@ -129,82 +138,49 @@ static atomic_t ghes_estatus_cache_alloced;
static int ghes_panic_timeout __read_mostly = 30;
-static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
+static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
phys_addr_t paddr;
pgprot_t prot;
- paddr = pfn << PAGE_SHIFT;
+ paddr = PFN_PHYS(pfn);
prot = arch_apei_get_mem_attribute(paddr);
- __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot);
+ __set_fixmap(fixmap_idx, paddr, prot);
- return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI);
+ return (void __iomem *) __fix_to_virt(fixmap_idx);
}
-static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
+static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
{
- phys_addr_t paddr;
- pgprot_t prot;
-
- paddr = pfn << PAGE_SHIFT;
- prot = arch_apei_get_mem_attribute(paddr);
- __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot);
-
- return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ);
-}
+ int _idx = virt_to_fix((unsigned long)vaddr);
-static void ghes_iounmap_nmi(void)
-{
- clear_fixmap(FIX_APEI_GHES_NMI);
+ WARN_ON_ONCE(fixmap_idx != _idx);
+ clear_fixmap(fixmap_idx);
}
-static void ghes_iounmap_irq(void)
+int ghes_estatus_pool_init(int num_ghes)
{
- clear_fixmap(FIX_APEI_GHES_IRQ);
-}
+ unsigned long addr, len;
-static int ghes_estatus_pool_init(void)
-{
ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
if (!ghes_estatus_pool)
return -ENOMEM;
- return 0;
-}
-static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
- struct gen_pool_chunk *chunk,
- void *data)
-{
- free_page(chunk->start_addr);
-}
-
-static void ghes_estatus_pool_exit(void)
-{
- gen_pool_for_each_chunk(ghes_estatus_pool,
- ghes_estatus_pool_free_chunk_page, NULL);
- gen_pool_destroy(ghes_estatus_pool);
-}
+ len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
+ len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
-static int ghes_estatus_pool_expand(unsigned long len)
-{
- unsigned long i, pages, size, addr;
- int ret;
+ ghes_estatus_pool_size_request = PAGE_ALIGN(len);
+ addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
+ if (!addr)
+ return -ENOMEM;
- ghes_estatus_pool_size_request += PAGE_ALIGN(len);
- size = gen_pool_size(ghes_estatus_pool);
- if (size >= ghes_estatus_pool_size_request)
- return 0;
- pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
- for (i = 0; i < pages; i++) {
- addr = __get_free_page(GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
- if (ret)
- return ret;
- }
+ /*
+ * New allocation must be visible in all pgd before it can be found by
+ * an NMI allocating from the pool.
+ */
+ vmalloc_sync_all();
- return 0;
+ return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
}
static int map_gen_v2(struct ghes *ghes)
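Editor's note: the estatus pool reworked above is a genalloc gen_pool backed by a single vmalloc() area; the pool is used because its allocations take no locks and therefore stay usable from the NMI-like handlers later in this file. A minimal sketch of the genalloc calls involved, with placeholder names and an arbitrary minimum-allocation order:

#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define EXAMPLE_MIN_ALLOC_ORDER 3       /* smallest allocation: 8 bytes */

static struct gen_pool *example_pool;

static int example_pool_init(size_t backing_len)
{
        unsigned long addr;

        example_pool = gen_pool_create(EXAMPLE_MIN_ALLOC_ORDER, -1);
        if (!example_pool)
                return -ENOMEM;

        /* One vmalloc'ed backing area, handed to the pool as a single chunk. */
        addr = (unsigned long)vmalloc(PAGE_ALIGN(backing_len));
        if (!addr)
                return -ENOMEM;

        return gen_pool_add(example_pool, addr, PAGE_ALIGN(backing_len), -1);
}

/* Allocation takes no locks, so this can be called from NMI-like context. */
static void *example_pool_alloc(size_t len)
{
        return (void *)gen_pool_alloc(example_pool, len);
}

static void example_pool_free(void *p, size_t len)
{
        gen_pool_free(example_pool, (unsigned long)p, len);
}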
@@ -217,6 +193,21 @@ static void unmap_gen_v2(struct ghes *ghes)
apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
}
+static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
+{
+ int rc;
+ u64 val = 0;
+
+ rc = apei_read(&val, &gv2->read_ack_register);
+ if (rc)
+ return;
+
+ val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
+ val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset;
+
+ apei_write(val, &gv2->read_ack_register);
+}
+
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
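Editor's note: the new ghes_ack_error() above folds the GHESv2 Read Ack preserve and write masks, shifted by the register's bit offset, into the value written back. The arithmetic in isolation, as a hedged sketch with made-up parameters rather than the real struct acpi_hest_generic_v2 fields:

#include <linux/types.h>

/*
 * Illustrative only: build the value written back to a GHESv2 Read Ack
 * register from its current contents, the preserve/write masks supplied by
 * the firmware, and the register's bit offset.
 */
static u64 example_read_ack_value(u64 current_val, u64 preserve_mask,
                                  u64 write_mask, u8 bit_offset)
{
        u64 val = current_val;

        /* Keep only the bits the firmware asked the OS to preserve... */
        val &= preserve_mask << bit_offset;
        /* ...then set the bits that tell the firmware the record was consumed. */
        val |= write_mask << bit_offset;

        return val;
}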
@@ -289,23 +280,16 @@ static inline int ghes_severity(int severity)
}
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
- int from_phys)
+ int from_phys,
+ enum fixed_addresses fixmap_idx)
{
void __iomem *vaddr;
- unsigned long flags = 0;
- int in_nmi = in_nmi();
u64 offset;
u32 trunk;
while (len > 0) {
offset = paddr - (paddr & PAGE_MASK);
- if (in_nmi) {
- raw_spin_lock(&ghes_ioremap_lock_nmi);
- vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
- } else {
- spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
- vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
- }
+ vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
trunk = PAGE_SIZE - offset;
trunk = min(trunk, len);
if (from_phys)
@@ -315,72 +299,114 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
len -= trunk;
paddr += trunk;
buffer += trunk;
- if (in_nmi) {
- ghes_iounmap_nmi();
- raw_spin_unlock(&ghes_ioremap_lock_nmi);
- } else {
- ghes_iounmap_irq();
- spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
- }
+ ghes_unmap(vaddr, fixmap_idx);
+ }
+}
+
+/* Check the top-level record header has an appropriate size. */
+static int __ghes_check_estatus(struct ghes *ghes,
+ struct acpi_hest_generic_status *estatus)
+{
+ u32 len = cper_estatus_len(estatus);
+
+ if (len < sizeof(*estatus)) {
+ pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
+ return -EIO;
+ }
+
+ if (len > ghes->generic->error_block_length) {
+ pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
+ return -EIO;
+ }
+
+ if (cper_estatus_check_header(estatus)) {
+ pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
+ return -EIO;
}
+
+ return 0;
}
-static int ghes_read_estatus(struct ghes *ghes, int silent)
+/* Read the CPER block, returning its address, and header in estatus. */
+static int __ghes_peek_estatus(struct ghes *ghes,
+ struct acpi_hest_generic_status *estatus,
+ u64 *buf_paddr, enum fixed_addresses fixmap_idx)
{
struct acpi_hest_generic *g = ghes->generic;
- u64 buf_paddr;
- u32 len;
int rc;
- rc = apei_read(&buf_paddr, &g->error_status_address);
+ rc = apei_read(buf_paddr, &g->error_status_address);
if (rc) {
- if (!silent && printk_ratelimit())
- pr_warning(FW_WARN GHES_PFX
+ *buf_paddr = 0;
+ pr_warn_ratelimited(FW_WARN GHES_PFX
"Failed to read error status block address for hardware error source: %d.\n",
g->header.source_id);
return -EIO;
}
- if (!buf_paddr)
+ if (!*buf_paddr)
return -ENOENT;
- ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
- sizeof(*ghes->estatus), 1);
- if (!ghes->estatus->block_status)
+ ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
+ fixmap_idx);
+ if (!estatus->block_status) {
+ *buf_paddr = 0;
return -ENOENT;
+ }
- ghes->buffer_paddr = buf_paddr;
- ghes->flags |= GHES_TO_CLEAR;
+ return __ghes_check_estatus(ghes, estatus);
+}
- rc = -EIO;
- len = cper_estatus_len(ghes->estatus);
- if (len < sizeof(*ghes->estatus))
- goto err_read_block;
- if (len > ghes->generic->error_block_length)
- goto err_read_block;
- if (cper_estatus_check_header(ghes->estatus))
- goto err_read_block;
- ghes_copy_tofrom_phys(ghes->estatus + 1,
- buf_paddr + sizeof(*ghes->estatus),
- len - sizeof(*ghes->estatus), 1);
- if (cper_estatus_check(ghes->estatus))
- goto err_read_block;
- rc = 0;
-
-err_read_block:
- if (rc && !silent && printk_ratelimit())
- pr_warning(FW_WARN GHES_PFX
- "Failed to read error status block!\n");
- return rc;
+static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
+ u64 buf_paddr, enum fixed_addresses fixmap_idx,
+ size_t buf_len)
+{
+ ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
+ if (cper_estatus_check(estatus)) {
+ pr_warn_ratelimited(FW_WARN GHES_PFX
+ "Failed to read error status block!\n");
+ return -EIO;
+ }
+
+ return 0;
}
-static void ghes_clear_estatus(struct ghes *ghes)
+static int ghes_read_estatus(struct ghes *ghes,
+ struct acpi_hest_generic_status *estatus,
+ u64 *buf_paddr, enum fixed_addresses fixmap_idx)
{
- ghes->estatus->block_status = 0;
- if (!(ghes->flags & GHES_TO_CLEAR))
+ int rc;
+
+ rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
+ if (rc)
+ return rc;
+
+ rc = __ghes_check_estatus(ghes, estatus);
+ if (rc)
+ return rc;
+
+ return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
+ cper_estatus_len(estatus));
+}
+
+static void ghes_clear_estatus(struct ghes *ghes,
+ struct acpi_hest_generic_status *estatus,
+ u64 buf_paddr, enum fixed_addresses fixmap_idx)
+{
+ estatus->block_status = 0;
+
+ if (!buf_paddr)
return;
- ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
- sizeof(ghes->estatus->block_status), 0);
- ghes->flags &= ~GHES_TO_CLEAR;
+
+ ghes_copy_tofrom_phys(estatus, buf_paddr,
+ sizeof(estatus->block_status), 0,
+ fixmap_idx);
+
+ /*
+ * GHESv2 type HEST entries introduce support for error acknowledgment,
+ * so only acknowledge the error if this support is present.
+ */
+ if (is_hest_type_generic_v2(ghes))
+ ghes_ack_error(ghes->generic_v2);
}
static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
@@ -672,26 +698,13 @@ static void ghes_estatus_cache_add(
rcu_read_unlock();
}
-static int ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
+static void __ghes_panic(struct ghes *ghes,
+ struct acpi_hest_generic_status *estatus,
+ u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
- int rc;
- u64 val = 0;
+ __ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
- rc = apei_read(&val, &gv2->read_ack_register);
- if (rc)
- return rc;
-
- val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
- val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset;
-
- return apei_write(val, &gv2->read_ack_register);
-}
-
-static void __ghes_panic(struct ghes *ghes)
-{
- __ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
-
- ghes_clear_estatus(ghes);
+ ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
/* reboot to log the error! */
if (!panic_timeout)
@@ -701,34 +714,25 @@ static void __ghes_panic(struct ghes *ghes)
static int ghes_proc(struct ghes *ghes)
{
+ struct acpi_hest_generic_status *estatus = ghes->estatus;
+ u64 buf_paddr;
int rc;
- rc = ghes_read_estatus(ghes, 0);
+ rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ);
if (rc)
goto out;
- if (ghes_severity(ghes->estatus->error_severity) >= GHES_SEV_PANIC) {
- __ghes_panic(ghes);
- }
+ if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC)
+ __ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
- if (!ghes_estatus_cached(ghes->estatus)) {
- if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
- ghes_estatus_cache_add(ghes->generic, ghes->estatus);
+ if (!ghes_estatus_cached(estatus)) {
+ if (ghes_print_estatus(NULL, ghes->generic, estatus))
+ ghes_estatus_cache_add(ghes->generic, estatus);
}
- ghes_do_proc(ghes, ghes->estatus);
+ ghes_do_proc(ghes, estatus);
out:
- ghes_clear_estatus(ghes);
-
- if (rc == -ENOENT)
- return rc;
-
- /*
- * GHESv2 type HEST entries introduce support for error acknowledgment,
- * so only acknowledge the error if this support is present.
- */
- if (is_hest_type_generic_v2(ghes))
- return ghes_ack_error(ghes->generic_v2);
+ ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ);
return rc;
}
@@ -751,8 +755,11 @@ static void ghes_add_timer(struct ghes *ghes)
static void ghes_poll_func(struct timer_list *t)
{
struct ghes *ghes = from_timer(ghes, t, timer);
+ unsigned long flags;
+ spin_lock_irqsave(&ghes_notify_lock_irq, flags);
ghes_proc(ghes);
+ spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
if (!(ghes->flags & GHES_EXITING))
ghes_add_timer(ghes);
}
@@ -760,9 +767,12 @@ static void ghes_poll_func(struct timer_list *t)
static irqreturn_t ghes_irq_func(int irq, void *data)
{
struct ghes *ghes = data;
+ unsigned long flags;
int rc;
+ spin_lock_irqsave(&ghes_notify_lock_irq, flags);
rc = ghes_proc(ghes);
+ spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
if (rc)
return IRQ_NONE;
@@ -773,14 +783,17 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
void *data)
{
struct ghes *ghes;
+ unsigned long flags;
int ret = NOTIFY_DONE;
+ spin_lock_irqsave(&ghes_notify_lock_irq, flags);
rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_hed, list) {
if (!ghes_proc(ghes))
ret = NOTIFY_OK;
}
rcu_read_unlock();
+ spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return ret;
}
@@ -789,66 +802,20 @@ static struct notifier_block ghes_notifier_hed = {
.notifier_call = ghes_notify_hed,
};
-#ifdef CONFIG_ACPI_APEI_SEA
-static LIST_HEAD(ghes_sea);
-
-/*
- * Return 0 only if one of the SEA error sources successfully reported an error
- * record sent from the firmware.
- */
-int ghes_notify_sea(void)
-{
- struct ghes *ghes;
- int ret = -ENOENT;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ghes, &ghes_sea, list) {
- if (!ghes_proc(ghes))
- ret = 0;
- }
- rcu_read_unlock();
- return ret;
-}
-
-static void ghes_sea_add(struct ghes *ghes)
-{
- mutex_lock(&ghes_list_mutex);
- list_add_rcu(&ghes->list, &ghes_sea);
- mutex_unlock(&ghes_list_mutex);
-}
-
-static void ghes_sea_remove(struct ghes *ghes)
-{
- mutex_lock(&ghes_list_mutex);
- list_del_rcu(&ghes->list);
- mutex_unlock(&ghes_list_mutex);
- synchronize_rcu();
-}
-#else /* CONFIG_ACPI_APEI_SEA */
-static inline void ghes_sea_add(struct ghes *ghes) { }
-static inline void ghes_sea_remove(struct ghes *ghes) { }
-#endif /* CONFIG_ACPI_APEI_SEA */
-
-#ifdef CONFIG_HAVE_ACPI_APEI_NMI
/*
- * printk is not safe in NMI context. So in NMI handler, we allocate
- * required memory from lock-less memory allocator
- * (ghes_estatus_pool), save estatus into it, put them into lock-less
- * list (ghes_estatus_llist), then delay printk into IRQ context via
- * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record
- * required pool size by all NMI error source.
+ * Handlers for CPER records may not be NMI safe. For example,
+ * memory_failure_queue() takes spinlocks and calls schedule_work_on().
+ * In any NMI-like handler, memory from ghes_estatus_pool is used to save
+ * estatus, and added to the ghes_estatus_llist. irq_work_queue() causes
+ * ghes_proc_in_irq() to run in IRQ context where each estatus in
+ * ghes_estatus_llist is processed.
+ *
+ * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache
+ * to suppress frequent messages.
*/
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;
-/*
- * NMI may be triggered on any CPU, so ghes_in_nmi is used for
- * having only one concurrent reader.
- */
-static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
-
-static LIST_HEAD(ghes_nmi);
-
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
struct llist_node *llnode, *next;
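Editor's note: the comment block rewritten above describes the deferral scheme the rest of this file implements: NMI-like handlers push copied records onto a lock-free llist and kick irq_work, and the records are drained and printed later from IRQ context. A stripped-down sketch of that llist + irq_work pattern with placeholder node and handler names:

#include <linux/irq_work.h>
#include <linux/llist.h>

struct example_node {
        struct llist_node llnode;
        /* the copied payload (e.g. an error status block) would follow */
};

static struct llist_head example_list;
static struct irq_work example_work;

/* Runs later in IRQ context, where printing and scheduling work is safe. */
static void example_drain(struct irq_work *work)
{
        struct llist_node *llnode = llist_del_all(&example_list);
        struct example_node *node, *tmp;

        llist_for_each_entry_safe(node, tmp, llnode, llnode) {
                /* process the payload, then return the node to its pool */
        }
}

/* Called from the NMI-like handler: lock-free list add plus an irq_work kick. */
static void example_queue(struct example_node *node)
{
        llist_add(&node->llnode, &example_list);
        irq_work_queue(&example_work);
}

static void example_init(void)
{
        init_irq_work(&example_work, example_drain);
}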
@@ -905,96 +872,154 @@ static void ghes_print_queued_estatus(void)
}
}
-/* Save estatus for further processing in IRQ context */
-static void __process_error(struct ghes *ghes)
+static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
+ enum fixed_addresses fixmap_idx)
{
-#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- u32 len, node_len;
+ struct acpi_hest_generic_status *estatus, tmp_header;
struct ghes_estatus_node *estatus_node;
- struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+ u64 buf_paddr;
+ int sev, rc;
- if (ghes_estatus_cached(ghes->estatus))
- return;
+ if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG))
+ return -EOPNOTSUPP;
- len = cper_estatus_len(ghes->estatus);
- node_len = GHES_ESTATUS_NODE_LEN(len);
+ rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx);
+ if (rc) {
+ ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
+ return rc;
+ }
+ rc = __ghes_check_estatus(ghes, &tmp_header);
+ if (rc) {
+ ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
+ return rc;
+ }
+
+ len = cper_estatus_len(&tmp_header);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
if (!estatus_node)
- return;
+ return -ENOMEM;
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- memcpy(estatus, ghes->estatus, len);
- llist_add(&estatus_node->llnode, &ghes_estatus_llist);
-#endif
-}
-static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
-{
- struct ghes *ghes;
- int sev, ret = NMI_DONE;
+ if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
+ ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
+ rc = -ENOENT;
+ goto no_work;
+ }
- if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
- return ret;
+ sev = ghes_severity(estatus->error_severity);
+ if (sev >= GHES_SEV_PANIC) {
+ ghes_print_queued_estatus();
+ __ghes_panic(ghes, estatus, buf_paddr, fixmap_idx);
+ }
- list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
- if (ghes_read_estatus(ghes, 1)) {
- ghes_clear_estatus(ghes);
- continue;
- } else {
- ret = NMI_HANDLED;
- }
+ ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
- sev = ghes_severity(ghes->estatus->error_severity);
- if (sev >= GHES_SEV_PANIC) {
- oops_begin();
- ghes_print_queued_estatus();
- __ghes_panic(ghes);
- }
+ /* This error has been reported before, don't process it again. */
+ if (ghes_estatus_cached(estatus))
+ goto no_work;
- if (!(ghes->flags & GHES_TO_CLEAR))
- continue;
+ llist_add(&estatus_node->llnode, &ghes_estatus_llist);
+
+ return rc;
+
+no_work:
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
+
+ return rc;
+}
- __process_error(ghes);
- ghes_clear_estatus(ghes);
+static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list,
+ enum fixed_addresses fixmap_idx)
+{
+ int ret = -ENOENT;
+ struct ghes *ghes;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ghes, rcu_list, list) {
+ if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx))
+ ret = 0;
}
+ rcu_read_unlock();
-#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- if (ret == NMI_HANDLED)
+ if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret)
irq_work_queue(&ghes_proc_irq_work);
-#endif
- atomic_dec(&ghes_in_nmi);
+
return ret;
}
-static unsigned long ghes_esource_prealloc_size(
- const struct acpi_hest_generic *generic)
+#ifdef CONFIG_ACPI_APEI_SEA
+static LIST_HEAD(ghes_sea);
+
+/*
+ * Return 0 only if one of the SEA error sources successfully reported an error
+ * record sent from the firmware.
+ */
+int ghes_notify_sea(void)
{
- unsigned long block_length, prealloc_records, prealloc_size;
+ static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea);
+ int rv;
- block_length = min_t(unsigned long, generic->error_block_length,
- GHES_ESTATUS_MAX_SIZE);
- prealloc_records = max_t(unsigned long,
- generic->records_to_preallocate, 1);
- prealloc_size = min_t(unsigned long, block_length * prealloc_records,
- GHES_ESOURCE_PREALLOC_MAX_SIZE);
+ raw_spin_lock(&ghes_notify_lock_sea);
+ rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA);
+ raw_spin_unlock(&ghes_notify_lock_sea);
- return prealloc_size;
+ return rv;
}
-static void ghes_estatus_pool_shrink(unsigned long len)
+static void ghes_sea_add(struct ghes *ghes)
{
- ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+ mutex_lock(&ghes_list_mutex);
+ list_add_rcu(&ghes->list, &ghes_sea);
+ mutex_unlock(&ghes_list_mutex);
}
-static void ghes_nmi_add(struct ghes *ghes)
+static void ghes_sea_remove(struct ghes *ghes)
{
- unsigned long len;
+ mutex_lock(&ghes_list_mutex);
+ list_del_rcu(&ghes->list);
+ mutex_unlock(&ghes_list_mutex);
+ synchronize_rcu();
+}
+#else /* CONFIG_ACPI_APEI_SEA */
+static inline void ghes_sea_add(struct ghes *ghes) { }
+static inline void ghes_sea_remove(struct ghes *ghes) { }
+#endif /* CONFIG_ACPI_APEI_SEA */
+
+#ifdef CONFIG_HAVE_ACPI_APEI_NMI
+/*
+ * NMI may be triggered on any CPU, so ghes_in_nmi is used for
+ * having only one concurrent reader.
+ */
+static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
+
+static LIST_HEAD(ghes_nmi);
+
+static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
+{
+ static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi);
+ int ret = NMI_DONE;
+
+ if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
+ return ret;
+
+ raw_spin_lock(&ghes_notify_lock_nmi);
+ if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI))
+ ret = NMI_HANDLED;
+ raw_spin_unlock(&ghes_notify_lock_nmi);
- len = ghes_esource_prealloc_size(ghes->generic);
- ghes_estatus_pool_expand(len);
+ atomic_dec(&ghes_in_nmi);
+ return ret;
+}
+
+static void ghes_nmi_add(struct ghes *ghes)
+{
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_nmi))
register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
@@ -1004,8 +1029,6 @@ static void ghes_nmi_add(struct ghes *ghes)
static void ghes_nmi_remove(struct ghes *ghes)
{
- unsigned long len;
-
mutex_lock(&ghes_list_mutex);
list_del_rcu(&ghes->list);
if (list_empty(&ghes_nmi))
@@ -1016,24 +1039,79 @@ static void ghes_nmi_remove(struct ghes *ghes)
* freed after NMI handler finishes.
*/
synchronize_rcu();
- len = ghes_esource_prealloc_size(ghes->generic);
- ghes_estatus_pool_shrink(len);
}
+#else /* CONFIG_HAVE_ACPI_APEI_NMI */
+static inline void ghes_nmi_add(struct ghes *ghes) { }
+static inline void ghes_nmi_remove(struct ghes *ghes) { }
+#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
static void ghes_nmi_init_cxt(void)
{
init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}
-#else /* CONFIG_HAVE_ACPI_APEI_NMI */
-static inline void ghes_nmi_add(struct ghes *ghes) { }
-static inline void ghes_nmi_remove(struct ghes *ghes) { }
-static inline void ghes_nmi_init_cxt(void) { }
-#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
+
+static int __ghes_sdei_callback(struct ghes *ghes,
+ enum fixed_addresses fixmap_idx)
+{
+ if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) {
+ irq_work_queue(&ghes_proc_irq_work);
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs,
+ void *arg)
+{
+ static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal);
+ struct ghes *ghes = arg;
+ int err;
+
+ raw_spin_lock(&ghes_notify_lock_sdei_normal);
+ err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL);
+ raw_spin_unlock(&ghes_notify_lock_sdei_normal);
+
+ return err;
+}
+
+static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs,
+ void *arg)
+{
+ static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical);
+ struct ghes *ghes = arg;
+ int err;
+
+ raw_spin_lock(&ghes_notify_lock_sdei_critical);
+ err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL);
+ raw_spin_unlock(&ghes_notify_lock_sdei_critical);
+
+ return err;
+}
+
+static int apei_sdei_register_ghes(struct ghes *ghes)
+{
+ if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
+ return -EOPNOTSUPP;
+
+ return sdei_register_ghes(ghes, ghes_sdei_normal_callback,
+ ghes_sdei_critical_callback);
+}
+
+static int apei_sdei_unregister_ghes(struct ghes *ghes)
+{
+ if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
+ return -EOPNOTSUPP;
+
+ return sdei_unregister_ghes(ghes);
+}
static int ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
+ unsigned long flags;
int rc = -EINVAL;
@@ -1064,6 +1142,13 @@ static int ghes_probe(struct platform_device *ghes_dev)
goto err;
}
break;
+ case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
+ if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) {
+ pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n",
+ generic->header.source_id);
+ goto err;
+ }
+ break;
case ACPI_HEST_NOTIFY_LOCAL:
pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
generic->header.source_id);
@@ -1127,6 +1212,11 @@ static int ghes_probe(struct platform_device *ghes_dev)
case ACPI_HEST_NOTIFY_NMI:
ghes_nmi_add(ghes);
break;
+ case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
+ rc = apei_sdei_register_ghes(ghes);
+ if (rc)
+ goto err;
+ break;
default:
BUG();
}
@@ -1136,7 +1226,9 @@ static int ghes_probe(struct platform_device *ghes_dev)
ghes_edac_register(ghes, &ghes_dev->dev);
/* Handle any pending errors right away */
+ spin_lock_irqsave(&ghes_notify_lock_irq, flags);
ghes_proc(ghes);
+ spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return 0;
@@ -1150,6 +1242,7 @@ err:
static int ghes_remove(struct platform_device *ghes_dev)
{
+ int rc;
struct ghes *ghes;
struct acpi_hest_generic *generic;
@@ -1182,6 +1275,11 @@ static int ghes_remove(struct platform_device *ghes_dev)
case ACPI_HEST_NOTIFY_NMI:
ghes_nmi_remove(ghes);
break;
+ case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED:
+ rc = apei_sdei_unregister_ghes(ghes);
+ if (rc)
+ return rc;
+ break;
default:
BUG();
break;
@@ -1230,18 +1328,9 @@ static int __init ghes_init(void)
ghes_nmi_init_cxt();
- rc = ghes_estatus_pool_init();
- if (rc)
- goto err;
-
- rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
- GHES_ESTATUS_CACHE_ALLOCED_MAX);
- if (rc)
- goto err_pool_exit;
-
rc = platform_driver_register(&ghes_platform_driver);
if (rc)
- goto err_pool_exit;
+ goto err;
rc = apei_osc_setup();
if (rc == 0 && osc_sb_apei_support_acked)
@@ -1254,8 +1343,6 @@ static int __init ghes_init(void)
pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
return 0;
-err_pool_exit:
- ghes_estatus_pool_exit();
err:
return rc;
}
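
The NMI, SEA and SDEI notify paths above all funnel through ghes_in_nmi_queue_one_entry(), which pushes a pre-allocated estatus node onto ghes_estatus_llist and leaves the heavy lifting to ghes_proc_irq_work. A minimal sketch of that lock-free llist + irq_work deferral pattern follows; every name is illustrative, and unlike the real NMI path it allocates with kmalloc() rather than from the gen_pool, which would not be NMI-safe.

/* Hypothetical sketch of the llist + irq_work deferral used above. */
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct example_node {
	struct llist_node llnode;
	int payload;
};

static LLIST_HEAD(example_llist);
static struct irq_work example_work;

static void example_work_fn(struct irq_work *work)
{
	struct llist_node *node, *next;

	/* Detach everything queued so far in one atomic step. */
	for (node = llist_del_all(&example_llist); node; node = next) {
		struct example_node *e = llist_entry(node, struct example_node,
						     llnode);

		next = llist_next(node);
		pr_info("deferred payload %d\n", e->payload);
		kfree(e);
	}
}

/*
 * Producer side: link a node and kick the irq_work. A real NMI producer
 * would take its memory from a gen_pool instead of kmalloc().
 */
static void example_report(int payload)
{
	struct example_node *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return;
	e->payload = payload;
	llist_add(&e->llnode, &example_llist);
	irq_work_queue(&example_work);
}

static int __init example_init(void)
{
	init_irq_work(&example_work, example_work_fn);
	return 0;
}
core_initcall(example_init);
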
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index b1e9f81ebeea..8113ddb14d28 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <acpi/apei.h>
+#include <acpi/ghes.h>
#include "apei-internal.h"
@@ -53,6 +54,7 @@ static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
[ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
[ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
[ACPI_HEST_TYPE_GENERIC_ERROR_V2] = sizeof(struct acpi_hest_generic_v2),
+ [ACPI_HEST_TYPE_IA32_DEFERRED_CHECK] = -1,
};
static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
@@ -75,6 +77,11 @@ static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
len = sizeof(*mc) + mc->num_hardware_banks *
sizeof(struct acpi_hest_ia_error_bank);
+ } else if (hest_type == ACPI_HEST_TYPE_IA32_DEFERRED_CHECK) {
+ struct acpi_hest_ia_deferred_check *mc;
+ mc = (struct acpi_hest_ia_deferred_check *)hest_hdr;
+ len = sizeof(*mc) + mc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
}
BUG_ON(len == -1);
@@ -203,6 +210,11 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count)
rc = apei_hest_parse(hest_parse_ghes, &ghes_arr);
if (rc)
goto err;
+
+ rc = ghes_estatus_pool_init(ghes_count);
+ if (rc)
+ goto err;
+
out:
kfree(ghes_arr.ghes_devs);
return rc;
@@ -251,7 +263,9 @@ void __init acpi_hest_init(void)
rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
if (rc)
goto err;
- rc = hest_ghes_dev_register(ghes_count);
+
+ if (ghes_count)
+ rc = hest_ghes_dev_register(ghes_count);
if (rc)
goto err;
}
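
hest_ghes_dev_register() now sizes the estatus pool from the GHES count gathered by walking HEST with apei_hest_parse(). A hedged sketch of that walker pattern with a hypothetical counting callback (usable only inside drivers/acpi/apei, where apei_hest_parse() is declared):

#include <linux/acpi.h>
#include <linux/printk.h>
#include "apei-internal.h"

/* Illustrative callback: count Generic Error Source entries in HEST. */
static int __init example_count_ghes(struct acpi_hest_header *hest_hdr,
				     void *data)
{
	int *count = data;

	if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR ||
	    hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2)
		(*count)++;

	return 0;
}

static void __init example_report_ghes_count(void)
{
	int ghes_count = 0;

	if (!apei_hest_parse(example_count_ghes, &ghes_count))
		pr_info("HEST exposes %d GHES entries\n", ghes_count);
}
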
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index fdd90ffceb85..e48894e002ba 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -876,7 +876,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
return (resv == its->its_count) ? resv : -ENODEV;
}
#else
-static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev);
+static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
struct device *dev)
@@ -952,9 +952,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
{
struct acpi_iort_node *node;
struct acpi_iort_root_complex *rc;
+ struct pci_bus *pbus = to_pci_dev(dev)->bus;
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
- iort_match_node_callback, dev);
+ iort_match_node_callback, &pbus->dev);
if (!node || node->revision < 1)
return -ENODEV;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 99d820a693a8..6ecbbabf1233 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -799,10 +799,24 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
}
EXPORT_SYMBOL_GPL(acpi_match_device);
+static const void *acpi_of_device_get_match_data(const struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ const struct of_device_id *match = NULL;
+
+ if (!acpi_of_match_device(adev, dev->driver->of_match_table, &match))
+ return NULL;
+
+ return match->data;
+}
+
const void *acpi_device_get_match_data(const struct device *dev)
{
const struct acpi_device_id *match;
+ if (!dev->driver->acpi_match_table)
+ return acpi_of_device_get_match_data(dev);
+
match = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!match)
return NULL;
@@ -1029,6 +1043,9 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
+ /* Initialize debug output. Linux does not use ACPICA defaults */
+ acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
+
#ifdef CONFIG_X86
/*
* If the machine falls into the DMI check table,
@@ -1054,18 +1071,6 @@ void __init acpi_early_init(void)
goto error0;
}
- /*
- * ACPI 2.0 requires the EC driver to be loaded and work before
- * the EC device is found in the namespace (i.e. before
- * acpi_load_tables() is called).
- *
- * This is accomplished by looking for the ECDT table, and getting
- * the EC parameters out of that.
- *
- * Ignore the result. Not having an ECDT is not fatal.
- */
- status = acpi_ec_ecdt_probe();
-
#ifdef CONFIG_X86
if (!acpi_ioapic) {
/* compatible (0) means level (3) */
@@ -1142,6 +1147,18 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /*
+ * ACPI 2.0 requires the EC driver to be loaded and work before the EC
+ * device is found in the namespace.
+ *
+ * This is accomplished by looking for the ECDT table and getting the EC
+ * parameters out of that.
+ *
+ * Do that before calling acpi_initialize_objects() which may trigger EC
+ * address space accesses.
+ */
+ acpi_ec_ecdt_probe();
+
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
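
The fallback above lets acpi_device_get_match_data() serve match data from the driver's of_match_table when no acpi_match_table exists, which matters for devices enumerated through an ACPI _DSD "compatible" string. A hypothetical driver sketch using the generic device_get_match_data() wrapper (all names and the compatible string are made up):

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_cfg {
	unsigned int fifo_depth;
};

static const struct example_cfg example_v1_cfg = { .fifo_depth = 16 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-v1", .data = &example_v1_cfg },
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)
{
	/* Works for both OF and ACPI (_DSD "compatible") enumeration. */
	const struct example_cfg *cfg = device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;
	dev_info(&pdev->dev, "FIFO depth %u\n", cfg->fifo_depth);
	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example",
		.of_match_table = example_of_match,
		/* no .acpi_match_table: ACPI falls back to the OF table */
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");
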
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 217a782c3e55..1b207fca1420 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1051,6 +1051,48 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
/**
+ * cppc_get_desired_perf - Get the value of desired performance register.
+ * @cpunum: CPU from which to get desired performance.
+ * @desired_perf: address of a variable to store the returned desired performance
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cpc_register_resource *desired_reg;
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+
+ desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+
+ if (CPC_IN_PCC(desired_reg)) {
+ int ret = 0;
+
+ if (pcc_ss_id < 0)
+ return -EIO;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+ cpc_read(cpunum, desired_reg, desired_perf);
+ else
+ ret = -EIO;
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+ }
+
+ cpc_read(cpunum, desired_reg, desired_perf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
+
+/**
* cppc_get_perf_caps - Get a CPUs performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
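
cppc_get_desired_perf() lets a cpufreq driver read back the last requested performance level without going through the full perf-caps path. A minimal caller sketch (the surrounding code is hypothetical):

#include <linux/kernel.h>
#include <acpi/cppc_acpi.h>

/* Print the desired-performance register of one CPU, if it is readable. */
static void example_show_desired_perf(int cpu)
{
	u64 desired;

	if (cppc_get_desired_perf(cpu, &desired))
		pr_warn("CPU%d: desired perf register not readable\n", cpu);
	else
		pr_info("CPU%d: desired perf %llu\n", cpu, desired);
}
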
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 4451877f83b6..aa972dc5cb7e 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -79,14 +79,8 @@ static const struct file_operations cm_fops = {
static int __init acpi_custom_method_init(void)
{
- if (acpi_debugfs_dir == NULL)
- return -ENOENT;
-
cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
acpi_debugfs_dir, NULL, &cm_fops);
- if (cm_dentry == NULL)
- return -ENODEV;
-
return 0;
}
diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile
index 06ea8809583d..e6032e47e83f 100644
--- a/drivers/acpi/dptf/Makefile
+++ b/drivers/acpi/dptf/Makefile
@@ -1,4 +1,2 @@
obj-$(CONFIG_ACPI) += int340x_thermal.o
obj-$(CONFIG_DPTF_POWER) += dptf_power.o
-
-ccflags-y += -Idrivers/acpi
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 86364097e236..0aa7c2e62e95 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -12,7 +12,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
-#include "internal.h"
+#include "../internal.h"
#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 9d66a47d32fb..48d4815603e5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -186,14 +186,17 @@ static void advance_transaction(struct acpi_ec *ec);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);
-struct acpi_ec *boot_ec, *first_ec;
+struct acpi_ec *first_ec;
EXPORT_SYMBOL(first_ec);
+
+static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt = false;
static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
* Logging/Debugging
@@ -499,6 +502,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
ec_log_drv("event blocked");
}
+/*
+ * Process _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+ int i, status;
+ u8 value = 0;
+
+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+ status = acpi_ec_query(ec, &value);
+ if (status || !value)
+ break;
+ }
+ if (unlikely(i == ACPI_EC_CLEAR_MAX))
+ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+ else
+ pr_info("%d stale EC events cleared\n", i);
+}
+
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
unsigned long flags;
@@ -507,6 +530,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
spin_unlock_irqrestore(&ec->lock, flags);
+
+ /* Drain additional events if hardware requires that */
+ if (EC_FLAGS_CLEAR_ON_RESUME)
+ acpi_ec_clear(ec);
}
#ifdef CONFIG_PM_SLEEP
@@ -1539,49 +1566,6 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
return ret;
}
-static int acpi_config_boot_ec(struct acpi_ec *ec, acpi_handle handle,
- bool handle_events, bool is_ecdt)
-{
- int ret;
-
- /*
- * Changing the ACPI handle results in a re-configuration of the
- * boot EC. And if it happens after the namespace initialization,
- * it causes _REG evaluations.
- */
- if (boot_ec && boot_ec->handle != handle)
- ec_remove_handlers(boot_ec);
-
- /* Unset old boot EC */
- if (boot_ec != ec)
- acpi_ec_free(boot_ec);
-
- /*
- * ECDT device creation is split into acpi_ec_ecdt_probe() and
- * acpi_ec_ecdt_start(). This function takes care of completing the
- * ECDT parsing logic as the handle update should be performed
- * between the installation/uninstallation of the handlers.
- */
- if (ec->handle != handle)
- ec->handle = handle;
-
- ret = acpi_ec_setup(ec, handle_events);
- if (ret)
- return ret;
-
- /* Set new boot EC */
- if (!boot_ec) {
- boot_ec = ec;
- boot_ec_is_ecdt = is_ecdt;
- }
-
- acpi_handle_info(boot_ec->handle,
- "Used as boot %s EC to handle transactions%s\n",
- is_ecdt ? "ECDT" : "DSDT",
- handle_events ? " and events" : "");
- return ret;
-}
-
static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
{
struct acpi_table_ecdt *ecdt_ptr;
@@ -1601,43 +1585,34 @@ static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
return true;
}
-static bool acpi_is_boot_ec(struct acpi_ec *ec)
-{
- if (!boot_ec)
- return false;
- if (ec->command_addr == boot_ec->command_addr &&
- ec->data_addr == boot_ec->data_addr)
- return true;
- return false;
-}
-
static int acpi_ec_add(struct acpi_device *device)
{
struct acpi_ec *ec = NULL;
- int ret;
- bool is_ecdt = false;
+ bool dep_update = true;
acpi_status status;
+ int ret;
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
- is_ecdt = true;
+ boot_ec_is_ecdt = true;
ec = boot_ec;
+ dep_update = false;
} else {
ec = acpi_ec_alloc();
if (!ec)
return -ENOMEM;
+
status = ec_parse_device(device->handle, 0, ec, NULL);
if (status != AE_CTRL_TERMINATE) {
ret = -EINVAL;
goto err_alloc;
}
- }
- if (acpi_is_boot_ec(ec)) {
- boot_ec_is_ecdt = is_ecdt;
- if (!is_ecdt) {
+ if (boot_ec && ec->command_addr == boot_ec->command_addr &&
+ ec->data_addr == boot_ec->data_addr) {
+ boot_ec_is_ecdt = false;
/*
* Trust PNP0C09 namespace location rather than
* ECDT ID. But trust ECDT GPE rather than _GPE
@@ -1649,12 +1624,17 @@ static int acpi_ec_add(struct acpi_device *device)
acpi_ec_free(ec);
ec = boot_ec;
}
- ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
- } else
- ret = acpi_ec_setup(ec, true);
+ }
+
+ ret = acpi_ec_setup(ec, true);
if (ret)
goto err_query;
+ if (ec == boot_ec)
+ acpi_handle_info(boot_ec->handle,
+ "Boot %s EC used to handle transactions and events\n",
+ boot_ec_is_ecdt ? "ECDT" : "DSDT");
+
device->driver_data = ec;
ret = !!request_region(ec->data_addr, 1, "EC data");
@@ -1662,7 +1642,7 @@ static int acpi_ec_add(struct acpi_device *device)
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
- if (!is_ecdt) {
+ if (dep_update) {
/* Reprobe devices depending on the EC */
acpi_walk_dep_device_list(ec->handle);
}
@@ -1730,10 +1710,10 @@ static const struct acpi_device_id ec_device_ids[] = {
* namespace EC before the main ACPI device enumeration process. It is
* retained for historical reasons and will be deprecated in the future.
*/
-int __init acpi_ec_dsdt_probe(void)
+void __init acpi_ec_dsdt_probe(void)
{
- acpi_status status;
struct acpi_ec *ec;
+ acpi_status status;
int ret;
/*
@@ -1743,21 +1723,22 @@ int __init acpi_ec_dsdt_probe(void)
* picking up an invalid EC device.
*/
if (boot_ec)
- return -ENODEV;
+ return;
ec = acpi_ec_alloc();
if (!ec)
- return -ENOMEM;
+ return;
+
/*
* At this point, the namespace is initialized, so start to find
* the namespace objects.
*/
- status = acpi_get_devices(ec_device_ids[0].id,
- ec_parse_device, ec, NULL);
+ status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
if (ACPI_FAILURE(status) || !ec->handle) {
- ret = -ENODEV;
- goto error;
+ acpi_ec_free(ec);
+ return;
}
+
/*
* When the DSDT EC is available, always re-configure boot EC to
* have _REG evaluated. _REG can only be evaluated after the
@@ -1765,11 +1746,16 @@ int __init acpi_ec_dsdt_probe(void)
* At this point, the GPE is not fully initialized, so do not
* handle the events.
*/
- ret = acpi_config_boot_ec(ec, ec->handle, false, false);
-error:
- if (ret)
+ ret = acpi_ec_setup(ec, false);
+ if (ret) {
acpi_ec_free(ec);
- return ret;
+ return;
+ }
+
+ boot_ec = ec;
+
+ acpi_handle_info(ec->handle,
+ "Boot DSDT EC used to handle transactions\n");
}
/*
@@ -1821,6 +1807,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id)
#endif
/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. If too many events accumulate, these ECs stop reporting GPEs until
+ * they are manually polled (e.g. Samsung Series 5/9 notebooks).
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing EC poll on resume.\n");
+ EC_FLAGS_CLEAR_ON_RESUME = 1;
+ ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
+ return 0;
+}
+
+/*
* Some ECDTs contain wrong register addresses.
* MSI MS-171F
* https://bugzilla.kernel.org/show_bug.cgi?id=12461
@@ -1869,39 +1880,38 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
ec_honor_ecdt_gpe, "ASUS X580VD", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
+ {
+ ec_clear_on_resume, "Samsung hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
};
-int __init acpi_ec_ecdt_probe(void)
+void __init acpi_ec_ecdt_probe(void)
{
- int ret;
- acpi_status status;
struct acpi_table_ecdt *ecdt_ptr;
struct acpi_ec *ec;
+ acpi_status status;
+ int ret;
- ec = acpi_ec_alloc();
- if (!ec)
- return -ENOMEM;
- /*
- * Generate a boot ec context
- */
+ /* Generate a boot ec context. */
dmi_check_system(ec_dmi_table);
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
- if (ACPI_FAILURE(status)) {
- ret = -ENODEV;
- goto error;
- }
+ if (ACPI_FAILURE(status))
+ return;
if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
/*
* Asus X50GL:
* https://bugzilla.kernel.org/show_bug.cgi?id=11880
*/
- ret = -ENODEV;
- goto error;
+ return;
}
+ ec = acpi_ec_alloc();
+ if (!ec)
+ return;
+
if (EC_FLAGS_CORRECT_ECDT) {
ec->command_addr = ecdt_ptr->data.address;
ec->data_addr = ecdt_ptr->control.address;
@@ -1910,16 +1920,22 @@ int __init acpi_ec_ecdt_probe(void)
ec->data_addr = ecdt_ptr->data.address;
}
ec->gpe = ecdt_ptr->gpe;
+ ec->handle = ACPI_ROOT_OBJECT;
/*
* At this point, the namespace is not initialized, so do not find
* the namespace objects, or handle the events.
*/
- ret = acpi_config_boot_ec(ec, ACPI_ROOT_OBJECT, false, true);
-error:
- if (ret)
+ ret = acpi_ec_setup(ec, false);
+ if (ret) {
acpi_ec_free(ec);
- return ret;
+ return;
+ }
+
+ boot_ec = ec;
+ boot_ec_is_ecdt = true;
+
+ pr_info("Boot ECDT EC used to handle transactions\n");
}
#ifdef CONFIG_PM_SLEEP
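
The new EC_FLAGS_CLEAR_ON_RESUME quirk is selected through the DMI table above. A generic, hypothetical sketch of the dmi_system_id quirk pattern (the real ec_dmi_table uses a positional initializer style; everything below is illustrative):

#include <linux/dmi.h>
#include <linux/printk.h>

static bool example_quirk_needed;

static int example_set_quirk(const struct dmi_system_id *id)
{
	pr_info("applying quirk for %s\n", id->ident);
	example_quirk_needed = true;
	return 1;	/* non-zero stops the table scan */
}

static const struct dmi_system_id example_quirk_table[] = {
	{
		.callback = example_set_quirk,
		.ident = "Example vendor laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},
	{ }	/* terminator */
};

static void __init example_check_quirks(void)
{
	dmi_check_system(example_quirk_table);
}
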
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index dd70d6c2bca0..23faa66ea772 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -108,52 +108,32 @@ static const struct file_operations acpi_ec_io_ops = {
.llseek = default_llseek,
};
-static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
+static void acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
{
struct dentry *dev_dir;
char name[64];
umode_t mode = 0400;
- if (ec_device_count == 0) {
+ if (ec_device_count == 0)
acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL);
- if (!acpi_ec_debugfs_dir)
- return -ENOMEM;
- }
sprintf(name, "ec%u", ec_device_count);
dev_dir = debugfs_create_dir(name, acpi_ec_debugfs_dir);
- if (!dev_dir) {
- if (ec_device_count != 0)
- goto error;
- return -ENOMEM;
- }
- if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
- goto error;
- if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
- &first_ec->global_lock))
- goto error;
+ debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe);
+ debugfs_create_bool("use_global_lock", 0444, dev_dir,
+ &first_ec->global_lock);
if (write_support)
mode = 0600;
- if (!debugfs_create_file("io", mode, dev_dir, ec, &acpi_ec_io_ops))
- goto error;
-
- return 0;
-
-error:
- debugfs_remove_recursive(acpi_ec_debugfs_dir);
- return -ENOMEM;
+ debugfs_create_file("io", mode, dev_dir, ec, &acpi_ec_io_ops);
}
static int __init acpi_ec_sys_init(void)
{
- int err = 0;
if (first_ec)
- err = acpi_ec_add_debugfs(first_ec, 0);
- else
- err = -ENODEV;
- return err;
+ acpi_ec_add_debugfs(first_ec, 0);
+ return 0;
}
static void __exit acpi_ec_sys_exit(void)
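
The simplification above follows the debugfs convention that creation failures are non-fatal and return values are not meant to be checked; callers create what they can and tear everything down with a single debugfs_remove_recursive(). A small hedged sketch of that style (names are illustrative):

#include <linux/debugfs.h>

static struct dentry *example_dir;
static u32 example_counter;
static bool example_enabled;

static void example_debugfs_init(void)
{
	/* No return-value checks: debugfs errors are intentionally ignored. */
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_u32("counter", 0444, example_dir, &example_counter);
	debugfs_create_bool("enabled", 0644, example_dir, &example_enabled);
}

static void example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_dir);
}
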
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7e6952edb5b0..6eaf06db7752 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -81,7 +81,11 @@ void acpi_debugfs_init(void);
#else
static inline void acpi_debugfs_init(void) { return; }
#endif
+#ifdef CONFIG_PCI
void acpi_lpss_init(void);
+#else
+static inline void acpi_lpss_init(void) {}
+#endif
void acpi_apd_init(void);
@@ -188,8 +192,8 @@ extern struct acpi_ec *first_ec;
typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
-int acpi_ec_ecdt_probe(void);
-int acpi_ec_dsdt_probe(void);
+void acpi_ec_ecdt_probe(void);
+void acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_mark_gpe_for_wake(void);
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 7c352cba0528..c3b2222e2129 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -196,7 +196,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
fwnode = acpi_gsi_domain_id;
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
irq->triggering, irq->polarity,
- irq->sharable, ctx);
+ irq->shareable, ctx);
return AE_CTRL_TERMINATE;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
eirq = &ares->data.extended_irq;
@@ -209,7 +209,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source);
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
eirq->triggering, eirq->polarity,
- eirq->sharable, ctx);
+ eirq->shareable, ctx);
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 011d3db19c80..e18ade5d74e9 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -26,7 +26,6 @@
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"
-#include "intel.h"
/*
* For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
}
EXPORT_SYMBOL(to_nfit_uuid);
-static struct acpi_nfit_desc *to_acpi_nfit_desc(
- struct nvdimm_bus_descriptor *nd_desc)
-{
- return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
-}
-
static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -416,10 +409,36 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
return true;
}
+static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
+ struct nd_cmd_pkg *call_pkg)
+{
+ if (call_pkg) {
+ int i;
+
+ if (nfit_mem->family != call_pkg->nd_family)
+ return -ENOTTY;
+
+ for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+ if (call_pkg->nd_reserved2[i])
+ return -EINVAL;
+ return call_pkg->nd_command;
+ }
+
+ /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+ return cmd;
+
+ /*
+ * Force function number validation to fail since 0 is never
+ * published as a valid function in dsm_mask.
+ */
+ return 0;
+}
+
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
union acpi_object in_obj, in_buf, *out_obj;
const struct nd_cmd_desc *desc = NULL;
@@ -429,30 +448,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned long cmd_mask, dsm_mask;
u32 offset, fw_status = 0;
acpi_handle handle;
- unsigned int func;
const guid_t *guid;
- int rc, i;
+ int func, rc, i;
if (cmd_rc)
*cmd_rc = -EINVAL;
- func = cmd;
- if (cmd == ND_CMD_CALL) {
- call_pkg = buf;
- func = call_pkg->nd_command;
-
- for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
- if (call_pkg->nd_reserved2[i])
- return -EINVAL;
- }
if (nvdimm) {
struct acpi_device *adev = nfit_mem->adev;
if (!adev)
return -ENOTTY;
- if (call_pkg && nfit_mem->family != call_pkg->nd_family)
- return -ENOTTY;
+ if (cmd == ND_CMD_CALL)
+ call_pkg = buf;
+ func = cmd_to_func(nfit_mem, cmd, call_pkg);
+ if (func < 0)
+ return func;
dimm_name = nvdimm_name(nvdimm);
cmd_name = nvdimm_cmd_name(cmd);
cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -463,6 +475,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
} else {
struct acpi_device *adev = to_acpi_dev(acpi_desc);
+ func = cmd;
cmd_name = nvdimm_bus_cmd_name(cmd);
cmd_mask = nd_desc->cmd_mask;
dsm_mask = cmd_mask;
@@ -477,7 +490,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
return -ENOTTY;
- if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
+ /*
+ * Check for a valid command. For ND_CMD_CALL, we also have to
+ * make sure that the DSM function is supported.
+ */
+ if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+ return -ENOTTY;
+ else if (!test_bit(cmd, &cmd_mask))
return -ENOTTY;
in_obj.type = ACPI_TYPE_PACKAGE;
@@ -721,6 +740,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_desc *acpi_desc;
struct nfit_mem *nfit_mem;
+ u16 physical_id;
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -728,10 +748,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
memdev = __to_nfit_memdev(nfit_mem);
if (memdev->device_handle == device_handle) {
+ *flags = memdev->flags;
+ physical_id = memdev->physical_id;
mutex_unlock(&acpi_desc->init_mutex);
mutex_unlock(&acpi_desc_lock);
- *flags = memdev->flags;
- return memdev->physical_id;
+ return physical_id;
}
}
mutex_unlock(&acpi_desc->init_mutex);
@@ -1872,6 +1893,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
return 0;
}
+ /*
+ * Function 0 is the command interrogation function, don't
+ * export it to potential userspace use, and enable it to be
+ * used as an error value in acpi_nfit_ctl().
+ */
+ dsm_mask &= ~1UL;
+
guid = to_nfit_uuid(nfit_mem->family);
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
if (acpi_check_dsm(adev_dimm->handle, guid,
@@ -2047,11 +2075,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
if (!nvdimm)
continue;
- rc = nvdimm_security_setup_events(nvdimm);
- if (rc < 0)
- dev_warn(acpi_desc->dev,
- "security event setup failed: %d\n", rc);
-
nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
if (nfit_kernfs)
nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
@@ -2231,7 +2254,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
if (!nd_set)
return -ENOMEM;
- ndr_desc->nd_set = nd_set;
guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
@@ -3367,7 +3389,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init);
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct device *dev = acpi_desc->dev;
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
@@ -3384,7 +3406,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
- struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
if (nvdimm)
return 0;
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 850b2927b4e7..f70de71f79d6 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
static void nvdimm_invalidate_cache(void);
-static int intel_security_unlock(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm,
return 0;
}
-static int intel_security_erase(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key,
enum nvdimm_passphrase_type ptype)
{
@@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm,
return 0;
}
-static int intel_security_query_overwrite(struct nvdimm *nvdimm)
+static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm)
return 0;
}
-static int intel_security_overwrite(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
const struct nvdimm_key_data *nkey)
{
int rc;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 274699463b4f..7bbbf8256a41 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -146,9 +146,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
struct acpi_srat_mem_affinity *p =
(struct acpi_srat_mem_affinity *)header;
- pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
- (unsigned long)p->base_address,
- (unsigned long)p->length,
+ pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
+ (unsigned long long)p->base_address,
+ (unsigned long long)p->length,
p->proximity_domain,
(p->flags & ACPI_SRAT_MEM_ENABLED) ?
"enabled" : "disabled",
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index d5eec352a6e1..df70b1eaef58 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -317,10 +317,10 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
resource->res.data.irq.polarity =
link->irq.polarity;
if (link->irq.triggering == ACPI_EDGE_SENSITIVE)
- resource->res.data.irq.sharable =
+ resource->res.data.irq.shareable =
ACPI_EXCLUSIVE;
else
- resource->res.data.irq.sharable = ACPI_SHARED;
+ resource->res.data.irq.shareable = ACPI_SHARED;
resource->res.data.irq.interrupt_count = 1;
resource->res.data.irq.interrupts[0] = irq;
break;
@@ -335,10 +335,10 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
resource->res.data.extended_irq.polarity =
link->irq.polarity;
if (link->irq.triggering == ACPI_EDGE_SENSITIVE)
- resource->res.data.irq.sharable =
+ resource->res.data.irq.shareable =
ACPI_EXCLUSIVE;
else
- resource->res.data.irq.sharable = ACPI_SHARED;
+ resource->res.data.irq.shareable = ACPI_SHARED;
resource->res.data.extended_irq.interrupt_count = 1;
resource->res.data.extended_irq.interrupts[0] = irq;
/* ignore resource_source, it's optional */
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 2579675b7082..e7c0006e6602 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -20,8 +20,11 @@
#define GPI1_LDO_ON (3 << 0)
#define GPI1_LDO_OFF (4 << 0)
-#define AXP288_ADC_TS_PIN_GPADC 0xf2
-#define AXP288_ADC_TS_PIN_ON 0xf3
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
static struct pmic_table power_table[] = {
{
@@ -212,22 +215,44 @@ out:
*/
static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
+ int ret, adc_ts_pin_ctrl;
u8 buf[2];
- int ret;
- ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
- AXP288_ADC_TS_PIN_GPADC);
+ /*
+ * The current-source used for the battery temp-sensor (TS) is shared
+ * with the GPADC. For proper fuel-gauge and charger operation the TS
+ * current-source needs to be permanently on. But to read the GPADC we
+ * need to temporarily switch the TS current-source to ondemand, so that
+ * the GPADC can use it; otherwise we will always read an all-0 value.
+ *
+ * Note that switching from on to ondemand is not necessary
+ * when the TS current-source is off (this happens on devices which
+ * do not use the TS-pin).
+ */
+ ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
if (ret)
return ret;
- /* After switching to the GPADC pin give things some time to settle */
- usleep_range(6000, 10000);
+ if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+ ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+ AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
+ if (ret)
+ return ret;
+
+ /* Wait a bit after switching the current-source */
+ usleep_range(6000, 10000);
+ }
ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
if (ret == 0)
ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
- regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
+ if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+ regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+ AXP288_ADC_TS_CURRENT_ON);
+ }
return ret;
}
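
The key change above is replacing regmap_write() of whole magic values with regmap_update_bits() on an explicit field mask, so only the two TS current-source bits of AXP288_ADC_TS_PIN_CTRL change and the remaining bits are preserved. A generic sketch of that read-modify-write pattern (register and field names are hypothetical):

#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_CTRL_REG	0x84
#define EXAMPLE_MODE_MASK	GENMASK(1, 0)
#define EXAMPLE_MODE_ONDEMAND	2

/* Update only the 2-bit mode field, leaving the rest of the register alone. */
static int example_set_ondemand(struct regmap *regmap)
{
	return regmap_update_bits(regmap, EXAMPLE_CTRL_REG,
				  EXAMPLE_MODE_MASK, EXAMPLE_MODE_ONDEMAND);
}
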
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1b475bc1ae16..665e93ca0b40 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list)
}
}
+static bool acpi_power_resource_is_dup(union acpi_object *package,
+ unsigned int start, unsigned int i)
+{
+ acpi_handle rhandle, dup;
+ unsigned int j;
+
+ /* The caller is expected to check the package element types */
+ rhandle = package->package.elements[i].reference.handle;
+ for (j = start; j < i; j++) {
+ dup = package->package.elements[j].reference.handle;
+ if (dup == rhandle)
+ return true;
+ }
+
+ return false;
+}
+
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list)
{
@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
err = -ENODEV;
break;
}
+
+ /* Some ACPI tables contain duplicate power resource references */
+ if (acpi_power_resource_is_dup(package, start, i))
+ continue;
+
err = acpi_add_power_resource(rhandle);
if (err)
break;
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index da031b1df6f5..ad31c50de3be 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -451,6 +451,11 @@ static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_ta
return cpu;
}
+static void acpi_pptt_warn_missing(void)
+{
+ pr_warn_once("No PPTT table found, cpu and cache topology may be inaccurate\n");
+}
+
/**
* topology_get_acpi_cpu_tag() - Find a unique topology value for a feature
* @table: Pointer to the head of the PPTT table
@@ -498,7 +503,7 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
if (ACPI_FAILURE(status)) {
- pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n");
+ acpi_pptt_warn_missing();
return -ENOENT;
}
retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
@@ -531,7 +536,7 @@ int acpi_find_last_cache_level(unsigned int cpu)
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
if (ACPI_FAILURE(status)) {
- pr_warn_once("No PPTT table found, cache topology may be inaccurate\n");
+ acpi_pptt_warn_missing();
} else {
number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id);
acpi_put_table(table);
@@ -563,7 +568,7 @@ int cache_setup_acpi(unsigned int cpu)
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
if (ACPI_FAILURE(status)) {
- pr_warn_once("No PPTT table found, cache topology may be inaccurate\n");
+ acpi_pptt_warn_missing();
return -ENOENT;
}
@@ -617,7 +622,7 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
if (ACPI_FAILURE(status)) {
- pr_warn_once("No PPTT table found, topology may be inaccurate\n");
+ acpi_pptt_warn_missing();
return -ENOENT;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b2131c4ea124..98d4ec5bf450 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -282,6 +282,13 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
pr->power.states[ACPI_STATE_C2].address,
pr->power.states[ACPI_STATE_C3].address));
+ snprintf(pr->power.states[ACPI_STATE_C2].desc,
+ ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
+ pr->power.states[ACPI_STATE_C2].address);
+ snprintf(pr->power.states[ACPI_STATE_C3].desc,
+ ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
+ pr->power.states[ACPI_STATE_C3].address);
+
return 0;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 316a0fc785e3..d556f2144de8 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -476,7 +476,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
- irq->sharable, true);
+ irq->shareable, true);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
@@ -487,7 +487,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
if (is_gsi(ext_irq))
acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
ext_irq->triggering, ext_irq->polarity,
- ext_irq->sharable, false);
+ ext_irq->shareable, false);
else
acpi_dev_irqresource_disabled(res, 0);
break;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 48eabb6c2d4f..8fccbe49612a 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -473,14 +473,22 @@ static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES);
void __init acpi_table_upgrade(void)
{
- void *data = (void *)initrd_start;
- size_t size = initrd_end - initrd_start;
+ void *data;
+ size_t size;
int sig, no, table_nr = 0, total_offset = 0;
long offset = 0;
struct acpi_table_header *table;
char cpio_path[32] = "kernel/firmware/acpi/";
struct cpio_data file;
+ if (IS_ENABLED(CONFIG_ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD)) {
+ data = __initramfs_start;
+ size = __initramfs_size;
+ } else {
+ data = (void *)initrd_start;
+ size = initrd_end - initrd_start;
+ }
+
if (data == NULL || size == 0)
return;
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 9a8e286dd86f..c6df14802741 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -56,6 +56,11 @@ static const struct always_present_id always_present_ids[] = {
*/
ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}),
ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
+
+ /* Lenovo Yoga Book uses PWM2 for keyboard backlight control */
+ ENTRY("80862289", "2", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+ }),
/*
* The INT0002 device is necessary to clear wakeup interrupt sources
* on Cherry Trail devices, without it we get nobody cared IRQ msgs.
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4c190f8d1f4c..6fdf2abe4598 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
- depends on MMU && !CPU_CACHE_VIVT
+ depends on MMU
default n
---help---
Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cdfc87629efb..8685882da64c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -329,6 +329,8 @@ struct binder_error {
* (invariant after initialized)
* @min_priority: minimum scheduling priority
* (invariant after initialized)
+ * @txn_security_ctx: require sender's security context
+ * (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
*
@@ -365,6 +367,7 @@ struct binder_node {
* invariant after initialization
*/
u8 accept_fds:1;
+ u8 txn_security_ctx:1;
u8 min_priority;
};
bool has_async_transaction;
@@ -615,6 +618,7 @@ struct binder_transaction {
long saved_priority;
kuid_t sender_euid;
struct list_head fd_fixups;
+ binder_uintptr_t security_ctx;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@@ -625,6 +629,26 @@ struct binder_transaction {
};
/**
+ * struct binder_object - union of flat binder object types
+ * @hdr: generic object header
+ * @fbo: binder object (nodes and refs)
+ * @fdo: file descriptor object
+ * @bbo: binder buffer pointer
+ * @fdao: file descriptor array
+ *
+ * Used for type-independent object copies
+ */
+struct binder_object {
+ union {
+ struct binder_object_header hdr;
+ struct flat_binder_object fbo;
+ struct binder_fd_object fdo;
+ struct binder_buffer_object bbo;
+ struct binder_fd_array_object fdao;
+ };
+};
+
+/**
* binder_proc_lock() - Acquire outer lock for given binder_proc
* @proc: struct binder_proc to acquire
*
@@ -1152,6 +1176,7 @@ static struct binder_node *binder_init_node_ilocked(
node->work.type = BINDER_WORK_NODE;
node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
@@ -2012,26 +2037,33 @@ static void binder_cleanup_transaction(struct binder_transaction *t,
}
/**
- * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * binder_get_object() - gets object and checks for valid metadata
+ * @proc: binder_proc owning the buffer
* @buffer: binder_buffer that we're parsing.
- * @offset: offset in the buffer at which to validate an object.
+ * @offset: offset in the @buffer at which to validate an object.
+ * @object: struct binder_object to read into
*
* Return: If there's a valid metadata object at @offset in @buffer, the
- * size of that object. Otherwise, it returns zero.
+ * size of that object. Otherwise, it returns zero. The object
+ * is read into the struct binder_object pointed to by @object.
*/
-static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+static size_t binder_get_object(struct binder_proc *proc,
+ struct binder_buffer *buffer,
+ unsigned long offset,
+ struct binder_object *object)
{
- /* Check if we can read a header first */
+ size_t read_size;
struct binder_object_header *hdr;
size_t object_size = 0;
- if (buffer->data_size < sizeof(*hdr) ||
- offset > buffer->data_size - sizeof(*hdr) ||
- !IS_ALIGNED(offset, sizeof(u32)))
+ read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
+ if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+ binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+ offset, read_size);
- /* Ok, now see if we can read a complete object. */
- hdr = (struct binder_object_header *)(buffer->data + offset);
+ /* Ok, now see if we read a complete object. */
+ hdr = &object->hdr;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER:
@@ -2060,10 +2092,13 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
/**
* binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @proc: binder_proc owning the buffer
* @b: binder_buffer containing the object
+ * @object: struct binder_object to read into
* @index: index in offset array at which the binder_buffer_object is
* located
- * @start: points to the start of the offset array
+ * @start_offset: points to the start of the offset array
+ * @object_offsetp: offset of @object read from @b
* @num_valid: the number of valid offsets in the offset array
*
* Return: If @index is within the valid range of the offset array
@@ -2074,34 +2109,46 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
* Note that the offset found in index @index itself is not
* verified; this function assumes that @num_valid elements
* from @start were previously verified to have valid offsets.
+ * If @object_offsetp is non-NULL, then the offset within
+ * @b is written to it.
*/
-static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
- binder_size_t index,
- binder_size_t *start,
- binder_size_t num_valid)
+static struct binder_buffer_object *binder_validate_ptr(
+ struct binder_proc *proc,
+ struct binder_buffer *b,
+ struct binder_object *object,
+ binder_size_t index,
+ binder_size_t start_offset,
+ binder_size_t *object_offsetp,
+ binder_size_t num_valid)
{
- struct binder_buffer_object *buffer_obj;
- binder_size_t *offp;
+ size_t object_size;
+ binder_size_t object_offset;
+ unsigned long buffer_offset;
if (index >= num_valid)
return NULL;
- offp = start + index;
- buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
- if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+ buffer_offset = start_offset + sizeof(binder_size_t) * index;
+ binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+ b, buffer_offset, sizeof(object_offset));
+ object_size = binder_get_object(proc, b, object_offset, object);
+ if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
return NULL;
+ if (object_offsetp)
+ *object_offsetp = object_offset;
- return buffer_obj;
+ return &object->bbo;
}
/**
* binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @proc: binder_proc owning the buffer
* @b: transaction buffer
- * @objects_start start of objects buffer
- * @buffer: binder_buffer_object in which to fix up
- * @offset: start offset in @buffer to fix up
- * @last_obj: last binder_buffer_object that we fixed up in
- * @last_min_offset: minimum fixup offset in @last_obj
+ * @objects_start_offset: offset to start of objects buffer
+ * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
+ * @fixup_offset: start offset in @buffer to fix up
+ * @last_obj_offset: offset to last binder_buffer_object that we fixed
+ * @last_min_offset: minimum fixup offset in object at @last_obj_offset
*
* Return: %true if a fixup in buffer @buffer at offset @offset is
* allowed.
@@ -2132,28 +2179,41 @@ static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
* C (parent = A, offset = 16)
* D (parent = B, offset = 0) // B is not A or any of A's parents
*/
-static bool binder_validate_fixup(struct binder_buffer *b,
- binder_size_t *objects_start,
- struct binder_buffer_object *buffer,
+static bool binder_validate_fixup(struct binder_proc *proc,
+ struct binder_buffer *b,
+ binder_size_t objects_start_offset,
+ binder_size_t buffer_obj_offset,
binder_size_t fixup_offset,
- struct binder_buffer_object *last_obj,
+ binder_size_t last_obj_offset,
binder_size_t last_min_offset)
{
- if (!last_obj) {
+ if (!last_obj_offset) {
/* Nothing to fix up in */
return false;
}
- while (last_obj != buffer) {
+ while (last_obj_offset != buffer_obj_offset) {
+ unsigned long buffer_offset;
+ struct binder_object last_object;
+ struct binder_buffer_object *last_bbo;
+ size_t object_size = binder_get_object(proc, b, last_obj_offset,
+ &last_object);
+ if (object_size != sizeof(*last_bbo))
+ return false;
+
+ last_bbo = &last_object.bbo;
/*
* Safe to retrieve the parent of last_obj, since it
* was already previously verified by the driver.
*/
- if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+ if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
return false;
- last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
- last_obj = (struct binder_buffer_object *)
- (b->data + *(objects_start + last_obj->parent));
+ last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
+ buffer_offset = objects_start_offset +
+ sizeof(binder_size_t) * last_bbo->parent;
+ binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
+ b, buffer_offset,
+ sizeof(last_obj_offset));
}
return (fixup_offset >= last_min_offset);
}
@@ -2218,35 +2278,42 @@ static void binder_deferred_fd_close(int fd)
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
- binder_size_t *failed_at)
+ binder_size_t failed_at,
+ bool is_failure)
{
- binder_size_t *offp, *off_start, *off_end;
int debug_id = buffer->debug_id;
+ binder_size_t off_start_offset, buffer_offset, off_end_offset;
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d buffer release %d, size %zd-%zd, failed at %pK\n",
+ "%d buffer release %d, size %zd-%zd, failed at %llx\n",
proc->pid, buffer->debug_id,
- buffer->data_size, buffer->offsets_size, failed_at);
+ buffer->data_size, buffer->offsets_size,
+ (unsigned long long)failed_at);
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- off_start = (binder_size_t *)(buffer->data +
- ALIGN(buffer->data_size, sizeof(void *)));
- if (failed_at)
- off_end = failed_at;
- else
- off_end = (void *)off_start + buffer->offsets_size;
- for (offp = off_start; offp < off_end; offp++) {
+ off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
+ off_end_offset = is_failure ? failed_at :
+ off_start_offset + buffer->offsets_size;
+ for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+ buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
- size_t object_size = binder_validate_object(buffer, *offp);
-
+ size_t object_size;
+ struct binder_object object;
+ binder_size_t object_offset;
+
+ binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+ buffer, buffer_offset,
+ sizeof(object_offset));
+ object_size = binder_get_object(proc, buffer,
+ object_offset, &object);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
- debug_id, (u64)*offp, buffer->data_size);
+ debug_id, (u64)object_offset, buffer->data_size);
continue;
}
- hdr = (struct binder_object_header *)(buffer->data + *offp);
+ hdr = &object.hdr;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
@@ -2309,10 +2376,11 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_FDA: {
struct binder_fd_array_object *fda;
struct binder_buffer_object *parent;
- uintptr_t parent_buffer;
- u32 *fd_array;
+ struct binder_object ptr_object;
+ binder_size_t fda_offset;
size_t fd_index;
binder_size_t fd_buf_size;
+ binder_size_t num_valid;
if (proc->tsk != current->group_leader) {
/*
@@ -2323,23 +2391,19 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
continue;
}
+ num_valid = (buffer_offset - off_start_offset) /
+ sizeof(binder_size_t);
fda = to_binder_fd_array_object(hdr);
- parent = binder_validate_ptr(buffer, fda->parent,
- off_start,
- offp - off_start);
+ parent = binder_validate_ptr(proc, buffer, &ptr_object,
+ fda->parent,
+ off_start_offset,
+ NULL,
+ num_valid);
if (!parent) {
pr_err("transaction release %d bad parent offset\n",
debug_id);
continue;
}
- /*
- * Since the parent was already fixed up, convert it
- * back to kernel address space to access it
- */
- parent_buffer = parent->buffer -
- binder_alloc_get_user_buffer_offset(
- &proc->alloc);
-
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
pr_err("transaction release %d invalid number of fds (%lld)\n",
@@ -2353,9 +2417,29 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
debug_id, (u64)fda->num_fds);
continue;
}
- fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
- for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
- binder_deferred_fd_close(fd_array[fd_index]);
+ /*
+ * the source data for binder_buffer_object is visible
+ * to user-space and the @buffer element is the user
+ * pointer to the buffer_object containing the fd_array.
+ * Convert the address to an offset relative to
+ * the base of the transaction buffer.
+ */
+ fda_offset =
+ (parent->buffer - (uintptr_t)buffer->user_data) +
+ fda->parent_offset;
+ for (fd_index = 0; fd_index < fda->num_fds;
+ fd_index++) {
+ u32 fd;
+ binder_size_t offset = fda_offset +
+ fd_index * sizeof(fd);
+
+ binder_alloc_copy_from_buffer(&proc->alloc,
+ &fd,
+ buffer,
+ offset,
+ sizeof(fd));
+ binder_deferred_fd_close(fd);
+ }
} break;
default:
pr_err("transaction release %d bad object type %x\n",
@@ -2491,7 +2575,7 @@ done:
return ret;
}
-static int binder_translate_fd(u32 *fdp,
+static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
@@ -2502,7 +2586,6 @@ static int binder_translate_fd(u32 *fdp,
struct file *file;
int ret = 0;
bool target_allows_fd;
- int fd = *fdp;
if (in_reply_to)
target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2541,7 +2624,7 @@ static int binder_translate_fd(u32 *fdp,
goto err_alloc;
}
fixup->file = file;
- fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
+ fixup->offset = fd_offset;
trace_binder_transaction_fd_send(t, fd, fixup->offset);
list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
@@ -2562,8 +2645,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
struct binder_transaction *in_reply_to)
{
binder_size_t fdi, fd_buf_size;
- uintptr_t parent_buffer;
- u32 *fd_array;
+ binder_size_t fda_offset;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
@@ -2581,20 +2663,29 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
return -EINVAL;
}
/*
- * Since the parent was already fixed up, convert it
- * back to the kernel address space to access it
+ * the source data for binder_buffer_object is visible
+ * to user-space and the @buffer element is the user
+ * pointer to the buffer_object containing the fd_array.
+ * Convert the address to an offset relative to
+ * the base of the transaction buffer.
*/
- parent_buffer = parent->buffer -
- binder_alloc_get_user_buffer_offset(&target_proc->alloc);
- fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
- if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
+ fda->parent_offset;
+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
return -EINVAL;
}
for (fdi = 0; fdi < fda->num_fds; fdi++) {
- int ret = binder_translate_fd(&fd_array[fdi], t, thread,
- in_reply_to);
+ u32 fd;
+ int ret;
+ binder_size_t offset = fda_offset + fdi * sizeof(fd);
+
+ binder_alloc_copy_from_buffer(&target_proc->alloc,
+ &fd, t->buffer,
+ offset, sizeof(fd));
+ ret = binder_translate_fd(fd, offset, t, thread,
+ in_reply_to);
if (ret < 0)
return ret;
}
@@ -2604,30 +2695,34 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
static int binder_fixup_parent(struct binder_transaction *t,
struct binder_thread *thread,
struct binder_buffer_object *bp,
- binder_size_t *off_start,
+ binder_size_t off_start_offset,
binder_size_t num_valid,
- struct binder_buffer_object *last_fixup_obj,
+ binder_size_t last_fixup_obj_off,
binder_size_t last_fixup_min_off)
{
struct binder_buffer_object *parent;
- u8 *parent_buffer;
struct binder_buffer *b = t->buffer;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_object object;
+ binder_size_t buffer_offset;
+ binder_size_t parent_offset;
if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
return 0;
- parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+ parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
+ off_start_offset, &parent_offset,
+ num_valid);
if (!parent) {
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return -EINVAL;
}
- if (!binder_validate_fixup(b, off_start,
- parent, bp->parent_offset,
- last_fixup_obj,
+ if (!binder_validate_fixup(target_proc, b, off_start_offset,
+ parent_offset, bp->parent_offset,
+ last_fixup_obj_off,
last_fixup_min_off)) {
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
@@ -2641,10 +2736,10 @@ static int binder_fixup_parent(struct binder_transaction *t,
proc->pid, thread->pid);
return -EINVAL;
}
- parent_buffer = (u8 *)((uintptr_t)parent->buffer -
- binder_alloc_get_user_buffer_offset(
- &target_proc->alloc));
- *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+ buffer_offset = bp->parent_offset +
+ (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
+ binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
+ &bp->buffer, sizeof(bp->buffer));
return 0;
}
@@ -2763,9 +2858,10 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_transaction *t;
struct binder_work *w;
struct binder_work *tcomplete;
- binder_size_t *offp, *off_end, *off_start;
+ binder_size_t buffer_offset = 0;
+ binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
- u8 *sg_bufp, *sg_buf_end;
+ binder_size_t sg_buf_offset, sg_buf_end_offset;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -2774,10 +2870,12 @@ static void binder_transaction(struct binder_proc *proc,
uint32_t return_error = 0;
uint32_t return_error_param = 0;
uint32_t return_error_line = 0;
- struct binder_buffer_object *last_fixup_obj = NULL;
+ binder_size_t last_fixup_obj_off = 0;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
+ char *secctx = NULL;
+ u32 secctx_sz = 0;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -3020,6 +3118,20 @@ static void binder_transaction(struct binder_proc *proc,
t->flags = tr->flags;
t->priority = task_nice(current);
+ if (target_node && target_node->txn_security_ctx) {
+ u32 secid;
+
+ security_task_getsecid(proc->tsk, &secid);
+ ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_get_secctx_failed;
+ }
+ extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+ }
+
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3036,16 +3148,30 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
+ if (secctx) {
+ size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
+ ALIGN(tr->offsets_size, sizeof(void *)) +
+ ALIGN(extra_buffers_size, sizeof(void *)) -
+ ALIGN(secctx_sz, sizeof(u64));
+
+ t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, buf_offset,
+ secctx, secctx_sz);
+ security_release_secctx(secctx, secctx_sz);
+ secctx = NULL;
+ }
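As a rough worked example of where the security context lands (sizes invented for illustration, not taken from this patch): with sizeof(void *) == 8, tr->data_size == 100, tr->offsets_size == 16, no scatter-gather data and secctx_sz == 25, extra_buffers_size was bumped above by ALIGN(25, 8) == 32, so

    buf_offset = ALIGN(100, 8) + ALIGN(16, 8) + ALIGN(32, 8) - ALIGN(25, 8)
               = 104 + 16 + 32 - 32
               = 120

i.e. the secctx string is copied into the tail region that was reserved for it, right after the (aligned) data and offsets areas.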
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- off_start = (binder_size_t *)(t->buffer->data +
- ALIGN(tr->data_size, sizeof(void *)));
- offp = off_start;
- if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
- tr->data.ptr.buffer, tr->data_size)) {
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, 0,
+ (const void __user *)
+ (uintptr_t)tr->data.ptr.buffer,
+ tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
@@ -3053,8 +3179,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (copy_from_user(offp, (const void __user *)(uintptr_t)
- tr->data.ptr.offsets, tr->offsets_size)) {
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer,
+ ALIGN(tr->data_size, sizeof(void *)),
+ (const void __user *)
+ (uintptr_t)tr->data.ptr.offsets,
+ tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
@@ -3079,17 +3210,30 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- off_end = (void *)off_start + tr->offsets_size;
- sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
- sg_buf_end = sg_bufp + extra_buffers_size;
+ off_start_offset = ALIGN(tr->data_size, sizeof(void *));
+ buffer_offset = off_start_offset;
+ off_end_offset = off_start_offset + tr->offsets_size;
+ sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
+ sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
off_min = 0;
- for (; offp < off_end; offp++) {
+ for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+ buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
- size_t object_size = binder_validate_object(t->buffer, *offp);
-
- if (object_size == 0 || *offp < off_min) {
+ size_t object_size;
+ struct binder_object object;
+ binder_size_t object_offset;
+
+ binder_alloc_copy_from_buffer(&target_proc->alloc,
+ &object_offset,
+ t->buffer,
+ buffer_offset,
+ sizeof(object_offset));
+ object_size = binder_get_object(target_proc, t->buffer,
+ object_offset, &object);
+ if (object_size == 0 || object_offset < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
- proc->pid, thread->pid, (u64)*offp,
+ proc->pid, thread->pid,
+ (u64)object_offset,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
@@ -3098,8 +3242,8 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_offset;
}
- hdr = (struct binder_object_header *)(t->buffer->data + *offp);
- off_min = *offp + object_size;
+ hdr = &object.hdr;
+ off_min = object_offset + object_size;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
@@ -3113,6 +3257,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_translate_failed;
}
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, object_offset,
+ fp, sizeof(*fp));
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
@@ -3126,12 +3273,17 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_translate_failed;
}
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, object_offset,
+ fp, sizeof(*fp));
} break;
case BINDER_TYPE_FD: {
struct binder_fd_object *fp = to_binder_fd_object(hdr);
- int ret = binder_translate_fd(&fp->fd, t, thread,
- in_reply_to);
+ binder_size_t fd_offset = object_offset +
+ (uintptr_t)&fp->fd - (uintptr_t)fp;
+ int ret = binder_translate_fd(fp->fd, fd_offset, t,
+ thread, in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
@@ -3140,14 +3292,23 @@ static void binder_transaction(struct binder_proc *proc,
goto err_translate_failed;
}
fp->pad_binder = 0;
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, object_offset,
+ fp, sizeof(*fp));
} break;
case BINDER_TYPE_FDA: {
+ struct binder_object ptr_object;
+ binder_size_t parent_offset;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
+ size_t num_valid = (buffer_offset - off_start_offset) /
+ sizeof(binder_size_t);
struct binder_buffer_object *parent =
- binder_validate_ptr(t->buffer, fda->parent,
- off_start,
- offp - off_start);
+ binder_validate_ptr(target_proc, t->buffer,
+ &ptr_object, fda->parent,
+ off_start_offset,
+ &parent_offset,
+ num_valid);
if (!parent) {
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
@@ -3156,9 +3317,11 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_parent;
}
- if (!binder_validate_fixup(t->buffer, off_start,
- parent, fda->parent_offset,
- last_fixup_obj,
+ if (!binder_validate_fixup(target_proc, t->buffer,
+ off_start_offset,
+ parent_offset,
+ fda->parent_offset,
+ last_fixup_obj_off,
last_fixup_min_off)) {
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
@@ -3175,14 +3338,15 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_translate_failed;
}
- last_fixup_obj = parent;
+ last_fixup_obj_off = parent_offset;
last_fixup_min_off =
fda->parent_offset + sizeof(u32) * fda->num_fds;
} break;
case BINDER_TYPE_PTR: {
struct binder_buffer_object *bp =
to_binder_buffer_object(hdr);
- size_t buf_left = sg_buf_end - sg_bufp;
+ size_t buf_left = sg_buf_end_offset - sg_buf_offset;
+ size_t num_valid;
if (bp->length > buf_left) {
binder_user_error("%d:%d got transaction with too large buffer\n",
@@ -3192,9 +3356,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- if (copy_from_user(sg_bufp,
- (const void __user *)(uintptr_t)
- bp->buffer, bp->length)) {
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer,
+ sg_buf_offset,
+ (const void __user *)
+ (uintptr_t)bp->buffer,
+ bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error_param = -EFAULT;
@@ -3203,14 +3371,16 @@ static void binder_transaction(struct binder_proc *proc,
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
- bp->buffer = (uintptr_t)sg_bufp +
- binder_alloc_get_user_buffer_offset(
- &target_proc->alloc);
- sg_bufp += ALIGN(bp->length, sizeof(u64));
-
- ret = binder_fixup_parent(t, thread, bp, off_start,
- offp - off_start,
- last_fixup_obj,
+ bp->buffer = (uintptr_t)
+ t->buffer->user_data + sg_buf_offset;
+ sg_buf_offset += ALIGN(bp->length, sizeof(u64));
+
+ num_valid = (buffer_offset - off_start_offset) /
+ sizeof(binder_size_t);
+ ret = binder_fixup_parent(t, thread, bp,
+ off_start_offset,
+ num_valid,
+ last_fixup_obj_off,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
@@ -3218,7 +3388,10 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_translate_failed;
}
- last_fixup_obj = bp;
+ binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer, object_offset,
+ bp, sizeof(*bp));
+ last_fixup_obj_off = object_offset;
last_fixup_min_off = 0;
} break;
default:
@@ -3298,13 +3471,17 @@ err_bad_parent:
err_copy_data_failed:
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
- binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ binder_transaction_buffer_release(target_proc, t->buffer,
+ buffer_offset, true);
if (target_node)
binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
+ if (secctx)
+ security_release_secctx(secctx, secctx_sz);
+err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
@@ -3396,7 +3573,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_transaction_buffer_release(proc, buffer, 0, false);
binder_alloc_free_buf(&proc->alloc, buffer);
}
@@ -3915,6 +4092,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
/**
* binder_apply_fd_fixups() - finish fd translation
+ * @proc: binder_proc associated with @t->buffer
* @t: binder transaction with list of fd fixups
*
* Now that we are in the context of the transaction target
@@ -3926,14 +4104,14 @@ static int binder_wait_for_work(struct binder_thread *thread,
* fput'ing files that have not been processed and ksys_close'ing
* any fds that have already been allocated.
*/
-static int binder_apply_fd_fixups(struct binder_transaction *t)
+static int binder_apply_fd_fixups(struct binder_proc *proc,
+ struct binder_transaction *t)
{
struct binder_txn_fd_fixup *fixup, *tmp;
int ret = 0;
list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
int fd = get_unused_fd_flags(O_CLOEXEC);
- u32 *fdp;
if (fd < 0) {
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -3948,33 +4126,20 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
trace_binder_transaction_fd_recv(t, fd, fixup->offset);
fd_install(fd, fixup->file);
fixup->file = NULL;
- fdp = (u32 *)(t->buffer->data + fixup->offset);
- /*
- * This store can cause problems for CPUs with a
- * VIVT cache (eg ARMv5) since the cache cannot
- * detect virtual aliases to the same physical cacheline.
- * To support VIVT, this address and the user-space VA
- * would both need to be flushed. Since this kernel
- * VA is not constructed via page_to_virt(), we can't
- * use flush_dcache_page() on it, so we'd have to use
- * an internal function. If devices with VIVT ever
- * need to run Android, we'll either need to go back
- * to patching the translated fd from the sender side
- * (using the non-standard kernel functions), or rework
- * how the kernel uses the buffer to use page_to_virt()
- * addresses instead of allocating in our own vm area.
- *
- * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
- */
- *fdp = fd;
+ binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
+ fixup->offset, &fd,
+ sizeof(u32));
}
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
if (fixup->file) {
fput(fixup->file);
} else if (ret) {
- u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
+ u32 fd;
- binder_deferred_fd_close(*fdp);
+ binder_alloc_copy_from_buffer(&proc->alloc, &fd,
+ t->buffer, fixup->offset,
+ sizeof(fd));
+ binder_deferred_fd_close(fd);
}
list_del(&fixup->fixup_entry);
kfree(fixup);
@@ -4036,11 +4201,13 @@ retry:
while (1) {
uint32_t cmd;
- struct binder_transaction_data tr;
+ struct binder_transaction_data_secctx tr;
+ struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
+ size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4240,8 +4407,8 @@ retry:
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
- tr.target.ptr = target_node->ptr;
- tr.cookie = target_node->cookie;
+ trd->target.ptr = target_node->ptr;
+ trd->cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
@@ -4251,25 +4418,26 @@ retry:
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = 0;
- tr.cookie = 0;
+ trd->target.ptr = 0;
+ trd->cookie = 0;
cmd = BR_REPLY;
}
- tr.code = t->code;
- tr.flags = t->flags;
- tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+ trd->code = t->code;
+ trd->flags = t->flags;
+ trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t_from = binder_get_txn_from(t);
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
- tr.sender_pid = task_tgid_nr_ns(sender,
- task_active_pid_ns(current));
+ trd->sender_pid =
+ task_tgid_nr_ns(sender,
+ task_active_pid_ns(current));
} else {
- tr.sender_pid = 0;
+ trd->sender_pid = 0;
}
- ret = binder_apply_fd_fixups(t);
+ ret = binder_apply_fd_fixups(proc, t);
if (ret) {
struct binder_buffer *buffer = t->buffer;
bool oneway = !!(t->flags & TF_ONE_WAY);
@@ -4297,15 +4465,18 @@ retry:
}
continue;
}
- tr.data_size = t->buffer->data_size;
- tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)
- ((uintptr_t)t->buffer->data +
- binder_alloc_get_user_buffer_offset(&proc->alloc));
- tr.data.ptr.offsets = tr.data.ptr.buffer +
+ trd->data_size = t->buffer->data_size;
+ trd->offsets_size = t->buffer->offsets_size;
+ trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
+ trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
+ tr.secctx = t->security_ctx;
+ if (t->security_ctx) {
+ cmd = BR_TRANSACTION_SEC_CTX;
+ trsize = sizeof(tr);
+ }
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4316,7 +4487,7 @@ retry:
return -EFAULT;
}
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (copy_to_user(ptr, &tr, trsize)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4325,7 +4496,7 @@ retry:
return -EFAULT;
}
- ptr += sizeof(tr);
+ ptr += trsize;
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
@@ -4333,16 +4504,18 @@ retry:
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
- "BR_REPLY",
+ (cmd == BR_TRANSACTION_SEC_CTX) ?
+ "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
- (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+ (u64)trd->data.ptr.buffer,
+ (u64)trd->data.ptr.offsets);
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
- if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
@@ -4690,7 +4863,8 @@ out:
return ret;
}
-static int binder_ioctl_set_ctx_mgr(struct file *filp)
+static int binder_ioctl_set_ctx_mgr(struct file *filp,
+ struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
@@ -4719,7 +4893,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- new_node = binder_new_node(proc, NULL);
+ new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
@@ -4842,8 +5016,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
+ case BINDER_SET_CONTEXT_MGR_EXT: {
+ struct flat_binder_object fbo;
+
+ if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
+ if (ret)
+ goto err;
+ break;
+ }
case BINDER_SET_CONTEXT_MGR:
- ret = binder_ioctl_set_ctx_mgr(filp);
+ ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
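For context, a user-space context manager opts in to receiving sender security contexts through the new ioctl roughly as sketched below. This is only an illustration: it assumes the UAPI flag FLAT_BINDER_FLAG_TXN_SECURITY_CTX and the installed <linux/android/binder.h> header, neither of which appears in this hunk.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Register as context manager and request that the driver attach the
 * sender's security context to incoming transactions
 * (delivered as BR_TRANSACTION_SEC_CTX). */
static int become_secctx_ctx_mgr(int binder_fd)
{
	struct flat_binder_object fbo;

	memset(&fbo, 0, sizeof(fbo));
	fbo.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
	return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
}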
@@ -5319,7 +5505,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %pK\n",
buffer->data_size, buffer->offsets_size,
- buffer->data);
+ buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
@@ -5854,9 +6040,10 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
- char *device_name, *device_names, *device_tmp;
+ char *device_name, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
+ char *device_names = NULL;
ret = binder_alloc_shrinker_init();
if (ret)
@@ -5898,23 +6085,29 @@ static int __init binder_init(void)
&transaction_log_fops);
}
- /*
- * Copy the module_parameter string, because we don't want to
- * tokenize it in-place.
- */
- device_names = kstrdup(binder_devices_param, GFP_KERNEL);
- if (!device_names) {
- ret = -ENOMEM;
- goto err_alloc_device_names_failed;
- }
+ if (strcmp(binder_devices_param, "") != 0) {
+ /*
+ * Copy the module_parameter string, because we don't want to
+ * tokenize it in-place.
+ */
+ device_names = kstrdup(binder_devices_param, GFP_KERNEL);
+ if (!device_names) {
+ ret = -ENOMEM;
+ goto err_alloc_device_names_failed;
+ }
- device_tmp = device_names;
- while ((device_name = strsep(&device_tmp, ","))) {
- ret = init_binder_device(device_name);
- if (ret)
- goto err_init_binder_device_failed;
+ device_tmp = device_names;
+ while ((device_name = strsep(&device_tmp, ","))) {
+ ret = init_binder_device(device_name);
+ if (ret)
+ goto err_init_binder_device_failed;
+ }
}
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binder_device_failed;
+
return ret;
err_init_binder_device_failed:
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 022cd80e80cc..6389467670a0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -29,6 +29,8 @@
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
+#include <linux/uaccess.h>
+#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -67,9 +69,8 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return (u8 *)alloc->buffer +
- alloc->buffer_size - (u8 *)buffer->data;
- return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+ return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -119,9 +120,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer->data < buffer->data)
+ if (new_buffer->user_data < buffer->user_data)
p = &parent->rb_left;
- else if (new_buffer->data > buffer->data)
+ else if (new_buffer->user_data > buffer->user_data)
p = &parent->rb_right;
else
BUG();
@@ -136,17 +137,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- void *kern_ptr;
+ void __user *uptr;
- kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+ uptr = (void __user *)user_ptr;
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer->data)
+ if (uptr < buffer->user_data)
n = n->rb_left;
- else if (kern_ptr > buffer->data)
+ else if (uptr > buffer->user_data)
n = n->rb_right;
else {
/*
@@ -186,9 +187,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end)
+ void __user *start, void __user *end)
{
- void *page_addr;
+ void __user *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
struct vm_area_struct *vma = NULL;
@@ -263,18 +264,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
page->alloc = alloc;
INIT_LIST_HEAD(&page->lru);
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL,
- &page->page_ptr);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
- alloc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + alloc->user_buffer_offset;
+ user_page_addr = (uintptr_t)page_addr;
ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -312,8 +302,6 @@ free_range:
continue;
err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
__free_page(page->page_ptr);
page->page_ptr = NULL;
err_alloc_page_failed:
@@ -368,8 +356,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_buffer *buffer;
size_t buffer_size;
struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
+ void __user *has_page_addr;
+ void __user *end_page_addr;
size_t size, data_offsets_size;
int ret;
@@ -467,15 +455,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
"%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
alloc->pid, size, buffer, buffer_size);
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ has_page_addr = (void __user *)
+ (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
- ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
+ ret = binder_update_page_range(alloc, 1, (void __user *)
+ PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
if (ret)
return ERR_PTR(ret);
@@ -488,7 +476,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
__func__, alloc->pid);
goto err_alloc_buf_struct_failed;
}
- new_buffer->data = (u8 *)buffer->data + size;
+ new_buffer->user_data = (u8 __user *)buffer->user_data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
@@ -514,8 +502,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
return buffer;
err_alloc_buf_struct_failed:
- binder_update_page_range(alloc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ binder_update_page_range(alloc, 0, (void __user *)
+ PAGE_ALIGN((uintptr_t)buffer->user_data),
end_page_addr);
return ERR_PTR(-ENOMEM);
}
@@ -550,14 +538,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
return buffer;
}
-static void *buffer_start_page(struct binder_buffer *buffer)
+static void __user *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+ return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}
-static void *prev_buffer_end_page(struct binder_buffer *buffer)
+static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+ return (void __user *)
+ (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -572,7 +561,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer->data, prev->data);
+ alloc->pid, buffer->user_data,
+ prev->user_data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
@@ -582,23 +572,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid,
- buffer->data,
- next->data);
+ buffer->user_data,
+ next->user_data);
}
}
- if (PAGE_ALIGNED(buffer->data)) {
+ if (PAGE_ALIGNED(buffer->user_data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer start %pK is page aligned\n",
- alloc->pid, buffer->data);
+ alloc->pid, buffer->user_data);
to_free = false;
}
if (to_free) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
- alloc->pid, buffer->data,
- prev->data, next ? next->data : NULL);
+ alloc->pid, buffer->user_data,
+ prev->user_data,
+ next ? next->user_data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE);
}
@@ -624,8 +615,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->data < alloc->buffer);
- BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->buffer);
+ BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -636,8 +627,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
}
binder_update_page_range(alloc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
+ (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
+ (void __user *)(((uintptr_t)
+ buffer->user_data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
@@ -693,7 +685,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
int ret;
- struct vm_struct *area;
const char *failure_string;
struct binder_buffer *buffer;
@@ -704,28 +695,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_already_mapped;
}
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- alloc->buffer = area->addr;
- alloc->user_buffer_offset =
- vma->vm_start - (uintptr_t)alloc->buffer;
+ alloc->buffer = (void __user *)vma->vm_start;
mutex_unlock(&binder_alloc_mmap_lock);
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR(
- (vma->vm_start ^ (uint32_t)alloc->buffer))) {
- pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
- __func__, alloc->pid, vma->vm_start,
- vma->vm_end, alloc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
@@ -743,7 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->data = alloc->buffer;
+ buffer->user_data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -758,9 +730,7 @@ err_alloc_buf_struct_failed:
alloc->pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_alloc_mmap_lock);
- vfree(alloc->buffer);
alloc->buffer = NULL;
-err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -806,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
+ void __user *page_addr;
bool on_lru;
if (!alloc->pages[i].page_ptr)
@@ -819,12 +789,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
"%s: %d: page %d at %pK %s\n",
__func__, alloc->pid, i, page_addr,
on_lru ? "on lru" : "active");
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
__free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
- vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
if (alloc->vma_vm_mm)
@@ -839,7 +807,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix,
struct binder_buffer *buffer)
{
seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
+ prefix, buffer->debug_id, buffer->user_data,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
@@ -964,7 +932,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
mm = alloc->vma_vm_mm;
- if (!down_write_trylock(&mm->mmap_sem))
+ if (!down_read_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
@@ -974,19 +942,16 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range(vma,
- page_addr + alloc->user_buffer_offset,
- PAGE_SIZE);
+ zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
- up_write(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
mmput(mm);
}
trace_binder_unmap_kernel_start(alloc, index);
- unmap_kernel_range(page_addr, PAGE_SIZE);
__free_page(page->page_ptr);
page->page_ptr = NULL;
@@ -1053,3 +1018,173 @@ int binder_alloc_shrinker_init(void)
}
return ret;
}
+
+/**
+ * check_buffer() - verify that buffer/offset is safe to access
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @offset: offset into @buffer data
+ * @bytes: bytes to access from offset
+ *
+ * Check that the @offset/@bytes are within the size of the given
+ * @buffer and that the buffer is currently active and not freeable.
+ * Offsets must also be multiples of sizeof(u32). The kernel is
+ * allowed to touch the buffer in two cases:
+ *
+ * 1) when the buffer is being created:
+ * (buffer->free == 0 && buffer->allow_user_free == 0)
+ * 2) when the buffer is being torn down:
+ * (buffer->free == 0 && buffer->transaction == NULL).
+ *
+ * Return: true if the buffer is safe to access
+ */
+static inline bool check_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t offset, size_t bytes)
+{
+ size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ return buffer_size >= bytes &&
+ offset <= buffer_size - bytes &&
+ IS_ALIGNED(offset, sizeof(u32)) &&
+ !buffer->free &&
+ (!buffer->allow_user_free || !buffer->transaction);
+}
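Note that the bounds test deliberately uses offset <= buffer_size - bytes (after establishing buffer_size >= bytes) rather than the naive offset + bytes <= buffer_size. With invented numbers: for buffer_size == 32 and bytes == 8, offsets 0..24 are accepted; an offset of (binder_size_t)-4 would wrap the naive sum around to 4 and slip through, but it fails the subtraction form, so a hostile offset cannot overflow the check.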
+
+/**
+ * binder_alloc_get_page() - get kernel pointer for given buffer offset
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @pgoffp: address to copy final page offset to
+ *
+ * Lookup the struct page corresponding to the address
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
+ * NULL, the byte-offset into the page is written there.
+ *
+ * The caller is responsible for ensuring that the offset points
+ * to a valid address within the @buffer and that @buffer is
+ * not freeable by the user. Since it can't be freed, we are
+ * guaranteed that the corresponding elements of @alloc->pages[]
+ * cannot change.
+ *
+ * Return: struct page
+ */
+static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ pgoff_t *pgoffp)
+{
+ binder_size_t buffer_space_offset = buffer_offset +
+ (buffer->user_data - alloc->buffer);
+ pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
+ size_t index = buffer_space_offset >> PAGE_SHIFT;
+ struct binder_lru_page *lru_page;
+
+ lru_page = &alloc->pages[index];
+ *pgoffp = pgoff;
+ return lru_page->page_ptr;
+}
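A worked example with invented values: assuming PAGE_SIZE == 4096, if buffer->user_data sits 0x1800 bytes above alloc->buffer and buffer_offset is 0x900, then buffer_space_offset == 0x2100, giving index == 2 and pgoff == 0x100. The copy helpers below resolve exactly this (page, pgoff) pair once for every page they touch.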
+
+/**
+ * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @from: userspace pointer to source buffer
+ * @bytes: bytes to copy
+ *
+ * Copy bytes from source userspace to target buffer.
+ *
+ * Return: bytes remaining to be copied
+ */
+unsigned long
+binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ const void __user *from,
+ size_t bytes)
+{
+ if (!check_buffer(alloc, buffer, buffer_offset, bytes))
+ return bytes;
+
+ while (bytes) {
+ unsigned long size;
+ unsigned long ret;
+ struct page *page;
+ pgoff_t pgoff;
+ void *kptr;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ kptr = kmap(page) + pgoff;
+ ret = copy_from_user(kptr, from, size);
+ kunmap(page);
+ if (ret)
+ return bytes - size + ret;
+ bytes -= size;
+ from += size;
+ buffer_offset += size;
+ }
+ return 0;
+}
+
+static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+ bool to_buffer,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *ptr,
+ size_t bytes)
+{
+ /* All copies must be 32-bit aligned and 32-bit size */
+ BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+
+ while (bytes) {
+ unsigned long size;
+ struct page *page;
+ pgoff_t pgoff;
+ void *tmpptr;
+ void *base_ptr;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ base_ptr = kmap_atomic(page);
+ tmpptr = base_ptr + pgoff;
+ if (to_buffer)
+ memcpy(tmpptr, ptr, size);
+ else
+ memcpy(ptr, tmpptr, size);
+ /*
+ * kunmap_atomic() takes care of flushing the cache
+ * if this device has a VIVT cache architecture
+ */
+ kunmap_atomic(base_ptr);
+ bytes -= size;
+ pgoff = 0;
+ ptr = ptr + size;
+ buffer_offset += size;
+ }
+}
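Since the transaction buffer is no longer vmapped into the kernel, a single logical copy may cross a page boundary; the loop re-resolves the page on every iteration to handle that. For example, copying 8 bytes that start 4 bytes before a page boundary is performed as two memcpy() calls of 4 bytes each, with a separate kmap_atomic()/kunmap_atomic() pair for each page.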
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *src,
+ size_t bytes)
+{
+ binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+ src, bytes);
+}
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+ void *dest,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ size_t bytes)
+{
+ binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+ dest, bytes);
+}
+
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index c0aadbbf7f19..b60d161b7a7a 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
+#include <uapi/linux/android/binder.h>
extern struct list_lru binder_alloc_lru;
struct binder_transaction;
@@ -39,7 +40,7 @@ struct binder_transaction;
* @data_size: size of @transaction data
* @offsets_size: size of array of offsets
* @extra_buffers_size: size of space for other objects (like sg lists)
- * @data: pointer to base of buffer space
+ * @user_data: user pointer to base of buffer space
*
* Bookkeeping structure for binder transaction buffers
*/
@@ -58,7 +59,7 @@ struct binder_buffer {
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- void *data;
+ void __user *user_data;
};
/**
@@ -81,7 +82,6 @@ struct binder_lru_page {
* (invariant after init)
* @vma_vm_mm: copy of vma->vm_mm (invarient after mmap)
* @buffer: base of per-proc address space mapped via mmap
- * @user_buffer_offset: offset between user and kernel VAs for buffer
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
@@ -102,8 +102,7 @@ struct binder_alloc {
struct mutex mutex;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
- void *buffer;
- ptrdiff_t user_buffer_offset;
+ void __user *buffer;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
@@ -162,26 +161,24 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
return free_async_space;
}
-/**
- * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
- * @alloc: binder_alloc for this proc
- *
- * Return: the offset between kernel and user-space addresses to use for
- * virtual address conversion
- */
-static inline ptrdiff_t
-binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
-{
- /*
- * user_buffer_offset is constant if vma is set and
- * undefined if vma is not set. It is possible to
- * get here with !alloc->vma if the target process
- * is dying while a transaction is being initiated.
- * Returning the old value is ok in this case and
- * the transaction will fail.
- */
- return alloc->user_buffer_offset;
-}
+unsigned long
+binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ const void __user *from,
+ size_t bytes);
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *src,
+ size_t bytes);
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+ void *dest,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ size_t bytes);
#endif /* _LINUX_BINDER_ALLOC_H */
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 8bd7bcef967d..b72708918b06 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -102,11 +102,12 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- void *page_addr, *end;
+ void __user *page_addr;
+ void __user *end;
int page_index;
- end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
- page_addr = buffer->data;
+ end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+ page_addr = buffer->user_data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
if (!alloc->pages[page_index].page_ptr ||
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7fb97f503ef2..045b3e42d98b 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode)
}
#endif
+#ifdef CONFIG_ANDROID_BINDERFS
+extern int __init init_binderfs(void);
+#else
+static inline int __init init_binderfs(void)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 14de7ac57a34..83cc254d2335 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -293,7 +293,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TRACE_EVENT(binder_update_page_range,
TP_PROTO(struct binder_alloc *alloc, bool allocate,
- void *start, void *end),
+ void __user *start, void __user *end),
TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7496b10532aa..e773f45d19d9 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -11,6 +11,7 @@
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/namei.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/miscdevice.h>
@@ -20,6 +21,7 @@
#include <linux/parser.h>
#include <linux/radix-tree.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include <linux/stddef.h>
@@ -30,7 +32,7 @@
#include <linux/xarray.h>
#include <uapi/asm-generic/errno-base.h>
#include <uapi/linux/android/binder.h>
-#include <uapi/linux/android/binder_ctl.h>
+#include <uapi/linux/android/binderfs.h>
#include "binder_internal.h"
@@ -39,14 +41,32 @@
#define INODE_OFFSET 3
#define INTSTRLEN 21
#define BINDERFS_MAX_MINOR (1U << MINORBITS)
-
-static struct vfsmount *binderfs_mnt;
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
static dev_t binderfs_dev;
static DEFINE_MUTEX(binderfs_minors_mutex);
static DEFINE_IDA(binderfs_minors);
/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ */
+struct binderfs_mount_opts {
+ int max;
+};
+
+enum {
+ Opt_max,
+ Opt_err
+};
+
+static const match_table_t tokens = {
+ { Opt_max, "max=%d" },
+ { Opt_err, NULL }
+};
+
+/**
* binderfs_info - information about a binderfs mount
* @ipc_ns: The ipc namespace the binderfs mount belongs to.
* @control_dentry: This records the dentry of this binderfs mount
@@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors);
* created.
* @root_gid: gid that needs to be used when a new binder device is
* created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
*/
struct binderfs_info {
struct ipc_namespace *ipc_ns;
struct dentry *control_dentry;
kuid_t root_uid;
kgid_t root_gid;
-
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
};
static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
@@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode)
* @userp: buffer to copy information about new device for userspace to
* @req: struct binderfs_device as copied from userspace
*
- * This function allocated a new binder_device and reserves a new minor
+ * This function allocates a new binder_device and reserves a new minor
* number for it.
* Minor numbers are limited and tracked globally in binderfs_minors. The
* function will stash a struct binder_device for the specific binder
@@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
struct binderfs_device *req)
{
int minor, ret;
- struct dentry *dentry, *dup, *root;
+ struct dentry *dentry, *root;
struct binder_device *device;
- size_t name_len = BINDERFS_MAX_NAME + 1;
char *name = NULL;
+ size_t name_len;
struct inode *inode = NULL;
struct super_block *sb = ref_inode->i_sb;
struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
/* Reserve new minor number for the new device. */
mutex_lock(&binderfs_minors_mutex);
- minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
- mutex_unlock(&binderfs_minors_mutex);
- if (minor < 0)
+ if (++info->device_count <= info->mount_opts.max)
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ else
+ minor = -ENOSPC;
+ if (minor < 0) {
+ --info->device_count;
+ mutex_unlock(&binderfs_minors_mutex);
return minor;
+ }
+ mutex_unlock(&binderfs_minors_mutex);
ret = -ENOMEM;
device = kzalloc(sizeof(*device), GFP_KERNEL);
@@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode->i_uid = info->root_uid;
inode->i_gid = info->root_gid;
- name = kmalloc(name_len, GFP_KERNEL);
+ req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+ name_len = strlen(req->name);
+ /* Make sure to include terminating NUL byte */
+ name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
if (!name)
goto err;
- strscpy(name, req->name, name_len);
-
device->binderfs_inode = inode;
device->context.binder_context_mgr_uid = INVALID_UID;
device->context.name = name;
@@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
root = sb->s_root;
inode_lock(d_inode(root));
- dentry = d_alloc_name(root, name);
- if (!dentry) {
+
+ /* look it up */
+ dentry = lookup_one_len(name, root, name_len);
+ if (IS_ERR(dentry)) {
inode_unlock(d_inode(root));
- ret = -ENOMEM;
+ ret = PTR_ERR(dentry);
goto err;
}
- /* Verify that the name userspace gave us is not already in use. */
- dup = d_lookup(root, &dentry->d_name);
- if (dup) {
- if (d_really_is_positive(dup)) {
- dput(dup);
- dput(dentry);
- inode_unlock(d_inode(root));
- ret = -EEXIST;
- goto err;
- }
- dput(dup);
+ if (d_really_is_positive(dentry)) {
+ /* already exists */
+ dput(dentry);
+ inode_unlock(d_inode(root));
+ ret = -EEXIST;
+ goto err;
}
inode->i_private = device;
- d_add(dentry, inode);
+ d_instantiate(dentry, inode);
fsnotify_create(root->d_inode, dentry);
inode_unlock(d_inode(root));
@@ -187,6 +222,7 @@ err:
kfree(name);
kfree(device);
mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
ida_free(&binderfs_minors, minor);
mutex_unlock(&binderfs_minors_mutex);
iput(inode);
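For reference, creating a device through binder-control from user space looks roughly like the sketch below. It is illustrative only: it assumes the installed <linux/android/binderfs.h> UAPI header and a binderfs instance whose binder-control node lives at the path passed in; the control-node path and the helper name are made up for the example.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binderfs.h>	/* struct binderfs_device, BINDER_CTL_ADD */

/* Ask binder-control to allocate a new binder device named @name.
 * On success the kernel fills in req.major and req.minor for the new node. */
static int binderfs_add_device(const char *ctl_path, const char *name)
{
	struct binderfs_device req;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	strncpy(req.name, name, BINDERFS_MAX_NAME);
	fd = open(ctl_path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, BINDER_CTL_ADD, &req);
	close(fd);
	return ret;
}

e.g. binderfs_add_device("/dev/binderfs/binder-control", "my-binder");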
@@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
static void binderfs_evict_inode(struct inode *inode)
{
struct binder_device *device = inode->i_private;
+ struct binderfs_info *info = BINDERFS_I(inode);
clear_inode(inode);
@@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode)
return;
mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
ida_free(&binderfs_minors, device->miscdev.minor);
mutex_unlock(&binderfs_minors_mutex);
@@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode)
kfree(device);
}
+/**
+ * binderfs_parse_mount_opts - parse binderfs mount options
+ * @data: options to set (can be NULL in which case defaults are used)
+ * @opts: binderfs_mount_opts structure filled in with the parsed values
+ */
+static int binderfs_parse_mount_opts(char *data,
+ struct binderfs_mount_opts *opts)
+{
+ char *p;
+ opts->max = BINDERFS_MAX_MINOR;
+
+ while ((p = strsep(&data, ",")) != NULL) {
+ substring_t args[MAX_OPT_ARGS];
+ int token;
+ int max_devices;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_max:
+ if (match_int(&args[0], &max_devices) ||
+ (max_devices < 0 ||
+ (max_devices > BINDERFS_MAX_MINOR)))
+ return -EINVAL;
+
+ opts->max = max_devices;
+ break;
+ default:
+ pr_err("Invalid mount options\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
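For illustration (values assumed, not taken from the patch): an option string of "max=4096" leaves opts->max at 4096; "max=-1" or anything above BINDERFS_MAX_MINOR (1U << MINORBITS, i.e. 1048576) fails with -EINVAL; and a NULL or empty data string keeps the default of BINDERFS_MAX_MINOR, so an unrestricted mount behaves as before.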
+
+static int binderfs_remount(struct super_block *sb, int *flags, char *data)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+ return binderfs_parse_mount_opts(data, &info->mount_opts);
+}
+
+static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
+{
+ struct binderfs_info *info;
+
+ info = root->d_sb->s_fs_info;
+ if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+ seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+ return 0;
+}
+
static const struct super_operations binderfs_super_ops = {
- .statfs = simple_statfs,
- .evict_inode = binderfs_evict_inode,
+ .evict_inode = binderfs_evict_inode,
+ .remount_fs = binderfs_remount,
+ .show_options = binderfs_show_mount_opts,
+ .statfs = simple_statfs,
};
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+ struct binderfs_info *info = dentry->d_sb->s_fs_info;
+ return info->control_dentry == dentry;
+}
+
static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
- struct inode *inode = d_inode(old_dentry);
-
- /* binderfs doesn't support directories. */
- if (d_is_dir(old_dentry))
+ if (is_binderfs_control_device(old_dentry) ||
+ is_binderfs_control_device(new_dentry))
return -EPERM;
- if (flags & ~RENAME_NOREPLACE)
- return -EINVAL;
-
- if (!simple_empty(new_dentry))
- return -ENOTEMPTY;
-
- if (d_really_is_positive(new_dentry))
- simple_unlink(new_dir, new_dentry);
-
- old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
- new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
-
- return 0;
+ return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
{
- /*
- * The control dentry is only ever touched during mount so checking it
- * here should not require us to take lock.
- */
- if (BINDERFS_I(dir)->control_dentry == dentry)
+ if (is_binderfs_control_device(dentry))
return -EPERM;
return simple_unlink(dir, dentry);
@@ -313,13 +395,16 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
struct inode *inode = NULL;
struct dentry *root = sb->s_root;
struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
return -ENOMEM;
- inode_lock(d_inode(root));
-
/* If we have already created a binder-control node, return. */
if (info->control_dentry) {
ret = 0;
@@ -333,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
/* Reserve a new minor number for the new device. */
mutex_lock(&binderfs_minors_mutex);
- minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
mutex_unlock(&binderfs_minors_mutex);
if (minor < 0) {
ret = minor;
@@ -358,12 +446,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
inode->i_private = device;
info->control_dentry = dentry;
d_add(dentry, inode);
- inode_unlock(d_inode(root));
return 0;
out:
- inode_unlock(d_inode(root));
kfree(device);
iput(inode);
@@ -378,12 +464,9 @@ static const struct inode_operations binderfs_dir_inode_operations = {
static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
{
+ int ret;
struct binderfs_info *info;
- int ret = -ENOMEM;
struct inode *inode = NULL;
- struct ipc_namespace *ipc_ns = sb->s_fs_info;
-
- get_ipc_ns(ipc_ns);
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -405,11 +488,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &binderfs_super_ops;
sb->s_time_gran = 1;
- info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
- if (!info)
- goto err_without_dentry;
+ sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ if (!sb->s_fs_info)
+ return -ENOMEM;
+ info = sb->s_fs_info;
+
+ info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+ ret = binderfs_parse_mount_opts(data, &info->mount_opts);
+ if (ret)
+ return ret;
- info->ipc_ns = ipc_ns;
info->root_gid = make_kgid(sb->s_user_ns, 0);
if (!gid_valid(info->root_gid))
info->root_gid = GLOBAL_ROOT_GID;
@@ -417,11 +506,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
if (!uid_valid(info->root_uid))
info->root_uid = GLOBAL_ROOT_UID;
- sb->s_fs_info = info;
-
inode = new_inode(sb);
if (!inode)
- goto err_without_dentry;
+ return -ENOMEM;
inode->i_ino = FIRST_INODE;
inode->i_fop = &simple_dir_operations;
@@ -432,79 +519,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_root = d_make_root(inode);
if (!sb->s_root)
- goto err_without_dentry;
-
- ret = binderfs_binder_ctl_create(sb);
- if (ret)
- goto err_with_dentry;
-
- return 0;
-
-err_with_dentry:
- dput(sb->s_root);
- sb->s_root = NULL;
-
-err_without_dentry:
- put_ipc_ns(ipc_ns);
- iput(inode);
- kfree(info);
-
- return ret;
-}
-
-static int binderfs_test_super(struct super_block *sb, void *data)
-{
- struct binderfs_info *info = sb->s_fs_info;
-
- if (info)
- return info->ipc_ns == data;
-
- return 0;
-}
+ return -ENOMEM;
-static int binderfs_set_super(struct super_block *sb, void *data)
-{
- sb->s_fs_info = data;
- return set_anon_super(sb, NULL);
+ return binderfs_binder_ctl_create(sb);
}
static struct dentry *binderfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
- struct super_block *sb;
- struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
-
- if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
- return ERR_PTR(-EPERM);
-
- sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
- flags, ipc_ns->user_ns, ipc_ns);
- if (IS_ERR(sb))
- return ERR_CAST(sb);
-
- if (!sb->s_root) {
- int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
- if (ret) {
- deactivate_locked_super(sb);
- return ERR_PTR(ret);
- }
-
- sb->s_flags |= SB_ACTIVE;
- }
-
- return dget(sb->s_root);
+ return mount_nodev(fs_type, flags, data, binderfs_fill_super);
}
static void binderfs_kill_super(struct super_block *sb)
{
struct binderfs_info *info = sb->s_fs_info;
+ kill_litter_super(sb);
+
if (info && info->ipc_ns)
put_ipc_ns(info->ipc_ns);
kfree(info);
- kill_litter_super(sb);
}
static struct file_system_type binder_fs_type = {
@@ -514,7 +550,7 @@ static struct file_system_type binder_fs_type = {
.fs_flags = FS_USERNS_MOUNT,
};
-static int __init init_binderfs(void)
+int __init init_binderfs(void)
{
int ret;
@@ -530,15 +566,5 @@ static int __init init_binderfs(void)
return ret;
}
- binderfs_mnt = kern_mount(&binder_fs_type);
- if (IS_ERR(binderfs_mnt)) {
- ret = PTR_ERR(binderfs_mnt);
- binderfs_mnt = NULL;
- unregister_filesystem(&binder_fs_type);
- unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
- }
-
return ret;
}
-
-device_initcall(init_binderfs);
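
For context on the hunks above: binderfs gains a single mount option, "max=<n>", which binderfs_parse_mount_opts() accepts only in the range 0..BINDERFS_MAX_MINOR and which binderfs_show_mount_opts() echoes back through /proc/mounts. A minimal userspace sketch of exercising it, assuming the filesystem keeps the name "binder" and that the mount point already exists (both are assumptions, not shown in this hunk):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Hypothetical mount point. */
            const char *target = "/dev/binderfs";

            /* "max=128" flows into binderfs_parse_mount_opts(); out-of-range
             * values make the mount fail with EINVAL. */
            if (mount("binder", target, "binder", 0, "max=128") != 0) {
                    perror("mount binderfs");
                    return 1;
            }
            return 0;
    }
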
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4ca7a6b4eaae..8218db17ebdb 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers"
config PATA_ACPI
tristate "ACPI firmware driver for PATA"
- depends on ATA_ACPI && ATA_BMDMA
+ depends on ATA_ACPI && ATA_BMDMA && PCI
help
This option enables an ACPI method driver which drives
motherboard PATA controller interfaces through the ACPI
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index ef356e70e6de..8810475f307a 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -254,6 +254,8 @@ enum {
AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
SATA_MOBILE_LPM_POLICY
as default lpm_policy */
+ AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
+ suspend/resume */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f9cb51be38eb..d4bba3ace45d 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -28,6 +28,11 @@
#define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4))
#define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4))
+struct ahci_mvebu_plat_data {
+ int (*plat_config)(struct ahci_host_priv *hpriv);
+ unsigned int flags;
+};
+
static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
const struct mbus_dram_target_info *dram)
{
@@ -62,6 +67,35 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}
+static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv)
+{
+ const struct mbus_dram_target_info *dram;
+ int rc = 0;
+
+ dram = mv_mbus_dram_info();
+ if (dram)
+ ahci_mvebu_mbus_config(hpriv, dram);
+ else
+ rc = -ENODEV;
+
+ ahci_mvebu_regret_option(hpriv);
+
+ return rc;
+}
+
+static int ahci_mvebu_armada_3700_config(struct ahci_host_priv *hpriv)
+{
+ u32 reg;
+
+ writel(0, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
+
+ reg = readl(hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+ reg |= BIT(6);
+ writel(reg, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+
+ return 0;
+}
+
/**
* ahci_mvebu_stop_engine
*
@@ -126,13 +160,9 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ahci_host_priv *hpriv = host->private_data;
- const struct mbus_dram_target_info *dram;
+ const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data;
- dram = mv_mbus_dram_info();
- if (dram)
- ahci_mvebu_mbus_config(hpriv, dram);
-
- ahci_mvebu_regret_option(hpriv);
+ pdata->plat_config(hpriv);
return ahci_platform_resume_host(&pdev->dev);
}
@@ -154,29 +184,30 @@ static struct scsi_host_template ahci_platform_sht = {
static int ahci_mvebu_probe(struct platform_device *pdev)
{
+ const struct ahci_mvebu_plat_data *pdata;
struct ahci_host_priv *hpriv;
- const struct mbus_dram_target_info *dram;
int rc;
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata)
+ return -EINVAL;
+
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
+ hpriv->flags |= pdata->flags;
+ hpriv->plat_data = (void *)pdata;
+
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
hpriv->stop_engine = ahci_mvebu_stop_engine;
- if (of_device_is_compatible(pdev->dev.of_node,
- "marvell,armada-380-ahci")) {
- dram = mv_mbus_dram_info();
- if (!dram)
- return -ENODEV;
-
- ahci_mvebu_mbus_config(hpriv, dram);
- ahci_mvebu_regret_option(hpriv);
- }
+ rc = pdata->plat_config(hpriv);
+ if (rc)
+ goto disable_resources;
rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
&ahci_platform_sht);
@@ -190,18 +221,28 @@ disable_resources:
return rc;
}
+static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = {
+ .plat_config = ahci_mvebu_armada_380_config,
+};
+
+static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = {
+ .plat_config = ahci_mvebu_armada_3700_config,
+ .flags = AHCI_HFLAG_SUSPEND_PHYS,
+};
+
static const struct of_device_id ahci_mvebu_of_match[] = {
- { .compatible = "marvell,armada-380-ahci", },
- { .compatible = "marvell,armada-3700-ahci", },
+ {
+ .compatible = "marvell,armada-380-ahci",
+ .data = &ahci_mvebu_armada_380_plat_data,
+ },
+ {
+ .compatible = "marvell,armada-3700-ahci",
+ .data = &ahci_mvebu_armada_3700_plat_data,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
-/*
- * We currently don't provide power management related operations,
- * since there is no suspend/resume support at the platform level for
- * Armada 38x for the moment.
- */
static struct platform_driver ahci_mvebu_driver = {
.probe = ahci_mvebu_probe,
.remove = ata_platform_remove_one,
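
The ahci_mvebu rework above replaces of_device_is_compatible() checks with per-compatible platform data carrying a plat_config callback, looked up via of_device_get_match_data(). A generic sketch of that pattern with hypothetical names (the AHCI-specific resource handling is omitted):

    #include <linux/module.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct foo_priv {
            void __iomem *mmio;
    };

    struct foo_plat_data {
            int (*plat_config)(struct foo_priv *priv);
            unsigned int flags;
    };

    static int foo_v1_config(struct foo_priv *priv) { return 0; }
    static int foo_v2_config(struct foo_priv *priv) { return 0; }

    static const struct foo_plat_data foo_v1_plat_data = {
            .plat_config = foo_v1_config,
    };

    static const struct foo_plat_data foo_v2_plat_data = {
            .plat_config = foo_v2_config,
            .flags       = 0x1,
    };

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo-v1", .data = &foo_v1_plat_data },
            { .compatible = "vendor,foo-v2", .data = &foo_v2_plat_data },
            { },
    };

    static int foo_probe(struct platform_device *pdev)
    {
            const struct foo_plat_data *pdata;
            struct foo_priv *priv;

            pdata = of_device_get_match_data(&pdev->dev);
            if (!pdata)
                    return -EINVAL;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* Variant-specific setup, no compatible-string checks needed. */
            return pdata->plat_config(priv);
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .driver = {
                    .name           = "foo",
                    .of_match_table = foo_of_match,
            },
    };
    module_platform_driver(foo_driver);
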
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 4b900fc659f7..81b1a3332ed6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -56,6 +56,12 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
if (rc)
goto disable_phys;
+ rc = phy_set_mode(hpriv->phys[i], PHY_MODE_SATA);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
@@ -738,6 +744,9 @@ int ahci_platform_suspend_host(struct device *dev)
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
+ if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
+ ahci_platform_disable_phys(hpriv);
+
return ata_host_suspend(host, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
@@ -756,6 +765,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
int ahci_platform_resume_host(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
int rc;
if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
@@ -766,6 +776,9 @@ int ahci_platform_resume_host(struct device *dev)
ahci_init_controller(host);
}
+ if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
+ ahci_platform_enable_phys(hpriv);
+
ata_host_resume(host);
return 0;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b8c3f9e6af89..adf28788cab5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 8cc9c429ad95..9e7fc302430f 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = {
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
+ /* Not sure what the real max is but we know it's less than 64K, let's
+ * use 64K minus 256
+ */
+ .max_segment_size = MAX_DBDMA_SEG,
.slave_configure = pata_macio_slave_config,
};
@@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv,
/* Make sure we have sane initial timings in the cache */
pata_macio_default_timings(priv);
- /* Not sure what the real max is but we know it's less than 64K, let's
- * use 64K minus 256
- */
- dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
-
/* Allocate libata host for 1 port */
memset(&pinfo, 0, sizeof(struct ata_port_info));
pmac_macio_calc_timing_masks(priv, &pinfo);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 4dc528bf8e85..9c1247d42897 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap)
if (!pp)
return -ENOMEM;
- mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
- GFP_KERNEL);
+ mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+ GFP_KERNEL);
if (!mem) {
kfree(pp);
return -ENOMEM;
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e0bcf9b2dab0..174e84ce4379 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -245,8 +245,15 @@ struct inic_port_priv {
static struct scsi_host_template inic_sht = {
ATA_BASE_SHT(DRV_NAME),
- .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
- .dma_boundary = INIC_DMA_BOUNDARY,
+ .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
+
+ /*
+ * This controller is braindamaged. dma_boundary is 0xffff like others
+ * but it will lock up the whole machine HARD if 65536 byte PRD entry
+ * is fed. Reduce maximum segment size.
+ */
+ .dma_boundary = INIC_DMA_BOUNDARY,
+ .max_segment_size = 65536 - 512,
};
static const int scr_map[] = {
@@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
- /*
- * This controller is braindamaged. dma_boundary is 0xffff
- * like others but it will lock up the whole machine HARD if
- * 65536 byte PRD entry is fed. Reduce maximum segment size.
- */
- rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
- if (rc) {
- dev_err(&pdev->dev, "failed to set the maximum segment size\n");
- return rc;
- }
-
rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
if (rc) {
dev_err(&pdev->dev, "failed to initialize controller\n");
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29f102dcfec4..211607986134 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
static int he_init_tpdrq(struct he_dev *he_dev)
{
- he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
- &he_dev->tpdrq_phys, GFP_KERNEL);
+ he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+ &he_dev->tpdrq_phys,
+ GFP_KERNEL);
if (he_dev->tpdrq_base == NULL) {
hprintk("failed to alloc tpdrq\n");
return -ENOMEM;
@@ -717,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
+ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
@@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
goto out_free_rbpl_virt;
}
- he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
- &he_dev->rbpl_phys, GFP_KERNEL);
+ he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+ &he_dev->rbpl_phys, GFP_KERNEL);
if (he_dev->rbpl_base == NULL) {
hprintk("failed to alloc rbpl_base\n");
goto out_destroy_rbpl_pool;
@@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* rx buffer ready queue */
- he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
- &he_dev->rbrq_phys, GFP_KERNEL);
+ he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+ &he_dev->rbrq_phys, GFP_KERNEL);
if (he_dev->rbrq_base == NULL) {
hprintk("failed to allocate rbrq\n");
goto out_free_rbpl;
@@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* tx buffer ready queue */
- he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
- &he_dev->tbrq_phys, GFP_KERNEL);
+ he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+ &he_dev->tbrq_phys, GFP_KERNEL);
if (he_dev->tbrq_base == NULL) {
hprintk("failed to allocate tbrq\n");
goto out_free_rbpq_base;
@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev)
/* 2.9.3.5 tail offset for each interrupt queue is located after the
end of the interrupt queue */
- he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- (CONFIG_IRQ_SIZE + 1)
- * sizeof(struct he_irq),
- &he_dev->irq_phys,
- GFP_KERNEL);
+ he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
+ &he_dev->irq_phys, GFP_KERNEL);
if (he_dev->irq_base == NULL) {
hprintk("failed to allocate irq\n");
return -ENOMEM;
@@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev)
/* host status page */
- he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
- sizeof(struct he_hsp),
- &he_dev->hsp_phys, GFP_KERNEL);
+ he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
+ sizeof(struct he_hsp),
+ &he_dev->hsp_phys, GFP_KERNEL);
if (he_dev->hsp == NULL) {
hprintk("failed to allocate host status page\n");
return -ENOMEM;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 6e737142ceaa..43a14579e80e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
if (!scq)
return NULL;
- scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
- &scq->paddr, GFP_KERNEL);
+ scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+ &scq->paddr, GFP_KERNEL);
if (scq->base == NULL) {
kfree(scq);
return NULL;
@@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
{
struct rsq_entry *rsqe;
- card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
- &card->rsq.paddr, GFP_KERNEL);
+ card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
+ &card->rsq.paddr, GFP_KERNEL);
if (card->rsq.base == NULL) {
printk("%s: can't allocate RSQ.\n", card->name);
return -1;
@@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev)
writel(0, SAR_REG_GP);
/* Initialize RAW Cell Handle Register */
- card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
- 2 * sizeof(u32),
- &card->raw_cell_paddr,
- GFP_KERNEL);
+ card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
+ 2 * sizeof(u32),
+ &card->raw_cell_paddr,
+ GFP_KERNEL);
if (!card->raw_cell_hnd) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
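
The dma_zalloc_coherent() to dma_alloc_coherent() hunks in the two ATM drivers above are part of a mechanical, tree-wide conversion: dma_alloc_coherent() is assumed here to return already-zeroed memory, so the _zalloc wrapper adds nothing. A sketch of the resulting shape (names hypothetical):

    #include <linux/dma-mapping.h>

    static void *alloc_ring(struct device *dev, size_t size, dma_addr_t *handle)
    {
            /* The buffer comes back zeroed, so no memset() and no separate
             * zeroing wrapper are needed after this call. */
            return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
    }
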
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a43276c76fc6..21393ec3b9a4 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
struct ht16k33_priv *priv = i2c_get_clientdata(client);
struct ht16k33_fbdev *fbdev = &priv->fbdev;
- cancel_delayed_work(&fbdev->work);
+ cancel_delayed_work_sync(&fbdev->work);
unregister_framebuffer(fbdev->info);
framebuffer_release(fbdev->info);
free_page((unsigned long) fbdev->buffer);
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 7a419a7a6235..b405436ee28e 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -60,12 +60,17 @@ struct driver_private {
* @knode_parent - node in sibling list
* @knode_driver - node in driver list
* @knode_bus - node in bus list
+ * @knode_class - node in class list
* @deferred_probe - entry in deferred_probe_list which is used to retry the
* binding of drivers which were unable to get all the resources needed by
* the device; typically because it depends on another driver getting
* probed first.
+ * @async_driver - pointer to device driver awaiting probe via async_probe
* @device - pointer back to the struct device that this structure is
* associated with.
+ * @dead - This device is currently being removed from the system or has
+ * already been removed. Any asynchronous events scheduled for this
+ * device should exit without taking any action.
*
* Nothing outside of the driver core should ever touch these fields.
*/
@@ -74,8 +79,11 @@ struct device_private {
struct klist_node knode_parent;
struct klist_node knode_driver;
struct klist_node knode_bus;
+ struct klist_node knode_class;
struct list_head deferred_probe;
+ struct device_driver *async_driver;
struct device *device;
+ u8 dead:1;
};
#define to_device_private_parent(obj) \
container_of(obj, struct device_private, knode_parent)
@@ -83,6 +91,8 @@ struct device_private {
container_of(obj, struct device_private, knode_driver)
#define to_device_private_bus(obj) \
container_of(obj, struct device_private, knode_bus)
+#define to_device_private_class(obj) \
+ container_of(obj, struct device_private, knode_class)
/* initialisation functions */
extern int devices_init(void);
@@ -124,6 +134,8 @@ extern int driver_add_groups(struct device_driver *drv,
const struct attribute_group **groups);
extern void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups);
+int device_driver_attach(struct device_driver *drv, struct device *dev);
+void device_driver_detach(struct device *dev);
extern char *make_class_name(const char *name, struct kobject *kobj);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index e06a57936cc9..0a58e969f8b7 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -187,11 +187,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver == drv) {
- if (dev->parent && dev->bus->need_parent_lock)
- device_lock(dev->parent);
- device_release_driver(dev);
- if (dev->parent && dev->bus->need_parent_lock)
- device_unlock(dev->parent);
+ device_driver_detach(dev);
err = count;
}
put_device(dev);
@@ -214,13 +210,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver == NULL && driver_match_device(drv, dev)) {
- if (dev->parent && bus->need_parent_lock)
- device_lock(dev->parent);
- device_lock(dev);
- err = driver_probe_device(drv, dev);
- device_unlock(dev);
- if (dev->parent && bus->need_parent_lock)
- device_unlock(dev->parent);
+ err = device_driver_attach(drv, dev);
if (err > 0) {
/* success */
@@ -236,12 +226,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
}
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
-static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
+static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf)
{
return sprintf(buf, "%d\n", bus->p->drivers_autoprobe);
}
-static ssize_t store_drivers_autoprobe(struct bus_type *bus,
+static ssize_t drivers_autoprobe_store(struct bus_type *bus,
const char *buf, size_t count)
{
if (buf[0] == '0')
@@ -251,7 +241,7 @@ static ssize_t store_drivers_autoprobe(struct bus_type *bus,
return count;
}
-static ssize_t store_drivers_probe(struct bus_type *bus,
+static ssize_t drivers_probe_store(struct bus_type *bus,
const char *buf, size_t count)
{
struct device *dev;
@@ -586,9 +576,8 @@ static void remove_bind_files(struct device_driver *drv)
driver_remove_file(drv, &driver_attr_unbind);
}
-static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe);
-static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO,
- show_drivers_autoprobe, store_drivers_autoprobe);
+static BUS_ATTR_WO(drivers_probe);
+static BUS_ATTR_RW(drivers_autoprobe);
static int add_probe_files(struct bus_type *bus)
{
@@ -621,17 +610,6 @@ static ssize_t uevent_store(struct device_driver *drv, const char *buf,
}
static DRIVER_ATTR_WO(uevent);
-static void driver_attach_async(void *_drv, async_cookie_t cookie)
-{
- struct device_driver *drv = _drv;
- int ret;
-
- ret = driver_attach(drv);
-
- pr_debug("bus: '%s': driver %s async attach completed: %d\n",
- drv->bus->name, drv->name, ret);
-}
-
/**
* bus_add_driver - Add a driver to the bus.
* @drv: driver.
@@ -664,15 +642,9 @@ int bus_add_driver(struct device_driver *drv)
klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
if (drv->bus->p->drivers_autoprobe) {
- if (driver_allows_async_probing(drv)) {
- pr_debug("bus: '%s': probing driver %s asynchronously\n",
- drv->bus->name, drv->name);
- async_schedule(driver_attach_async, drv);
- } else {
- error = driver_attach(drv);
- if (error)
- goto out_unregister;
- }
+ error = driver_attach(drv);
+ if (error)
+ goto out_unregister;
}
module_add_driver(drv->owner, drv);
@@ -774,13 +746,8 @@ EXPORT_SYMBOL_GPL(bus_rescan_devices);
*/
int device_reprobe(struct device *dev)
{
- if (dev->driver) {
- if (dev->parent && dev->bus->need_parent_lock)
- device_lock(dev->parent);
- device_release_driver(dev);
- if (dev->parent && dev->bus->need_parent_lock)
- device_unlock(dev->parent);
- }
+ if (dev->driver)
+ device_driver_detach(dev);
return bus_rescan_devices_helper(dev, NULL);
}
EXPORT_SYMBOL_GPL(device_reprobe);
@@ -838,7 +805,14 @@ static ssize_t bus_uevent_store(struct bus_type *bus,
rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
return rc ? rc : count;
}
-static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
+/*
+ * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO()
+ * here, but can not use it as earlier in the file we have
+ * DEVICE_ATTR_WO(uevent), which would cause a clash with the with the store
+ * function name.
+ */
+static struct bus_attribute bus_attr_uevent = __ATTR(uevent, S_IWUSR, NULL,
+ bus_uevent_store);
/**
* bus_register - register a driver-core subsystem
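
The drivers_probe/drivers_autoprobe conversion above depends on the BUS_ATTR_WO()/BUS_ATTR_RW() helpers deriving both the attribute and its callbacks from a single identifier, which is why the functions were renamed to the <name>_show/<name>_store pattern. A rough sketch of what the RW variant expands to (an approximation, not the exact macro text), and why bus_attr_uevent stays open coded:

    /* Roughly what BUS_ATTR_RW(drivers_autoprobe) produces: */
    static struct bus_attribute bus_attr_drivers_autoprobe =
            __ATTR(drivers_autoprobe, 0644,
                   drivers_autoprobe_show, drivers_autoprobe_store);

    /*
     * BUS_ATTR_WO(uevent) would likewise reference a bus-typed uevent_store(),
     * but this file already defines uevent_store() for struct device_driver
     * (used by DRIVER_ATTR_WO(uevent)), so the bus attribute is spelled out
     * with __ATTR() directly instead.
     */
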
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cf78fa6d470d..a7359535caf5 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
- if (of_property_read_u32(np, propname, &this_leaf->size))
- this_leaf->size = 0;
+ of_property_read_u32(np, propname, &this_leaf->size);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
- if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
- this_leaf->number_of_sets = 0;
+ of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}
static void cache_associativity(struct cacheinfo *this_leaf)
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 54def4e02f00..d8a6a5864c2e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -117,16 +117,22 @@ static void class_put(struct class *cls)
kset_put(&cls->p->subsys);
}
+static struct device *klist_class_to_dev(struct klist_node *n)
+{
+ struct device_private *p = to_device_private_class(n);
+ return p->device;
+}
+
static void klist_class_dev_get(struct klist_node *n)
{
- struct device *dev = container_of(n, struct device, knode_class);
+ struct device *dev = klist_class_to_dev(n);
get_device(dev);
}
static void klist_class_dev_put(struct klist_node *n)
{
- struct device *dev = container_of(n, struct device, knode_class);
+ struct device *dev = klist_class_to_dev(n);
put_device(dev);
}
@@ -277,7 +283,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
struct klist_node *start_knode = NULL;
if (start)
- start_knode = &start->knode_class;
+ start_knode = &start->p->knode_class;
klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
iter->type = type;
}
@@ -304,7 +310,7 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
knode = klist_next(&iter->ki);
if (!knode)
return NULL;
- dev = container_of(knode, struct device, knode_class);
+ dev = klist_class_to_dev(knode);
if (!iter->type || iter->type == dev->type)
return dev;
}
diff --git a/drivers/base/component.c b/drivers/base/component.c
index ddcea8739c12..532a3a5d8f63 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -16,11 +16,38 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
+/**
+ * DOC: overview
+ *
+ * The component helper allows drivers to collect a pile of sub-devices,
+ * including their bound drivers, into an aggregate driver. Various subsystems
+ * already provide functions to get hold of such components, e.g.
+ * of_clk_get_by_name(). The component helper can be used when such a
+ * subsystem-specific way to find a device is not available: The component
+ * helper fills the niche of aggregate drivers for specific hardware, where
+ * further standardization into a subsystem would not be practical. The common
+ * example is when a logical device (e.g. a DRM display driver) is spread around
+ * the SoC on various components (scanout engines, blending blocks, transcoders
+ * for various outputs and so on).
+ *
+ * The component helper also doesn't solve runtime dependencies, e.g. for system
+ * suspend and resume operations. See also :ref:`device links<device_link>`.
+ *
+ * Components are registered using component_add() and unregistered with
+ * component_del(), usually from the driver's probe and disconnect functions.
+ *
+ * Aggregate drivers first assemble a component match list of what they need
+ * using component_match_add(). This is then registered as an aggregate driver
+ * using component_master_add_with_match(), and unregistered using
+ * component_master_del().
+ */
+
struct component;
struct component_match_array {
void *data;
int (*compare)(struct device *, void *);
+ int (*compare_typed)(struct device *, int, void *);
void (*release)(struct device *, void *);
struct component *component;
bool duplicate;
@@ -48,6 +75,7 @@ struct component {
bool bound;
const struct component_ops *ops;
+ int subcomponent;
struct device *dev;
};
@@ -132,7 +160,7 @@ static struct master *__master_find(struct device *dev,
}
static struct component *find_component(struct master *master,
- int (*compare)(struct device *, void *), void *compare_data)
+ struct component_match_array *mc)
{
struct component *c;
@@ -140,7 +168,11 @@ static struct component *find_component(struct master *master,
if (c->master && c->master != master)
continue;
- if (compare(c->dev, compare_data))
+ if (mc->compare && mc->compare(c->dev, mc->data))
+ return c;
+
+ if (mc->compare_typed &&
+ mc->compare_typed(c->dev, c->subcomponent, mc->data))
return c;
}
@@ -166,7 +198,7 @@ static int find_components(struct master *master)
if (match->compare[i].component)
continue;
- c = find_component(master, mc->compare, mc->data);
+ c = find_component(master, mc);
if (!c) {
ret = -ENXIO;
break;
@@ -301,15 +333,12 @@ static int component_match_realloc(struct device *dev,
return 0;
}
-/*
- * Add a component to be matched, with a release function.
- *
- * The match array is first created or extended if necessary.
- */
-void component_match_add_release(struct device *master,
+static void __component_match_add(struct device *master,
struct component_match **matchptr,
void (*release)(struct device *, void *),
- int (*compare)(struct device *, void *), void *compare_data)
+ int (*compare)(struct device *, void *),
+ int (*compare_typed)(struct device *, int, void *),
+ void *compare_data)
{
struct component_match *match = *matchptr;
@@ -341,13 +370,69 @@ void component_match_add_release(struct device *master,
}
match->compare[match->num].compare = compare;
+ match->compare[match->num].compare_typed = compare_typed;
match->compare[match->num].release = release;
match->compare[match->num].data = compare_data;
match->compare[match->num].component = NULL;
match->num++;
}
+
+/**
+ * component_match_add_release - add a component match entry with release callback
+ * @master: device with the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @release: release function for @compare_data
+ * @compare: compare function to match against all components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the @master
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions, where upon @release will be called to free any references held by
+ * @compare_data, e.g. when @compare_data is a &device_node that must be
+ * released with of_node_put().
+ *
+ * See also component_match_add() and component_match_add_typed().
+ */
+void component_match_add_release(struct device *master,
+ struct component_match **matchptr,
+ void (*release)(struct device *, void *),
+ int (*compare)(struct device *, void *), void *compare_data)
+{
+ __component_match_add(master, matchptr, release, compare, NULL,
+ compare_data);
+}
EXPORT_SYMBOL(component_match_add_release);
+/**
+ * component_match_add_typed - add a component match entry for a typed component
+ * @master: device with the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @compare_typed: compare function to match against all typed components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the @master
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add_typed().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions.
+ *
+ * See also component_match_add_release() and component_match_add().
+ */
+void component_match_add_typed(struct device *master,
+ struct component_match **matchptr,
+ int (*compare_typed)(struct device *, int, void *), void *compare_data)
+{
+ __component_match_add(master, matchptr, NULL, NULL, compare_typed,
+ compare_data);
+}
+EXPORT_SYMBOL(component_match_add_typed);
+
static void free_master(struct master *master)
{
struct component_match *match = master->match;
@@ -367,6 +452,18 @@ static void free_master(struct master *master)
kfree(master);
}
+/**
+ * component_master_add_with_match - register an aggregate driver
+ * @dev: device with the aggregate driver
+ * @ops: callbacks for the aggregate driver
+ * @match: component match list for the aggregate driver
+ *
+ * Registers a new aggregate driver consisting of the components added to @match
+ * by calling one of the component_match_add() functions. Once all components in
+ * @match are available, it will be assembled by calling
+ * &component_master_ops.bind from @ops. Must be unregistered by calling
+ * component_master_del().
+ */
int component_master_add_with_match(struct device *dev,
const struct component_master_ops *ops,
struct component_match *match)
@@ -403,6 +500,15 @@ int component_master_add_with_match(struct device *dev,
}
EXPORT_SYMBOL_GPL(component_master_add_with_match);
+/**
+ * component_master_del - unregister an aggregate driver
+ * @dev: device with the aggregate driver
+ * @ops: callbacks for the aggregate driver
+ *
+ * Unregisters an aggregate driver registered with
+ * component_master_add_with_match(). If necessary the aggregate driver is first
+ * disassembled by calling &component_master_ops.unbind from @ops.
+ */
void component_master_del(struct device *dev,
const struct component_master_ops *ops)
{
@@ -430,6 +536,15 @@ static void component_unbind(struct component *component,
devres_release_group(component->dev, component);
}
+/**
+ * component_unbind_all - unbind all components of an aggregate driver
+ * @master_dev: device with the aggregate driver
+ * @data: opaque pointer, passed to all components
+ *
+ * Unbinds all components of the aggregate @master_dev by passing @data to their
+ * &component_ops.unbind functions. Should be called from
+ * &component_master_ops.unbind.
+ */
void component_unbind_all(struct device *master_dev, void *data)
{
struct master *master;
@@ -503,6 +618,15 @@ static int component_bind(struct component *component, struct master *master,
return ret;
}
+/**
+ * component_bind_all - bind all components of an aggregate driver
+ * @master_dev: device with the aggregate driver
+ * @data: opaque pointer, passed to all components
+ *
+ * Binds all components of the aggregate @master_dev by passing @data to their
+ * &component_ops.bind functions. Should be called from
+ * &component_master_ops.bind.
+ */
int component_bind_all(struct device *master_dev, void *data)
{
struct master *master;
@@ -537,7 +661,8 @@ int component_bind_all(struct device *master_dev, void *data)
}
EXPORT_SYMBOL_GPL(component_bind_all);
-int component_add(struct device *dev, const struct component_ops *ops)
+static int __component_add(struct device *dev, const struct component_ops *ops,
+ int subcomponent)
{
struct component *component;
int ret;
@@ -548,6 +673,7 @@ int component_add(struct device *dev, const struct component_ops *ops)
component->ops = ops;
component->dev = dev;
+ component->subcomponent = subcomponent;
dev_dbg(dev, "adding component (ops %ps)\n", ops);
@@ -566,8 +692,66 @@ int component_add(struct device *dev, const struct component_ops *ops)
return ret < 0 ? ret : 0;
}
+
+/**
+ * component_add_typed - register a component
+ * @dev: component device
+ * @ops: component callbacks
+ * @subcomponent: nonzero identifier for subcomponents
+ *
+ * Register a new component for @dev. Functions in @ops will be called when the
+ * aggregate driver is ready to bind the overall driver by calling
+ * component_bind_all(). See also &struct component_ops.
+ *
+ * @subcomponent must be nonzero and is used to differentiate between multiple
+ * components registered on the same device @dev. These components are matched
+ * using component_match_add_typed().
+ *
+ * The component needs to be unregistered at driver unload/disconnect by
+ * calling component_del().
+ *
+ * See also component_add().
+ */
+int component_add_typed(struct device *dev, const struct component_ops *ops,
+ int subcomponent)
+{
+ if (WARN_ON(subcomponent == 0))
+ return -EINVAL;
+
+ return __component_add(dev, ops, subcomponent);
+}
+EXPORT_SYMBOL_GPL(component_add_typed);
+
+/**
+ * component_add - register a component
+ * @dev: component device
+ * @ops: component callbacks
+ *
+ * Register a new component for @dev. Functions in @ops will be called when the
+ * aggregate driver is ready to bind the overall driver by calling
+ * component_bind_all(). See also &struct component_ops.
+ *
+ * The component needs to be unregistered at driver unload/disconnect by
+ * calling component_del().
+ *
+ * See also component_add_typed() for a variant that allows multiple different
+ * components on the same device.
+ */
+int component_add(struct device *dev, const struct component_ops *ops)
+{
+ return __component_add(dev, ops, 0);
+}
EXPORT_SYMBOL_GPL(component_add);
+/**
+ * component_del - unregister a component
+ * @dev: component device
+ * @ops: component callbacks
+ *
+ * Unregister a component added with component_add(). If the component is bound
+ * into an aggregate driver, this will force the entire aggregate driver, including
+ * all its components, to be unbound.
+ */
void component_del(struct device *dev, const struct component_ops *ops)
{
struct component *c, *component = NULL;
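
To make the DOC overview and the new kernel-doc above concrete, here is a minimal sketch of the component flow: sub-device drivers register components, and an aggregate driver collects them through a match list. All names are hypothetical; only the component_* calls are from the API documented above.

    #include <linux/component.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    /* --- component (sub-device) side --- */

    static int fooblock_bind(struct device *dev, struct device *master, void *data)
    {
            /* Hook this sub-device into the aggregate driver's state (@data). */
            return 0;
    }

    static void fooblock_unbind(struct device *dev, struct device *master, void *data)
    {
    }

    static const struct component_ops fooblock_ops = {
            .bind   = fooblock_bind,
            .unbind = fooblock_unbind,
    };

    static int fooblock_probe(struct platform_device *pdev)
    {
            return component_add(&pdev->dev, &fooblock_ops);
    }

    /* --- aggregate ("master") side --- */

    static int foo_compare_of(struct device *dev, void *data)
    {
            return dev->of_node == data;    /* match by device-tree node */
    }

    static void foo_release_of(struct device *dev, void *data)
    {
            of_node_put(data);
    }

    static int foo_master_bind(struct device *dev)
    {
            /* Every matched component is present; bind them all at once. */
            return component_bind_all(dev, NULL);
    }

    static void foo_master_unbind(struct device *dev)
    {
            component_unbind_all(dev, NULL);
    }

    static const struct component_master_ops foo_master_ops = {
            .bind   = foo_master_bind,
            .unbind = foo_master_unbind,
    };

    static int foo_master_probe(struct platform_device *pdev)
    {
            struct component_match *match = NULL;
            struct device_node *np;

            /* One match entry per required sub-device; "blocks" is a made-up
             * device-tree phandle property for this sketch. */
            np = of_parse_phandle(pdev->dev.of_node, "blocks", 0);
            if (!np)
                    return -ENODEV;

            component_match_add_release(&pdev->dev, &match, foo_release_of,
                                        foo_compare_of, np);

            return component_master_add_with_match(&pdev->dev, &foo_master_ops,
                                                   match);
    }

Once every matched component has called component_add(), the driver core invokes foo_master_bind(), which in turn binds each component through fooblock_bind(); removing any component with component_del() tears the aggregate down again.
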
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 0073b09bb99f..4aeaa0c92bda 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -179,10 +179,31 @@ void device_pm_move_to_tail(struct device *dev)
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
- * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed
- * automatically when the consumer device driver unbinds from it.
- * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
- * set is invalid and will cause NULL to be returned.
+ * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by
+ * the driver core and, in particular, the caller of this function is expected
+ * to drop the reference to the link acquired by it directly.
+ *
+ * If that flag is not set, however, the caller of this function is handing the
+ * management of the link over to the driver core entirely and its return value
+ * can only be used to check whether or not the link is present. In that case,
+ * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
+ * flags can be used to indicate to the driver core when the link can be safely
+ * deleted. Namely, setting one of them in @flags indicates to the driver core
+ * that the link is not going to be used (by the given caller of this function)
+ * after unbinding the consumer or supplier driver, respectively, from its
+ * device, so the link can be deleted at that point. If none of them is set,
+ * the link will be maintained until one of the devices pointed to by it (either
+ * the consumer or the supplier) is unregistered.
+ *
+ * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
+ * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
+ * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
+ * be used to request the driver core to automatically probe for a consumer
+ * driver after successfully binding a driver to the supplier device.
+ *
+ * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
+ * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
+ * will cause NULL to be returned upfront.
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
@@ -199,10 +220,22 @@ struct device_link *device_link_add(struct device *consumer,
struct device_link *link;
if (!consumer || !supplier ||
- ((flags & DL_FLAG_STATELESS) &&
- (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
+ (flags & DL_FLAG_STATELESS &&
+ flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
+ DL_FLAG_AUTOREMOVE_SUPPLIER |
+ DL_FLAG_AUTOPROBE_CONSUMER)) ||
+ (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
+ flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
+ DL_FLAG_AUTOREMOVE_SUPPLIER)))
return NULL;
+ if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
+ if (pm_runtime_get_sync(supplier) < 0) {
+ pm_runtime_put_noidle(supplier);
+ return NULL;
+ }
+ }
+
device_links_write_lock();
device_pm_lock();
@@ -217,35 +250,71 @@ struct device_link *device_link_add(struct device *consumer,
goto out;
}
- list_for_each_entry(link, &supplier->links.consumers, s_node)
- if (link->consumer == consumer) {
+ /*
+ * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
+ * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
+ * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
+ */
+ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node) {
+ if (link->consumer != consumer)
+ continue;
+
+ /*
+ * Don't return a stateless link if the caller wants a stateful
+ * one and vice versa.
+ */
+ if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
+ link = NULL;
+ goto out;
+ }
+
+ if (flags & DL_FLAG_PM_RUNTIME) {
+ if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+ pm_runtime_new_link(consumer);
+ link->flags |= DL_FLAG_PM_RUNTIME;
+ }
+ if (flags & DL_FLAG_RPM_ACTIVE)
+ refcount_inc(&link->rpm_active);
+ }
+
+ if (flags & DL_FLAG_STATELESS) {
kref_get(&link->kref);
goto out;
}
+ /*
+ * If the life time of the link following from the new flags is
+ * longer than indicated by the flags of the existing link,
+ * update the existing link to stay around longer.
+ */
+ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
+ link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
+ }
+ } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
+ DL_FLAG_AUTOREMOVE_SUPPLIER);
+ }
+ goto out;
+ }
+
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
goto out;
+ refcount_set(&link->rpm_active, 1);
+
if (flags & DL_FLAG_PM_RUNTIME) {
- if (flags & DL_FLAG_RPM_ACTIVE) {
- if (pm_runtime_get_sync(supplier) < 0) {
- pm_runtime_put_noidle(supplier);
- kfree(link);
- link = NULL;
- goto out;
- }
- link->rpm_active = true;
- }
+ if (flags & DL_FLAG_RPM_ACTIVE)
+ refcount_inc(&link->rpm_active);
+
pm_runtime_new_link(consumer);
- /*
- * If the link is being added by the consumer driver at probe
- * time, balance the decrementation of the supplier's runtime PM
- * usage counter after consumer probe in driver_probe_device().
- */
- if (consumer->links.status == DL_DEV_PROBING)
- pm_runtime_get_noresume(supplier);
}
+
get_device(supplier);
link->supplier = supplier;
INIT_LIST_HEAD(&link->s_node);
@@ -260,17 +329,26 @@ struct device_link *device_link_add(struct device *consumer,
link->status = DL_STATE_NONE;
} else {
switch (supplier->links.status) {
- case DL_DEV_DRIVER_BOUND:
+ case DL_DEV_PROBING:
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
- * Some callers expect the link creation during
- * consumer driver probe to resume the supplier
- * even without DL_FLAG_RPM_ACTIVE.
+ * A consumer driver can create a link to a
+ * supplier that has not completed its probing
+ * yet as long as it knows that the supplier is
+ * already functional (for example, it has just
+ * acquired some resources from the supplier).
*/
- if (flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_resume(supplier);
-
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
link->status = DL_STATE_CONSUMER_PROBE;
break;
case DL_DEV_DRIVER_BOUND:
@@ -291,6 +369,14 @@ struct device_link *device_link_add(struct device *consumer,
}
/*
+ * Some callers expect the link creation during consumer driver probe to
+ * resume the supplier even without DL_FLAG_RPM_ACTIVE.
+ */
+ if (link->status == DL_STATE_CONSUMER_PROBE &&
+ flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_resume(supplier);
+
+ /*
* Move the consumer and all of the devices depending on it to the end
* of dpm_list and the devices_kset list.
*
@@ -302,17 +388,24 @@ struct device_link *device_link_add(struct device *consumer,
list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
- dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+ dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
out:
device_pm_unlock();
device_links_write_unlock();
+
+ if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
+ pm_runtime_put(supplier);
+
return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
static void device_link_free(struct device_link *link)
{
+ while (refcount_dec_not_one(&link->rpm_active))
+ pm_runtime_put(link->supplier);
+
put_device(link->consumer);
put_device(link->supplier);
kfree(link);
@@ -328,8 +421,8 @@ static void __device_link_del(struct kref *kref)
{
struct device_link *link = container_of(kref, struct device_link, kref);
- dev_info(link->consumer, "Dropping the link to %s\n",
- dev_name(link->supplier));
+ dev_dbg(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_drop_link(link->consumer);
@@ -355,8 +448,16 @@ static void __device_link_del(struct kref *kref)
}
#endif /* !CONFIG_SRCU */
+static void device_link_put_kref(struct device_link *link)
+{
+ if (link->flags & DL_FLAG_STATELESS)
+ kref_put(&link->kref, __device_link_del);
+ else
+ WARN(1, "Unable to drop a managed device link reference\n");
+}
+
/**
- * device_link_del - Delete a link between two devices.
+ * device_link_del - Delete a stateless link between two devices.
* @link: Device link to delete.
*
* The caller must ensure proper synchronization of this function with runtime
@@ -368,14 +469,14 @@ void device_link_del(struct device_link *link)
{
device_links_write_lock();
device_pm_lock();
- kref_put(&link->kref, __device_link_del);
+ device_link_put_kref(link);
device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
/**
- * device_link_remove - remove a link between two devices.
+ * device_link_remove - Delete a stateless link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
*
@@ -394,7 +495,7 @@ void device_link_remove(void *consumer, struct device *supplier)
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer == consumer) {
- kref_put(&link->kref, __device_link_del);
+ device_link_put_kref(link);
break;
}
}
@@ -474,8 +575,21 @@ void device_links_driver_bound(struct device *dev)
if (link->flags & DL_FLAG_STATELESS)
continue;
+ /*
+ * Links created during consumer probe may be in the "consumer
+ * probe" state to start with if the supplier is still probing
+ * when they are created and they may become "active" if the
+ * consumer probe returns first. Skip them here.
+ */
+ if (link->status == DL_STATE_CONSUMER_PROBE ||
+ link->status == DL_STATE_ACTIVE)
+ continue;
+
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+
+ if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
+ driver_deferred_probe_add(link->consumer);
}
list_for_each_entry(link, &dev->links.suppliers, c_node) {
@@ -512,18 +626,49 @@ static void __device_links_no_driver(struct device *dev)
continue;
if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
- kref_put(&link->kref, __device_link_del);
- else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+ __device_link_del(&link->kref);
+ else if (link->status == DL_STATE_CONSUMER_PROBE ||
+ link->status == DL_STATE_ACTIVE)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
dev->links.status = DL_DEV_NO_DRIVER;
}
+/**
+ * device_links_no_driver - Update links after failing driver probe.
+ * @dev: Device whose driver has just failed to probe.
+ *
+ * Clean up leftover links to consumers for @dev and invoke
+ * %__device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
void device_links_no_driver(struct device *dev)
{
+ struct device_link *link;
+
device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ /*
+ * The probe has failed, so if the status of the link is
+ * "consumer probe" or "active", it must have been added by
+ * a probing consumer while this device was still probing.
+ * Change its state to "dormant", as it represents a valid
+ * relationship, but it is not functionally meaningful.
+ */
+ if (link->status == DL_STATE_CONSUMER_PROBE ||
+ link->status == DL_STATE_ACTIVE)
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
__device_links_no_driver(dev);
+
device_links_write_unlock();
}
@@ -539,11 +684,11 @@ void device_links_no_driver(struct device *dev)
*/
void device_links_driver_cleanup(struct device *dev)
{
- struct device_link *link;
+ struct device_link *link, *ln;
device_links_write_lock();
- list_for_each_entry(link, &dev->links.consumers, s_node) {
+ list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
@@ -557,7 +702,7 @@ void device_links_driver_cleanup(struct device *dev)
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
- kref_put(&link->kref, __device_link_del);
+ __device_link_del(&link->kref);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
@@ -1966,7 +2111,7 @@ int device_add(struct device *dev)
if (dev->class) {
mutex_lock(&dev->class->p->mutex);
/* tie the class to the device */
- klist_add_tail(&dev->knode_class,
+ klist_add_tail(&dev->p->knode_class,
&dev->class->p->klist_devices);
/* notify any interfaces that the device is here */
@@ -2080,6 +2225,17 @@ void device_del(struct device *dev)
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
+ /*
+ * Hold the device lock and set the "dead" flag to guarantee that
+ * the update behavior is consistent with the other bitfields near
+ * it and that we cannot have an asynchronous probe routine trying
+ * to run while we are tearing out the bus/class/sysfs from
+ * underneath the device.
+ */
+ device_lock(dev);
+ dev->p->dead = true;
+ device_unlock(dev);
+
/* Notify clients of device removal. This call must come
* before dpm_sysfs_remove().
*/
@@ -2105,7 +2261,7 @@ void device_del(struct device *dev)
if (class_intf->remove_dev)
class_intf->remove_dev(dev, class_intf);
/* remove the device from the class list */
- klist_del(&dev->knode_class);
+ klist_del(&dev->p->knode_class);
mutex_unlock(&dev->class->p->mutex);
}
device_remove_file(dev, &dev_attr_uevent);
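
To tie the reworked device_link_add() documentation above to a concrete call site, here is a minimal sketch of a managed (non-stateless) link created from a consumer's probe path; the function and device names are hypothetical:

    #include <linux/device.h>

    static int foo_consumer_bind_supplier(struct device *consumer,
                                          struct device *supplier)
    {
            struct device_link *link;

            /*
             * No DL_FLAG_STATELESS, so the driver core manages the link's
             * lifetime. DL_FLAG_AUTOREMOVE_CONSUMER tells it the link is no
             * longer needed once this consumer unbinds, and DL_FLAG_PM_RUNTIME
             * plus DL_FLAG_RPM_ACTIVE start the link out with the supplier
             * runtime-resumed (per the refcounted rpm_active handling above).
             */
            link = device_link_add(consumer, supplier,
                                   DL_FLAG_AUTOREMOVE_CONSUMER |
                                   DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);

            /* A managed link must not be dropped with device_link_del(). */
            return link ? 0 : -EINVAL;
    }
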
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index eb9443d5bae1..668139cfa664 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -409,6 +409,7 @@ static void device_create_release(struct device *dev)
kfree(dev);
}
+__printf(4, 0)
static struct device *
__cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
@@ -427,6 +428,7 @@ __cpu_device_create(struct device *parent, void *drvdata,
dev->parent = parent;
dev->groups = groups;
dev->release = device_create_release;
+ device_set_pm_not_required(dev);
dev_set_drvdata(dev, drvdata);
retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 8ac10af17c00..a823f469e53f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -57,6 +57,10 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static struct dentry *deferred_devices;
static bool initcalls_done;
+/* Save the async probe drivers' name from kernel cmdline */
+#define ASYNC_DRV_NAMES_MAX_LEN 256
+static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
+
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
* to prohibit probing of devices as it could be unsafe.
@@ -116,7 +120,7 @@ static void deferred_probe_work_func(struct work_struct *work)
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
-static void driver_deferred_probe_add(struct device *dev)
+void driver_deferred_probe_add(struct device *dev)
{
mutex_lock(&deferred_probe_mutex);
if (list_empty(&dev->p->deferred_probe)) {
@@ -674,6 +678,23 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
return ret;
}
+static inline bool cmdline_requested_async_probing(const char *drv_name)
+{
+ return parse_option_str(async_probe_drv_names, drv_name);
+}
+
+/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
+static int __init save_async_options(char *buf)
+{
+ if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
+ printk(KERN_WARNING
+ "Too long list of driver names for 'driver_async_probe'!\n");
+
+ strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+ return 0;
+}
+__setup("driver_async_probe=", save_async_options);
+
bool driver_allows_async_probing(struct device_driver *drv)
{
switch (drv->probe_type) {
@@ -684,6 +705,9 @@ bool driver_allows_async_probing(struct device_driver *drv)
return false;
default:
+ if (cmdline_requested_async_probing(drv->name))
+ return true;
+
if (module_requested_async_probing(drv->owner))
return true;
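
The driver_async_probe= parsing added above gives the kernel command line a per-driver opt-in to asynchronous probing; a driver can also opt in itself through probe_type. A sketch of both, with a made-up driver name:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* Boot-time opt-in (illustrative names):
     *     driver_async_probe=foo,bar
     */

    static int foo_probe(struct platform_device *pdev)  { return 0; }
    static int foo_remove(struct platform_device *pdev) { return 0; }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,
            .driver = {
                    .name       = "foo",
                    /* Roughly the same effect as listing "foo" on the command
                     * line: driver_allows_async_probing() returns true. */
                    .probe_type = PROBE_PREFER_ASYNCHRONOUS,
            },
    };
    module_platform_driver(foo_driver);
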
@@ -731,15 +755,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
bool async_allowed;
int ret;
- /*
- * Check if device has already been claimed. This may
- * happen with driver loading, device discovery/registration,
- * and deferred probe processing happens all at once with
- * multiple threads.
- */
- if (dev->driver)
- return -EBUSY;
-
ret = driver_match_device(drv, dev);
if (ret == 0) {
/* no match */
@@ -774,6 +789,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
device_lock(dev);
+ /*
+ * Check if device has already been removed or claimed. This may
+ * happen with driver loading, device discovery/registration,
+ * and deferred probe processing happens all at once with
+ * multiple threads.
+ */
+ if (dev->p->dead || dev->driver)
+ goto out_unlock;
+
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@@ -784,7 +808,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
if (dev->parent)
pm_runtime_put(dev->parent);
-
+out_unlock:
device_unlock(dev);
put_device(dev);
@@ -829,7 +853,7 @@ static int __device_attach(struct device *dev, bool allow_async)
*/
dev_dbg(dev, "scheduling asynchronous probe\n");
get_device(dev);
- async_schedule(__device_attach_async_helper, dev);
+ async_schedule_dev(__device_attach_async_helper, dev);
} else {
pm_request_idle(dev);
}
@@ -867,6 +891,88 @@ void device_initial_probe(struct device *dev)
__device_attach(dev, true);
}
+/*
+ * __device_driver_lock - acquire locks needed to manipulate dev->drv
+ * @dev: Device we will update driver info for
+ * @parent: Parent device. Needed if the bus requires parent lock
+ *
+ * This function will take the required locks for manipulating dev->drv.
+ * Normally this will just be the @dev lock, but when called for a USB
+ * interface, @parent lock will be held as well.
+ */
+static void __device_driver_lock(struct device *dev, struct device *parent)
+{
+ if (parent && dev->bus->need_parent_lock)
+ device_lock(parent);
+ device_lock(dev);
+}
+
+/*
+ * __device_driver_unlock - release locks needed to manipulate dev->drv
+ * @dev: Device we will update driver info for
+ * @parent: Parent device. Needed if the bus requires parent lock
+ *
+ * This function will release the required locks for manipulating dev->drv.
+ * Normally this will just be the @dev lock, but when called for a
+ * USB interface, @parent lock will be released as well.
+ */
+static void __device_driver_unlock(struct device *dev, struct device *parent)
+{
+ device_unlock(dev);
+ if (parent && dev->bus->need_parent_lock)
+ device_unlock(parent);
+}
+
+/**
+ * device_driver_attach - attach a specific driver to a specific device
+ * @drv: Driver to attach
+ * @dev: Device to attach it to
+ *
+ * Manually attach driver to a device. Will acquire both @dev lock and
+ * @dev->parent lock if needed.
+ */
+int device_driver_attach(struct device_driver *drv, struct device *dev)
+{
+ int ret = 0;
+
+ __device_driver_lock(dev, dev->parent);
+
+ /*
+ * If device has been removed or someone has already successfully
+ * bound a driver before us, just skip the driver probe call.
+ */
+ if (!dev->p->dead && !dev->driver)
+ ret = driver_probe_device(drv, dev);
+
+ __device_driver_unlock(dev, dev->parent);
+
+ return ret;
+}
+
+static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
+{
+ struct device *dev = _dev;
+ struct device_driver *drv;
+ int ret = 0;
+
+ __device_driver_lock(dev, dev->parent);
+
+ drv = dev->p->async_driver;
+
+ /*
+ * If device has been removed or someone has already successfully
+ * bound a driver before us, just skip the driver probe call.
+ */
+ if (!dev->p->dead && !dev->driver)
+ ret = driver_probe_device(drv, dev);
+
+ __device_driver_unlock(dev, dev->parent);
+
+ dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret);
+
+ put_device(dev);
+}
+
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
@@ -894,14 +1000,26 @@ static int __driver_attach(struct device *dev, void *data)
return ret;
} /* ret > 0 means positive match */
- if (dev->parent && dev->bus->need_parent_lock)
- device_lock(dev->parent);
- device_lock(dev);
- if (!dev->driver)
- driver_probe_device(drv, dev);
- device_unlock(dev);
- if (dev->parent && dev->bus->need_parent_lock)
- device_unlock(dev->parent);
+ if (driver_allows_async_probing(drv)) {
+ /*
+ * Instead of probing the device synchronously, we will
+ * probe it asynchronously to allow for more parallelism.
+ *
+ * We only take the device lock here in order to guarantee
+ * that the dev->driver and async_driver fields are protected
+ */
+ dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
+ device_lock(dev);
+ if (!dev->driver) {
+ get_device(dev);
+ dev->p->async_driver = drv;
+ async_schedule_dev(__driver_attach_async_helper, dev);
+ }
+ device_unlock(dev);
+ return 0;
+ }
+
+ device_driver_attach(drv, dev);
return 0;
}
@@ -932,15 +1050,11 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv = dev->driver;
if (drv) {
while (device_links_busy(dev)) {
- device_unlock(dev);
- if (parent && dev->bus->need_parent_lock)
- device_unlock(parent);
+ __device_driver_unlock(dev, parent);
device_links_unbind_consumers(dev);
- if (parent && dev->bus->need_parent_lock)
- device_lock(parent);
- device_lock(dev);
+ __device_driver_lock(dev, parent);
/*
* A concurrent invocation of the same function might
* have released the driver successfully while this one
@@ -968,9 +1082,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv->remove(dev);
device_links_driver_cleanup(dev);
- arch_teardown_dma_ops(dev);
devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
@@ -993,16 +1107,12 @@ void device_release_driver_internal(struct device *dev,
struct device_driver *drv,
struct device *parent)
{
- if (parent && dev->bus->need_parent_lock)
- device_lock(parent);
+ __device_driver_lock(dev, parent);
- device_lock(dev);
if (!drv || drv == dev->driver)
__device_release_driver(dev, parent);
- device_unlock(dev);
- if (parent && dev->bus->need_parent_lock)
- device_unlock(parent);
+ __device_driver_unlock(dev, parent);
}
/**
@@ -1028,6 +1138,18 @@ void device_release_driver(struct device *dev)
EXPORT_SYMBOL_GPL(device_release_driver);
/**
+ * device_driver_detach - detach driver from a specific device
+ * @dev: device to detach driver from
+ *
+ * Detach driver from device. Will acquire both @dev lock and @dev->parent
+ * lock if needed.
+ */
+void device_driver_detach(struct device *dev)
+{
+ device_release_driver_internal(dev, NULL, dev->parent);
+}
+
+/**
* driver_detach - detach driver from all devices it controls.
* @drv: driver.
*/
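
Editor's aside on the hunks above: a driver can opt in to the asynchronous probe path either in code, via probe_type, or at boot time via the new driver_async_probe= option handled by save_async_options(). The following is a minimal, illustrative sketch only; the "demo-async" driver name and demo_probe() are made up.

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed (possibly asynchronously)\n");
	return 0;
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-async",
		/* Ask the driver core to prefer asynchronous probing. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");

Alternatively, booting with driver_async_probe=demo-async requests asynchronous probing for that driver name without touching the driver source, which is what cmdline_requested_async_probing() checks above.
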
diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c
index d427e806cd73..04db9ae235e4 100644
--- a/drivers/base/devcon.c
+++ b/drivers/base/devcon.c
@@ -7,10 +7,37 @@
*/
#include <linux/device.h>
+#include <linux/property.h>
static DEFINE_MUTEX(devcon_lock);
static LIST_HEAD(devcon_list);
+typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep,
+ void *data);
+
+static void *
+fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
+ void *data, devcon_match_fn_t match)
+{
+ struct device_connection con = { .id = con_id };
+ struct fwnode_handle *ep;
+ void *ret;
+
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
+ con.fwnode = fwnode_graph_get_remote_port_parent(ep);
+ if (!fwnode_device_is_available(con.fwnode))
+ continue;
+
+ ret = match(&con, -1, data);
+ fwnode_handle_put(con.fwnode);
+ if (ret) {
+ fwnode_handle_put(ep);
+ return ret;
+ }
+ }
+ return NULL;
+}
+
/**
* device_connection_find_match - Find physical connection to a device
* @dev: Device with the connection
@@ -23,10 +50,9 @@ static LIST_HEAD(devcon_list);
* caller is expecting to be returned.
*/
void *device_connection_find_match(struct device *dev, const char *con_id,
- void *data,
- void *(*match)(struct device_connection *con,
- int ep, void *data))
+ void *data, devcon_match_fn_t match)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
const char *devname = dev_name(dev);
struct device_connection *con;
void *ret = NULL;
@@ -35,6 +61,12 @@ void *device_connection_find_match(struct device *dev, const char *con_id,
if (!match)
return NULL;
+ if (fwnode) {
+ ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&devcon_lock);
list_for_each_entry(con, &devcon_list, list) {
@@ -75,12 +107,36 @@ static struct bus_type *generic_match_buses[] = {
NULL,
};
+static int device_fwnode_match(struct device *dev, void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode;
+}
+
+static void *device_connection_fwnode_match(struct device_connection *con)
+{
+ struct bus_type *bus;
+ struct device *dev;
+
+ for (bus = generic_match_buses[0]; bus; bus++) {
+ dev = bus_find_device(bus, NULL, (void *)con->fwnode,
+ device_fwnode_match);
+ if (dev && !strncmp(dev_name(dev), con->id, strlen(con->id)))
+ return dev;
+
+ put_device(dev);
+ }
+ return NULL;
+}
+
/* This tries to find the device from the most common bus types by name. */
static void *generic_match(struct device_connection *con, int ep, void *data)
{
struct bus_type *bus;
struct device *dev;
+ if (con->fwnode)
+ return device_connection_fwnode_match(con);
+
for (bus = generic_match_buses[0]; bus; bus++) {
dev = bus_find_device_by_name(bus, NULL, con->endpoint[ep]);
if (dev)
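
For orientation, a rough sketch of the callback shape that device_connection_find_match() expects after the change above; the "con-id" string, the cookie variable, and demo_devcon_match() are hypothetical and not part of this patch.

/* Shape of a match callback passed to device_connection_find_match(). */
static void *demo_devcon_match(struct device_connection *con, int ep, void *data)
{
	/*
	 * On the fwnode-graph path added above, con->fwnode is set and
	 * ep is -1; return the object the caller is looking for, or NULL
	 * to keep iterating over the remaining endpoints.
	 */
	return NULL;
}

/* Hypothetical call site: */
/* cookie = device_connection_find_match(dev, "con-id", NULL, demo_devcon_match); */
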
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
index a97eeb0be1d8..0b2dfa6259c9 100644
--- a/drivers/base/firmware_loader/Makefile
+++ b/drivers/base/firmware_loader/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for the Linux firmware loader
-obj-y := fallback_table.o
+obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
firmware_class-objs := main.o
firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
+
+obj-y += builtin/
diff --git a/firmware/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore
index 9c8bdb9fdcc3..9c8bdb9fdcc3 100644
--- a/firmware/.gitignore
+++ b/drivers/base/firmware_loader/builtin/.gitignore
diff --git a/firmware/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 37e5ae387400..37e5ae387400 100644
--- a/firmware/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 7428659d8df9..776dd69cf5be 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -16,9 +16,6 @@
* firmware fallback configuration table
*/
-/* Module or buit-in */
-#ifdef CONFIG_FW_LOADER_USER_HELPER
-
static unsigned int zero;
static unsigned int one = 1;
@@ -51,5 +48,3 @@ struct ctl_table firmware_config_table[] = {
{ }
};
EXPORT_SYMBOL_GPL(firmware_config_table);
-
-#endif
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 8e9213b36e31..7eaaf5ee5ba6 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -328,12 +328,12 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
rc = kernel_read_file_from_path(path, &fw_priv->data, &size,
msize, id);
if (rc) {
- if (rc == -ENOENT)
- dev_dbg(device, "loading %s failed with error %d\n",
- path, rc);
- else
+ if (rc != -ENOENT)
dev_warn(device, "loading %s failed with error %d\n",
path, rc);
+ else
+ dev_dbg(device, "loading %s failed for no such file or directory.\n",
+ path);
continue;
}
dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1c958eb33ef4..4e45ac21d672 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -127,7 +127,20 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
}
- return r ? r->start : -ENXIO;
+ if (r)
+ return r->start;
+
+ /*
+ * For the index 0 interrupt, allow falling back to GpioInt
+ * resources. While a device could have both Interrupt and GpioInt
+ * resources, making this fallback ambiguous, in many common cases
+ * the device will only expose one IRQ, and this fallback
+ * allows a common code path across either kind of resource.
+ */
+ if (num == 0 && has_acpi_companion(&dev->dev))
+ return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+
+ return -ENXIO;
#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);
@@ -508,10 +521,12 @@ struct platform_device *platform_device_register_full(
pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
if (!pdev)
- goto err_alloc;
+ return ERR_PTR(-ENOMEM);
pdev->dev.parent = pdevinfo->parent;
pdev->dev.fwnode = pdevinfo->fwnode;
+ pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
+ pdev->dev.of_node_reused = pdevinfo->of_node_reused;
if (pdevinfo->dma_mask) {
/*
@@ -553,8 +568,6 @@ struct platform_device *platform_device_register_full(
err:
ACPI_COMPANION_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
-
-err_alloc:
platform_device_put(pdev);
return ERR_PTR(ret);
}
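
With the platform_get_irq() fallback above, a driver can use one code path whether the firmware describes its interrupt as an Interrupt or a GpioInt resource. A minimal consumer sketch, assuming a hypothetical handler; not taken from this patch:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_irq_probe(struct platform_device *pdev)
{
	/* Index 0 may now resolve to a GpioInt resource on ACPI systems. */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, demo_irq_handler, 0,
				dev_name(&pdev->dev), NULL);
}
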
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5a42ae4078c2..365ad751ce0f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
- clk_prepare(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
- ce->clk, ce->con_id);
+ if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ } else {
+ ce->status = PCE_STATUS_ACQUIRED;
+ dev_dbg(dev,
+ "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
+ }
}
}
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b413951c6abc..22aedb28aad7 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
* For a detailed function description, see dev_pm_domain_attach_by_id().
*/
struct device *dev_pm_domain_attach_by_name(struct device *dev,
- char *name)
+ const char *name)
{
if (dev->pm_domain)
return ERR_PTR(-EEXIST);
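
With the const qualifier above, callers can pass string literals directly. A small sketch under stated assumptions; the "perf" domain name is made up:

#include <linux/err.h>
#include <linux/pm_domain.h>

static int demo_attach_perf_domain(struct device *dev)
{
	struct device *pd_dev = dev_pm_domain_attach_by_name(dev, "perf");

	if (IS_ERR(pd_dev))
		return PTR_ERR(pd_dev);
	if (!pd_dev)
		return -ENODEV;	/* no matching domain described for this device */
	return 0;
}
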
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 500de1dee967..2c334c01fc43 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2483,7 +2483,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
* power-domain-names DT property. For further description see
* genpd_dev_pm_attach_by_id().
*/
-struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
int index;
@@ -2948,18 +2948,11 @@ static int __init genpd_debug_init(void)
genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
- if (!genpd_debugfs_dir)
- return -ENOMEM;
-
- d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- genpd_debugfs_dir, NULL, &summary_fops);
- if (!d)
- return -ENOMEM;
+ debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
+ NULL, &summary_fops);
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
- if (!d)
- return -ENOMEM;
debugfs_create_file("current_state", 0444,
d, genpd, &status_fops);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a690fd400260..5a8149829ab3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,7 @@
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
+#include <linux/devfreq.h>
#include <linux/timer.h>
#include "../base.h"
@@ -123,6 +124,10 @@ void device_pm_unlock(void)
*/
void device_pm_add(struct device *dev)
{
+ /* Skip PM setup/initialization. */
+ if (device_pm_not_required(dev))
+ return;
+
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
@@ -141,6 +146,9 @@ void device_pm_add(struct device *dev)
*/
void device_pm_remove(struct device *dev)
{
+ if (device_pm_not_required(dev))
+ return;
+
pr_debug("PM: Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
@@ -726,7 +734,7 @@ void dpm_noirq_resume_devices(pm_message_t state)
reinit_completion(&dev->power.completion);
if (is_async(dev)) {
get_device(dev);
- async_schedule(async_resume_noirq, dev);
+ async_schedule_dev(async_resume_noirq, dev);
}
}
@@ -883,7 +891,7 @@ void dpm_resume_early(pm_message_t state)
reinit_completion(&dev->power.completion);
if (is_async(dev)) {
get_device(dev);
- async_schedule(async_resume_early, dev);
+ async_schedule_dev(async_resume_early, dev);
}
}
@@ -1047,7 +1055,7 @@ void dpm_resume(pm_message_t state)
reinit_completion(&dev->power.completion);
if (is_async(dev)) {
get_device(dev);
- async_schedule(async_resume, dev);
+ async_schedule_dev(async_resume, dev);
}
}
@@ -1078,6 +1086,7 @@ void dpm_resume(pm_message_t state)
dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
+ devfreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
@@ -1366,7 +1375,7 @@ static int device_suspend_noirq(struct device *dev)
if (is_async(dev)) {
get_device(dev);
- async_schedule(async_suspend_noirq, dev);
+ async_schedule_dev(async_suspend_noirq, dev);
return 0;
}
return __device_suspend_noirq(dev, pm_transition, false);
@@ -1569,7 +1578,7 @@ static int device_suspend_late(struct device *dev)
if (is_async(dev)) {
get_device(dev);
- async_schedule(async_suspend_late, dev);
+ async_schedule_dev(async_suspend_late, dev);
return 0;
}
@@ -1739,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev)) {
+ pm_dev_dbg(dev, state, "direct-complete ");
goto Complete;
+ }
pm_runtime_enable(dev);
}
@@ -1833,7 +1844,7 @@ static int device_suspend(struct device *dev)
if (is_async(dev)) {
get_device(dev);
- async_schedule(async_suspend, dev);
+ async_schedule_dev(async_suspend, dev);
return 0;
}
@@ -1852,6 +1863,7 @@ int dpm_suspend(pm_message_t state)
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
+ devfreq_suspend();
cpufreq_suspend();
mutex_lock(&dpm_list_mtx);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 70624695b6d5..a80dbf08a99c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags);
*/
void update_pm_runtime_accounting(struct device *dev)
{
- unsigned long now = jiffies;
- unsigned long delta;
+ u64 now, last, delta;
- delta = now - dev->power.accounting_timestamp;
+ if (dev->power.disable_depth > 0)
+ return;
+
+ last = dev->power.accounting_timestamp;
+ now = ktime_get_mono_fast_ns();
dev->power.accounting_timestamp = now;
- if (dev->power.disable_depth > 0)
+ /*
+ * Because ktime_get_mono_fast_ns() is not monotonic during
+ * timekeeping updates, ensure that 'now' is after the last saved
+ * timestamp.
+ */
+ if (now < last)
return;
+ delta = now - last;
+
if (dev->power.runtime_status == RPM_SUSPENDED)
- dev->power.suspended_jiffies += delta;
+ dev->power.suspended_time += delta;
else
- dev->power.active_jiffies += delta;
+ dev->power.active_time += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
@@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
dev->power.runtime_status = status;
}
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+ u64 time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ update_pm_runtime_accounting(dev);
+ time = dev->power.suspended_time;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return time;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
/**
* pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
* @dev: Device to handle.
@@ -95,7 +121,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
- hrtimer_cancel(&dev->power.suspend_timer);
+ hrtimer_try_to_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
@@ -121,7 +147,7 @@ static void pm_runtime_cancel_pending(struct device *dev)
* Compute the autosuspend-delay expiration time based on the device's
* power.last_busy time. If the delay has already expired or is disabled
* (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
*
* This function may be called either with or without dev->power.lock held.
* Either way it can be racy, since power.last_busy may be updated at any time.
@@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev)
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
- u64 last_busy, expires = 0;
- u64 now = ktime_to_ns(ktime_get());
+ u64 expires;
if (!dev->power.use_autosuspend)
- goto out;
+ return 0;
autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
- goto out;
-
- last_busy = READ_ONCE(dev->power.last_busy);
+ return 0;
- expires = last_busy + autosuspend_delay * NSEC_PER_MSEC;
- if (expires <= now)
- expires = 0; /* Already expired. */
+ expires = READ_ONCE(dev->power.last_busy);
+ expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+ if (expires > ktime_get_mono_fast_ns())
+ return expires; /* Expires in the future */
- out:
- return expires;
+ return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
@@ -259,11 +282,8 @@ static int rpm_get_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
- continue;
-
- if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
- link->rpm_active)
+ if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
+ READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -272,7 +292,7 @@ static int rpm_get_suppliers(struct device *dev)
pm_runtime_put_noidle(link->supplier);
return retval;
}
- link->rpm_active = true;
+ refcount_inc(&link->rpm_active);
}
return 0;
}
@@ -281,12 +301,13 @@ static void rpm_put_suppliers(struct device *dev)
{
struct device_link *link;
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->rpm_active &&
- READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+ continue;
+
+ while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put(link->supplier);
- link->rpm_active = false;
- }
+ }
}
/**
@@ -525,7 +546,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
* We add a slack of 25% to gather wakeups
* without sacrificing the granularity.
*/
- u64 slack = READ_ONCE(dev->power.autosuspend_delay) *
+ u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
(NSEC_PER_MSEC >> 2);
dev->power.timer_expires = expires;
@@ -905,8 +926,11 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
spin_lock_irqsave(&dev->power.lock, flags);
expires = dev->power.timer_expires;
- /* If 'expire' is after 'jiffies' we've been called too early. */
- if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
+ /*
+ * If 'expires' is after the current time, we've been called
+ * too early.
+ */
+ if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -925,7 +949,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
- ktime_t expires;
+ u64 expires;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
@@ -942,8 +966,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
- expires = ktime_add(ktime_get(), ms_to_ktime(delay));
- dev->power.timer_expires = ktime_to_ns(expires);
+ expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+ dev->power.timer_expires = expires;
dev->power.timer_autosuspends = 0;
hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
@@ -1088,24 +1112,57 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
* and the device parent's counter of unsuspended children is modified to
* reflect the new status. If the new status is RPM_SUSPENDED, an idle
* notification request for the parent is submitted.
+ *
+ * If @dev has any suppliers (as reflected by device links to them), and @status
+ * is RPM_ACTIVE, they will be activated upfront and if the activation of one
+ * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
+ * of the @status value) and the suppliers will be deactivated on exit. The
+ * error returned by the failing supplier activation will be returned in that
+ * case.
*/
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
struct device *parent = dev->parent;
- unsigned long flags;
bool notify_parent = false;
int error = 0;
if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
return -EINVAL;
- spin_lock_irqsave(&dev->power.lock, flags);
+ spin_lock_irq(&dev->power.lock);
- if (!dev->power.runtime_error && !dev->power.disable_depth) {
+ /*
+ * Prevent PM-runtime from being enabled for the device or return an
+ * error if it is enabled already and working.
+ */
+ if (dev->power.runtime_error || dev->power.disable_depth)
+ dev->power.disable_depth++;
+ else
error = -EAGAIN;
- goto out;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ if (error)
+ return error;
+
+ /*
+ * If the new status is RPM_ACTIVE, the suppliers can be activated
+ * upfront regardless of the current status, because next time
+ * rpm_put_suppliers() runs, the rpm_active refcounts of the links
+ * involved will be dropped down to one anyway.
+ */
+ if (status == RPM_ACTIVE) {
+ int idx = device_links_read_lock();
+
+ error = rpm_get_suppliers(dev);
+ if (error)
+ status = RPM_SUSPENDED;
+
+ device_links_read_unlock(idx);
}
+ spin_lock_irq(&dev->power.lock);
+
if (dev->power.runtime_status == status || !parent)
goto out_set;
@@ -1133,19 +1190,33 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
spin_unlock(&parent->power.lock);
- if (error)
+ if (error) {
+ status = RPM_SUSPENDED;
goto out;
+ }
}
out_set:
__update_runtime_status(dev, status);
- dev->power.runtime_error = 0;
+ if (!error)
+ dev->power.runtime_error = 0;
+
out:
- spin_unlock_irqrestore(&dev->power.lock, flags);
+ spin_unlock_irq(&dev->power.lock);
if (notify_parent)
pm_request_idle(parent);
+ if (status == RPM_SUSPENDED) {
+ int idx = device_links_read_lock();
+
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
+ pm_runtime_enable(dev);
+
return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
@@ -1273,6 +1344,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
pm_runtime_put_noidle(dev);
}
+ /* Update time accounting before disabling PM-runtime. */
+ update_pm_runtime_accounting(dev);
+
if (!dev->power.disable_depth++)
__pm_runtime_barrier(dev);
@@ -1291,10 +1365,15 @@ void pm_runtime_enable(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
- if (dev->power.disable_depth > 0)
+ if (dev->power.disable_depth > 0) {
dev->power.disable_depth--;
- else
+
+ /* About to enable runtime pm, set accounting_timestamp to now */
+ if (!dev->power.disable_depth)
+ dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+ } else {
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ }
WARN(!dev->power.disable_depth &&
dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1491,7 +1570,6 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.accounting_timestamp = jiffies;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
@@ -1536,7 +1614,7 @@ void pm_runtime_remove(struct device *dev)
*
* Check links from this device to any consumers and if any of them have active
* runtime PM references to the device, drop the usage counter of the device
- * (once per link).
+ * (as many times as needed).
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*
@@ -1558,10 +1636,8 @@ void pm_runtime_clean_up_links(struct device *dev)
if (link->flags & DL_FLAG_STATELESS)
continue;
- if (link->rpm_active) {
+ while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put_noidle(dev);
- link->rpm_active = false;
- }
}
device_links_read_unlock(idx);
@@ -1579,8 +1655,11 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->flags & DL_FLAG_PM_RUNTIME)
+ if (link->flags & DL_FLAG_PM_RUNTIME) {
+ link->supplier_preactivated = true;
+ refcount_inc(&link->rpm_active);
pm_runtime_get_sync(link->supplier);
+ }
device_links_read_unlock(idx);
}
@@ -1597,8 +1676,11 @@ void pm_runtime_put_suppliers(struct device *dev)
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_put(link->supplier);
+ if (link->supplier_preactivated) {
+ link->supplier_preactivated = false;
+ if (refcount_dec_not_one(&link->rpm_active))
+ pm_runtime_put(link->supplier);
+ }
device_links_read_unlock(idx);
}
@@ -1612,8 +1694,6 @@ void pm_runtime_new_link(struct device *dev)
void pm_runtime_drop_link(struct device *dev)
{
- rpm_put_suppliers(dev);
-
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
dev->power.links_count--;
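
The new pm_runtime_suspended_time() export above returns the accumulated suspended time in nanoseconds, with the accounting brought up to date first. A usage sketch, assuming a hypothetical helper name:

#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/pm_runtime.h>

/* Sketch: how long @dev has been runtime-suspended, in milliseconds. */
static u64 demo_suspended_ms(struct device *dev)
{
	return div_u64(pm_runtime_suspended_time(dev), NSEC_PER_MSEC);
}
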
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d713738ce796..c6bf76124184 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
+ u64 tmp;
spin_lock_irq(&dev->power.lock);
update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+ tmp = dev->power.active_time;
+ do_div(tmp, NSEC_PER_MSEC);
+ ret = sprintf(buf, "%llu\n", tmp);
spin_unlock_irq(&dev->power.lock);
return ret;
}
@@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
+ u64 tmp;
spin_lock_irq(&dev->power.lock);
update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n",
- jiffies_to_msecs(dev->power.suspended_jiffies));
+ tmp = dev->power.suspended_time;
+ do_div(tmp, NSEC_PER_MSEC);
+ ret = sprintf(buf, "%llu\n", tmp);
spin_unlock_irq(&dev->power.lock);
return ret;
}
@@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev)
{
int rc;
+ /* No need to create PM sysfs if explicitly disabled. */
+ if (device_pm_not_required(dev))
+ return 0;
+
rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
if (rc)
return rc;
@@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ if (device_pm_not_required(dev))
+ return;
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5fa1898755a3..f1fee72ed970 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -783,7 +783,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
/**
- * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
* @dev: Device the wakeup event is related to.
* @msec: Anticipated event processing time (in milliseconds).
* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 2e8f0144f9ab..9cbb4b0cd01b 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -33,7 +33,7 @@ struct regcache_rbtree_node {
unsigned int blklen;
/* the actual rbtree node holding this block */
struct rb_node node;
-} __attribute__ ((packed));
+};
struct regcache_rbtree_ctx {
struct rb_root root;
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1bd1145ad8b5..5059748afd4c 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -35,6 +35,7 @@ struct regmap_irq_chip_data {
int wake_count;
void *status_reg_buf;
+ unsigned int *main_status_buf;
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
@@ -108,6 +109,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* suppress pointless writes.
*/
for (i = 0; i < d->chip->num_regs; i++) {
+ if (!d->chip->mask_base)
+ continue;
+
reg = d->chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
if (d->chip->mask_invert) {
@@ -258,7 +262,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
const struct regmap_irq_type *t = &irq_data->type;
if ((t->types_supported & type) != type)
- return -ENOTSUPP;
+ return 0;
reg = t->type_reg_offset / map->reg_stride;
@@ -326,6 +330,33 @@ static const struct irq_chip regmap_irq_chip = {
.irq_set_wake = regmap_irq_set_wake,
};
+static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
+ unsigned int b)
+{
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ struct regmap_irq_sub_irq_map *subreg;
+ int i, ret = 0;
+
+ if (!chip->sub_reg_offsets) {
+ /* Assume linear mapping */
+ ret = regmap_read(map, chip->status_base +
+ (b * map->reg_stride * data->irq_reg_stride),
+ &data->status_buf[b]);
+ } else {
+ subreg = &chip->sub_reg_offsets[b];
+ for (i = 0; i < subreg->num_regs; i++) {
+ unsigned int offset = subreg->offset[i];
+
+ ret = regmap_read(map, chip->status_base + offset,
+ &data->status_buf[offset]);
+ if (ret)
+ break;
+ }
+ }
+ return ret;
+}
+
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
struct regmap_irq_chip_data *data = d;
@@ -349,11 +380,65 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
/*
- * Read in the statuses, using a single bulk read if possible
- * in order to reduce the I/O overheads.
+ * Read only registers with active IRQs if the chip has 'main status
+ * register'. Else read in the statuses, using a single bulk read if
+ * possible in order to reduce the I/O overheads.
*/
- if (!map->use_single_read && map->reg_stride == 1 &&
- data->irq_reg_stride == 1) {
+
+ if (chip->num_main_regs) {
+ unsigned int max_main_bits;
+ unsigned long size;
+
+ size = chip->num_regs * sizeof(unsigned int);
+
+ max_main_bits = (chip->num_main_status_bits) ?
+ chip->num_main_status_bits : chip->num_regs;
+ /* Clear the status buf as we don't read all status regs */
+ memset(data->status_buf, 0, size);
+
+ /* We could support bulk reads for the main status registers,
+ * but devices with very many main status registers are not
+ * expected, so let's only support single reads for the sake of
+ * simplicity and add bulk reads later if needed.
+ */
+ for (i = 0; i < chip->num_main_regs; i++) {
+ ret = regmap_read(map, chip->main_status +
+ (i * map->reg_stride
+ * data->irq_reg_stride),
+ &data->main_status_buf[i]);
+ if (ret) {
+ dev_err(map->dev,
+ "Failed to read IRQ status %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ /* Read sub registers with active IRQs */
+ for (i = 0; i < chip->num_main_regs; i++) {
+ unsigned int b;
+ const unsigned long mreg = data->main_status_buf[i];
+
+ for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
+ if (i * map->format.val_bytes * 8 + b >
+ max_main_bits)
+ break;
+ ret = read_sub_irq_data(data, b);
+
+ if (ret != 0) {
+ dev_err(map->dev,
+ "Failed to read IRQ status %d\n",
+ ret);
+ if (chip->runtime_pm)
+ pm_runtime_put(map->dev);
+ goto exit;
+ }
+ }
+
+ }
+ } else if (!map->use_single_read && map->reg_stride == 1 &&
+ data->irq_reg_stride == 1) {
+
u8 *buf8 = data->status_reg_buf;
u16 *buf16 = data->status_reg_buf;
u32 *buf32 = data->status_reg_buf;
@@ -518,6 +603,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d)
return -ENOMEM;
+ if (chip->num_main_regs) {
+ d->main_status_buf = kcalloc(chip->num_main_regs,
+ sizeof(unsigned int),
+ GFP_KERNEL);
+
+ if (!d->main_status_buf)
+ goto err_alloc;
+ }
+
d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
GFP_KERNEL);
if (!d->status_buf)
@@ -588,6 +682,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
/* Mask all the interrupts by default */
for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i];
+ if (!chip->mask_base)
+ continue;
+
reg = chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
if (chip->mask_invert)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 89ad8dee6ad5..1fad9291f6aa 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -499,6 +499,28 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
return &c->fwnode;
}
+static struct fwnode_handle *
+software_node_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
+{
+ struct software_node *swnode = to_software_node(fwnode);
+ const struct property_entry *prop;
+ struct software_node *child;
+
+ if (!swnode || list_empty(&swnode->children))
+ return NULL;
+
+ list_for_each_entry(child, &swnode->children, entry) {
+ prop = property_entry_get(child->properties, "name");
+ if (!prop)
+ continue;
+ if (!strcmp(childname, prop->value.str)) {
+ kobject_get(&child->kobj);
+ return &child->fwnode;
+ }
+ }
+ return NULL;
+}
static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
@@ -508,6 +530,7 @@ static const struct fwnode_operations software_node_ops = {
.property_read_string_array = software_node_read_string_array,
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
+ .get_named_child_node = software_node_get_named_child_node,
};
/* -------------------------------------------------------------------------- */
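
With the .get_named_child_node hook added above, the generic fwnode lookups also resolve named children of software nodes. A hedged consumer sketch; the "sensor" child name and "reg" property are invented for illustration:

#include <linux/property.h>

static int demo_read_child_prop(struct device *dev)
{
	struct fwnode_handle *child;
	u32 val;
	int ret;

	/* For software nodes this goes through software_node_get_named_child_node(). */
	child = device_get_named_child_node(dev, "sensor");
	if (!child)
		return -ENODEV;

	ret = fwnode_property_read_u32(child, "reg", &val);
	fwnode_handle_put(child);
	return ret;
}
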
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index e7f145d662f0..f4b1d8e54daf 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -11,16 +11,47 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/time.h>
+#include <linux/numa.h>
+#include <linux/nodemask.h>
+#include <linux/topology.h>
#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
+static atomic_t warnings, errors, timeout, async_completed;
+
static int test_probe(struct platform_device *pdev)
{
- dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
- TEST_PROBE_DELAY);
- msleep(TEST_PROBE_DELAY);
- dev_info(&pdev->dev, "done sleeping\n");
+ struct device *dev = &pdev->dev;
+
+ /*
+ * Determine if we have hit the "timeout" limit for the test. If we
+ * have, report it as an error; otherwise sleep for the required
+ * amount of time and then report completion.
+ */
+ if (atomic_read(&timeout)) {
+ dev_err(dev, "async probe took too long\n");
+ atomic_inc(&errors);
+ } else {
+ dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n",
+ TEST_PROBE_DELAY);
+ msleep(TEST_PROBE_DELAY);
+ dev_dbg(&pdev->dev, "done sleeping\n");
+ }
+
+ /*
+ * Report NUMA mismatch if device node is set and we are not
+ * performing an async init on that node.
+ */
+ if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
+ if (dev_to_node(dev) != numa_node_id()) {
+ dev_warn(dev, "NUMA node mismatch %d != %d\n",
+ dev_to_node(dev), numa_node_id());
+ atomic_inc(&warnings);
+ }
+
+ atomic_inc(&async_completed);
+ }
return 0;
}
@@ -41,31 +72,64 @@ static struct platform_driver sync_driver = {
.probe = test_probe,
};
-static struct platform_device *async_dev_1, *async_dev_2;
-static struct platform_device *sync_dev_1;
+static struct platform_device *async_dev[NR_CPUS * 2];
+static struct platform_device *sync_dev[2];
+
+static struct platform_device *
+test_platform_device_register_node(char *name, int id, int nid)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_alloc(name, id);
+ if (!pdev)
+ return NULL;
+
+ if (nid != NUMA_NO_NODE)
+ set_dev_node(&pdev->dev, nid);
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
+
+ return pdev;
+
+}
static int __init test_async_probe_init(void)
{
- ktime_t calltime, delta;
+ struct platform_device **pdev = NULL;
+ int async_id = 0, sync_id = 0;
unsigned long long duration;
- int error;
+ ktime_t calltime, delta;
+ int err, nid, cpu;
+
+ pr_info("registering first set of asynchronous devices...\n");
- pr_info("registering first asynchronous device...\n");
+ for_each_online_cpu(cpu) {
+ nid = cpu_to_node(cpu);
+ pdev = &async_dev[async_id];
+ *pdev = test_platform_device_register_node("test_async_driver",
+ async_id,
+ nid);
+ if (IS_ERR(*pdev)) {
+ err = PTR_ERR(*pdev);
+ *pdev = NULL;
+ pr_err("failed to create async_dev: %d\n", err);
+ goto err_unregister_async_devs;
+ }
- async_dev_1 = platform_device_register_simple("test_async_driver", 1,
- NULL, 0);
- if (IS_ERR(async_dev_1)) {
- error = PTR_ERR(async_dev_1);
- pr_err("failed to create async_dev_1: %d\n", error);
- return error;
+ async_id++;
}
pr_info("registering asynchronous driver...\n");
calltime = ktime_get();
- error = platform_driver_register(&async_driver);
- if (error) {
- pr_err("Failed to register async_driver: %d\n", error);
- goto err_unregister_async_dev_1;
+ err = platform_driver_register(&async_driver);
+ if (err) {
+ pr_err("Failed to register async_driver: %d\n", err);
+ goto err_unregister_async_devs;
}
delta = ktime_sub(ktime_get(), calltime);
@@ -73,86 +137,163 @@ static int __init test_async_probe_init(void)
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
- error = -ETIMEDOUT;
+ err = -ETIMEDOUT;
goto err_unregister_async_driver;
}
- pr_info("registering second asynchronous device...\n");
+ pr_info("registering second set of asynchronous devices...\n");
calltime = ktime_get();
- async_dev_2 = platform_device_register_simple("test_async_driver", 2,
- NULL, 0);
- if (IS_ERR(async_dev_2)) {
- error = PTR_ERR(async_dev_2);
- pr_err("failed to create async_dev_2: %d\n", error);
- goto err_unregister_async_driver;
+ for_each_online_cpu(cpu) {
+ nid = cpu_to_node(cpu);
+ pdev = &sync_dev[sync_id];
+
+ *pdev = test_platform_device_register_node("test_async_driver",
+ async_id,
+ nid);
+ if (IS_ERR(*pdev)) {
+ err = PTR_ERR(*pdev);
+ *pdev = NULL;
+ pr_err("failed to create async_dev: %d\n", err);
+ goto err_unregister_async_driver;
+ }
+
+ async_id++;
}
delta = ktime_sub(ktime_get(), calltime);
duration = (unsigned long long) ktime_to_ms(delta);
- pr_info("registration took %lld msecs\n", duration);
+ dev_info(&(*pdev)->dev,
+ "registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
- pr_err("test failed: probe took too long\n");
- error = -ETIMEDOUT;
- goto err_unregister_async_dev_2;
+ dev_err(&(*pdev)->dev,
+ "test failed: probe took too long\n");
+ err = -ETIMEDOUT;
+ goto err_unregister_async_driver;
}
- pr_info("registering synchronous driver...\n");
- error = platform_driver_register(&sync_driver);
- if (error) {
- pr_err("Failed to register async_driver: %d\n", error);
- goto err_unregister_async_dev_2;
+ pr_info("registering first synchronous device...\n");
+ nid = cpu_to_node(cpu);
+ pdev = &sync_dev[sync_id];
+
+ *pdev = test_platform_device_register_node("test_sync_driver",
+ sync_id,
+ NUMA_NO_NODE);
+ if (IS_ERR(*pdev)) {
+ err = PTR_ERR(*pdev);
+ *pdev = NULL;
+ pr_err("failed to create sync_dev: %d\n", err);
+ goto err_unregister_async_driver;
}
- pr_info("registering synchronous device...\n");
+ sync_id++;
+
+ pr_info("registering synchronous driver...\n");
calltime = ktime_get();
- sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
- NULL, 0);
- if (IS_ERR(sync_dev_1)) {
- error = PTR_ERR(sync_dev_1);
- pr_err("failed to create sync_dev_1: %d\n", error);
- goto err_unregister_sync_driver;
+ err = platform_driver_register(&sync_driver);
+ if (err) {
+ pr_err("Failed to register async_driver: %d\n", err);
+ goto err_unregister_sync_devs;
}
delta = ktime_sub(ktime_get(), calltime);
duration = (unsigned long long) ktime_to_ms(delta);
pr_info("registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
- pr_err("test failed: probe was too quick\n");
- error = -ETIMEDOUT;
- goto err_unregister_sync_dev_1;
+ dev_err(&(*pdev)->dev,
+ "test failed: probe was too quick\n");
+ err = -ETIMEDOUT;
+ goto err_unregister_sync_driver;
}
- pr_info("completed successfully");
+ pr_info("registering second synchronous device...\n");
+ pdev = &sync_dev[sync_id];
+ calltime = ktime_get();
- return 0;
+ *pdev = test_platform_device_register_node("test_sync_driver",
+ sync_id,
+ NUMA_NO_NODE);
+ if (IS_ERR(*pdev)) {
+ err = PTR_ERR(*pdev);
+ *pdev = NULL;
+ pr_err("failed to create sync_dev: %d\n", err);
+ goto err_unregister_sync_driver;
+ }
-err_unregister_sync_dev_1:
- platform_device_unregister(sync_dev_1);
+ sync_id++;
-err_unregister_sync_driver:
- platform_driver_unregister(&sync_driver);
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ dev_info(&(*pdev)->dev,
+ "registration took %lld msecs\n", duration);
+ if (duration < TEST_PROBE_THRESHOLD) {
+ dev_err(&(*pdev)->dev,
+ "test failed: probe was too quick\n");
+ err = -ETIMEDOUT;
+ goto err_unregister_sync_driver;
+ }
-err_unregister_async_dev_2:
- platform_device_unregister(async_dev_2);
+ /*
+ * The async events should have completed while we were taking care
+ * of the synchronous events. We will now terminate any outstanding
+ * asynchronous probe calls by forcing a timeout and removing the
+ * driver before we return, which should flush any pending
+ * asynchronous probe calls.
+ *
+ * Otherwise, if they completed without errors or warnings, then
+ * report successful completion.
+ */
+ if (atomic_read(&async_completed) != async_id) {
+ pr_err("async events still pending, forcing timeout\n");
+ atomic_inc(&timeout);
+ err = -ETIMEDOUT;
+ } else if (!atomic_read(&errors) && !atomic_read(&warnings)) {
+ pr_info("completed successfully\n");
+ return 0;
+ }
+err_unregister_sync_driver:
+ platform_driver_unregister(&sync_driver);
+err_unregister_sync_devs:
+ while (sync_id--)
+ platform_device_unregister(sync_dev[sync_id]);
err_unregister_async_driver:
platform_driver_unregister(&async_driver);
+err_unregister_async_devs:
+ while (async_id--)
+ platform_device_unregister(async_dev[async_id]);
+
+ /*
+ * If err is already set then count that as an additional error for
+ * the test. Otherwise report an invalid argument error, but do not
+ * count it, since we should only have reached this point because
+ * errors or warnings were reported by the probe routine.
+ */
+ if (err)
+ atomic_inc(&errors);
+ else
+ err = -EINVAL;
-err_unregister_async_dev_1:
- platform_device_unregister(async_dev_1);
+ pr_err("Test failed with %d errors and %d warnings\n",
+ atomic_read(&errors), atomic_read(&warnings));
- return error;
+ return err;
}
module_init(test_async_probe_init);
static void __exit test_async_probe_exit(void)
{
+ int id = 2;
+
platform_driver_unregister(&async_driver);
platform_driver_unregister(&sync_driver);
- platform_device_unregister(async_dev_1);
- platform_device_unregister(async_dev_2);
- platform_device_unregister(sync_dev_1);
+
+ while (id--)
+ platform_device_unregister(sync_dev[id]);
+
+ id = NR_CPUS * 2;
+ while (id--)
+ platform_device_unregister(async_dev[id]);
}
module_exit(test_async_probe_exit);
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index a4aac370f21f..6eded32d1aac 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -10,13 +10,13 @@
#include <linux/delay.h>
#define bcma_err(bus, fmt, ...) \
- pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ dev_err((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
- pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ dev_warn((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_info(bus, fmt, ...) \
- pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ dev_info((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_debug(bus, fmt, ...) \
- pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ dev_dbg((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
struct bcma_bus;
@@ -33,7 +33,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus);
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
-struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
/* scan.c */
void bcma_detect_chip(struct bcma_bus *bus);
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 2c0ffb77d738..a5df3d111334 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -183,7 +183,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->owner = THIS_MODULE;
- chip->parent = bcma_bus_get_host_dev(bus);
+ chip->parent = bus->dev;
#if IS_BUILTIN(CONFIG_OF)
chip->of_node = cc->core->dev.of_node;
#endif
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 63410ecfe640..f52239feb4cb 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -196,6 +196,8 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
goto err_pci_release_regions;
}
+ bus->dev = &dev->dev;
+
/* Map MMIO */
err = -ENOMEM;
bus->mmio = pci_iomap(dev, 0, ~0UL);
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 2dce34789329..c8073b509a2b 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -179,7 +179,6 @@ int __init bcma_host_soc_register(struct bcma_soc *soc)
/* Host specific */
bus->hosttype = BCMA_HOSTTYPE_SOC;
bus->ops = &bcma_host_soc_ops;
- bus->host_pdev = NULL;
/* Initialize struct, detect chip */
bcma_init_bus(bus);
@@ -213,6 +212,8 @@ static int bcma_host_soc_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
+ bus->dev = dev;
+
/* Map MMIO */
bus->mmio = of_iomap(np, 0);
if (!bus->mmio)
@@ -221,7 +222,6 @@ static int bcma_host_soc_probe(struct platform_device *pdev)
/* Host specific */
bus->hosttype = BCMA_HOSTTYPE_SOC;
bus->ops = &bcma_host_soc_ops;
- bus->host_pdev = pdev;
/* Initialize struct, detect chip */
bcma_init_bus(bus);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index fc1f4acdd189..6535614a7dc1 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -223,8 +223,8 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num)
mips_irq = bcma_core_mips_irq(core);
return mips_irq <= 4 ? mips_irq + 2 : 0;
}
- if (bus->host_pdev)
- return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
+ if (bus->dev)
+ return bcma_of_get_irq(bus->dev, core, num);
return 0;
case BCMA_HOSTTYPE_SDIO:
return 0;
@@ -239,18 +239,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
- core->dev.parent = bcma_bus_get_host_dev(bus);
- if (core->dev.parent)
- bcma_of_fill_device(core->dev.parent, core);
+ core->dev.parent = bus->dev;
+ if (bus->dev)
+ bcma_of_fill_device(bus->dev, core);
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
- core->dma_dev = &bus->host_pci->dev;
+ core->dma_dev = bus->dev;
core->irq = bus->host_pci->irq;
break;
case BCMA_HOSTTYPE_SOC:
- if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
- core->dma_dev = &bus->host_pdev->dev;
+ if (IS_ENABLED(CONFIG_OF) && bus->dev) {
+ core->dma_dev = bus->dev;
} else {
core->dev.dma_mask = &core->dev.coherent_dma_mask;
core->dma_dev = &core->dev;
@@ -261,28 +261,6 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
}
}
-struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
-{
- switch (bus->hosttype) {
- case BCMA_HOSTTYPE_PCI:
- if (bus->host_pci)
- return &bus->host_pci->dev;
- else
- return NULL;
- case BCMA_HOSTTYPE_SOC:
- if (bus->host_pdev)
- return &bus->host_pdev->dev;
- else
- return NULL;
- case BCMA_HOSTTYPE_SDIO:
- if (bus->host_sdio)
- return &bus->host_sdio->dev;
- else
- return NULL;
- }
- return NULL;
-}
-
void bcma_init_bus(struct bcma_bus *bus)
{
mutex_lock(&bcma_buses_mutex);
@@ -402,7 +380,6 @@ int bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
- struct device *dev;
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
@@ -425,10 +402,8 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
- dev = bcma_bus_get_host_dev(bus);
- if (dev) {
- of_platform_default_populate(dev->of_node, NULL, dev);
- }
+ if (bus->dev)
+ of_platform_default_populate(bus->dev->of_node, NULL, bus->dev);
/* Cores providing flash access go before SPROM init */
list_for_each_entry(core, &bus->cores, list) {
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 6f2856c6d0f2..55481b40df9a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
if (lock_fdc(drive))
- return -EINTR;
+ return 0;
poll_drive(false, 0);
process_fd_request();
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b8a0720d3653..cf5538942834 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1190,6 +1190,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
goto out_unlock;
}
+ if (lo->lo_offset != info->lo_offset ||
+ lo->lo_sizelimit != info->lo_sizelimit) {
+ sync_blockdev(lo->lo_device);
+ kill_bdev(lo->lo_device);
+ }
+
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
@@ -1218,6 +1224,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
+ /* kill_bdev should have truncated all the pages */
+ if (lo->lo_device->bd_inode->i_mapping->nrpages) {
+ err = -EAGAIN;
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ __func__, lo->lo_number, lo->lo_file_name,
+ lo->lo_device->bd_inode->i_mapping->nrpages);
+ goto out_unfreeze;
+ }
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto out_unfreeze;
@@ -1443,22 +1457,39 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
+ int err = 0;
+
if (lo->lo_state != Lo_bound)
return -ENXIO;
if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
return -EINVAL;
+ if (lo->lo_queue->limits.logical_block_size != arg) {
+ sync_blockdev(lo->lo_device);
+ kill_bdev(lo->lo_device);
+ }
+
blk_mq_freeze_queue(lo->lo_queue);
+ /* kill_bdev should have truncated all the pages */
+ if (lo->lo_queue->limits.logical_block_size != arg &&
+ lo->lo_device->bd_inode->i_mapping->nrpages) {
+ err = -EAGAIN;
+ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+ __func__, lo->lo_number, lo->lo_file_name,
+ lo->lo_device->bd_inode->i_mapping->nrpages);
+ goto out_unfreeze;
+ }
+
blk_queue_logical_block_size(lo->lo_queue, arg);
blk_queue_physical_block_size(lo->lo_queue, arg);
blk_queue_io_min(lo->lo_queue, arg);
loop_update_dio(lo);
-
+out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
- return 0;
+ return err;
}
static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
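
Because loop_set_block_size() can now return -EAGAIN when the page cache could not be fully truncated, userspace may want to retry. A hedged userspace sketch; the device path and retry policy are examples, not part of this patch:

#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

static int set_loop_block_size(const char *path, unsigned long bsize)
{
	int fd = open(path, O_RDWR);
	int ret = -1;

	if (fd < 0)
		return -1;

	/* Retry a few times if the kernel reports EAGAIN (dirty pages). */
	for (int i = 0; i < 3; i++) {
		ret = ioctl(fd, LOOP_SET_BLOCK_SIZE, bsize);
		if (ret == 0 || errno != EAGAIN)
			break;
		usleep(100000);
	}
	close(fd);
	return ret;
}

/* e.g. set_loop_block_size("/dev/loop0", 4096); */
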
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 88e8440e75c3..2f3ee4d6af82 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -40,6 +40,7 @@
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/prefetch.h>
+#include <linux/numa.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -4018,9 +4019,9 @@ static int get_least_used_cpu_on_node(int node)
/* Helper for selecting a node in round robin mode */
static inline int mtip_get_next_rr_node(void)
{
- static int next_node = -1;
+ static int next_node = NUMA_NO_NODE;
- if (next_node == -1) {
+ if (next_node == NUMA_NO_NODE) {
next_node = first_online_node;
return next_node;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08696f5f00bb..7c9a949e876b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
set_capacity(nbd->disk, config->bytesize >> 9);
if (bdev) {
- if (bdev->bd_disk)
+ if (bdev->bd_disk) {
bd_set_size(bdev, config->bytesize);
- else
+ set_blocksize(bdev, config->blksize);
+ } else
bdev->bd_invalidated = 1;
bdput(bdev);
}
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index b3df2793e7cd..34b22d6523ba 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -97,6 +97,7 @@ void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
+ pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n");
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e5140bbf241..282e2e82d849 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -428,14 +428,13 @@ static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
-static ssize_t rbd_add(struct bus_type *bus, const char *buf,
- size_t count);
-static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
- size_t count);
-static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
- size_t count);
-static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
- size_t count);
+static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
+static ssize_t remove_store(struct bus_type *bus, const char *buf,
+ size_t count);
+static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
+ size_t count);
+static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
+ size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
@@ -464,16 +463,16 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
return is_lock_owner;
}
-static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
+static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
-static BUS_ATTR(add, 0200, NULL, rbd_add);
-static BUS_ATTR(remove, 0200, NULL, rbd_remove);
-static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major);
-static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major);
-static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL);
+static BUS_ATTR_WO(add);
+static BUS_ATTR_WO(remove);
+static BUS_ATTR_WO(add_single_major);
+static BUS_ATTR_WO(remove_single_major);
+static BUS_ATTR_RO(supported_features);
static struct attribute *rbd_bus_attrs[] = {
&bus_attr_add.attr,
@@ -5934,9 +5933,7 @@ err_out_args:
goto out;
}
-static ssize_t rbd_add(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
if (single_major)
return -EINVAL;
@@ -5944,9 +5941,8 @@ static ssize_t rbd_add(struct bus_type *bus,
return do_rbd_add(bus, buf, count);
}
-static ssize_t rbd_add_single_major(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
+ size_t count)
{
return do_rbd_add(bus, buf, count);
}
@@ -5986,7 +5982,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
struct list_head *tmp;
int dev_id;
char opt_buf[6];
- bool already = false;
bool force = false;
int ret;
@@ -6019,13 +6014,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
spin_lock_irq(&rbd_dev->lock);
if (rbd_dev->open_count && !force)
ret = -EBUSY;
- else
- already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
- &rbd_dev->flags);
+ else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
+ &rbd_dev->flags))
+ ret = -EINPROGRESS;
spin_unlock_irq(&rbd_dev->lock);
}
spin_unlock(&rbd_dev_list_lock);
- if (ret < 0 || already)
+ if (ret)
return ret;
if (force) {
@@ -6050,9 +6045,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
return count;
}
-static ssize_t rbd_remove(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
{
if (single_major)
return -EINVAL;
@@ -6060,9 +6053,8 @@ static ssize_t rbd_remove(struct bus_type *bus,
return do_rbd_remove(bus, buf, count);
}
-static ssize_t rbd_remove_single_major(struct bus_type *bus,
- const char *buf,
- size_t count)
+static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
+ size_t count)
{
return do_rbd_remove(bus, buf, count);
}
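The rbd rename works because BUS_ATTR_WO()/BUS_ATTR_RO() derive both the attribute name and the expected callback name from their argument; a kernel-style sketch of the convention (not a standalone program, and the frob name is invented for illustration):

    #include <linux/device.h>

    /* BUS_ATTR_WO(frob) emits a write-only (0200) struct bus_attribute
     * named bus_attr_frob and expects the store callback to be called
     * exactly frob_store(). */
    static ssize_t frob_store(struct bus_type *bus, const char *buf,
                              size_t count)
    {
            /* parse and act on the written value here */
            return count;
    }
    static BUS_ATTR_WO(frob);

    static struct attribute *example_bus_attrs[] = {
            &bus_attr_frob.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(example_bus);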
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a10d5736d8f7..ab893a7571a2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
"comp pci_alloc, total bytes %zd entries %d\n",
SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
- skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
- &skdev->cq_dma_address, GFP_KERNEL);
+ skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+ &skdev->cq_dma_address, GFP_KERNEL);
if (skcomp == NULL) {
rc = -ENOMEM;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 33c5cc879f24..04ca65912638 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -316,11 +316,9 @@ static ssize_t idle_store(struct device *dev,
* See the comment in writeback_store.
*/
zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB))
- goto next;
- zram_set_flag(zram, index, ZRAM_IDLE);
-next:
+ if (zram_allocated(zram, index) &&
+ !zram_test_flag(zram, index, ZRAM_UNDER_WB))
+ zram_set_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
}
@@ -330,6 +328,41 @@ next:
}
#ifdef CONFIG_ZRAM_WRITEBACK
+static ssize_t writeback_limit_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 val;
+ ssize_t ret = -EINVAL;
+
+ if (kstrtoull(buf, 10, &val))
+ return ret;
+
+ down_read(&zram->init_lock);
+ spin_lock(&zram->wb_limit_lock);
+ zram->wb_limit_enable = val;
+ spin_unlock(&zram->wb_limit_lock);
+ up_read(&zram->init_lock);
+ ret = len;
+
+ return ret;
+}
+
+static ssize_t writeback_limit_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ bool val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ spin_lock(&zram->wb_limit_lock);
+ val = zram->wb_limit_enable;
+ spin_unlock(&zram->wb_limit_lock);
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
static ssize_t writeback_limit_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
@@ -341,9 +374,9 @@ static ssize_t writeback_limit_store(struct device *dev,
return ret;
down_read(&zram->init_lock);
- atomic64_set(&zram->stats.bd_wb_limit, val);
- if (val == 0)
- zram->stop_writeback = false;
+ spin_lock(&zram->wb_limit_lock);
+ zram->bd_wb_limit = val;
+ spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
ret = len;
@@ -357,7 +390,9 @@ static ssize_t writeback_limit_show(struct device *dev,
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- val = atomic64_read(&zram->stats.bd_wb_limit);
+ spin_lock(&zram->wb_limit_lock);
+ val = zram->bd_wb_limit;
+ spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
@@ -588,8 +623,8 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
return 1;
}
-#define HUGE_WRITEBACK 0x1
-#define IDLE_WRITEBACK 0x2
+#define HUGE_WRITEBACK 1
+#define IDLE_WRITEBACK 2
static ssize_t writeback_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
@@ -602,7 +637,7 @@ static ssize_t writeback_store(struct device *dev,
struct page *page;
ssize_t ret, sz;
char mode_buf[8];
- unsigned long mode = -1UL;
+ int mode = -1;
unsigned long blk_idx = 0;
sz = strscpy(mode_buf, buf, sizeof(mode_buf));
@@ -618,7 +653,7 @@ static ssize_t writeback_store(struct device *dev,
else if (!strcmp(mode_buf, "huge"))
mode = HUGE_WRITEBACK;
- if (mode == -1UL)
+ if (mode == -1)
return -EINVAL;
down_read(&zram->init_lock);
@@ -645,10 +680,13 @@ static ssize_t writeback_store(struct device *dev,
bvec.bv_len = PAGE_SIZE;
bvec.bv_offset = 0;
- if (zram->stop_writeback) {
+ spin_lock(&zram->wb_limit_lock);
+ if (zram->wb_limit_enable && !zram->bd_wb_limit) {
+ spin_unlock(&zram->wb_limit_lock);
ret = -EIO;
break;
}
+ spin_unlock(&zram->wb_limit_lock);
if (!blk_idx) {
blk_idx = alloc_block_bdev(zram);
@@ -667,10 +705,11 @@ static ssize_t writeback_store(struct device *dev,
zram_test_flag(zram, index, ZRAM_UNDER_WB))
goto next;
- if ((mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE)) &&
- (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE)))
+ if (mode == IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode == HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
goto next;
/*
* Clearing ZRAM_UNDER_WB is duty of caller.
@@ -732,11 +771,10 @@ static ssize_t writeback_store(struct device *dev,
zram_set_element(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
- if (atomic64_add_unless(&zram->stats.bd_wb_limit,
- -1 << (PAGE_SHIFT - 12), 0)) {
- if (atomic64_read(&zram->stats.bd_wb_limit) == 0)
- zram->stop_writeback = true;
- }
+ spin_lock(&zram->wb_limit_lock);
+ if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+ zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+ spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
}
@@ -1812,6 +1850,7 @@ static DEVICE_ATTR_RW(comp_algorithm);
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
+static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
static struct attribute *zram_disk_attrs[] = {
@@ -1828,6 +1867,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_backing_dev.attr,
&dev_attr_writeback.attr,
&dev_attr_writeback_limit.attr,
+ &dev_attr_writeback_limit_enable.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
@@ -1867,7 +1907,9 @@ static int zram_add(void)
device_id = ret;
init_rwsem(&zram->init_lock);
-
+#ifdef CONFIG_ZRAM_WRITEBACK
+ spin_lock_init(&zram->wb_limit_lock);
+#endif
queue = blk_alloc_queue(GFP_KERNEL);
if (!queue) {
pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 4bd3afd15e83..f2fd46daa760 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -86,7 +86,6 @@ struct zram_stats {
atomic64_t bd_count; /* no. of pages in backing device */
atomic64_t bd_reads; /* no. of reads from backing device */
atomic64_t bd_writes; /* no. of writes from backing device */
- atomic64_t bd_wb_limit; /* writeback limit of backing device */
#endif
};
@@ -114,8 +113,10 @@ struct zram {
*/
bool claim; /* Protected by bdev->bd_mutex */
struct file *backing_dev;
- bool stop_writeback;
#ifdef CONFIG_ZRAM_WRITEBACK
+ spinlock_t wb_limit_lock;
+ bool wb_limit_enable;
+ u64 bd_wb_limit;
struct block_device *bdev;
unsigned int old_block_size;
unsigned long *bitmap;
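A compressed user-space analogue of the new zram accounting: the enable flag and the remaining budget live under one lock, a writeback is refused once the budget runs out while the limit is enabled, and each successful writeback charges the budget. The driver does the check and the charge in two separate critical sections around the actual I/O; names and numbers here are illustrative only.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t wb_limit_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool wb_limit_enable = true;
    static uint64_t bd_wb_limit = 3;        /* remaining 4K-page budget */

    /* Refuse the writeback once the budget is gone while the limit is on;
     * otherwise charge one page against it. */
    static int try_writeback_one_page(void)
    {
            int ret = 0;

            pthread_mutex_lock(&wb_limit_lock);
            if (wb_limit_enable && bd_wb_limit == 0)
                    ret = -1;               /* -EIO in the driver */
            else if (wb_limit_enable)
                    bd_wb_limit--;
            pthread_mutex_unlock(&wb_limit_lock);
            return ret;
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    printf("writeback %d -> %s\n", i,
                           try_writeback_one_page() ? "refused" : "done");
            return 0;
    }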
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 845b0314ce3a..7b2e76e7f22f 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -336,7 +336,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8997.
+ Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8997.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -350,7 +350,7 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8997
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997
chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index f0454541e5fd..fb7729779166 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -24,11 +24,9 @@
#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index fb3d03928460..047b75ce1deb 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -62,13 +62,14 @@ static const struct of_device_id btmrvl_sdio_of_match_table[] = {
static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv)
{
struct btmrvl_sdio_card *card = priv;
+ struct device *dev = &card->func->dev;
struct btmrvl_plt_wake_cfg *cfg = card->plt_wake_cfg;
- pr_info("%s: wake by bt\n", __func__);
+ dev_info(dev, "wake by bt\n");
cfg->wake_by_bt = true;
disable_irq_nosync(irq);
- pm_wakeup_event(&card->func->dev, 0);
+ pm_wakeup_event(dev, 0);
pm_system_wakeup();
return IRQ_HANDLED;
@@ -87,7 +88,7 @@ static int btmrvl_sdio_probe_of(struct device *dev,
if (!dev->of_node ||
!of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) {
- pr_err("sdio platform data not available\n");
+ dev_info(dev, "sdio device tree data not available\n");
return -1;
}
@@ -211,6 +212,29 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = {
.fw_dump_end = 0xea,
};
+static const struct btmrvl_sdio_card_reg btmrvl_reg_8977 = {
+ .cfg = 0x00,
+ .host_int_mask = 0x08,
+ .host_intstatus = 0x0c,
+ .card_status = 0x5c,
+ .sq_read_base_addr_a0 = 0xf8,
+ .sq_read_base_addr_a1 = 0xf9,
+ .card_revision = 0xc8,
+ .card_fw_status0 = 0xe8,
+ .card_fw_status1 = 0xe9,
+ .card_rx_len = 0xea,
+ .card_rx_unit = 0xeb,
+ .io_port_0 = 0xe4,
+ .io_port_1 = 0xe5,
+ .io_port_2 = 0xe6,
+ .int_read_to_clear = true,
+ .host_int_rsr = 0x04,
+ .card_misc_cfg = 0xD8,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+};
+
static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
.cfg = 0x00,
.host_int_mask = 0x08,
@@ -279,6 +303,15 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.supports_fw_dump = true,
};
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8977_uapsta.bin",
+ .reg = &btmrvl_reg_8977,
+ .support_pscan_win_report = true,
+ .sd_blksz_fw_dl = 256,
+ .supports_fw_dump = true,
+};
+
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sd8997_uapsta.bin",
@@ -307,6 +340,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8897 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
.driver_data = (unsigned long)&btmrvl_sdio_sd8897 },
+ /* Marvell SD8977 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9146),
+ .driver_data = (unsigned long)&btmrvl_sdio_sd8977 },
/* Marvell SD8997 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
@@ -1760,4 +1796,5 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 4593baff2bc9..b0b680dd69f4 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -12,10 +12,15 @@
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
@@ -24,20 +29,38 @@
#include "h4_recv.h"
-#define VERSION "0.1"
+#define VERSION "0.2"
#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
+#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
+#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
#define MTK_STP_TLR_SIZE 2
#define BTMTKUART_TX_STATE_ACTIVE 1
#define BTMTKUART_TX_STATE_WAKEUP 2
#define BTMTKUART_TX_WAIT_VND_EVT 3
+#define BTMTKUART_REQUIRED_WAKEUP 4
+
+#define BTMTKUART_FLAG_STANDALONE_HW BIT(0)
enum {
MTK_WMT_PATCH_DWNLD = 0x1,
+ MTK_WMT_TEST = 0x2,
+ MTK_WMT_WAKEUP = 0x3,
+ MTK_WMT_HIF = 0x4,
MTK_WMT_FUNC_CTRL = 0x6,
- MTK_WMT_RST = 0x7
+ MTK_WMT_RST = 0x7,
+ MTK_WMT_SEMAPHORE = 0x17,
+};
+
+enum {
+ BTMTK_WMT_INVALID,
+ BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_DONE,
+ BTMTK_WMT_ON_UNDONE,
+ BTMTK_WMT_ON_DONE,
+ BTMTK_WMT_ON_PROGRESS,
};
struct mtk_stp_hdr {
@@ -46,6 +69,11 @@ struct mtk_stp_hdr {
u8 cs;
} __packed;
+struct btmtkuart_data {
+ unsigned int flags;
+ const char *fwname;
+};
+
struct mtk_wmt_hdr {
u8 dir;
u8 op;
@@ -58,41 +86,85 @@ struct mtk_hci_wmt_cmd {
u8 data[256];
} __packed;
+struct btmtk_hci_wmt_evt {
+ struct hci_event_hdr hhdr;
+ struct mtk_wmt_hdr whdr;
+} __packed;
+
+struct btmtk_hci_wmt_evt_funcc {
+ struct btmtk_hci_wmt_evt hwhdr;
+ __be16 status;
+} __packed;
+
+struct btmtk_tci_sleep {
+ u8 mode;
+ __le16 duration;
+ __le16 host_duration;
+ u8 host_wakeup_pin;
+ u8 time_compensation;
+} __packed;
+
+struct btmtk_hci_wmt_params {
+ u8 op;
+ u8 flag;
+ u16 dlen;
+ const void *data;
+ u32 *status;
+};
+
struct btmtkuart_dev {
struct hci_dev *hdev;
struct serdev_device *serdev;
struct clk *clk;
+ struct regulator *vcc;
+ struct gpio_desc *reset;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_runtime;
+ struct pinctrl_state *pins_boot;
+ speed_t desired_speed;
+ speed_t curr_speed;
+
struct work_struct tx_work;
unsigned long tx_state;
struct sk_buff_head txq;
struct sk_buff *rx_skb;
+ struct sk_buff *evt_skb;
u8 stp_pad[6];
u8 stp_cursor;
u16 stp_dlen;
+
+ const struct btmtkuart_data *data;
};
-static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
- const void *param)
+#define btmtkuart_is_standalone(bdev) \
+ ((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
+#define btmtkuart_is_builtin_soc(bdev) \
+ !((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
+
+static int mtk_hci_wmt_sync(struct hci_dev *hdev,
+ struct btmtk_hci_wmt_params *wmt_params)
{
struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ u32 hlen, status = BTMTK_WMT_INVALID;
+ struct btmtk_hci_wmt_evt *wmt_evt;
struct mtk_hci_wmt_cmd wc;
struct mtk_wmt_hdr *hdr;
- u32 hlen;
int err;
- hlen = sizeof(*hdr) + plen;
+ hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255)
return -EINVAL;
hdr = (struct mtk_wmt_hdr *)&wc;
hdr->dir = 1;
- hdr->op = op;
- hdr->dlen = cpu_to_le16(plen + 1);
- hdr->flag = flag;
- memcpy(wc.data, param, plen);
+ hdr->op = wmt_params->op;
+ hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
+ hdr->flag = wmt_params->flag;
+ memcpy(wc.data, wmt_params->data, wmt_params->dlen);
set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
@@ -107,7 +179,7 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
* Complete as with usual HCI command flow control.
*
* After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
- * state to be cleared. The driver speicfic event receive routine
+ * state to be cleared. The driver specific event receive routine
* will clear that state and with that indicate completion of the
* WMT command.
*/
@@ -115,26 +187,63 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
+ clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
return err;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
+ clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
return -ETIMEDOUT;
}
- return 0;
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+ bt_dev_err(hdev, "Wrong op received %d expected %d",
+ wmt_evt->whdr.op, hdr->op);
+ err = -EIO;
+ goto err_free_skb;
+ }
+
+ switch (wmt_evt->whdr.op) {
+ case MTK_WMT_SEMAPHORE:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_UNDONE;
+ else
+ status = BTMTK_WMT_PATCH_DONE;
+ break;
+ case MTK_WMT_FUNC_CTRL:
+ wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
+ if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
+ status = BTMTK_WMT_ON_DONE;
+ else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
+ status = BTMTK_WMT_ON_PROGRESS;
+ else
+ status = BTMTK_WMT_ON_UNDONE;
+ break;
+ }
+
+ if (wmt_params->status)
+ *wmt_params->status = status;
+
+err_free_skb:
+ kfree_skb(bdev->evt_skb);
+ bdev->evt_skb = NULL;
+
+ return err;
}
-static int mtk_setup_fw(struct hci_dev *hdev)
+static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
{
+ struct btmtk_hci_wmt_params wmt_params;
const struct firmware *fw;
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
u8 flag;
- err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev);
+ err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
return err;
@@ -153,6 +262,9 @@ static int mtk_setup_fw(struct hci_dev *hdev)
fw_ptr += 30;
flag = 1;
+ wmt_params.op = MTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
while (fw_size > 0) {
dlen = min_t(int, 250, fw_size);
@@ -162,18 +274,37 @@ static int mtk_setup_fw(struct hci_dev *hdev)
else if (fw_size < fw->size - 30)
flag = 2;
- err = mtk_hci_wmt_sync(hdev, MTK_WMT_PATCH_DWNLD, flag, dlen,
- fw_ptr);
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
err);
- break;
+ goto free_fw;
}
fw_size -= dlen;
fw_ptr += dlen;
}
+ wmt_params.op = MTK_WMT_RST;
+ wmt_params.flag = 4;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+ /* Activate the function the firmware provides */
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ goto free_fw;
+ }
+
+ /* Wait a few moments for firmware activation to complete */
+ usleep_range(10000, 12000);
+
free_fw:
release_firmware(fw);
return err;
@@ -192,7 +323,20 @@ static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
if (hdr->evt == 0xe4)
hdr->evt = HCI_EV_VENDOR;
+ /* When someone is waiting for the WMT event, clone the skb so the
+ * event can be processed from that copy.
+ */
+ if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
+ bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
+ if (!bdev->evt_skb) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ }
+
err = hci_recv_frame(hdev, skb);
+ if (err < 0)
+ goto err_free_skb;
if (hdr->evt == HCI_EV_VENDOR) {
if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
@@ -203,6 +347,13 @@ static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
}
}
+ return 0;
+
+err_free_skb:
+ kfree_skb(bdev->evt_skb);
+ bdev->evt_skb = NULL;
+
+err_out:
return err;
}
@@ -404,6 +555,23 @@ static int btmtkuart_open(struct hci_dev *hdev)
goto err_open;
}
+ if (btmtkuart_is_standalone(bdev)) {
+ if (bdev->curr_speed != bdev->desired_speed)
+ err = serdev_device_set_baudrate(bdev->serdev,
+ 115200);
+ else
+ err = serdev_device_set_baudrate(bdev->serdev,
+ bdev->desired_speed);
+
+ if (err < 0) {
+ bt_dev_err(hdev, "Unable to set baudrate UART device %s",
+ dev_name(&bdev->serdev->dev));
+ goto err_serdev_close;
+ }
+
+ serdev_device_set_flow_control(bdev->serdev, false);
+ }
+
bdev->stp_cursor = 2;
bdev->stp_dlen = 0;
@@ -427,6 +595,8 @@ err_put_rpm:
pm_runtime_put_sync(dev);
err_disable_rpm:
pm_runtime_disable(dev);
+err_serdev_close:
+ serdev_device_close(bdev->serdev);
err_open:
return err;
}
@@ -465,42 +635,222 @@ static int btmtkuart_flush(struct hci_dev *hdev)
return 0;
}
+static int btmtkuart_func_query(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ int status, err;
+ u8 param = 0;
+
+ /* Query whether the function is enabled */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 4;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query function status (%d)", err);
+ return err;
+ }
+
+ return status;
+}
+
+static int btmtkuart_change_baudrate(struct hci_dev *hdev)
+{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ u32 baudrate;
+ u8 param;
+ int err;
+
+ /* Tell the device that the host is ready to change to a new
+ * baudrate.
+ */
+ baudrate = cpu_to_le32(bdev->desired_speed);
+ wmt_params.op = MTK_WMT_HIF;
+ wmt_params.flag = 1;
+ wmt_params.dlen = 4;
+ wmt_params.data = &baudrate;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to device baudrate (%d)", err);
+ return err;
+ }
+
+ err = serdev_device_set_baudrate(bdev->serdev,
+ bdev->desired_speed);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to set up host baudrate (%d)",
+ err);
+ return err;
+ }
+
+ serdev_device_set_flow_control(bdev->serdev, false);
+
+ /* Send a dummy byte 0xff to activate the new baudrate */
+ param = 0xff;
+ err = serdev_device_write(bdev->serdev, &param, sizeof(param),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0 || err < sizeof(param))
+ return err;
+
+ serdev_device_wait_until_sent(bdev->serdev, 0);
+
+ /* Wait some time for the device to finish changing the baudrate */
+ usleep_range(20000, 22000);
+
+ /* Test the new baudrate */
+ wmt_params.op = MTK_WMT_TEST;
+ wmt_params.flag = 7;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to test new baudrate (%d)",
+ err);
+ return err;
+ }
+
+ bdev->curr_speed = bdev->desired_speed;
+
+ return 0;
+}
+
static int btmtkuart_setup(struct hci_dev *hdev)
{
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ ktime_t calltime, delta, rettime;
+ struct btmtk_tci_sleep tci_sleep;
+ unsigned long long duration;
+ struct sk_buff *skb;
+ int err, status;
u8 param = 0x1;
- int err = 0;
+
+ calltime = ktime_get();
+
+ /* Waking up the MCUSYS is required for certain devices before we
+ * start any setup.
+ */
+ if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
+ wmt_params.op = MTK_WMT_WAKEUP;
+ wmt_params.flag = 3;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err);
+ return err;
+ }
+
+ clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
+ }
+
+ if (btmtkuart_is_standalone(bdev))
+ btmtkuart_change_baudrate(hdev);
+
+ /* Query whether the firmware is already downloaded */
+ wmt_params.op = MTK_WMT_SEMAPHORE;
+ wmt_params.flag = 1;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
+ return err;
+ }
+
+ if (status == BTMTK_WMT_PATCH_DONE) {
+ bt_dev_info(hdev, "Firmware already downloaded");
+ goto ignore_setup_fw;
+ }
/* Setup a firmware which the device definitely requires */
- err = mtk_setup_fw(hdev);
+ err = mtk_setup_firmware(hdev, bdev->data->fwname);
if (err < 0)
return err;
- /* Activate function the firmware providing to */
- err = mtk_hci_wmt_sync(hdev, MTK_WMT_RST, 0x4, 0, 0);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ignore_setup_fw:
+ /* Query whether the device is already enabled */
+ err = readx_poll_timeout(btmtkuart_func_query, hdev, status,
+ status < 0 || status != BTMTK_WMT_ON_PROGRESS,
+ 2000, 5000000);
+ /* -ETIMEDOUT happens */
+ if (err < 0)
return err;
+
+ /* The other errors happen in btmtkuart_func_query */
+ if (status < 0)
+ return status;
+
+ if (status == BTMTK_WMT_ON_DONE) {
+ bt_dev_info(hdev, "function already on");
+ goto ignore_func_on;
}
/* Enable Bluetooth protocol */
- err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
- &param);
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
return err;
}
+ignore_func_on:
+ /* Apply the low power environment setup */
+ tci_sleep.mode = 0x5;
+ tci_sleep.duration = cpu_to_le16(0x640);
+ tci_sleep.host_duration = cpu_to_le16(0x640);
+ tci_sleep.host_wakeup_pin = 0;
+ tci_sleep.time_compensation = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Device setup in %llu usecs", duration);
+
return 0;
}
static int btmtkuart_shutdown(struct hci_dev *hdev)
{
+ struct btmtk_hci_wmt_params wmt_params;
u8 param = 0x0;
int err;
/* Disable the device */
- err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param),
- &param);
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
return err;
@@ -543,24 +893,82 @@ static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static int btmtkuart_parse_dt(struct serdev_device *serdev)
+{
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+ struct device_node *node = serdev->dev.of_node;
+ u32 speed = 921600;
+ int err;
+
+ if (btmtkuart_is_standalone(bdev)) {
+ of_property_read_u32(node, "current-speed", &speed);
+
+ bdev->desired_speed = speed;
+
+ bdev->vcc = devm_regulator_get(&serdev->dev, "vcc");
+ if (IS_ERR(bdev->vcc)) {
+ err = PTR_ERR(bdev->vcc);
+ return err;
+ }
+
+ bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
+ if (IS_ERR(bdev->pinctrl)) {
+ err = PTR_ERR(bdev->pinctrl);
+ return err;
+ }
+
+ bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
+ "default");
+ if (IS_ERR(bdev->pins_boot)) {
+ err = PTR_ERR(bdev->pins_boot);
+ return err;
+ }
+
+ bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl,
+ "runtime");
+ if (IS_ERR(bdev->pins_runtime)) {
+ err = PTR_ERR(bdev->pins_runtime);
+ return err;
+ }
+
+ bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(bdev->reset)) {
+ err = PTR_ERR(bdev->reset);
+ return err;
+ }
+ } else if (btmtkuart_is_builtin_soc(bdev)) {
+ bdev->clk = devm_clk_get(&serdev->dev, "ref");
+ if (IS_ERR(bdev->clk))
+ return PTR_ERR(bdev->clk);
+ }
+
+ return 0;
+}
+
static int btmtkuart_probe(struct serdev_device *serdev)
{
struct btmtkuart_dev *bdev;
struct hci_dev *hdev;
+ int err;
bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
if (!bdev)
return -ENOMEM;
- bdev->clk = devm_clk_get(&serdev->dev, "ref");
- if (IS_ERR(bdev->clk))
- return PTR_ERR(bdev->clk);
+ bdev->data = of_device_get_match_data(&serdev->dev);
+ if (!bdev->data)
+ return -ENODEV;
bdev->serdev = serdev;
serdev_device_set_drvdata(serdev, bdev);
serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);
+ err = btmtkuart_parse_dt(serdev);
+ if (err < 0)
+ return err;
+
INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
skb_queue_head_init(&bdev->txq);
@@ -587,13 +995,54 @@ static int btmtkuart_probe(struct serdev_device *serdev)
hdev->manufacturer = 70;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
- if (hci_register_dev(hdev) < 0) {
+ if (btmtkuart_is_standalone(bdev)) {
+ /* Switch to the specific pin state that booting requires */
+ pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
+
+ /* Power on */
+ err = regulator_enable(bdev->vcc);
+ if (err < 0)
+ return err;
+
+ /* Reset if a reset GPIO is available; otherwise the board-level
+ * design has to guarantee a clean power-on state.
+ */
+ if (bdev->reset) {
+ gpiod_set_value_cansleep(bdev->reset, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(bdev->reset, 0);
+ }
+
+ /* Wait some time until the device is ready, then switch to the
+ * pin state the device requires for UART transfers.
+ */
+ msleep(50);
+ pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);
+
+ /* A standalone device doesn't depend on a power domain on the SoC,
+ * so register it with no runtime PM callbacks.
+ */
+ pm_runtime_no_callbacks(&serdev->dev);
+
+ set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
+ }
+
+ err = hci_register_dev(hdev);
+ if (err < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
hci_free_dev(hdev);
- return -ENODEV;
+ goto err_regulator_disable;
}
return 0;
+
+err_regulator_disable:
+ if (btmtkuart_is_standalone(bdev)) {
+ pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
+ regulator_disable(bdev->vcc);
+ }
+
+ return err;
}
static void btmtkuart_remove(struct serdev_device *serdev)
@@ -601,13 +1050,34 @@ static void btmtkuart_remove(struct serdev_device *serdev)
struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
struct hci_dev *hdev = bdev->hdev;
+ if (btmtkuart_is_standalone(bdev)) {
+ pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
+ regulator_disable(bdev->vcc);
+ }
+
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
+static const struct btmtkuart_data mt7622_data = {
+ .fwname = FIRMWARE_MT7622,
+};
+
+static const struct btmtkuart_data mt7663_data = {
+ .flags = BTMTKUART_FLAG_STANDALONE_HW,
+ .fwname = FIRMWARE_MT7663,
+};
+
+static const struct btmtkuart_data mt7668_data = {
+ .flags = BTMTKUART_FLAG_STANDALONE_HW,
+ .fwname = FIRMWARE_MT7668,
+};
+
#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match_table[] = {
- { .compatible = "mediatek,mt7622-bluetooth"},
+ { .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data},
+ { .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data},
+ { .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_of_match_table);
@@ -629,3 +1099,5 @@ MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);
+MODULE_FIRMWARE(FIRMWARE_MT7663);
+MODULE_FIRMWARE(FIRMWARE_MT7668);
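btmtkuart_setup() above leans on readx_poll_timeout() to re-issue the function query until the controller leaves the ON_PROGRESS state or a timeout hits; a plain-C sketch of that poll-until-condition-or-timeout shape (the fake query and the timings are assumptions, not driver behaviour):

    #include <stdio.h>
    #include <unistd.h>

    enum { ON_PROGRESS, ON_DONE };

    /* Pretend query: reports "in progress" for the first few polls. */
    static int fake_func_query(void)
    {
            static int calls;

            return ++calls < 4 ? ON_PROGRESS : ON_DONE;
    }

    /* Re-run op() every sleep_us microseconds until the status leaves
     * ON_PROGRESS or timeout_us elapses, like readx_poll_timeout() does
     * in the driver. */
    static int poll_timeout(int (*op)(void), int *status,
                            unsigned long sleep_us, unsigned long timeout_us)
    {
            unsigned long waited = 0;

            for (;;) {
                    *status = op();
                    if (*status != ON_PROGRESS)
                            return 0;
                    if (waited >= timeout_us)
                            return -1;      /* -ETIMEDOUT in the kernel */
                    usleep(sleep_us);
                    waited += sleep_us;
            }
    }

    int main(void)
    {
            int status;

            if (poll_timeout(fake_func_query, &status, 2000, 5000000))
                    puts("timed out");
            else
                    printf("done, status=%d\n", status);
            return 0;
    }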
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index ec9e03a6b778..612268574fc7 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -391,6 +391,25 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
}
EXPORT_SYMBOL_GPL(qca_uart_setup);
+int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
+ HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca_set_bdaddr);
+
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 0c01f375fe83..c72c56ea7480 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -20,6 +20,7 @@
#define EDL_PATCH_CMD_OPCODE (0xFC00)
#define EDL_NVM_ACCESS_OPCODE (0xFC0B)
+#define EDL_WRITE_BD_ADDR_OPCODE (0xFC14)
#define EDL_PATCH_CMD_LEN (1)
#define EDL_PATCH_VER_REQ_CMD (0x19)
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
@@ -140,7 +141,7 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver);
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
-
+int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
#else
static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
@@ -159,4 +160,9 @@ static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version)
return -EOPNOTSUPP;
}
+static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 7df3eed1ef5e..e0d4c6f1d3ab 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -28,7 +28,6 @@
struct btqcomsmd {
struct hci_dev *hdev;
- bdaddr_t bdaddr;
struct rpmsg_endpoint *acl_channel;
struct rpmsg_endpoint *cmd_channel;
};
@@ -116,32 +115,17 @@ static int btqcomsmd_close(struct hci_dev *hdev)
static int btqcomsmd_setup(struct hci_dev *hdev)
{
- struct btqcomsmd *btq = hci_get_drvdata(hdev);
struct sk_buff *skb;
- int err;
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
kfree_skb(skb);
- /* Devices do not have persistent storage for BD address. If no
- * BD address has been retrieved during probe, mark the device
- * as having an invalid BD address.
- */
- if (!bacmp(&btq->bdaddr, BDADDR_ANY)) {
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
- return 0;
- }
-
- /* When setting a configured BD address fails, mark the device
- * as having an invalid BD address.
+ /* Devices do not have persistent storage for BD address. Retrieve
+ * it from the firmware node property.
*/
- err = qca_set_bdaddr_rome(hdev, &btq->bdaddr);
- if (err) {
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
- return 0;
- }
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
return 0;
}
@@ -169,15 +153,6 @@ static int btqcomsmd_probe(struct platform_device *pdev)
if (IS_ERR(btq->cmd_channel))
return PTR_ERR(btq->cmd_channel);
- /* The local-bd-address property is usually injected by the
- * bootloader which has access to the allocated BD address.
- */
- if (!of_property_read_u8_array(pdev->dev.of_node, "local-bd-address",
- (u8 *)&btq->bdaddr, sizeof(bdaddr_t))) {
- dev_info(&pdev->dev, "BD address %pMR retrieved from device-tree",
- &btq->bdaddr);
- }
-
hdev = hci_alloc_dev();
if (!hdev)
return -ENOMEM;
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 41405de27d66..c91bba00df4e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -552,10 +552,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hdev->bus);
if (!btrtl_dev->ic_info) {
- rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+ rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
lmp_subver, hci_rev, hci_ver);
- ret = -EINVAL;
- goto err_free;
+ return btrtl_dev;
}
if (btrtl_dev->ic_info->has_rom_version) {
@@ -610,6 +609,11 @@ int btrtl_download_firmware(struct hci_dev *hdev,
* standard btusb. Once that firmware is uploaded, the subver changes
* to a different value.
*/
+ if (!btrtl_dev->ic_info) {
+ rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
+ return 0;
+ }
+
switch (btrtl_dev->ic_info->lmp_subver) {
case RTL_ROM_LMP_8723A:
case RTL_ROM_LMP_3499:
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 4761499db9ee..ded198328f21 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -29,6 +29,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/suspend.h>
+#include <linux/gpio/consumer.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -439,6 +440,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
#define BTUSB_BOOTING 9
#define BTUSB_DIAG_RUNNING 10
#define BTUSB_OOB_WAKE_ENABLED 11
+#define BTUSB_HW_RESET_ACTIVE 12
struct btusb_data {
struct hci_dev *hdev;
@@ -476,6 +478,8 @@ struct btusb_data {
struct usb_endpoint_descriptor *diag_tx_ep;
struct usb_endpoint_descriptor *diag_rx_ep;
+ struct gpio_desc *reset_gpio;
+
__u8 cmdreq_type;
__u8 cmdreq;
@@ -489,8 +493,41 @@ struct btusb_data {
int (*setup_on_usb)(struct hci_dev *hdev);
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
+ unsigned cmd_timeout_cnt;
};
+
+static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct gpio_desc *reset_gpio = data->reset_gpio;
+
+ if (++data->cmd_timeout_cnt < 5)
+ return;
+
+ if (!reset_gpio) {
+ bt_dev_err(hdev, "No way to reset. Ignoring and continuing");
+ return;
+ }
+
+ /*
+ * Toggle the hard reset line if the platform provides one. The reset
+ * is going to yank the device off the USB and then replug. So doing
+ * once is enough. The cleanup is handled correctly on the way out
+ * (standard USB disconnect), and the new device is detected cleanly
+ * and bound to the driver again like it should be.
+ */
+ if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
+ bt_dev_err(hdev, "last reset failed? Not resetting again");
+ return;
+ }
+
+ bt_dev_err(hdev, "Initiating HW reset via gpio");
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ msleep(100);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+}
+
static inline void btusb_free_frags(struct btusb_data *data)
{
unsigned long flags;
@@ -2397,6 +2434,24 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
return 0;
}
+static int btusb_shutdown_intel_new(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Send HCI Reset to the controller to stop any ongoing BT activity.
+ * This helps save power and keeps the host and controller in
+ * sync.
+ */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "HCI reset during shutdown failed");
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
static int marvell_config_oob_wake(struct hci_dev *hdev)
@@ -2862,6 +2917,8 @@ static irqreturn_t btusb_oob_wake_handler(int irq, void *priv)
static const struct of_device_id btusb_match_table[] = {
{ .compatible = "usb1286,204e" },
+ { .compatible = "usbcf3,e300" }, /* QCA6174A */
+ { .compatible = "usb4ca,301a" }, /* QCA6174A (Lite-On) */
{ }
};
MODULE_DEVICE_TABLE(of, btusb_match_table);
@@ -2915,6 +2972,7 @@ static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_endpoint_descriptor *ep_desc;
+ struct gpio_desc *reset_gpio;
struct btusb_data *data;
struct hci_dev *hdev;
unsigned ifnum_base;
@@ -3028,6 +3086,15 @@ static int btusb_probe(struct usb_interface *intf,
SET_HCIDEV_DEV(hdev, &intf->dev);
+ reset_gpio = gpiod_get_optional(&data->udev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio)) {
+ err = PTR_ERR(reset_gpio);
+ goto out_free_dev;
+ } else if (reset_gpio) {
+ data->reset_gpio = reset_gpio;
+ }
+
hdev->open = btusb_open;
hdev->close = btusb_close;
hdev->flush = btusb_flush;
@@ -3082,6 +3149,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_shutdown_intel;
hdev->set_diag = btintel_set_diag_mfg;
hdev->set_bdaddr = btintel_set_bdaddr;
+ hdev->cmd_timeout = btusb_intel_cmd_timeout;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
@@ -3091,9 +3159,11 @@ static int btusb_probe(struct usb_interface *intf,
hdev->manufacturer = 2;
hdev->send = btusb_send_frame_intel;
hdev->setup = btusb_setup_intel_new;
+ hdev->shutdown = btusb_shutdown_intel_new;
hdev->hw_error = btintel_hw_error;
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
+ hdev->cmd_timeout = btusb_intel_cmd_timeout;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
@@ -3226,6 +3296,8 @@ static int btusb_probe(struct usb_interface *intf,
return 0;
out_free_dev:
+ if (data->reset_gpio)
+ gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
return err;
}
@@ -3269,6 +3341,9 @@ static void btusb_disconnect(struct usb_interface *intf)
if (data->oob_wake_irq)
device_init_wakeup(&data->udev->dev, false);
+ if (data->reset_gpio)
+ gpiod_put(data->reset_gpio);
+
hci_free_dev(hdev);
}
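A stand-alone sketch of the recovery policy btusb_intel_cmd_timeout() implements: tolerate a few command timeouts, then fire the hard reset exactly once, guarded by a test-and-set flag. The GPIO toggle is replaced by a print here; the threshold of five matches the driver, everything else is illustrative.

    #include <stdatomic.h>
    #include <stdio.h>

    static unsigned int cmd_timeout_cnt;
    static atomic_flag reset_active = ATOMIC_FLAG_INIT;

    /* Tolerate a few timeouts, then trigger the recovery exactly once. */
    static void cmd_timeout(void)
    {
            if (++cmd_timeout_cnt < 5)
                    return;

            if (atomic_flag_test_and_set(&reset_active)) {
                    puts("last reset failed? not resetting again");
                    return;
            }
            puts("initiating HW reset");    /* the driver toggles a GPIO here */
    }

    int main(void)
    {
            for (int i = 0; i < 7; i++)
                    cmd_timeout();
            return 0;
    }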
diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
index b432651f8236..87ccaceadba7 100644
--- a/drivers/bluetooth/h4_recv.h
+++ b/drivers/bluetooth/h4_recv.h
@@ -60,12 +60,13 @@ static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
const struct h4_recv_pkt *pkts,
int pkts_count)
{
+ /* Check for error from previous call */
+ if (IS_ERR(skb))
+ skb = NULL;
+
while (count) {
int i, len;
- if (!count)
- break;
-
if (!skb) {
for (i = 0; i < pkts_count; i++) {
if (buffer[0] != (&pkts[i])->type)
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index fb97a3bf069b..5d97d77627c1 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -174,6 +174,10 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
+ /* Check for error from previous call */
+ if (IS_ERR(skb))
+ skb = NULL;
+
while (count) {
int i, len;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index fbf7b4df23ab..9562e72c1ae5 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -207,11 +207,11 @@ void hci_uart_init_work(struct work_struct *work)
err = hci_register_dev(hu->hdev);
if (err < 0) {
BT_ERR("Can't register HCI device");
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ hu->proto->close(hu);
hdev = hu->hdev;
hu->hdev = NULL;
hci_free_dev(hdev);
- clear_bit(HCI_UART_PROTO_READY, &hu->flags);
- hu->proto->close(hu);
return;
}
@@ -616,6 +616,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
static int hci_uart_register_dev(struct hci_uart *hu)
{
struct hci_dev *hdev;
+ int err;
BT_DBG("");
@@ -659,11 +660,22 @@ static int hci_uart_register_dev(struct hci_uart *hu)
else
hdev->dev_type = HCI_PRIMARY;
+ /* Only call open() for the protocol after hdev is fully initialized as
+ * open() (or a timer/workqueue it starts) may attempt to reference it.
+ */
+ err = hu->proto->open(hu);
+ if (err) {
+ hu->hdev = NULL;
+ hci_free_dev(hdev);
+ return err;
+ }
+
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
+ hu->proto->close(hu);
hu->hdev = NULL;
hci_free_dev(hdev);
return -ENODEV;
@@ -683,20 +695,14 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
if (!p)
return -EPROTONOSUPPORT;
- err = p->open(hu);
- if (err)
- return err;
-
hu->proto = p;
- set_bit(HCI_UART_PROTO_READY, &hu->flags);
err = hci_uart_register_dev(hu);
if (err) {
- clear_bit(HCI_UART_PROTO_READY, &hu->flags);
- p->close(hu);
return err;
}
+ set_bit(HCI_UART_PROTO_READY, &hu->flags);
return 0;
}
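The hci_ldisc change boils down to an ordering rule: call the protocol's open() only once the hci_dev it may dereference exists, and undo it (close() before free) on every failure path. A minimal sketch of that acquire-late/release-in-reverse shape, with invented names and a simulated registration failure:

    #include <stdio.h>
    #include <stdlib.h>

    struct proto { int opened; };
    struct uart  { struct proto *proto; void *hdev; };

    static int  proto_open(struct uart *hu)  { hu->proto->opened = 1; return 0; }
    static void proto_close(struct uart *hu) { hu->proto->opened = 0; }
    static int  register_hdev(struct uart *hu) { (void)hu; return -1; /* simulated failure */ }

    static int uart_register_dev(struct uart *hu)
    {
            int err;

            hu->hdev = malloc(16);          /* fully set up hdev first */
            if (!hu->hdev)
                    return -1;

            err = proto_open(hu);           /* only now may open() run */
            if (err) {
                    free(hu->hdev);
                    hu->hdev = NULL;
                    return err;
            }

            if (register_hdev(hu) < 0) {    /* tear down in reverse order */
                    proto_close(hu);
                    free(hu->hdev);
                    hu->hdev = NULL;
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct proto p = { 0 };
            struct uart hu = { &p, NULL };

            printf("register -> %d, proto opened=%d\n",
                   uart_register_dev(&hu), p.opened);
            return 0;
    }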
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index f036c8f98ea3..237aea34b69f 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -59,7 +59,7 @@
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
#define IBS_TX_IDLE_TIMEOUT_MS 2000
-#define BAUDRATE_SETTLE_TIMEOUT_MS 300
+#define CMD_TRANS_TIMEOUT_MS 100
/* susclk rate */
#define SUSCLK_RATE_32KHZ 32768
@@ -770,16 +770,17 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
/* Don't go to sleep in middle of patch download or
* Out-Of-Band(GPIOs control) sleep is selected.
*/
if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
skb_queue_tail(&qca->txq, skb);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
return 0;
}
- spin_lock_irqsave(&qca->hci_ibs_lock, flags);
-
/* Act according to current state */
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_AWAKE:
@@ -962,8 +963,8 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
- struct sk_buff *skb;
struct qca_serdev *qcadev;
+ struct sk_buff *skb;
u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
if (baudrate > QCA_BAUDRATE_3200000)
@@ -977,13 +978,6 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
return -ENOMEM;
}
- /* Disabling hardware flow control is mandatory while
- * sending change baudrate request to wcn3990 SoC.
- */
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
- hci_uart_set_flow_control(hu, true);
-
/* Assign commands to change baudrate and packet type. */
skb_put_data(skb, cmd, sizeof(cmd));
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
@@ -991,16 +985,21 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
skb_queue_tail(&qca->txq, skb);
hci_uart_tx_wakeup(hu);
- /* wait 300ms to change new baudrate on controller side
- * controller will come back after they receive this HCI command
- * then host can communicate with new baudrate to controller
- */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
- set_current_state(TASK_RUNNING);
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+
+ /* Wait for the baudrate change request to be sent */
+
+ while (!skb_queue_empty(&qca->txq))
+ usleep_range(100, 200);
+ serdev_device_wait_until_sent(hu->serdev,
+ msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+
+ /* Give the controller time to process the request */
if (qcadev->btsoc_type == QCA_WCN3990)
- hci_uart_set_flow_control(hu, false);
+ msleep(10);
+ else
+ msleep(300);
return 0;
}
@@ -1013,11 +1012,11 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
hci_uart_set_baudrate(hu, speed);
}
-static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd)
+static int qca_send_power_pulse(struct hci_uart *hu, bool on)
{
- struct hci_uart *hu = hci_get_drvdata(hdev);
- struct qca_data *qca = hu->priv;
- struct sk_buff *skb;
+ int ret;
+ int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
+ u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE;
/* These power pulses are single byte command which are sent
* at required baudrate to wcn3990. On wcn3990, we have an external
@@ -1029,24 +1028,25 @@ static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd)
* save power. Disabling hardware flow control is mandatory while
* sending power pulses to SoC.
*/
- bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd);
-
- skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd);
+ serdev_device_write_flush(hu->serdev);
hci_uart_set_flow_control(hu, true);
+ ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
+ if (ret < 0) {
+ bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd);
+ return ret;
+ }
- skb_put_u8(skb, cmd);
- hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
-
- skb_queue_tail(&qca->txq, skb);
- hci_uart_tx_wakeup(hu);
-
- /* Wait for 100 uS for SoC to settle down */
- usleep_range(100, 200);
+ serdev_device_wait_until_sent(hu->serdev, timeout);
hci_uart_set_flow_control(hu, false);
+ /* Give the controller time to boot/shut down */
+ if (on)
+ msleep(100);
+ else
+ msleep(10);
+
return 0;
}
@@ -1091,7 +1091,8 @@ static int qca_check_speeds(struct hci_uart *hu)
static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
unsigned int speed, qca_baudrate;
- int ret;
+ struct qca_serdev *qcadev;
+ int ret = 0;
if (speed_type == QCA_INIT_SPEED) {
speed = qca_get_speed(hu, QCA_INIT_SPEED);
@@ -1102,21 +1103,31 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
if (!speed)
return 0;
+ /* Disable flow control for wcn3990 to deassert RTS while
+ * changing the baudrate of chip and host.
+ */
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hci_uart_set_flow_control(hu, true);
+
qca_baudrate = qca_get_baudrate_value(speed);
bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
ret = qca_set_baudrate(hu->hdev, qca_baudrate);
if (ret)
- return ret;
+ goto error;
host_set_baudrate(hu, speed);
+
+error:
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hci_uart_set_flow_control(hu, false);
}
- return 0;
+ return ret;
}
static int qca_wcn3990_init(struct hci_uart *hu)
{
- struct hci_dev *hdev = hu->hdev;
struct qca_serdev *qcadev;
int ret;
@@ -1139,18 +1150,15 @@ static int qca_wcn3990_init(struct hci_uart *hu)
/* Forcefully enable wcn3990 to enter in to boot mode. */
host_set_baudrate(hu, 2400);
- ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+ ret = qca_send_power_pulse(hu, false);
if (ret)
return ret;
qca_set_speed(hu, QCA_INIT_SPEED);
- ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE);
+ ret = qca_send_power_pulse(hu, true);
if (ret)
return ret;
- /* Wait for 100 ms for SoC to boot */
- msleep(100);
-
/* Now the device is in ready state to communicate with host.
* To sync host with device we need to reopen port.
* Without this, we will have RTS and CTS synchronization
@@ -1193,6 +1201,7 @@ static int qca_setup(struct hci_uart *hu)
* setup for every hci up.
*/
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
hu->hdev->shutdown = qca_power_off;
ret = qca_wcn3990_init(hu);
if (ret)
@@ -1241,7 +1250,10 @@ static int qca_setup(struct hci_uart *hu)
}
/* Setup bdaddr */
- hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+ if (qcadev->btsoc_type == QCA_WCN3990)
+ hu->hdev->set_bdaddr = qca_set_bdaddr;
+ else
+ hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
return ret;
}
@@ -1274,13 +1286,20 @@ static const struct qca_vreg_data qca_soc_data = {
static void qca_power_shutdown(struct hci_uart *hu)
{
- struct serdev_device *serdev = hu->serdev;
- unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE;
+ struct qca_data *qca = hu->priv;
+ unsigned long flags;
+
+ /* From this point on we go into the power off state. The serial
+ * port is still open, so stop queueing IBS data and flush all the
+ * buffered skbs.
+ */
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+ clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ qca_flush(hu);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
host_set_baudrate(hu, 2400);
- hci_uart_set_flow_control(hu, true);
- serdev_device_write_buf(serdev, &cmd, sizeof(cmd));
- hci_uart_set_flow_control(hu, false);
+ qca_send_power_pulse(hu, false);
qca_power_setup(hu, false);
}
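qca_send_power_pulse() now pushes a single byte straight through the serdev and waits for it to leave the UART before sleeping for the boot/shutdown time; a user-space analogue against a plain tty fd using POSIX write()/tcdrain() (the device path, the pulse values and the delays are assumptions):

    #include <fcntl.h>
    #include <stdio.h>
    #include <termios.h>
    #include <unistd.h>

    /* Send one power-pulse byte and block until the UART FIFO has drained,
     * then give the chip time to boot (on) or shut down (off). */
    static int send_power_pulse(int fd, int on)
    {
            unsigned char cmd = on ? 0xFC : 0xC0;   /* illustrative values */

            if (write(fd, &cmd, 1) != 1)
                    return -1;
            if (tcdrain(fd))                        /* like wait_until_sent() */
                    return -1;
            usleep(on ? 100000 : 10000);            /* 100 ms boot, 10 ms off */
            return 0;
    }

    int main(void)
    {
            int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY); /* assumed path */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (send_power_pulse(fd, 1))
                    perror("power pulse");
            close(fd);
            return 0;
    }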
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index e906ecfe23dd..8ad77246f322 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -295,6 +295,14 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
if (!mc_adev)
goto error;
+ mc_adev->consumer_link = device_link_add(&mc_dev->dev,
+ &mc_adev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!mc_adev->consumer_link) {
+ error = -EINVAL;
+ goto error;
+ }
+
*new_mc_adev = mc_adev;
return 0;
error:
@@ -321,6 +329,9 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
return;
fsl_mc_resource_free(resource);
+
+ device_link_del(mc_adev->consumer_link);
+ mc_adev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_object_free);
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 7226cfc49b6f..3ae574a58cce 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -209,9 +209,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
+
*new_mc_io = mc_io;
return 0;
+error_cleanup_mc_io:
+ fsl_destroy_mc_io(mc_io);
error_cleanup_resource:
fsl_mc_resource_free(resource);
return error;
@@ -244,6 +254,9 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
fsl_destroy_mc_io(mc_io);
fsl_mc_resource_free(resource);
+
+ device_link_del(dpmcp_dev->consumer_link);
+ dpmcp_dev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
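Both fsl-mc hunks follow the same consumer/supplier pattern: create a device link when the consumer grabs the resource and delete it when the resource is handed back. A kernel-style sketch of that pairing (not standalone code; the helper names are invented):

    #include <linux/device.h>

    /* On allocate: tie the consumer to the supplier so the driver core
     * keeps probe/remove ordering and PM dependencies consistent. */
    static struct device_link *example_take(struct device *consumer,
                                            struct device *supplier)
    {
            return device_link_add(consumer, supplier,
                                   DL_FLAG_AUTOREMOVE_CONSUMER);
    }

    /* On free: drop the link again so the supplier can go away cleanly. */
    static void example_release(struct device_link *link)
    {
            if (link)
                    device_link_del(link);
    }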
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index d5f85455fa62..19d7b6ff2f17 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -522,10 +522,9 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
if (!found) {
dev_warn(hostdev,
- "could not find cell for child device (%s)\n",
+ "could not find cell for child device (%s), discarding\n",
hid);
- ret = -ENODEV;
- goto fail;
+ continue;
}
pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index d84996a4528e..db74334ca5ef 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -46,6 +46,17 @@ static const struct imx_weim_devtype imx51_weim_devtype = {
};
#define MAX_CS_REGS_COUNT 6
+#define MAX_CS_COUNT 6
+#define OF_REG_SIZE 3
+
+struct cs_timing {
+ bool is_applied;
+ u32 regs[MAX_CS_REGS_COUNT];
+};
+
+struct cs_timing_state {
+ struct cs_timing cs[MAX_CS_COUNT];
+};
static const struct of_device_id weim_id_table[] = {
/* i.MX1/21 */
@@ -111,21 +122,19 @@ err:
}
/* Parse and set the timing for this device. */
-static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
- const struct imx_weim_devtype *devtype)
+static int __init weim_timing_setup(struct device *dev,
+ struct device_node *np, void __iomem *base,
+ const struct imx_weim_devtype *devtype,
+ struct cs_timing_state *ts)
{
u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
+ int reg_idx, num_regs;
+ struct cs_timing *cst;
if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
return -EINVAL;
-
- /* get the CS index from this child node's "reg" property. */
- ret = of_property_read_u32(np, "reg", &cs_idx);
- if (ret)
- return ret;
-
- if (cs_idx >= devtype->cs_count)
+ if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
return -EINVAL;
ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
@@ -133,9 +142,43 @@ static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
if (ret)
return ret;
- /* set the timing for WEIM */
- for (i = 0; i < devtype->cs_regs_count; i++)
- writel(value[i], base + cs_idx * devtype->cs_stride + i * 4);
+ /*
+ * the child node's "reg" property may contain multiple address ranges,
+ * extract the chip select for each.
+ */
+ num_regs = of_property_count_elems_of_size(np, "reg", OF_REG_SIZE);
+ if (num_regs < 0)
+ return num_regs;
+ if (!num_regs)
+ return -EINVAL;
+ for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
+ /* get the CS index from this child node's "reg" property. */
+ ret = of_property_read_u32_index(np, "reg",
+ reg_idx * OF_REG_SIZE, &cs_idx);
+ if (ret)
+ break;
+
+ if (cs_idx >= devtype->cs_count)
+ return -EINVAL;
+
+ /* prevent re-configuring a CS that's already been configured */
+ cst = &ts->cs[cs_idx];
+ if (cst->is_applied && memcmp(value, cst->regs,
+ devtype->cs_regs_count * sizeof(u32))) {
+ dev_err(dev, "fsl,weim-cs-timing conflict on %pOF", np);
+ return -EINVAL;
+ }
+
+ /* set the timing for WEIM */
+ for (i = 0; i < devtype->cs_regs_count; i++)
+ writel(value[i],
+ base + cs_idx * devtype->cs_stride + i * 4);
+ if (!cst->is_applied) {
+ cst->is_applied = true;
+ memcpy(cst->regs, value,
+ devtype->cs_regs_count * sizeof(u32));
+ }
+ }
return 0;
}
@@ -148,6 +191,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
const struct imx_weim_devtype *devtype = of_id->data;
struct device_node *child;
int ret, have_child = 0;
+ struct cs_timing_state ts = {};
if (devtype == &imx50_weim_devtype) {
ret = imx_weim_gpr_setup(pdev);
@@ -156,7 +200,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
}
for_each_available_child_of_node(pdev->dev.of_node, child) {
- ret = weim_timing_setup(child, base, devtype);
+ ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
if (ret)
dev_warn(&pdev->dev, "%pOF set timing failed.\n",
child);
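Editorial sketch (not part of the patch): the weim_timing_setup() rework walks every entry of a child node's "reg" property instead of assuming a single chip select. A hedged sketch of that OF iteration, assuming the same three-cell-per-entry "reg" encoding the hunk assumes; note that of_property_count_elems_of_size() takes the element size in bytes:

#include <linux/of.h>

#define REG_CELLS       3       /* cells per "reg" entry assumed above */

static int for_each_reg_cs(struct device_node *np,
                           int (*fn)(u32 cs_idx, void *data), void *data)
{
        int num_regs, reg_idx, ret;
        u32 cs_idx;

        num_regs = of_property_count_elems_of_size(np, "reg",
                                                   REG_CELLS * sizeof(u32));
        if (num_regs <= 0)
                return num_regs ? num_regs : -EINVAL;

        for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
                /* First cell of each entry is the chip-select index. */
                ret = of_property_read_u32_index(np, "reg",
                                                 reg_idx * REG_CELLS, &cs_idx);
                if (ret)
                        return ret;
                ret = fn(cs_idx, data);
                if (ret)
                        return ret;
        }
        return 0;
}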
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f94d33525771..d299ec79e4c3 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ 0),
/* Some timers on omap4 and later */
SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ 0),
SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE),
+ 0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2e2ffe7010aa..72866a004f07 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -244,26 +244,23 @@ source "drivers/char/hw_random/Kconfig"
config NVRAM
tristate "/dev/nvram support"
- depends on ATARI || X86 || GENERIC_NVRAM
+ depends on X86 || HAVE_ARCH_NVRAM_OPS
+ default M68K || PPC
---help---
If you say Y here and create a character special file /dev/nvram
with major number 10 and minor number 144 using mknod ("man mknod"),
- you get read and write access to the extra bytes of non-volatile
- memory in the real time clock (RTC), which is contained in every PC
- and most Ataris. The actual number of bytes varies, depending on the
- nvram in the system, but is usually 114 (128-14 for the RTC).
-
- This memory is conventionally called "CMOS RAM" on PCs and "NVRAM"
- on Ataris. /dev/nvram may be used to view settings there, or to
- change them (with some utility). It could also be used to frequently
+ you get read and write access to the non-volatile memory.
+
+ /dev/nvram may be used to view settings in NVRAM or to change them
+ (with some utility). It could also be used to frequently
save a few bits of very important data that may not be lost over
power-off and for which writing to disk is too insecure. Note
however that most NVRAM space in a PC belongs to the BIOS and you
should NEVER idly tamper with it. See Ralf Brown's interrupt list
for a guide to the use of CMOS bytes by your BIOS.
- On Atari machines, /dev/nvram is always configured and does not need
- to be selected.
+ This memory is conventionally called "NVRAM" on PowerPC machines,
+ "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
To compile this driver as a module, choose M here: the
module will be called nvram.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index b8d42b4e979b..fbea7dd12932 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -26,11 +26,7 @@ obj-$(CONFIG_RTC) += rtc.o
obj-$(CONFIG_HPET) += hpet.o
obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
-ifeq ($(CONFIG_GENERIC_NVRAM),y)
- obj-$(CONFIG_NVRAM) += generic_nvram.o
-else
- obj-$(CONFIG_NVRAM) += nvram.o
-endif
+obj-$(CONFIG_NVRAM) += nvram.o
obj-$(CONFIG_TOSHIBA) += toshiba.o
obj-$(CONFIG_DS1620) += ds1620.o
obj-$(CONFIG_HW_RANDOM) += hw_random/
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 7f88490b5479..c53f0f9ef5b0 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -163,7 +163,6 @@ static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
unsigned long page = efficeon_private.l1_table[index];
if (page) {
efficeon_private.l1_table[index] = 0;
- ClearPageReserved(virt_to_page((char *)page));
free_page(page);
freed++;
}
@@ -219,7 +218,6 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
efficeon_free_gatt_table(agp_bridge);
return -ENOMEM;
}
- SetPageReserved(virt_to_page((char *)page));
for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
clflush((char *)page+offset);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index c0a5b1f3a986..4ccc39e00ced 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -32,6 +32,7 @@
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/nospec.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
TicCard = st_loc.tic_des_from_pc; /* tic number to send */
IndexCard = NumCard - 1;
- if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+ if (IndexCard >= MAX_BOARD)
+ return -EINVAL;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (!apbs[IndexCard].RamIO)
return -EINVAL;
#ifdef DEBUG
@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned char IndexCard;
void __iomem *pmem;
int ret = 0;
+ static int warncount = 10;
volatile unsigned char byte_reset_it;
struct st_ram_io *adgl;
void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ac_mutex);
IndexCard = adgl->num_card-1;
- if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
- static int warncount = 10;
- if (warncount) {
- printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
- warncount--;
- }
- kfree(adgl);
- mutex_unlock(&ac_mutex);
- return -EINVAL;
- }
+ if (cmd != 6 && IndexCard >= MAX_BOARD)
+ goto err;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (cmd != 6 && !apbs[IndexCard].RamIO)
+ goto err;
switch (cmd) {
@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
kfree(adgl);
mutex_unlock(&ac_mutex);
return 0;
+
+err:
+ if (warncount) {
+ pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+ (int)IndexCard + 1);
+ warncount--;
+ }
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return -EINVAL;
+
}
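Editorial sketch (not part of the patch): both applicom hunks follow the usual Spectre-v1 hardening recipe — bounds-check the untrusted index first, then clamp it with array_index_nospec() before using it to index the array. A minimal sketch of the pattern, with placeholder array name and bound:

#include <linux/nospec.h>

#define MAX_BOARD 8                             /* placeholder bound */

struct board;
static struct board *boards[MAX_BOARD];         /* hypothetical table */

static struct board *lookup_board(unsigned int index)
{
        if (index >= MAX_BOARD)
                return NULL;
        /* Clamp after the check so a mispredicted branch cannot leak memory. */
        index = array_index_nospec(index, MAX_BOARD);
        return boards[index];
}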
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index d9aab643997e..11781ebffbf7 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -255,35 +255,12 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
}
/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
- */
-
-static int efi_rtc_open(struct inode *inode, struct file *file)
-{
- /*
- * nothing special to do here
- * We do accept multiple open files at the same time as we
- * synchronize on the per call operation.
- */
- return 0;
-}
-
-static int efi_rtc_close(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-/*
* The various file operations we support.
*/
static const struct file_operations efi_rtc_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = efi_rtc_ioctl,
- .open = efi_rtc_open,
- .release = efi_rtc_close,
.llseek = no_llseek,
};
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
deleted file mode 100644
index ff5394f47587..000000000000
--- a/drivers/char/generic_nvram.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Generic /dev/nvram driver for architectures providing some
- * "generic" hooks, that is :
- *
- * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
- *
- * Note that an additional hook is supported for PowerMac only
- * for getting the nvram "partition" informations
- *
- */
-
-#define NVRAM_VERSION "1.1"
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/pagemap.h>
-#include <linux/uaccess.h>
-#include <asm/nvram.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/machdep.h>
-#endif
-
-#define NVRAM_SIZE 8192
-
-static DEFINE_MUTEX(nvram_mutex);
-static ssize_t nvram_len;
-
-static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
-{
- return generic_file_llseek_size(file, offset, origin,
- MAX_LFS_FILESIZE, nvram_len);
-}
-
-static ssize_t read_nvram(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int i;
- char __user *p = buf;
-
- if (!access_ok(buf, count))
- return -EFAULT;
- if (*ppos >= nvram_len)
- return 0;
- for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
- if (__put_user(nvram_read_byte(i), p))
- return -EFAULT;
- *ppos = i;
- return p - buf;
-}
-
-static ssize_t write_nvram(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int i;
- const char __user *p = buf;
- char c;
-
- if (!access_ok(buf, count))
- return -EFAULT;
- if (*ppos >= nvram_len)
- return 0;
- for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
- if (__get_user(c, p))
- return -EFAULT;
- nvram_write_byte(c, i);
- }
- *ppos = i;
- return p - buf;
-}
-
-static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- switch(cmd) {
-#ifdef CONFIG_PPC_PMAC
- case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
- printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
- case IOC_NVRAM_GET_OFFSET: {
- int part, offset;
-
- if (!machine_is(powermac))
- return -EINVAL;
- if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
- return -EFAULT;
- if (part < pmac_nvram_OF || part > pmac_nvram_NR)
- return -EINVAL;
- offset = pmac_get_partition(part);
- if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
- return -EFAULT;
- break;
- }
-#endif /* CONFIG_PPC_PMAC */
- case IOC_NVRAM_SYNC:
- nvram_sync();
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&nvram_mutex);
- ret = nvram_ioctl(file, cmd, arg);
- mutex_unlock(&nvram_mutex);
-
- return ret;
-}
-
-const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = nvram_llseek,
- .read = read_nvram,
- .write = write_nvram,
- .unlocked_ioctl = nvram_unlocked_ioctl,
-};
-
-static struct miscdevice nvram_dev = {
- NVRAM_MINOR,
- "nvram",
- &nvram_fops
-};
-
-int __init nvram_init(void)
-{
- int ret = 0;
-
- printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
- NVRAM_VERSION);
- ret = misc_register(&nvram_dev);
- if (ret != 0)
- goto out;
-
- nvram_len = nvram_get_size();
- if (nvram_len < 0)
- nvram_len = NVRAM_SIZE;
-
-out:
- return ret;
-}
-
-void __exit nvram_cleanup(void)
-{
- misc_deregister( &nvram_dev );
-}
-
-module_init(nvram_init);
-module_exit(nvram_cleanup);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4a22b4b41aef..d0ad85900b79 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str)
pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
return 1;
}
-__setup("hpet_mmap", hpet_mmap_enable);
+__setup("hpet_mmap=", hpet_mmap_enable);
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
@@ -842,7 +842,6 @@ int hpet_alloc(struct hpet_data *hdp)
struct hpet_dev *devp;
u32 i, ntimer;
struct hpets *hpetp;
- size_t siz;
struct hpet __iomem *hpet;
static struct hpets *last;
unsigned long period;
@@ -860,10 +859,8 @@ int hpet_alloc(struct hpet_data *hdp)
return 0;
}
- siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
- sizeof(struct hpet_dev));
-
- hpetp = kzalloc(siz, GFP_KERNEL);
+ hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1),
+ GFP_KERNEL);
if (!hpetp)
return -ENOMEM;
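Editorial sketch (not part of the patch): the hpet_alloc() change replaces an open-coded size calculation with struct_size(), which computes the base structure size plus n trailing elements with overflow checking. A hedged sketch with a hypothetical flexible-array structure (hpet itself uses a one-element trailing array, hence its nirqs - 1):

#include <linux/overflow.h>
#include <linux/slab.h>

struct item { int id; };

struct table {
        unsigned int nr;
        struct item entry[];            /* trailing array sized at allocation */
};

static struct table *table_alloc(unsigned int nr)
{
        struct table *t;

        /* struct_size() guards the sizeof(*t) + nr * sizeof(t->entry[0]) math. */
        t = kzalloc(struct_size(t, entry, nr), GFP_KERNEL);
        if (t)
                t->nr = nr;
        return t;
}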
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dac895dc01b9..25a7d8ffdb5d 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -424,6 +424,21 @@ config HW_RANDOM_EXYNOS
will be called exynos-trng.
If unsure, say Y.
+
+config HW_RANDOM_OPTEE
+ tristate "OP-TEE based Random Number Generator support"
+ depends on OPTEE
+ default HW_RANDOM
+ help
+ This driver provides support for OP-TEE based Random Number
+ Generator on ARM SoCs where hardware entropy sources are not
+ accessible to normal world (Linux).
+
+ To compile this driver as a module, choose M here: the module
+ will be called optee-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e35ec3ce3a20..7c9ef4a7667f 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
+obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 256b0b1d0f26..f759790c3cdb 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -168,14 +168,16 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
priv->rng.read = bcm2835_rng_read;
priv->rng.cleanup = bcm2835_rng_cleanup;
- rng_id = of_match_node(bcm2835_rng_of_match, np);
- if (!rng_id)
- return -EINVAL;
-
- /* Check for rng init function, execute it */
- of_data = rng_id->data;
- if (of_data)
- priv->mask_interrupts = of_data->mask_interrupts;
+ if (dev_of_node(dev)) {
+ rng_id = of_match_node(bcm2835_rng_of_match, np);
+ if (!rng_id)
+ return -EINVAL;
+
+ /* Check for rng init function, execute it */
+ of_data = rng_id->data;
+ if (of_data)
+ priv->mask_interrupts = of_data->mask_interrupts;
+ }
/* register driver */
err = devm_hwrng_register(dev, &priv->rng);
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
new file mode 100644
index 000000000000..ddfbabaa5f8f
--- /dev/null
+++ b/drivers/char/hw_random/optee-rng.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019 Linaro Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#define DRIVER_NAME "optee-rng"
+
+#define TEE_ERROR_HEALTH_TEST_FAIL 0x00000001
+
+/*
+ * TA_CMD_GET_ENTROPY - Get Entropy from RNG
+ *
+ * param[0] (inout memref) - Entropy buffer memory reference
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_NOT_SUPPORTED - Requested entropy size greater than size of pool
+ * TEE_ERROR_HEALTH_TEST_FAIL - Continuous health testing failed
+ */
+#define TA_CMD_GET_ENTROPY 0x0
+
+/*
+ * TA_CMD_GET_RNG_INFO - Get RNG information
+ *
+ * param[0] (out value) - value.a: RNG data-rate in bytes per second
+ * value.b: Quality/Entropy per 1024 bit of data
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ */
+#define TA_CMD_GET_RNG_INFO 0x1
+
+#define MAX_ENTROPY_REQ_SZ (4 * 1024)
+
+/**
+ * struct optee_rng_private - OP-TEE Random Number Generator private data
+ * @dev: OP-TEE based RNG device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: RNG TA session identifier.
+ * @data_rate: RNG data rate.
+ * @entropy_shm_pool: Memory pool shared with RNG device.
+ * @optee_rng: OP-TEE RNG driver structure.
+ */
+struct optee_rng_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ u32 data_rate;
+ struct tee_shm *entropy_shm_pool;
+ struct hwrng optee_rng;
+};
+
+#define to_optee_rng_private(r) \
+ container_of(r, struct optee_rng_private, optee_rng)
+
+static size_t get_optee_rng_data(struct optee_rng_private *pvt_data,
+ void *buf, size_t req_size)
+{
+ int ret = 0;
+ u8 *rng_data = NULL;
+ size_t rng_size = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke TA_CMD_GET_ENTROPY function of Trusted App */
+ inv_arg.func = TA_CMD_GET_ENTROPY;
+ inv_arg.session = pvt_data->session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data->entropy_shm_pool;
+ param[0].u.memref.size = req_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data->ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data->dev, "TA_CMD_GET_ENTROPY invoke err: %x\n",
+ inv_arg.ret);
+ return 0;
+ }
+
+ rng_data = tee_shm_get_va(pvt_data->entropy_shm_pool, 0);
+ if (IS_ERR(rng_data)) {
+ dev_err(pvt_data->dev, "tee_shm_get_va failed\n");
+ return 0;
+ }
+
+ rng_size = param[0].u.memref.size;
+ memcpy(buf, rng_data, rng_size);
+
+ return rng_size;
+}
+
+static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+ size_t read = 0, rng_size = 0;
+ int timeout = 1;
+ u8 *data = buf;
+
+ if (max > MAX_ENTROPY_REQ_SZ)
+ max = MAX_ENTROPY_REQ_SZ;
+
+ while (read == 0) {
+ rng_size = get_optee_rng_data(pvt_data, data, (max - read));
+
+ data += rng_size;
+ read += rng_size;
+
+ if (wait) {
+ if (timeout-- == 0)
+ return read;
+ msleep((1000 * (max - read)) / pvt_data->data_rate);
+ } else {
+ return read;
+ }
+ }
+
+ return read;
+}
+
+static int optee_rng_init(struct hwrng *rng)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+ struct tee_shm *entropy_shm_pool = NULL;
+
+ entropy_shm_pool = tee_shm_alloc(pvt_data->ctx, MAX_ENTROPY_REQ_SZ,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(entropy_shm_pool)) {
+ dev_err(pvt_data->dev, "tee_shm_alloc failed\n");
+ return PTR_ERR(entropy_shm_pool);
+ }
+
+ pvt_data->entropy_shm_pool = entropy_shm_pool;
+
+ return 0;
+}
+
+static void optee_rng_cleanup(struct hwrng *rng)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+
+ tee_shm_free(pvt_data->entropy_shm_pool);
+}
+
+static struct optee_rng_private pvt_data = {
+ .optee_rng = {
+ .name = DRIVER_NAME,
+ .init = optee_rng_init,
+ .cleanup = optee_rng_cleanup,
+ .read = optee_rng_read,
+ }
+};
+
+static int get_optee_rng_info(struct device *dev)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke TA_CMD_GET_RNG_INFO function of Trusted App */
+ inv_arg.func = TA_CMD_GET_RNG_INFO;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(dev, "TA_CMD_GET_RNG_INFO invoke err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ pvt_data.data_rate = param[0].u.value.a;
+ pvt_data.optee_rng.quality = param[0].u.value.b;
+
+ return 0;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int optee_rng_probe(struct device *dev)
+{
+ struct tee_client_device *rng_device = to_tee_client_device(dev);
+ int ret = 0, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with hwrng Trusted App */
+ memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if ((ret < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ err = get_optee_rng_info(dev);
+ if (err)
+ goto out_sess;
+
+ err = hwrng_register(&pvt_data.optee_rng);
+ if (err) {
+ dev_err(dev, "hwrng registration failed (%d)\n", err);
+ goto out_sess;
+ }
+
+ pvt_data.dev = dev;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int optee_rng_remove(struct device *dev)
+{
+ hwrng_unregister(&pvt_data.optee_rng);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id optee_rng_id_table[] = {
+ {UUID_INIT(0xab7a617c, 0xb8e7, 0x4d8f,
+ 0x83, 0x01, 0xd0, 0x9b, 0x61, 0x03, 0x6b, 0x64)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_rng_id_table);
+
+static struct tee_client_driver optee_rng_driver = {
+ .id_table = optee_rng_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .bus = &tee_bus_type,
+ .probe = optee_rng_probe,
+ .remove = optee_rng_remove,
+ },
+};
+
+static int __init optee_rng_mod_init(void)
+{
+ return driver_register(&optee_rng_driver.driver);
+}
+
+static void __exit optee_rng_mod_exit(void)
+{
+ driver_unregister(&optee_rng_driver.driver);
+}
+
+module_init(optee_rng_mod_init);
+module_exit(optee_rng_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sumit Garg <sumit.garg@linaro.org>");
+MODULE_DESCRIPTION("OP-TEE based random number generator driver");
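Editorial sketch (not part of the patch): once optee-rng (or any hwrng backend) is registered and selected as the current source, entropy can be consumed from user space through the hwrng character device. A small, hedged user-space sketch of that consumer side, assuming the standard /dev/hwrng node exposed by the hw_random core:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[32];
        int fd = open("/dev/hwrng", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/hwrng");
                return 1;
        }
        if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
                perror("read");
                close(fd);
                return 1;
        }
        for (size_t i = 0; i < sizeof(buf); i++)
                printf("%02x", buf[i]);
        printf("\n");
        close(fd);
        return 0;
}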
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index b89df66ea1ae..7abd604e938c 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -73,7 +73,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
if (!vi->busy) {
vi->busy = true;
- init_completion(&vi->have_data);
+ reinit_completion(&vi->have_data);
register_buffer(vi, buf, size);
}
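Editorial sketch (not part of the patch): the one-line virtio-rng fix reflects the completion API contract — init_completion() is for first-time setup, while a completion reused across requests should be re-armed with reinit_completion() so a stale "done" count from an earlier cycle is not consumed. A minimal sketch of that pattern, with hypothetical names:

#include <linux/completion.h>

struct req_ctx {
        struct completion have_data;    /* hypothetical per-device completion */
};

static void req_ctx_setup(struct req_ctx *ctx)
{
        init_completion(&ctx->have_data);       /* once, at probe time */
}

static void req_ctx_start(struct req_ctx *ctx)
{
        reinit_completion(&ctx->have_data);     /* re-arm before each request */
        /* ... queue the request; the other side calls complete(&ctx->have_data) */
}

static int req_ctx_wait(struct req_ctx *ctx)
{
        return wait_for_completion_killable(&ctx->have_data);
}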
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a74ce885b541..c518659b4d9f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -32,6 +32,7 @@
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
+#include <linux/nospec.h>
#define IPMI_DRIVER_VERSION "39.2"
@@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
{ }
#endif
-static int initialized;
+static bool initialized;
+static bool drvregistered;
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
@@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
+struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -720,7 +722,15 @@ struct watcher_entry {
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
- int index;
+ int index, rv;
+
+ /*
+ * Make sure the driver is actually initialized, this handles
+ * problems with initialization order.
+ */
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
mutex_lock(&smi_watchers_mutex);
@@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
if (user) {
user->handler->ipmi_recv_hndl(msg, user->handler_data);
- release_ipmi_user(msg->user, index);
+ release_ipmi_user(user, index);
} else {
/* User went away, give up. */
ipmi_free_recv_msg(msg);
@@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num,
{
unsigned long flags;
struct ipmi_user *new_user;
- int rv = 0, index;
+ int rv, index;
struct ipmi_smi *intf;
/*
@@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num,
* Make sure the driver is actually initialized, this handles
* problems with initialization order.
*/
- if (!initialized) {
- rv = ipmi_init_msghandler();
- if (rv)
- return rv;
-
- /*
- * The init code doesn't return an error if it was turned
- * off, but it won't initialize. Check that.
- */
- if (!initialized)
- return -ENODEV;
- }
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
if (!new_user)
@@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
static void free_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+ cleanup_srcu_struct(&user->release_barrier);
kfree(user);
}
@@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
{
_ipmi_destroy_user(user);
- cleanup_srcu_struct(&user->release_barrier);
kref_put(&user->refcount, free_user);
return 0;
@@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
if (!user)
return -ENODEV;
- if (channel >= IPMI_MAX_CHANNELS)
+ if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
- else
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
+ }
release_ipmi_user(user, index);
return rv;
@@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
if (!user)
return -ENODEV;
- if (channel >= IPMI_MAX_CHANNELS)
+ if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
- else
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
+ }
release_ipmi_user(user, index);
return rv;
@@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
if (!user)
return -ENODEV;
- if (channel >= IPMI_MAX_CHANNELS)
+ if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
- else
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
+ }
release_ipmi_user(user, index);
return rv;
@@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
if (!user)
return -ENODEV;
- if (channel >= IPMI_MAX_CHANNELS)
+ if (channel >= IPMI_MAX_CHANNELS) {
rv = -EINVAL;
- else
+ } else {
+ channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
+ }
release_ipmi_user(user, index);
return rv;
@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
{
if (addr->channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
+ addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
*lun = intf->addrinfo[addr->channel].lun;
*saddr = intf->addrinfo[addr->channel].address;
return 0;
@@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
* Make sure the driver is actually initialized, this handles
* problems with initialization order.
*/
- if (!initialized) {
- rv = ipmi_init_msghandler();
- if (rv)
- return rv;
- /*
- * The init code doesn't return an error if it was turned
- * off, but it won't initialize. Check that.
- */
- if (!initialized)
- return -ENODEV;
- }
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf)
@@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+/* Must be called with ipmi_interfaces_mutex held. */
+static int ipmi_register_driver(void)
+{
+ int rv;
+
+ if (drvregistered)
+ return 0;
+
+ rv = driver_register(&ipmidriver.driver);
+ if (rv)
+ pr_err("Could not register IPMI driver\n");
+ else
+ drvregistered = true;
+ return rv;
+}
+
static struct notifier_block panic_block = {
.notifier_call = panic_event,
.next = NULL,
@@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void)
{
int rv;
+ mutex_lock(&ipmi_interfaces_mutex);
+ rv = ipmi_register_driver();
+ if (rv)
+ goto out;
if (initialized)
- return 0;
-
- rv = driver_register(&ipmidriver.driver);
- if (rv) {
- pr_err("Could not register IPMI driver\n");
- return rv;
- }
+ goto out;
- pr_info("version " IPMI_DRIVER_VERSION "\n");
+ init_srcu_struct(&ipmi_interfaces_srcu);
timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
- initialized = 1;
+ initialized = true;
- return 0;
+out:
+ mutex_unlock(&ipmi_interfaces_mutex);
+ return rv;
}
static int __init ipmi_init_msghandler_mod(void)
{
- ipmi_init_msghandler();
- return 0;
+ int rv;
+
+ pr_info("version " IPMI_DRIVER_VERSION "\n");
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ rv = ipmi_register_driver();
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ return rv;
}
static void __exit cleanup_ipmi(void)
{
int count;
- if (!initialized)
- return;
-
- atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
+ if (initialized) {
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &panic_block);
- /*
- * This can't be called if any interfaces exist, so no worry
- * about shutting down the interfaces.
- */
+ /*
+ * This can't be called if any interfaces exist, so no worry
+ * about shutting down the interfaces.
+ */
- /*
- * Tell the timer to stop, then wait for it to stop. This
- * avoids problems with race conditions removing the timer
- * here.
- */
- atomic_inc(&stop_operation);
- del_timer_sync(&ipmi_timer);
+ /*
+ * Tell the timer to stop, then wait for it to stop. This
+ * avoids problems with race conditions removing the timer
+ * here.
+ */
+ atomic_inc(&stop_operation);
+ del_timer_sync(&ipmi_timer);
- driver_unregister(&ipmidriver.driver);
+ initialized = false;
- initialized = 0;
+ /* Check for buffer leaks. */
+ count = atomic_read(&smi_msg_inuse_count);
+ if (count != 0)
+ pr_warn("SMI message count %d at exit\n", count);
+ count = atomic_read(&recv_msg_inuse_count);
+ if (count != 0)
+ pr_warn("recv message count %d at exit\n", count);
- /* Check for buffer leaks. */
- count = atomic_read(&smi_msg_inuse_count);
- if (count != 0)
- pr_warn("SMI message count %d at exit\n", count);
- count = atomic_read(&recv_msg_inuse_count);
- if (count != 0)
- pr_warn("recv message count %d at exit\n", count);
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
+ }
+ if (drvregistered)
+ driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);
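Editorial sketch (not part of the patch): the message-handler rework above makes ipmi_init_msghandler() safe to call from any registration path by performing the real setup exactly once under the interfaces mutex. The general shape of that idempotent-init pattern, sketched with placeholder names:

#include <linux/mutex.h>

static DEFINE_MUTEX(init_mutex);
static bool initialized;

static int do_one_time_setup(void);             /* hypothetical */

static int ensure_initialized(void)
{
        int rv = 0;

        mutex_lock(&init_mutex);
        if (!initialized) {
                rv = do_one_time_setup();
                if (!rv)
                        initialized = true;     /* only flip on success */
        }
        mutex_unlock(&init_mutex);
        return rv;
}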
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index ca9528c4f183..b7a1ae2afaea 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* Remove the multi-part read marker. */
len -= 2;
+ data += 2;
for (i = 0; i < len; i++)
- ssif_info->data[i] = data[i+2];
+ ssif_info->data[i] = data[i];
ssif_info->multi_len = len;
ssif_info->multi_pos = 1;
@@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
blocknum = data[0];
+ len--;
+ data++;
+
+ if (blocknum != 0xff && len != 31) {
+ /* All blocks but the last must have 31 data bytes. */
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Received middle message <31\n");
- if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+ goto continue_op;
+ }
+
+ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
/* Received message too big, abort the operation. */
result = -E2BIG;
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
goto continue_op;
}
- /* Remove the blocknum from the data. */
- len--;
for (i = 0; i < len; i++)
- ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+ ssif_info->data[i + ssif_info->multi_len] = data[i];
ssif_info->multi_len += len;
if (blocknum == 0xff) {
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
- } else if (blocknum + 1 != ssif_info->multi_pos) {
+ } else if (blocknum != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
@@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
}
+ continue_op:
if (result < 0) {
ssif_inc_stat(ssif_info, receive_errors);
} else {
@@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ssif_inc_stat(ssif_info, received_message_parts);
}
-
- continue_op:
if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
pr_info("DONE 1: state = %d, result=%d\n",
ssif_info->ssif_state, result);
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 5c8d780637bd..3406852f67ff 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -729,7 +729,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd,
ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- /* fallthrough for 64-bit */
+ /* fall through - for 64-bit */
case LPSETTIMEOUT_NEW:
ret = lp_set_timeout64(minor, (void __user *)arg);
break;
@@ -757,7 +757,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- /* fallthrough for x32 mode */
+ /* fall through - for x32 mode */
case LPSETTIMEOUT_NEW:
ret = lp_set_timeout64(minor, (void __user *)arg);
break;
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 8c9216a0f62e..0a31b60bee7b 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -50,6 +50,7 @@ static LIST_HEAD(soft_list);
* file operations
*/
static const struct file_operations mbcs_ops = {
+ .owner = THIS_MODULE,
.open = mbcs_open,
.llseek = mbcs_sram_llseek,
.read = mbcs_sram_read,
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index b5e3103c1175..e43c876a9223 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -59,6 +59,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/serial_8250.h>
+#include <linux/nospec.h>
#include "smapi.h"
#include "mwavedd.h"
#include "3780i.h"
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
ipcnum);
return -EINVAL;
}
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
PRINTK_3(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
" ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
" Invalid ipcnum %x\n", ipcnum);
return -EINVAL;
}
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
PRINTK_3(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
" ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
ipcnum);
return -EINVAL;
}
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
mutex_lock(&mwave_mutex);
if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
pDrvData->IPCs[ipcnum].bIsEnabled = false;
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 25264d65e716..eff1e3f1b3a2 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -21,13 +21,6 @@
* ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid
* again; use with care!)
*
- * This file also provides some functions for other parts of the kernel that
- * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}.
- * Obviously this can be used only if this driver is always configured into
- * the kernel and is not a module. Since the functions are used by some Atari
- * drivers, this is the case on the Atari.
- *
- *
* 1.1 Cesar Barros: SMP locking fixes
* added changelog
* 1.2 Erik Gilling: Cobalt Networks support
@@ -39,64 +32,6 @@
#include <linux/module.h>
#include <linux/nvram.h>
-
-#define PC 1
-#define ATARI 2
-
-/* select machine configuration */
-#if defined(CONFIG_ATARI)
-# define MACH ATARI
-#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and ?? */
-# define MACH PC
-#else
-# error Cannot build nvram driver for this machine configuration.
-#endif
-
-#if MACH == PC
-
-/* RTC in a PC */
-#define CHECK_DRIVER_INIT() 1
-
-/* On PCs, the checksum is built only over bytes 2..31 */
-#define PC_CKS_RANGE_START 2
-#define PC_CKS_RANGE_END 31
-#define PC_CKS_LOC 32
-#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE)
-
-#define mach_check_checksum pc_check_checksum
-#define mach_set_checksum pc_set_checksum
-#define mach_proc_infos pc_proc_infos
-
-#endif
-
-#if MACH == ATARI
-
-/* Special parameters for RTC in Atari machines */
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#define RTC_PORT(x) (TT_RTC_BAS + 2*(x))
-#define CHECK_DRIVER_INIT() (MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK))
-
-#define NVRAM_BYTES 50
-
-/* On Ataris, the checksum is over all bytes except the checksum bytes
- * themselves; these are at the very end */
-#define ATARI_CKS_RANGE_START 0
-#define ATARI_CKS_RANGE_END 47
-#define ATARI_CKS_LOC 48
-
-#define mach_check_checksum atari_check_checksum
-#define mach_set_checksum atari_set_checksum
-#define mach_proc_infos atari_proc_infos
-
-#endif
-
-/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
- * rtc_lock held. Due to the index-port/data-port design of the RTC, we
- * don't want two different things trying to get to it at once. (e.g. the
- * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
- */
-
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
@@ -106,28 +41,26 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#ifdef CONFIG_PPC
+#include <asm/nvram.h>
+#endif
static DEFINE_MUTEX(nvram_mutex);
static DEFINE_SPINLOCK(nvram_state_lock);
static int nvram_open_cnt; /* #times opened */
static int nvram_open_mode; /* special open modes */
+static ssize_t nvram_size;
#define NVRAM_WRITE 1 /* opened for writing (exclusive) */
#define NVRAM_EXCL 2 /* opened with O_EXCL */
-static int mach_check_checksum(void);
-static void mach_set_checksum(void);
-
-#ifdef CONFIG_PROC_FS
-static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
- void *offset);
-#endif
-
+#ifdef CONFIG_X86
/*
* These functions are provided to be called internally or by other parts of
* the kernel. It's up to the caller to ensure correct checksum before reading
@@ -139,13 +72,20 @@ static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
* know about the RTC cruft.
*/
-unsigned char __nvram_read_byte(int i)
+#define NVRAM_BYTES (128 - NVRAM_FIRST_BYTE)
+
+/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
+ * rtc_lock held. Due to the index-port/data-port design of the RTC, we
+ * don't want two different things trying to get to it at once. (e.g. the
+ * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
+ */
+
+static unsigned char __nvram_read_byte(int i)
{
return CMOS_READ(NVRAM_FIRST_BYTE + i);
}
-EXPORT_SYMBOL(__nvram_read_byte);
-unsigned char nvram_read_byte(int i)
+static unsigned char pc_nvram_read_byte(int i)
{
unsigned long flags;
unsigned char c;
@@ -155,16 +95,14 @@ unsigned char nvram_read_byte(int i)
spin_unlock_irqrestore(&rtc_lock, flags);
return c;
}
-EXPORT_SYMBOL(nvram_read_byte);
/* This races nicely with trying to read with checksum checking (nvram_read) */
-void __nvram_write_byte(unsigned char c, int i)
+static void __nvram_write_byte(unsigned char c, int i)
{
CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
}
-EXPORT_SYMBOL(__nvram_write_byte);
-void nvram_write_byte(unsigned char c, int i)
+static void pc_nvram_write_byte(unsigned char c, int i)
{
unsigned long flags;
@@ -172,172 +110,266 @@ void nvram_write_byte(unsigned char c, int i)
__nvram_write_byte(c, i);
spin_unlock_irqrestore(&rtc_lock, flags);
}
-EXPORT_SYMBOL(nvram_write_byte);
-int __nvram_check_checksum(void)
+/* On PCs, the checksum is built only over bytes 2..31 */
+#define PC_CKS_RANGE_START 2
+#define PC_CKS_RANGE_END 31
+#define PC_CKS_LOC 32
+
+static int __nvram_check_checksum(void)
{
- return mach_check_checksum();
+ int i;
+ unsigned short sum = 0;
+ unsigned short expect;
+
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
+ __nvram_read_byte(PC_CKS_LOC+1);
+ return (sum & 0xffff) == expect;
}
-EXPORT_SYMBOL(__nvram_check_checksum);
-int nvram_check_checksum(void)
+static void __nvram_set_checksum(void)
{
- unsigned long flags;
- int rv;
+ int i;
+ unsigned short sum = 0;
- spin_lock_irqsave(&rtc_lock, flags);
- rv = __nvram_check_checksum();
- spin_unlock_irqrestore(&rtc_lock, flags);
- return rv;
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ __nvram_write_byte(sum >> 8, PC_CKS_LOC);
+ __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
}
-EXPORT_SYMBOL(nvram_check_checksum);
-static void __nvram_set_checksum(void)
+static long pc_nvram_set_checksum(void)
{
- mach_set_checksum();
+ spin_lock_irq(&rtc_lock);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
}
-#if 0
-void nvram_set_checksum(void)
+static long pc_nvram_initialize(void)
{
- unsigned long flags;
+ ssize_t i;
- spin_lock_irqsave(&rtc_lock, flags);
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ __nvram_write_byte(0, i);
__nvram_set_checksum();
- spin_unlock_irqrestore(&rtc_lock, flags);
+ spin_unlock_irq(&rtc_lock);
+ return 0;
}
-#endif /* 0 */
-
-/*
- * The are the file operation function for user access to /dev/nvram
- */
-static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
+static ssize_t pc_nvram_get_size(void)
{
- return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
- NVRAM_BYTES);
+ return NVRAM_BYTES;
}
-static ssize_t nvram_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t pc_nvram_read(char *buf, size_t count, loff_t *ppos)
{
- unsigned char contents[NVRAM_BYTES];
- unsigned i = *ppos;
- unsigned char *tmp;
+ char *p = buf;
+ loff_t i;
spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ *p = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
- if (!__nvram_check_checksum())
- goto checksum_err;
+ *ppos = i;
+ return p - buf;
+}
- for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
- *tmp = __nvram_read_byte(i);
+static ssize_t pc_nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ __nvram_write_byte(*p, i);
+ __nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
- if (copy_to_user(buf, contents, tmp - contents))
- return -EFAULT;
-
*ppos = i;
+ return p - buf;
+}
- return tmp - contents;
+const struct nvram_ops arch_nvram_ops = {
+ .read = pc_nvram_read,
+ .write = pc_nvram_write,
+ .read_byte = pc_nvram_read_byte,
+ .write_byte = pc_nvram_write_byte,
+ .get_size = pc_nvram_get_size,
+ .set_checksum = pc_nvram_set_checksum,
+ .initialize = pc_nvram_initialize,
+};
+EXPORT_SYMBOL(arch_nvram_ops);
+#endif /* CONFIG_X86 */
-checksum_err:
- spin_unlock_irq(&rtc_lock);
- return -EIO;
-}
+/*
+ * The are the file operation function for user access to /dev/nvram
+ */
-static ssize_t nvram_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin)
{
- unsigned char contents[NVRAM_BYTES];
- unsigned i = *ppos;
- unsigned char *tmp;
+ return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+ nvram_size);
+}
- if (i >= NVRAM_BYTES)
- return 0; /* Past EOF */
+static ssize_t nvram_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ ssize_t ret;
- if (count > NVRAM_BYTES - i)
- count = NVRAM_BYTES - i;
- if (count > NVRAM_BYTES)
- return -EFAULT; /* Can't happen, but prove it to gcc */
- if (copy_from_user(contents, buf, count))
+ if (!access_ok(buf, count))
return -EFAULT;
+ if (*ppos >= nvram_size)
+ return 0;
- spin_lock_irq(&rtc_lock);
+ count = min_t(size_t, count, nvram_size - *ppos);
+ count = min_t(size_t, count, PAGE_SIZE);
- if (!__nvram_check_checksum())
- goto checksum_err;
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
- for (tmp = contents; count--; ++i, ++tmp)
- __nvram_write_byte(*tmp, i);
+ ret = nvram_read(tmp, count, ppos);
+ if (ret <= 0)
+ goto out;
- __nvram_set_checksum();
+ if (copy_to_user(buf, tmp, ret)) {
+ *ppos -= ret;
+ ret = -EFAULT;
+ }
- spin_unlock_irq(&rtc_lock);
+out:
+ kfree(tmp);
+ return ret;
+}
- *ppos = i;
+static ssize_t nvram_misc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ ssize_t ret;
- return tmp - contents;
+ if (!access_ok(buf, count))
+ return -EFAULT;
+ if (*ppos >= nvram_size)
+ return 0;
-checksum_err:
- spin_unlock_irq(&rtc_lock);
- return -EIO;
+ count = min_t(size_t, count, nvram_size - *ppos);
+ count = min_t(size_t, count, PAGE_SIZE);
+
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ ret = nvram_write(tmp, count, ppos);
+ kfree(tmp);
+ return ret;
}
-static long nvram_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long nvram_misc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
- int i;
+ long ret = -ENOTTY;
switch (cmd) {
-
+#ifdef CONFIG_PPC
+ case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
+ pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
+ /* fall through */
+ case IOC_NVRAM_GET_OFFSET:
+ ret = -EINVAL;
+#ifdef CONFIG_PPC_PMAC
+ if (machine_is(powermac)) {
+ int part, offset;
+
+ if (copy_from_user(&part, (void __user *)arg,
+ sizeof(part)) != 0)
+ return -EFAULT;
+ if (part < pmac_nvram_OF || part > pmac_nvram_NR)
+ return -EINVAL;
+ offset = pmac_get_partition(part);
+ if (offset < 0)
+ return -EINVAL;
+ if (copy_to_user((void __user *)arg,
+ &offset, sizeof(offset)) != 0)
+ return -EFAULT;
+ ret = 0;
+ }
+#endif
+ break;
+#ifdef CONFIG_PPC32
+ case IOC_NVRAM_SYNC:
+ if (ppc_md.nvram_sync != NULL) {
+ mutex_lock(&nvram_mutex);
+ ppc_md.nvram_sync();
+ mutex_unlock(&nvram_mutex);
+ }
+ ret = 0;
+ break;
+#endif
+#elif defined(CONFIG_X86) || defined(CONFIG_M68K)
case NVRAM_INIT:
/* initialize NVRAM contents and checksum */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&nvram_mutex);
- spin_lock_irq(&rtc_lock);
-
- for (i = 0; i < NVRAM_BYTES; ++i)
- __nvram_write_byte(0, i);
- __nvram_set_checksum();
-
- spin_unlock_irq(&rtc_lock);
- mutex_unlock(&nvram_mutex);
- return 0;
-
+ if (arch_nvram_ops.initialize != NULL) {
+ mutex_lock(&nvram_mutex);
+ ret = arch_nvram_ops.initialize();
+ mutex_unlock(&nvram_mutex);
+ }
+ break;
case NVRAM_SETCKS:
/* just set checksum, contents unchanged (maybe useful after
* checksum garbaged somehow...) */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&nvram_mutex);
- spin_lock_irq(&rtc_lock);
- __nvram_set_checksum();
- spin_unlock_irq(&rtc_lock);
- mutex_unlock(&nvram_mutex);
- return 0;
-
- default:
- return -ENOTTY;
+ if (arch_nvram_ops.set_checksum != NULL) {
+ mutex_lock(&nvram_mutex);
+ ret = arch_nvram_ops.set_checksum();
+ mutex_unlock(&nvram_mutex);
+ }
+ break;
+#endif /* CONFIG_X86 || CONFIG_M68K */
}
+ return ret;
}
-static int nvram_open(struct inode *inode, struct file *file)
+static int nvram_misc_open(struct inode *inode, struct file *file)
{
spin_lock(&nvram_state_lock);
+ /* Prevent multiple readers/writers if desired. */
if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
- (nvram_open_mode & NVRAM_EXCL) ||
- ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
+ (nvram_open_mode & NVRAM_EXCL)) {
spin_unlock(&nvram_state_lock);
return -EBUSY;
}
+#if defined(CONFIG_X86) || defined(CONFIG_M68K)
+ /* Prevent multiple writers if the set_checksum ioctl is implemented. */
+ if ((arch_nvram_ops.set_checksum != NULL) &&
+ (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) {
+ spin_unlock(&nvram_state_lock);
+ return -EBUSY;
+ }
+#endif
+
if (file->f_flags & O_EXCL)
nvram_open_mode |= NVRAM_EXCL;
if (file->f_mode & FMODE_WRITE)
@@ -349,7 +381,7 @@ static int nvram_open(struct inode *inode, struct file *file)
return 0;
}
-static int nvram_release(struct inode *inode, struct file *file)
+static int nvram_misc_release(struct inode *inode, struct file *file)
{
spin_lock(&nvram_state_lock);
@@ -366,123 +398,7 @@ static int nvram_release(struct inode *inode, struct file *file)
return 0;
}
-#ifndef CONFIG_PROC_FS
-static int nvram_add_proc_fs(void)
-{
- return 0;
-}
-
-#else
-
-static int nvram_proc_read(struct seq_file *seq, void *offset)
-{
- unsigned char contents[NVRAM_BYTES];
- int i = 0;
-
- spin_lock_irq(&rtc_lock);
- for (i = 0; i < NVRAM_BYTES; ++i)
- contents[i] = __nvram_read_byte(i);
- spin_unlock_irq(&rtc_lock);
-
- mach_proc_infos(contents, seq, offset);
-
- return 0;
-}
-
-static int nvram_add_proc_fs(void)
-{
- if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read))
- return -ENOMEM;
- return 0;
-}
-
-#endif /* CONFIG_PROC_FS */
-
-static const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = nvram_llseek,
- .read = nvram_read,
- .write = nvram_write,
- .unlocked_ioctl = nvram_ioctl,
- .open = nvram_open,
- .release = nvram_release,
-};
-
-static struct miscdevice nvram_dev = {
- NVRAM_MINOR,
- "nvram",
- &nvram_fops
-};
-
-static int __init nvram_init(void)
-{
- int ret;
-
- /* First test whether the driver should init at all */
- if (!CHECK_DRIVER_INIT())
- return -ENODEV;
-
- ret = misc_register(&nvram_dev);
- if (ret) {
- printk(KERN_ERR "nvram: can't misc_register on minor=%d\n",
- NVRAM_MINOR);
- goto out;
- }
- ret = nvram_add_proc_fs();
- if (ret) {
- printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
- goto outmisc;
- }
- ret = 0;
- printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n");
-out:
- return ret;
-outmisc:
- misc_deregister(&nvram_dev);
- goto out;
-}
-
-static void __exit nvram_cleanup_module(void)
-{
- remove_proc_entry("driver/nvram", NULL);
- misc_deregister(&nvram_dev);
-}
-
-module_init(nvram_init);
-module_exit(nvram_cleanup_module);
-
-/*
- * Machine specific functions
- */
-
-#if MACH == PC
-
-static int pc_check_checksum(void)
-{
- int i;
- unsigned short sum = 0;
- unsigned short expect;
-
- for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
- __nvram_read_byte(PC_CKS_LOC+1);
- return (sum & 0xffff) == expect;
-}
-
-static void pc_set_checksum(void)
-{
- int i;
- unsigned short sum = 0;
-
- for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- __nvram_write_byte(sum >> 8, PC_CKS_LOC);
- __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
-}
-
-#ifdef CONFIG_PROC_FS
-
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
static const char * const floppy_types[] = {
"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
"3.5'' 2.88M", "3.5'' 2.88M"
@@ -495,8 +411,8 @@ static const char * const gfx_types[] = {
"monochrome",
};
-static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
- void *offset)
+static void pc_nvram_proc_read(unsigned char *nvram, struct seq_file *seq,
+ void *offset)
{
int checksum;
int type;
@@ -557,143 +473,76 @@ static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
return;
}
-#endif
-#endif /* MACH == PC */
-
-#if MACH == ATARI
-
-static int atari_check_checksum(void)
+static int nvram_proc_read(struct seq_file *seq, void *offset)
{
- int i;
- unsigned char sum = 0;
+ unsigned char contents[NVRAM_BYTES];
+ int i = 0;
- for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) &&
- (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff));
-}
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ contents[i] = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
-static void atari_set_checksum(void)
-{
- int i;
- unsigned char sum = 0;
+ pc_nvram_proc_read(contents, seq, offset);
- for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- __nvram_write_byte(~sum, ATARI_CKS_LOC);
- __nvram_write_byte(sum, ATARI_CKS_LOC + 1);
+ return 0;
}
+#endif /* CONFIG_X86 && CONFIG_PROC_FS */
-#ifdef CONFIG_PROC_FS
-
-static struct {
- unsigned char val;
- const char *name;
-} boot_prefs[] = {
- { 0x80, "TOS" },
- { 0x40, "ASV" },
- { 0x20, "NetBSD (?)" },
- { 0x10, "Linux" },
- { 0x00, "unspecified" }
-};
-
-static const char * const languages[] = {
- "English (US)",
- "German",
- "French",
- "English (UK)",
- "Spanish",
- "Italian",
- "6 (undefined)",
- "Swiss (French)",
- "Swiss (German)"
-};
-
-static const char * const dateformat[] = {
- "MM%cDD%cYY",
- "DD%cMM%cYY",
- "YY%cMM%cDD",
- "YY%cDD%cMM",
- "4 (undefined)",
- "5 (undefined)",
- "6 (undefined)",
- "7 (undefined)"
+static const struct file_operations nvram_misc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = nvram_misc_llseek,
+ .read = nvram_misc_read,
+ .write = nvram_misc_write,
+ .unlocked_ioctl = nvram_misc_ioctl,
+ .open = nvram_misc_open,
+ .release = nvram_misc_release,
};
-static const char * const colors[] = {
- "2", "4", "16", "256", "65536", "??", "??", "??"
+static struct miscdevice nvram_misc = {
+ NVRAM_MINOR,
+ "nvram",
+ &nvram_misc_fops,
};
-static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq,
- void *offset)
+static int __init nvram_module_init(void)
{
- int checksum = nvram_check_checksum();
- int i;
- unsigned vmode;
+ int ret;
- seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not ");
+ nvram_size = nvram_get_size();
+ if (nvram_size < 0)
+ return nvram_size;
- seq_printf(seq, "Boot preference : ");
- for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) {
- if (nvram[1] == boot_prefs[i].val) {
- seq_printf(seq, "%s\n", boot_prefs[i].name);
- break;
- }
+ ret = misc_register(&nvram_misc);
+ if (ret) {
+ pr_err("nvram: can't misc_register on minor=%d\n", NVRAM_MINOR);
+ return ret;
}
- if (i < 0)
- seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
-
- seq_printf(seq, "SCSI arbitration : %s\n",
- (nvram[16] & 0x80) ? "on" : "off");
- seq_printf(seq, "SCSI host ID : ");
- if (nvram[16] & 0x80)
- seq_printf(seq, "%d\n", nvram[16] & 7);
- else
- seq_printf(seq, "n/a\n");
-
- /* the following entries are defined only for the Falcon */
- if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON)
- return;
- seq_printf(seq, "OS language : ");
- if (nvram[6] < ARRAY_SIZE(languages))
- seq_printf(seq, "%s\n", languages[nvram[6]]);
- else
- seq_printf(seq, "%u (undefined)\n", nvram[6]);
- seq_printf(seq, "Keyboard language: ");
- if (nvram[7] < ARRAY_SIZE(languages))
- seq_printf(seq, "%s\n", languages[nvram[7]]);
- else
- seq_printf(seq, "%u (undefined)\n", nvram[7]);
- seq_printf(seq, "Date format : ");
- seq_printf(seq, dateformat[nvram[8] & 7],
- nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
- seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
- seq_printf(seq, "Boot delay : ");
- if (nvram[10] == 0)
- seq_printf(seq, "default");
- else
- seq_printf(seq, "%ds%s\n", nvram[10],
- nvram[10] < 8 ? ", no memory test" : "");
-
- vmode = (nvram[14] << 8) | nvram[15];
- seq_printf(seq,
- "Video mode : %s colors, %d columns, %s %s monitor\n",
- colors[vmode & 7],
- vmode & 8 ? 80 : 40,
- vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
- seq_printf(seq, " %soverscan, compat. mode %s%s\n",
- vmode & 64 ? "" : "no ",
- vmode & 128 ? "on" : "off",
- vmode & 256 ?
- (vmode & 16 ? ", line doubling" : ", half screen") : "");
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+ if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) {
+ pr_err("nvram: can't create /proc/driver/nvram\n");
+ misc_deregister(&nvram_misc);
+ return -ENOMEM;
+ }
+#endif
- return;
+ pr_info("Non-volatile memory driver v" NVRAM_VERSION "\n");
+ return 0;
}
+
+static void __exit nvram_module_exit(void)
+{
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+ remove_proc_entry("driver/nvram", NULL);
#endif
+ misc_deregister(&nvram_misc);
+}
-#endif /* MACH == ATARI */
+module_init(nvram_module_init);
+module_exit(nvram_module_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
+MODULE_ALIAS("devname:nvram");
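An annotated user-space sketch (not part of the patch): with the unified driver above, the NVRAM bytes remain reachable through the misc character device, and on x86 with procfs also through /proc/driver/nvram. The /dev/nvram path below is the conventional node for NVRAM_MINOR and is assumed here.

/* Illustration only: dump the first bytes exposed by /dev/nvram. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[16];
        ssize_t n, i;
        int fd = open("/dev/nvram", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/nvram");
                return 1;
        }
        n = read(fd, buf, sizeof(buf)); /* offsets are relative to the RTC NVRAM area */
        for (i = 0; i < n; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        close(fd);
        return 0;
}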
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e5b2fe80eab4..d2f0bb5ba47e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -293,7 +293,6 @@ config COMMON_CLK_BD718XX
source "drivers/clk/actions/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
-source "drivers/clk/imx/Kconfig"
source "drivers/clk/imgtec/Kconfig"
source "drivers/clk/imx/Kconfig"
source "drivers/clk/ingenic/Kconfig"
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 2fe225a697df..3487e03d4bc6 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
return;
at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
- nck(at91sam9x5_systemck),
- nck(at91sam9x35_periphck), 0);
+ nck(at91sam9x5_systemck), 31, 0);
if (!at91sam9x5_pmc)
return;
@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 2; i++) {
char name[6];
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index d69ad96fe988..cd0ef7274fdb 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 3; i++) {
char name[6];
@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index e358be7f6c8d..b645a9d59cdb 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 3; i++) {
char name[6];
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 5b393e711e94..7d16ab0784ec 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
src = VC5_PRIM_SRC_SHDN_EN_XTAL;
- if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
+ else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+ else /* Invalid; should have been caught by vc5_probe() */
+ return -EINVAL;
}
return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 75d13c0eff12..d2477a5058ac 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core,
if (!parent)
return -EINVAL;
- for (i = 0; i < core->num_parents; i++)
- if (clk_core_get_parent_by_index(core, i) == parent)
+ for (i = 0; i < core->num_parents; i++) {
+ if (core->parents[i] == parent)
+ return i;
+
+ if (core->parents[i])
+ continue;
+
+ /* Fallback to comparing globally unique names */
+ if (!strcmp(parent->name, core->parent_names[i])) {
+ core->parents[i] = parent;
return i;
+ }
+ }
return -EINVAL;
}
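Restated outside the diff: the lookup now prefers the cached parent pointer, skips slots already resolved to a different clock, and only falls back to the (globally unique) name compare for unresolved slots, caching the hit for later calls. A simplified model of that pattern with invented types, not the real clk_core structures:

/* Simplified cache-then-name lookup; illustration only. */
#include <stdio.h>
#include <string.h>

struct node {
        const char *name;
};

struct core {
        int num_parents;
        const char **parent_names;      /* names from static clock data */
        struct node **parents;          /* lazily filled cache of resolved parents */
};

static int fetch_parent_index(struct core *core, struct node *parent)
{
        int i;

        for (i = 0; i < core->num_parents; i++) {
                if (core->parents[i] == parent)
                        return i;               /* cache hit */

                if (core->parents[i])
                        continue;               /* slot already resolved to another clock */

                if (!strcmp(parent->name, core->parent_names[i])) {
                        core->parents[i] = parent;      /* fill the cache */
                        return i;
                }
        }

        return -1;
}

int main(void)
{
        struct node osc = { "osc" }, pll = { "pll" };
        const char *names[] = { "osc", "pll" };
        struct node *cache[] = { NULL, NULL };
        struct core c = { 2, names, cache };

        /* prints "1 1 0": the second lookup of "pll" is served from the cache */
        printf("%d %d %d\n", fetch_parent_index(&c, &pll),
               fetch_parent_index(&c, &pll), fetch_parent_index(&c, &osc));
        return 0;
}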
@@ -2779,7 +2789,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
- seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+ seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 0026c3969b1e..76b9eb15604e 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_frac_pll *pll = to_clk_frac_pll(hw);
u32 val, divfi, divff;
- u64 temp64 = parent_rate;
+ u64 temp64;
int ret;
parent_rate *= 8;
rate *= 2;
divfi = rate / parent_rate;
- temp64 *= rate - divfi;
+ temp64 = parent_rate * divfi;
+ temp64 = rate - temp64;
temp64 *= PLL_FRAC_DENOM;
do_div(temp64, parent_rate);
divff = temp64;
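The reordered math above derives the fractional divider from the remainder left after the integer divider, instead of multiplying the parent rate by that remainder. A worked example with made-up rates; PLL_FRAC_DENOM is assumed to be 1 << 24 here and may not match the driver's constant:

/* Illustration of the corrected fraction computation; not driver code. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t parent_rate = 25000000ULL * 8;         /* 25 MHz reference, pre-scaled by 8 as above */
        uint64_t rate = 1266000000ULL * 2;              /* requested 1.266 GHz, doubled as above */
        uint64_t frac_denom = UINT64_C(1) << 24;        /* assumed PLL_FRAC_DENOM */
        uint64_t divfi = rate / parent_rate;            /* integer part: 12 */
        uint64_t divff = (rate - divfi * parent_rate) * frac_denom / parent_rate;

        printf("divfi=%" PRIu64 " divff=%" PRIu64 "\n", divfi, divff);
        return 0;
}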
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index 99c2508de8e5..fb6edf1b8aa2 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -169,6 +169,8 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
base = devm_ioremap(dev, res->start, resource_size(res));
if (!base)
return -ENOMEM;
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 61fefc046ec5..d083b860f083 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,7 +53,6 @@
#define APMU_DISP1 0x110
#define APMU_CCIC0 0x50
#define APMU_CCIC1 0xf4
-#define APMU_SP 0x68
#define MPMU_UART_PLL 0x14
struct mmp2_clk_unit {
@@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = {
.reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
};
-static DEFINE_SPINLOCK(sp_lock);
-
static struct mmp_param_mux_clk apmu_mux_clks[] = {
{MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
{MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
@@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
- {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock},
};
static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1b1ba54e33dd..1c04575c118f 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -215,6 +215,7 @@ config MSM_MMCC_8996
config MSM_GCC_8998
tristate "MSM8998 Global Clock Controller"
+ select QCOM_GDSC
help
Support for the global clock controller on msm8998 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index c782e62dd98b..58fa5c247af1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
"core_bi_pll_test_se",
};
-static const char * const gcc_parent_names_7[] = {
- "bi_tcxo",
+static const char * const gcc_parent_names_7_ao[] = {
+ "bi_tcxo_ao",
"gpll0",
"gpll0_out_even",
"core_bi_pll_test_se",
@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
"core_bi_pll_test_se",
};
+static const char * const gcc_parent_names_8_ao[] = {
+ "bi_tcxo_ao",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
static const struct parent_map gcc_parent_map_10[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_ahb_clk_src",
- .parent_names = gcc_parent_names_7,
+ .parent_names = gcc_parent_names_7_ao,
.num_parents = 4,
.ops = &clk_rcg2_ops,
},
@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_rbcpr_clk_src",
- .parent_names = gcc_parent_names_8,
+ .parent_names = gcc_parent_names_8_ao,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 2d5d8b43727e..c4d0b6f6abf2 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
/* Read mdiv and fdiv from the fdbck register */
reg = readl(socfpgaclk->hw.reg + 0x4);
mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
- vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
+ vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
return (unsigned long)vco_freq;
}
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 5b238fc314ac..8281dfbf38c2 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -12,17 +12,17 @@
#include "stratix10-clk.h"
-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
- "f2s_free_clk",};
+static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",};
static const char * const cntr_mux[] = { "main_pll", "periph_pll",
- "osc1", "cb_intosc_hs_div2_clk",
- "f2s_free_clk"};
-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
+ "osc1", "cb-intosc-hs-div2-clk",
+ "f2s-free-clk"};
+static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
static const char * const noc_free_mux[] = {"main_noc_base_clk",
"peri_noc_base_clk",
- "osc1", "cb_intosc_hs_div2_clk",
- "f2s_free_clk"};
+ "osc1", "cb-intosc-hs-div2-clk",
+ "f2s-free-clk"};
static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
+static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
"peri_mpu_base_clk",
- "osc1", "cb_intosc_hs_div2_clk",
- "f2s_free_clk"};
+ "osc1", "cb-intosc-hs-div2-clk",
+ "f2s-free-clk"};
/* clocks in AO (always on) controller */
static const struct stratix10_pll_clock s10_pll_clks[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 3b97f60540ad..609970c0b666 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
0x060, BIT(10), 0);
static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
- 0x060, BIT(12), 0);
+ 0x060, BIT(11), 0);
static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
- 0x060, BIT(13), 0);
+ 0x060, BIT(12), 0);
static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
0x060, BIT(13), 0);
static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 621b1cd996db..ac12f261f8ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
[RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
[RST_BUS_VE] = { 0x2c4, BIT(0) },
- [RST_BUS_TCON0] = { 0x2c4, BIT(3) },
+ [RST_BUS_TCON0] = { 0x2c4, BIT(4) },
[RST_BUS_CSI] = { 0x2c4, BIT(8) },
[RST_BUS_DE] = { 0x2c4, BIT(12) },
[RST_BUS_DBG] = { 0x2c4, BIT(31) },
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 7ddacae5d0b1..1adcccfa7829 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -5,3 +5,8 @@ config TEGRA_CLK_EMC
config CLK_TEGRA_BPMP
def_bool y
depends on TEGRA_BPMP
+
+config TEGRA_CLK_DFLL
+ depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
+ select PM_OPP
+ def_bool y
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 6507acc843c7..4812e45c2214 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
-obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124-dfll-fcpu.o
+obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 609e363dabf8..0400e5b1d627 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1,7 +1,7 @@
/*
* clk-dfll.c - Tegra DFLL clock source common code
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -47,6 +47,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -243,6 +244,12 @@ enum dfll_tune_range {
DFLL_TUNE_LOW = 1,
};
+
+enum tegra_dfll_pmu_if {
+ TEGRA_DFLL_PMU_I2C = 0,
+ TEGRA_DFLL_PMU_PWM = 1,
+};
+
/**
* struct dfll_rate_req - target DFLL rate request data
* @rate: target frequency, after the postscaling
@@ -300,10 +307,19 @@ struct tegra_dfll {
u32 i2c_reg;
u32 i2c_slave_addr;
- /* i2c_lut array entries are regulator framework selectors */
- unsigned i2c_lut[MAX_DFLL_VOLTAGES];
- int i2c_lut_size;
- u8 lut_min, lut_max, lut_safe;
+ /* lut array entries are regulator framework selectors or PWM values */
+ unsigned lut[MAX_DFLL_VOLTAGES];
+ unsigned long lut_uv[MAX_DFLL_VOLTAGES];
+ int lut_size;
+ u8 lut_bottom, lut_min, lut_max, lut_safe;
+
+ /* PWM interface */
+ enum tegra_dfll_pmu_if pmu_if;
+ unsigned long pwm_rate;
+ struct pinctrl *pwm_pin;
+ struct pinctrl_state *pwm_enable_state;
+ struct pinctrl_state *pwm_disable_state;
+ u32 reg_init_uV;
};
#define clk_hw_to_dfll(_hw) container_of(_hw, struct tegra_dfll, dfll_clk_hw)
@@ -490,6 +506,34 @@ static void dfll_set_mode(struct tegra_dfll *td,
}
/*
+ * DVCO rate control
+ */
+
+static unsigned long get_dvco_rate_below(struct tegra_dfll *td, u8 out_min)
+{
+ struct dev_pm_opp *opp;
+ unsigned long rate, prev_rate;
+ unsigned long uv, min_uv;
+
+ min_uv = td->lut_uv[out_min];
+ for (rate = 0, prev_rate = 0; ; rate++) {
+ opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ uv = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (uv && uv > min_uv)
+ return prev_rate;
+
+ prev_rate = rate;
+ }
+
+ return prev_rate;
+}
+
+/*
* DFLL-to-I2C controller interface
*/
@@ -518,6 +562,118 @@ static int dfll_i2c_set_output_enabled(struct tegra_dfll *td, bool enable)
return 0;
}
+
+/*
+ * DFLL-to-PWM controller interface
+ */
+
+/**
+ * dfll_pwm_set_output_enabled - enable/disable PWM voltage requests
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the PWM voltage requests
+ *
+ * Set the master enable control for PWM control value updates. If disabled,
+ * then the PWM signal is not driven. Also configure the PWM output pad
+ * to the appropriate state.
+ */
+static int dfll_pwm_set_output_enabled(struct tegra_dfll *td, bool enable)
+{
+ int ret;
+ u32 val, div;
+
+ if (enable) {
+ ret = pinctrl_select_state(td->pwm_pin, td->pwm_enable_state);
+ if (ret < 0) {
+ dev_err(td->dev, "setting enable state failed\n");
+ return -EINVAL;
+ }
+ val = dfll_readl(td, DFLL_OUTPUT_CFG);
+ val &= ~DFLL_OUTPUT_CFG_PWM_DIV_MASK;
+ div = DIV_ROUND_UP(td->ref_rate, td->pwm_rate);
+ val |= (div << DFLL_OUTPUT_CFG_PWM_DIV_SHIFT)
+ & DFLL_OUTPUT_CFG_PWM_DIV_MASK;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+
+ val |= DFLL_OUTPUT_CFG_PWM_ENABLE;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+ } else {
+ ret = pinctrl_select_state(td->pwm_pin, td->pwm_disable_state);
+ if (ret < 0)
+ dev_warn(td->dev, "setting disable state failed\n");
+
+ val = dfll_readl(td, DFLL_OUTPUT_CFG);
+ val &= ~DFLL_OUTPUT_CFG_PWM_ENABLE;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+ }
+
+ return 0;
+}
+
+/**
+ * dfll_set_force_output_value - set fixed value for force output
+ * @td: DFLL instance
+ * @out_val: value to force output
+ *
+ * Set the fixed value for forced output; the DFLL will output this value
+ * when force output is enabled.
+ */
+static u32 dfll_set_force_output_value(struct tegra_dfll *td, u8 out_val)
+{
+ u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE);
+
+ val = (val & DFLL_OUTPUT_FORCE_ENABLE) | (out_val & OUT_MASK);
+ dfll_writel(td, val, DFLL_OUTPUT_FORCE);
+ dfll_wmb(td);
+
+ return dfll_readl(td, DFLL_OUTPUT_FORCE);
+}
+
+/**
+ * dfll_set_force_output_enabled - enable/disable force output
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the force output
+ *
+ * Set the enable control for forced output with a fixed value.
+ */
+static void dfll_set_force_output_enabled(struct tegra_dfll *td, bool enable)
+{
+ u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE);
+
+ if (enable)
+ val |= DFLL_OUTPUT_FORCE_ENABLE;
+ else
+ val &= ~DFLL_OUTPUT_FORCE_ENABLE;
+
+ dfll_writel(td, val, DFLL_OUTPUT_FORCE);
+ dfll_wmb(td);
+}
+
+/**
+ * dfll_force_output - force output a fixed value
+ * @td: DFLL instance
+ * @out_sel: value to force output
+ *
+ * Set the fixed value for forced output; the DFLL will output this value.
+ */
+static int dfll_force_output(struct tegra_dfll *td, unsigned int out_sel)
+{
+ u32 val;
+
+ if (out_sel > OUT_MASK)
+ return -EINVAL;
+
+ val = dfll_set_force_output_value(td, out_sel);
+ if ((td->mode < DFLL_CLOSED_LOOP) &&
+ !(val & DFLL_OUTPUT_FORCE_ENABLE)) {
+ dfll_set_force_output_enabled(td, true);
+ }
+
+ return 0;
+}
+
/**
* dfll_load_lut - load the voltage lookup table
* @td: struct tegra_dfll *
@@ -539,7 +695,7 @@ static void dfll_load_i2c_lut(struct tegra_dfll *td)
lut_index = i;
val = regulator_list_hardware_vsel(td->vdd_reg,
- td->i2c_lut[lut_index]);
+ td->lut[lut_index]);
__raw_writel(val, td->lut_base + i * 4);
}
@@ -594,24 +750,41 @@ static void dfll_init_out_if(struct tegra_dfll *td)
{
u32 val;
- td->lut_min = 0;
- td->lut_max = td->i2c_lut_size - 1;
- td->lut_safe = td->lut_min + 1;
+ td->lut_min = td->lut_bottom;
+ td->lut_max = td->lut_size - 1;
+ td->lut_safe = td->lut_min + (td->lut_min < td->lut_max ? 1 : 0);
+
+ /* clear DFLL_OUTPUT_CFG before setting new value */
+ dfll_writel(td, 0, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
- dfll_i2c_writel(td, 0, DFLL_OUTPUT_CFG);
val = (td->lut_safe << DFLL_OUTPUT_CFG_SAFE_SHIFT) |
- (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
- (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
- dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG);
- dfll_i2c_wmb(td);
+ (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
+ (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
dfll_writel(td, 0, DFLL_OUTPUT_FORCE);
dfll_i2c_writel(td, 0, DFLL_INTR_EN);
dfll_i2c_writel(td, DFLL_INTR_MAX_MASK | DFLL_INTR_MIN_MASK,
DFLL_INTR_STS);
- dfll_load_i2c_lut(td);
- dfll_init_i2c_if(td);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM) {
+ u32 vinit = td->reg_init_uV;
+ int vstep = td->soc->alignment.step_uv;
+ unsigned long vmin = td->lut_uv[0];
+
+ /* set initial voltage */
+ if ((vinit >= vmin) && vstep) {
+ unsigned int vsel;
+
+ vsel = DIV_ROUND_UP((vinit - vmin), vstep);
+ dfll_force_output(td, vsel);
+ }
+ } else {
+ dfll_load_i2c_lut(td);
+ dfll_init_i2c_if(td);
+ }
}
/*
@@ -631,17 +804,17 @@ static void dfll_init_out_if(struct tegra_dfll *td)
static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
{
struct dev_pm_opp *opp;
- int i, uv;
+ int i, align_step;
opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
if (IS_ERR(opp))
return PTR_ERR(opp);
- uv = dev_pm_opp_get_voltage(opp);
+ align_step = dev_pm_opp_get_voltage(opp) / td->soc->alignment.step_uv;
dev_pm_opp_put(opp);
- for (i = 0; i < td->i2c_lut_size; i++) {
- if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
+ for (i = td->lut_bottom; i < td->lut_size; i++) {
+ if ((td->lut_uv[i] / td->soc->alignment.step_uv) >= align_step)
return i;
}
@@ -863,9 +1036,14 @@ static int dfll_lock(struct tegra_dfll *td)
return -EINVAL;
}
- dfll_i2c_set_output_enabled(td, true);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ dfll_pwm_set_output_enabled(td, true);
+ else
+ dfll_i2c_set_output_enabled(td, true);
+
dfll_set_mode(td, DFLL_CLOSED_LOOP);
dfll_set_frequency_request(td, req);
+ dfll_set_force_output_enabled(td, false);
return 0;
default:
@@ -889,7 +1067,10 @@ static int dfll_unlock(struct tegra_dfll *td)
case DFLL_CLOSED_LOOP:
dfll_set_open_loop_config(td);
dfll_set_mode(td, DFLL_OPEN_LOOP);
- dfll_i2c_set_output_enabled(td, false);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ dfll_pwm_set_output_enabled(td, false);
+ else
+ dfll_i2c_set_output_enabled(td, false);
return 0;
case DFLL_OPEN_LOOP:
@@ -1171,15 +1352,17 @@ static int attr_registers_show(struct seq_file *s, void *data)
seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
dfll_i2c_readl(td, offs));
- seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
- offs = DFLL_I2C_CLK_DIVISOR;
- seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
- __raw_readl(td->i2c_controller_base + offs));
-
- seq_puts(s, "\nLUT:\n");
- for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4)
+ if (td->pmu_if == TEGRA_DFLL_PMU_I2C) {
+ seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
+ offs = DFLL_I2C_CLK_DIVISOR;
seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
- __raw_readl(td->lut_base + offs));
+ __raw_readl(td->i2c_controller_base + offs));
+
+ seq_puts(s, "\nLUT:\n");
+ for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4)
+ seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+ __raw_readl(td->lut_base + offs));
+ }
return 0;
}
@@ -1349,15 +1532,21 @@ di_err1:
*/
static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
{
- int i, n_voltages, reg_uV;
+ int i, n_voltages, reg_uV, reg_volt_id, align_step;
+
+ if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM))
+ return -EINVAL;
+ align_step = uV / td->soc->alignment.step_uv;
n_voltages = regulator_count_voltages(td->vdd_reg);
for (i = 0; i < n_voltages; i++) {
reg_uV = regulator_list_voltage(td->vdd_reg, i);
if (reg_uV < 0)
break;
- if (uV == reg_uV)
+ reg_volt_id = reg_uV / td->soc->alignment.step_uv;
+
+ if (align_step == reg_volt_id)
return i;
}
@@ -1371,15 +1560,21 @@ static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
* */
static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
{
- int i, n_voltages, reg_uV;
+ int i, n_voltages, reg_uV, reg_volt_id, align_step;
+ if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM))
+ return -EINVAL;
+
+ align_step = uV / td->soc->alignment.step_uv;
n_voltages = regulator_count_voltages(td->vdd_reg);
for (i = 0; i < n_voltages; i++) {
reg_uV = regulator_list_voltage(td->vdd_reg, i);
if (reg_uV < 0)
break;
- if (uV <= reg_uV)
+ reg_volt_id = reg_uV / td->soc->alignment.step_uv;
+
+ if (align_step <= reg_volt_id)
return i;
}
@@ -1387,9 +1582,61 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
return -EINVAL;
}
+/*
+ * dfll_build_pwm_lut - build the PWM regulator lookup table
+ * @td: DFLL instance
+ * @v_max: Vmax from OPP table
+ *
+ * The hardware look-up table is ignored when PWM is used as the DFLL's PMIC
+ * interface; the closed-loop output then drives the duty cycle directly. The
+ * software table mapping PWM duty cycle to voltage is still built here.
+ */
+static int dfll_build_pwm_lut(struct tegra_dfll *td, unsigned long v_max)
+{
+ int i;
+ unsigned long rate, reg_volt;
+ u8 lut_bottom = MAX_DFLL_VOLTAGES;
+ int v_min = td->soc->cvb->min_millivolts * 1000;
+
+ for (i = 0; i < MAX_DFLL_VOLTAGES; i++) {
+ reg_volt = td->lut_uv[i];
+
+ /* since opp voltage is exact mv */
+ reg_volt = (reg_volt / 1000) * 1000;
+ if (reg_volt > v_max)
+ break;
+
+ td->lut[i] = i;
+ if ((lut_bottom == MAX_DFLL_VOLTAGES) && (reg_volt >= v_min))
+ lut_bottom = i;
+ }
+
+ /* determine voltage boundaries */
+ td->lut_size = i;
+ if ((lut_bottom == MAX_DFLL_VOLTAGES) ||
+ (lut_bottom + 1 >= td->lut_size)) {
+ dev_err(td->dev, "no voltage above DFLL minimum %d mV\n",
+ td->soc->cvb->min_millivolts);
+ return -EINVAL;
+ }
+ td->lut_bottom = lut_bottom;
+
+ /* determine rate boundaries */
+ rate = get_dvco_rate_below(td, td->lut_bottom);
+ if (!rate) {
+ dev_err(td->dev, "no opp below DFLL minimum voltage %d mV\n",
+ td->soc->cvb->min_millivolts);
+ return -EINVAL;
+ }
+ td->dvco_rate_min = rate;
+
+ return 0;
+}
+
/**
* dfll_build_i2c_lut - build the I2C voltage register lookup table
* @td: DFLL instance
+ * @v_max: Vmax from OPP table
*
* The DFLL hardware has 33 bytes of look-up table RAM that must be filled with
* PMIC voltage register values that span the entire DFLL operating range.
@@ -1397,33 +1644,24 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
* the soc-specific platform driver (td->soc->opp_dev) and the PMIC
* register-to-voltage mapping queried from the regulator framework.
*
- * On success, fills in td->i2c_lut and returns 0, or -err on failure.
+ * On success, fills in td->lut and returns 0, or -err on failure.
*/
-static int dfll_build_i2c_lut(struct tegra_dfll *td)
+static int dfll_build_i2c_lut(struct tegra_dfll *td, unsigned long v_max)
{
+ unsigned long rate, v, v_opp;
int ret = -EINVAL;
- int j, v, v_max, v_opp;
- int selector;
- unsigned long rate;
- struct dev_pm_opp *opp;
- int lut;
-
- rate = ULONG_MAX;
- opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
- if (IS_ERR(opp)) {
- dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
- goto out;
- }
- v_max = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
+ int j, selector, lut;
v = td->soc->cvb->min_millivolts * 1000;
lut = find_vdd_map_entry_exact(td, v);
if (lut < 0)
goto out;
- td->i2c_lut[0] = lut;
+ td->lut[0] = lut;
+ td->lut_bottom = 0;
for (j = 1, rate = 0; ; rate++) {
+ struct dev_pm_opp *opp;
+
opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
if (IS_ERR(opp))
break;
@@ -1435,39 +1673,64 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
dev_pm_opp_put(opp);
for (;;) {
- v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
+ v += max(1UL, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
if (v >= v_opp)
break;
selector = find_vdd_map_entry_min(td, v);
if (selector < 0)
goto out;
- if (selector != td->i2c_lut[j - 1])
- td->i2c_lut[j++] = selector;
+ if (selector != td->lut[j - 1])
+ td->lut[j++] = selector;
}
v = (j == MAX_DFLL_VOLTAGES - 1) ? v_max : v_opp;
selector = find_vdd_map_entry_exact(td, v);
if (selector < 0)
goto out;
- if (selector != td->i2c_lut[j - 1])
- td->i2c_lut[j++] = selector;
+ if (selector != td->lut[j - 1])
+ td->lut[j++] = selector;
if (v >= v_max)
break;
}
- td->i2c_lut_size = j;
+ td->lut_size = j;
if (!td->dvco_rate_min)
dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n",
td->soc->cvb->min_millivolts);
- else
+ else {
ret = 0;
+ for (j = 0; j < td->lut_size; j++)
+ td->lut_uv[j] =
+ regulator_list_voltage(td->vdd_reg,
+ td->lut[j]);
+ }
out:
return ret;
}
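The "v += max(1UL, ...)" step above spreads whatever voltage range is still uncovered evenly across the LUT slots that remain free, so the final step closes the gap to v_max. A reduced sketch of just that spacing rule, with made-up voltages and without the regulator-selector snapping the real code performs:

/* Illustration of the LUT voltage spacing; values are invented. */
#include <stdio.h>

#define MAX_DFLL_VOLTAGES       33      /* matches the 33-entry LUT described above */

static unsigned long max_ul(unsigned long a, unsigned long b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned long v = 900000;       /* hypothetical minimum voltage, uV */
        unsigned long v_max = 1230000;  /* hypothetical Vmax from the OPP table, uV */
        int j;

        for (j = 1; j < MAX_DFLL_VOLTAGES && v < v_max; j++) {
                v += max_ul(1UL, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
                printf("slot %2d: ~%lu uV\n", j, v);
        }
        return 0;
}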
+static int dfll_build_lut(struct tegra_dfll *td)
+{
+ unsigned long rate, v_max;
+ struct dev_pm_opp *opp;
+
+ rate = ULONG_MAX;
+ opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
+ if (IS_ERR(opp)) {
+ dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
+ return -EINVAL;
+ }
+ v_max = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ return dfll_build_pwm_lut(td, v_max);
+ else
+ return dfll_build_i2c_lut(td, v_max);
+}
+
/**
* read_dt_param - helper function for reading required parameters from the DT
* @td: DFLL instance
@@ -1526,11 +1789,56 @@ static int dfll_fetch_i2c_params(struct tegra_dfll *td)
}
td->i2c_reg = vsel_reg;
- ret = dfll_build_i2c_lut(td);
- if (ret) {
- dev_err(td->dev, "couldn't build I2C LUT\n");
+ return 0;
+}
+
+static int dfll_fetch_pwm_params(struct tegra_dfll *td)
+{
+ int ret, i;
+ u32 pwm_period;
+
+ if (!td->soc->alignment.step_uv || !td->soc->alignment.offset_uv) {
+ dev_err(td->dev,
+ "Missing step or alignment info for PWM regulator");
+ return -EINVAL;
+ }
+ for (i = 0; i < MAX_DFLL_VOLTAGES; i++)
+ td->lut_uv[i] = td->soc->alignment.offset_uv +
+ i * td->soc->alignment.step_uv;
+
+ ret = read_dt_param(td, "nvidia,pwm-tristate-microvolts",
+ &td->reg_init_uV);
+ if (!ret) {
+ dev_err(td->dev, "couldn't get initialized voltage\n");
+ return ret;
+ }
+
+ ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period);
+ if (!ret) {
+ dev_err(td->dev, "couldn't get PWM period\n");
return ret;
}
+ td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);
+
+ td->pwm_pin = devm_pinctrl_get(td->dev);
+ if (IS_ERR(td->pwm_pin)) {
+ dev_err(td->dev, "DT: missing pinctrl device\n");
+ return PTR_ERR(td->pwm_pin);
+ }
+
+ td->pwm_enable_state = pinctrl_lookup_state(td->pwm_pin,
+ "dvfs_pwm_enable");
+ if (IS_ERR(td->pwm_enable_state)) {
+ dev_err(td->dev, "DT: missing pwm enabled state\n");
+ return PTR_ERR(td->pwm_enable_state);
+ }
+
+ td->pwm_disable_state = pinctrl_lookup_state(td->pwm_pin,
+ "dvfs_pwm_disable");
+ if (IS_ERR(td->pwm_disable_state)) {
+ dev_err(td->dev, "DT: missing pwm disabled state\n");
+ return PTR_ERR(td->pwm_disable_state);
+ }
return 0;
}
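The derived values above follow directly from the DT period and the regulator alignment: the PWM clock is sized so that one requested period spans MAX_DFLL_VOLTAGES - 1 ticks, and dfll_init_out_if() later converts the tristate voltage into a forced-output selector with DIV_ROUND_UP(). A sketch with hypothetical numbers; the DT and regulator values below are invented for illustration:

/* Illustration only; period, step and offset are hypothetical. */
#include <stdio.h>

#define MAX_DFLL_VOLTAGES       33
#define NSEC_PER_SEC            1000000000UL
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long pwm_period = 2500;        /* hypothetical nvidia,pwm-period-nanoseconds */
        unsigned long step_uv = 10000;          /* hypothetical 10 mV regulator step */
        unsigned long offset_uv = 700000;       /* hypothetical lowest selectable voltage, uV */
        unsigned long reg_init_uV = 1000000;    /* hypothetical tristate/init voltage, uV */

        /* PWM clock rate such that one requested period spans MAX_DFLL_VOLTAGES - 1 ticks */
        unsigned long pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);

        /* forced-output selector picked for the init voltage, as in dfll_init_out_if() */
        unsigned long vsel = DIV_ROUND_UP(reg_init_uV - offset_uv, step_uv);

        printf("pwm_rate=%lu Hz, init selector=%lu (%lu uV)\n",
               pwm_rate, vsel, offset_uv + vsel * step_uv);
        return 0;
}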
@@ -1597,16 +1905,6 @@ int tegra_dfll_register(struct platform_device *pdev,
td->soc = soc;
- td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
- if (IS_ERR(td->vdd_reg)) {
- ret = PTR_ERR(td->vdd_reg);
- if (ret != -EPROBE_DEFER)
- dev_err(td->dev, "couldn't get vdd_cpu regulator: %d\n",
- ret);
-
- return ret;
- }
-
td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
if (IS_ERR(td->dvco_rst)) {
dev_err(td->dev, "couldn't get dvco reset\n");
@@ -1619,10 +1917,27 @@ int tegra_dfll_register(struct platform_device *pdev,
return ret;
}
- ret = dfll_fetch_i2c_params(td);
+ if (of_property_read_bool(td->dev->of_node, "nvidia,pwm-to-pmic")) {
+ td->pmu_if = TEGRA_DFLL_PMU_PWM;
+ ret = dfll_fetch_pwm_params(td);
+ } else {
+ td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
+ if (IS_ERR(td->vdd_reg)) {
+ dev_err(td->dev, "couldn't get vdd_cpu regulator\n");
+ return PTR_ERR(td->vdd_reg);
+ }
+ td->pmu_if = TEGRA_DFLL_PMU_I2C;
+ ret = dfll_fetch_i2c_params(td);
+ }
if (ret)
return ret;
+ ret = dfll_build_lut(td);
+ if (ret) {
+ dev_err(td->dev, "couldn't build LUT\n");
+ return ret;
+ }
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(td->dev, "no control register resource\n");
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
index 83352c8078f2..85d0d95223f3 100644
--- a/drivers/clk/tegra/clk-dfll.h
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -1,6 +1,6 @@
/*
* clk-dfll.h - prototypes and macros for the Tegra DFLL clocksource driver
- * Copyright (C) 2013 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2013-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -22,11 +22,14 @@
#include <linux/reset.h>
#include <linux/types.h>
+#include "cvb.h"
+
/**
* struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver
* @dev: struct device * that holds the OPP table for the DFLL
* @max_freq: maximum frequency supported on this SoC
* @cvb: CPU frequency table for this SoC
+ * @alignment: parameters of the regulator step and offset
* @init_clock_trimmers: callback to initialize clock trimmers
* @set_clock_trimmers_high: callback to tune clock trimmers for high voltage
* @set_clock_trimmers_low: callback to tune clock trimmers for low voltage
@@ -35,6 +38,7 @@ struct tegra_dfll_soc_data {
struct device *dev;
unsigned long max_freq;
const struct cvb_table *cvb;
+ struct rail_alignment alignment;
void (*init_clock_trimmers)(void);
void (*set_clock_trimmers_high)(void);
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 269d3595758b..e8ec42bf8638 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -1,7 +1,7 @@
/*
* Tegra124 DFLL FCPU clock source driver
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -21,15 +21,24 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <soc/tegra/fuse.h>
#include "clk.h"
#include "clk-dfll.h"
#include "cvb.h"
+struct dfll_fcpu_data {
+ const unsigned long *cpu_max_freq_table;
+ unsigned int cpu_max_freq_table_size;
+ const struct cvb_table *cpu_cvb_tables;
+ unsigned int cpu_cvb_tables_size;
+};
+
/* Maximum CPU frequency, indexed by CPU speedo id */
-static const unsigned long cpu_max_freq_table[] = {
+static const unsigned long tegra124_cpu_max_freq_table[] = {
[0] = 2014500000UL,
[1] = 2320500000UL,
[2] = 2116500000UL,
@@ -42,9 +51,6 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
.process_id = -1,
.min_millivolts = 900,
.max_millivolts = 1260,
- .alignment = {
- .step_uv = 10000, /* 10mV */
- },
.speedo_scale = 100,
.voltage_scale = 1000,
.entries = {
@@ -82,16 +88,493 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
},
};
+static const unsigned long tegra210_cpu_max_freq_table[] = {
+ [0] = 1912500000UL,
+ [1] = 1912500000UL,
+ [2] = 2218500000UL,
+ [3] = 1785000000UL,
+ [4] = 1632000000UL,
+ [5] = 1912500000UL,
+ [6] = 2014500000UL,
+ [7] = 1734000000UL,
+ [8] = 1683000000UL,
+ [9] = 1555500000UL,
+ [10] = 1504500000UL,
+};
+
+#define CPU_CVB_TABLE \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 1007452, -23865, 370 } }, \
+ { 306000000UL, { 1052709, -24875, 370 } }, \
+ { 408000000UL, { 1099069, -25895, 370 } }, \
+ { 510000000UL, { 1146534, -26905, 370 } }, \
+ { 612000000UL, { 1195102, -27915, 370 } }, \
+ { 714000000UL, { 1244773, -28925, 370 } }, \
+ { 816000000UL, { 1295549, -29935, 370 } }, \
+ { 918000000UL, { 1347428, -30955, 370 } }, \
+ { 1020000000UL, { 1400411, -31965, 370 } }, \
+ { 1122000000UL, { 1454497, -32975, 370 } }, \
+ { 1224000000UL, { 1509687, -33985, 370 } }, \
+ { 1326000000UL, { 1565981, -35005, 370 } }, \
+ { 1428000000UL, { 1623379, -36015, 370 } }, \
+ { 1530000000UL, { 1681880, -37025, 370 } }, \
+ { 1632000000UL, { 1741485, -38035, 370 } }, \
+ { 1734000000UL, { 1802194, -39055, 370 } }, \
+ { 1836000000UL, { 1864006, -40065, 370 } }, \
+ { 1912500000UL, { 1910780, -40815, 370 } }, \
+ { 2014500000UL, { 1227000, 0, 0 } }, \
+ { 2218500000UL, { 1227000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_XA \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 1250024, -39785, 565 } }, \
+ { 306000000UL, { 1297556, -41145, 565 } }, \
+ { 408000000UL, { 1346718, -42505, 565 } }, \
+ { 510000000UL, { 1397511, -43855, 565 } }, \
+ { 612000000UL, { 1449933, -45215, 565 } }, \
+ { 714000000UL, { 1503986, -46575, 565 } }, \
+ { 816000000UL, { 1559669, -47935, 565 } }, \
+ { 918000000UL, { 1616982, -49295, 565 } }, \
+ { 1020000000UL, { 1675926, -50645, 565 } }, \
+ { 1122000000UL, { 1736500, -52005, 565 } }, \
+ { 1224000000UL, { 1798704, -53365, 565 } }, \
+ { 1326000000UL, { 1862538, -54725, 565 } }, \
+ { 1428000000UL, { 1928003, -56085, 565 } }, \
+ { 1530000000UL, { 1995097, -57435, 565 } }, \
+ { 1606500000UL, { 2046149, -58445, 565 } }, \
+ { 1632000000UL, { 2063822, -58795, 565 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM1 \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 734429, 0, 0 } }, \
+ { 306000000UL, { 768191, 0, 0 } }, \
+ { 408000000UL, { 801953, 0, 0 } }, \
+ { 510000000UL, { 835715, 0, 0 } }, \
+ { 612000000UL, { 869477, 0, 0 } }, \
+ { 714000000UL, { 903239, 0, 0 } }, \
+ { 816000000UL, { 937001, 0, 0 } }, \
+ { 918000000UL, { 970763, 0, 0 } }, \
+ { 1020000000UL, { 1004525, 0, 0 } }, \
+ { 1122000000UL, { 1038287, 0, 0 } }, \
+ { 1224000000UL, { 1072049, 0, 0 } }, \
+ { 1326000000UL, { 1105811, 0, 0 } }, \
+ { 1428000000UL, { 1130000, 0, 0 } }, \
+ { 1555500000UL, { 1130000, 0, 0 } }, \
+ { 1632000000UL, { 1170000, 0, 0 } }, \
+ { 1734000000UL, { 1227500, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM2 \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 742283, 0, 0 } }, \
+ { 306000000UL, { 776249, 0, 0 } }, \
+ { 408000000UL, { 810215, 0, 0 } }, \
+ { 510000000UL, { 844181, 0, 0 } }, \
+ { 612000000UL, { 878147, 0, 0 } }, \
+ { 714000000UL, { 912113, 0, 0 } }, \
+ { 816000000UL, { 946079, 0, 0 } }, \
+ { 918000000UL, { 980045, 0, 0 } }, \
+ { 1020000000UL, { 1014011, 0, 0 } }, \
+ { 1122000000UL, { 1047977, 0, 0 } }, \
+ { 1224000000UL, { 1081943, 0, 0 } }, \
+ { 1326000000UL, { 1090000, 0, 0 } }, \
+ { 1479000000UL, { 1090000, 0, 0 } }, \
+ { 1555500000UL, { 1162000, 0, 0 } }, \
+ { 1683000000UL, { 1195000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 742283, 0, 0 } }, \
+ { 306000000UL, { 776249, 0, 0 } }, \
+ { 408000000UL, { 810215, 0, 0 } }, \
+ { 510000000UL, { 844181, 0, 0 } }, \
+ { 612000000UL, { 878147, 0, 0 } }, \
+ { 714000000UL, { 912113, 0, 0 } }, \
+ { 816000000UL, { 946079, 0, 0 } }, \
+ { 918000000UL, { 980045, 0, 0 } }, \
+ { 1020000000UL, { 1014011, 0, 0 } }, \
+ { 1122000000UL, { 1047977, 0, 0 } }, \
+ { 1224000000UL, { 1081943, 0, 0 } }, \
+ { 1326000000UL, { 1090000, 0, 0 } }, \
+ { 1479000000UL, { 1090000, 0, 0 } }, \
+ { 1504500000UL, { 1120000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_ODN \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 721094, 0, 0 } }, \
+ { 306000000UL, { 754040, 0, 0 } }, \
+ { 408000000UL, { 786986, 0, 0 } }, \
+ { 510000000UL, { 819932, 0, 0 } }, \
+ { 612000000UL, { 852878, 0, 0 } }, \
+ { 714000000UL, { 885824, 0, 0 } }, \
+ { 816000000UL, { 918770, 0, 0 } }, \
+ { 918000000UL, { 915716, 0, 0 } }, \
+ { 1020000000UL, { 984662, 0, 0 } }, \
+ { 1122000000UL, { 1017608, 0, 0 } }, \
+ { 1224000000UL, { 1050554, 0, 0 } }, \
+ { 1326000000UL, { 1083500, 0, 0 } }, \
+ { 1428000000UL, { 1116446, 0, 0 } }, \
+ { 1581000000UL, { 1130000, 0, 0 } }, \
+ { 1683000000UL, { 1168000, 0, 0 } }, \
+ { 1785000000UL, { 1227500, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+static struct cvb_table tegra210_cpu_cvb_tables[] = {
+ {
+ .speedo_id = 10,
+ .process_id = 0,
+ .min_millivolts = 840,
+ .max_millivolts = 1120,
+ CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 10,
+ .process_id = 1,
+ .min_millivolts = 840,
+ .max_millivolts = 1120,
+ CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 9,
+ .process_id = 0,
+ .min_millivolts = 900,
+ .max_millivolts = 1162,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 9,
+ .process_id = 1,
+ .min_millivolts = 900,
+ .max_millivolts = 1162,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 8,
+ .process_id = 0,
+ .min_millivolts = 900,
+ .max_millivolts = 1195,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 8,
+ .process_id = 1,
+ .min_millivolts = 900,
+ .max_millivolts = 1195,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 7,
+ .process_id = 0,
+ .min_millivolts = 841,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_EUCM1,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 7,
+ .process_id = 1,
+ .min_millivolts = 841,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_EUCM1,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 6,
+ .process_id = 0,
+ .min_millivolts = 870,
+ .max_millivolts = 1150,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 6,
+ .process_id = 1,
+ .min_millivolts = 870,
+ .max_millivolts = 1150,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ }
+ },
+ {
+ .speedo_id = 5,
+ .process_id = 0,
+ .min_millivolts = 818,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 5,
+ .process_id = 1,
+ .min_millivolts = 818,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 4,
+ .process_id = -1,
+ .min_millivolts = 918,
+ .max_millivolts = 1113,
+ CPU_CVB_TABLE_XA,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x17711BD,
+ }
+ },
+ {
+ .speedo_id = 3,
+ .process_id = 0,
+ .min_millivolts = 825,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_ODN,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 3,
+ .process_id = 1,
+ .min_millivolts = 825,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_ODN,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 2,
+ .process_id = 0,
+ .min_millivolts = 870,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 2,
+ .process_id = 1,
+ .min_millivolts = 870,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = 0,
+ .min_millivolts = 837,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = 1,
+ .min_millivolts = 837,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 0,
+ .process_id = 0,
+ .min_millivolts = 850,
+ .max_millivolts = 1170,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 0,
+ .process_id = 1,
+ .min_millivolts = 850,
+ .max_millivolts = 1170,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+};
+
+static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra124_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra124_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra124_cpu_cvb_tables)
+};
+
+static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra210_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra210_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra210_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra210_cpu_cvb_tables),
+};
+
+static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
+ {
+ .compatible = "nvidia,tegra124-dfll",
+ .data = &tegra124_dfll_fcpu_data,
+ },
+ {
+ .compatible = "nvidia,tegra210-dfll",
+ .data = &tegra210_dfll_fcpu_data
+ },
+ { },
+};
+
+static void get_alignment_from_dt(struct device *dev,
+ struct rail_alignment *align)
+{
+ if (of_property_read_u32(dev->of_node,
+ "nvidia,pwm-voltage-step-microvolts",
+ &align->step_uv))
+ align->step_uv = 0;
+
+ if (of_property_read_u32(dev->of_node,
+ "nvidia,pwm-min-microvolts",
+ &align->offset_uv))
+ align->offset_uv = 0;
+}
+
+static int get_alignment_from_regulator(struct device *dev,
+ struct rail_alignment *align)
+{
+ struct regulator *reg = devm_regulator_get(dev, "vdd-cpu");
+
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ align->offset_uv = regulator_list_voltage(reg, 0);
+ align->step_uv = regulator_get_linear_step(reg);
+
+ devm_regulator_put(reg);
+
+ return 0;
+}
+
static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
{
int process_id, speedo_id, speedo_value, err;
struct tegra_dfll_soc_data *soc;
+ const struct dfll_fcpu_data *fcpu_data;
+ struct rail_alignment align;
+
+ fcpu_data = of_device_get_match_data(&pdev->dev);
+ if (!fcpu_data)
+ return -ENODEV;
process_id = tegra_sku_info.cpu_process_id;
speedo_id = tegra_sku_info.cpu_speedo_id;
speedo_value = tegra_sku_info.cpu_speedo_value;
- if (speedo_id >= ARRAY_SIZE(cpu_max_freq_table)) {
+ if (speedo_id >= fcpu_data->cpu_max_freq_table_size) {
dev_err(&pdev->dev, "unknown max CPU freq for speedo_id=%d\n",
speedo_id);
return -ENODEV;
@@ -107,12 +590,22 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
return -ENODEV;
}
- soc->max_freq = cpu_max_freq_table[speedo_id];
+ if (of_property_read_bool(pdev->dev.of_node, "nvidia,pwm-to-pmic")) {
+ get_alignment_from_dt(&pdev->dev, &align);
+ } else {
+ err = get_alignment_from_regulator(&pdev->dev, &align);
+ if (err)
+ return err;
+ }
+
+ soc->max_freq = fcpu_data->cpu_max_freq_table[speedo_id];
+
+ soc->cvb = tegra_cvb_add_opp_table(soc->dev, fcpu_data->cpu_cvb_tables,
+ fcpu_data->cpu_cvb_tables_size,
+ &align, process_id, speedo_id,
+ speedo_value, soc->max_freq);
+ soc->alignment = align;
- soc->cvb = tegra_cvb_add_opp_table(soc->dev, tegra124_cpu_cvb_tables,
- ARRAY_SIZE(tegra124_cpu_cvb_tables),
- process_id, speedo_id, speedo_value,
- soc->max_freq);
if (IS_ERR(soc->cvb)) {
dev_err(&pdev->dev, "couldn't add OPP table: %ld\n",
PTR_ERR(soc->cvb));
@@ -133,20 +626,17 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
struct tegra_dfll_soc_data *soc;
soc = tegra_dfll_unregister(pdev);
- if (IS_ERR(soc))
+ if (IS_ERR(soc)) {
dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
PTR_ERR(soc));
+ return PTR_ERR(soc);
+ }
tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
return 0;
}
-static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
- { .compatible = "nvidia,tegra124-dfll", },
- { },
-};
-
static const struct dev_pm_ops tegra124_dfll_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
tegra_dfll_runtime_resume, NULL)
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index da9e8e7b5ce5..35eeb6adc68e 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -1,7 +1,7 @@
/*
* Utility functions for parsing Tegra CVB voltage tables
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -62,9 +62,9 @@ static int round_voltage(int mv, const struct rail_alignment *align, int up)
}
static int build_opp_table(struct device *dev, const struct cvb_table *table,
+ struct rail_alignment *align,
int speedo_value, unsigned long max_freq)
{
- const struct rail_alignment *align = &table->alignment;
int i, ret, dfll_mv, min_mv, max_mv;
min_mv = round_voltage(table->min_millivolts, align, UP);
@@ -109,8 +109,9 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table,
*/
const struct cvb_table *
tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
- size_t count, int process_id, int speedo_id,
- int speedo_value, unsigned long max_freq)
+ size_t count, struct rail_alignment *align,
+ int process_id, int speedo_id, int speedo_value,
+ unsigned long max_freq)
{
size_t i;
int ret;
@@ -124,7 +125,8 @@ tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
if (table->process_id != -1 && table->process_id != process_id)
continue;
- ret = build_opp_table(dev, table, speedo_value, max_freq);
+ ret = build_opp_table(dev, table, align, speedo_value,
+ max_freq);
return ret ? ERR_PTR(ret) : table;
}
diff --git a/drivers/clk/tegra/cvb.h b/drivers/clk/tegra/cvb.h
index c1f077993b2a..91a1941c21ef 100644
--- a/drivers/clk/tegra/cvb.h
+++ b/drivers/clk/tegra/cvb.h
@@ -41,6 +41,7 @@ struct cvb_cpu_dfll_data {
u32 tune0_low;
u32 tune0_high;
u32 tune1;
+ unsigned int tune_high_min_millivolts;
};
struct cvb_table {
@@ -49,7 +50,6 @@ struct cvb_table {
int min_millivolts;
int max_millivolts;
- struct rail_alignment alignment;
int speedo_scale;
int voltage_scale;
@@ -59,8 +59,9 @@ struct cvb_table {
const struct cvb_table *
tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *cvb_tables,
- size_t count, int process_id, int speedo_id,
- int speedo_value, unsigned long max_freq);
+ size_t count, struct rail_alignment *align,
+ int process_id, int speedo_id, int speedo_value,
+ unsigned long max_freq);
void tegra_cvb_remove_opp_table(struct device *dev,
const struct cvb_table *table,
unsigned long max_freq);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 8d77090ad94a..0241450f3eb3 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
num_dividers = i;
tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
+ if (!tmp) {
+ *table = ERR_PTR(-ENOMEM);
return -ENOMEM;
+ }
valid_div = 0;
*width = 0;
@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
{
struct clk_omap_divider *div;
struct clk_omap_reg *reg;
+ int ret;
if (!setup)
return NULL;
@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
div->flags |= CLK_DIVIDER_POWER_OF_TWO;
div->table = _get_div_table_from_setup(setup, &div->width);
+ if (IS_ERR(div->table)) {
+ ret = PTR_ERR(div->table);
+ kfree(div);
+ return ERR_PTR(ret);
+ }
+
div->shift = setup->bit_shift;
div->latch = -EINVAL;
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index f65cc0ff76ab..b0908ec62f73 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
if (ret)
return ret;
- zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) *
- clock_max_idx, GFP_KERNEL);
+ zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
+ GFP_KERNEL);
if (!zynqmp_data)
return -ENOMEM;
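struct_size(zynqmp_data, hws, clock_max_idx) evaluates to the header size plus clock_max_idx element sizes, with overflow checking; the removed expression multiplied sizeof(*zynqmp_data) by the count instead of the element size. A stand-alone sketch of the open-coded equivalent, using an invented structure rather than the zynqmp one:

/* Illustration of the flexible-array sizing that struct_size() replaces. */
#include <stddef.h>
#include <stdio.h>

struct clk_table {
        unsigned int num;
        void *hws[];            /* flexible array member, as in the zynqmp driver */
};

int main(void)
{
        size_t n = 16;
        /* open-coded equivalent of struct_size(p, hws, n), minus the overflow saturation */
        size_t bytes = sizeof(struct clk_table) +
                       n * sizeof(((struct clk_table *)0)->hws[0]);

        printf("%zu bytes for %zu clock handles\n", bytes, n);
        return 0;
}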
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a9e26f6a81a1..171502a356aa 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -131,7 +131,8 @@ config SUN5I_HSTIMER
config TEGRA_TIMER
bool "Tegra timer driver" if COMPILE_TEST
select CLKSRC_MMIO
- depends on ARM
+ select TIMER_OF
+ depends on ARM || ARM64
help
Enables support for the Tegra driver.
@@ -360,6 +361,16 @@ config ARM64_ERRATUM_858921
The workaround will be dynamically enabled when an affected
core is detected.
+config SUN50I_ERRATUM_UNKNOWN1
+ bool "Workaround for Allwinner A64 erratum UNKNOWN1"
+ default y
+ depends on ARM_ARCH_TIMER && ARM64 && ARCH_SUNXI
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+ This option enables a workaround for instability in the timer on
+ the Allwinner A64 SoC. The workaround will only be active if the
+ allwinner,erratum-unknown1 property is found in the timer node.
+
config ARM_GLOBAL_TIMER
bool "Support for the ARM global timer" if COMPILE_TEST
select TIMER_OF if OF
@@ -634,4 +645,13 @@ config GX6605S_TIMER
help
This option enables support for gx6605s SOC's timer.
+config MILBEAUT_TIMER
+ bool "Milbeaut timer driver" if COMPILE_TEST
+ depends on OF
+ depends on ARM
+ select TIMER_OF
+ select CLKSRC_MMIO
+ help
+ Enables support for the Milbeaut timer driver.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index cdd210ff89ea..be6e0fbc7489 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
-obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
+obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += timer-cs5535.o
obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
@@ -29,7 +29,7 @@ obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
-obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o
+obj-$(CONFIG_CLKSRC_PXA) += timer-pxa.o
obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
@@ -55,6 +55,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += timer-owl.o
+obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
obj-$(CONFIG_RDA_TIMER) += timer-rda.o
@@ -69,7 +70,7 @@ obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
-obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
+obj-$(CONFIG_CLKSRC_TANGO_XTAL) += timer-tango-xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 9a7d4dc00b6e..a8b20b65bd4b 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -326,6 +326,48 @@ static u64 notrace arm64_1188873_read_cntvct_el0(void)
}
#endif
+#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
+/*
+ * The low bits of the counter registers are indeterminate while bit 10 or
+ * greater is rolling over. Since the counter value can jump both backward
+ * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
+ * with all ones or all zeros in the low bits. Bound the loop by the maximum
+ * number of CPU cycles in 3 consecutive 24 MHz counter periods.
+ */
+#define __sun50i_a64_read_reg(reg) ({ \
+ u64 _val; \
+ int _retries = 150; \
+ \
+ do { \
+ _val = read_sysreg(reg); \
+ _retries--; \
+ } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
+ \
+ WARN_ON_ONCE(!_retries); \
+ _val; \
+})
+
+static u64 notrace sun50i_a64_read_cntpct_el0(void)
+{
+ return __sun50i_a64_read_reg(cntpct_el0);
+}
+
+static u64 notrace sun50i_a64_read_cntvct_el0(void)
+{
+ return __sun50i_a64_read_reg(cntvct_el0);
+}
+
+static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
+{
+ return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
+}
+
+static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
+{
+ return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
+}
+#endif
+
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
@@ -423,6 +465,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
},
#endif
+#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
+ {
+ .match_type = ate_match_dt,
+ .id = "allwinner,erratum-unknown1",
+ .desc = "Allwinner erratum UNKNOWN1",
+ .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
+ .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
+ .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
+ .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
+ .set_next_event_phys = erratum_set_next_event_tval_phys,
+ .set_next_event_virt = erratum_set_next_event_tval_virt,
+ },
+#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
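The Allwinner A64 workaround above re-reads the counter whenever its low 10 bits are all zeros or all ones, since those are the values the glitch can produce while bit 10 or above rolls over; the retry budget of 150 bounds the loop to roughly the CPU cycles in three consecutive 24 MHz counter periods, as the comment notes. A standalone userspace simulation of the same re-read filter, with a made-up read_fake_counter() standing in for the cntvct_el0 sysreg read:

    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK9_0 0x3ffu               /* GENMASK(9, 0) */

    /* Fake counter: occasionally returns a value whose low bits are all
     * ones or all zeros, mimicking the glitch window of the erratum. */
    static uint64_t read_fake_counter(void)
    {
        static uint64_t calls;
        static const uint64_t samples[] = { 0x7ff, 0x800, 0x801, 0x1000, 0x1001 };
        return samples[calls++ % 5];
    }

    static uint64_t stable_counter_read(void)
    {
        uint64_t val;
        int retries = 150;

        do {
            val = read_fake_counter();
            retries--;
            /* (val + 1) & 0x3ff <= 1 catches both ...000 and ...3ff */
        } while (((val + 1) & GENMASK9_0) <= 1 && retries);

        if (!retries)
            fprintf(stderr, "counter never stabilised\n");
        return val;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            printf("read %d -> %#llx\n", i,
                   (unsigned long long)stable_counter_read());
        return 0;
    }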
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 7a244b681876..34bd250d46c6 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -10,14 +10,12 @@
* published by the Free Software Foundation.
*/
-#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
-#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
@@ -388,6 +386,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
+static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+{
+ /* Clear the MCT tick interrupt */
+ if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+}
+
static int exynos4_tick_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
@@ -404,6 +409,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
mevt = container_of(evt, struct mct_clock_event_device, evt);
exynos4_mct_tick_stop(mevt);
+ exynos4_mct_tick_clear(mevt);
return 0;
}
@@ -420,8 +426,11 @@ static int set_state_periodic(struct clock_event_device *evt)
return 0;
}
-static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
+ struct mct_clock_event_device *mevt = dev_id;
+ struct clock_event_device *evt = &mevt->evt;
+
/*
* This is for supporting oneshot mode.
* Mct would generate interrupt periodically
@@ -430,16 +439,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
if (!clockevent_state_periodic(&mevt->evt))
exynos4_mct_tick_stop(mevt);
- /* Clear the MCT tick interrupt */
- if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
- exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
-}
-
-static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
-{
- struct mct_clock_event_device *mevt = dev_id;
- struct clock_event_device *evt = &mevt->evt;
-
exynos4_mct_tick_clear(mevt);
evt->event_handler(evt);
@@ -507,13 +506,12 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
int err, cpu;
struct clk *mct_clk, *tick_clk;
- tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
- clk_get(NULL, "fin_pll");
+ tick_clk = of_clk_get_by_name(np, "fin_pll");
if (IS_ERR(tick_clk))
panic("%s: unable to determine tick clock rate\n", __func__);
clk_rate = clk_get_rate(tick_clk);
- mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
+ mct_clk = of_clk_get_by_name(np, "mct");
if (IS_ERR(mct_clk))
panic("%s: unable to retrieve mct clock instance\n", __func__);
clk_prepare_enable(mct_clk);
@@ -562,7 +560,19 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
return 0;
out_irq:
- free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ if (mct_int_type == MCT_INT_PPI) {
+ free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ } else {
+ for_each_possible_cpu(cpu) {
+ struct mct_clock_event_device *pcpu_mevt =
+ per_cpu_ptr(&percpu_mct_tick, cpu);
+
+ if (pcpu_mevt->evt.irq != -1) {
+ free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
+ pcpu_mevt->evt.irq = -1;
+ }
+ }
+ }
return err;
}
@@ -581,11 +591,7 @@ static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
* timer irqs are specified after the four global timer
* irqs are specified.
*/
-#ifdef CONFIG_OF
nr_irqs = of_irq_count(np);
-#else
- nr_irqs = 0;
-#endif
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
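The exynos_mct rework above keeps exynos4_mct_tick_clear() as a pure "acknowledge the pending CSTAT bit" helper and calls it from both the ISR and the shutdown path, so a tick that fires while the event device is being shut down is not left pending. A tiny standalone model of that write-one-to-clear pattern, with an ordinary variable standing in for the memory-mapped status register (real hardware clears the bit when 1 is written; the model approximates that by clearing it directly):

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for MCT_L_INT_CSTAT (hardware: write 1 to clear bit 0). */
    static volatile uint32_t int_cstat;

    static void tick_clear(void)
    {
        /* Only issue the register write when the bit is actually set. */
        if (int_cstat & 1)
            int_cstat &= ~1u;   /* models the write-1-to-clear access */
    }

    static void tick_isr(void)
    {
        tick_clear();           /* acknowledge first ... */
        printf("event handler runs\n");
    }

    static void tick_shutdown(void)
    {
        /* stop the timer ... then drop any interrupt that raced with us */
        tick_clear();
    }

    int main(void)
    {
        int_cstat = 1;          /* pretend an interrupt fired */
        tick_isr();
        int_cstat = 1;          /* pretend one fired right before shutdown */
        tick_shutdown();
        printf("pending after shutdown: %u\n", int_cstat & 1);
        return 0;
    }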
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/timer-cs5535.c
index 1de8cac99a0e..1de8cac99a0e 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/timer-cs5535.c
diff --git a/drivers/clocksource/timer-milbeaut.c b/drivers/clocksource/timer-milbeaut.c
new file mode 100644
index 000000000000..f2019a88e3ee
--- /dev/null
+++ b/drivers/clocksource/timer-milbeaut.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Socionext Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include "timer-of.h"
+
+#define MLB_TMR_TMCSR_OFS 0x0
+#define MLB_TMR_TMR_OFS 0x4
+#define MLB_TMR_TMRLR1_OFS 0x8
+#define MLB_TMR_TMRLR2_OFS 0xc
+#define MLB_TMR_REGSZPCH 0x10
+
+#define MLB_TMR_TMCSR_OUTL BIT(5)
+#define MLB_TMR_TMCSR_RELD BIT(4)
+#define MLB_TMR_TMCSR_INTE BIT(3)
+#define MLB_TMR_TMCSR_UF BIT(2)
+#define MLB_TMR_TMCSR_CNTE BIT(1)
+#define MLB_TMR_TMCSR_TRG BIT(0)
+
+#define MLB_TMR_TMCSR_CSL_DIV2 0
+#define MLB_TMR_DIV_CNT 2
+
+#define MLB_TMR_SRC_CH (1)
+#define MLB_TMR_EVT_CH (0)
+
+#define MLB_TMR_SRC_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_SRC_CH)
+#define MLB_TMR_EVT_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_EVT_CH)
+
+#define MLB_TMR_SRC_TMCSR_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMCSR_OFS)
+#define MLB_TMR_SRC_TMR_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMR_OFS)
+#define MLB_TMR_SRC_TMRLR1_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMRLR1_OFS)
+#define MLB_TMR_SRC_TMRLR2_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMRLR2_OFS)
+
+#define MLB_TMR_EVT_TMCSR_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMCSR_OFS)
+#define MLB_TMR_EVT_TMR_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMR_OFS)
+#define MLB_TMR_EVT_TMRLR1_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR1_OFS)
+#define MLB_TMR_EVT_TMRLR2_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR2_OFS)
+
+#define MLB_TIMER_RATING 500
+
+static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *clk = dev_id;
+ struct timer_of *to = to_timer_of(clk);
+ u32 val;
+
+ val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ val &= ~MLB_TMR_TMCSR_UF;
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+
+ clk->event_handler(clk);
+
+ return IRQ_HANDLED;
+}
+
+static int mlb_set_state_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+ u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+
+ writel_relaxed(to->of_clk.period, timer_of_base(to) +
+ MLB_TMR_EVT_TMRLR1_OFS);
+ val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE |
+ MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ return 0;
+}
+
+static int mlb_set_state_oneshot(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+ u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ return 0;
+}
+
+static int mlb_clkevt_next_event(unsigned long event,
+ struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ writel_relaxed(event, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
+ writel_relaxed(MLB_TMR_TMCSR_CSL_DIV2 |
+ MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_INTE |
+ MLB_TMR_TMCSR_TRG, timer_of_base(to) +
+ MLB_TMR_EVT_TMCSR_OFS);
+ return 0;
+}
+
+static int mlb_config_clock_source(struct timer_of *to)
+{
+ writel_relaxed(0, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
+ writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMR_OFS);
+ writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS);
+ writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS);
+ writel_relaxed(BIT(4) | BIT(1) | BIT(0), timer_of_base(to) +
+ MLB_TMR_SRC_TMCSR_OFS);
+ return 0;
+}
+
+static int mlb_config_clock_event(struct timer_of *to)
+{
+ writel_relaxed(0, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ return 0;
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "mlb-clkevt",
+ .rating = MLB_TIMER_RATING,
+ .cpumask = cpu_possible_mask,
+ .features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_oneshot = mlb_set_state_oneshot,
+ .set_state_periodic = mlb_set_state_periodic,
+ .set_next_event = mlb_clkevt_next_event,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = mlb_timer_interrupt,
+ },
+};
+
+static u64 notrace mlb_timer_sched_read(void)
+{
+ return ~readl_relaxed(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS);
+}
+
+static int __init mlb_timer_init(struct device_node *node)
+{
+ int ret;
+ unsigned long rate;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ return ret;
+
+ rate = timer_of_rate(&to) / MLB_TMR_DIV_CNT;
+ mlb_config_clock_source(&to);
+ clocksource_mmio_init(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS,
+ node->name, rate, MLB_TIMER_RATING, 32,
+ clocksource_mmio_readl_down);
+ sched_clock_register(mlb_timer_sched_read, 32, rate);
+ mlb_config_clock_event(&to);
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 15,
+ 0xffffffff);
+ return 0;
+}
+TIMER_OF_DECLARE(mlb_peritimer, "socionext,milbeaut-timer",
+ mlb_timer_init);
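The Milbeaut clocksource channel added above is a 32-bit down-counter (it is reloaded with ~0 and counts toward zero), which is why mlb_timer_sched_read() returns the bitwise complement of the register and clocksource_mmio_init() is passed clocksource_mmio_readl_down. A small standalone check, with a plain variable standing in for the hardware register, that inverting a down-counter yields a monotonically increasing value:

    #include <stdio.h>
    #include <stdint.h>

    /* Down-counter model: starts at ~0 and decrements each "tick". */
    static uint32_t hw_counter = UINT32_MAX;

    static void hw_tick(uint32_t ticks)
    {
        hw_counter -= ticks;    /* wraps naturally, like the hardware */
    }

    /* What mlb_timer_sched_read() does: the complement turns "counts
     * down from ~0" into "counts up from 0". */
    static uint32_t up_count(void)
    {
        return ~hw_counter;
    }

    int main(void)
    {
        uint32_t prev = up_count();

        for (int i = 0; i < 5; i++) {
            hw_tick(1000);
            uint32_t now = up_count();
            printf("up_count=%u (delta %u)\n", now, now - prev);
            prev = now;
        }
        return 0;
    }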
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/timer-pxa.c
index 395837938301..395837938301 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/timer-pxa.c
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 431892200a08..e8163693e936 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -95,13 +95,30 @@ static int __init riscv_timer_init_dt(struct device_node *n)
struct clocksource *cs;
hartid = riscv_of_processor_hartid(n);
+ if (hartid < 0) {
+ pr_warn("Not valid hartid for node [%pOF] error = [%d]\n",
+ n, hartid);
+ return hartid;
+ }
+
cpuid = riscv_hartid_to_cpuid(hartid);
+ if (cpuid < 0) {
+ pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
+ return cpuid;
+ }
if (cpuid != smp_processor_id())
return 0;
+ pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
+ __func__, cpuid, hartid);
cs = per_cpu_ptr(&riscv_clocksource, cpuid);
- clocksource_register_hz(cs, riscv_timebase);
+ error = clocksource_register_hz(cs, riscv_timebase);
+ if (error) {
+ pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
+ error, cpuid);
+ return error;
+ }
sched_clock_register(riscv_sched_clock,
BITS_PER_LONG, riscv_timebase);
@@ -110,8 +127,8 @@ static int __init riscv_timer_init_dt(struct device_node *n)
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
if (error)
- pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
- error, cpuid);
+ pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
+ error);
return error;
}
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 3b56ea3f52af..552c5254390c 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -202,6 +202,11 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
}
rate = clk_get_rate(clk);
+ if (!rate) {
+ pr_err("Couldn't get parent clock rate\n");
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
cs->timer.base = base;
cs->timer.clk = clk;
@@ -275,6 +280,11 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem
}
rate = clk_get_rate(clk);
+ if (!rate) {
+ pr_err("Couldn't get parent clock rate\n");
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
ce->timer.base = base;
ce->timer.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/timer-tango-xtal.c
index 3f94e454ef99..3f94e454ef99 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/timer-tango-xtal.c
diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
index 4293943f4e2b..fdb3d795a409 100644
--- a/drivers/clocksource/timer-tegra20.c
+++ b/drivers/clocksource/timer-tegra20.c
@@ -15,21 +15,24 @@
*
*/
-#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/time.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/clk.h>
-#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/percpu.h>
#include <linux/sched_clock.h>
-#include <linux/delay.h>
+#include <linux/time.h>
+
+#include "timer-of.h"
+#ifdef CONFIG_ARM
#include <asm/mach/time.h>
+#endif
#define RTC_SECONDS 0x08
#define RTC_SHADOW_SECONDS 0x0c
@@ -39,74 +42,161 @@
#define TIMERUS_USEC_CFG 0x14
#define TIMERUS_CNTR_FREEZE 0x4c
-#define TIMER1_BASE 0x0
-#define TIMER2_BASE 0x8
-#define TIMER3_BASE 0x50
-#define TIMER4_BASE 0x58
-
-#define TIMER_PTV 0x0
-#define TIMER_PCR 0x4
-
+#define TIMER_PTV 0x0
+#define TIMER_PTV_EN BIT(31)
+#define TIMER_PTV_PER BIT(30)
+#define TIMER_PCR 0x4
+#define TIMER_PCR_INTR_CLR BIT(30)
+
+#ifdef CONFIG_ARM
+#define TIMER_CPU0 0x50 /* TIMER3 */
+#else
+#define TIMER_CPU0 0x90 /* TIMER10 */
+#define TIMER10_IRQ_IDX 10
+#define IRQ_IDX_FOR_CPU(cpu) (TIMER10_IRQ_IDX + cpu)
+#endif
+#define TIMER_BASE_FOR_CPU(cpu) (TIMER_CPU0 + (cpu) * 8)
+
+static u32 usec_config;
static void __iomem *timer_reg_base;
+#ifdef CONFIG_ARM
static void __iomem *rtc_base;
-
static struct timespec64 persistent_ts;
static u64 persistent_ms, last_persistent_ms;
-
static struct delay_timer tegra_delay_timer;
-
-#define timer_writel(value, reg) \
- writel_relaxed(value, timer_reg_base + (reg))
-#define timer_readl(reg) \
- readl_relaxed(timer_reg_base + (reg))
+#endif
static int tegra_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- u32 reg;
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
- reg = 0x80000000 | ((cycles > 1) ? (cycles-1) : 0);
- timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+ writel(TIMER_PTV_EN |
+ ((cycles > 1) ? (cycles - 1) : 0), /* n+1 scheme */
+ reg_base + TIMER_PTV);
return 0;
}
-static inline void timer_shutdown(struct clock_event_device *evt)
+static int tegra_timer_shutdown(struct clock_event_device *evt)
{
- timer_writel(0, TIMER3_BASE + TIMER_PTV);
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel(0, reg_base + TIMER_PTV);
+
+ return 0;
}
-static int tegra_timer_shutdown(struct clock_event_device *evt)
+static int tegra_timer_set_periodic(struct clock_event_device *evt)
{
- timer_shutdown(evt);
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel(TIMER_PTV_EN | TIMER_PTV_PER |
+ ((timer_of_rate(to_timer_of(evt)) / HZ) - 1),
+ reg_base + TIMER_PTV);
+
return 0;
}
-static int tegra_timer_set_periodic(struct clock_event_device *evt)
+static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_timer_suspend(struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
+}
+
+static void tegra_timer_resume(struct clock_event_device *evt)
+{
+ writel(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
+}
+
+#ifdef CONFIG_ARM64
+static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
+ .flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "tegra_timer",
+ .rating = 460,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_state_shutdown = tegra_timer_shutdown,
+ .set_state_periodic = tegra_timer_set_periodic,
+ .set_state_oneshot = tegra_timer_shutdown,
+ .tick_resume = tegra_timer_shutdown,
+ .suspend = tegra_timer_suspend,
+ .resume = tegra_timer_resume,
+ },
+};
+
+static int tegra_timer_setup(unsigned int cpu)
{
- u32 reg = 0xC0000000 | ((1000000 / HZ) - 1);
+ struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
+
+ irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
+ enable_irq(to->clkevt.irq);
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
+ 1, /* min */
+ 0x1fffffff); /* 29 bits */
- timer_shutdown(evt);
- timer_writel(reg, TIMER3_BASE + TIMER_PTV);
return 0;
}
-static struct clock_event_device tegra_clockevent = {
- .name = "timer0",
- .rating = 300,
- .features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DYNIRQ,
- .set_next_event = tegra_timer_set_next_event,
- .set_state_shutdown = tegra_timer_shutdown,
- .set_state_periodic = tegra_timer_set_periodic,
- .set_state_oneshot = tegra_timer_shutdown,
- .tick_resume = tegra_timer_shutdown,
+static int tegra_timer_stop(unsigned int cpu)
+{
+ struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
+
+ to->clkevt.set_state_shutdown(&to->clkevt);
+ disable_irq_nosync(to->clkevt.irq);
+
+ return 0;
+}
+#else /* CONFIG_ARM */
+static struct timer_of tegra_to = {
+ .flags = TIMER_OF_CLOCK | TIMER_OF_BASE | TIMER_OF_IRQ,
+
+ .clkevt = {
+ .name = "tegra_timer",
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DYNIRQ,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_state_shutdown = tegra_timer_shutdown,
+ .set_state_periodic = tegra_timer_set_periodic,
+ .set_state_oneshot = tegra_timer_shutdown,
+ .tick_resume = tegra_timer_shutdown,
+ .suspend = tegra_timer_suspend,
+ .resume = tegra_timer_resume,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .index = 2,
+ .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
+ .handler = tegra_timer_isr,
+ },
};
static u64 notrace tegra_read_sched_clock(void)
{
- return timer_readl(TIMERUS_CNTR_1US);
+ return readl(timer_reg_base + TIMERUS_CNTR_1US);
+}
+
+static unsigned long tegra_delay_timer_read_counter_long(void)
+{
+ return readl(timer_reg_base + TIMERUS_CNTR_1US);
}
/*
@@ -143,100 +233,155 @@ static void tegra_read_persistent_clock64(struct timespec64 *ts)
timespec64_add_ns(&persistent_ts, delta * NSEC_PER_MSEC);
*ts = persistent_ts;
}
+#endif
-static unsigned long tegra_delay_timer_read_counter_long(void)
-{
- return readl(timer_reg_base + TIMERUS_CNTR_1US);
-}
-
-static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = (struct clock_event_device *)dev_id;
- timer_writel(1<<30, TIMER3_BASE + TIMER_PCR);
- evt->event_handler(evt);
- return IRQ_HANDLED;
-}
-
-static struct irqaction tegra_timer_irq = {
- .name = "timer0",
- .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
- .handler = tegra_timer_interrupt,
- .dev_id = &tegra_clockevent,
-};
-
-static int __init tegra20_init_timer(struct device_node *np)
+static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
{
- struct clk *clk;
- unsigned long rate;
- int ret;
-
- timer_reg_base = of_iomap(np, 0);
- if (!timer_reg_base) {
- pr_err("Can't map timer registers\n");
- return -ENXIO;
- }
+ int ret = 0;
- tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
- if (tegra_timer_irq.irq <= 0) {
- pr_err("Failed to map timer IRQ\n");
- return -EINVAL;
- }
+ ret = timer_of_init(np, to);
+ if (ret < 0)
+ goto out;
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
- rate = 12000000;
- } else {
- clk_prepare_enable(clk);
- rate = clk_get_rate(clk);
- }
+ timer_reg_base = timer_of_base(to);
- switch (rate) {
+ /*
+ * Configure microsecond timers to have 1MHz clock
+ * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
+ * Uses n+1 scheme
+ */
+ switch (timer_of_rate(to)) {
case 12000000:
- timer_writel(0x000b, TIMERUS_USEC_CFG);
+ usec_config = 0x000b; /* (11+1)/(0+1) */
+ break;
+ case 12800000:
+ usec_config = 0x043f; /* (63+1)/(4+1) */
break;
case 13000000:
- timer_writel(0x000c, TIMERUS_USEC_CFG);
+ usec_config = 0x000c; /* (12+1)/(0+1) */
+ break;
+ case 16800000:
+ usec_config = 0x0453; /* (83+1)/(4+1) */
break;
case 19200000:
- timer_writel(0x045f, TIMERUS_USEC_CFG);
+ usec_config = 0x045f; /* (95+1)/(4+1) */
break;
case 26000000:
- timer_writel(0x0019, TIMERUS_USEC_CFG);
+ usec_config = 0x0019; /* (25+1)/(0+1) */
+ break;
+ case 38400000:
+ usec_config = 0x04bf; /* (191+1)/(4+1) */
+ break;
+ case 48000000:
+ usec_config = 0x002f; /* (47+1)/(0+1) */
break;
default:
- WARN(1, "Unknown clock rate");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ writel(usec_config, timer_of_base(to) + TIMERUS_USEC_CFG);
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_ARM64
+static int __init tegra_init_timer(struct device_node *np)
+{
+ int cpu, ret = 0;
+ struct timer_of *to;
+
+ to = this_cpu_ptr(&tegra_to);
+ ret = tegra_timer_common_init(np, to);
+ if (ret < 0)
+ goto out;
+
+ for_each_possible_cpu(cpu) {
+ struct timer_of *cpu_to;
+
+ cpu_to = per_cpu_ptr(&tegra_to, cpu);
+ cpu_to->of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(cpu);
+ cpu_to->of_clk.rate = timer_of_rate(to);
+ cpu_to->clkevt.cpumask = cpumask_of(cpu);
+ cpu_to->clkevt.irq =
+ irq_of_parse_and_map(np, IRQ_IDX_FOR_CPU(cpu));
+ if (!cpu_to->clkevt.irq) {
+ pr_err("%s: can't map IRQ for CPU%d\n",
+ __func__, cpu);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
+ ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
+ IRQF_TIMER | IRQF_NOBALANCING,
+ cpu_to->clkevt.name, &cpu_to->clkevt);
+ if (ret) {
+ pr_err("%s: cannot setup irq %d for CPU%d\n",
+ __func__, cpu_to->clkevt.irq, cpu);
+ ret = -EINVAL;
+ goto out_irq;
+ }
+ }
+
+ cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
+ "AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
+ tegra_timer_stop);
+
+ return ret;
+out_irq:
+ for_each_possible_cpu(cpu) {
+ struct timer_of *cpu_to;
+
+ cpu_to = per_cpu_ptr(&tegra_to, cpu);
+ if (cpu_to->clkevt.irq) {
+ free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
+ irq_dispose_mapping(cpu_to->clkevt.irq);
+ }
}
+out:
+ timer_of_cleanup(to);
+ return ret;
+}
+#else /* CONFIG_ARM */
+static int __init tegra_init_timer(struct device_node *np)
+{
+ int ret = 0;
+
+ ret = tegra_timer_common_init(np, &tegra_to);
+ if (ret < 0)
+ goto out;
- sched_clock_register(tegra_read_sched_clock, 32, 1000000);
+ tegra_to.of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(0);
+ tegra_to.of_clk.rate = 1000000; /* microsecond timer */
+ sched_clock_register(tegra_read_sched_clock, 32,
+ timer_of_rate(&tegra_to));
ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
- "timer_us", 1000000, 300, 32,
- clocksource_mmio_readl_up);
+ "timer_us", timer_of_rate(&tegra_to),
+ 300, 32, clocksource_mmio_readl_up);
if (ret) {
pr_err("Failed to register clocksource\n");
- return ret;
+ goto out;
}
tegra_delay_timer.read_current_timer =
tegra_delay_timer_read_counter_long;
- tegra_delay_timer.freq = 1000000;
+ tegra_delay_timer.freq = timer_of_rate(&tegra_to);
register_current_timer_delay(&tegra_delay_timer);
- ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
- if (ret) {
- pr_err("Failed to register timer IRQ: %d\n", ret);
- return ret;
- }
+ clockevents_config_and_register(&tegra_to.clkevt,
+ timer_of_rate(&tegra_to),
+ 0x1,
+ 0x1fffffff);
- tegra_clockevent.cpumask = cpu_possible_mask;
- tegra_clockevent.irq = tegra_timer_irq.irq;
- clockevents_config_and_register(&tegra_clockevent, 1000000,
- 0x1, 0x1fffffff);
+ return ret;
+out:
+ timer_of_cleanup(&tegra_to);
- return 0;
+ return ret;
}
-TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
static int __init tegra20_init_rtc(struct device_node *np)
{
@@ -261,3 +406,6 @@ static int __init tegra20_init_rtc(struct device_node *np)
return register_persistent_clock(tegra_read_persistent_clock64);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
+#endif
+TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra_init_timer);
+TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra_init_timer);
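The usec_config values added above all follow the comment in tegra_timer_common_init(): the register is 0xqqww with qq the dividend and ww the divisor, both n+1 encoded, chosen so that (divisor + 1)/(dividend + 1) equals the timer clock in MHz and the microsecond counter ticks at 1 MHz. A standalone helper (an illustrative sketch, not kernel code) that derives the packed value from the clock rate reproduces every entry in the switch, e.g. 0x045f for 19.2 MHz and 0x04bf for 38.4 MHz:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned long gcd_ul(unsigned long a, unsigned long b)
    {
        while (b) {
            unsigned long t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    /* Pack rate/1MHz as 0xqqww: ww = divisor - 1, qq = dividend - 1. */
    static int usec_cfg(unsigned long rate, uint16_t *cfg)
    {
        unsigned long num = rate, den = 1000000, g = gcd_ul(num, den);

        num /= g;               /* divisor + 1: the timer-clock side */
        den /= g;               /* dividend + 1: the 1 MHz side */
        if (num > 256 || den > 256)
            return -1;          /* does not fit the 8-bit fields */

        *cfg = (uint16_t)(((den - 1) << 8) | (num - 1));
        return 0;
    }

    int main(void)
    {
        static const unsigned long rates[] = {
            12000000, 12800000, 13000000, 16800000,
            19200000, 26000000, 38400000, 48000000,
        };

        for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
            uint16_t cfg;
            if (!usec_cfg(rates[i], &cfg))
                printf("%lu Hz -> 0x%04x\n", rates[i], (unsigned int)cfg);
        }
        return 0;
    }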
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 595124074821..c364027638e1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
if (IS_ERR(parent))
return -ENODEV;
+ /* Bail out if both clocks point to fck */
+ if (clk_is_match(parent, timer->fclk))
+ return 0;
+
ret = clk_set_parent(timer->fclk, parent);
if (ret < 0)
pr_err("%s: failed to set parent\n", __func__);
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
timer->pdev = pdev;
pm_runtime_enable(dev);
- pm_runtime_irq_safe(dev);
if (!timer->reserved) {
ret = pm_runtime_get_sync(dev);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 608af20a3494..b22e6bba71f1 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -207,8 +207,6 @@ comment "CPU frequency scaling drivers"
config CPUFREQ_DT
tristate "Generic DT based cpufreq driver"
depends on HAVE_CLK && OF
- # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
- depends on !CPU_THERMAL || THERMAL
select CPUFREQ_DT_PLATDEV
select PM_OPP
help
@@ -327,7 +325,6 @@ endif
config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
- depends on !CPU_THERMAL || THERMAL
select CLK_QORIQ
help
This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 688f10227793..179a1d302f48 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -25,12 +25,21 @@ config ARM_ARMADA_37XX_CPUFREQ
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.
+config ARM_ARMADA_8K_CPUFREQ
+ tristate "Armada 8K CPUFreq driver"
+ depends on ARCH_MVEBU && CPUFREQ_DT
+ help
+ This enables the CPUFreq driver support for Marvell
+ Armada 8K SoCs.
+ The Armada 8K has an AP806 which supports scaling
+ to any full integer divider.
+
+ If in doubt, say N.
+
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && HAVE_CLK
- # if CPU_THERMAL is on and THERMAL=m, ARM_BIT_LITTLE_CPUFREQ cannot be =y
- depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -38,7 +47,6 @@ config ARM_BIG_LITTLE_CPUFREQ
config ARM_SCPI_CPUFREQ
tristate "SCPI based CPUfreq driver"
depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
- depends on !CPU_THERMAL || THERMAL
help
This adds the CPUfreq driver support for ARM platforms using SCPI
protocol for CPU power management.
@@ -93,7 +101,6 @@ config ARM_KIRKWOOD_CPUFREQ
config ARM_MEDIATEK_CPUFREQ
tristate "CPU Frequency scaling support for MediaTek SoCs"
depends on ARCH_MEDIATEK && REGULATOR
- depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds the CPUFreq driver support for MediaTek SoCs.
@@ -233,7 +240,6 @@ config ARM_SA1110_CPUFREQ
config ARM_SCMI_CPUFREQ
tristate "SCMI based CPUfreq driver"
depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
- depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds the CPUfreq driver support for ARM platforms using SCMI
@@ -272,8 +278,8 @@ config ARM_TEGRA20_CPUFREQ
This adds the CPUFreq driver support for Tegra20 SOCs.
config ARM_TEGRA124_CPUFREQ
- tristate "Tegra124 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
+ bool "Tegra124 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
default y
help
This adds the CPUFreq driver support for Tegra124 SOCs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 08c071be2491..689b26c6f949 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
+obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d62fd374d5c7..c72258a44ba4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void)
{
int ret;
- if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
+ if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
+ pr_debug("Boost capabilities not present in the processor\n");
return;
+ }
acpi_cpufreq_driver.set_boost = set_boost;
acpi_cpufreq_driver.boost_enabled = boost_state(0);
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index cf62a1f64dd7..7fe52fcddcf1 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -487,6 +487,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
+ dev_pm_opp_of_register_em(policy->cpus);
+
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
new file mode 100644
index 000000000000..b3f4bd647e9b
--- /dev/null
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPUFreq support for Armada 8K
+ *
+ * Copyright (C) 2018 Marvell
+ *
+ * Omri Itach <omrii@marvell.com>
+ * Gregory Clement <gregory.clement@bootlin.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+/*
+ * Set up the OPP list with the dividers of the max frequency; the
+ * actual frequencies are filled in at runtime.
+ */
+static const int opps_div[] __initconst = {1, 2, 3, 4};
+
+static struct platform_device *armada_8k_pdev;
+
+struct freq_table {
+ struct device *cpu_dev;
+ unsigned int freq[ARRAY_SIZE(opps_div)];
+};
+
+/* If the CPUs share the same clock, then they are in the same cluster. */
+static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
+ struct cpumask *cpumask)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev;
+ struct clk *clk;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_warn("Failed to get cpu%d device\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+ if (IS_ERR(clk)) {
+ pr_warn("Cannot get clock for CPU %d\n", cpu);
+ } else {
+ if (clk_is_match(clk, cur_clk))
+ cpumask_set_cpu(cpu, cpumask);
+
+ clk_put(clk);
+ }
+ }
+}
+
+static int __init armada_8k_add_opp(struct clk *clk, struct device *cpu_dev,
+ struct freq_table *freq_tables,
+ int opps_index)
+{
+ unsigned int cur_frequency;
+ unsigned int freq;
+ int i, ret;
+
+ /* Get nominal (current) CPU frequency. */
+ cur_frequency = clk_get_rate(clk);
+ if (!cur_frequency) {
+ dev_err(cpu_dev, "Failed to get clock rate for this CPU\n");
+ return -EINVAL;
+ }
+
+ freq_tables[opps_index].cpu_dev = cpu_dev;
+
+ for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+ freq = cur_frequency / opps_div[i];
+
+ ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ if (ret)
+ return ret;
+
+ freq_tables[opps_index].freq[i] = freq;
+ }
+
+ return 0;
+}
+
+static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
+{
+ int opps_index, nb_cpus = num_possible_cpus();
+
+ for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) {
+ int i;
+
+ /* If cpu_dev is NULL then we reached the end of the array */
+ if (!freq_tables[opps_index].cpu_dev)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+ /*
+ * A 0 Hz frequency is not valid; it means this entry
+ * was never initialized, so there are no more
+ * OPPs to free.
+ */
+ if (freq_tables[opps_index].freq[i] == 0)
+ break;
+
+ dev_pm_opp_remove(freq_tables[opps_index].cpu_dev,
+ freq_tables[opps_index].freq[i]);
+ }
+ }
+
+ kfree(freq_tables);
+}
+
+static int __init armada_8k_cpufreq_init(void)
+{
+ int ret = 0, opps_index = 0, cpu, nb_cpus;
+ struct freq_table *freq_tables;
+ struct device_node *node;
+ struct cpumask cpus;
+
+ node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock");
+ if (!node || !of_device_is_available(node)) {
+ of_node_put(node);
+ return -ENODEV;
+ }
+
+ nb_cpus = num_possible_cpus();
+ freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+ cpumask_copy(&cpus, cpu_possible_mask);
+
+ /*
+ * For each CPU, this loop registers the operating points
+ * supported (which are the nominal CPU frequency and full integer
+ * divisions of it).
+ */
+ for_each_cpu(cpu, &cpus) {
+ struct cpumask shared_cpus;
+ struct device *cpu_dev;
+ struct clk *clk;
+
+ cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU %d\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+
+ if (IS_ERR(clk)) {
+ pr_err("Cannot get clock for CPU %d\n", cpu);
+ ret = PTR_ERR(clk);
+ goto remove_opp;
+ }
+
+ ret = armada_8k_add_opp(clk, cpu_dev, freq_tables, opps_index);
+ if (ret) {
+ clk_put(clk);
+ goto remove_opp;
+ }
+
+ opps_index++;
+ cpumask_clear(&shared_cpus);
+ armada_8k_get_sharing_cpus(clk, &shared_cpus);
+ dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus);
+ cpumask_andnot(&cpus, &cpus, &shared_cpus);
+ clk_put(clk);
+ }
+
+ armada_8k_pdev = platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0);
+ ret = PTR_ERR_OR_ZERO(armada_8k_pdev);
+ if (ret)
+ goto remove_opp;
+
+ platform_set_drvdata(armada_8k_pdev, freq_tables);
+
+ return 0;
+
+remove_opp:
+ armada_8k_cpufreq_free_table(freq_tables);
+ return ret;
+}
+module_init(armada_8k_cpufreq_init);
+
+static void __exit armada_8k_cpufreq_exit(void)
+{
+ struct freq_table *freq_tables = platform_get_drvdata(armada_8k_pdev);
+
+ platform_device_unregister(armada_8k_pdev);
+ armada_8k_cpufreq_free_table(freq_tables);
+}
+module_exit(armada_8k_cpufreq_exit);
+
+MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>");
+MODULE_DESCRIPTION("Armada 8K cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index fd25c21cee72..2ae978d27e61 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -42,6 +42,66 @@
*/
static struct cppc_cpudata **all_cpu_data;
+struct cppc_workaround_oem_info {
+ char oem_id[ACPI_OEM_ID_SIZE +1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+};
+
+static bool apply_hisi_workaround;
+
+static struct cppc_workaround_oem_info wa_info[] = {
+ {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP07 ",
+ .oem_revision = 0,
+ }, {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP08 ",
+ .oem_revision = 0,
+ }
+};
+
+static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
+ unsigned int perf);
+
+/*
+ * HISI platform does not support delivered performance counter and
+ * reference performance counter. It can calculate the performance using the
+ * platform specific mechanism. We reuse the desired performance register to
+ * store the real performance calculated by the platform.
+ */
+static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
+{
+ struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
+ u64 desired_perf;
+ int ret;
+
+ ret = cppc_get_desired_perf(cpunum, &desired_perf);
+ if (ret < 0)
+ return -EIO;
+
+ return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
+}
+
+static void cppc_check_hisi_workaround(void)
+{
+ struct acpi_table_header *tbl;
+ acpi_status status = AE_OK;
+ int i;
+
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
+ if (ACPI_FAILURE(status) || !tbl)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+ if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ wa_info[i].oem_revision == tbl->oem_revision)
+ apply_hisi_workaround = true;
+ }
+}
+
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
@@ -334,6 +394,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
struct cppc_cpudata *cpu = all_cpu_data[cpunum];
int ret;
+ if (apply_hisi_workaround)
+ return hisi_cppc_cpufreq_get_rate(cpunum);
+
ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
if (ret)
return ret;
@@ -386,6 +449,8 @@ static int __init cppc_cpufreq_init(void)
goto out;
}
+ cppc_check_hisi_workaround();
+
ret = cpufreq_register_driver(&cppc_cpufreq_driver);
if (ret)
goto out;
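cppc_check_hisi_workaround() above decides whether to take the fallback get_rate path by matching the PCCT header's OEM ID, OEM table ID and OEM revision against a small quirk list. A standalone sketch of that match, using fixed-width memcmp() on space-padded identifiers the way the ACPI header stores them; the field widths and sample table values here are illustrative, not taken from a real firmware table:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define OEM_ID_SIZE        6
    #define OEM_TABLE_ID_SIZE  8

    struct oem_info {
        char oem_id[OEM_ID_SIZE + 1];
        char oem_table_id[OEM_TABLE_ID_SIZE + 1];
        uint32_t oem_revision;
    };

    /* Quirk list, space-padded to the fixed ACPI field widths. */
    static const struct oem_info quirks[] = {
        { "HISI  ", "HIP07   ", 0 },
        { "HISI  ", "HIP08   ", 0 },
    };

    static int needs_workaround(const char *oem_id, const char *oem_table_id,
                                uint32_t oem_revision)
    {
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
            if (!memcmp(quirks[i].oem_id, oem_id, OEM_ID_SIZE) &&
                !memcmp(quirks[i].oem_table_id, oem_table_id, OEM_TABLE_ID_SIZE) &&
                quirks[i].oem_revision == oem_revision)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* Values a firmware table might report (made-up examples). */
        printf("HIP08 rev 0: %d\n", needs_workaround("HISI  ", "HIP08   ", 0));
        printf("other board: %d\n", needs_workaround("ACME  ", "ROCKET  ", 3));
        return 0;
    }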
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index b1c5468dca16..47729a22c159 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -119,6 +119,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "mediatek,mt8176", },
{ .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra210", },
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index e58bfcb1169e..bde28878725b 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
@@ -30,7 +29,6 @@
struct private_data {
struct opp_table *opp_table;
struct device *cpu_dev;
- struct thermal_cooling_device *cdev;
const char *reg_name;
bool have_static_opps;
};
@@ -280,6 +278,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
+ dev_pm_opp_of_register_em(policy->cpus);
+
return 0;
out_free_cpufreq_table:
@@ -297,11 +297,25 @@ out_put_clk:
return ret;
}
+static int cpufreq_online(struct cpufreq_policy *policy)
+{
+ /* We did light-weight tear down earlier, nothing to do here */
+ return 0;
+}
+
+static int cpufreq_offline(struct cpufreq_policy *policy)
+{
+ /*
+ * Preserve policy->driver_data and don't free resources on light-weight
+ * tear down.
+ */
+ return 0;
+}
+
static int cpufreq_exit(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
@@ -314,21 +328,16 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static void cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct private_data *priv = policy->driver_data;
-
- priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver dt_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = set_target,
.get = cpufreq_generic_get,
.init = cpufreq_init,
.exit = cpufreq_exit,
- .ready = cpufreq_ready,
+ .online = cpufreq_online,
+ .offline = cpufreq_offline,
.name = "cpufreq-dt",
.attr = cpufreq_dt_attr,
.suspend = cpufreq_generic_suspend,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6f23ebb395f1..0e626b00053b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -545,13 +546,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int ret, enable;
@@ -1200,28 +1201,39 @@ static int cpufreq_online(unsigned int cpu)
return -ENOMEM;
}
- cpumask_copy(policy->cpus, cpumask_of(cpu));
+ if (!new_policy && cpufreq_driver->online) {
+ ret = cpufreq_driver->online(policy);
+ if (ret) {
+ pr_debug("%s: %d: initialization failed\n", __func__,
+ __LINE__);
+ goto out_exit_policy;
+ }
- /* call driver. From then on the cpufreq must be able
- * to accept all calls to ->verify and ->setpolicy for this CPU
- */
- ret = cpufreq_driver->init(policy);
- if (ret) {
- pr_debug("initialization failed\n");
- goto out_free_policy;
- }
+ /* Recover policy->cpus using related_cpus */
+ cpumask_copy(policy->cpus, policy->related_cpus);
+ } else {
+ cpumask_copy(policy->cpus, cpumask_of(cpu));
- ret = cpufreq_table_validate_and_sort(policy);
- if (ret)
- goto out_exit_policy;
+ /*
+ * Call driver. From then on the cpufreq must be able
+ * to accept all calls to ->verify and ->setpolicy for this CPU.
+ */
+ ret = cpufreq_driver->init(policy);
+ if (ret) {
+ pr_debug("%s: %d: initialization failed\n", __func__,
+ __LINE__);
+ goto out_free_policy;
+ }
- down_write(&policy->rwsem);
+ ret = cpufreq_table_validate_and_sort(policy);
+ if (ret)
+ goto out_exit_policy;
- if (new_policy) {
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
}
+ down_write(&policy->rwsem);
/*
* affected cpus must always be the one, which are online. We aren't
* managing offline cpus here.
@@ -1305,8 +1317,6 @@ static int cpufreq_online(unsigned int cpu)
if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
__func__, cpu, ret);
- /* cpufreq_policy_free() will notify based on this */
- new_policy = false;
goto out_destroy_policy;
}
@@ -1318,6 +1328,10 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
+ if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
+ cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV)
+ policy->cdev = of_cpufreq_cooling_register(policy);
+
pr_debug("initialization complete\n");
return 0;
@@ -1405,6 +1419,12 @@ static int cpufreq_offline(unsigned int cpu)
goto unlock;
}
+ if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
+ cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
+
if (cpufreq_driver->stop_cpu)
cpufreq_driver->stop_cpu(policy);
@@ -1412,11 +1432,12 @@ static int cpufreq_offline(unsigned int cpu)
cpufreq_exit_governor(policy);
/*
- * Perform the ->exit() even during light-weight tear-down,
- * since this is a core component, and is essential for the
- * subsequent light-weight ->init() to succeed.
+ * Perform the ->offline() during light-weight tear-down, as
+ * that allows fast recovery when the CPU comes back.
*/
- if (cpufreq_driver->exit) {
+ if (cpufreq_driver->offline) {
+ cpufreq_driver->offline(policy);
+ } else if (cpufreq_driver->exit) {
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
}
@@ -1445,8 +1466,13 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
cpumask_clear_cpu(cpu, policy->real_cpus);
remove_cpu_dev_symlink(policy, dev);
- if (cpumask_empty(policy->real_cpus))
+ if (cpumask_empty(policy->real_cpus)) {
+ /* We did light-weight exit earlier, do full tear down now */
+ if (cpufreq_driver->offline)
+ cpufreq_driver->exit(policy);
+
cpufreq_policy_free(policy);
+ }
}
/**
@@ -1530,17 +1556,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
unsigned int ret_freq = 0;
- if (!cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
/*
- * Updating inactive policies is invalid, so avoid doing that. Also
- * if fast frequency switching is used with the given policy, the check
+ * If fast frequency switching is used with the given policy, the check
* against policy->cur is pointless, so skip it in that case too.
*/
- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+ if (policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
@@ -1569,10 +1594,7 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
-
- if (!policy_is_inactive(policy))
- ret_freq = __cpufreq_get(policy);
-
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
@@ -2196,12 +2218,25 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get_policy);
-/*
- * policy : current policy.
- * new_policy: policy to be set.
+/**
+ * cpufreq_set_policy - Modify cpufreq policy parameters.
+ * @policy: Policy object to modify.
+ * @new_policy: New policy data.
+ *
+ * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
+ * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
+ * the driver's ->verify() callback again and run the notifiers for it again
+ * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters
+ * of @new_policy to @policy and either invoke the driver's ->setpolicy()
+ * callback (if present) or carry out a governor update for @policy. That is,
+ * run the current governor's ->limits() callback (if the governor field in
+ * @new_policy points to the same object as the one in @policy) or replace the
+ * governor for @policy with the new one stored in @new_policy.
+ *
+ * The cpuinfo part of @policy is not updated by this function.
*/
static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+ struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
int ret;
@@ -2251,11 +2286,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
if (cpufreq_driver->setpolicy) {
policy->policy = new_policy->policy;
pr_debug("setting range\n");
- return cpufreq_driver->setpolicy(new_policy);
+ return cpufreq_driver->setpolicy(policy);
}
if (new_policy->governor == policy->governor) {
- pr_debug("cpufreq: governor limits update\n");
+ pr_debug("governor limits update\n");
cpufreq_governor_limits(policy);
return 0;
}
@@ -2276,7 +2311,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
if (!ret) {
ret = cpufreq_start_governor(policy);
if (!ret) {
- pr_debug("cpufreq: governor change\n");
+ pr_debug("governor change\n");
sched_cpufreq_governor_change(policy, old_gov);
return 0;
}
@@ -2297,11 +2332,14 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
}
/**
- * cpufreq_update_policy - re-evaluate an existing cpufreq policy
- * @cpu: CPU which shall be re-evaluated
+ * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
+ * @cpu: CPU to re-evaluate the policy for.
*
- * Useful for policy notifiers which have different necessities
- * at different times.
+ * Update the current frequency for the cpufreq policy of @cpu and use
+ * cpufreq_set_policy() to re-apply the min and max limits saved in the
+ * user_policy sub-structure of that policy, which triggers the evaluation
+ * of policy notifiers and the cpufreq driver's ->verify() callback for the
+ * policy in question, among other things.
*/
void cpufreq_update_policy(unsigned int cpu)
{
@@ -2316,23 +2354,18 @@ void cpufreq_update_policy(unsigned int cpu)
if (policy_is_inactive(policy))
goto unlock;
- pr_debug("updating policy for CPU %u\n", cpu);
- memcpy(&new_policy, policy, sizeof(*policy));
- new_policy.min = policy->user_policy.min;
- new_policy.max = policy->user_policy.max;
-
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
- if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
- if (cpufreq_suspended)
- goto unlock;
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy))))
+ goto unlock;
- new_policy.cur = cpufreq_update_current_freq(policy);
- if (WARN_ON(!new_policy.cur))
- goto unlock;
- }
+ pr_debug("updating policy for CPU %u\n", cpu);
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.min = policy->user_policy.min;
+ new_policy.max = policy->user_policy.max;
cpufreq_set_policy(policy, &new_policy);
@@ -2483,7 +2516,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
driver_data->target) ||
(driver_data->setpolicy && (driver_data->target_index ||
driver_data->target)) ||
- (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
+ (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
+ (!driver_data->online != !driver_data->offline))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1572129844a5..e2db5581489a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -31,26 +31,27 @@ static void cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
- spin_lock(&cpufreq_stats_lock);
stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
stats->last_time = cur_time;
- spin_unlock(&cpufreq_stats_lock);
}
static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
{
unsigned int count = stats->max_state;
+ spin_lock(&cpufreq_stats_lock);
memset(stats->time_in_state, 0, count * sizeof(u64));
memset(stats->trans_table, 0, count * count * sizeof(int));
stats->last_time = get_jiffies_64();
stats->total_trans = 0;
+ spin_unlock(&cpufreq_stats_lock);
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
return sprintf(buf, "%d\n", policy->stats->total_trans);
}
+cpufreq_freq_attr_ro(total_trans);
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
@@ -61,7 +62,10 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
if (policy->fast_switch_enabled)
return 0;
+ spin_lock(&cpufreq_stats_lock);
cpufreq_stats_update(stats);
+ spin_unlock(&cpufreq_stats_lock);
+
for (i = 0; i < stats->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
(unsigned long long)
@@ -69,6 +73,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
}
return len;
}
+cpufreq_freq_attr_ro(time_in_state);
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
size_t count)
@@ -77,6 +82,7 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
cpufreq_stats_clear_table(policy->stats);
return count;
}
+cpufreq_freq_attr_wo(reset);
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
@@ -126,10 +132,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_ro(trans_table);
-cpufreq_freq_attr_ro(total_trans);
-cpufreq_freq_attr_ro(time_in_state);
-cpufreq_freq_attr_wo(reset);
-
static struct attribute *default_attrs[] = {
&total_trans.attr,
&time_in_state.attr,
@@ -240,9 +242,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
if (old_index == -1 || new_index == -1 || old_index == new_index)
return;
+ spin_lock(&cpufreq_stats_lock);
cpufreq_stats_update(stats);
stats->last_index = new_index;
stats->trans_table[old_index * stats->max_state + new_index]++;
stats->total_trans++;
+ spin_unlock(&cpufreq_stats_lock);
}
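The cpufreq_stats change above moves the spinlock out of cpufreq_stats_update() and into its callers, so that accounting the time spent in the old state, advancing last_index and bumping the transition counters happen in one critical section rather than three separately locked steps. A standalone pthread sketch of that "one lock around the whole transition" pattern, using a mutex and millisecond timestamps in place of the kernel spinlock and jiffies:

    #include <stdio.h>
    #include <pthread.h>
    #include <time.h>

    struct stats {
        pthread_mutex_t lock;
        unsigned long long time_in_state[4];
        unsigned long long last_time;
        unsigned int last_index;
        unsigned long long total_trans;
    };

    static unsigned long long now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Caller must hold stats->lock, like cpufreq_stats_update() after the patch. */
    static void stats_update(struct stats *st)
    {
        unsigned long long cur = now_ms();

        st->time_in_state[st->last_index] += cur - st->last_time;
        st->last_time = cur;
    }

    static void stats_record_transition(struct stats *st, unsigned int new_index)
    {
        pthread_mutex_lock(&st->lock);
        stats_update(st);               /* account time spent in the old state */
        st->last_index = new_index;     /* ... and switch states atomically */
        st->total_trans++;
        pthread_mutex_unlock(&st->lock);
    }

    int main(void)
    {
        struct stats st = { .lock = PTHREAD_MUTEX_INITIALIZER,
                            .last_time = now_ms() };

        stats_record_transition(&st, 2);
        stats_record_transition(&st, 1);
        printf("transitions: %llu\n", st.total_trans);
        return 0;
    }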
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index d54a27c99121..940fe85db97a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -23,13 +23,10 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/platform_data/davinci-cpufreq.h>
#include <linux/platform_device.h>
#include <linux/export.h>
-#include <mach/hardware.h>
-#include <mach/cpufreq.h>
-#include <mach/common.h>
-
struct davinci_cpufreq {
struct device *dev;
struct clk *armclk;
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 60bea302abbe..2d3ef208dd70 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -323,9 +323,8 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
states = 2;
/* Allocate private data and frequency table for current cpu */
- centaur = kzalloc(sizeof(*centaur)
- + (states + 1) * sizeof(struct cpufreq_frequency_table),
- GFP_KERNEL);
+ centaur = kzalloc(struct_size(centaur, freq_table, states + 1),
+ GFP_KERNEL);
if (!centaur)
return -ENOMEM;
eps_cpu[0] = centaur;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 9fedf627e000..a4ff09f91c8f 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -52,7 +51,6 @@ static struct clk_bulk_data clks[] = {
};
static struct device *cpu_dev;
-static struct thermal_cooling_device *cdev;
static bool free_opp;
static struct cpufreq_frequency_table *freq_table;
static unsigned int max_freq;
@@ -193,16 +191,6 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}
-static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
-{
- cdev = of_cpufreq_cooling_register(policy);
-
- if (!cdev)
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev));
-}
-
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
@@ -210,26 +198,19 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
policy->clk = clks[ARM].clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq;
+ dev_pm_opp_of_register_em(policy->cpus);
return ret;
}
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
-{
- cpufreq_cooling_unregister(cdev);
-
- return 0;
-}
-
static struct cpufreq_driver imx6q_cpufreq_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
- .exit = imx6q_cpufreq_exit,
.name = "imx6q-cpufreq",
- .ready = imx6q_cpufreq_ready,
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dd66decf2087..002f5169d4eb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -50,6 +50,8 @@
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
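+/* 1/8 in fixed-point representation; used below as the initial I/O-wait boost value. */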
+#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
+
#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
@@ -895,7 +897,7 @@ static void intel_pstate_update_policies(void)
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
- (struct kobject *kobj, struct attribute *attr, char *buf) \
+ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", global.object); \
}
@@ -904,7 +906,7 @@ static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -915,7 +917,7 @@ static ssize_t show_status(struct kobject *kobj,
return ret;
}
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
@@ -929,7 +931,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
}
static ssize_t show_turbo_pct(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total, no_turbo, turbo_pct;
@@ -955,7 +957,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
}
static ssize_t show_num_pstates(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total;
@@ -976,7 +978,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
}
static ssize_t show_no_turbo(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -998,7 +1000,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return ret;
}
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1045,7 +1047,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1075,7 +1077,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1107,12 +1109,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
}
static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", hwp_boost);
}
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+ struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1444,12 +1447,6 @@ static int knl_get_turbo_pstate(void)
return ret;
}
-static int intel_pstate_get_base_pstate(struct cpudata *cpu)
-{
- return global.no_turbo || global.turbo_disabled ?
- cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
-}
-
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1470,11 +1467,9 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
- int pstate;
+ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
update_turbo_state();
- pstate = intel_pstate_get_base_pstate(cpu);
- pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
intel_pstate_set_pstate(cpu, pstate);
}
@@ -1678,17 +1673,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
static inline int32_t get_target_pstate(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
- int32_t busy_frac, boost;
+ int32_t busy_frac;
int target, avg_pstate;
busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
sample->tsc);
- boost = cpu->iowait_boost;
- cpu->iowait_boost >>= 1;
-
- if (busy_frac < boost)
- busy_frac = boost;
+ if (busy_frac < cpu->iowait_boost)
+ busy_frac = cpu->iowait_boost;
sample->busy_scaled = busy_frac * 100;
@@ -1715,11 +1707,9 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
- int max_pstate = intel_pstate_get_base_pstate(cpu);
- int min_pstate;
+ int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+ int max_pstate = max(min_pstate, cpu->max_perf_ratio);
- min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
- max_pstate = max(min_pstate, cpu->max_perf_ratio);
return clamp_t(int, pstate, min_pstate, max_pstate);
}
@@ -1767,29 +1757,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
if (smp_processor_id() != cpu->cpu)
return;
+ delta_ns = time - cpu->last_update;
if (flags & SCHED_CPUFREQ_IOWAIT) {
- cpu->iowait_boost = int_tofp(1);
- cpu->last_update = time;
- /*
- * The last time the busy was 100% so P-state was max anyway
- * so avoid overhead of computation.
- */
- if (fp_toint(cpu->sample.busy_scaled) == 100)
- return;
-
- goto set_pstate;
+ /* Start over if the CPU may have been idle. */
+ if (delta_ns > TICK_NSEC) {
+ cpu->iowait_boost = ONE_EIGHTH_FP;
+ } else if (cpu->iowait_boost) {
+ cpu->iowait_boost <<= 1;
+ if (cpu->iowait_boost > int_tofp(1))
+ cpu->iowait_boost = int_tofp(1);
+ } else {
+ cpu->iowait_boost = ONE_EIGHTH_FP;
+ }
} else if (cpu->iowait_boost) {
/* Clear iowait_boost if the CPU may have been idle. */
- delta_ns = time - cpu->last_update;
if (delta_ns > TICK_NSEC)
cpu->iowait_boost = 0;
+ else
+ cpu->iowait_boost >>= 1;
}
cpu->last_update = time;
delta_ns = time - cpu->sample.time;
if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
return;
-set_pstate:
if (intel_pstate_sample(cpu, time))
intel_pstate_adjust_pstate(cpu);
}
@@ -1976,7 +1967,8 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
if (hwp_active) {
intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
} else {
- max_state = intel_pstate_get_base_pstate(cpu);
+ max_state = global.no_turbo || global.turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
turbo_max = cpu->pstate.turbo_pstate;
}
@@ -2475,6 +2467,7 @@ static bool __init intel_pstate_no_acpi_pss(void)
kfree(pss);
}
+ pr_debug("ACPI _PSS not found\n");
return true;
}
@@ -2485,9 +2478,14 @@ static bool __init intel_pstate_no_acpi_pcch(void)
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
- return true;
+ goto not_found;
+
+ if (acpi_has_method(handle, "PCCH"))
+ return false;
- return !acpi_has_method(handle, "PCCH");
+not_found:
+ pr_debug("ACPI PCCH not found\n");
+ return true;
}
static bool __init intel_pstate_has_acpi_ppc(void)
@@ -2502,6 +2500,7 @@ static bool __init intel_pstate_has_acpi_ppc(void)
if (acpi_has_method(pr->handle, "_PPC"))
return true;
}
+ pr_debug("ACPI _PPC not found\n");
return false;
}
@@ -2539,8 +2538,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
- if ( misc_pwr & (1 << 8))
+ if (misc_pwr & (1 << 8)) {
+ pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
return true;
+ }
}
idx = acpi_match_platform_list(plat_info);
@@ -2606,22 +2607,28 @@ static int __init intel_pstate_init(void)
}
} else {
id = x86_match_cpu(intel_pstate_cpu_ids);
- if (!id)
+ if (!id) {
+ pr_info("CPU ID not supported\n");
return -ENODEV;
+ }
copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
}
- if (intel_pstate_msrs_not_valid())
+ if (intel_pstate_msrs_not_valid()) {
+ pr_info("Invalid MSRs\n");
return -ENODEV;
+ }
hwp_cpu_matched:
/*
* The Intel pstate driver will be ignored if the platform
* firmware has its own power management modes.
*/
- if (intel_pstate_platform_pwr_mgmt_exists())
+ if (intel_pstate_platform_pwr_mgmt_exists()) {
+ pr_info("P-states controlled by the platform\n");
return -ENODEV;
+ }
if (!hwp_active && hwp_only)
return -ENOTSUPP;
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 279bd9e9fa95..fb546e0d0356 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -851,7 +851,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
case TYPE_POWERSAVER:
pr_cont("Powersaver supported\n");
break;
- };
+ }
/* Doesn't hurt */
longhaul_setup_southbridge();
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index eb8920d39818..48e9829274c6 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -14,7 +14,6 @@
#include <linux/clk.h>
#include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/module.h>
@@ -48,7 +47,6 @@ struct mtk_cpu_dvfs_info {
struct regulator *sram_reg;
struct clk *cpu_clk;
struct clk *inter_clk;
- struct thermal_cooling_device *cdev;
struct list_head list_head;
int intermediate_voltage;
bool need_voltage_tracking;
@@ -307,13 +305,6 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
#define DYNAMIC_POWER "dynamic-power-coefficient"
-static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct mtk_cpu_dvfs_info *info = policy->driver_data;
-
- info->cdev = of_cpufreq_cooling_register(policy);
-}
-
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
struct device *cpu_dev;
@@ -465,6 +456,8 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = info;
policy->clk = info->cpu_clk;
+ dev_pm_opp_of_register_em(policy->cpus);
+
return 0;
}
@@ -472,7 +465,6 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
- cpufreq_cooling_unregister(info->cdev);
dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
return 0;
@@ -480,13 +472,13 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver mtk_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = mtk_cpufreq_set_target,
.get = cpufreq_generic_get,
.init = mtk_cpufreq_init,
.exit = mtk_cpufreq_exit,
- .ready = mtk_cpufreq_ready,
.name = "mtk-cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 71e81bbf031b..68052b74d28f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -133,8 +133,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
- if (!result)
+ if (!result) {
+ dev_pm_opp_of_register_em(policy->cpus);
return 0;
+ }
freq_table_free();
fail:
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 099a849396f6..1e5e64643c3a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -268,7 +268,7 @@ static int pcc_get_offset(int cpu)
if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
ret = -ENODEV;
goto out_free;
- };
+ }
offset = &(pccp->package.elements[0]);
if (!offset || offset->type != ACPI_TYPE_INTEGER) {
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 7e7ad3879c4e..d2230812fa4b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -244,6 +244,7 @@ static int init_powernv_pstates(void)
u32 len_ids, len_freqs;
u32 pstate_min, pstate_max, pstate_nominal;
u32 pstate_turbo, pstate_ultra_turbo;
+ int rc = -ENODEV;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
@@ -327,8 +328,11 @@ next:
powernv_freqs[i].frequency = freq * 1000; /* kHz */
powernv_freqs[i].driver_data = id & 0xFF;
- revmap_data = (struct pstate_idx_revmap_data *)
- kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+ revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+ if (!revmap_data) {
+ rc = -ENOMEM;
+ goto out;
+ }
revmap_data->pstate_id = id & 0xFF;
revmap_data->cpufreq_table_idx = i;
@@ -357,7 +361,7 @@ next:
return 0;
out:
of_node_put(power_mgt);
- return -ENODEV;
+ return rc;
}
/* Returns the CPU frequency corresponding to the pstate_id. */
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index d83939a1b3d4..4b0b50403901 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -10,18 +10,21 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
#define LUT_L_VAL GENMASK(7, 0)
#define LUT_CORE_COUNT GENMASK(18, 16)
+#define LUT_VOLT GENMASK(11, 0)
#define LUT_ROW_SIZE 32
#define CLK_HW_DIV 2
/* Register offsets */
#define REG_ENABLE 0x0
-#define REG_LUT_TABLE 0x110
+#define REG_FREQ_LUT 0x110
+#define REG_VOLT_LUT 0x114
#define REG_PERF_STATE 0x920
static unsigned long cpu_hw_rate, xo_rate;
@@ -70,11 +73,12 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
return policy->freq_table[index].frequency;
}
-static int qcom_cpufreq_hw_read_lut(struct device *dev,
+static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
struct cpufreq_policy *policy,
void __iomem *base)
{
u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq;
+ u32 volt;
unsigned int max_cores = cpumask_weight(policy->cpus);
struct cpufreq_frequency_table *table;
@@ -83,23 +87,28 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
return -ENOMEM;
for (i = 0; i < LUT_MAX_ENTRIES; i++) {
- data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE);
+ data = readl_relaxed(base + REG_FREQ_LUT +
+ i * LUT_ROW_SIZE);
src = FIELD_GET(LUT_SRC, data);
lval = FIELD_GET(LUT_L_VAL, data);
core_count = FIELD_GET(LUT_CORE_COUNT, data);
+ data = readl_relaxed(base + REG_VOLT_LUT +
+ i * LUT_ROW_SIZE);
+ volt = FIELD_GET(LUT_VOLT, data) * 1000;
+
if (src)
freq = xo_rate * lval / 1000;
else
freq = cpu_hw_rate / 1000;
- /* Ignore boosts in the middle of the table */
- if (core_count != max_cores) {
- table[i].frequency = CPUFREQ_ENTRY_INVALID;
- } else {
+ if (freq != prev_freq && core_count == max_cores) {
table[i].frequency = freq;
- dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i,
+ dev_pm_opp_add(cpu_dev, freq * 1000, volt);
+ dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
freq, core_count);
+ } else {
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
}
/*
@@ -116,6 +125,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
if (prev_cc != max_cores) {
prev->frequency = prev_freq;
prev->flags = CPUFREQ_BOOST_FREQ;
+ dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt);
}
break;
@@ -127,6 +137,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
table[i].frequency = CPUFREQ_TABLE_END;
policy->freq_table = table;
+ dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
return 0;
}
@@ -159,10 +170,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
struct device *dev = &global_pdev->dev;
struct of_phandle_args args;
struct device_node *cpu_np;
+ struct device *cpu_dev;
struct resource *res;
void __iomem *base;
int ret, index;
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
cpu_np = of_cpu_device_node_get(policy->cpu);
if (!cpu_np)
return -EINVAL;
@@ -199,12 +218,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = base + REG_PERF_STATE;
- ret = qcom_cpufreq_hw_read_lut(dev, policy, base);
+ ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base);
if (ret) {
dev_err(dev, "Domain-%d failed to read LUT\n", index);
goto error;
}
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_err(cpu_dev, "Failed to add OPPs\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_pm_opp_of_register_em(policy->cpus);
+
policy->fast_switch_possible = true;
return 0;
@@ -215,8 +243,10 @@ error:
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
void __iomem *base = policy->driver_data - REG_PERF_STATE;
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
kfree(policy->freq_table);
devm_iounmap(&global_pdev->dev, base);
@@ -231,7 +261,8 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = {
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qcom_cpufreq_hw_target_index,
.get = qcom_cpufreq_hw_get,
@@ -296,7 +327,7 @@ static int __init qcom_cpufreq_hw_init(void)
{
return platform_driver_register(&qcom_cpufreq_hw_driver);
}
-subsys_initcall(qcom_cpufreq_hw_init);
+device_initcall(qcom_cpufreq_hw_init);
static void __exit qcom_cpufreq_hw_exit(void)
{
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 2a3675c24032..dd64dcf89c74 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -42,7 +42,7 @@ enum _msm8996_version {
NUM_OF_MSM8996_VERSIONS,
};
-struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
{
@@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
{
- struct opp_table *opp_tables[NR_CPUS] = {0};
+ struct opp_table **opp_tables;
enum _msm8996_version msm8996_version;
struct nvmem_cell *speedbin_nvmem;
struct device_node *np;
@@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
}
kfree(speedbin);
+ opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
+ if (!opp_tables)
+ return -ENOMEM;
+
for_each_possible_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (NULL == cpu_dev) {
@@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
NULL, 0);
- if (!IS_ERR(cpufreq_dt_pdev))
+ if (!IS_ERR(cpufreq_dt_pdev)) {
+ platform_set_drvdata(pdev, opp_tables);
return 0;
+ }
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(cpu_dev, "Failed to register platform device\n");
@@ -163,13 +169,23 @@ free_opp:
break;
dev_pm_opp_put_supported_hw(opp_tables[cpu]);
}
+ kfree(opp_tables);
return ret;
}
static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
{
+ struct opp_table **opp_tables = platform_get_drvdata(pdev);
+ unsigned int cpu;
+
platform_device_unregister(cpufreq_dt_pdev);
+
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_supported_hw(opp_tables[cpu]);
+
+ kfree(opp_tables);
+
return 0;
}
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 3d773f64b4df..4295e5476264 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -31,7 +30,6 @@
struct cpu_data {
struct clk **pclk;
struct cpufreq_frequency_table *table;
- struct thermal_cooling_device *cdev;
};
/*
@@ -239,7 +237,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
- cpufreq_cooling_unregister(data->cdev);
kfree(data->pclk);
kfree(data->table);
kfree(data);
@@ -258,23 +255,15 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
return clk_set_parent(policy->clk, parent);
}
-
-static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct cpu_data *cpud = policy->driver_data;
-
- cpud->cdev = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver qoriq_cpufreq_driver = {
.name = "qoriq_cpufreq",
- .flags = CPUFREQ_CONST_LOOPS,
+ .flags = CPUFREQ_CONST_LOOPS |
+ CPUFREQ_IS_COOLING_DEV,
.init = qoriq_cpufreq_cpu_init,
.exit = qoriq_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
- .ready = qoriq_cpufreq_ready,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index dbecd7667db2..5b4289460bc9 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -584,7 +584,7 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
static int s5pv210_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- int id;
+ int id, result = 0;
/*
* HACK: This is a temporary workaround to get access to clock
@@ -594,18 +594,39 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
* this whole driver as soon as S5PV210 gets migrated to use
* cpufreq-dt driver.
*/
+ arm_regulator = regulator_get(NULL, "vddarm");
+ if (IS_ERR(arm_regulator)) {
+ if (PTR_ERR(arm_regulator) == -EPROBE_DEFER)
+ pr_debug("vddarm regulator not ready, defer\n");
+ else
+ pr_err("failed to get regulator vddarm\n");
+ return PTR_ERR(arm_regulator);
+ }
+
+ int_regulator = regulator_get(NULL, "vddint");
+ if (IS_ERR(int_regulator)) {
+ if (PTR_ERR(int_regulator) == -EPROBE_DEFER)
+ pr_debug("vddint regulator not ready, defer\n");
+ else
+ pr_err("failed to get regulator vddint\n");
+ result = PTR_ERR(int_regulator);
+ goto err_int_regulator;
+ }
+
np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
if (!np) {
pr_err("%s: failed to find clock controller DT node\n",
__func__);
- return -ENODEV;
+ result = -ENODEV;
+ goto err_clock;
}
clk_base = of_iomap(np, 0);
of_node_put(np);
if (!clk_base) {
pr_err("%s: failed to map clock registers\n", __func__);
- return -EFAULT;
+ result = -EFAULT;
+ goto err_clock;
}
for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
@@ -614,7 +635,8 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
pr_err("%s: failed to get alias of dmc node '%pOFn'\n",
__func__, np);
of_node_put(np);
- return id;
+ result = id;
+ goto err_clk_base;
}
dmc_base[id] = of_iomap(np, 0);
@@ -622,33 +644,40 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
pr_err("%s: failed to map dmc%d registers\n",
__func__, id);
of_node_put(np);
- return -EFAULT;
+ result = -EFAULT;
+ goto err_dmc;
}
}
for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
if (!dmc_base[id]) {
pr_err("%s: failed to find dmc%d node\n", __func__, id);
- return -ENODEV;
+ result = -ENODEV;
+ goto err_dmc;
}
}
- arm_regulator = regulator_get(NULL, "vddarm");
- if (IS_ERR(arm_regulator)) {
- pr_err("failed to get regulator vddarm\n");
- return PTR_ERR(arm_regulator);
- }
-
- int_regulator = regulator_get(NULL, "vddint");
- if (IS_ERR(int_regulator)) {
- pr_err("failed to get regulator vddint\n");
- regulator_put(arm_regulator);
- return PTR_ERR(int_regulator);
- }
-
register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
return cpufreq_register_driver(&s5pv210_driver);
+
+err_dmc:
+ for (id = 0; id < ARRAY_SIZE(dmc_base); ++id)
+ if (dmc_base[id]) {
+ iounmap(dmc_base[id]);
+ dmc_base[id] = NULL;
+ }
+
+err_clk_base:
+ iounmap(clk_base);
+
+err_clock:
+ regulator_put(int_regulator);
+
+err_int_regulator:
+ regulator_put(arm_regulator);
+
+ return result;
}
static struct platform_driver s5pv210_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 50b1551ba894..e6182c89df79 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -11,7 +11,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
+#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
@@ -22,7 +22,6 @@
struct scmi_data {
int domain_id;
struct device *cpu_dev;
- struct thermal_cooling_device *cdev;
};
static const struct scmi_handle *handle;
@@ -52,9 +51,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
int ret;
struct scmi_data *priv = policy->driver_data;
struct scmi_perf_ops *perf_ops = handle->perf_ops;
- u64 freq = policy->freq_table[index].frequency * 1000;
+ u64 freq = policy->freq_table[index].frequency;
- ret = perf_ops->freq_set(handle, priv->domain_id, freq, false);
+ ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
if (!ret)
arch_set_freq_scale(policy->related_cpus, freq,
policy->cpuinfo.max_freq);
@@ -103,13 +102,42 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
return 0;
}
+static int __maybe_unused
+scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+ unsigned long Hz;
+ int ret, domain;
+
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", cpu);
+ return -ENODEV;
+ }
+
+ domain = handle->perf_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ /* Get the power cost of the performance domain. */
+ Hz = *KHz * 1000;
+ ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
+ if (ret)
+ return ret;
+
+ /* The EM framework specifies the frequency in KHz. */
+ *KHz = Hz / 1000;
+
+ return 0;
+}
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret;
+ int ret, nr_opp;
unsigned int latency;
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
+ struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -136,8 +164,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
- ret = dev_pm_opp_get_opp_count(cpu_dev);
- if (ret <= 0) {
+ nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+ if (nr_opp <= 0) {
dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
ret = -EPROBE_DEFER;
goto out_free_opp;
@@ -171,12 +199,15 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = true;
+
+ em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+
return 0;
out_free_priv:
kfree(priv);
out_free_opp:
- dev_pm_opp_cpumask_remove_table(policy->cpus);
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
return ret;
}
@@ -185,25 +216,18 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
- cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
kfree(priv);
- dev_pm_opp_cpumask_remove_table(policy->related_cpus);
return 0;
}
-static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct scmi_data *priv = policy->driver_data;
-
- priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
.flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.attr = cpufreq_generic_attr,
.target_index = scmi_cpufreq_set_target,
@@ -211,7 +235,6 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
- .ready = scmi_cpufreq_ready,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 87a98ec77773..3f49427766b8 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -22,7 +22,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -34,7 +33,6 @@
struct scpi_data {
struct clk *clk;
struct device *cpu_dev;
- struct thermal_cooling_device *cdev;
};
static struct scpi_ops *scpi_ops;
@@ -170,6 +168,9 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = false;
+
+ dev_pm_opp_of_register_em(policy->cpus);
+
return 0;
out_free_cpufreq_table:
@@ -177,7 +178,7 @@ out_free_cpufreq_table:
out_free_priv:
kfree(priv);
out_free_opp:
- dev_pm_opp_cpumask_remove_table(policy->cpus);
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
return ret;
}
@@ -186,32 +187,24 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scpi_data *priv = policy->driver_data;
- cpufreq_cooling_unregister(priv->cdev);
clk_put(priv->clk);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
kfree(priv);
- dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
return 0;
}
-static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct scpi_data *priv = policy->driver_data;
-
- priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver scpi_cpufreq_driver = {
.name = "scpi-cpufreq",
.flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.attr = cpufreq_generic_attr,
.get = scpi_cpufreq_get_rate,
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
- .ready = scpi_cpufreq_ready,
.target_index = scpi_cpufreq_set_target,
};
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index fbbcb88db061..5d8a09b82efb 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -243,8 +243,7 @@ static unsigned int speedstep_get(unsigned int cpu)
unsigned int speed;
/* You're supposed to ensure CPU is online. */
- if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
- BUG();
+ BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
pr_debug("detected %u kHz as current frequency\n", speed);
return speed;
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 43530254201a..5e748c8a5c9a 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -22,11 +22,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
-#include <linux/regulator/consumer.h>
#include <linux/types.h>
struct tegra124_cpufreq_priv {
- struct regulator *vdd_cpu_reg;
struct clk *cpu_clk;
struct clk *pllp_clk;
struct clk *pllx_clk;
@@ -60,14 +58,6 @@ out:
return ret;
}
-static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
-{
- clk_set_parent(priv->cpu_clk, priv->pllp_clk);
- clk_disable_unprepare(priv->dfll_clk);
- regulator_sync_voltage(priv->vdd_cpu_reg);
- clk_set_parent(priv->cpu_clk, priv->pllx_clk);
-}
-
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
struct tegra124_cpufreq_priv *priv;
@@ -88,16 +78,10 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
- priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu");
- if (IS_ERR(priv->vdd_cpu_reg)) {
- ret = PTR_ERR(priv->vdd_cpu_reg);
- goto out_put_np;
- }
-
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
if (IS_ERR(priv->cpu_clk)) {
ret = PTR_ERR(priv->cpu_clk);
- goto out_put_vdd_cpu_reg;
+ goto out_put_np;
}
priv->dfll_clk = of_clk_get_by_name(np, "dfll");
@@ -129,15 +113,15 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_device_register_full(&cpufreq_dt_devinfo);
if (IS_ERR(priv->cpufreq_dt_pdev)) {
ret = PTR_ERR(priv->cpufreq_dt_pdev);
- goto out_switch_to_pllx;
+ goto out_put_pllp_clk;
}
platform_set_drvdata(pdev, priv);
+ of_node_put(np);
+
return 0;
-out_switch_to_pllx:
- tegra124_cpu_switch_to_pllx(priv);
out_put_pllp_clk:
clk_put(priv->pllp_clk);
out_put_pllx_clk:
@@ -146,34 +130,15 @@ out_put_dfll_clk:
clk_put(priv->dfll_clk);
out_put_cpu_clk:
clk_put(priv->cpu_clk);
-out_put_vdd_cpu_reg:
- regulator_put(priv->vdd_cpu_reg);
out_put_np:
of_node_put(np);
return ret;
}
-static int tegra124_cpufreq_remove(struct platform_device *pdev)
-{
- struct tegra124_cpufreq_priv *priv = platform_get_drvdata(pdev);
-
- platform_device_unregister(priv->cpufreq_dt_pdev);
- tegra124_cpu_switch_to_pllx(priv);
-
- clk_put(priv->pllp_clk);
- clk_put(priv->pllx_clk);
- clk_put(priv->dfll_clk);
- clk_put(priv->cpu_clk);
- regulator_put(priv->vdd_cpu_reg);
-
- return 0;
-}
-
static struct platform_driver tegra124_cpufreq_platdrv = {
.driver.name = "cpufreq-tegra124",
.probe = tegra124_cpufreq_probe,
- .remove = tegra124_cpufreq_remove,
};
static int __init tegra_cpufreq_init(void)
@@ -181,7 +146,8 @@ static int __init tegra_cpufreq_init(void)
int ret;
struct platform_device *pdev;
- if (!of_machine_is_compatible("nvidia,tegra124"))
+ if (!(of_machine_is_compatible("nvidia,tegra124") ||
+ of_machine_is_compatible("nvidia,tegra210")))
return -ENODEV;
/*
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 7e48eb5bf0a7..8caccbbd7353 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -4,7 +4,7 @@ config CPU_IDLE
bool "CPU idle PM support"
default y if ACPI || PPC_PSERIES
select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
- select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE)
+ select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) && !CPU_IDLE_GOV_TEO
help
CPU idle is a generic framework for supporting software-controlled
idle processor power management. It includes modular cross-platform
@@ -23,6 +23,15 @@ config CPU_IDLE_GOV_LADDER
config CPU_IDLE_GOV_MENU
bool "Menu governor (for tickless system)"
+config CPU_IDLE_GOV_TEO
+ bool "Timer events oriented (TEO) governor (for tickless systems)"
+ help
+ This governor implements a simplified idle state selection method
+ focused on timer events and does not do any interactivity boosting.
+
+ Some workloads benefit from using it and it generally should be safe
+ to use. Say Y here if you are not happy with the alternatives.
+
config DT_IDLE_STATES
bool
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 53342b7f1010..add9569636b5 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -22,16 +22,12 @@
#include "dt_idle_states.h"
static int init_state_node(struct cpuidle_state *idle_state,
- const struct of_device_id *matches,
+ const struct of_device_id *match_id,
struct device_node *state_node)
{
int err;
- const struct of_device_id *match_id;
const char *desc;
- match_id = of_match_node(matches, state_node);
- if (!match_id)
- return -ENODEV;
/*
* CPUidle drivers are expected to initialize the const void *data
* pointer of the passed in struct of_device_id array to the idle
@@ -160,6 +156,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
{
struct cpuidle_state *idle_state;
struct device_node *state_node, *cpu_node;
+ const struct of_device_id *match_id;
int i, err = 0;
const cpumask_t *cpumask;
unsigned int state_idx = start_idx;
@@ -180,6 +177,12 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
if (!state_node)
break;
+ match_id = of_match_node(matches, state_node);
+ if (!match_id) {
+ err = -ENODEV;
+ break;
+ }
+
if (!of_device_is_available(state_node)) {
of_node_put(state_node);
continue;
@@ -198,7 +201,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
}
idle_state = &drv->states[state_idx++];
- err = init_state_node(idle_state, matches, state_node);
+ err = init_state_node(idle_state, match_id, state_node);
if (err) {
pr_err("Parsing idle state node %pOF failed with err %d\n",
state_node, err);
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
index 1b512722689f..4d8aff5248a8 100644
--- a/drivers/cpuidle/governors/Makefile
+++ b/drivers/cpuidle/governors/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
+obj-$(CONFIG_CPU_IDLE_GOV_TEO) += teo.o
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
new file mode 100644
index 000000000000..7d05efdbd3c6
--- /dev/null
+++ b/drivers/cpuidle/governors/teo.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer events oriented CPU idle governor
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * The idea of this governor is based on the observation that on many systems
+ * timer events are two or more orders of magnitude more frequent than any
+ * other interrupts, so they are likely to be the most significant source of CPU
+ * wakeups from idle states. Moreover, information about what happened in the
+ * (relatively recent) past can be used to estimate whether or not the deepest
+ * idle state with target residency within the time to the closest timer is
+ * likely to be suitable for the upcoming idle time of the CPU and, if not, then
+ * which of the shallower idle states to choose.
+ *
+ * Of course, non-timer wakeup sources are more important in some use cases and
+ * they can be covered by taking a few of the most recent idle time intervals of the
+ * CPU into account. However, even in that case it is not necessary to consider
+ * idle duration values greater than the time till the closest timer, as the
+ * patterns that they may belong to produce average values close enough to
+ * the time till the closest timer (sleep length) anyway.
+ *
+ * Thus this governor estimates whether or not the upcoming idle time of the CPU
+ * is likely to be significantly shorter than the sleep length and selects an
+ * idle state for it in accordance with that, as follows:
+ *
+ * - Find an idle state on the basis of the sleep length and state statistics
+ * collected over time:
+ *
+ * o Find the deepest idle state whose target residency is less than or equal
+ * to the sleep length.
+ *
+ * o Select it if it matched both the sleep length and the observed idle
+ * duration in the past more often than it matched the sleep length alone
+ * (i.e. the observed idle duration was significantly shorter than the sleep
+ * length matched by it).
+ *
+ * o Otherwise, select the shallower state with the greatest matched "early"
+ * wakeups metric.
+ *
+ * - If the majority of the most recent idle duration values are below the
+ * target residency of the idle state selected so far, use those values to
+ * compute the new expected idle duration and find an idle state matching it
+ * (which has to be shallower than the one selected so far).
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched/clock.h>
+#include <linux/tick.h>
+
+/*
+ * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
+ * is used for decreasing metrics on a regular basis.
+ */
+#define PULSE 1024
+#define DECAY_SHIFT 3
+
+/*
+ * Number of the most recent idle duration values to take into consideration for
+ * the detection of wakeup patterns.
+ */
+#define INTERVALS 8
+
+/**
+ * struct teo_idle_state - Idle state data used by the TEO cpuidle governor.
+ * @early_hits: "Early" CPU wakeups "matching" this state.
+ * @hits: "On time" CPU wakeups "matching" this state.
+ * @misses: CPU wakeups "missing" this state.
+ *
+ * A CPU wakeup is "matched" by a given idle state if the idle duration measured
+ * after the wakeup is between the target residency of that state and the target
+ * residency of the next one (or if this is the deepest available idle state, it
+ * "matches" a CPU wakeup when the measured idle duration is at least equal to
+ * its target residency).
+ *
+ * Also, from the TEO governor perspective, a CPU wakeup from idle is "early" if
+ * it occurs significantly earlier than the closest expected timer event (that
+ * is, early enough to match an idle state shallower than the one matching the
+ * time till the closest timer event). Otherwise, the wakeup is "on time", or
+ * it is a "hit".
+ *
+ * A "miss" occurs when the given state doesn't match the wakeup, but it matches
+ * the time till the closest timer event used for idle state selection.
+ */
+struct teo_idle_state {
+ unsigned int early_hits;
+ unsigned int hits;
+ unsigned int misses;
+};
+
+/**
+ * struct teo_cpu - CPU data used by the TEO cpuidle governor.
+ * @time_span_ns: Time between idle state selection and post-wakeup update.
+ * @sleep_length_ns: Time till the closest timer event (at the selection time).
+ * @states: Idle states data corresponding to this CPU.
+ * @last_state: Idle state entered by the CPU last time.
+ * @interval_idx: Index of the most recent saved idle interval.
+ * @intervals: Saved idle duration values.
+ */
+struct teo_cpu {
+ u64 time_span_ns;
+ u64 sleep_length_ns;
+ struct teo_idle_state states[CPUIDLE_STATE_MAX];
+ int last_state;
+ int interval_idx;
+ unsigned int intervals[INTERVALS];
+};
+
+static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
+
+/**
+ * teo_update - Update CPU data after wakeup.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ */
+static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
+ int i, idx_hit = -1, idx_timer = -1;
+ unsigned int measured_us;
+
+ if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+ /*
+ * One of the safety nets has triggered or this was a timer
+ * wakeup (or equivalent).
+ */
+ measured_us = sleep_length_us;
+ } else {
+ unsigned int lat = drv->states[cpu_data->last_state].exit_latency;
+
+ measured_us = ktime_to_us(cpu_data->time_span_ns);
+ /*
+ * The delay between the wakeup and the first instruction
+ * executed by the CPU is not likely to be worst-case every
+ * time, so take 1/2 of the exit latency as a very rough
+ * approximation of the average of it.
+ */
+ if (measured_us >= lat)
+ measured_us -= lat / 2;
+ else
+ measured_us /= 2;
+ }
+
+ /*
+ * Decay the "early hits" metric for all of the states and find the
+ * states matching the sleep length and the measured idle duration.
+ */
+ for (i = 0; i < drv->state_count; i++) {
+ unsigned int early_hits = cpu_data->states[i].early_hits;
+
+ cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
+
+ if (drv->states[i].target_residency <= sleep_length_us) {
+ idx_timer = i;
+ if (drv->states[i].target_residency <= measured_us)
+ idx_hit = i;
+ }
+ }
+
+ /*
+ * Update the "hits" and "misses" data for the state matching the sleep
+ * length. If it matches the measured idle duration too, this is a hit,
+ * so increase the "hits" metric for it then. Otherwise, this is a
+ * miss, so increase the "misses" metric for it. In the latter case
+ * also increase the "early hits" metric for the state that actually
+ * matches the measured idle duration.
+ */
+ if (idx_timer >= 0) {
+ unsigned int hits = cpu_data->states[idx_timer].hits;
+ unsigned int misses = cpu_data->states[idx_timer].misses;
+
+ hits -= hits >> DECAY_SHIFT;
+ misses -= misses >> DECAY_SHIFT;
+
+ if (idx_timer > idx_hit) {
+ misses += PULSE;
+ if (idx_hit >= 0)
+ cpu_data->states[idx_hit].early_hits += PULSE;
+ } else {
+ hits += PULSE;
+ }
+
+ cpu_data->states[idx_timer].misses = misses;
+ cpu_data->states[idx_timer].hits = hits;
+ }
+
+ /*
+ * If the total time span between idle state selection and the "reflect"
+ * callback is greater than or equal to the sleep length determined at
+ * the idle state selection time, the wakeup is likely to be due to a
+ * timer event.
+ */
+ if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns)
+ measured_us = UINT_MAX;
+
+ /*
+ * Save idle duration values corresponding to non-timer wakeups for
+ * pattern detection.
+ */
+ cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+ if (cpu_data->interval_idx >= INTERVALS)
+ cpu_data->interval_idx = 0;
+}
+
+/**
+ * teo_find_shallower_state - Find shallower idle state matching given duration.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @state_idx: Index of the capping idle state.
+ * @duration_us: Idle duration value to match.
+ */
+static int teo_find_shallower_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int state_idx,
+ unsigned int duration_us)
+{
+ int i;
+
+ for (i = state_idx - 1; i >= 0; i--) {
+ if (drv->states[i].disabled || dev->states_usage[i].disable)
+ continue;
+
+ state_idx = i;
+ if (drv->states[i].target_residency <= duration_us)
+ break;
+ }
+ return state_idx;
+}
+
+/**
+ * teo_select - Selects the next idle state to enter.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @stop_tick: Indication on whether or not to stop the scheduler tick.
+ */
+static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ bool *stop_tick)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ unsigned int duration_us, count;
+ int max_early_idx, idx, i;
+ ktime_t delta_tick;
+
+ if (cpu_data->last_state >= 0) {
+ teo_update(drv, dev);
+ cpu_data->last_state = -1;
+ }
+
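+ /* Record the time of idle state selection; teo_reflect() turns it into the measured span. */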
+ cpu_data->time_span_ns = local_clock();
+
+ cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
+ duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+
+ count = 0;
+ max_early_idx = -1;
+ idx = -1;
+
+ for (i = 0; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+ struct cpuidle_state_usage *su = &dev->states_usage[i];
+
+ if (s->disabled || su->disable) {
+ /*
+ * If the "early hits" metric of a disabled state is
+ * greater than the current maximum, it should be taken
+ * into account, because it would be a mistake to select
+ * a deeper state with lower "early hits" metric. The
+ * index cannot be changed to point to it, however, so
+ * just increase the max count alone and let the index
+ * still point to a shallower idle state.
+ */
+ if (max_early_idx >= 0 &&
+ count < cpu_data->states[i].early_hits)
+ count = cpu_data->states[i].early_hits;
+
+ continue;
+ }
+
+ if (idx < 0)
+ idx = i; /* first enabled state */
+
+ if (s->target_residency > duration_us)
+ break;
+
+ if (s->exit_latency > latency_req) {
+ /*
+ * If we break out of the loop for latency reasons, use
+ * the target residency of the selected state as the
+ * expected idle duration to avoid stopping the tick
+ * as long as that target residency is low enough.
+ */
+ duration_us = drv->states[idx].target_residency;
+ goto refine;
+ }
+
+ idx = i;
+
+ if (count < cpu_data->states[i].early_hits &&
+ !(tick_nohz_tick_stopped() &&
+ drv->states[i].target_residency < TICK_USEC)) {
+ count = cpu_data->states[i].early_hits;
+ max_early_idx = i;
+ }
+ }
+
+ /*
+ * If the "hits" metric of the idle state matching the sleep length is
+ * greater than its "misses" metric, that is the one to use. Otherwise,
+ * it is more likely that one of the shallower states will match the
+ * idle duration observed after wakeup, so take the one with the maximum
+ * "early hits" metric, but if that cannot be determined, just use the
+ * state selected so far.
+ */
+ if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
+ max_early_idx >= 0) {
+ idx = max_early_idx;
+ duration_us = drv->states[idx].target_residency;
+ }
+
+refine:
+ if (idx < 0) {
+ idx = 0; /* No states enabled. Must use 0. */
+ } else if (idx > 0) {
+ u64 sum = 0;
+
+ count = 0;
+
+ /*
+ * Count and sum the most recent idle duration values less than
+ * the target residency of the state selected so far.
+ */
+ for (i = 0; i < INTERVALS; i++) {
+ unsigned int val = cpu_data->intervals[i];
+
+ if (val >= drv->states[idx].target_residency)
+ continue;
+
+ count++;
+ sum += val;
+ }
+
+ /*
+ * Give up unless the majority of the most recent idle duration
+ * values are in the interesting range.
+ */
+ if (count > INTERVALS / 2) {
+ unsigned int avg_us = div64_u64(sum, count);
+
+ /*
+ * Avoid spending too much time in an idle state that
+ * would be too shallow.
+ */
+ if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
+ idx = teo_find_shallower_state(drv, dev, idx, avg_us);
+ duration_us = avg_us;
+ }
+ }
+ }
+
+ /*
+ * Don't stop the tick if the selected state is a polling one or if the
+ * expected idle duration is shorter than the tick period length.
+ */
+ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+ duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
+ unsigned int delta_tick_us = ktime_to_us(delta_tick);
+
+ *stop_tick = false;
+
+ /*
+ * The tick is not going to be stopped, so if the target
+ * residency of the state to be returned is not within the time
+ * till the closest timer including the tick, try to correct
+ * that.
+ */
+ if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+ }
+
+ return idx;
+}
+
+/**
+ * teo_reflect - Note that governor data for the CPU need to be updated.
+ * @dev: Target CPU.
+ * @state: Entered state.
+ */
+static void teo_reflect(struct cpuidle_device *dev, int state)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+
+ cpu_data->last_state = state;
+ /*
+ * If the wakeup was not "natural", but triggered by one of the safety
+ * nets, assume that the CPU might have been idle for the entire sleep
+ * length time.
+ */
+ if (dev->poll_time_limit ||
+ (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+ dev->poll_time_limit = false;
+ cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+ } else {
+ cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+ }
+}
+
+/**
+ * teo_enable_device - Initialize the governor's data for the target CPU.
+ * @drv: cpuidle driver (not used).
+ * @dev: Target CPU.
+ */
+static int teo_enable_device(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ int i;
+
+ memset(cpu_data, 0, sizeof(*cpu_data));
+
+ for (i = 0; i < INTERVALS; i++)
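+ /* Mark all saved idle intervals as unused until measured data is collected. */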
+ cpu_data->intervals[i] = UINT_MAX;
+
+ return 0;
+}
+
+static struct cpuidle_governor teo_governor = {
+ .name = "teo",
+ .rating = 19,
+ .enable = teo_enable_device,
+ .select = teo_select,
+ .reflect = teo_reflect,
+};
+
+static int __init teo_governor_init(void)
+{
+ return cpuidle_register_governor(&teo_governor);
+}
+
+postcore_initcall(teo_governor_init);
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index b17d153e724f..23a1b27579a5 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
- u64 limit = TICK_USEC;
+ u64 limit = TICK_NSEC;
int i;
for (i = 1; i < drv->state_count; i++) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5a90075f719d..0be55fcc19ba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
depends on ARCH_BCM_IPROC
depends on MAILBOX
default m
+ select CRYPTO_AUTHENC
select CRYPTO_DES
select CRYPTO_MD5
select CRYPTO_SHA1
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 63cb6956c948..06574a884715 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -40,9 +40,11 @@
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
@@ -283,9 +285,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
@@ -1035,6 +1037,10 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ rc = crypto_register_rng(&alg->alg.u.rng);
+ break;
+
default:
rc = crypto_register_skcipher(&alg->alg.u.cipher);
break;
@@ -1064,6 +1070,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
crypto_unregister_aead(&alg->alg.u.aead);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&alg->alg.u.rng);
+ break;
+
default:
crypto_unregister_skcipher(&alg->alg.u.cipher);
}
@@ -1122,6 +1132,69 @@ static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
PPC4XX_TMO_ERR_INT);
}
+static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
+ u8 *data, unsigned int max)
+{
+ unsigned int i, curr = 0;
+ u32 val[2];
+
+ do {
+ /* trigger PRN generation */
+ writel(PPC4XX_PRNG_CTRL_AUTO_EN,
+ dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+
+ for (i = 0; i < 1024; i++) {
+ /* usually 19 iterations are enough */
+ if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
+ CRYPTO4XX_PRNG_STAT_BUSY))
+ continue;
+
+ val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
+ val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
+ break;
+ }
+ if (i == 1024)
+ return -ETIMEDOUT;
+
+ if ((max - curr) >= 8) {
+ memcpy(data, &val, 8);
+ data += 8;
+ curr += 8;
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - curr);
+ break;
+ }
+ } while (curr < max);
+
+ return curr;
+}
+
+static int crypto4xx_prng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dstn, unsigned int dlen)
+{
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_device *dev;
+ int ret;
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
+ dev = amcc_alg->dev;
+
+ mutex_lock(&dev->core_dev->rng_lock);
+ ret = ppc4xx_prng_data_read(dev, dstn, dlen);
+ mutex_unlock(&dev->core_dev->rng_lock);
+ return ret;
+}
+
+static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
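+ /* The seed is ignored; this function does not seed the hardware PRNG. */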
+ return 0;
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1291,6 +1364,18 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_module = THIS_MODULE,
},
} },
+ { .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "crypto4xx_rng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .generate = crypto4xx_prng_generate,
+ .seed = crypto4xx_prng_seed,
+ .seedsize = 0,
+ } },
};
/**
@@ -1360,6 +1445,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
core_dev->dev->core_dev = core_dev;
core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
+ mutex_init(&core_dev->rng_lock);
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
@@ -1439,6 +1525,7 @@ static int crypto4xx_remove(struct platform_device *ofdev)
tasklet_kill(&core_dev->tasklet);
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
+ mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
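
For reference, the "stdrng" instance registered above is consumed through the generic kernel RNG API; a minimal sketch of such a user follows (function name, buffer handling and error paths are illustrative, not part of the patch):

#include <linux/err.h>
#include <crypto/rng.h>

static int example_get_prng_bytes(u8 *buf, unsigned int len)
{
        struct crypto_rng *rng;
        int ret;

        rng = crypto_alloc_rng("stdrng", 0, 0);  /* may bind to crypto4xx_rng */
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        /* crypto4xx_prng_seed() is a no-op (seedsize = 0), but reset is the API contract */
        ret = crypto_rng_reset(rng, NULL, 0);
        if (!ret)
                ret = crypto_rng_get_bytes(rng, buf, len);

        crypto_free_rng(rng);
        return ret;
}
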
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index e2ca56722f07..18df695ca6b1 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -23,8 +23,10 @@
#define __CRYPTO4XX_CORE_H__
#include <linux/ratelimit.h>
+#include <linux/mutex.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
@@ -119,6 +121,7 @@ struct crypto4xx_core_device {
u32 irq;
struct tasklet_struct tasklet;
spinlock_t lock;
+ struct mutex rng_lock;
};
struct crypto4xx_ctx {
@@ -143,6 +146,7 @@ struct crypto4xx_alg_common {
struct skcipher_alg cipher;
struct ahash_alg hash;
struct aead_alg aead;
+ struct rng_alg rng;
} u;
};
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 472331787e04..80c67490bbf6 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -100,6 +100,7 @@
#define CRYPTO4XX_ENDIAN_CFG 0x000600d8
#define CRYPTO4XX_PRNG_STAT 0x00070000
+#define CRYPTO4XX_PRNG_STAT_BUSY 0x1
#define CRYPTO4XX_PRNG_CTRL 0x00070004
#define CRYPTO4XX_PRNG_SEED_L 0x00070008
#define CRYPTO4XX_PRNG_SEED_H 0x0007000c
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 5e63742b0d22..53ab1f140a26 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
/* Find the TRNG device node and map it */
trng = of_find_matching_node(NULL, ppc4xx_trng_match);
- if (!trng || !of_device_is_available(trng))
+ if (!trng || !of_device_is_available(trng)) {
+ of_node_put(trng);
return;
+ }
dev->trng_base = of_iomap(trng, 0);
of_node_put(trng);
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
index 931d22531f51..7bbda51b7337 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.h
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -26,9 +26,9 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
#else
static inline void ppc4xx_trng_probe(
- struct crypto4xx_device *dev __maybe_unused) { }
+ struct crypto4xx_core_device *dev __maybe_unused) { }
static inline void ppc4xx_trng_remove(
- struct crypto4xx_device *dev __maybe_unused) { }
+ struct crypto4xx_core_device *dev __maybe_unused) { }
#endif
#endif
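
The crypto4xx_trng.c fix above follows the usual OF node reference-counting rule: a successful of_find_matching_node() must be balanced by of_node_put() on every path, including the early return. Condensed into a standalone sketch (function name illustrative):

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *example_map_matching_node(const struct of_device_id *matches)
{
        struct device_node *np;
        void __iomem *base = NULL;

        np = of_find_matching_node(NULL, matches);  /* takes a reference on np */
        if (np && of_device_is_available(np))
                base = of_iomap(np, 0);

        of_node_put(np);  /* safe even when np is NULL */
        return base;
}
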
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 438e1ffb2ec0..65bf1a299562 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -785,7 +785,7 @@ static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
}
err = des_ekey(tmp, key);
- if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
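
The CRYPTO_TFM_REQ_WEAK_KEY to CRYPTO_TFM_REQ_FORBID_WEAK_KEYS rename seen here (and in the other drivers below) keeps the original semantics: a weak single-DES key is rejected only when the caller explicitly forbids weak keys. A standalone sketch of that check, assuming the same des_ekey() convention used in these hunks (a return value of 0 means a weak key):

#include <linux/errno.h>
#include <linux/crypto.h>
#include <crypto/des.h>

static int example_des_check_key(struct crypto_ablkcipher *tfm,
                                 const u8 *key, unsigned int keylen)
{
        u32 tmp[DES_EXPKEY_WORDS];

        if (keylen != DES_KEY_SIZE) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if (!des_ekey(tmp, key) &&
            (crypto_ablkcipher_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
                return -EINVAL;
        }

        return 0;
}
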
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index f3442c2bdbdc..57e5dca3253f 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -135,8 +135,6 @@
#define regk_crypto_ext 0x00000001
#define regk_crypto_hmac_sha1 0x00000007
#define regk_crypto_hmac_sha256 0x00000009
-#define regk_crypto_hmac_sha384 0x0000000b
-#define regk_crypto_hmac_sha512 0x0000000d
#define regk_crypto_init 0x00000000
#define regk_crypto_key_128 0x00000000
#define regk_crypto_key_192 0x00000001
@@ -144,8 +142,6 @@
#define regk_crypto_null 0x00000000
#define regk_crypto_sha1 0x00000006
#define regk_crypto_sha256 0x00000008
-#define regk_crypto_sha384 0x0000000a
-#define regk_crypto_sha512 0x0000000c
/* DMA descriptor structures */
struct pdma_descr_ctrl {
@@ -190,8 +186,6 @@ struct pdma_stat_descr {
/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1 1
#define ARTPEC6_CRYPTO_HASH_SHA256 2
-#define ARTPEC6_CRYPTO_HASH_SHA384 3
-#define ARTPEC6_CRYPTO_HASH_SHA512 4
/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
@@ -284,6 +278,7 @@ enum artpec6_crypto_hash_flags {
struct artpec6_crypto_req_common {
struct list_head list;
+ struct list_head complete_in_progress;
struct artpec6_crypto_dma_descriptors *dma;
struct crypto_async_request *req;
void (*complete)(struct crypto_async_request *req);
@@ -291,11 +286,11 @@ struct artpec6_crypto_req_common {
};
struct artpec6_hash_request_context {
- char partial_buffer[SHA512_BLOCK_SIZE];
- char partial_buffer_out[SHA512_BLOCK_SIZE];
- char key_buffer[SHA512_BLOCK_SIZE];
- char pad_buffer[SHA512_BLOCK_SIZE + 32];
- unsigned char digeststate[SHA512_DIGEST_SIZE];
+ char partial_buffer[SHA256_BLOCK_SIZE];
+ char partial_buffer_out[SHA256_BLOCK_SIZE];
+ char key_buffer[SHA256_BLOCK_SIZE];
+ char pad_buffer[SHA256_BLOCK_SIZE + 32];
+ unsigned char digeststate[SHA256_DIGEST_SIZE];
size_t partial_bytes;
u64 digcnt;
u32 key_md;
@@ -305,8 +300,8 @@ struct artpec6_hash_request_context {
};
struct artpec6_hash_export_state {
- char partial_buffer[SHA512_BLOCK_SIZE];
- unsigned char digeststate[SHA512_DIGEST_SIZE];
+ char partial_buffer[SHA256_BLOCK_SIZE];
+ unsigned char digeststate[SHA256_DIGEST_SIZE];
size_t partial_bytes;
u64 digcnt;
int oper;
@@ -314,7 +309,7 @@ struct artpec6_hash_export_state {
};
struct artpec6_hashalg_context {
- char hmac_key[SHA512_BLOCK_SIZE];
+ char hmac_key[SHA256_BLOCK_SIZE];
size_t hmac_key_length;
struct crypto_shash *child_hash;
};
@@ -670,8 +665,8 @@ artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
* to be written.
*/
return artpec6_crypto_dma_map_single(common,
- dma->stat + dma->in_cnt - 1,
- sizeof(dma->stat[0]),
+ dma->stat,
+ sizeof(dma->stat[0]) * dma->in_cnt,
DMA_BIDIRECTIONAL,
&dma->stat_dma_addr);
}
@@ -1315,8 +1310,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
- size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
- SHA512_DIGEST_SIZE : digestsize;
+ size_t contextsize = digestsize;
size_t blocksize = crypto_tfm_alg_blocksize(
crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
struct artpec6_crypto_req_common *common = &req_ctx->common;
@@ -1456,7 +1450,6 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
/* Finalize */
if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
- bool needtrim = contextsize != digestsize;
size_t hash_pad_len;
u64 digest_bits;
u32 oper;
@@ -1502,19 +1495,10 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
/* Descriptor for the final result */
error = artpec6_crypto_setup_in_descr(common, areq->result,
digestsize,
- !needtrim);
+ true);
if (error)
return error;
- if (needtrim) {
- /* Discard the extra context bytes for SHA-384 */
- error = artpec6_crypto_setup_in_descr(common,
- req_ctx->partial_buffer,
- digestsize - contextsize, true);
- if (error)
- return error;
- }
-
} else { /* This is not the final operation for this request */
if (!run_hw)
return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
@@ -1923,7 +1907,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
/* For the decryption, cryptlen includes the tag. */
input_length = areq->cryptlen;
if (req_ctx->decrypt)
- input_length -= AES_BLOCK_SIZE;
+ input_length -= crypto_aead_authsize(cipher);
/* Prepare the context buffer */
req_ctx->hw_ctx.aad_length_bits =
@@ -1988,7 +1972,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
size_t output_len = areq->cryptlen;
if (req_ctx->decrypt)
- output_len -= AES_BLOCK_SIZE;
+ output_len -= crypto_aead_authsize(cipher);
artpec6_crypto_walk_init(&walk, areq->dst);
@@ -2017,19 +2001,32 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
* the output ciphertext. For decryption it is put in a context
* buffer for later compare against the input tag.
*/
- count = AES_BLOCK_SIZE;
if (req_ctx->decrypt) {
ret = artpec6_crypto_setup_in_descr(common,
- req_ctx->decryption_tag, count, false);
+ req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
if (ret)
return ret;
} else {
+ /* For encryption the requested tag size may be smaller
+ * than the hardware's generated tag.
+ */
+ size_t authsize = crypto_aead_authsize(cipher);
+
ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
- count);
+ authsize);
if (ret)
return ret;
+
+ if (authsize < AES_BLOCK_SIZE) {
+ count = AES_BLOCK_SIZE - authsize;
+ ret = artpec6_crypto_setup_in_descr(common,
+ ac->pad_buffer,
+ count, false);
+ if (ret)
+ return ret;
+ }
}
}
@@ -2045,7 +2042,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
return artpec6_crypto_dma_map_descs(common);
}
-static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
+ struct list_head *completions)
{
struct artpec6_crypto_req_common *req;
@@ -2056,7 +2054,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
list_move_tail(&req->list, &ac->pending);
artpec6_crypto_start_dma(req);
- req->req->complete(req->req, -EINPROGRESS);
+ list_add_tail(&req->complete_in_progress, completions);
}
/*
@@ -2086,6 +2084,11 @@ static void artpec6_crypto_task(unsigned long data)
struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
struct artpec6_crypto_req_common *req;
struct artpec6_crypto_req_common *n;
+ struct list_head complete_done;
+ struct list_head complete_in_progress;
+
+ INIT_LIST_HEAD(&complete_done);
+ INIT_LIST_HEAD(&complete_in_progress);
if (list_empty(&ac->pending)) {
pr_debug("Spurious IRQ\n");
@@ -2097,9 +2100,12 @@ static void artpec6_crypto_task(unsigned long data)
list_for_each_entry_safe(req, n, &ac->pending, list) {
struct artpec6_crypto_dma_descriptors *dma = req->dma;
u32 stat;
+ dma_addr_t stataddr;
- dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
- sizeof(dma->stat[0]),
+ stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
+ dma_sync_single_for_cpu(artpec6_crypto_dev,
+ stataddr,
+ 4,
DMA_BIDIRECTIONAL);
stat = req->dma->stat[req->dma->in_cnt-1];
@@ -2119,19 +2125,30 @@ static void artpec6_crypto_task(unsigned long data)
pr_debug("Completing request %p\n", req);
- list_del(&req->list);
+ list_move_tail(&req->list, &complete_done);
+ ac->pending_count--;
+ }
+
+ artpec6_crypto_process_queue(ac, &complete_in_progress);
+
+ spin_unlock_bh(&ac->queue_lock);
+
+ /* Perform the completion callbacks without holding the queue lock
+ * to allow new request submissions from the callbacks.
+ */
+ list_for_each_entry_safe(req, n, &complete_done, list) {
artpec6_crypto_dma_unmap_all(req);
artpec6_crypto_copy_bounce_buffers(req);
-
- ac->pending_count--;
artpec6_crypto_common_destroy(req);
+
req->complete(req->req);
}
- artpec6_crypto_process_queue(ac);
-
- spin_unlock_bh(&ac->queue_lock);
+ list_for_each_entry_safe(req, n, &complete_in_progress,
+ complete_in_progress) {
+ req->req->complete(req->req, -EINPROGRESS);
+ }
}
static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
@@ -2170,27 +2187,29 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
/* Verify GCM hashtag. */
struct aead_request *areq = container_of(req,
struct aead_request, base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
if (req_ctx->decrypt) {
u8 input_tag[AES_BLOCK_SIZE];
+ unsigned int authsize = crypto_aead_authsize(aead);
sg_pcopy_to_buffer(areq->src,
sg_nents(areq->src),
input_tag,
- AES_BLOCK_SIZE,
+ authsize,
areq->assoclen + areq->cryptlen -
- AES_BLOCK_SIZE);
+ authsize);
- if (memcmp(req_ctx->decryption_tag,
- input_tag,
- AES_BLOCK_SIZE)) {
+ if (crypto_memneq(req_ctx->decryption_tag,
+ input_tag,
+ authsize)) {
pr_debug("***EBADMSG:\n");
print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
- input_tag, AES_BLOCK_SIZE, true);
+ input_tag, authsize, true);
print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
req_ctx->decryption_tag,
- AES_BLOCK_SIZE, true);
+ authsize, true);
result = -EBADMSG;
}
@@ -2266,13 +2285,6 @@ artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
case ARTPEC6_CRYPTO_HASH_SHA256:
oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
break;
- case ARTPEC6_CRYPTO_HASH_SHA384:
- oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
- break;
- case ARTPEC6_CRYPTO_HASH_SHA512:
- oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
- break;
-
default:
pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
return -EINVAL;
@@ -2368,53 +2380,11 @@ static int artpec6_crypto_sha256_digest(struct ahash_request *req)
return artpec6_crypto_prepare_submit_hash(req);
}
-static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
-{
- return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
-}
-
-static int __maybe_unused
-artpec6_crypto_sha384_digest(struct ahash_request *req)
-{
- struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
- artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
- req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
- return artpec6_crypto_prepare_submit_hash(req);
-}
-
-static int artpec6_crypto_sha512_init(struct ahash_request *req)
-{
- return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
-}
-
-static int artpec6_crypto_sha512_digest(struct ahash_request *req)
-{
- struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
- artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
- req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
- return artpec6_crypto_prepare_submit_hash(req);
-}
-
static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}
-static int __maybe_unused
-artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
-{
- return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
-}
-
-static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
-{
- return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
-}
-
static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
@@ -2425,27 +2395,6 @@ static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
return artpec6_crypto_prepare_submit_hash(req);
}
-static int __maybe_unused
-artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
-{
- struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
- artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
- req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
- return artpec6_crypto_prepare_submit_hash(req);
-}
-
-static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
-{
- struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
- artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
- req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
- return artpec6_crypto_prepare_submit_hash(req);
-}
-
static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
const char *base_hash_name)
{
@@ -2480,17 +2429,6 @@ static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
return artpec6_crypto_ahash_init_common(tfm, "sha256");
}
-static int __maybe_unused
-artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
-{
- return artpec6_crypto_ahash_init_common(tfm, "sha384");
-}
-
-static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
-{
- return artpec6_crypto_ahash_init_common(tfm, "sha512");
-}
-
static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
@@ -2761,103 +2699,6 @@ static struct ahash_alg hash_algos[] = {
},
};
-static struct ahash_alg artpec7_hash_algos[] = {
- /* SHA-384 */
- {
- .init = artpec6_crypto_sha384_init,
- .update = artpec6_crypto_hash_update,
- .final = artpec6_crypto_hash_final,
- .digest = artpec6_crypto_sha384_digest,
- .import = artpec6_crypto_hash_import,
- .export = artpec6_crypto_hash_export,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.statesize = sizeof(struct artpec6_hash_export_state),
- .halg.base = {
- .cra_name = "sha384",
- .cra_driver_name = "artpec-sha384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = artpec6_crypto_ahash_init,
- .cra_exit = artpec6_crypto_ahash_exit,
- }
- },
- /* HMAC SHA-384 */
- {
- .init = artpec6_crypto_hmac_sha384_init,
- .update = artpec6_crypto_hash_update,
- .final = artpec6_crypto_hash_final,
- .digest = artpec6_crypto_hmac_sha384_digest,
- .import = artpec6_crypto_hash_import,
- .export = artpec6_crypto_hash_export,
- .setkey = artpec6_crypto_hash_set_key,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.statesize = sizeof(struct artpec6_hash_export_state),
- .halg.base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "artpec-hmac-sha384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = artpec6_crypto_ahash_init_hmac_sha384,
- .cra_exit = artpec6_crypto_ahash_exit,
- }
- },
- /* SHA-512 */
- {
- .init = artpec6_crypto_sha512_init,
- .update = artpec6_crypto_hash_update,
- .final = artpec6_crypto_hash_final,
- .digest = artpec6_crypto_sha512_digest,
- .import = artpec6_crypto_hash_import,
- .export = artpec6_crypto_hash_export,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.statesize = sizeof(struct artpec6_hash_export_state),
- .halg.base = {
- .cra_name = "sha512",
- .cra_driver_name = "artpec-sha512",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = artpec6_crypto_ahash_init,
- .cra_exit = artpec6_crypto_ahash_exit,
- }
- },
- /* HMAC SHA-512 */
- {
- .init = artpec6_crypto_hmac_sha512_init,
- .update = artpec6_crypto_hash_update,
- .final = artpec6_crypto_hash_final,
- .digest = artpec6_crypto_hmac_sha512_digest,
- .import = artpec6_crypto_hash_import,
- .export = artpec6_crypto_hash_export,
- .setkey = artpec6_crypto_hash_set_key,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.statesize = sizeof(struct artpec6_hash_export_state),
- .halg.base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "artpec-hmac-sha512",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = artpec6_crypto_ahash_init_hmac_sha512,
- .cra_exit = artpec6_crypto_ahash_exit,
- }
- },
-};
-
/* Crypto */
static struct skcipher_alg crypto_algos[] = {
/* AES - ECB */
@@ -2984,12 +2825,6 @@ static void artpec6_crypto_init_debugfs(void)
{
dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
- if (!dbgfs_root || IS_ERR(dbgfs_root)) {
- dbgfs_root = NULL;
- pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
- return;
- }
-
#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_status_read", dbgfs_root,
&artpec6_crypto_fail_status_read);
@@ -3001,9 +2836,6 @@ static void artpec6_crypto_init_debugfs(void)
static void artpec6_crypto_free_debugfs(void)
{
- if (!dbgfs_root)
- return;
-
debugfs_remove_recursive(dbgfs_root);
dbgfs_root = NULL;
}
@@ -3104,19 +2936,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
goto disable_hw;
}
- if (variant != ARTPEC6_CRYPTO) {
- err = crypto_register_ahashes(artpec7_hash_algos,
- ARRAY_SIZE(artpec7_hash_algos));
- if (err) {
- dev_err(dev, "Failed to register ahashes\n");
- goto unregister_ahashes;
- }
- }
-
err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
if (err) {
dev_err(dev, "Failed to register ciphers\n");
- goto unregister_a7_ahashes;
+ goto unregister_ahashes;
}
err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
@@ -3129,10 +2952,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
unregister_algs:
crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
-unregister_a7_ahashes:
- if (variant != ARTPEC6_CRYPTO)
- crypto_unregister_ahashes(artpec7_hash_algos,
- ARRAY_SIZE(artpec7_hash_algos));
unregister_ahashes:
crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
@@ -3148,9 +2967,6 @@ static int artpec6_crypto_remove(struct platform_device *pdev)
int irq = platform_get_irq(pdev, 0);
crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
- if (ac->variant != ARTPEC6_CRYPTO)
- crypto_unregister_ahashes(artpec7_hash_algos,
- ARRAY_SIZE(artpec7_hash_algos));
crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
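
The memcmp() to crypto_memneq() change in the tag verification above removes a data-dependent timing channel from the GCM decrypt path. The verification step, reduced to a standalone helper (names illustrative):

#include <linux/errno.h>
#include <crypto/algapi.h>  /* crypto_memneq() */

/* Returns 0 when the tags match, -EBADMSG otherwise, in constant time. */
static int example_verify_auth_tag(const u8 *computed, const u8 *received,
                                   unsigned int authsize)
{
        return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
}
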
diff --git a/drivers/crypto/bcm/Makefile b/drivers/crypto/bcm/Makefile
index 13cb80eb2665..7469e19afe85 100644
--- a/drivers/crypto/bcm/Makefile
+++ b/drivers/crypto/bcm/Makefile
@@ -11,5 +11,3 @@
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) := bcm_crypto_spu.o
bcm_crypto_spu-objs := util.o spu.o spu2.o cipher.o
-
-ccflags-y += -I. -DBCMDRIVER
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c9393ffb70ed..28f592f7e1b7 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -717,7 +717,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
*/
unsigned int new_data_len;
- unsigned int chunk_start = 0;
+ unsigned int __maybe_unused chunk_start = 0;
u32 db_size; /* Length of data field, incl gcm and hash padding */
int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
u32 data_pad_len = 0; /* length of GCM/CCM padding */
@@ -1675,8 +1675,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
struct spu_hw *spu = &iproc_priv.spu;
struct brcm_message *mssg = msg;
struct iproc_reqctx_s *rctx;
- struct iproc_ctx_s *ctx;
- struct crypto_async_request *areq;
int err = 0;
rctx = mssg->ctx;
@@ -1686,8 +1684,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
err = -EFAULT;
goto cb_finish;
}
- areq = rctx->parent;
- ctx = rctx->ctx;
/* process the SPU status */
err = spu->spu_status_process(rctx->msg_buf.rx_stat);
@@ -1822,7 +1818,7 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
if (keylen == DES_KEY_SIZE) {
if (des_ekey(tmp, key) == 0) {
if (crypto_ablkcipher_get_flags(cipher) &
- CRYPTO_TFM_REQ_WEAK_KEY) {
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
crypto_ablkcipher_set_flags(cipher, flags);
@@ -2845,44 +2841,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
struct spu_hw *spu = &iproc_priv.spu;
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
- struct rtattr *rta = (void *)key;
- struct crypto_authenc_key_param *param;
- const u8 *origkey = key;
- const unsigned int origkeylen = keylen;
-
- int ret = 0;
+ struct crypto_authenc_keys keys;
+ int ret;
flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
keylen);
flow_dump(" key: ", key, keylen);
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+ ret = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (ret)
goto badkey;
- param = RTA_DATA(rta);
- ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
-
- if (keylen < ctx->enckeylen)
- goto badkey;
- if (ctx->enckeylen > MAX_KEY_SIZE)
+ if (keys.enckeylen > MAX_KEY_SIZE ||
+ keys.authkeylen > MAX_KEY_SIZE)
goto badkey;
- ctx->authkeylen = keylen - ctx->enckeylen;
-
- if (ctx->authkeylen > MAX_KEY_SIZE)
- goto badkey;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
- memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
/* May end up padding auth key. So make sure it's zeroed. */
memset(ctx->authkey, 0, sizeof(ctx->authkey));
- memcpy(ctx->authkey, key, ctx->authkeylen);
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
switch (ctx->alg->cipher_info.alg) {
case CIPHER_ALG_DES:
@@ -2890,9 +2870,9 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
u32 tmp[DES_EXPKEY_WORDS];
u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
- if (des_ekey(tmp, key) == 0) {
+ if (des_ekey(tmp, keys.enckey) == 0) {
if (crypto_aead_get_flags(cipher) &
- CRYPTO_TFM_REQ_WEAK_KEY) {
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
crypto_aead_set_flags(cipher, flags);
return -EINVAL;
}
@@ -2905,7 +2885,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
break;
case CIPHER_ALG_3DES:
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)key;
+ const u32 *K = (const u32 *)keys.enckey;
u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2936,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
- ret =
- crypto_aead_setkey(ctx->fallback_cipher, origkey,
- origkeylen);
+ ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
if (ret) {
flow_log(" fallback setkey() returned:%d\n", ret);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
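
The open-coded rtattr walk removed above is exactly what crypto_authenc_extractkeys() encapsulates; a minimal setkey skeleton built on it might look as follows (the driver-context copy is elided and the function name is illustrative):

#include <linux/errno.h>
#include <linux/string.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>

static int example_authenc_setkey(struct crypto_aead *aead,
                                  const u8 *key, unsigned int keylen)
{
        struct crypto_authenc_keys keys;

        /* Splits the key blob into { authkey, authkeylen, enckey, enckeylen } */
        if (crypto_authenc_extractkeys(&keys, key, keylen)) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        /* ... validate lengths and copy keys.authkey / keys.enckey here ... */

        memzero_explicit(&keys, sizeof(keys));
        return 0;
}
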
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 763c425c41ca..f6da49758954 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -23,6 +23,7 @@
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/aead.h>
+#include <crypto/arc4.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
@@ -34,9 +35,6 @@
/* Driver supports up to MAX_SPUS SPU blocks */
#define MAX_SPUS 16
-#define ARC4_MIN_KEY_SIZE 1
-#define ARC4_MAX_KEY_SIZE 256
-#define ARC4_BLOCK_SIZE 1
#define ARC4_STATE_SIZE 4
#define CCM_AES_IV_SIZE 16
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index a912c6ad3e85..d8cda5fb75ad 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -201,46 +201,6 @@ struct sdesc {
char ctx[];
};
-/* do a synchronous decrypt operation */
-int do_decrypt(char *alg_name,
- void *key_ptr, unsigned int key_len,
- void *iv_ptr, void *src_ptr, void *dst_ptr,
- unsigned int block_len)
-{
- struct scatterlist sg_in[1], sg_out[1];
- struct crypto_blkcipher *tfm =
- crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
- struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 };
- int ret = 0;
- void *iv;
- int ivsize;
-
- flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len);
-
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len);
-
- sg_init_table(sg_in, 1);
- sg_set_buf(sg_in, src_ptr, block_len);
-
- sg_init_table(sg_out, 1);
- sg_set_buf(sg_out, dst_ptr, block_len);
-
- iv = crypto_blkcipher_crt(tfm)->iv;
- ivsize = crypto_blkcipher_ivsize(tfm);
- memcpy(iv, iv_ptr, ivsize);
-
- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len);
- crypto_free_blkcipher(tfm);
-
- if (ret < 0)
- pr_err("aes_decrypt failed %d\n", ret);
-
- return ret;
-}
-
/**
* do_shash() - Do a synchronous hash operation in software
* @name: The name of the hash algorithm
diff --git a/drivers/crypto/bcm/util.h b/drivers/crypto/bcm/util.h
index 712e029795f8..15c60356518a 100644
--- a/drivers/crypto/bcm/util.h
+++ b/drivers/crypto/bcm/util.h
@@ -95,12 +95,6 @@ u32 spu_msg_sg_add(struct scatterlist **to_sg,
void add_to_ctr(u8 *ctr_pos, unsigned int increment);
-/* do a synchronous decrypt operation */
-int do_decrypt(char *alg_name,
- void *key_ptr, unsigned int key_len,
- void *iv_ptr, void *src_ptr, void *dst_ptr,
- unsigned int block_len);
-
/* produce a message digest from data of length n bytes */
int do_shash(unsigned char *name, unsigned char *result,
const u8 *data1, unsigned int data1_len,
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index c4b1cade55c1..577c9844b322 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -91,6 +91,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
help
Selecting this will offload crypto for users of the
scatterlist crypto API (such as the linux native IPSec
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 92e593e2069a..9eac5099098e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*
* Based on talitos crypto API driver.
*
@@ -766,6 +766,27 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+
+ if (keylen == DES3_EDE_KEY_SIZE &&
+ __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) {
+ return -EINVAL;
+ }
+
+ if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -802,6 +823,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
* aead_edesc - s/w-extended aead descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
@@ -810,6 +833,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct aead_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -820,6 +845,8 @@ struct aead_edesc {
* skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @iv_dma: dma address of iv for checking continuity and link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
@@ -830,6 +857,8 @@ struct aead_edesc {
struct skcipher_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
@@ -846,7 +875,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
@@ -961,8 +991,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
* The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
- ivsize, 0);
+ if (ivsize)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
+ ivsize, ivsize, 0);
kfree(edesc);
@@ -1023,11 +1054,12 @@ static void init_aead_job(struct aead_request *req,
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
- src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
+ src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
+ 0;
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
in_options = LDST_SGF;
}
@@ -1038,8 +1070,11 @@ static void init_aead_job(struct aead_request *req,
out_options = in_options;
if (unlikely(req->src != req->dst)) {
- if (edesc->dst_nents == 1) {
+ if (!edesc->mapped_dst_nents) {
+ dst_dma = 0;
+ } else if (edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
+ out_options = 0;
} else {
dst_dma = edesc->sec4_sg_dma +
sec4_sg_index *
@@ -1183,9 +1218,9 @@ static void init_skcipher_job(struct skcipher_request *req,
int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
u32 *sh_desc;
- u32 out_options = 0;
- dma_addr_t dst_dma, ptr;
- int len;
+ u32 in_options = 0, out_options = 0;
+ dma_addr_t src_dma, dst_dma, ptr;
+ int len, sec4_sg_index = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1203,21 +1238,27 @@ static void init_skcipher_job(struct skcipher_request *req,
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
- LDST_SGF);
+ if (ivsize || edesc->mapped_src_nents > 1) {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
+ in_options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ }
+
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
if (likely(req->src == req->dst)) {
- dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
+ dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
+ out_options = in_options;
+ } else if (edesc->mapped_dst_nents == 1) {
+ dst_dma = sg_dma_address(req->dst);
} else {
- if (edesc->dst_nents == 1) {
- dst_dma = sg_dma_address(req->dst);
- } else {
- dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
+ dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
}
+
append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
}
@@ -1289,12 +1330,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
+ /* Cover also the case of null (zero length) output data */
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(jrdev, req->dst,
+ dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
}
@@ -1313,6 +1361,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
*all_contig_ptr = !(mapped_src_nents > 1);
@@ -1586,7 +1636,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct skcipher_edesc *edesc;
- dma_addr_t iv_dma;
+ dma_addr_t iv_dma = 0;
u8 *iv;
int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1621,7 +1671,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
dev_err(jrdev, "unable to map source\n");
return ERR_PTR(-ENOMEM);
}
-
mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
DMA_FROM_DEVICE);
if (unlikely(!mapped_dst_nents)) {
@@ -1631,7 +1680,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
}
}
- sec4_sg_ents = 1 + mapped_src_nents;
+ if (!ivsize && mapped_src_nents == 1)
+ sec4_sg_ents = 0; // no need for an input hw s/g table
+ else
+ sec4_sg_ents = mapped_src_nents + !!ivsize;
dst_sg_idx = sec4_sg_ents;
sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
@@ -1650,39 +1702,48 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
/* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- memcpy(iv, req->iv, ivsize);
+ if (ivsize) {
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
}
-
- dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+ if (dst_sg_idx)
+ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+ !!ivsize, 0);
if (mapped_dst_nents > 1) {
sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
edesc->sec4_sg + dst_sg_idx, 0);
}
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ if (sec4_sg_bytes) {
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, iv_dma, ivsize, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
}
edesc->iv_dma = iv_dma;
@@ -1749,8 +1810,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
* The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
- ivsize, 0);
+ if (ivsize)
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
+ ivsize, ivsize, 0);
/* Create and submit job descriptor*/
init_skcipher_job(req, edesc, false);
@@ -1796,7 +1858,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1812,7 +1874,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-des-caam",
.cra_blocksize = DES_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
@@ -1878,6 +1940,66 @@ static struct caam_skcipher_alg driver_algs[] = {
},
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(arc4)",
+ .cra_driver_name = "ecb-arc4-caam",
+ .cra_blocksize = ARC4_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
+ },
};
static struct caam_aead_alg driver_aeads[] = {
@@ -3337,6 +3459,7 @@ static int __init caam_algapi_init(void)
struct caam_drv_private *priv;
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+ u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
@@ -3381,6 +3504,8 @@ static int __init caam_algapi_init(void)
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
+ CHA_ID_LS_ARC4_SHIFT;
ccha_inst = 0;
ptha_inst = 0;
} else {
@@ -3397,6 +3522,7 @@ static int __init caam_algapi_init(void)
md_inst = mdha & CHA_VER_NUM_MASK;
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
}
/* If MD is present, limit digest size based on LP256 */
@@ -3417,6 +3543,10 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /* Skip ARC4 algorithms if not supported by device */
+ if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
+ continue;
+
/*
* Check support for AES modes not available
* on LP devices.
@@ -3476,7 +3606,7 @@ static int __init caam_algapi_init(void)
* Skip algorithms requiring message digests
* if MD or MD size is not supported by device.
*/
- if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ if (is_mdha(c2_alg_sel) &&
(!md_inst || t_alg->aead.maxauthsize > md_limit))
continue;
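
The ivsize == 0 handling added throughout caamalg.c is what makes the new ecb(aes)/ecb(des)/ecb(des3_ede)/ecb(arc4) entries usable; from the caller's side an ECB request simply carries a NULL IV. A compact synchronous-style user sketch ("ecb(aes)" as in the patch, everything else illustrative):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int example_ecb_aes_encrypt(const u8 *key, unsigned int keylen,
                                   struct scatterlist *src,
                                   struct scatterlist *dst, unsigned int len)
{
        DECLARE_CRYPTO_WAIT(wait);
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        int ret;

        tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_skcipher_setkey(tfm, key, keylen);
        if (ret)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        /* ECB has no IV, hence the NULL iv pointer (ivsize == 0 in the driver) */
        skcipher_request_set_crypt(req, src, dst, len, NULL);
        ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return ret;
}
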
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 7db1640d3577..1e1a376edc2f 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -2,7 +2,7 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*/
#include "compat.h"
@@ -1396,9 +1396,11 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
set_jump_tgt_here(desc, key_jump_cmd);
- /* Load iv */
- append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
@@ -1462,9 +1464,11 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
set_jump_tgt_here(desc, key_jump_cmd);
- /* load IV */
- append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c0d55310aade..a15ce9213310 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -782,7 +782,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
cpu = smp_processor_id();
drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
- if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+ if (!IS_ERR_OR_NULL(drv_ctx))
drv_ctx->op_type = type;
ctx->drv_ctx[type] = drv_ctx;
@@ -802,7 +802,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
@@ -892,7 +893,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct caam_drv_ctx *drv_ctx;
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ if (IS_ERR_OR_NULL(drv_ctx))
return (struct aead_edesc *)drv_ctx;
/* allocate space for base edesc and hw desc commands, link tables */
@@ -955,13 +956,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(qidev, req->dst,
+ dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
}
@@ -1184,7 +1191,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
struct caam_drv_ctx *drv_ctx;
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ if (IS_ERR_OR_NULL(drv_ctx))
return (struct skcipher_edesc *)drv_ctx;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 425d5d974613..c2c1abc68f81 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -25,13 +25,6 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
-#endif
-
/*
 * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
@@ -151,7 +144,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
@@ -392,13 +386,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(dev, "unable to map destination\n");
- dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
} else {
src_nents = sg_nents_for_len(req->src, req->assoclen +
@@ -4503,7 +4502,8 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
nctx->cb = dpaa2_caam_fqdan_cb;
/* Register notification callbacks */
- err = dpaa2_io_service_register(NULL, nctx);
+ ppriv->dpio = dpaa2_io_service_select(cpu);
+ err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
if (unlikely(err)) {
dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
nctx->cb = NULL;
@@ -4536,7 +4536,7 @@ err:
ppriv = per_cpu_ptr(priv->ppriv, cpu);
if (!ppriv->nctx.cb)
break;
- dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
}
for_each_online_cpu(cpu) {
@@ -4556,7 +4556,8 @@ static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
for_each_online_cpu(cpu) {
ppriv = per_cpu_ptr(priv->ppriv, cpu);
- dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
+ priv->dev);
dpaa2_io_store_destroy(ppriv->store);
if (++i == priv->num_pairs)
@@ -4654,7 +4655,7 @@ static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
/* Retry while portal is busy */
do {
- err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
ppriv->store);
} while (err == -EBUSY);
@@ -4722,7 +4723,7 @@ static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
- err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+ err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
if (unlikely(err))
dev_err(priv->dev, "Notification rearm failed: %d\n",
err);
@@ -4863,21 +4864,31 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
i = 0;
for_each_online_cpu(cpu) {
- dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
- priv->rx_queue_attr[i].fqid,
- priv->tx_queue_attr[i].fqid);
+ u8 j;
+
+ j = i % priv->num_pairs;
ppriv = per_cpu_ptr(priv->ppriv, cpu);
- ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
- ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
- ppriv->prio = i;
+ ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
+
+ /*
+ * Allow all cores to enqueue, while only some of them
+ * will take part in dequeuing.
+ */
+ if (++i > priv->num_pairs)
+ continue;
+
+ ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
+ ppriv->prio = j;
+
+ dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
+ priv->rx_queue_attr[j].fqid,
+ priv->tx_queue_attr[j].fqid);
ppriv->net_dev.dev = *dev;
INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
DPAA2_CAAM_NAPI_WEIGHT);
- if (++i == priv->num_pairs)
- break;
}
return 0;
@@ -5229,7 +5240,8 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
struct dpaa2_fd fd;
struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
- int err = 0, i, id;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i;
if (IS_ERR(req))
return PTR_ERR(req);
@@ -5259,23 +5271,18 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
dpaa2_fd_set_flc(&fd, req->flc_dma);
- /*
- * There is no guarantee that preemption is disabled here,
- * thus take action.
- */
- preempt_disable();
- id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
+ ppriv = this_cpu_ptr(priv->ppriv);
for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
- err = dpaa2_io_service_enqueue_fq(NULL,
- priv->tx_queue_attr[id].fqid,
+ err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
&fd);
if (err != -EBUSY)
break;
+
+ cpu_relax();
}
- preempt_enable();
if (unlikely(err)) {
- dev_err(dev, "Error enqueuing frame: %d\n", err);
+ dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
goto err_out;
}
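
The enqueue rework above drops preempt_disable() in favour of the portal affine to the current CPU, with a bounded busy retry. A condensed sketch of that idiom, assuming the dpaa2_caam_priv_per_cpu layout added in caamalg_qi2.h below (retry bound illustrative):

#include <linux/percpu.h>
#include <soc/fsl/dpaa2-io.h>

static int example_enqueue_fd(struct dpaa2_caam_priv *priv, struct dpaa2_fd *fd)
{
        struct dpaa2_caam_priv_per_cpu *ppriv = this_cpu_ptr(priv->ppriv);
        unsigned int retries = 100;
        int err;

        do {
                err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, fd);
                if (err != -EBUSY)
                        return err;
                cpu_relax();  /* portal busy: back off briefly and try again */
        } while (--retries);

        return -EBUSY;
}
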
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 9823bdefd029..20890780fb82 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -76,6 +76,7 @@ struct dpaa2_caam_priv {
* @nctx: notification context of response FQ
* @store: where dequeued frames are stored
* @priv: backpointer to dpaa2_caam_priv
+ * @dpio: portal used for data path operations
*/
struct dpaa2_caam_priv_per_cpu {
struct napi_struct napi;
@@ -86,6 +87,7 @@ struct dpaa2_caam_priv_per_cpu {
struct dpaa2_io_notification_ctx nctx;
struct dpaa2_io_store *store;
struct dpaa2_caam_priv *priv;
+ struct dpaa2_io *dpio;
};
/*
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 81712aa5d0f2..d7483e4d0ce2 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -98,13 +98,14 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
+ dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev;
- u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
struct alginfo adata;
};
@@ -113,6 +114,7 @@ struct caam_hash_ctx {
struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
+ int ctx_dma_len;
u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_0;
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -158,6 +160,11 @@ static inline int *alt_buflen(struct caam_hash_state *state)
return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
+static inline bool is_cmac_aes(u32 algtype)
+{
+ return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+ (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
+}
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -165,6 +172,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
struct caam_hash_state *state,
int ctx_len)
{
+ state->ctx_dma_len = ctx_len;
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, state->ctx_dma)) {
@@ -178,18 +186,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
return 0;
}
-/* Map req->result, and append seq_out_ptr command that points to it */
-static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
- u8 *result, int digestsize)
-{
- dma_addr_t dst_dma;
-
- dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
- append_seq_out_ptr(desc, dst_dma, digestsize, 0);
-
- return dst_dma;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
@@ -218,6 +214,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
+ state->ctx_dma_len = ctx_len;
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
if (dma_mapping_error(jrdev, state->ctx_dma)) {
dev_err(jrdev, "unable to map ctx\n");
@@ -292,14 +289,119 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
+static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+
+ /* key is loaded from memory for UPDATE and FINALIZE states */
+ ctx->adata.key_dma = ctx->key_dma;
+
+ /* shared descriptor for ahash_update */
+ desc = ctx->sh_desc_update;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+ ctx->ctx_len, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* shared descriptor for ahash_{final,finup} */
+ desc = ctx->sh_desc_fin;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+ digestsize, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* key is immediate data for INIT and INITFINAL states */
+ ctx->adata.key_virt = ctx->key;
+
+ /* shared descriptor for first invocation of ahash_update */
+ desc = ctx->sh_desc_update_first;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, ctx->key_dma);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* shared descriptor for ahash_digest */
+ desc = ctx->sh_desc_digest;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+ digestsize, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+ return 0;
+}
+
+static int acmac_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+
+ /* shared descriptor for ahash_update */
+ desc = ctx->sh_desc_update;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+ ctx->ctx_len, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for ahash_{final,finup} */
+ desc = ctx->sh_desc_fin;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+ digestsize, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for first invocation of ahash_update */
+ desc = ctx->sh_desc_update_first;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for ahash_digest */
+ desc = ctx->sh_desc_digest;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+ digestsize, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ return 0;
+}
+
/* Digest hash size if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+ u32 digestsize)
{
struct device *jrdev = ctx->jrdev;
u32 *desc;
struct split_key_result result;
- dma_addr_t src_dma, dst_dma;
+ dma_addr_t key_dma;
int ret;
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
@@ -310,18 +412,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
init_job_desc(desc, 0);
- src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, src_dma)) {
- dev_err(jrdev, "unable to map key input memory\n");
- kfree(desc);
- return -ENOMEM;
- }
- dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dst_dma)) {
- dev_err(jrdev, "unable to map key output memory\n");
- dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(jrdev, key_dma)) {
+ dev_err(jrdev, "unable to map key memory\n");
kfree(desc);
return -ENOMEM;
}
@@ -329,16 +422,16 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
/* Job descriptor to perform unkeyed hash on key_in */
append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
- append_seq_in_ptr(desc, src_dma, *keylen, 0);
+ append_seq_in_ptr(desc, key_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
- append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+ append_seq_out_ptr(desc, key_dma, digestsize, 0);
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -354,12 +447,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"digested key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in,
- digestsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
#endif
}
- dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
- dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
*keylen = digestsize;
@@ -383,13 +474,10 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#endif
if (keylen > blocksize) {
- hashed_key = kmalloc_array(digestsize,
- sizeof(*hashed_key),
- GFP_KERNEL | GFP_DMA);
+ hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!hashed_key)
return -ENOMEM;
- ret = hash_digest_key(ctx, key, &keylen, hashed_key,
- digestsize);
+ ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
if (ret)
goto bad_free_key;
key = hashed_key;
@@ -424,9 +512,39 @@ static int ahash_setkey(struct crypto_ahash *ahash,
return -EINVAL;
}
+static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->adata.keylen = keylen;
+
+ print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
+
+ return axcbc_set_sh_desc(ahash);
+}
+
+static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+ /* key is immediate data for all cmac shared descriptors */
+ ctx->adata.key_virt = key;
+ ctx->adata.keylen = keylen;
+
+ print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ return acmac_set_sh_desc(ahash);
+}
+
/*
* ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: physical mapped address of req->result
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -434,7 +552,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
* @sec4_sg: h/w link table
*/
struct ahash_edesc {
- dma_addr_t dst_dma;
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
@@ -450,8 +567,6 @@ static inline void ahash_unmap(struct device *dev,
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
- if (edesc->dst_dma)
- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
@@ -468,12 +583,10 @@ static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len, u32 flag)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
if (state->ctx_dma) {
- dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
state->ctx_dma = 0;
}
ahash_unmap(dev, edesc, req, dst_len);
@@ -486,9 +599,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -497,17 +610,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
#endif
req->base.complete(&req->base, err);
@@ -555,9 +665,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -566,17 +676,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
+ memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
#endif
req->base.complete(&req->base, err);
@@ -688,6 +795,7 @@ static int ahash_update_ctx(struct ahash_request *req)
u8 *buf = current_buf(state);
int *buflen = current_buflen(state);
u8 *next_buf = alt_buf(state);
+ int blocksize = crypto_ahash_blocksize(ahash);
int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
@@ -696,9 +804,20 @@ static int ahash_update_ctx(struct ahash_request *req)
int ret = 0;
last_buflen = *next_buflen;
- *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ *next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
+ /*
+	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
+	 * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - (*next_buflen));
@@ -801,7 +920,7 @@ static int ahash_update_ctx(struct ahash_request *req)
#endif
return ret;
- unmap_ctx:
+unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
@@ -837,7 +956,7 @@ static int ahash_final_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -857,14 +976,7 @@ static int ahash_final_ctx(struct ahash_request *req)
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
LDST_SGF);
-
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
- ret = -ENOMEM;
- goto unmap_ctx;
- }
+ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -877,7 +989,7 @@ static int ahash_final_ctx(struct ahash_request *req)
return -EINPROGRESS;
unmap_ctx:
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
}
@@ -931,7 +1043,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -945,13 +1057,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
- ret = -ENOMEM;
- goto unmap_ctx;
- }
+ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -964,7 +1070,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
return -EINPROGRESS;
unmap_ctx:
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
}
@@ -1023,10 +1129,8 @@ static int ahash_digest(struct ahash_request *req)
desc = edesc->hw_desc;
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret) {
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
return -ENOMEM;
@@ -1041,7 +1145,7 @@ static int ahash_digest(struct ahash_request *req)
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1072,20 +1176,20 @@ static int ahash_final_no_ctx(struct ahash_request *req)
desc = edesc->hw_desc;
- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, state->buf_dma)) {
- dev_err(jrdev, "unable to map src\n");
- goto unmap;
- }
+ if (buflen) {
+ state->buf_dma = dma_map_single(jrdev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map src\n");
+ goto unmap;
+ }
- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+ }
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret)
goto unmap;
- }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1096,7 +1200,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1119,6 +1223,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
GFP_KERNEL : GFP_ATOMIC;
u8 *buf = current_buf(state);
int *buflen = current_buflen(state);
+ int blocksize = crypto_ahash_blocksize(ahash);
u8 *next_buf = alt_buf(state);
int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
@@ -1127,9 +1232,20 @@ static int ahash_update_no_ctx(struct ahash_request *req)
u32 *desc;
int ret = 0;
- *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ *next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
+ /*
+	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
+	 * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - *next_buflen);
@@ -1295,12 +1411,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
goto unmap;
}
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret)
goto unmap;
- }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1311,7 +1424,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1335,15 +1448,26 @@ static int ahash_update_first(struct ahash_request *req)
u8 *next_buf = alt_buf(state);
int *next_buflen = alt_buflen(state);
int to_hash;
+ int blocksize = crypto_ahash_blocksize(ahash);
u32 *desc;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
int ret = 0;
- *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
- 1);
+ *next_buflen = req->nbytes & (blocksize - 1);
to_hash = req->nbytes - *next_buflen;
+ /*
+	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
+	 * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - *next_buflen);
@@ -1443,6 +1567,7 @@ static int ahash_init(struct ahash_request *req)
state->final = ahash_final_no_ctx;
state->ctx_dma = 0;
+ state->ctx_dma_len = 0;
state->current_buf = 0;
state->buf_dma = 0;
state->buflen_0 = 0;
@@ -1651,6 +1776,44 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
+ }, {
+ .hmac_name = "xcbc(aes)",
+ .hmac_driver_name = "xcbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = axcbc_setkey,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
+ }, {
+ .hmac_name = "cmac(aes)",
+ .hmac_driver_name = "cmac-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = acmac_setkey,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
},
};
@@ -1692,14 +1855,45 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
}
priv = dev_get_drvdata(ctx->jrdev->parent);
- ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+ if (is_xcbc_aes(caam_hash->alg_type)) {
+ ctx->dir = DMA_TO_DEVICE;
+ ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+ ctx->ctx_len = 48;
+
+ ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
+ ARRAY_SIZE(ctx->key),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+ dev_err(ctx->jrdev, "unable to map key\n");
+ caam_jr_free(ctx->jrdev);
+ return -ENOMEM;
+ }
+ } else if (is_cmac_aes(caam_hash->alg_type)) {
+ ctx->dir = DMA_TO_DEVICE;
+ ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+ ctx->ctx_len = 32;
+ } else {
+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+ }
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
+ offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+
+ if (is_xcbc_aes(caam_hash->alg_type))
+ dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+ ARRAY_SIZE(ctx->key),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
caam_jr_free(ctx->jrdev);
return -ENOMEM;
}
@@ -1713,16 +1907,14 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
sh_desc_digest);
- /* copy descriptor header template value */
- ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
-
- ctx->ctx_len = runninglen[(ctx->adata.algtype &
- OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT];
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
- return ahash_set_sh_desc(ahash);
+
+ /*
+	 * For keyed hash algorithms, shared descriptors
+	 * will be created later in the setkey() callback
+ */
+ return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -1730,9 +1922,12 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
+ offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (is_xcbc_aes(ctx->adata.algtype))
+ dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+ ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
@@ -1863,14 +2058,16 @@ static int __init caam_algapi_hash_init(void)
struct caam_hash_template *alg = driver_hash + i;
/* If MD size is not supported by device, skip registration */
- if (alg->template_ahash.halg.digestsize > md_limit)
+ if (is_mdha(alg->alg_type) &&
+ alg->template_ahash.halg.digestsize > md_limit)
continue;
/* register hmac version */
t_alg = caam_hash_alloc(alg, true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n", alg->driver_name);
+ pr_warn("%s alg allocation failed\n",
+ alg->hmac_driver_name);
continue;
}
@@ -1883,6 +2080,9 @@ static int __init caam_algapi_hash_init(void)
} else
list_add_tail(&t_alg->entry, &hash_list);
+ if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
+ continue;
+
/* register unkeyed version */
t_alg = caam_hash_alloc(alg, false);
if (IS_ERR(t_alg)) {
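
A minimal standalone sketch (not part of the patch) of the buffering rule the
XCBC/CMAC hunks above add to ahash_update_ctx(), ahash_update_no_ctx() and
ahash_update_first(): because these MACs treat the final block specially, a
block-aligned running input keeps its last full block in the driver's buffer
instead of hashing it right away. The helper and values below are invented
purely for illustration.

    #include <stdio.h>

    /* split in_len into what gets hashed now and what stays buffered */
    static void split_input(int in_len, int blocksize,
                            int *to_hash, int *next_buflen)
    {
        *next_buflen = in_len & (blocksize - 1); /* blocksize: power of 2 */
        *to_hash = in_len - *next_buflen;

        /* block-aligned input: hold back one block for the final step */
        if (*next_buflen == 0 && *to_hash >= blocksize) {
            *next_buflen = blocksize;
            *to_hash -= blocksize;
        }
    }

    int main(void)
    {
        int to_hash, buffered;

        split_input(64, 16, &to_hash, &buffered); /* 48 hashed, 16 buffered */
        printf("%d %d\n", to_hash, buffered);
        split_input(70, 16, &to_hash, &buffered); /* 64 hashed, 6 buffered  */
        printf("%d %d\n", to_hash, buffered);
        return 0;
    }
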
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
index a12f7959a2c3..71d018343ee4 100644
--- a/drivers/crypto/caam/caamhash_desc.c
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -2,7 +2,7 @@
/*
* Shared descriptors for ahash algorithms
*
- * Copyright 2017 NXP
+ * Copyright 2017-2019 NXP
*/
#include "compat.h"
@@ -75,6 +75,72 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
}
EXPORT_SYMBOL(cnstr_shdsc_ahash);
+/**
+ * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based
+ * hash algorithms
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of the Context Register
+ * @key_dma: I/O virtual address of the key
+ */
+void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, dma_addr_t key_dma)
+{
+ u32 *skip_key_load;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip loading of key, context if already shared */
+ skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+
+ if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) {
+ append_key_as_imm(desc, adata->key_virt, adata->keylen,
+ adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else { /* UPDATE, FINALIZE */
+ if (is_xcbc_aes(adata->algtype))
+ /* Load K1 */
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC);
+ else /* CMAC */
+ append_key_as_imm(desc, adata->key_virt, adata->keylen,
+ adata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ /* Restore context */
+ append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ }
+
+ set_jump_tgt_here(desc, skip_key_load);
+
+ /* Class 1 operation */
+ append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
+ */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 |
+ FIFOLD_TYPE_MSG | FIFOLDST_VLF);
+
+ /*
+ * Save context:
+ * - xcbc: partial hash, keys K2 and K3
+ * - cmac: partial hash, constant L = E(K,0)
+ */
+ append_seq_store(desc, digestsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT)
+ /* Save K1 */
+ append_fifo_store(desc, key_dma, adata->keylen,
+ LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK);
+}
+EXPORT_SYMBOL(cnstr_shdsc_sk_hash);
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
MODULE_AUTHOR("NXP Semiconductors");
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
index 631fc1ac312c..6947ee1f200c 100644
--- a/drivers/crypto/caam/caamhash_desc.h
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -15,7 +15,15 @@
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+static inline bool is_xcbc_aes(u32 algtype)
+{
+ return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+ (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC);
+}
+
void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
int digestsize, int ctx_len, bool import_ctx, int era);
+void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, dma_addr_t key_dma);
#endif /* _CAAMHASH_DESC_H_ */
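
Once the "xcbc(aes)" and "cmac(aes)" templates added to caamhash.c above are
registered, the MACs are reachable from user space through the generic AF_ALG
hash interface. A minimal sketch, not part of the patch, with error handling
omitted and an all-zero demo key; which implementation actually backs the name
depends on the priorities of whatever providers are loaded:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    #ifndef AF_ALG
    #define AF_ALG 38
    #endif
    #ifndef SOL_ALG
    #define SOL_ALG 279
    #endif

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "cmac(aes)",   /* or "xcbc(aes)" */
        };
        unsigned char key[16] = { 0 };    /* all-zero demo key */
        unsigned char mac[16];
        int tfmfd, opfd, i;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        opfd = accept(tfmfd, NULL, 0);

        write(opfd, "hello", 5);          /* message to authenticate */
        read(opfd, mac, sizeof(mac));     /* 16-byte CMAC/XCBC tag */

        for (i = 0; i < 16; i++)
            printf("%02x", mac[i]);
        printf("\n");

        close(opfd);
        close(tfmfd);
        return 0;
    }
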
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 87d9efe4c7aa..8639b2df0371 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -43,6 +43,7 @@
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
+#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 16bbc72f041a..858bdc9ab4a3 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -18,12 +18,8 @@
#include "desc_constr.h"
#include "ctrl.h"
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
@@ -863,27 +859,18 @@ static int caam_probe(struct platform_device *pdev)
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_kek = debugfs_create_blob("kek",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_kek_wrap);
+ debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_kek_wrap);
ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_tkek_wrap);
+ debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tkek_wrap);
ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_tdsk_wrap);
+ debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tdsk_wrap);
#endif
return 0;
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index ec10230178c5..4b6854bf896a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,6 +1155,7 @@
#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 7e8d690f2827..21a70fd32f5d 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
#endif /* DEBUG */
EXPORT_SYMBOL(caam_dump_sg);
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+
static const struct {
u8 value;
const char *error_text;
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 67ea94079837..8c6b83e02a70 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,6 +7,9 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
+
+#include "desc.h"
+
#define CAAM_ERROR_STR_MAX 302
void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii);
+
+static inline bool is_mdha(u32 algtype)
+{
+ return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
+ OP_ALG_CHA_MDHA;
+}
#endif /* CAAM_ERROR_H */
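
The is_mdha() helper above works by masking off the per-digest sub-selector so
every MD5/SHA-* selector collapses onto the MDHA class value. A small
user-space sketch of the same masking; the constants are invented for the
example only, the real ones live in drivers/crypto/caam/desc.h:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_ALGSEL_SHIFT   16
    #define EX_ALGSEL_MASK    (0xffu << EX_ALGSEL_SHIFT)
    #define EX_ALGSEL_SUBMASK (0x0fu << EX_ALGSEL_SHIFT) /* per-digest bits */
    #define EX_CHA_MDHA       (0x40u << EX_ALGSEL_SHIFT)
    #define EX_ALGSEL_MD5     (0x40u << EX_ALGSEL_SHIFT)
    #define EX_ALGSEL_SHA256  (0x43u << EX_ALGSEL_SHIFT)
    #define EX_ALGSEL_AES     (0x10u << EX_ALGSEL_SHIFT)

    static int ex_is_mdha(uint32_t algtype)
    {
        return (algtype & EX_ALGSEL_MASK & ~EX_ALGSEL_SUBMASK) == EX_CHA_MDHA;
    }

    int main(void)
    {
        printf("md5:    %d\n", ex_is_mdha(EX_ALGSEL_MD5));    /* 1 */
        printf("sha256: %d\n", ex_is_mdha(EX_ALGSEL_SHA256)); /* 1 */
        printf("aes:    %d\n", ex_is_mdha(EX_ALGSEL_AES));    /* 0 */
        return 0;
    }
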
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index babc78abd155..5869ad58d497 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -106,7 +106,6 @@ struct caam_drv_private {
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
- struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
};
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 312b5f042f31..8d0713fae6ac 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -48,7 +48,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
{
u32 *desc;
struct split_key_result result;
- dma_addr_t dma_addr_in, dma_addr_out;
+ dma_addr_t dma_addr;
int ret = -ENOMEM;
adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
@@ -71,22 +71,17 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
return ret;
}
- dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_in)) {
- dev_err(jrdev, "unable to map key input memory\n");
- goto out_free;
- }
+ memcpy(key_out, key_in, keylen);
- dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_out)) {
- dev_err(jrdev, "unable to map key output memory\n");
- goto out_unmap_in;
+ dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(jrdev, dma_addr)) {
+ dev_err(jrdev, "unable to map key memory\n");
+ goto out_free;
}
init_job_desc(desc, 0);
- append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+ append_key(desc, dma_addr, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
@@ -104,12 +99,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- append_fifo_store(desc, dma_addr_out, adata->keylen,
+ append_fifo_store(desc, dma_addr, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
@@ -129,10 +122,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
#endif
}
- dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
- DMA_FROM_DEVICE);
-out_unmap_in:
- dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+ dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
out_free:
kfree(desc);
return ret;
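
gen_split_key() above now copies the input key into the output buffer and maps
it once with DMA_BIDIRECTIONAL instead of keeping separate TO_DEVICE and
FROM_DEVICE mappings. A hedged sketch of that in-place pattern in isolation;
foo_run_job() is a hypothetical stand-in for building and submitting the job
descriptor:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* hypothetical helper that builds and runs the device job */
    int foo_run_job(struct device *dev, dma_addr_t dma,
                    size_t in_len, size_t out_len);

    static int foo_transform_in_place(struct device *dev, u8 *buf,
                                      size_t in_len, size_t out_len)
    {
        dma_addr_t dma;
        int ret;

        /* buf already holds in_len bytes of input; out_len >= in_len */
        dma = dma_map_single(dev, buf, out_len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, dma))
            return -ENOMEM;

        ret = foo_run_job(dev, dma, in_len, out_len);

        /* one unmap covers every exit path */
        dma_unmap_single(dev, dma, out_len, DMA_BIDIRECTIONAL);
        return ret;
    }
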
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b84e6c8b1e13..7cb8b1755e57 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
/* Create a new req FQ in parked state */
new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
drv_ctx->context_a, 0);
- if (unlikely(IS_ERR_OR_NULL(new_fq))) {
+ if (IS_ERR_OR_NULL(new_fq)) {
dev_err(qidev, "FQ allocation for shdesc update failed\n");
return PTR_ERR(new_fq);
}
@@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
/* Attach request FQ */
drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
QMAN_INITFQ_FLAG_SCHED);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
+ if (IS_ERR_OR_NULL(drv_ctx->req_fq)) {
dev_err(qidev, "create_caam_req_fq failed\n");
dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
kfree(drv_ctx);
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 06ad85ab5e86..a876535529d1 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
mcode->num_cores = is_ae ? 6 : 10;
/* Allocate DMAable space */
- mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
- &mcode->phys_base, GFP_KERNEL);
+ mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
+ &mcode->phys_base, GFP_KERNEL);
if (!mcode->code) {
dev_err(dev, "Unable to allocate space for microcode");
ret = -ENOMEM;
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 5c796ed55eba..2ca431ed1db8 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
rem_q_size;
- curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
- c_size + CPT_NEXT_CHUNK_PTR_SIZE,
- &curr->dma_addr, GFP_KERNEL);
+ curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
+ c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+ &curr->dma_addr,
+ GFP_KERNEL);
if (!curr->head) {
dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
i, queue->nchunks);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
index 0196b992280f..848ec93d4333 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -55,31 +55,14 @@ void nitrox_debugfs_exit(struct nitrox_device *ndev)
ndev->debugfs_dir = NULL;
}
-int nitrox_debugfs_init(struct nitrox_device *ndev)
+void nitrox_debugfs_init(struct nitrox_device *ndev)
{
- struct dentry *dir, *f;
+ struct dentry *dir;
dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!dir)
- return -ENOMEM;
ndev->debugfs_dir = dir;
- f = debugfs_create_file("firmware", 0400, dir, ndev,
- &firmware_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("device", 0400, dir, ndev,
- &device_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("stats", 0400, dir, ndev,
- &stats_fops);
- if (!f)
- goto err;
-
- return 0;
-
-err:
- nitrox_debugfs_exit(ndev);
- return -ENODEV;
+ debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+ debugfs_create_file("device", 0400, dir, ndev, &device_fops);
+ debugfs_create_file("stats", 0400, dir, ndev, &stats_fops);
}
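
The nitrox (and ccp/zip/caam) debugfs cleanups in this series rely on the
convention that debugfs creation helpers never need their return values
checked: on failure, or with CONFIG_DEBUG_FS=n, later calls degrade gracefully
and a single debugfs_remove_recursive() tears everything down. A minimal
sketch of the pattern, with the foo names being hypothetical:

    #include <linux/debugfs.h>

    struct foo_device;                                   /* hypothetical */
    extern const struct file_operations foo_stats_fops;  /* hypothetical */

    static struct dentry *foo_dbg_root;

    static void foo_debugfs_init(struct foo_device *fdev)
    {
        foo_dbg_root = debugfs_create_dir("foo", NULL);
        /* no error checks: callers may ignore debugfs return values */
        debugfs_create_file("stats", 0400, foo_dbg_root, fdev,
                            &foo_stats_fops);
    }

    static void foo_debugfs_exit(void)
    {
        debugfs_remove_recursive(foo_dbg_root);
        foo_dbg_root = NULL;
    }
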
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
index a8d85ffa619c..f177b79bbab0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
@@ -5,12 +5,11 @@
#include "nitrox_dev.h"
#ifdef CONFIG_DEBUG_FS
-int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_init(struct nitrox_device *ndev);
void nitrox_debugfs_exit(struct nitrox_device *ndev);
#else
-static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+static inline void nitrox_debugfs_init(struct nitrox_device *ndev)
{
- return 0;
}
static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 9138bae12521..4ace9bcd603a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
struct nitrox_device *ndev = cmdq->ndev;
cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
- cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
- &cmdq->unalign_dma,
- GFP_KERNEL);
+ cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
+ &cmdq->unalign_dma,
+ GFP_KERNEL);
if (!cmdq->unalign_base)
return -ENOMEM;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 014e9863c20e..faa78f651238 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -404,9 +404,7 @@ static int nitrox_probe(struct pci_dev *pdev,
if (err)
goto pf_hw_fail;
- err = nitrox_debugfs_init(ndev);
- if (err)
- goto pf_hw_fail;
+ nitrox_debugfs_init(ndev);
/* clear the statistics */
atomic64_set(&ndev->stats.posted, 0);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index e34e4df8fd24..4c97478d44bd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
struct nitrox_device *ndev = cmdq->ndev;
struct nitrox_softreq *sr;
int req_completed = 0, err = 0, budget;
+ completion_t callback;
+ void *cb_arg;
/* check all pending requests */
budget = atomic_read(&cmdq->pending_count);
@@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
smp_mb__after_atomic();
/* remove from response list */
response_list_del(sr, cmdq);
-
/* ORH error code */
err = READ_ONCE(*sr->resp.orh) & 0xff;
+ callback = sr->callback;
+ cb_arg = sr->cb_arg;
softreq_destroy(sr);
-
- if (sr->callback)
- sr->callback(sr->cb_arg, err);
+ if (callback)
+ callback(cb_arg, err);
req_completed++;
}
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index be055b9547f6..a8447a3cf366 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -351,6 +351,7 @@ static struct pci_driver zip_driver = {
static struct crypto_alg zip_comp_deflate = {
.cra_name = "deflate",
+ .cra_driver_name = "deflate-cavium",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct zip_kernel_ctx),
.cra_priority = 300,
@@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = {
static struct crypto_alg zip_comp_lzs = {
.cra_name = "lzs",
+ .cra_driver_name = "lzs-cavium",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct zip_kernel_ctx),
.cra_priority = 300,
@@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = {
.decompress = zip_scomp_decompress,
.base = {
.cra_name = "deflate",
- .cra_driver_name = "deflate-scomp",
+ .cra_driver_name = "deflate-scomp-cavium",
.cra_module = THIS_MODULE,
.cra_priority = 300,
}
@@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = {
.decompress = zip_scomp_decompress,
.base = {
.cra_name = "lzs",
- .cra_driver_name = "lzs-scomp",
+ .cra_driver_name = "lzs-scomp-cavium",
.cra_module = THIS_MODULE,
.cra_priority = 300,
}
@@ -618,41 +620,23 @@ static const struct file_operations zip_regs_fops = {
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
-static int __init zip_debugfs_init(void)
+static void __init zip_debugfs_init(void)
{
- struct dentry *zip_stats, *zip_clear, *zip_regs;
-
if (!debugfs_initialized())
- return -ENODEV;
+ return;
zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
- if (!zip_debugfs_root)
- return -ENOMEM;
/* Creating files for entries inside thunderx_zip directory */
- zip_stats = debugfs_create_file("zip_stats", 0444,
- zip_debugfs_root,
- NULL, &zip_stats_fops);
- if (!zip_stats)
- goto failed_to_create;
-
- zip_clear = debugfs_create_file("zip_clear", 0444,
- zip_debugfs_root,
- NULL, &zip_clear_fops);
- if (!zip_clear)
- goto failed_to_create;
-
- zip_regs = debugfs_create_file("zip_regs", 0444,
- zip_debugfs_root,
- NULL, &zip_regs_fops);
- if (!zip_regs)
- goto failed_to_create;
+ debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
+ &zip_stats_fops);
- return 0;
+ debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
+ &zip_clear_fops);
+
+ debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
+ &zip_regs_fops);
-failed_to_create:
- debugfs_remove_recursive(zip_debugfs_root);
- return -ENOENT;
}
static void __exit zip_debugfs_exit(void)
@@ -661,13 +645,8 @@ static void __exit zip_debugfs_exit(void)
}
#else
-static int __init zip_debugfs_init(void)
-{
- return 0;
-}
-
+static void __init zip_debugfs_init(void) { }
static void __exit zip_debugfs_exit(void) { }
-
#endif
/* debugfs - end */
@@ -691,17 +670,10 @@ static int __init zip_init_module(void)
}
/* comp-decomp statistics are handled with debugfs interface */
- ret = zip_debugfs_init();
- if (ret < 0) {
- zip_err("ZIP: debugfs initialization failed\n");
- goto err_crypto_unregister;
- }
+ zip_debugfs_init();
return ret;
-err_crypto_unregister:
- zip_unregister_compression_device();
-
err_pci_unregister:
pci_unregister_driver(&zip_driver);
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 9108015e56cc..f6e252c1d6fb 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index ae87b741f9d5..c2ff551d215b 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -57,7 +57,7 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 2ca64bb57d2e..10a61cd54fce 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
- * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 1a734bd2070a..4bd26af7098d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -286,10 +286,7 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
{
struct ccp_cmd_queue *cmd_q;
char name[MAX_NAME_LEN + 1];
- struct dentry *debugfs_info;
- struct dentry *debugfs_stats;
struct dentry *debugfs_q_instance;
- struct dentry *debugfs_q_stats;
int i;
if (!debugfs_initialized())
@@ -299,24 +296,14 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
if (!ccp_debugfs_dir)
ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
mutex_unlock(&ccp_debugfs_lock);
- if (!ccp_debugfs_dir)
- return;
ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir);
- if (!ccp->debugfs_instance)
- goto err;
- debugfs_info = debugfs_create_file("info", 0400,
- ccp->debugfs_instance, ccp,
- &ccp_debugfs_info_ops);
- if (!debugfs_info)
- goto err;
+ debugfs_create_file("info", 0400, ccp->debugfs_instance, ccp,
+ &ccp_debugfs_info_ops);
- debugfs_stats = debugfs_create_file("stats", 0600,
- ccp->debugfs_instance, ccp,
- &ccp_debugfs_stats_ops);
- if (!debugfs_stats)
- goto err;
+ debugfs_create_file("stats", 0600, ccp->debugfs_instance, ccp,
+ &ccp_debugfs_stats_ops);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
@@ -325,21 +312,12 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
debugfs_q_instance =
debugfs_create_dir(name, ccp->debugfs_instance);
- if (!debugfs_q_instance)
- goto err;
-
- debugfs_q_stats =
- debugfs_create_file("stats", 0600,
- debugfs_q_instance, cmd_q,
- &ccp_debugfs_queue_ops);
- if (!debugfs_q_stats)
- goto err;
+
+ debugfs_create_file("stats", 0600, debugfs_q_instance, cmd_q,
+ &ccp_debugfs_queue_ops);
}
return;
-
-err:
- debugfs_remove_recursive(ccp->debugfs_instance);
}
void ccp5_debugfs_destroy(void)
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 44a4d2779b15..c9bfd4f439ce 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp)
/* Page alignment satisfies our needs for N <= 128 */
BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
- cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
- &cmd_q->qbase_dma,
- GFP_KERNEL);
+ cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+ &cmd_q->qbase_dma,
+ GFP_KERNEL);
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 0ea43cdeb05f..267a367bd076 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index b16be8a11d92..fadf859a14b8 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Platform Security Processor (PSP) interface
*
- * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2018 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
@@ -437,6 +437,7 @@ static int sev_get_api_version(void)
psp_master->api_major = status->api_major;
psp_master->api_minor = status->api_minor;
psp_master->build = status->build;
+ psp_master->sev_state = status->state;
return 0;
}
@@ -857,15 +858,15 @@ static int sev_misc_init(struct psp_device *psp)
return 0;
}
-static int sev_init(struct psp_device *psp)
+static int psp_check_sev_support(struct psp_device *psp)
{
/* Check if device supports SEV feature */
if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
- dev_dbg(psp->dev, "device does not support SEV\n");
- return 1;
+ dev_dbg(psp->dev, "psp does not support SEV\n");
+ return -ENODEV;
}
- return sev_misc_init(psp);
+ return 0;
}
int psp_dev_init(struct sp_device *sp)
@@ -890,6 +891,10 @@ int psp_dev_init(struct sp_device *sp)
psp->io_regs = sp->io_map;
+ ret = psp_check_sev_support(psp);
+ if (ret)
+ goto e_disable;
+
/* Disable and clear interrupts until ready */
iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
@@ -901,7 +906,7 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- ret = sev_init(psp);
+ ret = sev_misc_init(psp);
if (ret)
goto e_irq;
@@ -923,6 +928,11 @@ e_err:
dev_notice(dev, "psp initialization failed\n");
return ret;
+
+e_disable:
+ sp->psp_data = NULL;
+
+ return ret;
}
void psp_dev_destroy(struct sp_device *sp)
@@ -964,6 +974,21 @@ void psp_pci_init(void)
if (sev_get_api_version())
goto err;
+ /*
+	 * If the platform is not in the UNINIT state, then firmware upgrade
+	 * and/or the platform INIT command will fail; these commands require
+	 * the UNINIT state.
+	 *
+	 * In a normal boot we should never run into a case where the firmware
+	 * is not in the UNINIT state. But with a kexec boot, a reboot may not
+	 * go through a typical shutdown sequence and may leave the firmware
+	 * in the INIT or WORKING state.
+ */
+
+ if (psp_master->sev_state != SEV_STATE_UNINIT) {
+ sev_platform_shutdown(NULL);
+ psp_master->sev_state = SEV_STATE_UNINIT;
+ }
+
if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) &&
sev_update_firmware(psp_master->dev) == 0)
sev_get_api_version();
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 8b53a9674ecb..f5afeccf42a1 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Platform Security Processor (PSP) interface driver
*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index e0459002eb71..b2879767fc98 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 14398cad1625..5b0790025db3 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Secure Processor driver
*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 7da93e9bebed..41bce0a3f4bb 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -1,7 +1,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -226,8 +226,6 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto e_err;
- dev_notice(dev, "enabled\n");
-
return 0;
e_err:
@@ -246,8 +244,6 @@ static void sp_pci_remove(struct pci_dev *pdev)
sp_destroy(sp);
sp_free_irqs(sp);
-
- dev_notice(dev, "disabled\n");
}
#ifdef CONFIG_PM
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index b75dc7db2d4a..d24228efbaaa 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -1,7 +1,7 @@
/*
* AMD Secure Processor device driver
*
- * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2014,2018 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index f2643cda45db..a3527c00b29a 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
struct cc_crypto_req cc_req = {};
- struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
- int rc = -EINVAL;
unsigned int seq_len = 0;
struct device *dev = drvdata_to_dev(ctx->drvdata);
+ const u8 *enckey, *authkey;
+ int rc;
dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
- if (!RTA_OK(rta, keylen))
- goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
- param = RTA_DATA(rta);
- ctx->enc_keylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < ctx->enc_keylen)
+ struct crypto_authenc_keys keys;
+
+ rc = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (rc)
goto badkey;
- ctx->auth_keylen = keylen - ctx->enc_keylen;
+ enckey = keys.enckey;
+ authkey = keys.authkey;
+ ctx->enc_keylen = keys.enckeylen;
+ ctx->auth_keylen = keys.authkeylen;
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
+ rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
- ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
- CTR_RFC3686_NONCE_SIZE);
+ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
/* Set CTR key size */
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
}
} else { /* non-authenc - has just one key */
+ enckey = key;
+ authkey = NULL;
ctx->enc_keylen = keylen;
ctx->auth_keylen = 0;
}
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
/* STAT_PHASE_1: Copy key to ctx */
/* Get key material */
- memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+ memcpy(ctx->enckey, enckey, ctx->enc_keylen);
if (ctx->enc_keylen == 24)
memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
- memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+ ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
- rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
goto badkey;
}
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index dd948e1df9e5..0ee1c52da0a4 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -156,8 +156,11 @@ static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
/* Verify there is no memory overflow*/
new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
- if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+ dev_err(dev, "Too many mlli entries. current %d max %d\n",
+ new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
return -ENOMEM;
+ }
/*handle buffer longer than 64 kbytes */
while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
@@ -511,10 +514,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
/* Map the src SGL */
rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto cipher_exit;
- }
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -528,12 +529,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
} else {
/* Map the dst sg */
- if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
- &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents)) {
- rc = -ENOMEM;
+ rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
goto cipher_exit;
- }
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
hw_iv_size, DMA_BIDIRECTIONAL);
}
- /*In case a pool was set, a table was
- *allocated and should be released
- */
- if (areq_ctx->mlli_params.curr_pool) {
+ /* Release pool */
+ if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
+ (areq_ctx->mlli_params.mlli_virt_addr)) {
dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
@@ -1078,10 +1078,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
&areq_ctx->dst.nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto chain_data_exit;
- }
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
@@ -1235,11 +1233,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
}
areq_ctx->ccm_iv0_dma_addr = dma_addr;
- if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
- &sg_data, req->assoclen)) {
- rc = -ENOMEM;
+ rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, req->assoclen);
+ if (rc)
goto aead_map_failure;
- }
}
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@@ -1299,10 +1296,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto aead_map_failure;
- }
if (areq_ctx->is_single_pass) {
/*
@@ -1386,6 +1381,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1405,18 +1401,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/* TODO: copy data in case the buffer is large enough for the operation */
/* map the previous buffer */
if (*curr_buff_cnt) {
- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
- &sg_data)) {
- return -ENOMEM;
- }
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
}
if (src && nbytes > 0 && do_update) {
- if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
- &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents)) {
+ rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
goto unmap_curr_buff;
- }
if (src && mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
memcpy(areq_ctx->buff_sg, src,
@@ -1435,7 +1431,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto fail_unmap_din;
}
/* change the buffer index for the unmap function */
@@ -1451,7 +1448,7 @@ unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- return -ENOMEM;
+ return rc;
}
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@@ -1470,6 +1467,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
unsigned int swap_index = 0;
+ int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1514,21 +1512,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
}
if (*curr_buff_cnt) {
- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
- &sg_data)) {
- return -ENOMEM;
- }
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
/* change the buffer index for next operation */
swap_index = 1;
}
if (update_data_len > *curr_buff_cnt) {
- if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE, &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents)) {
+ rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents);
+ if (rc)
goto unmap_curr_buff;
- }
if (mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
/* only one entry in the SG and no previous data */
@@ -1548,7 +1546,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
&areq_ctx->mlli_nents);
- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto fail_unmap_din;
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1562,7 +1561,7 @@ unmap_curr_buff:
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- return -ENOMEM;
+ return rc;
}
void cc_unmap_hash_request(struct device *dev, void *ctx,
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index cc92b031fad1..d9c17078517b 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -80,6 +80,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
default:
break;
}
+ break;
case S_DIN_to_DES:
if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
@@ -352,7 +353,8 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
dev_dbg(dev, "weak 3DES key");
return -EINVAL;
} else if (!des_ekey(tmp, key) &&
- (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (crypto_tfm_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
dev_dbg(dev, "weak DES key");
return -EINVAL;
@@ -652,6 +654,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
unsigned int len;
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_CBC:
/*
@@ -681,7 +685,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
break;
}
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
kzfree(req_ctx->iv);
skcipher_request_complete(req, err);
@@ -799,7 +802,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req)
memset(req_ctx, 0, sizeof(*req_ctx));
- if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+ if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
+ (req->cryptlen >= ivsize)) {
/* Allocate and save the last IV sized bytes of the source,
* which will be lost in case of in-place decryption.
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5ca184e42483..5fa05a7bcf36 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -39,11 +39,9 @@ static struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(AXIM_MON_COMP),
};
-int __init cc_debugfs_global_init(void)
+void __init cc_debugfs_global_init(void)
{
cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
-
- return !cc_debugfs_dir;
}
void __exit cc_debugfs_global_fini(void)
@@ -56,7 +54,6 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
struct device *dev = drvdata_to_dev(drvdata);
struct cc_debugfs_ctx *ctx;
struct debugfs_regset32 *regset;
- struct dentry *file;
debug_regs[0].offset = drvdata->sig_offset;
debug_regs[1].offset = drvdata->ver_offset;
@@ -74,22 +71,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
regset->base = drvdata->cc_base;
ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
- if (!ctx->dir)
- return -ENFILE;
-
- file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
- if (!file) {
- debugfs_remove(ctx->dir);
- return -ENFILE;
- }
- file = debugfs_create_bool("coherent", 0400, ctx->dir,
- &drvdata->coherent);
-
- if (!file) {
- debugfs_remove_recursive(ctx->dir);
- return -ENFILE;
- }
+ debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+ debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
drvdata->debugfs = ctx;
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 5b5320eca7d2..01cbd9a95659 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -5,7 +5,7 @@
#define __CC_DEBUGFS_H__
#ifdef CONFIG_DEBUG_FS
-int cc_debugfs_global_init(void);
+void cc_debugfs_global_init(void);
void cc_debugfs_global_fini(void);
int cc_debugfs_init(struct cc_drvdata *drvdata);
@@ -13,11 +13,7 @@ void cc_debugfs_fini(struct cc_drvdata *drvdata);
#else
-static inline int cc_debugfs_global_init(void)
-{
- return 0;
-}
-
+static inline void cc_debugfs_global_init(void) {}
static inline void cc_debugfs_global_fini(void) {}
static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8ada308d72ee..3bcc6c76e090 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -103,10 +103,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
- if (irr == 0) { /* Probably shared interrupt line */
- dev_err(dev, "Got interrupt with empty IRR\n");
+
+ if (irr == 0) /* Probably shared interrupt line */
return IRQ_NONE;
- }
+
imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
/* clear interrupt - must be before processing events */
@@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
rc = cc_ivgen_init(new_drvdata);
if (rc) {
dev_err(dev, "cc_ivgen_init failed\n");
- goto post_power_mgr_err;
+ goto post_buf_mgr_err;
}
/* Allocate crypto algs */
@@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_hash_err;
}
+ /* All set, we can allow autosuspend */
+ cc_pm_go(new_drvdata);
+
/* If we got here and FIPS mode is enabled
* it means all FIPS test passed, so let TEE
* know we're good.
@@ -417,8 +420,6 @@ post_cipher_err:
cc_cipher_free(new_drvdata);
post_ivgen_err:
cc_ivgen_fini(new_drvdata);
-post_power_mgr_err:
- cc_pm_fini(new_drvdata);
post_buf_mgr_err:
cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
@@ -538,13 +539,8 @@ static struct platform_driver ccree_driver = {
static int __init ccree_init(void)
{
- int ret;
-
cc_hash_global_init();
-
- ret = cc_debugfs_global_init();
- if (ret)
- return ret;
+ cc_debugfs_global_init();
return platform_driver_register(&ccree_driver);
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 5be7fd431b05..33dbf3e6d15d 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -111,13 +111,11 @@ struct cc_crypto_req {
* @cc_base: virt address of the CC registers
* @irq: device IRQ number
* @irq_mask: Interrupt mask shadow (1 for masked interrupts)
- * @fw_ver: SeP loaded firmware version
*/
struct cc_drvdata {
void __iomem *cc_base;
int irq;
u32 irq_mask;
- u32 fw_ver;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d990f472e89f..6ff7e75ad90e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev)
int cc_pm_init(struct cc_drvdata *drvdata)
{
- int rc = 0;
struct device *dev = drvdata_to_dev(drvdata);
/* must be before enabling to avoid redundant suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
/* activate the PM module */
- rc = pm_runtime_set_active(dev);
- if (rc)
- return rc;
- /* enable the PM module*/
- pm_runtime_enable(dev);
+ return pm_runtime_set_active(dev);
+}
- return rc;
+/* enable the PM module*/
+void cc_pm_go(struct cc_drvdata *drvdata)
+{
+ pm_runtime_enable(drvdata_to_dev(drvdata));
}
void cc_pm_fini(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 020a5403c58b..907a6db4d6c0 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -16,6 +16,7 @@
extern const struct dev_pm_ops ccree_pm;
int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_go(struct cc_drvdata *drvdata);
void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
return 0;
}
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
+
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
static inline int cc_pm_suspend(struct device *dev)
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index 639e5718dff4..b7bd980a27d8 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index bcef76508dfa..8d8cf80b9294 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1368,7 +1368,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx = NULL;
- struct adapter *adap;
unsigned int id;
int txq_perchan, txq_idx, ntxq;
int err = 0, rxq_perchan, rxq_idx;
@@ -1382,7 +1381,6 @@ static int chcr_device_init(struct chcr_context *ctx)
goto out;
}
ctx->dev = &u_ctx->dev;
- adap = padap(ctx->dev);
ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
@@ -2762,7 +2760,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize)
return 0;
}
-static void generate_b0(struct aead_request *req, u8 *ivptr,
+static int generate_b0(struct aead_request *req, u8 *ivptr,
unsigned short op_type)
{
unsigned int l, lp, m;
@@ -2787,6 +2785,8 @@ static void generate_b0(struct aead_request *req, u8 *ivptr,
rc = set_msg_len(b0 + 16 - l,
(op_type == CHCR_DECRYPT_OP) ?
req->cryptlen - m : req->cryptlen, l);
+
+ return rc;
}
static inline int crypto_ccm_check_iv(const u8 *iv)
@@ -2821,7 +2821,7 @@ static int ccm_format_packet(struct aead_request *req,
*((unsigned short *)(reqctx->scratch_pad + 16)) =
htons(assoclen);
- generate_b0(req, ivptr, op_type);
+ rc = generate_b0(req, ivptr, op_type);
/* zero the ctr value */
memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
return rc;
@@ -3676,9 +3676,9 @@ static int chcr_aead_op(struct aead_request *req,
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
- if (IS_ERR(skb) || !skb) {
+ if (IS_ERR_OR_NULL(skb)) {
chcr_dec_wrcount(cdev);
- return PTR_ERR(skb);
+ return PTR_ERR_OR_ZERO(skb);
}
skb->dev = u_ctx->lldi.ports[0];
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 1159dee964ed..ad874d548aa5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -183,7 +183,7 @@ struct chcr_ipsec_aadiv {
struct ipsec_sa_entry {
int hmac_ctrl;
u16 esn;
- u16 imm;
+ u16 resv;
unsigned int enckey_len;
unsigned int kctx_len;
unsigned int authsize;
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 2fb48cce4462..2f60049361ef 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -303,6 +303,9 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
return false;
}
+ /* Inline single pdu */
+ if (skb_shinfo(skb)->gso_size)
+ return false;
return true;
}
@@ -333,7 +336,8 @@ static inline int is_eth_imm(const struct sk_buff *skb,
}
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
- struct ipsec_sa_entry *sa_entry)
+ struct ipsec_sa_entry *sa_entry,
+ bool *immediate)
{
unsigned int kctx_len;
unsigned int flits;
@@ -351,8 +355,10 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
* TX Packet header plus the skb data in the Work Request.
*/
- if (hdrlen)
+ if (hdrlen) {
+ *immediate = true;
return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+ }
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
@@ -415,12 +421,12 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
memcpy(aadiv->iv, iv, 8);
- if (sa_entry->imm) {
+ if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
sc_imm = (struct ulptx_idata *)(pos +
(DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
sizeof(__be64)) << 3));
- sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
- sc_imm->len = cpu_to_be32(sa_entry->imm);
+ sc_imm->cmd_more = FILL_CMD_MORE(0);
+ sc_imm->len = cpu_to_be32(skb->len);
}
pos += len;
return pos;
@@ -528,15 +534,18 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
struct adapter *adap = pi->adapter;
unsigned int ivsize = GCM_ESP_IV_SIZE;
struct chcr_ipsec_wr *wr;
+ bool immediate = false;
u16 immdatalen = 0;
unsigned int flits;
u32 ivinoffset;
u32 aadstart;
u32 aadstop;
u32 ciphstart;
+ u16 sc_more = 0;
u32 ivdrop = 0;
u32 esnlen = 0;
u32 wr_mid;
+ u16 ndesc;
int qidx = skb_get_queue_mapping(skb);
struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
unsigned int kctx_len = sa_entry->kctx_len;
@@ -544,22 +553,24 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
atomic_inc(&adap->chcr_stats.ipsec_cnt);
- flits = calc_tx_sec_flits(skb, sa_entry);
+ flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
+ ndesc = DIV_ROUND_UP(flits, 2);
if (sa_entry->esn)
ivdrop = 1;
- if (is_eth_imm(skb, sa_entry)) {
+ if (immediate)
immdatalen = skb->len;
- sa_entry->imm = immdatalen;
- }
- if (sa_entry->esn)
+ if (sa_entry->esn) {
esnlen = sizeof(struct chcr_ipsec_aadiv);
+ if (!skb_is_nonlinear(skb))
+ sc_more = 1;
+ }
/* WR Header */
wr = (struct chcr_ipsec_wr *)pos;
wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
- wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
netif_tx_stop_queue(q->txq);
@@ -571,10 +582,10 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
/* ULPTX */
wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
- wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+ wr->req.ulptx.len = htonl(ndesc - 1);
/* Sub-command */
- wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
sizeof(wr->req.key_ctx) +
kctx_len +
@@ -697,7 +708,7 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_reclaim_completed_tx(adap, &q->q, true);
- flits = calc_tx_sec_flits(skb, sa_entry);
+ flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -710,9 +721,6 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb, sa_entry))
- immediate = true;
-
if (!immediate &&
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
diff --git a/drivers/crypto/chelsio/chtls/Makefile b/drivers/crypto/chelsio/chtls/Makefile
index df1379570a8e..b958f1b8ec39 100644
--- a/drivers/crypto/chelsio/chtls/Makefile
+++ b/drivers/crypto/chelsio/chtls/Makefile
@@ -1,4 +1,5 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 -Idrivers/crypto/chelsio/
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \
+ -I $(srctree)/drivers/crypto/chelsio
obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls.o
chtls-objs := chtls_main.o chtls_cm.o chtls_io.o chtls_hw.o
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 59b75299fcbc..4e22332496c5 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -24,6 +24,7 @@
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
+#include <net/tls.h>
#include "chtls.h"
#include "chtls_cm.h"
@@ -615,7 +616,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
pi = netdev_priv(ndev);
adap = pi->adapter;
- if (!(adap->flags & FULL_INIT_DONE))
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE))
return -EBADF;
if (listen_hash_find(cdev, sk) >= 0) /* already have it */
@@ -1015,6 +1016,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
{
struct inet_sock *newinet;
const struct iphdr *iph;
+ struct tls_context *ctx;
struct net_device *ndev;
struct chtls_sock *csk;
struct dst_entry *dst;
@@ -1063,6 +1065,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
sk_setup_caps(newsk, dst);
+ ctx = tls_get_ctx(lsk);
+ newsk->sk_destruct = ctx->sk_destruct;
csk->sk = newsk;
csk->passive_reap_next = oreq;
csk->tx_chan = cxgb4_port_chan(ndev);
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 18f553fcc167..1285a1bceda7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -922,14 +922,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
struct sock *sk, long *timeo_p)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int sndbuf, err = 0;
+ int err = 0;
long current_timeo;
long vm_wait = 0;
bool noblock;
current_timeo = *timeo_p;
noblock = (*timeo_p ? false : true);
- sndbuf = cdev->max_host_sndbuf;
if (csk_mem_free(cdev, sk)) {
current_timeo = (prandom_u32() % (HZ / 5)) + 2;
vm_wait = (prandom_u32() % (HZ / 5)) + 2;
@@ -1401,23 +1400,18 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct net_device *dev = csk->egress_dev;
struct chtls_hws *hws = &csk->tlshws;
struct tcp_sock *tp = tcp_sk(sk);
- struct adapter *adap;
unsigned long avail;
int buffers_freed;
int copied = 0;
- int request;
int target;
long timeo;
- adap = netdev2adap(dev);
buffers_freed = 0;
timeo = sock_rcvtimeo(sk, nonblock);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- request = len;
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
chtls_cleanup_rbuf(sk, copied);
@@ -1694,11 +1688,9 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
- struct chtls_hws *hws;
unsigned long avail; /* amount of available data in current skb */
int buffers_freed;
int copied = 0;
- int request;
long timeo;
int target; /* Read at least this many bytes */
@@ -1718,7 +1710,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
- hws = &csk->tlshws;
if (is_tls_rx(csk))
return chtls_pt_recvmsg(sk, msg, len, nonblock,
@@ -1726,7 +1717,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
timeo = sock_rcvtimeo(sk, nonblock);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- request = len;
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
chtls_cleanup_rbuf(sk, copied);
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 563f8fe7686a..dd2daf2a54e0 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -30,7 +30,6 @@
*/
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
-static DEFINE_MUTEX(cdev_list_lock);
static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a5a36fe7bf2c..dad212cabe63 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1961,7 +1961,8 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 tmp[DES_EXPKEY_WORDS];
int ret = des_ekey(tmp, key);
- if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(ret == 0) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index cdc4f9a171d9..adc0cd8ae97b 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
} else {
/* new key */
- ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
- &ctx->pkey, GFP_KERNEL);
+ ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+ &ctx->pkey, GFP_KERNEL);
if (!ctx->key) {
mutex_unlock(&ctx->lock);
return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index c1ee4e7bf996..91ee2bb575df 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
struct sec_queue_ring_db *ring_db = &queue->ring_db;
int ret;
- ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
- &ring_cmd->paddr,
- GFP_KERNEL);
+ ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
+ &ring_cmd->paddr, GFP_KERNEL);
if (!ring_cmd->vaddr)
return -ENOMEM;
@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
mutex_init(&ring_cmd->lock);
ring_cmd->callback = sec_alg_callback;
- ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
- &ring_cq->paddr,
- GFP_KERNEL);
+ ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
+ &ring_cq->paddr, GFP_KERNEL);
if (!ring_cq->vaddr) {
ret = -ENOMEM;
goto err_free_ring_cmd;
}
- ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
- &ring_db->paddr,
- GFP_KERNEL);
+ ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
+ &ring_db->paddr, GFP_KERNEL);
if (!ring_db->vaddr) {
ret = -ENOMEM;
goto err_free_ring_cq;
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d531c14020dc..7ef30a98cb24 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -940,7 +940,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
}
ret = des_ekey(tmp, key);
- if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 19fba998b86b..5c4659b04d70 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,9 +260,9 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_zalloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_alloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
return 0;
@@ -847,7 +847,7 @@ static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
goto out;
if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+ if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
ret = -EINVAL;
} else {
*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
@@ -1125,7 +1125,7 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
goto out;
if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
- if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+ if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
ret = -EINVAL;
goto out;
} else {
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0ae84ec9e21c..fb279b3a1ca1 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -286,7 +286,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
}
ret = des_ekey(tmp, key);
- if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
@@ -322,7 +322,6 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
struct mv_cesa_skcipher_dma_iter iter;
bool skip_ctx = false;
int ret;
- unsigned int ivsize;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
@@ -381,7 +380,6 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
} while (mv_cesa_skcipher_req_iter_next_op(&iter));
/* Add output data for IV */
- ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
CESA_SA_DATA_SRAM_OFFSET,
CESA_TDMA_SRC_IN_SRAM, flags);
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index ee0404e27a0f..5660e5e5e022 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
if (!ring[i])
goto err_cleanup;
- ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->cmd_dma,
- GFP_KERNEL);
+ ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
+ MTK_DESC_RING_SZ,
+ &ring[i]->cmd_dma,
+ GFP_KERNEL);
if (!ring[i]->cmd_base)
goto err_cleanup;
- ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
- MTK_DESC_RING_SZ,
- &ring[i]->res_dma,
- GFP_KERNEL);
+ ring[i]->res_base = dma_alloc_coherent(cryp->dev,
+ MTK_DESC_RING_SZ,
+ &ring[i]->res_dma,
+ GFP_KERNEL);
if (!ring[i]->res_base)
goto err_cleanup;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 55f34cfc43ff..9450c41211b2 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -772,7 +772,7 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
}
err = des_ekey(tmp, key);
- if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 6369019219d4..1ba2633e90d6 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -662,7 +662,7 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
pr_debug("enter, keylen: %d\n", keylen);
/* Do we need to test against weak key? */
- if (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+ if (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
u32 tmp[DES_EXPKEY_WORDS];
int ret = des_ekey(tmp, key);
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 17068b55fea5..1b3acdeffede 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -759,7 +759,8 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
}
if (unlikely(!des_ekey(tmp, key)) &&
- (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (crypto_ablkcipher_get_flags(cipher) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile
index 8f5fd4838a96..822b5de58ec6 100644
--- a/drivers/crypto/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/qat/qat_c3xxx/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 763c2166ee0e..d937cc7248a5 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile
index 16d178e2eaa2..8f56d27c7479 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 613c7d5644ce..1dc5ac859f7b 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile
index bd75ace59b76..6dcd404578fc 100644
--- a/drivers/crypto/qat/qat_c62x/Makefile
+++ b/drivers/crypto/qat/qat_c62x/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 9cb832963357..2bc06c89d2fe 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile
index ecd708c213b2..1e5d51de778f 100644
--- a/drivers/crypto/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/qat/qat_c62xvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 278452b8ef81..a68358b31292 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 3744b22f0c46..d28cba34773e 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
dev_to_node(&GET_DEV(accel_dev)));
if (!admin)
return -ENOMEM;
- admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- &admin->phy_addr, GFP_KERNEL);
+ admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &admin->phy_addr, GFP_KERNEL);
if (!admin->virt_addr) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
kfree(admin);
return -ENOMEM;
}
- admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
- PAGE_SIZE,
- &admin->const_tbl_addr,
- GFP_KERNEL);
+ admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+ PAGE_SIZE,
+ &admin->const_tbl_addr,
+ GFP_KERNEL);
if (!admin->virt_tbl_addr) {
dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index d0879790561f..5c7fdb0fc53d 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -141,13 +141,6 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
accel_dev->debugfs_dir,
dev_cfg_data,
&qat_dev_cfg_fops);
- if (!dev_cfg_data->debug) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to create qat cfg debugfs entry.\n");
- kfree(dev_cfg_data);
- accel_dev->cfg = NULL;
- return -EFAULT;
- }
return 0;
}
EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 57d2622728a5..2136cbe4bf6c 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -486,12 +486,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
/* accel_dev->debugfs_dir should always be non-NULL here */
etr_data->debug = debugfs_create_dir("transport",
accel_dev->debugfs_dir);
- if (!etr_data->debug) {
- dev_err(&GET_DEV(accel_dev),
- "Unable to create transport debugfs entry\n");
- ret = -ENOENT;
- goto err_bank_debug;
- }
for (i = 0; i < num_banks; i++) {
ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
@@ -504,7 +498,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
err_bank_all:
debugfs_remove(etr_data->debug);
-err_bank_debug:
kfree(etr_data->banks);
err_bank:
kfree(etr_data);
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index 52340b9bb387..e794e9d97b2c 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -163,11 +163,6 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
ring->bank->bank_debug_dir,
ring, &adf_ring_debug_fops);
- if (!ring_debug->debug) {
- pr_err("QAT: Failed to create ring debug entry.\n");
- kfree(ring_debug);
- return -EFAULT;
- }
ring->ring_debug = ring_debug;
return 0;
}
@@ -271,19 +266,9 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
bank->bank_debug_dir = debugfs_create_dir(name, parent);
- if (!bank->bank_debug_dir) {
- pr_err("QAT: Failed to create bank debug dir.\n");
- return -EFAULT;
- }
-
bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
bank->bank_debug_dir, bank,
&adf_bank_debug_fops);
- if (!bank->bank_debug_cfg) {
- pr_err("QAT: Failed to create bank debug entry.\n");
- debugfs_remove(bank->bank_debug_dir);
- return -EFAULT;
- }
return 0;
}
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index d2698299896f..975c75198f56 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
- ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->enc_cd) {
return -ENOMEM;
}
- ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->dec_cd) {
goto out_free_enc;
}
@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
- ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->enc_cd) {
spin_unlock(&ctx->lock);
return -ENOMEM;
}
- ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
if (!ctx->dec_cd) {
spin_unlock(&ctx->lock);
goto out_free_enc;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 320e7854b4ee..c9f324730d71 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
} else {
int shift = ctx->p_size - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev,
- ctx->p_size,
- &qat_req->in.dh.in.b,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev,
+ ctx->p_size,
+ &qat_req->in.dh.in.b,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
- &qat_req->out.dh.r,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
+ &qat_req->out.dh.r,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
return -EINVAL;
ctx->p_size = params->p_size;
- ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+ ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
return -ENOMEM;
memcpy(ctx->p, params->p, ctx->p_size);
@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
return 0;
}
- ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+ ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
if (!ctx->g)
return -ENOMEM;
memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
if (ret < 0)
goto err_clear_ctx;
- ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
- GFP_KERNEL);
+ ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+ GFP_KERNEL);
if (!ctx->xa) {
ret = -ENOMEM;
goto err_clear_ctx;
@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.enc.m,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.rsa.enc.m,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.enc.c,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->out.rsa.enc.c,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.dec.c,
- GFP_KERNEL);
+ qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.rsa.dec.c,
+ GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
goto unmap_src;
} else {
- qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.dec.m,
- GFP_KERNEL);
+ qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+ &qat_req->out.rsa.dec.m,
+ GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
goto err;
ret = -ENOMEM;
- ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+ ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
if (!ctx->n)
goto err;
@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
return -EINVAL;
}
- ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+ ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
if (!ctx->e)
return -ENOMEM;
@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
goto err;
ret = -ENOMEM;
- ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+ ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
if (!ctx->d)
goto err;
@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto err;
- ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+ ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
goto err;
memcpy(ctx->p + (half_key_sz - len), ptr, len);
@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_p;
- ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+ ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
if (!ctx->q)
goto free_p;
memcpy(ctx->q + (half_key_sz - len), ptr, len);
@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_q;
- ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
- GFP_KERNEL);
+ ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+ GFP_KERNEL);
if (!ctx->dp)
goto free_q;
memcpy(ctx->dp + (half_key_sz - len), ptr, len);
@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_dp;
- ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
- GFP_KERNEL);
+ ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+ GFP_KERNEL);
if (!ctx->dq)
goto free_dp;
memcpy(ctx->dq + (half_key_sz - len), ptr, len);
@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
qat_rsa_drop_leading_zeros(&ptr, &len);
if (!len)
goto free_dq;
- ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
- GFP_KERNEL);
+ ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+ GFP_KERNEL);
if (!ctx->qinv)
goto free_dq;
memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
index 180a00ed7f89..0fc06b1e1632 100644
--- a/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 3a9708ef4ce2..b11bf8c0e683 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
index 5c3ccf8267eb..9ce906af6034 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 3da0f951cb59..1b762eefc6c1 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
- if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
- ret = -EINVAL;
- goto out_err;
- }
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 25c13e26d012..154b6baa124e 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -180,8 +180,8 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
u32 tmp[DES_EXPKEY_WORDS];
ret = des_ekey(tmp, key);
- if (!ret && crypto_ablkcipher_get_flags(ablk) &
- CRYPTO_TFM_REQ_WEAK_KEY)
+ if (!ret && (crypto_ablkcipher_get_flags(ablk) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS))
goto weakkey;
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index c9d622abd90c..0ce4a65b95f5 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev,
count = (dev->left_bytes > PAGE_SIZE) ?
PAGE_SIZE : dev->left_bytes;
- if (!sg_pcopy_to_buffer(dev->first, dev->nents,
+ if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
dev->addr_vir, count,
dev->total - dev->left_bytes)) {
dev_err(dev->dev, "[%s:%d] pcopy err\n",
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index d5fb4013fb42..54ee5b3ed9db 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -207,7 +207,8 @@ struct rk_crypto_info {
void *addr_vir;
int aligned;
int align_size;
- size_t nents;
+ size_t src_nents;
+ size_t dst_nents;
unsigned int total;
unsigned int count;
dma_addr_t addr_in;
@@ -244,6 +245,7 @@ struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
u32 mode;
+ u8 iv[AES_BLOCK_SIZE];
};
enum alg_type {
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 639c15c5364b..02dac6ae7e53 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -60,7 +60,7 @@ static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
if (keylen == DES_KEY_SIZE) {
if (!des_ekey(tmp, key) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
@@ -242,6 +242,17 @@ static void crypto_dma_start(struct rk_crypto_info *dev)
static int rk_set_data_start(struct rk_crypto_info *dev)
{
int err;
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ u32 ivsize = crypto_ablkcipher_ivsize(tfm);
+ u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+ dev->sg_src->offset + dev->sg_src->length - ivsize;
+
+ /* store the IV that needs to be updated in chain mode */
+ if (ctx->mode & RK_CRYPTO_DEC)
+ memcpy(ctx->iv, src_last_blk, ivsize);
err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
if (!err)
@@ -260,8 +271,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
dev->total = req->nbytes;
dev->sg_src = req->src;
dev->first = req->src;
- dev->nents = sg_nents(req->src);
+ dev->src_nents = sg_nents(req->src);
dev->sg_dst = req->dst;
+ dev->dst_nents = sg_nents(req->dst);
dev->aligned = 1;
spin_lock_irqsave(&dev->lock, flags);
@@ -285,6 +297,28 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
}
+static void rk_update_iv(struct rk_crypto_info *dev)
+{
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ u32 ivsize = crypto_ablkcipher_ivsize(tfm);
+ u8 *new_iv = NULL;
+
+ if (ctx->mode & RK_CRYPTO_DEC) {
+ new_iv = ctx->iv;
+ } else {
+ new_iv = page_address(sg_page(dev->sg_dst)) +
+ dev->sg_dst->offset + dev->sg_dst->length - ivsize;
+ }
+
+ if (ivsize == DES_BLOCK_SIZE)
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
+ else if (ivsize == AES_BLOCK_SIZE)
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
+}
+
/* return:
* true  an error occurred
* false  no error, continue
@@ -297,7 +331,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
dev->unload_data(dev);
if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(req->dst, dev->nents,
+ if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
dev->addr_vir, dev->count,
dev->total - dev->left_bytes -
dev->count)) {
@@ -306,6 +340,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
}
}
if (dev->left_bytes) {
+ rk_update_iv(dev);
if (dev->aligned) {
if (sg_is_last(dev->sg_src)) {
dev_err(dev->dev, "[%s:%d] Lack of data\n",
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 821a506b9e17..c336ae75e361 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
dev->sg_dst = NULL;
dev->sg_src = req->src;
dev->first = req->src;
- dev->nents = sg_nents(req->src);
+ dev->src_nents = sg_nents(req->src);
rctx = ahash_request_ctx(req);
rctx->mode = 0;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 0064be0e3941..f4e625cf53ca 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -232,6 +232,7 @@
* struct samsung_aes_variant - platform specific SSS driver data
* @aes_offset: AES register offset from SSS module's base.
* @hash_offset: HASH register offset from SSS module's base.
+ * @clk_names: names of clocks needed to run SSS IP
*
* Specifies platform specific configuration of SSS module.
* Note: A structure for driver specific platform data is used for future
@@ -240,6 +241,7 @@
struct samsung_aes_variant {
unsigned int aes_offset;
unsigned int hash_offset;
+ const char *clk_names[];
};
struct s5p_aes_reqctx {
@@ -296,6 +298,7 @@ struct s5p_aes_ctx {
struct s5p_aes_dev {
struct device *dev;
struct clk *clk;
+ struct clk *pclk;
void __iomem *ioaddr;
void __iomem *aes_ioaddr;
int irq_fc;
@@ -384,11 +387,19 @@ struct s5p_hash_ctx {
static const struct samsung_aes_variant s5p_aes_data = {
.aes_offset = 0x4000,
.hash_offset = 0x6000,
+ .clk_names = { "secss", },
};
static const struct samsung_aes_variant exynos_aes_data = {
.aes_offset = 0x200,
.hash_offset = 0x400,
+ .clk_names = { "secss", },
+};
+
+static const struct samsung_aes_variant exynos5433_slim_aes_data = {
+ .aes_offset = 0x400,
+ .hash_offset = 0x800,
+ .clk_names = { "pclk", "aclk", },
};
static const struct of_device_id s5p_sss_dt_match[] = {
@@ -400,6 +411,10 @@ static const struct of_device_id s5p_sss_dt_match[] = {
.compatible = "samsung,exynos4210-secss",
.data = &exynos_aes_data,
},
+ {
+ .compatible = "samsung,exynos5433-slim-sss",
+ .data = &exynos5433_slim_aes_data,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
@@ -463,6 +478,9 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
static void s5p_sg_done(struct s5p_aes_dev *dev)
{
+ struct ablkcipher_request *req = dev->req;
+ struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+
if (dev->sg_dst_cpy) {
dev_dbg(dev->dev,
"Copying %d bytes of output data back to original place\n",
@@ -472,6 +490,11 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
}
s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+ if (reqctx->mode & FLAGS_AES_CBC)
+ memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
+
+ else if (reqctx->mode & FLAGS_AES_CTR)
+ memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}
/* Calls the completion. Cannot be called with dev->lock hold. */
@@ -1819,10 +1842,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
void __iomem *keystart;
if (iv)
- memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
+ AES_BLOCK_SIZE);
if (ctr)
- memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
+ AES_BLOCK_SIZE);
if (keylen == AES_KEYSIZE_256)
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
@@ -2208,18 +2233,39 @@ static int s5p_aes_probe(struct platform_device *pdev)
return PTR_ERR(pdata->ioaddr);
}
- pdata->clk = devm_clk_get(dev, "secss");
+ pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
if (IS_ERR(pdata->clk)) {
- dev_err(dev, "failed to find secss clock source\n");
+ dev_err(dev, "failed to find secss clock %s\n",
+ variant->clk_names[0]);
return -ENOENT;
}
err = clk_prepare_enable(pdata->clk);
if (err < 0) {
- dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
+ dev_err(dev, "Enabling clock %s failed, err %d\n",
+ variant->clk_names[0], err);
return err;
}
+ if (variant->clk_names[1]) {
+ pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
+ if (IS_ERR(pdata->pclk)) {
+ dev_err(dev, "failed to find clock %s\n",
+ variant->clk_names[1]);
+ err = -ENOENT;
+ goto err_clk;
+ }
+
+ err = clk_prepare_enable(pdata->pclk);
+ if (err < 0) {
+ dev_err(dev, "Enabling clock %s failed, err %d\n",
+ variant->clk_names[0], err);
+ goto err_clk;
+ }
+ } else {
+ pdata->pclk = NULL;
+ }
+
spin_lock_init(&pdata->lock);
spin_lock_init(&pdata->hash_lock);
@@ -2295,8 +2341,11 @@ err_algs:
tasklet_kill(&pdata->tasklet);
err_irq:
- clk_disable_unprepare(pdata->clk);
+ if (pdata->pclk)
+ clk_disable_unprepare(pdata->pclk);
+err_clk:
+ clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
return err;
@@ -2323,6 +2372,9 @@ static int s5p_aes_remove(struct platform_device *pdev)
pdata->use_hash = false;
}
+ if (pdata->pclk)
+ clk_disable_unprepare(pdata->pclk);
+
clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
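A minimal sketch of the probe-time pattern the s5p-sss hunks above introduce: a mandatory clock named by the variant data plus an optional second clock, with the first clock disabled again if handling the second one fails. The helper and its signature are hypothetical; the clk API calls are real:

static int get_variant_clocks(struct device *dev, const char * const *names,
			      struct clk **clk, struct clk **pclk)
{
	int err;

	*clk = devm_clk_get(dev, names[0]);
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);

	err = clk_prepare_enable(*clk);
	if (err)
		return err;

	*pclk = NULL;
	if (names[1]) {
		*pclk = devm_clk_get(dev, names[1]);
		if (IS_ERR(*pclk)) {
			err = PTR_ERR(*pclk);
			goto err_clk;
		}

		err = clk_prepare_enable(*pclk);
		if (err)
			goto err_clk;
	}

	return 0;

err_clk:
	clk_disable_unprepare(*clk);
	return err;
}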
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 590d7352837e..4a6cc8a3045d 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1564,7 +1564,7 @@ err_engine:
static int stm32_hash_remove(struct platform_device *pdev)
{
- static struct stm32_hash_dev *hdev;
+ struct stm32_hash_dev *hdev;
int ret;
hdev = platform_get_drvdata(pdev);
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 5cf64746731a..54fd714d53ca 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -517,7 +517,7 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
flags = crypto_skcipher_get_flags(tfm);
ret = des_ekey(tmp, key);
- if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
dev_dbg(ss->dev, "Weak key %u\n", keylen);
return -EINVAL;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 45e20707cef8..de78b54bcfb1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
- void *err;
if (cryptlen + authsize > max_len) {
dev_err(dev, "length exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
- if (ivsize)
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
if (!dst || dst == src) {
src_len = assoclen + cryptlen + authsize;
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
dst_nents = sg_nents_for_len(dst, dst_len);
if (dst_nents < 0) {
dev_err(dev, "Invalid number of dst SG.\n");
- err = ERR_PTR(-EINVAL);
- goto error_sg;
+ return ERR_PTR(-EINVAL);
}
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
/* if its a ahash, add space for a second desc next to the first one */
if (is_sec1 && !dst)
alloc_len += sizeof(struct talitos_desc);
+ alloc_len += ivsize;
edesc = kmalloc(alloc_len, GFP_DMA | flags);
- if (!edesc) {
- err = ERR_PTR(-ENOMEM);
- goto error_sg;
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+ if (ivsize) {
+ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
}
memset(&edesc->desc, 0, sizeof(edesc->desc));
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
DMA_BIDIRECTIONAL);
}
return edesc;
-error_sg:
- if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
- return err;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
@@ -1543,7 +1535,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
}
if (unlikely(crypto_ablkcipher_get_flags(cipher) &
- CRYPTO_TFM_REQ_WEAK_KEY) &&
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
!des_ekey(tmp, key)) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
return -EINVAL;
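The talitos_edesc_alloc() hunks above avoid DMA-mapping an IV that may live on the caller's stack by reserving room for a private copy at the tail of the descriptor allocation; this also removes the need to unmap the IV on early error paths. A minimal sketch of the idea with hypothetical structure and helper names:

struct my_edesc {
	dma_addr_t iv_dma;	/* stand-in for the real descriptor fields */
	u8 buf[];		/* link tables, then the IV copy at the tail */
};

static struct my_edesc *edesc_alloc(struct device *dev, const u8 *iv,
				    unsigned int ivsize, size_t link_len,
				    gfp_t flags)
{
	size_t alloc_len = sizeof(struct my_edesc) + link_len + ivsize;
	struct my_edesc *edesc = kmalloc(alloc_len, GFP_DMA | flags);

	if (!edesc)
		return ERR_PTR(-ENOMEM);

	if (ivsize) {
		/* the IV now lives inside the edesc, not on the stack */
		u8 *iv_copy = (u8 *)edesc + alloc_len - ivsize;

		memcpy(iv_copy, iv, ivsize);
		edesc->iv_dma = dma_map_single(dev, iv_copy, ivsize,
					       DMA_TO_DEVICE);
	}

	return edesc;
}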
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index a92a66b1ff46..3235611928f2 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -595,6 +595,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
}
cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ dev_dbg(ctx->device->dev, "[%s]: DMA submission failed\n",
+ __func__);
+ return cookie;
+ }
+
dma_async_issue_pending(channel);
return 0;
@@ -994,10 +1000,11 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
}
ret = des_ekey(tmp, key);
- if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(ret == 0) &&
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
- __func__);
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+ __func__);
return -EINVAL;
}
@@ -1028,18 +1035,19 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
/* Checking key interdependency for weak key detection. */
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
- __func__);
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+ __func__);
return -EINVAL;
}
for (i = 0; i < 3; i++) {
ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
- if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ if (unlikely(ret == 0) &&
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: "
- "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+ __func__);
return -EINVAL;
}
}
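The flag rename in the sun4i-ss, talitos and ux500 hunks above tracks the crypto API change from CRYPTO_TFM_REQ_WEAK_KEY to CRYPTO_TFM_REQ_FORBID_WEAK_KEYS; the check itself is unchanged. A minimal sketch of the common DES setkey test, assuming the era's des_ekey() helper, which returns 0 for weak keys:

static int des_check_weak(struct crypto_ablkcipher *cipher, const u8 *key)
{
	u32 tmp[DES_EXPKEY_WORDS];
	u32 flags = crypto_ablkcipher_get_flags(cipher);

	/* reject only when the user explicitly forbids weak keys */
	if (!des_ekey(tmp, key) &&
	    (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	return 0;
}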
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 2c573d1aaa64..0704833ece92 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -406,7 +406,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
} else {
req_data->header.session_id =
cpu_to_le64(ctx->dec_sess_info.session_id);
- req_data->header.opcode =
+ req_data->header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
}
req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 92e78d16b476..c9aa15fb86a9 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -89,8 +89,8 @@ static struct dioname names[] =
#undef DIONAME
#undef DIOFBNAME
-static const char *unknowndioname
- = "unknown DIO board -- please email <linux-m68k@lists.linux-m68k.org>!";
+static const char unknowndioname[]
+ = "unknown DIO board, please email linux-m68k@lists.linux-m68k.org";
static const char *dio_getname(int id)
{
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4e557684f792..fe69dccfa0c0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@ struct at_xdmac_chan {
u32 save_cim;
u32 save_cnda;
u32 save_cndc;
+ u32 irq_status;
unsigned long status;
struct tasklet_struct tasklet;
struct dma_slave_config sconfig;
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
struct at_xdmac_desc *desc;
u32 error_mask;
- dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
- __func__, atchan->status);
+ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+ __func__, atchan->irq_status);
error_mask = AT_XDMAC_CIS_RBEIS
| AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
if (at_xdmac_chan_is_cyclic(atchan)) {
at_xdmac_handle_cyclic(atchan);
- } else if ((atchan->status & AT_XDMAC_CIS_LIS)
- || (atchan->status & error_mask)) {
+ } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+ || (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd;
- if (atchan->status & AT_XDMAC_CIS_RBEIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
dev_err(chan2dev(&atchan->chan), "read bus error!!!");
- if (atchan->status & AT_XDMAC_CIS_WBEIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
dev_err(chan2dev(&atchan->chan), "write bus error!!!");
- if (atchan->status & AT_XDMAC_CIS_ROIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock(&atchan->lock);
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
atchan = &atxdmac->chan[i];
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
- atchan->status = chan_status & chan_imr;
+ atchan->irq_status = chan_status & chan_imr;
dev_vdbg(atxdmac->dma.dev,
"%s: chan%d: imr=0x%x, status=0x%x\n",
__func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
- if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+ if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
tasklet_schedule(&atchan->tasklet);
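The at_xdmac hunks above stop reusing the long-lived status word for the per-interrupt cause: the IRQ handler latches the masked CIS value into a dedicated irq_status field, and the tasklet only consumes that snapshot. A minimal sketch with a hypothetical channel structure:

struct chan_state {
	u32 irq_status;			/* written by the IRQ handler only */
	unsigned long status;		/* long-lived channel state bits */
	struct tasklet_struct tasklet;
};

static void chan_handle_irq(struct chan_state *c, u32 cis, u32 cim)
{
	c->irq_status = cis & cim;	/* snapshot for the tasklet */
	tasklet_schedule(&c->tasklet);
}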
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ae10f5614f95 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
}
}
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static int bcm2835_dma_abort(struct bcm2835_chan *c)
{
- unsigned long cs;
+ void __iomem *chan_base = c->chan_base;
long int timeout = 10000;
- cs = readl(chan_base + BCM2835_DMA_CS);
- if (!(cs & BCM2835_DMA_ACTIVE))
+ /*
+ * A zero control block address means the channel is idle.
+ * (The ACTIVE flag in the CS register is not a reliable indicator.)
+ */
+ if (!readl(chan_base + BCM2835_DMA_ADDR))
return 0;
/* Write 0 to the active bit - Pause the DMA */
writel(0, chan_base + BCM2835_DMA_CS);
/* Wait for any current AXI transfer to complete */
- while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+ while ((readl(chan_base + BCM2835_DMA_CS) &
+ BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
cpu_relax();
- cs = readl(chan_base + BCM2835_DMA_CS);
- }
- /* We'll un-pause when we set of our next DMA */
+ /* Peripheral might be stuck and fail to signal AXI write responses */
if (!timeout)
- return -ETIMEDOUT;
-
- if (!(cs & BCM2835_DMA_ACTIVE))
- return 0;
-
- /* Terminate the control block chain */
- writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
- /* Abort the whole DMA */
- writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
- chan_base + BCM2835_DMA_CS);
+ dev_err(c->vc.chan.device->dev,
+ "failed to complete outstanding writes\n");
+ writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
return 0;
}
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
spin_lock_irqsave(&c->vc.lock, flags);
- /* Acknowledge interrupt */
- writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+ /*
+ * Clear the INT flag to receive further interrupts. Keep the channel
+ * active in case the descriptor is cyclic or in case the client has
+ * already terminated the descriptor and issued a new one. (May happen
+ * if this IRQ handler is threaded.) If the channel is finished, it
+ * will remain idle despite the ACTIVE flag being set.
+ */
+ writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+ c->chan_base + BCM2835_DMA_CS);
d = c->desc;
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
if (d->cyclic) {
/* call the cyclic callback */
vchan_cyclic_callback(&d->vd);
-
- /* Keep the DMA engine running */
- writel(BCM2835_DMA_ACTIVE,
- c->chan_base + BCM2835_DMA_CS);
- } else {
+ } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
vchan_cookie_complete(&c->desc->vd);
bcm2835_dma_start_desc(c);
}
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
unsigned long flags;
- int timeout = 10000;
LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags);
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
list_del_init(&c->node);
spin_unlock(&d->lock);
- /*
- * Stop DMA activity: we assume the callback will not be called
- * after bcm_dma_abort() returns (even if it does, it will see
- * c->desc is NULL and exit.)
- */
+ /* stop DMA activity */
if (c->desc) {
vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
- bcm2835_dma_abort(c->chan_base);
-
- /* Wait for stopping */
- while (--timeout) {
- if (!(readl(c->chan_base + BCM2835_DMA_CS) &
- BCM2835_DMA_ACTIVE))
- break;
-
- cpu_relax();
- }
-
- if (!timeout)
- dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+ bcm2835_dma_abort(c);
}
vchan_get_all_descriptors(&c->vc, &head);
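The bcm2835-dma hunks above reduce channel teardown to one sequence: treat a zero control-block address as idle, pause the channel, drain outstanding AXI writes, then reset. A minimal sketch using the driver's register names; the helper itself is hypothetical:

static void chan_abort(struct device *dev, void __iomem *chan_base)
{
	long timeout = 10000;

	/* a zero control-block address means the channel is already idle */
	if (!readl(chan_base + BCM2835_DMA_ADDR))
		return;

	/* pause the channel, then wait for outstanding AXI writes */
	writel(0, chan_base + BCM2835_DMA_CS);
	while ((readl(chan_base + BCM2835_DMA_CS) &
		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
		cpu_relax();

	if (!timeout)
		dev_err(dev, "failed to complete outstanding writes\n");

	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
}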
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index f1a441ab395d..3a11b1092e80 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -63,6 +63,7 @@
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
+#include <linux/numa.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
@@ -386,7 +387,8 @@ EXPORT_SYMBOL(dma_issue_pending_all);
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
int node = dev_to_node(chan->device->dev);
- return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+ return node == NUMA_NO_NODE ||
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/**
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2eea4ef72915..6511928b4cdf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -711,11 +711,9 @@ static int dmatest_func(void *data)
srcs[i] = um->addr[i] + src_off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
- dmaengine_unmap_put(um);
result("src mapping error", total_tests,
src_off, dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
um->to_cnt++;
}
@@ -730,11 +728,9 @@ static int dmatest_func(void *data)
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
- dmaengine_unmap_put(um);
result("dst mapping error", total_tests,
src_off, dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
um->bidi_cnt++;
}
@@ -762,12 +758,10 @@ static int dmatest_func(void *data)
}
if (!tx) {
- dmaengine_unmap_put(um);
result("prep error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
done->done = false;
@@ -776,12 +770,10 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- dmaengine_unmap_put(um);
result("submit error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
dma_async_issue_pending(chan);
@@ -790,22 +782,20 @@ static int dmatest_func(void *data)
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- dmaengine_unmap_put(um);
-
if (!done->done) {
result("test timed out", total_tests, src_off, dst_off,
len, 0);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
} else if (status != DMA_COMPLETE) {
result(status == DMA_ERROR ?
"completion error status" :
"completion busy status", total_tests, src_off,
dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
+ dmaengine_unmap_put(um);
+
if (params->noverify) {
verbose_result("test passed", total_tests, src_off,
dst_off, len, 0);
@@ -846,6 +836,12 @@ static int dmatest_func(void *data)
verbose_result("test passed", total_tests, src_off,
dst_off, len, 0);
}
+
+ continue;
+
+error_unmap_continue:
+ dmaengine_unmap_put(um);
+ failed_tests++;
}
ktime = ktime_sub(ktime_get(), ktime);
ktime = ktime_sub(ktime, comparetime);
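The dmatest hunks above funnel every failure path through a single error_unmap_continue label so the unmap and the failure counter are handled exactly once. A minimal sketch of that loop shape with hypothetical mapping/submission callbacks; dmaengine_unmap_put() is the real API:

static unsigned int run_tests(struct dmaengine_unmap_data *(*map_bufs)(void),
			      int (*submit_and_wait)(struct dmaengine_unmap_data *))
{
	unsigned int failed_tests = 0;
	int i;

	for (i = 0; i < 100; i++) {
		struct dmaengine_unmap_data *um = map_bufs();

		if (!um) {
			failed_tests++;
			continue;
		}

		if (submit_and_wait(um))
			goto error_unmap_continue;

		dmaengine_unmap_put(um);	/* success path */
		continue;

error_unmap_continue:
		dmaengine_unmap_put(um);
		failed_tests++;
	}

	return failed_tests;
}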
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index c2fff3f6c9ca..4a09af3cd546 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
{
struct imxdma_channel *imxdmac = (void *)data;
struct imxdma_engine *imxdma = imxdmac->imxdma;
- struct imxdma_desc *desc;
+ struct imxdma_desc *desc, *next_desc;
unsigned long flags;
spin_lock_irqsave(&imxdma->lock, flags);
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
if (!list_empty(&imxdmac->ld_queue)) {
- desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
- node);
+ next_desc = list_first_entry(&imxdmac->ld_queue,
+ struct imxdma_desc, node);
list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
- if (imxdma_xfer_desc(desc) < 0)
+ if (imxdma_xfer_desc(next_desc) < 0)
dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
__func__, imxdmac->channel);
}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a2b0a0e71168..86708fb9bda1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
{
int ret = -EBUSY;
- sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
- GFP_NOWAIT);
+ sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+ GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
- desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
- GFP_NOWAIT);
+ desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
+ GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index b7ec56ae02a6..1a2028e1c29e 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
* and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
*/
pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
- ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
- &ring->tphys, GFP_NOWAIT);
+ ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+ &ring->tphys, GFP_NOWAIT);
if (!ring->txd)
return -ENOMEM;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 35193b31a9e0..22cc7f68ef6e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
- CCW_BLOCK_SIZE,
- &mxs_chan->ccw_phys, GFP_KERNEL);
+ mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+ CCW_BLOCK_SIZE,
+ &mxs_chan->ccw_phys, GFP_KERNEL);
if (!mxs_chan->ccw) {
ret = -ENOMEM;
goto err_alloc;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 1d5988849aa6..eafd6c4b90fe 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
ring->size = ret;
/* Allocate memory for DMA ring descriptor */
- ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
- &ring->desc_paddr, GFP_KERNEL);
+ ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
+ &ring->desc_paddr, GFP_KERNEL);
if (!ring->desc_vaddr) {
chan_err(chan, "Failed to allocate ring desc\n");
return -ENOMEM;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 02880963092f..cb20b411493e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
*/
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/* Allocate the buffer descriptors. */
- chan->seg_v = dma_zalloc_coherent(chan->dev,
- sizeof(*chan->seg_v) *
- XILINX_DMA_NUM_DESCS,
- &chan->seg_p, GFP_KERNEL);
+ chan->seg_v = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
if (!chan->seg_v) {
dev_err(chan->dev,
"unable to allocate channel %d descriptors\n",
@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
* so allocating a desc segment during channel allocation for
* programming tail descriptor.
*/
- chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
- sizeof(*chan->cyclic_seg_v),
- &chan->cyclic_seg_p, GFP_KERNEL);
+ chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p,
+ GFP_KERNEL);
if (!chan->cyclic_seg_v) {
dev_err(chan->dev,
"unable to allocate desc segment for cyclic DMA\n");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 8db51750ce93..4478787a247f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
list_add_tail(&desc->node, &chan->free_list);
}
- chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
- (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
- &chan->desc_pool_p, GFP_KERNEL);
+ chan->desc_pool_v = dma_alloc_coherent(chan->dev,
+ (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ &chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v)
return -ENOMEM;
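The dma_zalloc_coherent() conversions in the imx-sdma, mtk-hsdma, mxs-dma, xgene, xilinx and zynqmp hunks above are mechanical: dma_alloc_coherent() already returns zeroed memory, so the wrapper added nothing. A minimal sketch with a hypothetical ring structure:

struct my_ring {
	void *desc;
	dma_addr_t phys;
};

static int ring_alloc(struct device *dev, struct my_ring *ring, size_t size)
{
	ring->desc = dma_alloc_coherent(dev, size, &ring->phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	/* no memset() needed: the buffer comes back zero-filled */
	return 0;
}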
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 4213cb0bb2a7..f8664bac9fa8 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -295,8 +295,8 @@ struct altr_sdram_mc_data {
#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
/* Sticky registers for Uncorrected Errors */
-#define S10_SYSMGR_UE_VAL_OFST 0x120
-#define S10_SYSMGR_UE_ADDR_OFST 0x124
+#define S10_SYSMGR_UE_VAL_OFST 0x220
+#define S10_SYSMGR_UE_ADDR_OFST 0x224
#define S10_DDR0_IRQ_MASK BIT(16)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index de15bf55895b..8e17149655f0 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -114,6 +114,14 @@ config EXTCON_PALMAS
Say Y here to enable support for USB peripheral and USB host
detection by palmas usb.
+config EXTCON_PTN5150
+ tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
+ depends on I2C && GPIOLIB || COMPILE_TEST
+ select REGMAP_I2C
+ help
+ Say Y here to enable support for USB peripheral and USB host
+ detection by the NXP PTN5150 CC (Configuration Channel) logic chip.
+
config EXTCON_QCOM_SPMI_MISC
tristate "Qualcomm USB extcon support"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0888fdeded72..261ce4cfe209 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
+obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o
obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
new file mode 100644
index 000000000000..d1c997599390
--- /dev/null
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-ptn5150.c - PTN5150 CC logic extcon driver to support USB detection
+//
+// Based on extcon-sm5502.c driver
+// Copyright (c) 2018-2019 by Vijai Kumar K
+// Author: Vijai Kumar K <vijaikumar.kanagarajan@gmail.com>
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/extcon-provider.h>
+#include <linux/gpio/consumer.h>
+
+/* PTN5150 registers */
+enum ptn5150_reg {
+ PTN5150_REG_DEVICE_ID = 0x01,
+ PTN5150_REG_CONTROL,
+ PTN5150_REG_INT_STATUS,
+ PTN5150_REG_CC_STATUS,
+ PTN5150_REG_CON_DET = 0x09,
+ PTN5150_REG_VCONN_STATUS,
+ PTN5150_REG_RESET,
+ PTN5150_REG_INT_MASK = 0x18,
+ PTN5150_REG_INT_REG_STATUS,
+ PTN5150_REG_END,
+};
+
+#define PTN5150_DFP_ATTACHED 0x1
+#define PTN5150_UFP_ATTACHED 0x2
+
+/* Define PTN5150 MASK/SHIFT constant */
+#define PTN5150_REG_DEVICE_ID_VENDOR_SHIFT 0
+#define PTN5150_REG_DEVICE_ID_VENDOR_MASK \
+ (0x3 << PTN5150_REG_DEVICE_ID_VENDOR_SHIFT)
+
+#define PTN5150_REG_DEVICE_ID_VERSION_SHIFT 3
+#define PTN5150_REG_DEVICE_ID_VERSION_MASK \
+ (0x1f << PTN5150_REG_DEVICE_ID_VERSION_SHIFT)
+
+#define PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT 2
+#define PTN5150_REG_CC_PORT_ATTACHMENT_MASK \
+ (0x7 << PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT)
+
+#define PTN5150_REG_CC_VBUS_DETECTION_SHIFT 7
+#define PTN5150_REG_CC_VBUS_DETECTION_MASK \
+ (0x1 << PTN5150_REG_CC_VBUS_DETECTION_SHIFT)
+
+#define PTN5150_REG_INT_CABLE_ATTACH_SHIFT 0
+#define PTN5150_REG_INT_CABLE_ATTACH_MASK \
+ (0x1 << PTN5150_REG_INT_CABLE_ATTACH_SHIFT)
+
+#define PTN5150_REG_INT_CABLE_DETACH_SHIFT 1
+#define PTN5150_REG_INT_CABLE_DETACH_MASK \
+ (0x1 << PTN5150_REG_CC_CABLE_DETACH_SHIFT)
+
+struct ptn5150_info {
+ struct device *dev;
+ struct extcon_dev *edev;
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ struct gpio_desc *int_gpiod;
+ struct gpio_desc *vbus_gpiod;
+ int irq;
+ struct work_struct irq_work;
+ struct mutex mutex;
+};
+
+/* List of detectable cables */
+static const unsigned int ptn5150_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+static const struct regmap_config ptn5150_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PTN5150_REG_END,
+};
+
+static void ptn5150_irq_work(struct work_struct *work)
+{
+ struct ptn5150_info *info = container_of(work,
+ struct ptn5150_info, irq_work);
+ int ret = 0;
+ unsigned int reg_data;
+ unsigned int int_status;
+
+ if (!info->edev)
+ return;
+
+ mutex_lock(&info->mutex);
+
+ ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
+ if (ret) {
+ dev_err(info->dev, "failed to read CC STATUS %d\n", ret);
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ /* Clear the interrupt; reading the register clears it */
+ ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &int_status);
+ if (ret) {
+ dev_err(info->dev, "failed to read INT STATUS %d\n", ret);
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ if (int_status) {
+ unsigned int cable_attach;
+
+ cable_attach = int_status & PTN5150_REG_INT_CABLE_ATTACH_MASK;
+ if (cable_attach) {
+ unsigned int port_status;
+ unsigned int vbus;
+
+ port_status = ((reg_data &
+ PTN5150_REG_CC_PORT_ATTACHMENT_MASK) >>
+ PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT);
+
+ switch (port_status) {
+ case PTN5150_DFP_ATTACHED:
+ extcon_set_state_sync(info->edev,
+ EXTCON_USB_HOST, false);
+ gpiod_set_value(info->vbus_gpiod, 0);
+ extcon_set_state_sync(info->edev, EXTCON_USB,
+ true);
+ break;
+ case PTN5150_UFP_ATTACHED:
+ extcon_set_state_sync(info->edev, EXTCON_USB,
+ false);
+ vbus = ((reg_data &
+ PTN5150_REG_CC_VBUS_DETECTION_MASK) >>
+ PTN5150_REG_CC_VBUS_DETECTION_SHIFT);
+ if (vbus)
+ gpiod_set_value(info->vbus_gpiod, 0);
+ else
+ gpiod_set_value(info->vbus_gpiod, 1);
+
+ extcon_set_state_sync(info->edev,
+ EXTCON_USB_HOST, true);
+ break;
+ default:
+ dev_err(info->dev,
+ "Unknown Port status : %x\n",
+ port_status);
+ break;
+ }
+ } else {
+ extcon_set_state_sync(info->edev,
+ EXTCON_USB_HOST, false);
+ extcon_set_state_sync(info->edev,
+ EXTCON_USB, false);
+ gpiod_set_value(info->vbus_gpiod, 0);
+ }
+ }
+
+ /* Clear the interrupt; reading the register clears it */
+ ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS,
+ &int_status);
+ if (ret) {
+ dev_err(info->dev,
+ "failed to read INT REG STATUS %d\n", ret);
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ mutex_unlock(&info->mutex);
+}
+
+
+static irqreturn_t ptn5150_irq_handler(int irq, void *data)
+{
+ struct ptn5150_info *info = data;
+
+ schedule_work(&info->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static int ptn5150_init_dev_type(struct ptn5150_info *info)
+{
+ unsigned int reg_data, vendor_id, version_id;
+ int ret;
+
+ ret = regmap_read(info->regmap, PTN5150_REG_DEVICE_ID, &reg_data);
+ if (ret) {
+ dev_err(info->dev, "failed to read DEVICE_ID %d\n", ret);
+ return -EINVAL;
+ }
+
+ vendor_id = ((reg_data & PTN5150_REG_DEVICE_ID_VENDOR_MASK) >>
+ PTN5150_REG_DEVICE_ID_VENDOR_SHIFT);
+ version_id = ((reg_data & PTN5150_REG_DEVICE_ID_VERSION_MASK) >>
+ PTN5150_REG_DEVICE_ID_VERSION_SHIFT);
+
+ dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
+ version_id, vendor_id);
+
+ /* Clear any existing interrupts */
+ ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &reg_data);
+ if (ret) {
+ dev_err(info->dev,
+ "failed to read PTN5150_REG_INT_STATUS %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS, &reg_data);
+ if (ret) {
+ dev_err(info->dev,
+ "failed to read PTN5150_REG_INT_REG_STATUS %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ptn5150_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct device_node *np = i2c->dev.of_node;
+ struct ptn5150_info *info;
+ int ret;
+
+ if (!np)
+ return -EINVAL;
+
+ info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ i2c_set_clientdata(i2c, info);
+
+ info->dev = &i2c->dev;
+ info->i2c = i2c;
+ info->int_gpiod = devm_gpiod_get(&i2c->dev, "int", GPIOD_IN);
+ if (IS_ERR(info->int_gpiod)) {
+ dev_err(dev, "failed to get INT GPIO\n");
+ return PTR_ERR(info->int_gpiod);
+ }
+ info->vbus_gpiod = devm_gpiod_get(&i2c->dev, "vbus", GPIOD_IN);
+ if (IS_ERR(info->vbus_gpiod)) {
+ dev_err(dev, "failed to get VBUS GPIO\n");
+ return PTR_ERR(info->vbus_gpiod);
+ }
+ ret = gpiod_direction_output(info->vbus_gpiod, 0);
+ if (ret) {
+ dev_err(dev, "failed to set VBUS GPIO direction\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&info->mutex);
+
+ INIT_WORK(&info->irq_work, ptn5150_irq_work);
+
+ info->regmap = devm_regmap_init_i2c(i2c, &ptn5150_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ ret = PTR_ERR(info->regmap);
+ dev_err(info->dev, "failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (info->int_gpiod) {
+ info->irq = gpiod_to_irq(info->int_gpiod);
+ if (info->irq < 0) {
+ dev_err(dev, "failed to get INTB IRQ\n");
+ return info->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->irq, NULL,
+ ptn5150_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ i2c->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for INTB IRQ\n");
+ return ret;
+ }
+ }
+
+ /* Allocate extcon device */
+ info->edev = devm_extcon_dev_allocate(info->dev, ptn5150_extcon_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(info->dev, "failed to allocate memory for extcon\n");
+ return -ENOMEM;
+ }
+
+ /* Register extcon device */
+ ret = devm_extcon_dev_register(info->dev, info->edev);
+ if (ret) {
+ dev_err(info->dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ /* Initialize PTN5150 device and print vendor id and version id */
+ ret = ptn5150_init_dev_type(info);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct of_device_id ptn5150_dt_match[] = {
+ { .compatible = "nxp,ptn5150" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ptn5150_dt_match);
+
+static const struct i2c_device_id ptn5150_i2c_id[] = {
+ { "ptn5150", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id);
+
+static struct i2c_driver ptn5150_i2c_driver = {
+ .driver = {
+ .name = "ptn5150",
+ .of_match_table = ptn5150_dt_match,
+ },
+ .probe = ptn5150_i2c_probe,
+ .id_table = ptn5150_i2c_id,
+};
+
+static int __init ptn5150_i2c_init(void)
+{
+ return i2c_add_driver(&ptn5150_i2c_driver);
+}
+subsys_initcall(ptn5150_i2c_init);
+
+MODULE_DESCRIPTION("NXP PTN5150 CC logic Extcon driver");
+MODULE_AUTHOR("Vijai Kumar K <vijaikumar.kanagarajan@gmail.com>");
+MODULE_LICENSE("GPL v2");
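A consumer of the new extcon-ptn5150.c driver would read the published cable state through the standard extcon API. A minimal sketch, assuming a USB controller node whose extcon phandle points at the PTN5150 (error handling trimmed):

#include <linux/extcon.h>

static void check_cable(struct device *dev)
{
	struct extcon_dev *edev = extcon_get_edev_by_phandle(dev, 0);

	if (IS_ERR(edev))
		return;

	if (extcon_get_state(edev, EXTCON_USB_HOST) > 0)
		dev_info(dev, "host cable attached\n");
	else if (extcon_get_state(edev, EXTCON_USB) > 0)
		dev_info(dev, "device cable attached\n");
}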
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 09b845e90114..a785ffd5af89 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
if (device->is_local)
return -ENODEV;
- if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
- WARN_ON(dma_set_max_seg_size(device->card->device,
- SBP2_MAX_SEG_SIZE));
-
shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
if (shost == NULL)
return -ENOMEM;
@@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = {
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
+ .max_segment_size = SBP2_MAX_SEG_SIZE,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
};
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index f754578414f0..cac16c4b0df3 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -218,7 +218,7 @@ config FW_CFG_SYSFS_CMDLINE
config INTEL_STRATIX10_SERVICE
tristate "Intel Stratix10 Service Layer"
- depends on HAVE_ARM_SMCCC
+ depends on ARCH_STRATIX10 && HAVE_ARM_SMCCC
default n
help
Intel Stratix10 service layer runs at privileged exception level,
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 472c88ae1c0f..92f843eaf1e0 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
+static void scmi_device_release(struct device *dev)
+{
+ kfree(to_scmi_dev(dev));
+}
+
struct scmi_device *
scmi_device_create(struct device_node *np, struct device *parent, int protocol)
{
@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
scmi_dev->dev.parent = parent;
scmi_dev->dev.of_node = np;
scmi_dev->dev.bus = &scmi_bus_type;
+ scmi_dev->dev.release = scmi_device_release;
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
retval = device_register(&scmi_dev->dev);
@@ -156,9 +162,8 @@ free_mem:
void scmi_device_destroy(struct scmi_device *scmi_dev)
{
scmi_handle_put(scmi_dev->handle);
- device_unregister(&scmi_dev->dev);
ida_simple_remove(&scmi_bus_id, scmi_dev->id);
- kfree(scmi_dev);
+ device_unregister(&scmi_dev->dev);
}
void scmi_set_handle(struct scmi_device *scmi_dev)
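The arm_scmi/bus.c hunks above follow the driver-model rule that memory embedding a struct device is freed only from the device's release callback, which runs when the last reference is dropped, never directly after device_unregister(). A minimal sketch with a hypothetical device type:

struct my_device {
	int id;
	struct device dev;
};

static void my_device_release(struct device *dev)
{
	kfree(container_of(dev, struct my_device, dev));
}

static void my_device_destroy(struct my_device *mdev)
{
	/* normally drops the last reference; the release hook does kfree() */
	device_unregister(&mdev->dev);
}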
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index c64c7da73829..e6376f985ef7 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -2,6 +2,7 @@
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt
+#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
@@ -887,6 +888,73 @@ static void sdei_smccc_hvc(unsigned long function_id,
arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
+int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
+ sdei_event_callback *critical_cb)
+{
+ int err;
+ u64 result;
+ u32 event_num;
+ sdei_event_callback *cb;
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return -EOPNOTSUPP;
+
+ event_num = ghes->generic->notify.vector;
+ if (event_num == 0) {
+ /*
+ * Event 0 is reserved by the specification for
+ * SDEI_EVENT_SIGNAL.
+ */
+ return -EINVAL;
+ }
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err)
+ return err;
+
+ if (result == SDEI_EVENT_PRIORITY_CRITICAL)
+ cb = critical_cb;
+ else
+ cb = normal_cb;
+
+ err = sdei_event_register(event_num, cb, ghes);
+ if (!err)
+ err = sdei_event_enable(event_num);
+
+ return err;
+}
+
+int sdei_unregister_ghes(struct ghes *ghes)
+{
+ int i;
+ int err;
+ u32 event_num = ghes->generic->notify.vector;
+
+ might_sleep();
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return -EOPNOTSUPP;
+
+ /*
+ * The event may be running on another CPU. Disable it
+ * to stop new events, then try to unregister a few times.
+ */
+ err = sdei_event_disable(event_num);
+ if (err)
+ return err;
+
+ for (i = 0; i < 3; i++) {
+ err = sdei_event_unregister(event_num);
+ if (err != -EINPROGRESS)
+ break;
+
+ schedule();
+ }
+
+ return err;
+}
+
static int sdei_get_conduit(struct platform_device *pdev)
{
const char *method;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 89110dfc7127..190be0b1d109 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -198,3 +198,9 @@ config EFI_DEV_PATH_PARSER
bool
depends on ACPI
default n
+
+config EFI_EARLYCON
+ def_bool y
+ depends on SERIAL_EARLYCON && !ARM && !IA64
+ select FONT_SUPPORT
+ select ARCH_USE_MEMREMAP_PROT
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 5f9f5039de50..d2d0d2030620 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -30,5 +30,6 @@ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_EFI_EARLYCON) += earlycon.o
obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
obj-$(CONFIG_UEFI_CPER_X86) += cper-x86.o
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index ac1654f74dc7..0e206c9e0d7a 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Note, all properties are considered as u8 arrays.
* To get a value of any of them the caller must use device_property_read_u8_array().
*/
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 1a6a77df8a5e..311cd349a862 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Extensible Firmware Interface
*
* Based on Extensible Firmware Interface Specification version 2.4
*
* Copyright (C) 2013 - 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define pr_fmt(fmt) "efi: " fmt
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 23ea1ed409d1..f99995666f86 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Extensible Firmware Interface
*
* Based on Extensible Firmware Interface Specification version 2.4
*
* Copyright (C) 2013, 2014 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/dmi.h>
@@ -37,8 +33,9 @@ extern u64 efi_system_table;
static struct ptdump_info efi_ptdump_info = {
.mm = &efi_mm,
.markers = (struct addr_marker[]){
- { 0, "UEFI runtime start" },
- { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }
+ { 0, "UEFI runtime start" },
+ { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" },
+ { -1, NULL }
},
.base_addr = 0,
};
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 96688986da56..b1395133389e 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EFI capsule loader driver.
*
* Copyright 2015 Intel Corporation
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
*/
#define pr_fmt(fmt) "efi: " fmt
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index 4938c29b7c5d..598b7800d14e 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EFI capsule support.
*
* Copyright 2013 Intel Corporation; author Matt Fleming
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
*/
#define pr_fmt(fmt) "efi: " fmt
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index 502811344e81..36d3b8b9da47 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UEFI Common Platform Error Record (CPER) support
*
* Copyright (C) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index a7902fccdcfa..8fa977c7861f 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UEFI Common Platform Error Record (CPER) support
*
@@ -9,19 +10,6 @@
*
* For more information about CPER, please refer to Appendix N of UEFI
* Specification version 2.4.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
@@ -546,19 +534,24 @@ EXPORT_SYMBOL_GPL(cper_estatus_check_header);
int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
{
struct acpi_hest_generic_data *gdata;
- unsigned int data_len, gedata_len;
+ unsigned int data_len, record_size;
int rc;
rc = cper_estatus_check_header(estatus);
if (rc)
return rc;
+
data_len = estatus->data_length;
apei_estatus_for_each_section(estatus, gdata) {
- gedata_len = acpi_hest_get_error_length(gdata);
- if (gedata_len > data_len - acpi_hest_get_size(gdata))
+ if (sizeof(struct acpi_hest_generic_data) > data_len)
return -EINVAL;
- data_len -= acpi_hest_get_record_size(gdata);
+
+ record_size = acpi_hest_get_record_size(gdata);
+ if (record_size > data_len)
+ return -EINVAL;
+
+ data_len -= record_size;
}
if (data_len)
return -EINVAL;
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 85d1834ee9b7..85ec99f97841 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dev-path-parser.c - EFI Device Path parser
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
@@ -5,14 +6,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2) as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
new file mode 100644
index 000000000000..c9a0efca17b0
--- /dev/null
+++ b/drivers/firmware/efi/earlycon.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/font.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/serial_core.h>
+#include <linux/screen_info.h>
+
+#include <asm/early_ioremap.h>
+
+static const struct font_desc *font;
+static u32 efi_x, efi_y;
+static u64 fb_base;
+static pgprot_t fb_prot;
+
+static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
+{
+ return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
+}
+
+static __ref void efi_earlycon_unmap(void *addr, unsigned long len)
+{
+ early_memunmap(addr, len);
+}
+
+static void efi_earlycon_clear_scanline(unsigned int y)
+{
+ unsigned long *dst;
+ u16 len;
+
+ len = screen_info.lfb_linelength;
+ dst = efi_earlycon_map(y*len, len);
+ if (!dst)
+ return;
+
+ memset(dst, 0, len);
+ efi_earlycon_unmap(dst, len);
+}
+
+static void efi_earlycon_scroll_up(void)
+{
+ unsigned long *dst, *src;
+ u16 len;
+ u32 i, height;
+
+ len = screen_info.lfb_linelength;
+ height = screen_info.lfb_height;
+
+ for (i = 0; i < height - font->height; i++) {
+ dst = efi_earlycon_map(i*len, len);
+ if (!dst)
+ return;
+
+ src = efi_earlycon_map((i + font->height) * len, len);
+ if (!src) {
+ efi_earlycon_unmap(dst, len);
+ return;
+ }
+
+ memmove(dst, src, len);
+
+ efi_earlycon_unmap(src, len);
+ efi_earlycon_unmap(dst, len);
+ }
+}
+
+static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h)
+{
+ const u32 color_black = 0x00000000;
+ const u32 color_white = 0x00ffffff;
+ const u8 *src;
+ u8 s8;
+ int m;
+
+ src = font->data + c * font->height;
+ s8 = *(src + h);
+
+ for (m = 0; m < 8; m++) {
+ if ((s8 >> (7 - m)) & 1)
+ *dst = color_white;
+ else
+ *dst = color_black;
+ dst++;
+ }
+}
+
+static void
+efi_earlycon_write(struct console *con, const char *str, unsigned int num)
+{
+ struct screen_info *si;
+ unsigned int len;
+ const char *s;
+ void *dst;
+
+ si = &screen_info;
+ len = si->lfb_linelength;
+
+ while (num) {
+ unsigned int linemax;
+ unsigned int h, count = 0;
+
+ for (s = str; *s && *s != '\n'; s++) {
+ if (count == num)
+ break;
+ count++;
+ }
+
+ linemax = (si->lfb_width - efi_x) / font->width;
+ if (count > linemax)
+ count = linemax;
+
+ for (h = 0; h < font->height; h++) {
+ unsigned int n, x;
+
+ dst = efi_earlycon_map((efi_y + h) * len, len);
+ if (!dst)
+ return;
+
+ s = str;
+ n = count;
+ x = efi_x;
+
+ while (n-- > 0) {
+ efi_earlycon_write_char(dst + x*4, *s, h);
+ x += font->width;
+ s++;
+ }
+
+ efi_earlycon_unmap(dst, len);
+ }
+
+ num -= count;
+ efi_x += count * font->width;
+ str += count;
+
+ if (num > 0 && *s == '\n') {
+ efi_x = 0;
+ efi_y += font->height;
+ str++;
+ num--;
+ }
+
+ if (efi_x + font->width > si->lfb_width) {
+ efi_x = 0;
+ efi_y += font->height;
+ }
+
+ if (efi_y + font->height > si->lfb_height) {
+ u32 i;
+
+ efi_y -= font->height;
+ efi_earlycon_scroll_up();
+
+ for (i = 0; i < font->height; i++)
+ efi_earlycon_clear_scanline(efi_y + i);
+ }
+ }
+}
+
+static int __init efi_earlycon_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ struct screen_info *si;
+ u16 xres, yres;
+ u32 i;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return -ENODEV;
+
+ fb_base = screen_info.lfb_base;
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)screen_info.ext_lfb_base << 32;
+
+ if (opt && !strcmp(opt, "ram"))
+ fb_prot = PAGE_KERNEL;
+ else
+ fb_prot = pgprot_writecombine(PAGE_KERNEL);
+
+ si = &screen_info;
+ xres = si->lfb_width;
+ yres = si->lfb_height;
+
+ /*
+ * efi_earlycon_write_char() implicitly assumes a framebuffer with
+ * 32 bits per pixel.
+ */
+ if (si->lfb_depth != 32)
+ return -ENODEV;
+
+ font = get_default_font(xres, yres, -1, -1);
+ if (!font)
+ return -ENODEV;
+
+ efi_y = rounddown(yres, font->height) - font->height;
+ for (i = 0; i < (yres - efi_y) / font->height; i++)
+ efi_earlycon_scroll_up();
+
+ device->con->write = efi_earlycon_write;
+ return 0;
+}
+EARLYCON_DECLARE(efifb, efi_earlycon_setup);
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index b22ccfb0c991..a2384184a7de 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2012 Intel Corporation
* Author: Josh Triplett <josh@joshtriplett.org>
@@ -5,10 +6,6 @@
* Based on the bgrt driver:
* Copyright 2012 Red Hat, Inc <mjg@redhat.com>
* Author: Matthew Garrett
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 0f7d97917197..9ea13e8d12ec 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
+
#include <linux/efi.h>
#include <linux/module.h>
#include <linux/pstore.h>
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4c46ff6f2242..55b77c576c42 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
early_memunmap(tbl, sizeof(*tbl));
}
- return 0;
-}
-int __init efi_apply_persistent_mem_reservations(void)
-{
if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
unsigned long prsv = efi.mem_reserve;
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 503bbe2a9d49..61e099826cbb 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* efibc: control EFI bootloaders which obey LoaderEntryOneShot var
* Copyright (c) 2013-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#define pr_fmt(fmt) "efibc: " fmt
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 8061667a6765..7576450c8254 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Originally from efivars.c,
*
@@ -6,63 +7,6 @@
*
* This code takes all variables accessible from EFI runtime and
* exports them via sysfs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Changelog:
- *
- * 17 May 2004 - Matt Domsch <Matt_Domsch@dell.com>
- * remove check for efi_enabled in exit
- * add MODULE_VERSION
- *
- * 26 Apr 2004 - Matt Domsch <Matt_Domsch@dell.com>
- * minor bug fixes
- *
- * 21 Apr 2004 - Matt Tolentino <matthew.e.tolentino@intel.com)
- * converted driver to export variable information via sysfs
- * and moved to drivers/firmware directory
- * bumped revision number to v0.07 to reflect conversion & move
- *
- * 10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * fix locking per Peter Chubb's findings
- *
- * 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_to_str()
- *
- * 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * use list_for_each_safe when deleting vars.
- * remove ifdef CONFIG_SMP around include <linux/smp.h>
- * v0.04 release to linux-ia64@linuxia64.org
- *
- * 20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * Moved vars from /proc/efi to /proc/efi/vars, and made
- * efi.c own the /proc/efi directory.
- * v0.03 release to linux-ia64@linuxia64.org
- *
- * 26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * At the request of Stephane, moved ownership of /proc/efi
- * to efi.c, and now efivars lives under /proc/efi/vars.
- *
- * 12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * Feedback received from Stephane Eranian incorporated.
- * efivar_write() checks copy_from_user() return value.
- * efivar_read/write() returns proper errno.
- * v0.02 release to linux-ia64@linuxia64.org
- *
- * 26 February 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * v0.01 release to linux-ia64@linuxia64.org
*/
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 5d06bd247d07..d6dd5f503fa2 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* esrt.c
*
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 6c7d60c239b5..9501edc0fcfb 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fake_mem.c
*
@@ -8,21 +9,6 @@
* By specifying this parameter, you can add arbitrary attribute to
* specific memory range by updating original (firmware provided) EFI
* memmap.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
*/
#include <linux/kernel.h>
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d9845099635e..b0103e16fc1b 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -52,7 +52,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
-CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
@@ -89,7 +89,7 @@ quiet_cmd_stubcopy = STUBCPY $@
cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
- rm -f $@; /bin/false); \
+ rm -f $@; /bin/false); \
else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \
else /bin/false; fi
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index eee42d5e25ee..04e6ecd72cd9 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
efi_status_t status;
- if (IS_ENABLED(CONFIG_ARM))
- return;
-
status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
(void **)&rsv);
if (status != EFI_SUCCESS) {
@@ -370,6 +367,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
paddr = in->phys_addr;
size = in->num_pages * EFI_PAGE_SIZE;
+ if (novamap()) {
+ in->virt_addr = in->phys_addr;
+ continue;
+ }
+
/*
* Make the mapping compatible with 64k pages: this allows
* a 4k page size kernel to kexec a 64k page size kernel and
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index becbda445913..e8f7aefb6813 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Linaro Ltd; <roy.franz@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/efi.h>
#include <asm/efi.h>
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 1b4d465cc5d9..1550d244e996 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
*
* This file implements the EFI boot stub for the arm64 kernel.
* Adapted from ARM version by Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
/*
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index e94975f4655b..e4610e72b78f 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Helper functions used by the EFI stub on multiple
* architectures. This should be #included by the EFI stub
* implementation files.
*
* Copyright 2011 Intel Corporation; author Matt Fleming
- *
- * This file is part of the Linux kernel, and is made available
- * under the terms of the GNU General Public License version 2.
- *
*/
#include <linux/efi.h>
@@ -34,6 +31,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
static int __section(.data) __nokaslr;
static int __section(.data) __quiet;
+static int __section(.data) __novamap;
int __pure nokaslr(void)
{
@@ -43,6 +41,10 @@ int __pure is_quiet(void)
{
return __quiet;
}
+int __pure novamap(void)
+{
+ return __novamap;
+}
#define EFI_MMAP_NR_SLACK_SLOTS 8
@@ -482,6 +484,11 @@ efi_status_t efi_parse_options(char const *cmdline)
__chunk_size = -1UL;
}
+ if (!strncmp(str, "novamap", 7)) {
+ str += strlen("novamap");
+ __novamap = 1;
+ }
+
/* Group words together, delimited by "," */
while (*str && *str != ' ' && *str != ',')
str++;
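
The hunk above wires a new "novamap" token into the stub's option parser: booting with efi=novamap makes the stub skip SetVirtualAddressMap() and leave the runtime regions identity-mapped (see the arm-stub.c and fdt.c changes elsewhere in this series). The stand-alone sketch below only illustrates the same token scan; the function name and option string are made up for the example and are not part of the patch.

#include <stdio.h>
#include <string.h>

/* Illustrative re-implementation of the option scan shown above. */
static int parse_efi_options(const char *cmdline)
{
	const char *str = cmdline;
	int novamap = 0;

	while (*str) {
		if (!strncmp(str, "novamap", 7)) {
			str += strlen("novamap");
			novamap = 1;
		}

		/* Group words together, delimited by "," */
		while (*str && *str != ' ' && *str != ',')
			str++;

		if (*str)
			str++;
	}

	return novamap;
}

int main(void)
{
	printf("novamap=%d\n", parse_efi_options("debug,novamap"));
	return 0;
}
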
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 32799cf039ef..1b1dfcaa6fb9 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -27,6 +27,7 @@
extern int __pure nokaslr(void);
extern int __pure is_quiet(void);
+extern int __pure novamap(void);
#define pr_efi(sys_table, msg) do { \
if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
@@ -64,4 +65,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+/* Helper macros for the usual case of using simple C variables: */
+#ifndef fdt_setprop_inplace_var
+#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
+ fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#ifndef fdt_setprop_var
+#define fdt_setprop_var(fdt, node_offset, name, var) \
+ fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
#endif
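
The two macros added above only shorten the very common "set a property from a plain C variable" pattern by filling in &var and sizeof(var); the fdt.c hunks below convert all such call sites. A minimal before/after sketch, assuming the stub environment (the node and property are taken from the initrd case in fdt.c; the wrapper function itself is illustrative):

#include <linux/libfdt.h>	/* fdt_setprop(), cpu_to_fdt64() */
#include "efistub.h"		/* fdt_setprop_var() */

/* Equivalent ways to set "linux,initrd-start" from a u64 variable: */
static int example_set_initrd_start(void *fdt, int node, u64 initrd_addr)
{
	u64 fdt_val64 = cpu_to_fdt64(initrd_addr);
	int status;

	/* Open-coded libfdt call, as used before this series: */
	status = fdt_setprop(fdt, node, "linux,initrd-start",
			     &fdt_val64, sizeof(fdt_val64));
	if (status)
		return status;

	/* Same operation via the new helper macro: */
	return fdt_setprop_var(fdt, node, "linux,initrd-start", fdt_val64);
}
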
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0dc7b4987cc2..5440ba17a1c5 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* FDT related Helper functions used by the EFI stub on multiple
* architectures. This should be #included by the EFI stub
* implementation files.
*
* Copyright 2013 Linaro Limited; author Roy Franz
- *
- * This file is part of the Linux kernel, and is made available
- * under the terms of the GNU General Public License version 2.
- *
*/
#include <linux/efi.h>
@@ -26,10 +23,8 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
offset = fdt_path_offset(fdt, "/");
/* Set the #address-cells and #size-cells values for an empty tree */
- fdt_setprop_u32(fdt, offset, "#address-cells",
- EFI_DT_ADDR_CELLS_DEFAULT);
-
- fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
+ fdt_setprop_u32(fdt, offset, "#address-cells", EFI_DT_ADDR_CELLS_DEFAULT);
+ fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
}
static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
@@ -42,7 +37,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
u32 fdt_val32;
u64 fdt_val64;
- /* Do some checks on provided FDT, if it exists*/
+ /* Do some checks on provided FDT, if it exists: */
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
pr_efi_err(sys_table, "Device Tree header not valid!\n");
@@ -50,7 +45,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
}
/*
* We don't get the size of the FDT if we get it from a
- * configuration table.
+ * configuration table:
*/
if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
pr_efi_err(sys_table, "Truncated device tree! foo!\n");
@@ -64,8 +59,8 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
status = fdt_create_empty_tree(fdt, new_fdt_size);
if (status == 0) {
/*
- * Any failure from the following function is non
- * critical
+ * Any failure from the following function is
+ * non-critical:
*/
fdt_update_cell_size(sys_table, fdt);
}
@@ -86,12 +81,13 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (node < 0) {
node = fdt_add_subnode(fdt, 0, "chosen");
if (node < 0) {
- status = node; /* node is error code when negative */
+ /* 'node' is an error code when negative: */
+ status = node;
goto fdt_set_fail;
}
}
- if ((cmdline_ptr != NULL) && (strlen(cmdline_ptr) > 0)) {
+ if (cmdline_ptr != NULL && strlen(cmdline_ptr) > 0) {
status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr,
strlen(cmdline_ptr) + 1);
if (status)
@@ -103,13 +99,12 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
u64 initrd_image_end;
u64 initrd_image_start = cpu_to_fdt64(initrd_addr);
- status = fdt_setprop(fdt, node, "linux,initrd-start",
- &initrd_image_start, sizeof(u64));
+ status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start);
if (status)
goto fdt_set_fail;
+
initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size);
- status = fdt_setprop(fdt, node, "linux,initrd-end",
- &initrd_image_end, sizeof(u64));
+ status = fdt_setprop_var(fdt, node, "linux,initrd-end", initrd_image_end);
if (status)
goto fdt_set_fail;
}
@@ -117,30 +112,28 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table);
- status = fdt_setprop(fdt, node, "linux,uefi-system-table",
- &fdt_val64, sizeof(fdt_val64));
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
if (status)
goto fdt_set_fail;
fdt_val64 = U64_MAX; /* placeholder */
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
- &fdt_val64, sizeof(fdt_val64));
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
if (status)
goto fdt_set_fail;
fdt_val32 = U32_MAX; /* placeholder */
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
- &fdt_val32, sizeof(fdt_val32));
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
if (status)
goto fdt_set_fail;
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
- &fdt_val32, sizeof(fdt_val32));
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
if (status)
goto fdt_set_fail;
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
- &fdt_val32, sizeof(fdt_val32));
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
if (status)
goto fdt_set_fail;
@@ -150,8 +143,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
(u8 *)&fdt_val64);
if (efi_status == EFI_SUCCESS) {
- status = fdt_setprop(fdt, node, "kaslr-seed",
- &fdt_val64, sizeof(fdt_val64));
+ status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
if (status)
goto fdt_set_fail;
} else if (efi_status != EFI_NOT_FOUND) {
@@ -159,7 +151,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
}
}
- /* shrink the FDT back to its minimum size */
+ /* Shrink the FDT back to its minimum size: */
fdt_pack(fdt);
return EFI_SUCCESS;
@@ -182,26 +174,26 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
return EFI_LOAD_ERROR;
fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
- err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
- &fdt_val64, sizeof(fdt_val64));
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
if (err)
return EFI_LOAD_ERROR;
fdt_val32 = cpu_to_fdt32(*map->map_size);
- err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
- &fdt_val32, sizeof(fdt_val32));
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
fdt_val32 = cpu_to_fdt32(*map->desc_size);
- err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
- &fdt_val32, sizeof(fdt_val32));
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
fdt_val32 = cpu_to_fdt32(*map->desc_ver);
- err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
- &fdt_val32, sizeof(fdt_val32));
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
@@ -209,13 +201,13 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
}
#ifndef EFI_FDT_ALIGN
-#define EFI_FDT_ALIGN EFI_PAGE_SIZE
+# define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
struct exit_boot_struct {
- efi_memory_desc_t *runtime_map;
- int *runtime_entry_count;
- void *new_fdt_addr;
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
+ void *new_fdt_addr;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -235,7 +227,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
}
#ifndef MAX_FDT_SIZE
-#define MAX_FDT_SIZE SZ_2M
+# define MAX_FDT_SIZE SZ_2M
#endif
/*
@@ -266,16 +258,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long mmap_key;
efi_memory_desc_t *memory_map, *runtime_map;
efi_status_t status;
- int runtime_entry_count = 0;
+ int runtime_entry_count;
struct efi_boot_memmap map;
struct exit_boot_struct priv;
- map.map = &runtime_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_ver;
- map.key_ptr = &mmap_key;
- map.buff_size = &buff_size;
+ map.map = &runtime_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_ver;
+ map.key_ptr = &mmap_key;
+ map.buff_size = &buff_size;
/*
* Get a copy of the current memory map that we will use to prepare
@@ -289,15 +281,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
return status;
}
- pr_efi(sys_table,
- "Exiting boot services and installing virtual address map...\n");
+ pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN,
new_fdt_addr, max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table,
- "Unable to allocate memory for new device tree.\n");
+ pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n");
goto fail;
}
@@ -318,15 +308,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
goto fail_free_new_fdt;
}
- priv.runtime_map = runtime_map;
- priv.runtime_entry_count = &runtime_entry_count;
- priv.new_fdt_addr = (void *)*new_fdt_addr;
- status = efi_exit_boot_services(sys_table, handle, &map, &priv,
- exit_boot_func);
+ runtime_entry_count = 0;
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
+ priv.new_fdt_addr = (void *)*new_fdt_addr;
+
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
+ if (novamap())
+ return EFI_SUCCESS;
+
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
@@ -363,6 +357,7 @@ fail_free_new_fdt:
fail:
sys_table->boottime->free_pool(runtime_map);
+
return EFI_LOAD_ERROR;
}
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 24c461dea7af..0101ca4c13b1 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/* -----------------------------------------------------------------------
*
* Copyright 2011 Intel Corporation; author Matt Fleming
*
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
- *
* ----------------------------------------------------------------------- */
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index e0e603a89aa9..b4b1d1dcb5fd 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 72d9dfbebf08..edba5e7a3743 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Secure boot handling.
*
@@ -5,9 +6,6 @@
* Roy Franz <roy.franz@linaro.org
* Copyright (C) 2013 Red Hat, Inc.
* Mark Salter <msalter@redhat.com>
- *
- * This file is part of the Linux kernel, and is made available under the
- * terms of the GNU General Public License version 2.
*/
#include <linux/efi.h>
#include <asm/efi.h>
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index a90b0b8fc69a..5bd04f75d8d6 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TPM handling.
*
@@ -5,9 +6,6 @@
* Copyright (C) 2017 Google, Inc.
* Matthew Garrett <mjg59@google.com>
* Thiebaud Weksteen <tweek@google.com>
- *
- * This file is part of the Linux kernel, and is made available under the
- * terms of the GNU General Public License version 2.
*/
#include <linux/efi.h>
#include <linux/tpm_eventlog.h>
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 8986757eafaf..58452fde92cc 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "efi: memattr: " fmt
@@ -94,7 +91,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
- if (md->virt_addr == 0) {
+ if (md->virt_addr == 0 && md->phys_addr != 0) {
/* no virtual mapping has been installed by the stub */
break;
}
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 84a11d0a8023..ad9ddefc9dcb 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/efi/runtime-map.c
* Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com>
- *
- * This file is released under the GPLv2.
*/
#include <linux/string.h>
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 8903b9ccfc2b..698745c249e8 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -85,7 +85,7 @@ struct efi_runtime_work efi_rts_work;
pr_err("Failed to queue work to efi_rts_wq.\n"); \
\
exit: \
- efi_rts_work.efi_rts_id = NONE; \
+ efi_rts_work.efi_rts_id = EFI_NONE; \
efi_rts_work.status; \
})
@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
static DEFINE_SEMAPHORE(efi_runtime_lock);
/*
+ * Expose the EFI runtime lock to the UV platform
+ */
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
+/*
* Calls the appropriate efi_runtime_service() with the appropriate
* arguments.
*
@@ -168,50 +175,50 @@ static void efi_call_rts(struct work_struct *work)
arg5 = efi_rts_work.arg5;
switch (efi_rts_work.efi_rts_id) {
- case GET_TIME:
+ case EFI_GET_TIME:
status = efi_call_virt(get_time, (efi_time_t *)arg1,
(efi_time_cap_t *)arg2);
break;
- case SET_TIME:
+ case EFI_SET_TIME:
status = efi_call_virt(set_time, (efi_time_t *)arg1);
break;
- case GET_WAKEUP_TIME:
+ case EFI_GET_WAKEUP_TIME:
status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
(efi_bool_t *)arg2, (efi_time_t *)arg3);
break;
- case SET_WAKEUP_TIME:
+ case EFI_SET_WAKEUP_TIME:
status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
(efi_time_t *)arg2);
break;
- case GET_VARIABLE:
+ case EFI_GET_VARIABLE:
status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
(efi_guid_t *)arg2, (u32 *)arg3,
(unsigned long *)arg4, (void *)arg5);
break;
- case GET_NEXT_VARIABLE:
+ case EFI_GET_NEXT_VARIABLE:
status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
(efi_char16_t *)arg2,
(efi_guid_t *)arg3);
break;
- case SET_VARIABLE:
+ case EFI_SET_VARIABLE:
status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
(efi_guid_t *)arg2, *(u32 *)arg3,
*(unsigned long *)arg4, (void *)arg5);
break;
- case QUERY_VARIABLE_INFO:
+ case EFI_QUERY_VARIABLE_INFO:
status = efi_call_virt(query_variable_info, *(u32 *)arg1,
(u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
break;
- case GET_NEXT_HIGH_MONO_COUNT:
+ case EFI_GET_NEXT_HIGH_MONO_COUNT:
status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
break;
- case UPDATE_CAPSULE:
+ case EFI_UPDATE_CAPSULE:
status = efi_call_virt(update_capsule,
(efi_capsule_header_t **)arg1,
*(unsigned long *)arg2,
*(unsigned long *)arg3);
break;
- case QUERY_CAPSULE_CAPS:
+ case EFI_QUERY_CAPSULE_CAPS:
status = efi_call_virt(query_capsule_caps,
(efi_capsule_header_t **)arg1,
*(unsigned long *)arg2, (u64 *)arg3,
@@ -235,7 +242,7 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
+ status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -246,7 +253,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
+ status = efi_queue_work(EFI_SET_TIME, tm, NULL, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -259,7 +266,7 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+ status = efi_queue_work(EFI_GET_WAKEUP_TIME, enabled, pending, tm, NULL,
NULL);
up(&efi_runtime_lock);
return status;
@@ -271,7 +278,7 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+ status = efi_queue_work(EFI_SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
NULL);
up(&efi_runtime_lock);
return status;
@@ -287,7 +294,7 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+ status = efi_queue_work(EFI_GET_VARIABLE, name, vendor, attr, data_size,
data);
up(&efi_runtime_lock);
return status;
@@ -301,7 +308,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+ status = efi_queue_work(EFI_GET_NEXT_VARIABLE, name_size, name, vendor,
NULL, NULL);
up(&efi_runtime_lock);
return status;
@@ -317,7 +324,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+ status = efi_queue_work(EFI_SET_VARIABLE, name, vendor, &attr, &data_size,
data);
up(&efi_runtime_lock);
return status;
@@ -352,7 +359,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+ status = efi_queue_work(EFI_QUERY_VARIABLE_INFO, &attr, storage_space,
remaining_space, max_variable_size, NULL);
up(&efi_runtime_lock);
return status;
@@ -384,7 +391,7 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+ status = efi_queue_work(EFI_GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
NULL, NULL);
up(&efi_runtime_lock);
return status;
@@ -400,7 +407,7 @@ static void virt_efi_reset_system(int reset_type,
"could not get exclusive access to the firmware\n");
return;
}
- efi_rts_work.efi_rts_id = RESET_SYSTEM;
+ efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
__efi_call_virt(reset_system, reset_type, status, data_size, data);
up(&efi_runtime_lock);
}
@@ -416,7 +423,7 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+ status = efi_queue_work(EFI_UPDATE_CAPSULE, capsules, &count, &sg_list,
NULL, NULL);
up(&efi_runtime_lock);
return status;
@@ -434,7 +441,7 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+ status = efi_queue_work(EFI_QUERY_CAPSULE_CAPS, capsules, &count,
max_size, reset_type, NULL);
up(&efi_runtime_lock);
return status;
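
Besides prefixing the efi_rts_id values with EFI_, the runtime-wrappers.c change above exports the (static) efi_runtime_lock to the x86 UV platform through a symbol alias: __alias(sym) expands to the GCC alias attribute, so __efi_uv_runtime_lock is a second name for the same semaphore rather than a new lock. A minimal sketch of that mechanism, with made-up names:

#include <linux/compiler.h>	/* __alias() */
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(example_lock);

/* Give the static lock a second, linker-visible name: */
extern struct semaphore example_lock_alias __alias(example_lock);

/* Code elsewhere can declare that alias and serialize on the same lock: */
static void example_critical_section(void)
{
	down(&example_lock_alias);
	/* ... work that must exclude concurrent lock holders ... */
	up(&example_lock_alias);
}
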
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 51ecf7d6da48..877745c3aaf2 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* EFI Test Driver for Runtime Services
*
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
index 5f4818bf112f..f2446aa1c2e3 100644
--- a/drivers/firmware/efi/test/efi_test.h
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* EFI Test driver Header
*
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 0cbeb3d46b18..3a689b40ccc0 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Google, Inc.
* Thiebaud Weksteen <tweek@google.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index fceaafd67ec6..436d1776bc7b 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Originally from efivars.c
*
* Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/capability.h>
diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
index 97f5424dbac9..4b56a587dacd 100644
--- a/drivers/firmware/imx/misc.c
+++ b/drivers/firmware/imx/misc.c
@@ -18,6 +18,14 @@ struct imx_sc_msg_req_misc_set_ctrl {
u16 resource;
} __packed;
+struct imx_sc_msg_req_cpu_start {
+ struct imx_sc_rpc_msg hdr;
+ u32 address_hi;
+ u32 address_lo;
+ u16 resource;
+ u8 enable;
+} __packed;
+
struct imx_sc_msg_req_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 ctrl;
@@ -97,3 +105,33 @@ int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
return 0;
}
EXPORT_SYMBOL(imx_sc_misc_get_control);
+
+/*
+ * This function starts/stops a CPU identified by @resource
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] enable true for start, false for stop
+ * @param[in] phys_addr initial instruction address to be executed
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ struct imx_sc_msg_req_cpu_start msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_CPU_START;
+ hdr->size = 4;
+
+ msg.address_hi = phys_addr >> 32;
+ msg.address_lo = phys_addr;
+ msg.resource = resource;
+ msg.enable = enable;
+
+ return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_pm_cpu_start);
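
imx_sc_pm_cpu_start() above packs the 64-bit entry point into address_hi/address_lo, adds the resource ID and an enable flag, and sends the request over the SCU RPC. A hedged usage sketch for starting an auxiliary Cortex-M4 core follows; the header path, resource constant and entry address are illustrative assumptions, not part of this patch:

#include <linux/firmware/imx/sci.h>	/* assumed to provide the SCU firmware API */

static int example_start_m4(void)
{
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	/* Resource ID and entry address are example values. */
	return imx_sc_pm_cpu_start(ipc, IMX_SC_R_M4_0_PID0, true, 0x34fe0000);
}
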
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 407245f2efd0..39a94c7177fc 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -322,6 +322,7 @@ static int imx_sc_pd_probe(struct platform_device *pdev)
static const struct of_device_id imx_sc_pd_match[] = {
{ .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd},
+ { .compatible = "fsl,scu-pd", &imx8qxp_scu_pd},
{ /* sentinel */ }
};
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index a13558154ac3..61be15d9df7d 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -238,6 +238,16 @@ static int rpi_firmware_probe(struct platform_device *pdev)
return 0;
}
+static void rpi_firmware_shutdown(struct platform_device *pdev)
+{
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+ if (!fw)
+ return;
+
+ rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
+}
+
static int rpi_firmware_remove(struct platform_device *pdev)
{
struct rpi_firmware *fw = platform_get_drvdata(pdev);
@@ -278,6 +288,7 @@ static struct platform_driver rpi_firmware_driver = {
.of_match_table = rpi_firmware_of_match,
},
.probe = rpi_firmware_probe,
+ .shutdown = rpi_firmware_shutdown,
.remove = rpi_firmware_remove,
};
module_platform_driver(rpi_firmware_driver);
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index 1b826dcca719..676b01caff05 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -1,4 +1,7 @@
tegra-bpmp-y = bpmp.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
new file mode 100644
index 000000000000..54d560c48398
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#ifndef __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+#define __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra_bpmp_ops {
+ int (*init)(struct tegra_bpmp *bpmp);
+ void (*deinit)(struct tegra_bpmp *bpmp);
+ bool (*is_response_ready)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_ready)(struct tegra_bpmp_channel *channel);
+ int (*ack_response)(struct tegra_bpmp_channel *channel);
+ int (*ack_request)(struct tegra_bpmp_channel *channel);
+ bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel);
+ int (*post_response)(struct tegra_bpmp_channel *channel);
+ int (*post_request)(struct tegra_bpmp_channel *channel);
+ int (*ring_doorbell)(struct tegra_bpmp *bpmp);
+ int (*resume)(struct tegra_bpmp *bpmp);
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
+#endif
+
+#endif
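
The ops table above is the seam between the transport-agnostic core and the two backends that follow: Tegra186/194 keep the IVC-over-shared-memory plus HSP doorbell transport, while Tegra210 talks through its atomics and arbitration-semaphore registers. The core only dispatches through bpmp->soc->ops (see the bpmp.c hunks further down). A hedged sketch of how a SoC descriptor selects its backend; apart from the .ops member introduced by this series, the tegra_bpmp_soc fields shown are assumptions:

#include <soc/tegra/bpmp.h>
#include "bpmp-private.h"

/* Illustrative SoC match data wiring in the Tegra186 backend: */
static const struct tegra_bpmp_soc example_tegra186_soc = {
	.ops = &tegra186_bpmp_ops,
	/* .channels, .num_resets, ... as in the existing descriptor */
};

/* The core then reaches the backend only through these hooks: */
static int example_ring_doorbell(struct tegra_bpmp *bpmp)
{
	return bpmp->soc->ops->ring_doorbell(bpmp);
}
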
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
new file mode 100644
index 000000000000..ea308751635f
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
+struct tegra186_bpmp {
+ struct tegra_bpmp *parent;
+
+ struct {
+ struct gen_pool *pool;
+ dma_addr_t phys;
+ void *virt;
+ } tx, rx;
+
+ struct {
+ struct mbox_client client;
+ struct mbox_chan *channel;
+ } mbox;
+};
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ struct tegra186_bpmp *priv;
+
+ priv = container_of(client, struct tegra186_bpmp, mbox.client);
+
+ return priv->parent;
+}
+
+static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_read_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ib = NULL;
+ return false;
+ }
+
+ channel->ib = frame;
+
+ return true;
+}
+
+static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ob = NULL;
+ return false;
+ }
+
+ channel->ob = frame;
+
+ return true;
+}
+
+static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ int err;
+
+ err = mbox_send_message(priv->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(priv->mbox.channel, 0);
+
+ return 0;
+}
+
+static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ struct tegra186_bpmp *priv = bpmp->priv;
+
+ if (WARN_ON(priv->mbox.channel == NULL))
+ return;
+
+ tegra186_bpmp_ring_doorbell(bpmp);
+}
+
+static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ size_t message_size, queue_size;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ err = tegra_ivc_init(channel->ivc, NULL,
+ priv->rx.virt + offset, priv->rx.phys + offset,
+ priv->tx.virt + offset, priv->tx.phys + offset,
+ 1, message_size, tegra186_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static void mbox_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+
+ tegra_bpmp_handle_rx(bpmp);
+}
+
+static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+ priv->parent = bpmp;
+
+ priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
+ if (!priv->tx.pool) {
+ dev_err(bpmp->dev, "TX shmem pool not found\n");
+ return -ENOMEM;
+ }
+
+ priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
+ if (!priv->tx.virt) {
+ dev_err(bpmp->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
+ if (!priv->rx.pool) {
+ dev_err(bpmp->dev, "RX shmem pool not found\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ priv->rx.virt = gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys);
+ if (!priv->rx.virt) {
+ dev_err(bpmp->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ goto free_rx;
+
+ err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ goto cleanup_tx_channel;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ goto cleanup_channels;
+ }
+
+ /* mbox registration */
+ priv->mbox.client.dev = bpmp->dev;
+ priv->mbox.client.rx_callback = mbox_handle_rx;
+ priv->mbox.client.tx_block = false;
+ priv->mbox.client.knows_txdone = false;
+
+ priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
+ if (IS_ERR(priv->mbox.channel)) {
+ err = PTR_ERR(priv->mbox.channel);
+ dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
+ goto cleanup_channels;
+ }
+
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+ return 0;
+
+cleanup_channels:
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ if (!bpmp->threaded_channels[i].bpmp)
+ continue;
+
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+ }
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+cleanup_tx_channel:
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+free_rx:
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+free_tx:
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+
+ return err;
+}
+
+static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ unsigned int i;
+
+ mbox_free_channel(priv->mbox.channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+}
+
+static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
+{
+ unsigned int i;
+
+ /* reset message channels */
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra186_bpmp_ops = {
+ .init = tegra186_bpmp_init,
+ .deinit = tegra186_bpmp_deinit,
+ .is_response_ready = tegra186_bpmp_is_message_ready,
+ .is_request_ready = tegra186_bpmp_is_message_ready,
+ .ack_response = tegra186_bpmp_ack_message,
+ .ack_request = tegra186_bpmp_ack_message,
+ .is_response_channel_free = tegra186_bpmp_is_channel_free,
+ .is_request_channel_free = tegra186_bpmp_is_channel_free,
+ .post_response = tegra186_bpmp_post_message,
+ .post_request = tegra186_bpmp_post_message,
+ .ring_doorbell = tegra186_bpmp_ring_doorbell,
+ .resume = tegra186_bpmp_resume,
+};
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
new file mode 100644
index 000000000000..ae15940a078e
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+#include "bpmp-private.h"
+
+#define TRIGGER_OFFSET 0x000
+#define RESULT_OFFSET(id) (0xc00 + id * 4)
+#define TRIGGER_ID_SHIFT 16
+#define TRIGGER_CMD_GET 4
+
+#define STA_OFFSET 0
+#define SET_OFFSET 4
+#define CLR_OFFSET 8
+
+#define CH_MASK(ch) (0x3 << ((ch) * 2))
+#define SL_SIGL(ch) (0x0 << ((ch) * 2))
+#define SL_QUED(ch) (0x1 << ((ch) * 2))
+#define MA_FREE(ch) (0x2 << ((ch) * 2))
+#define MA_ACKD(ch) (0x3 << ((ch) * 2))
+
+struct tegra210_bpmp {
+ void __iomem *atomics;
+ void __iomem *arb_sema;
+ struct irq_data *tx_irq_data;
+};
+
+static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+
+ return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index);
+}
+
+static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index);
+}
+
+static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index);
+}
+
+static bool
+tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index);
+}
+
+static bool
+tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index);
+}
+
+static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index),
+ priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ struct irq_data *irq_data = priv->tx_irq_data;
+
+ /*
+ * Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of
+ * available messages
+ */
+ if (irq_data->chip->irq_retrigger)
+ return irq_data->chip->irq_retrigger(irq_data);
+
+ return -EINVAL;
+}
+
+static irqreturn_t rx_irq(int irq, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+
+ tegra_bpmp_handle_rx(bpmp);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ u32 address;
+ void *p;
+
+ /* Retrieve channel base address from BPMP */
+ writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET,
+ priv->atomics + TRIGGER_OFFSET);
+ address = readl(priv->atomics + RESULT_OFFSET(index));
+
+ p = devm_ioremap(bpmp->dev, address, 0x80);
+ if (!p)
+ return -ENOMEM;
+
+ channel->ib = p;
+ channel->ob = p;
+ channel->index = index;
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct platform_device *pdev = to_platform_device(bpmp->dev);
+ struct tegra210_bpmp *priv;
+ struct resource *res;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->atomics))
+ return PTR_ERR(priv->atomics);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->arb_sema))
+ return PTR_ERR(priv->arb_sema);
+
+ err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ return err;
+
+ err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ return err;
+ }
+
+ err = platform_get_irq_byname(pdev, "tx");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get TX IRQ: %d\n", err);
+ return err;
+ }
+
+ priv->tx_irq_data = irq_get_irq_data(err);
+ if (!priv->tx_irq_data) {
+ dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
+ return err;
+ }
+
+ err = platform_get_irq_byname(pdev, "rx");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get rx IRQ: %d\n", err);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, err, rx_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra210_bpmp_ops = {
+ .init = tegra210_bpmp_init,
+ .is_response_ready = tegra210_bpmp_is_response_ready,
+ .is_request_ready = tegra210_bpmp_is_request_ready,
+ .ack_response = tegra210_bpmp_ack_response,
+ .ack_request = tegra210_bpmp_ack_request,
+ .is_response_channel_free = tegra210_bpmp_is_response_channel_free,
+ .is_request_channel_free = tegra210_bpmp_is_request_channel_free,
+ .post_response = tegra210_bpmp_post_response,
+ .post_request = tegra210_bpmp_post_request,
+ .ring_doorbell = tegra210_bpmp_ring_doorbell,
+};
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 689478b92bce..dd775e8ba5a0 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -26,6 +26,8 @@
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>
+#include "bpmp-private.h"
+
#define MSG_ACK BIT(0)
#define MSG_RING BIT(1)
#define TAG_SZ 32
@@ -36,6 +38,14 @@ mbox_client_to_bpmp(struct mbox_client *client)
return container_of(client, struct tegra_bpmp, mbox.client);
}
+static inline const struct tegra_bpmp_ops *
+channel_to_ops(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+
+ return bpmp->soc->ops;
+}
+
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
struct platform_device *pdev;
@@ -96,22 +106,21 @@ static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
(msg->rx.size == 0 || msg->rx.data);
}
-static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
{
- void *frame;
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- frame = tegra_ivc_read_get_next_frame(channel->ivc);
- if (IS_ERR(frame)) {
- channel->ib = NULL;
- return false;
- }
+ return ops->is_response_ready(channel);
+}
- channel->ib = frame;
+static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- return true;
+ return ops->is_request_ready(channel);
}
-static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
{
unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
ktime_t end;
@@ -119,29 +128,45 @@ static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
end = ktime_add_us(ktime_get(), timeout);
do {
- if (tegra_bpmp_master_acked(channel))
+ if (tegra_bpmp_is_response_ready(channel))
return 0;
} while (ktime_before(ktime_get(), end));
return -ETIMEDOUT;
}
-static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
{
- void *frame;
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- frame = tegra_ivc_write_get_next_frame(channel->ivc);
- if (IS_ERR(frame)) {
- channel->ob = NULL;
- return false;
- }
+ return ops->ack_response(channel);
+}
- channel->ob = frame;
+static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- return true;
+ return ops->ack_request(channel);
}
-static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+static bool
+tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_request_channel_free(channel);
+}
+
+static bool
+tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_response_channel_free(channel);
+}
+
+static int
+tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
{
unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
ktime_t start, now;
@@ -149,7 +174,7 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
start = ns_to_ktime(local_clock());
do {
- if (tegra_bpmp_master_free(channel))
+ if (tegra_bpmp_is_request_channel_free(channel))
return 0;
now = ns_to_ktime(local_clock());
@@ -158,6 +183,25 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
return -ETIMEDOUT;
}
+static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_request(channel);
+}
+
+static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_response(channel);
+}
+
+static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ return bpmp->soc->ops->ring_doorbell(bpmp);
+}
+
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
void *data, size_t size, int *ret)
{
@@ -166,7 +210,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
if (data && size > 0)
memcpy(data, channel->ib->data, size);
- err = tegra_ivc_read_advance(channel->ivc);
+ err = tegra_bpmp_ack_response(channel);
if (err < 0)
return err;
@@ -210,7 +254,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
if (data && size > 0)
memcpy(channel->ob->data, data, size);
- return tegra_ivc_write_advance(channel->ivc);
+ return tegra_bpmp_post_request(channel);
}
static struct tegra_bpmp_channel *
@@ -238,7 +282,7 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
channel = &bpmp->threaded_channels[index];
- if (!tegra_bpmp_master_free(channel)) {
+ if (!tegra_bpmp_is_request_channel_free(channel)) {
err = -EBUSY;
goto unlock;
}
@@ -270,7 +314,7 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
{
int err;
- err = tegra_bpmp_wait_master_free(channel);
+ err = tegra_bpmp_wait_request_channel_free(channel);
if (err < 0)
return err;
@@ -302,13 +346,11 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
spin_unlock(&bpmp->atomic_tx_lock);
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (err < 0)
return err;
- mbox_client_txdone(bpmp->mbox.channel, 0);
-
- err = tegra_bpmp_wait_ack(channel);
+ err = tegra_bpmp_wait_response(channel);
if (err < 0)
return err;
@@ -335,12 +377,10 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
if (IS_ERR(channel))
return PTR_ERR(channel);
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (err < 0)
return err;
- mbox_client_txdone(bpmp->mbox.channel, 0);
-
timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
err = wait_for_completion_timeout(&channel->completion, timeout);
@@ -369,38 +409,34 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
{
unsigned long flags = channel->ib->flags;
struct tegra_bpmp *bpmp = channel->bpmp;
- struct tegra_bpmp_mb_data *frame;
int err;
if (WARN_ON(size > MSG_DATA_MIN_SZ))
return;
- err = tegra_ivc_read_advance(channel->ivc);
+ err = tegra_bpmp_ack_request(channel);
if (WARN_ON(err < 0))
return;
if ((flags & MSG_ACK) == 0)
return;
- frame = tegra_ivc_write_get_next_frame(channel->ivc);
- if (WARN_ON(IS_ERR(frame)))
+ if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
return;
- frame->code = code;
+ channel->ob->code = code;
if (data && size > 0)
- memcpy(frame->data, data, size);
+ memcpy(channel->ob->data, data, size);
- err = tegra_ivc_write_advance(channel->ivc);
+ err = tegra_bpmp_post_response(channel);
if (WARN_ON(err < 0))
return;
if (flags & MSG_RING) {
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (WARN_ON(err < 0))
return;
-
- mbox_client_txdone(bpmp->mbox.channel, 0);
}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
@@ -627,9 +663,8 @@ static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
complete(&channel->completion);
}
-static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
{
- struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
struct tegra_bpmp_channel *channel;
unsigned int i, count;
unsigned long *busy;
@@ -638,7 +673,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
count = bpmp->soc->channels.thread.count;
busy = bpmp->threaded.busy;
- if (tegra_bpmp_master_acked(channel))
+ if (tegra_bpmp_is_request_ready(channel))
tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
spin_lock(&bpmp->lock);
@@ -648,7 +683,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
channel = &bpmp->threaded_channels[i];
- if (tegra_bpmp_master_acked(channel)) {
+ if (tegra_bpmp_is_response_ready(channel)) {
tegra_bpmp_channel_signal(channel);
clear_bit(i, busy);
}
@@ -657,74 +692,9 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
spin_unlock(&bpmp->lock);
}
-static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
-{
- struct tegra_bpmp *bpmp = data;
- int err;
-
- if (WARN_ON(bpmp->mbox.channel == NULL))
- return;
-
- err = mbox_send_message(bpmp->mbox.channel, NULL);
- if (err < 0)
- return;
-
- mbox_client_txdone(bpmp->mbox.channel, 0);
-}
-
-static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
- struct tegra_bpmp *bpmp,
- unsigned int index)
-{
- size_t message_size, queue_size;
- unsigned int offset;
- int err;
-
- channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
- GFP_KERNEL);
- if (!channel->ivc)
- return -ENOMEM;
-
- message_size = tegra_ivc_align(MSG_MIN_SZ);
- queue_size = tegra_ivc_total_queue_size(message_size);
- offset = queue_size * index;
-
- err = tegra_ivc_init(channel->ivc, NULL,
- bpmp->rx.virt + offset, bpmp->rx.phys + offset,
- bpmp->tx.virt + offset, bpmp->tx.phys + offset,
- 1, message_size, tegra_bpmp_ivc_notify,
- bpmp);
- if (err < 0) {
- dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
- index, err);
- return err;
- }
-
- init_completion(&channel->completion);
- channel->bpmp = bpmp;
-
- return 0;
-}
-
-static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
-{
- /* reset the channel state */
- tegra_ivc_reset(channel->ivc);
-
- /* sync the channel state with BPMP */
- while (tegra_ivc_notified(channel->ivc))
- ;
-}
-
-static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
-{
- tegra_ivc_cleanup(channel->ivc);
-}
-
static int tegra_bpmp_probe(struct platform_device *pdev)
{
struct tegra_bpmp *bpmp;
- unsigned int i;
char tag[TAG_SZ];
size_t size;
int err;
@@ -736,32 +706,6 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
bpmp->soc = of_device_get_match_data(&pdev->dev);
bpmp->dev = &pdev->dev;
- bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
- if (!bpmp->tx.pool) {
- dev_err(&pdev->dev, "TX shmem pool not found\n");
- return -ENOMEM;
- }
-
- bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
- if (!bpmp->tx.virt) {
- dev_err(&pdev->dev, "failed to allocate from TX pool\n");
- return -ENOMEM;
- }
-
- bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
- if (!bpmp->rx.pool) {
- dev_err(&pdev->dev, "RX shmem pool not found\n");
- err = -ENOMEM;
- goto free_tx;
- }
-
- bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
- if (!bpmp->rx.virt) {
- dev_err(&pdev->dev, "failed to allocate from RX pool\n");
- err = -ENOMEM;
- goto free_tx;
- }
-
INIT_LIST_HEAD(&bpmp->mrqs);
spin_lock_init(&bpmp->lock);
@@ -771,81 +715,38 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!bpmp->threaded.allocated) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->threaded.allocated)
+ return -ENOMEM;
bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!bpmp->threaded.busy) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->threaded.busy)
+ return -ENOMEM;
spin_lock_init(&bpmp->atomic_tx_lock);
bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
GFP_KERNEL);
- if (!bpmp->tx_channel) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->tx_channel)
+ return -ENOMEM;
bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
GFP_KERNEL);
- if (!bpmp->rx_channel) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->rx_channel)
+ return -ENOMEM;
bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
sizeof(*bpmp->threaded_channels),
GFP_KERNEL);
- if (!bpmp->threaded_channels) {
- err = -ENOMEM;
- goto free_rx;
- }
-
- err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
- bpmp->soc->channels.cpu_tx.offset);
- if (err < 0)
- goto free_rx;
+ if (!bpmp->threaded_channels)
+ return -ENOMEM;
- err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
- bpmp->soc->channels.cpu_rx.offset);
+ err = bpmp->soc->ops->init(bpmp);
if (err < 0)
- goto cleanup_tx_channel;
-
- for (i = 0; i < bpmp->threaded.count; i++) {
- err = tegra_bpmp_channel_init(
- &bpmp->threaded_channels[i], bpmp,
- bpmp->soc->channels.thread.offset + i);
- if (err < 0)
- goto cleanup_threaded_channels;
- }
-
- /* mbox registration */
- bpmp->mbox.client.dev = &pdev->dev;
- bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
- bpmp->mbox.client.tx_block = false;
- bpmp->mbox.client.knows_txdone = false;
-
- bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
- if (IS_ERR(bpmp->mbox.channel)) {
- err = PTR_ERR(bpmp->mbox.channel);
- dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
- goto cleanup_threaded_channels;
- }
-
- /* reset message channels */
- tegra_bpmp_channel_reset(bpmp->tx_channel);
- tegra_bpmp_channel_reset(bpmp->rx_channel);
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+ return err;
err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
tegra_bpmp_mrq_handle_ping, bpmp);
if (err < 0)
- goto free_mbox;
+ goto deinit;
err = tegra_bpmp_ping(bpmp);
if (err < 0) {
@@ -867,17 +768,23 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (err < 0)
goto free_mrq;
- err = tegra_bpmp_init_clocks(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
- err = tegra_bpmp_init_resets(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
- err = tegra_bpmp_init_powergates(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
+ err = tegra_bpmp_init_powergates(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
err = tegra_bpmp_init_debugfs(bpmp);
if (err < 0)
@@ -887,41 +794,27 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
free_mrq:
tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
-free_mbox:
- mbox_free_channel(bpmp->mbox.channel);
-cleanup_threaded_channels:
- for (i = 0; i < bpmp->threaded.count; i++) {
- if (bpmp->threaded_channels[i].bpmp)
- tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
- }
+deinit:
+ if (bpmp->soc->ops->deinit)
+ bpmp->soc->ops->deinit(bpmp);
- tegra_bpmp_channel_cleanup(bpmp->rx_channel);
-cleanup_tx_channel:
- tegra_bpmp_channel_cleanup(bpmp->tx_channel);
-free_rx:
- gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
-free_tx:
- gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
return err;
}
static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
- unsigned int i;
-
- /* reset message channels */
- tegra_bpmp_channel_reset(bpmp->tx_channel);
- tegra_bpmp_channel_reset(bpmp->rx_channel);
-
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
- return 0;
+ if (bpmp->soc->ops->resume)
+ return bpmp->soc->ops->resume(bpmp);
+ else
+ return 0;
}
static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
.channels = {
.cpu_tx = {
@@ -938,11 +831,42 @@ static const struct tegra_bpmp_soc tegra186_soc = {
.timeout = 0,
},
},
+ .ops = &tegra186_bpmp_ops,
.num_resets = 193,
};
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_bpmp_soc tegra210_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 1,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 4,
+ .count = 1,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 8,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .ops = &tegra210_bpmp_ops,
+};
+#endif
static const struct of_device_id tegra_bpmp_match[] = {
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
+#endif
{ }
};
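
Note: the refactoring above replaces direct IVC and mailbox calls with an indirection through a per-SoC ops table (tegra186_bpmp_ops for the HSP-doorbell based Tegra186/194, tegra210_bpmp_ops for Tegra210), so the generic code only ever calls through bpmp->soc->ops. As a rough sketch, with the callback names inferred from the calls in these hunks rather than copied from the real header, the ops table looks something like this:

	/* Sketch of the per-SoC transport ops; names inferred from this patch,
	 * the authoritative definition lives in include/soc/tegra/bpmp.h. */
	struct tegra_bpmp;
	struct tegra_bpmp_channel;

	struct tegra_bpmp_ops {
		int (*init)(struct tegra_bpmp *bpmp);
		void (*deinit)(struct tegra_bpmp *bpmp);
		bool (*is_response_ready)(struct tegra_bpmp_channel *channel);
		bool (*is_request_ready)(struct tegra_bpmp_channel *channel);
		int (*ack_response)(struct tegra_bpmp_channel *channel);
		int (*ack_request)(struct tegra_bpmp_channel *channel);
		bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel);
		bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel);
		int (*post_response)(struct tegra_bpmp_channel *channel);
		int (*post_request)(struct tegra_bpmp_channel *channel);
		int (*ring_doorbell)(struct tegra_bpmp *bpmp);
		int (*resume)(struct tegra_bpmp *bpmp);
	};

Each backend fills in such a table and owns its own channel setup, which is why the mailbox registration, IVC channel init and shared-memory pool allocation could all be dropped from the common probe path.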
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 69ed1464175c..3fbbb61012c4 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -146,25 +146,8 @@ static int ti_sci_debug_show(struct seq_file *s, void *unused)
return 0;
}
-/**
- * ti_sci_debug_open() - debug file open
- * @inode: inode pointer
- * @file: file pointer
- *
- * Return: result of single_open
- */
-static int ti_sci_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ti_sci_debug_show, inode->i_private);
-}
-
-/* log file operations */
-static const struct file_operations ti_sci_debug_fops = {
- .open = ti_sci_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+/* Provide the log file operations interface */
+DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
/**
* ti_sci_debugfs_create() - Create log debug file
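
Note: DEFINE_SHOW_ATTRIBUTE(ti_sci_debug) generates essentially the boilerplate that this hunk deletes by hand. A simplified expansion of the seq_file helper macro (declared in <linux/seq_file.h>; the real macro also sets .owner = THIS_MODULE, omitted here for brevity) looks like:

	static int ti_sci_debug_open(struct inode *inode, struct file *file)
	{
		return single_open(file, ti_sci_debug_show, inode->i_private);
	}

	static const struct file_operations ti_sci_debug_fops = {
		.open		= ti_sci_debug_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

so ti_sci_debugfs_create() can keep referring to &ti_sci_debug_fops unchanged.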
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
index 8f44b9cd295a..bd33bbf70daf 100644
--- a/drivers/firmware/xilinx/Kconfig
+++ b/drivers/firmware/xilinx/Kconfig
@@ -6,6 +6,7 @@ menu "Zynq MPSoC Firmware Drivers"
config ZYNQMP_FIRMWARE
bool "Enable Xilinx Zynq MPSoC firmware interface"
+ select MFD_CORE
help
Firmware interface driver is used by different
drivers to communicate with the firmware for
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 9a1c72a9280f..98f936125643 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -14,6 +14,7 @@
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -23,6 +24,12 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static const struct mfd_cell firmware_devs[] = {
+ {
+ .name = "zynqmp_power_controller",
+ },
+};
+
/**
* zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes
* @ret_status: PMUFW return code
@@ -187,6 +194,29 @@ static int zynqmp_pm_get_api_version(u32 *version)
}
/**
+ * zynqmp_pm_get_chipid - Get silicon ID registers
+ * @idcode: IDCODE register
+ * @version: version register
+ *
+ * Return: Returns the status of the operation and the idcode and version
+ * registers in @idcode and @version.
+ */
+static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!idcode || !version)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+ *idcode = ret_payload[1];
+ *version = ret_payload[2];
+
+ return ret;
+}
+
+/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
* @version: Returned version value
*
@@ -469,8 +499,129 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
arg1, arg2, out);
}
+/**
+ * zynqmp_pm_reset_assert - Request setting of reset (1 - assert, 0 - release)
+ * @reset: Reset to be configured
+ * @assert_flag:	Flag stating whether the reset should be asserted (1) or
+ *			released (0)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_reset_get_status - Get status of the reset
+ * @reset: Reset whose status should be returned
+ * @status: Returned status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+ u32 *status)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
+ 0, 0, ret_payload);
+ *status = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
+ * master has initialized its own power management
+ *
+ * This API function is used to notify the power management controller
+ * that power management initialization has completed.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_init_finalize(void)
+{
+ return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_suspend_mode() - Set system suspend mode
+ * @mode: Mode to set for system suspend
+ *
+ * This API function is used to set the system suspend mode.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_request_node() - Request a node with specific capabilities
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This function is used by a master to request a particular node from
+ * firmware. Every master must request a node before using it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
+ qos, ack, NULL);
+}
+
+/**
+ * zynqmp_pm_release_node() - Release a node
+ * @node: Node ID of the slave
+ *
+ * This function is used by a master to inform firmware that the master
+ * has released a node. Once released, the master must not use that node
+ * again without re-requesting it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_release_node(const u32 node)
+{
+ return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This API function is used to change the capabilities of slaves that a PU
+ * has already requested.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
+ qos, ack, NULL);
+}
+
static const struct zynqmp_eemi_ops eemi_ops = {
.get_api_version = zynqmp_pm_get_api_version,
+ .get_chipid = zynqmp_pm_get_chipid,
.query_data = zynqmp_pm_query_data,
.clock_enable = zynqmp_pm_clock_enable,
.clock_disable = zynqmp_pm_clock_disable,
@@ -482,6 +633,13 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.clock_setparent = zynqmp_pm_clock_setparent,
.clock_getparent = zynqmp_pm_clock_getparent,
.ioctl = zynqmp_pm_ioctl,
+ .reset_assert = zynqmp_pm_reset_assert,
+ .reset_get_status = zynqmp_pm_reset_get_status,
+ .init_finalize = zynqmp_pm_init_finalize,
+ .set_suspend_mode = zynqmp_pm_set_suspend_mode,
+ .request_node = zynqmp_pm_request_node,
+ .release_node = zynqmp_pm_release_node,
+ .set_requirement = zynqmp_pm_set_requirement,
};
/**
@@ -538,11 +696,19 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
+ ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
+ ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+ return ret;
+ }
+
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
static int zynqmp_firmware_remove(struct platform_device *pdev)
{
+ mfd_remove_devices(&pdev->dev);
zynqmp_pm_api_debugfs_exit();
return 0;
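
Note: with the new entries in eemi_ops, client drivers reach these calls through the ops table instead of issuing SMCs themselves. A minimal consumer sketch, assuming the zynqmp_pm_get_eemi_ops() accessor exported by this driver and with illustrative node/capability/QoS values and ack constant taken from the xlnx-zynqmp firmware header (none of this is part of the patch above):

	#include <linux/errno.h>
	#include <linux/firmware/xlnx-zynqmp.h>

	/* Hypothetical helper: request a node, use it, then release it. */
	static int example_use_pm_node(u32 node_id)
	{
		const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();
		int ret;

		if (!ops || !ops->request_node || !ops->release_node)
			return -ENODEV;

		/* Capability and QoS values here are placeholders. */
		ret = ops->request_node(node_id, 0, 0,
					ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret)
			return ret;

		/* ... access the device backed by node_id ... */

		return ops->release_node(node_id);
	}

The mfd_cell added in probe complements this from the other direction: registering "zynqmp_power_controller" as a child platform device lets a matching platform driver bind to it and reuse the same firmware interface.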
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 0bb7b5cd6cdc..c20445b867ae 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -104,7 +104,7 @@ config SOCFPGA_FPGA_BRIDGE
config ALTERA_FREEZE_BRIDGE
tristate "Altera FPGA Freeze Bridge"
- depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ depends on FPGA_BRIDGE && HAS_IOMEM
help
Say Y to enable drivers for Altera FPGA Freeze bridges. A
freeze bridge is a bridge that exists in the FPGA fabric to
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 8c18beec6b57..678d0115f840 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -205,7 +205,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
struct fpga_image_info *info)
{
struct altera_ps_conf *conf = mgr->priv;
- const char dummy[] = {0};
+ static const char dummy[] = {0};
int ret;
if (gpiod_get_value_cansleep(conf->status)) {
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index a1a09e04fab8..13851b3d1c56 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -508,14 +508,11 @@ static int __init s10_init(void)
return -ENODEV;
np = of_find_matching_node(fw_np, s10_of_match);
- if (!np) {
- of_node_put(fw_np);
+ if (!np)
return -ENODEV;
- }
of_node_put(np);
ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
- of_node_put(fw_np);
if (ret)
return ret;
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
index 6abc88514512..6d8c8027e1cd 100644
--- a/drivers/gnss/Kconfig
+++ b/drivers/gnss/Kconfig
@@ -15,6 +15,19 @@ if GNSS
config GNSS_SERIAL
tristate
+config GNSS_MTK_SERIAL
+ tristate "Mediatek GNSS receiver support"
+ depends on SERIAL_DEV_BUS
+ select GNSS_SERIAL
+ help
+ Say Y here if you have a Mediatek-based GNSS receiver which uses a
+ serial interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-mtk.
+
+ If unsure, say N.
+
config GNSS_SIRF_SERIAL
tristate "SiRFstar GNSS receiver support"
depends on SERIAL_DEV_BUS
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
index 5cf0ebe0330a..451f11401ecc 100644
--- a/drivers/gnss/Makefile
+++ b/drivers/gnss/Makefile
@@ -9,6 +9,9 @@ gnss-y := core.o
obj-$(CONFIG_GNSS_SERIAL) += gnss-serial.o
gnss-serial-y := serial.o
+obj-$(CONFIG_GNSS_MTK_SERIAL) += gnss-mtk.o
+gnss-mtk-y := mtk.o
+
obj-$(CONFIG_GNSS_SIRF_SERIAL) += gnss-sirf.o
gnss-sirf-y := sirf.o
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index 4291a0dd22aa..320cfca80d5f 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -334,6 +334,7 @@ static const char * const gnss_type_names[GNSS_TYPE_COUNT] = {
[GNSS_TYPE_NMEA] = "NMEA",
[GNSS_TYPE_SIRF] = "SiRF",
[GNSS_TYPE_UBX] = "UBX",
+ [GNSS_TYPE_MTK] = "MTK",
};
static const char *gnss_type_name(struct gnss_device *gdev)
diff --git a/drivers/gnss/mtk.c b/drivers/gnss/mtk.c
new file mode 100644
index 000000000000..d1fc55560daf
--- /dev/null
+++ b/drivers/gnss/mtk.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mediatek GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+
+#include "serial.h"
+
+struct mtk_data {
+ struct regulator *vbackup;
+ struct regulator *vcc;
+};
+
+static int mtk_set_active(struct gnss_serial *gserial)
+{
+ struct mtk_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mtk_set_standby(struct gnss_serial *gserial)
+{
+ struct mtk_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_disable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mtk_set_power(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state)
+{
+ switch (state) {
+ case GNSS_SERIAL_ACTIVE:
+ return mtk_set_active(gserial);
+ case GNSS_SERIAL_OFF:
+ case GNSS_SERIAL_STANDBY:
+ return mtk_set_standby(gserial);
+ }
+
+ return -EINVAL;
+}
+
+static const struct gnss_serial_ops mtk_gserial_ops = {
+ .set_power = mtk_set_power,
+};
+
+static int mtk_probe(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial;
+ struct mtk_data *data;
+ int ret;
+
+ gserial = gnss_serial_allocate(serdev, sizeof(*data));
+ if (IS_ERR(gserial)) {
+ ret = PTR_ERR(gserial);
+ return ret;
+ }
+
+ gserial->ops = &mtk_gserial_ops;
+
+ gserial->gdev->type = GNSS_TYPE_MTK;
+
+ data = gnss_serial_get_drvdata(gserial);
+
+ data->vcc = devm_regulator_get(&serdev->dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ ret = PTR_ERR(data->vcc);
+ goto err_free_gserial;
+ }
+
+ data->vbackup = devm_regulator_get_optional(&serdev->dev, "vbackup");
+ if (IS_ERR(data->vbackup)) {
+ ret = PTR_ERR(data->vbackup);
+ if (ret == -ENODEV)
+ data->vbackup = NULL;
+ else
+ goto err_free_gserial;
+ }
+
+ if (data->vbackup) {
+ ret = regulator_enable(data->vbackup);
+ if (ret)
+ goto err_free_gserial;
+ }
+
+ ret = gnss_serial_register(gserial);
+ if (ret)
+ goto err_disable_vbackup;
+
+ return 0;
+
+err_disable_vbackup:
+ if (data->vbackup)
+ regulator_disable(data->vbackup);
+err_free_gserial:
+ gnss_serial_free(gserial);
+
+ return ret;
+}
+
+static void mtk_remove(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct mtk_data *data = gnss_serial_get_drvdata(gserial);
+
+ gnss_serial_deregister(gserial);
+ if (data->vbackup)
+ regulator_disable(data->vbackup);
+ gnss_serial_free(gserial);
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_of_match[] = {
+ { .compatible = "globaltop,pa6h" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_of_match);
+#endif
+
+static struct serdev_device_driver mtk_driver = {
+ .driver = {
+ .name = "gnss-mtk",
+ .of_match_table = of_match_ptr(mtk_of_match),
+ .pm = &gnss_serial_pm_ops,
+ },
+ .probe = mtk_probe,
+ .remove = mtk_remove,
+};
+module_serdev_device_driver(mtk_driver);
+
+MODULE_AUTHOR("Loys Ollivier <lollivier@baylibre.com>");
+MODULE_DESCRIPTION("Mediatek GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 226f6e6fe01b..effed3a8d398 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -25,31 +25,83 @@
#define SIRF_ON_OFF_PULSE_TIME 100
#define SIRF_ACTIVATE_TIMEOUT 200
#define SIRF_HIBERNATE_TIMEOUT 200
+/*
+ * If no data arrives for this time, we assume that the chip is off.
+ * REVISIT: The report cycle is configurable and can be several minutes long,
+ * so this will only work reliably if the report cycle is set to a reasonably
+ * low value. Also, power-saving settings (like sending data only on movement)
+ * might make things even worse.
+ * A workaround might be to parse shutdown or boot-up messages.
+ */
+#define SIRF_REPORT_CYCLE 2000
struct sirf_data {
struct gnss_device *gdev;
struct serdev_device *serdev;
speed_t speed;
struct regulator *vcc;
+ struct regulator *lna;
struct gpio_desc *on_off;
struct gpio_desc *wakeup;
int irq;
bool active;
+
+ struct mutex gdev_mutex;
+ bool open;
+
+ struct mutex serdev_mutex;
+ int serdev_count;
+
wait_queue_head_t power_wait;
};
+static int sirf_serdev_open(struct sirf_data *data)
+{
+ int ret = 0;
+
+ mutex_lock(&data->serdev_mutex);
+ if (++data->serdev_count == 1) {
+ ret = serdev_device_open(data->serdev);
+ if (ret) {
+ data->serdev_count--;
+ goto out_unlock;
+ }
+
+ serdev_device_set_baudrate(data->serdev, data->speed);
+ serdev_device_set_flow_control(data->serdev, false);
+ }
+
+out_unlock:
+ mutex_unlock(&data->serdev_mutex);
+
+ return ret;
+}
+
+static void sirf_serdev_close(struct sirf_data *data)
+{
+ mutex_lock(&data->serdev_mutex);
+ if (--data->serdev_count == 0)
+ serdev_device_close(data->serdev);
+ mutex_unlock(&data->serdev_mutex);
+}
+
static int sirf_open(struct gnss_device *gdev)
{
struct sirf_data *data = gnss_get_drvdata(gdev);
struct serdev_device *serdev = data->serdev;
int ret;
- ret = serdev_device_open(serdev);
- if (ret)
- return ret;
+ mutex_lock(&data->gdev_mutex);
+ data->open = true;
+ mutex_unlock(&data->gdev_mutex);
- serdev_device_set_baudrate(serdev, data->speed);
- serdev_device_set_flow_control(serdev, false);
+ ret = sirf_serdev_open(data);
+ if (ret) {
+ mutex_lock(&data->gdev_mutex);
+ data->open = false;
+ mutex_unlock(&data->gdev_mutex);
+ return ret;
+ }
ret = pm_runtime_get_sync(&serdev->dev);
if (ret < 0) {
@@ -61,7 +113,11 @@ static int sirf_open(struct gnss_device *gdev)
return 0;
err_close:
- serdev_device_close(serdev);
+ sirf_serdev_close(data);
+
+ mutex_lock(&data->gdev_mutex);
+ data->open = false;
+ mutex_unlock(&data->gdev_mutex);
return ret;
}
@@ -71,9 +127,13 @@ static void sirf_close(struct gnss_device *gdev)
struct sirf_data *data = gnss_get_drvdata(gdev);
struct serdev_device *serdev = data->serdev;
- serdev_device_close(serdev);
+ sirf_serdev_close(data);
pm_runtime_put(&serdev->dev);
+
+ mutex_lock(&data->gdev_mutex);
+ data->open = false;
+ mutex_unlock(&data->gdev_mutex);
}
static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
@@ -105,8 +165,19 @@ static int sirf_receive_buf(struct serdev_device *serdev,
{
struct sirf_data *data = serdev_device_get_drvdata(serdev);
struct gnss_device *gdev = data->gdev;
+ int ret = 0;
+
+ if (!data->wakeup && !data->active) {
+ data->active = true;
+ wake_up_interruptible(&data->power_wait);
+ }
+
+ mutex_lock(&data->gdev_mutex);
+ if (data->open)
+ ret = gnss_insert_raw(gdev, buf, count);
+ mutex_unlock(&data->gdev_mutex);
- return gnss_insert_raw(gdev, buf, count);
+ return ret;
}
static const struct serdev_device_ops sirf_serdev_ops = {
@@ -125,17 +196,45 @@ static irqreturn_t sirf_wakeup_handler(int irq, void *dev_id)
if (ret < 0)
goto out;
- data->active = !!ret;
+ data->active = ret;
wake_up_interruptible(&data->power_wait);
out:
return IRQ_HANDLED;
}
+static int sirf_wait_for_power_state_nowakeup(struct sirf_data *data,
+ bool active,
+ unsigned long timeout)
+{
+ int ret;
+
+ /* Wait for state change (including any shutdown messages). */
+ msleep(timeout);
+
+ /* Wait for data reception or timeout. */
+ data->active = false;
+ ret = wait_event_interruptible_timeout(data->power_wait,
+ data->active, msecs_to_jiffies(SIRF_REPORT_CYCLE));
+ if (ret < 0)
+ return ret;
+
+ if (ret > 0 && !active)
+ return -ETIMEDOUT;
+
+ if (ret == 0 && active)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int sirf_wait_for_power_state(struct sirf_data *data, bool active,
unsigned long timeout)
{
int ret;
+ if (!data->wakeup)
+ return sirf_wait_for_power_state_nowakeup(data, active, timeout);
+
ret = wait_event_interruptible_timeout(data->power_wait,
data->active == active, msecs_to_jiffies(timeout));
if (ret < 0)
@@ -168,21 +267,22 @@ static int sirf_set_active(struct sirf_data *data, bool active)
else
timeout = SIRF_HIBERNATE_TIMEOUT;
+ if (!data->wakeup) {
+ ret = sirf_serdev_open(data);
+ if (ret)
+ return ret;
+ }
+
do {
sirf_pulse_on_off(data);
ret = sirf_wait_for_power_state(data, active, timeout);
- if (ret < 0) {
- if (ret == -ETIMEDOUT)
- continue;
+ } while (ret == -ETIMEDOUT && retries--);
- return ret;
- }
+ if (!data->wakeup)
+ sirf_serdev_close(data);
- break;
- } while (retries--);
-
- if (retries < 0)
- return -ETIMEDOUT;
+ if (ret)
+ return ret;
return 0;
}
@@ -190,21 +290,60 @@ static int sirf_set_active(struct sirf_data *data, bool active)
static int sirf_runtime_suspend(struct device *dev)
{
struct sirf_data *data = dev_get_drvdata(dev);
+ int ret2;
+ int ret;
- if (!data->on_off)
- return regulator_disable(data->vcc);
+ if (data->on_off)
+ ret = sirf_set_active(data, false);
+ else
+ ret = regulator_disable(data->vcc);
+
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(data->lna);
+ if (ret)
+ goto err_reenable;
- return sirf_set_active(data, false);
+ return 0;
+
+err_reenable:
+ if (data->on_off)
+ ret2 = sirf_set_active(data, true);
+ else
+ ret2 = regulator_enable(data->vcc);
+
+ if (ret2)
+ dev_err(dev,
+ "failed to reenable power on failed suspend: %d\n",
+ ret2);
+
+ return ret;
}
static int sirf_runtime_resume(struct device *dev)
{
struct sirf_data *data = dev_get_drvdata(dev);
+ int ret;
- if (!data->on_off)
- return regulator_enable(data->vcc);
+ ret = regulator_enable(data->lna);
+ if (ret)
+ return ret;
+
+ if (data->on_off)
+ ret = sirf_set_active(data, true);
+ else
+ ret = regulator_enable(data->vcc);
+
+ if (ret)
+ goto err_disable_lna;
+
+ return 0;
+
+err_disable_lna:
+ regulator_disable(data->lna);
- return sirf_set_active(data, true);
+ return ret;
}
static int __maybe_unused sirf_suspend(struct device *dev)
@@ -275,6 +414,8 @@ static int sirf_probe(struct serdev_device *serdev)
data->serdev = serdev;
data->gdev = gdev;
+ mutex_init(&data->gdev_mutex);
+ mutex_init(&data->serdev_mutex);
init_waitqueue_head(&data->power_wait);
serdev_device_set_drvdata(serdev, data);
@@ -290,6 +431,12 @@ static int sirf_probe(struct serdev_device *serdev)
goto err_put_device;
}
+ data->lna = devm_regulator_get(dev, "lna");
+ if (IS_ERR(data->lna)) {
+ ret = PTR_ERR(data->lna);
+ goto err_put_device;
+ }
+
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
if (IS_ERR(data->on_off))
@@ -301,39 +448,53 @@ static int sirf_probe(struct serdev_device *serdev)
if (IS_ERR(data->wakeup))
goto err_put_device;
- /*
- * Configurations where WAKEUP has been left not connected,
- * are currently not supported.
- */
- if (!data->wakeup) {
- dev_err(dev, "no wakeup gpio specified\n");
- ret = -ENODEV;
+ ret = regulator_enable(data->vcc);
+ if (ret)
goto err_put_device;
- }
+
+ /* Wait for chip to boot into hibernate mode. */
+ msleep(SIRF_BOOT_DELAY);
}
if (data->wakeup) {
- ret = gpiod_to_irq(data->wakeup);
+ ret = gpiod_get_value_cansleep(data->wakeup);
if (ret < 0)
- goto err_put_device;
+ goto err_disable_vcc;
+ data->active = ret;
+ ret = gpiod_to_irq(data->wakeup);
+ if (ret < 0)
+ goto err_disable_vcc;
data->irq = ret;
- ret = devm_request_threaded_irq(dev, data->irq, NULL,
- sirf_wakeup_handler,
+ ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"wakeup", data);
if (ret)
- goto err_put_device;
+ goto err_disable_vcc;
}
if (data->on_off) {
- ret = regulator_enable(data->vcc);
- if (ret)
- goto err_put_device;
+ if (!data->wakeup) {
+ data->active = false;
- /* Wait for chip to boot into hibernate mode */
- msleep(SIRF_BOOT_DELAY);
+ ret = sirf_serdev_open(data);
+ if (ret)
+ goto err_disable_vcc;
+
+ msleep(SIRF_REPORT_CYCLE);
+ sirf_serdev_close(data);
+ }
+
+ /* Force hibernate mode if already active. */
+ if (data->active) {
+ ret = sirf_set_active(data, false);
+ if (ret) {
+ dev_err(dev, "failed to set hibernate mode: %d\n",
+ ret);
+ goto err_free_irq;
+ }
+ }
}
if (IS_ENABLED(CONFIG_PM)) {
@@ -342,7 +503,7 @@ static int sirf_probe(struct serdev_device *serdev)
} else {
ret = sirf_runtime_resume(dev);
if (ret < 0)
- goto err_disable_vcc;
+ goto err_free_irq;
}
ret = gnss_register_device(gdev);
@@ -356,6 +517,9 @@ err_disable_rpm:
pm_runtime_disable(dev);
else
sirf_runtime_suspend(dev);
+err_free_irq:
+ if (data->wakeup)
+ free_irq(data->irq, data);
err_disable_vcc:
if (data->on_off)
regulator_disable(data->vcc);
@@ -376,6 +540,9 @@ static void sirf_remove(struct serdev_device *serdev)
else
sirf_runtime_suspend(&serdev->dev);
+ if (data->wakeup)
+ free_irq(data->irq, data);
+
if (data->on_off)
regulator_disable(data->vcc);
@@ -386,6 +553,7 @@ static void sirf_remove(struct serdev_device *serdev)
static const struct of_device_id sirf_of_match[] = {
{ .compatible = "fastrax,uc430" },
{ .compatible = "linx,r4" },
+ { .compatible = "wi2wi,w2sg0004" },
{ .compatible = "wi2wi,w2sg0008i" },
{ .compatible = "wi2wi,w2sg0084i" },
{},
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f1314248..7f9e0304b510 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
unsigned int nr, int value)
{
- if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
+ if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
+ altr_a10sr_gpio_set(gc, nr, value);
return 0;
+ }
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e0d6a0a7bc69..e41223c05f6e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
{
- return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+ struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
+
+ switch (sprd_eic->type) {
+ case SPRD_EIC_DEBOUNCE:
+ return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+ case SPRD_EIC_ASYNC:
+ return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
+ case SPRD_EIC_SYNC:
+ return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
+ default:
+ return -ENOTSUPP;
+ }
}
static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 00e954f22bc9..74401e0adb29 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -30,6 +30,7 @@
#define GPIO_REG_EDGE 0xA0
struct mtk_gc {
+ struct irq_chip irq_chip;
struct gpio_chip chip;
spinlock_t lock;
int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip mediatek_gpio_irq_chip = {
- .irq_unmask = mediatek_gpio_irq_unmask,
- .irq_mask = mediatek_gpio_irq_mask,
- .irq_mask_ack = mediatek_gpio_irq_mask,
- .irq_set_type = mediatek_gpio_irq_type,
-};
-
static int
mediatek_gpio_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec, u32 *flags)
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
+ rg->irq_chip.name = dev_name(dev);
+ rg->irq_chip.parent_device = dev;
+ rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
+ rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
+
if (mtk->gpio_irq) {
/*
* Manually request the irq here instead of passing
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
- ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+ ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
0, handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add gpiochip_irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+ gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
mtk->gpio_irq, NULL);
}
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
mtk->gpio_irq = irq_of_parse_and_map(np, 0);
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
- mediatek_gpio_irq_chip.name = dev_name(dev);
for (i = 0; i < MTK_BANK_CNT; i++) {
ret = mediatek_gpio_bank_probe(dev, np, i);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 83617fdc661d..0dc96419efe3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -289,7 +289,7 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
return pca953x_check_register(chip, reg, bank);
}
-const struct regmap_config pca953x_i2c_regmap = {
+static const struct regmap_config pca953x_i2c_regmap = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index adf72dda25a2..68a35b65925a 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
*/
struct pcf857x {
struct gpio_chip chip;
+ struct irq_chip irqchip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
unsigned out; /* software latch */
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&gpio->lock);
}
-static struct irq_chip pcf857x_irq_chip = {
- .name = "pcf857x",
- .irq_enable = pcf857x_irq_enable,
- .irq_disable = pcf857x_irq_disable,
- .irq_ack = noop,
- .irq_mask = noop,
- .irq_unmask = noop,
- .irq_set_wake = pcf857x_irq_set_wake,
- .irq_bus_lock = pcf857x_irq_bus_lock,
- .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
-};
-
/*-------------------------------------------------------------------------*/
static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
/* Enable irqchip if we have an interrupt */
if (client->irq) {
+ gpio->irqchip.name = "pcf857x",
+ gpio->irqchip.irq_enable = pcf857x_irq_enable,
+ gpio->irqchip.irq_disable = pcf857x_irq_disable,
+ gpio->irqchip.irq_ack = noop,
+ gpio->irqchip.irq_mask = noop,
+ gpio->irqchip.irq_unmask = noop,
+ gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
+ gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
+ gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
status = gpiochip_irqchip_add_nested(&gpio->chip,
- &pcf857x_irq_chip,
+ &gpio->irqchip,
0, handle_level_irq,
IRQ_TYPE_NONE);
if (status) {
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
if (status)
goto fail;
- gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
+ gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
client->irq);
gpio->irq_parent = client->irq;
}
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index e9600b556f39..bcc6be4a5cb2 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
{
switch (gpio_type) {
case PXA3XX_GPIO:
+ case MMP2_GPIO:
return false;
default:
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 1b79ebcfce3e..541fa6ac399d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct resource *iores;
struct gpio_chip *gc;
+ int i;
int ret;
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ /* Mask all GPIO interrupts */
+ for (i = 0; i < gc->ngpio; i++)
+ vf610_gpio_writel(0, port->base + PORT_PCR(i));
+
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 48534bda73d3..3e8e2d3ce7a8 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -357,8 +357,6 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
- struct gpio_desc *desc;
-
if (event->irq_requested) {
if (event->irq_is_wake)
disable_irq_wake(event->irq);
@@ -366,11 +364,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
free_irq(event->irq, event);
}
- desc = event->desc;
- if (WARN_ON(IS_ERR(desc)))
- continue;
gpiochip_unlock_as_irq(chip, event->pin);
- gpiochip_free_own_desc(desc);
+ gpiochip_free_own_desc(event->desc);
list_del(&event->node);
kfree(event);
}
@@ -535,17 +530,24 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
return 1;
- if (lookup->n++ == lookup->index && !lookup->desc) {
+ if (!lookup->desc) {
const struct acpi_resource_gpio *agpio = &ares->data.gpio;
- int pin_index = lookup->pin_index;
+ bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
+ int pin_index;
+
+ if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
+ lookup->index++;
+
+ if (lookup->n++ != lookup->index)
+ return 1;
+ pin_index = lookup->pin_index;
if (pin_index >= agpio->pin_table_length)
return 1;
lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
- lookup->info.gpioint =
- agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
+ lookup->info.gpioint = gpioint;
/*
* Polarity and triggering are only specified for GpioInt
@@ -897,7 +899,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
* event but only if the access here is ACPI_READ. In that
* case we "borrow" the event GPIO instead.
*/
- if (!found && agpio->sharable == ACPI_SHARED &&
+ if (!found && agpio->shareable == ACPI_SHARED &&
function == ACPI_READ) {
struct acpi_gpio_event *event;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a6e1891217e2..c34eb9d9c59a 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -125,7 +125,7 @@ static void of_gpio_flags_quirks(struct device_node *np,
for_each_child_of_node(np, child) {
ret = of_property_read_u32(child, "reg", &cs);
- if (!ret)
+ if (ret)
continue;
if (cs == index) {
/*
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 1651d7f0a303..d1adfdf50fb3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
/* Do not leak kernel stack to userspace */
memset(&ge, 0, sizeof(ge));
- ge.timestamp = le->timestamp;
+ /*
+ * We may be running from a nested threaded interrupt in which case
+ * we didn't get the timestamp from lineevent_irq_handler().
+ */
+ if (!le->timestamp)
+ ge.timestamp = ktime_get_real_ns();
+ else
+ ge.timestamp = le->timestamp;
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661d9e20..92b11de19581 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8a078f4ae73d..7ff3a28fc903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1701,8 +1701,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+ }
return 0;
}
@@ -2632,9 +2634,6 @@ fence_driver_init:
goto failed;
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_init_data_exchange(adev);
-
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
@@ -2798,7 +2797,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
- if (amdgpu_crtc->cursor_bo) {
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
@@ -2906,7 +2905,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->cursor_bo) {
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
@@ -3226,6 +3225,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
r = amdgpu_ib_ring_tests(adev);
error:
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
atomic_inc(&adev->vram_lost_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 15ce7e681d67..b083b219b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to pin new abo buffer before flip\n");
- goto unreserve;
+ if (!adev->enable_virtual_display) {
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
+ goto unreserve;
+ }
}
r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
- work->base = amdgpu_bo_gpu_offset(new_abo);
+ if (!adev->enable_virtual_display)
+ work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
@@ -242,9 +245,10 @@ pflip_cleanup:
goto cleanup;
}
unpin:
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
- DRM_ERROR("failed to unpin new abo in error path\n");
- }
+ if (!adev->enable_virtual_display)
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+ DRM_ERROR("failed to unpin new abo in error path\n");
+
unreserve:
amdgpu_bo_unreserve(new_abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bc62bf41b7e9..5dc349173e4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index aadd0fa42e43..3aa42c64484a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -405,6 +405,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
+ u64 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 1f61ed95727c..0ed41a9d2d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
if ((adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
@@ -2008,6 +2009,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
if (adev->pm.sysfs_initialized)
@@ -2091,12 +2093,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_power_profile_mode\n");
return ret;
}
- ret = device_create_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_od_clk_voltage\n");
- return ret;
+ if (hwmgr->od_enabled) {
+ ret = device_create_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "pp_od_clk_voltage\n");
+ return ret;
+ }
}
ret = device_create_file(adev->dev,
&dev_attr_gpu_busy_percent);
@@ -2118,6 +2122,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
if (adev->pm.dpm_enabled == 0)
return;
@@ -2138,8 +2144,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
- device_remove_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
+ if (hwmgr->od_enabled)
+ device_remove_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 71913a18d142..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
#include "amdgpu_gem.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
return ERR_PTR(ret);
}
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+ struct dma_fence **fences;
+ unsigned int count;
+ int r;
+
+ if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+ return 0;
+
+ r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+ if (r)
+ return r;
+
+ if (count == 0) {
+ /* Now that was unexpected. */
+ } else if (count == 1) {
+ reservation_object_add_excl_fence(obj, fences[0]);
+ dma_fence_put(fences[0]);
+ kfree(fences);
+ } else {
+ struct dma_fence_array *array;
+
+ array = dma_fence_array_create(count, fences,
+ dma_fence_context_alloc(1), 0,
+ false);
+ if (!array)
+ goto err_fences_put;
+
+ reservation_object_add_excl_fence(obj, &array->base);
+ dma_fence_put(&array->base);
+ }
+
+ return 0;
+
+err_fences_put:
+ while (count--)
+ dma_fence_put(fences[count]);
+ kfree(fences);
+ return -ENOMEM;
+}
+
/**
* amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
* @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
if (attach->dev->driver != adev->dev->driver) {
/*
- * Wait for all shared fences to complete before we switch to future
- * use of exclusive fence on this prime shared bo.
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
*/
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (unlikely(r < 0)) {
- DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+ r = __reservation_object_make_exclusive(bo->tbo.resv);
+ if (r)
goto error_unreserve;
- }
}
/* pin buffer into GTT */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fab0d637ee5..3a9b48b227ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle)
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
+ if (adev->psp.ta_fw) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ }
return 0;
}
@@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
+ if (!psp->adev->psp.ta_fw)
+ return -ENOENT;
+
if (!psp->xgmi_context.initialized) {
ret = psp_xgmi_init_shared_buf(psp);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e73d152659a2..698bcb8ce61d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -638,12 +638,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
+#if 0
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
return;
}
+#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
@@ -847,9 +849,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp->size = amdgpu_vm_bo_size(adev, level);
bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
- adev->flags & AMD_IS_APU)
- bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -3366,14 +3365,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info)
{
struct amdgpu_vm *vm;
+ unsigned long flags;
- spin_lock(&adev->vm_manager.pasid_lock);
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
if (vm)
*task_info = vm->task_info;
- spin_unlock(&adev->vm_manager.pasid_lock);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index fdace004544d..e4cc1d48eaab 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
+ amdgpu_bo_unref(&works->old_abo);
+ kfree(works->shared);
+ kfree(works);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 381f593b0cda..57cb3a51bda7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4233,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
- int r;
/* Set the write pointer delay */
WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4278,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
ring->sched.ready = true;
- r = amdgpu_ring_test_helper(ring);
- return r;
+ return 0;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4369,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ enable failed\n");
- return r;
+ amdgpu_ring_commit(kiq_ring);
+
+ return 0;
}
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4709,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
if (r)
goto done;
- /* Test KCQs - reversing the order of rings seems to fix ring test failure
- * after GPU reset
- */
- for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
+done:
+ return r;
+}
+
+static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
+{
+ int r, i;
+ struct amdgpu_ring *ring;
+
+ /* collect all the ring_tests here, gfx, kiq, compute */
+ ring = &adev->gfx.gfx_ring[0];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ ring = &adev->gfx.kiq.ring;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- r = amdgpu_ring_test_helper(ring);
+ amdgpu_ring_test_helper(ring);
}
-done:
- return r;
+ return 0;
}
static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4739,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
r = gfx_v8_0_kcq_resume(adev);
if (r)
return r;
+
+ r = gfx_v8_0_cp_test_all_rings(adev);
+ if (r)
+ return r;
+
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
return 0;
@@ -5086,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
gfx_v8_0_cp_gfx_resume(adev);
+ gfx_v8_0_cp_test_all_rings(adev);
+
adev->gfx.rlc.funcs->start(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7556716038d3..fbca0494f871 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -113,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -135,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -3587,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3651,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8cbb4655896a..b11a1c17a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
return r;
}
/* Retrieve checksum from mailbox2 */
- if (req == IDH_REQ_GPU_INIT_ACCESS) {
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4cd31a276dcd..186db182f924 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
+ u32 tmp = 0;
+ if (enable) {
+ tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
+ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
+ lower_32_bits(adev->doorbell.base));
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+ upper_32_bits(adev->doorbell.base));
+ }
+
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
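In the doorbell hunk above, each REG_SET_FIELD() call starts from tmp == 0, so OR-ing the three results simply merges non-overlapping fields into one register value. A rough userspace illustration of the underlying mask-and-shift idea (the field names and widths here are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_EN_SHIFT    0
    #define FIELD_EN_MASK     (0x1u << FIELD_EN_SHIFT)
    #define FIELD_MODE_SHIFT  1
    #define FIELD_MODE_MASK   (0x3u << FIELD_MODE_SHIFT)

    /* analogous to REG_SET_FIELD(orig, ...): clear the field, then insert val */
    #define SET_FIELD(orig, MASK, SHIFT, val) \
            (((orig) & ~(MASK)) | (((uint32_t)(val) << (SHIFT)) & (MASK)))

    int main(void)
    {
            uint32_t tmp = 0;

            tmp = SET_FIELD(tmp, FIELD_EN_MASK, FIELD_EN_SHIFT, 1);
            tmp = SET_FIELD(tmp, FIELD_MODE_MASK, FIELD_MODE_SHIFT, 1);
            printf("aperture cntl = 0x%08x\n", (unsigned int)tmp);   /* 0x00000003 */
            return 0;
    }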
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0c6e7f9b143f..189fcb004579 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err)
- goto out2;
-
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
- adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
- adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+ adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+ adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+ adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+ }
return 0;
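The psp_v11_0 hunk above turns a missing TA firmware file into a non-fatal condition instead of an init failure. A kernel-style sketch of that optional-firmware pattern, with a made-up blob name and the header parsing elided (illustrative only, not a drop-in):

    #include <linux/firmware.h>
    #include <linux/device.h>

    static int example_load_optional_fw(struct device *dev,
                                        const struct firmware **fw)
    {
            int err = request_firmware(fw, "vendor/example_ta.bin", dev);

            if (err) {
                    /* Optional blob: drop the handle, note it, keep going. */
                    release_firmware(*fw);
                    *fw = NULL;
                    dev_info(dev, "optional firmware not found, continuing\n");
                    return 0;
            }

            /* validate and parse the firmware header here, as the real code does */
            return 0;
    }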
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd0bfe140ee0..aa2f71cc1eba 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
@@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};
@@ -127,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
{
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -157,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8849b74078d6..9b639974c70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
case CHIP_RAVEN:
adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
- adev->external_rev_id = adev->rev_id + 0x81;
+ adev->external_rev_id = adev->rev_id + 0x79;
else if (adev->pdev->device == 0x15d8)
adev->external_rev_id = adev->rev_id + 0x41;
+ else if (adev->rev_id == 1)
+ adev->external_rev_id = adev->rev_id + 0x20;
else
- adev->external_rev_id = 0x1;
+ adev->external_rev_id = adev->rev_id + 0x01;
if (adev->rev_id >= 0x8) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
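For reference, the Raven branch above now derives external_rev_id from rev_id in four cases. A small standalone restatement of that mapping (0x15d8 is the PCI device ID tested in the hunk; 0x15dd below is only a placeholder for some other device):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t raven_external_rev_id(uint32_t rev_id, uint16_t pci_device)
    {
            if (rev_id >= 0x8)
                    return rev_id + 0x79;
            if (pci_device == 0x15d8)
                    return rev_id + 0x41;
            if (rev_id == 1)
                    return rev_id + 0x20;
            return rev_id + 0x01;
    }

    int main(void)
    {
            printf("rev 0x1 -> 0x%x\n", raven_external_rev_id(0x1, 0x15dd)); /* 0x21 */
            printf("rev 0x8 -> 0x%x\n", raven_external_rev_id(0x8, 0x15dd)); /* 0x81 */
            return 0;
    }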
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index fbf0ee5201c3..c3613604a4f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,8 +4,8 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && X86_64
- imply AMD_IOMMU_V2
+ depends on DRM_AMDGPU && (X86_64 || ARM64)
+ imply AMD_IOMMU_V2 if X86_64
select MMU_NOTIFIER
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index b7bc7d7d048f..2e7c44955f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
return 0;
}
+#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
uint32_t *num_entries,
struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
return 0;
}
+#endif
/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
*
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
struct crat_subtype_generic *sub_type_hdr;
int avail_size = *size;
int numa_node_id;
+#ifdef CONFIG_X86_64
uint32_t entries = 0;
+#endif
int ret = 0;
if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
sub_type_hdr->length);
/* Fill in Subtype: IO Link */
+#ifdef CONFIG_X86_64
ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
&entries,
(struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length * entries);
+#else
+ pr_info("IO link not available for non x86 platforms\n");
+#endif
crat_table->num_domains++;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5f5b2acedbac..09da91644f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- * gpu attached, it will better to have more stringent check.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *out_dev = NULL;
down_write(&topology_lock);
- list_for_each_entry(dev, &topology_device_list, list)
+ list_for_each_entry(dev, &topology_device_list, list) {
+ /* Discrete GPUs need their own topology device list
+ * entries. Don't assign them to CPU/APU nodes.
+ */
+ if (!gpu->device_info->needs_iommu_device &&
+ dev->node_props.cpu_cores_count)
+ continue;
+
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
+ }
up_write(&topology_lock);
return out_dev;
}
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
{
- const struct cpuinfo_x86 *cpuinfo;
int first_cpu_of_numa_node;
if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
first_cpu_of_numa_node = cpumask_first(cpumask);
if (first_cpu_of_numa_node >= nr_cpu_ids)
return -1;
- cpuinfo = &cpu_data(first_cpu_of_numa_node);
-
- return cpuinfo->apicid;
+#ifdef CONFIG_X86_64
+ return cpu_data(first_cpu_of_numa_node).apicid;
+#else
+ return first_cpu_of_numa_node;
+#endif
}
/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a9a28dbc3e24..636d14a60952 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
+ /* Update to correct count(s) if racing with vblank irq */
+ amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* wake up userspace */
if (amdgpu_crtc->event) {
- /* Update to correct count(s) if racing with vblank irq */
- drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
-
drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
/* page flip completed. clean up */
@@ -699,22 +698,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_dp_mst_topology_mgr *mgr;
+ int ret;
+ bool need_hotplug = false;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->dc_link->type == dc_connection_mst_branch &&
- !aconnector->mst_port) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_port)
+ continue;
+
+ mgr = &aconnector->mst_mgr;
- if (suspend)
- drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
- else
- drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
- }
+ if (suspend) {
+ drm_dp_mst_topology_mgr_suspend(mgr);
+ } else {
+ ret = drm_dp_mst_topology_mgr_resume(mgr);
+ if (ret < 0) {
+ drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ need_hotplug = true;
+ }
+ }
}
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(dev);
}
/**
@@ -772,12 +785,13 @@ static int dm_suspend(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
s3_handle_mst(adev->ddev, true);
amdgpu_dm_irq_suspend(adev);
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
@@ -898,7 +912,6 @@ static int dm_resume(void *handle)
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
enum dc_connection_type new_connection_type = dc_connection_none;
- int ret;
int i;
/* power on hardware */
@@ -971,13 +984,13 @@ static int dm_resume(void *handle)
}
}
- ret = drm_atomic_helper_resume(ddev, dm->cached_state);
+ drm_atomic_helper_resume(ddev, dm->cached_state);
dm->cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
- return ret;
+ return 0;
}
/**
@@ -1759,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+ caps.min_input_signal * 0x101;
if (dc_link_set_backlight_level(dm->backlight_link,
- brightness, 0, 0))
+ brightness, 0))
return 0;
else
return 1;
@@ -4069,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
}
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
drm_connector_attach_vrr_capable_property(
&aconnector->base);
}
@@ -4813,6 +4827,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0;
unsigned long flags;
+ u64 last_flip_vblank;
+ bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4844,6 +4860,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* In commit tail framework this cannot happen */
WARN_ON(1);
}
+
+ /* For variable refresh rate mode only:
+ * Get the vblank of the last completed flip to avoid more than one
+ * VRR flip per video frame by throttling, but allow flip programming
+ * anywhere in the possibly large variable vrr vblank interval
+ * for fine-grained flip timing control and more opportunity to
+ * avoid stutter on late submission of amdgpu_dm_do_flip() calls.
+ */
+ last_flip_vblank = acrtc_attach->last_flip_vblank;
+
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
@@ -4867,10 +4893,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
drm_crtc_vblank_get(crtc);
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., clients
+ * using GLX_OML_sync_control extension.
+ */
+ if (!vrr_active)
+ last_flip_vblank = drm_crtc_vblank_count(crtc);
+
amdgpu_dm_do_flip(
crtc,
fb,
- (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ (uint32_t) last_flip_vblank + *wait_for_vblank,
dc_state);
}
@@ -5920,7 +5954,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
- !new_crtc_state->vrr_enabled)
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
continue;
if (!new_crtc_state->enable)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9a7ac58eb18e..ddd75a4d8ba5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return bytes_from_user;
}
+/*
+ * Returns the min and max vrr vfreq through the connector's debugfs file.
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
+ */
+static int vrr_range_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
+ seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vrr_range);
+
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_link_settings_read,
@@ -697,7 +716,8 @@ static const struct {
} dp_debugfs_entries[] = {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
- {"test_pattern", &dp_phy_test_pattern_fops}
+ {"test_pattern", &dp_phy_test_pattern_fops},
+ {"vrr_range", &vrr_range_fops}
};
int connector_debugfs_init(struct amdgpu_dm_connector *connector)
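The vrr_range entry added above follows the usual seq_file pattern: a *_show() callback plus DEFINE_SHOW_ATTRIBUTE() to generate the file_operations. A kernel-style sketch of that pattern for a hypothetical read-only debugfs file (names are invented, not a drop-in):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    struct example_state {                    /* hypothetical private data */
            unsigned int min, max;
    };

    static int example_range_show(struct seq_file *m, void *data)
    {
            struct example_state *s = m->private;  /* passed at creation time */

            seq_printf(m, "Min: %u\n", s->min);
            seq_printf(m, "Max: %u\n", s->max);
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(example_range);     /* generates example_range_fops */

    /* registered from an init path, e.g.:
     *   debugfs_create_file("example_range", 0444, parent_dentry, state,
     *                       &example_range_fops);
     */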
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 52deacf39841..b0265dbebd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
bool dc_link_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp,
- const struct dc_stream_state *stream)
+ uint32_t frame_ramp)
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
(abm->funcs->set_backlight_level_pwm == NULL))
return false;
- if (stream)
- ((struct dc_stream_state *)stream)->bl_pwm_level =
- backlight_pwm_u16_16;
-
use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2637,11 +2632,6 @@ void core_link_enable_stream(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
-
- dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
- pipe_ctx->stream->bl_pwm_level,
- 0,
- pipe_ctx->stream);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 29f19d57ff7a..b2243e0dad1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
*/
bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp,
- const struct dc_stream_state *stream);
+ uint32_t frame_ramp);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index be34d638e15d..d70c9e1cda3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -91,7 +91,6 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
- unsigned int bl_pwm_level;
/* from core_stream struct */
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index afd287f08bc9..7a72ee46f14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements(
dc,
context->bw.dce.sclk_khz);
- pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+ /*
+ * As a workaround for >4x4K lightup, set dcfclock to the min_engine_clock value.
+ * This is not required for fewer than 5 displays,
+ * so don't request dcfclk in dc to avoid impacting
+ * power saving.
+ *
+ */
+ pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
+ pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw.dce.sclk_deep_sleep_khz;
@@ -654,6 +662,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
+ int patched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /*TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
/* get max clock state from PPLIB */
@@ -663,9 +676,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
- context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
- clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+ clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index acd418515346..a6b80fdaa666 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -37,6 +37,10 @@ void dce100_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dce100_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
struct dc_bios *dcb,
enum pipe_gating_control power_gating);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4bf24758217f..8f09b8625c5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
if (option != KEEP_ACQUIRED_RESOURCE ||
!dc->debug.az_endpoint_mute_only) {
/* only disable az_endpoint if power down or free */
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
+ if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ /* apply the PME w/a in order to wake AZ from D3 */
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* TODO: notify audio driver if the audio modes list changed;
* add audio mode list change flag */
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index a60a90e68d91..c4543178ba20 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
- dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index cdd1d6b7b9f2..4e9ea50141bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -790,9 +790,22 @@ bool dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- /* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index dcb3c5530236..cd1ebe57ed59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset < 0)
+ if (src_y_offset + (int)height <= 0)
cur_en = 0; /* not visible beyond top edge*/
REG_UPDATE(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 345af015d061..d1acd7165bc8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position(
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
+ if (src_y_offset + (int)hubp->curs_attr.height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
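The cursor fix above (and the matching dpp change) only hides the cursor past the top edge once its entire height is above the viewport, i.e. when y_offset + height <= 0 rather than merely y_offset < 0, so a cursor straddling the top edge stays visible. A standalone restatement of the visibility test:

    #include <stdio.h>
    #include <stdbool.h>

    static bool cursor_visible(int src_y_offset, int height, int viewport_height)
    {
            if (src_y_offset >= viewport_height)
                    return false;                 /* fully below the bottom edge */
            if (src_y_offset + height <= 0)
                    return false;                 /* fully above the top edge */
            return true;
    }

    int main(void)
    {
            printf("%d\n", cursor_visible(-16, 64, 1080)); /* straddles top edge: 1 */
            printf("%d\n", cursor_visible(-64, 64, 1080)); /* fully above: 0 */
            return 0;
    }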
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 91e015e14355..41883c981789 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface(
top_pipe_to_program->plane_state->update_flags.bits.full_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
+ tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
if (!pipe_ctx->stream || pipe_ctx->stream == stream
- || !pipe_ctx->plane_state)
+ || !pipe_ctx->plane_state
+ || !tg->funcs->is_tg_enabled(tg))
continue;
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ tg->funcs->lock(tg);
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->stream || pipe_ctx->stream == stream
- || !pipe_ctx->plane_state)
- continue;
-
- dcn10_pipe_control_lock(dc, pipe_ctx, false);
- }
+ tg->funcs->unlock(tg);
+ }
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
@@ -2665,8 +2658,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x;
- pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y;
+ pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
+ pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 00f63b7dd32f..c11a443dcbc8 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
#define NUM_POWER_FN_SEGS 8
#define NUM_BL_CURVE_SEGS 16
+#pragma pack(push, 1)
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
@@ -100,6 +101,7 @@ struct iram_table_v_2 {
uint8_t dummy8; /* 0xfe */
uint8_t dummy9; /* 0xff */
};
+#pragma pack(pop)
static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
{
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 1479ea1dc3e7..789c4f288485 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -127,12 +127,13 @@ enum amd_pp_task {
};
enum PP_SMC_POWER_PROFILE {
- PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0,
- PP_SMC_POWER_PROFILE_POWERSAVING = 0x1,
- PP_SMC_POWER_PROFILE_VIDEO = 0x2,
- PP_SMC_POWER_PROFILE_VR = 0x3,
- PP_SMC_POWER_PROFILE_COMPUTE = 0x4,
- PP_SMC_POWER_PROFILE_CUSTOM = 0x5,
+ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
+ PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
+ PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
+ PP_SMC_POWER_PROFILE_VIDEO = 0x3,
+ PP_SMC_POWER_PROFILE_VR = 0x4,
+ PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
+ PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
};
enum {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 0173d0480024..310b102a9292 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;
-
- hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
- hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
- hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+
+ hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
+ hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
+ hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
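Inserting PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT at index 0 shifts every later profile, which is why the priority and setting tables above, and Workload_Policy_Max in hwmgr.h further down, all grow in the same patch. A standalone illustration (userspace, made-up values) of keeping an enum and the arrays indexed by it in lockstep:

    #include <stdio.h>

    enum power_profile {
            PROFILE_BOOTUP_DEFAULT = 0,
            PROFILE_FULLSCREEN3D,
            PROFILE_POWERSAVING,
            PROFILE_VIDEO,
            PROFILE_VR,
            PROFILE_COMPUTE,
            PROFILE_COUNT                /* plays the role of Workload_Policy_Max */
    };

    static int workload_priority[PROFILE_COUNT];               /* profile -> priority */
    static enum power_profile workload_setting[PROFILE_COUNT]; /* priority -> profile */

    int main(void)
    {
            for (int i = 0; i < PROFILE_COUNT; i++) {
                    workload_priority[i] = i;
                    workload_setting[i] = (enum power_profile)i;
            }
            printf("compute priority = %d\n", workload_priority[PROFILE_COMPUTE]);
            return 0;
    }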
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index f95c5f50eb0f..5273de3c5b98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
break;
case amd_pp_dpp_clock:
pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index d91390459326..c8f5c00dd1e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,8 +77,9 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static const struct profile_mode_setting smu7_profiling[6] =
- {{1, 0, 100, 30, 1, 0, 100, 10},
+static const struct profile_mode_setting smu7_profiling[7] =
+ {{0, 0, 0, 0, 0, 0, 0, 0},
+ {1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 10, 16, 31},
{1, 0, 11, 50, 1, 0, 100, 10},
@@ -4889,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
uint32_t i, size = 0;
uint32_t len;
- static const char *profile_name[6] = {"3D_FULL_SCREEN",
+ static const char *profile_name[7] = {"BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 79c86247d0ac..91e3bbe6d61d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -804,9 +804,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->backend = data;
- hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
- hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega10_set_default_registry_data(hwmgr);
data->disable_dpm_mask = 0xff;
@@ -4668,13 +4668,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, size = 0;
- static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
+ static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
+ {70, 60, 1, 3,},
{90, 60, 0, 0,},
{70, 60, 0, 0,},
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
- static const char *profile_name[6] = {"3D_FULL_SCREEN",
+ static const char *profile_name[7] = {"BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index b8747a5c9204..99d596dc0e89 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -32,6 +32,7 @@
#include "vega10_pptable.h"
#define NUM_DSPCLK_LEVELS 8
+#define VEGA10_ENGINECLOCK_HARDMAX 198000
static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
enum phm_platform_caps cap)
@@ -258,7 +259,26 @@ static int init_over_drive_limits(
struct pp_hwmgr *hwmgr,
const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
{
- hwmgr->platform_descriptor.overdriveLimit.engineClock =
+ const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
+ (const ATOM_Vega10_GFXCLK_Dependency_Table *)
+ (((unsigned long) powerplay_table) +
+ le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
+ bool is_acg_enabled = false;
+ ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
+
+ if (gfxclk_dep_table->ucRevId == 1) {
+ patom_record_v2 =
+ (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+ is_acg_enabled =
+ (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
+ }
+
+ if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
+ !is_acg_enabled)
+ hwmgr->platform_descriptor.overdriveLimit.engineClock =
+ VEGA10_ENGINECLOCK_HARDMAX;
+ else
+ hwmgr->platform_descriptor.overdriveLimit.engineClock =
le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
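The init_over_drive_limits() change above only caps the overdrive engine clock when the top gfxclk entry does not report ACG as enabled. A standalone restatement of that clamp (the numeric limit is the one from the hunk; the clock units are whatever the driver's tables use):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define EXAMPLE_ENGINECLOCK_HARDMAX 198000u

    static uint32_t od_engine_clock_limit(uint32_t table_max, bool acg_enabled)
    {
            if (table_max > EXAMPLE_ENGINECLOCK_HARDMAX && !acg_enabled)
                    return EXAMPLE_ENGINECLOCK_HARDMAX;
            return table_max;
    }

    int main(void)
    {
            printf("%u\n", od_engine_clock_limit(200000, false)); /* clamped: 198000 */
            printf("%u\n", od_engine_clock_limit(200000, true));  /* kept:    200000 */
            return 0;
    }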
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 54364444ecd1..0c8212902275 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
+{
+ uint32_t result;
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+ "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
+ return -EINVAL);
+
+ result = smum_get_argument(hwmgr);
+ PP_ASSERT_WITH_CODE(result == 1,
+ "Failed to run ACG BTC!", return -EINVAL);
+
+ return 0;
+}
+
static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to initialize SMC table!",
result = tmp_result);
+ tmp_result = vega12_run_acg_btc(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to run ACG BTC!",
+ result = tmp_result);
+
result = vega12_enable_all_smu_features(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to enable all smu features!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 26154f9b2178..82935a3bd950 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -390,9 +390,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->backend = data;
- hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
- hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega20_set_default_registry_data(hwmgr);
@@ -980,6 +980,9 @@ static int vega20_od8_set_feature_capabilities(
pp_table->FanZeroRpmEnable)
od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
+ if (!od_settings->overdrive8_capabilities)
+ hwmgr->od_enabled = false;
+
return 0;
}
@@ -1689,13 +1692,6 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
"Failed to set soft min memclk !",
return ret);
-
- min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
- PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
- hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
- "Failed to set hard min memclk !",
- return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled &&
@@ -2248,6 +2244,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_max_level >= data->dpm_table.gfx_table.count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level,
+ data->dpm_table.gfx_table.count - 1);
+ return -EINVAL;
+ }
+
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
data->dpm_table.gfx_table.dpm_state.soft_max_level =
@@ -2268,6 +2271,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_max_level >= data->dpm_table.mem_table.count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level,
+ data->dpm_table.mem_table.count - 1);
+ return -EINVAL;
+ }
+
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
data->dpm_table.mem_table.dpm_state.soft_max_level =
@@ -3261,6 +3271,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
int pplib_workload = 0;
switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
@@ -3290,6 +3303,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
uint32_t i, size = 0;
uint16_t workload_type = 0;
static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 0d298a0409f5..8cb831b6a016 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -705,7 +705,7 @@ enum PP_TABLE_VERSION {
/**
* The main hardware manager structure.
*/
-#define Workload_Policy_Max 5
+#define Workload_Policy_Max 6
struct pp_hwmgr {
void *adev;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index f3dd66ae990a..aa35007262cd 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -154,6 +154,10 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (IS_ERR(dev))
return PTR_ERR(dev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free_dev;
+
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index cf3f0caf9c63..ed7af7518b52 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -614,7 +614,6 @@ static int snd_dw_hdmi_suspend(struct device *dev)
struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold);
- snd_pcm_suspend_all(dw->pcm);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8e28e738cb52..e6403b9549f1 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -98,6 +98,8 @@
#define DP0_STARTVAL 0x064c
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
+#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
+#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
#define DP0_MISC 0x0658
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
@@ -142,6 +144,8 @@
#define DP0_LTLOOPCTRL 0x06d8
#define DP0_SNKLTCTRL 0x06e4
+#define DP1_SRCCTRL 0x07a0
+
/* PHY */
#define DP_PHY_CTRL 0x0800
#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
@@ -150,6 +154,7 @@
#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
+#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
unsigned long rate;
u32 value;
int ret;
+ u32 dp_phy_ctrl;
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
+ if (tc->link.base.num_lanes == 2)
+ dp_phy_ctrl |= PHY_2LANE;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
/*
* Initially PLLs are in bypass. Force PLL parameter update,
@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
+ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
if (!tc->mode)
return -EINVAL;
- /* from excel file - DP0_SrcCtrl */
- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
- /* from excel file - DP1_SrcCtrl */
- tc_write(0x07a0, 0x00003083);
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
+ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
+ tc_write(DP1_SRCCTRL,
+ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
+ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
}
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
+
/* Setup Main Link */
- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
+ if (tc->link.base.num_lanes == 2)
+ dp_phy_ctrl |= PHY_2LANE;
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
msleep(100);
@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct tc_data *tc = connector_to_tc(connector);
+ u32 req, avail;
+ u32 bits_per_pixel = 24;
+
/* DPI interface clock limitation: up to 154 MHz */
if (mode->clock > 154000)
return MODE_CLOCK_HIGH;
+ req = mode->clock * bits_per_pixel / 8;
+ avail = tc->link.base.num_lanes * tc->link.base.rate;
+
+ if (req > avail)
+ return MODE_BAD;
+
return MODE_OK;
}
@@ -1186,7 +1209,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
/* Create eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
+ tc->panel ? DRM_MODE_CONNECTOR_eDP :
+ DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ret;
@@ -1195,6 +1219,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
+ tc->connector.display_info.bus_flags =
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_NEGEDGE;
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
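The new mode_valid check above rejects modes whose payload exceeds the configured link: required bandwidth is pixel clock times bits per pixel over 8, available bandwidth is lanes times per-lane rate. A standalone restatement of that comparison (treating the per-lane rate as the 270000 value the DP helpers use for HBR, which lines up with kBytes/s of payload; the exact unit bookkeeping is an assumption here):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    static bool dp_mode_fits(uint32_t pixclk_khz, uint32_t bits_per_pixel,
                             uint32_t num_lanes, uint32_t per_lane_rate)
    {
            uint32_t req   = pixclk_khz * bits_per_pixel / 8;
            uint32_t avail = num_lanes * per_lane_rate;

            return req <= avail;
    }

    int main(void)
    {
            printf("1080p60, 2 lanes HBR: %s\n",
                   dp_mode_fits(148500, 24, 2, 270000) ? "fits" : "too much");
            printf("4k60,    2 lanes HBR: %s\n",
                   dp_mode_fits(594000, 24, 2, 270000) ? "fits" : "too much");
            return 0;
    }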
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 54e2ae614dcc..f4290f6b0c38 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1602,6 +1602,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
+ /*
+ * FIXME: Since prepare_fb and cleanup_fb are always called on
+ * the new_plane_state for async updates, we need to block framebuffer
+ * changes. This prevents use of an fb that has been cleaned up and
+ * double cleanups from occurring.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c40889888a16..9a1f41adfc67 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1296,12 +1296,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
return -EINVAL;
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state->acquire_ctx = &ctx;
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 2d6c491a0542..516e82d0ed50 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1273,6 +1273,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* LG LP140WF6-SPM1 eDP panel */
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+ /* Apple panels need some additional handling to support PSR */
+ { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
};
#undef OUI
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d3af098b0922..d73703a695e8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1621,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
var_1->transp.msb_right == var_2->transp.msb_right;
}
+static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
+ u8 depth)
+{
+ switch (depth) {
+ case 8:
+ var->red.offset = 0;
+ var->green.offset = 0;
+ var->blue.offset = 0;
+ var->red.length = 8; /* 8bit DAC */
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 15:
+ var->red.offset = 10;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 5;
+ var->blue.length = 5;
+ var->transp.offset = 15;
+ var->transp.length = 1;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ break;
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ break;
+ default:
+ break;
+ }
+}
+
/**
* drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
* @var: screeninfo to check
@@ -1632,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
- if (var->pixclock != 0 || in_dbg_master())
+ if (in_dbg_master())
return -EINVAL;
+ if (var->pixclock != 0) {
+ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+ var->pixclock = 0;
+ }
+
if ((drm_format_info_block_width(fb->format, 0) > 1) ||
(drm_format_info_block_height(fb->format, 0) > 1))
return -EINVAL;
@@ -1655,6 +1718,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
/*
+ * Workaround for SDL 1.2, which is known to be setting all pixel format
+ * field values to zero in some cases. We treat this situation as a
+ * kind of "use some reasonable autodetected values".
+ */
+ if (!var->red.offset && !var->green.offset &&
+ !var->blue.offset && !var->transp.offset &&
+ !var->red.length && !var->green.length &&
+ !var->blue.length && !var->transp.length &&
+ !var->red.msb_right && !var->green.msb_right &&
+ !var->blue.msb_right && !var->transp.msb_right) {
+ drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
+ }
+
+ /*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
@@ -1967,59 +2044,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
- switch (fb->format->depth) {
- case 8:
- info->var.red.offset = 0;
- info->var.green.offset = 0;
- info->var.blue.offset = 0;
- info->var.red.length = 8; /* 8bit DAC */
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
- case 15:
- info->var.red.offset = 10;
- info->var.green.offset = 5;
- info->var.blue.offset = 0;
- info->var.red.length = 5;
- info->var.green.length = 5;
- info->var.blue.length = 5;
- info->var.transp.offset = 15;
- info->var.transp.length = 1;
- break;
- case 16:
- info->var.red.offset = 11;
- info->var.green.offset = 5;
- info->var.blue.offset = 0;
- info->var.red.length = 5;
- info->var.green.length = 6;
- info->var.blue.length = 5;
- info->var.transp.offset = 0;
- break;
- case 24:
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
- case 32:
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 24;
- info->var.transp.length = 8;
- break;
- default:
- break;
- }
+ drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
info->var.xres = fb_width;
info->var.yres = fb_height;
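The refactor above moves the depth-to-channel-layout mapping into drm_fb_helper_fill_pixel_fmt() so both fill_var and check_var share one table; the SDL 1.2 workaround in check_var then autodetects exactly the layout fill_var would have advertised. As a standalone sketch of the same mapping for the 16 bpp (RGB565) and 32 bpp (alpha in bits 24-31) cases, using an invented chan/pixel_fmt struct rather than struct fb_var_screeninfo:

#include <stdio.h>

struct chan { int offset, length; };
struct pixel_fmt { struct chan red, green, blue, transp; };

/* Mirror of the switch above for two common depths; other depths omitted. */
static int fill_pixel_fmt(struct pixel_fmt *f, int depth)
{
    switch (depth) {
    case 16: /* RGB565: R in bits 11-15, G in 5-10, B in 0-4 */
        f->red    = (struct chan){ 11, 5 };
        f->green  = (struct chan){ 5, 6 };
        f->blue   = (struct chan){ 0, 5 };
        f->transp = (struct chan){ 0, 0 };
        return 0;
    case 32: /* 8 bits per channel, alpha in the top byte */
        f->red    = (struct chan){ 16, 8 };
        f->green  = (struct chan){ 8, 8 };
        f->blue   = (struct chan){ 0, 8 };
        f->transp = (struct chan){ 24, 8 };
        return 0;
    default:
        return -1;  /* unknown depth: leave the caller's values alone */
    }
}

int main(void)
{
    struct pixel_fmt f;
    if (!fill_pixel_fmt(&f, 16))
        printf("depth 16: red at bit %d, %d bits wide\n",
               f.red.offset, f.red.length);
    return 0;
}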
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 99cba8ea5d82..5df1256618cc 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count = cl->object_count;
- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
+ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+ array_size(object_count, sizeof(__u32)));
if (IS_ERR(object_ids))
return PTR_ERR(object_ids);
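array_size() saturates instead of silently wrapping when object_count * sizeof(__u32) overflows, so an oversized count makes the copy fail rather than under-allocate. The equivalent overflow check in plain C might look like the sketch below; check_mul() is an invented helper, and the kernel's array_size() signals overflow by returning SIZE_MAX rather than an error code.

#include <stdint.h>
#include <stdio.h>

/* Multiply n * size, reporting overflow instead of wrapping. */
static int check_mul(size_t n, size_t size, size_t *out)
{
    if (size != 0 && n > SIZE_MAX / size)
        return -1;      /* would overflow */
    *out = n * size;
    return 0;
}

int main(void)
{
    size_t bytes;
    if (check_mul(SIZE_MAX / 2, sizeof(uint32_t), &bytes))
        puts("overflow detected, allocation refused");
    else
        printf("allocating %zu bytes\n", bytes);
    return 0;
}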
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index cd9bc0ce9be0..004191d01772 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -459,11 +459,11 @@ static int set_property_atomic(struct drm_mode_object *obj,
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, 0);
-
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
+
+ drm_modeset_acquire_init(&ctx, 0);
state->acquire_ctx = &ctx;
retry:
if (prop == state->dev->mode_config.dpms_property) {
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 24a750436559..f91e02c87fd8 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
if (mode->hsync)
return mode->hsync;
- if (mode->htotal < 0)
+ if (mode->htotal <= 0)
return 0;
calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
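The hunk tightens the guard from "< 0" to "<= 0", since an htotal of zero would otherwise divide by zero on the next line. The computation itself (mode clock in kHz, htotal in pixels, result in Hz as the comment on that line states) is small enough to show standalone; drm_display_mode fields are replaced by plain parameters here.

#include <stdio.h>

/* hsync (Hz) = pixel clock (kHz) * 1000 / horizontal total (pixels),
 * returning 0 for a mode with no usable htotal, as the fix above does. */
static int mode_hsync_hz(int clock_khz, int htotal)
{
    if (htotal <= 0)
        return 0;
    return (clock_khz * 1000) / htotal;
}

int main(void)
{
    /* 1920x1080@60: 148500 kHz clock, 2200 pixel htotal -> 67500 Hz */
    printf("hsync = %d Hz\n", mode_hsync_hz(148500, 2200));
    printf("degenerate mode -> %d\n", mode_hsync_hz(148500, 0));
    return 0;
}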
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a9d9df6c85ad..693748ad8b88 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
- GFP_KERNEL | __GFP_COMP);
+ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+ &dmah->busaddr,
+ GFP_KERNEL | __GFP_COMP);
if (dmah->vaddr == NULL) {
kfree(dmah);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b5475c91e2ef..e9f343b124b0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 5af11cf1b482..e1675a00df12 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -41,7 +41,7 @@ struct intel_gvt_mpt {
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
- void (*detach_vgpu)(unsigned long handle);
+ void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
int (*enable_page_track)(unsigned long handle, u64 gfn);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c1072143da1d..dd3dfd00f4e6 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
unsigned int index;
u64 virtaddr;
- unsigned long req_size, pgoff = 0;
+ unsigned long req_size, pgoff, req_start;
pgprot_t pg_prot;
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
pg_prot = vma->vm_page_prot;
virtaddr = vma->vm_start;
req_size = vma->vm_end - vma->vm_start;
- pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
+ if (!intel_vgpu_in_aperture(vgpu, req_start))
+ return -EINVAL;
+ if (req_start + req_size >
+ vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+ return -EINVAL;
+
+ pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}
@@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
return 0;
}
-static void kvmgt_detach_vgpu(unsigned long handle)
+static void kvmgt_detach_vgpu(void *p_vgpu)
{
- /* nothing to do here */
+ int i;
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ if (!vgpu->vdev.region)
+ return;
+
+ for (i = 0; i < vgpu->vdev.num_regions; i++)
+ if (vgpu->vdev.region[i].ops->release)
+ vgpu->vdev.region[i].ops->release(vgpu,
+ &vgpu->vdev.region[i]);
+ vgpu->vdev.num_regions = 0;
+ kfree(vgpu->vdev.region);
+ vgpu->vdev.region = NULL;
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
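Before this change the mmap handler ignored vma->vm_pgoff and always mapped from the start of the vGPU aperture; the fix derives the page offset from the caller (masked by VFIO_PCI_OFFSET_SHIFT) and range-checks it against the aperture before remapping. The bounds check reduces to interval containment, sketched here with invented names; the real code works in page offsets and VFIO region indices.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Is [req_start, req_start + req_size) fully inside
 * [ap_offset, ap_offset + ap_size)? Written to avoid wrap-around. */
static bool range_in_aperture(uint64_t req_start, uint64_t req_size,
                              uint64_t ap_offset, uint64_t ap_size)
{
    if (req_start < ap_offset)
        return false;
    if (req_size > ap_size)
        return false;
    return req_start - ap_offset <= ap_size - req_size;
}

int main(void)
{
    /* 64 MiB aperture at offset 0; a 16 MiB request at 48 MiB fits,
     * the same request at 56 MiB would run past the end. */
    uint64_t mib = 1024 * 1024;
    printf("%d\n", range_in_aperture(48 * mib, 16 * mib, 0, 64 * mib));
    printf("%d\n", range_in_aperture(56 * mib, 16 * mib, 0, 64 * mib));
    return 0;
}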
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 67f19992b226..3ed34123d8d1 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
if (!intel_gvt_host.mpt->detach_vgpu)
return;
- intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+ intel_gvt_host.mpt->detach_vgpu(vgpu);
}
#define MSI_CAP_CONTROL(offset) (offset + 2)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1ad8c5e1455d..55bb7885e228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+ wa_ctx->indirect_ctx.obj = NULL;
+ wa_ctx->indirect_ctx.shadow_va = NULL;
}
static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -356,6 +359,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
return 0;
}
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+ struct i915_request *rq;
+ int ret = 0;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (workload->req)
+ goto out;
+
+ rq = i915_request_alloc(engine, shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
+ goto out;
+ }
+ workload->req = i915_request_get(rq);
+out:
+ return ret;
+}
+
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -372,12 +402,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
struct intel_context *ce;
- struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (workload->req)
+ if (workload->shadow)
return 0;
ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@@ -417,22 +446,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
goto err_shadow;
}
- rq = i915_request_alloc(engine, shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto err_shadow;
- }
- workload->req = i915_request_get(rq);
-
- ret = populate_shadow_context(workload);
- if (ret)
- goto err_req;
-
+ workload->shadow = true;
return 0;
-err_req:
- rq = fetch_and_zero(&workload->req);
- i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
err_unpin:
@@ -671,23 +686,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = intel_gvt_workload_req_alloc(workload);
+ if (ret)
+ goto err_req;
+
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
goto out;
- ret = prepare_workload(workload);
+ ret = populate_shadow_context(workload);
+ if (ret) {
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ goto out;
+ }
+ ret = prepare_workload(workload);
out:
- if (ret)
- workload->status = ret;
-
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
i915_request_add(workload->req);
workload->dispatched = true;
}
-
+err_req:
+ if (ret)
+ workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
mutex_unlock(&vgpu->vgpu_lock);
return ret;
@@ -891,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
list_del_init(&workload->list);
- if (!workload->status) {
- release_shadow_batch_buffer(workload);
- release_shadow_wa_ctx(&workload->wa_ctx);
- }
-
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
/* if workload->status is not successful means HW GPU
* has occurred GPU hang or something wrong with i915/GVT,
@@ -1263,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu_submission *s = &workload->vgpu->submission;
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+
if (workload->shadow_mm)
intel_vgpu_mm_put(workload->shadow_mm);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ca5529d0e48e..2065cba59aab 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,6 +83,7 @@ struct intel_vgpu_workload {
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
+ bool shadow; /* whether the guest request has been shadowed */
int status;
struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 38dcee1ca062..40a61ef9aac1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
intel_runtime_pm_get(i915);
gpu = i915_capture_gpu_state(i915);
intel_runtime_pm_put(i915);
- if (!gpu)
- return -ENOMEM;
+ if (IS_ERR(gpu))
+ return PTR_ERR(gpu);
file->private_data = gpu;
return 0;
@@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
- file->private_data = i915_first_error_state(inode->i_private);
+ struct i915_gpu_state *error;
+
+ error = i915_first_error_state(inode->i_private);
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ file->private_data = error;
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 216f52b744a6..c882ea94172c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+ unsigned long addr, unsigned long size)
+{
+ if (vma->vm_file != filp)
+ return false;
+
+ return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+}
+
/**
* i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
* it is mapped to.
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINTR;
}
vma = find_vma(mm, addr);
- if (vma)
+ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
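find_vma() only guarantees that the returned VMA ends above addr; between vm_mmap_pgoff() dropping the lock and this lookup, another thread may have unmapped the range and put a different mapping there. __vma_matches() therefore re-checks that the VMA still refers to the same file, start and size before its page protection is rewritten. A tiny sketch of that predicate with stand-in types (not the kernel's struct vm_area_struct):

#include <stdbool.h>
#include <stdio.h>

struct fake_file { int id; };
struct fake_vma {
    unsigned long start, end;
    struct fake_file *file;
};

/* Accept the VMA only if it is exactly the mapping we just created. */
static bool vma_matches(const struct fake_vma *vma, const struct fake_file *filp,
                        unsigned long addr, unsigned long size)
{
    if (vma->file != filp)
        return false;
    return vma->start == addr && vma->end - vma->start == size;
}

int main(void)
{
    struct fake_file f = { 1 }, other = { 2 };
    struct fake_vma vma = { 0x10000, 0x14000, &f };

    printf("%d\n", vma_matches(&vma, &f, 0x10000, 0x4000));     /* 1 */
    printf("%d\n", vma_matches(&vma, &other, 0x10000, 0x4000)); /* 0 */
    return 0;
}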
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..bd17dd1f5da5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ int err;
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- return i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto unpin;
+
+ return 0;
+
+unpin:
+ ppgtt->pin_count = 0;
+ return err;
}
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 07465123c166..3f9ce403c755 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
{
struct i915_gpu_state *error;
+ /* Check if GPU capture has been disabled */
+ error = READ_ONCE(i915->gpu_error.first_error);
+ if (IS_ERR(error))
+ return error;
+
error = kzalloc(sizeof(*error), GFP_ATOMIC);
- if (!error)
- return NULL;
+ if (!error) {
+ i915_disable_error_state(i915, -ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
kref_init(&error->ref);
error->i915 = i915;
@@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
return;
error = i915_capture_gpu_state(i915);
- if (!error) {
- DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
- i915_disable_error_state(i915, -ENOMEM);
+ if (IS_ERR(error))
return;
- }
i915_error_capture_msg(i915, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915)
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
- if (error)
+ if (!IS_ERR_OR_NULL(error))
i915_gpu_state_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
@@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
- i915->gpu_error.first_error = NULL;
+ if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
+ i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
- if (!IS_ERR(error))
+ if (!IS_ERR_OR_NULL(error))
i915_gpu_state_put(error);
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d6c8f8fdfda5..cf7c66bb3ed9 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -5,6 +5,7 @@
*/
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"
@@ -478,7 +479,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
* counter value.
*/
spin_lock_irqsave(&i915->pmu.lock, flags);
- spin_lock(&kdev->power.lock);
/*
* After the above branch intel_runtime_pm_get_if_in_use failed
@@ -491,16 +491,13 @@ static u64 get_rc6(struct drm_i915_private *i915)
* suspended and if not we cannot do better than report the last
* known RC6 value.
*/
- if (kdev->power.runtime_status == RPM_SUSPENDED) {
- if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- i915->pmu.suspended_jiffies_last =
- kdev->power.suspended_jiffies;
+ if (pm_runtime_status_suspended(kdev)) {
+ val = pm_runtime_suspended_time(kdev);
- val = kdev->power.suspended_jiffies -
- i915->pmu.suspended_jiffies_last;
- val += jiffies - kdev->power.accounting_timestamp;
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_time_last = val;
- val = jiffies_to_nsecs(val);
+ val -= i915->pmu.suspended_time_last;
val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
@@ -510,7 +507,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
}
- spin_unlock(&kdev->power.lock);
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
@@ -594,7 +590,8 @@ static void i915_pmu_enable(struct perf_event *event)
* Update the bitmask of enabled events and increment
* the event reference counter.
*/
- GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+ BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
i915->pmu.enable |= BIT_ULL(bit);
i915->pmu.enable_count[bit]++;
@@ -615,11 +612,16 @@ static void i915_pmu_enable(struct perf_event *event)
engine = intel_engine_lookup_user(i915,
engine_event_class(event),
engine_event_instance(event));
- GEM_BUG_ON(!engine);
- engine->pmu.enable |= BIT(sample);
- GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+ BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
+ I915_ENGINE_SAMPLE_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
+ I915_ENGINE_SAMPLE_COUNT);
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+
+ engine->pmu.enable |= BIT(sample);
engine->pmu.enable_count[sample]++;
}
@@ -649,9 +651,11 @@ static void i915_pmu_disable(struct perf_event *event)
engine = intel_engine_lookup_user(i915,
engine_event_class(event),
engine_event_instance(event));
- GEM_BUG_ON(!engine);
- GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+ GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+
/*
* Decrement the reference count and clear the enabled
* bitmask when the last listener on an event goes away.
@@ -660,7 +664,7 @@ static void i915_pmu_disable(struct perf_event *event)
engine->pmu.enable &= ~BIT(sample);
}
- GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
/*
* Decrement the reference count and clear the enabled
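The RC6 path now reads the PM core's accumulated suspended time through pm_runtime_suspended_time() instead of poking at jiffies fields under power.lock. The estimation it performs while the device stays runtime-suspended is simple enough to model standalone; the names below are invented, and the units are nanoseconds as in the new code.

#include <stdint.h>
#include <stdio.h>

struct rc6_state {
    uint64_t last_rc6;            /* last RC6 value read from hardware */
    uint64_t suspended_time_base; /* suspended time when estimation began */
    int estimating;
};

/* While suspended, assume every nanosecond spent suspended is RC6. */
static uint64_t estimate_rc6(struct rc6_state *s, uint64_t suspended_time_now)
{
    if (!s->estimating) {
        s->suspended_time_base = suspended_time_now;
        s->estimating = 1;
    }
    return s->last_rc6 + (suspended_time_now - s->suspended_time_base);
}

int main(void)
{
    struct rc6_state s = { .last_rc6 = 1000000000ull }; /* 1 s of RC6 so far */

    printf("%llu\n", (unsigned long long)estimate_rc6(&s, 5000000000ull));
    printf("%llu\n", (unsigned long long)estimate_rc6(&s, 5250000000ull));
    return 0;
}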
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 7f164ca3db12..4fc4f2478301 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -31,6 +31,8 @@ enum {
((1 << I915_PMU_SAMPLE_BITS) + \
(I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
+
struct i915_pmu_sample {
u64 cur;
};
@@ -95,9 +97,9 @@ struct i915_pmu {
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
/**
- * @suspended_jiffies_last: Cached suspend time from PM core.
+ * @suspended_time_last: Cached suspend time from PM core.
*/
- unsigned long suspended_jiffies_last;
+ u64 suspended_time_last;
/**
* @i915_attr: Memory block holding device attributes.
*/
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a7d60509ca7..067054cf4a86 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1790,7 +1790,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40
#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40
#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840
-#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \
+#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_B_GRP_OFFSET, \
_CNL_PORT_TX_B_GRP_OFFSET, \
@@ -1798,7 +1798,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_F_GRP_OFFSET) + \
4 * (dw))
-#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \
+#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1834,9 +1834,9 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW4_LN0_AE 0x162450
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
-#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
-#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
-#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
+#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
+#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
+#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@@ -1864,8 +1864,12 @@ enum i915_power_well_id {
#define RTERM_SELECT(x) ((x) << 3)
#define RTERM_SELECT_MASK (0x7 << 3)
-#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
-#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
+#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
+#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 535caebd9813..c0cfe7ae2ba5 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
ssize_t ret;
gpu = i915_first_error_state(i915);
- if (gpu) {
+ if (IS_ERR(gpu)) {
+ ret = PTR_ERR(gpu);
+ } else if (gpu) {
ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
i915_gpu_state_put(gpu);
} else {
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 9726df37c4c4..540e20eb032c 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -123,12 +123,6 @@ static inline u64 ptr_to_u64(const void *ptr)
#include <linux/list.h>
-static inline int list_is_first(const struct list_head *list,
- const struct list_head *head)
-{
- return head->next == list;
-}
-
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ae55a6865d5c..b32681632f30 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -984,7 +984,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
- ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
+ ret = component_add_typed(dev_priv->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f3e1d6a0b7dd..7edce1b7b348 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
-struct icl_combo_phy_ddi_buf_trans {
- u32 dw2_swing_select;
- u32 dw2_swing_scalar;
- u32 dw4_scaling;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
- /* Voltage mV db */
- { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
- { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
- { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
- { 0x2, 0x98, 0x900F }, /* 400 9.5 */
- { 0xB, 0x70, 0x0018 }, /* 600 0.0 */
- { 0xB, 0x70, 0x3015 }, /* 600 3.5 */
- { 0xB, 0x70, 0x6012 }, /* 600 6.0 */
- { 0x5, 0x00, 0x0018 }, /* 800 0.0 */
- { 0x5, 0x00, 0x3015 }, /* 800 3.5 */
- { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
- /* Voltage mV db */
- { 0x0, 0x00, 0x00 }, /* 200 0.0 */
- { 0x0, 0x00, 0x00 }, /* 200 1.5 */
- { 0x0, 0x00, 0x00 }, /* 200 4.0 */
- { 0x0, 0x00, 0x00 }, /* 200 6.0 */
- { 0x0, 0x00, 0x00 }, /* 250 0.0 */
- { 0x0, 0x00, 0x00 }, /* 250 1.5 */
- { 0x0, 0x00, 0x00 }, /* 250 4.0 */
- { 0x0, 0x00, 0x00 }, /* 300 0.0 */
- { 0x0, 0x00, 0x00 }, /* 300 1.5 */
- { 0x0, 0x00, 0x00 }, /* 350 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
- /* Voltage mV db */
- { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
- { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
- { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
- { 0x2, 0x98, 0x900F }, /* 400 9.5 */
- { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
- { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
- { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
- { 0x5, 0x76, 0x0018 }, /* 800 0.0 */
- { 0x5, 0x76, 0x3015 }, /* 800 3.5 */
- { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
- /* Voltage mV db */
- { 0x0, 0x00, 0x00 }, /* 200 0.0 */
- { 0x0, 0x00, 0x00 }, /* 200 1.5 */
- { 0x0, 0x00, 0x00 }, /* 200 4.0 */
- { 0x0, 0x00, 0x00 }, /* 200 6.0 */
- { 0x0, 0x00, 0x00 }, /* 250 0.0 */
- { 0x0, 0x00, 0x00 }, /* 250 1.5 */
- { 0x0, 0x00, 0x00 }, /* 250 4.0 */
- { 0x0, 0x00, 0x00 }, /* 300 0.0 */
- { 0x0, 0x00, 0x00 }, /* 300 1.5 */
- { 0x0, 0x00, 0x00 }, /* 350 0.0 */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
+ { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
+ { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
+ { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
+ { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
};
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
- /* Voltage mV db */
- { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
- { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
- { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
- { 0x2, 0x98, 0x900F }, /* 400 9.5 */
- { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
- { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
- { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
- { 0x5, 0x71, 0x0018 }, /* 800 0.0 */
- { 0x5, 0x71, 0x3015 }, /* 800 3.5 */
- { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
- /* Voltage mV db */
- { 0x0, 0x00, 0x00 }, /* 200 0.0 */
- { 0x0, 0x00, 0x00 }, /* 200 1.5 */
- { 0x0, 0x00, 0x00 }, /* 200 4.0 */
- { 0x0, 0x00, 0x00 }, /* 200 6.0 */
- { 0x0, 0x00, 0x00 }, /* 250 0.0 */
- { 0x0, 0x00, 0x00 }, /* 250 1.5 */
- { 0x0, 0x00, 0x00 }, /* 250 4.0 */
- { 0x0, 0x00, 0x00 }, /* 300 0.0 */
- { 0x0, 0x00, 0x00 }, /* 300 1.5 */
- { 0x0, 0x00, 0x00 }, /* 350 0.0 */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
+ { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
+ { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
-static const struct icl_combo_phy_ddi_buf_trans *
+static const struct cnl_ddi_buf_trans *
icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
- int type, int *n_entries)
+ int type, int rate, int *n_entries)
{
- u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
-
- if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
- switch (voltage) {
- case VOLTAGE_INFO_0_85V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
- return icl_combo_phy_ddi_translations_edp_0_85V;
- case VOLTAGE_INFO_0_95V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
- return icl_combo_phy_ddi_translations_edp_0_95V;
- case VOLTAGE_INFO_1_05V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
- return icl_combo_phy_ddi_translations_edp_1_05V;
- default:
- MISSING_CASE(voltage);
- return NULL;
- }
- } else {
- switch (voltage) {
- case VOLTAGE_INFO_0_85V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
- return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
- case VOLTAGE_INFO_0_95V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
- return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
- case VOLTAGE_INFO_1_05V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
- return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
- default:
- MISSING_CASE(voltage);
- return NULL;
- }
+ if (type == INTEL_OUTPUT_HDMI) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+ } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+ return icl_combo_phy_ddi_translations_edp_hbr3;
+ } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
}
+
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+ return icl_combo_phy_ddi_translations_dp_hbr2;
}
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
if (IS_ICELAKE(dev_priv)) {
if (intel_port_is_combophy(dev_priv, port))
- icl_get_combo_buf_trans(dev_priv, port,
- INTEL_OUTPUT_HDMI, &n_entries);
+ icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+ 0, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
default_entry = n_entries - 1;
@@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
return DDI_CLK_SEL_TBT_810;
default:
MISSING_CASE(clock);
- break;
+ return DDI_CLK_SEL_NONE;
}
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
int n_entries;
if (IS_ICELAKE(dev_priv)) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, encoder->type,
- &n_entries);
+ intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
+ u32 level, enum port port, int type,
+ int rate)
{
- const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+ const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val;
int ln;
ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
- &n_entries);
+ rate, &n_entries);
if (!ddi_translations)
return;
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
level = n_entries - 1;
}
- /* Set PORT_TX_DW5 Rterm Sel to 110b. */
+ /* Set PORT_TX_DW5 */
val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
- val &= ~RTERM_SELECT_MASK;
+ val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
+ TAP2_DISABLE | TAP3_DISABLE);
+ val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
-
- /* Program PORT_TX_DW5 */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
- /* Set DisableTap2 and DisableTap3 if MIPI DSI
- * Clear DisableTap2 and DisableTap3 for all other Ports
- */
- if (type == INTEL_OUTPUT_DSI) {
- val |= TAP2_DISABLE;
- val |= TAP3_DISABLE;
- } else {
- val &= ~TAP2_DISABLE;
- val &= ~TAP3_DISABLE;
- }
+ val |= TAP3_DISABLE;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW2 */
val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
- val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
- val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
+ val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
+ val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
- val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+ val |= RCOMP_SCALAR(0x98);
I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
/* Program PORT_TX_DW4 */
@@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
- val |= ddi_translations[level].dw4_scaling;
+ val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
+ val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
+ val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
}
+
+ /* Program PORT_TX_DW7 */
+ val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+ val &= ~N_SCALAR_MASK;
+ val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+ I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+ icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
/* 6. Set training enable to trigger update */
val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
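The rework replaces the per-VccIO voltage tables with per-output-type tables keyed on link rate, so table selection collapses to the chain at the end of icl_get_combo_buf_trans(). Reduced to its decision structure, with table contents stripped out and the enum and struct names invented:

#include <stdio.h>

enum output_type { OUT_DP, OUT_EDP, OUT_HDMI };

struct buf_trans_table { const char *name; int n_entries; };

static const struct buf_trans_table tbl_hdmi     = { "hdmi",     7 };
static const struct buf_trans_table tbl_edp_hbr3 = { "edp_hbr3", 10 };
static const struct buf_trans_table tbl_edp_hbr2 = { "edp_hbr2", 10 };
static const struct buf_trans_table tbl_dp_hbr2  = { "dp_hbr2",  10 };

/* Same precedence as the new icl_get_combo_buf_trans(): HDMI first,
 * then eDP above HBR2 rates, then low-vswing eDP, DP/eDP otherwise. */
static const struct buf_trans_table *
pick_table(enum output_type type, int rate_khz, int low_vswing)
{
    if (type == OUT_HDMI)
        return &tbl_hdmi;
    if (type == OUT_EDP && rate_khz > 540000)
        return &tbl_edp_hbr3;
    if (type == OUT_EDP && low_vswing)
        return &tbl_edp_hbr2;
    return &tbl_dp_hbr2;
}

int main(void)
{
    printf("%s\n", pick_table(OUT_EDP, 810000, 1)->name); /* edp_hbr3 */
    printf("%s\n", pick_table(OUT_EDP, 270000, 1)->name); /* edp_hbr2 */
    printf("%s\n", pick_table(OUT_DP,  540000, 0)->name); /* dp_hbr2 */
    return 0;
}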
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3da9c0f9e948..248128126422 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
}
}
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ /*
+ * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
+ * the hardware when a high res display is plugged in. DPLL P
+ * divider is zero, and the pipe timings are bonkers. We'll
+ * try to disable everything in that case.
+ *
+ * FIXME would be nice to be able to sanitize this state
+ * without several WARNs, but for now let's take the easy
+ * road.
+ */
+ return IS_GEN6(dev_priv) &&
+ crtc_state->base.active &&
+ crtc_state->shared_dpll &&
+ crtc_state->port_clock == 0;
+}
+
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc_state *crtc_state = crtc ?
+ to_intel_crtc_state(crtc->base.state) : NULL;
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
* pipe itself being active. */
- bool has_active_crtc = encoder->base.crtc &&
- to_intel_crtc(encoder->base.crtc)->active;
+ bool has_active_crtc = crtc_state &&
+ crtc_state->base.active;
+
+ if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+ DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+ pipe_name(crtc->pipe));
+ has_active_crtc = false;
+ }
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
* the encoder manually again. */
- if (encoder->base.crtc) {
- struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+ if (crtc_state) {
+ struct drm_encoder *best_encoder;
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
+
+ /* avoid oopsing in case the hooks consult best_encoder */
+ best_encoder = connector->base.state->best_encoder;
+ connector->base.state->best_encoder = &encoder->base;
+
if (encoder->disable)
- encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+ encoder->disable(encoder, crtc_state,
+ connector->base.state);
if (encoder->post_disable)
- encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+ encoder->post_disable(encoder, crtc_state,
+ connector->base.state);
+
+ connector->base.state->best_encoder = best_encoder;
}
encoder->base.crtc = NULL;
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 4262452963b3..79203666fc62 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -26,6 +26,7 @@
#define _INTEL_DISPLAY_H_
#include <drm/drm_util.h>
+#include <drm/i915_drm.h>
enum i915_gpio {
GPIOA,
@@ -150,21 +151,6 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
-enum port {
- PORT_NONE = -1,
-
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- PORT_F,
-
- I915_MAX_PORTS
-};
-
-#define port_name(p) ((p) + 'A')
-
/*
* Ports identifier referenced from other drivers.
* Expected to remain stable over time
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fdd2cbc56fa3..22a74608c6e4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
- if (port == PORT_B)
+ if (intel_port_is_combophy(dev_priv, port) &&
+ !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f94a04b4ad87..e9ddeaf05a14 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,16 @@ struct intel_fbdev {
unsigned long vma_flags;
async_cookie_t cookie;
int preferred_bpp;
+
+ /* Whether or not fbdev hpd processing is temporarily suspended */
+ bool hpd_suspended : 1;
+ /* Set when a hotplug was received while HPD processing was
+ * suspended
+ */
+ bool hpd_waiting : 1;
+
+ /* Protects hpd_suspended */
+ struct mutex hpd_lock;
};
struct intel_encoder {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb5bb5b32a60..4ee16b264dbe 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
- mask = GENMASK(count - 1, 0);
+ conn_seq = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
- conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -371,7 +370,8 @@ retry:
if (conn_configured & BIT(i))
continue;
- if (conn_seq == 0 && !connector->has_tile)
+ /* First pass, only consider tiled connectors */
+ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -475,8 +475,10 @@ retry:
conn_configured |= BIT(i);
}
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ if (conn_configured != conn_seq) { /* repeat until no more are found */
+ conn_seq = conn_configured;
goto retry;
+ }
/*
* If the BIOS didn't enable everything it could, fall back to have the
@@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev)
if (ifbdev == NULL)
return -ENOMEM;
+ mutex_init(&ifbdev->hpd_lock);
drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
if (!intel_fbdev_init_bios(dev, ifbdev))
@@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
intel_fbdev_destroy(ifbdev);
}
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
+ * was received while HPD was suspended.
+ */
+static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+{
+ bool send_hpd = false;
+
+ mutex_lock(&ifbdev->hpd_lock);
+ ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
+ send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
+ ifbdev->hpd_waiting = false;
+ mutex_unlock(&ifbdev->hpd_lock);
+
+ if (send_hpd) {
+ DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
+ }
+}
+
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
*/
if (state != FBINFO_STATE_RUNNING)
flush_work(&dev_priv->fbdev_suspend_work);
+
console_lock();
} else {
/*
@@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
+
+ intel_fbdev_hpd_set_suspend(ifbdev, state);
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ bool send_hpd;
if (!ifbdev)
return;
intel_fbdev_sync(ifbdev);
- if (ifbdev->vma || ifbdev->helper.deferred_setup)
+
+ mutex_lock(&ifbdev->hpd_lock);
+ send_hpd = !ifbdev->hpd_suspended;
+ ifbdev->hpd_waiting = true;
+ mutex_unlock(&ifbdev->hpd_lock);
+
+ if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
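The fbdev change adds a small state machine: while HPD handling is suspended, incoming hotplugs only mark hpd_waiting; on resume, a pending flag triggers one deferred drm_fb_helper_hotplug_event(). The same deferral pattern in plain pthreads form, with invented names and a stubbed event handler, purely as a sketch of the locking and flag flow:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct hpd_state {
    pthread_mutex_t lock;
    bool suspended;
    bool waiting;
};

static void handle_hotplug(void) { puts("reprobing connectors"); }

static void hpd_event(struct hpd_state *s)
{
    bool deliver;

    pthread_mutex_lock(&s->lock);
    deliver = !s->suspended;
    s->waiting = true;          /* remember the event either way */
    pthread_mutex_unlock(&s->lock);

    if (deliver)
        handle_hotplug();
}

static void hpd_set_suspend(struct hpd_state *s, bool suspend)
{
    bool deliver;

    pthread_mutex_lock(&s->lock);
    s->suspended = suspend;
    deliver = !suspend && s->waiting;   /* flush a deferred event on resume */
    s->waiting = false;
    pthread_mutex_unlock(&s->lock);

    if (deliver)
        handle_hotplug();
}

int main(void)
{
    struct hpd_state s = { PTHREAD_MUTEX_INITIALIZER, false, false };

    hpd_set_suspend(&s, true);
    hpd_event(&s);              /* deferred */
    hpd_set_suspend(&s, false); /* delivered here */
    return 0;
}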
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4be167dcd209..eab9341a5152 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
*/
if (!(prio & I915_PRIORITY_NEWCLIENT)) {
prio |= I915_PRIORITY_NEWCLIENT;
+ active->sched.attr.priority = prio;
list_move_tail(&active->sched.link,
i915_sched_lookup_priolist(engine, prio));
}
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
+ GEM_BUG_ON(last &&
+ need_preempt(engine, last, rq_prio(rq)));
+
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -2244,6 +2248,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
+ intel_engine_init_workarounds(engine);
+
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2310,7 +2316,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
}
intel_engine_init_whitelist(engine);
- intel_engine_init_workarounds(engine);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b8f106d9ecf8..3ac20153705a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -55,7 +55,12 @@
struct opregion_header {
u8 signature[16];
u32 size;
- u32 opregion_ver;
+ struct {
+ u8 rsvd;
+ u8 revision;
+ u8 minor;
+ u8 major;
+ } __packed over;
u8 bios_ver[32];
u8 vbios_ver[16];
u8 driver_ver[16];
@@ -119,7 +124,8 @@ struct opregion_asle {
u64 fdss;
u32 fdsp;
u32 stat;
- u64 rvda; /* Physical address of raw vbt data */
+ u64 rvda; /* Physical (2.0) or relative to the opregion base (2.1+)
+ * address of raw VBT data. */
u32 rvds; /* Size of raw vbt data */
u8 rsvd[58];
} __packed;
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
opregion->header = base;
opregion->lid_state = base + ACPI_CLID;
+ DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
+ opregion->header->over.major,
+ opregion->header->over.minor,
+ opregion->header->over.revision);
+
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (dmi_check_system(intel_no_opregion_vbt))
goto out;
- if (opregion->header->opregion_ver >= 2 && opregion->asle &&
+ if (opregion->header->over.major >= 2 && opregion->asle &&
opregion->asle->rvda && opregion->asle->rvds) {
- opregion->rvda = memremap(opregion->asle->rvda,
- opregion->asle->rvds,
+ resource_size_t rvda = opregion->asle->rvda;
+
+ /*
+ * opregion 2.0: rvda is the physical VBT address.
+ *
+ * opregion 2.1+: rvda is unsigned, relative offset from
+ * opregion base, and should never point within opregion.
+ */
+ if (opregion->header->over.major > 2 ||
+ opregion->header->over.minor >= 1) {
+ WARN_ON(rvda < OPREGION_SIZE);
+
+ rvda += asls;
+ }
+
+ opregion->rvda = memremap(rvda, opregion->asle->rvds,
MEMREMAP_WB);
+
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
goto out;
} else {
DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+ memunmap(opregion->rvda);
+ opregion->rvda = NULL;
}
}
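Pre-2.1 OpRegions publish RVDA as a physical address; from 2.1 on it is an unsigned offset relative to the OpRegion base, which the code now adds back before memremap(). The address computation can be stated on its own; resolve_rvda() is an invented name, and asls stands for the OpRegion base physical address as in the driver.

#include <stdint.h>
#include <stdio.h>

/* OpRegion 2.0: rvda is already physical.
 * OpRegion 2.1+: rvda is an offset from the OpRegion base (asls). */
static uint64_t resolve_rvda(uint8_t major, uint8_t minor,
                             uint64_t asls, uint64_t rvda)
{
    if (major > 2 || (major == 2 && minor >= 1))
        return asls + rvda;
    return rvda;
}

int main(void)
{
    printf("2.0: %#llx\n",
           (unsigned long long)resolve_rvda(2, 0, 0x8f000000, 0x8f102000));
    printf("2.1: %#llx\n",
           (unsigned long long)resolve_rvda(2, 1, 0x8f000000, 0x2000));
    return 0;
}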
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 419e56342523..f71970df9936 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+ DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+ return;
+ }
+
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
return;
}
+
dev_priv->psr.sink_support = true;
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 72edaa7ff411..a1a7cc29fdd1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -415,16 +415,17 @@ struct intel_engine_cs {
/**
* @enable_count: Reference count for the enabled samplers.
*
- * Index number corresponds to the bit number from @enable.
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
*/
- unsigned int enable_count[I915_PMU_SAMPLE_BITS];
+ unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
/**
* @sample: Counter values for sampling events.
*
* Our internal timer stores the current counters in this field.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
*/
-#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
- struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+ struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
} pmu;
/*
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d2e003d8f3db..5170a0f5fe7b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane,
keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
- keymsk = key->channel_mask & 0x3ffffff;
+ keymsk = key->channel_mask & 0x7ffffff;
if (alpha < 0xff)
keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 2c5bbe317353..e31e263cf86b 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
int bus_format;
ret = of_property_read_u32(child, "reg", &i);
- if (ret || i < 0 || i > 1)
- return -EINVAL;
+ if (ret || i < 0 || i > 1) {
+ ret = -EINVAL;
+ goto free_child;
+ }
if (!of_device_is_available(child))
continue;
@@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
- channel->child = child;
/*
* The output port is port@4 with an external 4-port mux or
@@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
imx_ldb->lvds_mux ? 4 : 2, 0,
&channel->panel, &channel->bridge);
if (ret && ret != -ENODEV)
- return ret;
+ goto free_child;
/* panel ddc only if there is no bridge */
if (!channel->bridge) {
ret = imx_ldb_panel_ddc(dev, channel, child);
if (ret)
- return ret;
+ goto free_child;
}
bus_format = of_get_bus_format(dev, child);
@@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (bus_format < 0) {
dev_err(dev, "could not determine data mapping: %d\n",
bus_format);
- return bus_format;
+ ret = bus_format;
+ goto free_child;
}
channel->bus_format = bus_format;
+ channel->child = child;
ret = imx_ldb_register(drm, channel);
- if (ret)
- return ret;
+ if (ret) {
+ channel->child = NULL;
+ goto free_child;
+ }
}
dev_set_drvdata(dev, imx_ldb);
return 0;
+
+free_child:
+ of_node_put(child);
+ return ret;
}
static void imx_ldb_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index c390924de93d..21e964f6ab5c 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (ret)
return ret;
- /* CRTC should be enabled */
+ /* nothing to check when disabling or disabled */
if (!crtc_state->enable)
- return -EINVAL;
+ return 0;
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 75d97f1b2e8f..4f5c67f70c4d 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -46,7 +46,6 @@ struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
- bool enabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
-static void meson_crtc_enable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
- meson_crtc->enabled = true;
-}
-
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
-{
- struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
- struct meson_drm *priv = meson_crtc->priv;
-
- DRM_DEBUG_DRIVER("\n");
-
- if (!meson_crtc->enabled)
- meson_crtc_enable(crtc);
-
priv->viu.osd1_enabled = true;
}
@@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
-
- meson_crtc->enabled = false;
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
- if (crtc->state->enable && !meson_crtc->enabled)
- meson_crtc_enable(crtc);
-
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 3ee4d4a4ecba..12ff47b13668 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
};
+static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
static irqreturn_t meson_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
+ drm->mode_config.helper_private = &meson_mode_config_helpers;
/* Hardware Initialization */
@@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev,
remote_node = of_graph_get_remote_port_parent(ep);
if (!remote_node ||
remote_node == parent || /* Ignore parent endpoint */
- !of_device_is_available(remote_node))
+ !of_device_is_available(remote_node)) {
+ of_node_put(remote_node);
continue;
+ }
count += meson_probe_remote(pdev, match, remote, remote_node);
@@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev)
for_each_endpoint_of_node(np, ep) {
remote = of_graph_get_remote_port_parent(ep);
- if (!remote || !of_device_is_available(remote))
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
continue;
+ }
count += meson_probe_remote(pdev, &match, np, remote);
+ of_node_put(remote);
}
if (count && !match)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index cf549f1ed403..78c9e5a5e793 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,6 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
+ depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 5beb83d1cf87..d1662a75c7ec 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
@@ -84,6 +85,9 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
@@ -106,6 +110,12 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
gmu->freq = gmu->gpu_freqs[index];
+
+ /*
+ * Eventually we will want to scale the path vote with the frequency but
+ * for now leave it at max so that the performance is nominal.
+ */
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
@@ -705,6 +715,8 @@ out:
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int status, ret;
@@ -720,6 +732,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
if (ret)
goto out;
+ /* Set the bus quota to a reasonable value for boot */
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
+
a6xx_gmu_irq_enable(gmu);
/* Check to see if we are doing a cold or warm boot */
@@ -760,6 +775,8 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 val;
@@ -806,6 +823,9 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+ /* Remove the bus vote */
+ icc_set_bw(gpu->icc_path, 0, 0);
+
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->dev);
@@ -944,7 +964,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
np = dev_pm_opp_get_of_node(opp);
if (np) {
- of_property_read_u32(np, "qcom,level", &val);
+ of_property_read_u32(np, "opp-level", &val);
of_node_put(np);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2e4372ef17a3..27898475cdf4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -18,6 +18,7 @@
*/
#include <linux/ascii85.h>
+#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
@@ -747,6 +748,11 @@ static int adreno_get_pwrlevels(struct device *dev,
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
+ /* Check for an interconnect path for the bus */
+ gpu->icc_path = of_icc_get(dev, NULL);
+ if (IS_ERR(gpu->icc_path))
+ gpu->icc_path = NULL;
+
return 0;
}
@@ -765,7 +771,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->rev = config->rev;
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
- adreno_gpu_config.irqname = "kgsl_3d0_irq";
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
@@ -788,10 +793,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
+ struct msm_gpu *gpu = &adreno_gpu->base;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
+ icc_put(gpu->icc_path);
+
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index fd75870eb17f..6aefcd6db46b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
&pdpu->pipe_qos_cfg);
}
-static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
-{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
- struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
-
- if (!pdpu->is_rt_pipe)
- return;
-
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-}
-
/**
* _dpu_plane_set_ot_limit - set OT limit for the given plane
* @plane: Pointer to drm plane
@@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane)
}
#ifdef CONFIG_DEBUG_FS
+static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+ if (!pdpu->is_rt_pipe)
+ return;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
static ssize_t _dpu_plane_danger_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9cd6a96c6bf2..927e5d86f7c1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+ struct msm_gem_vma *vma, int prot,
+ struct sg_table *sgt, int npages);
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma);
@@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
@@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 51a95da694d8..c8886d3071fa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
struct page **pages;
+ int prot = IOMMU_READ;
+
+ if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+ prot |= IOMMU_WRITE;
WARN_ON(!mutex_is_locked(&msm_obj->lock));
@@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (IS_ERR(pages))
return PTR_ERR(pages);
- return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
- obj->size >> PAGE_SHIFT);
+ return msm_gem_map_vma(aspace, vma, prot,
+ msm_obj->sgt, obj->size >> PAGE_SHIFT);
}
/* get iova and pin it. Should have a matching put */
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 557360788084..49c04829cf34 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+ struct msm_gem_vma *vma, int prot,
+ struct sg_table *sgt, int npages)
{
unsigned size = npages << PAGE_SHIFT;
int ret = 0;
@@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
if (aspace->mmu)
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
- size, IOMMU_READ | IOMMU_WRITE);
+ size, prot);
if (ret)
vma->mapped = false;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5f3eff304355..10babd18e286 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
/* Get Interrupt: */
- gpu->irq = platform_get_irq_byname(pdev, config->irqname);
+ gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0) {
ret = gpu->irq;
DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index efb49bb64191..6241986bab51 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -19,6 +19,7 @@
#define __MSM_GPU_H__
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
@@ -31,7 +32,6 @@ struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
- const char *irqname;
uint64_t va_start;
uint64_t va_end;
unsigned int nr_rings;
@@ -63,7 +63,7 @@ struct msm_gpu_funcs {
struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p);
@@ -119,6 +119,8 @@ struct msm_gpu {
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
+ struct icc_path *icc_path;
+
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 90e9d0a48dc0..d21172933d92 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
char *fptr = &fifo->buf[fifo->head];
int n;
- wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+ wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+ if (!rd->open)
+ return;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +215,10 @@ out:
static int rd_release(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
+
rd->open = false;
+ wake_up_all(&rd->fifo_event);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 5f5be6368aed..c7a94c94dbf3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -253,6 +253,9 @@ nouveau_backlight_init(struct drm_connector *connector)
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
+ case NV_DEVICE_INFO_V0_PASCAL:
+ case NV_DEVICE_INFO_V0_VOLTA:
+ case NV_DEVICE_INFO_V0_TURING:
ret = nv50_backlight_init(nv_encoder, &props, &ops);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index bfbc9341e0c2..d9edb5785813 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2435,6 +2435,38 @@ nv140_chipset = {
};
static const struct nvkm_device_chip
+nv162_chipset = {
+ .name = "TU102",
+ .bar = tu104_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu104_devinit_new,
+ .fault = tu104_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu104_mc_new,
+ .mmu = tu104_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu104_ce_new,
+ .ce[1] = tu104_ce_new,
+ .ce[2] = tu104_ce_new,
+ .ce[3] = tu104_ce_new,
+ .ce[4] = tu104_ce_new,
+ .disp = tu104_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu104_fifo_new,
+};
+
+static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
.bar = tu104_bar_new,
@@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
case 0x140: device->chip = &nv140_chipset; break;
+ case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 816ccaedfc73..8675613e142b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -22,6 +22,7 @@
#include <engine/falcon.h>
#include <core/gpuobj.h>
+#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/fifo.h>
@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
}
}
- nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
- nvkm_wr32(device, base + 0x014, 0xffffffff);
+ if (nvkm_mc_enabled(device, engine->subdev.index)) {
+ nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+ nvkm_wr32(device, base + 0x014, 0xffffffff);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 3695cde669f8..07914e36939e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
duty = nvkm_therm_update_linear(therm);
break;
case NVBIOS_THERM_FAN_OTHER:
- if (therm->cstate)
+ if (therm->cstate) {
duty = therm->cstate;
- else
+ poll = false;
+ } else {
duty = nvkm_therm_update_linear_fallback(therm);
- poll = false;
+ }
break;
}
immd = false;
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 00a9c2ab9e6c..64fb788b6647 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
{
- struct dsi_data *dsi = p;
+ struct dsi_data *dsi = s->private;
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
{
- struct dsi_data *dsi = p;
+ struct dsi_data *dsi = s->private;
unsigned long flags;
struct dsi_irq_stats stats;
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
{
- struct dsi_data *dsi = p;
+ struct dsi_data *dsi = s->private;
if (dsi_runtime_get(dsi))
return 0;
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+ /*
+ * HACK: These flags should be handled through the omap_dss_device bus
+ * flags, but this will only be possible when the DSI encoder will be
+ * converted to the omapdrm-managed encoder model.
+ */
+ dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+ dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
dss_mgr_set_timings(&dsi->output, &dsi->vm);
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
- dsi_dump_dsi_regs, &dsi);
+ dsi_dump_dsi_regs, dsi);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
- dsi_dump_dsi_irqs, &dsi);
+ dsi_dump_dsi_irqs, dsi);
#endif
snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
- dsi_dump_dsi_clocks, &dsi);
+ dsi_dump_dsi_clocks, dsi);
return 0;
}
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
dss_debugfs_remove_file(dsi->debugfs.irqs);
dss_debugfs_remove_file(dsi->debugfs.regs);
- of_platform_depopulate(dev);
-
WARN_ON(dsi->scp_clk_refcount > 0);
dss_pll_unregister(&dsi->pll);
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
dsi_uninit_output(dsi);
+ of_platform_depopulate(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 13c8a662f9b4..ccb090f3ab30 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = qxl_gem_prime_pin,
.gem_prime_unpin = qxl_gem_prime_unpin,
- .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index a55dece118b2..df65d3c1a7b8 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
-struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
-}
-
-struct drm_gem_object *qxl_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
-}
-
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779a80b4..a97294ac96d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
- enum pci_bus_speed speed_cap;
+ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
- speed_cap = pcie_get_speed_cap(root);
+ if (!pci_is_root_bus(rdev->pdev->bus))
+ speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
pi->sys_pcie_mask = 0;
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dec1e081f529..6a8fb6fd183c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3af015..0a785ef0ab66 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
- enum pci_bus_speed speed_cap;
+ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
- speed_cap = pcie_get_speed_cap(root);
+ if (!pci_is_root_bus(rdev->pdev->bus))
+ speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
si_pi->sys_pcie_mask = 0;
} else {
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 96ac1458a59c..c0351abf83a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:
* Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <drm/drmP.h>
@@ -113,8 +104,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
child_count++;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
&panel, &bridge);
- if (!ret)
+ if (!ret) {
+ of_node_put(endpoint);
break;
+ }
}
of_node_put(port);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 38b52e63b2b0..27b9635124bc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:
* Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifdef CONFIG_ROCKCHIP_RGB
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 4463d3826ecb..e2942c9a11a7 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
while ((entity->dependency =
sched->ops->dependency(sched_job, entity))) {
+ trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
- if (drm_sched_entity_add_dependency_cb(entity)) {
-
- trace_drm_sched_job_wait_dep(sched_job,
- entity->dependency);
+ if (drm_sched_entity_add_dependency_cb(entity))
return NULL;
- }
}
/* skip jobs from entity that marked guilty */
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 9e9255ee59cd..a021bab11a4f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
remote = of_graph_get_remote_port_parent(ep);
if (!remote)
continue;
+ of_node_put(remote);
/* does this node match any registered engines? */
list_for_each_entry(frontend, &drv->frontend_list, list) {
if (remote == frontend->node) {
- of_node_put(remote);
of_node_put(port);
+ of_node_put(ep);
return frontend;
}
}
}
-
+ of_node_put(port);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 061d2e0d9011..416da5376701 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+
+ clk_disable_unprepare(hdmi->tmds_clk);
}
static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
+ clk_prepare_enable(hdmi->tmds_clk);
+
sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 0420f5c978b9..cf45d0f940f9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
return PTR_ERR(tcon->sclk0);
}
}
+ clk_prepare_enable(tcon->sclk0);
if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
{
+ clk_disable_unprepare(tcon->sclk0);
clk_disable_unprepare(tcon->clk);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index f7f32a885af7..2d1aaca49105 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -127,14 +127,10 @@ static struct drm_driver driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = virtgpu_gem_prime_pin,
.gem_prime_unpin = virtgpu_gem_prime_unpin,
- .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1deb41d42ea4..0c15000f926e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 86ce0ae93f59..c59ec34c80a5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
index 9d9e8146db90..d7b409a3c0f8 100644
--- a/drivers/gpu/drm/vkms/vkms_crc.c
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
@@ -1,4 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
+
#include "vkms_drv.h"
#include <linux/crc32.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 177bbcb38306..eb56ee893761 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 83087877565c..7dcbecb5fac2 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -1,9 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
/**
* DOC: vkms (Virtual Kernel Modesetting)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index e4469cd3d254..81f1cfbeb936 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
#ifndef _VKMS_DRV_H_
#define _VKMS_DRV_H_
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 80311daed47a..138b0bb325cf 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/shmem_fs.h>
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 271a0eb9042c..4173e4f48334 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include "vkms_drv.h"
#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 418817600ad1..0e67d2d42f0c 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include "vkms_drv.h"
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25afb1d594e3..7ef5dcb06104 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@
**************************************************************************/
#include <linux/module.h>
#include <linux/console.h>
+#include <linux/dma-mapping.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <linux/intel-iommu.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
}
/**
+ * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
+ * taking place.
+ * @dev: Pointer to the struct drm_device.
+ *
+ * Return: true if iommu present, false otherwise.
+ */
+static bool vmw_assume_iommu(struct drm_device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev->dev);
+
+ return !dma_is_direct(ops) && ops &&
+ ops->map_page != dma_direct_map_page;
+}
+
+/**
* vmw_dma_select_mode - Determine how DMA mappings should be set up for this
* system.
*
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
[vmw_dma_map_populate] = "Keeping DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
- const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_enabled) {
+ if (vmw_force_coherent)
+ dev_priv->map_mode = vmw_dma_alloc_coherent;
+ else if (vmw_assume_iommu(dev_priv->dev))
dev_priv->map_mode = vmw_dma_map_populate;
- goto out_fixup;
- }
-#endif
-
- if (!(vmw_force_iommu || vmw_force_coherent)) {
+ else if (!vmw_force_iommu)
dev_priv->map_mode = vmw_dma_phys;
- DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
- return 0;
- }
-
- dev_priv->map_mode = vmw_dma_map_populate;
-
- if (dma_ops && dma_ops->sync_single_for_cpu)
+ else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
dev_priv->map_mode = vmw_dma_alloc_coherent;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl() == 0)
+ else
dev_priv->map_mode = vmw_dma_map_populate;
-#endif
-#ifdef CONFIG_INTEL_IOMMU
-out_fixup:
-#endif
- if (dev_priv->map_mode == vmw_dma_map_populate &&
- vmw_restrict_iommu)
+ if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
- if (vmw_force_coherent)
- dev_priv->map_mode = vmw_dma_alloc_coherent;
-
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
- /*
- * No coherent page pool
- */
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ /* No TTM coherent page pool? FIXME: Ask TTM instead! */
+ if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+ (dev_priv->map_mode == vmw_dma_alloc_coherent))
return -EINVAL;
-#endif
-
-#else /* CONFIG_X86 */
- dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-
return 0;
}
@@ -625,24 +612,20 @@ out_fixup:
* With 32-bit we can only handle 32 bit PFNs. Optionally set that
* restriction also for 64-bit systems.
*/
-#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ int ret = 0;
- if (intel_iommu_enabled &&
+ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+ if (dev_priv->map_mode != vmw_dma_phys &&
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
}
- return 0;
-}
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
- return 0;
+
+ return ret;
}
-#endif
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f2d13a72c05d..88b8178d4687 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
*p_fence = NULL;
}
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b351fb5214d3..ed2f67822f45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!du->pref_active) {
+ if (!du->pref_active && new_crtc_state->enable) {
ret = -EINVAL;
goto clean;
}
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
user_fence_rep)
{
struct vmw_fence_obj *fence = NULL;
- uint32_t handle;
- int ret;
+ uint32_t handle = 0;
+ int ret = 0;
if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
out_fence)
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 474b00e19697..0a7d4395d427 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = {
.cpmem_ofs = 0x1f000000,
.srm_ofs = 0x1f040000,
.tpm_ofs = 0x1f060000,
- .csi0_ofs = 0x1f030000,
- .csi1_ofs = 0x1f038000,
+ .csi0_ofs = 0x1e030000,
+ .csi1_ofs = 0x1e038000,
.ic_ofs = 0x1e020000,
.disp0_ofs = 0x1e040000,
.disp1_ofs = 0x1e048000,
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = {
.cpmem_ofs = 0x07000000,
.srm_ofs = 0x07040000,
.tpm_ofs = 0x07060000,
- .csi0_ofs = 0x07030000,
- .csi1_ofs = 0x07038000,
+ .csi0_ofs = 0x06030000,
+ .csi1_ofs = 0x06038000,
.ic_ofs = 0x06020000,
.disp0_ofs = 0x06040000,
.disp1_ofs = 0x06048000,
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 2f8db9d62551..4a28f3fbb0a2 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -106,6 +106,7 @@ struct ipu_pre {
void *buffer_virt;
bool in_use;
unsigned int safe_window_end;
+ unsigned int last_bufaddr;
};
static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+ pre->last_bufaddr = bufaddr;
val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
@@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
unsigned short current_yblock;
u32 val;
+ if (bufaddr == pre->last_bufaddr)
+ return;
+
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+ pre->last_bufaddr = bufaddr;
do {
if (time_after(jiffies, timeout)) {
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index b677e5d524e6..d5f1d8e1c6f8 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
depends on ACPI
+ depends on PCI
select VGA_ARB
help
Many laptops released in 2008/9/10 have two GPUs with a multiplexer
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f41d5fe51abe..9993b692598f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -125,6 +125,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
{
struct hid_collection *collection;
unsigned usage;
+ int collection_index;
usage = parser->local.usage[0];
@@ -167,13 +168,13 @@ static int open_collection(struct hid_parser *parser, unsigned type)
parser->collection_stack[parser->collection_stack_ptr++] =
parser->device->maxcollection;
- collection = parser->device->collection +
- parser->device->maxcollection++;
+ collection_index = parser->device->maxcollection++;
+ collection = parser->device->collection + collection_index;
collection->type = type;
collection->usage = usage;
collection->level = parser->collection_stack_ptr - 1;
- collection->parent = parser->active_collection;
- parser->active_collection = collection;
+ collection->parent_idx = (collection->level == 0) ? -1 :
+ parser->collection_stack[collection->level - 1];
if (type == HID_COLLECTION_APPLICATION)
parser->device->maxapplication++;
@@ -192,8 +193,6 @@ static int close_collection(struct hid_parser *parser)
return -EINVAL;
}
parser->collection_stack_ptr--;
- if (parser->active_collection)
- parser->active_collection = parser->active_collection->parent;
return 0;
}
@@ -1006,10 +1005,12 @@ static void hid_apply_multiplier_to_field(struct hid_device *hid,
usage = &field->usage[i];
collection = &hid->collection[usage->collection_index];
- while (collection && collection != multiplier_collection)
- collection = collection->parent;
+ while (collection->parent_idx != -1 &&
+ collection != multiplier_collection)
+ collection = &hid->collection[collection->parent_idx];
- if (collection || multiplier_collection == NULL)
+ if (collection->parent_idx != -1 ||
+ multiplier_collection == NULL)
usage->resolution_multiplier = effective_multiplier;
}
@@ -1044,9 +1045,9 @@ static void hid_apply_multiplier(struct hid_device *hid,
* applicable fields later.
*/
multiplier_collection = &hid->collection[multiplier->usage->collection_index];
- while (multiplier_collection &&
+ while (multiplier_collection->parent_idx != -1 &&
multiplier_collection->type != HID_COLLECTION_LOGICAL)
- multiplier_collection = multiplier_collection->parent;
+ multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
effective_multiplier = hid_calculate_multiplier(hid, multiplier);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index c530476edba6..ac9fda1b5a72 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/kfifo.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
/* enqueue string to 'events' ring buffer */
void hid_debug_event(struct hid_device *hdev, char *buf)
{
- unsigned i;
struct hid_debug_list *list;
unsigned long flags;
spin_lock_irqsave(&hdev->debug_list_lock, flags);
- list_for_each_entry(list, &hdev->debug_list, node) {
- for (i = 0; buf[i]; i++)
- list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
- buf[i];
- list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
- }
+ list_for_each_entry(list, &hdev->debug_list, node)
+ kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
wake_up_interruptible(&hdev->debug_wait);
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
hid_debug_event(hdev, buf);
kfree(buf);
- wake_up_interruptible(&hdev->debug_wait);
-
+ wake_up_interruptible(&hdev->debug_wait);
}
EXPORT_SYMBOL_GPL(hid_dump_input);
@@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
goto out;
}
- if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
- err = -ENOMEM;
+ err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
+ if (err) {
kfree(list);
goto out;
}
@@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct hid_debug_list *list = file->private_data;
- int ret = 0, len;
+ int ret = 0, copied;
DECLARE_WAITQUEUE(wait, current);
mutex_lock(&list->read_mutex);
- while (ret == 0) {
- if (list->head == list->tail) {
- add_wait_queue(&list->hdev->debug_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (list->head == list->tail) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
+ if (kfifo_is_empty(&list->hid_debug_fifo)) {
+ add_wait_queue(&list->hdev->debug_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (kfifo_is_empty(&list->hid_debug_fifo)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
- if (!list->hdev || !list->hdev->debug) {
- ret = -EIO;
- set_current_state(TASK_RUNNING);
- goto out;
- }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
- /* allow O_NONBLOCK from other threads */
- mutex_unlock(&list->read_mutex);
- schedule();
- mutex_lock(&list->read_mutex);
- set_current_state(TASK_INTERRUPTIBLE);
+ /* if list->hdev is NULL we cannot remove_wait_queue().
+ * if list->hdev->debug is 0 then hid_debug_unregister()
+ * was already called and list->hdev is being destroyed.
+ * if we add remove_wait_queue() here we can hit a race.
+ */
+ if (!list->hdev || !list->hdev->debug) {
+ ret = -EIO;
+ set_current_state(TASK_RUNNING);
+ goto out;
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&list->hdev->debug_wait, &wait);
+ /* allow O_NONBLOCK from other threads */
+ mutex_unlock(&list->read_mutex);
+ schedule();
+ mutex_lock(&list->read_mutex);
+ set_current_state(TASK_INTERRUPTIBLE);
}
- if (ret)
- goto out;
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&list->hdev->debug_wait, &wait);
- /* pass the ringbuffer contents to userspace */
-copy_rest:
- if (list->tail == list->head)
+ if (ret)
goto out;
- if (list->tail > list->head) {
- len = list->tail - list->head;
- if (len > count)
- len = count;
-
- if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
- ret = -EFAULT;
- goto out;
- }
- ret += len;
- list->head += len;
- } else {
- len = HID_DEBUG_BUFSIZE - list->head;
- if (len > count)
- len = count;
-
- if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
- ret = -EFAULT;
- goto out;
- }
- list->head = 0;
- ret += len;
- count -= len;
- if (count > 0)
- goto copy_rest;
- }
-
}
+
+ /* pass the fifo content to userspace, locking is not needed with only
+ * one concurrent reader and one concurrent writer
+ */
+ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
+ if (ret)
+ goto out;
+ ret = copied;
out:
mutex_unlock(&list->read_mutex);
return ret;
@@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
struct hid_debug_list *list = file->private_data;
poll_wait(file, &list->hdev->debug_wait, wait);
- if (list->head != list->tail)
+ if (!kfifo_is_empty(&list->hid_debug_fifo))
return EPOLLIN | EPOLLRDNORM;
if (!list->hdev->debug)
return EPOLLERR | EPOLLHUP;
@@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
list_del(&list->node);
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
- kfree(list->hid_debug_buf);
+ kfifo_free(&list->hid_debug_fifo);
kfree(list);
return 0;
@@ -1246,4 +1221,3 @@ void hid_debug_exit(void)
{
debugfs_remove_recursive(hid_debug_root);
}
-
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 518fa76414f5..24f846d67478 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -461,6 +461,9 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+#define I2C_VENDOR_ID_GOODIX 0x27c6
+#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
+
#define USB_VENDOR_ID_GOODTOUCH 0x1aad
#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 8555ce7e737b..c5edfa966343 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -179,6 +179,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
{ USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
I2C_HID_QUIRK_NO_RUNTIME_PM },
+ { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
+ I2C_HID_QUIRK_NO_RUNTIME_PM },
{ 0, 0 }
};
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index ce0ba2062723..23381c41d087 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -282,8 +282,8 @@ int vmbus_open(struct vmbus_channel *newchannel,
EXPORT_SYMBOL_GPL(vmbus_open);
/* Used for Hyper-V Socket: a guest client's connect() to the host */
-int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
- const uuid_le *shv_host_servie_id)
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+ const guid_t *shv_host_servie_id)
{
struct vmbus_channel_tl_connect_request conn_msg;
int ret;
@@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
int vmbus_disconnect_ring(struct vmbus_channel *channel)
{
struct vmbus_channel *cur_channel, *tmp;
- unsigned long flags;
- LIST_HEAD(list);
int ret;
if (channel->primary_channel != NULL)
return -EINVAL;
- /* Snapshot the list of subchannels */
- spin_lock_irqsave(&channel->lock, flags);
- list_splice_init(&channel->sc_list, &list);
- spin_unlock_irqrestore(&channel->lock, flags);
-
- list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+ list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
if (cur_channel->rescind)
wait_for_completion(&cur_channel->rescind_event);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index d01689079e9b..62703b354d6d 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -141,7 +141,7 @@ static const struct vmbus_device vmbus_devs[] = {
};
static const struct {
- uuid_le guid;
+ guid_t guid;
} vmbus_unsupported_devs[] = {
{ HV_AVMA1_GUID },
{ HV_AVMA2_GUID },
@@ -171,26 +171,26 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
-static bool is_unsupported_vmbus_devs(const uuid_le *guid)
+static bool is_unsupported_vmbus_devs(const guid_t *guid)
{
int i;
for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
- if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
+ if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
return true;
return false;
}
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
- const uuid_le *guid = &channel->offermsg.offer.if_type;
+ const guid_t *guid = &channel->offermsg.offer.if_type;
u16 i;
if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
return HV_UNKNOWN;
for (i = HV_IDE; i < HV_UNKNOWN; i++) {
- if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
+ if (guid_equal(guid, &vmbus_devs[i].guid))
return i;
}
pr_info("Unknown GUID: %pUl\n", guid);
@@ -561,10 +561,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
atomic_dec(&vmbus_connection.offer_in_progress);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
- if (!uuid_le_cmp(channel->offermsg.offer.if_type,
- newchannel->offermsg.offer.if_type) &&
- !uuid_le_cmp(channel->offermsg.offer.if_instance,
- newchannel->offermsg.offer.if_instance)) {
+ if (guid_equal(&channel->offermsg.offer.if_type,
+ &newchannel->offermsg.offer.if_type) &&
+ guid_equal(&channel->offermsg.offer.if_instance,
+ &newchannel->offermsg.offer.if_instance)) {
fnew = false;
break;
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 5301fef16c31..dd475f3bcc8a 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -681,8 +681,13 @@ static struct notifier_block hv_memory_nb = {
/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
- if (!has_pfn_is_backed(has, page_to_pfn(pg)))
+ if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
+ if (!PageOffline(pg))
+ __SetPageOffline(pg);
return;
+ }
+ if (PageOffline(pg))
+ __ClearPageOffline(pg);
/* This frame is currently backed; online the page. */
__online_page_set_limits(pg);
@@ -771,7 +776,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
}
}
-static void hv_online_page(struct page *pg)
+static void hv_online_page(struct page *pg, unsigned int order)
{
struct hv_hotadd_state *has;
unsigned long flags;
@@ -780,10 +785,11 @@ static void hv_online_page(struct page *pg)
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/* The page belongs to a different HAS. */
- if ((pfn < has->start_pfn) || (pfn >= has->end_pfn))
+ if ((pfn < has->start_pfn) ||
+ (pfn + (1UL << order) > has->end_pfn))
continue;
- hv_page_online_one(has, pg);
+ hv_bring_pgs_online(has, pfn, 1UL << order);
break;
}
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
@@ -888,12 +894,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
pfn_cnt -= pgs_ol;
/*
* Check if the corresponding memory block is already
- * online by checking its last previously backed page.
- * In case it is we need to bring rest (which was not
- * backed previously) online too.
+ * online. It is possible to observe struct pages still
+ * being uninitialized here so check section instead.
+ * In case the section is online we need to bring the
+ * rest of pfns (which were not backed previously)
+ * online too.
*/
if (start_pfn > has->start_pfn &&
- !PageReserved(pfn_to_page(start_pfn - 1)))
+ online_section_nr(pfn_to_section_nr(start_pfn)))
hv_bring_pgs_online(has, start_pfn, pgs_ol);
}
@@ -1199,6 +1207,7 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
for (i = 0; i < num_pages; i++) {
pg = pfn_to_page(i + start_frame);
+ __ClearPageOffline(pg);
__free_page(pg);
dm->num_pages_ballooned--;
}
@@ -1211,7 +1220,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
struct dm_balloon_response *bl_resp,
int alloc_unit)
{
- unsigned int i = 0;
+ unsigned int i, j;
struct page *pg;
if (num_pages < alloc_unit)
@@ -1243,6 +1252,10 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
if (alloc_unit != 1)
split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
+ /* mark all pages offline */
+ for (j = 0; j < (1 << get_order(alloc_unit << PAGE_SHIFT)); j++)
+ __SetPageOffline(pg + j);
+
bl_resp->range_count++;
bl_resp->range_array[i].finfo.start_page =
page_to_pfn(pg);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a1f6ce6e5974..cb86b133eb4d 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -312,8 +312,8 @@ extern const struct vmbus_channel_message_table_entry
/* General vmbus interface */
-struct hv_device *vmbus_device_create(const uuid_le *type,
- const uuid_le *instance,
+struct hv_device *vmbus_device_create(const guid_t *type,
+ const guid_t *instance,
struct vmbus_channel *channel);
int vmbus_device_register(struct hv_device *child_device_obj);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 64d0c85d5161..9e8b31ccc142 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -74,8 +74,10 @@ static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
* This is the only case we need to signal when the
* ring transitions from being empty to non-empty.
*/
- if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
+ if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
+ ++channel->intr_out_empty;
vmbus_setevent(channel);
+ }
}
/* Get the next write location for the specified ring buffer. */
@@ -164,26 +166,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
}
/* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
- struct hv_ring_buffer_debug_info *debug_info)
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
- if (ring_info->ring_buffer) {
- hv_get_ringbuffer_availbytes(ring_info,
- &bytes_avail_toread,
- &bytes_avail_towrite);
-
- debug_info->bytes_avail_toread = bytes_avail_toread;
- debug_info->bytes_avail_towrite = bytes_avail_towrite;
- debug_info->current_read_index =
- ring_info->ring_buffer->read_index;
- debug_info->current_write_index =
- ring_info->ring_buffer->write_index;
- debug_info->current_interrupt_mask =
- ring_info->ring_buffer->interrupt_mask;
- }
+ if (!ring_info->ring_buffer)
+ return -EINVAL;
+
+ hv_get_ringbuffer_availbytes(ring_info,
+ &bytes_avail_toread,
+ &bytes_avail_towrite);
+ debug_info->bytes_avail_toread = bytes_avail_toread;
+ debug_info->bytes_avail_towrite = bytes_avail_towrite;
+ debug_info->current_read_index = ring_info->ring_buffer->read_index;
+ debug_info->current_write_index = ring_info->ring_buffer->write_index;
+ debug_info->current_interrupt_mask
+ = ring_info->ring_buffer->interrupt_mask;
+ return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
@@ -273,10 +274,19 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
* is empty since the read index == write index.
*/
if (bytes_avail_towrite <= totalbytes_towrite) {
+ ++channel->out_full_total;
+
+ if (!channel->out_full_flag) {
+ ++channel->out_full_first;
+ channel->out_full_flag = true;
+ }
+
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
+ channel->out_full_flag = false;
+
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
@@ -531,6 +541,7 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
if (curr_write_sz <= pending_sz)
return;
+ ++channel->intr_in_full;
vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
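
[Editor's illustrative note] The new per-channel counters follow a "total versus first" pattern: out_full_total is bumped on every write attempt that finds the ring full, while out_full_flag makes out_full_first count only the transition into a full episode. A stripped-down sketch of that accounting, with the locking of hv_ringbuffer_write() omitted:

/* Illustrative sketch of the counter updates added above. */
#include <linux/types.h>

struct chan_stats {
	u64 out_full_total;	/* every write attempt that saw a full ring */
	u64 out_full_first;	/* only the first failure of each full episode */
	bool out_full_flag;	/* true while the ring stays full */
};

static void account_write_attempt(struct chan_stats *s, bool ring_full)
{
	if (ring_full) {
		++s->out_full_total;
		if (!s->out_full_flag) {
			++s->out_full_first;
			s->out_full_flag = true;
		}
	} else {
		/* a successful write ends the current "full" episode */
		s->out_full_flag = false;
	}
}
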
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index d0ff65675292..000b53e5a17a 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -234,7 +234,7 @@ static ssize_t server_monitor_pending_show(struct device *dev,
return -ENODEV;
return sprintf(buf, "%d\n",
channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
+
return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info outbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+ &outbound);
+ if (ret < 0)
+ return ret;
return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);
@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);
@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);
@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_ring_buffer_debug_info inbound;
+ int ret;
if (!hv_dev->channel)
return -ENODEV;
- if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
- hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+ ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+ if (ret < 0)
+ return ret;
+
return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
@@ -623,38 +654,28 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
return ret;
}
-static const uuid_le null_guid;
-
-static inline bool is_null_guid(const uuid_le *guid)
-{
- if (uuid_le_cmp(*guid, null_guid))
- return false;
- return true;
-}
-
static const struct hv_vmbus_device_id *
-hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const uuid_le *guid)
-
+hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
if (id == NULL)
return NULL; /* empty device table */
- for (; !is_null_guid(&id->guid); id++)
- if (!uuid_le_cmp(id->guid, *guid))
+ for (; !guid_is_null(&id->guid); id++)
+ if (guid_equal(&id->guid, guid))
return id;
return NULL;
}
static const struct hv_vmbus_device_id *
-hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
+hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
const struct hv_vmbus_device_id *id = NULL;
struct vmbus_dynid *dynid;
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
- if (!uuid_le_cmp(dynid->id.guid, *guid)) {
+ if (guid_equal(&dynid->id.guid, guid)) {
id = &dynid->id;
break;
}
@@ -664,9 +685,7 @@ hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
return id;
}
-static const struct hv_vmbus_device_id vmbus_device_null = {
- .guid = NULL_UUID_LE,
-};
+static const struct hv_vmbus_device_id vmbus_device_null;
/*
* Return a matching hv_vmbus_device_id pointer.
@@ -675,7 +694,7 @@ static const struct hv_vmbus_device_id vmbus_device_null = {
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
struct hv_device *dev)
{
- const uuid_le *guid = &dev->dev_type;
+ const guid_t *guid = &dev->dev_type;
const struct hv_vmbus_device_id *id;
/* When driver_override is set, only bind to the matching driver */
@@ -695,7 +714,7 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
-static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
+static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
struct vmbus_dynid *dynid;
@@ -733,10 +752,10 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
size_t count)
{
struct hv_driver *drv = drv_to_hv_drv(driver);
- uuid_le guid;
+ guid_t guid;
ssize_t retval;
- retval = uuid_le_to_bin(buf, &guid);
+ retval = guid_parse(buf, &guid);
if (retval)
return retval;
@@ -760,10 +779,10 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
{
struct hv_driver *drv = drv_to_hv_drv(driver);
struct vmbus_dynid *dynid, *n;
- uuid_le guid;
+ guid_t guid;
ssize_t retval;
- retval = uuid_le_to_bin(buf, &guid);
+ retval = guid_parse(buf, &guid);
if (retval)
return retval;
@@ -772,7 +791,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
struct hv_vmbus_device_id *id = &dynid->id;
- if (!uuid_le_cmp(id->guid, guid)) {
+ if (guid_equal(&id->guid, &guid)) {
list_del(&dynid->node);
kfree(dynid);
retval = count;
@@ -1465,6 +1484,38 @@ static ssize_t channel_events_show(const struct vmbus_channel *channel, char *bu
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+static ssize_t channel_intr_in_full_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->intr_in_full);
+}
+static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
+
+static ssize_t channel_intr_out_empty_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->intr_out_empty);
+}
+static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
+
+static ssize_t channel_out_full_first_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->out_full_first);
+}
+static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
+
+static ssize_t channel_out_full_total_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)channel->out_full_total);
+}
+static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
+
static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
char *buf)
{
@@ -1490,6 +1541,10 @@ static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_latency.attr,
&chan_attr_interrupts.attr,
&chan_attr_events.attr,
+ &chan_attr_intr_in_full.attr,
+ &chan_attr_intr_out_empty.attr,
+ &chan_attr_out_full_first.attr,
+ &chan_attr_out_full_total.attr,
&chan_attr_monitor_id.attr,
&chan_attr_subchannel_id.attr,
NULL
@@ -1525,8 +1580,8 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
*/
-struct hv_device *vmbus_device_create(const uuid_le *type,
- const uuid_le *instance,
+struct hv_device *vmbus_device_create(const guid_t *type,
+ const guid_t *instance,
struct vmbus_channel *channel)
{
struct hv_device *child_device_obj;
@@ -1538,12 +1593,10 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
}
child_device_obj->channel = channel;
- memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
- memcpy(&child_device_obj->dev_instance, instance,
- sizeof(uuid_le));
+ guid_copy(&child_device_obj->dev_type, type);
+ guid_copy(&child_device_obj->dev_instance, instance);
child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
-
return child_device_obj;
}
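
[Editor's illustrative note] The vmbus_drv.c hunks replace the uuid_le helpers with the guid_t API from <linux/uuid.h>: guid_parse() instead of uuid_le_to_bin(), guid_equal() and guid_is_null() instead of uuid_le_cmp(), and guid_copy() instead of memcpy(). A small sketch of the replacement calls (demo_guid_usage() is a made-up wrapper):

#include <linux/errno.h>
#include <linux/uuid.h>

/* Illustrative only: parse, test, copy and compare a GUID with the new helpers. */
static int demo_guid_usage(const char *buf, const guid_t *known)
{
	guid_t guid, copy;
	int ret;

	ret = guid_parse(buf, &guid);		/* was uuid_le_to_bin() */
	if (ret)
		return ret;

	if (guid_is_null(&guid))		/* was uuid_le_cmp() against a zero GUID */
		return -EINVAL;

	guid_copy(&copy, &guid);		/* was memcpy(..., sizeof(uuid_le)) */

	return guid_equal(&copy, known) ? 0 : -ENODEV;	/* was !uuid_le_cmp() */
}
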
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index 76f0a5c01e8a..4aeba29b4629 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -19,6 +19,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -54,10 +55,11 @@ struct ad7418_data {
u16 in[4];
};
-static struct ad7418_data *ad7418_update_device(struct device *dev)
+static int ad7418_update_device(struct device *dev)
{
struct ad7418_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
+ s32 val;
mutex_lock(&data->lock);
@@ -67,47 +69,74 @@ static struct ad7418_data *ad7418_update_device(struct device *dev)
int i, ch;
/* read config register and clear channel bits */
- cfg = i2c_smbus_read_byte_data(client, AD7418_REG_CONF);
+ val = i2c_smbus_read_byte_data(client, AD7418_REG_CONF);
+ if (val < 0)
+ goto abort;
+
+ cfg = val;
cfg &= 0x1F;
- i2c_smbus_write_byte_data(client, AD7418_REG_CONF,
+ val = i2c_smbus_write_byte_data(client, AD7418_REG_CONF,
cfg | AD7418_CH_TEMP);
+ if (val < 0)
+ goto abort;
+
udelay(30);
for (i = 0; i < 3; i++) {
- data->temp[i] =
- i2c_smbus_read_word_swapped(client,
- AD7418_REG_TEMP[i]);
+ val = i2c_smbus_read_word_swapped(client,
+ AD7418_REG_TEMP[i]);
+ if (val < 0)
+ goto abort;
+
+ data->temp[i] = val;
}
for (i = 0, ch = 4; i < data->adc_max; i++, ch--) {
- i2c_smbus_write_byte_data(client,
- AD7418_REG_CONF,
+ val = i2c_smbus_write_byte_data(client, AD7418_REG_CONF,
cfg | AD7418_REG_ADC_CH(ch));
+ if (val < 0)
+ goto abort;
udelay(15);
- data->in[data->adc_max - 1 - i] =
- i2c_smbus_read_word_swapped(client,
- AD7418_REG_ADC);
+ val = i2c_smbus_read_word_swapped(client,
+ AD7418_REG_ADC);
+ if (val < 0)
+ goto abort;
+
+ data->in[data->adc_max - 1 - i] = val;
}
/* restore old configuration value */
- i2c_smbus_write_word_swapped(client, AD7418_REG_CONF, cfg);
+ val = i2c_smbus_write_word_swapped(client, AD7418_REG_CONF,
+ cfg);
+ if (val < 0)
+ goto abort;
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->lock);
+ return 0;
- return data;
+abort:
+ data->valid = 0;
+ mutex_unlock(&data->lock);
+ return val;
}
static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct ad7418_data *data = ad7418_update_device(dev);
+ struct ad7418_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ad7418_update_device(dev);
+ if (ret < 0)
+ return ret;
+
return sprintf(buf, "%d\n",
LM75_TEMP_FROM_REG(data->temp[attr->index]));
}
@@ -116,7 +145,12 @@ static ssize_t adc_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct ad7418_data *data = ad7418_update_device(dev);
+ struct ad7418_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ad7418_update_device(dev);
+ if (ret < 0)
+ return ret;
return sprintf(buf, "%d\n",
((data->in[attr->index] >> 6) * 2500 + 512) / 1024);
@@ -220,7 +254,10 @@ static int ad7418_probe(struct i2c_client *client,
mutex_init(&data->lock);
data->client = client;
- data->type = id->driver_data;
+ if (dev->of_node)
+ data->type = (enum chips)of_device_get_match_data(dev);
+ else
+ data->type = id->driver_data;
switch (data->type) {
case ad7416:
@@ -258,9 +295,18 @@ static const struct i2c_device_id ad7418_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ad7418_id);
+static const struct of_device_id ad7418_dt_ids[] = {
+ { .compatible = "adi,ad7416", .data = (void *)ad7416, },
+ { .compatible = "adi,ad7417", .data = (void *)ad7417, },
+ { .compatible = "adi,ad7418", .data = (void *)ad7418, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7418_dt_ids);
+
static struct i2c_driver ad7418_driver = {
.driver = {
.name = "ad7418",
+ .of_match_table = ad7418_dt_ids,
},
.probe = ad7418_probe,
.id_table = ad7418_id,
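
[Editor's illustrative note] The ad7418 probe hunk shows the usual pattern for DT-aware I2C drivers: an of_device_id table whose .data carries the chip variant, read back with of_device_get_match_data() when the device was instantiated from the device tree, with a fallback to the i2c_device_id table otherwise. A condensed sketch of that selection, reusing the driver's enum values but a hypothetical demo_probe():

/* Illustrative only: mirrors the data->type selection in ad7418_probe(). */
#include <linux/i2c.h>
#include <linux/of_device.h>

enum chips { ad7416, ad7417, ad7418 };

static int demo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	enum chips type;

	if (client->dev.of_node)
		/* .data from the matching of_device_id entry */
		type = (enum chips)of_device_get_match_data(&client->dev);
	else
		/* legacy (non-DT) instantiation via the i2c_device_id table */
		type = id->driver_data;

	return type == ad7418 ? 0 : -ENODEV;	/* placeholder use of 'type' */
}
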
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 8c5cdb560258..e561279aea21 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* adm1029.c - Part of lm_sensors, Linux kernel modules for hardware monitoring
*
@@ -19,10 +20,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -111,7 +108,7 @@ static const u8 ADM1029_REG_FAN_DIV[] = {
struct adm1029_data {
struct i2c_client *client;
- struct mutex update_lock;
+ struct mutex update_lock; /* protect register access */
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
@@ -134,8 +131,7 @@ static struct adm1029_data *adm1029_update_device(struct device *dev)
* Use the "cache" Luke, don't recheck values
* if there are already checked not a long time later
*/
- if (time_after(jiffies, data->last_updated + HZ * 2)
- || !data->valid) {
+ if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
int nr;
dev_dbg(&client->dev, "Updating adm1029 data\n");
@@ -174,6 +170,7 @@ show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
+
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}
@@ -183,9 +180,10 @@ show_fan(struct device *dev, struct device_attribute *devattr, char *buf)
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
u16 val;
- if (data->fan[attr->index] == 0
- || (data->fan_div[attr->index] & 0xC0) == 0
- || data->fan[attr->index] == 255) {
+
+ if (data->fan[attr->index] == 0 ||
+ (data->fan_div[attr->index] & 0xC0) == 0 ||
+ data->fan[attr->index] == 255) {
return sprintf(buf, "0\n");
}
@@ -199,13 +197,14 @@ show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
+
if ((data->fan_div[attr->index] & 0xC0) == 0)
return sprintf(buf, "0\n");
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
}
-static ssize_t set_fan_div(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count)
+static ssize_t set_fan_div(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adm1029_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -213,6 +212,7 @@ static ssize_t set_fan_div(struct device *dev,
u8 reg;
long val;
int ret = kstrtol(buf, 10, &val);
+
if (ret < 0)
return ret;
@@ -253,32 +253,27 @@ static ssize_t set_fan_div(struct device *dev,
return count;
}
-/*
- * Access rights on sysfs. S_IRUGO: Is Readable by User, Group and Others
- * S_IWUSR: Is Writable by User.
- */
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
+/* Access rights on sysfs. */
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, 0444, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, 0444, show_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp1_max, 0444, show_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp2_max, 0444, show_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp3_max, 0444, show_temp, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp1_min, 0444, show_temp, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp2_min, 0444, show_temp, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp3_min, 0444, show_temp, NULL, 8);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_input, 0444, show_fan, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, 0444, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, show_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO, show_fan, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan1_min, 0444, show_fan, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan2_min, 0444, show_fan, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
- show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
- show_fan_div, set_fan_div, 1);
+static SENSOR_DEVICE_ATTR(fan1_div, 0644, show_fan_div, set_fan_div, 0);
+static SENSOR_DEVICE_ATTR(fan2_div, 0644, show_fan_div, set_fan_div, 1);
static struct attribute *adm1029_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -327,10 +322,10 @@ static int adm1029_detect(struct i2c_client *client,
temp_devices_installed = i2c_smbus_read_byte_data(client,
ADM1029_REG_TEMP_DEVICES_INSTALLED);
nb_fan_support = i2c_smbus_read_byte_data(client,
- ADM1029_REG_NB_FAN_SUPPORT);
+ ADM1029_REG_NB_FAN_SUPPORT);
/* 0x41 is Analog Devices */
- if (man_id != 0x41 || (temp_devices_installed & 0xf9) != 0x01
- || nb_fan_support != 0x03)
+ if (man_id != 0x41 || (temp_devices_installed & 0xf9) != 0x01 ||
+ nb_fan_support != 0x03)
return -ENODEV;
if ((chip_id & 0xF0) != 0x00) {
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index b0211f731251..030f5d49c061 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -448,6 +448,7 @@ static const char *voltage_label(struct adt7462_data *data, int which)
case 3:
return "+1.5V";
}
+ /* fall through */
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return "+12V3";
@@ -505,6 +506,7 @@ static const char *voltage_label(struct adt7462_data *data, int which)
case 3:
return "+1.5";
}
+ /* fall through */
case 11:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
@@ -542,6 +544,7 @@ static int voltage_multiplier(struct adt7462_data *data, int which)
case 3:
return 7800;
}
+ /* fall through */
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return 62500;
@@ -599,6 +602,7 @@ static int voltage_multiplier(struct adt7462_data *data, int which)
case 3:
return 7800;
}
+ /* fall through */
case 11:
case 12:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 68c9a6664557..a6a38ceec174 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -82,9 +82,15 @@ static bool disallow_fan_support;
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
#define I8K_HWMON_HAVE_TEMP3 (1 << 2)
#define I8K_HWMON_HAVE_TEMP4 (1 << 3)
-#define I8K_HWMON_HAVE_FAN1 (1 << 4)
-#define I8K_HWMON_HAVE_FAN2 (1 << 5)
-#define I8K_HWMON_HAVE_FAN3 (1 << 6)
+#define I8K_HWMON_HAVE_TEMP5 (1 << 4)
+#define I8K_HWMON_HAVE_TEMP6 (1 << 5)
+#define I8K_HWMON_HAVE_TEMP7 (1 << 6)
+#define I8K_HWMON_HAVE_TEMP8 (1 << 7)
+#define I8K_HWMON_HAVE_TEMP9 (1 << 8)
+#define I8K_HWMON_HAVE_TEMP10 (1 << 9)
+#define I8K_HWMON_HAVE_FAN1 (1 << 10)
+#define I8K_HWMON_HAVE_FAN2 (1 << 11)
+#define I8K_HWMON_HAVE_FAN3 (1 << 12)
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
@@ -737,6 +743,18 @@ static SENSOR_DEVICE_ATTR_RO(temp3_input, i8k_hwmon_temp, 2);
static SENSOR_DEVICE_ATTR_RO(temp3_label, i8k_hwmon_temp_label, 2);
static SENSOR_DEVICE_ATTR_RO(temp4_input, i8k_hwmon_temp, 3);
static SENSOR_DEVICE_ATTR_RO(temp4_label, i8k_hwmon_temp_label, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_input, i8k_hwmon_temp, 4);
+static SENSOR_DEVICE_ATTR_RO(temp5_label, i8k_hwmon_temp_label, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_input, i8k_hwmon_temp, 5);
+static SENSOR_DEVICE_ATTR_RO(temp6_label, i8k_hwmon_temp_label, 5);
+static SENSOR_DEVICE_ATTR_RO(temp7_input, i8k_hwmon_temp, 6);
+static SENSOR_DEVICE_ATTR_RO(temp7_label, i8k_hwmon_temp_label, 6);
+static SENSOR_DEVICE_ATTR_RO(temp8_input, i8k_hwmon_temp, 7);
+static SENSOR_DEVICE_ATTR_RO(temp8_label, i8k_hwmon_temp_label, 7);
+static SENSOR_DEVICE_ATTR_RO(temp9_input, i8k_hwmon_temp, 8);
+static SENSOR_DEVICE_ATTR_RO(temp9_label, i8k_hwmon_temp_label, 8);
+static SENSOR_DEVICE_ATTR_RO(temp10_input, i8k_hwmon_temp, 9);
+static SENSOR_DEVICE_ATTR_RO(temp10_label, i8k_hwmon_temp_label, 9);
static SENSOR_DEVICE_ATTR_RO(fan1_input, i8k_hwmon_fan, 0);
static SENSOR_DEVICE_ATTR_RO(fan1_label, i8k_hwmon_fan_label, 0);
static SENSOR_DEVICE_ATTR_RW(pwm1, i8k_hwmon_pwm, 0);
@@ -756,15 +774,27 @@ static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_temp3_label.dev_attr.attr, /* 5 */
&sensor_dev_attr_temp4_input.dev_attr.attr, /* 6 */
&sensor_dev_attr_temp4_label.dev_attr.attr, /* 7 */
- &sensor_dev_attr_fan1_input.dev_attr.attr, /* 8 */
- &sensor_dev_attr_fan1_label.dev_attr.attr, /* 9 */
- &sensor_dev_attr_pwm1.dev_attr.attr, /* 10 */
- &sensor_dev_attr_fan2_input.dev_attr.attr, /* 11 */
- &sensor_dev_attr_fan2_label.dev_attr.attr, /* 12 */
- &sensor_dev_attr_pwm2.dev_attr.attr, /* 13 */
- &sensor_dev_attr_fan3_input.dev_attr.attr, /* 14 */
- &sensor_dev_attr_fan3_label.dev_attr.attr, /* 15 */
- &sensor_dev_attr_pwm3.dev_attr.attr, /* 16 */
+ &sensor_dev_attr_temp5_input.dev_attr.attr, /* 8 */
+ &sensor_dev_attr_temp5_label.dev_attr.attr, /* 9 */
+ &sensor_dev_attr_temp6_input.dev_attr.attr, /* 10 */
+ &sensor_dev_attr_temp6_label.dev_attr.attr, /* 11 */
+ &sensor_dev_attr_temp7_input.dev_attr.attr, /* 12 */
+ &sensor_dev_attr_temp7_label.dev_attr.attr, /* 13 */
+ &sensor_dev_attr_temp8_input.dev_attr.attr, /* 14 */
+ &sensor_dev_attr_temp8_label.dev_attr.attr, /* 15 */
+ &sensor_dev_attr_temp9_input.dev_attr.attr, /* 16 */
+ &sensor_dev_attr_temp9_label.dev_attr.attr, /* 17 */
+ &sensor_dev_attr_temp10_input.dev_attr.attr, /* 18 */
+ &sensor_dev_attr_temp10_label.dev_attr.attr, /* 19 */
+ &sensor_dev_attr_fan1_input.dev_attr.attr, /* 20 */
+ &sensor_dev_attr_fan1_label.dev_attr.attr, /* 21 */
+ &sensor_dev_attr_pwm1.dev_attr.attr, /* 22 */
+ &sensor_dev_attr_fan2_input.dev_attr.attr, /* 23 */
+ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 24 */
+ &sensor_dev_attr_pwm2.dev_attr.attr, /* 25 */
+ &sensor_dev_attr_fan3_input.dev_attr.attr, /* 26 */
+ &sensor_dev_attr_fan3_label.dev_attr.attr, /* 27 */
+ &sensor_dev_attr_pwm3.dev_attr.attr, /* 28 */
NULL
};
@@ -788,13 +818,32 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
if (index >= 6 && index <= 7 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
return 0;
- if (index >= 8 && index <= 10 &&
+ if (index >= 8 && index <= 9 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP5))
+ return 0;
+ if (index >= 10 && index <= 11 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP6))
+ return 0;
+ if (index >= 12 && index <= 13 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP7))
+ return 0;
+ if (index >= 14 && index <= 15 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP8))
+ return 0;
+ if (index >= 16 && index <= 17 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP9))
+ return 0;
+ if (index >= 18 && index <= 19 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP10))
+ return 0;
+
+ if (index >= 20 && index <= 22 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
return 0;
- if (index >= 11 && index <= 13 &&
+ if (index >= 23 && index <= 25 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
return 0;
- if (index >= 14 && index <= 16 &&
+ if (index >= 26 && index <= 28 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3))
return 0;
@@ -827,6 +876,24 @@ static int __init i8k_init_hwmon(void)
err = i8k_get_temp_type(3);
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
+ err = i8k_get_temp_type(4);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP5;
+ err = i8k_get_temp_type(5);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP6;
+ err = i8k_get_temp_type(6);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP7;
+ err = i8k_get_temp_type(7);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP8;
+ err = i8k_get_temp_type(8);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP9;
+ err = i8k_get_temp_type(9);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP10;
/* First fan attributes, if fan status or type is OK */
err = i8k_get_fan_status(0);
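
[Editor's illustrative note] Because the dell-smm change grows i8k_attrs[], the index ranges in i8k_is_visible() are renumbered to stay aligned with the array positions (temp5..temp10 take indices 8-19, pushing the fan/pwm triplets to 20-28); the position comments next to each entry are what keep that mapping auditable. A minimal sketch of the general .is_visible gating pattern, with hypothetical flag and index values:

/* Illustrative only: gate sysfs attributes by array index in .is_visible. */
#include <linux/bits.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

#define MY_HAVE_TEMP5	BIT(4)	/* hypothetical capability flag */

static unsigned int my_feature_flags;

static umode_t my_is_visible(struct kobject *kobj, struct attribute *attr,
			     int index)
{
	/* indices 8 and 9 would be temp5_input/temp5_label in the attrs[] array */
	if (index >= 8 && index <= 9 && !(my_feature_flags & MY_HAVE_TEMP5))
		return 0;		/* hide the attribute */

	return attr->mode;		/* otherwise expose it with its default mode */
}
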
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index ca54ce5c8e10..83023798e489 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -2455,7 +2455,7 @@ static int f71882fg_probe(struct platform_device *pdev)
case f71869a:
/* These always have signed auto point temps */
data->auto_point_temp_signed = 1;
- /* Fall through to select correct fan/pwm reg bank! */
+ /* Fall through - to select correct fan/pwm reg bank! */
case f71889fg:
case f71889ed:
case f71889a:
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index b267510daeb2..b7e453298409 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -264,7 +264,7 @@ show(RAW, alarms, alarms);
show(BOOL, beep_enable, beep_enable);
show(BEEP_MASK, beep_mask, beep_mask);
-static ssize_t show_fan_input(struct device *dev,
+static ssize_t fan_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -273,8 +273,8 @@ static ssize_t show_fan_input(struct device *dev,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct gl518_data *data = gl518_update_device(dev);
@@ -282,8 +282,8 @@ static ssize_t show_fan_min(struct device *dev,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct gl518_data *data = gl518_update_device(dev);
@@ -350,8 +350,9 @@ set_high(IN, in_max3, voltage_max[3], GL518_REG_VIN3_LIMIT);
set_bits(BOOL, beep_enable, beep_enable, GL518_REG_CONF, 0x04, 2);
set(BEEP_MASK, beep_mask, beep_mask, GL518_REG_ALARM);
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct gl518_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -383,8 +384,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct gl518_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -427,40 +429,36 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL);
-static DEVICE_ATTR(temp1_max, S_IWUSR|S_IRUGO, show_temp_max1, set_temp_max1);
-static DEVICE_ATTR(temp1_max_hyst, S_IWUSR|S_IRUGO,
- show_temp_hyst1, set_temp_hyst1);
-static DEVICE_ATTR(fan1_auto, S_IWUSR|S_IRUGO, show_fan_auto1, set_fan_auto1);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR|S_IRUGO,
- show_fan_min, set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR|S_IRUGO,
- show_fan_min, set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR|S_IRUGO,
- show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR|S_IRUGO,
- show_fan_div, set_fan_div, 1);
-static DEVICE_ATTR(in0_input, S_IRUGO, show_in_input0, NULL);
-static DEVICE_ATTR(in1_input, S_IRUGO, show_in_input1, NULL);
-static DEVICE_ATTR(in2_input, S_IRUGO, show_in_input2, NULL);
-static DEVICE_ATTR(in3_input, S_IRUGO, show_in_input3, NULL);
-static DEVICE_ATTR(in0_min, S_IWUSR|S_IRUGO, show_in_min0, set_in_min0);
-static DEVICE_ATTR(in1_min, S_IWUSR|S_IRUGO, show_in_min1, set_in_min1);
-static DEVICE_ATTR(in2_min, S_IWUSR|S_IRUGO, show_in_min2, set_in_min2);
-static DEVICE_ATTR(in3_min, S_IWUSR|S_IRUGO, show_in_min3, set_in_min3);
-static DEVICE_ATTR(in0_max, S_IWUSR|S_IRUGO, show_in_max0, set_in_max0);
-static DEVICE_ATTR(in1_max, S_IWUSR|S_IRUGO, show_in_max1, set_in_max1);
-static DEVICE_ATTR(in2_max, S_IWUSR|S_IRUGO, show_in_max2, set_in_max2);
-static DEVICE_ATTR(in3_max, S_IWUSR|S_IRUGO, show_in_max3, set_in_max3);
-static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
-static DEVICE_ATTR(beep_enable, S_IWUSR|S_IRUGO,
- show_beep_enable, set_beep_enable);
-static DEVICE_ATTR(beep_mask, S_IWUSR|S_IRUGO,
- show_beep_mask, set_beep_mask);
-
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static DEVICE_ATTR(temp1_input, 0444, show_temp_input1, NULL);
+static DEVICE_ATTR(temp1_max, 0644, show_temp_max1, set_temp_max1);
+static DEVICE_ATTR(temp1_max_hyst, 0644,
+ show_temp_hyst1, set_temp_hyst1);
+static DEVICE_ATTR(fan1_auto, 0644, show_fan_auto1, set_fan_auto1);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static DEVICE_ATTR(in0_input, 0444, show_in_input0, NULL);
+static DEVICE_ATTR(in1_input, 0444, show_in_input1, NULL);
+static DEVICE_ATTR(in2_input, 0444, show_in_input2, NULL);
+static DEVICE_ATTR(in3_input, 0444, show_in_input3, NULL);
+static DEVICE_ATTR(in0_min, 0644, show_in_min0, set_in_min0);
+static DEVICE_ATTR(in1_min, 0644, show_in_min1, set_in_min1);
+static DEVICE_ATTR(in2_min, 0644, show_in_min2, set_in_min2);
+static DEVICE_ATTR(in3_min, 0644, show_in_min3, set_in_min3);
+static DEVICE_ATTR(in0_max, 0644, show_in_max0, set_in_max0);
+static DEVICE_ATTR(in1_max, 0644, show_in_max1, set_in_max1);
+static DEVICE_ATTR(in2_max, 0644, show_in_max2, set_in_max2);
+static DEVICE_ATTR(in3_max, 0644, show_in_max3, set_in_max3);
+static DEVICE_ATTR(alarms, 0444, show_alarms, NULL);
+static DEVICE_ATTR(beep_enable, 0644,
+ show_beep_enable, set_beep_enable);
+static DEVICE_ATTR(beep_mask, 0644,
+ show_beep_mask, set_beep_mask);
+
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -468,24 +466,24 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 6);
-static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t beep_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct gl518_data *data = gl518_update_device(dev);
return sprintf(buf, "%u\n", (data->beep_mask >> bitnr) & 1);
}
-static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t beep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gl518_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -511,13 +509,13 @@ static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 0);
-static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 1);
-static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 2);
-static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 3);
-static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 4);
-static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 5);
-static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 6);
+static SENSOR_DEVICE_ATTR_RW(in0_beep, beep, 0);
+static SENSOR_DEVICE_ATTR_RW(in1_beep, beep, 1);
+static SENSOR_DEVICE_ATTR_RW(in2_beep, beep, 2);
+static SENSOR_DEVICE_ATTR_RW(in3_beep, beep, 3);
+static SENSOR_DEVICE_ATTR_RW(temp1_beep, beep, 4);
+static SENSOR_DEVICE_ATTR_RW(fan1_beep, beep, 5);
+static SENSOR_DEVICE_ATTR_RW(fan2_beep, beep, 6);
static struct attribute *gl518_attributes[] = {
&dev_attr_in3_input.attr,
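
[Editor's illustrative note] The gl518sm conversion (and the gl520sm one that follows) relies on the SENSOR_DEVICE_ATTR_RO/_RW helpers from <linux/hwmon-sysfs.h>, which build the callback names by appending _show/_store to their second argument; that is why show_fan_input() becomes fan_input_show() and set_fan_div() becomes fan_div_store(). The octal modes replace the symbolic ones one for one: S_IRUGO is 0444 and S_IRUGO | S_IWUSR is 0644. Roughly, the helpers expand as below (paraphrased, not copied verbatim from the header):

/* Approximate expansion of the hwmon attribute helpers used above. */
#include <linux/hwmon-sysfs.h>

#define MY_SENSOR_ATTR_RO(_name, _func, _index)				\
	SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)
#define MY_SENSOR_ATTR_RW(_name, _func, _index)				\
	SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)

/* So SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0) expects callbacks named
 * fan_div_show() and fan_div_store(), matching the renames in this diff.
 */
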
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 4ff32ee67fb6..7d430ad955fe 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -216,8 +216,8 @@ static DEVICE_ATTR_RO(cpu0_vid);
#define IN_CLAMP(val) clamp_val(val, 0, 255 * 19)
#define IN_TO_REG(val) DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
-static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -229,8 +229,8 @@ static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(r));
}
-static ssize_t get_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -242,8 +242,8 @@ static ssize_t get_in_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(r));
}
-static ssize_t get_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -255,8 +255,8 @@ static ssize_t get_in_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(r));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -289,8 +289,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -323,31 +323,21 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, get_in_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, get_in_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, get_in_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, get_in_input, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, get_in_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR,
- get_in_min, set_in_min, 0);
-static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR,
- get_in_min, set_in_min, 1);
-static SENSOR_DEVICE_ATTR(in2_min, S_IRUGO | S_IWUSR,
- get_in_min, set_in_min, 2);
-static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO | S_IWUSR,
- get_in_min, set_in_min, 3);
-static SENSOR_DEVICE_ATTR(in4_min, S_IRUGO | S_IWUSR,
- get_in_min, set_in_min, 4);
-static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR,
- get_in_max, set_in_max, 0);
-static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR,
- get_in_max, set_in_max, 1);
-static SENSOR_DEVICE_ATTR(in2_max, S_IRUGO | S_IWUSR,
- get_in_max, set_in_max, 2);
-static SENSOR_DEVICE_ATTR(in3_max, S_IRUGO | S_IWUSR,
- get_in_max, set_in_max, 3);
-static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
- get_in_max, set_in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in_input, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
@@ -359,8 +349,8 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
DIV_ROUND_CLOSEST(480000, \
FAN_CLAMP(val, div) << (div)))
-static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -369,8 +359,8 @@ static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
data->fan_div[n]));
}
-static ssize_t get_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -379,8 +369,8 @@ static ssize_t get_fan_min(struct device *dev, struct device_attribute *attr,
data->fan_div[n]));
}
-static ssize_t get_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -395,8 +385,9 @@ static ssize_t fan1_off_show(struct device *dev,
return sprintf(buf, "%d\n", data->fan_off);
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -434,8 +425,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -508,24 +500,20 @@ static ssize_t fan1_off_store(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR,
- get_fan_min, set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO | S_IWUSR,
- get_fan_min, set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
- get_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
- get_fan_div, set_fan_div, 1);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static DEVICE_ATTR_RW(fan1_off);
#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
#define TEMP_CLAMP(val) clamp_val(val, -130000, 125000)
#define TEMP_TO_REG(val) (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 130)
-static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -533,8 +521,8 @@ static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_input[n]));
}
-static ssize_t get_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -542,8 +530,8 @@ static ssize_t get_temp_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[n]));
}
-static ssize_t get_temp_max_hyst(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_max_hyst_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -551,8 +539,9 @@ static ssize_t get_temp_max_hyst(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max_hyst[n]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -571,8 +560,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_max_hyst(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t temp_max_hyst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -592,16 +582,12 @@ static ssize_t set_temp_max_hyst(struct device *dev, struct device_attribute
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_temp_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_temp_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
- get_temp_max, set_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR,
- get_temp_max, set_temp_max, 1);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
- get_temp_max_hyst, set_temp_max_hyst, 0);
-static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR,
- get_temp_max_hyst, set_temp_max_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_max_hyst, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_max_hyst, 1);
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -674,8 +660,8 @@ static DEVICE_ATTR_RO(alarms);
static DEVICE_ATTR_RW(beep_enable);
static DEVICE_ATTR_RW(beep_mask);
-static ssize_t get_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bit_nr = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -683,18 +669,18 @@ static ssize_t get_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (data->alarms >> bit_nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, get_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, get_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, get_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, get_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, get_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, get_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, get_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, get_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, get_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 7);
-static ssize_t get_beep(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t beep_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
@@ -702,8 +688,8 @@ static ssize_t get_beep(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (data->beep_mask >> bitnr) & 1);
}
-static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t beep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gl520_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -729,15 +715,15 @@ static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 0);
-static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 1);
-static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 2);
-static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 3);
-static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 4);
-static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 5);
-static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 6);
-static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 7);
-static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 7);
+static SENSOR_DEVICE_ATTR_RW(in0_beep, beep, 0);
+static SENSOR_DEVICE_ATTR_RW(in1_beep, beep, 1);
+static SENSOR_DEVICE_ATTR_RW(in2_beep, beep, 2);
+static SENSOR_DEVICE_ATTR_RW(in3_beep, beep, 3);
+static SENSOR_DEVICE_ATTR_RW(temp1_beep, beep, 4);
+static SENSOR_DEVICE_ATTR_RW(fan1_beep, beep, 5);
+static SENSOR_DEVICE_ATTR_RW(fan2_beep, beep, 6);
+static SENSOR_DEVICE_ATTR_RW(temp2_beep, beep, 7);
+static SENSOR_DEVICE_ATTR_RW(in4_beep, beep, 7);
static struct attribute *gl520_attributes[] = {
&dev_attr_cpu0_vid.attr,
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index a3974cddef07..f1bf67aca9e8 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -307,7 +307,7 @@ static DEVICE_ATTR_RO(pwm1_mode);
static DEVICE_ATTR_RO(fan1_min);
static DEVICE_ATTR_RO(fan1_max);
static DEVICE_ATTR_RO(fan1_input);
-static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, fan1_input_show, set_rpm);
+static DEVICE_ATTR(fan1_target, 0644, fan1_input_show, set_rpm);
static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 0ae1ee1dbf76..d167fcfec765 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -171,7 +171,7 @@ out:
* Will be called on read access to temp1_input sysfs attribute.
* Returns number of bytes written into buffer, negative errno on error.
*/
-static ssize_t hih6130_show_temperature(struct device *dev,
+static ssize_t hih6130_temperature_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -193,7 +193,7 @@ static ssize_t hih6130_show_temperature(struct device *dev,
* Will be called on read access to humidity1_input sysfs attribute.
* Returns number of bytes written into buffer, negative errno on error.
*/
-static ssize_t hih6130_show_humidity(struct device *dev,
+static ssize_t hih6130_humidity_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hih6130 *hih6130 = dev_get_drvdata(dev);
@@ -206,10 +206,8 @@ static ssize_t hih6130_show_humidity(struct device *dev,
}
/* sysfs attributes */
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, hih6130_show_temperature,
- NULL, 0);
-static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, hih6130_show_humidity,
- NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, hih6130_temperature, 0);
+static SENSOR_DEVICE_ATTR_RO(humidity1_input, hih6130_humidity, 0);
static struct attribute *hih6130_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -254,8 +252,17 @@ static const struct i2c_device_id hih6130_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hih6130_id);
+static const struct of_device_id hih6130_of_match[] = {
+ { .compatible = "honeywell,hih6130", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hih6130_of_match);
+
static struct i2c_driver hih6130_driver = {
- .driver.name = "hih6130",
+ .driver = {
+ .name = "hih6130",
+ .of_match_table = of_match_ptr(hih6130_of_match),
+ },
.probe = hih6130_probe,
.id_table = hih6130_id,
};
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 36ed50d4b276..c22dc1e07911 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -278,10 +278,10 @@ static struct attribute *hwmon_genattr(struct device *dev,
if (!mode)
return ERR_PTR(-ENOENT);
- if ((mode & S_IRUGO) && ((is_string && !ops->read_string) ||
+ if ((mode & 0444) && ((is_string && !ops->read_string) ||
(!is_string && !ops->read)))
return ERR_PTR(-EINVAL);
- if ((mode & S_IWUGO) && !ops->write)
+ if ((mode & 0222) && !ops->write)
return ERR_PTR(-EINVAL);
hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
index 400e0675a90b..a51038c6597d 100644
--- a/drivers/hwmon/i5500_temp.c
+++ b/drivers/hwmon/i5500_temp.c
@@ -58,7 +58,7 @@ static ssize_t temp1_input_show(struct device *dev,
return sprintf(buf, "%ld\n", temp);
}
-static ssize_t show_thresh(struct device *dev,
+static ssize_t thresh_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev->parent);
@@ -72,7 +72,7 @@ static ssize_t show_thresh(struct device *dev,
return sprintf(buf, "%ld\n", temp);
}
-static ssize_t show_alarm(struct device *dev,
+static ssize_t alarm_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev->parent);
@@ -84,11 +84,11 @@ static ssize_t show_alarm(struct device *dev,
}
static DEVICE_ATTR_RO(temp1_input);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit, thresh, 0xE2);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_hyst, thresh, 0xEC);
+static SENSOR_DEVICE_ATTR_RO(temp1_max, thresh, 0xEE);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 1);
static struct attribute *i5500_temp_attrs[] = {
&dev_attr_temp1_input.attr,
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index a4edc43dd060..2cf73d8eec1c 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -296,7 +296,7 @@ static int i5k_amb_hwmon_init(struct platform_device *pdev)
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_label", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
- iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
+ iattr->s_attr.dev_attr.attr.mode = 0444;
iattr->s_attr.dev_attr.show = show_label;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
@@ -311,7 +311,7 @@ static int i5k_amb_hwmon_init(struct platform_device *pdev)
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_input", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
- iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
+ iattr->s_attr.dev_attr.attr.mode = 0444;
iattr->s_attr.dev_attr.show = show_amb_temp;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
@@ -326,7 +326,7 @@ static int i5k_amb_hwmon_init(struct platform_device *pdev)
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_min", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
- iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO;
+ iattr->s_attr.dev_attr.attr.mode = 0644;
iattr->s_attr.dev_attr.show = show_amb_min;
iattr->s_attr.dev_attr.store = store_amb_min;
iattr->s_attr.index = k;
@@ -342,7 +342,7 @@ static int i5k_amb_hwmon_init(struct platform_device *pdev)
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_mid", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
- iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO;
+ iattr->s_attr.dev_attr.attr.mode = 0644;
iattr->s_attr.dev_attr.show = show_amb_mid;
iattr->s_attr.dev_attr.store = store_amb_mid;
iattr->s_attr.index = k;
@@ -358,7 +358,7 @@ static int i5k_amb_hwmon_init(struct platform_device *pdev)
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_max", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
- iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO;
+ iattr->s_attr.dev_attr.attr.mode = 0644;
iattr->s_attr.dev_attr.show = show_amb_max;
iattr->s_attr.dev_attr.store = store_amb_max;
iattr->s_attr.index = k;
@@ -374,7 +374,7 @@ static int i5k_amb_hwmon_init(struct platform_device *pdev)
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_alarm", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
- iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
+ iattr->s_attr.dev_attr.attr.mode = 0444;
iattr->s_attr.dev_attr.show = show_amb_alarm;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 9e92673f6913..db63c1295cb2 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -813,25 +813,24 @@ static void aem_bmc_gone(int iface)
/* sysfs support functions */
/* AEM device name */
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct aem_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s%d\n", DRVNAME, data->ver_major);
}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(name, name, 0);
/* AEM device version */
-static ssize_t show_version(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct aem_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d.%d\n", data->ver_major, data->ver_minor);
}
-static SENSOR_DEVICE_ATTR(version, S_IRUGO, show_version, NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(version, version, 0);
/* Display power use */
static ssize_t aem_show_power(struct device *dev,
@@ -931,7 +930,7 @@ static int aem_register_sensors(struct aem_data *data,
while (ro->label) {
sysfs_attr_init(&sensors->dev_attr.attr);
sensors->dev_attr.attr.name = ro->label;
- sensors->dev_attr.attr.mode = S_IRUGO;
+ sensors->dev_attr.attr.mode = 0444;
sensors->dev_attr.show = ro->show;
sensors->index = ro->index;
@@ -948,7 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
while (rw->label) {
sysfs_attr_init(&sensors->dev_attr.attr);
sensors->dev_attr.attr.name = rw->label;
- sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
+ sensors->dev_attr.attr.mode = 0644;
sensors->dev_attr.show = rw->show;
sensors->dev_attr.store = rw->set;
sensors->index = rw->index;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index bb17a29af64c..5fd70faf0d16 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -269,12 +269,12 @@ static struct ibmpex_bmc_data *get_bmc_data(int iface)
return NULL;
}
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
return sprintf(buf, "%s\n", DRVNAME);
}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(name, name, 0);
static ssize_t ibmpex_show_sensor(struct device *dev,
struct device_attribute *devattr,
@@ -289,10 +289,9 @@ static ssize_t ibmpex_show_sensor(struct device *dev,
data->sensors[attr->index].values[attr->nr] * mult);
}
-static ssize_t ibmpex_reset_high_low(struct device *dev,
+static ssize_t ibmpex_high_low_store(struct device *dev,
struct device_attribute *devattr,
- const char *buf,
- size_t count)
+ const char *buf, size_t count)
{
struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
@@ -301,8 +300,7 @@ static ssize_t ibmpex_reset_high_low(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(reset_high_low, S_IWUSR, NULL,
- ibmpex_reset_high_low, 0);
+static SENSOR_DEVICE_ATTR_WO(reset_high_low, ibmpex_high_low, 0);
static int is_power_sensor(const char *sensor_id, int len)
{
@@ -358,7 +356,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
data->sensors[sensor].attr[func].dev_attr.attr.name = n;
- data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
+ data->sensors[sensor].attr[func].dev_attr.attr.mode = 0444;
data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
data->sensors[sensor].attr[func].index = sensor;
data->sensors[sensor].attr[func].nr = func;
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index eed66e533ee2..5c3c08449de7 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -129,7 +129,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
a->dev_attr.show = iio_hwmon_read_val;
- a->dev_attr.attr.mode = S_IRUGO;
+ a->dev_attr.attr.mode = 0444;
a->index = i;
st->attrs[i] = &a->dev_attr.attr;
}
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index aa0768ce8aea..e3854463db84 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -230,9 +230,9 @@ static u16 ina209_reg_from_interval(u16 config, long interval)
return (config & 0xf807) | (adc << 3) | (adc << 7);
}
-static ssize_t ina209_set_interval(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t ina209_interval_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
struct ina209_data *data = ina209_update_device(dev);
long val;
@@ -257,7 +257,7 @@ static ssize_t ina209_set_interval(struct device *dev,
return count;
}
-static ssize_t ina209_show_interval(struct device *dev,
+static ssize_t ina209_interval_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct ina209_data *data = dev_get_drvdata(dev);
@@ -279,10 +279,9 @@ static u16 ina209_reset_history_regs[] = {
INA209_POWER_PEAK
};
-static ssize_t ina209_reset_history(struct device *dev,
+static ssize_t ina209_history_store(struct device *dev,
struct device_attribute *da,
- const char *buf,
- size_t count)
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct ina209_data *data = dev_get_drvdata(dev);
@@ -306,10 +305,9 @@ static ssize_t ina209_reset_history(struct device *dev,
return count;
}
-static ssize_t ina209_set_value(struct device *dev,
- struct device_attribute *da,
- const char *buf,
- size_t count)
+static ssize_t ina209_value_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
struct ina209_data *data = ina209_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -337,9 +335,8 @@ abort:
return count;
}
-static ssize_t ina209_show_value(struct device *dev,
- struct device_attribute *da,
- char *buf)
+static ssize_t ina209_value_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct ina209_data *data = ina209_update_device(dev);
@@ -352,9 +349,8 @@ static ssize_t ina209_show_value(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%ld\n", val);
}
-static ssize_t ina209_show_alarm(struct device *dev,
- struct device_attribute *da,
- char *buf)
+static ssize_t ina209_alarm_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct ina209_data *data = ina209_update_device(dev);
@@ -374,82 +370,65 @@ static ssize_t ina209_show_alarm(struct device *dev,
}
/* Shunt voltage, history, limits, alarms */
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina209_show_value, NULL,
- INA209_SHUNT_VOLTAGE);
-static SENSOR_DEVICE_ATTR(in0_input_highest, S_IRUGO, ina209_show_value, NULL,
- INA209_SHUNT_VOLTAGE_POS_PEAK);
-static SENSOR_DEVICE_ATTR(in0_input_lowest, S_IRUGO, ina209_show_value, NULL,
- INA209_SHUNT_VOLTAGE_NEG_PEAK);
-static SENSOR_DEVICE_ATTR(in0_reset_history, S_IWUSR, NULL,
- ina209_reset_history, (1 << 0) | (1 << 1));
-static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_SHUNT_VOLTAGE_POS_WARN);
-static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_SHUNT_VOLTAGE_NEG_WARN);
-static SENSOR_DEVICE_ATTR(in0_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_CRITICAL_DAC_POS);
-static SENSOR_DEVICE_ATTR(in0_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_CRITICAL_DAC_NEG);
-
-static SENSOR_DEVICE_ATTR(in0_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 11);
-static SENSOR_DEVICE_ATTR(in0_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 12);
-static SENSOR_DEVICE_ATTR(in0_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 6);
-static SENSOR_DEVICE_ATTR(in0_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 7);
+static SENSOR_DEVICE_ATTR_RO(in0_input, ina209_value, INA209_SHUNT_VOLTAGE);
+static SENSOR_DEVICE_ATTR_RO(in0_input_highest, ina209_value,
+ INA209_SHUNT_VOLTAGE_POS_PEAK);
+static SENSOR_DEVICE_ATTR_RO(in0_input_lowest, ina209_value,
+ INA209_SHUNT_VOLTAGE_NEG_PEAK);
+static SENSOR_DEVICE_ATTR_WO(in0_reset_history, ina209_history,
+ (1 << 0) | (1 << 1));
+static SENSOR_DEVICE_ATTR_RW(in0_max, ina209_value,
+ INA209_SHUNT_VOLTAGE_POS_WARN);
+static SENSOR_DEVICE_ATTR_RW(in0_min, ina209_value,
+ INA209_SHUNT_VOLTAGE_NEG_WARN);
+static SENSOR_DEVICE_ATTR_RW(in0_crit_max, ina209_value,
+ INA209_CRITICAL_DAC_POS);
+static SENSOR_DEVICE_ATTR_RW(in0_crit_min, ina209_value,
+ INA209_CRITICAL_DAC_NEG);
+
+static SENSOR_DEVICE_ATTR_RO(in0_min_alarm, ina209_alarm, 1 << 11);
+static SENSOR_DEVICE_ATTR_RO(in0_max_alarm, ina209_alarm, 1 << 12);
+static SENSOR_DEVICE_ATTR_RO(in0_crit_min_alarm, ina209_alarm, 1 << 6);
+static SENSOR_DEVICE_ATTR_RO(in0_crit_max_alarm, ina209_alarm, 1 << 7);
/* Bus voltage, history, limits, alarms */
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ina209_show_value, NULL,
- INA209_BUS_VOLTAGE);
-static SENSOR_DEVICE_ATTR(in1_input_highest, S_IRUGO, ina209_show_value, NULL,
- INA209_BUS_VOLTAGE_MAX_PEAK);
-static SENSOR_DEVICE_ATTR(in1_input_lowest, S_IRUGO, ina209_show_value, NULL,
- INA209_BUS_VOLTAGE_MIN_PEAK);
-static SENSOR_DEVICE_ATTR(in1_reset_history, S_IWUSR, NULL,
- ina209_reset_history, (1 << 2) | (1 << 3));
-static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_BUS_VOLTAGE_OVER_WARN);
-static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_BUS_VOLTAGE_UNDER_WARN);
-static SENSOR_DEVICE_ATTR(in1_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_BUS_VOLTAGE_OVER_LIMIT);
-static SENSOR_DEVICE_ATTR(in1_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_BUS_VOLTAGE_UNDER_LIMIT);
-
-static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 14);
-static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 15);
-static SENSOR_DEVICE_ATTR(in1_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 9);
-static SENSOR_DEVICE_ATTR(in1_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 10);
+static SENSOR_DEVICE_ATTR_RO(in1_input, ina209_value, INA209_BUS_VOLTAGE);
+static SENSOR_DEVICE_ATTR_RO(in1_input_highest, ina209_value,
+ INA209_BUS_VOLTAGE_MAX_PEAK);
+static SENSOR_DEVICE_ATTR_RO(in1_input_lowest, ina209_value,
+ INA209_BUS_VOLTAGE_MIN_PEAK);
+static SENSOR_DEVICE_ATTR_WO(in1_reset_history, ina209_history,
+ (1 << 2) | (1 << 3));
+static SENSOR_DEVICE_ATTR_RW(in1_max, ina209_value,
+ INA209_BUS_VOLTAGE_OVER_WARN);
+static SENSOR_DEVICE_ATTR_RW(in1_min, ina209_value,
+ INA209_BUS_VOLTAGE_UNDER_WARN);
+static SENSOR_DEVICE_ATTR_RW(in1_crit_max, ina209_value,
+ INA209_BUS_VOLTAGE_OVER_LIMIT);
+static SENSOR_DEVICE_ATTR_RW(in1_crit_min, ina209_value,
+ INA209_BUS_VOLTAGE_UNDER_LIMIT);
+
+static SENSOR_DEVICE_ATTR_RO(in1_min_alarm, ina209_alarm, 1 << 14);
+static SENSOR_DEVICE_ATTR_RO(in1_max_alarm, ina209_alarm, 1 << 15);
+static SENSOR_DEVICE_ATTR_RO(in1_crit_min_alarm, ina209_alarm, 1 << 9);
+static SENSOR_DEVICE_ATTR_RO(in1_crit_max_alarm, ina209_alarm, 1 << 10);
/* Power */
-static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina209_show_value, NULL,
- INA209_POWER);
-static SENSOR_DEVICE_ATTR(power1_input_highest, S_IRUGO, ina209_show_value,
- NULL, INA209_POWER_PEAK);
-static SENSOR_DEVICE_ATTR(power1_reset_history, S_IWUSR, NULL,
- ina209_reset_history, 1 << 4);
-static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_POWER_WARN);
-static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO | S_IWUSR, ina209_show_value,
- ina209_set_value, INA209_POWER_OVER_LIMIT);
-
-static SENSOR_DEVICE_ATTR(power1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 13);
-static SENSOR_DEVICE_ATTR(power1_crit_alarm, S_IRUGO, ina209_show_alarm, NULL,
- 1 << 8);
+static SENSOR_DEVICE_ATTR_RO(power1_input, ina209_value, INA209_POWER);
+static SENSOR_DEVICE_ATTR_RO(power1_input_highest, ina209_value,
+ INA209_POWER_PEAK);
+static SENSOR_DEVICE_ATTR_WO(power1_reset_history, ina209_history, 1 << 4);
+static SENSOR_DEVICE_ATTR_RW(power1_max, ina209_value, INA209_POWER_WARN);
+static SENSOR_DEVICE_ATTR_RW(power1_crit, ina209_value,
+ INA209_POWER_OVER_LIMIT);
+
+static SENSOR_DEVICE_ATTR_RO(power1_max_alarm, ina209_alarm, 1 << 13);
+static SENSOR_DEVICE_ATTR_RO(power1_crit_alarm, ina209_alarm, 1 << 8);
/* Current */
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina209_show_value, NULL,
- INA209_CURRENT);
+static SENSOR_DEVICE_ATTR_RO(curr1_input, ina209_value, INA209_CURRENT);
-static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
- ina209_show_interval, ina209_set_interval, 0);
+static SENSOR_DEVICE_ATTR_RW(update_interval, ina209_interval, 0);
/*
* Finally, construct an array of pointers to members of the above objects,
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index e90ccac8bebb..3626b87a5fd2 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -111,6 +111,7 @@ struct ina3221_input {
* @inputs: Array of channel input source specific structures
* @lock: mutex lock to serialize sysfs attribute accesses
* @reg_config: Register value of INA3221_CONFIG
+ * @single_shot: running in single-shot operating mode
*/
struct ina3221_data {
struct device *pm_dev;
@@ -119,6 +120,8 @@ struct ina3221_data {
struct ina3221_input inputs[INA3221_NUM_CHANNELS];
struct mutex lock;
u32 reg_config;
+
+ bool single_shot;
};
static inline bool ina3221_is_enabled(struct ina3221_data *ina, int channel)
@@ -188,6 +191,11 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
if (!ina3221_is_enabled(ina, channel))
return -ENODATA;
+ /* Write CONFIG register to trigger a single-shot measurement */
+ if (ina->single_shot)
+ regmap_write(ina->regmap, INA3221_CONFIG,
+ ina->reg_config);
+
ret = ina3221_wait_for_data(ina);
if (ret)
return ret;
@@ -232,6 +240,11 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
if (!ina3221_is_enabled(ina, channel))
return -ENODATA;
+ /* Write CONFIG register to trigger a single-shot measurement */
+ if (ina->single_shot)
+ regmap_write(ina->regmap, INA3221_CONFIG,
+ ina->reg_config);
+
ret = ina3221_wait_for_data(ina);
if (ret)
return ret;
@@ -499,7 +512,7 @@ static const struct hwmon_chip_info ina3221_chip_info = {
};
/* Extra attribute groups */
-static ssize_t ina3221_show_shunt(struct device *dev,
+static ssize_t ina3221_shunt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
@@ -510,9 +523,9 @@ static ssize_t ina3221_show_shunt(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", input->shunt_resistor);
}
-static ssize_t ina3221_set_shunt(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t ina3221_shunt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
struct ina3221_data *ina = dev_get_drvdata(dev);
@@ -533,12 +546,9 @@ static ssize_t ina3221_set_shunt(struct device *dev,
}
/* shunt resistance */
-static SENSOR_DEVICE_ATTR(shunt1_resistor, S_IRUGO | S_IWUSR,
- ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL1);
-static SENSOR_DEVICE_ATTR(shunt2_resistor, S_IRUGO | S_IWUSR,
- ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL2);
-static SENSOR_DEVICE_ATTR(shunt3_resistor, S_IRUGO | S_IWUSR,
- ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL3);
+static SENSOR_DEVICE_ATTR_RW(shunt1_resistor, ina3221_shunt, INA3221_CHANNEL1);
+static SENSOR_DEVICE_ATTR_RW(shunt2_resistor, ina3221_shunt, INA3221_CHANNEL2);
+static SENSOR_DEVICE_ATTR_RW(shunt3_resistor, ina3221_shunt, INA3221_CHANNEL3);
static struct attribute *ina3221_attrs[] = {
&sensor_dev_attr_shunt1_resistor.dev_attr.attr,
@@ -617,6 +627,8 @@ static int ina3221_probe_from_dt(struct device *dev, struct ina3221_data *ina)
if (!np)
return 0;
+ ina->single_shot = of_property_read_bool(np, "ti,single-shot");
+
for_each_child_of_node(np, child) {
ret = ina3221_probe_child_from_dt(dev, child, ina);
if (ret)
@@ -666,6 +678,10 @@ static int ina3221_probe(struct i2c_client *client,
/* The driver will be reset, so use reset value */
ina->reg_config = INA3221_CONFIG_DEFAULT;
+ /* Clear continuous bit to use single-shot mode */
+ if (ina->single_shot)
+ ina->reg_config &= ~INA3221_CONFIG_MODE_CONTINUOUS;
+
/* Disable channels if their inputs are disconnected */
for (i = 0; i < INA3221_NUM_CHANNELS; i++) {
if (ina->inputs[i].disconnected)
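The ina3221 change is the one functional addition in this group: with "ti,single-shot" set in the device tree, probe clears the continuous-conversion bit, and each voltage/current read first rewrites CONFIG so the chip performs exactly one conversion before the driver waits for data. A rough userspace sketch of that read path (register and bit values below are placeholders, not the driver's constants):

#include <stdbool.h>
#include <stdio.h>

#define MOCK_CONFIG_REG			0x00
#define MOCK_CONFIG_MODE_CONTINUOUS	0x0004	/* placeholder bit */

struct mock_ina3221 {
	unsigned int reg_config;
	bool single_shot;
};

static void mock_regmap_write(unsigned int reg, unsigned int val)
{
	printf("write reg 0x%02x <- 0x%04x\n", reg, val);
}

static int mock_wait_for_data(void)
{
	return 0;	/* pretend the conversion-ready flag came up */
}

static int mock_read_channel(struct mock_ina3221 *ina)
{
	/*
	 * In single-shot mode the chip only converts when CONFIG is
	 * written, so each read triggers one conversion and then waits
	 * for it to complete, as the new hunks do.
	 */
	if (ina->single_shot)
		mock_regmap_write(MOCK_CONFIG_REG, ina->reg_config);

	return mock_wait_for_data();
}

int main(void)
{
	struct mock_ina3221 ina = {
		/* probe clears the continuous bit when single-shot is set */
		.reg_config = 0x7777 & ~MOCK_CONFIG_MODE_CONTINUOUS,
		.single_shot = true,
	};

	return mock_read_channel(&ina);
}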
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index e5234f953a6d..4fa482ae0eb5 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -390,21 +390,21 @@ static umode_t jc42_is_visible(const void *_data, enum hwmon_sensor_types type,
{
const struct jc42_data *data = _data;
unsigned int config = data->config;
- umode_t mode = S_IRUGO;
+ umode_t mode = 0444;
switch (attr) {
case hwmon_temp_min:
case hwmon_temp_max:
if (!(config & JC42_CFG_EVENT_LOCK))
- mode |= S_IWUSR;
+ mode |= 0200;
break;
case hwmon_temp_crit:
if (!(config & JC42_CFG_TCRIT_LOCK))
- mode |= S_IWUSR;
+ mode |= 0200;
break;
case hwmon_temp_crit_hyst:
if (!(config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK)))
- mode |= S_IWUSR;
+ mode |= 0200;
break;
case hwmon_temp_input:
case hwmon_temp_max_hyst:
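The jc42 hunk keeps the same visibility logic, just expressed in octal: start from a read-only 0444 and add the owner-write bit only when the relevant lock bit in the configuration register is clear. A condensed stand-alone version (the configuration bit positions are placeholders):

#include <stdio.h>

#define MOCK_CFG_EVENT_LOCK	(1 << 6)	/* placeholder positions */
#define MOCK_CFG_TCRIT_LOCK	(1 << 7)

enum mock_attr { MOCK_TEMP_MAX, MOCK_TEMP_CRIT, MOCK_TEMP_INPUT };

static unsigned int mock_is_visible(unsigned int config, enum mock_attr attr)
{
	unsigned int mode = 0444;

	switch (attr) {
	case MOCK_TEMP_MAX:
		if (!(config & MOCK_CFG_EVENT_LOCK))
			mode |= 0200;
		break;
	case MOCK_TEMP_CRIT:
		if (!(config & MOCK_CFG_TCRIT_LOCK))
			mode |= 0200;
		break;
	case MOCK_TEMP_INPUT:
		break;
	}
	return mode;
}

int main(void)
{
	printf("temp_max, unlocked: %o\n", mock_is_visible(0, MOCK_TEMP_MAX));
	printf("temp_crit, locked:  %o\n",
	       mock_is_visible(MOCK_CFG_TCRIT_LOCK, MOCK_TEMP_CRIT));
	return 0;
}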
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index e59f9113fb93..93a5d51f3c6d 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -109,8 +109,8 @@ static ssize_t name_show(struct device *dev, struct device_attribute
}
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute_2 *attr =
to_sensor_dev_attr_2(devattr);
@@ -129,10 +129,10 @@ static ssize_t show_temp(struct device *dev,
/* core, place */
-static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0);
-static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1);
-static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
-static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
+static SENSOR_DEVICE_ATTR_2_RO(temp1_input, temp, 0, 0);
+static SENSOR_DEVICE_ATTR_2_RO(temp2_input, temp, 0, 1);
+static SENSOR_DEVICE_ATTR_2_RO(temp3_input, temp, 1, 0);
+static SENSOR_DEVICE_ATTR_2_RO(temp4_input, temp, 1, 1);
static DEVICE_ATTR_RO(name);
static const struct pci_device_id k8temp_ids[] = {
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index 84d791bdb62d..d470295760e2 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -282,8 +282,8 @@ static long pem_get_fan(u8 *data, int len, int index)
* Show boolean, either a fault or an alarm.
* .nr points to the register, .index is the bit mask to check
*/
-static ssize_t pem_show_bool(struct device *dev,
- struct device_attribute *da, char *buf)
+static ssize_t pem_bool_show(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
struct pem_data *data = pem_update_device(dev);
@@ -296,7 +296,7 @@ static ssize_t pem_show_bool(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", !!status);
}
-static ssize_t pem_show_data(struct device *dev, struct device_attribute *da,
+static ssize_t pem_data_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -312,7 +312,7 @@ static ssize_t pem_show_data(struct device *dev, struct device_attribute *da,
return snprintf(buf, PAGE_SIZE, "%ld\n", value);
}
-static ssize_t pem_show_input(struct device *dev, struct device_attribute *da,
+static ssize_t pem_input_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -328,7 +328,7 @@ static ssize_t pem_show_input(struct device *dev, struct device_attribute *da,
return snprintf(buf, PAGE_SIZE, "%ld\n", value);
}
-static ssize_t pem_show_fan(struct device *dev, struct device_attribute *da,
+static ssize_t pem_fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -345,53 +345,42 @@ static ssize_t pem_show_fan(struct device *dev, struct device_attribute *da,
}
/* Voltages */
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, pem_show_data, NULL,
- PEM_DATA_VOUT_LSB);
-static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_1, ALRM1_VOUT_OUT_LIMIT);
-static SENSOR_DEVICE_ATTR_2(in1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_1, ALRM1_OV_VOLT_SHUTDOWN);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, pem_show_input, NULL,
- PEM_INPUT_VOLTAGE);
-static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_1,
- ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT);
+static SENSOR_DEVICE_ATTR_RO(in1_input, pem_data, PEM_DATA_VOUT_LSB);
+static SENSOR_DEVICE_ATTR_2_RO(in1_alarm, pem_bool, PEM_DATA_ALARM_1,
+ ALRM1_VOUT_OUT_LIMIT);
+static SENSOR_DEVICE_ATTR_2_RO(in1_crit_alarm, pem_bool, PEM_DATA_ALARM_1,
+ ALRM1_OV_VOLT_SHUTDOWN);
+static SENSOR_DEVICE_ATTR_RO(in2_input, pem_input, PEM_INPUT_VOLTAGE);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, pem_bool, PEM_DATA_ALARM_1,
+ ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT);
/* Currents */
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, pem_show_data, NULL,
- PEM_DATA_CURRENT);
-static SENSOR_DEVICE_ATTR_2(curr1_alarm, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_1, ALRM1_VIN_OVERCURRENT);
+static SENSOR_DEVICE_ATTR_RO(curr1_input, pem_data, PEM_DATA_CURRENT);
+static SENSOR_DEVICE_ATTR_2_RO(curr1_alarm, pem_bool, PEM_DATA_ALARM_1,
+ ALRM1_VIN_OVERCURRENT);
/* Power */
-static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, pem_show_input, NULL,
- PEM_INPUT_POWER_LSB);
-static SENSOR_DEVICE_ATTR_2(power1_alarm, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_1, ALRM1_POWER_LIMIT);
+static SENSOR_DEVICE_ATTR_RO(power1_input, pem_input, PEM_INPUT_POWER_LSB);
+static SENSOR_DEVICE_ATTR_2_RO(power1_alarm, pem_bool, PEM_DATA_ALARM_1,
+ ALRM1_POWER_LIMIT);
/* Fans */
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, pem_show_fan, NULL,
- PEM_FAN_FAN1);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, pem_show_fan, NULL,
- PEM_FAN_FAN2);
-static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, pem_show_fan, NULL,
- PEM_FAN_FAN3);
-static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_2, ALRM2_FAN_FAULT);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, pem_fan, PEM_FAN_FAN1);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, pem_fan, PEM_FAN_FAN2);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, pem_fan, PEM_FAN_FAN3);
+static SENSOR_DEVICE_ATTR_2_RO(fan1_alarm, pem_bool, PEM_DATA_ALARM_2,
+ ALRM2_FAN_FAULT);
/* Temperatures */
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, pem_show_data, NULL,
- PEM_DATA_TEMP);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, pem_show_data, NULL,
- PEM_DATA_TEMP_MAX);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, pem_show_data, NULL,
- PEM_DATA_TEMP_CRIT);
-static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_1, ALRM1_TEMP_WARNING);
-static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_1, ALRM1_TEMP_SHUTDOWN);
-static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, pem_show_bool, NULL,
- PEM_DATA_ALARM_2, ALRM2_TEMP_FAULT);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, pem_data, PEM_DATA_TEMP);
+static SENSOR_DEVICE_ATTR_RO(temp1_max, pem_data, PEM_DATA_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit, pem_data, PEM_DATA_TEMP_CRIT);
+static SENSOR_DEVICE_ATTR_2_RO(temp1_alarm, pem_bool, PEM_DATA_ALARM_1,
+ ALRM1_TEMP_WARNING);
+static SENSOR_DEVICE_ATTR_2_RO(temp1_crit_alarm, pem_bool, PEM_DATA_ALARM_1,
+ ALRM1_TEMP_SHUTDOWN);
+static SENSOR_DEVICE_ATTR_2_RO(temp1_fault, pem_bool, PEM_DATA_ALARM_2,
+ ALRM2_TEMP_FAULT);
static struct attribute *pem_attributes[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 9653bb870a47..d1d728aa31d2 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -62,8 +62,8 @@ struct lm73_data {
/*-----------------------------------------------------------------------*/
-static ssize_t set_temp(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t temp_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm73_data *data = dev_get_drvdata(dev);
@@ -81,7 +81,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
return (err < 0) ? err : count;
}
-static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+static ssize_t temp_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -98,8 +98,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
}
-static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t convrate_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct lm73_data *data = dev_get_drvdata(dev);
unsigned long convrate;
@@ -133,7 +133,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
+static ssize_t convrate_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct lm73_data *data = dev_get_drvdata(dev);
@@ -143,7 +143,7 @@ static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
return scnprintf(buf, PAGE_SIZE, "%hu\n", lm73_convrates[res]);
}
-static ssize_t show_maxmin_alarm(struct device *dev,
+static ssize_t maxmin_alarm_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -168,18 +168,14 @@ abort:
/* sysfs attributes for hwmon */
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
- show_temp, set_temp, LM73_REG_MAX);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
- show_temp, set_temp, LM73_REG_MIN);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
- show_temp, NULL, LM73_REG_INPUT);
-static SENSOR_DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO,
- show_convrate, set_convrate, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
- show_maxmin_alarm, NULL, LM73_CTRL_HI_SHIFT);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
- show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, LM73_REG_MAX);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp, LM73_REG_MIN);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, LM73_REG_INPUT);
+static SENSOR_DEVICE_ATTR_RW(update_interval, convrate, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, maxmin_alarm,
+ LM73_CTRL_HI_SHIFT);
+static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, maxmin_alarm,
+ LM73_CTRL_LO_SHIFT);
static struct attribute *lm73_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 62acb9f16ec5..447af07450f1 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -176,16 +176,16 @@ static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_chip:
switch (attr) {
case hwmon_chip_update_interval:
- return S_IRUGO;
+ return 0444;
}
break;
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- return S_IRUGO;
+ return 0444;
case hwmon_temp_max:
case hwmon_temp_max_hyst:
- return S_IRUGO | S_IWUSR;
+ return 0644;
}
break;
default:
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 69b05cc2f60e..c27073dc24c1 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -137,7 +137,7 @@ static struct lm77_data *lm77_update_device(struct device *dev)
/* sysfs stuff */
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -146,7 +146,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", data->temp[attr->index]);
}
-static ssize_t show_temp_hyst(struct device *dev,
+static ssize_t temp_hyst_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -160,8 +160,9 @@ static ssize_t show_temp_hyst(struct device *dev,
return sprintf(buf, "%d\n", temp);
}
-static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm77_data *data = dev_get_drvdata(dev);
@@ -186,9 +187,9 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
* hysteresis is stored as a relative value on the chip, so it has to be
* converted first.
*/
-static ssize_t set_temp_hyst(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_hyst_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct lm77_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -208,7 +209,7 @@ static ssize_t set_temp_hyst(struct device *dev,
return count;
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -216,22 +217,18 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, t_input);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_crit);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_min);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_max);
-
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst,
- set_temp_hyst, t_crit);
-static SENSOR_DEVICE_ATTR(temp1_min_hyst, S_IRUGO, show_temp_hyst, NULL, t_min);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp_hyst, NULL, t_max);
-
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, t_input);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp, t_crit);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp, t_min);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, t_max);
+
+static SENSOR_DEVICE_ATTR_RW(temp1_crit_hyst, temp_hyst, t_crit);
+static SENSOR_DEVICE_ATTR_RO(temp1_min_hyst, temp_hyst, t_min);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_hyst, temp_hyst, t_max);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 1);
static struct attribute *lm77_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 0e30fa00204c..54cf24a2b0ed 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -269,7 +269,7 @@ done:
* Sysfs stuff
*/
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm80_data *data = lm80_update_device(dev);
@@ -281,8 +281,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr][index]));
}
-static ssize_t set_in(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lm80_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -303,7 +303,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int index = to_sensor_dev_attr_2(attr)->index;
@@ -315,8 +315,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[index])));
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm80_data *data = lm80_update_device(dev);
@@ -325,8 +325,8 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr_2(attr)->index;
int nr = to_sensor_dev_attr_2(attr)->nr;
@@ -352,8 +352,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm80_data *data = dev_get_drvdata(dev);
@@ -393,8 +394,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
}
rv = lm80_read_value(client, LM80_REG_FANDIV);
- if (rv < 0)
+ if (rv < 0) {
+ mutex_unlock(&data->update_lock);
return rv;
+ }
reg = (rv & ~(3 << (2 * (nr + 1))))
| (data->fan_div[nr] << (2 * (nr + 1)));
lm80_write_value(client, LM80_REG_FANDIV, reg);
@@ -408,7 +411,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -418,8 +421,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}
-static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm80_data *data = dev_get_drvdata(dev);
@@ -446,7 +450,7 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", data->alarms);
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -456,72 +460,50 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
- show_in, set_in, i_min, 0);
-static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
- show_in, set_in, i_min, 1);
-static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
- show_in, set_in, i_min, 2);
-static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
- show_in, set_in, i_min, 3);
-static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
- show_in, set_in, i_min, 4);
-static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
- show_in, set_in, i_min, 5);
-static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
- show_in, set_in, i_min, 6);
-static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
- show_in, set_in, i_max, 0);
-static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
- show_in, set_in, i_max, 1);
-static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
- show_in, set_in, i_max, 2);
-static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
- show_in, set_in, i_max, 3);
-static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
- show_in, set_in, i_max, 4);
-static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
- show_in, set_in, i_max, 5);
-static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
- show_in, set_in, i_max, 6);
-static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in, NULL, i_input, 0);
-static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in, NULL, i_input, 1);
-static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in, NULL, i_input, 2);
-static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in, NULL, i_input, 3);
-static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in, NULL, i_input, 4);
-static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_in, NULL, i_input, 5);
-static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO, show_in, NULL, i_input, 6);
-static SENSOR_DEVICE_ATTR_2(fan1_min, S_IWUSR | S_IRUGO,
- show_fan, set_fan_min, f_min, 0);
-static SENSOR_DEVICE_ATTR_2(fan2_min, S_IWUSR | S_IRUGO,
- show_fan, set_fan_min, f_min, 1);
-static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, f_input, 0);
-static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, f_input, 1);
-static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO,
- show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO,
- show_fan_div, set_fan_div, 1);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, t_input);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp,
- set_temp, t_hot_max);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp,
- set_temp, t_hot_hyst);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp,
- set_temp, t_os_max);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp,
- set_temp, t_os_hyst);
+static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, i_min, 0);
+static SENSOR_DEVICE_ATTR_2_RW(in1_min, in, i_min, 1);
+static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, i_min, 2);
+static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, i_min, 3);
+static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, i_min, 4);
+static SENSOR_DEVICE_ATTR_2_RW(in5_min, in, i_min, 5);
+static SENSOR_DEVICE_ATTR_2_RW(in6_min, in, i_min, 6);
+static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, i_max, 0);
+static SENSOR_DEVICE_ATTR_2_RW(in1_max, in, i_max, 1);
+static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, i_max, 2);
+static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, i_max, 3);
+static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, i_max, 4);
+static SENSOR_DEVICE_ATTR_2_RW(in5_max, in, i_max, 5);
+static SENSOR_DEVICE_ATTR_2_RW(in6_max, in, i_max, 6);
+static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, i_input, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, i_input, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, i_input, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, i_input, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, i_input, 4);
+static SENSOR_DEVICE_ATTR_2_RO(in5_input, in, i_input, 5);
+static SENSOR_DEVICE_ATTR_2_RO(in6_input, in, i_input, 6);
+static SENSOR_DEVICE_ATTR_2_RW(fan1_min, fan, f_min, 0);
+static SENSOR_DEVICE_ATTR_2_RW(fan2_min, fan, f_min, 1);
+static SENSOR_DEVICE_ATTR_2_RO(fan1_input, fan, f_input, 0);
+static SENSOR_DEVICE_ATTR_2_RO(fan2_input, fan, f_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, t_input);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, t_hot_max);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp, t_hot_hyst);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp, t_os_max);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit_hyst, temp, t_os_hyst);
static DEVICE_ATTR_RO(alarms);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 13);
/*
* Real code
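Besides the attribute conversion, the lm80 hunk fixes a real bug: the early return on a failed LM80_REG_FANDIV read previously left update_lock held, blocking every later write to the fan divider. A standalone illustration of the pattern, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static int mock_read_value(int reg)
{
	(void)reg;
	return -5;	/* simulate a bus error on every read */
}

/*
 * Mirrors the fixed fan_div_store(): any early return taken while
 * update_lock is held must drop the lock first, otherwise the next
 * writer blocks forever.
 */
static int store_fan_div(int reg)
{
	int rv;

	pthread_mutex_lock(&update_lock);
	rv = mock_read_value(reg);
	if (rv < 0) {
		pthread_mutex_unlock(&update_lock);	/* the added unlock */
		return rv;
	}
	/* ... update the divider ... */
	pthread_mutex_unlock(&update_lock);
	return 0;
}

int main(void)
{
	printf("first store:  %d\n", store_fan_div(0x05));
	printf("second store: %d\n", store_fan_div(0x05));	/* would hang without the fix */
	return 0;
}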
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index cbfd0bb7f135..5bb35dff3d76 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -158,7 +158,7 @@ static struct lm83_data *lm83_update_device(struct device *dev)
* Sysfs stuff
*/
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -166,8 +166,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}
-static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm83_data *data = dev_get_drvdata(dev);
@@ -195,8 +196,8 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *dummy,
return sprintf(buf, "%d\n", data->alarms);
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm83_data *data = lm83_update_device(dev);
@@ -205,36 +206,31 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp,
- set_temp, 4);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp,
- set_temp, 5);
-static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp,
- set_temp, 6);
-static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_temp,
- set_temp, 7);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp,
- set_temp, 8);
-static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp, NULL, 8);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, 4);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, 5);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp, 6);
+static SENSOR_DEVICE_ATTR_RW(temp4_max, temp, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp, 8);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit, temp, 8);
+static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp, 8);
+static SENSOR_DEVICE_ATTR_RO(temp4_crit, temp, 8);
/* Individual alarm files */
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 15);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp4_fault, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 13);
+static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 15);
/* Raw alarm file for compatibility */
static DEVICE_ATTR_RO(alarms);
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 0a325878e8f5..a95d48316f06 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
- lm85,
+ lm85, lm96000,
adm1027, adt7463, adt7468,
emc6d100, emc6d102, emc6d103, emc6d103s
};
@@ -198,13 +198,18 @@ static int RANGE_TO_REG(long range)
#define RANGE_FROM_REG(val) lm85_range_map[(val) & 0x0f]
/* These are the PWM frequency encodings */
-static const int lm85_freq_map[8] = { /* 1 Hz */
+static const int lm85_freq_map[] = { /* 1 Hz */
10, 15, 23, 30, 38, 47, 61, 94
};
-static const int adm1027_freq_map[8] = { /* 1 Hz */
+
+static const int lm96000_freq_map[] = { /* 1 Hz */
+ 10, 15, 23, 30, 38, 47, 61, 94,
+ 22500, 24000, 25700, 25700, 27700, 27700, 30000, 30000
+};
+
+static const int adm1027_freq_map[] = { /* 1 Hz */
11, 15, 22, 29, 35, 44, 59, 88
};
-#define FREQ_MAP_LEN 8
static int FREQ_TO_REG(const int *map,
unsigned int map_size, unsigned long freq)
@@ -212,9 +217,9 @@ static int FREQ_TO_REG(const int *map,
return find_closest(freq, map, map_size);
}
-static int FREQ_FROM_REG(const int *map, u8 reg)
+static int FREQ_FROM_REG(const int *map, unsigned int map_size, u8 reg)
{
- return map[reg & 0x07];
+ return map[reg % map_size];
}
/*
@@ -296,6 +301,8 @@ struct lm85_data {
struct i2c_client *client;
const struct attribute_group *groups[6];
const int *freq_map;
+ unsigned int freq_map_size;
+
enum chips type;
bool has_vid5; /* true if VID5 is configured for ADT7463 or ADT7468 */
@@ -514,7 +521,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
data->autofan[i].config =
lm85_read_value(client, LM85_REG_AFAN_CONFIG(i));
val = lm85_read_value(client, LM85_REG_AFAN_RANGE(i));
- data->pwm_freq[i] = val & 0x07;
+ data->pwm_freq[i] = val % data->freq_map_size;
data->zone[i].range = val >> 4;
data->autofan[i].min_pwm =
lm85_read_value(client, LM85_REG_AFAN_MINPWM(i));
@@ -791,7 +798,8 @@ static ssize_t show_pwm_freq(struct device *dev,
if (IS_ADT7468_HFPWM(data))
freq = 22500;
else
- freq = FREQ_FROM_REG(data->freq_map, data->pwm_freq[nr]);
+ freq = FREQ_FROM_REG(data->freq_map, data->freq_map_size,
+ data->pwm_freq[nr]);
return sprintf(buf, "%d\n", freq);
}
@@ -820,7 +828,7 @@ static ssize_t set_pwm_freq(struct device *dev,
lm85_write_value(client, ADT7468_REG_CFG5, data->cfg5);
} else { /* Low freq. mode */
data->pwm_freq[nr] = FREQ_TO_REG(data->freq_map,
- FREQ_MAP_LEN, val);
+ data->freq_map_size, val);
lm85_write_value(client, LM85_REG_AFAN_RANGE(nr),
(data->zone[nr].range << 4)
| data->pwm_freq[nr]);
@@ -1196,7 +1204,7 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
TEMP_FROM_REG(data->zone[nr].limit));
lm85_write_value(client, LM85_REG_AFAN_RANGE(nr),
((data->zone[nr].range & 0x0f) << 4)
- | (data->pwm_freq[nr] & 0x07));
+ | data->pwm_freq[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -1232,7 +1240,7 @@ static ssize_t set_temp_auto_temp_max(struct device *dev,
val - min);
lm85_write_value(client, LM85_REG_AFAN_RANGE(nr),
((data->zone[nr].range & 0x0f) << 4)
- | (data->pwm_freq[nr] & 0x07));
+ | data->pwm_freq[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1496,7 +1504,7 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
"Found Winbond WPCD377I, ignoring\n");
return -ENODEV;
}
- type_name = "lm85";
+ type_name = "lm96000";
break;
}
} else if (company == LM85_COMPANY_ANALOG_DEV) {
@@ -1569,9 +1577,15 @@ static int lm85_probe(struct i2c_client *client, const struct i2c_device_id *id)
case emc6d103:
case emc6d103s:
data->freq_map = adm1027_freq_map;
+ data->freq_map_size = ARRAY_SIZE(adm1027_freq_map);
+ break;
+ case lm96000:
+ data->freq_map = lm96000_freq_map;
+ data->freq_map_size = ARRAY_SIZE(lm96000_freq_map);
break;
default:
data->freq_map = lm85_freq_map;
+ data->freq_map_size = ARRAY_SIZE(lm85_freq_map);
}
/* Set the VRM version */
@@ -1618,6 +1632,7 @@ static const struct i2c_device_id lm85_id[] = {
{ "lm85", lm85 },
{ "lm85b", lm85 },
{ "lm85c", lm85 },
+ { "lm96000", lm96000 },
{ "emc6d100", emc6d100 },
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
@@ -1653,6 +1668,10 @@ static const struct of_device_id lm85_of_match[] = {
.data = (void *)lm85
},
{
+ .compatible = "ti,lm96000",
+ .data = (void *)lm96000
+ },
+ {
.compatible = "smsc,emc6d100",
.data = (void *)emc6d100
},
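The lm85 changes add LM96000 support, whose PWM frequency field has sixteen encodings instead of eight, so the helpers now carry the table size and reduce the register value modulo that size rather than masking with 0x07. A small demonstration using the frequency tables from the patch:

#include <stdio.h>

static const int lm96000_freq_map[] = {	/* 1 Hz */
	10, 15, 23, 30, 38, 47, 61, 94,
	22500, 24000, 25700, 25700, 27700, 27700, 30000, 30000
};

static const int lm85_freq_map[] = {	/* 1 Hz */
	10, 15, 23, 30, 38, 47, 61, 94
};

/* Mirrors the patched helper: clamp the register value into the table. */
static int freq_from_reg(const int *map, unsigned int map_size,
			 unsigned char reg)
{
	return map[reg % map_size];
}

int main(void)
{
	/* Register value 0x0c selects a high-frequency setting on LM96000 */
	printf("lm96000: %d Hz\n", freq_from_reg(lm96000_freq_map, 16, 0x0c));
	/* ... but wraps safely on chips with only eight encodings. */
	printf("lm85:    %d Hz\n", freq_from_reg(lm85_freq_map, 8, 0x0c));
	return 0;
}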
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index c2f411c290bf..480d70a51778 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1266,17 +1266,17 @@ static umode_t lm90_temp_is_visible(const void *data, u32 attr, int channel)
case hwmon_temp_emergency_alarm:
case hwmon_temp_emergency_hyst:
case hwmon_temp_fault:
- return S_IRUGO;
+ return 0444;
case hwmon_temp_min:
case hwmon_temp_max:
case hwmon_temp_crit:
case hwmon_temp_emergency:
case hwmon_temp_offset:
- return S_IRUGO | S_IWUSR;
+ return 0644;
case hwmon_temp_crit_hyst:
if (channel == 0)
- return S_IRUGO | S_IWUSR;
- return S_IRUGO;
+ return 0644;
+ return 0444;
default:
return 0;
}
@@ -1338,9 +1338,9 @@ static umode_t lm90_chip_is_visible(const void *data, u32 attr, int channel)
{
switch (attr) {
case hwmon_chip_update_interval:
- return S_IRUGO | S_IWUSR;
+ return 0644;
case hwmon_chip_alarms:
- return S_IRUGO;
+ return 0444;
default:
return 0;
}
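The lm90 hunk above (and the similar lm95241/lm95245/ltc4245 ones further down) only replaces the symbolic permission macros with their octal values; the resulting modes are identical. A tiny check of the equivalence, defining S_IRUGO the way the kernel does since it is not exposed to user space:

/* Sketch: verify the octal literals equal the symbolic modes they replace. */
#include <assert.h>
#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)	/* kernel-only macro, redefined here */

int main(void)
{
	assert(S_IRUGO == 0444);
	assert((S_IRUGO | S_IWUSR) == 0644);
	printf("0444 == S_IRUGO, 0644 == S_IRUGO | S_IWUSR\n");
	return 0;
}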
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index e7333f8e185c..39d8afe4279a 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -143,7 +143,7 @@ static struct lm92_data *lm92_update_device(struct device *dev)
return data;
}
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -152,8 +152,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}
-static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm92_data *data = dev_get_drvdata(dev);
@@ -173,7 +174,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
return count;
}
-static ssize_t show_temp_hyst(struct device *dev,
+static ssize_t temp_hyst_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -192,9 +193,9 @@ static ssize_t temp1_min_hyst_show(struct device *dev,
+ TEMP_FROM_REG(data->temp[t_hyst]));
}
-static ssize_t set_temp_hyst(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_hyst_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm92_data *data = dev_get_drvdata(dev);
@@ -224,7 +225,7 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp[t_input]));
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -232,21 +233,17 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (data->temp[t_input] >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, t_input);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_crit);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst,
- set_temp_hyst, t_crit);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_min);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, t_input);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp, t_crit);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit_hyst, temp_hyst, t_crit);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp, t_min);
static DEVICE_ATTR_RO(temp1_min_hyst);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_max);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp_hyst, NULL, t_max);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp, t_max);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_hyst, temp_hyst, t_max);
static DEVICE_ATTR_RO(alarms);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 1);
/*
* Detection and registration
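The lm92 conversion renames the callbacks to the <stem>_show / <stem>_store pattern because the SENSOR_DEVICE_ATTR_RO/_RW helpers derive the callback names by pasting a _show/_store suffix onto the stem they are given. A simplified stand-in (not the kernel macro) showing the token pasting involved:

/* Simplified stand-in for the hwmon attribute macros; not the kernel code. */
#include <stdio.h>

struct demo_attr {
	const char *name;
	unsigned int mode;
	int (*show)(int index);
	int (*store)(int index, int val);
	int index;
};

#define DEMO_ATTR_RO(_name, _func, _index) \
	{ #_name, 0444, _func##_show, NULL, _index }
#define DEMO_ATTR_RW(_name, _func, _index) \
	{ #_name, 0644, _func##_show, _func##_store, _index }

static int temp_show(int index)         { return 42 + index; }
static int temp_store(int index, int v) { (void)index; return v; }

static struct demo_attr attrs[] = {
	DEMO_ATTR_RO(temp1_input, temp, 0),	/* binds temp_show */
	DEMO_ATTR_RW(temp1_crit, temp, 1),	/* binds temp_show + temp_store */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		printf("%s mode=%o show=%d\n", attrs[i].name, attrs[i].mode,
		       attrs[i].show(attrs[i].index));
	return 0;
}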
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 77a0a83399b3..a0b5fbf958f3 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -1111,8 +1111,8 @@ static void lm93_update_client_min(struct lm93_data *data,
}
/* following are the sysfs callback functions */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -1120,25 +1120,25 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", LM93_IN_FROM_REG(nr, data->block3[nr]));
}
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 2);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 3);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in, NULL, 4);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in, NULL, 5);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_in, NULL, 6);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_in, NULL, 7);
-static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_in, NULL, 8);
-static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_in, NULL, 9);
-static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_in, NULL, 10);
-static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_in, NULL, 11);
-static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_in, NULL, 12);
-static SENSOR_DEVICE_ATTR(in14_input, S_IRUGO, show_in, NULL, 13);
-static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, show_in, NULL, 14);
-static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in, NULL, 15);
-
-static ssize_t show_in_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 0);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 1);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 2);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 3);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 4);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 5);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 6);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in, 7);
+static SENSOR_DEVICE_ATTR_RO(in9_input, in, 8);
+static SENSOR_DEVICE_ATTR_RO(in10_input, in, 9);
+static SENSOR_DEVICE_ATTR_RO(in11_input, in, 10);
+static SENSOR_DEVICE_ATTR_RO(in12_input, in, 11);
+static SENSOR_DEVICE_ATTR_RO(in13_input, in, 12);
+static SENSOR_DEVICE_ATTR_RO(in14_input, in, 13);
+static SENSOR_DEVICE_ATTR_RO(in15_input, in, 14);
+static SENSOR_DEVICE_ATTR_RO(in16_input, in, 15);
+
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -1154,7 +1154,7 @@ static ssize_t show_in_min(struct device *dev,
return sprintf(buf, "%ld\n", rc);
}
-static ssize_t store_in_min(struct device *dev, struct device_attribute *attr,
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -1185,41 +1185,25 @@ static ssize_t store_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 0);
-static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 1);
-static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 2);
-static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 3);
-static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 4);
-static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 5);
-static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 6);
-static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 7);
-static SENSOR_DEVICE_ATTR(in9_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 8);
-static SENSOR_DEVICE_ATTR(in10_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 9);
-static SENSOR_DEVICE_ATTR(in11_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 10);
-static SENSOR_DEVICE_ATTR(in12_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 11);
-static SENSOR_DEVICE_ATTR(in13_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 12);
-static SENSOR_DEVICE_ATTR(in14_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 13);
-static SENSOR_DEVICE_ATTR(in15_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 14);
-static SENSOR_DEVICE_ATTR(in16_min, S_IWUSR | S_IRUGO,
- show_in_min, store_in_min, 15);
-
-static ssize_t show_in_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in9_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in10_min, in_min, 9);
+static SENSOR_DEVICE_ATTR_RW(in11_min, in_min, 10);
+static SENSOR_DEVICE_ATTR_RW(in12_min, in_min, 11);
+static SENSOR_DEVICE_ATTR_RW(in13_min, in_min, 12);
+static SENSOR_DEVICE_ATTR_RW(in14_min, in_min, 13);
+static SENSOR_DEVICE_ATTR_RW(in15_min, in_min, 14);
+static SENSOR_DEVICE_ATTR_RW(in16_min, in_min, 15);
+
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -1235,7 +1219,7 @@ static ssize_t show_in_max(struct device *dev,
return sprintf(buf, "%ld\n", rc);
}
-static ssize_t store_in_max(struct device *dev, struct device_attribute *attr,
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -1266,61 +1250,46 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 0);
-static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 1);
-static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 2);
-static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 3);
-static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 4);
-static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 5);
-static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 6);
-static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 7);
-static SENSOR_DEVICE_ATTR(in9_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 8);
-static SENSOR_DEVICE_ATTR(in10_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 9);
-static SENSOR_DEVICE_ATTR(in11_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 10);
-static SENSOR_DEVICE_ATTR(in12_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 11);
-static SENSOR_DEVICE_ATTR(in13_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 12);
-static SENSOR_DEVICE_ATTR(in14_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 13);
-static SENSOR_DEVICE_ATTR(in15_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 14);
-static SENSOR_DEVICE_ATTR(in16_max, S_IWUSR | S_IRUGO,
- show_in_max, store_in_max, 15);
-
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RW(in9_max, in_max, 8);
+static SENSOR_DEVICE_ATTR_RW(in10_max, in_max, 9);
+static SENSOR_DEVICE_ATTR_RW(in11_max, in_max, 10);
+static SENSOR_DEVICE_ATTR_RW(in12_max, in_max, 11);
+static SENSOR_DEVICE_ATTR_RW(in13_max, in_max, 12);
+static SENSOR_DEVICE_ATTR_RW(in14_max, in_max, 13);
+static SENSOR_DEVICE_ATTR_RW(in15_max, in_max, 14);
+static SENSOR_DEVICE_ATTR_RW(in16_max, in_max, 15);
+
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
return sprintf(buf, "%d\n", LM93_TEMP_FROM_REG(data->block2[nr]));
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static ssize_t show_temp_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
return sprintf(buf, "%d\n", LM93_TEMP_FROM_REG(data->temp_lim[nr].min));
}
-static ssize_t store_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -1339,14 +1308,11 @@ static ssize_t store_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
- show_temp_min, store_temp_min, 0);
-static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO,
- show_temp_min, store_temp_min, 1);
-static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO,
- show_temp_min, store_temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
-static ssize_t show_temp_max(struct device *dev,
+static ssize_t temp_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -1354,8 +1320,9 @@ static ssize_t show_temp_max(struct device *dev,
return sprintf(buf, "%d\n", LM93_TEMP_FROM_REG(data->temp_lim[nr].max));
}
-static ssize_t store_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -1374,24 +1341,21 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
- show_temp_max, store_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO,
- show_temp_max, store_temp_max, 1);
-static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO,
- show_temp_max, store_temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
-static ssize_t show_temp_auto_base(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_base_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
return sprintf(buf, "%d\n", LM93_TEMP_FROM_REG(data->block10.base[nr]));
}
-static ssize_t store_temp_auto_base(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_auto_base_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -1410,14 +1374,11 @@ static ssize_t store_temp_auto_base(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_auto_base, S_IWUSR | S_IRUGO,
- show_temp_auto_base, store_temp_auto_base, 0);
-static SENSOR_DEVICE_ATTR(temp2_auto_base, S_IWUSR | S_IRUGO,
- show_temp_auto_base, store_temp_auto_base, 1);
-static SENSOR_DEVICE_ATTR(temp3_auto_base, S_IWUSR | S_IRUGO,
- show_temp_auto_base, store_temp_auto_base, 2);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_base, temp_auto_base, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_base, temp_auto_base, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_base, temp_auto_base, 2);
-static ssize_t show_temp_auto_boost(struct device *dev,
+static ssize_t temp_auto_boost_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -1425,7 +1386,7 @@ static ssize_t show_temp_auto_boost(struct device *dev,
return sprintf(buf, "%d\n", LM93_TEMP_FROM_REG(data->boost[nr]));
}
-static ssize_t store_temp_auto_boost(struct device *dev,
+static ssize_t temp_auto_boost_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1446,14 +1407,11 @@ static ssize_t store_temp_auto_boost(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_auto_boost, S_IWUSR | S_IRUGO,
- show_temp_auto_boost, store_temp_auto_boost, 0);
-static SENSOR_DEVICE_ATTR(temp2_auto_boost, S_IWUSR | S_IRUGO,
- show_temp_auto_boost, store_temp_auto_boost, 1);
-static SENSOR_DEVICE_ATTR(temp3_auto_boost, S_IWUSR | S_IRUGO,
- show_temp_auto_boost, store_temp_auto_boost, 2);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_boost, temp_auto_boost, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_boost, temp_auto_boost, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_boost, temp_auto_boost, 2);
-static ssize_t show_temp_auto_boost_hyst(struct device *dev,
+static ssize_t temp_auto_boost_hyst_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -1464,7 +1422,7 @@ static ssize_t show_temp_auto_boost_hyst(struct device *dev,
LM93_AUTO_BOOST_HYST_FROM_REGS(data, nr, mode));
}
-static ssize_t store_temp_auto_boost_hyst(struct device *dev,
+static ssize_t temp_auto_boost_hyst_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1490,18 +1448,12 @@ static ssize_t store_temp_auto_boost_hyst(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_auto_boost_hyst, S_IWUSR | S_IRUGO,
- show_temp_auto_boost_hyst,
- store_temp_auto_boost_hyst, 0);
-static SENSOR_DEVICE_ATTR(temp2_auto_boost_hyst, S_IWUSR | S_IRUGO,
- show_temp_auto_boost_hyst,
- store_temp_auto_boost_hyst, 1);
-static SENSOR_DEVICE_ATTR(temp3_auto_boost_hyst, S_IWUSR | S_IRUGO,
- show_temp_auto_boost_hyst,
- store_temp_auto_boost_hyst, 2);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_boost_hyst, temp_auto_boost_hyst, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_boost_hyst, temp_auto_boost_hyst, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_boost_hyst, temp_auto_boost_hyst, 2);
-static ssize_t show_temp_auto_offset(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
int nr = s_attr->index;
@@ -1513,9 +1465,9 @@ static ssize_t show_temp_auto_offset(struct device *dev,
nr, mode));
}
-static ssize_t store_temp_auto_offset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_auto_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
int nr = s_attr->index;
@@ -1542,81 +1494,46 @@ static ssize_t store_temp_auto_offset(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset1, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 0, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset2, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 1, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset3, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 2, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset4, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 3, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset5, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 4, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset6, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 5, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset7, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 6, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset8, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 7, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset9, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 8, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset10, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 9, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset11, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 10, 0);
-static SENSOR_DEVICE_ATTR_2(temp1_auto_offset12, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 11, 0);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset1, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 0, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset2, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 1, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset3, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 2, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset4, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 3, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset5, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 4, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset6, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 5, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset7, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 6, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset8, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 7, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset9, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 8, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset10, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 9, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset11, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 10, 1);
-static SENSOR_DEVICE_ATTR_2(temp2_auto_offset12, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 11, 1);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset1, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 0, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset2, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 1, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset3, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 2, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset4, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 3, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset5, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 4, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset6, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 5, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset7, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 6, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset8, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 7, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset9, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 8, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset10, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 9, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset11, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 10, 2);
-static SENSOR_DEVICE_ATTR_2(temp3_auto_offset12, S_IWUSR | S_IRUGO,
- show_temp_auto_offset, store_temp_auto_offset, 11, 2);
-
-static ssize_t show_temp_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset1, temp_auto_offset, 0, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset2, temp_auto_offset, 1, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset3, temp_auto_offset, 2, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset4, temp_auto_offset, 3, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset5, temp_auto_offset, 4, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset6, temp_auto_offset, 5, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset7, temp_auto_offset, 6, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset8, temp_auto_offset, 7, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset9, temp_auto_offset, 8, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset10, temp_auto_offset, 9, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset11, temp_auto_offset, 10, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_auto_offset12, temp_auto_offset, 11, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset1, temp_auto_offset, 0, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset2, temp_auto_offset, 1, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset3, temp_auto_offset, 2, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset4, temp_auto_offset, 3, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset5, temp_auto_offset, 4, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset6, temp_auto_offset, 5, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset7, temp_auto_offset, 6, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset8, temp_auto_offset, 7, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset9, temp_auto_offset, 8, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset10, temp_auto_offset, 9, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset11, temp_auto_offset, 10, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_auto_offset12, temp_auto_offset, 11, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset1, temp_auto_offset, 0, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset2, temp_auto_offset, 1, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset3, temp_auto_offset, 2, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset4, temp_auto_offset, 3, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset5, temp_auto_offset, 4, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset6, temp_auto_offset, 5, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset7, temp_auto_offset, 6, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset8, temp_auto_offset, 7, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset9, temp_auto_offset, 8, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset10, temp_auto_offset, 9, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset11, temp_auto_offset, 10, 2);
+static SENSOR_DEVICE_ATTR_2_RW(temp3_auto_offset12, temp_auto_offset, 11, 2);
+
+static ssize_t temp_auto_pwm_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
u8 reg, ctl4;
@@ -1627,9 +1544,9 @@ static ssize_t show_temp_auto_pwm_min(struct device *dev,
LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ));
}
-static ssize_t store_temp_auto_pwm_min(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_auto_pwm_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -1655,18 +1572,13 @@ static ssize_t store_temp_auto_pwm_min(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_auto_pwm_min, S_IWUSR | S_IRUGO,
- show_temp_auto_pwm_min,
- store_temp_auto_pwm_min, 0);
-static SENSOR_DEVICE_ATTR(temp2_auto_pwm_min, S_IWUSR | S_IRUGO,
- show_temp_auto_pwm_min,
- store_temp_auto_pwm_min, 1);
-static SENSOR_DEVICE_ATTR(temp3_auto_pwm_min, S_IWUSR | S_IRUGO,
- show_temp_auto_pwm_min,
- store_temp_auto_pwm_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_pwm_min, temp_auto_pwm_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_pwm_min, temp_auto_pwm_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_pwm_min, temp_auto_pwm_min, 2);
-static ssize_t show_temp_auto_offset_hyst(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_offset_hyst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -1675,9 +1587,9 @@ static ssize_t show_temp_auto_offset_hyst(struct device *dev,
data->auto_pwm_min_hyst[nr / 2], mode));
}
-static ssize_t store_temp_auto_offset_hyst(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_auto_offset_hyst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -1703,18 +1615,12 @@ static ssize_t store_temp_auto_offset_hyst(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_auto_offset_hyst, S_IWUSR | S_IRUGO,
- show_temp_auto_offset_hyst,
- store_temp_auto_offset_hyst, 0);
-static SENSOR_DEVICE_ATTR(temp2_auto_offset_hyst, S_IWUSR | S_IRUGO,
- show_temp_auto_offset_hyst,
- store_temp_auto_offset_hyst, 1);
-static SENSOR_DEVICE_ATTR(temp3_auto_offset_hyst, S_IWUSR | S_IRUGO,
- show_temp_auto_offset_hyst,
- store_temp_auto_offset_hyst, 2);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_offset_hyst, temp_auto_offset_hyst, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_offset_hyst, temp_auto_offset_hyst, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_offset_hyst, temp_auto_offset_hyst, 2);
-static ssize_t show_fan_input(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *s_attr = to_sensor_dev_attr(attr);
int nr = s_attr->index;
@@ -1723,13 +1629,13 @@ static ssize_t show_fan_input(struct device *dev,
return sprintf(buf, "%d\n", LM93_FAN_FROM_REG(data->block5[nr]));
}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_input, NULL, 3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan_input, 3);
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -1737,8 +1643,9 @@ static ssize_t show_fan_min(struct device *dev,
return sprintf(buf, "%d\n", LM93_FAN_FROM_REG(data->block8[nr]));
}
-static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -1757,14 +1664,10 @@ static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
- show_fan_min, store_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
- show_fan_min, store_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO,
- show_fan_min, store_fan_min, 2);
-static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
- show_fan_min, store_fan_min, 3);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
/*
* some tedious bit-twiddling here to deal with the register format:
@@ -1780,8 +1683,8 @@ static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
* T4 T3 T2 T1
*/
-static ssize_t show_fan_smart_tach(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_smart_tach_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -1819,9 +1722,9 @@ static void lm93_write_fan_smart_tach(struct i2c_client *client,
lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
}
-static ssize_t store_fan_smart_tach(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_smart_tach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -1849,16 +1752,12 @@ static ssize_t store_fan_smart_tach(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(fan1_smart_tach, S_IWUSR | S_IRUGO,
- show_fan_smart_tach, store_fan_smart_tach, 0);
-static SENSOR_DEVICE_ATTR(fan2_smart_tach, S_IWUSR | S_IRUGO,
- show_fan_smart_tach, store_fan_smart_tach, 1);
-static SENSOR_DEVICE_ATTR(fan3_smart_tach, S_IWUSR | S_IRUGO,
- show_fan_smart_tach, store_fan_smart_tach, 2);
-static SENSOR_DEVICE_ATTR(fan4_smart_tach, S_IWUSR | S_IRUGO,
- show_fan_smart_tach, store_fan_smart_tach, 3);
+static SENSOR_DEVICE_ATTR_RW(fan1_smart_tach, fan_smart_tach, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_smart_tach, fan_smart_tach, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_smart_tach, fan_smart_tach, 2);
+static SENSOR_DEVICE_ATTR_RW(fan4_smart_tach, fan_smart_tach, 3);
-static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
+static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -1876,8 +1775,8 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%ld\n", rc);
}
-static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -1904,11 +1803,11 @@ static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
-static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -1923,7 +1822,7 @@ static ssize_t show_pwm_enable(struct device *dev,
return sprintf(buf, "%ld\n", rc);
}
-static ssize_t store_pwm_enable(struct device *dev,
+static ssize_t pwm_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1961,13 +1860,11 @@ static ssize_t store_pwm_enable(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
- show_pwm_enable, store_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
- show_pwm_enable, store_pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
-static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t pwm_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -2001,9 +1898,9 @@ static void lm93_disable_fan_smart_tach(struct i2c_client *client,
lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
}
-static ssize_t store_pwm_freq(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_freq_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -2028,22 +1925,21 @@ static ssize_t store_pwm_freq(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_freq, S_IWUSR | S_IRUGO,
- show_pwm_freq, store_pwm_freq, 0);
-static SENSOR_DEVICE_ATTR(pwm2_freq, S_IWUSR | S_IRUGO,
- show_pwm_freq, store_pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
-static ssize_t show_pwm_auto_channels(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_channels_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
return sprintf(buf, "%d\n", data->block9[nr][LM93_PWM_CTL1]);
}
-static ssize_t store_pwm_auto_channels(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_auto_channels_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -2063,13 +1959,12 @@ static ssize_t store_pwm_auto_channels(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_auto_channels, S_IWUSR | S_IRUGO,
- show_pwm_auto_channels, store_pwm_auto_channels, 0);
-static SENSOR_DEVICE_ATTR(pwm2_auto_channels, S_IWUSR | S_IRUGO,
- show_pwm_auto_channels, store_pwm_auto_channels, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_channels, pwm_auto_channels, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_channels, pwm_auto_channels, 1);
-static ssize_t show_pwm_auto_spinup_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_spinup_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -2082,9 +1977,9 @@ static ssize_t show_pwm_auto_spinup_min(struct device *dev,
LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ));
}
-static ssize_t store_pwm_auto_spinup_min(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_auto_spinup_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -2109,15 +2004,12 @@ static ssize_t store_pwm_auto_spinup_min(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_min, S_IWUSR | S_IRUGO,
- show_pwm_auto_spinup_min,
- store_pwm_auto_spinup_min, 0);
-static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_min, S_IWUSR | S_IRUGO,
- show_pwm_auto_spinup_min,
- store_pwm_auto_spinup_min, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_spinup_min, pwm_auto_spinup_min, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_spinup_min, pwm_auto_spinup_min, 1);
-static ssize_t show_pwm_auto_spinup_time(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_spinup_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -2125,9 +2017,9 @@ static ssize_t show_pwm_auto_spinup_time(struct device *dev,
data->block9[nr][LM93_PWM_CTL3]));
}
-static ssize_t store_pwm_auto_spinup_time(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_auto_spinup_time_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -2149,12 +2041,8 @@ static ssize_t store_pwm_auto_spinup_time(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_time, S_IWUSR | S_IRUGO,
- show_pwm_auto_spinup_time,
- store_pwm_auto_spinup_time, 0);
-static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_time, S_IWUSR | S_IRUGO,
- show_pwm_auto_spinup_time,
- store_pwm_auto_spinup_time, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_spinup_time, pwm_auto_spinup_time, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_spinup_time, pwm_auto_spinup_time, 1);
static ssize_t pwm_auto_prochot_ramp_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -2220,7 +2108,7 @@ static ssize_t pwm_auto_vrdhot_ramp_store(struct device *dev,
static DEVICE_ATTR_RW(pwm_auto_vrdhot_ramp);
-static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
+static ssize_t vid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -2228,21 +2116,21 @@ static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", LM93_VID_FROM_REG(data->vid[nr]));
}
-static SENSOR_DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL, 0);
-static SENSOR_DEVICE_ATTR(cpu1_vid, S_IRUGO, show_vid, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(cpu0_vid, vid, 0);
+static SENSOR_DEVICE_ATTR_RO(cpu1_vid, vid, 1);
-static ssize_t show_prochot(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t prochot_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
return sprintf(buf, "%d\n", data->block4[nr].cur);
}
-static SENSOR_DEVICE_ATTR(prochot1, S_IRUGO, show_prochot, NULL, 0);
-static SENSOR_DEVICE_ATTR(prochot2, S_IRUGO, show_prochot, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(prochot1, prochot, 0);
+static SENSOR_DEVICE_ATTR_RO(prochot2, prochot, 1);
-static ssize_t show_prochot_avg(struct device *dev,
+static ssize_t prochot_avg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -2250,10 +2138,10 @@ static ssize_t show_prochot_avg(struct device *dev,
return sprintf(buf, "%d\n", data->block4[nr].avg);
}
-static SENSOR_DEVICE_ATTR(prochot1_avg, S_IRUGO, show_prochot_avg, NULL, 0);
-static SENSOR_DEVICE_ATTR(prochot2_avg, S_IRUGO, show_prochot_avg, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(prochot1_avg, prochot_avg, 0);
+static SENSOR_DEVICE_ATTR_RO(prochot2_avg, prochot_avg, 1);
-static ssize_t show_prochot_max(struct device *dev,
+static ssize_t prochot_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
@@ -2261,9 +2149,9 @@ static ssize_t show_prochot_max(struct device *dev,
return sprintf(buf, "%d\n", data->prochot_max[nr]);
}
-static ssize_t store_prochot_max(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t prochot_max_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -2283,15 +2171,13 @@ static ssize_t store_prochot_max(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(prochot1_max, S_IWUSR | S_IRUGO,
- show_prochot_max, store_prochot_max, 0);
-static SENSOR_DEVICE_ATTR(prochot2_max, S_IWUSR | S_IRUGO,
- show_prochot_max, store_prochot_max, 1);
+static SENSOR_DEVICE_ATTR_RW(prochot1_max, prochot_max, 0);
+static SENSOR_DEVICE_ATTR_RW(prochot2_max, prochot_max, 1);
static const u8 prochot_override_mask[] = { 0x80, 0x40 };
-static ssize_t show_prochot_override(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t prochot_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -2299,9 +2185,9 @@ static ssize_t show_prochot_override(struct device *dev,
(data->prochot_override & prochot_override_mask[nr]) ? 1 : 0);
}
-static ssize_t store_prochot_override(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t prochot_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -2324,13 +2210,11 @@ static ssize_t store_prochot_override(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(prochot1_override, S_IWUSR | S_IRUGO,
- show_prochot_override, store_prochot_override, 0);
-static SENSOR_DEVICE_ATTR(prochot2_override, S_IWUSR | S_IRUGO,
- show_prochot_override, store_prochot_override, 1);
+static SENSOR_DEVICE_ATTR_RW(prochot1_override, prochot_override, 0);
+static SENSOR_DEVICE_ATTR_RW(prochot2_override, prochot_override, 1);
-static ssize_t show_prochot_interval(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t prochot_interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -2342,9 +2226,9 @@ static ssize_t show_prochot_interval(struct device *dev,
return sprintf(buf, "%d\n", LM93_INTERVAL_FROM_REG(tmp));
}
-static ssize_t store_prochot_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t prochot_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = dev_get_drvdata(dev);
@@ -2369,10 +2253,8 @@ static ssize_t store_prochot_interval(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(prochot1_interval, S_IWUSR | S_IRUGO,
- show_prochot_interval, store_prochot_interval, 0);
-static SENSOR_DEVICE_ATTR(prochot2_interval, S_IWUSR | S_IRUGO,
- show_prochot_interval, store_prochot_interval, 1);
+static SENSOR_DEVICE_ATTR_RW(prochot1_interval, prochot_interval, 0);
+static SENSOR_DEVICE_ATTR_RW(prochot2_interval, prochot_interval, 1);
static ssize_t prochot_override_duty_cycle_show(struct device *dev,
struct device_attribute *attr,
@@ -2438,8 +2320,8 @@ static ssize_t prochot_short_store(struct device *dev,
static DEVICE_ATTR_RW(prochot_short);
-static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vrdhot_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = (to_sensor_dev_attr(attr))->index;
struct lm93_data *data = lm93_update_device(dev);
@@ -2447,8 +2329,8 @@ static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
data->block1.host_status_1 & (1 << (nr + 4)) ? 1 : 0);
}
-static SENSOR_DEVICE_ATTR(vrdhot1, S_IRUGO, show_vrdhot, NULL, 0);
-static SENSOR_DEVICE_ATTR(vrdhot2, S_IRUGO, show_vrdhot, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(vrdhot1, vrdhot, 0);
+static SENSOR_DEVICE_ATTR_RO(vrdhot2, vrdhot, 1);
static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
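The lm93 temp*_auto_offset* conversion keeps a single callback for the whole 12x3 matrix of files; each attribute carries two small integers (apparently the offset slot and the channel) that the handler reads back via to_sensor_dev_attr_2(). A simplified sketch of that two-index pattern, with demo types standing in for the kernel structures:

/* Simplified sketch of the two-index attribute pattern; not kernel code. */
#include <stdio.h>

struct demo_attr2 {
	const char *name;
	int nr;		/* offset slot within a channel (assumed meaning) */
	int index;	/* temperature channel (assumed meaning) */
};

#define DEMO_ATTR_2(_name, _nr, _index) { #_name, _nr, _index }

static struct demo_attr2 attrs[] = {
	DEMO_ATTR_2(temp1_auto_offset1, 0, 0),
	DEMO_ATTR_2(temp1_auto_offset2, 1, 0),
	DEMO_ATTR_2(temp3_auto_offset12, 11, 2),
};

static int temp_auto_offset_show(const struct demo_attr2 *a)
{
	/* A real driver would read register (channel, slot); just combine them. */
	return a->index * 100 + a->nr;
}

int main(void)
{
	for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		printf("%s -> %d\n", attrs[i].name,
		       temp_auto_offset_show(&attrs[i]));
	return 0;
}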
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 8c573e6e9726..3ff188937158 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -349,19 +349,19 @@ static umode_t lm95241_is_visible(const void *data,
case hwmon_chip:
switch (attr) {
case hwmon_chip_update_interval:
- return S_IRUGO | S_IWUSR;
+ return 0644;
}
break;
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- return S_IRUGO;
+ return 0444;
case hwmon_temp_fault:
- return S_IRUGO;
+ return 0444;
case hwmon_temp_min:
case hwmon_temp_max:
case hwmon_temp_type:
- return S_IRUGO | S_IWUSR;
+ return 0644;
}
break;
default:
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 996b50246175..e4cac3a04536 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -421,14 +421,14 @@ static umode_t lm95245_temp_is_visible(const void *data, u32 attr, int channel)
case hwmon_temp_max_hyst:
case hwmon_temp_crit_alarm:
case hwmon_temp_fault:
- return S_IRUGO;
+ return 0444;
case hwmon_temp_type:
case hwmon_temp_max:
case hwmon_temp_crit:
case hwmon_temp_offset:
- return S_IRUGO | S_IWUSR;
+ return 0644;
case hwmon_temp_crit_hyst:
- return (channel == 0) ? S_IRUGO | S_IWUSR : S_IRUGO;
+ return (channel == 0) ? 0644 : 0444;
default:
return 0;
}
@@ -442,7 +442,7 @@ static umode_t lm95245_is_visible(const void *data,
case hwmon_chip:
switch (attr) {
case hwmon_chip_update_interval:
- return S_IRUGO | S_IWUSR;
+ return 0644;
default:
return 0;
}
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
index 2aefdc58b242..be4e89645c0b 100644
--- a/drivers/hwmon/ltc2990.c
+++ b/drivers/hwmon/ltc2990.c
@@ -136,7 +136,7 @@ static int ltc2990_get_value(struct i2c_client *i2c, int index, int *result)
return 0;
}
-static ssize_t ltc2990_show_value(struct device *dev,
+static ssize_t ltc2990_value_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -170,26 +170,16 @@ static umode_t ltc2990_attrs_visible(struct kobject *kobj,
return 0;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_TEMP1);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_TEMP2);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_TEMP3);
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_CURR1);
-static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_CURR2);
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_IN0);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_IN1);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_IN2);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_IN3);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, ltc2990_show_value, NULL,
- LTC2990_IN4);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, ltc2990_value, LTC2990_TEMP1);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, ltc2990_value, LTC2990_TEMP2);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, ltc2990_value, LTC2990_TEMP3);
+static SENSOR_DEVICE_ATTR_RO(curr1_input, ltc2990_value, LTC2990_CURR1);
+static SENSOR_DEVICE_ATTR_RO(curr2_input, ltc2990_value, LTC2990_CURR2);
+static SENSOR_DEVICE_ATTR_RO(in0_input, ltc2990_value, LTC2990_IN0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, ltc2990_value, LTC2990_IN1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, ltc2990_value, LTC2990_IN2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, ltc2990_value, LTC2990_IN3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, ltc2990_value, LTC2990_IN4);
static struct attribute *ltc2990_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index b904cb547ffb..76c6fda76d95 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -131,7 +131,7 @@ static int ltc4151_get_value(struct ltc4151_data *data, u8 reg)
return val;
}
-static ssize_t ltc4151_show_value(struct device *dev,
+static ssize_t ltc4151_value_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -148,14 +148,11 @@ static ssize_t ltc4151_show_value(struct device *dev,
/*
* Input voltages.
*/
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4151_show_value, NULL,
- LTC4151_VIN_H);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4151_show_value, NULL,
- LTC4151_ADIN_H);
+static SENSOR_DEVICE_ATTR_RO(in1_input, ltc4151_value, LTC4151_VIN_H);
+static SENSOR_DEVICE_ATTR_RO(in2_input, ltc4151_value, LTC4151_ADIN_H);
/* Currents (via sense resistor) */
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4151_show_value, NULL,
- LTC4151_SENSE_H);
+static SENSOR_DEVICE_ATTR_RO(curr1_input, ltc4151_value, LTC4151_SENSE_H);
/*
* Finally, construct an array of pointers to members of the above objects,
diff --git a/drivers/hwmon/ltc4222.c b/drivers/hwmon/ltc4222.c
index 88f747292816..32248f351a6e 100644
--- a/drivers/hwmon/ltc4222.c
+++ b/drivers/hwmon/ltc4222.c
@@ -94,7 +94,7 @@ static int ltc4222_get_value(struct device *dev, u8 reg)
return val;
}
-static ssize_t ltc4222_show_value(struct device *dev,
+static ssize_t ltc4222_value_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -106,7 +106,7 @@ static ssize_t ltc4222_show_value(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
-static ssize_t ltc4222_show_bool(struct device *dev,
+static ssize_t ltc4222_bool_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
@@ -125,45 +125,39 @@ static ssize_t ltc4222_show_bool(struct device *dev,
}
/* Voltages */
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4222_show_value, NULL,
- LTC4222_SOURCE1);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4222_show_value, NULL,
- LTC4222_ADIN1);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, ltc4222_show_value, NULL,
- LTC4222_SOURCE2);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, ltc4222_show_value, NULL,
- LTC4222_ADIN2);
+static SENSOR_DEVICE_ATTR_RO(in1_input, ltc4222_value, LTC4222_SOURCE1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, ltc4222_value, LTC4222_ADIN1);
+static SENSOR_DEVICE_ATTR_RO(in3_input, ltc4222_value, LTC4222_SOURCE2);
+static SENSOR_DEVICE_ATTR_RO(in4_input, ltc4222_value, LTC4222_ADIN2);
/*
* Voltage alarms
* UV/OV faults are associated with the input voltage, and power bad and fet
* faults are associated with the output voltage.
*/
-static SENSOR_DEVICE_ATTR_2(in1_min_alarm, S_IRUGO, ltc4222_show_bool, NULL,
- LTC4222_FAULT1, FAULT_UV);
-static SENSOR_DEVICE_ATTR_2(in1_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
- LTC4222_FAULT1, FAULT_OV);
-static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, ltc4222_show_bool, NULL,
- LTC4222_FAULT1, FAULT_POWER_BAD | FAULT_FET_BAD);
-
-static SENSOR_DEVICE_ATTR_2(in3_min_alarm, S_IRUGO, ltc4222_show_bool, NULL,
- LTC4222_FAULT2, FAULT_UV);
-static SENSOR_DEVICE_ATTR_2(in3_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
- LTC4222_FAULT2, FAULT_OV);
-static SENSOR_DEVICE_ATTR_2(in4_alarm, S_IRUGO, ltc4222_show_bool, NULL,
- LTC4222_FAULT2, FAULT_POWER_BAD | FAULT_FET_BAD);
+static SENSOR_DEVICE_ATTR_2_RO(in1_min_alarm, ltc4222_bool, LTC4222_FAULT1,
+ FAULT_UV);
+static SENSOR_DEVICE_ATTR_2_RO(in1_max_alarm, ltc4222_bool, LTC4222_FAULT1,
+ FAULT_OV);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, ltc4222_bool, LTC4222_FAULT1,
+ FAULT_POWER_BAD | FAULT_FET_BAD);
+
+static SENSOR_DEVICE_ATTR_2_RO(in3_min_alarm, ltc4222_bool, LTC4222_FAULT2,
+ FAULT_UV);
+static SENSOR_DEVICE_ATTR_2_RO(in3_max_alarm, ltc4222_bool, LTC4222_FAULT2,
+ FAULT_OV);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, ltc4222_bool, LTC4222_FAULT2,
+ FAULT_POWER_BAD | FAULT_FET_BAD);
/* Current (via sense resistor) */
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4222_show_value, NULL,
- LTC4222_SENSE1);
-static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc4222_show_value, NULL,
- LTC4222_SENSE2);
+static SENSOR_DEVICE_ATTR_RO(curr1_input, ltc4222_value, LTC4222_SENSE1);
+static SENSOR_DEVICE_ATTR_RO(curr2_input, ltc4222_value, LTC4222_SENSE2);
/* Overcurrent alarm */
-static SENSOR_DEVICE_ATTR_2(curr1_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
- LTC4222_FAULT1, FAULT_OC);
-static SENSOR_DEVICE_ATTR_2(curr2_max_alarm, S_IRUGO, ltc4222_show_bool, NULL,
- LTC4222_FAULT2, FAULT_OC);
+static SENSOR_DEVICE_ATTR_2_RO(curr1_max_alarm, ltc4222_bool, LTC4222_FAULT1,
+ FAULT_OC);
+static SENSOR_DEVICE_ATTR_2_RO(curr2_max_alarm, ltc4222_bool, LTC4222_FAULT2,
+ FAULT_OC);
static struct attribute *ltc4222_attrs[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 082f0a0bd8a0..34d0653ca607 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -362,11 +362,11 @@ static umode_t ltc4245_is_visible(const void *_data,
case hwmon_in_input:
if (channel > 9 && !data->use_extra_gpios)
return 0;
- return S_IRUGO;
+ return 0444;
case hwmon_in_min_alarm:
if (channel > 8)
return 0;
- return S_IRUGO;
+ return 0444;
default:
return 0;
}
@@ -374,14 +374,14 @@ static umode_t ltc4245_is_visible(const void *_data,
switch (attr) {
case hwmon_curr_input:
case hwmon_curr_max_alarm:
- return S_IRUGO;
+ return 0444;
default:
return 0;
}
case hwmon_power:
switch (attr) {
case hwmon_power_input:
- return S_IRUGO;
+ return 0444;
default:
return 0;
}
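The S_IRUGO/S_IWUSR substitutions in ltc4245.c (and in the other is_visible callbacks and module_param() calls below) are value-preserving: S_IRUGO is the kernel shorthand for S_IRUSR | S_IRGRP | S_IROTH, so the octal constants encode exactly the umode_t bits that checkpatch now prefers to see spelled out. A tiny standalone check, using only the POSIX bits since S_IRUGO itself is kernel-internal:

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUGO           == 0444: readable by user, group and other */
	assert((S_IRUSR | S_IRGRP | S_IROTH) == 0444);
	/* S_IRUGO | S_IWUSR == 0644: additionally writable by the owner */
	assert((S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR) == 0644);
	return 0;
}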
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 0becd69842bb..6eb3415e0639 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -132,7 +132,7 @@ static int ltc4261_get_value(struct ltc4261_data *data, u8 reg)
return val;
}
-static ssize_t ltc4261_show_value(struct device *dev,
+static ssize_t ltc4261_value_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -146,7 +146,7 @@ static ssize_t ltc4261_show_value(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
-static ssize_t ltc4261_show_bool(struct device *dev,
+static ssize_t ltc4261_bool_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -166,10 +166,8 @@ static ssize_t ltc4261_show_bool(struct device *dev,
/*
* Input voltages.
*/
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ltc4261_show_value, NULL,
- LTC4261_ADIN_H);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4261_show_value, NULL,
- LTC4261_ADIN2_H);
+static SENSOR_DEVICE_ATTR_RO(in1_input, ltc4261_value, LTC4261_ADIN_H);
+static SENSOR_DEVICE_ATTR_RO(in2_input, ltc4261_value, LTC4261_ADIN2_H);
/*
* Voltage alarms. The chip has only one set of voltage alarm status bits,
@@ -179,22 +177,16 @@ static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, ltc4261_show_value, NULL,
* To ensure that the alarm condition is reported to the user, report it
* with both voltage sensors.
*/
-static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ltc4261_show_bool, NULL,
- FAULT_UV);
-static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ltc4261_show_bool, NULL,
- FAULT_OV);
-static SENSOR_DEVICE_ATTR(in2_min_alarm, S_IRUGO, ltc4261_show_bool, NULL,
- FAULT_UV);
-static SENSOR_DEVICE_ATTR(in2_max_alarm, S_IRUGO, ltc4261_show_bool, NULL,
- FAULT_OV);
+static SENSOR_DEVICE_ATTR_RO(in1_min_alarm, ltc4261_bool, FAULT_UV);
+static SENSOR_DEVICE_ATTR_RO(in1_max_alarm, ltc4261_bool, FAULT_OV);
+static SENSOR_DEVICE_ATTR_RO(in2_min_alarm, ltc4261_bool, FAULT_UV);
+static SENSOR_DEVICE_ATTR_RO(in2_max_alarm, ltc4261_bool, FAULT_OV);
/* Currents (via sense resistor) */
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4261_show_value, NULL,
- LTC4261_SENSE_H);
+static SENSOR_DEVICE_ATTR_RO(curr1_input, ltc4261_value, LTC4261_SENSE_H);
/* Overcurrent alarm */
-static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc4261_show_bool, NULL,
- FAULT_OC);
+static SENSOR_DEVICE_ATTR_RO(curr1_max_alarm, ltc4261_bool, FAULT_OC);
static struct attribute *ltc4261_attrs[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 162401aaef71..1c372f76cd0b 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -175,7 +175,7 @@ static struct max16065_data *max16065_update_device(struct device *dev)
return data;
}
-static ssize_t max16065_show_alarm(struct device *dev,
+static ssize_t max16065_alarm_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
@@ -193,7 +193,7 @@ static ssize_t max16065_show_alarm(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", !!val);
}
-static ssize_t max16065_show_input(struct device *dev,
+static ssize_t max16065_input_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -207,7 +207,7 @@ static ssize_t max16065_show_input(struct device *dev,
ADC_TO_MV(adc, data->range[attr->index]));
}
-static ssize_t max16065_show_current(struct device *dev,
+static ssize_t max16065_current_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct max16065_data *data = max16065_update_device(dev);
@@ -219,9 +219,9 @@ static ssize_t max16065_show_current(struct device *dev,
ADC_TO_CURR(data->curr_sense, data->curr_gain));
}
-static ssize_t max16065_set_limit(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t max16065_limit_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
struct max16065_data *data = dev_get_drvdata(dev);
@@ -246,7 +246,7 @@ static ssize_t max16065_set_limit(struct device *dev,
return count;
}
-static ssize_t max16065_show_limit(struct device *dev,
+static ssize_t max16065_limit_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
@@ -259,154 +259,93 @@ static ssize_t max16065_show_limit(struct device *dev,
/* Construct a sensor_device_attribute structure for each register */
/* Input voltages */
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, max16065_show_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, max16065_show_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, max16065_show_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, max16065_show_input, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, max16065_show_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, max16065_show_input, NULL, 5);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, max16065_show_input, NULL, 6);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, max16065_show_input, NULL, 7);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, max16065_show_input, NULL, 8);
-static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, max16065_show_input, NULL, 9);
-static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, max16065_show_input, NULL, 10);
-static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, max16065_show_input, NULL, 11);
-static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, max16065_show_input, NULL, 12);
+static SENSOR_DEVICE_ATTR_RO(in0_input, max16065_input, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, max16065_input, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, max16065_input, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, max16065_input, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, max16065_input, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, max16065_input, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, max16065_input, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, max16065_input, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, max16065_input, 8);
+static SENSOR_DEVICE_ATTR_RO(in9_input, max16065_input, 9);
+static SENSOR_DEVICE_ATTR_RO(in10_input, max16065_input, 10);
+static SENSOR_DEVICE_ATTR_RO(in11_input, max16065_input, 11);
+static SENSOR_DEVICE_ATTR_RO(in12_input, max16065_input, 12);
/* Input voltages lcrit */
-static SENSOR_DEVICE_ATTR_2(in0_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 0);
-static SENSOR_DEVICE_ATTR_2(in1_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 1);
-static SENSOR_DEVICE_ATTR_2(in2_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 2);
-static SENSOR_DEVICE_ATTR_2(in3_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 3);
-static SENSOR_DEVICE_ATTR_2(in4_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 4);
-static SENSOR_DEVICE_ATTR_2(in5_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 5);
-static SENSOR_DEVICE_ATTR_2(in6_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 6);
-static SENSOR_DEVICE_ATTR_2(in7_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 7);
-static SENSOR_DEVICE_ATTR_2(in8_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 8);
-static SENSOR_DEVICE_ATTR_2(in9_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 9);
-static SENSOR_DEVICE_ATTR_2(in10_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 10);
-static SENSOR_DEVICE_ATTR_2(in11_lcrit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 2, 11);
+static SENSOR_DEVICE_ATTR_2_RW(in0_lcrit, max16065_limit, 2, 0);
+static SENSOR_DEVICE_ATTR_2_RW(in1_lcrit, max16065_limit, 2, 1);
+static SENSOR_DEVICE_ATTR_2_RW(in2_lcrit, max16065_limit, 2, 2);
+static SENSOR_DEVICE_ATTR_2_RW(in3_lcrit, max16065_limit, 2, 3);
+static SENSOR_DEVICE_ATTR_2_RW(in4_lcrit, max16065_limit, 2, 4);
+static SENSOR_DEVICE_ATTR_2_RW(in5_lcrit, max16065_limit, 2, 5);
+static SENSOR_DEVICE_ATTR_2_RW(in6_lcrit, max16065_limit, 2, 6);
+static SENSOR_DEVICE_ATTR_2_RW(in7_lcrit, max16065_limit, 2, 7);
+static SENSOR_DEVICE_ATTR_2_RW(in8_lcrit, max16065_limit, 2, 8);
+static SENSOR_DEVICE_ATTR_2_RW(in9_lcrit, max16065_limit, 2, 9);
+static SENSOR_DEVICE_ATTR_2_RW(in10_lcrit, max16065_limit, 2, 10);
+static SENSOR_DEVICE_ATTR_2_RW(in11_lcrit, max16065_limit, 2, 11);
/* Input voltages crit */
-static SENSOR_DEVICE_ATTR_2(in0_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 0);
-static SENSOR_DEVICE_ATTR_2(in1_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 1);
-static SENSOR_DEVICE_ATTR_2(in2_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 2);
-static SENSOR_DEVICE_ATTR_2(in3_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 3);
-static SENSOR_DEVICE_ATTR_2(in4_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 4);
-static SENSOR_DEVICE_ATTR_2(in5_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 5);
-static SENSOR_DEVICE_ATTR_2(in6_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 6);
-static SENSOR_DEVICE_ATTR_2(in7_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 7);
-static SENSOR_DEVICE_ATTR_2(in8_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 8);
-static SENSOR_DEVICE_ATTR_2(in9_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 9);
-static SENSOR_DEVICE_ATTR_2(in10_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 10);
-static SENSOR_DEVICE_ATTR_2(in11_crit, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 1, 11);
+static SENSOR_DEVICE_ATTR_2_RW(in0_crit, max16065_limit, 1, 0);
+static SENSOR_DEVICE_ATTR_2_RW(in1_crit, max16065_limit, 1, 1);
+static SENSOR_DEVICE_ATTR_2_RW(in2_crit, max16065_limit, 1, 2);
+static SENSOR_DEVICE_ATTR_2_RW(in3_crit, max16065_limit, 1, 3);
+static SENSOR_DEVICE_ATTR_2_RW(in4_crit, max16065_limit, 1, 4);
+static SENSOR_DEVICE_ATTR_2_RW(in5_crit, max16065_limit, 1, 5);
+static SENSOR_DEVICE_ATTR_2_RW(in6_crit, max16065_limit, 1, 6);
+static SENSOR_DEVICE_ATTR_2_RW(in7_crit, max16065_limit, 1, 7);
+static SENSOR_DEVICE_ATTR_2_RW(in8_crit, max16065_limit, 1, 8);
+static SENSOR_DEVICE_ATTR_2_RW(in9_crit, max16065_limit, 1, 9);
+static SENSOR_DEVICE_ATTR_2_RW(in10_crit, max16065_limit, 1, 10);
+static SENSOR_DEVICE_ATTR_2_RW(in11_crit, max16065_limit, 1, 11);
/* Input voltages min */
-static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 0);
-static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 1);
-static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 2);
-static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 3);
-static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 4);
-static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 5);
-static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 6);
-static SENSOR_DEVICE_ATTR_2(in7_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 7);
-static SENSOR_DEVICE_ATTR_2(in8_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 8);
-static SENSOR_DEVICE_ATTR_2(in9_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 9);
-static SENSOR_DEVICE_ATTR_2(in10_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 10);
-static SENSOR_DEVICE_ATTR_2(in11_min, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 11);
+static SENSOR_DEVICE_ATTR_2_RW(in0_min, max16065_limit, 0, 0);
+static SENSOR_DEVICE_ATTR_2_RW(in1_min, max16065_limit, 0, 1);
+static SENSOR_DEVICE_ATTR_2_RW(in2_min, max16065_limit, 0, 2);
+static SENSOR_DEVICE_ATTR_2_RW(in3_min, max16065_limit, 0, 3);
+static SENSOR_DEVICE_ATTR_2_RW(in4_min, max16065_limit, 0, 4);
+static SENSOR_DEVICE_ATTR_2_RW(in5_min, max16065_limit, 0, 5);
+static SENSOR_DEVICE_ATTR_2_RW(in6_min, max16065_limit, 0, 6);
+static SENSOR_DEVICE_ATTR_2_RW(in7_min, max16065_limit, 0, 7);
+static SENSOR_DEVICE_ATTR_2_RW(in8_min, max16065_limit, 0, 8);
+static SENSOR_DEVICE_ATTR_2_RW(in9_min, max16065_limit, 0, 9);
+static SENSOR_DEVICE_ATTR_2_RW(in10_min, max16065_limit, 0, 10);
+static SENSOR_DEVICE_ATTR_2_RW(in11_min, max16065_limit, 0, 11);
/* Input voltages max */
-static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 0);
-static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 1);
-static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 2);
-static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 3);
-static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 4);
-static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 5);
-static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 6);
-static SENSOR_DEVICE_ATTR_2(in7_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 7);
-static SENSOR_DEVICE_ATTR_2(in8_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 8);
-static SENSOR_DEVICE_ATTR_2(in9_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 9);
-static SENSOR_DEVICE_ATTR_2(in10_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 10);
-static SENSOR_DEVICE_ATTR_2(in11_max, S_IWUSR | S_IRUGO, max16065_show_limit,
- max16065_set_limit, 0, 11);
+static SENSOR_DEVICE_ATTR_2_RW(in0_max, max16065_limit, 0, 0);
+static SENSOR_DEVICE_ATTR_2_RW(in1_max, max16065_limit, 0, 1);
+static SENSOR_DEVICE_ATTR_2_RW(in2_max, max16065_limit, 0, 2);
+static SENSOR_DEVICE_ATTR_2_RW(in3_max, max16065_limit, 0, 3);
+static SENSOR_DEVICE_ATTR_2_RW(in4_max, max16065_limit, 0, 4);
+static SENSOR_DEVICE_ATTR_2_RW(in5_max, max16065_limit, 0, 5);
+static SENSOR_DEVICE_ATTR_2_RW(in6_max, max16065_limit, 0, 6);
+static SENSOR_DEVICE_ATTR_2_RW(in7_max, max16065_limit, 0, 7);
+static SENSOR_DEVICE_ATTR_2_RW(in8_max, max16065_limit, 0, 8);
+static SENSOR_DEVICE_ATTR_2_RW(in9_max, max16065_limit, 0, 9);
+static SENSOR_DEVICE_ATTR_2_RW(in10_max, max16065_limit, 0, 10);
+static SENSOR_DEVICE_ATTR_2_RW(in11_max, max16065_limit, 0, 11);
/* alarms */
-static SENSOR_DEVICE_ATTR_2(in0_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 0, 0);
-static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 0, 1);
-static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 0, 2);
-static SENSOR_DEVICE_ATTR_2(in3_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 0, 3);
-static SENSOR_DEVICE_ATTR_2(in4_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 0, 4);
-static SENSOR_DEVICE_ATTR_2(in5_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 0, 5);
-static SENSOR_DEVICE_ATTR_2(in6_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 0, 6);
-static SENSOR_DEVICE_ATTR_2(in7_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 0, 7);
-static SENSOR_DEVICE_ATTR_2(in8_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 1, 0);
-static SENSOR_DEVICE_ATTR_2(in9_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 1, 1);
-static SENSOR_DEVICE_ATTR_2(in10_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 1, 2);
-static SENSOR_DEVICE_ATTR_2(in11_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 1, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, max16065_alarm, 0, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in1_alarm, max16065_alarm, 0, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, max16065_alarm, 0, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, max16065_alarm, 0, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, max16065_alarm, 0, 4);
+static SENSOR_DEVICE_ATTR_2_RO(in5_alarm, max16065_alarm, 0, 5);
+static SENSOR_DEVICE_ATTR_2_RO(in6_alarm, max16065_alarm, 0, 6);
+static SENSOR_DEVICE_ATTR_2_RO(in7_alarm, max16065_alarm, 0, 7);
+static SENSOR_DEVICE_ATTR_2_RO(in8_alarm, max16065_alarm, 1, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in9_alarm, max16065_alarm, 1, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in10_alarm, max16065_alarm, 1, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in11_alarm, max16065_alarm, 1, 3);
/* Current and alarm */
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, max16065_show_current, NULL, 0);
-static SENSOR_DEVICE_ATTR_2(curr1_alarm, S_IRUGO, max16065_show_alarm, NULL,
- 1, 4);
+static SENSOR_DEVICE_ATTR_RO(curr1_input, max16065_current, 0);
+static SENSOR_DEVICE_ATTR_2_RO(curr1_alarm, max16065_alarm, 1, 4);
/*
* Finally, construct an array of pointers to members of the above objects,
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 76d966932941..94e345fb2a78 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -145,7 +145,7 @@ static struct max1619_data *max1619_update_device(struct device *dev)
* Sysfs stuff
*/
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -154,8 +154,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%d\n", temp_from_reg(data->temp[attr->index]));
}
-static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max1619_data *data = dev_get_drvdata(dev);
@@ -180,7 +181,7 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", data->alarms);
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -188,22 +189,18 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, t_input1);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, t_input2);
-static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_low2);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_high2);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp, set_temp,
- t_crit2);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp,
- set_temp, t_hyst2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, t_input1);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, t_input2);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp, t_low2);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp, t_high2);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp, t_crit2);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit_hyst, temp, t_hyst2);
static DEVICE_ATTR_RO(alarms);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 4);
static struct attribute *max1619_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 30a100e70a0d..6d169b4271f7 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -50,9 +50,8 @@ static int max31722_set_mode(struct max31722_data *data, u8 mode)
return 0;
}
-static ssize_t max31722_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t max31722_temp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
ssize_t ret;
struct max31722_data *data = dev_get_drvdata(dev);
@@ -64,8 +63,7 @@ static ssize_t max31722_show_temp(struct device *dev,
return sprintf(buf, "%d\n", (s16)le16_to_cpu(ret) * 125 / 32);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
- max31722_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, max31722_temp, 0);
static struct attribute *max31722_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 281491cca510..722bcbb9865a 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -252,12 +252,12 @@ static umode_t max31790_fan_is_visible(const void *_data, u32 attr, int channel)
case hwmon_fan_fault:
if (channel < NR_CHANNEL ||
(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
- return S_IRUGO;
+ return 0444;
return 0;
case hwmon_fan_target:
if (channel < NR_CHANNEL &&
!(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
- return S_IRUGO | S_IWUSR;
+ return 0644;
return 0;
default:
return 0;
@@ -353,7 +353,7 @@ static umode_t max31790_pwm_is_visible(const void *_data, u32 attr, int channel)
case hwmon_pwm_input:
case hwmon_pwm_enable:
if (!(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
- return S_IRUGO | S_IWUSR;
+ return 0644;
return 0;
default:
return 0;
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index f98a83c79ff1..fc3ed518f478 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -162,7 +162,7 @@ abort:
return ret;
}
-static ssize_t show_temp_input(struct device *dev,
+static ssize_t temp_input_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
long temp;
@@ -176,7 +176,7 @@ static ssize_t show_temp_input(struct device *dev,
return sprintf(buf, "%ld\n", temp);
}
-static ssize_t show_temp_fault(struct device *dev,
+static ssize_t temp_fault_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct max6639_data *data = max6639_update_device(dev);
@@ -188,7 +188,7 @@ static ssize_t show_temp_fault(struct device *dev,
return sprintf(buf, "%d\n", data->temp_fault[attr->index]);
}
-static ssize_t show_temp_max(struct device *dev,
+static ssize_t temp_max_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
@@ -197,9 +197,9 @@ static ssize_t show_temp_max(struct device *dev,
return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
}
-static ssize_t set_temp_max(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
@@ -220,7 +220,7 @@ static ssize_t set_temp_max(struct device *dev,
return count;
}
-static ssize_t show_temp_crit(struct device *dev,
+static ssize_t temp_crit_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
@@ -229,9 +229,9 @@ static ssize_t show_temp_crit(struct device *dev,
return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
}
-static ssize_t set_temp_crit(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static ssize_t temp_crit_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
@@ -252,7 +252,7 @@ static ssize_t set_temp_crit(struct device *dev,
return count;
}
-static ssize_t show_temp_emergency(struct device *dev,
+static ssize_t temp_emergency_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
@@ -262,9 +262,9 @@ static ssize_t show_temp_emergency(struct device *dev,
return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
}
-static ssize_t set_temp_emergency(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static ssize_t temp_emergency_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
@@ -285,8 +285,8 @@ static ssize_t set_temp_emergency(struct device *dev,
return count;
}
-static ssize_t show_pwm(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
@@ -294,9 +294,9 @@ static ssize_t show_pwm(struct device *dev,
return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
}
-static ssize_t set_pwm(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct max6639_data *data = dev_get_drvdata(dev);
@@ -319,7 +319,7 @@ static ssize_t set_pwm(struct device *dev,
return count;
}
-static ssize_t show_fan_input(struct device *dev,
+static ssize_t fan_input_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct max6639_data *data = max6639_update_device(dev);
@@ -332,7 +332,7 @@ static ssize_t show_fan_input(struct device *dev,
data->rpm_range));
}
-static ssize_t show_alarm(struct device *dev,
+static ssize_t alarm_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct max6639_data *data = max6639_update_device(dev);
@@ -344,34 +344,28 @@ static ssize_t show_alarm(struct device *dev,
return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index)));
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
- set_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
- set_temp_max, 1);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
- set_temp_crit, 0);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
- set_temp_crit, 1);
-static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO,
- show_temp_emergency, set_temp_emergency, 0);
-static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO,
- show_temp_emergency, set_temp_emergency, 1);
-static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, temp_fault, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, temp_fault, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_emergency, temp_emergency, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_emergency, temp_emergency, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RO(fan1_fault, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(fan2_fault, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp1_emergency_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp2_emergency_alarm, alarm, 4);
static struct attribute *max6639_attrs[] = {
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 6520bc51d02a..084b2685b7a5 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -206,7 +206,7 @@ static struct max6642_data *max6642_update_device(struct device *dev)
* Sysfs stuff
*/
-static ssize_t show_temp_max10(struct device *dev,
+static ssize_t temp_max10_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
@@ -216,8 +216,8 @@ static ssize_t show_temp_max10(struct device *dev,
temp_from_reg10(data->temp_input[attr->index]));
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
struct max6642_data *data = max6642_update_device(dev);
@@ -225,8 +225,9 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", temp_from_reg(data->temp_high[attr2->nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
struct max6642_data *data = dev_get_drvdata(dev);
@@ -245,7 +246,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -253,15 +254,15 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_max10, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_max10, NULL, 1);
-static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
- set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH);
-static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
- set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_max10, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_max10, 1);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_max, temp_max, 0,
+ MAX6642_REG_W_LOCAL_HIGH);
+static SENSOR_DEVICE_ATTR_2_RW(temp2_max, temp_max, 1,
+ MAX6642_REG_W_REMOTE_HIGH);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 4);
static struct attribute *max6642_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 4752a9ee9645..61135a2d0cff 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -52,9 +52,9 @@ static int prescaler;
/* clock: The clock frequency of the chip (max6651 can be clocked externally) */
static int clock = 254000;
-module_param(fan_voltage, int, S_IRUGO);
-module_param(prescaler, int, S_IRUGO);
-module_param(clock, int, S_IRUGO);
+module_param(fan_voltage, int, 0444);
+module_param(prescaler, int, 0444);
+module_param(clock, int, 0444);
/*
* MAX 6650/6651 registers
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 825b922a3f92..ff147e5e1b8c 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -63,8 +63,9 @@ static int mc13783_adc_read(struct device *dev,
return 0;
}
-static ssize_t mc13783_adc_read_bp(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t mc13783_adc_bp_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
unsigned val;
struct platform_device *pdev = to_platform_device(dev);
@@ -86,8 +87,9 @@ static ssize_t mc13783_adc_read_bp(struct device *dev,
return sprintf(buf, "%u\n", val);
}
-static ssize_t mc13783_adc_read_gp(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t mc13783_adc_gp_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
unsigned val;
int ret = mc13783_adc_read(dev, devattr, &val);
@@ -104,8 +106,9 @@ static ssize_t mc13783_adc_read_gp(struct device *dev,
return sprintf(buf, "%u\n", val);
}
-static ssize_t mc13783_adc_read_uid(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t mc13783_adc_uid_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
unsigned int val;
struct platform_device *pdev = to_platform_device(dev);
@@ -125,8 +128,9 @@ static ssize_t mc13783_adc_read_uid(struct device *dev,
return sprintf(buf, "%u\n", val);
}
-static ssize_t mc13783_adc_read_temp(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t mc13783_adc_temp_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
unsigned int val;
struct platform_device *pdev = to_platform_device(dev);
@@ -156,21 +160,20 @@ static ssize_t mc13783_adc_read_temp(struct device *dev,
}
static DEVICE_ATTR_RO(name);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, mc13783_adc_read_bp, NULL, 2);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, mc13783_adc_read_gp, NULL, 5);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, mc13783_adc_read_gp, NULL, 6);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, mc13783_adc_read_gp, NULL, 7);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, mc13783_adc_read_gp, NULL, 8);
-static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, mc13783_adc_read_gp, NULL, 9);
-static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, mc13783_adc_read_gp, NULL, 10);
-static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, mc13783_adc_read_gp, NULL, 11);
-static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, mc13783_adc_read_gp, NULL, 12);
-static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, mc13783_adc_read_gp, NULL, 13);
-static SENSOR_DEVICE_ATTR(in14_input, S_IRUGO, mc13783_adc_read_gp, NULL, 14);
-static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, mc13783_adc_read_gp, NULL, 15);
-static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, mc13783_adc_read_uid, NULL, 16);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
- mc13783_adc_read_temp, NULL, 17);
+static SENSOR_DEVICE_ATTR_RO(in2_input, mc13783_adc_bp, 2);
+static SENSOR_DEVICE_ATTR_RO(in5_input, mc13783_adc_gp, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, mc13783_adc_gp, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, mc13783_adc_gp, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, mc13783_adc_gp, 8);
+static SENSOR_DEVICE_ATTR_RO(in9_input, mc13783_adc_gp, 9);
+static SENSOR_DEVICE_ATTR_RO(in10_input, mc13783_adc_gp, 10);
+static SENSOR_DEVICE_ATTR_RO(in11_input, mc13783_adc_gp, 11);
+static SENSOR_DEVICE_ATTR_RO(in12_input, mc13783_adc_gp, 12);
+static SENSOR_DEVICE_ATTR_RO(in13_input, mc13783_adc_gp, 13);
+static SENSOR_DEVICE_ATTR_RO(in14_input, mc13783_adc_gp, 14);
+static SENSOR_DEVICE_ATTR_RO(in15_input, mc13783_adc_gp, 15);
+static SENSOR_DEVICE_ATTR_RO(in16_input, mc13783_adc_uid, 16);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, mc13783_adc_temp, 17);
static struct attribute *mc13783_attr_base[] = {
&dev_attr_name.attr,
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c3040079b1cb..59ee01f3d022 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -44,8 +44,8 @@
* nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
* nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
* (0xd451)
- * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3
- * (0xd459)
+ * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
+ * (0xd429)
*
* #temp lists the number of monitored temperature sources (first value) plus
* the number of directly connectable temperature sensors (second value).
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_NCT6795_ID 0xd350
#define SIO_NCT6796_ID 0xd420
#define SIO_NCT6797_ID 0xd450
-#define SIO_NCT6798_ID 0xd458
+#define SIO_NCT6798_ID 0xd428
#define SIO_ID_MASK 0xFFF8
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
fan5pin |= cr1b & BIT(5);
fan5pin |= creb & BIT(5);
- fan6pin = creb & BIT(3);
+ fan6pin = !dsw_en && (cr2d & BIT(1));
+ fan6pin |= creb & BIT(3);
pwm5pin |= cr2d & BIT(7);
pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
@@ -4508,7 +4509,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
if (data->kind == nct6791 || data->kind == nct6792 ||
data->kind == nct6793 || data->kind == nct6795 ||
- data->kind == nct6796)
+ data->kind == nct6796 || data->kind == nct6797 ||
+ data->kind == nct6798)
nct6791_enable_io_mapping(sioreg);
superio_exit(sioreg);
@@ -4644,7 +4646,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
- sio_data->kind == nct6796)
+ sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
+ sio_data->kind == nct6798)
nct6791_enable_io_mapping(sioaddr);
superio_exit(sioaddr);
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 7815ddf149f6..82c7de7b4639 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -182,7 +182,7 @@ static umode_t nct7904_fan_is_visible(const void *_data, u32 attr, int channel)
const struct nct7904_data *data = _data;
if (attr == hwmon_fan_input && data->fanin_mask & (1 << channel))
- return S_IRUGO;
+ return 0444;
return 0;
}
@@ -225,7 +225,7 @@ static umode_t nct7904_in_is_visible(const void *_data, u32 attr, int channel)
if (channel > 0 && attr == hwmon_in_input &&
(data->vsen_mask & BIT(index)))
- return S_IRUGO;
+ return 0444;
return 0;
}
@@ -260,10 +260,10 @@ static umode_t nct7904_temp_is_visible(const void *_data, u32 attr, int channel)
if (attr == hwmon_temp_input) {
if (channel == 0) {
if (data->vsen_mask & BIT(17))
- return S_IRUGO;
+ return 0444;
} else {
if (data->tcpu_mask & BIT(channel - 1))
- return S_IRUGO;
+ return 0444;
}
}
@@ -325,7 +325,7 @@ static umode_t nct7904_pwm_is_visible(const void *_data, u32 attr, int channel)
switch (attr) {
case hwmon_pwm_input:
case hwmon_pwm_enable:
- return S_IRUGO | S_IWUSR;
+ return 0644;
default:
return 0;
}
diff --git a/drivers/hwmon/nsa320-hwmon.c b/drivers/hwmon/nsa320-hwmon.c
index 5a16109cdea8..f952f803faeb 100644
--- a/drivers/hwmon/nsa320-hwmon.c
+++ b/drivers/hwmon/nsa320-hwmon.c
@@ -114,8 +114,8 @@ static s32 nsa320_hwmon_update(struct device *dev)
return mcu_data;
}
-static ssize_t show_label(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t label_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int channel = to_sensor_dev_attr(attr)->index;
@@ -144,9 +144,9 @@ static ssize_t fan1_input_show(struct device *dev,
return sprintf(buf, "%d\n", ((mcu_data & 0xff0000) >> 16) * 100);
}
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, NSA320_TEMP);
+static SENSOR_DEVICE_ATTR_RO(temp1_label, label, NSA320_TEMP);
static DEVICE_ATTR_RO(temp1_input);
-static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, show_label, NULL, NSA320_FAN);
+static SENSOR_DEVICE_ATTR_RO(fan1_label, label, NSA320_FAN);
static DEVICE_ATTR_RO(fan1_input);
static struct attribute *nsa320_attrs[] = {
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 2823aff82c82..e4f9f7ce92fa 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -37,8 +37,6 @@
#include <linux/iio/consumer.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/thermal.h>
struct ntc_compensation {
int temp_c;
@@ -588,55 +586,87 @@ static int ntc_thermistor_get_ohm(struct ntc_data *data)
return -EINVAL;
}
-static int ntc_read_temp(void *data, int *temp)
+static int ntc_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
{
+ struct ntc_data *data = dev_get_drvdata(dev);
int ohm;
- ohm = ntc_thermistor_get_ohm(data);
- if (ohm < 0)
- return ohm;
-
- *temp = get_temp_mc(data, ohm);
-
- return 0;
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ ohm = ntc_thermistor_get_ohm(data);
+ if (ohm < 0)
+ return ohm;
+ *val = get_temp_mc(data, ohm);
+ return 0;
+ case hwmon_temp_type:
+ *val = 4;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
}
-static ssize_t ntc_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static umode_t ntc_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- return sprintf(buf, "4\n");
+ if (type == hwmon_temp) {
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_type:
+ return 0444;
+ default:
+ break;
+ }
+ }
+ return 0;
}
-static ssize_t ntc_temp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ntc_data *data = dev_get_drvdata(dev);
- int ohm;
+static const u32 ntc_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
- ohm = ntc_thermistor_get_ohm(data);
- if (ohm < 0)
- return ohm;
+static const struct hwmon_channel_info ntc_chip = {
+ .type = hwmon_chip,
+ .config = ntc_chip_config,
+};
- return sprintf(buf, "%d\n", get_temp_mc(data, ohm));
-}
+static const u32 ntc_temp_config[] = {
+ HWMON_T_INPUT, HWMON_T_TYPE,
+ 0
+};
-static SENSOR_DEVICE_ATTR_RO(temp1_type, ntc_type, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_input, ntc_temp, 0);
+static const struct hwmon_channel_info ntc_temp = {
+ .type = hwmon_temp,
+ .config = ntc_temp_config,
+};
-static struct attribute *ntc_attrs[] = {
- &sensor_dev_attr_temp1_type.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- NULL,
+static const struct hwmon_channel_info *ntc_info[] = {
+ &ntc_chip,
+ &ntc_temp,
+ NULL
};
-ATTRIBUTE_GROUPS(ntc);
-static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = {
- .get_temp = ntc_read_temp,
+static const struct hwmon_ops ntc_hwmon_ops = {
+ .is_visible = ntc_is_visible,
+ .read = ntc_read,
+};
+
+static const struct hwmon_chip_info ntc_chip_info = {
+ .ops = &ntc_hwmon_ops,
+ .info = ntc_info,
};
static int ntc_thermistor_probe(struct platform_device *pdev)
{
- struct thermal_zone_device *tz;
struct device *dev = &pdev->dev;
const struct of_device_id *of_id =
of_match_device(of_match_ptr(ntc_match), dev);
@@ -697,8 +727,9 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
data->comp = ntc_type[pdev_id->driver_data].comp;
data->n_comp = ntc_type[pdev_id->driver_data].n_comp;
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, pdev_id->name,
- data, ntc_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, pdev_id->name,
+ data, &ntc_chip_info,
+ NULL);
if (IS_ERR(hwmon_dev)) {
dev_err(dev, "unable to register as hwmon device.\n");
return PTR_ERR(hwmon_dev);
@@ -707,11 +738,6 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
dev_info(dev, "Thermistor type: %s successfully probed.\n",
pdev_id->name);
- tz = devm_thermal_zone_of_sensor_register(dev, 0, data,
- &ntc_of_thermal_ops);
- if (IS_ERR(tz))
- dev_dbg(dev, "Failed to register to thermal fw.\n");
-
return 0;
}
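The ntc_thermistor conversion above replaces the groups-based registration plus a hand-rolled thermal-zone hookup with devm_hwmon_device_register_with_info(). Because the chip channel advertises HWMON_C_REGISTER_TZ, the hwmon core registers the temperature channel with the thermal subsystem itself, so the explicit devm_thermal_zone_of_sensor_register() call and the thermal/hwmon-sysfs includes can go away; the temp1_type value of 4 continues to identify the sensor as a thermistor in the hwmon sysfs interface. A condensed sketch of the pattern, with foo_* as illustrative placeholder names rather than symbols from the patch:

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = 25000;		/* millidegrees Celsius */
		return 0;
	}
	return -EINVAL;
}

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	return type == hwmon_temp ? 0444 : 0;	/* read-only attributes */
}

static const u32 foo_chip_config[] = { HWMON_C_REGISTER_TZ, 0 };
static const u32 foo_temp_config[] = { HWMON_T_INPUT, 0 };

static const struct hwmon_channel_info foo_chip = {
	.type = hwmon_chip, .config = foo_chip_config,
};
static const struct hwmon_channel_info foo_temp = {
	.type = hwmon_temp, .config = foo_temp_config,
};
static const struct hwmon_channel_info *foo_info[] = {
	&foo_chip, &foo_temp, NULL
};

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible = foo_is_visible,
	.read = foo_read,
};
static const struct hwmon_chip_info foo_chip_info = {
	.ops = &foo_hwmon_ops,
	.info = foo_info,
};

/* in probe():
 *	hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", data,
 *							  &foo_chip_info, NULL);
 */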
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 423903f87955..b91a80abf724 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -1,4 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright IBM Corp 2019
#include <linux/device.h>
#include <linux/hwmon.h>
@@ -380,8 +381,8 @@ static ssize_t occ_show_power_1(struct device *dev,
val *= 1000000ULL;
break;
case 2:
- val = get_unaligned_be32(&power->update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -425,8 +426,8 @@ static ssize_t occ_show_power_2(struct device *dev,
&power->update_tag);
break;
case 2:
- val = get_unaligned_be32(&power->update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -463,8 +464,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
&power->system.update_tag);
break;
case 2:
- val = get_unaligned_be32(&power->system.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->system.update_tag) *
+ occ->powr_sample_time_us;
break;
case 3:
val = get_unaligned_be16(&power->system.value) * 1000000ULL;
@@ -477,8 +478,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
&power->proc.update_tag);
break;
case 6:
- val = get_unaligned_be32(&power->proc.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->proc.update_tag) *
+ occ->powr_sample_time_us;
break;
case 7:
val = get_unaligned_be16(&power->proc.value) * 1000000ULL;
@@ -491,8 +492,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
&power->vdd.update_tag);
break;
case 10:
- val = get_unaligned_be32(&power->vdd.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
+ occ->powr_sample_time_us;
break;
case 11:
val = get_unaligned_be16(&power->vdd.value) * 1000000ULL;
@@ -505,8 +506,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
&power->vdn.update_tag);
break;
case 14:
- val = get_unaligned_be32(&power->vdn.update_tag) *
- occ->powr_sample_time_us;
+ val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
+ occ->powr_sample_time_us;
break;
case 15:
val = get_unaligned_be16(&power->vdn.value) * 1000000ULL;
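The occ/common.c hunks above fix a potential overflow: get_unaligned_be32() yields a 32-bit value, and multiplying it by powr_sample_time_us (itself a 32-bit field) is done in 32-bit arithmetic, so the product can wrap before being widened into the 64-bit val. The (u64) cast forces a 64-bit multiply. A standalone illustration with made-up numbers; the identifiers and values below are hypothetical, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t update_tag = 500000;     /* hypothetical sample counter */
	uint32_t sample_time_us = 10000;  /* hypothetical microseconds per sample */

	/* On a typical platform where unsigned int is 32 bits, this multiply
	 * wraps modulo 2^32 before the assignment widens it. */
	uint64_t wrong = update_tag * sample_time_us;
	/* Casting one operand first makes the whole multiply 64-bit. */
	uint64_t right = (uint64_t)update_tag * sample_time_us;

	printf("wrong: %llu\n", (unsigned long long)wrong);  /* 705032704 */
	printf("right: %llu\n", (unsigned long long)right);  /* 5000000000 */
	return 0;
}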
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index 7c44df3f5631..ed2cf4245295 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -1,4 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright IBM Corp 2019 */
#ifndef OCC_COMMON_H
#define OCC_COMMON_H
diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
index b59efc945e54..76fb7870c7d3 100644
--- a/drivers/hwmon/occ/p8_i2c.c
+++ b/drivers/hwmon/occ/p8_i2c.c
@@ -1,4 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright IBM Corp 2019
#include <linux/device.h>
#include <linux/errno.h>
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index b65c1d1dfb54..f6387cc0b754 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -1,4 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright IBM Corp 2019
#include <linux/device.h>
#include <linux/errno.h>
diff --git a/drivers/hwmon/occ/sysfs.c b/drivers/hwmon/occ/sysfs.c
index 743b26ec8e54..fe3d15e416e7 100644
--- a/drivers/hwmon/occ/sysfs.c
+++ b/drivers/hwmon/occ/sysfs.c
@@ -1,14 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * OCC hwmon driver sysfs interface
- *
- * Copyright (C) IBM Corporation 2018
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright IBM Corp 2019
#include <linux/bitops.h>
#include <linux/device.h>
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 7e3697727537..56584f9ab803 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -254,7 +254,7 @@ static struct platform_driver pc87360_driver = {
* Sysfs stuff
*/
-static ssize_t show_fan_input(struct device *dev,
+static ssize_t fan_input_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -262,7 +262,7 @@ static ssize_t show_fan_input(struct device *dev,
return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan[attr->index],
FAN_DIV_FROM_REG(data->fan_status[attr->index])));
}
-static ssize_t show_fan_min(struct device *dev,
+static ssize_t fan_min_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -270,7 +270,7 @@ static ssize_t show_fan_min(struct device *dev,
return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan_min[attr->index],
FAN_DIV_FROM_REG(data->fan_status[attr->index])));
}
-static ssize_t show_fan_div(struct device *dev,
+static ssize_t fan_div_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -278,7 +278,7 @@ static ssize_t show_fan_div(struct device *dev,
return sprintf(buf, "%u\n",
FAN_DIV_FROM_REG(data->fan_status[attr->index]));
}
-static ssize_t show_fan_status(struct device *dev,
+static ssize_t fan_status_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -286,9 +286,9 @@ static ssize_t show_fan_status(struct device *dev,
return sprintf(buf, "%u\n",
FAN_STATUS_FROM_REG(data->fan_status[attr->index]));
}
-static ssize_t set_fan_min(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -325,24 +325,24 @@ static ssize_t set_fan_min(struct device *dev,
}
static struct sensor_device_attribute fan_input[] = {
- SENSOR_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0),
- SENSOR_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1),
- SENSOR_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2),
+ SENSOR_ATTR_RO(fan1_input, fan_input, 0),
+ SENSOR_ATTR_RO(fan2_input, fan_input, 1),
+ SENSOR_ATTR_RO(fan3_input, fan_input, 2),
};
static struct sensor_device_attribute fan_status[] = {
- SENSOR_ATTR(fan1_status, S_IRUGO, show_fan_status, NULL, 0),
- SENSOR_ATTR(fan2_status, S_IRUGO, show_fan_status, NULL, 1),
- SENSOR_ATTR(fan3_status, S_IRUGO, show_fan_status, NULL, 2),
+ SENSOR_ATTR_RO(fan1_status, fan_status, 0),
+ SENSOR_ATTR_RO(fan2_status, fan_status, 1),
+ SENSOR_ATTR_RO(fan3_status, fan_status, 2),
};
static struct sensor_device_attribute fan_div[] = {
- SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
- SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
- SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
+ SENSOR_ATTR_RO(fan1_div, fan_div, 0),
+ SENSOR_ATTR_RO(fan2_div, fan_div, 1),
+ SENSOR_ATTR_RO(fan3_div, fan_div, 2),
};
static struct sensor_device_attribute fan_min[] = {
- SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, set_fan_min, 0),
- SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, set_fan_min, 1),
- SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, set_fan_min, 2),
+ SENSOR_ATTR_RW(fan1_min, fan_min, 0),
+ SENSOR_ATTR_RW(fan2_min, fan_min, 1),
+ SENSOR_ATTR_RW(fan3_min, fan_min, 2),
};
#define FAN_UNIT_ATTRS(X) \
@@ -353,7 +353,7 @@ static struct sensor_device_attribute fan_min[] = {
NULL \
}
-static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
+static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -363,8 +363,8 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
FAN_CONFIG_INVERT(data->fan_conf,
attr->index)));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -385,9 +385,9 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
}
static struct sensor_device_attribute pwm[] = {
- SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0),
- SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1),
- SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2),
+ SENSOR_ATTR_RW(pwm1, pwm, 0),
+ SENSOR_ATTR_RW(pwm2, pwm, 1),
+ SENSOR_ATTR_RW(pwm3, pwm, 2),
};
static struct attribute *pc8736x_fan_attr[][5] = {
@@ -402,7 +402,7 @@ static const struct attribute_group pc8736x_fan_attr_group[] = {
{ .attrs = pc8736x_fan_attr[2], },
};
-static ssize_t show_in_input(struct device *dev,
+static ssize_t in_input_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -410,7 +410,7 @@ static ssize_t show_in_input(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in[attr->index],
data->in_vref));
}
-static ssize_t show_in_min(struct device *dev,
+static ssize_t in_min_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -418,7 +418,7 @@ static ssize_t show_in_min(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
data->in_vref));
}
-static ssize_t show_in_max(struct device *dev,
+static ssize_t in_max_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -426,15 +426,16 @@ static ssize_t show_in_max(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
data->in_vref));
}
-static ssize_t show_in_status(struct device *dev,
+static ssize_t in_status_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
return sprintf(buf, "%u\n", data->in_status[attr->index]);
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -452,8 +453,9 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *devattr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -474,56 +476,56 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *devattr,
}
static struct sensor_device_attribute in_input[] = {
- SENSOR_ATTR(in0_input, S_IRUGO, show_in_input, NULL, 0),
- SENSOR_ATTR(in1_input, S_IRUGO, show_in_input, NULL, 1),
- SENSOR_ATTR(in2_input, S_IRUGO, show_in_input, NULL, 2),
- SENSOR_ATTR(in3_input, S_IRUGO, show_in_input, NULL, 3),
- SENSOR_ATTR(in4_input, S_IRUGO, show_in_input, NULL, 4),
- SENSOR_ATTR(in5_input, S_IRUGO, show_in_input, NULL, 5),
- SENSOR_ATTR(in6_input, S_IRUGO, show_in_input, NULL, 6),
- SENSOR_ATTR(in7_input, S_IRUGO, show_in_input, NULL, 7),
- SENSOR_ATTR(in8_input, S_IRUGO, show_in_input, NULL, 8),
- SENSOR_ATTR(in9_input, S_IRUGO, show_in_input, NULL, 9),
- SENSOR_ATTR(in10_input, S_IRUGO, show_in_input, NULL, 10),
+ SENSOR_ATTR_RO(in0_input, in_input, 0),
+ SENSOR_ATTR_RO(in1_input, in_input, 1),
+ SENSOR_ATTR_RO(in2_input, in_input, 2),
+ SENSOR_ATTR_RO(in3_input, in_input, 3),
+ SENSOR_ATTR_RO(in4_input, in_input, 4),
+ SENSOR_ATTR_RO(in5_input, in_input, 5),
+ SENSOR_ATTR_RO(in6_input, in_input, 6),
+ SENSOR_ATTR_RO(in7_input, in_input, 7),
+ SENSOR_ATTR_RO(in8_input, in_input, 8),
+ SENSOR_ATTR_RO(in9_input, in_input, 9),
+ SENSOR_ATTR_RO(in10_input, in_input, 10),
};
static struct sensor_device_attribute in_status[] = {
- SENSOR_ATTR(in0_status, S_IRUGO, show_in_status, NULL, 0),
- SENSOR_ATTR(in1_status, S_IRUGO, show_in_status, NULL, 1),
- SENSOR_ATTR(in2_status, S_IRUGO, show_in_status, NULL, 2),
- SENSOR_ATTR(in3_status, S_IRUGO, show_in_status, NULL, 3),
- SENSOR_ATTR(in4_status, S_IRUGO, show_in_status, NULL, 4),
- SENSOR_ATTR(in5_status, S_IRUGO, show_in_status, NULL, 5),
- SENSOR_ATTR(in6_status, S_IRUGO, show_in_status, NULL, 6),
- SENSOR_ATTR(in7_status, S_IRUGO, show_in_status, NULL, 7),
- SENSOR_ATTR(in8_status, S_IRUGO, show_in_status, NULL, 8),
- SENSOR_ATTR(in9_status, S_IRUGO, show_in_status, NULL, 9),
- SENSOR_ATTR(in10_status, S_IRUGO, show_in_status, NULL, 10),
+ SENSOR_ATTR_RO(in0_status, in_status, 0),
+ SENSOR_ATTR_RO(in1_status, in_status, 1),
+ SENSOR_ATTR_RO(in2_status, in_status, 2),
+ SENSOR_ATTR_RO(in3_status, in_status, 3),
+ SENSOR_ATTR_RO(in4_status, in_status, 4),
+ SENSOR_ATTR_RO(in5_status, in_status, 5),
+ SENSOR_ATTR_RO(in6_status, in_status, 6),
+ SENSOR_ATTR_RO(in7_status, in_status, 7),
+ SENSOR_ATTR_RO(in8_status, in_status, 8),
+ SENSOR_ATTR_RO(in9_status, in_status, 9),
+ SENSOR_ATTR_RO(in10_status, in_status, 10),
};
static struct sensor_device_attribute in_min[] = {
- SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 0),
- SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 1),
- SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 2),
- SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 3),
- SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 4),
- SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 5),
- SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 6),
- SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 7),
- SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 8),
- SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 9),
- SENSOR_ATTR(in10_min, S_IWUSR | S_IRUGO, show_in_min, set_in_min, 10),
+ SENSOR_ATTR_RW(in0_min, in_min, 0),
+ SENSOR_ATTR_RW(in1_min, in_min, 1),
+ SENSOR_ATTR_RW(in2_min, in_min, 2),
+ SENSOR_ATTR_RW(in3_min, in_min, 3),
+ SENSOR_ATTR_RW(in4_min, in_min, 4),
+ SENSOR_ATTR_RW(in5_min, in_min, 5),
+ SENSOR_ATTR_RW(in6_min, in_min, 6),
+ SENSOR_ATTR_RW(in7_min, in_min, 7),
+ SENSOR_ATTR_RW(in8_min, in_min, 8),
+ SENSOR_ATTR_RW(in9_min, in_min, 9),
+ SENSOR_ATTR_RW(in10_min, in_min, 10),
};
static struct sensor_device_attribute in_max[] = {
- SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 0),
- SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 1),
- SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 2),
- SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 3),
- SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 4),
- SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 5),
- SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 6),
- SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 7),
- SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 8),
- SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 9),
- SENSOR_ATTR(in10_max, S_IWUSR | S_IRUGO, show_in_max, set_in_max, 10),
+ SENSOR_ATTR_RW(in0_max, in_max, 0),
+ SENSOR_ATTR_RW(in1_max, in_max, 1),
+ SENSOR_ATTR_RW(in2_max, in_max, 2),
+ SENSOR_ATTR_RW(in3_max, in_max, 3),
+ SENSOR_ATTR_RW(in4_max, in_max, 4),
+ SENSOR_ATTR_RW(in5_max, in_max, 5),
+ SENSOR_ATTR_RW(in6_max, in_max, 6),
+ SENSOR_ATTR_RW(in7_max, in_max, 7),
+ SENSOR_ATTR_RW(in8_max, in_max, 8),
+ SENSOR_ATTR_RW(in9_max, in_max, 9),
+ SENSOR_ATTR_RW(in10_max, in_max, 10),
};
/* (temp & vin) channel status register alarm bits (pdf sec.11.5.12) */
@@ -537,16 +539,16 @@ static struct sensor_device_attribute in_max[] = {
* 11.5.2) that (legacy) show_in_alarm() reads (via data->in_alarms)
*/
-static ssize_t show_in_min_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t in_min_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MIN));
}
-static ssize_t show_in_max_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t in_max_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
@@ -555,30 +557,30 @@ static ssize_t show_in_max_alarm(struct device *dev,
}
static struct sensor_device_attribute in_min_alarm[] = {
- SENSOR_ATTR(in0_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 0),
- SENSOR_ATTR(in1_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 1),
- SENSOR_ATTR(in2_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 2),
- SENSOR_ATTR(in3_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 3),
- SENSOR_ATTR(in4_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 4),
- SENSOR_ATTR(in5_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 5),
- SENSOR_ATTR(in6_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 6),
- SENSOR_ATTR(in7_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 7),
- SENSOR_ATTR(in8_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 8),
- SENSOR_ATTR(in9_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 9),
- SENSOR_ATTR(in10_min_alarm, S_IRUGO, show_in_min_alarm, NULL, 10),
+ SENSOR_ATTR_RO(in0_min_alarm, in_min_alarm, 0),
+ SENSOR_ATTR_RO(in1_min_alarm, in_min_alarm, 1),
+ SENSOR_ATTR_RO(in2_min_alarm, in_min_alarm, 2),
+ SENSOR_ATTR_RO(in3_min_alarm, in_min_alarm, 3),
+ SENSOR_ATTR_RO(in4_min_alarm, in_min_alarm, 4),
+ SENSOR_ATTR_RO(in5_min_alarm, in_min_alarm, 5),
+ SENSOR_ATTR_RO(in6_min_alarm, in_min_alarm, 6),
+ SENSOR_ATTR_RO(in7_min_alarm, in_min_alarm, 7),
+ SENSOR_ATTR_RO(in8_min_alarm, in_min_alarm, 8),
+ SENSOR_ATTR_RO(in9_min_alarm, in_min_alarm, 9),
+ SENSOR_ATTR_RO(in10_min_alarm, in_min_alarm, 10),
};
static struct sensor_device_attribute in_max_alarm[] = {
- SENSOR_ATTR(in0_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 0),
- SENSOR_ATTR(in1_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 1),
- SENSOR_ATTR(in2_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 2),
- SENSOR_ATTR(in3_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 3),
- SENSOR_ATTR(in4_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 4),
- SENSOR_ATTR(in5_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 5),
- SENSOR_ATTR(in6_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 6),
- SENSOR_ATTR(in7_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 7),
- SENSOR_ATTR(in8_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 8),
- SENSOR_ATTR(in9_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 9),
- SENSOR_ATTR(in10_max_alarm, S_IRUGO, show_in_max_alarm, NULL, 10),
+ SENSOR_ATTR_RO(in0_max_alarm, in_max_alarm, 0),
+ SENSOR_ATTR_RO(in1_max_alarm, in_max_alarm, 1),
+ SENSOR_ATTR_RO(in2_max_alarm, in_max_alarm, 2),
+ SENSOR_ATTR_RO(in3_max_alarm, in_max_alarm, 3),
+ SENSOR_ATTR_RO(in4_max_alarm, in_max_alarm, 4),
+ SENSOR_ATTR_RO(in5_max_alarm, in_max_alarm, 5),
+ SENSOR_ATTR_RO(in6_max_alarm, in_max_alarm, 6),
+ SENSOR_ATTR_RO(in7_max_alarm, in_max_alarm, 7),
+ SENSOR_ATTR_RO(in8_max_alarm, in_max_alarm, 8),
+ SENSOR_ATTR_RO(in9_max_alarm, in_max_alarm, 9),
+ SENSOR_ATTR_RO(in10_max_alarm, in_max_alarm, 10),
};
#define VIN_UNIT_ATTRS(X) \
@@ -651,7 +653,7 @@ static const struct attribute_group pc8736x_vin_group = {
.attrs = pc8736x_vin_attr_array,
};
-static ssize_t show_therm_input(struct device *dev,
+static ssize_t therm_input_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -659,7 +661,7 @@ static ssize_t show_therm_input(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in[attr->index],
data->in_vref));
}
-static ssize_t show_therm_min(struct device *dev,
+static ssize_t therm_min_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -667,7 +669,7 @@ static ssize_t show_therm_min(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
data->in_vref));
}
-static ssize_t show_therm_max(struct device *dev,
+static ssize_t therm_max_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -675,7 +677,7 @@ static ssize_t show_therm_max(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
data->in_vref));
}
-static ssize_t show_therm_crit(struct device *dev,
+static ssize_t therm_crit_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -683,7 +685,7 @@ static ssize_t show_therm_crit(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in_crit[attr->index-11],
data->in_vref));
}
-static ssize_t show_therm_status(struct device *dev,
+static ssize_t therm_status_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -691,9 +693,9 @@ static ssize_t show_therm_status(struct device *dev,
return sprintf(buf, "%u\n", data->in_status[attr->index]);
}
-static ssize_t set_therm_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t therm_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -712,9 +714,9 @@ static ssize_t set_therm_min(struct device *dev,
return count;
}
-static ssize_t set_therm_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t therm_max_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -732,9 +734,9 @@ static ssize_t set_therm_max(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_therm_crit(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t therm_crit_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -758,38 +760,29 @@ static ssize_t set_therm_crit(struct device *dev,
* used in the chip to measure voltage across the thermistors
*/
static struct sensor_device_attribute therm_input[] = {
- SENSOR_ATTR(temp4_input, S_IRUGO, show_therm_input, NULL, 0 + 11),
- SENSOR_ATTR(temp5_input, S_IRUGO, show_therm_input, NULL, 1 + 11),
- SENSOR_ATTR(temp6_input, S_IRUGO, show_therm_input, NULL, 2 + 11),
+ SENSOR_ATTR_RO(temp4_input, therm_input, 0 + 11),
+ SENSOR_ATTR_RO(temp5_input, therm_input, 1 + 11),
+ SENSOR_ATTR_RO(temp6_input, therm_input, 2 + 11),
};
static struct sensor_device_attribute therm_status[] = {
- SENSOR_ATTR(temp4_status, S_IRUGO, show_therm_status, NULL, 0 + 11),
- SENSOR_ATTR(temp5_status, S_IRUGO, show_therm_status, NULL, 1 + 11),
- SENSOR_ATTR(temp6_status, S_IRUGO, show_therm_status, NULL, 2 + 11),
+ SENSOR_ATTR_RO(temp4_status, therm_status, 0 + 11),
+ SENSOR_ATTR_RO(temp5_status, therm_status, 1 + 11),
+ SENSOR_ATTR_RO(temp6_status, therm_status, 2 + 11),
};
static struct sensor_device_attribute therm_min[] = {
- SENSOR_ATTR(temp4_min, S_IRUGO | S_IWUSR,
- show_therm_min, set_therm_min, 0 + 11),
- SENSOR_ATTR(temp5_min, S_IRUGO | S_IWUSR,
- show_therm_min, set_therm_min, 1 + 11),
- SENSOR_ATTR(temp6_min, S_IRUGO | S_IWUSR,
- show_therm_min, set_therm_min, 2 + 11),
+ SENSOR_ATTR_RW(temp4_min, therm_min, 0 + 11),
+ SENSOR_ATTR_RW(temp5_min, therm_min, 1 + 11),
+ SENSOR_ATTR_RW(temp6_min, therm_min, 2 + 11),
};
static struct sensor_device_attribute therm_max[] = {
- SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR,
- show_therm_max, set_therm_max, 0 + 11),
- SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR,
- show_therm_max, set_therm_max, 1 + 11),
- SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR,
- show_therm_max, set_therm_max, 2 + 11),
+ SENSOR_ATTR_RW(temp4_max, therm_max, 0 + 11),
+ SENSOR_ATTR_RW(temp5_max, therm_max, 1 + 11),
+ SENSOR_ATTR_RW(temp6_max, therm_max, 2 + 11),
};
static struct sensor_device_attribute therm_crit[] = {
- SENSOR_ATTR(temp4_crit, S_IRUGO | S_IWUSR,
- show_therm_crit, set_therm_crit, 0 + 11),
- SENSOR_ATTR(temp5_crit, S_IRUGO | S_IWUSR,
- show_therm_crit, set_therm_crit, 1 + 11),
- SENSOR_ATTR(temp6_crit, S_IRUGO | S_IWUSR,
- show_therm_crit, set_therm_crit, 2 + 11),
+ SENSOR_ATTR_RW(temp4_crit, therm_crit, 0 + 11),
+ SENSOR_ATTR_RW(temp5_crit, therm_crit, 1 + 11),
+ SENSOR_ATTR_RW(temp6_crit, therm_crit, 2 + 11),
};
/*
@@ -797,24 +790,27 @@ static struct sensor_device_attribute therm_crit[] = {
* status register (sec 11.5.12)
*/
-static ssize_t show_therm_min_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t therm_min_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MIN));
}
-static ssize_t show_therm_max_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t therm_max_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
}
-static ssize_t show_therm_crit_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t therm_crit_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
@@ -823,28 +819,19 @@ static ssize_t show_therm_crit_alarm(struct device *dev,
}
static struct sensor_device_attribute therm_min_alarm[] = {
- SENSOR_ATTR(temp4_min_alarm, S_IRUGO,
- show_therm_min_alarm, NULL, 0 + 11),
- SENSOR_ATTR(temp5_min_alarm, S_IRUGO,
- show_therm_min_alarm, NULL, 1 + 11),
- SENSOR_ATTR(temp6_min_alarm, S_IRUGO,
- show_therm_min_alarm, NULL, 2 + 11),
+ SENSOR_ATTR_RO(temp4_min_alarm, therm_min_alarm, 0 + 11),
+ SENSOR_ATTR_RO(temp5_min_alarm, therm_min_alarm, 1 + 11),
+ SENSOR_ATTR_RO(temp6_min_alarm, therm_min_alarm, 2 + 11),
};
static struct sensor_device_attribute therm_max_alarm[] = {
- SENSOR_ATTR(temp4_max_alarm, S_IRUGO,
- show_therm_max_alarm, NULL, 0 + 11),
- SENSOR_ATTR(temp5_max_alarm, S_IRUGO,
- show_therm_max_alarm, NULL, 1 + 11),
- SENSOR_ATTR(temp6_max_alarm, S_IRUGO,
- show_therm_max_alarm, NULL, 2 + 11),
+ SENSOR_ATTR_RO(temp4_max_alarm, therm_max_alarm, 0 + 11),
+ SENSOR_ATTR_RO(temp5_max_alarm, therm_max_alarm, 1 + 11),
+ SENSOR_ATTR_RO(temp6_max_alarm, therm_max_alarm, 2 + 11),
};
static struct sensor_device_attribute therm_crit_alarm[] = {
- SENSOR_ATTR(temp4_crit_alarm, S_IRUGO,
- show_therm_crit_alarm, NULL, 0 + 11),
- SENSOR_ATTR(temp5_crit_alarm, S_IRUGO,
- show_therm_crit_alarm, NULL, 1 + 11),
- SENSOR_ATTR(temp6_crit_alarm, S_IRUGO,
- show_therm_crit_alarm, NULL, 2 + 11),
+ SENSOR_ATTR_RO(temp4_crit_alarm, therm_crit_alarm, 0 + 11),
+ SENSOR_ATTR_RO(temp5_crit_alarm, therm_crit_alarm, 1 + 11),
+ SENSOR_ATTR_RO(temp6_crit_alarm, therm_crit_alarm, 2 + 11),
};
#define THERM_UNIT_ATTRS(X) \
@@ -867,7 +854,7 @@ static const struct attribute_group pc8736x_therm_group = {
.attrs = pc8736x_therm_attr_array,
};
-static ssize_t show_temp_input(struct device *dev,
+static ssize_t temp_input_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -875,7 +862,7 @@ static ssize_t show_temp_input(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}
-static ssize_t show_temp_min(struct device *dev,
+static ssize_t temp_min_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -883,7 +870,7 @@ static ssize_t show_temp_min(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[attr->index]));
}
-static ssize_t show_temp_max(struct device *dev,
+static ssize_t temp_max_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -891,7 +878,7 @@ static ssize_t show_temp_max(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[attr->index]));
}
-static ssize_t show_temp_crit(struct device *dev,
+static ssize_t temp_crit_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -900,7 +887,7 @@ static ssize_t show_temp_crit(struct device *dev,
TEMP_FROM_REG(data->temp_crit[attr->index]));
}
-static ssize_t show_temp_status(struct device *dev,
+static ssize_t temp_status_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -908,9 +895,9 @@ static ssize_t show_temp_status(struct device *dev,
return sprintf(buf, "%d\n", data->temp_status[attr->index]);
}
-static ssize_t set_temp_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -929,9 +916,9 @@ static ssize_t set_temp_min(struct device *dev,
return count;
}
-static ssize_t set_temp_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -950,9 +937,9 @@ static ssize_t set_temp_max(struct device *dev,
return count;
}
-static ssize_t set_temp_crit(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
+static ssize_t temp_crit_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
@@ -972,38 +959,29 @@ static ssize_t set_temp_crit(struct device *dev,
}
static struct sensor_device_attribute temp_input[] = {
- SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0),
- SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1),
- SENSOR_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2),
+ SENSOR_ATTR_RO(temp1_input, temp_input, 0),
+ SENSOR_ATTR_RO(temp2_input, temp_input, 1),
+ SENSOR_ATTR_RO(temp3_input, temp_input, 2),
};
static struct sensor_device_attribute temp_status[] = {
- SENSOR_ATTR(temp1_status, S_IRUGO, show_temp_status, NULL, 0),
- SENSOR_ATTR(temp2_status, S_IRUGO, show_temp_status, NULL, 1),
- SENSOR_ATTR(temp3_status, S_IRUGO, show_temp_status, NULL, 2),
+ SENSOR_ATTR_RO(temp1_status, temp_status, 0),
+ SENSOR_ATTR_RO(temp2_status, temp_status, 1),
+ SENSOR_ATTR_RO(temp3_status, temp_status, 2),
};
static struct sensor_device_attribute temp_min[] = {
- SENSOR_ATTR(temp1_min, S_IRUGO | S_IWUSR,
- show_temp_min, set_temp_min, 0),
- SENSOR_ATTR(temp2_min, S_IRUGO | S_IWUSR,
- show_temp_min, set_temp_min, 1),
- SENSOR_ATTR(temp3_min, S_IRUGO | S_IWUSR,
- show_temp_min, set_temp_min, 2),
+ SENSOR_ATTR_RW(temp1_min, temp_min, 0),
+ SENSOR_ATTR_RW(temp2_min, temp_min, 1),
+ SENSOR_ATTR_RW(temp3_min, temp_min, 2),
};
static struct sensor_device_attribute temp_max[] = {
- SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR,
- show_temp_max, set_temp_max, 0),
- SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR,
- show_temp_max, set_temp_max, 1),
- SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR,
- show_temp_max, set_temp_max, 2),
+ SENSOR_ATTR_RW(temp1_max, temp_max, 0),
+ SENSOR_ATTR_RW(temp2_max, temp_max, 1),
+ SENSOR_ATTR_RW(temp3_max, temp_max, 2),
};
static struct sensor_device_attribute temp_crit[] = {
- SENSOR_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
- show_temp_crit, set_temp_crit, 0),
- SENSOR_ATTR(temp2_crit, S_IRUGO | S_IWUSR,
- show_temp_crit, set_temp_crit, 1),
- SENSOR_ATTR(temp3_crit, S_IRUGO | S_IWUSR,
- show_temp_crit, set_temp_crit, 2),
+ SENSOR_ATTR_RW(temp1_crit, temp_crit, 0),
+ SENSOR_ATTR_RW(temp2_crit, temp_crit, 1),
+ SENSOR_ATTR_RW(temp3_crit, temp_crit, 2),
};
static ssize_t alarms_temp_show(struct device *dev,
@@ -1021,8 +999,9 @@ static DEVICE_ATTR_RO(alarms_temp);
* 12.3.2) that show_temp_alarm() reads (via data->temp_alarms)
*/
-static ssize_t show_temp_min_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_min_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
@@ -1030,8 +1009,9 @@ static ssize_t show_temp_min_alarm(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & CHAN_ALM_MIN));
}
-static ssize_t show_temp_max_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_max_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
@@ -1039,8 +1019,9 @@ static ssize_t show_temp_max_alarm(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & CHAN_ALM_MAX));
}
-static ssize_t show_temp_crit_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_crit_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
@@ -1049,26 +1030,26 @@ static ssize_t show_temp_crit_alarm(struct device *dev,
}
static struct sensor_device_attribute temp_min_alarm[] = {
- SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 0),
- SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 1),
- SENSOR_ATTR(temp3_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 2),
+ SENSOR_ATTR_RO(temp1_min_alarm, temp_min_alarm, 0),
+ SENSOR_ATTR_RO(temp2_min_alarm, temp_min_alarm, 1),
+ SENSOR_ATTR_RO(temp3_min_alarm, temp_min_alarm, 2),
};
static struct sensor_device_attribute temp_max_alarm[] = {
- SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 0),
- SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 1),
- SENSOR_ATTR(temp3_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 2),
+ SENSOR_ATTR_RO(temp1_max_alarm, temp_max_alarm, 0),
+ SENSOR_ATTR_RO(temp2_max_alarm, temp_max_alarm, 1),
+ SENSOR_ATTR_RO(temp3_max_alarm, temp_max_alarm, 2),
};
static struct sensor_device_attribute temp_crit_alarm[] = {
- SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 0),
- SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 1),
- SENSOR_ATTR(temp3_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 2),
+ SENSOR_ATTR_RO(temp1_crit_alarm, temp_crit_alarm, 0),
+ SENSOR_ATTR_RO(temp2_crit_alarm, temp_crit_alarm, 1),
+ SENSOR_ATTR_RO(temp3_crit_alarm, temp_crit_alarm, 2),
};
#define TEMP_FAULT 0x40 /* open diode */
-static ssize_t show_temp_fault(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_fault_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87360_data *data = pc87360_update_device(dev);
unsigned nr = to_sensor_dev_attr(devattr)->index;
@@ -1076,9 +1057,9 @@ static ssize_t show_temp_fault(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & TEMP_FAULT));
}
static struct sensor_device_attribute temp_fault[] = {
- SENSOR_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0),
- SENSOR_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1),
- SENSOR_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2),
+ SENSOR_ATTR_RO(temp1_fault, temp_fault, 0),
+ SENSOR_ATTR_RO(temp2_fault, temp_fault, 1),
+ SENSOR_ATTR_RO(temp3_fault, temp_fault, 2),
};
#define TEMP_UNIT_ATTRS(X) \
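The conversions above rely on the SENSOR_ATTR_RO/SENSOR_ATTR_RW and SENSOR_DEVICE_ATTR_RO/SENSOR_DEVICE_ATTR_RW helpers from <linux/hwmon-sysfs.h>: the macros append _show/_store to their second argument to locate the callbacks and pick permissions effectively equal to S_IRUGO and S_IWUSR | S_IRUGO, which is why every show_foo()/set_foo() pair is renamed to foo_show()/foo_store(). A minimal sketch of the convention, reusing the fan_input callback name from the patch but with a stubbed-out register read:

static ssize_t fan_input_show(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;	/* fan channel */

	/* a real driver would read the fan count register for channel nr */
	return sprintf(buf, "%d\n", nr);
}

/*
 * Declares a read-only (0444) sensor_device_attribute named
 * sensor_dev_attr_fan1_input that calls fan_input_show() with index 0.
 */
static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);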
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index dc5a9d5ada51..d1a3f2040c00 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -384,8 +384,8 @@ done:
return data;
}
-static ssize_t show_fan_input(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_input_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -393,8 +393,8 @@ static ssize_t show_fan_input(struct device *dev, struct device_attribute
return sprintf(buf, "%lu\n", fan_from_reg(data->fan[nr]));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -402,8 +402,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute
return sprintf(buf, "%lu\n", fan_from_reg(data->fan_min[nr]));
}
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -412,8 +412,8 @@ static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
& FAN_STATUS_LOSPD));
}
-static ssize_t show_fan_fault(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_fault_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -422,8 +422,9 @@ static ssize_t show_fan_fault(struct device *dev, struct device_attribute
& FAN_STATUS_STALL));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct pc87427_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -449,49 +450,41 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
return count;
}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_input, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan_input, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan_input, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan_input, NULL, 7);
-
-static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
- show_fan_min, set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
- show_fan_min, set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO,
- show_fan_min, set_fan_min, 2);
-static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
- show_fan_min, set_fan_min, 3);
-static SENSOR_DEVICE_ATTR(fan5_min, S_IWUSR | S_IRUGO,
- show_fan_min, set_fan_min, 4);
-static SENSOR_DEVICE_ATTR(fan6_min, S_IWUSR | S_IRUGO,
- show_fan_min, set_fan_min, 5);
-static SENSOR_DEVICE_ATTR(fan7_min, S_IWUSR | S_IRUGO,
- show_fan_min, set_fan_min, 6);
-static SENSOR_DEVICE_ATTR(fan8_min, S_IWUSR | S_IRUGO,
- show_fan_min, set_fan_min, 7);
-
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_fan_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_fan_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_fan_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_fan_alarm, NULL, 7);
-
-static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_fan_fault, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_fault, S_IRUGO, show_fan_fault, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_fault, S_IRUGO, show_fan_fault, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan5_fault, S_IRUGO, show_fan_fault, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan6_fault, S_IRUGO, show_fan_fault, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan7_fault, S_IRUGO, show_fan_fault, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan8_fault, S_IRUGO, show_fan_fault, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan_input, 3);
+static SENSOR_DEVICE_ATTR_RO(fan5_input, fan_input, 4);
+static SENSOR_DEVICE_ATTR_RO(fan6_input, fan_input, 5);
+static SENSOR_DEVICE_ATTR_RO(fan7_input, fan_input, 6);
+static SENSOR_DEVICE_ATTR_RO(fan8_input, fan_input, 7);
+
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
+static SENSOR_DEVICE_ATTR_RW(fan5_min, fan_min, 4);
+static SENSOR_DEVICE_ATTR_RW(fan6_min, fan_min, 5);
+static SENSOR_DEVICE_ATTR_RW(fan7_min, fan_min, 6);
+static SENSOR_DEVICE_ATTR_RW(fan8_min, fan_min, 7);
+
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, fan_alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, fan_alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, fan_alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_alarm, fan_alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(fan5_alarm, fan_alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(fan6_alarm, fan_alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(fan7_alarm, fan_alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan8_alarm, fan_alarm, 7);
+
+static SENSOR_DEVICE_ATTR_RO(fan1_fault, fan_fault, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_fault, fan_fault, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_fault, fan_fault, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_fault, fan_fault, 3);
+static SENSOR_DEVICE_ATTR_RO(fan5_fault, fan_fault, 4);
+static SENSOR_DEVICE_ATTR_RO(fan6_fault, fan_fault, 5);
+static SENSOR_DEVICE_ATTR_RO(fan7_fault, fan_fault, 6);
+static SENSOR_DEVICE_ATTR_RO(fan8_fault, fan_fault, 7);
static struct attribute *pc87427_attributes_fan[8][5] = {
{
@@ -568,8 +561,8 @@ static void update_pwm_enable(struct pc87427_data *data, int nr, u8 mode)
outb(data->pwm_enable[nr], iobase + PC87427_REG_PWM_ENABLE);
}
-static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t pwm_enable_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -581,8 +574,9 @@ static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", pwm_enable);
}
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t pwm_enable_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct pc87427_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -602,8 +596,8 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
return count;
}
-static ssize_t show_pwm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -611,8 +605,8 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", (int)data->pwm[nr]);
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct pc87427_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -657,19 +651,15 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
- show_pwm_enable, set_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
- show_pwm_enable, set_pwm_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
- show_pwm_enable, set_pwm_enable, 2);
-static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO,
- show_pwm_enable, set_pwm_enable, 3);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm4_enable, pwm_enable, 3);
-static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
-static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
-static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm4, pwm, 3);
static struct attribute *pc87427_attributes_pwm[4][3] = {
{
@@ -698,8 +688,8 @@ static const struct attribute_group pc87427_group_pwm[4] = {
{ .attrs = pc87427_attributes_pwm[3] },
};
-static ssize_t show_temp_input(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_input_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -707,8 +697,8 @@ static ssize_t show_temp_input(struct device *dev, struct device_attribute
return sprintf(buf, "%ld\n", temp_from_reg(data->temp[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -716,8 +706,8 @@ static ssize_t show_temp_min(struct device *dev, struct device_attribute
return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_min[nr]));
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -725,8 +715,8 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute
return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_max[nr]));
}
-static ssize_t show_temp_crit(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_crit_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -734,8 +724,8 @@ static ssize_t show_temp_crit(struct device *dev, struct device_attribute
return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_crit[nr]));
}
-static ssize_t show_temp_type(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_type_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -743,8 +733,9 @@ static ssize_t show_temp_type(struct device *dev, struct device_attribute
return sprintf(buf, "%u\n", temp_type_from_reg(data->temp_type[nr]));
}
-static ssize_t show_temp_min_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_min_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -753,8 +744,9 @@ static ssize_t show_temp_min_alarm(struct device *dev, struct device_attribute
& TEMP_STATUS_LOWFLG));
}
-static ssize_t show_temp_max_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_max_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -763,8 +755,9 @@ static ssize_t show_temp_max_alarm(struct device *dev, struct device_attribute
& TEMP_STATUS_HIGHFLG));
}
-static ssize_t show_temp_crit_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_crit_alarm_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -773,8 +766,8 @@ static ssize_t show_temp_crit_alarm(struct device *dev, struct device_attribute
& TEMP_STATUS_CRITFLG));
}
-static ssize_t show_temp_fault(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_fault_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct pc87427_data *data = pc87427_update_device(dev);
int nr = to_sensor_dev_attr(devattr)->index;
@@ -783,86 +776,68 @@ static ssize_t show_temp_fault(struct device *dev, struct device_attribute
& TEMP_STATUS_SENSERR));
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_min, S_IRUGO, show_temp_min, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_max, S_IRUGO, show_temp_max, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp_crit, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, show_temp_crit, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp_crit, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_crit, S_IRUGO, show_temp_crit, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_crit, S_IRUGO, show_temp_crit, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_type, S_IRUGO, show_temp_type, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_type, S_IRUGO, show_temp_type, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_type, S_IRUGO, show_temp_type, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
- show_temp_min_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO,
- show_temp_min_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO,
- show_temp_min_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO,
- show_temp_min_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO,
- show_temp_min_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_min_alarm, S_IRUGO,
- show_temp_min_alarm, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
- show_temp_max_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO,
- show_temp_max_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO,
- show_temp_max_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO,
- show_temp_max_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO,
- show_temp_max_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO,
- show_temp_max_alarm, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
- show_temp_crit_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO,
- show_temp_crit_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO,
- show_temp_crit_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO,
- show_temp_crit_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO,
- show_temp_crit_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO,
- show_temp_crit_alarm, NULL, 5);
-
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp_input, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_input, temp_input, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_input, temp_input, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_input, temp_input, 5);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_min, temp_min, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_min, temp_min, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_min, temp_min, 5);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_max, temp_max, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_max, temp_max, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_max, temp_max, 5);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit, temp_crit, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_crit, temp_crit, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_crit, temp_crit, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_crit, temp_crit, 5);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_type, temp_type, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_type, temp_type, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_type, temp_type, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_type, temp_type, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_type, temp_type, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_type, temp_type, 5);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, temp_min_alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, temp_min_alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_min_alarm, temp_min_alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_min_alarm, temp_min_alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_min_alarm, temp_min_alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_min_alarm, temp_min_alarm, 5);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, temp_max_alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, temp_max_alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, temp_max_alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, temp_max_alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_max_alarm, temp_max_alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, temp_max_alarm, 5);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, temp_crit_alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, temp_crit_alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, temp_crit_alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, temp_crit_alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, temp_crit_alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, temp_crit_alarm, 5);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, temp_fault, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, temp_fault, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, temp_fault, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_fault, temp_fault, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_fault, temp_fault, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_fault, temp_fault, 5);
static struct attribute *pc87427_attributes_temp[6][10] = {
{
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 7688dab32f6e..f05eaa50535e 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -28,6 +28,11 @@
#include <linux/pmbus.h>
#include "pmbus.h"
+struct pmbus_device_info {
+ int pages;
+ u32 flags;
+};
+
/*
* Find sensor groups and status registers on each page.
*/
@@ -172,13 +177,14 @@ static int pmbus_probe(struct i2c_client *client,
struct pmbus_driver_info *info;
struct pmbus_platform_data *pdata = NULL;
struct device *dev = &client->dev;
+ struct pmbus_device_info *device_info;
info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- if (!strcmp(id->name, "dps460") || !strcmp(id->name, "dps800") ||
- !strcmp(id->name, "sgd009")) {
+ device_info = (struct pmbus_device_info *)id->driver_data;
+ if (device_info->flags & PMBUS_SKIP_STATUS_CHECK) {
pdata = devm_kzalloc(dev, sizeof(struct pmbus_platform_data),
GFP_KERNEL);
if (!pdata)
@@ -187,36 +193,50 @@ static int pmbus_probe(struct i2c_client *client,
pdata->flags = PMBUS_SKIP_STATUS_CHECK;
}
- info->pages = id->driver_data;
+ info->pages = device_info->pages;
info->identify = pmbus_identify;
dev->platform_data = pdata;
return pmbus_do_probe(client, id, info);
}
+static const struct pmbus_device_info pmbus_info_one = {
+ .pages = 1,
+ .flags = 0
+};
+static const struct pmbus_device_info pmbus_info_zero = {
+ .pages = 0,
+ .flags = 0
+};
+static const struct pmbus_device_info pmbus_info_one_skip = {
+ .pages = 1,
+ .flags = PMBUS_SKIP_STATUS_CHECK
+};
+
/*
* Use driver_data to set the number of pages supported by the chip.
*/
static const struct i2c_device_id pmbus_id[] = {
- {"adp4000", 1},
- {"bmr453", 1},
- {"bmr454", 1},
- {"dps460", 1},
- {"dps800", 1},
- {"mdt040", 1},
- {"ncp4200", 1},
- {"ncp4208", 1},
- {"pdt003", 1},
- {"pdt006", 1},
- {"pdt012", 1},
- {"pmbus", 0},
- {"sgd009", 1},
- {"tps40400", 1},
- {"tps544b20", 1},
- {"tps544b25", 1},
- {"tps544c20", 1},
- {"tps544c25", 1},
- {"udt020", 1},
+ {"adp4000", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr453", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr454", (kernel_ulong_t)&pmbus_info_one},
+ {"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
+ {"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
+ {"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
+ {"mdt040", (kernel_ulong_t)&pmbus_info_one},
+ {"ncp4200", (kernel_ulong_t)&pmbus_info_one},
+ {"ncp4208", (kernel_ulong_t)&pmbus_info_one},
+ {"pdt003", (kernel_ulong_t)&pmbus_info_one},
+ {"pdt006", (kernel_ulong_t)&pmbus_info_one},
+ {"pdt012", (kernel_ulong_t)&pmbus_info_one},
+ {"pmbus", (kernel_ulong_t)&pmbus_info_zero},
+ {"sgd009", (kernel_ulong_t)&pmbus_info_one_skip},
+ {"tps40400", (kernel_ulong_t)&pmbus_info_one},
+ {"tps544b20", (kernel_ulong_t)&pmbus_info_one},
+ {"tps544b25", (kernel_ulong_t)&pmbus_info_one},
+ {"tps544c20", (kernel_ulong_t)&pmbus_info_one},
+ {"tps544c25", (kernel_ulong_t)&pmbus_info_one},
+ {"udt020", (kernel_ulong_t)&pmbus_info_one},
{}
};
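With driver_data now pointing at a struct pmbus_device_info instead of holding a bare page count, each pmbus_id entry can carry both the page count and any probe-time flags. As a sketch of what the refactor enables (the chip name and values below are hypothetical and not part of this patch; the snippet assumes the pmbus_device_info definition above), supporting a new two-page device that also needs PMBUS_SKIP_STATUS_CHECK would only take another static descriptor and an id-table entry:

static const struct pmbus_device_info pmbus_info_two_skip = {
	.pages = 2,				/* hypothetical two-page chip */
	.flags = PMBUS_SKIP_STATUS_CHECK,	/* skip status checks at probe */
};

static const struct i2c_device_id example_pmbus_id[] = {
	/* "xyz1234" is an invented name, used only to illustrate the pattern */
	{"xyz1234", (kernel_ulong_t)&pmbus_info_two_skip},
	{}
};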
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 85b515cd9df0..2bc352c5357f 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -80,7 +80,14 @@ static struct pmbus_driver_info tps53679_info = {
static int tps53679_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- return pmbus_do_probe(client, id, &tps53679_info);
+ struct pmbus_driver_info *info;
+
+ info = devm_kmemdup(&client->dev, &tps53679_info, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ return pmbus_do_probe(client, id, info);
}
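Copying tps53679_info with devm_kmemdup() gives each bound client its own pmbus_driver_info, so the shared static template is never modified at run time; presumably this is groundwork for per-device adjustments before calling pmbus_do_probe(). A hedged sketch of that usage, where example_tps_probe() and the pages override are illustrative only and not something this driver does:

static int example_tps_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	struct pmbus_driver_info *info;

	/* per-client copy, released automatically with the device */
	info = devm_kmemdup(&client->dev, &tps53679_info, sizeof(*info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* hypothetical per-device tweak; tps53679_info itself stays intact */
	if (id->driver_data == 2)
		info->pages = 2;

	return pmbus_do_probe(client, id, info);
}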
static const struct i2c_device_id tps53679_id[] = {
diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c
index 3014e4ac741e..16c1c98e0e18 100644
--- a/drivers/hwmon/powr1220.c
+++ b/drivers/hwmon/powr1220.c
@@ -177,8 +177,9 @@ exit:
}
/* Shows the voltage associated with the specified ADC channel */
-static ssize_t powr1220_show_voltage(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static ssize_t powr1220_voltage_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
int adc_val = powr1220_read_adc(dev, attr->index);
@@ -190,8 +191,8 @@ static ssize_t powr1220_show_voltage(struct device *dev,
}
/* Shows the maximum setting associated with the specified ADC channel */
-static ssize_t powr1220_show_max(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static ssize_t powr1220_max_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
struct powr1220_data *data = dev_get_drvdata(dev);
@@ -200,100 +201,59 @@ static ssize_t powr1220_show_max(struct device *dev,
}
/* Shows the label associated with the specified ADC channel */
-static ssize_t powr1220_show_label(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static ssize_t powr1220_label_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
return sprintf(buf, "%s\n", input_names[attr->index]);
}
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON1);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON2);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON3);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON4);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON5);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON6);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON7);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON8);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON9);
-static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON10);
-static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON11);
-static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, powr1220_show_voltage, NULL,
- VMON12);
-static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, powr1220_show_voltage, NULL,
- VCCA);
-static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, powr1220_show_voltage, NULL,
- VCCINP);
-
-static SENSOR_DEVICE_ATTR(in0_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON1);
-static SENSOR_DEVICE_ATTR(in1_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON2);
-static SENSOR_DEVICE_ATTR(in2_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON3);
-static SENSOR_DEVICE_ATTR(in3_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON4);
-static SENSOR_DEVICE_ATTR(in4_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON5);
-static SENSOR_DEVICE_ATTR(in5_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON6);
-static SENSOR_DEVICE_ATTR(in6_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON7);
-static SENSOR_DEVICE_ATTR(in7_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON8);
-static SENSOR_DEVICE_ATTR(in8_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON9);
-static SENSOR_DEVICE_ATTR(in9_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON10);
-static SENSOR_DEVICE_ATTR(in10_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON11);
-static SENSOR_DEVICE_ATTR(in11_highest, S_IRUGO, powr1220_show_max, NULL,
- VMON12);
-static SENSOR_DEVICE_ATTR(in12_highest, S_IRUGO, powr1220_show_max, NULL,
- VCCA);
-static SENSOR_DEVICE_ATTR(in13_highest, S_IRUGO, powr1220_show_max, NULL,
- VCCINP);
-
-static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, powr1220_show_label, NULL,
- VMON1);
-static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, powr1220_show_label, NULL,
- VMON2);
-static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, powr1220_show_label, NULL,
- VMON3);
-static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, powr1220_show_label, NULL,
- VMON4);
-static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, powr1220_show_label, NULL,
- VMON5);
-static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, powr1220_show_label, NULL,
- VMON6);
-static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, powr1220_show_label, NULL,
- VMON7);
-static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, powr1220_show_label, NULL,
- VMON8);
-static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, powr1220_show_label, NULL,
- VMON9);
-static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, powr1220_show_label, NULL,
- VMON10);
-static SENSOR_DEVICE_ATTR(in10_label, S_IRUGO, powr1220_show_label, NULL,
- VMON11);
-static SENSOR_DEVICE_ATTR(in11_label, S_IRUGO, powr1220_show_label, NULL,
- VMON12);
-static SENSOR_DEVICE_ATTR(in12_label, S_IRUGO, powr1220_show_label, NULL,
- VCCA);
-static SENSOR_DEVICE_ATTR(in13_label, S_IRUGO, powr1220_show_label, NULL,
- VCCINP);
+static SENSOR_DEVICE_ATTR_RO(in0_input, powr1220_voltage, VMON1);
+static SENSOR_DEVICE_ATTR_RO(in1_input, powr1220_voltage, VMON2);
+static SENSOR_DEVICE_ATTR_RO(in2_input, powr1220_voltage, VMON3);
+static SENSOR_DEVICE_ATTR_RO(in3_input, powr1220_voltage, VMON4);
+static SENSOR_DEVICE_ATTR_RO(in4_input, powr1220_voltage, VMON5);
+static SENSOR_DEVICE_ATTR_RO(in5_input, powr1220_voltage, VMON6);
+static SENSOR_DEVICE_ATTR_RO(in6_input, powr1220_voltage, VMON7);
+static SENSOR_DEVICE_ATTR_RO(in7_input, powr1220_voltage, VMON8);
+static SENSOR_DEVICE_ATTR_RO(in8_input, powr1220_voltage, VMON9);
+static SENSOR_DEVICE_ATTR_RO(in9_input, powr1220_voltage, VMON10);
+static SENSOR_DEVICE_ATTR_RO(in10_input, powr1220_voltage, VMON11);
+static SENSOR_DEVICE_ATTR_RO(in11_input, powr1220_voltage, VMON12);
+static SENSOR_DEVICE_ATTR_RO(in12_input, powr1220_voltage, VCCA);
+static SENSOR_DEVICE_ATTR_RO(in13_input, powr1220_voltage, VCCINP);
+
+static SENSOR_DEVICE_ATTR_RO(in0_highest, powr1220_max, VMON1);
+static SENSOR_DEVICE_ATTR_RO(in1_highest, powr1220_max, VMON2);
+static SENSOR_DEVICE_ATTR_RO(in2_highest, powr1220_max, VMON3);
+static SENSOR_DEVICE_ATTR_RO(in3_highest, powr1220_max, VMON4);
+static SENSOR_DEVICE_ATTR_RO(in4_highest, powr1220_max, VMON5);
+static SENSOR_DEVICE_ATTR_RO(in5_highest, powr1220_max, VMON6);
+static SENSOR_DEVICE_ATTR_RO(in6_highest, powr1220_max, VMON7);
+static SENSOR_DEVICE_ATTR_RO(in7_highest, powr1220_max, VMON8);
+static SENSOR_DEVICE_ATTR_RO(in8_highest, powr1220_max, VMON9);
+static SENSOR_DEVICE_ATTR_RO(in9_highest, powr1220_max, VMON10);
+static SENSOR_DEVICE_ATTR_RO(in10_highest, powr1220_max, VMON11);
+static SENSOR_DEVICE_ATTR_RO(in11_highest, powr1220_max, VMON12);
+static SENSOR_DEVICE_ATTR_RO(in12_highest, powr1220_max, VCCA);
+static SENSOR_DEVICE_ATTR_RO(in13_highest, powr1220_max, VCCINP);
+
+static SENSOR_DEVICE_ATTR_RO(in0_label, powr1220_label, VMON1);
+static SENSOR_DEVICE_ATTR_RO(in1_label, powr1220_label, VMON2);
+static SENSOR_DEVICE_ATTR_RO(in2_label, powr1220_label, VMON3);
+static SENSOR_DEVICE_ATTR_RO(in3_label, powr1220_label, VMON4);
+static SENSOR_DEVICE_ATTR_RO(in4_label, powr1220_label, VMON5);
+static SENSOR_DEVICE_ATTR_RO(in5_label, powr1220_label, VMON6);
+static SENSOR_DEVICE_ATTR_RO(in6_label, powr1220_label, VMON7);
+static SENSOR_DEVICE_ATTR_RO(in7_label, powr1220_label, VMON8);
+static SENSOR_DEVICE_ATTR_RO(in8_label, powr1220_label, VMON9);
+static SENSOR_DEVICE_ATTR_RO(in9_label, powr1220_label, VMON10);
+static SENSOR_DEVICE_ATTR_RO(in10_label, powr1220_label, VMON11);
+static SENSOR_DEVICE_ATTR_RO(in11_label, powr1220_label, VMON12);
+static SENSOR_DEVICE_ATTR_RO(in12_label, powr1220_label, VCCA);
+static SENSOR_DEVICE_ATTR_RO(in13_label, powr1220_label, VCCINP);
static struct attribute *powr1220_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
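
The powr1220 hunk above shows the conversion pattern repeated for every driver in this series: each show/store handler is renamed to the <prefix>_show / <prefix>_store form, and the attribute declarations drop the explicit mode and callback arguments in favour of SENSOR_DEVICE_ATTR_RO()/_RW(). As a rough sketch (from memory, not taken from this patch; include/linux/hwmon-sysfs.h is authoritative), the helpers expand to the old macro with the mode and callbacks filled in:

/* Sketch of the assumed helper expansion, not copied from the kernel tree. */
#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index) \
	SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)
#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index) \
	SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)

So SENSOR_DEVICE_ATTR_RO(in0_input, powr1220_voltage, VMON1) resolves to the previous SENSOR_DEVICE_ATTR(in0_input, 0444, powr1220_voltage_show, NULL, VMON1), which is why each handler rename has to match the second macro argument exactly.
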
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 2c944825026f..167221c7628a 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
+#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
@@ -31,6 +32,7 @@
struct pwm_fan_ctx {
struct mutex lock;
struct pwm_device *pwm;
+ struct regulator *reg_en;
unsigned int pwm_value;
unsigned int pwm_fan_state;
unsigned int pwm_fan_max_state;
@@ -231,6 +233,21 @@ static int pwm_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
+ ctx->reg_en = devm_regulator_get_optional(&pdev->dev, "fan");
+ if (IS_ERR(ctx->reg_en)) {
+ if (PTR_ERR(ctx->reg_en) != -ENODEV)
+ return PTR_ERR(ctx->reg_en);
+
+ ctx->reg_en = NULL;
+ } else {
+ ret = regulator_enable(ctx->reg_en);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable fan supply: %d\n", ret);
+ return ret;
+ }
+ }
+
ctx->pwm_value = MAX_PWM;
/* Set duty cycle to maximum allowed and enable PWM output */
@@ -241,7 +258,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
ret = pwm_apply_state(ctx->pwm, &state);
if (ret) {
dev_err(&pdev->dev, "Failed to configure PWM\n");
- return ret;
+ goto err_reg_disable;
}
hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, "pwmfan",
@@ -277,6 +294,10 @@ err_pwm_disable:
state.enabled = false;
pwm_apply_state(ctx->pwm, &state);
+err_reg_disable:
+ if (ctx->reg_en)
+ regulator_disable(ctx->reg_en);
+
return ret;
}
@@ -287,6 +308,10 @@ static int pwm_fan_remove(struct platform_device *pdev)
thermal_cooling_device_unregister(ctx->cdev);
if (ctx->pwm_value)
pwm_disable(ctx->pwm);
+
+ if (ctx->reg_en)
+ regulator_disable(ctx->reg_en);
+
return 0;
}
@@ -307,6 +332,14 @@ static int pwm_fan_suspend(struct device *dev)
pwm_disable(ctx->pwm);
}
+ if (ctx->reg_en) {
+ ret = regulator_disable(ctx->reg_en);
+ if (ret) {
+ dev_err(dev, "Failed to disable fan supply: %d\n", ret);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -317,6 +350,14 @@ static int pwm_fan_resume(struct device *dev)
unsigned long duty;
int ret;
+ if (ctx->reg_en) {
+ ret = regulator_enable(ctx->reg_en);
+ if (ret) {
+ dev_err(dev, "Failed to enable fan supply: %d\n", ret);
+ return ret;
+ }
+ }
+
if (ctx->pwm_value == 0)
return 0;
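
The pwm-fan change follows the usual optional-supply pattern: devm_regulator_get_optional() returns an -ENODEV error pointer when the device tree describes no "fan" supply, which the driver treats as "no regulator present" rather than a failure, while any other error (including probe deferral) is propagated. A minimal sketch of that pattern, with hypothetical names not taken from this patch:

#include <linux/regulator/consumer.h>

/* Sketch: fetch an optional "fan" supply and enable it if present. */
static int example_get_fan_supply(struct device *dev, struct regulator **reg)
{
	*reg = devm_regulator_get_optional(dev, "fan");
	if (IS_ERR(*reg)) {
		if (PTR_ERR(*reg) != -ENODEV)
			return PTR_ERR(*reg);	/* real error, e.g. -EPROBE_DEFER */
		*reg = NULL;			/* supply simply not described */
		return 0;
	}
	return regulator_enable(*reg);	/* balanced by regulator_disable() later */
}

Each regulator_enable() then needs a matching regulator_disable() on the error, remove and suspend paths, which is what the new err_reg_disable label and the suspend/resume hunks above provide.
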
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 91544f2312e6..63cfbc5a86ed 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -211,8 +211,8 @@ static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
return snprintf(buf, PAGE_SIZE, "%s\n", DEVNAME);
}
-static ssize_t show_temp(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5627_data *data = sch5627_update_device(dev);
@@ -225,8 +225,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_temp_fault(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_fault_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5627_data *data = sch5627_update_device(dev);
@@ -237,8 +237,8 @@ static ssize_t show_temp_fault(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", data->temp[attr->index] == 0);
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5627_data *data = dev_get_drvdata(dev);
@@ -248,8 +248,8 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_temp_crit(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_crit_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5627_data *data = dev_get_drvdata(dev);
@@ -259,8 +259,8 @@ static ssize_t show_temp_crit(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_fan(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5627_data *data = sch5627_update_device(dev);
@@ -276,8 +276,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_fan_fault(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_fault_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5627_data *data = sch5627_update_device(dev);
@@ -289,8 +289,8 @@ static ssize_t show_fan_fault(struct device *dev, struct device_attribute
data->fan[attr->index] == 0xffff);
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5627_data *data = dev_get_drvdata(dev);
@@ -301,8 +301,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_in(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5627_data *data = sch5627_update_device(dev);
@@ -317,8 +317,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_in_label(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t in_label_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -327,61 +327,61 @@ static ssize_t show_in_label(struct device *dev, struct device_attribute
}
static DEVICE_ATTR_RO(name);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_temp_fault, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_temp_fault, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_max, S_IRUGO, show_temp_max, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp7_max, S_IRUGO, show_temp_max, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp8_max, S_IRUGO, show_temp_max, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp_crit, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, show_temp_crit, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp_crit, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_crit, S_IRUGO, show_temp_crit, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_crit, S_IRUGO, show_temp_crit, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp7_crit, S_IRUGO, show_temp_crit, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp8_crit, S_IRUGO, show_temp_crit, NULL, 7);
-
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_fan_fault, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_fault, S_IRUGO, show_fan_fault, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_fault, S_IRUGO, show_fan_fault, NULL, 3);
-static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, show_fan_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO, show_fan_min, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_min, S_IRUGO, show_fan_min, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_min, S_IRUGO, show_fan_min, NULL, 3);
-
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 4);
-static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_in_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_in_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_in_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_in_label, NULL, 3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_input, temp, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_input, temp, 5);
+static SENSOR_DEVICE_ATTR_RO(temp7_input, temp, 6);
+static SENSOR_DEVICE_ATTR_RO(temp8_input, temp, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, temp_fault, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, temp_fault, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, temp_fault, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_fault, temp_fault, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_fault, temp_fault, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_fault, temp_fault, 5);
+static SENSOR_DEVICE_ATTR_RO(temp7_fault, temp_fault, 6);
+static SENSOR_DEVICE_ATTR_RO(temp8_fault, temp_fault, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_max, temp_max, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_max, temp_max, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_max, temp_max, 5);
+static SENSOR_DEVICE_ATTR_RO(temp7_max, temp_max, 6);
+static SENSOR_DEVICE_ATTR_RO(temp8_max, temp_max, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit, temp_crit, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_crit, temp_crit, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_crit, temp_crit, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_crit, temp_crit, 5);
+static SENSOR_DEVICE_ATTR_RO(temp7_crit, temp_crit, 6);
+static SENSOR_DEVICE_ATTR_RO(temp8_crit, temp_crit, 7);
+
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
+static SENSOR_DEVICE_ATTR_RO(fan1_fault, fan_fault, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_fault, fan_fault, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_fault, fan_fault, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_fault, fan_fault, 3);
+static SENSOR_DEVICE_ATTR_RO(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_min, fan_min, 3);
+
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RO(in0_label, in_label, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_label, in_label, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_label, in_label, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_label, in_label, 3);
static struct attribute *sch5627_attributes[] = {
&dev_attr_name.attr,
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index d24d7b6047f2..2a3825603a77 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -170,14 +170,14 @@ static int reg_to_rpm(u16 reg)
return 5400540 / reg;
}
-static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", DEVNAME);
}
-static ssize_t show_in_value(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t in_value_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5636_data *data = sch5636_update_device(dev);
@@ -192,8 +192,8 @@ static ssize_t show_in_value(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_in_label(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t in_label_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -201,8 +201,8 @@ static ssize_t show_in_label(struct device *dev, struct device_attribute
SCH5636_IN_LABELS[attr->index]);
}
-static ssize_t show_temp_value(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_value_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5636_data *data = sch5636_update_device(dev);
@@ -215,8 +215,8 @@ static ssize_t show_temp_value(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_temp_fault(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_fault_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5636_data *data = sch5636_update_device(dev);
@@ -229,8 +229,8 @@ static ssize_t show_temp_fault(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5636_data *data = sch5636_update_device(dev);
@@ -243,8 +243,8 @@ static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_fan_value(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_value_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5636_data *data = sch5636_update_device(dev);
@@ -260,8 +260,8 @@ static ssize_t show_fan_value(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_fan_fault(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_fault_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5636_data *data = sch5636_update_device(dev);
@@ -274,8 +274,8 @@ static ssize_t show_fan_fault(struct device *dev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct sch5636_data *data = sch5636_update_device(dev);
@@ -289,95 +289,95 @@ static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
}
static struct sensor_device_attribute sch5636_attr[] = {
- SENSOR_ATTR(name, 0444, show_name, NULL, 0),
- SENSOR_ATTR(in0_input, 0444, show_in_value, NULL, 0),
- SENSOR_ATTR(in0_label, 0444, show_in_label, NULL, 0),
- SENSOR_ATTR(in1_input, 0444, show_in_value, NULL, 1),
- SENSOR_ATTR(in1_label, 0444, show_in_label, NULL, 1),
- SENSOR_ATTR(in2_input, 0444, show_in_value, NULL, 2),
- SENSOR_ATTR(in2_label, 0444, show_in_label, NULL, 2),
- SENSOR_ATTR(in3_input, 0444, show_in_value, NULL, 3),
- SENSOR_ATTR(in3_label, 0444, show_in_label, NULL, 3),
- SENSOR_ATTR(in4_input, 0444, show_in_value, NULL, 4),
- SENSOR_ATTR(in4_label, 0444, show_in_label, NULL, 4),
+ SENSOR_ATTR_RO(name, name, 0),
+ SENSOR_ATTR_RO(in0_input, in_value, 0),
+ SENSOR_ATTR_RO(in0_label, in_label, 0),
+ SENSOR_ATTR_RO(in1_input, in_value, 1),
+ SENSOR_ATTR_RO(in1_label, in_label, 1),
+ SENSOR_ATTR_RO(in2_input, in_value, 2),
+ SENSOR_ATTR_RO(in2_label, in_label, 2),
+ SENSOR_ATTR_RO(in3_input, in_value, 3),
+ SENSOR_ATTR_RO(in3_label, in_label, 3),
+ SENSOR_ATTR_RO(in4_input, in_value, 4),
+ SENSOR_ATTR_RO(in4_label, in_label, 4),
};
static struct sensor_device_attribute sch5636_temp_attr[] = {
- SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
- SENSOR_ATTR(temp1_fault, 0444, show_temp_fault, NULL, 0),
- SENSOR_ATTR(temp1_alarm, 0444, show_temp_alarm, NULL, 0),
- SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
- SENSOR_ATTR(temp2_fault, 0444, show_temp_fault, NULL, 1),
- SENSOR_ATTR(temp2_alarm, 0444, show_temp_alarm, NULL, 1),
- SENSOR_ATTR(temp3_input, 0444, show_temp_value, NULL, 2),
- SENSOR_ATTR(temp3_fault, 0444, show_temp_fault, NULL, 2),
- SENSOR_ATTR(temp3_alarm, 0444, show_temp_alarm, NULL, 2),
- SENSOR_ATTR(temp4_input, 0444, show_temp_value, NULL, 3),
- SENSOR_ATTR(temp4_fault, 0444, show_temp_fault, NULL, 3),
- SENSOR_ATTR(temp4_alarm, 0444, show_temp_alarm, NULL, 3),
- SENSOR_ATTR(temp5_input, 0444, show_temp_value, NULL, 4),
- SENSOR_ATTR(temp5_fault, 0444, show_temp_fault, NULL, 4),
- SENSOR_ATTR(temp5_alarm, 0444, show_temp_alarm, NULL, 4),
- SENSOR_ATTR(temp6_input, 0444, show_temp_value, NULL, 5),
- SENSOR_ATTR(temp6_fault, 0444, show_temp_fault, NULL, 5),
- SENSOR_ATTR(temp6_alarm, 0444, show_temp_alarm, NULL, 5),
- SENSOR_ATTR(temp7_input, 0444, show_temp_value, NULL, 6),
- SENSOR_ATTR(temp7_fault, 0444, show_temp_fault, NULL, 6),
- SENSOR_ATTR(temp7_alarm, 0444, show_temp_alarm, NULL, 6),
- SENSOR_ATTR(temp8_input, 0444, show_temp_value, NULL, 7),
- SENSOR_ATTR(temp8_fault, 0444, show_temp_fault, NULL, 7),
- SENSOR_ATTR(temp8_alarm, 0444, show_temp_alarm, NULL, 7),
- SENSOR_ATTR(temp9_input, 0444, show_temp_value, NULL, 8),
- SENSOR_ATTR(temp9_fault, 0444, show_temp_fault, NULL, 8),
- SENSOR_ATTR(temp9_alarm, 0444, show_temp_alarm, NULL, 8),
- SENSOR_ATTR(temp10_input, 0444, show_temp_value, NULL, 9),
- SENSOR_ATTR(temp10_fault, 0444, show_temp_fault, NULL, 9),
- SENSOR_ATTR(temp10_alarm, 0444, show_temp_alarm, NULL, 9),
- SENSOR_ATTR(temp11_input, 0444, show_temp_value, NULL, 10),
- SENSOR_ATTR(temp11_fault, 0444, show_temp_fault, NULL, 10),
- SENSOR_ATTR(temp11_alarm, 0444, show_temp_alarm, NULL, 10),
- SENSOR_ATTR(temp12_input, 0444, show_temp_value, NULL, 11),
- SENSOR_ATTR(temp12_fault, 0444, show_temp_fault, NULL, 11),
- SENSOR_ATTR(temp12_alarm, 0444, show_temp_alarm, NULL, 11),
- SENSOR_ATTR(temp13_input, 0444, show_temp_value, NULL, 12),
- SENSOR_ATTR(temp13_fault, 0444, show_temp_fault, NULL, 12),
- SENSOR_ATTR(temp13_alarm, 0444, show_temp_alarm, NULL, 12),
- SENSOR_ATTR(temp14_input, 0444, show_temp_value, NULL, 13),
- SENSOR_ATTR(temp14_fault, 0444, show_temp_fault, NULL, 13),
- SENSOR_ATTR(temp14_alarm, 0444, show_temp_alarm, NULL, 13),
- SENSOR_ATTR(temp15_input, 0444, show_temp_value, NULL, 14),
- SENSOR_ATTR(temp15_fault, 0444, show_temp_fault, NULL, 14),
- SENSOR_ATTR(temp15_alarm, 0444, show_temp_alarm, NULL, 14),
- SENSOR_ATTR(temp16_input, 0444, show_temp_value, NULL, 15),
- SENSOR_ATTR(temp16_fault, 0444, show_temp_fault, NULL, 15),
- SENSOR_ATTR(temp16_alarm, 0444, show_temp_alarm, NULL, 15),
+ SENSOR_ATTR_RO(temp1_input, temp_value, 0),
+ SENSOR_ATTR_RO(temp1_fault, temp_fault, 0),
+ SENSOR_ATTR_RO(temp1_alarm, temp_alarm, 0),
+ SENSOR_ATTR_RO(temp2_input, temp_value, 1),
+ SENSOR_ATTR_RO(temp2_fault, temp_fault, 1),
+ SENSOR_ATTR_RO(temp2_alarm, temp_alarm, 1),
+ SENSOR_ATTR_RO(temp3_input, temp_value, 2),
+ SENSOR_ATTR_RO(temp3_fault, temp_fault, 2),
+ SENSOR_ATTR_RO(temp3_alarm, temp_alarm, 2),
+ SENSOR_ATTR_RO(temp4_input, temp_value, 3),
+ SENSOR_ATTR_RO(temp4_fault, temp_fault, 3),
+ SENSOR_ATTR_RO(temp4_alarm, temp_alarm, 3),
+ SENSOR_ATTR_RO(temp5_input, temp_value, 4),
+ SENSOR_ATTR_RO(temp5_fault, temp_fault, 4),
+ SENSOR_ATTR_RO(temp5_alarm, temp_alarm, 4),
+ SENSOR_ATTR_RO(temp6_input, temp_value, 5),
+ SENSOR_ATTR_RO(temp6_fault, temp_fault, 5),
+ SENSOR_ATTR_RO(temp6_alarm, temp_alarm, 5),
+ SENSOR_ATTR_RO(temp7_input, temp_value, 6),
+ SENSOR_ATTR_RO(temp7_fault, temp_fault, 6),
+ SENSOR_ATTR_RO(temp7_alarm, temp_alarm, 6),
+ SENSOR_ATTR_RO(temp8_input, temp_value, 7),
+ SENSOR_ATTR_RO(temp8_fault, temp_fault, 7),
+ SENSOR_ATTR_RO(temp8_alarm, temp_alarm, 7),
+ SENSOR_ATTR_RO(temp9_input, temp_value, 8),
+ SENSOR_ATTR_RO(temp9_fault, temp_fault, 8),
+ SENSOR_ATTR_RO(temp9_alarm, temp_alarm, 8),
+ SENSOR_ATTR_RO(temp10_input, temp_value, 9),
+ SENSOR_ATTR_RO(temp10_fault, temp_fault, 9),
+ SENSOR_ATTR_RO(temp10_alarm, temp_alarm, 9),
+ SENSOR_ATTR_RO(temp11_input, temp_value, 10),
+ SENSOR_ATTR_RO(temp11_fault, temp_fault, 10),
+ SENSOR_ATTR_RO(temp11_alarm, temp_alarm, 10),
+ SENSOR_ATTR_RO(temp12_input, temp_value, 11),
+ SENSOR_ATTR_RO(temp12_fault, temp_fault, 11),
+ SENSOR_ATTR_RO(temp12_alarm, temp_alarm, 11),
+ SENSOR_ATTR_RO(temp13_input, temp_value, 12),
+ SENSOR_ATTR_RO(temp13_fault, temp_fault, 12),
+ SENSOR_ATTR_RO(temp13_alarm, temp_alarm, 12),
+ SENSOR_ATTR_RO(temp14_input, temp_value, 13),
+ SENSOR_ATTR_RO(temp14_fault, temp_fault, 13),
+ SENSOR_ATTR_RO(temp14_alarm, temp_alarm, 13),
+ SENSOR_ATTR_RO(temp15_input, temp_value, 14),
+ SENSOR_ATTR_RO(temp15_fault, temp_fault, 14),
+ SENSOR_ATTR_RO(temp15_alarm, temp_alarm, 14),
+ SENSOR_ATTR_RO(temp16_input, temp_value, 15),
+ SENSOR_ATTR_RO(temp16_fault, temp_fault, 15),
+ SENSOR_ATTR_RO(temp16_alarm, temp_alarm, 15),
};
static struct sensor_device_attribute sch5636_fan_attr[] = {
- SENSOR_ATTR(fan1_input, 0444, show_fan_value, NULL, 0),
- SENSOR_ATTR(fan1_fault, 0444, show_fan_fault, NULL, 0),
- SENSOR_ATTR(fan1_alarm, 0444, show_fan_alarm, NULL, 0),
- SENSOR_ATTR(fan2_input, 0444, show_fan_value, NULL, 1),
- SENSOR_ATTR(fan2_fault, 0444, show_fan_fault, NULL, 1),
- SENSOR_ATTR(fan2_alarm, 0444, show_fan_alarm, NULL, 1),
- SENSOR_ATTR(fan3_input, 0444, show_fan_value, NULL, 2),
- SENSOR_ATTR(fan3_fault, 0444, show_fan_fault, NULL, 2),
- SENSOR_ATTR(fan3_alarm, 0444, show_fan_alarm, NULL, 2),
- SENSOR_ATTR(fan4_input, 0444, show_fan_value, NULL, 3),
- SENSOR_ATTR(fan4_fault, 0444, show_fan_fault, NULL, 3),
- SENSOR_ATTR(fan4_alarm, 0444, show_fan_alarm, NULL, 3),
- SENSOR_ATTR(fan5_input, 0444, show_fan_value, NULL, 4),
- SENSOR_ATTR(fan5_fault, 0444, show_fan_fault, NULL, 4),
- SENSOR_ATTR(fan5_alarm, 0444, show_fan_alarm, NULL, 4),
- SENSOR_ATTR(fan6_input, 0444, show_fan_value, NULL, 5),
- SENSOR_ATTR(fan6_fault, 0444, show_fan_fault, NULL, 5),
- SENSOR_ATTR(fan6_alarm, 0444, show_fan_alarm, NULL, 5),
- SENSOR_ATTR(fan7_input, 0444, show_fan_value, NULL, 6),
- SENSOR_ATTR(fan7_fault, 0444, show_fan_fault, NULL, 6),
- SENSOR_ATTR(fan7_alarm, 0444, show_fan_alarm, NULL, 6),
- SENSOR_ATTR(fan8_input, 0444, show_fan_value, NULL, 7),
- SENSOR_ATTR(fan8_fault, 0444, show_fan_fault, NULL, 7),
- SENSOR_ATTR(fan8_alarm, 0444, show_fan_alarm, NULL, 7),
+ SENSOR_ATTR_RO(fan1_input, fan_value, 0),
+ SENSOR_ATTR_RO(fan1_fault, fan_fault, 0),
+ SENSOR_ATTR_RO(fan1_alarm, fan_alarm, 0),
+ SENSOR_ATTR_RO(fan2_input, fan_value, 1),
+ SENSOR_ATTR_RO(fan2_fault, fan_fault, 1),
+ SENSOR_ATTR_RO(fan2_alarm, fan_alarm, 1),
+ SENSOR_ATTR_RO(fan3_input, fan_value, 2),
+ SENSOR_ATTR_RO(fan3_fault, fan_fault, 2),
+ SENSOR_ATTR_RO(fan3_alarm, fan_alarm, 2),
+ SENSOR_ATTR_RO(fan4_input, fan_value, 3),
+ SENSOR_ATTR_RO(fan4_fault, fan_fault, 3),
+ SENSOR_ATTR_RO(fan4_alarm, fan_alarm, 3),
+ SENSOR_ATTR_RO(fan5_input, fan_value, 4),
+ SENSOR_ATTR_RO(fan5_fault, fan_fault, 4),
+ SENSOR_ATTR_RO(fan5_alarm, fan_alarm, 4),
+ SENSOR_ATTR_RO(fan6_input, fan_value, 5),
+ SENSOR_ATTR_RO(fan6_fault, fan_fault, 5),
+ SENSOR_ATTR_RO(fan6_alarm, fan_alarm, 5),
+ SENSOR_ATTR_RO(fan7_input, fan_value, 6),
+ SENSOR_ATTR_RO(fan7_fault, fan_fault, 6),
+ SENSOR_ATTR_RO(fan7_alarm, fan_alarm, 6),
+ SENSOR_ATTR_RO(fan8_input, fan_value, 7),
+ SENSOR_ATTR_RO(fan8_fault, fan_fault, 7),
+ SENSOR_ATTR_RO(fan8_alarm, fan_alarm, 7),
};
static int sch5636_remove(struct platform_device *pdev)
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 2e005edee0c9..a80183a488c5 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -57,7 +57,7 @@ scmi_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
sensor = *(scmi_sensors->info[type] + channel);
if (sensor)
- return S_IRUGO;
+ return 0444;
return 0;
}
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 111d521e2189..9bfa228d0eb0 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -226,11 +226,11 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
sensor->scale = scale[sensor->info.class];
- sensor->dev_attr_input.attr.mode = S_IRUGO;
+ sensor->dev_attr_input.attr.mode = 0444;
sensor->dev_attr_input.show = scpi_show_sensor;
sensor->dev_attr_input.attr.name = sensor->input;
- sensor->dev_attr_label.attr.mode = S_IRUGO;
+ sensor->dev_attr_label.attr.mode = 0444;
sensor->dev_attr_label.show = scpi_show_label;
sensor->dev_attr_label.attr.name = sensor->label;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index c878242f3486..39b41e35c2bf 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -677,9 +677,8 @@ static inline int sht15_calc_humid(struct sht15_data *data)
* and heater_enable sysfs attributes.
* Returns number of bytes written into buffer, negative errno on error.
*/
-static ssize_t sht15_show_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t sht15_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
struct sht15_data *data = dev_get_drvdata(dev);
@@ -700,7 +699,7 @@ static ssize_t sht15_show_status(struct device *dev,
* Will be called on write access to heater_enable sysfs attribute.
* Returns number of bytes actually decoded, negative errno on error.
*/
-static ssize_t sht15_store_heater(struct device *dev,
+static ssize_t sht15_status_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -734,9 +733,8 @@ static ssize_t sht15_store_heater(struct device *dev,
* Will be called on read access to temp1_input sysfs attribute.
* Returns number of bytes written into buffer, negative errno on error.
*/
-static ssize_t sht15_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t sht15_temp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
struct sht15_data *data = dev_get_drvdata(dev);
@@ -757,9 +755,8 @@ static ssize_t sht15_show_temp(struct device *dev,
* Will be called on read access to humidity1_input sysfs attribute.
* Returns number of bytes written into buffer, negative errno on error.
*/
-static ssize_t sht15_show_humidity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t sht15_humidity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
struct sht15_data *data = dev_get_drvdata(dev);
@@ -777,16 +774,13 @@ static ssize_t name_show(struct device *dev,
return sprintf(buf, "%s\n", pdev->name);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
- sht15_show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO,
- sht15_show_humidity, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, sht15_show_status, NULL,
- SHT15_STATUS_LOW_BATTERY);
-static SENSOR_DEVICE_ATTR(humidity1_fault, S_IRUGO, sht15_show_status, NULL,
- SHT15_STATUS_LOW_BATTERY);
-static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR, sht15_show_status,
- sht15_store_heater, SHT15_STATUS_HEATER);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, sht15_temp, 0);
+static SENSOR_DEVICE_ATTR_RO(humidity1_input, sht15_humidity, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, sht15_status,
+ SHT15_STATUS_LOW_BATTERY);
+static SENSOR_DEVICE_ATTR_RO(humidity1_fault, sht15_status,
+ SHT15_STATUS_LOW_BATTERY);
+static SENSOR_DEVICE_ATTR_RW(heater_enable, sht15_status, SHT15_STATUS_HEATER);
static DEVICE_ATTR_RO(name);
static struct attribute *sht15_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 2c7ba70921f5..df112b73b635 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -135,9 +135,9 @@ out:
* Will be called on read access to temp1_input sysfs attribute.
* Returns number of bytes written into buffer, negative errno on error.
*/
-static ssize_t sht21_show_temperature(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t sht21_temperature_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sht21 *sht21 = dev_get_drvdata(dev);
int ret;
@@ -157,9 +157,8 @@ static ssize_t sht21_show_temperature(struct device *dev,
* Will be called on read access to humidity1_input sysfs attribute.
* Returns number of bytes written into buffer, negative errno on error.
*/
-static ssize_t sht21_show_humidity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t sht21_humidity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sht21 *sht21 = dev_get_drvdata(dev);
int ret;
@@ -251,10 +250,8 @@ static ssize_t eic_show(struct device *dev,
}
/* sysfs attributes */
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature,
- NULL, 0);
-static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity,
- NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, sht21_temperature, 0);
+static SENSOR_DEVICE_ATTR_RO(humidity1_input, sht21_humidity, 0);
static DEVICE_ATTR_RO(eic);
static struct attribute *sht21_attrs[] = {
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 370b57dafab7..81ebc96cdec9 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -629,40 +629,22 @@ out:
return count;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp1_input_show, NULL, 0);
-static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, humidity1_input_show,
- NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
- temp1_limit_show, temp1_limit_store,
- limit_max);
-static SENSOR_DEVICE_ATTR(humidity1_max, S_IRUGO | S_IWUSR,
- humidity1_limit_show, humidity1_limit_store,
- limit_max);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
- temp1_limit_show, temp1_limit_store,
- limit_max_hyst);
-static SENSOR_DEVICE_ATTR(humidity1_max_hyst, S_IRUGO | S_IWUSR,
- humidity1_limit_show, humidity1_limit_store,
- limit_max_hyst);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
- temp1_limit_show, temp1_limit_store,
- limit_min);
-static SENSOR_DEVICE_ATTR(humidity1_min, S_IRUGO | S_IWUSR,
- humidity1_limit_show, humidity1_limit_store,
- limit_min);
-static SENSOR_DEVICE_ATTR(temp1_min_hyst, S_IRUGO | S_IWUSR,
- temp1_limit_show, temp1_limit_store,
- limit_min_hyst);
-static SENSOR_DEVICE_ATTR(humidity1_min_hyst, S_IRUGO | S_IWUSR,
- humidity1_limit_show, humidity1_limit_store,
- limit_min_hyst);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, temp1_alarm_show, NULL, 0);
-static SENSOR_DEVICE_ATTR(humidity1_alarm, S_IRUGO, humidity1_alarm_show,
- NULL, 0);
-static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR,
- heater_enable_show, heater_enable_store, 0);
-static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
- update_interval_show, update_interval_store, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp1_input, 0);
+static SENSOR_DEVICE_ATTR_RO(humidity1_input, humidity1_input, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp1_limit, limit_max);
+static SENSOR_DEVICE_ATTR_RW(humidity1_max, humidity1_limit, limit_max);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp1_limit, limit_max_hyst);
+static SENSOR_DEVICE_ATTR_RW(humidity1_max_hyst, humidity1_limit,
+ limit_max_hyst);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp1_limit, limit_min);
+static SENSOR_DEVICE_ATTR_RW(humidity1_min, humidity1_limit, limit_min);
+static SENSOR_DEVICE_ATTR_RW(temp1_min_hyst, temp1_limit, limit_min_hyst);
+static SENSOR_DEVICE_ATTR_RW(humidity1_min_hyst, humidity1_limit,
+ limit_min_hyst);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, temp1_alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(humidity1_alarm, humidity1_alarm, 0);
+static SENSOR_DEVICE_ATTR_RW(heater_enable, heater_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(update_interval, update_interval, 0);
static struct attribute *sht3x_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 6bd200756560..c0775084dde0 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -164,18 +164,18 @@ static int temp_from_reg(u8 reg)
return (s8)reg * 1000;
}
-static ssize_t show_temp(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47b397_data *data = smsc47b397_update_device(dev);
return sprintf(buf, "%d\n", temp_from_reg(data->temp[attr->index]));
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
/*
* FAN: 1 RPM/bit
@@ -188,17 +188,17 @@ static int fan_from_reg(u16 reg)
return 90000 * 60 / reg;
}
-static ssize_t show_fan(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47b397_data *data = smsc47b397_update_device(dev);
return sprintf(buf, "%d\n", fan_from_reg(data->fan[attr->index]));
}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
static struct attribute *smsc47b397_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 7fe152d92350..90b60297f2f7 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -382,8 +382,8 @@ static int stts751_update(struct stts751_priv *priv)
return 0;
}
-static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t max_alarm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
struct stts751_priv *priv = dev_get_drvdata(dev);
@@ -399,8 +399,8 @@ static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", priv->max_alert);
}
-static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t min_alarm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret;
struct stts751_priv *priv = dev_get_drvdata(dev);
@@ -416,7 +416,7 @@ static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", priv->min_alert);
}
-static ssize_t show_input(struct device *dev, struct device_attribute *attr,
+static ssize_t input_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret;
@@ -431,7 +431,7 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", priv->temp);
}
-static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
+static ssize_t therm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct stts751_priv *priv = dev_get_drvdata(dev);
@@ -439,8 +439,8 @@ static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm);
}
-static ssize_t set_therm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t therm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int ret;
long temp;
@@ -473,7 +473,7 @@ exit:
return count;
}
-static ssize_t show_hyst(struct device *dev, struct device_attribute *attr,
+static ssize_t hyst_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct stts751_priv *priv = dev_get_drvdata(dev);
@@ -481,8 +481,8 @@ static ssize_t show_hyst(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", priv->hyst);
}
-static ssize_t set_hyst(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t hyst_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int ret;
long temp;
@@ -506,7 +506,7 @@ static ssize_t set_hyst(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_therm_trip(struct device *dev,
+static ssize_t therm_trip_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -521,7 +521,7 @@ static ssize_t show_therm_trip(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm_trip);
}
-static ssize_t show_max(struct device *dev, struct device_attribute *attr,
+static ssize_t max_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct stts751_priv *priv = dev_get_drvdata(dev);
@@ -529,8 +529,8 @@ static ssize_t show_max(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_max);
}
-static ssize_t set_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int ret;
long temp;
@@ -555,7 +555,7 @@ exit:
return ret;
}
-static ssize_t show_min(struct device *dev, struct device_attribute *attr,
+static ssize_t min_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct stts751_priv *priv = dev_get_drvdata(dev);
@@ -563,8 +563,8 @@ static ssize_t show_min(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_min);
}
-static ssize_t set_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int ret;
long temp;
@@ -589,8 +589,8 @@ exit:
return ret;
}
-static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct stts751_priv *priv = dev_get_drvdata(dev);
@@ -598,8 +598,9 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
stts751_intervals[priv->interval]);
}
-static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t interval_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
unsigned long val;
int idx;
@@ -746,16 +747,15 @@ static int stts751_read_chip_config(struct stts751_priv *priv)
return 0;
}
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, show_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_min, 0644, show_min, set_min, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, 0644, show_max, set_max, 0);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, 0444, show_min_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, 0444, show_max_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit, 0644, show_therm, set_therm, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, 0644, show_hyst, set_hyst, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, 0444, show_therm_trip, NULL, 0);
-static SENSOR_DEVICE_ATTR(update_interval, 0644,
- show_interval, set_interval, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, input, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, min_alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, max_alarm, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, therm, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit_hyst, hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, therm_trip, 0);
+static SENSOR_DEVICE_ATTR_RW(update_interval, interval, 0);
static struct attribute *stts751_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
index 18136e1f95fd..81dd229d7db4 100644
--- a/drivers/hwmon/tc654.c
+++ b/drivers/hwmon/tc654.c
@@ -200,7 +200,7 @@ out:
* sysfs attributes
*/
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
@@ -218,7 +218,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", val);
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
@@ -231,8 +231,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
TC654_FAN_FAULT_FROM_REG(data->fan_fault[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(da)->index;
struct tc654_data *data = dev_get_drvdata(dev);
@@ -255,7 +255,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
return ret < 0 ? ret : count;
}
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
+static ssize_t fan_alarm_show(struct device *dev, struct device_attribute *da,
char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
@@ -275,8 +275,8 @@ static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
static const u8 TC654_FAN_PULSE_SHIFT[] = { 1, 3 };
-static ssize_t show_fan_pulses(struct device *dev, struct device_attribute *da,
- char *buf)
+static ssize_t fan_pulses_show(struct device *dev,
+ struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct tc654_data *data = tc654_update_client(dev);
@@ -289,8 +289,9 @@ static ssize_t show_fan_pulses(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", val);
}
-static ssize_t set_fan_pulses(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_pulses_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(da)->index;
struct tc654_data *data = dev_get_drvdata(dev);
@@ -329,8 +330,8 @@ static ssize_t set_fan_pulses(struct device *dev, struct device_attribute *da,
return ret < 0 ? ret : count;
}
-static ssize_t show_pwm_mode(struct device *dev,
- struct device_attribute *da, char *buf)
+static ssize_t pwm_mode_show(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct tc654_data *data = tc654_update_client(dev);
@@ -340,9 +341,8 @@ static ssize_t show_pwm_mode(struct device *dev,
return sprintf(buf, "%d\n", !!(data->config & TC654_REG_CONFIG_DUTYC));
}
-static ssize_t set_pwm_mode(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t pwm_mode_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct tc654_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -371,7 +371,7 @@ static ssize_t set_pwm_mode(struct device *dev,
static const int tc654_pwm_map[16] = { 77, 88, 102, 112, 124, 136, 148, 160,
172, 184, 196, 207, 219, 231, 243, 255};
-static ssize_t show_pwm(struct device *dev, struct device_attribute *da,
+static ssize_t pwm_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct tc654_data *data = tc654_update_client(dev);
@@ -388,8 +388,8 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", pwm);
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct tc654_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -423,22 +423,16 @@ out:
return ret < 0 ? ret : count;
}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
- set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
- set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
- set_fan_pulses, 0);
-static SENSOR_DEVICE_ATTR(fan2_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
- set_fan_pulses, 1);
-static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO,
- show_pwm_mode, set_pwm_mode, 0);
-static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm,
- set_pwm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, fan_alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, fan_alarm, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_pulses, fan_pulses, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_pulses, fan_pulses, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1_mode, pwm_mode, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
/* Driver data */
static struct attribute *tc654_attrs[] = {
diff --git a/drivers/hwmon/tc74.c b/drivers/hwmon/tc74.c
index d95165158800..fa306bb681bb 100644
--- a/drivers/hwmon/tc74.c
+++ b/drivers/hwmon/tc74.c
@@ -86,7 +86,7 @@ ret_unlock:
return ret;
}
-static ssize_t show_temp_input(struct device *dev,
+static ssize_t temp_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tc74_data *data = dev_get_drvdata(dev);
@@ -98,7 +98,7 @@ static ssize_t show_temp_input(struct device *dev,
return sprintf(buf, "%d\n", data->temp_input * 1000);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
static struct attribute *tc74_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 6778283e36f9..35523d315f25 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -141,10 +141,10 @@ static umode_t tmp102_is_visible(const void *data, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_temp_input:
- return S_IRUGO;
+ return 0444;
case hwmon_temp_max_hyst:
case hwmon_temp_max:
- return S_IRUGO | S_IWUSR;
+ return 0644;
default:
return 0;
}
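
The tmp102 and tmp421 hunks touch drivers where permissions come from the hwmon_ops->is_visible() callback of the newer registration API rather than from per-attribute macros; the change is purely cosmetic, replacing S_IRUGO/S_IWUSR with the octal constants checkpatch prefers. A skeletal callback of that shape, for illustration only (it assumes only temperature attributes are registered, as tmp102 does):

static umode_t example_is_visible(const void *data, enum hwmon_sensor_types type,
				  u32 attr, int channel)
{
	switch (attr) {
	case hwmon_temp_input:
		return 0444;	/* read-only */
	case hwmon_temp_max:
		return 0644;	/* read-write */
	default:
		return 0;	/* attribute not exposed */
	}
}
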
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index 7f85b14544df..bda0fdc1eb53 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -61,9 +61,8 @@ static inline u8 tmp103_mc_to_reg(int val)
return DIV_ROUND_CLOSEST(val, 1000);
}
-static ssize_t tmp103_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t tmp103_temp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct regmap *regmap = dev_get_drvdata(dev);
@@ -77,9 +76,9 @@ static ssize_t tmp103_show_temp(struct device *dev,
return sprintf(buf, "%d\n", tmp103_reg_to_mc(regval));
}
-static ssize_t tmp103_set_temp(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t tmp103_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct regmap *regmap = dev_get_drvdata(dev);
@@ -94,14 +93,11 @@ static ssize_t tmp103_set_temp(struct device *dev,
return ret ? ret : count;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp103_show_temp, NULL ,
- TMP103_TEMP_REG);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, tmp103_temp, TMP103_TEMP_REG);
-static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, tmp103_show_temp,
- tmp103_set_temp, TMP103_TLOW_REG);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, tmp103_temp, TMP103_TLOW_REG);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp103_show_temp,
- tmp103_set_temp, TMP103_THIGH_REG);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, tmp103_temp, TMP103_THIGH_REG);
static struct attribute *tmp103_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 8844c9565d2a..2732a71f3b39 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
.data = (void *)2
},
{
- .compatible = "ti,tmp422",
+ .compatible = "ti,tmp442",
.data = (void *)3
},
{ },
@@ -187,9 +187,9 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_temp_fault:
if (channel == 0)
return 0;
- return S_IRUGO;
+ return 0444;
case hwmon_temp_input:
- return S_IRUGO;
+ return 0444;
default:
return 0;
}
diff --git a/drivers/hwmon/vexpress-hwmon.c b/drivers/hwmon/vexpress-hwmon.c
index 8ba419d343f8..0b84adb5e88e 100644
--- a/drivers/hwmon/vexpress-hwmon.c
+++ b/drivers/hwmon/vexpress-hwmon.c
@@ -92,9 +92,8 @@ struct vexpress_hwmon_type {
};
#if !defined(CONFIG_REGULATOR_VEXPRESS)
-static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
- NULL, 1000);
+static DEVICE_ATTR(in1_label, 0444, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR_RO(in1_input, vexpress_hwmon_u32, 1000);
static struct attribute *vexpress_hwmon_attrs_volt[] = {
&dev_attr_in1_label.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
@@ -113,9 +112,8 @@ static struct vexpress_hwmon_type vexpress_hwmon_volt = {
};
#endif
-static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
- NULL, 1000);
+static DEVICE_ATTR(curr1_label, 0444, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR_RO(curr1_input, vexpress_hwmon_u32, 1000);
static struct attribute *vexpress_hwmon_attrs_amp[] = {
&dev_attr_curr1_label.attr,
&sensor_dev_attr_curr1_input.dev_attr.attr,
@@ -133,9 +131,8 @@ static struct vexpress_hwmon_type vexpress_hwmon_amp = {
},
};
-static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
- NULL, 1000);
+static DEVICE_ATTR(temp1_label, 0444, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, vexpress_hwmon_u32, 1000);
static struct attribute *vexpress_hwmon_attrs_temp[] = {
&dev_attr_temp1_label.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -153,9 +150,8 @@ static struct vexpress_hwmon_type vexpress_hwmon_temp = {
},
};
-static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
-static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
- NULL, 1);
+static DEVICE_ATTR(power1_label, 0444, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR_RO(power1_input, vexpress_hwmon_u32, 1);
static struct attribute *vexpress_hwmon_attrs_power[] = {
&dev_attr_power1_label.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
@@ -173,9 +169,8 @@ static struct vexpress_hwmon_type vexpress_hwmon_power = {
},
};
-static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
-static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
- NULL, 1);
+static DEVICE_ATTR(energy1_label, 0444, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR_RO(energy1_input, vexpress_hwmon_u64, 1);
static struct attribute *vexpress_hwmon_attrs_energy[] = {
&dev_attr_energy1_label.attr,
&sensor_dev_attr_energy1_input.dev_attr.attr,
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 0e81f287d305..cb94e4880014 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -60,8 +60,8 @@ struct via_cputemp_data {
* Sysfs stuff
*/
-static ssize_t show_name(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int ret;
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -74,8 +74,8 @@ static ssize_t show_name(struct device *dev, struct device_attribute
return ret;
}
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct via_cputemp_data *data = dev_get_drvdata(dev);
u32 eax, edx;
@@ -102,10 +102,9 @@ static ssize_t cpu0_vid_show(struct device *dev,
return sprintf(buf, "%d\n", vid_from_reg(~edx & 0x7f, data->vrm));
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
- SHOW_TEMP);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, SHOW_TEMP);
+static SENSOR_DEVICE_ATTR_RO(temp1_label, name, SHOW_LABEL);
+static SENSOR_DEVICE_ATTR_RO(name, name, SHOW_NAME);
static struct attribute *via_cputemp_attributes[] = {
&sensor_dev_attr_name.dev_attr.attr,
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 45b2460f3166..e8819d750938 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = {
.id = 0x000bbd08,
.mask = 0x000fffff,
},
+ { /* Debug for Cortex-A73 */
+ .id = 0x000bbd09,
+ .mask = 0x000fffff,
+ },
{ 0, 0 },
};
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index abe8249b893b..4d5a2b9f9d6a 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -14,6 +14,7 @@
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
+#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -30,11 +31,14 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
+/* Sink ID - same for all ETMs */
+PMU_FORMAT_ATTR(sinkid, "config2:0-31");
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_timestamp.attr,
&format_attr_retstack.attr,
+ &format_attr_sinkid.attr,
NULL,
};
@@ -43,8 +47,18 @@ static const struct attribute_group etm_pmu_format_group = {
.attrs = etm_config_formats_attr,
};
+static struct attribute *etm_config_sinks_attr[] = {
+ NULL,
+};
+
+static const struct attribute_group etm_pmu_sinks_group = {
+ .name = "sinks",
+ .attrs = etm_config_sinks_attr,
+};
+
static const struct attribute_group *etm_pmu_attr_groups[] = {
&etm_pmu_format_group,
+ &etm_pmu_sinks_group,
NULL,
};
@@ -177,31 +191,28 @@ static void etm_free_aux(void *data)
schedule_work(&event_data->work);
}
-static void *etm_setup_aux(int event_cpu, void **pages,
+static void *etm_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
- int cpu;
+ u32 id;
+ int cpu = event->cpu;
cpumask_t *mask;
struct coresight_device *sink;
struct etm_event_data *event_data = NULL;
- event_data = alloc_event_data(event_cpu);
+ event_data = alloc_event_data(cpu);
if (!event_data)
return NULL;
INIT_WORK(&event_data->work, free_event_data);
- /*
- * In theory nothing prevent tracers in a trace session from being
- * associated with different sinks, nor having a sink per tracer. But
- * until we have HW with this kind of topology we need to assume tracers
- * in a trace session are using the same sink. Therefore go through
- * the coresight bus and pick the first enabled sink.
- *
- * When operated from sysFS users are responsible to enable the sink
- * while from perf, the perf tools will do it based on the choice made
- * on the cmd line. As such the "enable_sink" flag in sysFS is reset.
- */
- sink = coresight_get_enabled_sink(true);
+ /* First get the selected sink from user space. */
+ if (event->attr.config2) {
+ id = (u32)event->attr.config2;
+ sink = coresight_get_sink_by_id(id);
+ } else {
+ sink = coresight_get_enabled_sink(true);
+ }
+
if (!sink || !sink_ops(sink)->alloc_buffer)
goto err;
@@ -422,15 +433,16 @@ static int etm_addr_filters_validate(struct list_head *filters)
static void etm_addr_filters_sync(struct perf_event *event)
{
struct perf_addr_filters_head *head = perf_event_addr_filters(event);
- unsigned long start, stop, *offs = event->addr_filters_offs;
+ unsigned long start, stop;
+ struct perf_addr_filter_range *fr = event->addr_filter_ranges;
struct etm_filters *filters = event->hw.addr_filters;
struct etm_filter *etm_filter;
struct perf_addr_filter *filter;
int i = 0;
list_for_each_entry(filter, &head->list, entry) {
- start = filter->offset + offs[i];
- stop = start + filter->size;
+ start = fr[i].start;
+ stop = start + fr[i].size;
etm_filter = &filters->etm_filter[i];
switch (filter->action) {
@@ -479,6 +491,77 @@ int etm_perf_symlink(struct coresight_device *csdev, bool link)
return 0;
}
+static ssize_t etm_perf_sink_name_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea;
+
+ ea = container_of(dattr, struct dev_ext_attribute, attr);
+ return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
+}
+
+int etm_perf_add_symlink_sink(struct coresight_device *csdev)
+{
+ int ret;
+ unsigned long hash;
+ const char *name;
+ struct device *pmu_dev = etm_pmu.dev;
+ struct device *pdev = csdev->dev.parent;
+ struct dev_ext_attribute *ea;
+
+ if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+ csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+ return -EINVAL;
+
+ if (csdev->ea != NULL)
+ return -EINVAL;
+
+ if (!etm_perf_up)
+ return -EPROBE_DEFER;
+
+ ea = devm_kzalloc(pdev, sizeof(*ea), GFP_KERNEL);
+ if (!ea)
+ return -ENOMEM;
+
+ name = dev_name(pdev);
+ /* See function coresight_get_sink_by_id() to know where this is used */
+ hash = hashlen_hash(hashlen_string(NULL, name));
+
+ ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL);
+ if (!ea->attr.attr.name)
+ return -ENOMEM;
+
+ ea->attr.attr.mode = 0444;
+ ea->attr.show = etm_perf_sink_name_show;
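+	/* Stash the name hash in ->var so coresight_sink_by_id() can match it. */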
+ ea->var = (unsigned long *)hash;
+
+ ret = sysfs_add_file_to_group(&pmu_dev->kobj,
+ &ea->attr.attr, "sinks");
+
+ if (!ret)
+ csdev->ea = ea;
+
+ return ret;
+}
+
+void etm_perf_del_symlink_sink(struct coresight_device *csdev)
+{
+ struct device *pmu_dev = etm_pmu.dev;
+ struct dev_ext_attribute *ea = csdev->ea;
+
+ if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+ csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+ return;
+
+ if (!ea)
+ return;
+
+ sysfs_remove_file_from_group(&pmu_dev->kobj,
+ &ea->attr.attr, "sinks");
+ csdev->ea = NULL;
+}
+
static int __init etm_perf_init(void)
{
int ret;
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index da7d9336a15c..015213abe00a 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -59,6 +59,8 @@ struct etm_event_data {
#ifdef CONFIG_CORESIGHT
int etm_perf_symlink(struct coresight_device *csdev, bool link);
+int etm_perf_add_symlink_sink(struct coresight_device *csdev);
+void etm_perf_del_symlink_sink(struct coresight_device *csdev);
static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
{
struct etm_event_data *data = perf_get_aux(handle);
@@ -70,7 +72,9 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
#else
static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
{ return -EINVAL; }
-
+static inline int etm_perf_add_symlink_sink(struct coresight_device *csdev)
+{ return -EINVAL; }
+static inline void etm_perf_del_symlink_sink(struct coresight_device *csdev) {}
static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
{
return NULL;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 53e2fb6e86f6..fe76b176974a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -55,7 +55,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
static bool etm4_arch_supported(u8 arch)
{
- switch (arch) {
+ /* Mask out the minor version number */
+ switch (arch & 0xf0) {
case ETM_ARCH_V4:
break;
default:
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 579f34943bf1..b936c6d7e13f 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -147,6 +147,7 @@ void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index ef339ff22090..f07825df5c7a 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -793,7 +793,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
struct stm_drvdata *drvdata;
struct resource *res = &adev->res;
struct resource ch_res;
- size_t res_size, bitmap_size;
+ size_t bitmap_size;
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
@@ -833,15 +833,11 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->write_bytes = stm_fundamental_data_size(drvdata);
- if (boot_nr_channel) {
+ if (boot_nr_channel)
drvdata->numsp = boot_nr_channel;
- res_size = min((resource_size_t)(boot_nr_channel *
- BYTES_PER_CHANNEL), resource_size(res));
- } else {
+ else
drvdata->numsp = stm_num_stimulus_port(drvdata);
- res_size = min((resource_size_t)(drvdata->numsp *
- BYTES_PER_CHANNEL), resource_size(res));
- }
+
bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 2b0df1a0a8df..29cef898afba 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/stringhash.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/coresight.h>
@@ -18,6 +19,7 @@
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include "coresight-etm-perf.h"
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
@@ -540,6 +542,47 @@ struct coresight_device *coresight_get_enabled_sink(bool deactivate)
return dev ? to_coresight_device(dev) : NULL;
}
+static int coresight_sink_by_id(struct device *dev, void *data)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+ unsigned long hash;
+
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+
+ if (!csdev->ea)
+ return 0;
+ /*
+ * See function etm_perf_add_symlink_sink() to know where
+ * this comes from.
+ */
+ hash = (unsigned long)csdev->ea->var;
+
+ if ((u32)hash == *(u32 *)data)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * coresight_get_sink_by_id - returns the sink that matches the id
+ * @id: Id of the sink to match
+ *
+ * The name of a sink is unique, whether it is found on the AMBA bus or
+ * otherwise. As such the hash of that name can easily be used to identify
+ * a sink.
+ */
+struct coresight_device *coresight_get_sink_by_id(u32 id)
+{
+ struct device *dev = NULL;
+
+ dev = bus_find_device(&coresight_bustype, NULL, &id,
+ coresight_sink_by_id);
+
+ return dev ? to_coresight_device(dev) : NULL;
+}
+
/*
* coresight_grab_device - Power up this device and any of the helper
* devices connected to it for trace operation. Since the helper devices
@@ -1167,6 +1210,22 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
goto err_out;
}
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ ret = etm_perf_add_symlink_sink(csdev);
+
+ if (ret) {
+ device_unregister(&csdev->dev);
+ /*
+ * As with the above, all resources are free'd
+ * explicitly via coresight_device_release() triggered
+ * from put_device(), which is in turn called from
+ * function device_unregister().
+ */
+ goto err_out;
+ }
+ }
+
mutex_lock(&coresight_mutex);
coresight_fixup_device_conns(csdev);
@@ -1185,6 +1244,7 @@ EXPORT_SYMBOL_GPL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
+ etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
device_unregister(&csdev->dev);
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 89092f83567e..7045930fc958 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -80,8 +80,8 @@ static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
* Skip one-level up to the real device node, if we
* are using the new bindings.
*/
- if (!of_node_cmp(parent->name, "in-ports") ||
- !of_node_cmp(parent->name, "out-ports"))
+ if (of_node_name_eq(parent, "in-ports") ||
+ of_node_name_eq(parent, "out-ports"))
parent = of_get_next_parent(parent);
return parent;
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index fc6b7f8b62fb..7c1acc2f801c 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -422,6 +422,7 @@ static const struct intel_th_subdevice {
unsigned nres;
unsigned type;
unsigned otype;
+ bool mknode;
unsigned scrpd;
int id;
} intel_th_subdevices[] = {
@@ -456,6 +457,7 @@ static const struct intel_th_subdevice {
.name = "msc",
.id = 0,
.type = INTEL_TH_OUTPUT,
+ .mknode = true,
.otype = GTH_MSU,
.scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
},
@@ -476,6 +478,7 @@ static const struct intel_th_subdevice {
.name = "msc",
.id = 1,
.type = INTEL_TH_OUTPUT,
+ .mknode = true,
.otype = GTH_MSU,
.scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
},
@@ -635,7 +638,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
if (subdev->type == INTEL_TH_OUTPUT) {
- thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
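+		/* Only assign a dev_t to outputs that expose a character device node. */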
+ if (subdev->mknode)
+ thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
thdev->output.type = subdev->otype;
thdev->output.port = -1;
thdev->output.scratchpad = subdev->scrpd;
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 8426b7970c14..edc52d75e6bd 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -607,6 +607,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
+ int master;
if (thdev->host_mode)
return;
@@ -615,6 +616,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
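+	/* Forget any master-to-port mappings that still point at this port. */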
+ for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+ if (gth->master[master] == port)
+ gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
}
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 56694339cb06..0da6b787f553 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -272,19 +272,17 @@ static ssize_t lpp_dest_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct pti_device *pti = dev_get_drvdata(dev);
- ssize_t ret = -EINVAL;
int i;
- for (i = 0; i < ARRAY_SIZE(lpp_dest_str); i++)
- if (sysfs_streq(buf, lpp_dest_str[i]))
- break;
+ i = sysfs_match_string(lpp_dest_str, buf);
+ if (i < 0)
+ return i;
- if (i < ARRAY_SIZE(lpp_dest_str) && pti->lpp_dest_mask & BIT(i)) {
- pti->lpp_dest = i;
- ret = size;
- }
+ if (!(pti->lpp_dest_mask & BIT(i)))
+ return -EINVAL;
- return ret;
+ pti->lpp_dest = i;
+ return size;
}
static DEVICE_ATTR_RW(lpp_dest);
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 4b7ae47789d2..3a1f4e650378 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -84,8 +84,12 @@ static ssize_t notrace sth_stm_packet(struct stm_data *stm_data,
/* Global packets (GERR, XSYNC, TRIG) are sent with register writes */
case STP_PACKET_GERR:
reg += 4;
+ /* fall through */
+
case STP_PACKET_XSYNC:
reg += 8;
+ /* fall through */
+
case STP_PACKET_TRIG:
if (flags & STP_PACKET_TIMESTAMPED)
reg += 4;
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 93ce3aa740a9..c7ba8acfd4d5 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -244,6 +244,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
;
if (i == width)
return pos;
+
+ /* step over [pos..pos+i) to continue search */
+ pos += i;
}
return -1;
@@ -732,7 +735,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
struct stm_device *stm = stmf->stm;
struct stp_policy_id *id;
char *ids[] = { NULL, NULL };
- int ret = -EINVAL;
+ int ret = -EINVAL, wlimit = 1;
u32 size;
if (stmf->output.nr_chans)
@@ -760,8 +763,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (id->__reserved_0 || id->__reserved_1)
goto err_free;
- if (id->width < 1 ||
- id->width > PAGE_SIZE / stm->data->sw_mmiosz)
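+	/* Devices without sw_mmiosz only allow a channel width of 1. */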
+ if (stm->data->sw_mmiosz)
+ wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
+
+ if (id->width < 1 || id->width > wlimit)
goto err_free;
ids[0] = id->id;
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ec6e69aa3a8e..d2fbb4bb4a43 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
}
+static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev)
+{
+ i2c_dev->curr_msg = NULL;
+ i2c_dev->num_msgs = 0;
+
+ i2c_dev->msg_buf = NULL;
+ i2c_dev->msg_buf_remaining = 0;
+}
+
/*
* Note about I2C_C_CLEAR on error:
* The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
time_left = wait_for_completion_timeout(&i2c_dev->completion,
adap->timeout);
+
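+	/* Drop the per-transfer message pointers now that the transfer is done. */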
+ bcm2835_i2c_finish_transfer(i2c_dev);
+
if (!time_left) {
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
BCM2835_I2C_C_CLEAR);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index b13605718291..d917cefc5a19 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+ if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
ctrl_reg |= CDNS_I2C_CR_HOLD;
+ else
+ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if (id->send_count > CDNS_I2C_FIFO_DEPTH)
+ if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
ctrl_reg |= CDNS_I2C_CR_HOLD;
+ else
+ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b1086bfb0465..cd9c65f3d404 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int omap_i2c_runtime_suspend(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
{
struct omap_i2c_dev *omap = dev_get_drvdata(dev);
@@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
return 0;
}
-static int omap_i2c_runtime_resume(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
{
struct omap_i2c_dev *omap = dev_get_drvdata(dev);
@@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops omap_i2c_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
omap_i2c_runtime_resume, NULL)
};
-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
-#else
-#define OMAP_I2C_PM_OPS NULL
-#endif /* CONFIG_PM */
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
- .pm = OMAP_I2C_PM_OPS,
+ .pm = &omap_i2c_pm_ops,
.of_match_table = of_match_ptr(omap_i2c_of_match),
},
};
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e417ebf7628c..c77adbbea0c7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -155,6 +155,8 @@ enum msg_end_type {
* @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
* provides additional features and allows for longer messages to
* be transferred in one go.
+ * @quirks: i2c adapter quirks for limiting write/read transfer size and not
+ * allowing 0 length transfers.
*/
struct tegra_i2c_hw_feature {
bool has_continue_xfer_support;
@@ -167,6 +169,7 @@ struct tegra_i2c_hw_feature {
bool has_multi_master_mode;
bool has_slcg_override_reg;
bool has_mst_fifo;
+ const struct i2c_adapter_quirks *quirks;
};
/**
@@ -837,6 +840,10 @@ static const struct i2c_adapter_quirks tegra_i2c_quirks = {
.max_write_len = 4096,
};
+static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_continue_xfer_support = false,
.has_per_pkt_xfer_complete_irq = false,
@@ -848,6 +855,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -861,6 +869,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -874,6 +883,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -887,6 +897,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
@@ -900,6 +911,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
@@ -913,6 +925,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = true,
+ .quirks = &tegra194_i2c_quirks,
};
/* Match table for of_platform binding */
@@ -964,7 +977,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->base = base;
i2c_dev->div_clk = div_clk;
i2c_dev->adapter.algo = &tegra_i2c_algo;
- i2c_dev->adapter.quirks = &tegra_i2c_quirks;
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
@@ -980,6 +992,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->hw = of_device_get_match_data(&pdev->dev);
i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
"nvidia,tegra20-i2c-dvc");
+ i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
init_completion(&i2c_dev->msg_complete);
spin_lock_init(&i2c_dev->xfer_lock);
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 1aca742fde4a..ccd76c71af09 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
data_arg.data);
}
case I2C_RETRIES:
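+		/* adapter->retries is an int; reject values that would overflow it. */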
+ if (arg > INT_MAX)
+ return -EINVAL;
+
client->adapter->retries = arg;
break;
case I2C_TIMEOUT:
+ if (arg > INT_MAX)
+ return -EINVAL;
+
/* For historical reasons, user-space sets the timeout
* value in units of 10 ms.
*/
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index c39f89d2deba..2dc628d4f1ae 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
ret = i3c_master_retrieve_dev_info(newdev);
if (ret)
- goto err_free_dev;
+ goto err_detach_dev;
olddev = i3c_master_search_i3c_dev_duplicate(newdev);
if (olddev) {
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index b532e2c9cf5c..59279224e07f 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
-static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
- struct dw_i3c_xfer *xfer)
+static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
{
- unsigned long flags;
-
- spin_lock_irqsave(&master->xferqueue.lock, flags);
if (master->xferqueue.cur == xfer) {
u32 status;
@@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
} else {
list_del_init(&xfer->node);
}
+}
+
+static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
@@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
complete(&xfer->comp);
if (ret < 0) {
- dw_i3c_master_dequeue_xfer(master, xfer);
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
master->regs + DEVICE_CTRL);
}
@@ -596,6 +602,7 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
ret = dw_i2c_clk_cfg(master);
if (ret)
return ret;
+ /* fall through */
case I3C_BUS_MODE_PURE:
ret = dw_i3c_clk_cfg(master);
if (ret)
@@ -901,9 +908,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
- if (!old_dyn_addr)
- return 0;
-
master->addrs[data->index] = dev->info.dyn_addr;
return 0;
@@ -925,11 +929,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
return -ENOMEM;
data->index = pos;
- master->addrs[pos] = dev->info.dyn_addr;
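+	/* Fall back to the static address until a dynamic address is assigned. */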
+ master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
master->free_pos &= ~BIT(pos);
i3c_dev_set_master_data(dev, data);
- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index bbd79b8b1a80..8889a4fdb454 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
return PTR_ERR(master->pclk);
master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ if (IS_ERR(master->sysclk))
+ return PTR_ERR(master->sysclk);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index da58020a144e..33a28cde126c 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
int ide_queue_sense_rq(ide_drive_t *drive, void *special)
{
- struct request *sense_rq = drive->sense_rq;
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *sense_rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
/* deferred failure from ide_prep_sense() */
if (!drive->sense_rq_armed) {
printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
drive->name);
+ spin_unlock_irqrestore(&hwif->lock, flags);
return -ENOMEM;
}
+ sense_rq = drive->sense_rq;
ide_req(sense_rq)->special = special;
drive->sense_rq_armed = false;
drive->hwif->rq = NULL;
ide_insert_request_head(drive, sense_rq);
+ spin_unlock_irqrestore(&hwif->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8445b484ae69..b137f27a34d5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
}
if (!blk_update_request(rq, error, nr_bytes)) {
- if (rq == drive->sense_rq)
+ if (rq == drive->sense_rq) {
drive->sense_rq = NULL;
+ drive->sense_rq_active = false;
+ }
__blk_mq_end_request(rq, error);
return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+ bool local_requeue)
{
- ide_drive_t *drive = hctx->queue->queuedata;
- ide_hwif_t *hwif = drive->hwif;
+ ide_hwif_t *hwif = drive->hwif;
struct ide_host *host = hwif->host;
- struct request *rq = bd->rq;
ide_startstop_t startstop;
if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ide_lock_host(host, hwif))
return BLK_STS_DEV_RESOURCE;
- blk_mq_start_request(rq);
-
spin_lock_irq(&hwif->lock);
if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
/*
- * we know that the queue isn't empty, but this can happen
- * if ->prep_rq() decides to kill a request
- */
- if (!rq) {
- rq = bd->rq;
- if (!rq) {
- ide_unlock_port(hwif);
- goto out;
- }
- }
-
- /*
* Sanity: don't accept a request that isn't a PM request
* if we are currently power managed. This is very important as
* blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
}
} else {
plug_device:
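+	/* When requeueing locally, park the request on the drive's own list. */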
+ if (local_requeue)
+ list_add(&rq->queuelist, &drive->rq_list);
spin_unlock_irq(&hwif->lock);
ide_unlock_host(host);
- ide_requeue_and_plug(drive, rq);
+ if (!local_requeue)
+ ide_requeue_and_plug(drive, rq);
return BLK_STS_OK;
}
@@ -573,6 +559,26 @@ out:
return BLK_STS_OK;
}
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ ide_drive_t *drive = hctx->queue->queuedata;
+ ide_hwif_t *hwif = drive->hwif;
+
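+	/* Do not dispatch new requests while a sense request is still pending. */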
+ spin_lock_irq(&hwif->lock);
+ if (drive->sense_rq_active) {
+ spin_unlock_irq(&hwif->lock);
+ return BLK_STS_DEV_RESOURCE;
+ }
+ spin_unlock_irq(&hwif->lock);
+
+ blk_mq_start_request(bd->rq);
+ return ide_issue_rq(drive, bd->rq, false);
+}
+
static int drive_is_ready(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
-
- spin_lock_irqsave(&hwif->lock, flags);
+ drive->sense_rq_active = true;
list_add_tail(&rq->queuelist, &drive->rq_list);
- spin_unlock_irqrestore(&hwif->lock, flags);
-
kblockd_schedule_work(&drive->rq_work);
}
EXPORT_SYMBOL_GPL(ide_insert_request_head);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 102aa3bc3e7f..8af7af6001eb 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
+ spin_lock_irq(&hwif->lock);
ide_insert_request_head(drive, rq);
+ spin_unlock_irq(&hwif->lock);
out:
return;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 63627be0811a..5aeaca24a28f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
ide_hwif_t *hwif = drive->hwif;
struct request *rq;
+ blk_status_t ret;
LIST_HEAD(list);
- spin_lock_irq(&hwif->lock);
- if (!list_empty(&drive->rq_list))
- list_splice_init(&drive->rq_list, &list);
- spin_unlock_irq(&hwif->lock);
+ blk_mq_quiesce_queue(drive->queue);
- while (!list_empty(&list)) {
- rq = list_first_entry(&list, struct request, queuelist);
+ ret = BLK_STS_OK;
+ spin_lock_irq(&hwif->lock);
+ while (!list_empty(&drive->rq_list)) {
+ rq = list_first_entry(&drive->rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+
+ spin_unlock_irq(&hwif->lock);
+ ret = ide_issue_rq(drive, rq, true);
+ spin_lock_irq(&hwif->lock);
}
+ spin_unlock_irq(&hwif->lock);
+
+ blk_mq_unquiesce_queue(drive->queue);
+
+ if (ret != BLK_STS_OK)
+ kblockd_schedule_work(&drive->rq_work);
}
static const u8 ide_hwif_to_major[] =
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 4c8c7a620d08..a5dc13576394 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
drive->proc = proc_mkdir(drive->name, parent);
if (drive->proc) {
ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
- proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
+ proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
drive->proc, &ide_settings_proc_fops,
drive);
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8b5d85c91e9d..b8647b5c3d4d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1103,6 +1103,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
INTEL_CPU_FAM6(ATOM_GOLDMONT_X, idle_cpu_dnv),
+ INTEL_CPU_FAM6(ATOM_TREMONT_X, idle_cpu_dnv),
{}
};
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 780f87f72338..f03ed00685ea 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -150,8 +150,8 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
}
static int adxl345_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct adxl345_data *data = iio_priv(indio_dev);
s64 n;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 421a0a8a1379..302781126bc6 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -31,6 +31,7 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#define MMA8452_STATUS 0x00
#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
@@ -107,6 +108,8 @@ struct mma8452_data {
u8 data_cfg;
const struct mma_chip_info *chip_info;
int sleep_val;
+ struct regulator *vdd_reg;
+ struct regulator *vddio_reg;
};
/**
@@ -1534,9 +1537,39 @@ static int mma8452_probe(struct i2c_client *client,
mutex_init(&data->lock);
data->chip_info = match->data;
+ data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd_reg)) {
+ if (PTR_ERR(data->vdd_reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_err(&client->dev, "failed to get VDD regulator!\n");
+ return PTR_ERR(data->vdd_reg);
+ }
+
+ data->vddio_reg = devm_regulator_get(&client->dev, "vddio");
+ if (IS_ERR(data->vddio_reg)) {
+ if (PTR_ERR(data->vddio_reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_err(&client->dev, "failed to get VDDIO regulator!\n");
+ return PTR_ERR(data->vddio_reg);
+ }
+
+ ret = regulator_enable(data->vdd_reg);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable VDD regulator!\n");
+ return ret;
+ }
+
+ ret = regulator_enable(data->vddio_reg);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable VDDIO regulator!\n");
+ goto disable_regulator_vdd;
+ }
+
ret = i2c_smbus_read_byte_data(client, MMA8452_WHO_AM_I);
if (ret < 0)
- return ret;
+ goto disable_regulators;
switch (ret) {
case MMA8451_DEVICE_ID:
@@ -1549,7 +1582,8 @@ static int mma8452_probe(struct i2c_client *client,
break;
/* else: fall through */
default:
- return -ENODEV;
+ ret = -ENODEV;
+ goto disable_regulators;
}
dev_info(&client->dev, "registering %s accelerometer; ID 0x%x\n",
@@ -1566,13 +1600,13 @@ static int mma8452_probe(struct i2c_client *client,
ret = mma8452_reset(client);
if (ret < 0)
- return ret;
+ goto disable_regulators;
data->data_cfg = MMA8452_DATA_CFG_FS_2G;
ret = i2c_smbus_write_byte_data(client, MMA8452_DATA_CFG,
data->data_cfg);
if (ret < 0)
- return ret;
+ goto disable_regulators;
/*
* By default set transient threshold to max to avoid events if
@@ -1581,7 +1615,7 @@ static int mma8452_probe(struct i2c_client *client,
ret = i2c_smbus_write_byte_data(client, MMA8452_TRANSIENT_THS,
MMA8452_TRANSIENT_THS_MASK);
if (ret < 0)
- return ret;
+ goto disable_regulators;
if (client->irq) {
int irq2;
@@ -1595,7 +1629,7 @@ static int mma8452_probe(struct i2c_client *client,
MMA8452_CTRL_REG5,
data->chip_info->all_events);
if (ret < 0)
- return ret;
+ goto disable_regulators;
dev_dbg(&client->dev, "using interrupt line INT1\n");
}
@@ -1604,11 +1638,11 @@ static int mma8452_probe(struct i2c_client *client,
MMA8452_CTRL_REG4,
data->chip_info->enabled_events);
if (ret < 0)
- return ret;
+ goto disable_regulators;
ret = mma8452_trigger_setup(indio_dev);
if (ret < 0)
- return ret;
+ goto disable_regulators;
}
data->ctrl_reg1 = MMA8452_CTRL_ACTIVE |
@@ -1661,12 +1695,19 @@ buffer_cleanup:
trigger_cleanup:
mma8452_trigger_cleanup(indio_dev);
+disable_regulators:
+ regulator_disable(data->vddio_reg);
+
+disable_regulator_vdd:
+ regulator_disable(data->vdd_reg);
+
return ret;
}
static int mma8452_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mma8452_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
@@ -1678,6 +1719,9 @@ static int mma8452_remove(struct i2c_client *client)
mma8452_trigger_cleanup(indio_dev);
mma8452_standby(iio_priv(indio_dev));
+ regulator_disable(data->vddio_reg);
+ regulator_disable(data->vdd_reg);
+
return 0;
}
@@ -1696,6 +1740,18 @@ static int mma8452_runtime_suspend(struct device *dev)
return -EAGAIN;
}
+ ret = regulator_disable(data->vddio_reg);
+ if (ret) {
+ dev_err(dev, "failed to disable VDDIO regulator\n");
+ return ret;
+ }
+
+ ret = regulator_disable(data->vdd_reg);
+ if (ret) {
+ dev_err(dev, "failed to disable VDD regulator\n");
+ return ret;
+ }
+
return 0;
}
@@ -1705,9 +1761,22 @@ static int mma8452_runtime_resume(struct device *dev)
struct mma8452_data *data = iio_priv(indio_dev);
int ret, sleep_val;
+ ret = regulator_enable(data->vdd_reg);
+ if (ret) {
+ dev_err(dev, "failed to enable VDD regulator\n");
+ return ret;
+ }
+
+ ret = regulator_enable(data->vddio_reg);
+ if (ret) {
+ dev_err(dev, "failed to enable VDDIO regulator\n");
+ regulator_disable(data->vdd_reg);
+ return ret;
+ }
+
ret = mma8452_active(data);
if (ret < 0)
- return ret;
+ goto runtime_resume_failed;
ret = mma8452_get_odr_index(data);
sleep_val = 1000 / mma8452_samp_freq[ret][0];
@@ -1717,25 +1786,17 @@ static int mma8452_runtime_resume(struct device *dev)
msleep_interruptible(sleep_val);
return 0;
-}
-#endif
-#ifdef CONFIG_PM_SLEEP
-static int mma8452_suspend(struct device *dev)
-{
- return mma8452_standby(iio_priv(i2c_get_clientdata(
- to_i2c_client(dev))));
-}
+runtime_resume_failed:
+ regulator_disable(data->vddio_reg);
+ regulator_disable(data->vdd_reg);
-static int mma8452_resume(struct device *dev)
-{
- return mma8452_active(iio_priv(i2c_get_clientdata(
- to_i2c_client(dev))));
+ return ret;
}
#endif
static const struct dev_pm_ops mma8452_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mma8452_suspend, mma8452_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(mma8452_runtime_suspend,
mma8452_runtime_resume, NULL)
};
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index f7b471121508..a3c0916479fa 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mutex.h>
@@ -918,12 +919,167 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
#define ST_ACCEL_TRIGGER_OPS NULL
#endif
+static const struct iio_mount_matrix *
+get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ return adata->mount_matrix;
+}
+
+static const struct iio_chan_spec_ext_info mount_matrix_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, get_mount_matrix),
+ { },
+};
+
+/* Read ST-specific _ONT orientation data from ACPI and generate an
+ * appropriate mount matrix.
+ */
+static int apply_acpi_orientation(struct iio_dev *indio_dev,
+ struct iio_chan_spec *channels)
+{
+#ifdef CONFIG_ACPI
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_device *adev;
+ union acpi_object *ont;
+ union acpi_object *elements;
+ acpi_status status;
+ int ret = -EINVAL;
+ unsigned int val;
+ int i, j;
+ int final_ont[3][3] = { { 0 }, };
+
+ /* For some reason, ST's _ONT translation does not apply directly
+ * to the data read from the sensor. Another translation must be
+ * performed first, as described by the matrix below. Perhaps
+ * ST required this specific translation for the first product
+ * where the device was mounted?
+ */
+ const int default_ont[3][3] = {
+ { 0, 1, 0 },
+ { -1, 0, 0 },
+ { 0, 0, -1 },
+ };
+
+
+ adev = ACPI_COMPANION(adata->dev);
+ if (!adev)
+ return 0;
+
+ /* Read _ONT data, which should be a package of 6 integers. */
+ status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer);
+ if (status == AE_NOT_FOUND) {
+ return 0;
+ } else if (ACPI_FAILURE(status)) {
+ dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
+ status);
+ return status;
+ }
+
+ ont = buffer.pointer;
+ if (ont->type != ACPI_TYPE_PACKAGE || ont->package.count != 6)
+ goto out;
+
+ /* The first 3 integers provide axis order information.
+ * e.g. 0 1 2 would indicate normal X,Y,Z ordering.
+ * e.g. 1 0 2 indicates that data arrives in order Y,X,Z.
+ */
+ elements = ont->package.elements;
+ for (i = 0; i < 3; i++) {
+ if (elements[i].type != ACPI_TYPE_INTEGER)
+ goto out;
+
+ val = elements[i].integer.value;
+ if (val < 0 || val > 2)
+ goto out;
+
+ /* Avoiding full matrix multiplication, we simply reorder the
+ * columns in the default_ont matrix according to the
+ * ordering provided by _ONT.
+ */
+ final_ont[0][i] = default_ont[0][val];
+ final_ont[1][i] = default_ont[1][val];
+ final_ont[2][i] = default_ont[2][val];
+ }
+
+ /* The final 3 integers provide sign flip information.
+ * 0 means no change, 1 means flip.
+ * e.g. 0 0 1 means that Z data should be sign-flipped.
+ * This is applied after the axis reordering from above.
+ */
+ elements += 3;
+ for (i = 0; i < 3; i++) {
+ if (elements[i].type != ACPI_TYPE_INTEGER)
+ goto out;
+
+ val = elements[i].integer.value;
+ if (val != 0 && val != 1)
+ goto out;
+ if (!val)
+ continue;
+
+ /* Flip the values in the indicated column */
+ final_ont[0][i] *= -1;
+ final_ont[1][i] *= -1;
+ final_ont[2][i] *= -1;
+ }
+
+ /* Convert our integer matrix to a string-based iio_mount_matrix */
+ adata->mount_matrix = devm_kmalloc(&indio_dev->dev,
+ sizeof(*adata->mount_matrix),
+ GFP_KERNEL);
+ if (!adata->mount_matrix) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 3; j++) {
+ int matrix_val = final_ont[i][j];
+ char *str_value;
+
+ switch (matrix_val) {
+ case -1:
+ str_value = "-1";
+ break;
+ case 0:
+ str_value = "0";
+ break;
+ case 1:
+ str_value = "1";
+ break;
+ default:
+ goto out;
+ }
+ adata->mount_matrix->rotation[i * 3 + j] = str_value;
+ }
+ }
+
+ /* Expose the mount matrix via ext_info */
+ for (i = 0; i < indio_dev->num_channels; i++)
+ channels[i].ext_info = mount_matrix_ext_info;
+
+ ret = 0;
+ dev_info(&indio_dev->dev, "computed mount matrix from ACPI\n");
+
+out:
+ kfree(buffer.pointer);
+ return ret;
+#else /* !CONFIG_ACPI */
+ return 0;
+#endif
+}
+
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
struct st_sensors_platform_data *pdata =
(struct st_sensors_platform_data *)adata->dev->platform_data;
int irq = adata->get_irq_data_ready(indio_dev);
+ struct iio_chan_spec *channels;
+ size_t channels_size;
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -942,9 +1098,22 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
adata->multiread_bit = adata->sensor_settings->multi_read_bit;
- indio_dev->channels = adata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+ channels_size = indio_dev->num_channels * sizeof(struct iio_chan_spec);
+ channels = devm_kmemdup(&indio_dev->dev,
+ adata->sensor_settings->ch,
+ channels_size, GFP_KERNEL);
+ if (!channels) {
+ err = -ENOMEM;
+ goto st_accel_power_off;
+ }
+
+	err = apply_acpi_orientation(indio_dev, channels);
+	if (err)
+		dev_warn(&indio_dev->dev,
+			 "failed to apply ACPI orientation data: %d\n", err);
+
+ indio_dev->channels = channels;
adata->current_fullscale = (struct st_sensor_fullscale_avl *)
&adata->sensor_settings->fs.fs_avl[0];
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7a3ca4ec0cb7..5debc67df70a 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -57,18 +57,48 @@ config AD7298
module will be called ad7298.
config AD7476
- tristate "Analog Devices AD7476 and similar 1-channel ADCs driver"
+ tristate "Analog Devices AD7476 1-channel ADCs driver and other similar devices from AD an TI"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for Analog Devices AD7273, AD7274, AD7276,
- AD7277, AD7278, AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468,
- AD7495, AD7910, AD7920, AD7920 SPI analog to digital converters (ADC).
+ Say yes here to build support for the following SPI analog to
+ digital converters (ADCs):
+ Analog Devices: AD7273, AD7274, AD7276, AD7277, AD7278, AD7475,
+ AD7476, AD7477, AD7478, AD7466, AD7467, AD7468, AD7495, AD7910,
+ AD7920.
+ Texas Instruments: ADS7866, ADS7867, ADS7868.
To compile this driver as a module, choose M here: the
module will be called ad7476.
+config AD7606
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config AD7606_IFACE_PARALLEL
+ tristate "Analog Devices AD7606 ADC driver with parallel interface support"
+ depends on HAS_IOMEM
+ select AD7606
+ help
+ Say yes here to build parallel interface support for Analog Devices:
+ ad7605-4, ad7606, ad7606-6, ad7606-4 analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7606_parallel.
+
+config AD7606_IFACE_SPI
+ tristate "Analog Devices AD7606 ADC driver with spi interface support"
+ depends on SPI
+ select AD7606
+ help
+	  Say yes here to build SPI interface support for Analog Devices:
+ ad7605-4, ad7606, ad7606-6, ad7606-4 analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7606_spi.
+
config AD7766
tristate "Analog Devices AD7766/AD7767 ADC driver"
depends on SPI_MASTER
@@ -81,6 +111,19 @@ config AD7766
To compile this driver as a module, choose M here: the module will be
called ad7766.
+config AD7768_1
+ tristate "Analog Devices AD7768-1 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD7768-1 SPI
+ simultaneously sampling sigma-delta analog to digital converter (ADC).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad7768-1.
+
config AD7791
tristate "Analog Devices AD7791 ADC driver"
depends on SPI
@@ -367,6 +410,15 @@ config INA2XX_ADC
Say yes here to build support for TI INA2xx family of Power Monitors.
This driver is mutually exclusive with the HWMON version.
+config INGENIC_ADC
+ tristate "Ingenic JZ47xx SoCs ADC driver"
+ depends on MIPS || COMPILE_TEST
+ help
+ Say yes here to build support for the Ingenic JZ47xx SoCs ADC unit.
+
+ This driver can also be built as a module. If so, the module will be
+ called ingenic_adc.
+
config IMX7D_ADC
tristate "Freescale IMX7D ADC driver"
depends on ARCH_MXC || COMPILE_TEST
@@ -576,6 +628,16 @@ config NAU7802
To compile this driver as a module, choose M here: the
module will be called nau7802.
+config NPCM_ADC
+ tristate "Nuvoton NPCM ADC driver"
+ depends on ARCH_NPCM || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to build support for Nuvoton NPCM ADC.
+
+ This driver can also be built as a module. If so, the module
+ will be called npcm_adc.
+
config PALMAS_GPADC
tristate "TI Palmas General Purpose ADC"
depends on MFD_PALMAS
@@ -908,6 +970,16 @@ config TI_ADS8688
This driver can also be built as a module. If so, the module will be
called ti-ads8688.
+config TI_ADS124S08
+ tristate "Texas Instruments ADS124S08"
+ depends on SPI && OF
+ help
+ If you say yes here you get support for Texas Instruments ADS124S08
+	  and ADS124S06 ADC chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads124s08.
+
config TI_AM335X_ADC
tristate "TI's AM335X ADC driver"
depends on MFD_TI_AM335X_TSCADC && HAS_DMA
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 07df37f621bd..d50eb47da484 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -11,7 +11,11 @@ obj-$(CONFIG_AD7291) += ad7291.o
obj-$(CONFIG_AD7298) += ad7298.o
obj-$(CONFIG_AD7923) += ad7923.o
obj-$(CONFIG_AD7476) += ad7476.o
+obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
+obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
+obj-$(CONFIG_AD7606) += ad7606.o
obj-$(CONFIG_AD7766) += ad7766.o
+obj-$(CONFIG_AD7768_1) += ad7768-1.o
obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
@@ -36,6 +40,7 @@ obj-$(CONFIG_HI8435) += hi8435.o
obj-$(CONFIG_HX711) += hx711.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
+obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
@@ -55,6 +60,7 @@ obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_MESON_SARADC) += meson_saradc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
+obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_SPMI_ADC5) += qcom-spmi-adc5.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
@@ -81,6 +87,7 @@ obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
+obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
obj-$(CONFIG_TI_TLC4541) += ti-tlc4541.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 0549686b9ef8..76747488044b 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -59,6 +59,9 @@ enum ad7476_supported_device_ids {
ID_ADC081S,
ID_ADC101S,
ID_ADC121S,
+ ID_ADS7866,
+ ID_ADS7867,
+ ID_ADS7868,
};
static irqreturn_t ad7476_trigger_handler(int irq, void *p)
@@ -157,6 +160,8 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
#define AD7940_CHAN(bits) _AD7476_CHAN((bits), 15 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
#define AD7091R_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), 0)
+#define ADS786X_CHAN(bits) _AD7476_CHAN((bits), 12 - (bits), \
+ BIT(IIO_CHAN_INFO_RAW))
static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_AD7091R] = {
@@ -209,6 +214,18 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
.channel[0] = ADC081S_CHAN(12),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
+ [ID_ADS7866] = {
+ .channel[0] = ADS786X_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+ [ID_ADS7867] = {
+ .channel[0] = ADS786X_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+ [ID_ADS7868] = {
+ .channel[0] = ADS786X_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
};
static const struct iio_info ad7476_info = {
@@ -314,6 +331,9 @@ static const struct spi_device_id ad7476_id[] = {
{"adc081s", ID_ADC081S},
{"adc101s", ID_ADC101S},
{"adc121s", ID_ADC121S},
+ {"ads7866", ID_ADS7866},
+ {"ads7867", ID_ADS7867},
+ {"ads7868", ID_ADS7868},
{}
};
MODULE_DEVICE_TABLE(spi, ad7476_id);
diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 7308fa8fbb4c..ebb8de03bbce 100644
--- a/drivers/staging/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -1,28 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD7606 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
-#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/util_macros.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#include "ad7606.h"
@@ -30,8 +31,12 @@
* Scales are computed as 5000/32768 and 10000/32768 respectively,
* so that when applied to the raw values they provide mV values
*/
-static const unsigned int scale_avail[2][2] = {
- {0, 152588}, {0, 305176}
+static const unsigned int scale_avail[2] = {
+ 152588, 305176
+};
+
+static const unsigned int ad7606_oversampling_avail[7] = {
+ 1, 2, 4, 8, 16, 32, 64,
};
static int ad7606_reset(struct ad7606_state *st)
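
For reference, the two scale_avail entries added above follow directly from the arithmetic in the comment: full-scale range in mV divided by 32768, rounded to the nearest micro-unit. A minimal standalone sketch (plain userspace C, not driver code) that reproduces those numbers:

#include <stdio.h>

int main(void)
{
	/* Ranges from the comment above: 5000 mV and 10000 mV spans. */
	const unsigned int range_mv[] = { 5000, 10000 };

	for (unsigned int i = 0; i < 2; i++) {
		/* range / 32768, rounded, expressed in micro-mV per LSB */
		unsigned long long micro =
			((unsigned long long)range_mv[i] * 1000000 + 16384) / 32768;
		printf("%5u mV range -> 0.%06llu mV per LSB\n",
		       range_mv[i], micro);
	}
	return 0;
}

This prints 0.152588 and 0.305176, matching the two table entries.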
@@ -82,36 +87,24 @@ static int ad7606_read_samples(struct ad7606_state *st)
static irqreturn_t ad7606_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
- struct ad7606_state *st = iio_priv(pf->indio_dev);
-
- gpiod_set_value(st->gpio_convst, 1);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer
- * @work_s: the work struct through which this was scheduled
- *
- * Currently there is no option in this driver to disable the saving of
- * timestamps within the ring.
- * I think the one copy of this at a time was to avoid problems if the
- * trigger was set far too high and the reads then locked up the computer.
- **/
-static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
-{
- struct ad7606_state *st = container_of(work_s, struct ad7606_state,
- poll_work);
- struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7606_state *st = iio_priv(indio_dev);
int ret;
+ mutex_lock(&st->lock);
+
ret = ad7606_read_samples(st);
if (ret == 0)
iio_push_to_buffers_with_timestamp(indio_dev, st->data,
iio_get_time_ns(indio_dev));
- gpiod_set_value(st->gpio_convst, 0);
iio_trigger_notify_done(indio_dev->trig);
+ /* The rising edge of the CONVST signal starts a new conversion. */
+ gpiod_set_value(st->gpio_convst, 1);
+
+ mutex_unlock(&st->lock);
+
+ return IRQ_HANDLED;
}
static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
@@ -119,12 +112,13 @@ static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
- st->done = false;
gpiod_set_value(st->gpio_convst, 1);
-
- ret = wait_event_interruptible(st->wq_data_avail, st->done);
- if (ret)
+ ret = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(1000));
+ if (!ret) {
+ ret = -ETIMEDOUT;
goto error_ret;
+ }
ret = ad7606_read_samples(st);
if (ret == 0)
@@ -159,8 +153,8 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
*val = (short)ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = scale_avail[st->range][0];
- *val2 = scale_avail[st->range][1];
+ *val = 0;
+ *val2 = scale_avail[st->range];
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
*val = st->oversampling;
@@ -176,8 +170,8 @@ static ssize_t in_voltage_scale_available_show(struct device *dev,
int i, len = 0;
for (i = 0; i < ARRAY_SIZE(scale_avail); i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ",
- scale_avail[i][0], scale_avail[i][1]);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+ scale_avail[i]);
buf[len - 1] = '\n';
@@ -186,18 +180,6 @@ static ssize_t in_voltage_scale_available_show(struct device *dev,
static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
-static int ad7606_oversampling_get_index(unsigned int val)
-{
- unsigned char supported[] = {1, 2, 4, 8, 16, 32, 64};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(supported); i++)
- if (val == supported[i])
- return i;
-
- return -EINVAL;
-}
-
static int ad7606_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
@@ -206,36 +188,29 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
{
struct ad7606_state *st = iio_priv(indio_dev);
DECLARE_BITMAP(values, 3);
- int ret, i;
+ int i;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = -EINVAL;
mutex_lock(&st->lock);
- for (i = 0; i < ARRAY_SIZE(scale_avail); i++)
- if (val2 == scale_avail[i][1]) {
- gpiod_set_value(st->gpio_range, i);
- st->range = i;
-
- ret = 0;
- break;
- }
+ i = find_closest(val2, scale_avail, ARRAY_SIZE(scale_avail));
+ gpiod_set_value(st->gpio_range, i);
+ st->range = i;
mutex_unlock(&st->lock);
- return ret;
+ return 0;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
if (val2)
return -EINVAL;
- ret = ad7606_oversampling_get_index(val);
- if (ret < 0)
- return ret;
+ i = find_closest(val, ad7606_oversampling_avail,
+ ARRAY_SIZE(ad7606_oversampling_avail));
- values[0] = ret;
+ values[0] = i;
mutex_lock(&st->lock);
- gpiod_set_array_value(3, st->gpio_os->desc, st->gpio_os->info,
- values);
- st->oversampling = val;
+ gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
+ st->gpio_os->info, values);
+ st->oversampling = ad7606_oversampling_avail[i];
mutex_unlock(&st->lock);
return 0;
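
The two find_closest() calls above replace the old exact-match loops, so an out-of-range request now snaps to the nearest supported value instead of failing. As a rough standalone illustration of that behaviour (a sketch only, not the kernel helper itself):

#include <stdio.h>

static unsigned int closest_index(int val, const int *arr, unsigned int len)
{
	unsigned int i;

	/* Walk the ascending array; stop at the first midpoint not exceeded. */
	for (i = 0; i < len - 1; i++)
		if (val <= (arr[i] + arr[i + 1]) / 2)
			break;
	return i;
}

int main(void)
{
	const int oversampling[] = { 1, 2, 4, 8, 16, 32, 64 };

	/* A request of 10 maps to index 3, i.e. an oversampling ratio of 8. */
	printf("closest to 10: %d\n",
	       oversampling[closest_index(10, oversampling, 7)]);
	return 0;
}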
@@ -274,8 +249,7 @@ static const struct attribute_group ad7606_attribute_group_range = {
.attrs = ad7606_attributes_range,
};
-#define AD760X_CHANNEL(num, mask) \
- { \
+#define AD760X_CHANNEL(num, mask) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = num, \
@@ -290,7 +264,7 @@ static const struct attribute_group ad7606_attribute_group_range = {
.storagebits = 16, \
.endianness = IIO_CPU, \
}, \
- }
+}
#define AD7605_CHANNEL(num) \
AD760X_CHANNEL(num, 0)
@@ -319,9 +293,7 @@ static const struct iio_chan_spec ad7606_channels[] = {
};
static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
- /*
- * More devices added in future
- */
+ /* More devices added in future */
[ID_AD7605_4] = {
.channels = ad7605_channels,
.num_channels = 5,
@@ -347,7 +319,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
{
struct device *dev = st->dev;
- st->gpio_convst = devm_gpiod_get(dev, "conversion-start",
+ st->gpio_convst = devm_gpiod_get(dev, "adi,conversion-start",
GPIOD_OUT_LOW);
if (IS_ERR(st->gpio_convst))
return PTR_ERR(st->gpio_convst);
@@ -356,7 +328,8 @@ static int ad7606_request_gpios(struct ad7606_state *st)
if (IS_ERR(st->gpio_reset))
return PTR_ERR(st->gpio_reset);
- st->gpio_range = devm_gpiod_get_optional(dev, "range", GPIOD_OUT_LOW);
+ st->gpio_range = devm_gpiod_get_optional(dev, "adi,range",
+ GPIOD_OUT_LOW);
if (IS_ERR(st->gpio_range))
return PTR_ERR(st->gpio_range);
@@ -365,7 +338,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
if (IS_ERR(st->gpio_standby))
return PTR_ERR(st->gpio_standby);
- st->gpio_frstdata = devm_gpiod_get_optional(dev, "first-data",
+ st->gpio_frstdata = devm_gpiod_get_optional(dev, "adi,first-data",
GPIOD_IN);
if (IS_ERR(st->gpio_frstdata))
return PTR_ERR(st->gpio_frstdata);
@@ -373,13 +346,17 @@ static int ad7606_request_gpios(struct ad7606_state *st)
if (!st->chip_info->has_oversampling)
return 0;
- st->gpio_os = devm_gpiod_get_array_optional(dev, "oversampling-ratio",
+ st->gpio_os = devm_gpiod_get_array_optional(dev,
+ "adi,oversampling-ratio",
GPIOD_OUT_LOW);
return PTR_ERR_OR_ZERO(st->gpio_os);
}
-/**
- * Interrupt handler
+/*
+ * The BUSY signal indicates when conversions are in progress, so when a rising
+ * edge of CONVST is applied, BUSY goes logic high and transitions low at the
+ * end of the entire conversion process. The falling edge of the BUSY signal
+ * triggers this interrupt.
*/
static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
{
@@ -387,37 +364,87 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
struct ad7606_state *st = iio_priv(indio_dev);
if (iio_buffer_enabled(indio_dev)) {
- schedule_work(&st->poll_work);
+ gpiod_set_value(st->gpio_convst, 0);
+ iio_trigger_poll_chained(st->trig);
} else {
- st->done = true;
- wake_up_interruptible(&st->wq_data_avail);
+ complete(&st->completion);
}
return IRQ_HANDLED;
};
+static int ad7606_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ if (st->trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ad7606_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ iio_triggered_buffer_postenable(indio_dev);
+ gpiod_set_value(st->gpio_convst, 1);
+
+ return 0;
+}
+
+static int ad7606_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ gpiod_set_value(st->gpio_convst, 0);
+
+ return iio_triggered_buffer_predisable(indio_dev);
+}
+
+static const struct iio_buffer_setup_ops ad7606_buffer_ops = {
+ .postenable = &ad7606_buffer_postenable,
+ .predisable = &ad7606_buffer_predisable,
+};
+
static const struct iio_info ad7606_info_no_os_or_range = {
.read_raw = &ad7606_read_raw,
+ .validate_trigger = &ad7606_validate_trigger,
};
static const struct iio_info ad7606_info_os_and_range = {
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os_and_range,
+ .validate_trigger = &ad7606_validate_trigger,
};
static const struct iio_info ad7606_info_os = {
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os,
+ .validate_trigger = &ad7606_validate_trigger,
};
static const struct iio_info ad7606_info_range = {
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_range,
+ .validate_trigger = &ad7606_validate_trigger,
+};
+
+static const struct iio_trigger_ops ad7606_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
};
+static void ad7606_regulator_disable(void *data)
+{
+ struct ad7606_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
const char *name, unsigned int id,
const struct ad7606_bus_ops *bops)
@@ -431,6 +458,7 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return -ENOMEM;
st = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
st->dev = dev;
mutex_init(&st->lock);
@@ -439,7 +467,6 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
/* tied to logic low, analog input range is +/- 5V */
st->range = 0;
st->oversampling = 1;
- INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
st->reg = devm_regulator_get(dev, "avcc");
if (IS_ERR(st->reg))
@@ -451,11 +478,15 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return ret;
}
+ ret = devm_add_action_or_reset(dev, ad7606_regulator_disable, st);
+ if (ret)
+ return ret;
+
st->chip_info = &ad7606_chip_info_tbl[id];
ret = ad7606_request_gpios(st);
if (ret)
- goto error_disable_reg;
+ return ret;
indio_dev->dev.parent = dev;
if (st->gpio_os) {
@@ -474,56 +505,45 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
- init_waitqueue_head(&st->wq_data_avail);
+ init_completion(&st->completion);
ret = ad7606_reset(st);
if (ret)
dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
- ret = request_irq(irq, ad7606_interrupt, IRQF_TRIGGER_FALLING, name,
- indio_dev);
- if (ret)
- goto error_disable_reg;
-
- ret = iio_triggered_buffer_setup(indio_dev, &ad7606_trigger_handler,
- NULL, NULL);
- if (ret)
- goto error_free_irq;
+ st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!st->trig)
+ return -ENOMEM;
- ret = iio_device_register(indio_dev);
+ st->trig->ops = &ad7606_trigger_ops;
+ st->trig->dev.parent = dev;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ ret = devm_iio_trigger_register(dev, st->trig);
if (ret)
- goto error_unregister_ring;
+ return ret;
- dev_set_drvdata(dev, indio_dev);
+ indio_dev->trig = iio_trigger_get(st->trig);
- return 0;
-error_unregister_ring:
- iio_triggered_buffer_cleanup(indio_dev);
+ ret = devm_request_threaded_irq(dev, irq,
+ NULL,
+ &ad7606_interrupt,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, indio_dev);
+ if (ret)
+ return ret;
-error_free_irq:
- free_irq(irq, indio_dev);
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad7606_trigger_handler,
+ &ad7606_buffer_ops);
+ if (ret)
+ return ret;
-error_disable_reg:
- regulator_disable(st->reg);
- return ret;
+ return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_GPL(ad7606_probe);
-int ad7606_remove(struct device *dev, int irq)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad7606_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- free_irq(irq, indio_dev);
- regulator_disable(st->reg);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ad7606_remove);
-
#ifdef CONFIG_PM_SLEEP
static int ad7606_suspend(struct device *dev)
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 86188054b60b..5d12410f68e1 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* AD7606 ADC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_ADC_AD7606_H_
@@ -15,7 +14,6 @@
* @num_channels: number of channels
* @has_oversampling: whether the device has oversampling support
*/
-
struct ad7606_chip_info {
const struct iio_chan_spec *channels;
unsigned int num_channels;
@@ -27,13 +25,9 @@ struct ad7606_chip_info {
* @dev pointer to kernel device
* @chip_info entry in the table of chips that describes this device
 * @reg		regulator info for the power supply of the device
- * @poll_work work struct for continuously reading data from the device
- * into an IIO triggered buffer
- * @wq_data_avail wait queue struct for buffer mode
* @bops bus operations (SPI or parallel)
* @range voltage range selection, selects which scale to apply
* @oversampling oversampling selection
- * @done marks whether reading data is done
* @base_address address from where to read data in parallel operation
* @lock protect sensor state from concurrent accesses to GPIOs
* @gpio_convst GPIO descriptor for conversion start signal (CONVST)
@@ -44,19 +38,17 @@ struct ad7606_chip_info {
* @gpio_frstdata GPIO descriptor for reading from device when data
* is being read on the first channel
* @gpio_os GPIO descriptors to control oversampling on the device
+ * @completion		completion to indicate end of conversion
+ * @trig		the IIO trigger associated with the device
* @data buffer for reading data from the device
*/
-
struct ad7606_state {
struct device *dev;
const struct ad7606_chip_info *chip_info;
struct regulator *reg;
- struct work_struct poll_work;
- wait_queue_head_t wq_data_avail;
const struct ad7606_bus_ops *bops;
unsigned int range;
unsigned int oversampling;
- bool done;
void __iomem *base_address;
struct mutex lock; /* protect sensor state */
@@ -66,6 +58,8 @@ struct ad7606_state {
struct gpio_desc *gpio_standby;
struct gpio_desc *gpio_frstdata;
struct gpio_descs *gpio_os;
+ struct iio_trigger *trig;
+ struct completion completion;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -87,7 +81,6 @@ struct ad7606_bus_ops {
int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
const char *name, unsigned int id,
const struct ad7606_bus_ops *bops);
-int ad7606_remove(struct device *dev, int irq);
enum ad7606_supported_device_ids {
ID_AD7605_4,
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index 8bd86e727b02..1b08028facde 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD7606 Parallel Interface ADC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#include <linux/module.h>
@@ -27,7 +26,7 @@ static int ad7606_par16_read_block(struct device *dev,
}
static const struct ad7606_bus_ops ad7606_par16_bops = {
- .read_block = ad7606_par16_read_block,
+ .read_block = ad7606_par16_read_block,
};
static int ad7606_par8_read_block(struct device *dev,
@@ -42,7 +41,7 @@ static int ad7606_par8_read_block(struct device *dev,
}
static const struct ad7606_bus_ops ad7606_par8_bops = {
- .read_block = ad7606_par8_read_block,
+ .read_block = ad7606_par8_read_block,
};
static int ad7606_par_probe(struct platform_device *pdev)
@@ -72,40 +71,33 @@ static int ad7606_par_probe(struct platform_device *pdev)
&ad7606_par8_bops);
}
-static int ad7606_par_remove(struct platform_device *pdev)
-{
- return ad7606_remove(&pdev->dev, platform_get_irq(pdev, 0));
-}
-
static const struct platform_device_id ad7606_driver_ids[] = {
- {
- .name = "ad7605-4",
- .driver_data = ID_AD7605_4,
- }, {
- .name = "ad7606-8",
- .driver_data = ID_AD7606_8,
- }, {
- .name = "ad7606-6",
- .driver_data = ID_AD7606_6,
- }, {
- .name = "ad7606-4",
- .driver_data = ID_AD7606_4,
- },
+ { .name = "ad7605-4", .driver_data = ID_AD7605_4, },
+ { .name = "ad7606-4", .driver_data = ID_AD7606_4, },
+ { .name = "ad7606-6", .driver_data = ID_AD7606_6, },
+ { .name = "ad7606-8", .driver_data = ID_AD7606_8, },
{ }
};
-
MODULE_DEVICE_TABLE(platform, ad7606_driver_ids);
+static const struct of_device_id ad7606_of_match[] = {
+ { .compatible = "adi,ad7605-4" },
+ { .compatible = "adi,ad7606-4" },
+ { .compatible = "adi,ad7606-6" },
+ { .compatible = "adi,ad7606-8" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ad7606_of_match);
+
static struct platform_driver ad7606_driver = {
.probe = ad7606_par_probe,
- .remove = ad7606_par_remove,
.id_table = ad7606_driver_ids,
.driver = {
- .name = "ad7606",
- .pm = AD7606_PM_OPS,
+ .name = "ad7606",
+ .pm = AD7606_PM_OPS,
+ .of_match_table = ad7606_of_match,
},
};
-
module_platform_driver(ad7606_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index b76ca5a8c059..4fd0ec36a086 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AD7606 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#include <linux/module.h>
@@ -37,7 +36,7 @@ static int ad7606_spi_read_block(struct device *dev,
}
static const struct ad7606_bus_ops ad7606_spi_bops = {
- .read_block = ad7606_spi_read_block,
+ .read_block = ad7606_spi_read_block,
};
static int ad7606_spi_probe(struct spi_device *spi)
@@ -49,28 +48,32 @@ static int ad7606_spi_probe(struct spi_device *spi)
&ad7606_spi_bops);
}
-static int ad7606_spi_remove(struct spi_device *spi)
-{
- return ad7606_remove(&spi->dev, spi->irq);
-}
-
-static const struct spi_device_id ad7606_id[] = {
- {"ad7605-4", ID_AD7605_4},
- {"ad7606-8", ID_AD7606_8},
- {"ad7606-6", ID_AD7606_6},
- {"ad7606-4", ID_AD7606_4},
+static const struct spi_device_id ad7606_id_table[] = {
+ { "ad7605-4", ID_AD7605_4 },
+ { "ad7606-4", ID_AD7606_4 },
+ { "ad7606-6", ID_AD7606_6 },
+ { "ad7606-8", ID_AD7606_8 },
{}
};
-MODULE_DEVICE_TABLE(spi, ad7606_id);
+MODULE_DEVICE_TABLE(spi, ad7606_id_table);
+
+static const struct of_device_id ad7606_of_match[] = {
+ { .compatible = "adi,ad7605-4" },
+ { .compatible = "adi,ad7606-4" },
+ { .compatible = "adi,ad7606-6" },
+ { .compatible = "adi,ad7606-8" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ad7606_of_match);
static struct spi_driver ad7606_driver = {
.driver = {
.name = "ad7606",
+ .of_match_table = ad7606_of_match,
.pm = AD7606_PM_OPS,
},
.probe = ad7606_spi_probe,
- .remove = ad7606_spi_remove,
- .id_table = ad7606_id,
+ .id_table = ad7606_id_table,
};
module_spi_driver(ad7606_driver);
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
new file mode 100644
index 000000000000..0d132708c429
--- /dev/null
+++ b/drivers/iio/adc/ad7768-1.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices AD7768-1 SPI ADC driver
+ *
+ * Copyright 2017 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+/* AD7768 registers definition */
+#define AD7768_REG_CHIP_TYPE 0x3
+#define AD7768_REG_PROD_ID_L 0x4
+#define AD7768_REG_PROD_ID_H 0x5
+#define AD7768_REG_CHIP_GRADE 0x6
+#define AD7768_REG_SCRATCH_PAD 0x0A
+#define AD7768_REG_VENDOR_L 0x0C
+#define AD7768_REG_VENDOR_H 0x0D
+#define AD7768_REG_INTERFACE_FORMAT 0x14
+#define AD7768_REG_POWER_CLOCK 0x15
+#define AD7768_REG_ANALOG 0x16
+#define AD7768_REG_ANALOG2 0x17
+#define AD7768_REG_CONVERSION 0x18
+#define AD7768_REG_DIGITAL_FILTER 0x19
+#define AD7768_REG_SINC3_DEC_RATE_MSB 0x1A
+#define AD7768_REG_SINC3_DEC_RATE_LSB 0x1B
+#define AD7768_REG_DUTY_CYCLE_RATIO 0x1C
+#define AD7768_REG_SYNC_RESET 0x1D
+#define AD7768_REG_GPIO_CONTROL 0x1E
+#define AD7768_REG_GPIO_WRITE 0x1F
+#define AD7768_REG_GPIO_READ 0x20
+#define AD7768_REG_OFFSET_HI 0x21
+#define AD7768_REG_OFFSET_MID 0x22
+#define AD7768_REG_OFFSET_LO 0x23
+#define AD7768_REG_GAIN_HI 0x24
+#define AD7768_REG_GAIN_MID 0x25
+#define AD7768_REG_GAIN_LO 0x26
+#define AD7768_REG_SPI_DIAG_ENABLE 0x28
+#define AD7768_REG_ADC_DIAG_ENABLE 0x29
+#define AD7768_REG_DIG_DIAG_ENABLE 0x2A
+#define AD7768_REG_ADC_DATA 0x2C
+#define AD7768_REG_MASTER_STATUS 0x2D
+#define AD7768_REG_SPI_DIAG_STATUS 0x2E
+#define AD7768_REG_ADC_DIAG_STATUS 0x2F
+#define AD7768_REG_DIG_DIAG_STATUS 0x30
+#define AD7768_REG_MCLK_COUNTER 0x31
+
+/* AD7768_REG_POWER_CLOCK */
+#define AD7768_PWR_MCLK_DIV_MSK GENMASK(5, 4)
+#define AD7768_PWR_MCLK_DIV(x) FIELD_PREP(AD7768_PWR_MCLK_DIV_MSK, x)
+#define AD7768_PWR_PWRMODE_MSK GENMASK(1, 0)
+#define AD7768_PWR_PWRMODE(x) FIELD_PREP(AD7768_PWR_PWRMODE_MSK, x)
+
+/* AD7768_REG_DIGITAL_FILTER */
+#define AD7768_DIG_FIL_FIL_MSK GENMASK(6, 4)
+#define AD7768_DIG_FIL_FIL(x) FIELD_PREP(AD7768_DIG_FIL_FIL_MSK, x)
+#define AD7768_DIG_FIL_DEC_MSK GENMASK(2, 0)
+#define AD7768_DIG_FIL_DEC_RATE(x) FIELD_PREP(AD7768_DIG_FIL_DEC_MSK, x)
+
+/* AD7768_REG_CONVERSION */
+#define AD7768_CONV_MODE_MSK GENMASK(2, 0)
+#define AD7768_CONV_MODE(x) FIELD_PREP(AD7768_CONV_MODE_MSK, x)
+
+#define AD7768_RD_FLAG_MSK(x) (BIT(6) | ((x) & 0x3F))
+#define AD7768_WR_FLAG_MSK(x) ((x) & 0x3F)
+
+enum ad7768_conv_mode {
+ AD7768_CONTINUOUS,
+ AD7768_ONE_SHOT,
+ AD7768_SINGLE,
+ AD7768_PERIODIC,
+ AD7768_STANDBY
+};
+
+enum ad7768_pwrmode {
+ AD7768_ECO_MODE = 0,
+ AD7768_MED_MODE = 2,
+ AD7768_FAST_MODE = 3
+};
+
+enum ad7768_mclk_div {
+ AD7768_MCLK_DIV_16,
+ AD7768_MCLK_DIV_8,
+ AD7768_MCLK_DIV_4,
+ AD7768_MCLK_DIV_2
+};
+
+enum ad7768_dec_rate {
+ AD7768_DEC_RATE_32 = 0,
+ AD7768_DEC_RATE_64 = 1,
+ AD7768_DEC_RATE_128 = 2,
+ AD7768_DEC_RATE_256 = 3,
+ AD7768_DEC_RATE_512 = 4,
+ AD7768_DEC_RATE_1024 = 5,
+ AD7768_DEC_RATE_8 = 9,
+ AD7768_DEC_RATE_16 = 10
+};
+
+struct ad7768_clk_configuration {
+ enum ad7768_mclk_div mclk_div;
+ enum ad7768_dec_rate dec_rate;
+ unsigned int clk_div;
+ enum ad7768_pwrmode pwrmode;
+};
+
+static const struct ad7768_clk_configuration ad7768_clk_config[] = {
+ { AD7768_MCLK_DIV_2, AD7768_DEC_RATE_8, 16, AD7768_FAST_MODE },
+ { AD7768_MCLK_DIV_2, AD7768_DEC_RATE_16, 32, AD7768_FAST_MODE },
+ { AD7768_MCLK_DIV_2, AD7768_DEC_RATE_32, 64, AD7768_FAST_MODE },
+ { AD7768_MCLK_DIV_2, AD7768_DEC_RATE_64, 128, AD7768_FAST_MODE },
+ { AD7768_MCLK_DIV_2, AD7768_DEC_RATE_128, 256, AD7768_FAST_MODE },
+ { AD7768_MCLK_DIV_4, AD7768_DEC_RATE_128, 512, AD7768_MED_MODE },
+ { AD7768_MCLK_DIV_4, AD7768_DEC_RATE_256, 1024, AD7768_MED_MODE },
+ { AD7768_MCLK_DIV_4, AD7768_DEC_RATE_512, 2048, AD7768_MED_MODE },
+ { AD7768_MCLK_DIV_4, AD7768_DEC_RATE_1024, 4096, AD7768_MED_MODE },
+ { AD7768_MCLK_DIV_8, AD7768_DEC_RATE_1024, 8192, AD7768_MED_MODE },
+ { AD7768_MCLK_DIV_16, AD7768_DEC_RATE_1024, 16384, AD7768_ECO_MODE },
+};
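
Each table entry above encodes an overall division factor (clk_div = MCLK divider × filter decimation), and the driver later derives the output data rate as MCLK / clk_div. A standalone sketch of that arithmetic, assuming a 16.384 MHz MCLK (the clock rate is an assumption for illustration, not something taken from this patch):

#include <stdio.h>

int main(void)
{
	const unsigned int mclk_hz = 16384000;	/* assumed MCLK */
	const unsigned int clk_div[] = {
		16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384
	};

	/* Rounded division, mirroring DIV_ROUND_CLOSEST(mclk, clk_div). */
	for (unsigned int i = 0; i < sizeof(clk_div) / sizeof(clk_div[0]); i++)
		printf("clk_div %5u -> %u SPS\n", clk_div[i],
		       (mclk_hz + clk_div[i] / 2) / clk_div[i]);
	return 0;
}

With that assumed clock the rates range from 1.024 MSPS down to 1 kSPS, which is the list exposed later by sampling_frequency_available.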
+
+static const struct iio_chan_spec ad7768_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .indexed = 1,
+ .channel = 0,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .shift = 8,
+ .endianness = IIO_BE,
+ },
+ },
+};
+
+struct ad7768_state {
+ struct spi_device *spi;
+ struct regulator *vref;
+ struct mutex lock;
+ struct clk *mclk;
+ unsigned int mclk_freq;
+ unsigned int samp_freq;
+ struct completion completion;
+ struct iio_trigger *trig;
+ struct gpio_desc *gpio_sync_in;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[2];
+ } data ____cacheline_aligned;
+};
+
+static int ad7768_spi_reg_read(struct ad7768_state *st, unsigned int addr,
+ unsigned int len)
+{
+ unsigned int shift;
+ int ret;
+
+ shift = 32 - (8 * len);
+ st->data.d8[0] = AD7768_RD_FLAG_MSK(addr);
+
+ ret = spi_write_then_read(st->spi, st->data.d8, 1,
+ &st->data.d32, len);
+ if (ret < 0)
+ return ret;
+
+ return (be32_to_cpu(st->data.d32) >> shift);
+}
+
+static int ad7768_spi_reg_write(struct ad7768_state *st,
+ unsigned int addr,
+ unsigned int val)
+{
+ st->data.d8[0] = AD7768_WR_FLAG_MSK(addr);
+ st->data.d8[1] = val & 0xFF;
+
+ return spi_write(st->spi, st->data.d8, 2);
+}
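
The register access helpers above build a single command byte (bit 6 set for reads, register address in the low six bits) and, on reads, pull the returned bytes out of the top of a big-endian 32-bit word using shift = 32 - 8 * len. A standalone sketch of that framing and extraction; the three data bytes are made-up example values:

#include <stdio.h>
#include <stdint.h>

#define RD_FLAG(x)	(0x40 | ((x) & 0x3F))	/* read: BIT(6) | address */
#define WR_FLAG(x)	((x) & 0x3F)		/* write: address only */

int main(void)
{
	/* Pretend the device returned these three data bytes (24-bit sample). */
	const uint8_t rx[3] = { 0x12, 0x34, 0x56 };
	const unsigned int len = 3, shift = 32 - 8 * len;
	uint32_t word;

	/* The received bytes sit at the most significant end of the word. */
	word = ((uint32_t)rx[0] << 24) | ((uint32_t)rx[1] << 16) |
	       ((uint32_t)rx[2] << 8);

	printf("read command for reg 0x2C:  0x%02X\n", RD_FLAG(0x2C));
	printf("write command for reg 0x14: 0x%02X\n", WR_FLAG(0x14));
	printf("extracted sample:           0x%06X\n",
	       (unsigned int)(word >> shift));
	return 0;
}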
+
+static int ad7768_set_mode(struct ad7768_state *st,
+ enum ad7768_conv_mode mode)
+{
+ int regval;
+
+ regval = ad7768_spi_reg_read(st, AD7768_REG_CONVERSION, 1);
+ if (regval < 0)
+ return regval;
+
+ regval &= ~AD7768_CONV_MODE_MSK;
+ regval |= AD7768_CONV_MODE(mode);
+
+ return ad7768_spi_reg_write(st, AD7768_REG_CONVERSION, regval);
+}
+
+static int ad7768_scan_direct(struct iio_dev *indio_dev)
+{
+ struct ad7768_state *st = iio_priv(indio_dev);
+ int readval, ret;
+
+ reinit_completion(&st->completion);
+
+ ret = ad7768_set_mode(st, AD7768_ONE_SHOT);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ readval = ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3);
+ if (readval < 0)
+ return readval;
+ /*
+ * Any SPI configuration of the AD7768-1 can only be
+ * performed in continuous conversion mode.
+ */
+ ret = ad7768_set_mode(st, AD7768_CONTINUOUS);
+ if (ret < 0)
+ return ret;
+
+ return readval;
+}
+
+static int ad7768_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad7768_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ if (readval) {
+ ret = ad7768_spi_reg_read(st, reg, 1);
+ if (ret < 0)
+ goto err_unlock;
+ *readval = ret;
+ ret = 0;
+ } else {
+ ret = ad7768_spi_reg_write(st, reg, writeval);
+ }
+err_unlock:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad7768_set_dig_fil(struct ad7768_state *st,
+ enum ad7768_dec_rate dec_rate)
+{
+ unsigned int mode;
+ int ret;
+
+ if (dec_rate == AD7768_DEC_RATE_8 || dec_rate == AD7768_DEC_RATE_16)
+ mode = AD7768_DIG_FIL_FIL(dec_rate);
+ else
+ mode = AD7768_DIG_FIL_DEC_RATE(dec_rate);
+
+ ret = ad7768_spi_reg_write(st, AD7768_REG_DIGITAL_FILTER, mode);
+ if (ret < 0)
+ return ret;
+
+ /* A sync-in pulse is required every time the filter dec rate changes */
+ gpiod_set_value(st->gpio_sync_in, 1);
+ gpiod_set_value(st->gpio_sync_in, 0);
+
+ return 0;
+}
+
+static int ad7768_set_freq(struct ad7768_state *st,
+ unsigned int freq)
+{
+ unsigned int diff_new, diff_old, pwr_mode, i, idx;
+ int res, ret;
+
+ diff_old = U32_MAX;
+ idx = 0;
+
+ res = DIV_ROUND_CLOSEST(st->mclk_freq, freq);
+
+ /* Find the closest match for the desired sampling frequency */
+ for (i = 0; i < ARRAY_SIZE(ad7768_clk_config); i++) {
+ diff_new = abs(res - ad7768_clk_config[i].clk_div);
+ if (diff_new < diff_old) {
+ diff_old = diff_new;
+ idx = i;
+ }
+ }
+
+ /*
+ * Set both the mclk_div and pwrmode with a single write to the
+ * POWER_CLOCK register
+ */
+ pwr_mode = AD7768_PWR_MCLK_DIV(ad7768_clk_config[idx].mclk_div) |
+ AD7768_PWR_PWRMODE(ad7768_clk_config[idx].pwrmode);
+ ret = ad7768_spi_reg_write(st, AD7768_REG_POWER_CLOCK, pwr_mode);
+ if (ret < 0)
+ return ret;
+
+ ret = ad7768_set_dig_fil(st, ad7768_clk_config[idx].dec_rate);
+ if (ret < 0)
+ return ret;
+
+ st->samp_freq = DIV_ROUND_CLOSEST(st->mclk_freq,
+ ad7768_clk_config[idx].clk_div);
+
+ return 0;
+}
+
+static ssize_t ad7768_sampling_freq_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7768_state *st = iio_priv(indio_dev);
+ unsigned int freq;
+ int i, len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ad7768_clk_config); i++) {
+ freq = DIV_ROUND_CLOSEST(st->mclk_freq,
+ ad7768_clk_config[i].clk_div);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", freq);
+ }
+
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(ad7768_sampling_freq_avail);
+
+static int ad7768_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad7768_state *st = iio_priv(indio_dev);
+ int scale_uv, ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad7768_scan_direct(indio_dev);
+ if (ret >= 0)
+ *val = ret;
+
+ iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = regulator_get_voltage(st->vref);
+ if (scale_uv < 0)
+ return scale_uv;
+
+ *val = (scale_uv * 2) / 1000;
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->samp_freq;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
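
The IIO_CHAN_INFO_SCALE case above returns the pair (val, val2) with IIO_VAL_FRACTIONAL_LOG2 semantics, i.e. the reported scale is val / 2^val2. A standalone sketch of that arithmetic; the 4.096 V reference is an assumed example value, not something this patch specifies:

#include <stdio.h>

int main(void)
{
	int scale_uv = 4096000;			/* assumed vref in microvolts */
	int val = (scale_uv * 2) / 1000;	/* mirrors (scale_uv * 2) / 1000 above */
	int val2 = 24;				/* realbits of the 24-bit channel */

	/* IIO_VAL_FRACTIONAL_LOG2: scale = val / 2^val2 (mV per LSB here) */
	printf("scale = %d / 2^%d = %.9f mV per LSB\n",
	       val, val2, (double)val / (double)(1 << val2));
	return 0;
}

For this assumed reference the result is 8192 / 2^24, roughly 0.488 µV per LSB.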
+
+static int ad7768_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad7768_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ad7768_set_freq(st, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct attribute *ad7768_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ad7768_group = {
+ .attrs = ad7768_attributes,
+};
+
+static const struct iio_info ad7768_info = {
+ .attrs = &ad7768_group,
+ .read_raw = &ad7768_read_raw,
+ .write_raw = &ad7768_write_raw,
+ .debugfs_reg_access = &ad7768_reg_access,
+};
+
+static int ad7768_setup(struct ad7768_state *st)
+{
+ int ret;
+
+ /*
+ * Two writes to the SPI_RESET[1:0] bits are required to initiate
+ * a software reset. The bits must first be set to 11, and then
+ * to 10. When the sequence is detected, the reset occurs.
+ * See the datasheet, page 70.
+ */
+ ret = ad7768_spi_reg_write(st, AD7768_REG_SYNC_RESET, 0x3);
+ if (ret)
+ return ret;
+
+ ret = ad7768_spi_reg_write(st, AD7768_REG_SYNC_RESET, 0x2);
+ if (ret)
+ return ret;
+
+ st->gpio_sync_in = devm_gpiod_get(&st->spi->dev, "adi,sync-in",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_sync_in))
+ return PTR_ERR(st->gpio_sync_in);
+
+	/* Set the default sampling frequency to 32000 SPS */
+ return ad7768_set_freq(st, 32000);
+}
+
+static irqreturn_t ad7768_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7768_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ ret = spi_read(st->spi, &st->data.d32, 3);
+ if (ret < 0)
+ goto err_unlock;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data.d32,
+ iio_get_time_ns(indio_dev));
+
+ iio_trigger_notify_done(indio_dev->trig);
+err_unlock:
+ mutex_unlock(&st->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ad7768_interrupt(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct ad7768_state *st = iio_priv(indio_dev);
+
+ if (iio_buffer_enabled(indio_dev))
+ iio_trigger_poll(st->trig);
+ else
+ complete(&st->completion);
+
+ return IRQ_HANDLED;
+};
+
+static int ad7768_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad7768_state *st = iio_priv(indio_dev);
+
+ iio_triggered_buffer_postenable(indio_dev);
+ /*
+ * Write a 1 to the LSB of the INTERFACE_FORMAT register to enter
+ * continuous read mode. Subsequent data reads do not require an
+ * initial 8-bit write to query the ADC_DATA register.
+ */
+ return ad7768_spi_reg_write(st, AD7768_REG_INTERFACE_FORMAT, 0x01);
+}
+
+static int ad7768_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad7768_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * To exit continuous read mode, perform a single read of the ADC_DATA
+ * reg (0x2C), which allows further configuration of the device.
+ */
+ ret = ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3);
+ if (ret < 0)
+ return ret;
+
+ return iio_triggered_buffer_predisable(indio_dev);
+}
+
+static const struct iio_buffer_setup_ops ad7768_buffer_ops = {
+ .postenable = &ad7768_buffer_postenable,
+ .predisable = &ad7768_buffer_predisable,
+};
+
+static const struct iio_trigger_ops ad7768_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static void ad7768_regulator_disable(void *data)
+{
+ struct ad7768_state *st = data;
+
+ regulator_disable(st->vref);
+}
+
+static void ad7768_clk_disable(void *data)
+{
+ struct ad7768_state *st = data;
+
+ clk_disable_unprepare(st->mclk);
+}
+
+static int ad7768_probe(struct spi_device *spi)
+{
+ struct ad7768_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ st->vref = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->vref))
+ return PTR_ERR(st->vref);
+
+ ret = regulator_enable(st->vref);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified vref supply\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7768_regulator_disable, st);
+ if (ret)
+ return ret;
+
+ st->mclk = devm_clk_get(&spi->dev, "mclk");
+ if (IS_ERR(st->mclk))
+ return PTR_ERR(st->mclk);
+
+ ret = clk_prepare_enable(st->mclk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7768_clk_disable, st);
+ if (ret)
+ return ret;
+
+ st->mclk_freq = clk_get_rate(st->mclk);
+
+ spi_set_drvdata(spi, indio_dev);
+ mutex_init(&st->lock);
+
+ indio_dev->channels = ad7768_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7768_channels);
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad7768_info;
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;
+
+ ret = ad7768_setup(st);
+ if (ret < 0) {
+ dev_err(&spi->dev, "AD7768 setup failed\n");
+ return ret;
+ }
+
+ st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!st->trig)
+ return -ENOMEM;
+
+ st->trig->ops = &ad7768_trigger_ops;
+ st->trig->dev.parent = &spi->dev;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ ret = devm_iio_trigger_register(&spi->dev, st->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(st->trig);
+
+ init_completion(&st->completion);
+
+ ret = devm_request_irq(&spi->dev, spi->irq,
+ &ad7768_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad7768_trigger_handler,
+ &ad7768_buffer_ops);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad7768_id_table[] = {
+ { "ad7768-1", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7768_id_table);
+
+static const struct of_device_id ad7768_of_match[] = {
+ { .compatible = "adi,ad7768-1" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ad7768_of_match);
+
+static struct spi_driver ad7768_driver = {
+ .driver = {
+ .name = "ad7768-1",
+ .of_match_table = ad7768_of_match,
+ },
+ .probe = ad7768_probe,
+ .id_table = ad7768_id_table,
+};
+module_spi_driver(ad7768_driver);
+
+MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7768-1 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 031d568b4972..4e339cfd0c54 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -27,9 +27,18 @@
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
-#define AXP288_ADC_EN_MASK 0xF1
-#define AXP288_ADC_TS_PIN_GPADC 0xF2
-#define AXP288_ADC_TS_PIN_ON 0xF3
+/*
+ * This mask enables all ADCs except for the battery temp-sensor (TS), which is
+ * left as-is to avoid breaking charging on devices without a temp-sensor.
+ */
+#define AXP288_ADC_EN_MASK 0xF0
+#define AXP288_ADC_TS_ENABLE 0x01
+
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
enum axp288_adc_id {
AXP288_ADC_TS,
@@ -44,6 +53,7 @@ enum axp288_adc_id {
struct axp288_adc_info {
int irq;
struct regmap *regmap;
+ bool ts_enabled;
};
static const struct iio_chan_spec axp288_adc_channels[] = {
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
- unsigned long address)
+/*
+ * The current-source used for the battery temp-sensor (TS) is shared
+ * with the GPADC. For proper fuel-gauge and charger operation the TS
+ * current-source needs to be permanently on. But to read the GPADC we
+ * need to temporarily switch the TS current-source to ondemand, so that
+ * the GPADC can use it; otherwise we will always read an all-zeros value.
+ */
+static int axp288_adc_set_ts(struct axp288_adc_info *info,
+ unsigned int mode, unsigned long address)
{
int ret;
- /* channels other than GPADC do not need to switch TS pin */
+ /* No need to switch the current-source if the TS pin is disabled */
+ if (!info->ts_enabled)
+ return 0;
+
+ /* Channels other than GPADC do not need the current source */
if (address != AXP288_GP_ADC_H)
return 0;
- ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
if (ret)
return ret;
/* When switching to the GPADC pin give things some time to settle */
- if (mode == AXP288_ADC_TS_PIN_GPADC)
+ if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
usleep_range(6000, 10000);
return 0;
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+ if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
chan->address)) {
dev_err(&indio_dev->dev, "GPADC mode\n");
ret = -EINVAL;
break;
}
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+ if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
chan->address))
dev_err(&indio_dev->dev, "TS pin restore\n");
break;
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int axp288_adc_set_state(struct regmap *regmap)
+static int axp288_adc_initialize(struct axp288_adc_info *info)
{
- /* ADC should be always enabled for internal FG to function */
- if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
- return -EIO;
+ int ret, adc_enable_val;
+
+ /*
+ * Determine if the TS pin is enabled and set the TS current-source
+ * accordingly.
+ */
+ ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
+ if (ret)
+ return ret;
+
+ if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
+ info->ts_enabled = true;
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+ AXP288_ADC_TS_CURRENT_ON);
+ } else {
+ info->ts_enabled = false;
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+ AXP288_ADC_TS_CURRENT_OFF);
+ }
+ if (ret)
+ return ret;
- return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+ /* Turn on the ADC for all channels except TS, leave TS as is */
+ return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
+ AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
}
static const struct iio_info axp288_adc_iio_info = {
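
As a side note on the masks above: regmap_update_bits() performs a read-modify-write, so with mask 0xF0 the TS enable bit (bit 0) of AXP20X_ADC_EN1 is left untouched. A minimal standalone model of that update (an illustration only, not the regmap implementation; the starting register value is made up):

#include <stdio.h>

static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	/* Read-modify-write: only bits inside the mask change. */
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int en1 = 0x01;	/* assume only the TS bit was set */

	/* Enable all other ADC channels while preserving bit 0. */
	en1 = update_bits(en1, 0xF0, 0xF0);
	printf("AXP20X_ADC_EN1 = 0x%02X\n", en1);	/* prints 0xF1 */
	return 0;
}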
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
	 * Set ADC to enabled state at all times, including system suspend;
	 * otherwise internal fuel gauge functionality may be affected.
*/
- ret = axp288_adc_set_state(axp20x->regmap);
+ ret = axp288_adc_initialize(info);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index fa2d2b5767f3..1ca2c4d39f87 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -115,6 +115,7 @@
#define MAX_ADC_V2_CHANNELS 10
#define MAX_ADC_V1_CHANNELS 8
#define MAX_EXYNOS3250_ADC_CHANNELS 2
+#define MAX_EXYNOS4212_ADC_CHANNELS 4
#define MAX_S5PV210_ADC_CHANNELS 10
/* Bit definitions common for ADC_V1 and ADC_V2 */
@@ -271,6 +272,19 @@ static void exynos_adc_v1_start_conv(struct exynos_adc *info,
writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
}
+/* Exynos4212 and 4412 are like ADCv1 but with four channels only */
+static const struct exynos_adc_data exynos4212_adc_data = {
+ .num_channels = MAX_EXYNOS4212_ADC_CHANNELS,
+ .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
+ .needs_adc_phy = true,
+ .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
+
+ .init_hw = exynos_adc_v1_init_hw,
+ .exit_hw = exynos_adc_v1_exit_hw,
+ .clear_irq = exynos_adc_v1_clear_irq,
+ .start_conv = exynos_adc_v1_start_conv,
+};
+
static const struct exynos_adc_data exynos_adc_v1_data = {
.num_channels = MAX_ADC_V1_CHANNELS,
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
@@ -493,6 +507,9 @@ static const struct of_device_id exynos_adc_match[] = {
.compatible = "samsung,s5pv210-adc",
.data = &exynos_adc_s5pv210_data,
}, {
+ .compatible = "samsung,exynos4212-adc",
+ .data = &exynos4212_adc_data,
+ }, {
.compatible = "samsung,exynos-adc-v1",
.data = &exynos_adc_v1_data,
}, {
@@ -929,7 +946,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct exynos_adc *info = iio_priv(indio_dev);
- if (IS_REACHABLE(CONFIG_INPUT)) {
+ if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
free_irq(info->tsirq, info);
input_unregister_device(info->input);
}
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
new file mode 100644
index 000000000000..6ee1673deb0d
--- /dev/null
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADC driver for the Ingenic JZ47xx SoCs
+ * Copyright (c) 2019 Artur Rojek <contact@artur-rojek.eu>
+ *
+ * based on drivers/mfd/jz4740-adc.c
+ */
+
+#include <dt-bindings/iio/adc/ingenic,adc.h>
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define JZ_ADC_REG_ENABLE 0x00
+#define JZ_ADC_REG_CFG 0x04
+#define JZ_ADC_REG_CTRL 0x08
+#define JZ_ADC_REG_STATUS 0x0c
+#define JZ_ADC_REG_ADTCH 0x18
+#define JZ_ADC_REG_ADBDAT 0x1c
+#define JZ_ADC_REG_ADSDAT 0x20
+
+#define JZ_ADC_REG_CFG_BAT_MD BIT(4)
+
+#define JZ_ADC_AUX_VREF 3300
+#define JZ_ADC_AUX_VREF_BITS 12
+#define JZ_ADC_BATTERY_LOW_VREF 2500
+#define JZ_ADC_BATTERY_LOW_VREF_BITS 12
+#define JZ4725B_ADC_BATTERY_HIGH_VREF 7500
+#define JZ4725B_ADC_BATTERY_HIGH_VREF_BITS 10
+#define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
+#define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
+
+struct ingenic_adc_soc_data {
+ unsigned int battery_high_vref;
+ unsigned int battery_high_vref_bits;
+ const int *battery_raw_avail;
+ size_t battery_raw_avail_size;
+ const int *battery_scale_avail;
+ size_t battery_scale_avail_size;
+};
+
+struct ingenic_adc {
+ void __iomem *base;
+ struct clk *clk;
+ struct mutex lock;
+ const struct ingenic_adc_soc_data *soc_data;
+ bool low_vref_mode;
+};
+
+static void ingenic_adc_set_config(struct ingenic_adc *adc,
+ uint32_t mask,
+ uint32_t val)
+{
+ uint32_t cfg;
+
+ clk_enable(adc->clk);
+ mutex_lock(&adc->lock);
+
+ cfg = readl(adc->base + JZ_ADC_REG_CFG) & ~mask;
+ cfg |= val;
+ writel(cfg, adc->base + JZ_ADC_REG_CFG);
+
+ mutex_unlock(&adc->lock);
+ clk_disable(adc->clk);
+}
+
+static void ingenic_adc_enable(struct ingenic_adc *adc,
+ int engine,
+ bool enabled)
+{
+ u8 val;
+
+ mutex_lock(&adc->lock);
+ val = readb(adc->base + JZ_ADC_REG_ENABLE);
+
+ if (enabled)
+ val |= BIT(engine);
+ else
+ val &= ~BIT(engine);
+
+ writeb(val, adc->base + JZ_ADC_REG_ENABLE);
+ mutex_unlock(&adc->lock);
+}
+
+static int ingenic_adc_capture(struct ingenic_adc *adc,
+ int engine)
+{
+ u8 val;
+ int ret;
+
+ ingenic_adc_enable(adc, engine, true);
+ ret = readb_poll_timeout(adc->base + JZ_ADC_REG_ENABLE, val,
+ !(val & BIT(engine)), 250, 1000);
+ if (ret)
+ ingenic_adc_enable(adc, engine, false);
+
+ return ret;
+}
+
+static int ingenic_adc_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long m)
+{
+ struct ingenic_adc *adc = iio_priv(iio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel) {
+ case INGENIC_ADC_BATTERY:
+ if (val > JZ_ADC_BATTERY_LOW_VREF) {
+ ingenic_adc_set_config(adc,
+ JZ_ADC_REG_CFG_BAT_MD,
+ 0);
+ adc->low_vref_mode = false;
+ } else {
+ ingenic_adc_set_config(adc,
+ JZ_ADC_REG_CFG_BAT_MD,
+ JZ_ADC_REG_CFG_BAT_MD);
+ adc->low_vref_mode = true;
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const int jz4725b_adc_battery_raw_avail[] = {
+ 0, 1, (1 << JZ_ADC_BATTERY_LOW_VREF_BITS) - 1,
+};
+
+static const int jz4725b_adc_battery_scale_avail[] = {
+ JZ4725B_ADC_BATTERY_HIGH_VREF, JZ4725B_ADC_BATTERY_HIGH_VREF_BITS,
+ JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
+};
+
+static const int jz4740_adc_battery_raw_avail[] = {
+ 0, 1, (1 << JZ_ADC_BATTERY_LOW_VREF_BITS) - 1,
+};
+
+static const int jz4740_adc_battery_scale_avail[] = {
+ JZ4740_ADC_BATTERY_HIGH_VREF, JZ4740_ADC_BATTERY_HIGH_VREF_BITS,
+ JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
+};
+
+static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
+ .battery_high_vref = JZ4725B_ADC_BATTERY_HIGH_VREF,
+ .battery_high_vref_bits = JZ4725B_ADC_BATTERY_HIGH_VREF_BITS,
+ .battery_raw_avail = jz4725b_adc_battery_raw_avail,
+ .battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
+ .battery_scale_avail = jz4725b_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
+};
+
+static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
+ .battery_high_vref = JZ4740_ADC_BATTERY_HIGH_VREF,
+ .battery_high_vref_bits = JZ4740_ADC_BATTERY_HIGH_VREF_BITS,
+ .battery_raw_avail = jz4740_adc_battery_raw_avail,
+ .battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
+ .battery_scale_avail = jz4740_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
+};
+
+static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type,
+ int *length,
+ long m)
+{
+ struct ingenic_adc *adc = iio_priv(iio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ *type = IIO_VAL_INT;
+ *length = adc->soc_data->battery_raw_avail_size;
+ *vals = adc->soc_data->battery_raw_avail;
+ return IIO_AVAIL_RANGE;
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_FRACTIONAL_LOG2;
+ *length = adc->soc_data->battery_scale_avail_size;
+ *vals = adc->soc_data->battery_scale_avail;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ };
+}
+
+static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ingenic_adc *adc = iio_priv(iio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ clk_enable(adc->clk);
+ ret = ingenic_adc_capture(adc, chan->channel);
+ if (ret) {
+ clk_disable(adc->clk);
+ return ret;
+ }
+
+ switch (chan->channel) {
+ case INGENIC_ADC_AUX:
+ *val = readw(adc->base + JZ_ADC_REG_ADSDAT);
+ break;
+ case INGENIC_ADC_BATTERY:
+ *val = readw(adc->base + JZ_ADC_REG_ADBDAT);
+ break;
+ }
+
+ clk_disable(adc->clk);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel) {
+ case INGENIC_ADC_AUX:
+ *val = JZ_ADC_AUX_VREF;
+ *val2 = JZ_ADC_AUX_VREF_BITS;
+ break;
+ case INGENIC_ADC_BATTERY:
+ if (adc->low_vref_mode) {
+ *val = JZ_ADC_BATTERY_LOW_VREF;
+ *val2 = JZ_ADC_BATTERY_LOW_VREF_BITS;
+ } else {
+ *val = adc->soc_data->battery_high_vref;
+ *val2 = adc->soc_data->battery_high_vref_bits;
+ }
+ break;
+ }
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ingenic_adc_clk_cleanup(void *data)
+{
+ clk_unprepare(data);
+}
+
+static const struct iio_info ingenic_adc_info = {
+ .write_raw = ingenic_adc_write_raw,
+ .read_raw = ingenic_adc_read_raw,
+ .read_avail = ingenic_adc_read_avail,
+};
+
+static const struct iio_chan_spec ingenic_channels[] = {
+ {
+ .extend_name = "aux",
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ .channel = INGENIC_ADC_AUX,
+ },
+ {
+ .extend_name = "battery",
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ .channel = INGENIC_ADC_BATTERY,
+ },
+};
+
+static int ingenic_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *iio_dev;
+ struct ingenic_adc *adc;
+ struct resource *mem_base;
+ const struct ingenic_adc_soc_data *soc_data;
+ int ret;
+
+ soc_data = device_get_match_data(dev);
+ if (!soc_data)
+ return -EINVAL;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(iio_dev);
+ mutex_init(&adc->lock);
+ adc->soc_data = soc_data;
+
+ mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adc->base = devm_ioremap_resource(dev, mem_base);
+ if (IS_ERR(adc->base)) {
+ dev_err(dev, "Unable to ioremap mmio resource\n");
+ return PTR_ERR(adc->base);
+ }
+
+ adc->clk = devm_clk_get(dev, "adc");
+ if (IS_ERR(adc->clk)) {
+ dev_err(dev, "Unable to get clock\n");
+ return PTR_ERR(adc->clk);
+ }
+
+ ret = clk_prepare_enable(adc->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ /* Put hardware in a known passive state. */
+ writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
+ writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
+ clk_disable(adc->clk);
+
+ ret = devm_add_action_or_reset(dev, ingenic_adc_clk_cleanup, adc->clk);
+ if (ret) {
+ dev_err(dev, "Unable to add action\n");
+ return ret;
+ }
+
+ iio_dev->dev.parent = dev;
+ iio_dev->name = "jz-adc";
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->channels = ingenic_channels;
+ iio_dev->num_channels = ARRAY_SIZE(ingenic_channels);
+ iio_dev->info = &ingenic_adc_info;
+
+ ret = devm_iio_device_register(dev, iio_dev);
+ if (ret)
+ dev_err(dev, "Unable to register IIO device\n");
+
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ingenic_adc_of_match[] = {
+ { .compatible = "ingenic,jz4725b-adc", .data = &jz4725b_adc_soc_data, },
+ { .compatible = "ingenic,jz4740-adc", .data = &jz4740_adc_soc_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ingenic_adc_of_match);
+#endif
+
+static struct platform_driver ingenic_adc_driver = {
+ .driver = {
+ .name = "ingenic-adc",
+ .of_match_table = of_match_ptr(ingenic_adc_of_match),
+ },
+ .probe = ingenic_adc_probe,
+};
+module_platform_driver(ingenic_adc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index 20b36690fa4f..e361c1532a75 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* lpc32xx_adc.c - Support for ADC in LPC32XX
*
* 3-channel, 10-bit ADC
*
* Copyright (C) 2011, 2012 Roland Stigge <stigge@antcom.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 729becb2d3d9..f8600fbcdfe3 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/mfd/syscon.h>
#define MESON_SAR_ADC_REG0 0x00
#define MESON_SAR_ADC_REG0_PANEL_DETECT BIT(31)
@@ -174,6 +175,9 @@
#define MESON_SAR_ADC_EFUSE_BYTE3_UPPER_ADC_VAL GENMASK(6, 0)
#define MESON_SAR_ADC_EFUSE_BYTE3_IS_CALIBRATED BIT(7)
+#define MESON_HHI_DPLL_TOP_0 0x318
+#define MESON_HHI_DPLL_TOP_0_TSC_BIT4 BIT(9)
+
/* for use with IIO_VAL_INT_PLUS_MICRO */
#define MILLION 1000000
@@ -280,6 +284,7 @@ struct meson_sar_adc_priv {
struct completion done;
int calibbias;
int calibscale;
+ struct regmap *tsc_regmap;
bool temperature_sensor_calibrated;
u8 temperature_sensor_coefficient;
u16 temperature_sensor_adc_val;
@@ -727,6 +732,15 @@ static int meson_sar_adc_temp_sensor_init(struct iio_dev *indio_dev)
return ret;
}
+ priv->tsc_regmap =
+ syscon_regmap_lookup_by_phandle(indio_dev->dev.parent->of_node,
+ "amlogic,hhi-sysctrl");
+ if (IS_ERR(priv->tsc_regmap)) {
+ dev_err(indio_dev->dev.parent,
+ "failed to get amlogic,hhi-sysctrl regmap\n");
+ return PTR_ERR(priv->tsc_regmap);
+ }
+
read_len = MESON_SAR_ADC_EFUSE_BYTES;
buf = nvmem_cell_read(temperature_calib, &read_len);
if (IS_ERR(buf)) {
@@ -861,6 +875,22 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
priv->temperature_sensor_coefficient);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
MESON_SAR_ADC_DELTA_10_TS_C_MASK, regval);
+
+ if (priv->param->temperature_trimming_bits == 5) {
+ if (priv->temperature_sensor_coefficient & BIT(4))
+ regval = MESON_HHI_DPLL_TOP_0_TSC_BIT4;
+ else
+ regval = 0;
+
+ /*
+			 * bit [4] (the 5th bit when counting from 1)
+ * of the TSC is located in the HHI register area.
+ */
+ regmap_update_bits(priv->tsc_regmap,
+ MESON_HHI_DPLL_TOP_0,
+ MESON_HHI_DPLL_TOP_0_TSC_BIT4,
+ regval);
+ }
} else {
regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELTA_10,
MESON_SAR_ADC_DELTA_10_TS_REVE1, 0);
@@ -1064,6 +1094,9 @@ static const struct meson_sar_adc_param meson_sar_adc_meson8b_param = {
.bandgap_reg = MESON_SAR_ADC_DELTA_10,
.regmap_config = &meson_sar_adc_regmap_config_meson8,
.resolution = 10,
+ .temperature_trimming_bits = 5,
+ .temperature_multiplier = 10,
+ .temperature_divider = 32,
};
static const struct meson_sar_adc_param meson_sar_adc_gxbb_param = {
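
Editor's note on the Meson8b calibration change above: the 5-bit temperature sensor coefficient (TSC) no longer fits a single register, so its low bits keep going through MESON_SAR_ADC_DELTA_10_TS_C_MASK while bit [4] is mirrored into bit 9 of HHI_DPLL_TOP_0, reached through the "amlogic,hhi-sysctrl" syscon. A minimal sketch of that split, using only defines introduced by the patch; the helper name is hypothetical and both regmaps are assumed to have been looked up as in the probe path:

static void meson_sar_adc_example_write_tsc(struct regmap *adc_regmap,
					    struct regmap *tsc_regmap, u8 tsc)
{
	/* bits [3:0] stay in the SAR ADC register block (DELTA_10) */
	regmap_update_bits(adc_regmap, MESON_SAR_ADC_DELTA_10,
			   MESON_SAR_ADC_DELTA_10_TS_C_MASK,
			   FIELD_PREP(MESON_SAR_ADC_DELTA_10_TS_C_MASK, tsc));

	/* bit [4] lives in the HHI register area: HHI_DPLL_TOP_0, bit 9 */
	regmap_update_bits(tsc_regmap, MESON_HHI_DPLL_TOP_0,
			   MESON_HHI_DPLL_TOP_0_TSC_BIT4,
			   (tsc & BIT(4)) ? MESON_HHI_DPLL_TOP_0_TSC_BIT4 : 0);
}
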
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
new file mode 100644
index 000000000000..9e25bbec9c70
--- /dev/null
+++ b/drivers/iio/adc/npcm_adc.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+struct npcm_adc {
+ bool int_status;
+ u32 adc_sample_hz;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *adc_clk;
+ wait_queue_head_t wq;
+ struct regulator *vref;
+ struct regmap *rst_regmap;
+};
+
+/* NPCM7xx reset module */
+#define NPCM7XX_IPSRST1_OFFSET 0x020
+#define NPCM7XX_IPSRST1_ADC_RST BIT(27)
+
+/* ADC registers */
+#define NPCM_ADCCON 0x00
+#define NPCM_ADCDATA 0x04
+
+/* ADCCON Register Bits */
+#define NPCM_ADCCON_ADC_INT_EN BIT(21)
+#define NPCM_ADCCON_REFSEL BIT(19)
+#define NPCM_ADCCON_ADC_INT_ST BIT(18)
+#define NPCM_ADCCON_ADC_EN BIT(17)
+#define NPCM_ADCCON_ADC_RST BIT(16)
+#define NPCM_ADCCON_ADC_CONV BIT(13)
+
+#define NPCM_ADCCON_CH_MASK GENMASK(27, 24)
+#define NPCM_ADCCON_CH(x) ((x) << 24)
+#define NPCM_ADCCON_DIV_SHIFT 1
+#define NPCM_ADCCON_DIV_MASK GENMASK(8, 1)
+#define NPCM_ADC_DATA_MASK(x) ((x) & GENMASK(9, 0))
+
+#define NPCM_ADC_ENABLE (NPCM_ADCCON_ADC_EN | NPCM_ADCCON_ADC_INT_EN)
+
+/* ADC General Definition */
+#define NPCM_RESOLUTION_BITS 10
+#define NPCM_INT_VREF_MV 2000
+
+#define NPCM_ADC_CHAN(ch) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = ch, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+static const struct iio_chan_spec npcm_adc_iio_channels[] = {
+ NPCM_ADC_CHAN(0),
+ NPCM_ADC_CHAN(1),
+ NPCM_ADC_CHAN(2),
+ NPCM_ADC_CHAN(3),
+ NPCM_ADC_CHAN(4),
+ NPCM_ADC_CHAN(5),
+ NPCM_ADC_CHAN(6),
+ NPCM_ADC_CHAN(7),
+};
+
+static irqreturn_t npcm_adc_isr(int irq, void *data)
+{
+ u32 regtemp;
+ struct iio_dev *indio_dev = data;
+ struct npcm_adc *info = iio_priv(indio_dev);
+
+ regtemp = ioread32(info->regs + NPCM_ADCCON);
+ if (regtemp & NPCM_ADCCON_ADC_INT_ST) {
+ iowrite32(regtemp, info->regs + NPCM_ADCCON);
+ wake_up_interruptible(&info->wq);
+ info->int_status = true;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int npcm_adc_read(struct npcm_adc *info, int *val, u8 channel)
+{
+ int ret;
+ u32 regtemp;
+
+ /* Select ADC channel */
+ regtemp = ioread32(info->regs + NPCM_ADCCON);
+ regtemp &= ~NPCM_ADCCON_CH_MASK;
+ info->int_status = false;
+ iowrite32(regtemp | NPCM_ADCCON_CH(channel) |
+ NPCM_ADCCON_ADC_CONV, info->regs + NPCM_ADCCON);
+
+ ret = wait_event_interruptible_timeout(info->wq, info->int_status,
+ msecs_to_jiffies(10));
+ if (ret == 0) {
+ regtemp = ioread32(info->regs + NPCM_ADCCON);
+ if ((regtemp & NPCM_ADCCON_ADC_CONV) && info->rst_regmap) {
+ /* if conversion failed - reset ADC module */
+ regmap_write(info->rst_regmap, NPCM7XX_IPSRST1_OFFSET,
+ NPCM7XX_IPSRST1_ADC_RST);
+ msleep(100);
+ regmap_write(info->rst_regmap, NPCM7XX_IPSRST1_OFFSET,
+ 0x0);
+ msleep(100);
+
+ /* Enable ADC and start conversion module */
+ iowrite32(NPCM_ADC_ENABLE | NPCM_ADCCON_ADC_CONV,
+ info->regs + NPCM_ADCCON);
+ dev_err(info->dev, "RESET ADC Complete\n");
+ }
+ return -ETIMEDOUT;
+ }
+ if (ret < 0)
+ return ret;
+
+ *val = NPCM_ADC_DATA_MASK(ioread32(info->regs + NPCM_ADCDATA));
+
+ return 0;
+}
+
+static int npcm_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret;
+ int vref_uv;
+ struct npcm_adc *info = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ ret = npcm_adc_read(info, val, chan->channel);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret) {
+ dev_err(info->dev, "NPCM ADC read failed\n");
+ return ret;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (info->vref) {
+ vref_uv = regulator_get_voltage(info->vref);
+ *val = vref_uv / 1000;
+ } else {
+ *val = NPCM_INT_VREF_MV;
+ }
+ *val2 = NPCM_RESOLUTION_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = info->adc_sample_hz;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct iio_info npcm_adc_iio_info = {
+ .read_raw = &npcm_adc_read_raw,
+};
+
+static const struct of_device_id npcm_adc_match[] = {
+ { .compatible = "nuvoton,npcm750-adc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, npcm_adc_match);
+
+static int npcm_adc_probe(struct platform_device *pdev)
+{
+ int ret;
+ int irq;
+ u32 div;
+ u32 reg_con;
+ struct resource *res;
+ struct npcm_adc *info;
+ struct iio_dev *indio_dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+ info = iio_priv(indio_dev);
+
+ info->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
+
+ info->adc_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->adc_clk)) {
+ dev_warn(&pdev->dev, "ADC clock failed: can't read clk\n");
+ return PTR_ERR(info->adc_clk);
+ }
+
+ /* calculate ADC clock sample rate */
+ reg_con = ioread32(info->regs + NPCM_ADCCON);
+ div = reg_con & NPCM_ADCCON_DIV_MASK;
+ div = div >> NPCM_ADCCON_DIV_SHIFT;
+ info->adc_sample_hz = clk_get_rate(info->adc_clk) / ((div + 1) * 2);
+
+ if (of_device_is_compatible(np, "nuvoton,npcm750-adc")) {
+ info->rst_regmap = syscon_regmap_lookup_by_compatible
+ ("nuvoton,npcm750-rst");
+ if (IS_ERR(info->rst_regmap)) {
+ dev_err(&pdev->dev, "Failed to find nuvoton,npcm750-rst\n");
+ ret = PTR_ERR(info->rst_regmap);
+ goto err_disable_clk;
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed getting interrupt resource\n");
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, npcm_adc_isr, 0,
+ "NPCM_ADC", indio_dev);
+ if (ret < 0) {
+ dev_err(dev, "failed requesting interrupt\n");
+ goto err_disable_clk;
+ }
+
+ reg_con = ioread32(info->regs + NPCM_ADCCON);
+ info->vref = devm_regulator_get_optional(&pdev->dev, "vref");
+ if (!IS_ERR(info->vref)) {
+ ret = regulator_enable(info->vref);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable ADC reference voltage\n");
+ goto err_disable_clk;
+ }
+
+ iowrite32(reg_con & ~NPCM_ADCCON_REFSEL,
+ info->regs + NPCM_ADCCON);
+ } else {
+ /*
+ * Any error which is not ENODEV indicates the regulator
+ * has been specified and so is a failure case.
+ */
+ if (PTR_ERR(info->vref) != -ENODEV) {
+ ret = PTR_ERR(info->vref);
+ goto err_disable_clk;
+ }
+
+ /* Use internal reference */
+ iowrite32(reg_con | NPCM_ADCCON_REFSEL,
+ info->regs + NPCM_ADCCON);
+ }
+
+ init_waitqueue_head(&info->wq);
+
+ reg_con = ioread32(info->regs + NPCM_ADCCON);
+ reg_con |= NPCM_ADC_ENABLE;
+
+ /* Enable the ADC Module */
+ iowrite32(reg_con, info->regs + NPCM_ADCCON);
+
+ /* Start ADC conversion */
+ iowrite32(reg_con | NPCM_ADCCON_ADC_CONV, info->regs + NPCM_ADCCON);
+
+ platform_set_drvdata(pdev, indio_dev);
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &npcm_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = npcm_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(npcm_adc_iio_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register the device.\n");
+ goto err_iio_register;
+ }
+
+ pr_info("NPCM ADC driver probed\n");
+
+ return 0;
+
+err_iio_register:
+ iowrite32(reg_con & ~NPCM_ADCCON_ADC_EN, info->regs + NPCM_ADCCON);
+ if (!IS_ERR(info->vref))
+ regulator_disable(info->vref);
+err_disable_clk:
+ clk_disable_unprepare(info->adc_clk);
+
+ return ret;
+}
+
+static int npcm_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct npcm_adc *info = iio_priv(indio_dev);
+ u32 regtemp;
+
+ iio_device_unregister(indio_dev);
+
+ regtemp = ioread32(info->regs + NPCM_ADCCON);
+ iowrite32(regtemp & ~NPCM_ADCCON_ADC_EN, info->regs + NPCM_ADCCON);
+ if (!IS_ERR(info->vref))
+ regulator_disable(info->vref);
+ clk_disable_unprepare(info->adc_clk);
+
+ return 0;
+}
+
+static struct platform_driver npcm_adc_driver = {
+ .probe = npcm_adc_probe,
+ .remove = npcm_adc_remove,
+ .driver = {
+ .name = "npcm_adc",
+ .of_match_table = npcm_adc_match,
+ },
+};
+
+module_platform_driver(npcm_adc_driver);
+
+MODULE_DESCRIPTION("Nuvoton NPCM ADC Driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
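
Editor's note: a quick sanity check of the sample-rate and scale arithmetic used in npcm_adc_probe() and npcm_adc_read_raw() above; the clock rate and divider value are illustrative only, not taken from real hardware:

/* Sketch of the math in the probe/read_raw paths of npcm_adc.c above. */
static unsigned int npcm_example_sample_hz(unsigned long clk_hz, u32 adccon)
{
	u32 div = (adccon & NPCM_ADCCON_DIV_MASK) >> NPCM_ADCCON_DIV_SHIFT;

	/* e.g. clk_hz = 25000000 and div = 24 give 25000000 / 50 = 500000 Hz */
	return clk_hz / ((div + 1) * 2);
}

/*
 * Scale reported with IIO_VAL_FRACTIONAL_LOG2: with the internal
 * reference (NPCM_INT_VREF_MV = 2000 mV) and 10-bit resolution,
 * userspace sees 2000 / 2^10 = 1.953125 mV per LSB.
 */
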
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index c30c002f1fef..4735f8a1ca9d 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -423,18 +423,14 @@ static irqreturn_t pm8xxx_eoc_irq(int irq, void *d)
static struct pm8xxx_chan_info *
pm8xxx_get_channel(struct pm8xxx_xoadc *adc, u8 chan)
{
- struct pm8xxx_chan_info *ch;
int i;
for (i = 0; i < adc->nchans; i++) {
- ch = &adc->chans[i];
+ struct pm8xxx_chan_info *ch = &adc->chans[i];
if (ch->hwchan->amux_channel == chan)
- break;
+ return ch;
}
- if (i == adc->nchans)
- return NULL;
-
- return ch;
+ return NULL;
}
static int pm8xxx_read_channel_rsv(struct pm8xxx_xoadc *adc,
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
new file mode 100644
index 000000000000..53f17e4f2f23
--- /dev/null
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI ADS124S0X chip family driver
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/sysfs.h>
+
+/* Commands */
+#define ADS124S08_CMD_NOP 0x00
+#define ADS124S08_CMD_WAKEUP 0x02
+#define ADS124S08_CMD_PWRDWN 0x04
+#define ADS124S08_CMD_RESET 0x06
+#define ADS124S08_CMD_START 0x08
+#define ADS124S08_CMD_STOP 0x0a
+#define ADS124S08_CMD_SYOCAL 0x16
+#define ADS124S08_CMD_SYGCAL 0x17
+#define ADS124S08_CMD_SFOCAL 0x19
+#define ADS124S08_CMD_RDATA 0x12
+#define ADS124S08_CMD_RREG 0x20
+#define ADS124S08_CMD_WREG 0x40
+
+/* Registers */
+#define ADS124S08_ID_REG 0x00
+#define ADS124S08_STATUS 0x01
+#define ADS124S08_INPUT_MUX 0x02
+#define ADS124S08_PGA 0x03
+#define ADS124S08_DATA_RATE 0x04
+#define ADS124S08_REF 0x05
+#define ADS124S08_IDACMAG 0x06
+#define ADS124S08_IDACMUX 0x07
+#define ADS124S08_VBIAS 0x08
+#define ADS124S08_SYS 0x09
+#define ADS124S08_OFCAL0 0x0a
+#define ADS124S08_OFCAL1 0x0b
+#define ADS124S08_OFCAL2 0x0c
+#define ADS124S08_FSCAL0 0x0d
+#define ADS124S08_FSCAL1 0x0e
+#define ADS124S08_FSCAL2 0x0f
+#define ADS124S08_GPIODAT 0x10
+#define ADS124S08_GPIOCON 0x11
+
+/* ADS124S0x common channels */
+#define ADS124S08_AIN0 0x00
+#define ADS124S08_AIN1 0x01
+#define ADS124S08_AIN2 0x02
+#define ADS124S08_AIN3 0x03
+#define ADS124S08_AIN4 0x04
+#define ADS124S08_AIN5 0x05
+#define ADS124S08_AINCOM 0x0c
+/* ADS124S08 only channels */
+#define ADS124S08_AIN6 0x06
+#define ADS124S08_AIN7 0x07
+#define ADS124S08_AIN8 0x08
+#define ADS124S08_AIN9 0x09
+#define ADS124S08_AIN10 0x0a
+#define ADS124S08_AIN11 0x0b
+#define ADS124S08_MAX_CHANNELS 12
+
+#define ADS124S08_POS_MUX_SHIFT 0x04
+#define ADS124S08_INT_REF 0x09
+
+#define ADS124S08_START_REG_MASK 0x1f
+#define ADS124S08_NUM_BYTES_MASK 0x1f
+
+#define ADS124S08_START_CONV 0x01
+#define ADS124S08_STOP_CONV 0x00
+
+enum ads124s_id {
+ ADS124S08_ID,
+ ADS124S06_ID,
+};
+
+struct ads124s_chip_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+};
+
+struct ads124s_private {
+ const struct ads124s_chip_info *chip_info;
+ struct gpio_desc *reset_gpio;
+ struct spi_device *spi;
+ struct mutex lock;
+ u8 data[5] ____cacheline_aligned;
+};
+
+#define ADS124S08_CHAN(index) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ }, \
+}
+
+static const struct iio_chan_spec ads124s06_channels[] = {
+ ADS124S08_CHAN(0),
+ ADS124S08_CHAN(1),
+ ADS124S08_CHAN(2),
+ ADS124S08_CHAN(3),
+ ADS124S08_CHAN(4),
+ ADS124S08_CHAN(5),
+};
+
+static const struct iio_chan_spec ads124s08_channels[] = {
+ ADS124S08_CHAN(0),
+ ADS124S08_CHAN(1),
+ ADS124S08_CHAN(2),
+ ADS124S08_CHAN(3),
+ ADS124S08_CHAN(4),
+ ADS124S08_CHAN(5),
+ ADS124S08_CHAN(6),
+ ADS124S08_CHAN(7),
+ ADS124S08_CHAN(8),
+ ADS124S08_CHAN(9),
+ ADS124S08_CHAN(10),
+ ADS124S08_CHAN(11),
+};
+
+static const struct ads124s_chip_info ads124s_chip_info_tbl[] = {
+ [ADS124S08_ID] = {
+ .channels = ads124s08_channels,
+ .num_channels = ARRAY_SIZE(ads124s08_channels),
+ },
+ [ADS124S06_ID] = {
+ .channels = ads124s06_channels,
+ .num_channels = ARRAY_SIZE(ads124s06_channels),
+ },
+};
+
+static int ads124s_write_cmd(struct iio_dev *indio_dev, u8 command)
+{
+ struct ads124s_private *priv = iio_priv(indio_dev);
+
+ priv->data[0] = command;
+
+ return spi_write(priv->spi, &priv->data[0], 1);
+}
+
+static int ads124s_write_reg(struct iio_dev *indio_dev, u8 reg, u8 data)
+{
+ struct ads124s_private *priv = iio_priv(indio_dev);
+
+ priv->data[0] = ADS124S08_CMD_WREG | reg;
+ priv->data[1] = 0x0;
+ priv->data[2] = data;
+
+ return spi_write(priv->spi, &priv->data[0], 3);
+}
+
+static int ads124s_reset(struct iio_dev *indio_dev)
+{
+ struct ads124s_private *priv = iio_priv(indio_dev);
+
+ if (priv->reset_gpio) {
+ gpiod_set_value(priv->reset_gpio, 0);
+ udelay(200);
+ gpiod_set_value(priv->reset_gpio, 1);
+ } else {
+ return ads124s_write_cmd(indio_dev, ADS124S08_CMD_RESET);
+ }
+
+ return 0;
+};
+
+static int ads124s_read(struct iio_dev *indio_dev, unsigned int chan)
+{
+ struct ads124s_private *priv = iio_priv(indio_dev);
+ int ret;
+ u32 tmp;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &priv->data[0],
+ .len = 4,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &priv->data[1],
+ .rx_buf = &priv->data[1],
+ .len = 4,
+ },
+ };
+
+ priv->data[0] = ADS124S08_CMD_RDATA;
+	memset(&priv->data[1], ADS124S08_CMD_NOP, sizeof(priv->data) - 1);
+
+ ret = spi_sync_transfer(priv->spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ return ret;
+
+ tmp = priv->data[2] << 16 | priv->data[3] << 8 | priv->data[4];
+
+ return tmp;
+}
+
+static int ads124s_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct ads124s_private *priv = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&priv->lock);
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ads124s_write_reg(indio_dev, ADS124S08_INPUT_MUX,
+ chan->channel);
+ if (ret) {
+ dev_err(&priv->spi->dev, "Set ADC CH failed\n");
+ goto out;
+ }
+
+		ret = ads124s_write_cmd(indio_dev, ADS124S08_CMD_START);
+ if (ret) {
+ dev_err(&priv->spi->dev, "Start conversions failed\n");
+ goto out;
+ }
+
+ ret = ads124s_read(indio_dev, chan->channel);
+ if (ret < 0) {
+ dev_err(&priv->spi->dev, "Read ADC failed\n");
+ goto out;
+ }
+
+ *val = ret;
+
+		ret = ads124s_write_cmd(indio_dev, ADS124S08_CMD_STOP);
+ if (ret) {
+ dev_err(&priv->spi->dev, "Stop conversions failed\n");
+ goto out;
+ }
+
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct iio_info ads124s_info = {
+ .read_raw = &ads124s_read_raw,
+};
+
+static irqreturn_t ads124s_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ads124s_private *priv = iio_priv(indio_dev);
+ u32 buffer[ADS124S08_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
+ int scan_index, j = 0;
+ int ret;
+
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = ads124s_write_reg(indio_dev, ADS124S08_INPUT_MUX,
+ scan_index);
+ if (ret)
+ dev_err(&priv->spi->dev, "Set ADC CH failed\n");
+
+		ret = ads124s_write_cmd(indio_dev, ADS124S08_CMD_START);
+ if (ret)
+ dev_err(&priv->spi->dev, "Start ADC conversions failed\n");
+
+ buffer[j] = ads124s_read(indio_dev, scan_index);
+		ret = ads124s_write_cmd(indio_dev, ADS124S08_CMD_STOP);
+ if (ret)
+ dev_err(&priv->spi->dev, "Stop ADC conversions failed\n");
+
+ j++;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ pf->timestamp);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ads124s_probe(struct spi_device *spi)
+{
+ struct ads124s_private *ads124s_priv;
+ struct iio_dev *indio_dev;
+ const struct spi_device_id *spi_id = spi_get_device_id(spi);
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*ads124s_priv));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ ads124s_priv = iio_priv(indio_dev);
+
+ ads124s_priv->reset_gpio = devm_gpiod_get_optional(&spi->dev,
+ "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ads124s_priv->reset_gpio))
+		return PTR_ERR(ads124s_priv->reset_gpio);
+
+ ads124s_priv->chip_info = &ads124s_chip_info_tbl[spi_id->driver_data];
+
+ spi_set_drvdata(spi, indio_dev);
+
+ ads124s_priv->spi = spi;
+
+ indio_dev->name = spi_id->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ads124s_priv->chip_info->channels;
+ indio_dev->num_channels = ads124s_priv->chip_info->num_channels;
+ indio_dev->info = &ads124s_info;
+
+ mutex_init(&ads124s_priv->lock);
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
+ ads124s_trigger_handler, NULL);
+ if (ret) {
+ dev_err(&spi->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+
+ ads124s_reset(indio_dev);
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ads124s_id[] = {
+ { "ads124s06", ADS124S06_ID },
+ { "ads124s08", ADS124S08_ID },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ads124s_id);
+
+static const struct of_device_id ads124s_of_table[] = {
+ { .compatible = "ti,ads124s06" },
+ { .compatible = "ti,ads124s08" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ads124s_of_table);
+
+static struct spi_driver ads124s_driver = {
+ .driver = {
+ .name = "ads124s08",
+ .of_match_table = ads124s_of_table,
+ },
+ .probe = ads124s_probe,
+ .id_table = ads124s_id,
+};
+module_spi_driver(ads124s_driver);
+
+MODULE_AUTHOR("Dan Murphy <dmuprhy@ti.com>");
+MODULE_DESCRIPTION("TI TI_ADS12S0X ADC");
+MODULE_LICENSE("GPL v2");
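
Editor's note: to make the RDATA path easier to follow, here is a small sketch of how ads124s_read() above assembles the 24-bit conversion result from the second SPI transfer; the helper name and the example bytes are made up for illustration:

/* rx points at &priv->data[1], i.e. the rx buffer of transfer t[1] */
static u32 ads124s_example_sample(const u8 *rx)
{
	/*
	 * rx[1], rx[2] and rx[3] hold the MSB, middle and LSB of the
	 * conversion, e.g. { 0x12, 0x34, 0x56 } -> 0x123456.
	 */
	return rx[1] << 16 | rx[2] << 8 | rx[3];
}
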
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 184d686ebd99..8b4568edd5cb 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -41,6 +41,7 @@
#define ADS8688_VREF_MV 4096
#define ADS8688_REALBITS 16
+#define ADS8688_MAX_CHANNELS 8
/*
* enum ads8688_range - ADS8688 reference voltage range
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
- u16 buffer[8];
+ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
int i, j = 0;
for (i = 0; i < indio_dev->masklength; i++) {
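
Editor's note: the one-line buffer change above exists because iio_push_to_buffers_with_timestamp() appends an s64 timestamp after the scan data; the arithmetic behind the new size, with a hypothetical macro name, is simply:

/*
 * 8 channels of u16               -> 16 bytes of scan data
 * sizeof(s64) / sizeof(u16) = 4   ->  4 more u16 slots (8 bytes) for
 * the timestamp, so the buffer holds 12 u16 entries in total.
 */
#define ADS8688_EXAMPLE_BUF_WORDS \
	(ADS8688_MAX_CHANNELS + sizeof(s64) / sizeof(u16))	/* == 12 */
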
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index cafb1dcadc48..9d984f2a8ba7 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -142,7 +142,10 @@ static void tiadc_step_config(struct iio_dev *indio_dev)
stepconfig |= STEPCONFIG_MODE_SWCNT;
tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
- stepconfig | STEPCONFIG_INP(chan));
+ stepconfig | STEPCONFIG_INP(chan) |
+ STEPCONFIG_INM_ADCREFM |
+ STEPCONFIG_RFP_VREFP |
+ STEPCONFIG_RFM_VREFN);
if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) {
dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n",
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 3f6be5ac049a..b13c61539d46 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1273,8 +1273,10 @@ static int xadc_probe(struct platform_device *pdev)
xadc->threshold[i] = 0xffff;
else
xadc->threshold[i] = 0;
- xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+ ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
xadc->threshold[i]);
+ if (ret)
+ goto err_free_irq;
}
/* Go to non-buffered mode */
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index b8e005be4f87..d5d146e9e372 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -61,6 +61,27 @@ config IAQCORE
iAQ-Core Continuous/Pulsed VOC (Volatile Organic Compounds)
sensors
+config PMS7003
+ tristate "Plantower PMS7003 particulate matter sensor"
+ depends on SERIAL_DEV_BUS
+ help
+ Say Y here to build support for the Plantower PMS7003 particulate
+ matter sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called pms7003.
+
+config SPS30
+ tristate "SPS30 particulate matter sensor"
+ depends on I2C
+ select CRC8
+ help
+ Say Y here to build support for the Sensirion SPS30 particulate
+ matter sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called sps30.
+
config VZ89X
tristate "SGX Sensortech MiCS VZ89X VOC sensor"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 2f4c4ba4d781..f5d1365acb49 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -9,4 +9,7 @@ obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
obj-$(CONFIG_BME680_SPI) += bme680_spi.o
obj-$(CONFIG_CCS811) += ccs811.o
obj-$(CONFIG_IAQCORE) += ams-iaq-core.o
+obj-$(CONFIG_PMS7003) += pms7003.o
+obj-$(CONFIG_SENSIRION_SGP30) += sgp30.o
+obj-$(CONFIG_SPS30) += sps30.o
obj-$(CONFIG_VZ89X) += vz89x.o
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index a406ad31b096..3a20cb5d9bff 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_TEMP:
- *val = 1; /* 0.01 */
- *val2 = 100;
- break;
+ *val = 10;
+ return IIO_VAL_INT;
case IIO_PH:
*val = 1; /* 0.001 */
*val2 = 1000;
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct atlas_data *data = iio_priv(indio_dev);
- __be32 reg = cpu_to_be32(val);
+ __be32 reg = cpu_to_be32(val / 10);
if (val2 != 0 || val < 0 || val > 20000)
return -EINVAL;
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index 06d4be539d2e..b2f805b6b36a 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -70,10 +70,17 @@ static const struct acpi_device_id bme680_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bme680_acpi_match);
+static const struct of_device_id bme680_of_i2c_match[] = {
+ { .compatible = "bosch,bme680", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bme680_of_i2c_match);
+
static struct i2c_driver bme680_i2c_driver = {
.driver = {
.name = "bme680_i2c",
.acpi_match_table = ACPI_PTR(bme680_acpi_match),
+ .of_match_table = bme680_of_i2c_match,
},
.probe = bme680_i2c_probe,
.id_table = bme680_i2c_id,
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index c9fb05e8d0b9..d0b7bdd3f066 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -110,10 +111,17 @@ static const struct acpi_device_id bme680_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bme680_acpi_match);
+static const struct of_device_id bme680_of_spi_match[] = {
+ { .compatible = "bosch,bme680", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bme680_of_spi_match);
+
static struct spi_driver bme680_spi_driver = {
.driver = {
.name = "bme680_spi",
.acpi_match_table = ACPI_PTR(bme680_acpi_match),
+ .of_match_table = bme680_of_spi_match,
},
.probe = bme680_spi_probe,
.id_table = bme680_spi_id,
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
new file mode 100644
index 000000000000..db8e7b2327b3
--- /dev/null
+++ b/drivers/iio/chemical/pms7003.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Plantower PMS7003 particulate matter sensor driver
+ *
+ * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/serdev.h>
+
+#define PMS7003_DRIVER_NAME "pms7003"
+
+#define PMS7003_MAGIC 0x424d
+/* last 2 data bytes hold frame checksum */
+#define PMS7003_MAX_DATA_LENGTH 28
+#define PMS7003_CHECKSUM_LENGTH 2
+#define PMS7003_PM10_OFFSET 10
+#define PMS7003_PM2P5_OFFSET 8
+#define PMS7003_PM1_OFFSET 6
+
+#define PMS7003_TIMEOUT msecs_to_jiffies(6000)
+#define PMS7003_CMD_LENGTH 7
+#define PMS7003_PM_MAX 1000
+#define PMS7003_PM_MIN 0
+
+enum {
+ PM1,
+ PM2P5,
+ PM10,
+};
+
+enum pms7003_cmd {
+ CMD_WAKEUP,
+ CMD_ENTER_PASSIVE_MODE,
+ CMD_READ_PASSIVE,
+ CMD_SLEEP,
+};
+
+/*
+ * commands have the following format:
+ *
+ * +------+------+-----+------+-----+-----------+-----------+
+ * | 0x42 | 0x4d | cmd | 0x00 | arg | cksum msb | cksum lsb |
+ * +------+------+-----+------+-----+-----------+-----------+
+ */
+static const u8 pms7003_cmd_tbl[][PMS7003_CMD_LENGTH] = {
+ [CMD_WAKEUP] = { 0x42, 0x4d, 0xe4, 0x00, 0x01, 0x01, 0x74 },
+ [CMD_ENTER_PASSIVE_MODE] = { 0x42, 0x4d, 0xe1, 0x00, 0x00, 0x01, 0x70 },
+ [CMD_READ_PASSIVE] = { 0x42, 0x4d, 0xe2, 0x00, 0x00, 0x01, 0x71 },
+ [CMD_SLEEP] = { 0x42, 0x4d, 0xe4, 0x00, 0x00, 0x01, 0x73 },
+};
+
+struct pms7003_frame {
+ u8 data[PMS7003_MAX_DATA_LENGTH];
+ u16 expected_length;
+ u16 length;
+};
+
+struct pms7003_state {
+ struct serdev_device *serdev;
+ struct pms7003_frame frame;
+ struct completion frame_ready;
+ struct mutex lock; /* must be held whenever state gets touched */
+};
+
+static int pms7003_do_cmd(struct pms7003_state *state, enum pms7003_cmd cmd)
+{
+ int ret;
+
+ ret = serdev_device_write(state->serdev, pms7003_cmd_tbl[cmd],
+ PMS7003_CMD_LENGTH, PMS7003_TIMEOUT);
+ if (ret < PMS7003_CMD_LENGTH)
+ return ret < 0 ? ret : -EIO;
+
+ ret = wait_for_completion_interruptible_timeout(&state->frame_ready,
+ PMS7003_TIMEOUT);
+ if (!ret)
+ ret = -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+static u16 pms7003_get_pm(const u8 *data)
+{
+ return clamp_val(get_unaligned_be16(data),
+ PMS7003_PM_MIN, PMS7003_PM_MAX);
+}
+
+static irqreturn_t pms7003_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct pms7003_state *state = iio_priv(indio_dev);
+ struct pms7003_frame *frame = &state->frame;
+ u16 data[3 + 1 + 4]; /* PM1, PM2P5, PM10, padding, timestamp */
+ int ret;
+
+ mutex_lock(&state->lock);
+ ret = pms7003_do_cmd(state, CMD_READ_PASSIVE);
+ if (ret) {
+ mutex_unlock(&state->lock);
+ goto err;
+ }
+
+ data[PM1] = pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET);
+ data[PM2P5] = pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET);
+ data[PM10] = pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET);
+ mutex_unlock(&state->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int pms7003_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct pms7003_state *state = iio_priv(indio_dev);
+ struct pms7003_frame *frame = &state->frame;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_MASSCONCENTRATION:
+ mutex_lock(&state->lock);
+ ret = pms7003_do_cmd(state, CMD_READ_PASSIVE);
+ if (ret) {
+ mutex_unlock(&state->lock);
+ return ret;
+ }
+
+ *val = pms7003_get_pm(frame->data + chan->address);
+ mutex_unlock(&state->lock);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info pms7003_info = {
+ .read_raw = pms7003_read_raw,
+};
+
+#define PMS7003_CHAN(_index, _mod, _addr) { \
+ .type = IIO_MASSCONCENTRATION, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _mod, \
+ .address = _addr, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+static const struct iio_chan_spec pms7003_channels[] = {
+ PMS7003_CHAN(0, PM1, PMS7003_PM1_OFFSET),
+ PMS7003_CHAN(1, PM2P5, PMS7003_PM2P5_OFFSET),
+ PMS7003_CHAN(2, PM10, PMS7003_PM10_OFFSET),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static u16 pms7003_calc_checksum(struct pms7003_frame *frame)
+{
+ u16 checksum = (PMS7003_MAGIC >> 8) + (u8)(PMS7003_MAGIC & 0xff) +
+ (frame->length >> 8) + (u8)frame->length;
+ int i;
+
+ for (i = 0; i < frame->length - PMS7003_CHECKSUM_LENGTH; i++)
+ checksum += frame->data[i];
+
+ return checksum;
+}
+
+static bool pms7003_frame_is_okay(struct pms7003_frame *frame)
+{
+ int offset = frame->length - PMS7003_CHECKSUM_LENGTH;
+ u16 checksum = get_unaligned_be16(frame->data + offset);
+
+ return checksum == pms7003_calc_checksum(frame);
+}
+
+static int pms7003_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
+ struct pms7003_state *state = iio_priv(indio_dev);
+ struct pms7003_frame *frame = &state->frame;
+ int num;
+
+ if (!frame->expected_length) {
+ u16 magic;
+
+ /* wait for SOF and data length */
+ if (size < 4)
+ return 0;
+
+ magic = get_unaligned_be16(buf);
+ if (magic != PMS7003_MAGIC)
+ return 2;
+
+ num = get_unaligned_be16(buf + 2);
+ if (num <= PMS7003_MAX_DATA_LENGTH) {
+ frame->expected_length = num;
+ frame->length = 0;
+ }
+
+ return 4;
+ }
+
+ num = min(size, (size_t)(frame->expected_length - frame->length));
+ memcpy(frame->data + frame->length, buf, num);
+ frame->length += num;
+
+ if (frame->length == frame->expected_length) {
+ if (pms7003_frame_is_okay(frame))
+ complete(&state->frame_ready);
+
+ frame->expected_length = 0;
+ }
+
+ return num;
+}
+
+static const struct serdev_device_ops pms7003_serdev_ops = {
+ .receive_buf = pms7003_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static void pms7003_stop(void *data)
+{
+ struct pms7003_state *state = data;
+
+ pms7003_do_cmd(state, CMD_SLEEP);
+}
+
+static const unsigned long pms7003_scan_masks[] = { 0x07, 0x00 };
+
+static int pms7003_probe(struct serdev_device *serdev)
+{
+ struct pms7003_state *state;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&serdev->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ state = iio_priv(indio_dev);
+ serdev_device_set_drvdata(serdev, indio_dev);
+ state->serdev = serdev;
+ indio_dev->dev.parent = &serdev->dev;
+ indio_dev->info = &pms7003_info;
+ indio_dev->name = PMS7003_DRIVER_NAME;
+	indio_dev->channels = pms7003_channels;
+ indio_dev->num_channels = ARRAY_SIZE(pms7003_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = pms7003_scan_masks;
+
+ mutex_init(&state->lock);
+ init_completion(&state->frame_ready);
+
+ serdev_device_set_client_ops(serdev, &pms7003_serdev_ops);
+ ret = devm_serdev_device_open(&serdev->dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, 9600);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret)
+ return ret;
+
+ ret = pms7003_do_cmd(state, CMD_WAKEUP);
+ if (ret) {
+ dev_err(&serdev->dev, "failed to wakeup sensor\n");
+ return ret;
+ }
+
+ ret = pms7003_do_cmd(state, CMD_ENTER_PASSIVE_MODE);
+ if (ret) {
+ dev_err(&serdev->dev, "failed to enter passive mode\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&serdev->dev, pms7003_stop, state);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_triggered_buffer_setup(&serdev->dev, indio_dev, NULL,
+ pms7003_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&serdev->dev, indio_dev);
+}
+
+static const struct of_device_id pms7003_of_match[] = {
+ { .compatible = "plantower,pms7003" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pms7003_of_match);
+
+static struct serdev_device_driver pms7003_driver = {
+ .driver = {
+ .name = PMS7003_DRIVER_NAME,
+ .of_match_table = pms7003_of_match,
+ },
+ .probe = pms7003_probe,
+};
+module_serdev_device_driver(pms7003_driver);
+
+MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>");
+MODULE_DESCRIPTION("Plantower PMS7003 particulate matter sensor driver");
+MODULE_LICENSE("GPL v2");
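
Editor's note: as a companion to the frame handling above, a short sketch of how the checksum carried in the last two data bytes is computed on the host side; it mirrors pms7003_calc_checksum() and uses only values defined in the patch:

static u16 pms7003_example_checksum(const u8 *data, u16 length)
{
	/* the sum covers both magic bytes and both length bytes ... */
	u16 sum = 0x42 + 0x4d + (length >> 8) + (length & 0xff);
	int i;

	/* ... plus every data byte except the trailing checksum itself */
	for (i = 0; i < length - PMS7003_CHECKSUM_LENGTH; i++)
		sum += data[i];

	return sum;	/* compared against the big-endian trailer */
}
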
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
new file mode 100644
index 000000000000..8cc8fe5e356d
--- /dev/null
+++ b/drivers/iio/chemical/sgp30.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sgp30.c - Support for Sensirion SGP Gas Sensors
+ *
+ * Copyright (C) 2018 Andreas Brauchli <andreas.brauchli@sensirion.com>
+ *
+ * I2C slave address: 0x58
+ *
+ * Datasheets:
+ * https://www.sensirion.com/file/datasheet_sgp30
+ * https://www.sensirion.com/file/datasheet_sgpc3
+ *
+ * TODO:
+ * - baseline support
+ * - humidity compensation
+ * - power mode switching (SGPC3)
+ */
+
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define SGP_WORD_LEN 2
+#define SGP_CRC8_POLYNOMIAL 0x31
+#define SGP_CRC8_INIT 0xff
+#define SGP_CRC8_LEN 1
+#define SGP_CMD(cmd_word) cpu_to_be16(cmd_word)
+#define SGP_CMD_DURATION_US 12000
+#define SGP_MEASUREMENT_DURATION_US 50000
+#define SGP_CMD_LEN SGP_WORD_LEN
+#define SGP_CMD_MAX_BUF_SIZE (SGP_CMD_LEN + 2 * SGP_WORD_LEN)
+#define SGP_MEASUREMENT_LEN 2
+#define SGP30_MEASURE_INTERVAL_HZ 1
+#define SGPC3_MEASURE_INTERVAL_HZ 2
+#define SGP_VERS_PRODUCT(data) ((((data)->feature_set) & 0xf000) >> 12)
+#define SGP_VERS_RESERVED(data) ((((data)->feature_set) & 0x0800) >> 11)
+#define SGP_VERS_GEN(data) ((((data)->feature_set) & 0x0600) >> 9)
+#define SGP_VERS_ENG_BIT(data) ((((data)->feature_set) & 0x0100) >> 8)
+#define SGP_VERS_MAJOR(data) ((((data)->feature_set) & 0x00e0) >> 5)
+#define SGP_VERS_MINOR(data) (((data)->feature_set) & 0x001f)
+
+DECLARE_CRC8_TABLE(sgp_crc8_table);
+
+enum sgp_product_id {
+ SGP30 = 0,
+ SGPC3,
+};
+
+enum sgp30_channel_idx {
+ SGP30_IAQ_TVOC_IDX = 0,
+ SGP30_IAQ_CO2EQ_IDX,
+ SGP30_SIG_ETOH_IDX,
+ SGP30_SIG_H2_IDX,
+};
+
+enum sgpc3_channel_idx {
+ SGPC3_IAQ_TVOC_IDX = 10,
+ SGPC3_SIG_ETOH_IDX,
+};
+
+enum sgp_cmd {
+ SGP_CMD_IAQ_INIT = SGP_CMD(0x2003),
+ SGP_CMD_IAQ_MEASURE = SGP_CMD(0x2008),
+ SGP_CMD_GET_FEATURE_SET = SGP_CMD(0x202f),
+ SGP_CMD_GET_SERIAL_ID = SGP_CMD(0x3682),
+
+ SGP30_CMD_MEASURE_SIGNAL = SGP_CMD(0x2050),
+
+ SGPC3_CMD_MEASURE_RAW = SGP_CMD(0x2046),
+};
+
+struct sgp_version {
+ u8 major;
+ u8 minor;
+};
+
+struct sgp_crc_word {
+ __be16 value;
+ u8 crc8;
+} __attribute__((__packed__));
+
+union sgp_reading {
+ u8 start;
+ struct sgp_crc_word raw_words[4];
+};
+
+enum _iaq_buffer_state {
+ IAQ_BUFFER_EMPTY = 0,
+ IAQ_BUFFER_DEFAULT_VALS,
+ IAQ_BUFFER_VALID,
+};
+
+struct sgp_data {
+ struct i2c_client *client;
+ struct task_struct *iaq_thread;
+ struct mutex data_lock;
+ unsigned long iaq_init_start_jiffies;
+ unsigned long iaq_defval_skip_jiffies;
+ u16 product_id;
+ u16 feature_set;
+ unsigned long measure_interval_jiffies;
+ enum sgp_cmd iaq_init_cmd;
+ enum sgp_cmd measure_iaq_cmd;
+ enum sgp_cmd measure_gas_signals_cmd;
+ union sgp_reading buffer;
+ union sgp_reading iaq_buffer;
+ enum _iaq_buffer_state iaq_buffer_state;
+};
+
+struct sgp_device {
+ const struct iio_chan_spec *channels;
+ int num_channels;
+};
+
+static const struct sgp_version supported_versions_sgp30[] = {
+ {
+ .major = 1,
+ .minor = 0,
+ },
+};
+
+static const struct sgp_version supported_versions_sgpc3[] = {
+ {
+ .major = 0,
+ .minor = 4,
+ },
+};
+
+static const struct iio_chan_spec sgp30_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_VOC,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = SGP30_IAQ_TVOC_IDX,
+ },
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_CO2,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = SGP30_IAQ_CO2EQ_IDX,
+ },
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_ETHANOL,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .address = SGP30_SIG_ETOH_IDX,
+ },
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_H2,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .address = SGP30_SIG_H2_IDX,
+ },
+};
+
+static const struct iio_chan_spec sgpc3_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_VOC,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = SGPC3_IAQ_TVOC_IDX,
+ },
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_ETHANOL,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .address = SGPC3_SIG_ETOH_IDX,
+ },
+};
+
+static const struct sgp_device sgp_devices[] = {
+ [SGP30] = {
+ .channels = sgp30_channels,
+ .num_channels = ARRAY_SIZE(sgp30_channels),
+ },
+ [SGPC3] = {
+ .channels = sgpc3_channels,
+ .num_channels = ARRAY_SIZE(sgpc3_channels),
+ },
+};
+
+/**
+ * sgp_verify_buffer() - verify the checksums of the data buffer words
+ *
+ * @data: SGP data
+ * @buf: Raw data buffer
+ * @word_count: Num data words stored in the buffer, excluding CRC bytes
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+static int sgp_verify_buffer(const struct sgp_data *data,
+ union sgp_reading *buf, size_t word_count)
+{
+ size_t size = word_count * (SGP_WORD_LEN + SGP_CRC8_LEN);
+ int i;
+ u8 crc;
+ u8 *data_buf = &buf->start;
+
+ for (i = 0; i < size; i += SGP_WORD_LEN + SGP_CRC8_LEN) {
+ crc = crc8(sgp_crc8_table, &data_buf[i], SGP_WORD_LEN,
+ SGP_CRC8_INIT);
+ if (crc != data_buf[i + SGP_WORD_LEN]) {
+ dev_err(&data->client->dev, "CRC error\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sgp_read_cmd() - reads data from sensor after issuing a command
+ * The caller must hold data->data_lock for the duration of the call.
+ * @data: SGP data
+ * @cmd: SGP Command to issue
+ * @buf: Raw data buffer to use
+ * @word_count: Num words to read, excluding CRC bytes
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+static int sgp_read_cmd(struct sgp_data *data, enum sgp_cmd cmd,
+ union sgp_reading *buf, size_t word_count,
+ unsigned long duration_us)
+{
+ int ret;
+ struct i2c_client *client = data->client;
+ size_t size = word_count * (SGP_WORD_LEN + SGP_CRC8_LEN);
+ u8 *data_buf;
+
+ ret = i2c_master_send(client, (const char *)&cmd, SGP_CMD_LEN);
+ if (ret != SGP_CMD_LEN)
+ return -EIO;
+ usleep_range(duration_us, duration_us + 1000);
+
+ if (word_count == 0)
+ return 0;
+
+ data_buf = &buf->start;
+ ret = i2c_master_recv(client, data_buf, size);
+ if (ret < 0)
+ return ret;
+ if (ret != size)
+ return -EIO;
+
+ return sgp_verify_buffer(data, buf, word_count);
+}
+
+/**
+ * sgp_measure_iaq() - measure and retrieve IAQ values from sensor
+ * The caller must hold data->data_lock for the duration of the call.
+ * @data: SGP data
+ *
+ * Return: 0 on success, -EBUSY on default values, negative error
+ * otherwise.
+ */
+
+static int sgp_measure_iaq(struct sgp_data *data)
+{
+ int ret;
+ /* data contains default values */
+ bool default_vals = !time_after(jiffies, data->iaq_init_start_jiffies +
+ data->iaq_defval_skip_jiffies);
+
+ ret = sgp_read_cmd(data, data->measure_iaq_cmd, &data->iaq_buffer,
+ SGP_MEASUREMENT_LEN, SGP_MEASUREMENT_DURATION_US);
+ if (ret < 0)
+ return ret;
+
+ data->iaq_buffer_state = IAQ_BUFFER_DEFAULT_VALS;
+
+ if (default_vals)
+ return -EBUSY;
+
+ data->iaq_buffer_state = IAQ_BUFFER_VALID;
+
+ return 0;
+}
+
+static void sgp_iaq_thread_sleep_until(const struct sgp_data *data,
+ unsigned long sleep_jiffies)
+{
+ const long IAQ_POLL = 50000;
+
+ while (!time_after(jiffies, sleep_jiffies)) {
+ usleep_range(IAQ_POLL, IAQ_POLL + 10000);
+ if (kthread_should_stop() || data->iaq_init_start_jiffies == 0)
+ return;
+ }
+}
+
+static int sgp_iaq_threadfn(void *p)
+{
+ struct sgp_data *data = (struct sgp_data *)p;
+ unsigned long next_update_jiffies;
+ int ret;
+
+ while (!kthread_should_stop()) {
+ mutex_lock(&data->data_lock);
+ if (data->iaq_init_start_jiffies == 0) {
+ ret = sgp_read_cmd(data, data->iaq_init_cmd, NULL, 0,
+ SGP_CMD_DURATION_US);
+ if (ret < 0)
+ goto unlock_sleep_continue;
+ data->iaq_init_start_jiffies = jiffies;
+ }
+
+ ret = sgp_measure_iaq(data);
+ if (ret && ret != -EBUSY) {
+ dev_warn(&data->client->dev,
+ "IAQ measurement error [%d]\n", ret);
+ }
+unlock_sleep_continue:
+ next_update_jiffies = jiffies + data->measure_interval_jiffies;
+ mutex_unlock(&data->data_lock);
+ sgp_iaq_thread_sleep_until(data, next_update_jiffies);
+ }
+
+ return 0;
+}
+
+static int sgp_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct sgp_data *data = iio_priv(indio_dev);
+ struct sgp_crc_word *words;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&data->data_lock);
+ if (data->iaq_buffer_state != IAQ_BUFFER_VALID) {
+ mutex_unlock(&data->data_lock);
+ return -EBUSY;
+ }
+ words = data->iaq_buffer.raw_words;
+ switch (chan->address) {
+ case SGP30_IAQ_TVOC_IDX:
+ case SGPC3_IAQ_TVOC_IDX:
+ *val = 0;
+ *val2 = be16_to_cpu(words[1].value);
+ ret = IIO_VAL_INT_PLUS_NANO;
+ break;
+ case SGP30_IAQ_CO2EQ_IDX:
+ *val = 0;
+ *val2 = be16_to_cpu(words[0].value);
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->data_lock);
+ break;
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->data_lock);
+ if (chan->address == SGPC3_SIG_ETOH_IDX) {
+ if (data->iaq_buffer_state == IAQ_BUFFER_EMPTY)
+ ret = -EBUSY;
+ else
+ ret = 0;
+ words = data->iaq_buffer.raw_words;
+ } else {
+ ret = sgp_read_cmd(data, data->measure_gas_signals_cmd,
+ &data->buffer, SGP_MEASUREMENT_LEN,
+ SGP_MEASUREMENT_DURATION_US);
+ words = data->buffer.raw_words;
+ }
+ if (ret) {
+ mutex_unlock(&data->data_lock);
+ return ret;
+ }
+
+ switch (chan->address) {
+ case SGP30_SIG_ETOH_IDX:
+ *val = be16_to_cpu(words[1].value);
+ ret = IIO_VAL_INT;
+ break;
+ case SGPC3_SIG_ETOH_IDX:
+ case SGP30_SIG_H2_IDX:
+ *val = be16_to_cpu(words[0].value);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->data_lock);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int sgp_check_compat(struct sgp_data *data,
+ unsigned int product_id)
+{
+ const struct sgp_version *supported_versions;
+ u16 ix, num_fs;
+ u16 product, generation, major, minor;
+
+ /* driver does not match product */
+ generation = SGP_VERS_GEN(data);
+ if (generation != 0) {
+ dev_err(&data->client->dev,
+ "incompatible product generation %d != 0", generation);
+ return -ENODEV;
+ }
+
+ product = SGP_VERS_PRODUCT(data);
+ if (product != product_id) {
+ dev_err(&data->client->dev,
+ "sensor reports a different product: 0x%04hx\n",
+ product);
+ return -ENODEV;
+ }
+
+ if (SGP_VERS_RESERVED(data))
+ dev_warn(&data->client->dev, "reserved bit is set\n");
+
+ /* engineering samples are not supported: no interface guarantees */
+ if (SGP_VERS_ENG_BIT(data))
+ return -ENODEV;
+
+ switch (product) {
+ case SGP30:
+ supported_versions = supported_versions_sgp30;
+ num_fs = ARRAY_SIZE(supported_versions_sgp30);
+ break;
+ case SGPC3:
+ supported_versions = supported_versions_sgpc3;
+ num_fs = ARRAY_SIZE(supported_versions_sgpc3);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ major = SGP_VERS_MAJOR(data);
+ minor = SGP_VERS_MINOR(data);
+ for (ix = 0; ix < num_fs; ix++) {
+ if (major == supported_versions[ix].major &&
+ minor >= supported_versions[ix].minor)
+ return 0;
+ }
+ dev_err(&data->client->dev, "unsupported sgp version: %d.%d\n",
+ major, minor);
+
+ return -ENODEV;
+}
+
+static void sgp_init(struct sgp_data *data)
+{
+ data->iaq_init_cmd = SGP_CMD_IAQ_INIT;
+ data->iaq_init_start_jiffies = 0;
+ data->iaq_buffer_state = IAQ_BUFFER_EMPTY;
+ switch (SGP_VERS_PRODUCT(data)) {
+ case SGP30:
+ data->measure_interval_jiffies = SGP30_MEASURE_INTERVAL_HZ * HZ;
+ data->measure_iaq_cmd = SGP_CMD_IAQ_MEASURE;
+ data->measure_gas_signals_cmd = SGP30_CMD_MEASURE_SIGNAL;
+ data->product_id = SGP30;
+ data->iaq_defval_skip_jiffies = 15 * HZ;
+ break;
+ case SGPC3:
+ data->measure_interval_jiffies = SGPC3_MEASURE_INTERVAL_HZ * HZ;
+ data->measure_iaq_cmd = SGPC3_CMD_MEASURE_RAW;
+ data->measure_gas_signals_cmd = SGPC3_CMD_MEASURE_RAW;
+ data->product_id = SGPC3;
+ data->iaq_defval_skip_jiffies =
+ 43 * data->measure_interval_jiffies;
+ break;
+ };
+}
+
+static const struct iio_info sgp_info = {
+ .read_raw = sgp_read_raw,
+};
+
+static const struct of_device_id sgp_dt_ids[] = {
+ { .compatible = "sensirion,sgp30", .data = (void *)SGP30 },
+ { .compatible = "sensirion,sgpc3", .data = (void *)SGPC3 },
+ { }
+};
+
+static int sgp_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct sgp_data *data;
+ const struct of_device_id *of_id;
+ unsigned long product_id;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ of_id = of_match_device(sgp_dt_ids, &client->dev);
+ if (of_id)
+ product_id = (unsigned long)of_id->data;
+ else
+ product_id = id->driver_data;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ crc8_populate_msb(sgp_crc8_table, SGP_CRC8_POLYNOMIAL);
+ mutex_init(&data->data_lock);
+
+ /* get feature set version and write it to client data */
+ ret = sgp_read_cmd(data, SGP_CMD_GET_FEATURE_SET, &data->buffer, 1,
+ SGP_CMD_DURATION_US);
+ if (ret < 0)
+ return ret;
+
+ data->feature_set = be16_to_cpu(data->buffer.raw_words[0].value);
+
+ ret = sgp_check_compat(data, product_id);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &sgp_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = sgp_devices[product_id].channels;
+ indio_dev->num_channels = sgp_devices[product_id].num_channels;
+
+ sgp_init(data);
+
+ ret = devm_iio_device_register(&client->dev, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to register iio device\n");
+ return ret;
+ }
+
+ data->iaq_thread = kthread_run(sgp_iaq_threadfn, data,
+ "%s-iaq", data->client->name);
+
+ return 0;
+}
+
+static int sgp_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct sgp_data *data = iio_priv(indio_dev);
+
+ if (data->iaq_thread)
+ kthread_stop(data->iaq_thread);
+
+ return 0;
+}
+
+static const struct i2c_device_id sgp_id[] = {
+ { "sgp30", SGP30 },
+ { "sgpc3", SGPC3 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, sgp_id);
+MODULE_DEVICE_TABLE(of, sgp_dt_ids);
+
+static struct i2c_driver sgp_driver = {
+ .driver = {
+ .name = "sgp30",
+ .of_match_table = of_match_ptr(sgp_dt_ids),
+ },
+ .probe = sgp_probe,
+ .remove = sgp_remove,
+ .id_table = sgp_id,
+};
+module_i2c_driver(sgp_driver);
+
+MODULE_AUTHOR("Andreas Brauchli <andreas.brauchli@sensirion.com>");
+MODULE_AUTHOR("Pascal Sachs <pascal.sachs@sensirion.com>");
+MODULE_DESCRIPTION("Sensirion SGP gas sensors");
+MODULE_LICENSE("GPL v2");
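
Editor's note: the SGP_VERS_* masks above pack several fields into the 16-bit feature-set word. A small decoding example; the value 0x0020 is illustrative, not read from a real part, and the helper is hypothetical:

static bool sgp30_example_feature_set_ok(u16 fs)
{
	const u16 wanted_major = 1, wanted_minor = 0;	/* from the table */
	u16 product = (fs & 0xf000) >> 12;	/* 0x0020 -> 0, i.e. SGP30 */
	u16 major   = (fs & 0x00e0) >> 5;	/* 0x0020 -> 1 */
	u16 minor   = fs & 0x001f;		/* 0x0020 -> 0 */

	/* same acceptance rule as sgp_check_compat() for this product */
	return product == 0 && major == wanted_major && minor >= wanted_minor;
}
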
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
new file mode 100644
index 000000000000..edbb956e81e8
--- /dev/null
+++ b/drivers/iio/chemical/sps30.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sensirion SPS30 particulate matter sensor driver
+ *
+ * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
+ *
+ * I2C slave address: 0x69
+ */
+
+#include <asm/unaligned.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define SPS30_CRC8_POLYNOMIAL 0x31
+/* max number of bytes needed to store PM measurements or serial string */
+#define SPS30_MAX_READ_SIZE 48
+/* sensor measures reliably up to 3000 ug / m3 */
+#define SPS30_MAX_PM 3000
+/* minimum and maximum self cleaning periods in seconds */
+#define SPS30_AUTO_CLEANING_PERIOD_MIN 0
+#define SPS30_AUTO_CLEANING_PERIOD_MAX 604800
+
+/* SPS30 commands */
+#define SPS30_START_MEAS 0x0010
+#define SPS30_STOP_MEAS 0x0104
+#define SPS30_RESET 0xd304
+#define SPS30_READ_DATA_READY_FLAG 0x0202
+#define SPS30_READ_DATA 0x0300
+#define SPS30_READ_SERIAL 0xd033
+#define SPS30_START_FAN_CLEANING 0x5607
+#define SPS30_AUTO_CLEANING_PERIOD 0x8004
+/* not a sensor command per se, used only to distinguish write from read */
+#define SPS30_READ_AUTO_CLEANING_PERIOD 0x8005
+
+enum {
+ PM1,
+ PM2P5,
+ PM4,
+ PM10,
+};
+
+enum {
+ RESET,
+ MEASURING,
+};
+
+struct sps30_state {
+ struct i2c_client *client;
+ /*
+ * Guards against concurrent access to sensor registers.
+	 * Must be held whenever a sequence of commands is executed.
+ */
+ struct mutex lock;
+ int state;
+};
+
+DECLARE_CRC8_TABLE(sps30_crc8_table);
+
+static int sps30_write_then_read(struct sps30_state *state, u8 *txbuf,
+ int txsize, u8 *rxbuf, int rxsize)
+{
+ int ret;
+
+ /*
+	 * The sensor does not support repeated start, so instead of one
+	 * combined transfer the write and the read are issued separately.
+ */
+ ret = i2c_master_send(state->client, txbuf, txsize);
+ if (ret != txsize)
+ return ret < 0 ? ret : -EIO;
+
+ if (!rxbuf)
+ return 0;
+
+ ret = i2c_master_recv(state->client, rxbuf, rxsize);
+ if (ret != rxsize)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int sps30_do_cmd(struct sps30_state *state, u16 cmd, u8 *data, int size)
+{
+ /*
+	 * Internally the sensor stores measurements in the following manner:
+ *
+ * PM1: upper two bytes, crc8, lower two bytes, crc8
+ * PM2P5: upper two bytes, crc8, lower two bytes, crc8
+ * PM4: upper two bytes, crc8, lower two bytes, crc8
+ * PM10: upper two bytes, crc8, lower two bytes, crc8
+ *
+ * What follows next are number concentration measurements and
+ * typical particle size measurement which we omit.
+ */
+ u8 buf[SPS30_MAX_READ_SIZE] = { cmd >> 8, cmd };
+ int i, ret = 0;
+
+ switch (cmd) {
+ case SPS30_START_MEAS:
+ buf[2] = 0x03;
+ buf[3] = 0x00;
+ buf[4] = crc8(sps30_crc8_table, &buf[2], 2, CRC8_INIT_VALUE);
+ ret = sps30_write_then_read(state, buf, 5, NULL, 0);
+ break;
+ case SPS30_STOP_MEAS:
+ case SPS30_RESET:
+ case SPS30_START_FAN_CLEANING:
+ ret = sps30_write_then_read(state, buf, 2, NULL, 0);
+ break;
+ case SPS30_READ_AUTO_CLEANING_PERIOD:
+ buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8;
+ buf[1] = (u8)SPS30_AUTO_CLEANING_PERIOD;
+ /* fall through */
+ case SPS30_READ_DATA_READY_FLAG:
+ case SPS30_READ_DATA:
+ case SPS30_READ_SERIAL:
+ /* every two data bytes are checksummed */
+ size += size / 2;
+ ret = sps30_write_then_read(state, buf, 2, buf, size);
+ break;
+ case SPS30_AUTO_CLEANING_PERIOD:
+ buf[2] = data[0];
+ buf[3] = data[1];
+ buf[4] = crc8(sps30_crc8_table, &buf[2], 2, CRC8_INIT_VALUE);
+ buf[5] = data[2];
+ buf[6] = data[3];
+ buf[7] = crc8(sps30_crc8_table, &buf[5], 2, CRC8_INIT_VALUE);
+ ret = sps30_write_then_read(state, buf, 8, NULL, 0);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /* validate received data and strip off crc bytes */
+ for (i = 0; i < size; i += 3) {
+ u8 crc = crc8(sps30_crc8_table, &buf[i], 2, CRC8_INIT_VALUE);
+
+ if (crc != buf[i + 2]) {
+ dev_err(&state->client->dev,
+ "data integrity check failed\n");
+ return -EIO;
+ }
+
+ *data++ = buf[i];
+ *data++ = buf[i + 1];
+ }
+
+ return 0;
+}
+
+static s32 sps30_float_to_int_clamped(const u8 *fp)
+{
+ int val = get_unaligned_be32(fp);
+ int mantissa = val & GENMASK(22, 0);
+ /* this is fine since passed float is always non-negative */
+ int exp = val >> 23;
+ int fraction, shift;
+
+ /* special case 0 */
+ if (!exp && !mantissa)
+ return 0;
+
+ exp -= 127;
+ if (exp < 0) {
+ /* return values ranging from 1 to 99 */
+ return ((((1 << 23) + mantissa) * 100) >> 23) >> (-exp);
+ }
+
+ /* return values ranging from 100 to 300000 */
+ shift = 23 - exp;
+ val = (1 << exp) + (mantissa >> shift);
+ if (val >= SPS30_MAX_PM)
+ return SPS30_MAX_PM * 100;
+
+ fraction = mantissa & GENMASK(shift - 1, 0);
+
+ return val * 100 + ((fraction * 100) >> shift);
+}
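
Editor's note: a worked example for the conversion just above, since the fixed-point math is easy to misread; the input is a big-endian IEEE-754 float and the result is the value scaled by 100:

/*
 * Example: 10.5 ug/m3 encoded as the big-endian float 0x41280000.
 *
 *   exponent field = 0x82  ->  exp = 130 - 127 = 3
 *   mantissa       = 0x280000
 *   shift          = 23 - 3 = 20
 *   integer part   = (1 << 3) + (0x280000 >> 20)      = 8 + 2 = 10
 *   fraction       = 0x280000 & GENMASK(19, 0)        = 0x80000
 *   result         = 10 * 100 + ((0x80000 * 100) >> 20) = 1000 + 50 = 1050
 *
 * i.e. sps30_float_to_int_clamped() returns 1050 for 10.50 ug/m3.
 */
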
+
+static int sps30_do_meas(struct sps30_state *state, s32 *data, int size)
+{
+ int i, ret, tries = 5;
+ u8 tmp[16];
+
+ if (state->state == RESET) {
+ ret = sps30_do_cmd(state, SPS30_START_MEAS, NULL, 0);
+ if (ret)
+ return ret;
+
+ state->state = MEASURING;
+ }
+
+ while (tries--) {
+ ret = sps30_do_cmd(state, SPS30_READ_DATA_READY_FLAG, tmp, 2);
+ if (ret)
+ return -EIO;
+
+ /* new measurements ready to be read */
+ if (tmp[1] == 1)
+ break;
+
+ msleep_interruptible(300);
+ }
+
+ if (tries == -1)
+ return -ETIMEDOUT;
+
+ ret = sps30_do_cmd(state, SPS30_READ_DATA, tmp, sizeof(int) * size);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ data[i] = sps30_float_to_int_clamped(&tmp[4 * i]);
+
+ return 0;
+}
+
+static irqreturn_t sps30_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct sps30_state *state = iio_priv(indio_dev);
+ int ret;
+ s32 data[4 + 2]; /* PM1, PM2P5, PM4, PM10, timestamp */
+
+ mutex_lock(&state->lock);
+ ret = sps30_do_meas(state, data, 4);
+ mutex_unlock(&state->lock);
+ if (ret)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int sps30_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct sps30_state *state = iio_priv(indio_dev);
+ int data[4], ret = -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_MASSCONCENTRATION:
+ mutex_lock(&state->lock);
+ /* read up to the number of bytes actually needed */
+ switch (chan->channel2) {
+ case IIO_MOD_PM1:
+ ret = sps30_do_meas(state, data, 1);
+ break;
+ case IIO_MOD_PM2P5:
+ ret = sps30_do_meas(state, data, 2);
+ break;
+ case IIO_MOD_PM4:
+ ret = sps30_do_meas(state, data, 3);
+ break;
+ case IIO_MOD_PM10:
+ ret = sps30_do_meas(state, data, 4);
+ break;
+ }
+ mutex_unlock(&state->lock);
+ if (ret)
+ return ret;
+
+ *val = data[chan->address] / 100;
+ *val2 = (data[chan->address] % 100) * 10000;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_MASSCONCENTRATION:
+ switch (chan->channel2) {
+ case IIO_MOD_PM1:
+ case IIO_MOD_PM2P5:
+ case IIO_MOD_PM4:
+ case IIO_MOD_PM10:
+ *val = 0;
+ *val2 = 10000;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int sps30_do_cmd_reset(struct sps30_state *state)
+{
+ int ret;
+
+ ret = sps30_do_cmd(state, SPS30_RESET, NULL, 0);
+ msleep(300);
+ /*
+	 * A power-on reset causes the sensor to produce a glitch on the i2c
+	 * bus, leaving some controllers in an error state. Recover simply by
+	 * placing some data on the bus, for example the STOP_MEAS command,
+	 * which is a NOP in this case.
+ */
+ sps30_do_cmd(state, SPS30_STOP_MEAS, NULL, 0);
+ state->state = RESET;
+
+ return ret;
+}
+
+static ssize_t start_cleaning_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sps30_state *state = iio_priv(indio_dev);
+ int val, ret;
+
+ if (kstrtoint(buf, 0, &val) || val != 1)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ ret = sps30_do_cmd(state, SPS30_START_FAN_CLEANING, NULL, 0);
+ mutex_unlock(&state->lock);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t cleaning_period_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sps30_state *state = iio_priv(indio_dev);
+ u8 tmp[4];
+ int ret;
+
+ mutex_lock(&state->lock);
+ ret = sps30_do_cmd(state, SPS30_READ_AUTO_CLEANING_PERIOD, tmp, 4);
+ mutex_unlock(&state->lock);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", get_unaligned_be32(tmp));
+}
+
+static ssize_t cleaning_period_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sps30_state *state = iio_priv(indio_dev);
+ int val, ret;
+ u8 tmp[4];
+
+ if (kstrtoint(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val < SPS30_AUTO_CLEANING_PERIOD_MIN) ||
+ (val > SPS30_AUTO_CLEANING_PERIOD_MAX))
+ return -EINVAL;
+
+ put_unaligned_be32(val, tmp);
+
+ mutex_lock(&state->lock);
+ ret = sps30_do_cmd(state, SPS30_AUTO_CLEANING_PERIOD, tmp, 0);
+ if (ret) {
+ mutex_unlock(&state->lock);
+ return ret;
+ }
+
+ msleep(20);
+
+ /*
+	 * the sensor requires a reset in order to return the up-to-date
+	 * self-cleaning period
+ */
+ ret = sps30_do_cmd_reset(state);
+ if (ret)
+ dev_warn(dev,
+ "period changed but reads will return the old value\n");
+
+ mutex_unlock(&state->lock);
+
+ return len;
+}
+
+static ssize_t cleaning_period_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
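+	/* IIO range convention: "[min step max]" */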
+ return snprintf(buf, PAGE_SIZE, "[%d %d %d]\n",
+ SPS30_AUTO_CLEANING_PERIOD_MIN, 1,
+ SPS30_AUTO_CLEANING_PERIOD_MAX);
+}
+
+static IIO_DEVICE_ATTR_WO(start_cleaning, 0);
+static IIO_DEVICE_ATTR_RW(cleaning_period, 0);
+static IIO_DEVICE_ATTR_RO(cleaning_period_available, 0);
+
+static struct attribute *sps30_attrs[] = {
+ &iio_dev_attr_start_cleaning.dev_attr.attr,
+ &iio_dev_attr_cleaning_period.dev_attr.attr,
+ &iio_dev_attr_cleaning_period_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group sps30_attr_group = {
+ .attrs = sps30_attrs,
+};
+
+static const struct iio_info sps30_info = {
+ .attrs = &sps30_attr_group,
+ .read_raw = sps30_read_raw,
+};
+
+#define SPS30_CHAN(_index, _mod) { \
+ .type = IIO_MASSCONCENTRATION, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = _mod, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 19, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+static const struct iio_chan_spec sps30_channels[] = {
+ SPS30_CHAN(0, PM1),
+ SPS30_CHAN(1, PM2P5),
+ SPS30_CHAN(2, PM4),
+ SPS30_CHAN(3, PM10),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static void sps30_stop_meas(void *data)
+{
+ struct sps30_state *state = data;
+
+ sps30_do_cmd(state, SPS30_STOP_MEAS, NULL, 0);
+}
+
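+/* all four PM channels are captured together, hence the single scan mask */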
+static const unsigned long sps30_scan_masks[] = { 0x0f, 0x00 };
+
+static int sps30_probe(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev;
+ struct sps30_state *state;
+ u8 buf[32];
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ state = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ state->client = client;
+ state->state = RESET;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &sps30_info;
+ indio_dev->name = client->name;
+ indio_dev->channels = sps30_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sps30_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = sps30_scan_masks;
+
+ mutex_init(&state->lock);
+ crc8_populate_msb(sps30_crc8_table, SPS30_CRC8_POLYNOMIAL);
+
+ ret = sps30_do_cmd_reset(state);
+ if (ret) {
+ dev_err(&client->dev, "failed to reset device\n");
+ return ret;
+ }
+
+ ret = sps30_do_cmd(state, SPS30_READ_SERIAL, buf, sizeof(buf));
+ if (ret) {
+ dev_err(&client->dev, "failed to read serial number\n");
+ return ret;
+ }
+ /* returned serial number is already NUL terminated */
+ dev_info(&client->dev, "serial number: %s\n", buf);
+
+ ret = devm_add_action_or_reset(&client->dev, sps30_stop_meas, state);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev, NULL,
+ sps30_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id sps30_id[] = {
+ { "sps30" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sps30_id);
+
+static const struct of_device_id sps30_of_match[] = {
+ { .compatible = "sensirion,sps30" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sps30_of_match);
+
+static struct i2c_driver sps30_driver = {
+ .driver = {
+ .name = "sps30",
+ .of_match_table = sps30_of_match,
+ },
+ .id_table = sps30_id,
+ .probe_new = sps30_probe,
+};
+module_i2c_driver(sps30_driver);
+
+MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>");
+MODULE_DESCRIPTION("Sensirion SPS30 particulate matter sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 851b61eaf3da..fbef9107acad 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -148,9 +148,9 @@ config AD5686_SPI
depends on SPI
select AD5686
help
- Say yes here to build support for Analog Devices AD5672R, AD5676,
- AD5676R, AD5684, AD5684R, AD5684R, AD5685R, AD5686, AD5686R.
- Voltage Output Digital to Analog Converter.
+	  Say yes here to build support for Analog Devices AD5672R, AD5674R,
+	  AD5676, AD5676R, AD5679R, AD5684, AD5684R, AD5685R, AD5686, AD5686R
+	  Voltage Output Digital to Analog Converters.
To compile this driver as a module, choose M here: the
module will be called ad5686.
@@ -375,6 +375,16 @@ config TI_DAC7311
If compiled as a module, it will be called ti-dac7311.
+config TI_DAC7612
+ tristate "Texas Instruments 12-bit 2-channel DAC driver"
+ depends on SPI_MASTER && GPIOLIB
+ help
+	  Driver for the Texas Instruments DAC7612, DAC7612U and DAC7612UB DACs.
+	  The driver can drive the load pin automatically; otherwise it needs
+	  to be toggled manually.
+
+ If compiled as a module, it will be called ti-dac7612.
+
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
depends on OF
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index f0a37c93de8e..1369fa1d2f0e 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -41,4 +41,5 @@ obj-$(CONFIG_STM32_DAC) += stm32-dac.o
obj-$(CONFIG_TI_DAC082S085) += ti-dac082s085.o
obj-$(CONFIG_TI_DAC5571) += ti-dac5571.o
obj-$(CONFIG_TI_DAC7311) += ti-dac7311.o
+obj-$(CONFIG_TI_DAC7612) += ti-dac7612.o
obj-$(CONFIG_VF610_DAC) += vf610_dac.o
diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c
index 665fa6bd9ced..0188ded5137c 100644
--- a/drivers/iio/dac/ad5686-spi.c
+++ b/drivers/iio/dac/ad5686-spi.c
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
/*
- * AD5672R, AD5676, AD5676R, AD5681R, AD5682R, AD5683, AD5683R,
- * AD5684, AD5684R, AD5685R, AD5686, AD5686R
+ * AD5672R, AD5674R, AD5676, AD5676R, AD5679R,
+ * AD5681R, AD5682R, AD5683, AD5683R, AD5684,
+ * AD5684R, AD5685R, AD5686, AD5686R
* Digital to analog converters driver
*
* Copyright 2018 Analog Devices Inc.
@@ -102,8 +103,10 @@ static int ad5686_spi_remove(struct spi_device *spi)
static const struct spi_device_id ad5686_spi_id[] = {
{"ad5310r", ID_AD5310R},
{"ad5672r", ID_AD5672R},
+ {"ad5674r", ID_AD5674R},
{"ad5676", ID_AD5676},
{"ad5676r", ID_AD5676R},
+ {"ad5679r", ID_AD5679R},
{"ad5681r", ID_AD5681R},
{"ad5682r", ID_AD5682R},
{"ad5683", ID_AD5683},
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index a332b93ca2c4..e06b29c565b9 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
/*
* AD5686R, AD5685R, AD5684R Digital to analog converters driver
*
@@ -71,7 +71,7 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
int ret;
struct ad5686_state *st = iio_priv(indio_dev);
unsigned int val, ref_bit_msk;
- u8 shift;
+ u8 shift, address = 0;
ret = strtobool(buf, &readin);
if (ret)
@@ -94,6 +94,9 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
case AD5686_REGMAP:
shift = 0;
ref_bit_msk = 0;
+ /* AD5674R/AD5679R have 16 channels and 2 powerdown registers */
+ if (chan->channel > 0x7)
+ address = 0x8;
break;
case AD5693_REGMAP:
shift = 13;
@@ -107,7 +110,8 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
if (!st->use_internal_vref)
val |= ref_bit_msk;
- ret = st->write(st, AD5686_CMD_POWERDOWN_DAC, 0, val);
+ ret = st->write(st, AD5686_CMD_POWERDOWN_DAC,
+ address, val >> (address * 2));
return ret ? ret : len;
}
@@ -226,10 +230,32 @@ static struct iio_chan_spec name[] = { \
AD5868_CHANNEL(7, 7, bits, _shift), \
}
+#define DECLARE_AD5679_CHANNELS(name, bits, _shift) \
+static struct iio_chan_spec name[] = { \
+ AD5868_CHANNEL(0, 0, bits, _shift), \
+ AD5868_CHANNEL(1, 1, bits, _shift), \
+ AD5868_CHANNEL(2, 2, bits, _shift), \
+ AD5868_CHANNEL(3, 3, bits, _shift), \
+ AD5868_CHANNEL(4, 4, bits, _shift), \
+ AD5868_CHANNEL(5, 5, bits, _shift), \
+ AD5868_CHANNEL(6, 6, bits, _shift), \
+ AD5868_CHANNEL(7, 7, bits, _shift), \
+ AD5868_CHANNEL(8, 8, bits, _shift), \
+ AD5868_CHANNEL(9, 9, bits, _shift), \
+ AD5868_CHANNEL(10, 10, bits, _shift), \
+ AD5868_CHANNEL(11, 11, bits, _shift), \
+ AD5868_CHANNEL(12, 12, bits, _shift), \
+ AD5868_CHANNEL(13, 13, bits, _shift), \
+ AD5868_CHANNEL(14, 14, bits, _shift), \
+ AD5868_CHANNEL(15, 15, bits, _shift), \
+}
+
DECLARE_AD5693_CHANNELS(ad5310r_channels, 10, 2);
DECLARE_AD5693_CHANNELS(ad5311r_channels, 10, 6);
DECLARE_AD5676_CHANNELS(ad5672_channels, 12, 4);
+DECLARE_AD5679_CHANNELS(ad5674r_channels, 12, 4);
DECLARE_AD5676_CHANNELS(ad5676_channels, 16, 0);
+DECLARE_AD5679_CHANNELS(ad5679r_channels, 16, 0);
DECLARE_AD5686_CHANNELS(ad5684_channels, 12, 4);
DECLARE_AD5686_CHANNELS(ad5685r_channels, 14, 2);
DECLARE_AD5686_CHANNELS(ad5686_channels, 16, 0);
@@ -262,6 +288,12 @@ static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
.num_channels = 8,
.regmap_type = AD5686_REGMAP,
},
+ [ID_AD5674R] = {
+ .channels = ad5674r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 16,
+ .regmap_type = AD5686_REGMAP,
+ },
[ID_AD5675R] = {
.channels = ad5676_channels,
.int_vref_mv = 2500,
@@ -279,6 +311,12 @@ static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
.num_channels = 8,
.regmap_type = AD5686_REGMAP,
},
+ [ID_AD5679R] = {
+ .channels = ad5679r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 16,
+ .regmap_type = AD5686_REGMAP,
+ },
[ID_AD5681R] = {
.channels = ad5691r_channels,
.int_vref_mv = 2500,
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index 19f6917d4738..70a779939ddb 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is part of AD5686 DAC driver
*
@@ -54,9 +54,11 @@ enum ad5686_supported_device_ids {
ID_AD5311R,
ID_AD5671R,
ID_AD5672R,
+ ID_AD5674R,
ID_AD5675R,
ID_AD5676,
ID_AD5676R,
+ ID_AD5679R,
ID_AD5681R,
ID_AD5682R,
ID_AD5683,
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index 7350d9806a11..ccf794caef43 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
/*
* AD5671R, AD5675R, AD5691R, AD5692R, AD5693, AD5693R,
* AD5694, AD5694R, AD5695R, AD5696, AD5696R
diff --git a/drivers/iio/dac/ad5758.c b/drivers/iio/dac/ad5758.c
index ef41f12bf262..2bdf1b0aee06 100644
--- a/drivers/iio/dac/ad5758.c
+++ b/drivers/iio/dac/ad5758.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
/*
* AD5758 Digital to analog converters driver
*
diff --git a/drivers/iio/dac/ti-dac7612.c b/drivers/iio/dac/ti-dac7612.c
new file mode 100644
index 000000000000..c46805144dd4
--- /dev/null
+++ b/drivers/iio/dac/ti-dac7612.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DAC7612 Dual, 12-Bit Serial input Digital-to-Analog Converter
+ *
+ * Copyright 2019 Qtechnology A/S
+ * 2019 Ricardo Ribalda <ricardo@ribalda.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+
+#define DAC7612_RESOLUTION 12
+#define DAC7612_ADDRESS 4
+#define DAC7612_START 5
+
+struct dac7612 {
+ struct spi_device *spi;
+ struct gpio_desc *loaddacs;
+ uint16_t cache[2];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ uint8_t data[2] ____cacheline_aligned;
+};
+
+static int dac7612_cmd_single(struct dac7612 *priv, int channel, u16 val)
+{
+ int ret;
+
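+	/*
+	 * First byte: start bit (bit 5), channel select (bit 4) and the top
+	 * four bits of the 12-bit code; second byte: the low eight bits.
+	 */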
+ priv->data[0] = BIT(DAC7612_START) | (channel << DAC7612_ADDRESS);
+ priv->data[0] |= val >> 8;
+ priv->data[1] = val & 0xff;
+
+ priv->cache[channel] = val;
+
+ ret = spi_write(priv->spi, priv->data, sizeof(priv->data));
+ if (ret)
+ return ret;
+
+ gpiod_set_value(priv->loaddacs, 1);
+ gpiod_set_value(priv->loaddacs, 0);
+
+ return 0;
+}
+
+#define dac7612_CHANNEL(chan, name) { \
+ .type = IIO_VOLTAGE, \
+ .channel = (chan), \
+ .indexed = 1, \
+ .output = 1, \
+ .datasheet_name = name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec dac7612_channels[] = {
+ dac7612_CHANNEL(0, "OUTA"),
+ dac7612_CHANNEL(1, "OUTB"),
+};
+
+static int dac7612_read_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct dac7612 *priv;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ priv = iio_priv(iio_dev);
+ *val = priv->cache[chan->channel];
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dac7612_write_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct dac7612 *priv = iio_priv(iio_dev);
+ int ret;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ if ((val >= BIT(DAC7612_RESOLUTION)) || val < 0 || val2)
+ return -EINVAL;
+
+ if (val == priv->cache[chan->channel])
+ return 0;
+
+ mutex_lock(&iio_dev->mlock);
+ ret = dac7612_cmd_single(priv, chan->channel, val);
+ mutex_unlock(&iio_dev->mlock);
+
+ return ret;
+}
+
+static const struct iio_info dac7612_info = {
+ .read_raw = dac7612_read_raw,
+ .write_raw = dac7612_write_raw,
+};
+
+static int dac7612_probe(struct spi_device *spi)
+{
+ struct iio_dev *iio_dev;
+ struct dac7612 *priv;
+ int i;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*priv));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(iio_dev);
+ /*
+ * LOADDACS pin can be controlled by the driver or externally.
+ * When controlled by the driver, the DAC value is updated after
+ * every write.
+	 * When the driver does not control the pin, the user or an external
+	 * event can update the outputs of both DACs by pulsing the LOADDACS
+	 * pin low.
+ */
+ priv->loaddacs = devm_gpiod_get_optional(&spi->dev, "ti,loaddacs",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->loaddacs))
+ return PTR_ERR(priv->loaddacs);
+ priv->spi = spi;
+ spi_set_drvdata(spi, iio_dev);
+ iio_dev->dev.parent = &spi->dev;
+ iio_dev->info = &dac7612_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->channels = dac7612_channels;
+ iio_dev->num_channels = ARRAY_SIZE(priv->cache);
+ iio_dev->name = spi_get_device_id(spi)->name;
+
+ for (i = 0; i < ARRAY_SIZE(priv->cache); i++) {
+ ret = dac7612_cmd_single(priv, i, 0);
+ if (ret)
+ return ret;
+ }
+
+ return devm_iio_device_register(&spi->dev, iio_dev);
+}
+
+static const struct spi_device_id dac7612_id[] = {
+ {"ti-dac7612"},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, dac7612_id);
+
+static const struct of_device_id dac7612_of_match[] = {
+ { .compatible = "ti,dac7612" },
+ { .compatible = "ti,dac7612u" },
+ { .compatible = "ti,dac7612ub" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, dac7612_of_match);
+
+static struct spi_driver dac7612_driver = {
+ .driver = {
+ .name = "ti-dac7612",
+ .of_match_table = dac7612_of_match,
+ },
+ .probe = dac7612_probe,
+ .id_table = dac7612_id,
+};
+module_spi_driver(dac7612_driver);
+
+MODULE_AUTHOR("Ricardo Ribalda <ricardo@ribalda.com>");
+MODULE_DESCRIPTION("Texas Instruments DAC7612 DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index f3f94fbdd20a..3f9be69499ec 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -943,11 +943,14 @@ static int ad9523_setup(struct iio_dev *indio_dev)
}
}
- for_each_clear_bit(i, &active_mask, AD9523_NUM_CHAN)
- ad9523_write(indio_dev,
+ for_each_clear_bit(i, &active_mask, AD9523_NUM_CHAN) {
+ ret = ad9523_write(indio_dev,
AD9523_CHANNEL_CLOCK_DIST(i),
AD9523_CLK_DIST_DRIVER_MODE(TRISTATE) |
AD9523_CLK_DIST_PWR_DOWN_EN);
+ if (ret < 0)
+ return ret;
+ }
ret = ad9523_write(indio_dev, AD9523_POWER_DOWN_CTRL, 0);
if (ret < 0)
diff --git a/drivers/iio/imu/bmi160/bmi160.h b/drivers/iio/imu/bmi160/bmi160.h
index 2351049d930b..621f5309d735 100644
--- a/drivers/iio/imu/bmi160/bmi160.h
+++ b/drivers/iio/imu/bmi160/bmi160.h
@@ -2,9 +2,20 @@
#ifndef BMI160_H_
#define BMI160_H_
+#include <linux/iio/iio.h>
+
+struct bmi160_data {
+ struct regmap *regmap;
+ struct iio_trigger *trig;
+};
+
extern const struct regmap_config bmi160_regmap_config;
int bmi160_core_probe(struct device *dev, struct regmap *regmap,
const char *name, bool use_spi);
+int bmi160_enable_irq(struct regmap *regmap, bool enable);
+
+int bmi160_probe_trigger(struct iio_dev *indio_dev, int irq, u32 irq_type);
+
#endif /* BMI160_H_ */
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index b10330b0f93f..6af65d6f1d28 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -1,26 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* BMI160 - Bosch IMU (accel, gyro plus external magnetometer)
*
* Copyright (c) 2016, Intel Corporation.
- *
- * This file is subject to the terms and conditions of version 2 of
- * the GNU General Public License. See the file COPYING in the main
- * directory of this archive for more details.
+ * Copyright (c) 2019, Martin Kelly.
*
* IIO core driver for BMI160, with support for I2C/SPI busses
*
- * TODO: magnetometer, interrupts, hardware FIFO
+ * TODO: magnetometer, hardware FIFO
*/
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/acpi.h>
#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
#include "bmi160.h"
@@ -64,8 +65,32 @@
#define BMI160_CMD_GYRO_PM_FAST_STARTUP 0x17
#define BMI160_CMD_SOFTRESET 0xB6
+#define BMI160_REG_INT_EN 0x51
+#define BMI160_DRDY_INT_EN BIT(4)
+
+#define BMI160_REG_INT_OUT_CTRL 0x53
+#define BMI160_INT_OUT_CTRL_MASK 0x0f
+#define BMI160_INT1_OUT_CTRL_SHIFT 0
+#define BMI160_INT2_OUT_CTRL_SHIFT 4
+#define BMI160_EDGE_TRIGGERED BIT(0)
+#define BMI160_ACTIVE_HIGH BIT(1)
+#define BMI160_OPEN_DRAIN BIT(2)
+#define BMI160_OUTPUT_EN BIT(3)
+
+#define BMI160_REG_INT_LATCH 0x54
+#define BMI160_INT1_LATCH_MASK BIT(4)
+#define BMI160_INT2_LATCH_MASK BIT(5)
+
+/* INT1 and INT2 are in the opposite order from that in INT_OUT_CTRL! */
+#define BMI160_REG_INT_MAP 0x56
+#define BMI160_INT1_MAP_DRDY_EN 0x80
+#define BMI160_INT2_MAP_DRDY_EN 0x08
+
#define BMI160_REG_DUMMY 0x7F
+#define BMI160_NORMAL_WRITE_USLEEP 2
+#define BMI160_SUSPENDED_WRITE_USLEEP 450
+
#define BMI160_ACCEL_PMU_MIN_USLEEP 3800
#define BMI160_GYRO_PMU_MIN_USLEEP 80000
#define BMI160_SOFTRESET_USLEEP 1000
@@ -108,8 +133,9 @@ enum bmi160_sensor_type {
BMI160_NUM_SENSORS /* must be last */
};
-struct bmi160_data {
- struct regmap *regmap;
+enum bmi160_int_pin {
+ BMI160_PIN_INT1,
+ BMI160_PIN_INT2
};
const struct regmap_config bmi160_regmap_config = {
@@ -273,7 +299,7 @@ int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
cmd = bmi160_regs[t].pmu_cmd_suspend;
ret = regmap_write(data->regmap, BMI160_REG_CMD, cmd);
- if (ret < 0)
+ if (ret)
return ret;
usleep_range(bmi160_pmu_time[t], bmi160_pmu_time[t] + 1000);
@@ -305,7 +331,7 @@ int bmi160_get_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
int i, ret, val;
ret = regmap_read(data->regmap, bmi160_regs[t].range, &val);
- if (ret < 0)
+ if (ret)
return ret;
for (i = 0; i < bmi160_scale_table[t].num; i++)
@@ -328,7 +354,7 @@ static int bmi160_get_data(struct bmi160_data *data, int chan_type,
reg = bmi160_regs[t].data + (axis - IIO_MOD_X) * sizeof(sample);
ret = regmap_bulk_read(data->regmap, reg, &sample, sizeof(sample));
- if (ret < 0)
+ if (ret)
return ret;
*val = sign_extend32(le16_to_cpu(sample), 15);
@@ -362,7 +388,7 @@ static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
int i, val, ret;
ret = regmap_read(data->regmap, bmi160_regs[t].config, &val);
- if (ret < 0)
+ if (ret)
return ret;
val &= bmi160_regs[t].config_odr_mask;
@@ -394,13 +420,12 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
indio_dev->masklength) {
ret = regmap_bulk_read(data->regmap, base + i * sizeof(sample),
&sample, sizeof(sample));
- if (ret < 0)
+ if (ret)
goto done;
buf[j++] = sample;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -416,18 +441,18 @@ static int bmi160_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = bmi160_get_data(data, chan->type, chan->channel2, val);
- if (ret < 0)
+ if (ret)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
ret = bmi160_get_scale(data,
bmi160_to_sensor(chan->type), val2);
- return ret < 0 ? ret : IIO_VAL_INT_PLUS_MICRO;
+ return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = bmi160_get_odr(data, bmi160_to_sensor(chan->type),
val, val2);
- return ret < 0 ? ret : IIO_VAL_INT_PLUS_MICRO;
+ return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -498,6 +523,186 @@ static const char *bmi160_match_acpi_device(struct device *dev)
return dev_name(dev);
}
+static int bmi160_write_conf_reg(struct regmap *regmap, unsigned int reg,
+ unsigned int mask, unsigned int bits,
+ unsigned int write_usleep)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ val = (val & ~mask) | bits;
+
+ ret = regmap_write(regmap, reg, val);
+ if (ret)
+ return ret;
+
+ /*
+ * We need to wait after writing before we can write again. See the
+ * datasheet, page 93.
+ */
+ usleep_range(write_usleep, write_usleep + 1000);
+
+ return 0;
+}
+
+static int bmi160_config_pin(struct regmap *regmap, enum bmi160_int_pin pin,
+ bool open_drain, u8 irq_mask,
+ unsigned long write_usleep)
+{
+ int ret;
+ struct device *dev = regmap_get_device(regmap);
+ u8 int_out_ctrl_shift;
+ u8 int_latch_mask;
+ u8 int_map_mask;
+ u8 int_out_ctrl_mask;
+ u8 int_out_ctrl_bits;
+ const char *pin_name;
+
+ switch (pin) {
+ case BMI160_PIN_INT1:
+ int_out_ctrl_shift = BMI160_INT1_OUT_CTRL_SHIFT;
+ int_latch_mask = BMI160_INT1_LATCH_MASK;
+ int_map_mask = BMI160_INT1_MAP_DRDY_EN;
+ break;
+ case BMI160_PIN_INT2:
+ int_out_ctrl_shift = BMI160_INT2_OUT_CTRL_SHIFT;
+ int_latch_mask = BMI160_INT2_LATCH_MASK;
+ int_map_mask = BMI160_INT2_MAP_DRDY_EN;
+ break;
+ }
+ int_out_ctrl_mask = BMI160_INT_OUT_CTRL_MASK << int_out_ctrl_shift;
+
+ /*
+ * Enable the requested pin with the right settings:
+ * - Push-pull/open-drain
+ * - Active low/high
+ * - Edge/level triggered
+ */
+ int_out_ctrl_bits = BMI160_OUTPUT_EN;
+ if (open_drain)
+ /* Default is push-pull. */
+ int_out_ctrl_bits |= BMI160_OPEN_DRAIN;
+ int_out_ctrl_bits |= irq_mask;
+ int_out_ctrl_bits <<= int_out_ctrl_shift;
+
+ ret = bmi160_write_conf_reg(regmap, BMI160_REG_INT_OUT_CTRL,
+ int_out_ctrl_mask, int_out_ctrl_bits,
+ write_usleep);
+ if (ret)
+ return ret;
+
+ /* Set the pin to input mode with no latching. */
+ ret = bmi160_write_conf_reg(regmap, BMI160_REG_INT_LATCH,
+ int_latch_mask, int_latch_mask,
+ write_usleep);
+ if (ret)
+ return ret;
+
+ /* Map interrupts to the requested pin. */
+ ret = bmi160_write_conf_reg(regmap, BMI160_REG_INT_MAP,
+ int_map_mask, int_map_mask,
+ write_usleep);
+ if (ret) {
+ switch (pin) {
+ case BMI160_PIN_INT1:
+ pin_name = "INT1";
+ break;
+ case BMI160_PIN_INT2:
+ pin_name = "INT2";
+ break;
+ }
+ dev_err(dev, "Failed to configure %s IRQ pin", pin_name);
+ }
+
+ return ret;
+}
+
+int bmi160_enable_irq(struct regmap *regmap, bool enable)
+{
+ unsigned int enable_bit = 0;
+
+ if (enable)
+ enable_bit = BMI160_DRDY_INT_EN;
+
+ return bmi160_write_conf_reg(regmap, BMI160_REG_INT_EN,
+ BMI160_DRDY_INT_EN, enable_bit,
+ BMI160_NORMAL_WRITE_USLEEP);
+}
+EXPORT_SYMBOL(bmi160_enable_irq);
+
+static int bmi160_get_irq(struct device_node *of_node, enum bmi160_int_pin *pin)
+{
+ int irq;
+
+ /* Use INT1 if possible, otherwise fall back to INT2. */
+ irq = of_irq_get_byname(of_node, "INT1");
+ if (irq > 0) {
+ *pin = BMI160_PIN_INT1;
+ return irq;
+ }
+
+ irq = of_irq_get_byname(of_node, "INT2");
+ if (irq > 0)
+ *pin = BMI160_PIN_INT2;
+
+ return irq;
+}
+
+static int bmi160_config_device_irq(struct iio_dev *indio_dev, int irq_type,
+ enum bmi160_int_pin pin)
+{
+ bool open_drain;
+ u8 irq_mask;
+ struct bmi160_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+
+ /* Level-triggered, active-low is the default if we set all zeroes. */
+ if (irq_type == IRQF_TRIGGER_RISING)
+ irq_mask = BMI160_ACTIVE_HIGH | BMI160_EDGE_TRIGGERED;
+ else if (irq_type == IRQF_TRIGGER_FALLING)
+ irq_mask = BMI160_EDGE_TRIGGERED;
+ else if (irq_type == IRQF_TRIGGER_HIGH)
+ irq_mask = BMI160_ACTIVE_HIGH;
+ else if (irq_type == IRQF_TRIGGER_LOW)
+ irq_mask = 0;
+ else {
+ dev_err(&indio_dev->dev,
+ "Invalid interrupt type 0x%x specified\n", irq_type);
+ return -EINVAL;
+ }
+
+ open_drain = of_property_read_bool(dev->of_node, "drive-open-drain");
+
+ return bmi160_config_pin(data->regmap, pin, open_drain, irq_mask,
+ BMI160_NORMAL_WRITE_USLEEP);
+}
+
+static int bmi160_setup_irq(struct iio_dev *indio_dev, int irq,
+ enum bmi160_int_pin pin)
+{
+ struct irq_data *desc;
+ u32 irq_type;
+ int ret;
+
+ desc = irq_get_irq_data(irq);
+ if (!desc) {
+ dev_err(&indio_dev->dev, "Could not find IRQ %d\n", irq);
+ return -EINVAL;
+ }
+
+ irq_type = irqd_get_trigger_type(desc);
+
+ ret = bmi160_config_device_irq(indio_dev, irq_type, pin);
+ if (ret)
+ return ret;
+
+ return bmi160_probe_trigger(indio_dev, irq, irq_type);
+}
+
static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
{
int ret;
@@ -505,7 +710,7 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
struct device *dev = regmap_get_device(data->regmap);
ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET);
- if (ret < 0)
+ if (ret)
return ret;
usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1);
@@ -516,12 +721,12 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
*/
if (use_spi) {
ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val);
- if (ret < 0)
+ if (ret)
return ret;
}
ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val);
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "Error reading chip id\n");
return ret;
}
@@ -532,16 +737,59 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
}
ret = bmi160_set_mode(data, BMI160_ACCEL, true);
- if (ret < 0)
+ if (ret)
return ret;
ret = bmi160_set_mode(data, BMI160_GYRO, true);
- if (ret < 0)
+ if (ret)
return ret;
return 0;
}
+static int bmi160_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool enable)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct bmi160_data *data = iio_priv(indio_dev);
+
+ return bmi160_enable_irq(data->regmap, enable);
+}
+
+static const struct iio_trigger_ops bmi160_trigger_ops = {
+ .set_trigger_state = &bmi160_data_rdy_trigger_set_state,
+};
+
+int bmi160_probe_trigger(struct iio_dev *indio_dev, int irq, u32 irq_type)
+{
+ struct bmi160_data *data = iio_priv(indio_dev);
+ int ret;
+
+ data->trig = devm_iio_trigger_alloc(&indio_dev->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+
+ if (data->trig == NULL)
+ return -ENOMEM;
+
+ ret = devm_request_irq(&indio_dev->dev, irq,
+ &iio_trigger_generic_data_rdy_poll,
+ irq_type, "bmi160", data->trig);
+ if (ret)
+ return ret;
+
+ data->trig->dev.parent = regmap_get_device(data->regmap);
+ data->trig->ops = &bmi160_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = devm_iio_trigger_register(&indio_dev->dev, data->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(data->trig);
+
+ return 0;
+}
+
static void bmi160_chip_uninit(void *data)
{
struct bmi160_data *bmi_data = data;
@@ -555,6 +803,8 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
{
struct iio_dev *indio_dev;
struct bmi160_data *data;
+ int irq;
+ enum bmi160_int_pin int_pin;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
@@ -566,11 +816,11 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
data->regmap = regmap;
ret = bmi160_chip_init(data, use_spi);
- if (ret < 0)
+ if (ret)
return ret;
ret = devm_add_action_or_reset(dev, bmi160_chip_uninit, data);
- if (ret < 0)
+ if (ret)
return ret;
if (!name && ACPI_HANDLE(dev))
@@ -583,16 +833,23 @@ int bmi160_core_probe(struct device *dev, struct regmap *regmap,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmi160_info;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
bmi160_trigger_handler, NULL);
- if (ret < 0)
+ if (ret)
return ret;
- ret = devm_iio_device_register(dev, indio_dev);
- if (ret < 0)
- return ret;
+ irq = bmi160_get_irq(dev->of_node, &int_pin);
+ if (irq > 0) {
+ ret = bmi160_setup_irq(indio_dev, irq, int_pin);
+ if (ret)
+ dev_err(&indio_dev->dev, "Failed to setup IRQ %d\n",
+ irq);
+ } else {
+ dev_info(&indio_dev->dev, "Not setting up IRQ trigger\n");
+ }
- return 0;
+ return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_GPL(bmi160_core_probe);
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index 5b1f7e6af651..e36f5e82d400 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* BMI160 - Bosch IMU, I2C bits
*
* Copyright (c) 2016, Intel Corporation.
*
- * This file is subject to the terms and conditions of version 2 of
- * the GNU General Public License. See the file COPYING in the main
- * directory of this archive for more details.
- *
* 7-bit I2C slave address is:
* - 0x68 if SDO is pulled to GND
* - 0x69 if SDO is pulled to VDDIO
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index e521ad14eeac..c19e3df35559 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* BMI160 - Bosch IMU, SPI bits
*
* Copyright (c) 2016, Intel Corporation.
*
- * This file is subject to the terms and conditions of version 2 of
- * the GNU General Public License. See the file COPYING in the main
- * directory of this archive for more details.
*/
#include <linux/acpi.h>
#include <linux/module.h>
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 5483b2ea754d..d2fe9dbddda7 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -13,8 +13,8 @@ config INV_MPU6050_I2C
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6050/6500/9150 and ICM20608
- motion tracking devices over I2C.
+ This driver supports the Invensense MPU6050/6500/9150 and
+ ICM20608/20602 motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -24,7 +24,7 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6050/6500/9150 and ICM20608
- motion tracking devices over SPI.
+ This driver supports the Invensense MPU6050/6500/9150 and
+ ICM20608/20602 motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 1e428c196a82..650de0fefb7b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -38,6 +38,29 @@ static const int gyro_scale_6050[] = {133090, 266181, 532362, 1064724};
*/
static const int accel_scale[] = {598, 1196, 2392, 4785};
+static const struct inv_mpu6050_reg_map reg_set_icm20602 = {
+ .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
+ .lpf = INV_MPU6050_REG_CONFIG,
+ .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
+ .user_ctrl = INV_MPU6050_REG_USER_CTRL,
+ .fifo_en = INV_MPU6050_REG_FIFO_EN,
+ .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
+ .accl_config = INV_MPU6050_REG_ACCEL_CONFIG,
+ .fifo_count_h = INV_MPU6050_REG_FIFO_COUNT_H,
+ .fifo_r_w = INV_MPU6050_REG_FIFO_R_W,
+ .raw_gyro = INV_MPU6050_REG_RAW_GYRO,
+ .raw_accl = INV_MPU6050_REG_RAW_ACCEL,
+ .temperature = INV_MPU6050_REG_TEMPERATURE,
+ .int_enable = INV_MPU6050_REG_INT_ENABLE,
+ .int_status = INV_MPU6050_REG_INT_STATUS,
+ .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
+ .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
+ .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
+ .accl_offset = INV_MPU6500_REG_ACCEL_OFFSET,
+ .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET,
+ .i2c_if = INV_ICM20602_REG_I2C_IF,
+};
+
static const struct inv_mpu6050_reg_map reg_set_6500 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG,
@@ -58,6 +81,7 @@ static const struct inv_mpu6050_reg_map reg_set_6500 = {
.int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
.accl_offset = INV_MPU6500_REG_ACCEL_OFFSET,
.gyro_offset = INV_MPU6050_REG_GYRO_OFFSET,
+ .i2c_if = 0,
};
static const struct inv_mpu6050_reg_map reg_set_6050 = {
@@ -78,6 +102,7 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = {
.int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
.accl_offset = INV_MPU6050_REG_ACCEL_OFFSET,
.gyro_offset = INV_MPU6050_REG_GYRO_OFFSET,
+ .i2c_if = 0,
};
static const struct inv_mpu6050_chip_config chip_config_6050 = {
@@ -140,6 +165,12 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6500,
.config = &chip_config_6050,
},
+ {
+ .whoami = INV_ICM20602_WHOAMI_VALUE,
+ .name = "ICM20602",
+ .reg = &reg_set_icm20602,
+ .config = &chip_config_6050,
+ },
};
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index dd758e3d403d..e46eb4ddea21 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -127,6 +127,7 @@ static int inv_mpu_probe(struct i2c_client *client,
st = iio_priv(dev_get_drvdata(&client->dev));
switch (st->chip_type) {
case INV_ICM20608:
+ case INV_ICM20602:
/* no i2c auxiliary bus on the chip */
break;
default:
@@ -179,6 +180,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
+ {"icm20602", INV_ICM20602},
{}
};
@@ -213,6 +215,10 @@ static const struct of_device_id inv_of_match[] = {
.compatible = "invensense,icm20608",
.data = (void *)INV_ICM20608
},
+ {
+ .compatible = "invensense,icm20602",
+ .data = (void *)INV_ICM20602
+ },
{ }
};
MODULE_DEVICE_TABLE(of, inv_of_match);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 6bcc11fc1b88..325afd9f5f61 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -44,6 +44,7 @@
* @int_pin_cfg; Controls interrupt pin configuration.
* @accl_offset: Controls the accelerometer calibration offset.
* @gyro_offset: Controls the gyroscope calibration offset.
+ * @i2c_if:	Controls the i2c interface.
*/
struct inv_mpu6050_reg_map {
u8 sample_rate_div;
@@ -65,6 +66,7 @@ struct inv_mpu6050_reg_map {
u8 int_pin_cfg;
u8 accl_offset;
u8 gyro_offset;
+ u8 i2c_if;
};
/*device enum */
@@ -77,6 +79,7 @@ enum inv_devices {
INV_MPU9250,
INV_MPU9255,
INV_ICM20608,
+ INV_ICM20602,
INV_NUM_PARTS
};
@@ -195,6 +198,10 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
+/* ICM20602 register */
+#define INV_ICM20602_REG_I2C_IF 0x70
+#define INV_ICM20602_BIT_I2C_IF_DIS 0x40
+
#define INV_MPU6050_REG_FIFO_COUNT_H 0x72
#define INV_MPU6050_REG_FIFO_R_W 0x74
@@ -261,6 +268,7 @@ struct inv_mpu6050_state {
#define INV_MPU9255_WHOAMI_VALUE 0x73
#define INV_MPU6515_WHOAMI_VALUE 0x74
#define INV_ICM20608_WHOAMI_VALUE 0xAF
+#define INV_ICM20602_WHOAMI_VALUE 0x12
/* scan element definition */
enum inv_mpu6050_scan {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 227f50afff22..a112c3f45f74 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -31,9 +31,14 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
if (ret)
return ret;
- st->chip_config.user_ctrl |= INV_MPU6050_BIT_I2C_IF_DIS;
- ret = regmap_write(st->map, st->reg->user_ctrl,
- st->chip_config.user_ctrl);
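+	/* ICM20602 has a dedicated I2C_IF register; other chips use USER_CTRL */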
+ if (st->reg->i2c_if) {
+ ret = regmap_write(st->map, st->reg->i2c_if,
+ INV_ICM20602_BIT_I2C_IF_DIS);
+ } else {
+ st->chip_config.user_ctrl |= INV_MPU6050_BIT_I2C_IF_DIS;
+ ret = regmap_write(st->map, st->reg->user_ctrl,
+ st->chip_config.user_ctrl);
+ }
if (ret) {
inv_mpu6050_set_power_itg(st, false);
return ret;
@@ -81,6 +86,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu9250", INV_MPU9250},
{"mpu9255", INV_MPU9255},
{"icm20608", INV_ICM20608},
+ {"icm20602", INV_ICM20602},
{}
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index 8e47dccdd40f..66fbcd94642d 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -105,12 +105,10 @@ static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
static int st_lsm6dsx_shub_read_reg(struct st_lsm6dsx_hw *hw, u8 addr,
u8 *data, int len)
{
- const struct st_lsm6dsx_shub_settings *hub_settings;
int err;
mutex_lock(&hw->page_lock);
- hub_settings = &hw->settings->shub_settings;
err = st_lsm6dsx_set_page(hw, true);
if (err < 0)
goto out;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 4f5cd9f60870..4700fd5d8c90 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -87,6 +87,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_GRAVITY] = "gravity",
[IIO_POSITIONRELATIVE] = "positionrelative",
[IIO_PHASE] = "phase",
+ [IIO_MASSCONCENTRATION] = "massconcentration",
};
static const char * const iio_modifier_names[] = {
@@ -127,6 +128,10 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_Q] = "q",
[IIO_MOD_CO2] = "co2",
[IIO_MOD_VOC] = "voc",
+ [IIO_MOD_PM1] = "pm1",
+ [IIO_MOD_PM2P5] = "pm2p5",
+ [IIO_MOD_PM4] = "pm4",
+ [IIO_MOD_PM10] = "pm10",
};
/* relies on pairs of these shared then separate */
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 36f458433480..5190eacfeb0a 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -299,6 +299,16 @@ config MAX44000
To compile this driver as a module, choose M here:
the module will be called max44000.
+config MAX44009
+ tristate "MAX44009 Ambient Light Sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build support for Maxim Integrated's
+ MAX44009 ambient light sensor device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called max44009.
+
config OPT3001
tristate "Texas Instruments OPT3001 Light Sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 286bf3975372..e40794fbb435 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_LTR501) += ltr501.o
obj-$(CONFIG_LV0104CS) += lv0104cs.o
obj-$(CONFIG_MAX44000) += max44000.o
+obj-$(CONFIG_MAX44009) += max44009.o
obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
obj-$(CONFIG_RPR0521) += rpr0521.o
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index b45400f8fef4..846df4dce48c 100644
--- a/drivers/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -95,6 +96,7 @@ struct isl29018_chip {
struct isl29018_scale scale;
int prox_scheme;
bool suspended;
+ struct regulator *vcc_reg;
};
static int isl29018_set_integration_time(struct isl29018_chip *chip,
@@ -708,6 +710,16 @@ static const char *isl29018_match_acpi_device(struct device *dev, int *data)
return dev_name(dev);
}
+static void isl29018_disable_regulator_action(void *_data)
+{
+ struct isl29018_chip *chip = _data;
+ int err;
+
+ err = regulator_disable(chip->vcc_reg);
+ if (err)
+ pr_err("failed to disable isl29018's VCC regulator!\n");
+}
+
static int isl29018_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -742,6 +754,27 @@ static int isl29018_probe(struct i2c_client *client,
chip->scale = isl29018_scales[chip->int_time][0];
chip->suspended = false;
+ chip->vcc_reg = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(chip->vcc_reg)) {
+ err = PTR_ERR(chip->vcc_reg);
+ if (err != -EPROBE_DEFER)
+ dev_err(&client->dev, "failed to get VCC regulator!\n");
+ return err;
+ }
+
+ err = regulator_enable(chip->vcc_reg);
+ if (err) {
+ dev_err(&client->dev, "failed to enable VCC regulator!\n");
+ return err;
+ }
+
+ err = devm_add_action_or_reset(&client->dev, isl29018_disable_regulator_action,
+ chip);
+ if (err) {
+ dev_err(&client->dev, "failed to setup regulator cleanup action!\n");
+ return err;
+ }
+
chip->regmap = devm_regmap_init_i2c(client,
isl29018_chip_info_tbl[dev_id].regmap_cfg);
if (IS_ERR(chip->regmap)) {
@@ -768,6 +801,7 @@ static int isl29018_probe(struct i2c_client *client,
static int isl29018_suspend(struct device *dev)
{
struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev));
+ int ret;
mutex_lock(&chip->lock);
@@ -777,10 +811,13 @@ static int isl29018_suspend(struct device *dev)
* So we do not have much to do here.
*/
chip->suspended = true;
+ ret = regulator_disable(chip->vcc_reg);
+ if (ret)
+ dev_err(dev, "failed to disable VCC regulator\n");
mutex_unlock(&chip->lock);
- return 0;
+ return ret;
}
static int isl29018_resume(struct device *dev)
@@ -790,6 +827,13 @@ static int isl29018_resume(struct device *dev)
mutex_lock(&chip->lock);
+ err = regulator_enable(chip->vcc_reg);
+ if (err) {
+ dev_err(dev, "failed to enable VCC regulator\n");
+ mutex_unlock(&chip->lock);
+ return err;
+ }
+
err = isl29018_chip_init(chip);
if (!err)
chip->suspended = false;
diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
new file mode 100644
index 000000000000..00ba15499638
--- /dev/null
+++ b/drivers/iio/light/max44009.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * max44009.c - Support for MAX44009 Ambient Light Sensor
+ *
+ * Copyright (c) 2019 Robert Eshleman <bobbyeshleman@gmail.com>
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX44009.pdf
+ *
+ * TODO: Support continuous mode and configuring from manual mode to
+ * automatic mode.
+ *
+ * Default I2C address: 0x4a
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/util_macros.h>
+
+#define MAX44009_DRV_NAME "max44009"
+
+/* Registers in datasheet order */
+#define MAX44009_REG_INT_STATUS 0x0
+#define MAX44009_REG_INT_EN 0x1
+#define MAX44009_REG_CFG 0x2
+#define MAX44009_REG_LUX_HI 0x3
+#define MAX44009_REG_LUX_LO 0x4
+#define MAX44009_REG_UPPER_THR 0x5
+#define MAX44009_REG_LOWER_THR 0x6
+#define MAX44009_REG_THR_TIMER 0x7
+
+#define MAX44009_CFG_TIM_MASK GENMASK(2, 0)
+#define MAX44009_CFG_MAN_MODE_MASK BIT(6)
+
+/* The maximum rising threshold for the max44009 */
+#define MAX44009_MAXIMUM_THRESHOLD 7520256
+
+#define MAX44009_THRESH_EXP_MASK (0xf << 4)
+#define MAX44009_THRESH_EXP_RSHIFT 4
+#define MAX44009_THRESH_MANT_LSHIFT 4
+#define MAX44009_THRESH_MANT_MASK 0xf
+
+#define MAX44009_UPPER_THR_MINIMUM 15
+
+/* The max44009 always scales raw readings by 0.045 and is non-configurable */
+#define MAX44009_SCALE_NUMERATOR 45
+#define MAX44009_SCALE_DENOMINATOR 1000
+
+/* The fixed-point fractional multiplier for de-scaling threshold values */
+#define MAX44009_FRACT_MULT 1000000
+
+static const u32 max44009_int_time_ns_array[] = {
+ 800000000,
+ 400000000,
+ 200000000,
+ 100000000,
+ 50000000, /* Manual mode only */
+ 25000000, /* Manual mode only */
+ 12500000, /* Manual mode only */
+ 6250000, /* Manual mode only */
+};
+
+static const char max44009_int_time_str[] =
+ "0.8 "
+ "0.4 "
+ "0.2 "
+ "0.1 "
+ "0.05 "
+ "0.025 "
+ "0.0125 "
+ "0.00625";
+
+struct max44009_data {
+ struct i2c_client *client;
+ struct mutex lock;
+};
+
+static const struct iio_event_spec max44009_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec max44009_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .event_spec = max44009_event_spec,
+ .num_event_specs = ARRAY_SIZE(max44009_event_spec),
+ },
+};
+
+static int max44009_read_int_time(struct max44009_data *data)
+{
+ int ret = i2c_smbus_read_byte_data(data->client, MAX44009_REG_CFG);
+
+ if (ret < 0)
+ return ret;
+
+ return max44009_int_time_ns_array[ret & MAX44009_CFG_TIM_MASK];
+}
+
+static int max44009_write_int_time(struct max44009_data *data,
+ int val, int val2)
+{
+ struct i2c_client *client = data->client;
+ int ret, int_time, config;
+ s64 ns;
+
+ ns = val * NSEC_PER_SEC + val2;
+ int_time = find_closest_descending(
+ ns,
+ max44009_int_time_ns_array,
+ ARRAY_SIZE(max44009_int_time_ns_array));
+
+ ret = i2c_smbus_read_byte_data(client, MAX44009_REG_CFG);
+ if (ret < 0)
+ return ret;
+
+ config = ret;
+ config &= int_time;
+
+ /*
+ * To set the integration time, the device must also be in manual
+ * mode.
+ */
+ config |= MAX44009_CFG_MAN_MODE_MASK;
+
+ return i2c_smbus_write_byte_data(client, MAX44009_REG_CFG, config);
+}
+
+static int max44009_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct max44009_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_INT_TIME && chan->type == IIO_LIGHT) {
+ mutex_lock(&data->lock);
+ ret = max44009_write_int_time(data, val, val2);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ return -EINVAL;
+}
+
+static int max44009_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int max44009_lux_raw(u8 hi, u8 lo)
+{
+ int mantissa;
+ int exponent;
+
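+	/*
+	 * Example: hi = 0x56, lo = 0x05 gives exponent = 5 and
+	 * mantissa = 0x65 = 101, i.e. a raw count of 101 << 5 = 3232
+	 * (145.44 lux after the fixed 0.045 scaling).
+	 */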
+ /*
+ * The mantissa consists of the low nibble of the Lux High Byte
+ * and the low nibble of the Lux Low Byte.
+ */
+ mantissa = ((hi & 0xf) << 4) | (lo & 0xf);
+
+ /* The exponent byte is just the upper nibble of the Lux High Byte */
+ exponent = (hi >> 4) & 0xf;
+
+ /*
+ * The exponent value is base 2 to the power of the raw exponent byte.
+ */
+ exponent = 1 << exponent;
+
+ return exponent * mantissa;
+}
+
+#define MAX44009_READ_LUX_XFER_LEN (4)
+
+static int max44009_read_lux_raw(struct max44009_data *data)
+{
+ int ret;
+ u8 hireg = MAX44009_REG_LUX_HI;
+ u8 loreg = MAX44009_REG_LUX_LO;
+ u8 lo = 0;
+ u8 hi = 0;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = data->client->addr,
+ .flags = 0,
+ .len = sizeof(hireg),
+ .buf = &hireg,
+ },
+ {
+ .addr = data->client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(hi),
+ .buf = &hi,
+ },
+ {
+ .addr = data->client->addr,
+ .flags = 0,
+ .len = sizeof(loreg),
+ .buf = &loreg,
+ },
+ {
+ .addr = data->client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(lo),
+ .buf = &lo,
+ }
+ };
+
+ /*
+ * Use i2c_transfer instead of smbus read because i2c_transfer
+ * does NOT use a stop bit between address write and data read.
+ * Using a stop bit causes disjoint upper/lower byte reads and
+ * reduces accuracy.
+ */
+ ret = i2c_transfer(data->client->adapter,
+ msgs, MAX44009_READ_LUX_XFER_LEN);
+
+ if (ret != MAX44009_READ_LUX_XFER_LEN)
+ return -EIO;
+
+ return max44009_lux_raw(hi, lo);
+}
+
+static int max44009_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct max44009_data *data = iio_priv(indio_dev);
+ int lux_raw;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = max44009_read_lux_raw(data);
+ if (ret < 0)
+ return ret;
+ lux_raw = ret;
+
+ *val = lux_raw * MAX44009_SCALE_NUMERATOR;
+ *val2 = MAX44009_SCALE_DENOMINATOR;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = max44009_read_int_time(data);
+ if (ret < 0)
+ return ret;
+
+ *val2 = ret;
+ *val = 0;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static IIO_CONST_ATTR(illuminance_integration_time_available,
+ max44009_int_time_str);
+
+static struct attribute *max44009_attributes[] = {
+ &iio_const_attr_illuminance_integration_time_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group max44009_attribute_group = {
+ .attrs = max44009_attributes,
+};
+
+static int max44009_threshold_byte_from_fraction(int integral, int fractional)
+{
+ int mantissa, exp;
+
+ if ((integral <= 0 && fractional <= 0) ||
+ integral > MAX44009_MAXIMUM_THRESHOLD ||
+ (integral == MAX44009_MAXIMUM_THRESHOLD && fractional != 0))
+ return -EINVAL;
+
+ /* Reverse scaling of fixed-point integral */
+ mantissa = integral * MAX44009_SCALE_DENOMINATOR;
+ mantissa /= MAX44009_SCALE_NUMERATOR;
+
+ /* Reverse scaling of fixed-point fractional */
+ mantissa += fractional / MAX44009_FRACT_MULT *
+ (MAX44009_SCALE_DENOMINATOR / MAX44009_SCALE_NUMERATOR);
+
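+	/*
+	 * Find the smallest exponent that brings the de-scaled value into
+	 * eight bits, then keep the top four mantissa bits to match the
+	 * register's 4-bit exponent / 4-bit mantissa layout.
+	 */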
+ for (exp = 0; mantissa > 0xff; exp++)
+ mantissa >>= 1;
+
+ mantissa >>= 4;
+ mantissa &= 0xf;
+ exp <<= 4;
+
+ return exp | mantissa;
+}
+
+static int max44009_get_thr_reg(enum iio_event_direction dir)
+{
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return MAX44009_REG_UPPER_THR;
+ case IIO_EV_DIR_FALLING:
+ return MAX44009_REG_LOWER_THR;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int max44009_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct max44009_data *data = iio_priv(indio_dev);
+ int reg, threshold;
+
+ if (info != IIO_EV_INFO_VALUE || chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ threshold = max44009_threshold_byte_from_fraction(val, val2);
+ if (threshold < 0)
+ return threshold;
+
+ reg = max44009_get_thr_reg(dir);
+ if (reg < 0)
+ return reg;
+
+ return i2c_smbus_write_byte_data(data->client, reg, threshold);
+}
+
+static int max44009_read_threshold(struct iio_dev *indio_dev,
+ enum iio_event_direction dir)
+{
+ struct max44009_data *data = iio_priv(indio_dev);
+ int byte, reg;
+ int mantissa, exponent;
+
+ reg = max44009_get_thr_reg(dir);
+ if (reg < 0)
+ return reg;
+
+ byte = i2c_smbus_read_byte_data(data->client, reg);
+ if (byte < 0)
+ return byte;
+
+ mantissa = byte & MAX44009_THRESH_MANT_MASK;
+ mantissa <<= MAX44009_THRESH_MANT_LSHIFT;
+
+ /*
+	 * For the upper threshold, always add the minimum upper threshold
+	 * value to the shifted byte value (see datasheet).
+ */
+ if (dir == IIO_EV_DIR_RISING)
+ mantissa += MAX44009_UPPER_THR_MINIMUM;
+
+ /*
+ * Exponent is base 2 to the power of the threshold exponent byte
+ * value
+ */
+ exponent = byte & MAX44009_THRESH_EXP_MASK;
+ exponent >>= MAX44009_THRESH_EXP_RSHIFT;
+
+ return (1 << exponent) * mantissa;
+}
+
+static int max44009_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ int ret;
+ int threshold;
+
+ if (chan->type != IIO_LIGHT || type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ ret = max44009_read_threshold(indio_dev, dir);
+ if (ret < 0)
+ return ret;
+ threshold = ret;
+
+ *val = threshold * MAX44009_SCALE_NUMERATOR;
+ *val2 = MAX44009_SCALE_DENOMINATOR;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int max44009_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct max44009_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_LIGHT || type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ MAX44009_REG_INT_EN, state);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set device to trigger interrupt immediately upon exceeding
+ * the threshold limit.
+ */
+ return i2c_smbus_write_byte_data(data->client,
+ MAX44009_REG_THR_TIMER, 0);
+}
+
+static int max44009_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct max44009_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_LIGHT || type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ return i2c_smbus_read_byte_data(data->client, MAX44009_REG_INT_EN);
+}
+
+static const struct iio_info max44009_info = {
+ .read_raw = max44009_read_raw,
+ .write_raw = max44009_write_raw,
+ .write_raw_get_fmt = max44009_write_raw_get_fmt,
+ .read_event_value = max44009_read_event_value,
+ .read_event_config = max44009_read_event_config,
+ .write_event_value = max44009_write_event_value,
+ .write_event_config = max44009_write_event_config,
+ .attrs = &max44009_attribute_group,
+};
+
+static irqreturn_t max44009_threaded_irq_handler(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct max44009_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, MAX44009_REG_INT_STATUS);
+ if (ret) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int max44009_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max44009_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &max44009_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = MAX44009_DRV_NAME;
+ indio_dev->channels = max44009_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max44009_channels);
+ mutex_init(&data->lock);
+
+ /* Clear any stale interrupt bit */
+ ret = i2c_smbus_read_byte_data(client, MAX44009_REG_CFG);
+ if (ret < 0)
+ return ret;
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL,
+ max44009_threaded_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT | IRQF_SHARED,
+ "max44009_event",
+ indio_dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id max44009_id[] = {
+ { "max44009", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max44009_id);
+
+static struct i2c_driver max44009_driver = {
+ .driver = {
+ .name = MAX44009_DRV_NAME,
+ },
+ .probe = max44009_probe,
+ .id_table = max44009_id,
+};
+module_i2c_driver(max44009_driver);
+
+static const struct of_device_id max44009_of_match[] = {
+ { .compatible = "maxim,max44009" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max44009_of_match);
+
+MODULE_AUTHOR("Robert Eshleman <bobbyeshleman@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MAX44009 ambient light sensor driver");
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index f063355480ba..dd990cdb04a8 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -20,6 +20,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
#define MAG3110_STATUS 0x00
#define MAG3110_OUT_X 0x01 /* MSB first */
@@ -56,6 +57,8 @@ struct mag3110_data {
struct mutex lock;
u8 ctrl_reg1;
int sleep_val;
+ struct regulator *vdd_reg;
+ struct regulator *vddio_reg;
};
static int mag3110_request(struct mag3110_data *data)
@@ -469,17 +472,50 @@ static int mag3110_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int ret;
- ret = i2c_smbus_read_byte_data(client, MAG3110_WHO_AM_I);
- if (ret < 0)
- return ret;
- if (ret != MAG3110_DEVICE_ID)
- return -ENODEV;
-
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
+
+ data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd_reg)) {
+ if (PTR_ERR(data->vdd_reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_err(&client->dev, "failed to get VDD regulator!\n");
+ return PTR_ERR(data->vdd_reg);
+ }
+
+ data->vddio_reg = devm_regulator_get(&client->dev, "vddio");
+ if (IS_ERR(data->vddio_reg)) {
+ if (PTR_ERR(data->vddio_reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_err(&client->dev, "failed to get VDDIO regulator!\n");
+ return PTR_ERR(data->vddio_reg);
+ }
+
+ ret = regulator_enable(data->vdd_reg);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable VDD regulator!\n");
+ return ret;
+ }
+
+ ret = regulator_enable(data->vddio_reg);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable VDDIO regulator!\n");
+ goto disable_regulator_vdd;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, MAG3110_WHO_AM_I);
+ if (ret < 0)
+ goto disable_regulators;
+ if (ret != MAG3110_DEVICE_ID) {
+ ret = -ENODEV;
+ goto disable_regulators;
+ }
+
data->client = client;
mutex_init(&data->lock);
@@ -499,7 +535,7 @@ static int mag3110_probe(struct i2c_client *client,
ret = mag3110_change_config(data, MAG3110_CTRL_REG1, data->ctrl_reg1);
if (ret < 0)
- return ret;
+ goto disable_regulators;
ret = i2c_smbus_write_byte_data(client, MAG3110_CTRL_REG2,
MAG3110_CTRL_AUTO_MRST_EN);
@@ -520,16 +556,24 @@ buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
standby_on_error:
mag3110_standby(iio_priv(indio_dev));
+disable_regulators:
+ regulator_disable(data->vddio_reg);
+disable_regulator_vdd:
+ regulator_disable(data->vdd_reg);
+
return ret;
}
static int mag3110_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct mag3110_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
mag3110_standby(iio_priv(indio_dev));
+ regulator_disable(data->vddio_reg);
+ regulator_disable(data->vdd_reg);
return 0;
}
@@ -537,14 +581,48 @@ static int mag3110_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int mag3110_suspend(struct device *dev)
{
- return mag3110_standby(iio_priv(i2c_get_clientdata(
+ struct mag3110_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ int ret;
+
+ ret = mag3110_standby(iio_priv(i2c_get_clientdata(
to_i2c_client(dev))));
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(data->vddio_reg);
+ if (ret) {
+ dev_err(dev, "failed to disable VDDIO regulator\n");
+ return ret;
+ }
+
+ ret = regulator_disable(data->vdd_reg);
+ if (ret) {
+ dev_err(dev, "failed to disable VDD regulator\n");
+ return ret;
+ }
+
+ return 0;
}
static int mag3110_resume(struct device *dev)
{
struct mag3110_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
+ int ret;
+
+ ret = regulator_enable(data->vdd_reg);
+ if (ret) {
+ dev_err(dev, "failed to enable VDD regulator\n");
+ return ret;
+ }
+
+ ret = regulator_enable(data->vddio_reg);
+ if (ret) {
+ dev_err(dev, "failed to enable VDDIO regulator\n");
+ regulator_disable(data->vdd_reg);
+ return ret;
+ }
return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
data->ctrl_reg1);
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index eaa7cfcb4c2a..efeb89f3df71 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -165,7 +165,7 @@ config IIO_ST_PRESS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics pressure
- sensors: LPS001WP, LPS25H, LPS331AP, LPS22HB.
+ sensors: LPS001WP, LPS25H, LPS331AP, LPS22HB, LPS22HH.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index e67eb0d971bf..57946605f3ba 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -21,6 +21,7 @@ enum st_press_type {
LPS22HB,
LPS33HW,
LPS35HW,
+ LPS22HH,
ST_PRESS_MAX,
};
@@ -30,6 +31,7 @@ enum st_press_type {
#define LPS22HB_PRESS_DEV_NAME "lps22hb"
#define LPS33HW_PRESS_DEV_NAME "lps33hw"
#define LPS35HW_PRESS_DEV_NAME "lps35hw"
+#define LPS22HH_PRESS_DEV_NAME "lps22hh"
/**
* struct st_sensors_platform_data - default press platform data
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 4ddb6cf7d401..38dcdb7c000e 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -492,6 +492,75 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.multi_read_bit = false,
.bootime = 2,
},
+ {
+ /*
+ * CUSTOM VALUES FOR LPS22HH SENSOR
+ * See LPS22HH datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps22hh.pdf
+ */
+ .wai = 0xb3,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LPS22HH_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
+ .num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
+ .odr = {
+ .addr = 0x10,
+ .mask = 0x70,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01 },
+ { .hz = 10, .value = 0x02 },
+ { .hz = 25, .value = 0x03 },
+ { .hz = 50, .value = 0x04 },
+ { .hz = 75, .value = 0x05 },
+ { .hz = 100, .value = 0x06 },
+ { .hz = 200, .value = 0x07 },
+ },
+ },
+ .pw = {
+ .addr = 0x10,
+ .mask = 0x70,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 3 of LPS22HH datasheet.
+ */
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LPS22HB_LSB_PER_CELSIUS,
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x10,
+ .mask = BIT(1),
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x12,
+ .mask = BIT(2),
+ .addr_od = 0x11,
+ .mask_od = BIT(5),
+ },
+ .addr_ihl = 0x11,
+ .mask_ihl = BIT(6),
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x10,
+ .value = BIT(0),
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
};
static int st_press_write_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 2026a1012012..a60849dd4ea7 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -45,6 +45,10 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps35hw",
.data = LPS35HW_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps22hh",
+ .data = LPS22HH_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -69,6 +73,7 @@ static const struct i2c_device_id st_press_id_table[] = {
{ LPS22HB_PRESS_DEV_NAME, LPS22HB },
{ LPS33HW_PRESS_DEV_NAME, LPS33HW },
{ LPS35HW_PRESS_DEV_NAME, LPS35HW },
+ { LPS22HH_PRESS_DEV_NAME, LPS22HH },
{},
};
MODULE_DEVICE_TABLE(i2c, st_press_id_table);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 9a3441b128e7..79a12ed46e54 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -49,6 +49,10 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps35hw",
.data = LPS35HW_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps22hh",
+ .data = LPS22HH_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -93,6 +97,7 @@ static const struct spi_device_id st_press_id_table[] = {
{ LPS22HB_PRESS_DEV_NAME },
{ LPS33HW_PRESS_DEV_NAME },
{ LPS35HW_PRESS_DEV_NAME },
+ { LPS22HH_PRESS_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_press_id_table);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 63a7cc00bae0..84f077b2b90a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
- rdma_restrack_kadd(&id_priv->res);
+ if (id_priv->res.kern_name)
+ rdma_restrack_kadd(&id_priv->res);
+ else
+ rdma_restrack_uadd(&id_priv->res);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3cd830d52967..616734313f0c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
#endif
struct ib_device *ib_device_get_by_index(u32 ifindex);
-void ib_device_put(struct ib_device *device);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 8872453e26c0..238ec42778ef 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index)
down_read(&lists_rwsem);
device = __ib_device_get_by_index(index);
if (device) {
- /* Do not return a device if unregistration has started. */
- if (!refcount_inc_not_zero(&device->refcount))
+ if (!ib_device_try_get(device))
device = NULL;
}
up_read(&lists_rwsem);
return device;
}
+/**
+ * ib_device_put - Release IB device reference
+ * @device: device whose reference is to be released
+ *
+ * ib_device_put() releases the reference to the IB device to allow it to be
+ * unregistered and eventually freed.
+ */
void ib_device_put(struct ib_device *device)
{
if (refcount_dec_and_test(&device->refcount))
complete(&device->unreg_completion);
}
+EXPORT_SYMBOL(ib_device_put);
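
The intended pairing with ib_device_try_get() is the pattern used later in this series (for example in the mlx5 ODP prefetch work); a rough sketch of a caller, with do_work() as a hypothetical placeholder:

	if (ib_device_try_get(device)) {
		/* unregistration cannot complete while the reference is held */
		do_work(device);		/* hypothetical helper */
		ib_device_put(device);
	}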
static struct ib_device *__ib_device_get_by_name(const char *name)
{
@@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size)
rwlock_init(&device->client_data_lock);
INIT_LIST_HEAD(&device->client_data_list);
INIT_LIST_HEAD(&device->port_list);
- refcount_set(&device->refcount, 1);
init_completion(&device->unreg_completion);
return device;
@@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name,
goto cg_cleanup;
}
+ refcount_set(&device->refcount, 1);
device->reg_state = IB_DEV_REGISTERED;
list_for_each_entry(client, &client_list, list)
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e600fc23ae62..3c97a8b6bf1e 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
goto err;
- if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
- pd->unsafe_global_rkey))
- goto err;
if (fill_res_name_pid(msg, res))
goto err;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index be6b8e1257d0..69f8db66925e 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
enum uverbs_obj_access access,
bool commit);
+int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
+
void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index a4ec43093cb3..acb882f279cb 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
umem->writable = 1;
umem->is_odp = 1;
odp_data->per_mm = per_mm;
+ umem->owning_mm = per_mm->mm;
+ mmgrab(umem->owning_mm);
mutex_init(&odp_data->umem_mutex);
init_completion(&odp_data->notifier_completion);
@@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
out_page_list:
vfree(odp_data->page_list);
out_odp_data:
+ mmdrop(umem->owning_mm);
kfree(odp_data);
return ERR_PTR(ret);
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6b12cc5f97b2..3317300ab036 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
{
int ret;
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
+ return uverbs_copy_to_struct_or_zero(
+ attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
+
if (copy_to_user(attrs->ucore.outbuf, resp,
min(attrs->ucore.outlen, resp_len)))
return -EFAULT;
@@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
goto out_put;
}
+ if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
+ ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
+
ret = 0;
out_put:
@@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
return -ENOMEM;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp)
+ if (!qp) {
+ ret = -EINVAL;
goto out;
+ }
is_ud = qp->qp_type == IB_QPT_UD;
sg_ind = 0;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 8c81ff698052..0ca04d224015 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
0, uattr->len - len);
}
+static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
+ const struct uverbs_attr *attr)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+ u16 flags;
+
+ flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
+ UVERBS_ATTR_F_VALID_OUTPUT;
+ if (put_user(flags,
+ &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
+ return -EFAULT;
+ return 0;
+}
+
static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
const struct uverbs_api_attr *attr_uapi,
struct uverbs_objs_arr_attr *attr,
@@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
}
/*
+ * Until the drivers are revised to use the bundle directly, we have to
+ * assume that the driver wrote to its UHW_OUT and flag userspace
+ * appropriately.
+ */
+ if (!ret && pbundle->method_elm->has_udata) {
+ const struct uverbs_attr *attr =
+ uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
+
+ if (!IS_ERR(attr))
+ ret = uverbs_set_output(&pbundle->bundle, attr);
+ }
+
+ /*
* EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
* not invoke the method because the request is not supported. No
* other cases should return this code.
@@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
const void *from, size_t size)
{
- struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
- u16 flags;
size_t min_size;
if (IS_ERR(attr))
@@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
return -EFAULT;
- flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
- UVERBS_ATTR_F_VALID_OUTPUT;
- if (put_user(flags,
- &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
- return -EFAULT;
-
- return 0;
+ return uverbs_set_output(bundle, attr);
}
EXPORT_SYMBOL(uverbs_copy_to);
+
+/*
+ * This is only used if the caller has directly used copy_to_user to write the
+ * data. It signals to user space that the buffer is filled in.
+ */
+int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ return uverbs_set_output(bundle, attr);
+}
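
For reference, the poll-CQ hunk earlier in this series consumes the new helper in roughly this shape, flagging the extended output attribute only when it is present:

	if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
		ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);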
+
int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, s64 lower_bound, u64 upper_bound,
s64 *def_val)
@@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
{
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
- if (clear_user(u64_to_user_ptr(attr->ptr_attr.data),
- attr->ptr_attr.len))
- return -EFAULT;
+ if (size < attr->ptr_attr.len) {
+ if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
+ attr->ptr_attr.len - size))
+ return -EFAULT;
+ }
return uverbs_copy_to(bundle, idx, from, size);
}
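
The change above clears only the tail of the user attribute that the response does not cover. Unrolled with hypothetical sizes (a 32-byte attribute and a 24-byte response), the new path is equivalent to:

	/* Hypothetical sizes: attr->ptr_attr.len = 32, size = 24 */
	clear_user(u64_to_user_ptr(attr->ptr_attr.data) + 24, 32 - 24);	/* zero the 8-byte tail */
	uverbs_copy_to(bundle, idx, from, 24);				/* then copy the response */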
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb0007aa0c27..5f366838b7ff 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
+ if (file->async_file)
+ kref_put(&file->async_file->ref,
+ ib_uverbs_release_async_event_file);
put_device(&file->device->dev);
kfree(file);
}
@@ -690,6 +693,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
buf += sizeof(hdr);
+ memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
bundle.ufile = file;
if (!method_elm->is_ex) {
size_t in_len = hdr.in_words * 4 - sizeof(hdr);
@@ -963,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
/* Get an arbitrary mm pointer that hasn't been cleaned yet */
mutex_lock(&ufile->umap_lock);
- if (!list_empty(&ufile->umaps)) {
- mm = list_first_entry(&ufile->umaps,
- struct rdma_umap_priv, list)
- ->vma->vm_mm;
- mmget(mm);
+ while (!list_empty(&ufile->umaps)) {
+ int ret;
+
+ priv = list_first_entry(&ufile->umaps,
+ struct rdma_umap_priv, list);
+ mm = priv->vma->vm_mm;
+ ret = mmget_not_zero(mm);
+ if (!ret) {
+ list_del_init(&priv->list);
+ mm = NULL;
+ continue;
+ }
+ break;
}
mutex_unlock(&ufile->umap_lock);
if (!mm)
@@ -1095,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
list_del_init(&file->list);
mutex_unlock(&file->device->lists_mutex);
- if (file->async_file)
- kref_put(&file->async_file->ref,
- ib_uverbs_release_async_event_file);
-
kref_put(&file->ref, ib_uverbs_release_file);
return 0;
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 5030ec480370..2a3f2f01028d 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr,
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
struct uverbs_attr_bundle *attrs)
{
- struct ib_device *ib_dev = attrs->ufile->device->ib_dev;
+ struct ib_device *ib_dev;
struct ib_port_attr attr = {};
struct ib_uverbs_query_port_resp_ex resp = {};
+ struct ib_ucontext *ucontext;
int ret;
u8 port_num;
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
/* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
if (!ib_dev->ops.query_port)
return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index 18f5ed082f41..19982a4a9bba 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,7 +1,6 @@
config INFINIBAND_BNXT_RE
tristate "Broadcom Netxtreme HCA support"
depends on ETHERNET && NETDEVICES && PCI && INET && DCB
- depends on MAY_USE_DEVLINK
select NET_VENDOR_BROADCOM
select BNXT
---help---
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 326805461265..19551aa43850 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
return NULL;
sbuf->size = size;
- sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
- &sbuf->dma_addr, GFP_ATOMIC);
+ sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
+ &sbuf->dma_addr, GFP_ATOMIC);
if (!sbuf->sb)
goto bail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 59eeac55626f..57d4951679cb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
if (!sghead) {
for (i = 0; i < pages; i++) {
- pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
- pbl->pg_size,
- &pbl->pg_map_arr[i],
- GFP_KERNEL);
+ pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
if (!pbl->pg_arr[i])
goto fail;
pbl->pg_count++;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index df4f7a3f043d..8ac72ac7cbac 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
if (!wq->sq)
goto err3;
- wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- depth * sizeof(union t3_wr),
- &(wq->dma_addr), GFP_KERNEL);
+ wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+ depth * sizeof(union t3_wr),
+ &(wq->dma_addr), GFP_KERNEL);
if (!wq->queue)
goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index c13c0ba30f63..d499cd61c0e8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
int err;
+ unsigned int factor;
c4iw_init_dev_ucontext(rdev, &rdev->uctx);
@@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
return -EINVAL;
}
- rdev->qpmask = rdev->lldi.udb_density - 1;
- rdev->cqmask = rdev->lldi.ucq_density - 1;
+ /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
+ if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
+ pr_err("%s: unsupported sge host page size %u\n",
+ pci_name(rdev->lldi.pdev),
+ rdev->lldi.sge_host_page_size);
+ return -EINVAL;
+ }
+
+ factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
+ rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
+ rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
+
pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
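
The mask scaling in the hunk above is plain arithmetic; with hypothetical numbers it works out as follows:

	/*
	 * Hypothetical example: PAGE_SIZE = 64K, sge_host_page_size = 4K
	 *   factor = 65536 / 4096 = 16
	 *   qpmask = (udb_density * 16) - 1, e.g. 8 * 16 - 1 = 127
	 *   cqmask = (ucq_density * 16) - 1 in the same way
	 */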
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 981ff5cfb5d1..504cf525508f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
T4_RQT_ENTRY_SHIFT;
- wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev,
- wq->memsize, &wq->dma_addr,
- GFP_KERNEL);
+ wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
+ &wq->dma_addr, GFP_KERNEL);
if (!wq->queue)
goto err_free_rqtpool;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 2baf38cc1e23..4fe662c3bbc1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -48,6 +48,7 @@
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/numa.h>
#include "hfi.h"
#include "affinity.h"
@@ -777,7 +778,7 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
_dev_comp_vect_cpu_mask_clean_up(dd, entry);
unlock:
mutex_unlock(&node_affinity.lock);
- dd->node = -1;
+ dd->node = NUMA_NO_NODE;
}
/*
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c22ebc774a6a..f9a7e9d29c8b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vmf = 1;
break;
case STATUS:
- if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
+ if (flags & VM_WRITE) {
ret = -EPERM;
goto done;
}
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 09044905284f..441b06e2a154 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -54,6 +54,7 @@
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
+#include <linux/numa.h>
#include <rdma/rdma_vt.h>
#include "hfi.h"
@@ -899,10 +900,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
goto done;
/* allocate dummy tail memory for all receive contexts */
- dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, sizeof(u64),
- &dd->rcvhdrtail_dummy_dma,
- GFP_KERNEL);
+ dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+ sizeof(u64),
+ &dd->rcvhdrtail_dummy_dma,
+ GFP_KERNEL);
if (!dd->rcvhdrtail_dummy_kvaddr) {
dd_dev_err(dd, "cannot allocate dummy tail memory\n");
@@ -1303,7 +1304,7 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
dd->unit = ret;
list_add(&dd->list, &hfi1_dev_list);
}
- dd->node = -1;
+ dd->node = NUMA_NO_NODE;
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
idr_preload_end();
@@ -1863,9 +1864,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
gfp_flags = GFP_KERNEL;
else
gfp_flags = GFP_USER;
- rcd->rcvhdrq = dma_zalloc_coherent(
- &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
- gfp_flags | __GFP_COMP);
+ rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+ &rcd->rcvhdrq_dma,
+ gfp_flags | __GFP_COMP);
if (!rcd->rcvhdrq) {
dd_dev_err(dd,
@@ -1876,9 +1877,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
- rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE,
- &rcd->rcvhdrqtailaddr_dma, gfp_flags);
+ rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+ PAGE_SIZE,
+ &rcd->rcvhdrqtailaddr_dma,
+ gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
}
@@ -1974,10 +1976,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
while (alloced_bytes < rcd->egrbufs.size &&
rcd->egrbufs.alloced < rcd->egrbufs.count) {
rcd->egrbufs.buffers[idx].addr =
- dma_zalloc_coherent(&dd->pcidev->dev,
- rcd->egrbufs.rcvtid_size,
- &rcd->egrbufs.buffers[idx].dma,
- gfp_flags);
+ dma_alloc_coherent(&dd->pcidev->dev,
+ rcd->egrbufs.rcvtid_size,
+ &rcd->egrbufs.buffers[idx].dma,
+ gfp_flags);
if (rcd->egrbufs.buffers[idx].addr) {
rcd->egrbufs.buffers[idx].len =
rcd->egrbufs.rcvtid_size;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index dd5a5c030066..04126d7e318d 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
set_dev_node(&dd->pcidev->dev, i);
- dd->cr_base[i].va = dma_zalloc_coherent(
- &dd->pcidev->dev,
- bytes,
- &dd->cr_base[i].dma,
- GFP_KERNEL);
+ dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+ bytes,
+ &dd->cr_base[i].dma,
+ GFP_KERNEL);
if (!dd->cr_base[i].va) {
set_dev_node(&dd->pcidev->dev, dd->node);
dd_dev_err(dd,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index b84356e1a4c1..96897a91fb0a 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
timer_setup(&sde->err_progress_check_timer,
sdma_err_progress_check, 0);
- sde->descq = dma_zalloc_coherent(
- &dd->pcidev->dev,
- descq_cnt * sizeof(u64[2]),
- &sde->descq_phys,
- GFP_KERNEL
- );
+ sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+ descq_cnt * sizeof(u64[2]),
+ &sde->descq_phys, GFP_KERNEL);
if (!sde->descq)
goto bail;
sde->tx_ring =
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
/* Allocate memory for DMA of head registers to memory */
- dd->sdma_heads_dma = dma_zalloc_coherent(
- &dd->pcidev->dev,
- dd->sdma_heads_size,
- &dd->sdma_heads_phys,
- GFP_KERNEL
- );
+ dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+ dd->sdma_heads_size,
+ &dd->sdma_heads_phys,
+ GFP_KERNEL);
if (!dd->sdma_heads_dma) {
dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
goto bail;
}
/* Allocate memory for pad */
- dd->sdma_pad_dma = dma_zalloc_coherent(
- &dd->pcidev->dev,
- sizeof(u32),
- &dd->sdma_pad_phys,
- GFP_KERNEL
- );
+ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+ &dd->sdma_pad_phys, GFP_KERNEL);
if (!dd->sdma_pad_dma) {
dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 88242fe95eaa..bf96067876c9 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
- tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 6300033a448f..dac058d3df53 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
buf->npages = 1 << order;
buf->page_shift = page_shift;
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
- buf->direct.buf = dma_zalloc_coherent(dev,
- size, &t, GFP_KERNEL);
+ buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+ GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
@@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf = dma_zalloc_coherent(dev,
- page_size, &t,
- GFP_KERNEL);
+ buf->page_list[i].buf = dma_alloc_coherent(dev,
+ page_size,
+ &t,
+ GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 3a669451cf86..543fa1504cd3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
size = (eq->entries - eqe_alloc) * eq->eqe_size;
}
- eq->buf[i] = dma_zalloc_coherent(dev, size,
+ eq->buf[i] = dma_alloc_coherent(dev, size,
&(eq->buf_dma[i]),
GFP_KERNEL);
if (!eq->buf[i])
@@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
size = (eq->entries - eqe_alloc)
* eq->eqe_size;
}
- eq->buf[idx] = dma_zalloc_coherent(dev, size,
- &(eq->buf_dma[idx]),
- GFP_KERNEL);
+ eq->buf[idx] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[idx]),
+ GFP_KERNEL);
if (!eq->buf[idx])
goto err_dma_alloc_buf;
@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
goto free_cmd_mbox;
}
- eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz,
+ eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
&(eq->buf_list->map),
GFP_KERNEL);
if (!eq->buf_list->buf) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 960b1946c365..12deacf442cf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct hns_roce_ib_create_srq_resp resp = {};
struct hns_roce_srq *srq;
int srq_desc_size;
int srq_buf_size;
@@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
srq->event = hns_roce_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->srqn;
+ resp.srqn = srq->srqn;
if (udata) {
- if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
+ if (ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)))) {
ret = -EFAULT;
- goto err_wrid;
+ goto err_srqc_alloc;
}
}
return &srq->ibsrq;
+err_srqc_alloc:
+ hns_roce_srq_free(hr_dev, srq);
+
err_wrid:
kvfree(srq->wrid);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index a9ea966877f2..59e978141ad4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
if (!mem)
return I40IW_ERR_PARAM;
mem->size = ALIGN(size, alignment);
- mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
- (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
if (!mem->va)
return I40IW_ERR_NO_MEMORY;
return 0;
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index d1de3285fd88..4e9936731867 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -2,7 +2,6 @@ config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
- depends on MAY_USE_DEVLINK
select NET_VENDOR_MELLANOX
select MLX4_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 25439da8976c..936ee1314bcd 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
if (sqp->tx_ring[wire_tx_ix].ah)
- rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
+ mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
sqp->tx_ring[wire_tx_ix].ah = ah;
ib_dma_sync_single_for_cpu(&dev->ib_dev,
sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
if (wc.status == IB_WC_SUCCESS) {
switch (wc.opcode) {
case IB_WC_SEND:
- rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
+ mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
@@ -1931,7 +1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
" status = %d, wrid = 0x%llx\n",
ctx->slave, wc.status, wc.wr_id);
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
- rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
+ mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
= NULL;
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 356bccc715ee..6bcc63aaa50b 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -345,3 +345,40 @@ int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
counter_set_id);
return err;
}
+
+int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
+ u16 opmod, u8 port)
+{
+ int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
+ int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
+ int err = -ENOMEM;
+ void *data;
+ void *resp;
+ u32 *out;
+ u32 *in;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!in || !out)
+ goto out;
+
+ MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
+ MLX5_SET(mad_ifc_in, in, op_mod, opmod);
+ MLX5_SET(mad_ifc_in, in, port, port);
+
+ data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
+ memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ if (err)
+ goto out;
+
+ resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
+ memcpy(outb, resp,
+ MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
+
+out:
+ kfree(out);
+ kfree(in);
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 1e76dc67a369..923a7b93f507 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -63,4 +63,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
u16 uid);
+int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
+ u16 opmod, u8 port);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index e8a1e4498e3f..798591a18484 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
UAPI_DEF_CHAIN_OBJ_TREE(
UVERBS_OBJECT_FLOW,
- &mlx5_ib_fs,
- UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
+ &mlx5_ib_fs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_actions),
{},
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 46a9ddc8ca56..4700cffb5a00 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -3,10 +3,11 @@
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
+#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"
-static const struct mlx5_ib_profile rep_profile = {
+static const struct mlx5_ib_profile vf_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@@ -46,30 +47,16 @@ static const struct mlx5_ib_profile rep_profile = {
};
static int
-mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
-{
- struct mlx5_ib_dev *ibdev;
-
- ibdev = mlx5_ib_rep_to_dev(rep);
- if (!__mlx5_ib_add(ibdev, ibdev->profile))
- return -EINVAL;
- return 0;
-}
-
-static void
-mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
-{
- struct mlx5_ib_dev *ibdev;
-
- ibdev = mlx5_ib_rep_to_dev(rep);
- __mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
-}
-
-static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ const struct mlx5_ib_profile *profile;
struct mlx5_ib_dev *ibdev;
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ profile = &uplink_rep_profile;
+ else
+ profile = &vf_rep_profile;
+
ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
if (!ibdev)
return -ENOMEM;
@@ -78,7 +65,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
ibdev->mdev = dev;
ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
MLX5_CAP_GEN(dev, num_vhca_ports));
- if (!__mlx5_ib_add(ibdev, &rep_profile))
+ if (!__mlx5_ib_add(ibdev, profile))
return -EINVAL;
rep->rep_if[REP_IB].priv = ibdev;
@@ -105,53 +92,23 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
return mlx5_ib_rep_to_dev(rep);
}
-static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
-{
- struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
- int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
- int vport;
-
- for (vport = 1; vport < total_vfs; vport++) {
- struct mlx5_eswitch_rep_if rep_if = {};
-
- rep_if.load = mlx5_ib_vport_rep_load;
- rep_if.unload = mlx5_ib_vport_rep_unload;
- rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
- mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB);
- }
-}
-
-static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
+void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
{
- struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
- int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
- int vport;
-
- for (vport = 1; vport < total_vfs; vport++)
- mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
-}
-
-void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
-{
- struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
struct mlx5_eswitch_rep_if rep_if = {};
- rep_if.load = mlx5_ib_nic_rep_load;
- rep_if.unload = mlx5_ib_nic_rep_unload;
+ rep_if.load = mlx5_ib_vport_rep_load;
+ rep_if.unload = mlx5_ib_vport_rep_unload;
rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
- rep_if.priv = dev;
-
- mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
- mlx5_ib_rep_register_vf_vports(dev);
+ mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
}
-void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
+void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
- struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
- mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
- mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
+ mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
}
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 2ba73636a2fb..798d41e61fb4 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -10,14 +10,16 @@
#include "mlx5_ib.h"
#ifdef CONFIG_MLX5_ESWITCH
+extern const struct mlx5_ib_profile uplink_rep_profile;
+
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
int vport_index);
struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw);
struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
int vport_index);
-void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev);
-void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev);
+void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
+void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq);
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
@@ -48,8 +50,8 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
return NULL;
}
-static inline void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev) {}
-static inline void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev) {}
+static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {}
static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 558638468edb..6c529e6f3a01 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -36,6 +36,7 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
+#include "cmd.h"
enum {
MLX5_IB_VENDOR_CLASS1 = 0x9,
@@ -51,9 +52,10 @@ static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
return dev->mdev->port_caps[port_num - 1].has_smi;
}
-int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
- u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const void *in_mad, void *response_mad)
+static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
+ int ignore_bkey, u8 port, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const void *in_mad,
+ void *response_mad)
{
u8 op_modifier = 0;
@@ -68,7 +70,8 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
if (ignore_bkey || !in_wc)
op_modifier |= 0x2;
- return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
+ return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
+ port);
}
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 94fe253d4956..581ae11e2fc9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -331,8 +331,8 @@ out:
spin_unlock(&port->mp.mpi_lock);
}
-static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
- u8 *active_width)
+static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+ u8 *active_width)
{
switch (eth_proto_oper) {
case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
@@ -389,10 +389,66 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0;
}
+static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+ u8 *active_width)
+{
+ switch (eth_proto_oper) {
+ case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_DDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+ case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+ u8 *active_width, bool ext)
+{
+ return ext ?
+ translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
+ active_width) :
+ translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
+ active_width);
+}
+
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
struct mlx5_core_dev *mdev;
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
@@ -400,6 +456,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
u16 qkey_viol_cntr;
u32 eth_prot_oper;
u8 mdev_port_num;
+ bool ext;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
@@ -416,16 +473,18 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
*/
- err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
- mdev_port_num);
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ mdev_port_num);
if (err)
goto out;
+ ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+ eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_QDR;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
- &props->active_width);
+ &props->active_width, ext);
props->port_cap_flags |= IB_PORT_CM_SUP;
props->ip_gids = true;
@@ -6386,7 +6445,7 @@ static const struct mlx5_ib_profile pf_profile = {
mlx5_ib_stage_delay_drop_cleanup),
};
-static const struct mlx5_ib_profile nic_rep_profile = {
+const struct mlx5_ib_profile uplink_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
@@ -6479,6 +6538,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
printk_once(KERN_INFO "%s", mlx5_version);
+ if (MLX5_ESWITCH_MANAGER(mdev) &&
+ mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ mlx5_ib_register_vport_reps(mdev);
+ return mdev;
+ }
+
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -6493,14 +6558,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
- if (MLX5_ESWITCH_MANAGER(mdev) &&
- mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
- dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
- dev->profile = &nic_rep_profile;
- mlx5_ib_register_vport_reps(dev);
- return dev;
- }
-
return __mlx5_ib_add(dev, &pf_profile);
}
@@ -6509,6 +6566,11 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_multiport_info *mpi;
struct mlx5_ib_dev *dev;
+ if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
+ mlx5_ib_unregister_vport_reps(mdev);
+ return;
+ }
+
if (mlx5_core_is_mp_slave(mdev)) {
mpi = context;
mutex_lock(&mlx5_ib_multiport_mutex);
@@ -6520,10 +6582,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
}
dev = context;
- if (dev->profile == &nic_rep_profile)
- mlx5_ib_unregister_vport_reps(dev);
- else
- __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
ib_dealloc_device((struct ib_device *)dev);
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b06d3b1efea8..eedba0d2ec4b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -587,6 +587,7 @@ struct mlx5_ib_mr {
struct mlx5_ib_mr *parent;
atomic_t num_leaf_free;
wait_queue_head_t q_leaf_free;
+ struct mlx5_async_work cb_work;
};
struct mlx5_ib_mw {
@@ -944,6 +945,7 @@ struct mlx5_ib_dev {
struct mlx5_memic memic;
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
+ struct mlx5_async_ctx async_ctx;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1038,9 +1040,6 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
-int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
- u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
u32 flags, struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index fd6ea1f75085..bf2b6ea23851 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -123,9 +123,10 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
}
#endif
-static void reg_mr_callback(int status, void *context)
+static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
- struct mlx5_ib_mr *mr = context;
+ struct mlx5_ib_mr *mr =
+ container_of(context, struct mlx5_ib_mr, cb_work);
struct mlx5_ib_dev *dev = mr->dev;
struct mlx5_mr_cache *cache = &dev->cache;
int c = order2idx(dev, mr->order);
@@ -216,9 +217,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
ent->pending++;
spin_unlock_irq(&ent->lock);
err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
- in, inlen,
+ &dev->async_ctx, in, inlen,
mr->out, sizeof(mr->out),
- reg_mr_callback, mr);
+ reg_mr_callback, &mr->cb_work);
if (err) {
spin_lock_irq(&ent->lock);
ent->pending--;
@@ -679,6 +680,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return -ENOMEM;
}
+ mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
timer_setup(&dev->delay_timer, delay_time_func, 0);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
@@ -725,33 +727,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return 0;
}
-static void wait_for_async_commands(struct mlx5_ib_dev *dev)
-{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent;
- int total = 0;
- int i;
- int j;
-
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
- ent = &cache->ent[i];
- for (j = 0 ; j < 1000; j++) {
- if (!ent->pending)
- break;
- msleep(50);
- }
- }
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
- ent = &cache->ent[i];
- total += ent->pending;
- }
-
- if (total)
- mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
- else
- mlx5_ib_warn(dev, "done with all pending requests\n");
-}
-
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
int i;
@@ -763,12 +738,12 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
flush_workqueue(dev->cache.wq);
mlx5_mr_cache_debugfs_cleanup(dev);
+ mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
- wait_for_async_commands(dev);
del_timer_sync(&dev->delay_timer);
return 0;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 01e0f6200631..4ee32964e1dd 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
struct prefetch_mr_work *w =
container_of(work, struct prefetch_mr_work, work);
- if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED)
+ if (ib_device_try_get(&w->dev->ib_dev)) {
mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
w->num_sge);
-
+ ib_device_put(&w->dev->ib_dev);
+ }
+ put_device(&w->dev->ib_dev.dev);
kfree(w);
}
@@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
num_sge);
- if (dev->ib_dev.reg_state != IB_DEV_REGISTERED)
- return -ENODEV;
-
work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
if (!work)
return -ENOMEM;
memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
+ get_device(&dev->ib_dev.dev);
work->dev = dev;
work->pf_flags = pf_flags;
work->num_sge = num_sge;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index dd2ae640bc84..7db778d96ef5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
if (!check_flags_mask(ucmd.flags,
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
+ MLX5_QP_FLAG_BFREG_INDEX |
+ MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
+ MLX5_QP_FLAG_SCATTER_CQE |
MLX5_QP_FLAG_SIGNATURE |
- MLX5_QP_FLAG_SCATTER_CQE |
- MLX5_QP_FLAG_TUNNEL_OFFLOADS |
- MLX5_QP_FLAG_BFREG_INDEX |
- MLX5_QP_FLAG_TYPE_DCT |
- MLX5_QP_FLAG_TYPE_DCI |
- MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
- MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE))
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TYPE_DCI |
+ MLX5_QP_FLAG_TYPE_DCT))
return -EINVAL;
err = get_qp_user_index(to_mucontext(pd->uobject->context),
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index cc9c0c8ccba3..112d2f38e0de 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
- &page->mapping, GFP_KERNEL);
+ page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
+ MTHCA_ICM_PAGE_SIZE, &page->mapping,
+ GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 82cb6b71ac7c..e3e9dd54caa2 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
{
struct mthca_ucontext *context;
- qp = kmalloc(sizeof *qp, GFP_KERNEL);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
@@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
if (udata)
return ERR_PTR(-EINVAL);
- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
+ qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 241a57a07485..097e5ab2a19f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+ GFP_KERNEL);
if (!q->va)
return -ENOMEM;
return 0;
@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
if (!cq->va) {
status = -ENOMEM;
goto mem_err;
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
qp->sq.max_cnt = max_wqe_allocated;
len = (hw_pages * hw_page_size);
- qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
qp->sq.len = len;
@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
qp->rq.max_cnt = max_rqe_allocated;
len = (hw_pages * hw_page_size);
- qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
qp->rq.pa = pa;
@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
- GFP_KERNEL);
+ qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index dd15474b19b7..6be0ea109138 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c46bed0c5513..287c332ff0e6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
- &ctx->ah_tbl.pa, GFP_KERNEL);
+ ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
if (!ctx->ah_tbl.va) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
return -ENOMEM;
for (i = 0; i < mr->num_pbls; i++) {
- va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
if (!va) {
ocrdma_free_mr_pbl_tbl(dev, mr);
status = -ENOMEM;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b342a70e2814..e1ccf32b1c3d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
return ERR_PTR(-ENOMEM);
for (i = 0; i < pbl_info->num_pbls; i++) {
- va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
- &pa, flags);
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
+ flags);
if (!va)
goto err;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 868da0ece7ba..445ea19a2ec8 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
- tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 42b8685c997e..3c633ab58052 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
- return (enum pvrdma_wr_opcode)op;
+ switch (op) {
+ case IB_WR_RDMA_WRITE:
+ return PVRDMA_WR_RDMA_WRITE;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
+ case IB_WR_SEND:
+ return PVRDMA_WR_SEND;
+ case IB_WR_SEND_WITH_IMM:
+ return PVRDMA_WR_SEND_WITH_IMM;
+ case IB_WR_RDMA_READ:
+ return PVRDMA_WR_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
+ case IB_WR_LSO:
+ return PVRDMA_WR_LSO;
+ case IB_WR_SEND_WITH_INV:
+ return PVRDMA_WR_SEND_WITH_INV;
+ case IB_WR_RDMA_READ_WITH_INV:
+ return PVRDMA_WR_RDMA_READ_WITH_INV;
+ case IB_WR_LOCAL_INV:
+ return PVRDMA_WR_LOCAL_INV;
+ case IB_WR_REG_MR:
+ return PVRDMA_WR_FAST_REG_MR;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+ case IB_WR_REG_SIG_MR:
+ return PVRDMA_WR_REG_SIG_MR;
+ default:
+ return PVRDMA_WR_ERROR;
+ }
}
static inline enum ib_wc_status pvrdma_wc_status_to_ib(
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index eaa109dbc96a..39c37b6fd715 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "device version %d, driver version %d\n",
dev->dsr_version, PVRDMA_VERSION);
- dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
- &dev->dsrbase, GFP_KERNEL);
+ dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
if (!dev->dsr) {
dev_err(&pdev->dev, "failed to allocate shared region\n");
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 3acf74cbe266..1ec3646087ba 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
wqe_hdr->ex.imm_data = wr->ex.imm_data;
+ if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (qp->ibqp.qp_type) {
case IB_QPT_GSI:
case IB_QPT_UD:
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index a1bd8cfc2c25..c6cc3e4ab71d 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2910,6 +2910,8 @@ send:
goto op_err;
if (!ret)
goto rnr_nak;
+ if (wqe->length > qp->r_len)
+ goto inv_err;
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3078,7 +3080,10 @@ op_err:
goto err;
inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
+ send_status =
+ sqp->ibqp.qp_type == IB_QPT_RC ?
+ IB_WC_REM_INV_REQ_ERR :
+ IB_WC_SUCCESS;
wc.status = IB_WC_LOC_QP_OP_ERR;
goto err;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1da119d901a9..73e808c1e6ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
struct list_head list;
struct net_device *dev;
struct ipoib_neigh *neigh;
- struct ipoib_path *path;
struct ipoib_tx_buf *tx_ring;
unsigned int tx_head;
unsigned int tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0428e01e8f69..aa9dcfc36cd3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
neigh->cm = tx;
tx->neigh = neigh;
- tx->path = path;
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
neigh->daddr + QPN_AND_OPTIONS_OFFSET);
goto free_neigh;
}
- memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
+ memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 31d91538bbf4..694324b37480 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (status)
return FAILED;
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
- for (j = 0; j < target->req_ring_size; ++j) {
- struct srp_request *req = &ch->req_ring[j];
-
- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
- }
- }
-
return SUCCESS;
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cfc8b94527b9..aa4e431cbcd3 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -252,6 +252,8 @@ static const struct xpad_device {
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+ { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4713957b0cbb..a878351f1643 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
config KEYBOARD_SNVS_PWRKEY
tristate "IMX SNVS Power Key Driver"
- depends on SOC_IMX6SX
+ depends on SOC_IMX6SX || SOC_IMX7D
depends on OF
help
This is the snvs powerkey driver for the Freescale i.MX application
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 312916f99597..73686c2460ce 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -75,9 +75,7 @@
struct cap11xx_led {
struct cap11xx_priv *priv;
struct led_classdev cdev;
- struct work_struct work;
u32 reg;
- enum led_brightness new_brightness;
};
#endif
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
}
#ifdef CONFIG_LEDS_CLASS
-static void cap11xx_led_work(struct work_struct *work)
+static int cap11xx_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
{
- struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
+ struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
struct cap11xx_priv *priv = led->priv;
- int value = led->new_brightness;
/*
- * All LEDs share the same duty cycle as this is a HW limitation.
- * Brightness levels per LED are either 0 (OFF) and 1 (ON).
+ * All LEDs share the same duty cycle as this is a HW
+ * limitation. Brightness levels per LED are either
+ * 0 (OFF) or 1 (ON).
*/
- regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
- BIT(led->reg), value ? BIT(led->reg) : 0);
-}
-
-static void cap11xx_led_set(struct led_classdev *cdev,
- enum led_brightness value)
-{
- struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
-
- if (led->new_brightness == value)
- return;
-
- led->new_brightness = value;
- schedule_work(&led->work);
+ return regmap_update_bits(priv->regmap,
+ CAP11XX_REG_LED_OUTPUT_CONTROL,
+ BIT(led->reg),
+ value ? BIT(led->reg) : 0);
}
static int cap11xx_init_leds(struct device *dev,
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
led->cdev.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
led->cdev.flags = 0;
- led->cdev.brightness_set = cap11xx_led_set;
+ led->cdev.brightness_set_blocking = cap11xx_led_set;
led->cdev.max_brightness = 1;
led->cdev.brightness = LED_OFF;
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
led->reg = reg;
led->priv = priv;
- INIT_WORK(&led->work, cap11xx_led_work);
-
error = devm_led_classdev_register(dev, &led->cdev);
if (error) {
of_node_put(child);
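
The cap11xx conversion above (and the similar qt2160 and apanel changes further down) drops the private workqueue and registers brightness_set_blocking() instead, which the LED core calls from a context that may sleep, so the bus write can happen inline and its error code can be propagated. A minimal sketch of the pattern; struct foo_led, struct foo_priv and foo_write() are hypothetical stand-ins for the driver's own state and bus accessor:

struct foo_priv;					/* hypothetical driver state */
int foo_write(struct foo_priv *priv, u32 reg, bool on);	/* hypothetical, may sleep */

struct foo_led {
	struct led_classdev cdev;
	struct foo_priv *priv;
	u32 reg;
};

static int foo_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	struct foo_led *led = container_of(cdev, struct foo_led, cdev);

	/* sleeping is allowed here, so no work item is needed */
	return foo_write(led->priv, led->reg, value != LED_OFF);
}

static int foo_register_led(struct device *dev, struct foo_led *led)
{
	led->cdev.brightness_set_blocking = foo_led_set;
	led->cdev.max_brightness = 1;
	led->cdev.brightness = LED_OFF;

	return devm_led_classdev_register(dev, &led->cdev);
}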
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index b20a5d044caa..b4db72f833ca 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -32,10 +32,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
-#include <asm/irq.h>
-
-#include <mach/hardware.h>
-#include <mach/irqs.h>
#include <linux/platform_data/keyscan-davinci.h>
/* Key scan registers */
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 403452ef00e6..3d1cb7bf5e35 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
keypad->stopped = true;
spin_unlock_irq(&keypad->lock);
- flush_work(&keypad->work.work);
+ flush_delayed_work(&keypad->work);
/*
* matrix_keypad_scan() will leave IRQs enabled;
* we should disable them now.
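
The flush_work(&keypad->work.work) to flush_delayed_work(&keypad->work) change matters for a delayed_work whose timer has not fired yet: flushing only the embedded work_struct returns immediately and the timer can still queue a scan afterwards, while flush_delayed_work() pulls a pending timer forward and waits for the work to finish. A small sketch of the stop path; struct example_keypad is a hypothetical stand-in for the driver's state:

struct example_keypad {
	spinlock_t lock;
	bool stopped;
	struct delayed_work work;
};

static void example_keypad_stop(struct example_keypad *keypad)
{
	spin_lock_irq(&keypad->lock);
	keypad->stopped = true;		/* the scan work re-checks this flag */
	spin_unlock_irq(&keypad->lock);

	/* also covers the timer-pending case that flush_work() would miss */
	flush_delayed_work(&keypad->work);
}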
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 43b86482dda0..d466bc07aebb 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = {
struct qt2160_led {
struct qt2160_data *qt2160;
struct led_classdev cdev;
- struct work_struct work;
char name[32];
int id;
- enum led_brightness new_brightness;
+ enum led_brightness brightness;
};
#endif
@@ -74,7 +73,6 @@ struct qt2160_data {
u16 key_matrix;
#ifdef CONFIG_LEDS_CLASS
struct qt2160_led leds[QT2160_NUM_LEDS_X];
- struct mutex led_lock;
#endif
};
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
#ifdef CONFIG_LEDS_CLASS
-static void qt2160_led_work(struct work_struct *work)
+static int qt2160_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
{
- struct qt2160_led *led = container_of(work, struct qt2160_led, work);
+ struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
struct qt2160_data *qt2160 = led->qt2160;
struct i2c_client *client = qt2160->client;
- int value = led->new_brightness;
u32 drive, pwmen;
- mutex_lock(&qt2160->led_lock);
-
- drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
- pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
- if (value != LED_OFF) {
- drive |= (1 << led->id);
- pwmen |= (1 << led->id);
-
- } else {
- drive &= ~(1 << led->id);
- pwmen &= ~(1 << led->id);
- }
- qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
- qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
+ if (value != led->brightness) {
+ drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
+ pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
+ if (value != LED_OFF) {
+ drive |= BIT(led->id);
+ pwmen |= BIT(led->id);
- /*
- * Changing this register will change the brightness
- * of every LED in the qt2160. It's a HW limitation.
- */
- if (value != LED_OFF)
- qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
+ } else {
+ drive &= ~BIT(led->id);
+ pwmen &= ~BIT(led->id);
+ }
+ qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
+ qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
- mutex_unlock(&qt2160->led_lock);
-}
+ /*
+ * Changing this register will change the brightness
+ * of every LED in the qt2160. It's a HW limitation.
+ */
+ if (value != LED_OFF)
+ qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
-static void qt2160_led_set(struct led_classdev *cdev,
- enum led_brightness value)
-{
- struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
+ led->brightness = value;
+ }
- led->new_brightness = value;
- schedule_work(&led->work);
+ return 0;
}
#endif /* CONFIG_LEDS_CLASS */
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
int ret;
int i;
- mutex_init(&qt2160->led_lock);
-
for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
struct qt2160_led *led = &qt2160->leds[i];
snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
led->cdev.name = led->name;
- led->cdev.brightness_set = qt2160_led_set;
+ led->cdev.brightness_set_blocking = qt2160_led_set;
led->cdev.brightness = LED_OFF;
led->id = i;
led->qt2160 = qt2160;
- INIT_WORK(&led->work, qt2160_led_work);
-
ret = led_classdev_register(&client->dev, &led->cdev);
if (ret < 0)
return ret;
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160)
{
int i;
- for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+ for (i = 0; i < QT2160_NUM_LEDS_X; i++)
led_classdev_unregister(&qt2160->leds[i].cdev);
- cancel_work_sync(&qt2160->leds[i].work);
- }
}
#else
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index babcfb165e4f..3b85631fde91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
input_dev->id.bustype = BUS_HOST;
+ keypad_data->input_dev = input_dev;
+
error = keypad_matrix_key_parse_dt(keypad_data);
if (error)
return error;
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, keypad_data);
- keypad_data->input_dev = input_dev;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(keypad_data->base))
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 094bddf56755..c1e66f45d552 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/input-polldev.h>
#include <linux/i2c.h>
-#include <linux/workqueue.h>
#include <linux/leds.h>
#define APANEL_NAME "Fujitsu Application Panel"
@@ -59,8 +58,6 @@ struct apanel {
struct i2c_client *client;
unsigned short keymap[MAX_PANEL_KEYS];
u16 nkeys;
- u16 led_bits;
- struct work_struct led_work;
struct led_classdev mail_led;
};
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev)
report_key(idev, ap->keymap[i]);
}
-/* Track state changes of LED */
-static void led_update(struct work_struct *work)
-{
- struct apanel *ap = container_of(work, struct apanel, led_work);
-
- i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits);
-}
-
-static void mail_led_set(struct led_classdev *led,
+static int mail_led_set(struct led_classdev *led,
enum led_brightness value)
{
struct apanel *ap = container_of(led, struct apanel, mail_led);
+ u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000;
- if (value != LED_OFF)
- ap->led_bits |= 0x8000;
- else
- ap->led_bits &= ~0x8000;
-
- schedule_work(&ap->led_work);
+ return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
}
static int apanel_remove(struct i2c_client *client)
@@ -179,7 +164,7 @@ static struct apanel apanel = {
},
.mail_led = {
.name = "mail:blue",
- .brightness_set = mail_led_set,
+ .brightness_set_blocking = mail_led_set,
},
};
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client,
if (err)
goto out3;
- INIT_WORK(&ap->led_work, led_update);
if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
err = led_classdev_register(&client->dev, &ap->mail_led);
if (err)
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 1efcfdf9f8a8..dd9dd4e40827 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
idev->close = bma150_irq_close;
input_set_drvdata(idev, bma150);
+ bma150->input = idev;
+
error = input_register_device(idev);
if (error) {
input_free_device(idev);
return error;
}
- bma150->input = idev;
return 0;
}
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
bma150_init_input_device(bma150, ipoll_dev->input);
+ bma150->input_polled = ipoll_dev;
+ bma150->input = ipoll_dev->input;
+
error = input_register_polled_device(ipoll_dev);
if (error) {
input_free_polled_device(ipoll_dev);
return error;
}
- bma150->input_polled = ipoll_dev;
- bma150->input = ipoll_dev->input;
-
return 0;
}
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 55da191ae550..dbb6d9e1b947 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -34,6 +34,7 @@ struct pwm_vibrator {
struct work_struct play_work;
u16 level;
u32 direction_duty_cycle;
+ bool vcc_on;
};
static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
struct pwm_state state;
int err;
- err = regulator_enable(vibrator->vcc);
- if (err) {
- dev_err(pdev, "failed to enable regulator: %d", err);
- return err;
+ if (!vibrator->vcc_on) {
+ err = regulator_enable(vibrator->vcc);
+ if (err) {
+ dev_err(pdev, "failed to enable regulator: %d", err);
+ return err;
+ }
+ vibrator->vcc_on = true;
}
pwm_get_state(vibrator->pwm, &state);
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
{
- regulator_disable(vibrator->vcc);
-
if (vibrator->pwm_dir)
pwm_disable(vibrator->pwm_dir);
pwm_disable(vibrator->pwm);
+
+ if (vibrator->vcc_on) {
+ regulator_disable(vibrator->vcc);
+ vibrator->vcc_on = false;
+ }
}
static void pwm_vibrator_play_work(struct work_struct *work)
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 8ec483e8688b..26ec603fe220 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -39,6 +39,7 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/overflow.h>
#include <linux/input/mt.h>
#include "../input-compat.h"
@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
const struct input_absinfo *abs)
{
- int min, max;
+ int min, max, range;
min = abs->minimum;
max = abs->maximum;
@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
return -EINVAL;
}
- if (abs->flat > max - min) {
+ if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
printk(KERN_DEBUG
"%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
UINPUT_NAME, code, abs->flat, min, max);
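
check_sub_overflow() comes from the newly included <linux/overflow.h>: it stores the difference in its third argument and evaluates to true only if the subtraction overflowed that type, so max - min can no longer wrap when userspace supplies extreme minimum/maximum values. A small helper that mirrors the uinput condition, for illustration:

static bool example_flat_too_large(int min, int max, int flat)
{
	int range;

	/*
	 * check_sub_overflow() stores max - min in range and returns true
	 * only if the subtraction overflowed an int; as in the patch, the
	 * flat value is flagged only when the range is representable and
	 * flat exceeds it.
	 */
	return !check_sub_overflow(max, min, &range) && flat > range;
}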
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f322a1768fbb..225ae6980182 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
- { "ELAN0501", 0 },
{ "ELAN0600", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
+ { "ELAN0617", 0 },
{ "ELAN0618", 0 },
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 9fe075c137dc..a7f8b1614559 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
+ * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
},
},
+ {
+ /* Fujitsu H780 also has a middle button */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
+ },
+ },
#endif
{ }
};
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index b36084710f69..a7cfab3db9ee 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -23,7 +23,6 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/clk.h>
/*
* The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller.
@@ -75,7 +74,6 @@ struct olpc_apsp {
struct serio *kbio;
struct serio *padio;
void __iomem *base;
- struct clk *clk;
int open_count;
int irq;
};
@@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port)
struct olpc_apsp *priv = port->port_data;
unsigned int tmp;
unsigned long l;
- int error;
if (priv->open_count++ == 0) {
- error = clk_prepare_enable(priv->clk);
- if (error)
- return error;
-
l = readl(priv->base + COMMAND_FIFO_STATUS);
if (!(l & CMD_STS_MASK)) {
dev_err(priv->dev, "SP cannot accept commands.\n");
- clk_disable_unprepare(priv->clk);
return -EIO;
}
@@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port)
/* Disable interrupt 0 */
tmp = readl(priv->base + PJ_INTERRUPT_MASK);
writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK);
-
- clk_disable_unprepare(priv->clk);
}
}
@@ -195,6 +185,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = &pdev->dev;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base)) {
@@ -206,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
- priv->clk = devm_clk_get(&pdev->dev, "sp");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
/* KEYBOARD */
kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!kb_serio)
@@ -248,7 +236,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
goto err_irq;
}
- priv->dev = &pdev->dev;
device_init_wakeup(priv->dev, 1);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index c62cceb97bb1..5e8d8384aa2a 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio)
{
struct ps2_gpio_data *drvdata = serio->port_data;
+ flush_delayed_work(&drvdata->tx_work);
disable_irq(drvdata->irq);
}
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index af6027cc7bbf..068dbbc610fc 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -698,7 +698,7 @@ config TOUCHSCREEN_EDT_FT5X06
config TOUCHSCREEN_RASPBERRYPI_FW
tristate "Raspberry Pi's firmware base touch screen support"
- depends on RASPBERRYPI_FIRMWARE || COMPILE_TEST
+ depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
help
Say Y here if you have the official Raspberry Pi 7 inch screen on
your system.
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index f456c1125bd6..69881265d121 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev)
return -ENOMEM;
ts->pdev = pdev;
- ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
- GFP_KERNEL);
+ ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
+ GFP_KERNEL);
if (!ts->fw_regs_va) {
dev_err(dev, "failed to dma_alloc_coherent\n");
return -ENOMEM;
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
new file mode 100644
index 000000000000..07a8276fa35a
--- /dev/null
+++ b/drivers/interconnect/Kconfig
@@ -0,0 +1,15 @@
+menuconfig INTERCONNECT
+ tristate "On-Chip Interconnect management support"
+ help
+ Support for management of the on-chip interconnects.
+
+ This framework is designed to provide a generic interface for
+ managing the interconnects in a SoC.
+
+ If unsure, say no.
+
+if INTERCONNECT
+
+source "drivers/interconnect/qcom/Kconfig"
+
+endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
new file mode 100644
index 000000000000..28f2ab0824d5
--- /dev/null
+++ b/drivers/interconnect/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+icc-core-objs := core.o
+
+obj-$(CONFIG_INTERCONNECT) += icc-core.o
+obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
new file mode 100644
index 000000000000..6005a1c189f6
--- /dev/null
+++ b/drivers/interconnect/core.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework core driver
+ *
+ * Copyright (c) 2017-2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+
+static DEFINE_IDR(icc_idr);
+static LIST_HEAD(icc_providers);
+static DEFINE_MUTEX(icc_lock);
+static struct dentry *icc_debugfs_dir;
+
+/**
+ * struct icc_req - constraints that are attached to each node
+ * @req_node: entry in list of requests for the particular @node
+ * @node: the interconnect node to which this constraint applies
+ * @dev: reference to the device that sets the constraints
+ * @avg_bw: an integer describing the average bandwidth in kBps
+ * @peak_bw: an integer describing the peak bandwidth in kBps
+ */
+struct icc_req {
+ struct hlist_node req_node;
+ struct icc_node *node;
+ struct device *dev;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/**
+ * struct icc_path - interconnect path structure
+ * @num_nodes: number of hops (nodes)
+ * @reqs: array of the requests applicable to this path of nodes
+ */
+struct icc_path {
+ size_t num_nodes;
+ struct icc_req reqs[];
+};
+
+static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
+{
+ if (!n)
+ return;
+
+ seq_printf(s, "%-30s %12u %12u\n",
+ n->name, n->avg_bw, n->peak_bw);
+}
+
+static int icc_summary_show(struct seq_file *s, void *data)
+{
+ struct icc_provider *provider;
+
+ seq_puts(s, " node avg peak\n");
+ seq_puts(s, "--------------------------------------------------------\n");
+
+ mutex_lock(&icc_lock);
+
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ struct icc_req *r;
+
+ icc_summary_show_one(s, n);
+ hlist_for_each_entry(r, &n->req_list, req_node) {
+ if (!r->dev)
+ continue;
+
+ seq_printf(s, " %-26s %12u %12u\n",
+ dev_name(r->dev), r->avg_bw,
+ r->peak_bw);
+ }
+ }
+ }
+
+ mutex_unlock(&icc_lock);
+
+ return 0;
+}
+
+static int icc_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, icc_summary_show, inode->i_private);
+}
+
+static const struct file_operations icc_summary_fops = {
+ .open = icc_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct icc_node *node_find(const int id)
+{
+ return idr_find(&icc_idr, id);
+}
+
+static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
+ ssize_t num_nodes)
+{
+ struct icc_node *node = dst;
+ struct icc_path *path;
+ int i;
+
+ path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
+ if (!path)
+ return ERR_PTR(-ENOMEM);
+
+ path->num_nodes = num_nodes;
+
+ for (i = num_nodes - 1; i >= 0; i--) {
+ node->provider->users++;
+ hlist_add_head(&path->reqs[i].req_node, &node->req_list);
+ path->reqs[i].node = node;
+ path->reqs[i].dev = dev;
+ /* reference to previous node was saved during path traversal */
+ node = node->reverse;
+ }
+
+ return path;
+}
+
+static struct icc_path *path_find(struct device *dev, struct icc_node *src,
+ struct icc_node *dst)
+{
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node *n, *node = NULL;
+ struct list_head traverse_list;
+ struct list_head edge_list;
+ struct list_head visited_list;
+ size_t i, depth = 1;
+ bool found = false;
+
+ INIT_LIST_HEAD(&traverse_list);
+ INIT_LIST_HEAD(&edge_list);
+ INIT_LIST_HEAD(&visited_list);
+
+ list_add(&src->search_list, &traverse_list);
+ src->reverse = NULL;
+
+ do {
+ list_for_each_entry_safe(node, n, &traverse_list, search_list) {
+ if (node == dst) {
+ found = true;
+ list_splice_init(&edge_list, &visited_list);
+ list_splice_init(&traverse_list, &visited_list);
+ break;
+ }
+ for (i = 0; i < node->num_links; i++) {
+ struct icc_node *tmp = node->links[i];
+
+ if (!tmp) {
+ path = ERR_PTR(-ENOENT);
+ goto out;
+ }
+
+ if (tmp->is_traversed)
+ continue;
+
+ tmp->is_traversed = true;
+ tmp->reverse = node;
+ list_add_tail(&tmp->search_list, &edge_list);
+ }
+ }
+
+ if (found)
+ break;
+
+ list_splice_init(&traverse_list, &visited_list);
+ list_splice_init(&edge_list, &traverse_list);
+
+ /* count the hops including the source */
+ depth++;
+
+ } while (!list_empty(&traverse_list));
+
+out:
+
+ /* reset the traversed state */
+ list_for_each_entry_reverse(n, &visited_list, search_list)
+ n->is_traversed = false;
+
+ if (found)
+ path = path_init(dev, dst, depth);
+
+ return path;
+}
+
+/*
+ * We want the path to honor all bandwidth requests, so the average and peak
+ * bandwidth requirements from each consumer are aggregated at each node.
+ * The aggregation is platform specific, so each platform can customize it by
+ * implementing its own aggregate() function.
+ */
+
+static int aggregate_requests(struct icc_node *node)
+{
+ struct icc_provider *p = node->provider;
+ struct icc_req *r;
+
+ node->avg_bw = 0;
+ node->peak_bw = 0;
+
+ hlist_for_each_entry(r, &node->req_list, req_node)
+ p->aggregate(node, r->avg_bw, r->peak_bw,
+ &node->avg_bw, &node->peak_bw);
+
+ return 0;
+}
+
+static int apply_constraints(struct icc_path *path)
+{
+ struct icc_node *next, *prev = NULL;
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < path->num_nodes; i++) {
+ next = path->reqs[i].node;
+
+ /*
+ * Both endpoints should be valid master-slave pairs of the
+ * same interconnect provider that will be configured.
+ */
+ if (!prev || next->provider != prev->provider) {
+ prev = next;
+ continue;
+ }
+
+ /* set the constraints */
+ ret = next->provider->set(prev, next);
+ if (ret)
+ goto out;
+
+ prev = next;
+ }
+out:
+ return ret;
+}
+
+/**
+ * of_icc_xlate_onecell() - Translate function using a single index.
+ * @spec: OF phandle args to map into an interconnect node.
+ * @data: private data (pointer to struct icc_onecell_data)
+ *
+ * This is a generic translate function that can be used to model simple
+ * interconnect providers that have one device tree node and provide
+ * multiple interconnect nodes. A single cell is used as an index into
+ * an array of icc nodes specified in the icc_onecell_data struct when
+ * registering the provider.
+ */
+struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+ void *data)
+{
+ struct icc_onecell_data *icc_data = data;
+ unsigned int idx = spec->args[0];
+
+ if (idx >= icc_data->num_nodes) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return icc_data->nodes[idx];
+}
+EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
+
+/**
+ * of_icc_get_from_provider() - Look-up interconnect node
+ * @spec: OF phandle args to use for look-up
+ *
+ * Looks for interconnect provider under the node specified by @spec and if
+ * found, uses xlate function of the provider to map phandle args to node.
+ *
+ * Returns a valid pointer to struct icc_node on success or ERR_PTR()
+ * on failure.
+ */
+static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+{
+ struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
+ struct icc_provider *provider;
+
+ if (!spec || spec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&icc_lock);
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ if (provider->dev->of_node == spec->np)
+ node = provider->xlate(spec, provider->data);
+ if (!IS_ERR(node))
+ break;
+ }
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+
+/**
+ * of_icc_get() - get a path handle from a DT node based on name
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled or the "interconnects" DT property is missing.
+ */
+struct icc_path *of_icc_get(struct device *dev, const char *name)
+{
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node *src_node, *dst_node;
+ struct device_node *np = NULL;
+ struct of_phandle_args src_args, dst_args;
+ int idx = 0;
+ int ret;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ /*
+ * When the consumer DT node does not have an "interconnects" property,
+ * return a NULL path to skip setting constraints.
+ */
+ if (!of_find_property(np, "interconnects", NULL))
+ return NULL;
+
+ /*
+ * We use a combination of phandle and specifier for endpoint. For now
+ * let's support only global IDs and extend this in the future if needed
+ * without breaking DT compatibility.
+ */
+ if (name) {
+ idx = of_property_match_string(np, "interconnect-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells", idx * 2,
+ &src_args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ of_node_put(src_args.np);
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells", idx * 2 + 1,
+ &dst_args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ of_node_put(dst_args.np);
+
+ src_node = of_icc_get_from_provider(&src_args);
+
+ if (IS_ERR(src_node)) {
+ if (PTR_ERR(src_node) != -EPROBE_DEFER)
+ dev_err(dev, "error finding src node: %ld\n",
+ PTR_ERR(src_node));
+ return ERR_CAST(src_node);
+ }
+
+ dst_node = of_icc_get_from_provider(&dst_args);
+
+ if (IS_ERR(dst_node)) {
+ if (PTR_ERR(dst_node) != -EPROBE_DEFER)
+ dev_err(dev, "error finding dst node: %ld\n",
+ PTR_ERR(dst_node));
+ return ERR_CAST(dst_node);
+ }
+
+ mutex_lock(&icc_lock);
+ path = path_find(dev, src_node, dst_node);
+ if (IS_ERR(path))
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ mutex_unlock(&icc_lock);
+
+ return path;
+}
+EXPORT_SYMBOL_GPL(of_icc_get);
+
+/**
+ * icc_set_bw() - set bandwidth constraints on an interconnect path
+ * @path: reference to the path returned by icc_get()
+ * @avg_bw: average bandwidth in kilobytes per second
+ * @peak_bw: peak bandwidth in kilobytes per second
+ *
+ * This function is used by an interconnect consumer to express its own needs
+ * in terms of bandwidth for a previously requested path between two endpoints.
+ * The requests are aggregated and each node is updated accordingly. The entire
+ * path is locked by a mutex to ensure that the set() is completed.
+ * The @path can be NULL when the "interconnects" DT property is missing,
+ * which will mean that no constraints will be set.
+ *
+ * Returns 0 on success, or an appropriate error code otherwise.
+ */
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+ struct icc_node *node;
+ u32 old_avg, old_peak;
+ size_t i;
+ int ret;
+
+ if (!path || !path->num_nodes)
+ return 0;
+
+ mutex_lock(&icc_lock);
+
+ old_avg = path->reqs[0].avg_bw;
+ old_peak = path->reqs[0].peak_bw;
+
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+
+ /* update the consumer request for this path */
+ path->reqs[i].avg_bw = avg_bw;
+ path->reqs[i].peak_bw = peak_bw;
+
+ /* aggregate requests for this node */
+ aggregate_requests(node);
+ }
+
+ ret = apply_constraints(path);
+ if (ret) {
+ pr_debug("interconnect: error applying constraints (%d)\n",
+ ret);
+
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ path->reqs[i].avg_bw = old_avg;
+ path->reqs[i].peak_bw = old_peak;
+ aggregate_requests(node);
+ }
+ apply_constraints(path);
+ }
+
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_set_bw);
+
+/**
+ * icc_get() - return a handle for path between two endpoints
+ * @dev: the device requesting the path
+ * @src_id: source device port id
+ * @dst_id: destination device port id
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release
+ * constraints when they are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
+ * interconnect API is disabled.
+ */
+struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
+{
+ struct icc_node *src, *dst;
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+
+ mutex_lock(&icc_lock);
+
+ src = node_find(src_id);
+ if (!src)
+ goto out;
+
+ dst = node_find(dst_id);
+ if (!dst)
+ goto out;
+
+ path = path_find(dev, src, dst);
+ if (IS_ERR(path))
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+
+out:
+ mutex_unlock(&icc_lock);
+ return path;
+}
+EXPORT_SYMBOL_GPL(icc_get);
+
+/**
+ * icc_put() - release the reference to the icc_path
+ * @path: interconnect path
+ *
+ * Use this function to release the constraints on a path when the path is
+ * no longer needed. The constraints will be re-aggregated.
+ */
+void icc_put(struct icc_path *path)
+{
+ struct icc_node *node;
+ size_t i;
+ int ret;
+
+ if (!path || WARN_ON(IS_ERR(path)))
+ return;
+
+ ret = icc_set_bw(path, 0, 0);
+ if (ret)
+ pr_err("%s: error (%d)\n", __func__, ret);
+
+ mutex_lock(&icc_lock);
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ hlist_del(&path->reqs[i].req_node);
+ if (!WARN_ON(!node->provider->users))
+ node->provider->users--;
+ }
+ mutex_unlock(&icc_lock);
+
+ kfree(path);
+}
+EXPORT_SYMBOL_GPL(icc_put);
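
Taken together, of_icc_get()/icc_get(), icc_set_bw() and icc_put() above form the consumer-facing half of the new API. A minimal consumer sketch (illustrative, not from this patch), assuming a DT node wired up as of_icc_get() expects; the path name, node IDs and bandwidth numbers are made up:

#include <linux/interconnect.h>
#include <linux/platform_device.h>

/*
 * Assumed consumer DT fragment (one source/destination pair per path):
 *
 *	interconnects = <&noc MASTER_FOO &noc SLAVE_BAR>;
 *	interconnect-names = "memory";
 */
static int example_probe(struct platform_device *pdev)
{
	struct icc_path *path;
	int ret;

	/* NULL (no "interconnects" property, or API disabled) is accepted
	 * by icc_set_bw() and icc_put(), so it needs no special casing */
	path = of_icc_get(&pdev->dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* 100 MB/s average, 200 MB/s peak, both expressed in kBps */
	ret = icc_set_bw(path, 100000, 200000);
	if (ret) {
		icc_put(path);
		return ret;
	}

	/* ... drive the device; on teardown icc_put() drops the request
	 * and the remaining constraints are re-aggregated ... */
	icc_put(path);
	return 0;
}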
+
+static struct icc_node *icc_node_create_nolock(int id)
+{
+ struct icc_node *node;
+
+ /* check if node already exists */
+ node = node_find(id);
+ if (node)
+ return node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
+ if (id < 0) {
+ WARN(1, "%s: couldn't get idr\n", __func__);
+ kfree(node);
+ return ERR_PTR(id);
+ }
+
+ node->id = id;
+
+ return node;
+}
+
+/**
+ * icc_node_create() - create a node
+ * @id: node id
+ *
+ * Return: icc_node pointer on success, or ERR_PTR() on error
+ */
+struct icc_node *icc_node_create(int id)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = icc_node_create_nolock(id);
+
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(icc_node_create);
+
+/**
+ * icc_node_destroy() - destroy a node
+ * @id: node id
+ */
+void icc_node_destroy(int id)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = node_find(id);
+ if (node) {
+ idr_remove(&icc_idr, node->id);
+ WARN_ON(!hlist_empty(&node->req_list));
+ }
+
+ mutex_unlock(&icc_lock);
+
+ kfree(node);
+}
+EXPORT_SYMBOL_GPL(icc_node_destroy);
+
+/**
+ * icc_link_create() - create a link between two nodes
+ * @node: source node id
+ * @dst_id: destination node id
+ *
+ * Create a link between two nodes. The nodes might belong to different
+ * interconnect providers and the @dst_id node might not exist (if the
+ * provider driver has not probed yet). So just create the @dst_id node
+ * and when the actual provider driver is probed, the rest of the node
+ * data is filled.
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_create(struct icc_node *node, const int dst_id)
+{
+ struct icc_node *dst;
+ struct icc_node **new;
+ int ret = 0;
+
+ if (!node->provider)
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ dst = node_find(dst_id);
+ if (!dst) {
+ dst = icc_node_create_nolock(dst_id);
+
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto out;
+ }
+ }
+
+ new = krealloc(node->links,
+ (node->num_links + 1) * sizeof(*node->links),
+ GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ node->links = new;
+ node->links[node->num_links++] = dst;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_create);
+
+/**
+ * icc_link_destroy() - destroy a link between two nodes
+ * @src: pointer to source node
+ * @dst: pointer to destination node
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+{
+ struct icc_node **new;
+ size_t slot;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(src))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(dst))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ for (slot = 0; slot < src->num_links; slot++)
+ if (src->links[slot] == dst)
+ break;
+
+ if (WARN_ON(slot == src->num_links)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ src->links[slot] = src->links[--src->num_links];
+
+ new = krealloc(src->links, src->num_links * sizeof(*src->links),
+ GFP_KERNEL);
+ if (new)
+ src->links = new;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_destroy);
+
+/**
+ * icc_node_add() - add interconnect node to interconnect provider
+ * @node: pointer to the interconnect node
+ * @provider: pointer to the interconnect provider
+ */
+void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+{
+ mutex_lock(&icc_lock);
+
+ node->provider = provider;
+ list_add_tail(&node->node_list, &provider->nodes);
+
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_node_add);
+
+/**
+ * icc_node_del() - delete interconnect node from interconnect provider
+ * @node: pointer to the interconnect node
+ */
+void icc_node_del(struct icc_node *node)
+{
+ mutex_lock(&icc_lock);
+
+ list_del(&node->node_list);
+
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_node_del);
+
+/**
+ * icc_provider_add() - add a new interconnect provider
+ * @provider: the interconnect provider that will be added into topology
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_provider_add(struct icc_provider *provider)
+{
+ if (WARN_ON(!provider->set))
+ return -EINVAL;
+ if (WARN_ON(!provider->xlate))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ INIT_LIST_HEAD(&provider->nodes);
+ list_add_tail(&provider->provider_list, &icc_providers);
+
+ mutex_unlock(&icc_lock);
+
+ dev_dbg(provider->dev, "interconnect provider added to topology\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_provider_add);
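
On the provider side, the flow built from the helpers above is: fill in an icc_provider, register it with icc_provider_add(), then create and link the topology nodes with icc_node_create(), icc_node_add() and icc_link_create(). A condensed sketch; the example_set()/example_aggregate() prototypes are inferred from how core.c invokes the callbacks, the node IDs are placeholders, and icc_onecell_data is assumed to hold the num_nodes/nodes[] pair that of_icc_xlate_onecell() dereferences:

#include <linux/interconnect-provider.h>

static int example_set(struct icc_node *src, struct icc_node *dst);
static int example_aggregate(struct icc_node *node, u32 avg_bw, u32 peak_bw,
			     u32 *agg_avg, u32 *agg_peak);

static int example_register_provider(struct device *dev,
				     struct icc_provider *provider,
				     struct icc_onecell_data *data)
{
	struct icc_node *node;
	int ret;

	provider->dev = dev;
	provider->set = example_set;		/* applies aggregated bw to HW */
	provider->aggregate = example_aggregate;
	provider->xlate = of_icc_xlate_onecell;
	provider->data = data;

	ret = icc_provider_add(provider);
	if (ret)
		return ret;

	node = icc_node_create(1);		/* placeholder global id */
	if (IS_ERR(node))
		return PTR_ERR(node);

	node->name = "example_master";
	icc_node_add(node, provider);
	data->nodes[0] = node;

	/* the destination may belong to a provider that probes later */
	return icc_link_create(node, 2);	/* placeholder global id */
}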
+
+/**
+ * icc_provider_del() - delete previously added interconnect provider
+ * @provider: the interconnect provider that will be removed from topology
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_provider_del(struct icc_provider *provider)
+{
+ mutex_lock(&icc_lock);
+ if (provider->users) {
+ pr_warn("interconnect provider still has %d users\n",
+ provider->users);
+ mutex_unlock(&icc_lock);
+ return -EBUSY;
+ }
+
+ if (!list_empty(&provider->nodes)) {
+ pr_warn("interconnect provider still has nodes\n");
+ mutex_unlock(&icc_lock);
+ return -EBUSY;
+ }
+
+ list_del(&provider->provider_list);
+ mutex_unlock(&icc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_provider_del);
+
+static int __init icc_init(void)
+{
+ icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
+ debugfs_create_file("interconnect_summary", 0444,
+ icc_debugfs_dir, NULL, &icc_summary_fops);
+ return 0;
+}
+
+static void __exit icc_exit(void)
+{
+ debugfs_remove_recursive(icc_debugfs_dir);
+}
+module_init(icc_init);
+module_exit(icc_exit);
+
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Interconnect Driver Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
new file mode 100644
index 000000000000..290d330abe5a
--- /dev/null
+++ b/drivers/interconnect/qcom/Kconfig
@@ -0,0 +1,13 @@
+config INTERCONNECT_QCOM
+ bool "Qualcomm Network-on-Chip interconnect drivers"
+ depends on ARCH_QCOM
+ help
+ Support for Qualcomm's Network-on-Chip interconnect hardware.
+
+config INTERCONNECT_QCOM_SDM845
+ tristate "Qualcomm SDM845 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdm845-based
+ platforms.
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
new file mode 100644
index 000000000000..1c1cea690f92
--- /dev/null
+++ b/drivers/interconnect/qcom/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+qnoc-sdm845-objs := sdm845.o
+
+obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
new file mode 100644
index 000000000000..4915b78da673
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <asm/div64.h>
+#include <dt-bindings/interconnect/qcom,sdm845.h>
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#define BCM_TCS_CMD_COMMIT_SHFT 30
+#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
+#define BCM_TCS_CMD_VALID_SHFT 29
+#define BCM_TCS_CMD_VALID_MASK 0x20000000
+#define BCM_TCS_CMD_VOTE_X_SHFT 14
+#define BCM_TCS_CMD_VOTE_MASK 0x3fff
+#define BCM_TCS_CMD_VOTE_Y_SHFT 0
+#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
+
+#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
+ (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
+ ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
+ ((cpu_to_le32(vote_x) & \
+ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
+ ((cpu_to_le32(vote_y) & \
+ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct device *dev;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+#define SDM845_MAX_LINKS 43
+#define SDM845_MAX_BCMS 30
+#define SDM845_MAX_BCM_PER_NODE 2
+#define SDM845_MAX_VCD 10
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @links: an array of nodes where we can go next while traversing
+ * @id: a unique node identifier
+ * @num_links: the total number of @links
+ * @channels: num of channels at this node
+ * @buswidth: width of the interconnect between a node and the bus
+ * @sum_avg: current sum aggregate value of all avg bw requests
+ * @max_peak: current max aggregate value of all peak bw requests
+ * @bcms: list of bcms associated with this logical node
+ * @num_bcms: num of @bcms
+ */
+struct qcom_icc_node {
+ const char *name;
+ u16 links[SDM845_MAX_LINKS];
+ u16 id;
+ u16 num_links;
+ u16 channels;
+ u16 buswidth;
+ u64 sum_avg;
+ u64 max_peak;
+ struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
+ size_t num_bcms;
+};
+
+/**
+ * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
+ * known as Bus Clock Manager (BCM)
+ * @name: the bcm node name used to fetch BCM data from command db
+ * @type: latency or bandwidth bcm
+ * @addr: address offsets used when voting to RPMH
+ * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
+ * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
+ * @dirty: flag used to indicate whether the bcm needs to be committed
+ * @keepalive: flag used to indicate whether a keepalive is required
+ * @aux_data: auxiliary data used when calculating threshold values and
+ * communicating with RPMh
+ * @list: used to link to other bcms when compiling lists for commit
+ * @num_nodes: total number of @nodes
+ * @nodes: list of qcom_icc_nodes that this BCM encapsulates
+ */
+struct qcom_icc_bcm {
+ const char *name;
+ u32 type;
+ u32 addr;
+ u64 vote_x;
+ u64 vote_y;
+ bool dirty;
+ bool keepalive;
+ struct bcm_db aux_data;
+ struct list_head list;
+ size_t num_nodes;
+ struct qcom_icc_node *nodes[];
+};
+
+struct qcom_icc_fabric {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
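+/*
+ * DEFINE_QNODE() declares a static qcom_icc_node; for example, the
+ * DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC) entry
+ * below creates a node with a single channel, a bus width of 4 and one
+ * link to SLAVE_A1NOC_SNOC.
+ */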
+#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \
+ _numlinks, ...) \
+ static struct qcom_icc_node _name = { \
+ .id = _id, \
+ .name = #_name, \
+ .channels = _channels, \
+ .buswidth = _buswidth, \
+ .num_links = _numlinks, \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC);
+DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC);
+DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG);
+DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC);
+DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
+DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
+DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0);
+DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC);
+DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0);
+DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0);
+DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0);
+DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0);
+DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0);
+DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0);
+DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0);
+DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0);
+DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0);
+DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0);
+DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0);
+DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0);
+DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0);
+DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0);
+DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0);
+DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0);
+DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0);
+DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0);
+DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0);
+DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0);
+DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0);
+DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0);
+DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0);
+DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG);
+DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC);
+DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC);
+DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0);
+DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0);
+DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0);
+DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0);
+DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0);
+DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0);
+DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0);
+DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0);
+DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0);
+DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0);
+DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0);
+DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0);
+DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0);
+
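+/*
+ * DEFINE_QBCM() declares a static qcom_icc_bcm together with the nodes it
+ * aggregates over; for example, DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi)
+ * below creates a keepalive BCM named "MC0" covering only the ebi node.
+ */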
+#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) \
+ static struct qcom_icc_bcm _name = { \
+ .name = _bcmname, \
+ .keepalive = _keepalive, \
+ .num_nodes = _numnodes, \
+ .nodes = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io);
+DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu);
+DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2);
+DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc);
+DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem);
+DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie);
+DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3);
+DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic);
+DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc);
+DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc);
+
+static struct qcom_icc_node *rsc_hlos_nodes[] = {
+ [MASTER_APPSS_PROC] = &acm_l3,
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_LLCC] = &llcc_mc,
+ [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
+ [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_BLSP_1] = &qhm_qup1,
+ [MASTER_BLSP_2] = &qhm_qup2,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_SPDM] = &qhm_spdm,
+ [MASTER_TIC] = &qhm_tic,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GNOC_MEM_NOC] = &qnm_apps,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_GFX3D] = &qxm_gpu,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_MDP1] = &qxm_mdp1,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [MASTER_GIC] = &xm_gic,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [SLAVE_EBI1] = &ebi,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_BLSP_2] = &qhs_qupv3_north,
+ [SLAVE_BLSP_1] = &qhs_qupv3_south,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
+ [SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
+ [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
+ [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &qxs_pcie,
+ [SLAVE_PCIE_1] = &qxs_pcie_gen3,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_SERVICE_GNOC] = &srvc_gnoc,
+ [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_sh0,
+ &bcm_mm0,
+ &bcm_sh1,
+ &bcm_mm1,
+ &bcm_sh2,
+ &bcm_mm2,
+ &bcm_sh3,
+ &bcm_mm3,
+ &bcm_sh5,
+ &bcm_sn0,
+ &bcm_ce0,
+ &bcm_cn0,
+ &bcm_qup0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+ &bcm_sn11,
+ &bcm_sn12,
+ &bcm_sn14,
+ &bcm_sn15,
+};
+
+static struct qcom_icc_desc sdm845_rsc_hlos = {
+ .nodes = rsc_hlos_nodes,
+ .num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
+ .bcms = rsc_hlos_bcms,
+ .num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
+};
+
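+/*
+ * qcom_icc_bcm_init() resolves a BCM's RPMh address and aux data (unit,
+ * width, vcd) from command db and links each of its nodes back to the BCM
+ * so that bandwidth requests can mark it dirty.
+ */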
+static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
+{
+ struct qcom_icc_node *qn;
+ const struct bcm_db *data;
+ size_t data_count;
+ int i;
+
+ bcm->addr = cmd_db_read_addr(bcm->name);
+ if (!bcm->addr) {
+ dev_err(dev, "%s could not find RPMh address\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ data = cmd_db_read_aux_data(bcm->name, &data_count);
+ if (IS_ERR(data)) {
+ dev_err(dev, "%s command db read error (%ld)\n",
+ bcm->name, PTR_ERR(data));
+ return PTR_ERR(data);
+ }
+ if (!data_count) {
+ dev_err(dev, "%s command db missing or partial aux data\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm->aux_data.unit = le32_to_cpu(data->unit);
+ bcm->aux_data.width = le16_to_cpu(data->width);
+ bcm->aux_data.vcd = data->vcd;
+ bcm->aux_data.reserved = data->reserved;
+
+ /*
+ * Link Qnodes to their respective BCMs
+ */
+ for (i = 0; i < bcm->num_nodes; i++) {
+ qn = bcm->nodes[i];
+ qn->bcms[qn->num_bcms] = bcm;
+ qn->num_bcms++;
+ }
+
+ return 0;
+}
+
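+/*
+ * tcs_cmd_gen() encodes a single TCS command: a request with both votes at
+ * zero is marked invalid, votes are clamped to the 14-bit field width, and
+ * commands carrying the commit flag also set the wait-for-completion flag.
+ */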
+static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
+ u32 addr, bool commit)
+{
+ bool valid = true;
+
+ if (!cmd)
+ return;
+
+ if (vote_x == 0 && vote_y == 0)
+ valid = false;
+
+ if (vote_x > BCM_TCS_CMD_VOTE_MASK)
+ vote_x = BCM_TCS_CMD_VOTE_MASK;
+
+ if (vote_y > BCM_TCS_CMD_VOTE_MASK)
+ vote_y = BCM_TCS_CMD_VOTE_MASK;
+
+ cmd->addr = addr;
+ cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
+
+ /*
+ * Set the wait-for-completion flag on commands that need to complete
+ * before the next command is sent.
+ */
+ if (commit)
+ cmd->wait = true;
+}
+
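+/*
+ * tcs_list_gen() walks a VCD-ordered list of dirty BCMs and emits one TCS
+ * command per BCM, setting the commit flag on the last BCM of each VCD.
+ * The n[] array records how many commands land in each RPMh batch so that
+ * a VCD is never split across batches.
+ */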
+static void tcs_list_gen(struct list_head *bcm_list,
+ struct tcs_cmd tcs_list[SDM845_MAX_VCD],
+ int n[SDM845_MAX_VCD])
+{
+ struct qcom_icc_bcm *bcm;
+ bool commit;
+ size_t idx = 0, batch = 0, cur_vcd_size = 0;
+
+ memset(n, 0, sizeof(int) * SDM845_MAX_VCD);
+
+ list_for_each_entry(bcm, bcm_list, list) {
+ commit = false;
+ cur_vcd_size++;
+ if ((list_is_last(&bcm->list, bcm_list)) ||
+ bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
+ commit = true;
+ cur_vcd_size = 0;
+ }
+ tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y,
+ bcm->addr, commit);
+ idx++;
+ n[batch]++;
+ /*
+ * Batch the BCMs in such a way that we do not split them across
+ * multiple payloads when they are under the same VCD. This is
+ * to ensure that every BCM is committed since we only set the
+ * commit bit on the last BCM request of every VCD.
+ */
+ if (n[batch] >= MAX_RPMH_PAYLOAD) {
+ if (!commit) {
+ n[batch] -= cur_vcd_size;
+ n[batch + 1] = cur_vcd_size;
+ }
+ batch++;
+ }
+ }
+}
+
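+/*
+ * bcm_aggregate() turns per-node bandwidth requests into BCM votes:
+ *   vote_x = max_i(sum_avg_i * width / (buswidth_i * channels_i)) * 1000 / unit
+ *   vote_y = max_i(max_peak_i * width / buswidth_i) * 1000 / unit
+ * with keepalive BCMs getting a minimum vote of 1 when they would otherwise
+ * be idle. As an illustrative example (the real unit and width come from
+ * command db), width = 16, unit = 1000, buswidth = 4, channels = 1 and
+ * sum_avg = 100 yield a vote_x contribution of 100 * 16 / 4 * 1000 / 1000 = 400.
+ */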
+static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+{
+ size_t i;
+ u64 agg_avg = 0;
+ u64 agg_peak = 0;
+ u64 temp;
+
+ for (i = 0; i < bcm->num_nodes; i++) {
+ temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ agg_avg = max(agg_avg, temp);
+
+ temp = bcm->nodes[i]->max_peak * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth);
+ agg_peak = max(agg_peak, temp);
+ }
+
+ temp = agg_avg * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_x = temp;
+
+ temp = agg_peak * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_y = temp;
+
+ if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) {
+ bcm->vote_x = 1;
+ bcm->vote_y = 1;
+ }
+
+ bcm->dirty = false;
+}
+
+static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+ qn->sum_avg = *agg_avg;
+ qn->max_peak = *agg_peak;
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qn->bcms[i]->dirty = true;
+
+ return 0;
+}
+
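+/*
+ * qcom_icc_set() is the provider's ->set() hook: it re-aggregates every
+ * dirty BCM, builds a VCD-grouped TCS command list, then invalidates the
+ * RPMh client and batch-writes the commands for the active-only state.
+ */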
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ struct tcs_cmd cmds[SDM845_MAX_BCMS];
+ struct list_head commit_list;
+ int commit_idx[SDM845_MAX_VCD];
+ int ret = 0, i;
+
+ if (!src)
+ node = dst;
+ else
+ node = src;
+
+ qp = to_qcom_provider(node->provider);
+
+ INIT_LIST_HEAD(&commit_list);
+
+ for (i = 0; i < qp->num_bcms; i++) {
+ if (qp->bcms[i]->dirty) {
+ bcm_aggregate(qp->bcms[i]);
+ list_add_tail(&qp->bcms[i]->list, &commit_list);
+ }
+ }
+
+ /*
+ * Construct the command list from a pre-ordered list of BCMs,
+ * grouped by VCD.
+ */
+ tcs_list_gen(&commit_list, cmds, commit_idx);
+
+ if (!commit_idx[0])
+ return ret;
+
+ ret = rpmh_invalidate(qp->dev);
+ if (ret) {
+ pr_err("Error invalidating RPMH client (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE,
+ cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending AMC RPMH requests (%d)\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
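+/*
+ * sort() comparator ordering BCM pointers by ascending VCD, so that
+ * tcs_list_gen() sees all BCMs belonging to one VCD contiguously.
+ */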
+static int cmp_vcd(const void *_l, const void *_r)
+{
+ const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l;
+ const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r;
+
+ if (l[0]->aux_data.vcd < r[0]->aux_data.vcd)
+ return -1;
+ else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd)
+ return 0;
+ else
+ return 1;
+}
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(&pdev->dev, "registered node %p %s %d\n", node,
+ qnodes[i]->name, node->id);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ /*
+ * Pre-sort the BCMs by VCD to make it easy to generate a command list
+ * that groups BCMs with the same VCD together. VCDs are numbered with
+ * the lowest being the most expensive time-wise, ensuring that those
+ * commands are sent earliest in the queue.
+ */
+ sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL);
+
+ platform_set_drvdata(pdev, qp);
+
+ dev_dbg(&pdev->dev, "Registered SDM845 ICC\n");
+
+ return ret;
+err:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sdm845",
+ .of_match_table = qnoc_of_match,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 87ba23a75b38..2a7b78bb98b4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
static void do_detach(struct iommu_dev_data *dev_data)
{
+ struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
u16 alias;
iommu = amd_iommu_rlookup_table[dev_data->devid];
alias = dev_data->alias;
- /* decrease reference counters */
- dev_data->domain->dev_iommu[iommu->index] -= 1;
- dev_data->domain->dev_cnt -= 1;
-
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */
device_flush_dte(dev_data);
+
+ /* Flush IOTLB */
+ domain_flush_tlb_pde(domain);
+
+ /* Wait for the flushes to finish */
+ domain_flush_complete(domain);
+
+ /* decrease reference counters - needs to happen after the flushes */
+ domain->dev_iommu[iommu->index] -= 1;
+ domain->dev_cnt -= 1;
}
/*
@@ -2617,13 +2624,13 @@ out_unmap:
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
- if (--mapped_pages)
+ if (--mapped_pages == 0)
goto out_free_iova;
}
}
out_free_iova:
- free_iova_fast(&dma_dom->iovad, address, npages);
+ free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
out_err:
return 0;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index dc9f14811e0f..9c49300e9fb7 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -39,6 +39,7 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
@@ -144,7 +145,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
for (tmp = dev; tmp; tmp = tmp->bus->self)
level++;
- size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+ size = sizeof(*info) + level * sizeof(info->path[0]);
if (size <= sizeof(dmar_pci_notify_info_buf)) {
info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
} else {
@@ -477,7 +478,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
if (!node_online(node))
- node = -1;
+ node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
}
@@ -1062,7 +1063,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->msagaw = msagaw;
iommu->segment = drhd->segment;
- iommu->node = -1;
+ iommu->node = NUMA_NO_NODE;
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 1bd0cd7168df..05c6bc099d62 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1260,6 +1260,7 @@ static int exynos_iommu_add_device(struct device *dev)
* direct calls to pm_runtime_get/put in this driver.
*/
data->link = device_link_add(dev, data->sysmmu,
+ DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME);
}
iommu_group_put(group);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2bd9ac285c0d..39a33dec4d0b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -47,6 +47,7 @@
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
+#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -363,7 +364,7 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
-static int intel_iommu_sm = 1;
+static int intel_iommu_sm;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
@@ -456,9 +457,9 @@ static int __init intel_iommu_setup(char *str)
} else if (!strncmp(str, "sp_off", 6)) {
pr_info("Disable supported super page\n");
intel_iommu_superpage = 0;
- } else if (!strncmp(str, "sm_off", 6)) {
- pr_info("Intel-IOMMU: disable scalable mode support\n");
- intel_iommu_sm = 0;
+ } else if (!strncmp(str, "sm_on", 5)) {
+ pr_info("Intel-IOMMU: scalable mode supported\n");
+ intel_iommu_sm = 1;
} else if (!strncmp(str, "tboot_noforce", 13)) {
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -1716,7 +1717,7 @@ static struct dmar_domain *alloc_domain(int flags)
return NULL;
memset(domain, 0, sizeof(*domain));
- domain->nid = -1;
+ domain->nid = NUMA_NO_NODE;
domain->flags = flags;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
@@ -5294,7 +5295,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list) {
- if (entry->type == IOMMU_RESV_RESERVED)
+ if (entry->type == IOMMU_RESV_MSI)
kfree(entry);
}
}
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 6ede4286b835..7e0df67bd3e9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
spin_lock_init(&dom->pgtlock);
- dom->pgt_va = dma_zalloc_coherent(data->dev,
- M2701_IOMMU_PGT_SIZE,
- &dom->pgt_pa, GFP_KERNEL);
+ dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
+ &dom->pgt_pa, GFP_KERNEL);
if (!dom->pgt_va)
return -ENOMEM;
@@ -442,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev)
iommu_spec.args_count = count;
mtk_iommu_create_mapping(dev, &iommu_spec);
+
+ /* dev->iommu_fwspec might have changed */
+ fwspec = dev_iommu_fwspec_get(dev);
+
of_node_put(iommu_spec.np);
}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index d8947b28db2d..f04a6df65eb8 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -224,7 +224,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.
*/
- if (dev->bus && !device_iommu_mapped(dev))
+ if (!err && dev->bus && !device_iommu_mapped(dev))
err = iommu_probe_device(dev);
/* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c9ba9f377f63..77d4bd93fe4b 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1071,7 +1071,8 @@ static int rk_iommu_add_device(struct device *dev)
iommu_group_put(group);
iommu_device_link(&iommu->iommu, dev);
- data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);
+ data->link = device_link_add(dev, iommu->dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
return 0;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3d1e60779078..5438abb1baba 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -129,6 +129,16 @@ config BRCMSTB_L2_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config DAVINCI_AINTC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config DAVINCI_CP_INTC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config DW_APB_ICTL
bool
select GENERIC_IRQ_CHIP
@@ -406,6 +416,15 @@ config IMX_IRQSTEER
help
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+config LS1X_IRQ
+ bool "Loongson-1 Interrupt Controller"
+ depends on MACH_LOONGSON32
+ default y
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the Loongson-1 platform Interrupt Controller.
+
endmenu
config SIFIVE_PLIC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c93713d24b86..85972ae1bd7f 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -6,6 +6,8 @@ obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
obj-$(CONFIG_ATH79) += irq-ath79-misc.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
+obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o
+obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
@@ -94,3 +96,4 @@ obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
+obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 0e65f609352e..83364fedbf0a 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
+ unsigned long flags;
- irq_gc_lock(gc);
+ irq_gc_lock_irqsave(gc, flags);
/* Save the current mask */
b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
@@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
}
- irq_gc_unlock(gc);
+ irq_gc_unlock_irqrestore(gc, flags);
}
static void brcmstb_l2_intc_resume(struct irq_data *d)
@@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
+ unsigned long flags;
- irq_gc_lock(gc);
+ irq_gc_lock_irqsave(gc, flags);
if (ct->chip.irq_ack) {
/* Clear unmasked non-wakeup interrupts */
irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
@@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
/* Restore the saved mask */
irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
- irq_gc_unlock(gc);
+ irq_gc_unlock_irqrestore(gc, flags);
}
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index 2543baba8b1f..5a2ec43b7ddd 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -95,7 +95,7 @@ static inline void setup_irq_channel(u32 magic, void __iomem *reg_addr)
/* Setup 64 channel slots */
for (i = 0; i < INTC_IRQS; i += 4)
- writel_relaxed(build_channel_val(i, magic), reg_addr + i);
+ writel(build_channel_val(i, magic), reg_addr + i);
}
static int __init
@@ -135,16 +135,10 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent)
static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
u32 irq_base)
{
- u32 irq;
-
if (hwirq == 0)
return 0;
- while (hwirq) {
- irq = __ffs(hwirq);
- hwirq &= ~BIT(irq);
- handle_domain_irq(root_domain, irq_base + irq, regs);
- }
+ handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
return 1;
}
@@ -154,12 +148,16 @@ static void gx_irq_handler(struct pt_regs *regs)
{
bool ret;
- do {
- ret = handle_irq_perbit(regs,
- readl_relaxed(reg_base + GX_INTC_PEN31_00), 0);
- ret |= handle_irq_perbit(regs,
- readl_relaxed(reg_base + GX_INTC_PEN63_32), 32);
- } while (ret);
+retry:
+ ret = handle_irq_perbit(regs,
+ readl(reg_base + GX_INTC_PEN63_32), 32);
+ if (ret)
+ goto retry;
+
+ ret = handle_irq_perbit(regs,
+ readl(reg_base + GX_INTC_PEN31_00), 0);
+ if (ret)
+ goto retry;
}
static int __init
@@ -174,14 +172,14 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
/*
* Initial enable reg to disable all interrupts
*/
- writel_relaxed(0x0, reg_base + GX_INTC_NEN31_00);
- writel_relaxed(0x0, reg_base + GX_INTC_NEN63_32);
+ writel(0x0, reg_base + GX_INTC_NEN31_00);
+ writel(0x0, reg_base + GX_INTC_NEN63_32);
/*
* Initial mask reg with all unmasked, because we only use enable reg
*/
- writel_relaxed(0x0, reg_base + GX_INTC_NMASK31_00);
- writel_relaxed(0x0, reg_base + GX_INTC_NMASK63_32);
+ writel(0x0, reg_base + GX_INTC_NMASK31_00);
+ writel(0x0, reg_base + GX_INTC_NMASK63_32);
setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE);
@@ -204,20 +202,29 @@ static void ck_irq_handler(struct pt_regs *regs)
void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00;
void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32;
- do {
- /* handle 0 - 31 irqs */
- ret = handle_irq_perbit(regs, readl_relaxed(reg_pen_lo), 0);
- ret |= handle_irq_perbit(regs, readl_relaxed(reg_pen_hi), 32);
+retry:
+ /* handle 0 - 63 irqs */
+ ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32);
+ if (ret)
+ goto retry;
- if (nr_irq == INTC_IRQS)
- continue;
+ ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0);
+ if (ret)
+ goto retry;
+
+ if (nr_irq == INTC_IRQS)
+ return;
- /* handle 64 - 127 irqs */
- ret |= handle_irq_perbit(regs,
- readl_relaxed(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
- ret |= handle_irq_perbit(regs,
- readl_relaxed(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
- } while (ret);
+ /* handle 64 - 127 irqs */
+ ret = handle_irq_perbit(regs,
+ readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
+ if (ret)
+ goto retry;
+
+ ret = handle_irq_perbit(regs,
+ readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
+ if (ret)
+ goto retry;
}
static int __init
@@ -230,11 +237,11 @@ ck_intc_init(struct device_node *node, struct device_node *parent)
return ret;
/* Initial enable reg to disable all interrupts */
- writel_relaxed(0, reg_base + CK_INTC_NEN31_00);
- writel_relaxed(0, reg_base + CK_INTC_NEN63_32);
+ writel(0, reg_base + CK_INTC_NEN31_00);
+ writel(0, reg_base + CK_INTC_NEN63_32);
/* Enable irq intc */
- writel_relaxed(BIT(31), reg_base + CK_INTC_ICR);
+ writel(BIT(31), reg_base + CK_INTC_ICR);
ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);
@@ -260,8 +267,8 @@ ck_dual_intc_init(struct device_node *node, struct device_node *parent)
return ret;
/* Initial enable reg to disable all interrupts */
- writel_relaxed(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
- writel_relaxed(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
+ writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
+ writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
diff --git a/drivers/irqchip/irq-davinci-aintc.c b/drivers/irqchip/irq-davinci-aintc.c
new file mode 100644
index 000000000000..810ccc4fe476
--- /dev/null
+++ b/drivers/irqchip/irq-davinci-aintc.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// Copyright (C) 2006, 2019 Texas Instruments.
+//
+// Interrupt handler for DaVinci boards.
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/irq-davinci-aintc.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+
+#include <asm/exception.h>
+
+#define DAVINCI_AINTC_FIQ_REG0 0x00
+#define DAVINCI_AINTC_FIQ_REG1 0x04
+#define DAVINCI_AINTC_IRQ_REG0 0x08
+#define DAVINCI_AINTC_IRQ_REG1 0x0c
+#define DAVINCI_AINTC_IRQ_IRQENTRY 0x14
+#define DAVINCI_AINTC_IRQ_ENT_REG0 0x18
+#define DAVINCI_AINTC_IRQ_ENT_REG1 0x1c
+#define DAVINCI_AINTC_IRQ_INCTL_REG 0x20
+#define DAVINCI_AINTC_IRQ_EABASE_REG 0x24
+#define DAVINCI_AINTC_IRQ_INTPRI0_REG 0x30
+#define DAVINCI_AINTC_IRQ_INTPRI7_REG 0x4c
+
+static void __iomem *davinci_aintc_base;
+static struct irq_domain *davinci_aintc_irq_domain;
+
+static inline void davinci_aintc_writel(unsigned long value, int offset)
+{
+ writel_relaxed(value, davinci_aintc_base + offset);
+}
+
+static inline unsigned long davinci_aintc_readl(int offset)
+{
+ return readl_relaxed(davinci_aintc_base + offset);
+}
+
+static __init void
+davinci_aintc_setup_gc(void __iomem *base,
+ unsigned int irq_start, unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_get_domain_generic_chip(davinci_aintc_irq_domain, irq_start);
+ gc->reg_base = base;
+ gc->irq_base = irq_start;
+
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+
+ ct->regs.ack = DAVINCI_AINTC_IRQ_REG0;
+ ct->regs.mask = DAVINCI_AINTC_IRQ_ENT_REG0;
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+}
+
+static asmlinkage void __exception_irq_entry
+davinci_aintc_handle_irq(struct pt_regs *regs)
+{
+ int irqnr = davinci_aintc_readl(DAVINCI_AINTC_IRQ_IRQENTRY);
+
+ /*
+ * Use the formula for entry vector index generation from section
+ * 8.3.3 of the manual.
+ */
+ irqnr >>= 2;
+ irqnr -= 1;
+
+ handle_domain_irq(davinci_aintc_irq_domain, irqnr, regs);
+}
+
+/* ARM Interrupt Controller Initialization */
+void __init davinci_aintc_init(const struct davinci_aintc_config *config)
+{
+ unsigned int irq_off, reg_off, prio, shift;
+ void __iomem *req;
+ int ret, irq_base;
+ const u8 *prios;
+
+ req = request_mem_region(config->reg.start,
+ resource_size(&config->reg),
+ "davinci-cp-intc");
+ if (!req) {
+ pr_err("%s: register range busy\n", __func__);
+ return;
+ }
+
+ davinci_aintc_base = ioremap(config->reg.start,
+ resource_size(&config->reg));
+ if (!davinci_aintc_base) {
+ pr_err("%s: unable to ioremap register range\n", __func__);
+ return;
+ }
+
+ /* Clear all interrupt requests */
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG1);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG1);
+
+ /* Disable all interrupts */
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_ENT_REG0);
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_ENT_REG1);
+
+ /* Interrupts disabled immediately, IRQ entry reflects all */
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_INCTL_REG);
+
+ /* we don't use the hardware vector table, just its entry addresses */
+ davinci_aintc_writel(0, DAVINCI_AINTC_IRQ_EABASE_REG);
+
+ /* Clear all interrupt requests */
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG1);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG1);
+
+ prios = config->prios;
+ for (reg_off = DAVINCI_AINTC_IRQ_INTPRI0_REG;
+ reg_off <= DAVINCI_AINTC_IRQ_INTPRI7_REG; reg_off += 4) {
+ for (shift = 0, prio = 0; shift < 32; shift += 4, prios++)
+ prio |= (*prios & 0x07) << shift;
+ davinci_aintc_writel(prio, reg_off);
+ }
+
+ irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ if (irq_base < 0) {
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n",
+ __func__, irq_base);
+ return;
+ }
+
+ davinci_aintc_irq_domain = irq_domain_add_legacy(NULL,
+ config->num_irqs, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (!davinci_aintc_irq_domain) {
+ pr_err("%s: unable to create interrupt domain\n", __func__);
+ return;
+ }
+
+ ret = irq_alloc_domain_generic_chips(davinci_aintc_irq_domain, 32, 1,
+ "AINTC", handle_edge_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0, 0);
+ if (ret) {
+ pr_err("%s: unable to allocate generic irq chips for domain\n",
+ __func__);
+ return;
+ }
+
+ for (irq_off = 0, reg_off = 0;
+ irq_off < config->num_irqs;
+ irq_off += 32, reg_off += 0x04)
+ davinci_aintc_setup_gc(davinci_aintc_base + reg_off,
+ irq_base + irq_off, 32);
+
+ set_handle_irq(davinci_aintc_handle_irq);
+}
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
new file mode 100644
index 000000000000..276da2772e7f
--- /dev/null
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Author: Steve Chen <schen@mvista.com>
+// Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+// Copyright (C) 2019, Texas Instruments
+//
+// TI Common Platform Interrupt Controller (cp_intc) driver
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-davinci-cp-intc.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+
+#define DAVINCI_CP_INTC_CTRL 0x04
+#define DAVINCI_CP_INTC_HOST_CTRL 0x0c
+#define DAVINCI_CP_INTC_GLOBAL_ENABLE 0x10
+#define DAVINCI_CP_INTC_SYS_STAT_IDX_CLR 0x24
+#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET 0x28
+#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR 0x2c
+#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET 0x34
+#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR 0x38
+#define DAVINCI_CP_INTC_PRIO_IDX 0x80
+#define DAVINCI_CP_INTC_SYS_STAT_CLR(n) (0x0280 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_ENABLE_CLR(n) (0x0380 + (n << 2))
+#define DAVINCI_CP_INTC_CHAN_MAP(n) (0x0400 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_POLARITY(n) (0x0d00 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_TYPE(n) (0x0d80 + (n << 2))
+#define DAVINCI_CP_INTC_HOST_ENABLE(n) (0x1500 + (n << 2))
+#define DAVINCI_CP_INTC_PRI_INDX_MASK GENMASK(9, 0)
+#define DAVINCI_CP_INTC_GPIR_NONE BIT(31)
+
+static void __iomem *davinci_cp_intc_base;
+static struct irq_domain *davinci_cp_intc_irq_domain;
+
+static inline unsigned int davinci_cp_intc_read(unsigned int offset)
+{
+ return readl_relaxed(davinci_cp_intc_base + offset);
+}
+
+static inline void davinci_cp_intc_write(unsigned long value,
+ unsigned int offset)
+{
+ writel_relaxed(value, davinci_cp_intc_base + offset);
+}
+
+static void davinci_cp_intc_ack_irq(struct irq_data *d)
+{
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_STAT_IDX_CLR);
+}
+
+static void davinci_cp_intc_mask_irq(struct irq_data *d)
+{
+ /* XXX don't know why we need to disable nIRQ here... */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR);
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR);
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+}
+
+static void davinci_cp_intc_unmask_irq(struct irq_data *d)
+{
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET);
+}
+
+static int davinci_cp_intc_set_irq_type(struct irq_data *d,
+ unsigned int flow_type)
+{
+ unsigned int reg, mask, polarity, type;
+
+ reg = BIT_WORD(d->hwirq);
+ mask = BIT_MASK(d->hwirq);
+ polarity = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_POLARITY(reg));
+ type = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_TYPE(reg));
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ polarity |= mask;
+ type |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ polarity &= ~mask;
+ type |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ polarity |= mask;
+ type &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ polarity &= ~mask;
+ type &= ~mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ davinci_cp_intc_write(polarity, DAVINCI_CP_INTC_SYS_POLARITY(reg));
+ davinci_cp_intc_write(type, DAVINCI_CP_INTC_SYS_TYPE(reg));
+
+ return 0;
+}
+
+static struct irq_chip davinci_cp_intc_irq_chip = {
+ .name = "cp_intc",
+ .irq_ack = davinci_cp_intc_ack_irq,
+ .irq_mask = davinci_cp_intc_mask_irq,
+ .irq_unmask = davinci_cp_intc_unmask_irq,
+ .irq_set_type = davinci_cp_intc_set_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static asmlinkage void __exception_irq_entry
+davinci_cp_intc_handle_irq(struct pt_regs *regs)
+{
+ int gpir, irqnr, none;
+
+ /*
+ * The interrupt number is in the first ten bits. The NONE field, when
+ * set to 1, indicates a spurious irq.
+ */
+
+ gpir = davinci_cp_intc_read(DAVINCI_CP_INTC_PRIO_IDX);
+ irqnr = gpir & DAVINCI_CP_INTC_PRI_INDX_MASK;
+ none = gpir & DAVINCI_CP_INTC_GPIR_NONE;
+
+ if (unlikely(none)) {
+ pr_err_once("%s: spurious irq!\n", __func__);
+ return;
+ }
+
+ handle_domain_irq(davinci_cp_intc_irq_domain, irqnr, regs);
+}
+
+static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);
+
+ irq_set_chip(virq, &davinci_cp_intc_irq_chip);
+ irq_set_probe(virq);
+ irq_set_handler(virq, handle_edge_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
+ .map = davinci_cp_intc_host_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init
+davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
+ struct device_node *node)
+{
+ unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
+ int offset, irq_base;
+ void __iomem *req;
+
+ req = request_mem_region(config->reg.start,
+ resource_size(&config->reg),
+ "davinci-cp-intc");
+ if (!req) {
+ pr_err("%s: register range busy\n", __func__);
+ return -EBUSY;
+ }
+
+ davinci_cp_intc_base = ioremap(config->reg.start,
+ resource_size(&config->reg));
+ if (!davinci_cp_intc_base) {
+ pr_err("%s: unable to ioremap register range\n", __func__);
+ return -EINVAL;
+ }
+
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_GLOBAL_ENABLE);
+
+ /* Disable all host interrupts */
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_ENABLE(0));
+
+ /* Disable system interrupts */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(~0,
+ DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
+
+ /* Set to normal mode, no nesting, no priority hold */
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_CTRL);
+
+ /* Clear system interrupt status */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(~0,
+ DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
+
+ /* Enable nIRQ (what about nFIQ?) */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+
+ /* Default all priorities to channel 7. */
+ num_regs = (config->num_irqs + 3) >> 2; /* 4 channels per register */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(0x07070707,
+ DAVINCI_CP_INTC_CHAN_MAP(offset));
+
+ irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ if (irq_base < 0) {
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n",
+ __func__, irq_base);
+ return irq_base;
+ }
+
+ davinci_cp_intc_irq_domain = irq_domain_add_legacy(
+ node, config->num_irqs, irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops, NULL);
+
+ if (!davinci_cp_intc_irq_domain) {
+ pr_err("%s: unable to create an interrupt domain\n", __func__);
+ return -EINVAL;
+ }
+
+ set_handle_irq(davinci_cp_intc_handle_irq);
+
+ /* Enable global interrupt */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_GLOBAL_ENABLE);
+
+ return 0;
+}
+
+int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
+{
+ return davinci_cp_intc_do_init(config, NULL);
+}
+
+static int __init davinci_cp_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct davinci_cp_intc_config config = { };
+ int ret;
+
+ ret = of_address_to_resource(node, 0, &config.reg);
+ if (ret) {
+ pr_err("%s: unable to get the register range from device-tree\n",
+ __func__);
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
+ if (ret) {
+ pr_err("%s: unable to read the 'ti,intc-size' property\n",
+ __func__);
+ return ret;
+ }
+
+ return davinci_cp_intc_do_init(&config, node);
+}
+IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index db20e992a40f..2dd1ff0cf558 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -97,9 +97,14 @@ struct its_device;
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
* list of devices writing to it.
+ *
+ * dev_alloc_lock has to be taken for device allocations, while the
+ * spinlock must be taken to parse data structures such as the device
+ * list.
*/
struct its_node {
raw_spinlock_t lock;
+ struct mutex dev_alloc_lock;
struct list_head entry;
void __iomem *base;
phys_addr_t phys_base;
@@ -156,6 +161,7 @@ struct its_device {
void *itt;
u32 nr_ites;
u32 device_id;
+ bool shared;
};
static struct {
@@ -1580,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
nr_irqs /= 2;
} while (nr_irqs > 0);
+ if (!nr_irqs)
+ err = -ENOSPC;
+
if (err)
goto out;
@@ -1737,6 +1746,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
u64 type = GITS_BASER_TYPE(val);
u64 baser_phys, tmp;
u32 alloc_pages;
+ struct page *page;
void *base;
retry_alloc_baser:
@@ -1749,10 +1759,11 @@ retry_alloc_baser:
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
- base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!base)
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
return -ENOMEM;
+ base = (void *)page_address(page);
baser_phys = virt_to_phys(base);
/* Check if the physical address of the memory is above 48bits */
@@ -1946,6 +1957,8 @@ static int its_alloc_tables(struct its_node *its)
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
its->device_ids);
+ break;
+
case GITS_BASER_TYPE_VCPU:
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
@@ -2059,6 +2072,29 @@ static int __init allocate_lpi_tables(void)
return 0;
}
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+{
+ u32 count = 1000000; /* 1s! */
+ bool clean;
+ u64 val;
+
+ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ val &= ~GICR_VPENDBASER_Valid;
+ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+ do {
+ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ clean = !(val & GICR_VPENDBASER_Dirty);
+ if (!clean) {
+ count--;
+ cpu_relax();
+ udelay(1);
+ }
+ } while (!clean && count);
+
+ return val;
+}
+
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
@@ -2144,6 +2180,30 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
+ if (gic_rdists->has_vlpis) {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+ /*
+ * It's possible for a CPU to receive VLPIs before it is
+ * scheduled as a vPE, especially for the first CPU, and a
+ * VLPI with an INTID larger than 2^(IDbits+1) will be treated
+ * as out of range and dropped by the GIC.
+ * So initialize IDbits to a known value to avoid dropping VLPIs.
+ */
+ val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+ pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+ smp_processor_id(), val);
+ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+ /*
+ * Also clear the Valid bit of GICR_VPENDBASER, in case any
+ * stale programming was left behind and could corrupt memory.
+ */
+ val = its_clear_vpend_valid(vlpi_base);
+ WARN_ON(val & GICR_VPENDBASER_Dirty);
+ }
+
/* Make sure the GIC has seen the above */
dsb(sy);
out:
@@ -2236,7 +2296,8 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
return NULL;
}
-static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
+static bool its_alloc_table_entry(struct its_node *its,
+ struct its_baser *baser, u32 id)
{
struct page *page;
u32 esz, idx;
@@ -2256,7 +2317,8 @@ static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
if (!page)
return false;
@@ -2287,7 +2349,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
if (!baser)
return (ilog2(dev_id) < its->device_ids);
- return its_alloc_table_entry(baser, dev_id);
+ return its_alloc_table_entry(its, baser, dev_id);
}
static bool its_alloc_vpe_table(u32 vpe_id)
@@ -2311,7 +2373,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
if (!baser)
return false;
- if (!its_alloc_table_entry(baser, vpe_id))
+ if (!its_alloc_table_entry(its, baser, vpe_id))
return false;
}
@@ -2345,7 +2407,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc(sz, GFP_KERNEL);
+ itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@@ -2399,13 +2461,14 @@ static void its_free_device(struct its_device *its_dev)
kfree(its_dev);
}
-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
{
int idx;
- idx = find_first_zero_bit(dev->event_map.lpi_map,
- dev->event_map.nr_lpis);
- if (idx == dev->event_map.nr_lpis)
+ idx = bitmap_find_free_region(dev->event_map.lpi_map,
+ dev->event_map.nr_lpis,
+ get_count_order(nvecs));
+ if (idx < 0)
return -ENOSPC;
*hwirq = dev->event_map.lpi_base + idx;
@@ -2421,6 +2484,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
struct its_device *its_dev;
struct msi_domain_info *msi_info;
u32 dev_id;
+ int err = 0;
/*
 * We ignore "dev" entirely, and rely on the dev_id that has
@@ -2443,6 +2507,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
return -EINVAL;
}
+ mutex_lock(&its->dev_alloc_lock);
its_dev = its_find_device(its, dev_id);
if (its_dev) {
/*
@@ -2450,18 +2515,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
* another alias (PCI bridge of some sort). No need to
* create the device.
*/
+ its_dev->shared = true;
pr_debug("Reusing ITT for devID %x\n", dev_id);
goto out;
}
its_dev = its_create_device(its, dev_id, nvec, true);
- if (!its_dev)
- return -ENOMEM;
+ if (!its_dev) {
+ err = -ENOMEM;
+ goto out;
+ }
pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
+ mutex_unlock(&its->dev_alloc_lock);
info->scratchpad[0].ptr = its_dev;
- return 0;
+ return err;
}
static struct msi_domain_ops its_msi_domain_ops = {
@@ -2501,21 +2570,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int err;
int i;
- for (i = 0; i < nr_irqs; i++) {
- err = its_alloc_device_irq(its_dev, &hwirq);
- if (err)
- return err;
+ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+ if (err)
+ return err;
- err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+ for (i = 0; i < nr_irqs; i++) {
+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
if (err)
return err;
irq_domain_set_hwirq_and_chip(domain, virq + i,
- hwirq, &its_irq_chip, its_dev);
+ hwirq + i, &its_irq_chip, its_dev);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
pr_debug("ID:%d pID:%d vID:%d\n",
- (int)(hwirq - its_dev->event_map.lpi_base),
- (int) hwirq, virq + i);
+ (int)(hwirq + i - its_dev->event_map.lpi_base),
+ (int)(hwirq + i), virq + i);
}
return 0;
@@ -2565,6 +2634,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its = its_dev->its;
int i;
for (i = 0; i < nr_irqs; i++) {
@@ -2579,8 +2649,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_reset_irq_data(data);
}
- /* If all interrupts have been freed, start mopping the floor */
- if (bitmap_empty(its_dev->event_map.lpi_map,
+ mutex_lock(&its->dev_alloc_lock);
+
+ /*
+ * If all interrupts have been freed, start mopping the
+ * floor. This is conditional on the device not being shared.
+ */
+ if (!its_dev->shared &&
+ bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)) {
its_lpi_free(its_dev->event_map.lpi_map,
its_dev->event_map.lpi_base,
@@ -2592,6 +2668,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
its_free_device(its_dev);
}
+ mutex_unlock(&its->dev_alloc_lock);
+
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
@@ -2754,26 +2832,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
static void its_vpe_deschedule(struct its_vpe *vpe)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
- u32 count = 1000000; /* 1s! */
- bool clean;
u64 val;
- /* We're being scheduled out */
- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
- val &= ~GICR_VPENDBASER_Valid;
- gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+ val = its_clear_vpend_valid(vlpi_base);
- do {
- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
- clean = !(val & GICR_VPENDBASER_Dirty);
- if (!clean) {
- count--;
- cpu_relax();
- udelay(1);
- }
- } while (!clean && count);
-
- if (unlikely(!clean && !count)) {
+ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
pr_err_ratelimited("ITS virtual pending table not cleaning\n");
vpe->idai = false;
vpe->pending_last = true;
@@ -3486,6 +3549,7 @@ static int __init its_probe_one(struct resource *res,
void __iomem *its_base;
u32 val, ctlr;
u64 baser, tmp, typer;
+ struct page *page;
int err;
its_base = ioremap(res->start, resource_size(res));
@@ -3516,6 +3580,7 @@ static int __init its_probe_one(struct resource *res,
}
raw_spin_lock_init(&its->lock);
+ mutex_init(&its->dev_alloc_lock);
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
typer = gic_read_typer(its_base + GITS_TYPER);
@@ -3541,12 +3606,13 @@ static int __init its_probe_one(struct resource *res,
its->numa_node = numa_node;
- its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(ITS_CMD_QUEUE_SZ));
- if (!its->cmd_base) {
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
+ if (!page) {
err = -ENOMEM;
goto out_free_its;
}
+ its->cmd_base = (void *)page_address(page);
its->cmd_write = its->cmd_base;
its->fwnode_handle = handle;
its->get_msi_base = its_irq_get_msi_base;
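
The its_alloc_device_irq() change above replaces the one-event-at-a-time find_first_zero_bit() scan with bitmap_find_free_region(), so a multi-MSI request now gets a naturally aligned, power-of-two block of events in one go. A small userspace sketch of that allocation policy, assuming a 32-event map and a simplified linear scan (this is not the kernel's bitmap code):

#include <stdint.h>
#include <stdio.h>

/* Find and claim a free, naturally aligned region of (1 << order)
 * events in a 32-bit occupancy map; returns the first event or -1. */
static int find_free_region(uint32_t *map, unsigned int order)
{
	unsigned int size = 1u << order;
	uint32_t mask = (size == 32) ? 0xffffffffu : ((1u << size) - 1);

	for (unsigned int idx = 0; idx + size <= 32; idx += size) {
		if (!(*map & (mask << idx))) {
			*map |= mask << idx;	/* claim the region */
			return (int)idx;
		}
	}
	return -1;
}

int main(void)
{
	uint32_t map = 0;

	/* e.g. a 3-vector request rounds up to order 2, i.e. 4 events */
	printf("first block at event %d\n", find_free_region(&map, 2));
	printf("next block at event %d\n", find_free_region(&map, 2));
	return 0;
}
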
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index ad70e7c416e3..fbfa7ff6deb1 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -24,7 +24,7 @@ struct mbi_range {
unsigned long *bm;
};
-static struct mutex mbi_lock;
+static DEFINE_MUTEX(mbi_lock);
static phys_addr_t mbi_phys_base;
static struct mbi_range *mbi_ranges;
static unsigned int mbi_range_nr;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index b0d4aab1a58c..d000870d9b6b 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -225,14 +225,6 @@ static struct syscore_ops i8259_syscore_ops = {
.shutdown = i8259A_shutdown,
};
-static int __init i8259A_init_sysfs(void)
-{
- register_syscore_ops(&i8259_syscore_ops);
- return 0;
-}
-
-device_initcall(i8259A_init_sysfs);
-
static void init_8259A(int auto_eoi)
{
unsigned long flags;
@@ -332,6 +324,7 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
panic("Failed to add i8259 IRQ domain");
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+ register_syscore_ops(&i8259_syscore_ops);
return domain;
}
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 5b3f1d735685..d1098f4da6a4 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -10,10 +10,11 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
-#define CTRL_STRIDE_OFF(_t, _r) (_t * 8 * _r)
+#define CTRL_STRIDE_OFF(_t, _r) (_t * 4 * _r)
#define CHANCTRL 0x0
#define CHANMASK(n, t) (CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
#define CHANSET(n, t) (CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
@@ -21,12 +22,15 @@
#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
+#define CHAN_MAX_OUTPUT_INT 0x8
+
struct irqsteer_data {
void __iomem *regs;
struct clk *ipg_clk;
- int irq;
+ int irq[CHAN_MAX_OUTPUT_INT];
+ int irq_count;
raw_spinlock_t lock;
- int irq_groups;
+ int reg_num;
int channel;
struct irq_domain *domain;
u32 *saved_reg;
@@ -35,7 +39,7 @@ struct irqsteer_data {
static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
unsigned long irqnum)
{
- return (data->irq_groups * 2 - irqnum / 32 - 1);
+ return (data->reg_num - irqnum / 32 - 1);
}
static void imx_irqsteer_irq_unmask(struct irq_data *d)
@@ -46,9 +50,9 @@ static void imx_irqsteer_irq_unmask(struct irq_data *d)
u32 val;
raw_spin_lock_irqsave(&data->lock, flags);
- val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+ val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
val |= BIT(d->hwirq % 32);
- writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+ writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
raw_spin_unlock_irqrestore(&data->lock, flags);
}
@@ -60,9 +64,9 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
u32 val;
raw_spin_lock_irqsave(&data->lock, flags);
- val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+ val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
val &= ~BIT(d->hwirq % 32);
- writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+ writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
raw_spin_unlock_irqrestore(&data->lock, flags);
}
@@ -87,23 +91,47 @@ static const struct irq_domain_ops imx_irqsteer_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
+static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
+{
+ int i;
+
+ for (i = 0; i < data->irq_count; i++) {
+ if (data->irq[i] == irq)
+ return i * 64;
+ }
+
+ return -EINVAL;
+}
+
static void imx_irqsteer_irq_handler(struct irq_desc *desc)
{
struct irqsteer_data *data = irq_desc_get_handler_data(desc);
- int i;
+ int hwirq;
+ int irq, i;
chained_irq_enter(irq_desc_get_chip(desc), desc);
- for (i = 0; i < data->irq_groups * 64; i += 32) {
- int idx = imx_irqsteer_get_reg_index(data, i);
+ irq = irq_desc_get_irq(desc);
+ hwirq = imx_irqsteer_get_hwirq_base(data, irq);
+ if (hwirq < 0) {
+ pr_warn("%s: unable to get hwirq base for irq %d\n",
+ __func__, irq);
+ return;
+ }
+
+ for (i = 0; i < 2; i++, hwirq += 32) {
+ int idx = imx_irqsteer_get_reg_index(data, hwirq);
unsigned long irqmap;
int pos, virq;
+ if (hwirq >= data->reg_num * 32)
+ break;
+
irqmap = readl_relaxed(data->regs +
- CHANSTATUS(idx, data->irq_groups));
+ CHANSTATUS(idx, data->reg_num));
for_each_set_bit(pos, &irqmap, 32) {
- virq = irq_find_mapping(data->domain, pos + i);
+ virq = irq_find_mapping(data->domain, pos + hwirq);
if (virq)
generic_handle_irq(virq);
}
@@ -117,7 +145,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct irqsteer_data *data;
struct resource *res;
- int ret;
+ u32 irqs_num;
+ int i, ret;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -130,12 +159,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
return PTR_ERR(data->regs);
}
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq <= 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -ENODEV;
- }
-
data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(data->ipg_clk)) {
ret = PTR_ERR(data->ipg_clk);
@@ -146,12 +169,19 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
raw_spin_lock_init(&data->lock);
- of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups);
+ of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
of_property_read_u32(np, "fsl,channel", &data->channel);
+ /*
+ * There is one output irq for each group of 64 inputs.
+ * One register bitmap can represent 32 input interrupts.
+ */
+ data->irq_count = DIV_ROUND_UP(irqs_num, 64);
+ data->reg_num = irqs_num / 32;
+
if (IS_ENABLED(CONFIG_PM_SLEEP)) {
data->saved_reg = devm_kzalloc(&pdev->dev,
- sizeof(u32) * data->irq_groups * 2,
+ sizeof(u32) * data->reg_num,
GFP_KERNEL);
if (!data->saved_reg)
return -ENOMEM;
@@ -166,27 +196,48 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
/* steer all IRQs into configured channel */
writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
- data->domain = irq_domain_add_linear(np, data->irq_groups * 64,
+ data->domain = irq_domain_add_linear(np, data->reg_num * 32,
&imx_irqsteer_domain_ops, data);
if (!data->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
- clk_disable_unprepare(data->ipg_clk);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
+ ret = -EINVAL;
+ goto out;
}
- irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler,
- data);
+ for (i = 0; i < data->irq_count; i++) {
+ data->irq[i] = irq_of_parse_and_map(np, i);
+ if (!data->irq[i]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ irq_set_chained_handler_and_data(data->irq[i],
+ imx_irqsteer_irq_handler,
+ data);
+ }
platform_set_drvdata(pdev, data);
return 0;
+out:
+ clk_disable_unprepare(data->ipg_clk);
+ return ret;
}
static int imx_irqsteer_remove(struct platform_device *pdev)
{
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < irqsteer_data->irq_count; i++)
+ irq_set_chained_handler_and_data(irqsteer_data->irq[i],
+ NULL, NULL);
- irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL);
irq_domain_remove(irqsteer_data->domain);
clk_disable_unprepare(irqsteer_data->ipg_clk);
@@ -199,9 +250,9 @@ static void imx_irqsteer_save_regs(struct irqsteer_data *data)
{
int i;
- for (i = 0; i < data->irq_groups * 2; i++)
+ for (i = 0; i < data->reg_num; i++)
data->saved_reg[i] = readl_relaxed(data->regs +
- CHANMASK(i, data->irq_groups));
+ CHANMASK(i, data->reg_num));
}
static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
@@ -209,9 +260,9 @@ static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
int i;
writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
- for (i = 0; i < data->irq_groups * 2; i++)
+ for (i = 0; i < data->reg_num; i++)
writel_relaxed(data->saved_reg[i],
- data->regs + CHANMASK(i, data->irq_groups));
+ data->regs + CHANMASK(i, data->reg_num));
}
static int imx_irqsteer_suspend(struct device *dev)
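
The probe rework above derives everything from the new "fsl,num-irqs" property: one output interrupt per 64 inputs (irq_count) and one 32-bit mask/status register per 32 inputs (reg_num), with the chained handler then scanning two status registers per output. A quick check of that arithmetic, with a made-up input count (plain C, illustration only):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int num_irqs = 448;	/* hypothetical "fsl,num-irqs" value */
	unsigned int irq_count = DIV_ROUND_UP(num_irqs, 64);	/* output irqs */
	unsigned int reg_num = num_irqs / 32;			/* mask/status regs */

	printf("outputs=%u regs=%u regs-per-output=%u\n",
	       irq_count, reg_num, reg_num / irq_count);
	return 0;
}
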
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
new file mode 100644
index 000000000000..86b72fbd3b45
--- /dev/null
+++ b/drivers/irqchip/irq-ls1x.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson-1 platform IRQ support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define LS_REG_INTC_STATUS 0x00
+#define LS_REG_INTC_EN 0x04
+#define LS_REG_INTC_SET 0x08
+#define LS_REG_INTC_CLR 0x0c
+#define LS_REG_INTC_POL 0x10
+#define LS_REG_INTC_EDGE 0x14
+
+/**
+ * struct ls1x_intc_priv - private ls1x-intc data.
+ * @domain: IRQ domain.
+ * @intc_base: I/O base of the intc registers.
+ */
+
+struct ls1x_intc_priv {
+ struct irq_domain *domain;
+ void __iomem *intc_base;
+};
+
+
+static void ls1x_chained_handle_irq(struct irq_desc *desc)
+{
+ struct ls1x_intc_priv *priv = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+ pending = readl(priv->intc_base + LS_REG_INTC_STATUS) &
+ readl(priv->intc_base + LS_REG_INTC_EN);
+
+ if (!pending)
+ spurious_interrupt();
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_irq(irq_find_mapping(priv->domain, bit));
+ pending &= ~BIT(bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ls_intc_set_bit(struct irq_chip_generic *gc,
+ unsigned int offset,
+ u32 mask, bool set)
+{
+ if (set)
+ writel(readl(gc->reg_base + offset) | mask,
+ gc->reg_base + offset);
+ else
+ writel(readl(gc->reg_base + offset) & ~mask,
+ gc->reg_base + offset);
+}
+
+static int ls_intc_set_type(struct irq_data *data, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ u32 mask = data->mask;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irqd_set_trigger_type(data, type);
+ return irq_setup_alt_chip(data, type);
+}
+
+
+static int __init ls1x_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ struct ls1x_intc_priv *priv;
+ int parent_irq, err = 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->intc_base = of_iomap(node, 0);
+ if (!priv->intc_base) {
+ err = -ENODEV;
+ goto out_free_priv;
+ }
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("ls1x-irq: unable to get parent irq\n");
+ err = -ENODEV;
+ goto out_iounmap;
+ }
+
+ /* Set up an IRQ domain */
+ priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
+ NULL);
+ if (!priv->domain) {
+ pr_err("ls1x-irq: cannot add IRQ domain\n");
+ goto out_iounmap;
+ }
+
+ err = irq_alloc_domain_generic_chips(priv->domain, 32, 2,
+ node->full_name, handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (err) {
+ pr_err("ls1x-irq: unable to register IRQ domain\n");
+ goto out_free_domain;
+ }
+
+ /* Mask all irqs */
+ writel(0x0, priv->intc_base + LS_REG_INTC_EN);
+
+ /* Ack all irqs */
+ writel(0xffffffff, priv->intc_base + LS_REG_INTC_CLR);
+
+ /* Set all irqs to high level triggered */
+ writel(0xffffffff, priv->intc_base + LS_REG_INTC_POL);
+
+ gc = irq_get_domain_generic_chip(priv->domain, 0);
+
+ gc->reg_base = priv->intc_base;
+
+ ct = gc->chip_types;
+ ct[0].type = IRQ_TYPE_LEVEL_MASK;
+ ct[0].regs.mask = LS_REG_INTC_EN;
+ ct[0].regs.ack = LS_REG_INTC_CLR;
+ ct[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ ct[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ ct[0].chip.irq_ack = irq_gc_ack_set_bit;
+ ct[0].chip.irq_set_type = ls_intc_set_type;
+ ct[0].handler = handle_level_irq;
+
+ ct[1].type = IRQ_TYPE_EDGE_BOTH;
+ ct[1].regs.mask = LS_REG_INTC_EN;
+ ct[1].regs.ack = LS_REG_INTC_CLR;
+ ct[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ ct[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ ct[1].chip.irq_ack = irq_gc_ack_set_bit;
+ ct[1].chip.irq_set_type = ls_intc_set_type;
+ ct[1].handler = handle_edge_irq;
+
+ irq_set_chained_handler_and_data(parent_irq,
+ ls1x_chained_handle_irq, priv);
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(priv->domain);
+out_iounmap:
+ iounmap(priv->intc_base);
+out_free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(ls1x_intc, "loongson,ls1x-intc", ls1x_intc_of_init);
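
ls1x_chained_handle_irq() above ANDs the raw status register with the enable mask and then dispatches the set bits lowest-first with __ffs(). The same loop in portable C, using a compiler builtin in place of the kernel helper and made-up register values (illustration only):

#include <stdint.h>
#include <stdio.h>

static void handle_one(unsigned int hwirq)
{
	printf("dispatch hwirq %u\n", hwirq);
}

int main(void)
{
	uint32_t status = 0x00000143;	/* pretend INTC_STATUS readout */
	uint32_t enable = 0x000000c3;	/* pretend INTC_EN readout */
	uint32_t pending = status & enable;

	while (pending) {
		unsigned int bit = (unsigned int)__builtin_ctz(pending);

		handle_one(bit);	/* irq_find_mapping() + generic_handle_irq() */
		pending &= ~(1u << bit);
	}
	return 0;
}
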
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
index e9256dee1a45..8b81271c823c 100644
--- a/drivers/irqchip/irq-madera.c
+++ b/drivers/irqchip/irq-madera.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -16,7 +15,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/irqchip/irq-madera.h>
#include <linux/mfd/madera/core.h>
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25f32e1d7764..3496b61a312a 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -34,6 +34,9 @@
#define SEL_INT_PENDING (1 << 6)
#define SEL_INT_NUM_MASK 0x3f
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
+
struct icu_chip_data {
int nr_irqs;
unsigned int virq_base;
@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
static const struct mmp_intc_conf mmp2_conf = {
.conf_enable = 0x20,
.conf_disable = 0x0,
- .conf_mask = 0x7f,
+ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};
static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..cf755964f2f8 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,62 +59,83 @@ static void __iomem *plic_regs;
struct plic_handler {
bool present;
- int ctxid;
+ void __iomem *hart_base;
+ /*
+ * Protect mask operations on the registers given that we can't
+ * assume atomic memory operations work on them.
+ */
+ raw_spinlock_t enable_lock;
+ void __iomem *enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
- return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
- return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+ int hwirq, int enable)
{
- u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+ u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
u32 hwirq_mask = 1 << (hwirq % 32);
- raw_spin_lock(&plic_toggle_lock);
+ raw_spin_lock(&handler->enable_lock);
if (enable)
writel(readl(reg) | hwirq_mask, reg);
else
writel(readl(reg) & ~hwirq_mask, reg);
- raw_spin_unlock(&plic_toggle_lock);
+ raw_spin_unlock(&handler->enable_lock);
}
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+ int hwirq, int enable)
{
int cpu;
- writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
- for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+ writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+ for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present)
- plic_toggle(handler->ctxid, d->hwirq, enable);
+ plic_toggle(handler, hwirq, enable);
}
}
static void plic_irq_enable(struct irq_data *d)
{
- plic_irq_toggle(d, 1);
+ unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+ cpu_online_mask);
+ if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+ return;
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
static void plic_irq_disable(struct irq_data *d)
{
- plic_irq_toggle(d, 0);
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ unsigned int cpu;
+
+ if (force)
+ cpu = cpumask_first(mask_val);
+ else
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (!irqd_irq_disabled(d)) {
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
/*
@@ -123,6 +144,9 @@ static struct irq_chip plic_chip = {
*/
.irq_enable = plic_irq_enable,
.irq_disable = plic_irq_disable,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = plic_set_affinity,
+#endif
};
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
@@ -150,7 +174,7 @@ static struct irq_domain *plic_irqdomain;
static void plic_handle_irq(struct pt_regs *regs)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;
WARN_ON_ONCE(!handler->present);
@@ -186,7 +210,7 @@ static int plic_find_hart_id(struct device_node *node)
static int __init plic_init(struct device_node *node,
struct device_node *parent)
{
- int error = 0, nr_handlers, nr_mapped = 0, i;
+ int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
if (plic_regs) {
@@ -203,10 +227,10 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!nr_irqs))
goto out_iounmap;
- nr_handlers = of_irq_count(node);
- if (WARN_ON(!nr_handlers))
+ nr_contexts = of_irq_count(node);
+ if (WARN_ON(!nr_contexts))
goto out_iounmap;
- if (WARN_ON(nr_handlers < num_possible_cpus()))
+ if (WARN_ON(nr_contexts < num_possible_cpus()))
goto out_iounmap;
error = -ENOMEM;
@@ -215,7 +239,7 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!plic_irqdomain))
goto out_iounmap;
- for (i = 0; i < nr_handlers; i++) {
+ for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
struct plic_handler *handler;
irq_hw_number_t hwirq;
@@ -237,19 +261,33 @@ static int __init plic_init(struct device_node *node,
}
cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ pr_warn("Invalid cpuid for context %d\n", i);
+ continue;
+ }
+
handler = per_cpu_ptr(&plic_handlers, cpu);
+ if (handler->present) {
+ pr_warn("handler already present for context %d.\n", i);
+ continue;
+ }
+
handler->present = true;
- handler->ctxid = i;
+ handler->hart_base =
+ plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ raw_spin_lock_init(&handler->enable_lock);
+ handler->enable_base =
+ plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
/* priority must be > threshold to trigger an interrupt */
- writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+ writel(0, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
- plic_toggle(i, hwirq, 0);
- nr_mapped++;
+ plic_toggle(handler, hwirq, 0);
+ nr_handlers++;
}
- pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
- nr_irqs, nr_mapped, nr_handlers);
+ pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
+ nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;
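
plic_toggle() above addresses each context's enable array as one bit per interrupt source: word hwirq / 32, bit hwirq % 32, updated with a read-modify-write under that handler's own lock. A minimal model of the addressing, with plain memory instead of MMIO and the locking omitted for brevity (not the driver code):

#include <stdint.h>
#include <stdio.h>

static void toggle(uint32_t *enable_words, unsigned int hwirq, int on)
{
	uint32_t *reg = &enable_words[hwirq / 32];
	uint32_t mask = 1u << (hwirq % 32);

	if (on)
		*reg |= mask;
	else
		*reg &= ~mask;
}

int main(void)
{
	uint32_t enables[4] = { 0 };	/* covers hwirq 0..127 for one context */

	toggle(enables, 53, 1);
	printf("word 1 = 0x%08x\n", enables[1]);	/* bit 21 set */
	toggle(enables, 53, 0);
	printf("word 1 = 0x%08x\n", enables[1]);
	return 0;
}
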
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 6edfd4bfa169..a93296b9b45d 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -822,6 +822,7 @@ out_unmap:
static const struct irq_domain_ops stm32_exti_h_domain_ops = {
.alloc = stm32_exti_h_domain_alloc,
.free = irq_domain_free_irqs_common,
+ .xlate = irq_domain_xlate_twocell,
};
static int
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 5385f5768345..27933338f7b3 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d)
unsigned int mask = 1u << d->hwirq;
if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
- XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
- set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
- HW_IRQ_MX_BASE), MIENG);
- } else {
- mask = __this_cpu_read(cached_irq_mask) & ~mask;
- __this_cpu_write(cached_irq_mask, mask);
- xtensa_set_sr(mask, intenable);
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+ unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+ if (ext_irq >= HW_IRQ_MX_BASE) {
+ set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
+ return;
+ }
}
+ mask = __this_cpu_read(cached_irq_mask) & ~mask;
+ __this_cpu_write(cached_irq_mask, mask);
+ xtensa_set_sr(mask, intenable);
}
static void xtensa_mx_irq_unmask(struct irq_data *d)
@@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
unsigned int mask = 1u << d->hwirq;
if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
- XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
- set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
- HW_IRQ_MX_BASE), MIENGSET);
- } else {
- mask |= __this_cpu_read(cached_irq_mask);
- __this_cpu_write(cached_irq_mask, mask);
- xtensa_set_sr(mask, intenable);
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
+ unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+ if (ext_irq >= HW_IRQ_MX_BASE) {
+ set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
+ return;
+ }
}
+ mask |= __this_cpu_read(cached_irq_mask);
+ __this_cpu_write(cached_irq_mask, mask);
+ xtensa_set_sr(mask, intenable);
}
static void xtensa_mx_irq_enable(struct irq_data *d)
@@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d)
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
- xtensa_set_sr(1 << d->hwirq, intset);
+ unsigned int mask = 1u << d->hwirq;
+
+ if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+ return 0;
+ xtensa_set_sr(mask, intset);
return 1;
}
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index c200234dd2c9..ab12328be5ee 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d)
static int xtensa_irq_retrigger(struct irq_data *d)
{
- xtensa_set_sr(1 << d->hwirq, intset);
+ unsigned int mask = 1u << d->hwirq;
+
+ if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+ return 0;
+ xtensa_set_sr(mask, intset);
return 1;
}
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index ab0b63a4d045..e1de8b1a1001 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -633,7 +633,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
flush_send_queue(cs);
break;
}
- /* Pass through */
+ /* fall through */
default:
/* pass through to underlying serial device */
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4ac378e48902..40ca1e8fa09f 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
int i, j;
for (j = 0; j < AVM_MAXVERSION; j++)
- cinfo->version[j] = "\0\0" + 1;
+ cinfo->version[j] = "";
for (i = 0, j = 0;
j < AVM_MAXVERSION && i < cinfo->versionlen;
j++, i += cinfo->versionbuf[i] + 1)
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 6d05946b445e..26e3182bbca8 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -262,8 +262,10 @@ hfcsusb_ph_info(struct hfcsusb *hw)
struct dchannel *dch = &hw->dch;
int i;
- phi = kzalloc(sizeof(struct ph_info) +
- dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC);
+ phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
+ if (!phi)
+ return;
+
phi->dch.ch.protocol = hw->protocol;
phi->dch.ch.Flags = dch->Flags;
phi->dch.state = dch->state;
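
The kzalloc() above now uses struct_size(), which sizes a structure ending in a flexible array (here, nrbchan trailing channel entries) and saturates on overflow instead of silently wrapping. A plain-C view of the size it evaluates to, without the overflow handling; the struct layout below is a stand-in, not the mISDN one:

#include <stddef.h>
#include <stdio.h>

struct ch_stat {
	int protocol;
	int flags;
};

struct info {
	int state;
	int nrbchan;
	struct ch_stat bch[];	/* flexible array member */
};

int main(void)
{
	size_t nrbchan = 2;
	/* roughly what struct_size(phi, bch, nrbchan) computes, minus the
	 * overflow saturation the kernel macro adds */
	size_t sz = sizeof(struct info) + nrbchan * sizeof(struct ch_stat);

	printf("allocation size: %zu bytes\n", sz);
	return 0;
}
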
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 81dd465afcf4..71a8312592d6 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -656,7 +656,7 @@ hfcpci_fill_fifo(struct BCState *bcs)
schedule_event(bcs, B_ACKPENDING);
}
- dev_kfree_skb_any(bcs->tx_skb);
+ dev_consume_skb_any(bcs->tx_skb);
bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */
}
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index e932a152c405..d7b011c8d692 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -332,7 +332,7 @@ static int make_raw_data_56k(struct BCState *bcs) {
bitcnt = 0;
}
val >>= 1;
- };
+ }
fcs = PPP_INITFCS;
for (i = 0; i < bcs->tx_skb->len; i++) {
val = bcs->tx_skb->data[i];
@@ -415,7 +415,7 @@ static void read_raw(struct BCState *bcs, u_int *buf, int cnt) {
else { // it's 56K
mask = 0x7f;
bits = 7;
- };
+ }
for (i = 0; i < cnt; i++) {
val = bcs->channel ? ((*p >> 8) & 0xff) : (*p & 0xff);
p++;
@@ -623,7 +623,7 @@ void netjet_fill_dma(struct BCState *bcs)
else { // it's 56k
if (make_raw_data_56k(bcs))
return;
- };
+ }
if (bcs->cs->debug & L1_DEB_HSCX)
debugl1(bcs->cs, "tiger fill_dma2: c%d %4lx", bcs->channel,
bcs->Flag);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index 298c8dba0321..6b8c3fbe3965 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -598,7 +598,7 @@ prcalling(char *dest, u_char *p)
dp += prbits(dp, *++p, 8, 8);
*dp++ = '\n';
l--;
- };
+ }
p++;
dp += sprintf(dp, " number digits ");
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 8cd2d8277426..b421b86ca7da 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -512,7 +512,7 @@ static inline const char *ST5481_CMD_string(int evt)
case ST5481_CMD_AR10: return "AR10";
case ST5481_CMD_ARL: return "ARL";
case ST5481_CMD_PDN: return "PDN";
- };
+ }
sprintf(s, "0x%x", evt);
return s;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 6a5b3f00f9ad..74ee00f5b310 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -166,11 +166,9 @@ isdn_wildmat(char *s, char *p)
for (; *p; s++, p++)
switch (*p) {
case '\\':
- /*
- * Literal match with following character,
- * fall through.
- */
+ /* Literal match with following character. */
p++;
+ /* fall through */
default:
if (*s != *p)
return (*s == '\0') ? 2 : 1;
@@ -729,6 +727,7 @@ isdn_status_callback(isdn_ctrl *c)
if (divert_if)
return (divert_if->stat_callback(c));
#endif /* CONFIG_ISDN_DIVERSION */
+ /* fall through */
default:
return -1;
}
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 1b2239c1d569..43700fc19a31 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
modem_info *info = (modem_info *) tty->driver_data;
+ mutex_lock(&modem_info_mutex);
if (!old_termios)
isdn_tty_change_speed(info);
else {
if (tty->termios.c_cflag == old_termios->c_cflag &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
- tty->termios.c_ospeed == old_termios->c_ospeed)
+ tty->termios.c_ospeed == old_termios->c_ospeed) {
+ mutex_unlock(&modem_info_mutex);
return;
+ }
isdn_tty_change_speed(info);
}
+ mutex_unlock(&modem_info_mutex);
}
/*
@@ -3638,7 +3642,7 @@ isdn_tty_edit_at(const char *p, int count, modem_info *info)
break;
} else
m->mdmcmdl = 0;
- /* Fall through, check for 'A' */
+ /* Fall through - check for 'A' */
case 0:
if (c == 'A') {
m->mdmcmd[m->mdmcmdl] = c;
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
index 2a5f6668756c..d11fe76f138f 100644
--- a/drivers/isdn/i4l/isdn_v110.c
+++ b/drivers/isdn/i4l/isdn_v110.c
@@ -354,7 +354,7 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
return line;
}
- /* else: fall through */
+ /* fall through */
case 128:
m[line] = 128; /* leftmost -> set byte to 1000000 */
mbit = 64; /* current bit in the matrix line */
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index a4597e96c916..f4253d468ae1 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -72,7 +72,7 @@ isdnloop_bchan_send(isdnloop_card *card, int ch)
printk(KERN_WARNING "isdnloop: no rcard, skb dropped\n");
dev_kfree_skb(skb);
- };
+ }
cmd.command = ISDN_STAT_BSENT;
cmd.parm.length = len;
card->interface.statcallb(&cmd);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 15d3ca37669a..4ab8b1b6608f 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -103,7 +103,7 @@ mISDN_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
static inline void
mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
- struct timeval tv;
+ struct __kernel_old_timeval tv;
if (_pms(sk)->cmask & MISDN_TIME_STAMP) {
skb_get_timestamp(skb, &tv);
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 211ed6cffd10..578978711887 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
spin_lock_irqsave(&timer->dev->lock, flags);
if (timer->id >= 0)
list_move_tail(&timer->list, &timer->dev->expired);
- spin_unlock_irqrestore(&timer->dev->lock, flags);
wake_up_interruptible(&timer->dev->wait);
+ spin_unlock_irqrestore(&timer->dev->lock, flags);
}
static int
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index ede4fa0ac2cc..e3da7c03da1b 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -16,7 +16,9 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/rwsem.h>
+#include <linux/slab.h>
#include "leds.h"
DECLARE_RWSEM(leds_list_lock);
@@ -310,6 +312,34 @@ int led_update_brightness(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_update_brightness);
+u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size)
+{
+ struct device_node *np = dev_of_node(led_cdev->dev);
+ u32 *pattern;
+ int count;
+
+ if (!np)
+ return NULL;
+
+ count = of_property_count_u32_elems(np, "led-pattern");
+ if (count < 0)
+ return NULL;
+
+ pattern = kcalloc(count, sizeof(*pattern), GFP_KERNEL);
+ if (!pattern)
+ return NULL;
+
+ if (of_property_read_u32_array(np, "led-pattern", pattern, count)) {
+ kfree(pattern);
+ return NULL;
+ }
+
+ *size = count;
+
+ return pattern;
+}
+EXPORT_SYMBOL_GPL(led_get_default_pattern);
+
/* Caller must ensure led_cdev->led_access held */
void led_sysfs_disable(struct led_classdev *led_cdev)
{
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index a2e74feee2b2..fd64df5a57a5 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
/* Let the programs run for couple of ms and check the engine status */
usleep_range(3000, 6000);
- lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ if (ret)
+ return ret;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 3d79a6380761..723f2f17497a 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -201,7 +201,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
if (!fw) {
dev_err(dev, "firmware request failed\n");
- goto out;
+ return;
}
/* handling firmware data is chip dependent */
@@ -214,9 +214,9 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
mutex_unlock(&chip->lock);
-out:
/* firmware should be released for other channel use */
release_firmware(chip->fw);
+ chip->fw = NULL;
}
static int lp55xx_request_firmware(struct lp55xx_chip *chip)
diff --git a/drivers/leds/trigger/ledtrig-oneshot.c b/drivers/leds/trigger/ledtrig-oneshot.c
index 95c9be4b6e7e..8808f0ad7339 100644
--- a/drivers/leds/trigger/ledtrig-oneshot.c
+++ b/drivers/leds/trigger/ledtrig-oneshot.c
@@ -130,6 +130,34 @@ static struct attribute *oneshot_trig_attrs[] = {
};
ATTRIBUTE_GROUPS(oneshot_trig);
+static void pattern_init(struct led_classdev *led_cdev)
+{
+ u32 *pattern;
+ unsigned int size = 0;
+
+ pattern = led_get_default_pattern(led_cdev, &size);
+ if (!pattern)
+ goto out_default;
+
+ if (size != 2) {
+ dev_warn(led_cdev->dev,
+ "Expected 2 but got %u values for delays pattern\n",
+ size);
+ goto out_default;
+ }
+
+ led_cdev->blink_delay_on = pattern[0];
+ led_cdev->blink_delay_off = pattern[1];
+ kfree(pattern);
+
+ return;
+
+out_default:
+ kfree(pattern);
+ led_cdev->blink_delay_on = DEFAULT_DELAY;
+ led_cdev->blink_delay_off = DEFAULT_DELAY;
+}
+
static int oneshot_trig_activate(struct led_classdev *led_cdev)
{
struct oneshot_trig_data *oneshot_data;
@@ -140,8 +168,14 @@ static int oneshot_trig_activate(struct led_classdev *led_cdev)
led_set_trigger_data(led_cdev, oneshot_data);
- led_cdev->blink_delay_on = DEFAULT_DELAY;
- led_cdev->blink_delay_off = DEFAULT_DELAY;
+ if (led_cdev->flags & LED_INIT_DEFAULT_TRIGGER) {
+ pattern_init(led_cdev);
+ /*
+ * Mark as initialized even on pattern_init() error because
+ * any subsequent call to it would produce the same error.
+ */
+ led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
+ }
return 0;
}
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index 1870cf87afe1..718729c89440 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -220,22 +220,10 @@ out:
return count;
}
-static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
- const char *buf, size_t count,
- bool hw_pattern)
+static int pattern_trig_store_patterns_string(struct pattern_trig_data *data,
+ const char *buf, size_t count)
{
- struct pattern_trig_data *data = led_cdev->trigger_data;
- int ccount, cr, offset = 0, err = 0;
-
- mutex_lock(&data->lock);
-
- del_timer_sync(&data->timer);
-
- if (data->is_hw_pattern)
- led_cdev->pattern_clear(led_cdev);
-
- data->is_hw_pattern = hw_pattern;
- data->npatterns = 0;
+ int ccount, cr, offset = 0;
while (offset < count - 1 && data->npatterns < MAX_PATTERNS) {
cr = 0;
@@ -244,14 +232,54 @@ static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
&data->patterns[data->npatterns].delta_t, &cr);
if (ccount != 2) {
data->npatterns = 0;
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
offset += cr;
data->npatterns++;
}
+ return 0;
+}
+
+static int pattern_trig_store_patterns_int(struct pattern_trig_data *data,
+ const u32 *buf, size_t count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i += 2) {
+ data->patterns[data->npatterns].brightness = buf[i];
+ data->patterns[data->npatterns].delta_t = buf[i + 1];
+ data->npatterns++;
+ }
+
+ return 0;
+}
+
+static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
+ const char *buf, const u32 *buf_int,
+ size_t count, bool hw_pattern)
+{
+ struct pattern_trig_data *data = led_cdev->trigger_data;
+ int err = 0;
+
+ mutex_lock(&data->lock);
+
+ del_timer_sync(&data->timer);
+
+ if (data->is_hw_pattern)
+ led_cdev->pattern_clear(led_cdev);
+
+ data->is_hw_pattern = hw_pattern;
+ data->npatterns = 0;
+
+ if (buf)
+ err = pattern_trig_store_patterns_string(data, buf, count);
+ else
+ err = pattern_trig_store_patterns_int(data, buf_int, count);
+ if (err)
+ goto out;
+
err = pattern_trig_start_pattern(led_cdev);
if (err)
data->npatterns = 0;
@@ -275,7 +303,7 @@ static ssize_t pattern_store(struct device *dev, struct device_attribute *attr,
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- return pattern_trig_store_patterns(led_cdev, buf, count, false);
+ return pattern_trig_store_patterns(led_cdev, buf, NULL, count, false);
}
static DEVICE_ATTR_RW(pattern);
@@ -295,7 +323,7 @@ static ssize_t hw_pattern_store(struct device *dev,
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- return pattern_trig_store_patterns(led_cdev, buf, count, true);
+ return pattern_trig_store_patterns(led_cdev, buf, NULL, count, true);
}
static DEVICE_ATTR_RW(hw_pattern);
@@ -331,6 +359,30 @@ static const struct attribute_group *pattern_trig_groups[] = {
NULL,
};
+static void pattern_init(struct led_classdev *led_cdev)
+{
+ unsigned int size = 0;
+ u32 *pattern;
+ int err;
+
+ pattern = led_get_default_pattern(led_cdev, &size);
+ if (!pattern)
+ return;
+
+ if (size % 2) {
+ dev_warn(led_cdev->dev, "Expected pattern of tuples\n");
+ goto out;
+ }
+
+ err = pattern_trig_store_patterns(led_cdev, NULL, pattern, size, false);
+ if (err < 0)
+ dev_warn(led_cdev->dev,
+ "Pattern initialization failed with error %d\n", err);
+
+out:
+ kfree(pattern);
+}
+
static int pattern_trig_activate(struct led_classdev *led_cdev)
{
struct pattern_trig_data *data;
@@ -354,6 +406,15 @@ static int pattern_trig_activate(struct led_classdev *led_cdev)
timer_setup(&data->timer, pattern_trig_timer_function, 0);
led_cdev->activated = true;
+ if (led_cdev->flags & LED_INIT_DEFAULT_TRIGGER) {
+ pattern_init(led_cdev);
+ /*
+ * Mark as initialized even on pattern_init() error because
+ * any subsequent call to it would produce the same error.
+ */
+ led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
+ }
+
return 0;
}
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index 7c14983781ee..ca898c1383be 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/ctype.h>
+#include <linux/slab.h>
#include <linux/leds.h>
static ssize_t led_delay_on_show(struct device *dev,
@@ -77,8 +78,41 @@ static struct attribute *timer_trig_attrs[] = {
};
ATTRIBUTE_GROUPS(timer_trig);
+static void pattern_init(struct led_classdev *led_cdev)
+{
+ u32 *pattern;
+ unsigned int size = 0;
+
+ pattern = led_get_default_pattern(led_cdev, &size);
+ if (!pattern)
+ return;
+
+ if (size != 2) {
+ dev_warn(led_cdev->dev,
+ "Expected 2 but got %u values for delays pattern\n",
+ size);
+ goto out;
+ }
+
+ led_cdev->blink_delay_on = pattern[0];
+ led_cdev->blink_delay_off = pattern[1];
+ /* led_blink_set() called by caller */
+
+out:
+ kfree(pattern);
+}
+
static int timer_trig_activate(struct led_classdev *led_cdev)
{
+ if (led_cdev->flags & LED_INIT_DEFAULT_TRIGGER) {
+ pattern_init(led_cdev);
+ /*
+ * Mark as initialized even on pattern_init() error because
+ * any subsequent call to it would produce the same error.
+ */
+ led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
+ }
+
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index bbec6ac0a966..3581abfb0c6a 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -569,6 +569,7 @@ cuda_interrupt(int irq, void *arg)
unsigned char ibuf[16];
int ibuf_len = 0;
int complete = 0;
+ bool full;
spin_lock_irqsave(&cuda_lock, flags);
@@ -656,12 +657,13 @@ idle_state:
break;
case reading:
- if (reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
- : ARRAY_FULL(cuda_rbuf, reply_ptr))
+ full = reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
+ : ARRAY_FULL(cuda_rbuf, reply_ptr);
+ if (full)
(void)in_8(&via[SR]);
else
*reply_ptr++ = in_8(&via[SR]);
- if (!TREQ_asserted(status)) {
+ if (!TREQ_asserted(status) || full) {
if (mcu_is_egret)
assert_TACK();
/* that's all folks */
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index d713271ebf7c..a64116586b4c 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Clear ring flush state */
timeout = 1000; /* timeout of 1s */
- writel_relaxed(0x0, ring + RING_CONTROL);
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
do {
- if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
FLUSH_DONE_MASK))
break;
mdelay(1);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index c6a7d4582dc6..38d9df3fb199 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
return ret;
}
+EXPORT_SYMBOL_GPL(mbox_flush);
/**
* mbox_request_channel - Request a mailbox channel.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0ff22159a0ca..dd538e6b2748 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
if (IS_ERR(bip))
return PTR_ERR(bip);
- tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
+ tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
bip->bip_iter.bi_size = tag_len;
bip->bip_iter.bi_sector = io->cc->start + io->sector;
@@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
* capi:cipher_api_spec-iv:ivopts
*/
tmp = &cipher_in[strlen("capi:")];
- cipher_api = strsep(&tmp, "-");
- *ivmode = strsep(&tmp, ":");
- *ivopts = tmp;
+
+ /* Separate IV options if present, it can contain another '-' in hash name */
+ *ivopts = strrchr(tmp, ':');
+ if (*ivopts) {
+ **ivopts = '\0';
+ (*ivopts)++;
+ }
+ /* Parse IV mode */
+ *ivmode = strrchr(tmp, '-');
+ if (*ivmode) {
+ **ivmode = '\0';
+ (*ivmode)++;
+ }
+ /* The rest is crypto API spec */
+ cipher_api = tmp;
if (*ivmode && !strcmp(*ivmode, "lmk"))
cc->tfms_count = 64;
@@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
goto bad_mem;
chainmode = strsep(&tmp, "-");
- *ivopts = strsep(&tmp, "-");
- *ivmode = strsep(&*ivopts, ":");
-
- if (tmp)
- DMWARN("Ignoring unexpected additional cipher options");
+ *ivmode = strsep(&tmp, ":");
+ *ivopts = tmp;
/*
* For compatibility with the original dm-crypt mapping format, if
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4eb5f8c56535..a20531e5f3b4 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
static void rq_completed(struct mapped_device *md)
{
/* nudge anyone waiting on suspend queue */
- if (unlikely(waitqueue_active(&md->wait)))
+ if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
/*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 20b0776e39ef..ed3caceaed07 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
return r;
}
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
int r;
uint32_t ref_count;
@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
down_read(&pmd->root_lock);
r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
if (!r)
- *result = (ref_count != 0);
+ *result = (ref_count > 1);
up_read(&pmd->root_lock);
return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 35e954ea20a9..f6be0d733c20 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index dadd9696340c..e83b63608262 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -257,6 +257,7 @@ struct pool {
spinlock_t lock;
struct bio_list deferred_flush_bios;
+ struct bio_list deferred_flush_completions;
struct list_head prepared_mappings;
struct list_head prepared_discards;
struct list_head prepared_discards_pt2;
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
mempool_free(m, &m->tc->pool->mapping_pool);
}
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ /*
+ * If the bio has the REQ_FUA flag set we must commit the metadata
+ * before signaling its completion.
+ */
+ if (!bio_triggers_commit(tc, bio)) {
+ bio_endio(bio);
+ return;
+ }
+
+ /*
+ * Complete bio with an error if earlier I/O caused changes to the
+	 * metadata that can't be committed, e.g., due to I/O errors on the
+ * metadata device.
+ */
+ if (dm_thin_aborted_changes(tc->td)) {
+ bio_io_error(bio);
+ return;
+ }
+
+ /*
+ * Batch together any bios that trigger commits and then issue a
+ * single commit for them in process_deferred_bios().
+ */
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_add(&pool->deferred_flush_completions, bio);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
if (bio) {
inc_remap_and_issue_cell(tc, m->cell, m->data_block);
- bio_endio(bio);
+ complete_overwrite_bio(tc, bio);
} else {
inc_all_io_entry(tc->pool, m->cell->holder);
remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1048,7 +1082,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
* passdown we have to check that these blocks are now unused.
*/
int r = 0;
- bool used = true;
+ bool shared = true;
struct thin_c *tc = m->tc;
struct pool *pool = tc->pool;
dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1092,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
while (b != end) {
/* find start of unmapped run */
for (; b < end; b++) {
- r = dm_pool_block_is_used(pool->pmd, b, &used);
+ r = dm_pool_block_is_shared(pool->pmd, b, &shared);
if (r)
goto out;
- if (!used)
+ if (!shared)
break;
}
@@ -1071,11 +1105,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
/* find end of run */
for (e = b + 1; e != end; e++) {
- r = dm_pool_block_is_used(pool->pmd, e, &used);
+ r = dm_pool_block_is_shared(pool->pmd, e, &shared);
if (r)
goto out;
- if (used)
+ if (shared)
break;
}
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
{
unsigned long flags;
struct bio *bio;
- struct bio_list bios;
+ struct bio_list bios, bio_completions;
struct thin_c *tc;
tc = get_first_thin(pool);
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
}
/*
- * If there are any deferred flush bios, we must commit
- * the metadata before issuing them.
+ * If there are any deferred flush bios, we must commit the metadata
+ * before issuing them or signaling their completion.
*/
bio_list_init(&bios);
+ bio_list_init(&bio_completions);
+
spin_lock_irqsave(&pool->lock, flags);
bio_list_merge(&bios, &pool->deferred_flush_bios);
bio_list_init(&pool->deferred_flush_bios);
+
+ bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
+ bio_list_init(&pool->deferred_flush_completions);
spin_unlock_irqrestore(&pool->lock, flags);
- if (bio_list_empty(&bios) &&
+ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
return;
if (commit(pool)) {
+ bio_list_merge(&bios, &bio_completions);
+
while ((bio = bio_list_pop(&bios)))
bio_io_error(bio);
return;
}
pool->last_commit_jiffies = jiffies;
+ while ((bio = bio_list_pop(&bio_completions)))
+ bio_endio(bio);
+
while ((bio = bio_list_pop(&bios)))
generic_make_request(bio);
}
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
spin_lock_init(&pool->lock);
bio_list_init(&pool->deferred_flush_bios);
+ bio_list_init(&pool->deferred_flush_completions);
INIT_LIST_HEAD(&pool->prepared_mappings);
INIT_LIST_HEAD(&pool->prepared_discards);
INIT_LIST_HEAD(&pool->prepared_discards_pt2);
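
The dm-thin hunks above defer REQ_FUA overwrite completions onto a separate list so that a single metadata commit in process_deferred_bios() can cover many bios before any of them is signaled. Below is a minimal, self-contained userspace sketch of that batch-completions-behind-one-commit pattern; all names (pool_model, defer_completion, process_deferred) are hypothetical and this is not the kernel API.

/* Illustrative sketch only: queue completions under a lock, then run a
 * single "commit" and signal everything that was waiting on it. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

struct pool_model {
	pthread_mutex_t lock;
	struct req *deferred_completions;	/* FUA-like requests */
};

/* Completion context: do not signal yet, just queue. */
static void defer_completion(struct pool_model *p, struct req *r)
{
	pthread_mutex_lock(&p->lock);
	r->next = p->deferred_completions;
	p->deferred_completions = r;
	pthread_mutex_unlock(&p->lock);
}

/* Worker context: one commit covers every queued request. */
static void process_deferred(struct pool_model *p)
{
	struct req *list, *r;

	pthread_mutex_lock(&p->lock);
	list = p->deferred_completions;
	p->deferred_completions = NULL;
	pthread_mutex_unlock(&p->lock);

	if (!list)
		return;

	printf("one commit covers the whole batch\n");
	while ((r = list) != NULL) {
		list = r->next;
		printf("complete req %d\n", r->id);
		free(r);
	}
}

int main(void)
{
	struct pool_model p = { .deferred_completions = NULL };
	int i;

	pthread_mutex_init(&p.lock, NULL);
	for (i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		if (!r)
			return 1;
		r->id = i;
		defer_completion(&p, r);
	}
	process_deferred(&p);
	pthread_mutex_destroy(&p.lock);
	return 0;
}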
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d67c95ef8d7e..515e6af9bed2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io)
true, duration, &io->stats_aux);
/* nudge anyone waiting on suspend queue */
- if (unlikely(waitqueue_active(&md->wait)))
+ if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
}
@@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
__bio_clone_fast(clone, bio);
- if (unlikely(bio_integrity(bio) != NULL)) {
+ if (bio_integrity(bio)) {
int r;
if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
@@ -1339,7 +1339,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
- if (unlikely(bio_integrity(bio) != NULL))
+ if (bio_integrity(bio))
bio_integrity_trim(clone);
return 0;
@@ -1588,6 +1588,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
ci->sector = bio->bi_iter.bi_sector;
}
+#define __dm_part_stat_sub(part, field, subnd) \
+ (part_stat_get(part, field) -= (subnd))
+
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
@@ -1642,7 +1645,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
GFP_NOIO, &md->queue->bio_split);
ci.io->orig_bio = b;
+
+ /*
+ * Adjust IO stats for each split, otherwise upon queue
+ * reentry there will be redundant IO accounting.
+ * NOTE: this is a stop-gap fix, a proper fix involves
+ * significant refactoring of DM core's bio splitting
+ * (by eliminating DM's splitting and just using bio_split)
+ */
+ part_stat_lock();
+ __dm_part_stat_sub(&dm_disk(md)->part0,
+ sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+ part_stat_unlock();
+
bio_chain(b, bio);
+ trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
ret = generic_make_request(bio);
break;
}
@@ -1713,6 +1730,15 @@ out:
return ret;
}
+static blk_qc_t dm_process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+ return __process_bio(md, map, bio);
+ else
+ return __split_and_process_bio(md, map, bio);
+}
+
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
@@ -1733,10 +1759,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
return ret;
}
- if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
- ret = __process_bio(md, map, bio);
- else
- ret = __split_and_process_bio(md, map, bio);
+ ret = dm_process_bio(md, map, bio);
dm_put_live_table(md, srcu_idx);
return ret;
@@ -2415,9 +2438,9 @@ static void dm_wq_work(struct work_struct *work)
break;
if (dm_request_based(md))
- generic_make_request(c);
+ (void) generic_make_request(c);
else
- __split_and_process_bio(md, map, c);
+ (void) dm_process_bio(md, map, c);
}
dm_put_live_table(md, srcu_idx);
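
The dm.c hunk above subtracts the sectors of the split-off remainder from the disk statistics before re-submitting it, so the same sectors are not accounted twice when the remainder re-enters the queue (the patch itself notes this is a stop-gap). A small standalone sketch of that split-accounting adjustment follows; disk_stats_model and the account_* helpers are hypothetical names, not kernel functions.

/* Illustrative sketch only: a request is accounted in full on entry, so
 * when part of it is split off and re-queued, subtract that part once. */
#include <stdio.h>

struct disk_stats_model {
	unsigned long sectors;
};

static void account_entry(struct disk_stats_model *st, unsigned long sectors)
{
	st->sectors += sectors;		/* whole request counted on entry */
}

static void account_split(struct disk_stats_model *st, unsigned long remainder)
{
	st->sectors -= remainder;	/* remainder will be counted again
					 * when it re-enters the queue */
}

int main(void)
{
	struct disk_stats_model st = { 0 };

	account_entry(&st, 1024);	/* 1024-sector request submitted */
	account_split(&st, 256);	/* 256 sectors split off, re-queued */
	account_entry(&st, 256);	/* re-entry accounts them exactly once */
	printf("sectors accounted: %lu\n", st.sectors);	/* 1024, not 1280 */
	return 0;
}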
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fd4af4de03b4..05ffffb8b769 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -207,15 +207,10 @@ static bool create_on_open = true;
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
- struct bio *b;
-
if (!mddev || !bioset_initialized(&mddev->bio_set))
return bio_alloc(gfp_mask, nr_iovecs);
- b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
- if (!b)
- return NULL;
- return b;
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1d54109071cc..fa47249fa3e4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
reschedule_retry(r1_bio);
}
+static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
+{
+ sector_t sync_blocks = 0;
+ sector_t s = r1_bio->sector;
+ long sectors_to_go = r1_bio->sectors;
+
+ /* make sure these bits don't get cleared. */
+ do {
+ md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+ s += sync_blocks;
+ sectors_to_go -= sync_blocks;
+ } while (sectors_to_go > 0);
+}
+
static void end_sync_write(struct bio *bio)
{
int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
if (!uptodate) {
- sector_t sync_blocks = 0;
- sector_t s = r1_bio->sector;
- long sectors_to_go = r1_bio->sectors;
- /* make sure these bits doesn't get cleared. */
- do {
- md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
- s += sync_blocks;
- sectors_to_go -= sync_blocks;
- } while (sectors_to_go > 0);
+ abort_sync_write(mddev, r1_bio);
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
(i == r1_bio->read_disk ||
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
continue;
- if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+ if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
+ abort_sync_write(mddev, r1_bio);
continue;
+ }
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ec3a5ef7fee0..cbbe6b6535be 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
}
static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
- sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+ struct r5conf *conf,
+ sector_t stripe_sect,
+ int noblock)
{
struct stripe_head *sh;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
if (!sh)
return NULL; /* no more stripe available */
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
stripe_sect);
if (!sh) {
- sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
/*
* cannot get stripe from raid5_get_active_stripe
* try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
r5c_recovery_replay_stripes(
cached_stripe_list, ctx);
sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect);
+ conf, stripe_sect, 1);
}
if (!sh) {
+ int new_size = conf->min_nr_stripes * 2;
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
mdname(mddev),
- conf->min_nr_stripes * 2);
- raid5_set_cache_size(mddev,
- conf->min_nr_stripes * 2);
- sh = r5c_recovery_alloc_stripe(conf,
- stripe_sect);
+ new_size);
+ ret = raid5_set_cache_size(mddev, new_size);
+ if (conf->min_nr_stripes <= new_size / 2) {
+ pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+ mdname(mddev),
+ ret,
+ new_size,
+ conf->min_nr_stripes,
+ conf->max_nr_stripes);
+ return -ENOMEM;
+ }
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, 0);
}
if (!sh) {
pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
- mdname(mddev));
+ mdname(mddev));
return -ENOMEM;
}
list_add_tail(&sh->lru, cached_stripe_list);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4990f0319f6c..cecea901ab8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
int
raid5_set_cache_size(struct mddev *mddev, int size)
{
+ int result = 0;
struct r5conf *conf = mddev->private;
if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
- if (!grow_one_stripe(conf, GFP_KERNEL))
+ if (!grow_one_stripe(conf, GFP_KERNEL)) {
+ conf->min_nr_stripes = conf->max_nr_stripes;
+ result = -ENOMEM;
break;
+ }
mutex_unlock(&conf->cache_size_mutex);
- return 0;
+ return result;
}
EXPORT_SYMBOL(raid5_set_cache_size);
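
Taken together, the raid5-cache.c and raid5.c hunks above make raid5_set_cache_size() return -ENOMEM when it stops short of the requested size, while the journal-recovery caller only gives up if the cache failed to grow past half of the requested value. The following is a hedged userspace sketch of that grow-and-verify pattern; cache_model, grow_one and set_size are hypothetical names and the logic only models the idea.

/* Illustrative sketch only: grow a pool element by element, report a
 * partial failure, and let the caller decide whether the partial growth
 * is still good enough to continue. */
#include <errno.h>
#include <stdio.h>

struct cache_model {
	int cur_size;
	int alloc_budget;	/* stand-in for available memory */
};

static int grow_one(struct cache_model *c)
{
	if (c->alloc_budget <= 0)
		return 0;	/* allocation failed */
	c->alloc_budget--;
	c->cur_size++;
	return 1;
}

/* Models the new contract: 0 on full growth, -ENOMEM if growth stopped
 * early (the size is left wherever growth stopped). */
static int set_size(struct cache_model *c, int size)
{
	int ret = 0;

	while (c->cur_size < size)
		if (!grow_one(c)) {
			ret = -ENOMEM;
			break;
		}
	return ret;
}

int main(void)
{
	struct cache_model c = { .cur_size = 4, .alloc_budget = 2 };
	int want = c.cur_size * 2;
	int ret = set_size(&c, want);

	/* Caller-side check, as in the journal recovery path above. */
	if (c.cur_size <= want / 2)
		printf("cannot grow cache: ret=%d size=%d\n", ret, c.cur_size);
	else
		printf("cache grew to %d (ret=%d), carrying on\n",
		       c.cur_size, ret);
	return 0;
}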
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 447baaebca44..cdb79ae2d8dc 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
struct device *dev = &cio2->pci_dev->dev;
- q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
- GFP_KERNEL);
+ q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
+ GFP_KERNEL);
if (!q->fbpt)
return -ENOMEM;
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 2cc05a9d57ac..a16242a9206f 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -360,13 +360,11 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
ss; ss = ss->next, i++)
sprintf(ss->name, "Camera #%d Audio", i);
- ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL),
G723_PERIOD_BYTES * PERIODS,
G723_PERIOD_BYTES * PERIODS);
- if (ret < 0)
- return ret;
solo_dev->snd_pcm = pcm;
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index a28329698e20..fb0e7573b5ae 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -301,11 +301,12 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
ss; ss = ss->next, i++)
snprintf(ss->name, sizeof(ss->name), "vch%u audio", i);
- return snd_pcm_lib_preallocate_pages_for_all(pcm,
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(dev->pci_dev),
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX);
+ return 0;
}
static void tw686x_audio_dma_free(struct tw686x_dev *dev,
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index e80123cba406..060c0ad6243a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
struct device *dev = &ctx->dev->plat_dev->dev;
- mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
if (!mem->va) {
mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
size);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d01821a6906a..89d9c4c21037 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
- cancel_delayed_work_sync(&dev->work_run);
+ if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
+ cancel_delayed_work_sync(&dev->work_run);
+
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 1441a73ce64c..90aad465f9ed 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only)
const struct v4l2_window *win;
const struct v4l2_sdr_format *sdr;
const struct v4l2_meta_format *meta;
+ u32 planes;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only)
prt_names(mp->field, v4l2_field_names),
mp->colorspace, mp->num_planes, mp->flags,
mp->ycbcr_enc, mp->quantization, mp->xfer_func);
- for (i = 0; i < mp->num_planes; i++)
+ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
+ for (i = 0; i < planes; i++)
printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
mp->plane_fmt[i].bytesperline,
mp->plane_fmt[i].sizeimage);
@@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
@@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
@@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
@@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
+ break;
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
- CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
+ CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
+ bytesperline);
return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
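
The v4l2-ioctl.c hunks above reject or clamp a user-supplied num_planes before iterating over plane_fmt[], so a bogus count can no longer walk past VIDEO_MAX_PLANES entries. Here is a small standalone sketch of that clamp-the-untrusted-count idiom; MAX_PLANES_MODEL, plane_model and print_planes are hypothetical names used only for illustration.

/* Illustrative sketch only: never use a count that came from userspace
 * as a loop bound without clamping it to the real array size first. */
#include <stdio.h>

#define MAX_PLANES_MODEL 8	/* stand-in for VIDEO_MAX_PLANES */

struct plane_model {
	unsigned int bytesperline;
	unsigned int sizeimage;
};

static void print_planes(const struct plane_model *p, unsigned int num_planes)
{
	unsigned int i, n;

	/* num_planes is untrusted input; clamp before it bounds the loop */
	n = num_planes < MAX_PLANES_MODEL ? num_planes : MAX_PLANES_MODEL;
	for (i = 0; i < n; i++)
		printf("plane %u: bytesperline=%u sizeimage=%u\n",
		       i, p[i].bytesperline, p[i].sizeimage);
}

int main(void)
{
	struct plane_model planes[MAX_PLANES_MODEL] = {
		{ 1920, 1920 * 1080 },
	};

	print_planes(planes, 100);	/* bogus count is clamped to 8 */
	return 0;
}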
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 8c5dfdce4326..76f9909cf396 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -102,6 +102,7 @@ config MFD_AAT2870_CORE
config MFD_AT91_USART
tristate "AT91 USART Driver"
select MFD_CORE
+ depends on ARCH_AT91 || COMPILE_TEST
help
Select this to get support for AT91 USART IP. This is a wrapper
over at91-usart-serial driver and usart-spi-driver. Only one function
@@ -1418,7 +1419,7 @@ config MFD_TPS65217
config MFD_TPS68470
bool "TI TPS68470 Power Management / LED chips"
- depends on ACPI && I2C=y
+ depends on ACPI && PCI && I2C=y
select MFD_CORE
select REGMAP_I2C
select I2C_DESIGNWARE_PLATFORM
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 12980a4ad460..ee6fb6af655e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_ACT8945A) += act8945a.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
+obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
cros_ec_core-objs := cros_ec.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 30d09d177171..11ab17f64c64 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
mutex_unlock(&ab8500->lock);
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
- return ret;
+ return (ret < 0) ? ret : 0;
}
static int ab8500_get_register(struct device *dev, u8 bank,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index e1450a56fc07..3c97f2c0fdfe 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -641,9 +641,9 @@ static const struct mfd_cell axp221_cells[] = {
static const struct mfd_cell axp223_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp22x_pek_resources),
- .resources = axp22x_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp22x_pek_resources),
+ .resources = axp22x_pek_resources,
}, {
.name = "axp22x-adc",
.of_compatible = "x-powers,axp221-adc",
@@ -651,7 +651,7 @@ static const struct mfd_cell axp223_cells[] = {
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp221-battery-power-supply",
}, {
- .name = "axp20x-regulator",
+ .name = "axp20x-regulator",
}, {
.name = "axp20x-ac-power-supply",
.of_compatible = "x-powers,axp221-ac-power-supply",
@@ -667,9 +667,9 @@ static const struct mfd_cell axp223_cells[] = {
static const struct mfd_cell axp152_cells[] = {
{
- .name = "axp20x-pek",
- .num_resources = ARRAY_SIZE(axp152_pek_resources),
- .resources = axp152_pek_resources,
+ .name = "axp20x-pek",
+ .num_resources = ARRAY_SIZE(axp152_pek_resources),
+ .resources = axp152_pek_resources,
},
};
@@ -698,87 +698,101 @@ static const struct resource axp288_charger_resources[] = {
static const struct mfd_cell axp288_cells[] = {
{
- .name = "axp288_adc",
- .num_resources = ARRAY_SIZE(axp288_adc_resources),
- .resources = axp288_adc_resources,
- },
- {
- .name = "axp288_extcon",
- .num_resources = ARRAY_SIZE(axp288_extcon_resources),
- .resources = axp288_extcon_resources,
- },
- {
- .name = "axp288_charger",
- .num_resources = ARRAY_SIZE(axp288_charger_resources),
- .resources = axp288_charger_resources,
- },
- {
- .name = "axp288_fuel_gauge",
- .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
- .resources = axp288_fuel_gauge_resources,
- },
- {
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp288_power_button_resources),
- .resources = axp288_power_button_resources,
- },
- {
- .name = "axp288_pmic_acpi",
+ .name = "axp288_adc",
+ .num_resources = ARRAY_SIZE(axp288_adc_resources),
+ .resources = axp288_adc_resources,
+ }, {
+ .name = "axp288_extcon",
+ .num_resources = ARRAY_SIZE(axp288_extcon_resources),
+ .resources = axp288_extcon_resources,
+ }, {
+ .name = "axp288_charger",
+ .num_resources = ARRAY_SIZE(axp288_charger_resources),
+ .resources = axp288_charger_resources,
+ }, {
+ .name = "axp288_fuel_gauge",
+ .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
+ .resources = axp288_fuel_gauge_resources,
+ }, {
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp288_power_button_resources),
+ .resources = axp288_power_button_resources,
+ }, {
+ .name = "axp288_pmic_acpi",
},
};
static const struct mfd_cell axp803_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp803_pek_resources),
- .resources = axp803_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp803_pek_resources),
+ .resources = axp803_pek_resources,
+ }, {
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
+ }, {
+ .name = "axp813-adc",
+ .of_compatible = "x-powers,axp813-adc",
+ }, {
+ .name = "axp20x-battery-power-supply",
+ .of_compatible = "x-powers,axp813-battery-power-supply",
+ }, {
+ .name = "axp20x-ac-power-supply",
+ .of_compatible = "x-powers,axp813-ac-power-supply",
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
+ .resources = axp20x_ac_power_supply_resources,
},
- { .name = "axp20x-regulator" },
+ { .name = "axp20x-regulator" },
};
static const struct mfd_cell axp806_self_working_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp806_pek_resources),
- .resources = axp806_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp806_pek_resources),
+ .resources = axp806_pek_resources,
},
- { .name = "axp20x-regulator" },
+ { .name = "axp20x-regulator" },
};
static const struct mfd_cell axp806_cells[] = {
{
- .id = 2,
- .name = "axp20x-regulator",
+ .id = 2,
+ .name = "axp20x-regulator",
},
};
static const struct mfd_cell axp809_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp809_pek_resources),
- .resources = axp809_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp809_pek_resources),
+ .resources = axp809_pek_resources,
}, {
- .id = 1,
- .name = "axp20x-regulator",
+ .id = 1,
+ .name = "axp20x-regulator",
},
};
static const struct mfd_cell axp813_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp803_pek_resources),
- .resources = axp803_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp803_pek_resources),
+ .resources = axp803_pek_resources,
}, {
- .name = "axp20x-regulator",
+ .name = "axp20x-regulator",
}, {
- .name = "axp20x-gpio",
- .of_compatible = "x-powers,axp813-gpio",
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
}, {
- .name = "axp813-adc",
- .of_compatible = "x-powers,axp813-adc",
+ .name = "axp813-adc",
+ .of_compatible = "x-powers,axp813-adc",
}, {
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp813-battery-power-supply",
+ }, {
+ .name = "axp20x-ac-power-supply",
+ .of_compatible = "x-powers,axp813-ac-power-supply",
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
+ .resources = axp20x_ac_power_supply_resources,
},
};
diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c
new file mode 100644
index 000000000000..42fe67f1538e
--- /dev/null
+++ b/drivers/mfd/bcm2835-pm.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PM MFD driver for Broadcom BCM2835
+ *
+ * This driver binds to the PM block and creates the MFD device for
+ * the WDT and power drivers.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/bcm2835-pm.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+static const struct mfd_cell bcm2835_pm_devs[] = {
+ { .name = "bcm2835-wdt" },
+};
+
+static const struct mfd_cell bcm2835_power_devs[] = {
+ { .name = "bcm2835-power" },
+};
+
+static int bcm2835_pm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct bcm2835_pm *pm;
+ int ret;
+
+ pm = devm_kzalloc(dev, sizeof(*pm), GFP_KERNEL);
+ if (!pm)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, pm);
+
+ pm->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pm->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pm->base))
+ return PTR_ERR(pm->base);
+
+ ret = devm_mfd_add_devices(dev, -1,
+ bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs),
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ /* We'll use the presence of the AXI ASB regs in the
+ * bcm2835-pm binding as the key for whether we can reference
+ * the full PM register range and support power domains.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ pm->asb = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pm->asb))
+ return PTR_ERR(pm->asb);
+
+ ret = devm_mfd_add_devices(dev, -1,
+ bcm2835_power_devs,
+ ARRAY_SIZE(bcm2835_power_devs),
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_pm_of_match[] = {
+ { .compatible = "brcm,bcm2835-pm-wdt", },
+ { .compatible = "brcm,bcm2835-pm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match);
+
+static struct platform_driver bcm2835_pm_driver = {
+ .probe = bcm2835_pm_probe,
+ .driver = {
+ .name = "bcm2835-pm",
+ .of_match_table = bcm2835_pm_of_match,
+ },
+};
+module_platform_driver(bcm2835_pm_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM MFD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 503979c81dae..fab3cdc27ed6 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
};
static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
+ regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index b99a194ce5a4..2d0fee488c5a 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
cros_ec_debugfs_remove(ec);
+ mfd_remove_devices(ec->dev);
cdev_del(&ec->cdev);
device_unregister(&ec->class_dev);
return 0;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5970b8def548..aec20e1c7d3d 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
-static __init char *fw_project_name(u32 project)
+static char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}
-static void __init init_prcm_registers(void)
+static void init_prcm_registers(void)
{
u32 val;
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index ca829f85672f..2713de989f05 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -82,11 +82,13 @@ static void exynos_lpass_enable(struct exynos_lpass *lpass)
LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK,
- LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
+ LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S |
+ LPASS_INTR_UART);
exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET);
exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET);
exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET);
+ exynos_lpass_core_sw_reset(lpass, LPASS_UART_SW_RESET);
}
static void exynos_lpass_disable(struct exynos_lpass *lpass)
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 440030cecbbd..2a77988d0462 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -15,6 +15,7 @@
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -155,7 +156,7 @@ static int madera_wait_for_boot(struct madera *madera)
usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2,
MADERA_BOOT_POLL_INTERVAL_USEC);
regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
- };
+ }
if (!(val & MADERA_BOOT_DONE_STS1)) {
dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n");
@@ -357,6 +358,8 @@ int madera_dev_init(struct madera *madera)
dev_set_drvdata(madera->dev, madera);
BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier);
+ mutex_init(&madera->dapm_ptr_lock);
+
madera_set_micbias_info(madera);
/*
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index d8217366ed36..d8ddd1a6f304 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -280,7 +280,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
sprintf(fps_name, "fps%d", fps_id);
- if (!strcmp(fps_np->name, fps_name))
+ if (of_node_name_eq(fps_np, fps_name))
break;
}
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index f475e848252f..d0bf50e3568d 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+ if (ret)
+ goto out;
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
MC13XXX_ADC0_CHRGRAWDIV;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 77b64bd64df3..ab24e176ef44 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "unsupported chip: %d\n", id);
- ret = -ENODEV;
- break;
+ return -ENODEV;
}
if (ret) {
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 52fafea06067..8d420c37b2a6 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
return -EFAULT;
}
+ writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
+ writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
+ writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
+
dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
fw_version[1],
fw_version[2]);
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 2a8369657e38..26c7b63e008a 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -109,7 +109,7 @@ struct rave_sp_reply {
/**
* struct rave_sp_checksum - Variant specific checksum implementation details
*
- * @length: Caculated checksum length
+ * @length: Calculated checksum length
* @subroutine: Utilized checksum algorithm implementation
*/
struct rave_sp_checksum {
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 566caca4efd8..7569a4be0608 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1302,17 +1302,17 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
for_each_child_of_node(np, child) {
- if (!strcmp(child->name, "stmpe_gpio")) {
+ if (of_node_name_eq(child, "stmpe_gpio")) {
pdata->blocks |= STMPE_BLOCK_GPIO;
- } else if (!strcmp(child->name, "stmpe_keypad")) {
+ } else if (of_node_name_eq(child, "stmpe_keypad")) {
pdata->blocks |= STMPE_BLOCK_KEYPAD;
- } else if (!strcmp(child->name, "stmpe_touchscreen")) {
+ } else if (of_node_name_eq(child, "stmpe_touchscreen")) {
pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
- } else if (!strcmp(child->name, "stmpe_adc")) {
+ } else if (of_node_name_eq(child, "stmpe_adc")) {
pdata->blocks |= STMPE_BLOCK_ADC;
- } else if (!strcmp(child->name, "stmpe_pwm")) {
+ } else if (of_node_name_eq(child, "stmpe_pwm")) {
pdata->blocks |= STMPE_BLOCK_PWM;
- } else if (!strcmp(child->name, "stmpe_rotator")) {
+ } else if (of_node_name_eq(child, "stmpe_rotator")) {
pdata->blocks |= STMPE_BLOCK_ROTATOR;
}
}
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c2d47d78705b..fd111296b959 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
cell->pdata_size = sizeof(tscadc);
}
- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
- tscadc->used_cells, NULL, 0, NULL);
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ tscadc->cells, tscadc->used_cells, NULL,
+ 0, NULL);
if (err < 0)
goto err_disable_clk;
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 910f569ff77c..8bcdecf494d0 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
mutex_init(&tps->tps_lock);
- ret = regmap_add_irq_chip(tps->regmap, tps->irq,
- IRQF_ONESHOT, 0, &tps65218_irq_chip,
- &tps->irq_data);
+ ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
+ IRQF_ONESHOT, 0, &tps65218_irq_chip,
+ &tps->irq_data);
if (ret < 0)
return ret;
@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
ARRAY_SIZE(tps65218_cells), NULL, 0,
regmap_irq_get_domain(tps->irq_data));
- if (ret < 0)
- goto err_irq;
-
- return 0;
-
-err_irq:
- regmap_del_irq_chip(tps->irq, tps->irq_data);
-
return ret;
}
-static int tps65218_remove(struct i2c_client *client)
-{
- struct tps65218 *tps = i2c_get_clientdata(client);
-
- regmap_del_irq_chip(tps->irq, tps->irq_data);
-
- return 0;
-}
-
static const struct i2c_device_id tps65218_id_table[] = {
{ "tps65218", TPS65218 },
{ },
@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
.of_match_table = of_tps65218_match_table,
},
.probe = tps65218_probe,
- .remove = tps65218_remove,
.id_table = tps65218_id_table,
};
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b89379782741..9c7925ca13cf 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ if (tps6586x->client->irq)
+ disable_irq(tps6586x->client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ if (tps6586x->client->irq)
+ enable_irq(tps6586x->client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
+ tps6586x_i2c_resume);
+
static const struct i2c_device_id tps6586x_id_table[] = {
{ "tps6586x", 0 },
{ },
@@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
.driver = {
.name = "tps6586x",
.of_match_table = of_match_ptr(tps6586x_of_match),
+ .pm = &tps6586x_pm_ops,
},
.probe = tps6586x_i2c_probe,
.remove = tps6586x_i2c_remove,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 4be3d239da9e..299016bc46d9 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* letting it generate the right frequencies for USB, MADC, and
* other purposes.
*/
-static inline int __init protect_pm_master(void)
+static inline int protect_pm_master(void)
{
int e = 0;
@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
return e;
}
-static inline int __init unprotect_pm_master(void)
+static inline int unprotect_pm_master(void)
{
int e = 0;
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 1ee68bd440fb..16c6e2accfaa 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ASRC_ENABLE:
case ARIZONA_ASRC_STATUS:
case ARIZONA_ASRC_RATE1:
+ case ARIZONA_ASRC_RATE2:
case ARIZONA_ISRC_1_CTRL_1:
case ARIZONA_ISRC_1_CTRL_2:
case ARIZONA_ISRC_1_CTRL_3:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f417b06e11c5..42ab8ec92a04 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -295,6 +295,17 @@ config QCOM_COINCELL
to maintain PMIC register and RTC state in the absence of
external power.
+config QCOM_FASTRPC
+ tristate "Qualcomm FastRPC"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RPMSG
+ select DMA_SHARED_BUFFER
+ help
+ Provides a communication mechanism that allows for clients to
+ make remote method invocations across the processor boundary to the
+ applications DSP processor. Say M if you want to enable this
+ module.
+
config SGI_GRU
tristate "SGI GRU driver"
depends on X86_UV && SMP
@@ -535,4 +546,5 @@ source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/cardreader/Kconfig"
+source "drivers/misc/habanalabs/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e39ccbbc1b3a..d5b7d3404dc7 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
+obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
@@ -59,3 +60,4 @@ obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
obj-y += cardreader/
obj-$(CONFIG_PVPANIC) += pvpanic.o
+obj-$(CONFIG_HABANA_AI) += habanalabs/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index a0afadefcc49..1f6d008e0036 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -202,22 +202,20 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
return dpot_read_r8d8(dpot, ctrl);
case DPOT_UID(AD5272_ID):
case DPOT_UID(AD5274_ID):
- dpot_write_r8d8(dpot,
+ dpot_write_r8d8(dpot,
(DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
- value = dpot_read_r8d16(dpot,
- DPOT_AD5270_1_2_4_RDAC << 2);
-
- if (value < 0)
- return value;
- /*
- * AD5272/AD5274 returns high byte first, however
- * underling smbus expects low byte first.
- */
- value = swab16(value);
+ value = dpot_read_r8d16(dpot, DPOT_AD5270_1_2_4_RDAC << 2);
+ if (value < 0)
+ return value;
+ /*
+ * AD5272/AD5274 returns high byte first, however
+ * underlying smbus expects low byte first.
+ */
+ value = swab16(value);
- if (dpot->uid == DPOT_UID(AD5274_ID))
- value = value >> 2;
+ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value >> 2;
return value;
default:
if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 024dcba8d6c8..5c98e2221889 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -170,35 +170,46 @@ static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
+ if (pcr->option.ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_PARTIAL_POWER_ON);
+
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x02);
+
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
/* To avoid too large in-rush current */
- udelay(150);
-
+ msleep(20);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_POWER_ON);
+
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x06);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
+ SD_OUTPUT_EN, SD_OUTPUT_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
+ MS_OUTPUT_EN, MS_OUTPUT_EN);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
{
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
- SD_POWER_MASK | PMOS_STRG_MASK,
- SD_POWER_OFF | PMOS_STRG_400mA);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, 0X00);
- return rtsx_pci_send_cmd(pcr, 100);
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK |
+ PMOS_STRG_MASK, SD_POWER_OFF | PMOS_STRG_400mA);
+ rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0X00);
+
+ return 0;
}
static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
@@ -348,6 +359,32 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
return 0;
}
+static int rts522a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x57E4);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
+ if (err < 0)
+ return err;
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x54A4);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rts5227_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+
/* rts522a operations mainly derived from rts5227, except phy/hw init setting.
*/
static const struct pcr_ops rts522a_pcr_ops = {
@@ -360,7 +397,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
.disable_auto_blink = rts5227_disable_auto_blink,
.card_power_on = rts5227_card_power_on,
.card_power_off = rts5227_card_power_off,
- .switch_output_voltage = rts5227_switch_output_voltage,
+ .switch_output_voltage = rts522a_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
.force_power_down = rts5227_force_power_down,
@@ -371,4 +408,11 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
rts5227_init_params(pcr);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
+
+ pcr->option.ocp_en = 1;
+ if (pcr->option.ocp_en)
+ pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
+ pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
+ pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800;
+
}
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index dbe013abdb83..0f72a7e0fdab 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -284,6 +284,10 @@ static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
@@ -306,12 +310,15 @@ static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card)
{
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
- SD_POWER_MASK, SD_POWER_OFF);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
- LDO3318_PWR_MASK, 0x00);
- return rtsx_pci_send_cmd(pcr, 100);
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK, SD_POWER_OFF);
+
+ rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x00);
+ return 0;
}
static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
@@ -629,6 +636,13 @@ void rts524a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts524a_pcr_ops;
+
+ pcr->option.ocp_en = 1;
+ if (pcr->option.ocp_en)
+ pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
+ pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
+ pcr->option.sd_800mA_ocp_thd = RTS524A_OCP_THD_800;
+
}
static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card)
@@ -737,4 +751,10 @@ void rts525a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
+
+ pcr->option.ocp_en = 1;
+ if (pcr->option.ocp_en)
+ pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
+ pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
+ pcr->option.sd_800mA_ocp_thd = RTS525A_OCP_THD_800;
}
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index a493b01c5bc6..da22bcb62b04 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -64,11 +64,13 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
drive_sel = pcr->sd30_drive_sel_1v8;
}
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
0xFF, driving[drive_sel][0]);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][1]);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][2]);
}
@@ -193,7 +195,7 @@ static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
| SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
- CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
return 0;
@@ -207,22 +209,16 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
if (option->ocp_en)
rtsx_pci_enable_ocp(pcr);
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
- DV331812_VDD1, DV331812_VDD1);
- err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
- if (err < 0)
- return err;
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0,
+ rtsx_pci_write_register(pcr, LDO_CONFIG2, DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
- LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
- DV331812_POWERON, DV331812_POWERON);
- err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_POW_SDVDD1_MASK,
+ LDO_POW_SDVDD1_ON);
+
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWERON);
msleep(20);
if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
@@ -242,8 +238,8 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
/* Reset SD_CFG3 register */
rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
- SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
- SD30_CLK_STOP_CFG0, 0);
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
@@ -273,9 +269,9 @@ static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
}
/* set pad drive */
- rtsx_pci_init_cmd(pcr);
rts5260_fill_driving(pcr, voltage);
- return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ return 0;
}
static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
@@ -290,13 +286,9 @@ static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
{
- struct rtsx_cr_option *option = &pcr->option;
-
rts5260_stop_cmd(pcr);
rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
- if (option->ocp_en)
- rtsx_pci_disable_ocp(pcr);
}
static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
@@ -304,13 +296,12 @@ static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
int err = 0;
rts5260_card_before_power_off(pcr);
-
- rtsx_pci_init_cmd(pcr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ err = rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ err = rtsx_pci_write_register(pcr, LDO_CONFIG2,
DV331812_POWERON, DV331812_POWEROFF);
- err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
return err;
}
@@ -322,41 +313,29 @@ static void rts5260_init_ocp(struct rtsx_pcr *pcr)
if (option->ocp_en) {
u8 mask, val;
- rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
- RTS5260_DVCC_OCP_EN |
- RTS5260_DVCC_OCP_CL_EN,
- RTS5260_DVCC_OCP_EN |
- RTS5260_DVCC_OCP_CL_EN);
- rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
- RTS5260_DVIO_OCP_EN |
- RTS5260_DVIO_OCP_CL_EN,
- RTS5260_DVIO_OCP_EN |
- RTS5260_DVIO_OCP_CL_EN);
rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
- RTS5260_DVCC_OCP_THD_MASK,
- option->sd_400mA_ocp_thd);
-
- rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
- RTS5260_DVIO_OCP_THD_MASK,
- RTS5260_DVIO_OCP_THD_350);
+ RTS5260_DVCC_OCP_THD_MASK,
+ option->sd_800mA_ocp_thd);
rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
- RTS5260_DV331812_OCP_THD_MASK,
- RTS5260_DV331812_OCP_THD_210);
+ RTS5260_DV331812_OCP_THD_MASK,
+ RTS5260_DV331812_OCP_THD_270);
- mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ mask = SD_OCP_GLITCH_MASK;
val = pcr->hw_param.ocp_glitch;
rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
rtsx_pci_enable_ocp(pcr);
} else {
rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN, 0);
- rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
- RTS5260_DVIO_OCP_EN |
- RTS5260_DVIO_OCP_CL_EN, 0);
}
}
@@ -364,14 +343,9 @@ static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
{
u8 val = 0;
- rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
-
val = SD_OCP_INT_EN | SD_DETECT_EN;
- val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
- rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
- DV3318_DETECT_EN | DV3318_OCP_INT_EN,
- DV3318_DETECT_EN | DV3318_OCP_INT_EN);
+
}
static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
@@ -379,15 +353,11 @@ static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
u8 mask = 0;
mask = SD_OCP_INT_EN | SD_DETECT_EN;
- mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
- rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
- DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
- rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
- OC_POWER_DOWN);
}
+
static int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
{
return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
@@ -404,9 +374,7 @@ static void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
u8 val = 0;
mask = SD_OCP_INT_CLR | SD_OC_CLR;
- mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
val = SD_OCP_INT_CLR | SD_OC_CLR;
- val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
@@ -425,36 +393,22 @@ static void rts5260_process_ocp(struct rtsx_pcr *pcr)
rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
- if (pcr->card_exist & SD_EXIST)
- rtsx_sd_power_off_card3v3(pcr);
- else if (pcr->card_exist & MS_EXIST)
- rtsx_ms_power_off_card3v3(pcr);
-
- if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) {
- if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
- SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
- (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
- rtsx_pci_clear_ocpstat(pcr);
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ rtsx_pci_clear_ocpstat(pcr);
pcr->ocp_stat = 0;
pcr->ocp_stat2 = 0;
}
- if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
- SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
- (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
- if (pcr->card_exist & SD_EXIST)
- rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
- else if (pcr->card_exist & MS_EXIST)
- rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
- }
}
static int rts5260_init_hw(struct rtsx_pcr *pcr)
{
int err;
- rtsx_pci_init_ocp(pcr);
-
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
@@ -483,6 +437,8 @@ static int rts5260_init_hw(struct rtsx_pcr *pcr)
if (err < 0)
return err;
+ rtsx_pci_init_ocp(pcr);
+
return 0;
}
@@ -499,7 +455,13 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
pcr_dbg(pcr, "Set parameters for L1.2.");
rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
0xFF, PCIE_L1_2_EN);
- rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
+
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
0xFF, PCIE_L1_2_PD_FE_EN);
} else if (lss_l1_1) {
pcr_dbg(pcr, "Set parameters for L1.1.");
@@ -742,7 +704,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
option->ocp_en = 1;
if (option->ocp_en)
hw_param->interrupt_en |= SD_OC_INT_EN;
- hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U;
+ hw_param->ocp_glitch = SDVIO_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
}
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index da445223f4cc..0d320e0ab4c9 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -703,7 +703,10 @@ EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
- pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
+ | hw_param->interrupt_en;
if (pcr->num_slots > 1)
pcr->bier |= MS_INT_EN;
@@ -969,8 +972,19 @@ static void rtsx_pci_card_detect(struct work_struct *work)
static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
- if (pcr->ops->process_ocp)
+ if (pcr->ops->process_ocp) {
pcr->ops->process_ocp(pcr);
+ } else {
+ if (!pcr->option.ocp_en)
+ return;
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ rtsx_pci_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ }
+ }
}
static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
@@ -1039,7 +1053,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
}
}
- if (pcr->card_inserted || pcr->card_removed)
+ if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
schedule_delayed_work(&pcr->carddet_work,
msecs_to_jiffies(200));
@@ -1144,10 +1158,12 @@ void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
- if (pcr->ops->enable_ocp)
+ if (pcr->ops->enable_ocp) {
pcr->ops->enable_ocp(pcr);
- else
+ } else {
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ }
}
@@ -1155,10 +1171,13 @@ void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
{
u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
- if (pcr->ops->disable_ocp)
+ if (pcr->ops->disable_ocp) {
pcr->ops->disable_ocp(pcr);
- else
+ } else {
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+ }
}
void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
@@ -1169,7 +1188,7 @@ void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
struct rtsx_cr_option *option = &(pcr->option);
if (option->ocp_en) {
- u8 val = option->sd_400mA_ocp_thd;
+ u8 val = option->sd_800mA_ocp_thd;
rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
rtsx_pci_write_register(pcr, REG_OCPPARA1,
@@ -1204,6 +1223,7 @@ void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ udelay(100);
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
}
}
@@ -1213,7 +1233,6 @@ int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
MS_CLK_EN | SD40_CLK_EN, 0);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
-
rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
msleep(50);
@@ -1313,6 +1332,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
break;
}
+ /* init ocp */
+ rtsx_pci_init_ocp(pcr);
+
/* Enable clk_request_n to enable clock power management */
rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
/* Enter L1 when host tx idle */
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 6ea1655db0bb..300fc31d8e67 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -46,6 +46,11 @@
#define SSC_CLOCK_STABLE_WAIT 130
+#define RTS524A_OCP_THD_800 0x04
+#define RTS525A_OCP_THD_800 0x05
+#define RTS522A_OCP_THD_800 0x06
+
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 5a17bfeb80d3..74d4fda6c4a7 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -125,9 +125,7 @@ enclosure_register(struct device *dev, const char *name, int components,
struct enclosure_component_callbacks *cb)
{
struct enclosure_device *edev =
- kzalloc(sizeof(struct enclosure_device) +
- sizeof(struct enclosure_component)*components,
- GFP_KERNEL);
+ kzalloc(struct_size(edev, component, components), GFP_KERNEL);
int err, i;
BUG_ON(!cb);
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
new file mode 100644
index 000000000000..39f832d27288
--- /dev/null
+++ b/drivers/misc/fastrpc.c
@@ -0,0 +1,1401 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/rpmsg.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <uapi/misc/fastrpc.h>
+
+#define ADSP_DOMAIN_ID (0)
+#define MDSP_DOMAIN_ID (1)
+#define SDSP_DOMAIN_ID (2)
+#define CDSP_DOMAIN_ID (3)
+#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, sdsp, cdsp */
+#define FASTRPC_MAX_SESSIONS 9 /* 8 compute, 1 cpz */
+#define FASTRPC_ALIGN 128
+#define FASTRPC_MAX_FDLIST 16
+#define FASTRPC_MAX_CRCLIST 64
+#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
+#define FASTRPC_CTX_MAX (256)
+#define FASTRPC_INIT_HANDLE 1
+#define FASTRPC_CTXID_MASK (0xFF0)
+#define INIT_FILELEN_MAX (2 * 1024 * 1024)
+#define INIT_MEMLEN_MAX (8 * 1024 * 1024)
+#define FASTRPC_DEVICE_NAME "fastrpc"
+
+/* Retrieves number of input buffers from the scalars parameter */
+#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
+
+/* Retrieves number of output buffers from the scalars parameter */
+#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
+
+/* Retrieves number of input handles from the scalars parameter */
+#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
+
+/* Retrieves number of output handles from the scalars parameter */
+#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
+
+#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
+ REMOTE_SCALARS_OUTBUFS(sc) + \
+ REMOTE_SCALARS_INHANDLES(sc)+ \
+ REMOTE_SCALARS_OUTHANDLES(sc))
+#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
+ (((attr & 0x07) << 29) | \
+ ((method & 0x1f) << 24) | \
+ ((in & 0xff) << 16) | \
+ ((out & 0xff) << 8) | \
+ ((oin & 0x0f) << 4) | \
+ (oout & 0x0f))
+
+#define FASTRPC_SCALARS(method, in, out) \
+ FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
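+
+/*
+ * Worked example of the scalars encoding (illustrative only):
+ * FASTRPC_SCALARS(6, 4, 0) packs method id 6 with four input buffers and
+ * no output buffers or handles, i.e. (6 << 24) | (4 << 16) = 0x06040000.
+ * For that value, REMOTE_SCALARS_INBUFS() returns 4 and
+ * REMOTE_SCALARS_LENGTH() returns 4.
+ */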
+
+#define FASTRPC_CREATE_PROCESS_NARGS 6
+/* Remote Method id table */
+#define FASTRPC_RMID_INIT_ATTACH 0
+#define FASTRPC_RMID_INIT_RELEASE 1
+#define FASTRPC_RMID_INIT_CREATE 6
+#define FASTRPC_RMID_INIT_CREATE_ATTR 7
+#define FASTRPC_RMID_INIT_CREATE_STATIC 8
+
+#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
+
+static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
+ "sdsp", "cdsp"};
+struct fastrpc_phy_page {
+ u64 addr; /* physical address */
+ u64 size; /* size of contiguous region */
+};
+
+struct fastrpc_invoke_buf {
+ u32 num; /* number of contiguous regions */
+ u32 pgidx; /* index to start of contiguous region */
+};
+
+struct fastrpc_remote_arg {
+ u64 pv;
+ u64 len;
+};
+
+struct fastrpc_msg {
+ int pid; /* process group id */
+ int tid; /* thread id */
+ u64 ctx; /* invoke caller context */
+ u32 handle; /* handle to invoke */
+ u32 sc; /* scalars structure describing the data */
+ u64 addr; /* physical address */
+ u64 size; /* size of contiguous region */
+};
+
+struct fastrpc_invoke_rsp {
+ u64 ctx; /* invoke caller context */
+ int retval; /* invoke return value */
+};
+
+struct fastrpc_buf {
+ struct fastrpc_user *fl;
+ struct dma_buf *dmabuf;
+ struct device *dev;
+ void *virt;
+ u64 phys;
+ u64 size;
+ /* Lock for dma buf attachments */
+ struct mutex lock;
+ struct list_head attachments;
+};
+
+struct fastrpc_dma_buf_attachment {
+ struct device *dev;
+ struct sg_table sgt;
+ struct list_head node;
+};
+
+struct fastrpc_map {
+ struct list_head node;
+ struct fastrpc_user *fl;
+ int fd;
+ struct dma_buf *buf;
+ struct sg_table *table;
+ struct dma_buf_attachment *attach;
+ u64 phys;
+ u64 size;
+ void *va;
+ u64 len;
+ struct kref refcount;
+};
+
+struct fastrpc_invoke_ctx {
+ int nscalars;
+ int nbufs;
+ int retval;
+ int pid;
+ int tgid;
+ u32 sc;
+ u32 *crc;
+ u64 ctxid;
+ u64 msg_sz;
+ struct kref refcount;
+ struct list_head node; /* list of ctxs */
+ struct completion work;
+ struct fastrpc_msg msg;
+ struct fastrpc_user *fl;
+ struct fastrpc_remote_arg *rpra;
+ struct fastrpc_map **maps;
+ struct fastrpc_buf *buf;
+ struct fastrpc_invoke_args *args;
+ struct fastrpc_channel_ctx *cctx;
+};
+
+struct fastrpc_session_ctx {
+ struct device *dev;
+ int sid;
+ bool used;
+ bool valid;
+};
+
+struct fastrpc_channel_ctx {
+ int domain_id;
+ int sesscount;
+ struct rpmsg_device *rpdev;
+ struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
+ spinlock_t lock;
+ struct idr ctx_idr;
+ struct list_head users;
+ struct miscdevice miscdev;
+};
+
+struct fastrpc_user {
+ struct list_head user;
+ struct list_head maps;
+ struct list_head pending;
+
+ struct fastrpc_channel_ctx *cctx;
+ struct fastrpc_session_ctx *sctx;
+ struct fastrpc_buf *init_mem;
+
+ int tgid;
+ int pd;
+ /* Lock for lists */
+ spinlock_t lock;
+ /* lock for allocations */
+ struct mutex mutex;
+};
+
+static void fastrpc_free_map(struct kref *ref)
+{
+ struct fastrpc_map *map;
+
+ map = container_of(ref, struct fastrpc_map, refcount);
+
+ if (map->table) {
+ dma_buf_unmap_attachment(map->attach, map->table,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(map->buf, map->attach);
+ dma_buf_put(map->buf);
+ }
+
+ kfree(map);
+}
+
+static void fastrpc_map_put(struct fastrpc_map *map)
+{
+ if (map)
+ kref_put(&map->refcount, fastrpc_free_map);
+}
+
+static void fastrpc_map_get(struct fastrpc_map *map)
+{
+ if (map)
+ kref_get(&map->refcount);
+}
+
+static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
+ struct fastrpc_map **ppmap)
+{
+ struct fastrpc_map *map = NULL;
+
+ mutex_lock(&fl->mutex);
+ list_for_each_entry(map, &fl->maps, node) {
+ if (map->fd == fd) {
+ fastrpc_map_get(map);
+ *ppmap = map;
+ mutex_unlock(&fl->mutex);
+ return 0;
+ }
+ }
+ mutex_unlock(&fl->mutex);
+
+ return -ENOENT;
+}
+
+static void fastrpc_buf_free(struct fastrpc_buf *buf)
+{
+ dma_free_coherent(buf->dev, buf->size, buf->virt,
+ FASTRPC_PHYS(buf->phys));
+ kfree(buf);
+}
+
+static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
+ u64 size, struct fastrpc_buf **obuf)
+{
+ struct fastrpc_buf *buf;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&buf->attachments);
+ mutex_init(&buf->lock);
+
+ buf->fl = fl;
+ buf->virt = NULL;
+ buf->phys = 0;
+ buf->size = size;
+ buf->dev = dev;
+
+ buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
+ GFP_KERNEL);
+ if (!buf->virt)
+ return -ENOMEM;
+
+ if (fl->sctx && fl->sctx->sid)
+ buf->phys += ((u64)fl->sctx->sid << 32);
+
+ *obuf = buf;
+
+ return 0;
+}
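+
+/*
+ * Note on the address encoding (illustrative): the session id is kept in
+ * the upper 32 bits of buf->phys (and later of map->phys), e.g. sid 2
+ * with DMA address 0x9f000000 gives phys 0x29f000000. FASTRPC_PHYS()
+ * masks the sid off again wherever the plain 32-bit address is handed
+ * back to the DMA API.
+ */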
+
+static void fastrpc_context_free(struct kref *ref)
+{
+ struct fastrpc_invoke_ctx *ctx;
+ struct fastrpc_channel_ctx *cctx;
+ int i;
+
+ ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
+ cctx = ctx->cctx;
+
+ for (i = 0; i < ctx->nscalars; i++)
+ fastrpc_map_put(ctx->maps[i]);
+
+ if (ctx->buf)
+ fastrpc_buf_free(ctx->buf);
+
+ spin_lock(&cctx->lock);
+ idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
+ spin_unlock(&cctx->lock);
+
+ kfree(ctx->maps);
+ kfree(ctx);
+}
+
+static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
+{
+ kref_get(&ctx->refcount);
+}
+
+static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
+{
+ kref_put(&ctx->refcount, fastrpc_context_free);
+}
+
+static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
+ struct fastrpc_user *user, u32 kernel, u32 sc,
+ struct fastrpc_invoke_args *args)
+{
+ struct fastrpc_channel_ctx *cctx = user->cctx;
+ struct fastrpc_invoke_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ctx->node);
+ ctx->fl = user;
+ ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
+ ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
+ REMOTE_SCALARS_OUTBUFS(sc);
+
+ if (ctx->nscalars) {
+ ctx->maps = kcalloc(ctx->nscalars,
+ sizeof(*ctx->maps), GFP_KERNEL);
+ if (!ctx->maps) {
+ kfree(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ ctx->args = args;
+ }
+
+ ctx->sc = sc;
+ ctx->retval = -1;
+ ctx->pid = current->pid;
+ ctx->tgid = user->tgid;
+ ctx->cctx = cctx;
+ init_completion(&ctx->work);
+
+ spin_lock(&user->lock);
+ list_add_tail(&ctx->node, &user->pending);
+ spin_unlock(&user->lock);
+
+ spin_lock(&cctx->lock);
+ ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
+ FASTRPC_CTX_MAX, GFP_ATOMIC);
+ if (ret < 0) {
+ spin_unlock(&cctx->lock);
+ goto err_idr;
+ }
+ ctx->ctxid = ret << 4;
+ spin_unlock(&cctx->lock);
+
+ kref_init(&ctx->refcount);
+
+ return ctx;
+err_idr:
+ spin_lock(&user->lock);
+ list_del(&ctx->node);
+ spin_unlock(&user->lock);
+ kfree(ctx->maps);
+ kfree(ctx);
+
+ return ERR_PTR(ret);
+}
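+
+/*
+ * Context id layout (illustrative): the idr slot allocated above is
+ * shifted left by 4, so slot 3 becomes ctxid 0x30. The low 4 bits are
+ * free to carry fl->pd when msg->ctx is built for the remote side, and
+ * the response path recovers the slot with
+ * (ctx & FASTRPC_CTXID_MASK) >> 4.
+ */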
+
+static struct sg_table *
+fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct fastrpc_dma_buf_attachment *a = attachment->priv;
+ struct sg_table *table;
+
+ table = &a->sgt;
+
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
+ return ERR_PTR(-ENOMEM);
+
+ return table;
+}
+
+static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *table,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
+}
+
+static void fastrpc_release(struct dma_buf *dmabuf)
+{
+ struct fastrpc_buf *buffer = dmabuf->priv;
+
+ fastrpc_buf_free(buffer);
+}
+
+static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct fastrpc_dma_buf_attachment *a;
+ struct fastrpc_buf *buffer = dmabuf->priv;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
+ FASTRPC_PHYS(buffer->phys), buffer->size);
+ if (ret < 0) {
+ dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+ return -EINVAL;
+ }
+
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->node);
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->node, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct fastrpc_dma_buf_attachment *a = attachment->priv;
+ struct fastrpc_buf *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->node);
+ mutex_unlock(&buffer->lock);
+ kfree(a);
+}
+
+static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+ struct fastrpc_buf *buf = dmabuf->priv;
+
+ return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
+}
+
+static void *fastrpc_vmap(struct dma_buf *dmabuf)
+{
+ struct fastrpc_buf *buf = dmabuf->priv;
+
+ return buf->virt;
+}
+
+static int fastrpc_mmap(struct dma_buf *dmabuf,
+ struct vm_area_struct *vma)
+{
+ struct fastrpc_buf *buf = dmabuf->priv;
+ size_t size = vma->vm_end - vma->vm_start;
+
+ return dma_mmap_coherent(buf->dev, vma, buf->virt,
+ FASTRPC_PHYS(buf->phys), size);
+}
+
+static const struct dma_buf_ops fastrpc_dma_buf_ops = {
+ .attach = fastrpc_dma_buf_attach,
+ .detach = fastrpc_dma_buf_detatch,
+ .map_dma_buf = fastrpc_map_dma_buf,
+ .unmap_dma_buf = fastrpc_unmap_dma_buf,
+ .mmap = fastrpc_mmap,
+ .map = fastrpc_kmap,
+ .vmap = fastrpc_vmap,
+ .release = fastrpc_release,
+};
+
+static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ u64 len, struct fastrpc_map **ppmap)
+{
+ struct fastrpc_session_ctx *sess = fl->sctx;
+ struct fastrpc_map *map = NULL;
+ int err = 0;
+
+ if (!fastrpc_map_find(fl, fd, ppmap))
+ return 0;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&map->node);
+ map->fl = fl;
+ map->fd = fd;
+ map->buf = dma_buf_get(fd);
+ if (IS_ERR(map->buf)) {
+ err = PTR_ERR(map->buf);
+ goto get_err;
+ }
+
+ map->attach = dma_buf_attach(map->buf, sess->dev);
+ if (IS_ERR(map->attach)) {
+ dev_err(sess->dev, "Failed to attach dmabuf\n");
+ err = PTR_ERR(map->attach);
+ goto attach_err;
+ }
+
+ map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(map->table)) {
+ err = PTR_ERR(map->table);
+ goto map_err;
+ }
+
+ map->phys = sg_dma_address(map->table->sgl);
+ map->phys += ((u64)fl->sctx->sid << 32);
+ map->size = len;
+ map->va = sg_virt(map->table->sgl);
+ map->len = len;
+ kref_init(&map->refcount);
+
+ spin_lock(&fl->lock);
+ list_add_tail(&map->node, &fl->maps);
+ spin_unlock(&fl->lock);
+ *ppmap = map;
+
+ return 0;
+
+map_err:
+ dma_buf_detach(map->buf, map->attach);
+attach_err:
+ dma_buf_put(map->buf);
+get_err:
+ kfree(map);
+
+ return err;
+}
+
+/*
+ * Fastrpc payload buffer with metadata looks like:
+ *
+ * >>>>>> START of METADATA <<<<<<<<<
+ * +---------------------------------+
+ * | Arguments |
+ * | type:(struct fastrpc_remote_arg)|
+ * | (0 - N) |
+ * +---------------------------------+
+ * | Invoke Buffer list |
+ * | type:(struct fastrpc_invoke_buf)|
+ * | (0 - N) |
+ * +---------------------------------+
+ * | Page info list |
+ * | type:(struct fastrpc_phy_page) |
+ * | (0 - N) |
+ * +---------------------------------+
+ * | Optional info |
+ * |(can be specific to SoC/Firmware)|
+ * +---------------------------------+
+ * >>>>>>>> END of METADATA <<<<<<<<<
+ * +---------------------------------+
+ * | Inline ARGS |
+ * | (0-N) |
+ * +---------------------------------+
+ */
+
+static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
+{
+ int size = 0;
+
+ size = (sizeof(struct fastrpc_remote_arg) +
+ sizeof(struct fastrpc_invoke_buf) +
+ sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
+ sizeof(u64) * FASTRPC_MAX_FDLIST +
+ sizeof(u32) * FASTRPC_MAX_CRCLIST;
+
+ return size;
+}
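+
+/*
+ * Sizing example for the metadata above (illustrative, assuming no
+ * structure padding): each scalar costs sizeof(fastrpc_remote_arg) +
+ * sizeof(fastrpc_invoke_buf) + sizeof(fastrpc_phy_page) = 16 + 8 + 16 =
+ * 40 bytes, and the fixed fd/crc lists add 16 * 8 + 64 * 4 = 384 bytes.
+ * An invoke with three scalars therefore needs 3 * 40 + 384 = 504 bytes
+ * of metadata, and its inline args start at ALIGN(504, 128) = 512.
+ */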
+
+static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
+{
+ u64 size = 0;
+ int i;
+
+ size = ALIGN(metalen, FASTRPC_ALIGN);
+ for (i = 0; i < ctx->nscalars; i++) {
+ if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
+ size = ALIGN(size, FASTRPC_ALIGN);
+ size += ctx->args[i].length;
+ }
+ }
+
+ return size;
+}
+
+static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
+{
+ struct device *dev = ctx->fl->sctx->dev;
+ int i, err;
+
+ for (i = 0; i < ctx->nscalars; ++i) {
+ /* Make sure reserved field is set to 0 */
+ if (ctx->args[i].reserved)
+ return -EINVAL;
+
+ if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
+ ctx->args[i].length == 0)
+ continue;
+
+ err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
+ ctx->args[i].length, &ctx->maps[i]);
+ if (err) {
+ dev_err(dev, "Error Creating map %d\n", err);
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
+static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+{
+ struct device *dev = ctx->fl->sctx->dev;
+ struct fastrpc_remote_arg *rpra;
+ struct fastrpc_invoke_buf *list;
+ struct fastrpc_phy_page *pages;
+ int inbufs, i, err = 0;
+ u64 rlen, pkt_size;
+ uintptr_t args;
+ int metalen;
+
+
+ inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
+ metalen = fastrpc_get_meta_size(ctx);
+ pkt_size = fastrpc_get_payload_size(ctx, metalen);
+
+ err = fastrpc_create_maps(ctx);
+ if (err)
+ return err;
+
+ ctx->msg_sz = pkt_size;
+
+ err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
+ if (err)
+ return err;
+
+ rpra = ctx->buf->virt;
+ list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
+ pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
+ sizeof(*rpra));
+ args = (uintptr_t)ctx->buf->virt + metalen;
+ rlen = pkt_size - metalen;
+ ctx->rpra = rpra;
+
+ for (i = 0; i < ctx->nbufs; ++i) {
+ u64 len = ctx->args[i].length;
+
+ rpra[i].pv = 0;
+ rpra[i].len = len;
+ list[i].num = len ? 1 : 0;
+ list[i].pgidx = i;
+
+ if (!len)
+ continue;
+
+ pages[i].size = roundup(len, PAGE_SIZE);
+
+ if (ctx->maps[i]) {
+ rpra[i].pv = (u64) ctx->args[i].ptr;
+ pages[i].addr = ctx->maps[i]->phys;
+ } else {
+ rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
+ args = ALIGN(args, FASTRPC_ALIGN);
+ if (rlen < len)
+ goto bail;
+
+ rpra[i].pv = args;
+ pages[i].addr = ctx->buf->phys + (pkt_size - rlen);
+ pages[i].addr = pages[i].addr & PAGE_MASK;
+ args = args + len;
+ rlen -= len;
+ }
+
+ if (i < inbufs && !ctx->maps[i]) {
+ void *dst = (void *)(uintptr_t)rpra[i].pv;
+ void *src = (void *)(uintptr_t)ctx->args[i].ptr;
+
+ if (!kernel) {
+ if (copy_from_user(dst, (void __user *)src,
+ len)) {
+ err = -EFAULT;
+ goto bail;
+ }
+ } else {
+ memcpy(dst, src, len);
+ }
+ }
+ }
+
+ for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
+ rpra[i].pv = (u64) ctx->args[i].ptr;
+ rpra[i].len = ctx->args[i].length;
+ list[i].num = ctx->args[i].length ? 1 : 0;
+ list[i].pgidx = i;
+ pages[i].addr = ctx->maps[i]->phys;
+ pages[i].size = ctx->maps[i]->size;
+ }
+
+bail:
+ if (err)
+ dev_err(dev, "Error: get invoke args failed:%d\n", err);
+
+ return err;
+}
+
+static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
+ u32 kernel)
+{
+ struct fastrpc_remote_arg *rpra = ctx->rpra;
+ int i, inbufs;
+
+ inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
+
+ for (i = inbufs; i < ctx->nbufs; ++i) {
+ void *src = (void *)(uintptr_t)rpra[i].pv;
+ void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
+ u64 len = rpra[i].len;
+
+ if (!kernel) {
+ if (copy_to_user((void __user *)dst, src, len))
+ return -EFAULT;
+ } else {
+ memcpy(dst, src, len);
+ }
+ }
+
+ return 0;
+}
+
+static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
+ struct fastrpc_invoke_ctx *ctx,
+ u32 kernel, uint32_t handle)
+{
+ struct fastrpc_channel_ctx *cctx;
+ struct fastrpc_user *fl = ctx->fl;
+ struct fastrpc_msg *msg = &ctx->msg;
+
+ cctx = fl->cctx;
+ msg->pid = fl->tgid;
+ msg->tid = current->pid;
+
+ if (kernel)
+ msg->pid = 0;
+
+ msg->ctx = ctx->ctxid | fl->pd;
+ msg->handle = handle;
+ msg->sc = ctx->sc;
+ msg->addr = ctx->buf ? ctx->buf->phys : 0;
+ msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
+ fastrpc_context_get(ctx);
+
+ return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
+}
+
+static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
+ u32 handle, u32 sc,
+ struct fastrpc_invoke_args *args)
+{
+ struct fastrpc_invoke_ctx *ctx = NULL;
+ int err = 0;
+
+ if (!fl->sctx)
+ return -EINVAL;
+
+ ctx = fastrpc_context_alloc(fl, kernel, sc, args);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ if (ctx->nscalars) {
+ err = fastrpc_get_args(kernel, ctx);
+ if (err)
+ goto bail;
+ }
+ /* Send invoke buffer to remote dsp */
+ err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
+ if (err)
+ goto bail;
+
+ /* Wait for remote dsp to respond or time out */
+ err = wait_for_completion_interruptible(&ctx->work);
+ if (err)
+ goto bail;
+
+ /* Check the response from remote dsp */
+ err = ctx->retval;
+ if (err)
+ goto bail;
+
+ if (ctx->nscalars) {
+ /* populate all the output buffers with results */
+ err = fastrpc_put_args(ctx, kernel);
+ if (err)
+ goto bail;
+ }
+
+bail:
+ /* We are done with this compute context, remove it from pending list */
+ spin_lock(&fl->lock);
+ list_del(&ctx->node);
+ spin_unlock(&fl->lock);
+ fastrpc_context_put(ctx);
+
+ if (err)
+ dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
+
+ return err;
+}
+
+static int fastrpc_init_create_process(struct fastrpc_user *fl,
+ char __user *argp)
+{
+ struct fastrpc_init_create init;
+ struct fastrpc_invoke_args *args;
+ struct fastrpc_phy_page pages[1];
+ struct fastrpc_map *map = NULL;
+ struct fastrpc_buf *imem = NULL;
+ int memlen;
+ int err;
+ struct {
+ int pgid;
+ u32 namelen;
+ u32 filelen;
+ u32 pageslen;
+ u32 attrs;
+ u32 siglen;
+ } inbuf;
+ u32 sc;
+
+ args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ if (copy_from_user(&init, argp, sizeof(init))) {
+ err = -EFAULT;
+ goto bail;
+ }
+
+ if (init.filelen > INIT_FILELEN_MAX) {
+ err = -EINVAL;
+ goto bail;
+ }
+
+ inbuf.pgid = fl->tgid;
+ inbuf.namelen = strlen(current->comm) + 1;
+ inbuf.filelen = init.filelen;
+ inbuf.pageslen = 1;
+ inbuf.attrs = init.attrs;
+ inbuf.siglen = init.siglen;
+ fl->pd = 1;
+
+ if (init.filelen && init.filefd) {
+ err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
+ if (err)
+ goto bail;
+ }
+
+ memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
+ 1024 * 1024);
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
+ &imem);
+ if (err) {
+ fastrpc_map_put(map);
+ goto bail;
+ }
+
+ fl->init_mem = imem;
+ args[0].ptr = (u64)(uintptr_t)&inbuf;
+ args[0].length = sizeof(inbuf);
+ args[0].fd = -1;
+
+ args[1].ptr = (u64)(uintptr_t)current->comm;
+ args[1].length = inbuf.namelen;
+ args[1].fd = -1;
+
+ args[2].ptr = (u64) init.file;
+ args[2].length = inbuf.filelen;
+ args[2].fd = init.filefd;
+
+ pages[0].addr = imem->phys;
+ pages[0].size = imem->size;
+
+ args[3].ptr = (u64)(uintptr_t) pages;
+ args[3].length = 1 * sizeof(*pages);
+ args[3].fd = -1;
+
+ args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
+ args[4].length = sizeof(inbuf.attrs);
+ args[4].fd = -1;
+
+ args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
+ args[5].length = sizeof(inbuf.siglen);
+ args[5].fd = -1;
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
+ if (init.attrs)
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
+
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+ sc, args);
+
+ if (err) {
+ fastrpc_map_put(map);
+ fastrpc_buf_free(imem);
+ }
+
+bail:
+ kfree(args);
+
+ return err;
+}
+
+static struct fastrpc_session_ctx *fastrpc_session_alloc(
+ struct fastrpc_channel_ctx *cctx)
+{
+ struct fastrpc_session_ctx *session = NULL;
+ int i;
+
+ spin_lock(&cctx->lock);
+ for (i = 0; i < cctx->sesscount; i++) {
+ if (!cctx->session[i].used && cctx->session[i].valid) {
+ cctx->session[i].used = true;
+ session = &cctx->session[i];
+ break;
+ }
+ }
+ spin_unlock(&cctx->lock);
+
+ return session;
+}
+
+static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
+ struct fastrpc_session_ctx *session)
+{
+ spin_lock(&cctx->lock);
+ session->used = false;
+ spin_unlock(&cctx->lock);
+}
+
+static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
+{
+ struct fastrpc_invoke_args args[1];
+ int tgid = 0;
+ u32 sc;
+
+ tgid = fl->tgid;
+ args[0].ptr = (u64)(uintptr_t) &tgid;
+ args[0].length = sizeof(tgid);
+ args[0].fd = -1;
+ args[0].reserved = 0;
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
+
+ return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+ sc, &args[0]);
+}
+
+static int fastrpc_device_release(struct inode *inode, struct file *file)
+{
+ struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
+ struct fastrpc_channel_ctx *cctx = fl->cctx;
+ struct fastrpc_invoke_ctx *ctx, *n;
+ struct fastrpc_map *map, *m;
+
+ fastrpc_release_current_dsp_process(fl);
+
+ spin_lock(&cctx->lock);
+ list_del(&fl->user);
+ spin_unlock(&cctx->lock);
+
+ if (fl->init_mem)
+ fastrpc_buf_free(fl->init_mem);
+
+ list_for_each_entry_safe(ctx, n, &fl->pending, node) {
+ list_del(&ctx->node);
+ fastrpc_context_put(ctx);
+ }
+
+ list_for_each_entry_safe(map, m, &fl->maps, node) {
+ list_del(&map->node);
+ fastrpc_map_put(map);
+ }
+
+ fastrpc_session_free(cctx, fl->sctx);
+
+ mutex_destroy(&fl->mutex);
+ kfree(fl);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static int fastrpc_device_open(struct inode *inode, struct file *filp)
+{
+ struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
+ struct fastrpc_user *fl = NULL;
+
+ fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ filp->private_data = fl;
+ spin_lock_init(&fl->lock);
+ mutex_init(&fl->mutex);
+ INIT_LIST_HEAD(&fl->pending);
+ INIT_LIST_HEAD(&fl->maps);
+ INIT_LIST_HEAD(&fl->user);
+ fl->tgid = current->tgid;
+ fl->cctx = cctx;
+
+ fl->sctx = fastrpc_session_alloc(cctx);
+ if (!fl->sctx) {
+ dev_err(&cctx->rpdev->dev, "No session available\n");
+ mutex_destroy(&fl->mutex);
+ kfree(fl);
+
+ return -EBUSY;
+ }
+
+ spin_lock(&cctx->lock);
+ list_add_tail(&fl->user, &cctx->users);
+ spin_unlock(&cctx->lock);
+
+ return 0;
+}
+
+static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
+{
+ struct dma_buf *buf;
+ int info;
+
+ if (copy_from_user(&info, argp, sizeof(info)))
+ return -EFAULT;
+
+ buf = dma_buf_get(info);
+ if (IS_ERR_OR_NULL(buf))
+ return -EINVAL;
+ /*
+ * one for the last get and the other for the ALLOC_DMA_BUFF ioctl
+ */
+ dma_buf_put(buf);
+ dma_buf_put(buf);
+
+ return 0;
+}
+
+static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_alloc_dma_buf bp;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct fastrpc_buf *buf = NULL;
+ int err;
+
+ if (copy_from_user(&bp, argp, sizeof(bp)))
+ return -EFAULT;
+
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
+ if (err)
+ return err;
+ exp_info.ops = &fastrpc_dma_buf_ops;
+ exp_info.size = bp.size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = buf;
+ buf->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(buf->dmabuf)) {
+ err = PTR_ERR(buf->dmabuf);
+ fastrpc_buf_free(buf);
+ return err;
+ }
+
+ bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
+ if (bp.fd < 0) {
+ dma_buf_put(buf->dmabuf);
+ return -EINVAL;
+ }
+
+ if (copy_to_user(argp, &bp, sizeof(bp))) {
+ dma_buf_put(buf->dmabuf);
+ return -EFAULT;
+ }
+
+ get_dma_buf(buf->dmabuf);
+
+ return 0;
+}
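+
+/*
+ * Reference counting sketch for the two dmabuf ioctls above
+ * (illustrative): dma_buf_export() creates the buffer with one
+ * reference, dma_buf_fd() hands that reference to the new fd, and the
+ * final get_dma_buf() takes an extra one so the buffer survives a plain
+ * close() of the fd. FASTRPC_IOCTL_FREE_DMA_BUFF later takes a temporary
+ * reference via dma_buf_get() and drops two: its own and the extra one
+ * taken at allocation time.
+ */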
+
+static int fastrpc_init_attach(struct fastrpc_user *fl)
+{
+ struct fastrpc_invoke_args args[1];
+ int tgid = fl->tgid;
+ u32 sc;
+
+ args[0].ptr = (u64)(uintptr_t) &tgid;
+ args[0].length = sizeof(tgid);
+ args[0].fd = -1;
+ args[0].reserved = 0;
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
+ fl->pd = 0;
+
+ return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+ sc, &args[0]);
+}
+
+static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_invoke_args *args = NULL;
+ struct fastrpc_invoke inv;
+ u32 nscalars;
+ int err;
+
+ if (copy_from_user(&inv, argp, sizeof(inv)))
+ return -EFAULT;
+
+ /* nscalars is truncated here to max supported value */
+ nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
+ if (nscalars) {
+ args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
+ nscalars * sizeof(*args))) {
+ kfree(args);
+ return -EFAULT;
+ }
+ }
+
+ err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
+ kfree(args);
+
+ return err;
+}
+
+static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
+ char __user *argp = (char __user *)arg;
+ int err;
+
+ switch (cmd) {
+ case FASTRPC_IOCTL_INVOKE:
+ err = fastrpc_invoke(fl, argp);
+ break;
+ case FASTRPC_IOCTL_INIT_ATTACH:
+ err = fastrpc_init_attach(fl);
+ break;
+ case FASTRPC_IOCTL_INIT_CREATE:
+ err = fastrpc_init_create_process(fl, argp);
+ break;
+ case FASTRPC_IOCTL_FREE_DMA_BUFF:
+ err = fastrpc_dmabuf_free(fl, argp);
+ break;
+ case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
+ err = fastrpc_dmabuf_alloc(fl, argp);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ return err;
+}
+
+static const struct file_operations fastrpc_fops = {
+ .open = fastrpc_device_open,
+ .release = fastrpc_device_release,
+ .unlocked_ioctl = fastrpc_device_ioctl,
+ .compat_ioctl = fastrpc_device_ioctl,
+};
+
+static int fastrpc_cb_probe(struct platform_device *pdev)
+{
+ struct fastrpc_channel_ctx *cctx;
+ struct fastrpc_session_ctx *sess;
+ struct device *dev = &pdev->dev;
+ int i, sessions = 0;
+
+ cctx = dev_get_drvdata(dev->parent);
+ if (!cctx)
+ return -EINVAL;
+
+ of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
+
+ spin_lock(&cctx->lock);
+ sess = &cctx->session[cctx->sesscount];
+ sess->used = false;
+ sess->valid = true;
+ sess->dev = dev;
+ dev_set_drvdata(dev, sess);
+
+ if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
+ dev_info(dev, "FastRPC Session ID not specified in DT\n");
+
+ if (sessions > 0) {
+ struct fastrpc_session_ctx *dup_sess;
+
+ for (i = 1; i < sessions; i++) {
+ if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
+ break;
+ dup_sess = &cctx->session[cctx->sesscount];
+ memcpy(dup_sess, sess, sizeof(*dup_sess));
+ }
+ }
+ cctx->sesscount++;
+ spin_unlock(&cctx->lock);
+ dma_set_mask(dev, DMA_BIT_MASK(32));
+
+ return 0;
+}
+
+static int fastrpc_cb_remove(struct platform_device *pdev)
+{
+ struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
+ struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ spin_lock(&cctx->lock);
+ for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
+ if (cctx->session[i].sid == sess->sid) {
+ cctx->session[i].valid = false;
+ cctx->sesscount--;
+ }
+ }
+ spin_unlock(&cctx->lock);
+
+ return 0;
+}
+
+static const struct of_device_id fastrpc_match_table[] = {
+ { .compatible = "qcom,fastrpc-compute-cb", },
+ {}
+};
+
+static struct platform_driver fastrpc_cb_driver = {
+ .probe = fastrpc_cb_probe,
+ .remove = fastrpc_cb_remove,
+ .driver = {
+ .name = "qcom,fastrpc-cb",
+ .of_match_table = fastrpc_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct device *rdev = &rpdev->dev;
+ struct fastrpc_channel_ctx *data;
+ int i, err, domain_id = -1;
+ const char *domain;
+
+ data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = of_property_read_string(rdev->of_node, "label", &domain);
+ if (err) {
+ dev_info(rdev, "FastRPC Domain not specified in DT\n");
+ return err;
+ }
+
+ for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
+ if (!strcmp(domains[i], domain)) {
+ domain_id = i;
+ break;
+ }
+ }
+
+ if (domain_id < 0) {
+ dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
+ return -EINVAL;
+ }
+
+ data->miscdev.minor = MISC_DYNAMIC_MINOR;
+ data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
+ domains[domain_id]);
+ data->miscdev.fops = &fastrpc_fops;
+ err = misc_register(&data->miscdev);
+ if (err)
+ return err;
+
+ dev_set_drvdata(&rpdev->dev, data);
+ dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
+ INIT_LIST_HEAD(&data->users);
+ spin_lock_init(&data->lock);
+ idr_init(&data->ctx_idr);
+ data->domain_id = domain_id;
+ data->rpdev = rpdev;
+
+ return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
+}
+
+static void fastrpc_notify_users(struct fastrpc_user *user)
+{
+ struct fastrpc_invoke_ctx *ctx;
+
+ spin_lock(&user->lock);
+ list_for_each_entry(ctx, &user->pending, node)
+ complete(&ctx->work);
+ spin_unlock(&user->lock);
+}
+
+static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
+ struct fastrpc_user *user;
+
+ spin_lock(&cctx->lock);
+ list_for_each_entry(user, &cctx->users, user)
+ fastrpc_notify_users(user);
+ spin_unlock(&cctx->lock);
+
+ misc_deregister(&cctx->miscdev);
+ of_platform_depopulate(&rpdev->dev);
+ kfree(cctx);
+}
+
+static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 addr)
+{
+ struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
+ struct fastrpc_invoke_rsp *rsp = data;
+ struct fastrpc_invoke_ctx *ctx;
+ unsigned long flags;
+ unsigned long ctxid;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
+
+ spin_lock_irqsave(&cctx->lock, flags);
+ ctx = idr_find(&cctx->ctx_idr, ctxid);
+ spin_unlock_irqrestore(&cctx->lock, flags);
+
+ if (!ctx) {
+ dev_err(&rpdev->dev, "No context ID matches response\n");
+ return -ENOENT;
+ }
+
+ ctx->retval = rsp->retval;
+ complete(&ctx->work);
+ fastrpc_context_put(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id fastrpc_rpmsg_of_match[] = {
+ { .compatible = "qcom,fastrpc" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
+
+static struct rpmsg_driver fastrpc_driver = {
+ .probe = fastrpc_rpmsg_probe,
+ .remove = fastrpc_rpmsg_remove,
+ .callback = fastrpc_rpmsg_callback,
+ .drv = {
+ .name = "qcom,fastrpc",
+ .of_match_table = fastrpc_rpmsg_of_match,
+ },
+};
+
+static int fastrpc_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&fastrpc_cb_driver);
+ if (ret < 0) {
+ pr_err("fastrpc: failed to register cb driver\n");
+ return ret;
+ }
+
+ ret = register_rpmsg_driver(&fastrpc_driver);
+ if (ret < 0) {
+ pr_err("fastrpc: failed to register rpmsg driver\n");
+ platform_driver_unregister(&fastrpc_cb_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(fastrpc_init);
+
+static void fastrpc_exit(void)
+{
+ platform_driver_unregister(&fastrpc_cb_driver);
+ unregister_rpmsg_driver(&fastrpc_driver);
+}
+module_exit(fastrpc_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index efe2fb72d54b..25265fd0fd6e 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
if (get_order(size) >= MAX_ORDER)
return NULL;
- return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
- GFP_KERNEL);
+ return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+ GFP_KERNEL);
}
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
new file mode 100644
index 000000000000..99db2b82ada6
--- /dev/null
+++ b/drivers/misc/habanalabs/Kconfig
@@ -0,0 +1,25 @@
+#
+# HabanaLabs AI accelerators driver
+#
+
+config HABANA_AI
+ tristate "HabanaAI accelerators (habanalabs)"
+ depends on PCI && HAS_IOMEM
+ select FRAME_VECTOR
+ select DMA_SHARED_BUFFER
+ select GENERIC_ALLOCATOR
+ select HWMON
+ help
+ Enables PCIe card driver for Habana's AI Processors (AIP) that are
+ designed to accelerate Deep Learning inference and training workloads.
+
+ The driver manages the PCIe devices and provides IOCTL interface for
+ the user to submit workloads to the devices.
+
+ The user-space interface is described in
+ include/uapi/misc/habanalabs.h
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called habanalabs.
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
new file mode 100644
index 000000000000..c6592db59b25
--- /dev/null
+++ b/drivers/misc/habanalabs/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for HabanaLabs AI accelerators driver
+#
+
+obj-m := habanalabs.o
+
+habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
+ command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
+ command_submission.o mmu.o
+
+habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
+
+include $(src)/goya/Makefile
+habanalabs-y += $(HL_GOYA_FILES)
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c
new file mode 100644
index 000000000000..f54e7971a762
--- /dev/null
+++ b/drivers/misc/habanalabs/asid.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+int hl_asid_init(struct hl_device *hdev)
+{
+ hdev->asid_bitmap = kcalloc(BITS_TO_LONGS(hdev->asic_prop.max_asid),
+ sizeof(*hdev->asid_bitmap), GFP_KERNEL);
+ if (!hdev->asid_bitmap)
+ return -ENOMEM;
+
+ mutex_init(&hdev->asid_mutex);
+
+ /* ASID 0 is reserved for KMD */
+ set_bit(0, hdev->asid_bitmap);
+
+ return 0;
+}
+
+void hl_asid_fini(struct hl_device *hdev)
+{
+ mutex_destroy(&hdev->asid_mutex);
+ kfree(hdev->asid_bitmap);
+}
+
+unsigned long hl_asid_alloc(struct hl_device *hdev)
+{
+ unsigned long found;
+
+ mutex_lock(&hdev->asid_mutex);
+
+ found = find_first_zero_bit(hdev->asid_bitmap,
+ hdev->asic_prop.max_asid);
+ if (found == hdev->asic_prop.max_asid)
+ found = 0;
+ else
+ set_bit(found, hdev->asid_bitmap);
+
+ mutex_unlock(&hdev->asid_mutex);
+
+ return found;
+}
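+
+/*
+ * Note (illustrative): ASID 0 is reserved for KMD in hl_asid_init(), so
+ * a return value of 0 from hl_asid_alloc() doubles as "no free ASID".
+ */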
+
+void hl_asid_free(struct hl_device *hdev, unsigned long asid)
+{
+ if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid),
+ "Invalid ASID %lu", asid))
+ return;
+ clear_bit(asid, hdev->asid_bitmap);
+}
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
new file mode 100644
index 000000000000..85f75806a9a7
--- /dev/null
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
+{
+ hdev->asic_funcs->dma_free_coherent(hdev, cb->size,
+ (void *) (uintptr_t) cb->kernel_address,
+ cb->bus_address);
+ kfree(cb);
+}
+
+static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
+{
+ if (cb->is_pool) {
+ spin_lock(&hdev->cb_pool_lock);
+ list_add(&cb->pool_list, &hdev->cb_pool);
+ spin_unlock(&hdev->cb_pool_lock);
+ } else {
+ cb_fini(hdev, cb);
+ }
+}
+
+static void cb_release(struct kref *ref)
+{
+ struct hl_device *hdev;
+ struct hl_cb *cb;
+
+ cb = container_of(ref, struct hl_cb, refcount);
+ hdev = cb->hdev;
+
+ hl_debugfs_remove_cb(cb);
+
+ cb_do_release(hdev, cb);
+}
+
+static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
+ int ctx_id)
+{
+ struct hl_cb *cb;
+ void *p;
+
+ /*
+ * We use GFP_ATOMIC here because this function can be called from
+ * the latency-sensitive code path for command submission. Due to H/W
+ * limitations in some of the ASICs, the kernel must copy the user CB
+ * that is designated for an external queue and actually enqueue
+ * the kernel's copy. Hence, we must never sleep in this code section
+ * and must use GFP_ATOMIC for all memory allocations.
+ */
+ if (ctx_id == HL_KERNEL_ASID_ID)
+ cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
+ else
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+
+ if (!cb)
+ return NULL;
+
+ if (ctx_id == HL_KERNEL_ASID_ID)
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
+ &cb->bus_address, GFP_ATOMIC);
+ else
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
+ &cb->bus_address,
+ GFP_USER | __GFP_ZERO);
+ if (!p) {
+ dev_err(hdev->dev,
+ "failed to allocate %d of dma memory for CB\n",
+ cb_size);
+ kfree(cb);
+ return NULL;
+ }
+
+ cb->kernel_address = (u64) (uintptr_t) p;
+ cb->size = cb_size;
+
+ return cb;
+}
+
+int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+ u32 cb_size, u64 *handle, int ctx_id)
+{
+ struct hl_cb *cb;
+ bool alloc_new_cb = true;
+ int rc;
+
+ /*
+ * Can't use generic function to check this because of special case
+ * where we create a CB as part of the reset process
+ */
+ if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
+ (ctx_id != HL_KERNEL_ASID_ID))) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is disabled or in reset. Can't create new CBs\n");
+ rc = -EBUSY;
+ goto out_err;
+ }
+
+ if (cb_size > HL_MAX_CB_SIZE) {
+ dev_err(hdev->dev,
+ "CB size %d must be less than %d\n",
+ cb_size, HL_MAX_CB_SIZE);
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ /* Minimum allocation must be PAGE SIZE */
+ if (cb_size < PAGE_SIZE)
+ cb_size = PAGE_SIZE;
+
+ if (ctx_id == HL_KERNEL_ASID_ID &&
+ cb_size <= hdev->asic_prop.cb_pool_cb_size) {
+
+ spin_lock(&hdev->cb_pool_lock);
+ if (!list_empty(&hdev->cb_pool)) {
+ cb = list_first_entry(&hdev->cb_pool, typeof(*cb),
+ pool_list);
+ list_del(&cb->pool_list);
+ spin_unlock(&hdev->cb_pool_lock);
+ alloc_new_cb = false;
+ } else {
+ spin_unlock(&hdev->cb_pool_lock);
+ dev_dbg(hdev->dev, "CB pool is empty\n");
+ }
+ }
+
+ if (alloc_new_cb) {
+ cb = hl_cb_alloc(hdev, cb_size, ctx_id);
+ if (!cb) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ cb->hdev = hdev;
+ cb->ctx_id = ctx_id;
+
+ spin_lock(&mgr->cb_lock);
+ rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
+ spin_unlock(&mgr->cb_lock);
+
+ if (rc < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
+ goto release_cb;
+ }
+
+ cb->id = rc;
+
+ kref_init(&cb->refcount);
+ spin_lock_init(&cb->lock);
+
+ /*
+ * idr is 32-bit so we can safely OR it with a mask that is above
+ * 32 bit
+ */
+ *handle = cb->id | HL_MMAP_CB_MASK;
+ *handle <<= PAGE_SHIFT;
+
+ hl_debugfs_add_cb(cb);
+
+ return 0;
+
+release_cb:
+ cb_do_release(hdev, cb);
+out_err:
+ *handle = 0;
+
+ return rc;
+}
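+
+/*
+ * Handle layout example (illustrative, assuming 4 KiB pages): idr id 5
+ * becomes the handle (5 | HL_MMAP_CB_MASK) << 12. hl_cb_destroy() undoes
+ * this by shifting right by PAGE_SHIFT and truncating to u32 (the mask
+ * lives above the low 32 bits, so it is dropped), while hl_cb_mmap()
+ * receives the same page-granular value through vma->vm_pgoff.
+ */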
+
+int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
+{
+ struct hl_cb *cb;
+ u32 handle;
+ int rc = 0;
+
+ /*
+ * handle was given to user to do mmap, I need to shift it back to
+ * how the idr module gave it to me
+ */
+ cb_handle >>= PAGE_SHIFT;
+ handle = (u32) cb_handle;
+
+ spin_lock(&mgr->cb_lock);
+
+ cb = idr_find(&mgr->cb_handles, handle);
+ if (cb) {
+ idr_remove(&mgr->cb_handles, handle);
+ spin_unlock(&mgr->cb_lock);
+ kref_put(&cb->refcount, cb_release);
+ } else {
+ spin_unlock(&mgr->cb_lock);
+ dev_err(hdev->dev,
+ "CB destroy failed, no match to handle 0x%x\n", handle);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ union hl_cb_args *args = data;
+ struct hl_device *hdev = hpriv->hdev;
+ u64 handle;
+ int rc;
+
+ switch (args->in.op) {
+ case HL_CB_OP_CREATE:
+ rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
+ &handle, hpriv->ctx->asid);
+ memset(args, 0, sizeof(*args));
+ args->out.cb_handle = handle;
+ break;
+ case HL_CB_OP_DESTROY:
+ rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
+ args->in.cb_handle);
+ break;
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ return rc;
+}
+
+static void cb_vm_close(struct vm_area_struct *vma)
+{
+ struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
+ long new_mmap_size;
+
+ new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
+
+ if (new_mmap_size > 0) {
+ cb->mmap_size = new_mmap_size;
+ return;
+ }
+
+ spin_lock(&cb->lock);
+ cb->mmap = false;
+ spin_unlock(&cb->lock);
+
+ hl_cb_put(cb);
+ vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct cb_vm_ops = {
+ .close = cb_vm_close
+};
+
+int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cb *cb;
+ phys_addr_t address;
+ u32 handle;
+ int rc;
+
+ handle = vma->vm_pgoff;
+
+ /* reference was taken here */
+ cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
+ if (!cb) {
+ dev_err(hdev->dev,
+ "CB mmap failed, no match to handle %d\n", handle);
+ return -EINVAL;
+ }
+
+ /* Validation check */
+ if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
+ dev_err(hdev->dev,
+ "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
+ vma->vm_end - vma->vm_start, cb->size);
+ rc = -EINVAL;
+ goto put_cb;
+ }
+
+ spin_lock(&cb->lock);
+
+ if (cb->mmap) {
+ dev_err(hdev->dev,
+ "CB mmap failed, CB already mmaped to user\n");
+ rc = -EINVAL;
+ goto release_lock;
+ }
+
+ cb->mmap = true;
+
+ spin_unlock(&cb->lock);
+
+ vma->vm_ops = &cb_vm_ops;
+
+ /*
+ * Note: We're transferring the cb reference to
+ * vma->vm_private_data here.
+ */
+
+ vma->vm_private_data = cb;
+
+ /* Calculate address for CB */
+ address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);
+
+ rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
+ address, cb->size);
+
+ if (rc) {
+ spin_lock(&cb->lock);
+ cb->mmap = false;
+ goto release_lock;
+ }
+
+ cb->mmap_size = cb->size;
+
+ return 0;
+
+release_lock:
+ spin_unlock(&cb->lock);
+put_cb:
+ hl_cb_put(cb);
+ return rc;
+}
+
+struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+ u32 handle)
+{
+ struct hl_cb *cb;
+
+ spin_lock(&mgr->cb_lock);
+ cb = idr_find(&mgr->cb_handles, handle);
+
+ if (!cb) {
+ spin_unlock(&mgr->cb_lock);
+ dev_warn(hdev->dev,
+ "CB get failed, no match to handle %d\n", handle);
+ return NULL;
+ }
+
+ kref_get(&cb->refcount);
+
+ spin_unlock(&mgr->cb_lock);
+
+ return cb;
+
+}
+
+void hl_cb_put(struct hl_cb *cb)
+{
+ kref_put(&cb->refcount, cb_release);
+}
+
+void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
+{
+ spin_lock_init(&mgr->cb_lock);
+ idr_init(&mgr->cb_handles);
+}
+
+void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
+{
+ struct hl_cb *cb;
+ struct idr *idp;
+ u32 id;
+
+ idp = &mgr->cb_handles;
+
+ idr_for_each_entry(idp, cb, id) {
+ if (kref_put(&cb->refcount, cb_release) != 1)
+ dev_err(hdev->dev,
+ "CB %d for CTX ID %d is still alive\n",
+ id, cb->ctx_id);
+ }
+
+ idr_destroy(&mgr->cb_handles);
+}
+
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
+{
+ u64 cb_handle;
+ struct hl_cb *cb;
+ int rc;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
+ HL_KERNEL_ASID_ID);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc);
+ return NULL;
+ }
+
+ cb_handle >>= PAGE_SHIFT;
+ cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
+ if (!cb)
+ goto destroy_cb;
+
+ return cb;
+
+destroy_cb:
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);
+
+ return NULL;
+}
+
+int hl_cb_pool_init(struct hl_device *hdev)
+{
+ struct hl_cb *cb;
+ int i;
+
+ INIT_LIST_HEAD(&hdev->cb_pool);
+ spin_lock_init(&hdev->cb_pool_lock);
+
+ for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
+ cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
+ HL_KERNEL_ASID_ID);
+ if (cb) {
+ cb->is_pool = true;
+ list_add(&cb->pool_list, &hdev->cb_pool);
+ } else {
+ hl_cb_pool_fini(hdev);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int hl_cb_pool_fini(struct hl_device *hdev)
+{
+ struct hl_cb *cb, *tmp;
+
+ list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
+ list_del(&cb->pool_list);
+ cb_fini(hdev, cb);
+ }
+
+ return 0;
+}
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
new file mode 100644
index 000000000000..3525236ed8d9
--- /dev/null
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+static void job_wq_completion(struct work_struct *work);
+static long _hl_cs_wait_ioctl(struct hl_device *hdev,
+ struct hl_ctx *ctx, u64 timeout_us, u64 seq);
+static void cs_do_release(struct kref *ref);
+
+static const char *hl_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "HabanaLabs";
+}
+
+static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct hl_dma_fence *hl_fence =
+ container_of(fence, struct hl_dma_fence, base_fence);
+
+ return dev_name(hl_fence->hdev->dev);
+}
+
+static bool hl_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static void hl_fence_release(struct dma_fence *fence)
+{
+ struct hl_dma_fence *hl_fence =
+ container_of(fence, struct hl_dma_fence, base_fence);
+
+ kfree_rcu(hl_fence, base_fence.rcu);
+}
+
+static const struct dma_fence_ops hl_fence_ops = {
+ .get_driver_name = hl_fence_get_driver_name,
+ .get_timeline_name = hl_fence_get_timeline_name,
+ .enable_signaling = hl_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = hl_fence_release
+};
+
+static void cs_get(struct hl_cs *cs)
+{
+ kref_get(&cs->refcount);
+}
+
+static int cs_get_unless_zero(struct hl_cs *cs)
+{
+ return kref_get_unless_zero(&cs->refcount);
+}
+
+static void cs_put(struct hl_cs *cs)
+{
+ kref_put(&cs->refcount, cs_do_release);
+}
+
+/*
+ * cs_parser - parse the user command submission
+ *
+ * @hpriv : pointer to the private data of the fd
+ * @job : pointer to the job that holds the command submission info
+ *
+ * The function parses the command submission of the user. It calls the
+ * ASIC specific parser, which returns a list of memory blocks to send
+ * to the device as different command buffers
+ *
+ */
+static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cs_parser parser;
+ int rc;
+
+ parser.ctx_id = job->cs->ctx->asid;
+ parser.cs_sequence = job->cs->sequence;
+ parser.job_id = job->id;
+
+ parser.hw_queue_id = job->hw_queue_id;
+ parser.job_userptr_list = &job->userptr_list;
+ parser.patched_cb = NULL;
+ parser.user_cb = job->user_cb;
+ parser.user_cb_size = job->user_cb_size;
+ parser.ext_queue = job->ext_queue;
+ job->patched_cb = NULL;
+ parser.use_virt_addr = hdev->mmu_enable;
+
+ rc = hdev->asic_funcs->cs_parser(hdev, &parser);
+ if (job->ext_queue) {
+ if (!rc) {
+ job->patched_cb = parser.patched_cb;
+ job->job_cb_size = parser.patched_cb_size;
+
+ spin_lock(&job->patched_cb->lock);
+ job->patched_cb->cs_cnt++;
+ spin_unlock(&job->patched_cb->lock);
+ }
+
+ /*
+ * Whether the parsing worked or not, we don't need the
+ * original CB anymore because it was already parsed and
+ * won't be accessed again for this CS
+ */
+ spin_lock(&job->user_cb->lock);
+ job->user_cb->cs_cnt--;
+ spin_unlock(&job->user_cb->lock);
+ hl_cb_put(job->user_cb);
+ job->user_cb = NULL;
+ }
+
+ return rc;
+}
+
+static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct hl_cs *cs = job->cs;
+
+ if (job->ext_queue) {
+ hl_userptr_delete_list(hdev, &job->userptr_list);
+
+ /*
+ * We might arrive here from rollback and patched CB wasn't
+ * created, so we need to check it's not NULL
+ */
+ if (job->patched_cb) {
+ spin_lock(&job->patched_cb->lock);
+ job->patched_cb->cs_cnt--;
+ spin_unlock(&job->patched_cb->lock);
+
+ hl_cb_put(job->patched_cb);
+ }
+ }
+
+ /*
+ * This is the only place where there can be multiple threads
+ * modifying the list at the same time
+ */
+ spin_lock(&cs->job_lock);
+ list_del(&job->cs_node);
+ spin_unlock(&cs->job_lock);
+
+ hl_debugfs_remove_job(hdev, job);
+
+ if (job->ext_queue)
+ cs_put(cs);
+
+ kfree(job);
+}
+
+static void cs_do_release(struct kref *ref)
+{
+ struct hl_cs *cs = container_of(ref, struct hl_cs,
+ refcount);
+ struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_cs_job *job, *tmp;
+
+ cs->completed = true;
+
+ /*
+ * Although reaching here means that all external jobs have finished
+ * (each one of them took a refcnt on the CS), we still need to go over
+ * the internal jobs and free them. Otherwise, we
+ * will have leaked memory and what's worse, the CS object (and
+ * potentially the CTX object) could be released, while the JOB
+ * still holds a pointer to them (but no reference).
+ */
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+ free_job(hdev, job);
+
+ /* We also need to update CI for internal queues */
+ if (cs->submitted) {
+ hl_int_hw_queue_update_ci(cs);
+
+ spin_lock(&hdev->hw_queues_mirror_lock);
+ /* remove CS from hw_queues mirror list */
+ list_del_init(&cs->mirror_node);
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+
+ /*
+ * Don't cancel TDR in case this CS was timedout because we
+ * might be running from the TDR context
+ */
+ if ((!cs->timedout) &&
+ (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
+ struct hl_cs *next;
+
+ if (cs->tdr_active)
+ cancel_delayed_work_sync(&cs->work_tdr);
+
+ spin_lock(&hdev->hw_queues_mirror_lock);
+
+ /* queue TDR for next CS */
+ next = list_first_entry_or_null(
+ &hdev->hw_queues_mirror_list,
+ struct hl_cs, mirror_node);
+
+ if ((next) && (!next->tdr_active)) {
+ next->tdr_active = true;
+ schedule_delayed_work(&next->work_tdr,
+ hdev->timeout_jiffies);
+ }
+
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+ }
+ }
+
+ /*
+ * Must be called before hl_ctx_put because inside we use ctx to get
+ * the device
+ */
+ hl_debugfs_remove_cs(cs);
+
+ hl_ctx_put(cs->ctx);
+
+ if (cs->timedout)
+ dma_fence_set_error(cs->fence, -ETIMEDOUT);
+ else if (cs->aborted)
+ dma_fence_set_error(cs->fence, -EIO);
+
+ dma_fence_signal(cs->fence);
+ dma_fence_put(cs->fence);
+
+ kfree(cs);
+}
+
+static void cs_timedout(struct work_struct *work)
+{
+ struct hl_device *hdev;
+ int ctx_asid, rc;
+ struct hl_cs *cs = container_of(work, struct hl_cs,
+ work_tdr.work);
+ rc = cs_get_unless_zero(cs);
+ if (!rc)
+ return;
+
+ if ((!cs->submitted) || (cs->completed)) {
+ cs_put(cs);
+ return;
+ }
+
+ /* Mark the CS is timed out so we won't try to cancel its TDR */
+ cs->timedout = true;
+
+ hdev = cs->ctx->hdev;
+ ctx_asid = cs->ctx->asid;
+
+ /* TODO: add information about last signaled seq and last emitted seq */
+ dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);
+
+ cs_put(cs);
+
+ if (hdev->reset_on_lockup)
+ hl_device_reset(hdev, false, false);
+}
+
+static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
+ struct hl_cs **cs_new)
+{
+ struct hl_dma_fence *fence;
+ struct dma_fence *other = NULL;
+ struct hl_cs *cs;
+ int rc;
+
+ cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
+ if (!cs)
+ return -ENOMEM;
+
+ cs->ctx = ctx;
+ cs->submitted = false;
+ cs->completed = false;
+ INIT_LIST_HEAD(&cs->job_list);
+ INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
+ kref_init(&cs->refcount);
+ spin_lock_init(&cs->job_lock);
+
+ fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
+ if (!fence) {
+ rc = -ENOMEM;
+ goto free_cs;
+ }
+
+ fence->hdev = hdev;
+ spin_lock_init(&fence->lock);
+ cs->fence = &fence->base_fence;
+
+ spin_lock(&ctx->cs_lock);
+
+ fence->cs_seq = ctx->cs_sequence;
+ other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
+ if ((other) && (!dma_fence_is_signaled(other))) {
+ spin_unlock(&ctx->cs_lock);
+ rc = -EAGAIN;
+ goto free_fence;
+ }
+
+ dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
+ ctx->asid, ctx->cs_sequence);
+
+ cs->sequence = fence->cs_seq;
+
+ ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
+ &fence->base_fence;
+ ctx->cs_sequence++;
+
+ dma_fence_get(&fence->base_fence);
+
+ dma_fence_put(other);
+
+ spin_unlock(&ctx->cs_lock);
+
+ *cs_new = cs;
+
+ return 0;
+
+free_fence:
+ kfree(fence);
+free_cs:
+ kfree(cs);
+ return rc;
+}
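+
+/*
+ * Back-pressure sketch (illustrative): cs_pending[] is a small ring
+ * indexed by sequence & (HL_MAX_PENDING_CS - 1). If a new submission
+ * lands on a slot whose fence is not yet signaled, HL_MAX_PENDING_CS
+ * command submissions are already in flight for this context and
+ * allocate_cs() backs off with -EAGAIN instead of overwriting the slot.
+ */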
+
+static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_cs_job *job, *tmp;
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+ free_job(hdev, job);
+}
+
+void hl_cs_rollback_all(struct hl_device *hdev)
+{
+ struct hl_cs *cs, *tmp;
+
+ /* flush all completions */
+ flush_workqueue(hdev->cq_wq);
+
+ /* Make sure we don't have leftovers in the H/W queues mirror list */
+ list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
+ mirror_node) {
+ cs_get(cs);
+ cs->aborted = true;
+ dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
+ cs->ctx->asid, cs->sequence);
+ cs_rollback(hdev, cs);
+ cs_put(cs);
+ }
+}
+
+static void job_wq_completion(struct work_struct *work)
+{
+ struct hl_cs_job *job = container_of(work, struct hl_cs_job,
+ finish_work);
+ struct hl_cs *cs = job->cs;
+ struct hl_device *hdev = cs->ctx->hdev;
+
+ /* job is no longer needed */
+ free_job(hdev, job);
+}
+
+static struct hl_cb *validate_queue_index(struct hl_device *hdev,
+ struct hl_cb_mgr *cb_mgr,
+ struct hl_cs_chunk *chunk,
+ bool *ext_queue)
+{
+ struct asic_fixed_properties *asic = &hdev->asic_prop;
+ struct hw_queue_properties *hw_queue_prop;
+ u32 cb_handle;
+ struct hl_cb *cb;
+
+ /* Assume external queue */
+ *ext_queue = true;
+
+ hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
+
+ if ((chunk->queue_index >= HL_MAX_QUEUES) ||
+ (hw_queue_prop->type == QUEUE_TYPE_NA)) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ chunk->queue_index);
+ return NULL;
+ }
+
+ if (hw_queue_prop->kmd_only) {
+ dev_err(hdev->dev, "Queue index %d is restricted for KMD\n",
+ chunk->queue_index);
+ return NULL;
+ } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
+ *ext_queue = false;
+ return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
+ }
+
+ /* Retrieve CB object */
+ cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
+
+ cb = hl_cb_get(hdev, cb_mgr, cb_handle);
+ if (!cb) {
+ dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
+ return NULL;
+ }
+
+ if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
+ dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
+ goto release_cb;
+ }
+
+ spin_lock(&cb->lock);
+ cb->cs_cnt++;
+ spin_unlock(&cb->lock);
+
+ return cb;
+
+release_cb:
+ hl_cb_put(cb);
+ return NULL;
+}
+
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
+{
+ struct hl_cs_job *job;
+
+ job = kzalloc(sizeof(*job), GFP_ATOMIC);
+ if (!job)
+ return NULL;
+
+ job->ext_queue = ext_queue;
+
+ if (job->ext_queue) {
+ INIT_LIST_HEAD(&job->userptr_list);
+ INIT_WORK(&job->finish_work, job_wq_completion);
+ }
+
+ return job;
+}
+
+static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
+ u32 num_chunks, u64 *cs_seq)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cs_chunk *cs_chunk_array;
+ struct hl_cs_job *job;
+ struct hl_cs *cs;
+ struct hl_cb *cb;
+ bool ext_queue_present = false;
+ u32 size_to_copy;
+ int rc, i, parse_cnt;
+
+ *cs_seq = ULLONG_MAX;
+
+ if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ dev_err(hdev->dev,
+ "Number of chunks can NOT be larger than %d\n",
+ HL_MAX_JOBS_PER_CS);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
+ GFP_ATOMIC);
+ if (!cs_chunk_array) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
+ if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
+ dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
+ rc = -EFAULT;
+ goto free_cs_chunk_array;
+ }
+
+ /* increment refcnt for context */
+ hl_ctx_get(hdev, hpriv->ctx);
+
+ rc = allocate_cs(hdev, hpriv->ctx, &cs);
+ if (rc) {
+ hl_ctx_put(hpriv->ctx);
+ goto free_cs_chunk_array;
+ }
+
+ *cs_seq = cs->sequence;
+
+ hl_debugfs_add_cs(cs);
+
+ /* Validate ALL the CS chunks before submitting the CS */
+ for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
+ struct hl_cs_chunk *chunk = &cs_chunk_array[i];
+ bool ext_queue;
+
+ cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
+ &ext_queue);
+ if (ext_queue) {
+ ext_queue_present = true;
+ if (!cb) {
+ rc = -EINVAL;
+ goto free_cs_object;
+ }
+ }
+
+ job = hl_cs_allocate_job(hdev, ext_queue);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ if (ext_queue)
+ goto release_cb;
+ else
+ goto free_cs_object;
+ }
+
+ job->id = i + 1;
+ job->cs = cs;
+ job->user_cb = cb;
+ job->user_cb_size = chunk->cb_size;
+ if (job->ext_queue)
+ job->job_cb_size = cb->size;
+ else
+ job->job_cb_size = chunk->cb_size;
+ job->hw_queue_id = chunk->queue_index;
+
+ cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+ list_add_tail(&job->cs_node, &cs->job_list);
+
+ /*
+ * Increment the CS reference count. When it reaches 0, the CS is
+ * done and we can signal the user and free all of its resources.
+ * Only increment for jobs on external queues, because only those
+ * jobs get a completion
+ */
+ if (job->ext_queue)
+ cs_get(cs);
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = cs_parser(hpriv, job);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
+ cs->ctx->asid, cs->sequence, job->id, rc);
+ goto free_cs_object;
+ }
+ }
+
+ if (!ext_queue_present) {
+ dev_err(hdev->dev,
+ "Reject CS %d.%llu because no external queues jobs\n",
+ cs->ctx->asid, cs->sequence);
+ rc = -EINVAL;
+ goto free_cs_object;
+ }
+
+ rc = hl_hw_queue_schedule_cs(cs);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu to H/W queues, error %d\n",
+ cs->ctx->asid, cs->sequence, rc);
+ goto free_cs_object;
+ }
+
+ rc = HL_CS_STATUS_SUCCESS;
+ goto put_cs;
+
+release_cb:
+ spin_lock(&cb->lock);
+ cb->cs_cnt--;
+ spin_unlock(&cb->lock);
+ hl_cb_put(cb);
+free_cs_object:
+ cs_rollback(hdev, cs);
+ *cs_seq = ULLONG_MAX;
+ /* The path below is both for good and erroneous exits */
+put_cs:
+ /* We finished with the CS in this function, so put the ref */
+ cs_put(cs);
+free_cs_chunk_array:
+ kfree(cs_chunk_array);
+out:
+ return rc;
+}
+
+int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ union hl_cs_args *args = data;
+ struct hl_ctx *ctx = hpriv->ctx;
+ void __user *chunks;
+ u32 num_chunks;
+ u64 cs_seq = ULLONG_MAX;
+ int rc, do_restore;
+ bool need_soft_reset = false;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_warn(hdev->dev,
+ "Device is %s. Can't submit new CS\n",
+ atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
+
+ if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+ long ret;
+
+ chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
+ num_chunks = args->in.num_chunks_restore;
+
+ mutex_lock(&hpriv->restore_phase_mutex);
+
+ if (do_restore) {
+ rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "Failed to switch to context %d, rejecting CS! %d\n",
+ ctx->asid, rc);
+ /*
+ * If we timed out, or if the device is not idle
+ * while we want to do a context switch (-EBUSY),
+ * we need to soft-reset because the QMAN is
+ * probably stuck. However, we can't call reset
+ * here directly because of a deadlock, so we
+ * need to do it at the very end of this
+ * function
+ */
+ if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
+ need_soft_reset = true;
+ mutex_unlock(&hpriv->restore_phase_mutex);
+ goto out;
+ }
+ }
+
+ hdev->asic_funcs->restore_phase_topology(hdev);
+
+ if (num_chunks == 0) {
+ dev_dbg(hdev->dev,
+ "Need to run restore phase but restore CS is empty\n");
+ rc = 0;
+ } else {
+ rc = _hl_cs_ioctl(hpriv, chunks, num_chunks,
+ &cs_seq);
+ }
+
+ mutex_unlock(&hpriv->restore_phase_mutex);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to submit restore CS for context %d (%d)\n",
+ ctx->asid, rc);
+ goto out;
+ }
+
+ /* Need to wait for restore completion before execution phase */
+ if (num_chunks > 0) {
+ ret = _hl_cs_wait_ioctl(hdev, ctx,
+ jiffies_to_usecs(hdev->timeout_jiffies),
+ cs_seq);
+ if (ret <= 0) {
+ dev_err(hdev->dev,
+ "Restore CS for context %d failed to complete %ld\n",
+ ctx->asid, ret);
+ rc = -ENOEXEC;
+ goto out;
+ }
+ }
+
+ ctx->thread_restore_wait_token = 1;
+ } else if (!ctx->thread_restore_wait_token) {
+ u32 tmp;
+
+ rc = hl_poll_timeout_memory(hdev,
+ (u64) (uintptr_t) &ctx->thread_restore_wait_token,
+ jiffies_to_usecs(hdev->timeout_jiffies),
+ &tmp);
+
+ if (rc || !tmp) {
+ dev_err(hdev->dev,
+ "restore phase hasn't finished in time\n");
+ rc = -ETIMEDOUT;
+ goto out;
+ }
+ }
+
+ chunks = (void __user *)(uintptr_t)args->in.chunks_execute;
+ num_chunks = args->in.num_chunks_execute;
+
+ if (num_chunks == 0) {
+ dev_err(hdev->dev,
+ "Got execute CS with 0 chunks, context %d\n",
+ ctx->asid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq);
+
+out:
+ if (rc != -EAGAIN) {
+ memset(args, 0, sizeof(*args));
+ args->out.status = rc;
+ args->out.seq = cs_seq;
+ }
+
+ if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
+ hl_device_reset(hdev, false, false);
+
+ return rc;
+}
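
The restore/execute split in hl_cs_ioctl() is a classic "one thread initializes, the rest wait" handshake: the first submitter after a reset wins the atomic_cmpxchg() on thread_restore_token, performs the context switch plus the restore CS, and only then sets thread_restore_wait_token; every other submitter polls the wait token before sending execute CSs. A standalone model of that handshake using C11 atomics (no driver or H/W involved):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct restore_gate {
            atomic_int restore_needed;      /* models thread_restore_token */
            atomic_bool restore_done;       /* models thread_restore_wait_token */
    };

    /* Returns true for the single caller that must run the restore phase. */
    static bool claim_restore(struct restore_gate *g)
    {
            int expected = 1;

            return atomic_compare_exchange_strong(&g->restore_needed,
                                                  &expected, 0);
    }

    /* Called by the winner once the restore CS has completed. */
    static void publish_restore_done(struct restore_gate *g)
    {
            atomic_store(&g->restore_done, true);
    }

    /* Everyone else polls this (with a timeout in the real driver). */
    static bool restore_finished(struct restore_gate *g)
    {
            return atomic_load(&g->restore_done);
    }
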
+
+static long _hl_cs_wait_ioctl(struct hl_device *hdev,
+ struct hl_ctx *ctx, u64 timeout_us, u64 seq)
+{
+ struct dma_fence *fence;
+ unsigned long timeout;
+ long rc;
+
+ if (timeout_us == MAX_SCHEDULE_TIMEOUT)
+ timeout = timeout_us;
+ else
+ timeout = usecs_to_jiffies(timeout_us);
+
+ hl_ctx_get(hdev, ctx);
+
+ fence = hl_ctx_get_fence(ctx, seq);
+ if (IS_ERR(fence)) {
+ rc = PTR_ERR(fence);
+ } else if (fence) {
+ rc = dma_fence_wait_timeout(fence, true, timeout);
+ if (fence->error == -ETIMEDOUT)
+ rc = -ETIMEDOUT;
+ else if (fence->error == -EIO)
+ rc = -EIO;
+ dma_fence_put(fence);
+ } else
+ rc = 1;
+
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
+int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ union hl_wait_cs_args *args = data;
+ u64 seq = args->in.seq;
+ long rc;
+
+ rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);
+
+ memset(args, 0, sizeof(*args));
+
+ if (rc < 0) {
+ dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n",
+ rc, seq);
+ if (rc == -ERESTARTSYS) {
+ args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ rc = -EINTR;
+ } else if (rc == -ETIMEDOUT) {
+ args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
+ } else if (rc == -EIO) {
+ args->out.status = HL_WAIT_CS_STATUS_ABORTED;
+ }
+ return rc;
+ }
+
+ if (rc == 0)
+ args->out.status = HL_WAIT_CS_STATUS_BUSY;
+ else
+ args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+
+ return 0;
+}
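
From user space, the two ioctls above are used as a pair: the CS ioctl returns a sequence number and the wait ioctl blocks on it. The sketch below only uses fields that are referenced in this file; the header path and ioctl macro names are assumptions to be checked against include/uapi/misc/habanalabs.h, and cb_handle/queue_index are placeholders obtained elsewhere (CB allocation, device properties).

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h>            /* assumed uapi header location */

    static int submit_and_wait(int fd, uint64_t cb_handle, uint32_t queue_index,
                               uint32_t cb_size, uint64_t timeout_us)
    {
            struct hl_cs_chunk chunk;
            union hl_cs_args cs_args;
            union hl_wait_cs_args wait_args;

            memset(&chunk, 0, sizeof(chunk));
            chunk.cb_handle = cb_handle;
            chunk.queue_index = queue_index;
            chunk.cb_size = cb_size;

            memset(&cs_args, 0, sizeof(cs_args));
            cs_args.in.chunks_execute = (uint64_t) (uintptr_t) &chunk;
            cs_args.in.num_chunks_execute = 1;

            if (ioctl(fd, HL_IOCTL_CS, &cs_args))       /* assumed macro name */
                    return -1;

            memset(&wait_args, 0, sizeof(wait_args));
            wait_args.in.seq = cs_args.out.seq;
            wait_args.in.timeout_us = timeout_us;

            if (ioctl(fd, HL_IOCTL_WAIT_CS, &wait_args)) /* assumed macro name */
                    return -1;

            return wait_args.out.status;    /* HL_WAIT_CS_STATUS_* */
    }
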
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
new file mode 100644
index 000000000000..619ace1c4ef7
--- /dev/null
+++ b/drivers/misc/habanalabs/context.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+static void hl_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ int i;
+
+ /*
+ * If we got here, there are no jobs waiting for this context
+ * on its queues, so we can safely remove it.
+ * For each CS we increment the context's ref count, and for every
+ * CS that finishes we decrement it; we only reach this function
+ * when the ref count drops to 0
+ */
+
+ for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
+ dma_fence_put(ctx->cs_pending[i]);
+
+ if (ctx->asid != HL_KERNEL_ASID_ID) {
+ hl_vm_ctx_fini(ctx);
+ hl_asid_free(hdev, ctx->asid);
+ }
+}
+
+void hl_ctx_do_release(struct kref *ref)
+{
+ struct hl_ctx *ctx;
+
+ ctx = container_of(ref, struct hl_ctx, refcount);
+
+ hl_ctx_fini(ctx);
+
+ if (ctx->hpriv)
+ hl_hpriv_put(ctx->hpriv);
+
+ kfree(ctx);
+}
+
+int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
+{
+ struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
+ struct hl_ctx *ctx;
+ int rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+
+ rc = hl_ctx_init(hdev, ctx, false);
+ if (rc)
+ goto free_ctx;
+
+ hl_hpriv_get(hpriv);
+ ctx->hpriv = hpriv;
+
+ /* TODO: remove for multiple contexts */
+ hpriv->ctx = ctx;
+ hdev->user_ctx = ctx;
+
+ mutex_lock(&mgr->ctx_lock);
+ rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ mutex_unlock(&mgr->ctx_lock);
+
+ if (rc < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
+ hl_ctx_free(hdev, ctx);
+ goto out_err;
+ }
+
+ return 0;
+
+free_ctx:
+ kfree(ctx);
+out_err:
+ return rc;
+}
+
+void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
+ return;
+
+ dev_warn(hdev->dev,
+ "Context %d closed or terminated but its CS are executing\n",
+ ctx->asid);
+}
+
+int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
+{
+ int rc = 0;
+
+ ctx->hdev = hdev;
+
+ kref_init(&ctx->refcount);
+
+ ctx->cs_sequence = 1;
+ spin_lock_init(&ctx->cs_lock);
+ atomic_set(&ctx->thread_restore_token, 1);
+ ctx->thread_restore_wait_token = 0;
+
+ if (is_kernel_ctx) {
+ ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
+ } else {
+ ctx->asid = hl_asid_alloc(hdev);
+ if (!ctx->asid) {
+ dev_err(hdev->dev, "No free ASID, failed to create context\n");
+ return -ENOMEM;
+ }
+
+ rc = hl_vm_ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to init mem ctx module\n");
+ rc = -ENOMEM;
+ goto mem_ctx_err;
+ }
+ }
+
+ return 0;
+
+mem_ctx_err:
+ if (ctx->asid != HL_KERNEL_ASID_ID)
+ hl_asid_free(hdev, ctx->asid);
+
+ return rc;
+}
+
+void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ kref_get(&ctx->refcount);
+}
+
+int hl_ctx_put(struct hl_ctx *ctx)
+{
+ return kref_put(&ctx->refcount, hl_ctx_do_release);
+}
+
+struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct dma_fence *fence;
+
+ spin_lock(&ctx->cs_lock);
+
+ if (seq >= ctx->cs_sequence) {
+ dev_notice(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu\n",
+ seq, ctx->cs_sequence);
+ spin_unlock(&ctx->cs_lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ seq, ctx->cs_sequence);
+ spin_unlock(&ctx->cs_lock);
+ return NULL;
+ }
+
+ fence = dma_fence_get(
+ ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
+ spin_unlock(&ctx->cs_lock);
+
+ return fence;
+}
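
hl_ctx_get_fence() only hands out a fence for sequence numbers inside a sliding window of HL_MAX_PENDING_CS values ending just below cs_sequence: anything at or above cs_sequence was never submitted (-EINVAL), anything below the window has had its slot recycled and is reported as NULL, which the wait path treats as already completed. The window test, isolated for clarity:

    /* seq is waitable iff cur - HL_MAX_PENDING_CS <= seq < cur */
    enum seq_state { SEQ_FUTURE, SEQ_GONE, SEQ_WAITABLE };

    static enum seq_state classify_seq(u64 seq, u64 cur)
    {
            if (seq >= cur)
                    return SEQ_FUTURE;      /* -EINVAL in the driver */
            if (seq + HL_MAX_PENDING_CS < cur)
                    return SEQ_GONE;        /* fence slot recycled, NULL */
            return SEQ_WAITABLE;
    }
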
+
+/*
+ * hl_ctx_mgr_init - initialize the context manager
+ *
+ * @mgr: pointer to context manager structure
+ *
+ * This manager is an object inside the hpriv object of the user process.
+ * The function is called when a user process opens the FD.
+ */
+void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
+{
+ mutex_init(&mgr->ctx_lock);
+ idr_init(&mgr->ctx_handles);
+}
+
+/*
+ * hl_ctx_mgr_fini - finalize the context manager
+ *
+ * @hdev: pointer to device structure
+ * @mgr: pointer to context manager structure
+ *
+ * This function goes over all the contexts in the manager and frees them.
+ * It is called when a process closes the FD.
+ */
+void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
+{
+ struct hl_ctx *ctx;
+ struct idr *idp;
+ u32 id;
+
+ idp = &mgr->ctx_handles;
+
+ idr_for_each_entry(idp, ctx, id)
+ hl_ctx_free(hdev, ctx);
+
+ idr_destroy(&mgr->ctx_handles);
+ mutex_destroy(&mgr->ctx_lock);
+}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
new file mode 100644
index 000000000000..a53c12aff6ad
--- /dev/null
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -0,0 +1,1077 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#define MMU_ADDR_BUF_SIZE 40
+#define MMU_ASID_BUF_SIZE 10
+#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
+
+static struct dentry *hl_debug_root;
+
+static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
+ u8 i2c_reg, u32 *val)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -EBUSY;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_RD <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.i2c_bus = i2c_bus;
+ pkt.i2c_addr = i2c_addr;
+ pkt.i2c_reg = i2c_reg;
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, (long *) val);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
+
+ return rc;
+}
+
+static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
+ u8 i2c_reg, u32 val)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -EBUSY;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_WR <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.i2c_bus = i2c_bus;
+ pkt.i2c_addr = i2c_addr;
+ pkt.i2c_reg = i2c_reg;
+ pkt.value = __cpu_to_le64(val);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, NULL);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
+
+ return rc;
+}
+
+static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_LED_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.led_index = __cpu_to_le32(led);
+ pkt.value = __cpu_to_le64(state);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, NULL);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
+}
+
+static int command_buffers_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cb *cb;
+ bool first = true;
+
+ spin_lock(&dev_entry->cb_spinlock);
+
+ list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? CS counter\n");
+ seq_puts(s, "---------------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " %03d %d 0x%08x %d %d %d\n",
+ cb->id, cb->ctx_id, cb->size,
+ kref_read(&cb->refcount),
+ cb->mmap, cb->cs_cnt);
+ }
+
+ spin_unlock(&dev_entry->cb_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int command_submission_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cs *cs;
+ bool first = true;
+
+ spin_lock(&dev_entry->cs_spinlock);
+
+ list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n");
+ seq_puts(s, "------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " %llu %d %d %d %d\n",
+ cs->sequence, cs->ctx->asid,
+ kref_read(&cs->refcount),
+ cs->submitted, cs->completed);
+ }
+
+ spin_unlock(&dev_entry->cs_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int command_submission_jobs_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_cs_job *job;
+ bool first = true;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+
+ list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n");
+ seq_puts(s, "---------------------------------------\n");
+ }
+ if (job->cs)
+ seq_printf(s,
+ " %02d %llu %d %d\n",
+ job->id, job->cs->sequence, job->cs->ctx->asid,
+ job->hw_queue_id);
+ else
+ seq_printf(s,
+ " %02d 0 %d %d\n",
+ job->id, HL_KERNEL_ASID_ID, job->hw_queue_id);
+ }
+
+ spin_unlock(&dev_entry->cs_job_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int userptr_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_userptr *userptr;
+ char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
+ "DMA_FROM_DEVICE", "DMA_NONE"};
+ bool first = true;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+
+ list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " user virtual address size dma dir\n");
+ seq_puts(s, "----------------------------------------------------------\n");
+ }
+ seq_printf(s,
+ " 0x%-14llx %-10u %-30s\n",
+ userptr->addr, userptr->size, dma_dir[userptr->dir]);
+ }
+
+ spin_unlock(&dev_entry->userptr_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int vm_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_ctx *ctx;
+ struct hl_vm *vm;
+ struct hl_vm_hash_node *hnode;
+ struct hl_userptr *userptr;
+ struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ enum vm_type_t *vm_type;
+ bool once = true;
+ int i, j;
+
+ if (!dev_entry->hdev->mmu_enable)
+ return 0;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+
+ list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
+ once = false;
+ seq_puts(s, "\n\n----------------------------------------------------");
+ seq_puts(s, "\n----------------------------------------------------\n\n");
+ seq_printf(s, "ctx asid: %u\n", ctx->asid);
+
+ seq_puts(s, "\nmappings:\n\n");
+ seq_puts(s, " virtual address size handle\n");
+ seq_puts(s, "----------------------------------------------------\n");
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_for_each(ctx->mem_hash, i, hnode, node) {
+ vm_type = hnode->ptr;
+
+ if (*vm_type == VM_TYPE_USERPTR) {
+ userptr = hnode->ptr;
+ seq_printf(s,
+ " 0x%-14llx %-10u\n",
+ hnode->vaddr, userptr->size);
+ } else {
+ phys_pg_pack = hnode->ptr;
+ seq_printf(s,
+ " 0x%-14llx %-10u %-4u\n",
+ hnode->vaddr, phys_pg_pack->total_size,
+ phys_pg_pack->handle);
+ }
+ }
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ vm = &ctx->hdev->vm;
+ spin_lock(&vm->idr_lock);
+
+ if (!idr_is_empty(&vm->phys_pg_pack_handles))
+ seq_puts(s, "\n\nallocations:\n");
+
+ idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
+ if (phys_pg_pack->asid != ctx->asid)
+ continue;
+
+ seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
+ seq_printf(s, "page size: %u\n\n",
+ phys_pg_pack->page_size);
+ seq_puts(s, " physical address\n");
+ seq_puts(s, "---------------------\n");
+ /* use a separate index here: 'i' is the idr_for_each_entry() cursor */
+ for (j = 0 ; j < phys_pg_pack->npages ; j++) {
+ seq_printf(s, " 0x%-14llx\n",
+ phys_pg_pack->pages[j]);
+ }
+ }
+ spin_unlock(&vm->idr_lock);
+
+ }
+
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+
+ if (!once)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+/* these inline functions are copied from mmu.c */
+static inline u64 get_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP0_MASK) >> HOP0_SHIFT);
+}
+
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP1_MASK) >> HOP1_SHIFT);
+}
+
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP2_MASK) >> HOP2_SHIFT);
+}
+
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP3_MASK) >> HOP3_SHIFT);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & HOP4_MASK) >> HOP4_SHIFT);
+}
+
+static inline u64 get_next_hop_addr(u64 curr_pte)
+{
+ if (curr_pte & PAGE_PRESENT_MASK)
+ return curr_pte & PHYS_ADDR_MASK;
+ else
+ return ULLONG_MAX;
+}
+
+static int mmu_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_ctx *ctx = hdev->user_ctx;
+
+ u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
+ hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
+ hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
+ hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
+ hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
+ virt_addr = dev_entry->mmu_addr;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (!ctx) {
+ dev_err(hdev->dev, "no ctx available\n");
+ return 0;
+ }
+
+ mutex_lock(&ctx->mmu_lock);
+
+ /* the following lookup is copied from unmap() in mmu.c */
+
+ hop0_addr = get_hop0_addr(ctx);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+ hop1_addr = get_next_hop_addr(hop0_pte);
+
+ if (hop1_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+ hop2_addr = get_next_hop_addr(hop1_pte);
+
+ if (hop2_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+ hop3_addr = get_next_hop_addr(hop2_pte);
+
+ if (hop3_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+
+ if (!(hop3_pte & LAST_MASK)) {
+ hop4_addr = get_next_hop_addr(hop3_pte);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+ if (!(hop4_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ } else {
+ if (!(hop3_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ }
+
+ seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
+ dev_entry->mmu_asid, dev_entry->mmu_addr);
+
+ seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr);
+ seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr);
+ seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte);
+
+ seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr);
+ seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr);
+ seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte);
+
+ seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr);
+ seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr);
+ seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte);
+
+ seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr);
+ seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
+ seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);
+
+ if (!(hop3_pte & LAST_MASK)) {
+ seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
+ seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
+ seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
+ }
+
+ goto out;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+out:
+ mutex_unlock(&ctx->mmu_lock);
+
+ return 0;
+}
+
+static ssize_t mmu_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[MMU_KBUF_SIZE], asid_kbuf[MMU_ASID_BUF_SIZE],
+ addr_kbuf[MMU_ADDR_BUF_SIZE];
+ char *c;
+ ssize_t rc;
+
+ if (!hdev->mmu_enable)
+ return count;
+
+ memset(kbuf, 0, sizeof(kbuf));
+ memset(asid_kbuf, 0, sizeof(asid_kbuf));
+ memset(addr_kbuf, 0, sizeof(addr_kbuf));
+
+ /* reject writes that don't fit in kbuf (leave room for the NUL) */
+ if (count > sizeof(kbuf) - 1)
+ goto err;
+
+ if (copy_from_user(kbuf, buf, count))
+ goto err;
+
+ kbuf[MMU_KBUF_SIZE - 1] = 0;
+
+ c = strchr(kbuf, ' ');
+ if (!c)
+ goto err;
+
+ memcpy(asid_kbuf, kbuf, c - kbuf);
+
+ rc = kstrtouint(asid_kbuf, 10, &dev_entry->mmu_asid);
+ if (rc)
+ goto err;
+
+ c = strstr(kbuf, " 0x");
+ if (!c)
+ goto err;
+
+ c += 3;
+ memcpy(addr_kbuf, c, (kbuf + count) - c);
+
+ rc = kstrtoull(addr_kbuf, 16, &dev_entry->mmu_addr);
+ if (rc)
+ goto err;
+
+ return count;
+
+err:
+ dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");
+
+ return -EINVAL;
+}
+
+static ssize_t hl_data_read32(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u32 val;
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ rc = hdev->asic_funcs->debugfs_read32(hdev, entry->addr, &val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to read from 0x%010llx\n",
+ entry->addr);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%08x\n", val);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_data_write32(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ rc = hdev->asic_funcs->debugfs_write32(hdev, entry->addr, value);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
+ value, entry->addr);
+ return rc;
+ }
+
+ return count;
+}
+
+static ssize_t hl_get_power_state(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+ int i;
+
+ if (*ppos)
+ return 0;
+
+ if (hdev->pdev->current_state == PCI_D0)
+ i = 1;
+ else if (hdev->pdev->current_state == PCI_D3hot)
+ i = 2;
+ else
+ i = 3;
+
+ sprintf(tmp_buf,
+ "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ if (value == 1) {
+ pci_set_power_state(hdev->pdev, PCI_D0);
+ pci_restore_state(hdev->pdev);
+ rc = pci_enable_device(hdev->pdev);
+ } else if (value == 2) {
+ pci_save_state(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+ pci_set_power_state(hdev->pdev, PCI_D3hot);
+ } else {
+ dev_dbg(hdev->dev, "invalid power state value %u\n", value);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u32 val;
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
+ entry->i2c_reg, &val);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to read from I2C bus %d, addr %d, reg %d\n",
+ entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%02x\n", val);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
+ entry->i2c_reg, value);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
+ value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ return rc;
+ }
+
+ return count;
+}
+
+static ssize_t hl_led0_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 0, value);
+
+ return count;
+}
+
+static ssize_t hl_led1_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 1, value);
+
+ return count;
+}
+
+static ssize_t hl_led2_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ value = value ? 1 : 0;
+
+ hl_debugfs_led_set(hdev, 2, value);
+
+ return count;
+}
+
+static ssize_t hl_device_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf,
+ "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_device_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char data[30] = {0};
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ simple_write_to_buffer(data, 29, ppos, buf, count);
+
+ if (strncmp("disable", data, strlen("disable")) == 0) {
+ hdev->disabled = true;
+ } else if (strncmp("enable", data, strlen("enable")) == 0) {
+ hdev->disabled = false;
+ } else if (strncmp("suspend", data, strlen("suspend")) == 0) {
+ hdev->asic_funcs->suspend(hdev);
+ } else if (strncmp("resume", data, strlen("resume")) == 0) {
+ hdev->asic_funcs->resume(hdev);
+ } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
+ hdev->device_cpu_disabled = true;
+ } else {
+ dev_err(hdev->dev,
+ "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
+ count = -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations hl_data32b_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_data_read32,
+ .write = hl_data_write32
+};
+
+static const struct file_operations hl_i2c_data_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_i2c_data_read,
+ .write = hl_i2c_data_write
+};
+
+static const struct file_operations hl_power_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_get_power_state,
+ .write = hl_set_power_state
+};
+
+static const struct file_operations hl_led0_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led0_write
+};
+
+static const struct file_operations hl_led1_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led1_write
+};
+
+static const struct file_operations hl_led2_fops = {
+ .owner = THIS_MODULE,
+ .write = hl_led2_write
+};
+
+static const struct file_operations hl_device_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_device_read,
+ .write = hl_device_write
+};
+
+static const struct hl_info_list hl_debugfs_list[] = {
+ {"command_buffers", command_buffers_show, NULL},
+ {"command_submission", command_submission_show, NULL},
+ {"command_submission_jobs", command_submission_jobs_show, NULL},
+ {"userptr", userptr_show, NULL},
+ {"vm", vm_show, NULL},
+ {"mmu", mmu_show, mmu_write},
+};
+
+static int hl_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct hl_debugfs_entry *node = inode->i_private;
+
+ return single_open(file, node->info_ent->show, node);
+}
+
+static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct hl_debugfs_entry *node = file->f_inode->i_private;
+
+ if (node->info_ent->write)
+ return node->info_ent->write(file, buf, count, f_pos);
+ else
+ return -EINVAL;
+
+}
+
+static const struct file_operations hl_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = hl_debugfs_open,
+ .read = seq_read,
+ .write = hl_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
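
All of the entries in hl_debugfs_list[] above share this one file_operations: open() binds the per-entry show callback through single_open(), and write() forwards to the optional per-entry write handler. A stripped-down example of adding one more entry in the same style; "example_status_show" is hypothetical and not part of the driver:

    static int example_status_show(struct seq_file *s, void *data)
    {
            struct hl_debugfs_entry *entry = s->private;

            seq_printf(s, "device: %s\n",
                       dev_name(entry->dev_entry->hdev->dev));
            return 0;
    }

    /* would be appended to hl_debugfs_list[]; NULL means read-only */
    static const struct hl_info_list example_entry =
            {"status", example_status_show, NULL};
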
+
+void hl_debugfs_add_device(struct hl_device *hdev)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+ int count = ARRAY_SIZE(hl_debugfs_list);
+ struct hl_debugfs_entry *entry;
+ struct dentry *ent;
+ int i;
+
+ dev_entry->hdev = hdev;
+ dev_entry->entry_arr = kmalloc_array(count,
+ sizeof(struct hl_debugfs_entry),
+ GFP_KERNEL);
+ if (!dev_entry->entry_arr)
+ return;
+
+ INIT_LIST_HEAD(&dev_entry->file_list);
+ INIT_LIST_HEAD(&dev_entry->cb_list);
+ INIT_LIST_HEAD(&dev_entry->cs_list);
+ INIT_LIST_HEAD(&dev_entry->cs_job_list);
+ INIT_LIST_HEAD(&dev_entry->userptr_list);
+ INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
+ mutex_init(&dev_entry->file_mutex);
+ spin_lock_init(&dev_entry->cb_spinlock);
+ spin_lock_init(&dev_entry->cs_spinlock);
+ spin_lock_init(&dev_entry->cs_job_spinlock);
+ spin_lock_init(&dev_entry->userptr_spinlock);
+ spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);
+
+ dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
+ hl_debug_root);
+
+ debugfs_create_x64("addr",
+ 0644,
+ dev_entry->root,
+ &dev_entry->addr);
+
+ debugfs_create_file("data32",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_data32b_fops);
+
+ debugfs_create_file("set_power_state",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_power_fops);
+
+ debugfs_create_u8("i2c_bus",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_bus);
+
+ debugfs_create_u8("i2c_addr",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_addr);
+
+ debugfs_create_u8("i2c_reg",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_reg);
+
+ debugfs_create_file("i2c_data",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_i2c_data_fops);
+
+ debugfs_create_file("led0",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led0_fops);
+
+ debugfs_create_file("led1",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led1_fops);
+
+ debugfs_create_file("led2",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led2_fops);
+
+ debugfs_create_file("device",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_device_fops);
+
+ for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+
+ ent = debugfs_create_file(hl_debugfs_list[i].name,
+ 0444,
+ dev_entry->root,
+ entry,
+ &hl_debugfs_fops);
+ entry->dent = ent;
+ entry->info_ent = &hl_debugfs_list[i];
+ entry->dev_entry = dev_entry;
+ }
+}
+
+void hl_debugfs_remove_device(struct hl_device *hdev)
+{
+ struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
+
+ debugfs_remove_recursive(entry->root);
+
+ mutex_destroy(&entry->file_mutex);
+ kfree(entry->entry_arr);
+}
+
+void hl_debugfs_add_file(struct hl_fpriv *hpriv)
+{
+ struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
+
+ mutex_lock(&dev_entry->file_mutex);
+ list_add(&hpriv->debugfs_list, &dev_entry->file_list);
+ mutex_unlock(&dev_entry->file_mutex);
+}
+
+void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
+{
+ struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
+
+ mutex_lock(&dev_entry->file_mutex);
+ list_del(&hpriv->debugfs_list);
+ mutex_unlock(&dev_entry->file_mutex);
+}
+
+void hl_debugfs_add_cb(struct hl_cb *cb)
+{
+ struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cb_spinlock);
+ list_add(&cb->debugfs_list, &dev_entry->cb_list);
+ spin_unlock(&dev_entry->cb_spinlock);
+}
+
+void hl_debugfs_remove_cb(struct hl_cb *cb)
+{
+ struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cb_spinlock);
+ list_del(&cb->debugfs_list);
+ spin_unlock(&dev_entry->cb_spinlock);
+}
+
+void hl_debugfs_add_cs(struct hl_cs *cs)
+{
+ struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_spinlock);
+ list_add(&cs->debugfs_list, &dev_entry->cs_list);
+ spin_unlock(&dev_entry->cs_spinlock);
+}
+
+void hl_debugfs_remove_cs(struct hl_cs *cs)
+{
+ struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_spinlock);
+ list_del(&cs->debugfs_list);
+ spin_unlock(&dev_entry->cs_spinlock);
+}
+
+void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+ list_add(&job->debugfs_list, &dev_entry->cs_job_list);
+ spin_unlock(&dev_entry->cs_job_spinlock);
+}
+
+void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->cs_job_spinlock);
+ list_del(&job->debugfs_list);
+ spin_unlock(&dev_entry->cs_job_spinlock);
+}
+
+void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+ list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
+ spin_unlock(&dev_entry->userptr_spinlock);
+}
+
+void hl_debugfs_remove_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+ list_del(&userptr->debugfs_list);
+ spin_unlock(&dev_entry->userptr_spinlock);
+}
+
+void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+}
+
+void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ spin_lock(&dev_entry->ctx_mem_hash_spinlock);
+ list_del(&ctx->debugfs_list);
+ spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+}
+
+void __init hl_debugfs_init(void)
+{
+ hl_debug_root = debugfs_create_dir("habanalabs", NULL);
+}
+
+void hl_debugfs_fini(void)
+{
+ debugfs_remove_recursive(hl_debug_root);
+}
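
Put together, and assuming debugfs is mounted at /sys/kernel/debug, the calls above expose one directory per device (hl0 shown) that looks roughly like this; the last six entries come from hl_debugfs_list[], the rest from the explicit debugfs_create_* calls in hl_debugfs_add_device():

    /sys/kernel/debug/habanalabs/hl0/
            addr  data32  set_power_state
            i2c_bus  i2c_addr  i2c_reg  i2c_data
            led0  led1  led2  device
            command_buffers  command_submission  command_submission_jobs
            userptr  vm  mmu

For example, reading a register means writing the address to "addr" and then reading "data32"; the i2c_* and mmu entries take their parameters the same way (the mmu entry prints its own usage string on malformed input).
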
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
new file mode 100644
index 000000000000..de46aa6ed154
--- /dev/null
+++ b/drivers/misc/habanalabs/device.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/hwmon.h>
+
+bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
+{
+ return hdev->disabled || atomic_read(&hdev->in_reset);
+}
+
+static void hpriv_release(struct kref *ref)
+{
+ struct hl_fpriv *hpriv;
+ struct hl_device *hdev;
+
+ hpriv = container_of(ref, struct hl_fpriv, refcount);
+
+ hdev = hpriv->hdev;
+
+ put_pid(hpriv->taskpid);
+
+ hl_debugfs_remove_file(hpriv);
+
+ mutex_destroy(&hpriv->restore_phase_mutex);
+
+ kfree(hpriv);
+
+ /* Now the FD is really closed */
+ atomic_dec(&hdev->fd_open_cnt);
+
+ /* This allows a new user context to open the device */
+ hdev->user_ctx = NULL;
+}
+
+void hl_hpriv_get(struct hl_fpriv *hpriv)
+{
+ kref_get(&hpriv->refcount);
+}
+
+void hl_hpriv_put(struct hl_fpriv *hpriv)
+{
+ kref_put(&hpriv->refcount, hpriv_release);
+}
+
+/*
+ * hl_device_release - release function for habanalabs device
+ *
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ *
+ * Called when a process closes a habanalabs device
+ */
+static int hl_device_release(struct inode *inode, struct file *filp)
+{
+ struct hl_fpriv *hpriv = filp->private_data;
+
+ hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+ hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+
+ filp->private_data = NULL;
+
+ hl_hpriv_put(hpriv);
+
+ return 0;
+}
+
+/*
+ * hl_mmap - mmap function for habanalabs device
+ *
+ * @*filp: pointer to file structure
+ * @*vma: pointer to vm_area_struct of the process
+ *
+ * Called when a process does an mmap on a habanalabs device. The mmap offset
+ * selects the object to map; currently only command buffers are supported.
+ */
+static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct hl_fpriv *hpriv = filp->private_data;
+
+ if ((vma->vm_pgoff & HL_MMAP_CB_MASK) == HL_MMAP_CB_MASK) {
+ vma->vm_pgoff ^= HL_MMAP_CB_MASK;
+ return hl_cb_mmap(hpriv, vma);
+ }
+
+ return -EINVAL;
+}
+
+static const struct file_operations hl_ops = {
+ .owner = THIS_MODULE,
+ .open = hl_device_open,
+ .release = hl_device_release,
+ .mmap = hl_mmap,
+ .unlocked_ioctl = hl_ioctl,
+ .compat_ioctl = hl_ioctl
+};
+
+/*
+ * device_setup_cdev - setup cdev and device for habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @hclass: pointer to the class object of the device
+ * @minor: minor number of the specific device
+ * @fops: file operations to install for this device
+ *
+ * Create a cdev and a Linux device for the habanalabs device. This needs to
+ * be called at the end of the habanalabs device initialization process,
+ * because it exposes the device to the user
+ */
+static int device_setup_cdev(struct hl_device *hdev, struct class *hclass,
+ int minor, const struct file_operations *fops)
+{
+ int err, devno = MKDEV(hdev->major, minor);
+ struct cdev *hdev_cdev = &hdev->cdev;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "hl%d", hdev->id);
+ if (!name)
+ return -ENOMEM;
+
+ cdev_init(hdev_cdev, fops);
+ hdev_cdev->owner = THIS_MODULE;
+ err = cdev_add(hdev_cdev, devno, 1);
+ if (err) {
+ pr_err("Failed to add char device %s\n", name);
+ goto err_cdev_add;
+ }
+
+ hdev->dev = device_create(hclass, NULL, devno, NULL, "%s", name);
+ if (IS_ERR(hdev->dev)) {
+ pr_err("Failed to create device %s\n", name);
+ err = PTR_ERR(hdev->dev);
+ goto err_device_create;
+ }
+
+ dev_set_drvdata(hdev->dev, hdev);
+
+ kfree(name);
+
+ return 0;
+
+err_device_create:
+ cdev_del(hdev_cdev);
+err_cdev_add:
+ kfree(name);
+ return err;
+}
+
+/*
+ * device_early_init - do some early initialization for the habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Install the relevant ASIC function pointers, call the ASIC's early_init
+ * function and allocate the basic resources (ASIDs, workqueues, locks) that
+ * the rest of the initialization depends on
+ */
+static int device_early_init(struct hl_device *hdev)
+{
+ int rc;
+
+ switch (hdev->asic_type) {
+ case ASIC_GOYA:
+ goya_set_asic_funcs(hdev);
+ strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
+ break;
+ default:
+ dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
+ hdev->asic_type);
+ return -EINVAL;
+ }
+
+ rc = hdev->asic_funcs->early_init(hdev);
+ if (rc)
+ return rc;
+
+ rc = hl_asid_init(hdev);
+ if (rc)
+ goto early_fini;
+
+ hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
+ if (hdev->cq_wq == NULL) {
+ dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
+ rc = -ENOMEM;
+ goto asid_fini;
+ }
+
+ hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
+ if (hdev->eq_wq == NULL) {
+ dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
+ rc = -ENOMEM;
+ goto free_cq_wq;
+ }
+
+ hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
+ GFP_KERNEL);
+ if (!hdev->hl_chip_info) {
+ rc = -ENOMEM;
+ goto free_eq_wq;
+ }
+
+ hl_cb_mgr_init(&hdev->kernel_cb_mgr);
+
+ mutex_init(&hdev->fd_open_cnt_lock);
+ mutex_init(&hdev->send_cpu_message_lock);
+ INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
+ spin_lock_init(&hdev->hw_queues_mirror_lock);
+ atomic_set(&hdev->in_reset, 0);
+ atomic_set(&hdev->fd_open_cnt, 0);
+
+ return 0;
+
+free_eq_wq:
+ destroy_workqueue(hdev->eq_wq);
+free_cq_wq:
+ destroy_workqueue(hdev->cq_wq);
+asid_fini:
+ hl_asid_fini(hdev);
+early_fini:
+ if (hdev->asic_funcs->early_fini)
+ hdev->asic_funcs->early_fini(hdev);
+
+ return rc;
+}
+
+/*
+ * device_early_fini - finalize all that was done in device_early_init
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ */
+static void device_early_fini(struct hl_device *hdev)
+{
+ mutex_destroy(&hdev->send_cpu_message_lock);
+
+ hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+
+ kfree(hdev->hl_chip_info);
+
+ destroy_workqueue(hdev->eq_wq);
+ destroy_workqueue(hdev->cq_wq);
+
+ hl_asid_fini(hdev);
+
+ if (hdev->asic_funcs->early_fini)
+ hdev->asic_funcs->early_fini(hdev);
+
+ mutex_destroy(&hdev->fd_open_cnt_lock);
+}
+
+static void set_freq_to_low_job(struct work_struct *work)
+{
+ struct hl_device *hdev = container_of(work, struct hl_device,
+ work_freq.work);
+
+ if (atomic_read(&hdev->fd_open_cnt) == 0)
+ hl_device_set_frequency(hdev, PLL_LOW);
+
+ schedule_delayed_work(&hdev->work_freq,
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+}
+
+static void hl_device_heartbeat(struct work_struct *work)
+{
+ struct hl_device *hdev = container_of(work, struct hl_device,
+ work_heartbeat.work);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ goto reschedule;
+
+ if (!hdev->asic_funcs->send_heartbeat(hdev))
+ goto reschedule;
+
+ dev_err(hdev->dev, "Device heartbeat failed!\n");
+ hl_device_reset(hdev, true, false);
+
+ return;
+
+reschedule:
+ schedule_delayed_work(&hdev->work_heartbeat,
+ usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+}
+
+/*
+ * device_late_init - do late initialization for the habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Do the initialization steps that either need the device H/W queues to be
+ * active or that must happen after the rest of the initialization is finished
+ */
+static int device_late_init(struct hl_device *hdev)
+{
+ int rc;
+
+ INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
+ hdev->high_pll = hdev->asic_prop.high_pll;
+
+ /* force setting to low frequency */
+ atomic_set(&hdev->curr_pll_profile, PLL_LOW);
+
+ if (hdev->pm_mng_profile == PM_AUTO)
+ hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
+ else
+ hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
+
+ if (hdev->asic_funcs->late_init) {
+ rc = hdev->asic_funcs->late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed late initialization for the H/W\n");
+ return rc;
+ }
+ }
+
+ schedule_delayed_work(&hdev->work_freq,
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+
+ if (hdev->heartbeat) {
+ INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
+ schedule_delayed_work(&hdev->work_heartbeat,
+ usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+ }
+
+ hdev->late_init_done = true;
+
+ return 0;
+}
+
+/*
+ * device_late_fini - finalize all that was done in device_late_init
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ */
+static void device_late_fini(struct hl_device *hdev)
+{
+ if (!hdev->late_init_done)
+ return;
+
+ cancel_delayed_work_sync(&hdev->work_freq);
+ if (hdev->heartbeat)
+ cancel_delayed_work_sync(&hdev->work_heartbeat);
+
+ if (hdev->asic_funcs->late_fini)
+ hdev->asic_funcs->late_fini(hdev);
+
+ hdev->late_init_done = false;
+}
+
+/*
+ * hl_device_set_frequency - set the frequency of the device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @freq: the new frequency value
+ *
+ * Change the frequency if needed.
+ * Setting the PLL to low is only allowed when no user process has the
+ * device open.
+ * Returns 0 if no change was done, otherwise returns 1.
+ */
+int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+ enum hl_pll_frequency old_freq =
+ (freq == PLL_HIGH) ? PLL_LOW : PLL_HIGH;
+ int ret;
+
+ if (hdev->pm_mng_profile == PM_MANUAL)
+ return 0;
+
+ ret = atomic_cmpxchg(&hdev->curr_pll_profile, old_freq, freq);
+ if (ret == freq)
+ return 0;
+
+ /*
+ * In case we want to lower the frequency, check that the device is not
+ * open. We must have this check here to work around a race condition
+ * with hl_device_open
+ */
+ if ((freq == PLL_LOW) && (atomic_read(&hdev->fd_open_cnt) > 0)) {
+ atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
+ return 0;
+ }
+
+ dev_dbg(hdev->dev, "Changing device frequency to %s\n",
+ freq == PLL_HIGH ? "high" : "low");
+
+ hdev->asic_funcs->set_pll_profile(hdev, freq);
+
+ return 1;
+}
+
+/*
+ * hl_device_suspend - initiate device suspend
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int hl_device_suspend(struct hl_device *hdev)
+{
+ int rc;
+
+ pci_save_state(hdev->pdev);
+
+ rc = hdev->asic_funcs->suspend(hdev);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to disable PCI access of device CPU\n");
+
+ /* Shut down the device */
+ pci_disable_device(hdev->pdev);
+ pci_set_power_state(hdev->pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/*
+ * hl_device_resume - initiate device resume
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
+int hl_device_resume(struct hl_device *hdev)
+{
+ int rc;
+
+ pci_set_power_state(hdev->pdev, PCI_D0);
+ pci_restore_state(hdev->pdev);
+ rc = pci_enable_device(hdev->pdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to enable PCI device in resume\n");
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->resume(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to enable PCI access from device CPU\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void hl_device_hard_reset_pending(struct work_struct *work)
+{
+ struct hl_device_reset_work *device_reset_work =
+ container_of(work, struct hl_device_reset_work, reset_work);
+ struct hl_device *hdev = device_reset_work->hdev;
+ u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+ struct task_struct *task = NULL;
+
+ /* Flush all processes that are inside hl_open */
+ mutex_lock(&hdev->fd_open_cnt_lock);
+
+ while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+ pending_cnt--;
+
+ dev_info(hdev->dev,
+ "Can't HARD reset, waiting for user to close FD\n");
+ ssleep(1);
+ }
+
+ if (atomic_read(&hdev->fd_open_cnt)) {
+ task = get_pid_task(hdev->user_ctx->hpriv->taskpid,
+ PIDTYPE_PID);
+ if (task) {
+ dev_info(hdev->dev, "Killing user processes\n");
+ send_sig(SIGKILL, task, 1);
+ msleep(100);
+
+ put_task_struct(task);
+ }
+ }
+
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+
+ hl_device_reset(hdev, true, true);
+
+ kfree(device_reset_work);
+}
+
+/*
+ * hl_device_reset - reset the device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @hard_reset: should we do a hard reset of all engines or just reset the
+ * compute/DMA engines
+ * @from_hard_reset_thread: true if called from the dedicated hard-reset work
+ *
+ * Block future CS and wait for pending CS to be enqueued
+ * Call ASIC H/W fini
+ * Flush all completions
+ * Re-initialize all internal data structures
+ * Call ASIC H/W init, late_init
+ * Test queues
+ * Enable device
+ *
+ * Returns 0 for success or an error on failure.
+ */
+int hl_device_reset(struct hl_device *hdev, bool hard_reset,
+ bool from_hard_reset_thread)
+{
+ int i, rc;
+
+ if (!hdev->init_done) {
+ dev_err(hdev->dev,
+ "Can't reset before initialization is done\n");
+ return 0;
+ }
+
+ /*
+ * Prevent concurrency in this function - only one reset should be
+ * done at any given time. We only need to perform this if we didn't
+ * come from the dedicated hard-reset thread
+ */
+ if (!from_hard_reset_thread) {
+ /* Block future CS/VM/JOB completion operations */
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ if (rc)
+ return 0;
+
+ /* This also blocks future CS/VM/JOB completion operations */
+ hdev->disabled = true;
+
+ /*
+ * Flush anyone that is inside the critical section of enqueue
+ * jobs to the H/W
+ */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ dev_err(hdev->dev, "Going to RESET device!\n");
+ }
+
+again:
+ if ((hard_reset) && (!from_hard_reset_thread)) {
+ struct hl_device_reset_work *device_reset_work;
+
+ if (!hdev->pdev) {
+ dev_err(hdev->dev,
+ "Reset action is NOT supported in simulator\n");
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ hdev->hard_reset_pending = true;
+
+ device_reset_work = kzalloc(sizeof(*device_reset_work),
+ GFP_ATOMIC);
+ if (!device_reset_work) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+
+ /*
+ * Because the reset function can't run from interrupt or
+ * from heartbeat work, we need to call the reset function
+ * from a dedicated work
+ */
+ INIT_WORK(&device_reset_work->reset_work,
+ hl_device_hard_reset_pending);
+ device_reset_work->hdev = hdev;
+ schedule_work(&device_reset_work->reset_work);
+
+ return 0;
+ }
+
+ if (hard_reset) {
+ device_late_fini(hdev);
+
+ /*
+ * Now that the heartbeat thread is closed, flush processes
+ * which are sending messages to CPU
+ */
+ mutex_lock(&hdev->send_cpu_message_lock);
+ mutex_unlock(&hdev->send_cpu_message_lock);
+ }
+
+ /*
+ * Halt the engines and disable interrupts so we won't get any more
+ * completions from H/W and we won't have any accesses from the
+ * H/W to the host machine
+ */
+ hdev->asic_funcs->halt_engines(hdev, hard_reset);
+
+ /* Go over all the queues, release all CS and their jobs */
+ hl_cs_rollback_all(hdev);
+
+ if (hard_reset) {
+ /* Release kernel context */
+ if (hl_ctx_put(hdev->kernel_ctx) != 1) {
+ dev_err(hdev->dev,
+ "kernel ctx is alive during hard reset\n");
+ rc = -EBUSY;
+ goto out_err;
+ }
+
+ hdev->kernel_ctx = NULL;
+ }
+
+ /* Reset the H/W. It will be in idle state after this returns */
+ hdev->asic_funcs->hw_fini(hdev, hard_reset);
+
+ if (hard_reset) {
+ hl_vm_fini(hdev);
+ hl_eq_reset(hdev, &hdev->event_queue);
+ }
+
+ /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
+ hl_hw_queue_reset(hdev, hard_reset);
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ hl_cq_reset(hdev, &hdev->completion_queue[i]);
+
+ /* Make sure the setup phase for the user context will run again */
+ if (hdev->user_ctx) {
+ atomic_set(&hdev->user_ctx->thread_restore_token, 1);
+ hdev->user_ctx->thread_restore_wait_token = 0;
+ }
+
+ /* Finished tear-down, starting to re-initialize */
+
+ if (hard_reset) {
+ hdev->device_cpu_disabled = false;
+
+ /* Allocate the kernel context */
+ hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
+ GFP_KERNEL);
+ if (!hdev->kernel_ctx) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+
+ hdev->user_ctx = NULL;
+
+ rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to init kernel ctx in hard reset\n");
+ kfree(hdev->kernel_ctx);
+ hdev->kernel_ctx = NULL;
+ goto out_err;
+ }
+ }
+
+ rc = hdev->asic_funcs->hw_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to initialize the H/W after reset\n");
+ goto out_err;
+ }
+
+ hdev->disabled = false;
+
+ /* Check that the communication with the device is working */
+ rc = hdev->asic_funcs->test_queues(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to detect if device is alive after reset\n");
+ goto out_err;
+ }
+
+ if (hard_reset) {
+ rc = device_late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed late init after hard reset\n");
+ goto out_err;
+ }
+
+ rc = hl_vm_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to init memory module after hard reset\n");
+ goto out_err;
+ }
+
+ hl_set_max_power(hdev, hdev->max_power);
+
+ hdev->hard_reset_pending = false;
+ } else {
+ rc = hdev->asic_funcs->soft_reset_late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed late init after soft reset\n");
+ goto out_err;
+ }
+ }
+
+ atomic_set(&hdev->in_reset, 0);
+
+ if (hard_reset)
+ hdev->hard_reset_cnt++;
+ else
+ hdev->soft_reset_cnt++;
+
+ return 0;
+
+out_err:
+ hdev->disabled = true;
+
+ if (hard_reset) {
+ dev_err(hdev->dev,
+ "Failed to reset! Device is NOT usable\n");
+ hdev->hard_reset_cnt++;
+ } else {
+ dev_err(hdev->dev,
+ "Failed to do soft-reset, trying hard reset\n");
+ hdev->soft_reset_cnt++;
+ hard_reset = true;
+ goto again;
+ }
+
+ atomic_set(&hdev->in_reset, 0);
+
+ return rc;
+}
+
+/*
+ * hl_device_init - main initialization function for habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @hclass: pointer to the class object of the device
+ *
+ * Allocate an id for the device, do early initialization and then call the
+ * ASIC specific initialization functions. Finally, create the cdev and the
+ * Linux device to expose it to the user
+ */
+int hl_device_init(struct hl_device *hdev, struct class *hclass)
+{
+ int i, rc, cq_ready_cnt;
+
+ /* Create device */
+ rc = device_setup_cdev(hdev, hclass, hdev->id, &hl_ops);
+
+ if (rc)
+ goto out_disabled;
+
+ /* Initialize ASIC function pointers and perform early init */
+ rc = device_early_init(hdev);
+ if (rc)
+ goto release_device;
+
+ /*
+ * Start calling ASIC initialization. First S/W then H/W and finally
+ * late init
+ */
+ rc = hdev->asic_funcs->sw_init(hdev);
+ if (rc)
+ goto early_fini;
+
+ /*
+ * Initialize the H/W queues. Must be done before hw_init, because
+ * there the addresses of the kernel queue are being written to the
+ * registers of the device
+ */
+ rc = hl_hw_queues_create(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize kernel queues\n");
+ goto sw_fini;
+ }
+
+ /*
+ * Initialize the completion queues. Must be done before hw_init,
+ * because there the addresses of the completion queues are being
+ * passed as arguments to request_irq
+ */
+ hdev->completion_queue =
+ kcalloc(hdev->asic_prop.completion_queues_count,
+ sizeof(*hdev->completion_queue), GFP_KERNEL);
+
+ if (!hdev->completion_queue) {
+ dev_err(hdev->dev, "failed to allocate completion queues\n");
+ rc = -ENOMEM;
+ goto hw_queues_destroy;
+ }
+
+ for (i = 0, cq_ready_cnt = 0;
+ i < hdev->asic_prop.completion_queues_count;
+ i++, cq_ready_cnt++) {
+ rc = hl_cq_init(hdev, &hdev->completion_queue[i], i);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to initialize completion queue\n");
+ goto cq_fini;
+ }
+ }
+
+ /*
+ * Initialize the event queue. Must be done before hw_init,
+ * because there the address of the event queue is being
+ * passed as argument to request_irq
+ */
+ rc = hl_eq_init(hdev, &hdev->event_queue);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize event queue\n");
+ goto cq_fini;
+ }
+
+ /* Allocate the kernel context */
+ hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
+ if (!hdev->kernel_ctx) {
+ rc = -ENOMEM;
+ goto eq_fini;
+ }
+
+ hdev->user_ctx = NULL;
+
+ rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize kernel context\n");
+ goto free_ctx;
+ }
+
+ rc = hl_cb_pool_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CB pool\n");
+ goto release_ctx;
+ }
+
+ rc = hl_sysfs_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize sysfs\n");
+ goto free_cb_pool;
+ }
+
+ hl_debugfs_add_device(hdev);
+
+ if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
+ rc = hdev->asic_funcs->hw_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize the H/W\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ hdev->disabled = false;
+
+ /* Check that the communication with the device is working */
+ rc = hdev->asic_funcs->test_queues(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to detect if device is alive\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ /* After test_queues, KMD can start sending messages to device CPU */
+
+ rc = device_late_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed late initialization\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
+ hdev->asic_name,
+ hdev->asic_prop.dram_size / 1024 / 1024 / 1024);
+
+ rc = hl_vm_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize memory module\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ /*
+ * hl_hwmon_init must be called after device_late_init, because only
+ * there we get the information from the device about which
+ * hwmon-related sensors the device supports
+ */
+ rc = hl_hwmon_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize hwmon\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ dev_notice(hdev->dev,
+ "Successfully added device to habanalabs driver\n");
+
+ hdev->init_done = true;
+
+ return 0;
+
+free_cb_pool:
+ hl_cb_pool_fini(hdev);
+release_ctx:
+ if (hl_ctx_put(hdev->kernel_ctx) != 1)
+ dev_err(hdev->dev,
+ "kernel ctx is still alive on initialization failure\n");
+free_ctx:
+ kfree(hdev->kernel_ctx);
+eq_fini:
+ hl_eq_fini(hdev, &hdev->event_queue);
+cq_fini:
+ for (i = 0 ; i < cq_ready_cnt ; i++)
+ hl_cq_fini(hdev, &hdev->completion_queue[i]);
+ kfree(hdev->completion_queue);
+hw_queues_destroy:
+ hl_hw_queues_destroy(hdev);
+sw_fini:
+ hdev->asic_funcs->sw_fini(hdev);
+early_fini:
+ device_early_fini(hdev);
+release_device:
+ device_destroy(hclass, hdev->dev->devt);
+ cdev_del(&hdev->cdev);
+out_disabled:
+ hdev->disabled = true;
+ if (hdev->pdev)
+ dev_err(&hdev->pdev->dev,
+ "Failed to initialize hl%d. Device is NOT usable !\n",
+ hdev->id);
+ else
+ pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
+ hdev->id);
+
+ return rc;
+}
+
+/*
+ * hl_device_fini - main tear-down function for habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Destroy the device, call ASIC fini functions and release the id
+ */
+void hl_device_fini(struct hl_device *hdev)
+{
+ int i, rc;
+ ktime_t timeout;
+
+ dev_info(hdev->dev, "Removing device\n");
+
+ /*
+ * This function is competing with the reset function, so try to
+ * take the reset atomic and, if we are already in the middle of a
+ * reset, wait until the reset function is finished. The reset function
+ * is designed to always finish (it could take up to a few seconds in
+ * the worst case).
+ */
+
+ timeout = ktime_add_us(ktime_get(),
+ HL_PENDING_RESET_PER_SEC * 1000 * 1000 * 4);
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ while (rc) {
+ usleep_range(50, 200);
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ WARN(1, "Failed to remove device because reset function did not finish\n");
+ return;
+ }
+ }
+
+ /* Mark device as disabled */
+ hdev->disabled = true;
+
+ hl_hwmon_fini(hdev);
+
+ device_late_fini(hdev);
+
+ hl_debugfs_remove_device(hdev);
+
+ hl_sysfs_fini(hdev);
+
+ /*
+ * Halt the engines and disable interrupts so we won't get any more
+ * completions from H/W and we won't have any accesses from the
+ * H/W to the host machine
+ */
+ hdev->asic_funcs->halt_engines(hdev, true);
+
+ /* Go over all the queues, release all CS and their jobs */
+ hl_cs_rollback_all(hdev);
+
+ hl_cb_pool_fini(hdev);
+
+ /* Release kernel context */
+ if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
+ dev_err(hdev->dev, "kernel ctx is still alive\n");
+
+ /* Reset the H/W. It will be in idle state after this returns */
+ hdev->asic_funcs->hw_fini(hdev, true);
+
+ hl_vm_fini(hdev);
+
+ hl_eq_fini(hdev, &hdev->event_queue);
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ hl_cq_fini(hdev, &hdev->completion_queue[i]);
+ kfree(hdev->completion_queue);
+
+ hl_hw_queues_destroy(hdev);
+
+ /* Call ASIC S/W finalize function */
+ hdev->asic_funcs->sw_fini(hdev);
+
+ device_early_fini(hdev);
+
+ /* Hide device from user */
+ device_destroy(hdev->dev->class, hdev->dev->devt);
+ cdev_del(&hdev->cdev);
+
+ pr_info("removed device successfully\n");
+}
+
+/*
+ * hl_poll_timeout_memory - Periodically poll a host memory address
+ * until it is not zero or a timeout occurs
+ * @hdev: pointer to habanalabs device structure
+ * @addr: Address to poll
+ * @timeout_us: timeout in us
+ * @val: Variable to read the value into
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context, because the function sleeps.
+ *
+ * The function sleeps for up to 100us between reads, until
+ * timeout_us expires
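+ *
+ * For example, a caller might use it to wait for the device to signal a
+ * fence value in host memory (the names below are illustrative only):
+ *
+ *    u32 fence_val;
+ *
+ *    rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
+ *                                timeout_usec, &fence_val);
+ *    if (rc == -ETIMEDOUT)
+ *        dev_err(hdev->dev, "fence was not signaled in time\n");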
+ */
+int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr,
+ u32 timeout_us, u32 *val)
+{
+ /*
+ * The address in this function always points to a memory location in
+ * the host's (server's) memory. That location is updated asynchronously,
+ * either by direct access of the device or by another core
+ */
+ u32 *paddr = (u32 *) (uintptr_t) addr;
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ might_sleep();
+
+ for (;;) {
+ /*
+ * Flush CPU read/write buffers to make sure we read updates
+ * done by other cores or by the device
+ */
+ mb();
+ *val = *paddr;
+ if (*val)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ *val = *paddr;
+ break;
+ }
+ usleep_range((100 >> 2) + 1, 100);
+ }
+
+ return *val ? 0 : -ETIMEDOUT;
+}
+
+/*
+ * hl_poll_timeout_device_memory - Periodically poll a device memory address
+ * until it is not zero or a timeout occurs
+ * @hdev: pointer to habanalabs device structure
+ * @addr: Device address to poll
+ * @timeout_us: timeout in us
+ * @val: Variable to read the value into
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context, because the function sleeps.
+ *
+ * The function sleeps for up to 100us between reads, until
+ * timeout_us expires
+ */
+int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
+ u32 timeout_us, u32 *val)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ might_sleep();
+
+ for (;;) {
+ *val = readl(addr);
+ if (*val)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ *val = readl(addr);
+ break;
+ }
+ usleep_range((100 >> 2) + 1, 100);
+ }
+
+ return *val ? 0 : -ETIMEDOUT;
+}
+
+/*
+ * MMIO register access helper functions.
+ */
+
+/*
+ * hl_rreg - Read an MMIO register
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @reg: MMIO register offset (in bytes)
+ *
+ * Returns the value of the MMIO register we are asked to read
+ *
+ */
+inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
+{
+ return readl(hdev->rmmio + reg);
+}
+
+/*
+ * hl_wreg - Write to an MMIO register
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @reg: MMIO register offset (in bytes)
+ * @val: 32-bit value
+ *
+ * Writes the 32-bit value into the MMIO register
+ *
+ */
+inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
+{
+ writel(val, hdev->rmmio + reg);
+}
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile
new file mode 100644
index 000000000000..e458e5ba500b
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/Makefile
@@ -0,0 +1,3 @@
+subdir-ccflags-y += -I$(src)
+
+HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
new file mode 100644
index 000000000000..238dd57c541b
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -0,0 +1,5391 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "goyaP.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+#include "include/hw_ip/mmu/mmu_v1_0.h"
+#include "include/goya/asic_reg/goya_masks.h"
+
+#include <linux/pci.h>
+#include <linux/genalloc.h>
+#include <linux/firmware.h>
+#include <linux/hwmon.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+
+/*
+ * GOYA security scheme:
+ *
+ * 1. Host is protected by:
+ * - Range registers (When MMU is enabled, DMA RR does NOT protect host)
+ * - MMU
+ *
+ * 2. DRAM is protected by:
+ * - Range registers (protect the first 512MB)
+ * - MMU (isolation between users)
+ *
+ * 3. Configuration is protected by:
+ * - Range registers
+ * - Protection bits
+ *
+ * When MMU is disabled:
+ *
+ * QMAN DMA: PQ, CQ, CP, DMA are secured.
+ * PQ, CB and the data are on the host.
+ *
+ * QMAN TPC/MME:
+ * PQ, CQ and CP are not secured.
+ * PQ, CB and the data are on the SRAM/DRAM.
+ *
+ * Since QMAN DMA is secured, KMD parses the DMA CB:
+ * - KMD checks DMA pointer
+ * - WREG, MSG_PROT are not allowed.
+ * - MSG_LONG/SHORT are allowed.
+ *
+ * A read/write transaction by the QMAN to a protected area will succeed if
+ * and only if the QMAN's CP is secured and MSG_PROT is used
+ *
+ *
+ * When MMU is enabled:
+ *
+ * QMAN DMA: PQ, CQ and CP are secured.
+ * MMU is set to bypass on the Secure props register of the QMAN.
+ * The reasons we don't enable MMU for PQ, CQ and CP are:
+ * - PQ entry is in kernel address space and KMD doesn't map it.
+ * - CP writes to MSIX register and to kernel address space (completion
+ * queue).
+ *
+ * DMA is not secured, but because CP is secured, KMD still needs to parse
+ * the CB; however, it doesn't need to check the DMA addresses.
+ *
+ * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
+ * doesn't map memory in MMU.
+ *
+ * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
+ *
+ * DMA RR does NOT protect host because DMA is not secured
+ *
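+ * To illustrate the CB parsing rules above, KMD conceptually does the
+ * following for a QMAN DMA CB when the MMU is disabled (a rough sketch,
+ * not the actual parser code):
+ *
+ *    for each packet in the CB:
+ *        if the packet is WREG_32 or MSG_PROT:
+ *            reject the CB
+ *        else if the packet is LIN_DMA:
+ *            validate its DMA source/destination pointers
+ *        else:
+ *            allow it (MSG_LONG/SHORT, NOP, FENCE, ...)
+ *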
+ */
+
+#define GOYA_MMU_REGS_NUM 61
+
+#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
+
+#define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */
+#define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */
+#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
+#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
+#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
+#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
+#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
+#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
+#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
+
+#define GOYA_QMAN0_FENCE_VAL 0xD169B243
+
+#define GOYA_MAX_INITIATORS 20
+
+#define GOYA_MAX_STRING_LEN 20
+
+#define GOYA_CB_POOL_CB_CNT 512
+#define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
+
+static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
+ "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
+ "goya cq 4", "goya cpu eq"
+};
+
+static u16 goya_packet_sizes[MAX_PACKET_ID] = {
+ [PACKET_WREG_32] = sizeof(struct packet_wreg32),
+ [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
+ [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
+ [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
+ [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
+ [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
+ [PACKET_FENCE] = sizeof(struct packet_fence),
+ [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
+ [PACKET_NOP] = sizeof(struct packet_nop),
+ [PACKET_STOP] = sizeof(struct packet_stop)
+};
+
+static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
+ mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
+ mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
+ mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
+ mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
+ mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
+ mmTPC0_QM_GLBL_SECURE_PROPS,
+ mmTPC0_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC0_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC0_CFG_ARUSER,
+ mmTPC0_CFG_AWUSER,
+ mmTPC1_QM_GLBL_SECURE_PROPS,
+ mmTPC1_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC1_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC1_CFG_ARUSER,
+ mmTPC1_CFG_AWUSER,
+ mmTPC2_QM_GLBL_SECURE_PROPS,
+ mmTPC2_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC2_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC2_CFG_ARUSER,
+ mmTPC2_CFG_AWUSER,
+ mmTPC3_QM_GLBL_SECURE_PROPS,
+ mmTPC3_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC3_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC3_CFG_ARUSER,
+ mmTPC3_CFG_AWUSER,
+ mmTPC4_QM_GLBL_SECURE_PROPS,
+ mmTPC4_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC4_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC4_CFG_ARUSER,
+ mmTPC4_CFG_AWUSER,
+ mmTPC5_QM_GLBL_SECURE_PROPS,
+ mmTPC5_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC5_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC5_CFG_ARUSER,
+ mmTPC5_CFG_AWUSER,
+ mmTPC6_QM_GLBL_SECURE_PROPS,
+ mmTPC6_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC6_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC6_CFG_ARUSER,
+ mmTPC6_CFG_AWUSER,
+ mmTPC7_QM_GLBL_SECURE_PROPS,
+ mmTPC7_QM_GLBL_NON_SECURE_PROPS,
+ mmTPC7_CMDQ_GLBL_SECURE_PROPS,
+ mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmTPC7_CFG_ARUSER,
+ mmTPC7_CFG_AWUSER,
+ mmMME_QM_GLBL_SECURE_PROPS,
+ mmMME_QM_GLBL_NON_SECURE_PROPS,
+ mmMME_CMDQ_GLBL_SECURE_PROPS,
+ mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
+ mmMME_SBA_CONTROL_DATA,
+ mmMME_SBB_CONTROL_DATA,
+ mmMME_SBC_CONTROL_DATA,
+ mmMME_WBC_CONTROL_DATA
+};
+
+#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121
+
+static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
+ GOYA_ASYNC_EVENT_ID_PCIE_IF,
+ GOYA_ASYNC_EVENT_ID_TPC0_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC1_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC2_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC3_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC4_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC5_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC6_ECC,
+ GOYA_ASYNC_EVENT_ID_TPC7_ECC,
+ GOYA_ASYNC_EVENT_ID_MME_ECC,
+ GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
+ GOYA_ASYNC_EVENT_ID_MMU_ECC,
+ GOYA_ASYNC_EVENT_ID_DMA_MACRO,
+ GOYA_ASYNC_EVENT_ID_DMA_ECC,
+ GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
+ GOYA_ASYNC_EVENT_ID_PSOC_MEM,
+ GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
+ GOYA_ASYNC_EVENT_ID_SRAM0,
+ GOYA_ASYNC_EVENT_ID_SRAM1,
+ GOYA_ASYNC_EVENT_ID_SRAM2,
+ GOYA_ASYNC_EVENT_ID_SRAM3,
+ GOYA_ASYNC_EVENT_ID_SRAM4,
+ GOYA_ASYNC_EVENT_ID_SRAM5,
+ GOYA_ASYNC_EVENT_ID_SRAM6,
+ GOYA_ASYNC_EVENT_ID_SRAM7,
+ GOYA_ASYNC_EVENT_ID_SRAM8,
+ GOYA_ASYNC_EVENT_ID_SRAM9,
+ GOYA_ASYNC_EVENT_ID_SRAM10,
+ GOYA_ASYNC_EVENT_ID_SRAM11,
+ GOYA_ASYNC_EVENT_ID_SRAM12,
+ GOYA_ASYNC_EVENT_ID_SRAM13,
+ GOYA_ASYNC_EVENT_ID_SRAM14,
+ GOYA_ASYNC_EVENT_ID_SRAM15,
+ GOYA_ASYNC_EVENT_ID_SRAM16,
+ GOYA_ASYNC_EVENT_ID_SRAM17,
+ GOYA_ASYNC_EVENT_ID_SRAM18,
+ GOYA_ASYNC_EVENT_ID_SRAM19,
+ GOYA_ASYNC_EVENT_ID_SRAM20,
+ GOYA_ASYNC_EVENT_ID_SRAM21,
+ GOYA_ASYNC_EVENT_ID_SRAM22,
+ GOYA_ASYNC_EVENT_ID_SRAM23,
+ GOYA_ASYNC_EVENT_ID_SRAM24,
+ GOYA_ASYNC_EVENT_ID_SRAM25,
+ GOYA_ASYNC_EVENT_ID_SRAM26,
+ GOYA_ASYNC_EVENT_ID_SRAM27,
+ GOYA_ASYNC_EVENT_ID_SRAM28,
+ GOYA_ASYNC_EVENT_ID_SRAM29,
+ GOYA_ASYNC_EVENT_ID_GIC500,
+ GOYA_ASYNC_EVENT_ID_PLL0,
+ GOYA_ASYNC_EVENT_ID_PLL1,
+ GOYA_ASYNC_EVENT_ID_PLL3,
+ GOYA_ASYNC_EVENT_ID_PLL4,
+ GOYA_ASYNC_EVENT_ID_PLL5,
+ GOYA_ASYNC_EVENT_ID_PLL6,
+ GOYA_ASYNC_EVENT_ID_AXI_ECC,
+ GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
+ GOYA_ASYNC_EVENT_ID_PCIE_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC0_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC1_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC2_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC3_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC4_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC5_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC6_DEC,
+ GOYA_ASYNC_EVENT_ID_TPC7_DEC,
+ GOYA_ASYNC_EVENT_ID_MME_WACS,
+ GOYA_ASYNC_EVENT_ID_MME_WACSD,
+ GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
+ GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
+ GOYA_ASYNC_EVENT_ID_PSOC,
+ GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
+ GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
+ GOYA_ASYNC_EVENT_ID_TPC0_QM,
+ GOYA_ASYNC_EVENT_ID_TPC1_QM,
+ GOYA_ASYNC_EVENT_ID_TPC2_QM,
+ GOYA_ASYNC_EVENT_ID_TPC3_QM,
+ GOYA_ASYNC_EVENT_ID_TPC4_QM,
+ GOYA_ASYNC_EVENT_ID_TPC5_QM,
+ GOYA_ASYNC_EVENT_ID_TPC6_QM,
+ GOYA_ASYNC_EVENT_ID_TPC7_QM,
+ GOYA_ASYNC_EVENT_ID_MME_QM,
+ GOYA_ASYNC_EVENT_ID_MME_CMDQ,
+ GOYA_ASYNC_EVENT_ID_DMA0_QM,
+ GOYA_ASYNC_EVENT_ID_DMA1_QM,
+ GOYA_ASYNC_EVENT_ID_DMA2_QM,
+ GOYA_ASYNC_EVENT_ID_DMA3_QM,
+ GOYA_ASYNC_EVENT_ID_DMA4_QM,
+ GOYA_ASYNC_EVENT_ID_DMA0_CH,
+ GOYA_ASYNC_EVENT_ID_DMA1_CH,
+ GOYA_ASYNC_EVENT_ID_DMA2_CH,
+ GOYA_ASYNC_EVENT_ID_DMA3_CH,
+ GOYA_ASYNC_EVENT_ID_DMA4_CH,
+ GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
+};
+
+static int goya_armcp_info_get(struct hl_device *hdev);
+static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
+static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
+static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
+static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
+ u64 phys_addr);
+
+static void goya_get_fixed_properties(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int i;
+
+ for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
+ prop->hw_queues_props[i].kmd_only = 0;
+ }
+
+ for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
+ prop->hw_queues_props[i].kmd_only = 1;
+ }
+
+ for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
+ NUMBER_OF_INT_HW_QUEUES; i++) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
+ prop->hw_queues_props[i].kmd_only = 0;
+ }
+
+ for (; i < HL_MAX_QUEUES; i++)
+ prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
+
+ prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
+
+ prop->dram_base_address = DRAM_PHYS_BASE;
+ prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
+ prop->dram_end_address = prop->dram_base_address + prop->dram_size;
+ prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
+
+ prop->sram_base_address = SRAM_BASE_ADDR;
+ prop->sram_size = SRAM_SIZE;
+ prop->sram_end_address = prop->sram_base_address + prop->sram_size;
+ prop->sram_user_base_address = prop->sram_base_address +
+ SRAM_USER_BASE_OFFSET;
+
+ prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
+ prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
+ if (hdev->pldm)
+ prop->mmu_pgt_size = 0x800000; /* 8MB */
+ else
+ prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
+ prop->mmu_pte_size = HL_PTE_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE;
+ prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+ prop->dram_page_size = PAGE_SIZE_2MB;
+
+ prop->host_phys_base_address = HOST_PHYS_BASE;
+ prop->va_space_host_start_address = VA_HOST_SPACE_START;
+ prop->va_space_host_end_address = VA_HOST_SPACE_END;
+ prop->va_space_dram_start_address = VA_DDR_SPACE_START;
+ prop->va_space_dram_end_address = VA_DDR_SPACE_END;
+ prop->dram_size_for_default_page_mapping =
+ prop->va_space_dram_end_address;
+ prop->cfg_size = CFG_SIZE;
+ prop->max_asid = MAX_ASID;
+ prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
+ prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
+ prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
+ prop->max_power_default = MAX_POWER_DEFAULT;
+ prop->tpc_enabled_mask = TPC_ENABLED_MASK;
+
+ prop->high_pll = PLL_HIGH_DEFAULT;
+}
+
+int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
+{
+ struct armcp_packet pkt;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
+ sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
+}
+
+/*
+ * goya_pci_bars_map - Map PCI BARS of Goya device
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Request PCI regions and map them to kernel virtual addresses.
+ * Returns 0 on success
+ *
+ */
+static int goya_pci_bars_map(struct hl_device *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int rc;
+
+ rc = pci_request_regions(pdev, HL_NAME);
+ if (rc) {
+ dev_err(hdev->dev, "Cannot obtain PCI resources\n");
+ return rc;
+ }
+
+ hdev->pcie_bar[SRAM_CFG_BAR_ID] =
+ pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
+ if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
+ dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
+ rc = -ENODEV;
+ goto err_release_regions;
+ }
+
+ hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
+ if (!hdev->pcie_bar[MSIX_BAR_ID]) {
+ dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
+ rc = -ENODEV;
+ goto err_unmap_sram_cfg;
+ }
+
+ hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
+ if (!hdev->pcie_bar[DDR_BAR_ID]) {
+ dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
+ rc = -ENODEV;
+ goto err_unmap_msix;
+ }
+
+ hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (CFG_BASE - SRAM_BASE_ADDR);
+
+ return 0;
+
+err_unmap_msix:
+ iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
+err_unmap_sram_cfg:
+ iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
+err_release_regions:
+ pci_release_regions(pdev);
+
+ return rc;
+}
+
+/*
+ * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Release all PCI BARS and unmap their virtual addresses
+ *
+ */
+static void goya_pci_bars_unmap(struct hl_device *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ iounmap(hdev->pcie_bar[DDR_BAR_ID]);
+ iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
+ iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
+ pci_release_regions(pdev);
+}
+
+/*
+ * goya_elbi_write - Write through the ELBI interface
+ *
+ * @hdev: pointer to hl_device structure
+ * @addr: address to write to
+ * @data: data to write
+ *
+ * Return 0 on success, negative errno on failure
+ *
+ */
+static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ ktime_t timeout;
+ u32 val;
+
+ /* Clear previous status */
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
+
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
+ pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
+ PCI_CONFIG_ELBI_CTRL_WRITE);
+
+ timeout = ktime_add_ms(ktime_get(), 10);
+ for (;;) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
+ if (val & PCI_CONFIG_ELBI_STS_MASK)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
+ &val);
+ break;
+ }
+ usleep_range(300, 500);
+ }
+
+ if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
+ return 0;
+
+ if (val & PCI_CONFIG_ELBI_STS_ERR) {
+ dev_err(hdev->dev, "Error writing to ELBI\n");
+ return -EIO;
+ }
+
+ if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
+ dev_err(hdev->dev, "ELBI write didn't finish in time\n");
+ return -EIO;
+ }
+
+ dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
+ return -EIO;
+}
+
+/*
+ * goya_iatu_write - iatu write routine
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
+{
+ u32 dbi_offset;
+ int rc;
+
+ dbi_offset = addr & 0xFFF;
+
+ rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
+ rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);
+
+ if (rc)
+ return -EIO;
+
+ return 0;
+}
+
+static void goya_reset_link_through_bridge(struct hl_device *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct pci_dev *parent_port;
+ u16 val;
+
+ parent_port = pdev->bus->self;
+ pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
+ val |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
+ ssleep(1);
+
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
+ pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
+ ssleep(3);
+}
+
+/*
+ * goya_set_ddr_bar_base - set DDR bar to map specific device address
+ *
+ * @hdev: pointer to hl_device structure
+ * @addr: address in DDR. Must be aligned to DDR bar size
+ *
+ * This function configures the iATU so that the DDR bar will start at the
+ * specified addr.
+ *
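+ * Conceptually, a caller that wants to access DRAM address X through the
+ * DDR bar would do something like the following (a rough sketch, not an
+ * actual helper in this driver):
+ *
+ *    base = X & ~(hdev->asic_prop.dram_pci_bar_size - 1);
+ *    goya_set_ddr_bar_base(hdev, base);
+ *    val = readl(hdev->pcie_bar[DDR_BAR_ID] + (X - base));
+ *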
+ */
+static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+
+ if ((goya) && (goya->ddr_bar_cur_addr == addr))
+ return 0;
+
+ /* Inbound Region 1 - Bar 4 - Point to DDR */
+ rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
+ rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
+ rc |= goya_iatu_write(hdev, 0x300, 0);
+ /* Enable + Bar match + match enable + Bar 4 */
+ rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);
+
+ /* Return the DBI window to the default location */
+ rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
+ rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
+ return -EIO;
+ }
+
+ if (goya)
+ goya->ddr_bar_cur_addr = addr;
+
+ return 0;
+}
+
+/*
+ * goya_init_iatu - Initialize the iATU unit inside the PCI controller
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * This is needed in case the firmware doesn't initialize the iATU
+ *
+ */
+static int goya_init_iatu(struct hl_device *hdev)
+{
+ int rc;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
+ rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
+ rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
+ rc |= goya_iatu_write(hdev, 0x100, 0);
+ /* Enable + Bar match + match enable */
+ rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);
+
+ /* Inbound Region 1 - Bar 4 - Point to DDR */
+ rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
+
+ /* Outbound Region 0 - Point to Host */
+ rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
+ rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
+ rc |= goya_iatu_write(hdev, 0x010,
+ lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
+ rc |= goya_iatu_write(hdev, 0x014, 0);
+ rc |= goya_iatu_write(hdev, 0x018, 0);
+ rc |= goya_iatu_write(hdev, 0x020,
+ upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
+ /* Increase region size */
+ rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
+ /* Enable */
+ rc |= goya_iatu_write(hdev, 0x004, 0x80000000);
+
+ /* Return the DBI window to the default location */
+ rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
+ rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
+
+ if (rc)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * goya_early_init - GOYA early initialization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Verify PCI bars
+ * Set DMA masks
+ * PCI controller initialization
+ * Map PCI bars
+ *
+ */
+static int goya_early_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pci_dev *pdev = hdev->pdev;
+ u32 val;
+ int rc;
+
+ goya_get_fixed_properties(hdev);
+
+ /* Check BAR sizes */
+ if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
+ dev_err(hdev->dev,
+ "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
+ SRAM_CFG_BAR_ID,
+ (unsigned long long) pci_resource_len(pdev,
+ SRAM_CFG_BAR_ID),
+ CFG_BAR_SIZE);
+ return -ENODEV;
+ }
+
+ if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
+ dev_err(hdev->dev,
+ "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
+ MSIX_BAR_ID,
+ (unsigned long long) pci_resource_len(pdev,
+ MSIX_BAR_ID),
+ MSIX_BAR_SIZE);
+ return -ENODEV;
+ }
+
+ prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
+
+ /* set DMA mask for GOYA */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (rc) {
+ dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Unable to set pci dma mask to 32 bits\n");
+ return rc;
+ }
+ }
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (rc) {
+ dev_warn(hdev->dev,
+ "Unable to set pci consistent dma mask to 39 bits\n");
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Unable to set pci consistent dma mask to 32 bits\n");
+ return rc;
+ }
+ }
+
+ if (hdev->reset_pcilink)
+ goya_reset_link_through_bridge(hdev);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(hdev->dev, "can't enable PCI device\n");
+ return rc;
+ }
+
+ pci_set_master(pdev);
+
+ rc = goya_init_iatu(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize iATU\n");
+ goto disable_device;
+ }
+
+ rc = goya_pci_bars_map(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
+ goto disable_device;
+ }
+
+ if (!hdev->pldm) {
+ val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
+ if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
+ dev_warn(hdev->dev,
+ "PCI strap is not configured correctly, PCI bus errors may occur\n");
+ }
+
+ return 0;
+
+disable_device:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+/*
+ * goya_early_fini - GOYA early finalization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Unmap PCI bars
+ *
+ */
+static int goya_early_fini(struct hl_device *hdev)
+{
+ goya_pci_bars_unmap(hdev);
+
+ pci_clear_master(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+
+ return 0;
+}
+
+/*
+ * goya_fetch_psoc_frequency - Fetch PSOC frequency values
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static void goya_fetch_psoc_frequency(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
+ prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
+ prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
+ prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+}
+
+/*
+ * goya_late_init - GOYA late initialization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Get ArmCP info and send message to CPU to enable PCI access
+ */
+static int goya_late_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+
+ rc = goya->armcp_info_get(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get armcp info\n");
+ return rc;
+ }
+
+ /* Now that we have the DRAM size in ASIC prop, we need to check
+ * its size and configure the DMA_IF DDR wrap protection (which is in
+ * the MMU block) accordingly. The value is the log2 of the DRAM size
+ */
+ WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
+
+ rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ return rc;
+ }
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
+
+ goya_fetch_psoc_frequency(hdev);
+
+ rc = goya_mmu_clear_pgt_range(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
+ goto disable_pci_access;
+ }
+
+ rc = goya_mmu_set_dram_default_page(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to set DRAM default page\n");
+ goto disable_pci_access;
+ }
+
+ return 0;
+
+disable_pci_access:
+ goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+
+ return rc;
+}
+
+/*
+ * goya_late_fini - GOYA late tear-down code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Free the allocated sensor structures
+ */
+void goya_late_fini(struct hl_device *hdev)
+{
+ const struct hwmon_channel_info **channel_info_arr;
+ int i = 0;
+
+ if (!hdev->hl_chip_info->info)
+ return;
+
+ channel_info_arr = hdev->hl_chip_info->info;
+
+ while (channel_info_arr[i]) {
+ kfree(channel_info_arr[i]->config);
+ kfree(channel_info_arr[i]);
+ i++;
+ }
+
+ kfree(channel_info_arr);
+
+ hdev->hl_chip_info->info = NULL;
+}
+
+/*
+ * goya_sw_init - Goya software initialization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static int goya_sw_init(struct hl_device *hdev)
+{
+ struct goya_device *goya;
+ int rc;
+
+ /* Allocate device structure */
+ goya = kzalloc(sizeof(*goya), GFP_KERNEL);
+ if (!goya)
+ return -ENOMEM;
+
+ goya->test_cpu_queue = goya_test_cpu_queue;
+ goya->armcp_info_get = goya_armcp_info_get;
+
+ /* according to goya_init_iatu */
+ goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
+
+ goya->mme_clk = GOYA_PLL_FREQ_LOW;
+ goya->tpc_clk = GOYA_PLL_FREQ_LOW;
+ goya->ic_clk = GOYA_PLL_FREQ_LOW;
+
+ hdev->asic_specific = goya;
+
+ /* Create DMA pool for small allocations */
+ hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
+ &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
+ if (!hdev->dma_pool) {
+ dev_err(hdev->dev, "failed to create DMA pool\n");
+ rc = -ENOMEM;
+ goto free_goya_device;
+ }
+
+ hdev->cpu_accessible_dma_mem =
+ hdev->asic_funcs->dma_alloc_coherent(hdev,
+ CPU_ACCESSIBLE_MEM_SIZE,
+ &hdev->cpu_accessible_dma_address,
+ GFP_KERNEL | __GFP_ZERO);
+
+ if (!hdev->cpu_accessible_dma_mem) {
+ dev_err(hdev->dev,
+ "failed to allocate %d bytes of dma memory for CPU accessible memory space\n",
+ CPU_ACCESSIBLE_MEM_SIZE);
+ rc = -ENOMEM;
+ goto free_dma_pool;
+ }
+
+ hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1);
+ if (!hdev->cpu_accessible_dma_pool) {
+ dev_err(hdev->dev,
+ "Failed to create CPU accessible DMA pool\n");
+ rc = -ENOMEM;
+ goto free_cpu_pq_dma_mem;
+ }
+
+ rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
+ (uintptr_t) hdev->cpu_accessible_dma_mem,
+ CPU_ACCESSIBLE_MEM_SIZE, -1);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add memory to CPU accessible DMA pool\n");
+ rc = -EFAULT;
+ goto free_cpu_pq_pool;
+ }
+
+ spin_lock_init(&goya->hw_queues_lock);
+
+ return 0;
+
+free_cpu_pq_pool:
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+free_cpu_pq_dma_mem:
+ hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+free_dma_pool:
+ dma_pool_destroy(hdev->dma_pool);
+free_goya_device:
+ kfree(goya);
+
+ return rc;
+}
+
+/*
+ * goya_sw_fini - Goya software tear-down code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static int goya_sw_fini(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+
+ hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+
+ dma_pool_destroy(hdev->dma_pool);
+
+ kfree(goya);
+
+ return 0;
+}
+
+static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
+ dma_addr_t bus_address)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
+ WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
+
+ WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
+ WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
+ WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
+
+ WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
+ WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
+ WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
+ WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
+ WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
+ WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
+ WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
+ GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
+
+ /* PQ has buffer of 2 cache lines, while CQ has 8 lines */
+ WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
+ WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
+
+ if (goya->hw_cap_initialized & HW_CAP_MMU)
+ WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
+ else
+ WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
+
+ WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
+ WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
+}
+
+static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
+{
+ u32 gic_base_lo, gic_base_hi;
+ u64 sob_addr;
+ u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
+ WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
+ WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
+ GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
+
+ if (dma_id)
+ sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
+ (dma_id - 1) * 4;
+ else
+ sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
+
+ WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
+ WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
+ WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
+}
+
+/*
+ * goya_init_dma_qmans - Initialize QMAN DMA registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the H/W registers of the QMAN DMA channels
+ *
+ */
+static void goya_init_dma_qmans(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct hl_hw_queue *q;
+ dma_addr_t bus_address;
+ int i;
+
+ if (goya->hw_cap_initialized & HW_CAP_DMA)
+ return;
+
+ q = &hdev->kernel_queues[0];
+
+ for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
+ bus_address = q->bus_address +
+ hdev->asic_prop.host_phys_base_address;
+
+ goya_init_dma_qman(hdev, i, bus_address);
+ goya_init_dma_ch(hdev, i);
+ }
+
+ goya->hw_cap_initialized |= HW_CAP_DMA;
+}
+
+/*
+ * goya_disable_external_queues - Disable external queues
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static void goya_disable_external_queues(struct hl_device *hdev)
+{
+ WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
+ WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
+}
+
+static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
+ u32 cp_sts_reg, u32 glbl_sts0_reg)
+{
+ int rc;
+ u32 status;
+
+ /* use the values of TPC0 as they are all the same */
+
+ WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+
+ status = RREG32(cp_sts_reg);
+ if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
+ rc = hl_poll_timeout(
+ hdev,
+ cp_sts_reg,
+ status,
+ !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
+ 1000,
+ QMAN_FENCE_TIMEOUT_USEC);
+
+ /* if QMAN is stuck in fence no need to check for stop */
+ if (rc)
+ return 0;
+ }
+
+ rc = hl_poll_timeout(
+ hdev,
+ glbl_sts0_reg,
+ status,
+ (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
+ 1000,
+ QMAN_STOP_TIMEOUT_USEC);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for QMAN to stop\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * goya_stop_external_queues - Stop external queues
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_stop_external_queues(struct hl_device *hdev)
+{
+ int rc, retval = 0;
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_0_GLBL_CFG1,
+ mmDMA_QM_0_CP_STS,
+ mmDMA_QM_0_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_1_GLBL_CFG1,
+ mmDMA_QM_1_CP_STS,
+ mmDMA_QM_1_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_2_GLBL_CFG1,
+ mmDMA_QM_2_CP_STS,
+ mmDMA_QM_2_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_3_GLBL_CFG1,
+ mmDMA_QM_3_CP_STS,
+ mmDMA_QM_3_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmDMA_QM_4_GLBL_CFG1,
+ mmDMA_QM_4_CP_STS,
+ mmDMA_QM_4_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
+ retval = -EIO;
+ }
+
+ return retval;
+}
+
+static void goya_resume_external_queues(struct hl_device *hdev)
+{
+ WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
+ WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
+ WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
+ WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
+ WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
+}
+
+/*
+ * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_init_cpu_queues(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct hl_eq *eq;
+ dma_addr_t bus_address;
+ u32 status;
+ struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
+ int err;
+
+ if (!hdev->cpu_queues_enable)
+ return 0;
+
+ if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
+ return 0;
+
+ eq = &hdev->event_queue;
+
+ bus_address = cpu_pq->bus_address +
+ hdev->asic_prop.host_phys_base_address;
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));
+
+ bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));
+
+ bus_address = hdev->cpu_accessible_dma_address +
+ hdev->asic_prop.host_phys_base_address;
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));
+
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE);
+
+ /* Used for EQ CI */
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
+
+ WREG32(mmCPU_IF_PF_PQ_PI, 0);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_PI_UPDATE);
+
+ err = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
+ status,
+ (status == PQ_INIT_STATUS_READY_FOR_HOST),
+ 1000,
+ GOYA_CPU_TIMEOUT_USEC);
+
+ if (err) {
+ dev_err(hdev->dev,
+ "Failed to communicate with ARM CPU (ArmCP timeout)\n");
+ return -EIO;
+ }
+
+ goya->hw_cap_initialized |= HW_CAP_CPU_Q;
+ return 0;
+}
+
+static void goya_set_pll_refclk(struct hl_device *hdev)
+{
+ WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
+
+ WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
+ WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
+ WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
+ WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
+}
+
+static void goya_disable_clk_rlx(struct hl_device *hdev)
+{
+ WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
+ WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
+}
+
+static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
+{
+ u64 tpc_eml_address;
+ u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
+ int err, slm_index;
+
+ tpc_offset = tpc_id * 0x40000;
+ tpc_eml_offset = tpc_id * 0x200000;
+ tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
+ tpc_slm_offset = tpc_eml_address + 0x100000;
+
+ /*
+ * Workaround for Bug H2 #2443:
+ * "TPC SB is not initialized on chip reset"
+ */
+
+ val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
+ if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
+ dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
+ tpc_id);
+
+ WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
+
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
+ WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
+
+ WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
+ 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
+
+ err = hl_poll_timeout(
+ hdev,
+ mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
+ val,
+ (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
+ 1000,
+ HL_DEVICE_TIMEOUT_USEC);
+
+ if (err)
+ dev_err(hdev->dev,
+ "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
+
+ WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
+ 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
+
+ msleep(GOYA_RESET_WAIT_MSEC);
+
+ WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
+ ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
+
+ msleep(GOYA_RESET_WAIT_MSEC);
+
+ for (slm_index = 0 ; slm_index < 256 ; slm_index++)
+ WREG32(tpc_slm_offset + (slm_index << 2), 0);
+
+ val = RREG32(tpc_slm_offset);
+}
+
+static void goya_tpc_mbist_workaround(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i;
+
+ if (hdev->pldm)
+ return;
+
+ if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
+ return;
+
+ /* Workaround for H2 #2443 */
+
+ for (i = 0 ; i < TPC_MAX_NUM ; i++)
+ _goya_tpc_mbist_workaround(hdev, i);
+
+ goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
+}
+
+/*
+ * goya_init_golden_registers - Initialize golden registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the H/W registers of the device
+ *
+ */
+static void goya_init_golden_registers(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 polynom[10], tpc_intr_mask, offset;
+ int i;
+
+ if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
+ return;
+
+ polynom[0] = 0x00020080;
+ polynom[1] = 0x00401000;
+ polynom[2] = 0x00200800;
+ polynom[3] = 0x00002000;
+ polynom[4] = 0x00080200;
+ polynom[5] = 0x00040100;
+ polynom[6] = 0x00100400;
+ polynom[7] = 0x00004000;
+ polynom[8] = 0x00010000;
+ polynom[9] = 0x00008000;
+
+ /* Mask all arithmetic interrupts from TPC */
+ tpc_intr_mask = 0x7FFF;
+
+ for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
+
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
+
+ WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
+ WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
+ WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
+ WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
+ WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
+ }
+
+ WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
+ WREG32(mmMME_AGU, 0x0f0f0f10);
+ WREG32(mmMME_SEI_MASK, ~0x0);
+
+ WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
+ WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
+ WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
+ WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
+ WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
+ WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
+ WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
+ WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
+ WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
+ WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
+ WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
+ WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
+ WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
+ WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
+ WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
+ WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
+ WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
+ WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
+ WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
+ WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
+ WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
+ WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
+ WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
+ WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
+ WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
+ WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
+ WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
+ WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
+ WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
+ WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
+ WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
+ WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
+ WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
+ WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
+ WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
+ WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
+ WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
+ WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
+ WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
+ WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
+ WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
+ WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
+ WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
+ WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
+ WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
+ WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
+ WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
+ WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
+ WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
+ WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
+ WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
+ WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
+ WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
+ WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
+ WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
+ WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
+ WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
+ WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
+ WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
+ WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
+ WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
+ WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
+ WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
+ WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
+ WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
+ WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
+ WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
+ WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
+ WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
+ WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
+ WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
+ WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
+ WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
+ WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+ WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
+
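+	/* Per-TPC router HBW arbitration golden values */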
+ WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
+ WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
+ WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
+ WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
+ WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
+ WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
+ WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
+ WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
+ WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
+ WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
+ WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
+ WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
+
+ WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
+ WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
+ WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
+ WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
+ WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
+ WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
+ WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
+ WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
+ WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
+ WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
+ WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
+ WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
+
+ WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
+ WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
+ WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
+ WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
+ WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
+ WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
+ WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
+ WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
+ WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
+ WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
+ WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
+ WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
+
+ WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
+ WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
+ WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
+ WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
+ WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
+ WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
+ WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
+ WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
+ WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
+ WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
+ WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
+ WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
+
+ WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
+ WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
+ WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
+ WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
+ WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
+ WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
+ WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
+ WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
+ WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
+ WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
+ WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
+ WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
+
+ WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
+ WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
+ WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
+ WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
+ WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
+ WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
+
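+	/* Write the polynom coefficients, shifted right by 7, to the split
+	 * coefficient registers of every router
+	 */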
+ for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
+ WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+
+ WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+
+ WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
+ }
+
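+	/* Enable scrambling and non-linear scrambling on the MME routers */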
+ for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
+ WREG32(mmMME1_RTR_SCRAMB_EN + offset,
+ 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
+ WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
+ 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
+ }
+
+ for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
+ /*
+		 * Workaround for Bug H2 #2441:
+ * "ST.NOP set trace event illegal opcode"
+ */
+ WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
+
+ WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
+ 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
+ WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
+ 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+ }
+
+ WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
+ WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
+ 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+
+ WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
+ WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
+ 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+
+	/*
+	 * Workaround for H2 #HW-23 bug
+	 * Set the DMA max outstanding read requests to 240 on DMA CH 1 and to
+	 * 16 on the KMD DMA channel (CH 0). We need to limit only these DMAs
+	 * because the user can only read from Host using DMA CH 1
+	 */
+ WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
+ WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
+
+ goya->hw_cap_initialized |= HW_CAP_GOLDEN;
+}
+
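+/*
+ * goya_init_mme_qman - Initialize the QMAN of the MME engine
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */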
+static void goya_init_mme_qman(struct hl_device *hdev)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u64 qman_base_addr;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ qman_base_addr = hdev->asic_prop.sram_base_address +
+ MME_QMAN_BASE_OFFSET;
+
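+	/* The MME QMAN's primary queue (PQ) is located in SRAM */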
+ WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
+ WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
+ WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
+ WREG32(mmMME_QM_PQ_PI, 0);
+ WREG32(mmMME_QM_PQ_CI, 0);
+ WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
+ WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
+ WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
+ WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
+
+ WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
+ WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
+ WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
+ WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
+
+ /* QMAN CQ has 8 cache lines */
+ WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
+
+ WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
+ WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
+
+ WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
+
+ WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
+
+ WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
+
+ WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
+}
+
+static void goya_init_mme_cmdq(struct hl_device *hdev)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u64 qman_base_addr;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ qman_base_addr = hdev->asic_prop.sram_base_address +
+ MME_QMAN_BASE_OFFSET;
+
+ WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
+ WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
+ WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
+ WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
+
+ /* CMDQ CQ has 20 cache lines */
+ WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
+
+ WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
+ WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
+
+ WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
+
+ WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
+
+ WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
+
+ WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
+}
+
+static void goya_init_mme_qmans(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 so_base_lo, so_base_hi;
+
+ if (goya->hw_cap_initialized & HW_CAP_MME)
+ return;
+
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
+ WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
+
+ goya_init_mme_qman(hdev);
+ goya_init_mme_cmdq(hdev);
+
+ goya->hw_cap_initialized |= HW_CAP_MME;
+}
+
+static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u64 qman_base_addr;
+ u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
+
+ WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
+ WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
+ WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
+ WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
+ WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
+ WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
+ WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
+
+ WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
+ WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
+ WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
+ WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
+
+ WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
+
+ WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
+ WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
+
+ WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
+ GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
+
+ WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
+
+ WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
+
+ WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
+}
+
+static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 gic_base_lo, gic_base_hi;
+ u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
+
+ mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ gic_base_lo =
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+ gic_base_hi =
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
+
+ WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
+ WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
+ WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
+ WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
+
+ WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
+
+ WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
+ WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
+
+ WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
+ GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
+
+ WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
+
+ WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
+
+ WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
+}
+
+static void goya_init_tpc_qmans(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 so_base_lo, so_base_hi;
+ u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
+ mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
+ int i;
+
+ if (goya->hw_cap_initialized & HW_CAP_TPC)
+ return;
+
+ so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+
+ for (i = 0 ; i < TPC_MAX_NUM ; i++) {
+ WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
+ so_base_lo);
+ WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
+ so_base_hi);
+ }
+
+ goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
+ goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
+ goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
+ goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
+ goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
+ goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
+ goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
+ goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
+
+ for (i = 0 ; i < TPC_MAX_NUM ; i++)
+ goya_init_tpc_cmdq(hdev, i);
+
+ goya->hw_cap_initialized |= HW_CAP_TPC;
+}
+
+/*
+ * goya_disable_internal_queues - Disable internal queues
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static void goya_disable_internal_queues(struct hl_device *hdev)
+{
+ WREG32(mmMME_QM_GLBL_CFG0, 0);
+ WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC0_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC1_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC2_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC3_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC4_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC5_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC6_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
+
+ WREG32(mmTPC7_QM_GLBL_CFG0, 0);
+ WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
+}
+
+/*
+ * goya_stop_internal_queues - Stop internal queues
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_stop_internal_queues(struct hl_device *hdev)
+{
+ int rc, retval = 0;
+
+	/*
+	 * Each queue (QMAN) is a separate H/W logic, so each QMAN can be
+	 * stopped independently. A failure to stop one QMAN therefore does
+	 * not prevent us from trying to stop the other QMANs
+	 */
+
+ rc = goya_stop_queue(hdev,
+ mmMME_QM_GLBL_CFG1,
+ mmMME_QM_CP_STS,
+ mmMME_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop MME QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmMME_CMDQ_GLBL_CFG1,
+ mmMME_CMDQ_CP_STS,
+ mmMME_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop MME CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC0_QM_GLBL_CFG1,
+ mmTPC0_QM_CP_STS,
+ mmTPC0_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC0_CMDQ_GLBL_CFG1,
+ mmTPC0_CMDQ_CP_STS,
+ mmTPC0_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC1_QM_GLBL_CFG1,
+ mmTPC1_QM_CP_STS,
+ mmTPC1_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC1_CMDQ_GLBL_CFG1,
+ mmTPC1_CMDQ_CP_STS,
+ mmTPC1_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC2_QM_GLBL_CFG1,
+ mmTPC2_QM_CP_STS,
+ mmTPC2_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC2_CMDQ_GLBL_CFG1,
+ mmTPC2_CMDQ_CP_STS,
+ mmTPC2_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC3_QM_GLBL_CFG1,
+ mmTPC3_QM_CP_STS,
+ mmTPC3_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC3_CMDQ_GLBL_CFG1,
+ mmTPC3_CMDQ_CP_STS,
+ mmTPC3_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC4_QM_GLBL_CFG1,
+ mmTPC4_QM_CP_STS,
+ mmTPC4_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC4_CMDQ_GLBL_CFG1,
+ mmTPC4_CMDQ_CP_STS,
+ mmTPC4_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC5_QM_GLBL_CFG1,
+ mmTPC5_QM_CP_STS,
+ mmTPC5_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC5_CMDQ_GLBL_CFG1,
+ mmTPC5_CMDQ_CP_STS,
+ mmTPC5_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC6_QM_GLBL_CFG1,
+ mmTPC6_QM_CP_STS,
+ mmTPC6_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC6_CMDQ_GLBL_CFG1,
+ mmTPC6_CMDQ_CP_STS,
+ mmTPC6_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC7_QM_GLBL_CFG1,
+ mmTPC7_QM_CP_STS,
+ mmTPC7_QM_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
+ retval = -EIO;
+ }
+
+ rc = goya_stop_queue(hdev,
+ mmTPC7_CMDQ_GLBL_CFG1,
+ mmTPC7_CMDQ_CP_STS,
+ mmTPC7_CMDQ_GLBL_STS0);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
+ retval = -EIO;
+ }
+
+ return retval;
+}
+
+static void goya_resume_internal_queues(struct hl_device *hdev)
+{
+ WREG32(mmMME_QM_GLBL_CFG1, 0);
+ WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC0_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC1_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC2_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC3_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC4_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC5_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC6_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
+
+ WREG32(mmTPC7_QM_GLBL_CFG1, 0);
+ WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
+}
+
+static void goya_dma_stall(struct hl_device *hdev)
+{
+ WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
+ WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
+}
+
+static void goya_tpc_stall(struct hl_device *hdev)
+{
+ WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
+}
+
+static void goya_mme_stall(struct hl_device *hdev)
+{
+ WREG32(mmMME_STALL, 0xFFFFFFFF);
+}
+
+static int goya_enable_msix(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int cq_cnt = hdev->asic_prop.completion_queues_count;
+ int rc, i, irq_cnt_init, irq;
+
+ if (goya->hw_cap_initialized & HW_CAP_MSIX)
+ return 0;
+
+ rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
+ GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
+ if (rc < 0) {
+ dev_err(hdev->dev,
+ "MSI-X: Failed to enable support -- %d/%d\n",
+ GOYA_MSIX_ENTRIES, rc);
+ return rc;
+ }
+
+ for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
+ irq = pci_irq_vector(hdev->pdev, i);
+ rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
+ &hdev->completion_queue[i]);
+ if (rc) {
+			dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
+ goto free_irqs;
+ }
+ }
+
+ irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
+
+ rc = request_irq(irq, hl_irq_handler_eq, 0,
+ goya_irq_name[EVENT_QUEUE_MSIX_IDX],
+ &hdev->event_queue);
+ if (rc) {
+		dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
+ goto free_irqs;
+ }
+
+ goya->hw_cap_initialized |= HW_CAP_MSIX;
+ return 0;
+
+free_irqs:
+ for (i = 0 ; i < irq_cnt_init ; i++)
+ free_irq(pci_irq_vector(hdev->pdev, i),
+ &hdev->completion_queue[i]);
+
+ pci_free_irq_vectors(hdev->pdev);
+ return rc;
+}
+
+static void goya_sync_irqs(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
+ return;
+
+ /* Wait for all pending IRQs to be finished */
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ synchronize_irq(pci_irq_vector(hdev->pdev, i));
+
+ synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
+}
+
+static void goya_disable_msix(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i, irq;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
+ return;
+
+ goya_sync_irqs(hdev);
+
+ irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
+ free_irq(irq, &hdev->event_queue);
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
+ irq = pci_irq_vector(hdev->pdev, i);
+ free_irq(irq, &hdev->completion_queue[i]);
+ }
+
+ pci_free_irq_vectors(hdev->pdev);
+
+ goya->hw_cap_initialized &= ~HW_CAP_MSIX;
+}
+
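+/*
+ * goya_halt_engines - Halt the compute engines of the device
+ *
+ * @hdev: pointer to hl_device structure
+ * @hard_reset: whether this halt is part of a hard reset sequence
+ *
+ */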
+static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
+{
+ u32 wait_timeout_ms, cpu_timeout_ms;
+
+ dev_info(hdev->dev,
+ "Halting compute engines and disabling interrupts\n");
+
+ if (hdev->pldm) {
+ wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
+ cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
+ } else {
+ wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
+ cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
+ }
+
+ if (hard_reset) {
+		/*
+		 * The state of the CPU is unknown at this point, so make sure
+		 * it is stopped by any means necessary
+		 */
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
+ msleep(cpu_timeout_ms);
+ }
+
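+	/* Stop the queues, then stall the engines and finally disable the
+	 * queues
+	 */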
+ goya_stop_external_queues(hdev);
+ goya_stop_internal_queues(hdev);
+
+ msleep(wait_timeout_ms);
+
+ goya_dma_stall(hdev);
+ goya_tpc_stall(hdev);
+ goya_mme_stall(hdev);
+
+ msleep(wait_timeout_ms);
+
+ goya_disable_external_queues(hdev);
+ goya_disable_internal_queues(hdev);
+
+ if (hard_reset)
+ goya_disable_msix(hdev);
+ else
+ goya_sync_irqs(hdev);
+}
+
+/*
+ * goya_push_fw_to_device - Push FW code to device
+ *
+ * @hdev: pointer to hl_device structure
+ * @fw_name: name of the firmware file to load
+ * @dst: IO-mapped address on the device to copy the firmware to
+ *
+ * Copy F/W code from the firmware file to device memory.
+ * Returns 0 on success
+ *
+ */
+static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+ void __iomem *dst)
+{
+ const struct firmware *fw;
+ const u64 *fw_data;
+ size_t fw_size, i;
+ int rc;
+
+ rc = request_firmware(&fw, fw_name, hdev->dev);
+
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request %s\n", fw_name);
+ goto out;
+ }
+
+ fw_size = fw->size;
+ if ((fw_size % 4) != 0) {
+ dev_err(hdev->dev, "illegal %s firmware size %zu\n",
+ fw_name, fw_size);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
+
+ fw_data = (const u64 *) fw->data;
+
+ if ((fw->size % 8) != 0)
+ fw_size -= 8;
+
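+	/*
+	 * Copy the F/W image to the device in 64-bit chunks. If the image
+	 * size is not a multiple of 8, the remaining 4 bytes are written
+	 * separately below
+	 */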
+ for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
+ if (!(i & (0x80000 - 1))) {
+ dev_dbg(hdev->dev,
+ "copied so far %zu out of %zu for %s firmware",
+ i, fw_size, fw_name);
+ usleep_range(20, 100);
+ }
+
+ writeq(*fw_data, dst);
+ }
+
+ if ((fw->size % 8) != 0)
+ writel(*(const u32 *) fw_data, dst);
+
+out:
+ release_firmware(fw);
+ return rc;
+}
+
+static int goya_pldm_init_cpu(struct hl_device *hdev)
+{
+ char fw_name[200];
+ void __iomem *dst;
+ u32 val, unit_rst_val;
+ int rc;
+
+ /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
+ goya_init_golden_registers(hdev);
+
+ /* Put ARM cores into reset */
+ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
+ val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
+
+ /* Reset the CA53 MACRO */
+ unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+ WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
+ val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+ WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
+ val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+
+ snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
+ dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
+ rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ if (rc)
+ return rc;
+
+ snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
+ dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
+ rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ if (rc)
+ return rc;
+
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
+ WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
+
+ WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
+ lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
+ WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
+ upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
+
+ /* Release ARM core 0 from reset */
+ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
+ CPU_RESET_CORE0_DEASSERT);
+ val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
+
+ return 0;
+}
+
+/*
+ * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
+ * The version string should be located at that offset.
+ */
+static void goya_read_device_fw_version(struct hl_device *hdev,
+ enum goya_fw_component fwc)
+{
+ const char *name;
+ u32 ver_off;
+ char *dest;
+
+ switch (fwc) {
+ case FW_COMP_UBOOT:
+ ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
+ dest = hdev->asic_prop.uboot_ver;
+ name = "U-Boot";
+ break;
+ case FW_COMP_PREBOOT:
+ ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
+ dest = hdev->asic_prop.preboot_ver;
+ name = "Preboot";
+ break;
+ default:
+ dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
+ return;
+ }
+
+ ver_off &= ~((u32)SRAM_BASE_ADDR);
+
+ if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
+ memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
+ VERSION_MAX_LEN);
+ } else {
+ dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
+ name, ver_off);
+ strcpy(dest, "unavailable");
+ }
+}
+
+static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ char fw_name[200];
+ void __iomem *dst;
+ u32 status;
+ int rc;
+
+ if (!hdev->cpu_enable)
+ return 0;
+
+ if (goya->hw_cap_initialized & HW_CAP_CPU)
+ return 0;
+
+	/*
+	 * Before pushing u-boot/linux to the device, we need to set the DDR
+	 * BAR to the base address of the DRAM
+	 */
+ rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to map DDR bar to DRAM base address\n");
+ return rc;
+ }
+
+ if (hdev->pldm) {
+ rc = goya_pldm_init_cpu(hdev);
+ if (rc)
+ return rc;
+
+ goto out;
+ }
+
+ /* Make sure CPU boot-loader is running */
+ rc = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_WARM_REBOOT,
+ status,
+ (status == CPU_BOOT_STATUS_DRAM_RDY) ||
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ 10000,
+ cpu_timeout);
+
+ if (rc) {
+		dev_err(hdev->dev, "Error in ARM u-boot!\n");
+ switch (status) {
+ case CPU_BOOT_STATUS_NA:
+ dev_err(hdev->dev,
+ "ARM status %d - BTL did NOT run\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_WFE:
+ dev_err(hdev->dev,
+ "ARM status %d - Inside WFE loop\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_BTL:
+ dev_err(hdev->dev,
+ "ARM status %d - Stuck in BTL\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_PREBOOT:
+ dev_err(hdev->dev,
+ "ARM status %d - Stuck in Preboot\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_SPL:
+ dev_err(hdev->dev,
+ "ARM status %d - Stuck in SPL\n", status);
+ break;
+ case CPU_BOOT_STATUS_IN_UBOOT:
+ dev_err(hdev->dev,
+ "ARM status %d - Stuck in u-boot\n", status);
+ break;
+ case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
+ dev_err(hdev->dev,
+ "ARM status %d - DDR initialization failed\n",
+ status);
+ break;
+ default:
+ dev_err(hdev->dev,
+ "ARM status %d - Invalid status code\n",
+ status);
+ break;
+ }
+ return -EIO;
+ }
+
+ /* Read U-Boot version now in case we will later fail */
+ goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
+ goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
+ if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
+ goto out;
+
+ if (!hdev->fw_loading) {
+ dev_info(hdev->dev, "Skip loading FW\n");
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
+ dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
+ rc = goya_push_fw_to_device(hdev, fw_name, dst);
+ if (rc)
+ return rc;
+
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_WARM_REBOOT,
+ status,
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ 10000,
+ cpu_timeout);
+
+ if (rc) {
+ if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
+ dev_err(hdev->dev,
+ "ARM u-boot reports FIT image is corrupted\n");
+ else
+ dev_err(hdev->dev,
+ "ARM Linux failed to load, %d\n", status);
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
+ return -EIO;
+ }
+
+ dev_info(hdev->dev, "Successfully loaded firmware to device\n");
+
+out:
+ goya->hw_cap_initialized |= HW_CAP_CPU;
+
+ return 0;
+}
+
+static int goya_mmu_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+ u64 hop0_addr;
+ int rc, i;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (goya->hw_cap_initialized & HW_CAP_MMU)
+ return 0;
+
+ hdev->dram_supports_virtual_memory = true;
+ hdev->dram_default_page_mapping = true;
+
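+	/* Configure the hop-0 page table address for every possible ASID */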
+ for (i = 0 ; i < prop->max_asid ; i++) {
+ hop0_addr = prop->mmu_pgt_addr +
+ (i * prop->mmu_hop_table_size);
+
+ rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to set hop0 addr for asid %d\n", i);
+ goto err;
+ }
+ }
+
+ goya->hw_cap_initialized |= HW_CAP_MMU;
+
+ /* init MMU cache manage page */
+ WREG32(mmSTLB_CACHE_INV_BASE_39_8,
+ lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
+ WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
+
+ /* Remove follower feature due to performance bug */
+ WREG32_AND(mmSTLB_STLB_FEATURE_EN,
+ (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+
+ WREG32(mmMMU_MMU_ENABLE, 1);
+ WREG32(mmMMU_SPI_MASK, 0xF);
+
+ return 0;
+
+err:
+ return rc;
+}
+
+/*
+ * goya_hw_init - Goya hardware initialization code
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Returns 0 on success
+ *
+ */
+static int goya_hw_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 val;
+ int rc;
+
+ dev_info(hdev->dev, "Starting initialization of H/W\n");
+
+ /* Perform read from the device to make sure device is up */
+ val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ /*
+ * Let's mark in the H/W that we have reached this point. We check
+ * this value in the reset_before_init function to understand whether
+ * we need to reset the chip before doing H/W init. This register is
+ * cleared by the H/W upon H/W reset
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
+
+ rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU\n");
+ return rc;
+ }
+
+ goya_tpc_mbist_workaround(hdev);
+
+ goya_init_golden_registers(hdev);
+
+ /*
+ * After CPU initialization is finished, change DDR bar mapping inside
+ * iATU to point to the start address of the MMU page tables
+ */
+ rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
+ (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to map DDR bar to MMU page tables\n");
+ return rc;
+ }
+
+ rc = goya_mmu_init(hdev);
+ if (rc)
+ return rc;
+
+ goya_init_security(hdev);
+
+ goya_init_dma_qmans(hdev);
+
+ goya_init_mme_qmans(hdev);
+
+ goya_init_tpc_qmans(hdev);
+
+ /* MSI-X must be enabled before CPU queues are initialized */
+ rc = goya_enable_msix(hdev);
+ if (rc)
+ goto disable_queues;
+
+ rc = goya_init_cpu_queues(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
+ rc);
+ goto disable_msix;
+ }
+
+ /* CPU initialization is finished, we can now move to 48 bit DMA mask */
+ rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
+ if (rc) {
+ dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
+ rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Unable to set pci dma mask to 32 bits\n");
+ goto disable_pci_access;
+ }
+ }
+
+ rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
+ if (rc) {
+ dev_warn(hdev->dev,
+ "Unable to set pci consistent dma mask to 48 bits\n");
+ rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(hdev->dev,
+ "Unable to set pci consistent dma mask to 32 bits\n");
+ goto disable_pci_access;
+ }
+ }
+
+ /* Perform read from the device to flush all MSI-X configuration */
+ val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ return 0;
+
+disable_pci_access:
+ goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+disable_msix:
+ goya_disable_msix(hdev);
+disable_queues:
+ goya_disable_internal_queues(hdev);
+ goya_disable_external_queues(hdev);
+
+ return rc;
+}
+
+/*
+ * goya_hw_fini - Goya hardware tear-down code
+ *
+ * @hdev: pointer to hl_device structure
+ * @hard_reset: should we do hard reset to all engines or just reset the
+ * compute/dma engines
+ */
+static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 reset_timeout_ms, status;
+
+ if (hdev->pldm)
+ reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
+ else
+ reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
+
+ if (hard_reset) {
+ goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
+ goya_disable_clk_rlx(hdev);
+ goya_set_pll_refclk(hdev);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
+ dev_info(hdev->dev,
+ "Issued SOFT reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ }
+
+	/*
+	 * After hard reset, we can't poll the BTM_FSM register because the
+	 * PSOC itself is in reset. In both reset types we need to wait until
+	 * the reset is deasserted
+	 */
+ msleep(reset_timeout_ms);
+
+ status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
+ if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
+ dev_err(hdev->dev,
+ "Timeout while waiting for device to reset 0x%x\n",
+ status);
+
+ if (!hard_reset) {
+ goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
+ HW_CAP_GOLDEN | HW_CAP_TPC);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_SOFT_RESET);
+ return;
+ }
+
+ /* Chicken bit to re-initiate boot sequencer flow */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
+ 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
+ /* Move boot manager FSM to pre boot sequencer init state */
+ WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
+ 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
+
+ goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
+ HW_CAP_DDR_0 | HW_CAP_DDR_1 |
+ HW_CAP_DMA | HW_CAP_MME |
+ HW_CAP_MMU | HW_CAP_TPC_MBIST |
+ HW_CAP_GOLDEN | HW_CAP_TPC);
+ memset(goya->events_stat, 0, sizeof(goya->events_stat));
+
+ if (!hdev->pldm) {
+ int rc;
+		/* In case we are running inside a VM and the VM is
+		 * shutting down, we need to make sure the CPU boot-loader
+		 * is running before we can continue the VM shutdown.
+		 * That is because the VM will send an FLR signal that
+		 * we must answer
+		 */
+ dev_info(hdev->dev,
+ "Going to wait up to %ds for CPU boot loader\n",
+ GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmPSOC_GLOBAL_CONF_WARM_REBOOT,
+ status,
+ (status == CPU_BOOT_STATUS_DRAM_RDY),
+ 10000,
+ GOYA_CPU_TIMEOUT_USEC);
+ if (rc)
+ dev_err(hdev->dev,
+ "failed to wait for CPU boot loader\n");
+ }
+}
+
+int goya_suspend(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = goya_stop_internal_queues(hdev);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop internal queues\n");
+ return rc;
+ }
+
+ rc = goya_stop_external_queues(hdev);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to stop external queues\n");
+ return rc;
+ }
+
+ rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ if (rc)
+ dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
+
+ return rc;
+}
+
+int goya_resume(struct hl_device *hdev)
+{
+ int rc;
+
+ goya_resume_external_queues(hdev);
+ goya_resume_internal_queues(hdev);
+
+ rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ if (rc)
+ dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ return rc;
+}
+
+static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+ u64 kaddress, phys_addr_t paddress, u32 size)
+{
+ int rc;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE;
+
+ rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+		dev_err(hdev->dev, "remap_pfn_range error %d\n", rc);
+
+ return rc;
+}
+
+static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
+{
+ u32 db_reg_offset, db_value;
+ bool invalid_queue = false;
+
+ switch (hw_queue_id) {
+ case GOYA_QUEUE_ID_DMA_0:
+ db_reg_offset = mmDMA_QM_0_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_DMA_1:
+ db_reg_offset = mmDMA_QM_1_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_DMA_2:
+ db_reg_offset = mmDMA_QM_2_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_DMA_3:
+ db_reg_offset = mmDMA_QM_3_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_DMA_4:
+ db_reg_offset = mmDMA_QM_4_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_CPU_PQ:
+ if (hdev->cpu_queues_enable)
+ db_reg_offset = mmCPU_IF_PF_PQ_PI;
+ else
+ invalid_queue = true;
+ break;
+
+ case GOYA_QUEUE_ID_MME:
+ db_reg_offset = mmMME_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC0:
+ db_reg_offset = mmTPC0_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC1:
+ db_reg_offset = mmTPC1_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC2:
+ db_reg_offset = mmTPC2_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC3:
+ db_reg_offset = mmTPC3_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC4:
+ db_reg_offset = mmTPC4_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC5:
+ db_reg_offset = mmTPC5_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC6:
+ db_reg_offset = mmTPC6_QM_PQ_PI;
+ break;
+
+ case GOYA_QUEUE_ID_TPC7:
+ db_reg_offset = mmTPC7_QM_PQ_PI;
+ break;
+
+ default:
+ invalid_queue = true;
+ }
+
+ if (invalid_queue) {
+ /* Should never get here */
+ dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
+ hw_queue_id);
+ return;
+ }
+
+ db_value = pi;
+
+ /* ring the doorbell */
+ WREG32(db_reg_offset, db_value);
+
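+	/*
+	 * For the CPU queue, also raise an interrupt towards the device CPU
+	 * to notify it of the PI update
+	 */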
+ if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_PI_UPDATE);
+}
+
+void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
+{
+ /* Not needed in Goya */
+}
+
+static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
+}
+
+static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
+}
+
+void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
+ dma_addr_t *dma_handle, u16 *queue_len)
+{
+ void *base;
+ u32 offset;
+
+ *dma_handle = hdev->asic_prop.sram_base_address;
+
+ base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+
+ switch (queue_id) {
+ case GOYA_QUEUE_ID_MME:
+ offset = MME_QMAN_BASE_OFFSET;
+ *queue_len = MME_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC0:
+ offset = TPC0_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC1:
+ offset = TPC1_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC2:
+ offset = TPC2_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC3:
+ offset = TPC3_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC4:
+ offset = TPC4_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC5:
+ offset = TPC5_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC6:
+ offset = TPC6_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ case GOYA_QUEUE_ID_TPC7:
+ offset = TPC7_QMAN_BASE_OFFSET;
+ *queue_len = TPC_QMAN_LENGTH;
+ break;
+ default:
+ dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
+ return NULL;
+ }
+
+ base += offset;
+ *dma_handle += offset;
+
+ return base;
+}
+
+static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct packet_msg_prot *fence_pkt;
+ u32 *fence_ptr;
+ dma_addr_t fence_dma_addr;
+ struct hl_cb *cb;
+ u32 tmp, timeout;
+ int rc;
+
+ if (hdev->pldm)
+ timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
+ else
+ timeout = HL_DEVICE_TIMEOUT_USEC;
+
+ if (!hdev->asic_funcs->is_device_idle(hdev)) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't send KMD job on QMAN0 if device is not idle\n");
+ return -EBUSY;
+ }
+
+ fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ &fence_dma_addr);
+ if (!fence_ptr) {
+ dev_err(hdev->dev,
+ "Failed to allocate fence memory for QMAN0\n");
+ return -ENOMEM;
+ }
+
+ *fence_ptr = 0;
+
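+	/*
+	 * Temporarily mark DMA QMAN0 as fully trusted while the KMD job runs;
+	 * it is restored to partly trusted after the job completes
+	 */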
+ if (goya->hw_cap_initialized & HW_CAP_MMU) {
+ WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
+ RREG32(mmDMA_QM_0_GLBL_PROT);
+ }
+
+	/*
+	 * The Goya CS parser saves space for 2 x packet_msg_prot at the end
+	 * of the CB. For synchronized kernel jobs we only need space for one
+	 * packet_msg_prot
+	 */
+ job->job_cb_size -= sizeof(struct packet_msg_prot);
+
+ cb = job->patched_cb;
+
+ fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
+ job->job_cb_size - sizeof(struct packet_msg_prot));
+
+ tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_CTL_EB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT);
+ fence_pkt->ctl = cpu_to_le32(tmp);
+ fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr +
+ hdev->asic_prop.host_phys_base_address);
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
+ job->job_cb_size, cb->bus_address);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
+ goto free_fence_ptr;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout,
+ &tmp);
+
+ hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
+
+ if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
+ dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
+ rc = -ETIMEDOUT;
+ }
+
+free_fence_ptr:
+ hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
+ fence_dma_addr);
+
+ if (goya->hw_cap_initialized & HW_CAP_MMU) {
+ WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
+ RREG32(mmDMA_QM_0_GLBL_PROT);
+ }
+
+ return rc;
+}
+
+int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
+ u32 timeout, long *result)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct armcp_packet *pkt;
+ dma_addr_t pkt_dma_addr;
+ u32 tmp;
+ int rc = 0;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
+ if (result)
+ *result = 0;
+ return 0;
+ }
+
+ if (len > CPU_CB_SIZE) {
+ dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
+ len);
+ return -ENOMEM;
+ }
+
+ pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
+ &pkt_dma_addr);
+ if (!pkt) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for packet to CPU\n");
+ return -ENOMEM;
+ }
+
+ memcpy(pkt, msg, len);
+
+ mutex_lock(&hdev->send_cpu_message_lock);
+
+ if (hdev->disabled)
+ goto out;
+
+ if (hdev->device_cpu_disabled) {
+ rc = -EIO;
+ goto out;
+ }
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
+ pkt_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
+ goto out;
+ }
+
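+	/*
+	 * The device CPU signals completion by writing ARMCP_PACKET_FENCE_VAL
+	 * into the packet's fence field
+	 */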
+ rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
+ timeout, &tmp);
+
+ hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
+ hdev->device_cpu_disabled = true;
+ goto out;
+ }
+
+ if (tmp == ARMCP_PACKET_FENCE_VAL) {
+ u32 ctl = le32_to_cpu(pkt->ctl);
+
+ rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
+ if (rc) {
+ dev_err(hdev->dev,
+ "F/W ERROR %d for CPU packet %d\n",
+ rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
+ >> ARMCP_PKT_CTL_OPCODE_SHIFT);
+ rc = -EINVAL;
+ } else if (result) {
+ *result = (long) le64_to_cpu(pkt->result);
+ }
+ } else {
+ dev_err(hdev->dev, "CPU packet wrong fence value\n");
+ rc = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
+
+ return rc;
+}
+
+int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
+{
+ struct packet_msg_prot *fence_pkt;
+ dma_addr_t pkt_dma_addr;
+ u32 fence_val, tmp;
+ dma_addr_t fence_dma_addr;
+ u32 *fence_ptr;
+ int rc;
+
+ fence_val = GOYA_QMAN0_FENCE_VAL;
+
+ fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ &fence_dma_addr);
+ if (!fence_ptr) {
+ dev_err(hdev->dev,
+ "Failed to allocate memory for queue testing\n");
+ return -ENOMEM;
+ }
+
+ *fence_ptr = 0;
+
+ fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
+ sizeof(struct packet_msg_prot),
+ GFP_KERNEL, &pkt_dma_addr);
+ if (!fence_pkt) {
+ dev_err(hdev->dev,
+ "Failed to allocate packet for queue testing\n");
+ rc = -ENOMEM;
+ goto free_fence_ptr;
+ }
+
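+	/*
+	 * Build a MSG_PROT packet that writes the fence value to the fence
+	 * buffer we allocated above
+	 */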
+ tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_CTL_EB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT);
+ fence_pkt->ctl = cpu_to_le32(tmp);
+ fence_pkt->value = cpu_to_le32(fence_val);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr +
+ hdev->asic_prop.host_phys_base_address);
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
+ sizeof(struct packet_msg_prot),
+ pkt_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send fence packet\n");
+ goto free_pkt;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
+ GOYA_TEST_QUEUE_WAIT_USEC, &tmp);
+
+ hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
+
+ if ((!rc) && (tmp == fence_val)) {
+ dev_info(hdev->dev,
+ "queue test on H/W queue %d succeeded\n",
+ hw_queue_id);
+ } else {
+ dev_err(hdev->dev,
+ "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
+ hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
+ rc = -EINVAL;
+ }
+
+free_pkt:
+ hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
+ pkt_dma_addr);
+free_fence_ptr:
+ hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
+ fence_dma_addr);
+ return rc;
+}
+
+int goya_test_cpu_queue(struct hl_device *hdev)
+{
+ struct armcp_packet test_pkt;
+ long result;
+ int rc;
+
+ /* cpu_queues_enable flag is always checked in send cpu message */
+
+ memset(&test_pkt, 0, sizeof(test_pkt));
+
+ test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
+ sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (!rc) {
+ if (result == ARMCP_PACKET_FENCE_VAL)
+ dev_info(hdev->dev,
+ "queue test on CPU queue succeeded\n");
+ else
+ dev_err(hdev->dev,
+ "CPU queue test failed (0x%08lX)\n", result);
+ } else {
+ dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
+ }
+
+ return rc;
+}
+
+static int goya_test_queues(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i, rc, ret_val = 0;
+
+ for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
+ rc = goya_test_queue(hdev, i);
+ if (rc)
+ ret_val = -EINVAL;
+ }
+
+ if (hdev->cpu_queues_enable) {
+ rc = goya->test_cpu_queue(hdev);
+ if (rc)
+ ret_val = -EINVAL;
+ }
+
+ return ret_val;
+}
+
+static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma_handle)
+{
+ if (size > GOYA_DMA_POOL_BLK_SIZE)
+ return NULL;
+
+ return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
+}
+
+static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
+ dma_addr_t dma_addr)
+{
+ dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
+}
+
+static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
+ size_t size, dma_addr_t *dma_handle)
+{
+ u64 kernel_addr;
+
+ /* roundup to CPU_PKT_SIZE */
+ size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
+
+ kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
+
+ *dma_handle = hdev->cpu_accessible_dma_address +
+ (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
+
+ return (void *) (uintptr_t) kernel_addr;
+}
+
+static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
+ size_t size, void *vaddr)
+{
+ /* roundup to CPU_PKT_SIZE */
+ size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
+
+ gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
+ size);
+}
+
+static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
+}
+
+u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
+{
+ struct scatterlist *sg, *sg_next_iter;
+ u32 count, dma_desc_cnt;
+ u64 len, len_next;
+ dma_addr_t addr, addr_next;
+
+ dma_desc_cnt = 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+
+ len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+
+ if (len == 0)
+ break;
+
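+		/*
+		 * Merge physically contiguous entries, as long as the merged
+		 * size does not exceed the DMA max transfer size
+		 */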
+ while ((count + 1) < sgt->nents) {
+ sg_next_iter = sg_next(sg);
+ len_next = sg_dma_len(sg_next_iter);
+ addr_next = sg_dma_address(sg_next_iter);
+
+ if (len_next == 0)
+ break;
+
+ if ((addr + len == addr_next) &&
+ (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
+ len += len_next;
+ count++;
+ sg = sg_next_iter;
+ } else {
+ break;
+ }
+ }
+
+ dma_desc_cnt++;
+ }
+
+ return dma_desc_cnt * sizeof(struct packet_lin_dma);
+}
+
+static int goya_pin_memory_before_cs(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ u64 addr, enum dma_data_direction dir)
+{
+ struct hl_userptr *userptr;
+ int rc;
+
+ if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
+ parser->job_userptr_list, &userptr))
+ goto already_pinned;
+
+ userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
+ if (!userptr)
+ return -ENOMEM;
+
+ rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
+ userptr);
+ if (rc)
+ goto free_userptr;
+
+ list_add_tail(&userptr->job_node, parser->job_userptr_list);
+
+ rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents, dir);
+ if (rc) {
+ dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+ goto unpin_memory;
+ }
+
+ userptr->dma_mapped = true;
+ userptr->dir = dir;
+
+already_pinned:
+ parser->patched_cb_size +=
+ goya_get_dma_desc_list_size(hdev, userptr->sgt);
+
+ return 0;
+
+unpin_memory:
+ hl_unpin_host_memory(hdev, userptr);
+free_userptr:
+ kfree(userptr);
+ return rc;
+}
+
+static int goya_validate_dma_pkt_host(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ u64 device_memory_addr, addr;
+ enum dma_data_direction dir;
+ enum goya_dma_direction user_dir;
+ bool sram_addr = true;
+ bool skip_host_mem_pin = false;
+ bool user_memset;
+ u32 ctl;
+ int rc = 0;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+ user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+ switch (user_dir) {
+ case DMA_HOST_TO_DRAM:
+ dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
+ dir = DMA_TO_DEVICE;
+ sram_addr = false;
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ if (user_memset)
+ skip_host_mem_pin = true;
+ break;
+
+ case DMA_DRAM_TO_HOST:
+ dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
+ dir = DMA_FROM_DEVICE;
+ sram_addr = false;
+ addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ break;
+
+ case DMA_HOST_TO_SRAM:
+ dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
+ dir = DMA_TO_DEVICE;
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ if (user_memset)
+ skip_host_mem_pin = true;
+ break;
+
+ case DMA_SRAM_TO_HOST:
+ dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
+ dir = DMA_FROM_DEVICE;
+ addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ break;
+ default:
+ dev_err(hdev->dev, "DMA direction is undefined\n");
+ return -EFAULT;
+ }
+
+ if (parser->ctx_id != HL_KERNEL_ASID_ID) {
+ if (sram_addr) {
+ if (!hl_mem_area_inside_range(device_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.sram_user_base_address,
+ hdev->asic_prop.sram_end_address)) {
+
+ dev_err(hdev->dev,
+ "SRAM address 0x%llx + 0x%x is invalid\n",
+ device_memory_addr,
+ user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+ } else {
+ if (!hl_mem_area_inside_range(device_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.dram_user_base_address,
+ hdev->asic_prop.dram_end_address)) {
+
+ dev_err(hdev->dev,
+ "DRAM address 0x%llx + 0x%x is invalid\n",
+ device_memory_addr,
+ user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+ }
+ }
+
+ if (skip_host_mem_pin)
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+ else {
+ if ((dir == DMA_TO_DEVICE) &&
+ (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
+ dev_err(hdev->dev,
+ "Can't DMA from host on queue other than 1\n");
+ return -EFAULT;
+ }
+
+ rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
+ addr, dir);
+ }
+
+ return rc;
+}
+
+static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ u64 sram_memory_addr, dram_memory_addr;
+ enum goya_dma_direction user_dir;
+ u32 ctl;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+ if (user_dir == DMA_DRAM_TO_SRAM) {
+ dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
+ dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ } else {
+ dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
+ sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ }
+
+ if (!hl_mem_area_inside_range(sram_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.sram_user_base_address,
+ hdev->asic_prop.sram_end_address)) {
+ dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
+ sram_memory_addr, user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+
+ if (!hl_mem_area_inside_range(dram_memory_addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.dram_user_base_address,
+ hdev->asic_prop.dram_end_address)) {
+ dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
+ dram_memory_addr, user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+
+ return 0;
+}
+
+static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ enum goya_dma_direction user_dir;
+ u32 ctl;
+ int rc;
+
+ dev_dbg(hdev->dev, "DMA packet details:\n");
+ dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
+ dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
+ dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+ /*
+ * Special handling for DMA with size 0. The H/W has a bug where
+ * this can cause the QMAN DMA to get stuck, so block it here.
+ */
+ if (user_dma_pkt->tsize == 0) {
+ dev_err(hdev->dev,
+ "Got DMA with size 0, might reset the device\n");
+ return -EINVAL;
+ }
+
+ if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
+ rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
+ else
+ rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
+
+ return rc;
+}
+
+static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ dev_dbg(hdev->dev, "DMA packet details:\n");
+ dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
+ dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
+ dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+
+ /*
+ * WA for HW-23.
+ * We can't allow the user to read from the host using QMANs other than 1.
+ */
+ if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+ hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
+ le32_to_cpu(user_dma_pkt->tsize),
+ hdev->asic_prop.va_space_host_start_address,
+ hdev->asic_prop.va_space_host_end_address)) {
+ dev_err(hdev->dev,
+ "Can't DMA from host on queue other then 1\n");
+ return -EFAULT;
+ }
+
+ if (user_dma_pkt->tsize == 0) {
+ dev_err(hdev->dev,
+ "Got DMA with size 0, might reset the device\n");
+ return -EINVAL;
+ }
+
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+
+ return 0;
+}
+
+static int goya_validate_wreg32(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_wreg32 *wreg_pkt)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 sob_start_addr, sob_end_addr;
+ u16 reg_offset;
+
+ reg_offset = le32_to_cpu(wreg_pkt->ctl) &
+ GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
+
+ dev_dbg(hdev->dev, "WREG32 packet details:\n");
+ dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
+ dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
+
+ if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
+ dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
+ reg_offset);
+ return -EPERM;
+ }
+
+ /*
+ * With MMU, DMA channels are not secured, so it doesn't matter where
+ * the WR COMP is written, because it goes out with the non-secured
+ * property
+ */
+ if (goya->hw_cap_initialized & HW_CAP_MMU)
+ return 0;
+
+ sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+ sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
+
+ if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
+ (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
+
+ dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
+ wreg_pkt->value);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int goya_validate_cb(struct hl_device *hdev,
+ struct hl_cs_parser *parser, bool is_mmu)
+{
+ u32 cb_parsed_length = 0;
+ int rc = 0;
+
+ parser->patched_cb_size = 0;
+
+ /* user_cb_size is more than 0 so the loop will always execute */
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ void *user_pkt;
+
+ user_pkt = (void *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+
+ pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+ pkt_size = goya_packet_sizes[pkt_id];
+ cb_parsed_length += pkt_size;
+ if (cb_parsed_length > parser->user_cb_size) {
+ dev_err(hdev->dev,
+ "packet 0x%x is out of CB boundary\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ switch (pkt_id) {
+ case PACKET_WREG_32:
+ /*
+ * Although it is validated after copy in patch_cb(),
+ * need to validate here as well because patch_cb() is
+ * not called in MMU path while this function is called
+ */
+ rc = goya_validate_wreg32(hdev, parser, user_pkt);
+ break;
+
+ case PACKET_WREG_BULK:
+ dev_err(hdev->dev,
+ "User not allowed to use WREG_BULK\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_MSG_PROT:
+ dev_err(hdev->dev,
+ "User not allowed to use MSG_PROT\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_CP_DMA:
+ dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_STOP:
+ dev_err(hdev->dev, "User not allowed to use STOP\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_LIN_DMA:
+ if (is_mmu)
+ rc = goya_validate_dma_pkt_mmu(hdev, parser,
+ user_pkt);
+ else
+ rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
+ user_pkt);
+ break;
+
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_FENCE:
+ case PACKET_NOP:
+ parser->patched_cb_size += pkt_size;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid packet header 0x%x\n",
+ pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ /*
+ * The new CB should have space at the end for two MSG_PROT packets:
+ * 1. A packet that will act as a completion packet
+ * 2. A packet that will generate an MSI-X interrupt
+ */
+ parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
+
+ return rc;
+}
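+
+/*
+ * Size accounting example (illustrative): for a user CB holding one NOP and
+ * one host-to-SRAM LIN_DMA whose pinned host buffer maps to three merged DMA
+ * descriptors, goya_validate_cb() ends up with patched_cb_size =
+ * goya_packet_sizes[PACKET_NOP] + 3 * sizeof(struct packet_lin_dma) +
+ * 2 * sizeof(struct packet_msg_prot) for the trailing completion packets.
+ */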
+
+static int goya_patch_dma_packet(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ struct packet_lin_dma *new_dma_pkt,
+ u32 *new_dma_pkt_size)
+{
+ struct hl_userptr *userptr;
+ struct scatterlist *sg, *sg_next_iter;
+ u32 count, dma_desc_cnt;
+ u64 len, len_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ enum goya_dma_direction user_dir;
+ u64 device_memory_addr, addr;
+ enum dma_data_direction dir;
+ struct sg_table *sgt;
+ bool skip_host_mem_pin = false;
+ bool user_memset;
+ u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+ user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+ GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+ if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
+ (user_dma_pkt->tsize == 0)) {
+ memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
+ *new_dma_pkt_size = sizeof(*new_dma_pkt);
+ return 0;
+ }
+
+ if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ dir = DMA_TO_DEVICE;
+ if (user_memset)
+ skip_host_mem_pin = true;
+ } else {
+ addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ dir = DMA_FROM_DEVICE;
+ }
+
+ if ((!skip_host_mem_pin) &&
+ (hl_userptr_is_pinned(hdev, addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ parser->job_userptr_list, &userptr) == false)) {
+ dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
+ addr, user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+
+ if ((user_memset) && (dir == DMA_TO_DEVICE)) {
+ memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
+ *new_dma_pkt_size = sizeof(*user_dma_pkt);
+ return 0;
+ }
+
+ user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
+
+ user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
+
+ sgt = userptr->sgt;
+ dma_desc_cnt = 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg_dma_len(sg);
+ dma_addr = sg_dma_address(sg);
+
+ if (len == 0)
+ break;
+
+ while ((count + 1) < sgt->nents) {
+ sg_next_iter = sg_next(sg);
+ len_next = sg_dma_len(sg_next_iter);
+ dma_addr_next = sg_dma_address(sg_next_iter);
+
+ if (len_next == 0)
+ break;
+
+ if ((dma_addr + len == dma_addr_next) &&
+ (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
+ len += len_next;
+ count++;
+ sg = sg_next_iter;
+ } else {
+ break;
+ }
+ }
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ if (likely(dma_desc_cnt))
+ ctl &= ~GOYA_PKT_CTL_EB_MASK;
+ ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
+ GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
+ new_dma_pkt->ctl = cpu_to_le32(ctl);
+ new_dma_pkt->tsize = cpu_to_le32((u32) len);
+
+ dma_addr += hdev->asic_prop.host_phys_base_address;
+
+ if (dir == DMA_TO_DEVICE) {
+ new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
+ new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
+ } else {
+ new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
+ new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
+ }
+
+ if (!user_memset)
+ device_memory_addr += len;
+ dma_desc_cnt++;
+ new_dma_pkt++;
+ }
+
+ if (!dma_desc_cnt) {
+ dev_err(hdev->dev,
+ "Got 0 SG entries when patching DMA packet\n");
+ return -EFAULT;
+ }
+
+ /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
+ new_dma_pkt--;
+ new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
+
+ *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
+
+ return 0;
+}
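+
+/*
+ * Control-bit handling across a split transfer (illustrative): if the user
+ * packet is expanded into three descriptors, descriptor 0 keeps the EB bit
+ * while descriptors 1 and 2 have it cleared, RDCOMP/WRCOMP are cleared on
+ * all three while patching, and the user's original RDCOMP/WRCOMP masks are
+ * restored on descriptor 2 only, so completion is signalled once, after the
+ * last chunk.
+ */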
+
+static int goya_patch_cb(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u32 cb_parsed_length = 0;
+ u32 cb_patched_cur_length = 0;
+ int rc = 0;
+
+ /* user_cb_size is more than 0 so the loop will always execute */
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ u32 new_pkt_size = 0;
+ void *user_pkt, *kernel_pkt;
+
+ user_pkt = (void *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+ kernel_pkt = (void *) (uintptr_t)
+ (parser->patched_cb->kernel_address +
+ cb_patched_cur_length);
+
+ pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+ pkt_size = goya_packet_sizes[pkt_id];
+ cb_parsed_length += pkt_size;
+ if (cb_parsed_length > parser->user_cb_size) {
+ dev_err(hdev->dev,
+ "packet 0x%x is out of CB boundary\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ switch (pkt_id) {
+ case PACKET_LIN_DMA:
+ rc = goya_patch_dma_packet(hdev, parser, user_pkt,
+ kernel_pkt, &new_pkt_size);
+ cb_patched_cur_length += new_pkt_size;
+ break;
+
+ case PACKET_WREG_32:
+ memcpy(kernel_pkt, user_pkt, pkt_size);
+ cb_patched_cur_length += pkt_size;
+ rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
+ break;
+
+ case PACKET_WREG_BULK:
+ dev_err(hdev->dev,
+ "User not allowed to use WREG_BULK\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_MSG_PROT:
+ dev_err(hdev->dev,
+ "User not allowed to use MSG_PROT\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_CP_DMA:
+ dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_STOP:
+ dev_err(hdev->dev, "User not allowed to use STOP\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_FENCE:
+ case PACKET_NOP:
+ memcpy(kernel_pkt, user_pkt, pkt_size);
+ cb_patched_cur_length += pkt_size;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid packet header 0x%x\n",
+ pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int goya_parse_cb_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u64 patched_cb_handle;
+ u32 patched_cb_size;
+ struct hl_cb *user_cb;
+ int rc;
+
+ /*
+ * The new CB should have space at the end for two MSG_PROT packets:
+ * 1. A packet that will act as a completion packet
+ * 2. A packet that will generate an MSI-X interrupt
+ */
+ parser->patched_cb_size = parser->user_cb_size +
+ sizeof(struct packet_msg_prot) * 2;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+ parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to allocate patched CB for DMA CS %d\n",
+ rc);
+ return rc;
+ }
+
+ patched_cb_handle >>= PAGE_SHIFT;
+ parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+ (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
+ if (!parser->patched_cb) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * The check that parser->user_cb_size <= parser->user_cb->size was done
+ * in validate_queue_index().
+ */
+ memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
+ (void *) (uintptr_t) parser->user_cb->kernel_address,
+ parser->user_cb_size);
+
+ patched_cb_size = parser->patched_cb_size;
+
+ /* validate patched CB instead of user CB */
+ user_cb = parser->user_cb;
+ parser->user_cb = parser->patched_cb;
+ rc = goya_validate_cb(hdev, parser, true);
+ parser->user_cb = user_cb;
+
+ if (rc) {
+ hl_cb_put(parser->patched_cb);
+ goto out;
+ }
+
+ if (patched_cb_size != parser->patched_cb_size) {
+ dev_err(hdev->dev, "user CB size mismatch\n");
+ hl_cb_put(parser->patched_cb);
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ /*
+ * Always call cb destroy here because we still hold 1 reference
+ * to it from the earlier cb_get. After the job is completed,
+ * cb_put will release it, but here we want to remove it from the
+ * IDR
+ */
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+ patched_cb_handle << PAGE_SHIFT);
+
+ return rc;
+}
+
+static int goya_parse_cb_no_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u64 patched_cb_handle;
+ int rc;
+
+ rc = goya_validate_cb(hdev, parser, false);
+
+ if (rc)
+ goto free_userptr;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+ parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to allocate patched CB for DMA CS %d\n", rc);
+ goto free_userptr;
+ }
+
+ patched_cb_handle >>= PAGE_SHIFT;
+ parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+ (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
+ if (!parser->patched_cb) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = goya_patch_cb(hdev, parser);
+
+ if (rc)
+ hl_cb_put(parser->patched_cb);
+
+out:
+ /*
+ * Always call cb destroy here because we still hold 1 reference
+ * to it from the earlier cb_get. After the job is completed,
+ * cb_put will release it, but here we want to remove it from the
+ * IDR
+ */
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+ patched_cb_handle << PAGE_SHIFT);
+
+free_userptr:
+ if (rc)
+ hl_userptr_delete_list(hdev, parser->job_userptr_list);
+ return rc;
+}
+
+static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
+ /* For internal queue jobs, just check if cb address is valid */
+ if (hl_mem_area_inside_range(
+ (u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->sram_user_base_address,
+ asic_prop->sram_end_address))
+ return 0;
+
+ if (hl_mem_area_inside_range(
+ (u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->dram_user_base_address,
+ asic_prop->dram_end_address))
+ return 0;
+
+ dev_err(hdev->dev,
+ "Internal CB address %px + 0x%x is neither in SRAM nor in DRAM\n",
+ parser->user_cb, parser->user_cb_size);
+
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!parser->ext_queue)
+ return goya_parse_cb_no_ext_queue(hdev, parser);
+
+ if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
+ return goya_parse_cb_mmu(hdev, parser);
+ else
+ return goya_parse_cb_no_mmu(hdev, parser);
+}
+
+void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
+ u32 cq_val, u32 msix_vec)
+{
+ struct packet_msg_prot *cq_pkt;
+ u32 tmp;
+
+ cq_pkt = (struct packet_msg_prot *) (uintptr_t)
+ (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
+
+ tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_CTL_EB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT);
+ cq_pkt->ctl = cpu_to_le32(tmp);
+ cq_pkt->value = cpu_to_le32(cq_val);
+ cq_pkt->addr = cpu_to_le64(cq_addr);
+
+ cq_pkt++;
+
+ tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT);
+ cq_pkt->ctl = cpu_to_le32(tmp);
+ cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
+ cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
+}
+
+static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
+{
+ WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
+}
+
+static void goya_restore_phase_topology(struct hl_device *hdev)
+{
+ int i, num_of_sob_in_longs, num_of_mon_in_longs;
+
+ num_of_sob_in_longs =
+ ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
+
+ num_of_mon_in_longs =
+ ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
+
+ for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
+ WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
+
+ for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
+ WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
+
+ /* Flush all WREG to prevent race */
+ i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
+}
+
+/*
+ * goya_debugfs_read32 - read a 32bit value from a given device address
+ *
+ * @hdev: pointer to hl_device structure
+ * @addr: address in device
+ * @val: returned value
+ *
+ * For a DDR address that is not mapped into the default aperture exposed by
+ * the DDR bar, the function configures the iATU so that the DDR bar is
+ * positioned at a base address that allows reading from the required
+ * address. Configuring the iATU during normal operation can lead to
+ * undefined behavior and should therefore be done with extreme care.
+ *
+ */
+static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
+ *val = RREG32(addr - CFG_BASE);
+
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
+
+ *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+
+ } else if ((addr >= DRAM_PHYS_BASE) &&
+ (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (!rc) {
+ *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - bar_base_addr));
+
+ rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
+ (MMU_PAGE_TABLES_ADDR &
+ ~(prop->dram_pci_bar_size - 0x1ull)));
+ }
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
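+
+/*
+ * BAR repositioning example (illustrative; assumes a 512MB DDR PCI bar and
+ * DRAM_PHYS_BASE of 0): reading device address 0x23456789 sets bar_base_addr
+ * to 0x20000000 (the address rounded down to a bar-size boundary), reads
+ * offset 0x03456789 inside the bar, and then moves the bar back so that
+ * MMU_PAGE_TABLES_ADDR is reachable again, as the code above does after
+ * every such access.
+ */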
+
+/*
+ * goya_debugfs_write32 - write a 32bit value to a given device address
+ *
+ * @hdev: pointer to hl_device structure
+ * @addr: address in device
+ * @val: value to write
+ *
+ * For a DDR address that is not mapped into the default aperture exposed by
+ * the DDR bar, the function configures the iATU so that the DDR bar is
+ * positioned at a base address that allows writing to the required
+ * address. Configuring the iATU during normal operation can lead to
+ * undefined behavior and should therefore be done with extreme care.
+ *
+ */
+static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
+ WREG32(addr - CFG_BASE, val);
+
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
+
+ writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+
+ } else if ((addr >= DRAM_PHYS_BASE) &&
+ (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (!rc) {
+ writel(val, hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - bar_base_addr));
+
+ rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
+ (MMU_PAGE_TABLES_ADDR &
+ ~(prop->dram_pci_bar_size - 0x1ull)));
+ }
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ return readq(hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - goya->ddr_bar_cur_addr));
+}
+
+static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - goya->ddr_bar_cur_addr));
+}
+
+static const char *_goya_get_event_desc(u16 event_type)
+{
+ switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
+ return "PCIe_dec";
+ case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
+ return "TPC%d_dec";
+ case GOYA_ASYNC_EVENT_ID_MME_WACS:
+ return "MME_wacs";
+ case GOYA_ASYNC_EVENT_ID_MME_WACSD:
+ return "MME_wacsd";
+ case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
+ return "CPU_axi_splitter";
+ case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
+ return "PSOC_axi_dec";
+ case GOYA_ASYNC_EVENT_ID_PSOC:
+ return "PSOC";
+ case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
+ return "TPC%d_krn_err";
+ case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
+ return "TPC%d_cq";
+ case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
+ return "TPC%d_qm";
+ case GOYA_ASYNC_EVENT_ID_MME_QM:
+ return "MME_qm";
+ case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
+ return "MME_cq";
+ case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
+ return "DMA%d_qm";
+ case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
+ return "DMA%d_ch";
+ default:
+ return "N/A";
+ }
+}
+
+static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
+{
+ u8 index;
+
+ switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
+ index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
+ index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
+ index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
+ index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
+ index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
+ index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
+ snprintf(desc, size, _goya_get_event_desc(event_type), index);
+ break;
+ default:
+ snprintf(desc, size, _goya_get_event_desc(event_type));
+ break;
+ }
+}
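+
+/*
+ * Index derivation example (illustrative): GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR
+ * resolves to index (TPC3_KRN_ERR - TPC0_KRN_ERR) / 10 = 3, assuming the
+ * per-TPC KRN_ERR event IDs are spaced 10 apart as the divisor implies, and
+ * the "TPC%d_krn_err" template above is then formatted into "TPC3_krn_err".
+ */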
+
+static void goya_print_razwi_info(struct hl_device *hdev)
+{
+ if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
+ dev_err(hdev->dev, "Illegal write to LBW\n");
+ WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
+ }
+
+ if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
+ dev_err(hdev->dev, "Illegal read from LBW\n");
+ WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
+ }
+
+ if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
+ dev_err(hdev->dev, "Illegal write to HBW\n");
+ WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
+ }
+
+ if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
+ dev_err(hdev->dev, "Illegal read from HBW\n");
+ WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
+ }
+}
+
+static void goya_print_mmu_error_info(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u64 addr;
+ u32 val;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
+ if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
+ addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
+ addr <<= 32;
+ addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
+
+ dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
+
+ WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
+ }
+}
+
+static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
+{
+ char desc[20] = "";
+
+ goya_get_event_desc(event_type, desc, sizeof(desc));
+ dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
+ event_type, desc);
+
+ goya_print_razwi_info(hdev);
+ goya_print_mmu_error_info(hdev);
+}
+
+static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
+ size_t irq_arr_size)
+{
+ struct armcp_unmask_irq_arr_packet *pkt;
+ size_t total_pkt_size;
+ long result;
+ int rc;
+
+ total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
+ irq_arr_size;
+
+ /* data should be aligned to 8 bytes in order for ArmCP to copy it */
+ total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
+
+ /* total_pkt_size is cast to u16 later on */
+ if (total_pkt_size > USHRT_MAX) {
+ dev_err(hdev->dev, "too many elements in IRQ array\n");
+ return -EINVAL;
+ }
+
+ pkt = kzalloc(total_pkt_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
+ memcpy(&pkt->irqs, irq_arr, irq_arr_size);
+
+ pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
+ total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to unmask IRQ array\n");
+
+ kfree(pkt);
+
+ return rc;
+}
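+
+/*
+ * Alignment example (illustrative): (total_pkt_size + 0x7) & ~0x7 rounds the
+ * size up to the next multiple of 8, e.g. a raw size of 44 bytes is padded
+ * to 48 bytes so ArmCP can copy the packet with 8-byte accesses.
+ */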
+
+static int goya_soft_reset_late_init(struct hl_device *hdev)
+{
+ /*
+ * Unmask all IRQs since some could have been received
+ * during the soft reset
+ */
+ return goya_unmask_irq_arr(hdev, goya_non_fatal_events,
+ sizeof(goya_non_fatal_events));
+}
+
+static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(event_type);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
+
+ return rc;
+}
+
+void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
+{
+ u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
+ >> EQ_CTL_EVENT_TYPE_SHIFT);
+ struct goya_device *goya = hdev->asic_specific;
+
+ goya->events_stat[event_type]++;
+
+ switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_PCIE_IF:
+ case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
+ case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
+ case GOYA_ASYNC_EVENT_ID_MME_ECC:
+ case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
+ case GOYA_ASYNC_EVENT_ID_MMU_ECC:
+ case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
+ case GOYA_ASYNC_EVENT_ID_DMA_ECC:
+ case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
+ case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
+ case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
+ case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
+ case GOYA_ASYNC_EVENT_ID_GIC500:
+ case GOYA_ASYNC_EVENT_ID_PLL0:
+ case GOYA_ASYNC_EVENT_ID_PLL1:
+ case GOYA_ASYNC_EVENT_ID_PLL3:
+ case GOYA_ASYNC_EVENT_ID_PLL4:
+ case GOYA_ASYNC_EVENT_ID_PLL5:
+ case GOYA_ASYNC_EVENT_ID_PLL6:
+ case GOYA_ASYNC_EVENT_ID_AXI_ECC:
+ case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
+ case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
+ case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
+ dev_err(hdev->dev,
+ "Received H/W interrupt %d, reset the chip\n",
+ event_type);
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
+ case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
+ "Can't DMA from host on queue other than 1\n");
+ case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
+ case GOYA_ASYNC_EVENT_ID_MME_WACS:
+ case GOYA_ASYNC_EVENT_ID_MME_WACSD:
+ case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
+ case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
+ case GOYA_ASYNC_EVENT_ID_PSOC:
+ case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
+ case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
+ case GOYA_ASYNC_EVENT_ID_MME_QM:
+ case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
+ case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
+ case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
+ goya_print_irq_info(hdev, event_type);
+ goya_unmask_irq(hdev, event_type);
+ break;
+
+ case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
+ case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
+ dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
+ event_type);
+ break;
+ }
+}
+
+void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ *size = (u32) sizeof(goya->events_stat);
+
+ return goya->events_stat;
+}
+
+static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
+ u64 val, bool is_dram)
+{
+ struct packet_lin_dma *lin_dma_pkt;
+ struct hl_cs_parser parser;
+ struct hl_cs_job *job;
+ u32 cb_size, ctl;
+ struct hl_cb *cb;
+ int rc;
+
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (!cb)
+ return -EFAULT;
+
+ lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
+
+ memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
+ cb_size = sizeof(*lin_dma_pkt);
+
+ ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
+ (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
+ (1 << GOYA_PKT_CTL_RB_SHIFT) |
+ (1 << GOYA_PKT_CTL_MB_SHIFT));
+ ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
+ GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+ lin_dma_pkt->ctl = cpu_to_le32(ctl);
+
+ lin_dma_pkt->src_addr = cpu_to_le64(val);
+ lin_dma_pkt->dst_addr = cpu_to_le64(addr);
+ lin_dma_pkt->tsize = cpu_to_le32(size);
+
+ job = hl_cs_allocate_job(hdev, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto release_cb;
+ }
+
+ job->id = 0;
+ job->user_cb = cb;
+ job->user_cb->cs_cnt++;
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
+
+ hl_debugfs_add_job(hdev, job);
+
+ parser.ctx_id = HL_KERNEL_ASID_ID;
+ parser.cs_sequence = 0;
+ parser.job_id = job->id;
+ parser.hw_queue_id = job->hw_queue_id;
+ parser.job_userptr_list = &job->userptr_list;
+ parser.user_cb = job->user_cb;
+ parser.user_cb_size = job->user_cb_size;
+ parser.ext_queue = job->ext_queue;
+ parser.use_virt_addr = hdev->mmu_enable;
+
+ rc = hdev->asic_funcs->cs_parser(hdev, &parser);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to parse kernel CB\n");
+ goto free_job;
+ }
+
+ job->patched_cb = parser.patched_cb;
+ job->job_cb_size = parser.patched_cb_size;
+ job->patched_cb->cs_cnt++;
+
+ rc = goya_send_job_on_qman0(hdev, job);
+
+ job->patched_cb->cs_cnt--;
+ hl_cb_put(job->patched_cb);
+
+free_job:
+ hl_userptr_delete_list(hdev, &job->userptr_list);
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ cb->cs_cnt--;
+
+release_cb:
+ hl_cb_put(cb);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ return rc;
+}
+
+static int goya_context_switch(struct hl_device *hdev, u32 asid)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 addr = prop->sram_base_address;
+ u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
+ u64 val = 0x7777777777777777ull;
+ int rc;
+
+ rc = goya_memset_device_memory(hdev, addr, size, val, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
+ return rc;
+ }
+
+ goya_mmu_prepare(hdev, asid);
+
+ return 0;
+}
+
+static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
+ u64 addr = prop->mmu_pgt_addr;
+ u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
+ MMU_CACHE_MNG_SIZE;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return 0;
+
+ return goya_memset_device_memory(hdev, addr, size, 0, true);
+}
+
+static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
+ u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
+ u64 val = 0x9999999999999999ull;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return 0;
+
+ return goya_memset_device_memory(hdev, addr, size, val, true);
+}
+
+static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ int i;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
+ WARN(1, "asid %u is too big\n", asid);
+ return;
+ }
+
+ /* zero the MMBP and ASID bits and then set the ASID */
+ for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
+ WREG32_AND(goya_mmu_regs[i], ~0x7FF);
+ WREG32_OR(goya_mmu_regs[i], asid);
+ }
+}
+
+static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 status, timeout_usec;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ /* no need for L1-only invalidation in Goya */
+ if (!is_hard)
+ return;
+
+ if (hdev->pldm)
+ timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ mutex_lock(&hdev->mmu_cache_lock);
+
+ /* L0 & L1 invalidation */
+ WREG32(mmSTLB_INV_ALL_START, 1);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmSTLB_INV_ALL_START,
+ status,
+ !status,
+ 1000,
+ timeout_usec);
+
+ mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc)
+ dev_notice_ratelimited(hdev->dev,
+ "Timeout when waiting for MMU cache invalidation\n");
+}
+
+static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
+ bool is_hard, u32 asid, u64 va, u64 size)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ u32 status, timeout_usec, inv_data, pi;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ /* no need for L1-only invalidation in Goya */
+ if (!is_hard)
+ return;
+
+ if (hdev->pldm)
+ timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ mutex_lock(&hdev->mmu_cache_lock);
+
+ /*
+ * TODO: currently invalidate the entire L0 & L1 as in a regular hard
+ * invalidation. Need to apply invalidation of specific cache lines with
+ * a mask of ASID & VA & size.
+ * Note that L1 will be flushed entirely in any case.
+ */
+
+ /* L0 & L1 invalidation */
+ inv_data = RREG32(mmSTLB_CACHE_INV);
+ /* PI is 8 bit */
+ pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
+ WREG32(mmSTLB_CACHE_INV,
+ (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmSTLB_INV_CONSUMER_INDEX,
+ status,
+ status == pi,
+ 1000,
+ timeout_usec);
+
+ mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc)
+ dev_notice_ratelimited(hdev->dev,
+ "Timeout when waiting for MMU cache invalidation\n");
+}
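+
+/*
+ * Producer-index example (illustrative): the producer index is 8 bits wide,
+ * so if the producer-index field of STLB_CACHE_INV currently reads 0xFF, pi
+ * wraps to 0x00 and the poll above waits until STLB_INV_CONSUMER_INDEX
+ * reaches 0, i.e. until the MMU has consumed the newly posted invalidation
+ * request.
+ */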
+
+static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
+ u64 phys_addr)
+{
+ u32 status, timeout_usec;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
+ WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
+ WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
+
+ rc = hl_poll_timeout(
+ hdev,
+ MMU_ASID_BUSY,
+ status,
+ !(status & 0x80000000),
+ 1000,
+ timeout_usec);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout during MMU hop0 config of asid %d\n", asid);
+ return rc;
+ }
+
+ return 0;
+}
+
+int goya_send_heartbeat(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct armcp_packet hb_pkt;
+ long result;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ memset(&hb_pkt, 0, sizeof(hb_pkt));
+
+ hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
+ sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
+ rc = -EIO;
+
+ return rc;
+}
+
+static int goya_armcp_info_get(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct armcp_packet pkt;
+ void *armcp_info_cpu_addr;
+ dma_addr_t armcp_info_dma_addr;
+ u64 dram_size;
+ long result;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ armcp_info_cpu_addr =
+ hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ sizeof(struct armcp_info), &armcp_info_dma_addr);
+ if (!armcp_info_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for ArmCP info packet\n");
+ return -ENOMEM;
+ }
+
+ memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(armcp_info_dma_addr +
+ prop->host_phys_base_address);
+ pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ GOYA_ARMCP_INFO_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send armcp info pkt, error %d\n", rc);
+ goto out;
+ }
+
+ memcpy(&prop->armcp_info, armcp_info_cpu_addr,
+ sizeof(prop->armcp_info));
+
+ dram_size = le64_to_cpu(prop->armcp_info.dram_size);
+ if (dram_size) {
+ if ((!is_power_of_2(dram_size)) ||
+ (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
+ dev_err(hdev->dev,
+ "F/W reported invalid DRAM size %llu. Trying to use default size\n",
+ dram_size);
+ dram_size = DRAM_PHYS_DEFAULT_SIZE;
+ }
+
+ prop->dram_size = dram_size;
+ prop->dram_end_address = prop->dram_base_address + dram_size;
+ }
+
+ rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to build hwmon channel info, error %d\n", rc);
+ rc = -EFAULT;
+ goto out;
+ }
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+ sizeof(struct armcp_info), armcp_info_cpu_addr);
+
+ return rc;
+}
+
+static void goya_init_clock_gating(struct hl_device *hdev)
+{
+
+}
+
+static void goya_disable_clock_gating(struct hl_device *hdev)
+{
+
+}
+
+static bool goya_is_device_idle(struct hl_device *hdev)
+{
+ u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
+ int i;
+
+ offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
+
+ for (i = 0 ; i < DMA_MAX_NUM ; i++) {
+ dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;
+
+ if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
+ DMA_QM_IDLE_MASK)
+ return false;
+ }
+
+ offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
+
+ for (i = 0 ; i < TPC_MAX_NUM ; i++) {
+ tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
+ tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
+ tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;
+
+ if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
+ TPC_QM_IDLE_MASK)
+ return false;
+
+ if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
+ TPC_CMDQ_IDLE_MASK)
+ return false;
+
+ if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
+ TPC_CFG_IDLE_MASK)
+ return false;
+ }
+
+ if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
+ MME_QM_IDLE_MASK)
+ return false;
+
+ if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
+ MME_CMDQ_IDLE_MASK)
+ return false;
+
+ if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
+ MME_ARCH_IDLE_MASK)
+ return false;
+
+ if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
+ return false;
+
+ return true;
+}
+
+static void goya_hw_queues_lock(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ spin_lock(&goya->hw_queues_lock);
+}
+
+static void goya_hw_queues_unlock(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ spin_unlock(&goya->hw_queues_lock);
+}
+
+static u32 goya_get_pci_id(struct hl_device *hdev)
+{
+ return hdev->pdev->device;
+}
+
+static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
+ size_t max_size)
+{
+ struct goya_device *goya = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct armcp_packet pkt;
+ void *eeprom_info_cpu_addr;
+ dma_addr_t eeprom_info_dma_addr;
+ long result;
+ int rc;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ eeprom_info_cpu_addr =
+ hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ max_size, &eeprom_info_dma_addr);
+ if (!eeprom_info_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for EEPROM info packet\n");
+ return -ENOMEM;
+ }
+
+ memset(eeprom_info_cpu_addr, 0, max_size);
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
+ prop->host_phys_base_address);
+ pkt.data_max_size = cpu_to_le32(max_size);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ GOYA_ARMCP_EEPROM_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send armcp EEPROM pkt, error %d\n", rc);
+ goto out;
+ }
+
+ /* result contains the actual size */
+ memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
+ eeprom_info_cpu_addr);
+
+ return rc;
+}
+
+static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
+{
+ return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
+}
+
+static const struct hl_asic_funcs goya_funcs = {
+ .early_init = goya_early_init,
+ .early_fini = goya_early_fini,
+ .late_init = goya_late_init,
+ .late_fini = goya_late_fini,
+ .sw_init = goya_sw_init,
+ .sw_fini = goya_sw_fini,
+ .hw_init = goya_hw_init,
+ .hw_fini = goya_hw_fini,
+ .halt_engines = goya_halt_engines,
+ .suspend = goya_suspend,
+ .resume = goya_resume,
+ .cb_mmap = goya_cb_mmap,
+ .ring_doorbell = goya_ring_doorbell,
+ .flush_pq_write = goya_flush_pq_write,
+ .dma_alloc_coherent = goya_dma_alloc_coherent,
+ .dma_free_coherent = goya_dma_free_coherent,
+ .get_int_queue_base = goya_get_int_queue_base,
+ .test_queues = goya_test_queues,
+ .dma_pool_zalloc = goya_dma_pool_zalloc,
+ .dma_pool_free = goya_dma_pool_free,
+ .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
+ .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
+ .hl_dma_unmap_sg = goya_dma_unmap_sg,
+ .cs_parser = goya_cs_parser,
+ .asic_dma_map_sg = goya_dma_map_sg,
+ .get_dma_desc_list_size = goya_get_dma_desc_list_size,
+ .add_end_of_cb_packets = goya_add_end_of_cb_packets,
+ .update_eq_ci = goya_update_eq_ci,
+ .context_switch = goya_context_switch,
+ .restore_phase_topology = goya_restore_phase_topology,
+ .debugfs_read32 = goya_debugfs_read32,
+ .debugfs_write32 = goya_debugfs_write32,
+ .add_device_attr = goya_add_device_attr,
+ .handle_eqe = goya_handle_eqe,
+ .set_pll_profile = goya_set_pll_profile,
+ .get_events_stat = goya_get_events_stat,
+ .read_pte = goya_read_pte,
+ .write_pte = goya_write_pte,
+ .mmu_invalidate_cache = goya_mmu_invalidate_cache,
+ .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
+ .send_heartbeat = goya_send_heartbeat,
+ .enable_clock_gating = goya_init_clock_gating,
+ .disable_clock_gating = goya_disable_clock_gating,
+ .is_device_idle = goya_is_device_idle,
+ .soft_reset_late_init = goya_soft_reset_late_init,
+ .hw_queues_lock = goya_hw_queues_lock,
+ .hw_queues_unlock = goya_hw_queues_unlock,
+ .get_pci_id = goya_get_pci_id,
+ .get_eeprom_data = goya_get_eeprom_data,
+ .send_cpu_message = goya_send_cpu_message,
+ .get_hw_state = goya_get_hw_state
+};
+
+/*
+ * goya_set_asic_funcs - set Goya function pointers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+void goya_set_asic_funcs(struct hl_device *hdev)
+{
+ hdev->asic_funcs = &goya_funcs;
+}
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
new file mode 100644
index 000000000000..830551b6b062
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYAP_H_
+#define GOYAP_H_
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+#include "include/hl_boot_if.h"
+#include "include/goya/goya_packets.h"
+#include "include/goya/goya.h"
+#include "include/goya/goya_async_events.h"
+#include "include/goya/goya_fw_if.h"
+
+#define NUMBER_OF_CMPLT_QUEUES 5
+#define NUMBER_OF_EXT_HW_QUEUES 5
+#define NUMBER_OF_CPU_HW_QUEUES 1
+#define NUMBER_OF_INT_HW_QUEUES 9
+#define NUMBER_OF_HW_QUEUES (NUMBER_OF_EXT_HW_QUEUES + \
+ NUMBER_OF_CPU_HW_QUEUES + \
+ NUMBER_OF_INT_HW_QUEUES)
+
+/*
+ * Number of MSIX interrupt IDs:
+ * Each completion queue has 1 ID
+ * The event queue has 1 ID
+ */
+#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1)
+
+#if (NUMBER_OF_HW_QUEUES >= HL_MAX_QUEUES)
+#error "Number of H/W queues must be smaller than HL_MAX_QUEUES"
+#endif
+
+#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES)
+#error "Number of MSIX interrupts must be smaller than or equal to GOYA_MSIX_ENTRIES"
+#endif
+
+#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
+
+#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */
+
+#define TPC_ENABLED_MASK 0xFF
+
+#define PLL_HIGH_DEFAULT 1575000000 /* 1.575 GHz */
+
+#define MAX_POWER_DEFAULT 200000 /* 200W */
+
+#define GOYA_ARMCP_INFO_TIMEOUT 10000000 /* 10s */
+#define GOYA_ARMCP_EEPROM_TIMEOUT 10000000 /* 10s */
+
+#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
+
+/* DRAM Memory Map */
+
+#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
+#define MMU_PAGE_TABLES_SIZE 0x0DE00000 /* 222MB */
+#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */
+#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */
+#define CPU_PQ_PKT_SIZE 0x00001000 /* 4KB */
+#define CPU_PQ_DATA_SIZE 0x01FFE000 /* 32MB - 8KB */
+
+#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
+#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
+#define MMU_DRAM_DEFAULT_PAGE_ADDR (MMU_PAGE_TABLES_ADDR + \
+ MMU_PAGE_TABLES_SIZE)
+#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \
+ MMU_DRAM_DEFAULT_PAGE_SIZE)
+#define CPU_PQ_PKT_ADDR (MMU_CACHE_MNG_ADDR + \
+ MMU_CACHE_MNG_SIZE)
+#define CPU_PQ_DATA_ADDR (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE)
+#define DRAM_BASE_ADDR_USER (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE)
+
+#if (DRAM_BASE_ADDR_USER != 0x20000000)
+#error "KMD must reserve 512MB"
+#endif
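+
+/*
+ * Resulting layout (illustrative, taking DRAM_PHYS_BASE as 0, which the
+ * check above implies): FW image 0x00000000-0x0FFFFFFF, MMU page tables
+ * 0x10000000-0x1DDFFFFF, DRAM default page 0x1DE00000-0x1DFFFFFF, MMU cache
+ * management 0x1E000000-0x1E000FFF, CPU PQ packets 0x1E001000-0x1E001FFF,
+ * CPU PQ data 0x1E002000-0x1FFFFFFF, user DRAM from 0x20000000 (512MB).
+ */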
+
+/*
+ * SRAM Memory Map for KMD
+ *
+ * KMD occupies GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START bytes from the start
+ * of SRAM. This area is used for the MME/TPC QMANs.
+ *
+ */
+
+#define MME_QMAN_BASE_OFFSET 0x000000 /* Must be 0 */
+#define MME_QMAN_LENGTH 64
+#define TPC_QMAN_LENGTH 64
+
+#define TPC0_QMAN_BASE_OFFSET (MME_QMAN_BASE_OFFSET + \
+ (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC1_QMAN_BASE_OFFSET (TPC0_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC2_QMAN_BASE_OFFSET (TPC1_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC3_QMAN_BASE_OFFSET (TPC2_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC4_QMAN_BASE_OFFSET (TPC3_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC5_QMAN_BASE_OFFSET (TPC4_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC6_QMAN_BASE_OFFSET (TPC5_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+#define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+
+#define SRAM_KMD_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \
+ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
+
+#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
+#error "MME/TPC QMANs SRAM space exceeds limit"
+#endif
+
+#define SRAM_USER_BASE_OFFSET GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START
+
+/* Virtual address space */
+#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */
+#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 1TB */
+#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \
+ VA_HOST_SPACE_START) /* 767TB */
+
+#define VA_DDR_SPACE_START 0x800000000ull /* 32GB */
+#define VA_DDR_SPACE_END 0x2000000000ull /* 128GB */
+#define VA_DDR_SPACE_SIZE (VA_DDR_SPACE_END - \
+ VA_DDR_SPACE_START) /* 128GB */
+
+#define DMA_MAX_TRANSFER_SIZE U32_MAX
+
+#define HW_CAP_PLL 0x00000001
+#define HW_CAP_DDR_0 0x00000002
+#define HW_CAP_DDR_1 0x00000004
+#define HW_CAP_MME 0x00000008
+#define HW_CAP_CPU 0x00000010
+#define HW_CAP_DMA 0x00000020
+#define HW_CAP_MSIX 0x00000040
+#define HW_CAP_CPU_Q 0x00000080
+#define HW_CAP_MMU 0x00000100
+#define HW_CAP_TPC_MBIST 0x00000200
+#define HW_CAP_GOLDEN 0x00000400
+#define HW_CAP_TPC 0x00000800
+
+#define CPU_PKT_SHIFT 5
+#define CPU_PKT_SIZE (1 << CPU_PKT_SHIFT)
+#define CPU_PKT_MASK (~((1 << CPU_PKT_SHIFT) - 1))
+#define CPU_MAX_PKTS_IN_CB 32
+#define CPU_CB_SIZE (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB)
+#define CPU_ACCESSIBLE_MEM_SIZE (HL_QUEUE_LENGTH * CPU_CB_SIZE)
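+
+/*
+ * Sizing example (illustrative): CPU_PKT_SIZE is 1 << 5 = 32 bytes, so a CB
+ * of CPU_MAX_PKTS_IN_CB packets is 32 * 32 = 1KB, and the CPU-accessible
+ * pool holds HL_QUEUE_LENGTH such CBs (e.g. 256KB if HL_QUEUE_LENGTH is 256;
+ * the actual queue length comes from the common driver headers).
+ */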
+
+enum goya_fw_component {
+ FW_COMP_UBOOT,
+ FW_COMP_PREBOOT
+};
+
+struct goya_device {
+ int (*test_cpu_queue)(struct hl_device *hdev);
+ int (*armcp_info_get)(struct hl_device *hdev);
+
+ /* TODO: remove hw_queues_lock after moving to scheduler code */
+ spinlock_t hw_queues_lock;
+
+ u64 mme_clk;
+ u64 tpc_clk;
+ u64 ic_clk;
+
+ u64 ddr_bar_cur_addr;
+ u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE];
+ u32 hw_cap_initialized;
+};
+
+int goya_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus,
+ u8 i2c_addr, u8 i2c_reg, u32 *val);
+int goya_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus,
+ u8 i2c_addr, u8 i2c_reg, u32 val);
+int goya_test_cpu_queue(struct hl_device *hdev);
+int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
+ u32 timeout, long *result);
+long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
+long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
+long goya_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
+long goya_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
+long goya_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
+void goya_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
+ long value);
+void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state);
+void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
+void goya_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp);
+void goya_init_security(struct hl_device *hdev);
+u64 goya_get_max_power(struct hl_device *hdev);
+void goya_set_max_power(struct hl_device *hdev, u64 value);
+
+int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
+void goya_late_fini(struct hl_device *hdev);
+int goya_suspend(struct hl_device *hdev);
+int goya_resume(struct hl_device *hdev);
+void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
+void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
+void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
+void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
+ u32 cq_val, u32 msix_vec);
+int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
+void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
+ dma_addr_t *dma_handle, u16 *queue_len);
+u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
+int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
+int goya_send_heartbeat(struct hl_device *hdev);
+
+#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
new file mode 100644
index 000000000000..088692c852b6
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "goyaP.h"
+
+void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ switch (freq) {
+ case PLL_HIGH:
+ hl_set_frequency(hdev, MME_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, TPC_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, IC_PLL, hdev->high_pll);
+ break;
+ case PLL_LOW:
+ hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW);
+ break;
+ case PLL_LAST:
+ hl_set_frequency(hdev, MME_PLL, goya->mme_clk);
+ hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk);
+ hl_set_frequency(hdev, IC_PLL, goya->ic_clk);
+ break;
+ default:
+ dev_err(hdev->dev, "unknown frequency setting\n");
+ }
+}
+
+static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+ unsigned long value;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto fail;
+ }
+
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ count = -EPERM;
+ goto fail;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ hl_set_frequency(hdev, MME_PLL, value);
+ goya->mme_clk = value;
+
+fail:
+ return count;
+}
+
+static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, TPC_PLL, false);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+ unsigned long value;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto fail;
+ }
+
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ count = -EPERM;
+ goto fail;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ hl_set_frequency(hdev, TPC_PLL, value);
+ goya->tpc_clk = value;
+
+fail:
+ return count;
+}
+
+static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, IC_PLL, false);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
+ int rc;
+ unsigned long value;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto fail;
+ }
+
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ count = -EPERM;
+ goto fail;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ hl_set_frequency(hdev, IC_PLL, value);
+ goya->ic_clk = value;
+
+fail:
+ return count;
+}
+
+static ssize_t mme_clk_curr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t tpc_clk_curr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, TPC_PLL, true);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
+static ssize_t ic_clk_curr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, IC_PLL, true);
+
+ if (value < 0)
+ return value;
+
+ return sprintf(buf, "%lu\n", value);
+}
+
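+/*
+ * Per-engine clock attributes: the writable *_clk nodes request a new
+ * frequency (and cache it for the PLL_LAST profile), while the read-only
+ * *_clk_curr nodes return the frequency reported by hl_get_frequency() with
+ * the 'curr' flag set.
+ */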
+static DEVICE_ATTR_RW(ic_clk);
+static DEVICE_ATTR_RO(ic_clk_curr);
+static DEVICE_ATTR_RW(mme_clk);
+static DEVICE_ATTR_RO(mme_clk_curr);
+static DEVICE_ATTR_RW(tpc_clk);
+static DEVICE_ATTR_RO(tpc_clk_curr);
+
+static struct attribute *goya_dev_attrs[] = {
+ &dev_attr_ic_clk.attr,
+ &dev_attr_ic_clk_curr.attr,
+ &dev_attr_mme_clk.attr,
+ &dev_attr_mme_clk_curr.attr,
+ &dev_attr_tpc_clk.attr,
+ &dev_attr_tpc_clk_curr.attr,
+ NULL,
+};
+
+void goya_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp)
+{
+ dev_attr_grp->attrs = goya_dev_attrs;
+}
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
new file mode 100644
index 000000000000..575003238401
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -0,0 +1,2999 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "goyaP.h"
+
+/*
+ * goya_pb_set_block - set the given block as protected
+ *
+ * @hdev: pointer to hl_device structure
+ * @base: block base address
+ */
+static void goya_pb_set_block(struct hl_device *hdev, u64 base)
+{
+ u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS;
+
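+ /*
+  * Walk the protection-bits words of this block, from PROT_BITS_OFFS up
+  * to the next 4KB boundary, and zero each word; per the description
+  * above, zero bits mark the block's registers as protected.
+  */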
+ while (pb_addr & 0xFFF) {
+ WREG32(pb_addr, 0);
+ pb_addr += 4;
+ }
+}
+
+static void goya_init_mme_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ /* TODO: change to real reg name when Soc Online is updated */
+ u64 mmMME_SBB_POWER_ECO1 = 0xDFF60,
+ mmMME_SBB_POWER_ECO2 = 0xDFF64;
+
+ goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_0_BASE);
+ goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_1_BASE);
+ goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_2_BASE);
+ goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_3_BASE);
+
+ goya_pb_set_block(hdev, mmSBA_ECC_MEM_BASE);
+ goya_pb_set_block(hdev, mmSBB_ECC_MEM_BASE);
+
+ goya_pb_set_block(hdev, mmMME1_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME1_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME1_WR_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME2_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME2_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME2_WR_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME3_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME3_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME3_WR_REGULATOR_BASE);
+
+ goya_pb_set_block(hdev, mmMME4_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME4_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME4_WR_REGULATOR_BASE);
+
+ goya_pb_set_block(hdev, mmMME5_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME5_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME5_WR_REGULATOR_BASE);
+
+ goya_pb_set_block(hdev, mmMME6_RTR_BASE);
+ goya_pb_set_block(hdev, mmMME6_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmMME6_WR_REGULATOR_BASE);
+
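+ /*
+  * The recurring pattern below protects only selected registers inside a
+  * block: each 32-bit protection word covers 32 registers (0x80 bytes),
+  * word_offset selects the word that covers the register and mask collects
+  * the bit of each register of interest. Writing the inverted mask leaves
+  * those bits at zero (protected, as in goya_pb_set_block()) while the
+  * remaining registers covered by the word stay accessible.
+  */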
+ pb_addr = (mmMME_DUMMY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_DUMMY & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_DUMMY & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_LOG_SHADOW & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_STORE_MAX_CREDIT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_STORE_MAX_CREDIT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_STORE_MAX_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_AGU & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBB & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_WBC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBA_CONTROL_DATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBB_CONTROL_DATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBC_CONTROL_DATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_WBC_CONTROL_DATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_TE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_TE2DEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_REI_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_REI_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SEI_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SEI_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SPI_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SPI_MASK & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_QM_CP_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_QM_CP_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_QM_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_PQ_BUF_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_QM_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_CMDQ_CQ_IFIFO_CNT &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME_SBB_POWER_ECO1 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME_SBB_POWER_ECO1 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME_SBB_POWER_ECO1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME_SBB_POWER_ECO2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+}
+
+static void goya_init_dma_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ goya_pb_set_block(hdev, mmDMA_NRTR_BASE);
+ goya_pb_set_block(hdev, mmDMA_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmDMA_WR_REGULATOR_BASE);
+
+ pb_addr = (mmDMA_QM_0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_0_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_0_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_0_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_0_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_0_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_0_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_0_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_0_BASE);
+
+ pb_addr = (mmDMA_QM_1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_1_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_1_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_1_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_1_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_1_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_1_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_1_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_1_BASE);
+
+ pb_addr = (mmDMA_QM_2_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_2_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_2_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_2_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_2_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_2_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_2_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_2_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_2_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_2_BASE);
+
+ pb_addr = (mmDMA_QM_3_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_3_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_3_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_3_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_3_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_3_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_3_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_3_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_3_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_3_BASE);
+
+ pb_addr = (mmDMA_QM_4_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_4_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_4_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_4_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_4_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_4_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA_QM_4_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA_QM_4_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA_QM_4_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmDMA_CH_4_BASE);
+}
+
+static void goya_init_tpc_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ goya_pb_set_block(hdev, mmTPC0_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC0_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC1_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC2_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC3_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH
+ & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_FUNC_MBIST_CNTRL
+ & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC4_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC5_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC6_RTR_BASE);
+ goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ goya_pb_set_block(hdev, mmTPC7_NRTR_BASE);
+ goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE);
+
+ pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_AWUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_FUNC_MBIST_CNTRL &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_PQ_PUSH0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PUSH1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PUSH2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PUSH3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CMDQ_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CMDQ_CQ_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_ARUSER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_CTL_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_STS1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+}
+
+/*
+ * goya_init_protection_bits - Initialize protection bits for specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * All protection bits are 1 by default, meaning the register is not
+ * protected. Each bit that belongs to a protected register needs to be
+ * cleared (set to 0).
+ *
+ */
+static void goya_init_protection_bits(struct hl_device *hdev)
+{
+ /*
+ * In each 4K block of registers, the last 128 bytes are protection
+ * bits - a total of 1024 bits, one for each register. Each bit
+ * corresponds to a specific register, in register order.
+ * So in order to calculate the bit that corresponds to a given
+ * register, we need to calculate its word offset and then the exact
+ * bit location inside that word (a word is 4 bytes).
+ *
+ * Register address:
+ *
+ * 31               12 11         7 6              2 1       0
+ * -------------------------------------------------------------
+ * |       Don't       |    word    |  bit location  |    0    |
+ * |       care        |   offset   |  inside word   |         |
+ * -------------------------------------------------------------
+ *
+ * Bits 7-11 represent the word offset inside the 128 bytes.
+ * Bits 2-6 represent the bit location inside the word.
+ */
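+
+ /*
+ * Illustrative example (the 0x84 offset below is hypothetical and
+ * assumes PROT_BITS_OFFS marks the 0xF80 start of the 128 protection
+ * bytes): a register at offset 0x84 inside its 4K block has word
+ * offset (0x84 & 0xF80) >> 7 = 1 and bit location (0x84 & 0x7F) >> 2 = 1,
+ * so its protection bit is bit 1 of the 32-bit word at
+ * (reg & ~0xFFF) + PROT_BITS_OFFS + (1 << 2). Writing ~mask with that
+ * bit set in mask clears the bit, i.e. marks the register as protected.
+ */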
+
+ goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
+ goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
+ goya_pb_set_block(hdev, mmPCI_WR_REGULATOR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y0_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y0_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y1_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y1_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y2_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y2_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y3_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y3_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y4_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y4_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmSRAM_Y5_X0_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X0_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X1_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X1_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X2_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X2_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X3_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X3_RTR_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X4_BANK_BASE);
+ goya_pb_set_block(hdev, mmSRAM_Y5_X4_RTR_BASE);
+
+ goya_pb_set_block(hdev, mmPCIE_WRAP_BASE);
+ goya_pb_set_block(hdev, mmPCIE_CORE_BASE);
+ goya_pb_set_block(hdev, mmPCIE_DB_CFG_BASE);
+ goya_pb_set_block(hdev, mmPCIE_DB_CMD_BASE);
+ goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
+ goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
+ goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
+
+ goya_init_mme_protection_bits(hdev);
+
+ goya_init_dma_protection_bits(hdev);
+
+ goya_init_tpc_protection_bits(hdev);
+}
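
The address decoding described in the comment inside goya_init_protection_bits() can be exercised on its own. The snippet below is a standalone sketch, not part of the patch; it assumes PROT_BITS_OFFS is 0xF80 (the last 128 bytes of each 4 KB block, consistent with the layout described above) and uses a made-up register offset of 0x1234.

	/*
	 * Standalone sketch of the protection-bit address decoding used by the
	 * helpers above. PROT_BITS_OFFS == 0xF80 is assumed (last 128 bytes of
	 * a 4 KB block); the register offset 0x1234 is hypothetical.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define PROT_BITS_OFFS	0xF80u

	int main(void)
	{
		uint32_t reg = 0x1234;	/* hypothetical register address */
		/* base of the protection-bits area of the register's 4 KB block */
		uint32_t pb_addr = (reg & ~0xFFFu) + PROT_BITS_OFFS;
		/* bits 11:7 select the 4-byte word inside the 128-byte area */
		uint32_t word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;
		/* bits 6:2 select the bit inside that word */
		uint32_t bit = (reg & 0x7Fu) >> 2;

		/* for 0x1234: pb_addr=0x1f80, word_offset=0x10, bit=13 */
		printf("pb_addr=0x%x word_offset=0x%x bit=%u\n",
		       pb_addr, word_offset, bit);
		return 0;
	}
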
+
+/*
+ * goya_init_security - Initialize security model
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the security model of the device. This includes the range
+ * registers and a protection bit per register.
+ *
+ */
+void goya_init_security(struct hl_device *hdev)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE);
+ u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE);
+
+ u32 lbw_rng0_base = 0xFC440000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng0_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng1_base = 0xFC480000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng1_mask = 0xFFF80000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng2_base = 0xFC600000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng2_mask = 0xFFE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng3_base = 0xFC800000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng3_mask = 0xFFF00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng4_base = 0xFCC02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng4_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng5_base = 0xFCC40000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng5_mask = 0xFFFF8000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng6_base = 0xFCC48000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng6_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng7_base = 0xFCC4A000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng7_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng8_base = 0xFCC4C000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng8_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng9_base = 0xFCC50000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng9_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng11_base = 0xFCE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng11_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ u32 lbw_rng13_base = 0xFEC43000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+ u32 lbw_rng13_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+ WREG32(mmDMA_MACRO_LBW_RANGE_HIT_BLOCK, 0xFFFF);
+ WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFF);
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
+ WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_0, 0);
+ WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_0, 0);
+ WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_0, 0);
+ WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_0, 0xFFF80);
+ }
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_1, dram_addr_lo);
+ WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_1, dram_addr_hi);
+ WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_1, 0xE0000000);
+ WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_1, 0x3FFFF);
+
+ /* Protect registers */
+
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmDMA_MACRO_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmDMA_MACRO_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME1_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME2_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME3_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME4_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME5_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmMME6_RTR_LBW_RANGE_HIT, 0xFFFF);
+
+ WREG32(mmMME1_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME2_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME3_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME4_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME5_RTR_HBW_RANGE_HIT, 0xFE);
+ WREG32(mmMME6_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmMME6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmMME6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC0_NRTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC1_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC1_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC2_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC2_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC3_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC3_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC4_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC4_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC5_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC5_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC6_RTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC6_RTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ WREG32(mmTPC7_NRTR_LBW_RANGE_HIT, 0xFFFF);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_HIT, 0xFE);
+
+ /* Protect HOST */
+ WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_0, 0);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_0, 0);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_0, 0);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80);
+
+ /*
+ * Protect DDR @
+ * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END
+ * The mask protects the first 512MB
+ */
+ WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000);
+ WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF);
+
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base);
+ WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask);
+
+ goya_init_protection_bits(hdev);
+}
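
A note on the base/mask pairs programmed in goya_init_security(): the patch does not spell out the hardware semantics, but the DDR comments ("mask 0xE0000000 ... protects the first 512MB") are consistent with the mask selecting which address bits must match the base, so the zeroed low bits give the window size. The sketch below works through that reading; it is an inference, not documented behaviour.

	/*
	 * Sketch of one plausible reading of the range base/mask registers:
	 * an address falls inside a range when (addr & mask) == base, so the
	 * number of trailing zero bits in the mask gives the window size.
	 * This is inferred from the comments above, not documented H/W
	 * behaviour.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t range_size(uint32_t mask)
	{
		return mask ? (1ull << __builtin_ctz(mask)) : (1ull << 32);
	}

	int main(void)
	{
		/* lbw_rng0: base 0xFC440000, mask 0xFFFF0000 -> 64 KB window */
		printf("rng0: %llu KB\n",
		       (unsigned long long)(range_size(0xFFFF0000u) >> 10));
		/* DDR HBW range: mask 0xE0000000 -> 512 MB, matching the comment */
		printf("ddr:  %llu MB\n",
		       (unsigned long long)(range_size(0xE0000000u) >> 20));
		return 0;
	}
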
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
new file mode 100644
index 000000000000..a7c95e9f9b9a
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -0,0 +1,1464 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HABANALABSP_H_
+#define HABANALABSP_H_
+
+#include "include/armcp_if.h"
+#include "include/qman_if.h"
+
+#define pr_fmt(fmt) "habanalabs: " fmt
+
+#include <linux/cdev.h>
+#include <linux/iopoll.h>
+#include <linux/irqreturn.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
+#include <linux/hashtable.h>
+
+#define HL_NAME "habanalabs"
+
+#define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT)
+
+#define HL_PENDING_RESET_PER_SEC 5
+
+#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
+
+#define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */
+
+#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
+
+#define HL_MAX_QUEUES 128
+
+#define HL_MAX_JOBS_PER_CS 64
+
+/* MUST BE POWER OF 2 and larger than 1 */
+#define HL_MAX_PENDING_CS 64
+
+/* Memory */
+#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+
+/* MMU */
+#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+
+/**
+ * struct pgt_info - MMU hop page info.
+ * @node: hash linked-list node for the hash table of pgts.
+ * @addr: physical address of the pgt.
+ * @ctx: pointer to the owner ctx.
+ * @num_of_ptes: indicates how many ptes are used in the pgt.
+ *
+ * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
+ * is needed during mapping, a new page is allocated and this structure holds
+ * its essential information. During unmapping, if no valid PTEs remain in
+ * the page, it is freed along with its pgt_info structure.
+ */
+struct pgt_info {
+ struct hlist_node node;
+ u64 addr;
+ struct hl_ctx *ctx;
+ int num_of_ptes;
+};
+
+struct hl_device;
+struct hl_fpriv;
+
+/**
+ * enum hl_queue_type - Supported QUEUE types.
+ * @QUEUE_TYPE_NA: queue is not available.
+ * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
+ * host.
+ * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
+ * memories and/or operates the compute engines.
+ * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
+ */
+enum hl_queue_type {
+ QUEUE_TYPE_NA,
+ QUEUE_TYPE_EXT,
+ QUEUE_TYPE_INT,
+ QUEUE_TYPE_CPU
+};
+
+/**
+ * struct hw_queue_properties - queue information.
+ * @type: queue type.
+ * @kmd_only: true if only KMD is allowed to send a job to this queue, false
+ * otherwise.
+ */
+struct hw_queue_properties {
+ enum hl_queue_type type;
+ u8 kmd_only;
+};
+
+/**
+ * enum vm_type_t - virtual memory mapping request information.
+ * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
+ * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
+ */
+enum vm_type_t {
+ VM_TYPE_USERPTR,
+ VM_TYPE_PHYS_PACK
+};
+
+/**
+ * enum hl_device_hw_state - H/W device state. Use this to decide whether a
+ * reset is needed before hw_init.
+ * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean, i.e. after a hard reset.
+ * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty, i.e. hw_init has started
+ * executing.
+ */
+enum hl_device_hw_state {
+ HL_DEVICE_HW_STATE_CLEAN = 0,
+ HL_DEVICE_HW_STATE_DIRTY
+};
+
+/**
+ * struct asic_fixed_properties - ASIC specific immutable properties.
+ * @hw_queues_props: H/W queues properties.
+ * @armcp_info: various information received from ArmCP regarding the H/W,
+ * e.g. available sensors.
+ * @uboot_ver: F/W U-boot version.
+ * @preboot_ver: F/W Preboot version.
+ * @sram_base_address: SRAM physical start address.
+ * @sram_end_address: SRAM physical end address.
+ * @sram_user_base_address: SRAM physical start address for user access.
+ * @dram_base_address: DRAM physical start address.
+ * @dram_end_address: DRAM physical end address.
+ * @dram_user_base_address: DRAM physical start address for user access.
+ * @dram_size: DRAM total size.
+ * @dram_pci_bar_size: size of PCI bar towards DRAM.
+ * @host_phys_base_address: base physical address of host memory for
+ * transactions that the device generates.
+ * @max_power_default: max power of the device after reset
+ * @va_space_host_start_address: base address of virtual memory range for
+ * mapping host memory.
+ * @va_space_host_end_address: end address of virtual memory range for
+ * mapping host memory.
+ * @va_space_dram_start_address: base address of virtual memory range for
+ * mapping DRAM memory.
+ * @va_space_dram_end_address: end address of virtual memory range for
+ * mapping DRAM memory.
+ * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
+ * fault.
+ * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
+ * @mmu_dram_default_page_addr: DRAM default page physical address.
+ * @mmu_pgt_size: MMU page tables total size.
+ * @mmu_pte_size: PTE size in MMU page tables.
+ * @mmu_hop_table_size: MMU hop table size.
+ * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
+ * @dram_page_size: page size for MMU DRAM allocation.
+ * @cfg_size: configuration space size on SRAM.
+ * @sram_size: total size of SRAM.
+ * @max_asid: maximum number of open contexts (ASIDs).
+ * @num_of_events: number of possible internal H/W IRQs.
+ * @psoc_pci_pll_nr: PCI PLL NR value.
+ * @psoc_pci_pll_nf: PCI PLL NF value.
+ * @psoc_pci_pll_od: PCI PLL OD value.
+ * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
+ * @completion_queues_count: number of completion queues.
+ * @high_pll: high PLL frequency used by the device.
+ * @cb_pool_cb_cnt: number of CBs in the CB pool.
+ * @cb_pool_cb_size: size of each CB in the CB pool.
+ * @tpc_enabled_mask: which TPCs are enabled.
+ */
+struct asic_fixed_properties {
+ struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
+ struct armcp_info armcp_info;
+ char uboot_ver[VERSION_MAX_LEN];
+ char preboot_ver[VERSION_MAX_LEN];
+ u64 sram_base_address;
+ u64 sram_end_address;
+ u64 sram_user_base_address;
+ u64 dram_base_address;
+ u64 dram_end_address;
+ u64 dram_user_base_address;
+ u64 dram_size;
+ u64 dram_pci_bar_size;
+ u64 host_phys_base_address;
+ u64 max_power_default;
+ u64 va_space_host_start_address;
+ u64 va_space_host_end_address;
+ u64 va_space_dram_start_address;
+ u64 va_space_dram_end_address;
+ u64 dram_size_for_default_page_mapping;
+ u64 mmu_pgt_addr;
+ u64 mmu_dram_default_page_addr;
+ u32 mmu_pgt_size;
+ u32 mmu_pte_size;
+ u32 mmu_hop_table_size;
+ u32 mmu_hop0_tables_total_size;
+ u32 dram_page_size;
+ u32 cfg_size;
+ u32 sram_size;
+ u32 max_asid;
+ u32 num_of_events;
+ u32 psoc_pci_pll_nr;
+ u32 psoc_pci_pll_nf;
+ u32 psoc_pci_pll_od;
+ u32 psoc_pci_pll_div_factor;
+ u32 high_pll;
+ u32 cb_pool_cb_cnt;
+ u32 cb_pool_cb_size;
+ u8 completion_queues_count;
+ u8 tpc_enabled_mask;
+};
+
+/**
+ * struct hl_dma_fence - wrapper for fence object used by command submissions.
+ * @base_fence: kernel fence object.
+ * @lock: spinlock to protect fence.
+ * @hdev: habanalabs device structure.
+ * @cs_seq: command submission sequence number.
+ */
+struct hl_dma_fence {
+ struct dma_fence base_fence;
+ spinlock_t lock;
+ struct hl_device *hdev;
+ u64 cs_seq;
+};
+
+/*
+ * Command Buffers
+ */
+
+#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
+
+/**
+ * struct hl_cb_mgr - describes a Command Buffer Manager.
+ * @cb_lock: protects cb_handles.
+ * @cb_handles: an idr to hold all command buffer handles.
+ */
+struct hl_cb_mgr {
+ spinlock_t cb_lock;
+ struct idr cb_handles; /* protected by cb_lock */
+};
+
+/**
+ * struct hl_cb - describes a Command Buffer.
+ * @refcount: reference counter for usage of the CB.
+ * @hdev: pointer to device this CB belongs to.
+ * @lock: spinlock to protect mmap/cs flows.
+ * @debugfs_list: node in debugfs list of command buffers.
+ * @pool_list: node in pool list of command buffers.
+ * @kernel_address: Holds the CB's kernel virtual address.
+ * @bus_address: Holds the CB's DMA address.
+ * @mmap_size: Holds the CB's size that was mmaped.
+ * @size: holds the CB's size.
+ * @id: the CB's ID.
+ * @cs_cnt: holds number of CS that this CB participates in.
+ * @ctx_id: holds the ID of the owner's context.
+ * @mmap: true if the CB is currently mmaped to user.
+ * @is_pool: true if CB was acquired from the pool, false otherwise.
+ */
+struct hl_cb {
+ struct kref refcount;
+ struct hl_device *hdev;
+ spinlock_t lock;
+ struct list_head debugfs_list;
+ struct list_head pool_list;
+ u64 kernel_address;
+ dma_addr_t bus_address;
+ u32 mmap_size;
+ u32 size;
+ u32 id;
+ u32 cs_cnt;
+ u32 ctx_id;
+ u8 mmap;
+ u8 is_pool;
+};
+
+
+/*
+ * QUEUES
+ */
+
+struct hl_cs_job;
+
+/*
+ * Currently, there are two limitations on the maximum length of a queue:
+ *
+ * 1. The memory footprint of the queue. The current allocated space for the
+ * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE,
+ * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE,
+ * which currently is 4096/16 = 256 entries.
+ *
+ * To increase that, we need either to decrease the size of the
+ * BD (difficult), or allocate more than a single page (easier).
+ *
+ * 2. Because the size of the JOB handle field in the BD CTL / completion queue
+ * is 10-bit, we can have up to 1024 open jobs per hardware queue.
+ * Therefore, each queue can hold up to 1024 entries.
+ *
+ * HL_QUEUE_LENGTH is in units of struct hl_bd.
+ * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE
+ */
+
+#define HL_PAGE_SIZE 4096 /* minimum page size */
+/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */
+#define HL_QUEUE_LENGTH 256
+#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
+
+/*
+ * HL_CQ_LENGTH is in units of struct hl_cq_entry.
+ * HL_CQ_LENGTH should be <= HL_PAGE_SIZE
+ */
+#define HL_CQ_LENGTH HL_QUEUE_LENGTH
+#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
+
+/* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */
+#define HL_EQ_LENGTH 64
+#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
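
The sizing rules from the comment above can be captured as compile-time checks. The sketch below is illustrative only; HL_BD_SIZE == 16 is assumed from the 4096/16 = 256 arithmetic in the comment (the real value comes from the QMAN interface headers), and a C11 static_assert stands in for the kernel's BUILD_BUG_ON.

	#include <assert.h>

	#define HL_PAGE_SIZE		4096
	#define HL_BD_SIZE		16	/* assumed from the 4096/16 = 256 arithmetic */
	#define HL_QUEUE_LENGTH		256

	/* the queue must fit in a single page */
	static_assert(HL_QUEUE_LENGTH * HL_BD_SIZE <= HL_PAGE_SIZE,
		      "queue does not fit in one page");
	/* the 10-bit job handle in the BD CTL allows at most 1024 open jobs */
	static_assert(HL_QUEUE_LENGTH <= 1024,
		      "more queue entries than job handles");
	/* power of 2 so pi/ci can wrap with a simple mask */
	static_assert((HL_QUEUE_LENGTH & (HL_QUEUE_LENGTH - 1)) == 0,
		      "queue length must be a power of 2");
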
+
+
+/**
+ * struct hl_hw_queue - describes a H/W transport queue.
+ * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
+ * @queue_type: type of queue.
+ * @kernel_address: holds the queue's kernel virtual address.
+ * @bus_address: holds the queue's DMA address.
+ * @pi: holds the queue's pi value.
+ * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
+ * @hw_queue_id: the id of the H/W queue.
+ * @int_queue_len: length of internal queue (number of entries).
+ * @valid: is the queue valid (we have an array of 32 queues; not all of
+ * them exist).
+ */
+struct hl_hw_queue {
+ struct hl_cs_job **shadow_queue;
+ enum hl_queue_type queue_type;
+ u64 kernel_address;
+ dma_addr_t bus_address;
+ u32 pi;
+ u32 ci;
+ u32 hw_queue_id;
+ u16 int_queue_len;
+ u8 valid;
+};
+
+/**
+ * struct hl_cq - describes a completion queue
+ * @hdev: pointer to the device structure
+ * @kernel_address: holds the queue's kernel virtual address
+ * @bus_address: holds the queue's DMA address
+ * @hw_queue_id: the id of the matching H/W queue
+ * @ci: ci inside the queue
+ * @pi: pi inside the queue
+ * @free_slots_cnt: counter of free slots in queue
+ */
+struct hl_cq {
+ struct hl_device *hdev;
+ u64 kernel_address;
+ dma_addr_t bus_address;
+ u32 hw_queue_id;
+ u32 ci;
+ u32 pi;
+ atomic_t free_slots_cnt;
+};
+
+/**
+ * struct hl_eq - describes the event queue (single one per device)
+ * @hdev: pointer to the device structure
+ * @kernel_address: holds the queue's kernel virtual address
+ * @bus_address: holds the queue's DMA address
+ * @ci: ci inside the queue
+ */
+struct hl_eq {
+ struct hl_device *hdev;
+ u64 kernel_address;
+ dma_addr_t bus_address;
+ u32 ci;
+};
+
+
+/*
+ * ASICs
+ */
+
+/**
+ * enum hl_asic_type - supported ASIC types.
+ * @ASIC_AUTO_DETECT: ASIC type will be automatically set.
+ * @ASIC_GOYA: Goya device.
+ * @ASIC_INVALID: Invalid ASIC type.
+ */
+enum hl_asic_type {
+ ASIC_AUTO_DETECT,
+ ASIC_GOYA,
+ ASIC_INVALID
+};
+
+struct hl_cs_parser;
+
+/**
+ * enum hl_pm_mng_profile - power management profile.
+ * @PM_AUTO: internal clock is set by KMD.
+ * @PM_MANUAL: internal clock is set by the user.
+ * @PM_LAST: last power management type.
+ */
+enum hl_pm_mng_profile {
+ PM_AUTO = 1,
+ PM_MANUAL,
+ PM_LAST
+};
+
+/**
+ * enum hl_pll_frequency - PLL frequency.
+ * @PLL_HIGH: high frequency.
+ * @PLL_LOW: low frequency.
+ * @PLL_LAST: last frequency values that were configured by the user.
+ */
+enum hl_pll_frequency {
+ PLL_HIGH = 1,
+ PLL_LOW,
+ PLL_LAST
+};
+
+/**
+ * struct hl_asic_funcs - ASIC specific functions that can be called from
+ * common code.
+ * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
+ * @early_fini: tears down what was done in early_init.
+ * @late_init: sets up late driver/hw state (post hw_init) - Optional.
+ * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
+ * @sw_init: sets up driver state, does not configure H/W.
+ * @sw_fini: tears down driver state, does not configure H/W.
+ * @hw_init: sets up the H/W state.
+ * @hw_fini: tears down the H/W state.
+ * @halt_engines: halt engines, needed for reset sequence. This also disables
+ * interrupts from the device. Should be called before
+ * hw_fini and before CS rollback.
+ * @suspend: handles IP specific H/W or SW changes for suspend.
+ * @resume: handles IP specific H/W or SW changes for resume.
+ * @cb_mmap: maps a CB.
+ * @ring_doorbell: increment PI on a given QMAN.
+ * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
+ * @dma_alloc_coherent: Allocate coherent DMA memory by calling
+ * dma_alloc_coherent(). This is an ASIC function because
+ * its implementation is not trivial when the driver is
+ * loaded in simulation mode (not upstreamed).
+ * @dma_free_coherent: Free coherent DMA memory by calling dma_free_coherent().
+ * This is an ASIC function because its implementation is
+ * not trivial when the driver is loaded in simulation mode
+ * (not upstreamed).
+ * @get_int_queue_base: get the internal queue base address.
+ * @test_queues: run simple test on all queues for sanity check.
+ * @dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
+ * size of allocation is HL_DMA_POOL_BLK_SIZE.
+ * @dma_pool_free: free small DMA allocation from pool.
+ * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
+ * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
+ * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
+ * @cs_parser: parse Command Submission.
+ * @asic_dma_map_sg: DMA map scatter-gather list.
+ * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
+ * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
+ * @update_eq_ci: update event queue CI.
+ * @context_switch: called upon ASID context switch.
+ * @restore_phase_topology: clear all SOBs and MONs.
+ * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
+ * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
+ * @add_device_attr: add ASIC specific device attributes.
+ * @handle_eqe: handle event queue entry (IRQ) from ArmCP.
+ * @set_pll_profile: change PLL profile (manual/automatic).
+ * @get_events_stat: retrieve event queue entries histogram.
+ * @read_pte: read MMU page table entry from DRAM.
+ * @write_pte: write MMU page table entry to DRAM.
+ * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
+ * hard (L0 & L1) flush.
+ * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
+ * ASID-VA-size mask.
+ * @send_heartbeat: send is-alive packet to ArmCP and verify response.
+ * @enable_clock_gating: enable clock gating for reducing power consumption.
+ * @disable_clock_gating: disable clock gating to allow register access on HBW.
+ * @is_device_idle: return true if device is idle, false otherwise.
+ * @soft_reset_late_init: perform certain actions needed after soft reset.
+ * @hw_queues_lock: acquire H/W queues lock.
+ * @hw_queues_unlock: release H/W queues lock.
+ * @get_pci_id: retrieve PCI ID.
+ * @get_eeprom_data: retrieve EEPROM data from F/W.
+ * @send_cpu_message: send buffer to ArmCP.
+ * @get_hw_state: retrieve the H/W state
+ */
+struct hl_asic_funcs {
+ int (*early_init)(struct hl_device *hdev);
+ int (*early_fini)(struct hl_device *hdev);
+ int (*late_init)(struct hl_device *hdev);
+ void (*late_fini)(struct hl_device *hdev);
+ int (*sw_init)(struct hl_device *hdev);
+ int (*sw_fini)(struct hl_device *hdev);
+ int (*hw_init)(struct hl_device *hdev);
+ void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
+ void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
+ int (*suspend)(struct hl_device *hdev);
+ int (*resume)(struct hl_device *hdev);
+ int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+ u64 kaddress, phys_addr_t paddress, u32 size);
+ void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
+ void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
+ void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*dma_free_coherent)(struct hl_device *hdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle);
+ void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
+ dma_addr_t *dma_handle, u16 *queue_len);
+ int (*test_queues)(struct hl_device *hdev);
+ void* (*dma_pool_zalloc)(struct hl_device *hdev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma_handle);
+ void (*dma_pool_free)(struct hl_device *hdev, void *vaddr,
+ dma_addr_t dma_addr);
+ void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
+ size_t size, dma_addr_t *dma_handle);
+ void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
+ size_t size, void *vaddr);
+ void (*hl_dma_unmap_sg)(struct hl_device *hdev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+ int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
+ int (*asic_dma_map_sg)(struct hl_device *hdev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+ u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
+ struct sg_table *sgt);
+ void (*add_end_of_cb_packets)(u64 kernel_address, u32 len, u64 cq_addr,
+ u32 cq_val, u32 msix_num);
+ void (*update_eq_ci)(struct hl_device *hdev, u32 val);
+ int (*context_switch)(struct hl_device *hdev, u32 asid);
+ void (*restore_phase_topology)(struct hl_device *hdev);
+ int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
+ int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
+ void (*add_device_attr)(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp);
+ void (*handle_eqe)(struct hl_device *hdev,
+ struct hl_eq_entry *eq_entry);
+ void (*set_pll_profile)(struct hl_device *hdev,
+ enum hl_pll_frequency freq);
+ void* (*get_events_stat)(struct hl_device *hdev, u32 *size);
+ u64 (*read_pte)(struct hl_device *hdev, u64 addr);
+ void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
+ void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
+ void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
+ u32 asid, u64 va, u64 size);
+ int (*send_heartbeat)(struct hl_device *hdev);
+ void (*enable_clock_gating)(struct hl_device *hdev);
+ void (*disable_clock_gating)(struct hl_device *hdev);
+ bool (*is_device_idle)(struct hl_device *hdev);
+ int (*soft_reset_late_init)(struct hl_device *hdev);
+ void (*hw_queues_lock)(struct hl_device *hdev);
+ void (*hw_queues_unlock)(struct hl_device *hdev);
+ u32 (*get_pci_id)(struct hl_device *hdev);
+ int (*get_eeprom_data)(struct hl_device *hdev, void *data,
+ size_t max_size);
+ int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
+ u16 len, u32 timeout, long *result);
+ enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
+};
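+
+/*
+ * Illustrative sketch (not part of this patch): an ASIC back-end is expected
+ * to fill a static ops table and point hdev->asic_funcs at it from its
+ * set_asic_funcs() entry point, e.g. (callback names are hypothetical):
+ *
+ *	static const struct hl_asic_funcs goya_funcs = {
+ *		.early_init = goya_early_init,
+ *		.sw_init = goya_sw_init,
+ *		.hw_init = goya_hw_init,
+ *		.hw_fini = goya_hw_fini,
+ *	};
+ *
+ *	void goya_set_asic_funcs(struct hl_device *hdev)
+ *	{
+ *		hdev->asic_funcs = &goya_funcs;
+ *	}
+ */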
+
+
+/*
+ * CONTEXTS
+ */
+
+#define HL_KERNEL_ASID_ID 0
+
+/**
+ * struct hl_va_range - virtual addresses range.
+ * @lock: protects the virtual addresses list.
+ * @list: list of virtual addresses blocks available for mappings.
+ * @start_addr: range start address.
+ * @end_addr: range end address.
+ */
+struct hl_va_range {
+ struct mutex lock;
+ struct list_head list;
+ u64 start_addr;
+ u64 end_addr;
+};
+
+/**
+ * struct hl_ctx - user/kernel context.
+ * @mem_hash: holds mapping from virtual address to virtual memory area
+ *		descriptor (hl_vm_phys_pg_pack or hl_userptr).
+ * @mmu_hash: holds a mapping from virtual address to pgt_info structure.
+ * @hpriv: pointer to the private (KMD) data of the process (fd).
+ * @hdev: pointer to the device structure.
+ * @refcount: reference counter for the context. Context is released only when
+ *		this hits 0. It is incremented on CS and CS_WAIT.
+ * @cs_pending: array of DMA fence objects representing pending CS.
+ * @host_va_range: holds available virtual addresses for host mappings.
+ * @dram_va_range: holds available virtual addresses for DRAM mappings.
+ * @mem_hash_lock: protects the mem_hash.
+ * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying
+ *		the MMU hash or walking the PGT requires taking this lock.
+ * @debugfs_list: node in debugfs list of contexts.
+ * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
+ * to user so user could inquire about CS. It is used as
+ * index to cs_pending array.
+ * @dram_default_hops: array that holds all hops addresses needed for default
+ * DRAM mapping.
+ * @cs_lock: spinlock to protect cs_sequence.
+ * @dram_phys_mem: amount of used physical DRAM memory by this context.
+ * @thread_restore_token: token to prevent multiple threads of the same context
+ * from running the restore phase. Only one thread
+ * should run it.
+ * @thread_restore_wait_token: token to prevent the threads that didn't run
+ * the restore phase from moving to their execution
+ * phase before the restore phase has finished.
+ * @asid: context's unique address space ID in the device's MMU.
+ */
+struct hl_ctx {
+ DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS);
+ struct hl_fpriv *hpriv;
+ struct hl_device *hdev;
+ struct kref refcount;
+ struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
+ struct hl_va_range host_va_range;
+ struct hl_va_range dram_va_range;
+ struct mutex mem_hash_lock;
+ struct mutex mmu_lock;
+ struct list_head debugfs_list;
+ u64 cs_sequence;
+ u64 *dram_default_hops;
+ spinlock_t cs_lock;
+ atomic64_t dram_phys_mem;
+ atomic_t thread_restore_token;
+ u32 thread_restore_wait_token;
+ u32 asid;
+};
+
+/**
+ * struct hl_ctx_mgr - for handling multiple contexts.
+ * @ctx_lock: protects ctx_handles.
+ * @ctx_handles: idr to hold all ctx handles.
+ */
+struct hl_ctx_mgr {
+ struct mutex ctx_lock;
+ struct idr ctx_handles;
+};
+
+
+
+/*
+ * COMMAND SUBMISSIONS
+ */
+
+/**
+ * struct hl_userptr - memory mapping chunk information
+ * @vm_type: type of the VM.
+ * @job_node: linked-list node for hanging the object on the Job's list.
+ * @vec: pointer to the frame vector.
+ * @sgt: pointer to the scatter-gather table that holds the pages.
+ * @dir: for DMA unmapping, the direction must be supplied, so save it.
+ * @debugfs_list: node in debugfs list of command submissions.
+ * @addr: user-space virtual pointer to the start of the memory area.
+ * @size: size of the memory area to pin & map.
+ * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
+ */
+struct hl_userptr {
+ enum vm_type_t vm_type; /* must be first */
+ struct list_head job_node;
+ struct frame_vector *vec;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ struct list_head debugfs_list;
+ u64 addr;
+ u32 size;
+ u8 dma_mapped;
+};
+
+/**
+ * struct hl_cs - command submission.
+ * @jobs_in_queue_cnt: per-queue counter of submitted jobs.
+ * @ctx: the context this CS belongs to.
+ * @job_list: list of the CS's jobs in the various queues.
+ * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
+ * @refcount: reference counter for usage of the CS.
+ * @fence: pointer to the fence object of this CS.
+ * @work_tdr: delayed work node for TDR.
+ * @mirror_node: node in device mirror list of command submissions.
+ * @debugfs_list: node in debugfs list of command submissions.
+ * @sequence: the sequence number of this CS.
+ * @submitted: true if CS was submitted to H/W.
+ * @completed: true if CS was completed by device.
+ * @timedout: true if the CS timed out.
+ * @tdr_active: true if TDR was activated for this CS (to prevent
+ * double TDR activation).
+ * @aborted: true if CS was aborted due to some device error.
+ */
+struct hl_cs {
+ u8 jobs_in_queue_cnt[HL_MAX_QUEUES];
+ struct hl_ctx *ctx;
+ struct list_head job_list;
+ spinlock_t job_lock;
+ struct kref refcount;
+ struct dma_fence *fence;
+ struct delayed_work work_tdr;
+ struct list_head mirror_node;
+ struct list_head debugfs_list;
+ u64 sequence;
+ u8 submitted;
+ u8 completed;
+ u8 timedout;
+ u8 tdr_active;
+ u8 aborted;
+};
+
+/**
+ * struct hl_cs_job - command submission job.
+ * @cs_node: the node to hang on the CS jobs list.
+ * @cs: the CS this job belongs to.
+ * @user_cb: the CB we got from the user.
+ * @patched_cb: in case of patching, this is internal CB which is submitted on
+ * the queue instead of the CB we got from the IOCTL.
+ * @finish_work: workqueue object to run when job is completed.
+ * @userptr_list: linked-list of userptr mappings that belong to this job and
+ * wait for completion.
+ * @debugfs_list: node in debugfs list of command submission jobs.
+ * @id: the id of this job inside a CS.
+ * @hw_queue_id: the id of the H/W queue this job is submitted to.
+ * @user_cb_size: the actual size of the CB we got from the user.
+ * @job_cb_size: the actual size of the CB that we put on the queue.
+ * @ext_queue: whether the job is for external queue or internal queue.
+ */
+struct hl_cs_job {
+ struct list_head cs_node;
+ struct hl_cs *cs;
+ struct hl_cb *user_cb;
+ struct hl_cb *patched_cb;
+ struct work_struct finish_work;
+ struct list_head userptr_list;
+ struct list_head debugfs_list;
+ u32 id;
+ u32 hw_queue_id;
+ u32 user_cb_size;
+ u32 job_cb_size;
+ u8 ext_queue;
+};
+
+/**
+ * struct hl_cs_parser - command submission parser properties.
+ * @user_cb: the CB we got from the user.
+ * @patched_cb: in case of patching, this is internal CB which is submitted on
+ * the queue instead of the CB we got from the IOCTL.
+ * @job_userptr_list: linked-list of userptr mappings that belong to the related
+ * job and wait for completion.
+ * @cs_sequence: the sequence number of the related CS.
+ * @ctx_id: the ID of the context the related CS belongs to.
+ * @hw_queue_id: the id of the H/W queue this job is submitted to.
+ * @user_cb_size: the actual size of the CB we got from the user.
+ * @patched_cb_size: the size of the CB after parsing.
+ * @ext_queue: whether the job is for external queue or internal queue.
+ * @job_id: the id of the related job inside the related CS.
+ * @use_virt_addr: whether to treat the addresses in the CB as virtual during
+ * parsing.
+ */
+struct hl_cs_parser {
+ struct hl_cb *user_cb;
+ struct hl_cb *patched_cb;
+ struct list_head *job_userptr_list;
+ u64 cs_sequence;
+ u32 ctx_id;
+ u32 hw_queue_id;
+ u32 user_cb_size;
+ u32 patched_cb_size;
+ u8 ext_queue;
+ u8 job_id;
+ u8 use_virt_addr;
+};
+
+
+/*
+ * MEMORY STRUCTURE
+ */
+
+/**
+ * struct hl_vm_hash_node - hash element from virtual address to virtual
+ *				memory area descriptor (hl_vm_phys_pg_pack or
+ * hl_userptr).
+ * @node: node to hang on the hash table in context object.
+ * @vaddr: key virtual address.
+ * @ptr: value pointer (hl_vm_phys_pg_pack or hl_userptr).
+ */
+struct hl_vm_hash_node {
+ struct hlist_node node;
+ u64 vaddr;
+ void *ptr;
+};
+
+/**
+ * struct hl_vm_phys_pg_pack - physical page pack.
+ * @vm_type: describes the type of the virtual area descriptor.
+ * @pages: the physical page array.
+ * @mapping_cnt: number of shared mappings.
+ * @asid: the ASID of the context this pack belongs to.
+ * @npages: num physical pages in the pack.
+ * @page_size: size of each page in the pack.
+ * @total_size: total size of all the pages in this pack.
+ * @flags: HL_MEM_* flags related to this pack.
+ * @handle: the provided handle related to this pack.
+ * @offset: offset from the first page.
+ * @contiguous: is contiguous physical memory.
+ * @created_from_userptr: true if the pack was created from a host virtual
+ *			address (userptr).
+ */
+struct hl_vm_phys_pg_pack {
+ enum vm_type_t vm_type; /* must be first */
+ u64 *pages;
+ atomic_t mapping_cnt;
+ u32 asid;
+ u32 npages;
+ u32 page_size;
+ u32 total_size;
+ u32 flags;
+ u32 handle;
+ u32 offset;
+ u8 contiguous;
+ u8 created_from_userptr;
+};
+
+/**
+ * struct hl_vm_va_block - virtual range block information.
+ * @node: node to hang on the virtual range list in context object.
+ * @start: virtual range start address.
+ * @end: virtual range end address.
+ * @size: virtual range size.
+ */
+struct hl_vm_va_block {
+ struct list_head node;
+ u64 start;
+ u64 end;
+ u64 size;
+};
+
+/**
+ * struct hl_vm - virtual memory manager for MMU.
+ * @dram_pg_pool: pool for DRAM physical pages of 2MB.
+ * @dram_pg_pool_refcount: reference counter for the pool usage.
+ * @idr_lock: protects phys_pg_pack_handles.
+ * @phys_pg_pack_handles: idr to hold all device allocations handles.
+ * @init_done: whether initialization was done. We need this because VM
+ * initialization might be skipped during device initialization.
+ */
+struct hl_vm {
+ struct gen_pool *dram_pg_pool;
+ struct kref dram_pg_pool_refcount;
+ spinlock_t idr_lock;
+ struct idr phys_pg_pack_handles;
+ u8 init_done;
+};
+
+/*
+ * FILE PRIVATE STRUCTURE
+ */
+
+/**
+ * struct hl_fpriv - process information stored in FD private data.
+ * @hdev: habanalabs device structure.
+ * @filp: pointer to the given file structure.
+ * @taskpid: current process ID.
+ * @ctx: current executing context.
+ * @ctx_mgr: context manager to handle multiple context for this FD.
+ * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
+ * @debugfs_list: list of relevant ASIC debugfs.
+ * @refcount: number of related contexts.
+ * @restore_phase_mutex: lock for context switch and restore phase.
+ */
+struct hl_fpriv {
+ struct hl_device *hdev;
+ struct file *filp;
+ struct pid *taskpid;
+ struct hl_ctx *ctx; /* TODO: remove for multiple ctx */
+ struct hl_ctx_mgr ctx_mgr;
+ struct hl_cb_mgr cb_mgr;
+ struct list_head debugfs_list;
+ struct kref refcount;
+ struct mutex restore_phase_mutex;
+};
+
+
+/*
+ * DebugFS
+ */
+
+/**
+ * struct hl_info_list - debugfs file ops.
+ * @name: file name.
+ * @show: function to output information.
+ * @write: function to write to the file.
+ */
+struct hl_info_list {
+ const char *name;
+ int (*show)(struct seq_file *s, void *data);
+ ssize_t (*write)(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos);
+};
+
+/**
+ * struct hl_debugfs_entry - debugfs dentry wrapper.
+ * @dent: base debugfs entry structure.
+ * @info_ent: dentry related ops.
+ * @dev_entry: ASIC specific debugfs manager.
+ */
+struct hl_debugfs_entry {
+ struct dentry *dent;
+ const struct hl_info_list *info_ent;
+ struct hl_dbg_device_entry *dev_entry;
+};
+
+/**
+ * struct hl_dbg_device_entry - ASIC specific debugfs manager.
+ * @root: root dentry.
+ * @hdev: habanalabs device structure.
+ * @entry_arr: array of available hl_debugfs_entry.
+ * @file_list: list of available debugfs files.
+ * @file_mutex: protects file_list.
+ * @cb_list: list of available CBs.
+ * @cb_spinlock: protects cb_list.
+ * @cs_list: list of available CSs.
+ * @cs_spinlock: protects cs_list.
+ * @cs_job_list: list of available CS jobs.
+ * @cs_job_spinlock: protects cs_job_list.
+ * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
+ * @userptr_spinlock: protects userptr_list.
+ * @ctx_mem_hash_list: list of available contexts with MMU mappings.
+ * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
+ * @addr: next address to read/write from/to in read/write32.
+ * @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @mmu_asid: ASID to use while translating in mmu_show.
+ * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
+ * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
+ * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
+ */
+struct hl_dbg_device_entry {
+ struct dentry *root;
+ struct hl_device *hdev;
+ struct hl_debugfs_entry *entry_arr;
+ struct list_head file_list;
+ struct mutex file_mutex;
+ struct list_head cb_list;
+ spinlock_t cb_spinlock;
+ struct list_head cs_list;
+ spinlock_t cs_spinlock;
+ struct list_head cs_job_list;
+ spinlock_t cs_job_spinlock;
+ struct list_head userptr_list;
+ spinlock_t userptr_spinlock;
+ struct list_head ctx_mem_hash_list;
+ spinlock_t ctx_mem_hash_spinlock;
+ u64 addr;
+ u64 mmu_addr;
+ u32 mmu_asid;
+ u8 i2c_bus;
+ u8 i2c_addr;
+ u8 i2c_reg;
+};
+
+
+/*
+ * DEVICES
+ */
+
+/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
+ * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
+ */
+#define HL_MAX_MINORS 256
+
+/*
+ * Registers read & write functions.
+ */
+
+u32 hl_rreg(struct hl_device *hdev, u32 reg);
+void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
+
+#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
+ readl_poll_timeout(hdev->rmmio + addr, val, cond, sleep_us, timeout_us)
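+
+/*
+ * Illustrative usage (register name and bit are hypothetical, not part of
+ * this patch):
+ *
+ *	u32 status;
+ *	int rc = hl_poll_timeout(hdev, mmSOME_STATUS_REG, status,
+ *				(status & 0x1), 100, 1000000);
+ *
+ * This polls the register every 100us for up to 1s until bit 0 is set,
+ * returning -ETIMEDOUT on expiry (readl_poll_timeout() semantics).
+ */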
+
+#define RREG32(reg) hl_rreg(hdev, (reg))
+#define WREG32(reg, v) hl_wreg(hdev, (reg), (v))
+#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
+ hl_rreg(hdev, (reg)))
+
+#define WREG32_P(reg, val, mask) \
+ do { \
+ u32 tmp_ = RREG32(reg); \
+ tmp_ &= (mask); \
+ tmp_ |= ((val) & ~(mask)); \
+ WREG32(reg, tmp_); \
+ } while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+
+#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
+#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
+#define WREG32_FIELD(reg, field, val) \
+ WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
+ (val) << REG_FIELD_SHIFT(reg, field))
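+
+/*
+ * Illustrative read-modify-write usage (register and field names are
+ * hypothetical, not part of this patch):
+ *
+ *	WREG32_OR(mmSOME_CFG_REG, 0x1);		sets bit 0
+ *	WREG32_AND(mmSOME_CFG_REG, ~0x2);	clears bit 1
+ *	WREG32_FIELD(SOME_CFG_REG, ENABLE, 1);
+ *
+ * WREG32_FIELD() assumes the ASIC headers define mmSOME_CFG_REG together with
+ * SOME_CFG_REG_ENABLE_SHIFT and SOME_CFG_REG_ENABLE_MASK.
+ */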
+
+struct hwmon_chip_info;
+
+/**
+ * struct hl_device_reset_work - reset workqueue task wrapper.
+ * @reset_work: reset work to be done.
+ * @hdev: habanalabs device structure.
+ */
+struct hl_device_reset_work {
+ struct work_struct reset_work;
+ struct hl_device *hdev;
+};
+
+/**
+ * struct hl_device - habanalabs device structure.
+ * @pdev: pointer to PCI device, can be NULL in case of simulator device.
+ * @pcie_bar: array of available PCIe bars.
+ * @rmmio: configuration area address on SRAM.
+ * @cdev: related char device.
+ * @dev: related kernel basic device structure.
+ * @work_freq: delayed work to lower device frequency if possible.
+ * @work_heartbeat: delayed work for ArmCP is-alive check.
+ * @asic_name: ASIC specific name.
+ * @asic_type: ASIC specific type.
+ * @completion_queue: array of hl_cq.
+ * @cq_wq: work queue of completion queues for executing work in process context
+ * @eq_wq: work queue of event queue for executing work in process context.
+ * @kernel_ctx: KMD context structure.
+ * @kernel_queues: array of hl_hw_queue.
+ * @hw_queues_mirror_list: CS mirror list for TDR.
+ * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
+ * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
+ * @event_queue: event queue for IRQ from ArmCP.
+ * @dma_pool: DMA pool for small allocations.
+ * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
+ * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
+ * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
+ * @asid_bitmap: holds used/available ASIDs.
+ * @asid_mutex: protects asid_bitmap.
+ * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
+ * fd_open_cnt is atomic, we need this lock to serialize
+ * the open function because the driver currently supports
+ * only a single process at a time. In addition, we need a
+ * lock here so we can flush user processes which are opening
+ * the device while we are trying to hard reset it
+ * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
+ * @asic_prop: ASIC specific immutable properties.
+ * @asic_funcs: ASIC specific functions.
+ * @asic_specific: ASIC specific information to use only from ASIC files.
+ * @mmu_pgt_pool: pool of available MMU hops.
+ * @vm: virtual memory manager for MMU.
+ * @mmu_cache_lock: protects MMU cache invalidation as it can serve one
+ *                  context at a time.
+ * @hwmon_dev: H/W monitor device.
+ * @pm_mng_profile: current power management profile.
+ * @hl_chip_info: ASIC's sensors information.
+ * @hl_debugfs: device's debugfs manager.
+ * @cb_pool: list of preallocated CBs.
+ * @cb_pool_lock: protects the CB pool.
+ * @user_ctx: current user context executing.
+ * @dram_used_mem: current DRAM memory consumption.
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @timeout_jiffies: device CS timeout value.
+ * @max_power: the max power of the device, as configured by the sysadmin. This
+ * value is saved so in case of hard-reset, KMD will restore this
+ * value and update the F/W after the re-initialization
+ * @major: habanalabs KMD major.
+ * @high_pll: high PLL profile frequency.
+ * @soft_reset_cnt: number of soft reset since KMD loading.
+ * @hard_reset_cnt: number of hard reset since KMD loading.
+ * @id: device minor.
+ * @disabled: is device disabled.
+ * @late_init_done: whether the late init stage was done during initialization.
+ * @hwmon_initialized: whether the H/W monitor sensors were initialized.
+ * @hard_reset_pending: whether a hard reset work item is pending.
+ * @heartbeat: is heartbeat sanity check towards ArmCP enabled.
+ * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
+ * otherwise.
+ * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
+ * @dram_default_page_mapping: is DRAM default page mapping enabled.
+ * @init_done: is the initialization of the device done.
+ * @mmu_enable: is MMU enabled.
+ * @device_cpu_disabled: whether the device CPU was disabled (due to timeouts).
+ * @cpu_enable: whether the device CPU (ArmCP) is enabled (bring-up parameter).
+ * @reset_pcilink: whether to reset the PCI link during hard reset (bring-up
+ *                 parameter).
+ * @cpu_queues_enable: whether the CPU queues are enabled (bring-up parameter).
+ * @fw_loading: whether to load the F/W to the device CPU (bring-up parameter).
+ * @pldm: whether the device runs on a Palladium emulation platform (bring-up
+ *        parameter).
+ */
+struct hl_device {
+ struct pci_dev *pdev;
+ void __iomem *pcie_bar[6];
+ void __iomem *rmmio;
+ struct cdev cdev;
+ struct device *dev;
+ struct delayed_work work_freq;
+ struct delayed_work work_heartbeat;
+ char asic_name[16];
+ enum hl_asic_type asic_type;
+ struct hl_cq *completion_queue;
+ struct workqueue_struct *cq_wq;
+ struct workqueue_struct *eq_wq;
+ struct hl_ctx *kernel_ctx;
+ struct hl_hw_queue *kernel_queues;
+ struct list_head hw_queues_mirror_list;
+ spinlock_t hw_queues_mirror_lock;
+ struct hl_cb_mgr kernel_cb_mgr;
+ struct hl_eq event_queue;
+ struct dma_pool *dma_pool;
+ void *cpu_accessible_dma_mem;
+ dma_addr_t cpu_accessible_dma_address;
+ struct gen_pool *cpu_accessible_dma_pool;
+ unsigned long *asid_bitmap;
+ struct mutex asid_mutex;
+ /* TODO: remove fd_open_cnt_lock for multiple process support */
+ struct mutex fd_open_cnt_lock;
+ struct mutex send_cpu_message_lock;
+ struct asic_fixed_properties asic_prop;
+ const struct hl_asic_funcs *asic_funcs;
+ void *asic_specific;
+ struct gen_pool *mmu_pgt_pool;
+ struct hl_vm vm;
+ struct mutex mmu_cache_lock;
+ struct device *hwmon_dev;
+ enum hl_pm_mng_profile pm_mng_profile;
+ struct hwmon_chip_info *hl_chip_info;
+
+ struct hl_dbg_device_entry hl_debugfs;
+
+ struct list_head cb_pool;
+ spinlock_t cb_pool_lock;
+
+ /* TODO: remove user_ctx for multiple process support */
+ struct hl_ctx *user_ctx;
+
+ atomic64_t dram_used_mem;
+ atomic_t in_reset;
+ atomic_t curr_pll_profile;
+ atomic_t fd_open_cnt;
+ u64 timeout_jiffies;
+ u64 max_power;
+ u32 major;
+ u32 high_pll;
+ u32 soft_reset_cnt;
+ u32 hard_reset_cnt;
+ u16 id;
+ u8 disabled;
+ u8 late_init_done;
+ u8 hwmon_initialized;
+ u8 hard_reset_pending;
+ u8 heartbeat;
+ u8 reset_on_lockup;
+ u8 dram_supports_virtual_memory;
+ u8 dram_default_page_mapping;
+ u8 init_done;
+ u8 device_cpu_disabled;
+
+ /* Parameters for bring-up */
+ u8 mmu_enable;
+ u8 cpu_enable;
+ u8 reset_pcilink;
+ u8 cpu_queues_enable;
+ u8 fw_loading;
+ u8 pldm;
+};
+
+
+/*
+ * IOCTLs
+ */
+
+/**
+ * typedef hl_ioctl_t - typedef for ioctl function in the driver
+ * @hpriv: pointer to the FD's private data, which contains state of
+ * user process
+ * @data: pointer to the input/output arguments structure of the IOCTL
+ *
+ * Return: 0 for success, negative value for error
+ */
+typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
+
+/**
+ * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
+ * @cmd: the IOCTL code as created by the kernel macros.
+ * @func: pointer to the driver's function that should be called for this IOCTL.
+ */
+struct hl_ioctl_desc {
+ unsigned int cmd;
+ hl_ioctl_t *func;
+};
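+
+/*
+ * Illustrative sketch (mirrors the table in habanalabs_ioctl.c, handler name
+ * taken from there): descriptors are typically kept in an array indexed by
+ * _IOC_NR() of the command, e.g.:
+ *
+ *	static const struct hl_ioctl_desc hl_ioctls[] = {
+ *		[_IOC_NR(HL_IOCTL_INFO)] = {
+ *			.cmd = HL_IOCTL_INFO,
+ *			.func = hl_info_ioctl,
+ *		},
+ *	};
+ */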
+
+
+/*
+ * Kernel module functions that can be accessed by entire module
+ */
+
+/**
+ * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
+ * @address: The start address of the area we want to validate.
+ * @size: The size in bytes of the area we want to validate.
+ * @range_start_address: The start address of the valid range.
+ * @range_end_address: The end address of the valid range.
+ *
+ * Return: true if the area is inside the valid range, false otherwise.
+ */
+static inline bool hl_mem_area_inside_range(u64 address, u32 size,
+ u64 range_start_address, u64 range_end_address)
+{
+ u64 end_address = address + size;
+
+ if ((address >= range_start_address) &&
+ (end_address <= range_end_address) &&
+ (end_address > address))
+ return true;
+
+ return false;
+}
+
+/**
+ * hl_mem_area_crosses_range() - Checks whether address+size crosses a range.
+ * @address: The start address of the area we want to validate.
+ * @size: The size in bytes of the area we want to validate.
+ * @range_start_address: The start address of the valid range.
+ * @range_end_address: The end address of the valid range.
+ *
+ * Return: true if the area overlaps part or all of the valid range,
+ * false otherwise.
+ */
+static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
+ u64 range_start_address, u64 range_end_address)
+{
+ u64 end_address = address + size;
+
+ if ((address >= range_start_address) &&
+ (address < range_end_address))
+ return true;
+
+ if ((end_address >= range_start_address) &&
+ (end_address < range_end_address))
+ return true;
+
+ if ((address < range_start_address) &&
+ (end_address >= range_end_address))
+ return true;
+
+ return false;
+}
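+
+/*
+ * Illustrative sketch (field names assumed from asic_fixed_properties, values
+ * hypothetical): a CB parser could validate a user-supplied area against the
+ * user DRAM range along these lines:
+ *
+ *	if (!hl_mem_area_inside_range(addr, size,
+ *				prop->dram_user_base_address,
+ *				prop->dram_end_address))
+ *		return -EFAULT;
+ *
+ * hl_mem_area_crosses_range() is the complementary check, e.g. for rejecting
+ * areas that even partially overlap a reserved region.
+ */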
+
+int hl_device_open(struct inode *inode, struct file *filp);
+bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
+int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
+ enum hl_asic_type asic_type, int minor);
+void destroy_hdev(struct hl_device *hdev);
+int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, u32 timeout_us,
+ u32 *val);
+int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
+ u32 timeout_us, u32 *val);
+int hl_hw_queues_create(struct hl_device *hdev);
+void hl_hw_queues_destroy(struct hl_device *hdev);
+int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
+ u32 cb_size, u64 cb_ptr);
+int hl_hw_queue_schedule_cs(struct hl_cs *cs);
+u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
+void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
+void hl_int_hw_queue_update_ci(struct hl_cs *cs);
+void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
+
+#define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
+#define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1))
+
+int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
+void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
+int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
+void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
+void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
+void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
+irqreturn_t hl_irq_handler_cq(int irq, void *arg);
+irqreturn_t hl_irq_handler_eq(int irq, void *arg);
+u32 hl_cq_inc_ptr(u32 ptr);
+
+int hl_asid_init(struct hl_device *hdev);
+void hl_asid_fini(struct hl_device *hdev);
+unsigned long hl_asid_alloc(struct hl_device *hdev);
+void hl_asid_free(struct hl_device *hdev, unsigned long asid);
+
+int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
+void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
+int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
+void hl_ctx_do_release(struct kref *ref);
+void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
+int hl_ctx_put(struct hl_ctx *ctx);
+struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
+void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
+void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
+
+int hl_device_init(struct hl_device *hdev, struct class *hclass);
+void hl_device_fini(struct hl_device *hdev);
+int hl_device_suspend(struct hl_device *hdev);
+int hl_device_resume(struct hl_device *hdev);
+int hl_device_reset(struct hl_device *hdev, bool hard_reset,
+ bool from_hard_reset_thread);
+void hl_hpriv_get(struct hl_fpriv *hpriv);
+void hl_hpriv_put(struct hl_fpriv *hpriv);
+int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
+
+int hl_build_hwmon_channel_info(struct hl_device *hdev,
+ struct armcp_sensor *sensors_arr);
+
+int hl_sysfs_init(struct hl_device *hdev);
+void hl_sysfs_fini(struct hl_device *hdev);
+
+int hl_hwmon_init(struct hl_device *hdev);
+void hl_hwmon_fini(struct hl_device *hdev);
+
+int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
+ u64 *handle, int ctx_id);
+int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
+int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
+struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+ u32 handle);
+void hl_cb_put(struct hl_cb *cb);
+void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
+void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size);
+int hl_cb_pool_init(struct hl_device *hdev);
+int hl_cb_pool_fini(struct hl_device *hdev);
+
+void hl_cs_rollback_all(struct hl_device *hdev);
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
+
+void goya_set_asic_funcs(struct hl_device *hdev);
+
+int hl_vm_ctx_init(struct hl_ctx *ctx);
+void hl_vm_ctx_fini(struct hl_ctx *ctx);
+
+int hl_vm_init(struct hl_device *hdev);
+void hl_vm_fini(struct hl_device *hdev);
+
+int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
+ struct hl_userptr *userptr);
+int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
+void hl_userptr_delete_list(struct hl_device *hdev,
+ struct list_head *userptr_list);
+bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
+ struct list_head *userptr_list,
+ struct hl_userptr **userptr);
+
+int hl_mmu_init(struct hl_device *hdev);
+void hl_mmu_fini(struct hl_device *hdev);
+int hl_mmu_ctx_init(struct hl_ctx *ctx);
+void hl_mmu_ctx_fini(struct hl_ctx *ctx);
+int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size);
+int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
+void hl_mmu_swap_out(struct hl_ctx *ctx);
+void hl_mmu_swap_in(struct hl_ctx *ctx);
+
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
+long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
+long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
+long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
+long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
+long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
+void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
+ long value);
+u64 hl_get_max_power(struct hl_device *hdev);
+void hl_set_max_power(struct hl_device *hdev, u64 value);
+
+#ifdef CONFIG_DEBUG_FS
+
+void hl_debugfs_init(void);
+void hl_debugfs_fini(void);
+void hl_debugfs_add_device(struct hl_device *hdev);
+void hl_debugfs_remove_device(struct hl_device *hdev);
+void hl_debugfs_add_file(struct hl_fpriv *hpriv);
+void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
+void hl_debugfs_add_cb(struct hl_cb *cb);
+void hl_debugfs_remove_cb(struct hl_cb *cb);
+void hl_debugfs_add_cs(struct hl_cs *cs);
+void hl_debugfs_remove_cs(struct hl_cs *cs);
+void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
+void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
+void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
+void hl_debugfs_remove_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr);
+void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
+void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
+
+#else
+
+static inline void __init hl_debugfs_init(void)
+{
+}
+
+static inline void hl_debugfs_fini(void)
+{
+}
+
+static inline void hl_debugfs_add_device(struct hl_device *hdev)
+{
+}
+
+static inline void hl_debugfs_remove_device(struct hl_device *hdev)
+{
+}
+
+static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
+{
+}
+
+static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
+{
+}
+
+static inline void hl_debugfs_add_cb(struct hl_cb *cb)
+{
+}
+
+static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
+{
+}
+
+static inline void hl_debugfs_add_cs(struct hl_cs *cs)
+{
+}
+
+static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
+{
+}
+
+static inline void hl_debugfs_add_job(struct hl_device *hdev,
+ struct hl_cs_job *job)
+{
+}
+
+static inline void hl_debugfs_remove_job(struct hl_device *hdev,
+ struct hl_cs_job *job)
+{
+}
+
+static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr)
+{
+}
+
+static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
+ struct hl_userptr *userptr)
+{
+}
+
+static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
+ struct hl_ctx *ctx)
+{
+}
+
+static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
+ struct hl_ctx *ctx)
+{
+}
+
+#endif
+
+/* IOCTLs */
+long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
+int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
+
+#endif /* HABANALABSP_H_ */
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
new file mode 100644
index 000000000000..748601463f11
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team"
+
+#define HL_DRIVER_DESC "Driver for HabanaLabs's AI Accelerators"
+
+MODULE_AUTHOR(HL_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(HL_DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+
+static int hl_major;
+static struct class *hl_class;
+static DEFINE_IDR(hl_devs_idr);
+static DEFINE_MUTEX(hl_devs_idr_lock);
+
+static int timeout_locked = 5;
+static int reset_on_lockup = 1;
+
+module_param(timeout_locked, int, 0444);
+MODULE_PARM_DESC(timeout_locked,
+ "Device lockup timeout in seconds (0 = disabled, default 5s)");
+
+module_param(reset_on_lockup, int, 0444);
+MODULE_PARM_DESC(reset_on_lockup,
+ "Do device reset on lockup (0 = no, 1 = yes, default yes)");
+
+#define PCI_VENDOR_ID_HABANALABS 0x1da3
+
+#define PCI_IDS_GOYA 0x0001
+
+static const struct pci_device_id ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+/*
+ * get_asic_type - translate device id to asic type
+ *
+ * @device: id of the PCI device
+ *
+ * Translate device id to asic type.
+ * In case of an unidentified device, return ASIC_INVALID.
+ */
+static enum hl_asic_type get_asic_type(u16 device)
+{
+ enum hl_asic_type asic_type;
+
+ switch (device) {
+ case PCI_IDS_GOYA:
+ asic_type = ASIC_GOYA;
+ break;
+ default:
+ asic_type = ASIC_INVALID;
+ break;
+ }
+
+ return asic_type;
+}
+
+/*
+ * hl_device_open - open function for habanalabs device
+ *
+ * @inode: pointer to inode structure
+ * @filp: pointer to file structure
+ *
+ * Called when a process opens a habanalabs device.
+ */
+int hl_device_open(struct inode *inode, struct file *filp)
+{
+ struct hl_device *hdev;
+ struct hl_fpriv *hpriv;
+ int rc;
+
+ mutex_lock(&hl_devs_idr_lock);
+ hdev = idr_find(&hl_devs_idr, iminor(inode));
+ mutex_unlock(&hl_devs_idr_lock);
+
+ if (!hdev) {
+ pr_err("Couldn't find device %d:%d\n",
+ imajor(inode), iminor(inode));
+ return -ENXIO;
+ }
+
+ mutex_lock(&hdev->fd_open_cnt_lock);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't open %s because it is disabled or in reset\n",
+ dev_name(hdev->dev));
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+ return -EPERM;
+ }
+
+ if (atomic_read(&hdev->fd_open_cnt)) {
+ dev_info_ratelimited(hdev->dev,
+ "Device %s is already attached to application\n",
+ dev_name(hdev->dev));
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+ return -EBUSY;
+ }
+
+ atomic_inc(&hdev->fd_open_cnt);
+
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+
+ hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv) {
+ rc = -ENOMEM;
+ goto close_device;
+ }
+
+ hpriv->hdev = hdev;
+ filp->private_data = hpriv;
+ hpriv->filp = filp;
+ mutex_init(&hpriv->restore_phase_mutex);
+ kref_init(&hpriv->refcount);
+ nonseekable_open(inode, filp);
+
+ hl_cb_mgr_init(&hpriv->cb_mgr);
+ hl_ctx_mgr_init(&hpriv->ctx_mgr);
+
+ rc = hl_ctx_create(hdev, hpriv);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to open FD (CTX fail)\n");
+ goto out_err;
+ }
+
+ hpriv->taskpid = find_get_pid(current->pid);
+
+ /*
+ * Device is IDLE at this point so it is legal to change PLLs. There
+ * is no need to check anything because if the PLL is already HIGH, the
+ * set function will return without doing anything
+ */
+ hl_device_set_frequency(hdev, PLL_HIGH);
+
+ hl_debugfs_add_file(hpriv);
+
+ return 0;
+
+out_err:
+ filp->private_data = NULL;
+ hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+ hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+ mutex_destroy(&hpriv->restore_phase_mutex);
+ kfree(hpriv);
+
+close_device:
+ atomic_dec(&hdev->fd_open_cnt);
+ return rc;
+}
+
+/*
+ * create_hdev - create habanalabs device instance
+ *
+ * @dev: will hold the pointer to the new habanalabs device structure
+ * @pdev: pointer to the pci device
+ * @asic_type: in case of simulator device, which device is it
+ * @minor: in case of simulator device, the minor of the device
+ *
+ * Allocate memory for habanalabs device and initialize basic fields
+ * Identify the ASIC type
+ * Allocate ID (minor) for the device (only for real devices)
+ */
+int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
+ enum hl_asic_type asic_type, int minor)
+{
+ struct hl_device *hdev;
+ int rc;
+
+ *dev = NULL;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->major = hl_major;
+ hdev->reset_on_lockup = reset_on_lockup;
+
+ /* Parameters for bring-up - set them to defaults */
+ hdev->mmu_enable = 1;
+ hdev->cpu_enable = 1;
+ hdev->reset_pcilink = 0;
+ hdev->cpu_queues_enable = 1;
+ hdev->fw_loading = 1;
+ hdev->pldm = 0;
+ hdev->heartbeat = 1;
+
+ /* If CPU is disabled, no point in loading FW */
+ if (!hdev->cpu_enable)
+ hdev->fw_loading = 0;
+
+ /* If we don't load FW, no need to initialize CPU queues */
+ if (!hdev->fw_loading)
+ hdev->cpu_queues_enable = 0;
+
+ /* If CPU queues not enabled, no way to do heartbeat */
+ if (!hdev->cpu_queues_enable)
+ hdev->heartbeat = 0;
+
+ if (timeout_locked)
+ hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
+ else
+ hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+ hdev->disabled = true;
+ hdev->pdev = pdev; /* can be NULL in case of simulator device */
+
+ if (asic_type == ASIC_AUTO_DETECT) {
+ hdev->asic_type = get_asic_type(pdev->device);
+ if (hdev->asic_type == ASIC_INVALID) {
+ dev_err(&pdev->dev, "Unsupported ASIC\n");
+ rc = -ENODEV;
+ goto free_hdev;
+ }
+ } else {
+ hdev->asic_type = asic_type;
+ }
+
+ mutex_lock(&hl_devs_idr_lock);
+
+ if (minor == -1) {
+ rc = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
+ GFP_KERNEL);
+ } else {
+ void *old_idr = idr_replace(&hl_devs_idr, hdev, minor);
+
+ if (IS_ERR_VALUE(old_idr)) {
+ rc = PTR_ERR(old_idr);
+ pr_err("Error %d when trying to replace minor %d\n",
+ rc, minor);
+ mutex_unlock(&hl_devs_idr_lock);
+ goto free_hdev;
+ }
+ rc = minor;
+ }
+
+ mutex_unlock(&hl_devs_idr_lock);
+
+ if (rc < 0) {
+ if (rc == -ENOSPC) {
+ pr_err("too many devices in the system\n");
+ rc = -EBUSY;
+ }
+ goto free_hdev;
+ }
+
+ hdev->id = rc;
+
+ *dev = hdev;
+
+ return 0;
+
+free_hdev:
+ kfree(hdev);
+ return rc;
+}
+
+/*
+ * destroy_hdev - destroy habanalabs device instance
+ *
+ * @dev: pointer to the habanalabs device structure
+ *
+ */
+void destroy_hdev(struct hl_device *hdev)
+{
+ /* Remove device from the device list */
+ mutex_lock(&hl_devs_idr_lock);
+ idr_remove(&hl_devs_idr, hdev->id);
+ mutex_unlock(&hl_devs_idr_lock);
+
+ kfree(hdev);
+}
+
+static int hl_pmops_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct hl_device *hdev = pci_get_drvdata(pdev);
+
+ pr_debug("Going to suspend PCI device\n");
+
+ if (!hdev) {
+ pr_err("device pointer is NULL in suspend\n");
+ return 0;
+ }
+
+ return hl_device_suspend(hdev);
+}
+
+static int hl_pmops_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct hl_device *hdev = pci_get_drvdata(pdev);
+
+ pr_debug("Going to resume PCI device\n");
+
+ if (!hdev) {
+ pr_err("device pointer is NULL in resume\n");
+ return 0;
+ }
+
+ return hl_device_resume(hdev);
+}
+
+/*
+ * hl_pci_probe - probe PCI habanalabs devices
+ *
+ * @pdev: pointer to pci device
+ * @id: pointer to pci device id structure
+ *
+ * Standard PCI probe function for habanalabs device.
+ * Create a new habanalabs device and initialize it according to the
+ * device's type
+ */
+static int hl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct hl_device *hdev;
+ int rc;
+
+ dev_info(&pdev->dev, HL_NAME
+ " device found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+ rc = create_hdev(&hdev, pdev, ASIC_AUTO_DETECT, -1);
+ if (rc)
+ return rc;
+
+ pci_set_drvdata(pdev, hdev);
+
+ rc = hl_device_init(hdev, hl_class);
+ if (rc) {
+ dev_err(&pdev->dev, "Fatal error during habanalabs device init\n");
+ rc = -ENODEV;
+ goto disable_device;
+ }
+
+ return 0;
+
+disable_device:
+ pci_set_drvdata(pdev, NULL);
+ destroy_hdev(hdev);
+
+ return rc;
+}
+
+/*
+ * hl_pci_remove - remove PCI habanalabs devices
+ *
+ * @pdev: pointer to pci device
+ *
+ * Standard PCI remove function for habanalabs device
+ */
+static void hl_pci_remove(struct pci_dev *pdev)
+{
+ struct hl_device *hdev;
+
+ hdev = pci_get_drvdata(pdev);
+ if (!hdev)
+ return;
+
+ hl_device_fini(hdev);
+ pci_set_drvdata(pdev, NULL);
+
+ destroy_hdev(hdev);
+}
+
+static const struct dev_pm_ops hl_pm_ops = {
+ .suspend = hl_pmops_suspend,
+ .resume = hl_pmops_resume,
+};
+
+static struct pci_driver hl_pci_driver = {
+ .name = HL_NAME,
+ .id_table = ids,
+ .probe = hl_pci_probe,
+ .remove = hl_pci_remove,
+ .driver.pm = &hl_pm_ops,
+};
+
+/*
+ * hl_init - Initialize the habanalabs kernel driver
+ */
+static int __init hl_init(void)
+{
+ int rc;
+ dev_t dev;
+
+ pr_info("loading driver\n");
+
+ rc = alloc_chrdev_region(&dev, 0, HL_MAX_MINORS, HL_NAME);
+ if (rc < 0) {
+ pr_err("unable to get major\n");
+ return rc;
+ }
+
+ hl_major = MAJOR(dev);
+
+ hl_class = class_create(THIS_MODULE, HL_NAME);
+ if (IS_ERR(hl_class)) {
+ pr_err("failed to allocate class\n");
+ rc = PTR_ERR(hl_class);
+ goto remove_major;
+ }
+
+ hl_debugfs_init();
+
+ rc = pci_register_driver(&hl_pci_driver);
+ if (rc) {
+ pr_err("failed to register pci device\n");
+ goto remove_debugfs;
+ }
+
+ pr_debug("driver loaded\n");
+
+ return 0;
+
+remove_debugfs:
+ hl_debugfs_fini();
+ class_destroy(hl_class);
+remove_major:
+ unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
+ return rc;
+}
+
+/*
+ * hl_exit - Release all resources of the habanalabs kernel driver
+ */
+static void __exit hl_exit(void)
+{
+ pci_unregister_driver(&hl_pci_driver);
+
+ /*
+ * Removing debugfs must be after all devices or simulator devices
+ * have been removed because otherwise we get a bug in the
+ * debugfs module for referencing NULL objects
+ */
+ hl_debugfs_fini();
+
+ class_destroy(hl_class);
+ unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS);
+
+ idr_destroy(&hl_devs_idr);
+
+ pr_debug("driver removed\n");
+}
+
+module_init(hl_init);
+module_exit(hl_exit);
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
new file mode 100644
index 000000000000..2c2739a3c5ec
--- /dev/null
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_hw_ip_info hw_ip = {0};
+ u32 size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 sram_kmd_size, dram_kmd_size;
+
+ if ((!size) || (!out))
+ return -EINVAL;
+
+ sram_kmd_size = (prop->sram_user_base_address -
+ prop->sram_base_address);
+ dram_kmd_size = (prop->dram_user_base_address -
+ prop->dram_base_address);
+
+ hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
+ hw_ip.sram_base_address = prop->sram_user_base_address;
+ hw_ip.dram_base_address = prop->dram_user_base_address;
+ hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
+ hw_ip.sram_size = prop->sram_size - sram_kmd_size;
+ hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+ if (hw_ip.dram_size > 0)
+ hw_ip.dram_enabled = 1;
+ hw_ip.num_of_events = prop->num_of_events;
+ memcpy(hw_ip.armcp_version,
+ prop->armcp_info.armcp_version, VERSION_MAX_LEN);
+ hw_ip.armcp_cpld_version = __le32_to_cpu(prop->armcp_info.cpld_version);
+ hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
+ hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
+ hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
+ hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
+
+ return copy_to_user(out, &hw_ip,
+ min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
+}
+
+static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ u32 size, max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ void *arr;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ arr = hdev->asic_funcs->get_events_stat(hdev, &size);
+
+ return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
+}
+
+static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_dram_usage dram_usage = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 dram_kmd_size;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ dram_kmd_size = (prop->dram_user_base_address -
+ prop->dram_base_address);
+ dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
+ atomic64_read(&hdev->dram_used_mem);
+ dram_usage.ctx_dram_mem = atomic64_read(&hdev->user_ctx->dram_phys_mem);
+
+ return copy_to_user(out, &dram_usage,
+ min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
+}
+
+static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_hw_idle hw_idle = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev);
+
+ return copy_to_user(out, &hw_idle,
+ min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
+}
+
+static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_info_args *args = data;
+ struct hl_device *hdev = hpriv->hdev;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_err(hdev->dev,
+ "Device is disabled or in reset. Can't execute INFO IOCTL\n");
+ return -EBUSY;
+ }
+
+ switch (args->op) {
+ case HL_INFO_HW_IP_INFO:
+ rc = hw_ip_info(hdev, args);
+ break;
+
+ case HL_INFO_HW_EVENTS:
+ rc = hw_events_info(hdev, args);
+ break;
+
+ case HL_INFO_DRAM_USAGE:
+ rc = dram_usage_info(hdev, args);
+ break;
+
+ case HL_INFO_HW_IDLE:
+ rc = hw_idle(hdev, args);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid request %d\n", args->op);
+ rc = -ENOTTY;
+ break;
+ }
+
+ return rc;
+}
+
+#define HL_IOCTL_DEF(ioctl, _func) \
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
+
+static const struct hl_ioctl_desc hl_ioctls[] = {
+ HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
+ HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl)
+};
+
+#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
+
+long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct hl_fpriv *hpriv = filep->private_data;
+ struct hl_device *hdev = hpriv->hdev;
+ hl_ioctl_t *func;
+ const struct hl_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+ char stack_kdata[128] = {0};
+ char *kdata = NULL;
+ unsigned int usize, asize;
+ int retcode;
+
+ if (hdev->hard_reset_pending) {
+ dev_crit_ratelimited(hdev->dev,
+ "Device HARD reset pending! Please close FD\n");
+ return -ENODEV;
+ }
+
+ if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
+ u32 hl_size;
+
+ ioctl = &hl_ioctls[nr];
+
+ hl_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (hl_size > asize)
+ asize = hl_size;
+
+ cmd = ioctl->cmd;
+ } else {
+ dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
+ task_pid_nr(current), nr);
+ return -ENOTTY;
+ }
+
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ dev_dbg(hdev->dev, "no function\n");
+ retcode = -ENOTTY;
+ goto out_err;
+ }
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kzalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto out_err;
+ }
+ }
+ }
+
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg, usize)) {
+ retcode = -EFAULT;
+ goto out_err;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ retcode = func(hpriv, kdata);
+
+ if (cmd & IOC_OUT)
+ if (copy_to_user((void __user *)arg, kdata, usize))
+ retcode = -EFAULT;
+
+out_err:
+ if (retcode)
+ dev_dbg(hdev->dev,
+ "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current), cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+
+ return retcode;
+}
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
new file mode 100644
index 000000000000..67bece26417c
--- /dev/null
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+/*
+ * hl_hw_queue_add_ptr - add to pi or ci and handle the wraparound
+ *
+ * @ptr: the current pi/ci value
+ * @val: the amount to add
+ *
+ * Add val to ptr. The result wraps at twice the queue length, which makes it
+ * possible to distinguish a full queue from an empty one.
+ */
+inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
+{
+ ptr += val;
+ ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
+ return ptr;
+}
+
+static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
+{
+ int delta = (q->pi - q->ci);
+
+ if (delta >= 0)
+ return (queue_len - delta);
+ else
+ return (abs(delta) - queue_len);
+}
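+
+/*
+ * Worked example (illustrative; suppose HL_QUEUE_LENGTH is 256): pi and ci
+ * wrap at twice the queue length, so a full queue is distinguishable from an
+ * empty one. With pi == 300 and ci == 50, delta == 250 and 6 slots are free.
+ * With pi == 10 and ci == 300, delta == -290 and abs(delta) - 256 == 34
+ * slots are free, since (pi - ci) mod 512 == 222 entries are occupied.
+ */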
+
+void hl_int_hw_queue_update_ci(struct hl_cs *cs)
+{
+ struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_hw_queue *q;
+ int i;
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (hdev->disabled)
+ goto out;
+
+ q = &hdev->kernel_queues[0];
+ for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
+ if (q->queue_type == QUEUE_TYPE_INT) {
+ q->ci += cs->jobs_in_queue_cnt[i];
+ q->ci &= ((q->int_queue_len << 1) - 1);
+ }
+ }
+
+out:
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+}
+
+/*
+ * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @q: pointer to habanalabs queue structure
+ * @ctl: BD's control word
+ * @len: BD's length
+ * @ptr: BD's pointer
+ *
+ * This function assumes there is enough space on the queue to submit a new
+ * BD to it. It initializes the next BD and calls the device specific
+ * function to set the pi (and doorbell)
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 ctl, u32 len, u64 ptr)
+{
+ struct hl_bd *bd;
+
+ bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
+ bd += hl_pi_2_offset(q->pi);
+ bd->ctl = __cpu_to_le32(ctl);
+ bd->len = __cpu_to_le32(len);
+ bd->ptr = __cpu_to_le64(ptr + hdev->asic_prop.host_phys_base_address);
+
+ q->pi = hl_queue_inc_ptr(q->pi);
+ hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
+}
+
+/*
+ * ext_queue_sanity_checks - perform some sanity checks on external queue
+ *
+ * @hdev : pointer to hl_device structure
+ * @q : pointer to hl_hw_queue structure
+ * @num_of_entries : how many entries to check for space
+ * @reserve_cq_entry : whether to reserve an entry in the cq
+ *
+ * H/W queues spinlock should be taken before calling this function
+ *
+ * Perform the following:
+ * - Make sure we have enough space in the h/w queue
+ * - Make sure we have enough space in the completion queue
+ * - Reserve space in the completion queue (needs to be reversed if there
+ * is a failure down the road before the actual submission of work). Only
+ * do this action if reserve_cq_entry is true
+ *
+ */
+static int ext_queue_sanity_checks(struct hl_device *hdev,
+ struct hl_hw_queue *q, int num_of_entries,
+ bool reserve_cq_entry)
+{
+ atomic_t *free_slots =
+ &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+ int free_slots_cnt;
+
+ /* Check we have enough space in the queue */
+ free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
+
+ if (free_slots_cnt < num_of_entries) {
+ dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
+ q->hw_queue_id, num_of_entries);
+ return -EAGAIN;
+ }
+
+ if (reserve_cq_entry) {
+		/*
+		 * Check we have enough space in the completion queue.
+		 * Subtract num_of_entries from the free slots counter; if the
+		 * result is negative, the CQ doesn't have room for all the
+		 * CBs, so restore the counter and bail out, because we won't
+		 * get an ack on their completion.
+		 * atomic_add_negative() returns true if the new value is
+		 * negative.
+		 */
+ if (atomic_add_negative(num_of_entries * -1, free_slots)) {
+ dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
+ num_of_entries, q->hw_queue_id);
+ atomic_add(num_of_entries, free_slots);
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * int_queue_sanity_checks - perform some sanity checks on internal queue
+ *
+ * @hdev : pointer to hl_device structure
+ * @q : pointer to hl_hw_queue structure
+ * @num_of_entries : how many entries to check for space
+ *
+ * H/W queues spinlock should be taken before calling this function
+ *
+ * Perform the following:
+ * - Make sure we have enough space in the h/w queue
+ *
+ */
+static int int_queue_sanity_checks(struct hl_device *hdev,
+ struct hl_hw_queue *q,
+ int num_of_entries)
+{
+ int free_slots_cnt;
+
+ /* Check we have enough space in the queue */
+ free_slots_cnt = queue_free_slots(q, q->int_queue_len);
+
+ if (free_slots_cnt < num_of_entries) {
+ dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
+ q->hw_queue_id, num_of_entries);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
+ *
+ * @hdev: pointer to hl_device structure
+ * @hw_queue_id: the ID of the H/W queue to submit the CB to
+ * @cb_size: size of CB
+ * @cb_ptr: pointer to CB location
+ *
+ * This function sends a single CB that must NOT generate a completion entry.
+ *
+ */
+int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
+ u32 cb_size, u64 cb_ptr)
+{
+ struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
+ int rc;
+
+ /*
+ * The CPU queue is a synchronous queue with an effective depth of
+ * a single entry (although it is allocated with room for multiple
+ * entries). Therefore, there is a different lock, called
+ * send_cpu_message_lock, that serializes accesses to the CPU queue.
+ * As a result, we don't need to lock the access to the entire H/W
+ * queues module when submitting a JOB to the CPU queue
+ */
+ if (q->queue_type != QUEUE_TYPE_CPU)
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (hdev->disabled) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ rc = ext_queue_sanity_checks(hdev, q, 1, false);
+ if (rc)
+ goto out;
+
+ ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+
+out:
+ if (q->queue_type != QUEUE_TYPE_CPU)
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ return rc;
+}
+
+/*
+ * ext_hw_queue_schedule_job - submit a JOB to an external queue
+ *
+ * @job: pointer to the job that needs to be submitted to the queue
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
+{
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_cq_entry cq_pkt;
+ struct hl_cq *cq;
+ u64 cq_addr;
+ struct hl_cb *cb;
+ u32 ctl;
+ u32 len;
+ u64 ptr;
+
+ /*
+ * Update the JOB ID inside the BD CTL so the device would know what
+ * to write in the completion queue
+ */
+ ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);
+
+ cb = job->patched_cb;
+ len = job->job_cb_size;
+ ptr = cb->bus_address;
+
+ cq_pkt.data = __cpu_to_le32(
+ ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
+ & CQ_ENTRY_SHADOW_INDEX_MASK) |
+ (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
+ (1 << CQ_ENTRY_READY_SHIFT));
+
+ /*
+ * No need to protect pi_offset because scheduling to the
+ * H/W queues is done under the scheduler mutex
+ *
+ * No need to check if CQ is full because it was already
+	 * checked in ext_queue_sanity_checks
+ */
+ cq = &hdev->completion_queue[q->hw_queue_id];
+ cq_addr = cq->bus_address +
+ hdev->asic_prop.host_phys_base_address;
+ cq_addr += cq->pi * sizeof(struct hl_cq_entry);
+
+ hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len,
+ cq_addr,
+ __le32_to_cpu(cq_pkt.data),
+ q->hw_queue_id);
+
+ q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
+
+ cq->pi = hl_cq_inc_ptr(cq->pi);
+
+ ext_queue_submit_bd(hdev, q, ctl, len, ptr);
+}
+
+/*
+ * int_hw_queue_schedule_job - submit a JOB to an internal queue
+ *
+ * @job: pointer to the job that needs to be submitted to the queue
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void int_hw_queue_schedule_job(struct hl_cs_job *job)
+{
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_bd bd;
+ u64 *pi, *pbd = (u64 *) &bd;
+
+ bd.ctl = 0;
+ bd.len = __cpu_to_le32(job->job_cb_size);
+ bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);
+
+ pi = (u64 *) (uintptr_t) (q->kernel_address +
+ ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
+
+ pi[0] = pbd[0];
+ pi[1] = pbd[1];
+
+ q->pi++;
+ q->pi &= ((q->int_queue_len << 1) - 1);
+
+ /* Flush PQ entry write. Relevant only for specific ASICs */
+ hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
+
+ hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
+}
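A short worked example of the producer-index arithmetic above (the queue length is an illustrative value): internal queues let pi run over twice the ring size so a completely full ring can be told apart from an empty one, while the slot actually written is always pi masked by the ring size.

/*
 * Example with int_queue_len = 16 (illustrative value only):
 *
 *   q->pi = 17: BD is written to slot 17 & (16 - 1) = 1,
 *               then q->pi advances to 18 (18 & 31 = 18).
 *   q->pi = 31: BD is written to slot 15,
 *               then q->pi advances to 32 & 31 = 0, i.e. it wraps.
 */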
+
+/*
+ * hl_hw_queue_schedule_cs - schedule a command submission
+ *
+ * @cs: pointer to the CS
+ *
+ */
+int hl_hw_queue_schedule_cs(struct hl_cs *cs)
+{
+ struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_cs_job *job, *tmp;
+ struct hl_hw_queue *q;
+ int rc = 0, i, cq_cnt;
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_err(hdev->dev,
+ "device is disabled or in reset, CS rejected!\n");
+ rc = -EPERM;
+ goto out;
+ }
+
+ q = &hdev->kernel_queues[0];
+ /* This loop assumes all external queues are consecutive */
+ for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
+ if (q->queue_type == QUEUE_TYPE_EXT) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ rc = ext_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i], true);
+ if (rc)
+ goto unroll_cq_resv;
+ cq_cnt++;
+ }
+ } else if (q->queue_type == QUEUE_TYPE_INT) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ rc = int_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i]);
+ if (rc)
+ goto unroll_cq_resv;
+ }
+ }
+ }
+
+ spin_lock(&hdev->hw_queues_mirror_lock);
+ list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
+
+ /* Queue TDR if the CS is the first entry and if timeout is wanted */
+ if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
+ (list_first_entry(&hdev->hw_queues_mirror_list,
+ struct hl_cs, mirror_node) == cs)) {
+ cs->tdr_active = true;
+ schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+ } else {
+ spin_unlock(&hdev->hw_queues_mirror_lock);
+ }
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+ if (job->ext_queue)
+ ext_hw_queue_schedule_job(job);
+ else
+ int_hw_queue_schedule_job(job);
+ }
+
+ cs->submitted = true;
+
+ goto out;
+
+unroll_cq_resv:
+ /* This loop assumes all external queues are consecutive */
+ q = &hdev->kernel_queues[0];
+ for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
+ if ((q->queue_type == QUEUE_TYPE_EXT) &&
+ (cs->jobs_in_queue_cnt[i])) {
+ atomic_t *free_slots =
+ &hdev->completion_queue[i].free_slots_cnt;
+ atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
+ cq_cnt--;
+ }
+ }
+
+out:
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ return rc;
+}
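For clarity, a worked failure scenario for the unroll path above. This is a sketch that assumes ext_queue_sanity_checks, when called with its reserve flag set, reserves jobs_in_queue_cnt[i] completion-queue slots by decrementing completion_queue[i].free_slots_cnt; that helper is not shown in this hunk.

/*
 * Hypothetical scenario: external queues 0 and 1 pass their checks and
 * each reserves its CQ slots (cq_cnt = 2), then a later queue fails.
 * The unroll_cq_resv loop walks the external queues again and calls
 * atomic_add(jobs_in_queue_cnt[i], free_slots) for queues 0 and 1,
 * decrementing cq_cnt until it reaches zero, so no CQ slots are leaked.
 */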
+
+/*
+ * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
+ *
+ * @hdev: pointer to hl_device structure
+ * @hw_queue_id: ID of the queue whose ci should be incremented
+ */
+void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
+{
+ struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
+
+ q->ci = hl_queue_inc_ptr(q->ci);
+}
+
+static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
+ struct hl_hw_queue *q)
+{
+ void *p;
+ int rc;
+
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->kernel_address = (u64) (uintptr_t) p;
+
+ q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
+ sizeof(*q->shadow_queue),
+ GFP_KERNEL);
+ if (!q->shadow_queue) {
+ dev_err(hdev->dev,
+ "Failed to allocate shadow queue for H/W queue %d\n",
+ q->hw_queue_id);
+ rc = -ENOMEM;
+ goto free_queue;
+ }
+
+ /* Make sure read/write pointers are initialized to start of queue */
+ q->ci = 0;
+ q->pi = 0;
+
+ return 0;
+
+free_queue:
+ hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address, q->bus_address);
+
+ return rc;
+}
+
+static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ void *p;
+
+ p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
+ &q->bus_address, &q->int_queue_len);
+ if (!p) {
+ dev_err(hdev->dev,
+ "Failed to get base address for internal queue %d\n",
+ q->hw_queue_id);
+ return -EFAULT;
+ }
+
+ q->kernel_address = (u64) (uintptr_t) p;
+ q->pi = 0;
+ q->ci = 0;
+
+ return 0;
+}
+
+static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ return ext_and_cpu_hw_queue_init(hdev, q);
+}
+
+static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ return ext_and_cpu_hw_queue_init(hdev, q);
+}
+
+/*
+ * hw_queue_init - main initialization function for H/W queue object
+ *
+ * @hdev: pointer to hl_device device structure
+ * @q: pointer to hl_hw_queue queue structure
+ * @hw_queue_id: The id of the H/W queue
+ *
+ * Allocate dma-able memory for the queue and initialize fields
+ * Returns 0 on success
+ */
+static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 hw_queue_id)
+{
+ int rc;
+
+ BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
+
+ q->hw_queue_id = hw_queue_id;
+
+ switch (q->queue_type) {
+ case QUEUE_TYPE_EXT:
+ rc = ext_hw_queue_init(hdev, q);
+ break;
+
+ case QUEUE_TYPE_INT:
+ rc = int_hw_queue_init(hdev, q);
+ break;
+
+ case QUEUE_TYPE_CPU:
+ rc = cpu_hw_queue_init(hdev, q);
+ break;
+
+ case QUEUE_TYPE_NA:
+ q->valid = 0;
+ return 0;
+
+ default:
+ dev_crit(hdev->dev, "wrong queue type %d during init\n",
+ q->queue_type);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ return rc;
+
+ q->valid = 1;
+
+ return 0;
+}
+
+/*
+ * hw_queue_fini - destroy queue
+ *
+ * @hdev: pointer to hl_device device structure
+ * @q: pointer to hl_hw_queue queue structure
+ *
+ * Free the queue memory
+ */
+static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ if (!q->valid)
+ return;
+
+ /*
+ * If we arrived here, there are no jobs waiting on this queue
+ * so we can safely remove it.
+ * This is because this function can only be called when:
+ * 1. Either a context is deleted, which can only occur once all its
+ * jobs have finished
+ * 2. A context couldn't be created due to failure or timeout,
+ * which means there are no jobs on the queue yet
+ *
+ * The only exceptions are the queues of the kernel context, but
+ * if they are being destroyed, it means that the entire module is
+ * being removed. If the module is removed, there is no open
+ * user context. It also means that if a job was submitted by
+ * the kernel driver (e.g. context creation), the job itself was
+ * released by the kernel driver when a timeout occurred on its
+ * completion. Thus, we don't need to release it again.
+ */
+
+ if (q->queue_type == QUEUE_TYPE_INT)
+ return;
+
+ kfree(q->shadow_queue);
+
+ hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address, q->bus_address);
+}
+
+int hl_hw_queues_create(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *asic = &hdev->asic_prop;
+ struct hl_hw_queue *q;
+ int i, rc, q_ready_cnt;
+
+ hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
+ sizeof(*hdev->kernel_queues), GFP_KERNEL);
+
+ if (!hdev->kernel_queues) {
+ dev_err(hdev->dev, "Not enough memory for H/W queues\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize the H/W queues */
+ for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
+ i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
+
+ q->queue_type = asic->hw_queues_props[i].type;
+ rc = hw_queue_init(hdev, q, i);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to initialize queue %d\n", i);
+ goto release_queues;
+ }
+ }
+
+ return 0;
+
+release_queues:
+ for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
+ hw_queue_fini(hdev, q);
+
+ kfree(hdev->kernel_queues);
+
+ return rc;
+}
+
+void hl_hw_queues_destroy(struct hl_device *hdev)
+{
+ struct hl_hw_queue *q;
+ int i;
+
+ for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
+ hw_queue_fini(hdev, q);
+
+ kfree(hdev->kernel_queues);
+}
+
+void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
+{
+ struct hl_hw_queue *q;
+ int i;
+
+ for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
+ if ((!q->valid) ||
+ ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
+ continue;
+ q->pi = q->ci = 0;
+ }
+}
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
new file mode 100644
index 000000000000..77facd25c4a2
--- /dev/null
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+
+#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */
+#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
+
+int hl_build_hwmon_channel_info(struct hl_device *hdev,
+ struct armcp_sensor *sensors_arr)
+{
+ u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
+ u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
+ u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0};
+ struct hwmon_channel_info **channels_info;
+ u32 num_sensors_for_type, num_active_sensor_types = 0,
+ arr_size = 0, *curr_arr;
+ enum hwmon_sensor_types type;
+ int rc, i, j;
+
+ for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) {
+ type = __le32_to_cpu(sensors_arr[i].type);
+
+ if ((type == 0) && (sensors_arr[i].flags == 0))
+ break;
+
+ if (type >= HWMON_NR_SENSOR_TYPES) {
+ dev_err(hdev->dev,
+ "Got wrong sensor type %d from device\n", type);
+ return -EINVAL;
+ }
+
+ counts[type]++;
+ arr_size++;
+ }
+
+ for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) {
+ if (counts[i] == 0)
+ continue;
+
+ num_sensors_for_type = counts[i] + 1;
+ curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr),
+ GFP_KERNEL);
+ if (!curr_arr) {
+ rc = -ENOMEM;
+ goto sensors_type_err;
+ }
+
+ num_active_sensor_types++;
+ sensors_by_type[i] = curr_arr;
+ }
+
+ for (i = 0 ; i < arr_size ; i++) {
+ type = __le32_to_cpu(sensors_arr[i].type);
+ curr_arr = sensors_by_type[type];
+ curr_arr[sensors_by_type_next_index[type]++] =
+ __le32_to_cpu(sensors_arr[i].flags);
+ }
+
+ channels_info = kcalloc(num_active_sensor_types + 1,
+ sizeof(*channels_info), GFP_KERNEL);
+ if (!channels_info) {
+ rc = -ENOMEM;
+ goto channels_info_array_err;
+ }
+
+ for (i = 0 ; i < num_active_sensor_types ; i++) {
+ channels_info[i] = kzalloc(sizeof(*channels_info[i]),
+ GFP_KERNEL);
+ if (!channels_info[i]) {
+ rc = -ENOMEM;
+ goto channel_info_err;
+ }
+ }
+
+ for (i = 0, j = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) {
+ if (!sensors_by_type[i])
+ continue;
+
+ channels_info[j]->type = i;
+ channels_info[j]->config = sensors_by_type[i];
+ j++;
+ }
+
+ hdev->hl_chip_info->info =
+ (const struct hwmon_channel_info **)channels_info;
+
+ return 0;
+
+channel_info_err:
+ for (i = 0 ; i < num_active_sensor_types ; i++)
+ if (channels_info[i]) {
+ kfree(channels_info[i]->config);
+ kfree(channels_info[i]);
+ }
+ kfree(channels_info);
+channels_info_array_err:
+sensors_type_err:
+ for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++)
+ kfree(sensors_by_type[i]);
+
+ return rc;
+}
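The function above builds the NULL-terminated hwmon_channel_info array that the hwmon core expects; because both kcalloc() calls zero their buffers, each per-type config array is implicitly zero-terminated and the outer array ends with a NULL pointer. A sketch of the resulting layout for a hypothetical sensor mix (two temperature channels and one fan channel reported by ArmCP) is shown below.

/*
 * Hypothetical result for two temperature sensors and one fan sensor:
 *
 *   channels_info[0]->type   = hwmon_temp
 *   channels_info[0]->config = { temp0_flags, temp1_flags, 0 }
 *   channels_info[1]->type   = hwmon_fan
 *   channels_info[1]->config = { fan0_flags, 0 }
 *   channels_info[2]         = NULL   (array terminator)
 */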
+
+static int hl_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_crit_hyst:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = hl_get_temperature(hdev, channel, attr);
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_min:
+ case hwmon_in_max:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = hl_get_voltage(hdev, channel, attr);
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_min:
+ case hwmon_curr_max:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = hl_get_current(hdev, channel, attr);
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_min:
+ case hwmon_fan_max:
+ break;
+ default:
+ return -EINVAL;
+ }
+ *val = hl_get_fan_speed(hdev, channel, attr);
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ break;
+ default:
+ return -EINVAL;
+ }
+ *val = hl_get_pwm_info(hdev, channel, attr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hl_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ break;
+ default:
+ return -EINVAL;
+ }
+ hl_set_pwm_info(hdev, channel, attr, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ return 0444;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_min:
+ case hwmon_in_max:
+ return 0444;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_min:
+ case hwmon_curr_max:
+ return 0444;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_min:
+ case hwmon_fan_max:
+ return 0444;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_ops hl_hwmon_ops = {
+ .is_visible = hl_is_visible,
+ .read = hl_read,
+ .write = hl_write
+};
+
+long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get temperature from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get voltage from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get current from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get fan speed from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get pwm info from sensor %d, error %d\n",
+ sensor_index, rc);
+ result = 0;
+ }
+
+ return result;
+}
+
+void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
+ long value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set pwm info to sensor %d, error %d\n",
+ sensor_index, rc);
+}
+
+int hl_hwmon_init(struct hl_device *hdev)
+{
+ struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
+ int rc;
+
+ if ((hdev->hwmon_initialized) || !(hdev->fw_loading))
+ return 0;
+
+ if (hdev->hl_chip_info->info) {
+ hdev->hl_chip_info->ops = &hl_hwmon_ops;
+
+ hdev->hwmon_dev = hwmon_device_register_with_info(dev,
+ "habanalabs", hdev, hdev->hl_chip_info, NULL);
+ if (IS_ERR(hdev->hwmon_dev)) {
+ rc = PTR_ERR(hdev->hwmon_dev);
+ dev_err(hdev->dev,
+ "Unable to register hwmon device: %d\n", rc);
+ return rc;
+ }
+
+ dev_info(hdev->dev, "%s: add sensors information\n",
+ dev_name(hdev->hwmon_dev));
+
+ hdev->hwmon_initialized = true;
+ } else {
+ dev_info(hdev->dev, "no available sensors\n");
+ }
+
+ return 0;
+}
+
+void hl_hwmon_fini(struct hl_device *hdev)
+{
+ if (!hdev->hwmon_initialized)
+ return;
+
+ hwmon_device_unregister(hdev->hwmon_dev);
+}
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
new file mode 100644
index 000000000000..9dddb917e72c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ARMCP_IF_H
+#define ARMCP_IF_H
+
+#include <linux/types.h>
+
+/*
+ * EVENT QUEUE
+ */
+
+struct hl_eq_header {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct hl_eq_entry {
+ struct hl_eq_header hdr;
+ __le64 data[7];
+};
+
+#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
+
+#define EQ_CTL_READY_SHIFT 31
+#define EQ_CTL_READY_MASK 0x80000000
+
+#define EQ_CTL_EVENT_TYPE_SHIFT 16
+#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
+
+#define EVENT_QUEUE_MSIX_IDX 5
+
+enum pq_init_status {
+ PQ_INIT_STATUS_NA = 0,
+ PQ_INIT_STATUS_READY_FOR_CP,
+ PQ_INIT_STATUS_READY_FOR_HOST
+};
+
+/*
+ * ArmCP Primary Queue Packets
+ *
+ * During normal operation, KMD needs to send various messages to ArmCP,
+ * usually either to SET some value in an H/W peripheral or to GET the current
+ * value of some H/W peripheral. For example, SET the frequency of MME/TPC and
+ * GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by KMD
+ * itself, e.g. power management code. In either case, the communication from
+ * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will
+ * send a single message and poll until the message was acknowledged and the
+ * results are ready (if results are needed).
+ *
+ * This means that only a single message can be sent at a time and KMD must
+ * wait for its result before sending the next message. Having said that,
+ * because these are control messages which are sent in a relatively low
+ * frequency, this limitation seems acceptable. It's important to note that
+ * in case of multiple devices, messages to different devices *can* be sent
+ * at the same time.
+ *
+ * The message, inputs/outputs (if relevant) and fence object will be located
+ * on the device DDR at an address that will be determined by KMD. During
+ * device initialization phase, KMD will pass to ArmCP that address. Most of
+ * the message types will contain inputs/outputs inside the message itself.
+ * The common part of each message will contain the opcode of the message (its
+ * type) and a field representing a fence object.
+ *
+ * When KMD wishes to send a message to ArmCP, it will write the message
+ * contents to the device DDR, clear the fence object and then write the
+ * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue
+ * the 484 interrupt-id to the ARM core.
+ *
+ * Upon receiving the 484 interrupt-id, ArmCP will read the message from the
+ * DDR. In case the message is a SET operation, ArmCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, ArmCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, ArmCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, KMD will poll on the fence object. Once KMD sees that the
+ * fence object is signaled, it will read the results from the device DDR
+ * (if relevant) and resume the code execution in KMD.
+ *
+ * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
+ * so the value being put by the KMD matches the value read by ArmCP
+ *
+ * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
+ *
+ * Detailed description:
+ *
+ * ARMCP_PACKET_DISABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU must NOT issue PCI
+ * transactions (read/write) towards the Host CPU. This also includes
+ * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state.
+ *
+ * ARMCP_PACKET_ENABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU is allowed to issue PCI
+ * transactions towards the Host CPU, including sending MSI-X interrupts.
+ * This packet is usually sent after the device is moved to D0 state.
+ *
+ * ARMCP_PACKET_TEMPERATURE_GET -
+ * Fetch the current temperature / Max / Max Hyst / Critical /
+ * Critical Hyst of a specified thermal sensor. The packet's
+ * arguments specify the desired sensor and the field to get.
+ *
+ * ARMCP_PACKET_VOLTAGE_GET -
+ * Fetch the voltage / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * ARMCP_PACKET_CURRENT_GET -
+ * Fetch the current / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * ARMCP_PACKET_FAN_SPEED_GET -
+ * Fetch the speed / Max / Min of a specified fan. The packet's
+ * arguments specify the sensor and type.
+ *
+ * ARMCP_PACKET_PWM_GET -
+ * Fetch the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor and type.
+ *
+ * ARMCP_PACKET_PWM_SET -
+ * Set the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor, type and value.
+ *
+ * ARMCP_PACKET_FREQUENCY_SET -
+ * Set the frequency of a specified PLL. The packet's arguments specify
+ * the PLL and the desired frequency. The actual frequency in the device
+ * might differ from the requested frequency.
+ *
+ * ARMCP_PACKET_FREQUENCY_GET -
+ * Fetch the frequency of a specified PLL. The packet's arguments specify
+ * the PLL.
+ *
+ * ARMCP_PACKET_LED_SET -
+ * Set the state of a specified led. The packet's arguments
+ * specify the led and the desired state.
+ *
+ * ARMCP_PACKET_I2C_WR -
+ * Write 32-bit value to I2C device. The packet's arguments specify the
+ * I2C bus, address and value.
+ *
+ * ARMCP_PACKET_I2C_RD -
+ * Read 32-bit value from I2C device. The packet's arguments specify the
+ * I2C bus and address.
+ *
+ * ARMCP_PACKET_INFO_GET -
+ * Fetch information from the device as specified in the packet's
+ * structure. KMD passes the max size it allows the ArmCP to write to
+ * the structure, to prevent data corruption in case of mismatched
+ * KMD/FW versions.
+ *
+ * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ *
+ * ARMCP_PACKET_UNMASK_RAZWI_IRQ -
+ * Unmask the given IRQ. The IRQ number is specified in the value field.
+ * The packet is sent after receiving an interrupt and printing its
+ * relevant information.
+ *
+ * ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * Unmask the given IRQs. The IRQ numbers are specified in an array right
+ * after the armcp_packet structure, whose first element is the array
+ * length. The packet is sent after a soft reset was done in order to
+ * handle any interrupts that were sent during the reset process.
+ *
+ * ARMCP_PACKET_TEST -
+ * Test packet for ArmCP connectivity. The CPU will put the fence value
+ * in the result field.
+ *
+ * ARMCP_PACKET_FREQUENCY_CURR_GET -
+ * Fetch the current frequency of a specified PLL. The packet's arguments
+ * specify the PLL.
+ *
+ * ARMCP_PACKET_MAX_POWER_GET -
+ * Fetch the maximal power of the device.
+ *
+ * ARMCP_PACKET_MAX_POWER_SET -
+ * Set the maximal power of the device. The packet's arguments specify
+ * the power.
+ *
+ * ARMCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the ArmCP kernel. The buffer is specified in the
+ * addr field. The CPU will put the returned data size in the result
+ * field. In addition, KMD passes the max size it allows the ArmCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched KMD/FW versions.
+ *
+ */
+
+enum armcp_packet_id {
+ ARMCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
+ ARMCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
+ ARMCP_PACKET_TEMPERATURE_GET, /* sysfs */
+ ARMCP_PACKET_VOLTAGE_GET, /* sysfs */
+ ARMCP_PACKET_CURRENT_GET, /* sysfs */
+ ARMCP_PACKET_FAN_SPEED_GET, /* sysfs */
+ ARMCP_PACKET_PWM_GET, /* sysfs */
+ ARMCP_PACKET_PWM_SET, /* sysfs */
+ ARMCP_PACKET_FREQUENCY_SET, /* sysfs */
+ ARMCP_PACKET_FREQUENCY_GET, /* sysfs */
+ ARMCP_PACKET_LED_SET, /* debugfs */
+ ARMCP_PACKET_I2C_WR, /* debugfs */
+ ARMCP_PACKET_I2C_RD, /* debugfs */
+ ARMCP_PACKET_INFO_GET, /* IOCTL */
+ ARMCP_PACKET_FLASH_PROGRAM_REMOVED,
+ ARMCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
+ ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
+ ARMCP_PACKET_TEST, /* internal */
+ ARMCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
+ ARMCP_PACKET_MAX_POWER_GET, /* sysfs */
+ ARMCP_PACKET_MAX_POWER_SET, /* sysfs */
+ ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+};
+
+#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5
+
+#define ARMCP_PKT_CTL_RC_SHIFT 12
+#define ARMCP_PKT_CTL_RC_MASK 0x0000F000
+
+#define ARMCP_PKT_CTL_OPCODE_SHIFT 16
+#define ARMCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
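As described in the protocol comment above, the opcode travels in the ctl field and ArmCP reports errors through the rc bits of the same field. The helpers below are a sketch, not part of this interface, of how those shift/mask definitions combine.

/* Sketch only; these helpers are not part of the patch. */
static inline __le32 example_armcp_pkt_ctl(enum armcp_packet_id opcode)
{
        return __cpu_to_le32(((u32) opcode << ARMCP_PKT_CTL_OPCODE_SHIFT) &
                                ARMCP_PKT_CTL_OPCODE_MASK);
}

static inline u32 example_armcp_pkt_rc(__le32 ctl)
{
        return (__le32_to_cpu(ctl) & ARMCP_PKT_CTL_RC_MASK) >>
                        ARMCP_PKT_CTL_RC_SHIFT;
}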
+
+struct armcp_packet {
+ union {
+ __le64 value; /* For SET packets */
+ __le64 result; /* For GET packets */
+ __le64 addr; /* For PQ */
+ };
+
+ __le32 ctl;
+
+ __le32 fence; /* Signal to KMD that message is completed */
+
+ union {
+ struct {/* For temperature/current/voltage/fan/pwm get/set */
+ __le16 sensor_index;
+ __le16 type;
+ };
+
+ struct { /* For I2C read/write */
+ __u8 i2c_bus;
+ __u8 i2c_addr;
+ __u8 i2c_reg;
+ __u8 pad; /* unused */
+ };
+
+ /* For frequency get/set */
+ __le32 pll_index;
+
+ /* For led set */
+ __le32 led_index;
+
+ /* For get Armcp info/EEPROM data */
+ __le32 data_max_size;
+ };
+};
+
+struct armcp_unmask_irq_arr_packet {
+ struct armcp_packet armcp_pkt;
+ __le32 length;
+ __le32 irqs[0];
+};
+
+enum armcp_packet_rc {
+ armcp_packet_success,
+ armcp_packet_invalid,
+ armcp_packet_fault
+};
+
+enum armcp_temp_type {
+ armcp_temp_input,
+ armcp_temp_max = 6,
+ armcp_temp_max_hyst,
+ armcp_temp_crit,
+ armcp_temp_crit_hyst
+};
+
+enum armcp_in_attributes {
+ armcp_in_input,
+ armcp_in_min,
+ armcp_in_max
+};
+
+enum armcp_curr_attributes {
+ armcp_curr_input,
+ armcp_curr_min,
+ armcp_curr_max
+};
+
+enum armcp_fan_attributes {
+ armcp_fan_input,
+ armcp_fan_min = 2,
+ armcp_fan_max
+};
+
+enum armcp_pwm_attributes {
+ armcp_pwm_input,
+ armcp_pwm_enable
+};
+
+/* Event Queue Packets */
+
+struct eq_generic_event {
+ __le64 data[7];
+};
+
+/*
+ * ArmCP info
+ */
+
+#define VERSION_MAX_LEN 128
+#define ARMCP_MAX_SENSORS 128
+
+struct armcp_sensor {
+ __le32 type;
+ __le32 flags;
+};
+
+struct armcp_info {
+ struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
+ __u8 kernel_version[VERSION_MAX_LEN];
+ __le32 reserved[3];
+ __le32 cpld_version;
+ __le32 infineon_version;
+ __u8 fuse_version[VERSION_MAX_LEN];
+ __u8 thermal_version[VERSION_MAX_LEN];
+ __u8 armcp_version[VERSION_MAX_LEN];
+ __le64 dram_size;
+};
+
+#endif /* ARMCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
new file mode 100644
index 000000000000..2cf5c46b6e8e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_CA53_CFG_MASKS_H_
+#define ASIC_REG_CPU_CA53_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * CPU_CA53_CFG (Prototype: CA53_CFG)
+ *****************************************
+ */
+
+/* CPU_CA53_CFG_ARM_CFG */
+#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_SHIFT 0
+#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_MASK 0x3
+#define CPU_CA53_CFG_ARM_CFG_END_SHIFT 4
+#define CPU_CA53_CFG_ARM_CFG_END_MASK 0x30
+#define CPU_CA53_CFG_ARM_CFG_TE_SHIFT 8
+#define CPU_CA53_CFG_ARM_CFG_TE_MASK 0x300
+#define CPU_CA53_CFG_ARM_CFG_VINITHI_SHIFT 12
+#define CPU_CA53_CFG_ARM_CFG_VINITHI_MASK 0x3000
+
+/* CPU_CA53_CFG_RST_ADDR_LSB */
+#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_SHIFT 0
+#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_MASK 0xFFFFFFFF
+
+/* CPU_CA53_CFG_RST_ADDR_MSB */
+#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_SHIFT 0
+#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_MASK 0xFF
+
+/* CPU_CA53_CFG_ARM_RST_CONTROL */
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT 0
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_MASK 0x3
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT 4
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_MASK 0x30
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT 8
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_MASK 0x100
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_SHIFT 12
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_MASK 0x1000
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT 16
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_MASK 0x10000
+#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_SHIFT 20
+#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_MASK 0x300000
+
+/* CPU_CA53_CFG_ARM_AFFINITY */
+#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_SHIFT 0
+#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_MASK 0xFF
+#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_SHIFT 8
+#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_MASK 0xFF00
+
+/* CPU_CA53_CFG_ARM_DISABLE */
+#define CPU_CA53_CFG_ARM_DISABLE_CP15S_SHIFT 0
+#define CPU_CA53_CFG_ARM_DISABLE_CP15S_MASK 0x3
+#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_SHIFT 4
+#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_MASK 0x30
+#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_SHIFT 8
+#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_MASK 0x100
+#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_SHIFT 9
+#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_MASK 0x200
+
+/* CPU_CA53_CFG_ARM_GIC_PERIPHBASE */
+#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_SHIFT 0
+#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_MASK 0x3FFFFF
+
+/* CPU_CA53_CFG_ARM_GIC_IRQ_CFG */
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_SHIFT 0
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_MASK 0x3
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_SHIFT 4
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_MASK 0x30
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_SHIFT 8
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_MASK 0x300
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_SHIFT 12
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_MASK 0x3000
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_SHIFT 16
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_MASK 0x30000
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_SHIFT 20
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_MASK 0x300000
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_SHIFT 24
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_MASK 0x3000000
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_SHIFT 31
+#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_MASK 0x80000000
+
+/* CPU_CA53_CFG_ARM_PWR_MNG */
+#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_SHIFT 0
+#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_MASK 0x1
+#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_SHIFT 1
+#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_MASK 0x2
+#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_SHIFT 2
+#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_MASK 0x4
+#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_SHIFT 3
+#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_MASK 0x8
+#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_SHIFT 4
+#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_MASK 0x30
+#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_SHIFT 8
+#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_MASK 0x300
+#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_SHIFT 12
+#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_MASK 0x3000
+
+/* CPU_CA53_CFG_ARB_DBG_ROM_ADDR */
+#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_SHIFT 0
+#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_MASK 0xFFFFFFF
+#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_SHIFT 31
+#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_MASK 0x80000000
+
+/* CPU_CA53_CFG_ARM_DBG_MODES */
+#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_SHIFT 0
+#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_MASK 0x3
+#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_SHIFT 4
+#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_MASK 0x30
+#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_SHIFT 8
+#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_MASK 0x300
+#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_SHIFT 12
+#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_MASK 0x3000
+#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_SHIFT 16
+#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_MASK 0x30000
+
+/* CPU_CA53_CFG_ARM_PWR_STAT_0 */
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_SHIFT 0
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_MASK 0x1
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_SHIFT 1
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_MASK 0x2
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_SHIFT 4
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_MASK 0x30
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_SHIFT 8
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_MASK 0x300
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_SHIFT 12
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_MASK 0x1000
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_SHIFT 13
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_MASK 0x2000
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_SHIFT 16
+#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_MASK 0x30000
+
+/* CPU_CA53_CFG_ARM_PWR_STAT_1 */
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_SHIFT 0
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_MASK 0x3
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_SHIFT 4
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_MASK 0x30
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_SHIFT 8
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_MASK 0x300
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_SHIFT 12
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_MASK 0x3000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_SHIFT 16
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_MASK 0x30000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_SHIFT 20
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_MASK 0x300000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_SHIFT 24
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_MASK 0x1000000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_SHIFT 25
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_MASK 0x2000000
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_SHIFT 26
+#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_MASK 0x4000000
+
+/* CPU_CA53_CFG_ARM_DBG_STATUS */
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_SHIFT 0
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_MASK 0x3
+#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_SHIFT 4
+#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_MASK 0x30
+#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_SHIFT 8
+#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_MASK 0x300
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_SHIFT 12
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_MASK 0x3000
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_SHIFT 16
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_MASK 0x30000
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_SHIFT 20
+#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_MASK 0x300000
+
+/* CPU_CA53_CFG_ARM_MEM_ATTR */
+#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_SHIFT 0
+#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_MASK 0xFF
+#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_SHIFT 8
+#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_MASK 0xFF00
+#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_SHIFT 16
+#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_MASK 0x10000
+#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_SHIFT 20
+#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_MASK 0x100000
+
+/* CPU_CA53_CFG_ARM_PMU */
+#define CPU_CA53_CFG_ARM_PMU_EVENT_SHIFT 0
+#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF
+
+#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
new file mode 100644
index 000000000000..840ccffa1081
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_CA53_CFG_REGS_H_
+#define ASIC_REG_CPU_CA53_CFG_REGS_H_
+
+/*
+ *****************************************
+ * CPU_CA53_CFG (Prototype: CA53_CFG)
+ *****************************************
+ */
+
+#define mmCPU_CA53_CFG_ARM_CFG 0x441100
+
+#define mmCPU_CA53_CFG_RST_ADDR_LSB_0 0x441104
+
+#define mmCPU_CA53_CFG_RST_ADDR_LSB_1 0x441108
+
+#define mmCPU_CA53_CFG_RST_ADDR_MSB_0 0x441114
+
+#define mmCPU_CA53_CFG_RST_ADDR_MSB_1 0x441118
+
+#define mmCPU_CA53_CFG_ARM_RST_CONTROL 0x441124
+
+#define mmCPU_CA53_CFG_ARM_AFFINITY 0x441128
+
+#define mmCPU_CA53_CFG_ARM_DISABLE 0x44112C
+
+#define mmCPU_CA53_CFG_ARM_GIC_PERIPHBASE 0x441130
+
+#define mmCPU_CA53_CFG_ARM_GIC_IRQ_CFG 0x441134
+
+#define mmCPU_CA53_CFG_ARM_PWR_MNG 0x441138
+
+#define mmCPU_CA53_CFG_ARB_DBG_ROM_ADDR 0x44113C
+
+#define mmCPU_CA53_CFG_ARM_DBG_MODES 0x441140
+
+#define mmCPU_CA53_CFG_ARM_PWR_STAT_0 0x441200
+
+#define mmCPU_CA53_CFG_ARM_PWR_STAT_1 0x441204
+
+#define mmCPU_CA53_CFG_ARM_DBG_STATUS 0x441208
+
+#define mmCPU_CA53_CFG_ARM_MEM_ATTR 0x44120C
+
+#define mmCPU_CA53_CFG_ARM_PMU_0 0x441210
+
+#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214
+
+#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
new file mode 100644
index 000000000000..f23cb3e41c30
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_IF_REGS_H_
+#define ASIC_REG_CPU_IF_REGS_H_
+
+/*
+ *****************************************
+ * CPU_IF (Prototype: CPU_IF)
+ *****************************************
+ */
+
+#define mmCPU_IF_PF_PQ_PI 0x442100
+
+#define mmCPU_IF_ARUSER_OVR 0x442104
+
+#define mmCPU_IF_ARUSER_OVR_EN 0x442108
+
+#define mmCPU_IF_AWUSER_OVR 0x44210C
+
+#define mmCPU_IF_AWUSER_OVR_EN 0x442110
+
+#define mmCPU_IF_AXCACHE_OVR 0x442114
+
+#define mmCPU_IF_LOCK_OVR 0x442118
+
+#define mmCPU_IF_PROT_OVR 0x44211C
+
+#define mmCPU_IF_MAX_OUTSTANDING 0x442120
+
+#define mmCPU_IF_EARLY_BRESP_EN 0x442124
+
+#define mmCPU_IF_FORCE_RSP_OK 0x442128
+
+#define mmCPU_IF_CPU_MSB_ADDR 0x44212C
+
+#define mmCPU_IF_AXI_SPLIT_INTR 0x442130
+
+#endif /* ASIC_REG_CPU_IF_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
new file mode 100644
index 000000000000..8fc97f838ada
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_PLL_REGS_H_
+#define ASIC_REG_CPU_PLL_REGS_H_
+
+/*
+ *****************************************
+ * CPU_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmCPU_PLL_NR 0x4A2100
+
+#define mmCPU_PLL_NF 0x4A2104
+
+#define mmCPU_PLL_OD 0x4A2108
+
+#define mmCPU_PLL_NB 0x4A210C
+
+#define mmCPU_PLL_CFG 0x4A2110
+
+#define mmCPU_PLL_LOSE_MASK 0x4A2120
+
+#define mmCPU_PLL_LOCK_INTR 0x4A2128
+
+#define mmCPU_PLL_LOCK_BYPASS 0x4A212C
+
+#define mmCPU_PLL_DATA_CHNG 0x4A2130
+
+#define mmCPU_PLL_RST 0x4A2134
+
+#define mmCPU_PLL_SLIP_WD_CNTR 0x4A2150
+
+#define mmCPU_PLL_DIV_FACTOR_0 0x4A2200
+
+#define mmCPU_PLL_DIV_FACTOR_1 0x4A2204
+
+#define mmCPU_PLL_DIV_FACTOR_2 0x4A2208
+
+#define mmCPU_PLL_DIV_FACTOR_3 0x4A220C
+
+#define mmCPU_PLL_DIV_FACTOR_CMD_0 0x4A2220
+
+#define mmCPU_PLL_DIV_FACTOR_CMD_1 0x4A2224
+
+#define mmCPU_PLL_DIV_FACTOR_CMD_2 0x4A2228
+
+#define mmCPU_PLL_DIV_FACTOR_CMD_3 0x4A222C
+
+#define mmCPU_PLL_DIV_SEL_0 0x4A2280
+
+#define mmCPU_PLL_DIV_SEL_1 0x4A2284
+
+#define mmCPU_PLL_DIV_SEL_2 0x4A2288
+
+#define mmCPU_PLL_DIV_SEL_3 0x4A228C
+
+#define mmCPU_PLL_DIV_EN_0 0x4A22A0
+
+#define mmCPU_PLL_DIV_EN_1 0x4A22A4
+
+#define mmCPU_PLL_DIV_EN_2 0x4A22A8
+
+#define mmCPU_PLL_DIV_EN_3 0x4A22AC
+
+#define mmCPU_PLL_DIV_FACTOR_BUSY_0 0x4A22C0
+
+#define mmCPU_PLL_DIV_FACTOR_BUSY_1 0x4A22C4
+
+#define mmCPU_PLL_DIV_FACTOR_BUSY_2 0x4A22C8
+
+#define mmCPU_PLL_DIV_FACTOR_BUSY_3 0x4A22CC
+
+#define mmCPU_PLL_CLK_GATER 0x4A2300
+
+#define mmCPU_PLL_CLK_RLX_0 0x4A2310
+
+#define mmCPU_PLL_CLK_RLX_1 0x4A2314
+
+#define mmCPU_PLL_CLK_RLX_2 0x4A2318
+
+#define mmCPU_PLL_CLK_RLX_3 0x4A231C
+
+#define mmCPU_PLL_REF_CNTR_PERIOD 0x4A2400
+
+#define mmCPU_PLL_REF_LOW_THRESHOLD 0x4A2410
+
+#define mmCPU_PLL_REF_HIGH_THRESHOLD 0x4A2420
+
+#define mmCPU_PLL_PLL_NOT_STABLE 0x4A2430
+
+#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440
+
+#endif /* ASIC_REG_CPU_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
new file mode 100644
index 000000000000..61c8cd9ce58b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_0_REGS_H_
+#define ASIC_REG_DMA_CH_0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_0 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_0_CFG0 0x401000
+
+#define mmDMA_CH_0_CFG1 0x401004
+
+#define mmDMA_CH_0_ERRMSG_ADDR_LO 0x401008
+
+#define mmDMA_CH_0_ERRMSG_ADDR_HI 0x40100C
+
+#define mmDMA_CH_0_ERRMSG_WDATA 0x401010
+
+#define mmDMA_CH_0_RD_COMP_ADDR_LO 0x401014
+
+#define mmDMA_CH_0_RD_COMP_ADDR_HI 0x401018
+
+#define mmDMA_CH_0_RD_COMP_WDATA 0x40101C
+
+#define mmDMA_CH_0_WR_COMP_ADDR_LO 0x401020
+
+#define mmDMA_CH_0_WR_COMP_ADDR_HI 0x401024
+
+#define mmDMA_CH_0_WR_COMP_WDATA 0x401028
+
+#define mmDMA_CH_0_LDMA_SRC_ADDR_LO 0x40102C
+
+#define mmDMA_CH_0_LDMA_SRC_ADDR_HI 0x401030
+
+#define mmDMA_CH_0_LDMA_DST_ADDR_LO 0x401034
+
+#define mmDMA_CH_0_LDMA_DST_ADDR_HI 0x401038
+
+#define mmDMA_CH_0_LDMA_TSIZE 0x40103C
+
+#define mmDMA_CH_0_COMIT_TRANSFER 0x401040
+
+#define mmDMA_CH_0_STS0 0x401044
+
+#define mmDMA_CH_0_STS1 0x401048
+
+#define mmDMA_CH_0_STS2 0x40104C
+
+#define mmDMA_CH_0_STS3 0x401050
+
+#define mmDMA_CH_0_STS4 0x401054
+
+#define mmDMA_CH_0_SRC_ADDR_LO_STS 0x401058
+
+#define mmDMA_CH_0_SRC_ADDR_HI_STS 0x40105C
+
+#define mmDMA_CH_0_SRC_TSIZE_STS 0x401060
+
+#define mmDMA_CH_0_DST_ADDR_LO_STS 0x401064
+
+#define mmDMA_CH_0_DST_ADDR_HI_STS 0x401068
+
+#define mmDMA_CH_0_DST_TSIZE_STS 0x40106C
+
+#define mmDMA_CH_0_RD_RATE_LIM_EN 0x401070
+
+#define mmDMA_CH_0_RD_RATE_LIM_RST_TOKEN 0x401074
+
+#define mmDMA_CH_0_RD_RATE_LIM_SAT 0x401078
+
+#define mmDMA_CH_0_RD_RATE_LIM_TOUT 0x40107C
+
+#define mmDMA_CH_0_WR_RATE_LIM_EN 0x401080
+
+#define mmDMA_CH_0_WR_RATE_LIM_RST_TOKEN 0x401084
+
+#define mmDMA_CH_0_WR_RATE_LIM_SAT 0x401088
+
+#define mmDMA_CH_0_WR_RATE_LIM_TOUT 0x40108C
+
+#define mmDMA_CH_0_CFG2 0x401090
+
+#define mmDMA_CH_0_TDMA_CTL 0x401100
+
+#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_LO 0x401104
+
+#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_HI 0x401108
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_0 0x40110C
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_0 0x401110
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0 0x401114
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_0 0x401118
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_0 0x40111C
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_1 0x401120
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_1 0x401124
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1 0x401128
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_1 0x40112C
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_1 0x401130
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_2 0x401134
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_2 0x401138
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2 0x40113C
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_2 0x401140
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_2 0x401144
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_3 0x401148
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_3 0x40114C
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3 0x401150
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_3 0x401154
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_3 0x401158
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_4 0x40115C
+
+#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_4 0x401160
+
+#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4 0x401164
+
+#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_4 0x401168
+
+#define mmDMA_CH_0_TDMA_SRC_STRIDE_4 0x40116C
+
+#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_LO 0x401170
+
+#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_HI 0x401174
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_0 0x401178
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_0 0x40117C
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_0 0x401180
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_0 0x401184
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_0 0x401188
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_1 0x40118C
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_1 0x401190
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_1 0x401194
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_1 0x401198
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_1 0x40119C
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_2 0x4011A0
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_2 0x4011A4
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_2 0x4011A8
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_2 0x4011AC
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_2 0x4011B0
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_3 0x4011B4
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_3 0x4011B8
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_3 0x4011BC
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_3 0x4011C0
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_3 0x4011C4
+
+#define mmDMA_CH_0_TDMA_DST_ROI_BASE_4 0x4011C8
+
+#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_4 0x4011CC
+
+#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_4 0x4011D0
+
+#define mmDMA_CH_0_TDMA_DST_START_OFFSET_4 0x4011D4
+
+#define mmDMA_CH_0_TDMA_DST_STRIDE_4 0x4011D8
+
+#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC
+
+#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
new file mode 100644
index 000000000000..92960ef5e308
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_1_REGS_H_
+#define ASIC_REG_DMA_CH_1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_1 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_1_CFG0 0x409000
+
+#define mmDMA_CH_1_CFG1 0x409004
+
+#define mmDMA_CH_1_ERRMSG_ADDR_LO 0x409008
+
+#define mmDMA_CH_1_ERRMSG_ADDR_HI 0x40900C
+
+#define mmDMA_CH_1_ERRMSG_WDATA 0x409010
+
+#define mmDMA_CH_1_RD_COMP_ADDR_LO 0x409014
+
+#define mmDMA_CH_1_RD_COMP_ADDR_HI 0x409018
+
+#define mmDMA_CH_1_RD_COMP_WDATA 0x40901C
+
+#define mmDMA_CH_1_WR_COMP_ADDR_LO 0x409020
+
+#define mmDMA_CH_1_WR_COMP_ADDR_HI 0x409024
+
+#define mmDMA_CH_1_WR_COMP_WDATA 0x409028
+
+#define mmDMA_CH_1_LDMA_SRC_ADDR_LO 0x40902C
+
+#define mmDMA_CH_1_LDMA_SRC_ADDR_HI 0x409030
+
+#define mmDMA_CH_1_LDMA_DST_ADDR_LO 0x409034
+
+#define mmDMA_CH_1_LDMA_DST_ADDR_HI 0x409038
+
+#define mmDMA_CH_1_LDMA_TSIZE 0x40903C
+
+#define mmDMA_CH_1_COMIT_TRANSFER 0x409040
+
+#define mmDMA_CH_1_STS0 0x409044
+
+#define mmDMA_CH_1_STS1 0x409048
+
+#define mmDMA_CH_1_STS2 0x40904C
+
+#define mmDMA_CH_1_STS3 0x409050
+
+#define mmDMA_CH_1_STS4 0x409054
+
+#define mmDMA_CH_1_SRC_ADDR_LO_STS 0x409058
+
+#define mmDMA_CH_1_SRC_ADDR_HI_STS 0x40905C
+
+#define mmDMA_CH_1_SRC_TSIZE_STS 0x409060
+
+#define mmDMA_CH_1_DST_ADDR_LO_STS 0x409064
+
+#define mmDMA_CH_1_DST_ADDR_HI_STS 0x409068
+
+#define mmDMA_CH_1_DST_TSIZE_STS 0x40906C
+
+#define mmDMA_CH_1_RD_RATE_LIM_EN 0x409070
+
+#define mmDMA_CH_1_RD_RATE_LIM_RST_TOKEN 0x409074
+
+#define mmDMA_CH_1_RD_RATE_LIM_SAT 0x409078
+
+#define mmDMA_CH_1_RD_RATE_LIM_TOUT 0x40907C
+
+#define mmDMA_CH_1_WR_RATE_LIM_EN 0x409080
+
+#define mmDMA_CH_1_WR_RATE_LIM_RST_TOKEN 0x409084
+
+#define mmDMA_CH_1_WR_RATE_LIM_SAT 0x409088
+
+#define mmDMA_CH_1_WR_RATE_LIM_TOUT 0x40908C
+
+#define mmDMA_CH_1_CFG2 0x409090
+
+#define mmDMA_CH_1_TDMA_CTL 0x409100
+
+#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_LO 0x409104
+
+#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_HI 0x409108
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_0 0x40910C
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_0 0x409110
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_0 0x409114
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_0 0x409118
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_0 0x40911C
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_1 0x409120
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_1 0x409124
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_1 0x409128
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_1 0x40912C
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_1 0x409130
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_2 0x409134
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_2 0x409138
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_2 0x40913C
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_2 0x409140
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_2 0x409144
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_3 0x409148
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_3 0x40914C
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_3 0x409150
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_3 0x409154
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_3 0x409158
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_4 0x40915C
+
+#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_4 0x409160
+
+#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_4 0x409164
+
+#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_4 0x409168
+
+#define mmDMA_CH_1_TDMA_SRC_STRIDE_4 0x40916C
+
+#define mmDMA_CH_1_TDMA_DST_BASE_ADDR_LO 0x409170
+
+#define mmDMA_CH_1_TDMA_DST_BASE_ADDR_HI 0x409174
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_0 0x409178
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_0 0x40917C
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_0 0x409180
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_0 0x409184
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_0 0x409188
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_1 0x40918C
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_1 0x409190
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_1 0x409194
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_1 0x409198
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_1 0x40919C
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_2 0x4091A0
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_2 0x4091A4
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_2 0x4091A8
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_2 0x4091AC
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_2 0x4091B0
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_3 0x4091B4
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_3 0x4091B8
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_3 0x4091BC
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_3 0x4091C0
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_3 0x4091C4
+
+#define mmDMA_CH_1_TDMA_DST_ROI_BASE_4 0x4091C8
+
+#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_4 0x4091CC
+
+#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_4 0x4091D0
+
+#define mmDMA_CH_1_TDMA_DST_START_OFFSET_4 0x4091D4
+
+#define mmDMA_CH_1_TDMA_DST_STRIDE_4 0x4091D8
+
+#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC
+
+#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
new file mode 100644
index 000000000000..4e37871a51bb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_2_REGS_H_
+#define ASIC_REG_DMA_CH_2_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_2 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_2_CFG0 0x411000
+
+#define mmDMA_CH_2_CFG1 0x411004
+
+#define mmDMA_CH_2_ERRMSG_ADDR_LO 0x411008
+
+#define mmDMA_CH_2_ERRMSG_ADDR_HI 0x41100C
+
+#define mmDMA_CH_2_ERRMSG_WDATA 0x411010
+
+#define mmDMA_CH_2_RD_COMP_ADDR_LO 0x411014
+
+#define mmDMA_CH_2_RD_COMP_ADDR_HI 0x411018
+
+#define mmDMA_CH_2_RD_COMP_WDATA 0x41101C
+
+#define mmDMA_CH_2_WR_COMP_ADDR_LO 0x411020
+
+#define mmDMA_CH_2_WR_COMP_ADDR_HI 0x411024
+
+#define mmDMA_CH_2_WR_COMP_WDATA 0x411028
+
+#define mmDMA_CH_2_LDMA_SRC_ADDR_LO 0x41102C
+
+#define mmDMA_CH_2_LDMA_SRC_ADDR_HI 0x411030
+
+#define mmDMA_CH_2_LDMA_DST_ADDR_LO 0x411034
+
+#define mmDMA_CH_2_LDMA_DST_ADDR_HI 0x411038
+
+#define mmDMA_CH_2_LDMA_TSIZE 0x41103C
+
+#define mmDMA_CH_2_COMIT_TRANSFER 0x411040
+
+#define mmDMA_CH_2_STS0 0x411044
+
+#define mmDMA_CH_2_STS1 0x411048
+
+#define mmDMA_CH_2_STS2 0x41104C
+
+#define mmDMA_CH_2_STS3 0x411050
+
+#define mmDMA_CH_2_STS4 0x411054
+
+#define mmDMA_CH_2_SRC_ADDR_LO_STS 0x411058
+
+#define mmDMA_CH_2_SRC_ADDR_HI_STS 0x41105C
+
+#define mmDMA_CH_2_SRC_TSIZE_STS 0x411060
+
+#define mmDMA_CH_2_DST_ADDR_LO_STS 0x411064
+
+#define mmDMA_CH_2_DST_ADDR_HI_STS 0x411068
+
+#define mmDMA_CH_2_DST_TSIZE_STS 0x41106C
+
+#define mmDMA_CH_2_RD_RATE_LIM_EN 0x411070
+
+#define mmDMA_CH_2_RD_RATE_LIM_RST_TOKEN 0x411074
+
+#define mmDMA_CH_2_RD_RATE_LIM_SAT 0x411078
+
+#define mmDMA_CH_2_RD_RATE_LIM_TOUT 0x41107C
+
+#define mmDMA_CH_2_WR_RATE_LIM_EN 0x411080
+
+#define mmDMA_CH_2_WR_RATE_LIM_RST_TOKEN 0x411084
+
+#define mmDMA_CH_2_WR_RATE_LIM_SAT 0x411088
+
+#define mmDMA_CH_2_WR_RATE_LIM_TOUT 0x41108C
+
+#define mmDMA_CH_2_CFG2 0x411090
+
+#define mmDMA_CH_2_TDMA_CTL 0x411100
+
+#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_LO 0x411104
+
+#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_HI 0x411108
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_0 0x41110C
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_0 0x411110
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_0 0x411114
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_0 0x411118
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_0 0x41111C
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_1 0x411120
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_1 0x411124
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_1 0x411128
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_1 0x41112C
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_1 0x411130
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_2 0x411134
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_2 0x411138
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_2 0x41113C
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_2 0x411140
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_2 0x411144
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_3 0x411148
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_3 0x41114C
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_3 0x411150
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_3 0x411154
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_3 0x411158
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_4 0x41115C
+
+#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_4 0x411160
+
+#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_4 0x411164
+
+#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_4 0x411168
+
+#define mmDMA_CH_2_TDMA_SRC_STRIDE_4 0x41116C
+
+#define mmDMA_CH_2_TDMA_DST_BASE_ADDR_LO 0x411170
+
+#define mmDMA_CH_2_TDMA_DST_BASE_ADDR_HI 0x411174
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_0 0x411178
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_0 0x41117C
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_0 0x411180
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_0 0x411184
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_0 0x411188
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_1 0x41118C
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_1 0x411190
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_1 0x411194
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_1 0x411198
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_1 0x41119C
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_2 0x4111A0
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_2 0x4111A4
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_2 0x4111A8
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_2 0x4111AC
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_2 0x4111B0
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_3 0x4111B4
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_3 0x4111B8
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_3 0x4111BC
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_3 0x4111C0
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_3 0x4111C4
+
+#define mmDMA_CH_2_TDMA_DST_ROI_BASE_4 0x4111C8
+
+#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_4 0x4111CC
+
+#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_4 0x4111D0
+
+#define mmDMA_CH_2_TDMA_DST_START_OFFSET_4 0x4111D4
+
+#define mmDMA_CH_2_TDMA_DST_STRIDE_4 0x4111D8
+
+#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC
+
+#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
new file mode 100644
index 000000000000..a2d6aeb32a18
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_3_REGS_H_
+#define ASIC_REG_DMA_CH_3_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_3 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_3_CFG0 0x419000
+
+#define mmDMA_CH_3_CFG1 0x419004
+
+#define mmDMA_CH_3_ERRMSG_ADDR_LO 0x419008
+
+#define mmDMA_CH_3_ERRMSG_ADDR_HI 0x41900C
+
+#define mmDMA_CH_3_ERRMSG_WDATA 0x419010
+
+#define mmDMA_CH_3_RD_COMP_ADDR_LO 0x419014
+
+#define mmDMA_CH_3_RD_COMP_ADDR_HI 0x419018
+
+#define mmDMA_CH_3_RD_COMP_WDATA 0x41901C
+
+#define mmDMA_CH_3_WR_COMP_ADDR_LO 0x419020
+
+#define mmDMA_CH_3_WR_COMP_ADDR_HI 0x419024
+
+#define mmDMA_CH_3_WR_COMP_WDATA 0x419028
+
+#define mmDMA_CH_3_LDMA_SRC_ADDR_LO 0x41902C
+
+#define mmDMA_CH_3_LDMA_SRC_ADDR_HI 0x419030
+
+#define mmDMA_CH_3_LDMA_DST_ADDR_LO 0x419034
+
+#define mmDMA_CH_3_LDMA_DST_ADDR_HI 0x419038
+
+#define mmDMA_CH_3_LDMA_TSIZE 0x41903C
+
+#define mmDMA_CH_3_COMIT_TRANSFER 0x419040
+
+#define mmDMA_CH_3_STS0 0x419044
+
+#define mmDMA_CH_3_STS1 0x419048
+
+#define mmDMA_CH_3_STS2 0x41904C
+
+#define mmDMA_CH_3_STS3 0x419050
+
+#define mmDMA_CH_3_STS4 0x419054
+
+#define mmDMA_CH_3_SRC_ADDR_LO_STS 0x419058
+
+#define mmDMA_CH_3_SRC_ADDR_HI_STS 0x41905C
+
+#define mmDMA_CH_3_SRC_TSIZE_STS 0x419060
+
+#define mmDMA_CH_3_DST_ADDR_LO_STS 0x419064
+
+#define mmDMA_CH_3_DST_ADDR_HI_STS 0x419068
+
+#define mmDMA_CH_3_DST_TSIZE_STS 0x41906C
+
+#define mmDMA_CH_3_RD_RATE_LIM_EN 0x419070
+
+#define mmDMA_CH_3_RD_RATE_LIM_RST_TOKEN 0x419074
+
+#define mmDMA_CH_3_RD_RATE_LIM_SAT 0x419078
+
+#define mmDMA_CH_3_RD_RATE_LIM_TOUT 0x41907C
+
+#define mmDMA_CH_3_WR_RATE_LIM_EN 0x419080
+
+#define mmDMA_CH_3_WR_RATE_LIM_RST_TOKEN 0x419084
+
+#define mmDMA_CH_3_WR_RATE_LIM_SAT 0x419088
+
+#define mmDMA_CH_3_WR_RATE_LIM_TOUT 0x41908C
+
+#define mmDMA_CH_3_CFG2 0x419090
+
+#define mmDMA_CH_3_TDMA_CTL 0x419100
+
+#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_LO 0x419104
+
+#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_HI 0x419108
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_0 0x41910C
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_0 0x419110
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_0 0x419114
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_0 0x419118
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_0 0x41911C
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_1 0x419120
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_1 0x419124
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_1 0x419128
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_1 0x41912C
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_1 0x419130
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_2 0x419134
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_2 0x419138
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_2 0x41913C
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_2 0x419140
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_2 0x419144
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_3 0x419148
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_3 0x41914C
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_3 0x419150
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_3 0x419154
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_3 0x419158
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_4 0x41915C
+
+#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_4 0x419160
+
+#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_4 0x419164
+
+#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_4 0x419168
+
+#define mmDMA_CH_3_TDMA_SRC_STRIDE_4 0x41916C
+
+#define mmDMA_CH_3_TDMA_DST_BASE_ADDR_LO 0x419170
+
+#define mmDMA_CH_3_TDMA_DST_BASE_ADDR_HI 0x419174
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_0 0x419178
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_0 0x41917C
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_0 0x419180
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_0 0x419184
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_0 0x419188
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_1 0x41918C
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_1 0x419190
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_1 0x419194
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_1 0x419198
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_1 0x41919C
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_2 0x4191A0
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_2 0x4191A4
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_2 0x4191A8
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_2 0x4191AC
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_2 0x4191B0
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_3 0x4191B4
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_3 0x4191B8
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_3 0x4191BC
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_3 0x4191C0
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_3 0x4191C4
+
+#define mmDMA_CH_3_TDMA_DST_ROI_BASE_4 0x4191C8
+
+#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_4 0x4191CC
+
+#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_4 0x4191D0
+
+#define mmDMA_CH_3_TDMA_DST_START_OFFSET_4 0x4191D4
+
+#define mmDMA_CH_3_TDMA_DST_STRIDE_4 0x4191D8
+
+#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC
+
+#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
new file mode 100644
index 000000000000..400d6fd3acf5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_CH_4_REGS_H_
+#define ASIC_REG_DMA_CH_4_REGS_H_
+
+/*
+ *****************************************
+ * DMA_CH_4 (Prototype: DMA_CH)
+ *****************************************
+ */
+
+#define mmDMA_CH_4_CFG0 0x421000
+
+#define mmDMA_CH_4_CFG1 0x421004
+
+#define mmDMA_CH_4_ERRMSG_ADDR_LO 0x421008
+
+#define mmDMA_CH_4_ERRMSG_ADDR_HI 0x42100C
+
+#define mmDMA_CH_4_ERRMSG_WDATA 0x421010
+
+#define mmDMA_CH_4_RD_COMP_ADDR_LO 0x421014
+
+#define mmDMA_CH_4_RD_COMP_ADDR_HI 0x421018
+
+#define mmDMA_CH_4_RD_COMP_WDATA 0x42101C
+
+#define mmDMA_CH_4_WR_COMP_ADDR_LO 0x421020
+
+#define mmDMA_CH_4_WR_COMP_ADDR_HI 0x421024
+
+#define mmDMA_CH_4_WR_COMP_WDATA 0x421028
+
+#define mmDMA_CH_4_LDMA_SRC_ADDR_LO 0x42102C
+
+#define mmDMA_CH_4_LDMA_SRC_ADDR_HI 0x421030
+
+#define mmDMA_CH_4_LDMA_DST_ADDR_LO 0x421034
+
+#define mmDMA_CH_4_LDMA_DST_ADDR_HI 0x421038
+
+#define mmDMA_CH_4_LDMA_TSIZE 0x42103C
+
+#define mmDMA_CH_4_COMIT_TRANSFER 0x421040
+
+#define mmDMA_CH_4_STS0 0x421044
+
+#define mmDMA_CH_4_STS1 0x421048
+
+#define mmDMA_CH_4_STS2 0x42104C
+
+#define mmDMA_CH_4_STS3 0x421050
+
+#define mmDMA_CH_4_STS4 0x421054
+
+#define mmDMA_CH_4_SRC_ADDR_LO_STS 0x421058
+
+#define mmDMA_CH_4_SRC_ADDR_HI_STS 0x42105C
+
+#define mmDMA_CH_4_SRC_TSIZE_STS 0x421060
+
+#define mmDMA_CH_4_DST_ADDR_LO_STS 0x421064
+
+#define mmDMA_CH_4_DST_ADDR_HI_STS 0x421068
+
+#define mmDMA_CH_4_DST_TSIZE_STS 0x42106C
+
+#define mmDMA_CH_4_RD_RATE_LIM_EN 0x421070
+
+#define mmDMA_CH_4_RD_RATE_LIM_RST_TOKEN 0x421074
+
+#define mmDMA_CH_4_RD_RATE_LIM_SAT 0x421078
+
+#define mmDMA_CH_4_RD_RATE_LIM_TOUT 0x42107C
+
+#define mmDMA_CH_4_WR_RATE_LIM_EN 0x421080
+
+#define mmDMA_CH_4_WR_RATE_LIM_RST_TOKEN 0x421084
+
+#define mmDMA_CH_4_WR_RATE_LIM_SAT 0x421088
+
+#define mmDMA_CH_4_WR_RATE_LIM_TOUT 0x42108C
+
+#define mmDMA_CH_4_CFG2 0x421090
+
+#define mmDMA_CH_4_TDMA_CTL 0x421100
+
+#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_LO 0x421104
+
+#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_HI 0x421108
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_0 0x42110C
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_0 0x421110
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_0 0x421114
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_0 0x421118
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_0 0x42111C
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_1 0x421120
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_1 0x421124
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_1 0x421128
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_1 0x42112C
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_1 0x421130
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_2 0x421134
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_2 0x421138
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_2 0x42113C
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_2 0x421140
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_2 0x421144
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_3 0x421148
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_3 0x42114C
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_3 0x421150
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_3 0x421154
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_3 0x421158
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_4 0x42115C
+
+#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_4 0x421160
+
+#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_4 0x421164
+
+#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_4 0x421168
+
+#define mmDMA_CH_4_TDMA_SRC_STRIDE_4 0x42116C
+
+#define mmDMA_CH_4_TDMA_DST_BASE_ADDR_LO 0x421170
+
+#define mmDMA_CH_4_TDMA_DST_BASE_ADDR_HI 0x421174
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_0 0x421178
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_0 0x42117C
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_0 0x421180
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_0 0x421184
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_0 0x421188
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_1 0x42118C
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_1 0x421190
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_1 0x421194
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_1 0x421198
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_1 0x42119C
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_2 0x4211A0
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_2 0x4211A4
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_2 0x4211A8
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_2 0x4211AC
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_2 0x4211B0
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_3 0x4211B4
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_3 0x4211B8
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_3 0x4211BC
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_3 0x4211C0
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_3 0x4211C4
+
+#define mmDMA_CH_4_TDMA_DST_ROI_BASE_4 0x4211C8
+
+#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_4 0x4211CC
+
+#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_4 0x4211D0
+
+#define mmDMA_CH_4_TDMA_DST_START_OFFSET_4 0x4211D4
+
+#define mmDMA_CH_4_TDMA_DST_STRIDE_4 0x4211D8
+
+#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC
+
+#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
new file mode 100644
index 000000000000..8d965443c51e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_MACRO_MASKS_H_
+#define ASIC_REG_DMA_MACRO_MASKS_H_
+
+/*
+ *****************************************
+ * DMA_MACRO (Prototype: DMA_MACRO)
+ *****************************************
+ */
+
+/* DMA_MACRO_LBW_RANGE_HIT_BLOCK */
+#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_SHIFT 0
+#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_MASK 0xFFFF
+
+/* DMA_MACRO_LBW_RANGE_MASK */
+#define DMA_MACRO_LBW_RANGE_MASK_R_SHIFT 0
+#define DMA_MACRO_LBW_RANGE_MASK_R_MASK 0x3FFFFFF
+
+/* DMA_MACRO_LBW_RANGE_BASE */
+#define DMA_MACRO_LBW_RANGE_BASE_R_SHIFT 0
+#define DMA_MACRO_LBW_RANGE_BASE_R_MASK 0x3FFFFFF
+
+/* DMA_MACRO_HBW_RANGE_HIT_BLOCK */
+#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_MASK 0xFF
+
+/* DMA_MACRO_HBW_RANGE_MASK_49_32 */
+#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_MASK 0x3FFFF
+
+/* DMA_MACRO_HBW_RANGE_MASK_31_0 */
+#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_MASK 0xFFFFFFFF
+
+/* DMA_MACRO_HBW_RANGE_BASE_49_32 */
+#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_MASK 0x3FFFF
+
+/* DMA_MACRO_HBW_RANGE_BASE_31_0 */
+#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_SHIFT 0
+#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_MASK 0xFFFFFFFF
+
+/* DMA_MACRO_WRITE_EN */
+#define DMA_MACRO_WRITE_EN_R_SHIFT 0
+#define DMA_MACRO_WRITE_EN_R_MASK 0x1
+
+/* DMA_MACRO_WRITE_CREDIT */
+#define DMA_MACRO_WRITE_CREDIT_R_SHIFT 0
+#define DMA_MACRO_WRITE_CREDIT_R_MASK 0x3FF
+
+/* DMA_MACRO_READ_EN */
+#define DMA_MACRO_READ_EN_R_SHIFT 0
+#define DMA_MACRO_READ_EN_R_MASK 0x1
+
+/* DMA_MACRO_READ_CREDIT */
+#define DMA_MACRO_READ_CREDIT_R_SHIFT 0
+#define DMA_MACRO_READ_CREDIT_R_MASK 0x3FF
+
+/* DMA_MACRO_SRAM_BUSY */
+
+/* DMA_MACRO_RAZWI_LBW_WT_VLD */
+#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_SHIFT 0
+#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_MASK 0x1
+
+/* DMA_MACRO_RAZWI_LBW_WT_ID */
+#define DMA_MACRO_RAZWI_LBW_WT_ID_R_SHIFT 0
+#define DMA_MACRO_RAZWI_LBW_WT_ID_R_MASK 0x7FFF
+
+/* DMA_MACRO_RAZWI_LBW_RD_VLD */
+#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_SHIFT 0
+#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_MASK 0x1
+
+/* DMA_MACRO_RAZWI_LBW_RD_ID */
+#define DMA_MACRO_RAZWI_LBW_RD_ID_R_SHIFT 0
+#define DMA_MACRO_RAZWI_LBW_RD_ID_R_MASK 0x7FFF
+
+/* DMA_MACRO_RAZWI_HBW_WT_VLD */
+#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_SHIFT 0
+#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_MASK 0x1
+
+/* DMA_MACRO_RAZWI_HBW_WT_ID */
+#define DMA_MACRO_RAZWI_HBW_WT_ID_R_SHIFT 0
+#define DMA_MACRO_RAZWI_HBW_WT_ID_R_MASK 0x1FFFFFFF
+
+/* DMA_MACRO_RAZWI_HBW_RD_VLD */
+#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_SHIFT 0
+#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_MASK 0x1
+
+/* DMA_MACRO_RAZWI_HBW_RD_ID */
+#define DMA_MACRO_RAZWI_HBW_RD_ID_R_SHIFT 0
+#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF
+
+#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
new file mode 100644
index 000000000000..8bfcb001189d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_MACRO_REGS_H_
+#define ASIC_REG_DMA_MACRO_REGS_H_
+
+/*
+ *****************************************
+ * DMA_MACRO (Prototype: DMA_MACRO)
+ *****************************************
+ */
+
+#define mmDMA_MACRO_LBW_RANGE_HIT_BLOCK 0x4B0000
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_0 0x4B0004
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_1 0x4B0008
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_2 0x4B000C
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_3 0x4B0010
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_4 0x4B0014
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_5 0x4B0018
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_6 0x4B001C
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_7 0x4B0020
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_8 0x4B0024
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_9 0x4B0028
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_10 0x4B002C
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_11 0x4B0030
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_12 0x4B0034
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_13 0x4B0038
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_14 0x4B003C
+
+#define mmDMA_MACRO_LBW_RANGE_MASK_15 0x4B0040
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_0 0x4B0044
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_1 0x4B0048
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_2 0x4B004C
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_3 0x4B0050
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_4 0x4B0054
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_5 0x4B0058
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_6 0x4B005C
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_7 0x4B0060
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_8 0x4B0064
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_9 0x4B0068
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_10 0x4B006C
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_11 0x4B0070
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_12 0x4B0074
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_13 0x4B0078
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_14 0x4B007C
+
+#define mmDMA_MACRO_LBW_RANGE_BASE_15 0x4B0080
+
+#define mmDMA_MACRO_HBW_RANGE_HIT_BLOCK 0x4B0084
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_0 0x4B00A8
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_1 0x4B00AC
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_2 0x4B00B0
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_3 0x4B00B4
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_4 0x4B00B8
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_5 0x4B00BC
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_6 0x4B00C0
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_7 0x4B00C4
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_0 0x4B00C8
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_1 0x4B00CC
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_2 0x4B00D0
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_3 0x4B00D4
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_4 0x4B00D8
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_5 0x4B00DC
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_6 0x4B00E0
+
+#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_7 0x4B00E4
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_0 0x4B00E8
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_1 0x4B00EC
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_2 0x4B00F0
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_3 0x4B00F4
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_4 0x4B00F8
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_5 0x4B00FC
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_6 0x4B0100
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_7 0x4B0104
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_0 0x4B0108
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_1 0x4B010C
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_2 0x4B0110
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_3 0x4B0114
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_4 0x4B0118
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_5 0x4B011C
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_6 0x4B0120
+
+#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_7 0x4B0124
+
+#define mmDMA_MACRO_WRITE_EN 0x4B0128
+
+#define mmDMA_MACRO_WRITE_CREDIT 0x4B012C
+
+#define mmDMA_MACRO_READ_EN 0x4B0130
+
+#define mmDMA_MACRO_READ_CREDIT 0x4B0134
+
+#define mmDMA_MACRO_SRAM_BUSY 0x4B0138
+
+#define mmDMA_MACRO_RAZWI_LBW_WT_VLD 0x4B013C
+
+#define mmDMA_MACRO_RAZWI_LBW_WT_ID 0x4B0140
+
+#define mmDMA_MACRO_RAZWI_LBW_RD_VLD 0x4B0144
+
+#define mmDMA_MACRO_RAZWI_LBW_RD_ID 0x4B0148
+
+#define mmDMA_MACRO_RAZWI_HBW_WT_VLD 0x4B014C
+
+#define mmDMA_MACRO_RAZWI_HBW_WT_ID 0x4B0150
+
+#define mmDMA_MACRO_RAZWI_HBW_RD_VLD 0x4B0154
+
+#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158
+
+#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
new file mode 100644
index 000000000000..9f33f351a3c1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_NRTR_MASKS_H_
+#define ASIC_REG_DMA_NRTR_MASKS_H_
+
+/*
+ *****************************************
+ * DMA_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+/* DMA_NRTR_HBW_MAX_CRED */
+#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
+#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define DMA_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
+#define DMA_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
+#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define DMA_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
+#define DMA_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* DMA_NRTR_LBW_MAX_CRED */
+#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
+#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define DMA_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
+#define DMA_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
+#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define DMA_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
+#define DMA_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* DMA_NRTR_DBG_E_ARB */
+#define DMA_NRTR_DBG_E_ARB_W_SHIFT 0
+#define DMA_NRTR_DBG_E_ARB_W_MASK 0x7
+#define DMA_NRTR_DBG_E_ARB_S_SHIFT 8
+#define DMA_NRTR_DBG_E_ARB_S_MASK 0x700
+#define DMA_NRTR_DBG_E_ARB_N_SHIFT 16
+#define DMA_NRTR_DBG_E_ARB_N_MASK 0x70000
+#define DMA_NRTR_DBG_E_ARB_L_SHIFT 24
+#define DMA_NRTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* DMA_NRTR_DBG_W_ARB */
+#define DMA_NRTR_DBG_W_ARB_E_SHIFT 0
+#define DMA_NRTR_DBG_W_ARB_E_MASK 0x7
+#define DMA_NRTR_DBG_W_ARB_S_SHIFT 8
+#define DMA_NRTR_DBG_W_ARB_S_MASK 0x700
+#define DMA_NRTR_DBG_W_ARB_N_SHIFT 16
+#define DMA_NRTR_DBG_W_ARB_N_MASK 0x70000
+#define DMA_NRTR_DBG_W_ARB_L_SHIFT 24
+#define DMA_NRTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* DMA_NRTR_DBG_N_ARB */
+#define DMA_NRTR_DBG_N_ARB_W_SHIFT 0
+#define DMA_NRTR_DBG_N_ARB_W_MASK 0x7
+#define DMA_NRTR_DBG_N_ARB_E_SHIFT 8
+#define DMA_NRTR_DBG_N_ARB_E_MASK 0x700
+#define DMA_NRTR_DBG_N_ARB_S_SHIFT 16
+#define DMA_NRTR_DBG_N_ARB_S_MASK 0x70000
+#define DMA_NRTR_DBG_N_ARB_L_SHIFT 24
+#define DMA_NRTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* DMA_NRTR_DBG_S_ARB */
+#define DMA_NRTR_DBG_S_ARB_W_SHIFT 0
+#define DMA_NRTR_DBG_S_ARB_W_MASK 0x7
+#define DMA_NRTR_DBG_S_ARB_E_SHIFT 8
+#define DMA_NRTR_DBG_S_ARB_E_MASK 0x700
+#define DMA_NRTR_DBG_S_ARB_N_SHIFT 16
+#define DMA_NRTR_DBG_S_ARB_N_MASK 0x70000
+#define DMA_NRTR_DBG_S_ARB_L_SHIFT 24
+#define DMA_NRTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* DMA_NRTR_DBG_L_ARB */
+#define DMA_NRTR_DBG_L_ARB_W_SHIFT 0
+#define DMA_NRTR_DBG_L_ARB_W_MASK 0x7
+#define DMA_NRTR_DBG_L_ARB_E_SHIFT 8
+#define DMA_NRTR_DBG_L_ARB_E_MASK 0x700
+#define DMA_NRTR_DBG_L_ARB_S_SHIFT 16
+#define DMA_NRTR_DBG_L_ARB_S_MASK 0x70000
+#define DMA_NRTR_DBG_L_ARB_N_SHIFT 24
+#define DMA_NRTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* DMA_NRTR_DBG_E_ARB_MAX */
+#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_DBG_W_ARB_MAX */
+#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_DBG_N_ARB_MAX */
+#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_DBG_S_ARB_MAX */
+#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_DBG_L_ARB_MAX */
+#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* DMA_NRTR_SPLIT_COEF */
+#define DMA_NRTR_SPLIT_COEF_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_SPLIT_CFG */
+#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
+#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
+#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
+#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
+#define DMA_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define DMA_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* DMA_NRTR_SPLIT_RD_SAT */
+#define DMA_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_SPLIT_RD_RST_TOKEN */
+#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_SPLIT_RD_TIMEOUT */
+#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* DMA_NRTR_SPLIT_WR_SAT */
+#define DMA_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_WPLIT_WR_TST_TOLEN */
+#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* DMA_NRTR_SPLIT_WR_TIMEOUT */
+#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* DMA_NRTR_HBW_RANGE_HIT */
+#define DMA_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* DMA_NRTR_HBW_RANGE_MASK_L */
+#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* DMA_NRTR_HBW_RANGE_MASK_H */
+#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* DMA_NRTR_HBW_RANGE_BASE_L */
+#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* DMA_NRTR_HBW_RANGE_BASE_H */
+#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* DMA_NRTR_LBW_RANGE_HIT */
+#define DMA_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define DMA_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* DMA_NRTR_LBW_RANGE_MASK */
+#define DMA_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define DMA_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* DMA_NRTR_LBW_RANGE_BASE */
+#define DMA_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define DMA_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* DMA_NRTR_RGLTR */
+#define DMA_NRTR_RGLTR_WR_EN_SHIFT 0
+#define DMA_NRTR_RGLTR_WR_EN_MASK 0x1
+#define DMA_NRTR_RGLTR_RD_EN_SHIFT 4
+#define DMA_NRTR_RGLTR_RD_EN_MASK 0x10
+
+/* DMA_NRTR_RGLTR_WR_RESULT */
+#define DMA_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define DMA_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* DMA_NRTR_RGLTR_RD_RESULT */
+#define DMA_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define DMA_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* DMA_NRTR_SCRAMB_EN */
+#define DMA_NRTR_SCRAMB_EN_VAL_SHIFT 0
+#define DMA_NRTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* DMA_NRTR_NON_LIN_SCRAMB */
+#define DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
new file mode 100644
index 000000000000..d8293745a02b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_NRTR_REGS_H_
+#define ASIC_REG_DMA_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * DMA_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmDMA_NRTR_HBW_MAX_CRED 0x1C0100
+
+#define mmDMA_NRTR_LBW_MAX_CRED 0x1C0120
+
+#define mmDMA_NRTR_DBG_E_ARB 0x1C0300
+
+#define mmDMA_NRTR_DBG_W_ARB 0x1C0304
+
+#define mmDMA_NRTR_DBG_N_ARB 0x1C0308
+
+#define mmDMA_NRTR_DBG_S_ARB 0x1C030C
+
+#define mmDMA_NRTR_DBG_L_ARB 0x1C0310
+
+#define mmDMA_NRTR_DBG_E_ARB_MAX 0x1C0320
+
+#define mmDMA_NRTR_DBG_W_ARB_MAX 0x1C0324
+
+#define mmDMA_NRTR_DBG_N_ARB_MAX 0x1C0328
+
+#define mmDMA_NRTR_DBG_S_ARB_MAX 0x1C032C
+
+#define mmDMA_NRTR_DBG_L_ARB_MAX 0x1C0330
+
+#define mmDMA_NRTR_SPLIT_COEF_0 0x1C0400
+
+#define mmDMA_NRTR_SPLIT_COEF_1 0x1C0404
+
+#define mmDMA_NRTR_SPLIT_COEF_2 0x1C0408
+
+#define mmDMA_NRTR_SPLIT_COEF_3 0x1C040C
+
+#define mmDMA_NRTR_SPLIT_COEF_4 0x1C0410
+
+#define mmDMA_NRTR_SPLIT_COEF_5 0x1C0414
+
+#define mmDMA_NRTR_SPLIT_COEF_6 0x1C0418
+
+#define mmDMA_NRTR_SPLIT_COEF_7 0x1C041C
+
+#define mmDMA_NRTR_SPLIT_COEF_8 0x1C0420
+
+#define mmDMA_NRTR_SPLIT_COEF_9 0x1C0424
+
+#define mmDMA_NRTR_SPLIT_CFG 0x1C0440
+
+#define mmDMA_NRTR_SPLIT_RD_SAT 0x1C0444
+
+#define mmDMA_NRTR_SPLIT_RD_RST_TOKEN 0x1C0448
+
+#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_0 0x1C044C
+
+#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_1 0x1C0450
+
+#define mmDMA_NRTR_SPLIT_WR_SAT 0x1C0454
+
+#define mmDMA_NRTR_WPLIT_WR_TST_TOLEN 0x1C0458
+
+#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_0 0x1C045C
+
+#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_1 0x1C0460
+
+#define mmDMA_NRTR_HBW_RANGE_HIT 0x1C0470
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_0 0x1C0480
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_1 0x1C0484
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_2 0x1C0488
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_3 0x1C048C
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_4 0x1C0490
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_5 0x1C0494
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_6 0x1C0498
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_L_7 0x1C049C
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_0 0x1C04A0
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_1 0x1C04A4
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_2 0x1C04A8
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_3 0x1C04AC
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_4 0x1C04B0
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_5 0x1C04B4
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_6 0x1C04B8
+
+#define mmDMA_NRTR_HBW_RANGE_MASK_H_7 0x1C04BC
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_0 0x1C04C0
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_1 0x1C04C4
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_2 0x1C04C8
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_3 0x1C04CC
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_4 0x1C04D0
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_5 0x1C04D4
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_6 0x1C04D8
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_L_7 0x1C04DC
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_0 0x1C04E0
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_1 0x1C04E4
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_2 0x1C04E8
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_3 0x1C04EC
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_4 0x1C04F0
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_5 0x1C04F4
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_6 0x1C04F8
+
+#define mmDMA_NRTR_HBW_RANGE_BASE_H_7 0x1C04FC
+
+#define mmDMA_NRTR_LBW_RANGE_HIT 0x1C0500
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_0 0x1C0510
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_1 0x1C0514
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_2 0x1C0518
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_3 0x1C051C
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_4 0x1C0520
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_5 0x1C0524
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_6 0x1C0528
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_7 0x1C052C
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_8 0x1C0530
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_9 0x1C0534
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_10 0x1C0538
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_11 0x1C053C
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_12 0x1C0540
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_13 0x1C0544
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_14 0x1C0548
+
+#define mmDMA_NRTR_LBW_RANGE_MASK_15 0x1C054C
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_0 0x1C0550
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_1 0x1C0554
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_2 0x1C0558
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_3 0x1C055C
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_4 0x1C0560
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_5 0x1C0564
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_6 0x1C0568
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_7 0x1C056C
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_8 0x1C0570
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_9 0x1C0574
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_10 0x1C0578
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_11 0x1C057C
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_12 0x1C0580
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_13 0x1C0584
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_14 0x1C0588
+
+#define mmDMA_NRTR_LBW_RANGE_BASE_15 0x1C058C
+
+#define mmDMA_NRTR_RGLTR 0x1C0590
+
+#define mmDMA_NRTR_RGLTR_WR_RESULT 0x1C0594
+
+#define mmDMA_NRTR_RGLTR_RD_RESULT 0x1C0598
+
+#define mmDMA_NRTR_SCRAMB_EN 0x1C0600
+
+#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604
+
+#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
new file mode 100644
index 000000000000..10619dbb9b17
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_0_MASKS_H_
+#define ASIC_REG_DMA_QM_0_MASKS_H_
+
+/*
+ *****************************************
+ * DMA_QM_0 (Prototype: QMAN)
+ *****************************************
+ */
+
+/* DMA_QM_0_GLBL_CFG0 */
+#define DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT 0
+#define DMA_QM_0_GLBL_CFG0_PQF_EN_MASK 0x1
+#define DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT 1
+#define DMA_QM_0_GLBL_CFG0_CQF_EN_MASK 0x2
+#define DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT 2
+#define DMA_QM_0_GLBL_CFG0_CP_EN_MASK 0x4
+#define DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT 3
+#define DMA_QM_0_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* DMA_QM_0_GLBL_CFG1 */
+#define DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define DMA_QM_0_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define DMA_QM_0_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT 2
+#define DMA_QM_0_GLBL_CFG1_CP_STOP_MASK 0x4
+#define DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define DMA_QM_0_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* DMA_QM_0_GLBL_PROT */
+#define DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT 0
+#define DMA_QM_0_GLBL_PROT_PQF_PROT_MASK 0x1
+#define DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT 1
+#define DMA_QM_0_GLBL_PROT_CQF_PROT_MASK 0x2
+#define DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT 2
+#define DMA_QM_0_GLBL_PROT_CP_PROT_MASK 0x4
+#define DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT 3
+#define DMA_QM_0_GLBL_PROT_DMA_PROT_MASK 0x8
+#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* DMA_QM_0_GLBL_ERR_CFG */
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* DMA_QM_0_GLBL_ERR_ADDR_LO */
+#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_GLBL_ERR_ADDR_HI */
+#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_GLBL_ERR_WDATA */
+#define DMA_QM_0_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define DMA_QM_0_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_GLBL_SECURE_PROPS */
+#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* DMA_QM_0_GLBL_NON_SECURE_PROPS */
+#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* DMA_QM_0_GLBL_STS0 */
+#define DMA_QM_0_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define DMA_QM_0_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define DMA_QM_0_GLBL_STS0_CP_IDLE_SHIFT 2
+#define DMA_QM_0_GLBL_STS0_CP_IDLE_MASK 0x4
+#define DMA_QM_0_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* DMA_QM_0_GLBL_STS1 */
+#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* DMA_QM_0_PQ_BASE_LO */
+#define DMA_QM_0_PQ_BASE_LO_VAL_SHIFT 0
+#define DMA_QM_0_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_BASE_HI */
+#define DMA_QM_0_PQ_BASE_HI_VAL_SHIFT 0
+#define DMA_QM_0_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_SIZE */
+#define DMA_QM_0_PQ_SIZE_VAL_SHIFT 0
+#define DMA_QM_0_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_PI */
+#define DMA_QM_0_PQ_PI_VAL_SHIFT 0
+#define DMA_QM_0_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_CI */
+#define DMA_QM_0_PQ_CI_VAL_SHIFT 0
+#define DMA_QM_0_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_CFG0 */
+#define DMA_QM_0_PQ_CFG0_RESERVED_SHIFT 0
+#define DMA_QM_0_PQ_CFG0_RESERVED_MASK 0x1
+
+/* DMA_QM_0_PQ_CFG1 */
+#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* DMA_QM_0_PQ_ARUSER */
+#define DMA_QM_0_PQ_ARUSER_NOSNOOP_SHIFT 0
+#define DMA_QM_0_PQ_ARUSER_NOSNOOP_MASK 0x1
+#define DMA_QM_0_PQ_ARUSER_WORD_SHIFT 1
+#define DMA_QM_0_PQ_ARUSER_WORD_MASK 0x2
+
+/* DMA_QM_0_PQ_PUSH0 */
+#define DMA_QM_0_PQ_PUSH0_PTR_LO_SHIFT 0
+#define DMA_QM_0_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_PUSH1 */
+#define DMA_QM_0_PQ_PUSH1_PTR_HI_SHIFT 0
+#define DMA_QM_0_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_PUSH2 */
+#define DMA_QM_0_PQ_PUSH2_TSIZE_SHIFT 0
+#define DMA_QM_0_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_PUSH3 */
+#define DMA_QM_0_PQ_PUSH3_RPT_SHIFT 0
+#define DMA_QM_0_PQ_PUSH3_RPT_MASK 0xFFFF
+#define DMA_QM_0_PQ_PUSH3_CTL_SHIFT 16
+#define DMA_QM_0_PQ_PUSH3_CTL_MASK 0xFFFF0000
+
+/* DMA_QM_0_PQ_STS0 */
+#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* DMA_QM_0_PQ_STS1 */
+#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define DMA_QM_0_PQ_STS1_PQ_BUSY_SHIFT 31
+#define DMA_QM_0_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* DMA_QM_0_PQ_RD_RATE_LIM_EN */
+#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN */
+#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* DMA_QM_0_PQ_RD_RATE_LIM_SAT */
+#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* DMA_QM_0_PQ_RD_RATE_LIM_TOUT */
+#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* DMA_QM_0_CQ_CFG0 */
+#define DMA_QM_0_CQ_CFG0_RESERVED_SHIFT 0
+#define DMA_QM_0_CQ_CFG0_RESERVED_MASK 0x1
+
+/* DMA_QM_0_CQ_CFG1 */
+#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* DMA_QM_0_CQ_ARUSER */
+#define DMA_QM_0_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define DMA_QM_0_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define DMA_QM_0_CQ_ARUSER_WORD_SHIFT 1
+#define DMA_QM_0_CQ_ARUSER_WORD_MASK 0x2
+
+/* DMA_QM_0_CQ_PTR_LO */
+#define DMA_QM_0_CQ_PTR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_PTR_HI */
+#define DMA_QM_0_CQ_PTR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_TSIZE */
+#define DMA_QM_0_CQ_TSIZE_VAL_SHIFT 0
+#define DMA_QM_0_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_CTL */
+#define DMA_QM_0_CQ_CTL_RPT_SHIFT 0
+#define DMA_QM_0_CQ_CTL_RPT_MASK 0xFFFF
+#define DMA_QM_0_CQ_CTL_CTL_SHIFT 16
+#define DMA_QM_0_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* DMA_QM_0_CQ_PTR_LO_STS */
+#define DMA_QM_0_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define DMA_QM_0_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_PTR_HI_STS */
+#define DMA_QM_0_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define DMA_QM_0_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_TSIZE_STS */
+#define DMA_QM_0_CQ_TSIZE_STS_VAL_SHIFT 0
+#define DMA_QM_0_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_CTL_STS */
+#define DMA_QM_0_CQ_CTL_STS_RPT_SHIFT 0
+#define DMA_QM_0_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define DMA_QM_0_CQ_CTL_STS_CTL_SHIFT 16
+#define DMA_QM_0_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* DMA_QM_0_CQ_STS0 */
+#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* DMA_QM_0_CQ_STS1 */
+#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define DMA_QM_0_CQ_STS1_CQ_BUSY_SHIFT 31
+#define DMA_QM_0_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* DMA_QM_0_CQ_RD_RATE_LIM_EN */
+#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN */
+#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* DMA_QM_0_CQ_RD_RATE_LIM_SAT */
+#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* DMA_QM_0_CQ_RD_RATE_LIM_TOUT */
+#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* DMA_QM_0_CQ_IFIFO_CNT */
+#define DMA_QM_0_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* DMA_QM_0_CP_MSG_BASE0_ADDR_LO */
+#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE0_ADDR_HI */
+#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE1_ADDR_LO */
+#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE1_ADDR_HI */
+#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE2_ADDR_LO */
+#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE2_ADDR_HI */
+#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE3_ADDR_LO */
+#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_MSG_BASE3_ADDR_HI */
+#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_TSIZE_OFFSET */
+#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET */
+#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET */
+#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_LDMA_COMMIT_OFFSET */
+#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_FENCE0_RDATA */
+#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* DMA_QM_0_CP_FENCE1_RDATA */
+#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* DMA_QM_0_CP_FENCE2_RDATA */
+#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* DMA_QM_0_CP_FENCE3_RDATA */
+#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* DMA_QM_0_CP_FENCE0_CNT */
+#define DMA_QM_0_CP_FENCE0_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* DMA_QM_0_CP_FENCE1_CNT */
+#define DMA_QM_0_CP_FENCE1_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* DMA_QM_0_CP_FENCE2_CNT */
+#define DMA_QM_0_CP_FENCE2_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* DMA_QM_0_CP_FENCE3_CNT */
+#define DMA_QM_0_CP_FENCE3_CNT_VAL_SHIFT 0
+#define DMA_QM_0_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* DMA_QM_0_CP_STS */
+#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA_QM_0_CP_STS_ERDY_SHIFT 16
+#define DMA_QM_0_CP_STS_ERDY_MASK 0x10000
+#define DMA_QM_0_CP_STS_RRDY_SHIFT 17
+#define DMA_QM_0_CP_STS_RRDY_MASK 0x20000
+#define DMA_QM_0_CP_STS_MRDY_SHIFT 18
+#define DMA_QM_0_CP_STS_MRDY_MASK 0x40000
+#define DMA_QM_0_CP_STS_SW_STOP_SHIFT 19
+#define DMA_QM_0_CP_STS_SW_STOP_MASK 0x80000
+#define DMA_QM_0_CP_STS_FENCE_ID_SHIFT 20
+#define DMA_QM_0_CP_STS_FENCE_ID_MASK 0x300000
+#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* DMA_QM_0_CP_CURRENT_INST_LO */
+#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_CURRENT_INST_HI */
+#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CP_BARRIER_CFG */
+#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* DMA_QM_0_CP_DBG_0 */
+#define DMA_QM_0_CP_DBG_0_VAL_SHIFT 0
+#define DMA_QM_0_CP_DBG_0_VAL_MASK 0xFF
+
+/* DMA_QM_0_PQ_BUF_ADDR */
+#define DMA_QM_0_PQ_BUF_ADDR_VAL_SHIFT 0
+#define DMA_QM_0_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_PQ_BUF_RDATA */
+#define DMA_QM_0_PQ_BUF_RDATA_VAL_SHIFT 0
+#define DMA_QM_0_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_BUF_ADDR */
+#define DMA_QM_0_CQ_BUF_ADDR_VAL_SHIFT 0
+#define DMA_QM_0_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* DMA_QM_0_CQ_BUF_RDATA */
+#define DMA_QM_0_CQ_BUF_RDATA_VAL_SHIFT 0
+#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
new file mode 100644
index 000000000000..c693bc5dcb22
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_0_REGS_H_
+#define ASIC_REG_DMA_QM_0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_0 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_0_GLBL_CFG0 0x400000
+
+#define mmDMA_QM_0_GLBL_CFG1 0x400004
+
+#define mmDMA_QM_0_GLBL_PROT 0x400008
+
+#define mmDMA_QM_0_GLBL_ERR_CFG 0x40000C
+
+#define mmDMA_QM_0_GLBL_ERR_ADDR_LO 0x400010
+
+#define mmDMA_QM_0_GLBL_ERR_ADDR_HI 0x400014
+
+#define mmDMA_QM_0_GLBL_ERR_WDATA 0x400018
+
+#define mmDMA_QM_0_GLBL_SECURE_PROPS 0x40001C
+
+#define mmDMA_QM_0_GLBL_NON_SECURE_PROPS 0x400020
+
+#define mmDMA_QM_0_GLBL_STS0 0x400024
+
+#define mmDMA_QM_0_GLBL_STS1 0x400028
+
+#define mmDMA_QM_0_PQ_BASE_LO 0x400060
+
+#define mmDMA_QM_0_PQ_BASE_HI 0x400064
+
+#define mmDMA_QM_0_PQ_SIZE 0x400068
+
+#define mmDMA_QM_0_PQ_PI 0x40006C
+
+#define mmDMA_QM_0_PQ_CI 0x400070
+
+#define mmDMA_QM_0_PQ_CFG0 0x400074
+
+#define mmDMA_QM_0_PQ_CFG1 0x400078
+
+#define mmDMA_QM_0_PQ_ARUSER 0x40007C
+
+#define mmDMA_QM_0_PQ_PUSH0 0x400080
+
+#define mmDMA_QM_0_PQ_PUSH1 0x400084
+
+#define mmDMA_QM_0_PQ_PUSH2 0x400088
+
+#define mmDMA_QM_0_PQ_PUSH3 0x40008C
+
+#define mmDMA_QM_0_PQ_STS0 0x400090
+
+#define mmDMA_QM_0_PQ_STS1 0x400094
+
+#define mmDMA_QM_0_PQ_RD_RATE_LIM_EN 0x4000A0
+
+#define mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN 0x4000A4
+
+#define mmDMA_QM_0_PQ_RD_RATE_LIM_SAT 0x4000A8
+
+#define mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT 0x4000AC
+
+#define mmDMA_QM_0_CQ_CFG0 0x4000B0
+
+#define mmDMA_QM_0_CQ_CFG1 0x4000B4
+
+#define mmDMA_QM_0_CQ_ARUSER 0x4000B8
+
+#define mmDMA_QM_0_CQ_PTR_LO 0x4000C0
+
+#define mmDMA_QM_0_CQ_PTR_HI 0x4000C4
+
+#define mmDMA_QM_0_CQ_TSIZE 0x4000C8
+
+#define mmDMA_QM_0_CQ_CTL 0x4000CC
+
+#define mmDMA_QM_0_CQ_PTR_LO_STS 0x4000D4
+
+#define mmDMA_QM_0_CQ_PTR_HI_STS 0x4000D8
+
+#define mmDMA_QM_0_CQ_TSIZE_STS 0x4000DC
+
+#define mmDMA_QM_0_CQ_CTL_STS 0x4000E0
+
+#define mmDMA_QM_0_CQ_STS0 0x4000E4
+
+#define mmDMA_QM_0_CQ_STS1 0x4000E8
+
+#define mmDMA_QM_0_CQ_RD_RATE_LIM_EN 0x4000F0
+
+#define mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN 0x4000F4
+
+#define mmDMA_QM_0_CQ_RD_RATE_LIM_SAT 0x4000F8
+
+#define mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT 0x4000FC
+
+#define mmDMA_QM_0_CQ_IFIFO_CNT 0x400108
+
+#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO 0x400120
+
+#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI 0x400124
+
+#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO 0x400128
+
+#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI 0x40012C
+
+#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO 0x400130
+
+#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI 0x400134
+
+#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO 0x400138
+
+#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI 0x40013C
+
+#define mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET 0x400140
+
+#define mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET 0x400144
+
+#define mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET 0x400148
+
+#define mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET 0x40014C
+
+#define mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET 0x400150
+
+#define mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET 0x400154
+
+#define mmDMA_QM_0_CP_FENCE0_RDATA 0x400158
+
+#define mmDMA_QM_0_CP_FENCE1_RDATA 0x40015C
+
+#define mmDMA_QM_0_CP_FENCE2_RDATA 0x400160
+
+#define mmDMA_QM_0_CP_FENCE3_RDATA 0x400164
+
+#define mmDMA_QM_0_CP_FENCE0_CNT 0x400168
+
+#define mmDMA_QM_0_CP_FENCE1_CNT 0x40016C
+
+#define mmDMA_QM_0_CP_FENCE2_CNT 0x400170
+
+#define mmDMA_QM_0_CP_FENCE3_CNT 0x400174
+
+#define mmDMA_QM_0_CP_STS 0x400178
+
+#define mmDMA_QM_0_CP_CURRENT_INST_LO 0x40017C
+
+#define mmDMA_QM_0_CP_CURRENT_INST_HI 0x400180
+
+#define mmDMA_QM_0_CP_BARRIER_CFG 0x400184
+
+#define mmDMA_QM_0_CP_DBG_0 0x400188
+
+#define mmDMA_QM_0_PQ_BUF_ADDR 0x400300
+
+#define mmDMA_QM_0_PQ_BUF_RDATA 0x400304
+
+#define mmDMA_QM_0_CQ_BUF_ADDR 0x400308
+
+#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C
+
+#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
new file mode 100644
index 000000000000..da928390f89c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_1_REGS_H_
+#define ASIC_REG_DMA_QM_1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_1 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_1_GLBL_CFG0 0x408000
+
+#define mmDMA_QM_1_GLBL_CFG1 0x408004
+
+#define mmDMA_QM_1_GLBL_PROT 0x408008
+
+#define mmDMA_QM_1_GLBL_ERR_CFG 0x40800C
+
+#define mmDMA_QM_1_GLBL_ERR_ADDR_LO 0x408010
+
+#define mmDMA_QM_1_GLBL_ERR_ADDR_HI 0x408014
+
+#define mmDMA_QM_1_GLBL_ERR_WDATA 0x408018
+
+#define mmDMA_QM_1_GLBL_SECURE_PROPS 0x40801C
+
+#define mmDMA_QM_1_GLBL_NON_SECURE_PROPS 0x408020
+
+#define mmDMA_QM_1_GLBL_STS0 0x408024
+
+#define mmDMA_QM_1_GLBL_STS1 0x408028
+
+#define mmDMA_QM_1_PQ_BASE_LO 0x408060
+
+#define mmDMA_QM_1_PQ_BASE_HI 0x408064
+
+#define mmDMA_QM_1_PQ_SIZE 0x408068
+
+#define mmDMA_QM_1_PQ_PI 0x40806C
+
+#define mmDMA_QM_1_PQ_CI 0x408070
+
+#define mmDMA_QM_1_PQ_CFG0 0x408074
+
+#define mmDMA_QM_1_PQ_CFG1 0x408078
+
+#define mmDMA_QM_1_PQ_ARUSER 0x40807C
+
+#define mmDMA_QM_1_PQ_PUSH0 0x408080
+
+#define mmDMA_QM_1_PQ_PUSH1 0x408084
+
+#define mmDMA_QM_1_PQ_PUSH2 0x408088
+
+#define mmDMA_QM_1_PQ_PUSH3 0x40808C
+
+#define mmDMA_QM_1_PQ_STS0 0x408090
+
+#define mmDMA_QM_1_PQ_STS1 0x408094
+
+#define mmDMA_QM_1_PQ_RD_RATE_LIM_EN 0x4080A0
+
+#define mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN 0x4080A4
+
+#define mmDMA_QM_1_PQ_RD_RATE_LIM_SAT 0x4080A8
+
+#define mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT 0x4080AC
+
+#define mmDMA_QM_1_CQ_CFG0 0x4080B0
+
+#define mmDMA_QM_1_CQ_CFG1 0x4080B4
+
+#define mmDMA_QM_1_CQ_ARUSER 0x4080B8
+
+#define mmDMA_QM_1_CQ_PTR_LO 0x4080C0
+
+#define mmDMA_QM_1_CQ_PTR_HI 0x4080C4
+
+#define mmDMA_QM_1_CQ_TSIZE 0x4080C8
+
+#define mmDMA_QM_1_CQ_CTL 0x4080CC
+
+#define mmDMA_QM_1_CQ_PTR_LO_STS 0x4080D4
+
+#define mmDMA_QM_1_CQ_PTR_HI_STS 0x4080D8
+
+#define mmDMA_QM_1_CQ_TSIZE_STS 0x4080DC
+
+#define mmDMA_QM_1_CQ_CTL_STS 0x4080E0
+
+#define mmDMA_QM_1_CQ_STS0 0x4080E4
+
+#define mmDMA_QM_1_CQ_STS1 0x4080E8
+
+#define mmDMA_QM_1_CQ_RD_RATE_LIM_EN 0x4080F0
+
+#define mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN 0x4080F4
+
+#define mmDMA_QM_1_CQ_RD_RATE_LIM_SAT 0x4080F8
+
+#define mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT 0x4080FC
+
+#define mmDMA_QM_1_CQ_IFIFO_CNT 0x408108
+
+#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO 0x408120
+
+#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI 0x408124
+
+#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO 0x408128
+
+#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI 0x40812C
+
+#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO 0x408130
+
+#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI 0x408134
+
+#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO 0x408138
+
+#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI 0x40813C
+
+#define mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET 0x408140
+
+#define mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET 0x408144
+
+#define mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET 0x408148
+
+#define mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET 0x40814C
+
+#define mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET 0x408150
+
+#define mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET 0x408154
+
+#define mmDMA_QM_1_CP_FENCE0_RDATA 0x408158
+
+#define mmDMA_QM_1_CP_FENCE1_RDATA 0x40815C
+
+#define mmDMA_QM_1_CP_FENCE2_RDATA 0x408160
+
+#define mmDMA_QM_1_CP_FENCE3_RDATA 0x408164
+
+#define mmDMA_QM_1_CP_FENCE0_CNT 0x408168
+
+#define mmDMA_QM_1_CP_FENCE1_CNT 0x40816C
+
+#define mmDMA_QM_1_CP_FENCE2_CNT 0x408170
+
+#define mmDMA_QM_1_CP_FENCE3_CNT 0x408174
+
+#define mmDMA_QM_1_CP_STS 0x408178
+
+#define mmDMA_QM_1_CP_CURRENT_INST_LO 0x40817C
+
+#define mmDMA_QM_1_CP_CURRENT_INST_HI 0x408180
+
+#define mmDMA_QM_1_CP_BARRIER_CFG 0x408184
+
+#define mmDMA_QM_1_CP_DBG_0 0x408188
+
+#define mmDMA_QM_1_PQ_BUF_ADDR 0x408300
+
+#define mmDMA_QM_1_PQ_BUF_RDATA 0x408304
+
+#define mmDMA_QM_1_CQ_BUF_ADDR 0x408308
+
+#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C
+
+#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
new file mode 100644
index 000000000000..b4f06e9b71d6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_2_REGS_H_
+#define ASIC_REG_DMA_QM_2_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_2 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_2_GLBL_CFG0 0x410000
+
+#define mmDMA_QM_2_GLBL_CFG1 0x410004
+
+#define mmDMA_QM_2_GLBL_PROT 0x410008
+
+#define mmDMA_QM_2_GLBL_ERR_CFG 0x41000C
+
+#define mmDMA_QM_2_GLBL_ERR_ADDR_LO 0x410010
+
+#define mmDMA_QM_2_GLBL_ERR_ADDR_HI 0x410014
+
+#define mmDMA_QM_2_GLBL_ERR_WDATA 0x410018
+
+#define mmDMA_QM_2_GLBL_SECURE_PROPS 0x41001C
+
+#define mmDMA_QM_2_GLBL_NON_SECURE_PROPS 0x410020
+
+#define mmDMA_QM_2_GLBL_STS0 0x410024
+
+#define mmDMA_QM_2_GLBL_STS1 0x410028
+
+#define mmDMA_QM_2_PQ_BASE_LO 0x410060
+
+#define mmDMA_QM_2_PQ_BASE_HI 0x410064
+
+#define mmDMA_QM_2_PQ_SIZE 0x410068
+
+#define mmDMA_QM_2_PQ_PI 0x41006C
+
+#define mmDMA_QM_2_PQ_CI 0x410070
+
+#define mmDMA_QM_2_PQ_CFG0 0x410074
+
+#define mmDMA_QM_2_PQ_CFG1 0x410078
+
+#define mmDMA_QM_2_PQ_ARUSER 0x41007C
+
+#define mmDMA_QM_2_PQ_PUSH0 0x410080
+
+#define mmDMA_QM_2_PQ_PUSH1 0x410084
+
+#define mmDMA_QM_2_PQ_PUSH2 0x410088
+
+#define mmDMA_QM_2_PQ_PUSH3 0x41008C
+
+#define mmDMA_QM_2_PQ_STS0 0x410090
+
+#define mmDMA_QM_2_PQ_STS1 0x410094
+
+#define mmDMA_QM_2_PQ_RD_RATE_LIM_EN 0x4100A0
+
+#define mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN 0x4100A4
+
+#define mmDMA_QM_2_PQ_RD_RATE_LIM_SAT 0x4100A8
+
+#define mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT 0x4100AC
+
+#define mmDMA_QM_2_CQ_CFG0 0x4100B0
+
+#define mmDMA_QM_2_CQ_CFG1 0x4100B4
+
+#define mmDMA_QM_2_CQ_ARUSER 0x4100B8
+
+#define mmDMA_QM_2_CQ_PTR_LO 0x4100C0
+
+#define mmDMA_QM_2_CQ_PTR_HI 0x4100C4
+
+#define mmDMA_QM_2_CQ_TSIZE 0x4100C8
+
+#define mmDMA_QM_2_CQ_CTL 0x4100CC
+
+#define mmDMA_QM_2_CQ_PTR_LO_STS 0x4100D4
+
+#define mmDMA_QM_2_CQ_PTR_HI_STS 0x4100D8
+
+#define mmDMA_QM_2_CQ_TSIZE_STS 0x4100DC
+
+#define mmDMA_QM_2_CQ_CTL_STS 0x4100E0
+
+#define mmDMA_QM_2_CQ_STS0 0x4100E4
+
+#define mmDMA_QM_2_CQ_STS1 0x4100E8
+
+#define mmDMA_QM_2_CQ_RD_RATE_LIM_EN 0x4100F0
+
+#define mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN 0x4100F4
+
+#define mmDMA_QM_2_CQ_RD_RATE_LIM_SAT 0x4100F8
+
+#define mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT 0x4100FC
+
+#define mmDMA_QM_2_CQ_IFIFO_CNT 0x410108
+
+#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO 0x410120
+
+#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI 0x410124
+
+#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO 0x410128
+
+#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI 0x41012C
+
+#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO 0x410130
+
+#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI 0x410134
+
+#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO 0x410138
+
+#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI 0x41013C
+
+#define mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET 0x410140
+
+#define mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET 0x410144
+
+#define mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET 0x410148
+
+#define mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET 0x41014C
+
+#define mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET 0x410150
+
+#define mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET 0x410154
+
+#define mmDMA_QM_2_CP_FENCE0_RDATA 0x410158
+
+#define mmDMA_QM_2_CP_FENCE1_RDATA 0x41015C
+
+#define mmDMA_QM_2_CP_FENCE2_RDATA 0x410160
+
+#define mmDMA_QM_2_CP_FENCE3_RDATA 0x410164
+
+#define mmDMA_QM_2_CP_FENCE0_CNT 0x410168
+
+#define mmDMA_QM_2_CP_FENCE1_CNT 0x41016C
+
+#define mmDMA_QM_2_CP_FENCE2_CNT 0x410170
+
+#define mmDMA_QM_2_CP_FENCE3_CNT 0x410174
+
+#define mmDMA_QM_2_CP_STS 0x410178
+
+#define mmDMA_QM_2_CP_CURRENT_INST_LO 0x41017C
+
+#define mmDMA_QM_2_CP_CURRENT_INST_HI 0x410180
+
+#define mmDMA_QM_2_CP_BARRIER_CFG 0x410184
+
+#define mmDMA_QM_2_CP_DBG_0 0x410188
+
+#define mmDMA_QM_2_PQ_BUF_ADDR 0x410300
+
+#define mmDMA_QM_2_PQ_BUF_RDATA 0x410304
+
+#define mmDMA_QM_2_CQ_BUF_ADDR 0x410308
+
+#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C
+
+#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
new file mode 100644
index 000000000000..53e3cd78a06b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_3_REGS_H_
+#define ASIC_REG_DMA_QM_3_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_3 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_3_GLBL_CFG0 0x418000
+
+#define mmDMA_QM_3_GLBL_CFG1 0x418004
+
+#define mmDMA_QM_3_GLBL_PROT 0x418008
+
+#define mmDMA_QM_3_GLBL_ERR_CFG 0x41800C
+
+#define mmDMA_QM_3_GLBL_ERR_ADDR_LO 0x418010
+
+#define mmDMA_QM_3_GLBL_ERR_ADDR_HI 0x418014
+
+#define mmDMA_QM_3_GLBL_ERR_WDATA 0x418018
+
+#define mmDMA_QM_3_GLBL_SECURE_PROPS 0x41801C
+
+#define mmDMA_QM_3_GLBL_NON_SECURE_PROPS 0x418020
+
+#define mmDMA_QM_3_GLBL_STS0 0x418024
+
+#define mmDMA_QM_3_GLBL_STS1 0x418028
+
+#define mmDMA_QM_3_PQ_BASE_LO 0x418060
+
+#define mmDMA_QM_3_PQ_BASE_HI 0x418064
+
+#define mmDMA_QM_3_PQ_SIZE 0x418068
+
+#define mmDMA_QM_3_PQ_PI 0x41806C
+
+#define mmDMA_QM_3_PQ_CI 0x418070
+
+#define mmDMA_QM_3_PQ_CFG0 0x418074
+
+#define mmDMA_QM_3_PQ_CFG1 0x418078
+
+#define mmDMA_QM_3_PQ_ARUSER 0x41807C
+
+#define mmDMA_QM_3_PQ_PUSH0 0x418080
+
+#define mmDMA_QM_3_PQ_PUSH1 0x418084
+
+#define mmDMA_QM_3_PQ_PUSH2 0x418088
+
+#define mmDMA_QM_3_PQ_PUSH3 0x41808C
+
+#define mmDMA_QM_3_PQ_STS0 0x418090
+
+#define mmDMA_QM_3_PQ_STS1 0x418094
+
+#define mmDMA_QM_3_PQ_RD_RATE_LIM_EN 0x4180A0
+
+#define mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN 0x4180A4
+
+#define mmDMA_QM_3_PQ_RD_RATE_LIM_SAT 0x4180A8
+
+#define mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT 0x4180AC
+
+#define mmDMA_QM_3_CQ_CFG0 0x4180B0
+
+#define mmDMA_QM_3_CQ_CFG1 0x4180B4
+
+#define mmDMA_QM_3_CQ_ARUSER 0x4180B8
+
+#define mmDMA_QM_3_CQ_PTR_LO 0x4180C0
+
+#define mmDMA_QM_3_CQ_PTR_HI 0x4180C4
+
+#define mmDMA_QM_3_CQ_TSIZE 0x4180C8
+
+#define mmDMA_QM_3_CQ_CTL 0x4180CC
+
+#define mmDMA_QM_3_CQ_PTR_LO_STS 0x4180D4
+
+#define mmDMA_QM_3_CQ_PTR_HI_STS 0x4180D8
+
+#define mmDMA_QM_3_CQ_TSIZE_STS 0x4180DC
+
+#define mmDMA_QM_3_CQ_CTL_STS 0x4180E0
+
+#define mmDMA_QM_3_CQ_STS0 0x4180E4
+
+#define mmDMA_QM_3_CQ_STS1 0x4180E8
+
+#define mmDMA_QM_3_CQ_RD_RATE_LIM_EN 0x4180F0
+
+#define mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN 0x4180F4
+
+#define mmDMA_QM_3_CQ_RD_RATE_LIM_SAT 0x4180F8
+
+#define mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT 0x4180FC
+
+#define mmDMA_QM_3_CQ_IFIFO_CNT 0x418108
+
+#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO 0x418120
+
+#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI 0x418124
+
+#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO 0x418128
+
+#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI 0x41812C
+
+#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO 0x418130
+
+#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI 0x418134
+
+#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO 0x418138
+
+#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI 0x41813C
+
+#define mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET 0x418140
+
+#define mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET 0x418144
+
+#define mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET 0x418148
+
+#define mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET 0x41814C
+
+#define mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET 0x418150
+
+#define mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET 0x418154
+
+#define mmDMA_QM_3_CP_FENCE0_RDATA 0x418158
+
+#define mmDMA_QM_3_CP_FENCE1_RDATA 0x41815C
+
+#define mmDMA_QM_3_CP_FENCE2_RDATA 0x418160
+
+#define mmDMA_QM_3_CP_FENCE3_RDATA 0x418164
+
+#define mmDMA_QM_3_CP_FENCE0_CNT 0x418168
+
+#define mmDMA_QM_3_CP_FENCE1_CNT 0x41816C
+
+#define mmDMA_QM_3_CP_FENCE2_CNT 0x418170
+
+#define mmDMA_QM_3_CP_FENCE3_CNT 0x418174
+
+#define mmDMA_QM_3_CP_STS 0x418178
+
+#define mmDMA_QM_3_CP_CURRENT_INST_LO 0x41817C
+
+#define mmDMA_QM_3_CP_CURRENT_INST_HI 0x418180
+
+#define mmDMA_QM_3_CP_BARRIER_CFG 0x418184
+
+#define mmDMA_QM_3_CP_DBG_0 0x418188
+
+#define mmDMA_QM_3_PQ_BUF_ADDR 0x418300
+
+#define mmDMA_QM_3_PQ_BUF_RDATA 0x418304
+
+#define mmDMA_QM_3_CQ_BUF_ADDR 0x418308
+
+#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C
+
+#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
new file mode 100644
index 000000000000..e0eb5f260201
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_QM_4_REGS_H_
+#define ASIC_REG_DMA_QM_4_REGS_H_
+
+/*
+ *****************************************
+ * DMA_QM_4 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA_QM_4_GLBL_CFG0 0x420000
+
+#define mmDMA_QM_4_GLBL_CFG1 0x420004
+
+#define mmDMA_QM_4_GLBL_PROT 0x420008
+
+#define mmDMA_QM_4_GLBL_ERR_CFG 0x42000C
+
+#define mmDMA_QM_4_GLBL_ERR_ADDR_LO 0x420010
+
+#define mmDMA_QM_4_GLBL_ERR_ADDR_HI 0x420014
+
+#define mmDMA_QM_4_GLBL_ERR_WDATA 0x420018
+
+#define mmDMA_QM_4_GLBL_SECURE_PROPS 0x42001C
+
+#define mmDMA_QM_4_GLBL_NON_SECURE_PROPS 0x420020
+
+#define mmDMA_QM_4_GLBL_STS0 0x420024
+
+#define mmDMA_QM_4_GLBL_STS1 0x420028
+
+#define mmDMA_QM_4_PQ_BASE_LO 0x420060
+
+#define mmDMA_QM_4_PQ_BASE_HI 0x420064
+
+#define mmDMA_QM_4_PQ_SIZE 0x420068
+
+#define mmDMA_QM_4_PQ_PI 0x42006C
+
+#define mmDMA_QM_4_PQ_CI 0x420070
+
+#define mmDMA_QM_4_PQ_CFG0 0x420074
+
+#define mmDMA_QM_4_PQ_CFG1 0x420078
+
+#define mmDMA_QM_4_PQ_ARUSER 0x42007C
+
+#define mmDMA_QM_4_PQ_PUSH0 0x420080
+
+#define mmDMA_QM_4_PQ_PUSH1 0x420084
+
+#define mmDMA_QM_4_PQ_PUSH2 0x420088
+
+#define mmDMA_QM_4_PQ_PUSH3 0x42008C
+
+#define mmDMA_QM_4_PQ_STS0 0x420090
+
+#define mmDMA_QM_4_PQ_STS1 0x420094
+
+#define mmDMA_QM_4_PQ_RD_RATE_LIM_EN 0x4200A0
+
+#define mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN 0x4200A4
+
+#define mmDMA_QM_4_PQ_RD_RATE_LIM_SAT 0x4200A8
+
+#define mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT 0x4200AC
+
+#define mmDMA_QM_4_CQ_CFG0 0x4200B0
+
+#define mmDMA_QM_4_CQ_CFG1 0x4200B4
+
+#define mmDMA_QM_4_CQ_ARUSER 0x4200B8
+
+#define mmDMA_QM_4_CQ_PTR_LO 0x4200C0
+
+#define mmDMA_QM_4_CQ_PTR_HI 0x4200C4
+
+#define mmDMA_QM_4_CQ_TSIZE 0x4200C8
+
+#define mmDMA_QM_4_CQ_CTL 0x4200CC
+
+#define mmDMA_QM_4_CQ_PTR_LO_STS 0x4200D4
+
+#define mmDMA_QM_4_CQ_PTR_HI_STS 0x4200D8
+
+#define mmDMA_QM_4_CQ_TSIZE_STS 0x4200DC
+
+#define mmDMA_QM_4_CQ_CTL_STS 0x4200E0
+
+#define mmDMA_QM_4_CQ_STS0 0x4200E4
+
+#define mmDMA_QM_4_CQ_STS1 0x4200E8
+
+#define mmDMA_QM_4_CQ_RD_RATE_LIM_EN 0x4200F0
+
+#define mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN 0x4200F4
+
+#define mmDMA_QM_4_CQ_RD_RATE_LIM_SAT 0x4200F8
+
+#define mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT 0x4200FC
+
+#define mmDMA_QM_4_CQ_IFIFO_CNT 0x420108
+
+#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO 0x420120
+
+#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI 0x420124
+
+#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO 0x420128
+
+#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI 0x42012C
+
+#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO 0x420130
+
+#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI 0x420134
+
+#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO 0x420138
+
+#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI 0x42013C
+
+#define mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET 0x420140
+
+#define mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET 0x420144
+
+#define mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET 0x420148
+
+#define mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET 0x42014C
+
+#define mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET 0x420150
+
+#define mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET 0x420154
+
+#define mmDMA_QM_4_CP_FENCE0_RDATA 0x420158
+
+#define mmDMA_QM_4_CP_FENCE1_RDATA 0x42015C
+
+#define mmDMA_QM_4_CP_FENCE2_RDATA 0x420160
+
+#define mmDMA_QM_4_CP_FENCE3_RDATA 0x420164
+
+#define mmDMA_QM_4_CP_FENCE0_CNT 0x420168
+
+#define mmDMA_QM_4_CP_FENCE1_CNT 0x42016C
+
+#define mmDMA_QM_4_CP_FENCE2_CNT 0x420170
+
+#define mmDMA_QM_4_CP_FENCE3_CNT 0x420174
+
+#define mmDMA_QM_4_CP_STS 0x420178
+
+#define mmDMA_QM_4_CP_CURRENT_INST_LO 0x42017C
+
+#define mmDMA_QM_4_CP_CURRENT_INST_HI 0x420180
+
+#define mmDMA_QM_4_CP_BARRIER_CFG 0x420184
+
+#define mmDMA_QM_4_CP_DBG_0 0x420188
+
+#define mmDMA_QM_4_PQ_BUF_ADDR 0x420300
+
+#define mmDMA_QM_4_PQ_BUF_RDATA 0x420304
+
+#define mmDMA_QM_4_CQ_BUF_ADDR 0x420308
+
+#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C
+
+#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */
+
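Side note (not part of the patch): the five dma_qm_*_regs.h files above repeat the same QMAN register map at a fixed 0x8000 stride — compare mmDMA_QM_0_PQ_CFG0 (0x400074), mmDMA_QM_1_PQ_CFG0 (0x408074), mmDMA_QM_2_PQ_CFG0 (0x410074), mmDMA_QM_3_PQ_CFG0 (0x418074) and mmDMA_QM_4_PQ_CFG0 (0x420074). The sketch below is a hypothetical helper illustrating that layout; it is not part of the driver.

/*
 * Illustrative sketch only: derive a per-instance DMA QM register
 * offset from the instance-0 define, assuming the 0x8000 stride
 * visible in the generated headers. goya_dma_qm_reg() is made up
 * for this example.
 */
#include <linux/types.h>

#define GOYA_DMA_QM_STRIDE	0x8000	/* 0x400000, 0x408000, ..., 0x420000 */

static inline u32 goya_dma_qm_reg(u32 qm0_reg, unsigned int dma_id)
{
	/* e.g. goya_dma_qm_reg(mmDMA_QM_0_PQ_CFG0, 3) == mmDMA_QM_3_PQ_CFG0 */
	return qm0_reg + dma_id * GOYA_DMA_QM_STRIDE;
}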
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
new file mode 100644
index 000000000000..85b15010cd7a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
@@ -0,0 +1,1372 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef GOYA_BLOCKS_H_
+#define GOYA_BLOCKS_H_
+
+#define mmPCI_NRTR_BASE 0x7FFC000000ull
+#define PCI_NRTR_MAX_OFFSET 0x608
+#define PCI_NRTR_SECTION 0x4000
+#define mmPCI_RD_REGULATOR_BASE 0x7FFC004000ull
+#define PCI_RD_REGULATOR_MAX_OFFSET 0x74
+#define PCI_RD_REGULATOR_SECTION 0x1000
+#define mmPCI_WR_REGULATOR_BASE 0x7FFC005000ull
+#define PCI_WR_REGULATOR_MAX_OFFSET 0x74
+#define PCI_WR_REGULATOR_SECTION 0x3B000
+#define mmMME1_RTR_BASE 0x7FFC040000ull
+#define MME1_RTR_MAX_OFFSET 0x608
+#define MME1_RTR_SECTION 0x4000
+#define mmMME1_RD_REGULATOR_BASE 0x7FFC044000ull
+#define MME1_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME1_RD_REGULATOR_SECTION 0x1000
+#define mmMME1_WR_REGULATOR_BASE 0x7FFC045000ull
+#define MME1_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME1_WR_REGULATOR_SECTION 0x3B000
+#define mmMME2_RTR_BASE 0x7FFC080000ull
+#define MME2_RTR_MAX_OFFSET 0x608
+#define MME2_RTR_SECTION 0x4000
+#define mmMME2_RD_REGULATOR_BASE 0x7FFC084000ull
+#define MME2_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME2_RD_REGULATOR_SECTION 0x1000
+#define mmMME2_WR_REGULATOR_BASE 0x7FFC085000ull
+#define MME2_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME2_WR_REGULATOR_SECTION 0x3B000
+#define mmMME3_RTR_BASE 0x7FFC0C0000ull
+#define MME3_RTR_MAX_OFFSET 0x608
+#define MME3_RTR_SECTION 0x4000
+#define mmMME3_RD_REGULATOR_BASE 0x7FFC0C4000ull
+#define MME3_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME3_RD_REGULATOR_SECTION 0x1000
+#define mmMME3_WR_REGULATOR_BASE 0x7FFC0C5000ull
+#define MME3_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME3_WR_REGULATOR_SECTION 0xB000
+#define mmMME_BASE 0x7FFC0D0000ull
+#define MME_MAX_OFFSET 0xBB0
+#define MME_SECTION 0x8000
+#define mmMME_QM_BASE 0x7FFC0D8000ull
+#define MME_QM_MAX_OFFSET 0x310
+#define MME_QM_SECTION 0x1000
+#define mmMME_CMDQ_BASE 0x7FFC0D9000ull
+#define MME_CMDQ_MAX_OFFSET 0x310
+#define MME_CMDQ_SECTION 0x1000
+#define mmACC_MS_ECC_MEM_0_BASE 0x7FFC0DA000ull
+#define ACC_MS_ECC_MEM_0_MAX_OFFSET 0x0
+#define ACC_MS_ECC_MEM_0_SECTION 0x1000
+#define mmACC_MS_ECC_MEM_1_BASE 0x7FFC0DB000ull
+#define ACC_MS_ECC_MEM_1_MAX_OFFSET 0x0
+#define ACC_MS_ECC_MEM_1_SECTION 0x1000
+#define mmACC_MS_ECC_MEM_2_BASE 0x7FFC0DC000ull
+#define ACC_MS_ECC_MEM_2_MAX_OFFSET 0x0
+#define ACC_MS_ECC_MEM_2_SECTION 0x1000
+#define mmACC_MS_ECC_MEM_3_BASE 0x7FFC0DD000ull
+#define ACC_MS_ECC_MEM_3_MAX_OFFSET 0x0
+#define ACC_MS_ECC_MEM_3_SECTION 0x1000
+#define mmSBA_ECC_MEM_BASE 0x7FFC0DE000ull
+#define SBA_ECC_MEM_MAX_OFFSET 0x0
+#define SBA_ECC_MEM_SECTION 0x1000
+#define mmSBB_ECC_MEM_BASE 0x7FFC0DF000ull
+#define SBB_ECC_MEM_MAX_OFFSET 0x0
+#define SBB_ECC_MEM_SECTION 0x21000
+#define mmMME4_RTR_BASE 0x7FFC100000ull
+#define MME4_RTR_MAX_OFFSET 0x608
+#define MME4_RTR_SECTION 0x4000
+#define mmMME4_RD_REGULATOR_BASE 0x7FFC104000ull
+#define MME4_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME4_RD_REGULATOR_SECTION 0x1000
+#define mmMME4_WR_REGULATOR_BASE 0x7FFC105000ull
+#define MME4_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME4_WR_REGULATOR_SECTION 0xB000
+#define mmSYNC_MNGR_BASE 0x7FFC110000ull
+#define SYNC_MNGR_MAX_OFFSET 0x4400
+#define SYNC_MNGR_SECTION 0x30000
+#define mmMME5_RTR_BASE 0x7FFC140000ull
+#define MME5_RTR_MAX_OFFSET 0x608
+#define MME5_RTR_SECTION 0x4000
+#define mmMME5_RD_REGULATOR_BASE 0x7FFC144000ull
+#define MME5_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME5_RD_REGULATOR_SECTION 0x1000
+#define mmMME5_WR_REGULATOR_BASE 0x7FFC145000ull
+#define MME5_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME5_WR_REGULATOR_SECTION 0x3B000
+#define mmMME6_RTR_BASE 0x7FFC180000ull
+#define MME6_RTR_MAX_OFFSET 0x608
+#define MME6_RTR_SECTION 0x4000
+#define mmMME6_RD_REGULATOR_BASE 0x7FFC184000ull
+#define MME6_RD_REGULATOR_MAX_OFFSET 0x74
+#define MME6_RD_REGULATOR_SECTION 0x1000
+#define mmMME6_WR_REGULATOR_BASE 0x7FFC185000ull
+#define MME6_WR_REGULATOR_MAX_OFFSET 0x74
+#define MME6_WR_REGULATOR_SECTION 0x3B000
+#define mmDMA_NRTR_BASE 0x7FFC1C0000ull
+#define DMA_NRTR_MAX_OFFSET 0x608
+#define DMA_NRTR_SECTION 0x4000
+#define mmDMA_RD_REGULATOR_BASE 0x7FFC1C4000ull
+#define DMA_RD_REGULATOR_MAX_OFFSET 0x74
+#define DMA_RD_REGULATOR_SECTION 0x1000
+#define mmDMA_WR_REGULATOR_BASE 0x7FFC1C5000ull
+#define DMA_WR_REGULATOR_MAX_OFFSET 0x74
+#define DMA_WR_REGULATOR_SECTION 0x3B000
+#define mmSRAM_Y0_X0_BANK_BASE 0x7FFC200000ull
+#define SRAM_Y0_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X0_RTR_BASE 0x7FFC201000ull
+#define SRAM_Y0_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y0_X1_BANK_BASE 0x7FFC204000ull
+#define SRAM_Y0_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X1_RTR_BASE 0x7FFC205000ull
+#define SRAM_Y0_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y0_X2_BANK_BASE 0x7FFC208000ull
+#define SRAM_Y0_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X2_RTR_BASE 0x7FFC209000ull
+#define SRAM_Y0_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y0_X3_BANK_BASE 0x7FFC20C000ull
+#define SRAM_Y0_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X3_RTR_BASE 0x7FFC20D000ull
+#define SRAM_Y0_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y0_X4_BANK_BASE 0x7FFC210000ull
+#define SRAM_Y0_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y0_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X4_RTR_BASE 0x7FFC211000ull
+#define SRAM_Y0_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y0_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y1_X0_BANK_BASE 0x7FFC220000ull
+#define SRAM_Y1_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X0_RTR_BASE 0x7FFC221000ull
+#define SRAM_Y1_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y1_X1_BANK_BASE 0x7FFC224000ull
+#define SRAM_Y1_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X1_RTR_BASE 0x7FFC225000ull
+#define SRAM_Y1_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y1_X2_BANK_BASE 0x7FFC228000ull
+#define SRAM_Y1_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X2_RTR_BASE 0x7FFC229000ull
+#define SRAM_Y1_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y1_X3_BANK_BASE 0x7FFC22C000ull
+#define SRAM_Y1_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X3_RTR_BASE 0x7FFC22D000ull
+#define SRAM_Y1_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y1_X4_BANK_BASE 0x7FFC230000ull
+#define SRAM_Y1_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y1_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X4_RTR_BASE 0x7FFC231000ull
+#define SRAM_Y1_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y1_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y2_X0_BANK_BASE 0x7FFC240000ull
+#define SRAM_Y2_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X0_RTR_BASE 0x7FFC241000ull
+#define SRAM_Y2_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y2_X1_BANK_BASE 0x7FFC244000ull
+#define SRAM_Y2_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X1_RTR_BASE 0x7FFC245000ull
+#define SRAM_Y2_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y2_X2_BANK_BASE 0x7FFC248000ull
+#define SRAM_Y2_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X2_RTR_BASE 0x7FFC249000ull
+#define SRAM_Y2_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y2_X3_BANK_BASE 0x7FFC24C000ull
+#define SRAM_Y2_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X3_RTR_BASE 0x7FFC24D000ull
+#define SRAM_Y2_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y2_X4_BANK_BASE 0x7FFC250000ull
+#define SRAM_Y2_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y2_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X4_RTR_BASE 0x7FFC251000ull
+#define SRAM_Y2_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y2_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y3_X0_BANK_BASE 0x7FFC260000ull
+#define SRAM_Y3_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X0_RTR_BASE 0x7FFC261000ull
+#define SRAM_Y3_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y3_X1_BANK_BASE 0x7FFC264000ull
+#define SRAM_Y3_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X1_RTR_BASE 0x7FFC265000ull
+#define SRAM_Y3_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y3_X2_BANK_BASE 0x7FFC268000ull
+#define SRAM_Y3_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X2_RTR_BASE 0x7FFC269000ull
+#define SRAM_Y3_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y3_X3_BANK_BASE 0x7FFC26C000ull
+#define SRAM_Y3_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X3_RTR_BASE 0x7FFC26D000ull
+#define SRAM_Y3_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y3_X4_BANK_BASE 0x7FFC270000ull
+#define SRAM_Y3_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y3_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X4_RTR_BASE 0x7FFC271000ull
+#define SRAM_Y3_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y3_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y4_X0_BANK_BASE 0x7FFC280000ull
+#define SRAM_Y4_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X0_RTR_BASE 0x7FFC281000ull
+#define SRAM_Y4_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y4_X1_BANK_BASE 0x7FFC284000ull
+#define SRAM_Y4_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X1_RTR_BASE 0x7FFC285000ull
+#define SRAM_Y4_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y4_X2_BANK_BASE 0x7FFC288000ull
+#define SRAM_Y4_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X2_RTR_BASE 0x7FFC289000ull
+#define SRAM_Y4_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y4_X3_BANK_BASE 0x7FFC28C000ull
+#define SRAM_Y4_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X3_RTR_BASE 0x7FFC28D000ull
+#define SRAM_Y4_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y4_X4_BANK_BASE 0x7FFC290000ull
+#define SRAM_Y4_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y4_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y4_X4_RTR_BASE 0x7FFC291000ull
+#define SRAM_Y4_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y4_X4_RTR_SECTION 0xF000
+#define mmSRAM_Y5_X0_BANK_BASE 0x7FFC2A0000ull
+#define SRAM_Y5_X0_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X0_RTR_BASE 0x7FFC2A1000ull
+#define SRAM_Y5_X0_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X0_RTR_SECTION 0x3000
+#define mmSRAM_Y5_X1_BANK_BASE 0x7FFC2A4000ull
+#define SRAM_Y5_X1_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X1_RTR_BASE 0x7FFC2A5000ull
+#define SRAM_Y5_X1_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X1_RTR_SECTION 0x3000
+#define mmSRAM_Y5_X2_BANK_BASE 0x7FFC2A8000ull
+#define SRAM_Y5_X2_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X2_RTR_BASE 0x7FFC2A9000ull
+#define SRAM_Y5_X2_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X2_RTR_SECTION 0x3000
+#define mmSRAM_Y5_X3_BANK_BASE 0x7FFC2AC000ull
+#define SRAM_Y5_X3_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X3_RTR_BASE 0x7FFC2AD000ull
+#define SRAM_Y5_X3_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X3_RTR_SECTION 0x3000
+#define mmSRAM_Y5_X4_BANK_BASE 0x7FFC2B0000ull
+#define SRAM_Y5_X4_BANK_MAX_OFFSET 0x4
+#define SRAM_Y5_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y5_X4_RTR_BASE 0x7FFC2B1000ull
+#define SRAM_Y5_X4_RTR_MAX_OFFSET 0x334
+#define SRAM_Y5_X4_RTR_SECTION 0x14F000
+#define mmDMA_QM_0_BASE 0x7FFC400000ull
+#define DMA_QM_0_MAX_OFFSET 0x310
+#define DMA_QM_0_SECTION 0x1000
+#define mmDMA_CH_0_BASE 0x7FFC401000ull
+#define DMA_CH_0_MAX_OFFSET 0x200
+#define DMA_CH_0_SECTION 0x7000
+#define mmDMA_QM_1_BASE 0x7FFC408000ull
+#define DMA_QM_1_MAX_OFFSET 0x310
+#define DMA_QM_1_SECTION 0x1000
+#define mmDMA_CH_1_BASE 0x7FFC409000ull
+#define DMA_CH_1_MAX_OFFSET 0x200
+#define DMA_CH_1_SECTION 0x7000
+#define mmDMA_QM_2_BASE 0x7FFC410000ull
+#define DMA_QM_2_MAX_OFFSET 0x310
+#define DMA_QM_2_SECTION 0x1000
+#define mmDMA_CH_2_BASE 0x7FFC411000ull
+#define DMA_CH_2_MAX_OFFSET 0x200
+#define DMA_CH_2_SECTION 0x7000
+#define mmDMA_QM_3_BASE 0x7FFC418000ull
+#define DMA_QM_3_MAX_OFFSET 0x310
+#define DMA_QM_3_SECTION 0x1000
+#define mmDMA_CH_3_BASE 0x7FFC419000ull
+#define DMA_CH_3_MAX_OFFSET 0x200
+#define DMA_CH_3_SECTION 0x7000
+#define mmDMA_QM_4_BASE 0x7FFC420000ull
+#define DMA_QM_4_MAX_OFFSET 0x310
+#define DMA_QM_4_SECTION 0x1000
+#define mmDMA_CH_4_BASE 0x7FFC421000ull
+#define DMA_CH_4_MAX_OFFSET 0x200
+#define DMA_CH_4_SECTION 0x20000
+#define mmCPU_CA53_CFG_BASE 0x7FFC441000ull
+#define CPU_CA53_CFG_MAX_OFFSET 0x218
+#define CPU_CA53_CFG_SECTION 0x1000
+#define mmCPU_IF_BASE 0x7FFC442000ull
+#define CPU_IF_MAX_OFFSET 0x134
+#define CPU_IF_SECTION 0x2000
+#define mmCPU_TIMESTAMP_BASE 0x7FFC444000ull
+#define CPU_TIMESTAMP_MAX_OFFSET 0x1000
+#define CPU_TIMESTAMP_SECTION 0x3C000
+#define mmMMU_BASE 0x7FFC480000ull
+#define MMU_MAX_OFFSET 0x44
+#define MMU_SECTION 0x10000
+#define mmSTLB_BASE 0x7FFC490000ull
+#define STLB_MAX_OFFSET 0x50
+#define STLB_SECTION 0x10000
+#define mmNORTH_THERMAL_SENSOR_BASE 0x7FFC4A0000ull
+#define NORTH_THERMAL_SENSOR_MAX_OFFSET 0xE64
+#define NORTH_THERMAL_SENSOR_SECTION 0x1000
+#define mmMC_PLL_BASE 0x7FFC4A1000ull
+#define MC_PLL_MAX_OFFSET 0x444
+#define MC_PLL_SECTION 0x1000
+#define mmCPU_PLL_BASE 0x7FFC4A2000ull
+#define CPU_PLL_MAX_OFFSET 0x444
+#define CPU_PLL_SECTION 0x1000
+#define mmIC_PLL_BASE 0x7FFC4A3000ull
+#define IC_PLL_MAX_OFFSET 0x444
+#define IC_PLL_SECTION 0x1000
+#define mmDMA_PROCESS_MON_BASE 0x7FFC4A4000ull
+#define DMA_PROCESS_MON_MAX_OFFSET 0x4
+#define DMA_PROCESS_MON_SECTION 0xC000
+#define mmDMA_MACRO_BASE 0x7FFC4B0000ull
+#define DMA_MACRO_MAX_OFFSET 0x15C
+#define DMA_MACRO_SECTION 0x150000
+#define mmDDR_PHY_CH0_BASE 0x7FFC600000ull
+#define DDR_PHY_CH0_MAX_OFFSET 0x0
+#define DDR_PHY_CH0_SECTION 0x40000
+#define mmDDR_MC_CH0_BASE 0x7FFC640000ull
+#define DDR_MC_CH0_MAX_OFFSET 0xF34
+#define DDR_MC_CH0_SECTION 0x8000
+#define mmDDR_MISC_CH0_BASE 0x7FFC648000ull
+#define DDR_MISC_CH0_MAX_OFFSET 0x204
+#define DDR_MISC_CH0_SECTION 0xB8000
+#define mmDDR_PHY_CH1_BASE 0x7FFC700000ull
+#define DDR_PHY_CH1_MAX_OFFSET 0x0
+#define DDR_PHY_CH1_SECTION 0x40000
+#define mmDDR_MC_CH1_BASE 0x7FFC740000ull
+#define DDR_MC_CH1_MAX_OFFSET 0xF34
+#define DDR_MC_CH1_SECTION 0x8000
+#define mmDDR_MISC_CH1_BASE 0x7FFC748000ull
+#define DDR_MISC_CH1_MAX_OFFSET 0x204
+#define DDR_MISC_CH1_SECTION 0xB8000
+#define mmGIC_BASE 0x7FFC800000ull
+#define GIC_MAX_OFFSET 0x10000
+#define GIC_SECTION 0x401000
+#define mmPCIE_WRAP_BASE 0x7FFCC01000ull
+#define PCIE_WRAP_MAX_OFFSET 0xDF4
+#define PCIE_WRAP_SECTION 0x1000
+#define mmPCIE_DBI_BASE 0x7FFCC02000ull
+#define PCIE_DBI_MAX_OFFSET 0xC04
+#define PCIE_DBI_SECTION 0x2000
+#define mmPCIE_CORE_BASE 0x7FFCC04000ull
+#define PCIE_CORE_MAX_OFFSET 0x9B8
+#define PCIE_CORE_SECTION 0x1000
+#define mmPCIE_DB_CFG_BASE 0x7FFCC05000ull
+#define PCIE_DB_CFG_MAX_OFFSET 0xE34
+#define PCIE_DB_CFG_SECTION 0x1000
+#define mmPCIE_DB_CMD_BASE 0x7FFCC06000ull
+#define PCIE_DB_CMD_MAX_OFFSET 0x810
+#define PCIE_DB_CMD_SECTION 0x1000
+#define mmPCIE_AUX_BASE 0x7FFCC07000ull
+#define PCIE_AUX_MAX_OFFSET 0x9BC
+#define PCIE_AUX_SECTION 0x1000
+#define mmPCIE_DB_RSV_BASE 0x7FFCC08000ull
+#define PCIE_DB_RSV_MAX_OFFSET 0x800
+#define PCIE_DB_RSV_SECTION 0x8000
+#define mmPCIE_PHY_BASE 0x7FFCC10000ull
+#define PCIE_PHY_MAX_OFFSET 0x924
+#define PCIE_PHY_SECTION 0x30000
+#define mmPSOC_I2C_M0_BASE 0x7FFCC40000ull
+#define PSOC_I2C_M0_MAX_OFFSET 0x100
+#define PSOC_I2C_M0_SECTION 0x1000
+#define mmPSOC_I2C_M1_BASE 0x7FFCC41000ull
+#define PSOC_I2C_M1_MAX_OFFSET 0x100
+#define PSOC_I2C_M1_SECTION 0x1000
+#define mmPSOC_I2C_S_BASE 0x7FFCC42000ull
+#define PSOC_I2C_S_MAX_OFFSET 0x100
+#define PSOC_I2C_S_SECTION 0x1000
+#define mmPSOC_SPI_BASE 0x7FFCC43000ull
+#define PSOC_SPI_MAX_OFFSET 0x100
+#define PSOC_SPI_SECTION 0x1000
+#define mmPSOC_EMMC_BASE 0x7FFCC44000ull
+#define PSOC_EMMC_MAX_OFFSET 0xF70
+#define PSOC_EMMC_SECTION 0x1000
+#define mmPSOC_UART_0_BASE 0x7FFCC45000ull
+#define PSOC_UART_0_MAX_OFFSET 0x1000
+#define PSOC_UART_0_SECTION 0x1000
+#define mmPSOC_UART_1_BASE 0x7FFCC46000ull
+#define PSOC_UART_1_MAX_OFFSET 0x1000
+#define PSOC_UART_1_SECTION 0x1000
+#define mmPSOC_TIMER_BASE 0x7FFCC47000ull
+#define PSOC_TIMER_MAX_OFFSET 0x1000
+#define PSOC_TIMER_SECTION 0x1000
+#define mmPSOC_WDOG_BASE 0x7FFCC48000ull
+#define PSOC_WDOG_MAX_OFFSET 0x1000
+#define PSOC_WDOG_SECTION 0x1000
+#define mmPSOC_TIMESTAMP_BASE 0x7FFCC49000ull
+#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000
+#define PSOC_TIMESTAMP_SECTION 0x1000
+#define mmPSOC_EFUSE_BASE 0x7FFCC4A000ull
+#define PSOC_EFUSE_MAX_OFFSET 0x10C
+#define PSOC_EFUSE_SECTION 0x1000
+#define mmPSOC_GLOBAL_CONF_BASE 0x7FFCC4B000ull
+#define PSOC_GLOBAL_CONF_MAX_OFFSET 0xA48
+#define PSOC_GLOBAL_CONF_SECTION 0x1000
+#define mmPSOC_GPIO0_BASE 0x7FFCC4C000ull
+#define PSOC_GPIO0_MAX_OFFSET 0x1000
+#define PSOC_GPIO0_SECTION 0x1000
+#define mmPSOC_GPIO1_BASE 0x7FFCC4D000ull
+#define PSOC_GPIO1_MAX_OFFSET 0x1000
+#define PSOC_GPIO1_SECTION 0x1000
+#define mmPSOC_BTL_BASE 0x7FFCC4E000ull
+#define PSOC_BTL_MAX_OFFSET 0x124
+#define PSOC_BTL_SECTION 0x1000
+#define mmPSOC_CS_TRACE_BASE 0x7FFCC4F000ull
+#define PSOC_CS_TRACE_MAX_OFFSET 0x0
+#define PSOC_CS_TRACE_SECTION 0x1000
+#define mmPSOC_GPIO2_BASE 0x7FFCC50000ull
+#define PSOC_GPIO2_MAX_OFFSET 0x1000
+#define PSOC_GPIO2_SECTION 0x1000
+#define mmPSOC_GPIO3_BASE 0x7FFCC51000ull
+#define PSOC_GPIO3_MAX_OFFSET 0x1000
+#define PSOC_GPIO3_SECTION 0x1000
+#define mmPSOC_GPIO4_BASE 0x7FFCC52000ull
+#define PSOC_GPIO4_MAX_OFFSET 0x1000
+#define PSOC_GPIO4_SECTION 0x1000
+#define mmPSOC_DFT_EFUSE_BASE 0x7FFCC53000ull
+#define PSOC_DFT_EFUSE_MAX_OFFSET 0x10C
+#define PSOC_DFT_EFUSE_SECTION 0x1000
+#define mmPSOC_PM_BASE 0x7FFCC54000ull
+#define PSOC_PM_MAX_OFFSET 0x4
+#define PSOC_PM_SECTION 0x1000
+#define mmPSOC_TS_BASE 0x7FFCC55000ull
+#define PSOC_TS_MAX_OFFSET 0xE64
+#define PSOC_TS_SECTION 0xB000
+#define mmPSOC_MII_BASE 0x7FFCC60000ull
+#define PSOC_MII_MAX_OFFSET 0x105C
+#define PSOC_MII_SECTION 0x10000
+#define mmPSOC_EMMC_PLL_BASE 0x7FFCC70000ull
+#define PSOC_EMMC_PLL_MAX_OFFSET 0x444
+#define PSOC_EMMC_PLL_SECTION 0x1000
+#define mmPSOC_MME_PLL_BASE 0x7FFCC71000ull
+#define PSOC_MME_PLL_MAX_OFFSET 0x444
+#define PSOC_MME_PLL_SECTION 0x1000
+#define mmPSOC_PCI_PLL_BASE 0x7FFCC72000ull
+#define PSOC_PCI_PLL_MAX_OFFSET 0x444
+#define PSOC_PCI_PLL_SECTION 0x6000
+#define mmPSOC_PWM0_BASE 0x7FFCC78000ull
+#define PSOC_PWM0_MAX_OFFSET 0x58
+#define PSOC_PWM0_SECTION 0x1000
+#define mmPSOC_PWM1_BASE 0x7FFCC79000ull
+#define PSOC_PWM1_MAX_OFFSET 0x58
+#define PSOC_PWM1_SECTION 0x1000
+#define mmPSOC_PWM2_BASE 0x7FFCC7A000ull
+#define PSOC_PWM2_MAX_OFFSET 0x58
+#define PSOC_PWM2_SECTION 0x1000
+#define mmPSOC_PWM3_BASE 0x7FFCC7B000ull
+#define PSOC_PWM3_MAX_OFFSET 0x58
+#define PSOC_PWM3_SECTION 0x185000
+#define mmTPC0_NRTR_BASE 0x7FFCE00000ull
+#define TPC0_NRTR_MAX_OFFSET 0x608
+#define TPC0_NRTR_SECTION 0x1000
+#define mmTPC_PLL_BASE 0x7FFCE01000ull
+#define TPC_PLL_MAX_OFFSET 0x444
+#define TPC_PLL_SECTION 0x1000
+#define mmTPC_THEMAL_SENSOR_BASE 0x7FFCE02000ull
+#define TPC_THEMAL_SENSOR_MAX_OFFSET 0xE64
+#define TPC_THEMAL_SENSOR_SECTION 0x1000
+#define mmTPC_PROCESS_MON_BASE 0x7FFCE03000ull
+#define TPC_PROCESS_MON_MAX_OFFSET 0x4
+#define TPC_PROCESS_MON_SECTION 0x1000
+#define mmTPC0_RD_REGULATOR_BASE 0x7FFCE04000ull
+#define TPC0_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC0_RD_REGULATOR_SECTION 0x1000
+#define mmTPC0_WR_REGULATOR_BASE 0x7FFCE05000ull
+#define TPC0_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC0_WR_REGULATOR_SECTION 0x1000
+#define mmTPC0_CFG_BASE 0x7FFCE06000ull
+#define TPC0_CFG_MAX_OFFSET 0xE30
+#define TPC0_CFG_SECTION 0x2000
+#define mmTPC0_QM_BASE 0x7FFCE08000ull
+#define TPC0_QM_MAX_OFFSET 0x310
+#define TPC0_QM_SECTION 0x1000
+#define mmTPC0_CMDQ_BASE 0x7FFCE09000ull
+#define TPC0_CMDQ_MAX_OFFSET 0x310
+#define TPC0_CMDQ_SECTION 0x37000
+#define mmTPC1_RTR_BASE 0x7FFCE40000ull
+#define TPC1_RTR_MAX_OFFSET 0x608
+#define TPC1_RTR_SECTION 0x4000
+#define mmTPC1_WR_REGULATOR_BASE 0x7FFCE44000ull
+#define TPC1_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC1_WR_REGULATOR_SECTION 0x1000
+#define mmTPC1_RD_REGULATOR_BASE 0x7FFCE45000ull
+#define TPC1_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC1_RD_REGULATOR_SECTION 0x1000
+#define mmTPC1_CFG_BASE 0x7FFCE46000ull
+#define TPC1_CFG_MAX_OFFSET 0xE30
+#define TPC1_CFG_SECTION 0x2000
+#define mmTPC1_QM_BASE 0x7FFCE48000ull
+#define TPC1_QM_MAX_OFFSET 0x310
+#define TPC1_QM_SECTION 0x1000
+#define mmTPC1_CMDQ_BASE 0x7FFCE49000ull
+#define TPC1_CMDQ_MAX_OFFSET 0x310
+#define TPC1_CMDQ_SECTION 0x37000
+#define mmTPC2_RTR_BASE 0x7FFCE80000ull
+#define TPC2_RTR_MAX_OFFSET 0x608
+#define TPC2_RTR_SECTION 0x4000
+#define mmTPC2_RD_REGULATOR_BASE 0x7FFCE84000ull
+#define TPC2_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC2_RD_REGULATOR_SECTION 0x1000
+#define mmTPC2_WR_REGULATOR_BASE 0x7FFCE85000ull
+#define TPC2_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC2_WR_REGULATOR_SECTION 0x1000
+#define mmTPC2_CFG_BASE 0x7FFCE86000ull
+#define TPC2_CFG_MAX_OFFSET 0xE30
+#define TPC2_CFG_SECTION 0x2000
+#define mmTPC2_QM_BASE 0x7FFCE88000ull
+#define TPC2_QM_MAX_OFFSET 0x310
+#define TPC2_QM_SECTION 0x1000
+#define mmTPC2_CMDQ_BASE 0x7FFCE89000ull
+#define TPC2_CMDQ_MAX_OFFSET 0x310
+#define TPC2_CMDQ_SECTION 0x37000
+#define mmTPC3_RTR_BASE 0x7FFCEC0000ull
+#define TPC3_RTR_MAX_OFFSET 0x608
+#define TPC3_RTR_SECTION 0x4000
+#define mmTPC3_RD_REGULATOR_BASE 0x7FFCEC4000ull
+#define TPC3_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC3_RD_REGULATOR_SECTION 0x1000
+#define mmTPC3_WR_REGULATOR_BASE 0x7FFCEC5000ull
+#define TPC3_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC3_WR_REGULATOR_SECTION 0x1000
+#define mmTPC3_CFG_BASE 0x7FFCEC6000ull
+#define TPC3_CFG_MAX_OFFSET 0xE30
+#define TPC3_CFG_SECTION 0x2000
+#define mmTPC3_QM_BASE 0x7FFCEC8000ull
+#define TPC3_QM_MAX_OFFSET 0x310
+#define TPC3_QM_SECTION 0x1000
+#define mmTPC3_CMDQ_BASE 0x7FFCEC9000ull
+#define TPC3_CMDQ_MAX_OFFSET 0x310
+#define TPC3_CMDQ_SECTION 0x37000
+#define mmTPC4_RTR_BASE 0x7FFCF00000ull
+#define TPC4_RTR_MAX_OFFSET 0x608
+#define TPC4_RTR_SECTION 0x4000
+#define mmTPC4_RD_REGULATOR_BASE 0x7FFCF04000ull
+#define TPC4_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC4_RD_REGULATOR_SECTION 0x1000
+#define mmTPC4_WR_REGULATOR_BASE 0x7FFCF05000ull
+#define TPC4_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC4_WR_REGULATOR_SECTION 0x1000
+#define mmTPC4_CFG_BASE 0x7FFCF06000ull
+#define TPC4_CFG_MAX_OFFSET 0xE30
+#define TPC4_CFG_SECTION 0x2000
+#define mmTPC4_QM_BASE 0x7FFCF08000ull
+#define TPC4_QM_MAX_OFFSET 0x310
+#define TPC4_QM_SECTION 0x1000
+#define mmTPC4_CMDQ_BASE 0x7FFCF09000ull
+#define TPC4_CMDQ_MAX_OFFSET 0x310
+#define TPC4_CMDQ_SECTION 0x37000
+#define mmTPC5_RTR_BASE 0x7FFCF40000ull
+#define TPC5_RTR_MAX_OFFSET 0x608
+#define TPC5_RTR_SECTION 0x4000
+#define mmTPC5_RD_REGULATOR_BASE 0x7FFCF44000ull
+#define TPC5_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC5_RD_REGULATOR_SECTION 0x1000
+#define mmTPC5_WR_REGULATOR_BASE 0x7FFCF45000ull
+#define TPC5_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC5_WR_REGULATOR_SECTION 0x1000
+#define mmTPC5_CFG_BASE 0x7FFCF46000ull
+#define TPC5_CFG_MAX_OFFSET 0xE30
+#define TPC5_CFG_SECTION 0x2000
+#define mmTPC5_QM_BASE 0x7FFCF48000ull
+#define TPC5_QM_MAX_OFFSET 0x310
+#define TPC5_QM_SECTION 0x1000
+#define mmTPC5_CMDQ_BASE 0x7FFCF49000ull
+#define TPC5_CMDQ_MAX_OFFSET 0x310
+#define TPC5_CMDQ_SECTION 0x37000
+#define mmTPC6_RTR_BASE 0x7FFCF80000ull
+#define TPC6_RTR_MAX_OFFSET 0x608
+#define TPC6_RTR_SECTION 0x4000
+#define mmTPC6_RD_REGULATOR_BASE 0x7FFCF84000ull
+#define TPC6_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC6_RD_REGULATOR_SECTION 0x1000
+#define mmTPC6_WR_REGULATOR_BASE 0x7FFCF85000ull
+#define TPC6_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC6_WR_REGULATOR_SECTION 0x1000
+#define mmTPC6_CFG_BASE 0x7FFCF86000ull
+#define TPC6_CFG_MAX_OFFSET 0xE30
+#define TPC6_CFG_SECTION 0x2000
+#define mmTPC6_QM_BASE 0x7FFCF88000ull
+#define TPC6_QM_MAX_OFFSET 0x310
+#define TPC6_QM_SECTION 0x1000
+#define mmTPC6_CMDQ_BASE 0x7FFCF89000ull
+#define TPC6_CMDQ_MAX_OFFSET 0x310
+#define TPC6_CMDQ_SECTION 0x37000
+#define mmTPC7_NRTR_BASE 0x7FFCFC0000ull
+#define TPC7_NRTR_MAX_OFFSET 0x608
+#define TPC7_NRTR_SECTION 0x4000
+#define mmTPC7_RD_REGULATOR_BASE 0x7FFCFC4000ull
+#define TPC7_RD_REGULATOR_MAX_OFFSET 0x74
+#define TPC7_RD_REGULATOR_SECTION 0x1000
+#define mmTPC7_WR_REGULATOR_BASE 0x7FFCFC5000ull
+#define TPC7_WR_REGULATOR_MAX_OFFSET 0x74
+#define TPC7_WR_REGULATOR_SECTION 0x1000
+#define mmTPC7_CFG_BASE 0x7FFCFC6000ull
+#define TPC7_CFG_MAX_OFFSET 0xE30
+#define TPC7_CFG_SECTION 0x2000
+#define mmTPC7_QM_BASE 0x7FFCFC8000ull
+#define TPC7_QM_MAX_OFFSET 0x310
+#define TPC7_QM_SECTION 0x1000
+#define mmTPC7_CMDQ_BASE 0x7FFCFC9000ull
+#define TPC7_CMDQ_MAX_OFFSET 0x310
+#define TPC7_CMDQ_SECTION 0x1037000
+#define mmMME_TOP_TABLE_BASE 0x7FFE000000ull
+#define MME_TOP_TABLE_MAX_OFFSET 0x1000
+#define MME_TOP_TABLE_SECTION 0x1000
+#define mmMME0_RTR_FUNNEL_BASE 0x7FFE001000ull
+#define MME0_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME0_RTR_FUNNEL_SECTION 0x40000
+#define mmMME1_RTR_FUNNEL_BASE 0x7FFE041000ull
+#define MME1_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME1_RTR_FUNNEL_SECTION 0x1000
+#define mmMME1_SBA_STM_BASE 0x7FFE042000ull
+#define MME1_SBA_STM_MAX_OFFSET 0x1000
+#define MME1_SBA_STM_SECTION 0x1000
+#define mmMME1_SBA_CTI_BASE 0x7FFE043000ull
+#define MME1_SBA_CTI_MAX_OFFSET 0x1000
+#define MME1_SBA_CTI_SECTION 0x1000
+#define mmMME1_SBA_ETF_BASE 0x7FFE044000ull
+#define MME1_SBA_ETF_MAX_OFFSET 0x1000
+#define MME1_SBA_ETF_SECTION 0x1000
+#define mmMME1_SBA_SPMU_BASE 0x7FFE045000ull
+#define MME1_SBA_SPMU_MAX_OFFSET 0x1000
+#define MME1_SBA_SPMU_SECTION 0x1000
+#define mmMME1_SBA_CTI0_BASE 0x7FFE046000ull
+#define MME1_SBA_CTI0_MAX_OFFSET 0x1000
+#define MME1_SBA_CTI0_SECTION 0x1000
+#define mmMME1_SBA_CTI1_BASE 0x7FFE047000ull
+#define MME1_SBA_CTI1_MAX_OFFSET 0x1000
+#define MME1_SBA_CTI1_SECTION 0x1000
+#define mmMME1_SBA_BMON0_BASE 0x7FFE048000ull
+#define MME1_SBA_BMON0_MAX_OFFSET 0x1000
+#define MME1_SBA_BMON0_SECTION 0x1000
+#define mmMME1_SBA_BMON1_BASE 0x7FFE049000ull
+#define MME1_SBA_BMON1_MAX_OFFSET 0x1000
+#define MME1_SBA_BMON1_SECTION 0x38000
+#define mmMME2_RTR_FUNNEL_BASE 0x7FFE081000ull
+#define MME2_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME2_RTR_FUNNEL_SECTION 0x40000
+#define mmMME3_RTR_FUNNEL_BASE 0x7FFE0C1000ull
+#define MME3_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME3_RTR_FUNNEL_SECTION 0x1000
+#define mmMME3_SBB_STM_BASE 0x7FFE0C2000ull
+#define MME3_SBB_STM_MAX_OFFSET 0x1000
+#define MME3_SBB_STM_SECTION 0x1000
+#define mmMME3_SBB_CTI_BASE 0x7FFE0C3000ull
+#define MME3_SBB_CTI_MAX_OFFSET 0x1000
+#define MME3_SBB_CTI_SECTION 0x1000
+#define mmMME3_SBB_ETF_BASE 0x7FFE0C4000ull
+#define MME3_SBB_ETF_MAX_OFFSET 0x1000
+#define MME3_SBB_ETF_SECTION 0x1000
+#define mmMME3_SBB_SPMU_BASE 0x7FFE0C5000ull
+#define MME3_SBB_SPMU_MAX_OFFSET 0x1000
+#define MME3_SBB_SPMU_SECTION 0x1000
+#define mmMME3_SBB_CTI0_BASE 0x7FFE0C6000ull
+#define MME3_SBB_CTI0_MAX_OFFSET 0x1000
+#define MME3_SBB_CTI0_SECTION 0x1000
+#define mmMME3_SBB_CTI1_BASE 0x7FFE0C7000ull
+#define MME3_SBB_CTI1_MAX_OFFSET 0x1000
+#define MME3_SBB_CTI1_SECTION 0x1000
+#define mmMME3_SBB_BMON0_BASE 0x7FFE0C8000ull
+#define MME3_SBB_BMON0_MAX_OFFSET 0x1000
+#define MME3_SBB_BMON0_SECTION 0x1000
+#define mmMME3_SBB_BMON1_BASE 0x7FFE0C9000ull
+#define MME3_SBB_BMON1_MAX_OFFSET 0x1000
+#define MME3_SBB_BMON1_SECTION 0x38000
+#define mmMME4_RTR_FUNNEL_BASE 0x7FFE101000ull
+#define MME4_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME4_RTR_FUNNEL_SECTION 0x1000
+#define mmMME4_WACS_STM_BASE 0x7FFE102000ull
+#define MME4_WACS_STM_MAX_OFFSET 0x1000
+#define MME4_WACS_STM_SECTION 0x1000
+#define mmMME4_WACS_CTI_BASE 0x7FFE103000ull
+#define MME4_WACS_CTI_MAX_OFFSET 0x1000
+#define MME4_WACS_CTI_SECTION 0x1000
+#define mmMME4_WACS_ETF_BASE 0x7FFE104000ull
+#define MME4_WACS_ETF_MAX_OFFSET 0x1000
+#define MME4_WACS_ETF_SECTION 0x1000
+#define mmMME4_WACS_SPMU_BASE 0x7FFE105000ull
+#define MME4_WACS_SPMU_MAX_OFFSET 0x1000
+#define MME4_WACS_SPMU_SECTION 0x1000
+#define mmMME4_WACS_CTI0_BASE 0x7FFE106000ull
+#define MME4_WACS_CTI0_MAX_OFFSET 0x1000
+#define MME4_WACS_CTI0_SECTION 0x1000
+#define mmMME4_WACS_CTI1_BASE 0x7FFE107000ull
+#define MME4_WACS_CTI1_MAX_OFFSET 0x1000
+#define MME4_WACS_CTI1_SECTION 0x1000
+#define mmMME4_WACS_BMON0_BASE 0x7FFE108000ull
+#define MME4_WACS_BMON0_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON0_SECTION 0x1000
+#define mmMME4_WACS_BMON1_BASE 0x7FFE109000ull
+#define MME4_WACS_BMON1_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON1_SECTION 0x1000
+#define mmMME4_WACS_BMON2_BASE 0x7FFE10A000ull
+#define MME4_WACS_BMON2_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON2_SECTION 0x1000
+#define mmMME4_WACS_BMON3_BASE 0x7FFE10B000ull
+#define MME4_WACS_BMON3_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON3_SECTION 0x1000
+#define mmMME4_WACS_BMON4_BASE 0x7FFE10C000ull
+#define MME4_WACS_BMON4_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON4_SECTION 0x1000
+#define mmMME4_WACS_BMON5_BASE 0x7FFE10D000ull
+#define MME4_WACS_BMON5_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON5_SECTION 0x1000
+#define mmMME4_WACS_BMON6_BASE 0x7FFE10E000ull
+#define MME4_WACS_BMON6_MAX_OFFSET 0x1000
+#define MME4_WACS_BMON6_SECTION 0x4000
+#define mmMME4_WACS2_STM_BASE 0x7FFE112000ull
+#define MME4_WACS2_STM_MAX_OFFSET 0x1000
+#define MME4_WACS2_STM_SECTION 0x1000
+#define mmMME4_WACS2_CTI_BASE 0x7FFE113000ull
+#define MME4_WACS2_CTI_MAX_OFFSET 0x1000
+#define MME4_WACS2_CTI_SECTION 0x1000
+#define mmMME4_WACS2_ETF_BASE 0x7FFE114000ull
+#define MME4_WACS2_ETF_MAX_OFFSET 0x1000
+#define MME4_WACS2_ETF_SECTION 0x1000
+#define mmMME4_WACS2_SPMU_BASE 0x7FFE115000ull
+#define MME4_WACS2_SPMU_MAX_OFFSET 0x1000
+#define MME4_WACS2_SPMU_SECTION 0x1000
+#define mmMME4_WACS2_CTI0_BASE 0x7FFE116000ull
+#define MME4_WACS2_CTI0_MAX_OFFSET 0x1000
+#define MME4_WACS2_CTI0_SECTION 0x1000
+#define mmMME4_WACS2_CTI1_BASE 0x7FFE117000ull
+#define MME4_WACS2_CTI1_MAX_OFFSET 0x1000
+#define MME4_WACS2_CTI1_SECTION 0x1000
+#define mmMME4_WACS2_BMON0_BASE 0x7FFE118000ull
+#define MME4_WACS2_BMON0_MAX_OFFSET 0x1000
+#define MME4_WACS2_BMON0_SECTION 0x1000
+#define mmMME4_WACS2_BMON1_BASE 0x7FFE119000ull
+#define MME4_WACS2_BMON1_MAX_OFFSET 0x1000
+#define MME4_WACS2_BMON1_SECTION 0x1000
+#define mmMME4_WACS2_BMON2_BASE 0x7FFE11A000ull
+#define MME4_WACS2_BMON2_MAX_OFFSET 0x1000
+#define MME4_WACS2_BMON2_SECTION 0x27000
+#define mmMME5_RTR_FUNNEL_BASE 0x7FFE141000ull
+#define MME5_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define MME5_RTR_FUNNEL_SECTION 0x2BF000
+#define mmDMA_ROM_TABLE_BASE 0x7FFE400000ull
+#define DMA_ROM_TABLE_MAX_OFFSET 0x1000
+#define DMA_ROM_TABLE_SECTION 0x1000
+#define mmDMA_CH_0_CS_STM_BASE 0x7FFE401000ull
+#define DMA_CH_0_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_STM_SECTION 0x1000
+#define mmDMA_CH_0_CS_CTI_BASE 0x7FFE402000ull
+#define DMA_CH_0_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_0_CS_ETF_BASE 0x7FFE403000ull
+#define DMA_CH_0_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_0_CS_SPMU_BASE 0x7FFE404000ull
+#define DMA_CH_0_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_0_BMON_CTI_BASE 0x7FFE405000ull
+#define DMA_CH_0_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_0_USER_CTI_BASE 0x7FFE406000ull
+#define DMA_CH_0_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_0_BMON_0_BASE 0x7FFE407000ull
+#define DMA_CH_0_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_0_SECTION 0x1000
+#define mmDMA_CH_0_BMON_1_BASE 0x7FFE408000ull
+#define DMA_CH_0_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_1_SECTION 0x9000
+#define mmDMA_CH_1_CS_STM_BASE 0x7FFE411000ull
+#define DMA_CH_1_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_STM_SECTION 0x1000
+#define mmDMA_CH_1_CS_CTI_BASE 0x7FFE412000ull
+#define DMA_CH_1_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_1_CS_ETF_BASE 0x7FFE413000ull
+#define DMA_CH_1_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_1_CS_SPMU_BASE 0x7FFE414000ull
+#define DMA_CH_1_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_1_BMON_CTI_BASE 0x7FFE415000ull
+#define DMA_CH_1_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_1_USER_CTI_BASE 0x7FFE416000ull
+#define DMA_CH_1_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_1_BMON_0_BASE 0x7FFE417000ull
+#define DMA_CH_1_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_0_SECTION 0x1000
+#define mmDMA_CH_1_BMON_1_BASE 0x7FFE418000ull
+#define DMA_CH_1_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_1_SECTION 0x9000
+#define mmDMA_CH_2_CS_STM_BASE 0x7FFE421000ull
+#define DMA_CH_2_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_STM_SECTION 0x1000
+#define mmDMA_CH_2_CS_CTI_BASE 0x7FFE422000ull
+#define DMA_CH_2_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_2_CS_ETF_BASE 0x7FFE423000ull
+#define DMA_CH_2_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_2_CS_SPMU_BASE 0x7FFE424000ull
+#define DMA_CH_2_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_2_BMON_CTI_BASE 0x7FFE425000ull
+#define DMA_CH_2_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_2_USER_CTI_BASE 0x7FFE426000ull
+#define DMA_CH_2_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_2_BMON_0_BASE 0x7FFE427000ull
+#define DMA_CH_2_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_0_SECTION 0x1000
+#define mmDMA_CH_2_BMON_1_BASE 0x7FFE428000ull
+#define DMA_CH_2_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_1_SECTION 0x9000
+#define mmDMA_CH_3_CS_STM_BASE 0x7FFE431000ull
+#define DMA_CH_3_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_STM_SECTION 0x1000
+#define mmDMA_CH_3_CS_CTI_BASE 0x7FFE432000ull
+#define DMA_CH_3_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_3_CS_ETF_BASE 0x7FFE433000ull
+#define DMA_CH_3_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_3_CS_SPMU_BASE 0x7FFE434000ull
+#define DMA_CH_3_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_3_BMON_CTI_BASE 0x7FFE435000ull
+#define DMA_CH_3_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_3_USER_CTI_BASE 0x7FFE436000ull
+#define DMA_CH_3_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_3_BMON_0_BASE 0x7FFE437000ull
+#define DMA_CH_3_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_0_SECTION 0x1000
+#define mmDMA_CH_3_BMON_1_BASE 0x7FFE438000ull
+#define DMA_CH_3_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_1_SECTION 0x9000
+#define mmDMA_CH_4_CS_STM_BASE 0x7FFE441000ull
+#define DMA_CH_4_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_STM_SECTION 0x1000
+#define mmDMA_CH_4_CS_CTI_BASE 0x7FFE442000ull
+#define DMA_CH_4_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_4_CS_ETF_BASE 0x7FFE443000ull
+#define DMA_CH_4_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_4_CS_SPMU_BASE 0x7FFE444000ull
+#define DMA_CH_4_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_4_BMON_CTI_BASE 0x7FFE445000ull
+#define DMA_CH_4_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_4_USER_CTI_BASE 0x7FFE446000ull
+#define DMA_CH_4_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_4_BMON_0_BASE 0x7FFE447000ull
+#define DMA_CH_4_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_0_SECTION 0x1000
+#define mmDMA_CH_4_BMON_1_BASE 0x7FFE448000ull
+#define DMA_CH_4_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_1_SECTION 0x8000
+#define mmDMA_CH_FUNNEL_6_1_BASE 0x7FFE450000ull
+#define DMA_CH_FUNNEL_6_1_MAX_OFFSET 0x1000
+#define DMA_CH_FUNNEL_6_1_SECTION 0x11000
+#define mmDMA_MACRO_CS_STM_BASE 0x7FFE461000ull
+#define DMA_MACRO_CS_STM_MAX_OFFSET 0x1000
+#define DMA_MACRO_CS_STM_SECTION 0x1000
+#define mmDMA_MACRO_CS_CTI_BASE 0x7FFE462000ull
+#define DMA_MACRO_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_MACRO_CS_CTI_SECTION 0x1000
+#define mmDMA_MACRO_CS_ETF_BASE 0x7FFE463000ull
+#define DMA_MACRO_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_MACRO_CS_ETF_SECTION 0x1000
+#define mmDMA_MACRO_CS_SPMU_BASE 0x7FFE464000ull
+#define DMA_MACRO_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_MACRO_CS_SPMU_SECTION 0x1000
+#define mmDMA_MACRO_BMON_CTI_BASE 0x7FFE465000ull
+#define DMA_MACRO_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_CTI_SECTION 0x1000
+#define mmDMA_MACRO_USER_CTI_BASE 0x7FFE466000ull
+#define DMA_MACRO_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_MACRO_USER_CTI_SECTION 0x1000
+#define mmDMA_MACRO_BMON_0_BASE 0x7FFE467000ull
+#define DMA_MACRO_BMON_0_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_0_SECTION 0x1000
+#define mmDMA_MACRO_BMON_1_BASE 0x7FFE468000ull
+#define DMA_MACRO_BMON_1_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_1_SECTION 0x1000
+#define mmDMA_MACRO_BMON_2_BASE 0x7FFE469000ull
+#define DMA_MACRO_BMON_2_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_2_SECTION 0x1000
+#define mmDMA_MACRO_BMON_3_BASE 0x7FFE46A000ull
+#define DMA_MACRO_BMON_3_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_3_SECTION 0x1000
+#define mmDMA_MACRO_BMON_4_BASE 0x7FFE46B000ull
+#define DMA_MACRO_BMON_4_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_4_SECTION 0x1000
+#define mmDMA_MACRO_BMON_5_BASE 0x7FFE46C000ull
+#define DMA_MACRO_BMON_5_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_5_SECTION 0x1000
+#define mmDMA_MACRO_BMON_6_BASE 0x7FFE46D000ull
+#define DMA_MACRO_BMON_6_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_6_SECTION 0x1000
+#define mmDMA_MACRO_BMON_7_BASE 0x7FFE46E000ull
+#define DMA_MACRO_BMON_7_MAX_OFFSET 0x1000
+#define DMA_MACRO_BMON_7_SECTION 0x2000
+#define mmDMA_MACRO_FUNNEL_3_1_BASE 0x7FFE470000ull
+#define DMA_MACRO_FUNNEL_3_1_MAX_OFFSET 0x1000
+#define DMA_MACRO_FUNNEL_3_1_SECTION 0x10000
+#define mmCPU_ROM_TABLE_BASE 0x7FFE480000ull
+#define CPU_ROM_TABLE_MAX_OFFSET 0x1000
+#define CPU_ROM_TABLE_SECTION 0x1000
+#define mmCPU_ETF_0_BASE 0x7FFE481000ull
+#define CPU_ETF_0_MAX_OFFSET 0x1000
+#define CPU_ETF_0_SECTION 0x1000
+#define mmCPU_ETF_1_BASE 0x7FFE482000ull
+#define CPU_ETF_1_MAX_OFFSET 0x1000
+#define CPU_ETF_1_SECTION 0x2000
+#define mmCPU_CTI_BASE 0x7FFE484000ull
+#define CPU_CTI_MAX_OFFSET 0x1000
+#define CPU_CTI_SECTION 0x1000
+#define mmCPU_FUNNEL_BASE 0x7FFE485000ull
+#define CPU_FUNNEL_MAX_OFFSET 0x1000
+#define CPU_FUNNEL_SECTION 0x1000
+#define mmCPU_STM_BASE 0x7FFE486000ull
+#define CPU_STM_MAX_OFFSET 0x1000
+#define CPU_STM_SECTION 0x1000
+#define mmCPU_CTI_TRACE_BASE 0x7FFE487000ull
+#define CPU_CTI_TRACE_MAX_OFFSET 0x1000
+#define CPU_CTI_TRACE_SECTION 0x1000
+#define mmCPU_ETF_TRACE_BASE 0x7FFE488000ull
+#define CPU_ETF_TRACE_MAX_OFFSET 0x1000
+#define CPU_ETF_TRACE_SECTION 0x1000
+#define mmCPU_WR_BMON_BASE 0x7FFE489000ull
+#define CPU_WR_BMON_MAX_OFFSET 0x1000
+#define CPU_WR_BMON_SECTION 0x1000
+#define mmCPU_RD_BMON_BASE 0x7FFE48A000ull
+#define CPU_RD_BMON_MAX_OFFSET 0x1000
+#define CPU_RD_BMON_SECTION 0x37000
+#define mmMMU_CS_STM_BASE 0x7FFE4C1000ull
+#define MMU_CS_STM_MAX_OFFSET 0x1000
+#define MMU_CS_STM_SECTION 0x1000
+#define mmMMU_CS_CTI_BASE 0x7FFE4C2000ull
+#define MMU_CS_CTI_MAX_OFFSET 0x1000
+#define MMU_CS_CTI_SECTION 0x1000
+#define mmMMU_CS_ETF_BASE 0x7FFE4C3000ull
+#define MMU_CS_ETF_MAX_OFFSET 0x1000
+#define MMU_CS_ETF_SECTION 0x1000
+#define mmMMU_CS_SPMU_BASE 0x7FFE4C4000ull
+#define MMU_CS_SPMU_MAX_OFFSET 0x1000
+#define MMU_CS_SPMU_SECTION 0x1000
+#define mmMMU_BMON_CTI_BASE 0x7FFE4C5000ull
+#define MMU_BMON_CTI_MAX_OFFSET 0x1000
+#define MMU_BMON_CTI_SECTION 0x1000
+#define mmMMU_USER_CTI_BASE 0x7FFE4C6000ull
+#define MMU_USER_CTI_MAX_OFFSET 0x1000
+#define MMU_USER_CTI_SECTION 0x1000
+#define mmMMU_BMON_0_BASE 0x7FFE4C7000ull
+#define MMU_BMON_0_MAX_OFFSET 0x1000
+#define MMU_BMON_0_SECTION 0x1000
+#define mmMMU_BMON_1_BASE 0x7FFE4C8000ull
+#define MMU_BMON_1_MAX_OFFSET 0x1000
+#define MMU_BMON_1_SECTION 0x338000
+#define mmCA53_BASE 0x7FFE800000ull
+#define CA53_MAX_OFFSET 0x1000
+#define CA53_SECTION 0x400000
+#define mmPCI_ROM_TABLE_BASE 0x7FFEC00000ull
+#define PCI_ROM_TABLE_MAX_OFFSET 0x1000
+#define PCI_ROM_TABLE_SECTION 0x1000
+#define mmPCIE_STM_BASE 0x7FFEC01000ull
+#define PCIE_STM_MAX_OFFSET 0x1000
+#define PCIE_STM_SECTION 0x1000
+#define mmPCIE_ETF_BASE 0x7FFEC02000ull
+#define PCIE_ETF_MAX_OFFSET 0x1000
+#define PCIE_ETF_SECTION 0x1000
+#define mmPCIE_CTI_0_BASE 0x7FFEC03000ull
+#define PCIE_CTI_0_MAX_OFFSET 0x1000
+#define PCIE_CTI_0_SECTION 0x1000
+#define mmPCIE_SPMU_BASE 0x7FFEC04000ull
+#define PCIE_SPMU_MAX_OFFSET 0x1000
+#define PCIE_SPMU_SECTION 0x1000
+#define mmPCIE_CTI_1_BASE 0x7FFEC05000ull
+#define PCIE_CTI_1_MAX_OFFSET 0x1000
+#define PCIE_CTI_1_SECTION 0x1000
+#define mmPCIE_FUNNEL_BASE 0x7FFEC06000ull
+#define PCIE_FUNNEL_MAX_OFFSET 0x1000
+#define PCIE_FUNNEL_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_WR_BASE 0x7FFEC07000ull
+#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_WR_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_RD_BASE 0x7FFEC08000ull
+#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_RD_SECTION 0x1000
+#define mmPCIE_BMON_SLV_WR_BASE 0x7FFEC09000ull
+#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_WR_SECTION 0x1000
+#define mmPCIE_BMON_SLV_RD_BASE 0x7FFEC0A000ull
+#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_RD_SECTION 0x36000
+#define mmPSOC_CTI_BASE 0x7FFEC40000ull
+#define PSOC_CTI_MAX_OFFSET 0x1000
+#define PSOC_CTI_SECTION 0x1000
+#define mmPSOC_STM_BASE 0x7FFEC41000ull
+#define PSOC_STM_MAX_OFFSET 0x1000
+#define PSOC_STM_SECTION 0x1000
+#define mmPSOC_FUNNEL_BASE 0x7FFEC42000ull
+#define PSOC_FUNNEL_MAX_OFFSET 0x1000
+#define PSOC_FUNNEL_SECTION 0x1000
+#define mmPSOC_ETR_BASE 0x7FFEC43000ull
+#define PSOC_ETR_MAX_OFFSET 0x1000
+#define PSOC_ETR_SECTION 0x1000
+#define mmPSOC_ETF_BASE 0x7FFEC44000ull
+#define PSOC_ETF_MAX_OFFSET 0x1000
+#define PSOC_ETF_SECTION 0x1000
+#define mmPSOC_TS_CTI_BASE 0x7FFEC45000ull
+#define PSOC_TS_CTI_MAX_OFFSET 0x1000
+#define PSOC_TS_CTI_SECTION 0xB000
+#define mmTOP_ROM_TABLE_BASE 0x7FFEC50000ull
+#define TOP_ROM_TABLE_MAX_OFFSET 0x1000
+#define TOP_ROM_TABLE_SECTION 0x1F0000
+#define mmTPC1_RTR_FUNNEL_BASE 0x7FFEE40000ull
+#define TPC1_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC1_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC2_RTR_FUNNEL_BASE 0x7FFEE80000ull
+#define TPC2_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC2_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC3_RTR_FUNNEL_BASE 0x7FFEEC0000ull
+#define TPC3_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC3_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC4_RTR_FUNNEL_BASE 0x7FFEF00000ull
+#define TPC4_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC4_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC5_RTR_FUNNEL_BASE 0x7FFEF40000ull
+#define TPC5_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC5_RTR_FUNNEL_SECTION 0x40000
+#define mmTPC6_RTR_FUNNEL_BASE 0x7FFEF80000ull
+#define TPC6_RTR_FUNNEL_MAX_OFFSET 0x1000
+#define TPC6_RTR_FUNNEL_SECTION 0x81000
+#define mmTPC0_EML_SPMU_BASE 0x7FFF001000ull
+#define TPC0_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC0_EML_SPMU_SECTION 0x1000
+#define mmTPC0_EML_ETF_BASE 0x7FFF002000ull
+#define TPC0_EML_ETF_MAX_OFFSET 0x1000
+#define TPC0_EML_ETF_SECTION 0x1000
+#define mmTPC0_EML_STM_BASE 0x7FFF003000ull
+#define TPC0_EML_STM_MAX_OFFSET 0x1000
+#define TPC0_EML_STM_SECTION 0x1000
+#define mmTPC0_EML_ETM_R4_BASE 0x7FFF004000ull
+#define TPC0_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC0_EML_ETM_R4_SECTION 0x1000
+#define mmTPC0_EML_CTI_BASE 0x7FFF005000ull
+#define TPC0_EML_CTI_MAX_OFFSET 0x1000
+#define TPC0_EML_CTI_SECTION 0x1000
+#define mmTPC0_EML_FUNNEL_BASE 0x7FFF006000ull
+#define TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC0_EML_FUNNEL_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_0_BASE 0x7FFF007000ull
+#define TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_1_BASE 0x7FFF008000ull
+#define TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_2_BASE 0x7FFF009000ull
+#define TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_3_BASE 0x7FFF00A000ull
+#define TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC0_EML_CFG_BASE 0x7FFF040000ull
+#define TPC0_EML_CFG_MAX_OFFSET 0x338
+#define TPC0_EML_CFG_SECTION 0x1BF000
+#define mmTPC0_EML_CS_BASE 0x7FFF1FF000ull
+#define TPC0_EML_CS_MAX_OFFSET 0x1000
+#define TPC0_EML_CS_SECTION 0x2000
+#define mmTPC1_EML_SPMU_BASE 0x7FFF201000ull
+#define TPC1_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC1_EML_SPMU_SECTION 0x1000
+#define mmTPC1_EML_ETF_BASE 0x7FFF202000ull
+#define TPC1_EML_ETF_MAX_OFFSET 0x1000
+#define TPC1_EML_ETF_SECTION 0x1000
+#define mmTPC1_EML_STM_BASE 0x7FFF203000ull
+#define TPC1_EML_STM_MAX_OFFSET 0x1000
+#define TPC1_EML_STM_SECTION 0x1000
+#define mmTPC1_EML_ETM_R4_BASE 0x7FFF204000ull
+#define TPC1_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC1_EML_ETM_R4_SECTION 0x1000
+#define mmTPC1_EML_CTI_BASE 0x7FFF205000ull
+#define TPC1_EML_CTI_MAX_OFFSET 0x1000
+#define TPC1_EML_CTI_SECTION 0x1000
+#define mmTPC1_EML_FUNNEL_BASE 0x7FFF206000ull
+#define TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC1_EML_FUNNEL_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_0_BASE 0x7FFF207000ull
+#define TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_1_BASE 0x7FFF208000ull
+#define TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_2_BASE 0x7FFF209000ull
+#define TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_3_BASE 0x7FFF20A000ull
+#define TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC1_EML_CFG_BASE 0x7FFF240000ull
+#define TPC1_EML_CFG_MAX_OFFSET 0x338
+#define TPC1_EML_CFG_SECTION 0x1BF000
+#define mmTPC1_EML_CS_BASE 0x7FFF3FF000ull
+#define TPC1_EML_CS_MAX_OFFSET 0x1000
+#define TPC1_EML_CS_SECTION 0x2000
+#define mmTPC2_EML_SPMU_BASE 0x7FFF401000ull
+#define TPC2_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC2_EML_SPMU_SECTION 0x1000
+#define mmTPC2_EML_ETF_BASE 0x7FFF402000ull
+#define TPC2_EML_ETF_MAX_OFFSET 0x1000
+#define TPC2_EML_ETF_SECTION 0x1000
+#define mmTPC2_EML_STM_BASE 0x7FFF403000ull
+#define TPC2_EML_STM_MAX_OFFSET 0x1000
+#define TPC2_EML_STM_SECTION 0x1000
+#define mmTPC2_EML_ETM_R4_BASE 0x7FFF404000ull
+#define TPC2_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC2_EML_ETM_R4_SECTION 0x1000
+#define mmTPC2_EML_CTI_BASE 0x7FFF405000ull
+#define TPC2_EML_CTI_MAX_OFFSET 0x1000
+#define TPC2_EML_CTI_SECTION 0x1000
+#define mmTPC2_EML_FUNNEL_BASE 0x7FFF406000ull
+#define TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC2_EML_FUNNEL_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_0_BASE 0x7FFF407000ull
+#define TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_1_BASE 0x7FFF408000ull
+#define TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_2_BASE 0x7FFF409000ull
+#define TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_3_BASE 0x7FFF40A000ull
+#define TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC2_EML_CFG_BASE 0x7FFF440000ull
+#define TPC2_EML_CFG_MAX_OFFSET 0x338
+#define TPC2_EML_CFG_SECTION 0x1BF000
+#define mmTPC2_EML_CS_BASE 0x7FFF5FF000ull
+#define TPC2_EML_CS_MAX_OFFSET 0x1000
+#define TPC2_EML_CS_SECTION 0x2000
+#define mmTPC3_EML_SPMU_BASE 0x7FFF601000ull
+#define TPC3_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC3_EML_SPMU_SECTION 0x1000
+#define mmTPC3_EML_ETF_BASE 0x7FFF602000ull
+#define TPC3_EML_ETF_MAX_OFFSET 0x1000
+#define TPC3_EML_ETF_SECTION 0x1000
+#define mmTPC3_EML_STM_BASE 0x7FFF603000ull
+#define TPC3_EML_STM_MAX_OFFSET 0x1000
+#define TPC3_EML_STM_SECTION 0x1000
+#define mmTPC3_EML_ETM_R4_BASE 0x7FFF604000ull
+#define TPC3_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC3_EML_ETM_R4_SECTION 0x1000
+#define mmTPC3_EML_CTI_BASE 0x7FFF605000ull
+#define TPC3_EML_CTI_MAX_OFFSET 0x1000
+#define TPC3_EML_CTI_SECTION 0x1000
+#define mmTPC3_EML_FUNNEL_BASE 0x7FFF606000ull
+#define TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC3_EML_FUNNEL_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_0_BASE 0x7FFF607000ull
+#define TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_1_BASE 0x7FFF608000ull
+#define TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_2_BASE 0x7FFF609000ull
+#define TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_3_BASE 0x7FFF60A000ull
+#define TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC3_EML_CFG_BASE 0x7FFF640000ull
+#define TPC3_EML_CFG_MAX_OFFSET 0x338
+#define TPC3_EML_CFG_SECTION 0x1BF000
+#define mmTPC3_EML_CS_BASE 0x7FFF7FF000ull
+#define TPC3_EML_CS_MAX_OFFSET 0x1000
+#define TPC3_EML_CS_SECTION 0x2000
+#define mmTPC4_EML_SPMU_BASE 0x7FFF801000ull
+#define TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC4_EML_SPMU_SECTION 0x1000
+#define mmTPC4_EML_ETF_BASE 0x7FFF802000ull
+#define TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define TPC4_EML_ETF_SECTION 0x1000
+#define mmTPC4_EML_STM_BASE 0x7FFF803000ull
+#define TPC4_EML_STM_MAX_OFFSET 0x1000
+#define TPC4_EML_STM_SECTION 0x1000
+#define mmTPC4_EML_ETM_R4_BASE 0x7FFF804000ull
+#define TPC4_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC4_EML_ETM_R4_SECTION 0x1000
+#define mmTPC4_EML_CTI_BASE 0x7FFF805000ull
+#define TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define TPC4_EML_CTI_SECTION 0x1000
+#define mmTPC4_EML_FUNNEL_BASE 0x7FFF806000ull
+#define TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_0_BASE 0x7FFF807000ull
+#define TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_1_BASE 0x7FFF808000ull
+#define TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_2_BASE 0x7FFF809000ull
+#define TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_3_BASE 0x7FFF80A000ull
+#define TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC4_EML_CFG_BASE 0x7FFF840000ull
+#define TPC4_EML_CFG_MAX_OFFSET 0x338
+#define TPC4_EML_CFG_SECTION 0x1BF000
+#define mmTPC4_EML_CS_BASE 0x7FFF9FF000ull
+#define TPC4_EML_CS_MAX_OFFSET 0x1000
+#define TPC4_EML_CS_SECTION 0x2000
+#define mmTPC5_EML_SPMU_BASE 0x7FFFA01000ull
+#define TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC5_EML_SPMU_SECTION 0x1000
+#define mmTPC5_EML_ETF_BASE 0x7FFFA02000ull
+#define TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define TPC5_EML_ETF_SECTION 0x1000
+#define mmTPC5_EML_STM_BASE 0x7FFFA03000ull
+#define TPC5_EML_STM_MAX_OFFSET 0x1000
+#define TPC5_EML_STM_SECTION 0x1000
+#define mmTPC5_EML_ETM_R4_BASE 0x7FFFA04000ull
+#define TPC5_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC5_EML_ETM_R4_SECTION 0x1000
+#define mmTPC5_EML_CTI_BASE 0x7FFFA05000ull
+#define TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define TPC5_EML_CTI_SECTION 0x1000
+#define mmTPC5_EML_FUNNEL_BASE 0x7FFFA06000ull
+#define TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_0_BASE 0x7FFFA07000ull
+#define TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_1_BASE 0x7FFFA08000ull
+#define TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_2_BASE 0x7FFFA09000ull
+#define TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000ull
+#define TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC5_EML_CFG_BASE 0x7FFFA40000ull
+#define TPC5_EML_CFG_MAX_OFFSET 0x338
+#define TPC5_EML_CFG_SECTION 0x1BF000
+#define mmTPC5_EML_CS_BASE 0x7FFFBFF000ull
+#define TPC5_EML_CS_MAX_OFFSET 0x1000
+#define TPC5_EML_CS_SECTION 0x2000
+#define mmTPC6_EML_SPMU_BASE 0x7FFFC01000ull
+#define TPC6_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC6_EML_SPMU_SECTION 0x1000
+#define mmTPC6_EML_ETF_BASE 0x7FFFC02000ull
+#define TPC6_EML_ETF_MAX_OFFSET 0x1000
+#define TPC6_EML_ETF_SECTION 0x1000
+#define mmTPC6_EML_STM_BASE 0x7FFFC03000ull
+#define TPC6_EML_STM_MAX_OFFSET 0x1000
+#define TPC6_EML_STM_SECTION 0x1000
+#define mmTPC6_EML_ETM_R4_BASE 0x7FFFC04000ull
+#define TPC6_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC6_EML_ETM_R4_SECTION 0x1000
+#define mmTPC6_EML_CTI_BASE 0x7FFFC05000ull
+#define TPC6_EML_CTI_MAX_OFFSET 0x1000
+#define TPC6_EML_CTI_SECTION 0x1000
+#define mmTPC6_EML_FUNNEL_BASE 0x7FFFC06000ull
+#define TPC6_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC6_EML_FUNNEL_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_0_BASE 0x7FFFC07000ull
+#define TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_1_BASE 0x7FFFC08000ull
+#define TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_2_BASE 0x7FFFC09000ull
+#define TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_3_BASE 0x7FFFC0A000ull
+#define TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC6_EML_CFG_BASE 0x7FFFC40000ull
+#define TPC6_EML_CFG_MAX_OFFSET 0x338
+#define TPC6_EML_CFG_SECTION 0x1BF000
+#define mmTPC6_EML_CS_BASE 0x7FFFDFF000ull
+#define TPC6_EML_CS_MAX_OFFSET 0x1000
+#define TPC6_EML_CS_SECTION 0x2000
+#define mmTPC7_EML_SPMU_BASE 0x7FFFE01000ull
+#define TPC7_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC7_EML_SPMU_SECTION 0x1000
+#define mmTPC7_EML_ETF_BASE 0x7FFFE02000ull
+#define TPC7_EML_ETF_MAX_OFFSET 0x1000
+#define TPC7_EML_ETF_SECTION 0x1000
+#define mmTPC7_EML_STM_BASE 0x7FFFE03000ull
+#define TPC7_EML_STM_MAX_OFFSET 0x1000
+#define TPC7_EML_STM_SECTION 0x1000
+#define mmTPC7_EML_ETM_R4_BASE 0x7FFFE04000ull
+#define TPC7_EML_ETM_R4_MAX_OFFSET 0x0
+#define TPC7_EML_ETM_R4_SECTION 0x1000
+#define mmTPC7_EML_CTI_BASE 0x7FFFE05000ull
+#define TPC7_EML_CTI_MAX_OFFSET 0x1000
+#define TPC7_EML_CTI_SECTION 0x1000
+#define mmTPC7_EML_FUNNEL_BASE 0x7FFFE06000ull
+#define TPC7_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC7_EML_FUNNEL_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_0_BASE 0x7FFFE07000ull
+#define TPC7_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_1_BASE 0x7FFFE08000ull
+#define TPC7_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_2_BASE 0x7FFFE09000ull
+#define TPC7_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_3_BASE 0x7FFFE0A000ull
+#define TPC7_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC7_EML_CFG_BASE 0x7FFFE40000ull
+#define TPC7_EML_CFG_MAX_OFFSET 0x338
+#define TPC7_EML_CFG_SECTION 0x1BF000
+#define mmTPC7_EML_CS_BASE 0x7FFFFFF000ull
+#define TPC7_EML_CS_MAX_OFFSET 0x1000
+
+#endif /* GOYA_BLOCKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
new file mode 100644
index 000000000000..a161ecfe74de
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ASIC_REG_GOYA_MASKS_H_
+#define ASIC_REG_GOYA_MASKS_H_
+
+#include "goya_regs.h"
+
+/* Useful masks for bits in various registers */
+#define QMAN_DMA_ENABLE (\
+ (1 << DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT))
+
+#define QMAN_DMA_FULLY_TRUSTED (\
+ (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define QMAN_DMA_PARTLY_TRUSTED (\
+ (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define QMAN_DMA_STOP (\
+ (1 << DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT))
+
+#define QMAN_DMA_IS_STOPPED (\
+ (1 << DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT))
+
+#define QMAN_DMA_ERR_MSG_EN (\
+ (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define QMAN_MME_ENABLE (\
+ (1 << MME_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define CMDQ_MME_ENABLE (\
+ (1 << MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_MME_STOP (\
+ (1 << MME_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \
+ (1 << MME_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << MME_QM_GLBL_CFG1_CP_STOP_SHIFT))
+
+#define CMDQ_MME_STOP (\
+ (1 << MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT))
+
+#define QMAN_MME_ERR_MSG_EN (\
+ (1 << MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define CMDQ_MME_ERR_MSG_EN (\
+ (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define QMAN_MME_ERR_PROT (\
+ (1 << MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define CMDQ_MME_ERR_PROT (\
+ (1 << MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define QMAN_TPC_ENABLE (\
+ (1 << TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define CMDQ_TPC_ENABLE (\
+ (1 << TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_TPC_STOP (\
+ (1 << TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \
+ (1 << TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT))
+
+#define CMDQ_TPC_STOP (\
+ (1 << TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT))
+
+#define QMAN_TPC_ERR_MSG_EN (\
+ (1 << TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define CMDQ_TPC_ERR_MSG_EN (\
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+
+#define QMAN_TPC_ERR_PROT (\
+ (1 << TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+#define CMDQ_TPC_ERR_PROT (\
+ (1 << TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \
+ (1 << TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT))
+
+/* RESETS */
+#define DMA_MME_TPC_RESET (\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT)
+
+#define RESET_ALL (\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT |\
+ PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT |\
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT)
+
+#define CA53_RESET (\
+ (~\
+ (1 << PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT)\
+ ) & 0x7FFFFF)
+
+#define CPU_RESET_ASSERT (\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
+
+#define CPU_RESET_CORE0_DEASSERT (\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
+
+/* PCI CONFIGURATION SPACE */
+#define mmPCI_CONFIG_ELBI_ADDR 0xFF0
+#define mmPCI_CONFIG_ELBI_DATA 0xFF4
+#define mmPCI_CONFIG_ELBI_CTRL 0xFF8
+#define PCI_CONFIG_ELBI_CTRL_WRITE (1 << 31)
+
+#define mmPCI_CONFIG_ELBI_STS 0xFFC
+#define PCI_CONFIG_ELBI_STS_ERR (1 << 30)
+#define PCI_CONFIG_ELBI_STS_DONE (1 << 31)
+#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \
+ PCI_CONFIG_ELBI_STS_DONE)
+
+#define GOYA_IRQ_HBW_ID_MASK 0x1FFF
+#define GOYA_IRQ_HBW_ID_SHIFT 0
+#define GOYA_IRQ_HBW_INTERNAL_ID_MASK 0xE000
+#define GOYA_IRQ_HBW_INTERNAL_ID_SHIFT 13
+#define GOYA_IRQ_HBW_AGENT_ID_MASK 0x1F0000
+#define GOYA_IRQ_HBW_AGENT_ID_SHIFT 16
+#define GOYA_IRQ_HBW_Y_MASK 0xE00000
+#define GOYA_IRQ_HBW_Y_SHIFT 21
+#define GOYA_IRQ_HBW_X_MASK 0x7000000
+#define GOYA_IRQ_HBW_X_SHIFT 24
+#define GOYA_IRQ_LBW_ID_MASK 0xFF
+#define GOYA_IRQ_LBW_ID_SHIFT 0
+#define GOYA_IRQ_LBW_INTERNAL_ID_MASK 0x700
+#define GOYA_IRQ_LBW_INTERNAL_ID_SHIFT 8
+#define GOYA_IRQ_LBW_AGENT_ID_MASK 0xF800
+#define GOYA_IRQ_LBW_AGENT_ID_SHIFT 11
+#define GOYA_IRQ_LBW_Y_MASK 0x70000
+#define GOYA_IRQ_LBW_Y_SHIFT 16
+#define GOYA_IRQ_LBW_X_MASK 0x380000
+#define GOYA_IRQ_LBW_X_SHIFT 19
+
+#define DMA_QM_IDLE_MASK (DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK | \
+ DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK | \
+ DMA_QM_0_GLBL_STS0_CP_IDLE_MASK | \
+ DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK)
+
+#define TPC_QM_IDLE_MASK (TPC0_QM_GLBL_STS0_PQF_IDLE_MASK | \
+ TPC0_QM_GLBL_STS0_CQF_IDLE_MASK | \
+ TPC0_QM_GLBL_STS0_CP_IDLE_MASK)
+
+#define TPC_CMDQ_IDLE_MASK (TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \
+ TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK)
+
+#define TPC_CFG_IDLE_MASK (TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \
+ TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \
+ TPC0_CFG_STATUS_IQ_EMPTY_MASK | \
+ TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK)
+
+#define MME_QM_IDLE_MASK (MME_QM_GLBL_STS0_PQF_IDLE_MASK | \
+ MME_QM_GLBL_STS0_CQF_IDLE_MASK | \
+ MME_QM_GLBL_STS0_CP_IDLE_MASK)
+
+#define MME_CMDQ_IDLE_MASK (MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \
+ MME_CMDQ_GLBL_STS0_CP_IDLE_MASK)
+
+#define MME_ARCH_IDLE_MASK (MME_ARCH_STATUS_SB_A_EMPTY_MASK | \
+ MME_ARCH_STATUS_SB_B_EMPTY_MASK | \
+ MME_ARCH_STATUS_SB_CIN_EMPTY_MASK | \
+ MME_ARCH_STATUS_SB_COUT_EMPTY_MASK)
+
+#define MME_SHADOW_IDLE_MASK (MME_SHADOW_0_STATUS_A_MASK | \
+ MME_SHADOW_0_STATUS_B_MASK | \
+ MME_SHADOW_0_STATUS_CIN_MASK | \
+ MME_SHADOW_0_STATUS_COUT_MASK | \
+ MME_SHADOW_0_STATUS_TE_MASK | \
+ MME_SHADOW_0_STATUS_LD_MASK | \
+ MME_SHADOW_0_STATUS_ST_MASK)
+
+#define TPC1_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC2_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC3_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC4_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC5_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC6_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+#define TPC7_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT
+
+#define DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+
+#endif /* ASIC_REG_GOYA_MASKS_H_ */
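
The shift/mask pairs in goya_masks.h follow the usual bitfield pattern: a field is read with (value & FIELD_MASK) >> FIELD_SHIFT and tested or composed against the combined masks (QMAN_*_ENABLE, DMA_QM_IDLE_MASK, and so on). The short sketch below illustrates that pattern with the GOYA_IRQ_HBW_* and DMA_QM_IDLE_MASK definitions from this file; the helper names decode_hbw_irq() and dma_qm_is_idle(), the printf reporting, and the choice to pass the raw register words in as plain parameters (rather than assuming any particular MMIO accessor) are illustrative assumptions, not code taken from the driver.

#include <stdint.h>
#include <stdio.h>

#include "goya_masks.h"	/* GOYA_IRQ_HBW_* and DMA_QM_IDLE_MASK; assumed to be on the include path */

/* Split a raw high-bandwidth IRQ cause word into its fields using the
 * GOYA_IRQ_HBW_* mask/shift pairs: field = (value & MASK) >> SHIFT.
 */
static void decode_hbw_irq(uint32_t cause)
{
	uint32_t id = (cause & GOYA_IRQ_HBW_ID_MASK) >> GOYA_IRQ_HBW_ID_SHIFT;
	uint32_t agent = (cause & GOYA_IRQ_HBW_AGENT_ID_MASK) >>
				GOYA_IRQ_HBW_AGENT_ID_SHIFT;
	uint32_t x = (cause & GOYA_IRQ_HBW_X_MASK) >> GOYA_IRQ_HBW_X_SHIFT;
	uint32_t y = (cause & GOYA_IRQ_HBW_Y_MASK) >> GOYA_IRQ_HBW_Y_SHIFT;

	printf("HBW IRQ: id=%u agent=%u router=(x=%u, y=%u)\n",
		(unsigned int)id, (unsigned int)agent,
		(unsigned int)x, (unsigned int)y);
}

/* A DMA queue manager is idle only when every IDLE bit in GLBL_STS0 is set,
 * i.e. the status word masked by DMA_QM_IDLE_MASK equals the mask itself.
 */
static int dma_qm_is_idle(uint32_t glbl_sts0)
{
	return (glbl_sts0 & DMA_QM_IDLE_MASK) == DMA_QM_IDLE_MASK;
}
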
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
new file mode 100644
index 000000000000..6cb0b6e54d41
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ASIC_REG_GOYA_REGS_H_
+#define ASIC_REG_GOYA_REGS_H_
+
+#include "goya_blocks.h"
+#include "stlb_regs.h"
+#include "mmu_regs.h"
+#include "pcie_aux_regs.h"
+#include "psoc_global_conf_regs.h"
+#include "psoc_spi_regs.h"
+#include "psoc_mme_pll_regs.h"
+#include "psoc_pci_pll_regs.h"
+#include "psoc_emmc_pll_regs.h"
+#include "cpu_if_regs.h"
+#include "cpu_ca53_cfg_regs.h"
+#include "cpu_pll_regs.h"
+#include "ic_pll_regs.h"
+#include "mc_pll_regs.h"
+#include "tpc_pll_regs.h"
+#include "dma_qm_0_regs.h"
+#include "dma_qm_1_regs.h"
+#include "dma_qm_2_regs.h"
+#include "dma_qm_3_regs.h"
+#include "dma_qm_4_regs.h"
+#include "dma_ch_0_regs.h"
+#include "dma_ch_1_regs.h"
+#include "dma_ch_2_regs.h"
+#include "dma_ch_3_regs.h"
+#include "dma_ch_4_regs.h"
+#include "dma_macro_regs.h"
+#include "dma_nrtr_regs.h"
+#include "pci_nrtr_regs.h"
+#include "sram_y0_x0_rtr_regs.h"
+#include "sram_y0_x1_rtr_regs.h"
+#include "sram_y0_x2_rtr_regs.h"
+#include "sram_y0_x3_rtr_regs.h"
+#include "sram_y0_x4_rtr_regs.h"
+#include "mme_regs.h"
+#include "mme_qm_regs.h"
+#include "mme_cmdq_regs.h"
+#include "mme1_rtr_regs.h"
+#include "mme2_rtr_regs.h"
+#include "mme3_rtr_regs.h"
+#include "mme4_rtr_regs.h"
+#include "mme5_rtr_regs.h"
+#include "mme6_rtr_regs.h"
+#include "tpc0_cfg_regs.h"
+#include "tpc1_cfg_regs.h"
+#include "tpc2_cfg_regs.h"
+#include "tpc3_cfg_regs.h"
+#include "tpc4_cfg_regs.h"
+#include "tpc5_cfg_regs.h"
+#include "tpc6_cfg_regs.h"
+#include "tpc7_cfg_regs.h"
+#include "tpc0_qm_regs.h"
+#include "tpc1_qm_regs.h"
+#include "tpc2_qm_regs.h"
+#include "tpc3_qm_regs.h"
+#include "tpc4_qm_regs.h"
+#include "tpc5_qm_regs.h"
+#include "tpc6_qm_regs.h"
+#include "tpc7_qm_regs.h"
+#include "tpc0_cmdq_regs.h"
+#include "tpc1_cmdq_regs.h"
+#include "tpc2_cmdq_regs.h"
+#include "tpc3_cmdq_regs.h"
+#include "tpc4_cmdq_regs.h"
+#include "tpc5_cmdq_regs.h"
+#include "tpc6_cmdq_regs.h"
+#include "tpc7_cmdq_regs.h"
+#include "tpc0_nrtr_regs.h"
+#include "tpc1_rtr_regs.h"
+#include "tpc2_rtr_regs.h"
+#include "tpc3_rtr_regs.h"
+#include "tpc4_rtr_regs.h"
+#include "tpc5_rtr_regs.h"
+#include "tpc6_rtr_regs.h"
+#include "tpc7_nrtr_regs.h"
+#include "tpc0_eml_cfg_regs.h"
+
+#include "psoc_global_conf_masks.h"
+#include "dma_macro_masks.h"
+#include "dma_qm_0_masks.h"
+#include "tpc0_qm_masks.h"
+#include "tpc0_cmdq_masks.h"
+#include "mme_qm_masks.h"
+#include "mme_cmdq_masks.h"
+#include "tpc0_cfg_masks.h"
+#include "tpc0_eml_cfg_masks.h"
+#include "mme1_rtr_masks.h"
+#include "tpc0_nrtr_masks.h"
+#include "dma_nrtr_masks.h"
+#include "pci_nrtr_masks.h"
+#include "stlb_masks.h"
+#include "cpu_ca53_cfg_masks.h"
+#include "mmu_masks.h"
+#include "mme_masks.h"
+
+#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
+#define mmPCIE_DBI_MSIX_DOORBELL_OFF 0xC02948
+
+#define mmSYNC_MNGR_MON_PAY_ADDRL_0 0x113000
+#define mmSYNC_MNGR_SOB_OBJ_0 0x112000
+#define mmSYNC_MNGR_SOB_OBJ_1000 0x112FA0
+#define mmSYNC_MNGR_SOB_OBJ_1007 0x112FBC
+#define mmSYNC_MNGR_SOB_OBJ_1023 0x112FFC
+#define mmSYNC_MNGR_MON_STATUS_0 0x114000
+#define mmSYNC_MNGR_MON_STATUS_255 0x1143FC
+
+#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x800040
+
+#endif /* ASIC_REG_GOYA_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
new file mode 100644
index 000000000000..0a743817aad7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_IC_PLL_REGS_H_
+#define ASIC_REG_IC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * IC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmIC_PLL_NR 0x4A3100
+
+#define mmIC_PLL_NF 0x4A3104
+
+#define mmIC_PLL_OD 0x4A3108
+
+#define mmIC_PLL_NB 0x4A310C
+
+#define mmIC_PLL_CFG 0x4A3110
+
+#define mmIC_PLL_LOSE_MASK 0x4A3120
+
+#define mmIC_PLL_LOCK_INTR 0x4A3128
+
+#define mmIC_PLL_LOCK_BYPASS 0x4A312C
+
+#define mmIC_PLL_DATA_CHNG 0x4A3130
+
+#define mmIC_PLL_RST 0x4A3134
+
+#define mmIC_PLL_SLIP_WD_CNTR 0x4A3150
+
+#define mmIC_PLL_DIV_FACTOR_0 0x4A3200
+
+#define mmIC_PLL_DIV_FACTOR_1 0x4A3204
+
+#define mmIC_PLL_DIV_FACTOR_2 0x4A3208
+
+#define mmIC_PLL_DIV_FACTOR_3 0x4A320C
+
+#define mmIC_PLL_DIV_FACTOR_CMD_0 0x4A3220
+
+#define mmIC_PLL_DIV_FACTOR_CMD_1 0x4A3224
+
+#define mmIC_PLL_DIV_FACTOR_CMD_2 0x4A3228
+
+#define mmIC_PLL_DIV_FACTOR_CMD_3 0x4A322C
+
+#define mmIC_PLL_DIV_SEL_0 0x4A3280
+
+#define mmIC_PLL_DIV_SEL_1 0x4A3284
+
+#define mmIC_PLL_DIV_SEL_2 0x4A3288
+
+#define mmIC_PLL_DIV_SEL_3 0x4A328C
+
+#define mmIC_PLL_DIV_EN_0 0x4A32A0
+
+#define mmIC_PLL_DIV_EN_1 0x4A32A4
+
+#define mmIC_PLL_DIV_EN_2 0x4A32A8
+
+#define mmIC_PLL_DIV_EN_3 0x4A32AC
+
+#define mmIC_PLL_DIV_FACTOR_BUSY_0 0x4A32C0
+
+#define mmIC_PLL_DIV_FACTOR_BUSY_1 0x4A32C4
+
+#define mmIC_PLL_DIV_FACTOR_BUSY_2 0x4A32C8
+
+#define mmIC_PLL_DIV_FACTOR_BUSY_3 0x4A32CC
+
+#define mmIC_PLL_CLK_GATER 0x4A3300
+
+#define mmIC_PLL_CLK_RLX_0 0x4A3310
+
+#define mmIC_PLL_CLK_RLX_1 0x4A3314
+
+#define mmIC_PLL_CLK_RLX_2 0x4A3318
+
+#define mmIC_PLL_CLK_RLX_3 0x4A331C
+
+#define mmIC_PLL_REF_CNTR_PERIOD 0x4A3400
+
+#define mmIC_PLL_REF_LOW_THRESHOLD 0x4A3410
+
+#define mmIC_PLL_REF_HIGH_THRESHOLD 0x4A3420
+
+#define mmIC_PLL_PLL_NOT_STABLE 0x4A3430
+
+#define mmIC_PLL_FREQ_CALC_EN 0x4A3440
+
+#endif /* ASIC_REG_IC_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
new file mode 100644
index 000000000000..4408188aa067
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MC_PLL_REGS_H_
+#define ASIC_REG_MC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * MC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmMC_PLL_NR 0x4A1100
+
+#define mmMC_PLL_NF 0x4A1104
+
+#define mmMC_PLL_OD 0x4A1108
+
+#define mmMC_PLL_NB 0x4A110C
+
+#define mmMC_PLL_CFG 0x4A1110
+
+#define mmMC_PLL_LOSE_MASK 0x4A1120
+
+#define mmMC_PLL_LOCK_INTR 0x4A1128
+
+#define mmMC_PLL_LOCK_BYPASS 0x4A112C
+
+#define mmMC_PLL_DATA_CHNG 0x4A1130
+
+#define mmMC_PLL_RST 0x4A1134
+
+#define mmMC_PLL_SLIP_WD_CNTR 0x4A1150
+
+#define mmMC_PLL_DIV_FACTOR_0 0x4A1200
+
+#define mmMC_PLL_DIV_FACTOR_1 0x4A1204
+
+#define mmMC_PLL_DIV_FACTOR_2 0x4A1208
+
+#define mmMC_PLL_DIV_FACTOR_3 0x4A120C
+
+#define mmMC_PLL_DIV_FACTOR_CMD_0 0x4A1220
+
+#define mmMC_PLL_DIV_FACTOR_CMD_1 0x4A1224
+
+#define mmMC_PLL_DIV_FACTOR_CMD_2 0x4A1228
+
+#define mmMC_PLL_DIV_FACTOR_CMD_3 0x4A122C
+
+#define mmMC_PLL_DIV_SEL_0 0x4A1280
+
+#define mmMC_PLL_DIV_SEL_1 0x4A1284
+
+#define mmMC_PLL_DIV_SEL_2 0x4A1288
+
+#define mmMC_PLL_DIV_SEL_3 0x4A128C
+
+#define mmMC_PLL_DIV_EN_0 0x4A12A0
+
+#define mmMC_PLL_DIV_EN_1 0x4A12A4
+
+#define mmMC_PLL_DIV_EN_2 0x4A12A8
+
+#define mmMC_PLL_DIV_EN_3 0x4A12AC
+
+#define mmMC_PLL_DIV_FACTOR_BUSY_0 0x4A12C0
+
+#define mmMC_PLL_DIV_FACTOR_BUSY_1 0x4A12C4
+
+#define mmMC_PLL_DIV_FACTOR_BUSY_2 0x4A12C8
+
+#define mmMC_PLL_DIV_FACTOR_BUSY_3 0x4A12CC
+
+#define mmMC_PLL_CLK_GATER 0x4A1300
+
+#define mmMC_PLL_CLK_RLX_0 0x4A1310
+
+#define mmMC_PLL_CLK_RLX_1 0x4A1314
+
+#define mmMC_PLL_CLK_RLX_2 0x4A1318
+
+#define mmMC_PLL_CLK_RLX_3 0x4A131C
+
+#define mmMC_PLL_REF_CNTR_PERIOD 0x4A1400
+
+#define mmMC_PLL_REF_LOW_THRESHOLD 0x4A1410
+
+#define mmMC_PLL_REF_HIGH_THRESHOLD 0x4A1420
+
+#define mmMC_PLL_PLL_NOT_STABLE 0x4A1430
+
+#define mmMC_PLL_FREQ_CALC_EN 0x4A1440
+
+#endif /* ASIC_REG_MC_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
new file mode 100644
index 000000000000..687bca5c5fe3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
@@ -0,0 +1,653 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME1_RTR_MASKS_H_
+#define ASIC_REG_MME1_RTR_MASKS_H_
+
+/*
+ *****************************************
+ * MME1_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+/* MME1_RTR_HBW_RD_RQ_E_ARB */
+#define MME1_RTR_HBW_RD_RQ_E_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_E_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_E_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_E_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_E_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_E_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_E_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RQ_W_ARB */
+#define MME1_RTR_HBW_RD_RQ_W_ARB_E_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_W_ARB_E_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_W_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_W_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_W_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_W_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_W_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RQ_N_ARB */
+#define MME1_RTR_HBW_RD_RQ_N_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_N_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_N_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_N_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_N_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_N_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_N_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RQ_S_ARB */
+#define MME1_RTR_HBW_RD_RQ_S_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_S_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_S_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_S_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_S_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_S_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_S_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RQ_L_ARB */
+#define MME1_RTR_HBW_RD_RQ_L_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_L_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RQ_L_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_L_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RQ_L_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_L_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_RD_RQ_L_ARB_N_SHIFT 24
+#define MME1_RTR_HBW_RD_RQ_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_HBW_E_ARB_MAX */
+#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_W_ARB_MAX */
+#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_N_ARB_MAX */
+#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_S_ARB_MAX */
+#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_L_ARB_MAX */
+#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_HBW_RD_RS_MAX_CREDIT */
+#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_MASK 0x3F
+#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_MASK 0x3F00
+
+/* MME1_RTR_HBW_WR_RQ_MAX_CREDIT */
+#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_MASK 0x3F
+
+/* MME1_RTR_HBW_RD_RQ_MAX_CREDIT */
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_SHIFT 0
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_MASK 0x3F
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_SHIFT 8
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_MASK 0x3F00
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_SHIFT 16
+#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_MASK 0x3F0000
+
+/* MME1_RTR_HBW_RD_RS_E_ARB */
+#define MME1_RTR_HBW_RD_RS_E_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_E_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_E_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_E_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_E_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_E_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_E_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RS_W_ARB */
+#define MME1_RTR_HBW_RD_RS_W_ARB_E_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_W_ARB_E_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_W_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_W_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_W_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_W_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_W_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RS_N_ARB */
+#define MME1_RTR_HBW_RD_RS_N_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_N_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_N_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_N_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_N_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_N_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_N_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RS_S_ARB */
+#define MME1_RTR_HBW_RD_RS_S_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_S_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_S_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_S_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_S_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_S_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_S_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_RD_RS_L_ARB */
+#define MME1_RTR_HBW_RD_RS_L_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_RD_RS_L_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_RD_RS_L_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_RD_RS_L_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_RD_RS_L_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_RD_RS_L_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_RD_RS_L_ARB_N_SHIFT 24
+#define MME1_RTR_HBW_RD_RS_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_E_ARB */
+#define MME1_RTR_HBW_WR_RQ_E_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_E_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_E_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_E_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_E_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_E_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_E_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_W_ARB */
+#define MME1_RTR_HBW_WR_RQ_W_ARB_E_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_W_ARB_E_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_W_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_W_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_W_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_W_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_W_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_N_ARB */
+#define MME1_RTR_HBW_WR_RQ_N_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_N_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_N_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_N_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_N_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_N_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_N_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_S_ARB */
+#define MME1_RTR_HBW_WR_RQ_S_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_S_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_S_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_S_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_S_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_S_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_S_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RQ_L_ARB */
+#define MME1_RTR_HBW_WR_RQ_L_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RQ_L_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RQ_L_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RQ_L_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RQ_L_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_WR_RQ_L_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_WR_RQ_L_ARB_N_SHIFT 24
+#define MME1_RTR_HBW_WR_RQ_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_E_ARB */
+#define MME1_RTR_HBW_WR_RS_E_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_E_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_E_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_E_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_E_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_E_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_E_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_W_ARB */
+#define MME1_RTR_HBW_WR_RS_W_ARB_E_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_W_ARB_E_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_W_ARB_S_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_W_ARB_S_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_W_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_W_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_W_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_N_ARB */
+#define MME1_RTR_HBW_WR_RS_N_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_N_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_N_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_N_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_N_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_N_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_N_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_S_ARB */
+#define MME1_RTR_HBW_WR_RS_S_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_S_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_S_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_S_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_S_ARB_N_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_S_ARB_N_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_S_ARB_L_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_HBW_WR_RS_L_ARB */
+#define MME1_RTR_HBW_WR_RS_L_ARB_W_SHIFT 0
+#define MME1_RTR_HBW_WR_RS_L_ARB_W_MASK 0x7
+#define MME1_RTR_HBW_WR_RS_L_ARB_E_SHIFT 8
+#define MME1_RTR_HBW_WR_RS_L_ARB_E_MASK 0x700
+#define MME1_RTR_HBW_WR_RS_L_ARB_S_SHIFT 16
+#define MME1_RTR_HBW_WR_RS_L_ARB_S_MASK 0x70000
+#define MME1_RTR_HBW_WR_RS_L_ARB_N_SHIFT 24
+#define MME1_RTR_HBW_WR_RS_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_E_ARB */
+#define MME1_RTR_LBW_RD_RQ_E_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_E_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_E_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_E_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_E_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_E_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_E_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_W_ARB */
+#define MME1_RTR_LBW_RD_RQ_W_ARB_E_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_W_ARB_E_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_W_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_W_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_W_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_W_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_W_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_N_ARB */
+#define MME1_RTR_LBW_RD_RQ_N_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_N_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_N_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_N_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_N_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_N_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_N_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_S_ARB */
+#define MME1_RTR_LBW_RD_RQ_S_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_S_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_S_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_S_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_S_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_S_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_S_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RQ_L_ARB */
+#define MME1_RTR_LBW_RD_RQ_L_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RQ_L_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RQ_L_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RQ_L_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RQ_L_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_RD_RQ_L_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_RD_RQ_L_ARB_N_SHIFT 24
+#define MME1_RTR_LBW_RD_RQ_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_LBW_E_ARB_MAX */
+#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_W_ARB_MAX */
+#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_N_ARB_MAX */
+#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_S_ARB_MAX */
+#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_L_ARB_MAX */
+#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_LBW_SRAM_MAX_CREDIT */
+#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_SHIFT 0
+#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_MASK 0x3F
+#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_SHIFT 8
+#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_MASK 0x3F00
+
+/* MME1_RTR_LBW_RD_RS_E_ARB */
+#define MME1_RTR_LBW_RD_RS_E_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_E_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_E_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_E_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_E_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_E_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_E_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RS_W_ARB */
+#define MME1_RTR_LBW_RD_RS_W_ARB_E_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_W_ARB_E_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_W_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_W_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_W_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_W_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_W_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RS_N_ARB */
+#define MME1_RTR_LBW_RD_RS_N_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_N_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_N_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_N_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_N_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_N_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_N_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RS_S_ARB */
+#define MME1_RTR_LBW_RD_RS_S_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_S_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_S_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_S_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_S_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_S_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_S_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_RD_RS_L_ARB */
+#define MME1_RTR_LBW_RD_RS_L_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_RD_RS_L_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_RD_RS_L_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_RD_RS_L_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_RD_RS_L_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_RD_RS_L_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_RD_RS_L_ARB_N_SHIFT 24
+#define MME1_RTR_LBW_RD_RS_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_E_ARB */
+#define MME1_RTR_LBW_WR_RQ_E_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_E_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_E_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_E_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_E_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_E_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_E_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_W_ARB */
+#define MME1_RTR_LBW_WR_RQ_W_ARB_E_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_W_ARB_E_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_W_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_W_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_W_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_W_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_W_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_N_ARB */
+#define MME1_RTR_LBW_WR_RQ_N_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_N_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_N_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_N_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_N_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_N_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_N_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_S_ARB */
+#define MME1_RTR_LBW_WR_RQ_S_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_S_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_S_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_S_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_S_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_S_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_S_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RQ_L_ARB */
+#define MME1_RTR_LBW_WR_RQ_L_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RQ_L_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RQ_L_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RQ_L_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RQ_L_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_WR_RQ_L_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_WR_RQ_L_ARB_N_SHIFT 24
+#define MME1_RTR_LBW_WR_RQ_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_E_ARB */
+#define MME1_RTR_LBW_WR_RS_E_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_E_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_E_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_E_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_E_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_E_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_E_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_W_ARB */
+#define MME1_RTR_LBW_WR_RS_W_ARB_E_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_W_ARB_E_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_W_ARB_S_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_W_ARB_S_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_W_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_W_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_W_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_N_ARB */
+#define MME1_RTR_LBW_WR_RS_N_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_N_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_N_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_N_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_N_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_N_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_N_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_S_ARB */
+#define MME1_RTR_LBW_WR_RS_S_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_S_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_S_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_S_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_S_ARB_N_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_S_ARB_N_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_S_ARB_L_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_LBW_WR_RS_L_ARB */
+#define MME1_RTR_LBW_WR_RS_L_ARB_W_SHIFT 0
+#define MME1_RTR_LBW_WR_RS_L_ARB_W_MASK 0x7
+#define MME1_RTR_LBW_WR_RS_L_ARB_E_SHIFT 8
+#define MME1_RTR_LBW_WR_RS_L_ARB_E_MASK 0x700
+#define MME1_RTR_LBW_WR_RS_L_ARB_S_SHIFT 16
+#define MME1_RTR_LBW_WR_RS_L_ARB_S_MASK 0x70000
+#define MME1_RTR_LBW_WR_RS_L_ARB_N_SHIFT 24
+#define MME1_RTR_LBW_WR_RS_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_DBG_E_ARB */
+#define MME1_RTR_DBG_E_ARB_W_SHIFT 0
+#define MME1_RTR_DBG_E_ARB_W_MASK 0x7
+#define MME1_RTR_DBG_E_ARB_S_SHIFT 8
+#define MME1_RTR_DBG_E_ARB_S_MASK 0x700
+#define MME1_RTR_DBG_E_ARB_N_SHIFT 16
+#define MME1_RTR_DBG_E_ARB_N_MASK 0x70000
+#define MME1_RTR_DBG_E_ARB_L_SHIFT 24
+#define MME1_RTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_DBG_W_ARB */
+#define MME1_RTR_DBG_W_ARB_E_SHIFT 0
+#define MME1_RTR_DBG_W_ARB_E_MASK 0x7
+#define MME1_RTR_DBG_W_ARB_S_SHIFT 8
+#define MME1_RTR_DBG_W_ARB_S_MASK 0x700
+#define MME1_RTR_DBG_W_ARB_N_SHIFT 16
+#define MME1_RTR_DBG_W_ARB_N_MASK 0x70000
+#define MME1_RTR_DBG_W_ARB_L_SHIFT 24
+#define MME1_RTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_DBG_N_ARB */
+#define MME1_RTR_DBG_N_ARB_W_SHIFT 0
+#define MME1_RTR_DBG_N_ARB_W_MASK 0x7
+#define MME1_RTR_DBG_N_ARB_E_SHIFT 8
+#define MME1_RTR_DBG_N_ARB_E_MASK 0x700
+#define MME1_RTR_DBG_N_ARB_S_SHIFT 16
+#define MME1_RTR_DBG_N_ARB_S_MASK 0x70000
+#define MME1_RTR_DBG_N_ARB_L_SHIFT 24
+#define MME1_RTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_DBG_S_ARB */
+#define MME1_RTR_DBG_S_ARB_W_SHIFT 0
+#define MME1_RTR_DBG_S_ARB_W_MASK 0x7
+#define MME1_RTR_DBG_S_ARB_E_SHIFT 8
+#define MME1_RTR_DBG_S_ARB_E_MASK 0x700
+#define MME1_RTR_DBG_S_ARB_N_SHIFT 16
+#define MME1_RTR_DBG_S_ARB_N_MASK 0x70000
+#define MME1_RTR_DBG_S_ARB_L_SHIFT 24
+#define MME1_RTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* MME1_RTR_DBG_L_ARB */
+#define MME1_RTR_DBG_L_ARB_W_SHIFT 0
+#define MME1_RTR_DBG_L_ARB_W_MASK 0x7
+#define MME1_RTR_DBG_L_ARB_E_SHIFT 8
+#define MME1_RTR_DBG_L_ARB_E_MASK 0x700
+#define MME1_RTR_DBG_L_ARB_S_SHIFT 16
+#define MME1_RTR_DBG_L_ARB_S_MASK 0x70000
+#define MME1_RTR_DBG_L_ARB_N_SHIFT 24
+#define MME1_RTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* MME1_RTR_DBG_E_ARB_MAX */
+#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_DBG_W_ARB_MAX */
+#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_DBG_N_ARB_MAX */
+#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_DBG_S_ARB_MAX */
+#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_DBG_L_ARB_MAX */
+#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* MME1_RTR_SPLIT_COEF */
+#define MME1_RTR_SPLIT_COEF_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* MME1_RTR_SPLIT_CFG */
+#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 4
+#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x10
+#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 5
+#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x20
+#define MME1_RTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define MME1_RTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* MME1_RTR_SPLIT_RD_SAT */
+#define MME1_RTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* MME1_RTR_SPLIT_RD_RST_TOKEN */
+#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* MME1_RTR_SPLIT_RD_TIMEOUT */
+#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* MME1_RTR_SPLIT_WR_SAT */
+#define MME1_RTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* MME1_RTR_WPLIT_WR_TST_TOLEN */
+#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* MME1_RTR_SPLIT_WR_TIMEOUT */
+#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* MME1_RTR_HBW_RANGE_HIT */
+#define MME1_RTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define MME1_RTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* MME1_RTR_HBW_RANGE_MASK_L */
+#define MME1_RTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define MME1_RTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* MME1_RTR_HBW_RANGE_MASK_H */
+#define MME1_RTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define MME1_RTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* MME1_RTR_HBW_RANGE_BASE_L */
+#define MME1_RTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define MME1_RTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* MME1_RTR_HBW_RANGE_BASE_H */
+#define MME1_RTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define MME1_RTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* MME1_RTR_LBW_RANGE_HIT */
+#define MME1_RTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define MME1_RTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* MME1_RTR_LBW_RANGE_MASK */
+#define MME1_RTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define MME1_RTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* MME1_RTR_LBW_RANGE_BASE */
+#define MME1_RTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define MME1_RTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* MME1_RTR_RGLTR */
+#define MME1_RTR_RGLTR_WR_EN_SHIFT 0
+#define MME1_RTR_RGLTR_WR_EN_MASK 0x1
+#define MME1_RTR_RGLTR_RD_EN_SHIFT 4
+#define MME1_RTR_RGLTR_RD_EN_MASK 0x10
+
+/* MME1_RTR_RGLTR_WR_RESULT */
+#define MME1_RTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define MME1_RTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* MME1_RTR_RGLTR_RD_RESULT */
+#define MME1_RTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define MME1_RTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* MME1_RTR_SCRAMB_EN */
+#define MME1_RTR_SCRAMB_EN_VAL_SHIFT 0
+#define MME1_RTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* MME1_RTR_NON_LIN_SCRAMB */
+#define MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */
+
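Each field in these auto-generated mask headers is described by a SHIFT/MASK pair, and callers are expected to follow the usual clear-then-set convention when updating a field. Below is a minimal sketch of that pattern for the B2B_OPT field of MME1_RTR_SPLIT_CFG; the helper names, the illustrative include, and the assumption that the caller already holds the current register value are not part of the patch.

#include <linux/types.h>
#include "mme1_rtr_masks.h"	/* illustrative include of the header above */

/* Replace the B2B_OPT field (bits 8:6) in a cached SPLIT_CFG value. */
static u32 mme1_rtr_split_cfg_set_b2b_opt(u32 val, u32 b2b_opt)
{
	val &= ~MME1_RTR_SPLIT_CFG_B2B_OPT_MASK;
	val |= (b2b_opt << MME1_RTR_SPLIT_CFG_B2B_OPT_SHIFT) &
		MME1_RTR_SPLIT_CFG_B2B_OPT_MASK;
	return val;
}

/* Extract the same field from a value read back from the register. */
static u32 mme1_rtr_split_cfg_get_b2b_opt(u32 val)
{
	return (val & MME1_RTR_SPLIT_CFG_B2B_OPT_MASK) >>
		MME1_RTR_SPLIT_CFG_B2B_OPT_SHIFT;
}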
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
new file mode 100644
index 000000000000..c248339a1cbe
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME1_RTR_REGS_H_
+#define ASIC_REG_MME1_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME1_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME1_RTR_HBW_RD_RQ_E_ARB 0x40100
+
+#define mmMME1_RTR_HBW_RD_RQ_W_ARB 0x40104
+
+#define mmMME1_RTR_HBW_RD_RQ_N_ARB 0x40108
+
+#define mmMME1_RTR_HBW_RD_RQ_S_ARB 0x4010C
+
+#define mmMME1_RTR_HBW_RD_RQ_L_ARB 0x40110
+
+#define mmMME1_RTR_HBW_E_ARB_MAX 0x40120
+
+#define mmMME1_RTR_HBW_W_ARB_MAX 0x40124
+
+#define mmMME1_RTR_HBW_N_ARB_MAX 0x40128
+
+#define mmMME1_RTR_HBW_S_ARB_MAX 0x4012C
+
+#define mmMME1_RTR_HBW_L_ARB_MAX 0x40130
+
+#define mmMME1_RTR_HBW_RD_RS_MAX_CREDIT 0x40140
+
+#define mmMME1_RTR_HBW_WR_RQ_MAX_CREDIT 0x40144
+
+#define mmMME1_RTR_HBW_RD_RQ_MAX_CREDIT 0x40148
+
+#define mmMME1_RTR_HBW_RD_RS_E_ARB 0x40150
+
+#define mmMME1_RTR_HBW_RD_RS_W_ARB 0x40154
+
+#define mmMME1_RTR_HBW_RD_RS_N_ARB 0x40158
+
+#define mmMME1_RTR_HBW_RD_RS_S_ARB 0x4015C
+
+#define mmMME1_RTR_HBW_RD_RS_L_ARB 0x40160
+
+#define mmMME1_RTR_HBW_WR_RQ_E_ARB 0x40170
+
+#define mmMME1_RTR_HBW_WR_RQ_W_ARB 0x40174
+
+#define mmMME1_RTR_HBW_WR_RQ_N_ARB 0x40178
+
+#define mmMME1_RTR_HBW_WR_RQ_S_ARB 0x4017C
+
+#define mmMME1_RTR_HBW_WR_RQ_L_ARB 0x40180
+
+#define mmMME1_RTR_HBW_WR_RS_E_ARB 0x40190
+
+#define mmMME1_RTR_HBW_WR_RS_W_ARB 0x40194
+
+#define mmMME1_RTR_HBW_WR_RS_N_ARB 0x40198
+
+#define mmMME1_RTR_HBW_WR_RS_S_ARB 0x4019C
+
+#define mmMME1_RTR_HBW_WR_RS_L_ARB 0x401A0
+
+#define mmMME1_RTR_LBW_RD_RQ_E_ARB 0x40200
+
+#define mmMME1_RTR_LBW_RD_RQ_W_ARB 0x40204
+
+#define mmMME1_RTR_LBW_RD_RQ_N_ARB 0x40208
+
+#define mmMME1_RTR_LBW_RD_RQ_S_ARB 0x4020C
+
+#define mmMME1_RTR_LBW_RD_RQ_L_ARB 0x40210
+
+#define mmMME1_RTR_LBW_E_ARB_MAX 0x40220
+
+#define mmMME1_RTR_LBW_W_ARB_MAX 0x40224
+
+#define mmMME1_RTR_LBW_N_ARB_MAX 0x40228
+
+#define mmMME1_RTR_LBW_S_ARB_MAX 0x4022C
+
+#define mmMME1_RTR_LBW_L_ARB_MAX 0x40230
+
+#define mmMME1_RTR_LBW_SRAM_MAX_CREDIT 0x40240
+
+#define mmMME1_RTR_LBW_RD_RS_E_ARB 0x40250
+
+#define mmMME1_RTR_LBW_RD_RS_W_ARB 0x40254
+
+#define mmMME1_RTR_LBW_RD_RS_N_ARB 0x40258
+
+#define mmMME1_RTR_LBW_RD_RS_S_ARB 0x4025C
+
+#define mmMME1_RTR_LBW_RD_RS_L_ARB 0x40260
+
+#define mmMME1_RTR_LBW_WR_RQ_E_ARB 0x40270
+
+#define mmMME1_RTR_LBW_WR_RQ_W_ARB 0x40274
+
+#define mmMME1_RTR_LBW_WR_RQ_N_ARB 0x40278
+
+#define mmMME1_RTR_LBW_WR_RQ_S_ARB 0x4027C
+
+#define mmMME1_RTR_LBW_WR_RQ_L_ARB 0x40280
+
+#define mmMME1_RTR_LBW_WR_RS_E_ARB 0x40290
+
+#define mmMME1_RTR_LBW_WR_RS_W_ARB 0x40294
+
+#define mmMME1_RTR_LBW_WR_RS_N_ARB 0x40298
+
+#define mmMME1_RTR_LBW_WR_RS_S_ARB 0x4029C
+
+#define mmMME1_RTR_LBW_WR_RS_L_ARB 0x402A0
+
+#define mmMME1_RTR_DBG_E_ARB 0x40300
+
+#define mmMME1_RTR_DBG_W_ARB 0x40304
+
+#define mmMME1_RTR_DBG_N_ARB 0x40308
+
+#define mmMME1_RTR_DBG_S_ARB 0x4030C
+
+#define mmMME1_RTR_DBG_L_ARB 0x40310
+
+#define mmMME1_RTR_DBG_E_ARB_MAX 0x40320
+
+#define mmMME1_RTR_DBG_W_ARB_MAX 0x40324
+
+#define mmMME1_RTR_DBG_N_ARB_MAX 0x40328
+
+#define mmMME1_RTR_DBG_S_ARB_MAX 0x4032C
+
+#define mmMME1_RTR_DBG_L_ARB_MAX 0x40330
+
+#define mmMME1_RTR_SPLIT_COEF_0 0x40400
+
+#define mmMME1_RTR_SPLIT_COEF_1 0x40404
+
+#define mmMME1_RTR_SPLIT_COEF_2 0x40408
+
+#define mmMME1_RTR_SPLIT_COEF_3 0x4040C
+
+#define mmMME1_RTR_SPLIT_COEF_4 0x40410
+
+#define mmMME1_RTR_SPLIT_COEF_5 0x40414
+
+#define mmMME1_RTR_SPLIT_COEF_6 0x40418
+
+#define mmMME1_RTR_SPLIT_COEF_7 0x4041C
+
+#define mmMME1_RTR_SPLIT_COEF_8 0x40420
+
+#define mmMME1_RTR_SPLIT_COEF_9 0x40424
+
+#define mmMME1_RTR_SPLIT_CFG 0x40440
+
+#define mmMME1_RTR_SPLIT_RD_SAT 0x40444
+
+#define mmMME1_RTR_SPLIT_RD_RST_TOKEN 0x40448
+
+#define mmMME1_RTR_SPLIT_RD_TIMEOUT_0 0x4044C
+
+#define mmMME1_RTR_SPLIT_RD_TIMEOUT_1 0x40450
+
+#define mmMME1_RTR_SPLIT_WR_SAT 0x40454
+
+#define mmMME1_RTR_WPLIT_WR_TST_TOLEN 0x40458
+
+#define mmMME1_RTR_SPLIT_WR_TIMEOUT_0 0x4045C
+
+#define mmMME1_RTR_SPLIT_WR_TIMEOUT_1 0x40460
+
+#define mmMME1_RTR_HBW_RANGE_HIT 0x40470
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_0 0x40480
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_1 0x40484
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_2 0x40488
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_3 0x4048C
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_4 0x40490
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_5 0x40494
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_6 0x40498
+
+#define mmMME1_RTR_HBW_RANGE_MASK_L_7 0x4049C
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_0 0x404A0
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_1 0x404A4
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_2 0x404A8
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_3 0x404AC
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_4 0x404B0
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_5 0x404B4
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_6 0x404B8
+
+#define mmMME1_RTR_HBW_RANGE_MASK_H_7 0x404BC
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_0 0x404C0
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_1 0x404C4
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_2 0x404C8
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_3 0x404CC
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_4 0x404D0
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_5 0x404D4
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_6 0x404D8
+
+#define mmMME1_RTR_HBW_RANGE_BASE_L_7 0x404DC
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_0 0x404E0
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_1 0x404E4
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_2 0x404E8
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_3 0x404EC
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_4 0x404F0
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_5 0x404F4
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_6 0x404F8
+
+#define mmMME1_RTR_HBW_RANGE_BASE_H_7 0x404FC
+
+#define mmMME1_RTR_LBW_RANGE_HIT 0x40500
+
+#define mmMME1_RTR_LBW_RANGE_MASK_0 0x40510
+
+#define mmMME1_RTR_LBW_RANGE_MASK_1 0x40514
+
+#define mmMME1_RTR_LBW_RANGE_MASK_2 0x40518
+
+#define mmMME1_RTR_LBW_RANGE_MASK_3 0x4051C
+
+#define mmMME1_RTR_LBW_RANGE_MASK_4 0x40520
+
+#define mmMME1_RTR_LBW_RANGE_MASK_5 0x40524
+
+#define mmMME1_RTR_LBW_RANGE_MASK_6 0x40528
+
+#define mmMME1_RTR_LBW_RANGE_MASK_7 0x4052C
+
+#define mmMME1_RTR_LBW_RANGE_MASK_8 0x40530
+
+#define mmMME1_RTR_LBW_RANGE_MASK_9 0x40534
+
+#define mmMME1_RTR_LBW_RANGE_MASK_10 0x40538
+
+#define mmMME1_RTR_LBW_RANGE_MASK_11 0x4053C
+
+#define mmMME1_RTR_LBW_RANGE_MASK_12 0x40540
+
+#define mmMME1_RTR_LBW_RANGE_MASK_13 0x40544
+
+#define mmMME1_RTR_LBW_RANGE_MASK_14 0x40548
+
+#define mmMME1_RTR_LBW_RANGE_MASK_15 0x4054C
+
+#define mmMME1_RTR_LBW_RANGE_BASE_0 0x40550
+
+#define mmMME1_RTR_LBW_RANGE_BASE_1 0x40554
+
+#define mmMME1_RTR_LBW_RANGE_BASE_2 0x40558
+
+#define mmMME1_RTR_LBW_RANGE_BASE_3 0x4055C
+
+#define mmMME1_RTR_LBW_RANGE_BASE_4 0x40560
+
+#define mmMME1_RTR_LBW_RANGE_BASE_5 0x40564
+
+#define mmMME1_RTR_LBW_RANGE_BASE_6 0x40568
+
+#define mmMME1_RTR_LBW_RANGE_BASE_7 0x4056C
+
+#define mmMME1_RTR_LBW_RANGE_BASE_8 0x40570
+
+#define mmMME1_RTR_LBW_RANGE_BASE_9 0x40574
+
+#define mmMME1_RTR_LBW_RANGE_BASE_10 0x40578
+
+#define mmMME1_RTR_LBW_RANGE_BASE_11 0x4057C
+
+#define mmMME1_RTR_LBW_RANGE_BASE_12 0x40580
+
+#define mmMME1_RTR_LBW_RANGE_BASE_13 0x40584
+
+#define mmMME1_RTR_LBW_RANGE_BASE_14 0x40588
+
+#define mmMME1_RTR_LBW_RANGE_BASE_15 0x4058C
+
+#define mmMME1_RTR_RGLTR 0x40590
+
+#define mmMME1_RTR_RGLTR_WR_RESULT 0x40594
+
+#define mmMME1_RTR_RGLTR_RD_RESULT 0x40598
+
+#define mmMME1_RTR_SCRAMB_EN 0x40600
+
+#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604
+
+#endif /* ASIC_REG_MME1_RTR_REGS_H_ */
+
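The mm* constants in this header are byte offsets into the device configuration space rather than absolute addresses. A hedged sketch of a read-modify-write of MME1_RTR_SPLIT_CFG through a generic ioremapped base follows; the cfg_base pointer and the plain readl/writel accessors stand in for whatever MMIO helpers the driver actually uses and are assumptions of this sketch.

#include <linux/io.h>
#include <linux/types.h>
#include "mme1_rtr_regs.h"	/* illustrative includes of the headers above */
#include "mme1_rtr_masks.h"

/* cfg_base: hypothetical __iomem mapping of the device configuration space. */
static void mme1_rtr_enable_wr_rate_lim(void __iomem *cfg_base)
{
	u32 cfg = readl(cfg_base + mmMME1_RTR_SPLIT_CFG);

	/* Set WR_RATE_LIM_EN (bit 4) while preserving every other field. */
	cfg |= 1 << MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT;
	writel(cfg, cfg_base + mmMME1_RTR_SPLIT_CFG);
}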
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
new file mode 100644
index 000000000000..7a2b777bdc4f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME2_RTR_REGS_H_
+#define ASIC_REG_MME2_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME2_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME2_RTR_HBW_RD_RQ_E_ARB 0x80100
+
+#define mmMME2_RTR_HBW_RD_RQ_W_ARB 0x80104
+
+#define mmMME2_RTR_HBW_RD_RQ_N_ARB 0x80108
+
+#define mmMME2_RTR_HBW_RD_RQ_S_ARB 0x8010C
+
+#define mmMME2_RTR_HBW_RD_RQ_L_ARB 0x80110
+
+#define mmMME2_RTR_HBW_E_ARB_MAX 0x80120
+
+#define mmMME2_RTR_HBW_W_ARB_MAX 0x80124
+
+#define mmMME2_RTR_HBW_N_ARB_MAX 0x80128
+
+#define mmMME2_RTR_HBW_S_ARB_MAX 0x8012C
+
+#define mmMME2_RTR_HBW_L_ARB_MAX 0x80130
+
+#define mmMME2_RTR_HBW_RD_RS_MAX_CREDIT 0x80140
+
+#define mmMME2_RTR_HBW_WR_RQ_MAX_CREDIT 0x80144
+
+#define mmMME2_RTR_HBW_RD_RQ_MAX_CREDIT 0x80148
+
+#define mmMME2_RTR_HBW_RD_RS_E_ARB 0x80150
+
+#define mmMME2_RTR_HBW_RD_RS_W_ARB 0x80154
+
+#define mmMME2_RTR_HBW_RD_RS_N_ARB 0x80158
+
+#define mmMME2_RTR_HBW_RD_RS_S_ARB 0x8015C
+
+#define mmMME2_RTR_HBW_RD_RS_L_ARB 0x80160
+
+#define mmMME2_RTR_HBW_WR_RQ_E_ARB 0x80170
+
+#define mmMME2_RTR_HBW_WR_RQ_W_ARB 0x80174
+
+#define mmMME2_RTR_HBW_WR_RQ_N_ARB 0x80178
+
+#define mmMME2_RTR_HBW_WR_RQ_S_ARB 0x8017C
+
+#define mmMME2_RTR_HBW_WR_RQ_L_ARB 0x80180
+
+#define mmMME2_RTR_HBW_WR_RS_E_ARB 0x80190
+
+#define mmMME2_RTR_HBW_WR_RS_W_ARB 0x80194
+
+#define mmMME2_RTR_HBW_WR_RS_N_ARB 0x80198
+
+#define mmMME2_RTR_HBW_WR_RS_S_ARB 0x8019C
+
+#define mmMME2_RTR_HBW_WR_RS_L_ARB 0x801A0
+
+#define mmMME2_RTR_LBW_RD_RQ_E_ARB 0x80200
+
+#define mmMME2_RTR_LBW_RD_RQ_W_ARB 0x80204
+
+#define mmMME2_RTR_LBW_RD_RQ_N_ARB 0x80208
+
+#define mmMME2_RTR_LBW_RD_RQ_S_ARB 0x8020C
+
+#define mmMME2_RTR_LBW_RD_RQ_L_ARB 0x80210
+
+#define mmMME2_RTR_LBW_E_ARB_MAX 0x80220
+
+#define mmMME2_RTR_LBW_W_ARB_MAX 0x80224
+
+#define mmMME2_RTR_LBW_N_ARB_MAX 0x80228
+
+#define mmMME2_RTR_LBW_S_ARB_MAX 0x8022C
+
+#define mmMME2_RTR_LBW_L_ARB_MAX 0x80230
+
+#define mmMME2_RTR_LBW_SRAM_MAX_CREDIT 0x80240
+
+#define mmMME2_RTR_LBW_RD_RS_E_ARB 0x80250
+
+#define mmMME2_RTR_LBW_RD_RS_W_ARB 0x80254
+
+#define mmMME2_RTR_LBW_RD_RS_N_ARB 0x80258
+
+#define mmMME2_RTR_LBW_RD_RS_S_ARB 0x8025C
+
+#define mmMME2_RTR_LBW_RD_RS_L_ARB 0x80260
+
+#define mmMME2_RTR_LBW_WR_RQ_E_ARB 0x80270
+
+#define mmMME2_RTR_LBW_WR_RQ_W_ARB 0x80274
+
+#define mmMME2_RTR_LBW_WR_RQ_N_ARB 0x80278
+
+#define mmMME2_RTR_LBW_WR_RQ_S_ARB 0x8027C
+
+#define mmMME2_RTR_LBW_WR_RQ_L_ARB 0x80280
+
+#define mmMME2_RTR_LBW_WR_RS_E_ARB 0x80290
+
+#define mmMME2_RTR_LBW_WR_RS_W_ARB 0x80294
+
+#define mmMME2_RTR_LBW_WR_RS_N_ARB 0x80298
+
+#define mmMME2_RTR_LBW_WR_RS_S_ARB 0x8029C
+
+#define mmMME2_RTR_LBW_WR_RS_L_ARB 0x802A0
+
+#define mmMME2_RTR_DBG_E_ARB 0x80300
+
+#define mmMME2_RTR_DBG_W_ARB 0x80304
+
+#define mmMME2_RTR_DBG_N_ARB 0x80308
+
+#define mmMME2_RTR_DBG_S_ARB 0x8030C
+
+#define mmMME2_RTR_DBG_L_ARB 0x80310
+
+#define mmMME2_RTR_DBG_E_ARB_MAX 0x80320
+
+#define mmMME2_RTR_DBG_W_ARB_MAX 0x80324
+
+#define mmMME2_RTR_DBG_N_ARB_MAX 0x80328
+
+#define mmMME2_RTR_DBG_S_ARB_MAX 0x8032C
+
+#define mmMME2_RTR_DBG_L_ARB_MAX 0x80330
+
+#define mmMME2_RTR_SPLIT_COEF_0 0x80400
+
+#define mmMME2_RTR_SPLIT_COEF_1 0x80404
+
+#define mmMME2_RTR_SPLIT_COEF_2 0x80408
+
+#define mmMME2_RTR_SPLIT_COEF_3 0x8040C
+
+#define mmMME2_RTR_SPLIT_COEF_4 0x80410
+
+#define mmMME2_RTR_SPLIT_COEF_5 0x80414
+
+#define mmMME2_RTR_SPLIT_COEF_6 0x80418
+
+#define mmMME2_RTR_SPLIT_COEF_7 0x8041C
+
+#define mmMME2_RTR_SPLIT_COEF_8 0x80420
+
+#define mmMME2_RTR_SPLIT_COEF_9 0x80424
+
+#define mmMME2_RTR_SPLIT_CFG 0x80440
+
+#define mmMME2_RTR_SPLIT_RD_SAT 0x80444
+
+#define mmMME2_RTR_SPLIT_RD_RST_TOKEN 0x80448
+
+#define mmMME2_RTR_SPLIT_RD_TIMEOUT_0 0x8044C
+
+#define mmMME2_RTR_SPLIT_RD_TIMEOUT_1 0x80450
+
+#define mmMME2_RTR_SPLIT_WR_SAT 0x80454
+
+#define mmMME2_RTR_WPLIT_WR_TST_TOLEN 0x80458
+
+#define mmMME2_RTR_SPLIT_WR_TIMEOUT_0 0x8045C
+
+#define mmMME2_RTR_SPLIT_WR_TIMEOUT_1 0x80460
+
+#define mmMME2_RTR_HBW_RANGE_HIT 0x80470
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_0 0x80480
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_1 0x80484
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_2 0x80488
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_3 0x8048C
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_4 0x80490
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_5 0x80494
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_6 0x80498
+
+#define mmMME2_RTR_HBW_RANGE_MASK_L_7 0x8049C
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_0 0x804A0
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_1 0x804A4
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_2 0x804A8
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_3 0x804AC
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_4 0x804B0
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_5 0x804B4
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_6 0x804B8
+
+#define mmMME2_RTR_HBW_RANGE_MASK_H_7 0x804BC
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_0 0x804C0
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_1 0x804C4
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_2 0x804C8
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_3 0x804CC
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_4 0x804D0
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_5 0x804D4
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_6 0x804D8
+
+#define mmMME2_RTR_HBW_RANGE_BASE_L_7 0x804DC
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_0 0x804E0
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_1 0x804E4
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_2 0x804E8
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_3 0x804EC
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_4 0x804F0
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_5 0x804F4
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_6 0x804F8
+
+#define mmMME2_RTR_HBW_RANGE_BASE_H_7 0x804FC
+
+#define mmMME2_RTR_LBW_RANGE_HIT 0x80500
+
+#define mmMME2_RTR_LBW_RANGE_MASK_0 0x80510
+
+#define mmMME2_RTR_LBW_RANGE_MASK_1 0x80514
+
+#define mmMME2_RTR_LBW_RANGE_MASK_2 0x80518
+
+#define mmMME2_RTR_LBW_RANGE_MASK_3 0x8051C
+
+#define mmMME2_RTR_LBW_RANGE_MASK_4 0x80520
+
+#define mmMME2_RTR_LBW_RANGE_MASK_5 0x80524
+
+#define mmMME2_RTR_LBW_RANGE_MASK_6 0x80528
+
+#define mmMME2_RTR_LBW_RANGE_MASK_7 0x8052C
+
+#define mmMME2_RTR_LBW_RANGE_MASK_8 0x80530
+
+#define mmMME2_RTR_LBW_RANGE_MASK_9 0x80534
+
+#define mmMME2_RTR_LBW_RANGE_MASK_10 0x80538
+
+#define mmMME2_RTR_LBW_RANGE_MASK_11 0x8053C
+
+#define mmMME2_RTR_LBW_RANGE_MASK_12 0x80540
+
+#define mmMME2_RTR_LBW_RANGE_MASK_13 0x80544
+
+#define mmMME2_RTR_LBW_RANGE_MASK_14 0x80548
+
+#define mmMME2_RTR_LBW_RANGE_MASK_15 0x8054C
+
+#define mmMME2_RTR_LBW_RANGE_BASE_0 0x80550
+
+#define mmMME2_RTR_LBW_RANGE_BASE_1 0x80554
+
+#define mmMME2_RTR_LBW_RANGE_BASE_2 0x80558
+
+#define mmMME2_RTR_LBW_RANGE_BASE_3 0x8055C
+
+#define mmMME2_RTR_LBW_RANGE_BASE_4 0x80560
+
+#define mmMME2_RTR_LBW_RANGE_BASE_5 0x80564
+
+#define mmMME2_RTR_LBW_RANGE_BASE_6 0x80568
+
+#define mmMME2_RTR_LBW_RANGE_BASE_7 0x8056C
+
+#define mmMME2_RTR_LBW_RANGE_BASE_8 0x80570
+
+#define mmMME2_RTR_LBW_RANGE_BASE_9 0x80574
+
+#define mmMME2_RTR_LBW_RANGE_BASE_10 0x80578
+
+#define mmMME2_RTR_LBW_RANGE_BASE_11 0x8057C
+
+#define mmMME2_RTR_LBW_RANGE_BASE_12 0x80580
+
+#define mmMME2_RTR_LBW_RANGE_BASE_13 0x80584
+
+#define mmMME2_RTR_LBW_RANGE_BASE_14 0x80588
+
+#define mmMME2_RTR_LBW_RANGE_BASE_15 0x8058C
+
+#define mmMME2_RTR_RGLTR 0x80590
+
+#define mmMME2_RTR_RGLTR_WR_RESULT 0x80594
+
+#define mmMME2_RTR_RGLTR_RD_RESULT 0x80598
+
+#define mmMME2_RTR_SCRAMB_EN 0x80600
+
+#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604
+
+#endif /* ASIC_REG_MME2_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
new file mode 100644
index 000000000000..b78f8bc387fc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME3_RTR_REGS_H_
+#define ASIC_REG_MME3_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME3_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME3_RTR_HBW_RD_RQ_E_ARB 0xC0100
+
+#define mmMME3_RTR_HBW_RD_RQ_W_ARB 0xC0104
+
+#define mmMME3_RTR_HBW_RD_RQ_N_ARB 0xC0108
+
+#define mmMME3_RTR_HBW_RD_RQ_S_ARB 0xC010C
+
+#define mmMME3_RTR_HBW_RD_RQ_L_ARB 0xC0110
+
+#define mmMME3_RTR_HBW_E_ARB_MAX 0xC0120
+
+#define mmMME3_RTR_HBW_W_ARB_MAX 0xC0124
+
+#define mmMME3_RTR_HBW_N_ARB_MAX 0xC0128
+
+#define mmMME3_RTR_HBW_S_ARB_MAX 0xC012C
+
+#define mmMME3_RTR_HBW_L_ARB_MAX 0xC0130
+
+#define mmMME3_RTR_HBW_RD_RS_MAX_CREDIT 0xC0140
+
+#define mmMME3_RTR_HBW_WR_RQ_MAX_CREDIT 0xC0144
+
+#define mmMME3_RTR_HBW_RD_RQ_MAX_CREDIT 0xC0148
+
+#define mmMME3_RTR_HBW_RD_RS_E_ARB 0xC0150
+
+#define mmMME3_RTR_HBW_RD_RS_W_ARB 0xC0154
+
+#define mmMME3_RTR_HBW_RD_RS_N_ARB 0xC0158
+
+#define mmMME3_RTR_HBW_RD_RS_S_ARB 0xC015C
+
+#define mmMME3_RTR_HBW_RD_RS_L_ARB 0xC0160
+
+#define mmMME3_RTR_HBW_WR_RQ_E_ARB 0xC0170
+
+#define mmMME3_RTR_HBW_WR_RQ_W_ARB 0xC0174
+
+#define mmMME3_RTR_HBW_WR_RQ_N_ARB 0xC0178
+
+#define mmMME3_RTR_HBW_WR_RQ_S_ARB 0xC017C
+
+#define mmMME3_RTR_HBW_WR_RQ_L_ARB 0xC0180
+
+#define mmMME3_RTR_HBW_WR_RS_E_ARB 0xC0190
+
+#define mmMME3_RTR_HBW_WR_RS_W_ARB 0xC0194
+
+#define mmMME3_RTR_HBW_WR_RS_N_ARB 0xC0198
+
+#define mmMME3_RTR_HBW_WR_RS_S_ARB 0xC019C
+
+#define mmMME3_RTR_HBW_WR_RS_L_ARB 0xC01A0
+
+#define mmMME3_RTR_LBW_RD_RQ_E_ARB 0xC0200
+
+#define mmMME3_RTR_LBW_RD_RQ_W_ARB 0xC0204
+
+#define mmMME3_RTR_LBW_RD_RQ_N_ARB 0xC0208
+
+#define mmMME3_RTR_LBW_RD_RQ_S_ARB 0xC020C
+
+#define mmMME3_RTR_LBW_RD_RQ_L_ARB 0xC0210
+
+#define mmMME3_RTR_LBW_E_ARB_MAX 0xC0220
+
+#define mmMME3_RTR_LBW_W_ARB_MAX 0xC0224
+
+#define mmMME3_RTR_LBW_N_ARB_MAX 0xC0228
+
+#define mmMME3_RTR_LBW_S_ARB_MAX 0xC022C
+
+#define mmMME3_RTR_LBW_L_ARB_MAX 0xC0230
+
+#define mmMME3_RTR_LBW_SRAM_MAX_CREDIT 0xC0240
+
+#define mmMME3_RTR_LBW_RD_RS_E_ARB 0xC0250
+
+#define mmMME3_RTR_LBW_RD_RS_W_ARB 0xC0254
+
+#define mmMME3_RTR_LBW_RD_RS_N_ARB 0xC0258
+
+#define mmMME3_RTR_LBW_RD_RS_S_ARB 0xC025C
+
+#define mmMME3_RTR_LBW_RD_RS_L_ARB 0xC0260
+
+#define mmMME3_RTR_LBW_WR_RQ_E_ARB 0xC0270
+
+#define mmMME3_RTR_LBW_WR_RQ_W_ARB 0xC0274
+
+#define mmMME3_RTR_LBW_WR_RQ_N_ARB 0xC0278
+
+#define mmMME3_RTR_LBW_WR_RQ_S_ARB 0xC027C
+
+#define mmMME3_RTR_LBW_WR_RQ_L_ARB 0xC0280
+
+#define mmMME3_RTR_LBW_WR_RS_E_ARB 0xC0290
+
+#define mmMME3_RTR_LBW_WR_RS_W_ARB 0xC0294
+
+#define mmMME3_RTR_LBW_WR_RS_N_ARB 0xC0298
+
+#define mmMME3_RTR_LBW_WR_RS_S_ARB 0xC029C
+
+#define mmMME3_RTR_LBW_WR_RS_L_ARB 0xC02A0
+
+#define mmMME3_RTR_DBG_E_ARB 0xC0300
+
+#define mmMME3_RTR_DBG_W_ARB 0xC0304
+
+#define mmMME3_RTR_DBG_N_ARB 0xC0308
+
+#define mmMME3_RTR_DBG_S_ARB 0xC030C
+
+#define mmMME3_RTR_DBG_L_ARB 0xC0310
+
+#define mmMME3_RTR_DBG_E_ARB_MAX 0xC0320
+
+#define mmMME3_RTR_DBG_W_ARB_MAX 0xC0324
+
+#define mmMME3_RTR_DBG_N_ARB_MAX 0xC0328
+
+#define mmMME3_RTR_DBG_S_ARB_MAX 0xC032C
+
+#define mmMME3_RTR_DBG_L_ARB_MAX 0xC0330
+
+#define mmMME3_RTR_SPLIT_COEF_0 0xC0400
+
+#define mmMME3_RTR_SPLIT_COEF_1 0xC0404
+
+#define mmMME3_RTR_SPLIT_COEF_2 0xC0408
+
+#define mmMME3_RTR_SPLIT_COEF_3 0xC040C
+
+#define mmMME3_RTR_SPLIT_COEF_4 0xC0410
+
+#define mmMME3_RTR_SPLIT_COEF_5 0xC0414
+
+#define mmMME3_RTR_SPLIT_COEF_6 0xC0418
+
+#define mmMME3_RTR_SPLIT_COEF_7 0xC041C
+
+#define mmMME3_RTR_SPLIT_COEF_8 0xC0420
+
+#define mmMME3_RTR_SPLIT_COEF_9 0xC0424
+
+#define mmMME3_RTR_SPLIT_CFG 0xC0440
+
+#define mmMME3_RTR_SPLIT_RD_SAT 0xC0444
+
+#define mmMME3_RTR_SPLIT_RD_RST_TOKEN 0xC0448
+
+#define mmMME3_RTR_SPLIT_RD_TIMEOUT_0 0xC044C
+
+#define mmMME3_RTR_SPLIT_RD_TIMEOUT_1 0xC0450
+
+#define mmMME3_RTR_SPLIT_WR_SAT 0xC0454
+
+#define mmMME3_RTR_WPLIT_WR_TST_TOLEN 0xC0458
+
+#define mmMME3_RTR_SPLIT_WR_TIMEOUT_0 0xC045C
+
+#define mmMME3_RTR_SPLIT_WR_TIMEOUT_1 0xC0460
+
+#define mmMME3_RTR_HBW_RANGE_HIT 0xC0470
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_0 0xC0480
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_1 0xC0484
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_2 0xC0488
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_3 0xC048C
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_4 0xC0490
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_5 0xC0494
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_6 0xC0498
+
+#define mmMME3_RTR_HBW_RANGE_MASK_L_7 0xC049C
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_0 0xC04A0
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_1 0xC04A4
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_2 0xC04A8
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_3 0xC04AC
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_4 0xC04B0
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_5 0xC04B4
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_6 0xC04B8
+
+#define mmMME3_RTR_HBW_RANGE_MASK_H_7 0xC04BC
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_0 0xC04C0
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_1 0xC04C4
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_2 0xC04C8
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_3 0xC04CC
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_4 0xC04D0
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_5 0xC04D4
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_6 0xC04D8
+
+#define mmMME3_RTR_HBW_RANGE_BASE_L_7 0xC04DC
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_0 0xC04E0
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_1 0xC04E4
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_2 0xC04E8
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_3 0xC04EC
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_4 0xC04F0
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_5 0xC04F4
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_6 0xC04F8
+
+#define mmMME3_RTR_HBW_RANGE_BASE_H_7 0xC04FC
+
+#define mmMME3_RTR_LBW_RANGE_HIT 0xC0500
+
+#define mmMME3_RTR_LBW_RANGE_MASK_0 0xC0510
+
+#define mmMME3_RTR_LBW_RANGE_MASK_1 0xC0514
+
+#define mmMME3_RTR_LBW_RANGE_MASK_2 0xC0518
+
+#define mmMME3_RTR_LBW_RANGE_MASK_3 0xC051C
+
+#define mmMME3_RTR_LBW_RANGE_MASK_4 0xC0520
+
+#define mmMME3_RTR_LBW_RANGE_MASK_5 0xC0524
+
+#define mmMME3_RTR_LBW_RANGE_MASK_6 0xC0528
+
+#define mmMME3_RTR_LBW_RANGE_MASK_7 0xC052C
+
+#define mmMME3_RTR_LBW_RANGE_MASK_8 0xC0530
+
+#define mmMME3_RTR_LBW_RANGE_MASK_9 0xC0534
+
+#define mmMME3_RTR_LBW_RANGE_MASK_10 0xC0538
+
+#define mmMME3_RTR_LBW_RANGE_MASK_11 0xC053C
+
+#define mmMME3_RTR_LBW_RANGE_MASK_12 0xC0540
+
+#define mmMME3_RTR_LBW_RANGE_MASK_13 0xC0544
+
+#define mmMME3_RTR_LBW_RANGE_MASK_14 0xC0548
+
+#define mmMME3_RTR_LBW_RANGE_MASK_15 0xC054C
+
+#define mmMME3_RTR_LBW_RANGE_BASE_0 0xC0550
+
+#define mmMME3_RTR_LBW_RANGE_BASE_1 0xC0554
+
+#define mmMME3_RTR_LBW_RANGE_BASE_2 0xC0558
+
+#define mmMME3_RTR_LBW_RANGE_BASE_3 0xC055C
+
+#define mmMME3_RTR_LBW_RANGE_BASE_4 0xC0560
+
+#define mmMME3_RTR_LBW_RANGE_BASE_5 0xC0564
+
+#define mmMME3_RTR_LBW_RANGE_BASE_6 0xC0568
+
+#define mmMME3_RTR_LBW_RANGE_BASE_7 0xC056C
+
+#define mmMME3_RTR_LBW_RANGE_BASE_8 0xC0570
+
+#define mmMME3_RTR_LBW_RANGE_BASE_9 0xC0574
+
+#define mmMME3_RTR_LBW_RANGE_BASE_10 0xC0578
+
+#define mmMME3_RTR_LBW_RANGE_BASE_11 0xC057C
+
+#define mmMME3_RTR_LBW_RANGE_BASE_12 0xC0580
+
+#define mmMME3_RTR_LBW_RANGE_BASE_13 0xC0584
+
+#define mmMME3_RTR_LBW_RANGE_BASE_14 0xC0588
+
+#define mmMME3_RTR_LBW_RANGE_BASE_15 0xC058C
+
+#define mmMME3_RTR_RGLTR 0xC0590
+
+#define mmMME3_RTR_RGLTR_WR_RESULT 0xC0594
+
+#define mmMME3_RTR_RGLTR_RD_RESULT 0xC0598
+
+#define mmMME3_RTR_SCRAMB_EN 0xC0600
+
+#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604
+
+#endif /* ASIC_REG_MME3_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
new file mode 100644
index 000000000000..d9a4a02cefa3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME4_RTR_REGS_H_
+#define ASIC_REG_MME4_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME4_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME4_RTR_HBW_RD_RQ_E_ARB 0x100100
+
+#define mmMME4_RTR_HBW_RD_RQ_W_ARB 0x100104
+
+#define mmMME4_RTR_HBW_RD_RQ_N_ARB 0x100108
+
+#define mmMME4_RTR_HBW_RD_RQ_S_ARB 0x10010C
+
+#define mmMME4_RTR_HBW_RD_RQ_L_ARB 0x100110
+
+#define mmMME4_RTR_HBW_E_ARB_MAX 0x100120
+
+#define mmMME4_RTR_HBW_W_ARB_MAX 0x100124
+
+#define mmMME4_RTR_HBW_N_ARB_MAX 0x100128
+
+#define mmMME4_RTR_HBW_S_ARB_MAX 0x10012C
+
+#define mmMME4_RTR_HBW_L_ARB_MAX 0x100130
+
+#define mmMME4_RTR_HBW_RD_RS_MAX_CREDIT 0x100140
+
+#define mmMME4_RTR_HBW_WR_RQ_MAX_CREDIT 0x100144
+
+#define mmMME4_RTR_HBW_RD_RQ_MAX_CREDIT 0x100148
+
+#define mmMME4_RTR_HBW_RD_RS_E_ARB 0x100150
+
+#define mmMME4_RTR_HBW_RD_RS_W_ARB 0x100154
+
+#define mmMME4_RTR_HBW_RD_RS_N_ARB 0x100158
+
+#define mmMME4_RTR_HBW_RD_RS_S_ARB 0x10015C
+
+#define mmMME4_RTR_HBW_RD_RS_L_ARB 0x100160
+
+#define mmMME4_RTR_HBW_WR_RQ_E_ARB 0x100170
+
+#define mmMME4_RTR_HBW_WR_RQ_W_ARB 0x100174
+
+#define mmMME4_RTR_HBW_WR_RQ_N_ARB 0x100178
+
+#define mmMME4_RTR_HBW_WR_RQ_S_ARB 0x10017C
+
+#define mmMME4_RTR_HBW_WR_RQ_L_ARB 0x100180
+
+#define mmMME4_RTR_HBW_WR_RS_E_ARB 0x100190
+
+#define mmMME4_RTR_HBW_WR_RS_W_ARB 0x100194
+
+#define mmMME4_RTR_HBW_WR_RS_N_ARB 0x100198
+
+#define mmMME4_RTR_HBW_WR_RS_S_ARB 0x10019C
+
+#define mmMME4_RTR_HBW_WR_RS_L_ARB 0x1001A0
+
+#define mmMME4_RTR_LBW_RD_RQ_E_ARB 0x100200
+
+#define mmMME4_RTR_LBW_RD_RQ_W_ARB 0x100204
+
+#define mmMME4_RTR_LBW_RD_RQ_N_ARB 0x100208
+
+#define mmMME4_RTR_LBW_RD_RQ_S_ARB 0x10020C
+
+#define mmMME4_RTR_LBW_RD_RQ_L_ARB 0x100210
+
+#define mmMME4_RTR_LBW_E_ARB_MAX 0x100220
+
+#define mmMME4_RTR_LBW_W_ARB_MAX 0x100224
+
+#define mmMME4_RTR_LBW_N_ARB_MAX 0x100228
+
+#define mmMME4_RTR_LBW_S_ARB_MAX 0x10022C
+
+#define mmMME4_RTR_LBW_L_ARB_MAX 0x100230
+
+#define mmMME4_RTR_LBW_SRAM_MAX_CREDIT 0x100240
+
+#define mmMME4_RTR_LBW_RD_RS_E_ARB 0x100250
+
+#define mmMME4_RTR_LBW_RD_RS_W_ARB 0x100254
+
+#define mmMME4_RTR_LBW_RD_RS_N_ARB 0x100258
+
+#define mmMME4_RTR_LBW_RD_RS_S_ARB 0x10025C
+
+#define mmMME4_RTR_LBW_RD_RS_L_ARB 0x100260
+
+#define mmMME4_RTR_LBW_WR_RQ_E_ARB 0x100270
+
+#define mmMME4_RTR_LBW_WR_RQ_W_ARB 0x100274
+
+#define mmMME4_RTR_LBW_WR_RQ_N_ARB 0x100278
+
+#define mmMME4_RTR_LBW_WR_RQ_S_ARB 0x10027C
+
+#define mmMME4_RTR_LBW_WR_RQ_L_ARB 0x100280
+
+#define mmMME4_RTR_LBW_WR_RS_E_ARB 0x100290
+
+#define mmMME4_RTR_LBW_WR_RS_W_ARB 0x100294
+
+#define mmMME4_RTR_LBW_WR_RS_N_ARB 0x100298
+
+#define mmMME4_RTR_LBW_WR_RS_S_ARB 0x10029C
+
+#define mmMME4_RTR_LBW_WR_RS_L_ARB 0x1002A0
+
+#define mmMME4_RTR_DBG_E_ARB 0x100300
+
+#define mmMME4_RTR_DBG_W_ARB 0x100304
+
+#define mmMME4_RTR_DBG_N_ARB 0x100308
+
+#define mmMME4_RTR_DBG_S_ARB 0x10030C
+
+#define mmMME4_RTR_DBG_L_ARB 0x100310
+
+#define mmMME4_RTR_DBG_E_ARB_MAX 0x100320
+
+#define mmMME4_RTR_DBG_W_ARB_MAX 0x100324
+
+#define mmMME4_RTR_DBG_N_ARB_MAX 0x100328
+
+#define mmMME4_RTR_DBG_S_ARB_MAX 0x10032C
+
+#define mmMME4_RTR_DBG_L_ARB_MAX 0x100330
+
+#define mmMME4_RTR_SPLIT_COEF_0 0x100400
+
+#define mmMME4_RTR_SPLIT_COEF_1 0x100404
+
+#define mmMME4_RTR_SPLIT_COEF_2 0x100408
+
+#define mmMME4_RTR_SPLIT_COEF_3 0x10040C
+
+#define mmMME4_RTR_SPLIT_COEF_4 0x100410
+
+#define mmMME4_RTR_SPLIT_COEF_5 0x100414
+
+#define mmMME4_RTR_SPLIT_COEF_6 0x100418
+
+#define mmMME4_RTR_SPLIT_COEF_7 0x10041C
+
+#define mmMME4_RTR_SPLIT_COEF_8 0x100420
+
+#define mmMME4_RTR_SPLIT_COEF_9 0x100424
+
+#define mmMME4_RTR_SPLIT_CFG 0x100440
+
+#define mmMME4_RTR_SPLIT_RD_SAT 0x100444
+
+#define mmMME4_RTR_SPLIT_RD_RST_TOKEN 0x100448
+
+#define mmMME4_RTR_SPLIT_RD_TIMEOUT_0 0x10044C
+
+#define mmMME4_RTR_SPLIT_RD_TIMEOUT_1 0x100450
+
+#define mmMME4_RTR_SPLIT_WR_SAT 0x100454
+
+#define mmMME4_RTR_WPLIT_WR_TST_TOLEN 0x100458
+
+#define mmMME4_RTR_SPLIT_WR_TIMEOUT_0 0x10045C
+
+#define mmMME4_RTR_SPLIT_WR_TIMEOUT_1 0x100460
+
+#define mmMME4_RTR_HBW_RANGE_HIT 0x100470
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_0 0x100480
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_1 0x100484
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_2 0x100488
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_3 0x10048C
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_4 0x100490
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_5 0x100494
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_6 0x100498
+
+#define mmMME4_RTR_HBW_RANGE_MASK_L_7 0x10049C
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_0 0x1004A0
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_1 0x1004A4
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_2 0x1004A8
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_3 0x1004AC
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_4 0x1004B0
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_5 0x1004B4
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_6 0x1004B8
+
+#define mmMME4_RTR_HBW_RANGE_MASK_H_7 0x1004BC
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_0 0x1004C0
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_1 0x1004C4
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_2 0x1004C8
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_3 0x1004CC
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_4 0x1004D0
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_5 0x1004D4
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_6 0x1004D8
+
+#define mmMME4_RTR_HBW_RANGE_BASE_L_7 0x1004DC
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_0 0x1004E0
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_1 0x1004E4
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_2 0x1004E8
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_3 0x1004EC
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_4 0x1004F0
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_5 0x1004F4
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_6 0x1004F8
+
+#define mmMME4_RTR_HBW_RANGE_BASE_H_7 0x1004FC
+
+#define mmMME4_RTR_LBW_RANGE_HIT 0x100500
+
+#define mmMME4_RTR_LBW_RANGE_MASK_0 0x100510
+
+#define mmMME4_RTR_LBW_RANGE_MASK_1 0x100514
+
+#define mmMME4_RTR_LBW_RANGE_MASK_2 0x100518
+
+#define mmMME4_RTR_LBW_RANGE_MASK_3 0x10051C
+
+#define mmMME4_RTR_LBW_RANGE_MASK_4 0x100520
+
+#define mmMME4_RTR_LBW_RANGE_MASK_5 0x100524
+
+#define mmMME4_RTR_LBW_RANGE_MASK_6 0x100528
+
+#define mmMME4_RTR_LBW_RANGE_MASK_7 0x10052C
+
+#define mmMME4_RTR_LBW_RANGE_MASK_8 0x100530
+
+#define mmMME4_RTR_LBW_RANGE_MASK_9 0x100534
+
+#define mmMME4_RTR_LBW_RANGE_MASK_10 0x100538
+
+#define mmMME4_RTR_LBW_RANGE_MASK_11 0x10053C
+
+#define mmMME4_RTR_LBW_RANGE_MASK_12 0x100540
+
+#define mmMME4_RTR_LBW_RANGE_MASK_13 0x100544
+
+#define mmMME4_RTR_LBW_RANGE_MASK_14 0x100548
+
+#define mmMME4_RTR_LBW_RANGE_MASK_15 0x10054C
+
+#define mmMME4_RTR_LBW_RANGE_BASE_0 0x100550
+
+#define mmMME4_RTR_LBW_RANGE_BASE_1 0x100554
+
+#define mmMME4_RTR_LBW_RANGE_BASE_2 0x100558
+
+#define mmMME4_RTR_LBW_RANGE_BASE_3 0x10055C
+
+#define mmMME4_RTR_LBW_RANGE_BASE_4 0x100560
+
+#define mmMME4_RTR_LBW_RANGE_BASE_5 0x100564
+
+#define mmMME4_RTR_LBW_RANGE_BASE_6 0x100568
+
+#define mmMME4_RTR_LBW_RANGE_BASE_7 0x10056C
+
+#define mmMME4_RTR_LBW_RANGE_BASE_8 0x100570
+
+#define mmMME4_RTR_LBW_RANGE_BASE_9 0x100574
+
+#define mmMME4_RTR_LBW_RANGE_BASE_10 0x100578
+
+#define mmMME4_RTR_LBW_RANGE_BASE_11 0x10057C
+
+#define mmMME4_RTR_LBW_RANGE_BASE_12 0x100580
+
+#define mmMME4_RTR_LBW_RANGE_BASE_13 0x100584
+
+#define mmMME4_RTR_LBW_RANGE_BASE_14 0x100588
+
+#define mmMME4_RTR_LBW_RANGE_BASE_15 0x10058C
+
+#define mmMME4_RTR_RGLTR 0x100590
+
+#define mmMME4_RTR_RGLTR_WR_RESULT 0x100594
+
+#define mmMME4_RTR_RGLTR_RD_RESULT 0x100598
+
+#define mmMME4_RTR_SCRAMB_EN 0x100600
+
+#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604
+
+#endif /* ASIC_REG_MME4_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
new file mode 100644
index 000000000000..205adc988407
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME5_RTR_REGS_H_
+#define ASIC_REG_MME5_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME5_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME5_RTR_HBW_RD_RQ_E_ARB 0x140100
+
+#define mmMME5_RTR_HBW_RD_RQ_W_ARB 0x140104
+
+#define mmMME5_RTR_HBW_RD_RQ_N_ARB 0x140108
+
+#define mmMME5_RTR_HBW_RD_RQ_S_ARB 0x14010C
+
+#define mmMME5_RTR_HBW_RD_RQ_L_ARB 0x140110
+
+#define mmMME5_RTR_HBW_E_ARB_MAX 0x140120
+
+#define mmMME5_RTR_HBW_W_ARB_MAX 0x140124
+
+#define mmMME5_RTR_HBW_N_ARB_MAX 0x140128
+
+#define mmMME5_RTR_HBW_S_ARB_MAX 0x14012C
+
+#define mmMME5_RTR_HBW_L_ARB_MAX 0x140130
+
+#define mmMME5_RTR_HBW_RD_RS_MAX_CREDIT 0x140140
+
+#define mmMME5_RTR_HBW_WR_RQ_MAX_CREDIT 0x140144
+
+#define mmMME5_RTR_HBW_RD_RQ_MAX_CREDIT 0x140148
+
+#define mmMME5_RTR_HBW_RD_RS_E_ARB 0x140150
+
+#define mmMME5_RTR_HBW_RD_RS_W_ARB 0x140154
+
+#define mmMME5_RTR_HBW_RD_RS_N_ARB 0x140158
+
+#define mmMME5_RTR_HBW_RD_RS_S_ARB 0x14015C
+
+#define mmMME5_RTR_HBW_RD_RS_L_ARB 0x140160
+
+#define mmMME5_RTR_HBW_WR_RQ_E_ARB 0x140170
+
+#define mmMME5_RTR_HBW_WR_RQ_W_ARB 0x140174
+
+#define mmMME5_RTR_HBW_WR_RQ_N_ARB 0x140178
+
+#define mmMME5_RTR_HBW_WR_RQ_S_ARB 0x14017C
+
+#define mmMME5_RTR_HBW_WR_RQ_L_ARB 0x140180
+
+#define mmMME5_RTR_HBW_WR_RS_E_ARB 0x140190
+
+#define mmMME5_RTR_HBW_WR_RS_W_ARB 0x140194
+
+#define mmMME5_RTR_HBW_WR_RS_N_ARB 0x140198
+
+#define mmMME5_RTR_HBW_WR_RS_S_ARB 0x14019C
+
+#define mmMME5_RTR_HBW_WR_RS_L_ARB 0x1401A0
+
+#define mmMME5_RTR_LBW_RD_RQ_E_ARB 0x140200
+
+#define mmMME5_RTR_LBW_RD_RQ_W_ARB 0x140204
+
+#define mmMME5_RTR_LBW_RD_RQ_N_ARB 0x140208
+
+#define mmMME5_RTR_LBW_RD_RQ_S_ARB 0x14020C
+
+#define mmMME5_RTR_LBW_RD_RQ_L_ARB 0x140210
+
+#define mmMME5_RTR_LBW_E_ARB_MAX 0x140220
+
+#define mmMME5_RTR_LBW_W_ARB_MAX 0x140224
+
+#define mmMME5_RTR_LBW_N_ARB_MAX 0x140228
+
+#define mmMME5_RTR_LBW_S_ARB_MAX 0x14022C
+
+#define mmMME5_RTR_LBW_L_ARB_MAX 0x140230
+
+#define mmMME5_RTR_LBW_SRAM_MAX_CREDIT 0x140240
+
+#define mmMME5_RTR_LBW_RD_RS_E_ARB 0x140250
+
+#define mmMME5_RTR_LBW_RD_RS_W_ARB 0x140254
+
+#define mmMME5_RTR_LBW_RD_RS_N_ARB 0x140258
+
+#define mmMME5_RTR_LBW_RD_RS_S_ARB 0x14025C
+
+#define mmMME5_RTR_LBW_RD_RS_L_ARB 0x140260
+
+#define mmMME5_RTR_LBW_WR_RQ_E_ARB 0x140270
+
+#define mmMME5_RTR_LBW_WR_RQ_W_ARB 0x140274
+
+#define mmMME5_RTR_LBW_WR_RQ_N_ARB 0x140278
+
+#define mmMME5_RTR_LBW_WR_RQ_S_ARB 0x14027C
+
+#define mmMME5_RTR_LBW_WR_RQ_L_ARB 0x140280
+
+#define mmMME5_RTR_LBW_WR_RS_E_ARB 0x140290
+
+#define mmMME5_RTR_LBW_WR_RS_W_ARB 0x140294
+
+#define mmMME5_RTR_LBW_WR_RS_N_ARB 0x140298
+
+#define mmMME5_RTR_LBW_WR_RS_S_ARB 0x14029C
+
+#define mmMME5_RTR_LBW_WR_RS_L_ARB 0x1402A0
+
+#define mmMME5_RTR_DBG_E_ARB 0x140300
+
+#define mmMME5_RTR_DBG_W_ARB 0x140304
+
+#define mmMME5_RTR_DBG_N_ARB 0x140308
+
+#define mmMME5_RTR_DBG_S_ARB 0x14030C
+
+#define mmMME5_RTR_DBG_L_ARB 0x140310
+
+#define mmMME5_RTR_DBG_E_ARB_MAX 0x140320
+
+#define mmMME5_RTR_DBG_W_ARB_MAX 0x140324
+
+#define mmMME5_RTR_DBG_N_ARB_MAX 0x140328
+
+#define mmMME5_RTR_DBG_S_ARB_MAX 0x14032C
+
+#define mmMME5_RTR_DBG_L_ARB_MAX 0x140330
+
+#define mmMME5_RTR_SPLIT_COEF_0 0x140400
+
+#define mmMME5_RTR_SPLIT_COEF_1 0x140404
+
+#define mmMME5_RTR_SPLIT_COEF_2 0x140408
+
+#define mmMME5_RTR_SPLIT_COEF_3 0x14040C
+
+#define mmMME5_RTR_SPLIT_COEF_4 0x140410
+
+#define mmMME5_RTR_SPLIT_COEF_5 0x140414
+
+#define mmMME5_RTR_SPLIT_COEF_6 0x140418
+
+#define mmMME5_RTR_SPLIT_COEF_7 0x14041C
+
+#define mmMME5_RTR_SPLIT_COEF_8 0x140420
+
+#define mmMME5_RTR_SPLIT_COEF_9 0x140424
+
+#define mmMME5_RTR_SPLIT_CFG 0x140440
+
+#define mmMME5_RTR_SPLIT_RD_SAT 0x140444
+
+#define mmMME5_RTR_SPLIT_RD_RST_TOKEN 0x140448
+
+#define mmMME5_RTR_SPLIT_RD_TIMEOUT_0 0x14044C
+
+#define mmMME5_RTR_SPLIT_RD_TIMEOUT_1 0x140450
+
+#define mmMME5_RTR_SPLIT_WR_SAT 0x140454
+
+#define mmMME5_RTR_WPLIT_WR_TST_TOLEN 0x140458
+
+#define mmMME5_RTR_SPLIT_WR_TIMEOUT_0 0x14045C
+
+#define mmMME5_RTR_SPLIT_WR_TIMEOUT_1 0x140460
+
+#define mmMME5_RTR_HBW_RANGE_HIT 0x140470
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_0 0x140480
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_1 0x140484
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_2 0x140488
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_3 0x14048C
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_4 0x140490
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_5 0x140494
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_6 0x140498
+
+#define mmMME5_RTR_HBW_RANGE_MASK_L_7 0x14049C
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_0 0x1404A0
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_1 0x1404A4
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_2 0x1404A8
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_3 0x1404AC
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_4 0x1404B0
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_5 0x1404B4
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_6 0x1404B8
+
+#define mmMME5_RTR_HBW_RANGE_MASK_H_7 0x1404BC
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_0 0x1404C0
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_1 0x1404C4
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_2 0x1404C8
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_3 0x1404CC
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_4 0x1404D0
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_5 0x1404D4
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_6 0x1404D8
+
+#define mmMME5_RTR_HBW_RANGE_BASE_L_7 0x1404DC
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_0 0x1404E0
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_1 0x1404E4
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_2 0x1404E8
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_3 0x1404EC
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_4 0x1404F0
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_5 0x1404F4
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_6 0x1404F8
+
+#define mmMME5_RTR_HBW_RANGE_BASE_H_7 0x1404FC
+
+#define mmMME5_RTR_LBW_RANGE_HIT 0x140500
+
+#define mmMME5_RTR_LBW_RANGE_MASK_0 0x140510
+
+#define mmMME5_RTR_LBW_RANGE_MASK_1 0x140514
+
+#define mmMME5_RTR_LBW_RANGE_MASK_2 0x140518
+
+#define mmMME5_RTR_LBW_RANGE_MASK_3 0x14051C
+
+#define mmMME5_RTR_LBW_RANGE_MASK_4 0x140520
+
+#define mmMME5_RTR_LBW_RANGE_MASK_5 0x140524
+
+#define mmMME5_RTR_LBW_RANGE_MASK_6 0x140528
+
+#define mmMME5_RTR_LBW_RANGE_MASK_7 0x14052C
+
+#define mmMME5_RTR_LBW_RANGE_MASK_8 0x140530
+
+#define mmMME5_RTR_LBW_RANGE_MASK_9 0x140534
+
+#define mmMME5_RTR_LBW_RANGE_MASK_10 0x140538
+
+#define mmMME5_RTR_LBW_RANGE_MASK_11 0x14053C
+
+#define mmMME5_RTR_LBW_RANGE_MASK_12 0x140540
+
+#define mmMME5_RTR_LBW_RANGE_MASK_13 0x140544
+
+#define mmMME5_RTR_LBW_RANGE_MASK_14 0x140548
+
+#define mmMME5_RTR_LBW_RANGE_MASK_15 0x14054C
+
+#define mmMME5_RTR_LBW_RANGE_BASE_0 0x140550
+
+#define mmMME5_RTR_LBW_RANGE_BASE_1 0x140554
+
+#define mmMME5_RTR_LBW_RANGE_BASE_2 0x140558
+
+#define mmMME5_RTR_LBW_RANGE_BASE_3 0x14055C
+
+#define mmMME5_RTR_LBW_RANGE_BASE_4 0x140560
+
+#define mmMME5_RTR_LBW_RANGE_BASE_5 0x140564
+
+#define mmMME5_RTR_LBW_RANGE_BASE_6 0x140568
+
+#define mmMME5_RTR_LBW_RANGE_BASE_7 0x14056C
+
+#define mmMME5_RTR_LBW_RANGE_BASE_8 0x140570
+
+#define mmMME5_RTR_LBW_RANGE_BASE_9 0x140574
+
+#define mmMME5_RTR_LBW_RANGE_BASE_10 0x140578
+
+#define mmMME5_RTR_LBW_RANGE_BASE_11 0x14057C
+
+#define mmMME5_RTR_LBW_RANGE_BASE_12 0x140580
+
+#define mmMME5_RTR_LBW_RANGE_BASE_13 0x140584
+
+#define mmMME5_RTR_LBW_RANGE_BASE_14 0x140588
+
+#define mmMME5_RTR_LBW_RANGE_BASE_15 0x14058C
+
+#define mmMME5_RTR_RGLTR 0x140590
+
+#define mmMME5_RTR_RGLTR_WR_RESULT 0x140594
+
+#define mmMME5_RTR_RGLTR_RD_RESULT 0x140598
+
+#define mmMME5_RTR_SCRAMB_EN 0x140600
+
+#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604
+
+#endif /* ASIC_REG_MME5_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
new file mode 100644
index 000000000000..fcec68388278
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME6_RTR_REGS_H_
+#define ASIC_REG_MME6_RTR_REGS_H_
+
+/*
+ *****************************************
+ * MME6_RTR (Prototype: MME_RTR)
+ *****************************************
+ */
+
+#define mmMME6_RTR_HBW_RD_RQ_E_ARB 0x180100
+
+#define mmMME6_RTR_HBW_RD_RQ_W_ARB 0x180104
+
+#define mmMME6_RTR_HBW_RD_RQ_N_ARB 0x180108
+
+#define mmMME6_RTR_HBW_RD_RQ_S_ARB 0x18010C
+
+#define mmMME6_RTR_HBW_RD_RQ_L_ARB 0x180110
+
+#define mmMME6_RTR_HBW_E_ARB_MAX 0x180120
+
+#define mmMME6_RTR_HBW_W_ARB_MAX 0x180124
+
+#define mmMME6_RTR_HBW_N_ARB_MAX 0x180128
+
+#define mmMME6_RTR_HBW_S_ARB_MAX 0x18012C
+
+#define mmMME6_RTR_HBW_L_ARB_MAX 0x180130
+
+#define mmMME6_RTR_HBW_RD_RS_MAX_CREDIT 0x180140
+
+#define mmMME6_RTR_HBW_WR_RQ_MAX_CREDIT 0x180144
+
+#define mmMME6_RTR_HBW_RD_RQ_MAX_CREDIT 0x180148
+
+#define mmMME6_RTR_HBW_RD_RS_E_ARB 0x180150
+
+#define mmMME6_RTR_HBW_RD_RS_W_ARB 0x180154
+
+#define mmMME6_RTR_HBW_RD_RS_N_ARB 0x180158
+
+#define mmMME6_RTR_HBW_RD_RS_S_ARB 0x18015C
+
+#define mmMME6_RTR_HBW_RD_RS_L_ARB 0x180160
+
+#define mmMME6_RTR_HBW_WR_RQ_E_ARB 0x180170
+
+#define mmMME6_RTR_HBW_WR_RQ_W_ARB 0x180174
+
+#define mmMME6_RTR_HBW_WR_RQ_N_ARB 0x180178
+
+#define mmMME6_RTR_HBW_WR_RQ_S_ARB 0x18017C
+
+#define mmMME6_RTR_HBW_WR_RQ_L_ARB 0x180180
+
+#define mmMME6_RTR_HBW_WR_RS_E_ARB 0x180190
+
+#define mmMME6_RTR_HBW_WR_RS_W_ARB 0x180194
+
+#define mmMME6_RTR_HBW_WR_RS_N_ARB 0x180198
+
+#define mmMME6_RTR_HBW_WR_RS_S_ARB 0x18019C
+
+#define mmMME6_RTR_HBW_WR_RS_L_ARB 0x1801A0
+
+#define mmMME6_RTR_LBW_RD_RQ_E_ARB 0x180200
+
+#define mmMME6_RTR_LBW_RD_RQ_W_ARB 0x180204
+
+#define mmMME6_RTR_LBW_RD_RQ_N_ARB 0x180208
+
+#define mmMME6_RTR_LBW_RD_RQ_S_ARB 0x18020C
+
+#define mmMME6_RTR_LBW_RD_RQ_L_ARB 0x180210
+
+#define mmMME6_RTR_LBW_E_ARB_MAX 0x180220
+
+#define mmMME6_RTR_LBW_W_ARB_MAX 0x180224
+
+#define mmMME6_RTR_LBW_N_ARB_MAX 0x180228
+
+#define mmMME6_RTR_LBW_S_ARB_MAX 0x18022C
+
+#define mmMME6_RTR_LBW_L_ARB_MAX 0x180230
+
+#define mmMME6_RTR_LBW_SRAM_MAX_CREDIT 0x180240
+
+#define mmMME6_RTR_LBW_RD_RS_E_ARB 0x180250
+
+#define mmMME6_RTR_LBW_RD_RS_W_ARB 0x180254
+
+#define mmMME6_RTR_LBW_RD_RS_N_ARB 0x180258
+
+#define mmMME6_RTR_LBW_RD_RS_S_ARB 0x18025C
+
+#define mmMME6_RTR_LBW_RD_RS_L_ARB 0x180260
+
+#define mmMME6_RTR_LBW_WR_RQ_E_ARB 0x180270
+
+#define mmMME6_RTR_LBW_WR_RQ_W_ARB 0x180274
+
+#define mmMME6_RTR_LBW_WR_RQ_N_ARB 0x180278
+
+#define mmMME6_RTR_LBW_WR_RQ_S_ARB 0x18027C
+
+#define mmMME6_RTR_LBW_WR_RQ_L_ARB 0x180280
+
+#define mmMME6_RTR_LBW_WR_RS_E_ARB 0x180290
+
+#define mmMME6_RTR_LBW_WR_RS_W_ARB 0x180294
+
+#define mmMME6_RTR_LBW_WR_RS_N_ARB 0x180298
+
+#define mmMME6_RTR_LBW_WR_RS_S_ARB 0x18029C
+
+#define mmMME6_RTR_LBW_WR_RS_L_ARB 0x1802A0
+
+#define mmMME6_RTR_DBG_E_ARB 0x180300
+
+#define mmMME6_RTR_DBG_W_ARB 0x180304
+
+#define mmMME6_RTR_DBG_N_ARB 0x180308
+
+#define mmMME6_RTR_DBG_S_ARB 0x18030C
+
+#define mmMME6_RTR_DBG_L_ARB 0x180310
+
+#define mmMME6_RTR_DBG_E_ARB_MAX 0x180320
+
+#define mmMME6_RTR_DBG_W_ARB_MAX 0x180324
+
+#define mmMME6_RTR_DBG_N_ARB_MAX 0x180328
+
+#define mmMME6_RTR_DBG_S_ARB_MAX 0x18032C
+
+#define mmMME6_RTR_DBG_L_ARB_MAX 0x180330
+
+#define mmMME6_RTR_SPLIT_COEF_0 0x180400
+
+#define mmMME6_RTR_SPLIT_COEF_1 0x180404
+
+#define mmMME6_RTR_SPLIT_COEF_2 0x180408
+
+#define mmMME6_RTR_SPLIT_COEF_3 0x18040C
+
+#define mmMME6_RTR_SPLIT_COEF_4 0x180410
+
+#define mmMME6_RTR_SPLIT_COEF_5 0x180414
+
+#define mmMME6_RTR_SPLIT_COEF_6 0x180418
+
+#define mmMME6_RTR_SPLIT_COEF_7 0x18041C
+
+#define mmMME6_RTR_SPLIT_COEF_8 0x180420
+
+#define mmMME6_RTR_SPLIT_COEF_9 0x180424
+
+#define mmMME6_RTR_SPLIT_CFG 0x180440
+
+#define mmMME6_RTR_SPLIT_RD_SAT 0x180444
+
+#define mmMME6_RTR_SPLIT_RD_RST_TOKEN 0x180448
+
+#define mmMME6_RTR_SPLIT_RD_TIMEOUT_0 0x18044C
+
+#define mmMME6_RTR_SPLIT_RD_TIMEOUT_1 0x180450
+
+#define mmMME6_RTR_SPLIT_WR_SAT 0x180454
+
+#define mmMME6_RTR_WPLIT_WR_TST_TOLEN 0x180458
+
+#define mmMME6_RTR_SPLIT_WR_TIMEOUT_0 0x18045C
+
+#define mmMME6_RTR_SPLIT_WR_TIMEOUT_1 0x180460
+
+#define mmMME6_RTR_HBW_RANGE_HIT 0x180470
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_0 0x180480
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_1 0x180484
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_2 0x180488
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_3 0x18048C
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_4 0x180490
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_5 0x180494
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_6 0x180498
+
+#define mmMME6_RTR_HBW_RANGE_MASK_L_7 0x18049C
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_0 0x1804A0
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_1 0x1804A4
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_2 0x1804A8
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_3 0x1804AC
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_4 0x1804B0
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_5 0x1804B4
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_6 0x1804B8
+
+#define mmMME6_RTR_HBW_RANGE_MASK_H_7 0x1804BC
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_0 0x1804C0
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_1 0x1804C4
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_2 0x1804C8
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_3 0x1804CC
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_4 0x1804D0
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_5 0x1804D4
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_6 0x1804D8
+
+#define mmMME6_RTR_HBW_RANGE_BASE_L_7 0x1804DC
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_0 0x1804E0
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_1 0x1804E4
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_2 0x1804E8
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_3 0x1804EC
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_4 0x1804F0
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_5 0x1804F4
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_6 0x1804F8
+
+#define mmMME6_RTR_HBW_RANGE_BASE_H_7 0x1804FC
+
+#define mmMME6_RTR_LBW_RANGE_HIT 0x180500
+
+#define mmMME6_RTR_LBW_RANGE_MASK_0 0x180510
+
+#define mmMME6_RTR_LBW_RANGE_MASK_1 0x180514
+
+#define mmMME6_RTR_LBW_RANGE_MASK_2 0x180518
+
+#define mmMME6_RTR_LBW_RANGE_MASK_3 0x18051C
+
+#define mmMME6_RTR_LBW_RANGE_MASK_4 0x180520
+
+#define mmMME6_RTR_LBW_RANGE_MASK_5 0x180524
+
+#define mmMME6_RTR_LBW_RANGE_MASK_6 0x180528
+
+#define mmMME6_RTR_LBW_RANGE_MASK_7 0x18052C
+
+#define mmMME6_RTR_LBW_RANGE_MASK_8 0x180530
+
+#define mmMME6_RTR_LBW_RANGE_MASK_9 0x180534
+
+#define mmMME6_RTR_LBW_RANGE_MASK_10 0x180538
+
+#define mmMME6_RTR_LBW_RANGE_MASK_11 0x18053C
+
+#define mmMME6_RTR_LBW_RANGE_MASK_12 0x180540
+
+#define mmMME6_RTR_LBW_RANGE_MASK_13 0x180544
+
+#define mmMME6_RTR_LBW_RANGE_MASK_14 0x180548
+
+#define mmMME6_RTR_LBW_RANGE_MASK_15 0x18054C
+
+#define mmMME6_RTR_LBW_RANGE_BASE_0 0x180550
+
+#define mmMME6_RTR_LBW_RANGE_BASE_1 0x180554
+
+#define mmMME6_RTR_LBW_RANGE_BASE_2 0x180558
+
+#define mmMME6_RTR_LBW_RANGE_BASE_3 0x18055C
+
+#define mmMME6_RTR_LBW_RANGE_BASE_4 0x180560
+
+#define mmMME6_RTR_LBW_RANGE_BASE_5 0x180564
+
+#define mmMME6_RTR_LBW_RANGE_BASE_6 0x180568
+
+#define mmMME6_RTR_LBW_RANGE_BASE_7 0x18056C
+
+#define mmMME6_RTR_LBW_RANGE_BASE_8 0x180570
+
+#define mmMME6_RTR_LBW_RANGE_BASE_9 0x180574
+
+#define mmMME6_RTR_LBW_RANGE_BASE_10 0x180578
+
+#define mmMME6_RTR_LBW_RANGE_BASE_11 0x18057C
+
+#define mmMME6_RTR_LBW_RANGE_BASE_12 0x180580
+
+#define mmMME6_RTR_LBW_RANGE_BASE_13 0x180584
+
+#define mmMME6_RTR_LBW_RANGE_BASE_14 0x180588
+
+#define mmMME6_RTR_LBW_RANGE_BASE_15 0x18058C
+
+#define mmMME6_RTR_RGLTR 0x180590
+
+#define mmMME6_RTR_RGLTR_WR_RESULT 0x180594
+
+#define mmMME6_RTR_RGLTR_RD_RESULT 0x180598
+
+#define mmMME6_RTR_SCRAMB_EN 0x180600
+
+#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604
+
+#endif /* ASIC_REG_MME6_RTR_REGS_H_ */
+
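The six MME router blocks share one layout (Prototype: MME_RTR) and, judging from the offsets in these headers, sit 0x40000 apart (SCRAMB_EN at 0x40600, 0x80600, ..., 0x180600). A small sketch of deriving a per-router offset from an index; MME_RTR_STRIDE and the helper name are hypothetical, introduced only for illustration of the inferred stride.

#include <linux/types.h>
#include "mme1_rtr_regs.h"	/* illustrative include */

/* Spacing between consecutive MME router blocks, inferred from the offsets. */
#define MME_RTR_STRIDE	0x40000

/* Offset of SCRAMB_EN for router index 1..6 within the config space. */
static u32 mme_rtr_scramb_en_offset(unsigned int rtr_idx)
{
	return mmMME1_RTR_SCRAMB_EN + (rtr_idx - 1) * MME_RTR_STRIDE;
}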
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
new file mode 100644
index 000000000000..a0d4382fbbd0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_CMDQ_MASKS_H_
+#define ASIC_REG_MME_CMDQ_MASKS_H_
+
+/*
+ *****************************************
+ * MME_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+/* MME_CMDQ_GLBL_CFG0 */
+#define MME_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0
+#define MME_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1
+#define MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1
+#define MME_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2
+#define MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2
+#define MME_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4
+#define MME_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3
+#define MME_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* MME_CMDQ_GLBL_CFG1 */
+#define MME_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define MME_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define MME_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2
+#define MME_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4
+#define MME_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define MME_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* MME_CMDQ_GLBL_PROT */
+#define MME_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0
+#define MME_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1
+#define MME_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1
+#define MME_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2
+#define MME_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2
+#define MME_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4
+#define MME_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3
+#define MME_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8
+#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* MME_CMDQ_GLBL_ERR_CFG */
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* MME_CMDQ_GLBL_ERR_ADDR_LO */
+#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_GLBL_ERR_ADDR_HI */
+#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_GLBL_ERR_WDATA */
+#define MME_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define MME_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_GLBL_SECURE_PROPS */
+#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* MME_CMDQ_GLBL_NON_SECURE_PROPS */
+#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* MME_CMDQ_GLBL_STS0 */
+#define MME_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define MME_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define MME_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define MME_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2
+#define MME_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4
+#define MME_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define MME_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* MME_CMDQ_GLBL_STS1 */
+#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define MME_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define MME_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* MME_CMDQ_CQ_CFG0 */
+#define MME_CMDQ_CQ_CFG0_RESERVED_SHIFT 0
+#define MME_CMDQ_CQ_CFG0_RESERVED_MASK 0x1
+
+/* MME_CMDQ_CQ_CFG1 */
+#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME_CMDQ_CQ_ARUSER */
+#define MME_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define MME_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define MME_CMDQ_CQ_ARUSER_WORD_SHIFT 1
+#define MME_CMDQ_CQ_ARUSER_WORD_MASK 0x2
+
+/* MME_CMDQ_CQ_PTR_LO */
+#define MME_CMDQ_CQ_PTR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_PTR_HI */
+#define MME_CMDQ_CQ_PTR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_TSIZE */
+#define MME_CMDQ_CQ_TSIZE_VAL_SHIFT 0
+#define MME_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_CTL */
+#define MME_CMDQ_CQ_CTL_RPT_SHIFT 0
+#define MME_CMDQ_CQ_CTL_RPT_MASK 0xFFFF
+#define MME_CMDQ_CQ_CTL_CTL_SHIFT 16
+#define MME_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* MME_CMDQ_CQ_PTR_LO_STS */
+#define MME_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define MME_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_PTR_HI_STS */
+#define MME_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define MME_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_TSIZE_STS */
+#define MME_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0
+#define MME_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_CTL_STS */
+#define MME_CMDQ_CQ_CTL_STS_RPT_SHIFT 0
+#define MME_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define MME_CMDQ_CQ_CTL_STS_CTL_SHIFT 16
+#define MME_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* MME_CMDQ_CQ_STS0 */
+#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME_CMDQ_CQ_STS1 */
+#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define MME_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31
+#define MME_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* MME_CMDQ_CQ_RD_RATE_LIM_EN */
+#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */
+#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* MME_CMDQ_CQ_RD_RATE_LIM_SAT */
+#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* MME_CMDQ_CQ_RD_RATE_LIM_TOUT */
+#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* MME_CMDQ_CQ_IFIFO_CNT */
+#define MME_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* MME_CMDQ_CP_MSG_BASE0_ADDR_LO */
+#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE0_ADDR_HI */
+#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE1_ADDR_LO */
+#define MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE1_ADDR_HI */
+#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE2_ADDR_LO */
+#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE2_ADDR_HI */
+#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE3_ADDR_LO */
+#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_MSG_BASE3_ADDR_HI */
+#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_TSIZE_OFFSET */
+#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */
+#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */
+#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_LDMA_COMMIT_OFFSET */
+#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_FENCE0_RDATA */
+#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* MME_CMDQ_CP_FENCE1_RDATA */
+#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* MME_CMDQ_CP_FENCE2_RDATA */
+#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* MME_CMDQ_CP_FENCE3_RDATA */
+#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* MME_CMDQ_CP_FENCE0_CNT */
+#define MME_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* MME_CMDQ_CP_FENCE1_CNT */
+#define MME_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* MME_CMDQ_CP_FENCE2_CNT */
+#define MME_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* MME_CMDQ_CP_FENCE3_CNT */
+#define MME_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0
+#define MME_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* MME_CMDQ_CP_STS */
+#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_CMDQ_CP_STS_ERDY_SHIFT 16
+#define MME_CMDQ_CP_STS_ERDY_MASK 0x10000
+#define MME_CMDQ_CP_STS_RRDY_SHIFT 17
+#define MME_CMDQ_CP_STS_RRDY_MASK 0x20000
+#define MME_CMDQ_CP_STS_MRDY_SHIFT 18
+#define MME_CMDQ_CP_STS_MRDY_MASK 0x40000
+#define MME_CMDQ_CP_STS_SW_STOP_SHIFT 19
+#define MME_CMDQ_CP_STS_SW_STOP_MASK 0x80000
+#define MME_CMDQ_CP_STS_FENCE_ID_SHIFT 20
+#define MME_CMDQ_CP_STS_FENCE_ID_MASK 0x300000
+#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* MME_CMDQ_CP_CURRENT_INST_LO */
+#define MME_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define MME_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_CURRENT_INST_HI */
+#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CP_BARRIER_CFG */
+#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* MME_CMDQ_CP_DBG_0 */
+#define MME_CMDQ_CP_DBG_0_VAL_SHIFT 0
+#define MME_CMDQ_CP_DBG_0_VAL_MASK 0xFF
+
+/* MME_CMDQ_CQ_BUF_ADDR */
+#define MME_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0
+#define MME_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* MME_CMDQ_CQ_BUF_RDATA */
+#define MME_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0
+#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */
+
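[Editor's aside, not part of the patch: a minimal sketch of how the auto-generated SHIFT/MASK pairs in mme_cmdq_masks.h are typically used, assuming the header above is included. Fields are shifted into place when building a write value and masked when decoding a read-back value. The helper names below are hypothetical; the macros are the ones defined above.]

	#include <linux/types.h>

	/* Build a GLBL_ERR_CFG value from individual field enables. */
	static u32 mme_cmdq_err_cfg_build(bool pqf_int_en, bool pqf_stop_on_err)
	{
		u32 cfg = 0;

		if (pqf_int_en)
			cfg |= 1 << MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT;
		if (pqf_stop_on_err)
			cfg |= 1 << MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT;

		return cfg;
	}

	/* Decode the PQF_IDLE bit (bit 0) from a GLBL_STS0 read-back value. */
	static bool mme_cmdq_pqf_idle(u32 glbl_sts0)
	{
		return !!(glbl_sts0 & MME_CMDQ_GLBL_STS0_PQF_IDLE_MASK);
	}
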
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
new file mode 100644
index 000000000000..5c2f6b870a58
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_CMDQ_REGS_H_
+#define ASIC_REG_MME_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * MME_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmMME_CMDQ_GLBL_CFG0 0xD9000
+
+#define mmMME_CMDQ_GLBL_CFG1 0xD9004
+
+#define mmMME_CMDQ_GLBL_PROT 0xD9008
+
+#define mmMME_CMDQ_GLBL_ERR_CFG 0xD900C
+
+#define mmMME_CMDQ_GLBL_ERR_ADDR_LO 0xD9010
+
+#define mmMME_CMDQ_GLBL_ERR_ADDR_HI 0xD9014
+
+#define mmMME_CMDQ_GLBL_ERR_WDATA 0xD9018
+
+#define mmMME_CMDQ_GLBL_SECURE_PROPS 0xD901C
+
+#define mmMME_CMDQ_GLBL_NON_SECURE_PROPS 0xD9020
+
+#define mmMME_CMDQ_GLBL_STS0 0xD9024
+
+#define mmMME_CMDQ_GLBL_STS1 0xD9028
+
+#define mmMME_CMDQ_CQ_CFG0 0xD90B0
+
+#define mmMME_CMDQ_CQ_CFG1 0xD90B4
+
+#define mmMME_CMDQ_CQ_ARUSER 0xD90B8
+
+#define mmMME_CMDQ_CQ_PTR_LO 0xD90C0
+
+#define mmMME_CMDQ_CQ_PTR_HI 0xD90C4
+
+#define mmMME_CMDQ_CQ_TSIZE 0xD90C8
+
+#define mmMME_CMDQ_CQ_CTL 0xD90CC
+
+#define mmMME_CMDQ_CQ_PTR_LO_STS 0xD90D4
+
+#define mmMME_CMDQ_CQ_PTR_HI_STS 0xD90D8
+
+#define mmMME_CMDQ_CQ_TSIZE_STS 0xD90DC
+
+#define mmMME_CMDQ_CQ_CTL_STS 0xD90E0
+
+#define mmMME_CMDQ_CQ_STS0 0xD90E4
+
+#define mmMME_CMDQ_CQ_STS1 0xD90E8
+
+#define mmMME_CMDQ_CQ_RD_RATE_LIM_EN 0xD90F0
+
+#define mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xD90F4
+
+#define mmMME_CMDQ_CQ_RD_RATE_LIM_SAT 0xD90F8
+
+#define mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT 0xD90FC
+
+#define mmMME_CMDQ_CQ_IFIFO_CNT 0xD9108
+
+#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO 0xD9120
+
+#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI 0xD9124
+
+#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO 0xD9128
+
+#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI 0xD912C
+
+#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO 0xD9130
+
+#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI 0xD9134
+
+#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO 0xD9138
+
+#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI 0xD913C
+
+#define mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET 0xD9140
+
+#define mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xD9144
+
+#define mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xD9148
+
+#define mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xD914C
+
+#define mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xD9150
+
+#define mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET 0xD9154
+
+#define mmMME_CMDQ_CP_FENCE0_RDATA 0xD9158
+
+#define mmMME_CMDQ_CP_FENCE1_RDATA 0xD915C
+
+#define mmMME_CMDQ_CP_FENCE2_RDATA 0xD9160
+
+#define mmMME_CMDQ_CP_FENCE3_RDATA 0xD9164
+
+#define mmMME_CMDQ_CP_FENCE0_CNT 0xD9168
+
+#define mmMME_CMDQ_CP_FENCE1_CNT 0xD916C
+
+#define mmMME_CMDQ_CP_FENCE2_CNT 0xD9170
+
+#define mmMME_CMDQ_CP_FENCE3_CNT 0xD9174
+
+#define mmMME_CMDQ_CP_STS 0xD9178
+
+#define mmMME_CMDQ_CP_CURRENT_INST_LO 0xD917C
+
+#define mmMME_CMDQ_CP_CURRENT_INST_HI 0xD9180
+
+#define mmMME_CMDQ_CP_BARRIER_CFG 0xD9184
+
+#define mmMME_CMDQ_CP_DBG_0 0xD9188
+
+#define mmMME_CMDQ_CQ_BUF_ADDR 0xD9308
+
+#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C
+
+#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */
+
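[Editor's aside, not part of the patch: a minimal sketch pairing the mmMME_CMDQ_* offsets above with the bit masks from mme_cmdq_masks.h. It assumes 'cfg_base' is a hypothetical ioremapped base for the device's configuration space; the function name and idle criteria are illustrative only.]

	#include <linux/io.h>
	#include <linux/types.h>

	/* Report whether the MME command queue engines look idle. */
	static bool mme_cmdq_is_idle(void __iomem *cfg_base)
	{
		u32 sts0 = readl(cfg_base + mmMME_CMDQ_GLBL_STS0);

		return (sts0 & MME_CMDQ_GLBL_STS0_PQF_IDLE_MASK) &&
		       (sts0 & MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK) &&
		       (sts0 & MME_CMDQ_GLBL_STS0_CP_IDLE_MASK);
	}
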
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
new file mode 100644
index 000000000000..c7b1b0bb3384
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
@@ -0,0 +1,1537 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_MASKS_H_
+#define ASIC_REG_MME_MASKS_H_
+
+/*
+ *****************************************
+ * MME (Prototype: MME)
+ *****************************************
+ */
+
+/* MME_ARCH_STATUS */
+#define MME_ARCH_STATUS_A_SHIFT 0
+#define MME_ARCH_STATUS_A_MASK 0x1
+#define MME_ARCH_STATUS_B_SHIFT 1
+#define MME_ARCH_STATUS_B_MASK 0x2
+#define MME_ARCH_STATUS_CIN_SHIFT 2
+#define MME_ARCH_STATUS_CIN_MASK 0x4
+#define MME_ARCH_STATUS_COUT_SHIFT 3
+#define MME_ARCH_STATUS_COUT_MASK 0x8
+#define MME_ARCH_STATUS_TE_SHIFT 4
+#define MME_ARCH_STATUS_TE_MASK 0x10
+#define MME_ARCH_STATUS_LD_SHIFT 5
+#define MME_ARCH_STATUS_LD_MASK 0x20
+#define MME_ARCH_STATUS_ST_SHIFT 6
+#define MME_ARCH_STATUS_ST_MASK 0x40
+#define MME_ARCH_STATUS_SB_A_EMPTY_SHIFT 7
+#define MME_ARCH_STATUS_SB_A_EMPTY_MASK 0x80
+#define MME_ARCH_STATUS_SB_B_EMPTY_SHIFT 8
+#define MME_ARCH_STATUS_SB_B_EMPTY_MASK 0x100
+#define MME_ARCH_STATUS_SB_CIN_EMPTY_SHIFT 9
+#define MME_ARCH_STATUS_SB_CIN_EMPTY_MASK 0x200
+#define MME_ARCH_STATUS_SB_COUT_EMPTY_SHIFT 10
+#define MME_ARCH_STATUS_SB_COUT_EMPTY_MASK 0x400
+#define MME_ARCH_STATUS_SM_IDLE_SHIFT 11
+#define MME_ARCH_STATUS_SM_IDLE_MASK 0x800
+#define MME_ARCH_STATUS_WBC_AXI_IDLE_SHIFT 12
+#define MME_ARCH_STATUS_WBC_AXI_IDLE_MASK 0xF000
+#define MME_ARCH_STATUS_SBC_AXI_IDLE_SHIFT 16
+#define MME_ARCH_STATUS_SBC_AXI_IDLE_MASK 0x30000
+#define MME_ARCH_STATUS_SBB_AXI_IDLE_SHIFT 18
+#define MME_ARCH_STATUS_SBB_AXI_IDLE_MASK 0xC0000
+#define MME_ARCH_STATUS_SBA_AXI_IDLE_SHIFT 20
+#define MME_ARCH_STATUS_SBA_AXI_IDLE_MASK 0x300000
+#define MME_ARCH_STATUS_FREE_ACCUMS_SHIFT 22
+#define MME_ARCH_STATUS_FREE_ACCUMS_MASK 0x1C00000
+
+/* MME_ARCH_A_BASE_ADDR_HIGH */
+#define MME_ARCH_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_BASE_ADDR_HIGH */
+#define MME_ARCH_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_CIN_BASE_ADDR_HIGH */
+#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_COUT_BASE_ADDR_HIGH */
+#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_BIAS_BASE_ADDR_HIGH */
+#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_BASE_ADDR_LOW */
+#define MME_ARCH_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_BASE_ADDR_LOW */
+#define MME_ARCH_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_CIN_BASE_ADDR_LOW */
+#define MME_ARCH_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_COUT_BASE_ADDR_LOW */
+#define MME_ARCH_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_BIAS_BASE_ADDR_LOW */
+#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_HEADER */
+#define MME_ARCH_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_ARCH_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_ARCH_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_ARCH_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_ARCH_HEADER_TRANS_A_SHIFT 6
+#define MME_ARCH_HEADER_TRANS_A_MASK 0x40
+#define MME_ARCH_HEADER_LOWER_A_SHIFT 7
+#define MME_ARCH_HEADER_LOWER_A_MASK 0x80
+#define MME_ARCH_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_ARCH_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_ARCH_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_ARCH_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_ARCH_HEADER_LOAD_CIN_SHIFT 13
+#define MME_ARCH_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_ARCH_HEADER_STORE_OUT_SHIFT 15
+#define MME_ARCH_HEADER_STORE_OUT_MASK 0x8000
+#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_ARCH_HEADER_ADVANCE_A_SHIFT 17
+#define MME_ARCH_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_ARCH_HEADER_ADVANCE_B_SHIFT 18
+#define MME_ARCH_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_ARCH_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_ARCH_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_ARCH_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_ARCH_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_ARCH_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_ARCH_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_ARCH_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_ARCH_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_ARCH_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_ARCH_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_ARCH_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_ARCH_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_ARCH_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_ARCH_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_ARCH_KERNEL_SIZE_MINUS_1 */
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_ARCH_ASSOCIATED_DIMS */
+#define MME_ARCH_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_ARCH_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_ARCH_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_ARCH_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_ARCH_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_ARCH_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_ARCH_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_ARCH_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_ARCH_COUT_SCALE */
+#define MME_ARCH_COUT_SCALE_V_SHIFT 0
+#define MME_ARCH_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_CIN_SCALE */
+#define MME_ARCH_CIN_SCALE_V_SHIFT 0
+#define MME_ARCH_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_GEMMLOWP_ZP */
+#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_ARCH_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_ARCH_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_ARCH_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_ARCH_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_ARCH_GEMMLOWP_EXPONENT */
+#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_ARCH_A_ROI_BASE_OFFSET */
+#define MME_ARCH_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_ARCH_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_VALID_ELEMENTS */
+#define MME_ARCH_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_ARCH_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_LOOP_STRIDE */
+#define MME_ARCH_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_ARCH_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_ROI_SIZE */
+#define MME_ARCH_A_ROI_SIZE_V_SHIFT 0
+#define MME_ARCH_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_SPATIAL_START_OFFSET */
+#define MME_ARCH_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_ARCH_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_SPATIAL_STRIDE */
+#define MME_ARCH_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_ARCH_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_ROI_BASE_OFFSET */
+#define MME_ARCH_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_ARCH_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_VALID_ELEMENTS */
+#define MME_ARCH_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_ARCH_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_LOOP_STRIDE */
+#define MME_ARCH_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_ARCH_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_ROI_SIZE */
+#define MME_ARCH_B_ROI_SIZE_V_SHIFT 0
+#define MME_ARCH_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_SPATIAL_START_OFFSET */
+#define MME_ARCH_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_ARCH_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_SPATIAL_STRIDE */
+#define MME_ARCH_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_ARCH_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_ROI_BASE_OFFSET */
+#define MME_ARCH_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_ARCH_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_VALID_ELEMENTS */
+#define MME_ARCH_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_ARCH_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_LOOP_STRIDE */
+#define MME_ARCH_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_ARCH_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_ROI_SIZE */
+#define MME_ARCH_C_ROI_SIZE_V_SHIFT 0
+#define MME_ARCH_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_SPATIAL_START_OFFSET */
+#define MME_ARCH_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_ARCH_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_SPATIAL_STRIDE */
+#define MME_ARCH_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_ARCH_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_SYNC_OBJECT_MESSAGE */
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_ARCH_E_PADDING_VALUE_A */
+#define MME_ARCH_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_ARCH_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_ARCH_E_NUM_ITERATION_MINUS_1 */
+#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_ARCH_E_BUBBLES_PER_SPLIT */
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+/* MME_CMD */
+#define MME_CMD_EXECUTE_SHIFT 0
+#define MME_CMD_EXECUTE_MASK 0x1
+
+/* MME_DUMMY */
+#define MME_DUMMY_V_SHIFT 0
+#define MME_DUMMY_V_MASK 0xFFFFFFFF
+
+/* MME_RESET */
+#define MME_RESET_V_SHIFT 0
+#define MME_RESET_V_MASK 0x1
+
+/* MME_STALL */
+#define MME_STALL_V_SHIFT 0
+#define MME_STALL_V_MASK 0xFFFFFFFF
+
+/* MME_SM_BASE_ADDRESS_LOW */
+#define MME_SM_BASE_ADDRESS_LOW_V_SHIFT 0
+#define MME_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SM_BASE_ADDRESS_HIGH */
+#define MME_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define MME_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_DBGMEM_ADD */
+#define MME_DBGMEM_ADD_V_SHIFT 0
+#define MME_DBGMEM_ADD_V_MASK 0xFFFFFFFF
+
+/* MME_DBGMEM_DATA_WR */
+#define MME_DBGMEM_DATA_WR_V_SHIFT 0
+#define MME_DBGMEM_DATA_WR_V_MASK 0xFFFFFFFF
+
+/* MME_DBGMEM_DATA_RD */
+#define MME_DBGMEM_DATA_RD_V_SHIFT 0
+#define MME_DBGMEM_DATA_RD_V_MASK 0xFFFFFFFF
+
+/* MME_DBGMEM_CTRL */
+#define MME_DBGMEM_CTRL_WR_NRD_SHIFT 0
+#define MME_DBGMEM_CTRL_WR_NRD_MASK 0x1
+
+/* MME_DBGMEM_RC */
+#define MME_DBGMEM_RC_VALID_SHIFT 0
+#define MME_DBGMEM_RC_VALID_MASK 0x1
+#define MME_DBGMEM_RC_FULL_SHIFT 1
+#define MME_DBGMEM_RC_FULL_MASK 0x2
+
+/* MME_LOG_SHADOW */
+#define MME_LOG_SHADOW_MASK_0_SHIFT 0
+#define MME_LOG_SHADOW_MASK_0_MASK 0x7F
+#define MME_LOG_SHADOW_MASK_1_SHIFT 8
+#define MME_LOG_SHADOW_MASK_1_MASK 0x7F00
+#define MME_LOG_SHADOW_MASK_2_SHIFT 16
+#define MME_LOG_SHADOW_MASK_2_MASK 0x7F0000
+#define MME_LOG_SHADOW_MASK_3_SHIFT 24
+#define MME_LOG_SHADOW_MASK_3_MASK 0x7F000000
+
+/* MME_STORE_MAX_CREDIT */
+#define MME_STORE_MAX_CREDIT_V_SHIFT 0
+#define MME_STORE_MAX_CREDIT_V_MASK 0x3F
+
+/* MME_AGU */
+#define MME_AGU_SBA_MAX_CREDIT_SHIFT 0
+#define MME_AGU_SBA_MAX_CREDIT_MASK 0x1F
+#define MME_AGU_SBB_MAX_CREDIT_SHIFT 8
+#define MME_AGU_SBB_MAX_CREDIT_MASK 0x1F00
+#define MME_AGU_SBC_MAX_CREDIT_SHIFT 16
+#define MME_AGU_SBC_MAX_CREDIT_MASK 0x1F0000
+#define MME_AGU_WBC_MAX_CREDIT_SHIFT 24
+#define MME_AGU_WBC_MAX_CREDIT_MASK 0x3F000000
+
+/* MME_SBA */
+#define MME_SBA_MAX_SIZE_SHIFT 0
+#define MME_SBA_MAX_SIZE_MASK 0x3FF
+#define MME_SBA_EU_MAX_CREDIT_SHIFT 16
+#define MME_SBA_EU_MAX_CREDIT_MASK 0x1F0000
+
+/* MME_SBB */
+#define MME_SBB_MAX_SIZE_SHIFT 0
+#define MME_SBB_MAX_SIZE_MASK 0x3FF
+#define MME_SBB_EU_MAX_CREDIT_SHIFT 16
+#define MME_SBB_EU_MAX_CREDIT_MASK 0x1F0000
+
+/* MME_SBC */
+#define MME_SBC_MAX_SIZE_SHIFT 0
+#define MME_SBC_MAX_SIZE_MASK 0x3FF
+#define MME_SBC_EU_MAX_CREDIT_SHIFT 16
+#define MME_SBC_EU_MAX_CREDIT_MASK 0x1F0000
+
+/* MME_WBC */
+#define MME_WBC_MAX_OUTSTANDING_SHIFT 0
+#define MME_WBC_MAX_OUTSTANDING_MASK 0xFFF
+#define MME_WBC_DISABLE_FAST_END_PE_SHIFT 12
+#define MME_WBC_DISABLE_FAST_END_PE_MASK 0x1000
+#define MME_WBC_LD_INSERT_BUBBLE_DIS_SHIFT 13
+#define MME_WBC_LD_INSERT_BUBBLE_DIS_MASK 0x2000
+
+/* MME_SBA_CONTROL_DATA */
+#define MME_SBA_CONTROL_DATA_ASID_SHIFT 0
+#define MME_SBA_CONTROL_DATA_ASID_MASK 0x3FF
+#define MME_SBA_CONTROL_DATA_MMBP_SHIFT 10
+#define MME_SBA_CONTROL_DATA_MMBP_MASK 0x400
+
+/* MME_SBB_CONTROL_DATA */
+#define MME_SBB_CONTROL_DATA_ASID_SHIFT 0
+#define MME_SBB_CONTROL_DATA_ASID_MASK 0x3FF
+#define MME_SBB_CONTROL_DATA_MMBP_SHIFT 10
+#define MME_SBB_CONTROL_DATA_MMBP_MASK 0x400
+
+/* MME_SBC_CONTROL_DATA */
+#define MME_SBC_CONTROL_DATA_ASID_SHIFT 0
+#define MME_SBC_CONTROL_DATA_ASID_MASK 0x3FF
+#define MME_SBC_CONTROL_DATA_MMBP_SHIFT 10
+#define MME_SBC_CONTROL_DATA_MMBP_MASK 0x400
+
+/* MME_WBC_CONTROL_DATA */
+#define MME_WBC_CONTROL_DATA_ASID_SHIFT 0
+#define MME_WBC_CONTROL_DATA_ASID_MASK 0x3FF
+#define MME_WBC_CONTROL_DATA_MMBP_SHIFT 10
+#define MME_WBC_CONTROL_DATA_MMBP_MASK 0x400
+
+/* MME_TE */
+#define MME_TE_MAX_CREDIT_SHIFT 0
+#define MME_TE_MAX_CREDIT_MASK 0x1F
+#define MME_TE_DESC_MAX_CREDIT_SHIFT 8
+#define MME_TE_DESC_MAX_CREDIT_MASK 0x1F00
+
+/* MME_TE2DEC */
+#define MME_TE2DEC_MAX_CREDIT_SHIFT 0
+#define MME_TE2DEC_MAX_CREDIT_MASK 0x1F
+
+/* MME_REI_STATUS */
+#define MME_REI_STATUS_V_SHIFT 0
+#define MME_REI_STATUS_V_MASK 0xFFFFFFFF
+
+/* MME_REI_MASK */
+#define MME_REI_MASK_V_SHIFT 0
+#define MME_REI_MASK_V_MASK 0xFFFFFFFF
+
+/* MME_SEI_STATUS */
+#define MME_SEI_STATUS_V_SHIFT 0
+#define MME_SEI_STATUS_V_MASK 0xFFFFFFFF
+
+/* MME_SEI_MASK */
+#define MME_SEI_MASK_V_SHIFT 0
+#define MME_SEI_MASK_V_MASK 0xFFFFFFFF
+
+/* MME_SPI_STATUS */
+#define MME_SPI_STATUS_V_SHIFT 0
+#define MME_SPI_STATUS_V_MASK 0xFFFFFFFF
+
+/* MME_SPI_MASK */
+#define MME_SPI_MASK_V_SHIFT 0
+#define MME_SPI_MASK_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_STATUS */
+#define MME_SHADOW_0_STATUS_A_SHIFT 0
+#define MME_SHADOW_0_STATUS_A_MASK 0x1
+#define MME_SHADOW_0_STATUS_B_SHIFT 1
+#define MME_SHADOW_0_STATUS_B_MASK 0x2
+#define MME_SHADOW_0_STATUS_CIN_SHIFT 2
+#define MME_SHADOW_0_STATUS_CIN_MASK 0x4
+#define MME_SHADOW_0_STATUS_COUT_SHIFT 3
+#define MME_SHADOW_0_STATUS_COUT_MASK 0x8
+#define MME_SHADOW_0_STATUS_TE_SHIFT 4
+#define MME_SHADOW_0_STATUS_TE_MASK 0x10
+#define MME_SHADOW_0_STATUS_LD_SHIFT 5
+#define MME_SHADOW_0_STATUS_LD_MASK 0x20
+#define MME_SHADOW_0_STATUS_ST_SHIFT 6
+#define MME_SHADOW_0_STATUS_ST_MASK 0x40
+
+/* MME_SHADOW_0_A_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_CIN_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_COUT_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_BIAS_BASE_ADDR_HIGH */
+#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_BASE_ADDR_LOW */
+#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_BASE_ADDR_LOW */
+#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_CIN_BASE_ADDR_LOW */
+#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_COUT_BASE_ADDR_LOW */
+#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_BIAS_BASE_ADDR_LOW */
+#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_HEADER */
+#define MME_SHADOW_0_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_SHADOW_0_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_SHADOW_0_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_SHADOW_0_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_SHADOW_0_HEADER_TRANS_A_SHIFT 6
+#define MME_SHADOW_0_HEADER_TRANS_A_MASK 0x40
+#define MME_SHADOW_0_HEADER_LOWER_A_SHIFT 7
+#define MME_SHADOW_0_HEADER_LOWER_A_MASK 0x80
+#define MME_SHADOW_0_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_SHADOW_0_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_SHADOW_0_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_SHADOW_0_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_SHADOW_0_HEADER_LOAD_CIN_SHIFT 13
+#define MME_SHADOW_0_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_SHADOW_0_HEADER_STORE_OUT_SHIFT 15
+#define MME_SHADOW_0_HEADER_STORE_OUT_MASK 0x8000
+#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_SHADOW_0_HEADER_ADVANCE_A_SHIFT 17
+#define MME_SHADOW_0_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_SHADOW_0_HEADER_ADVANCE_B_SHIFT 18
+#define MME_SHADOW_0_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_SHADOW_0_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_SHADOW_0_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_SHADOW_0_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_SHADOW_0_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_SHADOW_0_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_SHADOW_0_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_SHADOW_0_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_SHADOW_0_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_SHADOW_0_KERNEL_SIZE_MINUS_1 */
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_SHADOW_0_ASSOCIATED_DIMS */
+#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_SHADOW_0_COUT_SCALE */
+#define MME_SHADOW_0_COUT_SCALE_V_SHIFT 0
+#define MME_SHADOW_0_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_CIN_SCALE */
+#define MME_SHADOW_0_CIN_SCALE_V_SHIFT 0
+#define MME_SHADOW_0_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_GEMMLOWP_ZP */
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_SHADOW_0_GEMMLOWP_EXPONENT */
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_SHADOW_0_A_ROI_BASE_OFFSET */
+#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_VALID_ELEMENTS */
+#define MME_SHADOW_0_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_0_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_LOOP_STRIDE */
+#define MME_SHADOW_0_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_ROI_SIZE */
+#define MME_SHADOW_0_A_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_0_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_SPATIAL_START_OFFSET */
+#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_SPATIAL_STRIDE */
+#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_ROI_BASE_OFFSET */
+#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_VALID_ELEMENTS */
+#define MME_SHADOW_0_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_0_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_LOOP_STRIDE */
+#define MME_SHADOW_0_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_ROI_SIZE */
+#define MME_SHADOW_0_B_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_0_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_SPATIAL_START_OFFSET */
+#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_SPATIAL_STRIDE */
+#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_ROI_BASE_OFFSET */
+#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_VALID_ELEMENTS */
+#define MME_SHADOW_0_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_0_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_LOOP_STRIDE */
+#define MME_SHADOW_0_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_ROI_SIZE */
+#define MME_SHADOW_0_C_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_0_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_SPATIAL_START_OFFSET */
+#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_SPATIAL_STRIDE */
+#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_SYNC_OBJECT_MESSAGE */
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_SHADOW_0_E_PADDING_VALUE_A */
+#define MME_SHADOW_0_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_SHADOW_0_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_SHADOW_0_E_NUM_ITERATION_MINUS_1 */
+#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_0_E_BUBBLES_PER_SPLIT */
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+/* MME_SHADOW_1_STATUS */
+#define MME_SHADOW_1_STATUS_A_SHIFT 0
+#define MME_SHADOW_1_STATUS_A_MASK 0x1
+#define MME_SHADOW_1_STATUS_B_SHIFT 1
+#define MME_SHADOW_1_STATUS_B_MASK 0x2
+#define MME_SHADOW_1_STATUS_CIN_SHIFT 2
+#define MME_SHADOW_1_STATUS_CIN_MASK 0x4
+#define MME_SHADOW_1_STATUS_COUT_SHIFT 3
+#define MME_SHADOW_1_STATUS_COUT_MASK 0x8
+#define MME_SHADOW_1_STATUS_TE_SHIFT 4
+#define MME_SHADOW_1_STATUS_TE_MASK 0x10
+#define MME_SHADOW_1_STATUS_LD_SHIFT 5
+#define MME_SHADOW_1_STATUS_LD_MASK 0x20
+#define MME_SHADOW_1_STATUS_ST_SHIFT 6
+#define MME_SHADOW_1_STATUS_ST_MASK 0x40
+
+/* MME_SHADOW_1_A_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_CIN_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_COUT_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_BIAS_BASE_ADDR_HIGH */
+#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_BASE_ADDR_LOW */
+#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_BASE_ADDR_LOW */
+#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_CIN_BASE_ADDR_LOW */
+#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_COUT_BASE_ADDR_LOW */
+#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_BIAS_BASE_ADDR_LOW */
+#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_HEADER */
+#define MME_SHADOW_1_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_SHADOW_1_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_SHADOW_1_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_SHADOW_1_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_SHADOW_1_HEADER_TRANS_A_SHIFT 6
+#define MME_SHADOW_1_HEADER_TRANS_A_MASK 0x40
+#define MME_SHADOW_1_HEADER_LOWER_A_SHIFT 7
+#define MME_SHADOW_1_HEADER_LOWER_A_MASK 0x80
+#define MME_SHADOW_1_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_SHADOW_1_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_SHADOW_1_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_SHADOW_1_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_SHADOW_1_HEADER_LOAD_CIN_SHIFT 13
+#define MME_SHADOW_1_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_SHADOW_1_HEADER_STORE_OUT_SHIFT 15
+#define MME_SHADOW_1_HEADER_STORE_OUT_MASK 0x8000
+#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_SHADOW_1_HEADER_ADVANCE_A_SHIFT 17
+#define MME_SHADOW_1_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_SHADOW_1_HEADER_ADVANCE_B_SHIFT 18
+#define MME_SHADOW_1_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_SHADOW_1_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_SHADOW_1_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_SHADOW_1_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_SHADOW_1_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_SHADOW_1_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_SHADOW_1_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_SHADOW_1_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_SHADOW_1_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_SHADOW_1_KERNEL_SIZE_MINUS_1 */
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_SHADOW_1_ASSOCIATED_DIMS */
+#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_SHADOW_1_COUT_SCALE */
+#define MME_SHADOW_1_COUT_SCALE_V_SHIFT 0
+#define MME_SHADOW_1_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_CIN_SCALE */
+#define MME_SHADOW_1_CIN_SCALE_V_SHIFT 0
+#define MME_SHADOW_1_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_GEMMLOWP_ZP */
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_SHADOW_1_GEMMLOWP_EXPONENT */
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_SHADOW_1_A_ROI_BASE_OFFSET */
+#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_VALID_ELEMENTS */
+#define MME_SHADOW_1_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_1_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_LOOP_STRIDE */
+#define MME_SHADOW_1_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_ROI_SIZE */
+#define MME_SHADOW_1_A_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_1_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_SPATIAL_START_OFFSET */
+#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_SPATIAL_STRIDE */
+#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_ROI_BASE_OFFSET */
+#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_VALID_ELEMENTS */
+#define MME_SHADOW_1_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_1_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_LOOP_STRIDE */
+#define MME_SHADOW_1_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_ROI_SIZE */
+#define MME_SHADOW_1_B_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_1_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_SPATIAL_START_OFFSET */
+#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_SPATIAL_STRIDE */
+#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_ROI_BASE_OFFSET */
+#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_VALID_ELEMENTS */
+#define MME_SHADOW_1_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_1_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_LOOP_STRIDE */
+#define MME_SHADOW_1_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_ROI_SIZE */
+#define MME_SHADOW_1_C_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_1_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_SPATIAL_START_OFFSET */
+#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_SPATIAL_STRIDE */
+#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_SYNC_OBJECT_MESSAGE */
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_SHADOW_1_E_PADDING_VALUE_A */
+#define MME_SHADOW_1_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_SHADOW_1_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_SHADOW_1_E_NUM_ITERATION_MINUS_1 */
+#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_1_E_BUBBLES_PER_SPLIT */
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+/* MME_SHADOW_2_STATUS */
+#define MME_SHADOW_2_STATUS_A_SHIFT 0
+#define MME_SHADOW_2_STATUS_A_MASK 0x1
+#define MME_SHADOW_2_STATUS_B_SHIFT 1
+#define MME_SHADOW_2_STATUS_B_MASK 0x2
+#define MME_SHADOW_2_STATUS_CIN_SHIFT 2
+#define MME_SHADOW_2_STATUS_CIN_MASK 0x4
+#define MME_SHADOW_2_STATUS_COUT_SHIFT 3
+#define MME_SHADOW_2_STATUS_COUT_MASK 0x8
+#define MME_SHADOW_2_STATUS_TE_SHIFT 4
+#define MME_SHADOW_2_STATUS_TE_MASK 0x10
+#define MME_SHADOW_2_STATUS_LD_SHIFT 5
+#define MME_SHADOW_2_STATUS_LD_MASK 0x20
+#define MME_SHADOW_2_STATUS_ST_SHIFT 6
+#define MME_SHADOW_2_STATUS_ST_MASK 0x40
+
+/* MME_SHADOW_2_A_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_CIN_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_COUT_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_BIAS_BASE_ADDR_HIGH */
+#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_BASE_ADDR_LOW */
+#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_BASE_ADDR_LOW */
+#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_CIN_BASE_ADDR_LOW */
+#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_COUT_BASE_ADDR_LOW */
+#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_BIAS_BASE_ADDR_LOW */
+#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_HEADER */
+#define MME_SHADOW_2_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_SHADOW_2_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_SHADOW_2_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_SHADOW_2_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_SHADOW_2_HEADER_TRANS_A_SHIFT 6
+#define MME_SHADOW_2_HEADER_TRANS_A_MASK 0x40
+#define MME_SHADOW_2_HEADER_LOWER_A_SHIFT 7
+#define MME_SHADOW_2_HEADER_LOWER_A_MASK 0x80
+#define MME_SHADOW_2_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_SHADOW_2_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_SHADOW_2_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_SHADOW_2_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_SHADOW_2_HEADER_LOAD_CIN_SHIFT 13
+#define MME_SHADOW_2_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_SHADOW_2_HEADER_STORE_OUT_SHIFT 15
+#define MME_SHADOW_2_HEADER_STORE_OUT_MASK 0x8000
+#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_SHADOW_2_HEADER_ADVANCE_A_SHIFT 17
+#define MME_SHADOW_2_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_SHADOW_2_HEADER_ADVANCE_B_SHIFT 18
+#define MME_SHADOW_2_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_SHADOW_2_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_SHADOW_2_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_SHADOW_2_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_SHADOW_2_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_SHADOW_2_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_SHADOW_2_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_SHADOW_2_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_SHADOW_2_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_SHADOW_2_KERNEL_SIZE_MINUS_1 */
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_SHADOW_2_ASSOCIATED_DIMS */
+#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_SHADOW_2_COUT_SCALE */
+#define MME_SHADOW_2_COUT_SCALE_V_SHIFT 0
+#define MME_SHADOW_2_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_CIN_SCALE */
+#define MME_SHADOW_2_CIN_SCALE_V_SHIFT 0
+#define MME_SHADOW_2_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_GEMMLOWP_ZP */
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_SHADOW_2_GEMMLOWP_EXPONENT */
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_SHADOW_2_A_ROI_BASE_OFFSET */
+#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_VALID_ELEMENTS */
+#define MME_SHADOW_2_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_2_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_LOOP_STRIDE */
+#define MME_SHADOW_2_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_ROI_SIZE */
+#define MME_SHADOW_2_A_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_2_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_SPATIAL_START_OFFSET */
+#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_SPATIAL_STRIDE */
+#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_ROI_BASE_OFFSET */
+#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_VALID_ELEMENTS */
+#define MME_SHADOW_2_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_2_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_LOOP_STRIDE */
+#define MME_SHADOW_2_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_ROI_SIZE */
+#define MME_SHADOW_2_B_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_2_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_SPATIAL_START_OFFSET */
+#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_SPATIAL_STRIDE */
+#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_ROI_BASE_OFFSET */
+#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_VALID_ELEMENTS */
+#define MME_SHADOW_2_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_2_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_LOOP_STRIDE */
+#define MME_SHADOW_2_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_ROI_SIZE */
+#define MME_SHADOW_2_C_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_2_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_SPATIAL_START_OFFSET */
+#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_SPATIAL_STRIDE */
+#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_SYNC_OBJECT_MESSAGE */
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_SHADOW_2_E_PADDING_VALUE_A */
+#define MME_SHADOW_2_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_SHADOW_2_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_SHADOW_2_E_NUM_ITERATION_MINUS_1 */
+#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_2_E_BUBBLES_PER_SPLIT */
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+/* MME_SHADOW_3_STATUS */
+#define MME_SHADOW_3_STATUS_A_SHIFT 0
+#define MME_SHADOW_3_STATUS_A_MASK 0x1
+#define MME_SHADOW_3_STATUS_B_SHIFT 1
+#define MME_SHADOW_3_STATUS_B_MASK 0x2
+#define MME_SHADOW_3_STATUS_CIN_SHIFT 2
+#define MME_SHADOW_3_STATUS_CIN_MASK 0x4
+#define MME_SHADOW_3_STATUS_COUT_SHIFT 3
+#define MME_SHADOW_3_STATUS_COUT_MASK 0x8
+#define MME_SHADOW_3_STATUS_TE_SHIFT 4
+#define MME_SHADOW_3_STATUS_TE_MASK 0x10
+#define MME_SHADOW_3_STATUS_LD_SHIFT 5
+#define MME_SHADOW_3_STATUS_LD_MASK 0x20
+#define MME_SHADOW_3_STATUS_ST_SHIFT 6
+#define MME_SHADOW_3_STATUS_ST_MASK 0x40
+
+/* MME_SHADOW_3_A_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_CIN_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_COUT_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_BIAS_BASE_ADDR_HIGH */
+#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_SHIFT 0
+#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_BASE_ADDR_LOW */
+#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_BASE_ADDR_LOW */
+#define MME_SHADOW_3_B_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_CIN_BASE_ADDR_LOW */
+#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_COUT_BASE_ADDR_LOW */
+#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_BIAS_BASE_ADDR_LOW */
+#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_SHIFT 0
+#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_HEADER */
+#define MME_SHADOW_3_HEADER_SIGNAL_MASK_SHIFT 0
+#define MME_SHADOW_3_HEADER_SIGNAL_MASK_MASK 0x1F
+#define MME_SHADOW_3_HEADER_SIGNAL_EN_SHIFT 5
+#define MME_SHADOW_3_HEADER_SIGNAL_EN_MASK 0x20
+#define MME_SHADOW_3_HEADER_TRANS_A_SHIFT 6
+#define MME_SHADOW_3_HEADER_TRANS_A_MASK 0x40
+#define MME_SHADOW_3_HEADER_LOWER_A_SHIFT 7
+#define MME_SHADOW_3_HEADER_LOWER_A_MASK 0x80
+#define MME_SHADOW_3_HEADER_ACCUM_MASK_SHIFT 8
+#define MME_SHADOW_3_HEADER_ACCUM_MASK_MASK 0xF00
+#define MME_SHADOW_3_HEADER_LOAD_BIAS_SHIFT 12
+#define MME_SHADOW_3_HEADER_LOAD_BIAS_MASK 0x1000
+#define MME_SHADOW_3_HEADER_LOAD_CIN_SHIFT 13
+#define MME_SHADOW_3_HEADER_LOAD_CIN_MASK 0x2000
+#define MME_SHADOW_3_HEADER_STORE_OUT_SHIFT 15
+#define MME_SHADOW_3_HEADER_STORE_OUT_MASK 0x8000
+#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_SHIFT 16
+#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000
+#define MME_SHADOW_3_HEADER_ADVANCE_A_SHIFT 17
+#define MME_SHADOW_3_HEADER_ADVANCE_A_MASK 0x20000
+#define MME_SHADOW_3_HEADER_ADVANCE_B_SHIFT 18
+#define MME_SHADOW_3_HEADER_ADVANCE_B_MASK 0x40000
+#define MME_SHADOW_3_HEADER_ADVANCE_CIN_SHIFT 19
+#define MME_SHADOW_3_HEADER_ADVANCE_CIN_MASK 0x80000
+#define MME_SHADOW_3_HEADER_ADVANCE_COUT_SHIFT 20
+#define MME_SHADOW_3_HEADER_ADVANCE_COUT_MASK 0x100000
+#define MME_SHADOW_3_HEADER_COMPRESSED_B_SHIFT 21
+#define MME_SHADOW_3_HEADER_COMPRESSED_B_MASK 0x200000
+#define MME_SHADOW_3_HEADER_MASK_CONV_END_SHIFT 22
+#define MME_SHADOW_3_HEADER_MASK_CONV_END_MASK 0x400000
+#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_SHIFT 23
+#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000
+#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_SHIFT 24
+#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_MASK 0x3000000
+#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_SHIFT 26
+#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_MASK 0x1C000000
+#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_SHIFT 29
+#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_MASK 0xE0000000
+
+/* MME_SHADOW_3_KERNEL_SIZE_MINUS_1 */
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24
+#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000
+
+/* MME_SHADOW_3_ASSOCIATED_DIMS */
+#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_SHIFT 0
+#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_MASK 0x7
+#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_SHIFT 3
+#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_MASK 0x38
+#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_SHIFT 6
+#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0
+#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_SHIFT 9
+#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_MASK 0xE00
+#define MME_SHADOW_3_ASSOCIATED_DIMS_A_1_SHIFT 16
+#define MME_SHADOW_3_ASSOCIATED_DIMS_A_1_MASK 0x70000
+#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_SHIFT 19
+#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_MASK 0x380000
+#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_SHIFT 22
+#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000
+#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_SHIFT 25
+#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000
+
+/* MME_SHADOW_3_COUT_SCALE */
+#define MME_SHADOW_3_COUT_SCALE_V_SHIFT 0
+#define MME_SHADOW_3_COUT_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_CIN_SCALE */
+#define MME_SHADOW_3_CIN_SCALE_V_SHIFT 0
+#define MME_SHADOW_3_CIN_SCALE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_GEMMLOWP_ZP */
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_SHIFT 0
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_SHIFT 9
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_SHIFT 18
+#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000
+#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27
+#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000
+#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_SHIFT 28
+#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_MASK 0x10000000
+#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29
+#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000
+#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_SHIFT 30
+#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000
+
+/* MME_SHADOW_3_GEMMLOWP_EXPONENT */
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17
+#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000
+
+/* MME_SHADOW_3_A_ROI_BASE_OFFSET */
+#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_VALID_ELEMENTS */
+#define MME_SHADOW_3_A_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_3_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_LOOP_STRIDE */
+#define MME_SHADOW_3_A_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_ROI_SIZE */
+#define MME_SHADOW_3_A_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_3_A_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_SPATIAL_START_OFFSET */
+#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_SPATIAL_STRIDE */
+#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_ROI_BASE_OFFSET */
+#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_VALID_ELEMENTS */
+#define MME_SHADOW_3_B_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_3_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_LOOP_STRIDE */
+#define MME_SHADOW_3_B_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_ROI_SIZE */
+#define MME_SHADOW_3_B_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_3_B_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_SPATIAL_START_OFFSET */
+#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_SPATIAL_STRIDE */
+#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_ROI_BASE_OFFSET */
+#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_VALID_ELEMENTS */
+#define MME_SHADOW_3_C_VALID_ELEMENTS_V_SHIFT 0
+#define MME_SHADOW_3_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_LOOP_STRIDE */
+#define MME_SHADOW_3_C_LOOP_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_ROI_SIZE */
+#define MME_SHADOW_3_C_ROI_SIZE_V_SHIFT 0
+#define MME_SHADOW_3_C_ROI_SIZE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_SPATIAL_START_OFFSET */
+#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_SHIFT 0
+#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_SPATIAL_STRIDE */
+#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_SHIFT 0
+#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 */
+#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_SYNC_OBJECT_MESSAGE */
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* MME_SHADOW_3_E_PADDING_VALUE_A */
+#define MME_SHADOW_3_E_PADDING_VALUE_A_V_SHIFT 0
+#define MME_SHADOW_3_E_PADDING_VALUE_A_V_MASK 0xFFFF
+
+/* MME_SHADOW_3_E_NUM_ITERATION_MINUS_1 */
+#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_SHIFT 0
+#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF
+
+/* MME_SHADOW_3_E_BUBBLES_PER_SPLIT */
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_SHIFT 0
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_MASK 0xFF
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_SHIFT 8
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_SHIFT 24
+#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
+
+#endif /* ASIC_REG_MME_MASKS_H_ */
+
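Note: the paired *_SHIFT/*_MASK defines in the masks header above are intended to be used together to extract (or insert) individual fields of a 32-bit register value. A minimal sketch of field extraction follows; only the MME_SHADOW_3_STATUS_* macros come from the generated header, while the helper function, its name, and the use of pr_debug() are illustrative assumptions and not part of the patch.

/* Illustrative only: decode two fields of a raw MME_SHADOW_3_STATUS
 * value using the generated SHIFT/MASK pairs. Assumes kernel types
 * (u32) and pr_debug() are available via the usual driver includes.
 */
static inline u32 mme_field_get(u32 reg, u32 mask, u32 shift)
{
	return (reg & mask) >> shift;
}

static void mme_decode_shadow3_status(u32 sts)
{
	u32 a = mme_field_get(sts, MME_SHADOW_3_STATUS_A_MASK,
				MME_SHADOW_3_STATUS_A_SHIFT);
	u32 te = mme_field_get(sts, MME_SHADOW_3_STATUS_TE_MASK,
				MME_SHADOW_3_STATUS_TE_SHIFT);

	pr_debug("MME shadow 3 status: A=%u TE=%u\n", a, te);
}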
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
new file mode 100644
index 000000000000..d4bfa58dce19
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_QM_MASKS_H_
+#define ASIC_REG_MME_QM_MASKS_H_
+
+/*
+ *****************************************
+ * MME_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* MME_QM_GLBL_CFG0 */
+#define MME_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define MME_QM_GLBL_CFG0_PQF_EN_MASK 0x1
+#define MME_QM_GLBL_CFG0_CQF_EN_SHIFT 1
+#define MME_QM_GLBL_CFG0_CQF_EN_MASK 0x2
+#define MME_QM_GLBL_CFG0_CP_EN_SHIFT 2
+#define MME_QM_GLBL_CFG0_CP_EN_MASK 0x4
+#define MME_QM_GLBL_CFG0_DMA_EN_SHIFT 3
+#define MME_QM_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* MME_QM_GLBL_CFG1 */
+#define MME_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define MME_QM_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define MME_QM_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define MME_QM_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define MME_QM_GLBL_CFG1_CP_STOP_SHIFT 2
+#define MME_QM_GLBL_CFG1_CP_STOP_MASK 0x4
+#define MME_QM_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define MME_QM_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define MME_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define MME_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define MME_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define MME_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define MME_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define MME_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define MME_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define MME_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* MME_QM_GLBL_PROT */
+#define MME_QM_GLBL_PROT_PQF_PROT_SHIFT 0
+#define MME_QM_GLBL_PROT_PQF_PROT_MASK 0x1
+#define MME_QM_GLBL_PROT_CQF_PROT_SHIFT 1
+#define MME_QM_GLBL_PROT_CQF_PROT_MASK 0x2
+#define MME_QM_GLBL_PROT_CP_PROT_SHIFT 2
+#define MME_QM_GLBL_PROT_CP_PROT_MASK 0x4
+#define MME_QM_GLBL_PROT_DMA_PROT_SHIFT 3
+#define MME_QM_GLBL_PROT_DMA_PROT_MASK 0x8
+#define MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define MME_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define MME_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define MME_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define MME_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* MME_QM_GLBL_ERR_CFG */
+#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* MME_QM_GLBL_ERR_ADDR_LO */
+#define MME_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_GLBL_ERR_ADDR_HI */
+#define MME_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_GLBL_ERR_WDATA */
+#define MME_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define MME_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_GLBL_SECURE_PROPS */
+#define MME_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define MME_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define MME_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define MME_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* MME_QM_GLBL_NON_SECURE_PROPS */
+#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* MME_QM_GLBL_STS0 */
+#define MME_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define MME_QM_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define MME_QM_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define MME_QM_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define MME_QM_GLBL_STS0_CP_IDLE_SHIFT 2
+#define MME_QM_GLBL_STS0_CP_IDLE_MASK 0x4
+#define MME_QM_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define MME_QM_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define MME_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define MME_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define MME_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define MME_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define MME_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define MME_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define MME_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define MME_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* MME_QM_GLBL_STS1 */
+#define MME_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define MME_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define MME_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define MME_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define MME_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define MME_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define MME_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define MME_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define MME_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define MME_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define MME_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* MME_QM_PQ_BASE_LO */
+#define MME_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define MME_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_BASE_HI */
+#define MME_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define MME_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_SIZE */
+#define MME_QM_PQ_SIZE_VAL_SHIFT 0
+#define MME_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_PI */
+#define MME_QM_PQ_PI_VAL_SHIFT 0
+#define MME_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_CI */
+#define MME_QM_PQ_CI_VAL_SHIFT 0
+#define MME_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_CFG0 */
+#define MME_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define MME_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* MME_QM_PQ_CFG1 */
+#define MME_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME_QM_PQ_ARUSER */
+#define MME_QM_PQ_ARUSER_NOSNOOP_SHIFT 0
+#define MME_QM_PQ_ARUSER_NOSNOOP_MASK 0x1
+#define MME_QM_PQ_ARUSER_WORD_SHIFT 1
+#define MME_QM_PQ_ARUSER_WORD_MASK 0x2
+
+/* MME_QM_PQ_PUSH0 */
+#define MME_QM_PQ_PUSH0_PTR_LO_SHIFT 0
+#define MME_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_PUSH1 */
+#define MME_QM_PQ_PUSH1_PTR_HI_SHIFT 0
+#define MME_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_PUSH2 */
+#define MME_QM_PQ_PUSH2_TSIZE_SHIFT 0
+#define MME_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_PUSH3 */
+#define MME_QM_PQ_PUSH3_RPT_SHIFT 0
+#define MME_QM_PQ_PUSH3_RPT_MASK 0xFFFF
+#define MME_QM_PQ_PUSH3_CTL_SHIFT 16
+#define MME_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000
+
+/* MME_QM_PQ_STS0 */
+#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define MME_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define MME_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME_QM_PQ_STS1 */
+#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define MME_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define MME_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* MME_QM_PQ_RD_RATE_LIM_EN */
+#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* MME_QM_PQ_RD_RATE_LIM_RST_TOKEN */
+#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* MME_QM_PQ_RD_RATE_LIM_SAT */
+#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* MME_QM_PQ_RD_RATE_LIM_TOUT */
+#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* MME_QM_CQ_CFG0 */
+#define MME_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define MME_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* MME_QM_CQ_CFG1 */
+#define MME_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME_QM_CQ_ARUSER */
+#define MME_QM_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define MME_QM_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define MME_QM_CQ_ARUSER_WORD_SHIFT 1
+#define MME_QM_CQ_ARUSER_WORD_MASK 0x2
+
+/* MME_QM_CQ_PTR_LO */
+#define MME_QM_CQ_PTR_LO_VAL_SHIFT 0
+#define MME_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_PTR_HI */
+#define MME_QM_CQ_PTR_HI_VAL_SHIFT 0
+#define MME_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_TSIZE */
+#define MME_QM_CQ_TSIZE_VAL_SHIFT 0
+#define MME_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_CTL */
+#define MME_QM_CQ_CTL_RPT_SHIFT 0
+#define MME_QM_CQ_CTL_RPT_MASK 0xFFFF
+#define MME_QM_CQ_CTL_CTL_SHIFT 16
+#define MME_QM_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* MME_QM_CQ_PTR_LO_STS */
+#define MME_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define MME_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_PTR_HI_STS */
+#define MME_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define MME_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_TSIZE_STS */
+#define MME_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define MME_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_CTL_STS */
+#define MME_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define MME_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define MME_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define MME_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* MME_QM_CQ_STS0 */
+#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define MME_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define MME_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME_QM_CQ_STS1 */
+#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define MME_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define MME_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* MME_QM_CQ_RD_RATE_LIM_EN */
+#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* MME_QM_CQ_RD_RATE_LIM_RST_TOKEN */
+#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* MME_QM_CQ_RD_RATE_LIM_SAT */
+#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* MME_QM_CQ_RD_RATE_LIM_TOUT */
+#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* MME_QM_CQ_IFIFO_CNT */
+#define MME_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define MME_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* MME_QM_CP_MSG_BASE0_ADDR_LO */
+#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE0_ADDR_HI */
+#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE1_ADDR_LO */
+#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE1_ADDR_HI */
+#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE2_ADDR_LO */
+#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE2_ADDR_HI */
+#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE3_ADDR_LO */
+#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_MSG_BASE3_ADDR_HI */
+#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_TSIZE_OFFSET */
+#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_DST_BASE_HI_OFFSET */
+#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_LDMA_COMMIT_OFFSET */
+#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_FENCE0_RDATA */
+#define MME_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define MME_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* MME_QM_CP_FENCE1_RDATA */
+#define MME_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define MME_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* MME_QM_CP_FENCE2_RDATA */
+#define MME_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define MME_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* MME_QM_CP_FENCE3_RDATA */
+#define MME_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define MME_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* MME_QM_CP_FENCE0_CNT */
+#define MME_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define MME_QM_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* MME_QM_CP_FENCE1_CNT */
+#define MME_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define MME_QM_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* MME_QM_CP_FENCE2_CNT */
+#define MME_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define MME_QM_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* MME_QM_CP_FENCE3_CNT */
+#define MME_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define MME_QM_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* MME_QM_CP_STS */
+#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define MME_QM_CP_STS_ERDY_SHIFT 16
+#define MME_QM_CP_STS_ERDY_MASK 0x10000
+#define MME_QM_CP_STS_RRDY_SHIFT 17
+#define MME_QM_CP_STS_RRDY_MASK 0x20000
+#define MME_QM_CP_STS_MRDY_SHIFT 18
+#define MME_QM_CP_STS_MRDY_MASK 0x40000
+#define MME_QM_CP_STS_SW_STOP_SHIFT 19
+#define MME_QM_CP_STS_SW_STOP_MASK 0x80000
+#define MME_QM_CP_STS_FENCE_ID_SHIFT 20
+#define MME_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define MME_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define MME_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* MME_QM_CP_CURRENT_INST_LO */
+#define MME_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define MME_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_CURRENT_INST_HI */
+#define MME_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define MME_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CP_BARRIER_CFG */
+#define MME_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define MME_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* MME_QM_CP_DBG_0 */
+#define MME_QM_CP_DBG_0_VAL_SHIFT 0
+#define MME_QM_CP_DBG_0_VAL_MASK 0xFF
+
+/* MME_QM_PQ_BUF_ADDR */
+#define MME_QM_PQ_BUF_ADDR_VAL_SHIFT 0
+#define MME_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_PQ_BUF_RDATA */
+#define MME_QM_PQ_BUF_RDATA_VAL_SHIFT 0
+#define MME_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_BUF_ADDR */
+#define MME_QM_CQ_BUF_ADDR_VAL_SHIFT 0
+#define MME_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* MME_QM_CQ_BUF_RDATA */
+#define MME_QM_CQ_BUF_RDATA_VAL_SHIFT 0
+#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_MME_QM_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
new file mode 100644
index 000000000000..b5b1c776f6c3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_QM_REGS_H_
+#define ASIC_REG_MME_QM_REGS_H_
+
+/*
+ *****************************************
+ * MME_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmMME_QM_GLBL_CFG0 0xD8000
+
+#define mmMME_QM_GLBL_CFG1 0xD8004
+
+#define mmMME_QM_GLBL_PROT 0xD8008
+
+#define mmMME_QM_GLBL_ERR_CFG 0xD800C
+
+#define mmMME_QM_GLBL_ERR_ADDR_LO 0xD8010
+
+#define mmMME_QM_GLBL_ERR_ADDR_HI 0xD8014
+
+#define mmMME_QM_GLBL_ERR_WDATA 0xD8018
+
+#define mmMME_QM_GLBL_SECURE_PROPS 0xD801C
+
+#define mmMME_QM_GLBL_NON_SECURE_PROPS 0xD8020
+
+#define mmMME_QM_GLBL_STS0 0xD8024
+
+#define mmMME_QM_GLBL_STS1 0xD8028
+
+#define mmMME_QM_PQ_BASE_LO 0xD8060
+
+#define mmMME_QM_PQ_BASE_HI 0xD8064
+
+#define mmMME_QM_PQ_SIZE 0xD8068
+
+#define mmMME_QM_PQ_PI 0xD806C
+
+#define mmMME_QM_PQ_CI 0xD8070
+
+#define mmMME_QM_PQ_CFG0 0xD8074
+
+#define mmMME_QM_PQ_CFG1 0xD8078
+
+#define mmMME_QM_PQ_ARUSER 0xD807C
+
+#define mmMME_QM_PQ_PUSH0 0xD8080
+
+#define mmMME_QM_PQ_PUSH1 0xD8084
+
+#define mmMME_QM_PQ_PUSH2 0xD8088
+
+#define mmMME_QM_PQ_PUSH3 0xD808C
+
+#define mmMME_QM_PQ_STS0 0xD8090
+
+#define mmMME_QM_PQ_STS1 0xD8094
+
+#define mmMME_QM_PQ_RD_RATE_LIM_EN 0xD80A0
+
+#define mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xD80A4
+
+#define mmMME_QM_PQ_RD_RATE_LIM_SAT 0xD80A8
+
+#define mmMME_QM_PQ_RD_RATE_LIM_TOUT 0xD80AC
+
+#define mmMME_QM_CQ_CFG0 0xD80B0
+
+#define mmMME_QM_CQ_CFG1 0xD80B4
+
+#define mmMME_QM_CQ_ARUSER 0xD80B8
+
+#define mmMME_QM_CQ_PTR_LO 0xD80C0
+
+#define mmMME_QM_CQ_PTR_HI 0xD80C4
+
+#define mmMME_QM_CQ_TSIZE 0xD80C8
+
+#define mmMME_QM_CQ_CTL 0xD80CC
+
+#define mmMME_QM_CQ_PTR_LO_STS 0xD80D4
+
+#define mmMME_QM_CQ_PTR_HI_STS 0xD80D8
+
+#define mmMME_QM_CQ_TSIZE_STS 0xD80DC
+
+#define mmMME_QM_CQ_CTL_STS 0xD80E0
+
+#define mmMME_QM_CQ_STS0 0xD80E4
+
+#define mmMME_QM_CQ_STS1 0xD80E8
+
+#define mmMME_QM_CQ_RD_RATE_LIM_EN 0xD80F0
+
+#define mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xD80F4
+
+#define mmMME_QM_CQ_RD_RATE_LIM_SAT 0xD80F8
+
+#define mmMME_QM_CQ_RD_RATE_LIM_TOUT 0xD80FC
+
+#define mmMME_QM_CQ_IFIFO_CNT 0xD8108
+
+#define mmMME_QM_CP_MSG_BASE0_ADDR_LO 0xD8120
+
+#define mmMME_QM_CP_MSG_BASE0_ADDR_HI 0xD8124
+
+#define mmMME_QM_CP_MSG_BASE1_ADDR_LO 0xD8128
+
+#define mmMME_QM_CP_MSG_BASE1_ADDR_HI 0xD812C
+
+#define mmMME_QM_CP_MSG_BASE2_ADDR_LO 0xD8130
+
+#define mmMME_QM_CP_MSG_BASE2_ADDR_HI 0xD8134
+
+#define mmMME_QM_CP_MSG_BASE3_ADDR_LO 0xD8138
+
+#define mmMME_QM_CP_MSG_BASE3_ADDR_HI 0xD813C
+
+#define mmMME_QM_CP_LDMA_TSIZE_OFFSET 0xD8140
+
+#define mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xD8144
+
+#define mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xD8148
+
+#define mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xD814C
+
+#define mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xD8150
+
+#define mmMME_QM_CP_LDMA_COMMIT_OFFSET 0xD8154
+
+#define mmMME_QM_CP_FENCE0_RDATA 0xD8158
+
+#define mmMME_QM_CP_FENCE1_RDATA 0xD815C
+
+#define mmMME_QM_CP_FENCE2_RDATA 0xD8160
+
+#define mmMME_QM_CP_FENCE3_RDATA 0xD8164
+
+#define mmMME_QM_CP_FENCE0_CNT 0xD8168
+
+#define mmMME_QM_CP_FENCE1_CNT 0xD816C
+
+#define mmMME_QM_CP_FENCE2_CNT 0xD8170
+
+#define mmMME_QM_CP_FENCE3_CNT 0xD8174
+
+#define mmMME_QM_CP_STS 0xD8178
+
+#define mmMME_QM_CP_CURRENT_INST_LO 0xD817C
+
+#define mmMME_QM_CP_CURRENT_INST_HI 0xD8180
+
+#define mmMME_QM_CP_BARRIER_CFG 0xD8184
+
+#define mmMME_QM_CP_DBG_0 0xD8188
+
+#define mmMME_QM_PQ_BUF_ADDR 0xD8300
+
+#define mmMME_QM_PQ_BUF_RDATA 0xD8304
+
+#define mmMME_QM_CQ_BUF_ADDR 0xD8308
+
+#define mmMME_QM_CQ_BUF_RDATA 0xD830C
+
+#endif /* ASIC_REG_MME_QM_REGS_H_ */
+
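Note: the mmMME_QM_* offsets above pair with the MME_QM_*_SHIFT/_MASK definitions from mme_qm_masks.h earlier in this patch. A minimal sketch of composing and writing a GLBL_CFG0 value is shown below; it assumes the habanalabs driver's WREG32(offset, value) helper and struct hl_device, and is illustrative rather than code taken from the patch.

/* Illustrative only: enable the PQF, CQF, CP and DMA engines of the
 * MME QMAN by setting the *_EN bits of GLBL_CFG0, built from the
 * generated SHIFT macros. Assumes WREG32() resolves a register write
 * for the given hdev, as elsewhere in the habanalabs driver.
 */
static void mme_qm_enable_all(struct hl_device *hdev)
{
	u32 cfg0 = (1 << MME_QM_GLBL_CFG0_PQF_EN_SHIFT) |
		   (1 << MME_QM_GLBL_CFG0_CQF_EN_SHIFT) |
		   (1 << MME_QM_GLBL_CFG0_CP_EN_SHIFT) |
		   (1 << MME_QM_GLBL_CFG0_DMA_EN_SHIFT);

	WREG32(mmMME_QM_GLBL_CFG0, cfg0);
}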
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
new file mode 100644
index 000000000000..9436b1e2705a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
@@ -0,0 +1,1153 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME_REGS_H_
+#define ASIC_REG_MME_REGS_H_
+
+/*
+ *****************************************
+ * MME (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME_ARCH_STATUS 0xD0000
+
+#define mmMME_ARCH_A_BASE_ADDR_HIGH 0xD0008
+
+#define mmMME_ARCH_B_BASE_ADDR_HIGH 0xD000C
+
+#define mmMME_ARCH_CIN_BASE_ADDR_HIGH 0xD0010
+
+#define mmMME_ARCH_COUT_BASE_ADDR_HIGH 0xD0014
+
+#define mmMME_ARCH_BIAS_BASE_ADDR_HIGH 0xD0018
+
+#define mmMME_ARCH_A_BASE_ADDR_LOW 0xD001C
+
+#define mmMME_ARCH_B_BASE_ADDR_LOW 0xD0020
+
+#define mmMME_ARCH_CIN_BASE_ADDR_LOW 0xD0024
+
+#define mmMME_ARCH_COUT_BASE_ADDR_LOW 0xD0028
+
+#define mmMME_ARCH_BIAS_BASE_ADDR_LOW 0xD002C
+
+#define mmMME_ARCH_HEADER 0xD0030
+
+#define mmMME_ARCH_KERNEL_SIZE_MINUS_1 0xD0034
+
+#define mmMME_ARCH_ASSOCIATED_DIMS_0 0xD0038
+
+#define mmMME_ARCH_ASSOCIATED_DIMS_1 0xD003C
+
+#define mmMME_ARCH_COUT_SCALE 0xD0040
+
+#define mmMME_ARCH_CIN_SCALE 0xD0044
+
+#define mmMME_ARCH_GEMMLOWP_ZP 0xD0048
+
+#define mmMME_ARCH_GEMMLOWP_EXPONENT 0xD004C
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_0 0xD0050
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_1 0xD0054
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_2 0xD0058
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_3 0xD005C
+
+#define mmMME_ARCH_A_ROI_BASE_OFFSET_4 0xD0060
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_0 0xD0064
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_1 0xD0068
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_2 0xD006C
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_3 0xD0070
+
+#define mmMME_ARCH_A_VALID_ELEMENTS_4 0xD0074
+
+#define mmMME_ARCH_A_LOOP_STRIDE_0 0xD0078
+
+#define mmMME_ARCH_A_LOOP_STRIDE_1 0xD007C
+
+#define mmMME_ARCH_A_LOOP_STRIDE_2 0xD0080
+
+#define mmMME_ARCH_A_LOOP_STRIDE_3 0xD0084
+
+#define mmMME_ARCH_A_LOOP_STRIDE_4 0xD0088
+
+#define mmMME_ARCH_A_ROI_SIZE_0 0xD008C
+
+#define mmMME_ARCH_A_ROI_SIZE_1 0xD0090
+
+#define mmMME_ARCH_A_ROI_SIZE_2 0xD0094
+
+#define mmMME_ARCH_A_ROI_SIZE_3 0xD0098
+
+#define mmMME_ARCH_A_SPATIAL_START_OFFSET_0 0xD009C
+
+#define mmMME_ARCH_A_SPATIAL_START_OFFSET_1 0xD00A0
+
+#define mmMME_ARCH_A_SPATIAL_START_OFFSET_2 0xD00A4
+
+#define mmMME_ARCH_A_SPATIAL_START_OFFSET_3 0xD00A8
+
+#define mmMME_ARCH_A_SPATIAL_STRIDE_0 0xD00AC
+
+#define mmMME_ARCH_A_SPATIAL_STRIDE_1 0xD00B0
+
+#define mmMME_ARCH_A_SPATIAL_STRIDE_2 0xD00B4
+
+#define mmMME_ARCH_A_SPATIAL_STRIDE_3 0xD00B8
+
+#define mmMME_ARCH_A_SPATIAL_SIZE_MINUS_1 0xD00BC
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_0 0xD00C0
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_1 0xD00C4
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_2 0xD00C8
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_3 0xD00CC
+
+#define mmMME_ARCH_B_ROI_BASE_OFFSET_4 0xD00D0
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_0 0xD00D4
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_1 0xD00D8
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_2 0xD00DC
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_3 0xD00E0
+
+#define mmMME_ARCH_B_VALID_ELEMENTS_4 0xD00E4
+
+#define mmMME_ARCH_B_LOOP_STRIDE_0 0xD00E8
+
+#define mmMME_ARCH_B_LOOP_STRIDE_1 0xD00EC
+
+#define mmMME_ARCH_B_LOOP_STRIDE_2 0xD00F0
+
+#define mmMME_ARCH_B_LOOP_STRIDE_3 0xD00F4
+
+#define mmMME_ARCH_B_LOOP_STRIDE_4 0xD00F8
+
+#define mmMME_ARCH_B_ROI_SIZE_0 0xD00FC
+
+#define mmMME_ARCH_B_ROI_SIZE_1 0xD0100
+
+#define mmMME_ARCH_B_ROI_SIZE_2 0xD0104
+
+#define mmMME_ARCH_B_ROI_SIZE_3 0xD0108
+
+#define mmMME_ARCH_B_SPATIAL_START_OFFSET_0 0xD010C
+
+#define mmMME_ARCH_B_SPATIAL_START_OFFSET_1 0xD0110
+
+#define mmMME_ARCH_B_SPATIAL_START_OFFSET_2 0xD0114
+
+#define mmMME_ARCH_B_SPATIAL_START_OFFSET_3 0xD0118
+
+#define mmMME_ARCH_B_SPATIAL_STRIDE_0 0xD011C
+
+#define mmMME_ARCH_B_SPATIAL_STRIDE_1 0xD0120
+
+#define mmMME_ARCH_B_SPATIAL_STRIDE_2 0xD0124
+
+#define mmMME_ARCH_B_SPATIAL_STRIDE_3 0xD0128
+
+#define mmMME_ARCH_B_SPATIAL_SIZE_MINUS_1 0xD012C
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_0 0xD0130
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_1 0xD0134
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_2 0xD0138
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_3 0xD013C
+
+#define mmMME_ARCH_C_ROI_BASE_OFFSET_4 0xD0140
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_0 0xD0144
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_1 0xD0148
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_2 0xD014C
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_3 0xD0150
+
+#define mmMME_ARCH_C_VALID_ELEMENTS_4 0xD0154
+
+#define mmMME_ARCH_C_LOOP_STRIDE_0 0xD0158
+
+#define mmMME_ARCH_C_LOOP_STRIDE_1 0xD015C
+
+#define mmMME_ARCH_C_LOOP_STRIDE_2 0xD0160
+
+#define mmMME_ARCH_C_LOOP_STRIDE_3 0xD0164
+
+#define mmMME_ARCH_C_LOOP_STRIDE_4 0xD0168
+
+#define mmMME_ARCH_C_ROI_SIZE_0 0xD016C
+
+#define mmMME_ARCH_C_ROI_SIZE_1 0xD0170
+
+#define mmMME_ARCH_C_ROI_SIZE_2 0xD0174
+
+#define mmMME_ARCH_C_ROI_SIZE_3 0xD0178
+
+#define mmMME_ARCH_C_SPATIAL_START_OFFSET_0 0xD017C
+
+#define mmMME_ARCH_C_SPATIAL_START_OFFSET_1 0xD0180
+
+#define mmMME_ARCH_C_SPATIAL_START_OFFSET_2 0xD0184
+
+#define mmMME_ARCH_C_SPATIAL_START_OFFSET_3 0xD0188
+
+#define mmMME_ARCH_C_SPATIAL_STRIDE_0 0xD018C
+
+#define mmMME_ARCH_C_SPATIAL_STRIDE_1 0xD0190
+
+#define mmMME_ARCH_C_SPATIAL_STRIDE_2 0xD0194
+
+#define mmMME_ARCH_C_SPATIAL_STRIDE_3 0xD0198
+
+#define mmMME_ARCH_C_SPATIAL_SIZE_MINUS_1 0xD019C
+
+#define mmMME_ARCH_SYNC_OBJECT_MESSAGE 0xD01A0
+
+#define mmMME_ARCH_E_PADDING_VALUE_A 0xD01A4
+
+#define mmMME_ARCH_E_NUM_ITERATION_MINUS_1 0xD01A8
+
+#define mmMME_ARCH_E_BUBBLES_PER_SPLIT 0xD01AC
+
+#define mmMME_CMD 0xD0200
+
+#define mmMME_DUMMY 0xD0204
+
+#define mmMME_RESET 0xD0208
+
+#define mmMME_STALL 0xD020C
+
+#define mmMME_SM_BASE_ADDRESS_LOW 0xD0210
+
+#define mmMME_SM_BASE_ADDRESS_HIGH 0xD0214
+
+#define mmMME_DBGMEM_ADD 0xD0218
+
+#define mmMME_DBGMEM_DATA_WR 0xD021C
+
+#define mmMME_DBGMEM_DATA_RD 0xD0220
+
+#define mmMME_DBGMEM_CTRL 0xD0224
+
+#define mmMME_DBGMEM_RC 0xD0228
+
+#define mmMME_LOG_SHADOW 0xD022C
+
+#define mmMME_STORE_MAX_CREDIT 0xD0300
+
+#define mmMME_AGU 0xD0304
+
+#define mmMME_SBA 0xD0308
+
+#define mmMME_SBB 0xD030C
+
+#define mmMME_SBC 0xD0310
+
+#define mmMME_WBC 0xD0314
+
+#define mmMME_SBA_CONTROL_DATA 0xD0318
+
+#define mmMME_SBB_CONTROL_DATA 0xD031C
+
+#define mmMME_SBC_CONTROL_DATA 0xD0320
+
+#define mmMME_WBC_CONTROL_DATA 0xD0324
+
+#define mmMME_TE 0xD0328
+
+#define mmMME_TE2DEC 0xD032C
+
+#define mmMME_REI_STATUS 0xD0330
+
+#define mmMME_REI_MASK 0xD0334
+
+#define mmMME_SEI_STATUS 0xD0338
+
+#define mmMME_SEI_MASK 0xD033C
+
+#define mmMME_SPI_STATUS 0xD0340
+
+#define mmMME_SPI_MASK 0xD0344
+
+#define mmMME_SHADOW_0_STATUS 0xD0400
+
+#define mmMME_SHADOW_0_A_BASE_ADDR_HIGH 0xD0408
+
+#define mmMME_SHADOW_0_B_BASE_ADDR_HIGH 0xD040C
+
+#define mmMME_SHADOW_0_CIN_BASE_ADDR_HIGH 0xD0410
+
+#define mmMME_SHADOW_0_COUT_BASE_ADDR_HIGH 0xD0414
+
+#define mmMME_SHADOW_0_BIAS_BASE_ADDR_HIGH 0xD0418
+
+#define mmMME_SHADOW_0_A_BASE_ADDR_LOW 0xD041C
+
+#define mmMME_SHADOW_0_B_BASE_ADDR_LOW 0xD0420
+
+#define mmMME_SHADOW_0_CIN_BASE_ADDR_LOW 0xD0424
+
+#define mmMME_SHADOW_0_COUT_BASE_ADDR_LOW 0xD0428
+
+#define mmMME_SHADOW_0_BIAS_BASE_ADDR_LOW 0xD042C
+
+#define mmMME_SHADOW_0_HEADER 0xD0430
+
+#define mmMME_SHADOW_0_KERNEL_SIZE_MINUS_1 0xD0434
+
+#define mmMME_SHADOW_0_ASSOCIATED_DIMS_0 0xD0438
+
+#define mmMME_SHADOW_0_ASSOCIATED_DIMS_1 0xD043C
+
+#define mmMME_SHADOW_0_COUT_SCALE 0xD0440
+
+#define mmMME_SHADOW_0_CIN_SCALE 0xD0444
+
+#define mmMME_SHADOW_0_GEMMLOWP_ZP 0xD0448
+
+#define mmMME_SHADOW_0_GEMMLOWP_EXPONENT 0xD044C
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_0 0xD0450
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_1 0xD0454
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_2 0xD0458
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_3 0xD045C
+
+#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_4 0xD0460
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_0 0xD0464
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_1 0xD0468
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_2 0xD046C
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_3 0xD0470
+
+#define mmMME_SHADOW_0_A_VALID_ELEMENTS_4 0xD0474
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_0 0xD0478
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_1 0xD047C
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_2 0xD0480
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_3 0xD0484
+
+#define mmMME_SHADOW_0_A_LOOP_STRIDE_4 0xD0488
+
+#define mmMME_SHADOW_0_A_ROI_SIZE_0 0xD048C
+
+#define mmMME_SHADOW_0_A_ROI_SIZE_1 0xD0490
+
+#define mmMME_SHADOW_0_A_ROI_SIZE_2 0xD0494
+
+#define mmMME_SHADOW_0_A_ROI_SIZE_3 0xD0498
+
+#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_0 0xD049C
+
+#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_1 0xD04A0
+
+#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_2 0xD04A4
+
+#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_3 0xD04A8
+
+#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_0 0xD04AC
+
+#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_1 0xD04B0
+
+#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_2 0xD04B4
+
+#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_3 0xD04B8
+
+#define mmMME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 0xD04BC
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_0 0xD04C0
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_1 0xD04C4
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_2 0xD04C8
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_3 0xD04CC
+
+#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_4 0xD04D0
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_0 0xD04D4
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_1 0xD04D8
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_2 0xD04DC
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_3 0xD04E0
+
+#define mmMME_SHADOW_0_B_VALID_ELEMENTS_4 0xD04E4
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_0 0xD04E8
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_1 0xD04EC
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_2 0xD04F0
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_3 0xD04F4
+
+#define mmMME_SHADOW_0_B_LOOP_STRIDE_4 0xD04F8
+
+#define mmMME_SHADOW_0_B_ROI_SIZE_0 0xD04FC
+
+#define mmMME_SHADOW_0_B_ROI_SIZE_1 0xD0500
+
+#define mmMME_SHADOW_0_B_ROI_SIZE_2 0xD0504
+
+#define mmMME_SHADOW_0_B_ROI_SIZE_3 0xD0508
+
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_0 0xD050C
+
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_1 0xD0510
+
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_2 0xD0514
+
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_3 0xD0518
+
+#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_0 0xD051C
+
+#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_1 0xD0520
+
+#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_2 0xD0524
+
+#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_3 0xD0528
+
+#define mmMME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 0xD052C
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_0 0xD0530
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_1 0xD0534
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_2 0xD0538
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_3 0xD053C
+
+#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_4 0xD0540
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_0 0xD0544
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_1 0xD0548
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_2 0xD054C
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_3 0xD0550
+
+#define mmMME_SHADOW_0_C_VALID_ELEMENTS_4 0xD0554
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_0 0xD0558
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_1 0xD055C
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_2 0xD0560
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_3 0xD0564
+
+#define mmMME_SHADOW_0_C_LOOP_STRIDE_4 0xD0568
+
+#define mmMME_SHADOW_0_C_ROI_SIZE_0 0xD056C
+
+#define mmMME_SHADOW_0_C_ROI_SIZE_1 0xD0570
+
+#define mmMME_SHADOW_0_C_ROI_SIZE_2 0xD0574
+
+#define mmMME_SHADOW_0_C_ROI_SIZE_3 0xD0578
+
+#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_0 0xD057C
+
+#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_1 0xD0580
+
+#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_2 0xD0584
+
+#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_3 0xD0588
+
+#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_0 0xD058C
+
+#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_1 0xD0590
+
+#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_2 0xD0594
+
+#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_3 0xD0598
+
+#define mmMME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 0xD059C
+
+#define mmMME_SHADOW_0_SYNC_OBJECT_MESSAGE 0xD05A0
+
+#define mmMME_SHADOW_0_E_PADDING_VALUE_A 0xD05A4
+
+#define mmMME_SHADOW_0_E_NUM_ITERATION_MINUS_1 0xD05A8
+
+#define mmMME_SHADOW_0_E_BUBBLES_PER_SPLIT 0xD05AC
+
+#define mmMME_SHADOW_1_STATUS 0xD0600
+
+#define mmMME_SHADOW_1_A_BASE_ADDR_HIGH 0xD0608
+
+#define mmMME_SHADOW_1_B_BASE_ADDR_HIGH 0xD060C
+
+#define mmMME_SHADOW_1_CIN_BASE_ADDR_HIGH 0xD0610
+
+#define mmMME_SHADOW_1_COUT_BASE_ADDR_HIGH 0xD0614
+
+#define mmMME_SHADOW_1_BIAS_BASE_ADDR_HIGH 0xD0618
+
+#define mmMME_SHADOW_1_A_BASE_ADDR_LOW 0xD061C
+
+#define mmMME_SHADOW_1_B_BASE_ADDR_LOW 0xD0620
+
+#define mmMME_SHADOW_1_CIN_BASE_ADDR_LOW 0xD0624
+
+#define mmMME_SHADOW_1_COUT_BASE_ADDR_LOW 0xD0628
+
+#define mmMME_SHADOW_1_BIAS_BASE_ADDR_LOW 0xD062C
+
+#define mmMME_SHADOW_1_HEADER 0xD0630
+
+#define mmMME_SHADOW_1_KERNEL_SIZE_MINUS_1 0xD0634
+
+#define mmMME_SHADOW_1_ASSOCIATED_DIMS_0 0xD0638
+
+#define mmMME_SHADOW_1_ASSOCIATED_DIMS_1 0xD063C
+
+#define mmMME_SHADOW_1_COUT_SCALE 0xD0640
+
+#define mmMME_SHADOW_1_CIN_SCALE 0xD0644
+
+#define mmMME_SHADOW_1_GEMMLOWP_ZP 0xD0648
+
+#define mmMME_SHADOW_1_GEMMLOWP_EXPONENT 0xD064C
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_0 0xD0650
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_1 0xD0654
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_2 0xD0658
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_3 0xD065C
+
+#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_4 0xD0660
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_0 0xD0664
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_1 0xD0668
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_2 0xD066C
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_3 0xD0670
+
+#define mmMME_SHADOW_1_A_VALID_ELEMENTS_4 0xD0674
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_0 0xD0678
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_1 0xD067C
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_2 0xD0680
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_3 0xD0684
+
+#define mmMME_SHADOW_1_A_LOOP_STRIDE_4 0xD0688
+
+#define mmMME_SHADOW_1_A_ROI_SIZE_0 0xD068C
+
+#define mmMME_SHADOW_1_A_ROI_SIZE_1 0xD0690
+
+#define mmMME_SHADOW_1_A_ROI_SIZE_2 0xD0694
+
+#define mmMME_SHADOW_1_A_ROI_SIZE_3 0xD0698
+
+#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_0 0xD069C
+
+#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_1 0xD06A0
+
+#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_2 0xD06A4
+
+#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_3 0xD06A8
+
+#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_0 0xD06AC
+
+#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_1 0xD06B0
+
+#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_2 0xD06B4
+
+#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_3 0xD06B8
+
+#define mmMME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 0xD06BC
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_0 0xD06C0
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_1 0xD06C4
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_2 0xD06C8
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_3 0xD06CC
+
+#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_4 0xD06D0
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_0 0xD06D4
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_1 0xD06D8
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_2 0xD06DC
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_3 0xD06E0
+
+#define mmMME_SHADOW_1_B_VALID_ELEMENTS_4 0xD06E4
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_0 0xD06E8
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_1 0xD06EC
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_2 0xD06F0
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_3 0xD06F4
+
+#define mmMME_SHADOW_1_B_LOOP_STRIDE_4 0xD06F8
+
+#define mmMME_SHADOW_1_B_ROI_SIZE_0 0xD06FC
+
+#define mmMME_SHADOW_1_B_ROI_SIZE_1 0xD0700
+
+#define mmMME_SHADOW_1_B_ROI_SIZE_2 0xD0704
+
+#define mmMME_SHADOW_1_B_ROI_SIZE_3 0xD0708
+
+#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_0 0xD070C
+
+#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_1 0xD0710
+
+#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_2 0xD0714
+
+#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_3 0xD0718
+
+#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_0 0xD071C
+
+#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_1 0xD0720
+
+#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_2 0xD0724
+
+#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_3 0xD0728
+
+#define mmMME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 0xD072C
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_0 0xD0730
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_1 0xD0734
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_2 0xD0738
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_3 0xD073C
+
+#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_4 0xD0740
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_0 0xD0744
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_1 0xD0748
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_2 0xD074C
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_3 0xD0750
+
+#define mmMME_SHADOW_1_C_VALID_ELEMENTS_4 0xD0754
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_0 0xD0758
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_1 0xD075C
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_2 0xD0760
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_3 0xD0764
+
+#define mmMME_SHADOW_1_C_LOOP_STRIDE_4 0xD0768
+
+#define mmMME_SHADOW_1_C_ROI_SIZE_0 0xD076C
+
+#define mmMME_SHADOW_1_C_ROI_SIZE_1 0xD0770
+
+#define mmMME_SHADOW_1_C_ROI_SIZE_2 0xD0774
+
+#define mmMME_SHADOW_1_C_ROI_SIZE_3 0xD0778
+
+#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_0 0xD077C
+
+#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_1 0xD0780
+
+#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_2 0xD0784
+
+#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_3 0xD0788
+
+#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_0 0xD078C
+
+#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_1 0xD0790
+
+#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_2 0xD0794
+
+#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_3 0xD0798
+
+#define mmMME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 0xD079C
+
+#define mmMME_SHADOW_1_SYNC_OBJECT_MESSAGE 0xD07A0
+
+#define mmMME_SHADOW_1_E_PADDING_VALUE_A 0xD07A4
+
+#define mmMME_SHADOW_1_E_NUM_ITERATION_MINUS_1 0xD07A8
+
+#define mmMME_SHADOW_1_E_BUBBLES_PER_SPLIT 0xD07AC
+
+#define mmMME_SHADOW_2_STATUS 0xD0800
+
+#define mmMME_SHADOW_2_A_BASE_ADDR_HIGH 0xD0808
+
+#define mmMME_SHADOW_2_B_BASE_ADDR_HIGH 0xD080C
+
+#define mmMME_SHADOW_2_CIN_BASE_ADDR_HIGH 0xD0810
+
+#define mmMME_SHADOW_2_COUT_BASE_ADDR_HIGH 0xD0814
+
+#define mmMME_SHADOW_2_BIAS_BASE_ADDR_HIGH 0xD0818
+
+#define mmMME_SHADOW_2_A_BASE_ADDR_LOW 0xD081C
+
+#define mmMME_SHADOW_2_B_BASE_ADDR_LOW 0xD0820
+
+#define mmMME_SHADOW_2_CIN_BASE_ADDR_LOW 0xD0824
+
+#define mmMME_SHADOW_2_COUT_BASE_ADDR_LOW 0xD0828
+
+#define mmMME_SHADOW_2_BIAS_BASE_ADDR_LOW 0xD082C
+
+#define mmMME_SHADOW_2_HEADER 0xD0830
+
+#define mmMME_SHADOW_2_KERNEL_SIZE_MINUS_1 0xD0834
+
+#define mmMME_SHADOW_2_ASSOCIATED_DIMS_0 0xD0838
+
+#define mmMME_SHADOW_2_ASSOCIATED_DIMS_1 0xD083C
+
+#define mmMME_SHADOW_2_COUT_SCALE 0xD0840
+
+#define mmMME_SHADOW_2_CIN_SCALE 0xD0844
+
+#define mmMME_SHADOW_2_GEMMLOWP_ZP 0xD0848
+
+#define mmMME_SHADOW_2_GEMMLOWP_EXPONENT 0xD084C
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_0 0xD0850
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_1 0xD0854
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_2 0xD0858
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_3 0xD085C
+
+#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_4 0xD0860
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_0 0xD0864
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_1 0xD0868
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_2 0xD086C
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_3 0xD0870
+
+#define mmMME_SHADOW_2_A_VALID_ELEMENTS_4 0xD0874
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_0 0xD0878
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_1 0xD087C
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_2 0xD0880
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_3 0xD0884
+
+#define mmMME_SHADOW_2_A_LOOP_STRIDE_4 0xD0888
+
+#define mmMME_SHADOW_2_A_ROI_SIZE_0 0xD088C
+
+#define mmMME_SHADOW_2_A_ROI_SIZE_1 0xD0890
+
+#define mmMME_SHADOW_2_A_ROI_SIZE_2 0xD0894
+
+#define mmMME_SHADOW_2_A_ROI_SIZE_3 0xD0898
+
+#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_0 0xD089C
+
+#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_1 0xD08A0
+
+#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_2 0xD08A4
+
+#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_3 0xD08A8
+
+#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_0 0xD08AC
+
+#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_1 0xD08B0
+
+#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_2 0xD08B4
+
+#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_3 0xD08B8
+
+#define mmMME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 0xD08BC
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_0 0xD08C0
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_1 0xD08C4
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_2 0xD08C8
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_3 0xD08CC
+
+#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_4 0xD08D0
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_0 0xD08D4
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_1 0xD08D8
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_2 0xD08DC
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_3 0xD08E0
+
+#define mmMME_SHADOW_2_B_VALID_ELEMENTS_4 0xD08E4
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_0 0xD08E8
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_1 0xD08EC
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_2 0xD08F0
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_3 0xD08F4
+
+#define mmMME_SHADOW_2_B_LOOP_STRIDE_4 0xD08F8
+
+#define mmMME_SHADOW_2_B_ROI_SIZE_0 0xD08FC
+
+#define mmMME_SHADOW_2_B_ROI_SIZE_1 0xD0900
+
+#define mmMME_SHADOW_2_B_ROI_SIZE_2 0xD0904
+
+#define mmMME_SHADOW_2_B_ROI_SIZE_3 0xD0908
+
+#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_0 0xD090C
+
+#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_1 0xD0910
+
+#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_2 0xD0914
+
+#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_3 0xD0918
+
+#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_0 0xD091C
+
+#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_1 0xD0920
+
+#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_2 0xD0924
+
+#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_3 0xD0928
+
+#define mmMME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 0xD092C
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_0 0xD0930
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_1 0xD0934
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_2 0xD0938
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_3 0xD093C
+
+#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_4 0xD0940
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_0 0xD0944
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_1 0xD0948
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_2 0xD094C
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_3 0xD0950
+
+#define mmMME_SHADOW_2_C_VALID_ELEMENTS_4 0xD0954
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_0 0xD0958
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_1 0xD095C
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_2 0xD0960
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_3 0xD0964
+
+#define mmMME_SHADOW_2_C_LOOP_STRIDE_4 0xD0968
+
+#define mmMME_SHADOW_2_C_ROI_SIZE_0 0xD096C
+
+#define mmMME_SHADOW_2_C_ROI_SIZE_1 0xD0970
+
+#define mmMME_SHADOW_2_C_ROI_SIZE_2 0xD0974
+
+#define mmMME_SHADOW_2_C_ROI_SIZE_3 0xD0978
+
+#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_0 0xD097C
+
+#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_1 0xD0980
+
+#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_2 0xD0984
+
+#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_3 0xD0988
+
+#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_0 0xD098C
+
+#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_1 0xD0990
+
+#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_2 0xD0994
+
+#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_3 0xD0998
+
+#define mmMME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 0xD099C
+
+#define mmMME_SHADOW_2_SYNC_OBJECT_MESSAGE 0xD09A0
+
+#define mmMME_SHADOW_2_E_PADDING_VALUE_A 0xD09A4
+
+#define mmMME_SHADOW_2_E_NUM_ITERATION_MINUS_1 0xD09A8
+
+#define mmMME_SHADOW_2_E_BUBBLES_PER_SPLIT 0xD09AC
+
+#define mmMME_SHADOW_3_STATUS 0xD0A00
+
+#define mmMME_SHADOW_3_A_BASE_ADDR_HIGH 0xD0A08
+
+#define mmMME_SHADOW_3_B_BASE_ADDR_HIGH 0xD0A0C
+
+#define mmMME_SHADOW_3_CIN_BASE_ADDR_HIGH 0xD0A10
+
+#define mmMME_SHADOW_3_COUT_BASE_ADDR_HIGH 0xD0A14
+
+#define mmMME_SHADOW_3_BIAS_BASE_ADDR_HIGH 0xD0A18
+
+#define mmMME_SHADOW_3_A_BASE_ADDR_LOW 0xD0A1C
+
+#define mmMME_SHADOW_3_B_BASE_ADDR_LOW 0xD0A20
+
+#define mmMME_SHADOW_3_CIN_BASE_ADDR_LOW 0xD0A24
+
+#define mmMME_SHADOW_3_COUT_BASE_ADDR_LOW 0xD0A28
+
+#define mmMME_SHADOW_3_BIAS_BASE_ADDR_LOW 0xD0A2C
+
+#define mmMME_SHADOW_3_HEADER 0xD0A30
+
+#define mmMME_SHADOW_3_KERNEL_SIZE_MINUS_1 0xD0A34
+
+#define mmMME_SHADOW_3_ASSOCIATED_DIMS_0 0xD0A38
+
+#define mmMME_SHADOW_3_ASSOCIATED_DIMS_1 0xD0A3C
+
+#define mmMME_SHADOW_3_COUT_SCALE 0xD0A40
+
+#define mmMME_SHADOW_3_CIN_SCALE 0xD0A44
+
+#define mmMME_SHADOW_3_GEMMLOWP_ZP 0xD0A48
+
+#define mmMME_SHADOW_3_GEMMLOWP_EXPONENT 0xD0A4C
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_0 0xD0A50
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_1 0xD0A54
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_2 0xD0A58
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_3 0xD0A5C
+
+#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_4 0xD0A60
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_0 0xD0A64
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_1 0xD0A68
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_2 0xD0A6C
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_3 0xD0A70
+
+#define mmMME_SHADOW_3_A_VALID_ELEMENTS_4 0xD0A74
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_0 0xD0A78
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_1 0xD0A7C
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_2 0xD0A80
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_3 0xD0A84
+
+#define mmMME_SHADOW_3_A_LOOP_STRIDE_4 0xD0A88
+
+#define mmMME_SHADOW_3_A_ROI_SIZE_0 0xD0A8C
+
+#define mmMME_SHADOW_3_A_ROI_SIZE_1 0xD0A90
+
+#define mmMME_SHADOW_3_A_ROI_SIZE_2 0xD0A94
+
+#define mmMME_SHADOW_3_A_ROI_SIZE_3 0xD0A98
+
+#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_0 0xD0A9C
+
+#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_1 0xD0AA0
+
+#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_2 0xD0AA4
+
+#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_3 0xD0AA8
+
+#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_0 0xD0AAC
+
+#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_1 0xD0AB0
+
+#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_2 0xD0AB4
+
+#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_3 0xD0AB8
+
+#define mmMME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 0xD0ABC
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_0 0xD0AC0
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_1 0xD0AC4
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_2 0xD0AC8
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_3 0xD0ACC
+
+#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_4 0xD0AD0
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_0 0xD0AD4
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_1 0xD0AD8
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_2 0xD0ADC
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_3 0xD0AE0
+
+#define mmMME_SHADOW_3_B_VALID_ELEMENTS_4 0xD0AE4
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_0 0xD0AE8
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_1 0xD0AEC
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_2 0xD0AF0
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_3 0xD0AF4
+
+#define mmMME_SHADOW_3_B_LOOP_STRIDE_4 0xD0AF8
+
+#define mmMME_SHADOW_3_B_ROI_SIZE_0 0xD0AFC
+
+#define mmMME_SHADOW_3_B_ROI_SIZE_1 0xD0B00
+
+#define mmMME_SHADOW_3_B_ROI_SIZE_2 0xD0B04
+
+#define mmMME_SHADOW_3_B_ROI_SIZE_3 0xD0B08
+
+#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_0 0xD0B0C
+
+#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_1 0xD0B10
+
+#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_2 0xD0B14
+
+#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_3 0xD0B18
+
+#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_0 0xD0B1C
+
+#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_1 0xD0B20
+
+#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_2 0xD0B24
+
+#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_3 0xD0B28
+
+#define mmMME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 0xD0B2C
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_0 0xD0B30
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_1 0xD0B34
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_2 0xD0B38
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_3 0xD0B3C
+
+#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_4 0xD0B40
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_0 0xD0B44
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_1 0xD0B48
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_2 0xD0B4C
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_3 0xD0B50
+
+#define mmMME_SHADOW_3_C_VALID_ELEMENTS_4 0xD0B54
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_0 0xD0B58
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_1 0xD0B5C
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_2 0xD0B60
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_3 0xD0B64
+
+#define mmMME_SHADOW_3_C_LOOP_STRIDE_4 0xD0B68
+
+#define mmMME_SHADOW_3_C_ROI_SIZE_0 0xD0B6C
+
+#define mmMME_SHADOW_3_C_ROI_SIZE_1 0xD0B70
+
+#define mmMME_SHADOW_3_C_ROI_SIZE_2 0xD0B74
+
+#define mmMME_SHADOW_3_C_ROI_SIZE_3 0xD0B78
+
+#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_0 0xD0B7C
+
+#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_1 0xD0B80
+
+#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_2 0xD0B84
+
+#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_3 0xD0B88
+
+#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_0 0xD0B8C
+
+#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_1 0xD0B90
+
+#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_2 0xD0B94
+
+#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_3 0xD0B98
+
+#define mmMME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 0xD0B9C
+
+#define mmMME_SHADOW_3_SYNC_OBJECT_MESSAGE 0xD0BA0
+
+#define mmMME_SHADOW_3_E_PADDING_VALUE_A 0xD0BA4
+
+#define mmMME_SHADOW_3_E_NUM_ITERATION_MINUS_1 0xD0BA8
+
+#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC
+
+#endif /* ASIC_REG_MME_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
new file mode 100644
index 000000000000..3a78078d3c4c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MMU_MASKS_H_
+#define ASIC_REG_MMU_MASKS_H_
+
+/*
+ *****************************************
+ * MMU (Prototype: MMU)
+ *****************************************
+ */
+
+/* MMU_INPUT_FIFO_THRESHOLD */
+#define MMU_INPUT_FIFO_THRESHOLD_PCI_SHIFT 0
+#define MMU_INPUT_FIFO_THRESHOLD_PCI_MASK 0x7
+#define MMU_INPUT_FIFO_THRESHOLD_PSOC_SHIFT 4
+#define MMU_INPUT_FIFO_THRESHOLD_PSOC_MASK 0x70
+#define MMU_INPUT_FIFO_THRESHOLD_DMA_SHIFT 8
+#define MMU_INPUT_FIFO_THRESHOLD_DMA_MASK 0x700
+#define MMU_INPUT_FIFO_THRESHOLD_CPU_SHIFT 12
+#define MMU_INPUT_FIFO_THRESHOLD_CPU_MASK 0x7000
+#define MMU_INPUT_FIFO_THRESHOLD_MME_SHIFT 16
+#define MMU_INPUT_FIFO_THRESHOLD_MME_MASK 0x70000
+#define MMU_INPUT_FIFO_THRESHOLD_TPC_SHIFT 20
+#define MMU_INPUT_FIFO_THRESHOLD_TPC_MASK 0x700000
+#define MMU_INPUT_FIFO_THRESHOLD_OTHER_SHIFT 24
+#define MMU_INPUT_FIFO_THRESHOLD_OTHER_MASK 0x7000000
+
+/* MMU_MMU_ENABLE */
+#define MMU_MMU_ENABLE_R_SHIFT 0
+#define MMU_MMU_ENABLE_R_MASK 0x1
+
+/* MMU_FORCE_ORDERING */
+#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_SHIFT 0
+#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_MASK 0x1
+#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_SHIFT 1
+#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_MASK 0x2
+#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_SHIFT 2
+#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_MASK 0x4
+#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_SHIFT 3
+#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_MASK 0x8
+#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_SHIFT 4
+#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_MASK 0x10
+#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_SHIFT 5
+#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_MASK 0x20
+#define MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_SHIFT 6
+#define MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_MASK 0x40
+#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_SHIFT 8
+#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_MASK 0x100
+#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_SHIFT 9
+#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_MASK 0x200
+#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_SHIFT 10
+#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_MASK 0x400
+#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_SHIFT 11
+#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_MASK 0x800
+#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_SHIFT 12
+#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_MASK 0x1000
+#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_SHIFT 13
+#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_MASK 0x2000
+#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_SHIFT 14
+#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_MASK 0x4000
+
+/* MMU_FEATURE_ENABLE */
+#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_SHIFT 0
+#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_MASK 0x1
+#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_SHIFT 1
+#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_MASK 0x2
+#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_SHIFT 2
+#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_MASK 0x4
+#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_SHIFT 3
+#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_MASK 0x8
+#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_SHIFT 4
+#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_MASK 0x10
+#define MMU_FEATURE_ENABLE_TRACE_ENABLE_SHIFT 5
+#define MMU_FEATURE_ENABLE_TRACE_ENABLE_MASK 0x20
+
+/* MMU_VA_ORDERING_MASK_31_7 */
+#define MMU_VA_ORDERING_MASK_31_7_R_SHIFT 0
+#define MMU_VA_ORDERING_MASK_31_7_R_MASK 0x1FFFFFF
+
+/* MMU_VA_ORDERING_MASK_49_32 */
+#define MMU_VA_ORDERING_MASK_49_32_R_SHIFT 0
+#define MMU_VA_ORDERING_MASK_49_32_R_MASK 0x3FFFF
+
+/* MMU_LOG2_DDR_SIZE */
+#define MMU_LOG2_DDR_SIZE_R_SHIFT 0
+#define MMU_LOG2_DDR_SIZE_R_MASK 0xFF
+
+/* MMU_SCRAMBLER */
+#define MMU_SCRAMBLER_ADDR_BIT_SHIFT 0
+#define MMU_SCRAMBLER_ADDR_BIT_MASK 0x3F
+#define MMU_SCRAMBLER_SINGLE_DDR_EN_SHIFT 6
+#define MMU_SCRAMBLER_SINGLE_DDR_EN_MASK 0x40
+#define MMU_SCRAMBLER_SINGLE_DDR_ID_SHIFT 7
+#define MMU_SCRAMBLER_SINGLE_DDR_ID_MASK 0x80
+
+/* MMU_MEM_INIT_BUSY */
+#define MMU_MEM_INIT_BUSY_DATA_SHIFT 0
+#define MMU_MEM_INIT_BUSY_DATA_MASK 0x3
+#define MMU_MEM_INIT_BUSY_OBI0_SHIFT 2
+#define MMU_MEM_INIT_BUSY_OBI0_MASK 0x4
+#define MMU_MEM_INIT_BUSY_OBI1_SHIFT 3
+#define MMU_MEM_INIT_BUSY_OBI1_MASK 0x8
+
+/* MMU_SPI_MASK */
+#define MMU_SPI_MASK_R_SHIFT 0
+#define MMU_SPI_MASK_R_MASK 0xFF
+
+/* MMU_SPI_CAUSE */
+#define MMU_SPI_CAUSE_R_SHIFT 0
+#define MMU_SPI_CAUSE_R_MASK 0xFF
+
+/* MMU_PAGE_ERROR_CAPTURE */
+#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_SHIFT 0
+#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
+#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18
+#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
+
+/* MMU_PAGE_ERROR_CAPTURE_VA */
+#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0
+#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
+
+/* MMU_ACCESS_ERROR_CAPTURE */
+#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_SHIFT 0
+#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
+#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18
+#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
+
+/* MMU_ACCESS_ERROR_CAPTURE_VA */
+#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0
+#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_MMU_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
new file mode 100644
index 000000000000..bec6c014135c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MMU_REGS_H_
+#define ASIC_REG_MMU_REGS_H_
+
+/*
+ *****************************************
+ * MMU (Prototype: MMU)
+ *****************************************
+ */
+
+#define mmMMU_INPUT_FIFO_THRESHOLD 0x480000
+
+#define mmMMU_MMU_ENABLE 0x48000C
+
+#define mmMMU_FORCE_ORDERING 0x480010
+
+#define mmMMU_FEATURE_ENABLE 0x480014
+
+#define mmMMU_VA_ORDERING_MASK_31_7 0x480018
+
+#define mmMMU_VA_ORDERING_MASK_49_32 0x48001C
+
+#define mmMMU_LOG2_DDR_SIZE 0x480020
+
+#define mmMMU_SCRAMBLER 0x480024
+
+#define mmMMU_MEM_INIT_BUSY 0x480028
+
+#define mmMMU_SPI_MASK 0x48002C
+
+#define mmMMU_SPI_CAUSE 0x480030
+
+#define mmMMU_PAGE_ERROR_CAPTURE 0x480034
+
+#define mmMMU_PAGE_ERROR_CAPTURE_VA 0x480038
+
+#define mmMMU_ACCESS_ERROR_CAPTURE 0x48003C
+
+#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040
+
+#endif /* ASIC_REG_MMU_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
new file mode 100644
index 000000000000..209e41402a11
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCI_NRTR_MASKS_H_
+#define ASIC_REG_PCI_NRTR_MASKS_H_
+
+/*
+ *****************************************
+ * PCI_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+/* PCI_NRTR_HBW_MAX_CRED */
+#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
+#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define PCI_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
+#define PCI_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
+#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define PCI_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
+#define PCI_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* PCI_NRTR_LBW_MAX_CRED */
+#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
+#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define PCI_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
+#define PCI_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
+#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define PCI_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
+#define PCI_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* PCI_NRTR_DBG_E_ARB */
+#define PCI_NRTR_DBG_E_ARB_W_SHIFT 0
+#define PCI_NRTR_DBG_E_ARB_W_MASK 0x7
+#define PCI_NRTR_DBG_E_ARB_S_SHIFT 8
+#define PCI_NRTR_DBG_E_ARB_S_MASK 0x700
+#define PCI_NRTR_DBG_E_ARB_N_SHIFT 16
+#define PCI_NRTR_DBG_E_ARB_N_MASK 0x70000
+#define PCI_NRTR_DBG_E_ARB_L_SHIFT 24
+#define PCI_NRTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* PCI_NRTR_DBG_W_ARB */
+#define PCI_NRTR_DBG_W_ARB_E_SHIFT 0
+#define PCI_NRTR_DBG_W_ARB_E_MASK 0x7
+#define PCI_NRTR_DBG_W_ARB_S_SHIFT 8
+#define PCI_NRTR_DBG_W_ARB_S_MASK 0x700
+#define PCI_NRTR_DBG_W_ARB_N_SHIFT 16
+#define PCI_NRTR_DBG_W_ARB_N_MASK 0x70000
+#define PCI_NRTR_DBG_W_ARB_L_SHIFT 24
+#define PCI_NRTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* PCI_NRTR_DBG_N_ARB */
+#define PCI_NRTR_DBG_N_ARB_W_SHIFT 0
+#define PCI_NRTR_DBG_N_ARB_W_MASK 0x7
+#define PCI_NRTR_DBG_N_ARB_E_SHIFT 8
+#define PCI_NRTR_DBG_N_ARB_E_MASK 0x700
+#define PCI_NRTR_DBG_N_ARB_S_SHIFT 16
+#define PCI_NRTR_DBG_N_ARB_S_MASK 0x70000
+#define PCI_NRTR_DBG_N_ARB_L_SHIFT 24
+#define PCI_NRTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* PCI_NRTR_DBG_S_ARB */
+#define PCI_NRTR_DBG_S_ARB_W_SHIFT 0
+#define PCI_NRTR_DBG_S_ARB_W_MASK 0x7
+#define PCI_NRTR_DBG_S_ARB_E_SHIFT 8
+#define PCI_NRTR_DBG_S_ARB_E_MASK 0x700
+#define PCI_NRTR_DBG_S_ARB_N_SHIFT 16
+#define PCI_NRTR_DBG_S_ARB_N_MASK 0x70000
+#define PCI_NRTR_DBG_S_ARB_L_SHIFT 24
+#define PCI_NRTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* PCI_NRTR_DBG_L_ARB */
+#define PCI_NRTR_DBG_L_ARB_W_SHIFT 0
+#define PCI_NRTR_DBG_L_ARB_W_MASK 0x7
+#define PCI_NRTR_DBG_L_ARB_E_SHIFT 8
+#define PCI_NRTR_DBG_L_ARB_E_MASK 0x700
+#define PCI_NRTR_DBG_L_ARB_S_SHIFT 16
+#define PCI_NRTR_DBG_L_ARB_S_MASK 0x70000
+#define PCI_NRTR_DBG_L_ARB_N_SHIFT 24
+#define PCI_NRTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* PCI_NRTR_DBG_E_ARB_MAX */
+#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_DBG_W_ARB_MAX */
+#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_DBG_N_ARB_MAX */
+#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_DBG_S_ARB_MAX */
+#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_DBG_L_ARB_MAX */
+#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* PCI_NRTR_SPLIT_COEF */
+#define PCI_NRTR_SPLIT_COEF_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_SPLIT_CFG */
+#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
+#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
+#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
+#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
+#define PCI_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define PCI_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* PCI_NRTR_SPLIT_RD_SAT */
+#define PCI_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_SPLIT_RD_RST_TOKEN */
+#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_SPLIT_RD_TIMEOUT */
+#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PCI_NRTR_SPLIT_WR_SAT */
+#define PCI_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_WPLIT_WR_TST_TOLEN */
+#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* PCI_NRTR_SPLIT_WR_TIMEOUT */
+#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PCI_NRTR_HBW_RANGE_HIT */
+#define PCI_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* PCI_NRTR_HBW_RANGE_MASK_L */
+#define PCI_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* PCI_NRTR_HBW_RANGE_MASK_H */
+#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* PCI_NRTR_HBW_RANGE_BASE_L */
+#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* PCI_NRTR_HBW_RANGE_BASE_H */
+#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* PCI_NRTR_LBW_RANGE_HIT */
+#define PCI_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define PCI_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* PCI_NRTR_LBW_RANGE_MASK */
+#define PCI_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define PCI_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* PCI_NRTR_LBW_RANGE_BASE */
+#define PCI_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define PCI_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* PCI_NRTR_RGLTR */
+#define PCI_NRTR_RGLTR_WR_EN_SHIFT 0
+#define PCI_NRTR_RGLTR_WR_EN_MASK 0x1
+#define PCI_NRTR_RGLTR_RD_EN_SHIFT 4
+#define PCI_NRTR_RGLTR_RD_EN_MASK 0x10
+
+/* PCI_NRTR_RGLTR_WR_RESULT */
+#define PCI_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define PCI_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* PCI_NRTR_RGLTR_RD_RESULT */
+#define PCI_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define PCI_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* PCI_NRTR_SCRAMB_EN */
+#define PCI_NRTR_SCRAMB_EN_VAL_SHIFT 0
+#define PCI_NRTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* PCI_NRTR_NON_LIN_SCRAMB */
+#define PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
new file mode 100644
index 000000000000..447e5d4e7dc8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCI_NRTR_REGS_H_
+#define ASIC_REG_PCI_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * PCI_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmPCI_NRTR_HBW_MAX_CRED 0x100
+
+#define mmPCI_NRTR_LBW_MAX_CRED 0x120
+
+#define mmPCI_NRTR_DBG_E_ARB 0x300
+
+#define mmPCI_NRTR_DBG_W_ARB 0x304
+
+#define mmPCI_NRTR_DBG_N_ARB 0x308
+
+#define mmPCI_NRTR_DBG_S_ARB 0x30C
+
+#define mmPCI_NRTR_DBG_L_ARB 0x310
+
+#define mmPCI_NRTR_DBG_E_ARB_MAX 0x320
+
+#define mmPCI_NRTR_DBG_W_ARB_MAX 0x324
+
+#define mmPCI_NRTR_DBG_N_ARB_MAX 0x328
+
+#define mmPCI_NRTR_DBG_S_ARB_MAX 0x32C
+
+#define mmPCI_NRTR_DBG_L_ARB_MAX 0x330
+
+#define mmPCI_NRTR_SPLIT_COEF_0 0x400
+
+#define mmPCI_NRTR_SPLIT_COEF_1 0x404
+
+#define mmPCI_NRTR_SPLIT_COEF_2 0x408
+
+#define mmPCI_NRTR_SPLIT_COEF_3 0x40C
+
+#define mmPCI_NRTR_SPLIT_COEF_4 0x410
+
+#define mmPCI_NRTR_SPLIT_COEF_5 0x414
+
+#define mmPCI_NRTR_SPLIT_COEF_6 0x418
+
+#define mmPCI_NRTR_SPLIT_COEF_7 0x41C
+
+#define mmPCI_NRTR_SPLIT_COEF_8 0x420
+
+#define mmPCI_NRTR_SPLIT_COEF_9 0x424
+
+#define mmPCI_NRTR_SPLIT_CFG 0x440
+
+#define mmPCI_NRTR_SPLIT_RD_SAT 0x444
+
+#define mmPCI_NRTR_SPLIT_RD_RST_TOKEN 0x448
+
+#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_0 0x44C
+
+#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_1 0x450
+
+#define mmPCI_NRTR_SPLIT_WR_SAT 0x454
+
+#define mmPCI_NRTR_WPLIT_WR_TST_TOLEN 0x458
+
+#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_0 0x45C
+
+#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_1 0x460
+
+#define mmPCI_NRTR_HBW_RANGE_HIT 0x470
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_0 0x480
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_1 0x484
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_2 0x488
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_3 0x48C
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_4 0x490
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_5 0x494
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_6 0x498
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_L_7 0x49C
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_0 0x4A0
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_1 0x4A4
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_2 0x4A8
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_3 0x4AC
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_4 0x4B0
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_5 0x4B4
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_6 0x4B8
+
+#define mmPCI_NRTR_HBW_RANGE_MASK_H_7 0x4BC
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_0 0x4C0
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_1 0x4C4
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_2 0x4C8
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_3 0x4CC
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_4 0x4D0
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_5 0x4D4
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_6 0x4D8
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_L_7 0x4DC
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_0 0x4E0
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_1 0x4E4
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_2 0x4E8
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_3 0x4EC
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_4 0x4F0
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_5 0x4F4
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_6 0x4F8
+
+#define mmPCI_NRTR_HBW_RANGE_BASE_H_7 0x4FC
+
+#define mmPCI_NRTR_LBW_RANGE_HIT 0x500
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_0 0x510
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_1 0x514
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_2 0x518
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_3 0x51C
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_4 0x520
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_5 0x524
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_6 0x528
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_7 0x52C
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_8 0x530
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_9 0x534
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_10 0x538
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_11 0x53C
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_12 0x540
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_13 0x544
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_14 0x548
+
+#define mmPCI_NRTR_LBW_RANGE_MASK_15 0x54C
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_0 0x550
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_1 0x554
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_2 0x558
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_3 0x55C
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_4 0x560
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_5 0x564
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_6 0x568
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_7 0x56C
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_8 0x570
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_9 0x574
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_10 0x578
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_11 0x57C
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_12 0x580
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_13 0x584
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_14 0x588
+
+#define mmPCI_NRTR_LBW_RANGE_BASE_15 0x58C
+
+#define mmPCI_NRTR_RGLTR 0x590
+
+#define mmPCI_NRTR_RGLTR_WR_RESULT 0x594
+
+#define mmPCI_NRTR_RGLTR_RD_RESULT 0x598
+
+#define mmPCI_NRTR_SCRAMB_EN 0x600
+
+#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604
+
+#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
new file mode 100644
index 000000000000..daaf5d9079dc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_AUX_REGS_H_
+#define ASIC_REG_PCIE_AUX_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_AUX (Prototype: PCIE_AUX)
+ *****************************************
+ */
+
+#define mmPCIE_AUX_APB_TIMEOUT 0xC07004
+
+#define mmPCIE_AUX_PHY_INIT 0xC07100
+
+#define mmPCIE_AUX_LTR_MAX_LATENCY 0xC07138
+
+#define mmPCIE_AUX_BAR0_START_L 0xC07160
+
+#define mmPCIE_AUX_BAR0_START_H 0xC07164
+
+#define mmPCIE_AUX_BAR1_START 0xC07168
+
+#define mmPCIE_AUX_BAR2_START_L 0xC0716C
+
+#define mmPCIE_AUX_BAR2_START_H 0xC07170
+
+#define mmPCIE_AUX_BAR3_START 0xC07174
+
+#define mmPCIE_AUX_BAR4_START_L 0xC07178
+
+#define mmPCIE_AUX_BAR4_START_H 0xC0717C
+
+#define mmPCIE_AUX_BAR5_START 0xC07180
+
+#define mmPCIE_AUX_BAR0_LIMIT_L 0xC07184
+
+#define mmPCIE_AUX_BAR0_LIMIT_H 0xC07188
+
+#define mmPCIE_AUX_BAR1_LIMIT 0xC0718C
+
+#define mmPCIE_AUX_BAR2_LIMIT_L 0xC07190
+
+#define mmPCIE_AUX_BAR2_LIMIT_H 0xC07194
+
+#define mmPCIE_AUX_BAR3_LIMIT 0xC07198
+
+#define mmPCIE_AUX_BAR4_LIMIT_L 0xC0719C
+
+#define mmPCIE_AUX_BAR4_LIMIT_H 0xC07200
+
+#define mmPCIE_AUX_BAR5_LIMIT 0xC07204
+
+#define mmPCIE_AUX_BUS_MASTER_EN 0xC07208
+
+#define mmPCIE_AUX_MEM_SPACE_EN 0xC0720C
+
+#define mmPCIE_AUX_MAX_RD_REQ_SIZE 0xC07210
+
+#define mmPCIE_AUX_MAX_PAYLOAD_SIZE 0xC07214
+
+#define mmPCIE_AUX_EXT_TAG_EN 0xC07218
+
+#define mmPCIE_AUX_RCB 0xC0721C
+
+#define mmPCIE_AUX_PM_NO_SOFT_RST 0xC07220
+
+#define mmPCIE_AUX_PBUS_NUM 0xC07224
+
+#define mmPCIE_AUX_PBUS_DEV_NUM 0xC07228
+
+#define mmPCIE_AUX_NO_SNOOP_EN 0xC0722C
+
+#define mmPCIE_AUX_RELAX_ORDER_EN 0xC07230
+
+#define mmPCIE_AUX_HP_SLOT_CTRL_ACCESS 0xC07234
+
+#define mmPCIE_AUX_DLL_STATE_CHGED_EN 0xC07238
+
+#define mmPCIE_AUX_CMP_CPLED_INT_EN 0xC0723C
+
+#define mmPCIE_AUX_HP_INT_EN 0xC07340
+
+#define mmPCIE_AUX_PRE_DET_CHGEN_EN 0xC07344
+
+#define mmPCIE_AUX_MRL_SENSOR_CHGED_EN 0xC07348
+
+#define mmPCIE_AUX_PWR_FAULT_DET_EN 0xC0734C
+
+#define mmPCIE_AUX_ATTEN_BUTTON_PRESSED_EN 0xC07350
+
+#define mmPCIE_AUX_PF_FLR_ACTIVE 0xC07360
+
+#define mmPCIE_AUX_PF_FLR_DONE 0xC07364
+
+#define mmPCIE_AUX_FLR_INT 0xC07390
+
+#define mmPCIE_AUX_LTR_M_EN 0xC073B0
+
+#define mmPCIE_AUX_LTSSM_EN 0xC07428
+
+#define mmPCIE_AUX_SYS_INTR 0xC07440
+
+#define mmPCIE_AUX_INT_DISABLE 0xC07444
+
+#define mmPCIE_AUX_SMLH_LINK_UP 0xC07448
+
+#define mmPCIE_AUX_PM_CURR_STATE 0xC07450
+
+#define mmPCIE_AUX_RDLH_LINK_UP 0xC07458
+
+#define mmPCIE_AUX_BRDG_SLV_XFER_PENDING 0xC0745C
+
+#define mmPCIE_AUX_BRDG_DBI_XFER_PENDING 0xC07460
+
+#define mmPCIE_AUX_AUTO_SP_DIS 0xC07478
+
+#define mmPCIE_AUX_DBI 0xC07490
+
+#define mmPCIE_AUX_DBI_32 0xC07494
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_0 0xC074A4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_1 0xC074A8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_2 0xC074AC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_3 0xC074B0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_4 0xC074B4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_5 0xC074B8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_6 0xC074BC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_7 0xC074C0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_8 0xC074C4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_9 0xC074C8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_10 0xC074CC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_11 0xC074D0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_12 0xC074D4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_13 0xC074D8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_14 0xC074DC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_15 0xC074E0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_16 0xC074E4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_17 0xC074E8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_18 0xC074EC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_19 0xC074F0
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_20 0xC074F4
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_21 0xC074F8
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_22 0xC074FC
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_23 0xC07500
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_24 0xC07504
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_25 0xC07508
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_26 0xC0750C
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_27 0xC07510
+
+#define mmPCIE_AUX_DIAG_STATUS_BUS_28 0xC07514
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_0 0xC07640
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_1 0xC07644
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_2 0xC07648
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_3 0xC0764C
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_4 0xC07650
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_5 0xC07654
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_6 0xC07658
+
+#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_7 0xC0765C
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_0 0xC07744
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_1 0xC07748
+
+#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_2 0xC0774C
+
+#define mmPCIE_AUX_APP_RAS_DES_TBA_CTRL 0xC07774
+
+#define mmPCIE_AUX_PM_DSTATE 0xC07840
+
+#define mmPCIE_AUX_PM_PME_EN 0xC07844
+
+#define mmPCIE_AUX_PM_LINKST_IN_L0S 0xC07848
+
+#define mmPCIE_AUX_PM_LINKST_IN_L1 0xC0784C
+
+#define mmPCIE_AUX_PM_LINKST_IN_L2 0xC07850
+
+#define mmPCIE_AUX_PM_LINKST_L2_EXIT 0xC07854
+
+#define mmPCIE_AUX_PM_STATUS 0xC07858
+
+#define mmPCIE_AUX_APP_READY_ENTER_L23 0xC0785C
+
+#define mmPCIE_AUX_APP_XFER_PENDING 0xC07860
+
+#define mmPCIE_AUX_APP_REQ_L1 0xC07930
+
+#define mmPCIE_AUX_AUX_PM_EN 0xC07934
+
+#define mmPCIE_AUX_APPS_PM_XMT_PME 0xC07938
+
+#define mmPCIE_AUX_OUTBAND_PWRUP_CMD 0xC07940
+
+#define mmPCIE_AUX_PERST 0xC079B8
+
+#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
new file mode 100644
index 000000000000..8eda4de58788
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_EMMC_PLL_REGS_H_
+#define ASIC_REG_PSOC_EMMC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_EMMC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_EMMC_PLL_NR 0xC70100
+
+#define mmPSOC_EMMC_PLL_NF 0xC70104
+
+#define mmPSOC_EMMC_PLL_OD 0xC70108
+
+#define mmPSOC_EMMC_PLL_NB 0xC7010C
+
+#define mmPSOC_EMMC_PLL_CFG 0xC70110
+
+#define mmPSOC_EMMC_PLL_LOSE_MASK 0xC70120
+
+#define mmPSOC_EMMC_PLL_LOCK_INTR 0xC70128
+
+#define mmPSOC_EMMC_PLL_LOCK_BYPASS 0xC7012C
+
+#define mmPSOC_EMMC_PLL_DATA_CHNG 0xC70130
+
+#define mmPSOC_EMMC_PLL_RST 0xC70134
+
+#define mmPSOC_EMMC_PLL_SLIP_WD_CNTR 0xC70150
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_0 0xC70200
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_1 0xC70204
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_2 0xC70208
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_3 0xC7020C
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_0 0xC70220
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_1 0xC70224
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_2 0xC70228
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_3 0xC7022C
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_0 0xC70280
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_1 0xC70284
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_2 0xC70288
+
+#define mmPSOC_EMMC_PLL_DIV_SEL_3 0xC7028C
+
+#define mmPSOC_EMMC_PLL_DIV_EN_0 0xC702A0
+
+#define mmPSOC_EMMC_PLL_DIV_EN_1 0xC702A4
+
+#define mmPSOC_EMMC_PLL_DIV_EN_2 0xC702A8
+
+#define mmPSOC_EMMC_PLL_DIV_EN_3 0xC702AC
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_0 0xC702C0
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_1 0xC702C4
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_2 0xC702C8
+
+#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_3 0xC702CC
+
+#define mmPSOC_EMMC_PLL_CLK_GATER 0xC70300
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_0 0xC70310
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_1 0xC70314
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_2 0xC70318
+
+#define mmPSOC_EMMC_PLL_CLK_RLX_3 0xC7031C
+
+#define mmPSOC_EMMC_PLL_REF_CNTR_PERIOD 0xC70400
+
+#define mmPSOC_EMMC_PLL_REF_LOW_THRESHOLD 0xC70410
+
+#define mmPSOC_EMMC_PLL_REF_HIGH_THRESHOLD 0xC70420
+
+#define mmPSOC_EMMC_PLL_PLL_NOT_STABLE 0xC70430
+
+#define mmPSOC_EMMC_PLL_FREQ_CALC_EN 0xC70440
+
+#endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
new file mode 100644
index 000000000000..d4bf0e1db4df
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
@@ -0,0 +1,447 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PCI_FW_FSM */
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BTM_FSM */
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BTM_FSM */
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_MEM_EN */
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN */
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_EN */
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SPI_IMG_STS */
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SHIFT 2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_MASK 0x4
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_SHIFT 3
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_MASK 0x8
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100
+
+/* PSOC_GLOBAL_CONF_SCRATCHPAD */
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SEMAPHORE */
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_WARM_REBOOT */
+#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_UBOOT_MAGIC */
+#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPL_SOURCE */
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000
+
+/* PSOC_GLOBAL_CONF_I2C_SLV */
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_APP_STATUS */
+#define PSOC_GLOBAL_CONF_APP_STATUS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_APP_STATUS_IND_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_STS */
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00
+
+/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80
+
+/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_TARGETID */
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 12
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFFF000
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000
+
+/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_MII_ADDR */
+#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_MII_SPEED */
+#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS */
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_MASK 0xFE0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_SHIFT 12
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_MASK 0x3000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_SHIFT 14
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_MASK 0x1FC000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_SHIFT 21
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK 0x200000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_SHIFT 22
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_MASK 0x1C00000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_SHIFT 25
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_MASK 0x2000000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_SHIFT 26
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_MASK 0x1C000000
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_SHIFT 1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MASK_REQ */
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG */
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_WD_RST_CFG */
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_MNL_RST_CFG */
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_MASK 0x1
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_SHIFT 1
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_MASK 0x2
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_SHIFT 2
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_MASK 0x1FC
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_SHIFT 9
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_MASK 0x200
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_SHIFT 10
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_MASK 0x400
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_SHIFT 11
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_MASK 0x800
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT 12
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_MASK 0x1000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_SHIFT 13
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_MASK 0x2000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_SHIFT 14
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_MASK 0x4000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_SHIFT 15
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_MASK 0x1F8000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_SHIFT 21
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_MASK 0x200000
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_SHIFT 22
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_MASK 0x400000
+
+/* PSOC_GLOBAL_CONF_PRSTN_MASK */
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_WD_MASK */
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RST_SRC */
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_INPUT */
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_BNK3V3_MS */
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_PAD_DEFAULT */
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_SEL */
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
new file mode 100644
index 000000000000..cfbdd2c9c5c7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
@@ -0,0 +1,745 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0xC4B000
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0xC4B004
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0xC4B008
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0xC4B00C
+
+#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0xC4B020
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0xC4B024
+
+#define mmPSOC_GLOBAL_CONF_BTM_FSM 0xC4B028
+
+#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0xC4B030
+
+#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0xC4B034
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0xC4B038
+
+#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0xC4B040
+
+#define mmPSOC_GLOBAL_CONF_PRSTN 0xC4B044
+
+#define mmPSOC_GLOBAL_CONF_PCIE_EN 0xC4B048
+
+#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0xC4B050
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0xC4B054
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0xC4B100
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0xC4B104
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0xC4B108
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0xC4B10C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0xC4B110
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0xC4B114
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0xC4B118
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0xC4B11C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0xC4B120
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0xC4B124
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0xC4B128
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0xC4B12C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0xC4B130
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0xC4B134
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0xC4B138
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0xC4B13C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0xC4B140
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0xC4B144
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0xC4B148
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0xC4B14C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0xC4B150
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0xC4B154
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0xC4B158
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0xC4B15C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0xC4B160
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0xC4B164
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0xC4B168
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0xC4B16C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0xC4B170
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0xC4B174
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0xC4B178
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0xC4B17C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0xC4B200
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0xC4B204
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0xC4B208
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0xC4B20C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0xC4B210
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0xC4B214
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0xC4B218
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0xC4B21C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0xC4B220
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0xC4B224
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0xC4B228
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0xC4B22C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0xC4B230
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0xC4B234
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0xC4B238
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0xC4B23C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0xC4B240
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0xC4B244
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0xC4B248
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0xC4B24C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0xC4B250
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0xC4B254
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0xC4B258
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0xC4B25C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0xC4B260
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0xC4B264
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0xC4B268
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0xC4B26C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0xC4B270
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0xC4B274
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0xC4B278
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0xC4B27C
+
+#define mmPSOC_GLOBAL_CONF_WARM_REBOOT 0xC4B300
+
+#define mmPSOC_GLOBAL_CONF_UBOOT_MAGIC 0xC4B304
+
+#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0xC4B308
+
+#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0xC4B30C
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV 0xC4B310
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0xC4B314
+
+#define mmPSOC_GLOBAL_CONF_APP_STATUS 0xC4B320
+
+#define mmPSOC_GLOBAL_CONF_BTL_STS 0xC4B340
+
+#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0xC4B350
+
+#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0xC4B354
+
+#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0xC4B358
+
+#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0xC4B35C
+
+#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0xC4B360
+
+#define mmPSOC_GLOBAL_CONF_TARGETID 0xC4B400
+
+#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0xC4B420
+
+#define mmPSOC_GLOBAL_CONF_MII_ADDR 0xC4B424
+
+#define mmPSOC_GLOBAL_CONF_MII_SPEED 0xC4B428
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS 0xC4B430
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0xC4B450
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0xC4B454
+
+#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0xC4B458
+
+#define mmPSOC_GLOBAL_CONF_MASK_REQ 0xC4B45C
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG 0xC4B470
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG 0xC4B474
+
+#define mmPSOC_GLOBAL_CONF_WD_RST_CFG 0xC4B478
+
+#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG 0xC4B47C
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N 0xC4B480
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0xC4B484
+
+#define mmPSOC_GLOBAL_CONF_WD_MASK 0xC4B488
+
+#define mmPSOC_GLOBAL_CONF_RST_SRC 0xC4B490
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0xC4B500
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0xC4B504
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0xC4B508
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0xC4B50C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0xC4B510
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0xC4B514
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0xC4B518
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0xC4B51C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0xC4B520
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0xC4B524
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0xC4B528
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0xC4B52C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0xC4B530
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0xC4B534
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0xC4B538
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0xC4B53C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0xC4B540
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0xC4B544
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0xC4B548
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0xC4B54C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0xC4B550
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0xC4B554
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0xC4B558
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0xC4B55C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0xC4B560
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0xC4B564
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0xC4B568
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0xC4B56C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0xC4B570
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0xC4B574
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0xC4B578
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0xC4B57C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0xC4B580
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0xC4B584
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0xC4B588
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0xC4B58C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0xC4B590
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0xC4B594
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0xC4B598
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0xC4B59C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0xC4B5A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0xC4B5A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0xC4B5A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0xC4B5AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0xC4B5B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0xC4B5B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0xC4B5B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0xC4B5BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0xC4B5C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0xC4B5C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0xC4B5C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0xC4B5CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0xC4B5D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0xC4B5D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0xC4B5D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0xC4B5DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0xC4B5E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0xC4B5E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0xC4B5E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0xC4B5EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0xC4B5F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0xC4B5F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0xC4B5F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0xC4B5FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0xC4B600
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0xC4B604
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0xC4B608
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0xC4B60C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0xC4B610
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0xC4B640
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0xC4B644
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0xC4B648
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0xC4B64C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0xC4B650
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0xC4B654
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0xC4B658
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0xC4B65C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0xC4B660
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0xC4B664
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0xC4B668
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0xC4B66C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_0 0xC4B680
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_1 0xC4B684
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_2 0xC4B688
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_3 0xC4B68C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_4 0xC4B690
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_5 0xC4B694
+
+#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0xC4B6E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B700
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0xC4B704
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0xC4B708
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0xC4B70C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0xC4B710
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0xC4B714
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0xC4B718
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0xC4B71C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0xC4B720
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0xC4B724
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0xC4B728
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0xC4B72C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0xC4B730
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0xC4B734
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0xC4B738
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0xC4B73C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0xC4B740
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0xC4B744
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0xC4B748
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0xC4B74C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0xC4B750
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0xC4B754
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0xC4B758
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0xC4B75C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0xC4B760
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0xC4B764
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0xC4B768
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0xC4B76C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0xC4B770
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0xC4B774
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0xC4B778
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0xC4B77C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0xC4B780
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0xC4B784
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0xC4B788
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0xC4B78C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0xC4B790
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0xC4B794
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0xC4B798
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0xC4B79C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0xC4B7A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0xC4B7A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0xC4B7A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0xC4B7AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0xC4B7B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0xC4B7B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0xC4B7B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0xC4B7BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0xC4B7C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0xC4B7C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0xC4B7C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0xC4B7CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0xC4B7D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0xC4B7D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0xC4B7D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0xC4B7DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0xC4B7E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0xC4B7E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0xC4B7E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0xC4B7EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0xC4B7F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0xC4B7F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0xC4B7F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0xC4B7FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0xC4B800
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0xC4B804
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0xC4B808
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0xC4B80C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0xC4B810
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0xC4B814
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0xC4B818
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0xC4B81C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0xC4B820
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0xC4B824
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0xC4B828
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0xC4B82C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0xC4B830
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0xC4B834
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0xC4B838
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0xC4B83C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0xC4B840
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0xC4B844
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0xC4B900
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0xC4B904
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0xC4B908
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0xC4B90C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0xC4B910
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0xC4B914
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0xC4B918
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0xC4B91C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0xC4B920
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0xC4B924
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0xC4B928
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0xC4B92C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0xC4B930
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0xC4B934
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0xC4B938
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0xC4B93C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0xC4B940
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0xC4B944
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0xC4B948
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0xC4B94C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0xC4B950
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0xC4B954
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0xC4B958
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0xC4B95C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0xC4B960
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0xC4B964
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0xC4B968
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0xC4B96C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0xC4B970
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0xC4B974
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0xC4B978
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0xC4B97C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_32 0xC4B980
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0xC4B984
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0xC4B988
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0xC4B98C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0xC4B990
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0xC4B994
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0xC4B998
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0xC4B99C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0xC4B9A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0xC4B9A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0xC4B9A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0xC4B9AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0xC4B9B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0xC4B9B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0xC4B9B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0xC4B9BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0xC4B9C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0xC4B9C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 0xC4B9C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0xC4B9CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0xC4B9D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0xC4B9D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0xC4B9D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0xC4B9DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0xC4B9E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0xC4B9E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0xC4B9E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0xC4B9EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0xC4B9F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0xC4B9F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0xC4B9F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0xC4B9FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0xC4BA00
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0xC4BA04
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0xC4BA08
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0xC4BA0C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0xC4BA10
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0xC4BA14
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0xC4BA18
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0xC4BA1C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0xC4BA20
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0xC4BA24
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0xC4BA28
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0xC4BA2C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0xC4BA30
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0xC4BA34
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0xC4BA38
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0xC4BA3C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0xC4BA40
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BA44
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
new file mode 100644
index 000000000000..6723d8f76f30
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_MME_PLL_REGS_H_
+#define ASIC_REG_PSOC_MME_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_MME_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_MME_PLL_NR 0xC71100
+
+#define mmPSOC_MME_PLL_NF 0xC71104
+
+#define mmPSOC_MME_PLL_OD 0xC71108
+
+#define mmPSOC_MME_PLL_NB 0xC7110C
+
+#define mmPSOC_MME_PLL_CFG 0xC71110
+
+#define mmPSOC_MME_PLL_LOSE_MASK 0xC71120
+
+#define mmPSOC_MME_PLL_LOCK_INTR 0xC71128
+
+#define mmPSOC_MME_PLL_LOCK_BYPASS 0xC7112C
+
+#define mmPSOC_MME_PLL_DATA_CHNG 0xC71130
+
+#define mmPSOC_MME_PLL_RST 0xC71134
+
+#define mmPSOC_MME_PLL_SLIP_WD_CNTR 0xC71150
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_0 0xC71200
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_1 0xC71204
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_2 0xC71208
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_3 0xC7120C
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_0 0xC71220
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_1 0xC71224
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_2 0xC71228
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_3 0xC7122C
+
+#define mmPSOC_MME_PLL_DIV_SEL_0 0xC71280
+
+#define mmPSOC_MME_PLL_DIV_SEL_1 0xC71284
+
+#define mmPSOC_MME_PLL_DIV_SEL_2 0xC71288
+
+#define mmPSOC_MME_PLL_DIV_SEL_3 0xC7128C
+
+#define mmPSOC_MME_PLL_DIV_EN_0 0xC712A0
+
+#define mmPSOC_MME_PLL_DIV_EN_1 0xC712A4
+
+#define mmPSOC_MME_PLL_DIV_EN_2 0xC712A8
+
+#define mmPSOC_MME_PLL_DIV_EN_3 0xC712AC
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_0 0xC712C0
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_1 0xC712C4
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_2 0xC712C8
+
+#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_3 0xC712CC
+
+#define mmPSOC_MME_PLL_CLK_GATER 0xC71300
+
+#define mmPSOC_MME_PLL_CLK_RLX_0 0xC71310
+
+#define mmPSOC_MME_PLL_CLK_RLX_1 0xC71314
+
+#define mmPSOC_MME_PLL_CLK_RLX_2 0xC71318
+
+#define mmPSOC_MME_PLL_CLK_RLX_3 0xC7131C
+
+#define mmPSOC_MME_PLL_REF_CNTR_PERIOD 0xC71400
+
+#define mmPSOC_MME_PLL_REF_LOW_THRESHOLD 0xC71410
+
+#define mmPSOC_MME_PLL_REF_HIGH_THRESHOLD 0xC71420
+
+#define mmPSOC_MME_PLL_PLL_NOT_STABLE 0xC71430
+
+#define mmPSOC_MME_PLL_FREQ_CALC_EN 0xC71440
+
+#endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
new file mode 100644
index 000000000000..abcded0531c9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_
+#define ASIC_REG_PSOC_PCI_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_PCI_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_PCI_PLL_NR 0xC72100
+
+#define mmPSOC_PCI_PLL_NF 0xC72104
+
+#define mmPSOC_PCI_PLL_OD 0xC72108
+
+#define mmPSOC_PCI_PLL_NB 0xC7210C
+
+#define mmPSOC_PCI_PLL_CFG 0xC72110
+
+#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120
+
+#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128
+
+#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C
+
+#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130
+
+#define mmPSOC_PCI_PLL_RST 0xC72134
+
+#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C
+
+#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280
+
+#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284
+
+#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288
+
+#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C
+
+#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0
+
+#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4
+
+#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8
+
+#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC
+
+#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300
+
+#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310
+
+#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314
+
+#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318
+
+#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C
+
+#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400
+
+#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410
+
+#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420
+
+#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430
+
+#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440
+
+#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
new file mode 100644
index 000000000000..5925c7477c25
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_SPI_REGS_H_
+#define ASIC_REG_PSOC_SPI_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_SPI (Prototype: SPI)
+ *****************************************
+ */
+
+#define mmPSOC_SPI_CTRLR0 0xC43000
+
+#define mmPSOC_SPI_CTRLR1 0xC43004
+
+#define mmPSOC_SPI_SSIENR 0xC43008
+
+#define mmPSOC_SPI_MWCR 0xC4300C
+
+#define mmPSOC_SPI_SER 0xC43010
+
+#define mmPSOC_SPI_BAUDR 0xC43014
+
+#define mmPSOC_SPI_TXFTLR 0xC43018
+
+#define mmPSOC_SPI_RXFTLR 0xC4301C
+
+#define mmPSOC_SPI_TXFLR 0xC43020
+
+#define mmPSOC_SPI_RXFLR 0xC43024
+
+#define mmPSOC_SPI_SR 0xC43028
+
+#define mmPSOC_SPI_IMR 0xC4302C
+
+#define mmPSOC_SPI_ISR 0xC43030
+
+#define mmPSOC_SPI_RISR 0xC43034
+
+#define mmPSOC_SPI_TXOICR 0xC43038
+
+#define mmPSOC_SPI_RXOICR 0xC4303C
+
+#define mmPSOC_SPI_RXUICR 0xC43040
+
+#define mmPSOC_SPI_MSTICR 0xC43044
+
+#define mmPSOC_SPI_ICR 0xC43048
+
+#define mmPSOC_SPI_IDR 0xC43058
+
+#define mmPSOC_SPI_SSI_VERSION_ID 0xC4305C
+
+#define mmPSOC_SPI_DR0 0xC43060
+
+#define mmPSOC_SPI_DR1 0xC43064
+
+#define mmPSOC_SPI_DR2 0xC43068
+
+#define mmPSOC_SPI_DR3 0xC4306C
+
+#define mmPSOC_SPI_DR4 0xC43070
+
+#define mmPSOC_SPI_DR5 0xC43074
+
+#define mmPSOC_SPI_DR6 0xC43078
+
+#define mmPSOC_SPI_DR7 0xC4307C
+
+#define mmPSOC_SPI_DR8 0xC43080
+
+#define mmPSOC_SPI_DR9 0xC43084
+
+#define mmPSOC_SPI_DR10 0xC43088
+
+#define mmPSOC_SPI_DR11 0xC4308C
+
+#define mmPSOC_SPI_DR12 0xC43090
+
+#define mmPSOC_SPI_DR13 0xC43094
+
+#define mmPSOC_SPI_DR14 0xC43098
+
+#define mmPSOC_SPI_DR15 0xC4309C
+
+#define mmPSOC_SPI_DR16 0xC430A0
+
+#define mmPSOC_SPI_DR17 0xC430A4
+
+#define mmPSOC_SPI_DR18 0xC430A8
+
+#define mmPSOC_SPI_DR19 0xC430AC
+
+#define mmPSOC_SPI_DR20 0xC430B0
+
+#define mmPSOC_SPI_DR21 0xC430B4
+
+#define mmPSOC_SPI_DR22 0xC430B8
+
+#define mmPSOC_SPI_DR23 0xC430BC
+
+#define mmPSOC_SPI_DR24 0xC430C0
+
+#define mmPSOC_SPI_DR25 0xC430C4
+
+#define mmPSOC_SPI_DR26 0xC430C8
+
+#define mmPSOC_SPI_DR27 0xC430CC
+
+#define mmPSOC_SPI_DR28 0xC430D0
+
+#define mmPSOC_SPI_DR29 0xC430D4
+
+#define mmPSOC_SPI_DR30 0xC430D8
+
+#define mmPSOC_SPI_DR31 0xC430DC
+
+#define mmPSOC_SPI_DR32 0xC430E0
+
+#define mmPSOC_SPI_DR33 0xC430E4
+
+#define mmPSOC_SPI_DR34 0xC430E8
+
+#define mmPSOC_SPI_DR35 0xC430EC
+
+#define mmPSOC_SPI_RX_SAMPLE_DLY 0xC430F0
+
+#define mmPSOC_SPI_RSVD_1 0xC430F8
+
+#define mmPSOC_SPI_RSVD_2 0xC430FC
+
+#endif /* ASIC_REG_PSOC_SPI_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
new file mode 100644
index 000000000000..d56c9fa0e7ba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X0_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_E_ARB 0x201100
+
+#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_W_ARB 0x201104
+
+#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB 0x201110
+
+#define mmSRAM_Y0_X0_RTR_HBW_E_ARB_MAX 0x201120
+
+#define mmSRAM_Y0_X0_RTR_HBW_W_ARB_MAX 0x201124
+
+#define mmSRAM_Y0_X0_RTR_HBW_L_ARB_MAX 0x201130
+
+#define mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB 0x201140
+
+#define mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB 0x201144
+
+#define mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB 0x201148
+
+#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB 0x201160
+
+#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB 0x201164
+
+#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_L_ARB 0x201168
+
+#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_E_ARB 0x201200
+
+#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_W_ARB 0x201204
+
+#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_L_ARB 0x201210
+
+#define mmSRAM_Y0_X0_RTR_LBW_E_ARB_MAX 0x201220
+
+#define mmSRAM_Y0_X0_RTR_LBW_W_ARB_MAX 0x201224
+
+#define mmSRAM_Y0_X0_RTR_LBW_L_ARB_MAX 0x201230
+
+#define mmSRAM_Y0_X0_RTR_LBW_DATA_E_ARB 0x201240
+
+#define mmSRAM_Y0_X0_RTR_LBW_DATA_W_ARB 0x201244
+
+#define mmSRAM_Y0_X0_RTR_LBW_DATA_L_ARB 0x201248
+
+#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_E_ARB 0x201260
+
+#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_W_ARB 0x201264
+
+#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_L_ARB 0x201268
+
+#define mmSRAM_Y0_X0_RTR_DBG_E_ARB 0x201300
+
+#define mmSRAM_Y0_X0_RTR_DBG_W_ARB 0x201304
+
+#define mmSRAM_Y0_X0_RTR_DBG_L_ARB 0x201310
+
+#define mmSRAM_Y0_X0_RTR_DBG_E_ARB_MAX 0x201320
+
+#define mmSRAM_Y0_X0_RTR_DBG_W_ARB_MAX 0x201324
+
+#define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX 0x201330
+
+#endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
new file mode 100644
index 000000000000..5624544303ca
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X1_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_E_ARB 0x205100
+
+#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_W_ARB 0x205104
+
+#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB 0x205110
+
+#define mmSRAM_Y0_X1_RTR_HBW_E_ARB_MAX 0x205120
+
+#define mmSRAM_Y0_X1_RTR_HBW_W_ARB_MAX 0x205124
+
+#define mmSRAM_Y0_X1_RTR_HBW_L_ARB_MAX 0x205130
+
+#define mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB 0x205140
+
+#define mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB 0x205144
+
+#define mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB 0x205148
+
+#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB 0x205160
+
+#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB 0x205164
+
+#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_L_ARB 0x205168
+
+#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_E_ARB 0x205200
+
+#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_W_ARB 0x205204
+
+#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_L_ARB 0x205210
+
+#define mmSRAM_Y0_X1_RTR_LBW_E_ARB_MAX 0x205220
+
+#define mmSRAM_Y0_X1_RTR_LBW_W_ARB_MAX 0x205224
+
+#define mmSRAM_Y0_X1_RTR_LBW_L_ARB_MAX 0x205230
+
+#define mmSRAM_Y0_X1_RTR_LBW_DATA_E_ARB 0x205240
+
+#define mmSRAM_Y0_X1_RTR_LBW_DATA_W_ARB 0x205244
+
+#define mmSRAM_Y0_X1_RTR_LBW_DATA_L_ARB 0x205248
+
+#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_E_ARB 0x205260
+
+#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_W_ARB 0x205264
+
+#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_L_ARB 0x205268
+
+#define mmSRAM_Y0_X1_RTR_DBG_E_ARB 0x205300
+
+#define mmSRAM_Y0_X1_RTR_DBG_W_ARB 0x205304
+
+#define mmSRAM_Y0_X1_RTR_DBG_L_ARB 0x205310
+
+#define mmSRAM_Y0_X1_RTR_DBG_E_ARB_MAX 0x205320
+
+#define mmSRAM_Y0_X1_RTR_DBG_W_ARB_MAX 0x205324
+
+#define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX 0x205330
+
+#endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
new file mode 100644
index 000000000000..3322bc0bd1df
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X2_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_E_ARB 0x209100
+
+#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_W_ARB 0x209104
+
+#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB 0x209110
+
+#define mmSRAM_Y0_X2_RTR_HBW_E_ARB_MAX 0x209120
+
+#define mmSRAM_Y0_X2_RTR_HBW_W_ARB_MAX 0x209124
+
+#define mmSRAM_Y0_X2_RTR_HBW_L_ARB_MAX 0x209130
+
+#define mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB 0x209140
+
+#define mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB 0x209144
+
+#define mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB 0x209148
+
+#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB 0x209160
+
+#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB 0x209164
+
+#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_L_ARB 0x209168
+
+#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_E_ARB 0x209200
+
+#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_W_ARB 0x209204
+
+#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_L_ARB 0x209210
+
+#define mmSRAM_Y0_X2_RTR_LBW_E_ARB_MAX 0x209220
+
+#define mmSRAM_Y0_X2_RTR_LBW_W_ARB_MAX 0x209224
+
+#define mmSRAM_Y0_X2_RTR_LBW_L_ARB_MAX 0x209230
+
+#define mmSRAM_Y0_X2_RTR_LBW_DATA_E_ARB 0x209240
+
+#define mmSRAM_Y0_X2_RTR_LBW_DATA_W_ARB 0x209244
+
+#define mmSRAM_Y0_X2_RTR_LBW_DATA_L_ARB 0x209248
+
+#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_E_ARB 0x209260
+
+#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_W_ARB 0x209264
+
+#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_L_ARB 0x209268
+
+#define mmSRAM_Y0_X2_RTR_DBG_E_ARB 0x209300
+
+#define mmSRAM_Y0_X2_RTR_DBG_W_ARB 0x209304
+
+#define mmSRAM_Y0_X2_RTR_DBG_L_ARB 0x209310
+
+#define mmSRAM_Y0_X2_RTR_DBG_E_ARB_MAX 0x209320
+
+#define mmSRAM_Y0_X2_RTR_DBG_W_ARB_MAX 0x209324
+
+#define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX 0x209330
+
+#endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
new file mode 100644
index 000000000000..81e393db2027
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X3_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_E_ARB 0x20D100
+
+#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_W_ARB 0x20D104
+
+#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB 0x20D110
+
+#define mmSRAM_Y0_X3_RTR_HBW_E_ARB_MAX 0x20D120
+
+#define mmSRAM_Y0_X3_RTR_HBW_W_ARB_MAX 0x20D124
+
+#define mmSRAM_Y0_X3_RTR_HBW_L_ARB_MAX 0x20D130
+
+#define mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB 0x20D140
+
+#define mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB 0x20D144
+
+#define mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB 0x20D148
+
+#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB 0x20D160
+
+#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB 0x20D164
+
+#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_L_ARB 0x20D168
+
+#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_E_ARB 0x20D200
+
+#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_W_ARB 0x20D204
+
+#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_L_ARB 0x20D210
+
+#define mmSRAM_Y0_X3_RTR_LBW_E_ARB_MAX 0x20D220
+
+#define mmSRAM_Y0_X3_RTR_LBW_W_ARB_MAX 0x20D224
+
+#define mmSRAM_Y0_X3_RTR_LBW_L_ARB_MAX 0x20D230
+
+#define mmSRAM_Y0_X3_RTR_LBW_DATA_E_ARB 0x20D240
+
+#define mmSRAM_Y0_X3_RTR_LBW_DATA_W_ARB 0x20D244
+
+#define mmSRAM_Y0_X3_RTR_LBW_DATA_L_ARB 0x20D248
+
+#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_E_ARB 0x20D260
+
+#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_W_ARB 0x20D264
+
+#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_L_ARB 0x20D268
+
+#define mmSRAM_Y0_X3_RTR_DBG_E_ARB 0x20D300
+
+#define mmSRAM_Y0_X3_RTR_DBG_W_ARB 0x20D304
+
+#define mmSRAM_Y0_X3_RTR_DBG_L_ARB 0x20D310
+
+#define mmSRAM_Y0_X3_RTR_DBG_E_ARB_MAX 0x20D320
+
+#define mmSRAM_Y0_X3_RTR_DBG_W_ARB_MAX 0x20D324
+
+#define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX 0x20D330
+
+#endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
new file mode 100644
index 000000000000..b2e11b1de385
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_
+#define ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_
+
+/*
+ *****************************************
+ * SRAM_Y0_X4_RTR (Prototype: IC_RTR)
+ *****************************************
+ */
+
+#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_E_ARB 0x211100
+
+#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_W_ARB 0x211104
+
+#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB 0x211110
+
+#define mmSRAM_Y0_X4_RTR_HBW_E_ARB_MAX 0x211120
+
+#define mmSRAM_Y0_X4_RTR_HBW_W_ARB_MAX 0x211124
+
+#define mmSRAM_Y0_X4_RTR_HBW_L_ARB_MAX 0x211130
+
+#define mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB 0x211140
+
+#define mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB 0x211144
+
+#define mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB 0x211148
+
+#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB 0x211160
+
+#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB 0x211164
+
+#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_L_ARB 0x211168
+
+#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_E_ARB 0x211200
+
+#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_W_ARB 0x211204
+
+#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_L_ARB 0x211210
+
+#define mmSRAM_Y0_X4_RTR_LBW_E_ARB_MAX 0x211220
+
+#define mmSRAM_Y0_X4_RTR_LBW_W_ARB_MAX 0x211224
+
+#define mmSRAM_Y0_X4_RTR_LBW_L_ARB_MAX 0x211230
+
+#define mmSRAM_Y0_X4_RTR_LBW_DATA_E_ARB 0x211240
+
+#define mmSRAM_Y0_X4_RTR_LBW_DATA_W_ARB 0x211244
+
+#define mmSRAM_Y0_X4_RTR_LBW_DATA_L_ARB 0x211248
+
+#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_E_ARB 0x211260
+
+#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_W_ARB 0x211264
+
+#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_L_ARB 0x211268
+
+#define mmSRAM_Y0_X4_RTR_DBG_E_ARB 0x211300
+
+#define mmSRAM_Y0_X4_RTR_DBG_W_ARB 0x211304
+
+#define mmSRAM_Y0_X4_RTR_DBG_L_ARB 0x211310
+
+#define mmSRAM_Y0_X4_RTR_DBG_E_ARB_MAX 0x211320
+
+#define mmSRAM_Y0_X4_RTR_DBG_W_ARB_MAX 0x211324
+
+#define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX 0x211330
+
+#endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
new file mode 100644
index 000000000000..b4ea8cae2757
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_STLB_MASKS_H_
+#define ASIC_REG_STLB_MASKS_H_
+
+/*
+ *****************************************
+ * STLB (Prototype: STLB)
+ *****************************************
+ */
+
+/* STLB_CACHE_INV */
+#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
+#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF
+#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8
+#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00
+
+/* STLB_CACHE_INV_BASE_39_8 */
+#define STLB_CACHE_INV_BASE_39_8_PA_SHIFT 0
+#define STLB_CACHE_INV_BASE_39_8_PA_MASK 0xFFFFFFFF
+
+/* STLB_CACHE_INV_BASE_49_40 */
+#define STLB_CACHE_INV_BASE_49_40_PA_SHIFT 0
+#define STLB_CACHE_INV_BASE_49_40_PA_MASK 0x3FF
+
+/* STLB_STLB_FEATURE_EN */
+#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_SHIFT 0
+#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_MASK 0x1
+#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_SHIFT 1
+#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_MASK 0x2
+#define STLB_STLB_FEATURE_EN_LOOKUP_EN_SHIFT 2
+#define STLB_STLB_FEATURE_EN_LOOKUP_EN_MASK 0x4
+#define STLB_STLB_FEATURE_EN_BYPASS_SHIFT 3
+#define STLB_STLB_FEATURE_EN_BYPASS_MASK 0x8
+#define STLB_STLB_FEATURE_EN_BANK_STOP_SHIFT 4
+#define STLB_STLB_FEATURE_EN_BANK_STOP_MASK 0x10
+#define STLB_STLB_FEATURE_EN_TRACE_EN_SHIFT 5
+#define STLB_STLB_FEATURE_EN_TRACE_EN_MASK 0x20
+#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_SHIFT 6
+#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK 0x40
+#define STLB_STLB_FEATURE_EN_CACHING_EN_SHIFT 7
+#define STLB_STLB_FEATURE_EN_CACHING_EN_MASK 0xF80
+
+/* STLB_STLB_AXI_CACHE */
+#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_SHIFT 0
+#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_MASK 0xF
+#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_SHIFT 4
+#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_MASK 0xF0
+#define STLB_STLB_AXI_CACHE_INV_ARCACHE_SHIFT 8
+#define STLB_STLB_AXI_CACHE_INV_ARCACHE_MASK 0xF00
+
+/* STLB_HOP_CONFIGURATION */
+#define STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT 0
+#define STLB_HOP_CONFIGURATION_FIRST_HOP_MASK 0x7
+#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SHIFT 4
+#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_MASK 0x70
+#define STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT 8
+#define STLB_HOP_CONFIGURATION_LAST_HOP_MASK 0x700
+
+/* STLB_LINK_LIST_LOOKUP_MASK_49_32 */
+#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_SHIFT 0
+#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_MASK 0x3FFFF
+
+/* STLB_LINK_LIST_LOOKUP_MASK_31_0 */
+#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_SHIFT 0
+#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_MASK 0xFFFFFFFF
+
+/* STLB_LINK_LIST */
+#define STLB_LINK_LIST_CLEAR_SHIFT 0
+#define STLB_LINK_LIST_CLEAR_MASK 0x1
+#define STLB_LINK_LIST_EN_SHIFT 1
+#define STLB_LINK_LIST_EN_MASK 0x2
+
+/* STLB_INV_ALL_START */
+#define STLB_INV_ALL_START_R_SHIFT 0
+#define STLB_INV_ALL_START_R_MASK 0x1
+
+/* STLB_INV_ALL_SET */
+#define STLB_INV_ALL_SET_R_SHIFT 0
+#define STLB_INV_ALL_SET_R_MASK 0xFF
+
+/* STLB_INV_PS */
+#define STLB_INV_PS_R_SHIFT 0
+#define STLB_INV_PS_R_MASK 0x3
+
+/* STLB_INV_CONSUMER_INDEX */
+#define STLB_INV_CONSUMER_INDEX_R_SHIFT 0
+#define STLB_INV_CONSUMER_INDEX_R_MASK 0xFF
+
+/* STLB_INV_HIT_COUNT */
+#define STLB_INV_HIT_COUNT_R_SHIFT 0
+#define STLB_INV_HIT_COUNT_R_MASK 0x7FF
+
+/* STLB_INV_SET */
+#define STLB_INV_SET_R_SHIFT 0
+#define STLB_INV_SET_R_MASK 0xFF
+
+/* STLB_SRAM_INIT */
+#define STLB_SRAM_INIT_BUSY_TAG_SHIFT 0
+#define STLB_SRAM_INIT_BUSY_TAG_MASK 0x3
+#define STLB_SRAM_INIT_BUSY_SLICE_SHIFT 2
+#define STLB_SRAM_INIT_BUSY_SLICE_MASK 0xC
+#define STLB_SRAM_INIT_BUSY_DATA_SHIFT 4
+#define STLB_SRAM_INIT_BUSY_DATA_MASK 0x10
+
+#endif /* ASIC_REG_STLB_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
new file mode 100644
index 000000000000..0f5281d3e65b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_STLB_REGS_H_
+#define ASIC_REG_STLB_REGS_H_
+
+/*
+ *****************************************
+ * STLB (Prototype: STLB)
+ *****************************************
+ */
+
+#define mmSTLB_CACHE_INV 0x490010
+
+#define mmSTLB_CACHE_INV_BASE_39_8 0x490014
+
+#define mmSTLB_CACHE_INV_BASE_49_40 0x490018
+
+#define mmSTLB_STLB_FEATURE_EN 0x49001C
+
+#define mmSTLB_STLB_AXI_CACHE 0x490020
+
+#define mmSTLB_HOP_CONFIGURATION 0x490024
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_49_32 0x490028
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_31_0 0x49002C
+
+#define mmSTLB_LINK_LIST 0x490030
+
+#define mmSTLB_INV_ALL_START 0x490034
+
+#define mmSTLB_INV_ALL_SET 0x490038
+
+#define mmSTLB_INV_PS 0x49003C
+
+#define mmSTLB_INV_CONSUMER_INDEX 0x490040
+
+#define mmSTLB_INV_HIT_COUNT 0x490044
+
+#define mmSTLB_INV_SET 0x490048
+
+#define mmSTLB_SRAM_INIT 0x49004C
+
+#endif /* ASIC_REG_STLB_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
new file mode 100644
index 000000000000..e5587b49eecd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
@@ -0,0 +1,1607 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CFG_MASKS_H_
+#define ASIC_REG_TPC0_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW */
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_0 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_0 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_1 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_1 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_2 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_2 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_3 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_3 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_4 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_4 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_SRF */
+#define TPC0_CFG_KERNEL_SRF_V_SHIFT 0
+#define TPC0_CFG_KERNEL_SRF_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_CONFIG */
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00
+
+/* TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE */
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
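/*
 * Illustrative sketch (editor's addition, not part of this patch): a
 * sync-object message word packs a 16-bit write value, a 15-bit sync
 * object address offset and a 1-bit operation select, again with the
 * (value << SHIFT) & MASK convention. The helper and parameter names
 * are assumptions for the example only.
 */
static inline u32 tpc_so_message(u32 write_value, u32 so_offset, u32 op)
{
	return ((write_value << TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT) &
			TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK) |
		((so_offset << TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT) &
			TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK) |
		((op << TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT) &
			TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK);
}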
+
+/* TPC0_CFG_RESERVED_DESC_END */
+#define TPC0_CFG_RESERVED_DESC_END_V_SHIFT 0
+#define TPC0_CFG_RESERVED_DESC_END_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_ROUND_CSR */
+#define TPC0_CFG_ROUND_CSR_MODE_SHIFT 0
+#define TPC0_CFG_ROUND_CSR_MODE_MASK 0x7
+
+/* TPC0_CFG_TBUF_BASE_ADDR_LOW */
+#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TBUF_BASE_ADDR_HIGH */
+#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_SEMAPHORE */
+#define TPC0_CFG_SEMAPHORE_V_SHIFT 0
+#define TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_VFLAGS */
+#define TPC0_CFG_VFLAGS_V_SHIFT 0
+#define TPC0_CFG_VFLAGS_V_MASK 0xF
+
+/* TPC0_CFG_SFLAGS */
+#define TPC0_CFG_SFLAGS_V_SHIFT 0
+#define TPC0_CFG_SFLAGS_V_MASK 0xF
+
+/* TPC0_CFG_LFSR_POLYNOM */
+#define TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0
+#define TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_STATUS */
+#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1
+#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2
+#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2
+#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4
+#define TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3
+#define TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8
+#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_SHIFT 4
+#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK 0x10
+
+/* TPC0_CFG_CFG_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_CFG_SUBTRACT_VALUE */
+#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0
+#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_SM_BASE_ADDRESS_LOW */
+#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_SM_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TPC_CMD */
+#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0
+#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1
+#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1
+#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2
+#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2
+#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4
+#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3
+#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20
+#define TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6
+#define TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40
+
+/* TPC0_CFG_TPC_EXECUTE */
+#define TPC0_CFG_TPC_EXECUTE_V_SHIFT 0
+#define TPC0_CFG_TPC_EXECUTE_V_MASK 0x1
+
+/* TPC0_CFG_TPC_STALL */
+#define TPC0_CFG_TPC_STALL_V_SHIFT 0
+#define TPC0_CFG_TPC_STALL_V_MASK 0x1
+
+/* TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_MSS_CONFIG */
+#define TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0
+#define TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF
+#define TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4
+#define TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0
+#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8
+#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300
+#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10
+#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 0x400
+
+/* TPC0_CFG_TPC_INTR_CAUSE */
+#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0
+#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TPC_INTR_MASK */
+#define TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0
+#define TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TSB_CONFIG */
+#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_SHIFT 0
+#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_MASK 0x1F
+#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_SHIFT 5
+#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_MASK 0x3E0
+#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_SHIFT 10
+#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_MASK 0xFFC00
+#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_SHIFT 20
+#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_MASK 0x3FF00000
+
+/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW */
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_0 */
+#define TPC0_CFG_QM_TID_BASE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_0 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_1 */
+#define TPC0_CFG_QM_TID_BASE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_1 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_2 */
+#define TPC0_CFG_QM_TID_BASE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_2 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_3 */
+#define TPC0_CFG_QM_TID_BASE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_3 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_4 */
+#define TPC0_CFG_QM_TID_BASE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_4 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_SRF */
+#define TPC0_CFG_QM_SRF_V_SHIFT 0
+#define TPC0_CFG_QM_SRF_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_CONFIG */
+#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
+#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
+#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
+#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8
+#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00
+
+/* TPC0_CFG_QM_SYNC_OBJECT_MESSAGE */
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000
+
+/* TPC0_CFG_ARUSER */
+#define TPC0_CFG_ARUSER_ASID_SHIFT 0
+#define TPC0_CFG_ARUSER_ASID_MASK 0x3FF
+#define TPC0_CFG_ARUSER_MMBP_SHIFT 10
+#define TPC0_CFG_ARUSER_MMBP_MASK 0x400
+#define TPC0_CFG_ARUSER_V_SHIFT 11
+#define TPC0_CFG_ARUSER_V_MASK 0xFFFFF800
+
+/* TPC0_CFG_AWUSER */
+#define TPC0_CFG_AWUSER_ASID_SHIFT 0
+#define TPC0_CFG_AWUSER_ASID_MASK 0x3FF
+#define TPC0_CFG_AWUSER_MMBP_SHIFT 10
+#define TPC0_CFG_AWUSER_MMBP_MASK 0x400
+#define TPC0_CFG_AWUSER_V_SHIFT 11
+#define TPC0_CFG_AWUSER_V_MASK 0xFFFFF800
+
+/* TPC0_CFG_FUNC_MBIST_CNTRL */
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_MASK 0x1
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_SHIFT 1
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK 0x2
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_SHIFT 2
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK 0x4
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_SHIFT 16
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_MASK 0x3FF0000
+
+/* TPC0_CFG_FUNC_MBIST_PAT */
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_MASK 0x3
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_SHIFT 2
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_MASK 0xC
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_SHIFT 4
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_MASK 0x30
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_SHIFT 6
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_MASK 0xC0
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_SHIFT 8
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_MASK 0x300
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_SHIFT 10
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_MASK 0xC00
+
+/* TPC0_CFG_FUNC_MBIST_MEM */
+#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_MASK 0x7FF
+#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_SHIFT 12
+#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_MASK 0x7000
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_SHIFT 16
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_MASK 0x7FF0000
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_SHIFT 28
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000
+
+#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
new file mode 100644
index 000000000000..2be28a63c50a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CFG_REGS_H_
+#define ASIC_REG_TPC0_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE06400
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE06404
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE06408
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE0640C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE06410
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE06414
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE06418
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE0641C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE06420
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE06424
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE06428
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE0642C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE06430
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE06434
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE06438
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE0643C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE06440
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE06444
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE06448
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE0644C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE06450
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE06454
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE06458
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE0645C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE06460
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE06464
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE06468
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE0646C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE06470
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE06474
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE06478
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE0647C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE06480
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE06484
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE06488
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE0648C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE06490
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE06494
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE06498
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE0649C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE064A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE064A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE064A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE064AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE064B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE064B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE064B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE064BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE064C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE064C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE064C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE064CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE064D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE064D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE064D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE064DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE064E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE064E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE064E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE064EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE064F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE064F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE064F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE064FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE06500
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE06504
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE06508
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE0650C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE06510
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE06514
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE06518
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE0651C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE06520
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE06524
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE06528
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE0652C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE06530
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE06534
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE06538
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE0653C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE06540
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE06544
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE06548
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE0654C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE06550
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE06554
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE06558
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE0655C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE06560
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE06564
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE06568
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE0656C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE06570
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE06574
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE06578
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE0657C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE06580
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE06584
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE06588
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE0658C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE06590
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE06594
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE06598
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE0659C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE065A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE065A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE065A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE065AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE065B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE065B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE065B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE065BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE065C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE065C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE065C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE065CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE065D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE065D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE065D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE065DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE065E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE065E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE065E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE065EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE065F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE065F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE065F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE065FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE06600
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE06604
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE06608
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE0660C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE06610
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE06614
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE06618
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE0661C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE06620
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE06624
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE06628
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE0662C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE06630
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE06634
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE06638
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE0663C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE06640
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE06644
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE06648
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE0664C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE06650
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE06654
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE06658
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE0665C
+
+#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE06660
+
+#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE06664
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_0 0xE06668
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_0 0xE0666C
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_1 0xE06670
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_1 0xE06674
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_2 0xE06678
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_2 0xE0667C
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_3 0xE06680
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_3 0xE06684
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_4 0xE06688
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_4 0xE0668C
+
+#define mmTPC0_CFG_KERNEL_SRF_0 0xE06690
+
+#define mmTPC0_CFG_KERNEL_SRF_1 0xE06694
+
+#define mmTPC0_CFG_KERNEL_SRF_2 0xE06698
+
+#define mmTPC0_CFG_KERNEL_SRF_3 0xE0669C
+
+#define mmTPC0_CFG_KERNEL_SRF_4 0xE066A0
+
+#define mmTPC0_CFG_KERNEL_SRF_5 0xE066A4
+
+#define mmTPC0_CFG_KERNEL_SRF_6 0xE066A8
+
+#define mmTPC0_CFG_KERNEL_SRF_7 0xE066AC
+
+#define mmTPC0_CFG_KERNEL_SRF_8 0xE066B0
+
+#define mmTPC0_CFG_KERNEL_SRF_9 0xE066B4
+
+#define mmTPC0_CFG_KERNEL_SRF_10 0xE066B8
+
+#define mmTPC0_CFG_KERNEL_SRF_11 0xE066BC
+
+#define mmTPC0_CFG_KERNEL_SRF_12 0xE066C0
+
+#define mmTPC0_CFG_KERNEL_SRF_13 0xE066C4
+
+#define mmTPC0_CFG_KERNEL_SRF_14 0xE066C8
+
+#define mmTPC0_CFG_KERNEL_SRF_15 0xE066CC
+
+#define mmTPC0_CFG_KERNEL_SRF_16 0xE066D0
+
+#define mmTPC0_CFG_KERNEL_SRF_17 0xE066D4
+
+#define mmTPC0_CFG_KERNEL_SRF_18 0xE066D8
+
+#define mmTPC0_CFG_KERNEL_SRF_19 0xE066DC
+
+#define mmTPC0_CFG_KERNEL_SRF_20 0xE066E0
+
+#define mmTPC0_CFG_KERNEL_SRF_21 0xE066E4
+
+#define mmTPC0_CFG_KERNEL_SRF_22 0xE066E8
+
+#define mmTPC0_CFG_KERNEL_SRF_23 0xE066EC
+
+#define mmTPC0_CFG_KERNEL_SRF_24 0xE066F0
+
+#define mmTPC0_CFG_KERNEL_SRF_25 0xE066F4
+
+#define mmTPC0_CFG_KERNEL_SRF_26 0xE066F8
+
+#define mmTPC0_CFG_KERNEL_SRF_27 0xE066FC
+
+#define mmTPC0_CFG_KERNEL_SRF_28 0xE06700
+
+#define mmTPC0_CFG_KERNEL_SRF_29 0xE06704
+
+#define mmTPC0_CFG_KERNEL_SRF_30 0xE06708
+
+#define mmTPC0_CFG_KERNEL_SRF_31 0xE0670C
+
+#define mmTPC0_CFG_KERNEL_KERNEL_CONFIG 0xE06710
+
+#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE06714
+
+#define mmTPC0_CFG_RESERVED_DESC_END 0xE06738
+
+#define mmTPC0_CFG_ROUND_CSR 0xE067FC
+
+#define mmTPC0_CFG_TBUF_BASE_ADDR_LOW 0xE06800
+
+#define mmTPC0_CFG_TBUF_BASE_ADDR_HIGH 0xE06804
+
+#define mmTPC0_CFG_SEMAPHORE 0xE06808
+
+#define mmTPC0_CFG_VFLAGS 0xE0680C
+
+#define mmTPC0_CFG_SFLAGS 0xE06810
+
+#define mmTPC0_CFG_LFSR_POLYNOM 0xE06818
+
+#define mmTPC0_CFG_STATUS 0xE0681C
+
+#define mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH 0xE06820
+
+#define mmTPC0_CFG_CFG_SUBTRACT_VALUE 0xE06824
+
+#define mmTPC0_CFG_SM_BASE_ADDRESS_LOW 0xE06828
+
+#define mmTPC0_CFG_SM_BASE_ADDRESS_HIGH 0xE0682C
+
+#define mmTPC0_CFG_TPC_CMD 0xE06830
+
+#define mmTPC0_CFG_TPC_EXECUTE 0xE06838
+
+#define mmTPC0_CFG_TPC_STALL 0xE0683C
+
+#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0xE06840
+
+#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE06844
+
+#define mmTPC0_CFG_MSS_CONFIG 0xE06854
+
+#define mmTPC0_CFG_TPC_INTR_CAUSE 0xE06858
+
+#define mmTPC0_CFG_TPC_INTR_MASK 0xE0685C
+
+#define mmTPC0_CFG_TSB_CONFIG 0xE06860
+
+#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE06A00
+
+#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE06A04
+
+#define mmTPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0xE06A08
+
+#define mmTPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE06A0C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE06A10
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE06A14
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE06A18
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE06A1C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE06A20
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE06A24
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE06A28
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE06A2C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE06A30
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE06A34
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE06A38
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE06A3C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE06A40
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE06A44
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE06A48
+
+#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE06A4C
+
+#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE06A50
+
+#define mmTPC0_CFG_QM_TENSOR_1_PADDING_VALUE 0xE06A54
+
+#define mmTPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE06A58
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE06A5C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE06A60
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE06A64
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE06A68
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE06A6C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE06A70
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE06A74
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE06A78
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE06A7C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE06A80
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE06A84
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE06A88
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE06A8C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE06A90
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE06A94
+
+#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE06A98
+
+#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE06A9C
+
+#define mmTPC0_CFG_QM_TENSOR_2_PADDING_VALUE 0xE06AA0
+
+#define mmTPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE06AA4
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE06AA8
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE06AAC
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE06AB0
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE06AB4
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE06AB8
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE06ABC
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE06AC0
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE06AC4
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE06AC8
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE06ACC
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE06AD0
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE06AD4
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE06AD8
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE06ADC
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE06AE0
+
+#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE06AE4
+
+#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE06AE8
+
+#define mmTPC0_CFG_QM_TENSOR_3_PADDING_VALUE 0xE06AEC
+
+#define mmTPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE06AF0
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE06AF4
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE06AF8
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE06AFC
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE06B00
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE06B04
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE06B08
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE06B0C
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE06B10
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE06B14
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE06B18
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE06B1C
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE06B20
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE06B24
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE06B28
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE06B2C
+
+#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE06B30
+
+#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE06B34
+
+#define mmTPC0_CFG_QM_TENSOR_4_PADDING_VALUE 0xE06B38
+
+#define mmTPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE06B3C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE06B40
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE06B44
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE06B48
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE06B4C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE06B50
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE06B54
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE06B58
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE06B5C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE06B60
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE06B64
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE06B68
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE06B6C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE06B70
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE06B74
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE06B78
+
+#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE06B7C
+
+#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE06B80
+
+#define mmTPC0_CFG_QM_TENSOR_5_PADDING_VALUE 0xE06B84
+
+#define mmTPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE06B88
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE06B8C
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE06B90
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE06B94
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE06B98
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE06B9C
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE06BA0
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE06BA4
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE06BA8
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE06BAC
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE06BB0
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE06BB4
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE06BB8
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE06BBC
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE06BC0
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE06BC4
+
+#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE06BC8
+
+#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE06BCC
+
+#define mmTPC0_CFG_QM_TENSOR_6_PADDING_VALUE 0xE06BD0
+
+#define mmTPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE06BD4
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE06BD8
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE06BDC
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE06BE0
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE06BE4
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE06BE8
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE06BEC
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE06BF0
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE06BF4
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE06BF8
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE06BFC
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE06C00
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE06C04
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE06C08
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE06C0C
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE06C10
+
+#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE06C14
+
+#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE06C18
+
+#define mmTPC0_CFG_QM_TENSOR_7_PADDING_VALUE 0xE06C1C
+
+#define mmTPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE06C20
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE06C24
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE06C28
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE06C2C
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE06C30
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE06C34
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE06C38
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE06C3C
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE06C40
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE06C44
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE06C48
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE06C4C
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE06C50
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE06C54
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE06C58
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE06C5C
+
+#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE06C60
+
+#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE06C64
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_0 0xE06C68
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_0 0xE06C6C
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_1 0xE06C70
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_1 0xE06C74
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_2 0xE06C78
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_2 0xE06C7C
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_3 0xE06C80
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_3 0xE06C84
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_4 0xE06C88
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_4 0xE06C8C
+
+#define mmTPC0_CFG_QM_SRF_0 0xE06C90
+
+#define mmTPC0_CFG_QM_SRF_1 0xE06C94
+
+#define mmTPC0_CFG_QM_SRF_2 0xE06C98
+
+#define mmTPC0_CFG_QM_SRF_3 0xE06C9C
+
+#define mmTPC0_CFG_QM_SRF_4 0xE06CA0
+
+#define mmTPC0_CFG_QM_SRF_5 0xE06CA4
+
+#define mmTPC0_CFG_QM_SRF_6 0xE06CA8
+
+#define mmTPC0_CFG_QM_SRF_7 0xE06CAC
+
+#define mmTPC0_CFG_QM_SRF_8 0xE06CB0
+
+#define mmTPC0_CFG_QM_SRF_9 0xE06CB4
+
+#define mmTPC0_CFG_QM_SRF_10 0xE06CB8
+
+#define mmTPC0_CFG_QM_SRF_11 0xE06CBC
+
+#define mmTPC0_CFG_QM_SRF_12 0xE06CC0
+
+#define mmTPC0_CFG_QM_SRF_13 0xE06CC4
+
+#define mmTPC0_CFG_QM_SRF_14 0xE06CC8
+
+#define mmTPC0_CFG_QM_SRF_15 0xE06CCC
+
+#define mmTPC0_CFG_QM_SRF_16 0xE06CD0
+
+#define mmTPC0_CFG_QM_SRF_17 0xE06CD4
+
+#define mmTPC0_CFG_QM_SRF_18 0xE06CD8
+
+#define mmTPC0_CFG_QM_SRF_19 0xE06CDC
+
+#define mmTPC0_CFG_QM_SRF_20 0xE06CE0
+
+#define mmTPC0_CFG_QM_SRF_21 0xE06CE4
+
+#define mmTPC0_CFG_QM_SRF_22 0xE06CE8
+
+#define mmTPC0_CFG_QM_SRF_23 0xE06CEC
+
+#define mmTPC0_CFG_QM_SRF_24 0xE06CF0
+
+#define mmTPC0_CFG_QM_SRF_25 0xE06CF4
+
+#define mmTPC0_CFG_QM_SRF_26 0xE06CF8
+
+#define mmTPC0_CFG_QM_SRF_27 0xE06CFC
+
+#define mmTPC0_CFG_QM_SRF_28 0xE06D00
+
+#define mmTPC0_CFG_QM_SRF_29 0xE06D04
+
+#define mmTPC0_CFG_QM_SRF_30 0xE06D08
+
+#define mmTPC0_CFG_QM_SRF_31 0xE06D0C
+
+#define mmTPC0_CFG_QM_KERNEL_CONFIG 0xE06D10
+
+#define mmTPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0xE06D14
+
+#define mmTPC0_CFG_ARUSER 0xE06D18
+
+#define mmTPC0_CFG_AWUSER 0xE06D1C
+
+#define mmTPC0_CFG_FUNC_MBIST_CNTRL 0xE06E00
+
+#define mmTPC0_CFG_FUNC_MBIST_PAT 0xE06E04
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_0 0xE06E08
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_1 0xE06E0C
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_2 0xE06E10
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_3 0xE06E14
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_4 0xE06E18
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_5 0xE06E1C
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_6 0xE06E20
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_7 0xE06E24
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_8 0xE06E28
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE06E2C
+
+#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
new file mode 100644
index 000000000000..9aa2d8b53207
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CMDQ_MASKS_H_
+#define ASIC_REG_TPC0_CMDQ_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+/* TPC0_CMDQ_GLBL_CFG0 */
+#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0
+#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1
+#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1
+#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2
+#define TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2
+#define TPC0_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4
+#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3
+#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* TPC0_CMDQ_GLBL_CFG1 */
+#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2
+#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4
+#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* TPC0_CMDQ_GLBL_PROT */
+#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0
+#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1
+#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1
+#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2
+#define TPC0_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2
+#define TPC0_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4
+#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3
+#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8
+#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* TPC0_CMDQ_GLBL_ERR_CFG */
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* TPC0_CMDQ_GLBL_ERR_ADDR_LO */
+#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_ERR_ADDR_HI */
+#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_ERR_WDATA */
+#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_GLBL_SECURE_PROPS */
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_CMDQ_GLBL_NON_SECURE_PROPS */
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_CMDQ_GLBL_STS0 */
+#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2
+#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4
+#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* TPC0_CMDQ_GLBL_STS1 */
+#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* TPC0_CMDQ_CQ_CFG0 */
+#define TPC0_CMDQ_CQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_CMDQ_CQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_CMDQ_CQ_CFG1 */
+#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_ARUSER */
+#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define TPC0_CMDQ_CQ_ARUSER_WORD_SHIFT 1
+#define TPC0_CMDQ_CQ_ARUSER_WORD_MASK 0x2
+
+/* TPC0_CMDQ_CQ_PTR_LO */
+#define TPC0_CMDQ_CQ_PTR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_PTR_HI */
+#define TPC0_CMDQ_CQ_PTR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_TSIZE */
+#define TPC0_CMDQ_CQ_TSIZE_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_CTL */
+#define TPC0_CMDQ_CQ_CTL_RPT_SHIFT 0
+#define TPC0_CMDQ_CQ_CTL_RPT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CTL_CTL_SHIFT 16
+#define TPC0_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_PTR_LO_STS */
+#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_PTR_HI_STS */
+#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_TSIZE_STS */
+#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_CTL_STS */
+#define TPC0_CMDQ_CQ_CTL_STS_RPT_SHIFT 0
+#define TPC0_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_CTL_STS_CTL_SHIFT 16
+#define TPC0_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_STS0 */
+#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_CMDQ_CQ_STS1 */
+#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31
+#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_EN */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_SAT */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT */
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* TPC0_CMDQ_CQ_IFIFO_CNT */
+#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO */
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI */
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET */
+#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_FENCE0_RDATA */
+#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE1_RDATA */
+#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE2_RDATA */
+#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE3_RDATA */
+#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_CMDQ_CP_FENCE0_CNT */
+#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE1_CNT */
+#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE2_CNT */
+#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_FENCE3_CNT */
+#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CP_STS */
+#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_CMDQ_CP_STS_ERDY_SHIFT 16
+#define TPC0_CMDQ_CP_STS_ERDY_MASK 0x10000
+#define TPC0_CMDQ_CP_STS_RRDY_SHIFT 17
+#define TPC0_CMDQ_CP_STS_RRDY_MASK 0x20000
+#define TPC0_CMDQ_CP_STS_MRDY_SHIFT 18
+#define TPC0_CMDQ_CP_STS_MRDY_MASK 0x40000
+#define TPC0_CMDQ_CP_STS_SW_STOP_SHIFT 19
+#define TPC0_CMDQ_CP_STS_SW_STOP_MASK 0x80000
+#define TPC0_CMDQ_CP_STS_FENCE_ID_SHIFT 20
+#define TPC0_CMDQ_CP_STS_FENCE_ID_MASK 0x300000
+#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* TPC0_CMDQ_CP_CURRENT_INST_LO */
+#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_CURRENT_INST_HI */
+#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CP_BARRIER_CFG */
+#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* TPC0_CMDQ_CP_DBG_0 */
+#define TPC0_CMDQ_CP_DBG_0_VAL_SHIFT 0
+#define TPC0_CMDQ_CP_DBG_0_VAL_MASK 0xFF
+
+/* TPC0_CMDQ_CQ_BUF_ADDR */
+#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_CMDQ_CQ_BUF_RDATA */
+#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0
+#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
new file mode 100644
index 000000000000..3572752ba66e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CMDQ_REGS_H_
+#define ASIC_REG_TPC0_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC0_CMDQ_GLBL_CFG0 0xE09000
+
+#define mmTPC0_CMDQ_GLBL_CFG1 0xE09004
+
+#define mmTPC0_CMDQ_GLBL_PROT 0xE09008
+
+#define mmTPC0_CMDQ_GLBL_ERR_CFG 0xE0900C
+
+#define mmTPC0_CMDQ_GLBL_ERR_ADDR_LO 0xE09010
+
+#define mmTPC0_CMDQ_GLBL_ERR_ADDR_HI 0xE09014
+
+#define mmTPC0_CMDQ_GLBL_ERR_WDATA 0xE09018
+
+#define mmTPC0_CMDQ_GLBL_SECURE_PROPS 0xE0901C
+
+#define mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS 0xE09020
+
+#define mmTPC0_CMDQ_GLBL_STS0 0xE09024
+
+#define mmTPC0_CMDQ_GLBL_STS1 0xE09028
+
+#define mmTPC0_CMDQ_CQ_CFG0 0xE090B0
+
+#define mmTPC0_CMDQ_CQ_CFG1 0xE090B4
+
+#define mmTPC0_CMDQ_CQ_ARUSER 0xE090B8
+
+#define mmTPC0_CMDQ_CQ_PTR_LO 0xE090C0
+
+#define mmTPC0_CMDQ_CQ_PTR_HI 0xE090C4
+
+#define mmTPC0_CMDQ_CQ_TSIZE 0xE090C8
+
+#define mmTPC0_CMDQ_CQ_CTL 0xE090CC
+
+#define mmTPC0_CMDQ_CQ_PTR_LO_STS 0xE090D4
+
+#define mmTPC0_CMDQ_CQ_PTR_HI_STS 0xE090D8
+
+#define mmTPC0_CMDQ_CQ_TSIZE_STS 0xE090DC
+
+#define mmTPC0_CMDQ_CQ_CTL_STS 0xE090E0
+
+#define mmTPC0_CMDQ_CQ_STS0 0xE090E4
+
+#define mmTPC0_CMDQ_CQ_STS1 0xE090E8
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN 0xE090F0
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE090F4
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT 0xE090F8
+
+#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE090FC
+
+#define mmTPC0_CMDQ_CQ_IFIFO_CNT 0xE09108
+
+#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE09120
+
+#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE09124
+
+#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE09128
+
+#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE0912C
+
+#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE09130
+
+#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE09134
+
+#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE09138
+
+#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE0913C
+
+#define mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE09140
+
+#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE09144
+
+#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE09148
+
+#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE0914C
+
+#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE09150
+
+#define mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE09154
+
+#define mmTPC0_CMDQ_CP_FENCE0_RDATA 0xE09158
+
+#define mmTPC0_CMDQ_CP_FENCE1_RDATA 0xE0915C
+
+#define mmTPC0_CMDQ_CP_FENCE2_RDATA 0xE09160
+
+#define mmTPC0_CMDQ_CP_FENCE3_RDATA 0xE09164
+
+#define mmTPC0_CMDQ_CP_FENCE0_CNT 0xE09168
+
+#define mmTPC0_CMDQ_CP_FENCE1_CNT 0xE0916C
+
+#define mmTPC0_CMDQ_CP_FENCE2_CNT 0xE09170
+
+#define mmTPC0_CMDQ_CP_FENCE3_CNT 0xE09174
+
+#define mmTPC0_CMDQ_CP_STS 0xE09178
+
+#define mmTPC0_CMDQ_CP_CURRENT_INST_LO 0xE0917C
+
+#define mmTPC0_CMDQ_CP_CURRENT_INST_HI 0xE09180
+
+#define mmTPC0_CMDQ_CP_BARRIER_CFG 0xE09184
+
+#define mmTPC0_CMDQ_CP_DBG_0 0xE09188
+
+#define mmTPC0_CMDQ_CQ_BUF_ADDR 0xE09308
+
+#define mmTPC0_CMDQ_CQ_BUF_RDATA 0xE0930C
+
+#endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
new file mode 100644
index 000000000000..ed866d93c440
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_EML_CFG_MASKS_H_
+#define ASIC_REG_TPC0_EML_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_EML_CFG (Prototype: TPC_EML_CFG)
+ *****************************************
+ */
+
+/* TPC0_EML_CFG_DBG_CNT */
+#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_SHIFT 0
+#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_MASK 0x1
+#define TPC0_EML_CFG_DBG_CNT_DBG_EN_SHIFT 1
+#define TPC0_EML_CFG_DBG_CNT_DBG_EN_MASK 0x2
+#define TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT 2
+#define TPC0_EML_CFG_DBG_CNT_CORE_RST_MASK 0x4
+#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_SHIFT 4
+#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_MASK 0x10
+#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_SHIFT 5
+#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_MASK 0x20
+#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_SHIFT 6
+#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK 0x40
+#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_SHIFT 7
+#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_MASK 0x80
+#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_SHIFT 16
+#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_MASK 0x10000
+
+/* TPC0_EML_CFG_DBG_STS */
+#define TPC0_EML_CFG_DBG_STS_DBG_MODE_SHIFT 0
+#define TPC0_EML_CFG_DBG_STS_DBG_MODE_MASK 0x1
+#define TPC0_EML_CFG_DBG_STS_CORE_READY_SHIFT 1
+#define TPC0_EML_CFG_DBG_STS_CORE_READY_MASK 0x2
+#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_SHIFT 2
+#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_MASK 0x4
+#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_SHIFT 3
+#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_MASK 0x8
+#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_SHIFT 4
+#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_MASK 0x10
+#define TPC0_EML_CFG_DBG_STS_QM_IDLE_SHIFT 5
+#define TPC0_EML_CFG_DBG_STS_QM_IDLE_MASK 0x20
+#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_SHIFT 6
+#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_MASK 0x40
+#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_SHIFT 7
+#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_MASK 0x80
+#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_SHIFT 8
+#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_MASK 0xFFFFFF00
+
+/* TPC0_EML_CFG_DBG_PADD */
+#define TPC0_EML_CFG_DBG_PADD_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_PADD_COUNT */
+#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_PADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_PADD_EN */
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_SHIFT 2
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_MASK 0x4
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_SHIFT 3
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_MASK 0x8
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_SHIFT 4
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_MASK 0x10
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_SHIFT 5
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_MASK 0x20
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_SHIFT 6
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_MASK 0x40
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_SHIFT 7
+#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_MASK 0x80
+
+/* TPC0_EML_CFG_DBG_VPADD_HIGH */
+#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_MASK 0x1FF
+
+/* TPC0_EML_CFG_DBG_VPADD_LOW */
+#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_MASK 0x1FF
+
+/* TPC0_EML_CFG_DBG_VPADD_COUNT */
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_VPADD_EN */
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_SPADD_HIGH */
+#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_LOW */
+#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_COUNT */
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPADD_EN */
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_COUNT */
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AGUADD_EN */
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWADD_EN */
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW */
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW */
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT */
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWADD_EN */
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_SPDATA */
+#define TPC0_EML_CFG_DBG_SPDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_COUNT */
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_SPDATA_EN */
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_SHIFT 0
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_MASK 0x1
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_SHIFT 1
+#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_MASK 0x2
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_SHIFT 2
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_MASK 0x4
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_SHIFT 3
+#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_MASK 0x8
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXIHBWDATA_EN */
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_MASK 0x2
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA_COUNT */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH */
+#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_MASK 0xFF
+
+/* TPC0_EML_CFG_DBG_AXILBWDATA_EN */
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_SHIFT 0
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_MASK 0x1
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_SHIFT 1
+#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_MASK 0x2
+
+/* TPC0_EML_CFG_DBG_D0_PC */
+#define TPC0_EML_CFG_DBG_D0_PC_PC_SHIFT 0
+#define TPC0_EML_CFG_DBG_D0_PC_PC_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_RTTCONFIG */
+#define TPC0_EML_CFG_RTTCONFIG_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTCONFIG_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTCONFIG_PRIO_SHIFT 1
+#define TPC0_EML_CFG_RTTCONFIG_PRIO_MASK 0x2
+
+/* TPC0_EML_CFG_RTTPREDICATE */
+#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTPREDICATE_GEN_SHIFT 1
+#define TPC0_EML_CFG_RTTPREDICATE_GEN_MASK 0x2
+#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_SHIFT 2
+#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_MASK 0x4
+#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_SHIFT 16
+#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_MASK 0xFFFF0000
+
+/* TPC0_EML_CFG_RTTPREDICATE_INTV */
+#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_SHIFT 0
+#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_RTTTS */
+#define TPC0_EML_CFG_RTTTS_TR_EN_SHIFT 0
+#define TPC0_EML_CFG_RTTTS_TR_EN_MASK 0x1
+#define TPC0_EML_CFG_RTTTS_GEN_SHIFT 1
+#define TPC0_EML_CFG_RTTTS_GEN_MASK 0x2
+#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_SHIFT 2
+#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_MASK 0x4
+
+/* TPC0_EML_CFG_RTTTS_INTV */
+#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_SHIFT 0
+#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_INST_INSERT */
+#define TPC0_EML_CFG_DBG_INST_INSERT_INST_SHIFT 0
+#define TPC0_EML_CFG_DBG_INST_INSERT_INST_MASK 0xFFFFFFFF
+
+/* TPC0_EML_CFG_DBG_INST_INSERT_CTL */
+#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_SHIFT 0
+#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK 0x1
+
+#endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
new file mode 100644
index 000000000000..f1a1b4fa4841
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_EML_CFG_REGS_H_
+#define ASIC_REG_TPC0_EML_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_EML_CFG (Prototype: TPC_EML_CFG)
+ *****************************************
+ */
+
+#define mmTPC0_EML_CFG_DBG_CNT 0x3040000
+
+#define mmTPC0_EML_CFG_DBG_STS 0x3040004
+
+#define mmTPC0_EML_CFG_DBG_PADD_0 0x3040008
+
+#define mmTPC0_EML_CFG_DBG_PADD_1 0x304000C
+
+#define mmTPC0_EML_CFG_DBG_PADD_2 0x3040010
+
+#define mmTPC0_EML_CFG_DBG_PADD_3 0x3040014
+
+#define mmTPC0_EML_CFG_DBG_PADD_4 0x3040018
+
+#define mmTPC0_EML_CFG_DBG_PADD_5 0x304001C
+
+#define mmTPC0_EML_CFG_DBG_PADD_6 0x3040020
+
+#define mmTPC0_EML_CFG_DBG_PADD_7 0x3040024
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_0 0x3040028
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_1 0x304002C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_2 0x3040030
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_3 0x3040034
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_4 0x3040038
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_5 0x304003C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_6 0x3040040
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_7 0x3040044
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_0 0x3040048
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_1 0x304004C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_2 0x3040050
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_3 0x3040054
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_4 0x3040058
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_5 0x304005C
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_6 0x3040060
+
+#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_7 0x3040064
+
+#define mmTPC0_EML_CFG_DBG_PADD_EN 0x3040068
+
+#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_0 0x304006C
+
+#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_1 0x3040070
+
+#define mmTPC0_EML_CFG_DBG_VPADD_LOW_0 0x3040074
+
+#define mmTPC0_EML_CFG_DBG_VPADD_LOW_1 0x3040078
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_0 0x304007C
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_1 0x3040080
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_0 0x3040084
+
+#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_1 0x3040088
+
+#define mmTPC0_EML_CFG_DBG_VPADD_EN 0x304008C
+
+#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_0 0x3040090
+
+#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_1 0x3040094
+
+#define mmTPC0_EML_CFG_DBG_SPADD_LOW_0 0x3040098
+
+#define mmTPC0_EML_CFG_DBG_SPADD_LOW_1 0x304009C
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_0 0x30400A0
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_1 0x30400A4
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_0 0x30400A8
+
+#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_1 0x30400AC
+
+#define mmTPC0_EML_CFG_DBG_SPADD_EN 0x30400B0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_0 0x30400B4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_1 0x30400B8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_0 0x30400BC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_1 0x30400C0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_0 0x30400C4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_1 0x30400C8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_0 0x30400CC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_1 0x30400D0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_0 0x30400D4
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_1 0x30400D8
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_0 0x30400DC
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_1 0x30400E0
+
+#define mmTPC0_EML_CFG_DBG_AGUADD_EN 0x30400E4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_0 0x30400E8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_1 0x30400EC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_0 0x30400F0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_1 0x30400F4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_0 0x30400F8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_1 0x30400FC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_0 0x3040100
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_1 0x3040104
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_0 0x3040108
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_1 0x304010C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_0 0x3040110
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_1 0x3040114
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWADD_EN 0x3040118
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_0 0x304011C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_1 0x3040120
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_0 0x3040124
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_1 0x3040128
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_0 0x304012C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_1 0x3040130
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_0 0x3040134
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_1 0x3040138
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_0 0x304013C
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_1 0x3040140
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_0 0x3040144
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_1 0x3040148
+
+#define mmTPC0_EML_CFG_DBG_AXILBWADD_EN 0x304014C
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_0 0x3040150
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_1 0x3040154
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_0 0x3040158
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_1 0x304015C
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_0 0x3040160
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_1 0x3040164
+
+#define mmTPC0_EML_CFG_DBG_SPDATA_EN 0x3040168
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_0 0x304016C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_1 0x3040170
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_2 0x3040174
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_3 0x3040178
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_4 0x304017C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_5 0x3040180
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_6 0x3040184
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_7 0x3040188
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_8 0x304018C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_9 0x3040190
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_10 0x3040194
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_11 0x3040198
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_12 0x304019C
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_13 0x30401A0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_14 0x30401A4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_15 0x30401A8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_16 0x30401AC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_17 0x30401B0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_18 0x30401B4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_19 0x30401B8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_20 0x30401BC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_21 0x30401C0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_22 0x30401C4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_23 0x30401C8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_24 0x30401CC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_25 0x30401D0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_26 0x30401D4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_27 0x30401D8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_28 0x30401DC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_29 0x30401E0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_30 0x30401E4
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_31 0x30401E8
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_COUNT 0x30401EC
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH 0x30401F0
+
+#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_EN 0x30401F4
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA 0x30401F8
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA_COUNT 0x30401FC
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH 0x3040200
+
+#define mmTPC0_EML_CFG_DBG_AXILBWDATA_EN 0x3040204
+
+#define mmTPC0_EML_CFG_DBG_D0_PC 0x3040208
+
+#define mmTPC0_EML_CFG_RTTCONFIG 0x3040300
+
+#define mmTPC0_EML_CFG_RTTPREDICATE 0x3040304
+
+#define mmTPC0_EML_CFG_RTTPREDICATE_INTV 0x3040308
+
+#define mmTPC0_EML_CFG_RTTTS 0x304030C
+
+#define mmTPC0_EML_CFG_RTTTS_INTV 0x3040310
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_0 0x3040314
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_1 0x3040318
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_2 0x304031C
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_3 0x3040320
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_4 0x3040324
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_5 0x3040328
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_6 0x304032C
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_7 0x3040330
+
+#define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL 0x3040334
+
+#endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
new file mode 100644
index 000000000000..7f86621179a5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_NRTR_MASKS_H_
+#define ASIC_REG_TPC0_NRTR_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+/* TPC0_NRTR_HBW_MAX_CRED */
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8
+#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24
+#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* TPC0_NRTR_LBW_MAX_CRED */
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8
+#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24
+#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000
+
+/* TPC0_NRTR_DBG_E_ARB */
+#define TPC0_NRTR_DBG_E_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_E_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_E_ARB_S_SHIFT 8
+#define TPC0_NRTR_DBG_E_ARB_S_MASK 0x700
+#define TPC0_NRTR_DBG_E_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_E_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_E_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_E_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_W_ARB */
+#define TPC0_NRTR_DBG_W_ARB_E_SHIFT 0
+#define TPC0_NRTR_DBG_W_ARB_E_MASK 0x7
+#define TPC0_NRTR_DBG_W_ARB_S_SHIFT 8
+#define TPC0_NRTR_DBG_W_ARB_S_MASK 0x700
+#define TPC0_NRTR_DBG_W_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_W_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_W_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_W_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_N_ARB */
+#define TPC0_NRTR_DBG_N_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_N_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_N_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_N_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_N_ARB_S_SHIFT 16
+#define TPC0_NRTR_DBG_N_ARB_S_MASK 0x70000
+#define TPC0_NRTR_DBG_N_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_N_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_S_ARB */
+#define TPC0_NRTR_DBG_S_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_S_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_S_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_S_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_S_ARB_N_SHIFT 16
+#define TPC0_NRTR_DBG_S_ARB_N_MASK 0x70000
+#define TPC0_NRTR_DBG_S_ARB_L_SHIFT 24
+#define TPC0_NRTR_DBG_S_ARB_L_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_L_ARB */
+#define TPC0_NRTR_DBG_L_ARB_W_SHIFT 0
+#define TPC0_NRTR_DBG_L_ARB_W_MASK 0x7
+#define TPC0_NRTR_DBG_L_ARB_E_SHIFT 8
+#define TPC0_NRTR_DBG_L_ARB_E_MASK 0x700
+#define TPC0_NRTR_DBG_L_ARB_S_SHIFT 16
+#define TPC0_NRTR_DBG_L_ARB_S_MASK 0x70000
+#define TPC0_NRTR_DBG_L_ARB_N_SHIFT 24
+#define TPC0_NRTR_DBG_L_ARB_N_MASK 0x7000000
+
+/* TPC0_NRTR_DBG_E_ARB_MAX */
+#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_W_ARB_MAX */
+#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_N_ARB_MAX */
+#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_S_ARB_MAX */
+#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_DBG_L_ARB_MAX */
+#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0
+#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F
+
+/* TPC0_NRTR_SPLIT_COEF */
+#define TPC0_NRTR_SPLIT_COEF_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_CFG */
+#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0
+#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1
+#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1
+#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2
+#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2
+#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC
+#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4
+#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10
+#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5
+#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20
+#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6
+#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0
+
+/* TPC0_NRTR_SPLIT_RD_SAT */
+#define TPC0_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_RD_RST_TOKEN */
+#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_RD_TIMEOUT */
+#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_SPLIT_WR_SAT */
+#define TPC0_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_WPLIT_WR_TST_TOLEN */
+#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0
+#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF
+
+/* TPC0_NRTR_SPLIT_WR_TIMEOUT */
+#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0
+#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_HIT */
+#define TPC0_NRTR_HBW_RANGE_HIT_IND_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF
+
+/* TPC0_NRTR_HBW_RANGE_MASK_L */
+#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_MASK_H */
+#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF
+
+/* TPC0_NRTR_HBW_RANGE_BASE_L */
+#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_NRTR_HBW_RANGE_BASE_H */
+#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0
+#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF
+
+/* TPC0_NRTR_LBW_RANGE_HIT */
+#define TPC0_NRTR_LBW_RANGE_HIT_IND_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF
+
+/* TPC0_NRTR_LBW_RANGE_MASK */
+#define TPC0_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF
+
+/* TPC0_NRTR_LBW_RANGE_BASE */
+#define TPC0_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0
+#define TPC0_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF
+
+/* TPC0_NRTR_RGLTR */
+#define TPC0_NRTR_RGLTR_WR_EN_SHIFT 0
+#define TPC0_NRTR_RGLTR_WR_EN_MASK 0x1
+#define TPC0_NRTR_RGLTR_RD_EN_SHIFT 4
+#define TPC0_NRTR_RGLTR_RD_EN_MASK 0x10
+
+/* TPC0_NRTR_RGLTR_WR_RESULT */
+#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0
+#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF
+
+/* TPC0_NRTR_RGLTR_RD_RESULT */
+#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0
+#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF
+
+/* TPC0_NRTR_SCRAMB_EN */
+#define TPC0_NRTR_SCRAMB_EN_VAL_SHIFT 0
+#define TPC0_NRTR_SCRAMB_EN_VAL_MASK 0x1
+
+/* TPC0_NRTR_NON_LIN_SCRAMB */
+#define TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0
+#define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
+
+#endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
new file mode 100644
index 000000000000..dc280f4e6608
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_NRTR_REGS_H_
+#define ASIC_REG_TPC0_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmTPC0_NRTR_HBW_MAX_CRED 0xE00100
+
+#define mmTPC0_NRTR_LBW_MAX_CRED 0xE00120
+
+#define mmTPC0_NRTR_DBG_E_ARB 0xE00300
+
+#define mmTPC0_NRTR_DBG_W_ARB 0xE00304
+
+#define mmTPC0_NRTR_DBG_N_ARB 0xE00308
+
+#define mmTPC0_NRTR_DBG_S_ARB 0xE0030C
+
+#define mmTPC0_NRTR_DBG_L_ARB 0xE00310
+
+#define mmTPC0_NRTR_DBG_E_ARB_MAX 0xE00320
+
+#define mmTPC0_NRTR_DBG_W_ARB_MAX 0xE00324
+
+#define mmTPC0_NRTR_DBG_N_ARB_MAX 0xE00328
+
+#define mmTPC0_NRTR_DBG_S_ARB_MAX 0xE0032C
+
+#define mmTPC0_NRTR_DBG_L_ARB_MAX 0xE00330
+
+#define mmTPC0_NRTR_SPLIT_COEF_0 0xE00400
+
+#define mmTPC0_NRTR_SPLIT_COEF_1 0xE00404
+
+#define mmTPC0_NRTR_SPLIT_COEF_2 0xE00408
+
+#define mmTPC0_NRTR_SPLIT_COEF_3 0xE0040C
+
+#define mmTPC0_NRTR_SPLIT_COEF_4 0xE00410
+
+#define mmTPC0_NRTR_SPLIT_COEF_5 0xE00414
+
+#define mmTPC0_NRTR_SPLIT_COEF_6 0xE00418
+
+#define mmTPC0_NRTR_SPLIT_COEF_7 0xE0041C
+
+#define mmTPC0_NRTR_SPLIT_COEF_8 0xE00420
+
+#define mmTPC0_NRTR_SPLIT_COEF_9 0xE00424
+
+#define mmTPC0_NRTR_SPLIT_CFG 0xE00440
+
+#define mmTPC0_NRTR_SPLIT_RD_SAT 0xE00444
+
+#define mmTPC0_NRTR_SPLIT_RD_RST_TOKEN 0xE00448
+
+#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_0 0xE0044C
+
+#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_1 0xE00450
+
+#define mmTPC0_NRTR_SPLIT_WR_SAT 0xE00454
+
+#define mmTPC0_NRTR_WPLIT_WR_TST_TOLEN 0xE00458
+
+#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_0 0xE0045C
+
+#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_1 0xE00460
+
+#define mmTPC0_NRTR_HBW_RANGE_HIT 0xE00470
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_0 0xE00480
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_1 0xE00484
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_2 0xE00488
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_3 0xE0048C
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_4 0xE00490
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_5 0xE00494
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_6 0xE00498
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_L_7 0xE0049C
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_0 0xE004A0
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_1 0xE004A4
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_2 0xE004A8
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_3 0xE004AC
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_4 0xE004B0
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_5 0xE004B4
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_6 0xE004B8
+
+#define mmTPC0_NRTR_HBW_RANGE_MASK_H_7 0xE004BC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_0 0xE004C0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_1 0xE004C4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_2 0xE004C8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_3 0xE004CC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_4 0xE004D0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_5 0xE004D4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_6 0xE004D8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_L_7 0xE004DC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_0 0xE004E0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_1 0xE004E4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_2 0xE004E8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_3 0xE004EC
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_4 0xE004F0
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_5 0xE004F4
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_6 0xE004F8
+
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_7 0xE004FC
+
+#define mmTPC0_NRTR_LBW_RANGE_HIT 0xE00500
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_0 0xE00510
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_1 0xE00514
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_2 0xE00518
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_3 0xE0051C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_4 0xE00520
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_5 0xE00524
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_6 0xE00528
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_7 0xE0052C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_8 0xE00530
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_9 0xE00534
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_10 0xE00538
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_11 0xE0053C
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_12 0xE00540
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_13 0xE00544
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_14 0xE00548
+
+#define mmTPC0_NRTR_LBW_RANGE_MASK_15 0xE0054C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_0 0xE00550
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_1 0xE00554
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_2 0xE00558
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_3 0xE0055C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_4 0xE00560
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_5 0xE00564
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_6 0xE00568
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_7 0xE0056C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_8 0xE00570
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_9 0xE00574
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_10 0xE00578
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_11 0xE0057C
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_12 0xE00580
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_13 0xE00584
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_14 0xE00588
+
+#define mmTPC0_NRTR_LBW_RANGE_BASE_15 0xE0058C
+
+#define mmTPC0_NRTR_RGLTR 0xE00590
+
+#define mmTPC0_NRTR_RGLTR_WR_RESULT 0xE00594
+
+#define mmTPC0_NRTR_RGLTR_RD_RESULT 0xE00598
+
+#define mmTPC0_NRTR_SCRAMB_EN 0xE00600
+
+#define mmTPC0_NRTR_NON_LIN_SCRAMB 0xE00604
+
+#endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
new file mode 100644
index 000000000000..80d97ee3d8d6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_MASKS_H_
+#define ASIC_REG_TPC0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* TPC0_QM_GLBL_CFG0 */
+#define TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define TPC0_QM_GLBL_CFG0_PQF_EN_MASK 0x1
+#define TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT 1
+#define TPC0_QM_GLBL_CFG0_CQF_EN_MASK 0x2
+#define TPC0_QM_GLBL_CFG0_CP_EN_SHIFT 2
+#define TPC0_QM_GLBL_CFG0_CP_EN_MASK 0x4
+#define TPC0_QM_GLBL_CFG0_DMA_EN_SHIFT 3
+#define TPC0_QM_GLBL_CFG0_DMA_EN_MASK 0x8
+
+/* TPC0_QM_GLBL_CFG1 */
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_MASK 0x1
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT 1
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_MASK 0x2
+#define TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT 2
+#define TPC0_QM_GLBL_CFG1_CP_STOP_MASK 0x4
+#define TPC0_QM_GLBL_CFG1_DMA_STOP_SHIFT 3
+#define TPC0_QM_GLBL_CFG1_DMA_STOP_MASK 0x8
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400
+#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11
+#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800
+
+/* TPC0_QM_GLBL_PROT */
+#define TPC0_QM_GLBL_PROT_PQF_PROT_SHIFT 0
+#define TPC0_QM_GLBL_PROT_PQF_PROT_MASK 0x1
+#define TPC0_QM_GLBL_PROT_CQF_PROT_SHIFT 1
+#define TPC0_QM_GLBL_PROT_CQF_PROT_MASK 0x2
+#define TPC0_QM_GLBL_PROT_CP_PROT_SHIFT 2
+#define TPC0_QM_GLBL_PROT_CP_PROT_MASK 0x4
+#define TPC0_QM_GLBL_PROT_DMA_PROT_SHIFT 3
+#define TPC0_QM_GLBL_PROT_DMA_PROT_MASK 0x8
+#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4
+#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10
+#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5
+#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20
+#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6
+#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40
+#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7
+#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80
+
+/* TPC0_QM_GLBL_ERR_CFG */
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10
+#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400
+#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11
+#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800
+
+/* TPC0_QM_GLBL_ERR_ADDR_LO */
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_ADDR_HI */
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_WDATA */
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_SECURE_PROPS */
+#define TPC0_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_NON_SECURE_PROPS */
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_STS0 */
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_MASK 0x1
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_SHIFT 1
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_MASK 0x2
+#define TPC0_QM_GLBL_STS0_CP_IDLE_SHIFT 2
+#define TPC0_QM_GLBL_STS0_CP_IDLE_MASK 0x4
+#define TPC0_QM_GLBL_STS0_DMA_IDLE_SHIFT 3
+#define TPC0_QM_GLBL_STS0_DMA_IDLE_MASK 0x8
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40
+#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7
+#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80
+
+/* TPC0_QM_GLBL_STS1 */
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8
+#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100
+#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9
+#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200
+#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10
+#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400
+#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11
+#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800
+
+/* TPC0_QM_PQ_BASE_LO */
+#define TPC0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_BASE_HI */
+#define TPC0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_SIZE */
+#define TPC0_QM_PQ_SIZE_VAL_SHIFT 0
+#define TPC0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PI */
+#define TPC0_QM_PQ_PI_VAL_SHIFT 0
+#define TPC0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CI */
+#define TPC0_QM_PQ_CI_VAL_SHIFT 0
+#define TPC0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CFG0 */
+#define TPC0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_PQ_CFG1 */
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_ARUSER */
+#define TPC0_QM_PQ_ARUSER_NOSNOOP_SHIFT 0
+#define TPC0_QM_PQ_ARUSER_NOSNOOP_MASK 0x1
+#define TPC0_QM_PQ_ARUSER_WORD_SHIFT 1
+#define TPC0_QM_PQ_ARUSER_WORD_MASK 0x2
+
+/* TPC0_QM_PQ_PUSH0 */
+#define TPC0_QM_PQ_PUSH0_PTR_LO_SHIFT 0
+#define TPC0_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PUSH1 */
+#define TPC0_QM_PQ_PUSH1_PTR_HI_SHIFT 0
+#define TPC0_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PUSH2 */
+#define TPC0_QM_PQ_PUSH2_TSIZE_SHIFT 0
+#define TPC0_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PUSH3 */
+#define TPC0_QM_PQ_PUSH3_RPT_SHIFT 0
+#define TPC0_QM_PQ_PUSH3_RPT_MASK 0xFFFF
+#define TPC0_QM_PQ_PUSH3_CTL_SHIFT 16
+#define TPC0_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_STS0 */
+#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_STS1 */
+#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define TPC0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* TPC0_QM_PQ_RD_RATE_LIM_EN */
+#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN */
+#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_QM_PQ_RD_RATE_LIM_SAT */
+#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_QM_PQ_RD_RATE_LIM_TOUT */
+#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* TPC0_QM_CQ_CFG0 */
+#define TPC0_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_CQ_CFG1 */
+#define TPC0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_ARUSER */
+#define TPC0_QM_CQ_ARUSER_NOSNOOP_SHIFT 0
+#define TPC0_QM_CQ_ARUSER_NOSNOOP_MASK 0x1
+#define TPC0_QM_CQ_ARUSER_WORD_SHIFT 1
+#define TPC0_QM_CQ_ARUSER_WORD_MASK 0x2
+
+/* TPC0_QM_CQ_PTR_LO */
+#define TPC0_QM_CQ_PTR_LO_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI */
+#define TPC0_QM_CQ_PTR_HI_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE */
+#define TPC0_QM_CQ_TSIZE_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL */
+#define TPC0_QM_CQ_CTL_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_STS */
+#define TPC0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_STS */
+#define TPC0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_STS */
+#define TPC0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_STS */
+#define TPC0_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_STS0 */
+#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_STS1 */
+#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define TPC0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* TPC0_QM_CQ_RD_RATE_LIM_EN */
+#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0
+#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1
+
+/* TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN */
+#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0
+#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF
+
+/* TPC0_QM_CQ_RD_RATE_LIM_SAT */
+#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0
+#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF
+
+/* TPC0_QM_CQ_RD_RATE_LIM_TOUT */
+#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0
+#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF
+
+/* TPC0_QM_CQ_IFIFO_CNT */
+#define TPC0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define TPC0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* TPC0_QM_CP_MSG_BASE0_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE0_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE1_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE1_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE2_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE2_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE3_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE3_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_TSIZE_OFFSET */
+#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET */
+#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET */
+#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_COMMIT_OFFSET */
+#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_FENCE0_RDATA */
+#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE1_RDATA */
+#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE2_RDATA */
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE3_RDATA */
+#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE0_CNT */
+#define TPC0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE0_CNT_VAL_MASK 0xFF
+
+/* TPC0_QM_CP_FENCE1_CNT */
+#define TPC0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE1_CNT_VAL_MASK 0xFF
+
+/* TPC0_QM_CP_FENCE2_CNT */
+#define TPC0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE2_CNT_VAL_MASK 0xFF
+
+/* TPC0_QM_CP_FENCE3_CNT */
+#define TPC0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE3_CNT_VAL_MASK 0xFF
+
+/* TPC0_QM_CP_STS */
+#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_CP_STS_ERDY_SHIFT 16
+#define TPC0_QM_CP_STS_ERDY_MASK 0x10000
+#define TPC0_QM_CP_STS_RRDY_SHIFT 17
+#define TPC0_QM_CP_STS_RRDY_MASK 0x20000
+#define TPC0_QM_CP_STS_MRDY_SHIFT 18
+#define TPC0_QM_CP_STS_MRDY_MASK 0x40000
+#define TPC0_QM_CP_STS_SW_STOP_SHIFT 19
+#define TPC0_QM_CP_STS_SW_STOP_MASK 0x80000
+#define TPC0_QM_CP_STS_FENCE_ID_SHIFT 20
+#define TPC0_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* TPC0_QM_CP_CURRENT_INST_LO */
+#define TPC0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_CURRENT_INST_HI */
+#define TPC0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_BARRIER_CFG */
+#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+
+/* TPC0_QM_CP_DBG_0 */
+#define TPC0_QM_CP_DBG_0_VAL_SHIFT 0
+#define TPC0_QM_CP_DBG_0_VAL_MASK 0xFF
+
+/* TPC0_QM_PQ_BUF_ADDR */
+#define TPC0_QM_PQ_BUF_ADDR_VAL_SHIFT 0
+#define TPC0_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_BUF_RDATA */
+#define TPC0_QM_PQ_BUF_RDATA_VAL_SHIFT 0
+#define TPC0_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_BUF_ADDR */
+#define TPC0_QM_CQ_BUF_ADDR_VAL_SHIFT 0
+#define TPC0_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_BUF_RDATA */
+#define TPC0_QM_CQ_BUF_RDATA_VAL_SHIFT 0
+#define TPC0_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
new file mode 100644
index 000000000000..7552d4ba61fe
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_REGS_H_
+#define ASIC_REG_TPC0_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC0_QM_GLBL_CFG0 0xE08000
+
+#define mmTPC0_QM_GLBL_CFG1 0xE08004
+
+#define mmTPC0_QM_GLBL_PROT 0xE08008
+
+#define mmTPC0_QM_GLBL_ERR_CFG 0xE0800C
+
+#define mmTPC0_QM_GLBL_ERR_ADDR_LO 0xE08010
+
+#define mmTPC0_QM_GLBL_ERR_ADDR_HI 0xE08014
+
+#define mmTPC0_QM_GLBL_ERR_WDATA 0xE08018
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS 0xE0801C
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS 0xE08020
+
+#define mmTPC0_QM_GLBL_STS0 0xE08024
+
+#define mmTPC0_QM_GLBL_STS1 0xE08028
+
+#define mmTPC0_QM_PQ_BASE_LO 0xE08060
+
+#define mmTPC0_QM_PQ_BASE_HI 0xE08064
+
+#define mmTPC0_QM_PQ_SIZE 0xE08068
+
+#define mmTPC0_QM_PQ_PI 0xE0806C
+
+#define mmTPC0_QM_PQ_CI 0xE08070
+
+#define mmTPC0_QM_PQ_CFG0 0xE08074
+
+#define mmTPC0_QM_PQ_CFG1 0xE08078
+
+#define mmTPC0_QM_PQ_ARUSER 0xE0807C
+
+#define mmTPC0_QM_PQ_PUSH0 0xE08080
+
+#define mmTPC0_QM_PQ_PUSH1 0xE08084
+
+#define mmTPC0_QM_PQ_PUSH2 0xE08088
+
+#define mmTPC0_QM_PQ_PUSH3 0xE0808C
+
+#define mmTPC0_QM_PQ_STS0 0xE08090
+
+#define mmTPC0_QM_PQ_STS1 0xE08094
+
+#define mmTPC0_QM_PQ_RD_RATE_LIM_EN 0xE080A0
+
+#define mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE080A4
+
+#define mmTPC0_QM_PQ_RD_RATE_LIM_SAT 0xE080A8
+
+#define mmTPC0_QM_PQ_RD_RATE_LIM_TOUT 0xE080AC
+
+#define mmTPC0_QM_CQ_CFG0 0xE080B0
+
+#define mmTPC0_QM_CQ_CFG1 0xE080B4
+
+#define mmTPC0_QM_CQ_ARUSER 0xE080B8
+
+#define mmTPC0_QM_CQ_PTR_LO 0xE080C0
+
+#define mmTPC0_QM_CQ_PTR_HI 0xE080C4
+
+#define mmTPC0_QM_CQ_TSIZE 0xE080C8
+
+#define mmTPC0_QM_CQ_CTL 0xE080CC
+
+#define mmTPC0_QM_CQ_PTR_LO_STS 0xE080D4
+
+#define mmTPC0_QM_CQ_PTR_HI_STS 0xE080D8
+
+#define mmTPC0_QM_CQ_TSIZE_STS 0xE080DC
+
+#define mmTPC0_QM_CQ_CTL_STS 0xE080E0
+
+#define mmTPC0_QM_CQ_STS0 0xE080E4
+
+#define mmTPC0_QM_CQ_STS1 0xE080E8
+
+#define mmTPC0_QM_CQ_RD_RATE_LIM_EN 0xE080F0
+
+#define mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE080F4
+
+#define mmTPC0_QM_CQ_RD_RATE_LIM_SAT 0xE080F8
+
+#define mmTPC0_QM_CQ_RD_RATE_LIM_TOUT 0xE080FC
+
+#define mmTPC0_QM_CQ_IFIFO_CNT 0xE08108
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO 0xE08120
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI 0xE08124
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO 0xE08128
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI 0xE0812C
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO 0xE08130
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI 0xE08134
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO 0xE08138
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI 0xE0813C
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET 0xE08140
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE08144
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE08148
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE0814C
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE08150
+
+#define mmTPC0_QM_CP_LDMA_COMMIT_OFFSET 0xE08154
+
+#define mmTPC0_QM_CP_FENCE0_RDATA 0xE08158
+
+#define mmTPC0_QM_CP_FENCE1_RDATA 0xE0815C
+
+#define mmTPC0_QM_CP_FENCE2_RDATA 0xE08160
+
+#define mmTPC0_QM_CP_FENCE3_RDATA 0xE08164
+
+#define mmTPC0_QM_CP_FENCE0_CNT 0xE08168
+
+#define mmTPC0_QM_CP_FENCE1_CNT 0xE0816C
+
+#define mmTPC0_QM_CP_FENCE2_CNT 0xE08170
+
+#define mmTPC0_QM_CP_FENCE3_CNT 0xE08174
+
+#define mmTPC0_QM_CP_STS 0xE08178
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO 0xE0817C
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI 0xE08180
+
+#define mmTPC0_QM_CP_BARRIER_CFG 0xE08184
+
+#define mmTPC0_QM_CP_DBG_0 0xE08188
+
+#define mmTPC0_QM_PQ_BUF_ADDR 0xE08300
+
+#define mmTPC0_QM_PQ_BUF_RDATA 0xE08304
+
+#define mmTPC0_QM_CQ_BUF_ADDR 0xE08308
+
+#define mmTPC0_QM_CQ_BUF_RDATA 0xE0830C
+
+#endif /* ASIC_REG_TPC0_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
new file mode 100644
index 000000000000..19894413474a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_CFG_REGS_H_
+#define ASIC_REG_TPC1_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE46400
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE46404
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE46408
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE4640C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE46410
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE46414
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE46418
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE4641C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE46420
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE46424
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE46428
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE4642C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE46430
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE46434
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE46438
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE4643C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE46440
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE46444
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE46448
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE4644C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE46450
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE46454
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE46458
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE4645C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE46460
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE46464
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE46468
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE4646C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE46470
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE46474
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE46478
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE4647C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE46480
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE46484
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE46488
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE4648C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE46490
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE46494
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE46498
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE4649C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE464A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE464A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE464A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE464AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE464B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE464B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE464B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE464BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE464C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE464C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE464C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE464CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE464D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE464D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE464D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE464DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE464E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE464E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE464E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE464EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE464F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE464F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE464F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE464FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE46500
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE46504
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE46508
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE4650C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE46510
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE46514
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE46518
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE4651C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE46520
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE46524
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE46528
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE4652C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE46530
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE46534
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE46538
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE4653C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE46540
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE46544
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE46548
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE4654C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE46550
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE46554
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE46558
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE4655C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE46560
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE46564
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE46568
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE4656C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE46570
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE46574
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE46578
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE4657C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE46580
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE46584
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE46588
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE4658C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE46590
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE46594
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE46598
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE4659C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE465A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE465A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE465A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE465AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE465B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE465B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE465B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE465BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE465C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE465C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE465C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE465CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE465D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE465D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE465D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE465DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE465E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE465E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE465E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE465EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE465F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE465F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE465F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE465FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE46600
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE46604
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE46608
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE4660C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE46610
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE46614
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE46618
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE4661C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE46620
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE46624
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE46628
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE4662C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE46630
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE46634
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE46638
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE4663C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE46640
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE46644
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE46648
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE4664C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE46650
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE46654
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE46658
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE4665C
+
+#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE46660
+
+#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE46664
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_0 0xE46668
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_0 0xE4666C
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_1 0xE46670
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_1 0xE46674
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_2 0xE46678
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_2 0xE4667C
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_3 0xE46680
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_3 0xE46684
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_4 0xE46688
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_4 0xE4668C
+
+#define mmTPC1_CFG_KERNEL_SRF_0 0xE46690
+
+#define mmTPC1_CFG_KERNEL_SRF_1 0xE46694
+
+#define mmTPC1_CFG_KERNEL_SRF_2 0xE46698
+
+#define mmTPC1_CFG_KERNEL_SRF_3 0xE4669C
+
+#define mmTPC1_CFG_KERNEL_SRF_4 0xE466A0
+
+#define mmTPC1_CFG_KERNEL_SRF_5 0xE466A4
+
+#define mmTPC1_CFG_KERNEL_SRF_6 0xE466A8
+
+#define mmTPC1_CFG_KERNEL_SRF_7 0xE466AC
+
+#define mmTPC1_CFG_KERNEL_SRF_8 0xE466B0
+
+#define mmTPC1_CFG_KERNEL_SRF_9 0xE466B4
+
+#define mmTPC1_CFG_KERNEL_SRF_10 0xE466B8
+
+#define mmTPC1_CFG_KERNEL_SRF_11 0xE466BC
+
+#define mmTPC1_CFG_KERNEL_SRF_12 0xE466C0
+
+#define mmTPC1_CFG_KERNEL_SRF_13 0xE466C4
+
+#define mmTPC1_CFG_KERNEL_SRF_14 0xE466C8
+
+#define mmTPC1_CFG_KERNEL_SRF_15 0xE466CC
+
+#define mmTPC1_CFG_KERNEL_SRF_16 0xE466D0
+
+#define mmTPC1_CFG_KERNEL_SRF_17 0xE466D4
+
+#define mmTPC1_CFG_KERNEL_SRF_18 0xE466D8
+
+#define mmTPC1_CFG_KERNEL_SRF_19 0xE466DC
+
+#define mmTPC1_CFG_KERNEL_SRF_20 0xE466E0
+
+#define mmTPC1_CFG_KERNEL_SRF_21 0xE466E4
+
+#define mmTPC1_CFG_KERNEL_SRF_22 0xE466E8
+
+#define mmTPC1_CFG_KERNEL_SRF_23 0xE466EC
+
+#define mmTPC1_CFG_KERNEL_SRF_24 0xE466F0
+
+#define mmTPC1_CFG_KERNEL_SRF_25 0xE466F4
+
+#define mmTPC1_CFG_KERNEL_SRF_26 0xE466F8
+
+#define mmTPC1_CFG_KERNEL_SRF_27 0xE466FC
+
+#define mmTPC1_CFG_KERNEL_SRF_28 0xE46700
+
+#define mmTPC1_CFG_KERNEL_SRF_29 0xE46704
+
+#define mmTPC1_CFG_KERNEL_SRF_30 0xE46708
+
+#define mmTPC1_CFG_KERNEL_SRF_31 0xE4670C
+
+#define mmTPC1_CFG_KERNEL_KERNEL_CONFIG 0xE46710
+
+#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE46714
+
+#define mmTPC1_CFG_RESERVED_DESC_END 0xE46738
+
+#define mmTPC1_CFG_ROUND_CSR 0xE467FC
+
+#define mmTPC1_CFG_TBUF_BASE_ADDR_LOW 0xE46800
+
+#define mmTPC1_CFG_TBUF_BASE_ADDR_HIGH 0xE46804
+
+#define mmTPC1_CFG_SEMAPHORE 0xE46808
+
+#define mmTPC1_CFG_VFLAGS 0xE4680C
+
+#define mmTPC1_CFG_SFLAGS 0xE46810
+
+#define mmTPC1_CFG_LFSR_POLYNOM 0xE46818
+
+#define mmTPC1_CFG_STATUS 0xE4681C
+
+#define mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH 0xE46820
+
+#define mmTPC1_CFG_CFG_SUBTRACT_VALUE 0xE46824
+
+#define mmTPC1_CFG_SM_BASE_ADDRESS_LOW 0xE46828
+
+#define mmTPC1_CFG_SM_BASE_ADDRESS_HIGH 0xE4682C
+
+#define mmTPC1_CFG_TPC_CMD 0xE46830
+
+#define mmTPC1_CFG_TPC_EXECUTE 0xE46838
+
+#define mmTPC1_CFG_TPC_STALL 0xE4683C
+
+#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_LOW 0xE46840
+
+#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE46844
+
+#define mmTPC1_CFG_MSS_CONFIG 0xE46854
+
+#define mmTPC1_CFG_TPC_INTR_CAUSE 0xE46858
+
+#define mmTPC1_CFG_TPC_INTR_MASK 0xE4685C
+
+#define mmTPC1_CFG_TSB_CONFIG 0xE46860
+
+#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE46A00
+
+#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE46A04
+
+#define mmTPC1_CFG_QM_TENSOR_0_PADDING_VALUE 0xE46A08
+
+#define mmTPC1_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE46A0C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE46A10
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE46A14
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE46A18
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE46A1C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE46A20
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE46A24
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE46A28
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE46A2C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE46A30
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE46A34
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE46A38
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE46A3C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE46A40
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE46A44
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE46A48
+
+#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE46A4C
+
+#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE46A50
+
+#define mmTPC1_CFG_QM_TENSOR_1_PADDING_VALUE 0xE46A54
+
+#define mmTPC1_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE46A58
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE46A5C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE46A60
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE46A64
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE46A68
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE46A6C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE46A70
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE46A74
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE46A78
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE46A7C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE46A80
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE46A84
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE46A88
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE46A8C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE46A90
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE46A94
+
+#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE46A98
+
+#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE46A9C
+
+#define mmTPC1_CFG_QM_TENSOR_2_PADDING_VALUE 0xE46AA0
+
+#define mmTPC1_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE46AA4
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE46AA8
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE46AAC
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE46AB0
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE46AB4
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE46AB8
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE46ABC
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE46AC0
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE46AC4
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE46AC8
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE46ACC
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE46AD0
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE46AD4
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE46AD8
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE46ADC
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE46AE0
+
+#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE46AE4
+
+#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE46AE8
+
+#define mmTPC1_CFG_QM_TENSOR_3_PADDING_VALUE 0xE46AEC
+
+#define mmTPC1_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE46AF0
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE46AF4
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE46AF8
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE46AFC
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE46B00
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE46B04
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE46B08
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE46B0C
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE46B10
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE46B14
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE46B18
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE46B1C
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE46B20
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE46B24
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE46B28
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE46B2C
+
+#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE46B30
+
+#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE46B34
+
+#define mmTPC1_CFG_QM_TENSOR_4_PADDING_VALUE 0xE46B38
+
+#define mmTPC1_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE46B3C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE46B40
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE46B44
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE46B48
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE46B4C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE46B50
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE46B54
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE46B58
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE46B5C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE46B60
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE46B64
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE46B68
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE46B6C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE46B70
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE46B74
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE46B78
+
+#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE46B7C
+
+#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE46B80
+
+#define mmTPC1_CFG_QM_TENSOR_5_PADDING_VALUE 0xE46B84
+
+#define mmTPC1_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE46B88
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE46B8C
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE46B90
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE46B94
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE46B98
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE46B9C
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE46BA0
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE46BA4
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE46BA8
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE46BAC
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE46BB0
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE46BB4
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE46BB8
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE46BBC
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE46BC0
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE46BC4
+
+#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE46BC8
+
+#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE46BCC
+
+#define mmTPC1_CFG_QM_TENSOR_6_PADDING_VALUE 0xE46BD0
+
+#define mmTPC1_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE46BD4
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE46BD8
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE46BDC
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE46BE0
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE46BE4
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE46BE8
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE46BEC
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE46BF0
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE46BF4
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE46BF8
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE46BFC
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE46C00
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE46C04
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE46C08
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE46C0C
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE46C10
+
+#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE46C14
+
+#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE46C18
+
+#define mmTPC1_CFG_QM_TENSOR_7_PADDING_VALUE 0xE46C1C
+
+#define mmTPC1_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE46C20
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE46C24
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE46C28
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE46C2C
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE46C30
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE46C34
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE46C38
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE46C3C
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE46C40
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE46C44
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE46C48
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE46C4C
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE46C50
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE46C54
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE46C58
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE46C5C
+
+#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE46C60
+
+#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE46C64
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_0 0xE46C68
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_0 0xE46C6C
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_1 0xE46C70
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_1 0xE46C74
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_2 0xE46C78
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_2 0xE46C7C
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_3 0xE46C80
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_3 0xE46C84
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_4 0xE46C88
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_4 0xE46C8C
+
+#define mmTPC1_CFG_QM_SRF_0 0xE46C90
+
+#define mmTPC1_CFG_QM_SRF_1 0xE46C94
+
+#define mmTPC1_CFG_QM_SRF_2 0xE46C98
+
+#define mmTPC1_CFG_QM_SRF_3 0xE46C9C
+
+#define mmTPC1_CFG_QM_SRF_4 0xE46CA0
+
+#define mmTPC1_CFG_QM_SRF_5 0xE46CA4
+
+#define mmTPC1_CFG_QM_SRF_6 0xE46CA8
+
+#define mmTPC1_CFG_QM_SRF_7 0xE46CAC
+
+#define mmTPC1_CFG_QM_SRF_8 0xE46CB0
+
+#define mmTPC1_CFG_QM_SRF_9 0xE46CB4
+
+#define mmTPC1_CFG_QM_SRF_10 0xE46CB8
+
+#define mmTPC1_CFG_QM_SRF_11 0xE46CBC
+
+#define mmTPC1_CFG_QM_SRF_12 0xE46CC0
+
+#define mmTPC1_CFG_QM_SRF_13 0xE46CC4
+
+#define mmTPC1_CFG_QM_SRF_14 0xE46CC8
+
+#define mmTPC1_CFG_QM_SRF_15 0xE46CCC
+
+#define mmTPC1_CFG_QM_SRF_16 0xE46CD0
+
+#define mmTPC1_CFG_QM_SRF_17 0xE46CD4
+
+#define mmTPC1_CFG_QM_SRF_18 0xE46CD8
+
+#define mmTPC1_CFG_QM_SRF_19 0xE46CDC
+
+#define mmTPC1_CFG_QM_SRF_20 0xE46CE0
+
+#define mmTPC1_CFG_QM_SRF_21 0xE46CE4
+
+#define mmTPC1_CFG_QM_SRF_22 0xE46CE8
+
+#define mmTPC1_CFG_QM_SRF_23 0xE46CEC
+
+#define mmTPC1_CFG_QM_SRF_24 0xE46CF0
+
+#define mmTPC1_CFG_QM_SRF_25 0xE46CF4
+
+#define mmTPC1_CFG_QM_SRF_26 0xE46CF8
+
+#define mmTPC1_CFG_QM_SRF_27 0xE46CFC
+
+#define mmTPC1_CFG_QM_SRF_28 0xE46D00
+
+#define mmTPC1_CFG_QM_SRF_29 0xE46D04
+
+#define mmTPC1_CFG_QM_SRF_30 0xE46D08
+
+#define mmTPC1_CFG_QM_SRF_31 0xE46D0C
+
+#define mmTPC1_CFG_QM_KERNEL_CONFIG 0xE46D10
+
+#define mmTPC1_CFG_QM_SYNC_OBJECT_MESSAGE 0xE46D14
+
+#define mmTPC1_CFG_ARUSER 0xE46D18
+
+#define mmTPC1_CFG_AWUSER 0xE46D1C
+
+#define mmTPC1_CFG_FUNC_MBIST_CNTRL 0xE46E00
+
+#define mmTPC1_CFG_FUNC_MBIST_PAT 0xE46E04
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_0 0xE46E08
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_1 0xE46E0C
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_2 0xE46E10
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_3 0xE46E14
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_4 0xE46E18
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_5 0xE46E1C
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_6 0xE46E20
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_7 0xE46E24
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_8 0xE46E28
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE46E2C
+
+#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
new file mode 100644
index 000000000000..9099ebd7ab23
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_CMDQ_REGS_H_
+#define ASIC_REG_TPC1_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC1_CMDQ_GLBL_CFG0 0xE49000
+
+#define mmTPC1_CMDQ_GLBL_CFG1 0xE49004
+
+#define mmTPC1_CMDQ_GLBL_PROT 0xE49008
+
+#define mmTPC1_CMDQ_GLBL_ERR_CFG 0xE4900C
+
+#define mmTPC1_CMDQ_GLBL_ERR_ADDR_LO 0xE49010
+
+#define mmTPC1_CMDQ_GLBL_ERR_ADDR_HI 0xE49014
+
+#define mmTPC1_CMDQ_GLBL_ERR_WDATA 0xE49018
+
+#define mmTPC1_CMDQ_GLBL_SECURE_PROPS 0xE4901C
+
+#define mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS 0xE49020
+
+#define mmTPC1_CMDQ_GLBL_STS0 0xE49024
+
+#define mmTPC1_CMDQ_GLBL_STS1 0xE49028
+
+#define mmTPC1_CMDQ_CQ_CFG0 0xE490B0
+
+#define mmTPC1_CMDQ_CQ_CFG1 0xE490B4
+
+#define mmTPC1_CMDQ_CQ_ARUSER 0xE490B8
+
+#define mmTPC1_CMDQ_CQ_PTR_LO 0xE490C0
+
+#define mmTPC1_CMDQ_CQ_PTR_HI 0xE490C4
+
+#define mmTPC1_CMDQ_CQ_TSIZE 0xE490C8
+
+#define mmTPC1_CMDQ_CQ_CTL 0xE490CC
+
+#define mmTPC1_CMDQ_CQ_PTR_LO_STS 0xE490D4
+
+#define mmTPC1_CMDQ_CQ_PTR_HI_STS 0xE490D8
+
+#define mmTPC1_CMDQ_CQ_TSIZE_STS 0xE490DC
+
+#define mmTPC1_CMDQ_CQ_CTL_STS 0xE490E0
+
+#define mmTPC1_CMDQ_CQ_STS0 0xE490E4
+
+#define mmTPC1_CMDQ_CQ_STS1 0xE490E8
+
+#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN 0xE490F0
+
+#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE490F4
+
+#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT 0xE490F8
+
+#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE490FC
+
+#define mmTPC1_CMDQ_CQ_IFIFO_CNT 0xE49108
+
+#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE49120
+
+#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE49124
+
+#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE49128
+
+#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE4912C
+
+#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE49130
+
+#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE49134
+
+#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE49138
+
+#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE4913C
+
+#define mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE49140
+
+#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE49144
+
+#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE49148
+
+#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE4914C
+
+#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE49150
+
+#define mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE49154
+
+#define mmTPC1_CMDQ_CP_FENCE0_RDATA 0xE49158
+
+#define mmTPC1_CMDQ_CP_FENCE1_RDATA 0xE4915C
+
+#define mmTPC1_CMDQ_CP_FENCE2_RDATA 0xE49160
+
+#define mmTPC1_CMDQ_CP_FENCE3_RDATA 0xE49164
+
+#define mmTPC1_CMDQ_CP_FENCE0_CNT 0xE49168
+
+#define mmTPC1_CMDQ_CP_FENCE1_CNT 0xE4916C
+
+#define mmTPC1_CMDQ_CP_FENCE2_CNT 0xE49170
+
+#define mmTPC1_CMDQ_CP_FENCE3_CNT 0xE49174
+
+#define mmTPC1_CMDQ_CP_STS 0xE49178
+
+#define mmTPC1_CMDQ_CP_CURRENT_INST_LO 0xE4917C
+
+#define mmTPC1_CMDQ_CP_CURRENT_INST_HI 0xE49180
+
+#define mmTPC1_CMDQ_CP_BARRIER_CFG 0xE49184
+
+#define mmTPC1_CMDQ_CP_DBG_0 0xE49188
+
+#define mmTPC1_CMDQ_CQ_BUF_ADDR 0xE49308
+
+#define mmTPC1_CMDQ_CQ_BUF_RDATA 0xE4930C
+
+#endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
new file mode 100644
index 000000000000..bc8b9a10391f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_QM_REGS_H_
+#define ASIC_REG_TPC1_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC1_QM_GLBL_CFG0 0xE48000
+
+#define mmTPC1_QM_GLBL_CFG1 0xE48004
+
+#define mmTPC1_QM_GLBL_PROT 0xE48008
+
+#define mmTPC1_QM_GLBL_ERR_CFG 0xE4800C
+
+#define mmTPC1_QM_GLBL_ERR_ADDR_LO 0xE48010
+
+#define mmTPC1_QM_GLBL_ERR_ADDR_HI 0xE48014
+
+#define mmTPC1_QM_GLBL_ERR_WDATA 0xE48018
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS 0xE4801C
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS 0xE48020
+
+#define mmTPC1_QM_GLBL_STS0 0xE48024
+
+#define mmTPC1_QM_GLBL_STS1 0xE48028
+
+#define mmTPC1_QM_PQ_BASE_LO 0xE48060
+
+#define mmTPC1_QM_PQ_BASE_HI 0xE48064
+
+#define mmTPC1_QM_PQ_SIZE 0xE48068
+
+#define mmTPC1_QM_PQ_PI 0xE4806C
+
+#define mmTPC1_QM_PQ_CI 0xE48070
+
+#define mmTPC1_QM_PQ_CFG0 0xE48074
+
+#define mmTPC1_QM_PQ_CFG1 0xE48078
+
+#define mmTPC1_QM_PQ_ARUSER 0xE4807C
+
+#define mmTPC1_QM_PQ_PUSH0 0xE48080
+
+#define mmTPC1_QM_PQ_PUSH1 0xE48084
+
+#define mmTPC1_QM_PQ_PUSH2 0xE48088
+
+#define mmTPC1_QM_PQ_PUSH3 0xE4808C
+
+#define mmTPC1_QM_PQ_STS0 0xE48090
+
+#define mmTPC1_QM_PQ_STS1 0xE48094
+
+#define mmTPC1_QM_PQ_RD_RATE_LIM_EN 0xE480A0
+
+#define mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE480A4
+
+#define mmTPC1_QM_PQ_RD_RATE_LIM_SAT 0xE480A8
+
+#define mmTPC1_QM_PQ_RD_RATE_LIM_TOUT 0xE480AC
+
+#define mmTPC1_QM_CQ_CFG0 0xE480B0
+
+#define mmTPC1_QM_CQ_CFG1 0xE480B4
+
+#define mmTPC1_QM_CQ_ARUSER 0xE480B8
+
+#define mmTPC1_QM_CQ_PTR_LO 0xE480C0
+
+#define mmTPC1_QM_CQ_PTR_HI 0xE480C4
+
+#define mmTPC1_QM_CQ_TSIZE 0xE480C8
+
+#define mmTPC1_QM_CQ_CTL 0xE480CC
+
+#define mmTPC1_QM_CQ_PTR_LO_STS 0xE480D4
+
+#define mmTPC1_QM_CQ_PTR_HI_STS 0xE480D8
+
+#define mmTPC1_QM_CQ_TSIZE_STS 0xE480DC
+
+#define mmTPC1_QM_CQ_CTL_STS 0xE480E0
+
+#define mmTPC1_QM_CQ_STS0 0xE480E4
+
+#define mmTPC1_QM_CQ_STS1 0xE480E8
+
+#define mmTPC1_QM_CQ_RD_RATE_LIM_EN 0xE480F0
+
+#define mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE480F4
+
+#define mmTPC1_QM_CQ_RD_RATE_LIM_SAT 0xE480F8
+
+#define mmTPC1_QM_CQ_RD_RATE_LIM_TOUT 0xE480FC
+
+#define mmTPC1_QM_CQ_IFIFO_CNT 0xE48108
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO 0xE48120
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI 0xE48124
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO 0xE48128
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI 0xE4812C
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO 0xE48130
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI 0xE48134
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO 0xE48138
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI 0xE4813C
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET 0xE48140
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE48144
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE48148
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE4814C
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE48150
+
+#define mmTPC1_QM_CP_LDMA_COMMIT_OFFSET 0xE48154
+
+#define mmTPC1_QM_CP_FENCE0_RDATA 0xE48158
+
+#define mmTPC1_QM_CP_FENCE1_RDATA 0xE4815C
+
+#define mmTPC1_QM_CP_FENCE2_RDATA 0xE48160
+
+#define mmTPC1_QM_CP_FENCE3_RDATA 0xE48164
+
+#define mmTPC1_QM_CP_FENCE0_CNT 0xE48168
+
+#define mmTPC1_QM_CP_FENCE1_CNT 0xE4816C
+
+#define mmTPC1_QM_CP_FENCE2_CNT 0xE48170
+
+#define mmTPC1_QM_CP_FENCE3_CNT 0xE48174
+
+#define mmTPC1_QM_CP_STS 0xE48178
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO 0xE4817C
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI 0xE48180
+
+#define mmTPC1_QM_CP_BARRIER_CFG 0xE48184
+
+#define mmTPC1_QM_CP_DBG_0 0xE48188
+
+#define mmTPC1_QM_PQ_BUF_ADDR 0xE48300
+
+#define mmTPC1_QM_PQ_BUF_RDATA 0xE48304
+
+#define mmTPC1_QM_CQ_BUF_ADDR 0xE48308
+
+#define mmTPC1_QM_CQ_BUF_RDATA 0xE4830C
+
+#endif /* ASIC_REG_TPC1_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
new file mode 100644
index 000000000000..ae267f8f457e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_RTR_REGS_H_
+#define ASIC_REG_TPC1_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC1_RTR_HBW_RD_RQ_E_ARB 0xE40100
+
+#define mmTPC1_RTR_HBW_RD_RQ_W_ARB 0xE40104
+
+#define mmTPC1_RTR_HBW_RD_RQ_N_ARB 0xE40108
+
+#define mmTPC1_RTR_HBW_RD_RQ_S_ARB 0xE4010C
+
+#define mmTPC1_RTR_HBW_RD_RQ_L_ARB 0xE40110
+
+#define mmTPC1_RTR_HBW_E_ARB_MAX 0xE40120
+
+#define mmTPC1_RTR_HBW_W_ARB_MAX 0xE40124
+
+#define mmTPC1_RTR_HBW_N_ARB_MAX 0xE40128
+
+#define mmTPC1_RTR_HBW_S_ARB_MAX 0xE4012C
+
+#define mmTPC1_RTR_HBW_L_ARB_MAX 0xE40130
+
+#define mmTPC1_RTR_HBW_RD_RS_E_ARB 0xE40140
+
+#define mmTPC1_RTR_HBW_RD_RS_W_ARB 0xE40144
+
+#define mmTPC1_RTR_HBW_RD_RS_N_ARB 0xE40148
+
+#define mmTPC1_RTR_HBW_RD_RS_S_ARB 0xE4014C
+
+#define mmTPC1_RTR_HBW_RD_RS_L_ARB 0xE40150
+
+#define mmTPC1_RTR_HBW_WR_RQ_E_ARB 0xE40170
+
+#define mmTPC1_RTR_HBW_WR_RQ_W_ARB 0xE40174
+
+#define mmTPC1_RTR_HBW_WR_RQ_N_ARB 0xE40178
+
+#define mmTPC1_RTR_HBW_WR_RQ_S_ARB 0xE4017C
+
+#define mmTPC1_RTR_HBW_WR_RQ_L_ARB 0xE40180
+
+#define mmTPC1_RTR_HBW_WR_RS_E_ARB 0xE40190
+
+#define mmTPC1_RTR_HBW_WR_RS_W_ARB 0xE40194
+
+#define mmTPC1_RTR_HBW_WR_RS_N_ARB 0xE40198
+
+#define mmTPC1_RTR_HBW_WR_RS_S_ARB 0xE4019C
+
+#define mmTPC1_RTR_HBW_WR_RS_L_ARB 0xE401A0
+
+#define mmTPC1_RTR_LBW_RD_RQ_E_ARB 0xE40200
+
+#define mmTPC1_RTR_LBW_RD_RQ_W_ARB 0xE40204
+
+#define mmTPC1_RTR_LBW_RD_RQ_N_ARB 0xE40208
+
+#define mmTPC1_RTR_LBW_RD_RQ_S_ARB 0xE4020C
+
+#define mmTPC1_RTR_LBW_RD_RQ_L_ARB 0xE40210
+
+#define mmTPC1_RTR_LBW_E_ARB_MAX 0xE40220
+
+#define mmTPC1_RTR_LBW_W_ARB_MAX 0xE40224
+
+#define mmTPC1_RTR_LBW_N_ARB_MAX 0xE40228
+
+#define mmTPC1_RTR_LBW_S_ARB_MAX 0xE4022C
+
+#define mmTPC1_RTR_LBW_L_ARB_MAX 0xE40230
+
+#define mmTPC1_RTR_LBW_RD_RS_E_ARB 0xE40250
+
+#define mmTPC1_RTR_LBW_RD_RS_W_ARB 0xE40254
+
+#define mmTPC1_RTR_LBW_RD_RS_N_ARB 0xE40258
+
+#define mmTPC1_RTR_LBW_RD_RS_S_ARB 0xE4025C
+
+#define mmTPC1_RTR_LBW_RD_RS_L_ARB 0xE40260
+
+#define mmTPC1_RTR_LBW_WR_RQ_E_ARB 0xE40270
+
+#define mmTPC1_RTR_LBW_WR_RQ_W_ARB 0xE40274
+
+#define mmTPC1_RTR_LBW_WR_RQ_N_ARB 0xE40278
+
+#define mmTPC1_RTR_LBW_WR_RQ_S_ARB 0xE4027C
+
+#define mmTPC1_RTR_LBW_WR_RQ_L_ARB 0xE40280
+
+#define mmTPC1_RTR_LBW_WR_RS_E_ARB 0xE40290
+
+#define mmTPC1_RTR_LBW_WR_RS_W_ARB 0xE40294
+
+#define mmTPC1_RTR_LBW_WR_RS_N_ARB 0xE40298
+
+#define mmTPC1_RTR_LBW_WR_RS_S_ARB 0xE4029C
+
+#define mmTPC1_RTR_LBW_WR_RS_L_ARB 0xE402A0
+
+#define mmTPC1_RTR_DBG_E_ARB 0xE40300
+
+#define mmTPC1_RTR_DBG_W_ARB 0xE40304
+
+#define mmTPC1_RTR_DBG_N_ARB 0xE40308
+
+#define mmTPC1_RTR_DBG_S_ARB 0xE4030C
+
+#define mmTPC1_RTR_DBG_L_ARB 0xE40310
+
+#define mmTPC1_RTR_DBG_E_ARB_MAX 0xE40320
+
+#define mmTPC1_RTR_DBG_W_ARB_MAX 0xE40324
+
+#define mmTPC1_RTR_DBG_N_ARB_MAX 0xE40328
+
+#define mmTPC1_RTR_DBG_S_ARB_MAX 0xE4032C
+
+#define mmTPC1_RTR_DBG_L_ARB_MAX 0xE40330
+
+#define mmTPC1_RTR_SPLIT_COEF_0 0xE40400
+
+#define mmTPC1_RTR_SPLIT_COEF_1 0xE40404
+
+#define mmTPC1_RTR_SPLIT_COEF_2 0xE40408
+
+#define mmTPC1_RTR_SPLIT_COEF_3 0xE4040C
+
+#define mmTPC1_RTR_SPLIT_COEF_4 0xE40410
+
+#define mmTPC1_RTR_SPLIT_COEF_5 0xE40414
+
+#define mmTPC1_RTR_SPLIT_COEF_6 0xE40418
+
+#define mmTPC1_RTR_SPLIT_COEF_7 0xE4041C
+
+#define mmTPC1_RTR_SPLIT_COEF_8 0xE40420
+
+#define mmTPC1_RTR_SPLIT_COEF_9 0xE40424
+
+#define mmTPC1_RTR_SPLIT_CFG 0xE40440
+
+#define mmTPC1_RTR_SPLIT_RD_SAT 0xE40444
+
+#define mmTPC1_RTR_SPLIT_RD_RST_TOKEN 0xE40448
+
+#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_0 0xE4044C
+
+#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_1 0xE40450
+
+#define mmTPC1_RTR_SPLIT_WR_SAT 0xE40454
+
+#define mmTPC1_RTR_WPLIT_WR_TST_TOLEN 0xE40458
+
+#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_0 0xE4045C
+
+#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_1 0xE40460
+
+#define mmTPC1_RTR_HBW_RANGE_HIT 0xE40470
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_0 0xE40480
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_1 0xE40484
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_2 0xE40488
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_3 0xE4048C
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_4 0xE40490
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_5 0xE40494
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_6 0xE40498
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_L_7 0xE4049C
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_0 0xE404A0
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_1 0xE404A4
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_2 0xE404A8
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_3 0xE404AC
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_4 0xE404B0
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_5 0xE404B4
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_6 0xE404B8
+
+#define mmTPC1_RTR_HBW_RANGE_MASK_H_7 0xE404BC
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_0 0xE404C0
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_1 0xE404C4
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_2 0xE404C8
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_3 0xE404CC
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_4 0xE404D0
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_5 0xE404D4
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_6 0xE404D8
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_L_7 0xE404DC
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_0 0xE404E0
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_1 0xE404E4
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_2 0xE404E8
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_3 0xE404EC
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_4 0xE404F0
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_5 0xE404F4
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_6 0xE404F8
+
+#define mmTPC1_RTR_HBW_RANGE_BASE_H_7 0xE404FC
+
+#define mmTPC1_RTR_LBW_RANGE_HIT 0xE40500
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_0 0xE40510
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_1 0xE40514
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_2 0xE40518
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_3 0xE4051C
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_4 0xE40520
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_5 0xE40524
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_6 0xE40528
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_7 0xE4052C
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_8 0xE40530
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_9 0xE40534
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_10 0xE40538
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_11 0xE4053C
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_12 0xE40540
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_13 0xE40544
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_14 0xE40548
+
+#define mmTPC1_RTR_LBW_RANGE_MASK_15 0xE4054C
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_0 0xE40550
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_1 0xE40554
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_2 0xE40558
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_3 0xE4055C
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_4 0xE40560
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_5 0xE40564
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_6 0xE40568
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_7 0xE4056C
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_8 0xE40570
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_9 0xE40574
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_10 0xE40578
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_11 0xE4057C
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_12 0xE40580
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_13 0xE40584
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_14 0xE40588
+
+#define mmTPC1_RTR_LBW_RANGE_BASE_15 0xE4058C
+
+#define mmTPC1_RTR_RGLTR 0xE40590
+
+#define mmTPC1_RTR_RGLTR_WR_RESULT 0xE40594
+
+#define mmTPC1_RTR_RGLTR_RD_RESULT 0xE40598
+
+#define mmTPC1_RTR_SCRAMB_EN 0xE40600
+
+#define mmTPC1_RTR_NON_LIN_SCRAMB 0xE40604
+
+#endif /* ASIC_REG_TPC1_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
new file mode 100644
index 000000000000..9c33fc039036
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_CFG_REGS_H_
+#define ASIC_REG_TPC2_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE86400
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE86404
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE86408
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE8640C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE86410
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE86414
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE86418
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE8641C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE86420
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE86424
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE86428
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE8642C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE86430
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE86434
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE86438
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE8643C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE86440
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE86444
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE86448
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE8644C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE86450
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE86454
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE86458
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE8645C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE86460
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE86464
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE86468
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE8646C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE86470
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE86474
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE86478
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE8647C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE86480
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE86484
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE86488
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE8648C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE86490
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE86494
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE86498
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE8649C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE864A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE864A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE864A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE864AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE864B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE864B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE864B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE864BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE864C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE864C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE864C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE864CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE864D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE864D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE864D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE864DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE864E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE864E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE864E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE864EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE864F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE864F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE864F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE864FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE86500
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE86504
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE86508
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE8650C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE86510
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE86514
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE86518
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE8651C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE86520
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE86524
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE86528
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE8652C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE86530
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE86534
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE86538
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE8653C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE86540
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE86544
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE86548
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE8654C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE86550
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE86554
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE86558
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE8655C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE86560
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE86564
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE86568
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE8656C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE86570
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE86574
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE86578
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE8657C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE86580
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE86584
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE86588
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE8658C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE86590
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE86594
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE86598
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE8659C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE865A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE865A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE865A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE865AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE865B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE865B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE865B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE865BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE865C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE865C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE865C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE865CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE865D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE865D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE865D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE865DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE865E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE865E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE865E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE865EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE865F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE865F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE865F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE865FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE86600
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE86604
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE86608
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE8660C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE86610
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE86614
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE86618
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE8661C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE86620
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE86624
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE86628
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE8662C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE86630
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE86634
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE86638
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE8663C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE86640
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE86644
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE86648
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE8664C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE86650
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE86654
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE86658
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE8665C
+
+#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE86660
+
+#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE86664
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_0 0xE86668
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_0 0xE8666C
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_1 0xE86670
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_1 0xE86674
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_2 0xE86678
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_2 0xE8667C
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_3 0xE86680
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_3 0xE86684
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_4 0xE86688
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_4 0xE8668C
+
+#define mmTPC2_CFG_KERNEL_SRF_0 0xE86690
+
+#define mmTPC2_CFG_KERNEL_SRF_1 0xE86694
+
+#define mmTPC2_CFG_KERNEL_SRF_2 0xE86698
+
+#define mmTPC2_CFG_KERNEL_SRF_3 0xE8669C
+
+#define mmTPC2_CFG_KERNEL_SRF_4 0xE866A0
+
+#define mmTPC2_CFG_KERNEL_SRF_5 0xE866A4
+
+#define mmTPC2_CFG_KERNEL_SRF_6 0xE866A8
+
+#define mmTPC2_CFG_KERNEL_SRF_7 0xE866AC
+
+#define mmTPC2_CFG_KERNEL_SRF_8 0xE866B0
+
+#define mmTPC2_CFG_KERNEL_SRF_9 0xE866B4
+
+#define mmTPC2_CFG_KERNEL_SRF_10 0xE866B8
+
+#define mmTPC2_CFG_KERNEL_SRF_11 0xE866BC
+
+#define mmTPC2_CFG_KERNEL_SRF_12 0xE866C0
+
+#define mmTPC2_CFG_KERNEL_SRF_13 0xE866C4
+
+#define mmTPC2_CFG_KERNEL_SRF_14 0xE866C8
+
+#define mmTPC2_CFG_KERNEL_SRF_15 0xE866CC
+
+#define mmTPC2_CFG_KERNEL_SRF_16 0xE866D0
+
+#define mmTPC2_CFG_KERNEL_SRF_17 0xE866D4
+
+#define mmTPC2_CFG_KERNEL_SRF_18 0xE866D8
+
+#define mmTPC2_CFG_KERNEL_SRF_19 0xE866DC
+
+#define mmTPC2_CFG_KERNEL_SRF_20 0xE866E0
+
+#define mmTPC2_CFG_KERNEL_SRF_21 0xE866E4
+
+#define mmTPC2_CFG_KERNEL_SRF_22 0xE866E8
+
+#define mmTPC2_CFG_KERNEL_SRF_23 0xE866EC
+
+#define mmTPC2_CFG_KERNEL_SRF_24 0xE866F0
+
+#define mmTPC2_CFG_KERNEL_SRF_25 0xE866F4
+
+#define mmTPC2_CFG_KERNEL_SRF_26 0xE866F8
+
+#define mmTPC2_CFG_KERNEL_SRF_27 0xE866FC
+
+#define mmTPC2_CFG_KERNEL_SRF_28 0xE86700
+
+#define mmTPC2_CFG_KERNEL_SRF_29 0xE86704
+
+#define mmTPC2_CFG_KERNEL_SRF_30 0xE86708
+
+#define mmTPC2_CFG_KERNEL_SRF_31 0xE8670C
+
+#define mmTPC2_CFG_KERNEL_KERNEL_CONFIG 0xE86710
+
+#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE86714
+
+#define mmTPC2_CFG_RESERVED_DESC_END 0xE86738
+
+#define mmTPC2_CFG_ROUND_CSR 0xE867FC
+
+#define mmTPC2_CFG_TBUF_BASE_ADDR_LOW 0xE86800
+
+#define mmTPC2_CFG_TBUF_BASE_ADDR_HIGH 0xE86804
+
+#define mmTPC2_CFG_SEMAPHORE 0xE86808
+
+#define mmTPC2_CFG_VFLAGS 0xE8680C
+
+#define mmTPC2_CFG_SFLAGS 0xE86810
+
+#define mmTPC2_CFG_LFSR_POLYNOM 0xE86818
+
+#define mmTPC2_CFG_STATUS 0xE8681C
+
+#define mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH 0xE86820
+
+#define mmTPC2_CFG_CFG_SUBTRACT_VALUE 0xE86824
+
+#define mmTPC2_CFG_SM_BASE_ADDRESS_LOW 0xE86828
+
+#define mmTPC2_CFG_SM_BASE_ADDRESS_HIGH 0xE8682C
+
+#define mmTPC2_CFG_TPC_CMD 0xE86830
+
+#define mmTPC2_CFG_TPC_EXECUTE 0xE86838
+
+#define mmTPC2_CFG_TPC_STALL 0xE8683C
+
+#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_LOW 0xE86840
+
+#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE86844
+
+#define mmTPC2_CFG_MSS_CONFIG 0xE86854
+
+#define mmTPC2_CFG_TPC_INTR_CAUSE 0xE86858
+
+#define mmTPC2_CFG_TPC_INTR_MASK 0xE8685C
+
+#define mmTPC2_CFG_TSB_CONFIG 0xE86860
+
+#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE86A00
+
+#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE86A04
+
+#define mmTPC2_CFG_QM_TENSOR_0_PADDING_VALUE 0xE86A08
+
+#define mmTPC2_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE86A0C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE86A10
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE86A14
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE86A18
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE86A1C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE86A20
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE86A24
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE86A28
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE86A2C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE86A30
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE86A34
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE86A38
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE86A3C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE86A40
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE86A44
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE86A48
+
+#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE86A4C
+
+#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE86A50
+
+#define mmTPC2_CFG_QM_TENSOR_1_PADDING_VALUE 0xE86A54
+
+#define mmTPC2_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE86A58
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE86A5C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE86A60
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE86A64
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE86A68
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE86A6C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE86A70
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE86A74
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE86A78
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE86A7C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE86A80
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE86A84
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE86A88
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE86A8C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE86A90
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE86A94
+
+#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE86A98
+
+#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE86A9C
+
+#define mmTPC2_CFG_QM_TENSOR_2_PADDING_VALUE 0xE86AA0
+
+#define mmTPC2_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE86AA4
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE86AA8
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE86AAC
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE86AB0
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE86AB4
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE86AB8
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE86ABC
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE86AC0
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE86AC4
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE86AC8
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE86ACC
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE86AD0
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE86AD4
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE86AD8
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE86ADC
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE86AE0
+
+#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE86AE4
+
+#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE86AE8
+
+#define mmTPC2_CFG_QM_TENSOR_3_PADDING_VALUE 0xE86AEC
+
+#define mmTPC2_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE86AF0
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE86AF4
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE86AF8
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE86AFC
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE86B00
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE86B04
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE86B08
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE86B0C
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE86B10
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE86B14
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE86B18
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE86B1C
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE86B20
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE86B24
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE86B28
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE86B2C
+
+#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE86B30
+
+#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE86B34
+
+#define mmTPC2_CFG_QM_TENSOR_4_PADDING_VALUE 0xE86B38
+
+#define mmTPC2_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE86B3C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE86B40
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE86B44
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE86B48
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE86B4C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE86B50
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE86B54
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE86B58
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE86B5C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE86B60
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE86B64
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE86B68
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE86B6C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE86B70
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE86B74
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE86B78
+
+#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE86B7C
+
+#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE86B80
+
+#define mmTPC2_CFG_QM_TENSOR_5_PADDING_VALUE 0xE86B84
+
+#define mmTPC2_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE86B88
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE86B8C
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE86B90
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE86B94
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE86B98
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE86B9C
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE86BA0
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE86BA4
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE86BA8
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE86BAC
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE86BB0
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE86BB4
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE86BB8
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE86BBC
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE86BC0
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE86BC4
+
+#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE86BC8
+
+#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE86BCC
+
+#define mmTPC2_CFG_QM_TENSOR_6_PADDING_VALUE 0xE86BD0
+
+#define mmTPC2_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE86BD4
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE86BD8
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE86BDC
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE86BE0
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE86BE4
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE86BE8
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE86BEC
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE86BF0
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE86BF4
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE86BF8
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE86BFC
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE86C00
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE86C04
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE86C08
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE86C0C
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE86C10
+
+#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE86C14
+
+#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE86C18
+
+#define mmTPC2_CFG_QM_TENSOR_7_PADDING_VALUE 0xE86C1C
+
+#define mmTPC2_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE86C20
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE86C24
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE86C28
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE86C2C
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE86C30
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE86C34
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE86C38
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE86C3C
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE86C40
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE86C44
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE86C48
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE86C4C
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE86C50
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE86C54
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE86C58
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE86C5C
+
+#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE86C60
+
+#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE86C64
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_0 0xE86C68
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_0 0xE86C6C
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_1 0xE86C70
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_1 0xE86C74
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_2 0xE86C78
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_2 0xE86C7C
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_3 0xE86C80
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_3 0xE86C84
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_4 0xE86C88
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_4 0xE86C8C
+
+#define mmTPC2_CFG_QM_SRF_0 0xE86C90
+
+#define mmTPC2_CFG_QM_SRF_1 0xE86C94
+
+#define mmTPC2_CFG_QM_SRF_2 0xE86C98
+
+#define mmTPC2_CFG_QM_SRF_3 0xE86C9C
+
+#define mmTPC2_CFG_QM_SRF_4 0xE86CA0
+
+#define mmTPC2_CFG_QM_SRF_5 0xE86CA4
+
+#define mmTPC2_CFG_QM_SRF_6 0xE86CA8
+
+#define mmTPC2_CFG_QM_SRF_7 0xE86CAC
+
+#define mmTPC2_CFG_QM_SRF_8 0xE86CB0
+
+#define mmTPC2_CFG_QM_SRF_9 0xE86CB4
+
+#define mmTPC2_CFG_QM_SRF_10 0xE86CB8
+
+#define mmTPC2_CFG_QM_SRF_11 0xE86CBC
+
+#define mmTPC2_CFG_QM_SRF_12 0xE86CC0
+
+#define mmTPC2_CFG_QM_SRF_13 0xE86CC4
+
+#define mmTPC2_CFG_QM_SRF_14 0xE86CC8
+
+#define mmTPC2_CFG_QM_SRF_15 0xE86CCC
+
+#define mmTPC2_CFG_QM_SRF_16 0xE86CD0
+
+#define mmTPC2_CFG_QM_SRF_17 0xE86CD4
+
+#define mmTPC2_CFG_QM_SRF_18 0xE86CD8
+
+#define mmTPC2_CFG_QM_SRF_19 0xE86CDC
+
+#define mmTPC2_CFG_QM_SRF_20 0xE86CE0
+
+#define mmTPC2_CFG_QM_SRF_21 0xE86CE4
+
+#define mmTPC2_CFG_QM_SRF_22 0xE86CE8
+
+#define mmTPC2_CFG_QM_SRF_23 0xE86CEC
+
+#define mmTPC2_CFG_QM_SRF_24 0xE86CF0
+
+#define mmTPC2_CFG_QM_SRF_25 0xE86CF4
+
+#define mmTPC2_CFG_QM_SRF_26 0xE86CF8
+
+#define mmTPC2_CFG_QM_SRF_27 0xE86CFC
+
+#define mmTPC2_CFG_QM_SRF_28 0xE86D00
+
+#define mmTPC2_CFG_QM_SRF_29 0xE86D04
+
+#define mmTPC2_CFG_QM_SRF_30 0xE86D08
+
+#define mmTPC2_CFG_QM_SRF_31 0xE86D0C
+
+#define mmTPC2_CFG_QM_KERNEL_CONFIG 0xE86D10
+
+#define mmTPC2_CFG_QM_SYNC_OBJECT_MESSAGE 0xE86D14
+
+#define mmTPC2_CFG_ARUSER 0xE86D18
+
+#define mmTPC2_CFG_AWUSER 0xE86D1C
+
+#define mmTPC2_CFG_FUNC_MBIST_CNTRL 0xE86E00
+
+#define mmTPC2_CFG_FUNC_MBIST_PAT 0xE86E04
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_0 0xE86E08
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_1 0xE86E0C
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_2 0xE86E10
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_3 0xE86E14
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_4 0xE86E18
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_5 0xE86E1C
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_6 0xE86E20
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_7 0xE86E24
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_8 0xE86E28
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE86E2C
+
+#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
new file mode 100644
index 000000000000..7a643887d6e1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_CMDQ_REGS_H_
+#define ASIC_REG_TPC2_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC2_CMDQ_GLBL_CFG0 0xE89000
+
+#define mmTPC2_CMDQ_GLBL_CFG1 0xE89004
+
+#define mmTPC2_CMDQ_GLBL_PROT 0xE89008
+
+#define mmTPC2_CMDQ_GLBL_ERR_CFG 0xE8900C
+
+#define mmTPC2_CMDQ_GLBL_ERR_ADDR_LO 0xE89010
+
+#define mmTPC2_CMDQ_GLBL_ERR_ADDR_HI 0xE89014
+
+#define mmTPC2_CMDQ_GLBL_ERR_WDATA 0xE89018
+
+#define mmTPC2_CMDQ_GLBL_SECURE_PROPS 0xE8901C
+
+#define mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS 0xE89020
+
+#define mmTPC2_CMDQ_GLBL_STS0 0xE89024
+
+#define mmTPC2_CMDQ_GLBL_STS1 0xE89028
+
+#define mmTPC2_CMDQ_CQ_CFG0 0xE890B0
+
+#define mmTPC2_CMDQ_CQ_CFG1 0xE890B4
+
+#define mmTPC2_CMDQ_CQ_ARUSER 0xE890B8
+
+#define mmTPC2_CMDQ_CQ_PTR_LO 0xE890C0
+
+#define mmTPC2_CMDQ_CQ_PTR_HI 0xE890C4
+
+#define mmTPC2_CMDQ_CQ_TSIZE 0xE890C8
+
+#define mmTPC2_CMDQ_CQ_CTL 0xE890CC
+
+#define mmTPC2_CMDQ_CQ_PTR_LO_STS 0xE890D4
+
+#define mmTPC2_CMDQ_CQ_PTR_HI_STS 0xE890D8
+
+#define mmTPC2_CMDQ_CQ_TSIZE_STS 0xE890DC
+
+#define mmTPC2_CMDQ_CQ_CTL_STS 0xE890E0
+
+#define mmTPC2_CMDQ_CQ_STS0 0xE890E4
+
+#define mmTPC2_CMDQ_CQ_STS1 0xE890E8
+
+#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN 0xE890F0
+
+#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE890F4
+
+#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT 0xE890F8
+
+#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE890FC
+
+#define mmTPC2_CMDQ_CQ_IFIFO_CNT 0xE89108
+
+#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE89120
+
+#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE89124
+
+#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE89128
+
+#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE8912C
+
+#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE89130
+
+#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE89134
+
+#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE89138
+
+#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE8913C
+
+#define mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE89140
+
+#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE89144
+
+#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE89148
+
+#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE8914C
+
+#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE89150
+
+#define mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE89154
+
+#define mmTPC2_CMDQ_CP_FENCE0_RDATA 0xE89158
+
+#define mmTPC2_CMDQ_CP_FENCE1_RDATA 0xE8915C
+
+#define mmTPC2_CMDQ_CP_FENCE2_RDATA 0xE89160
+
+#define mmTPC2_CMDQ_CP_FENCE3_RDATA 0xE89164
+
+#define mmTPC2_CMDQ_CP_FENCE0_CNT 0xE89168
+
+#define mmTPC2_CMDQ_CP_FENCE1_CNT 0xE8916C
+
+#define mmTPC2_CMDQ_CP_FENCE2_CNT 0xE89170
+
+#define mmTPC2_CMDQ_CP_FENCE3_CNT 0xE89174
+
+#define mmTPC2_CMDQ_CP_STS 0xE89178
+
+#define mmTPC2_CMDQ_CP_CURRENT_INST_LO 0xE8917C
+
+#define mmTPC2_CMDQ_CP_CURRENT_INST_HI 0xE89180
+
+#define mmTPC2_CMDQ_CP_BARRIER_CFG 0xE89184
+
+#define mmTPC2_CMDQ_CP_DBG_0 0xE89188
+
+#define mmTPC2_CMDQ_CQ_BUF_ADDR 0xE89308
+
+#define mmTPC2_CMDQ_CQ_BUF_RDATA 0xE8930C
+
+#endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
new file mode 100644
index 000000000000..f3e32c018064
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_QM_REGS_H_
+#define ASIC_REG_TPC2_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC2_QM_GLBL_CFG0 0xE88000
+
+#define mmTPC2_QM_GLBL_CFG1 0xE88004
+
+#define mmTPC2_QM_GLBL_PROT 0xE88008
+
+#define mmTPC2_QM_GLBL_ERR_CFG 0xE8800C
+
+#define mmTPC2_QM_GLBL_ERR_ADDR_LO 0xE88010
+
+#define mmTPC2_QM_GLBL_ERR_ADDR_HI 0xE88014
+
+#define mmTPC2_QM_GLBL_ERR_WDATA 0xE88018
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS 0xE8801C
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS 0xE88020
+
+#define mmTPC2_QM_GLBL_STS0 0xE88024
+
+#define mmTPC2_QM_GLBL_STS1 0xE88028
+
+#define mmTPC2_QM_PQ_BASE_LO 0xE88060
+
+#define mmTPC2_QM_PQ_BASE_HI 0xE88064
+
+#define mmTPC2_QM_PQ_SIZE 0xE88068
+
+#define mmTPC2_QM_PQ_PI 0xE8806C
+
+#define mmTPC2_QM_PQ_CI 0xE88070
+
+#define mmTPC2_QM_PQ_CFG0 0xE88074
+
+#define mmTPC2_QM_PQ_CFG1 0xE88078
+
+#define mmTPC2_QM_PQ_ARUSER 0xE8807C
+
+#define mmTPC2_QM_PQ_PUSH0 0xE88080
+
+#define mmTPC2_QM_PQ_PUSH1 0xE88084
+
+#define mmTPC2_QM_PQ_PUSH2 0xE88088
+
+#define mmTPC2_QM_PQ_PUSH3 0xE8808C
+
+#define mmTPC2_QM_PQ_STS0 0xE88090
+
+#define mmTPC2_QM_PQ_STS1 0xE88094
+
+#define mmTPC2_QM_PQ_RD_RATE_LIM_EN 0xE880A0
+
+#define mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE880A4
+
+#define mmTPC2_QM_PQ_RD_RATE_LIM_SAT 0xE880A8
+
+#define mmTPC2_QM_PQ_RD_RATE_LIM_TOUT 0xE880AC
+
+#define mmTPC2_QM_CQ_CFG0 0xE880B0
+
+#define mmTPC2_QM_CQ_CFG1 0xE880B4
+
+#define mmTPC2_QM_CQ_ARUSER 0xE880B8
+
+#define mmTPC2_QM_CQ_PTR_LO 0xE880C0
+
+#define mmTPC2_QM_CQ_PTR_HI 0xE880C4
+
+#define mmTPC2_QM_CQ_TSIZE 0xE880C8
+
+#define mmTPC2_QM_CQ_CTL 0xE880CC
+
+#define mmTPC2_QM_CQ_PTR_LO_STS 0xE880D4
+
+#define mmTPC2_QM_CQ_PTR_HI_STS 0xE880D8
+
+#define mmTPC2_QM_CQ_TSIZE_STS 0xE880DC
+
+#define mmTPC2_QM_CQ_CTL_STS 0xE880E0
+
+#define mmTPC2_QM_CQ_STS0 0xE880E4
+
+#define mmTPC2_QM_CQ_STS1 0xE880E8
+
+#define mmTPC2_QM_CQ_RD_RATE_LIM_EN 0xE880F0
+
+#define mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE880F4
+
+#define mmTPC2_QM_CQ_RD_RATE_LIM_SAT 0xE880F8
+
+#define mmTPC2_QM_CQ_RD_RATE_LIM_TOUT 0xE880FC
+
+#define mmTPC2_QM_CQ_IFIFO_CNT 0xE88108
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO 0xE88120
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI 0xE88124
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO 0xE88128
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI 0xE8812C
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO 0xE88130
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI 0xE88134
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO 0xE88138
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI 0xE8813C
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET 0xE88140
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE88144
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE88148
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE8814C
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE88150
+
+#define mmTPC2_QM_CP_LDMA_COMMIT_OFFSET 0xE88154
+
+#define mmTPC2_QM_CP_FENCE0_RDATA 0xE88158
+
+#define mmTPC2_QM_CP_FENCE1_RDATA 0xE8815C
+
+#define mmTPC2_QM_CP_FENCE2_RDATA 0xE88160
+
+#define mmTPC2_QM_CP_FENCE3_RDATA 0xE88164
+
+#define mmTPC2_QM_CP_FENCE0_CNT 0xE88168
+
+#define mmTPC2_QM_CP_FENCE1_CNT 0xE8816C
+
+#define mmTPC2_QM_CP_FENCE2_CNT 0xE88170
+
+#define mmTPC2_QM_CP_FENCE3_CNT 0xE88174
+
+#define mmTPC2_QM_CP_STS 0xE88178
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO 0xE8817C
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI 0xE88180
+
+#define mmTPC2_QM_CP_BARRIER_CFG 0xE88184
+
+#define mmTPC2_QM_CP_DBG_0 0xE88188
+
+#define mmTPC2_QM_PQ_BUF_ADDR 0xE88300
+
+#define mmTPC2_QM_PQ_BUF_RDATA 0xE88304
+
+#define mmTPC2_QM_CQ_BUF_ADDR 0xE88308
+
+#define mmTPC2_QM_CQ_BUF_RDATA 0xE8830C
+
+#endif /* ASIC_REG_TPC2_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
new file mode 100644
index 000000000000..0eb0cd1fbd19
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_RTR_REGS_H_
+#define ASIC_REG_TPC2_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC2_RTR_HBW_RD_RQ_E_ARB 0xE80100
+
+#define mmTPC2_RTR_HBW_RD_RQ_W_ARB 0xE80104
+
+#define mmTPC2_RTR_HBW_RD_RQ_N_ARB 0xE80108
+
+#define mmTPC2_RTR_HBW_RD_RQ_S_ARB 0xE8010C
+
+#define mmTPC2_RTR_HBW_RD_RQ_L_ARB 0xE80110
+
+#define mmTPC2_RTR_HBW_E_ARB_MAX 0xE80120
+
+#define mmTPC2_RTR_HBW_W_ARB_MAX 0xE80124
+
+#define mmTPC2_RTR_HBW_N_ARB_MAX 0xE80128
+
+#define mmTPC2_RTR_HBW_S_ARB_MAX 0xE8012C
+
+#define mmTPC2_RTR_HBW_L_ARB_MAX 0xE80130
+
+#define mmTPC2_RTR_HBW_RD_RS_E_ARB 0xE80140
+
+#define mmTPC2_RTR_HBW_RD_RS_W_ARB 0xE80144
+
+#define mmTPC2_RTR_HBW_RD_RS_N_ARB 0xE80148
+
+#define mmTPC2_RTR_HBW_RD_RS_S_ARB 0xE8014C
+
+#define mmTPC2_RTR_HBW_RD_RS_L_ARB 0xE80150
+
+#define mmTPC2_RTR_HBW_WR_RQ_E_ARB 0xE80170
+
+#define mmTPC2_RTR_HBW_WR_RQ_W_ARB 0xE80174
+
+#define mmTPC2_RTR_HBW_WR_RQ_N_ARB 0xE80178
+
+#define mmTPC2_RTR_HBW_WR_RQ_S_ARB 0xE8017C
+
+#define mmTPC2_RTR_HBW_WR_RQ_L_ARB 0xE80180
+
+#define mmTPC2_RTR_HBW_WR_RS_E_ARB 0xE80190
+
+#define mmTPC2_RTR_HBW_WR_RS_W_ARB 0xE80194
+
+#define mmTPC2_RTR_HBW_WR_RS_N_ARB 0xE80198
+
+#define mmTPC2_RTR_HBW_WR_RS_S_ARB 0xE8019C
+
+#define mmTPC2_RTR_HBW_WR_RS_L_ARB 0xE801A0
+
+#define mmTPC2_RTR_LBW_RD_RQ_E_ARB 0xE80200
+
+#define mmTPC2_RTR_LBW_RD_RQ_W_ARB 0xE80204
+
+#define mmTPC2_RTR_LBW_RD_RQ_N_ARB 0xE80208
+
+#define mmTPC2_RTR_LBW_RD_RQ_S_ARB 0xE8020C
+
+#define mmTPC2_RTR_LBW_RD_RQ_L_ARB 0xE80210
+
+#define mmTPC2_RTR_LBW_E_ARB_MAX 0xE80220
+
+#define mmTPC2_RTR_LBW_W_ARB_MAX 0xE80224
+
+#define mmTPC2_RTR_LBW_N_ARB_MAX 0xE80228
+
+#define mmTPC2_RTR_LBW_S_ARB_MAX 0xE8022C
+
+#define mmTPC2_RTR_LBW_L_ARB_MAX 0xE80230
+
+#define mmTPC2_RTR_LBW_RD_RS_E_ARB 0xE80250
+
+#define mmTPC2_RTR_LBW_RD_RS_W_ARB 0xE80254
+
+#define mmTPC2_RTR_LBW_RD_RS_N_ARB 0xE80258
+
+#define mmTPC2_RTR_LBW_RD_RS_S_ARB 0xE8025C
+
+#define mmTPC2_RTR_LBW_RD_RS_L_ARB 0xE80260
+
+#define mmTPC2_RTR_LBW_WR_RQ_E_ARB 0xE80270
+
+#define mmTPC2_RTR_LBW_WR_RQ_W_ARB 0xE80274
+
+#define mmTPC2_RTR_LBW_WR_RQ_N_ARB 0xE80278
+
+#define mmTPC2_RTR_LBW_WR_RQ_S_ARB 0xE8027C
+
+#define mmTPC2_RTR_LBW_WR_RQ_L_ARB 0xE80280
+
+#define mmTPC2_RTR_LBW_WR_RS_E_ARB 0xE80290
+
+#define mmTPC2_RTR_LBW_WR_RS_W_ARB 0xE80294
+
+#define mmTPC2_RTR_LBW_WR_RS_N_ARB 0xE80298
+
+#define mmTPC2_RTR_LBW_WR_RS_S_ARB 0xE8029C
+
+#define mmTPC2_RTR_LBW_WR_RS_L_ARB 0xE802A0
+
+#define mmTPC2_RTR_DBG_E_ARB 0xE80300
+
+#define mmTPC2_RTR_DBG_W_ARB 0xE80304
+
+#define mmTPC2_RTR_DBG_N_ARB 0xE80308
+
+#define mmTPC2_RTR_DBG_S_ARB 0xE8030C
+
+#define mmTPC2_RTR_DBG_L_ARB 0xE80310
+
+#define mmTPC2_RTR_DBG_E_ARB_MAX 0xE80320
+
+#define mmTPC2_RTR_DBG_W_ARB_MAX 0xE80324
+
+#define mmTPC2_RTR_DBG_N_ARB_MAX 0xE80328
+
+#define mmTPC2_RTR_DBG_S_ARB_MAX 0xE8032C
+
+#define mmTPC2_RTR_DBG_L_ARB_MAX 0xE80330
+
+#define mmTPC2_RTR_SPLIT_COEF_0 0xE80400
+
+#define mmTPC2_RTR_SPLIT_COEF_1 0xE80404
+
+#define mmTPC2_RTR_SPLIT_COEF_2 0xE80408
+
+#define mmTPC2_RTR_SPLIT_COEF_3 0xE8040C
+
+#define mmTPC2_RTR_SPLIT_COEF_4 0xE80410
+
+#define mmTPC2_RTR_SPLIT_COEF_5 0xE80414
+
+#define mmTPC2_RTR_SPLIT_COEF_6 0xE80418
+
+#define mmTPC2_RTR_SPLIT_COEF_7 0xE8041C
+
+#define mmTPC2_RTR_SPLIT_COEF_8 0xE80420
+
+#define mmTPC2_RTR_SPLIT_COEF_9 0xE80424
+
+#define mmTPC2_RTR_SPLIT_CFG 0xE80440
+
+#define mmTPC2_RTR_SPLIT_RD_SAT 0xE80444
+
+#define mmTPC2_RTR_SPLIT_RD_RST_TOKEN 0xE80448
+
+#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_0 0xE8044C
+
+#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_1 0xE80450
+
+#define mmTPC2_RTR_SPLIT_WR_SAT 0xE80454
+
+#define mmTPC2_RTR_WPLIT_WR_TST_TOLEN 0xE80458
+
+#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_0 0xE8045C
+
+#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_1 0xE80460
+
+#define mmTPC2_RTR_HBW_RANGE_HIT 0xE80470
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_0 0xE80480
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_1 0xE80484
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_2 0xE80488
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_3 0xE8048C
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_4 0xE80490
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_5 0xE80494
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_6 0xE80498
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_L_7 0xE8049C
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_0 0xE804A0
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_1 0xE804A4
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_2 0xE804A8
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_3 0xE804AC
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_4 0xE804B0
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_5 0xE804B4
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_6 0xE804B8
+
+#define mmTPC2_RTR_HBW_RANGE_MASK_H_7 0xE804BC
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_0 0xE804C0
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_1 0xE804C4
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_2 0xE804C8
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_3 0xE804CC
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_4 0xE804D0
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_5 0xE804D4
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_6 0xE804D8
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_L_7 0xE804DC
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_0 0xE804E0
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_1 0xE804E4
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_2 0xE804E8
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_3 0xE804EC
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_4 0xE804F0
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_5 0xE804F4
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_6 0xE804F8
+
+#define mmTPC2_RTR_HBW_RANGE_BASE_H_7 0xE804FC
+
+#define mmTPC2_RTR_LBW_RANGE_HIT 0xE80500
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_0 0xE80510
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_1 0xE80514
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_2 0xE80518
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_3 0xE8051C
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_4 0xE80520
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_5 0xE80524
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_6 0xE80528
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_7 0xE8052C
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_8 0xE80530
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_9 0xE80534
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_10 0xE80538
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_11 0xE8053C
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_12 0xE80540
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_13 0xE80544
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_14 0xE80548
+
+#define mmTPC2_RTR_LBW_RANGE_MASK_15 0xE8054C
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_0 0xE80550
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_1 0xE80554
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_2 0xE80558
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_3 0xE8055C
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_4 0xE80560
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_5 0xE80564
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_6 0xE80568
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_7 0xE8056C
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_8 0xE80570
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_9 0xE80574
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_10 0xE80578
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_11 0xE8057C
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_12 0xE80580
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_13 0xE80584
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_14 0xE80588
+
+#define mmTPC2_RTR_LBW_RANGE_BASE_15 0xE8058C
+
+#define mmTPC2_RTR_RGLTR 0xE80590
+
+#define mmTPC2_RTR_RGLTR_WR_RESULT 0xE80594
+
+#define mmTPC2_RTR_RGLTR_RD_RESULT 0xE80598
+
+#define mmTPC2_RTR_SCRAMB_EN 0xE80600
+
+#define mmTPC2_RTR_NON_LIN_SCRAMB 0xE80604
+
+#endif /* ASIC_REG_TPC2_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
new file mode 100644
index 000000000000..0baf63c69b25
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_CFG_REGS_H_
+#define ASIC_REG_TPC3_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xEC6400
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xEC6404
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xEC6408
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xEC640C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xEC6410
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xEC6414
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6418
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xEC641C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xEC6420
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6424
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xEC6428
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xEC642C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6430
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xEC6434
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xEC6438
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xEC643C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xEC6440
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xEC6444
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6448
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xEC644C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xEC6450
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xEC6454
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xEC6458
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xEC645C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xEC6460
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6464
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xEC6468
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xEC646C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6470
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xEC6474
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xEC6478
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xEC647C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xEC6480
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xEC6484
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6488
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xEC648C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xEC6490
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6494
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xEC6498
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xEC649C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xEC64A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xEC64A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xEC64A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xEC64AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xEC64B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xEC64B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xEC64B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xEC64BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xEC64C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xEC64C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xEC64C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xEC64CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xEC64D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xEC64D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xEC64D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xEC64DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xEC64E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xEC64E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xEC64E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xEC64EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xEC64F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xEC64F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xEC64F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xEC64FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xEC6500
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xEC6504
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6508
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xEC650C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xEC6510
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6514
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xEC6518
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xEC651C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6520
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xEC6524
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xEC6528
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xEC652C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xEC6530
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xEC6534
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xEC6538
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xEC653C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xEC6540
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xEC6544
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6548
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xEC654C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xEC6550
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6554
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xEC6558
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xEC655C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6560
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xEC6564
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xEC6568
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xEC656C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xEC6570
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xEC6574
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6578
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xEC657C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xEC6580
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xEC6584
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xEC6588
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xEC658C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xEC6590
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6594
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xEC6598
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xEC659C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xEC65A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xEC65A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xEC65A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xEC65AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xEC65B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xEC65B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xEC65B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xEC65BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xEC65C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xEC65C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xEC65C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xEC65CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xEC65D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xEC65D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xEC65D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xEC65DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xEC65E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xEC65E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xEC65E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xEC65EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xEC65F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xEC65F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xEC65F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xEC65FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xEC6600
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6604
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xEC6608
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xEC660C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6610
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xEC6614
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xEC6618
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xEC661C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xEC6620
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xEC6624
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xEC6628
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xEC662C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xEC6630
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xEC6634
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6638
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xEC663C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xEC6640
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6644
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xEC6648
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xEC664C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6650
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xEC6654
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xEC6658
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xEC665C
+
+#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xEC6660
+
+#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xEC6664
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_0 0xEC6668
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_0 0xEC666C
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_1 0xEC6670
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_1 0xEC6674
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_2 0xEC6678
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_2 0xEC667C
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_3 0xEC6680
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_3 0xEC6684
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_4 0xEC6688
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_4 0xEC668C
+
+#define mmTPC3_CFG_KERNEL_SRF_0 0xEC6690
+
+#define mmTPC3_CFG_KERNEL_SRF_1 0xEC6694
+
+#define mmTPC3_CFG_KERNEL_SRF_2 0xEC6698
+
+#define mmTPC3_CFG_KERNEL_SRF_3 0xEC669C
+
+#define mmTPC3_CFG_KERNEL_SRF_4 0xEC66A0
+
+#define mmTPC3_CFG_KERNEL_SRF_5 0xEC66A4
+
+#define mmTPC3_CFG_KERNEL_SRF_6 0xEC66A8
+
+#define mmTPC3_CFG_KERNEL_SRF_7 0xEC66AC
+
+#define mmTPC3_CFG_KERNEL_SRF_8 0xEC66B0
+
+#define mmTPC3_CFG_KERNEL_SRF_9 0xEC66B4
+
+#define mmTPC3_CFG_KERNEL_SRF_10 0xEC66B8
+
+#define mmTPC3_CFG_KERNEL_SRF_11 0xEC66BC
+
+#define mmTPC3_CFG_KERNEL_SRF_12 0xEC66C0
+
+#define mmTPC3_CFG_KERNEL_SRF_13 0xEC66C4
+
+#define mmTPC3_CFG_KERNEL_SRF_14 0xEC66C8
+
+#define mmTPC3_CFG_KERNEL_SRF_15 0xEC66CC
+
+#define mmTPC3_CFG_KERNEL_SRF_16 0xEC66D0
+
+#define mmTPC3_CFG_KERNEL_SRF_17 0xEC66D4
+
+#define mmTPC3_CFG_KERNEL_SRF_18 0xEC66D8
+
+#define mmTPC3_CFG_KERNEL_SRF_19 0xEC66DC
+
+#define mmTPC3_CFG_KERNEL_SRF_20 0xEC66E0
+
+#define mmTPC3_CFG_KERNEL_SRF_21 0xEC66E4
+
+#define mmTPC3_CFG_KERNEL_SRF_22 0xEC66E8
+
+#define mmTPC3_CFG_KERNEL_SRF_23 0xEC66EC
+
+#define mmTPC3_CFG_KERNEL_SRF_24 0xEC66F0
+
+#define mmTPC3_CFG_KERNEL_SRF_25 0xEC66F4
+
+#define mmTPC3_CFG_KERNEL_SRF_26 0xEC66F8
+
+#define mmTPC3_CFG_KERNEL_SRF_27 0xEC66FC
+
+#define mmTPC3_CFG_KERNEL_SRF_28 0xEC6700
+
+#define mmTPC3_CFG_KERNEL_SRF_29 0xEC6704
+
+#define mmTPC3_CFG_KERNEL_SRF_30 0xEC6708
+
+#define mmTPC3_CFG_KERNEL_SRF_31 0xEC670C
+
+#define mmTPC3_CFG_KERNEL_KERNEL_CONFIG 0xEC6710
+
+#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xEC6714
+
+#define mmTPC3_CFG_RESERVED_DESC_END 0xEC6738
+
+#define mmTPC3_CFG_ROUND_CSR 0xEC67FC
+
+#define mmTPC3_CFG_TBUF_BASE_ADDR_LOW 0xEC6800
+
+#define mmTPC3_CFG_TBUF_BASE_ADDR_HIGH 0xEC6804
+
+#define mmTPC3_CFG_SEMAPHORE 0xEC6808
+
+#define mmTPC3_CFG_VFLAGS 0xEC680C
+
+#define mmTPC3_CFG_SFLAGS 0xEC6810
+
+#define mmTPC3_CFG_LFSR_POLYNOM 0xEC6818
+
+#define mmTPC3_CFG_STATUS 0xEC681C
+
+#define mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH 0xEC6820
+
+#define mmTPC3_CFG_CFG_SUBTRACT_VALUE 0xEC6824
+
+#define mmTPC3_CFG_SM_BASE_ADDRESS_LOW 0xEC6828
+
+#define mmTPC3_CFG_SM_BASE_ADDRESS_HIGH 0xEC682C
+
+#define mmTPC3_CFG_TPC_CMD 0xEC6830
+
+#define mmTPC3_CFG_TPC_EXECUTE 0xEC6838
+
+#define mmTPC3_CFG_TPC_STALL 0xEC683C
+
+#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_LOW 0xEC6840
+
+#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH 0xEC6844
+
+#define mmTPC3_CFG_MSS_CONFIG 0xEC6854
+
+#define mmTPC3_CFG_TPC_INTR_CAUSE 0xEC6858
+
+#define mmTPC3_CFG_TPC_INTR_MASK 0xEC685C
+
+#define mmTPC3_CFG_TSB_CONFIG 0xEC6860
+
+#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xEC6A00
+
+#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xEC6A04
+
+#define mmTPC3_CFG_QM_TENSOR_0_PADDING_VALUE 0xEC6A08
+
+#define mmTPC3_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xEC6A0C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_SIZE 0xEC6A10
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xEC6A14
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6A18
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_SIZE 0xEC6A1C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xEC6A20
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6A24
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_SIZE 0xEC6A28
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xEC6A2C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6A30
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_SIZE 0xEC6A34
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xEC6A38
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xEC6A3C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_SIZE 0xEC6A40
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xEC6A44
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6A48
+
+#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xEC6A4C
+
+#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xEC6A50
+
+#define mmTPC3_CFG_QM_TENSOR_1_PADDING_VALUE 0xEC6A54
+
+#define mmTPC3_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xEC6A58
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_SIZE 0xEC6A5C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xEC6A60
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6A64
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_SIZE 0xEC6A68
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xEC6A6C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6A70
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_SIZE 0xEC6A74
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xEC6A78
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xEC6A7C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_SIZE 0xEC6A80
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xEC6A84
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6A88
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_SIZE 0xEC6A8C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xEC6A90
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6A94
+
+#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xEC6A98
+
+#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xEC6A9C
+
+#define mmTPC3_CFG_QM_TENSOR_2_PADDING_VALUE 0xEC6AA0
+
+#define mmTPC3_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xEC6AA4
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_SIZE 0xEC6AA8
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xEC6AAC
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xEC6AB0
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_SIZE 0xEC6AB4
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xEC6AB8
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xEC6ABC
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_SIZE 0xEC6AC0
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xEC6AC4
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xEC6AC8
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_SIZE 0xEC6ACC
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xEC6AD0
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xEC6AD4
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_SIZE 0xEC6AD8
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xEC6ADC
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xEC6AE0
+
+#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xEC6AE4
+
+#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xEC6AE8
+
+#define mmTPC3_CFG_QM_TENSOR_3_PADDING_VALUE 0xEC6AEC
+
+#define mmTPC3_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xEC6AF0
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_SIZE 0xEC6AF4
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xEC6AF8
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xEC6AFC
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_SIZE 0xEC6B00
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xEC6B04
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6B08
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_SIZE 0xEC6B0C
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xEC6B10
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6B14
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_SIZE 0xEC6B18
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xEC6B1C
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6B20
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_SIZE 0xEC6B24
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xEC6B28
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xEC6B2C
+
+#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xEC6B30
+
+#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xEC6B34
+
+#define mmTPC3_CFG_QM_TENSOR_4_PADDING_VALUE 0xEC6B38
+
+#define mmTPC3_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xEC6B3C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_SIZE 0xEC6B40
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xEC6B44
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6B48
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_SIZE 0xEC6B4C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xEC6B50
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6B54
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_SIZE 0xEC6B58
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xEC6B5C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6B60
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_SIZE 0xEC6B64
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xEC6B68
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xEC6B6C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_SIZE 0xEC6B70
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xEC6B74
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6B78
+
+#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xEC6B7C
+
+#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xEC6B80
+
+#define mmTPC3_CFG_QM_TENSOR_5_PADDING_VALUE 0xEC6B84
+
+#define mmTPC3_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xEC6B88
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_SIZE 0xEC6B8C
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xEC6B90
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6B94
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_SIZE 0xEC6B98
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xEC6B9C
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xEC6BA0
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_SIZE 0xEC6BA4
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xEC6BA8
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xEC6BAC
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_SIZE 0xEC6BB0
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xEC6BB4
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xEC6BB8
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_SIZE 0xEC6BBC
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xEC6BC0
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xEC6BC4
+
+#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xEC6BC8
+
+#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xEC6BCC
+
+#define mmTPC3_CFG_QM_TENSOR_6_PADDING_VALUE 0xEC6BD0
+
+#define mmTPC3_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xEC6BD4
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_SIZE 0xEC6BD8
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xEC6BDC
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xEC6BE0
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_SIZE 0xEC6BE4
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xEC6BE8
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xEC6BEC
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_SIZE 0xEC6BF0
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xEC6BF4
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xEC6BF8
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_SIZE 0xEC6BFC
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xEC6C00
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6C04
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_SIZE 0xEC6C08
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xEC6C0C
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6C10
+
+#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xEC6C14
+
+#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xEC6C18
+
+#define mmTPC3_CFG_QM_TENSOR_7_PADDING_VALUE 0xEC6C1C
+
+#define mmTPC3_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xEC6C20
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_SIZE 0xEC6C24
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xEC6C28
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xEC6C2C
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_SIZE 0xEC6C30
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xEC6C34
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6C38
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_SIZE 0xEC6C3C
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xEC6C40
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6C44
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_SIZE 0xEC6C48
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xEC6C4C
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6C50
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_SIZE 0xEC6C54
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xEC6C58
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xEC6C5C
+
+#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xEC6C60
+
+#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xEC6C64
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_0 0xEC6C68
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_0 0xEC6C6C
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_1 0xEC6C70
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_1 0xEC6C74
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_2 0xEC6C78
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_2 0xEC6C7C
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_3 0xEC6C80
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_3 0xEC6C84
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_4 0xEC6C88
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_4 0xEC6C8C
+
+#define mmTPC3_CFG_QM_SRF_0 0xEC6C90
+
+#define mmTPC3_CFG_QM_SRF_1 0xEC6C94
+
+#define mmTPC3_CFG_QM_SRF_2 0xEC6C98
+
+#define mmTPC3_CFG_QM_SRF_3 0xEC6C9C
+
+#define mmTPC3_CFG_QM_SRF_4 0xEC6CA0
+
+#define mmTPC3_CFG_QM_SRF_5 0xEC6CA4
+
+#define mmTPC3_CFG_QM_SRF_6 0xEC6CA8
+
+#define mmTPC3_CFG_QM_SRF_7 0xEC6CAC
+
+#define mmTPC3_CFG_QM_SRF_8 0xEC6CB0
+
+#define mmTPC3_CFG_QM_SRF_9 0xEC6CB4
+
+#define mmTPC3_CFG_QM_SRF_10 0xEC6CB8
+
+#define mmTPC3_CFG_QM_SRF_11 0xEC6CBC
+
+#define mmTPC3_CFG_QM_SRF_12 0xEC6CC0
+
+#define mmTPC3_CFG_QM_SRF_13 0xEC6CC4
+
+#define mmTPC3_CFG_QM_SRF_14 0xEC6CC8
+
+#define mmTPC3_CFG_QM_SRF_15 0xEC6CCC
+
+#define mmTPC3_CFG_QM_SRF_16 0xEC6CD0
+
+#define mmTPC3_CFG_QM_SRF_17 0xEC6CD4
+
+#define mmTPC3_CFG_QM_SRF_18 0xEC6CD8
+
+#define mmTPC3_CFG_QM_SRF_19 0xEC6CDC
+
+#define mmTPC3_CFG_QM_SRF_20 0xEC6CE0
+
+#define mmTPC3_CFG_QM_SRF_21 0xEC6CE4
+
+#define mmTPC3_CFG_QM_SRF_22 0xEC6CE8
+
+#define mmTPC3_CFG_QM_SRF_23 0xEC6CEC
+
+#define mmTPC3_CFG_QM_SRF_24 0xEC6CF0
+
+#define mmTPC3_CFG_QM_SRF_25 0xEC6CF4
+
+#define mmTPC3_CFG_QM_SRF_26 0xEC6CF8
+
+#define mmTPC3_CFG_QM_SRF_27 0xEC6CFC
+
+#define mmTPC3_CFG_QM_SRF_28 0xEC6D00
+
+#define mmTPC3_CFG_QM_SRF_29 0xEC6D04
+
+#define mmTPC3_CFG_QM_SRF_30 0xEC6D08
+
+#define mmTPC3_CFG_QM_SRF_31 0xEC6D0C
+
+#define mmTPC3_CFG_QM_KERNEL_CONFIG 0xEC6D10
+
+#define mmTPC3_CFG_QM_SYNC_OBJECT_MESSAGE 0xEC6D14
+
+#define mmTPC3_CFG_ARUSER 0xEC6D18
+
+#define mmTPC3_CFG_AWUSER 0xEC6D1C
+
+#define mmTPC3_CFG_FUNC_MBIST_CNTRL 0xEC6E00
+
+#define mmTPC3_CFG_FUNC_MBIST_PAT 0xEC6E04
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_0 0xEC6E08
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_1 0xEC6E0C
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_2 0xEC6E10
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_3 0xEC6E14
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_4 0xEC6E18
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_5 0xEC6E1C
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_6 0xEC6E20
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_7 0xEC6E24
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_8 0xEC6E28
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC6E2C
+
+#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
new file mode 100644
index 000000000000..82a5261e852f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_CMDQ_REGS_H_
+#define ASIC_REG_TPC3_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC3_CMDQ_GLBL_CFG0 0xEC9000
+
+#define mmTPC3_CMDQ_GLBL_CFG1 0xEC9004
+
+#define mmTPC3_CMDQ_GLBL_PROT 0xEC9008
+
+#define mmTPC3_CMDQ_GLBL_ERR_CFG 0xEC900C
+
+#define mmTPC3_CMDQ_GLBL_ERR_ADDR_LO 0xEC9010
+
+#define mmTPC3_CMDQ_GLBL_ERR_ADDR_HI 0xEC9014
+
+#define mmTPC3_CMDQ_GLBL_ERR_WDATA 0xEC9018
+
+#define mmTPC3_CMDQ_GLBL_SECURE_PROPS 0xEC901C
+
+#define mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS 0xEC9020
+
+#define mmTPC3_CMDQ_GLBL_STS0 0xEC9024
+
+#define mmTPC3_CMDQ_GLBL_STS1 0xEC9028
+
+#define mmTPC3_CMDQ_CQ_CFG0 0xEC90B0
+
+#define mmTPC3_CMDQ_CQ_CFG1 0xEC90B4
+
+#define mmTPC3_CMDQ_CQ_ARUSER 0xEC90B8
+
+#define mmTPC3_CMDQ_CQ_PTR_LO 0xEC90C0
+
+#define mmTPC3_CMDQ_CQ_PTR_HI 0xEC90C4
+
+#define mmTPC3_CMDQ_CQ_TSIZE 0xEC90C8
+
+#define mmTPC3_CMDQ_CQ_CTL 0xEC90CC
+
+#define mmTPC3_CMDQ_CQ_PTR_LO_STS 0xEC90D4
+
+#define mmTPC3_CMDQ_CQ_PTR_HI_STS 0xEC90D8
+
+#define mmTPC3_CMDQ_CQ_TSIZE_STS 0xEC90DC
+
+#define mmTPC3_CMDQ_CQ_CTL_STS 0xEC90E0
+
+#define mmTPC3_CMDQ_CQ_STS0 0xEC90E4
+
+#define mmTPC3_CMDQ_CQ_STS1 0xEC90E8
+
+#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN 0xEC90F0
+
+#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xEC90F4
+
+#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT 0xEC90F8
+
+#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT 0xEC90FC
+
+#define mmTPC3_CMDQ_CQ_IFIFO_CNT 0xEC9108
+
+#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO 0xEC9120
+
+#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI 0xEC9124
+
+#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO 0xEC9128
+
+#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI 0xEC912C
+
+#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO 0xEC9130
+
+#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI 0xEC9134
+
+#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO 0xEC9138
+
+#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI 0xEC913C
+
+#define mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET 0xEC9140
+
+#define mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC9144
+
+#define mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC9148
+
+#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xEC914C
+
+#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xEC9150
+
+#define mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET 0xEC9154
+
+#define mmTPC3_CMDQ_CP_FENCE0_RDATA 0xEC9158
+
+#define mmTPC3_CMDQ_CP_FENCE1_RDATA 0xEC915C
+
+#define mmTPC3_CMDQ_CP_FENCE2_RDATA 0xEC9160
+
+#define mmTPC3_CMDQ_CP_FENCE3_RDATA 0xEC9164
+
+#define mmTPC3_CMDQ_CP_FENCE0_CNT 0xEC9168
+
+#define mmTPC3_CMDQ_CP_FENCE1_CNT 0xEC916C
+
+#define mmTPC3_CMDQ_CP_FENCE2_CNT 0xEC9170
+
+#define mmTPC3_CMDQ_CP_FENCE3_CNT 0xEC9174
+
+#define mmTPC3_CMDQ_CP_STS 0xEC9178
+
+#define mmTPC3_CMDQ_CP_CURRENT_INST_LO 0xEC917C
+
+#define mmTPC3_CMDQ_CP_CURRENT_INST_HI 0xEC9180
+
+#define mmTPC3_CMDQ_CP_BARRIER_CFG 0xEC9184
+
+#define mmTPC3_CMDQ_CP_DBG_0 0xEC9188
+
+#define mmTPC3_CMDQ_CQ_BUF_ADDR 0xEC9308
+
+#define mmTPC3_CMDQ_CQ_BUF_RDATA 0xEC930C
+
+#endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
new file mode 100644
index 000000000000..b05b1e18e664
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_QM_REGS_H_
+#define ASIC_REG_TPC3_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC3_QM_GLBL_CFG0 0xEC8000
+
+#define mmTPC3_QM_GLBL_CFG1 0xEC8004
+
+#define mmTPC3_QM_GLBL_PROT 0xEC8008
+
+#define mmTPC3_QM_GLBL_ERR_CFG 0xEC800C
+
+#define mmTPC3_QM_GLBL_ERR_ADDR_LO 0xEC8010
+
+#define mmTPC3_QM_GLBL_ERR_ADDR_HI 0xEC8014
+
+#define mmTPC3_QM_GLBL_ERR_WDATA 0xEC8018
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS 0xEC801C
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS 0xEC8020
+
+#define mmTPC3_QM_GLBL_STS0 0xEC8024
+
+#define mmTPC3_QM_GLBL_STS1 0xEC8028
+
+#define mmTPC3_QM_PQ_BASE_LO 0xEC8060
+
+#define mmTPC3_QM_PQ_BASE_HI 0xEC8064
+
+#define mmTPC3_QM_PQ_SIZE 0xEC8068
+
+#define mmTPC3_QM_PQ_PI 0xEC806C
+
+#define mmTPC3_QM_PQ_CI 0xEC8070
+
+#define mmTPC3_QM_PQ_CFG0 0xEC8074
+
+#define mmTPC3_QM_PQ_CFG1 0xEC8078
+
+#define mmTPC3_QM_PQ_ARUSER 0xEC807C
+
+#define mmTPC3_QM_PQ_PUSH0 0xEC8080
+
+#define mmTPC3_QM_PQ_PUSH1 0xEC8084
+
+#define mmTPC3_QM_PQ_PUSH2 0xEC8088
+
+#define mmTPC3_QM_PQ_PUSH3 0xEC808C
+
+#define mmTPC3_QM_PQ_STS0 0xEC8090
+
+#define mmTPC3_QM_PQ_STS1 0xEC8094
+
+#define mmTPC3_QM_PQ_RD_RATE_LIM_EN 0xEC80A0
+
+#define mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xEC80A4
+
+#define mmTPC3_QM_PQ_RD_RATE_LIM_SAT 0xEC80A8
+
+#define mmTPC3_QM_PQ_RD_RATE_LIM_TOUT 0xEC80AC
+
+#define mmTPC3_QM_CQ_CFG0 0xEC80B0
+
+#define mmTPC3_QM_CQ_CFG1 0xEC80B4
+
+#define mmTPC3_QM_CQ_ARUSER 0xEC80B8
+
+#define mmTPC3_QM_CQ_PTR_LO 0xEC80C0
+
+#define mmTPC3_QM_CQ_PTR_HI 0xEC80C4
+
+#define mmTPC3_QM_CQ_TSIZE 0xEC80C8
+
+#define mmTPC3_QM_CQ_CTL 0xEC80CC
+
+#define mmTPC3_QM_CQ_PTR_LO_STS 0xEC80D4
+
+#define mmTPC3_QM_CQ_PTR_HI_STS 0xEC80D8
+
+#define mmTPC3_QM_CQ_TSIZE_STS 0xEC80DC
+
+#define mmTPC3_QM_CQ_CTL_STS 0xEC80E0
+
+#define mmTPC3_QM_CQ_STS0 0xEC80E4
+
+#define mmTPC3_QM_CQ_STS1 0xEC80E8
+
+#define mmTPC3_QM_CQ_RD_RATE_LIM_EN 0xEC80F0
+
+#define mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xEC80F4
+
+#define mmTPC3_QM_CQ_RD_RATE_LIM_SAT 0xEC80F8
+
+#define mmTPC3_QM_CQ_RD_RATE_LIM_TOUT 0xEC80FC
+
+#define mmTPC3_QM_CQ_IFIFO_CNT 0xEC8108
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO 0xEC8120
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI 0xEC8124
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO 0xEC8128
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI 0xEC812C
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO 0xEC8130
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI 0xEC8134
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO 0xEC8138
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI 0xEC813C
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET 0xEC8140
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC8144
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC8148
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xEC814C
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xEC8150
+
+#define mmTPC3_QM_CP_LDMA_COMMIT_OFFSET 0xEC8154
+
+#define mmTPC3_QM_CP_FENCE0_RDATA 0xEC8158
+
+#define mmTPC3_QM_CP_FENCE1_RDATA 0xEC815C
+
+#define mmTPC3_QM_CP_FENCE2_RDATA 0xEC8160
+
+#define mmTPC3_QM_CP_FENCE3_RDATA 0xEC8164
+
+#define mmTPC3_QM_CP_FENCE0_CNT 0xEC8168
+
+#define mmTPC3_QM_CP_FENCE1_CNT 0xEC816C
+
+#define mmTPC3_QM_CP_FENCE2_CNT 0xEC8170
+
+#define mmTPC3_QM_CP_FENCE3_CNT 0xEC8174
+
+#define mmTPC3_QM_CP_STS 0xEC8178
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO 0xEC817C
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI 0xEC8180
+
+#define mmTPC3_QM_CP_BARRIER_CFG 0xEC8184
+
+#define mmTPC3_QM_CP_DBG_0 0xEC8188
+
+#define mmTPC3_QM_PQ_BUF_ADDR 0xEC8300
+
+#define mmTPC3_QM_PQ_BUF_RDATA 0xEC8304
+
+#define mmTPC3_QM_CQ_BUF_ADDR 0xEC8308
+
+#define mmTPC3_QM_CQ_BUF_RDATA 0xEC830C
+
+#endif /* ASIC_REG_TPC3_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
new file mode 100644
index 000000000000..5a2fd7652650
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_RTR_REGS_H_
+#define ASIC_REG_TPC3_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC3_RTR_HBW_RD_RQ_E_ARB 0xEC0100
+
+#define mmTPC3_RTR_HBW_RD_RQ_W_ARB 0xEC0104
+
+#define mmTPC3_RTR_HBW_RD_RQ_N_ARB 0xEC0108
+
+#define mmTPC3_RTR_HBW_RD_RQ_S_ARB 0xEC010C
+
+#define mmTPC3_RTR_HBW_RD_RQ_L_ARB 0xEC0110
+
+#define mmTPC3_RTR_HBW_E_ARB_MAX 0xEC0120
+
+#define mmTPC3_RTR_HBW_W_ARB_MAX 0xEC0124
+
+#define mmTPC3_RTR_HBW_N_ARB_MAX 0xEC0128
+
+#define mmTPC3_RTR_HBW_S_ARB_MAX 0xEC012C
+
+#define mmTPC3_RTR_HBW_L_ARB_MAX 0xEC0130
+
+#define mmTPC3_RTR_HBW_RD_RS_E_ARB 0xEC0140
+
+#define mmTPC3_RTR_HBW_RD_RS_W_ARB 0xEC0144
+
+#define mmTPC3_RTR_HBW_RD_RS_N_ARB 0xEC0148
+
+#define mmTPC3_RTR_HBW_RD_RS_S_ARB 0xEC014C
+
+#define mmTPC3_RTR_HBW_RD_RS_L_ARB 0xEC0150
+
+#define mmTPC3_RTR_HBW_WR_RQ_E_ARB 0xEC0170
+
+#define mmTPC3_RTR_HBW_WR_RQ_W_ARB 0xEC0174
+
+#define mmTPC3_RTR_HBW_WR_RQ_N_ARB 0xEC0178
+
+#define mmTPC3_RTR_HBW_WR_RQ_S_ARB 0xEC017C
+
+#define mmTPC3_RTR_HBW_WR_RQ_L_ARB 0xEC0180
+
+#define mmTPC3_RTR_HBW_WR_RS_E_ARB 0xEC0190
+
+#define mmTPC3_RTR_HBW_WR_RS_W_ARB 0xEC0194
+
+#define mmTPC3_RTR_HBW_WR_RS_N_ARB 0xEC0198
+
+#define mmTPC3_RTR_HBW_WR_RS_S_ARB 0xEC019C
+
+#define mmTPC3_RTR_HBW_WR_RS_L_ARB 0xEC01A0
+
+#define mmTPC3_RTR_LBW_RD_RQ_E_ARB 0xEC0200
+
+#define mmTPC3_RTR_LBW_RD_RQ_W_ARB 0xEC0204
+
+#define mmTPC3_RTR_LBW_RD_RQ_N_ARB 0xEC0208
+
+#define mmTPC3_RTR_LBW_RD_RQ_S_ARB 0xEC020C
+
+#define mmTPC3_RTR_LBW_RD_RQ_L_ARB 0xEC0210
+
+#define mmTPC3_RTR_LBW_E_ARB_MAX 0xEC0220
+
+#define mmTPC3_RTR_LBW_W_ARB_MAX 0xEC0224
+
+#define mmTPC3_RTR_LBW_N_ARB_MAX 0xEC0228
+
+#define mmTPC3_RTR_LBW_S_ARB_MAX 0xEC022C
+
+#define mmTPC3_RTR_LBW_L_ARB_MAX 0xEC0230
+
+#define mmTPC3_RTR_LBW_RD_RS_E_ARB 0xEC0250
+
+#define mmTPC3_RTR_LBW_RD_RS_W_ARB 0xEC0254
+
+#define mmTPC3_RTR_LBW_RD_RS_N_ARB 0xEC0258
+
+#define mmTPC3_RTR_LBW_RD_RS_S_ARB 0xEC025C
+
+#define mmTPC3_RTR_LBW_RD_RS_L_ARB 0xEC0260
+
+#define mmTPC3_RTR_LBW_WR_RQ_E_ARB 0xEC0270
+
+#define mmTPC3_RTR_LBW_WR_RQ_W_ARB 0xEC0274
+
+#define mmTPC3_RTR_LBW_WR_RQ_N_ARB 0xEC0278
+
+#define mmTPC3_RTR_LBW_WR_RQ_S_ARB 0xEC027C
+
+#define mmTPC3_RTR_LBW_WR_RQ_L_ARB 0xEC0280
+
+#define mmTPC3_RTR_LBW_WR_RS_E_ARB 0xEC0290
+
+#define mmTPC3_RTR_LBW_WR_RS_W_ARB 0xEC0294
+
+#define mmTPC3_RTR_LBW_WR_RS_N_ARB 0xEC0298
+
+#define mmTPC3_RTR_LBW_WR_RS_S_ARB 0xEC029C
+
+#define mmTPC3_RTR_LBW_WR_RS_L_ARB 0xEC02A0
+
+#define mmTPC3_RTR_DBG_E_ARB 0xEC0300
+
+#define mmTPC3_RTR_DBG_W_ARB 0xEC0304
+
+#define mmTPC3_RTR_DBG_N_ARB 0xEC0308
+
+#define mmTPC3_RTR_DBG_S_ARB 0xEC030C
+
+#define mmTPC3_RTR_DBG_L_ARB 0xEC0310
+
+#define mmTPC3_RTR_DBG_E_ARB_MAX 0xEC0320
+
+#define mmTPC3_RTR_DBG_W_ARB_MAX 0xEC0324
+
+#define mmTPC3_RTR_DBG_N_ARB_MAX 0xEC0328
+
+#define mmTPC3_RTR_DBG_S_ARB_MAX 0xEC032C
+
+#define mmTPC3_RTR_DBG_L_ARB_MAX 0xEC0330
+
+#define mmTPC3_RTR_SPLIT_COEF_0 0xEC0400
+
+#define mmTPC3_RTR_SPLIT_COEF_1 0xEC0404
+
+#define mmTPC3_RTR_SPLIT_COEF_2 0xEC0408
+
+#define mmTPC3_RTR_SPLIT_COEF_3 0xEC040C
+
+#define mmTPC3_RTR_SPLIT_COEF_4 0xEC0410
+
+#define mmTPC3_RTR_SPLIT_COEF_5 0xEC0414
+
+#define mmTPC3_RTR_SPLIT_COEF_6 0xEC0418
+
+#define mmTPC3_RTR_SPLIT_COEF_7 0xEC041C
+
+#define mmTPC3_RTR_SPLIT_COEF_8 0xEC0420
+
+#define mmTPC3_RTR_SPLIT_COEF_9 0xEC0424
+
+#define mmTPC3_RTR_SPLIT_CFG 0xEC0440
+
+#define mmTPC3_RTR_SPLIT_RD_SAT 0xEC0444
+
+#define mmTPC3_RTR_SPLIT_RD_RST_TOKEN 0xEC0448
+
+#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_0 0xEC044C
+
+#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_1 0xEC0450
+
+#define mmTPC3_RTR_SPLIT_WR_SAT 0xEC0454
+
+#define mmTPC3_RTR_WPLIT_WR_TST_TOLEN 0xEC0458
+
+#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_0 0xEC045C
+
+#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_1 0xEC0460
+
+#define mmTPC3_RTR_HBW_RANGE_HIT 0xEC0470
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_0 0xEC0480
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_1 0xEC0484
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_2 0xEC0488
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_3 0xEC048C
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_4 0xEC0490
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_5 0xEC0494
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_6 0xEC0498
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_L_7 0xEC049C
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_0 0xEC04A0
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_1 0xEC04A4
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_2 0xEC04A8
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_3 0xEC04AC
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_4 0xEC04B0
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_5 0xEC04B4
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_6 0xEC04B8
+
+#define mmTPC3_RTR_HBW_RANGE_MASK_H_7 0xEC04BC
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_0 0xEC04C0
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_1 0xEC04C4
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_2 0xEC04C8
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_3 0xEC04CC
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_4 0xEC04D0
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_5 0xEC04D4
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_6 0xEC04D8
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_L_7 0xEC04DC
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_0 0xEC04E0
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_1 0xEC04E4
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_2 0xEC04E8
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_3 0xEC04EC
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_4 0xEC04F0
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_5 0xEC04F4
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_6 0xEC04F8
+
+#define mmTPC3_RTR_HBW_RANGE_BASE_H_7 0xEC04FC
+
+#define mmTPC3_RTR_LBW_RANGE_HIT 0xEC0500
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_0 0xEC0510
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_1 0xEC0514
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_2 0xEC0518
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_3 0xEC051C
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_4 0xEC0520
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_5 0xEC0524
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_6 0xEC0528
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_7 0xEC052C
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_8 0xEC0530
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_9 0xEC0534
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_10 0xEC0538
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_11 0xEC053C
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_12 0xEC0540
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_13 0xEC0544
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_14 0xEC0548
+
+#define mmTPC3_RTR_LBW_RANGE_MASK_15 0xEC054C
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_0 0xEC0550
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_1 0xEC0554
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_2 0xEC0558
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_3 0xEC055C
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_4 0xEC0560
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_5 0xEC0564
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_6 0xEC0568
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_7 0xEC056C
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_8 0xEC0570
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_9 0xEC0574
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_10 0xEC0578
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_11 0xEC057C
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_12 0xEC0580
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_13 0xEC0584
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_14 0xEC0588
+
+#define mmTPC3_RTR_LBW_RANGE_BASE_15 0xEC058C
+
+#define mmTPC3_RTR_RGLTR 0xEC0590
+
+#define mmTPC3_RTR_RGLTR_WR_RESULT 0xEC0594
+
+#define mmTPC3_RTR_RGLTR_RD_RESULT 0xEC0598
+
+#define mmTPC3_RTR_SCRAMB_EN 0xEC0600
+
+#define mmTPC3_RTR_NON_LIN_SCRAMB 0xEC0604
+
+#endif /* ASIC_REG_TPC3_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
new file mode 100644
index 000000000000..d64a100075f2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_CFG_REGS_H_
+#define ASIC_REG_TPC4_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF06400
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF06404
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF06408
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF0640C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF06410
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF06414
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF06418
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF0641C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF06420
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF06424
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF06428
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF0642C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF06430
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF06434
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF06438
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF0643C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF06440
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF06444
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF06448
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF0644C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF06450
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF06454
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF06458
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF0645C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF06460
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF06464
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF06468
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF0646C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF06470
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF06474
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF06478
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF0647C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF06480
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF06484
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF06488
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF0648C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF06490
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF06494
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF06498
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF0649C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF064A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF064A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF064A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF064AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF064B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF064B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF064B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF064BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF064C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF064C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF064C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF064CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF064D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF064D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF064D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF064DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF064E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF064E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF064E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF064EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF064F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF064F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF064F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF064FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF06500
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF06504
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF06508
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF0650C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF06510
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF06514
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF06518
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF0651C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF06520
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF06524
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF06528
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF0652C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF06530
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF06534
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF06538
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF0653C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF06540
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF06544
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF06548
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF0654C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF06550
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF06554
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF06558
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF0655C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF06560
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF06564
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF06568
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF0656C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF06570
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF06574
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF06578
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF0657C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF06580
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF06584
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF06588
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF0658C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF06590
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF06594
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF06598
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF0659C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF065A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF065A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF065A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF065AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF065B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF065B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF065B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF065BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF065C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF065C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF065C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF065CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF065D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF065D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF065D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF065DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF065E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF065E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF065E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF065EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF065F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF065F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF065F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF065FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF06600
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF06604
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF06608
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF0660C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF06610
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF06614
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF06618
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF0661C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF06620
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF06624
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF06628
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF0662C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF06630
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF06634
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF06638
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF0663C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF06640
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF06644
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF06648
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF0664C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF06650
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF06654
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF06658
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF0665C
+
+#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF06660
+
+#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF06664
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_0 0xF06668
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_0 0xF0666C
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_1 0xF06670
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_1 0xF06674
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_2 0xF06678
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_2 0xF0667C
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_3 0xF06680
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_3 0xF06684
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_4 0xF06688
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_4 0xF0668C
+
+#define mmTPC4_CFG_KERNEL_SRF_0 0xF06690
+
+#define mmTPC4_CFG_KERNEL_SRF_1 0xF06694
+
+#define mmTPC4_CFG_KERNEL_SRF_2 0xF06698
+
+#define mmTPC4_CFG_KERNEL_SRF_3 0xF0669C
+
+#define mmTPC4_CFG_KERNEL_SRF_4 0xF066A0
+
+#define mmTPC4_CFG_KERNEL_SRF_5 0xF066A4
+
+#define mmTPC4_CFG_KERNEL_SRF_6 0xF066A8
+
+#define mmTPC4_CFG_KERNEL_SRF_7 0xF066AC
+
+#define mmTPC4_CFG_KERNEL_SRF_8 0xF066B0
+
+#define mmTPC4_CFG_KERNEL_SRF_9 0xF066B4
+
+#define mmTPC4_CFG_KERNEL_SRF_10 0xF066B8
+
+#define mmTPC4_CFG_KERNEL_SRF_11 0xF066BC
+
+#define mmTPC4_CFG_KERNEL_SRF_12 0xF066C0
+
+#define mmTPC4_CFG_KERNEL_SRF_13 0xF066C4
+
+#define mmTPC4_CFG_KERNEL_SRF_14 0xF066C8
+
+#define mmTPC4_CFG_KERNEL_SRF_15 0xF066CC
+
+#define mmTPC4_CFG_KERNEL_SRF_16 0xF066D0
+
+#define mmTPC4_CFG_KERNEL_SRF_17 0xF066D4
+
+#define mmTPC4_CFG_KERNEL_SRF_18 0xF066D8
+
+#define mmTPC4_CFG_KERNEL_SRF_19 0xF066DC
+
+#define mmTPC4_CFG_KERNEL_SRF_20 0xF066E0
+
+#define mmTPC4_CFG_KERNEL_SRF_21 0xF066E4
+
+#define mmTPC4_CFG_KERNEL_SRF_22 0xF066E8
+
+#define mmTPC4_CFG_KERNEL_SRF_23 0xF066EC
+
+#define mmTPC4_CFG_KERNEL_SRF_24 0xF066F0
+
+#define mmTPC4_CFG_KERNEL_SRF_25 0xF066F4
+
+#define mmTPC4_CFG_KERNEL_SRF_26 0xF066F8
+
+#define mmTPC4_CFG_KERNEL_SRF_27 0xF066FC
+
+#define mmTPC4_CFG_KERNEL_SRF_28 0xF06700
+
+#define mmTPC4_CFG_KERNEL_SRF_29 0xF06704
+
+#define mmTPC4_CFG_KERNEL_SRF_30 0xF06708
+
+#define mmTPC4_CFG_KERNEL_SRF_31 0xF0670C
+
+#define mmTPC4_CFG_KERNEL_KERNEL_CONFIG 0xF06710
+
+#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF06714
+
+#define mmTPC4_CFG_RESERVED_DESC_END 0xF06738
+
+#define mmTPC4_CFG_ROUND_CSR 0xF067FC
+
+#define mmTPC4_CFG_TBUF_BASE_ADDR_LOW 0xF06800
+
+#define mmTPC4_CFG_TBUF_BASE_ADDR_HIGH 0xF06804
+
+#define mmTPC4_CFG_SEMAPHORE 0xF06808
+
+#define mmTPC4_CFG_VFLAGS 0xF0680C
+
+#define mmTPC4_CFG_SFLAGS 0xF06810
+
+#define mmTPC4_CFG_LFSR_POLYNOM 0xF06818
+
+#define mmTPC4_CFG_STATUS 0xF0681C
+
+#define mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH 0xF06820
+
+#define mmTPC4_CFG_CFG_SUBTRACT_VALUE 0xF06824
+
+#define mmTPC4_CFG_SM_BASE_ADDRESS_LOW 0xF06828
+
+#define mmTPC4_CFG_SM_BASE_ADDRESS_HIGH 0xF0682C
+
+#define mmTPC4_CFG_TPC_CMD 0xF06830
+
+#define mmTPC4_CFG_TPC_EXECUTE 0xF06838
+
+#define mmTPC4_CFG_TPC_STALL 0xF0683C
+
+#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_LOW 0xF06840
+
+#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF06844
+
+#define mmTPC4_CFG_MSS_CONFIG 0xF06854
+
+#define mmTPC4_CFG_TPC_INTR_CAUSE 0xF06858
+
+#define mmTPC4_CFG_TPC_INTR_MASK 0xF0685C
+
+#define mmTPC4_CFG_TSB_CONFIG 0xF06860
+
+#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF06A00
+
+#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF06A04
+
+#define mmTPC4_CFG_QM_TENSOR_0_PADDING_VALUE 0xF06A08
+
+#define mmTPC4_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF06A0C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF06A10
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF06A14
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF06A18
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF06A1C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF06A20
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF06A24
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF06A28
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF06A2C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF06A30
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF06A34
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF06A38
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF06A3C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF06A40
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF06A44
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF06A48
+
+#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF06A4C
+
+#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF06A50
+
+#define mmTPC4_CFG_QM_TENSOR_1_PADDING_VALUE 0xF06A54
+
+#define mmTPC4_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF06A58
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF06A5C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF06A60
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF06A64
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF06A68
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF06A6C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF06A70
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF06A74
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF06A78
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF06A7C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF06A80
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF06A84
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF06A88
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF06A8C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF06A90
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF06A94
+
+#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF06A98
+
+#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF06A9C
+
+#define mmTPC4_CFG_QM_TENSOR_2_PADDING_VALUE 0xF06AA0
+
+#define mmTPC4_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF06AA4
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF06AA8
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF06AAC
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF06AB0
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF06AB4
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF06AB8
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF06ABC
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF06AC0
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF06AC4
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF06AC8
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF06ACC
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF06AD0
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF06AD4
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF06AD8
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF06ADC
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF06AE0
+
+#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF06AE4
+
+#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF06AE8
+
+#define mmTPC4_CFG_QM_TENSOR_3_PADDING_VALUE 0xF06AEC
+
+#define mmTPC4_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF06AF0
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF06AF4
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF06AF8
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF06AFC
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF06B00
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF06B04
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF06B08
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF06B0C
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF06B10
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF06B14
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF06B18
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF06B1C
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF06B20
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF06B24
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF06B28
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF06B2C
+
+#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF06B30
+
+#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF06B34
+
+#define mmTPC4_CFG_QM_TENSOR_4_PADDING_VALUE 0xF06B38
+
+#define mmTPC4_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF06B3C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF06B40
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF06B44
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF06B48
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF06B4C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF06B50
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF06B54
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF06B58
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF06B5C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF06B60
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF06B64
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF06B68
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF06B6C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF06B70
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF06B74
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF06B78
+
+#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF06B7C
+
+#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF06B80
+
+#define mmTPC4_CFG_QM_TENSOR_5_PADDING_VALUE 0xF06B84
+
+#define mmTPC4_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF06B88
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF06B8C
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF06B90
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF06B94
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF06B98
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF06B9C
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF06BA0
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF06BA4
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF06BA8
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF06BAC
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF06BB0
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF06BB4
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF06BB8
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF06BBC
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF06BC0
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF06BC4
+
+#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF06BC8
+
+#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF06BCC
+
+#define mmTPC4_CFG_QM_TENSOR_6_PADDING_VALUE 0xF06BD0
+
+#define mmTPC4_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF06BD4
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF06BD8
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF06BDC
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF06BE0
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF06BE4
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF06BE8
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF06BEC
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF06BF0
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF06BF4
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF06BF8
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF06BFC
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF06C00
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF06C04
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF06C08
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF06C0C
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF06C10
+
+#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF06C14
+
+#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF06C18
+
+#define mmTPC4_CFG_QM_TENSOR_7_PADDING_VALUE 0xF06C1C
+
+#define mmTPC4_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF06C20
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF06C24
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF06C28
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF06C2C
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF06C30
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF06C34
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF06C38
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF06C3C
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF06C40
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF06C44
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF06C48
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF06C4C
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF06C50
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF06C54
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF06C58
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF06C5C
+
+#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF06C60
+
+#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF06C64
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_0 0xF06C68
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_0 0xF06C6C
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_1 0xF06C70
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_1 0xF06C74
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_2 0xF06C78
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_2 0xF06C7C
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_3 0xF06C80
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_3 0xF06C84
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_4 0xF06C88
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_4 0xF06C8C
+
+#define mmTPC4_CFG_QM_SRF_0 0xF06C90
+
+#define mmTPC4_CFG_QM_SRF_1 0xF06C94
+
+#define mmTPC4_CFG_QM_SRF_2 0xF06C98
+
+#define mmTPC4_CFG_QM_SRF_3 0xF06C9C
+
+#define mmTPC4_CFG_QM_SRF_4 0xF06CA0
+
+#define mmTPC4_CFG_QM_SRF_5 0xF06CA4
+
+#define mmTPC4_CFG_QM_SRF_6 0xF06CA8
+
+#define mmTPC4_CFG_QM_SRF_7 0xF06CAC
+
+#define mmTPC4_CFG_QM_SRF_8 0xF06CB0
+
+#define mmTPC4_CFG_QM_SRF_9 0xF06CB4
+
+#define mmTPC4_CFG_QM_SRF_10 0xF06CB8
+
+#define mmTPC4_CFG_QM_SRF_11 0xF06CBC
+
+#define mmTPC4_CFG_QM_SRF_12 0xF06CC0
+
+#define mmTPC4_CFG_QM_SRF_13 0xF06CC4
+
+#define mmTPC4_CFG_QM_SRF_14 0xF06CC8
+
+#define mmTPC4_CFG_QM_SRF_15 0xF06CCC
+
+#define mmTPC4_CFG_QM_SRF_16 0xF06CD0
+
+#define mmTPC4_CFG_QM_SRF_17 0xF06CD4
+
+#define mmTPC4_CFG_QM_SRF_18 0xF06CD8
+
+#define mmTPC4_CFG_QM_SRF_19 0xF06CDC
+
+#define mmTPC4_CFG_QM_SRF_20 0xF06CE0
+
+#define mmTPC4_CFG_QM_SRF_21 0xF06CE4
+
+#define mmTPC4_CFG_QM_SRF_22 0xF06CE8
+
+#define mmTPC4_CFG_QM_SRF_23 0xF06CEC
+
+#define mmTPC4_CFG_QM_SRF_24 0xF06CF0
+
+#define mmTPC4_CFG_QM_SRF_25 0xF06CF4
+
+#define mmTPC4_CFG_QM_SRF_26 0xF06CF8
+
+#define mmTPC4_CFG_QM_SRF_27 0xF06CFC
+
+#define mmTPC4_CFG_QM_SRF_28 0xF06D00
+
+#define mmTPC4_CFG_QM_SRF_29 0xF06D04
+
+#define mmTPC4_CFG_QM_SRF_30 0xF06D08
+
+#define mmTPC4_CFG_QM_SRF_31 0xF06D0C
+
+#define mmTPC4_CFG_QM_KERNEL_CONFIG 0xF06D10
+
+#define mmTPC4_CFG_QM_SYNC_OBJECT_MESSAGE 0xF06D14
+
+#define mmTPC4_CFG_ARUSER 0xF06D18
+
+#define mmTPC4_CFG_AWUSER 0xF06D1C
+
+#define mmTPC4_CFG_FUNC_MBIST_CNTRL 0xF06E00
+
+#define mmTPC4_CFG_FUNC_MBIST_PAT 0xF06E04
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_0 0xF06E08
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_1 0xF06E0C
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_2 0xF06E10
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_3 0xF06E14
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_4 0xF06E18
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_5 0xF06E1C
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_6 0xF06E20
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_7 0xF06E24
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_8 0xF06E28
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF06E2C
+
+#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
+
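The KERNEL_TENSOR_* and QM_TENSOR_* blocks above repeat at a fixed pitch: each tensor descriptor spans 0x4C bytes (four header registers followed by five DIM_n groups of SIZE/STRIDE/BASE_OFFSET), which is why TENSOR_0 starts at 0xF06400 and TENSOR_1 at 0xF0644C. A minimal C sketch of that arithmetic, using hypothetical constant and helper names that are not defines from this patch:

#include <stdint.h>

/* Pitch of one tensor descriptor: 4 header regs + 5 dims * 3 regs = 19 regs = 0x4C bytes. */
#define TPC_TENSOR_DESC_PITCH   0x4Cu
/* Each DIM_n group (SIZE, STRIDE, BASE_OFFSET) spans 3 regs = 0xC bytes. */
#define TPC_TENSOR_DIM_PITCH    0x0Cu
/* The first DIM_0_SIZE register sits 0x10 bytes after BASE_ADDR_LOW. */
#define TPC_TENSOR_DIMS_OFFSET  0x10u

/* Address of mmTPC4_CFG_KERNEL_TENSOR_<t>_DIM_<d>_SIZE, t in [0,7], d in [0,4]. */
static inline uint32_t tpc4_cfg_kernel_tensor_dim_size(unsigned int t, unsigned int d)
{
	return 0xF06400u /* mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */
		+ t * TPC_TENSOR_DESC_PITCH
		+ TPC_TENSOR_DIMS_OFFSET
		+ d * TPC_TENSOR_DIM_PITCH;
}

For example, tpc4_cfg_kernel_tensor_dim_size(1, 0) yields 0xF0645C, matching mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE above.
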
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
new file mode 100644
index 000000000000..565b42885b0d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_CMDQ_REGS_H_
+#define ASIC_REG_TPC4_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC4_CMDQ_GLBL_CFG0 0xF09000
+
+#define mmTPC4_CMDQ_GLBL_CFG1 0xF09004
+
+#define mmTPC4_CMDQ_GLBL_PROT 0xF09008
+
+#define mmTPC4_CMDQ_GLBL_ERR_CFG 0xF0900C
+
+#define mmTPC4_CMDQ_GLBL_ERR_ADDR_LO 0xF09010
+
+#define mmTPC4_CMDQ_GLBL_ERR_ADDR_HI 0xF09014
+
+#define mmTPC4_CMDQ_GLBL_ERR_WDATA 0xF09018
+
+#define mmTPC4_CMDQ_GLBL_SECURE_PROPS 0xF0901C
+
+#define mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS 0xF09020
+
+#define mmTPC4_CMDQ_GLBL_STS0 0xF09024
+
+#define mmTPC4_CMDQ_GLBL_STS1 0xF09028
+
+#define mmTPC4_CMDQ_CQ_CFG0 0xF090B0
+
+#define mmTPC4_CMDQ_CQ_CFG1 0xF090B4
+
+#define mmTPC4_CMDQ_CQ_ARUSER 0xF090B8
+
+#define mmTPC4_CMDQ_CQ_PTR_LO 0xF090C0
+
+#define mmTPC4_CMDQ_CQ_PTR_HI 0xF090C4
+
+#define mmTPC4_CMDQ_CQ_TSIZE 0xF090C8
+
+#define mmTPC4_CMDQ_CQ_CTL 0xF090CC
+
+#define mmTPC4_CMDQ_CQ_PTR_LO_STS 0xF090D4
+
+#define mmTPC4_CMDQ_CQ_PTR_HI_STS 0xF090D8
+
+#define mmTPC4_CMDQ_CQ_TSIZE_STS 0xF090DC
+
+#define mmTPC4_CMDQ_CQ_CTL_STS 0xF090E0
+
+#define mmTPC4_CMDQ_CQ_STS0 0xF090E4
+
+#define mmTPC4_CMDQ_CQ_STS1 0xF090E8
+
+#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN 0xF090F0
+
+#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF090F4
+
+#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT 0xF090F8
+
+#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF090FC
+
+#define mmTPC4_CMDQ_CQ_IFIFO_CNT 0xF09108
+
+#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF09120
+
+#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF09124
+
+#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF09128
+
+#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF0912C
+
+#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF09130
+
+#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF09134
+
+#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF09138
+
+#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF0913C
+
+#define mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF09140
+
+#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF09144
+
+#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF09148
+
+#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF0914C
+
+#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF09150
+
+#define mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF09154
+
+#define mmTPC4_CMDQ_CP_FENCE0_RDATA 0xF09158
+
+#define mmTPC4_CMDQ_CP_FENCE1_RDATA 0xF0915C
+
+#define mmTPC4_CMDQ_CP_FENCE2_RDATA 0xF09160
+
+#define mmTPC4_CMDQ_CP_FENCE3_RDATA 0xF09164
+
+#define mmTPC4_CMDQ_CP_FENCE0_CNT 0xF09168
+
+#define mmTPC4_CMDQ_CP_FENCE1_CNT 0xF0916C
+
+#define mmTPC4_CMDQ_CP_FENCE2_CNT 0xF09170
+
+#define mmTPC4_CMDQ_CP_FENCE3_CNT 0xF09174
+
+#define mmTPC4_CMDQ_CP_STS 0xF09178
+
+#define mmTPC4_CMDQ_CP_CURRENT_INST_LO 0xF0917C
+
+#define mmTPC4_CMDQ_CP_CURRENT_INST_HI 0xF09180
+
+#define mmTPC4_CMDQ_CP_BARRIER_CFG 0xF09184
+
+#define mmTPC4_CMDQ_CP_DBG_0 0xF09188
+
+#define mmTPC4_CMDQ_CQ_BUF_ADDR 0xF09308
+
+#define mmTPC4_CMDQ_CQ_BUF_RDATA 0xF0930C
+
+#endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
new file mode 100644
index 000000000000..196da3f12710
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_QM_REGS_H_
+#define ASIC_REG_TPC4_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC4_QM_GLBL_CFG0 0xF08000
+
+#define mmTPC4_QM_GLBL_CFG1 0xF08004
+
+#define mmTPC4_QM_GLBL_PROT 0xF08008
+
+#define mmTPC4_QM_GLBL_ERR_CFG 0xF0800C
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_LO 0xF08010
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_HI 0xF08014
+
+#define mmTPC4_QM_GLBL_ERR_WDATA 0xF08018
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS 0xF0801C
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS 0xF08020
+
+#define mmTPC4_QM_GLBL_STS0 0xF08024
+
+#define mmTPC4_QM_GLBL_STS1 0xF08028
+
+#define mmTPC4_QM_PQ_BASE_LO 0xF08060
+
+#define mmTPC4_QM_PQ_BASE_HI 0xF08064
+
+#define mmTPC4_QM_PQ_SIZE 0xF08068
+
+#define mmTPC4_QM_PQ_PI 0xF0806C
+
+#define mmTPC4_QM_PQ_CI 0xF08070
+
+#define mmTPC4_QM_PQ_CFG0 0xF08074
+
+#define mmTPC4_QM_PQ_CFG1 0xF08078
+
+#define mmTPC4_QM_PQ_ARUSER 0xF0807C
+
+#define mmTPC4_QM_PQ_PUSH0 0xF08080
+
+#define mmTPC4_QM_PQ_PUSH1 0xF08084
+
+#define mmTPC4_QM_PQ_PUSH2 0xF08088
+
+#define mmTPC4_QM_PQ_PUSH3 0xF0808C
+
+#define mmTPC4_QM_PQ_STS0 0xF08090
+
+#define mmTPC4_QM_PQ_STS1 0xF08094
+
+#define mmTPC4_QM_PQ_RD_RATE_LIM_EN 0xF080A0
+
+#define mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF080A4
+
+#define mmTPC4_QM_PQ_RD_RATE_LIM_SAT 0xF080A8
+
+#define mmTPC4_QM_PQ_RD_RATE_LIM_TOUT 0xF080AC
+
+#define mmTPC4_QM_CQ_CFG0 0xF080B0
+
+#define mmTPC4_QM_CQ_CFG1 0xF080B4
+
+#define mmTPC4_QM_CQ_ARUSER 0xF080B8
+
+#define mmTPC4_QM_CQ_PTR_LO 0xF080C0
+
+#define mmTPC4_QM_CQ_PTR_HI 0xF080C4
+
+#define mmTPC4_QM_CQ_TSIZE 0xF080C8
+
+#define mmTPC4_QM_CQ_CTL 0xF080CC
+
+#define mmTPC4_QM_CQ_PTR_LO_STS 0xF080D4
+
+#define mmTPC4_QM_CQ_PTR_HI_STS 0xF080D8
+
+#define mmTPC4_QM_CQ_TSIZE_STS 0xF080DC
+
+#define mmTPC4_QM_CQ_CTL_STS 0xF080E0
+
+#define mmTPC4_QM_CQ_STS0 0xF080E4
+
+#define mmTPC4_QM_CQ_STS1 0xF080E8
+
+#define mmTPC4_QM_CQ_RD_RATE_LIM_EN 0xF080F0
+
+#define mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF080F4
+
+#define mmTPC4_QM_CQ_RD_RATE_LIM_SAT 0xF080F8
+
+#define mmTPC4_QM_CQ_RD_RATE_LIM_TOUT 0xF080FC
+
+#define mmTPC4_QM_CQ_IFIFO_CNT 0xF08108
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO 0xF08120
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI 0xF08124
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO 0xF08128
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI 0xF0812C
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO 0xF08130
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI 0xF08134
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO 0xF08138
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI 0xF0813C
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET 0xF08140
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF08144
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF08148
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF0814C
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF08150
+
+#define mmTPC4_QM_CP_LDMA_COMMIT_OFFSET 0xF08154
+
+#define mmTPC4_QM_CP_FENCE0_RDATA 0xF08158
+
+#define mmTPC4_QM_CP_FENCE1_RDATA 0xF0815C
+
+#define mmTPC4_QM_CP_FENCE2_RDATA 0xF08160
+
+#define mmTPC4_QM_CP_FENCE3_RDATA 0xF08164
+
+#define mmTPC4_QM_CP_FENCE0_CNT 0xF08168
+
+#define mmTPC4_QM_CP_FENCE1_CNT 0xF0816C
+
+#define mmTPC4_QM_CP_FENCE2_CNT 0xF08170
+
+#define mmTPC4_QM_CP_FENCE3_CNT 0xF08174
+
+#define mmTPC4_QM_CP_STS 0xF08178
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO 0xF0817C
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI 0xF08180
+
+#define mmTPC4_QM_CP_BARRIER_CFG 0xF08184
+
+#define mmTPC4_QM_CP_DBG_0 0xF08188
+
+#define mmTPC4_QM_PQ_BUF_ADDR 0xF08300
+
+#define mmTPC4_QM_PQ_BUF_RDATA 0xF08304
+
+#define mmTPC4_QM_CQ_BUF_ADDR 0xF08308
+
+#define mmTPC4_QM_CQ_BUF_RDATA 0xF0830C
+
+#endif /* ASIC_REG_TPC4_QM_REGS_H_ */
+
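The *_LO/*_HI pairs in the QM and CMDQ blocks (PQ_BASE, CQ_PTR, CP_MSG_BASEn_ADDR, and so on) hold the two 32-bit halves of a 64-bit value. A minimal sketch of splitting an address across such a pair, with a stand-in write helper rather than the driver's actual register-access primitive:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's 32-bit register write primitive (hypothetical). */
static void reg_write32(uint32_t reg, uint32_t val)
{
	printf("reg 0x%06X <- 0x%08X\n", reg, val);
}

/*
 * Program a 64-bit queue base address into a LO/HI register pair, e.g.
 * mmTPC4_QM_PQ_BASE_LO (0xF08060) and mmTPC4_QM_PQ_BASE_HI (0xF08064).
 */
static void tpc_qm_set_pq_base(uint32_t lo_reg, uint32_t hi_reg, uint64_t addr)
{
	reg_write32(lo_reg, (uint32_t)addr);
	reg_write32(hi_reg, (uint32_t)(addr >> 32));
}
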
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
new file mode 100644
index 000000000000..8b54041d144a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_RTR_REGS_H_
+#define ASIC_REG_TPC4_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC4_RTR_HBW_RD_RQ_E_ARB 0xF00100
+
+#define mmTPC4_RTR_HBW_RD_RQ_W_ARB 0xF00104
+
+#define mmTPC4_RTR_HBW_RD_RQ_N_ARB 0xF00108
+
+#define mmTPC4_RTR_HBW_RD_RQ_S_ARB 0xF0010C
+
+#define mmTPC4_RTR_HBW_RD_RQ_L_ARB 0xF00110
+
+#define mmTPC4_RTR_HBW_E_ARB_MAX 0xF00120
+
+#define mmTPC4_RTR_HBW_W_ARB_MAX 0xF00124
+
+#define mmTPC4_RTR_HBW_N_ARB_MAX 0xF00128
+
+#define mmTPC4_RTR_HBW_S_ARB_MAX 0xF0012C
+
+#define mmTPC4_RTR_HBW_L_ARB_MAX 0xF00130
+
+#define mmTPC4_RTR_HBW_RD_RS_E_ARB 0xF00140
+
+#define mmTPC4_RTR_HBW_RD_RS_W_ARB 0xF00144
+
+#define mmTPC4_RTR_HBW_RD_RS_N_ARB 0xF00148
+
+#define mmTPC4_RTR_HBW_RD_RS_S_ARB 0xF0014C
+
+#define mmTPC4_RTR_HBW_RD_RS_L_ARB 0xF00150
+
+#define mmTPC4_RTR_HBW_WR_RQ_E_ARB 0xF00170
+
+#define mmTPC4_RTR_HBW_WR_RQ_W_ARB 0xF00174
+
+#define mmTPC4_RTR_HBW_WR_RQ_N_ARB 0xF00178
+
+#define mmTPC4_RTR_HBW_WR_RQ_S_ARB 0xF0017C
+
+#define mmTPC4_RTR_HBW_WR_RQ_L_ARB 0xF00180
+
+#define mmTPC4_RTR_HBW_WR_RS_E_ARB 0xF00190
+
+#define mmTPC4_RTR_HBW_WR_RS_W_ARB 0xF00194
+
+#define mmTPC4_RTR_HBW_WR_RS_N_ARB 0xF00198
+
+#define mmTPC4_RTR_HBW_WR_RS_S_ARB 0xF0019C
+
+#define mmTPC4_RTR_HBW_WR_RS_L_ARB 0xF001A0
+
+#define mmTPC4_RTR_LBW_RD_RQ_E_ARB 0xF00200
+
+#define mmTPC4_RTR_LBW_RD_RQ_W_ARB 0xF00204
+
+#define mmTPC4_RTR_LBW_RD_RQ_N_ARB 0xF00208
+
+#define mmTPC4_RTR_LBW_RD_RQ_S_ARB 0xF0020C
+
+#define mmTPC4_RTR_LBW_RD_RQ_L_ARB 0xF00210
+
+#define mmTPC4_RTR_LBW_E_ARB_MAX 0xF00220
+
+#define mmTPC4_RTR_LBW_W_ARB_MAX 0xF00224
+
+#define mmTPC4_RTR_LBW_N_ARB_MAX 0xF00228
+
+#define mmTPC4_RTR_LBW_S_ARB_MAX 0xF0022C
+
+#define mmTPC4_RTR_LBW_L_ARB_MAX 0xF00230
+
+#define mmTPC4_RTR_LBW_RD_RS_E_ARB 0xF00250
+
+#define mmTPC4_RTR_LBW_RD_RS_W_ARB 0xF00254
+
+#define mmTPC4_RTR_LBW_RD_RS_N_ARB 0xF00258
+
+#define mmTPC4_RTR_LBW_RD_RS_S_ARB 0xF0025C
+
+#define mmTPC4_RTR_LBW_RD_RS_L_ARB 0xF00260
+
+#define mmTPC4_RTR_LBW_WR_RQ_E_ARB 0xF00270
+
+#define mmTPC4_RTR_LBW_WR_RQ_W_ARB 0xF00274
+
+#define mmTPC4_RTR_LBW_WR_RQ_N_ARB 0xF00278
+
+#define mmTPC4_RTR_LBW_WR_RQ_S_ARB 0xF0027C
+
+#define mmTPC4_RTR_LBW_WR_RQ_L_ARB 0xF00280
+
+#define mmTPC4_RTR_LBW_WR_RS_E_ARB 0xF00290
+
+#define mmTPC4_RTR_LBW_WR_RS_W_ARB 0xF00294
+
+#define mmTPC4_RTR_LBW_WR_RS_N_ARB 0xF00298
+
+#define mmTPC4_RTR_LBW_WR_RS_S_ARB 0xF0029C
+
+#define mmTPC4_RTR_LBW_WR_RS_L_ARB 0xF002A0
+
+#define mmTPC4_RTR_DBG_E_ARB 0xF00300
+
+#define mmTPC4_RTR_DBG_W_ARB 0xF00304
+
+#define mmTPC4_RTR_DBG_N_ARB 0xF00308
+
+#define mmTPC4_RTR_DBG_S_ARB 0xF0030C
+
+#define mmTPC4_RTR_DBG_L_ARB 0xF00310
+
+#define mmTPC4_RTR_DBG_E_ARB_MAX 0xF00320
+
+#define mmTPC4_RTR_DBG_W_ARB_MAX 0xF00324
+
+#define mmTPC4_RTR_DBG_N_ARB_MAX 0xF00328
+
+#define mmTPC4_RTR_DBG_S_ARB_MAX 0xF0032C
+
+#define mmTPC4_RTR_DBG_L_ARB_MAX 0xF00330
+
+#define mmTPC4_RTR_SPLIT_COEF_0 0xF00400
+
+#define mmTPC4_RTR_SPLIT_COEF_1 0xF00404
+
+#define mmTPC4_RTR_SPLIT_COEF_2 0xF00408
+
+#define mmTPC4_RTR_SPLIT_COEF_3 0xF0040C
+
+#define mmTPC4_RTR_SPLIT_COEF_4 0xF00410
+
+#define mmTPC4_RTR_SPLIT_COEF_5 0xF00414
+
+#define mmTPC4_RTR_SPLIT_COEF_6 0xF00418
+
+#define mmTPC4_RTR_SPLIT_COEF_7 0xF0041C
+
+#define mmTPC4_RTR_SPLIT_COEF_8 0xF00420
+
+#define mmTPC4_RTR_SPLIT_COEF_9 0xF00424
+
+#define mmTPC4_RTR_SPLIT_CFG 0xF00440
+
+#define mmTPC4_RTR_SPLIT_RD_SAT 0xF00444
+
+#define mmTPC4_RTR_SPLIT_RD_RST_TOKEN 0xF00448
+
+#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_0 0xF0044C
+
+#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_1 0xF00450
+
+#define mmTPC4_RTR_SPLIT_WR_SAT 0xF00454
+
+#define mmTPC4_RTR_WPLIT_WR_TST_TOLEN 0xF00458
+
+#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_0 0xF0045C
+
+#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_1 0xF00460
+
+#define mmTPC4_RTR_HBW_RANGE_HIT 0xF00470
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_0 0xF00480
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_1 0xF00484
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_2 0xF00488
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_3 0xF0048C
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_4 0xF00490
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_5 0xF00494
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_6 0xF00498
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_L_7 0xF0049C
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_0 0xF004A0
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_1 0xF004A4
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_2 0xF004A8
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_3 0xF004AC
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_4 0xF004B0
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_5 0xF004B4
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_6 0xF004B8
+
+#define mmTPC4_RTR_HBW_RANGE_MASK_H_7 0xF004BC
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_0 0xF004C0
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_1 0xF004C4
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_2 0xF004C8
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_3 0xF004CC
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_4 0xF004D0
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_5 0xF004D4
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_6 0xF004D8
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_L_7 0xF004DC
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_0 0xF004E0
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_1 0xF004E4
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_2 0xF004E8
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_3 0xF004EC
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_4 0xF004F0
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_5 0xF004F4
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_6 0xF004F8
+
+#define mmTPC4_RTR_HBW_RANGE_BASE_H_7 0xF004FC
+
+#define mmTPC4_RTR_LBW_RANGE_HIT 0xF00500
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_0 0xF00510
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_1 0xF00514
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_2 0xF00518
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_3 0xF0051C
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_4 0xF00520
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_5 0xF00524
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_6 0xF00528
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_7 0xF0052C
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_8 0xF00530
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_9 0xF00534
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_10 0xF00538
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_11 0xF0053C
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_12 0xF00540
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_13 0xF00544
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_14 0xF00548
+
+#define mmTPC4_RTR_LBW_RANGE_MASK_15 0xF0054C
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_0 0xF00550
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_1 0xF00554
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_2 0xF00558
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_3 0xF0055C
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_4 0xF00560
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_5 0xF00564
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_6 0xF00568
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_7 0xF0056C
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_8 0xF00570
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_9 0xF00574
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_10 0xF00578
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_11 0xF0057C
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_12 0xF00580
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_13 0xF00584
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_14 0xF00588
+
+#define mmTPC4_RTR_LBW_RANGE_BASE_15 0xF0058C
+
+#define mmTPC4_RTR_RGLTR 0xF00590
+
+#define mmTPC4_RTR_RGLTR_WR_RESULT 0xF00594
+
+#define mmTPC4_RTR_RGLTR_RD_RESULT 0xF00598
+
+#define mmTPC4_RTR_SCRAMB_EN 0xF00600
+
+#define mmTPC4_RTR_NON_LIN_SCRAMB 0xF00604
+
+#endif /* ASIC_REG_TPC4_RTR_REGS_H_ */
+
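Across the tpc3_*, tpc4_* and tpc5_* headers in this patch, consecutive TPC register blocks sit 0x40000 bytes apart (compare mmTPC3_RTR_SCRAMB_EN 0xEC0600 with mmTPC4_RTR_SCRAMB_EN 0xF00600, or mmTPC4_CFG_STATUS 0xF0681C with mmTPC5_CFG_STATUS 0xF4681C). A minimal sketch of deriving another instance's register from that stride, assuming the spacing also holds for TPC instances not shown in this section:

#include <stdint.h>

/* Spacing between consecutive TPC register blocks observed in these headers. */
#define TPC_INSTANCE_STRIDE  0x40000u

/*
 * Register address of TPC instance <n>, derived from the matching TPC4 define,
 * e.g. tpc_instance_reg(0xF00600, 3) == 0xEC0600 (mmTPC3_RTR_SCRAMB_EN).
 */
static inline uint32_t tpc_instance_reg(uint32_t tpc4_reg, unsigned int n)
{
	return tpc4_reg - 4u * TPC_INSTANCE_STRIDE + n * TPC_INSTANCE_STRIDE;
}
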
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
new file mode 100644
index 000000000000..3f00954fcdba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_CFG_REGS_H_
+#define ASIC_REG_TPC5_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF46400
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF46404
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF46408
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF4640C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF46410
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF46414
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF46418
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF4641C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF46420
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF46424
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF46428
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF4642C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF46430
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF46434
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF46438
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF4643C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF46440
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF46444
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF46448
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF4644C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF46450
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF46454
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF46458
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF4645C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF46460
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF46464
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF46468
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF4646C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF46470
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF46474
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF46478
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF4647C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF46480
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF46484
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF46488
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF4648C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF46490
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF46494
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF46498
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF4649C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF464A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF464A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF464A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF464AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF464B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF464B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF464B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF464BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF464C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF464C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF464C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF464CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF464D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF464D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF464D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF464DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF464E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF464E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF464E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF464EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF464F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF464F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF464F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF464FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF46500
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF46504
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF46508
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF4650C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF46510
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF46514
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF46518
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF4651C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF46520
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF46524
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF46528
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF4652C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF46530
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF46534
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF46538
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF4653C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF46540
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF46544
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF46548
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF4654C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF46550
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF46554
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF46558
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF4655C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF46560
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF46564
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF46568
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF4656C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF46570
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF46574
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF46578
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF4657C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF46580
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF46584
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF46588
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF4658C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF46590
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF46594
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF46598
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF4659C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF465A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF465A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF465A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF465AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF465B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF465B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF465B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF465BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF465C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF465C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF465C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF465CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF465D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF465D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF465D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF465DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF465E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF465E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF465E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF465EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF465F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF465F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF465F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF465FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF46600
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF46604
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF46608
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF4660C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF46610
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF46614
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF46618
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF4661C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF46620
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF46624
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF46628
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF4662C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF46630
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF46634
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF46638
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF4663C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF46640
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF46644
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF46648
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF4664C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF46650
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF46654
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF46658
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF4665C
+
+#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF46660
+
+#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF46664
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_0 0xF46668
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_0 0xF4666C
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_1 0xF46670
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_1 0xF46674
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_2 0xF46678
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_2 0xF4667C
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_3 0xF46680
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_3 0xF46684
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_4 0xF46688
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_4 0xF4668C
+
+#define mmTPC5_CFG_KERNEL_SRF_0 0xF46690
+
+#define mmTPC5_CFG_KERNEL_SRF_1 0xF46694
+
+#define mmTPC5_CFG_KERNEL_SRF_2 0xF46698
+
+#define mmTPC5_CFG_KERNEL_SRF_3 0xF4669C
+
+#define mmTPC5_CFG_KERNEL_SRF_4 0xF466A0
+
+#define mmTPC5_CFG_KERNEL_SRF_5 0xF466A4
+
+#define mmTPC5_CFG_KERNEL_SRF_6 0xF466A8
+
+#define mmTPC5_CFG_KERNEL_SRF_7 0xF466AC
+
+#define mmTPC5_CFG_KERNEL_SRF_8 0xF466B0
+
+#define mmTPC5_CFG_KERNEL_SRF_9 0xF466B4
+
+#define mmTPC5_CFG_KERNEL_SRF_10 0xF466B8
+
+#define mmTPC5_CFG_KERNEL_SRF_11 0xF466BC
+
+#define mmTPC5_CFG_KERNEL_SRF_12 0xF466C0
+
+#define mmTPC5_CFG_KERNEL_SRF_13 0xF466C4
+
+#define mmTPC5_CFG_KERNEL_SRF_14 0xF466C8
+
+#define mmTPC5_CFG_KERNEL_SRF_15 0xF466CC
+
+#define mmTPC5_CFG_KERNEL_SRF_16 0xF466D0
+
+#define mmTPC5_CFG_KERNEL_SRF_17 0xF466D4
+
+#define mmTPC5_CFG_KERNEL_SRF_18 0xF466D8
+
+#define mmTPC5_CFG_KERNEL_SRF_19 0xF466DC
+
+#define mmTPC5_CFG_KERNEL_SRF_20 0xF466E0
+
+#define mmTPC5_CFG_KERNEL_SRF_21 0xF466E4
+
+#define mmTPC5_CFG_KERNEL_SRF_22 0xF466E8
+
+#define mmTPC5_CFG_KERNEL_SRF_23 0xF466EC
+
+#define mmTPC5_CFG_KERNEL_SRF_24 0xF466F0
+
+#define mmTPC5_CFG_KERNEL_SRF_25 0xF466F4
+
+#define mmTPC5_CFG_KERNEL_SRF_26 0xF466F8
+
+#define mmTPC5_CFG_KERNEL_SRF_27 0xF466FC
+
+#define mmTPC5_CFG_KERNEL_SRF_28 0xF46700
+
+#define mmTPC5_CFG_KERNEL_SRF_29 0xF46704
+
+#define mmTPC5_CFG_KERNEL_SRF_30 0xF46708
+
+#define mmTPC5_CFG_KERNEL_SRF_31 0xF4670C
+
+#define mmTPC5_CFG_KERNEL_KERNEL_CONFIG 0xF46710
+
+#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF46714
+
+#define mmTPC5_CFG_RESERVED_DESC_END 0xF46738
+
+#define mmTPC5_CFG_ROUND_CSR 0xF467FC
+
+#define mmTPC5_CFG_TBUF_BASE_ADDR_LOW 0xF46800
+
+#define mmTPC5_CFG_TBUF_BASE_ADDR_HIGH 0xF46804
+
+#define mmTPC5_CFG_SEMAPHORE 0xF46808
+
+#define mmTPC5_CFG_VFLAGS 0xF4680C
+
+#define mmTPC5_CFG_SFLAGS 0xF46810
+
+#define mmTPC5_CFG_LFSR_POLYNOM 0xF46818
+
+#define mmTPC5_CFG_STATUS 0xF4681C
+
+#define mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH 0xF46820
+
+#define mmTPC5_CFG_CFG_SUBTRACT_VALUE 0xF46824
+
+#define mmTPC5_CFG_SM_BASE_ADDRESS_LOW 0xF46828
+
+#define mmTPC5_CFG_SM_BASE_ADDRESS_HIGH 0xF4682C
+
+#define mmTPC5_CFG_TPC_CMD 0xF46830
+
+#define mmTPC5_CFG_TPC_EXECUTE 0xF46838
+
+#define mmTPC5_CFG_TPC_STALL 0xF4683C
+
+#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_LOW 0xF46840
+
+#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF46844
+
+#define mmTPC5_CFG_MSS_CONFIG 0xF46854
+
+#define mmTPC5_CFG_TPC_INTR_CAUSE 0xF46858
+
+#define mmTPC5_CFG_TPC_INTR_MASK 0xF4685C
+
+#define mmTPC5_CFG_TSB_CONFIG 0xF46860
+
+#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF46A00
+
+#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF46A04
+
+#define mmTPC5_CFG_QM_TENSOR_0_PADDING_VALUE 0xF46A08
+
+#define mmTPC5_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF46A0C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF46A10
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF46A14
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF46A18
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF46A1C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF46A20
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF46A24
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF46A28
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF46A2C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF46A30
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF46A34
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF46A38
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF46A3C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF46A40
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF46A44
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF46A48
+
+#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF46A4C
+
+#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF46A50
+
+#define mmTPC5_CFG_QM_TENSOR_1_PADDING_VALUE 0xF46A54
+
+#define mmTPC5_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF46A58
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF46A5C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF46A60
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF46A64
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF46A68
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF46A6C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF46A70
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF46A74
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF46A78
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF46A7C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF46A80
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF46A84
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF46A88
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF46A8C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF46A90
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF46A94
+
+#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF46A98
+
+#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF46A9C
+
+#define mmTPC5_CFG_QM_TENSOR_2_PADDING_VALUE 0xF46AA0
+
+#define mmTPC5_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF46AA4
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF46AA8
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF46AAC
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF46AB0
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF46AB4
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF46AB8
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF46ABC
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF46AC0
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF46AC4
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF46AC8
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF46ACC
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF46AD0
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF46AD4
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF46AD8
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF46ADC
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF46AE0
+
+#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF46AE4
+
+#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF46AE8
+
+#define mmTPC5_CFG_QM_TENSOR_3_PADDING_VALUE 0xF46AEC
+
+#define mmTPC5_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF46AF0
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF46AF4
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF46AF8
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF46AFC
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF46B00
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF46B04
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF46B08
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF46B0C
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF46B10
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF46B14
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF46B18
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF46B1C
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF46B20
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF46B24
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF46B28
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF46B2C
+
+#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF46B30
+
+#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF46B34
+
+#define mmTPC5_CFG_QM_TENSOR_4_PADDING_VALUE 0xF46B38
+
+#define mmTPC5_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF46B3C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF46B40
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF46B44
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF46B48
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF46B4C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF46B50
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF46B54
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF46B58
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF46B5C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF46B60
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF46B64
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF46B68
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF46B6C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF46B70
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF46B74
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF46B78
+
+#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF46B7C
+
+#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF46B80
+
+#define mmTPC5_CFG_QM_TENSOR_5_PADDING_VALUE 0xF46B84
+
+#define mmTPC5_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF46B88
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF46B8C
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF46B90
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF46B94
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF46B98
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF46B9C
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF46BA0
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF46BA4
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF46BA8
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF46BAC
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF46BB0
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF46BB4
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF46BB8
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF46BBC
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF46BC0
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF46BC4
+
+#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF46BC8
+
+#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF46BCC
+
+#define mmTPC5_CFG_QM_TENSOR_6_PADDING_VALUE 0xF46BD0
+
+#define mmTPC5_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF46BD4
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF46BD8
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF46BDC
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF46BE0
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF46BE4
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF46BE8
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF46BEC
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF46BF0
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF46BF4
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF46BF8
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF46BFC
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF46C00
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF46C04
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF46C08
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF46C0C
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF46C10
+
+#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF46C14
+
+#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF46C18
+
+#define mmTPC5_CFG_QM_TENSOR_7_PADDING_VALUE 0xF46C1C
+
+#define mmTPC5_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF46C20
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF46C24
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF46C28
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF46C2C
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF46C30
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF46C34
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF46C38
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF46C3C
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF46C40
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF46C44
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF46C48
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF46C4C
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF46C50
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF46C54
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF46C58
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF46C5C
+
+#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF46C60
+
+#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF46C64
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_0 0xF46C68
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_0 0xF46C6C
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_1 0xF46C70
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_1 0xF46C74
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_2 0xF46C78
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_2 0xF46C7C
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_3 0xF46C80
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_3 0xF46C84
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_4 0xF46C88
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_4 0xF46C8C
+
+#define mmTPC5_CFG_QM_SRF_0 0xF46C90
+
+#define mmTPC5_CFG_QM_SRF_1 0xF46C94
+
+#define mmTPC5_CFG_QM_SRF_2 0xF46C98
+
+#define mmTPC5_CFG_QM_SRF_3 0xF46C9C
+
+#define mmTPC5_CFG_QM_SRF_4 0xF46CA0
+
+#define mmTPC5_CFG_QM_SRF_5 0xF46CA4
+
+#define mmTPC5_CFG_QM_SRF_6 0xF46CA8
+
+#define mmTPC5_CFG_QM_SRF_7 0xF46CAC
+
+#define mmTPC5_CFG_QM_SRF_8 0xF46CB0
+
+#define mmTPC5_CFG_QM_SRF_9 0xF46CB4
+
+#define mmTPC5_CFG_QM_SRF_10 0xF46CB8
+
+#define mmTPC5_CFG_QM_SRF_11 0xF46CBC
+
+#define mmTPC5_CFG_QM_SRF_12 0xF46CC0
+
+#define mmTPC5_CFG_QM_SRF_13 0xF46CC4
+
+#define mmTPC5_CFG_QM_SRF_14 0xF46CC8
+
+#define mmTPC5_CFG_QM_SRF_15 0xF46CCC
+
+#define mmTPC5_CFG_QM_SRF_16 0xF46CD0
+
+#define mmTPC5_CFG_QM_SRF_17 0xF46CD4
+
+#define mmTPC5_CFG_QM_SRF_18 0xF46CD8
+
+#define mmTPC5_CFG_QM_SRF_19 0xF46CDC
+
+#define mmTPC5_CFG_QM_SRF_20 0xF46CE0
+
+#define mmTPC5_CFG_QM_SRF_21 0xF46CE4
+
+#define mmTPC5_CFG_QM_SRF_22 0xF46CE8
+
+#define mmTPC5_CFG_QM_SRF_23 0xF46CEC
+
+#define mmTPC5_CFG_QM_SRF_24 0xF46CF0
+
+#define mmTPC5_CFG_QM_SRF_25 0xF46CF4
+
+#define mmTPC5_CFG_QM_SRF_26 0xF46CF8
+
+#define mmTPC5_CFG_QM_SRF_27 0xF46CFC
+
+#define mmTPC5_CFG_QM_SRF_28 0xF46D00
+
+#define mmTPC5_CFG_QM_SRF_29 0xF46D04
+
+#define mmTPC5_CFG_QM_SRF_30 0xF46D08
+
+#define mmTPC5_CFG_QM_SRF_31 0xF46D0C
+
+#define mmTPC5_CFG_QM_KERNEL_CONFIG 0xF46D10
+
+#define mmTPC5_CFG_QM_SYNC_OBJECT_MESSAGE 0xF46D14
+
+#define mmTPC5_CFG_ARUSER 0xF46D18
+
+#define mmTPC5_CFG_AWUSER 0xF46D1C
+
+#define mmTPC5_CFG_FUNC_MBIST_CNTRL 0xF46E00
+
+#define mmTPC5_CFG_FUNC_MBIST_PAT 0xF46E04
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_0 0xF46E08
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_1 0xF46E0C
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_2 0xF46E10
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_3 0xF46E14
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_4 0xF46E18
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_5 0xF46E1C
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_6 0xF46E20
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_7 0xF46E24
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_8 0xF46E28
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF46E2C
+
+#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
new file mode 100644
index 000000000000..d8e72a8e18d7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_CMDQ_REGS_H_
+#define ASIC_REG_TPC5_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC5_CMDQ_GLBL_CFG0 0xF49000
+
+#define mmTPC5_CMDQ_GLBL_CFG1 0xF49004
+
+#define mmTPC5_CMDQ_GLBL_PROT 0xF49008
+
+#define mmTPC5_CMDQ_GLBL_ERR_CFG 0xF4900C
+
+#define mmTPC5_CMDQ_GLBL_ERR_ADDR_LO 0xF49010
+
+#define mmTPC5_CMDQ_GLBL_ERR_ADDR_HI 0xF49014
+
+#define mmTPC5_CMDQ_GLBL_ERR_WDATA 0xF49018
+
+#define mmTPC5_CMDQ_GLBL_SECURE_PROPS 0xF4901C
+
+#define mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS 0xF49020
+
+#define mmTPC5_CMDQ_GLBL_STS0 0xF49024
+
+#define mmTPC5_CMDQ_GLBL_STS1 0xF49028
+
+#define mmTPC5_CMDQ_CQ_CFG0 0xF490B0
+
+#define mmTPC5_CMDQ_CQ_CFG1 0xF490B4
+
+#define mmTPC5_CMDQ_CQ_ARUSER 0xF490B8
+
+#define mmTPC5_CMDQ_CQ_PTR_LO 0xF490C0
+
+#define mmTPC5_CMDQ_CQ_PTR_HI 0xF490C4
+
+#define mmTPC5_CMDQ_CQ_TSIZE 0xF490C8
+
+#define mmTPC5_CMDQ_CQ_CTL 0xF490CC
+
+#define mmTPC5_CMDQ_CQ_PTR_LO_STS 0xF490D4
+
+#define mmTPC5_CMDQ_CQ_PTR_HI_STS 0xF490D8
+
+#define mmTPC5_CMDQ_CQ_TSIZE_STS 0xF490DC
+
+#define mmTPC5_CMDQ_CQ_CTL_STS 0xF490E0
+
+#define mmTPC5_CMDQ_CQ_STS0 0xF490E4
+
+#define mmTPC5_CMDQ_CQ_STS1 0xF490E8
+
+#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN 0xF490F0
+
+#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF490F4
+
+#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT 0xF490F8
+
+#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF490FC
+
+#define mmTPC5_CMDQ_CQ_IFIFO_CNT 0xF49108
+
+#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF49120
+
+#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF49124
+
+#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF49128
+
+#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF4912C
+
+#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF49130
+
+#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF49134
+
+#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF49138
+
+#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF4913C
+
+#define mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF49140
+
+#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF49144
+
+#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF49148
+
+#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF4914C
+
+#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF49150
+
+#define mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF49154
+
+#define mmTPC5_CMDQ_CP_FENCE0_RDATA 0xF49158
+
+#define mmTPC5_CMDQ_CP_FENCE1_RDATA 0xF4915C
+
+#define mmTPC5_CMDQ_CP_FENCE2_RDATA 0xF49160
+
+#define mmTPC5_CMDQ_CP_FENCE3_RDATA 0xF49164
+
+#define mmTPC5_CMDQ_CP_FENCE0_CNT 0xF49168
+
+#define mmTPC5_CMDQ_CP_FENCE1_CNT 0xF4916C
+
+#define mmTPC5_CMDQ_CP_FENCE2_CNT 0xF49170
+
+#define mmTPC5_CMDQ_CP_FENCE3_CNT 0xF49174
+
+#define mmTPC5_CMDQ_CP_STS 0xF49178
+
+#define mmTPC5_CMDQ_CP_CURRENT_INST_LO 0xF4917C
+
+#define mmTPC5_CMDQ_CP_CURRENT_INST_HI 0xF49180
+
+#define mmTPC5_CMDQ_CP_BARRIER_CFG 0xF49184
+
+#define mmTPC5_CMDQ_CP_DBG_0 0xF49188
+
+#define mmTPC5_CMDQ_CQ_BUF_ADDR 0xF49308
+
+#define mmTPC5_CMDQ_CQ_BUF_RDATA 0xF4930C
+
+#endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
new file mode 100644
index 000000000000..be2e68624709
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_QM_REGS_H_
+#define ASIC_REG_TPC5_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC5_QM_GLBL_CFG0 0xF48000
+
+#define mmTPC5_QM_GLBL_CFG1 0xF48004
+
+#define mmTPC5_QM_GLBL_PROT 0xF48008
+
+#define mmTPC5_QM_GLBL_ERR_CFG 0xF4800C
+
+#define mmTPC5_QM_GLBL_ERR_ADDR_LO 0xF48010
+
+#define mmTPC5_QM_GLBL_ERR_ADDR_HI 0xF48014
+
+#define mmTPC5_QM_GLBL_ERR_WDATA 0xF48018
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS 0xF4801C
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS 0xF48020
+
+#define mmTPC5_QM_GLBL_STS0 0xF48024
+
+#define mmTPC5_QM_GLBL_STS1 0xF48028
+
+#define mmTPC5_QM_PQ_BASE_LO 0xF48060
+
+#define mmTPC5_QM_PQ_BASE_HI 0xF48064
+
+#define mmTPC5_QM_PQ_SIZE 0xF48068
+
+#define mmTPC5_QM_PQ_PI 0xF4806C
+
+#define mmTPC5_QM_PQ_CI 0xF48070
+
+#define mmTPC5_QM_PQ_CFG0 0xF48074
+
+#define mmTPC5_QM_PQ_CFG1 0xF48078
+
+#define mmTPC5_QM_PQ_ARUSER 0xF4807C
+
+#define mmTPC5_QM_PQ_PUSH0 0xF48080
+
+#define mmTPC5_QM_PQ_PUSH1 0xF48084
+
+#define mmTPC5_QM_PQ_PUSH2 0xF48088
+
+#define mmTPC5_QM_PQ_PUSH3 0xF4808C
+
+#define mmTPC5_QM_PQ_STS0 0xF48090
+
+#define mmTPC5_QM_PQ_STS1 0xF48094
+
+#define mmTPC5_QM_PQ_RD_RATE_LIM_EN 0xF480A0
+
+#define mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF480A4
+
+#define mmTPC5_QM_PQ_RD_RATE_LIM_SAT 0xF480A8
+
+#define mmTPC5_QM_PQ_RD_RATE_LIM_TOUT 0xF480AC
+
+#define mmTPC5_QM_CQ_CFG0 0xF480B0
+
+#define mmTPC5_QM_CQ_CFG1 0xF480B4
+
+#define mmTPC5_QM_CQ_ARUSER 0xF480B8
+
+#define mmTPC5_QM_CQ_PTR_LO 0xF480C0
+
+#define mmTPC5_QM_CQ_PTR_HI 0xF480C4
+
+#define mmTPC5_QM_CQ_TSIZE 0xF480C8
+
+#define mmTPC5_QM_CQ_CTL 0xF480CC
+
+#define mmTPC5_QM_CQ_PTR_LO_STS 0xF480D4
+
+#define mmTPC5_QM_CQ_PTR_HI_STS 0xF480D8
+
+#define mmTPC5_QM_CQ_TSIZE_STS 0xF480DC
+
+#define mmTPC5_QM_CQ_CTL_STS 0xF480E0
+
+#define mmTPC5_QM_CQ_STS0 0xF480E4
+
+#define mmTPC5_QM_CQ_STS1 0xF480E8
+
+#define mmTPC5_QM_CQ_RD_RATE_LIM_EN 0xF480F0
+
+#define mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF480F4
+
+#define mmTPC5_QM_CQ_RD_RATE_LIM_SAT 0xF480F8
+
+#define mmTPC5_QM_CQ_RD_RATE_LIM_TOUT 0xF480FC
+
+#define mmTPC5_QM_CQ_IFIFO_CNT 0xF48108
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO 0xF48120
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI 0xF48124
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO 0xF48128
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI 0xF4812C
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO 0xF48130
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI 0xF48134
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO 0xF48138
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI 0xF4813C
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET 0xF48140
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF48144
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF48148
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF4814C
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF48150
+
+#define mmTPC5_QM_CP_LDMA_COMMIT_OFFSET 0xF48154
+
+#define mmTPC5_QM_CP_FENCE0_RDATA 0xF48158
+
+#define mmTPC5_QM_CP_FENCE1_RDATA 0xF4815C
+
+#define mmTPC5_QM_CP_FENCE2_RDATA 0xF48160
+
+#define mmTPC5_QM_CP_FENCE3_RDATA 0xF48164
+
+#define mmTPC5_QM_CP_FENCE0_CNT 0xF48168
+
+#define mmTPC5_QM_CP_FENCE1_CNT 0xF4816C
+
+#define mmTPC5_QM_CP_FENCE2_CNT 0xF48170
+
+#define mmTPC5_QM_CP_FENCE3_CNT 0xF48174
+
+#define mmTPC5_QM_CP_STS 0xF48178
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO 0xF4817C
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI 0xF48180
+
+#define mmTPC5_QM_CP_BARRIER_CFG 0xF48184
+
+#define mmTPC5_QM_CP_DBG_0 0xF48188
+
+#define mmTPC5_QM_PQ_BUF_ADDR 0xF48300
+
+#define mmTPC5_QM_PQ_BUF_RDATA 0xF48304
+
+#define mmTPC5_QM_CQ_BUF_ADDR 0xF48308
+
+#define mmTPC5_QM_CQ_BUF_RDATA 0xF4830C
+
+#endif /* ASIC_REG_TPC5_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
new file mode 100644
index 000000000000..6f301c7bbc2f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_RTR_REGS_H_
+#define ASIC_REG_TPC5_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC5_RTR_HBW_RD_RQ_E_ARB 0xF40100
+
+#define mmTPC5_RTR_HBW_RD_RQ_W_ARB 0xF40104
+
+#define mmTPC5_RTR_HBW_RD_RQ_N_ARB 0xF40108
+
+#define mmTPC5_RTR_HBW_RD_RQ_S_ARB 0xF4010C
+
+#define mmTPC5_RTR_HBW_RD_RQ_L_ARB 0xF40110
+
+#define mmTPC5_RTR_HBW_E_ARB_MAX 0xF40120
+
+#define mmTPC5_RTR_HBW_W_ARB_MAX 0xF40124
+
+#define mmTPC5_RTR_HBW_N_ARB_MAX 0xF40128
+
+#define mmTPC5_RTR_HBW_S_ARB_MAX 0xF4012C
+
+#define mmTPC5_RTR_HBW_L_ARB_MAX 0xF40130
+
+#define mmTPC5_RTR_HBW_RD_RS_E_ARB 0xF40140
+
+#define mmTPC5_RTR_HBW_RD_RS_W_ARB 0xF40144
+
+#define mmTPC5_RTR_HBW_RD_RS_N_ARB 0xF40148
+
+#define mmTPC5_RTR_HBW_RD_RS_S_ARB 0xF4014C
+
+#define mmTPC5_RTR_HBW_RD_RS_L_ARB 0xF40150
+
+#define mmTPC5_RTR_HBW_WR_RQ_E_ARB 0xF40170
+
+#define mmTPC5_RTR_HBW_WR_RQ_W_ARB 0xF40174
+
+#define mmTPC5_RTR_HBW_WR_RQ_N_ARB 0xF40178
+
+#define mmTPC5_RTR_HBW_WR_RQ_S_ARB 0xF4017C
+
+#define mmTPC5_RTR_HBW_WR_RQ_L_ARB 0xF40180
+
+#define mmTPC5_RTR_HBW_WR_RS_E_ARB 0xF40190
+
+#define mmTPC5_RTR_HBW_WR_RS_W_ARB 0xF40194
+
+#define mmTPC5_RTR_HBW_WR_RS_N_ARB 0xF40198
+
+#define mmTPC5_RTR_HBW_WR_RS_S_ARB 0xF4019C
+
+#define mmTPC5_RTR_HBW_WR_RS_L_ARB 0xF401A0
+
+#define mmTPC5_RTR_LBW_RD_RQ_E_ARB 0xF40200
+
+#define mmTPC5_RTR_LBW_RD_RQ_W_ARB 0xF40204
+
+#define mmTPC5_RTR_LBW_RD_RQ_N_ARB 0xF40208
+
+#define mmTPC5_RTR_LBW_RD_RQ_S_ARB 0xF4020C
+
+#define mmTPC5_RTR_LBW_RD_RQ_L_ARB 0xF40210
+
+#define mmTPC5_RTR_LBW_E_ARB_MAX 0xF40220
+
+#define mmTPC5_RTR_LBW_W_ARB_MAX 0xF40224
+
+#define mmTPC5_RTR_LBW_N_ARB_MAX 0xF40228
+
+#define mmTPC5_RTR_LBW_S_ARB_MAX 0xF4022C
+
+#define mmTPC5_RTR_LBW_L_ARB_MAX 0xF40230
+
+#define mmTPC5_RTR_LBW_RD_RS_E_ARB 0xF40250
+
+#define mmTPC5_RTR_LBW_RD_RS_W_ARB 0xF40254
+
+#define mmTPC5_RTR_LBW_RD_RS_N_ARB 0xF40258
+
+#define mmTPC5_RTR_LBW_RD_RS_S_ARB 0xF4025C
+
+#define mmTPC5_RTR_LBW_RD_RS_L_ARB 0xF40260
+
+#define mmTPC5_RTR_LBW_WR_RQ_E_ARB 0xF40270
+
+#define mmTPC5_RTR_LBW_WR_RQ_W_ARB 0xF40274
+
+#define mmTPC5_RTR_LBW_WR_RQ_N_ARB 0xF40278
+
+#define mmTPC5_RTR_LBW_WR_RQ_S_ARB 0xF4027C
+
+#define mmTPC5_RTR_LBW_WR_RQ_L_ARB 0xF40280
+
+#define mmTPC5_RTR_LBW_WR_RS_E_ARB 0xF40290
+
+#define mmTPC5_RTR_LBW_WR_RS_W_ARB 0xF40294
+
+#define mmTPC5_RTR_LBW_WR_RS_N_ARB 0xF40298
+
+#define mmTPC5_RTR_LBW_WR_RS_S_ARB 0xF4029C
+
+#define mmTPC5_RTR_LBW_WR_RS_L_ARB 0xF402A0
+
+#define mmTPC5_RTR_DBG_E_ARB 0xF40300
+
+#define mmTPC5_RTR_DBG_W_ARB 0xF40304
+
+#define mmTPC5_RTR_DBG_N_ARB 0xF40308
+
+#define mmTPC5_RTR_DBG_S_ARB 0xF4030C
+
+#define mmTPC5_RTR_DBG_L_ARB 0xF40310
+
+#define mmTPC5_RTR_DBG_E_ARB_MAX 0xF40320
+
+#define mmTPC5_RTR_DBG_W_ARB_MAX 0xF40324
+
+#define mmTPC5_RTR_DBG_N_ARB_MAX 0xF40328
+
+#define mmTPC5_RTR_DBG_S_ARB_MAX 0xF4032C
+
+#define mmTPC5_RTR_DBG_L_ARB_MAX 0xF40330
+
+#define mmTPC5_RTR_SPLIT_COEF_0 0xF40400
+
+#define mmTPC5_RTR_SPLIT_COEF_1 0xF40404
+
+#define mmTPC5_RTR_SPLIT_COEF_2 0xF40408
+
+#define mmTPC5_RTR_SPLIT_COEF_3 0xF4040C
+
+#define mmTPC5_RTR_SPLIT_COEF_4 0xF40410
+
+#define mmTPC5_RTR_SPLIT_COEF_5 0xF40414
+
+#define mmTPC5_RTR_SPLIT_COEF_6 0xF40418
+
+#define mmTPC5_RTR_SPLIT_COEF_7 0xF4041C
+
+#define mmTPC5_RTR_SPLIT_COEF_8 0xF40420
+
+#define mmTPC5_RTR_SPLIT_COEF_9 0xF40424
+
+#define mmTPC5_RTR_SPLIT_CFG 0xF40440
+
+#define mmTPC5_RTR_SPLIT_RD_SAT 0xF40444
+
+#define mmTPC5_RTR_SPLIT_RD_RST_TOKEN 0xF40448
+
+#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_0 0xF4044C
+
+#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_1 0xF40450
+
+#define mmTPC5_RTR_SPLIT_WR_SAT 0xF40454
+
+#define mmTPC5_RTR_WPLIT_WR_TST_TOLEN 0xF40458
+
+#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_0 0xF4045C
+
+#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_1 0xF40460
+
+#define mmTPC5_RTR_HBW_RANGE_HIT 0xF40470
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_0 0xF40480
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_1 0xF40484
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_2 0xF40488
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_3 0xF4048C
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_4 0xF40490
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_5 0xF40494
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_6 0xF40498
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_L_7 0xF4049C
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_0 0xF404A0
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_1 0xF404A4
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_2 0xF404A8
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_3 0xF404AC
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_4 0xF404B0
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_5 0xF404B4
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_6 0xF404B8
+
+#define mmTPC5_RTR_HBW_RANGE_MASK_H_7 0xF404BC
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_0 0xF404C0
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_1 0xF404C4
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_2 0xF404C8
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_3 0xF404CC
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_4 0xF404D0
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_5 0xF404D4
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_6 0xF404D8
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_L_7 0xF404DC
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_0 0xF404E0
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_1 0xF404E4
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_2 0xF404E8
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_3 0xF404EC
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_4 0xF404F0
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_5 0xF404F4
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_6 0xF404F8
+
+#define mmTPC5_RTR_HBW_RANGE_BASE_H_7 0xF404FC
+
+#define mmTPC5_RTR_LBW_RANGE_HIT 0xF40500
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_0 0xF40510
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_1 0xF40514
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_2 0xF40518
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_3 0xF4051C
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_4 0xF40520
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_5 0xF40524
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_6 0xF40528
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_7 0xF4052C
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_8 0xF40530
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_9 0xF40534
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_10 0xF40538
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_11 0xF4053C
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_12 0xF40540
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_13 0xF40544
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_14 0xF40548
+
+#define mmTPC5_RTR_LBW_RANGE_MASK_15 0xF4054C
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_0 0xF40550
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_1 0xF40554
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_2 0xF40558
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_3 0xF4055C
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_4 0xF40560
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_5 0xF40564
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_6 0xF40568
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_7 0xF4056C
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_8 0xF40570
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_9 0xF40574
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_10 0xF40578
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_11 0xF4057C
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_12 0xF40580
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_13 0xF40584
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_14 0xF40588
+
+#define mmTPC5_RTR_LBW_RANGE_BASE_15 0xF4058C
+
+#define mmTPC5_RTR_RGLTR 0xF40590
+
+#define mmTPC5_RTR_RGLTR_WR_RESULT 0xF40594
+
+#define mmTPC5_RTR_RGLTR_RD_RESULT 0xF40598
+
+#define mmTPC5_RTR_SCRAMB_EN 0xF40600
+
+#define mmTPC5_RTR_NON_LIN_SCRAMB 0xF40604
+
+#endif /* ASIC_REG_TPC5_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
new file mode 100644
index 000000000000..1e1168601c41
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_CFG_REGS_H_
+#define ASIC_REG_TPC6_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF86400
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF86404
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF86408
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF8640C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF86410
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF86414
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF86418
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF8641C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF86420
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF86424
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF86428
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF8642C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF86430
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF86434
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF86438
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF8643C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF86440
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF86444
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF86448
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF8644C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF86450
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF86454
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF86458
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF8645C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF86460
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF86464
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF86468
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF8646C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF86470
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF86474
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF86478
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF8647C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF86480
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF86484
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF86488
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF8648C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF86490
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF86494
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF86498
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF8649C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF864A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF864A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF864A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF864AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF864B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF864B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF864B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF864BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF864C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF864C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF864C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF864CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF864D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF864D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF864D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF864DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF864E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF864E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF864E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF864EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF864F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF864F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF864F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF864FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF86500
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF86504
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF86508
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF8650C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF86510
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF86514
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF86518
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF8651C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF86520
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF86524
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF86528
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF8652C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF86530
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF86534
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF86538
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF8653C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF86540
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF86544
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF86548
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF8654C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF86550
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF86554
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF86558
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF8655C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF86560
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF86564
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF86568
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF8656C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF86570
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF86574
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF86578
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF8657C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF86580
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF86584
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF86588
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF8658C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF86590
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF86594
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF86598
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF8659C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF865A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF865A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF865A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF865AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF865B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF865B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF865B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF865BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF865C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF865C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF865C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF865CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF865D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF865D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF865D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF865DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF865E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF865E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF865E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF865EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF865F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF865F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF865F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF865FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF86600
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF86604
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF86608
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF8660C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF86610
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF86614
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF86618
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF8661C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF86620
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF86624
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF86628
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF8662C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF86630
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF86634
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF86638
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF8663C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF86640
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF86644
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF86648
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF8664C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF86650
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF86654
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF86658
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF8665C
+
+#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF86660
+
+#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF86664
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_0 0xF86668
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_0 0xF8666C
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_1 0xF86670
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_1 0xF86674
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_2 0xF86678
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_2 0xF8667C
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_3 0xF86680
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_3 0xF86684
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_4 0xF86688
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_4 0xF8668C
+
+#define mmTPC6_CFG_KERNEL_SRF_0 0xF86690
+
+#define mmTPC6_CFG_KERNEL_SRF_1 0xF86694
+
+#define mmTPC6_CFG_KERNEL_SRF_2 0xF86698
+
+#define mmTPC6_CFG_KERNEL_SRF_3 0xF8669C
+
+#define mmTPC6_CFG_KERNEL_SRF_4 0xF866A0
+
+#define mmTPC6_CFG_KERNEL_SRF_5 0xF866A4
+
+#define mmTPC6_CFG_KERNEL_SRF_6 0xF866A8
+
+#define mmTPC6_CFG_KERNEL_SRF_7 0xF866AC
+
+#define mmTPC6_CFG_KERNEL_SRF_8 0xF866B0
+
+#define mmTPC6_CFG_KERNEL_SRF_9 0xF866B4
+
+#define mmTPC6_CFG_KERNEL_SRF_10 0xF866B8
+
+#define mmTPC6_CFG_KERNEL_SRF_11 0xF866BC
+
+#define mmTPC6_CFG_KERNEL_SRF_12 0xF866C0
+
+#define mmTPC6_CFG_KERNEL_SRF_13 0xF866C4
+
+#define mmTPC6_CFG_KERNEL_SRF_14 0xF866C8
+
+#define mmTPC6_CFG_KERNEL_SRF_15 0xF866CC
+
+#define mmTPC6_CFG_KERNEL_SRF_16 0xF866D0
+
+#define mmTPC6_CFG_KERNEL_SRF_17 0xF866D4
+
+#define mmTPC6_CFG_KERNEL_SRF_18 0xF866D8
+
+#define mmTPC6_CFG_KERNEL_SRF_19 0xF866DC
+
+#define mmTPC6_CFG_KERNEL_SRF_20 0xF866E0
+
+#define mmTPC6_CFG_KERNEL_SRF_21 0xF866E4
+
+#define mmTPC6_CFG_KERNEL_SRF_22 0xF866E8
+
+#define mmTPC6_CFG_KERNEL_SRF_23 0xF866EC
+
+#define mmTPC6_CFG_KERNEL_SRF_24 0xF866F0
+
+#define mmTPC6_CFG_KERNEL_SRF_25 0xF866F4
+
+#define mmTPC6_CFG_KERNEL_SRF_26 0xF866F8
+
+#define mmTPC6_CFG_KERNEL_SRF_27 0xF866FC
+
+#define mmTPC6_CFG_KERNEL_SRF_28 0xF86700
+
+#define mmTPC6_CFG_KERNEL_SRF_29 0xF86704
+
+#define mmTPC6_CFG_KERNEL_SRF_30 0xF86708
+
+#define mmTPC6_CFG_KERNEL_SRF_31 0xF8670C
+
+#define mmTPC6_CFG_KERNEL_KERNEL_CONFIG 0xF86710
+
+#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF86714
+
+#define mmTPC6_CFG_RESERVED_DESC_END 0xF86738
+
+#define mmTPC6_CFG_ROUND_CSR 0xF867FC
+
+#define mmTPC6_CFG_TBUF_BASE_ADDR_LOW 0xF86800
+
+#define mmTPC6_CFG_TBUF_BASE_ADDR_HIGH 0xF86804
+
+#define mmTPC6_CFG_SEMAPHORE 0xF86808
+
+#define mmTPC6_CFG_VFLAGS 0xF8680C
+
+#define mmTPC6_CFG_SFLAGS 0xF86810
+
+#define mmTPC6_CFG_LFSR_POLYNOM 0xF86818
+
+#define mmTPC6_CFG_STATUS 0xF8681C
+
+#define mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH 0xF86820
+
+#define mmTPC6_CFG_CFG_SUBTRACT_VALUE 0xF86824
+
+#define mmTPC6_CFG_SM_BASE_ADDRESS_LOW 0xF86828
+
+#define mmTPC6_CFG_SM_BASE_ADDRESS_HIGH 0xF8682C
+
+#define mmTPC6_CFG_TPC_CMD 0xF86830
+
+#define mmTPC6_CFG_TPC_EXECUTE 0xF86838
+
+#define mmTPC6_CFG_TPC_STALL 0xF8683C
+
+#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_LOW 0xF86840
+
+#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF86844
+
+#define mmTPC6_CFG_MSS_CONFIG 0xF86854
+
+#define mmTPC6_CFG_TPC_INTR_CAUSE 0xF86858
+
+#define mmTPC6_CFG_TPC_INTR_MASK 0xF8685C
+
+#define mmTPC6_CFG_TSB_CONFIG 0xF86860
+
+#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF86A00
+
+#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF86A04
+
+#define mmTPC6_CFG_QM_TENSOR_0_PADDING_VALUE 0xF86A08
+
+#define mmTPC6_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF86A0C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF86A10
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF86A14
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF86A18
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF86A1C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF86A20
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF86A24
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF86A28
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF86A2C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF86A30
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF86A34
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF86A38
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF86A3C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF86A40
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF86A44
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF86A48
+
+#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF86A4C
+
+#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF86A50
+
+#define mmTPC6_CFG_QM_TENSOR_1_PADDING_VALUE 0xF86A54
+
+#define mmTPC6_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF86A58
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF86A5C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF86A60
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF86A64
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF86A68
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF86A6C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF86A70
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF86A74
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF86A78
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF86A7C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF86A80
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF86A84
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF86A88
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF86A8C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF86A90
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF86A94
+
+#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF86A98
+
+#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF86A9C
+
+#define mmTPC6_CFG_QM_TENSOR_2_PADDING_VALUE 0xF86AA0
+
+#define mmTPC6_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF86AA4
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF86AA8
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF86AAC
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF86AB0
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF86AB4
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF86AB8
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF86ABC
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF86AC0
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF86AC4
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF86AC8
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF86ACC
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF86AD0
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF86AD4
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF86AD8
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF86ADC
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF86AE0
+
+#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF86AE4
+
+#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF86AE8
+
+#define mmTPC6_CFG_QM_TENSOR_3_PADDING_VALUE 0xF86AEC
+
+#define mmTPC6_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF86AF0
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF86AF4
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF86AF8
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF86AFC
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF86B00
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF86B04
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF86B08
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF86B0C
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF86B10
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF86B14
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF86B18
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF86B1C
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF86B20
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF86B24
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF86B28
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF86B2C
+
+#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF86B30
+
+#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF86B34
+
+#define mmTPC6_CFG_QM_TENSOR_4_PADDING_VALUE 0xF86B38
+
+#define mmTPC6_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF86B3C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF86B40
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF86B44
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF86B48
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF86B4C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF86B50
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF86B54
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF86B58
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF86B5C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF86B60
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF86B64
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF86B68
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF86B6C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF86B70
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF86B74
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF86B78
+
+#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF86B7C
+
+#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF86B80
+
+#define mmTPC6_CFG_QM_TENSOR_5_PADDING_VALUE 0xF86B84
+
+#define mmTPC6_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF86B88
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF86B8C
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF86B90
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF86B94
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF86B98
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF86B9C
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF86BA0
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF86BA4
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF86BA8
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF86BAC
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF86BB0
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF86BB4
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF86BB8
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF86BBC
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF86BC0
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF86BC4
+
+#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF86BC8
+
+#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF86BCC
+
+#define mmTPC6_CFG_QM_TENSOR_6_PADDING_VALUE 0xF86BD0
+
+#define mmTPC6_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF86BD4
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF86BD8
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF86BDC
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF86BE0
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF86BE4
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF86BE8
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF86BEC
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF86BF0
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF86BF4
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF86BF8
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF86BFC
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF86C00
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF86C04
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF86C08
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF86C0C
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF86C10
+
+#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF86C14
+
+#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF86C18
+
+#define mmTPC6_CFG_QM_TENSOR_7_PADDING_VALUE 0xF86C1C
+
+#define mmTPC6_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF86C20
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF86C24
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF86C28
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF86C2C
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF86C30
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF86C34
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF86C38
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF86C3C
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF86C40
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF86C44
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF86C48
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF86C4C
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF86C50
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF86C54
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF86C58
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF86C5C
+
+#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF86C60
+
+#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF86C64
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_0 0xF86C68
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_0 0xF86C6C
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_1 0xF86C70
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_1 0xF86C74
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_2 0xF86C78
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_2 0xF86C7C
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_3 0xF86C80
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_3 0xF86C84
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_4 0xF86C88
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_4 0xF86C8C
+
+#define mmTPC6_CFG_QM_SRF_0 0xF86C90
+
+#define mmTPC6_CFG_QM_SRF_1 0xF86C94
+
+#define mmTPC6_CFG_QM_SRF_2 0xF86C98
+
+#define mmTPC6_CFG_QM_SRF_3 0xF86C9C
+
+#define mmTPC6_CFG_QM_SRF_4 0xF86CA0
+
+#define mmTPC6_CFG_QM_SRF_5 0xF86CA4
+
+#define mmTPC6_CFG_QM_SRF_6 0xF86CA8
+
+#define mmTPC6_CFG_QM_SRF_7 0xF86CAC
+
+#define mmTPC6_CFG_QM_SRF_8 0xF86CB0
+
+#define mmTPC6_CFG_QM_SRF_9 0xF86CB4
+
+#define mmTPC6_CFG_QM_SRF_10 0xF86CB8
+
+#define mmTPC6_CFG_QM_SRF_11 0xF86CBC
+
+#define mmTPC6_CFG_QM_SRF_12 0xF86CC0
+
+#define mmTPC6_CFG_QM_SRF_13 0xF86CC4
+
+#define mmTPC6_CFG_QM_SRF_14 0xF86CC8
+
+#define mmTPC6_CFG_QM_SRF_15 0xF86CCC
+
+#define mmTPC6_CFG_QM_SRF_16 0xF86CD0
+
+#define mmTPC6_CFG_QM_SRF_17 0xF86CD4
+
+#define mmTPC6_CFG_QM_SRF_18 0xF86CD8
+
+#define mmTPC6_CFG_QM_SRF_19 0xF86CDC
+
+#define mmTPC6_CFG_QM_SRF_20 0xF86CE0
+
+#define mmTPC6_CFG_QM_SRF_21 0xF86CE4
+
+#define mmTPC6_CFG_QM_SRF_22 0xF86CE8
+
+#define mmTPC6_CFG_QM_SRF_23 0xF86CEC
+
+#define mmTPC6_CFG_QM_SRF_24 0xF86CF0
+
+#define mmTPC6_CFG_QM_SRF_25 0xF86CF4
+
+#define mmTPC6_CFG_QM_SRF_26 0xF86CF8
+
+#define mmTPC6_CFG_QM_SRF_27 0xF86CFC
+
+#define mmTPC6_CFG_QM_SRF_28 0xF86D00
+
+#define mmTPC6_CFG_QM_SRF_29 0xF86D04
+
+#define mmTPC6_CFG_QM_SRF_30 0xF86D08
+
+#define mmTPC6_CFG_QM_SRF_31 0xF86D0C
+
+#define mmTPC6_CFG_QM_KERNEL_CONFIG 0xF86D10
+
+#define mmTPC6_CFG_QM_SYNC_OBJECT_MESSAGE 0xF86D14
+
+#define mmTPC6_CFG_ARUSER 0xF86D18
+
+#define mmTPC6_CFG_AWUSER 0xF86D1C
+
+#define mmTPC6_CFG_FUNC_MBIST_CNTRL 0xF86E00
+
+#define mmTPC6_CFG_FUNC_MBIST_PAT 0xF86E04
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_0 0xF86E08
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_1 0xF86E0C
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_2 0xF86E10
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_3 0xF86E14
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_4 0xF86E18
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_5 0xF86E1C
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_6 0xF86E20
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_7 0xF86E24
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_8 0xF86E28
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF86E2C
+
+#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
new file mode 100644
index 000000000000..fbca6b47284e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_CMDQ_REGS_H_
+#define ASIC_REG_TPC6_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC6_CMDQ_GLBL_CFG0 0xF89000
+
+#define mmTPC6_CMDQ_GLBL_CFG1 0xF89004
+
+#define mmTPC6_CMDQ_GLBL_PROT 0xF89008
+
+#define mmTPC6_CMDQ_GLBL_ERR_CFG 0xF8900C
+
+#define mmTPC6_CMDQ_GLBL_ERR_ADDR_LO 0xF89010
+
+#define mmTPC6_CMDQ_GLBL_ERR_ADDR_HI 0xF89014
+
+#define mmTPC6_CMDQ_GLBL_ERR_WDATA 0xF89018
+
+#define mmTPC6_CMDQ_GLBL_SECURE_PROPS 0xF8901C
+
+#define mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS 0xF89020
+
+#define mmTPC6_CMDQ_GLBL_STS0 0xF89024
+
+#define mmTPC6_CMDQ_GLBL_STS1 0xF89028
+
+#define mmTPC6_CMDQ_CQ_CFG0 0xF890B0
+
+#define mmTPC6_CMDQ_CQ_CFG1 0xF890B4
+
+#define mmTPC6_CMDQ_CQ_ARUSER 0xF890B8
+
+#define mmTPC6_CMDQ_CQ_PTR_LO 0xF890C0
+
+#define mmTPC6_CMDQ_CQ_PTR_HI 0xF890C4
+
+#define mmTPC6_CMDQ_CQ_TSIZE 0xF890C8
+
+#define mmTPC6_CMDQ_CQ_CTL 0xF890CC
+
+#define mmTPC6_CMDQ_CQ_PTR_LO_STS 0xF890D4
+
+#define mmTPC6_CMDQ_CQ_PTR_HI_STS 0xF890D8
+
+#define mmTPC6_CMDQ_CQ_TSIZE_STS 0xF890DC
+
+#define mmTPC6_CMDQ_CQ_CTL_STS 0xF890E0
+
+#define mmTPC6_CMDQ_CQ_STS0 0xF890E4
+
+#define mmTPC6_CMDQ_CQ_STS1 0xF890E8
+
+#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN 0xF890F0
+
+#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF890F4
+
+#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT 0xF890F8
+
+#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF890FC
+
+#define mmTPC6_CMDQ_CQ_IFIFO_CNT 0xF89108
+
+#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF89120
+
+#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF89124
+
+#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF89128
+
+#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF8912C
+
+#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF89130
+
+#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF89134
+
+#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF89138
+
+#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF8913C
+
+#define mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF89140
+
+#define mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF89144
+
+#define mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF89148
+
+#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF8914C
+
+#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF89150
+
+#define mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF89154
+
+#define mmTPC6_CMDQ_CP_FENCE0_RDATA 0xF89158
+
+#define mmTPC6_CMDQ_CP_FENCE1_RDATA 0xF8915C
+
+#define mmTPC6_CMDQ_CP_FENCE2_RDATA 0xF89160
+
+#define mmTPC6_CMDQ_CP_FENCE3_RDATA 0xF89164
+
+#define mmTPC6_CMDQ_CP_FENCE0_CNT 0xF89168
+
+#define mmTPC6_CMDQ_CP_FENCE1_CNT 0xF8916C
+
+#define mmTPC6_CMDQ_CP_FENCE2_CNT 0xF89170
+
+#define mmTPC6_CMDQ_CP_FENCE3_CNT 0xF89174
+
+#define mmTPC6_CMDQ_CP_STS 0xF89178
+
+#define mmTPC6_CMDQ_CP_CURRENT_INST_LO 0xF8917C
+
+#define mmTPC6_CMDQ_CP_CURRENT_INST_HI 0xF89180
+
+#define mmTPC6_CMDQ_CP_BARRIER_CFG 0xF89184
+
+#define mmTPC6_CMDQ_CP_DBG_0 0xF89188
+
+#define mmTPC6_CMDQ_CQ_BUF_ADDR 0xF89308
+
+#define mmTPC6_CMDQ_CQ_BUF_RDATA 0xF8930C
+
+#endif /* ASIC_REG_TPC6_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
new file mode 100644
index 000000000000..bf32465dabcb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_QM_REGS_H_
+#define ASIC_REG_TPC6_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC6_QM_GLBL_CFG0 0xF88000
+
+#define mmTPC6_QM_GLBL_CFG1 0xF88004
+
+#define mmTPC6_QM_GLBL_PROT 0xF88008
+
+#define mmTPC6_QM_GLBL_ERR_CFG 0xF8800C
+
+#define mmTPC6_QM_GLBL_ERR_ADDR_LO 0xF88010
+
+#define mmTPC6_QM_GLBL_ERR_ADDR_HI 0xF88014
+
+#define mmTPC6_QM_GLBL_ERR_WDATA 0xF88018
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS 0xF8801C
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS 0xF88020
+
+#define mmTPC6_QM_GLBL_STS0 0xF88024
+
+#define mmTPC6_QM_GLBL_STS1 0xF88028
+
+#define mmTPC6_QM_PQ_BASE_LO 0xF88060
+
+#define mmTPC6_QM_PQ_BASE_HI 0xF88064
+
+#define mmTPC6_QM_PQ_SIZE 0xF88068
+
+#define mmTPC6_QM_PQ_PI 0xF8806C
+
+#define mmTPC6_QM_PQ_CI 0xF88070
+
+#define mmTPC6_QM_PQ_CFG0 0xF88074
+
+#define mmTPC6_QM_PQ_CFG1 0xF88078
+
+#define mmTPC6_QM_PQ_ARUSER 0xF8807C
+
+#define mmTPC6_QM_PQ_PUSH0 0xF88080
+
+#define mmTPC6_QM_PQ_PUSH1 0xF88084
+
+#define mmTPC6_QM_PQ_PUSH2 0xF88088
+
+#define mmTPC6_QM_PQ_PUSH3 0xF8808C
+
+#define mmTPC6_QM_PQ_STS0 0xF88090
+
+#define mmTPC6_QM_PQ_STS1 0xF88094
+
+#define mmTPC6_QM_PQ_RD_RATE_LIM_EN 0xF880A0
+
+#define mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF880A4
+
+#define mmTPC6_QM_PQ_RD_RATE_LIM_SAT 0xF880A8
+
+#define mmTPC6_QM_PQ_RD_RATE_LIM_TOUT 0xF880AC
+
+#define mmTPC6_QM_CQ_CFG0 0xF880B0
+
+#define mmTPC6_QM_CQ_CFG1 0xF880B4
+
+#define mmTPC6_QM_CQ_ARUSER 0xF880B8
+
+#define mmTPC6_QM_CQ_PTR_LO 0xF880C0
+
+#define mmTPC6_QM_CQ_PTR_HI 0xF880C4
+
+#define mmTPC6_QM_CQ_TSIZE 0xF880C8
+
+#define mmTPC6_QM_CQ_CTL 0xF880CC
+
+#define mmTPC6_QM_CQ_PTR_LO_STS 0xF880D4
+
+#define mmTPC6_QM_CQ_PTR_HI_STS 0xF880D8
+
+#define mmTPC6_QM_CQ_TSIZE_STS 0xF880DC
+
+#define mmTPC6_QM_CQ_CTL_STS 0xF880E0
+
+#define mmTPC6_QM_CQ_STS0 0xF880E4
+
+#define mmTPC6_QM_CQ_STS1 0xF880E8
+
+#define mmTPC6_QM_CQ_RD_RATE_LIM_EN 0xF880F0
+
+#define mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF880F4
+
+#define mmTPC6_QM_CQ_RD_RATE_LIM_SAT 0xF880F8
+
+#define mmTPC6_QM_CQ_RD_RATE_LIM_TOUT 0xF880FC
+
+#define mmTPC6_QM_CQ_IFIFO_CNT 0xF88108
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO 0xF88120
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI 0xF88124
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO 0xF88128
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI 0xF8812C
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO 0xF88130
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI 0xF88134
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO 0xF88138
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI 0xF8813C
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET 0xF88140
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF88144
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF88148
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF8814C
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF88150
+
+#define mmTPC6_QM_CP_LDMA_COMMIT_OFFSET 0xF88154
+
+#define mmTPC6_QM_CP_FENCE0_RDATA 0xF88158
+
+#define mmTPC6_QM_CP_FENCE1_RDATA 0xF8815C
+
+#define mmTPC6_QM_CP_FENCE2_RDATA 0xF88160
+
+#define mmTPC6_QM_CP_FENCE3_RDATA 0xF88164
+
+#define mmTPC6_QM_CP_FENCE0_CNT 0xF88168
+
+#define mmTPC6_QM_CP_FENCE1_CNT 0xF8816C
+
+#define mmTPC6_QM_CP_FENCE2_CNT 0xF88170
+
+#define mmTPC6_QM_CP_FENCE3_CNT 0xF88174
+
+#define mmTPC6_QM_CP_STS 0xF88178
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO 0xF8817C
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI 0xF88180
+
+#define mmTPC6_QM_CP_BARRIER_CFG 0xF88184
+
+#define mmTPC6_QM_CP_DBG_0 0xF88188
+
+#define mmTPC6_QM_PQ_BUF_ADDR 0xF88300
+
+#define mmTPC6_QM_PQ_BUF_RDATA 0xF88304
+
+#define mmTPC6_QM_CQ_BUF_ADDR 0xF88308
+
+#define mmTPC6_QM_CQ_BUF_RDATA 0xF8830C
+
+#endif /* ASIC_REG_TPC6_QM_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
new file mode 100644
index 000000000000..609bb90e1046
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_RTR_REGS_H_
+#define ASIC_REG_TPC6_RTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_RTR (Prototype: TPC_RTR)
+ *****************************************
+ */
+
+#define mmTPC6_RTR_HBW_RD_RQ_E_ARB 0xF80100
+
+#define mmTPC6_RTR_HBW_RD_RQ_W_ARB 0xF80104
+
+#define mmTPC6_RTR_HBW_RD_RQ_N_ARB 0xF80108
+
+#define mmTPC6_RTR_HBW_RD_RQ_S_ARB 0xF8010C
+
+#define mmTPC6_RTR_HBW_RD_RQ_L_ARB 0xF80110
+
+#define mmTPC6_RTR_HBW_E_ARB_MAX 0xF80120
+
+#define mmTPC6_RTR_HBW_W_ARB_MAX 0xF80124
+
+#define mmTPC6_RTR_HBW_N_ARB_MAX 0xF80128
+
+#define mmTPC6_RTR_HBW_S_ARB_MAX 0xF8012C
+
+#define mmTPC6_RTR_HBW_L_ARB_MAX 0xF80130
+
+#define mmTPC6_RTR_HBW_RD_RS_E_ARB 0xF80140
+
+#define mmTPC6_RTR_HBW_RD_RS_W_ARB 0xF80144
+
+#define mmTPC6_RTR_HBW_RD_RS_N_ARB 0xF80148
+
+#define mmTPC6_RTR_HBW_RD_RS_S_ARB 0xF8014C
+
+#define mmTPC6_RTR_HBW_RD_RS_L_ARB 0xF80150
+
+#define mmTPC6_RTR_HBW_WR_RQ_E_ARB 0xF80170
+
+#define mmTPC6_RTR_HBW_WR_RQ_W_ARB 0xF80174
+
+#define mmTPC6_RTR_HBW_WR_RQ_N_ARB 0xF80178
+
+#define mmTPC6_RTR_HBW_WR_RQ_S_ARB 0xF8017C
+
+#define mmTPC6_RTR_HBW_WR_RQ_L_ARB 0xF80180
+
+#define mmTPC6_RTR_HBW_WR_RS_E_ARB 0xF80190
+
+#define mmTPC6_RTR_HBW_WR_RS_W_ARB 0xF80194
+
+#define mmTPC6_RTR_HBW_WR_RS_N_ARB 0xF80198
+
+#define mmTPC6_RTR_HBW_WR_RS_S_ARB 0xF8019C
+
+#define mmTPC6_RTR_HBW_WR_RS_L_ARB 0xF801A0
+
+#define mmTPC6_RTR_LBW_RD_RQ_E_ARB 0xF80200
+
+#define mmTPC6_RTR_LBW_RD_RQ_W_ARB 0xF80204
+
+#define mmTPC6_RTR_LBW_RD_RQ_N_ARB 0xF80208
+
+#define mmTPC6_RTR_LBW_RD_RQ_S_ARB 0xF8020C
+
+#define mmTPC6_RTR_LBW_RD_RQ_L_ARB 0xF80210
+
+#define mmTPC6_RTR_LBW_E_ARB_MAX 0xF80220
+
+#define mmTPC6_RTR_LBW_W_ARB_MAX 0xF80224
+
+#define mmTPC6_RTR_LBW_N_ARB_MAX 0xF80228
+
+#define mmTPC6_RTR_LBW_S_ARB_MAX 0xF8022C
+
+#define mmTPC6_RTR_LBW_L_ARB_MAX 0xF80230
+
+#define mmTPC6_RTR_LBW_RD_RS_E_ARB 0xF80250
+
+#define mmTPC6_RTR_LBW_RD_RS_W_ARB 0xF80254
+
+#define mmTPC6_RTR_LBW_RD_RS_N_ARB 0xF80258
+
+#define mmTPC6_RTR_LBW_RD_RS_S_ARB 0xF8025C
+
+#define mmTPC6_RTR_LBW_RD_RS_L_ARB 0xF80260
+
+#define mmTPC6_RTR_LBW_WR_RQ_E_ARB 0xF80270
+
+#define mmTPC6_RTR_LBW_WR_RQ_W_ARB 0xF80274
+
+#define mmTPC6_RTR_LBW_WR_RQ_N_ARB 0xF80278
+
+#define mmTPC6_RTR_LBW_WR_RQ_S_ARB 0xF8027C
+
+#define mmTPC6_RTR_LBW_WR_RQ_L_ARB 0xF80280
+
+#define mmTPC6_RTR_LBW_WR_RS_E_ARB 0xF80290
+
+#define mmTPC6_RTR_LBW_WR_RS_W_ARB 0xF80294
+
+#define mmTPC6_RTR_LBW_WR_RS_N_ARB 0xF80298
+
+#define mmTPC6_RTR_LBW_WR_RS_S_ARB 0xF8029C
+
+#define mmTPC6_RTR_LBW_WR_RS_L_ARB 0xF802A0
+
+#define mmTPC6_RTR_DBG_E_ARB 0xF80300
+
+#define mmTPC6_RTR_DBG_W_ARB 0xF80304
+
+#define mmTPC6_RTR_DBG_N_ARB 0xF80308
+
+#define mmTPC6_RTR_DBG_S_ARB 0xF8030C
+
+#define mmTPC6_RTR_DBG_L_ARB 0xF80310
+
+#define mmTPC6_RTR_DBG_E_ARB_MAX 0xF80320
+
+#define mmTPC6_RTR_DBG_W_ARB_MAX 0xF80324
+
+#define mmTPC6_RTR_DBG_N_ARB_MAX 0xF80328
+
+#define mmTPC6_RTR_DBG_S_ARB_MAX 0xF8032C
+
+#define mmTPC6_RTR_DBG_L_ARB_MAX 0xF80330
+
+#define mmTPC6_RTR_SPLIT_COEF_0 0xF80400
+
+#define mmTPC6_RTR_SPLIT_COEF_1 0xF80404
+
+#define mmTPC6_RTR_SPLIT_COEF_2 0xF80408
+
+#define mmTPC6_RTR_SPLIT_COEF_3 0xF8040C
+
+#define mmTPC6_RTR_SPLIT_COEF_4 0xF80410
+
+#define mmTPC6_RTR_SPLIT_COEF_5 0xF80414
+
+#define mmTPC6_RTR_SPLIT_COEF_6 0xF80418
+
+#define mmTPC6_RTR_SPLIT_COEF_7 0xF8041C
+
+#define mmTPC6_RTR_SPLIT_COEF_8 0xF80420
+
+#define mmTPC6_RTR_SPLIT_COEF_9 0xF80424
+
+#define mmTPC6_RTR_SPLIT_CFG 0xF80440
+
+#define mmTPC6_RTR_SPLIT_RD_SAT 0xF80444
+
+#define mmTPC6_RTR_SPLIT_RD_RST_TOKEN 0xF80448
+
+#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_0 0xF8044C
+
+#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_1 0xF80450
+
+#define mmTPC6_RTR_SPLIT_WR_SAT 0xF80454
+
+#define mmTPC6_RTR_WPLIT_WR_TST_TOLEN 0xF80458
+
+#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_0 0xF8045C
+
+#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_1 0xF80460
+
+#define mmTPC6_RTR_HBW_RANGE_HIT 0xF80470
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_0 0xF80480
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_1 0xF80484
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_2 0xF80488
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_3 0xF8048C
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_4 0xF80490
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_5 0xF80494
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_6 0xF80498
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_L_7 0xF8049C
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_0 0xF804A0
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_1 0xF804A4
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_2 0xF804A8
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_3 0xF804AC
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_4 0xF804B0
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_5 0xF804B4
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_6 0xF804B8
+
+#define mmTPC6_RTR_HBW_RANGE_MASK_H_7 0xF804BC
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_0 0xF804C0
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_1 0xF804C4
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_2 0xF804C8
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_3 0xF804CC
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_4 0xF804D0
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_5 0xF804D4
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_6 0xF804D8
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_L_7 0xF804DC
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_0 0xF804E0
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_1 0xF804E4
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_2 0xF804E8
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_3 0xF804EC
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_4 0xF804F0
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_5 0xF804F4
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_6 0xF804F8
+
+#define mmTPC6_RTR_HBW_RANGE_BASE_H_7 0xF804FC
+
+#define mmTPC6_RTR_LBW_RANGE_HIT 0xF80500
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_0 0xF80510
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_1 0xF80514
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_2 0xF80518
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_3 0xF8051C
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_4 0xF80520
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_5 0xF80524
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_6 0xF80528
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_7 0xF8052C
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_8 0xF80530
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_9 0xF80534
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_10 0xF80538
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_11 0xF8053C
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_12 0xF80540
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_13 0xF80544
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_14 0xF80548
+
+#define mmTPC6_RTR_LBW_RANGE_MASK_15 0xF8054C
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_0 0xF80550
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_1 0xF80554
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_2 0xF80558
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_3 0xF8055C
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_4 0xF80560
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_5 0xF80564
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_6 0xF80568
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_7 0xF8056C
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_8 0xF80570
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_9 0xF80574
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_10 0xF80578
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_11 0xF8057C
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_12 0xF80580
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_13 0xF80584
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_14 0xF80588
+
+#define mmTPC6_RTR_LBW_RANGE_BASE_15 0xF8058C
+
+#define mmTPC6_RTR_RGLTR 0xF80590
+
+#define mmTPC6_RTR_RGLTR_WR_RESULT 0xF80594
+
+#define mmTPC6_RTR_RGLTR_RD_RESULT 0xF80598
+
+#define mmTPC6_RTR_SCRAMB_EN 0xF80600
+
+#define mmTPC6_RTR_NON_LIN_SCRAMB 0xF80604
+
+#endif /* ASIC_REG_TPC6_RTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
new file mode 100644
index 000000000000..bf2fd0f73906
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
@@ -0,0 +1,887 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_CFG_REGS_H_
+#define ASIC_REG_TPC7_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xFC6400
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xFC6404
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xFC6408
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xFC640C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xFC6410
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xFC6414
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6418
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xFC641C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xFC6420
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6424
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xFC6428
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xFC642C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6430
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xFC6434
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xFC6438
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xFC643C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xFC6440
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xFC6444
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6448
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xFC644C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xFC6450
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xFC6454
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xFC6458
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xFC645C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xFC6460
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6464
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xFC6468
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xFC646C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6470
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xFC6474
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xFC6478
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xFC647C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xFC6480
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xFC6484
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6488
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xFC648C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xFC6490
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6494
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xFC6498
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xFC649C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xFC64A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xFC64A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xFC64A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xFC64AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xFC64B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xFC64B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xFC64B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xFC64BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xFC64C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xFC64C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xFC64C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xFC64CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xFC64D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xFC64D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xFC64D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xFC64DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xFC64E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xFC64E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xFC64E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xFC64EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xFC64F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xFC64F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xFC64F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xFC64FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xFC6500
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xFC6504
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6508
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xFC650C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xFC6510
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6514
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xFC6518
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xFC651C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6520
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xFC6524
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xFC6528
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xFC652C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xFC6530
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xFC6534
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xFC6538
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xFC653C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xFC6540
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xFC6544
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6548
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xFC654C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xFC6550
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6554
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xFC6558
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xFC655C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6560
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xFC6564
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xFC6568
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xFC656C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xFC6570
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xFC6574
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6578
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xFC657C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xFC6580
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xFC6584
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xFC6588
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xFC658C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xFC6590
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6594
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xFC6598
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xFC659C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xFC65A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xFC65A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xFC65A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xFC65AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xFC65B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xFC65B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xFC65B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xFC65BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xFC65C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xFC65C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xFC65C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xFC65CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xFC65D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xFC65D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xFC65D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xFC65DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xFC65E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xFC65E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xFC65E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xFC65EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xFC65F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xFC65F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xFC65F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xFC65FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xFC6600
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6604
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xFC6608
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xFC660C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6610
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xFC6614
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xFC6618
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xFC661C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xFC6620
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xFC6624
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xFC6628
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xFC662C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xFC6630
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xFC6634
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6638
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xFC663C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xFC6640
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6644
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xFC6648
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xFC664C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6650
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xFC6654
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xFC6658
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xFC665C
+
+#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xFC6660
+
+#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xFC6664
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_0 0xFC6668
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_0 0xFC666C
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_1 0xFC6670
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_1 0xFC6674
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_2 0xFC6678
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_2 0xFC667C
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_3 0xFC6680
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_3 0xFC6684
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_4 0xFC6688
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_4 0xFC668C
+
+#define mmTPC7_CFG_KERNEL_SRF_0 0xFC6690
+
+#define mmTPC7_CFG_KERNEL_SRF_1 0xFC6694
+
+#define mmTPC7_CFG_KERNEL_SRF_2 0xFC6698
+
+#define mmTPC7_CFG_KERNEL_SRF_3 0xFC669C
+
+#define mmTPC7_CFG_KERNEL_SRF_4 0xFC66A0
+
+#define mmTPC7_CFG_KERNEL_SRF_5 0xFC66A4
+
+#define mmTPC7_CFG_KERNEL_SRF_6 0xFC66A8
+
+#define mmTPC7_CFG_KERNEL_SRF_7 0xFC66AC
+
+#define mmTPC7_CFG_KERNEL_SRF_8 0xFC66B0
+
+#define mmTPC7_CFG_KERNEL_SRF_9 0xFC66B4
+
+#define mmTPC7_CFG_KERNEL_SRF_10 0xFC66B8
+
+#define mmTPC7_CFG_KERNEL_SRF_11 0xFC66BC
+
+#define mmTPC7_CFG_KERNEL_SRF_12 0xFC66C0
+
+#define mmTPC7_CFG_KERNEL_SRF_13 0xFC66C4
+
+#define mmTPC7_CFG_KERNEL_SRF_14 0xFC66C8
+
+#define mmTPC7_CFG_KERNEL_SRF_15 0xFC66CC
+
+#define mmTPC7_CFG_KERNEL_SRF_16 0xFC66D0
+
+#define mmTPC7_CFG_KERNEL_SRF_17 0xFC66D4
+
+#define mmTPC7_CFG_KERNEL_SRF_18 0xFC66D8
+
+#define mmTPC7_CFG_KERNEL_SRF_19 0xFC66DC
+
+#define mmTPC7_CFG_KERNEL_SRF_20 0xFC66E0
+
+#define mmTPC7_CFG_KERNEL_SRF_21 0xFC66E4
+
+#define mmTPC7_CFG_KERNEL_SRF_22 0xFC66E8
+
+#define mmTPC7_CFG_KERNEL_SRF_23 0xFC66EC
+
+#define mmTPC7_CFG_KERNEL_SRF_24 0xFC66F0
+
+#define mmTPC7_CFG_KERNEL_SRF_25 0xFC66F4
+
+#define mmTPC7_CFG_KERNEL_SRF_26 0xFC66F8
+
+#define mmTPC7_CFG_KERNEL_SRF_27 0xFC66FC
+
+#define mmTPC7_CFG_KERNEL_SRF_28 0xFC6700
+
+#define mmTPC7_CFG_KERNEL_SRF_29 0xFC6704
+
+#define mmTPC7_CFG_KERNEL_SRF_30 0xFC6708
+
+#define mmTPC7_CFG_KERNEL_SRF_31 0xFC670C
+
+#define mmTPC7_CFG_KERNEL_KERNEL_CONFIG 0xFC6710
+
+#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xFC6714
+
+#define mmTPC7_CFG_RESERVED_DESC_END 0xFC6738
+
+#define mmTPC7_CFG_ROUND_CSR 0xFC67FC
+
+#define mmTPC7_CFG_TBUF_BASE_ADDR_LOW 0xFC6800
+
+#define mmTPC7_CFG_TBUF_BASE_ADDR_HIGH 0xFC6804
+
+#define mmTPC7_CFG_SEMAPHORE 0xFC6808
+
+#define mmTPC7_CFG_VFLAGS 0xFC680C
+
+#define mmTPC7_CFG_SFLAGS 0xFC6810
+
+#define mmTPC7_CFG_LFSR_POLYNOM 0xFC6818
+
+#define mmTPC7_CFG_STATUS 0xFC681C
+
+#define mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH 0xFC6820
+
+#define mmTPC7_CFG_CFG_SUBTRACT_VALUE 0xFC6824
+
+#define mmTPC7_CFG_SM_BASE_ADDRESS_LOW 0xFC6828
+
+#define mmTPC7_CFG_SM_BASE_ADDRESS_HIGH 0xFC682C
+
+#define mmTPC7_CFG_TPC_CMD 0xFC6830
+
+#define mmTPC7_CFG_TPC_EXECUTE 0xFC6838
+
+#define mmTPC7_CFG_TPC_STALL 0xFC683C
+
+#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_LOW 0xFC6840
+
+#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH 0xFC6844
+
+#define mmTPC7_CFG_MSS_CONFIG 0xFC6854
+
+#define mmTPC7_CFG_TPC_INTR_CAUSE 0xFC6858
+
+#define mmTPC7_CFG_TPC_INTR_MASK 0xFC685C
+
+#define mmTPC7_CFG_TSB_CONFIG 0xFC6860
+
+#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xFC6A00
+
+#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xFC6A04
+
+#define mmTPC7_CFG_QM_TENSOR_0_PADDING_VALUE 0xFC6A08
+
+#define mmTPC7_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xFC6A0C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_SIZE 0xFC6A10
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xFC6A14
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6A18
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_SIZE 0xFC6A1C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xFC6A20
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6A24
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_SIZE 0xFC6A28
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xFC6A2C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6A30
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_SIZE 0xFC6A34
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xFC6A38
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xFC6A3C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_SIZE 0xFC6A40
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xFC6A44
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6A48
+
+#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xFC6A4C
+
+#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xFC6A50
+
+#define mmTPC7_CFG_QM_TENSOR_1_PADDING_VALUE 0xFC6A54
+
+#define mmTPC7_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xFC6A58
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_SIZE 0xFC6A5C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xFC6A60
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6A64
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_SIZE 0xFC6A68
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xFC6A6C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6A70
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_SIZE 0xFC6A74
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xFC6A78
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xFC6A7C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_SIZE 0xFC6A80
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xFC6A84
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6A88
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_SIZE 0xFC6A8C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xFC6A90
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6A94
+
+#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xFC6A98
+
+#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xFC6A9C
+
+#define mmTPC7_CFG_QM_TENSOR_2_PADDING_VALUE 0xFC6AA0
+
+#define mmTPC7_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xFC6AA4
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_SIZE 0xFC6AA8
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xFC6AAC
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xFC6AB0
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_SIZE 0xFC6AB4
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xFC6AB8
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xFC6ABC
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_SIZE 0xFC6AC0
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xFC6AC4
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xFC6AC8
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_SIZE 0xFC6ACC
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xFC6AD0
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xFC6AD4
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_SIZE 0xFC6AD8
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xFC6ADC
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xFC6AE0
+
+#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xFC6AE4
+
+#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xFC6AE8
+
+#define mmTPC7_CFG_QM_TENSOR_3_PADDING_VALUE 0xFC6AEC
+
+#define mmTPC7_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xFC6AF0
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_SIZE 0xFC6AF4
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xFC6AF8
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xFC6AFC
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_SIZE 0xFC6B00
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xFC6B04
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6B08
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_SIZE 0xFC6B0C
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xFC6B10
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6B14
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_SIZE 0xFC6B18
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xFC6B1C
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6B20
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_SIZE 0xFC6B24
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xFC6B28
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xFC6B2C
+
+#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xFC6B30
+
+#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xFC6B34
+
+#define mmTPC7_CFG_QM_TENSOR_4_PADDING_VALUE 0xFC6B38
+
+#define mmTPC7_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xFC6B3C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_SIZE 0xFC6B40
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xFC6B44
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6B48
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_SIZE 0xFC6B4C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xFC6B50
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6B54
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_SIZE 0xFC6B58
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xFC6B5C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6B60
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_SIZE 0xFC6B64
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xFC6B68
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xFC6B6C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_SIZE 0xFC6B70
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xFC6B74
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6B78
+
+#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xFC6B7C
+
+#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xFC6B80
+
+#define mmTPC7_CFG_QM_TENSOR_5_PADDING_VALUE 0xFC6B84
+
+#define mmTPC7_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xFC6B88
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_SIZE 0xFC6B8C
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xFC6B90
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6B94
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_SIZE 0xFC6B98
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xFC6B9C
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xFC6BA0
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_SIZE 0xFC6BA4
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xFC6BA8
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xFC6BAC
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_SIZE 0xFC6BB0
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xFC6BB4
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xFC6BB8
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_SIZE 0xFC6BBC
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xFC6BC0
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xFC6BC4
+
+#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xFC6BC8
+
+#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xFC6BCC
+
+#define mmTPC7_CFG_QM_TENSOR_6_PADDING_VALUE 0xFC6BD0
+
+#define mmTPC7_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xFC6BD4
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_SIZE 0xFC6BD8
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xFC6BDC
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xFC6BE0
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_SIZE 0xFC6BE4
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xFC6BE8
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xFC6BEC
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_SIZE 0xFC6BF0
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xFC6BF4
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xFC6BF8
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_SIZE 0xFC6BFC
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xFC6C00
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6C04
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_SIZE 0xFC6C08
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xFC6C0C
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6C10
+
+#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xFC6C14
+
+#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xFC6C18
+
+#define mmTPC7_CFG_QM_TENSOR_7_PADDING_VALUE 0xFC6C1C
+
+#define mmTPC7_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xFC6C20
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_SIZE 0xFC6C24
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xFC6C28
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xFC6C2C
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_SIZE 0xFC6C30
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xFC6C34
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6C38
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_SIZE 0xFC6C3C
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xFC6C40
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6C44
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_SIZE 0xFC6C48
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xFC6C4C
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6C50
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_SIZE 0xFC6C54
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xFC6C58
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xFC6C5C
+
+#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xFC6C60
+
+#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xFC6C64
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_0 0xFC6C68
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_0 0xFC6C6C
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_1 0xFC6C70
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_1 0xFC6C74
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_2 0xFC6C78
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_2 0xFC6C7C
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_3 0xFC6C80
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_3 0xFC6C84
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_4 0xFC6C88
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_4 0xFC6C8C
+
+#define mmTPC7_CFG_QM_SRF_0 0xFC6C90
+
+#define mmTPC7_CFG_QM_SRF_1 0xFC6C94
+
+#define mmTPC7_CFG_QM_SRF_2 0xFC6C98
+
+#define mmTPC7_CFG_QM_SRF_3 0xFC6C9C
+
+#define mmTPC7_CFG_QM_SRF_4 0xFC6CA0
+
+#define mmTPC7_CFG_QM_SRF_5 0xFC6CA4
+
+#define mmTPC7_CFG_QM_SRF_6 0xFC6CA8
+
+#define mmTPC7_CFG_QM_SRF_7 0xFC6CAC
+
+#define mmTPC7_CFG_QM_SRF_8 0xFC6CB0
+
+#define mmTPC7_CFG_QM_SRF_9 0xFC6CB4
+
+#define mmTPC7_CFG_QM_SRF_10 0xFC6CB8
+
+#define mmTPC7_CFG_QM_SRF_11 0xFC6CBC
+
+#define mmTPC7_CFG_QM_SRF_12 0xFC6CC0
+
+#define mmTPC7_CFG_QM_SRF_13 0xFC6CC4
+
+#define mmTPC7_CFG_QM_SRF_14 0xFC6CC8
+
+#define mmTPC7_CFG_QM_SRF_15 0xFC6CCC
+
+#define mmTPC7_CFG_QM_SRF_16 0xFC6CD0
+
+#define mmTPC7_CFG_QM_SRF_17 0xFC6CD4
+
+#define mmTPC7_CFG_QM_SRF_18 0xFC6CD8
+
+#define mmTPC7_CFG_QM_SRF_19 0xFC6CDC
+
+#define mmTPC7_CFG_QM_SRF_20 0xFC6CE0
+
+#define mmTPC7_CFG_QM_SRF_21 0xFC6CE4
+
+#define mmTPC7_CFG_QM_SRF_22 0xFC6CE8
+
+#define mmTPC7_CFG_QM_SRF_23 0xFC6CEC
+
+#define mmTPC7_CFG_QM_SRF_24 0xFC6CF0
+
+#define mmTPC7_CFG_QM_SRF_25 0xFC6CF4
+
+#define mmTPC7_CFG_QM_SRF_26 0xFC6CF8
+
+#define mmTPC7_CFG_QM_SRF_27 0xFC6CFC
+
+#define mmTPC7_CFG_QM_SRF_28 0xFC6D00
+
+#define mmTPC7_CFG_QM_SRF_29 0xFC6D04
+
+#define mmTPC7_CFG_QM_SRF_30 0xFC6D08
+
+#define mmTPC7_CFG_QM_SRF_31 0xFC6D0C
+
+#define mmTPC7_CFG_QM_KERNEL_CONFIG 0xFC6D10
+
+#define mmTPC7_CFG_QM_SYNC_OBJECT_MESSAGE 0xFC6D14
+
+#define mmTPC7_CFG_ARUSER 0xFC6D18
+
+#define mmTPC7_CFG_AWUSER 0xFC6D1C
+
+#define mmTPC7_CFG_FUNC_MBIST_CNTRL 0xFC6E00
+
+#define mmTPC7_CFG_FUNC_MBIST_PAT 0xFC6E04
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_0 0xFC6E08
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_1 0xFC6E0C
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_2 0xFC6E10
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_3 0xFC6E14
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_4 0xFC6E18
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_5 0xFC6E1C
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_6 0xFC6E20
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_7 0xFC6E24
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_8 0xFC6E28
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC6E2C
+
+#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
new file mode 100644
index 000000000000..65d83043bf63
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_CMDQ_REGS_H_
+#define ASIC_REG_TPC7_CMDQ_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_CMDQ (Prototype: CMDQ)
+ *****************************************
+ */
+
+#define mmTPC7_CMDQ_GLBL_CFG0 0xFC9000
+
+#define mmTPC7_CMDQ_GLBL_CFG1 0xFC9004
+
+#define mmTPC7_CMDQ_GLBL_PROT 0xFC9008
+
+#define mmTPC7_CMDQ_GLBL_ERR_CFG 0xFC900C
+
+#define mmTPC7_CMDQ_GLBL_ERR_ADDR_LO 0xFC9010
+
+#define mmTPC7_CMDQ_GLBL_ERR_ADDR_HI 0xFC9014
+
+#define mmTPC7_CMDQ_GLBL_ERR_WDATA 0xFC9018
+
+#define mmTPC7_CMDQ_GLBL_SECURE_PROPS 0xFC901C
+
+#define mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS 0xFC9020
+
+#define mmTPC7_CMDQ_GLBL_STS0 0xFC9024
+
+#define mmTPC7_CMDQ_GLBL_STS1 0xFC9028
+
+#define mmTPC7_CMDQ_CQ_CFG0 0xFC90B0
+
+#define mmTPC7_CMDQ_CQ_CFG1 0xFC90B4
+
+#define mmTPC7_CMDQ_CQ_ARUSER 0xFC90B8
+
+#define mmTPC7_CMDQ_CQ_PTR_LO 0xFC90C0
+
+#define mmTPC7_CMDQ_CQ_PTR_HI 0xFC90C4
+
+#define mmTPC7_CMDQ_CQ_TSIZE 0xFC90C8
+
+#define mmTPC7_CMDQ_CQ_CTL 0xFC90CC
+
+#define mmTPC7_CMDQ_CQ_PTR_LO_STS 0xFC90D4
+
+#define mmTPC7_CMDQ_CQ_PTR_HI_STS 0xFC90D8
+
+#define mmTPC7_CMDQ_CQ_TSIZE_STS 0xFC90DC
+
+#define mmTPC7_CMDQ_CQ_CTL_STS 0xFC90E0
+
+#define mmTPC7_CMDQ_CQ_STS0 0xFC90E4
+
+#define mmTPC7_CMDQ_CQ_STS1 0xFC90E8
+
+#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN 0xFC90F0
+
+#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xFC90F4
+
+#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT 0xFC90F8
+
+#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT 0xFC90FC
+
+#define mmTPC7_CMDQ_CQ_IFIFO_CNT 0xFC9108
+
+#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO 0xFC9120
+
+#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI 0xFC9124
+
+#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO 0xFC9128
+
+#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI 0xFC912C
+
+#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO 0xFC9130
+
+#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI 0xFC9134
+
+#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO 0xFC9138
+
+#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI 0xFC913C
+
+#define mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET 0xFC9140
+
+#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC9144
+
+#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC9148
+
+#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xFC914C
+
+#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xFC9150
+
+#define mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET 0xFC9154
+
+#define mmTPC7_CMDQ_CP_FENCE0_RDATA 0xFC9158
+
+#define mmTPC7_CMDQ_CP_FENCE1_RDATA 0xFC915C
+
+#define mmTPC7_CMDQ_CP_FENCE2_RDATA 0xFC9160
+
+#define mmTPC7_CMDQ_CP_FENCE3_RDATA 0xFC9164
+
+#define mmTPC7_CMDQ_CP_FENCE0_CNT 0xFC9168
+
+#define mmTPC7_CMDQ_CP_FENCE1_CNT 0xFC916C
+
+#define mmTPC7_CMDQ_CP_FENCE2_CNT 0xFC9170
+
+#define mmTPC7_CMDQ_CP_FENCE3_CNT 0xFC9174
+
+#define mmTPC7_CMDQ_CP_STS 0xFC9178
+
+#define mmTPC7_CMDQ_CP_CURRENT_INST_LO 0xFC917C
+
+#define mmTPC7_CMDQ_CP_CURRENT_INST_HI 0xFC9180
+
+#define mmTPC7_CMDQ_CP_BARRIER_CFG 0xFC9184
+
+#define mmTPC7_CMDQ_CP_DBG_0 0xFC9188
+
+#define mmTPC7_CMDQ_CQ_BUF_ADDR 0xFC9308
+
+#define mmTPC7_CMDQ_CQ_BUF_RDATA 0xFC930C
+
+#endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
new file mode 100644
index 000000000000..3d5848d87304
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_NRTR_REGS_H_
+#define ASIC_REG_TPC7_NRTR_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_NRTR (Prototype: IF_NRTR)
+ *****************************************
+ */
+
+#define mmTPC7_NRTR_HBW_MAX_CRED 0xFC0100
+
+#define mmTPC7_NRTR_LBW_MAX_CRED 0xFC0120
+
+#define mmTPC7_NRTR_DBG_E_ARB 0xFC0300
+
+#define mmTPC7_NRTR_DBG_W_ARB 0xFC0304
+
+#define mmTPC7_NRTR_DBG_N_ARB 0xFC0308
+
+#define mmTPC7_NRTR_DBG_S_ARB 0xFC030C
+
+#define mmTPC7_NRTR_DBG_L_ARB 0xFC0310
+
+#define mmTPC7_NRTR_DBG_E_ARB_MAX 0xFC0320
+
+#define mmTPC7_NRTR_DBG_W_ARB_MAX 0xFC0324
+
+#define mmTPC7_NRTR_DBG_N_ARB_MAX 0xFC0328
+
+#define mmTPC7_NRTR_DBG_S_ARB_MAX 0xFC032C
+
+#define mmTPC7_NRTR_DBG_L_ARB_MAX 0xFC0330
+
+#define mmTPC7_NRTR_SPLIT_COEF_0 0xFC0400
+
+#define mmTPC7_NRTR_SPLIT_COEF_1 0xFC0404
+
+#define mmTPC7_NRTR_SPLIT_COEF_2 0xFC0408
+
+#define mmTPC7_NRTR_SPLIT_COEF_3 0xFC040C
+
+#define mmTPC7_NRTR_SPLIT_COEF_4 0xFC0410
+
+#define mmTPC7_NRTR_SPLIT_COEF_5 0xFC0414
+
+#define mmTPC7_NRTR_SPLIT_COEF_6 0xFC0418
+
+#define mmTPC7_NRTR_SPLIT_COEF_7 0xFC041C
+
+#define mmTPC7_NRTR_SPLIT_COEF_8 0xFC0420
+
+#define mmTPC7_NRTR_SPLIT_COEF_9 0xFC0424
+
+#define mmTPC7_NRTR_SPLIT_CFG 0xFC0440
+
+#define mmTPC7_NRTR_SPLIT_RD_SAT 0xFC0444
+
+#define mmTPC7_NRTR_SPLIT_RD_RST_TOKEN 0xFC0448
+
+#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_0 0xFC044C
+
+#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_1 0xFC0450
+
+#define mmTPC7_NRTR_SPLIT_WR_SAT 0xFC0454
+
+#define mmTPC7_NRTR_WPLIT_WR_TST_TOLEN 0xFC0458
+
+#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_0 0xFC045C
+
+#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_1 0xFC0460
+
+#define mmTPC7_NRTR_HBW_RANGE_HIT 0xFC0470
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_0 0xFC0480
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_1 0xFC0484
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_2 0xFC0488
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_3 0xFC048C
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_4 0xFC0490
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_5 0xFC0494
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_6 0xFC0498
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_L_7 0xFC049C
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_0 0xFC04A0
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_1 0xFC04A4
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_2 0xFC04A8
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_3 0xFC04AC
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_4 0xFC04B0
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_5 0xFC04B4
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_6 0xFC04B8
+
+#define mmTPC7_NRTR_HBW_RANGE_MASK_H_7 0xFC04BC
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_0 0xFC04C0
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_1 0xFC04C4
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_2 0xFC04C8
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_3 0xFC04CC
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_4 0xFC04D0
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_5 0xFC04D4
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_6 0xFC04D8
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_L_7 0xFC04DC
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_0 0xFC04E0
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_1 0xFC04E4
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_2 0xFC04E8
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_3 0xFC04EC
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_4 0xFC04F0
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_5 0xFC04F4
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_6 0xFC04F8
+
+#define mmTPC7_NRTR_HBW_RANGE_BASE_H_7 0xFC04FC
+
+#define mmTPC7_NRTR_LBW_RANGE_HIT 0xFC0500
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_0 0xFC0510
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_1 0xFC0514
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_2 0xFC0518
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_3 0xFC051C
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_4 0xFC0520
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_5 0xFC0524
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_6 0xFC0528
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_7 0xFC052C
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_8 0xFC0530
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_9 0xFC0534
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_10 0xFC0538
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_11 0xFC053C
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_12 0xFC0540
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_13 0xFC0544
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_14 0xFC0548
+
+#define mmTPC7_NRTR_LBW_RANGE_MASK_15 0xFC054C
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_0 0xFC0550
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_1 0xFC0554
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_2 0xFC0558
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_3 0xFC055C
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_4 0xFC0560
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_5 0xFC0564
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_6 0xFC0568
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_7 0xFC056C
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_8 0xFC0570
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_9 0xFC0574
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_10 0xFC0578
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_11 0xFC057C
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_12 0xFC0580
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_13 0xFC0584
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_14 0xFC0588
+
+#define mmTPC7_NRTR_LBW_RANGE_BASE_15 0xFC058C
+
+#define mmTPC7_NRTR_RGLTR 0xFC0590
+
+#define mmTPC7_NRTR_RGLTR_WR_RESULT 0xFC0594
+
+#define mmTPC7_NRTR_RGLTR_RD_RESULT 0xFC0598
+
+#define mmTPC7_NRTR_SCRAMB_EN 0xFC0600
+
+#define mmTPC7_NRTR_NON_LIN_SCRAMB 0xFC0604
+
+#endif /* ASIC_REG_TPC7_NRTR_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
new file mode 100644
index 000000000000..25f5095f68fb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_QM_REGS_H_
+#define ASIC_REG_TPC7_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC7_QM_GLBL_CFG0 0xFC8000
+
+#define mmTPC7_QM_GLBL_CFG1 0xFC8004
+
+#define mmTPC7_QM_GLBL_PROT 0xFC8008
+
+#define mmTPC7_QM_GLBL_ERR_CFG 0xFC800C
+
+#define mmTPC7_QM_GLBL_ERR_ADDR_LO 0xFC8010
+
+#define mmTPC7_QM_GLBL_ERR_ADDR_HI 0xFC8014
+
+#define mmTPC7_QM_GLBL_ERR_WDATA 0xFC8018
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS 0xFC801C
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS 0xFC8020
+
+#define mmTPC7_QM_GLBL_STS0 0xFC8024
+
+#define mmTPC7_QM_GLBL_STS1 0xFC8028
+
+#define mmTPC7_QM_PQ_BASE_LO 0xFC8060
+
+#define mmTPC7_QM_PQ_BASE_HI 0xFC8064
+
+#define mmTPC7_QM_PQ_SIZE 0xFC8068
+
+#define mmTPC7_QM_PQ_PI 0xFC806C
+
+#define mmTPC7_QM_PQ_CI 0xFC8070
+
+#define mmTPC7_QM_PQ_CFG0 0xFC8074
+
+#define mmTPC7_QM_PQ_CFG1 0xFC8078
+
+#define mmTPC7_QM_PQ_ARUSER 0xFC807C
+
+#define mmTPC7_QM_PQ_PUSH0 0xFC8080
+
+#define mmTPC7_QM_PQ_PUSH1 0xFC8084
+
+#define mmTPC7_QM_PQ_PUSH2 0xFC8088
+
+#define mmTPC7_QM_PQ_PUSH3 0xFC808C
+
+#define mmTPC7_QM_PQ_STS0 0xFC8090
+
+#define mmTPC7_QM_PQ_STS1 0xFC8094
+
+#define mmTPC7_QM_PQ_RD_RATE_LIM_EN 0xFC80A0
+
+#define mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xFC80A4
+
+#define mmTPC7_QM_PQ_RD_RATE_LIM_SAT 0xFC80A8
+
+#define mmTPC7_QM_PQ_RD_RATE_LIM_TOUT 0xFC80AC
+
+#define mmTPC7_QM_CQ_CFG0 0xFC80B0
+
+#define mmTPC7_QM_CQ_CFG1 0xFC80B4
+
+#define mmTPC7_QM_CQ_ARUSER 0xFC80B8
+
+#define mmTPC7_QM_CQ_PTR_LO 0xFC80C0
+
+#define mmTPC7_QM_CQ_PTR_HI 0xFC80C4
+
+#define mmTPC7_QM_CQ_TSIZE 0xFC80C8
+
+#define mmTPC7_QM_CQ_CTL 0xFC80CC
+
+#define mmTPC7_QM_CQ_PTR_LO_STS 0xFC80D4
+
+#define mmTPC7_QM_CQ_PTR_HI_STS 0xFC80D8
+
+#define mmTPC7_QM_CQ_TSIZE_STS 0xFC80DC
+
+#define mmTPC7_QM_CQ_CTL_STS 0xFC80E0
+
+#define mmTPC7_QM_CQ_STS0 0xFC80E4
+
+#define mmTPC7_QM_CQ_STS1 0xFC80E8
+
+#define mmTPC7_QM_CQ_RD_RATE_LIM_EN 0xFC80F0
+
+#define mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xFC80F4
+
+#define mmTPC7_QM_CQ_RD_RATE_LIM_SAT 0xFC80F8
+
+#define mmTPC7_QM_CQ_RD_RATE_LIM_TOUT 0xFC80FC
+
+#define mmTPC7_QM_CQ_IFIFO_CNT 0xFC8108
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO 0xFC8120
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI 0xFC8124
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO 0xFC8128
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI 0xFC812C
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO 0xFC8130
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI 0xFC8134
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO 0xFC8138
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI 0xFC813C
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET 0xFC8140
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC8144
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC8148
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xFC814C
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xFC8150
+
+#define mmTPC7_QM_CP_LDMA_COMMIT_OFFSET 0xFC8154
+
+#define mmTPC7_QM_CP_FENCE0_RDATA 0xFC8158
+
+#define mmTPC7_QM_CP_FENCE1_RDATA 0xFC815C
+
+#define mmTPC7_QM_CP_FENCE2_RDATA 0xFC8160
+
+#define mmTPC7_QM_CP_FENCE3_RDATA 0xFC8164
+
+#define mmTPC7_QM_CP_FENCE0_CNT 0xFC8168
+
+#define mmTPC7_QM_CP_FENCE1_CNT 0xFC816C
+
+#define mmTPC7_QM_CP_FENCE2_CNT 0xFC8170
+
+#define mmTPC7_QM_CP_FENCE3_CNT 0xFC8174
+
+#define mmTPC7_QM_CP_STS 0xFC8178
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO 0xFC817C
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI 0xFC8180
+
+#define mmTPC7_QM_CP_BARRIER_CFG 0xFC8184
+
+#define mmTPC7_QM_CP_DBG_0 0xFC8188
+
+#define mmTPC7_QM_PQ_BUF_ADDR 0xFC8300
+
+#define mmTPC7_QM_PQ_BUF_RDATA 0xFC8304
+
+#define mmTPC7_QM_CQ_BUF_ADDR 0xFC8308
+
+#define mmTPC7_QM_CQ_BUF_RDATA 0xFC830C
+
+#endif /* ASIC_REG_TPC7_QM_REGS_H_ */
+
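The TPC QM/CMDQ register files in this series repeat one block layout per TPC engine; comparing the TPC6 QM block (0xF88xxx) earlier in the patch with the TPC7 QM block (0xFC8xxx) above suggests a fixed per-engine stride of 0x40000. A minimal sketch of deriving a register offset for an arbitrary TPC index under that assumption (the stride constant and helper are illustrative, not part of the patch):

#include <linux/types.h>

#include "tpc7_qm_regs.h"

/*
 * Illustrative only: per-TPC QM block stride inferred from the TPC6
 * (0xF88000) and TPC7 (0xFC8000) blocks in this series; the patch itself
 * does not define such a macro.
 */
#define TPC_QM_BLOCK_STRIDE	0x40000

/* Offset of the TPC-N counterpart of a TPC7 QM register. */
static inline u32 tpc_qm_reg(u32 tpc7_reg, int tpc_id)
{
	return tpc7_reg - (7 - tpc_id) * TPC_QM_BLOCK_STRIDE;
}

/* Example: tpc_qm_reg(mmTPC7_QM_GLBL_CFG0, 6) evaluates to 0xF88000. */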
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
new file mode 100644
index 000000000000..920231d0afa5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC_PLL_REGS_H_
+#define ASIC_REG_TPC_PLL_REGS_H_
+
+/*
+ *****************************************
+ * TPC_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmTPC_PLL_NR 0xE01100
+
+#define mmTPC_PLL_NF 0xE01104
+
+#define mmTPC_PLL_OD 0xE01108
+
+#define mmTPC_PLL_NB 0xE0110C
+
+#define mmTPC_PLL_CFG 0xE01110
+
+#define mmTPC_PLL_LOSE_MASK 0xE01120
+
+#define mmTPC_PLL_LOCK_INTR 0xE01128
+
+#define mmTPC_PLL_LOCK_BYPASS 0xE0112C
+
+#define mmTPC_PLL_DATA_CHNG 0xE01130
+
+#define mmTPC_PLL_RST 0xE01134
+
+#define mmTPC_PLL_SLIP_WD_CNTR 0xE01150
+
+#define mmTPC_PLL_DIV_FACTOR_0 0xE01200
+
+#define mmTPC_PLL_DIV_FACTOR_1 0xE01204
+
+#define mmTPC_PLL_DIV_FACTOR_2 0xE01208
+
+#define mmTPC_PLL_DIV_FACTOR_3 0xE0120C
+
+#define mmTPC_PLL_DIV_FACTOR_CMD_0 0xE01220
+
+#define mmTPC_PLL_DIV_FACTOR_CMD_1 0xE01224
+
+#define mmTPC_PLL_DIV_FACTOR_CMD_2 0xE01228
+
+#define mmTPC_PLL_DIV_FACTOR_CMD_3 0xE0122C
+
+#define mmTPC_PLL_DIV_SEL_0 0xE01280
+
+#define mmTPC_PLL_DIV_SEL_1 0xE01284
+
+#define mmTPC_PLL_DIV_SEL_2 0xE01288
+
+#define mmTPC_PLL_DIV_SEL_3 0xE0128C
+
+#define mmTPC_PLL_DIV_EN_0 0xE012A0
+
+#define mmTPC_PLL_DIV_EN_1 0xE012A4
+
+#define mmTPC_PLL_DIV_EN_2 0xE012A8
+
+#define mmTPC_PLL_DIV_EN_3 0xE012AC
+
+#define mmTPC_PLL_DIV_FACTOR_BUSY_0 0xE012C0
+
+#define mmTPC_PLL_DIV_FACTOR_BUSY_1 0xE012C4
+
+#define mmTPC_PLL_DIV_FACTOR_BUSY_2 0xE012C8
+
+#define mmTPC_PLL_DIV_FACTOR_BUSY_3 0xE012CC
+
+#define mmTPC_PLL_CLK_GATER 0xE01300
+
+#define mmTPC_PLL_CLK_RLX_0 0xE01310
+
+#define mmTPC_PLL_CLK_RLX_1 0xE01314
+
+#define mmTPC_PLL_CLK_RLX_2 0xE01318
+
+#define mmTPC_PLL_CLK_RLX_3 0xE0131C
+
+#define mmTPC_PLL_REF_CNTR_PERIOD 0xE01400
+
+#define mmTPC_PLL_REF_LOW_THRESHOLD 0xE01410
+
+#define mmTPC_PLL_REF_HIGH_THRESHOLD 0xE01420
+
+#define mmTPC_PLL_PLL_NOT_STABLE 0xE01430
+
+#define mmTPC_PLL_FREQ_CALC_EN 0xE01440
+
+#endif /* ASIC_REG_TPC_PLL_REGS_H_ */
+
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
new file mode 100644
index 000000000000..614149efa412
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_H
+#define GOYA_H
+
+#include "asic_reg/goya_regs.h"
+
+#include <linux/types.h>
+
+#define SRAM_CFG_BAR_ID 0
+#define MSIX_BAR_ID 2
+#define DDR_BAR_ID 4
+
+#define CFG_BAR_SIZE 0x10000000ull /* 256MB */
+#define MSIX_BAR_SIZE 0x1000ull /* 4KB */
+
+#define CFG_BASE 0x7FFC000000ull
+#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG*/
+
+#define SRAM_BASE_ADDR 0x7FF0000000ull
+#define SRAM_SIZE 0x32A0000 /* 50.625MB */
+
+#define DRAM_PHYS_BASE 0x0ull
+
+#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */
+#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */
+
+#define GOYA_MSIX_ENTRIES 8
+
+#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
+
+#define MAX_ASID 1024
+
+#define PROT_BITS_OFFS 0xF80
+
+#define DMA_MAX_NUM 5
+
+#define TPC_MAX_NUM 8
+
+#endif /* GOYA_H */
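goya.h pins down the PCI BAR layout and the device address map (CFG_BASE and CFG_SIZE, SRAM_BASE_ADDR, DRAM_PHYS_BASE, HOST_PHYS_BASE). The mm* constants in the asic_reg headers read as offsets within the configuration window, so an absolute device address would be CFG_BASE plus the mm* value. A minimal sketch under that assumption (the helper name is illustrative):

#include <linux/types.h>

#include "goya.h"

/*
 * Illustrative sketch: absolute device address of a configuration
 * register, assuming the mm* constants are offsets into the CFG_SIZE
 * window that starts at CFG_BASE.
 */
static inline u64 goya_cfg_abs_addr(u32 mm_reg_offset)
{
	return CFG_BASE + mm_reg_offset;
}

/* e.g. goya_cfg_abs_addr(0xFC8000) would be 0x7FFCFC8000 for TPC7 QM. */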
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h
new file mode 100644
index 000000000000..497937a17ee9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef __GOYA_ASYNC_EVENTS_H_
+#define __GOYA_ASYNC_EVENTS_H_
+
+enum goya_async_event_id {
+ GOYA_ASYNC_EVENT_ID_PCIE_IF = 33,
+ GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36,
+ GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39,
+ GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42,
+ GOYA_ASYNC_EVENT_ID_TPC3_ECC = 45,
+ GOYA_ASYNC_EVENT_ID_TPC4_ECC = 48,
+ GOYA_ASYNC_EVENT_ID_TPC5_ECC = 51,
+ GOYA_ASYNC_EVENT_ID_TPC6_ECC = 54,
+ GOYA_ASYNC_EVENT_ID_TPC7_ECC = 57,
+ GOYA_ASYNC_EVENT_ID_MME_ECC = 60,
+ GOYA_ASYNC_EVENT_ID_MME_ECC_EXT = 61,
+ GOYA_ASYNC_EVENT_ID_MMU_ECC = 63,
+ GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64,
+ GOYA_ASYNC_EVENT_ID_DMA_ECC = 66,
+ GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75,
+ GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78,
+ GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79,
+ GOYA_ASYNC_EVENT_ID_SRAM0 = 81,
+ GOYA_ASYNC_EVENT_ID_SRAM1 = 82,
+ GOYA_ASYNC_EVENT_ID_SRAM2 = 83,
+ GOYA_ASYNC_EVENT_ID_SRAM3 = 84,
+ GOYA_ASYNC_EVENT_ID_SRAM4 = 85,
+ GOYA_ASYNC_EVENT_ID_SRAM5 = 86,
+ GOYA_ASYNC_EVENT_ID_SRAM6 = 87,
+ GOYA_ASYNC_EVENT_ID_SRAM7 = 88,
+ GOYA_ASYNC_EVENT_ID_SRAM8 = 89,
+ GOYA_ASYNC_EVENT_ID_SRAM9 = 90,
+ GOYA_ASYNC_EVENT_ID_SRAM10 = 91,
+ GOYA_ASYNC_EVENT_ID_SRAM11 = 92,
+ GOYA_ASYNC_EVENT_ID_SRAM12 = 93,
+ GOYA_ASYNC_EVENT_ID_SRAM13 = 94,
+ GOYA_ASYNC_EVENT_ID_SRAM14 = 95,
+ GOYA_ASYNC_EVENT_ID_SRAM15 = 96,
+ GOYA_ASYNC_EVENT_ID_SRAM16 = 97,
+ GOYA_ASYNC_EVENT_ID_SRAM17 = 98,
+ GOYA_ASYNC_EVENT_ID_SRAM18 = 99,
+ GOYA_ASYNC_EVENT_ID_SRAM19 = 100,
+ GOYA_ASYNC_EVENT_ID_SRAM20 = 101,
+ GOYA_ASYNC_EVENT_ID_SRAM21 = 102,
+ GOYA_ASYNC_EVENT_ID_SRAM22 = 103,
+ GOYA_ASYNC_EVENT_ID_SRAM23 = 104,
+ GOYA_ASYNC_EVENT_ID_SRAM24 = 105,
+ GOYA_ASYNC_EVENT_ID_SRAM25 = 106,
+ GOYA_ASYNC_EVENT_ID_SRAM26 = 107,
+ GOYA_ASYNC_EVENT_ID_SRAM27 = 108,
+ GOYA_ASYNC_EVENT_ID_SRAM28 = 109,
+ GOYA_ASYNC_EVENT_ID_SRAM29 = 110,
+ GOYA_ASYNC_EVENT_ID_GIC500 = 112,
+ GOYA_ASYNC_EVENT_ID_PCIE_DEC = 115,
+ GOYA_ASYNC_EVENT_ID_TPC0_DEC = 117,
+ GOYA_ASYNC_EVENT_ID_TPC1_DEC = 120,
+ GOYA_ASYNC_EVENT_ID_TPC2_DEC = 123,
+ GOYA_ASYNC_EVENT_ID_TPC3_DEC = 126,
+ GOYA_ASYNC_EVENT_ID_TPC4_DEC = 129,
+ GOYA_ASYNC_EVENT_ID_TPC5_DEC = 132,
+ GOYA_ASYNC_EVENT_ID_TPC6_DEC = 135,
+ GOYA_ASYNC_EVENT_ID_TPC7_DEC = 138,
+ GOYA_ASYNC_EVENT_ID_AXI_ECC = 139,
+ GOYA_ASYNC_EVENT_ID_L2_RAM_ECC = 140,
+ GOYA_ASYNC_EVENT_ID_MME_WACS = 141,
+ GOYA_ASYNC_EVENT_ID_MME_WACSD = 142,
+ GOYA_ASYNC_EVENT_ID_PLL0 = 143,
+ GOYA_ASYNC_EVENT_ID_PLL1 = 144,
+ GOYA_ASYNC_EVENT_ID_PLL3 = 146,
+ GOYA_ASYNC_EVENT_ID_PLL4 = 147,
+ GOYA_ASYNC_EVENT_ID_PLL5 = 148,
+ GOYA_ASYNC_EVENT_ID_PLL6 = 149,
+ GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER = 155,
+ GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC = 159,
+ GOYA_ASYNC_EVENT_ID_PSOC = 160,
+ GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171,
+ GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG3 = 177,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG0 = 178,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG1 = 179,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG2 = 180,
+ GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG3 = 181,
+ GOYA_ASYNC_EVENT_ID_PCIE_APB = 182,
+ GOYA_ASYNC_EVENT_ID_PCIE_QDB = 183,
+ GOYA_ASYNC_EVENT_ID_PCIE_BM_D_P_WR = 184,
+ GOYA_ASYNC_EVENT_ID_PCIE_BM_D_RD = 185,
+ GOYA_ASYNC_EVENT_ID_PCIE_BM_U_P_WR = 186,
+ GOYA_ASYNC_EVENT_ID_PCIE_BM_U_RD = 187,
+ GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU = 190,
+ GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR = 191,
+ GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU = 200,
+ GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR = 201,
+ GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU = 210,
+ GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR = 211,
+ GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU = 220,
+ GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR = 221,
+ GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU = 230,
+ GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR = 231,
+ GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU = 240,
+ GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR = 241,
+ GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU = 250,
+ GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR = 251,
+ GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU = 260,
+ GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR = 261,
+ GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU0 = 270,
+ GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU1 = 271,
+ GOYA_ASYNC_EVENT_ID_MME_WACS_UP = 272,
+ GOYA_ASYNC_EVENT_ID_MME_WACS_DOWN = 273,
+ GOYA_ASYNC_EVENT_ID_MMU_PAGE_FAULT = 280,
+ GOYA_ASYNC_EVENT_ID_MMU_WR_PERM = 281,
+ GOYA_ASYNC_EVENT_ID_MMU_DBG_BM = 282,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 = 290,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH1 = 291,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH2 = 292,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH3 = 293,
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH4 = 294,
+ GOYA_ASYNC_EVENT_ID_DDR0_PHY_DFI = 300,
+ GOYA_ASYNC_EVENT_ID_DDR0_ECC_SCRUB = 301,
+ GOYA_ASYNC_EVENT_ID_DDR0_DB_ECC = 302,
+ GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC = 303,
+ GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC_MC = 304,
+ GOYA_ASYNC_EVENT_ID_DDR0_AXI_RD = 305,
+ GOYA_ASYNC_EVENT_ID_DDR0_AXI_WR = 306,
+ GOYA_ASYNC_EVENT_ID_DDR1_PHY_DFI = 310,
+ GOYA_ASYNC_EVENT_ID_DDR1_ECC_SCRUB = 311,
+ GOYA_ASYNC_EVENT_ID_DDR1_DB_ECC = 312,
+ GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC = 313,
+ GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC_MC = 314,
+ GOYA_ASYNC_EVENT_ID_DDR1_AXI_RD = 315,
+ GOYA_ASYNC_EVENT_ID_DDR1_AXI_WR = 316,
+ GOYA_ASYNC_EVENT_ID_CPU_BMON = 320,
+ GOYA_ASYNC_EVENT_ID_TS_EAST = 322,
+ GOYA_ASYNC_EVENT_ID_TS_WEST = 323,
+ GOYA_ASYNC_EVENT_ID_TS_NORTH = 324,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356,
+ GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361,
+ GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430,
+ GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431,
+ GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 432,
+ GOYA_ASYNC_EVENT_ID_TPC3_CMDQ = 433,
+ GOYA_ASYNC_EVENT_ID_TPC4_CMDQ = 434,
+ GOYA_ASYNC_EVENT_ID_TPC5_CMDQ = 435,
+ GOYA_ASYNC_EVENT_ID_TPC6_CMDQ = 436,
+ GOYA_ASYNC_EVENT_ID_TPC7_CMDQ = 437,
+ GOYA_ASYNC_EVENT_ID_TPC0_QM = 438,
+ GOYA_ASYNC_EVENT_ID_TPC1_QM = 439,
+ GOYA_ASYNC_EVENT_ID_TPC2_QM = 440,
+ GOYA_ASYNC_EVENT_ID_TPC3_QM = 441,
+ GOYA_ASYNC_EVENT_ID_TPC4_QM = 442,
+ GOYA_ASYNC_EVENT_ID_TPC5_QM = 443,
+ GOYA_ASYNC_EVENT_ID_TPC6_QM = 444,
+ GOYA_ASYNC_EVENT_ID_TPC7_QM = 445,
+ GOYA_ASYNC_EVENT_ID_MME_QM = 447,
+ GOYA_ASYNC_EVENT_ID_MME_CMDQ = 448,
+ GOYA_ASYNC_EVENT_ID_DMA0_QM = 449,
+ GOYA_ASYNC_EVENT_ID_DMA1_QM = 450,
+ GOYA_ASYNC_EVENT_ID_DMA2_QM = 451,
+ GOYA_ASYNC_EVENT_ID_DMA3_QM = 452,
+ GOYA_ASYNC_EVENT_ID_DMA4_QM = 453,
+ GOYA_ASYNC_EVENT_ID_DMA_ON_HBW = 454,
+ GOYA_ASYNC_EVENT_ID_DMA0_CH = 455,
+ GOYA_ASYNC_EVENT_ID_DMA1_CH = 456,
+ GOYA_ASYNC_EVENT_ID_DMA2_CH = 457,
+ GOYA_ASYNC_EVENT_ID_DMA3_CH = 458,
+ GOYA_ASYNC_EVENT_ID_DMA4_CH = 459,
+ GOYA_ASYNC_EVENT_ID_PI_UPDATE = 484,
+ GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485,
+ GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486,
+ GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487,
+ GOYA_ASYNC_EVENT_ID_LAST_VALID_ID = 1023,
+ GOYA_ASYNC_EVENT_ID_SIZE
+};
+
+#endif /* __GOYA_ASYNC_EVENTS_H_ */
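goya_async_events.h enumerates the event IDs the device firmware can raise toward the driver. A hedged sketch of how a driver-side handler might dispatch on a received ID (the function and messages are illustrative, not the driver's actual handler):

#include <linux/kernel.h>

#include "goya_async_events.h"

/* Illustrative only: dispatch on a firmware-reported async event ID. */
static void goya_handle_async_event(u32 event_id)
{
	switch (event_id) {
	case GOYA_ASYNC_EVENT_ID_MMU_PAGE_FAULT:
	case GOYA_ASYNC_EVENT_ID_MMU_WR_PERM:
		pr_err("MMU fault event %u received\n", event_id);
		break;
	case GOYA_ASYNC_EVENT_ID_SOFT_RESET:
		pr_info("soft reset event received\n");
		break;
	default:
		pr_debug("unhandled async event %u\n", event_id);
		break;
	}
}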
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
new file mode 100644
index 000000000000..a9920cb4a07b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_FW_IF_H
+#define GOYA_FW_IF_H
+
+#define CPU_BOOT_ADDR 0x7FF8040000ull
+
+#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
+#define LINUX_FW_OFFSET 0x800000 /* 8MB in DDR */
+
+enum goya_pll_index {
+ CPU_PLL = 0,
+ IC_PLL,
+ MC_PLL,
+ MME_PLL,
+ PCI_PLL,
+ EMMC_PLL,
+ TPC_PLL
+};
+
+#define GOYA_PLL_FREQ_LOW 50000000 /* 50 MHz */
+
+#endif /* GOYA_FW_IF_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
new file mode 100644
index 000000000000..a14407b975e4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2017-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_PACKETS_H
+#define GOYA_PACKETS_H
+
+#include <linux/types.h>
+
+#define PACKET_HEADER_PACKET_ID_SHIFT 56
+#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull
+
+enum packet_id {
+ PACKET_WREG_32 = 0x1,
+ PACKET_WREG_BULK = 0x2,
+ PACKET_MSG_LONG = 0x3,
+ PACKET_MSG_SHORT = 0x4,
+ PACKET_CP_DMA = 0x5,
+ PACKET_MSG_PROT = 0x7,
+ PACKET_FENCE = 0x8,
+ PACKET_LIN_DMA = 0x9,
+ PACKET_NOP = 0xA,
+ PACKET_STOP = 0xB,
+ MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >>
+ PACKET_HEADER_PACKET_ID_SHIFT) + 1
+};
+
+enum goya_dma_direction {
+ DMA_HOST_TO_DRAM,
+ DMA_HOST_TO_SRAM,
+ DMA_DRAM_TO_SRAM,
+ DMA_SRAM_TO_DRAM,
+ DMA_SRAM_TO_HOST,
+ DMA_DRAM_TO_HOST,
+ DMA_DRAM_TO_DRAM,
+ DMA_SRAM_TO_SRAM,
+ DMA_ENUM_MAX
+};
+
+#define GOYA_PKT_CTL_OPCODE_SHIFT 24
+#define GOYA_PKT_CTL_OPCODE_MASK 0x1F000000
+
+#define GOYA_PKT_CTL_EB_SHIFT 29
+#define GOYA_PKT_CTL_EB_MASK 0x20000000
+
+#define GOYA_PKT_CTL_RB_SHIFT 30
+#define GOYA_PKT_CTL_RB_MASK 0x40000000
+
+#define GOYA_PKT_CTL_MB_SHIFT 31
+#define GOYA_PKT_CTL_MB_MASK 0x80000000
+
+struct packet_nop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct packet_stop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+#define GOYA_PKT_WREG32_CTL_REG_OFFSET_SHIFT 0
+#define GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK 0x0000FFFF
+
+struct packet_wreg32 {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_wreg_bulk {
+ __le32 size64;
+ __le32 ctl;
+ __le64 values[0]; /* data starts here */
+};
+
+struct packet_msg_long {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+struct packet_msg_short {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_msg_prot {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+struct packet_fence {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+#define GOYA_PKT_LIN_DMA_CTL_WO_SHIFT 0
+#define GOYA_PKT_LIN_DMA_CTL_WO_MASK 0x00000001
+
+#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_SHIFT 1
+#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK 0x00000002
+
+#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_SHIFT 2
+#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK 0x00000004
+
+#define GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT 6
+#define GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000040
+
+#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT 20
+#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK 0x00700000
+
+struct packet_lin_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct packet_cp_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+};
+
+#endif /* GOYA_PACKETS_H */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
new file mode 100644
index 000000000000..7475732b9996
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HL_BOOT_IF_H
+#define HL_BOOT_IF_H
+
+enum cpu_boot_status {
+ CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
+ CPU_BOOT_STATUS_IN_WFE,
+ CPU_BOOT_STATUS_DRAM_RDY,
+ CPU_BOOT_STATUS_SRAM_AVAIL,
+ CPU_BOOT_STATUS_IN_BTL, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT,
+ CPU_BOOT_STATUS_IN_SPL,
+ CPU_BOOT_STATUS_IN_UBOOT,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL,
+ CPU_BOOT_STATUS_FIT_CORRUPTED
+};
+
+enum kmd_msg {
+ KMD_MSG_NA = 0,
+ KMD_MSG_GOTO_WFE,
+ KMD_MSG_FIT_RDY
+};
+
+#endif /* HL_BOOT_IF_H */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
new file mode 100644
index 000000000000..b680052ee3f0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_MMU_GENERAL_H_
+#define INCLUDE_MMU_GENERAL_H_
+
+#define PAGE_SHIFT_4KB 12
+#define PAGE_SHIFT_2MB 21
+#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB)
+#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
+#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1))
+
+#define PAGE_PRESENT_MASK 0x0000000000001
+#define SWAP_OUT_MASK 0x0000000000004
+#define LAST_MASK 0x0000000000800
+#define PHYS_ADDR_MASK 0x3FFFFFFFFF000ull
+#define HOP0_MASK 0x3000000000000ull
+#define HOP1_MASK 0x0FF8000000000ull
+#define HOP2_MASK 0x0007FC0000000ull
+#define HOP3_MASK 0x000003FE00000
+#define HOP4_MASK 0x00000001FF000
+#define OFFSET_MASK 0x0000000000FFF
+
+#define HOP0_SHIFT 48
+#define HOP1_SHIFT 39
+#define HOP2_SHIFT 30
+#define HOP3_SHIFT 21
+#define HOP4_SHIFT 12
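+
+/*
+ * A device virtual address is decomposed with the masks and shifts above,
+ * e.g. the hop-1 index is (virt_addr & HOP1_MASK) >> HOP1_SHIFT. Each of the
+ * hop-1 to hop-4 fields is 9 bits wide, matching the 512 PTEs that fit in a
+ * 4KB hop table, and OFFSET_MASK covers the 12-bit offset within a 4KB page.
+ */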
+
+#define PTE_PHYS_ADDR_SHIFT 12
+#define PTE_PHYS_ADDR_MASK ~0xFFF
+
+#define HL_PTE_SIZE sizeof(u64)
+#define HOP_TABLE_SIZE PAGE_SIZE_4KB
+#define PTE_ENTRIES_IN_HOP (HOP_TABLE_SIZE / HL_PTE_SIZE)
+#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID)
+
+#define MMU_HOP0_PA43_12_SHIFT 12
+#define MMU_HOP0_PA49_44_SHIFT (12 + 32)
+
+#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */
+
+#endif /* INCLUDE_MMU_GENERAL_H_ */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
new file mode 100644
index 000000000000..8539dd041f2c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_MMU_V1_0_H_
+#define INCLUDE_MMU_V1_0_H_
+
+#define MMU_HOP0_PA43_12 0x490004
+#define MMU_HOP0_PA49_44 0x490008
+#define MMU_ASID_BUSY 0x490000
+
+#endif /* INCLUDE_MMU_V1_0_H_ */
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h
new file mode 100644
index 000000000000..bf59bbe27fdc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/qman_if.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef QMAN_IF_H
+#define QMAN_IF_H
+
+#include <linux/types.h>
+
+/*
+ * PRIMARY QUEUE
+ */
+
+struct hl_bd {
+ __le64 ptr;
+ __le32 len;
+ __le32 ctl;
+};
+
+#define HL_BD_SIZE sizeof(struct hl_bd)
+
+/*
+ * BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is
+ * valid. 1 means the repeat field is valid, 0 means not-valid,
+ * i.e. repeat == 1
+ */
+#define BD_CTL_REPEAT_VALID_SHIFT 24
+#define BD_CTL_REPEAT_VALID_MASK 0x01000000
+
+#define BD_CTL_SHADOW_INDEX_SHIFT 0
+#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF
+
+/*
+ * COMPLETION QUEUE
+ */
+
+struct hl_cq_entry {
+ __le32 data;
+};
+
+#define HL_CQ_ENTRY_SIZE sizeof(struct hl_cq_entry)
+
+#define CQ_ENTRY_READY_SHIFT 31
+#define CQ_ENTRY_READY_MASK 0x80000000
+
+#define CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT 30
+#define CQ_ENTRY_SHADOW_INDEX_VALID_MASK 0x40000000
+
+#define CQ_ENTRY_SHADOW_INDEX_SHIFT BD_CTL_SHADOW_INDEX_SHIFT
+#define CQ_ENTRY_SHADOW_INDEX_MASK BD_CTL_SHADOW_INDEX_MASK
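+
+/*
+ * A CQ entry is decoded with the masks and shifts above, for example:
+ *
+ *   ready = (entry & CQ_ENTRY_READY_MASK) >> CQ_ENTRY_READY_SHIFT;
+ *   index = (entry & CQ_ENTRY_SHADOW_INDEX_MASK) >>
+ *                 CQ_ENTRY_SHADOW_INDEX_SHIFT;
+ *
+ * as is done by the completion queue interrupt handler in irq.c.
+ */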
+
+#endif /* QMAN_IF_H */
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
new file mode 100644
index 000000000000..e69a09c10e3f
--- /dev/null
+++ b/drivers/misc/habanalabs/irq.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/slab.h>
+
+/**
+ * This structure is used to schedule the handling of an EQ entry and of an
+ * armcp_reset event
+ *
+ * @eq_work: workqueue object to run when an EQ entry is received
+ * @hdev: pointer to the device structure
+ * @eq_entry: copy of the EQ entry
+ */
+struct hl_eqe_work {
+ struct work_struct eq_work;
+ struct hl_device *hdev;
+ struct hl_eq_entry eq_entry;
+};
+
+/*
+ * hl_cq_inc_ptr - increment ci or pi of cq
+ *
+ * @ptr: the current ci or pi value of the completion queue
+ *
+ * Increment ptr by 1. If it reaches the number of completion queue
+ * entries, set it to 0
+ */
+inline u32 hl_cq_inc_ptr(u32 ptr)
+{
+ ptr++;
+ if (unlikely(ptr == HL_CQ_LENGTH))
+ ptr = 0;
+ return ptr;
+}
+
+/*
+ * hl_eq_inc_ptr - increment ci of eq
+ *
+ * @ptr: the current ci value of the event queue
+ *
+ * Increment ptr by 1. If it reaches the number of event queue
+ * entries, set it to 0
+ */
+inline u32 hl_eq_inc_ptr(u32 ptr)
+{
+ ptr++;
+ if (unlikely(ptr == HL_EQ_LENGTH))
+ ptr = 0;
+ return ptr;
+}
+
+static void irq_handle_eqe(struct work_struct *work)
+{
+ struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
+ eq_work);
+ struct hl_device *hdev = eqe_work->hdev;
+
+ hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);
+
+ kfree(eqe_work);
+}
+
+/*
+ * hl_irq_handler_cq - irq handler for completion queue
+ *
+ * @irq: irq number
+ * @arg: pointer to completion queue structure
+ *
+ */
+irqreturn_t hl_irq_handler_cq(int irq, void *arg)
+{
+ struct hl_cq *cq = arg;
+ struct hl_device *hdev = cq->hdev;
+ struct hl_hw_queue *queue;
+ struct hl_cs_job *job;
+ bool shadow_index_valid;
+ u16 shadow_index;
+ u32 *cq_entry;
+ u32 *cq_base;
+
+ if (hdev->disabled) {
+ dev_dbg(hdev->dev,
+ "Device disabled but received IRQ %d for CQ %d\n",
+ irq, cq->hw_queue_id);
+ return IRQ_HANDLED;
+ }
+
+ cq_base = (u32 *) (uintptr_t) cq->kernel_address;
+
+ while (1) {
+ bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK)
+ >> CQ_ENTRY_READY_SHIFT);
+
+ if (!entry_ready)
+ break;
+
+ cq_entry = (u32 *) &cq_base[cq->ci];
+
+ /*
+ * Make sure we read CQ entry contents after we've
+ * checked the ownership bit.
+ */
+ dma_rmb();
+
+ shadow_index_valid =
+ ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
+ >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
+
+ shadow_index = (u16)
+ ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK)
+ >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
+
+ queue = &hdev->kernel_queues[cq->hw_queue_id];
+
+ if ((shadow_index_valid) && (!hdev->disabled)) {
+ job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
+ queue_work(hdev->cq_wq, &job->finish_work);
+ }
+
+ /*
+ * Update ci of the context's queue. There is no
+ * need to protect it with spinlock because this update is
+ * done only inside IRQ and there is a different IRQ per
+ * queue
+ */
+ queue->ci = hl_queue_inc_ptr(queue->ci);
+
+ /* Clear CQ entry ready bit */
+ cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK;
+
+ cq->ci = hl_cq_inc_ptr(cq->ci);
+
+ /* Increment free slots */
+ atomic_inc(&cq->free_slots_cnt);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * hl_irq_handler_eq - irq handler for event queue
+ *
+ * @irq: irq number
+ * @arg: pointer to event queue structure
+ *
+ */
+irqreturn_t hl_irq_handler_eq(int irq, void *arg)
+{
+ struct hl_eq *eq = arg;
+ struct hl_device *hdev = eq->hdev;
+ struct hl_eq_entry *eq_entry;
+ struct hl_eq_entry *eq_base;
+ struct hl_eqe_work *handle_eqe_work;
+
+ eq_base = (struct hl_eq_entry *) (uintptr_t) eq->kernel_address;
+
+ while (1) {
+ bool entry_ready =
+ ((__le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
+ EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);
+
+ if (!entry_ready)
+ break;
+
+ eq_entry = &eq_base[eq->ci];
+
+ /*
+ * Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
+ dma_rmb();
+
+ if (hdev->disabled) {
+ dev_warn(hdev->dev,
+ "Device disabled but received IRQ %d for EQ\n",
+ irq);
+ goto skip_irq;
+ }
+
+ handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
+ if (handle_eqe_work) {
+ INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
+ handle_eqe_work->hdev = hdev;
+
+ memcpy(&handle_eqe_work->eq_entry, eq_entry,
+ sizeof(*eq_entry));
+
+ queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
+ }
+skip_irq:
+ /* Clear EQ entry ready bit */
+ eq_entry->hdr.ctl =
+ __cpu_to_le32(__le32_to_cpu(eq_entry->hdr.ctl) &
+ ~EQ_CTL_READY_MASK);
+
+ eq->ci = hl_eq_inc_ptr(eq->ci);
+
+ hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * hl_cq_init - main initialization function for a cq object
+ *
+ * @hdev: pointer to device structure
+ * @q: pointer to cq structure
+ * @hw_queue_id: The H/W queue ID this completion queue belongs to
+ *
+ * Allocate dma-able memory for the completion queue and initialize fields
+ * Returns 0 on success
+ */
+int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
+{
+ void *p;
+
+ BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
+
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
+ &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->hdev = hdev;
+ q->kernel_address = (u64) (uintptr_t) p;
+ q->hw_queue_id = hw_queue_id;
+ q->ci = 0;
+ q->pi = 0;
+
+ atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
+
+ return 0;
+}
+
+/*
+ * hl_cq_fini - destroy completion queue
+ *
+ * @hdev: pointer to device structure
+ * @q: pointer to cq structure
+ *
+ * Free the completion queue memory
+ */
+void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
+{
+ hdev->asic_funcs->dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address, q->bus_address);
+}
+
+void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
+{
+ q->ci = 0;
+ q->pi = 0;
+
+ atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
+
+ /*
+ * It's not enough to just reset the PI/CI because the H/W may have
+ * written valid completion entries before it was halted and therefore
+ * we need to clean the actual queues so we won't process old entries
+ * when the device is operational again
+ */
+
+ memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
+}
+
+/*
+ * hl_eq_init - main initialization function for an event queue object
+ *
+ * @hdev: pointer to device structure
+ * @q: pointer to eq structure
+ *
+ * Allocate dma-able memory for the event queue and initialize fields
+ * Returns 0 on success
+ */
+int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
+{
+ void *p;
+
+ BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
+
+ p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
+ &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->hdev = hdev;
+ q->kernel_address = (u64) (uintptr_t) p;
+ q->ci = 0;
+
+ return 0;
+}
+
+/*
+ * hl_eq_fini - destroy event queue
+ *
+ * @hdev: pointer to device structure
+ * @q: pointer to eq structure
+ *
+ * Free the event queue memory
+ */
+void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
+{
+ flush_workqueue(hdev->eq_wq);
+
+ hdev->asic_funcs->dma_free_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
+ (void *) (uintptr_t) q->kernel_address, q->bus_address);
+}
+
+void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
+{
+ q->ci = 0;
+
+ /*
+ * It's not enough to just reset the PI/CI because the H/W may have
+ * written valid completion entries before it was halted and therefore
+ * we need to clean the actual queues so we won't process old entries
+ * when the device is operational again
+ */
+
+ memset((void *) (uintptr_t) q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
+}
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
new file mode 100644
index 000000000000..3a12fd1a5274
--- /dev/null
+++ b/drivers/misc/habanalabs/memory.c
@@ -0,0 +1,1723 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+
+#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
+#define HL_MMU_DEBUG 0
+
+/*
+ * The va ranges in context object contain a list with the available chunks of
+ * device virtual memory.
+ * There is one range for host allocations and one for DRAM allocations.
+ *
+ * On initialization each range contains one chunk of all of its available
+ * virtual range which is a half of the total device virtual range.
+ *
+ * On each mapping of physical pages, a suitable virtual range chunk (with a
+ * minimum size) is selected from the list. If the chunk size equals the
+ * requested size, the chunk is returned. Otherwise, the chunk is split into
+ * two chunks - one to return as result and a remainder to stay in the list.
+ *
+ * On each Unmapping of a virtual address, the relevant virtual chunk is
+ * returned to the list. The chunk is added to the list and if its edges match
+ * the edges of the adjacent chunks (means a contiguous chunk can be created),
+ * the chunks are merged.
+ *
+ * On finish, the list is checked to have only one chunk of all the relevant
+ * virtual range (which is a half of the device total virtual range).
+ * If not (means not all mappings were unmapped), a warning is printed.
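+ *
+ * For illustration, if a range initially holds a single free chunk covering
+ * 0x0 - 0xFFFF and a mapping of size 0x4000 is requested, the chunk is split
+ * into the returned range 0x0 - 0x3FFF and a remaining free chunk
+ * 0x4000 - 0xFFFF. When that mapping is later unmapped, 0x0 - 0x3FFF is
+ * returned to the list and, since its end + 1 equals the start of the
+ * adjacent free chunk, the two are merged back into a single chunk.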
+ */
+
+/*
+ * alloc_device_memory - allocate device memory
+ *
+ * @ctx : current context
+ * @args : host parameters containing the requested size
+ * @ret_handle : result handle
+ *
+ * This function does the following:
+ * - Allocate the requested size rounded up to 2MB pages
+ * - Return unique handle
+ */
+static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
+ u32 *ret_handle)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ u64 paddr = 0;
+ u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
+ int handle, rc, i;
+ bool contiguous;
+
+ num_curr_pgs = 0;
+ page_size = hdev->asic_prop.dram_page_size;
+ page_shift = __ffs(page_size);
+ num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
+ total_size = num_pgs << page_shift;
+
+ contiguous = args->flags & HL_MEM_CONTIGUOUS;
+
+ if (contiguous) {
+ paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
+ if (!paddr) {
+ dev_err(hdev->dev,
+ "failed to allocate %u huge contiguous pages\n",
+ num_pgs);
+ return -ENOMEM;
+ }
+ }
+
+ phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
+ if (!phys_pg_pack) {
+ rc = -ENOMEM;
+ goto pages_pack_err;
+ }
+
+ phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
+ phys_pg_pack->asid = ctx->asid;
+ phys_pg_pack->npages = num_pgs;
+ phys_pg_pack->page_size = page_size;
+ phys_pg_pack->total_size = total_size;
+ phys_pg_pack->flags = args->flags;
+ phys_pg_pack->contiguous = contiguous;
+
+ phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+ if (!phys_pg_pack->pages) {
+ rc = -ENOMEM;
+ goto pages_arr_err;
+ }
+
+ if (phys_pg_pack->contiguous) {
+ for (i = 0 ; i < num_pgs ; i++)
+ phys_pg_pack->pages[i] = paddr + i * page_size;
+ } else {
+ for (i = 0 ; i < num_pgs ; i++) {
+ phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
+ vm->dram_pg_pool,
+ page_size);
+ if (!phys_pg_pack->pages[i]) {
+ dev_err(hdev->dev,
+ "ioctl failed to allocate page\n");
+ rc = -ENOMEM;
+ goto page_err;
+ }
+
+ num_curr_pgs++;
+ }
+ }
+
+ spin_lock(&vm->idr_lock);
+ handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
+ GFP_ATOMIC);
+ spin_unlock(&vm->idr_lock);
+
+ if (handle < 0) {
+ dev_err(hdev->dev, "Failed to get handle for page\n");
+ rc = -EFAULT;
+ goto idr_err;
+ }
+
+ for (i = 0 ; i < num_pgs ; i++)
+ kref_get(&vm->dram_pg_pool_refcount);
+
+ phys_pg_pack->handle = handle;
+
+ atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
+ atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
+
+ *ret_handle = handle;
+
+ return 0;
+
+idr_err:
+page_err:
+ if (!phys_pg_pack->contiguous)
+ for (i = 0 ; i < num_curr_pgs ; i++)
+ gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
+ page_size);
+
+ kfree(phys_pg_pack->pages);
+pages_arr_err:
+ kfree(phys_pg_pack);
+pages_pack_err:
+ if (contiguous)
+ gen_pool_free(vm->dram_pg_pool, paddr, total_size);
+
+ return rc;
+}
+
+/*
+ * get_userptr_from_host_va - initialize userptr structure from given host
+ * virtual address
+ *
+ * @hdev : habanalabs device structure
+ * @args : parameters containing the virtual address and size
+ * @p_userptr : pointer to result userptr structure
+ *
+ * This function does the following:
+ * - Allocate userptr structure
+ * - Pin the given host memory using the userptr structure
+ * - Perform DMA mapping to have the DMA addresses of the pages
+ */
+static int get_userptr_from_host_va(struct hl_device *hdev,
+ struct hl_mem_in *args, struct hl_userptr **p_userptr)
+{
+ struct hl_userptr *userptr;
+ int rc;
+
+ userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
+ if (!userptr) {
+ rc = -ENOMEM;
+ goto userptr_err;
+ }
+
+ rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
+ args->map_host.mem_size, userptr);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to pin host memory\n");
+ goto pin_err;
+ }
+
+ rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents, DMA_BIDIRECTIONAL);
+ if (rc) {
+ dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+ goto dma_map_err;
+ }
+
+ userptr->dma_mapped = true;
+ userptr->dir = DMA_BIDIRECTIONAL;
+ userptr->vm_type = VM_TYPE_USERPTR;
+
+ *p_userptr = userptr;
+
+ return 0;
+
+dma_map_err:
+ hl_unpin_host_memory(hdev, userptr);
+pin_err:
+ kfree(userptr);
+userptr_err:
+
+ return rc;
+}
+
+/*
+ * free_userptr - free userptr structure
+ *
+ * @hdev : habanalabs device structure
+ * @userptr : userptr to free
+ *
+ * This function does the following:
+ * - Unpins the physical pages
+ * - Frees the userptr structure
+ */
+static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+{
+ hl_unpin_host_memory(hdev, userptr);
+ kfree(userptr);
+}
+
+/*
+ * dram_pg_pool_do_release - free DRAM pages pool
+ *
+ * @ref : pointer to reference object
+ *
+ * This function does the following:
+ * - Frees the idr structure of physical pages handles
+ * - Frees the generic pool of DRAM physical pages
+ */
+static void dram_pg_pool_do_release(struct kref *ref)
+{
+ struct hl_vm *vm = container_of(ref, struct hl_vm,
+ dram_pg_pool_refcount);
+
+ /*
+ * free the idr here as only here we know for sure that there are no
+ * allocated physical pages and hence there are no handles in use
+ */
+ idr_destroy(&vm->phys_pg_pack_handles);
+ gen_pool_destroy(vm->dram_pg_pool);
+}
+
+/*
+ * free_phys_pg_pack - free physical page pack
+ *
+ * @hdev : habanalabs device structure
+ * @phys_pg_pack : physical page pack to free
+ *
+ * This function does the following:
+ * - For DRAM memory only, iterate over the pack and free each physical block
+ * structure by returning it to the general pool
+ * - Free the hl_vm_phys_pg_pack structure
+ */
+static void free_phys_pg_pack(struct hl_device *hdev,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct hl_vm *vm = &hdev->vm;
+ int i;
+
+ if (!phys_pg_pack->created_from_userptr) {
+ if (phys_pg_pack->contiguous) {
+ gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
+ phys_pg_pack->total_size);
+
+ for (i = 0; i < phys_pg_pack->npages ; i++)
+ kref_put(&vm->dram_pg_pool_refcount,
+ dram_pg_pool_do_release);
+ } else {
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+ gen_pool_free(vm->dram_pg_pool,
+ phys_pg_pack->pages[i],
+ phys_pg_pack->page_size);
+ kref_put(&vm->dram_pg_pool_refcount,
+ dram_pg_pool_do_release);
+ }
+ }
+ }
+
+ kfree(phys_pg_pack->pages);
+ kfree(phys_pg_pack);
+}
+
+/*
+ * free_device_memory - free device memory
+ *
+ * @ctx : current context
+ * @handle : handle of the memory chunk to free
+ *
+ * This function does the following:
+ * - Free the device memory related to the given handle
+ */
+static int free_device_memory(struct hl_ctx *ctx, u32 handle)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+ if (phys_pg_pack) {
+ if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
+ dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
+ handle);
+ spin_unlock(&vm->idr_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * must remove from idr before the freeing of the physical
+ * pages as the refcount of the pool is also the trigger of the
+ * idr destroy
+ */
+ idr_remove(&vm->phys_pg_pack_handles, handle);
+ spin_unlock(&vm->idr_lock);
+
+ atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
+ atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
+
+ free_phys_pg_pack(hdev, phys_pg_pack);
+ } else {
+ spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev,
+ "free device memory failed, no match for handle %u\n",
+ handle);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * clear_va_list_locked - free virtual addresses list
+ *
+ * @hdev : habanalabs device structure
+ * @va_list : list of virtual addresses to free
+ *
+ * This function does the following:
+ * - Iterate over the list and free each virtual addresses block
+ *
+ * This function should be called only when va_list lock is taken
+ */
+static void clear_va_list_locked(struct hl_device *hdev,
+ struct list_head *va_list)
+{
+ struct hl_vm_va_block *va_block, *tmp;
+
+ list_for_each_entry_safe(va_block, tmp, va_list, node) {
+ list_del(&va_block->node);
+ kfree(va_block);
+ }
+}
+
+/*
+ * print_va_list_locked - print virtual addresses list
+ *
+ * @hdev : habanalabs device structure
+ * @va_list : list of virtual addresses to print
+ *
+ * This function does the following:
+ * - Iterate over the list and print each virtual addresses block
+ *
+ * This function should be called only when va_list lock is taken
+ */
+static void print_va_list_locked(struct hl_device *hdev,
+ struct list_head *va_list)
+{
+#if HL_MMU_DEBUG
+ struct hl_vm_va_block *va_block;
+
+ dev_dbg(hdev->dev, "print va list:\n");
+
+ list_for_each_entry(va_block, va_list, node)
+ dev_dbg(hdev->dev,
+ "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
+ va_block->start, va_block->end, va_block->size);
+#endif
+}
+
+/*
+ * merge_va_blocks_locked - merge a virtual block if possible
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_list : pointer to the virtual addresses block list
+ * @va_block : virtual block to merge with adjacent blocks
+ *
+ * This function does the following:
+ * - Merge the given block with the adjacent blocks if their virtual ranges
+ *   create a contiguous virtual range
+ *
+ * This function should be called only when va_list lock is taken
+ */
+static void merge_va_blocks_locked(struct hl_device *hdev,
+ struct list_head *va_list, struct hl_vm_va_block *va_block)
+{
+ struct hl_vm_va_block *prev, *next;
+
+ prev = list_prev_entry(va_block, node);
+ if (&prev->node != va_list && prev->end + 1 == va_block->start) {
+ prev->end = va_block->end;
+ prev->size = prev->end - prev->start;
+ list_del(&va_block->node);
+ kfree(va_block);
+ va_block = prev;
+ }
+
+ next = list_next_entry(va_block, node);
+ if (&next->node != va_list && va_block->end + 1 == next->start) {
+ next->start = va_block->start;
+ next->size = next->end - next->start;
+ list_del(&va_block->node);
+ kfree(va_block);
+ }
+}
+
+/*
+ * add_va_block_locked - add a virtual block to the virtual addresses list
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_list : pointer to the virtual addresses block list
+ * @start : start virtual address
+ * @end : end virtual address
+ *
+ * This function does the following:
+ * - Add the given block to the virtual blocks list and merge with other
+ * blocks if a contiguous virtual block can be created
+ *
+ * This function should be called only when va_list lock is taken
+ */
+static int add_va_block_locked(struct hl_device *hdev,
+ struct list_head *va_list, u64 start, u64 end)
+{
+ struct hl_vm_va_block *va_block, *res = NULL;
+ u64 size = end - start;
+
+ print_va_list_locked(hdev, va_list);
+
+ list_for_each_entry(va_block, va_list, node) {
+ /* TODO: remove upon matureness */
+ if (hl_mem_area_crosses_range(start, size, va_block->start,
+ va_block->end)) {
+ dev_err(hdev->dev,
+ "block crossing ranges at start 0x%llx, end 0x%llx\n",
+ va_block->start, va_block->end);
+ return -EINVAL;
+ }
+
+ if (va_block->end < start)
+ res = va_block;
+ }
+
+ va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
+ if (!va_block)
+ return -ENOMEM;
+
+ va_block->start = start;
+ va_block->end = end;
+ va_block->size = size;
+
+ if (!res)
+ list_add(&va_block->node, va_list);
+ else
+ list_add(&va_block->node, &res->node);
+
+ merge_va_blocks_locked(hdev, va_list, va_block);
+
+ print_va_list_locked(hdev, va_list);
+
+ return 0;
+}
+
+/*
+ * add_va_block - wrapper for add_va_block_locked
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_range : pointer to the virtual addresses range
+ * @start : start virtual address
+ * @end : end virtual address
+ *
+ * This function does the following:
+ * - Takes the list lock and calls add_va_block_locked
+ */
+static inline int add_va_block(struct hl_device *hdev,
+ struct hl_va_range *va_range, u64 start, u64 end)
+{
+ int rc;
+
+ mutex_lock(&va_range->lock);
+ rc = add_va_block_locked(hdev, &va_range->list, start, end);
+ mutex_unlock(&va_range->lock);
+
+ return rc;
+}
+
+/*
+ * get_va_block - get a virtual block with the requested size
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_range : pointer to the virtual addresses range
+ * @size : requested block size
+ * @hint_addr : hint for request address by the user
+ * @is_userptr : true for host (userptr) memory, false for DRAM memory
+ *
+ * This function does the following:
+ * - Iterate on the virtual block list to find a suitable virtual block for the
+ * requested size
+ * - Reserve the requested block and update the list
+ * - Return the start address of the virtual block
+ */
+static u64 get_va_block(struct hl_device *hdev,
+ struct hl_va_range *va_range, u32 size, u64 hint_addr,
+ bool is_userptr)
+{
+ struct hl_vm_va_block *va_block, *new_va_block = NULL;
+ u64 valid_start, valid_size, prev_start, prev_end, page_mask,
+ res_valid_start = 0, res_valid_size = 0;
+ u32 page_size;
+ bool add_prev = false;
+
+ if (is_userptr) {
+ /*
+ * We cannot know if the user allocated memory with huge pages
+ * or not, hence we continue with the biggest possible
+ * granularity.
+ */
+ page_size = PAGE_SIZE_2MB;
+ page_mask = PAGE_MASK_2MB;
+ } else {
+ page_size = hdev->asic_prop.dram_page_size;
+ page_mask = ~((u64)page_size - 1);
+ }
+
+ mutex_lock(&va_range->lock);
+
+ print_va_list_locked(hdev, &va_range->list);
+
+ list_for_each_entry(va_block, &va_range->list, node) {
+ /* calc the first possible aligned addr */
+ valid_start = va_block->start;
+
+ if (valid_start & (page_size - 1)) {
+ valid_start &= page_mask;
+ valid_start += page_size;
+ if (valid_start > va_block->end)
+ continue;
+ }
+
+ valid_size = va_block->end - valid_start;
+
+ if (valid_size >= size &&
+ (!new_va_block || valid_size < res_valid_size)) {
+
+ new_va_block = va_block;
+ res_valid_start = valid_start;
+ res_valid_size = valid_size;
+ }
+
+ if (hint_addr && hint_addr >= valid_start &&
+ ((hint_addr + size) <= va_block->end)) {
+ new_va_block = va_block;
+ res_valid_start = hint_addr;
+ res_valid_size = valid_size;
+ break;
+ }
+ }
+
+ if (!new_va_block) {
+ dev_err(hdev->dev, "no available va block for size %u\n", size);
+ goto out;
+ }
+
+ if (res_valid_start > new_va_block->start) {
+ prev_start = new_va_block->start;
+ prev_end = res_valid_start - 1;
+
+ new_va_block->start = res_valid_start;
+ new_va_block->size = res_valid_size;
+
+ add_prev = true;
+ }
+
+ if (new_va_block->size > size) {
+ new_va_block->start += size;
+ new_va_block->size = new_va_block->end - new_va_block->start;
+ } else {
+ list_del(&new_va_block->node);
+ kfree(new_va_block);
+ }
+
+ if (add_prev)
+ add_va_block_locked(hdev, &va_range->list, prev_start,
+ prev_end);
+
+ print_va_list_locked(hdev, &va_range->list);
+out:
+ mutex_unlock(&va_range->lock);
+
+ return res_valid_start;
+}
+
+/*
+ * get_sg_info - get the number of pages and the DMA address from an SG entry
+ *
+ * @sg : the SG list entry
+ * @dma_addr : pointer to DMA address to return
+ *
+ * Calculate the number of consecutive pages described by the given SG entry:
+ * take the offset of the DMA address within the first page, add the entry
+ * length to it and round the result up to a whole number of pages.
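+ *
+ * For example, assuming a 4KB host page size, an entry with a DMA address of
+ * 0x1000100 and a length of 0x2000 covers 0x100 + 0x2000 = 0x2100 bytes
+ * measured from the start of the first page, which rounds up to 3 pages.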
+ */
+static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
+{
+ *dma_addr = sg_dma_address(sg);
+
+ return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+}
+
+/*
+ * init_phys_pg_pack_from_userptr - initialize physical page pack from host
+ * memory
+ *
+ * @ctx : current context
+ * @userptr : userptr to initialize from
+ * @pphys_pg_pack : result pointer for the created physical page pack
+ *
+ * This function does the following:
+ * - Create a physical page pack from the physical pages related to the given
+ *   virtual block, whose pages were already pinned by hl_pin_host_memory
+ */
+static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
+ struct hl_userptr *userptr,
+ struct hl_vm_phys_pg_pack **pphys_pg_pack)
+{
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct scatterlist *sg;
+ dma_addr_t dma_addr;
+ u64 page_mask;
+ u32 npages, total_npages, page_size = PAGE_SIZE;
+ bool first = true, is_huge_page_opt = true;
+ int rc, i, j;
+
+ phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
+ if (!phys_pg_pack)
+ return -ENOMEM;
+
+ phys_pg_pack->vm_type = userptr->vm_type;
+ phys_pg_pack->created_from_userptr = true;
+ phys_pg_pack->asid = ctx->asid;
+ atomic_set(&phys_pg_pack->mapping_cnt, 1);
+
+ /* Only if all dma_addrs are aligned to 2MB and their
+ * sizes is at least 2MB, we can use huge page mapping.
+ * We limit the 2MB optimization to this condition,
+ * since later on we acquire the related VA range as one
+ * consecutive block.
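+ *
+ * For illustration, assuming a 4KB host page size, PGS_IN_2MB_PAGE is 512,
+ * so an SG entry qualifies only if it describes a multiple of 512 pages and
+ * its DMA address is aligned to 2MB.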
+ */
+ total_npages = 0;
+ for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+ npages = get_sg_info(sg, &dma_addr);
+
+ total_npages += npages;
+
+ if (first) {
+ first = false;
+ dma_addr &= PAGE_MASK_2MB;
+ }
+
+ if ((npages % PGS_IN_2MB_PAGE) ||
+ (dma_addr & (PAGE_SIZE_2MB - 1)))
+ is_huge_page_opt = false;
+ }
+
+ if (is_huge_page_opt) {
+ page_size = PAGE_SIZE_2MB;
+ total_npages /= PGS_IN_2MB_PAGE;
+ }
+
+ page_mask = ~(((u64) page_size) - 1);
+
+ phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+ if (!phys_pg_pack->pages) {
+ rc = -ENOMEM;
+ goto page_pack_arr_mem_err;
+ }
+
+ phys_pg_pack->npages = total_npages;
+ phys_pg_pack->page_size = page_size;
+ phys_pg_pack->total_size = total_npages * page_size;
+
+ j = 0;
+ first = true;
+ for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
+ npages = get_sg_info(sg, &dma_addr);
+
+ /* align down to physical page size and save the offset */
+ if (first) {
+ first = false;
+ phys_pg_pack->offset = dma_addr & (page_size - 1);
+ dma_addr &= page_mask;
+ }
+
+ while (npages) {
+ phys_pg_pack->pages[j++] = dma_addr;
+ dma_addr += page_size;
+
+ if (is_huge_page_opt)
+ npages -= PGS_IN_2MB_PAGE;
+ else
+ npages--;
+ }
+ }
+
+ *pphys_pg_pack = phys_pg_pack;
+
+ return 0;
+
+page_pack_arr_mem_err:
+ kfree(phys_pg_pack);
+
+ return rc;
+}
+
+/*
+ * map_phys_page_pack - maps the physical page pack
+ *
+ * @ctx : current context
+ * @vaddr : start address of the virtual area to map from
+ * @phys_pg_pack : the pack of physical pages to map to
+ *
+ * This function does the following:
+ * - Maps each chunk of virtual memory to the matching physical chunk
+ * - On failure, unmaps everything that was already mapped
+ * - Returns 0 on success, error code otherwise.
+ */
+static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 next_vaddr = vaddr, paddr;
+ u32 page_size = phys_pg_pack->page_size;
+ int i, rc = 0, mapped_pg_cnt = 0;
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+ paddr = phys_pg_pack->pages[i];
+
+ /* For accessing the host we need to turn on bit 39 */
+ if (phys_pg_pack->created_from_userptr)
+ paddr += hdev->asic_prop.host_phys_base_address;
+
+ rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
+ if (rc) {
+ dev_err(hdev->dev,
+ "map failed for handle %u, npages: %d, mapped: %d",
+ phys_pg_pack->handle, phys_pg_pack->npages,
+ mapped_pg_cnt);
+ goto err;
+ }
+
+ mapped_pg_cnt++;
+ next_vaddr += page_size;
+ }
+
+ return 0;
+
+err:
+ next_vaddr = vaddr;
+ for (i = 0 ; i < mapped_pg_cnt ; i++) {
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+ "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
+ phys_pg_pack->handle, next_vaddr,
+ phys_pg_pack->pages[i], page_size);
+
+ next_vaddr += page_size;
+ }
+
+ return rc;
+}
+
+static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
+ u64 *paddr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ u32 handle;
+
+ handle = lower_32_bits(args->map_device.handle);
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+ if (!phys_pg_pack) {
+ spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev, "no match for handle %u\n", handle);
+ return -EINVAL;
+ }
+
+ *paddr = phys_pg_pack->pages[0];
+
+ spin_unlock(&vm->idr_lock);
+
+ return 0;
+}
+
+/*
+ * map_device_va - map the given memory
+ *
+ * @ctx : current context
+ * @args : host parameters with handle/host virtual address
+ * @device_addr : pointer to result device virtual address
+ *
+ * This function does the following:
+ * - If given a physical device memory handle, map to a device virtual block
+ * and return the start address of this block
+ * - If given a host virtual address and size, find the related physical pages,
+ *   map a device virtual block to these pages and return the start address of
+ * this block
+ */
+static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
+ u64 *device_addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_userptr *userptr = NULL;
+ struct hl_vm_hash_node *hnode;
+ enum vm_type_t *vm_type;
+ u64 ret_vaddr, hint_addr;
+ u32 handle = 0;
+ int rc;
+ bool is_userptr = args->flags & HL_MEM_USERPTR;
+
+ /* Assume failure */
+ *device_addr = 0;
+
+ if (is_userptr) {
+ rc = get_userptr_from_host_va(hdev, args, &userptr);
+ if (rc) {
+ dev_err(hdev->dev, "failed to get userptr from va\n");
+ return rc;
+ }
+
+ rc = init_phys_pg_pack_from_userptr(ctx, userptr,
+ &phys_pg_pack);
+ if (rc) {
+ dev_err(hdev->dev,
+ "unable to init page pack for vaddr 0x%llx\n",
+ args->map_host.host_virt_addr);
+ goto init_page_pack_err;
+ }
+
+ vm_type = (enum vm_type_t *) userptr;
+ hint_addr = args->map_host.hint_addr;
+ } else {
+ handle = lower_32_bits(args->map_device.handle);
+
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+ if (!phys_pg_pack) {
+ spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev,
+ "no match for handle %u\n", handle);
+ return -EINVAL;
+ }
+
+ /* increment now to avoid freeing device memory while mapping */
+ atomic_inc(&phys_pg_pack->mapping_cnt);
+
+ spin_unlock(&vm->idr_lock);
+
+ vm_type = (enum vm_type_t *) phys_pg_pack;
+
+ hint_addr = args->map_device.hint_addr;
+ }
+
+ /*
+ * relevant for mapping device physical memory only, as host memory is
+ * implicitly shared
+ */
+ if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
+ phys_pg_pack->asid != ctx->asid) {
+ dev_err(hdev->dev,
+ "Failed to map memory, handle %u is not shared\n",
+ handle);
+ rc = -EPERM;
+ goto shared_err;
+ }
+
+ hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
+ if (!hnode) {
+ rc = -ENOMEM;
+ goto hnode_err;
+ }
+
+ ret_vaddr = get_va_block(hdev,
+ is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
+ phys_pg_pack->total_size, hint_addr, is_userptr);
+ if (!ret_vaddr) {
+ dev_err(hdev->dev, "no available va block for handle %u\n",
+ handle);
+ rc = -ENOMEM;
+ goto va_block_err;
+ }
+
+ mutex_lock(&ctx->mmu_lock);
+
+ rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
+ if (rc) {
+ mutex_unlock(&ctx->mmu_lock);
+ dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
+ handle);
+ goto map_err;
+ }
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+ ret_vaddr += phys_pg_pack->offset;
+
+ hnode->ptr = vm_type;
+ hnode->vaddr = ret_vaddr;
+
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ *device_addr = ret_vaddr;
+
+ if (is_userptr)
+ free_phys_pg_pack(hdev, phys_pg_pack);
+
+ return 0;
+
+map_err:
+ if (add_va_block(hdev,
+ is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
+ ret_vaddr,
+ ret_vaddr + phys_pg_pack->total_size - 1))
+ dev_warn(hdev->dev,
+ "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
+ handle, ret_vaddr);
+
+va_block_err:
+ kfree(hnode);
+hnode_err:
+shared_err:
+ atomic_dec(&phys_pg_pack->mapping_cnt);
+ if (is_userptr)
+ free_phys_pg_pack(hdev, phys_pg_pack);
+init_page_pack_err:
+ if (is_userptr)
+ free_userptr(hdev, userptr);
+
+ return rc;
+}
+
+/*
+ * unmap_device_va - unmap the given device virtual address
+ *
+ * @ctx : current context
+ * @vaddr : device virtual address to unmap
+ *
+ * This function does the following:
+ * - Unmap the physical pages related to the given virtual address
+ * - Return the device virtual block to the virtual block list
+ */
+static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ struct hl_vm_hash_node *hnode = NULL;
+ struct hl_userptr *userptr = NULL;
+ enum vm_type_t *vm_type;
+ u64 next_vaddr;
+ u32 page_size;
+ bool is_userptr;
+ int i, rc;
+
+ /* protect from double entrance */
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
+ if (vaddr == hnode->vaddr)
+ break;
+
+ if (!hnode) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_err(hdev->dev,
+ "unmap failed, no mem hnode for vaddr 0x%llx\n",
+ vaddr);
+ return -EINVAL;
+ }
+
+ hash_del(&hnode->node);
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ vm_type = hnode->ptr;
+
+ if (*vm_type == VM_TYPE_USERPTR) {
+ is_userptr = true;
+ userptr = hnode->ptr;
+ rc = init_phys_pg_pack_from_userptr(ctx, userptr,
+ &phys_pg_pack);
+ if (rc) {
+ dev_err(hdev->dev,
+ "unable to init page pack for vaddr 0x%llx\n",
+ vaddr);
+ goto vm_type_err;
+ }
+ } else if (*vm_type == VM_TYPE_PHYS_PACK) {
+ is_userptr = false;
+ phys_pg_pack = hnode->ptr;
+ } else {
+ dev_warn(hdev->dev,
+ "unmap failed, unknown vm desc for vaddr 0x%llx\n",
+ vaddr);
+ rc = -EFAULT;
+ goto vm_type_err;
+ }
+
+ if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
+ dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
+ rc = -EINVAL;
+ goto mapping_cnt_err;
+ }
+
+ page_size = phys_pg_pack->page_size;
+ vaddr &= ~(((u64) page_size) - 1);
+
+ next_vaddr = vaddr;
+
+ mutex_lock(&ctx->mmu_lock);
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+ if (add_va_block(hdev,
+ is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
+ vaddr,
+ vaddr + phys_pg_pack->total_size - 1))
+ dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
+ vaddr);
+
+ atomic_dec(&phys_pg_pack->mapping_cnt);
+ kfree(hnode);
+
+ if (is_userptr) {
+ free_phys_pg_pack(hdev, phys_pg_pack);
+ free_userptr(hdev, userptr);
+ }
+
+ return 0;
+
+mapping_cnt_err:
+ if (is_userptr)
+ free_phys_pg_pack(hdev, phys_pg_pack);
+vm_type_err:
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_add(ctx->mem_hash, &hnode->node, vaddr);
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ return rc;
+}
+
+int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ union hl_mem_args *args = data;
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_ctx *ctx = hpriv->ctx;
+ u64 device_addr = 0;
+ u32 handle = 0;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Device is disabled or in reset. Can't execute memory IOCTL\n");
+ return -EBUSY;
+ }
+
+ if (hdev->mmu_enable) {
+ switch (args->in.op) {
+ case HL_MEM_OP_ALLOC:
+ if (!hdev->dram_supports_virtual_memory) {
+ dev_err(hdev->dev,
+ "DRAM alloc is not supported\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ if (args->in.alloc.mem_size == 0) {
+ dev_err(hdev->dev,
+ "alloc size must be larger than 0\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = alloc_device_memory(ctx, &args->in, &handle);
+
+ memset(args, 0, sizeof(*args));
+ args->out.handle = (__u64) handle;
+ break;
+
+ case HL_MEM_OP_FREE:
+ if (!hdev->dram_supports_virtual_memory) {
+ dev_err(hdev->dev,
+ "DRAM free is not supported\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = free_device_memory(ctx, args->in.free.handle);
+ break;
+
+ case HL_MEM_OP_MAP:
+ rc = map_device_va(ctx, &args->in, &device_addr);
+
+ memset(args, 0, sizeof(*args));
+ args->out.device_virt_addr = device_addr;
+ break;
+
+ case HL_MEM_OP_UNMAP:
+ rc = unmap_device_va(ctx,
+ args->in.unmap.device_virt_addr);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+ rc = -ENOTTY;
+ break;
+ }
+ } else {
+ switch (args->in.op) {
+ case HL_MEM_OP_ALLOC:
+ if (args->in.alloc.mem_size == 0) {
+ dev_err(hdev->dev,
+ "alloc size must be larger than 0\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Force contiguous as there are no real MMU
+ * translations to overcome physical memory gaps
+ */
+ args->in.flags |= HL_MEM_CONTIGUOUS;
+ rc = alloc_device_memory(ctx, &args->in, &handle);
+
+ memset(args, 0, sizeof(*args));
+ args->out.handle = (__u64) handle;
+ break;
+
+ case HL_MEM_OP_FREE:
+ rc = free_device_memory(ctx, args->in.free.handle);
+ break;
+
+ case HL_MEM_OP_MAP:
+ if (args->in.flags & HL_MEM_USERPTR) {
+ device_addr = args->in.map_host.host_virt_addr;
+ rc = 0;
+ } else {
+ rc = get_paddr_from_handle(ctx, &args->in,
+ &device_addr);
+ }
+
+ memset(args, 0, sizeof(*args));
+ args->out.device_virt_addr = device_addr;
+ break;
+
+ case HL_MEM_OP_UNMAP:
+ rc = 0;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+ rc = -ENOTTY;
+ break;
+ }
+ }
+
+out:
+ return rc;
+}
+
+/*
+ * hl_pin_host_memory - pins a chunk of host memory
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @addr : the user-space virtual address of the memory area
+ * @size : the size of the memory area
+ * @userptr : pointer to hl_userptr structure
+ *
+ * This function does the following:
+ * - Pins the physical pages
+ * - Create an SG list from those pages
+ */
+int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
+ struct hl_userptr *userptr)
+{
+ u64 start, end;
+ u32 npages, offset;
+ int rc;
+
+ if (!size) {
+ dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
+ return -EINVAL;
+ }
+
+ if (!access_ok((void __user *) (uintptr_t) addr, size)) {
+ dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
+ return -EFAULT;
+ }
+
+ /*
+ * If the combination of the address and size requested for this memory
+ * region causes an integer overflow, return error.
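+ * For example, on a 64-bit CPU, addr = 0xFFFFFFFFFFFFF000 with size = 0x2000
+ * wraps around to 0x1000, so (addr + size) < addr and the request is
+ * rejected.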
+ */
+ if (((addr + size) < addr) ||
+ PAGE_ALIGN(addr + size) < (addr + size)) {
+ dev_err(hdev->dev,
+ "user pointer 0x%llx + %llu causes integer overflow\n",
+ addr, size);
+ return -EINVAL;
+ }
+
+ start = addr & PAGE_MASK;
+ offset = addr & ~PAGE_MASK;
+ end = PAGE_ALIGN(addr + size);
+ npages = (end - start) >> PAGE_SHIFT;
+
+ userptr->size = size;
+ userptr->addr = addr;
+ userptr->dma_mapped = false;
+ INIT_LIST_HEAD(&userptr->job_node);
+
+ userptr->vec = frame_vector_create(npages);
+ if (!userptr->vec) {
+ dev_err(hdev->dev, "Failed to create frame vector\n");
+ return -ENOMEM;
+ }
+
+ rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ userptr->vec);
+
+ if (rc != npages) {
+ dev_err(hdev->dev,
+ "Failed to map host memory, user ptr probably wrong\n");
+ if (rc < 0)
+ goto destroy_framevec;
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ if (frame_vector_to_pages(userptr->vec) < 0) {
+ dev_err(hdev->dev,
+ "Failed to translate frame vector to pages\n");
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
+ if (!userptr->sgt) {
+ rc = -ENOMEM;
+ goto put_framevec;
+ }
+
+ rc = sg_alloc_table_from_pages(userptr->sgt,
+ frame_vector_pages(userptr->vec),
+ npages, offset, size, GFP_ATOMIC);
+ if (rc < 0) {
+ dev_err(hdev->dev, "failed to create SG table from pages\n");
+ goto free_sgt;
+ }
+
+ hl_debugfs_add_userptr(hdev, userptr);
+
+ return 0;
+
+free_sgt:
+ kfree(userptr->sgt);
+put_framevec:
+ put_vaddr_frames(userptr->vec);
+destroy_framevec:
+ frame_vector_destroy(userptr->vec);
+ return rc;
+}
+
+/*
+ * hl_unpin_host_memory - unpins a chunk of host memory
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @userptr : pointer to hl_userptr structure
+ *
+ * This function does the following:
+ * - Unpins the physical pages related to the host memory
+ * - Free the SG list
+ */
+int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
+{
+ struct page **pages;
+
+ hl_debugfs_remove_userptr(hdev, userptr);
+
+ if (userptr->dma_mapped)
+ hdev->asic_funcs->hl_dma_unmap_sg(hdev,
+ userptr->sgt->sgl,
+ userptr->sgt->nents,
+ userptr->dir);
+
+ pages = frame_vector_pages(userptr->vec);
+ if (!IS_ERR(pages)) {
+ int i;
+
+ for (i = 0; i < frame_vector_count(userptr->vec); i++)
+ set_page_dirty_lock(pages[i]);
+ }
+ put_vaddr_frames(userptr->vec);
+ frame_vector_destroy(userptr->vec);
+
+ list_del(&userptr->job_node);
+
+ sg_free_table(userptr->sgt);
+ kfree(userptr->sgt);
+
+ return 0;
+}
+
+/*
+ * hl_userptr_delete_list - clear userptr list
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @userptr_list : pointer to the list to clear
+ *
+ * This function does the following:
+ * - Iterates over the list and unpins the host memory and frees the userptr
+ * structure.
+ */
+void hl_userptr_delete_list(struct hl_device *hdev,
+ struct list_head *userptr_list)
+{
+ struct hl_userptr *userptr, *tmp;
+
+ list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
+ hl_unpin_host_memory(hdev, userptr);
+ kfree(userptr);
+ }
+
+ INIT_LIST_HEAD(userptr_list);
+}
+
+/*
+ * hl_userptr_is_pinned - returns whether the given userptr is pinned
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @addr : user virtual address to look for
+ * @size : size of the memory area to look for
+ * @userptr_list : pointer to the list to search in
+ * @userptr : pointer through which the matching userptr is returned
+ *
+ * This function does the following:
+ * - Iterates over the list and checks if an entry with the given address and
+ *   size exists in it, which means that memory is pinned. If so, returns
+ *   true, otherwise returns false.
+ */
+bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
+ u32 size, struct list_head *userptr_list,
+ struct hl_userptr **userptr)
+{
+ list_for_each_entry((*userptr), userptr_list, job_node) {
+ if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * hl_va_range_init - initialize virtual addresses range
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_range : pointer to the range to initialize
+ * @start : range start address
+ * @end : range end address
+ *
+ * This function does the following:
+ * - Initializes the virtual addresses list of the given range with the given
+ * addresses.
+ */
+static int hl_va_range_init(struct hl_device *hdev,
+ struct hl_va_range *va_range, u64 start, u64 end)
+{
+ int rc;
+
+ INIT_LIST_HEAD(&va_range->list);
+
+ /* PAGE_SIZE alignment */
+
+ if (start & (PAGE_SIZE - 1)) {
+ start &= PAGE_MASK;
+ start += PAGE_SIZE;
+ }
+
+ if (end & (PAGE_SIZE - 1))
+ end &= PAGE_MASK;
+
+ if (start >= end) {
+ dev_err(hdev->dev, "too small vm range for va list\n");
+ return -EFAULT;
+ }
+
+ rc = add_va_block(hdev, va_range, start, end);
+
+ if (rc) {
+ dev_err(hdev->dev, "Failed to init host va list\n");
+ return rc;
+ }
+
+ va_range->start_addr = start;
+ va_range->end_addr = end;
+
+ return 0;
+}
+
+/*
+ * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
+ *
+ * @ctx : pointer to the habanalabs context structure
+ * @host_range_start : host virtual addresses range start
+ * @host_range_end : host virtual addresses range end
+ * @dram_range_start : dram virtual addresses range start
+ * @dram_range_end : dram virtual addresses range end
+ *
+ * This function initializes the following:
+ * - MMU for context
+ * - Virtual address to area descriptor hashtable
+ * - Virtual block list of available virtual memory
+ */
+static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
+ u64 host_range_end, u64 dram_range_start,
+ u64 dram_range_end)
+{
+ struct hl_device *hdev = ctx->hdev;
+ int rc;
+
+ rc = hl_mmu_ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
+ return rc;
+ }
+
+ mutex_init(&ctx->mem_hash_lock);
+ hash_init(ctx->mem_hash);
+
+ mutex_init(&ctx->host_va_range.lock);
+
+ rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
+ host_range_end);
+ if (rc) {
+ dev_err(hdev->dev, "failed to init host vm range\n");
+ goto host_vm_err;
+ }
+
+ mutex_init(&ctx->dram_va_range.lock);
+
+ rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
+ dram_range_end);
+ if (rc) {
+ dev_err(hdev->dev, "failed to init dram vm range\n");
+ goto dram_vm_err;
+ }
+
+ hl_debugfs_add_ctx_mem_hash(hdev, ctx);
+
+ return 0;
+
+dram_vm_err:
+ mutex_destroy(&ctx->dram_va_range.lock);
+
+ mutex_lock(&ctx->host_va_range.lock);
+ clear_va_list_locked(hdev, &ctx->host_va_range.list);
+ mutex_unlock(&ctx->host_va_range.lock);
+host_vm_err:
+ mutex_destroy(&ctx->host_va_range.lock);
+ mutex_destroy(&ctx->mem_hash_lock);
+ hl_mmu_ctx_fini(ctx);
+
+ return rc;
+}
+
+int hl_vm_ctx_init(struct hl_ctx *ctx)
+{
+ struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+ u64 host_range_start, host_range_end, dram_range_start,
+ dram_range_end;
+
+ atomic64_set(&ctx->dram_phys_mem, 0);
+
+ /*
+ * - If MMU is enabled, init the ranges as usual.
+ * - If MMU is disabled, in case of host mapping, the returned address
+ * is the given one.
+ * In case of DRAM mapping, the returned address is the physical
+ * address of the memory related to the given handle.
+ */
+ if (ctx->hdev->mmu_enable) {
+ dram_range_start = prop->va_space_dram_start_address;
+ dram_range_end = prop->va_space_dram_end_address;
+ host_range_start = prop->va_space_host_start_address;
+ host_range_end = prop->va_space_host_end_address;
+ } else {
+ dram_range_start = prop->dram_user_base_address;
+ dram_range_end = prop->dram_end_address;
+ host_range_start = prop->dram_user_base_address;
+ host_range_end = prop->dram_end_address;
+ }
+
+ return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
+ dram_range_start, dram_range_end);
+}
+
+/*
+ * hl_va_range_fini - clear a virtual addresses range
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_range : pointer to the virtual addresses range
+ *
+ * This function does the following:
+ * - Checks that the given range contains the whole initial range
+ * - Frees the virtual addresses block list and its lock
+ */
+static void hl_va_range_fini(struct hl_device *hdev,
+ struct hl_va_range *va_range)
+{
+ struct hl_vm_va_block *va_block;
+
+ if (list_empty(&va_range->list)) {
+ dev_warn(hdev->dev,
+ "va list should not be empty on cleanup!\n");
+ goto out;
+ }
+
+ if (!list_is_singular(&va_range->list)) {
+ dev_warn(hdev->dev,
+ "va list should not contain multiple blocks on cleanup!\n");
+ goto free_va_list;
+ }
+
+ va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
+
+ if (va_block->start != va_range->start_addr ||
+ va_block->end != va_range->end_addr) {
+ dev_warn(hdev->dev,
+ "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
+ va_block->start, va_block->end);
+ goto free_va_list;
+ }
+
+free_va_list:
+ mutex_lock(&va_range->lock);
+ clear_va_list_locked(hdev, &va_range->list);
+ mutex_unlock(&va_range->lock);
+
+out:
+ mutex_destroy(&va_range->lock);
+}
+
+/*
+ * hl_vm_ctx_fini - virtual memory teardown of context
+ *
+ * @ctx : pointer to the habanalabs context structure
+ *
+ * This function performs teardown of the following:
+ * - Virtual block list of available virtual memory
+ * - Virtual address to area descriptor hashtable
+ * - MMU for context
+ *
+ * In addition this function does the following:
+ * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
+ * hashtable should be empty as no valid mappings should exist at this
+ * point.
+ * - Frees any existing physical page list from the idr which relates to the
+ * current context asid.
+ * - This function checks the virtual block list for correctness. At this point
+ * the list should contain one element which describes the whole virtual
+ * memory range of the context. Otherwise, a warning is printed.
+ */
+void hl_vm_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+ struct hl_vm_phys_pg_pack *phys_pg_list;
+ struct hl_vm_hash_node *hnode;
+ struct hlist_node *tmp_node;
+ int i;
+
+ hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
+
+ if (!hash_empty(ctx->mem_hash))
+ dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
+
+ hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
+ dev_dbg(hdev->dev,
+ "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
+ hnode->vaddr, ctx->asid);
+ unmap_device_va(ctx, hnode->vaddr);
+ }
+
+ spin_lock(&vm->idr_lock);
+ idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
+ if (phys_pg_list->asid == ctx->asid) {
+ dev_dbg(hdev->dev,
+ "page list 0x%p of asid %d is still alive\n",
+ phys_pg_list, ctx->asid);
+ free_phys_pg_pack(hdev, phys_pg_list);
+ idr_remove(&vm->phys_pg_pack_handles, i);
+ }
+ spin_unlock(&vm->idr_lock);
+
+ hl_va_range_fini(hdev, &ctx->dram_va_range);
+ hl_va_range_fini(hdev, &ctx->host_va_range);
+
+ mutex_destroy(&ctx->mem_hash_lock);
+ hl_mmu_ctx_fini(ctx);
+}
+
+/*
+ * hl_vm_init - initialize virtual memory module
+ *
+ * @hdev : pointer to the habanalabs device structure
+ *
+ * This function initializes the following:
+ * - MMU module
+ * - DRAM physical pages pool of 2MB
+ * - Idr for device memory allocation handles
+ */
+int hl_vm_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_vm *vm = &hdev->vm;
+ int rc;
+
+ rc = hl_mmu_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to init MMU\n");
+ return rc;
+ }
+
+ vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
+ if (!vm->dram_pg_pool) {
+ dev_err(hdev->dev, "Failed to create dram page pool\n");
+ rc = -ENOMEM;
+ goto pool_create_err;
+ }
+
+ kref_init(&vm->dram_pg_pool_refcount);
+
+ rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
+ prop->dram_end_address - prop->dram_user_base_address,
+ -1);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add memory to dram page pool %d\n", rc);
+ goto pool_add_err;
+ }
+
+ spin_lock_init(&vm->idr_lock);
+ idr_init(&vm->phys_pg_pack_handles);
+
+ atomic64_set(&hdev->dram_used_mem, 0);
+
+ vm->init_done = true;
+
+ return 0;
+
+pool_add_err:
+ gen_pool_destroy(vm->dram_pg_pool);
+pool_create_err:
+ hl_mmu_fini(hdev);
+
+ return rc;
+}
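
A note on the pool sizing above: gen_pool_create() takes a minimum allocation order, which the driver derives from the DRAM page size via __ffs(). The sketch below reproduces that arithmetic in isolation; the 2MB page size and the __builtin_ctzl() stand-in for __ffs() are assumptions for illustration only.

    #include <stdio.h>

    int main(void)
    {
        unsigned long dram_page_size = 2UL << 20;             /* assumed 2MB DRAM page */
        int min_alloc_order = __builtin_ctzl(dram_page_size); /* __ffs() analogue: 21 */

        printf("dram_pg_pool granularity: 2^%d bytes\n", min_alloc_order);
        return 0;
    }
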
+
+/*
+ * hl_vm_fini - virtual memory module teardown
+ *
+ * @hdev : pointer to the habanalabs device structure
+ *
+ * This function performs teardown of the following:
+ * - Idr for device memory allocation handles
+ * - DRAM physical pages pool of 2MB
+ * - MMU module
+ */
+void hl_vm_fini(struct hl_device *hdev)
+{
+ struct hl_vm *vm = &hdev->vm;
+
+ if (!vm->init_done)
+ return;
+
+ /*
+ * At this point all the contexts should be freed and hence no DRAM
+ * memory should be in use. Therefore, the DRAM pool can be freed here.
+ */
+ if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
+ dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
+ __func__);
+
+ hl_mmu_fini(hdev);
+
+ vm->init_done = false;
+}
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
new file mode 100644
index 000000000000..2f2e99cb2743
--- /dev/null
+++ b/drivers/misc/habanalabs/mmu.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+
+static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
+{
+ struct pgt_info *pgt_info = NULL;
+
+ hash_for_each_possible(ctx->mmu_hash, pgt_info, node,
+ (unsigned long) addr)
+ if (addr == pgt_info->addr)
+ break;
+
+ return pgt_info;
+}
+
+static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+
+ gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr,
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+ hash_del(&pgt_info->node);
+
+ kfree(pgt_info);
+}
+
+static u64 alloc_hop(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ u64 addr;
+
+ pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
+ if (!pgt_info)
+ return ULLONG_MAX;
+
+ addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
+ hdev->asic_prop.mmu_hop_table_size);
+ if (!addr) {
+ dev_err(hdev->dev, "failed to allocate page\n");
+ kfree(pgt_info);
+ return ULLONG_MAX;
+ }
+
+ pgt_info->addr = addr;
+ pgt_info->ctx = ctx;
+ pgt_info->num_of_ptes = 0;
+ hash_add(ctx->mmu_hash, &pgt_info->node, addr);
+
+ return addr;
+}
+
+static inline void clear_pte(struct hl_device *hdev, u64 pte_addr)
+{
+ /* clear the last and present bits */
+ hdev->asic_funcs->write_pte(hdev, pte_addr, 0);
+}
+
+static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ get_pgt_info(ctx, hop_addr)->num_of_ptes++;
+}
+
+/*
+ * put_pte - decrement the num of ptes and free the hop if possible
+ *
+ * @ctx: pointer to the context structure
+ * @hop_addr: addr of the hop
+ *
+ * This function returns the number of ptes left on this hop. If the number is
+ * 0, it means the hop was freed.
+ */
+static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+ int num_of_ptes_left;
+
+ pgt_info->num_of_ptes--;
+
+ /*
+ * Need to save the number of ptes left because free_hop might free
+ * the pgt_info
+ */
+ num_of_ptes_left = pgt_info->num_of_ptes;
+ if (!num_of_ptes_left)
+ free_hop(ctx, hop_addr);
+
+ return num_of_ptes_left;
+}
+
+static inline u64 get_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr, u64 mask, u64 shift)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & mask) >> shift);
+}
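
To make the hop-PTE address arithmetic above concrete, here is a standalone sketch. The PTE size, mask and shift values are hypothetical; the real mmu_pte_size and HOPx_MASK/HOPx_SHIFT come from the ASIC headers and may differ.

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_SIZE  8ULL                      /* assumed mmu_pte_size */
    #define HOP_SHIFT 12                        /* hypothetical shift for one hop */
    #define HOP_MASK  (0x1ffULL << HOP_SHIFT)   /* hypothetical 9-bit index field */

    static uint64_t hop_pte_addr(uint64_t hop_addr, uint64_t virt_addr)
    {
        return hop_addr + PTE_SIZE * ((virt_addr & HOP_MASK) >> HOP_SHIFT);
    }

    int main(void)
    {
        uint64_t va = 5ULL << HOP_SHIFT;   /* index bits select entry 5 of the hop */

        printf("pte addr = 0x%llx\n",
               (unsigned long long)hop_pte_addr(0x1000000ULL, va));
        return 0;
    }
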
+
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
+}
+
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
+}
+
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
+}
+
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
+}
+
+static inline u64 get_next_hop_addr(u64 curr_pte)
+{
+ if (curr_pte & PAGE_PRESENT_MASK)
+ return curr_pte & PHYS_ADDR_MASK;
+ else
+ return ULLONG_MAX;
+}
+
+static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
+ bool *is_new_hop)
+{
+ u64 hop_addr = get_next_hop_addr(curr_pte);
+
+ if (hop_addr == ULLONG_MAX) {
+ hop_addr = alloc_hop(ctx);
+ *is_new_hop = (hop_addr != ULLONG_MAX);
+ }
+
+ return hop_addr;
+}
+
+/*
+ * hl_mmu_init - init the mmu module
+ *
+ * @hdev: pointer to the habanalabs device structure
+ *
+ * This function does the following:
+ * - Allocate max_asid zeroed hop0 pgts so no mapping is available
+ * - Enable mmu in hw
+ * - Invalidate the mmu cache
+ * - Create a pool of pages for pgts
+ * - Returns 0 on success
+ *
+ * This function depends on DMA QMAN to be working!
+ */
+int hl_mmu_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ /* MMU HW init was already done in device hw_init() */
+
+ mutex_init(&hdev->mmu_cache_lock);
+
+ hdev->mmu_pgt_pool =
+ gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
+
+ if (!hdev->mmu_pgt_pool) {
+ dev_err(hdev->dev, "Failed to create page gen pool\n");
+ rc = -ENOMEM;
+ goto err_pool_create;
+ }
+
+ rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
+ prop->mmu_hop0_tables_total_size,
+ prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
+ -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+ goto err_pool_add;
+ }
+
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(hdev->mmu_pgt_pool);
+err_pool_create:
+ mutex_destroy(&hdev->mmu_cache_lock);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_fini - release the mmu module.
+ *
+ * @hdev: pointer to the habanalabs device structure
+ *
+ * This function does the following:
+ * - Disable mmu in hw
+ * - free the pgts pool
+ *
+ * All ctxs should be freed before calling this func
+ */
+void hl_mmu_fini(struct hl_device *hdev)
+{
+ if (!hdev->mmu_enable)
+ return;
+
+ gen_pool_destroy(hdev->mmu_pgt_pool);
+
+ mutex_destroy(&hdev->mmu_cache_lock);
+
+ /* MMU HW fini will be done in device hw_fini() */
+}
+
+/**
+ * hl_mmu_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
+ * page tables hops related to this context and an optional DRAM default page
+ * mapping.
+ * Return: 0 on success, non-zero otherwise.
+ */
+int hl_mmu_ctx_init(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
+ hop3_pte_addr, pte_val;
+ int rc, i, j, hop3_allocated = 0;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ mutex_init(&ctx->mmu_lock);
+ hash_init(ctx->mmu_hash);
+
+ if (!hdev->dram_supports_virtual_memory ||
+ !hdev->dram_default_page_mapping)
+ return 0;
+
+ num_of_hop3 = prop->dram_size_for_default_page_mapping;
+ do_div(num_of_hop3, prop->dram_page_size);
+ do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+
+ /* add hop1 and hop2 */
+ total_hops = num_of_hop3 + 2;
+
+ ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
+ if (!ctx->dram_default_hops) {
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+
+ hop1_addr = alloc_hop(ctx);
+ if (hop1_addr == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 1\n");
+ rc = -ENOMEM;
+ goto hop1_err;
+ }
+
+ ctx->dram_default_hops[total_hops - 1] = hop1_addr;
+
+ hop2_addr = alloc_hop(ctx);
+ if (hop2_addr == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 2\n");
+ rc = -ENOMEM;
+ goto hop2_err;
+ }
+
+ ctx->dram_default_hops[total_hops - 2] = hop2_addr;
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ ctx->dram_default_hops[i] = alloc_hop(ctx);
+ if (ctx->dram_default_hops[i] == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
+ rc = -ENOMEM;
+ goto hop3_err;
+ }
+ hop3_allocated++;
+ }
+
+ /* need only pte 0 in hops 0 and 1 */
+ pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ hdev->asic_funcs->write_pte(hdev, get_hop0_addr(ctx), pte_val);
+
+ pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ hdev->asic_funcs->write_pte(hdev, hop1_addr, pte_val);
+ get_pte(ctx, hop1_addr);
+
+ hop2_pte_addr = hop2_addr;
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ hdev->asic_funcs->write_pte(hdev, hop2_pte_addr, pte_val);
+ get_pte(ctx, hop2_addr);
+ hop2_pte_addr += HL_PTE_SIZE;
+ }
+
+ pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
+ LAST_MASK | PAGE_PRESENT_MASK;
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ hop3_pte_addr = ctx->dram_default_hops[i];
+ for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ hdev->asic_funcs->write_pte(hdev, hop3_pte_addr,
+ pte_val);
+ get_pte(ctx, ctx->dram_default_hops[i]);
+ hop3_pte_addr += HL_PTE_SIZE;
+ }
+ }
+
+ /* flush all writes to reach PCI */
+ mb();
+ hdev->asic_funcs->read_pte(hdev, hop2_addr);
+
+ return 0;
+
+hop3_err:
+ for (i = 0 ; i < hop3_allocated ; i++)
+ free_hop(ctx, ctx->dram_default_hops[i]);
+ free_hop(ctx, hop2_addr);
+hop2_err:
+ free_hop(ctx, hop1_addr);
+hop1_err:
+ kfree(ctx->dram_default_hops);
+alloc_err:
+ mutex_destroy(&ctx->mmu_lock);
+
+ return rc;
+}
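
The hop bookkeeping in hl_mmu_ctx_init() reduces to simple division: the default-mapped DRAM range is split into huge pages, every PTE_ENTRIES_IN_HOP of which shares one hop3 table, plus a single hop1 and hop2. Below is a standalone sketch with assumed sizes (4GB range, 2MB pages, 512 entries per hop); the real values come from the ASIC properties.

    #include <stdio.h>

    #define PTE_ENTRIES_IN_HOP 512ULL   /* assumed: 9 index bits per hop */

    int main(void)
    {
        unsigned long long dram_size = 4ULL << 30;   /* assumed 4GB default-mapped range */
        unsigned long long page_size = 2ULL << 20;   /* assumed 2MB DRAM pages */
        unsigned long long num_of_hop3, total_hops;

        num_of_hop3 = dram_size / page_size;         /* 2048 huge pages */
        num_of_hop3 /= PTE_ENTRIES_IN_HOP;           /* -> 4 hop3 tables */
        total_hops = num_of_hop3 + 2;                /* plus one hop1 and one hop2 */

        printf("hop3 tables: %llu, dram_default_hops entries: %llu\n",
               num_of_hop3, total_hops);
        return 0;
    }
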
+
+/*
+ * hl_mmu_ctx_fini - disable a ctx from using the mmu module
+ *
+ * @ctx: pointer to the context structure
+ *
+ * This function does the following:
+ * - Free any pgts which were not freed yet
+ * - Free the mutex
+ * - Free DRAM default page mapping hops
+ */
+void hl_mmu_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pgt_info *pgt_info;
+ struct hlist_node *tmp;
+ u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr,
+ hop3_pte_addr;
+ int i, j;
+
+ if (!ctx->hdev->mmu_enable)
+ return;
+
+ if (hdev->dram_supports_virtual_memory &&
+ hdev->dram_default_page_mapping) {
+
+ num_of_hop3 = prop->dram_size_for_default_page_mapping;
+ do_div(num_of_hop3, prop->dram_page_size);
+ do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+
+ /* add hop1 and hop2 */
+ total_hops = num_of_hop3 + 2;
+ hop1_addr = ctx->dram_default_hops[total_hops - 1];
+ hop2_addr = ctx->dram_default_hops[total_hops - 2];
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ hop3_pte_addr = ctx->dram_default_hops[i];
+ for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ clear_pte(hdev, hop3_pte_addr);
+ put_pte(ctx, ctx->dram_default_hops[i]);
+ hop3_pte_addr += HL_PTE_SIZE;
+ }
+ }
+
+ hop2_pte_addr = hop2_addr;
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ clear_pte(hdev, hop2_pte_addr);
+ put_pte(ctx, hop2_addr);
+ hop2_pte_addr += HL_PTE_SIZE;
+ }
+
+ clear_pte(hdev, hop1_addr);
+ put_pte(ctx, hop1_addr);
+ clear_pte(hdev, get_hop0_addr(ctx));
+
+ kfree(ctx->dram_default_hops);
+
+ /* flush all writes to reach PCI */
+ mb();
+ hdev->asic_funcs->read_pte(hdev, hop2_addr);
+ }
+
+ if (!hash_empty(ctx->mmu_hash))
+ dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
+
+ hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) {
+ dev_err(hdev->dev,
+ "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
+ pgt_info->addr, ctx->asid, pgt_info->num_of_ptes);
+ free_hop(ctx, pgt_info->addr);
+ }
+
+ mutex_destroy(&ctx->mmu_lock);
+}
+
+static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 hop0_addr = 0, hop0_pte_addr = 0,
+ hop1_addr = 0, hop1_pte_addr = 0,
+ hop2_addr = 0, hop2_pte_addr = 0,
+ hop3_addr = 0, hop3_pte_addr = 0,
+ hop4_addr = 0, hop4_pte_addr = 0,
+ curr_pte;
+ int clear_hop3 = 1;
+ bool is_dram_addr, is_huge, is_dram_default_page_mapping;
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ hop0_addr = get_hop0_addr(ctx);
+
+ hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+
+ hop1_addr = get_next_hop_addr(curr_pte);
+
+ if (hop1_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+
+ hop2_addr = get_next_hop_addr(curr_pte);
+
+ if (hop2_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+
+ hop3_addr = get_next_hop_addr(curr_pte);
+
+ if (hop3_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+
+ is_huge = curr_pte & LAST_MASK;
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev,
+ "DRAM unmapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ is_dram_default_page_mapping =
+ hdev->dram_default_page_mapping && is_dram_addr;
+
+ if (!is_huge) {
+ hop4_addr = get_next_hop_addr(curr_pte);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+
+ clear_hop3 = 0;
+ }
+
+ if (is_dram_default_page_mapping) {
+ u64 zero_pte = (prop->mmu_dram_default_page_addr &
+ PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ PAGE_PRESENT_MASK;
+ if (curr_pte == zero_pte) {
+ dev_err(hdev->dev,
+ "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
+ virt_addr);
+ goto not_mapped;
+ }
+
+ if (!(curr_pte & PAGE_PRESENT_MASK)) {
+ dev_err(hdev->dev,
+ "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
+ virt_addr);
+ goto not_mapped;
+ }
+
+ hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, zero_pte);
+ put_pte(ctx, hop3_addr);
+ } else {
+ if (!(curr_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
+ clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr);
+
+ if (hop4_addr && !put_pte(ctx, hop4_addr))
+ clear_hop3 = 1;
+
+ if (!clear_hop3)
+ goto flush;
+ clear_pte(hdev, hop3_pte_addr);
+
+ if (put_pte(ctx, hop3_addr))
+ goto flush;
+ clear_pte(hdev, hop2_pte_addr);
+
+ if (put_pte(ctx, hop2_addr))
+ goto flush;
+ clear_pte(hdev, hop1_pte_addr);
+
+ if (put_pte(ctx, hop1_addr))
+ goto flush;
+ clear_pte(hdev, hop0_pte_addr);
+ }
+
+flush:
+ /* flush all writes from all cores to reach PCI */
+ mb();
+
+ hdev->asic_funcs->read_pte(hdev,
+ hop4_addr ? hop4_pte_addr : hop3_pte_addr);
+
+ return 0;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+
+ return -EINVAL;
+}
+
+/*
+ * hl_mmu_unmap - unmaps a virtual addr
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: virt addr to unmap
+ * @page_size: size of the page to unmap
+ *
+ * This function does the following:
+ * - Check that the virt addr is mapped
+ * - Unmap the virt addr and frees pgts if possible
+ * - Returns 0 on success, -EINVAL if the given addr is not mapped
+ *
+ * Because this function changes the page tables in the device and because it
+ * changes the MMU hash, it must be protected by a lock.
+ * However, because it unmaps only a single page, the lock should be taken
+ * at a higher level, to protect the unmapping of the entire memory area.
+ */
+int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 real_virt_addr;
+ u32 real_page_size, npages;
+ int i, rc;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ /*
+ * The H/W handles mappings of 4KB/2MB pages. Hence, if the host page size
+ * is bigger, we break it into sub-pages and unmap them separately.
+ */
+ if ((page_size % PAGE_SIZE_2MB) == 0) {
+ real_page_size = PAGE_SIZE_2MB;
+ } else if ((page_size % PAGE_SIZE_4KB) == 0) {
+ real_page_size = PAGE_SIZE_4KB;
+ } else {
+ dev_err(hdev->dev,
+ "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
+ page_size);
+
+ return -EFAULT;
+ }
+
+ npages = page_size / real_page_size;
+ real_virt_addr = virt_addr;
+
+ for (i = 0 ; i < npages ; i++) {
+ rc = _hl_mmu_unmap(ctx, real_virt_addr);
+ if (rc)
+ return rc;
+
+ real_virt_addr += real_page_size;
+ }
+
+ return 0;
+}
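
The 4KB/2MB sub-page split performed by hl_mmu_unmap() (and by hl_mmu_map() further below) can be reproduced in isolation. The 64KB host page in this sketch is only an example value; the two page-size constants mirror the ones used by the driver.

    #include <stdio.h>

    #define PAGE_SIZE_4KB 0x1000u
    #define PAGE_SIZE_2MB 0x200000u

    int main(void)
    {
        unsigned int page_size = 64 * 1024;   /* example: a 64KB host page */
        unsigned int real_page_size;

        if ((page_size % PAGE_SIZE_2MB) == 0)
            real_page_size = PAGE_SIZE_2MB;
        else if ((page_size % PAGE_SIZE_4KB) == 0)
            real_page_size = PAGE_SIZE_4KB;
        else
            return 1;                         /* neither 4KB nor 2MB aligned */

        printf("npages = %u\n", page_size / real_page_size);   /* 16 */
        return 0;
    }
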
+
+static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+ u32 page_size)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 hop0_addr = 0, hop0_pte_addr = 0,
+ hop1_addr = 0, hop1_pte_addr = 0,
+ hop2_addr = 0, hop2_pte_addr = 0,
+ hop3_addr = 0, hop3_pte_addr = 0,
+ hop4_addr = 0, hop4_pte_addr = 0,
+ curr_pte = 0;
+ bool hop1_new = false, hop2_new = false, hop3_new = false,
+ hop4_new = false, is_huge, is_dram_addr,
+ is_dram_default_page_mapping;
+ int rc = -ENOMEM;
+
+ /*
+ * This mapping function can map a 4KB or a 2MB page. For a 2MB page there
+ * are only 3 hops rather than 4. Currently the DRAM allocation uses 2MB
+ * pages only, but user memory could have been allocated with either of the
+ * two page sizes. Since this is common code for all three cases, we need
+ * this huge page check.
+ */
+ is_huge = page_size == PAGE_SIZE_2MB;
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ is_dram_default_page_mapping =
+ hdev->dram_default_page_mapping && is_dram_addr;
+
+ hop0_addr = get_hop0_addr(ctx);
+
+ hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+
+ hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
+
+ if (hop1_addr == ULLONG_MAX)
+ goto err;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+
+ hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
+
+ if (hop2_addr == ULLONG_MAX)
+ goto err;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+
+ hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
+
+ if (hop3_addr == ULLONG_MAX)
+ goto err;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+
+ if (!is_huge) {
+ hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto err;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+
+ curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+ }
+
+ if (is_dram_default_page_mapping) {
+ u64 zero_pte = (prop->mmu_dram_default_page_addr &
+ PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ PAGE_PRESENT_MASK;
+
+ if (curr_pte != zero_pte) {
+ dev_err(hdev->dev,
+ "DRAM: mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (hop1_new || hop2_new || hop3_new || hop4_new) {
+ dev_err(hdev->dev,
+ "DRAM mapping should not allocate more hops\n");
+ rc = -EFAULT;
+ goto err;
+ }
+ } else if (curr_pte & PAGE_PRESENT_MASK) {
+ dev_err(hdev->dev,
+ "mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
+
+ dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev, hop0_pte_addr),
+ hop0_pte_addr);
+ dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev, hop1_pte_addr),
+ hop1_pte_addr);
+ dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev, hop2_pte_addr),
+ hop2_pte_addr);
+ dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev, hop3_pte_addr),
+ hop3_pte_addr);
+
+ if (!is_huge)
+ dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
+ hdev->asic_funcs->read_pte(hdev,
+ hop4_pte_addr),
+ hop4_pte_addr);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
+ | PAGE_PRESENT_MASK;
+
+ hdev->asic_funcs->write_pte(hdev,
+ is_huge ? hop3_pte_addr : hop4_pte_addr,
+ curr_pte);
+
+ if (hop1_new) {
+ curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr,
+ curr_pte);
+ }
+ if (hop2_new) {
+ curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr,
+ curr_pte);
+ get_pte(ctx, hop1_addr);
+ }
+ if (hop3_new) {
+ curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr,
+ curr_pte);
+ get_pte(ctx, hop2_addr);
+ }
+
+ if (!is_huge) {
+ if (hop4_new) {
+ curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+ hop3_pte_addr, curr_pte);
+ get_pte(ctx, hop3_addr);
+ }
+
+ get_pte(ctx, hop4_addr);
+ } else {
+ get_pte(ctx, hop3_addr);
+ }
+
+ /* flush all writes from all cores to reach PCI */
+ mb();
+
+ hdev->asic_funcs->read_pte(hdev,
+ is_huge ? hop3_pte_addr : hop4_pte_addr);
+
+ return 0;
+
+err:
+ if (hop4_new)
+ free_hop(ctx, hop4_addr);
+ if (hop3_new)
+ free_hop(ctx, hop3_addr);
+ if (hop2_new)
+ free_hop(ctx, hop2_addr);
+ if (hop1_new)
+ free_hop(ctx, hop1_addr);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_map - maps a virtual addr to physical addr
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: virt addr to map from
+ * @phys_addr: phys addr to map to
+ * @page_size: physical page size
+ *
+ * This function does the following:
+ * - Check that the virt addr is not mapped
+ * - Allocate pgts as necessary in order to map the virt addr to the phys
+ * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
+ *
+ * Because this function changes the page tables in the device and because it
+ * changes the MMU hash, it must be protected by a lock.
+ * However, because it maps only a single page, the lock should be taken
+ * at a higher level, to protect the mapping of the entire memory area.
+ */
+int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 real_virt_addr;
+ u32 real_page_size, npages;
+ int i, rc, mapped_cnt = 0;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ /*
+ * The H/W handles mappings of 4KB/2MB pages. Hence, if the host page size
+ * is bigger, we break it into sub-pages and map them separately.
+ */
+ if ((page_size % PAGE_SIZE_2MB) == 0) {
+ real_page_size = PAGE_SIZE_2MB;
+ } else if ((page_size % PAGE_SIZE_4KB) == 0) {
+ real_page_size = PAGE_SIZE_4KB;
+ } else {
+ dev_err(hdev->dev,
+ "page size of %u is not 4KB nor 2MB aligned, can't map\n",
+ page_size);
+
+ return -EFAULT;
+ }
+
+ npages = page_size / real_page_size;
+ real_virt_addr = virt_addr;
+
+ for (i = 0 ; i < npages ; i++) {
+ rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+ real_page_size);
+ if (rc)
+ goto err;
+
+ real_virt_addr += real_page_size;
+ mapped_cnt++;
+ }
+
+ return 0;
+
+err:
+ real_virt_addr = virt_addr;
+ for (i = 0 ; i < mapped_cnt ; i++) {
+ if (_hl_mmu_unmap(ctx, real_virt_addr))
+ dev_warn_ratelimited(hdev->dev,
+ "failed to unmap va: 0x%llx\n", real_virt_addr);
+
+ real_virt_addr += real_page_size;
+ }
+
+ return rc;
+}
+
+/*
+ * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+void hl_mmu_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_swap_in - marks all mapping of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+void hl_mmu_swap_in(struct hl_ctx *ctx)
+{
+
+}
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
new file mode 100644
index 000000000000..c900ab15cceb
--- /dev/null
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#include <linux/pci.h>
+
+#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */
+#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */
+
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ if (curr)
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ else
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.pll_index = __cpu_to_le32(pll_index);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SET_CLK_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get frequency of PLL %d, error %d\n",
+ pll_index, rc);
+ result = rc;
+ }
+
+ return result;
+}
+
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.pll_index = __cpu_to_le32(pll_index);
+ pkt.value = __cpu_to_le64(freq);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SET_CLK_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set frequency to PLL %d, error %d\n",
+ pll_index, rc);
+}
+
+u64 hl_get_max_power(struct hl_device *hdev)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SET_PWR_PKT_TIMEOUT, &result);
+
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
+ result = rc;
+ }
+
+ return result;
+}
+
+void hl_set_max_power(struct hl_device *hdev, u64 value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SET_PWR_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
+}
+
+static ssize_t pm_mng_profile_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n",
+ (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
+ (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
+ "unknown");
+}
+
+static ssize_t pm_mng_profile_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ mutex_lock(&hdev->fd_open_cnt_lock);
+
+ if (atomic_read(&hdev->fd_open_cnt) > 0) {
+ dev_err(hdev->dev,
+ "Can't change PM profile while user process is opened on the device\n");
+ count = -EPERM;
+ goto unlock_mutex;
+ }
+
+ if (strncmp("auto", buf, strlen("auto")) == 0) {
+ /* Make sure we are in LOW PLL when changing modes */
+ if (hdev->pm_mng_profile == PM_MANUAL) {
+ atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
+ hl_device_set_frequency(hdev, PLL_LOW);
+ hdev->pm_mng_profile = PM_AUTO;
+ }
+ } else if (strncmp("manual", buf, strlen("manual")) == 0) {
+ /* Make sure we are in LOW PLL when changing modes */
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ flush_delayed_work(&hdev->work_freq);
+ hdev->pm_mng_profile = PM_MANUAL;
+ }
+ } else {
+ dev_err(hdev->dev, "value should be auto or manual\n");
+ count = -EINVAL;
+ goto unlock_mutex;
+ }
+
+unlock_mutex:
+ mutex_unlock(&hdev->fd_open_cnt_lock);
+out:
+ return count;
+}
+
+static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ return sprintf(buf, "%u\n", hdev->high_pll);
+}
+
+static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hdev->high_pll = value;
+
+out:
+ return count;
+}
+
+static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.uboot_ver);
+}
+
+static ssize_t armcp_kernel_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s", hdev->asic_prop.armcp_info.kernel_version);
+}
+
+static ssize_t armcp_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.armcp_version);
+}
+
+static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%08x\n",
+ hdev->asic_prop.armcp_info.cpld_version);
+}
+
+static ssize_t infineon_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%04x\n",
+ hdev->asic_prop.armcp_info.infineon_version);
+}
+
+static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.fuse_version);
+}
+
+static ssize_t thermal_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s", hdev->asic_prop.armcp_info.thermal_version);
+}
+
+static ssize_t preboot_btl_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.preboot_ver);
+}
+
+static ssize_t soft_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hl_device_reset(hdev, false, false);
+
+out:
+ return count;
+}
+
+static ssize_t hard_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hl_device_reset(hdev, true, false);
+
+out:
+ return count;
+}
+
+static ssize_t device_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ char *str;
+
+ switch (hdev->asic_type) {
+ case ASIC_GOYA:
+ str = "GOYA";
+ break;
+ default:
+ dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
+ hdev->asic_type);
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%s\n", str);
+}
+
+static ssize_t pci_addr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ /* Use dummy, fixed address for simulator */
+ if (!hdev->pdev)
+ return sprintf(buf, "0000:%02d:00.0\n", hdev->id);
+
+ return sprintf(buf, "%04x:%02x:%02x.%x\n",
+ pci_domain_nr(hdev->pdev->bus),
+ hdev->pdev->bus->number,
+ PCI_SLOT(hdev->pdev->devfn),
+ PCI_FUNC(hdev->pdev->devfn));
+}
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ char *str;
+
+ if (atomic_read(&hdev->in_reset))
+ str = "In reset";
+ else if (hdev->disabled)
+ str = "Malfunction";
+ else
+ str = "Operational";
+
+ return sprintf(buf, "%s\n", str);
+}
+
+static ssize_t write_open_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", hdev->user_ctx ? 1 : 0);
+}
+
+static ssize_t soft_reset_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", hdev->soft_reset_cnt);
+}
+
+static ssize_t hard_reset_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", hdev->hard_reset_cnt);
+}
+
+static ssize_t max_power_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long val;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ val = hl_get_max_power(hdev);
+
+ return sprintf(buf, "%lu\n", val);
+}
+
+static ssize_t max_power_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ unsigned long value;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hdev->max_power = value;
+ hl_set_max_power(hdev, value);
+
+out:
+ return count;
+}
+
+static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset,
+ size_t max_size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ char *data;
+ int rc;
+
+ if (!max_size)
+ return -EINVAL;
+
+ data = kzalloc(max_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rc = hdev->asic_funcs->get_eeprom_data(hdev, data, max_size);
+ if (rc)
+ goto out;
+
+ memcpy(buf, data, max_size);
+
+out:
+ kfree(data);
+
+ return rc ? rc : max_size;
+}
+
+static DEVICE_ATTR_RO(armcp_kernel_ver);
+static DEVICE_ATTR_RO(armcp_ver);
+static DEVICE_ATTR_RO(cpld_ver);
+static DEVICE_ATTR_RO(device_type);
+static DEVICE_ATTR_RO(fuse_ver);
+static DEVICE_ATTR_WO(hard_reset);
+static DEVICE_ATTR_RO(hard_reset_cnt);
+static DEVICE_ATTR_RW(high_pll);
+static DEVICE_ATTR_RO(infineon_ver);
+static DEVICE_ATTR_RW(max_power);
+static DEVICE_ATTR_RO(pci_addr);
+static DEVICE_ATTR_RW(pm_mng_profile);
+static DEVICE_ATTR_RO(preboot_btl_ver);
+static DEVICE_ATTR_WO(soft_reset);
+static DEVICE_ATTR_RO(soft_reset_cnt);
+static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_RO(thermal_ver);
+static DEVICE_ATTR_RO(uboot_ver);
+static DEVICE_ATTR_RO(write_open_cnt);
+
+static struct bin_attribute bin_attr_eeprom = {
+ .attr = {.name = "eeprom", .mode = (0444)},
+ .size = PAGE_SIZE,
+ .read = eeprom_read_handler
+};
+
+static struct attribute *hl_dev_attrs[] = {
+ &dev_attr_armcp_kernel_ver.attr,
+ &dev_attr_armcp_ver.attr,
+ &dev_attr_cpld_ver.attr,
+ &dev_attr_device_type.attr,
+ &dev_attr_fuse_ver.attr,
+ &dev_attr_hard_reset.attr,
+ &dev_attr_hard_reset_cnt.attr,
+ &dev_attr_high_pll.attr,
+ &dev_attr_infineon_ver.attr,
+ &dev_attr_max_power.attr,
+ &dev_attr_pci_addr.attr,
+ &dev_attr_pm_mng_profile.attr,
+ &dev_attr_preboot_btl_ver.attr,
+ &dev_attr_soft_reset.attr,
+ &dev_attr_soft_reset_cnt.attr,
+ &dev_attr_status.attr,
+ &dev_attr_thermal_ver.attr,
+ &dev_attr_uboot_ver.attr,
+ &dev_attr_write_open_cnt.attr,
+ NULL,
+};
+
+static struct bin_attribute *hl_dev_bin_attrs[] = {
+ &bin_attr_eeprom,
+ NULL
+};
+
+static struct attribute_group hl_dev_attr_group = {
+ .attrs = hl_dev_attrs,
+ .bin_attrs = hl_dev_bin_attrs,
+};
+
+static struct attribute_group hl_dev_clks_attr_group;
+
+static const struct attribute_group *hl_dev_attr_groups[] = {
+ &hl_dev_attr_group,
+ &hl_dev_clks_attr_group,
+ NULL,
+};
+
+int hl_sysfs_init(struct hl_device *hdev)
+{
+ int rc;
+
+ hdev->pm_mng_profile = PM_AUTO;
+ hdev->max_power = hdev->asic_prop.max_power_default;
+
+ hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
+
+ rc = device_add_groups(hdev->dev, hl_dev_attr_groups);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add groups to device, error %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void hl_sysfs_fini(struct hl_device *hdev)
+{
+ device_remove_groups(hdev->dev, hl_dev_attr_groups);
+}
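
For illustration, the attributes registered by hl_sysfs_init() are plain sysfs files, so userspace can read them directly. The path below is an assumption and depends on how the character device and its class are registered.

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        FILE *f = fopen("/sys/class/habanalabs/hl0/status", "r");   /* assumed path */

        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("device status: %s", buf);
        fclose(f);
        return 0;
    }
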
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index e9c9ef52c76a..927309b86bab 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -29,6 +29,13 @@ static struct class *ilo_class;
static unsigned int ilo_major;
static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
+static const struct pci_device_id ilo_blacklist[] = {
+ /* auxiliary iLO */
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)},
+ /* CL */
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)},
+ {}
+};
static inline int get_entry_id(int entry)
{
@@ -763,9 +770,10 @@ static int ilo_probe(struct pci_dev *pdev,
int devnum, minor, start, error = 0;
struct ilo_hwinfo *ilo_hw;
- /* Ignore subsystem_device = 0x1979 (set by BIOS) */
- if (pdev->subsystem_device == 0x1979)
- return 0;
+ if (pci_match_id(ilo_blacklist, pdev)) {
+ dev_dbg(&pdev->dev, "Not supported on this device\n");
+ return -ENODEV;
+ }
if (max_ccb > MAX_CCB)
max_ccb = MAX_CCB;
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index b8aaa684c397..2ed23c99f59f 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
*
* Return:
* 0 - Success
+ * Non-zero - Failure
*/
static int ibmvmc_open(struct inode *inode, struct file *file)
{
struct ibmvmc_file_session *session;
- int rc = 0;
pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
(unsigned long)inode, (unsigned long)file,
ibmvmc.state);
session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
+
session->file = file;
file->private_data = session;
- return rc;
+ return 0;
}
/**
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 81a0541ef3ac..294fb2f66bfe 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -146,6 +146,8 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
*/
for (i = 0; i < NUM_MIRRORED_REGS; i++) {
temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
+ if (temp < 0)
+ temp = 0;
data->regs[regs_to_copy[i]] = temp >> 8;
}
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 2837dc77478e..b51cf182b031 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -37,16 +37,9 @@
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
#include <linux/slab.h>
-#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
-#ifdef CONFIG_IDE
-#include <linux/ide.h>
-#endif
-
#define DEFAULT_COUNT 10
static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
@@ -102,9 +95,7 @@ static struct crashpoint crashpoints[] = {
CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
CRASHPOINT("TIMERADD", "hrtimer_start"),
CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
-# ifdef CONFIG_IDE
CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
-# endif
#endif
};
@@ -152,7 +143,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(EXEC_VMALLOC),
CRASHTYPE(EXEC_RODATA),
CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
@@ -347,9 +340,9 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
crashtypes[i].name);
}
buf[n] = '\0';
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 3c6fd327e166..b69ee004a3f7 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
void lkdtm_EXEC_VMALLOC(void);
void lkdtm_EXEC_RODATA(void);
void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_EXEC_NULL(void);
void lkdtm_ACCESS_USERSPACE(void);
+void lkdtm_ACCESS_NULL(void);
/* lkdtm_refcount.c */
void lkdtm_REFCOUNT_INC_OVERFLOW(void);
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 53b85c9d16b8..62f76d506f04 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
{
void (*func)(void) = dst;
- pr_info("attempting ok execution at %p\n", do_nothing);
+ pr_info("attempting ok execution at %px\n", do_nothing);
do_nothing();
if (write == CODE_WRITE) {
@@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
- pr_info("attempting bad execution at %p\n", func);
+ pr_info("attempting bad execution at %px\n", func);
func();
}
@@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
- pr_info("attempting ok execution at %p\n", do_nothing);
+ pr_info("attempting ok execution at %px\n", do_nothing);
do_nothing();
copied = access_process_vm(current, (unsigned long)dst, do_nothing,
EXEC_SIZE, FOLL_WRITE);
if (copied < EXEC_SIZE)
return;
- pr_info("attempting bad execution at %p\n", func);
+ pr_info("attempting bad execution at %px\n", func);
func();
}
@@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
/* Explicitly cast away "const" for the test. */
unsigned long *ptr = (unsigned long *)&rodata;
- pr_info("attempting bad rodata write at %p\n", ptr);
+ pr_info("attempting bad rodata write at %px\n", ptr);
*ptr ^= 0xabcd1234;
}
@@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
return;
}
- pr_info("attempting bad ro_after_init write at %p\n", ptr);
+ pr_info("attempting bad ro_after_init write at %px\n", ptr);
*ptr ^= 0xabcd1234;
}
@@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
- pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+ pr_info("attempting bad %zu byte write at %px\n", size, ptr);
memcpy(ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
@@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_EXEC_NULL(void)
+{
+ execute_location(NULL, CODE_AS_IS);
+}
+
void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
@@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
ptr = (unsigned long *)user_addr;
- pr_info("attempting bad read at %p\n", ptr);
+ pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
- pr_info("attempting bad write at %p\n", ptr);
+ pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_ACCESS_NULL(void)
+{
+ unsigned long tmp;
+ unsigned long *ptr = (unsigned long *)NULL;
+
+ pr_info("attempting bad read at %px\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %px\n", ptr);
+ *ptr = tmp;
+}
+
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index c49e1d2269af..74e2c667dce0 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -43,3 +43,13 @@ config INTEL_MEI_TXE
Supported SoCs:
Intel Bay Trail
+
+config INTEL_MEI_HDCP
+ tristate "Intel HDCP2.2 services of ME Interface"
+ select INTEL_MEI_ME
+ depends on DRM_I915
+ help
+ MEI Support for HDCP2.2 Services on Intel platforms.
+
+ Enables the ME FW services required for HDCP2.2 support through
+ Intel's I915 display driver.
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index d9215fc4e499..8c2d9565a4cb 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -24,3 +24,5 @@ mei-txe-objs += hw-txe.o
mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
+
+obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 80215c312f0e..5fcac02233af 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -40,6 +40,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
+#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \
+ 0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
+
#define MEI_UUID_ANY NULL_UUID_LE
/**
@@ -71,6 +74,18 @@ static void blacklist(struct mei_cl_device *cldev)
cldev->do_match = 0;
}
+/**
+ * whitelist - forcefully whitelist client
+ *
+ * @cldev: me clients device
+ */
+static void whitelist(struct mei_cl_device *cldev)
+{
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+
+ cldev->do_match = 1;
+}
+
#define OSTYPE_LINUX 2
struct mei_os_ver {
__le16 build;
@@ -472,6 +487,7 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
MEI_FIXUP(MEI_UUID_WD, mei_wd),
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
+ MEI_FIXUP(MEI_UUID_HDCP, whitelist),
};
/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index fc3872fe7b25..65bec998eb6e 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -28,7 +28,6 @@
#include "client.h"
#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
-#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
/**
* __mei_cl_send - internal client send (write)
@@ -541,17 +540,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
goto out;
}
- if (!mei_cl_bus_module_get(cldev)) {
- dev_err(&cldev->dev, "get hw module failed");
- ret = -ENODEV;
- goto out;
- }
-
ret = mei_cl_connect(cl, cldev->me_cl, NULL);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&cldev->dev, "cannot connect\n");
- mei_cl_bus_module_put(cldev);
- }
out:
mutex_unlock(&bus->device_lock);
@@ -614,7 +605,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
if (err < 0)
dev_err(bus->dev, "Could not disconnect from the ME client\n");
- mei_cl_bus_module_put(cldev);
out:
/* Flush queues and remove any pending read */
mei_cl_flush_queues(cl, NULL);
@@ -725,9 +715,16 @@ static int mei_cl_device_probe(struct device *dev)
if (!id)
return -ENODEV;
+ if (!mei_cl_bus_module_get(cldev)) {
+ dev_err(&cldev->dev, "get hw module failed");
+ return -ENODEV;
+ }
+
ret = cldrv->probe(cldev, id);
- if (ret)
+ if (ret) {
+ mei_cl_bus_module_put(cldev);
return ret;
+ }
__module_get(THIS_MODULE);
return 0;
@@ -755,6 +752,7 @@ static int mei_cl_device_remove(struct device *dev)
mei_cldev_unregister_callbacks(cldev);
+ mei_cl_bus_module_put(cldev);
module_put(THIS_MODULE);
dev->driver = NULL;
return ret;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1fc8ea0f519b..ca4c9cc218a2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head,
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, head, list) {
- if (cl == cb->cl)
+ if (cl == cb->cl) {
list_del_init(&cb->list);
+ if (cb->fop_type == MEI_FOP_READ)
+ mei_io_cb_free(cb);
+ }
}
}
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 78c26cebf5d4..e6207f614816 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1029,29 +1029,36 @@ static void mei_hbm_config_features(struct mei_device *dev)
dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
dev->hbm_f_pg_supported = 1;
+ dev->hbm_f_dc_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
dev->hbm_f_dc_supported = 1;
+ dev->hbm_f_ie_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
dev->hbm_f_ie_supported = 1;
/* disconnect on connect timeout instead of link reset */
+ dev->hbm_f_dot_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
dev->hbm_f_dot_supported = 1;
/* Notification Event Support */
+ dev->hbm_f_ev_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
dev->hbm_f_ev_supported = 1;
/* Fixed Address Client Support */
+ dev->hbm_f_fa_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
dev->hbm_f_fa_supported = 1;
/* OS ver message Support */
+ dev->hbm_f_os_supported = 0;
if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
dev->hbm_f_os_supported = 1;
/* DMA Ring Support */
+ dev->hbm_f_dr_supported = 0;
if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
(dev->version.major_version == HBM_MAJOR_VERSION_DR &&
dev->version.minor_version >= HBM_MINOR_VERSION_DR))
@@ -1187,9 +1194,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;
if (dma_setup_res->status) {
- dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
- dma_setup_res->status,
- mei_hbm_status_str(dma_setup_res->status));
+ u8 status = dma_setup_res->status;
+
+ if (status == MEI_HBMS_NOT_ALLOWED) {
+ dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
+ } else {
+ dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
+ status,
+ mei_hbm_status_str(status));
+ }
dev->hbm_f_dr_supported = 0;
mei_dmam_ring_free(dev);
}
diff --git a/drivers/misc/mei/hdcp/Makefile b/drivers/misc/mei/hdcp/Makefile
new file mode 100644
index 000000000000..adbe7506282d
--- /dev/null
+++ b/drivers/misc/mei/hdcp/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2019, Intel Corporation.
+#
+# Makefile - HDCP client driver for Intel MEI Bus Driver.
+
+obj-$(CONFIG_INTEL_MEI_HDCP) += mei_hdcp.o
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
new file mode 100644
index 000000000000..90b6ae8e9dae
--- /dev/null
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Mei_hdcp.c: HDCP client driver for mei bus
+ *
+ * Author:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+/**
+ * DOC: MEI_HDCP Client Driver
+ *
+ * This is a client driver on the mei bus that makes the HDCP2.2 services of
+ * the ME FW available to interested consumers such as I915.
+ *
+ * This module acts as a translation layer between the HDCP protocol
+ * implementer (I915) and the ME FW by translating HDCP2.2 authentication
+ * messages into ME FW command payloads and vice versa.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/component.h>
+#include <drm/drm_connector.h>
+#include <drm/i915_component.h>
+#include <drm/i915_mei_hdcp_interface.h>
+
+#include "mei_hdcp.h"
+
+static inline u8 mei_get_ddi_index(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return MEI_DDI_A;
+ case PORT_B ... PORT_F:
+ return (u8)port;
+ default:
+ return MEI_DDI_INVALID_PORT;
+ }
+}
+
+/**
+ * mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @ake_data: AKE_Init msg output.
+ *
+ * Return: 0 on Success, <0 on Failure.
+ */
+static int
+mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } };
+ struct wired_cmd_initiate_hdcp2_session_out
+ session_init_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !ake_data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ session_init_in.header.api_version = HDCP_API_VERSION;
+ session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
+ session_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ session_init_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
+
+ session_init_in.port.integrated_port_type = data->port_type;
+ session_init_in.port.physical_port = mei_get_ddi_index(data->port);
+ session_init_in.protocol = data->protocol;
+
+ byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
+ sizeof(session_init_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&session_init_out,
+ sizeof(session_init_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_INITIATE_HDCP2_SESSION,
+ session_init_out.header.status);
+ return -EIO;
+ }
+
+ ake_data->msg_id = HDCP_2_2_AKE_INIT;
+ ake_data->tx_caps = session_init_out.tx_caps;
+ memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN);
+
+ return 0;
+}
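
All the command helpers in this file follow the same shape: fill a wired command header, mei_cldev_send() the request, mei_cldev_recv() the response and check its status field. The sketch below shows that shape with stub types and stand-in send/recv functions; none of the structures or values are the driver's real ones.

    #include <stdio.h>
    #include <string.h>

    /* stand-in header/command layout, not the driver's real structures */
    struct cmd_header { unsigned int api_version, command_id, status, buffer_len; };
    struct cmd_msg    { struct cmd_header header; unsigned char payload[16]; };

    /* stand-ins for mei_cldev_send()/mei_cldev_recv() */
    static long fake_send(const void *buf, size_t len) { (void)buf; return (long)len; }
    static long fake_recv(void *buf, size_t len) { memset(buf, 0, len); return (long)len; }

    int main(void)
    {
        struct cmd_msg in = { { 1, 0x5a, 0, sizeof(in.payload) } };
        struct cmd_msg out;

        if (fake_send(&in, sizeof(in)) < 0)
            return 1;
        if (fake_recv(&out, sizeof(out)) < 0)
            return 1;
        if (out.header.status != 0)   /* analogue of the ME_HDCP_STATUS_SUCCESS check */
            return 1;

        printf("command 0x%x completed\n", in.header.command_id);
        return 0;
    }
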
+
+/**
+ * mei_hdcp_verify_receiver_cert_prepare_km() - Verify the Receiver Certificate
+ * AKE_Send_Cert and prepare AKE_Stored_Km/AKE_No_Stored_Km
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @rx_cert: AKE_Send_Cert for verification
+ * @km_stored: Pairing status flag output
+ * @ek_pub_km: AKE_Stored_Km/AKE_No_Stored_Km output msg
+ * @msg_sz : size of AKE_XXXXX_Km output msg
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } };
+ struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ verify_rxcert_in.header.api_version = HDCP_API_VERSION;
+ verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
+ verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_rxcert_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
+
+ verify_rxcert_in.port.integrated_port_type = data->port_type;
+ verify_rxcert_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ verify_rxcert_in.cert_rx = rx_cert->cert_rx;
+ memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
+ memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&verify_rxcert_in,
+ sizeof(verify_rxcert_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed: %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&verify_rxcert_out,
+ sizeof(verify_rxcert_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed: %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_VERIFY_RECEIVER_CERT,
+ verify_rxcert_out.header.status);
+ return -EIO;
+ }
+
+ *km_stored = !!verify_rxcert_out.km_stored;
+ if (verify_rxcert_out.km_stored) {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_stored_km);
+ } else {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_no_stored_km);
+ }
+
+ memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff,
+ sizeof(verify_rxcert_out.ekm_buff));
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_verify_hprime() - Verify AKE_Send_H_prime at ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @rx_hprime: AKE_Send_H_prime msg for ME FW verification
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } };
+ struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_hprime)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ send_hprime_in.header.api_version = HDCP_API_VERSION;
+ send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
+ send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
+
+ send_hprime_in.port.integrated_port_type = data->port_type;
+ send_hprime_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
+ HDCP_2_2_H_PRIME_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&send_hprime_in,
+ sizeof(send_hprime_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&send_hprime_out,
+ sizeof(send_hprime_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_store_pairing_info() - Store pairing info received at ME FW
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @pairing_info: AKE_Send_Pairing_Info msg input to ME FW
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } };
+ struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !pairing_info)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ pairing_info_in.header.api_version = HDCP_API_VERSION;
+ pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
+ pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ pairing_info_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
+
+ pairing_info_in.port.integrated_port_type = data->port_type;
+ pairing_info_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
+ HDCP_2_2_E_KH_KM_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&pairing_info_in,
+ sizeof(pairing_info_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&pairing_info_out,
+ sizeof(pairing_info_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_PAIRING_INFO,
+ pairing_info_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_initiate_locality_check() - Prepare LC_Init
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @lc_init_data: LC_Init msg output
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_initiate_locality_check(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data)
+{
+ struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } };
+ struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !lc_init_data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ lc_init_in.header.api_version = HDCP_API_VERSION;
+ lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
+ lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
+
+ lc_init_in.port.integrated_port_type = data->port_type;
+ lc_init_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&lc_init_out, sizeof(lc_init_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X Failed. status: 0x%X\n",
+ WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
+ return -EIO;
+ }
+
+ lc_init_data->msg_id = HDCP_2_2_LC_INIT;
+ memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN);
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_verify_lprime() - Verify LC_Send_L_prime at ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @rx_lprime: LC_Send_L_prime msg for ME FW verification
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } };
+ struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_lprime)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ verify_lprime_in.header.api_version = HDCP_API_VERSION;
+ verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
+ verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_lprime_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
+
+ verify_lprime_in.port.integrated_port_type = data->port_type;
+ verify_lprime_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
+ HDCP_2_2_L_PRIME_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&verify_lprime_in,
+ sizeof(verify_lprime_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&verify_lprime_out,
+ sizeof(verify_lprime_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VALIDATE_LOCALITY,
+ verify_lprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_get_session_key() - Prepare SKE_Send_Eks.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @ske_data: SKE_Send_Eks msg output from ME FW.
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int mei_hdcp_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct wired_cmd_get_session_key_in get_skey_in = { { 0 } };
+ struct wired_cmd_get_session_key_out get_skey_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data || !ske_data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ get_skey_in.header.api_version = HDCP_API_VERSION;
+ get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
+ get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
+
+ get_skey_in.port.integrated_port_type = data->port_type;
+ get_skey_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&get_skey_out, sizeof(get_skey_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_GET_SESSION_KEY, get_skey_out.header.status);
+ return -EIO;
+ }
+
+ ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS;
+ memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks,
+ HDCP_2_2_E_DKEY_KS_LEN);
+ memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN);
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_repeater_check_flow_prepare_ack() - Validate the downstream topology
+ * and prepare rep_ack.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @rep_topology: Receiver ID List to be validated
+ * @rep_send_ack: repeater ack from ME FW.
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack)
+{
+ struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } };
+ struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !rep_topology || !rep_send_ack || !data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ verify_repeater_in.header.api_version = HDCP_API_VERSION;
+ verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
+ verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_repeater_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
+
+ verify_repeater_in.port.integrated_port_type = data->port_type;
+ verify_repeater_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
+ HDCP_2_2_RXINFO_LEN);
+ memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v,
+ HDCP_2_2_SEQ_NUM_LEN);
+ memcpy(verify_repeater_in.v_prime, rep_topology->v_prime,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN);
+
+ byte = mei_cldev_send(cldev, (u8 *)&verify_repeater_in,
+ sizeof(verify_repeater_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&verify_repeater_out,
+ sizeof(verify_repeater_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VERIFY_REPEATER,
+ verify_repeater_out.header.status);
+ return -EIO;
+ }
+
+ memcpy(rep_send_ack->v, verify_repeater_out.v,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK;
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_verify_mprime() - Verify M_prime at ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ * @stream_ready: RepeaterAuth_Stream_Ready msg for ME FW verification.
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int mei_hdcp_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct wired_cmd_repeater_auth_stream_req_in
+ verify_mprime_in = { { 0 } };
+ struct wired_cmd_repeater_auth_stream_req_out
+ verify_mprime_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !stream_ready || !data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ verify_mprime_in.header.api_version = HDCP_API_VERSION;
+ verify_mprime_in.header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
+ verify_mprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_mprime_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
+
+ verify_mprime_in.port.integrated_port_type = data->port_type;
+ verify_mprime_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
+ HDCP_2_2_MPRIME_LEN);
+ drm_hdcp2_u32_to_seq_num(verify_mprime_in.seq_num_m, data->seq_num_m);
+ memcpy(verify_mprime_in.streams, data->streams,
+ (data->k * sizeof(struct hdcp2_streamid_type)));
+
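+ /* Stream count is sent big endian, matching the __be16 field in the HECI msg */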
+ verify_mprime_in.k = cpu_to_be16(data->k);
+
+ byte = mei_cldev_send(cldev, (u8 *)&verify_mprime_in,
+ sizeof(verify_mprime_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&verify_mprime_out,
+ sizeof(verify_mprime_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ verify_mprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_enable_authentication() - Mark a port as authenticated
+ * through ME FW
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int mei_hdcp_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
+{
+ struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } };
+ struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ enable_auth_in.header.api_version = HDCP_API_VERSION;
+ enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
+ enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
+
+ enable_auth_in.port.integrated_port_type = data->port_type;
+ enable_auth_in.port.physical_port = mei_get_ddi_index(data->port);
+ enable_auth_in.stream_type = data->streams[0].stream_type;
+
+ byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
+ sizeof(enable_auth_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&enable_auth_out,
+ sizeof(enable_auth_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_ENABLE_AUTH, enable_auth_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_hdcp_close_session() - Close the wired HDCP Tx session of the ME FW
+ * for the port. This also disables the authenticated state of the port.
+ * @dev: device corresponding to the mei_cl_device
+ * @data: Intel HW specific hdcp data
+ *
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
+{
+ struct wired_cmd_close_session_in session_close_in = { { 0 } };
+ struct wired_cmd_close_session_out session_close_out = { { 0 } };
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ session_close_in.header.api_version = HDCP_API_VERSION;
+ session_close_in.header.command_id = WIRED_CLOSE_SESSION;
+ session_close_in.header.status = ME_HDCP_STATUS_SUCCESS;
+ session_close_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
+
+ session_close_in.port.integrated_port_type = data->port_type;
+ session_close_in.port.physical_port = mei_get_ddi_index(data->port);
+
+ byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
+ sizeof(session_close_in));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ byte = mei_cldev_recv(cldev, (u8 *)&session_close_out,
+ sizeof(session_close_out));
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) {
+ dev_dbg(dev, "Session Close Failed. status: 0x%X\n",
+ session_close_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
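+/*
+ * HDCP2.2 operations handed to i915 through the component framework;
+ * i915 calls these to delegate the HDCP2.2 computations to the ME FW.
+ */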
+static const struct i915_hdcp_component_ops mei_hdcp_ops = {
+ .owner = THIS_MODULE,
+ .initiate_hdcp2_session = mei_hdcp_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ mei_hdcp_verify_receiver_cert_prepare_km,
+ .verify_hprime = mei_hdcp_verify_hprime,
+ .store_pairing_info = mei_hdcp_store_pairing_info,
+ .initiate_locality_check = mei_hdcp_initiate_locality_check,
+ .verify_lprime = mei_hdcp_verify_lprime,
+ .get_session_key = mei_hdcp_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ mei_hdcp_repeater_check_flow_prepare_ack,
+ .verify_mprime = mei_hdcp_verify_mprime,
+ .enable_hdcp_authentication = mei_hdcp_enable_authentication,
+ .close_hdcp_session = mei_hdcp_close_session,
+};
+
+static int mei_component_master_bind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_hdcp_comp_master *comp_master =
+ mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+ comp_master->ops = &mei_hdcp_ops;
+ comp_master->mei_dev = dev;
+ ret = component_bind_all(dev, comp_master);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mei_component_master_unbind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_hdcp_comp_master *comp_master =
+ mei_cldev_get_drvdata(cldev);
+
+ dev_dbg(dev, "%s\n", __func__);
+ component_unbind_all(dev, comp_master);
+}
+
+static const struct component_master_ops mei_component_master_ops = {
+ .bind = mei_component_master_bind,
+ .unbind = mei_component_master_unbind,
+};
+
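+/*
+ * Match only the i915 device that registered the HDCP subcomponent,
+ * so this component master binds against the GPU's HDCP support alone.
+ */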
+static int mei_hdcp_component_match(struct device *dev, int subcomponent,
+ void *data)
+{
+ return !strcmp(dev->driver->name, "i915") &&
+ subcomponent == I915_COMPONENT_HDCP;
+}
+
+static int mei_hdcp_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct i915_hdcp_comp_master *comp_master;
+ struct component_match *master_match;
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
+ goto enable_err_exit;
+ }
+
+ comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
+ if (!comp_master) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
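+ /* Add the i915 HDCP subcomponent as the only match for this master */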
+ master_match = NULL;
+ component_match_add_typed(&cldev->dev, &master_match,
+ mei_hdcp_component_match, comp_master);
+ if (IS_ERR_OR_NULL(master_match)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ mei_cldev_set_drvdata(cldev, comp_master);
+ ret = component_master_add_with_match(&cldev->dev,
+ &mei_component_master_ops,
+ master_match);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ mei_cldev_set_drvdata(cldev, NULL);
+ kfree(comp_master);
+ mei_cldev_disable(cldev);
+enable_err_exit:
+ return ret;
+}
+
+static int mei_hdcp_remove(struct mei_cl_device *cldev)
+{
+ struct i915_hdcp_comp_master *comp_master =
+ mei_cldev_get_drvdata(cldev);
+
+ component_master_del(&cldev->dev, &mei_component_master_ops);
+ kfree(comp_master);
+ mei_cldev_set_drvdata(cldev, NULL);
+
+ return mei_cldev_disable(cldev);
+}
+
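+/* MEI client UUID of the ME FW HDCP service */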
+#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
+ 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
+
+static struct mei_cl_device_id mei_hdcp_tbl[] = {
+ { .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_hdcp_tbl);
+
+static struct mei_cl_driver mei_hdcp_driver = {
+ .id_table = mei_hdcp_tbl,
+ .name = KBUILD_MODNAME,
+ .probe = mei_hdcp_probe,
+ .remove = mei_hdcp_remove,
+};
+
+module_mei_cl_driver(mei_hdcp_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MEI HDCP");
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
new file mode 100644
index 000000000000..5f74b908e486
--- /dev/null
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -0,0 +1,377 @@
+/* SPDX-License-Identifier: (GPL-2.0+) */
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Authors:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#ifndef __MEI_HDCP_H__
+#define __MEI_HDCP_H__
+
+#include <drm/drm_hdcp.h>
+
+/* me_hdcp_status: Enumeration of all HDCP Status Codes */
+enum me_hdcp_status {
+ ME_HDCP_STATUS_SUCCESS = 0x0000,
+
+ /* WiDi Generic Status Codes */
+ ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000,
+ ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001,
+ ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002,
+ ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003,
+ ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004,
+ ME_HDCP_STATUS_INVALID_PARAMS = 0x1005,
+ ME_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006,
+
+ /* WiDi Status Codes */
+ ME_HDCP_INVALID_SESSION_STATE = 0x6000,
+ ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001,
+ ME_HDCP_SRM_INVALID_LENGTH = 0x6002,
+ ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003,
+ ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004,
+ ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005,
+ ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006,
+ ME_HDCP_RX_REVOKED = 0x6007,
+ ME_HDCP_H_VERIFICATION_FAILED = 0x6008,
+ ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009,
+ ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A,
+ ME_HDCP_V_VERIFICATION_FAILED = 0x600B,
+ ME_HDCP_L_VERIFICATION_FAILED = 0x600C,
+ ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D,
+ ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E,
+ ME_HDCP_NONCE_GENERATION_FAILED = 0x600F,
+ ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010,
+ ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011,
+ ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012,
+ ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013,
+ ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014,
+ ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015,
+
+ /* New status code for HDCP 2.1 */
+ ME_HDCP_STATUS_MISMATCH_IN_M = 0x6016,
+
+ /* New status codes for HDCP 2.2 Rx */
+ ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017,
+ ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018,
+ ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019,
+ ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020,
+ ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021,
+ ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022,
+ ME_HDCP_FAIL_NOT_EXPECTED = 0x6023,
+ ME_HDCP_FAIL_HDCP_OFF = 0x6024,
+ ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025,
+ ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026,
+ ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027,
+ ME_HDCP_DMA_READ_ERROR = 0x6028,
+ ME_HDCP_DMA_WRITE_ERROR = 0x6029,
+ ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030,
+ ME_HDCP_H264_PARSING_ERROR = 0x6031,
+ ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032,
+ ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033,
+ ME_HDCP_TX_ACTIVE_ERROR = 0x6034,
+ ME_HDCP_MODE_CHANGE_ERROR = 0x6035,
+ ME_HDCP_STREAM_TYPE_ERROR = 0x6036,
+ ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037,
+
+ ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038,
+ ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039,
+ ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a,
+ ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b,
+ ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c,
+ ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d,
+
+ /* hdcp capable bit is not set in rx_caps (error is unique to DP) */
+ ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041,
+
+ ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042,
+};
+
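+/* API version carried in every HECI command header */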
+#define HDCP_API_VERSION 0x00010000
+
+#define HDCP_M_LEN 16
+#define HDCP_KH_LEN 16
+
+/* Payload buffer size (excluding header) for CMDs and the corresponding responses */
+/* Wired_Tx_AKE */
+#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1)
+#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3)
+
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3)
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16)
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128)
+
+#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32)
+#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4)
+
+#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16)
+#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4)
+
+#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4)
+#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4)
+
+/* Wired_Tx_LC */
+#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4)
+#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8)
+
+#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32)
+#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4)
+
+/* Wired_Tx_SKE */
+#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4)
+#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8)
+
+/* Wired_Tx_Enable_Auth */
+#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1)
+#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4)
+
+/* Wired_Tx_Repeater */
+#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155)
+#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16)
+
+#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \
+ 32 + 2 + 2)
+
+#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4)
+
+/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */
+enum hdcp_command_id {
+ _WIDI_COMMAND_BASE = 0x00030000,
+ WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE,
+ HDCP_GET_SRM_STATUS,
+ HDCP_SEND_SRM_FRAGMENT,
+
+ /* The wired HDCP Tx commands */
+ _WIRED_COMMAND_BASE = 0x00031000,
+ WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE,
+ WIRED_VERIFY_RECEIVER_CERT,
+ WIRED_AKE_SEND_HPRIME,
+ WIRED_AKE_SEND_PAIRING_INFO,
+ WIRED_INIT_LOCALITY_CHECK,
+ WIRED_VALIDATE_LOCALITY,
+ WIRED_GET_SESSION_KEY,
+ WIRED_ENABLE_AUTH,
+ WIRED_VERIFY_REPEATER,
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ WIRED_CLOSE_SESSION,
+
+ _WIRED_COMMANDS_COUNT,
+};
+
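+/*
+ * ekm buffer returned by the ME FW: Ekpub(km) when no pairing info is
+ * stored for the receiver, or Ekh(km) followed by m when it is.
+ */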
+union encrypted_buff {
+ u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+ u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+ struct {
+ u8 e_kh_km[HDCP_KH_LEN];
+ u8 m[HDCP_M_LEN];
+ } __packed;
+};
+
+/* HDCP HECI message header. All header values are little endian. */
+struct hdcp_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ enum me_hdcp_status status;
+ /* Length of the HECI message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+/* Empty command request or response. No data follows the header. */
+struct hdcp_cmd_no_data {
+ struct hdcp_cmd_header header;
+} __packed;
+
+/* Uniquely identifies the hdcp port being addressed for a given command. */
+struct hdcp_port_id {
+ u8 integrated_port_type;
+ u8 physical_port;
+ u16 reserved;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in
+ * support of the AKE protocol
+ */
+/* HECI struct for integrated wired HDCP Tx session initiation. */
+struct wired_cmd_initiate_hdcp2_session_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 protocol; /* for HDMI vs DP */
+} __packed;
+
+struct wired_cmd_initiate_hdcp2_session_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 r_tx[HDCP_2_2_RTX_LEN];
+ struct hdcp2_tx_caps tx_caps;
+} __packed;
+
+/* HECI struct for ending an integrated wired HDCP Tx session. */
+struct wired_cmd_close_session_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_close_session_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */
+struct wired_cmd_verify_receiver_cert_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ struct hdcp2_cert_rx cert_rx;
+ u8 r_rx[HDCP_2_2_RRX_LEN];
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct wired_cmd_verify_receiver_cert_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 km_stored;
+ u8 reserved[3];
+ union encrypted_buff ekm_buff;
+} __packed;
+
+/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */
+struct wired_cmd_ake_send_hprime_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct wired_cmd_ake_send_hprime_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * HECI struct for sending in AKE pairing data generated by the Rx in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_ake_send_pairing_info_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct wired_cmd_ake_send_pairing_info_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol */
+/*
+ * HECI struct for initiating locality check with an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_init_locality_check_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_init_locality_check_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+/*
+ * HECI struct for validating an Rx's LPrime value in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_validate_locality_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct wired_cmd_validate_locality_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in support of the
+ * SKE protocol
+ */
+/* HECI struct for creating session key */
+struct wired_cmd_get_session_key_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_get_session_key_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+ u8 r_iv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+/* HECI struct for the Tx enable authentication command */
+struct wired_cmd_enable_auth_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 stream_type;
+} __packed;
+
+struct wired_cmd_enable_auth_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in support of
+ * the repeater protocols
+ */
+/*
+ * HECI struct for verifying the downstream repeater's HDCP topology in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_verify_repeater_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+ u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+ u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct wired_cmd_verify_repeater_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 content_type_supported;
+ u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+/*
+ * HECI struct in support of stream management in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_repeater_auth_stream_req_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+ u8 m_prime[HDCP_2_2_MPRIME_LEN];
+ __be16 k;
+ struct hdcp2_streamid_type streams[1];
+} __packed;
+
+struct wired_cmd_repeater_auth_stream_req_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
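+/* Physical port index as the ME FW expects it in struct hdcp_port_id */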
+enum mei_fw_ddi {
+ MEI_DDI_INVALID_PORT = 0x0,
+
+ MEI_DDI_B = 1,
+ MEI_DDI_C,
+ MEI_DDI_D,
+ MEI_DDI_E,
+ MEI_DDI_F,
+ MEI_DDI_A = 7,
+ MEI_DDI_RANGE_END = MEI_DDI_A,
+};
+#endif /* __MEI_HDCP_H__ */
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index e4b10b2d1a08..bb1ee9834a02 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
+
#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
@@ -137,6 +139,8 @@
#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
+#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 2b7f7677f8cc..b7d2487b8409 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -311,7 +311,8 @@ struct mei_client_properties {
u8 protocol_version;
u8 max_number_of_connections;
u8 fixed_address;
- u8 single_recv_buf;
+ u8 single_recv_buf:1;
+ u8 reserved:7;
u32 max_msg_length;
} __packed;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 73ace2d59dea..3ab946ad3257 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
+
{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
@@ -103,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 227cc7443671..242dcee14689 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -38,7 +38,6 @@ comment "VOP Bus Driver"
config VOP_BUS
tristate "VOP Bus Driver"
- depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
help
This option is selected by any driver which registers a
device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
@@ -132,7 +131,7 @@ comment "VOP Driver"
config VOP
tristate "VOP Driver"
- depends on 64BIT && PCI && X86 && VOP_BUS
+ depends on VOP_BUS
select VHOST_RING
select VIRTIO
help
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
index ff59568219ad..377a4f38cd7e 100644
--- a/drivers/misc/mic/bus/scif_bus.h
+++ b/drivers/misc/mic/bus/scif_bus.h
@@ -88,8 +88,8 @@ struct scif_driver {
* @send_intr: Send an interrupt to the remote node on a specified doorbell.
* @send_p2p_intr: Send an interrupt to the peer node on a specified doorbell
* which is specifically targeted for a peer to peer node.
- * @ioremap: Map a buffer with the specified physical address and length.
- * @iounmap: Unmap a buffer previously mapped.
+ * @remap: Map a buffer with the specified physical address and length.
+ * @unmap: Unmap a buffer previously mapped.
*/
struct scif_hw_ops {
int (*next_db)(struct scif_hw_dev *sdev);
@@ -104,9 +104,9 @@ struct scif_hw_ops {
void (*send_intr)(struct scif_hw_dev *sdev, int db);
void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db,
struct mic_mw *mw);
- void __iomem * (*ioremap)(struct scif_hw_dev *sdev,
+ void __iomem * (*remap)(struct scif_hw_dev *sdev,
phys_addr_t pa, size_t len);
- void (*iounmap)(struct scif_hw_dev *sdev, void __iomem *va);
+ void (*unmap)(struct scif_hw_dev *sdev, void __iomem *va);
};
int scif_register_driver(struct scif_driver *driver);
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
index fff7a865d721..cf5f3fae573c 100644
--- a/drivers/misc/mic/bus/vop_bus.h
+++ b/drivers/misc/mic/bus/vop_bus.h
@@ -87,8 +87,8 @@ struct vop_driver {
* @get_dp: Get access to the virtio device page used by the self
* node to add/remove/configure virtio devices.
* @send_intr: Send an interrupt to the peer node on a specified doorbell.
- * @ioremap: Map a buffer with the specified DMA address and length.
- * @iounmap: Unmap a buffer previously mapped.
+ * @remap: Map a buffer with the specified DMA address and length.
+ * @unmap: Unmap a buffer previously mapped.
* @dma_filter: The DMA filter function to use for obtaining access to
* a DMA channel on the peer node.
*/
@@ -104,9 +104,9 @@ struct vop_hw_ops {
void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
void * (*get_dp)(struct vop_device *vpdev);
void (*send_intr)(struct vop_device *vpdev, int db);
- void __iomem * (*ioremap)(struct vop_device *vpdev,
+ void __iomem * (*remap)(struct vop_device *vpdev,
dma_addr_t pa, size_t len);
- void (*iounmap)(struct vop_device *vpdev, void __iomem *va);
+ void (*unmap)(struct vop_device *vpdev, void __iomem *va);
};
struct vop_device *
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index e749af48f736..dcd07ef29801 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -245,8 +245,8 @@ static struct scif_hw_ops scif_hw_ops = {
.next_db = ___mic_next_db,
.send_intr = ___mic_send_intr,
.send_p2p_intr = ___mic_send_p2p_intr,
- .ioremap = ___mic_ioremap,
- .iounmap = ___mic_iounmap,
+ .remap = ___mic_ioremap,
+ .unmap = ___mic_iounmap,
};
static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
@@ -316,8 +316,8 @@ static struct vop_hw_ops vop_hw_ops = {
.next_db = __mic_next_db,
.get_remote_dp = __mic_get_remote_dp,
.send_intr = __mic_send_intr,
- .ioremap = __mic_ioremap,
- .iounmap = __mic_iounmap,
+ .remap = __mic_ioremap,
+ .unmap = __mic_iounmap,
};
static int mic_request_dma_chans(struct mic_driver *mdrv)
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 6479435ac96b..079c36f0ce6e 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -133,8 +133,8 @@ static struct vop_hw_ops vop_hw_ops = {
.get_dp = __mic_get_dp,
.get_remote_dp = __mic_get_remote_dp,
.send_intr = __mic_send_intr,
- .ioremap = __mic_ioremap,
- .iounmap = __mic_iounmap,
+ .remap = __mic_ioremap,
+ .unmap = __mic_iounmap,
};
static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
@@ -315,8 +315,8 @@ static struct scif_hw_ops scif_hw_ops = {
.ack_interrupt = ___mic_ack_interrupt,
.next_db = ___mic_next_db,
.send_intr = ___mic_send_intr,
- .ioremap = ___mic_ioremap,
- .iounmap = ___mic_iounmap,
+ .remap = ___mic_ioremap,
+ .unmap = ___mic_iounmap,
};
static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
index 3e86360ba5a6..7b380534eba1 100644
--- a/drivers/misc/mic/scif/scif_map.h
+++ b/drivers/misc/mic/scif/scif_map.h
@@ -97,7 +97,7 @@ scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev)
out_virt = phys_to_virt(phys);
else
out_virt = (void __force *)
- sdev->hw_ops->ioremap(sdev, phys, size);
+ sdev->hw_ops->remap(sdev, phys, size);
return out_virt;
}
@@ -107,7 +107,7 @@ scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
if (!scifdev_self(scifdev)) {
struct scif_hw_dev *sdev = scifdev->sdev;
- sdev->hw_ops->iounmap(sdev, (void __force __iomem *)virt);
+ sdev->hw_ops->unmap(sdev, (void __force __iomem *)virt);
}
}
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 749321eb91ae..f62216628fa6 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -672,8 +672,8 @@ int scif_unregister_window(struct scif_window *window)
{
window->unreg_state = OP_IN_PROGRESS;
send_msg = true;
- /* fall through */
}
+ /* fall through */
case OP_IN_PROGRESS:
{
scif_get_window(window, 1);
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 6b212c8b78e7..e37b2c2152a2 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "vop_main.h"
@@ -47,7 +48,8 @@
* @dc: Virtio device control
* @vpdev: VOP device which is the parent for this virtio device
* @vr: Buffer for accessing the VRING
- * @used: Buffer for used
+ * @used_virt: Virtual address of used ring
+ * @used: DMA address of used ring
* @used_size: Size of the used buffer
* @reset_done: Track whether VOP reset is complete
* @virtio_cookie: Cookie returned upon requesting a interrupt
@@ -61,6 +63,7 @@ struct _vop_vdev {
struct mic_device_ctrl __iomem *dc;
struct vop_device *vpdev;
void __iomem *vr[VOP_MAX_VRINGS];
+ void *used_virt[VOP_MAX_VRINGS];
dma_addr_t used[VOP_MAX_VRINGS];
int used_size[VOP_MAX_VRINGS];
struct completion reset_done;
@@ -116,7 +119,7 @@ _vop_total_desc_size(struct mic_device_desc __iomem *desc)
static u64 vop_get_features(struct virtio_device *vdev)
{
unsigned int i, bits;
- u32 features = 0;
+ u64 features = 0;
struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
u8 __iomem *in_features = _vop_vq_features(desc);
int feature_len = ioread8(&desc->feature_len);
@@ -124,7 +127,7 @@ static u64 vop_get_features(struct virtio_device *vdev)
bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++)
if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
- features |= BIT(i);
+ features |= BIT_ULL(i);
return features;
}
@@ -226,7 +229,7 @@ static void vop_reset_inform_host(struct virtio_device *dev)
if (ioread8(&dc->host_ack))
break;
msleep(100);
- };
+ }
dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
@@ -260,14 +263,14 @@ static bool vop_notify(struct virtqueue *vq)
static void vop_del_vq(struct virtqueue *vq, int n)
{
struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
- struct vring *vr = (struct vring *)(vq + 1);
struct vop_device *vpdev = vdev->vpdev;
dma_unmap_single(&vpdev->dev, vdev->used[n],
vdev->used_size[n], DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
+ free_pages((unsigned long)vdev->used_virt[n],
+ get_order(vdev->used_size[n]));
vring_del_virtqueue(vq);
- vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
+ vpdev->hw_ops->unmap(vpdev, vdev->vr[n]);
vdev->vr[n] = NULL;
}
@@ -283,6 +286,26 @@ static void vop_del_vqs(struct virtio_device *dev)
vop_del_vq(vq, idx++);
}
+static struct virtqueue *vop_new_virtqueue(unsigned int index,
+ unsigned int num,
+ struct virtio_device *vdev,
+ bool context,
+ void *pages,
+ bool (*notify)(struct virtqueue *vq),
+ void (*callback)(struct virtqueue *vq),
+ const char *name,
+ void *used)
+{
+ bool weak_barriers = false;
+ struct vring vring;
+
+ vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
+ vring.used = used;
+
+ return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
+ notify, callback, name);
+}
+
/*
* This routine will assign vring's allocated in host/io memory. Code in
* virtio_ring.c however continues to access this io memory as if it were local
@@ -302,7 +325,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
struct _mic_vring_info __iomem *info;
void *used;
int vr_size, _vr_size, err, magic;
- struct vring *vr;
u8 type = ioread8(&vdev->desc->type);
if (index >= ioread8(&vdev->desc->num_vq))
@@ -316,23 +338,12 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
memcpy_fromio(&config, vqconfig, sizeof(config));
_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
- va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
- vr_size);
+ va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
if (!va)
return ERR_PTR(-ENOMEM);
vdev->vr[index] = va;
memset_io(va, 0x0, _vr_size);
- vq = vring_new_virtqueue(
- index,
- le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
- dev,
- false,
- ctx,
- (void __force *)va, vop_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto unmap;
- }
+
info = va + _vr_size;
magic = ioread32(&info->magic);
@@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
goto unmap;
}
- /* Allocate and reassign used ring now */
vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
sizeof(struct vring_used_elem) *
le16_to_cpu(config.num));
used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(vdev->used_size[index]));
+ vdev->used_virt[index] = used;
if (!used) {
err = -ENOMEM;
dev_err(_vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, err);
- goto del_vq;
+ goto unmap;
+ }
+
+ vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
+ (void __force *)va, vop_notify, callback,
+ name, used);
+ if (!vq) {
+ err = -ENOMEM;
+ goto free_used;
}
+
vdev->used[index] = dma_map_single(&vpdev->dev, used,
vdev->used_size[index],
DMA_BIDIRECTIONAL);
@@ -360,28 +380,19 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
err = -ENOMEM;
dev_err(_vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, err);
- goto free_used;
+ goto del_vq;
}
writeq(vdev->used[index], &vqconfig->used_address);
- /*
- * To reassign the used ring here we are directly accessing
- * struct vring_virtqueue which is a private data structure
- * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
- * vring_new_virtqueue() would ensure that
- * (&vq->vring == (struct vring *) (&vq->vq + 1));
- */
- vr = (struct vring *)(vq + 1);
- vr->used = used;
vq->priv = vdev;
return vq;
+del_vq:
+ vring_del_virtqueue(vq);
free_used:
free_pages((unsigned long)used,
get_order(vdev->used_size[index]));
-del_vq:
- vring_del_virtqueue(vq);
unmap:
- vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
+ vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
return ERR_PTR(err);
}
@@ -394,16 +405,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
struct _vop_vdev *vdev = to_vopvdev(dev);
struct vop_device *vpdev = vdev->vpdev;
struct mic_device_ctrl __iomem *dc = vdev->dc;
- int i, err, retry;
+ int i, err, retry, queue_idx = 0;
/* We must have this many virtqueues. */
if (nvqs > ioread8(&vdev->desc->num_vq))
return -ENOENT;
for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
__func__, i, names[i]);
- vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
+ vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
@@ -421,7 +437,7 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
if (!ioread8(&dc->used_address_updated))
break;
msleep(100);
- };
+ }
dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
if (!retry) {
@@ -497,7 +513,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
vdev->desc = d;
vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
vdev->dnode = dnode;
- vdev->vdev.priv = (void *)(u64)dnode;
+ vdev->vdev.priv = (void *)(unsigned long)dnode;
init_completion(&vdev->reset_done);
vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
@@ -519,7 +535,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
offset, type);
goto free_irq;
}
- writeq((u64)vdev, &vdev->dc->vdev);
+ writeq((unsigned long)vdev, &vdev->dc->vdev);
dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
__func__, offset, type, vdev);
@@ -546,13 +562,18 @@ static int vop_match_desc(struct device *dev, void *data)
return vdev->desc == (void __iomem *)data;
}
+static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
+{
+ return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
+}
+
static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
unsigned int offset,
struct vop_device *vpdev)
{
struct mic_device_ctrl __iomem *dc
= (void __iomem *)d + _vop_aligned_desc_size(d);
- struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+ struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
return;
@@ -571,11 +592,13 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
{
struct mic_device_ctrl __iomem *dc
= (void __iomem *)d + _vop_aligned_desc_size(d);
- struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+ struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
u8 status;
int ret = -1;
if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+ struct device *dev = get_device(&vdev->vdev.dev);
+
dev_dbg(&vpdev->dev,
"%s %d config_change %d type %d vdev %p\n",
__func__, __LINE__,
@@ -587,7 +610,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
iowrite8(-1, &dc->h2c_vdev_db);
if (status & VIRTIO_CONFIG_S_DRIVER_OK)
wait_for_completion(&vdev->reset_done);
- put_device(&vdev->vdev.dev);
+ put_device(dev);
iowrite8(1, &dc->guest_ack);
dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
__func__, __LINE__, ioread8(&dc->guest_ack));
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index cbc8ebcff5cf..3632fce40590 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -80,7 +80,7 @@ static void vop_virtio_init_post(struct vop_vdev *vdev)
continue;
}
vdev->vvr[i].vrh.vring.used =
- (void __force *)vpdev->hw_ops->ioremap(
+ (void __force *)vpdev->hw_ops->remap(
vpdev,
le64_to_cpu(vqconfig[i].used_address),
used_size);
@@ -528,15 +528,15 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
int vr_idx)
{
struct vop_device *vpdev = vdev->vpdev;
- void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+ void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
struct vop_vringh *vvr = &vdev->vvr[vr_idx];
struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
- size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
- bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t dma_alignment;
+ bool x200;
size_t dma_offset, partlen;
int err;
- if (!VOP_USE_DMA) {
+ if (!VOP_USE_DMA || !vi->dma_ch) {
if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
err = -EFAULT;
dev_err(vop_dev(vdev), "%s %d err %d\n",
@@ -548,6 +548,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
goto err;
}
+ dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+
dma_offset = daddr - round_down(daddr, dma_alignment);
daddr -= dma_offset;
len += dma_offset;
@@ -585,9 +588,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
}
err = 0;
err:
- vpdev->hw_ops->iounmap(vpdev, dbuf);
+ vpdev->hw_ops->unmap(vpdev, dbuf);
dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+ "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
__func__, ubuf, dbuf, len, vr_idx);
return err;
}
@@ -603,21 +606,26 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
int vr_idx)
{
struct vop_device *vpdev = vdev->vpdev;
- void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+ void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
struct vop_vringh *vvr = &vdev->vvr[vr_idx];
struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
- size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
- bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t dma_alignment;
+ bool x200;
size_t partlen;
- bool dma = VOP_USE_DMA;
+ bool dma = VOP_USE_DMA && vi->dma_ch;
int err = 0;
- if (daddr & (dma_alignment - 1)) {
- vdev->tx_dst_unaligned += len;
- dma = false;
- } else if (ALIGN(len, dma_alignment) > dlen) {
- vdev->tx_len_unaligned += len;
- dma = false;
+ if (dma) {
+ dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+
+ if (daddr & (dma_alignment - 1)) {
+ vdev->tx_dst_unaligned += len;
+ dma = false;
+ } else if (ALIGN(len, dma_alignment) > dlen) {
+ vdev->tx_len_unaligned += len;
+ dma = false;
+ }
}
if (!dma)
@@ -668,9 +676,9 @@ memcpy:
vdev->out_bytes += len;
err = 0;
err:
- vpdev->hw_ops->iounmap(vpdev, dbuf);
+ vpdev->hw_ops->unmap(vpdev, dbuf);
dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+ "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
__func__, ubuf, dbuf, len, vr_idx);
return err;
}
@@ -704,16 +712,17 @@ static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
while (len && iov->i < iov->used) {
struct kvec *kiov = &iov->iov[iov->i];
+ unsigned long daddr = (unsigned long)kiov->iov_base;
partlen = min(kiov->iov_len, len);
if (read)
ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
- (u64)kiov->iov_base,
+ daddr,
kiov->iov_len,
vr_idx);
else
ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
- (u64)kiov->iov_base,
+ daddr,
kiov->iov_len,
vr_idx);
if (ret) {
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 595ac065b401..95ff7c5a1dfb 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context)
struct resource r;
if (acpi_dev_resource_io(res, &r)) {
+#ifdef CONFIG_HAS_IOPORT_MAP
base = ioport_map(r.start, resource_size(&r));
return AE_OK;
+#else
+ return AE_ERROR;
+#endif
} else if (acpi_dev_resource_memory(res, &r)) {
base = ioremap(r.start, resource_size(&r));
return AE_OK;
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 93be82fc338a..2ec5808ba464 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -616,8 +616,8 @@ irqreturn_t gru_intr_mblade(int irq, void *dev_id)
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
- gru_intr(0, blade);
- gru_intr(1, blade);
+ gru_intr(0, blade);
+ gru_intr(1, blade);
}
return IRQ_HANDLED;
}
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 0441abe87880..9e443df44b3b 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/numa.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
@@ -61,7 +62,7 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
-static int xpc_mq_node = -1;
+static int xpc_mq_node = NUMA_NO_NODE;
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index f8240b87df22..ad807d5a3141 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -34,7 +34,6 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -73,15 +72,26 @@ enum vmwballoon_capabilities {
VMW_BALLOON_BATCHED_CMDS = (1 << 2),
VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3),
VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4),
+ VMW_BALLOON_64_BIT_TARGET = (1 << 5)
};
-#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS \
+#define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \
| VMW_BALLOON_BATCHED_CMDS \
| VMW_BALLOON_BATCHED_2M_CMDS \
| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
+/*
+ * 64-bit targets are only supported in 64-bit builds
+ */
+#ifdef CONFIG_64BIT
+#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \
+ | VMW_BALLOON_64_BIT_TARGET)
+#else
+#define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON
+#endif
+
enum vmballoon_page_size_type {
VMW_BALLOON_4K_PAGE,
VMW_BALLOON_2M_PAGE,
@@ -557,6 +567,36 @@ vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
}
/**
+ * vmballoon_mark_page_offline() - mark a page as offline
+ * @page: pointer for the page.
+ * @page_size: the size of the page.
+ */
+static void
+vmballoon_mark_page_offline(struct page *page,
+ enum vmballoon_page_size_type page_size)
+{
+ int i;
+
+ for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
+ __SetPageOffline(page + i);
+}
+
+/**
+ * vmballoon_mark_page_online() - mark a page as online
+ * @page: pointer for the page.
+ * @page_size: the size of the page.
+ */
+static void
+vmballoon_mark_page_online(struct page *page,
+ enum vmballoon_page_size_type page_size)
+{
+ int i;
+
+ for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
+ __ClearPageOffline(page + i);
+}
+
+/**
* vmballoon_send_get_target() - Retrieve desired balloon size from the host.
*
* @b: pointer to the balloon.
@@ -572,8 +612,9 @@ static int vmballoon_send_get_target(struct vmballoon *b)
limit = totalram_pages();
- /* Ensure limit fits in 32-bits */
- if (limit != (u32)limit)
+ /* Ensure limit fits in 32-bits if 64-bit targets are not supported */
+ if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
+ limit != (u32)limit)
return -EINVAL;
status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
@@ -612,6 +653,7 @@ static int vmballoon_alloc_page_list(struct vmballoon *b,
ctl->page_size);
if (page) {
+ vmballoon_mark_page_offline(page, ctl->page_size);
/* Success. Add the page to the list and continue. */
list_add(&page->lru, &ctl->pages);
continue;
@@ -850,6 +892,7 @@ static void vmballoon_release_page_list(struct list_head *page_list,
list_for_each_entry_safe(page, tmp, page_list, lru) {
list_del(&page->lru);
+ vmballoon_mark_page_online(page, page_size);
__free_pages(page, vmballoon_page_order(page_size));
}
@@ -1287,7 +1330,7 @@ static void vmballoon_reset(struct vmballoon *b)
vmballoon_pop(b);
if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
- return;
+ goto unlock;
if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
if (vmballoon_init_batching(b)) {
@@ -1298,7 +1341,7 @@ static void vmballoon_reset(struct vmballoon *b)
* The guest will retry in one second.
*/
vmballoon_send_start(b, 0);
- return;
+ goto unlock;
}
} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
vmballoon_deinit_batching(b);
@@ -1314,6 +1357,7 @@ static void vmballoon_reset(struct vmballoon *b)
if (vmballoon_send_guest_id(b))
pr_err("failed to send guest ID to the host\n");
+unlock:
up_write(&b->conf_sem);
}
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index b3fa738ae005..7824c7494916 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -330,7 +330,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
/*
* Register the notification bitmap with the host.
*/
-bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
{
int result;
struct vmci_notify_bm_set_msg bitmap_set_msg;
@@ -340,11 +340,14 @@ bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
VMCI_DG_HEADERSIZE;
- bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+ if (vmci_use_ppn64())
+ bitmap_set_msg.bitmap_ppn64 = bitmap_ppn;
+ else
+ bitmap_set_msg.bitmap_ppn32 = (u32) bitmap_ppn;
result = vmci_send_datagram(&bitmap_set_msg.hdr);
if (result != VMCI_SUCCESS) {
- pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+ pr_devel("Failed to register (PPN=%llu) as notification bitmap (error=%d)\n",
bitmap_ppn, result);
return false;
}
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
index e4c0b17486a5..410a21f8436f 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.h
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -45,7 +45,7 @@ struct dbell_cpt_state {
int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
-bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
+bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn);
void vmci_dbell_scan_notification_entries(u8 *bitmap);
#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index cee9e977d318..2fbf4a0ac657 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -54,4 +54,6 @@ void vmci_guest_exit(void);
bool vmci_guest_code_active(void);
u32 vmci_get_vm_context_id(void);
+bool vmci_use_ppn64(void);
+
#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index dad5abee656e..928708128177 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -64,6 +64,13 @@ struct vmci_guest_device {
dma_addr_t notification_base;
};
+static bool use_ppn64;
+
+bool vmci_use_ppn64(void)
+{
+ return use_ppn64;
+}
+
/* vmci_dev singleton device and supporting data*/
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
@@ -432,6 +439,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
struct vmci_guest_device *vmci_dev;
void __iomem *iobase;
unsigned int capabilities;
+ unsigned int caps_in_use;
unsigned long cmd;
int vmci_err;
int error;
@@ -496,6 +504,23 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
error = -ENXIO;
goto err_free_data_buffer;
}
+ caps_in_use = VMCI_CAPS_DATAGRAM;
+
+ /*
+ * Use 64-bit PPNs if the device supports them.
+ *
+ * There is no check for the return value of dma_set_mask_and_coherent
+ * since this driver can handle the default mask values if
+ * dma_set_mask_and_coherent fails.
+ */
+ if (capabilities & VMCI_CAPS_PPN64) {
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ use_ppn64 = true;
+ caps_in_use |= VMCI_CAPS_PPN64;
+ } else {
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ use_ppn64 = false;
+ }
/*
* If the hardware supports notifications, we will use that as
@@ -510,14 +535,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
"Unable to allocate notification bitmap\n");
} else {
memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
- capabilities |= VMCI_CAPS_NOTIFICATIONS;
+ caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
}
}
- dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
+ dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);
/* Let the host know which capabilities we intend to use. */
- iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
+ iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR);
/* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock);
@@ -529,13 +554,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* Register notification bitmap with device if that capability is
* used.
*/
- if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+ if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
unsigned long bitmap_ppn =
vmci_dev->notification_base >> PAGE_SHIFT;
if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
dev_warn(&pdev->dev,
- "VMCI device unable to register notification bitmap with PPN 0x%x\n",
- (u32) bitmap_ppn);
+ "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
+ bitmap_ppn);
error = -ENXIO;
goto err_remove_vmci_dev_g;
}
@@ -611,7 +636,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
/* Enable specific interrupt bits. */
cmd = VMCI_IMR_DATAGRAM;
- if (capabilities & VMCI_CAPS_NOTIFICATIONS)
+ if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
cmd |= VMCI_IMR_NOTIFICATION;
iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 264f4ed8eef2..f5f1aac9d163 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -435,8 +435,8 @@ static int qp_alloc_ppn_set(void *prod_q,
void *cons_q,
u64 num_consume_pages, struct ppn_set *ppn_set)
{
- u32 *produce_ppns;
- u32 *consume_ppns;
+ u64 *produce_ppns;
+ u64 *consume_ppns;
struct vmci_queue *produce_q = prod_q;
struct vmci_queue *consume_q = cons_q;
u64 i;
@@ -462,31 +462,13 @@ static int qp_alloc_ppn_set(void *prod_q,
return VMCI_ERROR_NO_MEM;
}
- for (i = 0; i < num_produce_pages; i++) {
- unsigned long pfn;
-
+ for (i = 0; i < num_produce_pages; i++)
produce_ppns[i] =
produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
- pfn = produce_ppns[i];
-
- /* Fail allocation if PFN isn't supported by hypervisor. */
- if (sizeof(pfn) > sizeof(*produce_ppns)
- && pfn != produce_ppns[i])
- goto ppn_error;
- }
-
- for (i = 0; i < num_consume_pages; i++) {
- unsigned long pfn;
+ for (i = 0; i < num_consume_pages; i++)
consume_ppns[i] =
consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
- pfn = consume_ppns[i];
-
- /* Fail allocation if PFN isn't supported by hypervisor. */
- if (sizeof(pfn) > sizeof(*consume_ppns)
- && pfn != consume_ppns[i])
- goto ppn_error;
- }
ppn_set->num_produce_pages = num_produce_pages;
ppn_set->num_consume_pages = num_consume_pages;
@@ -494,11 +476,6 @@ static int qp_alloc_ppn_set(void *prod_q,
ppn_set->consume_ppns = consume_ppns;
ppn_set->initialized = true;
return VMCI_SUCCESS;
-
- ppn_error:
- kfree(produce_ppns);
- kfree(consume_ppns);
- return VMCI_ERROR_INVALID_ARGS;
}
/*
@@ -520,12 +497,28 @@ static void qp_free_ppn_set(struct ppn_set *ppn_set)
*/
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
- memcpy(call_buf, ppn_set->produce_ppns,
- ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
- memcpy(call_buf +
- ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
- ppn_set->consume_ppns,
- ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
+ if (vmci_use_ppn64()) {
+ memcpy(call_buf, ppn_set->produce_ppns,
+ ppn_set->num_produce_pages *
+ sizeof(*ppn_set->produce_ppns));
+ memcpy(call_buf +
+ ppn_set->num_produce_pages *
+ sizeof(*ppn_set->produce_ppns),
+ ppn_set->consume_ppns,
+ ppn_set->num_consume_pages *
+ sizeof(*ppn_set->consume_ppns));
+ } else {
+ int i;
+ u32 *ppns = (u32 *) call_buf;
+
+ for (i = 0; i < ppn_set->num_produce_pages; i++)
+ ppns[i] = (u32) ppn_set->produce_ppns[i];
+
+ ppns = &ppns[ppn_set->num_produce_pages];
+
+ for (i = 0; i < ppn_set->num_consume_pages; i++)
+ ppns[i] = (u32) ppn_set->consume_ppns[i];
+ }
return VMCI_SUCCESS;
}
@@ -951,13 +944,15 @@ static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
struct vmci_qp_alloc_msg *alloc_msg;
size_t msg_size;
+ size_t ppn_size;
int result;
if (!entry || entry->num_ppns <= 2)
return VMCI_ERROR_INVALID_ARGS;
+ ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
msg_size = sizeof(*alloc_msg) +
- (size_t) entry->num_ppns * sizeof(u32);
+ (size_t) entry->num_ppns * ppn_size;
alloc_msg = kmalloc(msg_size, GFP_KERNEL);
if (!alloc_msg)
return VMCI_ERROR_NO_MEM;
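For context on the sizing (editorial sketch, not from the patch; pa_to_ppn is a hypothetical name and PAGE_SHIFT == 12 is assumed): a PPN is simply a physical address shifted right by PAGE_SHIFT, so a 32-bit PPN can only describe the first 2^(32+12) = 2^44 bytes (16 TiB) of memory — the same 44-bit ceiling the guest driver falls back to with DMA_BIT_MASK(44) — while 64-bit PPNs lift that limit.

static inline u64 pa_to_ppn(u64 pa)
{
	/* e.g. pa = 1ULL << 44 gives PPN 1ULL << 32, which no longer fits in a u32 */
	return pa >> PAGE_SHIFT;
}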
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index ed177f04ef24..46c0b6c7bafb 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -28,8 +28,8 @@ typedef int (*vmci_event_release_cb) (void *client_data);
struct ppn_set {
u64 num_produce_pages;
u64 num_consume_pages;
- u32 *produce_ppns;
- u32 *consume_ppns;
+ u64 *produce_ppns;
+ u64 *consume_ppns;
bool initialized;
};
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index abba078f7f49..95ffe008ebdf 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,7 +8,7 @@ mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- slot-gpio.o
+ slot-gpio.o regulator.o
mmc_core-$(CONFIG_OF) += pwrseq.o
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
obj-$(CONFIG_PWRSEQ_SD8787) += pwrseq_sd8787.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index aef1185f383d..2c71a434c915 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1124,7 +1124,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr, arg;
+ unsigned int from, nr;
int err = 0, type = MMC_BLK_DISCARD;
blk_status_t status = BLK_STS_OK;
@@ -1136,24 +1136,18 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_discard(card))
- arg = MMC_DISCARD_ARG;
- else if (mmc_can_trim(card))
- arg = MMC_TRIM_ARG;
- else
- arg = MMC_ERASE_ARG;
do {
err = 0;
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
- arg == MMC_TRIM_ARG ?
+ card->erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
0);
}
if (!err)
- err = mmc_erase(card, from, nr, arg);
+ err = mmc_erase(card, from, nr, card->erase_arg);
} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
if (err)
status = BLK_STS_IOERR;
@@ -2112,7 +2106,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
if (waiting)
wake_up(&mq->wait);
else
- kblockd_schedule_work(&mq->complete_work);
+ queue_work(mq->card->complete_wq, &mq->complete_work);
return;
}
@@ -2380,12 +2374,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
"mmcblk%u%s", card->host->index, subname ? subname : "");
- if (mmc_card_mmc(card))
- blk_queue_logical_block_size(md->queue.queue,
- card->ext_csd.data_sector_size);
- else
- blk_queue_logical_block_size(md->queue.queue, 512);
-
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
@@ -2774,8 +2762,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
return ret;
}
-DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
- NULL, "%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
+ NULL, "%08llx\n");
/* That is two digits * 512 + 1 for newline */
#define EXT_CSD_STR_LEN 1025
@@ -2863,8 +2851,9 @@ static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
if (mmc_card_mmc(card) || mmc_card_sd(card)) {
md->status_dentry =
- debugfs_create_file("status", S_IRUSR, root, card,
- &mmc_dbg_card_status_fops);
+ debugfs_create_file_unsafe("status", 0400, root,
+ card,
+ &mmc_dbg_card_status_fops);
if (!md->status_dentry)
return -EIO;
}
@@ -2924,6 +2913,13 @@ static int mmc_blk_probe(struct mmc_card *card)
mmc_fixup_device(card, mmc_blk_fixups);
+ card->complete_wq = alloc_workqueue("mmc_complete",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (unlikely(!card->complete_wq)) {
+ pr_err("Failed to create mmc completion workqueue");
+ return -ENOMEM;
+ }
+
md = mmc_blk_alloc(card);
if (IS_ERR(md))
return PTR_ERR(md);
@@ -2987,6 +2983,7 @@ static void mmc_blk_remove(struct mmc_card *card)
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
dev_set_drvdata(&card->dev, NULL);
+ destroy_workqueue(card->complete_wq);
}
static int _mmc_blk_suspend(struct mmc_card *card)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5bd58b95d318..6db36dc870b5 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -21,7 +21,6 @@
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
-#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
@@ -52,6 +51,7 @@
/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
+#define SD_DISCARD_TIMEOUT_MS (250)
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
@@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
if (!data)
return;
- if (cmd->error || data->error ||
+ if ((cmd && cmd->error) || data->error ||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
@@ -758,33 +758,6 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_set_data_timeout);
-/**
- * mmc_align_data_size - pads a transfer size to a more optimal value
- * @card: the MMC card associated with the data transfer
- * @sz: original transfer size
- *
- * Pads the original data size with a number of extra bytes in
- * order to avoid controller bugs and/or performance hits
- * (e.g. some controllers revert to PIO for certain sizes).
- *
- * Returns the improved size, which might be unmodified.
- *
- * Note that this function is only relevant when issuing a
- * single scatter gather entry.
- */
-unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
-{
- /*
- * FIXME: We don't have a system for the controller to tell
- * the core about its problems yet, so for now we just 32-bit
- * align the size.
- */
- sz = ((sz + 3) / 4) * 4;
-
- return sz;
-}
-EXPORT_SYMBOL(mmc_align_data_size);
-
/*
* Allow claiming an already claimed host if the context is the same or there is
* no context but the task is the same.
@@ -1112,55 +1085,6 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
return mask;
}
-EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
-
-#ifdef CONFIG_OF
-
-/**
- * mmc_of_parse_voltage - return mask of supported voltages
- * @np: The device node need to be parsed.
- * @mask: mask of voltages available for MMC/SD/SDIO
- *
- * Parse the "voltage-ranges" DT property, returning zero if it is not
- * found, negative errno if the voltage-range specification is invalid,
- * or one if the voltage-range is specified and successfully parsed.
- */
-int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
-{
- const u32 *voltage_ranges;
- int num_ranges, i;
-
- voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
- num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
- if (!voltage_ranges) {
- pr_debug("%pOF: voltage-ranges unspecified\n", np);
- return 0;
- }
- if (!num_ranges) {
- pr_err("%pOF: voltage-ranges empty\n", np);
- return -EINVAL;
- }
-
- for (i = 0; i < num_ranges; i++) {
- const int j = i * 2;
- u32 ocr_mask;
-
- ocr_mask = mmc_vddrange_to_ocrmask(
- be32_to_cpu(voltage_ranges[j]),
- be32_to_cpu(voltage_ranges[j + 1]));
- if (!ocr_mask) {
- pr_err("%pOF: voltage-range #%d is invalid\n",
- np, i);
- return -EINVAL;
- }
- *mask |= ocr_mask;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(mmc_of_parse_voltage);
-
-#endif /* CONFIG_OF */
static int mmc_of_get_func_num(struct device_node *node)
{
@@ -1190,246 +1114,6 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host,
return NULL;
}
-#ifdef CONFIG_REGULATOR
-
-/**
- * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
- * @vdd_bit: OCR bit number
- * @min_uV: minimum voltage value (mV)
- * @max_uV: maximum voltage value (mV)
- *
- * This function returns the voltage range according to the provided OCR
- * bit number. If conversion is not possible a negative errno value returned.
- */
-static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
-{
- int tmp;
-
- if (!vdd_bit)
- return -EINVAL;
-
- /*
- * REVISIT mmc_vddrange_to_ocrmask() may have set some
- * bits this regulator doesn't quite support ... don't
- * be too picky, most cards and regulators are OK with
- * a 0.1V range goof (it's a small error percentage).
- */
- tmp = vdd_bit - ilog2(MMC_VDD_165_195);
- if (tmp == 0) {
- *min_uV = 1650 * 1000;
- *max_uV = 1950 * 1000;
- } else {
- *min_uV = 1900 * 1000 + tmp * 100 * 1000;
- *max_uV = *min_uV + 100 * 1000;
- }
-
- return 0;
-}
-
-/**
- * mmc_regulator_get_ocrmask - return mask of supported voltages
- * @supply: regulator to use
- *
- * This returns either a negative errno, or a mask of voltages that
- * can be provided to MMC/SD/SDIO devices using the specified voltage
- * regulator. This would normally be called before registering the
- * MMC host adapter.
- */
-int mmc_regulator_get_ocrmask(struct regulator *supply)
-{
- int result = 0;
- int count;
- int i;
- int vdd_uV;
- int vdd_mV;
-
- count = regulator_count_voltages(supply);
- if (count < 0)
- return count;
-
- for (i = 0; i < count; i++) {
- vdd_uV = regulator_list_voltage(supply, i);
- if (vdd_uV <= 0)
- continue;
-
- vdd_mV = vdd_uV / 1000;
- result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
- }
-
- if (!result) {
- vdd_uV = regulator_get_voltage(supply);
- if (vdd_uV <= 0)
- return vdd_uV;
-
- vdd_mV = vdd_uV / 1000;
- result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
- }
-
- return result;
-}
-EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
-
-/**
- * mmc_regulator_set_ocr - set regulator to match host->ios voltage
- * @mmc: the host to regulate
- * @supply: regulator to use
- * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
- *
- * Returns zero on success, else negative errno.
- *
- * MMC host drivers may use this to enable or disable a regulator using
- * a particular supply voltage. This would normally be called from the
- * set_ios() method.
- */
-int mmc_regulator_set_ocr(struct mmc_host *mmc,
- struct regulator *supply,
- unsigned short vdd_bit)
-{
- int result = 0;
- int min_uV, max_uV;
-
- if (vdd_bit) {
- mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
-
- result = regulator_set_voltage(supply, min_uV, max_uV);
- if (result == 0 && !mmc->regulator_enabled) {
- result = regulator_enable(supply);
- if (!result)
- mmc->regulator_enabled = true;
- }
- } else if (mmc->regulator_enabled) {
- result = regulator_disable(supply);
- if (result == 0)
- mmc->regulator_enabled = false;
- }
-
- if (result)
- dev_err(mmc_dev(mmc),
- "could not set regulator OCR (%d)\n", result);
- return result;
-}
-EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
-
-static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
- int min_uV, int target_uV,
- int max_uV)
-{
- /*
- * Check if supported first to avoid errors since we may try several
- * signal levels during power up and don't want to show errors.
- */
- if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
- return -EINVAL;
-
- return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
- max_uV);
-}
-
-/**
- * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
- *
- * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
- * That will match the behavior of old boards where VQMMC and VMMC were supplied
- * by the same supply. The Bus Operating conditions for 3.3V signaling in the
- * SD card spec also define VQMMC in terms of VMMC.
- * If this is not possible we'll try the full 2.7-3.6V of the spec.
- *
- * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
- * requested voltage. This is definitely a good idea for UHS where there's a
- * separate regulator on the card that's trying to make 1.8V and it's best if
- * we match.
- *
- * This function is expected to be used by a controller's
- * start_signal_voltage_switch() function.
- */
-int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct device *dev = mmc_dev(mmc);
- int ret, volt, min_uV, max_uV;
-
- /* If no vqmmc supply then we can't change the voltage */
- if (IS_ERR(mmc->supply.vqmmc))
- return -EINVAL;
-
- switch (ios->signal_voltage) {
- case MMC_SIGNAL_VOLTAGE_120:
- return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- 1100000, 1200000, 1300000);
- case MMC_SIGNAL_VOLTAGE_180:
- return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- 1700000, 1800000, 1950000);
- case MMC_SIGNAL_VOLTAGE_330:
- ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
- if (ret < 0)
- return ret;
-
- dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
- __func__, volt, max_uV);
-
- min_uV = max(volt - 300000, 2700000);
- max_uV = min(max_uV + 200000, 3600000);
-
- /*
- * Due to a limitation in the current implementation of
- * regulator_set_voltage_triplet() which is taking the lowest
- * voltage possible if below the target, search for a suitable
- * voltage in two steps and try to stay close to vmmc
- * with a 0.3V tolerance at first.
- */
- if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- min_uV, volt, max_uV))
- return 0;
-
- return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- 2700000, volt, 3600000);
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
-
-#endif /* CONFIG_REGULATOR */
-
-/**
- * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
- * @mmc: the host to regulate
- *
- * Returns 0 or errno. errno should be handled, it is either a critical error
- * or -EPROBE_DEFER. 0 means no critical error but it does not mean all
- * regulators have been found because they all are optional. If you require
- * certain regulators, you need to check separately in your driver if they got
- * populated after calling this function.
- */
-int mmc_regulator_get_supply(struct mmc_host *mmc)
-{
- struct device *dev = mmc_dev(mmc);
- int ret;
-
- mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
- mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
-
- if (IS_ERR(mmc->supply.vmmc)) {
- if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_dbg(dev, "No vmmc regulator found\n");
- } else {
- ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
- if (ret > 0)
- mmc->ocr_avail = ret;
- else
- dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
- }
-
- if (IS_ERR(mmc->supply.vqmmc)) {
- if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_dbg(dev, "No vqmmc regulator found\n");
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
-
/*
* Mask off any voltages we don't support and select
* the lowest voltage
@@ -1936,6 +1620,12 @@ static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
{
unsigned int erase_timeout;
+ /* For DISCARD none of the calculation below applies;
+ * the busy timeout is 250 msec per discard command.
+ */
+ if (arg == SD_DISCARD_ARG)
+ return SD_DISCARD_TIMEOUT_MS;
+
if (card->ssr.erase_timeout) {
/* Erase timeout specified in SD Status Register (SSR) */
erase_timeout = card->ssr.erase_timeout * qty +
@@ -2164,7 +1854,7 @@ static unsigned int mmc_align_erase_size(struct mmc_card *card,
* @card: card to erase
* @from: first sector to erase
* @nr: number of sectors to erase
- * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ * @arg: erase command argument
*
* Caller must claim host before calling this function.
*/
@@ -2181,14 +1871,14 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
if (!card->erase_size)
return -EOPNOTSUPP;
- if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
+ if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
return -EOPNOTSUPP;
- if ((arg & MMC_SECURE_ARGS) &&
+ if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
return -EOPNOTSUPP;
- if ((arg & MMC_TRIM_ARGS) &&
+ if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
return -EOPNOTSUPP;
@@ -2381,9 +2071,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
return card->pref_erase;
max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
- if (max_discard && mmc_can_trim(card)) {
+ if (mmc_can_trim(card)) {
max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
- if (max_trim < max_discard)
+ if (max_trim < max_discard || max_discard == 0)
max_discard = max_trim;
} else if (max_discard < card->erase_size) {
max_discard = 0;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 8fb6bc37f808..b5083b13d594 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -59,6 +59,7 @@ void mmc_power_up(struct mmc_host *host, u32 ocr);
void mmc_power_off(struct mmc_host *host);
void mmc_power_cycle(struct mmc_host *host, u32 ocr);
void mmc_set_initial_state(struct mmc_host *host);
+u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
static inline void mmc_delay(unsigned int ms)
{
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f57f5de54206..3a4402a79904 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -194,7 +194,7 @@ int mmc_of_parse(struct mmc_host *host)
switch (bus_width) {
case 8:
host->caps |= MMC_CAP_8_BIT_DATA;
- /* Hosts capable of 8-bit transfers can also do 4 bits */
+ /* fall through - Hosts capable of 8-bit can also do 4 bits */
case 4:
host->caps |= MMC_CAP_4_BIT_DATA;
break;
@@ -234,7 +234,7 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "broken-cd"))
host->caps |= MMC_CAP_NEEDS_POLL;
- ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+ ret = mmc_gpiod_request_cd(host, "cd", 0, false,
cd_debounce_delay_ms * 1000,
&cd_gpio_invert);
if (!ret)
@@ -260,7 +260,7 @@ int mmc_of_parse(struct mmc_host *host)
/* Parse Write Protection */
ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
- ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+ ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
@@ -349,6 +349,50 @@ int mmc_of_parse(struct mmc_host *host)
EXPORT_SYMBOL(mmc_of_parse);
/**
+ * mmc_of_parse_voltage - return mask of supported voltages
+ * @np: The device node to be parsed.
+ * @mask: mask of voltages available for MMC/SD/SDIO
+ *
+ * Parse the "voltage-ranges" DT property, returning zero if it is not
+ * found, negative errno if the voltage-range specification is invalid,
+ * or one if the voltage-range is specified and successfully parsed.
+ */
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
+{
+ const u32 *voltage_ranges;
+ int num_ranges, i;
+
+ voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+ num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
+ if (!voltage_ranges) {
+ pr_debug("%pOF: voltage-ranges unspecified\n", np);
+ return 0;
+ }
+ if (!num_ranges) {
+ pr_err("%pOF: voltage-ranges empty\n", np);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ const int j = i * 2;
+ u32 ocr_mask;
+
+ ocr_mask = mmc_vddrange_to_ocrmask(
+ be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
+ if (!ocr_mask) {
+ pr_err("%pOF: voltage-range #%d is invalid\n",
+ np, i);
+ return -EINVAL;
+ }
+ *mask |= ocr_mask;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(mmc_of_parse_voltage);
+
+/**
* mmc_alloc_host - initialise the per-host structure.
* @extra: sizeof private data structure
* @dev: pointer to host device model structure
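A minimal caller sketch for the relocated mmc_of_parse_voltage() (illustrative only; np and mmc stand for whatever node and host pointer the driver already holds):

	u32 ocr_mask = 0;
	int ret;

	ret = mmc_of_parse_voltage(np, &ocr_mask);
	if (ret < 0)
		return ret;			/* malformed "voltage-ranges" property */
	else if (ret == 1)
		mmc->ocr_avail = ocr_mask;	/* property present and parsed */
	/* ret == 0: property absent, fall back to other OCR sources */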
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index da892a599524..3e786ba204c3 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1594,6 +1594,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (oldcard) {
if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
err = -ENOENT;
goto err;
}
@@ -1743,6 +1745,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
}
+ /* set erase_arg */
+ if (mmc_can_discard(card))
+ card->erase_arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
+ card->erase_arg = MMC_TRIM_ARG;
+ else
+ card->erase_arg = MMC_ERASE_ARG;
+
/*
* Select timing interface
*/
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 9054329fe903..c5208fb312ae 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -562,7 +562,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;
- err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto out;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 35cc138b096d..15a45ec6518d 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
+ unsigned block_size = 512;
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ if (mmc_card_mmc(card))
+ block_size = card->ext_csd.data_sector_size;
+
+ blk_queue_logical_block_size(mq->queue, block_size);
+ blk_queue_max_segment_size(mq->queue,
+ round_down(host->max_seg_size, block_size));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
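A worked example of the new segment rounding (the numbers are assumptions, not from the patch): a host advertising max_seg_size = 65535 with 512-byte logical blocks now gets a segment limit that is a whole number of blocks.

	/* e.g. host->max_seg_size = 65535, block_size = 512 (assumed values) */
	unsigned int seg = round_down(65535U, 512U);	/* = 127 * 512 = 65024 */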
diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
new file mode 100644
index 000000000000..b6febbcf8978
--- /dev/null
+++ b/drivers/mmc/core/regulator.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions for MMC regulators.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "host.h"
+
+#ifdef CONFIG_REGULATOR
+
+/**
+ * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
+ * @vdd_bit: OCR bit number
+ * @min_uV: minimum voltage value (mV)
+ * @max_uV: maximum voltage value (mV)
+ *
+ * This function returns the voltage range according to the provided OCR
+ * bit number. If conversion is not possible, a negative errno value is returned.
+ */
+static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
+{
+ int tmp;
+
+ if (!vdd_bit)
+ return -EINVAL;
+
+ /*
+ * REVISIT mmc_vddrange_to_ocrmask() may have set some
+ * bits this regulator doesn't quite support ... don't
+ * be too picky, most cards and regulators are OK with
+ * a 0.1V range goof (it's a small error percentage).
+ */
+ tmp = vdd_bit - ilog2(MMC_VDD_165_195);
+ if (tmp == 0) {
+ *min_uV = 1650 * 1000;
+ *max_uV = 1950 * 1000;
+ } else {
+ *min_uV = 1900 * 1000 + tmp * 100 * 1000;
+ *max_uV = *min_uV + 100 * 1000;
+ }
+
+ return 0;
+}
+
+/**
+ * mmc_regulator_get_ocrmask - return mask of supported voltages
+ * @supply: regulator to use
+ *
+ * This returns either a negative errno, or a mask of voltages that
+ * can be provided to MMC/SD/SDIO devices using the specified voltage
+ * regulator. This would normally be called before registering the
+ * MMC host adapter.
+ */
+static int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ int result = 0;
+ int count;
+ int i;
+ int vdd_uV;
+ int vdd_mV;
+
+ count = regulator_count_voltages(supply);
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ vdd_uV = regulator_list_voltage(supply, i);
+ if (vdd_uV <= 0)
+ continue;
+
+ vdd_mV = vdd_uV / 1000;
+ result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
+ if (!result) {
+ vdd_uV = regulator_get_voltage(supply);
+ if (vdd_uV <= 0)
+ return vdd_uV;
+
+ vdd_mV = vdd_uV / 1000;
+ result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
+ return result;
+}
+
+/**
+ * mmc_regulator_set_ocr - set regulator to match host->ios voltage
+ * @mmc: the host to regulate
+ * @supply: regulator to use
+ * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * MMC host drivers may use this to enable or disable a regulator using
+ * a particular supply voltage. This would normally be called from the
+ * set_ios() method.
+ */
+int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
+{
+ int result = 0;
+ int min_uV, max_uV;
+
+ if (vdd_bit) {
+ mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
+
+ result = regulator_set_voltage(supply, min_uV, max_uV);
+ if (result == 0 && !mmc->regulator_enabled) {
+ result = regulator_enable(supply);
+ if (!result)
+ mmc->regulator_enabled = true;
+ }
+ } else if (mmc->regulator_enabled) {
+ result = regulator_disable(supply);
+ if (result == 0)
+ mmc->regulator_enabled = false;
+ }
+
+ if (result)
+ dev_err(mmc_dev(mmc),
+ "could not set regulator OCR (%d)\n", result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
+
+static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
+ int min_uV, int target_uV,
+ int max_uV)
+{
+ /*
+ * Check if supported first to avoid errors since we may try several
+ * signal levels during power up and don't want to show errors.
+ */
+ if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
+ return -EINVAL;
+
+ return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
+ max_uV);
+}
+
+/**
+ * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
+ *
+ * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
+ * That will match the behavior of old boards where VQMMC and VMMC were supplied
+ * by the same supply. The Bus Operating conditions for 3.3V signaling in the
+ * SD card spec also define VQMMC in terms of VMMC.
+ * If this is not possible we'll try the full 2.7-3.6V of the spec.
+ *
+ * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
+ * requested voltage. This is definitely a good idea for UHS where there's a
+ * separate regulator on the card that's trying to make 1.8V and it's best if
+ * we match.
+ *
+ * This function is expected to be used by a controller's
+ * start_signal_voltage_switch() function.
+ */
+int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ int ret, volt, min_uV, max_uV;
+
+ /* If no vqmmc supply then we can't change the voltage */
+ if (IS_ERR(mmc->supply.vqmmc))
+ return -EINVAL;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_120:
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 1100000, 1200000, 1300000);
+ case MMC_SIGNAL_VOLTAGE_180:
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 1700000, 1800000, 1950000);
+ case MMC_SIGNAL_VOLTAGE_330:
+ ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
+ __func__, volt, max_uV);
+
+ min_uV = max(volt - 300000, 2700000);
+ max_uV = min(max_uV + 200000, 3600000);
+
+ /*
+ * Due to a limitation in the current implementation of
+ * regulator_set_voltage_triplet() which is taking the lowest
+ * voltage possible if below the target, search for a suitable
+ * voltage in two steps and try to stay close to vmmc
+ * with a 0.3V tolerance at first.
+ */
+ if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ min_uV, volt, max_uV))
+ return 0;
+
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 2700000, volt, 3600000);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
+
+#else
+
+static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ return 0;
+}
+
+#endif /* CONFIG_REGULATOR */
+
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. errno should be handled; it is either a critical error
+ * or -EPROBE_DEFER. 0 means no critical error but it does not mean all
+ * regulators have been found because they all are optional. If you require
+ * certain regulators, you need to check separately in your driver if they got
+ * populated after calling this function.
+ */
+int mmc_regulator_get_supply(struct mmc_host *mmc)
+{
+ struct device *dev = mmc_dev(mmc);
+ int ret;
+
+ mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
+ mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
+
+ if (IS_ERR(mmc->supply.vmmc)) {
+ if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(dev, "No vmmc regulator found\n");
+ } else {
+ ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+ if (ret > 0)
+ mmc->ocr_avail = ret;
+ else
+ dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
+ }
+
+ if (IS_ERR(mmc->supply.vqmmc)) {
+ if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(dev, "No vqmmc regulator found\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
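A sketch of how a host driver typically consumes these helpers (the myhost_* names are invented; the pattern mirrors the jz4740 hunk further below):

static int myhost_probe(struct mmc_host *mmc)
{
	int ret = mmc_regulator_get_supply(mmc);

	if (ret)
		return ret;	/* -EPROBE_DEFER or a critical error */
	/* vmmc/vqmmc stay ERR_PTR() when absent - both are optional */
	return 0;
}

static void myhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode == MMC_POWER_OFF ?
				      0 : ios->vdd);
}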
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d0d9f90e7cdf..265e1aeeb9d8 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -209,6 +209,11 @@ static int mmc_decode_scr(struct mmc_card *card)
/* Check if Physical Layer Spec v3.0 is supported */
scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
+ if (scr->sda_spec3) {
+ scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1);
+ scr->sda_specx = UNSTUFF_BITS(resp, 38, 4);
+ }
+
if (UNSTUFF_BITS(resp, 55, 1))
card->erased_byte = 0xFF;
else
@@ -226,6 +231,8 @@ static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
__be32 *raw_ssr;
+ u32 resp[4] = {};
+ u8 discard_support;
int i;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -271,6 +278,14 @@ static int mmc_read_ssr(struct mmc_card *card)
}
}
+ /*
+ * Starting with SD 5.1, discard is supported if DISCARD_SUPPORT (bit 313) is set.
+ */
+ resp[3] = card->raw_ssr[6];
+ discard_support = UNSTUFF_BITS(resp, 313 - 288, 1);
+ card->erase_arg = (card->scr.sda_specx && discard_support) ?
+ SD_DISCARD_ARG : SD_ERASE_ARG;
+
return 0;
}
@@ -936,8 +951,11 @@ retry:
return err;
if (oldcard) {
- if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0)
+ if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
return -ENOENT;
+ }
card = oldcard;
} else {
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 47056d8d1bac..0bb0b8419016 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -52,36 +52,17 @@ int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
}
EXPORT_SYMBOL_GPL(mmc_app_cmd);
-/**
- * mmc_wait_for_app_cmd - start an application command and wait for
- completion
- * @host: MMC host to start command
- * @card: Card to send MMC_APP_CMD to
- * @cmd: MMC command to start
- * @retries: maximum number of retries
- *
- * Sends a MMC_APP_CMD, checks the card response, sends the command
- * in the parameter and waits for it to complete. Return any error
- * that occurred while the command was executing. Do not attempt to
- * parse the response.
- */
-int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
- struct mmc_command *cmd, int retries)
+static int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
+ struct mmc_command *cmd)
{
struct mmc_request mrq = {};
-
- int i, err;
-
- if (retries < 0)
- retries = MMC_CMD_RETRIES;
-
- err = -EIO;
+ int i, err = -EIO;
/*
* We have to resend MMC_APP_CMD for each attempt so
* we cannot use the retries field in mmc_command.
*/
- for (i = 0;i <= retries;i++) {
+ for (i = 0; i <= MMC_CMD_RETRIES; i++) {
err = mmc_app_cmd(host, card);
if (err) {
/* no point in retrying; no APP commands allowed */
@@ -116,8 +97,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
return err;
}
-EXPORT_SYMBOL(mmc_wait_for_app_cmd);
-
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
struct mmc_command cmd = {};
@@ -136,7 +115,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
return -EINVAL;
}
- return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
+ return mmc_wait_for_app_cmd(card->host, card, &cmd);
}
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
@@ -152,7 +131,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
for (i = 100; i; i--) {
- err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES);
+ err = mmc_wait_for_app_cmd(host, NULL, &cmd);
if (err)
break;
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 0e6c3d51e66d..ffaed5cacc88 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -16,7 +16,6 @@
struct mmc_card;
struct mmc_host;
-struct mmc_command;
int mmc_app_set_bus_width(struct mmc_card *card, int width);
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
@@ -27,8 +26,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp);
int mmc_app_sd_status(struct mmc_card *card, void *ssr);
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
-int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
- struct mmc_command *cmd, int retries);
#endif
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index d8e17ea6126d..6718fc8bb40f 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -617,6 +617,8 @@ try_again:
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
mmc_remove_card(card);
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
return -ENOENT;
}
} else {
@@ -624,6 +626,8 @@ try_again:
if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
mmc_remove_card(card);
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
return -ENOENT;
}
}
@@ -736,8 +740,11 @@ try_again:
int same = (card->cis.vendor == oldcard->cis.vendor &&
card->cis.device == oldcard->cis.device);
mmc_remove_card(card);
- if (!same)
+ if (!same) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
return -ENOENT;
+ }
card = oldcard;
}
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index b6d8203e46eb..62b0f5ecc7f7 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -179,7 +179,6 @@ static int sdio_bus_remove(struct device *dev)
{
struct sdio_driver *drv = to_sdio_driver(dev->driver);
struct sdio_func *func = dev_to_sdio_func(dev);
- int ret = 0;
/* Make sure card is powered before invoking ->remove() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
@@ -205,7 +204,7 @@ static int sdio_bus_remove(struct device *dev)
dev_pm_domain_detach(dev, false);
- return ret;
+ return 0;
}
static const struct dev_pm_ops sdio_bus_pm_ops = {
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index d40744bbafa9..3f67fbbe0d75 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -10,6 +10,7 @@
*/
#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
@@ -203,6 +204,21 @@ static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
return min(mval, 512u); /* maximum size for byte mode */
}
+/*
+ * This is legacy code, which needs to be re-worked some day. Basically we need
+ * to take into account the properties of the host, so as to enable the SDIO func
+ * driver layer to allocate optimal buffers.
+ */
+static inline unsigned int _sdio_align_size(unsigned int sz)
+{
+ /*
+ * FIXME: We don't have a system for the controller to tell
+ * the core about its problems yet, so for now we just 32-bit
+ * align the size.
+ */
+ return ALIGN(sz, 4);
+}
+
/**
* sdio_align_size - pads a transfer size to a more optimal value
* @func: SDIO function
@@ -230,7 +246,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
* wants to increase the size up to a point where it
* might need more than one block.
*/
- sz = mmc_align_data_size(func->card, sz);
+ sz = _sdio_align_size(sz);
/*
* If we can still do this with just a byte transfer, then
@@ -252,7 +268,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
*/
blk_sz = ((sz + func->cur_blksize - 1) /
func->cur_blksize) * func->cur_blksize;
- blk_sz = mmc_align_data_size(func->card, blk_sz);
+ blk_sz = _sdio_align_size(blk_sz);
/*
* This value is only good if it is still just
@@ -265,8 +281,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
* We failed to do one request, but at least try to
* pad the remainder properly.
*/
- byte_sz = mmc_align_data_size(func->card,
- sz % func->cur_blksize);
+ byte_sz = _sdio_align_size(sz % func->cur_blksize);
if (byte_sz <= sdio_max_byte_size(func)) {
blk_sz = sz / func->cur_blksize;
return blk_sz * func->cur_blksize + byte_sz;
@@ -276,16 +291,14 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
* We need multiple requests, so first check that the
* controller can handle the chunk size;
*/
- chunk_sz = mmc_align_data_size(func->card,
- sdio_max_byte_size(func));
+ chunk_sz = _sdio_align_size(sdio_max_byte_size(func));
if (chunk_sz == sdio_max_byte_size(func)) {
/*
* Fix up the size of the remainder (if any)
*/
byte_sz = orig_sz % chunk_sz;
if (byte_sz) {
- byte_sz = mmc_align_data_size(func->card,
- byte_sz);
+ byte_sz = _sdio_align_size(byte_sz);
}
return (orig_sz / chunk_sz) * chunk_sz + byte_sz;
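For context, a function driver keeps using the public wrapper rather than the new static helper; a hedged example (buf, addr and count are whatever the driver already tracks):

	count = sdio_align_size(func, count);	/* pad to a host-friendly size */
	err = sdio_readsb(func, buf, addr, count);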
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index 96945cafbf0b..1f6d0447ea0f 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -25,7 +25,6 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
int sdio_reset(struct mmc_host *host);
-unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
void sdio_irq_work(struct work_struct *work);
static inline bool sdio_is_io_busy(u32 opcode, u32 arg)
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 319ccd93383d..4afc6b87b465 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -22,7 +22,6 @@
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
- bool override_ro_active_level;
bool override_cd_active_level;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
@@ -71,10 +70,6 @@ int mmc_gpio_get_ro(struct mmc_host *host)
if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
- if (ctx->override_ro_active_level)
- return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^
- !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
-
return gpiod_get_value_cansleep(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
@@ -225,7 +220,6 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
* @host: mmc host
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
- * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
* @debounce: debounce time in microseconds
* @gpio_invert: will return whether the GPIO line is inverted or not,
* set to NULL to ignore
@@ -233,7 +227,7 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
* Returns zero on success, else an error.
*/
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx, bool override_active_level,
+ unsigned int idx,
unsigned int debounce, bool *gpio_invert)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -253,7 +247,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
if (gpio_invert)
*gpio_invert = !gpiod_is_active_low(desc);
- ctx->override_ro_active_level = override_active_level;
ctx->ro_gpio = desc;
return 0;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e26b8145efb3..28fcd8f580a1 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -116,7 +116,7 @@ config MMC_RICOH_MMC
config MMC_SDHCI_ACPI
tristate "SDHCI support for ACPI enumerated SDHCI controllers"
- depends on MMC_SDHCI && ACPI
+ depends on MMC_SDHCI && ACPI && PCI
select IOSF_MBI if X86
help
This selects support for ACPI enumerated SDHCI controllers,
@@ -224,6 +224,7 @@ config MMC_SDHCI_ESDHC_IMX
depends on ARCH_MXC
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Freescale eSDHC/uSDHC controller support
found on i.MX25, i.MX35 i.MX5x and i.MX6x.
@@ -250,6 +251,7 @@ config MMC_SDHCI_TEGRA
depends on ARCH_TEGRA
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Tegra SD/MMC controller. If you have a Tegra
platform with SD or MMC devices, say Y or M here.
@@ -978,7 +980,7 @@ config MMC_SDHCI_OMAP
tristate "TI SDHCI Controller Support"
depends on MMC_SDHCI_PLTFM && OF
select THERMAL
- select TI_SOC_THERMAL
+ imply TI_SOC_THERMAL
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's DRA7 SOCs. The controller supports
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 47189f9ed4e2..735aa5871358 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1410,6 +1410,9 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ slot->sdc_reg |= ATMCI_SDCBUS_8BIT;
+ break;
}
if (ios->clock) {
@@ -2275,8 +2278,11 @@ static int atmci_init_slot(struct atmel_mci *host,
* use only one bit for data to prevent fifo underruns and overruns
* which will corrupt data.
*/
- if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
+ if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) {
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ if (slot_data->bus_width >= 8)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
+ }
if (atmci_get_version(host) < 0x200) {
mmc->max_segs = 256;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 50293529d6de..7e0d3a49c06d 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -148,7 +148,6 @@ struct bcm2835_host {
void __iomem *ioaddr;
u32 phys_addr;
- struct mmc_host *mmc;
struct platform_device *pdev;
int clock; /* Current clock speed */
@@ -618,7 +617,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host)
"failed to terminate DMA (%d)\n", err);
}
- mmc_request_done(host->mmc, mrq);
+ mmc_request_done(mmc_from_priv(host), mrq);
}
static
@@ -837,7 +836,7 @@ static void bcm2835_timeout(struct work_struct *work)
dev_err(dev, "timeout waiting for hardware interrupt.\n");
bcm2835_dumpregs(host);
- bcm2835_reset(host->mmc);
+ bcm2835_reset(mmc_from_priv(host));
if (host->data) {
host->data->error = -ETIMEDOUT;
@@ -1100,6 +1099,7 @@ static void bcm2835_dma_complete_work(struct work_struct *work)
static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
int div;
/* The SDCDIV register has 11 bits, and holds (div - 2). But
@@ -1143,18 +1143,18 @@ static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
div = SDCDIV_MAX_CDIV;
clock = host->max_clk / (div + 2);
- host->mmc->actual_clock = clock;
+ mmc->actual_clock = clock;
/* Calibrate some delays */
host->ns_per_fifo_word = (1000000000 / clock) *
- ((host->mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);
+ ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);
host->cdiv = div;
writel(host->cdiv, host->ioaddr + SDCDIV);
/* Set the timeout to 500ms */
- writel(host->mmc->actual_clock / 2, host->ioaddr + SDTOUT);
+ writel(mmc->actual_clock / 2, host->ioaddr + SDTOUT);
}
static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1264,7 +1264,7 @@ static const struct mmc_host_ops bcm2835_ops = {
static int bcm2835_add_host(struct bcm2835_host *host)
{
- struct mmc_host *mmc = host->mmc;
+ struct mmc_host *mmc = mmc_from_priv(host);
struct device *dev = &host->pdev->dev;
char pio_limit_string[20];
int ret;
@@ -1286,7 +1286,7 @@ static int bcm2835_add_host(struct bcm2835_host *host)
spin_lock_init(&host->lock);
mutex_init(&host->mutex);
- if (IS_ERR_OR_NULL(host->dma_chan_rxtx)) {
+ if (!host->dma_chan_rxtx) {
dev_warn(dev, "unable to initialise DMA channel. Falling back to PIO\n");
host->use_dma = false;
} else {
@@ -1370,7 +1370,6 @@ static int bcm2835_probe(struct platform_device *pdev)
mmc->ops = &bcm2835_ops;
host = mmc_priv(mmc);
- host->mmc = mmc;
host->pdev = pdev;
spin_lock_init(&host->lock);
@@ -1431,6 +1430,8 @@ static int bcm2835_probe(struct platform_device *pdev)
err:
dev_dbg(dev, "%s -> err %d\n", __func__, ret);
+ if (host->dma_chan_rxtx)
+ dma_release_channel(host->dma_chan_rxtx);
mmc_free_host(mmc);
return ret;
@@ -1439,8 +1440,9 @@ err:
static int bcm2835_remove(struct platform_device *pdev)
{
struct bcm2835_host *host = platform_get_drvdata(pdev);
+ struct mmc_host *mmc = mmc_from_priv(host);
- mmc_remove_host(host->mmc);
+ mmc_remove_host(mmc);
writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
@@ -1452,8 +1454,7 @@ static int bcm2835_remove(struct platform_device *pdev)
if (host->dma_chan_rxtx)
dma_release_channel(host->dma_chan_rxtx);
- mmc_free_host(host->mmc);
- platform_set_drvdata(pdev, NULL);
+ mmc_free_host(mmc);
return 0;
}
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 1087b4c79cd6..4c477dcd2ada 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -566,30 +566,32 @@ static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
cb710_mmc_select_clock_divider(mmc, ios->clock);
- if (ios->power_mode != reader->last_power_mode)
- switch (ios->power_mode) {
- case MMC_POWER_ON:
- err = cb710_mmc_powerup(slot);
- if (err) {
- dev_warn(cb710_slot_dev(slot),
- "powerup failed (%d)- retrying\n", err);
- cb710_mmc_powerdown(slot);
- udelay(1);
+ if (ios->power_mode != reader->last_power_mode) {
+ switch (ios->power_mode) {
+ case MMC_POWER_ON:
err = cb710_mmc_powerup(slot);
- if (err)
+ if (err) {
dev_warn(cb710_slot_dev(slot),
- "powerup retry failed (%d) - expect errors\n",
+ "powerup failed (%d)- retrying\n", err);
+ cb710_mmc_powerdown(slot);
+ udelay(1);
+ err = cb710_mmc_powerup(slot);
+ if (err)
+ dev_warn(cb710_slot_dev(slot),
+ "powerup retry failed (%d) - expect errors\n",
err);
+ }
+ reader->last_power_mode = MMC_POWER_ON;
+ break;
+ case MMC_POWER_OFF:
+ cb710_mmc_powerdown(slot);
+ reader->last_power_mode = MMC_POWER_OFF;
+ break;
+ case MMC_POWER_UP:
+ default:
+ /* ignore */
+ break;
}
- reader->last_power_mode = MMC_POWER_ON;
- break;
- case MMC_POWER_OFF:
- cb710_mmc_powerdown(slot);
- reader->last_power_mode = MMC_POWER_OFF;
- break;
- case MMC_POWER_UP:
- default:
- /* ignore */;
}
cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1);
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 159270e947cf..a8af682a9182 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
- (cq_host->num_slots - 1);
+ cq_host->mmc->cqe_qdepth;
pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size,
&cq_host->desc_dma_base,
GFP_KERNEL);
+ if (!cq_host->desc_base)
+ return -ENOMEM;
+
cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
cq_host->data_size,
&cq_host->trans_desc_dma_base,
GFP_KERNEL);
- if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ if (!cq_host->trans_desc_base) {
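+ /* free the already-allocated task descriptor list and reset its pointers */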
+ dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+ cq_host->desc_base = NULL;
+ cq_host->desc_dma_base = 0;
return -ENOMEM;
+ }
pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 9e68c3645e22..49e0daf2ef5e 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1193,7 +1193,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
else if (ret)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index ed8f2254b66a..aa38b1a8017e 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -1,11 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Mellanox Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/bitfield.h>
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 33215d66afa2..63303022669c 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -21,7 +21,6 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -36,7 +35,6 @@
#include <asm/cacheflush.h>
#include <asm/mach-jz4740/dma.h>
-#include <asm/mach-jz4740/jz4740_mmc.h>
#define JZ_REG_MMC_STRPCL 0x00
#define JZ_REG_MMC_STATUS 0x04
@@ -148,9 +146,7 @@ enum jz4780_cookie {
struct jz4740_mmc_host {
struct mmc_host *mmc;
struct platform_device *pdev;
- struct jz4740_mmc_platform_data *pdata;
struct clk *clk;
- struct gpio_desc *power;
enum jz4740_mmc_version version;
@@ -743,6 +739,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
jz_mmc_prepare_data_transfer(host);
+ /* fall through */
case JZ4740_MMC_STATE_TRANSFER_DATA:
if (host->use_dma) {
@@ -777,6 +774,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
}
jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
+ /* fall through */
case JZ4740_MMC_STATE_SEND_STOP:
if (!req->stop)
@@ -894,16 +892,16 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_UP:
jz4740_mmc_reset(host);
- if (host->power)
- gpiod_set_value(host->power, 1);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
host->cmdat |= JZ_MMC_CMDAT_INIT;
clk_prepare_enable(host->clk);
break;
case MMC_POWER_ON:
break;
default:
- if (host->power)
- gpiod_set_value(host->power, 0);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
clk_disable_unprepare(host->clk);
break;
}
@@ -936,38 +934,6 @@ static const struct mmc_host_ops jz4740_mmc_ops = {
.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};
-static int jz4740_mmc_request_gpios(struct jz4740_mmc_host *host,
- struct mmc_host *mmc,
- struct platform_device *pdev)
-{
- struct jz4740_mmc_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- if (!pdata)
- return 0;
-
- if (!pdata->card_detect_active_low)
- mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
- if (!pdata->read_only_active_low)
- mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
- /*
- * Get optional card detect and write protect GPIOs,
- * only back out on probe deferral.
- */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- host->power = devm_gpiod_get_optional(&pdev->dev, "power",
- GPIOD_OUT_HIGH);
- return PTR_ERR_OR_ZERO(host->power);
-}
-
static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
@@ -982,9 +948,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
struct mmc_host *mmc;
struct jz4740_mmc_host *host;
const struct of_device_id *match;
- struct jz4740_mmc_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
if (!mmc) {
@@ -993,29 +956,25 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
}
host = mmc_priv(mmc);
- host->pdata = pdata;
match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
if (match) {
host->version = (enum jz4740_mmc_version)match->data;
- ret = mmc_of_parse(mmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "could not parse of data: %d\n", ret);
- goto err_free_host;
- }
} else {
/* JZ4740 should be the only one using legacy probe */
host->version = JZ_MMC_JZ4740;
- mmc->caps |= MMC_CAP_SDIO_IRQ;
- if (!(pdata && pdata->data_1bit))
- mmc->caps |= MMC_CAP_4_BIT_DATA;
- ret = jz4740_mmc_request_gpios(host, mmc, pdev);
- if (ret)
- goto err_free_host;
}
+ ret = mmc_of_parse(mmc);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "could not parse device properties: %d\n", ret);
+ goto err_free_host;
+ }
+
+ mmc_regulator_get_supply(mmc);
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = host->irq;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c2690c1a50ff..2eba507790e4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -179,6 +179,8 @@ struct meson_host {
struct sd_emmc_desc *descs;
dma_addr_t descs_dma_addr;
+ int irq;
+
bool vqmmc_enabled;
};
@@ -738,6 +740,11 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct meson_host *host = mmc_priv(mmc);
+ int adj = 0;
+
+ /* enable signal resampling w/o delay */
+ adj = ADJUST_ADJ_EN;
+ writel(adj, host->regs + host->data->adjust);
return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}
@@ -768,6 +775,9 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ /* disable signal resampling */
+ writel(0, host->regs + host->data->adjust);
+
/* Reset rx phase */
clk_set_phase(host->rx_clk, 0);
@@ -1166,7 +1176,7 @@ static int meson_mmc_get_cd(struct mmc_host *mmc)
static void meson_mmc_cfg_init(struct meson_host *host)
{
- u32 cfg = 0, adj = 0;
+ u32 cfg = 0;
cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
@@ -1177,10 +1187,6 @@ static void meson_mmc_cfg_init(struct meson_host *host)
cfg |= CFG_ERR_ABORT;
writel(cfg, host->regs + SD_EMMC_CFG);
-
- /* enable signal resampling w/o delay */
- adj = ADJUST_ADJ_EN;
- writel(adj, host->regs + host->data->adjust);
}
static int meson_mmc_card_busy(struct mmc_host *mmc)
@@ -1231,7 +1237,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
struct resource *res;
struct meson_host *host;
struct mmc_host *mmc;
- int ret, irq;
+ int ret;
mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
if (!mmc)
@@ -1276,8 +1282,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
goto free_host;
}
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto free_host;
@@ -1331,9 +1337,9 @@ static int meson_mmc_probe(struct platform_device *pdev)
writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
host->regs + SD_EMMC_IRQ_EN);
- ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
- meson_mmc_irq_thread, IRQF_SHARED,
- NULL, host);
+ ret = request_threaded_irq(host->irq, meson_mmc_irq,
+ meson_mmc_irq_thread, IRQF_SHARED,
+ dev_name(&pdev->dev), host);
if (ret)
goto err_init_clk;
@@ -1351,7 +1357,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
if (host->bounce_buf == NULL) {
dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
ret = -ENOMEM;
- goto err_init_clk;
+ goto err_free_irq;
}
host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
@@ -1370,6 +1376,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
err_bounce_buf:
dma_free_coherent(host->dev, host->bounce_buf_size,
host->bounce_buf, host->bounce_dma_addr);
+err_free_irq:
+ free_irq(host->irq, host);
err_init_clk:
clk_disable_unprepare(host->mmc_clk);
err_core_clk:
@@ -1387,6 +1395,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
/* disable interrupts */
writel(0, host->regs + SD_EMMC_IRQ_EN);
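+ /* the IRQ is no longer devm-managed, so release it before freeing the DMA buffers it may touch */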
+ free_irq(host->irq, host);
dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
host->descs, host->descs_dma_addr);
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 10ba46b728e8..1b1498805972 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1450,9 +1450,10 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
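+ /* trigger an initial card-detect scan in case a card is already inserted */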
+ mmc_detect_change(mmc, 0);
/* Index 1 is write protect/read only */
- status = mmc_gpiod_request_ro(mmc, NULL, 1, false, 0, NULL);
+ status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e352f5ad5801..387ff14587b8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1127,6 +1127,12 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
writel(c, base + MMCICOMMAND);
}
+static void mmci_stop_command(struct mmci_host *host)
+{
+ host->stop_abort.error = 0;
+ mmci_start_command(host, &host->stop_abort, 0);
+}
+
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
@@ -1196,10 +1202,16 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
/* The error clause is handled above, success! */
data->bytes_xfered = data->blksz * data->blocks;
- if (!data->stop || (host->mrq->sbc && !data->error))
+ if (!data->stop) {
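+ /* on variants with cmdreg_stop, send the prepared CMD12 to clear the DPSM after a data error */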
+ if (host->variant->cmdreg_stop && data->error)
+ mmci_stop_command(host);
+ else
+ mmci_request_end(host, data->mrq);
+ } else if (host->mrq->sbc && !data->error) {
mmci_request_end(host, data->mrq);
- else
+ } else {
mmci_start_command(host, data->stop, 0);
+ }
}
}
@@ -1298,6 +1310,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
mmci_dma_error(host);
mmci_stop_data(host);
+ if (host->variant->cmdreg_stop && cmd->error) {
+ mmci_stop_command(host);
+ return;
+ }
}
mmci_request_end(host, host->mrq);
} else if (sbc) {
@@ -1956,6 +1972,11 @@ static int mmci_probe(struct amba_device *dev,
mmc->max_busy_timeout = 0;
}
+ /* Prepare a CMD12 - needed to clear the DPSM on some variants. */
+ host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
+ host->stop_abort.arg = 0;
+ host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
mmc->ops = &mmci_ops;
/* We support these PM capabilities. */
@@ -2011,7 +2032,7 @@ static int mmci_probe(struct amba_device *dev,
if (ret == -EPROBE_DEFER)
goto clk_disable;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
if (ret == -EPROBE_DEFER)
goto clk_disable;
}
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 24229097d05c..14df81054438 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -377,6 +377,7 @@ struct mmci_host {
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
+ struct mmc_command stop_abort;
struct mmc_data *data;
struct mmc_host *mmc;
struct clk *clk;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8afeaf81ae66..833ef0590af8 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (timing == MMC_TIMING_MMC_HS400 &&
host->dev_comp->hs400_tune)
- sdr_set_field(host->base + PAD_CMD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 4d17032d15ee..d54612257b06 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,14 +31,12 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include <asm/dma.h>
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index add1e70195ea..4f06fb03c0a2 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -25,7 +25,6 @@
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -39,7 +38,6 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
-#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index b294b221f225..8a274b91804e 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -61,9 +61,6 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
struct device *dev = &spi->dev;
struct device_node *np = dev->of_node;
struct of_mmc_spi *oms;
- const __be32 *voltage_ranges;
- int num_ranges;
- int i;
if (dev->platform_data || !np)
return dev->platform_data;
@@ -72,25 +69,8 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
if (!oms)
return NULL;
- voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
- num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
- if (!voltage_ranges || !num_ranges) {
- dev_err(dev, "OF: voltage-ranges unspecified\n");
+ if (mmc_of_parse_voltage(np, &oms->pdata.ocr_mask) <= 0)
goto err_ocr;
- }
-
- for (i = 0; i < num_ranges; i++) {
- const int j = i * 2;
- u32 mask;
-
- mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
- be32_to_cpu(voltage_ranges[j + 1]));
- if (!mask) {
- dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
- goto err_ocr;
- }
- oms->pdata.ocr_mask |= mask;
- }
oms->detect_irq = irq_of_parse_and_map(np, 0);
if (oms->detect_irq != 0) {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index c60a7625b1fa..b2873a2432b6 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques
reg &= ~(1 << 5);
OMAP_MMC_WRITE(host, SDIO, reg);
/* Set maximum timeout */
- OMAP_MMC_WRITE(host, CTO, 0xff);
+ OMAP_MMC_WRITE(host, CTO, 0xfd);
}
static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 8779bbaa6b69..c907bf502a12 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -743,7 +743,7 @@ static int pxamci_probe(struct platform_device *pdev)
goto out;
}
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_ro\n");
goto out;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index da1e49c45bec..8394a7bb1fc1 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -15,6 +15,7 @@
struct renesas_sdhi_scc {
unsigned long clk_rate; /* clock rate for SDR104 */
u32 tap; /* sampling clock position for SDR104 */
+ u32 tap_hs400; /* sampling clock position for HS400 */
};
struct renesas_sdhi_of_data {
@@ -49,6 +50,7 @@ struct renesas_sdhi {
struct pinctrl_state *pins_default, *pins_uhs;
void __iomem *scc_ctl;
u32 scc_tappos;
+ u32 scc_tappos_hs400;
};
#define host_to_priv(host) \
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 31a351a20dc0..71e13844df6c 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -337,6 +337,10 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
/* Set HS400 mode */
sd_ctrl_write16(host, CTL_SDIF_MODE, 0x0001 |
sd_ctrl_read16(host, CTL_SDIF_MODE));
+
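+ /* switch to the HS400-specific sampling clock position */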
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
+ priv->scc_tappos_hs400);
+
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
@@ -396,6 +400,9 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
/* Reset HS400 mode */
sd_ctrl_write16(host, CTL_SDIF_MODE, ~0x0001 &
sd_ctrl_read16(host, CTL_SDIF_MODE));
+
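+ /* restore the default SDR104 sampling clock position */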
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
+
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
~(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) &
@@ -723,6 +730,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
+
+ /* SDR and HS200/400 registers require HW reset */
+ if (of_data && of_data->scc_offset) {
+ priv->scc_ctl = host->ctl + of_data->scc_offset;
+ host->mmc->caps |= MMC_CAP_HW_RESET;
+ host->hw_reset = renesas_sdhi_hw_reset;
+ }
}
/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -775,12 +789,11 @@ int renesas_sdhi_probe(struct platform_device *pdev,
const struct renesas_sdhi_scc *taps = of_data->taps;
bool hit = false;
- host->mmc->caps |= MMC_CAP_HW_RESET;
-
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
priv->scc_tappos = taps->tap;
+ priv->scc_tappos_hs400 = taps->tap_hs400;
hit = true;
break;
}
@@ -789,12 +802,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!hit)
dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
- priv->scc_ctl = host->ctl + of_data->scc_offset;
host->init_tuning = renesas_sdhi_init_tuning;
host->prepare_tuning = renesas_sdhi_prepare_tuning;
host->select_tuning = renesas_sdhi_select_tuning;
host->check_scc_error = renesas_sdhi_check_scc_error;
- host->hw_reset = renesas_sdhi_hw_reset;
host->prepare_hs400_tuning =
renesas_sdhi_prepare_hs400_tuning;
host->hs400_downgrade = renesas_sdhi_disable_scc;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 92c9b15252da..9dfafa2a90a3 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -81,6 +81,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
{
.clk_rate = 0,
.tap = 0x00000300,
+ .tap_hs400 = 0x00000704,
},
};
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 8471160316e0..02cd878e209f 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.scc_offset = 0x0300,
.taps = rcar_gen2_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
+ .max_blk_count = 0xffffffff,
};
/* Definitions for sampling clocks */
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 10f5219b3b40..f31333e831a7 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1530,7 +1530,7 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host)
return ret;
}
- ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
ret);
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index bdbd4897c0f7..a6c2bd202b45 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -18,12 +18,10 @@
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 552bddc5096c..1cd10356fc14 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -55,7 +55,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
}
sdhci_get_of_property(pdev);
- mmc_of_parse(host->mmc);
+ res = mmc_of_parse(host->mmc);
+ if (res)
+ goto err;
/*
* Supply the existing CAPS, but clear the UHS modes. This
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d0d319398a54..8dbbc1f62b70 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -25,6 +25,7 @@
#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
+#include "cqhci.h"
#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f
#define ESDHC_CTRL_D3CD 0x08
@@ -50,6 +51,7 @@
#define ESDHC_MIX_CTRL_AUTO_TUNE_EN (1 << 24)
#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
#define ESDHC_MIX_CTRL_HS400_EN (1 << 26)
+#define ESDHC_MIX_CTRL_HS400_ES_EN (1 << 27)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
/* Tuning bits */
@@ -76,6 +78,9 @@
#define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1)
#define ESDHC_STROBE_DLL_STS_SLV_LOCK 0x1
+#define ESDHC_VEND_SPEC2 0xc8
+#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ (1 << 8)
+
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
@@ -103,6 +108,9 @@
*/
#define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28)
+/* the address offset of CQHCI */
+#define ESDHC_CQHCI_ADDR_OFFSET 0x100
+
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
* "11" when the STOP CMD12 is issued on imx53 to abort one
@@ -138,51 +146,71 @@
#define ESDHC_FLAG_HS200 BIT(8)
/* The IP supports HS400 mode */
#define ESDHC_FLAG_HS400 BIT(9)
-
-/* A clock frequency higher than this rate requires strobe dll control */
-#define ESDHC_STROBE_DLL_CLK_FREQ 100000000
+/*
+ * The IP has erratum ERR010450
+ * uSDHC: due to the I/O timing limit, the SD card clock can't exceed
+ * 150MHz in SDR mode or 45MHz in DDR mode.
+ */
+#define ESDHC_FLAG_ERR010450 BIT(10)
+/* The IP supports HS400ES mode */
+#define ESDHC_FLAG_HS400_ES BIT(11)
+/* The IP has Host Controller Interface for Command Queuing */
+#define ESDHC_FLAG_CQHCI BIT(12)
struct esdhc_soc_data {
u32 flags;
};
-static struct esdhc_soc_data esdhc_imx25_data = {
+static const struct esdhc_soc_data esdhc_imx25_data = {
.flags = ESDHC_FLAG_ERR004536,
};
-static struct esdhc_soc_data esdhc_imx35_data = {
+static const struct esdhc_soc_data esdhc_imx35_data = {
.flags = ESDHC_FLAG_ERR004536,
};
-static struct esdhc_soc_data esdhc_imx51_data = {
+static const struct esdhc_soc_data esdhc_imx51_data = {
.flags = 0,
};
-static struct esdhc_soc_data esdhc_imx53_data = {
+static const struct esdhc_soc_data esdhc_imx53_data = {
.flags = ESDHC_FLAG_MULTIBLK_NO_INT,
};
-static struct esdhc_soc_data usdhc_imx6q_data = {
+static const struct esdhc_soc_data usdhc_imx6q_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING,
};
-static struct esdhc_soc_data usdhc_imx6sl_data = {
+static const struct esdhc_soc_data usdhc_imx6sl_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
| ESDHC_FLAG_HS200,
};
-static struct esdhc_soc_data usdhc_imx6sx_data = {
+static const struct esdhc_soc_data usdhc_imx6sx_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
};
-static struct esdhc_soc_data usdhc_imx7d_data = {
+static const struct esdhc_soc_data usdhc_imx6ull_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_ERR010450,
+};
+
+static const struct esdhc_soc_data usdhc_imx7d_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400,
};
+static struct esdhc_soc_data usdhc_imx8qxp_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ | ESDHC_FLAG_CQHCI,
+};
+
struct pltfm_imx_data {
u32 scratchpad;
struct pinctrl *pinctrl;
@@ -227,7 +255,9 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
+ { .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, },
{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
+ { .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -733,6 +763,14 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
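+ /* ERR010450: cap the card clock at 150MHz in SDR mode and 45MHz in DDR mode */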
+ if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
+ unsigned int max_clock;
+
+ max_clock = imx_data->is_ddr ? 45000000 : 150000000;
+
+ clock = min(clock, max_clock);
+ }
+
while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
pre_div < 256)
pre_div *= 2;
@@ -801,6 +839,20 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
SDHCI_HOST_CONTROL);
}
+static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /*
+ * The i.MX uSDHC already uses a fixed, optimized internal timing for
+ * DDR50 and normally does not require tuning for DDR50 mode.
+ */
+ if (host->timing == MMC_TIMING_UHS_DDR50)
+ return 0;
+
+ return sdhci_execute_tuning(mmc, opcode);
+}
+
static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
{
u32 reg;
@@ -864,6 +916,19 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
return ret;
}
+static void esdhc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 m;
+
+ m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (ios->enhanced_strobe)
+ m |= ESDHC_MIX_CTRL_HS400_ES_EN;
+ else
+ m &= ~ESDHC_MIX_CTRL_HS400_ES_EN;
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+}
+
static int esdhc_change_pinstate(struct sdhci_host *host,
unsigned int uhs)
{
@@ -905,39 +970,35 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
* edge of data_strobe line. Due to the time delay between CLK line and
* data_strobe line, if the delay time is larger than one clock cycle,
* then CLK and data_strobe line will be misaligned, read error shows up.
- * So when the CLK is higher than 100MHz, each clock cycle is short enough,
- * host should configure the delay target.
*/
static void esdhc_set_strobe_dll(struct sdhci_host *host)
{
u32 v;
- if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) {
- /* disable clock before enabling strobe dll */
- writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
- ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
- host->ioaddr + ESDHC_VENDOR_SPEC);
+ /* disable clock before enabling strobe dll */
+ writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
+ ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
- /* force a reset on strobe dll */
- writel(ESDHC_STROBE_DLL_CTRL_RESET,
- host->ioaddr + ESDHC_STROBE_DLL_CTRL);
- /*
- * enable strobe dll ctrl and adjust the delay target
- * for the uSDHC loopback read clock
- */
- v = ESDHC_STROBE_DLL_CTRL_ENABLE |
- (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
- writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
- /* wait 1us to make sure strobe dll status register stable */
- udelay(1);
- v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
- if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
- dev_warn(mmc_dev(host->mmc),
- "warning! HS400 strobe DLL status REF not lock!\n");
- if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
- dev_warn(mmc_dev(host->mmc),
- "warning! HS400 strobe DLL status SLV not lock!\n");
- }
+ /* force a reset on strobe dll */
+ writel(ESDHC_STROBE_DLL_CTRL_RESET,
+ host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /*
+ * enable strobe dll ctrl and adjust the delay target
+ * for the uSDHC loopback read clock
+ */
+ v = ESDHC_STROBE_DLL_CTRL_ENABLE |
+ (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
+ writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /* wait 1us to make sure strobe dll status register stable */
+ udelay(1);
+ v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
+ if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
+ dev_warn(mmc_dev(host->mmc),
+ "warning! HS400 strobe DLL status REF not lock!\n");
+ if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
+ dev_warn(mmc_dev(host->mmc),
+ "warning! HS400 strobe DLL status SLV not lock!\n");
}
static void esdhc_reset_tuning(struct sdhci_host *host)
@@ -979,6 +1040,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
case MMC_TIMING_UHS_SDR25:
case MMC_TIMING_UHS_SDR50:
case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS:
case MMC_TIMING_MMC_HS200:
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
break;
@@ -1042,6 +1104,19 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
SDHCI_TIMEOUT_CONTROL);
}
+static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
@@ -1058,6 +1133,7 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.set_bus_width = esdhc_pltfm_set_bus_width,
.set_uhs_signaling = esdhc_set_uhs_signaling,
.reset = esdhc_reset,
+ .irq = esdhc_cqhci_irq,
};
static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -1095,16 +1171,34 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
| ESDHC_BURST_LEN_EN_INCR,
host->ioaddr + SDHCI_HOST_CONTROL);
+
/*
- * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
- * TO1.1, it's harmless for MX6SL
- */
- writel(readl(host->ioaddr + 0x6c) | BIT(7),
+ * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+ * TO1.1, it's harmless for MX6SL
+ */
+ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
host->ioaddr + 0x6c);
/* disable DLL_CTRL delay line settings */
writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
+ /*
+ * For commands with busy signalling, setting the
+ * ESDHC_VEND_SPEC2_EN_BUSY_IRQ bit makes the USDHC generate a
+ * transfer complete interrupt when busy is deasserted.
+ * When CQHCI uses DCMD to send a command that needs an R1b
+ * response, ESDHC_VEND_SPEC2_EN_BUSY_IRQ must be set, otherwise
+ * DCMD will always time out waiting for the hardware interrupt.
+ */
+ if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) {
+ tmp = readl(host->ioaddr + ESDHC_VEND_SPEC2);
+ tmp |= ESDHC_VEND_SPEC2_EN_BUSY_IRQ;
+ writel(tmp, host->ioaddr + ESDHC_VEND_SPEC2);
+
+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+ }
+
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
tmp |= ESDHC_STD_TUNING_EN |
@@ -1120,10 +1214,81 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
<< ESDHC_TUNING_STEP_SHIFT;
}
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ /*
+ * ESDHC_STD_TUNING_EN may have been configured by the bootloader
+ * or ROM code, so clear this bit here to make sure
+ * manual tuning can work.
+ */
+ tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ tmp &= ~ESDHC_STD_TUNING_EN;
+ writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
}
}
}
+static void esdhc_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 reg;
+ u16 mode;
+ int count = 10;
+
+ /*
+ * CQE gets stuck if it sees Buffer Read Enable bit set, which can be
+ * the case after tuning, so ensure the buffer is drained.
+ */
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ if (count-- == 0) {
+ dev_warn(mmc_dev(host->mmc),
+ "CQE may get stuck because the Buffer Read Enable bit is set\n");
+ break;
+ }
+ mdelay(1);
+ }
+
+ /*
+ * Runtime resume will reset the entire host controller, which
+ * will also clear the DMAEN/BCEN of register ESDHC_MIX_CTRL.
+ * Set DMAEN and BCEN again here when enabling CMDQ.
+ */
+ mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ mode |= SDHCI_TRNS_DMA;
+ if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
+ mode |= SDHCI_TRNS_BLK_CNT_EN;
+ sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+
+ /*
+ * Runtime resume resets the entire host controller but does not
+ * affect the CQHCI side, so clear the HALT bit here to avoid
+ * CQHCI getting stuck on the first request after system resume.
+ */
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
+ dev_err(mmc_dev(host->mmc),
+ "failed to exit halt state when enabling CQE\n");
+
+ sdhci_cqe_enable(mmc);
+}
+
+static void esdhc_sdhci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static const struct cqhci_host_ops esdhc_cqhci_ops = {
+ .enable = esdhc_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = esdhc_sdhci_dumpregs,
+};
+
#ifdef CONFIG_OF
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
@@ -1200,7 +1365,7 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
host->mmc->parent->platform_data);
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
- err = mmc_gpiod_request_ro(host->mmc, "wp", 0, false, 0, NULL);
+ err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request write-protect gpio!\n");
@@ -1255,6 +1420,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
of_match_device(imx_esdhc_dt_ids, &pdev->dev);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
+ struct cqhci_host *cq_host;
int err;
struct pltfm_imx_data *imx_data;
@@ -1321,6 +1487,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+
+ /*
+ * Link the uSDHC-specific mmc_host_ops execute_tuning function
+ * to replace the standard one in sdhci_ops.
+ */
+ host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
@@ -1333,6 +1505,28 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
+ if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+ host->mmc->caps2 |= MMC_CAP2_HS400_ES;
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ esdhc_hs400_enhanced_strobe;
+ }
+
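+ /* set up the command queue engine at its vendor-specific register offset */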
+ if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) {
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ err = -ENOMEM;
+ goto disable_ahb_clk;
+ }
+
+ cq_host->mmio = host->ioaddr + ESDHC_CQHCI_ADDR_OFFSET;
+ cq_host->ops = &esdhc_cqhci_ops;
+
+ err = cqhci_init(cq_host, host->mmc, false);
+ if (err)
+ goto disable_ahb_clk;
+ }
+
if (of_id)
err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
else
@@ -1340,6 +1534,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ahb_clk;
+ host->tuning_delay = 1;
+
sdhci_esdhc_imx_hwinit(host);
err = sdhci_add_host(host);
@@ -1391,6 +1587,13 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
static int sdhci_esdhc_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ int ret;
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
@@ -1401,11 +1604,19 @@ static int sdhci_esdhc_suspend(struct device *dev)
static int sdhci_esdhc_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ int ret;
/* re-initialize hw state in case it's lost in low power mode */
sdhci_esdhc_imx_hwinit(host);
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret)
+ return ret;
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ ret = cqhci_resume(host->mmc);
+
+ return ret;
}
#endif
@@ -1417,6 +1628,12 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_runtime_suspend_host(host);
if (ret)
return ret;
@@ -1460,7 +1677,10 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
if (err)
goto disable_ipg_clk;
- return 0;
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ err = cqhci_resume(host->mmc);
+
+ return err;
disable_ipg_clk:
if (!sdhci_sdio_irq_enabled(host))
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0db99057c44f..9d12c06c7fd6 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
iproc_host->data = iproc_data;
- mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
sdhci_get_property(pdev);
host->mmc->caps |= iproc_host->data->mmc_caps;
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index c11c18a9aacb..b1a66ca3821a 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1097,7 +1097,6 @@ static int sdhci_omap_probe(struct platform_device *pdev)
goto err_put_sync;
}
- host->mmc_host_ops.get_ro = mmc_gpio_get_ro;
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_omap_start_signal_voltage_switch;
host->mmc_host_ops.set_ios = sdhci_omap_set_ios;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 2a6eba74b94e..99b0fec2836b 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1257,16 +1257,6 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
}
#endif
-static const struct sdhci_pci_fixes sdhci_o2 = {
- .probe = sdhci_pci_o2_probe,
- .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
- .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
- .probe_slot = sdhci_pci_o2_probe_slot,
-#ifdef CONFIG_PM_SLEEP
- .resume = sdhci_pci_o2_resume,
-#endif
-};
-
static const struct sdhci_pci_fixes sdhci_jmicron = {
.probe = jmicron_probe,
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index cc3ffeffd7a2..05a012a694b2 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -60,6 +60,13 @@
#define O2_SD_VENDOR_SETTING2 0x1C8
#define O2_SD_HW_TUNING_DISABLE BIT(4)
+#define O2_PLL_WDT_CONTROL1 0x1CC
+#define O2_PLL_FORCE_ACTIVE BIT(18)
+#define O2_PLL_LOCK_STATUS BIT(14)
+#define O2_PLL_SOFT_RESET BIT(12)
+
+#define O2_SD_DETECT_SETTING 0x324
+
static void sdhci_o2_set_tuning_mode(struct sdhci_host *host)
{
u16 reg;
@@ -283,6 +290,113 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
host->irq = pci_irq_vector(chip->pdev, 0);
}
+static void sdhci_o2_wait_card_detect_stable(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u32 scratch32;
+
+ /* Wait max 50 ms */
+ timeout = ktime_add_ms(ktime_get(), 50);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ scratch32 = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ if ((scratch32 & SDHCI_CARD_PRESENT) >> SDHCI_CARD_PRES_SHIFT
+ == (scratch32 & SDHCI_CD_LVL) >> SDHCI_CD_LVL_SHIFT)
+ break;
+
+ if (timedout) {
+ pr_err("%s: Card Detect debounce never finished.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ udelay(10);
+ }
+}
+
+static void sdhci_o2_enable_internal_clock(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u16 scratch;
+ u32 scratch32;
+
+ /* PLL software reset */
+ scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1);
+ scratch32 |= O2_PLL_SOFT_RESET;
+ sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
+ udelay(1);
+ scratch32 &= ~(O2_PLL_SOFT_RESET);
+ sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
+
+ /* PLL force active */
+ scratch32 |= O2_PLL_FORCE_ACTIVE;
+ sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ scratch = sdhci_readw(host, O2_PLL_WDT_CONTROL1);
+ if (scratch & O2_PLL_LOCK_STATUS)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ goto out;
+ }
+ udelay(10);
+ }
+
+ /* Wait for card detect finish */
+ udelay(1);
+ sdhci_o2_wait_card_detect_stable(host);
+
+out:
+ /* Cancel PLL force active */
+ scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1);
+ scratch32 &= ~O2_PLL_FORCE_ACTIVE;
+ sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
+}
+
+static int sdhci_o2_get_cd(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_o2_enable_internal_clock(host);
+
+ return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+}
+
+static void sdhci_o2_enable_clk(struct sdhci_host *host, u16 clk)
+{
+ /* Enable internal clock */
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
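+ /* only enable the card clock when a card is actually present */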
+ if (sdhci_o2_get_cd(host->mmc)) {
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ sdhci_o2_enable_clk(host, clk);
+}
+
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
@@ -314,9 +428,14 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
mmc_hostname(host->mmc));
host->flags &= ~SDHCI_SIGNALING_330;
host->flags |= SDHCI_SIGNALING_180;
+ host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
host->mmc->caps2 |= MMC_CAP2_NO_SD;
host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_DETECT_SETTING, 3);
}
+
+ slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
}
host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
@@ -490,9 +609,6 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
case PCI_DEVICE_ID_O2_SEABIRD0:
- if (chip->pdev->revision == 0x01)
- chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
- /* fall through */
case PCI_DEVICE_ID_O2_SEABIRD1:
/* UnLock WP */
ret = pci_read_config_byte(chip->pdev,
@@ -550,3 +666,21 @@ int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
return sdhci_pci_resume_host(chip);
}
#endif
+
+static const struct sdhci_ops sdhci_pci_o2_ops = {
+ .set_clock = sdhci_pci_o2_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+const struct sdhci_pci_fixes sdhci_o2 = {
+ .probe = sdhci_pci_o2_probe,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = sdhci_pci_o2_probe_slot,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_o2_resume,
+#endif
+ .ops = &sdhci_pci_o2_ops,
+};
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 2ef0bdca9197..4ddb69a15cd7 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -179,13 +179,9 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
int sdhci_pci_enable_dma(struct sdhci_host *host);
-int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
-int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
-#ifdef CONFIG_PM_SLEEP
-int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
-#endif
extern const struct sdhci_pci_fixes sdhci_arasan;
extern const struct sdhci_pci_fixes sdhci_snps;
+extern const struct sdhci_pci_fixes sdhci_o2;
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 2c3827f54927..cdc8e16b4567 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -23,7 +23,6 @@
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/pxa_sdhci.h>
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index e6ace31e2a41..32e62904c0d3 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -33,6 +33,7 @@
#include <linux/ktime.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100
@@ -75,6 +76,7 @@
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31)
+#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000
#define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31)
@@ -89,6 +91,9 @@
#define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
+/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
+#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
+
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
u32 nvquirks;
@@ -121,6 +126,8 @@ struct sdhci_tegra {
struct pinctrl *pinctrl_sdmmc;
struct pinctrl_state *pinctrl_state_3v3;
struct pinctrl_state *pinctrl_state_1v8;
+ struct pinctrl_state *pinctrl_state_3v3_drv;
+ struct pinctrl_state *pinctrl_state_1v8_drv;
struct sdhci_tegra_autocal_offsets autocal_offsets;
ktime_t last_calib;
@@ -128,6 +135,7 @@ struct sdhci_tegra {
u32 default_tap;
u32 default_trim;
u32 dqs_trim;
+ bool enable_hwcq;
};
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
@@ -237,11 +245,6 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
}
}
-static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
-{
- return mmc_gpio_get_ro(host->mmc);
-}
-
static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -411,6 +414,76 @@ static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
}
+static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
+ bool state_drvupdn)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_tegra_autocal_offsets *offsets =
+ &tegra_host->autocal_offsets;
+ struct pinctrl_state *pinctrl_drvupdn = NULL;
+ int ret = 0;
+ u8 drvup = 0, drvdn = 0;
+ u32 reg;
+
+ if (!state_drvupdn) {
+ /* PADS Drive Strength */
+ if (voltage == MMC_SIGNAL_VOLTAGE_180) {
+ if (tegra_host->pinctrl_state_1v8_drv) {
+ pinctrl_drvupdn =
+ tegra_host->pinctrl_state_1v8_drv;
+ } else {
+ drvup = offsets->pull_up_1v8_timeout;
+ drvdn = offsets->pull_down_1v8_timeout;
+ }
+ } else {
+ if (tegra_host->pinctrl_state_3v3_drv) {
+ pinctrl_drvupdn =
+ tegra_host->pinctrl_state_3v3_drv;
+ } else {
+ drvup = offsets->pull_up_3v3_timeout;
+ drvdn = offsets->pull_down_3v3_timeout;
+ }
+ }
+
+ if (pinctrl_drvupdn != NULL) {
+ ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
+ pinctrl_drvupdn);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "failed pads drvupdn, ret: %d\n", ret);
+ } else if ((drvup) || (drvdn)) {
+ reg = sdhci_readl(host,
+ SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
+ reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
+ reg |= (drvup << 20) | (drvdn << 12);
+ sdhci_writel(host, reg,
+ SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
+ }
+
+ } else {
+ /* Dual Voltage PADS Voltage selection */
+ if (!tegra_host->pad_control_available)
+ return 0;
+
+ if (voltage == MMC_SIGNAL_VOLTAGE_180) {
+ ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
+ tegra_host->pinctrl_state_1v8);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "setting 1.8V failed, ret: %d\n", ret);
+ } else {
+ ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
+ tegra_host->pinctrl_state_3v3);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "setting 3.3V failed, ret: %d\n", ret);
+ }
+ }
+
+ return ret;
+}
+
static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -437,6 +510,7 @@ static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
}
+ /* Set initial offset before auto-calibration */
tegra_sdhci_set_pad_autocal_offset(host, pdpu);
card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
@@ -460,19 +534,15 @@ static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
if (ret) {
dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
- pdpu = offsets.pull_down_1v8_timeout << 8 |
- offsets.pull_up_1v8_timeout;
- else
- pdpu = offsets.pull_down_3v3_timeout << 8 |
- offsets.pull_up_3v3_timeout;
-
- /* Disable automatic calibration and use fixed offsets */
+ /* Disable automatic calibration and use fixed drive strengths */
reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
reg &= ~SDHCI_AUTO_CAL_ENABLE;
sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
- tegra_sdhci_set_pad_autocal_offset(host, pdpu);
+ ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "Setting drive strengths failed: %d\n", ret);
}
}
@@ -511,26 +581,46 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
&autocal->pull_up_3v3_timeout);
- if (err)
+ if (err) {
+ if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
+ (tegra_host->pinctrl_state_3v3_drv == NULL))
+ pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
+ mmc_hostname(host->mmc));
autocal->pull_up_3v3_timeout = 0;
+ }
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
&autocal->pull_down_3v3_timeout);
- if (err)
+ if (err) {
+ if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
+ (tegra_host->pinctrl_state_3v3_drv == NULL))
+ pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
+ mmc_hostname(host->mmc));
autocal->pull_down_3v3_timeout = 0;
+ }
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
&autocal->pull_up_1v8_timeout);
- if (err)
+ if (err) {
+ if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
+ (tegra_host->pinctrl_state_1v8_drv == NULL))
+ pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
+ mmc_hostname(host->mmc));
autocal->pull_up_1v8_timeout = 0;
+ }
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
&autocal->pull_down_1v8_timeout);
- if (err)
+ if (err) {
+ if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
+ (tegra_host->pinctrl_state_1v8_drv == NULL))
+ pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
+ mmc_hostname(host->mmc));
autocal->pull_down_1v8_timeout = 0;
+ }
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-sdr104",
@@ -595,6 +685,20 @@ static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
tegra_host->dqs_trim = 0x11;
}
+static void tegra_sdhci_parse_dt(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
+ tegra_host->enable_hwcq = true;
+ else
+ tegra_host->enable_hwcq = false;
+
+ tegra_sdhci_parse_pad_autocal_dt(host);
+ tegra_sdhci_parse_tap_and_trim(host);
+}
+
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -743,32 +847,6 @@ static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
return mmc_send_tuning(host->mmc, opcode, NULL);
}
-static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
- int ret;
-
- if (!tegra_host->pad_control_available)
- return 0;
-
- if (voltage == MMC_SIGNAL_VOLTAGE_180) {
- ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
- tegra_host->pinctrl_state_1v8);
- if (ret < 0)
- dev_err(mmc_dev(host->mmc),
- "setting 1.8V failed, ret: %d\n", ret);
- } else {
- ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
- tegra_host->pinctrl_state_3v3);
- if (ret < 0)
- dev_err(mmc_dev(host->mmc),
- "setting 3.3V failed, ret: %d\n", ret);
- }
-
- return ret;
-}
-
static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -778,7 +856,7 @@ static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
int ret = 0;
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
- ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage);
+ ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
if (ret < 0)
return ret;
ret = sdhci_start_signal_voltage_switch(mmc, ios);
@@ -786,7 +864,7 @@ static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
ret = sdhci_start_signal_voltage_switch(mmc, ios);
if (ret < 0)
return ret;
- ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage);
+ ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
}
if (tegra_host->pad_calib_required)
@@ -805,6 +883,20 @@ static int tegra_sdhci_init_pinctrl_info(struct device *dev,
return -1;
}
+ tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
+ tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
+ if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
+ if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
+ tegra_host->pinctrl_state_1v8_drv = NULL;
+ }
+
+ tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
+ tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
+ if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
+ if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
+ tegra_host->pinctrl_state_3v3_drv = NULL;
+ }
+
tegra_host->pinctrl_state_3v3 =
pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
@@ -836,8 +928,50 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
tegra_host->pad_calib_required = true;
}
+static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 cqcfg = 0;
+
+ /*
+ * Tegra SDMMC Controller design prevents write access to BLOCK_COUNT
+ * registers when CQE is enabled.
+ */
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ if (cqcfg & CQHCI_ENABLE)
+ cqhci_writel(cq_host, (cqcfg & ~CQHCI_ENABLE), CQHCI_CFG);
+
+ sdhci_cqe_enable(mmc);
+
+ if (cqcfg & CQHCI_ENABLE)
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+}
+
+static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
+ .enable = sdhci_tegra_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_tegra_dumpregs,
+};
+
static const struct sdhci_ops tegra_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = tegra_sdhci_set_clock,
@@ -893,7 +1027,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
};
static const struct sdhci_ops tegra114_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_w = tegra_sdhci_writew,
.write_l = tegra_sdhci_writel,
@@ -947,7 +1080,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
};
static const struct sdhci_ops tegra210_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_w = tegra210_sdhci_writew,
.write_l = tegra_sdhci_writel,
@@ -980,7 +1112,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
};
static const struct sdhci_ops tegra186_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = tegra_sdhci_set_clock,
@@ -989,6 +1120,7 @@ static const struct sdhci_ops tegra186_sdhci_ops = {
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
.voltage_switch = tegra_sdhci_voltage_switch,
.get_max_clock = tegra_sdhci_get_max_clock,
+ .irq = sdhci_tegra_cqhci_irq,
};
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
@@ -1030,6 +1162,54 @@ static const struct of_device_id sdhci_tegra_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
+static int sdhci_tegra_add_host(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!tegra_host->enable_hwcq)
+ return sdhci_add_host(host);
+
+ sdhci_enable_v4_mode(host);
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+
+ cq_host = devm_kzalloc(host->mmc->parent,
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_tegra_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_tegra_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -1077,9 +1257,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
host->mmc->caps |= MMC_CAP_1_8V_DDR;
- tegra_sdhci_parse_pad_autocal_dt(host);
-
- tegra_sdhci_parse_tap_and_trim(host);
+ tegra_sdhci_parse_dt(host);
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
GPIOD_OUT_HIGH);
@@ -1117,7 +1295,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
usleep_range(2000, 4000);
- rc = sdhci_add_host(host);
+ rc = sdhci_tegra_add_host(host);
if (rc)
goto err_add_host;
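
The new sdhci_tegra_add_host() above follows the usual CQHCI bring-up order: set up the SDHCI host, allocate a cqhci_host, point its MMIO window at the CQE register block, call cqhci_init(), then finish registration with __sdhci_add_host(). Below is a minimal sketch of that sequence for a hypothetical driver; MY_CQE_BASE, my_sdhci_add_host() and the cq_ops argument are illustrative names, not part of the patch.

#include <linux/mmc/host.h>
#include "sdhci.h"
#include "cqhci.h"

#define MY_CQE_BASE 0xf000	/* hypothetical CQE register offset */

static int my_sdhci_add_host(struct sdhci_host *host,
			     const struct cqhci_host_ops *cq_ops)
{
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	ret = sdhci_setup_host(host);	/* probe caps, no registration yet */
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(mmc_dev(host->mmc), sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + MY_CQE_BASE;
	cq_host->ops = cq_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);	/* register with the MMC core */
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
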
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 5b5eb53a63d2..8d07ee1b8f08 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -530,7 +530,7 @@ static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host,
ret = true;
break;
}
- /* else: fall through */
+ /* fall through */
default:
reg &= ~XENON_TIMING_ADJUST_SLOW_MODE;
ret = false;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a22e11a65658..a8141ff9be03 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -883,7 +883,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
bool *too_big)
{
u8 count;
- struct mmc_data *data = cmd->data;
+ struct mmc_data *data;
unsigned target_timeout, current_timeout;
*too_big = true;
@@ -897,6 +897,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
return 0xE;
+ /* Unspecified command, assume max */
+ if (cmd == NULL)
+ return 0xE;
+
+ data = cmd->data;
/* Unspecified timeout, assume max */
if (!data && !cmd->busy_timeout)
return 0xE;
@@ -2048,6 +2053,8 @@ static int sdhci_check_ro(struct sdhci_host *host)
is_readonly = 0;
else if (host->ops->get_ro)
is_readonly = host->ops->get_ro(host);
+ else if (mmc_can_gpio_ro(host->mmc))
+ is_readonly = mmc_gpio_get_ro(host->mmc);
else
is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
& SDHCI_WRITE_PROTECT);
@@ -2376,6 +2383,10 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
return -ETIMEDOUT;
}
+ /* Spec does not require a delay between tuning cycles */
+ if (host->tuning_delay > 0)
+ mdelay(host->tuning_delay);
+
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
if (ctrl & SDHCI_CTRL_TUNED_CLK)
@@ -2383,9 +2394,6 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
break;
}
- /* Spec does not require a delay between tuning cycles */
- if (host->tuning_delay > 0)
- mdelay(host->tuning_delay);
}
pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
@@ -3353,7 +3361,14 @@ void sdhci_cqe_enable(struct mmc_host *mmc)
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
ctrl &= ~SDHCI_CTRL_DMA_MASK;
- if (host->flags & SDHCI_USE_64_BIT_DMA)
+ /*
+ * Hosts from v4.10 onwards support the ADMA3 DMA type.
+ * ADMA3 uses integrated descriptors, which suits command queuing
+ * since both command and transfer descriptors are fetched together.
+ */
+ if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
+ ctrl |= SDHCI_CTRL_ADMA3;
+ else if (host->flags & SDHCI_USE_64_BIT_DMA)
ctrl |= SDHCI_CTRL_ADMA64;
else
ctrl |= SDHCI_CTRL_ADMA32;
@@ -3363,7 +3378,7 @@ void sdhci_cqe_enable(struct mmc_host *mmc)
SDHCI_BLOCK_SIZE);
/* Set maximum timeout */
- sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
+ sdhci_set_timeout(host, NULL);
host->ier = host->cqe_ier;
@@ -3763,8 +3778,9 @@ int sdhci_setup_host(struct sdhci_host *host)
* Use zalloc to zero the reserved high 32-bits of 128-bit
* descriptors so that they never need to be written.
*/
- buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
- host->adma_table_sz, &dma, GFP_KERNEL);
+ buf = dma_alloc_coherent(mmc_dev(mmc),
+ host->align_buffer_sz + host->adma_table_sz,
+ &dma, GFP_KERNEL);
if (!buf) {
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
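
Two of the sdhci.c hunks above are related: sdhci_calc_timeout() now tolerates a NULL command (returning the maximum 0xE value), which lets sdhci_cqe_enable() request the maximum timeout via sdhci_set_timeout(host, NULL); and sdhci_check_ro() gains a GPIO-based probe between the driver callback and the register read. The sketch below shows only the resulting write-protect lookup order, omitting the quirk and wp_inverted handling present in the real sdhci_check_ro(); my_host_read_only() is an illustrative name.

#include <linux/mmc/slot-gpio.h>
#include "sdhci.h"

/* Lookup order after the patch:
 * 1) driver ->get_ro() callback,
 * 2) a write-protect GPIO parsed by the MMC core,
 * 3) the SDHCI_PRESENT_STATE write-protect bit.
 */
static int my_host_read_only(struct sdhci_host *host)
{
	if (host->ops->get_ro)
		return host->ops->get_ro(host);

	if (mmc_can_gpio_ro(host->mmc))
		return mmc_gpio_get_ro(host->mmc);

	return !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_WRITE_PROTECT);
}
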
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 6cc9a3c2ac66..01002cba1359 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -73,6 +73,10 @@
#define SDHCI_SPACE_AVAILABLE 0x00000400
#define SDHCI_DATA_AVAILABLE 0x00000800
#define SDHCI_CARD_PRESENT 0x00010000
+#define SDHCI_CARD_PRES_SHIFT 16
+#define SDHCI_CD_STABLE 0x00020000
+#define SDHCI_CD_LVL 0x00040000
+#define SDHCI_CD_LVL_SHIFT 18
#define SDHCI_WRITE_PROTECT 0x00080000
#define SDHCI_DATA_LVL_MASK 0x00F00000
#define SDHCI_DATA_LVL_SHIFT 20
@@ -88,6 +92,7 @@
#define SDHCI_CTRL_ADMA1 0x08
#define SDHCI_CTRL_ADMA32 0x10
#define SDHCI_CTRL_ADMA64 0x18
+#define SDHCI_CTRL_ADMA3 0x18
#define SDHCI_CTRL_8BITBUS 0x20
#define SDHCI_CTRL_CDTEST_INS 0x40
#define SDHCI_CTRL_CDTEST_EN 0x80
@@ -230,6 +235,7 @@
#define SDHCI_RETUNING_MODE_SHIFT 14
#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
#define SDHCI_CLOCK_MUL_SHIFT 16
+#define SDHCI_CAN_DO_ADMA3 0x08000000
#define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */
#define SDHCI_CAPABILITIES_1 0x44
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 8c05879850a0..eea183e90f1b 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -158,7 +158,7 @@ static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-struct sdhci_ops sdhci_am654_ops = {
+static struct sdhci_ops sdhci_am654_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_uhs_signaling = sdhci_set_uhs_signaling,
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 279e326e397e..2901a5773d83 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -19,7 +19,6 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -32,7 +31,6 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -1399,13 +1397,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
- if (host->cfg->clk_delays || host->use_new_timings)
+ /*
+ * Some H5 devices do not have signal traces precise enough to
+ * use HS DDR mode for their eMMC chips.
+ *
+ * We still enable HS DDR modes for all the other controller
+ * variants that support them.
+ */
+ if ((host->cfg->clk_delays || host->use_new_timings) &&
+ !of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun50i-h5-emmc"))
mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
+ /*
+ * If we don't support delay chains in the SoC, we can't use any
+ * of the higher speed modes. Mask them out in case the device
+ * tree specifies the properties for them, which gets added to
+ * the caps by mmc_of_parse() above.
+ */
+ if (!(host->cfg->clk_delays || host->use_new_timings)) {
+ mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
+ MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
+ mmc->caps2 &= ~MMC_CAP2_HS200;
+ }
+
+ /* TODO: This driver doesn't support HS400 mode yet */
+ mmc->caps2 &= ~MMC_CAP2_HS400;
+
ret = sunxi_mmc_init_host(host);
if (ret)
goto error_free_dma;
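
The ordering in the sunxi hunk matters: mmc_of_parse() may add UHS/DDR caps from device-tree properties, so capabilities the controller cannot back (no delay chains, no HS400 support) are masked out only after that call. A small sketch of the pattern follows; my_mmc_apply_caps() and the has_delay_chains flag stand in for the driver-specific check and are not part of the patch.

#include <linux/mmc/host.h>

static int my_mmc_apply_caps(struct mmc_host *mmc, bool has_delay_chains)
{
	int ret;

	ret = mmc_of_parse(mmc);	/* may set UHS/DDR caps from DT */
	if (ret)
		return ret;

	if (!has_delay_chains) {
		/* No delay chains: higher speed modes cannot work. */
		mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
			       MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
		mmc->caps2 &= ~MMC_CAP2_HS200;
	}

	/* This hypothetical driver does not support HS400 at all. */
	mmc->caps2 &= ~MMC_CAP2_HS400;

	return 0;
}
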
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index c03529e3f01a..2adb0d24360f 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 085a0fab769c..595949f1f001 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
+#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>
@@ -629,7 +630,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
return false;
}
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
@@ -637,7 +638,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
- return;
+ return false;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@@ -650,6 +651,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
+
+ return ireg;
}
irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -668,9 +671,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (__tmio_mmc_sdcard_irq(host, ireg, status))
return IRQ_HANDLED;
- __tmio_mmc_sdio_irq(host);
+ if (__tmio_mmc_sdio_irq(host))
+ return IRQ_HANDLED;
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
@@ -700,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
/* Set transfer length / blocksize */
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
- sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+ if (host->mmc->max_blk_count >= SZ_64K)
+ sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+ else
+ sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
tmio_mmc_start_dma(host, data);
@@ -1066,7 +1073,7 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
/* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
- mmc->ocr_avail = pdata->ocr_mask;
+ mmc->ocr_avail = pdata->ocr_mask;
/*
* try again.
@@ -1287,6 +1294,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
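
With __tmio_mmc_sdio_irq() now reporting whether it consumed anything, tmio_mmc_irq() can return IRQ_NONE for interrupts that belong to another device on a shared line. A generic sketch of that shared-handler shape, with hypothetical my_* names:

#include <linux/interrupt.h>

struct my_dev;	/* hypothetical device context */

static bool my_handle_card_irq(struct my_dev *dev) { return false; }
static bool my_handle_sdio_irq(struct my_dev *dev) { return false; }

static irqreturn_t my_irq_handler(int irq, void *devid)
{
	struct my_dev *dev = devid;

	/* Claim the interrupt only if a sub-handler consumed it, so the
	 * core can detect genuinely spurious interrupts on a shared line.
	 */
	if (my_handle_card_irq(dev))
		return IRQ_HANDLED;

	if (my_handle_sdio_irq(dev))
		return IRQ_HANDLED;

	return IRQ_NONE;
}
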
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 3ba42f508014..4fd6da29489e 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/of.h>
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 6e8e7b1bb34b..79a53cb8507b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -756,7 +756,8 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
}
numvirtchips = cfi->numchips * numparts;
- newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+ newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
+ GFP_KERNEL);
if (!newcfi)
return -ENOMEM;
shared = kmalloc_array(cfi->numchips,
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 837b04ab96a9..839ed40625d6 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -135,7 +135,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
* our caller, and copy the appropriate data into them.
*/
- retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
+ retcfi = kmalloc(struct_size(retcfi, chips, cfi.numchips), GFP_KERNEL);
if (!retcfi) {
kfree(cfi.cfiq);
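
Several hunks here (cfi_cmdset_0001.c, gen_probe.c, and qinfo_probe.c further down) replace open-coded sizeof arithmetic with struct_size(), which computes the size of a structure ending in a trailing array and saturates on overflow instead of wrapping. A small sketch of the pattern, using a made-up my_table type:

#include <linux/overflow.h>
#include <linux/slab.h>

struct my_entry {
	int id;
};

struct my_table {
	unsigned int nentries;
	struct my_entry entries[];	/* trailing flexible array */
};

static struct my_table *my_table_alloc(unsigned int nentries)
{
	struct my_table *t;

	/* struct_size() == sizeof(*t) + nentries * sizeof(t->entries[0]),
	 * but saturates if the multiplication would overflow, so kzalloc()
	 * fails instead of returning a too-short buffer.
	 */
	t = kzalloc(struct_size(t, entries, nentries), GFP_KERNEL);
	if (!t)
		return NULL;

	t->nentries = nentries;
	return t;
}
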
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 4c94fc096696..7754803e3463 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1767,8 +1767,8 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
- docg3->device_id);
+ mtd->name = devm_kasprintf(docg3->dev, GFP_KERNEL, "docg3.%d",
+ docg3->device_id);
if (!mtd->name)
return -ENOMEM;
docg3->max_block = 2047;
@@ -1872,7 +1872,7 @@ nomem3:
nomem2:
kfree(docg3);
nomem1:
- return ERR_PTR(ret);
+ return ret ? ERR_PTR(ret) : NULL;
}
/**
@@ -1886,7 +1886,6 @@ static void doc_release_device(struct mtd_info *mtd)
mtd_device_unregister(mtd);
kfree(docg3->bbt);
kfree(docg3);
- kfree(mtd->name);
kfree(mtd);
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index c4a1d04b8c80..651bab6d4e31 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -195,7 +195,14 @@ static int m25p_probe(struct spi_mem *spimem)
spi_mem_set_drvdata(spimem, flash);
flash->spimem = spimem;
- if (spi->mode & SPI_RX_QUAD) {
+ if (spi->mode & SPI_RX_OCTAL) {
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+
+ if (spi->mode & SPI_TX_OCTAL)
+ hwcaps.mask |= (SNOR_HWCAPS_READ_1_8_8 |
+ SNOR_HWCAPS_PP_1_1_8 |
+ SNOR_HWCAPS_PP_1_8_8);
+ } else if (spi->mode & SPI_RX_QUAD) {
hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
if (spi->mode & SPI_TX_QUAD)
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 46238796145f..1c97fabc4bf9 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -24,14 +24,12 @@ static unsigned long writebuf_size = 64;
#define MTDRAM_TOTAL_SIZE (total_size * 1024)
#define MTDRAM_ERASE_SIZE (erase_size * 1024)
-#ifdef MODULE
module_param(total_size, ulong, 0);
MODULE_PARM_DESC(total_size, "Total device size in KiB");
module_param(erase_size, ulong, 0);
MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
module_param(writebuf_size, ulong, 0);
MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)");
-#endif
// We could store these in the mtd structure, but we only support 1 device..
static struct mtd_info *mtd_info;
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 22f753e555ac..83f88b8b5d9f 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev,
* Going to have to check what details I need to set and how to
* get them
*/
- mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_WRITEABLE;
mtd->size = size;
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 69f2112340b1..175bdc3b72f4 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -181,8 +181,8 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
- retlpddr = kzalloc(sizeof(struct lpddr_private) +
- numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+ retlpddr = kzalloc(struct_size(retlpddr, chips, numvirtchips),
+ GFP_KERNEL);
if (!retlpddr)
return NULL;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 21e3cdc04036..76b4264936ff 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -155,7 +155,6 @@ static ssize_t mtd_flags_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
-
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
@@ -166,7 +165,6 @@ static ssize_t mtd_size_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)mtd->size);
-
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
@@ -176,7 +174,6 @@ static ssize_t mtd_erasesize_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
-
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
@@ -186,7 +183,6 @@ static ssize_t mtd_writesize_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
-
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
@@ -197,7 +193,6 @@ static ssize_t mtd_subpagesize_show(struct device *dev,
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
-
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
@@ -207,7 +202,6 @@ static ssize_t mtd_oobsize_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
-
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
@@ -226,7 +220,6 @@ static ssize_t mtd_numeraseregions_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
-
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
NULL);
@@ -237,7 +230,6 @@ static ssize_t mtd_name_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
-
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
@@ -507,6 +499,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
{
struct nvmem_config config = {};
+ config.id = -1;
config.dev = &mtd->dev;
config.name = mtd->name;
config.owner = THIS_MODULE;
@@ -522,7 +515,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
mtd->nvmem = nvmem_register(&config);
if (IS_ERR(mtd->nvmem)) {
/* Just ignore if there is no NVMEM support in the kernel */
- if (PTR_ERR(mtd->nvmem) == -ENOSYS) {
+ if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
mtd->nvmem = NULL;
} else {
dev_err(&mtd->dev, "Failed to register NVMEM device\n");
@@ -559,6 +552,14 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
+ /*
+ * MTD drivers should implement ->_{write,read}() or
+ * ->_{write,read}_oob(), but not both.
+ */
+ if (WARN_ON((mtd->_write && mtd->_write_oob) ||
+ (mtd->_read && mtd->_read_oob)))
+ return -EINVAL;
+
if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
!(mtd->flags & MTD_NO_ERASE)))
return -EINVAL;
@@ -1089,67 +1090,32 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf)
{
- int ret_code;
- *retlen = 0;
- if (from < 0 || from >= mtd->size || len > mtd->size - from)
- return -EINVAL;
- if (!len)
- return 0;
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+ int ret;
- ledtrig_mtd_activity();
- /*
- * In the absence of an error, drivers return a non-negative integer
- * representing the maximum number of bitflips that were corrected on
- * any one ecc region (if applicable; zero otherwise).
- */
- if (mtd->_read) {
- ret_code = mtd->_read(mtd, from, len, retlen, buf);
- } else if (mtd->_read_oob) {
- struct mtd_oob_ops ops = {
- .len = len,
- .datbuf = buf,
- };
-
- ret_code = mtd->_read_oob(mtd, from, &ops);
- *retlen = ops.retlen;
- } else {
- return -ENOTSUPP;
- }
+ ret = mtd_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
- if (unlikely(ret_code < 0))
- return ret_code;
- if (mtd->ecc_strength == 0)
- return 0; /* device lacks ecc */
- return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
{
- *retlen = 0;
- if (to < 0 || to >= mtd->size || len > mtd->size - to)
- return -EINVAL;
- if ((!mtd->_write && !mtd->_write_oob) ||
- !(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (!len)
- return 0;
- ledtrig_mtd_activity();
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
- if (!mtd->_write) {
- struct mtd_oob_ops ops = {
- .len = len,
- .datbuf = (u8 *)buf,
- };
- int ret;
+ ret = mtd_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
- ret = mtd->_write_oob(mtd, to, &ops);
- *retlen = ops.retlen;
- return ret;
- }
-
- return mtd->_write(mtd, to, len, retlen, buf);
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
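
After this refactor, mtd_read() and mtd_write() are thin wrappers that build a struct mtd_oob_ops with only the data buffer filled in and delegate to mtd_read_oob()/mtd_write_oob(), which now carry the bounds checks, LED trigger and bitflip accounting. A caller-side sketch of a plain data read through that path; my_mtd_read_data() is an illustrative helper, not an MTD API:

#include <linux/mtd/mtd.h>

static int my_mtd_read_data(struct mtd_info *mtd, loff_t from, size_t len,
			    u_char *buf, size_t *retlen)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,	/* 0: default placement */
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	/* -EUCLEAN means the data was corrected but bitflips reached the
	 * threshold; callers usually treat it as success and schedule the
	 * block for scrubbing.
	 */
	return ret;
}
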
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 9887bda317cd..b31c868019ad 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -7,7 +7,7 @@
extern struct mutex mtd_table_mutex;
struct mtd_info *__mtd_next_device(int i);
-int add_mtd_device(struct mtd_info *mtd);
+int __must_check add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index b6af41b04622..37f174ccbcec 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
/* let's register it anyway to preserve ordering */
slave->offset = 0;
slave->mtd.size = 0;
+
+ /* Initialize ->erasesize to make add_mtd_device() happy. */
+ slave->mtd.erasesize = parent->erasesize;
+
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
@@ -618,10 +622,21 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
list_add(&new->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
- add_mtd_device(&new->mtd);
+ ret = add_mtd_device(&new->mtd);
+ if (ret)
+ goto err_remove_part;
mtd_add_partition_attrs(new);
+ return 0;
+
+err_remove_part:
+ mutex_lock(&mtd_partitions_mutex);
+ list_del(&new->list);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ free_partition(new);
+
return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
@@ -712,22 +727,31 @@ int add_mtd_partitions(struct mtd_info *master,
{
struct mtd_part *slave;
uint64_t cur_offset = 0;
- int i;
+ int i, ret;
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
for (i = 0; i < nbparts; i++) {
slave = allocate_partition(master, parts + i, i, cur_offset);
if (IS_ERR(slave)) {
- del_mtd_partitions(master);
- return PTR_ERR(slave);
+ ret = PTR_ERR(slave);
+ goto err_del_partitions;
}
mutex_lock(&mtd_partitions_mutex);
list_add(&slave->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
- add_mtd_device(&slave->mtd);
+ ret = add_mtd_device(&slave->mtd);
+ if (ret) {
+ mutex_lock(&mtd_partitions_mutex);
+ list_del(&slave->list);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ free_partition(slave);
+ goto err_del_partitions;
+ }
+
mtd_add_partition_attrs(slave);
/* Look for subpartitions */
parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
@@ -736,6 +760,11 @@ int add_mtd_partitions(struct mtd_info *master,
}
return 0;
+
+err_del_partitions:
+ del_mtd_partitions(master);
+
+ return ret;
}
static DEFINE_SPINLOCK(part_parser_lock);
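
Since add_mtd_device() is now __must_check (see the mtdcore.h hunk above), both mtd_add_partition() and add_mtd_partitions() grow explicit unwind paths: remove the partition from the list under the mutex, free it, and, in the bulk path, delete whatever was already registered. A compact sketch of that unwind shape with hypothetical my_* helpers passed in as callbacks:

#include <linux/list.h>
#include <linux/mutex.h>

struct my_part {
	struct list_head list;
};

static DEFINE_MUTEX(my_parts_mutex);
static LIST_HEAD(my_parts);

static int my_register_part(struct my_part *part,
			    int (*register_dev)(struct my_part *),
			    void (*free_part)(struct my_part *))
{
	int ret;

	mutex_lock(&my_parts_mutex);
	list_add(&part->list, &my_parts);
	mutex_unlock(&my_parts_mutex);

	ret = register_dev(part);
	if (ret) {
		/* Undo the list insertion before freeing, mirroring the
		 * err_remove_part path in mtd_add_partition().
		 */
		mutex_lock(&my_parts_mutex);
		list_del(&part->list);
		mutex_unlock(&my_parts_mutex);

		free_part(part);
	}

	return ret;
}
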
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 1a55d3e3d4c5..e604625e2dfa 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -541,4 +541,21 @@ config MTD_NAND_TEGRA
is supported. Extra OOB bytes when using HW ECC are currently
not supported.
+config MTD_NAND_STM32_FMC2
+ tristate "Support for NAND controller on STM32MP SoCs"
+ depends on MACH_STM32MP157 || COMPILE_TEST
+ help
+ Enables support for NAND Flash chips on SoCs containing the FMC2
+ NAND controller. This controller is found on STM32MP SoCs.
+ The controller supports a maximum 8k page size and up to
+ 8-bit error correction per 512-byte sector.
+
+config MTD_NAND_MESON
+ tristate "Support for NAND controller on Amlogic's Meson SoCs"
+ depends on ARCH_MESON || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Enables support for the NAND flash controller found on
+ Amlogic's Meson SoCs.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 57159b349054..5a5a72f0793e 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -56,6 +56,8 @@ obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
+obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
+obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index 555a74e15269..9d3997840889 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -876,23 +876,32 @@ static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev,
{
struct platform_device *pdev;
struct atmel_pmecc *pmecc, **ptr;
+ int ret;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
+ pmecc = platform_get_drvdata(pdev);
+ if (!pmecc) {
+ ret = -EPROBE_DEFER;
+ goto err_put_device;
+ }
ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- get_device(&pdev->dev);
- pmecc = platform_get_drvdata(pdev);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto err_put_device;
+ }
*ptr = pmecc;
devres_add(userdev, ptr);
return pmecc;
+
+err_put_device:
+ put_device(&pdev->dev);
+ return ERR_PTR(ret);
}
static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 };
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index eebac35304c6..24aeafc67cd4 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -37,9 +37,6 @@
#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
-/* MAP10 commands */
-#define DENALI_ERASE 0x01
-
#define DENALI_BANK(denali) ((denali)->active_bank << 24)
#define DENALI_INVALID_BANK -1
@@ -476,7 +473,7 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
}
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int raw)
+ size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf;
@@ -504,7 +501,7 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
}
static int denali_pio_write(struct denali_nand_info *denali,
- const void *buf, size_t size, int page, int raw)
+ const void *buf, size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf;
@@ -525,16 +522,16 @@ static int denali_pio_write(struct denali_nand_info *denali,
}
static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int raw, int write)
+ size_t size, int page, int write)
{
if (write)
- return denali_pio_write(denali, buf, size, page, raw);
+ return denali_pio_write(denali, buf, size, page);
else
- return denali_pio_read(denali, buf, size, page, raw);
+ return denali_pio_read(denali, buf, size, page);
}
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int raw, int write)
+ size_t size, int page, int write)
{
dma_addr_t dma_addr;
uint32_t irq_mask, irq_status, ecc_err_mask;
@@ -544,7 +541,7 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
dma_addr = dma_map_single(denali->dev, buf, size, dir);
if (dma_mapping_error(denali->dev, dma_addr)) {
dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
- return denali_pio_xfer(denali, buf, size, page, raw, write);
+ return denali_pio_xfer(denali, buf, size, page, write);
}
if (write) {
@@ -598,9 +595,9 @@ static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail)
- return denali_dma_xfer(denali, buf, size, page, raw, write);
+ return denali_dma_xfer(denali, buf, size, page, write);
else
- return denali_pio_xfer(denali, buf, size, page, raw, write);
+ return denali_pio_xfer(denali, buf, size, page, write);
}
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
@@ -754,9 +751,6 @@ static int denali_read_oob(struct nand_chip *chip, int page)
static int denali_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
-
- denali_reset_irq(denali);
denali_oob_xfer(mtd, chip, page, 1);
@@ -903,23 +897,6 @@ static int denali_waitfunc(struct nand_chip *chip)
return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}
-static int denali_erase(struct nand_chip *chip, int page)
-{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- uint32_t irq_status;
-
- denali_reset_irq(denali);
-
- denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
- DENALI_ERASE);
-
- /* wait for erase to complete or failure to occur */
- irq_status = denali_wait_for_irq(denali,
- INTR__ERASE_COMP | INTR__ERASE_FAIL);
-
- return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
-}
-
static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf)
{
@@ -1244,7 +1221,6 @@ static int denali_attach_chip(struct nand_chip *chip)
chip->ecc.write_page_raw = denali_write_page_raw;
chip->ecc.read_oob = denali_read_oob;
chip->ecc.write_oob = denali_write_oob;
- chip->legacy.erase = denali_erase;
ret = denali_multidev_fixup(denali);
if (ret)
@@ -1322,7 +1298,7 @@ int denali_init(struct denali_nand_info *denali)
}
/* clk rate info is needed for setup_data_interface */
- if (denali->clk_rate && denali->clk_x_rate)
+ if (!denali->clk_rate || !denali->clk_x_rate)
chip->options |= NAND_KEEP_TIMINGS;
chip->legacy.dummy_controller.ops = &denali_controller_ops;
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index 25c00601b8b3..c8c2620fc736 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -304,7 +304,6 @@ struct denali_nand_info {
u32 irq_status; /* interrupts that have happened */
int irq;
void *buf; /* for syndrome layout conversion */
- dma_addr_t dma_addr;
int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */
int oob_skip_bytes; /* number of bytes reserved for BBM */
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 7c6a8a426606..0b5ae2418815 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -109,25 +109,17 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
- /*
- * A single anonymous clock is supported for the backward compatibility.
- * New platforms should support all the named clocks.
- */
dt->clk = devm_clk_get(dev, "nand");
if (IS_ERR(dt->clk))
- dt->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(dt->clk)) {
- dev_err(dev, "no clk available\n");
return PTR_ERR(dt->clk);
- }
dt->clk_x = devm_clk_get(dev, "nand_x");
if (IS_ERR(dt->clk_x))
- dt->clk_x = NULL;
+ return PTR_ERR(dt->clk_x);
dt->clk_ecc = devm_clk_get(dev, "ecc");
if (IS_ERR(dt->clk_ecc))
- dt->clk_ecc = NULL;
+ return PTR_ERR(dt->clk_ecc);
ret = clk_prepare_enable(dt->clk);
if (ret)
@@ -141,19 +133,8 @@ static int denali_dt_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk_x;
- if (dt->clk_x) {
- denali->clk_rate = clk_get_rate(dt->clk);
- denali->clk_x_rate = clk_get_rate(dt->clk_x);
- } else {
- /*
- * Hardcode the clock rates for the backward compatibility.
- * This works for both SOCFPGA and UniPhier.
- */
- dev_notice(dev,
- "necessary clock is missing. default clock rates are used.\n");
- denali->clk_rate = 50000000;
- denali->clk_x_rate = 200000000;
- }
+ denali->clk_rate = clk_get_rate(dt->clk);
+ denali->clk_x_rate = clk_get_rate(dt->clk_x);
ret = denali_init(denali);
if (ret)
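
After this hunk the three named clocks ("nand", "nand_x", "ecc") are mandatory: the anonymous-clock fallback and the hardcoded 50/200 MHz defaults are gone, so a missing clock now fails the probe with whatever error devm_clk_get() returns. A sketch of the resulting acquisition pattern for a hypothetical probe (my_get_named_clocks() and struct my_clks are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct my_clks {
	struct clk *core;
	struct clk *bus;
};

static int my_get_named_clocks(struct device *dev, struct my_clks *clks)
{
	/* Named clocks only: no anonymous fallback, no hardcoded rates. */
	clks->core = devm_clk_get(dev, "nand");
	if (IS_ERR(clks->core))
		return PTR_ERR(clks->core);

	clks->bus = devm_clk_get(dev, "nand_x");
	if (IS_ERR(clks->bus))
		return PTR_ERR(clks->bus);

	return 0;
}
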
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 325b4414dccc..6c7ca41354be 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
-/* fsmc_select_chip - assert or deassert nCE */
-static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert)
-{
- u32 pc = readl(host->regs_va + FSMC_PC);
-
- if (!assert)
- writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
- else
- writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
-
- /*
- * nCE line changes must be applied before returning from this
- * function.
- */
- mb();
-}
-
/*
* fsmc_exec_op - hook called by the core to execute NAND operations
*
@@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
- fsmc_ce_ctrl(host, true);
-
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
@@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
}
}
- fsmc_ce_ctrl(host, false);
-
return ret;
}
@@ -986,6 +965,19 @@ static const struct nand_controller_ops fsmc_nand_controller_ops = {
.setup_data_interface = fsmc_setup_data_interface,
};
+/**
+ * fsmc_nand_disable() - Disables the NAND bank
+ * @host: The instance to disable
+ */
+static void fsmc_nand_disable(struct fsmc_nand_data *host)
+{
+ u32 val;
+
+ val = readl(host->regs_va + FSMC_PC);
+ val &= ~FSMC_ENABLE;
+ writel(val, host->regs_va + FSMC_PC);
+}
+
/*
* fsmc_nand_probe - Probe function
* @pdev: platform device structure
@@ -1141,6 +1133,7 @@ release_dma_read_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
disable_clk:
+ fsmc_nand_disable(host);
clk_disable_unprepare(host->clk);
return ret;
@@ -1155,6 +1148,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
if (host) {
nand_release(&host->nand);
+ fsmc_nand_disable(host);
if (host->mode == USE_DMA_ACCESS) {
dma_release_channel(host->write_dma_chan);
@@ -1185,6 +1179,7 @@ static int fsmc_nand_resume(struct device *dev)
clk_prepare_enable(host->clk);
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
+ nand_reset(&host->nand, 0);
}
return 0;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index bd4cfac6b5aa..a4768df5083f 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
/*
* Reset BCH here, too. We got failures otherwise :(
- * See later BCH reset for explanation of MX23 handling
+ * See later BCH reset for explanation of MX23 and MX28 handling
*/
- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ ret = gpmi_reset_block(r->bch_regs,
+ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
if (ret)
goto err_out;
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
- * On the other hand, the MX28 needs the reset, because one case has been
- * seen where the BCH produced ECC errors constantly after 10000
- * consecutive reboots. The latter case has not been seen on the MX23
- * yet, still we don't know if it could happen there as well.
+ * The same applies to the MX28.
*/
- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ ret = gpmi_reset_block(r->bch_regs,
+ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
if (ret)
goto err_out;
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index f92ae5aa2a54..9526d5b23c80 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat,
}
static int jz_nand_ioremap_resource(struct platform_device *pdev,
- const char *name, struct resource **res, void *__iomem *base)
+ const char *name, struct resource **res, void __iomem **base)
{
int ret;
diff --git a/drivers/mtd/nand/raw/jz4780_bch.c b/drivers/mtd/nand/raw/jz4780_bch.c
index 7201827809e9..c5f74ed85862 100644
--- a/drivers/mtd/nand/raw/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/jz4780_bch.c
@@ -281,12 +281,15 @@ static struct jz4780_bch *jz4780_bch_get(struct device_node *np)
struct jz4780_bch *bch;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
- get_device(&pdev->dev);
-
bch = platform_get_drvdata(pdev);
+ if (!bch) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
clk_prepare_enable(bch->clk);
return bch;
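
Both the jz4780_bch hunk here and the atmel/pmecc hunk above fix the same leak: of_find_device_by_node() takes a reference on the device, so every early return after it (including the -EPROBE_DEFER case when drvdata is not set yet) must drop that reference with put_device(). A sketch of the corrected lookup pattern; my_get_provider_drvdata() is an illustrative helper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void *my_get_provider_drvdata(struct device_node *np)
{
	struct platform_device *pdev;
	void *priv;

	pdev = of_find_device_by_node(np);	/* takes a device reference */
	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);

	priv = platform_get_drvdata(pdev);
	if (!priv) {
		/* Provider not probed yet: drop the reference before deferring. */
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	return priv;
}
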
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 84283c6bb0ff..f38e5c1b87e4 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2550,9 +2550,8 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
}
/* Alloc the nand chip structure */
- marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
- (nsels *
- sizeof(struct marvell_nand_chip_sel)),
+ marvell_nand = devm_kzalloc(dev,
+ struct_size(marvell_nand, sels, nsels),
GFP_KERNEL);
if (!marvell_nand) {
dev_err(dev, "could not allocate chip structure\n");
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
new file mode 100644
index 000000000000..3e8aa71407b5
--- /dev/null
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -0,0 +1,1464 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Amlogic Meson Nand Flash Controller Driver
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Liang Yang <liang.yang@amlogic.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched/task_stack.h>
+
+#define NFC_REG_CMD 0x00
+#define NFC_CMD_IDLE (0xc << 14)
+#define NFC_CMD_CLE (0x5 << 14)
+#define NFC_CMD_ALE (0x6 << 14)
+#define NFC_CMD_ADL ((0 << 16) | (3 << 20))
+#define NFC_CMD_ADH ((1 << 16) | (3 << 20))
+#define NFC_CMD_AIL ((2 << 16) | (3 << 20))
+#define NFC_CMD_AIH ((3 << 16) | (3 << 20))
+#define NFC_CMD_SEED ((8 << 16) | (3 << 20))
+#define NFC_CMD_M2N ((0 << 17) | (2 << 20))
+#define NFC_CMD_N2M ((1 << 17) | (2 << 20))
+#define NFC_CMD_RB BIT(20)
+#define NFC_CMD_SCRAMBLER_ENABLE BIT(19)
+#define NFC_CMD_SCRAMBLER_DISABLE 0
+#define NFC_CMD_SHORTMODE_DISABLE 0
+#define NFC_CMD_RB_INT BIT(14)
+
+#define NFC_CMD_GET_SIZE(x) (((x) >> 22) & GENMASK(4, 0))
+
+#define NFC_REG_CFG 0x04
+#define NFC_REG_DADR 0x08
+#define NFC_REG_IADR 0x0c
+#define NFC_REG_BUF 0x10
+#define NFC_REG_INFO 0x14
+#define NFC_REG_DC 0x18
+#define NFC_REG_ADR 0x1c
+#define NFC_REG_DL 0x20
+#define NFC_REG_DH 0x24
+#define NFC_REG_CADR 0x28
+#define NFC_REG_SADR 0x2c
+#define NFC_REG_PINS 0x30
+#define NFC_REG_VER 0x38
+
+#define NFC_RB_IRQ_EN BIT(21)
+
+#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
+ ( \
+ (cmd_dir) | \
+ ((ran) << 19) | \
+ ((bch) << 14) | \
+ ((short_mode) << 13) | \
+ (((page_size) & 0x7f) << 6) | \
+ ((pages) & 0x3f) \
+ )
+
+#define GENCMDDADDRL(adl, addr) ((adl) | ((addr) & 0xffff))
+#define GENCMDDADDRH(adh, addr) ((adh) | (((addr) >> 16) & 0xffff))
+#define GENCMDIADDRL(ail, addr) ((ail) | ((addr) & 0xffff))
+#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff))
+
+#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
+
+#define ECC_CHECK_RETURN_FF (-1)
+
+#define NAND_CE0 (0xe << 10)
+#define NAND_CE1 (0xd << 10)
+
+#define DMA_BUSY_TIMEOUT 0x100000
+#define CMD_FIFO_EMPTY_TIMEOUT 1000
+
+#define MAX_CE_NUM 2
+
+/* eMMC clock register, misc control */
+#define CLK_SELECT_NAND BIT(31)
+
+#define NFC_CLK_CYCLE 6
+
+/* nand flash controller delay 3 ns */
+#define NFC_DEFAULT_DELAY 3000
+
+#define ROW_ADDER(page, index) (((page) >> (8 * (index))) & 0xff)
+#define MAX_CYCLE_ADDRS 5
+#define DIRREAD 1
+#define DIRWRITE 0
+
+#define ECC_PARITY_BCH8_512B 14
+#define ECC_COMPLETE BIT(31)
+#define ECC_ERR_CNT(x) (((x) >> 24) & GENMASK(5, 0))
+#define ECC_ZERO_CNT(x) (((x) >> 16) & GENMASK(5, 0))
+#define ECC_UNCORRECTABLE 0x3f
+
+#define PER_INFO_BYTE 8
+
+struct meson_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ unsigned long clk_rate;
+ unsigned long level1_divider;
+ u32 bus_timing;
+ u32 twb;
+ u32 tadl;
+ u32 tbers_max;
+
+ u32 bch_mode;
+ u8 *data_buf;
+ __le64 *info_buf;
+ u32 nsels;
+ u8 sels[0];
+};
+
+struct meson_nand_ecc {
+ u32 bch;
+ u32 strength;
+};
+
+struct meson_nfc_data {
+ const struct nand_ecc_caps *ecc_caps;
+};
+
+struct meson_nfc_param {
+ u32 chip_select;
+ u32 rb_select;
+};
+
+struct nand_rw_cmd {
+ u32 cmd0;
+ u32 addrs[MAX_CYCLE_ADDRS];
+ u32 cmd1;
+};
+
+struct nand_timing {
+ u32 twb;
+ u32 tadl;
+ u32 tbers_max;
+};
+
+struct meson_nfc {
+ struct nand_controller controller;
+ struct clk *core_clk;
+ struct clk *device_clk;
+ struct clk *phase_tx;
+ struct clk *phase_rx;
+
+ unsigned long clk_rate;
+ u32 bus_timing;
+
+ struct device *dev;
+ void __iomem *reg_base;
+ struct regmap *reg_clk;
+ struct completion completion;
+ struct list_head chips;
+ const struct meson_nfc_data *data;
+ struct meson_nfc_param param;
+ struct nand_timing timing;
+ union {
+ int cmd[32];
+ struct nand_rw_cmd rw;
+ } cmdfifo;
+
+ dma_addr_t daddr;
+ dma_addr_t iaddr;
+
+ unsigned long assigned_cs;
+};
+
+enum {
+ NFC_ECC_BCH8_1K = 2,
+ NFC_ECC_BCH24_1K,
+ NFC_ECC_BCH30_1K,
+ NFC_ECC_BCH40_1K,
+ NFC_ECC_BCH50_1K,
+ NFC_ECC_BCH60_1K,
+};
+
+#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)}
+
+static struct meson_nand_ecc meson_ecc[] = {
+ MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8),
+ MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24),
+ MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30),
+ MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40),
+ MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50),
+ MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60),
+};
+
+static int meson_nand_calc_ecc_bytes(int step_size, int strength)
+{
+ int ecc_bytes;
+
+ if (step_size == 512 && strength == 8)
+ return ECC_PARITY_BCH8_512B;
+
+ ecc_bytes = DIV_ROUND_UP(strength * fls(step_size * 8), 8);
+ ecc_bytes = ALIGN(ecc_bytes, 2);
+
+ return ecc_bytes;
+}
+
+NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps,
+ meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60);
+NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps,
+ meson_nand_calc_ecc_bytes, 1024, 8);
+
+static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct meson_nfc_nand_chip, nand);
+}
+
+static void meson_nfc_select_chip(struct nand_chip *nand, int chip)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret, value;
+
+ if (chip < 0 || WARN_ON_ONCE(chip >= meson_chip->nsels))
+ return;
+
+ nfc->param.chip_select = meson_chip->sels[chip] ? NAND_CE1 : NAND_CE0;
+ nfc->param.rb_select = nfc->param.chip_select;
+ nfc->timing.twb = meson_chip->twb;
+ nfc->timing.tadl = meson_chip->tadl;
+ nfc->timing.tbers_max = meson_chip->tbers_max;
+
+ if (nfc->clk_rate != meson_chip->clk_rate) {
+ ret = clk_set_rate(nfc->device_clk, meson_chip->clk_rate);
+ if (ret) {
+ dev_err(nfc->dev, "failed to set clock rate\n");
+ return;
+ }
+ nfc->clk_rate = meson_chip->clk_rate;
+ }
+ if (nfc->bus_timing != meson_chip->bus_timing) {
+ value = (NFC_CLK_CYCLE - 1) | (meson_chip->bus_timing << 5);
+ writel(value, nfc->reg_base + NFC_REG_CFG);
+ writel((1 << 31), nfc->reg_base + NFC_REG_CMD);
+ nfc->bus_timing = meson_chip->bus_timing;
+ }
+}
+
+static void meson_nfc_cmd_idle(struct meson_nfc *nfc, u32 time)
+{
+ writel(nfc->param.chip_select | NFC_CMD_IDLE | (time & 0x3ff),
+ nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed)
+{
+ writel(NFC_CMD_SEED | (0xc2 + (seed & 0x7fff)),
+ nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
+ int scrambler)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u32 bch = meson_chip->bch_mode, cmd;
+ int len = mtd->writesize, pagesize, pages;
+
+ pagesize = nand->ecc.size;
+
+ if (raw) {
+ len = mtd->writesize + mtd->oobsize;
+ cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ return;
+ }
+
+ pages = len / nand->ecc.size;
+
+ cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
+ NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_drain_cmd(struct meson_nfc *nfc)
+{
+ /*
+ * Insert two commands to make sure all valid commands are finished.
+ *
+ * The NAND flash controller is designed as a two-stage pipeline -
+ * a) fetch and b) execute.
+ * There might be cases when the driver sees the command queue as empty,
+ * but the NAND flash controller still has two commands buffered,
+ * one fetched into the NFC request queue (ready to run), and another
+ * actively executing. So pushing 2 "IDLE" commands guarantees that
+ * the pipeline is emptied.
+ */
+ meson_nfc_cmd_idle(nfc, 0);
+ meson_nfc_cmd_idle(nfc, 0);
+}
+
+static int meson_nfc_wait_cmd_finish(struct meson_nfc *nfc,
+ unsigned int timeout_ms)
+{
+ u32 cmd_size = 0;
+ int ret;
+
+ /* wait until the CMD FIFO is empty */
+ ret = readl_relaxed_poll_timeout(nfc->reg_base + NFC_REG_CMD, cmd_size,
+ !NFC_CMD_GET_SIZE(cmd_size),
+ 10, timeout_ms * 1000);
+ if (ret)
+ dev_err(nfc->dev, "wait for empty CMD FIFO time out\n");
+
+ return ret;
+}
+
+static int meson_nfc_wait_dma_finish(struct meson_nfc *nfc)
+{
+ meson_nfc_drain_cmd(nfc);
+
+ return meson_nfc_wait_cmd_finish(nfc, DMA_BUSY_TIMEOUT);
+}
+
+static u8 *meson_nfc_oob_ptr(struct nand_chip *nand, int i)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int len;
+
+ len = nand->ecc.size * (i + 1) + (nand->ecc.bytes + 2) * i;
+
+ return meson_chip->data_buf + len;
+}
+
+static u8 *meson_nfc_data_ptr(struct nand_chip *nand, int i)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int len, temp;
+
+ temp = nand->ecc.size + nand->ecc.bytes;
+ len = (temp + 2) * i;
+
+ return meson_chip->data_buf + len;
+}
+
+static void meson_nfc_get_data_oob(struct nand_chip *nand,
+ u8 *buf, u8 *oobbuf)
+{
+ int i, oob_len = 0;
+ u8 *dsrc, *osrc;
+
+ oob_len = nand->ecc.bytes + 2;
+ for (i = 0; i < nand->ecc.steps; i++) {
+ if (buf) {
+ dsrc = meson_nfc_data_ptr(nand, i);
+ memcpy(buf, dsrc, nand->ecc.size);
+ buf += nand->ecc.size;
+ }
+ osrc = meson_nfc_oob_ptr(nand, i);
+ memcpy(oobbuf, osrc, oob_len);
+ oobbuf += oob_len;
+ }
+}
+
+static void meson_nfc_set_data_oob(struct nand_chip *nand,
+ const u8 *buf, u8 *oobbuf)
+{
+ int i, oob_len = 0;
+ u8 *dsrc, *osrc;
+
+ oob_len = nand->ecc.bytes + 2;
+ for (i = 0; i < nand->ecc.steps; i++) {
+ if (buf) {
+ dsrc = meson_nfc_data_ptr(nand, i);
+ memcpy(dsrc, buf, nand->ecc.size);
+ buf += nand->ecc.size;
+ }
+ osrc = meson_nfc_oob_ptr(nand, i);
+ memcpy(osrc, oobbuf, oob_len);
+ oobbuf += oob_len;
+ }
+}
+
+static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms)
+{
+ u32 cmd, cfg;
+ int ret = 0;
+
+ meson_nfc_cmd_idle(nfc, nfc->timing.twb);
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ cfg |= NFC_RB_IRQ_EN;
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ init_completion(&nfc->completion);
+
+ /* use the max erase time as the maximum clock count for waiting for R/B */
+ cmd = NFC_CMD_RB | NFC_CMD_RB_INT
+ | nfc->param.chip_select | nfc->timing.tbers_max;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ ret = wait_for_completion_timeout(&nfc->completion,
+ msecs_to_jiffies(timeout_ms));
+ if (ret == 0)
+ ret = -1;
+
+ return ret;
+}
+
+static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int i, count;
+
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ info = &meson_chip->info_buf[i];
+ *info |= oob_buf[count];
+ *info |= oob_buf[count + 1] << 8;
+ }
+}
+
+static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int i, count;
+
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ info = &meson_chip->info_buf[i];
+ oob_buf[count] = *info;
+ oob_buf[count + 1] = *info >> 8;
+ }
+}
+
+static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
+ u64 *correct_bitmap)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int ret = 0, i;
+
+ for (i = 0; i < nand->ecc.steps; i++) {
+ info = &meson_chip->info_buf[i];
+ if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
+ mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
+ *bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
+ *correct_bitmap |= 1ULL << i;
+ continue;
+ }
+ if ((nand->options & NAND_NEED_SCRAMBLING) &&
+ ECC_ZERO_CNT(*info) < nand->ecc.strength) {
+ mtd->ecc_stats.corrected += ECC_ZERO_CNT(*info);
+ *bitflips = max_t(u32, *bitflips,
+ ECC_ZERO_CNT(*info));
+ ret = ECC_CHECK_RETURN_FF;
+ } else {
+ ret = -EBADMSG;
+ }
+ }
+ return ret;
+}
+
+static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, u8 *databuf,
+ int datalen, u8 *infobuf, int infolen,
+ enum dma_data_direction dir)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ u32 cmd;
+ int ret = 0;
+
+ nfc->daddr = dma_map_single(nfc->dev, (void *)databuf, datalen, dir);
+ ret = dma_mapping_error(nfc->dev, nfc->daddr);
+ if (ret) {
+ dev_err(nfc->dev, "DMA mapping error\n");
+ return ret;
+ }
+ cmd = GENCMDDADDRL(NFC_CMD_ADL, nfc->daddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ cmd = GENCMDDADDRH(NFC_CMD_ADH, nfc->daddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ if (infobuf) {
+ nfc->iaddr = dma_map_single(nfc->dev, infobuf, infolen, dir);
+ ret = dma_mapping_error(nfc->dev, nfc->iaddr);
+ if (ret) {
+ dev_err(nfc->dev, "DMA mapping error\n");
+ dma_unmap_single(nfc->dev,
+ nfc->daddr, datalen, dir);
+ return ret;
+ }
+ cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ cmd = GENCMDIADDRH(NFC_CMD_AIH, nfc->iaddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ }
+
+ return ret;
+}
+
+static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
+ int infolen, int datalen,
+ enum dma_data_direction dir)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+
+ dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
+ if (infolen)
+ dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
+}
+
+static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret = 0;
+ u32 cmd;
+ u8 *info;
+
+ info = kzalloc(PER_INFO_BYTE, GFP_KERNEL);
+ ret = meson_nfc_dma_buffer_setup(nand, buf, len, info,
+ PER_INFO_BYTE, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE);
+ kfree(info);
+
+ return ret;
+}
+
+static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret = 0;
+ u32 cmd;
+
+ ret = meson_nfc_dma_buffer_setup(nand, buf, len, NULL,
+ 0, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ meson_nfc_dma_buffer_release(nand, len, 0, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
+ int page, bool in)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&nand->data_interface);
+ u32 *addrs = nfc->cmdfifo.rw.addrs;
+ u32 cs = nfc->param.chip_select;
+ u32 cmd0, cmd_num, row_start;
+ int ret = 0, i;
+
+ cmd_num = sizeof(struct nand_rw_cmd) / sizeof(int);
+
+ cmd0 = in ? NAND_CMD_READ0 : NAND_CMD_SEQIN;
+ nfc->cmdfifo.rw.cmd0 = cs | NFC_CMD_CLE | cmd0;
+
+ addrs[0] = cs | NFC_CMD_ALE | 0;
+ if (mtd->writesize <= 512) {
+ cmd_num--;
+ row_start = 1;
+ } else {
+ addrs[1] = cs | NFC_CMD_ALE | 0;
+ row_start = 2;
+ }
+
+ addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0);
+ addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1);
+
+ if (nand->options & NAND_ROW_ADDR_3)
+ addrs[row_start + 2] =
+ cs | NFC_CMD_ALE | ROW_ADDER(page, 2);
+ else
+ cmd_num--;
+
+ /* subtract cmd1 */
+ cmd_num--;
+
+ for (i = 0; i < cmd_num; i++)
+ writel_relaxed(nfc->cmdfifo.cmd[i],
+ nfc->reg_base + NFC_REG_CMD);
+
+ if (in) {
+ nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART;
+ writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max));
+ } else {
+ meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
+ }
+
+ return ret;
+}
+
+static int meson_nfc_write_page_sub(struct nand_chip *nand,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&nand->data_interface);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int data_len, info_len;
+ u32 cmd;
+ int ret;
+
+ meson_nfc_select_chip(nand, nand->cur_cs);
+
+ data_len = mtd->writesize + mtd->oobsize;
+ info_len = nand->ecc.steps * PER_INFO_BYTE;
+
+ ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRWRITE);
+ if (ret)
+ return ret;
+
+ ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
+ data_len, (u8 *)meson_chip->info_buf,
+ info_len, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ meson_nfc_cmd_seed(nfc, page);
+ meson_nfc_cmd_access(nand, raw, DIRWRITE,
+ NFC_CMD_SCRAMBLER_ENABLE);
+ } else {
+ meson_nfc_cmd_access(nand, raw, DIRWRITE,
+ NFC_CMD_SCRAMBLER_DISABLE);
+ }
+
+ cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max));
+
+ meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_write_page_raw(struct nand_chip *nand, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *oob_buf = nand->oob_poi;
+
+ meson_nfc_set_data_oob(nand, buf, oob_buf);
+
+ return meson_nfc_write_page_sub(nand, page, 1);
+}
+
+static int meson_nfc_write_page_hwecc(struct nand_chip *nand,
+ const u8 *buf, int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u8 *oob_buf = nand->oob_poi;
+
+ memcpy(meson_chip->data_buf, buf, mtd->writesize);
+ memset(meson_chip->info_buf, 0, nand->ecc.steps * PER_INFO_BYTE);
+ meson_nfc_set_user_byte(nand, oob_buf);
+
+ return meson_nfc_write_page_sub(nand, page, 0);
+}
+
+static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
+ struct nand_chip *nand, int raw)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ u32 neccpages;
+ int ret;
+
+ neccpages = raw ? 1 : nand->ecc.steps;
+ info = &meson_chip->info_buf[neccpages - 1];
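+ /*
+ * Only the last info entry is polled: ECC_COMPLETE there means
+ * the DMA engine has finished updating all of them.
+ */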
+ do {
+ usleep_range(10, 15);
+ /* info is updated by the NFC DMA engine */
+ smp_rmb();
+ ret = *info & ECC_COMPLETE;
+ } while (!ret);
+}
+
+static int meson_nfc_read_page_sub(struct nand_chip *nand,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int data_len, info_len;
+ int ret;
+
+ meson_nfc_select_chip(nand, nand->cur_cs);
+
+ data_len = mtd->writesize + mtd->oobsize;
+ info_len = nand->ecc.steps * PER_INFO_BYTE;
+
+ ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRREAD);
+ if (ret)
+ return ret;
+
+ ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
+ data_len, (u8 *)meson_chip->info_buf,
+ info_len, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ meson_nfc_cmd_seed(nfc, page);
+ meson_nfc_cmd_access(nand, raw, DIRREAD,
+ NFC_CMD_SCRAMBLER_ENABLE);
+ } else {
+ meson_nfc_cmd_access(nand, raw, DIRREAD,
+ NFC_CMD_SCRAMBLER_DISABLE);
+ }
+
+ ret = meson_nfc_wait_dma_finish(nfc);
+ meson_nfc_check_ecc_pages_valid(nfc, nand, raw);
+
+ meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_read_page_raw(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ u8 *oob_buf = nand->oob_poi;
+ int ret;
+
+ ret = meson_nfc_read_page_sub(nand, page, 1);
+ if (ret)
+ return ret;
+
+ meson_nfc_get_data_oob(nand, buf, oob_buf);
+
+ return 0;
+}
+
+static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ u64 correct_bitmap = 0;
+ u32 bitflips = 0;
+ u8 *oob_buf = nand->oob_poi;
+ int ret, i;
+
+ ret = meson_nfc_read_page_sub(nand, page, 0);
+ if (ret)
+ return ret;
+
+ meson_nfc_get_user_byte(nand, oob_buf);
+ ret = meson_nfc_ecc_correct(nand, &bitflips, &correct_bitmap);
+ if (ret == ECC_CHECK_RETURN_FF) {
+ if (buf)
+ memset(buf, 0xff, mtd->writesize);
+ memset(oob_buf, 0xff, mtd->oobsize);
+ } else if (ret < 0) {
+ if ((nand->options & NAND_NEED_SCRAMBLING) || !buf) {
+ mtd->ecc_stats.failed++;
+ return bitflips;
+ }
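+ /*
+ * Uncorrectable: re-read the page in raw mode and let
+ * nand_check_erased_ecc_chunk() identify erased sectors.
+ */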
+ ret = meson_nfc_read_page_raw(nand, buf, 0, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nand->ecc.steps ; i++) {
+ u8 *data = buf + i * ecc->size;
+ u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
+
+ if (correct_bitmap & (1ULL << i))
+ continue;
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 2,
+ NULL, 0,
+ ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ bitflips = max_t(u32, bitflips, ret);
+ }
+ }
+ } else if (buf && buf != meson_chip->data_buf) {
+ memcpy(buf, meson_chip->data_buf, mtd->writesize);
+ }
+
+ return bitflips;
+}
+
+static int meson_nfc_read_oob_raw(struct nand_chip *nand, int page)
+{
+ return meson_nfc_read_page_raw(nand, NULL, 1, page);
+}
+
+static int meson_nfc_read_oob(struct nand_chip *nand, int page)
+{
+ return meson_nfc_read_page_hwecc(nand, NULL, 1, page);
+}
+
+static bool meson_nfc_is_buffer_dma_safe(const void *buffer)
+{
+ if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer)))
+ return true;
+ return false;
+}
+
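+/*
+ * Buffers that are vmalloc'ed or live on the stack cannot be DMA mapped
+ * directly, so bounce them through a kmalloc'ed copy.
+ */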
+static void *
+meson_nand_op_get_dma_safe_input_buf(const struct nand_op_instr *instr)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR))
+ return NULL;
+
+ if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.in))
+ return instr->ctx.data.buf.in;
+
+ return kzalloc(instr->ctx.data.len, GFP_KERNEL);
+}
+
+static void
+meson_nand_op_put_dma_safe_input_buf(const struct nand_op_instr *instr,
+ void *buf)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR) ||
+ WARN_ON(!buf))
+ return;
+
+ if (buf == instr->ctx.data.buf.in)
+ return;
+
+ memcpy(instr->ctx.data.buf.in, buf, instr->ctx.data.len);
+ kfree(buf);
+}
+
+static void *
+meson_nand_op_get_dma_safe_output_buf(const struct nand_op_instr *instr)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR))
+ return NULL;
+
+ if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.out))
+ return (void *)instr->ctx.data.buf.out;
+
+ return kmemdup(instr->ctx.data.buf.out,
+ instr->ctx.data.len, GFP_KERNEL);
+}
+
+static void
+meson_nand_op_put_dma_safe_output_buf(const struct nand_op_instr *instr,
+ const void *buf)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR) ||
+ WARN_ON(!buf))
+ return;
+
+ if (buf != instr->ctx.data.buf.out)
+ kfree(buf);
+}
+
+static int meson_nfc_exec_op(struct nand_chip *nand,
+ const struct nand_operation *op, bool check_only)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ const struct nand_op_instr *instr = NULL;
+ void *buf;
+ u32 op_id, delay_idle, cmd;
+ int i;
+
+ meson_nfc_select_chip(nand, op->cs);
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ delay_idle = DIV_ROUND_UP(PSEC_TO_NSEC(instr->delay_ns),
+ meson_chip->level1_divider *
+ NFC_CLK_CYCLE);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ cmd = nfc->param.chip_select | NFC_CMD_CLE;
+ cmd |= instr->ctx.cmd.opcode & 0xff;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ cmd = nfc->param.chip_select | NFC_CMD_ALE;
+ cmd |= instr->ctx.addr.addrs[i] & 0xff;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ }
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ buf = meson_nand_op_get_dma_safe_input_buf(instr);
+ if (!buf)
+ return -ENOMEM;
+ meson_nfc_read_buf(nand, buf, instr->ctx.data.len);
+ meson_nand_op_put_dma_safe_input_buf(instr, buf);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ buf = meson_nand_op_get_dma_safe_output_buf(instr);
+ if (!buf)
+ return -ENOMEM;
+ meson_nfc_write_buf(nand, buf, instr->ctx.data.len);
+ meson_nand_op_put_dma_safe_output_buf(instr, buf);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms);
+ if (instr->delay_ns)
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+ }
+ }
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ return 0;
+}
+
+static int meson_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
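+ /* each ECC step owns 2 free bytes followed by its ECC bytes in OOB */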
+ oobregion->offset = 2 + (section * (2 + nand->ecc.bytes));
+ oobregion->length = nand->ecc.bytes;
+
+ return 0;
+}
+
+static int meson_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = section * (2 + nand->ecc.bytes);
+ oobregion->length = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
+ .ecc = meson_ooblayout_ecc,
+ .free = meson_ooblayout_free,
+};
+
+static int meson_nfc_clk_init(struct meson_nfc *nfc)
+{
+ int ret;
+
+ /* request core clock */
+ nfc->core_clk = devm_clk_get(nfc->dev, "core");
+ if (IS_ERR(nfc->core_clk)) {
+ dev_err(nfc->dev, "failed to get core clock\n");
+ return PTR_ERR(nfc->core_clk);
+ }
+
+ nfc->device_clk = devm_clk_get(nfc->dev, "device");
+ if (IS_ERR(nfc->device_clk)) {
+ dev_err(nfc->dev, "failed to get device clock\n");
+ return PTR_ERR(nfc->device_clk);
+ }
+
+ nfc->phase_tx = devm_clk_get(nfc->dev, "tx");
+ if (IS_ERR(nfc->phase_tx)) {
+ dev_err(nfc->dev, "failed to get TX clk\n");
+ return PTR_ERR(nfc->phase_tx);
+ }
+
+ nfc->phase_rx = devm_clk_get(nfc->dev, "rx");
+ if (IS_ERR(nfc->phase_rx)) {
+ dev_err(nfc->dev, "failed to get RX clk\n");
+ return PTR_ERR(nfc->phase_rx);
+ }
+
+ /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
+ regmap_update_bits(nfc->reg_clk,
+ 0, CLK_SELECT_NAND, CLK_SELECT_NAND);
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->device_clk);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable device clock\n");
+ goto err_device_clk;
+ }
+
+ ret = clk_prepare_enable(nfc->phase_tx);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable TX clock\n");
+ goto err_phase_tx;
+ }
+
+ ret = clk_prepare_enable(nfc->phase_rx);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable RX clock\n");
+ goto err_phase_rx;
+ }
+
+ ret = clk_set_rate(nfc->device_clk, 24000000);
+ if (ret)
+ goto err_phase_rx;
+
+ return 0;
+err_phase_rx:
+ clk_disable_unprepare(nfc->phase_tx);
+err_phase_tx:
+ clk_disable_unprepare(nfc->device_clk);
+err_device_clk:
+ clk_disable_unprepare(nfc->core_clk);
+ return ret;
+}
+
+static void meson_nfc_disable_clk(struct meson_nfc *nfc)
+{
+ clk_disable_unprepare(nfc->phase_rx);
+ clk_disable_unprepare(nfc->phase_tx);
+ clk_disable_unprepare(nfc->device_clk);
+ clk_disable_unprepare(nfc->core_clk);
+}
+
+static void meson_nfc_free_buffer(struct nand_chip *nand)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+
+ kfree(meson_chip->info_buf);
+ kfree(meson_chip->data_buf);
+}
+
+static int meson_chip_buffer_init(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u32 page_bytes, info_bytes, nsectors;
+
+ nsectors = mtd->writesize / nand->ecc.size;
+
+ page_bytes = mtd->writesize + mtd->oobsize;
+ info_bytes = nsectors * PER_INFO_BYTE;
+
+ meson_chip->data_buf = kmalloc(page_bytes, GFP_KERNEL);
+ if (!meson_chip->data_buf)
+ return -ENOMEM;
+
+ meson_chip->info_buf = kmalloc(info_bytes, GFP_KERNEL);
+ if (!meson_chip->info_buf) {
+ kfree(meson_chip->data_buf);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static
+int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ const struct nand_sdr_timings *timings;
+ u32 div, bt_min, bt_max, tbers_clocks;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ div = DIV_ROUND_UP((timings->tRC_min / 1000), NFC_CLK_CYCLE);
+ bt_min = (timings->tREA_max + NFC_DEFAULT_DELAY) / div;
+ bt_max = (NFC_DEFAULT_DELAY + timings->tRHOH_min +
+ timings->tRC_min / 2) / div;
+
+ meson_chip->twb = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tWB_max),
+ div * NFC_CLK_CYCLE);
+ meson_chip->tadl = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tADL_min),
+ div * NFC_CLK_CYCLE);
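+ /* tBERS is stored as the rounded-up log2 of the timeout in clock cycles */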
+ tbers_clocks = DIV_ROUND_UP_ULL(PSEC_TO_NSEC(timings->tBERS_max),
+ div * NFC_CLK_CYCLE);
+ meson_chip->tbers_max = ilog2(tbers_clocks);
+ if (!is_power_of_2(tbers_clocks))
+ meson_chip->tbers_max++;
+
+ bt_min = DIV_ROUND_UP(bt_min, 1000);
+ bt_max = DIV_ROUND_UP(bt_max, 1000);
+
+ if (bt_max < bt_min)
+ return -EINVAL;
+
+ meson_chip->level1_divider = div;
+ meson_chip->clk_rate = 1000000000 / meson_chip->level1_divider;
+ meson_chip->bus_timing = (bt_min + bt_max) / 2 + 1;
+
+ return 0;
+}
+
+static int meson_nand_bch_mode(struct nand_chip *nand)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int i;
+
+ if (nand->ecc.strength > 60 || nand->ecc.strength < 8)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) {
+ if (meson_ecc[i].strength == nand->ecc.strength) {
+ meson_chip->bch_mode = meson_ecc[i].bch;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void meson_nand_detach_chip(struct nand_chip *nand)
+{
+ meson_nfc_free_buffer(nand);
+}
+
+static int meson_nand_attach_chip(struct nand_chip *nand)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ int nsectors = mtd->writesize / 1024;
+ int ret;
+
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand%d",
+ dev_name(nfc->dev),
+ meson_chip->sels[0]);
+ if (!mtd->name)
+ return -ENOMEM;
+ }
+
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
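+ /* 2 OOB bytes per 1kB section are reserved for user bytes */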
+ ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
+ mtd->oobsize - 2 * nsectors);
+ if (ret) {
+ dev_err(nfc->dev, "failed to ECC init\n");
+ return -EINVAL;
+ }
+
+ ret = meson_nand_bch_mode(nand);
+ if (ret)
+ return -EINVAL;
+
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.write_page_raw = meson_nfc_write_page_raw;
+ nand->ecc.write_page = meson_nfc_write_page_hwecc;
+ nand->ecc.write_oob_raw = nand_write_oob_std;
+ nand->ecc.write_oob = nand_write_oob_std;
+
+ nand->ecc.read_page_raw = meson_nfc_read_page_raw;
+ nand->ecc.read_page = meson_nfc_read_page_hwecc;
+ nand->ecc.read_oob_raw = meson_nfc_read_oob_raw;
+ nand->ecc.read_oob = meson_nfc_read_oob;
+
+ if (nand->options & NAND_BUSWIDTH_16) {
+ dev_err(nfc->dev, "16bits bus width not supported");
+ return -EINVAL;
+ }
+ ret = meson_chip_buffer_init(nand);
+ if (ret)
+ return -ENOMEM;
+
+ return ret;
+}
+
+static const struct nand_controller_ops meson_nand_controller_ops = {
+ .attach_chip = meson_nand_attach_chip,
+ .detach_chip = meson_nand_detach_chip,
+ .setup_data_interface = meson_nfc_setup_data_interface,
+ .exec_op = meson_nfc_exec_op,
+};
+
+static int
+meson_nfc_nand_chip_init(struct device *dev,
+ struct meson_nfc *nfc, struct device_node *np)
+{
+ struct meson_nfc_nand_chip *meson_chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int ret, i;
+ u32 tmp, nsels;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -EINVAL;
+
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > MAX_CE_NUM) {
+ dev_err(dev, "invalid register property size\n");
+ return -EINVAL;
+ }
+
+ meson_chip = devm_kzalloc(dev,
+ sizeof(*meson_chip) + (nsels * sizeof(u8)),
+ GFP_KERNEL);
+ if (!meson_chip)
+ return -ENOMEM;
+
+ meson_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "could not retrieve register property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", tmp);
+ return -EINVAL;
+ }
+ }
+
+ nand = &meson_chip->nand;
+ nand->controller = &nfc->controller;
+ nand->controller->ops = &meson_nand_controller_ops;
+ nand_set_flash_node(nand, np);
+ nand_set_controller_data(nand, nfc);
+
+ nand->options |= NAND_USE_BOUNCE_BUFFER;
+ mtd = nand_to_mtd(nand);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ ret = nand_scan(nand, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register MTD device: %d\n", ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ list_add_tail(&meson_chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+{
+ struct meson_nfc_nand_chip *meson_chip;
+ struct mtd_info *mtd;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ meson_chip = list_first_entry(&nfc->chips,
+ struct meson_nfc_nand_chip, node);
+ mtd = nand_to_mtd(&meson_chip->nand);
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ meson_nfc_free_buffer(&meson_chip->nand);
+ nand_cleanup(&meson_chip->nand);
+ list_del(&meson_chip->node);
+ }
+
+ return 0;
+}
+
+static int meson_nfc_nand_chips_init(struct device *dev,
+ struct meson_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ meson_nfc_nand_chip_cleanup(nfc);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t meson_nfc_irq(int irq, void *id)
+{
+ struct meson_nfc *nfc = id;
+ u32 cfg;
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ if (!(cfg & NFC_RB_IRQ_EN))
+ return IRQ_NONE;
+
+ cfg &= ~(NFC_RB_IRQ_EN);
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ complete(&nfc->completion);
+ return IRQ_HANDLED;
+}
+
+static const struct meson_nfc_data meson_gxl_data = {
+ .ecc_caps = &meson_gxl_ecc_caps,
+};
+
+static const struct meson_nfc_data meson_axg_data = {
+ .ecc_caps = &meson_axg_ecc_caps,
+};
+
+static const struct of_device_id meson_nfc_id_table[] = {
+ {
+ .compatible = "amlogic,meson-gxl-nfc",
+ .data = &meson_gxl_data,
+ }, {
+ .compatible = "amlogic,meson-axg-nfc",
+ .data = &meson_axg_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_nfc_id_table);
+
+static int meson_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_nfc *nfc;
+ struct resource *res;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->data = of_device_get_match_data(&pdev->dev);
+ if (!nfc->data)
+ return -ENODEV;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->reg_base))
+ return PTR_ERR(nfc->reg_base);
+
+ nfc->reg_clk =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "amlogic,mmc-syscon");
+ if (IS_ERR(nfc->reg_clk)) {
+ dev_err(dev, "Failed to lookup clock base\n");
+ return PTR_ERR(nfc->reg_clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no NFC IRQ resource\n");
+ return -EINVAL;
+ }
+
+ ret = meson_nfc_clk_init(nfc);
+ if (ret) {
+ dev_err(dev, "failed to initialize NAND clock\n");
+ return ret;
+ }
+
+ writel(0, nfc->reg_base + NFC_REG_CFG);
+ ret = devm_request_irq(dev, irq, meson_nfc_irq, 0, dev_name(dev), nfc);
+ if (ret) {
+ dev_err(dev, "failed to request NFC IRQ\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set DMA mask\n");
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = meson_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init NAND chips\n");
+ goto err_clk;
+ }
+
+ return 0;
+err_clk:
+ meson_nfc_disable_clk(nfc);
+ return ret;
+}
+
+static int meson_nfc_remove(struct platform_device *pdev)
+{
+ struct meson_nfc *nfc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = meson_nfc_nand_chip_cleanup(nfc);
+ if (ret)
+ return ret;
+
+ meson_nfc_disable_clk(nfc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver meson_nfc_driver = {
+ .probe = meson_nfc_probe,
+ .remove = meson_nfc_remove,
+ .driver = {
+ .name = "meson-nand",
+ .of_match_table = meson_nfc_id_table,
+ },
+};
+module_platform_driver(meson_nfc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Liang Yang <liang.yang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic's Meson NAND Flash Controller driver");
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 6432bd70c3b3..05b0c19d72d9 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -267,11 +267,15 @@ static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
struct mtk_ecc *ecc;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
- get_device(&pdev->dev);
ecc = platform_get_drvdata(pdev);
+ if (!ecc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
clk_prepare_enable(ecc->clk);
mtk_ecc_hw_init(ecc);
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index b6b4602f5132..2c0e09187773 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1451,8 +1451,7 @@ static int mtk_nfc_probe(struct platform_device *pdev)
if (!nfc)
return -ENOMEM;
- spin_lock_init(&nfc->controller.lock);
- init_waitqueue_head(&nfc->controller.wq);
+ nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
nfc->controller.ops = &mtk_nfc_controller_ops;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index cca4b24d2ffa..ddd396e93e32 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -278,11 +278,8 @@ EXPORT_SYMBOL_GPL(nand_deselect_target);
static void nand_release_device(struct nand_chip *chip)
{
/* Release the controller and the chip */
- spin_lock(&chip->controller->lock);
- chip->controller->active = NULL;
- chip->state = FL_READY;
- wake_up(&chip->controller->wq);
- spin_unlock(&chip->controller->lock);
+ mutex_unlock(&chip->controller->lock);
+ mutex_unlock(&chip->lock);
}
/**
@@ -331,57 +328,23 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
}
/**
- * panic_nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @new_state: the state which is requested
- *
- * Used when in panic, no locks are taken.
- */
-static void panic_nand_get_device(struct nand_chip *chip, int new_state)
-{
- /* Hardware controller shared among independent devices */
- chip->controller->active = chip;
- chip->state = new_state;
-}
-
-/**
* nand_get_device - [GENERIC] Get chip for selected access
* @chip: NAND chip structure
- * @new_state: the state which is requested
*
- * Get the device and lock it for exclusive access
+ * Lock the device and its controller for exclusive access
+ *
+ * Return: -EBUSY if the chip has been suspended, 0 otherwise
*/
-static int
-nand_get_device(struct nand_chip *chip, int new_state)
+static int nand_get_device(struct nand_chip *chip)
{
- spinlock_t *lock = &chip->controller->lock;
- wait_queue_head_t *wq = &chip->controller->wq;
- DECLARE_WAITQUEUE(wait, current);
-retry:
- spin_lock(lock);
-
- /* Hardware controller shared among independent devices */
- if (!chip->controller->active)
- chip->controller->active = chip;
-
- if (chip->controller->active == chip && chip->state == FL_READY) {
- chip->state = new_state;
- spin_unlock(lock);
- return 0;
- }
- if (new_state == FL_PM_SUSPENDED) {
- if (chip->controller->active->state == FL_PM_SUSPENDED) {
- chip->state = FL_PM_SUSPENDED;
- spin_unlock(lock);
- return 0;
- }
+ mutex_lock(&chip->lock);
+ if (chip->suspended) {
+ mutex_unlock(&chip->lock);
+ return -EBUSY;
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(wq, &wait);
- spin_unlock(lock);
- schedule();
- remove_wait_queue(wq, &wait);
- goto retry;
+ mutex_lock(&chip->controller->lock);
+
+ return 0;
}
/**
@@ -410,6 +373,7 @@ static int nand_check_wp(struct nand_chip *chip)
/**
* nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @chip: NAND chip object
* @oob: oob data buffer
* @len: oob data write length
* @ops: oob ops structure
@@ -457,7 +421,7 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
struct mtd_oob_ops *ops)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int chipnr, page, status, len;
+ int chipnr, page, status, len, ret;
pr_debug("%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int)to, (int)ops->ooblen);
@@ -479,7 +443,9 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- nand_reset(chip, chipnr);
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
nand_select_target(chip, chipnr);
@@ -602,7 +568,10 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
- nand_get_device(chip, FL_WRITING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
ret = nand_markbad_bbm(chip, ofs);
nand_release_device(chip);
}
@@ -3580,7 +3549,9 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->mode != MTD_OPS_RAW)
return -ENOTSUPP;
- nand_get_device(chip, FL_READING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
if (!ops->datbuf)
ret = nand_do_read_oob(chip, from, ops);
@@ -4099,9 +4070,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
struct mtd_oob_ops ops;
int ret;
- /* Grab the device */
- panic_nand_get_device(chip, FL_WRITING);
-
nand_select_target(chip, chipnr);
/* Wait for the device to get ready */
@@ -4132,7 +4100,9 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
ops->retlen = 0;
- nand_get_device(chip, FL_WRITING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -4155,23 +4125,6 @@ out:
}
/**
- * single_erase - [GENERIC] NAND standard block erase command function
- * @chip: NAND chip object
- * @page: the page address of the block which will be erased
- *
- * Standard erase command for NAND chips. Returns NAND status.
- */
-static int single_erase(struct nand_chip *chip, int page)
-{
- unsigned int eraseblock;
-
- /* Send commands to erase a block */
- eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
-
- return nand_erase_op(chip, eraseblock);
-}
-
-/**
* nand_erase - [MTD Interface] erase block(s)
* @mtd: MTD device structure
* @instr: erase instruction
@@ -4194,7 +4147,7 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
int allowbbt)
{
- int page, status, pages_per_block, ret, chipnr;
+ int page, pages_per_block, ret, chipnr;
loff_t len;
pr_debug("%s: start = 0x%012llx, len = %llu\n",
@@ -4205,7 +4158,9 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
return -EINVAL;
/* Grab the lock and see if the device is available */
- nand_get_device(chip, FL_ERASING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -4246,17 +4201,11 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
(page + pages_per_block))
chip->pagebuf = -1;
- if (chip->legacy.erase)
- status = chip->legacy.erase(chip,
- page & chip->pagemask);
- else
- status = single_erase(chip, page & chip->pagemask);
-
- /* See if block erase succeeded */
- if (status) {
+ ret = nand_erase_op(chip, (page & chip->pagemask) >>
+ (chip->phys_erase_shift - chip->page_shift));
+ if (ret) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
- ret = -EIO;
instr->fail_addr =
((loff_t)page << chip->page_shift);
goto erase_exit;
@@ -4298,7 +4247,7 @@ static void nand_sync(struct mtd_info *mtd)
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- nand_get_device(chip, FL_SYNCING);
+ WARN_ON(nand_get_device(chip));
/* Release it and go back */
nand_release_device(chip);
}
@@ -4315,7 +4264,10 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
int ret;
/* Select the NAND device */
- nand_get_device(chip, FL_READING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
nand_select_target(chip, chipnr);
ret = nand_block_checkbad(chip, offs, 0);
@@ -4388,7 +4340,13 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
*/
static int nand_suspend(struct mtd_info *mtd)
{
- return nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ mutex_lock(&chip->lock);
+ chip->suspended = 1;
+ mutex_unlock(&chip->lock);
+
+ return 0;
}
/**
@@ -4399,11 +4357,13 @@ static void nand_resume(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- if (chip->state == FL_PM_SUSPENDED)
- nand_release_device(chip);
+ mutex_lock(&chip->lock);
+ if (chip->suspended)
+ chip->suspended = 0;
else
pr_err("%s called for a chip which is not in suspended state\n",
__func__);
+ mutex_unlock(&chip->lock);
}
/**
@@ -4413,7 +4373,7 @@ static void nand_resume(struct mtd_info *mtd)
*/
static void nand_shutdown(struct mtd_info *mtd)
{
- nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
+ nand_suspend(mtd);
}
/* Set default functions */
@@ -5018,6 +4978,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
/* Assume all dies are deselected when we enter nand_scan_ident(). */
chip->cur_cs = -1;
+ mutex_init(&chip->lock);
+
/* Enforce the right timings for reset/detection */
onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
@@ -5060,11 +5022,15 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
u8 id[2];
/* See comment in nand_get_flash_type for reset */
- nand_reset(chip, i);
+ ret = nand_reset(chip, i);
+ if (ret)
+ break;
nand_select_target(chip, i);
/* Send the command for reading device ID */
- nand_readid_op(chip, 0, id, sizeof(id));
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ break;
/* Read manufacturer and device IDs */
if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
nand_deselect_target(chip);
@@ -5555,6 +5521,7 @@ static int nand_scan_tail(struct nand_chip *chip)
}
if (!ecc->read_page)
ecc->read_page = nand_read_page_hwecc_oob_first;
+ /* fall through */
case NAND_ECC_HW:
/* Use standard hwecc read page function? */
@@ -5574,6 +5541,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->read_subpage = nand_read_subpage;
if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
ecc->write_subpage = nand_write_subpage_hwecc;
+ /* fall through */
case NAND_ECC_HW_SYNDROME:
if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
@@ -5611,6 +5579,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->size, mtd->writesize);
ecc->mode = NAND_ECC_SOFT;
ecc->algo = NAND_ECC_HAMMING;
+ /* fall through */
case NAND_ECC_SOFT:
ret = nand_set_ecc_soft_ops(chip);
@@ -5717,9 +5686,6 @@ static int nand_scan_tail(struct nand_chip *chip)
}
chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
- /* Initialize state */
- chip->state = FL_READY;
-
/* Invalidate the pagebuffer reference */
chip->pagebuf = -1;
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 1b722fe9213c..19a2b563acdf 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
/**
* read_bbt - [GENERIC] Read the bad block table starting from page
- * @chip: NAND chip object
+ * @this: NAND chip object
* @buf: temporary buffer
* @page: the starting page
* @num: the number of bbt descriptors to read
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index 43575943f13b..f2526ec616a6 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -331,6 +331,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command,
*/
if (column == -1 && page_addr == -1)
return;
+ /* fall through */
default:
/*
@@ -483,7 +484,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command,
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
- /* This applies to read commands */
+ /* fall through - This applies to read commands */
default:
/*
* If we don't have access to the busy pin, we apply the given
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 68e8b9f7f372..8f280a2962c8 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -994,12 +994,9 @@ static int omap_wait(struct nand_chip *this)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
unsigned long timeo = jiffies;
- int status, state = this->state;
+ int status;
- if (state == FL_ERASING)
- timeo += msecs_to_jiffies(400);
- else
- timeo += msecs_to_jiffies(20);
+ timeo += msecs_to_jiffies(400);
writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
@@ -2173,11 +2170,8 @@ static const struct nand_controller_ops omap_nand_controller_ops = {
};
/* Shared among all NAND instances to synchronize access to the ECC Engine */
-static struct nand_controller omap_gpmc_controller = {
- .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
- .ops = &omap_nand_controller_ops,
-};
+static struct nand_controller omap_gpmc_controller;
+static bool omap_gpmc_controller_initialized;
static int omap_nand_probe(struct platform_device *pdev)
{
@@ -2227,6 +2221,12 @@ static int omap_nand_probe(struct platform_device *pdev)
info->phys_base = res->start;
+ if (!omap_gpmc_controller_initialized) {
+ omap_gpmc_controller.ops = &omap_nand_controller_ops;
+ nand_controller_init(&omap_gpmc_controller);
+ omap_gpmc_controller_initialized = true;
+ }
+
nand_chip->controller = &omap_gpmc_controller;
nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 46c62a31fa46..920e7375084f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2833,6 +2833,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
if (ret)
return ret;
+ if (nandc->props->is_bam) {
+ free_bam_transaction(nandc);
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ return -ENOMEM;
+ }
+ }
+
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
nand_cleanup(chip);
@@ -2847,16 +2857,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
struct qcom_nand_host *host;
int ret;
- if (nandc->props->is_bam) {
- free_bam_transaction(nandc);
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
- return -ENOMEM;
- }
- }
-
for_each_available_child_of_node(dn, child) {
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index c01422d953dd..86456216fb93 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -369,8 +369,7 @@ static int r852_wait(struct nand_chip *chip)
unsigned long timeout;
u8 status;
- timeout = jiffies + (chip->state == FL_ERASING ?
- msecs_to_jiffies(400) : msecs_to_jiffies(20));
+ timeout = jiffies + msecs_to_jiffies(400);
while (time_before(jiffies, timeout))
if (chip->legacy.dev_ready(chip))
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
new file mode 100644
index 000000000000..999ca6a66036
--- /dev/null
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -0,0 +1,2073 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018
+ * Author: Christophe Kerello <christophe.kerello@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* Bad block marker length */
+#define FMC2_BBM_LEN 2
+
+/* ECC step size */
+#define FMC2_ECC_STEP_SIZE 512
+
+/* BCHDSRx registers length */
+#define FMC2_BCHDSRS_LEN 20
+
+/* HECCR length */
+#define FMC2_HECCR_LEN 4
+
+/* Max requests done for an 8k NAND page size */
+#define FMC2_MAX_SG 16
+
+/* Max chip enable */
+#define FMC2_MAX_CE 2
+
+/* Max ECC buffer length */
+#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
+
+/* Timings */
+#define FMC2_THIZ 1
+#define FMC2_TIO 8000
+#define FMC2_TSYNC 3000
+#define FMC2_PCR_TIMING_MASK 0xf
+#define FMC2_PMEM_PATT_TIMING_MASK 0xff
+
+/* FMC2 Controller Registers */
+#define FMC2_BCR1 0x0
+#define FMC2_PCR 0x80
+#define FMC2_SR 0x84
+#define FMC2_PMEM 0x88
+#define FMC2_PATT 0x8c
+#define FMC2_HECCR 0x94
+#define FMC2_CSQCR 0x200
+#define FMC2_CSQCFGR1 0x204
+#define FMC2_CSQCFGR2 0x208
+#define FMC2_CSQCFGR3 0x20c
+#define FMC2_CSQAR1 0x210
+#define FMC2_CSQAR2 0x214
+#define FMC2_CSQIER 0x220
+#define FMC2_CSQISR 0x224
+#define FMC2_CSQICR 0x228
+#define FMC2_CSQEMSR 0x230
+#define FMC2_BCHIER 0x250
+#define FMC2_BCHISR 0x254
+#define FMC2_BCHICR 0x258
+#define FMC2_BCHPBR1 0x260
+#define FMC2_BCHPBR2 0x264
+#define FMC2_BCHPBR3 0x268
+#define FMC2_BCHPBR4 0x26c
+#define FMC2_BCHDSR0 0x27c
+#define FMC2_BCHDSR1 0x280
+#define FMC2_BCHDSR2 0x284
+#define FMC2_BCHDSR3 0x288
+#define FMC2_BCHDSR4 0x28c
+
+/* Register: FMC2_BCR1 */
+#define FMC2_BCR1_FMC2EN BIT(31)
+
+/* Register: FMC2_PCR */
+#define FMC2_PCR_PWAITEN BIT(1)
+#define FMC2_PCR_PBKEN BIT(2)
+#define FMC2_PCR_PWID_MASK GENMASK(5, 4)
+#define FMC2_PCR_PWID(x) (((x) & 0x3) << 4)
+#define FMC2_PCR_PWID_BUSWIDTH_8 0
+#define FMC2_PCR_PWID_BUSWIDTH_16 1
+#define FMC2_PCR_ECCEN BIT(6)
+#define FMC2_PCR_ECCALG BIT(8)
+#define FMC2_PCR_TCLR_MASK GENMASK(12, 9)
+#define FMC2_PCR_TCLR(x) (((x) & 0xf) << 9)
+#define FMC2_PCR_TCLR_DEFAULT 0xf
+#define FMC2_PCR_TAR_MASK GENMASK(16, 13)
+#define FMC2_PCR_TAR(x) (((x) & 0xf) << 13)
+#define FMC2_PCR_TAR_DEFAULT 0xf
+#define FMC2_PCR_ECCSS_MASK GENMASK(19, 17)
+#define FMC2_PCR_ECCSS(x) (((x) & 0x7) << 17)
+#define FMC2_PCR_ECCSS_512 1
+#define FMC2_PCR_ECCSS_2048 3
+#define FMC2_PCR_BCHECC BIT(24)
+#define FMC2_PCR_WEN BIT(25)
+
+/* Register: FMC2_SR */
+#define FMC2_SR_NWRF BIT(6)
+
+/* Register: FMC2_PMEM */
+#define FMC2_PMEM_MEMSET(x) (((x) & 0xff) << 0)
+#define FMC2_PMEM_MEMWAIT(x) (((x) & 0xff) << 8)
+#define FMC2_PMEM_MEMHOLD(x) (((x) & 0xff) << 16)
+#define FMC2_PMEM_MEMHIZ(x) (((x) & 0xff) << 24)
+#define FMC2_PMEM_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_PATT */
+#define FMC2_PATT_ATTSET(x) (((x) & 0xff) << 0)
+#define FMC2_PATT_ATTWAIT(x) (((x) & 0xff) << 8)
+#define FMC2_PATT_ATTHOLD(x) (((x) & 0xff) << 16)
+#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24)
+#define FMC2_PATT_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_CSQCR */
+#define FMC2_CSQCR_CSQSTART BIT(0)
+
+/* Register: FMC2_CSQCFGR1 */
+#define FMC2_CSQCFGR1_CMD2EN BIT(1)
+#define FMC2_CSQCFGR1_DMADEN BIT(2)
+#define FMC2_CSQCFGR1_ACYNBR(x) (((x) & 0x7) << 4)
+#define FMC2_CSQCFGR1_CMD1(x) (((x) & 0xff) << 8)
+#define FMC2_CSQCFGR1_CMD2(x) (((x) & 0xff) << 16)
+#define FMC2_CSQCFGR1_CMD1T BIT(24)
+#define FMC2_CSQCFGR1_CMD2T BIT(25)
+
+/* Register: FMC2_CSQCFGR2 */
+#define FMC2_CSQCFGR2_SQSDTEN BIT(0)
+#define FMC2_CSQCFGR2_RCMD2EN BIT(1)
+#define FMC2_CSQCFGR2_DMASEN BIT(2)
+#define FMC2_CSQCFGR2_RCMD1(x) (((x) & 0xff) << 8)
+#define FMC2_CSQCFGR2_RCMD2(x) (((x) & 0xff) << 16)
+#define FMC2_CSQCFGR2_RCMD1T BIT(24)
+#define FMC2_CSQCFGR2_RCMD2T BIT(25)
+
+/* Register: FMC2_CSQCFGR3 */
+#define FMC2_CSQCFGR3_SNBR(x) (((x) & 0x1f) << 8)
+#define FMC2_CSQCFGR3_AC1T BIT(16)
+#define FMC2_CSQCFGR3_AC2T BIT(17)
+#define FMC2_CSQCFGR3_AC3T BIT(18)
+#define FMC2_CSQCFGR3_AC4T BIT(19)
+#define FMC2_CSQCFGR3_AC5T BIT(20)
+#define FMC2_CSQCFGR3_SDT BIT(21)
+#define FMC2_CSQCFGR3_RAC1T BIT(22)
+#define FMC2_CSQCFGR3_RAC2T BIT(23)
+
+/* Register: FMC2_CSQCAR1 */
+#define FMC2_CSQCAR1_ADDC1(x) (((x) & 0xff) << 0)
+#define FMC2_CSQCAR1_ADDC2(x) (((x) & 0xff) << 8)
+#define FMC2_CSQCAR1_ADDC3(x) (((x) & 0xff) << 16)
+#define FMC2_CSQCAR1_ADDC4(x) (((x) & 0xff) << 24)
+
+/* Register: FMC2_CSQCAR2 */
+#define FMC2_CSQCAR2_ADDC5(x) (((x) & 0xff) << 0)
+#define FMC2_CSQCAR2_NANDCEN(x) (((x) & 0x3) << 10)
+#define FMC2_CSQCAR2_SAO(x) (((x) & 0xffff) << 16)
+
+/* Register: FMC2_CSQIER */
+#define FMC2_CSQIER_TCIE BIT(0)
+
+/* Register: FMC2_CSQICR */
+#define FMC2_CSQICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_CSQEMSR */
+#define FMC2_CSQEMSR_SEM GENMASK(15, 0)
+
+/* Register: FMC2_BCHIER */
+#define FMC2_BCHIER_DERIE BIT(1)
+#define FMC2_BCHIER_EPBRIE BIT(4)
+
+/* Register: FMC2_BCHICR */
+#define FMC2_BCHICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_BCHDSR0 */
+#define FMC2_BCHDSR0_DUE BIT(0)
+#define FMC2_BCHDSR0_DEF BIT(1)
+#define FMC2_BCHDSR0_DEN_MASK GENMASK(7, 4)
+#define FMC2_BCHDSR0_DEN_SHIFT 4
+
+/* Register: FMC2_BCHDSR1 */
+#define FMC2_BCHDSR1_EBP1_MASK GENMASK(12, 0)
+#define FMC2_BCHDSR1_EBP2_MASK GENMASK(28, 16)
+#define FMC2_BCHDSR1_EBP2_SHIFT 16
+
+/* Register: FMC2_BCHDSR2 */
+#define FMC2_BCHDSR2_EBP3_MASK GENMASK(12, 0)
+#define FMC2_BCHDSR2_EBP4_MASK GENMASK(28, 16)
+#define FMC2_BCHDSR2_EBP4_SHIFT 16
+
+/* Register: FMC2_BCHDSR3 */
+#define FMC2_BCHDSR3_EBP5_MASK GENMASK(12, 0)
+#define FMC2_BCHDSR3_EBP6_MASK GENMASK(28, 16)
+#define FMC2_BCHDSR3_EBP6_SHIFT 16
+
+/* Register: FMC2_BCHDSR4 */
+#define FMC2_BCHDSR4_EBP7_MASK GENMASK(12, 0)
+#define FMC2_BCHDSR4_EBP8_MASK GENMASK(28, 16)
+#define FMC2_BCHDSR4_EBP8_SHIFT 16
+
+enum stm32_fmc2_ecc {
+ FMC2_ECC_HAM = 1,
+ FMC2_ECC_BCH4 = 4,
+ FMC2_ECC_BCH8 = 8
+};
+
+enum stm32_fmc2_irq_state {
+ FMC2_IRQ_UNKNOWN = 0,
+ FMC2_IRQ_BCH,
+ FMC2_IRQ_SEQ
+};
+
+struct stm32_fmc2_timings {
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 twait;
+ u8 thold_mem;
+ u8 tset_mem;
+ u8 thold_att;
+ u8 tset_att;
+};
+
+struct stm32_fmc2_nand {
+ struct nand_chip chip;
+ struct stm32_fmc2_timings timings;
+ int ncs;
+ int cs_used[FMC2_MAX_CE];
+};
+
+static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct stm32_fmc2_nand, chip);
+}
+
+struct stm32_fmc2_nfc {
+ struct nand_controller base;
+ struct stm32_fmc2_nand nand;
+ struct device *dev;
+ void __iomem *io_base;
+ void __iomem *data_base[FMC2_MAX_CE];
+ void __iomem *cmd_base[FMC2_MAX_CE];
+ void __iomem *addr_base[FMC2_MAX_CE];
+ phys_addr_t io_phys_addr;
+ phys_addr_t data_phys_addr[FMC2_MAX_CE];
+ struct clk *clk;
+ u8 irq_state;
+
+ struct dma_chan *dma_tx_ch;
+ struct dma_chan *dma_rx_ch;
+ struct dma_chan *dma_ecc_ch;
+ struct sg_table dma_data_sg;
+ struct sg_table dma_ecc_sg;
+ u8 *ecc_buf;
+ int dma_ecc_len;
+
+ struct completion complete;
+ struct completion dma_data_complete;
+ struct completion dma_ecc_complete;
+
+ u8 cs_assigned;
+ int cs_sel;
+};
+
+static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base)
+{
+ return container_of(base, struct stm32_fmc2_nfc, base);
+}
+
+/* Timings configuration */
+static void stm32_fmc2_timings_init(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *timings = &nand->timings;
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ u32 pmem, patt;
+
+ /* Set tclr/tar timings */
+ pcr &= ~FMC2_PCR_TCLR_MASK;
+ pcr |= FMC2_PCR_TCLR(timings->tclr);
+ pcr &= ~FMC2_PCR_TAR_MASK;
+ pcr |= FMC2_PCR_TAR(timings->tar);
+
+ /* Set tset/twait/thold/thiz timings in common bank */
+ pmem = FMC2_PMEM_MEMSET(timings->tset_mem);
+ pmem |= FMC2_PMEM_MEMWAIT(timings->twait);
+ pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem);
+ pmem |= FMC2_PMEM_MEMHIZ(timings->thiz);
+
+ /* Set tset/twait/thold/thiz timings in attribute bank */
+ patt = FMC2_PATT_ATTSET(timings->tset_att);
+ patt |= FMC2_PATT_ATTWAIT(timings->twait);
+ patt |= FMC2_PATT_ATTHOLD(timings->thold_att);
+ patt |= FMC2_PATT_ATTHIZ(timings->thiz);
+
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM);
+ writel_relaxed(patt, fmc2->io_base + FMC2_PATT);
+}
+
+/* Controller configuration */
+static void stm32_fmc2_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+
+ /* Configure ECC algorithm (default configuration is Hamming) */
+ pcr &= ~FMC2_PCR_ECCALG;
+ pcr &= ~FMC2_PCR_BCHECC;
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ pcr |= FMC2_PCR_ECCALG;
+ pcr |= FMC2_PCR_BCHECC;
+ } else if (chip->ecc.strength == FMC2_ECC_BCH4) {
+ pcr |= FMC2_PCR_ECCALG;
+ }
+
+ /* Set buswidth */
+ pcr &= ~FMC2_PCR_PWID_MASK;
+ if (chip->options & NAND_BUSWIDTH_16)
+ pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
+
+ /* Set ECC sector size */
+ pcr &= ~FMC2_PCR_ECCSS_MASK;
+ pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512);
+
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+}
+
+/* Select target */
+static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct dma_slave_config dma_cfg;
+ int ret;
+
+ if (nand->cs_used[chipnr] == fmc2->cs_sel)
+ return 0;
+
+ fmc2->cs_sel = nand->cs_used[chipnr];
+
+ /* FMC2 setup routine */
+ stm32_fmc2_setup(chip);
+
+ /* Apply timings */
+ stm32_fmc2_timings_init(chip);
+
+ if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) {
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel];
+ dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel];
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.src_maxburst = 32;
+ dma_cfg.dst_maxburst = 32;
+
+ ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg);
+ if (ret) {
+ dev_err(fmc2->dev, "tx DMA engine slave config failed\n");
+ return ret;
+ }
+
+ ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg);
+ if (ret) {
+ dev_err(fmc2->dev, "rx DMA engine slave config failed\n");
+ return ret;
+ }
+ }
+
+ if (fmc2->dma_ecc_ch) {
+ /*
+ * Hamming: we read the HECCR register
+ * BCH4/BCH8: we read the BCHDSRx registers
+ */
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = fmc2->io_phys_addr;
+ dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR : FMC2_BCHDSR0;
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg);
+ if (ret) {
+ dev_err(fmc2->dev, "ECC DMA engine slave config failed\n");
+ return ret;
+ }
+
+ /* Calculate ECC length needed for one sector */
+ fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
+ }
+
+ return 0;
+}
+
+/* Set bus width to 16-bit or 8-bit */
+static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set)
+{
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+
+ pcr &= ~FMC2_PCR_PWID_MASK;
+ if (set)
+ pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+}
+
+/* Enable/disable ECC */
+static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable)
+{
+ u32 pcr = readl(fmc2->io_base + FMC2_PCR);
+
+ pcr &= ~FMC2_PCR_ECCEN;
+ if (enable)
+ pcr |= FMC2_PCR_ECCEN;
+ writel(pcr, fmc2->io_base + FMC2_PCR);
+}
+
+/* Enable IRQ sources when the sequencer is used */
+static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+
+ csqier |= FMC2_CSQIER_TCIE;
+
+ fmc2->irq_state = FMC2_IRQ_SEQ;
+
+ writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+}
+
+/* Disable IRQ sources when the sequencer is used */
+static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+
+ csqier &= ~FMC2_CSQIER_TCIE;
+
+ writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+
+ fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+}
+
+/* Clear IRQ sources when the sequencer is used */
+static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR);
+}
+
+/* Enable IRQ sources when BCH is used */
+static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2,
+ int mode)
+{
+ u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+
+ if (mode == NAND_ECC_WRITE)
+ bchier |= FMC2_BCHIER_EPBRIE;
+ else
+ bchier |= FMC2_BCHIER_DERIE;
+
+ fmc2->irq_state = FMC2_IRQ_BCH;
+
+ writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+}
+
+/* Disable IRQ sources when BCH is used */
+static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+
+ bchier &= ~FMC2_BCHIER_DERIE;
+ bchier &= ~FMC2_BCHIER_EPBRIE;
+
+ writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+
+ fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+}
+
+/* Clear IRQ sources when BCH is used */
+static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR);
+}
+
+/*
+ * Enable the ECC logic and reset the previously calculated syndrome/parity
+ * bits. Syndrome/parity bits are cleared by setting the ECCEN bit to 0.
+ */
+static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+
+ stm32_fmc2_set_ecc(fmc2, false);
+
+ if (chip->ecc.strength != FMC2_ECC_HAM) {
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+
+ if (mode == NAND_ECC_WRITE)
+ pcr |= FMC2_PCR_WEN;
+ else
+ pcr &= ~FMC2_PCR_WEN;
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+
+ reinit_completion(&fmc2->complete);
+ stm32_fmc2_clear_bch_irq(fmc2);
+ stm32_fmc2_enable_bch_irq(fmc2, mode);
+ }
+
+ stm32_fmc2_set_ecc(fmc2, true);
+}
+
+/*
+ * ECC Hamming calculation
+ * ECC is 3 bytes for 512 bytes of data (corrects at most 1 bit per
+ * 512-byte sector)
+ */
+static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+{
+ ecc[0] = ecc_sta;
+ ecc[1] = ecc_sta >> 8;
+ ecc[2] = ecc_sta >> 16;
+}
+
+static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u32 sr, heccr;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
+ sr, sr & FMC2_SR_NWRF, 10, 1000);
+ if (ret) {
+ dev_err(fmc2->dev, "ham timeout\n");
+ return ret;
+ }
+
+ heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR);
+
+ stm32_fmc2_ham_set_ecc(heccr, ecc);
+
+ /* Disable ECC */
+ stm32_fmc2_set_ecc(fmc2, false);
+
+ return 0;
+}
+
+static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ u8 bit_position = 0, b0, b1, b2;
+ u32 byte_addr = 0, b;
+ u32 i, shifting = 1;
+
+ /* Indicate which bit and byte is faulty (if any) */
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ b2 = read_ecc[2] ^ calc_ecc[2];
+ b = b0 | (b1 << 8) | (b2 << 16);
+
+ /* No errors */
+ if (likely(!b))
+ return 0;
+
+ /* Calculate bit position */
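+ /* each 2-bit syndrome pair must be 01 or 10; a 10 sets this position bit */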
+ for (i = 0; i < 3; i++) {
+ switch (b % 4) {
+ case 2:
+ bit_position += shifting;
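+ /* fall through */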
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Calculate byte position */
+ shifting = 1;
+ for (i = 0; i < 9; i++) {
+ switch (b % 4) {
+ case 2:
+ byte_addr += shifting;
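+ /* fall through */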
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Flip the bit */
+ dat[byte_addr] ^= (1 << bit_position);
+
+ return 1;
+}
+
+/*
+ * ECC BCH calculation and correction
+ * ECC is 7 bytes (BCH4) or 13 bytes (BCH8) for 512 bytes of data
+ * (corrects at most 4 or 8 bits per 512-byte sector)
+ */
+static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u32 bchpbr;
+
+ /* Wait until the BCH code is ready */
+ if (!wait_for_completion_timeout(&fmc2->complete,
+ msecs_to_jiffies(1000))) {
+ dev_err(fmc2->dev, "bch timeout\n");
+ stm32_fmc2_disable_bch_irq(fmc2);
+ return -ETIMEDOUT;
+ }
+
+ /* Read parity bits */
+ bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1);
+ ecc[0] = bchpbr;
+ ecc[1] = bchpbr >> 8;
+ ecc[2] = bchpbr >> 16;
+ ecc[3] = bchpbr >> 24;
+
+ bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2);
+ ecc[4] = bchpbr;
+ ecc[5] = bchpbr >> 8;
+ ecc[6] = bchpbr >> 16;
+
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ ecc[7] = bchpbr >> 24;
+
+ bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3);
+ ecc[8] = bchpbr;
+ ecc[9] = bchpbr >> 8;
+ ecc[10] = bchpbr >> 16;
+ ecc[11] = bchpbr >> 24;
+
+ bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4);
+ ecc[12] = bchpbr;
+ }
+
+ /* Disable ECC */
+ stm32_fmc2_set_ecc(fmc2, false);
+
+ return 0;
+}
+
+/* BCH algorithm correction */
+static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
+{
+ u32 bchdsr0 = ecc_sta[0];
+ u32 bchdsr1 = ecc_sta[1];
+ u32 bchdsr2 = ecc_sta[2];
+ u32 bchdsr3 = ecc_sta[3];
+ u32 bchdsr4 = ecc_sta[4];
+ u16 pos[8];
+ int i, den;
+ unsigned int nb_errs = 0;
+
+ /* No errors found */
+ if (likely(!(bchdsr0 & FMC2_BCHDSR0_DEF)))
+ return 0;
+
+ /* Too many errors detected */
+ if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
+ return -EBADMSG;
+
+ pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK;
+ pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT;
+ pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK;
+ pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT;
+ pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK;
+ pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT;
+ pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK;
+ pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT;
+
+ den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT;
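+ /* positions beyond the data area fall in the ECC bytes and are skipped */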
+ for (i = 0; i < den; i++) {
+ if (pos[i] < eccsize * 8) {
+ change_bit(pos[i], (unsigned long *)dat);
+ nb_errs++;
+ }
+ }
+
+ return nb_errs;
+}
+
+static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u32 ecc_sta[5];
+
+ /* Wait until the decoding error is ready */
+ if (!wait_for_completion_timeout(&fmc2->complete,
+ msecs_to_jiffies(1000))) {
+ dev_err(fmc2->dev, "bch timeout\n");
+ stm32_fmc2_disable_bch_irq(fmc2);
+ return -ETIMEDOUT;
+ }
+
+ ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0);
+ ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1);
+ ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2);
+ ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3);
+ ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4);
+
+ /* Disable ECC */
+ stm32_fmc2_set_ecc(fmc2, false);
+
+ return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta);
+}
+
+static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret, i, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = mtd->writesize + FMC2_BBM_LEN, s = 0; s < eccsteps;
+ s++, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ /* Read the nand page sector (512 bytes) */
+ ret = nand_change_read_column_op(chip, s * eccsize, p,
+ eccsize, false);
+ if (ret)
+ return ret;
+
+ /* Read the corresponding ECC bytes */
+ ret = nand_change_read_column_op(chip, i, ecc_code,
+ eccbytes, false);
+ if (ret)
+ return ret;
+
+ /* Correct the data */
+ stat = chip->ecc.correct(chip, p, ecc_code, ecc_calc);
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ ecc_code, eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Read oob */
+ if (oob_required) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return max_bitflips;
+}
+
+/* Sequencer read/write configuration */
+static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
+ int raw, bool write_data)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 csqcfgr1, csqcfgr2, csqcfgr3;
+ u32 csqar1, csqar2;
+ u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+
+ if (write_data)
+ pcr |= FMC2_PCR_WEN;
+ else
+ pcr &= ~FMC2_PCR_WEN;
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+
+ /*
+ * - Set Program Page/Page Read command
+ * - Enable DMA request data
+ * - Set timings
+ */
+ csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
+ if (write_data)
+ csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN);
+ else
+ csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) |
+ FMC2_CSQCFGR1_CMD2EN |
+ FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) |
+ FMC2_CSQCFGR1_CMD2T;
+
+ /*
+ * - Set Random Data Input/Random Data Read command
+ * - Enable the sequencer to access the Spare data area
+ * - Enable DMA request status decoding for read
+ * - Set timings
+ */
+ if (write_data)
+ csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN);
+ else
+ csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) |
+ FMC2_CSQCFGR2_RCMD2EN |
+ FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) |
+ FMC2_CSQCFGR2_RCMD1T |
+ FMC2_CSQCFGR2_RCMD2T;
+ if (!raw) {
+ csqcfgr2 |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
+ csqcfgr2 |= FMC2_CSQCFGR2_SQSDTEN;
+ }
+
+ /*
+ * - Set the number of sectors to be written
+ * - Set timings
+ */
+ csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1);
+ if (write_data) {
+ csqcfgr3 |= FMC2_CSQCFGR3_RAC2T;
+ if (chip->options & NAND_ROW_ADDR_3)
+ csqcfgr3 |= FMC2_CSQCFGR3_AC5T;
+ else
+ csqcfgr3 |= FMC2_CSQCFGR3_AC4T;
+ }
+
+ /*
+ * Set the first four address cycles
+ * Byte 1 and byte 2 => column, we start at 0x0
+ * Byte 3 and byte 4 => page
+ */
+ csqar1 = FMC2_CSQCAR1_ADDC3(page);
+ csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8);
+
+ /*
+ * - Set chip enable number
+ * - Set ECC byte offset in the spare area
+ * - Calculate the number of address cycles to be issued
+ * - Set byte 5 of address cycle if needed
+ */
+ csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel);
+ if (chip->options & NAND_BUSWIDTH_16)
+ csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1);
+ else
+ csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset);
+ if (chip->options & NAND_ROW_ADDR_3) {
+ csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5);
+ csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16);
+ } else {
+ csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4);
+ }
+
+ writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1);
+ writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2);
+ writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3);
+ writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1);
+ writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2);
+}
+
+static void stm32_fmc2_dma_callback(void *arg)
+{
+ complete((struct completion *)arg);
+}
+
+/* Read/write data from/to a page */
+static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
+ int raw, bool write_data)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct dma_async_tx_descriptor *desc_data, *desc_ecc;
+ struct scatterlist *sg;
+ struct dma_chan *dma_ch = fmc2->dma_rx_ch;
+ enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
+ enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
+ u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR);
+ int eccsteps = chip->ecc.steps;
+ int eccsize = chip->ecc.size;
+ const u8 *p = buf;
+ int s, ret;
+
+ /* Configure DMA data */
+ if (write_data) {
+ dma_data_dir = DMA_TO_DEVICE;
+ dma_transfer_dir = DMA_MEM_TO_DEV;
+ dma_ch = fmc2->dma_tx_ch;
+ }
+
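+ /* Split the data buffer into one scatterlist entry per ECC sector (eccsize bytes each) */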
+ for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, eccsize);
+ p += eccsize;
+ }
+
+ ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl,
+ eccsteps, dma_data_dir);
+ if (ret < 0)
+ return ret;
+
+ desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl,
+ eccsteps, dma_transfer_dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc_data) {
+ ret = -ENOMEM;
+ goto err_unmap_data;
+ }
+
+ reinit_completion(&fmc2->dma_data_complete);
+ reinit_completion(&fmc2->complete);
+ desc_data->callback = stm32_fmc2_dma_callback;
+ desc_data->callback_param = &fmc2->dma_data_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_data));
+ if (ret)
+ goto err_unmap_data;
+
+ dma_async_issue_pending(dma_ch);
+
+ if (!write_data && !raw) {
+ /* Configure DMA ECC status */
+ p = fmc2->ecc_buf;
+ for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, fmc2->dma_ecc_len);
+ p += fmc2->dma_ecc_len;
+ }
+
+ ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+ eccsteps, dma_data_dir);
+ if (ret < 0)
+ goto err_unmap_data;
+
+ desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch,
+ fmc2->dma_ecc_sg.sgl,
+ eccsteps, dma_transfer_dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc_ecc) {
+ ret = -ENOMEM;
+ goto err_unmap_ecc;
+ }
+
+ reinit_completion(&fmc2->dma_ecc_complete);
+ desc_ecc->callback = stm32_fmc2_dma_callback;
+ desc_ecc->callback_param = &fmc2->dma_ecc_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_ecc));
+ if (ret)
+ goto err_unmap_ecc;
+
+ dma_async_issue_pending(fmc2->dma_ecc_ch);
+ }
+
+ stm32_fmc2_clear_seq_irq(fmc2);
+ stm32_fmc2_enable_seq_irq(fmc2);
+
+ /* Start the transfer */
+ csqcr |= FMC2_CSQCR_CSQSTART;
+ writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR);
+
+ /* Wait for the end of the sequencer transfer */
+ if (!wait_for_completion_timeout(&fmc2->complete,
+ msecs_to_jiffies(1000))) {
+ dev_err(fmc2->dev, "seq timeout\n");
+ stm32_fmc2_disable_seq_irq(fmc2);
+ dmaengine_terminate_all(dma_ch);
+ if (!write_data && !raw)
+ dmaengine_terminate_all(fmc2->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+ goto err_unmap_ecc;
+ }
+
+ /* Wait for the data DMA transfer to complete */
+ if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
+ msecs_to_jiffies(100))) {
+ dev_err(fmc2->dev, "data DMA timeout\n");
+ dmaengine_terminate_all(dma_ch);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait for the ECC DMA transfer to complete */
+ if (!write_data && !raw) {
+ if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
+ msecs_to_jiffies(100))) {
+ dev_err(fmc2->dev, "ECC DMA timeout\n");
+ dmaengine_terminate_all(fmc2->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+err_unmap_ecc:
+ if (!write_data && !raw)
+ dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+ eccsteps, dma_data_dir);
+
+err_unmap_data:
+ dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir);
+
+ return ret;
+}
+
+static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_rw_page_init(chip, page, raw, true);
+
+ /* Write the page */
+ ret = stm32_fmc2_xfer(chip, buf, raw, true);
+ if (ret)
+ return ret;
+
+ /* Write oob */
+ if (oob_required) {
+ ret = nand_change_write_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ int ret;
+
+ /* Select the target */
+ ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false);
+}
+
+static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ int ret;
+
+ /* Select the target */
+ ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true);
+}
+
+/* Get a status indicating which sectors have errors */
+static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR);
+
+ return csqemsr & FMC2_CSQEMSR_SEM;
+}
+
+static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ int i, s, eccsize = chip->ecc.size;
+ u32 *ecc_sta = (u32 *)fmc2->ecc_buf;
+ u16 sta_map = stm32_fmc2_get_mapping_status(fmc2);
+ unsigned int max_bitflips = 0;
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
+ int stat = 0;
+
+ if (eccstrength == FMC2_ECC_HAM) {
+ /* Ecc_sta = FMC2_HECCR */
+ if (sta_map & BIT(s)) {
+ stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]);
+ stat = stm32_fmc2_ham_correct(chip, dat,
+ &read_ecc[i],
+ &calc_ecc[i]);
+ }
+ ecc_sta++;
+ } else {
+ /*
+ * Ecc_sta[0] = FMC2_BCHDSR0
+ * Ecc_sta[1] = FMC2_BCHDSR1
+ * Ecc_sta[2] = FMC2_BCHDSR2
+ * Ecc_sta[3] = FMC2_BCHDSR3
+ * Ecc_sta[4] = FMC2_BCHDSR4
+ */
+ if (sta_map & BIT(s))
+ stat = stm32_fmc2_bch_decode(eccsize, dat,
+ ecc_sta);
+ ecc_sta += 5;
+ }
+
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(dat, eccsize,
+ &read_ecc[i],
+ eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ u16 sta_map;
+ int ret;
+
+ /* Select the target */
+ ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_rw_page_init(chip, page, 0, false);
+
+ /* Read the page */
+ ret = stm32_fmc2_xfer(chip, buf, 0, false);
+ if (ret)
+ return ret;
+
+ sta_map = stm32_fmc2_get_mapping_status(fmc2);
+
+ /* Check whether errors were detected */
+ if (likely(!sta_map)) {
+ if (oob_required)
+ return nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+
+ return 0;
+ }
+
+ /* Read oob */
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* Correct data */
+ return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
+}
+
+static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* Select the target */
+ ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_rw_page_init(chip, page, 1, false);
+
+ /* Read the page */
+ ret = stm32_fmc2_xfer(chip, buf, 1, false);
+ if (ret)
+ return ret;
+
+ /* Read oob */
+ if (oob_required)
+ return nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+
+ return 0;
+}
+
+static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id)
+{
+ struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id;
+
+ if (fmc2->irq_state == FMC2_IRQ_SEQ)
+ /* Sequencer is used */
+ stm32_fmc2_disable_seq_irq(fmc2);
+ else if (fmc2->irq_state == FMC2_IRQ_BCH)
+ /* BCH is used */
+ stm32_fmc2_disable_bch_irq(fmc2);
+
+ complete(&fmc2->complete);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel];
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 8-bit */
+ stm32_fmc2_set_buswidth_16(fmc2, false);
+
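+ /* Consume unaligned leading bytes so that the buffer becomes 32-bit aligned */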
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
+ *(u8 *)buf = readb_relaxed(io_addr_r);
+ buf += sizeof(u8);
+ len -= sizeof(u8);
+ }
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ len >= sizeof(u16)) {
+ *(u16 *)buf = readw_relaxed(io_addr_r);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+ }
+
+ /* Buf is aligned */
+ while (len >= sizeof(u32)) {
+ *(u32 *)buf = readl_relaxed(io_addr_r);
+ buf += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ /* Read remaining bytes */
+ if (len >= sizeof(u16)) {
+ *(u16 *)buf = readw_relaxed(io_addr_r);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+
+ if (len)
+ *(u8 *)buf = readb_relaxed(io_addr_r);
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 16-bit */
+ stm32_fmc2_set_buswidth_16(fmc2, true);
+}
+
+static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel];
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 8-bit */
+ stm32_fmc2_set_buswidth_16(fmc2, false);
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
+ writeb_relaxed(*(u8 *)buf, io_addr_w);
+ buf += sizeof(u8);
+ len -= sizeof(u8);
+ }
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ len >= sizeof(u16)) {
+ writew_relaxed(*(u16 *)buf, io_addr_w);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+ }
+
+ /* Buf is aligned */
+ while (len >= sizeof(u32)) {
+ writel_relaxed(*(u32 *)buf, io_addr_w);
+ buf += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ /* Write remaining bytes */
+ if (len >= sizeof(u16)) {
+ writew_relaxed(*(u16 *)buf, io_addr_w);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+
+ if (len)
+ writeb_relaxed(*(u8 *)buf, io_addr_w);
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 16-bit */
+ stm32_fmc2_set_buswidth_16(fmc2, true);
+}
+
+static int stm32_fmc2_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id, i;
+ int ret;
+
+ ret = stm32_fmc2_select_chip(chip, op->cs);
+ if (ret)
+ return ret;
+
+ if (check_only)
+ return ret;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb_relaxed(instr->ctx.cmd.opcode,
+ fmc2->cmd_base[fmc2->cs_sel]);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb_relaxed(instr->ctx.addr.addrs[i],
+ fmc2->addr_base[fmc2->cs_sel]);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ stm32_fmc2_read_data(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ stm32_fmc2_write_data(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = nand_soft_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Controller initialization */
+static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1);
+
+ /* Mark the selected CS as undefined */
+ fmc2->cs_sel = -1;
+
+ /* Enable wait feature and NAND flash memory bank */
+ pcr |= FMC2_PCR_PWAITEN;
+ pcr |= FMC2_PCR_PBKEN;
+
+ /* Set the bus width to 8-bit mode for identification */
+ pcr &= ~FMC2_PCR_PWID_MASK;
+
+ /* ECC logic is disabled */
+ pcr &= ~FMC2_PCR_ECCEN;
+
+ /* Default mode */
+ pcr &= ~FMC2_PCR_ECCALG;
+ pcr &= ~FMC2_PCR_BCHECC;
+ pcr &= ~FMC2_PCR_WEN;
+
+ /* Set default ECC sector size */
+ pcr &= ~FMC2_PCR_ECCSS_MASK;
+ pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048);
+
+ /* Set default tclr/tar timings */
+ pcr &= ~FMC2_PCR_TCLR_MASK;
+ pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT);
+ pcr &= ~FMC2_PCR_TAR_MASK;
+ pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT);
+
+ /* Enable FMC2 controller */
+ bcr1 |= FMC2_BCR1_FMC2EN;
+
+ writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1);
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM);
+ writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT);
+}
+
+/* Controller timings */
+static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ const struct nand_sdr_timings *sdrt)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *tims = &nand->timings;
+ unsigned long hclk = clk_get_rate(fmc2->clk);
+ unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
+ int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att;
+
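+ /* hclkp is the HCLK period; both hclkp and the SDR timings are expressed in picoseconds */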
+ tar = hclkp;
+ if (tar < sdrt->tAR_min)
+ tar = sdrt->tAR_min;
+ tims->tar = DIV_ROUND_UP(tar, hclkp) - 1;
+ if (tims->tar > FMC2_PCR_TIMING_MASK)
+ tims->tar = FMC2_PCR_TIMING_MASK;
+
+ tclr = hclkp;
+ if (tclr < sdrt->tCLR_min)
+ tclr = sdrt->tCLR_min;
+ tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1;
+ if (tims->tclr > FMC2_PCR_TIMING_MASK)
+ tims->tclr = FMC2_PCR_TIMING_MASK;
+
+ tims->thiz = FMC2_THIZ;
+ thiz = (tims->thiz + 1) * hclkp;
+
+ /*
+ * tWAIT > tRP
+ * tWAIT > tWP
+ * tWAIT > tREA + tIO
+ */
+ twait = hclkp;
+ if (twait < sdrt->tRP_min)
+ twait = sdrt->tRP_min;
+ if (twait < sdrt->tWP_min)
+ twait = sdrt->tWP_min;
+ if (twait < sdrt->tREA_max + FMC2_TIO)
+ twait = sdrt->tREA_max + FMC2_TIO;
+ tims->twait = DIV_ROUND_UP(twait, hclkp);
+ if (tims->twait == 0)
+ tims->twait = 1;
+ else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->twait = FMC2_PMEM_PATT_TIMING_MASK;
+
+ /*
+ * tSETUP_MEM > tCS - tWAIT
+ * tSETUP_MEM > tALS - tWAIT
+ * tSETUP_MEM > tDS - (tWAIT - tHIZ)
+ */
+ tset_mem = hclkp;
+ if (sdrt->tCS_min > twait && (tset_mem < sdrt->tCS_min - twait))
+ tset_mem = sdrt->tCS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_mem < sdrt->tALS_min - twait))
+ tset_mem = sdrt->tALS_min - twait;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_mem < sdrt->tDS_min - (twait - thiz)))
+ tset_mem = sdrt->tDS_min - (twait - thiz);
+ tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp);
+ if (tims->tset_mem == 0)
+ tims->tset_mem = 1;
+ else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK;
+
+ /*
+ * tHOLD_MEM > tCH
+ * tHOLD_MEM > tREH - tSETUP_MEM
+ * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
+ */
+ thold_mem = hclkp;
+ if (thold_mem < sdrt->tCH_min)
+ thold_mem = sdrt->tCH_min;
+ if (sdrt->tREH_min > tset_mem &&
+ (thold_mem < sdrt->tREH_min - tset_mem))
+ thold_mem = sdrt->tREH_min - tset_mem;
+ if ((sdrt->tRC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tRC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tRC_min - (tset_mem + twait);
+ if ((sdrt->tWC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tWC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tWC_min - (tset_mem + twait);
+ tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp);
+ if (tims->thold_mem == 0)
+ tims->thold_mem = 1;
+ else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK;
+
+ /*
+ * tSETUP_ATT > tCS - tWAIT
+ * tSETUP_ATT > tCLS - tWAIT
+ * tSETUP_ATT > tALS - tWAIT
+ * tSETUP_ATT > tRHW - tHOLD_MEM
+ * tSETUP_ATT > tDS - (tWAIT - tHIZ)
+ */
+ tset_att = hclkp;
+ if (sdrt->tCS_min > twait && (tset_att < sdrt->tCS_min - twait))
+ tset_att = sdrt->tCS_min - twait;
+ if (sdrt->tCLS_min > twait && (tset_att < sdrt->tCLS_min - twait))
+ tset_att = sdrt->tCLS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_att < sdrt->tALS_min - twait))
+ tset_att = sdrt->tALS_min - twait;
+ if (sdrt->tRHW_min > thold_mem &&
+ (tset_att < sdrt->tRHW_min - thold_mem))
+ tset_att = sdrt->tRHW_min - thold_mem;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_att < sdrt->tDS_min - (twait - thiz)))
+ tset_att = sdrt->tDS_min - (twait - thiz);
+ tims->tset_att = DIV_ROUND_UP(tset_att, hclkp);
+ if (tims->tset_att == 0)
+ tims->tset_att = 1;
+ else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK;
+
+ /*
+ * tHOLD_ATT > tALH
+ * tHOLD_ATT > tCH
+ * tHOLD_ATT > tCLH
+ * tHOLD_ATT > tCOH
+ * tHOLD_ATT > tDH
+ * tHOLD_ATT > tWB + tIO + tSYNC - tSETUP_MEM
+ * tHOLD_ATT > tADL - tSETUP_MEM
+ * tHOLD_ATT > tWH - tSETUP_MEM
+ * tHOLD_ATT > tWHR - tSETUP_MEM
+ * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
+ * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
+ */
+ thold_att = hclkp;
+ if (thold_att < sdrt->tALH_min)
+ thold_att = sdrt->tALH_min;
+ if (thold_att < sdrt->tCH_min)
+ thold_att = sdrt->tCH_min;
+ if (thold_att < sdrt->tCLH_min)
+ thold_att = sdrt->tCLH_min;
+ if (thold_att < sdrt->tCOH_min)
+ thold_att = sdrt->tCOH_min;
+ if (thold_att < sdrt->tDH_min)
+ thold_att = sdrt->tDH_min;
+ if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
+ (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
+ thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
+ if (sdrt->tADL_min > tset_mem &&
+ (thold_att < sdrt->tADL_min - tset_mem))
+ thold_att = sdrt->tADL_min - tset_mem;
+ if (sdrt->tWH_min > tset_mem &&
+ (thold_att < sdrt->tWH_min - tset_mem))
+ thold_att = sdrt->tWH_min - tset_mem;
+ if (sdrt->tWHR_min > tset_mem &&
+ (thold_att < sdrt->tWHR_min - tset_mem))
+ thold_att = sdrt->tWHR_min - tset_mem;
+ if ((sdrt->tRC_min > tset_att + twait) &&
+ (thold_att < sdrt->tRC_min - (tset_att + twait)))
+ thold_att = sdrt->tRC_min - (tset_att + twait);
+ if ((sdrt->tWC_min > tset_att + twait) &&
+ (thold_att < sdrt->tWC_min - (tset_att + twait)))
+ thold_att = sdrt->tWC_min - (tset_att + twait);
+ tims->thold_att = DIV_ROUND_UP(thold_att, hclkp);
+ if (tims->thold_att == 0)
+ tims->thold_att = 1;
+ else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK;
+}
+
+static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ const struct nand_sdr_timings *sdrt;
+
+ sdrt = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdrt))
+ return PTR_ERR(sdrt);
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ stm32_fmc2_calc_timings(chip, sdrt);
+
+ /* Apply timings */
+ stm32_fmc2_timings_init(chip);
+
+ return 0;
+}
+
+/* DMA configuration */
+static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
+{
+ int ret;
+
+ fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx");
+ fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx");
+ fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc");
+
+ if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) {
+ dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n");
+ return 0;
+ }
+
+ ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ /* Allocate a buffer to store ECC status registers */
+ fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN,
+ GFP_KERNEL);
+ if (!fmc2->ecc_buf)
+ return -ENOMEM;
+
+ ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ init_completion(&fmc2->dma_data_complete);
+ init_completion(&fmc2->dma_ecc_complete);
+
+ return 0;
+}
+
+/* NAND callbacks setup */
+static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+
+ /*
+ * Specific callbacks to read/write a page depending on
+ * the mode (polling/sequencer) and the algo used (Hamming, BCH).
+ */
+ if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) {
+ /* DMA => use sequencer mode callbacks */
+ chip->ecc.correct = stm32_fmc2_sequencer_correct;
+ chip->ecc.write_page = stm32_fmc2_sequencer_write_page;
+ chip->ecc.read_page = stm32_fmc2_sequencer_read_page;
+ chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw;
+ chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw;
+ } else {
+ /* No DMA => use polling mode callbacks */
+ chip->ecc.hwctl = stm32_fmc2_hwctl;
+ if (chip->ecc.strength == FMC2_ECC_HAM) {
+ /* Hamming is used */
+ chip->ecc.calculate = stm32_fmc2_ham_calculate;
+ chip->ecc.correct = stm32_fmc2_ham_correct;
+ chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
+ } else {
+ /* BCH is used */
+ chip->ecc.calculate = stm32_fmc2_bch_calculate;
+ chip->ecc.correct = stm32_fmc2_bch_correct;
+ chip->ecc.read_page = stm32_fmc2_read_page;
+ }
+ }
+
+ /* Number of ECC bytes per 512-byte sector, depending on the algo and bus width */
+ if (chip->ecc.strength == FMC2_ECC_HAM)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 4 : 3;
+ else if (chip->ecc.strength == FMC2_ECC_BCH8)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 14 : 13;
+ else
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
+}
+
+/* FMC2 layout */
+static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = FMC2_BBM_LEN;
+
+ return 0;
+}
+
+static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - FMC2_BBM_LEN;
+ oobregion->offset = ecc->total + FMC2_BBM_LEN;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = {
+ .ecc = stm32_fmc2_nand_ooblayout_ecc,
+ .free = stm32_fmc2_nand_ooblayout_free,
+};
+
+/* FMC2 caps */
+static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
+{
+ /* Hamming */
+ if (strength == FMC2_ECC_HAM)
+ return 4;
+
+ /* BCH8 */
+ if (strength == FMC2_ECC_BCH8)
+ return 14;
+
+ /* BCH4 */
+ return 8;
+}
+
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes,
+ FMC2_ECC_STEP_SIZE,
+ FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
+
+/* FMC2 controller ops */
+static int stm32_fmc2_attach_chip(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /*
+ * Only NAND_ECC_HW mode is actually supported
+ * Hamming => ecc.strength = 1
+ * BCH4 => ecc.strength = 4
+ * BCH8 => ecc.strength = 8
+ * ECC sector size = 512
+ */
+ if (chip->ecc.mode != NAND_ECC_HW) {
+ dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n");
+ return -EINVAL;
+ }
+
+ ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps,
+ mtd->oobsize - FMC2_BBM_LEN);
+ if (ret) {
+ dev_err(fmc2->dev, "no valid ECC settings set\n");
+ return ret;
+ }
+
+ if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
+ dev_err(fmc2->dev, "nand page size is not supported\n");
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* NAND callbacks setup */
+ stm32_fmc2_nand_callbacks_setup(chip);
+
+ /* Define ECC layout */
+ mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops);
+
+ /* Configure bus width to 16-bit */
+ if (chip->options & NAND_BUSWIDTH_16)
+ stm32_fmc2_set_buswidth_16(fmc2, true);
+
+ return 0;
+}
+
+static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = {
+ .attach_chip = stm32_fmc2_attach_chip,
+ .exec_op = stm32_fmc2_exec_op,
+ .setup_data_interface = stm32_fmc2_setup_interface,
+};
+
+/* FMC2 probe */
+static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
+ struct device_node *dn)
+{
+ struct stm32_fmc2_nand *nand = &fmc2->nand;
+ u32 cs;
+ int ret, i;
+
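+ /* The length of the "reg" property gives the number of CS lines (one u32 per CS) */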
+ if (!of_get_property(dn, "reg", &nand->ncs))
+ return -EINVAL;
+
+ nand->ncs /= sizeof(u32);
+ if (!nand->ncs) {
+ dev_err(fmc2->dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nand->ncs; i++) {
+ ret = of_property_read_u32_index(dn, "reg", i, &cs);
+ if (ret) {
+ dev_err(fmc2->dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs > FMC2_MAX_CE) {
+ dev_err(fmc2->dev, "invalid reg value: %d\n", cs);
+ return -EINVAL;
+ }
+
+ if (fmc2->cs_assigned & BIT(cs)) {
+ dev_err(fmc2->dev, "cs already assigned: %d\n", cs);
+ return -EINVAL;
+ }
+
+ fmc2->cs_assigned |= BIT(cs);
+ nand->cs_used[i] = cs;
+ }
+
+ nand_set_flash_node(&nand->chip, dn);
+
+ return 0;
+}
+
+static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
+{
+ struct device_node *dn = fmc2->dev->of_node;
+ struct device_node *child;
+ int nchips = of_get_child_count(dn);
+ int ret = 0;
+
+ if (!nchips) {
+ dev_err(fmc2->dev, "NAND chip not defined\n");
+ return -EINVAL;
+ }
+
+ if (nchips > 1) {
+ dev_err(fmc2->dev, "too many NAND chips defined\n");
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(dn, child) {
+ ret = stm32_fmc2_parse_child(fmc2, child);
+ if (ret < 0) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int stm32_fmc2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ struct stm32_fmc2_nfc *fmc2;
+ struct stm32_fmc2_nand *nand;
+ struct resource *res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int chip_cs, mem_region, ret, irq;
+
+ fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL);
+ if (!fmc2)
+ return -ENOMEM;
+
+ fmc2->dev = dev;
+ nand_controller_init(&fmc2->base);
+ fmc2->base.ops = &stm32_fmc2_nand_controller_ops;
+
+ ret = stm32_fmc2_parse_dt(fmc2);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fmc2->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fmc2->io_base))
+ return PTR_ERR(fmc2->io_base);
+
+ fmc2->io_phys_addr = res->start;
+
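+ /* Each enabled CS uses three extra memory regions: data, command and address spaces */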
+ for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE;
+ chip_cs++, mem_region += 3) {
+ if (!(fmc2->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
+ fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fmc2->data_base[chip_cs]))
+ return PTR_ERR(fmc2->data_base[chip_cs]);
+
+ fmc2->data_phys_addr[chip_cs] = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ mem_region + 1);
+ fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fmc2->cmd_base[chip_cs]))
+ return PTR_ERR(fmc2->cmd_base[chip_cs]);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ mem_region + 2);
+ fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fmc2->addr_base[chip_cs]))
+ return PTR_ERR(fmc2->addr_base[chip_cs]);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
+ dev_name(dev), fmc2);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ init_completion(&fmc2->complete);
+
+ fmc2->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(fmc2->clk))
+ return PTR_ERR(fmc2->clk);
+
+ ret = clk_prepare_enable(fmc2->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ rstc = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+ }
+
+ /* DMA setup */
+ ret = stm32_fmc2_dma_setup(fmc2);
+ if (ret)
+ return ret;
+
+ /* FMC2 init routine */
+ stm32_fmc2_init(fmc2);
+
+ nand = &fmc2->nand;
+ chip = &nand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ chip->controller = &fmc2->base;
+ chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+ NAND_USE_BOUNCE_BUFFER;
+
+ /* Default ECC settings */
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = FMC2_ECC_STEP_SIZE;
+ chip->ecc.strength = FMC2_ECC_BCH8;
+
+ /* Scan to find existence of the device */
+ ret = nand_scan(chip, nand->ncs);
+ if (ret)
+ goto err_scan;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_device_register;
+
+ platform_set_drvdata(pdev, fmc2);
+
+ return 0;
+
+err_device_register:
+ nand_cleanup(chip);
+
+err_scan:
+ if (fmc2->dma_ecc_ch)
+ dma_release_channel(fmc2->dma_ecc_ch);
+ if (fmc2->dma_tx_ch)
+ dma_release_channel(fmc2->dma_tx_ch);
+ if (fmc2->dma_rx_ch)
+ dma_release_channel(fmc2->dma_rx_ch);
+
+ sg_free_table(&fmc2->dma_data_sg);
+ sg_free_table(&fmc2->dma_ecc_sg);
+
+ clk_disable_unprepare(fmc2->clk);
+
+ return ret;
+}
+
+static int stm32_fmc2_remove(struct platform_device *pdev)
+{
+ struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev);
+ struct stm32_fmc2_nand *nand = &fmc2->nand;
+
+ nand_release(&nand->chip);
+
+ if (fmc2->dma_ecc_ch)
+ dma_release_channel(fmc2->dma_ecc_ch);
+ if (fmc2->dma_tx_ch)
+ dma_release_channel(fmc2->dma_tx_ch);
+ if (fmc2->dma_rx_ch)
+ dma_release_channel(fmc2->dma_rx_ch);
+
+ sg_free_table(&fmc2->dma_data_sg);
+ sg_free_table(&fmc2->dma_ecc_sg);
+
+ clk_disable_unprepare(fmc2->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_suspend(struct device *dev)
+{
+ struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(fmc2->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_resume(struct device *dev)
+{
+ struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
+ struct stm32_fmc2_nand *nand = &fmc2->nand;
+ int chip_cs, ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = clk_prepare_enable(fmc2->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ stm32_fmc2_init(fmc2);
+
+ for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
+ if (!(fmc2->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ nand_reset(&nand->chip, chip_cs);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend,
+ stm32_fmc2_resume);
+
+static const struct of_device_id stm32_fmc2_match[] = {
+ {.compatible = "st,stm32mp15-fmc2"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_fmc2_match);
+
+static struct platform_driver stm32_fmc2_driver = {
+ .probe = stm32_fmc2_probe,
+ .remove = stm32_fmc2_remove,
+ .driver = {
+ .name = "stm32_fmc2_nand",
+ .of_match_table = stm32_fmc2_match,
+ .pm = &stm32_fmc2_pm_ops,
+ },
+};
+module_platform_driver(stm32_fmc2_driver);
+
+MODULE_ALIAS("platform:stm32_fmc2_nand");
+MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index e828ee50a201..4282bc477761 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
*
@@ -10,16 +11,6 @@
*
* Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
* Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/dma-mapping.h>
@@ -163,38 +154,36 @@
#define NFC_MAX_CS 7
-/*
- * Chip Select structure: stores information related to NAND Chip Select
+/**
+ * struct sunxi_nand_chip_sel - stores information related to NAND Chip Select
*
- * @cs: the NAND CS id used to communicate with a NAND Chip
- * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the
- * NFC
+ * @cs: the NAND CS id used to communicate with a NAND Chip
+ * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC
*/
struct sunxi_nand_chip_sel {
u8 cs;
s8 rb;
};
-/*
- * sunxi HW ECC infos: stores information related to HW ECC support
+/**
+ * struct sunxi_nand_hw_ecc - stores information related to HW ECC support
*
- * @mode: the sunxi ECC mode field deduced from ECC requirements
+ * @mode: the sunxi ECC mode field deduced from ECC requirements
*/
struct sunxi_nand_hw_ecc {
int mode;
};
-/*
- * NAND chip structure: stores NAND chip device related information
+/**
+ * struct sunxi_nand_chip - stores NAND chip device related information
*
- * @node: used to store NAND chips into a list
- * @nand: base NAND chip structure
- * @mtd: base MTD structure
- * @clk_rate: clk_rate required for this NAND chip
- * @timing_cfg TIMING_CFG register value for this NAND chip
- * @selected: current active CS
- * @nsels: number of CS lines required by the NAND chip
- * @sels: array of CS lines descriptions
+ * @node: used to store NAND chips into a list
+ * @nand: base NAND chip structure
+ * @clk_rate: clk_rate required for this NAND chip
+ * @timing_cfg: TIMING_CFG register value for this NAND chip
+ * @timing_ctl: TIMING_CTL register value for this NAND chip
+ * @nsels: number of CS lines required by the NAND chip
+ * @sels: array of CS lines descriptions
*/
struct sunxi_nand_chip {
struct list_head node;
@@ -202,11 +191,6 @@ struct sunxi_nand_chip {
unsigned long clk_rate;
u32 timing_cfg;
u32 timing_ctl;
- int selected;
- int addr_cycles;
- u32 addr[2];
- int cmd_cycles;
- u8 cmd[2];
int nsels;
struct sunxi_nand_chip_sel sels[0];
};
@@ -216,20 +200,21 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
return container_of(nand, struct sunxi_nand_chip, nand);
}
-/*
- * NAND Controller structure: stores sunxi NAND controller information
+/**
+ * struct sunxi_nfc - stores sunxi NAND controller information
*
- * @controller: base controller structure
- * @dev: parent device (used to print error messages)
- * @regs: NAND controller registers
- * @ahb_clk: NAND Controller AHB clock
- * @mod_clk: NAND Controller mod clock
- * @assigned_cs: bitmask describing already assigned CS lines
- * @clk_rate: NAND controller current clock rate
- * @chips: a list containing all the NAND chips attached to
- * this NAND controller
- * @complete: a completion object used to wait for NAND
- * controller events
+ * @controller: base controller structure
+ * @dev: parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ahb_clk: NAND controller AHB clock
+ * @mod_clk: NAND controller mod clock
+ * @reset: NAND controller reset line
+ * @assigned_cs: bitmask describing already assigned CS lines
+ * @clk_rate: NAND controller current clock rate
+ * @chips: a list containing all the NAND chips attached to this NAND
+ * controller
+ * @complete: a completion object used to wait for NAND controller events
+ * @dmac: the DMA channel attached to the NAND controller
*/
struct sunxi_nfc {
struct nand_controller controller;
@@ -339,13 +324,11 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
return ret;
}
-static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
+static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
int chunksize, int nchunks,
enum dma_data_direction ddir,
struct scatterlist *sg)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct dma_async_tx_descriptor *dmad;
enum dma_transfer_direction tdir;
dma_cookie_t dmat;
@@ -388,38 +371,16 @@ err_unmap_buf:
return ret;
}
-static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
+static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc,
enum dma_data_direction ddir,
struct scatterlist *sg)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-
dma_unmap_sg(nfc->dev, sg, 1, ddir);
writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
nfc->regs + NFC_REG_CTL);
}
-static int sunxi_nfc_dev_ready(struct nand_chip *nand)
-{
- struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
- struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
- u32 mask;
-
- if (sunxi_nand->selected < 0)
- return 0;
-
- if (sunxi_nand->sels[sunxi_nand->selected].rb < 0) {
- dev_err(nfc->dev, "cannot check R/B NAND status!\n");
- return 0;
- }
-
- mask = NFC_RB_STATE(sunxi_nand->sels[sunxi_nand->selected].rb);
-
- return !!(readl(nfc->regs + NFC_REG_ST) & mask);
-}
-
-static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip)
+static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
@@ -427,40 +388,27 @@ static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip)
struct sunxi_nand_chip_sel *sel;
u32 ctl;
- if (chip > 0 && chip >= sunxi_nand->nsels)
- return;
-
- if (chip == sunxi_nand->selected)
+ if (cs > 0 && cs >= sunxi_nand->nsels)
return;
ctl = readl(nfc->regs + NFC_REG_CTL) &
~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
- if (chip >= 0) {
- sel = &sunxi_nand->sels[chip];
+ sel = &sunxi_nand->sels[cs];
+ ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift);
+ if (sel->rb >= 0)
+ ctl |= NFC_RB_SEL(sel->rb);
- ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
- NFC_PAGE_SHIFT(nand->page_shift);
- if (sel->rb < 0) {
- nand->legacy.dev_ready = NULL;
- } else {
- nand->legacy.dev_ready = sunxi_nfc_dev_ready;
- ctl |= NFC_RB_SEL(sel->rb);
- }
-
- writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
- if (nfc->clk_rate != sunxi_nand->clk_rate) {
- clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
- nfc->clk_rate = sunxi_nand->clk_rate;
- }
+ if (nfc->clk_rate != sunxi_nand->clk_rate) {
+ clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ nfc->clk_rate = sunxi_nand->clk_rate;
}
writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
writel(ctl, nfc->regs + NFC_REG_CTL);
-
- sunxi_nand->selected = chip;
}
static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
@@ -537,71 +485,6 @@ static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
}
}
-static uint8_t sunxi_nfc_read_byte(struct nand_chip *nand)
-{
- uint8_t ret = 0;
-
- sunxi_nfc_read_buf(nand, &ret, 1);
-
- return ret;
-}
-
-static void sunxi_nfc_cmd_ctrl(struct nand_chip *nand, int dat,
- unsigned int ctrl)
-{
- struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
- struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
- int ret;
-
- if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
- !(ctrl & (NAND_CLE | NAND_ALE))) {
- u32 cmd = 0;
-
- if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
- return;
-
- if (sunxi_nand->cmd_cycles--)
- cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
-
- if (sunxi_nand->cmd_cycles--) {
- cmd |= NFC_SEND_CMD2;
- writel(sunxi_nand->cmd[1],
- nfc->regs + NFC_REG_RCMD_SET);
- }
-
- sunxi_nand->cmd_cycles = 0;
-
- if (sunxi_nand->addr_cycles) {
- cmd |= NFC_SEND_ADR |
- NFC_ADR_NUM(sunxi_nand->addr_cycles);
- writel(sunxi_nand->addr[0],
- nfc->regs + NFC_REG_ADDR_LOW);
- }
-
- if (sunxi_nand->addr_cycles > 4)
- writel(sunxi_nand->addr[1],
- nfc->regs + NFC_REG_ADDR_HIGH);
-
- ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
- if (ret)
- return;
-
- writel(cmd, nfc->regs + NFC_REG_CMD);
- sunxi_nand->addr[0] = 0;
- sunxi_nand->addr[1] = 0;
- sunxi_nand->addr_cycles = 0;
- sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
- }
-
- if (ctrl & NAND_CLE) {
- sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
- } else if (ctrl & NAND_ALE) {
- sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
- dat << ((sunxi_nand->addr_cycles % 4) * 8);
- sunxi_nand->addr_cycles++;
- }
-}
-
/* These seed values have been extracted from Allwinner's BSP */
static const u16 sunxi_nfc_randomizer_page_seeds[] = {
0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
@@ -684,8 +567,10 @@ static u16 sunxi_nfc_randomizer_step(u16 state, int count)
return state;
}
-static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
+static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page,
+ bool ecc)
{
+ struct mtd_info *mtd = nand_to_mtd(nand);
const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
int mod = mtd_div_by_ws(mtd->erasesize, mtd);
@@ -702,10 +587,9 @@ static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
return seeds[page % mod];
}
-static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
- int page, bool ecc)
+static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
+ bool ecc)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
u16 state;
@@ -714,14 +598,13 @@ static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
return;
ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
- state = sunxi_nfc_randomizer_state(mtd, page, ecc);
+ state = sunxi_nfc_randomizer_state(nand, page, ecc);
ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
}
-static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
+static void sunxi_nfc_randomizer_enable(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
if (!(nand->options & NAND_NEED_SCRAMBLING))
@@ -731,9 +614,8 @@ static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
nfc->regs + NFC_REG_ECC_CTL);
}
-static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
+static void sunxi_nfc_randomizer_disable(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
if (!(nand->options & NAND_NEED_SCRAMBLING))
@@ -743,36 +625,35 @@ static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
nfc->regs + NFC_REG_ECC_CTL);
}
-static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
+static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm)
{
- u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
+ u16 state = sunxi_nfc_randomizer_state(nand, page, true);
bbm[0] ^= state;
bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
}
-static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
+static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand,
const uint8_t *buf, int len,
bool ecc, int page)
{
- sunxi_nfc_randomizer_config(mtd, page, ecc);
- sunxi_nfc_randomizer_enable(mtd);
- sunxi_nfc_write_buf(mtd_to_nand(mtd), buf, len);
- sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_randomizer_config(nand, page, ecc);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_write_buf(nand, buf, len);
+ sunxi_nfc_randomizer_disable(nand);
}
-static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
+static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf,
int len, bool ecc, int page)
{
- sunxi_nfc_randomizer_config(mtd, page, ecc);
- sunxi_nfc_randomizer_enable(mtd);
- sunxi_nfc_read_buf(mtd_to_nand(mtd), buf, len);
- sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_randomizer_config(nand, page, ecc);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_read_buf(nand, buf, len);
+ sunxi_nfc_randomizer_disable(nand);
}
-static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
+static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
u32 ecc_ctl;
@@ -789,9 +670,8 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}
-static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
+static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
@@ -811,10 +691,9 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
}
-static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
+static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob,
int step, bool bbm, int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
@@ -822,21 +701,20 @@ static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
/* De-randomize the Bad Block Marker. */
if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
- sunxi_nfc_randomize_bbm(mtd, page, oob);
+ sunxi_nfc_randomize_bbm(nand, page, oob);
}
-static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
+static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
const u8 *oob, int step,
bool bbm, int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
u8 user_data[4];
/* Randomize the Bad Block Marker. */
if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
memcpy(user_data, oob, sizeof(user_data));
- sunxi_nfc_randomize_bbm(mtd, page, user_data);
+ sunxi_nfc_randomize_bbm(nand, page, user_data);
oob = user_data;
}
@@ -844,9 +722,11 @@ static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
nfc->regs + NFC_REG_USER_DATA(step));
}
-static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
+static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
unsigned int *max_bitflips, int ret)
{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
if (ret < 0) {
mtd->ecc_stats.failed++;
} else {
@@ -855,10 +735,9 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
}
}
-static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
+static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
int step, u32 status, bool *erased)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
u32 tmp;
@@ -892,14 +771,13 @@ static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
return NFC_ECC_ERR_CNT(step, tmp);
}
-static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
+static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
u8 *data, int data_off,
u8 *oob, int oob_off,
int *cur_off,
unsigned int *max_bitflips,
bool bbm, bool oob_required, int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int raw_mode = 0;
@@ -909,7 +787,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
if (*cur_off != data_off)
nand_change_read_column_op(nand, data_off, NULL, 0, false);
- sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
+ sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
nand_change_read_column_op(nand, oob_off, NULL, 0, false);
@@ -918,18 +796,18 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
if (ret)
return ret;
- sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
- sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_randomizer_disable(nand);
if (ret)
return ret;
*cur_off = oob_off + ecc->bytes + 4;
- ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+ ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? oob : NULL, 0,
readl(nfc->regs + NFC_REG_ECC_ST),
&erased);
if (erased)
@@ -961,24 +839,24 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
if (oob_required) {
nand_change_read_column_op(nand, oob_off, NULL, 0,
false);
- sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
+ sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4,
true, page);
- sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0,
bbm, page);
}
}
- sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);
+ sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret);
return raw_mode;
}
-static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
+static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand,
u8 *oob, int *cur_off,
bool randomize, int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int offset = ((ecc->bytes + 4) * ecc->steps);
int len = mtd->oobsize - offset;
@@ -993,20 +871,20 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
if (!randomize)
sunxi_nfc_read_buf(nand, oob + offset, len);
else
- sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
+ sunxi_nfc_randomizer_read_buf(nand, oob + offset, len,
false, page);
if (cur_off)
*cur_off = mtd->oobsize + mtd->writesize;
}
-static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf,
int oob_required, int page,
int nchunks)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
bool randomized = nand->options & NAND_NEED_SCRAMBLING;
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
unsigned int max_bitflips = 0;
int ret, i, raw_mode = 0;
@@ -1017,14 +895,14 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (ret)
return ret;
- ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
+ ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks,
DMA_FROM_DEVICE, &sg);
if (ret)
return ret;
- sunxi_nfc_hw_ecc_enable(mtd);
- sunxi_nfc_randomizer_config(mtd, page, false);
- sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_randomizer_config(nand, page, false);
+ sunxi_nfc_randomizer_enable(nand);
writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
@@ -1038,10 +916,10 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (ret)
dmaengine_terminate_all(nfc->dmac);
- sunxi_nfc_randomizer_disable(mtd);
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_randomizer_disable(nand);
+ sunxi_nfc_hw_ecc_disable(nand);
- sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
+ sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg);
if (ret)
return ret;
@@ -1055,7 +933,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
u8 *oob = nand->oob_poi + oob_off;
bool erased;
- ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
+ ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL,
oob_required ? oob : NULL,
i, status, &erased);
@@ -1069,14 +947,14 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
mtd->writesize + oob_off,
oob, ecc->bytes + 4, false);
- sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i,
!i, page);
}
if (erased)
raw_mode = 1;
- sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+ sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
}
if (status & NFC_ECC_ERR_MSK) {
@@ -1111,25 +989,24 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (ret >= 0)
raw_mode = 1;
- sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+ sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
}
}
if (oob_required)
- sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
+ sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi,
NULL, !raw_mode,
page);
return max_bitflips;
}
-static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
+static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
const u8 *data, int data_off,
const u8 *oob, int oob_off,
int *cur_off, bool bbm,
int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret;
@@ -1137,7 +1014,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
if (data_off != *cur_off)
nand_change_write_column_op(nand, data_off, NULL, 0, false);
- sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
+ sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
nand_change_write_column_op(nand, oob_off, NULL, 0, false);
@@ -1146,15 +1023,15 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
if (ret)
return ret;
- sunxi_nfc_randomizer_enable(mtd);
- sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
NFC_ACCESS_DIR | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
- sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_randomizer_disable(nand);
if (ret)
return ret;
@@ -1163,11 +1040,11 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
return 0;
}
-static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
+static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand,
u8 *oob, int *cur_off,
int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int offset = ((ecc->bytes + 4) * ecc->steps);
int len = mtd->oobsize - offset;
@@ -1179,32 +1056,34 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
nand_change_write_column_op(nand, offset + mtd->writesize,
NULL, 0, false);
- sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
+ sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page);
if (cur_off)
*cur_off = mtd->oobsize + mtd->writesize;
}
-static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
+static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf,
int oob_required, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
unsigned int max_bitflips = 0;
int ret, i, cur_off = 0;
bool raw_mode = false;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
- sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
for (i = 0; i < ecc->steps; i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
u8 *data = buf + data_off;
- u8 *oob = chip->oob_poi + oob_off;
+ u8 *oob = nand->oob_poi + oob_off;
- ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
+ ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, &max_bitflips,
!i, oob_required, page);
@@ -1215,52 +1094,55 @@ static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
}
if (oob_required)
- sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+ sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off,
!raw_mode, page);
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(nand);
return max_bitflips;
}
-static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *chip, u8 *buf,
+static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf,
int oob_required, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
- ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
- chip->ecc.steps);
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page,
+ nand->ecc.steps);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- return sunxi_nfc_hw_ecc_read_page(chip, buf, oob_required, page);
+ return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page);
}
-static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand,
u32 data_offs, u32 readlen,
u8 *bufpoi, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
- sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
u8 *data = bufpoi + data_off;
- u8 *oob = chip->oob_poi + oob_off;
+ u8 *oob = nand->oob_poi + oob_off;
- ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
+ ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off,
oob,
oob_off + mtd->writesize,
&cur_off, &max_bitflips, !i,
@@ -1269,113 +1151,118 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip,
return ret;
}
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(nand);
return max_bitflips;
}
-static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand,
u32 data_offs, u32 readlen,
u8 *buf, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+ int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size);
int ret;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
- ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- return sunxi_nfc_hw_ecc_read_subpage(chip, data_offs, readlen,
+ return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen,
buf, page);
}
-static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand,
const uint8_t *buf, int oob_required,
int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
- sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
for (i = 0; i < ecc->steps; i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
const u8 *data = buf + data_off;
- const u8 *oob = chip->oob_poi + oob_off;
+ const u8 *oob = nand->oob_poi + oob_off;
- ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, !i, page);
if (ret)
return ret;
}
- if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
- sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
&cur_off, page);
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(nand);
- return nand_prog_page_end_op(chip);
+ return nand_prog_page_end_op(nand);
}
-static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand,
u32 data_offs, u32 data_len,
const u8 *buf, int oob_required,
int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
- sunxi_nfc_hw_ecc_enable(mtd);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
const u8 *data = buf + data_off;
- const u8 *oob = chip->oob_poi + oob_off;
+ const u8 *oob = nand->oob_poi + oob_off;
- ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, !i, page);
if (ret)
return ret;
}
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(nand);
- return nand_prog_page_end_op(chip);
+ return nand_prog_page_end_op(nand);
}
-static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
const u8 *buf,
int oob_required,
int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
struct scatterlist sg;
int ret, i;
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
return ret;
- ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
+ ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps,
DMA_TO_DEVICE, &sg);
if (ret)
goto pio_fallback;
@@ -1383,14 +1270,14 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
for (i = 0; i < ecc->steps; i++) {
const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
- sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
}
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
- sunxi_nfc_hw_ecc_enable(mtd);
- sunxi_nfc_randomizer_config(mtd, page, false);
- sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_randomizer_config(nand, page, false);
+ sunxi_nfc_randomizer_enable(nand);
writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
nfc->regs + NFC_REG_WCMD_SET);
@@ -1405,46 +1292,46 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
if (ret)
dmaengine_terminate_all(nfc->dmac);
- sunxi_nfc_randomizer_disable(mtd);
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_randomizer_disable(nand);
+ sunxi_nfc_hw_ecc_disable(nand);
- sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
+ sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg);
if (ret)
return ret;
- if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+ if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
/* TODO: use DMA to transfer extra OOB bytes ? */
- sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
NULL, page);
- return nand_prog_page_end_op(chip);
+ return nand_prog_page_end_op(nand);
pio_fallback:
- return sunxi_nfc_hw_ecc_write_page(chip, buf, oob_required, page);
+ return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page);
}
-static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *chip, int page)
+static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
{
- chip->pagebuf = -1;
+ nand->pagebuf = -1;
- return chip->ecc.read_page(chip, chip->data_buf, 1, page);
+ return nand->ecc.read_page(nand, nand->data_buf, 1, page);
}
-static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *chip, int page)
+static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_info *mtd = nand_to_mtd(nand);
int ret;
- chip->pagebuf = -1;
+ nand->pagebuf = -1;
- memset(chip->data_buf, 0xff, mtd->writesize);
- ret = chip->ecc.write_page(chip, chip->data_buf, 1, page);
+ memset(nand->data_buf, 0xff, mtd->writesize);
+ ret = nand->ecc.write_page(nand, nand->data_buf, 1, page);
if (ret)
return ret;
/* Send command to program the OOB data */
- return nand_prog_page_end_op(chip);
+ return nand_prog_page_end_op(nand);
}
static const s32 tWB_lut[] = {6, 12, 16, 20};
@@ -1471,8 +1358,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
const struct nand_data_interface *conf)
{
- struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
- struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
const struct nand_sdr_timings *timings;
u32 min_clk_period = 0;
s32 tWB, tADL, tWHR, tRHW, tCAD;
@@ -1555,6 +1442,20 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
if (timings->tRHW_min > (min_clk_period * 20))
min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
+ /*
+ * In non-EDO, tREA should be less than tRP to guarantee that the
+ * controller does not sample the IO lines too early. Unfortunately,
+ * the sunxi NAND controller does not allow us to have different
+ * values for tRP and tREH (tRP = tREH = tRW / 2).
+ *
+ * We have 2 options to overcome this limitation:
+ *
+ * 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint
+ * 2/ Use EDO mode (only works if timings->tRLOH > 0)
+ */
+ if (timings->tREA_max > min_clk_period && !timings->tRLOH_min)
+ min_clk_period = timings->tREA_max;
+
tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
min_clk_period);
if (tWB < 0) {
@@ -1591,7 +1492,7 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
tCAD = 0x7;
/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
- chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+ sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
/* Convert min_clk_period from picoseconds to nanoseconds */
min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
@@ -1602,21 +1503,24 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
* This new formula was verified with a scope and validated by
* Allwinner engineers.
*/
- chip->clk_rate = NSEC_PER_SEC / min_clk_period;
- real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+ sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period;
+ real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate);
if (real_clk_rate <= 0) {
- dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
+ dev_err(nfc->dev, "Unable to round clk %lu\n",
+ sunxi_nand->clk_rate);
return -EINVAL;
}
+ sunxi_nand->timing_ctl = 0;
+
/*
* ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
* output cycle timings shall be used if the host drives tRC less than
- * 30 ns.
+ * 30 ns. We should also use EDO mode if tREA is bigger than tRP.
*/
min_clk_period = NSEC_PER_SEC / real_clk_rate;
- chip->timing_ctl = ((min_clk_period * 2) < 30) ?
- NFC_TIMING_CTL_EDO : 0;
+ if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max)
+ sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO;
return 0;
}
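
As an aside to the timing hunk above: a minimal, standalone C sketch (not part of the patch) of the EDO decision it introduces, using a hypothetical 40 MHz mod clock and a tREA_max of 30 ns; NSEC_PER_SEC and the helper name are local to the example.

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

/* Mirror of the EDO decision in the hunk above (illustrative only). */
static int needs_edo(unsigned long real_clk_rate_hz, unsigned int tREA_max_ps)
{
	/* One clock period in nanoseconds, as in the driver. */
	unsigned long min_clk_period = NSEC_PER_SEC / real_clk_rate_hz;

	/* EDO if tRC < 30 ns (ONFI 4.15.2) or if tREA exceeds tRP. */
	return (min_clk_period * 2 < 30) ||
	       (min_clk_period * 1000 < tREA_max_ps);
}

int main(void)
{
	/* Example: 40 MHz mod clock, tREA_max = 30 ns (30000 ps) -> EDO. */
	printf("EDO needed: %d\n", needs_edo(40000000UL, 30000));
	return 0;
}
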
@@ -1677,14 +1581,13 @@ static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
kfree(ecc->priv);
}
-static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
- struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct sunxi_nand_hw_ecc *data;
int nsectors;
int ret;
@@ -1808,7 +1711,6 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
static int sunxi_nand_attach_chip(struct nand_chip *nand)
{
- struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
struct device_node *np = nand_get_flash_node(nand);
int ret;
@@ -1831,7 +1733,7 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
switch (ecc->mode) {
case NAND_ECC_HW:
- ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
+ ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
if (ret)
return ret;
break;
@@ -1845,15 +1747,165 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
return 0;
}
+static int sunxi_nfc_exec_subop(struct nand_chip *nand,
+ const struct nand_subop *subop)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { };
+ unsigned int i, j, remaining, start;
+ void *inbuf = NULL;
+ int ret;
+
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (cmd & NFC_SEND_CMD1) {
+ if (WARN_ON(cmd & NFC_SEND_CMD2))
+ return -EINVAL;
+
+ cmd |= NFC_SEND_CMD2;
+ extcmd |= instr->ctx.cmd.opcode;
+ } else {
+ cmd |= NFC_SEND_CMD1 |
+ NFC_CMD(instr->ctx.cmd.opcode);
+ }
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ remaining = nand_subop_get_num_addr_cyc(subop, i);
+ start = nand_subop_get_addr_start_off(subop, i);
+ for (j = 0; j < 8 && j + start < remaining; j++) {
+ u32 addr = instr->ctx.addr.addrs[j + start];
+
+ addrs[j / 4] |= addr << (j % 4) * 8;
+ }
+
+ if (j)
+ cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j);
+
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ start = nand_subop_get_data_start_off(subop, i);
+ remaining = nand_subop_get_data_len(subop, i);
+ cnt = min_t(u32, remaining, NFC_SRAM_SIZE);
+ cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR) {
+ cmd |= NFC_ACCESS_DIR;
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE,
+ instr->ctx.data.buf.out + start,
+ cnt);
+ } else {
+ inbuf = instr->ctx.data.buf.in + start;
+ }
+
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ cmd |= NFC_WAIT_FLAG;
+ break;
+ }
+ }
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ if (cmd & NFC_SEND_ADR) {
+ writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW);
+ writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH);
+ }
+
+ if (cmd & NFC_SEND_CMD2)
+ writel(extcmd,
+ nfc->regs +
+ (cmd & NFC_ACCESS_DIR ?
+ NFC_REG_WCMD_SET : NFC_REG_RCMD_SET));
+
+ if (cmd & NFC_DATA_TRANS)
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+
+ writel(cmd, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG,
+ !(cmd & NFC_WAIT_FLAG) && cnt < 64,
+ 0);
+ if (ret)
+ return ret;
+
+ if (inbuf)
+ memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt);
+
+ return 0;
+}
+
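+
A standalone sketch (not part of the patch) of the address-cycle packing done in sunxi_nfc_exec_subop() above: up to eight address cycles are packed, little-endian, into the two 32-bit ADDR_LOW/ADDR_HIGH values. The 5-cycle address used here is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 5-cycle address: column low/high, row low/mid/high. */
	uint8_t cycles[] = { 0x00, 0x08, 0x34, 0x12, 0x00 };
	uint32_t addrs[2] = { 0, 0 };
	unsigned int j;

	for (j = 0; j < sizeof(cycles); j++)
		addrs[j / 4] |= (uint32_t)cycles[j] << (j % 4) * 8;

	/* Prints ADDR_LOW = 0x12340800, ADDR_HIGH = 0x00000000 */
	printf("ADDR_LOW = 0x%08x, ADDR_HIGH = 0x%08x\n", addrs[0], addrs[1]);
	return 0;
}
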
+static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand,
+ const struct nand_subop *subop)
+{
+ return nand_soft_waitrdy(nand,
+ subop->instrs[0].ctx.waitrdy.timeout_ms);
+}
+
+static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+);
+
+static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+);
+
+static int sunxi_nfc_exec_op(struct nand_chip *nand,
+ const struct nand_operation *op, bool check_only)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ const struct nand_op_parser *parser;
+
+ sunxi_nfc_select_chip(nand, op->cs);
+
+ if (sunxi_nand->sels[op->cs].rb >= 0)
+ parser = &sunxi_nfc_op_parser;
+ else
+ parser = &sunxi_nfc_norb_op_parser;
+
+ return nand_op_parser_exec_op(nand, parser, op, check_only);
+}
+
static const struct nand_controller_ops sunxi_nand_controller_ops = {
.attach_chip = sunxi_nand_attach_chip,
.setup_data_interface = sunxi_nfc_setup_data_interface,
+ .exec_op = sunxi_nfc_exec_op,
};
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
struct device_node *np)
{
- struct sunxi_nand_chip *chip;
+ struct sunxi_nand_chip *sunxi_nand;
struct mtd_info *mtd;
struct nand_chip *nand;
int nsels;
@@ -1870,17 +1922,14 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
return -EINVAL;
}
- chip = devm_kzalloc(dev,
- sizeof(*chip) +
- (nsels * sizeof(struct sunxi_nand_chip_sel)),
- GFP_KERNEL);
- if (!chip) {
+ sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels),
+ GFP_KERNEL);
+ if (!sunxi_nand) {
dev_err(dev, "could not allocate chip\n");
return -ENOMEM;
}
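
For reference, a small userspace sketch (not part of the patch) of what the struct_size() based allocation above computes; the structures are simplified stand-ins for sunxi_nand_chip and its sels[] flexible array, and the real struct_size() additionally saturates to SIZE_MAX on overflow.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the driver structures (illustrative only). */
struct sel { int cs; int rb; };
struct chip {
	int nsels;
	struct sel sels[];	/* flexible array, sized per DT "reg" entries */
};

int main(void)
{
	int nsels = 2;
	/* struct_size(sunxi_nand, sels, nsels) boils down to this sum: */
	size_t sz = sizeof(struct chip) + (size_t)nsels * sizeof(struct sel);

	printf("allocation size for %d selects: %zu bytes\n", nsels, sz);
	return 0;
}
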
- chip->nsels = nsels;
- chip->selected = -1;
+ sunxi_nand->nsels = nsels;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &tmp);
@@ -1902,18 +1951,17 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
return -EINVAL;
}
- chip->sels[i].cs = tmp;
+ sunxi_nand->sels[i].cs = tmp;
if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
tmp < 2)
- chip->sels[i].rb = tmp;
+ sunxi_nand->sels[i].rb = tmp;
else
- chip->sels[i].rb = -1;
+ sunxi_nand->sels[i].rb = -1;
}
- nand = &chip->nand;
+ nand = &sunxi_nand->nand;
/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
- nand->legacy.chip_delay = 200;
nand->controller = &nfc->controller;
nand->controller->ops = &sunxi_nand_controller_ops;
@@ -1923,11 +1971,6 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
*/
nand->ecc.mode = NAND_ECC_HW;
nand_set_flash_node(nand, np);
- nand->legacy.select_chip = sunxi_nfc_select_chip;
- nand->legacy.cmd_ctrl = sunxi_nfc_cmd_ctrl;
- nand->legacy.read_buf = sunxi_nfc_read_buf;
- nand->legacy.write_buf = sunxi_nfc_write_buf;
- nand->legacy.read_byte = sunxi_nfc_read_byte;
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
@@ -1943,7 +1986,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
return ret;
}
- list_add_tail(&chip->node, &nfc->chips);
+ list_add_tail(&sunxi_nand->node, &nfc->chips);
return 0;
}
@@ -1973,14 +2016,15 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
{
- struct sunxi_nand_chip *chip;
+ struct sunxi_nand_chip *sunxi_nand;
while (!list_empty(&nfc->chips)) {
- chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
- node);
- nand_release(&chip->nand);
- sunxi_nand_ecc_cleanup(&chip->nand.ecc);
- list_del(&chip->node);
+ sunxi_nand = list_first_entry(&nfc->chips,
+ struct sunxi_nand_chip,
+ node);
+ nand_release(&sunxi_nand->nand);
+ sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc);
+ list_del(&sunxi_nand->node);
}
}
@@ -2124,7 +2168,7 @@ static struct platform_driver sunxi_nfc_driver = {
};
module_platform_driver(sunxi_nfc_driver);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index f3b59e649b7d..db030f1701ee 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -104,6 +104,7 @@
struct tmio_nand {
struct nand_chip chip;
+ struct completion comp;
struct platform_device *dev;
@@ -168,15 +169,11 @@ static int tmio_nand_dev_ready(struct nand_chip *chip)
static irqreturn_t tmio_irq(int irq, void *__tmio)
{
struct tmio_nand *tmio = __tmio;
- struct nand_chip *nand_chip = &tmio->chip;
/* disable RDYREQ interrupt */
tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ complete(&tmio->comp);
- if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
- dev_warn(&tmio->dev->dev, "spurious interrupt\n");
-
- wake_up(&nand_chip->controller->wq);
return IRQ_HANDLED;
}
@@ -193,18 +190,18 @@ static int tmio_nand_wait(struct nand_chip *nand_chip)
u8 status;
/* enable RDYREQ interrupt */
+
tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+ reinit_completion(&tmio->comp);
tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
- timeout = wait_event_timeout(nand_chip->controller->wq,
- tmio_nand_dev_ready(nand_chip),
- msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));
+ timeout = 400;
+ timeout = wait_for_completion_timeout(&tmio->comp,
+ msecs_to_jiffies(timeout));
if (unlikely(!tmio_nand_dev_ready(nand_chip))) {
tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
- dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
- nand_chip->state == FL_ERASING ? "erase" : "program",
- nand_chip->state == FL_ERASING ? 400 : 20);
+ dev_warn(&tmio->dev->dev, "still busy after 400 ms\n");
} else if (unlikely(!timeout)) {
tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
@@ -378,6 +375,8 @@ static int tmio_probe(struct platform_device *dev)
if (!tmio)
return -ENOMEM;
+ init_completion(&tmio->comp);
+
tmio->dev = dev;
platform_set_drvdata(dev, tmio);
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 479c2f2cf17f..fa87ae28cdfe 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
struct nand_page_io_req adjreq = *req;
- unsigned int nbytes = 0;
- void *buf = NULL;
+ void *buf = spinand->databuf;
+ unsigned int nbytes;
u16 column = 0;
int ret;
- memset(spinand->databuf, 0xff,
- nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand));
+ /*
+ * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+ * the cache content to 0xFF (depends on vendor implementation), so we
+ * must fill the page cache entirely even if we only want to program
+ * the data portion of the page, otherwise we might corrupt the BBM or
+ * user data previously programmed in the OOB area.
+ */
+ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ memset(spinand->databuf, 0xff, nbytes);
+ adjreq.dataoffs = 0;
+ adjreq.datalen = nanddev_page_size(nand);
+ adjreq.databuf.out = spinand->databuf;
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
+ adjreq.ooboffs = 0;
+ adjreq.oobbuf.out = spinand->oobbuf;
- if (req->datalen) {
+ if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
req->datalen);
- adjreq.dataoffs = 0;
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.databuf.out = spinand->databuf;
- nbytes = adjreq.datalen;
- buf = spinand->databuf;
- }
if (req->ooblen) {
if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
else
memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
req->ooblen);
-
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- nbytes += nanddev_per_page_oobsize(nand);
- if (!buf) {
- buf = spinand->oobbuf;
- column = nanddev_page_size(nand);
- }
}
spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
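
A standalone sketch (not part of the patch) of the rule described in the comment above: the whole page-cache image is filled with 0xFF first, and only the requested bytes are copied in, so the BBM and any untouched OOB bytes are never overwritten with stale data. The page geometry here is made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	2048
#define OOB_SIZE	64

static uint8_t databuf[PAGE_SIZE + OOB_SIZE];

int main(void)
{
	const uint8_t user_data[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned int dataoffs = 512;

	/* Fill the whole cache image so untouched bytes stay 0xFF. */
	memset(databuf, 0xff, sizeof(databuf));

	/* Copy only the user's slice; the BBM byte at OOB offset 0 stays 0xFF. */
	memcpy(databuf + dataoffs, user_data, sizeof(user_data));

	printf("byte[0]=0x%02x byte[512]=0x%02x bbm=0x%02x\n",
	       databuf[0], databuf[dataoffs], databuf[PAGE_SIZE]);
	return 0;
}
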
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
/*
* We need to use the RANDOM LOAD CACHE operation if there's
- * more than one iteration, because the LOAD operation resets
- * the cache to 0xff.
+ * more than one iteration, because the LOAD operation might
+ * reset the cache to 0xff.
*/
if (nbytes) {
column = op.addr.val;
@@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand)
for (i = 0; i < nand->memorg.ntargets; i++) {
ret = spinand_select_target(spinand, i);
if (ret)
- goto err_free_bufs;
+ goto err_manuf_cleanup;
ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
if (ret)
- goto err_free_bufs;
+ goto err_manuf_cleanup;
}
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e4141c20947a..0b49d8264bef 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -12,6 +12,8 @@
#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
+#define GD5FXGQ4UEXXG_REG_STATUS2 0xf0
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -81,11 +83,83 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 64;
+ region->length = 64;
+
+ return 0;
+}
+
+static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 1 bytes for the BBM. */
+ region->offset = 1;
+ region->length = 63;
+
+ return 0;
+}
+
+static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ u8 status2;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQ4UEXXG_REG_STATUS2,
+ &status2);
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ /*
+ * Read the status2 register to determine a more fine-grained
+ * bit error status.
+ */
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * 4 ... 7 bits are flipped (1..4 can't be detected, so
+ * report the maximum of 4 in this case)
+ */
+ /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+ return ((status & STATUS_ECC_MASK) >> 2) |
+ ((status2 & STATUS_ECC_MASK) >> 4);
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
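+
A worked example (not part of the patch) of the bitflip count computed by gd5fxgq4uexxg_ecc_get_status() above: the ECCS bits from the status register land in bits 3:2 of the result, the ECCSE bits from status2 in bits 1:0, giving a count in the 4..7 range. The register readings are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define STATUS_ECC_MASK	(3 << 4)	/* assumed to match include/linux/mtd/spinand.h */

int main(void)
{
	uint8_t status = 1 << 4;	/* ECCS = 01: "1-7 bitflips" case */
	uint8_t status2 = 2 << 4;	/* ECCSE = 10 (hypothetical reading) */
	int bitflips = ((status & STATUS_ECC_MASK) >> 2) |
		       ((status2 & STATUS_ECC_MASK) >> 4);

	printf("reported bitflips: %d\n", bitflips);	/* prints 6 */
	return 0;
}
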
static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
.ecc = gd5fxgq4xa_ooblayout_ecc,
.free = gd5fxgq4xa_ooblayout_free,
};
+static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = {
+ .ecc = gd5fxgq4uexxg_ooblayout_ecc,
+ .free = gd5fxgq4uexxg_ooblayout_free,
+};
+
static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO("GD5F1GQ4xA", 0xF1,
NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
@@ -114,6 +188,15 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
};
static int gigadevice_spinand_detect(struct spinand_device *spinand)
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 98f6b9c4b684..d16b57081c95 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -10,6 +10,7 @@
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MACRONIX 0xC2
+#define MACRONIX_ECCSR_MASK 0x0F
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -55,7 +56,12 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
SPI_MEM_OP_DUMMY(1, 1),
SPI_MEM_OP_DATA_IN(1, eccsr, 1));
- return spi_mem_exec_op(spinand->spimem, &op);
+ int ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ *eccsr &= MACRONIX_ECCSR_MASK;
+ return 0;
}
static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 081265557e70..db8021da45b5 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -25,19 +25,19 @@ static SPINAND_OP_VARIANTS(write_cache_variants,
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int tc58cvg2s0h_ooblayout_ecc(struct mtd_info *mtd, int section,
+static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
- if (section > 7)
+ if (section > 0)
return -ERANGE;
- region->offset = 128 + 16 * section;
- region->length = 16;
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
return 0;
}
-static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section,
+static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 0)
@@ -45,17 +45,17 @@ static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section,
/* 2 bytes reserved for BBM */
region->offset = 2;
- region->length = 126;
+ region->length = (mtd->oobsize / 2) - 2;
return 0;
}
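
A quick standalone check (not part of the patch) of the half-and-half OOB split implemented by the tc58cxgxsx layout callbacks above, for the two OOB sizes used in the device table that follows.

#include <stdio.h>

static void show(unsigned int oobsize)
{
	unsigned int ecc_off = oobsize / 2, ecc_len = oobsize / 2;
	unsigned int free_off = 2, free_len = (oobsize / 2) - 2;

	printf("oobsize %3u: ecc [%u..%u], free [%u..%u]\n", oobsize,
	       ecc_off, ecc_off + ecc_len - 1, free_off,
	       free_off + free_len - 1);
}

int main(void)
{
	show(128);	/* ecc [64..127], free [2..63]   */
	show(256);	/* ecc [128..255], free [2..127] */
	return 0;
}
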
-static const struct mtd_ooblayout_ops tc58cvg2s0h_ooblayout = {
- .ecc = tc58cvg2s0h_ooblayout_ecc,
- .free = tc58cvg2s0h_ooblayout_free,
+static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = {
+ .ecc = tc58cxgxsx_ooblayout_ecc,
+ .free = tc58cxgxsx_ooblayout_free,
};
-static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand,
+static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
@@ -94,15 +94,66 @@ static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info toshiba_spinand_table[] = {
- SPINAND_INFO("TC58CVG2S0H", 0xCD,
+ /* 3.3V 1Gb */
+ SPINAND_INFO("TC58CVG0S3", 0xC2,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 3.3V 2Gb */
+ SPINAND_INFO("TC58CVG1S3", 0xCB,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 3.3V 4Gb */
+ SPINAND_INFO("TC58CVG2S0", 0xCD,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 1.8V 1Gb */
+ SPINAND_INFO("TC58CYG0S3", 0xB2,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 1.8V 2Gb */
+ SPINAND_INFO("TC58CYG1S3", 0xBB,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 1.8V 4Gb */
+ SPINAND_INFO("TC58CYG2S0", 0xBD,
NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&tc58cvg2s0h_ooblayout,
- tc58cvg2s0h_ecc_get_status)),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
};
static int toshiba_spinand_detect(struct spinand_device *spinand)
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 44fe8018733c..dab986691267 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -7,14 +7,6 @@ menuconfig MTD_SPI_NOR
if MTD_SPI_NOR
-config MTD_MT81xx_NOR
- tristate "Mediatek MT81xx SPI NOR flash controller"
- depends on HAS_IOMEM
- help
- This enables access to SPI NOR flash, using MT81xx SPI NOR flash
- controller. This controller does not support generic SPI BUS, it only
- supports SPI NOR Flash.
-
config MTD_SPI_NOR_USE_4K_SECTORS
bool "Use small 4096 B erase sectors"
default y
@@ -50,15 +42,6 @@ config SPI_CADENCE_QUADSPI
device with a Cadence QSPI controller and want to access the
Flash as an MTD device.
-config SPI_FSL_QUADSPI
- tristate "Freescale Quad SPI controller"
- depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables support for the Quad SPI controller in master mode.
- This controller does not support generic SPI. It only supports
- SPI NOR.
-
config SPI_HISI_SFC
tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
depends on ARCH_HISI || COMPILE_TEST
@@ -66,6 +49,14 @@ config SPI_HISI_SFC
help
This enables support for hisilicon SPI-NOR flash controller.
+config SPI_MTK_QUADSPI
+ tristate "MediaTek Quad SPI controller"
+ depends on HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This controller does not support generic SPI. It only supports
+ SPI NOR.
+
config SPI_NXP_SPIFI
tristate "NXP SPI Flash Interface (SPIFI)"
depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index a552efd22958..189a15cca3ec 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -2,9 +2,8 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
-obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
-obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
+obj-$(CONFIG_SPI_MTK_QUADSPI) += mtk-quadspi.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 04cedd3a2bf6..792628750eec 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -44,6 +44,12 @@
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
+/* Capabilities mask */
+#define CQSPI_BASE_HWCAPS_MASK \
+ (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | \
+ SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 | \
+ SNOR_HWCAPS_PP)
+
struct cqspi_st;
struct cqspi_flash_pdata {
@@ -93,6 +99,11 @@ struct cqspi_st {
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
+struct cqspi_driver_platdata {
+ u32 hwcaps_mask;
+ u8 quirks;
+};
+
/* Operation timeout value */
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10
@@ -101,6 +112,7 @@ struct cqspi_st {
#define CQSPI_INST_TYPE_SINGLE 0
#define CQSPI_INST_TYPE_DUAL 1
#define CQSPI_INST_TYPE_QUAD 2
+#define CQSPI_INST_TYPE_OCTAL 3
#define CQSPI_DUMMY_CLKS_PER_BYTE 8
#define CQSPI_DUMMY_BYTES_MAX 4
@@ -418,9 +430,10 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
unsigned int data;
+ u32 write_len;
int ret;
- if (n_tx > 4 || (n_tx && !txbuf)) {
+ if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
dev_err(nor->dev,
"Invalid input argument, cmdlen %d txbuf 0x%p\n",
n_tx, txbuf);
@@ -433,10 +446,18 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
data = 0;
- memcpy(&data, txbuf, n_tx);
+ write_len = (n_tx > 4) ? 4 : n_tx;
+ memcpy(&data, txbuf, write_len);
+ txbuf += write_len;
writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
- }
+ if (n_tx > 4) {
+ data = 0;
+ write_len = n_tx - 4;
+ memcpy(&data, txbuf, write_len);
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
+ }
+ }
ret = cqspi_exec_flash_cmd(cqspi, reg);
return ret;
}
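
A standalone sketch (not part of the patch) of how the extended STIG write above splits up to 8 TX bytes across the LOWER and UPPER command-write data words; the example payload is hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t txbuf[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	size_t n_tx = sizeof(txbuf);
	uint32_t lower = 0, upper = 0;
	size_t write_len = n_tx > 4 ? 4 : n_tx;

	memcpy(&lower, txbuf, write_len);
	if (n_tx > 4)
		memcpy(&upper, txbuf + 4, n_tx - 4);

	/* On a little-endian host this prints lower=0x44332211 upper=0x00006655 */
	printf("lower=0x%08x upper=0x%08x\n", lower, upper);
	return 0;
}
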
@@ -911,6 +932,9 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
case SNOR_PROTO_1_1_4:
f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
break;
+ case SNOR_PROTO_1_1_8:
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ break;
default:
return -EINVAL;
}
@@ -1213,21 +1237,23 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ |
- SNOR_HWCAPS_READ_FAST |
- SNOR_HWCAPS_READ_1_1_2 |
- SNOR_HWCAPS_READ_1_1_4 |
- SNOR_HWCAPS_PP,
- };
struct platform_device *pdev = cqspi->pdev;
struct device *dev = &pdev->dev;
+ const struct cqspi_driver_platdata *ddata;
+ struct spi_nor_hwcaps hwcaps;
struct cqspi_flash_pdata *f_pdata;
struct spi_nor *nor;
struct mtd_info *mtd;
unsigned int cs;
int i, ret;
+ ddata = of_device_get_match_data(dev);
+ if (!ddata) {
+ dev_err(dev, "Couldn't find driver data\n");
+ return -EINVAL;
+ }
+ hwcaps.mask = ddata->hwcaps_mask;
+
/* Get flash device data */
for_each_available_child_of_node(dev->of_node, np) {
ret = of_property_read_u32(np, "reg", &cs);
@@ -1310,7 +1336,7 @@ static int cqspi_probe(struct platform_device *pdev)
struct cqspi_st *cqspi;
struct resource *res;
struct resource *res_ahb;
- unsigned long data;
+ const struct cqspi_driver_platdata *ddata;
int ret;
int irq;
@@ -1377,8 +1403,8 @@ static int cqspi_probe(struct platform_device *pdev)
}
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
- data = (unsigned long)of_device_get_match_data(dev);
- if (data & CQSPI_NEEDS_WR_DELAY)
+ ddata = of_device_get_match_data(dev);
+ if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY))
cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
@@ -1460,14 +1486,32 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#define CQSPI_DEV_PM_OPS NULL
#endif
+static const struct cqspi_driver_platdata cdns_qspi = {
+ .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
+};
+
+static const struct cqspi_driver_platdata k2g_qspi = {
+ .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
+ .quirks = CQSPI_NEEDS_WR_DELAY,
+};
+
+static const struct cqspi_driver_platdata am654_ospi = {
+ .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8,
+ .quirks = CQSPI_NEEDS_WR_DELAY,
+};
+
static const struct of_device_id cqspi_dt_ids[] = {
{
.compatible = "cdns,qspi-nor",
- .data = (void *)0,
+ .data = &cdns_qspi,
},
{
.compatible = "ti,k2g-qspi",
- .data = (void *)CQSPI_NEEDS_WR_DELAY,
+ .data = &k2g_qspi,
+ },
+ {
+ .compatible = "ti,am654-ospi",
+ .data = &am654_ospi,
},
{ /* end of table */ }
};
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
deleted file mode 100644
index 1ff3430f82c8..000000000000
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ /dev/null
@@ -1,1224 +0,0 @@
-/*
- * Freescale QuadSPI driver.
- *
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-#include <linux/completion.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
-#include <linux/mutex.h>
-#include <linux/pm_qos.h>
-#include <linux/sizes.h>
-
-/* Controller needs driver to swap endian */
-#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0)
-/* Controller needs 4x internal clock */
-#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1)
-/*
- * TKT253890, Controller needs driver to fill txfifo till 16 byte to
- * trigger data transfer even though extern data will not transferred.
- */
-#define QUADSPI_QUIRK_TKT253890 (1 << 2)
-/* Controller cannot wake up from wait mode, TKT245618 */
-#define QUADSPI_QUIRK_TKT245618 (1 << 3)
-
-/* The registers */
-#define QUADSPI_MCR 0x00
-#define QUADSPI_MCR_RESERVED_SHIFT 16
-#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT)
-#define QUADSPI_MCR_MDIS_SHIFT 14
-#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT)
-#define QUADSPI_MCR_CLR_TXF_SHIFT 11
-#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT)
-#define QUADSPI_MCR_CLR_RXF_SHIFT 10
-#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT)
-#define QUADSPI_MCR_DDR_EN_SHIFT 7
-#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT)
-#define QUADSPI_MCR_END_CFG_SHIFT 2
-#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT)
-#define QUADSPI_MCR_SWRSTHD_SHIFT 1
-#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT)
-#define QUADSPI_MCR_SWRSTSD_SHIFT 0
-#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT)
-
-#define QUADSPI_IPCR 0x08
-#define QUADSPI_IPCR_SEQID_SHIFT 24
-#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT)
-
-#define QUADSPI_BUF0CR 0x10
-#define QUADSPI_BUF1CR 0x14
-#define QUADSPI_BUF2CR 0x18
-#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
-
-#define QUADSPI_BUF3CR 0x1c
-#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
-#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
-#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8
-#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT)
-
-#define QUADSPI_BFGENCR 0x20
-#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
-#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT))
-#define QUADSPI_BFGENCR_SEQID_SHIFT 12
-#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT)
-
-#define QUADSPI_BUF0IND 0x30
-#define QUADSPI_BUF1IND 0x34
-#define QUADSPI_BUF2IND 0x38
-#define QUADSPI_SFAR 0x100
-
-#define QUADSPI_SMPR 0x108
-#define QUADSPI_SMPR_DDRSMP_SHIFT 16
-#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT)
-#define QUADSPI_SMPR_FSDLY_SHIFT 6
-#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT)
-#define QUADSPI_SMPR_FSPHS_SHIFT 5
-#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT)
-#define QUADSPI_SMPR_HSENA_SHIFT 0
-#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT)
-
-#define QUADSPI_RBSR 0x10c
-#define QUADSPI_RBSR_RDBFL_SHIFT 8
-#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT)
-
-#define QUADSPI_RBCT 0x110
-#define QUADSPI_RBCT_WMRK_MASK 0x1F
-#define QUADSPI_RBCT_RXBRD_SHIFT 8
-#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT)
-
-#define QUADSPI_TBSR 0x150
-#define QUADSPI_TBDR 0x154
-#define QUADSPI_SR 0x15c
-#define QUADSPI_SR_IP_ACC_SHIFT 1
-#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT)
-#define QUADSPI_SR_AHB_ACC_SHIFT 2
-#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT)
-
-#define QUADSPI_FR 0x160
-#define QUADSPI_FR_TFF_MASK 0x1
-
-#define QUADSPI_SFA1AD 0x180
-#define QUADSPI_SFA2AD 0x184
-#define QUADSPI_SFB1AD 0x188
-#define QUADSPI_SFB2AD 0x18c
-#define QUADSPI_RBDR 0x200
-
-#define QUADSPI_LUTKEY 0x300
-#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
-
-#define QUADSPI_LCKCR 0x304
-#define QUADSPI_LCKER_LOCK 0x1
-#define QUADSPI_LCKER_UNLOCK 0x2
-
-#define QUADSPI_RSER 0x164
-#define QUADSPI_RSER_TFIE (0x1 << 0)
-
-#define QUADSPI_LUT_BASE 0x310
-
-/*
- * The definition of the LUT register shows below:
- *
- * ---------------------------------------------------
- * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
- * ---------------------------------------------------
- */
-#define OPRND0_SHIFT 0
-#define PAD0_SHIFT 8
-#define INSTR0_SHIFT 10
-#define OPRND1_SHIFT 16
-
-/* Instruction set for the LUT register. */
-#define LUT_STOP 0
-#define LUT_CMD 1
-#define LUT_ADDR 2
-#define LUT_DUMMY 3
-#define LUT_MODE 4
-#define LUT_MODE2 5
-#define LUT_MODE4 6
-#define LUT_FSL_READ 7
-#define LUT_FSL_WRITE 8
-#define LUT_JMP_ON_CS 9
-#define LUT_ADDR_DDR 10
-#define LUT_MODE_DDR 11
-#define LUT_MODE2_DDR 12
-#define LUT_MODE4_DDR 13
-#define LUT_FSL_READ_DDR 14
-#define LUT_FSL_WRITE_DDR 15
-#define LUT_DATA_LEARN 16
-
-/*
- * The PAD definitions for LUT register.
- *
- * The pad stands for the lines number of IO[0:3].
- * For example, the Quad read need four IO lines, so you should
- * set LUT_PAD4 which means we use four IO lines.
- */
-#define LUT_PAD1 0
-#define LUT_PAD2 1
-#define LUT_PAD4 2
-
-/* Oprands for the LUT register. */
-#define ADDR24BIT 0x18
-#define ADDR32BIT 0x20
-
-/* Macros for constructing the LUT register. */
-#define LUT0(ins, pad, opr) \
- (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
- ((LUT_##ins) << INSTR0_SHIFT))
-
-#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
-
-/* other macros for LUT register. */
-#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4)
-#define QUADSPI_LUT_NUM 64
-
-/* SEQID -- we can have 16 seqids at most. */
-#define SEQID_READ 0
-#define SEQID_WREN 1
-#define SEQID_WRDI 2
-#define SEQID_RDSR 3
-#define SEQID_SE 4
-#define SEQID_CHIP_ERASE 5
-#define SEQID_PP 6
-#define SEQID_RDID 7
-#define SEQID_WRSR 8
-#define SEQID_RDCR 9
-#define SEQID_EN4B 10
-#define SEQID_BRWR 11
-
-#define QUADSPI_MIN_IOMAP SZ_4M
-
-enum fsl_qspi_devtype {
- FSL_QUADSPI_VYBRID,
- FSL_QUADSPI_IMX6SX,
- FSL_QUADSPI_IMX7D,
- FSL_QUADSPI_IMX6UL,
- FSL_QUADSPI_LS1021A,
- FSL_QUADSPI_LS2080A,
-};
-
-struct fsl_qspi_devtype_data {
- enum fsl_qspi_devtype devtype;
- int rxfifo;
- int txfifo;
- int ahb_buf_size;
- int driver_data;
-};
-
-static const struct fsl_qspi_devtype_data vybrid_data = {
- .devtype = FSL_QUADSPI_VYBRID,
- .rxfifo = 128,
- .txfifo = 64,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
-};
-
-static const struct fsl_qspi_devtype_data imx6sx_data = {
- .devtype = FSL_QUADSPI_IMX6SX,
- .rxfifo = 128,
- .txfifo = 512,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_4X_INT_CLK
- | QUADSPI_QUIRK_TKT245618,
-};
-
-static const struct fsl_qspi_devtype_data imx7d_data = {
- .devtype = FSL_QUADSPI_IMX7D,
- .rxfifo = 512,
- .txfifo = 512,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_TKT253890
- | QUADSPI_QUIRK_4X_INT_CLK,
-};
-
-static const struct fsl_qspi_devtype_data imx6ul_data = {
- .devtype = FSL_QUADSPI_IMX6UL,
- .rxfifo = 128,
- .txfifo = 512,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_TKT253890
- | QUADSPI_QUIRK_4X_INT_CLK,
-};
-
-static struct fsl_qspi_devtype_data ls1021a_data = {
- .devtype = FSL_QUADSPI_LS1021A,
- .rxfifo = 128,
- .txfifo = 64,
- .ahb_buf_size = 1024,
- .driver_data = 0,
-};
-
-static const struct fsl_qspi_devtype_data ls2080a_data = {
- .devtype = FSL_QUADSPI_LS2080A,
- .rxfifo = 128,
- .txfifo = 64,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_TKT253890,
-};
-
-
-#define FSL_QSPI_MAX_CHIP 4
-struct fsl_qspi {
- struct spi_nor nor[FSL_QSPI_MAX_CHIP];
- void __iomem *iobase;
- void __iomem *ahb_addr;
- u32 memmap_phy;
- u32 memmap_offs;
- u32 memmap_len;
- struct clk *clk, *clk_en;
- struct device *dev;
- struct completion c;
- const struct fsl_qspi_devtype_data *devtype_data;
- u32 nor_size;
- u32 nor_num;
- u32 clk_rate;
- unsigned int chip_base_addr; /* We may support two chips. */
- bool has_second_chip;
- bool big_endian;
- struct mutex lock;
- struct pm_qos_request pm_qos_req;
-};
-
-static inline int needs_swap_endian(struct fsl_qspi *q)
-{
- return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN;
-}
-
-static inline int needs_4x_clock(struct fsl_qspi *q)
-{
- return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK;
-}
-
-static inline int needs_fill_txfifo(struct fsl_qspi *q)
-{
- return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890;
-}
-
-static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
-{
- return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
-}
-
-/*
- * R/W functions for big- or little-endian registers:
- * The qSPI controller's endian is independent of the CPU core's endian.
- * So far, although the CPU core is little-endian but the qSPI have two
- * versions for big-endian and little-endian.
- */
-static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
-{
- if (q->big_endian)
- iowrite32be(val, addr);
- else
- iowrite32(val, addr);
-}
-
-static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
-{
- if (q->big_endian)
- return ioread32be(addr);
- else
- return ioread32(addr);
-}
-
-/*
- * An IC bug makes us to re-arrange the 32-bit data.
- * The following chips, such as IMX6SLX, have fixed this bug.
- */
-static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
-{
- return needs_swap_endian(q) ? __swab32(a) : a;
-}
-
-static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
-{
- qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
- qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
-}
-
-static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
-{
- qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
- qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
-}
-
-static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
-{
- struct fsl_qspi *q = dev_id;
- u32 reg;
-
- /* clear interrupt */
- reg = qspi_readl(q, q->iobase + QUADSPI_FR);
- qspi_writel(q, reg, q->iobase + QUADSPI_FR);
-
- if (reg & QUADSPI_FR_TFF_MASK)
- complete(&q->c);
-
- dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg);
- return IRQ_HANDLED;
-}
-
-static void fsl_qspi_init_lut(struct fsl_qspi *q)
-{
- void __iomem *base = q->iobase;
- int rxfifo = q->devtype_data->rxfifo;
- u32 lut_base;
- int i;
-
- struct spi_nor *nor = &q->nor[0];
- u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
- u8 read_op = nor->read_opcode;
- u8 read_dm = nor->read_dummy;
-
- fsl_qspi_unlock_lut(q);
-
- /* Clear all the LUT table */
- for (i = 0; i < QUADSPI_LUT_NUM; i++)
- qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
-
- /* Read */
- lut_base = SEQID_READ * 4;
-
- qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
- qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
- LUT1(FSL_READ, PAD4, rxfifo),
- base + QUADSPI_LUT(lut_base + 1));
-
- /* Write enable */
- lut_base = SEQID_WREN * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN),
- base + QUADSPI_LUT(lut_base));
-
- /* Page Program */
- lut_base = SEQID_PP * 4;
-
- qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) |
- LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
- qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
- base + QUADSPI_LUT(lut_base + 1));
-
- /* Read Status */
- lut_base = SEQID_RDSR * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) |
- LUT1(FSL_READ, PAD1, 0x1),
- base + QUADSPI_LUT(lut_base));
-
- /* Erase a sector */
- lut_base = SEQID_SE * 4;
-
- qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) |
- LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
-
- /* Erase the whole chip */
- lut_base = SEQID_CHIP_ERASE * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
- base + QUADSPI_LUT(lut_base));
-
- /* READ ID */
- lut_base = SEQID_RDID * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) |
- LUT1(FSL_READ, PAD1, 0x8),
- base + QUADSPI_LUT(lut_base));
-
- /* Write Register */
- lut_base = SEQID_WRSR * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) |
- LUT1(FSL_WRITE, PAD1, 0x2),
- base + QUADSPI_LUT(lut_base));
-
- /* Read Configuration Register */
- lut_base = SEQID_RDCR * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) |
- LUT1(FSL_READ, PAD1, 0x1),
- base + QUADSPI_LUT(lut_base));
-
- /* Write disable */
- lut_base = SEQID_WRDI * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI),
- base + QUADSPI_LUT(lut_base));
-
- /* Enter 4 Byte Mode (Micron) */
- lut_base = SEQID_EN4B * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B),
- base + QUADSPI_LUT(lut_base));
-
- /* Enter 4 Byte Mode (Spansion) */
- lut_base = SEQID_BRWR * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
- base + QUADSPI_LUT(lut_base));
-
- fsl_qspi_lock_lut(q);
-}
-
-/* Get the SEQID for the command */
-static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
-{
- switch (cmd) {
- case SPINOR_OP_READ_1_1_4:
- case SPINOR_OP_READ_1_1_4_4B:
- return SEQID_READ;
- case SPINOR_OP_WREN:
- return SEQID_WREN;
- case SPINOR_OP_WRDI:
- return SEQID_WRDI;
- case SPINOR_OP_RDSR:
- return SEQID_RDSR;
- case SPINOR_OP_SE:
- return SEQID_SE;
- case SPINOR_OP_CHIP_ERASE:
- return SEQID_CHIP_ERASE;
- case SPINOR_OP_PP:
- return SEQID_PP;
- case SPINOR_OP_RDID:
- return SEQID_RDID;
- case SPINOR_OP_WRSR:
- return SEQID_WRSR;
- case SPINOR_OP_RDCR:
- return SEQID_RDCR;
- case SPINOR_OP_EN4B:
- return SEQID_EN4B;
- case SPINOR_OP_BRWR:
- return SEQID_BRWR;
- default:
- if (cmd == q->nor[0].erase_opcode)
- return SEQID_SE;
- dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
- break;
- }
- return -EINVAL;
-}
-
-static int
-fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
-{
- void __iomem *base = q->iobase;
- int seqid;
- u32 reg, reg2;
- int err;
-
- init_completion(&q->c);
- dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
- q->chip_base_addr, addr, len, cmd);
-
- /* save the reg */
- reg = qspi_readl(q, base + QUADSPI_MCR);
-
- qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr,
- base + QUADSPI_SFAR);
- qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
- base + QUADSPI_RBCT);
- qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
-
- do {
- reg2 = qspi_readl(q, base + QUADSPI_SR);
- if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
- udelay(1);
- dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
- continue;
- }
- break;
- } while (1);
-
- /* trigger the LUT now */
- seqid = fsl_qspi_get_seqid(q, cmd);
- if (seqid < 0)
- return seqid;
-
- qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
- base + QUADSPI_IPCR);
-
- /* Wait for the interrupt. */
- if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) {
- dev_err(q->dev,
- "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
- cmd, addr, qspi_readl(q, base + QUADSPI_FR),
- qspi_readl(q, base + QUADSPI_SR));
- err = -ETIMEDOUT;
- } else {
- err = 0;
- }
-
- /* restore the MCR */
- qspi_writel(q, reg, base + QUADSPI_MCR);
-
- return err;
-}
-
-/* Read out the data from the QUADSPI_RBDR buffer registers. */
-static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
-{
- u32 tmp;
- int i = 0;
-
- while (len > 0) {
- tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4);
- tmp = fsl_qspi_endian_xchg(q, tmp);
- dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
- q->chip_base_addr, tmp);
-
- if (len >= 4) {
- *((u32 *)rxbuf) = tmp;
- rxbuf += 4;
- } else {
- memcpy(rxbuf, &tmp, len);
- break;
- }
-
- len -= 4;
- i++;
- }
-}
-
-/*
- * If we have changed the content of the flash by writing or erasing,
- * we need to invalidate the AHB buffer. If we do not do so, we may read out
- * the wrong data. The spec tells us to reset the AHB domain and the
- * Serial Flash domain at the same time.
- */
-static inline void fsl_qspi_invalid(struct fsl_qspi *q)
-{
- u32 reg;
-
- reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
- reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
- qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
-
- /*
- * The minimum delay: 1 AHB + 2 SFCK clocks.
- * A 1 us delay is enough.
- */
- udelay(1);
-
- reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
- qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
-}
-
-static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
- u8 opcode, unsigned int to, u32 *txbuf,
- unsigned count)
-{
- int ret, i, j;
- u32 tmp;
-
- dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
- q->chip_base_addr, to, count);
-
- /* clear the TX FIFO. */
- tmp = qspi_readl(q, q->iobase + QUADSPI_MCR);
- qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
-
- /* fill the TX data to the FIFO */
- for (j = 0, i = ((count + 3) / 4); j < i; j++) {
- tmp = fsl_qspi_endian_xchg(q, *txbuf);
- qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
- txbuf++;
- }
-
- /* fill the TX FIFO up to 16 bytes for i.MX7d */
- if (needs_fill_txfifo(q))
- for (; i < 4; i++)
- qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
-
- /* Trigger it */
- ret = fsl_qspi_runcmd(q, opcode, to, count);
-
- if (ret == 0)
- return count;
-
- return ret;
-}
-
-static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
-{
- int nor_size = q->nor_size;
- void __iomem *base = q->iobase;
-
- qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
- qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
- qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
- qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
-}
-
-/*
- * There are two different ways to read out the data from the flash:
- * the "IP Command Read" and the "AHB Command Read".
- *
- * The IC guy suggests we use the "AHB Command Read", which is faster
- * than the "IP Command Read". (What's more, there is a bug in the
- * "IP Command Read" on the Vybrid.)
- *
- * After we set up the registers for the "AHB Command Read", we can use
- * the memcpy to read the data directly. A "missed" access to the buffer
- * causes the controller to clear the buffer, and use the sequence pointed
- * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
- */
-static int fsl_qspi_init_ahb_read(struct fsl_qspi *q)
-{
- void __iomem *base = q->iobase;
- int seqid;
-
- /* AHB configuration for access buffers 0/1/2. */
- qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
- qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
- qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
- /*
- * Set ADATSZ with the maximum AHB buffer size to improve the
- * read performance.
- */
- qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
- ((q->devtype_data->ahb_buf_size / 8)
- << QUADSPI_BUF3CR_ADATSZ_SHIFT),
- base + QUADSPI_BUF3CR);
-
- /* We only use buffer 3 */
- qspi_writel(q, 0, base + QUADSPI_BUF0IND);
- qspi_writel(q, 0, base + QUADSPI_BUF1IND);
- qspi_writel(q, 0, base + QUADSPI_BUF2IND);
-
- /* Set the default lut sequence for AHB Read. */
- seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
- if (seqid < 0)
- return seqid;
-
- qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
- q->iobase + QUADSPI_BFGENCR);
-
- return 0;
-}
-
-/* This function is used to prepare and enable the QSPI clock. */
-static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
-{
- int ret;
-
- ret = clk_prepare_enable(q->clk_en);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(q->clk);
- if (ret) {
- clk_disable_unprepare(q->clk_en);
- return ret;
- }
-
- if (needs_wakeup_wait_mode(q))
- pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
-
- return 0;
-}
-
-/* This function is used to disable and unprepare the QSPI clock. */
-static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
-{
- if (needs_wakeup_wait_mode(q))
- pm_qos_remove_request(&q->pm_qos_req);
-
- clk_disable_unprepare(q->clk);
- clk_disable_unprepare(q->clk_en);
-
-}
-
-/* We use this function to do some basic init for spi_nor_scan(). */
-static int fsl_qspi_nor_setup(struct fsl_qspi *q)
-{
- void __iomem *base = q->iobase;
- u32 reg;
- int ret;
-
- /* disable and unprepare the clock to avoid passing a glitch to the controller */
- fsl_qspi_clk_disable_unprep(q);
-
- /* Set the default frequency; it will be changed later. */
- ret = clk_set_rate(q->clk, 66000000);
- if (ret)
- return ret;
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret)
- return ret;
-
- /* Reset the module */
- qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
- base + QUADSPI_MCR);
- udelay(1);
-
- /* Init the LUT table. */
- fsl_qspi_init_lut(q);
-
- /* Disable the module */
- qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
- base + QUADSPI_MCR);
-
- reg = qspi_readl(q, base + QUADSPI_SMPR);
- qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
- | QUADSPI_SMPR_FSPHS_MASK
- | QUADSPI_SMPR_HSENA_MASK
- | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
-
- /* Enable the module */
- qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
- base + QUADSPI_MCR);
-
- /* clear all interrupt status */
- qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
-
- /* enable the interrupt */
- qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
-
- return 0;
-}
-
-static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
-{
- unsigned long rate = q->clk_rate;
- int ret;
-
- if (needs_4x_clock(q))
- rate *= 4;
-
- /* disable and unprepare the clock to avoid passing a glitch to the controller */
- fsl_qspi_clk_disable_unprep(q);
-
- ret = clk_set_rate(q->clk, rate);
- if (ret)
- return ret;
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret)
- return ret;
-
- /* Init the LUT table again. */
- fsl_qspi_init_lut(q);
-
- /* Init for AHB read */
- return fsl_qspi_init_ahb_read(q);
-}
-
-static const struct of_device_id fsl_qspi_dt_ids[] = {
- { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
- { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
- { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
- { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
- { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
-
-static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor)
-{
- q->chip_base_addr = q->nor_size * (nor - q->nor);
-}
-
-static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
-{
- int ret;
- struct fsl_qspi *q = nor->priv;
-
- ret = fsl_qspi_runcmd(q, opcode, 0, len);
- if (ret)
- return ret;
-
- fsl_qspi_read_data(q, len, buf);
- return 0;
-}
-
-static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
-{
- struct fsl_qspi *q = nor->priv;
- int ret;
-
- if (!buf) {
- ret = fsl_qspi_runcmd(q, opcode, 0, 1);
- if (ret)
- return ret;
-
- if (opcode == SPINOR_OP_CHIP_ERASE)
- fsl_qspi_invalid(q);
-
- } else if (len > 0) {
- ret = fsl_qspi_nor_write(q, nor, opcode, 0,
- (u32 *)buf, len);
- if (ret > 0)
- return 0;
- } else {
- dev_err(q->dev, "invalid cmd %d\n", opcode);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to,
- size_t len, const u_char *buf)
-{
- struct fsl_qspi *q = nor->priv;
- ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
- (u32 *)buf, len);
-
- /* invalidate the data in the AHB buffer. */
- fsl_qspi_invalid(q);
- return ret;
-}
-
-static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from,
- size_t len, u_char *buf)
-{
- struct fsl_qspi *q = nor->priv;
- u8 cmd = nor->read_opcode;
-
- /* If necessary, ioremap the buffer before the AHB read. */
- if (!q->ahb_addr) {
- q->memmap_offs = q->chip_base_addr + from;
- q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
-
- q->ahb_addr = ioremap_nocache(
- q->memmap_phy + q->memmap_offs,
- q->memmap_len);
- if (!q->ahb_addr) {
- dev_err(q->dev, "ioremap failed\n");
- return -ENOMEM;
- }
- /* ioremap if the data requested is out of range */
- } else if (q->chip_base_addr + from < q->memmap_offs
- || q->chip_base_addr + from + len >
- q->memmap_offs + q->memmap_len) {
- iounmap(q->ahb_addr);
-
- q->memmap_offs = q->chip_base_addr + from;
- q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
- q->ahb_addr = ioremap_nocache(
- q->memmap_phy + q->memmap_offs,
- q->memmap_len);
- if (!q->ahb_addr) {
- dev_err(q->dev, "ioremap failed\n");
- return -ENOMEM;
- }
- }
-
- dev_dbg(q->dev, "cmd [%x],read from %p, len:%zd\n",
- cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
- len);
-
- /* Read out the data directly from the AHB buffer.*/
- memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
- len);
-
- return len;
-}
-
-static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
-{
- struct fsl_qspi *q = nor->priv;
- int ret;
-
- dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
- nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs);
-
- ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
- if (ret)
- return ret;
-
- fsl_qspi_invalid(q);
- return 0;
-}
-
-static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
- struct fsl_qspi *q = nor->priv;
- int ret;
-
- mutex_lock(&q->lock);
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret)
- goto err_mutex;
-
- fsl_qspi_set_base_addr(q, nor);
- return 0;
-
-err_mutex:
- mutex_unlock(&q->lock);
- return ret;
-}
-
-static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
- struct fsl_qspi *q = nor->priv;
-
- fsl_qspi_clk_disable_unprep(q);
- mutex_unlock(&q->lock);
-}
-
-static int fsl_qspi_probe(struct platform_device *pdev)
-{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ_1_1_4 |
- SNOR_HWCAPS_PP,
- };
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct fsl_qspi *q;
- struct resource *res;
- struct spi_nor *nor;
- struct mtd_info *mtd;
- int ret, i = 0;
-
- q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
- if (!q)
- return -ENOMEM;
-
- q->nor_num = of_get_child_count(dev->of_node);
- if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
- return -ENODEV;
-
- q->dev = dev;
- q->devtype_data = of_device_get_match_data(dev);
- if (!q->devtype_data)
- return -ENODEV;
- platform_set_drvdata(pdev, q);
-
- /* find the resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
- q->iobase = devm_ioremap_resource(dev, res);
- if (IS_ERR(q->iobase))
- return PTR_ERR(q->iobase);
-
- q->big_endian = of_property_read_bool(np, "big-endian");
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "QuadSPI-memory");
- if (!devm_request_mem_region(dev, res->start, resource_size(res),
- res->name)) {
- dev_err(dev, "can't request region for resource %pR\n", res);
- return -EBUSY;
- }
-
- q->memmap_phy = res->start;
-
- /* find the clocks */
- q->clk_en = devm_clk_get(dev, "qspi_en");
- if (IS_ERR(q->clk_en))
- return PTR_ERR(q->clk_en);
-
- q->clk = devm_clk_get(dev, "qspi");
- if (IS_ERR(q->clk))
- return PTR_ERR(q->clk);
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret) {
- dev_err(dev, "can not enable the clock\n");
- goto clk_failed;
- }
-
- /* find the irq */
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "failed to get the irq: %d\n", ret);
- goto irq_failed;
- }
-
- ret = devm_request_irq(dev, ret,
- fsl_qspi_irq_handler, 0, pdev->name, q);
- if (ret) {
- dev_err(dev, "failed to request irq: %d\n", ret);
- goto irq_failed;
- }
-
- ret = fsl_qspi_nor_setup(q);
- if (ret)
- goto irq_failed;
-
- if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
- q->has_second_chip = true;
-
- mutex_init(&q->lock);
-
- /* iterate the subnodes. */
- for_each_available_child_of_node(dev->of_node, np) {
- /* skip the holes */
- if (!q->has_second_chip)
- i *= 2;
-
- nor = &q->nor[i];
- mtd = &nor->mtd;
-
- nor->dev = dev;
- spi_nor_set_flash_node(nor, np);
- nor->priv = q;
-
- if (q->nor_num > 1 && !mtd->name) {
- int spiflash_idx;
-
- ret = of_property_read_u32(np, "reg", &spiflash_idx);
- if (!ret) {
- mtd->name = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%d",
- dev_name(dev),
- spiflash_idx);
- if (!mtd->name) {
- ret = -ENOMEM;
- goto mutex_failed;
- }
- } else {
- dev_warn(dev, "reg property is missing\n");
- }
- }
-
- /* fill the hooks */
- nor->read_reg = fsl_qspi_read_reg;
- nor->write_reg = fsl_qspi_write_reg;
- nor->read = fsl_qspi_read;
- nor->write = fsl_qspi_write;
- nor->erase = fsl_qspi_erase;
-
- nor->prepare = fsl_qspi_prep;
- nor->unprepare = fsl_qspi_unprep;
-
- ret = of_property_read_u32(np, "spi-max-frequency",
- &q->clk_rate);
- if (ret < 0)
- goto mutex_failed;
-
- /* set the chip address for READID */
- fsl_qspi_set_base_addr(q, nor);
-
- ret = spi_nor_scan(nor, NULL, &hwcaps);
- if (ret)
- goto mutex_failed;
-
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret)
- goto mutex_failed;
-
- /* Set the correct NOR size now. */
- if (q->nor_size == 0) {
- q->nor_size = mtd->size;
-
- /* Map the SPI NOR to an accessible address */
- fsl_qspi_set_map_addr(q);
- }
-
- /*
- * The TX FIFO is 64 bytes in the Vybrid, but the Page Program
- * may write 265 bytes at a time. The write works in units of the
- * TX FIFO, not in units of the SPI NOR's page size.
- *
- * So shrink spi_nor->page_size if it is larger than the TX FIFO.
- */
- if (nor->page_size > q->devtype_data->txfifo)
- nor->page_size = q->devtype_data->txfifo;
-
- i++;
- }
-
- /* finish the rest of the init. */
- ret = fsl_qspi_nor_setup_last(q);
- if (ret)
- goto last_init_failed;
-
- fsl_qspi_clk_disable_unprep(q);
- return 0;
-
-last_init_failed:
- for (i = 0; i < q->nor_num; i++) {
- /* skip the holes */
- if (!q->has_second_chip)
- i *= 2;
- mtd_device_unregister(&q->nor[i].mtd);
- }
-mutex_failed:
- mutex_destroy(&q->lock);
-irq_failed:
- fsl_qspi_clk_disable_unprep(q);
-clk_failed:
- dev_err(dev, "Freescale QuadSPI probe failed\n");
- return ret;
-}
-
-static int fsl_qspi_remove(struct platform_device *pdev)
-{
- struct fsl_qspi *q = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < q->nor_num; i++) {
- /* skip the holes */
- if (!q->has_second_chip)
- i *= 2;
- mtd_device_unregister(&q->nor[i].mtd);
- }
-
- /* disable the hardware */
- qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
- qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
-
- mutex_destroy(&q->lock);
-
- if (q->ahb_addr)
- iounmap(q->ahb_addr);
-
- return 0;
-}
-
-static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
-{
- return 0;
-}
-
-static int fsl_qspi_resume(struct platform_device *pdev)
-{
- int ret;
- struct fsl_qspi *q = platform_get_drvdata(pdev);
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret)
- return ret;
-
- fsl_qspi_nor_setup(q);
- fsl_qspi_set_map_addr(q);
- fsl_qspi_nor_setup_last(q);
-
- fsl_qspi_clk_disable_unprep(q);
-
- return 0;
-}
-
-static struct platform_driver fsl_qspi_driver = {
- .driver = {
- .name = "fsl-quadspi",
- .of_match_table = fsl_qspi_dt_ids,
- },
- .probe = fsl_qspi_probe,
- .remove = fsl_qspi_remove,
- .suspend = fsl_qspi_suspend,
- .resume = fsl_qspi_resume,
-};
-module_platform_driver(fsl_qspi_driver);
-
-MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
-MODULE_AUTHOR("Freescale Semiconductor Inc.");
-MODULE_LICENSE("GPL v2");
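The removed fsl_qspi_runcmd()/fsl_qspi_irq_handler() pair above follows the usual trigger-then-wait shape: the command path programs the registers, kicks off the LUT sequence, and blocks on a struct completion that the IRQ handler signals. A minimal, self-contained sketch of that pattern is shown below; demo_ctrl, demo_irq and demo_run_cmd are hypothetical names, and the register programming is left as a placeholder rather than the driver's own code.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct demo_ctrl {
	struct completion done;		/* signalled from the IRQ handler */
};

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_ctrl *c = dev_id;

	/* a real handler reads and clears the hardware status first */
	complete(&c->done);
	return IRQ_HANDLED;
}

static int demo_run_cmd(struct demo_ctrl *c)
{
	/* re-arm the completion before triggering, as the driver above did */
	init_completion(&c->done);

	/* ...program the command registers and start the transfer... */

	if (!wait_for_completion_timeout(&c->done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;	/* same 1 s timeout as above */

	return 0;
}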
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index 5442993b71ff..d9eed6844ba1 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -431,7 +431,8 @@ static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ_FAST |
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
SNOR_HWCAPS_READ_1_1_2 |
SNOR_HWCAPS_PP,
};
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 6e13bbd1aaa5..fae147452aff 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -68,7 +68,7 @@ enum spi_nor_read_command_index {
SNOR_CMD_READ_4_4_4,
SNOR_CMD_READ_1_4_4_DTR,
- /* Octo SPI */
+ /* Octal SPI */
SNOR_CMD_READ_1_1_8,
SNOR_CMD_READ_1_8_8,
SNOR_CMD_READ_8_8_8,
@@ -85,7 +85,7 @@ enum spi_nor_pp_command_index {
SNOR_CMD_PP_1_4_4,
SNOR_CMD_PP_4_4_4,
- /* Octo SPI */
+ /* Octal SPI */
SNOR_CMD_PP_1_1_8,
SNOR_CMD_PP_1_8_8,
SNOR_CMD_PP_8_8_8,
@@ -278,6 +278,7 @@ struct flash_info {
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
+#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -398,6 +399,8 @@ static u8 spi_nor_convert_3to4_read(u8 opcode)
{ SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
{ SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
{ SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
{ SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
{ SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
@@ -414,6 +417,8 @@ static u8 spi_nor_convert_3to4_program(u8 opcode)
{ SPINOR_OP_PP, SPINOR_OP_PP_4B },
{ SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
{ SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
};
return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
@@ -1740,7 +1745,11 @@ static const struct flash_info spi_nor_ids[] = {
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
+ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
@@ -1836,6 +1845,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
@@ -1847,6 +1858,8 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &mx25l25635_fixups },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
@@ -1872,7 +1885,8 @@ static const struct flash_info spi_nor_ids[] = {
/* Micron */
{
"mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_4B_OPCODES)
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES)
},
/* PMC */
@@ -1885,13 +1899,17 @@ static const struct flash_info spi_nor_ids[] = {
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
@@ -3591,6 +3609,13 @@ static int spi_nor_init_params(struct spi_nor *nor,
SNOR_PROTO_1_1_4);
}
+ if (info->flags & SPI_NOR_OCTAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ 0, 8, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_1_1_8);
+ }
+
/* Page Program settings. */
params->hwcaps.mask |= SNOR_HWCAPS_PP;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
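The spi-nor.c hunks above add the octal opcodes to the 3-byte/4-byte conversion tables (spi_nor_3to4_read[] and spi_nor_3to4_program[]) and wire SPI_NOR_OCTAL_READ into the hwcaps mask. For context, a table of that shape is consumed by a small lookup that falls back to the original opcode when no 4-byte variant is listed; the helper below is an illustrative sketch under that assumption, with demo_convert_opcode as a hypothetical name rather than the function in spi-nor.c.

#include <linux/types.h>

static u8 demo_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == opcode)
			return table[i][1];	/* 3-byte -> 4-byte variant */

	/* no 4-byte variant known: keep the original opcode */
	return opcode;
}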
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6371958dd170..5e4ca082cfcd 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -145,13 +145,16 @@ config MACVTAP
To compile this driver as a module, choose M here: the module
will be called macvtap.
+config IPVLAN_L3S
+ depends on NETFILTER
+ depends on IPVLAN
+ def_bool y
+ select NET_L3_MASTER_DEV
config IPVLAN
tristate "IP-VLAN support"
depends on INET
depends on IPV6 || !IPV6
- depends on NETFILTER
- select NET_L3_MASTER_DEV
---help---
This allows one to create virtual devices off of a main interface
and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
@@ -197,9 +200,9 @@ config VXLAN
config GENEVE
tristate "Generic Network Virtualization Encapsulation"
- depends on INET && NET_UDP_TUNNEL
+ depends on INET
depends on IPV6 || !IPV6
- select NET_IP_TUNNEL
+ select NET_UDP_TUNNEL
select GRO_CELLS
---help---
This allows one to create geneve virtual interfaces that provide
@@ -502,7 +505,6 @@ source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
tristate "Simulated networking device"
depends on DEBUG_FS
- depends on MAY_USE_DEVLINK
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
@@ -519,7 +521,7 @@ config NET_FAILOVER
and destroy a failover master netdev and manages a primary and
standby slave netdevs that get registered via the generic failover
infrastructure. This can be used by paravirtual drivers to enable
- an alternate low latency datapath. It alsoenables live migration of
+ an alternate low latency datapath. It also enables live migration of
a VM with direct attached VF by failing over to the paravirtual
datapath when the VF is unplugged.
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index f90bb723985f..b3c63d2f16aa 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -301,7 +301,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
dev->irq = cops_irq(ioaddr, board);
if (dev->irq)
break;
- /* No IRQ found on this port, fallthrough */
+ /* fall through - no IRQ found on this port. */
case 1:
retval = -EINVAL;
goto err_out;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 7c46d9f4fefd..9274dcc6e9b0 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -31,6 +31,7 @@
#include <net/net_namespace.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
+#include <net/netlink.h>
/* General definitions */
#define AD_SHORT_TIMEOUT 1
@@ -851,6 +852,9 @@ static int ad_lacpdu_send(struct port *port)
if (!skb)
return -ENOMEM;
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_tx);
+ atomic64_inc(&BOND_AD_INFO(slave->bond).stats.lacpdu_tx);
+
skb->dev = slave->dev;
skb_reset_mac_header(skb);
skb->network_header = skb->mac_header + ETH_HLEN;
@@ -892,6 +896,17 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
if (!skb)
return -ENOMEM;
+ switch (marker->tlv_type) {
+ case AD_MARKER_INFORMATION_SUBTYPE:
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_tx);
+ atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_tx);
+ break;
+ case AD_MARKER_RESPONSE_SUBTYPE:
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_resp_tx);
+ atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_resp_tx);
+ break;
+ }
+
skb_reserve(skb, 16);
skb->dev = slave->dev;
@@ -1086,6 +1101,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
*/
last_state = port->sm_rx_state;
+ if (lacpdu) {
+ atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.lacpdu_rx);
+ atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.lacpdu_rx);
+ }
/* check if state machine should change state */
/* first, check if port was reinitialized */
@@ -1922,6 +1941,9 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
{
struct bond_marker marker;
+ atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_rx);
+ atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_rx);
+
/* copy the received marker data to the response marker */
memcpy(&marker, marker_info, sizeof(struct bond_marker));
/* change the marker subtype to marker response */
@@ -1946,6 +1968,9 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
static void ad_marker_response_received(struct bond_marker *marker,
struct port *port)
{
+ atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_resp_rx);
+ atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_resp_rx);
+
/* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
}
@@ -2348,66 +2373,68 @@ re_arm:
* bond_3ad_rx_indication - handle a received frame
* @lacpdu: received lacpdu
* @slave: slave struct to work on
- * @length: length of the data received
*
* It is assumed that frames sent on this NIC are not returned as new
* received frames (loopback). Since only the payload is given to this
* function, it checks for loopback.
*/
-static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
- u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave)
{
- struct port *port;
+ struct bonding *bond = slave->bond;
int ret = RX_HANDLER_ANOTHER;
+ struct bond_marker *marker;
+ struct port *port;
+ atomic64_t *stat;
- if (length >= sizeof(struct lacpdu)) {
-
- port = &(SLAVE_AD_INFO(slave)->port);
-
- if (!port->slave) {
- net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
- slave->dev->name, slave->bond->dev->name);
- return ret;
- }
+ port = &(SLAVE_AD_INFO(slave)->port);
+ if (!port->slave) {
+ net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
+ slave->dev->name, slave->bond->dev->name);
+ return ret;
+ }
- switch (lacpdu->subtype) {
- case AD_TYPE_LACPDU:
- ret = RX_HANDLER_CONSUMED;
- netdev_dbg(slave->bond->dev,
- "Received LACPDU on port %d slave %s\n",
- port->actor_port_number,
- slave->dev->name);
- /* Protect against concurrent state machines */
- spin_lock(&slave->bond->mode_lock);
- ad_rx_machine(lacpdu, port);
- spin_unlock(&slave->bond->mode_lock);
+ switch (lacpdu->subtype) {
+ case AD_TYPE_LACPDU:
+ ret = RX_HANDLER_CONSUMED;
+ netdev_dbg(slave->bond->dev,
+ "Received LACPDU on port %d slave %s\n",
+ port->actor_port_number, slave->dev->name);
+ /* Protect against concurrent state machines */
+ spin_lock(&slave->bond->mode_lock);
+ ad_rx_machine(lacpdu, port);
+ spin_unlock(&slave->bond->mode_lock);
+ break;
+ case AD_TYPE_MARKER:
+ ret = RX_HANDLER_CONSUMED;
+ /* No need to convert fields to Little Endian since we
+ * don't use the marker's fields.
+ */
+ marker = (struct bond_marker *)lacpdu;
+ switch (marker->tlv_type) {
+ case AD_MARKER_INFORMATION_SUBTYPE:
+ netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n",
+ port->actor_port_number);
+ ad_marker_info_received(marker, port);
break;
-
- case AD_TYPE_MARKER:
- ret = RX_HANDLER_CONSUMED;
- /* No need to convert fields to Little Endian since we
- * don't use the marker's fields.
- */
-
- switch (((struct bond_marker *)lacpdu)->tlv_type) {
- case AD_MARKER_INFORMATION_SUBTYPE:
- netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n",
- port->actor_port_number);
- ad_marker_info_received((struct bond_marker *)lacpdu, port);
- break;
-
- case AD_MARKER_RESPONSE_SUBTYPE:
- netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n",
- port->actor_port_number);
- ad_marker_response_received((struct bond_marker *)lacpdu, port);
- break;
-
- default:
- netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n",
- port->actor_port_number);
- }
+ case AD_MARKER_RESPONSE_SUBTYPE:
+ netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n",
+ port->actor_port_number);
+ ad_marker_response_received(marker, port);
+ break;
+ default:
+ netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n",
+ port->actor_port_number);
+ stat = &SLAVE_AD_INFO(slave)->stats.marker_unknown_rx;
+ atomic64_inc(stat);
+ stat = &BOND_AD_INFO(bond).stats.marker_unknown_rx;
+ atomic64_inc(stat);
}
+ break;
+ default:
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_unknown_rx);
+ atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_unknown_rx);
}
+
return ret;
}
@@ -2643,10 +2670,13 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
return RX_HANDLER_ANOTHER;
lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
- if (!lacpdu)
+ if (!lacpdu) {
+ atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_illegal_rx);
+ atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_illegal_rx);
return RX_HANDLER_ANOTHER;
+ }
- return bond_3ad_rx_indication(lacpdu, slave, skb->len);
+ return bond_3ad_rx_indication(lacpdu, slave);
}
/**
@@ -2678,3 +2708,61 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
}
spin_unlock_bh(&bond->mode_lock);
}
+
+size_t bond_3ad_stats_size(void)
+{
+ return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_TX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_UNKNOWN_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_ILLEGAL_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_TX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_RX */
+ nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_TX */
+ nla_total_size_64bit(sizeof(u64)); /* BOND_3AD_STAT_MARKER_UNKNOWN_RX */
+}
+
+int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats)
+{
+ u64 val;
+
+ val = atomic64_read(&stats->lacpdu_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->lacpdu_tx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_TX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->lacpdu_unknown_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_UNKNOWN_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->lacpdu_illegal_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_ILLEGAL_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+
+ val = atomic64_read(&stats->marker_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->marker_tx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_TX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->marker_resp_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->marker_resp_tx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_TX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+ val = atomic64_read(&stats->marker_unknown_rx);
+ if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_UNKNOWN_RX, val,
+ BOND_3AD_STAT_PAD))
+ return -EMSGSIZE;
+
+ return 0;
+}
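The new 802.3ad statistics above follow the common lock-free counter pattern: the receive/transmit paths bump atomic64_t counters with atomic64_inc(), and the netlink fill path takes 64-bit snapshots with atomic64_read() before emitting them via nla_put_u64_64bit() with an explicit pad attribute. A stripped-down sketch of that counter pattern, using the hypothetical names demo_stats, demo_rx and demo_report:

#include <linux/atomic.h>
#include <linux/types.h>

struct demo_stats {
	atomic64_t rx_frames;		/* bumped on every received frame */
};

static void demo_rx(struct demo_stats *s)
{
	atomic64_inc(&s->rx_frames);	/* datapath: no locking required */
}

static u64 demo_report(struct demo_stats *s)
{
	/* reporting side takes a consistent 64-bit snapshot */
	return atomic64_read(&s->rx_frames);
}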
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a9d597f28023..b59708c35faf 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -77,7 +77,6 @@
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
-#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
@@ -1183,29 +1182,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
- /* Link-local multicast packets should be passed to the
- * stack on the link they arrive as well as pass them to the
- * bond-master device. These packets are mostly usable when
- * stack receives it with the link on which they arrive
- * (e.g. LLDP) they also must be available on master. Some of
- * the use cases include (but are not limited to): LLDP agents
- * that must be able to operate both on enslaved interfaces as
- * well as on bonds themselves; linux bridges that must be able
- * to process/pass BPDUs from attached bonds when any kind of
- * STP version is enabled on the network.
+ /*
+ * For packets that bond_should_deliver_exact_match() determines should
+ * be suppressed, we want to make an exception for link-local packets.
+ * This is necessary for e.g. LLDP daemons to be able to monitor
+ * inactive slave links without being forced to bind to them
+ * explicitly.
+ *
+ * At the same time, packets that are passed to the bonding master
+ * (including link-local ones) can have their originating interface
+ * determined via PACKET_ORIGDEV socket option.
*/
- if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (nskb) {
- nskb->dev = bond->dev;
- nskb->queue_mapping = 0;
- netif_rx(nskb);
- }
- return RX_HANDLER_PASS;
- }
- if (bond_should_deliver_exact_match(skb, slave, bond))
+ if (bond_should_deliver_exact_match(skb, slave, bond)) {
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ return RX_HANDLER_PASS;
return RX_HANDLER_EXACT;
+ }
skb->dev = bond->dev;
@@ -1963,6 +1955,9 @@ static int __bond_release_one(struct net_device *bond_dev,
if (!bond_has_slaves(bond)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
+ bond->nest_level = SINGLE_DEPTH_NESTING;
+ } else {
+ bond->nest_level = dev_get_nest_level(bond_dev) + 1;
}
unblock_netpoll_tx();
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 6b9ad8673218..b286f591242e 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -675,6 +675,71 @@ nla_put_failure:
return -EMSGSIZE;
}
+static size_t bond_get_linkxstats_size(const struct net_device *dev, int attr)
+{
+ switch (attr) {
+ case IFLA_STATS_LINK_XSTATS:
+ case IFLA_STATS_LINK_XSTATS_SLAVE:
+ break;
+ default:
+ return 0;
+ }
+
+ return bond_3ad_stats_size() + nla_total_size(0);
+}
+
+static int bond_fill_linkxstats(struct sk_buff *skb,
+ const struct net_device *dev,
+ int *prividx, int attr)
+{
+ struct nlattr *nla __maybe_unused;
+ struct slave *slave = NULL;
+ struct nlattr *nest, *nest2;
+ struct bonding *bond;
+
+ switch (attr) {
+ case IFLA_STATS_LINK_XSTATS:
+ bond = netdev_priv(dev);
+ break;
+ case IFLA_STATS_LINK_XSTATS_SLAVE:
+ slave = bond_slave_get_rtnl(dev);
+ if (!slave)
+ return 0;
+ bond = slave->bond;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BOND);
+ if (!nest)
+ return -EMSGSIZE;
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ struct bond_3ad_stats *stats;
+
+ if (slave)
+ stats = &SLAVE_AD_INFO(slave)->stats;
+ else
+ stats = &BOND_AD_INFO(bond).stats;
+
+ nest2 = nla_nest_start(skb, BOND_XSTATS_3AD);
+ if (!nest2) {
+ nla_nest_end(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ if (bond_3ad_stats_fill(skb, stats)) {
+ nla_nest_cancel(skb, nest2);
+ nla_nest_end(skb, nest);
+ return -EMSGSIZE;
+ }
+ nla_nest_end(skb, nest2);
+ }
+ nla_nest_end(skb, nest);
+
+ return 0;
+}
+
struct rtnl_link_ops bond_link_ops __read_mostly = {
.kind = "bond",
.priv_size = sizeof(struct bonding),
@@ -689,6 +754,8 @@ struct rtnl_link_ops bond_link_ops __read_mostly = {
.get_num_tx_queues = bond_get_num_tx_queues,
.get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
as for TX queues */
+ .fill_linkxstats = bond_fill_linkxstats,
+ .get_linkxstats_size = bond_get_linkxstats_size,
.slave_maxtype = IFLA_BOND_SLAVE_MAX,
.slave_policy = bond_slave_policy,
.slave_changelink = bond_slave_changelink,
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 4d5d01cb8141..da1fc17295d9 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1375,6 +1375,7 @@ static int bond_option_slaves_set(struct bonding *bond,
sscanf(newval->string, "%16s", command); /* IFNAMSIZ*/
ifname = command + 1;
if ((strlen(command) <= 1) ||
+ (command[0] != '+' && command[0] != '-') ||
!dev_valid_name(ifname))
goto err_no_cmd;
@@ -1398,6 +1399,7 @@ static int bond_option_slaves_set(struct bonding *bond,
break;
default:
+ /* should not run here. */
goto err_no_cmd;
}
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index a0f954f36c09..44e6c7b1b222 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -257,10 +257,7 @@ static int handle_tx(struct ser_device *ser)
if (skb->len == 0) {
struct sk_buff *tmp = skb_dequeue(&ser->head);
WARN_ON(tmp != skb);
- if (in_interrupt())
- dev_kfree_skb_irq(skb);
- else
- kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
}
/* Send flow off if queue is empty */
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index d28a1398c091..7608bc3e00df 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -73,35 +73,37 @@ MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
#define LOW_WATER_MARK 100
#define HIGH_WATER_MARK (LOW_WATER_MARK*5)
-#ifdef CONFIG_UML
+#ifndef CONFIG_HAS_DMA
/*
* We sometimes use UML for debugging, but it cannot handle
* dma_alloc_coherent so we have to wrap it.
*/
-static inline void *dma_alloc(dma_addr_t *daddr)
+static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
{
return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
}
-static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
+ dma_addr_t handle)
{
kfree(cpu_addr);
}
#else
-static inline void *dma_alloc(dma_addr_t *daddr)
+static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
{
- return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
+ return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr,
GFP_KERNEL);
}
-static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
+ dma_addr_t handle)
{
- dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
+ dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle);
}
-#endif /* CONFIG_UML */
+#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_DEBUG_FS
@@ -610,13 +612,13 @@ static int cfspi_init(struct net_device *dev)
}
/* Allocate DMA buffers. */
- cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+ cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]);
if (!cfspi->xfer.va_tx[0]) {
res = -ENODEV;
goto err_dma_alloc_tx_0;
}
- cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
+ cfspi->xfer.va_rx = dma_alloc(cfspi, &cfspi->xfer.pa_rx);
if (!cfspi->xfer.va_rx) {
res = -ENODEV;
@@ -665,9 +667,9 @@ static int cfspi_init(struct net_device *dev)
return 0;
err_create_wq:
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
err_dma_alloc_rx:
- dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
err_dma_alloc_tx_0:
return res;
}
@@ -683,8 +685,8 @@ static void cfspi_uninit(struct net_device *dev)
cfspi->ndev = NULL;
/* Free DMA buffers. */
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
set_bit(SPI_TERMINATE, &cfspi->state);
wake_up_interruptible(&cfspi->wait);
destroy_workqueue(cfspi->wq);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3b3f88ffab53..c05e4d50d43d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
- struct sk_buff *skb = priv->echo_skb[idx];
- struct canfd_frame *cf;
if (idx >= priv->echo_skb_max) {
netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
return NULL;
}
- if (!skb) {
- netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
- __func__, idx);
- return NULL;
- }
+ if (priv->echo_skb[idx]) {
+ /* Using "struct canfd_frame::len" for the frame
+ * length is supported on both CAN and CANFD frames.
+ */
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 len = cf->len;
- /* Using "struct canfd_frame::len" for the frame
- * length is supported on both CAN and CANFD frames.
- */
- cf = (struct canfd_frame *)skb->data;
- *len_ptr = cf->len;
- priv->echo_skb[idx] = NULL;
+ *len_ptr = len;
+ priv->echo_skb[idx] = NULL;
- return skb;
+ return skb;
+ }
+
+ return NULL;
}
/*
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0f36eafe3ac1..1c66fb2ad76b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev)
}
} else {
/* clear and invalidate unused mailboxes first */
- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) {
+ for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
&mb->can_ctrl);
@@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
gpr_np = of_find_node_by_phandle(phandle);
if (!gpr_np) {
dev_dbg(&pdev->dev, "could not find gpr node by phandle\n");
- return PTR_ERR(gpr_np);
+ return -ENODEV;
}
priv = netdev_priv(dev);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0e4bbdcc614f..0852e5e08177 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
}
-static void b53_enable_vlan(struct b53_device *dev, bool enable)
+static void b53_enable_vlan(struct b53_device *dev, bool enable,
+ bool enable_filtering)
{
u8 mgmt, vc0, vc1, vc4 = 0, vc5;
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
- vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
- vc5 |= VC5_DROP_VTABLE_MISS;
+ if (enable_filtering) {
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+ } else {
+ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+ }
if (is5325(dev))
vc0 &= ~VC0_RESERVED_1;
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
}
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+ dev->vlan_enabled = enable;
+ dev->vlan_filtering_enabled = enable_filtering;
}
static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -534,7 +543,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_enable_port);
-void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+void b53_disable_port(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
u8 reg;
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev)
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
+static u16 b53_default_pvid(struct b53_device *dev)
+{
+ if (is5325(dev) || is5365(dev))
+ return 1;
+ else
+ return 0;
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
struct b53_vlan vl = { 0 };
- int i;
+ int i, def_vid;
+
+ def_vid = b53_default_pvid(dev);
/* clear all vlan entries */
if (is5325(dev) || is5365(dev)) {
- for (i = 1; i < dev->num_vlans; i++)
+ for (i = def_vid; i < dev->num_vlans; i++)
b53_set_vlan_entry(dev, i, &vl);
} else {
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, false);
+ b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,
- B53_VLAN_PORT_DEF_TAG(i), 1);
+ B53_VLAN_PORT_DEF_TAG(i), def_vid);
if (!is5325(dev) && !is5365(dev))
b53_set_jumbo(dev, dev->enable_jumbo, false);
@@ -944,7 +963,7 @@ static int b53_setup(struct dsa_switch *ds)
if (dsa_is_cpu_port(ds, port))
b53_enable_cpu_port(dev, port);
else if (dsa_is_unused_port(ds, port))
- b53_disable_port(ds, port, NULL);
+ b53_disable_port(ds, port);
}
return ret;
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
+ struct b53_device *dev = ds->priv;
+ struct net_device *bridge_dev;
+ unsigned int i;
+ u16 pvid, new_pvid;
+
+ /* Handle the case where multiple bridges span the same switch device
+ * and one of them has a different setting than what is being requested
+ * which would be breaking filtering semantics for any of the other
+ * bridge devices.
+ */
+ b53_for_each_port(dev, i) {
+ bridge_dev = dsa_to_port(ds, i)->bridge_dev;
+ if (bridge_dev &&
+ bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
+ br_vlan_enabled(bridge_dev) != vlan_filtering) {
+ netdev_err(bridge_dev,
+ "VLAN filtering is global to the switch!\n");
+ return -EINVAL;
+ }
+ }
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+ new_pvid = pvid;
+ if (dev->vlan_filtering_enabled && !vlan_filtering) {
+ /* Filtering is currently enabled, use the default PVID since
+ * the bridge does not expect tagging anymore
+ */
+ dev->ports[port].pvid = pvid;
+ new_pvid = b53_default_pvid(dev);
+ } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
+ /* Filtering is currently disabled, restore the previous PVID */
+ new_pvid = dev->ports[port].pvid;
+ }
+
+ if (pvid != new_pvid)
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ new_pvid);
+
+ b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
+
return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid_end > dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, true);
+ b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
return 0;
}
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
b53_fast_age_vlan(dev, vid);
}
- if (pvid) {
+ if (pvid && !dsa_is_cpu_port(ds, port)) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
vlan->vid_end);
b53_fast_age_vlan(dev, vid);
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
vl->members &= ~BIT(port);
- if (pvid == vid) {
- if (is5325(dev) || is5365(dev))
- pvid = 1;
- else
- pvid = 0;
- }
+ if (pvid == vid)
+ pvid = b53_default_pvid(dev);
if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag &= ~(BIT(port));
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
- if (is5325(dev) || is5365(dev))
- pvid = 1;
- else
- pvid = 0;
+ pvid = b53_default_pvid(dev);
/* Make this port join all VLANs without VLAN entries */
if (is58xx(dev)) {
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index ec796482792d..e3441dcf2d21 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -91,6 +91,7 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
struct ethtool_eee eee;
+ u16 pvid;
};
struct b53_vlan {
@@ -137,6 +138,8 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
+ bool vlan_enabled;
+ bool vlan_filtering_enabled;
unsigned int num_ports;
struct b53_port *ports;
};
@@ -353,7 +356,7 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
-void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 90f514252987..d9c56a779c08 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
/* Clear all pending interrupts */
writel(0xffffffff, priv->regs + B53_SRAB_INTR);
- if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
- return;
-
for (i = 0; i < B53_N_PORTS; i++) {
port = &priv->port_intrs[i];
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 361fbde76654..c8e3f05e1d72 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -221,8 +221,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
return b53_enable_port(ds, port, phy);
}
-static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg;
@@ -241,7 +240,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, false);
- b53_disable_port(ds, port, phy);
+ b53_disable_port(ds, port);
/* Power down the port memory */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
@@ -690,9 +689,9 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
* port, the other ones have already been disabled during
* bcm_sf2_sw_setup
*/
- for (port = 0; port < DSA_MAX_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
- bcm_sf2_port_disable(ds, port, NULL);
+ bcm_sf2_port_disable(ds, port);
}
return 0;
@@ -726,10 +725,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
{
struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_wolinfo pwol;
+ struct ethtool_wolinfo pwol = { };
/* Get the parent device WoL settings */
- p->ethtool_ops->get_wol(p, &pwol);
+ if (p->ethtool_ops->get_wol)
+ p->ethtool_ops->get_wol(p, &pwol);
/* Advertise the parent device supported settings */
wol->supported = pwol.supported;
@@ -750,9 +750,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
- struct ethtool_wolinfo pwol;
+ struct ethtool_wolinfo pwol = { };
- p->ethtool_ops->get_wol(p, &pwol);
+ if (p->ethtool_ops->get_wol)
+ p->ethtool_ops->get_wol(p, &pwol);
if (wol->wolopts & ~pwol.supported)
return -EINVAL;
@@ -786,7 +787,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
else
- bcm_sf2_port_disable(ds, port, NULL);
+ bcm_sf2_port_disable(ds, port);
}
b53_configure_vlan(ds);
@@ -894,12 +895,44 @@ static const struct b53_io_ops bcm_sf2_io_ops = {
.write64 = bcm_sf2_core_write64,
};
+static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ int cnt = b53_get_sset_count(ds, port, stringset);
+
+ b53_get_strings(ds, port, stringset, data);
+ bcm_sf2_cfp_get_strings(ds, port, stringset,
+ data + cnt * ETH_GSTRING_LEN);
+}
+
+static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS);
+
+ b53_get_ethtool_stats(ds, port, data);
+ bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt);
+}
+
+static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
+ int sset)
+{
+ int cnt = b53_get_sset_count(ds, port, sset);
+
+ if (cnt < 0)
+ return cnt;
+
+ cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset);
+
+ return cnt;
+}
+
static const struct dsa_switch_ops bcm_sf2_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
- .get_strings = b53_get_strings,
- .get_ethtool_stats = b53_get_ethtool_stats,
- .get_sset_count = b53_get_sset_count,
+ .get_strings = bcm_sf2_sw_get_strings,
+ .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
+ .get_sset_count = bcm_sf2_sw_get_sset_count,
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
.phylink_validate = bcm_sf2_sw_validate,
@@ -1062,7 +1095,6 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
spin_lock_init(&priv->indir_lock);
- mutex_init(&priv->stats_mutex);
mutex_init(&priv->cfp.lock);
INIT_LIST_HEAD(&priv->cfp.rules_list);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index faaef320ec48..eb3655bea467 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -87,9 +87,6 @@ struct bcm_sf2_priv {
/* Backing b53_device */
struct b53_device *dev;
- /* Mutex protecting access to the MIB counters */
- struct mutex stats_mutex;
-
struct bcm_sf2_hw_params hw_params;
struct bcm_sf2_port_status port_sts[DSA_MAX_PORTS];
@@ -216,5 +213,10 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
void bcm_sf2_cfp_exit(struct dsa_switch *ds);
int bcm_sf2_cfp_resume(struct dsa_switch *ds);
+void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data);
+void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset);
#endif /* __BCM_SF2_H */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index e14663ab6dbc..e6234d209787 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
+#include <net/flow_offload.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -212,6 +213,7 @@ static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
unsigned int rule_index,
+ int src_port,
unsigned int port_num,
unsigned int queue_num,
bool fwd_map_change)
@@ -229,6 +231,10 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
else
reg = 0;
+ /* Enable looping back to the original port */
+ if (src_port == port_num)
+ reg |= LOOP_BK_EN;
+
core_writel(priv, reg, CORE_ACT_POL_DATA0);
/* Set classification ID that needs to be put in Broadcom tag */
@@ -257,7 +263,8 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
}
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
- struct ethtool_tcpip4_spec *v4_spec,
+ struct flow_dissector_key_ipv4_addrs *addrs,
+ struct flow_dissector_key_ports *ports,
unsigned int slice_num,
bool mask)
{
@@ -278,7 +285,7 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
* UDF_n_A6 [23:8]
* UDF_n_A5 [7:0]
*/
- reg = be16_to_cpu(v4_spec->pdst) >> 8;
+ reg = be16_to_cpu(ports->dst) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(3);
else
@@ -289,9 +296,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
* UDF_n_A4 [23:8]
* UDF_n_A3 [7:0]
*/
- reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
- (u32)be16_to_cpu(v4_spec->psrc) << 8 |
- (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+ reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
+ (u32)be16_to_cpu(ports->src) << 8 |
+ (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(2);
else
@@ -302,9 +309,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
* UDF_n_A2 [23:8]
* UDF_n_A1 [7:0]
*/
- reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
- (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
- (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+ reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
+ (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
+ (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(1);
else
@@ -317,8 +324,8 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
* Slice ID [3:2]
* Slice valid [1:0]
*/
- reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
- (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+ reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
+ (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
SLICE_NUM(slice_num) | SLICE_VALID;
if (mask)
offset = CORE_CFP_MASK_PORT(0);
@@ -332,9 +339,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int queue_num,
struct ethtool_rx_flow_spec *fs)
{
- struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+ struct ethtool_rx_flow_spec_input input = {};
const struct cfp_udf_layout *layout;
unsigned int slice_num, rule_index;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_match_ipv4_addrs ipv4;
+ struct flow_match_ports ports;
+ struct flow_match_ip ip;
u8 ip_proto, ip_frag;
u8 num_udf;
u32 reg;
@@ -343,13 +354,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
ip_proto = IPPROTO_TCP;
- v4_spec = &fs->h_u.tcp_ip4_spec;
- v4_m_spec = &fs->m_u.tcp_ip4_spec;
break;
case UDP_V4_FLOW:
ip_proto = IPPROTO_UDP;
- v4_spec = &fs->h_u.udp_ip4_spec;
- v4_m_spec = &fs->m_u.udp_ip4_spec;
break;
default:
return -EINVAL;
@@ -367,11 +374,22 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
if (rule_index > bcm_sf2_cfp_rule_size(priv))
return -ENOSPC;
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
+ flow_rule_match_ports(flow->rule, &ports);
+ flow_rule_match_ip(flow->rule, &ip);
+
layout = &udf_tcpip4_layout;
/* We only use one UDF slice for now */
slice_num = bcm_sf2_get_slice_number(layout, 0);
- if (slice_num == UDF_NUM_SLICES)
- return -EINVAL;
+ if (slice_num == UDF_NUM_SLICES) {
+ ret = -EINVAL;
+ goto out_err_flow_rule;
+ }
num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -398,7 +416,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
* Reserved [1]
* UDF_Valid[8] [0]
*/
- core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
+ core_writel(priv, ip.key->tos << IPTOS_SHIFT |
ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
udf_upper_bits(num_udf),
CORE_CFP_DATA_PORT(6));
@@ -417,8 +435,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
/* Program the match and the mask */
- bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
- bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index);
@@ -426,14 +444,14 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index);
- return ret;
+ goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now */
- ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
queue_num, true);
if (ret)
- return ret;
+ goto out_err_flow_rule;
/* Turn on CFP for this rule now */
reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -446,6 +464,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
fs->location = rule_index;
return 0;
+
+out_err_flow_rule:
+ ethtool_rx_flow_rule_destroy(flow);
+ return ret;
}
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
@@ -581,9 +603,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int queue_num,
struct ethtool_rx_flow_spec *fs)
{
- struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+ struct ethtool_rx_flow_spec_input input = {};
unsigned int slice_num, rule_index[2];
const struct cfp_udf_layout *layout;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_match_ipv6_addrs ipv6;
+ struct flow_match_ports ports;
u8 ip_proto, ip_frag;
int ret = 0;
u8 num_udf;
@@ -592,13 +617,9 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V6_FLOW:
ip_proto = IPPROTO_TCP;
- v6_spec = &fs->h_u.tcp_ip6_spec;
- v6_m_spec = &fs->m_u.tcp_ip6_spec;
break;
case UDP_V6_FLOW:
ip_proto = IPPROTO_UDP;
- v6_spec = &fs->h_u.udp_ip6_spec;
- v6_m_spec = &fs->m_u.udp_ip6_spec;
break;
default:
return -EINVAL;
@@ -645,6 +666,15 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
goto out_err;
}
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow)) {
+ ret = PTR_ERR(flow);
+ goto out_err;
+ }
+ flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
+ flow_rule_match_ports(flow->rule, &ports);
+
/* Apply the UDF layout for this filter */
bcm_sf2_cfp_udf_set(priv, layout, slice_num);
@@ -688,10 +718,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
/* Slice the IPv6 source address and port */
- bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
- slice_num, false);
- bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
- SLICE_NUM_MASK, true);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
+ ports.key->src, slice_num, false);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
+ ports.mask->src, SLICE_NUM_MASK, true);
/* Insert into TCAM now because we need to insert a second rule */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,20 +729,20 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
- goto out_err;
+ goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now */
- ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
queue_num, false);
if (ret)
- goto out_err;
+ goto out_err_flow_rule;
/* Now deal with the second slice to chain this rule */
slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
if (slice_num == UDF_NUM_SLICES) {
ret = -EINVAL;
- goto out_err;
+ goto out_err_flow_rule;
}
num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -748,10 +778,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
/* Mask all */
core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
- bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
- false);
- bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
- SLICE_NUM_MASK, true);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
+ ports.key->dst, slice_num, false);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
+ ports.key->dst, SLICE_NUM_MASK, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
@@ -759,16 +789,16 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
- goto out_err;
+ goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now, set chain ID to
* the one we are chained to
*/
- ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
queue_num, true);
if (ret)
- goto out_err;
+ goto out_err_flow_rule;
/* Turn on CFP for this rule now */
reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -784,6 +814,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
return ret;
+out_err_flow_rule:
+ ethtool_rx_flow_rule_destroy(flow);
out_err:
clear_bit(rule_index[1], priv->cfp.used);
return ret;
@@ -1169,3 +1201,91 @@ int bcm_sf2_cfp_resume(struct dsa_switch *ds)
return ret;
}
+
+static const struct bcm_sf2_cfp_stat {
+ unsigned int offset;
+ unsigned int ram_loc;
+ const char *name;
+} bcm_sf2_cfp_stats[] = {
+ {
+ .offset = CORE_STAT_GREEN_CNTR,
+ .ram_loc = GREEN_STAT_RAM,
+ .name = "Green"
+ },
+ {
+ .offset = CORE_STAT_YELLOW_CNTR,
+ .ram_loc = YELLOW_STAT_RAM,
+ .name = "Yellow"
+ },
+ {
+ .offset = CORE_STAT_RED_CNTR,
+ .ram_loc = RED_STAT_RAM,
+ .name = "Red"
+ },
+};
+
+void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
+ char buf[ETH_GSTRING_LEN];
+ unsigned int i, j, iter;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 1; i < priv->num_cfp_rules; i++) {
+ for (j = 0; j < s; j++) {
+ snprintf(buf, sizeof(buf),
+ "CFP%03d_%sCntr",
+ i, bcm_sf2_cfp_stats[j].name);
+ iter = (i - 1) * s + j;
+ strlcpy(data + iter * ETH_GSTRING_LEN,
+ buf, ETH_GSTRING_LEN);
+ }
+ }
+}
+
+void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
+ const struct bcm_sf2_cfp_stat *stat;
+ unsigned int i, j, iter;
+ struct cfp_rule *rule;
+ int ret;
+
+ mutex_lock(&priv->cfp.lock);
+ for (i = 1; i < priv->num_cfp_rules; i++) {
+ rule = bcm_sf2_cfp_rule_find(priv, port, i);
+ if (!rule)
+ continue;
+
+ for (j = 0; j < s; j++) {
+ stat = &bcm_sf2_cfp_stats[j];
+
+ bcm_sf2_cfp_rule_addr_set(priv, i);
+ ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
+ if (ret)
+ continue;
+
+ iter = (i - 1) * s + j;
+ data[iter] = core_readl(priv, stat->offset);
+ }
+
+ }
+ mutex_unlock(&priv->cfp.lock);
+}
+
+int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ /* 3 counters per CFP rule */
+ return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
+}
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 0a1e530d52b7..67f056206f37 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -400,6 +400,10 @@ enum bcm_sf2_reg_offs {
#define CORE_RATE_METER6 0x281e0
#define CIR_REF_CNT_MASK 0x7ffff
+#define CORE_STAT_GREEN_CNTR 0x28200
+#define CORE_STAT_YELLOW_CNTR 0x28210
+#define CORE_STAT_RED_CNTR 0x28220
+
#define CORE_CFP_CTL_REG 0x28400
#define CFP_EN_MAP_MASK 0x1ff
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 816f34d64736..17482ae09aa5 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -343,7 +343,7 @@ static int __init dsa_loop_init(void)
unsigned int i;
for (i = 0; i < NUM_FIXED_PHYS; i++)
- phydevs[i] = fixed_phy_register(PHY_POLL, &status, -1, NULL);
+ phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
return mdio_driver_register(&dsa_loop_drv);
}
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index b4f6e1a67dd9..2ffab7ee3d80 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1091,8 +1091,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port,
return lan9303_enable_processing_port(chip, port);
}
-static void lan9303_port_disable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
struct lan9303 *chip = ds->priv;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 693a67f45bef..d8328866908c 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -480,8 +480,7 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
return 0;
}
-static void gswip_port_disable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+static void gswip_port_disable(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
@@ -549,7 +548,7 @@ static int gswip_setup(struct dsa_switch *ds)
/* disable port fetch/store dma on all ports */
for (i = 0; i < priv->hw_info->max_ports; i++)
- gswip_port_disable(ds, i, NULL);
+ gswip_port_disable(ds, i);
/* enable Switch */
gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
@@ -1069,10 +1068,10 @@ static int gswip_probe(struct platform_device *pdev)
version = gswip_switch_r(priv, GSWIP_VERSION);
/* bring up the mdio bus */
- gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL,
- "lantiq,gphy-fw");
+ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
if (gphy_fw_np) {
err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
+ of_node_put(gphy_fw_np);
if (err) {
dev_err(dev, "gphy fw probe failed\n");
return err;
@@ -1080,13 +1079,12 @@ static int gswip_probe(struct platform_device *pdev)
}
/* bring up the mdio bus */
- mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
- "lantiq,xrx200-mdio");
+ mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
if (mdio_np) {
err = gswip_mdio(priv, mdio_np);
if (err) {
dev_err(dev, "mdio probe failed\n");
- goto gphy_fw;
+ goto put_mdio_node;
}
}
@@ -1099,7 +1097,7 @@ static int gswip_probe(struct platform_device *pdev)
dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
priv->hw_info->cpu_port);
err = -EINVAL;
- goto mdio_bus;
+ goto disable_switch;
}
platform_set_drvdata(pdev, priv);
@@ -1109,10 +1107,14 @@ static int gswip_probe(struct platform_device *pdev)
(version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
return 0;
+disable_switch:
+ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
+ dsa_unregister_switch(priv->ds);
mdio_bus:
if (mdio_np)
mdiobus_unregister(priv->ds->slave_mii_bus);
-gphy_fw:
+put_mdio_node:
+ of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
return err;
@@ -1123,16 +1125,15 @@ static int gswip_remove(struct platform_device *pdev)
struct gswip_priv *priv = platform_get_drvdata(pdev);
int i;
- if (!priv)
- return 0;
-
/* disable the switch */
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
- if (priv->ds->slave_mii_bus)
+ if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
+ of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ }
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
@@ -1162,6 +1163,12 @@ static struct platform_driver gswip_driver = {
module_platform_driver(gswip_driver);
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");
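The gswip probe changes above replace of_find_compatible_node(), which walks the whole device tree starting from the given node, with of_get_compatible_child(), which only inspects direct children and therefore cannot wander into an unrelated node; both return a node with an elevated refcount that must be released with of_node_put(). A minimal sketch of that lookup pattern (kernel context assumed, helper name illustrative):

#include <linux/errno.h>
#include <linux/of.h>

static int example_find_gphy_fw(struct device_node *parent)
{
	struct device_node *child;

	child = of_get_compatible_child(parent, "lantiq,gphy-fw");
	if (!child)
		return -ENODEV;

	/* ... parse the child node here ... */

	of_node_put(child);	/* drop the reference taken by the lookup */
	return 0;
}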
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 89ed059bb576..f16e1d7d8615 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -2,24 +2,26 @@
/*
* Microchip KSZ9477 switch driver main logic
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2019 Microchip Technology Inc.
*/
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
-#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz_priv.h"
-#include "ksz_common.h"
#include "ksz9477_reg.h"
+#include "ksz_common.h"
+
+/* Used with variable features to indicate capabilities. */
+#define GBIT_SUPPORT BIT(0)
+#define NEW_XMII BIT(1)
+#define IS_9893 BIT(2)
static const struct {
int index;
@@ -259,10 +261,84 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
return 0;
}
+static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
+ u64 *cnt)
+{
+ struct ksz_poll_ctx ctx = {
+ .dev = dev,
+ .port = port,
+ .offset = REG_PORT_MIB_CTRL_STAT__4,
+ };
+ struct ksz_port *p = &dev->ports[port];
+ u32 data;
+ int ret;
+
+ /* retain the flush/freeze bit */
+ data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
+ data |= MIB_COUNTER_READ;
+ data |= (addr << MIB_COUNTER_INDEX_S);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
+
+ ret = readx_poll_timeout(ksz_pread32_poll, &ctx, data,
+ !(data & MIB_COUNTER_READ), 10, 1000);
+
+ /* failed to read MIB. get out of loop */
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to get MIB\n");
+ return;
+ }
+
+ /* count resets upon read */
+ ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
+ *cnt += data;
+}
+
+static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ addr = ksz9477_mib_names[addr].index;
+ ksz9477_r_mib_cnt(dev, port, addr, cnt);
+}
+
+static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+{
+ u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
+ struct ksz_port *p = &dev->ports[port];
+
+ /* enable/disable the port for flush/freeze function */
+ mutex_lock(&p->mib.cnt_mutex);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);
+
+ /* used by MIB counter reading code to know freeze is enabled */
+ p->freeze = freeze;
+ mutex_unlock(&p->mib.cnt_mutex);
+}
+
+static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
+{
+ struct ksz_port_mib *mib = &dev->ports[port].mib;
+
+ /* flush all enabled port MIB counters */
+ mutex_lock(&mib->cnt_mutex);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
+ MIB_COUNTER_FLUSH_FREEZE);
+ ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
+ mutex_unlock(&mib->cnt_mutex);
+
+ mib->cnt_ptr = 0;
+ memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
+}
+
static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
int port)
{
- return DSA_TAG_PROTO_KSZ9477;
+ enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->features & IS_9893)
+ proto = DSA_TAG_PROTO_KSZ9893;
+ return proto;
}
static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
@@ -323,6 +399,10 @@ static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
/* No real PHY after this. */
if (addr >= dev->phy_port_cnt)
return 0;
+
+ /* No gigabit support. Do not write to this register. */
+ if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
+ return 0;
ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
return 0;
@@ -342,47 +422,6 @@ static void ksz9477_get_strings(struct dsa_switch *ds, int port,
}
}
-static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *buf)
-{
- struct ksz_device *dev = ds->priv;
- int i;
- u32 data;
- int timeout;
-
- mutex_lock(&dev->stats_mutex);
-
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- data = MIB_COUNTER_READ;
- data |= ((ksz9477_mib_names[i].index & 0xFF) <<
- MIB_COUNTER_INDEX_S);
- ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
-
- timeout = 1000;
- do {
- ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
- &data);
- usleep_range(1, 10);
- if (!(data & MIB_COUNTER_READ))
- break;
- } while (timeout-- > 0);
-
- /* failed to read MIB. get out of loop */
- if (!timeout) {
- dev_dbg(dev->dev, "Failed to get MIB\n");
- break;
- }
-
- /* count resets upon read */
- ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
-
- dev->mib_value[i] += (uint64_t)data;
- buf[i] = dev->mib_value[i];
- }
-
- mutex_unlock(&dev->stats_mutex);
-}
-
static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
u8 member)
{
@@ -397,6 +436,7 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
struct ksz_port *p = &dev->ports[port];
u8 data;
int member = -1;
+ int forward = dev->member;
ksz_pread8(dev, port, P_STP_CTRL, &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
@@ -424,12 +464,14 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
break;
member = dev->host_mask | p->vid_member;
+ mutex_lock(&dev->dev_mutex);
/* Port is a member of a bridge. */
if (dev->br_member & (1 << port)) {
dev->member |= (1 << port);
member = dev->member;
}
+ mutex_unlock(&dev->dev_mutex);
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
@@ -444,6 +486,7 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
ksz_pwrite8(dev, port, P_STP_CTRL, data);
p->stp_state = state;
+ mutex_lock(&dev->dev_mutex);
if (data & PORT_RX_ENABLE)
dev->rx_ports |= (1 << port);
else
@@ -464,10 +507,11 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
}
/* When topology has changed the function ksz_update_port_member
- * should be called to modify port forwarding behavior. However
- * as the offload_fwd_mark indication cannot be reported here
- * the switch forwarding function is not enabled.
+ * should be called to modify port forwarding behavior.
*/
+ if (forward != dev->member)
+ ksz_update_port_member(dev, port);
+ mutex_unlock(&dev->dev_mutex);
}
static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -965,6 +1009,161 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
PORT_MIRROR_SNIFFER, false);
}
+static void ksz9477_phy_setup(struct ksz_device *dev, int port,
+ struct phy_device *phy)
+{
+ /* Only apply to port with PHY. */
+ if (port >= dev->phy_port_cnt)
+ return;
+
+ /* The MAC actually cannot run in 1000 half-duplex mode. */
+ phy_remove_link_mode(phy,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* PHY does not support gigabit. */
+ if (!(dev->features & GBIT_SUPPORT))
+ phy_remove_link_mode(phy,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+}
+
+static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
+{
+ bool gbit;
+
+ if (dev->features & NEW_XMII)
+ gbit = !(data & PORT_MII_NOT_1GBIT);
+ else
+ gbit = !!(data & PORT_MII_1000MBIT_S1);
+ return gbit;
+}
+
+static void ksz9477_set_gbit(struct ksz_device *dev, bool gbit, u8 *data)
+{
+ if (dev->features & NEW_XMII) {
+ if (gbit)
+ *data &= ~PORT_MII_NOT_1GBIT;
+ else
+ *data |= PORT_MII_NOT_1GBIT;
+ } else {
+ if (gbit)
+ *data |= PORT_MII_1000MBIT_S1;
+ else
+ *data &= ~PORT_MII_1000MBIT_S1;
+ }
+}
+
+static int ksz9477_get_xmii(struct ksz_device *dev, u8 data)
+{
+ int mode;
+
+ if (dev->features & NEW_XMII) {
+ switch (data & PORT_MII_SEL_M) {
+ case PORT_MII_SEL:
+ mode = 0;
+ break;
+ case PORT_RMII_SEL:
+ mode = 1;
+ break;
+ case PORT_GMII_SEL:
+ mode = 2;
+ break;
+ default:
+ mode = 3;
+ }
+ } else {
+ switch (data & PORT_MII_SEL_M) {
+ case PORT_MII_SEL_S1:
+ mode = 0;
+ break;
+ case PORT_RMII_SEL_S1:
+ mode = 1;
+ break;
+ case PORT_GMII_SEL_S1:
+ mode = 2;
+ break;
+ default:
+ mode = 3;
+ }
+ }
+ return mode;
+}
+
+static void ksz9477_set_xmii(struct ksz_device *dev, int mode, u8 *data)
+{
+ u8 xmii;
+
+ if (dev->features & NEW_XMII) {
+ switch (mode) {
+ case 0:
+ xmii = PORT_MII_SEL;
+ break;
+ case 1:
+ xmii = PORT_RMII_SEL;
+ break;
+ case 2:
+ xmii = PORT_GMII_SEL;
+ break;
+ default:
+ xmii = PORT_RGMII_SEL;
+ break;
+ }
+ } else {
+ switch (mode) {
+ case 0:
+ xmii = PORT_MII_SEL_S1;
+ break;
+ case 1:
+ xmii = PORT_RMII_SEL_S1;
+ break;
+ case 2:
+ xmii = PORT_GMII_SEL_S1;
+ break;
+ default:
+ xmii = PORT_RGMII_SEL_S1;
+ break;
+ }
+ }
+ *data &= ~PORT_MII_SEL_M;
+ *data |= xmii;
+}
+
+static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
+{
+ phy_interface_t interface;
+ bool gbit;
+ int mode;
+ u8 data8;
+
+ if (port < dev->phy_port_cnt)
+ return PHY_INTERFACE_MODE_NA;
+ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
+ gbit = ksz9477_get_gbit(dev, data8);
+ mode = ksz9477_get_xmii(dev, data8);
+ switch (mode) {
+ case 2:
+ interface = PHY_INTERFACE_MODE_GMII;
+ if (gbit)
+ break;
+ case 0:
+ interface = PHY_INTERFACE_MODE_MII;
+ break;
+ case 1:
+ interface = PHY_INTERFACE_MODE_RMII;
+ break;
+ default:
+ interface = PHY_INTERFACE_MODE_RGMII;
+ if (data8 & PORT_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_TXID;
+ if (data8 & PORT_RGMII_ID_IG_ENABLE) {
+ interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ if (data8 & PORT_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_ID;
+ }
+ break;
+ }
+ return interface;
+}
+
static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
u8 data8;
@@ -1011,24 +1210,25 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* configure MAC to 1G & RGMII mode */
ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- data8 &= ~PORT_MII_NOT_1GBIT;
- data8 &= ~PORT_MII_SEL_M;
switch (dev->interface) {
case PHY_INTERFACE_MODE_MII:
- data8 |= PORT_MII_NOT_1GBIT;
- data8 |= PORT_MII_SEL;
+ ksz9477_set_xmii(dev, 0, &data8);
+ ksz9477_set_gbit(dev, false, &data8);
p->phydev.speed = SPEED_100;
break;
case PHY_INTERFACE_MODE_RMII:
- data8 |= PORT_MII_NOT_1GBIT;
- data8 |= PORT_RMII_SEL;
+ ksz9477_set_xmii(dev, 1, &data8);
+ ksz9477_set_gbit(dev, false, &data8);
p->phydev.speed = SPEED_100;
break;
case PHY_INTERFACE_MODE_GMII:
- data8 |= PORT_GMII_SEL;
+ ksz9477_set_xmii(dev, 2, &data8);
+ ksz9477_set_gbit(dev, true, &data8);
p->phydev.speed = SPEED_1000;
break;
default:
+ ksz9477_set_xmii(dev, 3, &data8);
+ ksz9477_set_gbit(dev, true, &data8);
data8 &= ~PORT_RGMII_ID_IG_ENABLE;
data8 &= ~PORT_RGMII_ID_EG_ENABLE;
if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
@@ -1037,13 +1237,13 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
dev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
data8 |= PORT_RGMII_ID_EG_ENABLE;
- data8 |= PORT_RGMII_SEL;
p->phydev.speed = SPEED_1000;
break;
}
ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
p->phydev.duplex = 1;
}
+ mutex_lock(&dev->dev_mutex);
if (cpu_port) {
member = dev->port_mask;
dev->on_ports = dev->host_mask;
@@ -1056,6 +1256,7 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
if (p->phydev.link)
dev->live_ports |= (1 << port);
}
+ mutex_unlock(&dev->dev_mutex);
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
@@ -1073,10 +1274,25 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->port_cnt; i++) {
if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+ phy_interface_t interface;
+
dev->cpu_port = i;
dev->host_mask = (1 << dev->cpu_port);
dev->port_mask |= dev->host_mask;
+ /* Read from XMII register to determine host port
+ * interface. If it is set specifically in the device tree,
+ * note the difference to help debugging.

+ */
+ interface = ksz9477_get_interface(dev, i);
+ if (!dev->interface)
+ dev->interface = interface;
+ if (interface && interface != dev->interface)
+ dev_info(dev->dev,
+ "use %s instead of %s\n",
+ phy_modes(dev->interface),
+ phy_modes(interface));
+
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
p = &dev->ports[dev->cpu_port];
@@ -1130,6 +1346,9 @@ static int ksz9477_setup(struct dsa_switch *ds)
ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
true);
+ /* The length check does not work correctly with tail tagging. */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
+
/* accept packet up to 2000bytes */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
@@ -1140,9 +1359,14 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* queue based egress rate limit */
ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
+ /* enable global MIB counter freeze function */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+
/* start switch */
ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
+ ksz_init_mib_timer(dev);
+
return 0;
}
@@ -1151,6 +1375,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.setup = ksz9477_setup,
.phy_read = ksz9477_phy_read16,
.phy_write = ksz9477_phy_write16,
+ .adjust_link = ksz_adjust_link,
.port_enable = ksz_enable_port,
.port_disable = ksz_disable_port,
.get_strings = ksz9477_get_strings,
@@ -1182,6 +1407,8 @@ static u32 ksz9477_get_port_addr(int port, int offset)
static int ksz9477_switch_detect(struct ksz_device *dev)
{
u8 data8;
+ u8 id_hi;
+ u8 id_lo;
u32 id32;
int ret;
@@ -1199,11 +1426,40 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
if (ret)
return ret;
+ ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
+ if (ret)
+ return ret;
/* Number of ports can be reduced depending on chip. */
dev->mib_port_cnt = TOTAL_PORT_NUM;
dev->phy_port_cnt = 5;
+ /* Default capability is gigabit capable. */
+ dev->features = GBIT_SUPPORT;
+
+ id_hi = (u8)(id32 >> 16);
+ id_lo = (u8)(id32 >> 8);
+ if ((id_lo & 0xf) == 3) {
+ /* Chip is from KSZ9893 design. */
+ dev->features |= IS_9893;
+
+ /* Chip does not support gigabit. */
+ if (data8 & SW_QW_ABLE)
+ dev->features &= ~GBIT_SUPPORT;
+ dev->mib_port_cnt = 3;
+ dev->phy_port_cnt = 2;
+ } else {
+ /* Chip uses new XMII register definitions. */
+ dev->features |= NEW_XMII;
+
+ /* Chip does not support gigabit. */
+ if (!(data8 & SW_GIGABIT_ABLE))
+ dev->features &= ~GBIT_SUPPORT;
+ }
+
+ /* Change chip id to known ones so it can be matched against them. */
+ id32 = (id_hi << 16) | (id_lo << 8);
+
dev->chip_id = id32;
return 0;
@@ -1238,6 +1494,15 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
},
+ {
+ .chip_id = 0x00989300,
+ .dev_name = "KSZ9893",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ },
};
static int ksz9477_switch_init(struct ksz_device *dev)
@@ -1276,6 +1541,7 @@ static int ksz9477_switch_init(struct ksz_device *dev)
if (!dev->ports)
return -ENOMEM;
for (i = 0; i < dev->mib_port_cnt; i++) {
+ mutex_init(&dev->ports[i].mib.cnt_mutex);
dev->ports[i].mib.counters =
devm_kzalloc(dev->dev,
sizeof(u64) *
@@ -1284,7 +1550,6 @@ static int ksz9477_switch_init(struct ksz_device *dev)
if (!dev->ports[i].mib.counters)
return -ENOMEM;
}
- dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
return 0;
}
@@ -1298,7 +1563,12 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .phy_setup = ksz9477_phy_setup,
.port_setup = ksz9477_port_setup,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
.shutdown = ksz9477_reset_switch,
.detect = ksz9477_switch_detect,
.init = ksz9477_switch_init,
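ksz9477_r_mib_cnt() above replaces an open-coded busy-wait with readx_poll_timeout() from <linux/iopoll.h>, which repeatedly evaluates val = op(addr) until the condition holds or the timeout (in microseconds) expires, returning -ETIMEDOUT on failure. A condensed sketch of that pattern with a context-struct getter, mirroring ksz_pread32_poll() (register layout and names below are placeholders, not the KSZ9477 definitions):

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

struct example_poll_ctx {
	void __iomem *base;
	unsigned int offset;
};

static u32 example_read(struct example_poll_ctx *ctx)
{
	return readl(ctx->base + ctx->offset);
}

static int example_wait_busy_clear(struct example_poll_ctx *ctx, u32 busy)
{
	u32 val;

	/* Poll every 10 us, give up after 1000 us, as in the MIB read. */
	return readx_poll_timeout(example_read, ctx, val, !(val & busy),
				  10, 1000);
}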
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index d757ba151cb1..75178624d3f5 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 series register access through SPI
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2019 Microchip Technology Inc.
*/
#include <asm/unaligned.h>
@@ -155,6 +155,8 @@ static void ksz9477_spi_shutdown(struct spi_device *spi)
static const struct of_device_id ksz9477_dt_ids[] = {
{ .compatible = "microchip,ksz9477" },
{ .compatible = "microchip,ksz9897" },
+ { .compatible = "microchip,ksz9893" },
+ { .compatible = "microchip,ksz9563" },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 3b12e2dcff31..39dace8e3512 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,12 +2,11 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2019 Microchip Technology Inc.
*/
#include <linux/delay.h>
#include <linux/export.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -15,13 +14,22 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz_priv.h"
+void ksz_port_cleanup(struct ksz_device *dev, int port)
+{
+ /* Common code for port cleanup. */
+ mutex_lock(&dev->dev_mutex);
+ dev->on_ports &= ~(1 << port);
+ dev->live_ports &= ~(1 << port);
+ mutex_unlock(&dev->dev_mutex);
+}
+EXPORT_SYMBOL_GPL(ksz_port_cleanup);
+
void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p;
@@ -42,6 +50,85 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);
+static void port_r_cnt(struct ksz_device *dev, int port)
+{
+ struct ksz_port_mib *mib = &dev->ports[port].mib;
+ u64 *dropped;
+
+ /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
+ &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+
+ /* last one in storage */
+ dropped = &mib->counters[dev->mib_cnt];
+
+ /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->mib_cnt) {
+ dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
+ dropped, &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+ mib->cnt_ptr = 0;
+}
+
+static void ksz_mib_read_work(struct work_struct *work)
+{
+ struct ksz_device *dev = container_of(work, struct ksz_device,
+ mib_read);
+ struct ksz_port_mib *mib;
+ struct ksz_port *p;
+ int i;
+
+ for (i = 0; i < dev->mib_port_cnt; i++) {
+ p = &dev->ports[i];
+ mib = &p->mib;
+ mutex_lock(&mib->cnt_mutex);
+
+ /* Only read MIB counters when the port is told to do so.
+ * If not, read only dropped counters when link is not up.
+ */
+ if (!p->read) {
+ const struct dsa_port *dp = dsa_to_port(dev->ds, i);
+
+ if (!netif_carrier_ok(dp->slave))
+ mib->cnt_ptr = dev->reg_mib_cnt;
+ }
+ port_r_cnt(dev, i);
+ p->read = false;
+ mutex_unlock(&mib->cnt_mutex);
+ }
+}
+
+static void mib_monitor(struct timer_list *t)
+{
+ struct ksz_device *dev = from_timer(dev, t, mib_read_timer);
+
+ mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
+ schedule_work(&dev->mib_read);
+}
+
+void ksz_init_mib_timer(struct ksz_device *dev)
+{
+ int i;
+
+ /* Read MIB counters every 30 seconds to avoid overflow. */
+ dev->mib_read_interval = msecs_to_jiffies(30000);
+
+ INIT_WORK(&dev->mib_read, ksz_mib_read_work);
+ timer_setup(&dev->mib_read_timer, mib_monitor, 0);
+
+ for (i = 0; i < dev->mib_port_cnt; i++)
+ dev->dev_ops->port_init_cnt(dev, i);
+
+ /* Start the timer 2 seconds later. */
+ dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
+ add_timer(&dev->mib_read_timer);
+}
+EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
+
int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
@@ -63,6 +150,27 @@ int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
}
EXPORT_SYMBOL_GPL(ksz_phy_write16);
+void ksz_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ /* Read all MIB counters when the link is going down. */
+ if (!phydev->link) {
+ p->read = true;
+ schedule_work(&dev->mib_read);
+ }
+ mutex_lock(&dev->dev_mutex);
+ if (!phydev->link)
+ dev->live_ports &= ~(1 << port);
+ else
+ /* Remember which port is connected and active. */
+ dev->live_ports |= (1 << port) & dev->on_ports;
+ mutex_unlock(&dev->dev_mutex);
+}
+EXPORT_SYMBOL_GPL(ksz_adjust_link);
+
int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
@@ -74,12 +182,32 @@ int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
}
EXPORT_SYMBOL_GPL(ksz_sset_count);
+void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
+{
+ const struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ mutex_lock(&mib->cnt_mutex);
+
+ /* Only read dropped counters if no link. */
+ if (!netif_carrier_ok(dp->slave))
+ mib->cnt_ptr = dev->reg_mib_cnt;
+ port_r_cnt(dev, port);
+ memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
+ mutex_unlock(&mib->cnt_mutex);
+}
+EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
+
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *br)
{
struct ksz_device *dev = ds->priv;
+ mutex_lock(&dev->dev_mutex);
dev->br_member |= (1 << port);
+ mutex_unlock(&dev->dev_mutex);
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
@@ -94,8 +222,10 @@ void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
{
struct ksz_device *dev = ds->priv;
+ mutex_lock(&dev->dev_mutex);
dev->br_member &= ~(1 << port);
dev->member &= ~(1 << port);
+ mutex_unlock(&dev->dev_mutex);
/* port_stp_state_set() will be called after to put the port in
* forwarding state so there is no need to do anything.
@@ -240,6 +370,7 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
/* setup slave port */
dev->dev_ops->port_setup(dev, port, false);
+ dev->dev_ops->phy_setup(dev, port, phy);
/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.
@@ -249,7 +380,7 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL_GPL(ksz_enable_port);
-void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+void ksz_disable_port(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
@@ -307,6 +438,7 @@ int ksz_switch_register(struct ksz_device *dev,
gpiod_set_value(dev->reset_gpio, 0);
}
+ mutex_init(&dev->dev_mutex);
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
mutex_init(&dev->alu_mutex);
@@ -321,7 +453,9 @@ int ksz_switch_register(struct ksz_device *dev,
if (ret)
return ret;
- dev->interface = PHY_INTERFACE_MODE_MII;
+ /* Host port interface will be self detected, or specifically set in
+ * device tree.
+ */
if (dev->dev->of_node) {
ret = of_get_phy_mode(dev->dev->of_node);
if (ret >= 0)
@@ -340,6 +474,12 @@ EXPORT_SYMBOL(ksz_switch_register);
void ksz_switch_remove(struct ksz_device *dev)
{
+ /* timer started */
+ if (dev->mib_read_timer.expires) {
+ del_timer_sync(&dev->mib_read_timer);
+ flush_work(&dev->mib_read);
+ }
+
dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
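ksz_init_mib_timer() above sets up the common split between a periodic timer and a work item: the timer callback runs in softirq context, so it only re-arms itself and schedules the work, while the slow SPI/MDIO counter reads happen in process context where sleeping is allowed. A minimal sketch of that pattern (kernel context assumed; the structure, field names and interval are illustrative):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct read_work;
	struct timer_list read_timer;
	unsigned long interval;
};

static void example_read_work(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       read_work);

	/* ... read the counters over SPI/MDIO here ... */
	(void)dev;
}

static void example_timer(struct timer_list *t)
{
	struct example_dev *dev = from_timer(dev, t, read_timer);

	mod_timer(&dev->read_timer, jiffies + dev->interval);
	schedule_work(&dev->read_work);
}

static void example_start(struct example_dev *dev)
{
	dev->interval = msecs_to_jiffies(30000);
	INIT_WORK(&dev->read_work, example_read_work);
	timer_setup(&dev->read_timer, example_timer, 0);
	mod_timer(&dev->read_timer, jiffies + msecs_to_jiffies(2000));
}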
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 2dd832de0d52..21cd794e18f1 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,19 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0
* Microchip switch driver common header
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2019 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
#define __KSZ_COMMON_H
+void ksz_port_cleanup(struct ksz_device *dev, int port);
void ksz_update_port_member(struct ksz_device *dev, int port);
+void ksz_init_mib_timer(struct ksz_device *dev);
/* Common DSA access functions */
int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
+void ksz_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev);
int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
+void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *br);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
@@ -30,7 +35,7 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port,
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
-void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void ksz_disable_port(struct dsa_switch *ds, int port);
/* Common register access functions */
@@ -211,4 +216,18 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
ksz_write8(dev, addr, data);
}
+struct ksz_poll_ctx {
+ struct ksz_device *dev;
+ int port;
+ int offset;
+};
+
+static inline u32 ksz_pread32_poll(struct ksz_poll_ctx *ctx)
+{
+ u32 data;
+
+ ksz_pread32(ctx->dev, ctx->port, ctx->offset, &data);
+ return data;
+}
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
index 60b49010904b..b52e5ca17ab4 100644
--- a/drivers/net/dsa/microchip/ksz_priv.h
+++ b/drivers/net/dsa/microchip/ksz_priv.h
@@ -2,7 +2,7 @@
*
* Microchip KSZ series switch common definitions
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2019 Microchip Technology Inc.
*/
#ifndef __KSZ_PRIV_H
@@ -14,8 +14,6 @@
#include <linux/etherdevice.h>
#include <net/dsa.h>
-#include "ksz9477_reg.h"
-
struct ksz_io_ops;
struct vlan_table {
@@ -23,6 +21,7 @@ struct vlan_table {
};
struct ksz_port_mib {
+ struct mutex cnt_mutex; /* structure access */
u8 cnt_ptr;
u64 *counters;
};
@@ -38,7 +37,8 @@ struct ksz_port {
u32 fiber:1; /* port is fiber */
u32 sgmii:1; /* port is SGMII */
u32 force:1;
- u32 link_just_down:1; /* link just goes down */
+ u32 read:1; /* read MIB counters in background */
+ u32 freeze:1; /* MIB counter freeze is enabled */
struct ksz_port_mib mib;
};
@@ -48,6 +48,7 @@ struct ksz_device {
struct ksz_platform_data *pdata;
const char *name;
+ struct mutex dev_mutex; /* device access */
struct mutex reg_mutex; /* register access */
struct mutex stats_mutex; /* status access */
struct mutex alu_mutex; /* ALU access */
@@ -79,8 +80,6 @@ struct ksz_device {
struct vlan_table *vlan_cache;
- u64 mib_value[TOTAL_SWITCH_COUNTER_NUM];
-
u8 *txbuf;
struct ksz_port *ports;
@@ -137,6 +136,9 @@ struct ksz_dev_ops {
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
+ void (*phy_setup)(struct ksz_device *dev, int port,
+ struct phy_device *phy);
+ void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
@@ -151,6 +153,7 @@ struct ksz_dev_ops {
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
+ void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
int (*shutdown)(struct ksz_device *dev);
int (*detect)(struct ksz_device *dev);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 74547f43b938..7357b4fc0185 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -18,7 +18,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
@@ -622,17 +621,19 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
if (phy_is_pseudo_fixed_link(phydev)) {
- dev_dbg(priv->dev, "phy-mode for master device = %x\n",
- phydev->interface);
-
- /* Setup TX circuit incluing relevant PAD and driving */
- mt7530_pad_clk_setup(ds, phydev->interface);
-
- /* Setup RX circuit, relevant PAD and driving on the host
- * which must be placed after the setup on the device side is
- * all finished.
- */
- mt7623_pad_clk_setup(ds);
+ if (priv->id == ID_MT7530) {
+ dev_dbg(priv->dev, "phy-mode for master device = %x\n",
+ phydev->interface);
+
+ /* Setup TX circuit including relevant PAD and driving */
+ mt7530_pad_clk_setup(ds, phydev->interface);
+
+ /* Setup RX circuit, relevant PAD and driving on the
+ * host which must be placed after the setup on the
+ * device side is all finished.
+ */
+ mt7623_pad_clk_setup(ds);
+ }
} else {
u16 lcl_adv = 0, rmt_adv = 0;
u8 flowctrl;
@@ -645,7 +646,7 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
case SPEED_100:
mcr |= PMCR_FORCE_SPEED_100;
break;
- };
+ }
if (phydev->link)
mcr |= PMCR_FORCE_LNK;
@@ -688,6 +689,10 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
/* Unknown unicast frame forwarding to the cpu port */
mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
+ /* Set CPU port number */
+ if (priv->id == ID_MT7621)
+ mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
+
/* CPU port gets connected to all user ports of
* the switch
*/
@@ -724,8 +729,7 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
}
static void
-mt7530_port_disable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
@@ -1220,24 +1224,27 @@ mt7530_setup(struct dsa_switch *ds)
* as two netdev instances.
*/
dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent;
- priv->ethernet = syscon_node_to_regmap(dn);
- if (IS_ERR(priv->ethernet))
- return PTR_ERR(priv->ethernet);
- regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
- ret = regulator_enable(priv->core_pwr);
- if (ret < 0) {
- dev_err(priv->dev,
- "Failed to enable core power: %d\n", ret);
- return ret;
- }
+ if (priv->id == ID_MT7530) {
+ priv->ethernet = syscon_node_to_regmap(dn);
+ if (IS_ERR(priv->ethernet))
+ return PTR_ERR(priv->ethernet);
+
+ regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
+ ret = regulator_enable(priv->core_pwr);
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "Failed to enable core power: %d\n", ret);
+ return ret;
+ }
- regulator_set_voltage(priv->io_pwr, 3300000, 3300000);
- ret = regulator_enable(priv->io_pwr);
- if (ret < 0) {
- dev_err(priv->dev, "Failed to enable io pwr: %d\n",
- ret);
- return ret;
+ regulator_set_voltage(priv->io_pwr, 3300000, 3300000);
+ ret = regulator_enable(priv->io_pwr);
+ if (ret < 0) {
+ dev_err(priv->dev, "Failed to enable io pwr: %d\n",
+ ret);
+ return ret;
+ }
}
/* Reset whole chip through gpio pin or memory-mapped registers for
@@ -1293,7 +1300,7 @@ mt7530_setup(struct dsa_switch *ds)
if (dsa_is_cpu_port(ds, i))
mt7530_cpu_port_enable(priv, i);
else
- mt7530_port_disable(ds, i, NULL);
+ mt7530_port_disable(ds, i);
}
/* Flush the FDB table */
@@ -1327,6 +1334,13 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_del = mt7530_port_vlan_del,
};
+static const struct of_device_id mt7530_of_match[] = {
+ { .compatible = "mediatek,mt7621", .data = (void *)ID_MT7621, },
+ { .compatible = "mediatek,mt7530", .data = (void *)ID_MT7530, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt7530_of_match);
+
static int
mt7530_probe(struct mdio_device *mdiodev)
{
@@ -1357,13 +1371,21 @@ mt7530_probe(struct mdio_device *mdiodev)
}
}
- priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
- if (IS_ERR(priv->core_pwr))
- return PTR_ERR(priv->core_pwr);
+ /* Get the hardware identifier from the devicetree node.
+ * We will need it for some of the clock and regulator setup.
+ */
+ priv->id = (unsigned int)(unsigned long)
+ of_device_get_match_data(&mdiodev->dev);
+
+ if (priv->id == ID_MT7530) {
+ priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
+ if (IS_ERR(priv->core_pwr))
+ return PTR_ERR(priv->core_pwr);
- priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
- if (IS_ERR(priv->io_pwr))
- return PTR_ERR(priv->io_pwr);
+ priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
+ if (IS_ERR(priv->io_pwr))
+ return PTR_ERR(priv->io_pwr);
+ }
/* Not MCM that indicates switch works as the remote standalone
* integrated circuit so the GPIO pin would be used to complete
@@ -1409,12 +1431,6 @@ mt7530_remove(struct mdio_device *mdiodev)
mutex_destroy(&priv->reg_mutex);
}
-static const struct of_device_id mt7530_of_match[] = {
- { .compatible = "mediatek,mt7530" },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, mt7530_of_match);
-
static struct mdio_driver mt7530_mdio_driver = {
.probe = mt7530_probe,
.remove = mt7530_remove,
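The mt7530 probe changes above key variant-specific setup off the devicetree match data: each compatible string carries a chip identifier in .data, retrieved in probe with of_device_get_match_data(), so only the standalone MT7530 touches its regulators and TRGMII pads. A compact sketch of that pattern (the enum mirrors the one added to mt7530.h; the probe function here is illustrative):

#include <linux/device.h>
#include <linux/of_device.h>

enum { ID_MT7530 = 0, ID_MT7621 = 1 };	/* mirrors mt7530.h */

static const struct of_device_id example_of_match[] = {
	{ .compatible = "mediatek,mt7621", .data = (void *)ID_MT7621, },
	{ .compatible = "mediatek,mt7530", .data = (void *)ID_MT7530, },
	{ /* sentinel */ },
};

static int example_probe(struct device *dev)
{
	unsigned int id = (unsigned int)(unsigned long)
			  of_device_get_match_data(dev);

	if (id == ID_MT7530) {
		/* regulator and TRGMII pad setup only applies here */
	}
	return 0;
}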
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index d9b407a22a58..a95ed958df5b 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -19,6 +19,11 @@
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
+enum {
+ ID_MT7530 = 0,
+ ID_MT7621 = 1,
+};
+
#define NUM_TRGMII_CTRL 5
#define TRGMII_BASE(x) (0x10000 + (x))
@@ -36,6 +41,9 @@
#define UNM_FFP(x) (((x) & 0xff) << 16)
#define UNU_FFP(x) (((x) & 0xff) << 8)
#define UNU_FFP_MASK UNU_FFP(~0)
+#define CPU_EN BIT(7)
+#define CPU_PORT(x) ((x) << 4)
+#define CPU_MASK (0xf << 4)
/* Registers for address table access */
#define MT7530_ATA1 0x74
@@ -430,6 +438,7 @@ struct mt7530_priv {
struct regulator *core_pwr;
struct regulator *io_pwr;
struct gpio_desc *reset;
+ unsigned int id;
bool mcm;
struct mt7530_port ports[MT7530_NUM_PORTS];
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8a517d8fb9d1..96728d1e9824 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
unsigned int sub_irq;
unsigned int n;
u16 reg;
+ u16 ctl1;
int err;
mutex_lock(&chip->reg_lock);
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
if (err)
goto out;
- for (n = 0; n < chip->g1_irq.nirqs; ++n) {
- if (reg & (1 << n)) {
- sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
- handle_nested_irq(sub_irq);
- ++nhandled;
+ do {
+ for (n = 0; n < chip->g1_irq.nirqs; ++n) {
+ if (reg & (1 << n)) {
+ sub_irq = irq_find_mapping(chip->g1_irq.domain,
+ n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
}
- }
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
+ if (err)
+ goto unlock;
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
+unlock:
+ mutex_unlock(&chip->reg_lock);
+ if (err)
+ goto out;
+ ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
+ } while (reg & ctl1);
+
out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
@@ -426,16 +442,26 @@ out_mapping:
static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
{
+ static struct lock_class_key lock_key;
+ static struct lock_class_key request_key;
int err;
err = mv88e6xxx_g1_irq_setup_common(chip);
if (err)
return err;
+ /* These lock classes tell lockdep that global 1 irqs are in
+ * a different category than their parent GPIO, so it won't
+ * report false recursion.
+ */
+ irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
+
+ mutex_unlock(&chip->reg_lock);
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
+ mutex_lock(&chip->reg_lock);
if (err)
mv88e6xxx_g1_irq_free_common(chip);
@@ -464,7 +490,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
kthread_init_delayed_work(&chip->irq_poll_work,
mv88e6xxx_irq_poll);
- chip->kworker = kthread_create_worker(0, dev_name(chip->dev));
+ chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
if (IS_ERR(chip->kworker))
return PTR_ERR(chip->kworker);
@@ -523,9 +549,9 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
return mv88e6xxx_write(chip, addr, reg, val);
}
-static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
- int link, int speed, int duplex, int pause,
- phy_interface_t mode)
+int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
+ int speed, int duplex, int pause,
+ phy_interface_t mode)
{
int err;
@@ -632,6 +658,20 @@ static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
mv88e6065_phylink_validate(chip, port, mask, state);
}
+static void mv88e6341_phylink_validate(struct mv88e6xxx_chip *chip, int port,
+ unsigned long *mask,
+ struct phylink_link_state *state)
+{
+ if (port >= 5)
+ phylink_set(mask, 2500baseX_Full);
+
+ /* No ethtool bits for 200Mbps */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ mv88e6065_phylink_validate(chip, port, mask, state);
+}
+
static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
unsigned long *mask,
struct phylink_link_state *state)
@@ -647,8 +687,10 @@ static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
unsigned long *mask,
struct phylink_link_state *state)
{
- if (port >= 9)
+ if (port >= 9) {
phylink_set(mask, 2500baseX_Full);
+ phylink_set(mask, 2500baseT_Full);
+ }
/* No ethtool bits for 200Mbps */
phylink_set(mask, 1000baseT_Full);
@@ -880,7 +922,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
default:
return U64_MAX;
}
- value = (((u64)high) << 16) | low;
+ value = (((u64)high) << 32) | low;
return value;
}
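The one-line change above fixes how the two 32-bit halves of a 64-bit MIB counter are combined: the high word belongs in bits 63:32, so it must be shifted by 32, not 16, which would overlap the low word and corrupt any counter whose high word is non-zero. A tiny stand-alone demonstration with arbitrary values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t high = 0x00000002, low = 0x80000001;

	printf("wrong: 0x%llx\n",
	       (unsigned long long)((((uint64_t)high) << 16) | low));
	printf("right: 0x%llx\n",
	       (unsigned long long)((((uint64_t)high) << 32) | low));
	return 0;
}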
@@ -2360,8 +2402,7 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -2403,6 +2444,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_stats_clear(chip);
}
+/* The mv88e6390 has some hidden registers used for debug and
+ * development. The errata workaround below also makes use of them.
+ */
+static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
+ int reg, u16 val)
+{
+ u16 ctrl;
+ int err;
+
+ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
+ PORT_RESERVED_1A, val);
+ if (err)
+ return err;
+
+ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
+ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
+ reg;
+
+ return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
+ PORT_RESERVED_1A, ctrl);
+}
+
+static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
+ PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
+}
+
+static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
+ int reg, u16 *val)
+{
+ u16 ctrl;
+ int err;
+
+ ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
+ PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
+ reg;
+
+ err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
+ PORT_RESERVED_1A, ctrl);
+ if (err)
+ return err;
+
+ err = mv88e6390_hidden_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
+ PORT_RESERVED_1A, val);
+}
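The control word written to PORT_RESERVED_1A on the control port packs the operation, the target port and the register into a single 16-bit value. A minimal sketch of what mv88e6390_hidden_write() assembles for the errata value 0x01c0 written to register 0 of port 0, using only the definitions this patch adds to port.h:

	u16 ctrl;

	/* BUSY (bit 15) | WRITE (bit 14) | BLOCK (0xf << 10) | port << 5 | reg */
	ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
	       PORT_RESERVED_1A_BLOCK | (0 << PORT_RESERVED_1A_PORT_SHIFT) | 0;
	/* ctrl == 0xfc00 for port 0, register 0 */

The read path issues the same word with PORT_RESERVED_1A_READ instead of WRITE, then mv88e6390_hidden_wait() polls until the switch clears BUSY before the result is fetched from the data port.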
+
+/* Check if the errata has already been applied. */
+static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+ u16 val;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6390_hidden_read(chip, port, 0, &val);
+ if (err) {
+ dev_err(chip->dev,
+ "Error reading hidden register: %d\n", err);
+ return false;
+ }
+ if (val != 0x01c0)
+ return false;
+ }
+
+ return true;
+}
+
+/* The 6390 copper ports have an erratum which requires poking magic
+ * values into undocumented hidden registers and then performing a
+ * software reset.
+ */
+static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+
+ if (mv88e6390_setup_errata_applied(chip))
+ return 0;
+
+ /* Set the ports into blocking mode */
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
+ if (err)
+ return err;
+ }
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
+ if (err)
+ return err;
+ }
+
+ return mv88e6xxx_software_reset(chip);
+}
+
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -2415,6 +2557,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
mutex_lock(&chip->reg_lock);
+ if (chip->info->ops->setup_errata) {
+ err = chip->info->ops->setup_errata(chip);
+ if (err)
+ goto unlock;
+ }
+
/* Cache the cmode of each port. */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (chip->info->ops->port_get_cmode) {
@@ -2945,7 +3093,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_validate = mv88e6341_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -2970,7 +3118,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
- .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3226,6 +3374,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
static const struct mv88e6xxx_ops mv88e6190_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3269,6 +3418,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
static const struct mv88e6xxx_ops mv88e6190x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3312,6 +3462,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
static const struct mv88e6xxx_ops mv88e6191_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3404,6 +3555,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3573,7 +3725,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_validate = mv88e6341_phylink_validate,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -3709,6 +3861,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
static const struct mv88e6xxx_ops mv88e6390_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3756,6 +3909,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
static const struct mv88e6xxx_ops mv88e6390x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -4093,7 +4247,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6190",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4116,7 +4270,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6190X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4139,7 +4293,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6191",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.max_vid = 8191,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
@@ -4186,7 +4340,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6290",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4348,7 +4502,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6390",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4371,7 +4525,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6390X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4466,6 +4620,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
return 0;
}
+static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
+{
+ int i;
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
+}
+
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
int port)
{
@@ -4502,6 +4664,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
if (err)
goto free;
+ mv88e6xxx_ports_cmode_init(chip);
+
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_switch_reset(chip);
mutex_unlock(&chip->reg_lock);
@@ -4561,6 +4725,22 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
return err;
}
+static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
+ bool unicast, bool multicast)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+ if (chip->info->ops->port_set_egress_floods)
+ err = chip->info->ops->port_set_egress_floods(chip, port,
+ unicast,
+ multicast);
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
.probe = mv88e6xxx_drv_probe,
@@ -4588,6 +4768,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.set_ageing_time = mv88e6xxx_set_ageing_time,
.port_bridge_join = mv88e6xxx_port_bridge_join,
.port_bridge_leave = mv88e6xxx_port_bridge_leave,
+ .port_egress_floods = mv88e6xxx_port_egress_floods,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
@@ -4651,6 +4832,21 @@ static const void *pdata_device_get_match_data(struct device *dev)
return NULL;
}
+/* There is no suspend-to-RAM support at the DSA level yet; the switch
+ * configuration would be lost after a power cycle, so prevent the device
+ * from being suspended.
+ */
+static int __maybe_unused mv88e6xxx_suspend(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static int __maybe_unused mv88e6xxx_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mv88e6xxx_pm_ops, mv88e6xxx_suspend, mv88e6xxx_resume);
+
static int mv88e6xxx_probe(struct mdio_device *mdiodev)
{
struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data;
@@ -4708,6 +4904,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out;
+ mv88e6xxx_ports_cmode_init(chip);
mv88e6xxx_phy_init(chip);
if (chip->info->ops->get_eeprom) {
@@ -4835,6 +5032,7 @@ static struct mdio_driver mv88e6xxx_driver = {
.mdiodrv.driver = {
.name = "mv88e6085",
.of_match_table = mv88e6xxx_of_match,
+ .pm = &mv88e6xxx_pm_ops,
},
};
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f9ecb7872d32..adcf60779895 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus {
};
struct mv88e6xxx_ops {
+ /* Switch setup errata handling, called early in the switch setup
+ * to allow any errata workarounds to be applied.
+ */
+ int (*setup_errata)(struct mv88e6xxx_chip *chip);
+
int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
@@ -574,6 +579,9 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 update);
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask);
+int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
+ int speed, int duplex, int pause,
+ phy_interface_t mode);
struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip);
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 5200e4bdce93..ea243840ee0f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
struct mv88e6xxx_atu_entry entry;
+ int spid;
int err;
u16 val;
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
+ spid = entry.state;
+
if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU age out violation for %pM\n",
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev,
- "ATU member violation for %pM portvec %x\n",
- entry.mac, entry.portvec);
- chip->ports[entry.portvec].atu_member_violation++;
+ "ATU member violation for %pM portvec %x spid %d\n",
+ entry.mac, entry.portvec, spid);
+ chip->ports[spid].atu_member_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
dev_err_ratelimited(chip->dev,
- "ATU miss violation for %pM portvec %x\n",
- entry.mac, entry.portvec);
- chip->ports[entry.portvec].atu_miss_violation++;
+ "ATU miss violation for %pM portvec %x spid %d\n",
+ entry.mac, entry.portvec, spid);
+ chip->ports[spid].atu_miss_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
dev_err_ratelimited(chip->dev,
- "ATU full violation for %pM portvec %x\n",
- entry.mac, entry.portvec);
- chip->ports[entry.portvec].atu_full_violation++;
+ "ATU full violation for %pM portvec %x spid %d\n",
+ entry.mac, entry.portvec, spid);
+ chip->ports[spid].atu_full_violation++;
}
mutex_unlock(&chip->reg_lock);
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index ebd26b6a93e6..0796c6feec55 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
/* normal duplex detection */
break;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
@@ -398,6 +398,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = 0;
}
+ /* The cmode doesn't change, so there is nothing for us to do */
+ if (cmode == chip->ports[port].cmode)
+ return 0;
+
lane = mv88e6390x_serdes_get_lane(chip, port);
if (lane < 0)
return lane;
@@ -408,7 +412,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return err;
}
- err = mv88e6390_serdes_power(chip, port, false);
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
@@ -424,7 +428,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- err = mv88e6390_serdes_power(chip, port, true);
+ err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
@@ -444,6 +448,8 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
switch (mode) {
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
case PHY_INTERFACE_MODE_XGMII:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_RXAUI:
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 0d81866d0e4a..4aadf321edb7 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,6 +52,7 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
+#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
@@ -251,6 +252,16 @@
/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
+/* Offset 0x1a: Magic undocumented errata register */
+#define PORT_RESERVED_1A 0x1a
+#define PORT_RESERVED_1A_BUSY BIT(15)
+#define PORT_RESERVED_1A_WRITE BIT(14)
+#define PORT_RESERVED_1A_READ 0
+#define PORT_RESERVED_1A_PORT_SHIFT 5
+#define PORT_RESERVED_1A_BLOCK (0xf << 10)
+#define PORT_RESERVED_1A_CTRL_PORT 4
+#define PORT_RESERVED_1A_DATA_PORT 5
+
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val);
int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 4b336d8d4c67..42872d21857b 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -400,7 +400,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.owner = THIS_MODULE;
snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
- dev_name(chip->dev));
+ "%s", dev_name(chip->dev));
chip->ptp_clock_info.max_adj = 1000000;
chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2caa8c8b4b55..6a5de1b72f6c 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -510,21 +510,48 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane)
{
struct dsa_switch *ds = chip->ds;
+ int duplex = DUPLEX_UNKNOWN;
+ int speed = SPEED_UNKNOWN;
+ int link, err;
u16 status;
- bool up;
- mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_STATUS, &status);
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_PHY_STATUS, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read SGMII PHY status: %d\n", err);
+ return;
+ }
- /* Status must be read twice in order to give the current link
- * status. Otherwise the change in link status since the last
- * read of the register is returned.
- */
- mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_STATUS, &status);
- up = status & MV88E6390_SGMII_STATUS_LINK;
+ link = status & MV88E6390_SGMII_PHY_STATUS_LINK ?
+ LINK_FORCED_UP : LINK_FORCED_DOWN;
+
+ if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
+ duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) {
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_1000:
+ speed = SPEED_1000;
+ break;
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_100:
+ speed = SPEED_100;
+ break;
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_10:
+ speed = SPEED_10;
+ break;
+ default:
+ dev_err(chip->dev, "invalid PHY speed\n");
+ return;
+ }
+ }
- dsa_port_phylink_mac_change(ds, port, up);
+ err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex,
+ PAUSE_OFF, PHY_INTERFACE_MODE_NA);
+ if (err)
+ dev_err(chip->dev, "can't propagate PHY settings to MAC: %d\n",
+ err);
+ else
+ dsa_port_phylink_mac_change(ds, port, link == LINK_FORCED_UP);
}
static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
@@ -664,7 +691,7 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
if (port < 9)
return 0;
- return mv88e6390_serdes_irq_setup(chip, port);
+ return mv88e6390x_serdes_irq_setup(chip, port);
}
void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 573dce8b1eb4..c2e7eedfa9b9 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -69,6 +69,14 @@
#define MV88E6390_SGMII_INT_SYMBOL_ERROR BIT(8)
#define MV88E6390_SGMII_INT_FALSE_CARRIER BIT(7)
#define MV88E6390_SGMII_INT_STATUS 0xa002
+#define MV88E6390_SGMII_PHY_STATUS 0xa003
+#define MV88E6390_SGMII_PHY_STATUS_SPEED_MASK GENMASK(15, 14)
+#define MV88E6390_SGMII_PHY_STATUS_SPEED_1000 0x8000
+#define MV88E6390_SGMII_PHY_STATUS_SPEED_100 0x4000
+#define MV88E6390_SGMII_PHY_STATUS_SPEED_10 0x0000
+#define MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL BIT(13)
+#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11)
+#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10)
int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 7e97e620bd44..576b37d12a63 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -420,7 +420,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
static int
qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
{
- u32 reg;
+ u32 reg, val;
switch (port) {
case 0:
@@ -439,15 +439,19 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
*/
switch (mode) {
case PHY_INTERFACE_MODE_RGMII:
- qca8k_write(priv, reg,
- QCA8K_PORT_PAD_RGMII_EN |
- QCA8K_PORT_PAD_RGMII_TX_DELAY(3) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY(3));
-
- /* According to the datasheet, RGMII delay is enabled through
+ /* RGMII mode means no delay, so don't enable the delay */
+ val = QCA8K_PORT_PAD_RGMII_EN;
+ qca8k_write(priv, reg, val);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* RGMII_ID needs internal delay. This is enabled through
* PORT5_PAD_CTRL for all ports, rather than individual port
* registers
*/
+ qca8k_write(priv, reg,
+ QCA8K_PORT_PAD_RGMII_EN |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY));
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
break;
@@ -797,8 +801,7 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
}
static void
-qca8k_port_disable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+qca8k_port_disable(struct dsa_switch *ds, int port)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 613fe5c50236..d146e54c8a6c 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -40,6 +40,7 @@
((0x8 + (x & 0x3)) << 22)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \
((0x10 + (x & 0x3)) << 20)
+#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_MODULE_EN 0x030
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
index b4b839a1d095..ad41ec63cc9f 100644
--- a/drivers/net/dsa/realtek-smi.c
+++ b/drivers/net/dsa/realtek-smi.c
@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
struct device_node *mdio_np;
int ret;
- mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
- "realtek,smi-mdio");
+ mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
if (!mdio_np) {
dev_err(smi->dev, "no MDIO bus node\n");
return -ENODEV;
}
smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
- if (!smi->slave_mii_bus)
- return -ENOMEM;
+ if (!smi->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
smi->slave_mii_bus->priv = smi;
smi->slave_mii_bus->name = "SMI slave MII";
smi->slave_mii_bus->read = realtek_smi_mdio_read;
@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
if (ret) {
dev_err(smi->dev, "unable to register MDIO bus %s\n",
smi->slave_mii_bus->id);
- of_node_put(mdio_np);
+ goto err_put_node;
}
return 0;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
}
static int realtek_smi_probe(struct platform_device *pdev)
@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
dsa_unregister_switch(smi->ds);
+ if (smi->slave_mii_bus)
+ of_node_put(smi->slave_mii_bus->dev.of_node);
gpiod_set_value(smi->reset, 1);
return 0;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index a4d5049df692..40b3974970c6 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -1073,8 +1073,7 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
}
static void
-rtl8366rb_port_disable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+rtl8366rb_port_disable(struct dsa_switch *ds, int port)
{
struct realtek_smi *smi = ds->priv;
int ret;
diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c
index 9f1b5f2e8a64..d4780610ea8a 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.c
+++ b/drivers/net/dsa/vitesse-vsc73xx.c
@@ -1013,8 +1013,7 @@ static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
return 0;
}
-static void vsc73xx_port_disable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+static void vsc73xx_port_disable(struct dsa_switch *ds, int port)
{
struct vsc73xx *vsc = ds->priv;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index b223769d6a5e..3da97996bdf3 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1266,12 +1266,14 @@ el3_up(struct net_device *dev)
pr_cont("Forcing 3c5x9b full-duplex mode");
break;
}
+ /* fall through */
case 8:
/* set full-duplex mode based on eeprom config setting */
if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
break;
}
+ /* fall through */
default:
/* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
pr_cont("Setting 3c5x9/3c5x9B half-duplex mode");
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index b648e3f95c01..808abb6b3671 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1177,7 +1177,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry]))
break; /* It still hasn't been processed. */
if (lp->tx_skbuff[entry]) {
- dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ dev_consume_skb_irq(lp->tx_skbuff[entry]);
lp->tx_skbuff[entry] = NULL;
}
dirty_tx++;
@@ -1192,7 +1192,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
#ifdef VORTEX_BUS_MASTER
if (status & DMADone) {
outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
- dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */
+ dev_consume_skb_irq(lp->tx_skb); /* Release the transferred buffer */
netif_wake_queue(dev);
}
#endif
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 40f421dbdf57..147051404194 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2307,7 +2307,7 @@ _vortex_interrupt(int irq, struct net_device *dev)
dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
pkts_compl++;
bytes_compl += vp->tx_skb->len;
- dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
+ dev_consume_skb_irq(vp->tx_skb); /* Release the transferred buffer */
if (ioread16(ioaddr + TxFree) > 1536) {
/*
* AKPM: FIXME: I don't think we need this. If the queue was stopped due to
@@ -2449,7 +2449,7 @@ _boomerang_interrupt(int irq, struct net_device *dev)
#endif
pkts_compl++;
bytes_compl += skb->len;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
vp->tx_skbuff[entry] = NULL;
} else {
pr_debug("boomerang_interrupt: no skb!\n");
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 097467f44b0d..816540e6beac 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1390,7 +1390,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
}
}
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
}
np->tx_done_q[np->tx_done].status = 0;
np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 91fc64c1145e..47e5984f16fb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL);
+ greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->tx_bd_base) {
err = -ENOMEM;
goto error3;
}
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL);
+ greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->rx_bd_base) {
err = -ENOMEM;
goto error4;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 0b60921c392f..16477aa6d61f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev)
size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
- descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr,
- GFP_KERNEL);
+ descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
+ GFP_KERNEL);
if (!descs) {
netdev_err(sdev->netdev,
"failed to allocate status descriptors\n");
@@ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev)
struct slic_shmem_data *sm_data;
dma_addr_t paddr;
- sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
- &paddr, GFP_KERNEL);
+ sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
+ &paddr, GFP_KERNEL);
if (!sm_data) {
dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
return -ENOMEM;
@@ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev)
int err = 0;
u8 *mac[2];
- eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
- &paddr, GFP_KERNEL);
+ eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
+ &paddr, GFP_KERNEL);
if (!eeprom)
return -ENOMEM;
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 4f11f98347ed..1827ef1f6d55 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2059,7 +2059,7 @@ static inline void ace_tx_int(struct net_device *dev,
if (skb) {
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
info->skb = NULL;
}
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0fb986ba3290..0ae723f75341 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
& 0xffff;
if (inuse) { /* Tx FIFO is not empty */
- ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+ ready = max_t(int,
+ priv->tx_prod - priv->tx_cons - inuse - 1, 0);
} else {
/* Check for buffered last packet */
status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 02921d877c08..aa1d1f5339d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
priv->phy_iface);
- if (IS_ERR(phydev))
+ if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
+ phydev = NULL;
+ }
} else {
int ret;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 420cede41ca4..b17d435de09f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
struct ena_com_admin_sq *sq = &queue->sq;
u16 size = ADMIN_SQ_SIZE(queue->q_depth);
- sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
- GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+ GFP_KERNEL);
if (!sq->entries) {
pr_err("memory allocation failed");
@@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
struct ena_com_admin_cq *cq = &queue->cq;
u16 size = ADMIN_CQ_SIZE(queue->q_depth);
- cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
- GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+ GFP_KERNEL);
if (!cq->entries) {
pr_err("memory allocation failed");
@@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
- GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+ GFP_KERNEL);
if (!aenq->entries) {
pr_err("memory allocation failed");
@@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
}
if (!io_sq->desc_addr.virt_addr) {
@@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
prev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
- GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size,
+ &io_cq->cdesc_addr.phys_addr,
+ GFP_KERNEL);
}
if (!io_cq->cdesc_addr.virt_addr) {
@@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
rss->hash_key =
- dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_key))
return -ENOMEM;
@@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
rss->hash_ctrl =
- dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_ctrl))
return -ENOMEM;
@@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
sizeof(struct ena_admin_rss_ind_table_entry);
rss->rss_ind_tbl =
- dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+ &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
@@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
spin_lock_init(&mmio_read->lock);
mmio_read->read_resp =
- dma_zalloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
goto err;
@@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
host_attr->host_info =
- dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->host_info))
return -ENOMEM;
@@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
host_attr->debug_area_virt_addr =
- dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
- &host_attr->debug_area_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
+ &host_attr->debug_area_dma_addr,
+ GFP_KERNEL);
if (unlikely(!host_attr->debug_area_virt_addr)) {
host_attr->debug_area_size = 0;
return -ENOMEM;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a70bb1bb90e7..a6eacf2099c3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
goto err_device_destroy;
}
- clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
- /* Make sure we don't have a race with AENQ Links state handler */
- if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
- netif_carrier_on(adapter->netdev);
-
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
adapter->num_queues);
if (rc) {
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
}
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
+
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_err(&pdev->dev,
"Device reset completed successfully, Driver info: %s\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index dc8b6173d8d8..63870072cbbd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
#define DRV_MODULE_VER_MAJOR 2
#define DRV_MODULE_VER_MINOR 0
-#define DRV_MODULE_VER_SUBMINOR 2
+#define DRV_MODULE_VER_SUBMINOR 3
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a90080f12e67..145fe71fd155 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -435,7 +435,7 @@ static int amd8111e_restart(struct net_device *dev)
int i,reg_val;
/* stop the chip */
- writel(RUN, mmio + CMD0);
+ writel(RUN, mmio + CMD0);
if(amd8111e_init_ring(dev))
return -ENOMEM;
@@ -666,7 +666,7 @@ static int amd8111e_tx(struct net_device *dev)
pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
lp->tx_skbuff[tx_index]->len,
PCI_DMA_TODEVICE);
- dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
+ dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
lp->tx_skbuff[tx_index] = NULL;
lp->tx_dma_addr[tx_index] = 0;
}
@@ -1720,7 +1720,7 @@ static void amd8111e_config_ipg(struct timer_list *t)
writew((u32)tmp_ipg, mmio + IPG);
writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
}
- mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
+ mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
return;
}
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index e833d1b3fe18..e5073aeea06a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1167,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
* Snooping works fine with eth on all au1xxx
*/
- aup->vaddr = (u32)dma_alloc_attrs(NULL, MAX_BUF_SIZE *
+ aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0,
DMA_ATTR_NON_CONSISTENT);
@@ -1349,7 +1349,7 @@ err_remap3:
err_remap2:
iounmap(aup->mac);
err_remap1:
- dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr,
DMA_ATTR_NON_CONSISTENT);
err_vaddr:
@@ -1383,7 +1383,7 @@ static int au1000_remove(struct platform_device *pdev)
if (aup->tx_db_inuse[i])
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
- dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr,
DMA_ATTR_NON_CONSISTENT);
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index b56d84c7df46..f90b454b1642 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -1084,7 +1084,7 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
/* We must free the original skb if it's not a data-only copy
in the bounce buffer. */
if (lp->tx_skbuff[entry]) {
- dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ dev_consume_skb_irq(lp->tx_skbuff[entry]);
lp->tx_skbuff[entry] = NULL;
}
dirty_tx++;
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 8931ce6bab7b..87ff5d6d1b22 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1028,7 +1028,7 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
#ifdef XMT_VIA_SKB
if(p->tmd_skb[p->tmdlast]) {
- dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
+ dev_consume_skb_irq(p->tmd_skb[p->tmdlast]);
p->tmd_skb[p->tmdlast] = NULL;
}
#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index d272dc6984ac..b40d4377cc71 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -431,8 +431,6 @@
#define MAC_MDIOSCAR_PA_WIDTH 5
#define MAC_MDIOSCAR_RA_INDEX 0
#define MAC_MDIOSCAR_RA_WIDTH 16
-#define MAC_MDIOSCAR_REG_INDEX 0
-#define MAC_MDIOSCAR_REG_WIDTH 21
#define MAC_MDIOSCCDR_BUSY_INDEX 22
#define MAC_MDIOSCCDR_BUSY_WIDTH 1
#define MAC_MDIOSCCDR_CMD_INDEX 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1e929a1e4ca7..4666084eda16 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
}
}
+static unsigned int xgbe_create_mdio_sca(int port, int reg)
+{
+ unsigned int mdio_sca, da;
+
+ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
+
+ return mdio_sca;
+}
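The helper distinguishes Clause 22 from Clause 45 accesses by testing MII_ADDR_C45 in the register argument; for Clause 45 the MMD device address travels in the upper 16 bits of reg, matching the (devad << 16) | regnum encoding used by the kernel MDIO layer. A minimal caller-side sketch under that assumption (the register choice is illustrative only, and the MAC_MDIOSCAR field widths are assumed from xgbe-common.h):

	/* Clause 45 read address: PMA/PMD control register, external PHY address 0 */
	int reg = MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | MDIO_CTRL1;
	unsigned int mdio_sca = xgbe_create_mdio_sca(0, reg);
	/* RA carries MDIO_CTRL1, PA the PHY address, DA the MMD from reg >> 16 */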
+
static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
int reg, u16 val)
{
@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
reinit_completion(&pdata->mdio_complete);
- mdio_sca = 0;
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
reinit_completion(&pdata->mdio_complete);
- mdio_sca = 0;
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
- XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 0f2ad50f3bd7..87b142a312e0 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/* Packet buffers should be 64B aligned */
- pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
- GFP_ATOMIC);
+ pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+ GFP_ATOMIC);
if (unlikely(!pkt_buf)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
ring->ndev = ndev;
size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
- ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
- GFP_KERNEL);
+ ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
+ GFP_KERNEL);
if (!ring->desc_addr)
goto err;
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 6a8e2567f2bd..4d3855ceb500 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -777,7 +777,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
if (bp->tx_bufs[bp->tx_empty]) {
++dev->stats.tx_packets;
- dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
+ dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
}
bp->tx_bufs[bp->tx_empty] = NULL;
bp->tx_fullup = 0;
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 68b9ee489489..4d9819d2894d 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -764,7 +764,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
dev->stats.tx_bytes += mp->tx_bufs[i]->len;
++dev->stats.tx_packets;
}
- dev_kfree_skb_irq(mp->tx_bufs[i]);
+ dev_consume_skb_irq(mp->tx_bufs[i]);
--mp->tx_active;
if (++i >= N_TX_RING)
i = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 38e87eed76b9..a718d7a1f76c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -138,7 +138,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
u8 *p = data;
if (stringset == ETH_SS_STATS) {
- memcpy(p, *aq_ethtool_stat_names,
+ memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
for (i = 0; i < cfg->vecs; i++) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index dc88a1221f1d..bc711238ca0c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -14,6 +14,8 @@
#ifndef AQ_HW_UTILS_H
#define AQ_HW_UTILS_H
+#include <linux/iopoll.h>
+
#include "aq_common.h"
#ifndef HIDWORD
@@ -23,18 +25,6 @@
#define AQ_HW_SLEEP(_US_) mdelay(_US_)
-#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
-do { \
- unsigned int AQ_HW_WAIT_FOR_i; \
- for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\
- --AQ_HW_WAIT_FOR_i) {\
- udelay(_US_); \
- } \
- if (!AQ_HW_WAIT_FOR_i) {\
- err = -ETIME; \
- } \
-} while (0)
-
#define aq_pr_err(...) pr_err(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
#define aq_pr_trace(...) pr_info(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
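The removed open-coded AQ_HW_WAIT_FOR() loop is replaced throughout with readx_poll_timeout_atomic() from <linux/iopoll.h>, which repeatedly evaluates op(addr) into val until cond holds or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise. A minimal sketch of how one of the converted call sites maps onto the macro arguments:

	u32 val;
	int err;

	/* was: AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U) */
	err = readx_poll_timeout_atomic(hw_atl_glb_soft_res_get, self, val,
					val == 0, 1000U, 10000U);
	/* poll every 1000 us, give up after 10000 us total */

The error code changes from -ETIME to -ETIMEDOUT, but the callers only test for err < 0, so their behaviour is unchanged.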
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0147c037ca96..ff83667410bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -986,4 +986,4 @@ void aq_nic_shutdown(struct aq_nic_s *self)
err_exit:
rtnl_unlock();
-} \ No newline at end of file
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index c8b44cdb91c1..0217ff4669a4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -170,6 +170,8 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self)
for (i = 32U; i--;) {
if (!((1U << i) & self->msix_entry_mask))
continue;
+ if (i >= AQ_CFG_VECS_MAX)
+ continue;
if (pdev->msix_enabled)
irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 2469ed4d86b9..f6f8338153a2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -85,6 +85,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
+ u32 val;
hw_atl_glb_glb_reg_res_dis_set(self, 1U);
hw_atl_pci_pci_reg_res_dis_set(self, 0U);
@@ -95,7 +96,9 @@ static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
hw_atl_glb_soft_res_set(self, 1);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
+ err = readx_poll_timeout_atomic(hw_atl_glb_soft_res_get,
+ self, val, val == 0,
+ 1000U, 10000U);
if (err < 0)
goto err_exit;
@@ -103,7 +106,9 @@ static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
hw_atl_itr_res_irq_set(self, 1U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
+ err = readx_poll_timeout_atomic(hw_atl_itr_res_irq_get,
+ self, val, val == 0,
+ 1000U, 10000U);
if (err < 0)
goto err_exit;
@@ -181,6 +186,7 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
+ u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
@@ -188,8 +194,9 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
hw_atl_rpf_rss_key_wr_data_set(self, key_data);
hw_atl_rpf_rss_key_addr_set(self, addr);
hw_atl_rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
- 1000U, 10U);
+ err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
+ self, val, val == 0,
+ 1000U, 10000U);
if (err < 0)
goto err_exit;
}
@@ -207,8 +214,9 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
int err = 0;
- u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
- HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+ u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
+ HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+ u32 val;
memset(bitary, 0, sizeof(bitary));
@@ -222,8 +230,9 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
- 1000U, 10U);
+ err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
+ self, val, val == 0,
+ 1000U, 10000U);
if (err < 0)
goto err_exit;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index b58ca7cb8e9d..b31dba1b1a55 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -173,6 +173,7 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
+ u32 val;
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
@@ -180,8 +181,9 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
hw_atl_rpf_rss_key_wr_data_set(self, key_data);
hw_atl_rpf_rss_key_addr_set(self, addr);
hw_atl_rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
- 1000U, 10U);
+ err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
+ self, val, val == 0,
+ 1000U, 10000U);
if (err < 0)
goto err_exit;
}
@@ -199,8 +201,9 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
int err = 0;
- u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
- HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+ u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
+ HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+ u32 val;
memset(bitary, 0, sizeof(bitary));
@@ -214,8 +217,9 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
- 1000U, 10U);
+ err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
+ self, val, val == 0,
+ 1000U, 10000U);
if (err < 0)
goto err_exit;
}
@@ -275,6 +279,9 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
+ /* Tx TC/Queue number config */
+ hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
+
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 939f77e2e117..0722b8e01964 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1274,6 +1274,15 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT,
+ tx_traf_class_mode);
+}
+
void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_hi_threshold_per_tc,
u32 buffer)
@@ -1585,3 +1594,24 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
HW_ATL_RPF_L3_DSTA_ADR(location + i),
ipv6_dest[i]);
}
+
+u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+}
+
+u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp)
+{
+ return aq_hw_read_reg(aq_hw,
+ HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp));
+}
+
+u32 hw_atl_scrpad12_get(struct aq_hw_s *self)
+{
+ return hw_atl_scrpad_get(self, 0xB);
+}
+
+u32 hw_atl_scrpad25_get(struct aq_hw_s *self)
+{
+ return hw_atl_scrpad_get(self, 0x18);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 03c570d115fe..d46351890b16 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -605,6 +605,10 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
/* tpb */
+/* set TX Traffic Class Mode */
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode);
+
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
@@ -752,4 +756,16 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_dest);
+/* get global microprocessor ram semaphore */
+u32 hw_atl_sem_ram_get(struct aq_hw_s *self);
+
+/* get global microprocessor scratch pad register */
+u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);
+
+/* get global microprocessor scratch pad 12 register */
+u32 hw_atl_scrpad12_get(struct aq_hw_s *self);
+
+/* get global microprocessor scratch pad 25 register */
+u32 hw_atl_scrpad25_get(struct aq_hw_s *self);
+
#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 8470d92db812..fb45bc2d99cf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1948,6 +1948,19 @@
/* default value of bitfield tx_buf_en */
#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
+/* register address for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
+/* bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
+/* lower bit position of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
+/* width of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
+/* default value of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
+
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
@@ -2519,4 +2532,6 @@
/* Default value of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+#define HW_ATL_FW_SM_RAM 0x2U
+
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 9b74a3197d7f..eb4b99d56081 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -25,7 +25,9 @@
#define HW_ATL_MIF_ADDR 0x0208U
#define HW_ATL_MIF_VAL 0x020CU
-#define HW_ATL_FW_SM_RAM 0x2U
+#define HW_ATL_RPC_CONTROL_ADR 0x0338U
+#define HW_ATL_RPC_STATE_ADR 0x033CU
+
#define HW_ATL_MPI_FW_VERSION 0x18
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
@@ -53,6 +55,12 @@ static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
+static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
+static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
+static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
+static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
+static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);
+
int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
int err = 0;
@@ -234,6 +242,7 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
int k;
u32 boot_exit_code = 0;
+ u32 val;
for (k = 0; k < 1000; ++k) {
u32 flb_status = aq_hw_read_reg(self,
@@ -260,9 +269,11 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
int err = 0;
hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
- AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
- HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
- 10, 1000U);
+ err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state,
+ self, val,
+ (val & HW_ATL_MPI_STATE_MSK) ==
+ MPI_DEINIT,
+ 10, 10000U);
if (err)
return err;
}
@@ -277,16 +288,17 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
u32 *p, u32 cnt)
{
int err = 0;
+ u32 val;
- AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
- HW_ATL_FW_SM_RAM) == 1U,
- 1U, 10000U);
+ err = readx_poll_timeout_atomic(hw_atl_sem_ram_get,
+ self, val, val == 1U,
+ 1U, 10000U);
if (err < 0) {
bool is_locked;
hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
- is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_sem_ram_get(self);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -299,13 +311,14 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
if (IS_CHIP_FEATURE(REVISION_B1))
- AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self,
- HW_ATL_MIF_ADDR),
- 1, 1000U);
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
+ self, val, val != a,
+ 1U, 1000U);
else
- AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self,
- HW_ATL_MIF_CMD)),
- 1, 1000U);
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
+ self, val,
+ !(val & 0x100),
+ 1U, 1000U);
*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
a += 4;
@@ -320,10 +333,11 @@ err_exit:
static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
u32 cnt)
{
+ u32 val;
int err = 0;
bool is_locked;
- is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_sem_ram_get(self);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -337,10 +351,11 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
(0x80000000 | (0xFFFF & (offset * 4))));
hw_atl_mcp_up_force_intr_set(self, 1);
/* 1000 times by 10us = 10ms */
- AQ_HW_WAIT_FOR((aq_hw_read_reg(self,
- 0x32C) & 0xF0000000) !=
- 0x80000000,
- 10, 1000);
+ err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
+ self, val,
+ (val & 0xF0000000) ==
+ 0x80000000,
+ 10U, 10000U);
}
} else {
u32 offset = 0;
@@ -351,8 +366,10 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
aq_hw_write_reg(self, 0x20C, p[offset]);
aq_hw_write_reg(self, 0x200, 0xC000);
- AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U) &
- 0x100) == 0, 10, 1000);
+ err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
+ self, val,
+ (val & 0x100) == 0,
+ 1000U, 10000U);
}
}
@@ -395,15 +412,14 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
- aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
+ err = readx_poll_timeout_atomic(hw_atl_scrpad25_get,
+ self, self->mbox_addr,
+ self->mbox_addr != 0U,
+ 1000U, 10000U);
return err;
}
-#define HW_ATL_RPC_CONTROL_ADR 0x0338U
-#define HW_ATL_RPC_STATE_ADR 0x033CU
-
struct aq_hw_atl_utils_fw_rpc_tid_s {
union {
u32 val;
@@ -452,10 +468,10 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
self->rpc_tid = sw.tid;
- AQ_HW_WAIT_FOR(sw.tid ==
- (fw.val =
- aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
- fw.tid), 1000U, 100U);
+ err = readx_poll_timeout_atomic(hw_atl_utils_rpc_state_get,
+ self, fw.val,
+ sw.tid == fw.tid,
+ 1000U, 100000U);
if (fw.len == 0xFFFFU) {
err = hw_atl_utils_fw_rpc_call(self, sw.len);
@@ -559,10 +575,11 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
transaction_id = mbox.transaction_id;
- AQ_HW_WAIT_FOR(transaction_id !=
- (hw_atl_utils_mpi_read_mbox(self, &mbox),
- mbox.transaction_id),
- 1000U, 100U);
+ err = readx_poll_timeout_atomic(hw_atl_utils_get_mpi_mbox_tid,
+ self, mbox.transaction_id,
+ transaction_id !=
+ mbox.transaction_id,
+ 1000U, 100000U);
if (err < 0)
goto err_exit;
}
@@ -585,7 +602,7 @@ err_exit:
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
- u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
+ u32 cp0x036C = hw_atl_utils_mpi_get_state(self);
u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
@@ -905,6 +922,35 @@ err_exit:
return err;
}
+static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self)
+{
+ struct hw_atl_utils_mbox_header mbox;
+
+ hw_atl_utils_mpi_read_mbox(self, &mbox);
+
+ return mbox.transaction_id;
+}
+
+static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
+}
+
+static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MIF_CMD);
+}
+
+static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MIF_ADDR);
+}
+
+static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
+}
+
const struct aq_fw_ops aq_fw_1x_ops = {
.init = hw_atl_utils_mpi_create,
.deinit = hw_atl_fw1x_deinit,
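The conversions above replace the driver-private AQ_HW_WAIT_FOR() busy-wait macro with readx_poll_timeout_atomic() from <linux/iopoll.h>. The macro takes a one-argument read op, the cookie passed to that op, a variable that receives each read, an exit condition, a per-iteration delay in microseconds (udelay, so it is safe in atomic context) and an overall timeout in microseconds, and returns 0 on success or -ETIMEDOUT. A minimal sketch of the pattern, reusing the accessor and mask introduced in this file (the wrapper function itself is illustrative only):

#include <linux/iopoll.h>

/* Poll the MPI state register until the firmware reports MPI_DEINIT,
 * delaying 10us between reads and giving up after 10ms.
 */
static int example_wait_mpi_deinit(struct aq_hw_s *self)
{
	u32 val;

	return readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state, self, val,
					 (val & HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
					 10U, 10000U);
}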
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 7de3220d9cab..fe6c5658e016 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -20,15 +20,14 @@
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
-#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
-#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
-
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
-#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
@@ -72,17 +71,24 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
static int aq_fw2x_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
+static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
+static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
+static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
+
static int aq_fw2x_init(struct aq_hw_s *self)
{
int err = 0;
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
- aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)),
- 1000U, 10U);
- AQ_HW_WAIT_FOR(0U != (self->rpc_addr =
- aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)),
- 1000U, 100U);
+ err = readx_poll_timeout_atomic(aq_fw2x_mbox_get,
+ self, self->mbox_addr,
+ self->mbox_addr != 0U,
+ 1000U, 10000U);
+
+ err = readx_poll_timeout_atomic(aq_fw2x_rpc_get,
+ self, self->rpc_addr,
+ self->rpc_addr != 0U,
+ 1000U, 100000U);
return err;
}
@@ -286,16 +292,18 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
int err = 0;
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
+ u32 stats_val;
/* Toggle statistics bit for FW to update */
mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
/* Wait FW to report back */
- AQ_HW_WAIT_FOR(orig_stats_val !=
- (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
- BIT(CAPS_HI_STATISTICS)),
- 1U, 10000U);
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
+ self, stats_val,
+ orig_stats_val != (stats_val &
+ BIT(CAPS_HI_STATISTICS)),
+ 1U, 10000U);
if (err)
return err;
@@ -309,6 +317,7 @@ static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
unsigned int rpc_size = 0U;
u32 mpi_opts;
int err = 0;
+ u32 val;
rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg);
@@ -337,8 +346,10 @@ static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY;
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
- AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
- HW_ATL_FW2X_CTRL_SLEEP_PROXY), 1U, 10000U);
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
+ self, val,
+ val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
+ 1U, 10000U);
err_exit:
return err;
@@ -350,6 +361,7 @@ static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
struct fw2x_msg_wol *msg = NULL;
u32 mpi_opts;
int err = 0;
+ u32 val;
err = hw_atl_utils_fw_rpc_wait(self, &rpc);
if (err < 0)
@@ -374,8 +386,9 @@ static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
mpi_opts |= HW_ATL_FW2X_CTRL_WOL;
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
- AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
- HW_ATL_FW2X_CTRL_WOL), 1U, 10000U);
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
+ self, val, val & HW_ATL_FW2X_CTRL_WOL,
+ 1U, 10000U);
err_exit:
return err;
@@ -425,7 +438,7 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
*supported_rates = fw2x_to_eee_mask(caps_hi);
- mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+ mpi_state = aq_fw2x_state2_get(self);
*rate = fw2x_to_eee_mask(mpi_state);
return err;
@@ -455,7 +468,7 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
{
- u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+ u32 mpi_state = aq_fw2x_state2_get(self);
if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
@@ -471,6 +484,21 @@ static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
return 0;
}
+static u32 aq_fw2x_mbox_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR);
+}
+
+static u32 aq_fw2x_rpc_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR);
+}
+
+static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+}
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
.deinit = aq_fw2x_deinit,
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 4406325fdd9f..ff3d68532f5f 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -148,7 +148,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
/* return the sk_buff to system */
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
txbd->data = 0;
txbd->info = 0;
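The one-line change above (and the matching ones in the Atheros and Broadcom drivers further down) switches successfully transmitted frames from dev_kfree_skb_irq() to dev_consume_skb_irq(). Both free an skb safely from hard-IRQ context, but the consume variant is not reported as a drop by the kfree_skb tracepoint and drop monitors. A small sketch of the intended split, with placeholder names:

#include <linux/netdevice.h>

/* TX completion: delivered frames are consumed, error-path frames are
 * still freed as drops. Both helpers are usable from hard-IRQ context.
 */
static void example_tx_complete_one(struct sk_buff *skb, bool delivered)
{
	if (delivered)
		dev_consume_skb_irq(skb);	/* not counted as a drop */
	else
		dev_kfree_skb_irq(skb);		/* counted as a drop */
}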
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c131cfc1b79d..e3538ba7d0e7 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx)
alx->num_txq +
sizeof(struct alx_rrd) * alx->rx_ringsz +
sizeof(struct alx_rfd) * alx->rx_ringsz;
- alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
- alx->descmem.size,
- &alx->descmem.dma,
- GFP_KERNEL);
+ alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ &alx->descmem.dma, GFP_KERNEL);
if (!alx->descmem.virt)
return -ENOMEM;
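This and the many similar hunks below are part of the tree-wide removal of dma_zalloc_coherent(): dma_alloc_coherent() already returns zeroed memory, so the z-variant was redundant and callers convert one-to-one. A sketch of the resulting call shape, with placeholder parameters:

#include <linux/dma-mapping.h>

/* Allocate a coherent descriptor area; the buffer comes back zeroed, so no
 * explicit memset (and no dma_zalloc_coherent wrapper) is needed.
 */
static void *example_alloc_desc_ring(struct device *dev, size_t size,
				     dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}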
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 7087b88550db..0f1eb1981469 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
8 * 4;
- ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
- &ring_header->dma, GFP_KERNEL);
+ ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
+ &ring_header->dma, GFP_KERNEL);
if (unlikely(!ring_header->desc)) {
dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
goto err_nomem;
@@ -1833,10 +1833,10 @@ rrs_checked:
atl1c_clean_rrd(rrd_ring, rrs, rfd_num);
if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) {
atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
- if (netif_msg_rx_err(adapter))
- dev_warn(&pdev->dev,
- "wrong packet! rrs word3 is %x\n",
- rrs->word3);
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev,
+ "wrong packet! rrs word3 is %x\n",
+ rrs->word3);
continue;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 3164aad29bcf..9dfe6a9431e6 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1259,7 +1259,7 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
}
if (tx_buffer->skb) {
- dev_kfree_skb_irq(tx_buffer->skb);
+ dev_consume_skb_irq(tx_buffer->skb);
tx_buffer->skb = NULL;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 63edc5706c09..9e07b469066a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2088,7 +2088,7 @@ static int atl1_intr_tx(struct atl1_adapter *adapter)
}
if (buffer_info->skb) {
- dev_kfree_skb_irq(buffer_info->skb);
+ dev_consume_skb_irq(buffer_info->skb);
buffer_info->skb = NULL;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bb41becb6609..d99317b3d891 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -909,7 +909,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
(adapter->txd_write_ptr >> 2));
mmiowb();
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
- static int cards_found;
+ static int cards_found = 0;
unsigned long mmio_start;
int mmio_len;
int err;
- cards_found = 0;
-
err = pci_enable_device(pdev);
if (err)
return err;
@@ -2946,7 +2944,7 @@ static int atl2_validate_option(int *value, struct atl2_option *opt)
if (*value == ent->i) {
if (ent->str[0] != '\0')
printk(KERN_INFO "%s\n", ent->str);
- return 0;
+ return 0;
}
}
break;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index c1d3ee9baf7e..716bfbba59cf 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -194,7 +194,6 @@ config SYSTEMPORT
config BNXT
tristate "Broadcom NetXtreme-C/E support"
depends on PCI
- depends on MAY_USE_DEVLINK
select FW_LOADER
select LIBCRC32C
---help---
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f44808959ff3..97ab0dd25552 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -638,7 +638,7 @@ static void b44_tx(struct b44 *bp)
bytes_compl += skb->len;
pkts_compl++;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
}
netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
@@ -1012,7 +1012,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
skb = bounce_skb;
}
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6bae973d4dce..09cd188826b1 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_freeirq_tx;
@@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_free_rx_ring;
@@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
@@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev)
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4574275ef445..bc3ac369cbe3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev,
priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
reg = rxchk_readl(priv, RXCHK_CONTROL);
+ /* Clear L2 header checks, which would prevent BPDUs
+ * from being received.
+ */
+ reg &= ~RXCHK_L2_HDR_DIS;
if (priv->rx_chk_en)
reg |= RXCHK_EN;
else
@@ -520,7 +524,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- u32 reg;
wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
@@ -528,11 +531,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
if (!(priv->wolopts & WAKE_MAGICSECURE))
return;
- /* Return the programmed SecureOn password */
- reg = umac_readl(priv, UMAC_PSW_MS);
- put_unaligned_be16(reg, &wol->sopass[0]);
- reg = umac_readl(priv, UMAC_PSW_LS);
- put_unaligned_be32(reg, &wol->sopass[2]);
+ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
static int bcm_sysport_set_wol(struct net_device *dev,
@@ -548,13 +547,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
if (wol->wolopts & ~supported)
return -EINVAL;
- /* Program the SecureOn password */
- if (wol->wolopts & WAKE_MAGICSECURE) {
- umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
- UMAC_PSW_MS);
- umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
- UMAC_PSW_LS);
- }
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
@@ -1506,8 +1500,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
/* We just need one DMA descriptor which is DMA-able, since writing to
* the port will allocate a new descriptor in its internal linked-list
*/
- p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
- GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+ GFP_KERNEL);
if (!p) {
netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
return -ENOMEM;
@@ -2649,13 +2643,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
unsigned int index, i = 0;
u32 reg;
- /* Password has already been programmed */
reg = umac_readl(priv, UMAC_MPD_CTRL);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
reg |= MPD_EN;
reg &= ~PSW_EN;
- if (priv->wolopts & WAKE_MAGICSECURE)
+ if (priv->wolopts & WAKE_MAGICSECURE) {
+ /* Program the SecureOn password */
+ umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+ UMAC_PSW_MS);
+ umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+ UMAC_PSW_LS);
reg |= PSW_EN;
+ }
umac_writel(priv, reg, UMAC_MPD_CTRL);
if (priv->wolopts & WAKE_FILTER) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 0887e6356649..0b192fea9c5d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -12,6 +12,7 @@
#define __BCM_SYSPORT_H
#include <linux/bitmap.h>
+#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/net_dim.h>
@@ -778,6 +779,7 @@ struct bcm_sysport_priv {
unsigned int crc_fwd:1;
u16 rev;
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
unsigned int wol_irq_disabled:1;
/* MIB related fields */
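The SYSTEMPORT changes above stop writing the SecureOn password to the UMAC registers from ethtool's .set_wol; the password is cached in the new priv->sopass[SOPASS_MAX] field and only programmed (with PSW_EN set) when the device actually arms wake-on-LAN in the suspend path. A sketch of the register packing used there, reusing the helper and register names from the hunks above (get_unaligned_be16/32 come from <asm/unaligned.h>):

/* The 6-byte SecureOn password is split across a 16-bit MS register and a
 * 32-bit LS register when entering WoL; until then it only lives in priv.
 */
static void example_program_sopass(struct bcm_sysport_priv *priv)
{
	umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), UMAC_PSW_MS);
	umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), UMAC_PSW_LS);
}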
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index cabc8e49ad24..4632dd5dbad1 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
/* Alloc ring of descriptors */
size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
- ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
- &ring->dma_base,
- GFP_KERNEL);
+ ring->cpu_base = dma_alloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
if (!ring->cpu_base) {
dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
ring->mmio_base);
@@ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
/* Alloc ring of descriptors */
size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
- ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
- &ring->dma_base,
- GFP_KERNEL);
+ ring->cpu_base = dma_alloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
if (!ring->cpu_base) {
dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
ring->mmio_base);
@@ -1446,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
+ phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return -ENODEV;
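The bgmac hunk above also picks up the new fixed_phy_register() prototype, which no longer takes a link-GPIO argument; callers simply drop the old "-1" parameter. A sketch of the updated three-argument call, with illustrative status values:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

/* Register a polled fixed-link PHY with the (irq, status, device_node) API. */
static struct phy_device *example_register_fixed_phy(void)
{
	struct fixed_phy_status status = {
		.link	= 1,
		.speed	= SPEED_1000,
		.duplex	= DUPLEX_FULL,
	};

	return fixed_phy_register(PHY_POLL, &status, NULL);
}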
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index bbb247116045..d63371d70bce 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev)
BNX2_SBLK_MSIX_ALIGN_SIZE);
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
+ status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
if (!status_blk)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 5cd3135dfe30..6026b53137aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -32,7 +32,7 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.712.30-0"
+#define DRV_MODULE_VERSION "1.713.36-0"
#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
@@ -2081,7 +2081,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
#define BNX2X_ILT_ZALLOC(x, y, size) \
- x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 142bc11b9fbb..2462e7aa0c5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -52,7 +52,7 @@ extern int bnx2x_num_queues;
#define BNX2X_PCI_ALLOC(y, size) \
({ \
- void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x) \
DP(NETIF_MSG_HW, \
"BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 46ee2c01f4c5..066765fbef06 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -449,7 +449,7 @@ static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
ccd[cos] =
(u32)input_data->cos_min_rate[cos] * 100 *
(T_FAIR_COEF / (8 * 100 * cosWeightSum));
- if (ccd[cos] < pdata->fair_vars.fair_threshold
+ if (ccd[cos] < pdata->fair_vars.fair_threshold
+ MIN_ABOVE_THRESH) {
ccd[cos] =
pdata->fair_vars.fair_threshold +
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 98d4c5a3ff21..d581d0ae6584 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -837,49 +837,45 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
switch (cos_entry) {
case 0:
- nig_reg_adress_crd_weight =
- (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
case 1:
- nig_reg_adress_crd_weight = (port) ?
- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
case 2:
- nig_reg_adress_crd_weight = (port) ?
- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
- break;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
case 3:
- if (port)
+ if (port)
return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
- pbf_reg_adress_crd_weight =
- PBF_REG_COS3_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
+ break;
case 4:
- if (port)
- return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
- pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
- break;
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
case 5:
- if (port)
- return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
- pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
- break;
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
}
REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
@@ -966,7 +962,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
if (pri >= max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter Illegal strict priority\n");
- return -EINVAL;
+ return -EINVAL;
}
if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
@@ -1845,28 +1841,28 @@ static int bnx2x_emac_enable(struct link_params *params,
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- /* pause enable/disable */
- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
+ /* pause enable/disable */
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_PFC_ENABLED)) {
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
-
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- } else
- bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_FLOW_EN);
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -6339,7 +6335,7 @@ int bnx2x_set_led(struct link_params *params,
*/
if (!vars->link_up)
break;
- /* else: fall through */
+ /* fall through */
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -6478,9 +6474,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
- /* Link is up only if both local phy and external phy are up */
- if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
- return -ESRCH;
+ /* Link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return -ESRCH;
}
/* In XGXS loopback mode, do not check external PHY */
if (params->loopback_mode == LOOPBACK_XGXS)
@@ -7293,8 +7289,8 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
DP(NETIF_MSG_LINK,
"XAUI workaround has completed\n");
return 0;
- }
- usleep_range(3000, 6000);
+ }
+ usleep_range(3000, 6000);
}
break;
}
@@ -12675,39 +12671,39 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->mac_type = MAC_TYPE_BMAC;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_BMAC;
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_xgxs_deassert(params);
+ bnx2x_xgxs_deassert(params);
- /* Set bmac loopback */
- bnx2x_bmac_enable(params, vars, 1, 1);
+ /* Set bmac loopback */
+ bnx2x_bmac_enable(params, vars, 1, 1);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
}
static void bnx2x_init_emac_loopback(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- vars->link_up = 1;
- vars->line_speed = SPEED_1000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->mac_type = MAC_TYPE_EMAC;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_EMAC;
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_xgxs_deassert(params);
- /* Set bmac loopback */
- bnx2x_emac_enable(params, vars, 1);
- bnx2x_emac_program(params, vars);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ bnx2x_xgxs_deassert(params);
+ /* Set bmac loopback */
+ bnx2x_emac_enable(params, vars, 1);
+ bnx2x_emac_program(params, vars);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
}
static void bnx2x_init_xmac_loopback(struct link_params *params,
@@ -13073,12 +13069,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
}
- if (!CHIP_IS_E3(bp)) {
- bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
- } else {
- bnx2x_set_xmac_rxtx(params, 0);
- bnx2x_set_umac_rxtx(params, 0);
- }
+ if (!CHIP_IS_E3(bp)) {
+ bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+ } else {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
/* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
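Among the bnx2x_link.c cleanups, the "/* else: fall through */" annotation is reduced to the plain "/* fall through */" form, the comment spelling that GCC's -Wimplicit-fallthrough heuristics are generally expected to recognize (later kernels moved on to the fallthrough pseudo-keyword). A small illustrative example of the pattern, not tied to this driver:

static int example_led_mode(int mode, bool link_up)
{
	switch (mode) {
	case 0:
		if (!link_up)
			break;
		/* fall through */
	case 1:
		return 1;
	default:
		return -EINVAL;
	}
	return 0;
}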
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3b5b47e98c73..626b491f7674 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11298,7 +11298,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
dev_info.port_hw_config[port].external_phy_config),
SHMEM_RD(bp,
dev_info.port_hw_config[port].external_phy_config2));
- return;
+ return;
}
if (CHIP_IS_E3(bp))
@@ -11998,7 +11998,7 @@ static void validate_set_si_mode(struct bnx2x *bp)
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
- int vn, mfw_vn;
+ int vn;
u32 val = 0, val2 = 0;
int rc = 0;
@@ -12083,12 +12083,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
/*
* Initialize MF configuration
*/
-
bp->mf_ov = 0;
bp->mf_mode = 0;
bp->mf_sub_mode = 0;
vn = BP_VN(bp);
- mfw_vn = BP_FW_MB_IDX(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index a9eaaf3e73a4..7b22a6d8514c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2977,8 +2977,8 @@ static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
cmd_pos->data.macs_num--;
- DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
- cmd_pos->data.macs_num, cnt);
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
+ cmd_pos->data.macs_num, cnt);
/* Break if we reached the maximum
* number of rules.
@@ -3597,8 +3597,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
p->mcast_list_len = reg_sz;
- DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
- cmd, p->mcast_list_len);
+ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+ cmd, p->mcast_list_len);
break;
case BNX2X_MCAST_CMD_ADD:
@@ -3735,8 +3735,8 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
i++;
- DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- cfg_data.mac);
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ cfg_data.mac);
}
*rdata_idx = i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index c835f6c7ecd0..c97b642e6537 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2230,7 +2230,7 @@ int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
rc = bnx2x_vf_close(bp, vf);
if (rc)
goto op_err;
- /* Fallthrough to release resources */
+ /* Fall through - to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 8e0a317b31f7..a9bdc21873d3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1654,13 +1654,9 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
{
int i, j;
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
- size_t fsz;
- fsz = tlv->n_mac_vlan_filters *
- sizeof(struct bnx2x_vf_mac_vlan_filter) +
- sizeof(struct bnx2x_vf_mac_vlan_filters);
-
- fl = kzalloc(fsz, GFP_KERNEL);
+ fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
+ GFP_KERNEL);
if (!fl)
return -ENOMEM;
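The bnx2x_vfpf.c change above replaces an open-coded header-plus-flexible-array size computation with struct_size() from <linux/overflow.h>, which performs the multiply-and-add with saturation so an attacker-influenced element count cannot wrap the allocation size. A self-contained sketch of the idiom (the structure is a stand-in for bnx2x_vf_mac_vlan_filters):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_filters {
	int count;
	struct {
		u8 mac[6];
		u16 vlan;
	} filters[];		/* flexible array member */
};

/* Allocate the header plus n trailing filter entries in one kzalloc(). */
static struct example_filters *example_alloc_filters(unsigned int n)
{
	struct example_filters *fl;

	fl = kzalloc(struct_size(fl, filters, n), GFP_KERNEL);
	if (fl)
		fl->count = n;
	return fl;
}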
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3aa80da973d7..0bb9d7b3a2b6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2018 Broadcom Limited
+ * Copyright (c) 2016-2019 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,6 +31,7 @@
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
+#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
@@ -112,6 +113,7 @@ enum board_idx {
BCM57454,
BCM5745x_NPAR,
BCM57508,
+ BCM57504,
BCM58802,
BCM58804,
BCM58808,
@@ -155,6 +157,7 @@ static const struct {
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
+ [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -201,6 +204,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
+ { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -500,6 +504,12 @@ normal_tx:
}
length >>= 9;
+ if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+ dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
+ skb->len);
+ i = 0;
+ goto tx_dma_error;
+ }
flags |= bnxt_lhint_arr[length];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
@@ -3449,10 +3459,10 @@ alloc_ext_stats:
goto alloc_tx_ext_stats;
bp->hw_rx_port_stats_ext =
- dma_zalloc_coherent(&pdev->dev,
- sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map,
- GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct rx_port_stats_ext),
+ &bp->hw_rx_port_stats_ext_map,
+ GFP_KERNEL);
if (!bp->hw_rx_port_stats_ext)
return 0;
@@ -3462,10 +3472,10 @@ alloc_tx_ext_stats:
if (bp->hwrm_spec_code >= 0x10902) {
bp->hw_tx_port_stats_ext =
- dma_zalloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_port_stats_ext),
+ &bp->hw_tx_port_stats_ext_map,
+ GFP_KERNEL);
}
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
}
@@ -3903,7 +3913,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (len)
break;
/* on first few passes, just barely sleep */
- if (i < DFLT_HWRM_CMD_TIMEOUT)
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
else
@@ -3926,7 +3936,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
dma_rmb();
if (*valid)
break;
- udelay(1);
+ usleep_range(1, 5);
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
@@ -4973,12 +4983,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
u32 map_idx = ring->map_idx;
+ unsigned int vector;
+ vector = bp->irq_tbl[map_idx].vector;
+ disable_irq_nosync(vector);
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
- if (rc)
+ if (rc) {
+ enable_irq(vector);
goto err_out;
+ }
bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ enable_irq(vector);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
if (!i) {
@@ -5601,7 +5617,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
+ flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
else
flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
}
@@ -6221,9 +6238,12 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- if (i == (nr_tbls - 1))
- rmem->nr_pages = ctx_pg->nr_pages %
- MAX_CTX_PAGES;
+ if (i == (nr_tbls - 1)) {
+ int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
+
+ if (rem)
+ rmem->nr_pages = rem;
+ }
rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
if (rc)
break;
@@ -6670,6 +6690,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -8604,24 +8628,88 @@ static int bnxt_close(struct net_device *dev)
return 0;
}
+static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
+ u16 *val)
+{
+ struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_phy_mdio_read_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a00)
+ return -EOPNOTSUPP;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.phy_addr = phy_addr;
+ req.reg_addr = cpu_to_le16(reg & 0x1f);
+ if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
+ req.cl45_mdio = 1;
+ req.phy_addr = mdio_phy_id_prtad(phy_addr);
+ req.dev_addr = mdio_phy_id_devad(phy_addr);
+ req.reg_addr = cpu_to_le16(reg);
+ }
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *val = le16_to_cpu(resp->reg_data);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
+ u16 val)
+{
+ struct hwrm_port_phy_mdio_write_input req = {0};
+
+ if (bp->hwrm_spec_code < 0x10a00)
+ return -EOPNOTSUPP;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.phy_addr = phy_addr;
+ req.reg_addr = cpu_to_le16(reg & 0x1f);
+ if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
+ req.cl45_mdio = 1;
+ req.phy_addr = mdio_phy_id_prtad(phy_addr);
+ req.dev_addr = mdio_phy_id_devad(phy_addr);
+ req.reg_addr = cpu_to_le16(reg);
+ }
+ req.reg_data = cpu_to_le16(val);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
/* rtnl_lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ struct mii_ioctl_data *mdio = if_mii(ifr);
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
switch (cmd) {
case SIOCGMIIPHY:
+ mdio->phy_id = bp->link_info.phy_addr;
+
/* fallthru */
case SIOCGMIIREG: {
+ u16 mii_regval = 0;
+
if (!netif_running(dev))
return -EAGAIN;
- return 0;
+ rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
+ &mii_regval);
+ mdio->val_out = mii_regval;
+ return rc;
}
case SIOCSMIIREG:
if (!netif_running(dev))
return -EAGAIN;
- return 0;
+ return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
+ mdio->val_in);
default:
/* do nothing */
@@ -9977,8 +10065,11 @@ static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
return 0;
}
-int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
+int bnxt_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
+ struct bnxt *bp = netdev_priv(dev);
+
if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return -EOPNOTSUPP;
@@ -9986,27 +10077,12 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(bp->switch_id);
- memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
+ ppid->id_len = sizeof(bp->switch_id);
+ memcpy(ppid->id, bp->switch_id, ppid->id_len);
-static int bnxt_swdev_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- return bnxt_port_attr_get(netdev_priv(dev), attr);
+ return 0;
}
-static const struct switchdev_ops bnxt_switchdev_ops = {
- .switchdev_port_attr_get = bnxt_swdev_port_attr_get
-};
-
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -10038,6 +10114,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bpf = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
+ .ndo_get_port_parent_id = bnxt_get_port_parent_id,
.ndo_get_phys_port_name = bnxt_get_phys_port_name
};
@@ -10396,7 +10473,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &bnxt_netdev_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
- SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
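bnxt's SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG handlers above are now backed by the firmware's PORT_PHY_MDIO_READ/WRITE commands instead of returning success without doing anything. The ioctl plumbing itself is the standard MII pattern: if_mii() from <linux/mii.h> overlays struct mii_ioctl_data on the ifreq, and the driver fills val_out for reads. A sketch of that generic shape, where example_phy_read() is a placeholder for the device-specific (here firmware-mediated) register access:

#include <linux/mii.h>
#include <linux/netdevice.h>

/* Placeholder for the device-specific PHY register read. */
static int example_phy_read(struct net_device *dev, u16 phy_id, u16 reg,
			    u16 *val)
{
	*val = 0;
	return 0;
}

static int example_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mdio = if_mii(ifr);
	u16 val = 0;
	int rc;

	switch (cmd) {
	case SIOCGMIIREG:
		if (!netif_running(dev))
			return -EAGAIN;
		rc = example_phy_read(dev, mdio->phy_id, mdio->reg_num, &val);
		mdio->val_out = val;
		return rc;
	default:
		return -EOPNOTSUPP;
	}
}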
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a451796deefe..cf81ace7a6e6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -22,7 +22,6 @@
#include <linux/rhashtable.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
-#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
@@ -582,7 +581,7 @@ struct nqe_cn {
(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
-#define HWRM_VALID_BIT_DELAY_USEC 20
+#define HWRM_VALID_BIT_DELAY_USEC 150
#define BNXT_HWRM_CHNL_CHIMP 0
#define BNXT_HWRM_CHNL_KONG 1
@@ -946,6 +945,7 @@ struct bnxt_vf_info {
* stored by PF.
*/
u16 vlan;
+ u16 func_qcfg_flags;
u32 flags;
#define BNXT_VF_QOS 0x1
#define BNXT_VF_SPOOFCHK 0x2
@@ -1479,6 +1479,7 @@ struct bnxt {
#define BNXT_FW_CAP_IF_CHANGE 0x00000010
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
+ #define BNXT_FW_CAP_TRUSTED_VF 0x00000800
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1609,6 +1610,7 @@ struct bnxt {
/* devlink interface and vf-rep structs */
struct devlink *dl;
+ struct devlink_port dl_port;
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
@@ -1794,7 +1796,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
-int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+int bnxt_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
void bnxt_dim_work(struct work_struct *work);
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
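With switchdev_ops gone from bnxt, the switch ID is reported through the dedicated .ndo_get_port_parent_id netdev op declared above, which simply fills a struct netdev_phys_item_id. A minimal sketch of such a callback, where example_priv/switch_id stand in for the driver's private data:

#include <linux/netdevice.h>
#include <linux/string.h>

struct example_priv {
	u8 switch_id[8];	/* illustrative switch identifier */
};

static int example_get_port_parent_id(struct net_device *dev,
				      struct netdev_phys_item_id *ppid)
{
	struct example_priv *priv = netdev_priv(dev);

	ppid->id_len = sizeof(priv->switch_id);
	memcpy(ppid->id, priv->switch_id, ppid->id_len);
	return 0;
}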
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 15c7041e937b..70775158c8c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
- data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
- GFP_KERNEL);
+ data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 140dbd62106d..e1feb97bcd81 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return -EFAULT;
}
- data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
- &data_dma_addr, GFP_KERNEL);
+ data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
+ &data_dma_addr, GFP_KERNEL);
if (!data_addr)
return -ENOMEM;
@@ -188,6 +188,9 @@ static const struct devlink_param bnxt_dl_params[] = {
NULL),
};
+static const struct devlink_param bnxt_dl_port_params[] = {
+};
+
int bnxt_dl_register(struct bnxt *bp)
{
struct devlink *dl;
@@ -225,8 +228,29 @@ int bnxt_dl_register(struct bnxt *bp)
goto err_dl_unreg;
}
+ rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
+ if (rc) {
+ netdev_err(bp->dev, "devlink_port_register failed");
+ goto err_dl_param_unreg;
+ }
+ devlink_port_type_eth_set(&bp->dl_port, bp->dev);
+
+ rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
+ ARRAY_SIZE(bnxt_dl_port_params));
+ if (rc) {
+ netdev_err(bp->dev, "devlink_port_params_register failed");
+ goto err_dl_port_unreg;
+ }
+
+ devlink_params_publish(dl);
+
return 0;
+err_dl_port_unreg:
+ devlink_port_unregister(&bp->dl_port);
+err_dl_param_unreg:
+ devlink_params_unregister(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
err_dl_unreg:
devlink_unregister(dl);
err_dl_free:
@@ -242,6 +266,9 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
+ devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
+ ARRAY_SIZE(bnxt_dl_port_params));
+ devlink_port_unregister(&bp->dl_port);
devlink_params_unregister(dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
devlink_unregister(dl);
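The devlink changes above register a devlink_port for the PF, bind it to the netdev with devlink_port_type_eth_set(), register (currently empty) per-port parameters and then call devlink_params_publish(), unwinding in reverse order on error and at teardown. A sketch of the core sequence, assuming dl, dl_port, port_index and netdev are supplied by the caller:

#include <net/devlink.h>

/* Register and expose one Ethernet devlink port; later failures must be
 * unwound by the caller via devlink_port_unregister().
 */
static int example_register_dl_port(struct devlink *dl,
				    struct devlink_port *dl_port,
				    unsigned int port_index,
				    struct net_device *netdev)
{
	int rc;

	rc = devlink_port_register(dl, dl_port, port_index);
	if (rc)
		return rc;

	devlink_port_type_eth_set(dl_port, netdev);
	devlink_params_publish(dl);
	return 0;
}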
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f1aaac8e6268..b6c610339501 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2018 Broadcom Limited
+ * Copyright (c) 2016-2019 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -98,6 +98,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
#define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
#define HWRM_FUNC_VF_CFG 0xfUL
@@ -221,6 +222,7 @@ struct cmd_nums {
#define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
#define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
#define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
#define HWRM_CFA_VFR_ALLOC 0xfdUL
#define HWRM_CFA_VFR_FREE 0xfeUL
#define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
@@ -269,6 +271,7 @@ struct cmd_nums {
#define HWRM_ENGINE_CKV_FLUSH 0x133UL
#define HWRM_ENGINE_CKV_RNG_GET 0x134UL
#define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
#define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
#define HWRM_ENGINE_QG_QUERY 0x13dUL
#define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
@@ -296,6 +299,7 @@ struct cmd_nums {
#define HWRM_ENGINE_NQ_ALLOC 0x162UL
#define HWRM_ENGINE_NQ_FREE 0x163UL
#define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_ENGINE_FUNC_QCFG 0x165UL
#define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
#define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
#define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
@@ -379,15 +383,15 @@ struct hwrm_err_output {
};
#define HWRM_NA_SIGNATURE ((__le32)(-1))
#define HWRM_MAX_REQ_LEN 128
-#define HWRM_MAX_RESP_LEN 280
+#define HWRM_MAX_RESP_LEN 704
#define HW_HASH_INDEX_SIZE 0x80
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 33
-#define HWRM_VERSION_STR "1.10.0.33"
+#define HWRM_VERSION_RSVD 47
+#define HWRM_VERSION_STR "1.10.0.47"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -580,6 +584,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
#define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
@@ -595,6 +600,9 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -724,6 +732,30 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
+/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_recovery {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
+};
+
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
@@ -1014,6 +1046,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
#define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1184,6 +1217,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
#define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
+ #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1389,6 +1424,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
#define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -2023,6 +2059,89 @@ struct hwrm_func_backing_store_cfg_output {
u8 valid;
};
+/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
+struct hwrm_error_recovery_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
+struct hwrm_error_recovery_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
+ __le32 driver_polling_freq;
+ __le32 master_func_wait_period;
+ __le32 normal_func_wait_period;
+ __le32 master_func_wait_period_after_reset;
+ __le32 max_bailout_time_after_reset;
+ __le32 fw_health_status_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
+ __le32 fw_heartbeat_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
+ __le32 fw_reset_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg_mask;
+ u8 unused_0[3];
+ u8 reg_array_cnt;
+ __le32 reset_reg[16];
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
+ __le32 reset_reg_val[16];
+ u8 delay_after_reset[16];
+ u8 unused_1[7];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -2954,6 +3073,7 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL
__le16 supported_speeds_auto_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
@@ -2969,6 +3089,7 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL
__le16 supported_speeds_eee_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
@@ -4918,6 +5039,35 @@ struct hwrm_ring_free_output {
u8 valid;
};
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[4];
+ u8 consumer_idx[3];
+ u8 valid;
+};
+
/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
struct hwrm_ring_aggint_qcaps_input {
__le16 req_type;
@@ -5445,19 +5595,21 @@ struct hwrm_cfa_encap_record_alloc_input {
__le64 resp_addr;
__le32 flags;
#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6
u8 unused_0[3];
__le32 encap_data[20];
};
@@ -5505,6 +5657,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -5626,7 +5779,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- u8 unused_0[4];
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
@@ -5891,13 +6045,15 @@ struct hwrm_cfa_flow_info_input {
__le64 ext_flow_handle;
};
-/* hwrm_cfa_flow_info_output (size:448b/56B) */
+/* hwrm_cfa_flow_info_output (size:5632b/704B) */
struct hwrm_cfa_flow_info_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
u8 profile;
__le16 src_fid;
__le16 dst_fid;
@@ -5909,7 +6065,10 @@ struct hwrm_cfa_flow_info_output {
__le16 flow_handle;
__le32 tunnel_handle;
__le16 flow_timer;
- u8 unused_0[5];
+ u8 unused_0[6];
+ __le32 flow_key_data[130];
+ __le32 flow_action_info[30];
+ u8 unused_1[7];
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d80f5c981d90..2b90a2bb1a1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -121,6 +121,54 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
return rc;
}
+static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qcfg_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(vf->fw_fid);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return -EIO;
+ }
+ vf->func_qcfg_flags = le16_to_cpu(resp->flags);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
+static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
+ return !!(vf->flags & BNXT_VF_TRUST);
+
+ bnxt_hwrm_func_qcfg_flags(bp, vf);
+ return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
+}
+
+static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(vf->fw_fid);
+ if (vf->flags & BNXT_VF_TRUST)
+ req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+ else
+ req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -EIO;
+ return 0;
+}
+
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
struct bnxt *bp = netdev_priv(dev);
@@ -135,6 +183,7 @@ int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
else
vf->flags &= ~BNXT_VF_TRUST;
+ bnxt_hwrm_set_trusted_vf(bp, vf);
return 0;
}
@@ -164,7 +213,7 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
else
ivi->qos = 0;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
- ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
+ ivi->trusted = bnxt_is_trusted_vf(bp, vf);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->flags & BNXT_VF_LINK_UP)
@@ -935,9 +984,10 @@ static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
* if the PF assigned MAC address is zero
*/
if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+ bool trust = bnxt_is_trusted_vf(bp, vf);
+
if (is_valid_ether_addr(req->dflt_mac_addr) &&
- ((vf->flags & BNXT_VF_TRUST) ||
- !is_valid_ether_addr(vf->mac_addr) ||
+ (trust || !is_valid_ether_addr(vf->mac_addr) ||
ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
@@ -962,7 +1012,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
* Otherwise, it must match the VF MAC address if firmware spec >=
* 1.2.2
*/
- if (vf->flags & BNXT_VF_TRUST) {
+ if (bnxt_is_trusted_vf(bp, vf)) {
mac_ok = true;
} else if (is_valid_ether_addr(vf->mac_addr)) {
if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index c683b5e96b1d..44d6c5743fb9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -45,7 +45,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
struct bnxt *bp;
/* check if dev belongs to the same switch */
- if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
+ if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
dev->ifindex);
return BNXT_FID_INVALID;
@@ -61,9 +61,9 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
static int bnxt_tc_parse_redir(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+ const struct flow_action_entry *act)
{
- struct net_device *dev = tcf_mirred_dev(tc_act);
+ struct net_device *dev = act->dev;
if (!dev) {
netdev_info(bp->dev, "no dev in mirred action");
@@ -77,16 +77,16 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
static int bnxt_tc_parse_vlan(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+ const struct flow_action_entry *act)
{
- switch (tcf_vlan_action(tc_act)) {
- case TCA_VLAN_ACT_POP:
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
break;
- case TCA_VLAN_ACT_PUSH:
+ case FLOW_ACTION_VLAN_PUSH:
actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
- actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
- actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+ actions->push_vlan_tci = htons(act->vlan.vid);
+ actions->push_vlan_tpid = act->vlan.proto;
break;
default:
return -EOPNOTSUPP;
@@ -96,10 +96,10 @@ static int bnxt_tc_parse_vlan(struct bnxt *bp,
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+ const struct flow_action_entry *act)
{
- struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
- struct ip_tunnel_key *tun_key = &tun_info->key;
+ const struct ip_tunnel_info *tun_info = act->tunnel;
+ const struct ip_tunnel_key *tun_key = &tun_info->key;
if (ip_tunnel_info_af(tun_info) != AF_INET) {
netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
@@ -113,51 +113,43 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- struct tcf_exts *tc_exts)
+ struct flow_action *flow_action)
{
- const struct tc_action *tc_act;
+ struct flow_action_entry *act;
int i, rc;
- if (!tcf_exts_has_actions(tc_exts)) {
+ if (!flow_action_has_entries(flow_action)) {
netdev_info(bp->dev, "no actions");
return -EINVAL;
}
- tcf_exts_for_each_action(i, tc_act, tc_exts) {
- /* Drop action */
- if (is_tcf_gact_shot(tc_act)) {
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
return 0; /* don't bother with other actions */
- }
-
- /* Redirect action */
- if (is_tcf_mirred_egress_redirect(tc_act)) {
- rc = bnxt_tc_parse_redir(bp, actions, tc_act);
+ case FLOW_ACTION_REDIRECT:
+ rc = bnxt_tc_parse_redir(bp, actions, act);
if (rc)
return rc;
- continue;
- }
-
- /* Push/pop VLAN */
- if (is_tcf_vlan(tc_act)) {
- rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE:
+ rc = bnxt_tc_parse_vlan(bp, actions, act);
if (rc)
return rc;
- continue;
- }
-
- /* Tunnel encap */
- if (is_tcf_tunnel_set(tc_act)) {
- rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
+ break;
+ case FLOW_ACTION_TUNNEL_ENCAP:
+ rc = bnxt_tc_parse_tunnel_set(bp, actions, act);
if (rc)
return rc;
- continue;
- }
-
- /* Tunnel decap */
- if (is_tcf_tunnel_release(tc_act)) {
+ break;
+ case FLOW_ACTION_TUNNEL_DECAP:
actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
- continue;
+ break;
+ default:
+ break;
}
}
@@ -177,18 +169,12 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
return 0;
}
-#define GET_KEY(flow_cmd, key_type) \
- skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
- (flow_cmd)->key)
-#define GET_MASK(flow_cmd, key_type) \
- skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
- (flow_cmd)->mask)
-
static int bnxt_tc_parse_flow(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd,
struct bnxt_tc_flow *flow)
{
- struct flow_dissector *dissector = tc_flow_cmd->dissector;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(tc_flow_cmd);
+ struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
@@ -198,143 +184,123 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return -EOPNOTSUPP;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
- struct flow_dissector_key_basic *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- flow->l2_key.ether_type = key->n_proto;
- flow->l2_mask.ether_type = mask->n_proto;
+ flow_rule_match_basic(rule, &match);
+ flow->l2_key.ether_type = match.key->n_proto;
+ flow->l2_mask.ether_type = match.mask->n_proto;
- if (key->n_proto == htons(ETH_P_IP) ||
- key->n_proto == htons(ETH_P_IPV6)) {
- flow->l4_key.ip_proto = key->ip_proto;
- flow->l4_mask.ip_proto = mask->ip_proto;
+ if (match.key->n_proto == htons(ETH_P_IP) ||
+ match.key->n_proto == htons(ETH_P_IPV6)) {
+ flow->l4_key.ip_proto = match.key->ip_proto;
+ flow->l4_mask.ip_proto = match.mask->ip_proto;
}
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
- struct flow_dissector_key_eth_addrs *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+ flow_rule_match_eth_addrs(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
- ether_addr_copy(flow->l2_key.dmac, key->dst);
- ether_addr_copy(flow->l2_mask.dmac, mask->dst);
- ether_addr_copy(flow->l2_key.smac, key->src);
- ether_addr_copy(flow->l2_mask.smac, mask->src);
+ ether_addr_copy(flow->l2_key.dmac, match.key->dst);
+ ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
+ ether_addr_copy(flow->l2_key.smac, match.key->src);
+ ether_addr_copy(flow->l2_mask.smac, match.mask->src);
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
- struct flow_dissector_key_vlan *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+ flow_rule_match_vlan(rule, &match);
flow->l2_key.inner_vlan_tci =
- cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
+ cpu_to_be16(VLAN_TCI(match.key->vlan_id,
+ match.key->vlan_priority));
flow->l2_mask.inner_vlan_tci =
- cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
+ cpu_to_be16((VLAN_TCI(match.mask->vlan_id,
+ match.mask->vlan_priority)));
flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
flow->l2_mask.inner_vlan_tpid = htons(0xffff);
flow->l2_key.num_vlans = 1;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- struct flow_dissector_key_ipv4_addrs *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
- struct flow_dissector_key_ipv4_addrs *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+ flow_rule_match_ipv4_addrs(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
- flow->l3_key.ipv4.daddr.s_addr = key->dst;
- flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
- flow->l3_key.ipv4.saddr.s_addr = key->src;
- flow->l3_mask.ipv4.saddr.s_addr = mask->src;
- } else if (dissector_uses_key(dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- struct flow_dissector_key_ipv6_addrs *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
- struct flow_dissector_key_ipv6_addrs *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
-
+ flow->l3_key.ipv4.daddr.s_addr = match.key->dst;
+ flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst;
+ flow->l3_key.ipv4.saddr.s_addr = match.key->src;
+ flow->l3_mask.ipv4.saddr.s_addr = match.mask->src;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
- flow->l3_key.ipv6.daddr = key->dst;
- flow->l3_mask.ipv6.daddr = mask->dst;
- flow->l3_key.ipv6.saddr = key->src;
- flow->l3_mask.ipv6.saddr = mask->src;
+ flow->l3_key.ipv6.daddr = match.key->dst;
+ flow->l3_mask.ipv6.daddr = match.mask->dst;
+ flow->l3_key.ipv6.saddr = match.key->src;
+ flow->l3_mask.ipv6.saddr = match.mask->src;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
- struct flow_dissector_key_ports *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+ flow_rule_match_ports(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
- flow->l4_key.ports.dport = key->dst;
- flow->l4_mask.ports.dport = mask->dst;
- flow->l4_key.ports.sport = key->src;
- flow->l4_mask.ports.sport = mask->src;
+ flow->l4_key.ports.dport = match.key->dst;
+ flow->l4_mask.ports.dport = match.mask->dst;
+ flow->l4_key.ports.sport = match.key->src;
+ flow->l4_mask.ports.sport = match.mask->src;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
- struct flow_dissector_key_icmp *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
- struct flow_dissector_key_icmp *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+ flow_rule_match_icmp(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
- flow->l4_key.icmp.type = key->type;
- flow->l4_key.icmp.code = key->code;
- flow->l4_mask.icmp.type = mask->type;
- flow->l4_mask.icmp.code = mask->code;
+ flow->l4_key.icmp.type = match.key->type;
+ flow->l4_key.icmp.code = match.key->code;
+ flow->l4_mask.icmp.type = match.mask->type;
+ flow->l4_mask.icmp.code = match.mask->code;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- struct flow_dissector_key_ipv4_addrs *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
- struct flow_dissector_key_ipv4_addrs *mask =
- GET_MASK(tc_flow_cmd,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
- flow->tun_key.u.ipv4.dst = key->dst;
- flow->tun_mask.u.ipv4.dst = mask->dst;
- flow->tun_key.u.ipv4.src = key->src;
- flow->tun_mask.u.ipv4.src = mask->src;
- } else if (dissector_uses_key(dissector,
+ flow->tun_key.u.ipv4.dst = match.key->dst;
+ flow->tun_mask.u.ipv4.dst = match.mask->dst;
+ flow->tun_key.u.ipv4.src = match.key->src;
+ flow->tun_mask.u.ipv4.src = match.mask->src;
+ } else if (flow_rule_match_key(rule,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
return -EOPNOTSUPP;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
- struct flow_dissector_key_keyid *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+ flow_rule_match_enc_keyid(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
- flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
- flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
+ flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid);
+ flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid);
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- struct flow_dissector_key_ports *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
- struct flow_dissector_key_ports *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ struct flow_match_ports match;
+ flow_rule_match_enc_ports(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
- flow->tun_key.tp_dst = key->dst;
- flow->tun_mask.tp_dst = mask->dst;
- flow->tun_key.tp_src = key->src;
- flow->tun_mask.tp_src = mask->src;
+ flow->tun_key.tp_dst = match.key->dst;
+ flow->tun_mask.tp_dst = match.mask->dst;
+ flow->tun_key.tp_src = match.key->src;
+ flow->tun_mask.tp_src = match.mask->src;
}
- return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
+ return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
@@ -1324,7 +1290,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
bnxt_tc_set_flow_dir(bp, flow, src_fid);
if (!bnxt_tc_can_offload(bp, flow)) {
- rc = -ENOSPC;
+ rc = -EOPNOTSUPP;
goto free_node;
}
@@ -1422,8 +1388,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
lastused = flow->lastused;
spin_unlock(&flow->stats_lock);
- tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
- lastused);
+ flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
+ lastused);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 9a25c05aa571..2bdd2da9aac7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -237,21 +237,17 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
-static int bnxt_vf_rep_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
+static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
/* as only PORT_PARENT_ID is supported currently use common code
* between PF and VF-rep for now.
*/
- return bnxt_port_attr_get(vf_rep->bp, attr);
+ return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid);
}
-static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = {
- .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get
-};
-
static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = {
.get_drvinfo = bnxt_vf_rep_get_drvinfo
};
@@ -262,6 +258,7 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
.ndo_start_xmit = bnxt_vf_rep_xmit,
.ndo_get_stats64 = bnxt_vf_rep_get_stats64,
.ndo_setup_tc = bnxt_vf_rep_setup_tc,
+ .ndo_get_port_parent_id = bnxt_vf_rep_get_port_parent_id,
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};
@@ -392,7 +389,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
- SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops);
	/* Just inherit all the features of the parent PF as the VF-R
* uses the RX/TX rings of the parent PF
*/
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index aceb9b7b55bd..51880d83131a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -525,7 +525,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
.asym_pause = 0,
};
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phydev || IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 5db9f4158e62..134ae2862efa 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
* for transmits, we just free buffers.
*/
- dev_kfree_skb_irq(sb);
+ dev_consume_skb_irq(sb);
/*
* .. and advance to the next buffer.
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b1397af81f7..328373e0578f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -721,7 +721,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
- /* else: fall through */
+ /* fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -782,7 +782,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
- /* else: fall through */
+ /* fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
if (!i && tg3_flag(tp, ENABLE_RSS))
continue;
- tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping,
- GFP_KERNEL);
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
}
@@ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping, GFP_KERNEL);
+ tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping, GFP_KERNEL);
if (!tp->hw_stats)
goto err_out;
@@ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
- tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping,
- GFP_KERNEL);
+ tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
if (!tnapi->hw_status)
goto err_out;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index a36e38676640..84741d288ffa 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -743,7 +743,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_TIMEOUT:
bfa_nw_ioc_hw_sem_release(ioc);
- bfa_ioc_pf_failed(ioc);
+ bfa_ioc_pf_failed(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
@@ -788,9 +788,8 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_INITFAIL:
del_timer(&ioc->iocpf_timer);
- /*
- * !!! fall through !!!
- */
+ /* fall through */
+
case IOCPF_E_TIMEOUT:
bfa_nw_ioc_hw_sem_release(ioc);
if (event == IOCPF_E_TIMEOUT)
@@ -858,9 +857,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
case IOCPF_E_FAIL:
del_timer(&ioc->iocpf_timer);
- /*
- * !!! fall through !!!
- */
+	/* fall through */
case IOCPF_E_TIMEOUT:
bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 3d45f4c92cf6..acc66a7e7b95 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -643,6 +643,7 @@
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
+#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -714,6 +715,8 @@
__v; \
})
+#define MACB_READ_NSR(bp) macb_readl(bp, NSR)
+
/* struct macb_dma_desc - Hardware DMA descriptor
* @addr: DMA address of data buffer
* @ctrl: Control and status bits
@@ -1082,7 +1085,7 @@ struct macb_config {
unsigned int dma_burst_length;
int (*clk_init)(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
- struct clk **rx_clk);
+ struct clk **rx_clk, struct clk **tsu_clk);
int (*init)(struct platform_device *pdev);
int jumbo_max_len;
};
@@ -1162,6 +1165,7 @@ struct macb {
struct clk *hclk;
struct clk *tx_clk;
struct clk *rx_clk;
+ struct clk *tsu_clk;
struct net_device *dev;
union {
struct macb_stats macb;
@@ -1214,6 +1218,8 @@ struct macb {
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
+
+ u32 rx_intr_mask;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b126926ef7f5..ad099fd01b45 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -36,6 +36,8 @@
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
#include "macb.h"
#define MACB_RX_BUFFER_SIZE 128
@@ -56,8 +58,7 @@
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
-#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
- | MACB_BIT(ISR_ROVR))
+#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
| MACB_BIT(ISR_RLE) \
| MACB_BIT(TXERR))
@@ -80,6 +81,10 @@
*/
#define MACB_HALT_TIMEOUT 1230
+#define MACB_PM_TIMEOUT 100 /* ms */
+
+#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
+
/* DMA buffer descriptor might be different size
* depends on hardware configuration:
*
@@ -319,10 +324,26 @@ static void macb_get_hwaddr(struct macb *bp)
eth_hw_addr_random(bp->dev);
}
+static int macb_mdio_wait_for_idle(struct macb *bp)
+{
+ u32 val;
+
+ return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
+ 1, MACB_MDIO_TIMEOUT);
+}
+
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct macb *bp = bus->priv;
- int value;
+ int status;
+
+ status = pm_runtime_get_sync(&bp->pdev->dev);
+ if (status < 0)
+ goto mdio_pm_exit;
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
| MACB_BF(RW, MACB_MAN_READ)
@@ -330,19 +351,32 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
| MACB_BF(REGA, regnum)
| MACB_BF(CODE, MACB_MAN_CODE)));
- /* wait for end of transfer */
- while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
- cpu_relax();
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
- value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+ status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
- return value;
+mdio_read_exit:
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
}
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_get_sync(&bp->pdev->dev);
+ if (status < 0)
+ goto mdio_pm_exit;
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
| MACB_BF(RW, MACB_MAN_WRITE)
@@ -351,11 +385,15 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
| MACB_BF(CODE, MACB_MAN_CODE)
| MACB_BF(DATA, value)));
- /* wait for end of transfer */
- while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
- cpu_relax();
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
- return 0;
+mdio_write_exit:
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
}
/**
@@ -1270,7 +1308,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
napi_reschedule(napi);
} else {
- queue_writel(queue, IER, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IER, bp->rx_intr_mask);
}
}
@@ -1288,7 +1326,7 @@ static void macb_hresp_error_task(unsigned long data)
u32 ctrl;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ queue_writel(queue, IDR, bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -1318,7 +1356,7 @@ static void macb_hresp_error_task(unsigned long data)
/* Enable interrupts */
queue_writel(queue, IER,
- MACB_RX_INT_FLAGS |
+ bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -1372,14 +1410,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
(unsigned int)(queue - bp->queues),
(unsigned long)status);
- if (status & MACB_RX_INT_FLAGS) {
+ if (status & bp->rx_intr_mask) {
/* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
* now.
*/
- queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IDR, bp->rx_intr_mask);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
@@ -1412,8 +1450,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
/* There is a hardware issue under heavy load where DMA can
* stop, this causes endless "used buffer descriptor read"
* interrupts but it can be cleared by re-enabling RX. See
- * the at91 manual, section 41.3.1 or the Zynq manual
- * section 16.7.4 for details.
+ * the at91rm9200 manual, section 41.3.1 or the Zynq manual
+ * section 16.7.4 for details. RXUBR is only enabled for
+ * these two versions.
*/
if (status & MACB_BIT(RXUBR)) {
ctrl = macb_readl(bp, NCR);
@@ -1734,16 +1773,12 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
if (!nskb)
return -ENOMEM;
- dev_kfree_skb_any(*skb);
+ dev_consume_skb_any(*skb);
*skb = nskb;
}
- if (padlen) {
- if (padlen >= ETH_FCS_LEN)
- skb_put_zero(*skb, padlen - ETH_FCS_LEN);
- else
- skb_trim(*skb, ETH_FCS_LEN - padlen);
- }
+ if (padlen > ETH_FCS_LEN)
+ skb_put_zero(*skb, padlen - ETH_FCS_LEN);
add_fcs:
/* set FCS to packet */
@@ -2263,7 +2298,7 @@ static void macb_init_hw(struct macb *bp)
/* Enable interrupts */
queue_writel(queue, IER,
- MACB_RX_INT_FLAGS |
+ bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -2401,12 +2436,18 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
+ err = pm_runtime_get_sync(&bp->pdev->dev);
+ if (err < 0)
+ goto pm_exit;
+
/* carrier starts down */
netif_carrier_off(dev);
	/* if the phy is not yet registered, retry later */
- if (!dev->phydev)
- return -EAGAIN;
+ if (!dev->phydev) {
+ err = -EAGAIN;
+ goto pm_exit;
+ }
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -2415,7 +2456,7 @@ static int macb_open(struct net_device *dev)
if (err) {
netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
err);
- return err;
+ goto pm_exit;
}
bp->macbgem_ops.mog_init_rings(bp);
@@ -2432,6 +2473,11 @@ static int macb_open(struct net_device *dev)
if (bp->ptp_info)
bp->ptp_info->ptp_init(dev);
+pm_exit:
+ if (err) {
+ pm_runtime_put_sync(&bp->pdev->dev);
+ return err;
+ }
return 0;
}
@@ -2460,6 +2506,8 @@ static int macb_close(struct net_device *dev)
if (bp->ptp_info)
bp->ptp_info->ptp_remove(dev);
+ pm_runtime_put(&bp->pdev->dev);
+
return 0;
}
@@ -3308,7 +3356,7 @@ static void macb_probe_queues(void __iomem *mem,
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
- struct clk **rx_clk)
+ struct clk **rx_clk, struct clk **tsu_clk)
{
struct macb_platform_data *pdata;
int err;
@@ -3342,6 +3390,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
if (IS_ERR(*rx_clk))
*rx_clk = NULL;
+ *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
+ if (IS_ERR(*tsu_clk))
+ *tsu_clk = NULL;
+
err = clk_prepare_enable(*pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
@@ -3366,8 +3418,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
goto err_disable_txclk;
}
+ err = clk_prepare_enable(*tsu_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tsu_clk (%u)\n", err);
+ goto err_disable_rxclk;
+ }
+
return 0;
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+
err_disable_txclk:
clk_disable_unprepare(*tx_clk);
@@ -3677,9 +3738,9 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
/* Store packet information (to free when Tx completed) */
lp->skb = skb;
lp->skb_length = skb->len;
- lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+ lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
netdev_err(dev, "%s: DMA mapping error\n", __func__);
@@ -3767,9 +3828,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
dev->stats.tx_errors++;
if (lp->skb) {
- dev_kfree_skb_irq(lp->skb);
+ dev_consume_skb_irq(lp->skb);
lp->skb = NULL;
- dma_unmap_single(NULL, lp->skb_physaddr,
+ dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
lp->skb_length, DMA_TO_DEVICE);
dev->stats.tx_packets++;
dev->stats.tx_bytes += lp->skb_length;
@@ -3818,13 +3879,14 @@ static const struct net_device_ops at91ether_netdev_ops = {
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
- struct clk **rx_clk)
+ struct clk **rx_clk, struct clk **tsu_clk)
{
int err;
*hclk = NULL;
*tx_clk = NULL;
*rx_clk = NULL;
+ *tsu_clk = NULL;
*pclk = devm_clk_get(&pdev->dev, "ether_clk");
if (IS_ERR(*pclk))
@@ -3911,6 +3973,7 @@ static const struct macb_config sama5d4_config = {
};
static const struct macb_config emac_config = {
+ .caps = MACB_CAPS_NEEDS_RSTONUBR,
.clk_init = at91ether_clk_init,
.init = at91ether_init,
};
@@ -3932,7 +3995,8 @@ static const struct macb_config zynqmp_config = {
};
static const struct macb_config zynq_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
+ MACB_CAPS_NEEDS_RSTONUBR,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -3945,6 +4009,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,np4-macb", .data = &np4_config },
{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
{ .compatible = "cdns,gem", .data = &pc302gem_config },
+ { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
@@ -3972,11 +4037,12 @@ static int macb_probe(struct platform_device *pdev)
{
const struct macb_config *macb_config = &default_gem_config;
int (*clk_init)(struct platform_device *, struct clk **,
- struct clk **, struct clk **, struct clk **)
- = macb_config->clk_init;
+ struct clk **, struct clk **, struct clk **,
+ struct clk **) = macb_config->clk_init;
int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
+ struct clk *tsu_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
bool native_io;
@@ -4004,10 +4070,15 @@ static int macb_probe(struct platform_device *pdev)
}
}
- err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
+ err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
if (err)
return err;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
native_io = hw_is_native_io(mem);
macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
@@ -4041,6 +4112,7 @@ static int macb_probe(struct platform_device *pdev)
bp->hclk = hclk;
bp->tx_clk = tx_clk;
bp->rx_clk = rx_clk;
+ bp->tsu_clk = tsu_clk;
if (macb_config)
bp->jumbo_max_len = macb_config->jumbo_max_len;
@@ -4087,6 +4159,10 @@ static int macb_probe(struct platform_device *pdev)
macb_dma_desc_get_size(bp);
}
+ bp->rx_intr_mask = MACB_RX_INT_FLAGS;
+ if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
+ bp->rx_intr_mask |= MACB_BIT(RXUBR);
+
mac = of_get_mac_address(np);
if (mac) {
ether_addr_copy(bp->dev->dev_addr, mac);
@@ -4138,6 +4214,9 @@ static int macb_probe(struct platform_device *pdev)
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+
return 0;
err_out_unregister_mdio:
@@ -4156,6 +4235,10 @@ err_disable_clocks:
clk_disable_unprepare(hclk);
clk_disable_unprepare(pclk);
clk_disable_unprepare(rx_clk);
+ clk_disable_unprepare(tsu_clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return err;
}
@@ -4179,10 +4262,16 @@ static int macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
- clk_disable_unprepare(bp->rx_clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ if (!pm_runtime_suspended(&pdev->dev)) {
+ clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
+ clk_disable_unprepare(bp->rx_clk);
+ clk_disable_unprepare(bp->tsu_clk);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
of_node_put(bp->phy_node);
free_netdev(dev);
}
@@ -4194,21 +4283,36 @@ static int __maybe_unused macb_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
+ struct macb_queue *queue = bp->queues;
+ unsigned long flags;
+ unsigned int q;
+
+ if (!netif_running(netdev))
+ return 0;
- netif_carrier_off(netdev);
- netif_device_detach(netdev);
if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IER, MACB_BIT(WOL));
macb_writel(bp, WOL, MACB_BIT(MAG));
enable_irq_wake(bp->queues[0].irq);
+ netif_device_detach(netdev);
} else {
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
- clk_disable_unprepare(bp->rx_clk);
+ netif_device_detach(netdev);
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_disable(&queue->napi);
+ phy_stop(netdev->phydev);
+ phy_suspend(netdev->phydev);
+ spin_lock_irqsave(&bp->lock, flags);
+ macb_reset_hw(bp);
+ spin_unlock_irqrestore(&bp->lock, flags);
}
+ netif_carrier_off(netdev);
+ if (bp->ptp_info)
+ bp->ptp_info->ptp_remove(netdev);
+ pm_runtime_force_suspend(dev);
+
return 0;
}
@@ -4216,24 +4320,76 @@ static int __maybe_unused macb_resume(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
+ struct macb_queue *queue = bp->queues;
+ unsigned int q;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ pm_runtime_force_resume(dev);
if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IDR, MACB_BIT(WOL));
macb_writel(bp, WOL, 0);
disable_irq_wake(bp->queues[0].irq);
} else {
+ macb_writel(bp, NCR, MACB_BIT(MPE));
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_enable(&queue->napi);
+ phy_resume(netdev->phydev);
+ phy_init_hw(netdev->phydev);
+ phy_start(netdev->phydev);
+ }
+
+ bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_hw(bp);
+ macb_set_rx_mode(netdev);
+ netif_device_attach(netdev);
+ if (bp->ptp_info)
+ bp->ptp_info->ptp_init(netdev);
+
+ return 0;
+}
+
+static int __maybe_unused macb_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+
+ if (!(device_may_wakeup(&bp->dev->dev))) {
+ clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
+ clk_disable_unprepare(bp->rx_clk);
+ }
+ clk_disable_unprepare(bp->tsu_clk);
+
+ return 0;
+}
+
+static int __maybe_unused macb_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+
+ if (!(device_may_wakeup(&bp->dev->dev))) {
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
}
-
- netif_device_attach(netdev);
+ clk_prepare_enable(bp->tsu_clk);
return 0;
}
-static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
+static const struct dev_pm_ops macb_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
+ SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
+};
static struct platform_driver macb_driver = {
.probe = macb_probe,
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 5f03199a3acf..6650e2a5f171 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -54,7 +54,6 @@ config CAVIUM_PTP
tristate "Cavium PTP coprocessor as PTP clock"
depends on 64BIT && PCI
imply PTP_1588_CLOCK
- default y
---help---
This driver adds support for the Precision Time Protocol Clocks and
Timestamping coprocessor (PTP) found on Cavium processors.
@@ -65,7 +64,6 @@ config CAVIUM_PTP
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT && PCI
- depends on MAY_USE_DEVLINK
depends on PCI
imply PTP_1588_CLOCK
select FW_LOADER
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 9f4f3c1d5043..43d11c38b38a 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1450,7 +1450,7 @@ void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
mbox_cmd.fn = NULL;
- mbox_cmd.fn_arg = 0;
+ mbox_cmd.fn_arg = NULL;
ether_addr_copy(mbox_cmd.msg.s.params, mac);
mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
octeon_mbox_write(oct, &mbox_cmd);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 825a28e5b544..e21bf3724611 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -661,7 +661,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
(((rh->r_dh.encap_on) &&
(rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
(!(rh->r_dh.encap_on) &&
- (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
+ ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
+ CNNIC_CSUM_VERIFIED))))
/* checksum has already been verified */
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 3d24133e5e49..9b7819fdc9de 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -21,7 +21,6 @@
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
-#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -2908,7 +2907,7 @@ static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.param2 = enable;
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3184,7 +3183,8 @@ static const struct devlink_ops liquidio_devlink_ops = {
};
static int
-lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+liquidio_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct lio *lio = GET_LIO(dev);
struct octeon_device *oct = lio->oct_dev;
@@ -3192,24 +3192,12 @@ lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return -EOPNOTSUPP;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id,
- (void *)&lio->linfo.hw_addr + 2);
- break;
-
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = ETH_ALEN;
+ ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
return 0;
}
-static const struct switchdev_ops lio_pf_switchdev_ops = {
- .switchdev_port_attr_get = lio_pf_switchdev_attr_get,
-};
-
static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
struct ifla_vf_stats *vf_stats)
{
@@ -3259,6 +3247,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_vf_trust = liquidio_set_vf_trust,
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
.ndo_get_vf_stats = liquidio_get_vf_stats,
+ .ndo_get_port_parent_id = liquidio_get_port_parent_id,
};
/** \brief Entry point for the liquidio module
@@ -3534,7 +3523,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
* netdev tasks.
*/
netdev->netdev_ops = &lionetdevops;
- SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
if (retval) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index de61060721c4..f3f2e71431ac 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -25,7 +25,6 @@
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
-#include <net/switchdev.h>
#include "lio_vf_rep.h"
static int lio_vf_rep_open(struct net_device *ndev);
@@ -38,6 +37,8 @@ static int lio_vf_rep_phys_port_name(struct net_device *dev,
static void lio_vf_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
+static int lio_vf_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
static const struct net_device_ops lio_vf_rep_ndev_ops = {
.ndo_open = lio_vf_rep_open,
@@ -47,6 +48,7 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = {
.ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
.ndo_get_stats64 = lio_vf_rep_get_stats64,
.ndo_change_mtu = lio_vf_rep_change_mtu,
+ .ndo_get_port_parent_id = lio_vf_get_port_parent_id,
};
static int
@@ -443,31 +445,19 @@ xmit_failed:
return NETDEV_TX_OK;
}
-static int
-lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int lio_vf_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
struct net_device *parent_ndev = vf_rep->parent_ndev;
struct lio *lio = GET_LIO(parent_ndev);
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id,
- (void *)&lio->linfo.hw_addr + 2);
- break;
-
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = ETH_ALEN;
+ ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
return 0;
}
-static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
- .switchdev_port_attr_get = lio_vf_rep_attr_get,
-};
-
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
@@ -524,7 +514,6 @@ lio_vf_rep_create(struct octeon_device *oct)
ndev->min_mtu = LIO_MIN_MTU_SIZE;
ndev->max_mtu = LIO_MAX_MTU_SIZE;
ndev->netdev_ops = &lio_vf_rep_ndev_ops;
- SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);
vf_rep = netdev_priv(ndev);
memset(vf_rep, 0, sizeof(*vf_rep));
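For reference, the ndo_get_port_parent_id callback used in both conversions above has a very small contract: fill in a netdev_phys_item_id that is identical for every port of the same switch ASIC. A minimal sketch follows (hypothetical foo_* names and private struct, not part of this patch); deriving the ID from the device's MAC address, as liquidio does, is just one convention.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical per-netdev private data; only the HW MAC is needed here. */
struct foo_priv {
	u8 hw_addr[ETH_ALEN];
};

static int foo_get_port_parent_id(struct net_device *dev,
				  struct netdev_phys_item_id *ppid)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Every port of the same ASIC must report the same ID. */
	ppid->id_len = ETH_ALEN;
	ether_addr_copy(ppid->id, priv->hw_addr);
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_port_parent_id = foo_get_port_parent_id,
};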
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..62636c1ed141 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
};
struct nicvf_work {
- struct delayed_work work;
+ struct work_struct work;
u8 mode;
struct xcast_addr_list *mc;
};
@@ -327,7 +327,11 @@ struct nicvf {
struct nicvf_work rx_mode_work;
/* spinlock to protect workqueue arguments from concurrent access */
spinlock_t rx_mode_wq_lock;
-
+ /* workqueue for handling kernel ndo_set_rx_mode() calls */
+ struct workqueue_struct *nicvf_rx_mode_wq;
+ /* mutex to protect VF's mailbox contents from concurrent access */
+ struct mutex rx_mode_mtx;
+ struct delayed_work link_change_work;
/* PTP timestamp */
struct cavium_ptp *ptp_clock;
/* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
struct xcast {
u8 msg;
- union {
- u8 mode;
- u64 mac;
- } data;
+ u8 mode;
+ u64 mac:48;
};
/* 128 bit shared memory between PF and each VF */
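The reworked xcast message above drops the union and packs the mode byte and a 48-bit MAC side by side, which still has to fit the 128-bit mailbox slot described in the comment. A rough compile-time check, for illustration only (struct and function names are made up, and the exact size depends on the compiler's bitfield packing):

#include <linux/build_bug.h>
#include <linux/types.h>

/* Illustrative copy of the reworked layout (names are made up). */
struct xcast_example {
	u8 msg;
	u8 mode;
	u64 mac:48;		/* 48-bit MAC packed next to msg/mode */
};

static inline void xcast_example_size_check(void)
{
	/* Each PF<->VF mailbox slot is 128 bits (16 bytes). */
	BUILD_BUG_ON(sizeof(struct xcast_example) > 16);
}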
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..c90252829ed3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -57,14 +57,8 @@ struct nicpf {
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
u8 *vf_lmac_map;
- struct delayed_work dwork;
- struct workqueue_struct *check_link;
- u8 *link;
- u8 *duplex;
- u32 *speed;
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
- bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
/* MSI-X */
u8 num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
}
+/* Get BGX LMAC link status and update the corresponding VF
+ * if there is a change; valid only if the internal L2 switch
+ * is not present, otherwise the VF link is always treated as up
+ */
+static void nic_link_status_get(struct nicpf *nic, u8 vf)
+{
+ union nic_mbx mbx = {};
+ struct bgx_link_status link;
+ u8 bgx, lmac;
+
+ mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+ /* Get BGX, LMAC indices for the VF */
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ /* Get interface link status */
+ bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+ /* Send a mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ mbx.link_status.mac_type = link.mac_type;
+
+ /* reply with link status */
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
int i;
int ret = 0;
- nic->mbx_lock[vf] = true;
-
mbx_addr = nic_get_mbx_addr(vf);
mbx_data = (u64 *)&mbx;
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
- if (vf < nic->num_vf_en) {
- nic->link[vf] = 0;
- nic->duplex[vf] = 0;
- nic->speed[vf] = 0;
- }
- goto unlock;
+ return;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_RSS_SIZE:
nic_send_rss_size(nic, vf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_RSS_CFG:
case NIC_MBOX_MSG_RSS_CFG_CONT:
nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
nic_enable_vf(nic, vf, true);
- goto unlock;
+ break;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
- goto unlock;
+ return;
case NIC_MBOX_MSG_NICVF_PTR:
nic->nicvf[vf] = mbx.nicvf.nicvf;
break;
case NIC_MBOX_MSG_PNICVF_PTR:
nic_send_pnicvf(nic, vf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_SNICVF_PTR:
nic_send_snicvf(nic, &mbx.nicvf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_BGX_STATS:
nic_get_bgx_stats(nic, &mbx.bgx_stats);
- goto unlock;
+ return;
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
- goto unlock;
+ return;
case NIC_MBOX_MSG_PTP_CFG:
nic_config_timestamp(nic, vf, &mbx.ptp);
break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
- mbx.xcast.data.mac,
+ mbx.xcast.mac,
vf < NIC_VF_PER_MBX_REG ? vf :
vf - NIC_VF_PER_MBX_REG);
break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
}
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode);
+ bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
+ break;
+ }
+ nic_link_status_get(nic, vf);
+ return;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
mbx.msg.msg, vf);
nic_mbx_send_nack(nic, vf);
}
-unlock:
- nic->mbx_lock[vf] = false;
}
static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
return 0;
}
-/* Poll for BGX LMAC link status and update corresponding VF
- * if there is a change, valid only if internal L2 switch
- * is not present otherwise VF link is always treated as up
- */
-static void nic_poll_for_link(struct work_struct *work)
-{
- union nic_mbx mbx = {};
- struct nicpf *nic;
- struct bgx_link_status link;
- u8 vf, bgx, lmac;
-
- nic = container_of(work, struct nicpf, dwork.work);
-
- mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
-
- for (vf = 0; vf < nic->num_vf_en; vf++) {
- /* Poll only if VF is UP */
- if (!nic->vf_enabled[vf])
- continue;
-
- /* Get BGX, LMAC indices for the VF */
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- /* Get interface link status */
- bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
-
- /* Inform VF only if link status changed */
- if (nic->link[vf] == link.link_up)
- continue;
-
- if (!nic->mbx_lock[vf]) {
- nic->link[vf] = link.link_up;
- nic->duplex[vf] = link.duplex;
- nic->speed[vf] = link.speed;
-
- /* Send a mbox message to VF with current link status */
- mbx.link_status.link_up = link.link_up;
- mbx.link_status.duplex = link.duplex;
- mbx.link_status.speed = link.speed;
- mbx.link_status.mac_type = link.mac_type;
- nic_send_msg_to_vf(nic, vf, &mbx);
- }
- }
- queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
-}
-
static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!nic->vf_lmac_map)
goto err_release_regions;
- nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->link)
- goto err_release_regions;
-
- nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->duplex)
- goto err_release_regions;
-
- nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
- if (!nic->speed)
- goto err_release_regions;
-
/* Initialize hardware */
nic_init_hw(nic);
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_unregister_interrupts;
- /* Register a physical link status poll fn() */
- nic->check_link = alloc_workqueue("check_link_status",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
- if (!nic->check_link) {
- err = -ENOMEM;
- goto err_disable_sriov;
- }
-
- INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
- queue_delayed_work(nic->check_link, &nic->dwork, 0);
-
return 0;
-err_disable_sriov:
- if (nic->flags & NIC_SRIOV_ENABLED)
- pci_disable_sriov(pdev);
err_unregister_interrupts:
nic_unregister_interrupts(nic);
err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
if (nic->flags & NIC_SRIOV_ENABLED)
pci_disable_sriov(pdev);
- if (nic->check_link) {
- /* Destroy work Queue */
- cancel_delayed_work_sync(&nic->dwork);
- destroy_workqueue(nic->check_link);
- }
-
nic_unregister_interrupts(nic);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..503cfadff4ac 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
-/* workqueue for handling kernel ndo_set_rx_mode() calls */
-static struct workqueue_struct *nicvf_rx_mode_wq;
-
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
int timeout = NIC_MBOX_MSG_TIMEOUT;
int sleep = 10;
+ int ret = 0;
+
+ mutex_lock(&nic->rx_mode_mtx);
nic->pf_acked = false;
nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
netdev_err(nic->netdev,
"PF NACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
msleep(sleep);
if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
netdev_err(nic->netdev,
"PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
- return -EBUSY;
+ ret = -EBUSY;
+ break;
}
}
- return 0;
+ mutex_unlock(&nic->rx_mode_mtx);
+ return ret;
}
/* Checks if VF is able to communicate with PF
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
return 1;
}
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to CFG DONE msg\n");
+ }
+}
+
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
if (bgx->rx)
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
break;
case NIC_MBOX_MSG_BGX_LINK_CHANGE:
nic->pf_acked = true;
- nic->link_up = mbx.link_status.link_up;
- nic->duplex = mbx.link_status.duplex;
- nic->speed = mbx.link_status.speed;
- nic->mac_type = mbx.link_status.mac_type;
- if (nic->link_up) {
- netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
- nic->speed,
- nic->duplex == DUPLEX_FULL ?
- "Full" : "Half");
- netif_carrier_on(nic->netdev);
- netif_tx_start_all_queues(nic->netdev);
- } else {
- netdev_info(nic->netdev, "Link is Down\n");
- netif_carrier_off(nic->netdev);
- netif_tx_stop_all_queues(nic->netdev);
+ if (nic->link_up != mbx.link_status.link_up) {
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ nic->mac_type = mbx.link_status.mac_type;
+ if (nic->link_up) {
+ netdev_info(nic->netdev,
+ "Link is Up %d Mbps %s duplex\n",
+ nic->speed,
+ nic->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
+ netif_carrier_on(nic->netdev);
+ netif_tx_start_all_queues(nic->netdev);
+ } else {
+ netdev_info(nic->netdev, "Link is Down\n");
+ netif_carrier_off(nic->netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ }
}
break;
case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
+ cancel_delayed_work_sync(&nic->link_change_work);
+
+ /* wait till all queued set_rx_mode tasks complete */
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+
mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
return nicvf_send_msg_to_pf(nic, &mbx);
}
+static void nicvf_link_status_check_task(struct work_struct *work_arg)
+{
+ struct nicvf *nic = container_of(work_arg,
+ struct nicvf,
+ link_change_work.work);
+ union nic_mbx mbx = {};
+ mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 2 * HZ);
+}
+
int nicvf_open(struct net_device *netdev)
{
int cpu, err, qidx;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
- union nic_mbx mbx = {};
+
+ /* wait till all queued set_rx_mode tasks complete, if any */
+ drain_workqueue(nic->nicvf_rx_mode_wq);
netif_carrier_off(netdev);
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
/* Send VF config done msg to PF */
- mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
- nicvf_write_to_mbx(nic, &mbx);
+ nicvf_send_cfg_done(nic);
+
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
return 0;
cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
/* flush DMAC filters and reset RX mode */
mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
- nicvf_send_msg_to_pf(nic, &mbx);
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
if (mode & BGX_XCAST_MCAST_FILTER) {
/* once enabling filtering, we need to signal to PF to add
* its own LMAC to the filter to accept packets for it.
*/
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = 0;
- nicvf_send_msg_to_pf(nic, &mbx);
+ mbx.xcast.mac = 0;
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
}
/* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
/* now go through kernel list of MACs and add them one by one */
for (idx = 0; idx < mc_addrs->count; idx++) {
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = mc_addrs->mc[idx];
- nicvf_send_msg_to_pf(nic, &mbx);
+ mbx.xcast.mac = mc_addrs->mc[idx];
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
}
- kfree(mc_addrs);
}
/* and finally set rx mode for PF accordingly */
mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
- mbx.xcast.data.mode = mode;
+ mbx.xcast.mode = mode;
nicvf_send_msg_to_pf(nic, &mbx);
+free_mc:
+ kfree(mc_addrs);
}
static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
{
struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
- work.work);
+ work);
struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
u8 mode;
struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
kfree(nic->rx_mode_work.mc);
nic->rx_mode_work.mc = mc_list;
nic->rx_mode_work.mode = mode;
- queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+ queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
spin_unlock(&nic->rx_mode_wq_lock);
}
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&nic->reset_task, nicvf_reset_task);
- INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+ nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
+ WQ_MEM_RECLAIM,
+ nic->vf_id);
+ INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
spin_lock_init(&nic->rx_mode_wq_lock);
+ mutex_init(&nic->rx_mode_mtx);
err = register_netdev(netdev);
if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
nic = netdev_priv(netdev);
pnetdev = nic->pnicvf->netdev;
- cancel_delayed_work_sync(&nic->rx_mode_work.work);
-
/* Check if this Qset is assigned to a different VF.
* If yes, clean primary and all secondary Qsets.
*/
if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
unregister_netdev(pnetdev);
+ if (nic->nicvf_rx_mode_wq) {
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
+ nic->nicvf_rx_mode_wq = NULL;
+ }
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
static int __init nicvf_init_module(void)
{
pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
- nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
- WQ_MEM_RECLAIM);
return pci_register_driver(&nicvf_driver);
}
static void __exit nicvf_cleanup_module(void)
{
- if (nicvf_rx_mode_wq) {
- destroy_workqueue(nicvf_rx_mode_wq);
- nicvf_rx_mode_wq = NULL;
- }
pci_unregister_driver(&nicvf_driver);
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index fcaf18fa3904..5b4d3badcb73 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
dmem->q_len = q_len;
dmem->size = (desc_size * q_len) + align_bytes;
/* Save address, need it while freeing */
- dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+ dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
&dmem->dma, GFP_KERNEL);
if (!dmem->unalign_base)
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index e337da6ba2a4..673c57b8023f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
/* Disable MAC steering (NCSI traffic) */
for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
- bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
}
static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cbdd20b9ee6f..5cbc54e9eb19 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -60,7 +60,7 @@
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49)
#define RX_DMAC_COUNT 32
-#define BGX_CMR_RX_STREERING 0x300
+#define BGX_CMR_RX_STEERING 0x300
#define RX_TRAFFIC_STEER_RULE_COUNT 8
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 30de26ef3da4..47b5c8e2104b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -585,8 +585,7 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
sizeof(struct cpl_rx_data) +
sge->freelQ[!sge->jumbo_fl].dma_offset;
- size = (16 * 1024) -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 20b6e1b3f5e3..89db739b7819 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
{
size_t len = nelem * elem_size;
void *s = NULL;
- void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+ void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
@@ -2381,7 +2381,7 @@ no_mem:
lro_add_page(adap, qs, fl,
G_RSPD_LEN(len),
flags & F_RSPD_EOP);
- goto next_fl;
+ goto next_fl;
}
skb = get_packet_pg(adap, fl, q,
@@ -3214,11 +3214,13 @@ void t3_start_sge_timers(struct adapter *adap)
for (i = 0; i < SGE_QSETS; ++i) {
struct sge_qset *q = &adap->sge.qs[i];
- if (q->tx_reclaim_timer.function)
- mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+ if (q->tx_reclaim_timer.function)
+ mod_timer(&q->tx_reclaim_timer,
+ jiffies + TX_RECLAIM_PERIOD);
- if (q->rx_reclaim_timer.function)
- mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+ if (q->rx_reclaim_timer.function)
+ mod_timer(&q->rx_reclaim_timer,
+ jiffies + RX_RECLAIM_PERIOD);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 080918af773c..0a9f2c596624 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1082,7 +1082,7 @@ int t3_check_fw_version(struct adapter *adapter)
CH_WARN(adapter, "found newer FW version(%u.%u), "
"driver compiled for version %u.%u\n", major, minor,
FW_VERSION_MAJOR, FW_VERSION_MINOR);
- return 0;
+ return 0;
}
return -EINVAL;
}
@@ -3619,7 +3619,7 @@ int t3_reset_adapter(struct adapter *adapter)
static int init_parity(struct adapter *adap)
{
- int i, err, addr;
+ int i, err, addr;
if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
return -EBUSY;
@@ -3806,6 +3806,6 @@ int t3_replay_prep_adapter(struct adapter *adapter)
p->phy.ops->power_down(&p->phy, 1);
}
-return 0;
+ return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 5701272aa7f7..ce28820c57c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -289,8 +289,7 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
return NULL;
- ctbl = kvzalloc(sizeof(*ctbl) +
- clipt_size*sizeof(struct list_head), GFP_KERNEL);
+ ctbl = kvzalloc(struct_size(ctbl, hash_list, clipt_size), GFP_KERNEL);
if (!ctbl)
return NULL;
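The struct_size() helper used above comes from <linux/overflow.h>: for a struct ending in a flexible array member it computes sizeof(*ptr) plus count * sizeof(element) with overflow checking, replacing the open-coded sizeof(*ctbl) + clipt_size * sizeof(struct list_head). A small illustrative allocator using a hypothetical table type:

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical hash table ending in a flexible array of buckets. */
struct foo_tbl {
	unsigned int nbuckets;
	struct list_head hash_list[];
};

static struct foo_tbl *foo_tbl_alloc(unsigned int nbuckets)
{
	struct foo_tbl *t;
	unsigned int i;

	/* struct_size() checks nbuckets * sizeof(bucket) for overflow. */
	t = kvzalloc(struct_size(t, hash_list, nbuckets), GFP_KERNEL);
	if (!t)
		return NULL;

	t->nbuckets = nbuckets;
	for (i = 0; i < nbuckets; i++)
		INIT_LIST_HEAD(&t->hash_list[i]);
	return t;
}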
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 127b1f624413..7c5bfc931128 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -81,7 +81,7 @@ static int is_fw_attached(struct cudbg_init *pdbg_init)
{
struct adapter *padap = pdbg_init->adap;
- if (!(padap->flags & FW_OK) || padap->use_bd)
+ if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
return 0;
return 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2d1ca920601e..956219c178e1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -568,7 +568,7 @@ struct sge_rspq;
struct port_info {
struct adapter *adapter;
u16 viid;
- s16 xact_addr_filt; /* index of exact MAC address filter */
+ int xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
s8 mdio_addr;
enum fw_port_type port_type;
@@ -606,17 +606,18 @@ struct dentry;
struct work_struct;
enum { /* adapter flags */
- FULL_INIT_DONE = (1 << 0),
- DEV_ENABLED = (1 << 1),
- USING_MSI = (1 << 2),
- USING_MSIX = (1 << 3),
- FW_OK = (1 << 4),
- RSS_TNLALLLOOKUP = (1 << 5),
- USING_SOFT_PARAMS = (1 << 6),
- MASTER_PF = (1 << 7),
- FW_OFLD_CONN = (1 << 9),
- ROOT_NO_RELAXED_ORDERING = (1 << 10),
- SHUTTING_DOWN = (1 << 11),
+ CXGB4_FULL_INIT_DONE = (1 << 0),
+ CXGB4_DEV_ENABLED = (1 << 1),
+ CXGB4_USING_MSI = (1 << 2),
+ CXGB4_USING_MSIX = (1 << 3),
+ CXGB4_FW_OK = (1 << 4),
+ CXGB4_RSS_TNLALLLOOKUP = (1 << 5),
+ CXGB4_USING_SOFT_PARAMS = (1 << 6),
+ CXGB4_MASTER_PF = (1 << 7),
+ CXGB4_FW_OFLD_CONN = (1 << 9),
+ CXGB4_ROOT_NO_RELAXED_ORDERING = (1 << 10),
+ CXGB4_SHUTTING_DOWN = (1 << 11),
+ CXGB4_SGE_DBQ_TIMER = (1 << 12),
};
enum {
@@ -756,6 +757,8 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
#ifdef CONFIG_CHELSIO_T4_DCB
u8 dcb_prio; /* DCB Priority bound to queue */
#endif
+ u8 dbqt; /* SGE Doorbell Queue Timer in use */
+ unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */
unsigned long tso; /* # of TSO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
@@ -816,6 +819,8 @@ struct sge {
u16 nqs_per_uld; /* # of Rx queues per ULD */
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
+ u16 dbqtimer_tick;
+ u16 dbqtimer_val[SGE_NDBQTIMERS];
u32 fl_pg_order; /* large page allocation size */
u32 stat_len; /* length of status page at ring end */
u32 pktshift; /* padding between CPL & packet data */
@@ -860,6 +865,7 @@ struct doorbell_stats {
struct hash_mac_addr {
struct list_head list;
u8 addr[ETH_ALEN];
+ unsigned int iface_mac;
};
struct uld_msix_bmap {
@@ -879,6 +885,7 @@ struct vf_info {
unsigned int tx_rate;
bool pf_set_mac;
u16 vlan;
+ int link_state;
};
enum {
@@ -1401,7 +1408,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
rspq_flush_handler_t flush_handler, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
- unsigned int iqid);
+ unsigned int iqid, u8 dbqt);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid);
@@ -1414,6 +1421,8 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
+int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q,
+ int maxreclaim);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
@@ -1820,6 +1829,8 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
+int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
+ u16 *dbqtimers);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_update_port_info(struct port_info *pi);
int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index b0ff9fa183f4..3130b43bba52 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3143,7 +3143,7 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
- } else if (adap->flags & FW_OFLD_CONN) {
+ } else if (adap->flags & CXGB4_FW_OFLD_CONN) {
seq_printf(seq, "TID range: %u..%u/%u..%u",
t->aftid_base,
t->aftid_end,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index d07230c892a5..bec4711005cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -446,8 +446,10 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
unsigned long *link_mode_mask)
{
#define SET_LMM(__lmm_name) \
- __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
- link_mode_mask)
+ do { \
+ __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
+ link_mode_mask); \
+ } while (0)
#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
do { \
@@ -541,7 +543,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
case FW_PORT_TYPE_CR4_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
- FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
@@ -552,6 +554,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
break;
}
+ if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
+ FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
+ FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
+ } else {
+ SET_LMM(FEC_NONE);
+ }
+
FW_CAPS_TO_LMM(ANEG, Autoneg);
FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
@@ -679,18 +688,15 @@ static int set_link_ksettings(struct net_device *dev,
base->autoneg == AUTONEG_DISABLE) {
fw_caps = speed_to_fw_caps(base->speed);
- /* Must only specify a single speed which must be supported
- * as part of the Physical Port Capabilities.
- */
- if ((fw_caps & (fw_caps - 1)) != 0 ||
- !(lc->pcaps & fw_caps))
+ /* Speed must be supported by Physical Port Capabilities. */
+ if (!(lc->pcaps & fw_caps))
return -EINVAL;
lc->speed_caps = fw_caps;
lc->acaps = fw_caps;
} else {
fw_caps =
- lmm_to_fw_caps(link_ksettings->link_modes.advertising);
+ lmm_to_fw_caps(link_ksettings->link_modes.advertising);
if (!(lc->pcaps & fw_caps))
return -EINVAL;
lc->speed_caps = 0;
@@ -869,7 +875,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
return -EINVAL;
- if (adapter->flags & FULL_INIT_DONE)
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
return -EBUSY;
for (i = 0; i < pi->nqsets; ++i) {
@@ -926,11 +932,190 @@ static int get_adaptive_rx_setting(struct net_device *dev)
return q->rspq.adaptive_rx;
}
-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+/* Return the current global Adapter SGE Doorbell Queue Timer Tick for all
+ * Ethernet TX Queues.
+ */
+static int get_dbqtimer_tick(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ return adap->sge.dbqtimer_tick;
+}
+
+/* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
+ * associated with a Network Device.
+ */
+static int get_dbqtimer(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_txq *txq;
+
+ txq = &adap->sge.ethtxq[pi->first_qset];
+
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* all of the TX Queues use the same Timer Index */
+ return adap->sge.dbqtimer_val[txq->dbqtimerix];
+}
+
+/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
+ * Queues. This is the fundamental "Tick" that sets the scale of values which
+ * can be used. Individual Ethernet TX Queues index into a relatively small
+ * array of Tick Multipliers. Changing the base Tick will thus change all of
+ * the resulting Timer Values associated with those multipliers for all
+ * Ethernet TX Queues.
+ */
+static int set_dbqtimer_tick(struct net_device *dev, int usecs)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ u32 param, val;
+ int ret;
+
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* return early if it's the same Timer Tick we're already using */
+ if (s->dbqtimer_tick == usecs)
+ return 0;
+
+ /* attempt to set the new Timer Tick value */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
+ val = usecs;
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+ if (ret)
+ return ret;
+ s->dbqtimer_tick = usecs;
+
+ /* if successful, reread resulting dependent Timer values */
+ ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
+ s->dbqtimer_val);
+ return ret;
+}
+
+/* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
+ * associated with a Network Device. There is a relatively small array of
+ * possible Timer Values so we need to pick the closest value available.
+ */
+static int set_dbqtimer(struct net_device *dev, int usecs)
+{
+ int qix, timerix, min_timerix, delta, min_delta;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ struct sge_eth_txq *txq;
+ u32 param, val;
+ int ret;
+
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* Find the SGE Doorbell Timer Value that's closest to the requested
+ * value.
+ */
+ min_delta = INT_MAX;
+ min_timerix = 0;
+ for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
+ delta = s->dbqtimer_val[timerix] - usecs;
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ min_timerix = timerix;
+ }
+ }
+
+ /* Return early if it's the same Timer Index we're already using.
+ * We use the same Timer Index for all of the TX Queues for an
+ * interface so it's only necessary to check the first one.
+ */
+ txq = &s->ethtxq[pi->first_qset];
+ if (txq->dbqtimerix == min_timerix)
+ return 0;
+
+ for (qix = 0; qix < pi->nqsets; qix++, txq++) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
+ param =
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
+ FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
+ val = min_timerix;
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
+ 1, &param, &val);
+ if (ret)
+ return ret;
+ }
+ txq->dbqtimerix = min_timerix;
+ }
+ return 0;
+}
+
+/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
+ * Queues and the Timer Value for the Ethernet TX Queues associated with a
+ * Network Device. Since changing the global Tick changes all of the
+ * available Timer Values, we need to do this first before selecting the
+ * resulting closest Timer Value. Moreover, since the Tick is global,
+ * changing it affects the Timer Values for all Network Devices on the
+ * adapter. So, before changing the Tick, we grab all of the current Timer
+ * Values for other Network Devices on this Adapter and then attempt to select
+ * new Timer Values which are close to the old values ...
+ */
+static int set_dbqtimer_tickval(struct net_device *dev,
+ int tick_usecs, int timer_usecs)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int timer[MAX_NPORTS];
+ unsigned int port;
+ int ret;
+
+ /* Grab the other adapter Network Interface current timers and fill in
+ * the new one for this Network Interface.
+ */
+ for_each_port(adap, port)
+ if (port == pi->port_id)
+ timer[port] = timer_usecs;
+ else
+ timer[port] = get_dbqtimer(adap->port[port]);
+
+ /* Change the global Tick first ... */
+ ret = set_dbqtimer_tick(dev, tick_usecs);
+ if (ret)
+ return ret;
+
+ /* ... and then set all of the Network Interface Timer Values ... */
+ for_each_port(adap, port) {
+ ret = set_dbqtimer(adap->port[port], timer[port]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
{
- set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
- return set_rx_intr_params(dev, c->rx_coalesce_usecs,
- c->rx_max_coalesced_frames);
+ int ret;
+
+ set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
+
+ ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
+ coalesce->rx_max_coalesced_frames);
+ if (ret)
+ return ret;
+
+ return set_dbqtimer_tickval(dev,
+ coalesce->tx_coalesce_usecs_irq,
+ coalesce->tx_coalesce_usecs);
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -943,6 +1128,8 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
adap->sge.counter_val[rq->pktcnt_idx] : 0;
c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
+ c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
+ c->tx_coalesce_usecs = get_dbqtimer(dev);
return 0;
}
@@ -1076,7 +1263,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
* firmware image otherwise we'll try to do the entire job from the
* host ... and we always "force" the operation in this path.
*/
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
mbox = adap->mbox;
ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
@@ -1155,7 +1342,7 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
return 0;
/* Interface must be brought up at least once */
- if (pi->adapter->flags & FULL_INIT_DONE) {
+ if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
for (i = 0; i < pi->rss_size; i++)
pi->rss[i] = p[i];
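The timer selection in set_dbqtimer() above is a plain nearest-value scan over the small dbqtimer_val[] array. The same idea, extracted into a standalone helper for illustration (hypothetical name, not driver code):

#include <linux/kernel.h>

/* Return the index in @vals (length @nvals) closest to @usecs. */
static int closest_timer_idx(const u16 *vals, int nvals, int usecs)
{
	int i, delta, min_delta = INT_MAX, best = 0;

	for (i = 0; i < nvals; i++) {
		delta = vals[i] - usecs;
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			best = i;
		}
	}
	return best;
}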
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
index 6c8a62eefe51..33b2c0c45509 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -74,7 +74,7 @@ int cxgb_fcoe_enable(struct net_device *netdev)
if (is_t4(adap->params.chip))
return -EINVAL;
- if (!(adap->flags & FULL_INIT_DONE))
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE))
return -EINVAL;
dev_info(adap->pdev_dev, "Enabling FCoE offload features\n");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 7dddb9e748b8..5afb43000049 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -524,7 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
return -ENOMEM;
fwr = __skb_put(skb, len);
- t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1
+ t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1
: adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -1569,7 +1569,7 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id,
int ret;
/* If we are shutting down the adapter do not wait for completion */
- if (netdev2adap(dev)->flags & SHUTTING_DOWN)
+ if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
return __cxgb4_del_filter(dev, filter_id, fs, NULL);
init_completion(&ctx.completion);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6ba9099ca7fe..89179e316687 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -433,6 +433,60 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
}
/**
+ * cxgb4_change_mac - Update match filter for a MAC address.
+ * @pi: the port_info
+ * @viid: the VI id
+ * @tcam_idx: TCAM index of existing filter for old value of MAC address,
+ * or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @smt_idx: if non-NULL, the SMT index assigned to the address is returned here
+ *
+ * Modifies an MPS filter and sets it to the new MAC address if
+ * @tcam_idx >= 0, or adds the MAC address to a new filter if
+ * @tcam_idx < 0. In the latter case the address is added persistently
+ * if @persist is %true.
+ * Addresses are programmed to the hash region if the TCAM runs out of entries.
+ *
+ */
+static int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
+ int *tcam_idx, const u8 *addr, bool persist,
+ u8 *smt_idx)
+{
+ struct adapter *adapter = pi->adapter;
+ struct hash_mac_addr *entry, *new_entry;
+ int ret;
+
+ ret = t4_change_mac(adapter, adapter->mbox, viid,
+ *tcam_idx, addr, persist, smt_idx);
+ /* We ran out of TCAM entries. Try programming the hash region. */
+ if (ret == -ENOMEM) {
+ /* If the MAC address to be updated is in the hash addr
+ * list, update it in the list
+ */
+ list_for_each_entry(entry, &adapter->mac_hlist, list) {
+ if (entry->iface_mac) {
+ ether_addr_copy(entry->addr, addr);
+ goto set_hash;
+ }
+ }
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+ ether_addr_copy(new_entry->addr, addr);
+ new_entry->iface_mac = true;
+ list_add_tail(&new_entry->list, &adapter->mac_hlist);
+set_hash:
+ ret = cxgb4_set_addr_hash(pi);
+ } else if (ret >= 0) {
+ *tcam_idx = ret;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
* link_start - enable a port
* @dev: the port to enable
*
@@ -450,15 +504,9 @@ static int link_start(struct net_device *dev)
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
- if (ret == 0) {
- ret = t4_change_mac(pi->adapter, mb, pi->viid,
- pi->xact_addr_filt, dev->dev_addr, true,
- &pi->smt_idx);
- if (ret >= 0) {
- pi->xact_addr_filt = ret;
- ret = 0;
- }
- }
+ if (ret == 0)
+ ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt,
+ dev->dev_addr, true, &pi->smt_idx);
if (ret == 0)
ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
&pi->link_cfg);
@@ -527,7 +575,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge_eth_txq *eq;
eq = container_of(txq, struct sge_eth_txq, q);
- netif_tx_wake_queue(eq->txq);
+ t4_sge_eth_txq_egress_update(q->adap, eq, -1);
} else {
struct sge_uld_txq *oq;
@@ -603,12 +651,12 @@ out:
static void disable_msi(struct adapter *adapter)
{
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4_USING_MSIX;
+ } else if (adapter->flags & CXGB4_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4_USING_MSI;
}
}
@@ -624,7 +672,7 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
adap->swintr = 1;
t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
}
- if (adap->flags & MASTER_PF)
+ if (adap->flags & CXGB4_MASTER_PF)
t4_slow_intr_handler(adap);
return IRQ_HANDLED;
}
@@ -789,9 +837,9 @@ static void quiesce_rx(struct adapter *adap)
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
- if (adap->flags & FULL_INIT_DONE) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
t4_intr_disable(adap);
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
free_msix_queue_irqs(adap);
free_irq(adap->msix_info[0].vec, adap);
} else {
@@ -832,7 +880,7 @@ static int setup_fw_sge_queues(struct adapter *adap)
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
@@ -885,10 +933,13 @@ static int setup_sge_queues(struct adapter *adap)
q->rspq.idx = j;
memset(&q->stats, 0, sizeof(q->stats));
}
- for (j = 0; j < pi->nqsets; j++, t++) {
+
+ q = &s->ethrxq[pi->first_qset];
+ for (j = 0; j < pi->nqsets; j++, t++, q++) {
err = t4_sge_alloc_eth_txq(adap, t, dev,
netdev_get_tx_queue(dev, j),
- s->fw_evtq.cntxt_id);
+ q->rspq.cntxt_id,
+ !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
if (err)
goto freeout;
}
@@ -910,7 +961,7 @@ static int setup_sge_queues(struct adapter *adap)
if (!is_t4(adap->params.chip)) {
err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
netdev_get_tx_queue(adap->port[0], 0)
- , s->fw_evtq.cntxt_id);
+ , s->fw_evtq.cntxt_id, false);
if (err)
goto freeout;
}
@@ -2229,7 +2280,7 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto freeq;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
@@ -2242,7 +2293,8 @@ static int cxgb_up(struct adapter *adap)
}
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
- (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ (adap->flags & CXGB4_USING_MSI) ? 0
+ : IRQF_SHARED,
adap->port[0]->name, adap);
if (err)
goto irq_err;
@@ -2251,7 +2303,7 @@ static int cxgb_up(struct adapter *adap)
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
- adap->flags |= FULL_INIT_DONE;
+ adap->flags |= CXGB4_FULL_INIT_DONE;
mutex_unlock(&uld_mutex);
notify_ulds(adap, CXGB4_STATE_UP);
@@ -2280,7 +2332,7 @@ static void cxgb_down(struct adapter *adapter)
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
- adapter->flags &= ~FULL_INIT_DONE;
+ adapter->flags &= ~CXGB4_FULL_INIT_DONE;
}
/*
@@ -2294,7 +2346,7 @@ static int cxgb_open(struct net_device *dev)
netif_carrier_off(dev);
- if (!(adapter->flags & FULL_INIT_DONE)) {
+ if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
err = cxgb_up(adapter);
if (err < 0)
return err;
@@ -2689,6 +2741,7 @@ static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
ivi->min_tx_rate = 0;
ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
ivi->vlan = vfinfo->vlan;
+ ivi->linkstate = vfinfo->link_state;
return 0;
}
@@ -2828,6 +2881,49 @@ static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
return ret;
}
+
+static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
+ int link)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ u32 param, val;
+ int ret = 0;
+
+ if (vf >= adap->num_vfs)
+ return -EINVAL;
+
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ val = FW_VF_LINK_STATE_AUTO;
+ break;
+
+ case IFLA_VF_LINK_STATE_ENABLE:
+ val = FW_VF_LINK_STATE_ENABLE;
+ break;
+
+ case IFLA_VF_LINK_STATE_DISABLE:
+ val = FW_VF_LINK_STATE_DISABLE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
+ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
+ &param, &val);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Error %d in setting PF %d VF %d link state\n",
+ ret, adap->pf, vf);
+ return -EINVAL;
+ }
+
+ adap->vfinfo[vf].link_state = link;
+ return ret;
+}
#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2839,9 +2935,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
- pi->xact_addr_filt, addr->sa_data, true,
- &pi->smt_idx);
+ ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt,
+ addr->sa_data, true, &pi->smt_idx);
if (ret < 0)
return ret;
@@ -2856,7 +2951,7 @@ static void cxgb_netpoll(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
int i;
struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
@@ -2883,7 +2978,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
if (index < 0 || index > pi->nqsets - 1)
return -EINVAL;
- if (!(adap->flags & FULL_INIT_DONE)) {
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to rate limit on queue %d. Link Down?\n",
index);
@@ -2984,7 +3079,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- if (!(adap->flags & FULL_INIT_DONE)) {
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to setup tc on port %d. Link Down?\n",
pi->port_id);
@@ -3244,12 +3339,13 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
- .ndo_open = cxgb4_mgmt_open,
- .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
- .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
- .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
- .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
- .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
+ .ndo_open = cxgb4_mgmt_open,
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
+ .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
};
#endif
@@ -4115,7 +4211,7 @@ static int adap_init0(struct adapter *adap)
return ret;
}
if (ret == adap->mbox)
- adap->flags |= MASTER_PF;
+ adap->flags |= CXGB4_MASTER_PF;
/*
* If we're the Master PF Driver and the device is uninitialized,
@@ -4130,7 +4226,7 @@ static int adap_init0(struct adapter *adap)
/* If firmware is too old (not supported by driver) force an update. */
if (ret)
state = DEV_STATE_UNINIT;
- if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
+ if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
struct fw_info *fw_info;
struct fw_hdr *card_fw;
const struct firmware *fw;
@@ -4192,7 +4288,7 @@ static int adap_init0(struct adapter *adap)
ret);
dev_info(adap->pdev_dev, "Coming up as %s: "\
"Adapter already initialized\n",
- adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+ adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
} else {
dev_info(adap->pdev_dev, "Coming up as MASTER: "\
"Initializing adapter\n");
@@ -4278,6 +4374,24 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
+ /* Grab the SGE Doorbell Queue Timer values. If successful, that
+ * indicates that the Firmware and Hardware support this.
+ */
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+
+ if (!ret) {
+ adap->sge.dbqtimer_tick = val[0];
+ ret = t4_read_sge_dbqtimers(adap,
+ ARRAY_SIZE(adap->sge.dbqtimer_val),
+ adap->sge.dbqtimer_val);
+ }
+
+ if (!ret)
+ adap->flags |= CXGB4_SGE_DBQ_TIMER;
+
if (is_bypass_device(adap->pdev->device))
adap->params.bypass = 1;
@@ -4400,7 +4514,7 @@ static int adap_init0(struct adapter *adap)
* offload connection through firmware work request
*/
if ((val[0] != val[1]) && (ret >= 0)) {
- adap->flags |= FW_OFLD_CONN;
+ adap->flags |= CXGB4_FW_OFLD_CONN;
adap->tids.aftid_base = val[0];
adap->tids.aftid_end = val[1];
}
@@ -4493,7 +4607,7 @@ static int adap_init0(struct adapter *adap)
* 2. Server filter: These are special filters which are used
* to redirect SYN packets to offload queue.
*/
- if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
+ if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
adap->tids.sftid_base = adap->tids.ftid_base +
DIV_ROUND_UP(adap->tids.nftids, 3);
adap->tids.nsftids = adap->tids.nftids -
@@ -4672,7 +4786,7 @@ static int adap_init0(struct adapter *adap)
adap->params.b_wnd);
}
t4_init_sge_params(adap);
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
t4_init_tp_params(adap, true);
return 0;
@@ -4707,7 +4821,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
goto out;
rtnl_lock();
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
spin_lock(&adap->stats_lock);
for_each_port(adap, i) {
@@ -4719,12 +4833,12 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
}
spin_unlock(&adap->stats_lock);
disable_interrupts(adap);
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
- if ((adap->flags & DEV_ENABLED)) {
+ if ((adap->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
- adap->flags &= ~DEV_ENABLED;
+ adap->flags &= ~CXGB4_DEV_ENABLED;
}
out: return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -4742,13 +4856,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
- if (!(adap->flags & DEV_ENABLED)) {
+ if (!(adap->flags & CXGB4_DEV_ENABLED)) {
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Cannot reenable PCI "
"device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- adap->flags |= DEV_ENABLED;
+ adap->flags |= CXGB4_DEV_ENABLED;
}
pci_set_master(pdev);
@@ -4759,7 +4873,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
if (adap_init1(adap, &c))
return PCI_ERS_RESULT_DISCONNECT;
@@ -4871,7 +4985,7 @@ static int cfg_queues(struct adapter *adap)
* at all is problematic ...
*/
niqflint = adap->params.pfres.niqflint - 1;
- if (!(adap->flags & USING_MSIX))
+ if (!(adap->flags & CXGB4_USING_MSIX))
niqflint--;
neq = adap->params.pfres.neq / 2;
avail_eth_qsets = min(niqflint, neq);
@@ -5153,8 +5267,8 @@ static void print_adapter_info(struct adapter *adapter)
/* Software/Hardware configuration */
dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
is_offload(adapter) ? "R" : "",
- ((adapter->flags & USING_MSIX) ? "MSI-X" :
- (adapter->flags & USING_MSI) ? "MSI" : ""),
+ ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
+ (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
is_offload(adapter) ? "Offload" : "non-Offload");
}
@@ -5229,13 +5343,13 @@ static void free_some_resources(struct adapter *adapter)
kfree(adap2pinfo(adapter, i)->rss);
free_netdev(adapter->port[i]);
}
- if (adapter->flags & FW_OK)
+ if (adapter->flags & CXGB4_FW_OK)
t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
- NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int t4_get_chip_type(struct adapter *adap, int ver)
@@ -5533,7 +5647,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* PCI device has been enabled */
- adapter->flags |= DEV_ENABLED;
+ adapter->flags |= CXGB4_DEV_ENABLED;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5551,7 +5665,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* using Relaxed Ordering.
*/
if (!pcie_relaxed_ordering_enabled(pdev))
- adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+ adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
@@ -5642,7 +5756,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_RXHASH |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
@@ -5651,9 +5765,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM |
NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_HW_TLS_RECORD;
}
if (highdma)
@@ -5680,7 +5797,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
- if (adapter->flags & FW_OK) {
+ if (adapter->flags & CXGB4_FW_OK) {
err = t4_port_init(adapter, func, func, 0);
if (err)
goto out_free_dev;
@@ -5702,7 +5819,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- if (!(adapter->flags & FW_OK))
+ if (!(adapter->flags & CXGB4_FW_OK))
goto fw_attach_fail;
/* Configure queues and allocate tables now, they can be needed as
@@ -5796,9 +5913,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
- adapter->flags |= USING_MSIX;
+ adapter->flags |= CXGB4_USING_MSIX;
else if (msi > 0 && pci_enable_msi(pdev) == 0) {
- adapter->flags |= USING_MSI;
+ adapter->flags |= CXGB4_USING_MSI;
if (msi > 1)
free_msix_info(adapter);
}
@@ -5866,7 +5983,7 @@ fw_attach_fail:
cxgb4_ptp_init(adapter);
if (IS_REACHABLE(CONFIG_THERMAL) &&
- !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
+ !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
cxgb4_thermal_init(adapter);
print_adapter_info(adapter);
@@ -5875,7 +5992,7 @@ fw_attach_fail:
out_free_dev:
t4_free_sge_resources(adapter);
free_some_resources(adapter);
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4_USING_MSIX)
free_msix_info(adapter);
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
@@ -5908,7 +6025,7 @@ static void remove_one(struct pci_dev *pdev)
return;
}
- adapter->flags |= SHUTTING_DOWN;
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
if (adapter->pf == 4) {
int i;
@@ -5943,10 +6060,10 @@ static void remove_one(struct pci_dev *pdev)
*/
clear_all_filters(adapter);
- if (adapter->flags & FULL_INIT_DONE)
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
cxgb_down(adapter);
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4_USING_MSIX)
free_msix_info(adapter);
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
@@ -5970,9 +6087,9 @@ static void remove_one(struct pci_dev *pdev)
#endif
iounmap(adapter->regs);
pci_disable_pcie_error_reporting(pdev);
- if ((adapter->flags & DEV_ENABLED)) {
+ if ((adapter->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
- adapter->flags &= ~DEV_ENABLED;
+ adapter->flags &= ~CXGB4_DEV_ENABLED;
}
pci_release_regions(pdev);
kfree(adapter->mbox_log);
@@ -5998,7 +6115,7 @@ static void shutdown_one(struct pci_dev *pdev)
return;
}
- adapter->flags |= SHUTTING_DOWN;
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
if (adapter->pf == 4) {
int i;
@@ -6016,7 +6133,7 @@ static void shutdown_one(struct pci_dev *pdev)
disable_msi(adapter);
t4_sge_stop(adapter);
- if (adapter->flags & FW_OK)
+ if (adapter->flags & CXGB4_FW_OK)
t4_fw_bye(adapter, adapter->mbox);
}
}
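
The bulk of the cxgb4_main.c changes above rename the adapter->flags bits so that every driver-private flag carries a CXGB4_ prefix and no longer collides with generic identifiers elsewhere in the tree. A minimal sketch of the resulting pattern is below; the bit positions are assumptions for illustration only, the real definitions live in cxgb4.h and are not part of these hunks.

/* Illustrative only: bit positions are assumptions, the real enum is in
 * cxgb4.h.  The point is the CXGB4_ namespace on every adapter flag.
 */
enum cxgb4_adapter_flags {
	CXGB4_FULL_INIT_DONE		= (1 << 0),
	CXGB4_DEV_ENABLED		= (1 << 1),
	CXGB4_USING_MSI			= (1 << 2),
	CXGB4_USING_MSIX		= (1 << 3),
	CXGB4_FW_OK			= (1 << 4),
	CXGB4_ROOT_NO_RELAXED_ORDERING	= (1 << 5),
	CXGB4_SHUTTING_DOWN		= (1 << 6),
};

static inline bool cxgb4_using_msix(const struct adapter *adap)
{
	return adap->flags & CXGB4_USING_MSIX;
}
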
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 9f9d6cae39d5..58a039c3224a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -378,10 +378,10 @@ static void cxgb4_init_ptp_timer(struct adapter *adapter)
int err;
memset(&c, 0, sizeof(c));
- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F |
- FW_PTP_CMD_PORTID_V(0));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.scmd.sc = FW_PTP_SC_INIT_TIMER;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index c116f96956fe..82a8d1970060 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -83,28 +83,23 @@ static void cxgb4_process_flow_match(struct net_device *dev,
struct tc_cls_flower_offload *cls,
struct ch_filter_specification *fs)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
u16 addr_type = 0;
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- cls->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->mask);
- u16 ethtype_key = ntohs(key->n_proto);
- u16 ethtype_mask = ntohs(mask->n_proto);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+ u16 ethtype_key, ethtype_mask;
+
+ flow_rule_match_basic(rule, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
if (ethtype_key == ETH_P_ALL) {
ethtype_key = 0;
@@ -116,115 +111,89 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->val.ethtype = ethtype_key;
fs->mask.ethtype = ethtype_mask;
- fs->val.proto = key->ip_proto;
- fs->mask.proto = mask->ip_proto;
+ fs->val.proto = match.key->ip_proto;
+ fs->mask.proto = match.mask->ip_proto;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- cls->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- cls->mask);
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
fs->type = 0;
- memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
- memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
- memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
- memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
+ memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
+ memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
+ memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));
/* also initialize nat_lip/fip to same values */
- memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
- memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
-
+ memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
+ memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- cls->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- cls->mask);
+ struct flow_match_ipv6_addrs match;
+ flow_rule_match_ipv6_addrs(rule, &match);
fs->type = 1;
- memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
- memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
- memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
- memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
+ memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
+ sizeof(match.key->dst));
+ memcpy(&fs->val.fip[0], match.key->src.s6_addr,
+ sizeof(match.key->src));
+ memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
+ sizeof(match.mask->dst));
+ memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
+ sizeof(match.mask->src));
/* also initialize nat_lip/fip to same values */
- memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
- memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
+ memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
+ sizeof(match.key->dst));
+ memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
+ sizeof(match.key->src));
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key, *mask;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- cls->mask);
- fs->val.lport = cpu_to_be16(key->dst);
- fs->mask.lport = cpu_to_be16(mask->dst);
- fs->val.fport = cpu_to_be16(key->src);
- fs->mask.fport = cpu_to_be16(mask->src);
+ flow_rule_match_ports(rule, &match);
+ fs->val.lport = cpu_to_be16(match.key->dst);
+ fs->mask.lport = cpu_to_be16(match.mask->dst);
+ fs->val.fport = cpu_to_be16(match.key->src);
+ fs->mask.fport = cpu_to_be16(match.mask->src);
/* also initialize nat_lport/fport to same values */
- fs->nat_lport = cpu_to_be16(key->dst);
- fs->nat_fport = cpu_to_be16(key->src);
+ fs->nat_lport = cpu_to_be16(match.key->dst);
+ fs->nat_fport = cpu_to_be16(match.key->src);
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *key, *mask;
-
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->mask);
- fs->val.tos = key->tos;
- fs->mask.tos = mask->tos;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ fs->val.tos = match.key->tos;
+ fs->mask.tos = match.mask->tos;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key, *mask;
-
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- cls->mask);
- fs->val.vni = be32_to_cpu(key->keyid);
- fs->mask.vni = be32_to_cpu(mask->keyid);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+
+ flow_rule_match_enc_keyid(rule, &match);
+ fs->val.vni = be32_to_cpu(match.key->keyid);
+ fs->mask.vni = be32_to_cpu(match.mask->keyid);
if (fs->mask.vni) {
fs->val.encap_vld = 1;
fs->mask.encap_vld = 1;
}
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key, *mask;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
u16 vlan_tci, vlan_tci_mask;
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- cls->mask);
- vlan_tci = key->vlan_id | (key->vlan_priority <<
- VLAN_PRIO_SHIFT);
- vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
- VLAN_PRIO_SHIFT);
+ flow_rule_match_vlan(rule, &match);
+ vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT);
+ vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
+ VLAN_PRIO_SHIFT);
fs->val.ivlan = vlan_tci;
fs->mask.ivlan = vlan_tci_mask;
@@ -255,10 +224,12 @@ static void cxgb4_process_flow_match(struct net_device *dev,
static int cxgb4_validate_flow_match(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 ethtype_mask = 0;
u16 ethtype_key = 0;
- if (cls->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
@@ -268,36 +239,29 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(dev, "Unsupported key used: 0x%x\n",
- cls->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->mask);
- ethtype_key = ntohs(key->n_proto);
- ethtype_mask = ntohs(mask->n_proto);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
u16 eth_ip_type = ethtype_key & ethtype_mask;
- struct flow_dissector_key_ip *mask;
+ struct flow_match_ip match;
if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
netdev_err(dev, "IP Key supported only with IPv4/v6");
return -EINVAL;
}
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->mask);
- if (mask->ttl) {
+ flow_rule_match_ip(rule, &match);
+ if (match.mask->ttl) {
netdev_warn(dev, "ttl match unsupported for offload");
return -EOPNOTSUPP;
}
@@ -328,7 +292,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
u32 mask, u32 offset, u8 htype)
{
switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
switch (offset) {
case PEDIT_ETH_DMAC_31_0:
fs->newdmac = 1;
@@ -346,7 +310,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
offload_pedit(fs, val, mask, ETH_SMAC_47_16);
}
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
offload_pedit(fs, val, mask, IP4_SRC);
@@ -356,7 +320,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
offload_pedit(fs, val, mask, IP6_SRC_31_0);
@@ -384,7 +348,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -397,7 +361,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -416,56 +380,63 @@ static void cxgb4_process_flow_actions(struct net_device *in,
struct tc_cls_flower_offload *cls,
struct ch_filter_specification *fs)
{
- const struct tc_action *a;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_action_entry *act;
int i;
- tcf_exts_for_each_action(i, a, cls->exts) {
- if (is_tcf_gact_ok(a)) {
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
fs->action = FILTER_PASS;
- } else if (is_tcf_gact_shot(a)) {
+ break;
+ case FLOW_ACTION_DROP:
fs->action = FILTER_DROP;
- } else if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *out = tcf_mirred_dev(a);
+ break;
+ case FLOW_ACTION_REDIRECT: {
+ struct net_device *out = act->dev;
struct port_info *pi = netdev_priv(out);
fs->action = FILTER_SWITCH;
fs->eport = pi->port_id;
- } else if (is_tcf_vlan(a)) {
- u32 vlan_action = tcf_vlan_action(a);
- u8 prio = tcf_vlan_push_prio(a);
- u16 vid = tcf_vlan_push_vid(a);
+ }
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE: {
+ u8 prio = act->vlan.prio;
+ u16 vid = act->vlan.vid;
u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;
-
- switch (vlan_action) {
- case TCA_VLAN_ACT_POP:
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
fs->newvlan |= VLAN_REMOVE;
break;
- case TCA_VLAN_ACT_PUSH:
+ case FLOW_ACTION_VLAN_PUSH:
fs->newvlan |= VLAN_INSERT;
fs->vlan = vlan_tci;
break;
- case TCA_VLAN_ACT_MODIFY:
+ case FLOW_ACTION_VLAN_MANGLE:
fs->newvlan |= VLAN_REWRITE;
fs->vlan = vlan_tci;
break;
default:
break;
}
- } else if (is_tcf_pedit(a)) {
+ }
+ break;
+ case FLOW_ACTION_MANGLE: {
u32 mask, val, offset;
- int nkeys, i;
u8 htype;
- nkeys = tcf_pedit_nkeys(a);
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
- mask = tcf_pedit_mask(a, i);
- val = tcf_pedit_val(a, i);
- offset = tcf_pedit_offset(a, i);
+ htype = act->mangle.htype;
+ mask = act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
- process_pedit_field(fs, val, mask, offset,
- htype);
+ process_pedit_field(fs, val, mask, offset, htype);
}
+ break;
+ default:
+ break;
}
}
}
@@ -484,101 +455,89 @@ static bool valid_l4_mask(u32 mask)
}
static bool valid_pedit_action(struct net_device *dev,
- const struct tc_action *a)
+ const struct flow_action_entry *act)
{
u32 mask, offset;
- u8 cmd, htype;
- int nkeys, i;
-
- nkeys = tcf_pedit_nkeys(a);
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
- cmd = tcf_pedit_cmd(a, i);
- mask = tcf_pedit_mask(a, i);
- offset = tcf_pedit_offset(a, i);
-
- if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
- netdev_err(dev, "%s: Unsupported pedit cmd\n",
+ u8 htype;
+
+ htype = act->mangle.htype;
+ mask = act->mangle.mask;
+ offset = act->mangle.offset;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ switch (offset) {
+ case PEDIT_ETH_DMAC_31_0:
+ case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
+ case PEDIT_ETH_SMAC_47_16:
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
-
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- switch (offset) {
- case PEDIT_ETH_DMAC_31_0:
- case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
- case PEDIT_ETH_SMAC_47_16:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- switch (offset) {
- case PEDIT_IP4_SRC:
- case PEDIT_IP4_DST:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (offset) {
+ case PEDIT_IP4_SRC:
+ case PEDIT_IP4_DST:
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- switch (offset) {
- case PEDIT_IP6_SRC_31_0:
- case PEDIT_IP6_SRC_63_32:
- case PEDIT_IP6_SRC_95_64:
- case PEDIT_IP6_SRC_127_96:
- case PEDIT_IP6_DST_31_0:
- case PEDIT_IP6_DST_63_32:
- case PEDIT_IP6_DST_95_64:
- case PEDIT_IP6_DST_127_96:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (offset) {
+ case PEDIT_IP6_SRC_31_0:
+ case PEDIT_IP6_SRC_63_32:
+ case PEDIT_IP6_SRC_95_64:
+ case PEDIT_IP6_SRC_127_96:
+ case PEDIT_IP6_DST_31_0:
+ case PEDIT_IP6_DST_63_32:
+ case PEDIT_IP6_DST_95_64:
+ case PEDIT_IP6_DST_127_96:
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- switch (offset) {
- case PEDIT_TCP_SPORT_DPORT:
- if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
- __func__);
- return false;
- }
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ switch (offset) {
+ case PEDIT_TCP_SPORT_DPORT:
+ if (!valid_l4_mask(~mask)) {
+ netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
__func__);
return false;
}
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- switch (offset) {
- case PEDIT_UDP_SPORT_DPORT:
- if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
- __func__);
- return false;
- }
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ switch (offset) {
+ case PEDIT_UDP_SPORT_DPORT:
+ if (!valid_l4_mask(~mask)) {
+ netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
__func__);
return false;
}
break;
default:
- netdev_err(dev, "%s: Unsupported pedit type\n",
+ netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
+ return false;
}
return true;
}
@@ -586,24 +545,26 @@ static bool valid_pedit_action(struct net_device *dev,
static int cxgb4_validate_flow_actions(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
- const struct tc_action *a;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
int i;
- tcf_exts_for_each_action(i, a, cls->exts) {
- if (is_tcf_gact_ok(a)) {
- /* Do nothing */
- } else if (is_tcf_gact_shot(a)) {
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ case FLOW_ACTION_DROP:
/* Do nothing */
- } else if (is_tcf_mirred_egress_redirect(a)) {
+ break;
+ case FLOW_ACTION_REDIRECT: {
struct adapter *adap = netdev2adap(dev);
struct net_device *n_dev, *target_dev;
unsigned int i;
bool found = false;
- target_dev = tcf_mirred_dev(a);
+ target_dev = act->dev;
for_each_port(adap, i) {
n_dev = adap->port[i];
if (target_dev == n_dev) {
@@ -621,15 +582,18 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
return -EINVAL;
}
act_redir = true;
- } else if (is_tcf_vlan(a)) {
- u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
- u32 vlan_action = tcf_vlan_action(a);
+ }
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE: {
+ u16 proto = be16_to_cpu(act->vlan.proto);
- switch (vlan_action) {
- case TCA_VLAN_ACT_POP:
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
break;
- case TCA_VLAN_ACT_PUSH:
- case TCA_VLAN_ACT_MODIFY:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE:
if (proto != ETH_P_8021Q) {
netdev_err(dev, "%s: Unsupported vlan proto\n",
__func__);
@@ -642,13 +606,17 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
return -EOPNOTSUPP;
}
act_vlan = true;
- } else if (is_tcf_pedit(a)) {
- bool pedit_valid = valid_pedit_action(dev, a);
+ }
+ break;
+ case FLOW_ACTION_MANGLE: {
+ bool pedit_valid = valid_pedit_action(dev, act);
if (!pedit_valid)
return -EOPNOTSUPP;
act_pedit = true;
- } else {
+ }
+ break;
+ default:
netdev_err(dev, "%s: Unsupported action\n", __func__);
return -EOPNOTSUPP;
}
@@ -843,9 +811,9 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
if (ofld_stats->packet_count != packets) {
if (ofld_stats->prev_packet_count != packets)
ofld_stats->last_used = jiffies;
- tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
- packets - ofld_stats->packet_count,
- ofld_stats->last_used);
+ flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
+ packets - ofld_stats->packet_count,
+ ofld_stats->last_used);
ofld_stats->packet_count = packets;
ofld_stats->byte_count = bytes;
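
The cxgb4_tc_flower.c conversion above moves from walking cls->dissector and cls->exts directly to the flow_rule/flow_action intermediate representation from <net/flow_offload.h>. A minimal sketch of the same pattern for a hypothetical driver follows; the toy_filter structure and its fields are invented for illustration, while flow_rule_match_key(), flow_rule_match_basic() and flow_action_for_each() are the kernel helpers used in the diff.

#include <net/pkt_cls.h>
#include <net/flow_offload.h>

/* Hypothetical filter representation; only ethtype and drop are shown. */
struct toy_filter {
	u16 ethtype;
	u16 ethtype_mask;
	bool drop;
};

static int toy_parse_flower(struct tc_cls_flower_offload *cls,
			    struct toy_filter *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
	struct flow_action_entry *act;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		/* One call fills both key and mask pointers. */
		flow_rule_match_basic(rule, &match);
		f->ethtype = ntohs(match.key->n_proto);
		f->ethtype_mask = ntohs(match.mask->n_proto);
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			f->drop = false;
			break;
		case FLOW_ACTION_DROP:
			f->drop = true;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
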
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index c7d2b4dc7568..02fc63fa7f25 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -444,8 +444,7 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
if (!max_tids)
return NULL;
- t = kvzalloc(sizeof(*t) +
- (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
+ t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL);
if (!t)
return NULL;
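
The kvzalloc() change above replaces the open-coded sizeof(*t) + max_tids * sizeof(struct cxgb4_link) arithmetic with struct_size(), which computes the size of a structure plus its trailing flexible array and saturates instead of wrapping on overflow. A minimal sketch with an invented table type:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/overflow.h>

/* Hypothetical table with a flexible array member. */
struct toy_table {
	unsigned int nentries;
	struct toy_entry {
		u32 key;
		u32 val;
	} entries[];
};

static struct toy_table *toy_table_alloc(unsigned int nentries)
{
	/* struct_size(t, entries, nentries) ==
	 *   sizeof(struct toy_table) + nentries * sizeof(struct toy_entry),
	 * but saturates to SIZE_MAX rather than wrapping on overflow.
	 */
	struct toy_table *t = kvzalloc(struct_size(t, entries, nentries),
				       GFP_KERNEL);

	if (t)
		t->nentries = nentries;
	return t;
}
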
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 9a6065a3fa46..6c685b920713 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -78,7 +78,7 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
unsigned long flags;
spin_lock_irqsave(&bmap->lock, flags);
- __clear_bit(msix_idx, bmap->msix_bmap);
+ __clear_bit(msix_idx, bmap->msix_bmap);
spin_unlock_irqrestore(&bmap->lock, flags);
}
@@ -147,7 +147,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
per_chan = rxq_info->nrxq / adap->params.nports;
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
msi_idx = 1;
else
msi_idx = -((int)s->intrq.abs_id + 1);
@@ -195,7 +195,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
sizeof(unsigned short),
GFP_KERNEL);
@@ -206,7 +206,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
- if (adap->flags & FULL_INIT_DONE &&
+ if (adap->flags & CXGB4_FULL_INIT_DONE &&
!ret && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge;
unsigned int cmplqid;
@@ -239,7 +239,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge;
u32 param, cmdop, cmplqid = 0;
int i;
@@ -258,7 +258,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
kfree(rxq_info->msix_tbl);
}
@@ -273,7 +273,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
if (!rxq_info)
return -ENOMEM;
- if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
i = s->nqs_per_uld;
rxq_info->nrxq = roundup(i, adap->params.nports);
} else {
@@ -284,7 +284,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
if (!uld_info->ciq) {
rxq_info->nciq = 0;
} else {
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
rxq_info->nciq = min_t(int, s->nqs_per_uld,
num_online_cpus());
else
@@ -611,10 +611,10 @@ static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type
adap->uld[type].add = NULL;
release_sge_txq_uld(adap, type);
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_sge_queues_uld(adap, type);
@@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
lld->udb_density = 1 << adap->params.sge.eq_qpp;
lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+ lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
lld->filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
@@ -672,7 +673,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->sge_egrstatuspagesize = adap->sge.stat_len;
lld->sge_pktshift = adap->sge.pktshift;
lld->ulp_crypto = adap->params.crypto;
- lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
lld->max_ordird_qp = adap->params.max_ordird_qp;
lld->max_ird_adapter = adap->params.max_ird_adapter;
lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
@@ -701,7 +702,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
adap->uld[uld].handle = handle;
t4_register_netevent_notifier();
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
@@ -736,13 +737,13 @@ void cxgb4_register_uld(enum cxgb4_uld type,
ret = setup_sge_queues_uld(adap, type, p->lro);
if (ret)
goto free_queues;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
}
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type);
if (adap->uld[type].add)
goto free_irq;
@@ -753,9 +754,9 @@ void cxgb4_register_uld(enum cxgb4_uld type,
uld_attach(adap, type);
continue;
free_irq:
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
free_sge_queues_uld(adap, type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 5fa9a2d5fc4b..21da34a4ca24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -336,6 +336,7 @@ struct cxgb4_lld_info {
unsigned int cclk_ps; /* Core clock period in psec */
unsigned short udb_density; /* # of user DB/page */
unsigned short ucq_density; /* # of user CQs/page */
+ unsigned int sge_host_page_size; /* SGE host page size */
unsigned short filt_mode; /* filter optional components */
unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
/* scheduler queue */
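
The new sge_host_page_size field is filled in by uld_init() above as 1 << (adap->params.sge.hps + 10), i.e. the SGE hps value appears to encode the host page size as a power of two starting at 1 KB. A short worked check; the hps values themselves are illustrative:

/* hps encodes the host page size as 2^(hps + 10) bytes:
 *   hps = 2  ->  1 << 12 = 4096   (4 KB pages)
 *   hps = 6  ->  1 << 16 = 65536  (64 KB pages)
 */
lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
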
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 4852febbfec3..1a407d3c1d67 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -646,7 +646,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
if (l2t_size < L2T_MIN_HASH_BUCKETS)
return NULL;
- d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL);
+ d = kvzalloc(struct_size(d, l2tab, l2t_size), GFP_KERNEL);
if (!d)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 52edb688942b..ba6c153ee45c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -478,7 +478,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
struct sched_table *s;
unsigned int i;
- s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL);
+ s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
if (!s)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b90188401d4a..88773ca58e6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -80,9 +80,10 @@
* Max number of Tx descriptors we clean up at a time. Should be modest as
* freeing skbs isn't cheap and it happens while holding locks. We just need
* to free packets faster than they arrive, we eventually catch up and keep
- * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
+ * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should
+ * also match the CIDX Flush Threshold.
*/
-#define MAX_TX_RECLAIM 16
+#define MAX_TX_RECLAIM 32
/*
* Max number of Rx buffers we replenish at a time. Again keep this modest,
@@ -401,31 +402,52 @@ static inline int reclaimable(const struct sge_txq *q)
}
/**
- * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
+ * reclaim_completed_tx - reclaims completed TX Descriptors
* @adap: the adapter
* @q: the Tx queue to reclaim completed descriptors from
+ * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
* @unmap: whether the buffers should be unmapped for DMA
*
- * Reclaims Tx descriptors that the SGE has indicated it has processed,
- * and frees the associated buffers if possible. Called with the Tx
- * queue locked.
+ * Reclaims Tx Descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. If @maxreclaim == -1, then
+ * we'll use a default maximum. Called with the TX Queue locked.
*/
-inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
- bool unmap)
+static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+ int maxreclaim, bool unmap)
{
- int avail = reclaimable(q);
+ int reclaim = reclaimable(q);
- if (avail) {
+ if (reclaim) {
/*
* Limit the amount of clean up work we do at a time to keep
* the Tx lock hold time O(1).
*/
- if (avail > MAX_TX_RECLAIM)
- avail = MAX_TX_RECLAIM;
+ if (maxreclaim < 0)
+ maxreclaim = MAX_TX_RECLAIM;
+ if (reclaim > maxreclaim)
+ reclaim = maxreclaim;
- free_tx_desc(adap, q, avail, unmap);
- q->in_use -= avail;
+ free_tx_desc(adap, q, reclaim, unmap);
+ q->in_use -= reclaim;
}
+
+ return reclaim;
+}
+
+/**
+ * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adap: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue locked.
+ */
+void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+ bool unmap)
+{
+ (void)reclaim_completed_tx(adap, q, -1, unmap);
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
@@ -694,7 +716,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
{
size_t len = nelem * elem_size + stat_size;
void *s = NULL;
- void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL);
+ void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
if (!p)
return NULL;
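
The alloc_ring() hunk above drops dma_zalloc_coherent() in favour of dma_alloc_coherent(); in kernels of this vintage dma_alloc_coherent() already returns zeroed memory, so the zeroing wrapper is redundant. A minimal sketch of the pattern with a hypothetical ring allocator:

#include <linux/dma-mapping.h>

/* Hypothetical ring allocator mirroring alloc_ring()'s layout:
 * nelem descriptors of elem_size bytes plus a trailing status page.
 */
static void *toy_alloc_ring(struct device *dev, size_t nelem,
			    size_t elem_size, size_t stat_size,
			    dma_addr_t *phys)
{
	size_t len = nelem * elem_size + stat_size;

	/* dma_alloc_coherent() returns zeroed memory, so no memset()
	 * (and no dma_zalloc_coherent()) is needed.
	 */
	return dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
}
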
@@ -1288,6 +1310,44 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
}
/**
+ * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
+ * @adap: the adapter
+ * @eq: the Ethernet TX Queue
+ * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
+ *
+ * We're typically called here to update the state of an Ethernet TX
+ * Queue with respect to the hardware's progress in consuming the TX
+ * Work Requests that we've put on that Egress Queue. This happens
+ * when we get Egress Queue Update messages and also prophylactically
+ * in regular timer-based Ethernet TX Queue maintenance.
+ */
+int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
+ int maxreclaim)
+{
+ struct sge_txq *q = &eq->q;
+ unsigned int reclaimed;
+
+ if (!q->in_use || !__netif_tx_trylock(eq->txq))
+ return 0;
+
+ /* Reclaim pending completed TX Descriptors. */
+ reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
+
+ /* If the TX Queue is currently stopped and there's now more than half
+ * the queue available, restart it. Otherwise bail out since the rest
+ * of what we want to do here concerns shipping any
+ * currently buffered Coalesced TX Work Request.
+ */
+ if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
+ netif_tx_wake_queue(eq->txq);
+ eq->q.restarts++;
+ }
+
+ __netif_tx_unlock(eq->txq);
+ return reclaimed;
+}
+
+/**
* cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
@@ -1357,7 +1417,7 @@ out_free: dev_kfree_skb_any(skb);
}
skb_tx_timestamp(skb);
- cxgb4_reclaim_completed_tx(adap, &q->q, true);
+ reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
@@ -1400,8 +1460,25 @@ out_free: dev_kfree_skb_any(skb);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /* After we're done injecting the Work Request for this
+ * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now and schedule a request for an SGE Egress Queue
+ * Update message. The queue will get started later on when
+ * the firmware processes this Work Request and sends us an
+ * Egress Queue Status Update message indicating that space
+ * has opened up.
+ */
eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+
+ /* If we're using the SGE Doorbell Queue Timer facility, we
+ * don't need to ask the Firmware to send us Egress Queue CIDX
+ * Updates: the Hardware will do this automatically. And
+ * since we send the Ingress Queue CIDX Updates to the
+ * corresponding Ethernet Response Queue, we'll get them very
+ * quickly.
+ */
+ if (!q->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1671,7 +1748,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
/* Take this opportunity to reclaim any TX Descriptors whose DMA
* transfers have completed.
*/
- cxgb4_reclaim_completed_tx(adapter, &txq->q, true);
+ reclaim_completed_tx(adapter, &txq->q, -1, true);
/* Calculate the number of flits and TX Descriptors we're going to
* need along with how many TX Descriptors will be left over after
@@ -1715,7 +1792,16 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+
+ /* If we're using the SGE Doorbell Queue Timer facility, we
+ * don't need to ask the Firmware to send us Egress Queue CIDX
+ * Updates: the Hardware will do this automatically. And
+ * since we send the Ingress Queue CIDX Updates to the
+ * corresponding Ethernet Response Queue, we'll get them very
+ * quickly.
+ */
+ if (!txq->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle
@@ -2794,6 +2880,74 @@ static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
}
/**
+ * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
+ * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
+ * @rsp: Response Entry pointer into Response Queue
+ * @gl: Gather List pointer
+ *
+ * For adapters which support the SGE Doorbell Queue Timer facility,
+ * we configure the Ethernet TX Queues to send CIDX Updates to the
+ * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
+ * messages. This adds a small load to PCIe Link RX bandwidth and,
+ * potentially, higher CPU Interrupt load, but allows us to respond
+ * much more quickly to the CIDX Updates. This is important for
+ * Upper Layer Software which isn't willing to have a large amount
+ * of TX Data outstanding before receiving DMA Completions.
+ */
+static void t4_tx_completion_handler(struct sge_rspq *rspq,
+ const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+ struct port_info *pi = netdev_priv(rspq->netdev);
+ struct adapter *adapter = rspq->adap;
+ struct sge *s = &adapter->sge;
+ struct sge_eth_txq *txq;
+
+ /* skip RSS header */
+ rsp++;
+
+ /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+ */
+ if (unlikely(opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)rsp)->type ==
+ FW_TYPE_RSSCPL)) {
+ rsp++;
+ opcode = ((const struct rss_header *)rsp)->opcode;
+ rsp++;
+ }
+
+ if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
+ pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
+ __func__, opcode);
+ return;
+ }
+
+ txq = &s->ethtxq[pi->first_qset + rspq->idx];
+
+ /* We've got the Hardware Consumer Index Update in the Egress Update
+ * message. If we're using the SGE Doorbell Queue Timer mechanism,
+ * these Egress Update messages will be our sole CIDX Updates we get
+ * since we don't want to chew up PCIe bandwidth for both Ingress
+ * Messages and Status Page writes. However, the code which manages
+ * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
+ * stored in the Status Page at the end of the TX Queue. It's easiest
+ * to simply copy the CIDX Update value from the Egress Update message
+ * to the Status Page. Also note that no Endian issues need to be
+ * considered here since both are Big Endian and we're just copying
+ * bytes consistently ...
+ */
+ if (txq->dbqt) {
+ struct cpl_sge_egr_update *egr;
+
+ egr = (struct cpl_sge_egr_update *)rsp;
+ WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
+ }
+
+ t4_sge_eth_txq_egress_update(adapter, txq, -1);
+}
+
+/**
* t4_ethrx_handler - process an ingress ethernet packet
* @q: the response queue that received the packet
* @rsp: the response queue descriptor holding the RX_PKT message
@@ -2816,6 +2970,15 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct port_info *pi;
int ret = 0;
+ /* If we're looking at TX Queue CIDX Update, handle that separately
+ * and return.
+ */
+ if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
+ (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
+ t4_tx_completion_handler(q, rsp, si);
+ return 0;
+ }
+
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
@@ -3212,7 +3375,7 @@ static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
struct adapter *adap = cookie;
- if (adap->flags & MASTER_PF)
+ if (adap->flags & CXGB4_MASTER_PF)
t4_slow_intr_handler(adap);
process_intrq(adap);
return IRQ_HANDLED;
@@ -3228,7 +3391,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
struct adapter *adap = cookie;
t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
- if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
+ if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
process_intrq(adap))
return IRQ_HANDLED;
return IRQ_NONE; /* probably shared interrupt */
@@ -3243,9 +3406,9 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
*/
irq_handler_t t4_intr_handler(struct adapter *adap)
{
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
return t4_sge_intr_msix;
- if (adap->flags & USING_MSI)
+ if (adap->flags & CXGB4_USING_MSI)
return t4_intr_msi;
return t4_intr_intx;
}
@@ -3278,7 +3441,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
* global Master PF activities like checking for chip ingress stalls,
* etc.
*/
- if (!(adap->flags & MASTER_PF))
+ if (!(adap->flags & CXGB4_MASTER_PF))
goto done;
t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
@@ -3289,10 +3452,10 @@ done:
static void sge_tx_timer_cb(struct timer_list *t)
{
- unsigned long m;
- unsigned int i, budget;
struct adapter *adap = from_timer(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
+ unsigned long m, period;
+ unsigned int i, budget;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->txq_maperr[i]; m; m &= m - 1) {
@@ -3320,29 +3483,29 @@ static void sge_tx_timer_cb(struct timer_list *t)
budget = MAX_TIMER_TX_RECLAIM;
i = s->ethtxq_rover;
do {
- struct sge_eth_txq *q = &s->ethtxq[i];
-
- if (q->q.in_use &&
- time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
- __netif_tx_trylock(q->txq)) {
- int avail = reclaimable(&q->q);
-
- if (avail) {
- if (avail > budget)
- avail = budget;
-
- free_tx_desc(adap, &q->q, avail, true);
- q->q.in_use -= avail;
- budget -= avail;
- }
- __netif_tx_unlock(q->txq);
- }
+ budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
+ budget);
+ if (!budget)
+ break;
if (++i >= s->ethqsets)
i = 0;
- } while (budget && i != s->ethtxq_rover);
+ } while (i != s->ethtxq_rover);
s->ethtxq_rover = i;
- mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+
+ if (budget == 0) {
+ /* If we found too many reclaimable packets, schedule a timer
+ * in the near future to continue where we left off.
+ */
+ period = 2;
+ } else {
+ /* We reclaimed all reclaimable TX Descriptors, so reschedule
+ * at the normal period.
+ */
+ period = TX_QCHECK_PERIOD;
+ }
+
+ mod_timer(&s->tx_timer, jiffies + period);
}
/**
@@ -3386,7 +3549,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
- int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
+ int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
/* Size needs to be multiple of 16, including status entry. */
iq->size = roundup(iq->size, 16);
@@ -3421,7 +3584,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
: FW_IQ_IQTYPE_OFLD));
if (fl) {
- enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int chip_ver =
+ CHELSIO_CHIP_VERSION(adap->params.chip);
/* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
@@ -3459,10 +3623,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* the smaller 64-byte value there).
*/
c.fl0dcaen_to_fl0cidxfthresh =
- htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+ htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
FETCHBURSTMIN_128B_X :
- FETCHBURSTMIN_64B_X) |
- FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMIN_64B_T6_X) |
+ FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
c.fl0size = htons(flsz);
@@ -3584,14 +3748,24 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
+/**
+ * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
+ * @adap: the adapter
+ * @txq: the SGE Ethernet TX Queue to initialize
+ * @dev: the Linux Network Device
+ * @netdevq: the corresponding Linux TX Queue
+ * @iqid: the Ingress Queue to which to deliver CIDX Update messages
+ * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
+ */
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
- unsigned int iqid)
+ unsigned int iqid, u8 dbqt)
{
- int ret, nentries;
- struct fw_eq_eth_cmd c;
- struct sge *s = &adap->sge;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
struct port_info *pi = netdev_priv(dev);
+ struct sge *s = &adap->sge;
+ struct fw_eq_eth_cmd c;
+ int ret, nentries;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -3610,19 +3784,47 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
FW_EQ_ETH_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
- c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
- FW_EQ_ETH_CMD_VIID_V(pi->viid));
+
+ /* For TX Ethernet Queues using the SGE Doorbell Queue Timer
+ * mechanism, we use Ingress Queue messages for Hardware Consumer
+ * Index Updates on the TX Queue. Otherwise we have the Hardware
+ * write the CIDX Updates into the Status Page at the end of the
+ * TX Queue.
+ */
+ c.autoequiqe_to_viid = htonl((dbqt
+ ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
+ : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+ FW_EQ_ETH_CMD_VIID_V(pi->viid));
+
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
+ ? HOSTFCMODE_INGRESS_QUEUE_X
+ : HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+
+ /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
c.dcaen_to_eqsize =
- htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
+ ? FETCHBURSTMIN_64B_X
+ : FETCHBURSTMIN_64B_T6_X) |
FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+ /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
+ * currently configured Timer Index. This can be changed later via an
+ * ethtool -C tx-usecs {Timer Val} command. Note that the SGE
+ * Doorbell Queue mode is currently automatically enabled in the
+ * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
+ */
+ if (dbqt)
+ c.timeren_timerix =
+ cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
+ FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
+
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
@@ -3639,6 +3841,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->txq = netdevq;
txq->tso = txq->tx_cso = txq->vlan_ins = 0;
txq->mapping_err = 0;
+ txq->dbqt = dbqt;
+
return 0;
}
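
t4_sge_alloc_eth_txq() now takes a dbqt argument that selects between Status Page CIDX Updates (AUTOEQUEQE / HOSTFCMODE_STATUS_PAGE_X) and Ingress Queue CIDX Update messages (AUTOEQUIQE / HOSTFCMODE_INGRESS_QUEUE_X). A hedged sketch of a caller follows; dbq_timers_ok() and iqid are placeholders for whatever capability check and Ingress Queue the driver actually uses, and are not names from this patch.

/* Hypothetical caller: dbq_timers_ok() stands in for the check that the
 * firmware/hardware support the SGE Doorbell Queue Timer facility, and
 * iqid for the Ingress Queue that should receive the CIDX Updates when
 * Doorbell Queue mode is used.
 */
bool dbqt = dbq_timers_ok(adap);
int err;

err = t4_sge_alloc_eth_txq(adap, &s->ethtxq[qidx], dev,
			   netdev_get_tx_queue(dev, qidx),
			   iqid, dbqt);
if (err)
	goto err_free_queues;
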
@@ -3646,10 +3850,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid)
{
- int ret, nentries;
- struct fw_eq_ctrl_cmd c;
- struct sge *s = &adap->sge;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
struct port_info *pi = netdev_priv(dev);
+ struct sge *s = &adap->sge;
+ struct fw_eq_ctrl_cmd c;
+ int ret, nentries;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -3673,7 +3878,9 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
- htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
+ ? FETCHBURSTMIN_64B_X
+ : FETCHBURSTMIN_64B_T6_X) |
FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
@@ -3713,6 +3920,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int uld_type)
{
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
int ret, nentries;
struct fw_eq_ofld_cmd c;
struct sge *s = &adap->sge;
@@ -3743,7 +3951,9 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
- htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
+ ? FETCHBURSTMIN_64B_X
+ : FETCHBURSTMIN_64B_T6_X) |
FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
index 7b2207a2a130..eaf1fb74689c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -47,8 +47,7 @@ struct smt_data *t4_init_smt(void)
smt_size = SMT_SIZE;
- s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry),
- GFP_KERNEL);
+ s = kvzalloc(struct_size(s, smtab, smt_size), GFP_KERNEL);
if (!s)
return NULL;
s->smt_size = smt_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 82b70a565e24..9a54302bb046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -77,7 +77,7 @@ int cxgb4_get_srq_entry(struct net_device *dev,
adap = netdev2adap(dev);
s = adap->srq;
- if (!(adap->flags & FULL_INIT_DONE) || !s)
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s)
goto out;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8c34292a0ec..a3544041ad32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -198,7 +198,7 @@ static void t4_report_fw_error(struct adapter *adap)
if (pcie_fw & PCIE_FW_ERR_F) {
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
}
}
@@ -3794,7 +3794,7 @@ int t4_load_phy_fw(struct adapter *adap,
/* If we have version number support, then check to see if the adapter
* already has up-to-date PHY firmware loaded.
*/
- if (phy_fw_version) {
+ if (phy_fw_version) {
new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
if (ret < 0)
@@ -4105,6 +4105,9 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
* @mbox: the Firmware Mailbox to use
* @port: the Port ID
* @lc: the Port's Link Configuration
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
*
* Set up a port's MAC and PHY according to a desired link configuration.
* - If the PHY can auto-negotiate first decide what to advertise, then
@@ -4124,6 +4127,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
int ret;
fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
+
/* Convert driver coding of Pause Frame Flow Control settings into the
* Firmware's API.
*/
@@ -4143,8 +4147,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
fw_fec = cc_to_fwcap_fec(cc_fec);
/* Figure out what our Requested Port Capabilities are going to be.
+ * Note parallel structure in t4_handle_get_port_info() and
+ * init_link_config().
*/
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ if (lc->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
rcap = lc->acaps | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
@@ -4156,7 +4165,11 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
- /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
+ /* Some Requested Port Capabilities are trivially wrong if they exceed
+ * the Physical Port Capabilities. We can check that here and provide
+ * moderately useful feedback in the system log.
+ *
+ * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
* we need to exclude this from this check in order to maintain
* compatibility ...
*/
@@ -4185,6 +4198,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
sleep_ok, timeout);
+
+ /* Unfortunately, even if the Requested Port Capabilities "fit" within
+ * the Physical Port Capabilities, some combinations of features may
+ * still not be legal. For example, 40Gb/s and Reed-Solomon Forward
+ * Error Correction. So if the Firmware rejects the L1 Configure
+ * request, flag that here.
+ */
if (ret) {
dev_err(adapter->pdev_dev,
"Requested Port Capabilities %#x rejected, error %d\n",
@@ -4942,7 +4962,13 @@ static void pl_intr_handler(struct adapter *adap)
*/
int t4_slow_intr_handler(struct adapter *adapter)
{
- u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
+ /* There are rare cases where a PL_INT_CAUSE bit may end up getting
+ * set when the corresponding PL_INT_ENABLE bit isn't set. It's
+ * easiest just to mask that case here.
+ */
+ u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
+ u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A);
+ u32 cause = raw_cause & enable;
if (!(cause & GLBL_INTR_MASK))
return 0;
@@ -4994,7 +5020,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
ulptx_intr_handler(adapter);
/* Clear the interrupts just processed for which we are the master. */
- t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
+ t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK);
(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
return 1;
}
@@ -5217,7 +5243,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
static unsigned int t4_use_ldst(struct adapter *adap)
{
- return (adap->flags & FW_OK) && !adap->use_bd;
+ return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
}
/**
@@ -6106,7 +6132,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
* ( MPSBGMAP[Port 1] << 8 ) |
* ( MPSBGMAP[Port 0] << 0 ))
*/
- if (adapter->flags & FW_OK) {
+ if (adapter->flags & CXGB4_FW_OK) {
u32 param, val;
int ret;
@@ -6693,6 +6719,47 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
}
/**
+ * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
+ * @adap: the adapter
+ * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
+ * @dbqtimers: SGE Doorbell Queue Timer table
+ *
+ * Reads the SGE Doorbell Queue Timer values into the provided table.
+ * Returns 0 on success (Firmware and Hardware support this feature),
+ * an error on failure.
+ */
+int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
+ u16 *dbqtimers)
+{
+ int ret, dbqtimerix;
+
+ ret = 0;
+ dbqtimerix = 0;
+ while (dbqtimerix < ndbqtimers) {
+ int nparams, param;
+ u32 params[7], vals[7];
+
+ nparams = ndbqtimers - dbqtimerix;
+ if (nparams > ARRAY_SIZE(params))
+ nparams = ARRAY_SIZE(params);
+
+ for (param = 0; param < nparams; param++)
+ params[param] =
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
+ FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ nparams, params, vals);
+ if (ret)
+ break;
+
+ for (param = 0; param < nparams; param++)
+ dbqtimers[dbqtimerix++] = vals[param];
+ }
+ return ret;
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
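
The timer table read by t4_read_sge_dbqtimers() is what the ethtool -C tx-usecs path mentioned earlier has to match against: a requested coalescing value gets mapped to the nearest available Doorbell Queue Timer index. A minimal sketch of such a lookup, assuming the table entries have already been converted to microseconds (the raw firmware values may well be in core-clock ticks):

/* Minimal sketch, assuming dbqtimers[] is expressed in microseconds. */
static int closest_dbqtimer_index(const u16 *dbqtimers,
				  unsigned int ndbqtimers,
				  unsigned int usecs)
{
	unsigned int i, delta, min_delta = ~0U;
	int best = 0;

	for (i = 0; i < ndbqtimers; i++) {
		delta = usecs >= dbqtimers[i] ? usecs - dbqtimers[i]
					      : dbqtimers[i] - usecs;
		if (delta < min_delta) {
			min_delta = delta;
			best = i;
		}
	}
	return best;
}
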
@@ -7026,10 +7093,10 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
- /* Disable FW_OK flag so that mbox commands with FW_OK flag set
- * wont be sent when we are flashing FW.
+ /* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
+ * set won't be sent when we are flashing FW.
*/
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
@@ -7068,7 +7135,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
*/
(void)t4_init_devlog_params(adap);
out:
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
return ret;
}
@@ -8461,6 +8528,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);
+ /* Reset state for communicating new Transceiver Module status and
+ * whether the OS-dependent layer wants us to redo the current
+ * "sticky" L1 Configure Link Parameters.
+ */
lc->new_module = false;
lc->redo_l1cfg = false;
@@ -8497,9 +8568,15 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
*/
pi->port_type = port_type;
+ /* Record new Module Type information.
+ */
pi->mod_type = mod_type;
+ /* Let the OS-dependent layer know if we have a new
+ * Transceiver Module inserted.
+ */
lc->new_module = t4_is_inserted_mod_type(mod_type);
+
t4_os_portmod_changed(adapter, pi->port_id);
}
@@ -8507,8 +8584,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
fc != lc->fc || fec != lc->fec) { /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
- dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
- pi->tx_chan, t4_link_down_rc_str(linkdnrc));
+ dev_warn_ratelimited(adapter->pdev_dev,
+ "Port %d link down, reason: %s\n",
+ pi->tx_chan,
+ t4_link_down_rc_str(linkdnrc));
}
lc->link_ok = link_ok;
lc->speed = speed;
@@ -8518,6 +8597,11 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
lc->lpacaps = lpacaps;
lc->acaps = acaps & ADVERT_MASK;
+ /* If we're not physically capable of Auto-Negotiation, note
+ * this as Auto-Negotiation disabled. Otherwise, we track
+ * what Auto-Negotiation settings we have. Note parallel
+ * structure in t4_link_l1cfg_core() and init_link_config().
+ */
if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
lc->autoneg = AUTONEG_DISABLE;
} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
@@ -8535,6 +8619,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
t4_os_link_changed(adapter, pi->port_id, link_ok);
}
+ /* If we have a new Transceiver Module and the OS-dependent code has
+ * told us that it wants us to redo whatever "sticky" L1 Configuration
+ * Link Parameters are set, do that now.
+ */
if (lc->new_module && lc->redo_l1cfg) {
struct link_config old_lc;
int ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 361d5032c288..002fc62ea726 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -91,6 +91,7 @@ enum {
SGE_CTXT_SIZE = 24, /* size of SGE context */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+ SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */
SGE_MAX_IQ_SIZE = 65520,
SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index bf7325f6d553..0c5373462ced 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -218,6 +218,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index f6558cbfc54e..eb1aa82149db 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -71,12 +71,18 @@
#define FETCHBURSTMIN_64B_X 2
#define FETCHBURSTMIN_128B_X 3
+/* T6 and later use a single-bit encoding for FetchBurstMin */
+#define FETCHBURSTMIN_64B_T6_X 0
+#define FETCHBURSTMIN_128B_T6_X 1
+
#define FETCHBURSTMAX_256B_X 2
#define FETCHBURSTMAX_512B_X 3
+#define HOSTFCMODE_INGRESS_QUEUE_X 1
#define HOSTFCMODE_STATUS_PAGE_X 2
#define CIDXFLUSHTHRESH_32_X 5
+#define CIDXFLUSHTHRESH_128_X 7
#define UPDATEDELIVERY_INTERRUPT_X 1
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 1d9b3e1e5f94..b2a618e72fcf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1254,6 +1254,8 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
+ FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
+ FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
};
/*
@@ -1310,6 +1312,14 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
+ FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
+};
+
+/* Virtual link state as seen by the specified VF */
+enum vf_link_states {
+ FW_VF_LINK_STATE_AUTO = 0x00,
+ FW_VF_LINK_STATE_ENABLE = 0x01,
+ FW_VF_LINK_STATE_DISABLE = 0x02,
};
/*
@@ -1322,6 +1332,7 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+ FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX = 0x15,
FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
};
@@ -1751,8 +1762,8 @@ struct fw_eq_eth_cmd {
__be32 fetchszm_to_iqid;
__be32 dcaen_to_eqsize;
__be64 eqaddr;
- __be32 viid_pkd;
- __be32 r8_lo;
+ __be32 autoequiqe_to_viid;
+ __be32 timeren_timerix;
__be64 r9;
};
@@ -1847,6 +1858,10 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_EQSIZE_S 0
#define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S)
+#define FW_EQ_ETH_CMD_AUTOEQUIQE_S 31
+#define FW_EQ_ETH_CMD_AUTOEQUIQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUIQE_S)
+#define FW_EQ_ETH_CMD_AUTOEQUIQE_F FW_EQ_ETH_CMD_AUTOEQUIQE_V(1U)
+
#define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30
#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S)
#define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U)
@@ -1854,6 +1869,19 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_VIID_S 16
#define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S)
+#define FW_EQ_ETH_CMD_TIMEREN_S 3
+#define FW_EQ_ETH_CMD_TIMEREN_M 0x1
+#define FW_EQ_ETH_CMD_TIMEREN_V(x) ((x) << FW_EQ_ETH_CMD_TIMEREN_S)
+#define FW_EQ_ETH_CMD_TIMEREN_G(x) \
+ (((x) >> FW_EQ_ETH_CMD_TIMEREN_S) & FW_EQ_ETH_CMD_TIMEREN_M)
+#define FW_EQ_ETH_CMD_TIMEREN_F FW_EQ_ETH_CMD_TIMEREN_V(1U)
+
+#define FW_EQ_ETH_CMD_TIMERIX_S 0
+#define FW_EQ_ETH_CMD_TIMERIX_M 0x7
+#define FW_EQ_ETH_CMD_TIMERIX_V(x) ((x) << FW_EQ_ETH_CMD_TIMERIX_S)
+#define FW_EQ_ETH_CMD_TIMERIX_G(x) \
+ (((x) >> FW_EQ_ETH_CMD_TIMERIX_S) & FW_EQ_ETH_CMD_TIMERIX_M)
+
struct fw_eq_ctrl_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index a844296135b4..9125ddd89dd1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x14
-#define T4FW_VERSION_MICRO 0x08
+#define T4FW_VERSION_MINOR 0x16
+#define T4FW_VERSION_MICRO 0x09
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x14
-#define T5FW_VERSION_MICRO 0x08
+#define T5FW_VERSION_MINOR 0x16
+#define T5FW_VERSION_MICRO 0x09
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x14
-#define T6FW_VERSION_MICRO 0x08
+#define T6FW_VERSION_MINOR 0x16
+#define T6FW_VERSION_MICRO 0x09
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 5883f09e3804..3782e48dada2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -94,7 +94,7 @@ struct port_info {
struct adapter *adapter; /* our adapter */
u32 vlan_id; /* vlan id for VST */
u16 viid; /* virtual interface ID */
- s16 xact_addr_filt; /* index of our MAC address filter */
+ int xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
s8 mdio_addr;
@@ -352,6 +352,7 @@ struct sge {
struct hash_mac_addr {
struct list_head list;
u8 addr[ETH_ALEN];
+ unsigned int iface_mac;
};
struct mbox_list {
@@ -405,11 +406,12 @@ struct adapter {
};
enum { /* adapter flags */
- FULL_INIT_DONE = (1UL << 0),
- USING_MSI = (1UL << 1),
- USING_MSIX = (1UL << 2),
- QUEUES_BOUND = (1UL << 3),
- ROOT_NO_RELAXED_ORDERING = (1UL << 4),
+ CXGB4VF_FULL_INIT_DONE = (1UL << 0),
+ CXGB4VF_USING_MSI = (1UL << 1),
+ CXGB4VF_USING_MSIX = (1UL << 2),
+ CXGB4VF_QUEUES_BOUND = (1UL << 3),
+ CXGB4VF_ROOT_NO_RELAXED_ORDERING = (1UL << 4),
+ CXGB4VF_FW_OK = (1UL << 5),
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2fab87e86561..adc4d481815b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -155,6 +155,8 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
const char *fc;
const struct port_info *pi = netdev_priv(dev);
+ netif_carrier_on(dev);
+
switch (pi->link_cfg.speed) {
case 100:
s = "100Mbps";
@@ -200,6 +202,7 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
} else {
+ netif_carrier_off(dev);
netdev_info(dev, "link down\n");
}
}
@@ -236,6 +239,73 @@ void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
"inserted\n", dev->name, pi->mod_type);
}
+static int cxgb4vf_set_addr_hash(struct port_info *pi)
+{
+ struct adapter *adapter = pi->adapter;
+ u64 vec = 0;
+ bool ucast = false;
+ struct hash_mac_addr *entry;
+
+ /* Calculate the hash vector for the updated list and program it */
+ list_for_each_entry(entry, &adapter->mac_hlist, list) {
+ ucast |= is_unicast_ether_addr(entry->addr);
+ vec |= (1ULL << hash_mac_addr(entry->addr));
+ }
+ return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
+}
+
+/**
+ * cxgb4vf_change_mac - Update match filter for a MAC address.
+ * @pi: the port_info
+ * @viid: the VI id
+ * @tcam_idx: TCAM index of existing filter for old value of MAC address,
+ * or -1
+ * @addr: the new MAC address value
+ * @persistent: whether a new MAC allocation should be persistent
+ *
+ * Modifies an MPS filter and sets it to the new MAC address if
+ * @tcam_idx >= 0, or adds the MAC address to a new filter if
+ * @tcam_idx < 0. In the latter case the address is added persistently
+ * if @persistent is %true.
+ * If the TCAM runs out of entries, the address is programmed into the
+ * hash region instead.
+ */
+static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid,
+ int *tcam_idx, const u8 *addr, bool persistent)
+{
+ struct hash_mac_addr *new_entry, *entry;
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent);
+ /* We ran out of TCAM entries. Try programming the hash region. */
+ if (ret == -ENOMEM) {
+ /* If the MAC address to be updated is in the hash addr
+ * list, update it in the list
+ */
+ list_for_each_entry(entry, &adapter->mac_hlist, list) {
+ if (entry->iface_mac) {
+ ether_addr_copy(entry->addr, addr);
+ goto set_hash;
+ }
+ }
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+ ether_addr_copy(new_entry->addr, addr);
+ new_entry->iface_mac = true;
+ list_add_tail(&new_entry->list, &adapter->mac_hlist);
+set_hash:
+ ret = cxgb4vf_set_addr_hash(pi);
+ } else if (ret >= 0) {
+ *tcam_idx = ret;
+ ret = 0;
+ }
+
+ return ret;
+}
+
/*
* Net device operations.
* ======================
@@ -259,14 +329,10 @@ static int link_start(struct net_device *dev)
*/
ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
true);
- if (ret == 0) {
- ret = t4vf_change_mac(pi->adapter, pi->viid,
- pi->xact_addr_filt, dev->dev_addr, true);
- if (ret >= 0) {
- pi->xact_addr_filt = ret;
- ret = 0;
- }
- }
+ if (ret == 0)
+ ret = cxgb4vf_change_mac(pi, pi->viid,
+ &pi->xact_addr_filt,
+ dev->dev_addr, true);
/*
* We don't need to actually "start the link" itself since the
@@ -276,16 +342,6 @@ static int link_start(struct net_device *dev)
if (ret == 0)
ret = t4vf_enable_pi(pi->adapter, pi, true, true);
- /* The Virtual Interfaces are connected to an internal switch on the
- * chip which allows VIs attached to the same port to talk to each
- * other even when the port link is down. As a result, we generally
- * want to always report a VI's link as being "up", provided there are
- * no errors in enabling vi.
- */
-
- if (ret == 0)
- netif_carrier_on(dev);
-
return ret;
}
@@ -406,7 +462,7 @@ static void enable_rx(struct adapter *adapter)
* The interrupt queue doesn't use NAPI so we do the 0-increment of
* its Going To Sleep register here to get it started.
*/
- if (adapter->flags & USING_MSI)
+ if (adapter->flags & CXGB4VF_USING_MSI)
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC_V(0) |
SEINTARM_V(s->intrq.intr_params) |
@@ -550,7 +606,7 @@ static int setup_sge_queues(struct adapter *adapter)
* the intrq's queue ID as the interrupt forwarding queue for the
* subsequent calls ...
*/
- if (adapter->flags & USING_MSI) {
+ if (adapter->flags & CXGB4VF_USING_MSI) {
err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
adapter->port[0], 0, NULL, NULL);
if (err)
@@ -710,7 +766,7 @@ static int adapter_up(struct adapter *adapter)
* adapter setup. Once we've done this, many of our adapter
* parameters can no longer be changed ...
*/
- if ((adapter->flags & FULL_INIT_DONE) == 0) {
+ if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
err = setup_sge_queues(adapter);
if (err)
return err;
@@ -720,17 +776,18 @@ static int adapter_up(struct adapter *adapter)
return err;
}
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4VF_USING_MSIX)
name_msix_vecs(adapter);
- adapter->flags |= FULL_INIT_DONE;
+ adapter->flags |= CXGB4VF_FULL_INIT_DONE;
}
/*
* Acquire our interrupt resources. We only support MSI-X and MSI.
*/
- BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
- if (adapter->flags & USING_MSIX)
+ BUG_ON((adapter->flags &
+ (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
+ if (adapter->flags & CXGB4VF_USING_MSIX)
err = request_msix_queue_irqs(adapter);
else
err = request_irq(adapter->pdev->irq,
@@ -761,7 +818,7 @@ static void adapter_down(struct adapter *adapter)
/*
* Free interrupt resources.
*/
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4VF_USING_MSIX)
free_msix_queue_irqs(adapter);
else
free_irq(adapter->pdev->irq, adapter);
@@ -782,6 +839,13 @@ static int cxgb4vf_open(struct net_device *dev)
struct adapter *adapter = pi->adapter;
/*
+ * If we don't have a connection to the firmware there's nothing we
+ * can do.
+ */
+ if (!(adapter->flags & CXGB4VF_FW_OK))
+ return -ENXIO;
+
+ /*
* If this is the first interface that we're opening on the "adapter",
* bring the "adapter" up now.
*/
@@ -791,6 +855,13 @@ static int cxgb4vf_open(struct net_device *dev)
return err;
}
+ /* It's possible that the basic port information could have
+ * changed since we first read it.
+ */
+ err = t4vf_update_port_info(pi);
+ if (err < 0)
+ return err;
+
/*
* Note that this interface is up and start everything up ...
*/
@@ -863,21 +934,6 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
return ns;
}
-static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
-{
- struct adapter *adapter = pi->adapter;
- u64 vec = 0;
- bool ucast = false;
- struct hash_mac_addr *entry;
-
- /* Calculate the hash vector for the updated list and program it */
- list_for_each_entry(entry, &adapter->mac_hlist, list) {
- ucast |= is_unicast_ether_addr(entry->addr);
- vec |= (1ULL << hash_mac_addr(entry->addr));
- }
- return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
-}
-
static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
struct port_info *pi = netdev_priv(netdev);
@@ -1159,13 +1215,12 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
- addr->sa_data, true);
+ ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
+ addr->sa_data, true);
if (ret < 0)
return ret;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- pi->xact_addr_filt = ret;
return 0;
}
@@ -1179,7 +1234,7 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4VF_USING_MSIX) {
struct sge_eth_rxq *rxq;
int nqsets;
@@ -1354,7 +1409,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
case FW_PORT_TYPE_CR4_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
- FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
@@ -1365,6 +1420,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
break;
}
+ if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
+ FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
+ FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
+ } else {
+ SET_LMM(FEC_NONE);
+ }
+
FW_CAPS_TO_LMM(ANEG, Autoneg);
FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
@@ -1587,7 +1649,7 @@ static int cxgb4vf_set_ringparam(struct net_device *dev,
rp->tx_pending < MIN_TXQ_ENTRIES)
return -EINVAL;
- if (adapter->flags & FULL_INIT_DONE)
+ if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
return -EBUSY;
for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
@@ -1871,6 +1933,8 @@ static void cxgb4vf_get_wol(struct net_device *dev,
* TCP Segmentation Offload flags which we support.
*/
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_link_ksettings = cxgb4vf_get_link_ksettings,
@@ -2102,7 +2166,7 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
static int sge_queue_entries(const struct adapter *adapter)
{
return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
- ((adapter->flags & USING_MSI) != 0);
+ ((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
@@ -2248,7 +2312,7 @@ static int sge_qstats_show(struct seq_file *seq, void *v)
static int sge_qstats_entries(const struct adapter *adapter)
{
return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
- ((adapter->flags & USING_MSI) != 0);
+ ((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
@@ -2657,6 +2721,7 @@ static int adap_init0(struct adapter *adapter)
*/
size_nports_qsets(adapter);
+ adapter->flags |= CXGB4VF_FW_OK;
return 0;
}
@@ -2691,7 +2756,8 @@ static void cfg_queues(struct adapter *adapter)
* support. In particular, this means that we need to know what kind
* of interrupts we'll be using ...
*/
- BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+ BUG_ON((adapter->flags &
+ (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
/*
* Count the number of 10GbE Virtual Interfaces that we have.
@@ -3017,11 +3083,13 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* using Relaxed Ordering.
*/
if (!pcie_relaxed_ordering_enabled(pdev))
- adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+ adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING;
err = adap_init0(adapter);
if (err)
- goto err_unmap_bar;
+ dev_err(&pdev->dev,
+ "Adapter initialization failed, error %d. Continuing in debug mode\n",
+ err);
/* Initialize hash mac addr list */
INIT_LIST_HEAD(&adapter->mac_hlist);
@@ -3046,13 +3114,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
break;
port_id = ffs(pmask) - 1;
pmask &= ~(1 << port_id);
- viid = t4vf_alloc_vi(adapter, port_id);
- if (viid < 0) {
- dev_err(&pdev->dev, "cannot allocate VI for port %d:"
- " err=%d\n", port_id, viid);
- err = viid;
- goto err_free_dev;
- }
/*
* Allocate our network device and stitch things together.
@@ -3060,7 +3121,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_PORT_QSETS);
if (netdev == NULL) {
- t4vf_free_vi(adapter, viid);
err = -ENOMEM;
goto err_free_dev;
}
@@ -3070,26 +3130,21 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
pi->adapter = adapter;
pi->pidx = pidx;
pi->port_id = port_id;
- pi->viid = viid;
/*
* Initialize the starting state of our "port" and register
* it.
*/
pi->xact_addr_filt = -1;
- netif_carrier_off(netdev);
netdev->irq = pdev->irq;
- netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
- netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HIGHDMA;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 81;
@@ -3100,6 +3155,23 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->dev_port = pi->port_id;
/*
+ * If we haven't been able to contact the firmware, there's
+ * nothing else we can do for this "port" ...
+ */
+ if (!(adapter->flags & CXGB4VF_FW_OK))
+ continue;
+
+ viid = t4vf_alloc_vi(adapter, port_id);
+ if (viid < 0) {
+ dev_err(&pdev->dev,
+ "cannot allocate VI for port %d: err=%d\n",
+ port_id, viid);
+ err = viid;
+ goto err_free_dev;
+ }
+ pi->viid = viid;
+
+ /*
* Initialize the hardware/software state for the port.
*/
err = t4vf_port_init(adapter, pidx);
@@ -3136,7 +3208,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* get MSI interrupts we bail with the error.
*/
if (msi == MSI_MSIX && enable_msix(adapter) == 0)
- adapter->flags |= USING_MSIX;
+ adapter->flags |= CXGB4VF_USING_MSIX;
else {
if (msi == MSI_MSIX) {
dev_info(adapter->pdev_dev,
@@ -3156,7 +3228,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
" err=%d\n", err);
goto err_free_dev;
}
- adapter->flags |= USING_MSI;
+ adapter->flags |= CXGB4VF_USING_MSI;
}
/* Now that we know how many "ports" we have and what interrupt
@@ -3186,6 +3258,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
continue;
}
+ netif_carrier_off(netdev);
set_bit(pidx, &adapter->registered_device_map);
}
if (adapter->registered_device_map == 0) {
@@ -3214,8 +3287,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
for_each_port(adapter, pidx) {
dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
adapter->port[pidx]->name,
- (adapter->flags & USING_MSIX) ? "MSI-X" :
- (adapter->flags & USING_MSI) ? "MSI" : "");
+ (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" :
+ (adapter->flags & CXGB4VF_USING_MSI) ? "MSI" : "");
}
/*
@@ -3228,12 +3301,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* so far and return the error.
*/
err_disable_interrupts:
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4VF_USING_MSIX;
+ } else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4VF_USING_MSI;
}
err_free_dev:
@@ -3242,13 +3315,13 @@ err_free_dev:
if (netdev == NULL)
continue;
pi = netdev_priv(netdev);
- t4vf_free_vi(adapter, pi->viid);
+ if (pi->viid)
+ t4vf_free_vi(adapter, pi->viid);
if (test_bit(pidx, &adapter->registered_device_map))
unregister_netdev(netdev);
free_netdev(netdev);
}
-err_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
@@ -3293,12 +3366,12 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
if (test_bit(pidx, &adapter->registered_device_map))
unregister_netdev(adapter->port[pidx]);
t4vf_sge_stop(adapter);
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4VF_USING_MSIX;
+ } else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4VF_USING_MSI;
}
/*
@@ -3321,7 +3394,8 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
continue;
pi = netdev_priv(netdev);
- t4vf_free_vi(adapter, pi->viid);
+ if (pi->viid)
+ t4vf_free_vi(adapter, pi->viid);
free_netdev(netdev);
}
iounmap(adapter->regs);
@@ -3369,12 +3443,12 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
* Interrupts allowing various internal pathways to drain.
*/
t4vf_sge_stop(adapter);
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4VF_USING_MSIX;
+ } else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4VF_USING_MSI;
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 3007e1ac1e61..f71c973398ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
* Allocate the hardware ring and PCI DMA bus address space for said.
*/
size_t hwlen = nelem * hwsize + stat_size;
- void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+ void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
if (!hwring)
return NULL;
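This and the later dma_zalloc_coherent() → dma_alloc_coherent() conversions are behaviour-preserving: dma_alloc_coherent() already returns zeroed memory, so the zalloc wrapper was redundant and is being removed tree-wide. The allocation pattern itself is unchanged (names below are illustrative):

    void *ring = dma_alloc_coherent(dev, len, &busaddr, GFP_KERNEL);
    if (!ring)
            return -ENOMEM;
    /* no memset() needed: the buffer comes back zero-filled */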
@@ -2044,8 +2044,9 @@ static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
*/
irq_handler_t t4vf_intr_handler(struct adapter *adapter)
{
- BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
- if (adapter->flags & USING_MSIX)
+ BUG_ON((adapter->flags &
+ (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
+ if (adapter->flags & CXGB4VF_USING_MSIX)
return t4vf_sge_intr_msix;
else
return t4vf_intr_msi;
@@ -2209,7 +2210,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
struct port_info *pi = netdev_priv(dev);
struct fw_iq_cmd cmd, rpl;
int ret, iqandst, flsz = 0;
- int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING);
+ int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING);
/*
* If we're using MSI interrupts and we're not initializing the
@@ -2218,7 +2219,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
* the Forwarded Interrupt Queue must be set up before any other
* ingress queue ...
*/
- if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
+ if ((adapter->flags & CXGB4VF_USING_MSI) &&
+ rspq != &adapter->sge.intrq) {
iqandst = SGE_INTRDST_IQ;
intr_dest = adapter->sge.intrq.abs_id;
} else
@@ -2268,7 +2270,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
if (fl) {
- enum chip_type chip =
+ unsigned int chip_ver =
CHELSIO_CHIP_VERSION(adapter->params.chip);
/*
* Allocate the ring for the hardware free list (with space
@@ -2319,10 +2321,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
*/
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
- FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
- FETCHBURSTMIN_128B_X :
- FETCHBURSTMIN_64B_X) |
- FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5
+ ? FETCHBURSTMIN_128B_X
+ : FETCHBURSTMIN_64B_T6_X) |
+ FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
cmd.fl0size = cpu_to_be16(flsz);
@@ -2411,10 +2413,11 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *devq,
unsigned int iqid)
{
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ struct port_info *pi = netdev_priv(dev);
+ struct fw_eq_eth_cmd cmd, rpl;
struct sge *s = &adapter->sge;
int ret, nentries;
- struct fw_eq_eth_cmd cmd, rpl;
- struct port_info *pi = netdev_priv(dev);
/*
* Calculate the size of the hardware TX Queue (including the Status
@@ -2448,17 +2451,19 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
FW_EQ_ETH_CMD_EQSTART_F |
FW_LEN16(cmd));
- cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
- FW_EQ_ETH_CMD_VIID_V(pi->viid));
+ cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ FW_EQ_ETH_CMD_VIID_V(pi->viid));
cmd.fetchszm_to_iqid =
cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
FW_EQ_ETH_CMD_IQID_V(iqid));
cmd.dcaen_to_eqsize =
- cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
- FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
+ cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
+ ? FETCHBURSTMIN_64B_X
+ : FETCHBURSTMIN_64B_T6_X) |
+ FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_ETH_CMD_CIDXFTHRESH_V(
- SGE_CIDXFLUSHTHRESH_32) |
+ CIDXFLUSHTHRESH_32_X) |
FW_EQ_ETH_CMD_EQSIZE_V(nentries));
cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 5b8c08cf523f..84dff74ca9cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -2005,8 +2005,10 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
fc != lc->fc || fec != lc->fec) { /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
- dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
- pi->port_id, t4vf_link_down_rc_str(linkdnrc));
+ dev_warn_ratelimited(adapter->pdev_dev,
+ "Port %d link down, reason: %s\n",
+ pi->port_id,
+ t4vf_link_down_rc_str(linkdnrc));
}
lc->link_ok = link_ok;
lc->speed = speed;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 99038dfc7fbe..9900993b6aea 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -32,7 +32,8 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
break;
default:
return -EPROTONOSUPPORT;
- };
+ }
+
data.type = FILTER_IPV4_5TUPLE;
data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 60641e202534..9a7f70db20c7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
* csum is correct or is zero.
*/
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
+ tcp_udp_csum_ok && outer_csum_ok &&
+ (ipv4_csum_ok || ipv6)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = encap;
}
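The reworked condition also accepts offloaded checksums for IPv6 frames: IPv6 has no L3 header checksum, so ipv4_csum_ok can never be set for them and only the TCP/UDP and outer checksums are meaningful. A hedged restatement, with l3_ok as an illustrative local:

    bool l3_ok = ipv4_csum_ok || ipv6;   /* IPv6: nothing to verify at L3 */

    if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
        tcp_udp_csum_ok && outer_csum_ok && l3_ok)
            skb->ip_summed = CHECKSUM_UNNECESSARY;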
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 0a82fcf16d35..c2586f44c29d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -395,6 +395,7 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
case 3:
dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
+ /* fall through */
case 2:
db->dumpblk = dm9000_dumpblk_16bit;
db->outblk = dm9000_outblk_16bit;
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 13430f75496c..f1a2da15dd0a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de)
netif_dbg(de, tx_done, de->dev,
"tx done, slot %d\n", tx_tail);
}
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
}
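This and the similar conversions below (dl2k, sundance, fealnx) follow the usual distinction between the two helpers: dev_consume_skb_irq() is for skbs that completed transmission normally, while dev_kfree_skb_irq() remains for genuine drops, so drop-monitor and tracing tools don't count successful TX completions as packet loss. Sketch of the split:

    if (tx_completed_ok)
            dev_consume_skb_irq(skb);   /* normal completion, not a drop */
    else
            dev_kfree_skb_irq(skb);     /* real drop, visible to drop monitor */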
next:
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index 1812f4916917..ba0a69b363f8 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -224,9 +224,7 @@ subsequent_board:
return;
}
- mtable = kmalloc(sizeof(struct mediatable) +
- count * sizeof(struct medialeaf),
- GFP_KERNEL);
+ mtable = kmalloc(struct_size(mtable, mleaf, count), GFP_KERNEL);
if (mtable == NULL)
return; /* Horrible, impossible failure. */
last_mediatable = tp->mtable = mtable;
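struct_size(mtable, mleaf, count) evaluates to the size of struct mediatable plus count trailing struct medialeaf entries, i.e. the same value as the open-coded expression it replaces, but it saturates to SIZE_MAX if the multiplication or addition would overflow, so kmalloc() fails cleanly instead of under-allocating:

    /* struct_size(mtable, mleaf, count) ==
     *         sizeof(*mtable) + count * sizeof(mtable->mleaf[0])
     * with overflow checking folded in.
     */
    mtable = kmalloc(struct_size(mtable, mleaf, count), GFP_KERNEL);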
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index d8d423f22c4f..cfcdfee718b0 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -843,9 +843,9 @@ rio_free_tx (struct net_device *dev, int irq)
desc_to_dma(&np->tx_ring[entry]),
skb->len, PCI_DMA_TODEVICE);
if (irq)
- dev_kfree_skb_irq (skb);
+ dev_consume_skb_irq(skb);
else
- dev_kfree_skb (skb);
+ dev_kfree_skb(skb);
np->tx_skbuff[entry] = NULL;
entry = (entry + 1) % TX_RING_SIZE;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 1a27176381fb..4a37a69764ce 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1193,7 +1193,6 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
int handled = 0;
int i;
-
do {
int intr_status = ioread16(ioaddr + IntrStatus);
iowrite16(intr_status, ioaddr + IntrStatus);
@@ -1286,7 +1285,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
dma_unmap_single(&np->pci_dev->dev,
le32_to_cpu(np->tx_ring[entry].frag[0].addr),
skb->len, DMA_TO_DEVICE);
- dev_kfree_skb_irq (np->tx_skbuff[entry]);
+ dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
np->tx_ring[entry].frag[0].addr = 0;
np->tx_ring[entry].frag[0].length = 0;
@@ -1305,7 +1304,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
dma_unmap_single(&np->pci_dev->dev,
le32_to_cpu(np->tx_ring[entry].frag[0].addr),
skb->len, DMA_TO_DEVICE);
- dev_kfree_skb_irq (np->tx_skbuff[entry]);
+ dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
np->tx_ring[entry].frag[0].addr = 0;
np->tx_ring[entry].frag[0].length = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1e9d882c04ef..59a7f0b99069 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
total_size = buf_len;
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
- get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- get_fat_cmd.size,
- &get_fat_cmd.dma, GFP_ATOMIC);
+ get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma, GFP_ATOMIC);
if (!get_fat_cmd.va)
return -ENOMEM;
@@ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -EINVAL;
cmd.size = sizeof(struct be_cmd_resp_port_type);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
return -ENOMEM;
@@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter,
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
- flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
@@ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
}
flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
- flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
- GFP_KERNEL);
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+ GFP_KERNEL);
if (!flash_cmd.va)
return -ENOMEM;
@@ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
- attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- attribs_cmd.size,
- &attribs_cmd.dma, GFP_ATOMIC);
+ attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ attribs_cmd.size,
+ &attribs_cmd.dma, GFP_ATOMIC);
if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
@@ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
- get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma,
- GFP_ATOMIC);
+ get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma,
+ GFP_ATOMIC);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -3829,8 +3829,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
@@ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- extfat_cmd.size, &extfat_cmd.dma,
- GFP_ATOMIC);
+ extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va)
return -ENOMEM;
@@ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
- extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- extfat_cmd.size, &extfat_cmd.dma,
- GFP_ATOMIC);
+ extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ extfat_cmd.size, &extfat_cmd.dma,
+ GFP_ATOMIC);
if (!extfat_cmd.va) {
dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
@@ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_profile_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_ATOMIC);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_ATOMIC);
if (!cmd.va)
return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 3f6749fc889f..4c218341c51b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
int status = 0;
read_cmd.size = LANCER_READ_FILE_CHUNK;
- read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
- &read_cmd.dma, GFP_ATOMIC);
+ read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
+ &read_cmd.dma, GFP_ATOMIC);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+ cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- ddrdma_cmd.size, &ddrdma_cmd.dma,
- GFP_KERNEL);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ ddrdma_cmd.size, &ddrdma_cmd.dma,
+ GFP_KERNEL);
if (!ddrdma_cmd.va)
return -ENOMEM;
@@ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
- eeprom_cmd.size, &eeprom_cmd.dma,
- GFP_KERNEL);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ eeprom_cmd.size, &eeprom_cmd.dma,
+ GFP_KERNEL);
if (!eeprom_cmd.va)
return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 852f5bfe5f6d..3c7c04406a2b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ &mem->dma, GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
@@ -1270,10 +1270,6 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
#define is_arp_allowed_on_bmc(adapter, skb) \
(is_arp(skb) && is_arp_filt_enabled(adapter))
-#define is_broadcast_packet(eh, adapter) \
- (is_multicast_ether_addr(eh->h_dest) && \
- !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
-
#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
#define is_arp_filt_enabled(adapter) \
@@ -5766,9 +5762,9 @@ static int be_drv_init(struct be_adapter *adapter)
int status = 0;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
- &mbox_mem_alloc->dma,
- GFP_KERNEL);
+ mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
if (!mbox_mem_alloc->va)
return -ENOMEM;
@@ -5777,8 +5773,8 @@ static int be_drv_init(struct be_adapter *adapter)
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
- rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
- &rx_filter->dma, GFP_KERNEL);
+ rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
+ &rx_filter->dma, GFP_KERNEL);
if (!rx_filter->va) {
status = -ENOMEM;
goto free_mbox;
@@ -5792,8 +5788,8 @@ static int be_drv_init(struct be_adapter *adapter)
stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
else
stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
- stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
- &stats_cmd->dma, GFP_KERNEL);
+ stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
+ &stats_cmd->dma, GFP_KERNEL);
if (!stats_cmd->va) {
status = -ENOMEM;
goto free_rx_filter;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4d673225ed3e..b17b79e612a3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
return -ENOMEM;
/* Allocate descriptors */
- priv->rxdes = dma_zalloc_coherent(priv->dev,
- MAX_RX_QUEUE_ENTRIES *
- sizeof(struct ftgmac100_rxdes),
- &priv->rxdes_dma, GFP_KERNEL);
+ priv->rxdes = dma_alloc_coherent(priv->dev,
+ MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
+ &priv->rxdes_dma, GFP_KERNEL);
if (!priv->rxdes)
return -ENOMEM;
- priv->txdes = dma_zalloc_coherent(priv->dev,
- MAX_TX_QUEUE_ENTRIES *
- sizeof(struct ftgmac100_txdes),
- &priv->txdes_dma, GFP_KERNEL);
+ priv->txdes = dma_alloc_coherent(priv->dev,
+ MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
+ &priv->txdes_dma, GFP_KERNEL);
if (!priv->txdes)
return -ENOMEM;
@@ -1639,7 +1637,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
- };
+ }
/* Get PHY mode from device-tree */
if (np) {
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 084f24daf2b5..2a0e820526dc 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
int i;
- priv->descs = dma_zalloc_coherent(priv->dev,
- sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL);
+ priv->descs = dma_alloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index ae55da60ed0e..c24fd56a2c71 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1531,7 +1531,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
/* Free the original skb. */
pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(np->cur_tx->skbuff);
+ dev_consume_skb_irq(np->cur_tx->skbuff);
np->cur_tx->skbuff = NULL;
--np->really_tx_count;
if (np->cur_tx->control & TXLD) {
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index d3a62bc1f1c6..71793e03c3c8 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -97,5 +97,6 @@ config GIANFAR
source "drivers/net/ethernet/freescale/dpaa/Kconfig"
source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
+source "drivers/net/ethernet/freescale/enetc/Kconfig"
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3b4ff08e3841..6a93293d31e0 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -23,3 +23,6 @@ obj-$(CONFIG_FSL_FMAN) += fman/
obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
+
+obj-$(CONFIG_FSL_ENETC) += enetc/
+obj-$(CONFIG_FSL_ENETC_VF) += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f53090cde041..dfebc30c4841 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
+ struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
int offset = 0;
@@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ txq = netdev_get_tx_queue(net_dev, queue_mapping);
+
+ /* LLTX requires us to update trans_start ourselves */
+ txq->trans_start = jiffies;
+
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 62497119c85f..bdee441bc3b7 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -501,7 +501,7 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
struct device_node *mac_node = dev->of_node;
struct device_node *fman_node = NULL, *ptp_node = NULL;
struct platform_device *ptp_dev = NULL;
- struct qoriq_ptp *ptp = NULL;
+ struct ptp_qoriq *ptp = NULL;
info->phc_index = -1;
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index 809a155eb193..f6d244c663fd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -9,8 +9,9 @@ config FSL_DPAA2_ETH
config FSL_DPAA2_PTP_CLOCK
tristate "Freescale DPAA2 PTP Clock"
- depends on FSL_DPAA2_ETH && POSIX_TIMERS
- select PTP_1588_CLOCK
+ depends on FSL_DPAA2_ETH
+ imply PTP_1588_CLOCK
+ default y
help
This driver adds support for using the DPAA2 1588 timer module
as a PTP clock.
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 2f424e0a8225..d1e78cdd512f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
# Needed by the tracing framework
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
new file mode 100644
index 000000000000..a027f4a9d0cc
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "dpaa2-eth.h"
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
+static struct dentry *dpaa2_dbg_root;
+
+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct rtnl_link_stats64 *stats;
+ struct dpaa2_eth_drv_stats *extras;
+ int i;
+
+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
+ "Tx SG", "Tx realloc", "Enq busy");
+
+ for_each_online_cpu(i) {
+ stats = per_cpu_ptr(priv->percpu_stats, i);
+ extras = per_cpu_ptr(priv->percpu_extras, i);
+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
+ i,
+ stats->rx_packets,
+ stats->rx_errors,
+ extras->rx_sg_frames,
+ stats->tx_packets,
+ stats->tx_errors,
+ extras->tx_conf_frames,
+ extras->tx_sg_frames,
+ extras->tx_reallocs,
+ extras->tx_portal_busy);
+ }
+
+ return 0;
+}
+
+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+ err = single_open(file, dpaa2_dbg_cpu_show, priv);
+ if (err < 0)
+ netdev_err(priv->net_dev, "single_open() failed\n");
+
+ return err;
+}
+
+static const struct file_operations dpaa2_dbg_cpu_ops = {
+ .open = dpaa2_dbg_cpu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
+{
+ switch (fq->type) {
+ case DPAA2_RX_FQ:
+ return "Rx";
+ case DPAA2_TX_CONF_FQ:
+ return "Tx conf";
+ default:
+ return "N/A";
+ }
+}
+
+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_fq *fq;
+ u32 fcnt, bcnt;
+ int i, err;
+
+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "Type", "Frames", "Pending frames");
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+ if (err)
+ fcnt = 0;
+
+ seq_printf(file, "%5d%16d%16s%16llu%16u\n",
+ fq->fqid,
+ fq->target_cpu,
+ fq_type_to_str(fq),
+ fq->stats.frames,
+ fcnt);
+ }
+
+ return 0;
+}
+
+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+ err = single_open(file, dpaa2_dbg_fqs_show, priv);
+ if (err < 0)
+ netdev_err(priv->net_dev, "single_open() failed\n");
+
+ return err;
+}
+
+static const struct file_operations dpaa2_dbg_fq_ops = {
+ .open = dpaa2_dbg_fqs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s%16s%16s%16s%16s\n",
+ "CHID", "CPU", "Deq busy", "CDANs", "Buf count");
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ seq_printf(file, "%4d%16d%16llu%16llu%16d\n",
+ ch->ch_id,
+ ch->nctx.desired_cpu,
+ ch->stats.dequeue_portal_busy,
+ ch->stats.cdan,
+ ch->buf_count);
+ }
+
+ return 0;
+}
+
+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
+ err = single_open(file, dpaa2_dbg_ch_show, priv);
+ if (err < 0)
+ netdev_err(priv->net_dev, "single_open() failed\n");
+
+ return err;
+}
+
+static const struct file_operations dpaa2_dbg_ch_ops = {
+ .open = dpaa2_dbg_ch_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
+{
+ if (!dpaa2_dbg_root)
+ return;
+
+ /* Create a directory for the interface */
+ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
+ dpaa2_dbg_root);
+ if (!priv->dbg.dir) {
+ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
+ return;
+ }
+
+ /* per-cpu stats file */
+ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
+ priv->dbg.dir, priv,
+ &dpaa2_dbg_cpu_ops);
+ if (!priv->dbg.cpu_stats) {
+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+ goto err_cpu_stats;
+ }
+
+ /* per-fq stats file */
+ priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
+ priv->dbg.dir, priv,
+ &dpaa2_dbg_fq_ops);
+ if (!priv->dbg.fq_stats) {
+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+ goto err_fq_stats;
+ }
+
+ /* per-channel stats file */
+ priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
+ priv->dbg.dir, priv,
+ &dpaa2_dbg_ch_ops);
+ if (!priv->dbg.ch_stats) {
+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
+ goto err_ch_stats;
+ }
+
+ return;
+
+err_ch_stats:
+ debugfs_remove(priv->dbg.fq_stats);
+err_fq_stats:
+ debugfs_remove(priv->dbg.cpu_stats);
+err_cpu_stats:
+ debugfs_remove(priv->dbg.dir);
+}
+
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
+ debugfs_remove(priv->dbg.fq_stats);
+ debugfs_remove(priv->dbg.ch_stats);
+ debugfs_remove(priv->dbg.cpu_stats);
+ debugfs_remove(priv->dbg.dir);
+}
+
+void dpaa2_eth_dbg_init(void)
+{
+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
+ if (!dpaa2_dbg_root) {
+ pr_err("DPAA2-ETH: debugfs create failed\n");
+ return;
+ }
+
+ pr_debug("DPAA2-ETH: debugfs created\n");
+}
+
+void dpaa2_eth_dbg_exit(void)
+{
+ debugfs_remove(dpaa2_dbg_root);
+}
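Note: the entries created above are plain seq_file text, so they can be read like any other debugfs file. A minimal userspace sketch follows; the debugfs mount point, the "dpaa2-eth" root directory name and the "eth0" interface name are assumptions for illustration, not taken from this patch.

/* Hypothetical reader for the per-CPU stats file exposed above. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dpaa2-eth/eth0/cpu_stats";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* table emitted by the cpu_stats show callback */
	fclose(f);
	return 0;
}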
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
new file mode 100644
index 000000000000..4f63de997a26
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ */
+#ifndef DPAA2_ETH_DEBUGFS_H
+#define DPAA2_ETH_DEBUGFS_H
+
+#include <linux/dcache.h>
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_debugfs {
+ struct dentry *dir;
+ struct dentry *fq_stats;
+ struct dentry *ch_stats;
+ struct dentry *cpu_stats;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void dpaa2_eth_dbg_init(void);
+void dpaa2_eth_dbg_exit(void);
+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
+#else
+static inline void dpaa2_eth_dbg_init(void) {}
+static inline void dpaa2_eth_dbg_exit(void) {}
+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* DPAA2_ETH_DEBUGFS_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 1ca9a18139ec..2ba49e959c3f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -86,16 +86,16 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
- skb_free_frag(sg_vaddr);
+ free_pages((unsigned long)sg_vaddr, 0);
if (dpaa2_sg_is_final(&sgt[i]))
break;
}
free_buf:
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
}
/* Build a linear skb based on a single-buffer frame descriptor */
@@ -109,7 +109,7 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
ch->buf_count--;
- skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
if (unlikely(!skb))
return NULL;
@@ -144,19 +144,19 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
- dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
if (i == 0) {
/* We build the skb around the first data buffer */
- skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
if (unlikely(!skb)) {
/* Free the first SG entry now, since we already
* unmapped it and obtained the virtual address
*/
- skb_free_frag(sg_vaddr);
+ free_pages((unsigned long)sg_vaddr, 0);
/* We still need to subtract the buffers used
* by this FD from our software counter
@@ -211,9 +211,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- skb_free_frag(vaddr);
+ dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vaddr, 0);
}
}
@@ -264,9 +264,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
fq = &priv->fq[queue_id];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
- priv->tx_qdid, 0,
- fq->tx_qdbin, fd);
+ err = priv->enqueue(priv, fq, fd, 0);
if (err != -EBUSY)
break;
}
@@ -298,6 +296,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
xdp_set_data_meta_invalid(&xdp);
+ xdp.rxq = &ch->xdp_rxq;
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -330,8 +329,20 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
xdp_release_buf(priv, ch, addr);
ch->stats.xdp_drop++;
break;
+ case XDP_REDIRECT:
+ dma_unmap_page(priv->net_dev->dev.parent, addr,
+ DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+ ch->buf_count--;
+ xdp.data_hard_start = vaddr;
+ err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+ if (unlikely(err))
+ ch->stats.xdp_drop++;
+ else
+ ch->stats.xdp_redirect++;
+ break;
}
+ ch->xdp.res |= xdp_act;
out:
rcu_read_unlock();
return xdp_act;
@@ -378,16 +389,16 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
}
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
percpu_extras->rx_sg_frames++;
percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
} else {
@@ -573,10 +584,11 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
* all of them on Tx Conf.
*/
swa = (struct dpaa2_eth_swa *)sgt_buf;
- swa->skb = skb;
- swa->scl = scl;
- swa->num_sg = num_sg;
- swa->sgt_size = sgt_buf_size;
+ swa->type = DPAA2_ETH_SWA_SG;
+ swa->sg.skb = skb;
+ swa->sg.scl = scl;
+ swa->sg.num_sg = num_sg;
+ swa->sg.sgt_size = sgt_buf_size;
/* Separately map the SGT buffer */
addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
@@ -611,7 +623,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
{
struct device *dev = priv->net_dev->dev.parent;
u8 *buffer_start, *aligned_start;
- struct sk_buff **skbh;
+ struct dpaa2_eth_swa *swa;
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
@@ -628,8 +640,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* (in the private data area) such that we can release it
* on Tx confirm
*/
- skbh = (struct sk_buff **)buffer_start;
- *skbh = skb;
+ swa = (struct dpaa2_eth_swa *)buffer_start;
+ swa->type = DPAA2_ETH_SWA_SINGLE;
+ swa->single.skb = skb;
addr = dma_map_single(dev, buffer_start,
skb_tail_pointer(skb) - buffer_start,
@@ -657,47 +670,65 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* dpaa2_eth_tx().
*/
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
- const struct dpaa2_fd *fd)
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr;
- struct sk_buff **skbh, *skb;
+ struct sk_buff *skb = NULL;
unsigned char *buffer_start;
struct dpaa2_eth_swa *swa;
u8 fd_format = dpaa2_fd_get_format(fd);
+ u32 fd_len = dpaa2_fd_get_len(fd);
fd_addr = dpaa2_fd_get_addr(fd);
- skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+ buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+ swa = (struct dpaa2_eth_swa *)buffer_start;
if (fd_format == dpaa2_fd_single) {
- skb = *skbh;
- buffer_start = (unsigned char *)skbh;
- /* Accessing the skb buffer is safe before dma unmap, because
- * we didn't map the actual skb shell.
- */
- dma_unmap_single(dev, fd_addr,
- skb_tail_pointer(skb) - buffer_start,
- DMA_BIDIRECTIONAL);
+ if (swa->type == DPAA2_ETH_SWA_SINGLE) {
+ skb = swa->single.skb;
+ /* Accessing the skb buffer is safe before dma unmap,
+ * because we didn't map the actual skb shell.
+ */
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_BIDIRECTIONAL);
+ } else {
+ WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
+ dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
+ DMA_BIDIRECTIONAL);
+ }
} else if (fd_format == dpaa2_fd_sg) {
- swa = (struct dpaa2_eth_swa *)skbh;
- skb = swa->skb;
+ skb = swa->sg.skb;
/* Unmap the scatterlist */
- dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
- kfree(swa->scl);
+ dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
+ DMA_BIDIRECTIONAL);
+ kfree(swa->sg.scl);
/* Unmap the SGT buffer */
- dma_unmap_single(dev, fd_addr, swa->sgt_size,
+ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
DMA_BIDIRECTIONAL);
} else {
netdev_dbg(priv->net_dev, "Invalid FD format\n");
return;
}
+ if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
+ fq->dq_frames++;
+ fq->dq_bytes += fd_len;
+ }
+
+ if (swa->type == DPAA2_ETH_SWA_XDP) {
+ xdp_return_frame(swa->xdp.xdpf);
+ return;
+ }
+
/* Get the timestamp value */
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
struct skb_shared_hwtstamps shhwtstamps;
- __le64 *ts = dpaa2_get_ts(skbh, true);
+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
u64 ns;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -709,10 +740,10 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
/* Free SGT buffer allocated on tx */
if (fd_format != dpaa2_fd_single)
- skb_free_frag(skbh);
+ skb_free_frag(buffer_start);
/* Move on with skb release */
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, in_napi);
}
static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
@@ -785,9 +816,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
queue_mapping = skb_get_queue_mapping(skb);
fq = &priv->fq[queue_mapping];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
- priv->tx_qdid, 0,
- fq->tx_qdbin, &fd);
+ err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
break;
}
@@ -795,7 +824,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- free_tx_fd(priv, &fd);
+ free_tx_fd(priv, fq, &fd, false);
} else {
fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
@@ -832,12 +861,9 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras->tx_conf_frames++;
percpu_extras->tx_conf_bytes += fd_len;
- fq->dq_frames++;
- fq->dq_bytes += fd_len;
-
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- free_tx_fd(priv, fd);
+ free_tx_fd(priv, fq, fd, true);
if (likely(!fd_errors))
return;
@@ -903,7 +929,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
- void *buf;
+ struct page *page;
dma_addr_t addr;
int i, err;
@@ -911,14 +937,16 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* Allocate buffer visible to WRIOP + skb shared info +
* alignment padding
*/
- buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
- if (unlikely(!buf))
+ /* allocate one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page)
goto err_alloc;
- buf = PTR_ALIGN(buf, priv->rx_buf_align);
-
- addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@@ -926,7 +954,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
- buf, dpaa2_eth_buf_raw_size(priv),
+ page, DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, DPAA2_ETH_RX_BUF_SIZE,
bpid);
}
@@ -948,7 +976,7 @@ release_bufs:
return i;
err_map:
- skb_free_frag(buf);
+ __free_pages(page, 0);
err_alloc:
/* If we managed to allocate at least some buffers,
* release them to hardware
@@ -1083,6 +1111,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
+ ch->xdp.res = 0;
priv = ch->priv;
do {
@@ -1128,7 +1157,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
work_done = max(rx_cleaned, 1);
out:
- if (txc_fq) {
+ if (txc_fq && txc_fq->dq_frames) {
nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
netdev_tx_completed_queue(nq, txc_fq->dq_frames,
txc_fq->dq_bytes);
@@ -1136,6 +1165,9 @@ out:
txc_fq->dq_bytes = 0;
}
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush_map();
+
return work_done;
}
@@ -1243,34 +1275,36 @@ enable_err:
return err;
}
-/* The DPIO store must be empty when we call this,
- * at the end of every NAPI cycle.
- */
-static u32 drain_channel(struct dpaa2_eth_channel *ch)
+/* Total number of in-flight frames on ingress queues */
+static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
{
- u32 drained = 0, total = 0;
+ struct dpaa2_eth_fq *fq;
+ u32 fcnt = 0, bcnt = 0, total = 0;
+ int i, err;
- do {
- pull_channel(ch);
- drained = consume_frames(ch, NULL);
- total += drained;
- } while (drained);
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
+ if (err) {
+ netdev_warn(priv->net_dev, "query_fq_count failed");
+ break;
+ }
+ total += fcnt;
+ }
return total;
}
-static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
+static void wait_for_fq_empty(struct dpaa2_eth_priv *priv)
{
- struct dpaa2_eth_channel *ch;
- int i;
- u32 drained = 0;
-
- for (i = 0; i < priv->num_channels; i++) {
- ch = priv->channel[i];
- drained += drain_channel(ch);
- }
+ int retries = 10;
+ u32 pending;
- return drained;
+ do {
+ pending = ingress_fq_count(priv);
+ if (pending)
+ msleep(100);
+ } while (pending && --retries);
}
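Note: worked out, the timeout this polling loop implies is modest. The queue counts are read at most 10 times with a 100 ms sleep between non-empty reads:

	10 retries x 100 ms = ~1 s maximum wait for the ingress queues to drain

on top of the fixed 500 ms sleep that dpaa2_eth_stop() takes below before calling dpni_disable().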
static int dpaa2_eth_stop(struct net_device *net_dev)
@@ -1278,14 +1312,22 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int dpni_enabled = 0;
int retries = 10;
- u32 drained;
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
- /* Loop while dpni_disable() attempts to drain the egress FQs
- * and confirm them back to us.
+ /* On dpni_disable(), the MC firmware will:
+ * - stop MAC Rx and wait for all Rx frames to be enqueued to software
+ * - cut off WRIOP dequeues from egress FQs and wait until transmission
+ * of all in flight Tx frames is finished (and corresponding Tx conf
+ * frames are enqueued back to software)
+ *
+ * Before calling dpni_disable(), we wait for all Tx frames to arrive
+ * on WRIOP. After it finishes, wait until all remaining frames on Rx
+ * and Tx conf queues are consumed on NAPI poll.
*/
+ msleep(500);
+
do {
dpni_disable(priv->mc_io, 0, priv->mc_token);
dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
@@ -1300,19 +1342,9 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
*/
}
- /* Wait for NAPI to complete on every core and disable it.
- * In particular, this will also prevent NAPI from being rescheduled if
- * a new CDAN is serviced, effectively discarding the CDAN. We therefore
- * don't even need to disarm the channels, except perhaps for the case
- * of a huge coalescing value.
- */
+ wait_for_fq_empty(priv);
disable_ch_napi(priv);
- /* Manually drain the Rx and TxConf queues */
- drained = drain_ingress_frames(priv);
- if (drained)
- netdev_dbg(net_dev, "Drained %d frames.\n", drained);
-
/* Empty the buffer pool */
drain_pool(priv);
@@ -1730,6 +1762,105 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return 0;
}
+static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
+ struct xdp_frame *xdpf)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ unsigned int needed_headroom;
+ struct dpaa2_eth_swa *swa;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd fd;
+ void *buffer_start, *aligned_start;
+ dma_addr_t addr;
+ int err, i;
+
+ /* We require a minimum headroom to be able to transmit the frame.
+ * Otherwise return an error and let the original net_device handle it
+ */
+ needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
+ if (xdpf->headroom < needed_headroom)
+ return -EINVAL;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ /* Setup the FD fields */
+ memset(&fd, 0, sizeof(fd));
+
+ /* Align FD address, if possible */
+ buffer_start = xdpf->data - needed_headroom;
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= xdpf->data - xdpf->headroom)
+ buffer_start = aligned_start;
+
+ swa = (struct dpaa2_eth_swa *)buffer_start;
+ /* fill in the SWA fields needed to free this frame on Tx confirm */
+ swa->type = DPAA2_ETH_SWA_XDP;
+ swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
+ swa->xdp.xdpf = xdpf;
+
+ addr = dma_map_single(dev, buffer_start,
+ swa->xdp.dma_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ percpu_stats->tx_dropped++;
+ return -ENOMEM;
+ }
+
+ dpaa2_fd_set_addr(&fd, addr);
+ dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
+ dpaa2_fd_set_len(&fd, xdpf->len);
+ dpaa2_fd_set_format(&fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
+
+ fq = &priv->fq[smp_processor_id()];
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+ err = priv->enqueue(priv, fq, &fd, 0);
+ if (err != -EBUSY)
+ break;
+ }
+ percpu_extras->tx_portal_busy += i;
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ /* let the Rx device handle the cleanup */
+ return err;
+ }
+
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+
+ return 0;
+}
+
+static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ int drops = 0;
+ int i, err;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!netif_running(net_dev))
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
+ if (err) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ return n - drops;
+}
+
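Note: with .ndo_xdp_xmit wired up below, this interface can be the target of an XDP_REDIRECT issued on another netdev; attached to this interface instead, the same program exercises the XDP_REDIRECT case added in run_xdp(). A minimal sketch of such a program is shown here; the ifindex value, section name and libbpf-style build are assumptions, not part of this patch.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical XDP program: every frame it sees is redirected to the
 * device identified by OUT_IFINDEX. When that device is this DPAA2
 * interface, the frames arrive through dpaa2_eth_xdp_xmit().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define OUT_IFINDEX 3	/* assumed ifindex of the egress netdev */

SEC("xdp")
int redirect_all(struct xdp_md *ctx)
{
	return bpf_redirect(OUT_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";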
static const struct net_device_ops dpaa2_eth_ops = {
.ndo_open = dpaa2_eth_open,
.ndo_start_xmit = dpaa2_eth_tx,
@@ -1741,6 +1872,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_do_ioctl = dpaa2_eth_ioctl,
.ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_bpf = dpaa2_eth_xdp,
+ .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
};
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -1902,7 +2034,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
/* Register the new context */
channel->dpio = dpaa2_io_service_select(i);
- err = dpaa2_io_service_register(channel->dpio, nctx);
+ err = dpaa2_io_service_register(channel->dpio, nctx, dev);
if (err) {
dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
/* If no affine DPIO for this core, there's probably
@@ -1942,7 +2074,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
return 0;
err_set_cdan:
- dpaa2_io_service_deregister(channel->dpio, nctx);
+ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
@@ -1962,13 +2094,14 @@ err_alloc_ch:
static void free_dpio(struct dpaa2_eth_priv *priv)
{
- int i;
+ struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_channel *ch;
+ int i;
/* deregister CDAN notifications and free channels */
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
+ dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
free_channel(priv, ch);
}
}
@@ -2134,6 +2267,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_buffer_layout buf_layout = {0};
+ u16 rx_buf_align;
int err;
/* We need to check for WRIOP version 1.0.0, but depending on the MC
@@ -2142,9 +2276,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
*/
if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
- priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
else
- priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
@@ -2184,7 +2318,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
/* rx buffer */
buf_layout.pass_frame_status = true;
buf_layout.pass_parser_result = true;
- buf_layout.data_align = priv->rx_buf_align;
+ buf_layout.data_align = rx_buf_align;
buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
buf_layout.private_data_size = 0;
buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
@@ -2202,6 +2336,36 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
return 0;
}
+#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
+#define DPNI_ENQUEUE_FQID_VER_MINOR 9
+
+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd, u8 prio)
+{
+ return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, prio,
+ fq->tx_qdbin, fd);
+}
+
+static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd,
+ u8 prio __always_unused)
+{
+ return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
+ fq->tx_fqid, fd);
+}
+
+static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+{
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+ else
+ priv->enqueue = dpaa2_eth_enqueue_fq;
+}
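Note: the point of set_enqueue_mode() is to resolve the firmware-version check once at probe time and store a function pointer, rather than branching per packet. A compressed, self-contained sketch of that dispatch idea is below; the helper names are stand-ins, and the version compare is only a plausible shape, not the driver's dpaa2_eth_cmp_dpni_ver().

/* Illustration only: pick an enqueue routine once, based on firmware
 * version, then call it unconditionally on the hot path.
 */
#include <stdio.h>

typedef int (*enqueue_fn)(void *ctx, void *fd, unsigned char prio);

static int enqueue_qd(void *ctx, void *fd, unsigned char prio)
{ (void)ctx; (void)fd; (void)prio; return 0; }
static int enqueue_fq(void *ctx, void *fd, unsigned char prio)
{ (void)ctx; (void)fd; (void)prio; return 0; }

/* <0 if (maj, min) is older than (want_maj, want_min) */
static int ver_cmp(int maj, int min, int want_maj, int want_min)
{
	return maj != want_maj ? maj - want_maj : min - want_min;
}

int main(void)
{
	int fw_maj = 7, fw_min = 8;	/* example: DPNI API 7.8 */
	enqueue_fn enqueue = ver_cmp(fw_maj, fw_min, 7, 9) < 0 ?
			     enqueue_qd : enqueue_fq;

	printf("%s enqueue\n", enqueue == enqueue_qd ? "QDID" : "direct-FQID");
	return 0;
}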
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -2255,6 +2419,8 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
if (err)
goto close;
+ set_enqueue_mode(priv);
+
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules)
@@ -2302,9 +2468,14 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
queue.destination.type = DPNI_DEST_DPCON;
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
+ queue.flc.stash_control = 1;
+ queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
+ /* 01 01 00 - data, annotation, flow context */
+ queue.flc.value |= 0x14;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_RX, 0, fq->flowid,
- DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
+ DPNI_QUEUE_OPT_FLC,
&queue);
if (err) {
dev_err(dev, "dpni_set_queue(RX) failed\n");
@@ -2320,6 +2491,21 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
return err;
}
+ /* xdp_rxq setup */
+ err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
+ fq->flowid);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg failed\n");
+ return err;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+ return err;
+ }
+
return 0;
}
@@ -2339,6 +2525,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
}
fq->tx_qdbin = qid.qdbin;
+ fq->tx_fqid = qid.fqid;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
@@ -3083,6 +3270,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_netdev_reg;
}
+#ifdef CONFIG_DEBUG_FS
+ dpaa2_dbg_add(priv);
+#endif
+
dev_info(dev, "Probed interface %s\n", net_dev->name);
return 0;
@@ -3126,6 +3317,9 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
+#ifdef CONFIG_DEBUG_FS
+ dpaa2_dbg_remove(priv);
+#endif
unregister_netdev(net_dev);
if (priv->do_link_poll)
@@ -3170,4 +3364,25 @@ static struct fsl_mc_driver dpaa2_eth_driver = {
.match_id_table = dpaa2_eth_match_id_table
};
-module_fsl_mc_driver(dpaa2_eth_driver);
+static int __init dpaa2_eth_driver_init(void)
+{
+ int err;
+
+ dpaa2_eth_dbg_init();
+ err = fsl_mc_driver_register(&dpaa2_eth_driver);
+ if (err) {
+ dpaa2_eth_dbg_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dpaa2_eth_driver_exit(void)
+{
+ dpaa2_eth_dbg_exit();
+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
+}
+
+module_init(dpaa2_eth_driver_init);
+module_exit(dpaa2_eth_driver_exit);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 69c965de192b..7879622aa3e6 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -16,6 +16,7 @@
#include "dpni-cmd.h"
#include "dpaa2-eth-trace.h"
+#include "dpaa2-eth-debugfs.h"
#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
@@ -52,7 +53,8 @@
*/
#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+#define DPAA2_ETH_REFILL_THRESH \
+ (DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
/* Maximum number of buffers that can be acquired/released through a single
* QBMan command
@@ -62,9 +64,11 @@
/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_ETH_TX_BUF_ALIGN 64
-#define DPAA2_ETH_RX_BUF_SIZE 2048
-#define DPAA2_ETH_SKB_SIZE \
- (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE
+#define DPAA2_ETH_RX_BUF_TAILROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_ETH_RX_BUF_SIZE \
+ (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
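Note: for a sense of the resulting geometry, assuming a 4 KiB PAGE_SIZE and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of 320 bytes (both properties of the target configuration, not of this patch):

	DPAA2_ETH_RX_BUF_RAW_SIZE = 4096
	DPAA2_ETH_RX_BUF_TAILROOM =  320   (assumed)
	DPAA2_ETH_RX_BUF_SIZE     = 4096 - 320 = 3776 bytes visible to WRIOP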
/* Hardware annotation area in RX/TX buffers */
#define DPAA2_ETH_RX_HWA_SIZE 64
@@ -85,12 +89,33 @@
*/
#define DPAA2_ETH_SWA_SIZE 64
+/* We store different information in the software annotation area of a Tx frame
+ * based on what type of frame it is
+ */
+enum dpaa2_eth_swa_type {
+ DPAA2_ETH_SWA_SINGLE,
+ DPAA2_ETH_SWA_SG,
+ DPAA2_ETH_SWA_XDP,
+};
+
/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
struct dpaa2_eth_swa {
- struct sk_buff *skb;
- struct scatterlist *scl;
- int num_sg;
- int sgt_size;
+ enum dpaa2_eth_swa_type type;
+ union {
+ struct {
+ struct sk_buff *skb;
+ } single;
+ struct {
+ struct sk_buff *skb;
+ struct scatterlist *scl;
+ int num_sg;
+ int sgt_size;
+ } sg;
+ struct {
+ int dma_size;
+ struct xdp_frame *xdpf;
+ } xdp;
+ };
};
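Note: the comment above requires the whole annotation to stay within DPAA2_ETH_SWA_SIZE (64 bytes). A standalone sketch of how that constraint could be checked at build time is below; the struct is a stand-in mirroring the union's shape, and in the driver itself a BUILD_BUG_ON() in an init path such as dpaa2_eth_probe() would serve the same purpose (neither is part of this patch).

/* Illustrative only: compile-time guard on the annotation size. */
#include <assert.h>	/* static_assert (C11) */

struct example_swa {		/* stand-in for struct dpaa2_eth_swa */
	int type;
	union {
		struct { void *skb; } single;
		struct { void *skb; void *scl; int num_sg; int sgt_size; } sg;
		struct { int dma_size; void *xdpf; } xdp;
	};
};

static_assert(sizeof(struct example_swa) <= 64,
	      "software annotation must fit the 64-byte SWA area");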
/* Annotation valid bits in FD FRC */
@@ -253,6 +278,7 @@ struct dpaa2_eth_ch_stats {
__u64 xdp_drop;
__u64 xdp_tx;
__u64 xdp_tx_err;
+ __u64 xdp_redirect;
};
/* Maximum number of queues associated with a DPNI */
@@ -273,6 +299,7 @@ struct dpaa2_eth_priv;
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
+ u32 tx_fqid;
u16 flowid;
int target_cpu;
u32 dq_frames;
@@ -291,6 +318,7 @@ struct dpaa2_eth_ch_xdp {
struct bpf_prog *prog;
u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
int drop_cnt;
+ unsigned int res;
};
struct dpaa2_eth_channel {
@@ -305,6 +333,7 @@ struct dpaa2_eth_channel {
int buf_count;
struct dpaa2_eth_ch_stats stats;
struct dpaa2_eth_ch_xdp xdp;
+ struct xdp_rxq_info xdp_rxq;
};
struct dpaa2_eth_dist_fields {
@@ -325,6 +354,9 @@ struct dpaa2_eth_priv {
u8 num_fqs;
struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+ int (*enqueue)(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd, u8 prio);
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
@@ -342,7 +374,6 @@ struct dpaa2_eth_priv {
bool rx_tstamp; /* Rx timestamping enabled */
u16 tx_qdid;
- u16 rx_buf_align;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
* This is the cpu set on which Rx and Tx conf frames are processed
@@ -365,6 +396,9 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
struct bpf_prog *xdp_prog;
+#ifdef CONFIG_DEBUG_FS
+ struct dpaa2_debugfs dbg;
+#endif
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -405,26 +439,27 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
#define dpaa2_eth_fs_count(priv) \
((priv)->dpni_attrs.fs_entries)
+/* We have exactly one {Rx, Tx conf} queue per channel */
+#define dpaa2_eth_queue_count(priv) \
+ ((priv)->num_channels)
+
enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_HASH,
DPAA2_ETH_RX_DIST_CLS
};
-/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
- * the buffer also needs space for its shared info struct, and we need
- * to allocate enough to accommodate hardware alignment restrictions
- */
-static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
-{
- return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
-}
-
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
{
unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+ /* If we don't have an skb (e.g. XDP buffer), we only need space for
+ * the software annotation area
+ */
+ if (!skb)
+ return headroom;
+
/* For non-linear skbs we have no headroom requirement, as we build a
* SG frame with a newly allocated SGT buffer
*/
@@ -443,14 +478,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
*/
static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
{
- return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
- DPAA2_ETH_RX_HWA_SIZE;
-}
-
-/* We have exactly one {Rx, Tx conf} queue per channel */
-static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
-{
- return priv->num_channels;
+ return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index a7389e722c49..591dfcf76adb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -48,6 +48,7 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] xdp drop",
"[drv] xdp tx",
"[drv] xdp tx errors",
+ "[drv] xdp redirect",
/* FQ stats */
"[qbman] rx pending frames",
"[qbman] rx pending bytes",
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
new file mode 100644
index 000000000000..8429f5c1d810
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC
+ tristate "ENETC PF driver"
+ depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ help
+ This driver supports NXP ENETC gigabit ethernet controller PCIe
+ physical function (PF) devices, managing ENETC Ports at a privileged
+ level.
+
+ If compiled as module (M), the module name is fsl-enetc.
+
+config FSL_ENETC_VF
+ tristate "ENETC VF driver"
+ depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ help
+ This driver supports NXP ENETC gigabit ethernet controller PCIe
+ virtual function (VF) devices enabled by the ENETC PF driver.
+
+ If compiled as module (M), the module name is fsl-enetc-vf.
+
+config FSL_ENETC_PTP_CLOCK
+ tristate "ENETC PTP clock driver"
+ depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
+ default y
+ help
+ This driver adds support for using the ENETC 1588 timer
+ as a PTP clock. This clock is only useful if your PTP
+ programs are getting hardware time stamps on the PTP Ethernet
+ packets using the SO_TIMESTAMPING API.
+
+ If compiled as module (M), the module name is fsl-enetc-ptp.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
new file mode 100644
index 000000000000..7139e414dccf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
+fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \
+ enetc_mdio.o
+fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
+fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
+
+obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+
+ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
+fsl-enetc-vf-objs := enetc_vf.o
+else
+fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
+ enetc_ethtool.o
+fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
+endif
+
+obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
+fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
new file mode 100644
index 000000000000..5bb9eb35d76d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -0,0 +1,1604 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/of_mdio.h>
+#include <linux/vmalloc.h>
+
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS 13
+#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
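Note: worked out, the worst-case BD budget these macros describe is:

	ENETC_TXBDS_MAX_NEEDED = ENETC_TXBDS_NEEDED(13 + 1)
	                       = (13 frags + 1 head) + 1 extension BD + 1 BD gap
	                       = 16 BDs per frame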
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int count;
+
+ tx_ring = priv->tx_ring[skb->queue_mapping];
+
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ if (unlikely(!count))
+ goto drop_packet_err;
+
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ netif_stop_subqueue(ndev, tx_ring->index);
+
+ return NETDEV_TX_OK;
+
+drop_packet_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
+{
+ int l3_start, l3_hsize;
+ u16 l3_flags, l4_flags;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ l4_flags = ENETC_TXBD_L4_TCP;
+ break;
+ case offsetof(struct udphdr, check):
+ l4_flags = ENETC_TXBD_L4_UDP;
+ break;
+ default:
+ skb_checksum_help(skb);
+ return false;
+ }
+
+ l3_start = skb_network_offset(skb);
+ l3_hsize = skb_network_header_len(skb);
+
+ l3_flags = 0;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ l3_flags = ENETC_TXBD_L3_IPV6;
+
+ /* write BD fields */
+ txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
+ txbd->l4_csoff = l4_flags;
+
+ return true;
+}
+
+static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_dma_page)
+ dma_unmap_page(tx_ring->dev, tx_swbd->dma,
+ tx_swbd->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(tx_ring->dev, tx_swbd->dma,
+ tx_swbd->len, DMA_TO_DEVICE);
+ tx_swbd->dma = 0;
+}
+
+static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->dma)
+ enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
+ if (tx_swbd->skb) {
+ dev_kfree_skb_any(tx_swbd->skb);
+ tx_swbd->skb = NULL;
+ }
+}
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct skb_frag_struct *frag;
+ int len = skb_headlen(skb);
+ union enetc_tx_bd temp_bd;
+ union enetc_tx_bd *txbd;
+ bool do_vlan, do_tstamp;
+ int i, count = 0;
+ unsigned int f;
+ dma_addr_t dma;
+ u8 flags = 0;
+
+ i = tx_ring->next_to_use;
+ txbd = ENETC_TXBD(*tx_ring, i);
+ prefetchw(txbd);
+
+ dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+ goto dma_err;
+
+ temp_bd.addr = cpu_to_le64(dma);
+ temp_bd.buf_len = cpu_to_le16(len);
+ temp_bd.lstatus = 0;
+
+ tx_swbd = &tx_ring->tx_swbd[i];
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ count++;
+
+ do_vlan = skb_vlan_tag_present(skb);
+ do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+
+ if (do_vlan || do_tstamp)
+ flags |= ENETC_TXBD_FLAGS_EX;
+
+ if (enetc_tx_csum(skb, &temp_bd))
+ flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
+
+ /* first BD needs frm_len and offload flags set */
+ temp_bd.frm_len = cpu_to_le16(skb->len);
+ temp_bd.flags = flags;
+
+ if (flags & ENETC_TXBD_FLAGS_EX) {
+ u8 e_flags = 0;
+ *txbd = temp_bd;
+ enetc_clear_tx_bd(&temp_bd);
+
+ /* add extension BD for VLAN and/or timestamping */
+ flags = 0;
+ tx_swbd++;
+ txbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ txbd = ENETC_TXBD(*tx_ring, 0);
+ }
+ prefetchw(txbd);
+
+ if (do_vlan) {
+ temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ temp_bd.ext.tpid = 0; /* < C-TAG */
+ e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ if (do_tstamp) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
+ }
+
+ temp_bd.ext.e_flags = e_flags;
+ count++;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_err;
+
+ *txbd = temp_bd;
+ enetc_clear_tx_bd(&temp_bd);
+
+ flags = 0;
+ tx_swbd++;
+ txbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ txbd = ENETC_TXBD(*tx_ring, 0);
+ }
+ prefetchw(txbd);
+
+ temp_bd.addr = cpu_to_le64(dma);
+ temp_bd.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ count++;
+ }
+
+ /* last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ temp_bd.flags = flags;
+ *txbd = temp_bd;
+
+ tx_ring->tx_swbd[i].skb = skb;
+
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+
+ /* let H/W know BD ring has been updated */
+ enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+
+ return count;
+
+dma_err:
+ dev_err(tx_ring->dev, "DMA map error");
+
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_skb(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (count--);
+
+ return 0;
+}
+
+static irqreturn_t enetc_msix(int irq, void *data)
+{
+ struct enetc_int_vector *v = data;
+ int i;
+
+ /* disable interrupts */
+ enetc_wr_reg(v->rbier, 0);
+
+ for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+ napi_schedule_irqoff(&v->napi);
+
+ return IRQ_HANDLED;
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit);
+
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+ struct enetc_int_vector
+ *v = container_of(napi, struct enetc_int_vector, napi);
+ bool complete = true;
+ int work_done;
+ int i;
+
+ for (i = 0; i < v->count_tx_rings; i++)
+ if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+ complete = false;
+
+ work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
+ if (work_done == budget)
+ complete = false;
+
+ if (!complete)
+ return budget;
+
+ napi_complete_done(napi, work_done);
+
+ /* enable interrupts */
+ enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+
+ for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ return work_done;
+}
+
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+{
+ int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+
+ return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
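Note: a quick numeric check of the wraparound branch above, with a ring size chosen arbitrarily for illustration:

	bd_count = 256, ci = 250, pi = 4  ->  pi < ci,  ready = 256 - 250 + 4 = 10
	bd_count = 256, ci = 10,  pi = 40 ->  pi >= ci, ready = 40 - 10 = 30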
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+{
+ struct net_device *ndev = tx_ring->ndev;
+ int tx_frm_cnt = 0, tx_byte_cnt = 0;
+ struct enetc_tx_swbd *tx_swbd;
+ int i, bds_to_clean;
+
+ i = tx_ring->next_to_clean;
+ tx_swbd = &tx_ring->tx_swbd[i];
+ bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+ while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
+ bool is_eof = !!tx_swbd->skb;
+
+ enetc_unmap_tx_buff(tx_ring, tx_swbd);
+ if (is_eof) {
+ napi_consume_skb(tx_swbd->skb, napi_budget);
+ tx_swbd->skb = NULL;
+ }
+
+ tx_byte_cnt += tx_swbd->len;
+
+ bds_to_clean--;
+ tx_swbd++;
+ i++;
+ if (unlikely(i == tx_ring->bd_count)) {
+ i = 0;
+ tx_swbd = tx_ring->tx_swbd;
+ }
+
+ /* BD iteration loop end */
+ if (is_eof) {
+ tx_frm_cnt++;
+ /* re-arm interrupt source */
+ enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
+ BIT(16 + tx_ring->index));
+ }
+
+ if (unlikely(!bds_to_clean))
+ bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+ }
+
+ tx_ring->next_to_clean = i;
+ tx_ring->stats.packets += tx_frm_cnt;
+ tx_ring->stats.bytes += tx_byte_cnt;
+
+ if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
+ __netif_subqueue_stopped(ndev, tx_ring->index) &&
+ (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ netif_wake_subqueue(ndev, tx_ring->index);
+ }
+
+ return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
+}
+
+static bool enetc_new_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
+ struct page *page;
+ dma_addr_t addr;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
+
+ addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
+ __free_page(page);
+
+ return false;
+ }
+
+ rx_swbd->dma = addr;
+ rx_swbd->page = page;
+ rx_swbd->page_offset = ENETC_RXB_PAD;
+
+ return true;
+}
+
+static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
+{
+ struct enetc_rx_swbd *rx_swbd;
+ union enetc_rx_bd *rxbd;
+ int i, j;
+
+ i = rx_ring->next_to_use;
+ rx_swbd = &rx_ring->rx_swbd[i];
+ rxbd = ENETC_RXBD(*rx_ring, i);
+
+ for (j = 0; j < buff_cnt; j++) {
+ /* try reuse page */
+ if (unlikely(!rx_swbd->page)) {
+ if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
+ rx_ring->stats.rx_alloc_errs++;
+ break;
+ }
+ }
+
+ /* update RxBD */
+ rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
+ rx_swbd->page_offset);
+ /* clear 'R' as well */
+ rxbd->r.lstatus = 0;
+
+ rx_swbd++;
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rx_swbd = rx_ring->rx_swbd;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ }
+ }
+
+ if (likely(j)) {
+ rx_ring->next_to_alloc = i; /* keep track for page reuse */
+ rx_ring->next_to_use = i;
+ /* update ENETC's consumer index */
+ enetc_wr_reg(rx_ring->rcir, i);
+ }
+
+ return j;
+}
+
+static void enetc_get_offloads(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd *rxbd, struct sk_buff *skb)
+{
+ /* TODO: add tstamp, hashing */
+ if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
+ u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
+
+ skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ /* copy VLAN to skb, if one is extracted, for now we assume it's a
+ * standard TPID, but HW also supports custom values
+ */
+ if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rxbd->r.vlan_opt));
+}
+
+static void enetc_process_skb(struct enetc_bdr *rx_ring,
+ struct sk_buff *skb)
+{
+ skb_record_rx_queue(skb, rx_ring->index);
+ skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+}
+
+static bool enetc_page_reusable(struct page *page)
+{
+ return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
+}
+
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *old)
+{
+ struct enetc_rx_swbd *new;
+
+ new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+ /* next buf that may reuse a page */
+ enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+ /* copy page reference */
+ *new = *old;
+}
+
+static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
+ int i, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ size, DMA_FROM_DEVICE);
+ return rx_swbd;
+}
+
+static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
+ if (likely(enetc_page_reusable(rx_swbd->page))) {
+ rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
+ page_ref_inc(rx_swbd->page);
+
+ enetc_reuse_page(rx_ring, rx_swbd);
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ ENETC_RXB_DMA_SIZE,
+ DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ rx_swbd->page = NULL;
+}
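Note: the XOR above implements a two-halves-per-page scheme. Assuming ENETC_RXB_TRUESIZE is half a 4 KiB page (2048) and ENETC_RXB_PAD is small, values which are defined in enetc.h and only assumed here, page_offset simply toggles between the two halves:

	page_offset = ENETC_RXB_PAD                     (first half in use)
	page_offset ^= 2048 -> ENETC_RXB_PAD + 2048     (second half in use)
	page_offset ^= 2048 -> ENETC_RXB_PAD            (back to the first half)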
+
+static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
+ int i, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ struct sk_buff *skb;
+ void *ba;
+
+ ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
+ skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
+ if (unlikely(!skb)) {
+ rx_ring->stats.rx_alloc_errs++;
+ return NULL;
+ }
+
+ skb_reserve(skb, ENETC_RXB_PAD);
+ __skb_put(skb, size);
+
+ enetc_put_rx_buff(rx_ring, rx_swbd);
+
+ return skb;
+}
+
+static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
+ u16 size, struct sk_buff *skb)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
+ rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
+
+ enetc_put_rx_buff(rx_ring, rx_swbd);
+}
+
+#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
+
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit)
+{
+ int rx_frm_cnt = 0, rx_byte_cnt = 0;
+ int cleaned_cnt, i;
+
+ cleaned_cnt = enetc_bd_unused(rx_ring);
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd;
+ struct sk_buff *skb;
+ u32 bd_status;
+ u16 size;
+
+ if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
+ int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+
+ cleaned_cnt -= count;
+ }
+
+ rxbd = ENETC_RXBD(*rx_ring, i);
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ if (!bd_status)
+ break;
+
+ enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+ dma_rmb(); /* for reading other rxbd fields */
+ size = le16_to_cpu(rxbd->r.buf_len);
+ skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+ if (!skb)
+ break;
+
+ enetc_get_offloads(rx_ring, rxbd, skb);
+
+ cleaned_cnt++;
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ }
+
+ if (unlikely(bd_status &
+ ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+ dev_kfree_skb(skb);
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ dma_rmb();
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ }
+ }
+
+ rx_ring->ndev->stats.rx_dropped++;
+ rx_ring->ndev->stats.rx_errors++;
+
+ break;
+ }
+
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ size = ENETC_RXB_DMA_SIZE;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu(rxbd->r.buf_len);
+ }
+
+ enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
+
+ cleaned_cnt++;
+ rxbd++;
+ i++;
+ if (unlikely(i == rx_ring->bd_count)) {
+ i = 0;
+ rxbd = ENETC_RXBD(*rx_ring, 0);
+ }
+ }
+
+ rx_byte_cnt += skb->len;
+
+ enetc_process_skb(rx_ring, skb);
+
+ napi_gro_receive(napi, skb);
+
+ rx_frm_cnt++;
+ }
+
+ rx_ring->next_to_clean = i;
+
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
+
+ return rx_frm_cnt;
+}
+
+/* Probing and Init */
+#define ENETC_MAX_RFS_SIZE 64
+void enetc_get_si_caps(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ /* find out how many of various resources we have to work with */
+ val = enetc_rd(hw, ENETC_SICAPR0);
+ si->num_rx_rings = (val >> 16) & 0xff;
+ si->num_tx_rings = val & 0xff;
+
+ val = enetc_rd(hw, ENETC_SIRFSCAPR);
+ si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
+ si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+
+ si->num_rss = 0;
+ val = enetc_rd(hw, ENETC_SIPCAPR0);
+ if (val & ENETC_SIPCAPR0_RSS) {
+ val = enetc_rd(hw, ENETC_SIRSSCAPR);
+ si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
+ }
+}
+
+static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+{
+ r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
+ &r->bd_dma_base, GFP_KERNEL);
+ if (!r->bd_base)
+ return -ENOMEM;
+
+ /* h/w requires 128B alignment */
+ if (!IS_ALIGNED(r->bd_dma_base, 128)) {
+ dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
+ r->bd_dma_base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+{
+ int err;
+
+ txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
+ if (!txr->tx_swbd)
+ return -ENOMEM;
+
+ err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+ if (err) {
+ vfree(txr->tx_swbd);
+ return err;
+ }
+
+ txr->next_to_clean = 0;
+ txr->next_to_use = 0;
+
+ return 0;
+}
+
+static void enetc_free_txbdr(struct enetc_bdr *txr)
+{
+ int size, i;
+
+ for (i = 0; i < txr->bd_count; i++)
+ enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+
+ size = txr->bd_count * sizeof(union enetc_tx_bd);
+
+ dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
+ txr->bd_base = NULL;
+
+ vfree(txr->tx_swbd);
+ txr->tx_swbd = NULL;
+}
+
+static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+{
+ int i, err;
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ err = enetc_alloc_txbdr(priv->tx_ring[i]);
+
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i-- > 0)
+ enetc_free_txbdr(priv->tx_ring[i]);
+
+ return err;
+}
+
+static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_free_txbdr(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
+{
+ int err;
+
+ rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
+ if (!rxr->rx_swbd)
+ return -ENOMEM;
+
+ err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
+ if (err) {
+ vfree(rxr->rx_swbd);
+ return err;
+ }
+
+ rxr->next_to_clean = 0;
+ rxr->next_to_use = 0;
+ rxr->next_to_alloc = 0;
+
+ return 0;
+}
+
+static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+{
+ int size;
+
+ size = rxr->bd_count * sizeof(union enetc_rx_bd);
+
+ dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
+ rxr->bd_base = NULL;
+
+ vfree(rxr->rx_swbd);
+ rxr->rx_swbd = NULL;
+}
+
+static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+{
+ int i, err;
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ err = enetc_alloc_rxbdr(priv->rx_ring[i]);
+
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i-- > 0)
+ enetc_free_rxbdr(priv->rx_ring[i]);
+
+ return err;
+}
+
+static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_free_rxbdr(priv->rx_ring[i]);
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+ int i;
+
+ if (!tx_ring->tx_swbd)
+ return;
+
+ for (i = 0; i < tx_ring->bd_count; i++) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_skb(tx_ring, tx_swbd);
+ }
+
+ tx_ring->next_to_clean = 0;
+ tx_ring->next_to_use = 0;
+}
+
+static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
+{
+ int i;
+
+ if (!rx_ring->rx_swbd)
+ return;
+
+ for (i = 0; i < rx_ring->bd_count; i++) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+ if (!rx_swbd->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(rx_swbd->page);
+ rx_swbd->page = NULL;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_alloc = 0;
+}
+
+static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_free_rx_ring(priv->rx_ring[i]);
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_free_tx_ring(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+ int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+ cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
+ GFP_KERNEL);
+ if (!cbdr->bd_base)
+ return -ENOMEM;
+
+ /* h/w requires 128B alignment */
+ if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
+ dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+ return -EINVAL;
+ }
+
+ cbdr->next_to_clean = 0;
+ cbdr->next_to_use = 0;
+
+ return 0;
+}
+
+static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+ int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+ dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+ cbdr->bd_base = NULL;
+}
+
+static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
+{
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
+
+ enetc_wr(hw, ENETC_SICBDRPIR, 0);
+ enetc_wr(hw, ENETC_SICBDRCIR, 0);
+
+ /* enable ring */
+ enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
+
+ cbdr->pir = hw->reg + ENETC_SICBDRPIR;
+ cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+}
+
+static void enetc_clear_cbdr(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC_SICBDRMR, 0);
+}
+
+static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
+{
+ int *rss_table;
+ int i;
+
+ rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ /* Set up RSS table defaults */
+ for (i = 0; i < si->num_rss; i++)
+ rss_table[i] = i % num_groups;
+
+ enetc_set_rss_table(si, rss_table, si->num_rss);
+
+ kfree(rss_table);
+
+ return 0;
+}
+
+static int enetc_configure_si(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ int err;
+
+ enetc_setup_cbdr(hw, &si->cbd_ring);
+ /* set SI cache attributes */
+ enetc_wr(hw, ENETC_SICAR0,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+ enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
+ /* enable SI */
+ enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+
+ if (si->num_rss) {
+ err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+ int cpus = num_online_cpus();
+
+ priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+ priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+
+ /* Enable all available TX rings in order to configure as many
+ * priorities as possible, when needed.
+ * TODO: Make # of TX rings run-time configurable
+ */
+ priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
+ priv->num_tx_rings = si->num_tx_rings;
+ priv->bdr_int_num = cpus;
+
+ /* SI specific */
+ si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
+}
+
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+ int err;
+
+ err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
+ if (err)
+ return err;
+
+ priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
+ GFP_KERNEL);
+ if (!priv->cls_rules) {
+ err = -ENOMEM;
+ goto err_alloc_cls;
+ }
+
+ err = enetc_configure_si(priv);
+ if (err)
+ goto err_config_si;
+
+ return 0;
+
+err_config_si:
+ kfree(priv->cls_rules);
+err_alloc_cls:
+ enetc_clear_cbdr(&si->hw);
+ enetc_free_cbdr(priv->dev, &si->cbd_ring);
+
+ return err;
+}
+
+void enetc_free_si_resources(struct enetc_ndev_priv *priv)
+{
+ struct enetc_si *si = priv->si;
+
+ enetc_clear_cbdr(&si->hw);
+ enetc_free_cbdr(priv->dev, &si->cbd_ring);
+
+ kfree(priv->cls_rules);
+}
+
+static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int idx = tx_ring->index;
+ u32 tbmr;
+
+ enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
+ lower_32_bits(tx_ring->bd_dma_base));
+
+ enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
+ upper_32_bits(tx_ring->bd_dma_base));
+
+ WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
+ enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
+ ENETC_RTBLENR_LEN(tx_ring->bd_count));
+
+ /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
+ tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
+ tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
+
+ /* enable Tx ints by setting pkt thr to 1 */
+ enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
+
+ tbmr = ENETC_TBMR_EN;
+ if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ tbmr |= ENETC_TBMR_VIH;
+
+ /* enable ring */
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+
+ tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
+ tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
+ tx_ring->idr = hw->reg + ENETC_SITXIDR;
+}
+
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+ u32 rbmr;
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
+ lower_32_bits(rx_ring->bd_dma_base));
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
+ upper_32_bits(rx_ring->bd_dma_base));
+
+ WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
+ ENETC_RTBLENR_LEN(rx_ring->bd_count));
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+
+ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+
+ /* enable Rx ints by setting pkt thr to 1 */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
+
+ rbmr = ENETC_RBMR_EN;
+ if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rbmr |= ENETC_RBMR_VTE;
+
+ rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
+ rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+
+ enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+
+ /* enable ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+}
+
+static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+
+ /* disable EN bit on ring */
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
+}
+
+static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int delay = 8, timeout = 100;
+ int idx = tx_ring->index;
+
+ /* disable EN bit on ring */
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+
+ /* wait for busy to clear */
+ while (delay < timeout &&
+ enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
+ msleep(delay);
+ delay *= 2;
+ }
+
+ if (delay >= timeout)
+ netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
+ idx);
+}
+
+static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+
+ udelay(1);
+}
+
+static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ cpumask_t cpu_mask;
+ int i, j, err;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+ struct enetc_int_vector *v = priv->int_vector[i];
+ int entry = ENETC_BDR_INT_BASE_IDX + i;
+ struct enetc_hw *hw = &priv->si->hw;
+
+ snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
+ priv->ndev->name, i);
+ err = request_irq(irq, enetc_msix, 0, v->name, v);
+ if (err) {
+ dev_err(priv->dev, "request_irq() failed!\n");
+ goto irq_err;
+ }
+
+ v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
+ v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
+
+ enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
+
+ for (j = 0; j < v->count_tx_rings; j++) {
+ int idx = v->tx_ring[j].index;
+
+ enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
+ }
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(irq, &cpu_mask);
+ }
+
+ return 0;
+
+irq_err:
+ while (i--) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, priv->int_vector[i]);
+ }
+
+ return err;
+}
+
+static void enetc_free_irqs(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ int i;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, priv->int_vector[i]);
+ }
+}
+
+static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ /* enable Tx & Rx event indication */
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ enetc_rxbdr_wr(&priv->si->hw, i,
+ ENETC_RBIER, ENETC_RBIER_RXTIE);
+ }
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ enetc_txbdr_wr(&priv->si->hw, i,
+ ENETC_TBIER, ENETC_TBIER_TXTIE);
+ }
+}
+
+static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+}
+
+static void adjust_link(struct net_device *ndev)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ phy_print_status(phydev);
+}
+
+static int enetc_phy_connect(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev;
+
+ if (!priv->phy_node)
+ return 0; /* phy-less mode */
+
+ phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
+ 0, priv->if_mode);
+ if (!phydev) {
+ dev_err(&ndev->dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+int enetc_open(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i, err;
+
+ err = enetc_setup_irqs(priv);
+ if (err)
+ return err;
+
+ err = enetc_phy_connect(ndev);
+ if (err)
+ goto err_phy_connect;
+
+ err = enetc_alloc_tx_resources(priv);
+ if (err)
+ goto err_alloc_tx;
+
+ err = enetc_alloc_rx_resources(priv);
+ if (err)
+ goto err_alloc_rx;
+
+ enetc_setup_bdrs(priv);
+
+ err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
+ if (err)
+ goto err_set_queues;
+
+ for (i = 0; i < priv->bdr_int_num; i++)
+ napi_enable(&priv->int_vector[i]->napi);
+
+ enetc_enable_interrupts(priv);
+
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
+ else
+ netif_carrier_on(ndev);
+
+ netif_tx_start_all_queues(ndev);
+
+ return 0;
+
+err_set_queues:
+ enetc_free_rx_resources(priv);
+err_alloc_rx:
+ enetc_free_tx_resources(priv);
+err_alloc_tx:
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+err_phy_connect:
+ enetc_free_irqs(priv);
+
+ return err;
+}
+
+int enetc_close(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
+ netif_tx_stop_all_queues(ndev);
+
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ } else {
+ netif_carrier_off(ndev);
+ }
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ napi_synchronize(&priv->int_vector[i]->napi);
+ napi_disable(&priv->int_vector[i]->napi);
+ }
+
+ enetc_disable_interrupts(priv);
+ enetc_clear_bdrs(priv);
+
+ enetc_free_rxtx_rings(priv);
+ enetc_free_rx_resources(priv);
+ enetc_free_tx_resources(priv);
+ enetc_free_irqs(priv);
+
+ return 0;
+}
+
+struct net_device_stats *enetc_get_stats(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned long packets = 0, bytes = 0;
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ packets += priv->rx_ring[i]->stats.packets;
+ bytes += priv->rx_ring[i]->stats.bytes;
+ }
+
+ stats->rx_packets = packets;
+ stats->rx_bytes = bytes;
+ bytes = 0;
+ packets = 0;
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ packets += priv->tx_ring[i]->stats.packets;
+ bytes += priv->tx_ring[i]->stats.bytes;
+ }
+
+ stats->tx_packets = packets;
+ stats->tx_bytes = bytes;
+
+ return stats;
+}
+
+static int enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+
+ return 0;
+}
+
+int enetc_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+
+ if (changed & NETIF_F_RXHASH)
+ enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+
+ return 0;
+}
+
+int enetc_alloc_msix(struct enetc_ndev_priv *priv)
+{
+ struct pci_dev *pdev = priv->si->pdev;
+ int size, v_tx_rings;
+ int i, n, err, nvec;
+
+ nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
+ /* allocate MSIX for both messaging and Rx/Tx interrupts */
+ n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+
+ if (n < 0)
+ return n;
+
+ if (n != nvec)
+ return -EPERM;
+
+ /* # of tx rings per int vector */
+ v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
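+	/* each vector embeds its Tx BDRs in the trailing tx_ring[] array */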
+ size = sizeof(struct enetc_int_vector) +
+ sizeof(struct enetc_bdr) * v_tx_rings;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ struct enetc_int_vector *v;
+ struct enetc_bdr *bdr;
+ int j;
+
+ v = kzalloc(size, GFP_KERNEL);
+ if (!v) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ priv->int_vector[i] = v;
+
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll,
+ NAPI_POLL_WEIGHT);
+ v->count_tx_rings = v_tx_rings;
+
+ for (j = 0; j < v_tx_rings; j++) {
+ int idx;
+
+ /* default tx ring mapping policy */
+ if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
+ idx = 2 * j + i; /* 2 CPUs */
+ else
+ idx = j + i * v_tx_rings; /* default */
+
+ __set_bit(idx, &v->tx_rings_map);
+ bdr = &v->tx_ring[j];
+ bdr->index = idx;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->tx_bd_count;
+ priv->tx_ring[idx] = bdr;
+ }
+
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ priv->rx_ring[i] = bdr;
+ }
+
+ return 0;
+
+fail:
+ while (i--) {
+ netif_napi_del(&priv->int_vector[i]->napi);
+ kfree(priv->int_vector[i]);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ return err;
+}
+
+void enetc_free_msix(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ struct enetc_int_vector *v = priv->int_vector[i];
+
+ netif_napi_del(&v->napi);
+ }
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ priv->rx_ring[i] = NULL;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ priv->tx_ring[i] = NULL;
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ kfree(priv->int_vector[i]);
+ priv->int_vector[i] = NULL;
+ }
+
+ /* disable all MSIX for this device */
+ pci_free_irq_vectors(priv->si->pdev);
+}
+
+static void enetc_kfree_si(struct enetc_si *si)
+{
+ char *p = (char *)si - si->pad;
+
+ kfree(p);
+}
+
+static void enetc_detect_errata(struct enetc_si *si)
+{
+ if (si->pdev->revision == ENETC_REV1)
+ si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
+ ENETC_ERR_UCMCSWP;
+}
+
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
+{
+ struct enetc_si *si, *p;
+ struct enetc_hw *hw;
+ size_t alloc_size;
+ int err, len;
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "device enable failed\n");
+ return err;
+ }
+
+ /* set up for high or low dma */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+ }
+
+ err = pci_request_mem_regions(pdev, name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
+ goto err_pci_mem_reg;
+ }
+
+ pci_set_master(pdev);
+
+ alloc_size = sizeof(struct enetc_si);
+ if (sizeof_priv) {
+ /* align priv to 32B */
+ alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
+ alloc_size += sizeof_priv;
+ }
+ /* force 32B alignment for enetc_si */
+ alloc_size += ENETC_SI_ALIGN - 1;
+
+ p = kzalloc(alloc_size, GFP_KERNEL);
+ if (!p) {
+ err = -ENOMEM;
+ goto err_alloc_si;
+ }
+
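+	/* record the alignment offset so enetc_kfree_si() can recover the
+	 * original pointer
+	 */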
+ si = PTR_ALIGN(p, ENETC_SI_ALIGN);
+ si->pad = (char *)si - (char *)p;
+
+ pci_set_drvdata(pdev, si);
+ si->pdev = pdev;
+ hw = &si->hw;
+
+ len = pci_resource_len(pdev, ENETC_BAR_REGS);
+ hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
+ if (!hw->reg) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "ioremap() failed\n");
+ goto err_ioremap;
+ }
+ if (len > ENETC_PORT_BASE)
+ hw->port = hw->reg + ENETC_PORT_BASE;
+ if (len > ENETC_GLOBAL_BASE)
+ hw->global = hw->reg + ENETC_GLOBAL_BASE;
+
+ enetc_detect_errata(si);
+
+ return 0;
+
+err_ioremap:
+ enetc_kfree_si(si);
+err_alloc_si:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+err_dma:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+void enetc_pci_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_hw *hw = &si->hw;
+
+ iounmap(hw->reg);
+ enetc_kfree_si(si);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
new file mode 100644
index 000000000000..b274135c5103
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+#include "enetc_hw.h"
+
+#define ENETC_MAC_MAXFRM_SIZE 9600
+#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
+ (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+
+struct enetc_tx_swbd {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u16 len;
+ u16 is_dma_page;
+};
+
+#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
+#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
+#define ENETC_RXB_DMA_SIZE \
+ (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+
+struct enetc_rx_swbd {
+ dma_addr_t dma;
+ struct page *page;
+ u16 page_offset;
+};
+
+struct enetc_ring_stats {
+ unsigned int packets;
+ unsigned int bytes;
+ unsigned int rx_alloc_errs;
+};
+
+#define ENETC_BDR_DEFAULT_SIZE 1024
+#define ENETC_DEFAULT_TX_WORK 256
+
+struct enetc_bdr {
+ struct device *dev; /* for DMA mapping */
+ struct net_device *ndev;
+ void *bd_base; /* points to Rx or Tx BD ring */
+ union {
+ void __iomem *tpir;
+ void __iomem *rcir;
+ };
+ u16 index;
+ int bd_count; /* # of BDs */
+ int next_to_use;
+ int next_to_clean;
+ union {
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_rx_swbd *rx_swbd;
+ };
+ union {
+ void __iomem *tcir; /* Tx */
+ int next_to_alloc; /* Rx */
+ };
+ void __iomem *idr; /* Interrupt Detect Register pointer */
+
+ struct enetc_ring_stats stats;
+
+ dma_addr_t bd_dma_base;
+} ____cacheline_aligned_in_smp;
+
+static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
+{
+ if (unlikely(++*i == bdr->bd_count))
+ *i = 0;
+}
+
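+/* number of free BDs in the ring; one slot is always left unused to
+ * distinguish a full ring from an empty one
+ */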
+static inline int enetc_bd_unused(struct enetc_bdr *bdr)
+{
+ if (bdr->next_to_clean > bdr->next_to_use)
+ return bdr->next_to_clean - bdr->next_to_use - 1;
+
+ return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
+}
+
+/* Control BD ring */
+#define ENETC_CBDR_DEFAULT_SIZE 64
+struct enetc_cbdr {
+	void *bd_base;			/* points to the control BD ring */
+ void __iomem *pir;
+ void __iomem *cir;
+
+ int bd_count; /* # of BDs */
+ int next_to_use;
+ int next_to_clean;
+
+ dma_addr_t bd_dma_base;
+};
+
+#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
+#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
+
+struct enetc_msg_swbd {
+ void *vaddr;
+ dma_addr_t dma;
+ int size;
+};
+
+#define ENETC_REV1 0x1
+enum enetc_errata {
+ ENETC_ERR_TXCSUM = BIT(0),
+ ENETC_ERR_VLAN_ISOL = BIT(1),
+ ENETC_ERR_UCMCSWP = BIT(2),
+};
+
+/* PCI IEP device data */
+struct enetc_si {
+ struct pci_dev *pdev;
+ struct enetc_hw hw;
+ enum enetc_errata errata;
+
+ struct net_device *ndev; /* back ref. */
+
+ struct enetc_cbdr cbd_ring;
+
+ int num_rx_rings; /* how many rings are available in the SI */
+ int num_tx_rings;
+ int num_fs_entries;
+ int num_rss; /* number of RSS buckets */
+ unsigned short pad;
+};
+
+#define ENETC_SI_ALIGN 32
+
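+/* driver private data is laid out right after the 32B-aligned enetc_si,
+ * see the allocation in enetc_pci_probe()
+ */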
+static inline void *enetc_si_priv(const struct enetc_si *si)
+{
+ return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
+}
+
+static inline bool enetc_si_is_pf(struct enetc_si *si)
+{
+ return !!(si->hw.port);
+}
+
+#define ENETC_MAX_NUM_TXQS 8
+#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
+
+struct enetc_int_vector {
+ void __iomem *rbier;
+ void __iomem *tbier_base;
+ unsigned long tx_rings_map;
+ int count_tx_rings;
+ struct napi_struct napi;
+ char name[ENETC_INT_NAME_MAX];
+
+ struct enetc_bdr rx_ring ____cacheline_aligned_in_smp;
+ struct enetc_bdr tx_ring[0];
+};
+
+struct enetc_cls_rule {
+ struct ethtool_rx_flow_spec fs;
+ int used;
+};
+
+#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+
+struct enetc_ndev_priv {
+ struct net_device *ndev;
+ struct device *dev; /* dma-mapping device */
+ struct enetc_si *si;
+
+ int bdr_int_num; /* number of Rx/Tx ring interrupts */
+ struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
+ u16 num_rx_rings, num_tx_rings;
+ u16 rx_bd_count, tx_bd_count;
+
+ u16 msg_enable;
+
+ struct enetc_bdr *tx_ring[16];
+ struct enetc_bdr *rx_ring[16];
+
+ struct enetc_cls_rule *cls_rules;
+
+ struct device_node *phy_node;
+ phy_interface_t if_mode;
+};
+
+/* Messaging */
+
+/* VF-PF set primary MAC address message format */
+struct enetc_msg_cmd_set_primary_mac {
+ struct enetc_msg_cmd_header header;
+ struct sockaddr mac;
+};
+
+#define ENETC_CBD(R, i) (&(((struct enetc_cbd *)((R).bd_base))[i]))
+
+#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
+
+/* SI common */
+int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
+void enetc_pci_remove(struct pci_dev *pdev);
+int enetc_alloc_msix(struct enetc_ndev_priv *priv);
+void enetc_free_msix(struct enetc_ndev_priv *priv);
+void enetc_get_si_caps(struct enetc_si *si);
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
+void enetc_free_si_resources(struct enetc_ndev_priv *priv);
+
+int enetc_open(struct net_device *ndev);
+int enetc_close(struct net_device *ndev);
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
+struct net_device_stats *enetc_get_stats(struct net_device *ndev);
+int enetc_set_features(struct net_device *ndev,
+ netdev_features_t features);
+/* ethtool */
+void enetc_set_ethtool_ops(struct net_device *ndev);
+
+/* control buffer descriptor ring (CBDR) */
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+ char *mac_addr, int si_map);
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
+int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
+ int index);
+void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
new file mode 100644
index 000000000000..de466b71bf8f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+static void enetc_clean_cbdr(struct enetc_si *si)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ struct enetc_cbd *dest_cbd;
+ int i, status;
+
+ i = ring->next_to_clean;
+
+ while (enetc_rd_reg(ring->cir) != i) {
+ dest_cbd = ENETC_CBD(*ring, i);
+ status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
+ if (status)
+ dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
+ status, dest_cbd->cmd);
+
+ memset(dest_cbd, 0, sizeof(*dest_cbd));
+
+ i = (i + 1) % ring->bd_count;
+ }
+
+ ring->next_to_clean = i;
+}
+
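+/* number of free slots in the control BD ring, one slot is kept unused */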
+static int enetc_cbd_unused(struct enetc_cbdr *r)
+{
+ return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
+ r->bd_count;
+}
+
+static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ int timeout = ENETC_CBDR_TIMEOUT;
+ struct enetc_cbd *dest_cbd;
+ int i;
+
+ if (unlikely(!ring->bd_base))
+ return -EIO;
+
+ if (unlikely(!enetc_cbd_unused(ring)))
+ enetc_clean_cbdr(si);
+
+ i = ring->next_to_use;
+ dest_cbd = ENETC_CBD(*ring, i);
+
+ /* copy command to the ring */
+ *dest_cbd = *cbd;
+ i = (i + 1) % ring->bd_count;
+
+ ring->next_to_use = i;
+ /* let H/W know BD ring has been updated */
+ enetc_wr_reg(ring->pir, i);
+
+ do {
+ if (enetc_rd_reg(ring->cir) == i)
+ break;
+ udelay(10); /* cannot sleep, rtnl_lock() */
+ timeout -= 10;
+ } while (timeout);
+
+ if (!timeout)
+ return -EBUSY;
+
+ enetc_clean_cbdr(si);
+
+ return 0;
+}
+
+int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
+{
+ struct enetc_cbd cbd;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.cls = 1;
+ cbd.status_flags = ENETC_CBD_FLAGS_SF;
+ cbd.index = cpu_to_le16(index);
+
+ return enetc_send_cmd(si, &cbd);
+}
+
+int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+ char *mac_addr, int si_map)
+{
+ struct enetc_cbd cbd;
+ u32 upper;
+ u16 lower;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ /* fill up the "set" descriptor */
+ cbd.cls = 1;
+ cbd.status_flags = ENETC_CBD_FLAGS_SF;
+ cbd.index = cpu_to_le16(index);
+ cbd.opt[3] = cpu_to_le32(si_map);
+ /* enable entry */
+ cbd.opt[0] = cpu_to_le32(BIT(31));
+
+ upper = *(const u32 *)mac_addr;
+ lower = *(const u16 *)(mac_addr + 4);
+ cbd.addr[0] = cpu_to_le32(upper);
+ cbd.addr[1] = cpu_to_le32(lower);
+
+ return enetc_send_cmd(si, &cbd);
+}
+
+#define RFSE_ALIGN 64
+/* Set entry in RFS table */
+int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
+ int index)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ dma_addr_t dma, dma_align;
+ void *tmp, *tmp_align;
+ int err;
+
+ /* fill up the "set" descriptor */
+ cbd.cmd = 0;
+ cbd.cls = 4;
+ cbd.index = cpu_to_le16(index);
+ cbd.length = cpu_to_le16(sizeof(*rfse));
+ cbd.opt[3] = cpu_to_le32(0); /* SI */
+
+ tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+ &dma, GFP_KERNEL);
+ if (!tmp) {
+ dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
+ return -ENOMEM;
+ }
+
+ dma_align = ALIGN(dma, RFSE_ALIGN);
+ tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
+ memcpy(tmp_align, rfse, sizeof(*rfse));
+
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+
+ err = enetc_send_cmd(si, &cbd);
+ if (err)
+ dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);
+
+ dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+ tmp, dma);
+
+ return err;
+}
+
+#define RSSE_ALIGN 64
+static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
+ bool read)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ dma_addr_t dma, dma_align;
+ u8 *tmp, *tmp_align;
+ int err, i;
+
+ if (count < RSSE_ALIGN)
+ /* HW only takes in a full 64 entry table */
+ return -EINVAL;
+
+ tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
+ &dma, GFP_KERNEL);
+ if (!tmp) {
+ dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
+ return -ENOMEM;
+ }
+ dma_align = ALIGN(dma, RSSE_ALIGN);
+ tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
+
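+	/* the hw table holds one byte per entry, convert to/from the u32
+	 * sw table
+	 */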
+ if (!read)
+ for (i = 0; i < count; i++)
+ tmp_align[i] = (u8)(table[i]);
+
+ /* fill up the descriptor */
+ cbd.cmd = read ? 2 : 1;
+ cbd.cls = 3;
+ cbd.length = cpu_to_le16(count);
+
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+
+ err = enetc_send_cmd(si, &cbd);
+ if (err)
+ dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);
+
+ if (read)
+ for (i = 0; i < count; i++)
+ table[i] = tmp_align[i];
+
+ dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);
+
+ return err;
+}
+
+/* Get RSS table */
+int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return enetc_cmd_rss_table(si, table, count, true);
+}
+
+/* Set RSS table */
+int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return enetc_cmd_rss_table(si, (u32 *)table, count, false);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
new file mode 100644
index 000000000000..1ecad9ffabae
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/net_tstamp.h>
+#include <linux/module.h>
+#include "enetc.h"
+
+static const u32 enetc_si_regs[] = {
+ ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR,
+ ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR,
+ ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1,
+ ENETC_SIUEFDCR
+};
+
+static const u32 enetc_txbdr_regs[] = {
+ ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1,
+ ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER
+};
+
+static const u32 enetc_rxbdr_regs[] = {
+ ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0,
+ ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER
+};
+
+static const u32 enetc_port_regs[] = {
+ ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0),
+ ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1,
+ ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0),
+ ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
+};
+
+static int enetc_get_reglen(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int len;
+
+ len = ARRAY_SIZE(enetc_si_regs);
+ len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings;
+ len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings;
+
+ if (hw->port)
+ len += ARRAY_SIZE(enetc_port_regs);
+
+ len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
+
+ return len;
+}
+
+static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+ void *regbuf)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 *buf = (u32 *)regbuf;
+ int i, j;
+ u32 addr;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) {
+ *buf++ = enetc_si_regs[i];
+ *buf++ = enetc_rd(hw, enetc_si_regs[i]);
+ }
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) {
+ addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]);
+
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) {
+ addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]);
+
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
+
+ if (!hw->port)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) {
+ addr = ENETC_PORT_BASE + enetc_port_regs[i];
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+}
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN];
+} enetc_si_counters[] = {
+ { ENETC_SIROCT, "SI rx octets" },
+ { ENETC_SIRFRM, "SI rx frames" },
+ { ENETC_SIRUCA, "SI rx u-cast frames" },
+ { ENETC_SIRMCA, "SI rx m-cast frames" },
+ { ENETC_SITOCT, "SI tx octets" },
+ { ENETC_SITFRM, "SI tx frames" },
+ { ENETC_SITUCA, "SI tx u-cast frames" },
+ { ENETC_SITMCA, "SI tx m-cast frames" },
+ { ENETC_RBDCR(0), "Rx ring 0 discarded frames" },
+ { ENETC_RBDCR(1), "Rx ring 1 discarded frames" },
+ { ENETC_RBDCR(2), "Rx ring 2 discarded frames" },
+ { ENETC_RBDCR(3), "Rx ring 3 discarded frames" },
+ { ENETC_RBDCR(4), "Rx ring 4 discarded frames" },
+ { ENETC_RBDCR(5), "Rx ring 5 discarded frames" },
+ { ENETC_RBDCR(6), "Rx ring 6 discarded frames" },
+ { ENETC_RBDCR(7), "Rx ring 7 discarded frames" },
+ { ENETC_RBDCR(8), "Rx ring 8 discarded frames" },
+ { ENETC_RBDCR(9), "Rx ring 9 discarded frames" },
+ { ENETC_RBDCR(10), "Rx ring 10 discarded frames" },
+ { ENETC_RBDCR(11), "Rx ring 11 discarded frames" },
+ { ENETC_RBDCR(12), "Rx ring 12 discarded frames" },
+ { ENETC_RBDCR(13), "Rx ring 13 discarded frames" },
+ { ENETC_RBDCR(14), "Rx ring 14 discarded frames" },
+ { ENETC_RBDCR(15), "Rx ring 15 discarded frames" },
+};
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN];
+} enetc_port_counters[] = {
+ { ENETC_PM0_REOCT, "MAC rx ethernet octets" },
+ { ENETC_PM0_RALN, "MAC rx alignment errors" },
+ { ENETC_PM0_RXPF, "MAC rx valid pause frames" },
+ { ENETC_PM0_RFRM, "MAC rx valid frames" },
+ { ENETC_PM0_RFCS, "MAC rx fcs errors" },
+ { ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
+ { ENETC_PM0_RERR, "MAC rx frame errors" },
+ { ENETC_PM0_RUCA, "MAC rx unicast frames" },
+ { ENETC_PM0_RMCA, "MAC rx multicast frames" },
+ { ENETC_PM0_RBCA, "MAC rx broadcast frames" },
+ { ENETC_PM0_RDRP, "MAC rx dropped packets" },
+ { ENETC_PM0_RPKT, "MAC rx packets" },
+ { ENETC_PM0_RUND, "MAC rx undersized packets" },
+ { ENETC_PM0_R64, "MAC rx 64 byte packets" },
+ { ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
+ { ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
+ { ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
+ { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
+ { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" },
+ { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+ { ENETC_PM0_ROVR, "MAC rx oversized packets" },
+ { ENETC_PM0_RJBR, "MAC rx jabber packets" },
+ { ENETC_PM0_RFRG, "MAC rx fragment packets" },
+ { ENETC_PM0_RCNP, "MAC rx control packets" },
+ { ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
+ { ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
+ { ENETC_PM0_TOCT, "MAC tx octets" },
+ { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
+ { ENETC_PM0_TXPF, "MAC tx valid pause frames" },
+ { ENETC_PM0_TFRM, "MAC tx frames" },
+ { ENETC_PM0_TFCS, "MAC tx fcs errors" },
+ { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
+	{ ENETC_PM0_TERR, "MAC tx frame errors" },
+ { ENETC_PM0_TUCA, "MAC tx unicast frames" },
+ { ENETC_PM0_TMCA, "MAC tx multicast frames" },
+ { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
+ { ENETC_PM0_TPKT, "MAC tx packets" },
+ { ENETC_PM0_TUND, "MAC tx undersized packets" },
+ { ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
+ { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
+ { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" },
+ { ENETC_PM0_TCNP, "MAC tx control packets" },
+ { ENETC_PM0_TDFR, "MAC tx deferred packets" },
+ { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
+ { ENETC_PM0_TSCOL, "MAC tx single collisions" },
+ { ENETC_PM0_TLCOL, "MAC tx late collisions" },
+ { ENETC_PM0_TECOL, "MAC tx excessive collisions" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+};
+
+static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
+ "Rx ring %2d frames",
+ "Rx ring %2d alloc errors",
+};
+
+static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
+ "Tx ring %2d frames",
+};
+
+static int enetc_get_sset_count(struct net_device *ndev, int sset)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(enetc_si_counters) +
+ ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
+ ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings +
+ (enetc_si_is_pf(priv->si) ?
+ ARRAY_SIZE(enetc_port_counters) : 0);
+
+ return -EOPNOTSUPP;
+}
+
+static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u8 *p = data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
+ strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
+ snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
+ i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
+ snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
+ i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ if (!enetc_si_is_pf(priv->si))
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
+ strlcpy(p, enetc_port_counters[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void enetc_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int i, o = 0;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
+ data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ data[o++] = priv->tx_ring[i]->stats.packets;
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ data[o++] = priv->rx_ring[i]->stats.packets;
+ data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
+ }
+
+ if (!enetc_si_is_pf(priv->si))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
+ data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+}
+
+#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
+ RXH_IP_DST)
+#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+{
+ static const u32 rsshash[] = {
+ [TCP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [UDP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [SCTP_V4_FLOW] = ENETC_RSSHASH_L4,
+ [AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3,
+ [IPV4_FLOW] = ENETC_RSSHASH_L3,
+ [TCP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [UDP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [SCTP_V6_FLOW] = ENETC_RSSHASH_L4,
+ [AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3,
+ [IPV6_FLOW] = ENETC_RSSHASH_L3,
+ [ETHER_FLOW] = 0,
+ };
+
+ if (rxnfc->flow_type >= ARRAY_SIZE(rsshash))
+ return -EINVAL;
+
+ rxnfc->data = rsshash[rxnfc->flow_type];
+
+ return 0;
+}
+
+/* current HW spec does byte reversal on everything including MAC addresses */
+static void ether_addr_copy_swap(u8 *dst, const u8 *src)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[i] = src[ETH_ALEN - i - 1];
+}
+
+static int enetc_set_cls_entry(struct enetc_si *si,
+ struct ethtool_rx_flow_spec *fs, bool en)
+{
+ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m;
+ struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m;
+ struct ethhdr *eth_h, *eth_m;
+ struct enetc_cmd_rfse rfse = { {0} };
+
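+	/* on delete, an all-zero (disabled) RFS entry is written for this
+	 * location
+	 */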
+ if (!en)
+ goto done;
+
+ switch (fs->flow_type & 0xff) {
+ case TCP_V4_FLOW:
+ l4ip4_h = &fs->h_u.tcp_ip4_spec;
+ l4ip4_m = &fs->m_u.tcp_ip4_spec;
+ goto l4ip4;
+ case UDP_V4_FLOW:
+ l4ip4_h = &fs->h_u.udp_ip4_spec;
+ l4ip4_m = &fs->m_u.udp_ip4_spec;
+ goto l4ip4;
+ case SCTP_V4_FLOW:
+ l4ip4_h = &fs->h_u.sctp_ip4_spec;
+ l4ip4_m = &fs->m_u.sctp_ip4_spec;
+l4ip4:
+ rfse.sip_h[0] = l4ip4_h->ip4src;
+ rfse.sip_m[0] = l4ip4_m->ip4src;
+ rfse.dip_h[0] = l4ip4_h->ip4dst;
+ rfse.dip_m[0] = l4ip4_m->ip4dst;
+ rfse.sport_h = ntohs(l4ip4_h->psrc);
+ rfse.sport_m = ntohs(l4ip4_m->psrc);
+ rfse.dport_h = ntohs(l4ip4_h->pdst);
+ rfse.dport_m = ntohs(l4ip4_m->pdst);
+ if (l4ip4_m->tos)
+ netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
+ rfse.ethtype_h = ETH_P_IP; /* IPv4 */
+ rfse.ethtype_m = 0xffff;
+ break;
+ case IP_USER_FLOW:
+ l3ip4_h = &fs->h_u.usr_ip4_spec;
+ l3ip4_m = &fs->m_u.usr_ip4_spec;
+
+ rfse.sip_h[0] = l3ip4_h->ip4src;
+ rfse.sip_m[0] = l3ip4_m->ip4src;
+ rfse.dip_h[0] = l3ip4_h->ip4dst;
+ rfse.dip_m[0] = l3ip4_m->ip4dst;
+ if (l3ip4_m->tos)
+ netdev_warn(si->ndev, "ToS field is not supported and was ignored\n");
+ rfse.ethtype_h = ETH_P_IP; /* IPv4 */
+ rfse.ethtype_m = 0xffff;
+ break;
+ case ETHER_FLOW:
+ eth_h = &fs->h_u.ether_spec;
+ eth_m = &fs->m_u.ether_spec;
+
+ ether_addr_copy_swap(rfse.smac_h, eth_h->h_source);
+ ether_addr_copy_swap(rfse.smac_m, eth_m->h_source);
+ ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest);
+ ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest);
+ rfse.ethtype_h = ntohs(eth_h->h_proto);
+ rfse.ethtype_m = ntohs(eth_m->h_proto);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rfse.mode |= ENETC_RFSE_EN;
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC) {
+ rfse.mode |= ENETC_RFSE_MODE_BD;
+ rfse.result = fs->ring_cookie;
+ }
+done:
+ return enetc_set_fs_entry(si, &rfse, fs->location);
+}
+
+static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i, j;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->num_rx_rings;
+ break;
+ case ETHTOOL_GRXFH:
+ /* get RSS hash config */
+ return enetc_get_rsshash(rxnfc);
+ case ETHTOOL_GRXCLSRLCNT:
+ /* total number of entries */
+ rxnfc->data = priv->si->num_fs_entries;
+ /* number of entries in use */
+ rxnfc->rule_cnt = 0;
+ for (i = 0; i < priv->si->num_fs_entries; i++)
+ if (priv->cls_rules[i].used)
+ rxnfc->rule_cnt++;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ /* get entry x */
+ rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ /* total number of entries */
+ rxnfc->data = priv->si->num_fs_entries;
+ /* array of indexes of used entries */
+ j = 0;
+ for (i = 0; i < priv->si->num_fs_entries; i++) {
+ if (!priv->cls_rules[i].used)
+ continue;
+ if (j == rxnfc->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[j++] = i;
+ }
+ /* number of entries in use */
+ rxnfc->rule_cnt = j;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ if (rxnfc->fs.ring_cookie >= priv->num_rx_rings &&
+ rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true);
+ if (err)
+ return err;
+ priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs;
+ priv->cls_rules[rxnfc->fs.location].used = 1;
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (rxnfc->fs.location >= priv->si->num_fs_entries)
+ return -EINVAL;
+
+ err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false);
+ if (err)
+ return err;
+ priv->cls_rules[rxnfc->fs.location].used = 0;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 enetc_get_rxfh_key_size(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ /* return the size of the RX flow hash key. PF only */
+ return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0;
+}
+
+static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ /* return the size of the RX flow hash indirection table */
+ return priv->si->num_rss;
+}
+
+static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int err = 0, i;
+
+ /* return hash function */
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ /* return hash key */
+ if (key && hw->port)
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
+
+ /* return RSS table */
+ if (indir)
+ err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
+
+ return err;
+}
+
+void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+{
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+}
+
+static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ int err = 0;
+
+ /* set hash key, if PF */
+ if (key && hw->port)
+ enetc_set_rss_key(hw, key);
+
+ /* set RSS table */
+ if (indir)
+ err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
+
+ return err;
+}
+
+static void enetc_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ ring->rx_pending = priv->rx_bd_count;
+ ring->tx_pending = priv->tx_bd_count;
+
+ /* do some h/w sanity checks for BDR length */
+ if (netif_running(ndev)) {
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR);
+
+ if (val != priv->rx_bd_count)
+ netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val);
+
+ val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR);
+
+ if (val != priv->tx_bd_count)
+ netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val);
+ }
+}
+
+static const struct ethtool_ops enetc_pf_ethtool_ops = {
+ .get_regs_len = enetc_get_reglen,
+ .get_regs = enetc_get_regs,
+ .get_sset_count = enetc_get_sset_count,
+ .get_strings = enetc_get_strings,
+ .get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_rxnfc = enetc_get_rxnfc,
+ .set_rxnfc = enetc_set_rxnfc,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_ringparam = enetc_get_ringparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct ethtool_ops enetc_vf_ethtool_ops = {
+ .get_regs_len = enetc_get_reglen,
+ .get_regs = enetc_get_regs,
+ .get_sset_count = enetc_get_sset_count,
+ .get_strings = enetc_get_strings,
+ .get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_rxnfc = enetc_get_rxnfc,
+ .set_rxnfc = enetc_set_rxnfc,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_ringparam = enetc_get_ringparam,
+};
+
+void enetc_set_ethtool_ops(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ if (enetc_si_is_pf(priv->si))
+ ndev->ethtool_ops = &enetc_pf_ethtool_ops;
+ else
+ ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
new file mode 100644
index 000000000000..df8eb8882d92
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include <linux/bitops.h>
+
+/* ENETC device IDs */
+#define ENETC_DEV_ID_PF 0xe100
+#define ENETC_DEV_ID_VF 0xef00
+#define ENETC_DEV_ID_PTP 0xee02
+
+/* ENETC register block BAR */
+#define ENETC_BAR_REGS 0
+
+/** SI regs, offset: 0h */
+#define ENETC_SIMR 0
+#define ENETC_SIMR_EN BIT(31)
+#define ENETC_SIMR_RSSE BIT(0)
+#define ENETC_SICTR0 0x18
+#define ENETC_SICTR1 0x1c
+#define ENETC_SIPCAPR0 0x20
+#define ENETC_SIPCAPR0_RSS BIT(8)
+#define ENETC_SIPCAPR1 0x24
+#define ENETC_SITGTGR 0x30
+#define ENETC_SIRBGCR 0x38
+/* cache attribute registers for transactions initiated by ENETC */
+#define ENETC_SICAR0 0x40
+#define ENETC_SICAR1 0x44
+#define ENETC_SICAR2 0x48
+/* rd snoop, no alloc
+ * wr snoop, no alloc, partial cache line update for BDs and full cache line
+ * update for data
+ */
+#define ENETC_SICAR_RD_COHERENT 0x2b2b0000
+#define ENETC_SICAR_WR_COHERENT 0x00006727
+#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */
+
+#define ENETC_SIPMAR0 0x80
+#define ENETC_SIPMAR1 0x84
+
+/* VF-PF Message passing */
+#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
+/* msg size encoding: default and max msg value of 1024B encoded as 0 */
+static inline u32 enetc_vsi_set_msize(u32 size)
+{
+ return size < ENETC_DEFAULT_MSG_SIZE ? size >> 5 : 0;
+}
+
+#define ENETC_PSIMSGRR 0x204
+#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1)
+#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */
+#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8)
+
+#define ENETC_VSIMSGSR 0x204 /* RO */
+#define ENETC_VSIMSGSR_MB BIT(0)
+#define ENETC_VSIMSGSR_MS BIT(1)
+#define ENETC_VSIMSGSNDAR0 0x210
+#define ENETC_VSIMSGSNDAR1 0x214
+
+#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16)
+#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16)
+
+/* SI statistics */
+#define ENETC_SIROCT 0x300
+#define ENETC_SIRFRM 0x308
+#define ENETC_SIRUCA 0x310
+#define ENETC_SIRMCA 0x318
+#define ENETC_SITOCT 0x320
+#define ENETC_SITFRM 0x328
+#define ENETC_SITUCA 0x330
+#define ENETC_SITMCA 0x338
+#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200)
+
+/* Control BDR regs */
+#define ENETC_SICBDRMR 0x800
+#define ENETC_SICBDRSR 0x804 /* RO */
+#define ENETC_SICBDRBAR0 0x810
+#define ENETC_SICBDRBAR1 0x814
+#define ENETC_SICBDRPIR 0x818
+#define ENETC_SICBDRCIR 0x81c
+#define ENETC_SICBDRLENR 0x820
+
+#define ENETC_SICAPR0 0x900
+#define ENETC_SICAPR1 0x904
+
+#define ENETC_PSIIER 0xa00
+#define ENETC_PSIIER_MR_MASK GENMASK(2, 1)
+#define ENETC_PSIIDR 0xa08
+#define ENETC_SITXIDR 0xa18
+#define ENETC_SIRXIDR 0xa28
+#define ENETC_SIMSIVR 0xa30
+
+#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4)
+#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4)
+
+#define ENETC_SIUEFDCR 0xe28
+
+#define ENETC_SIRFSCAPR 0x1200
+#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f)
+#define ENETC_SIRSSCAPR 0x1600
+#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
+
+/** SI BDR sub-blocks, n = 0..7 */
+enum enetc_bdr_type {TX, RX};
+#define ENETC_BDR_OFF(i) ((i) * 0x200)
+#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r))
+/* RX BDR reg offsets */
+#define ENETC_RBMR 0
+#define ENETC_RBMR_BDS BIT(2)
+#define ENETC_RBMR_VTE BIT(5)
+#define ENETC_RBMR_EN BIT(31)
+#define ENETC_RBSR 0x4
+#define ENETC_RBBSR 0x8
+#define ENETC_RBCIR 0xc
+#define ENETC_RBBAR0 0x10
+#define ENETC_RBBAR1 0x14
+#define ENETC_RBPIR 0x18
+#define ENETC_RBLENR 0x20
+#define ENETC_RBIER 0xa0
+#define ENETC_RBIER_RXTIE BIT(0)
+#define ENETC_RBIDR 0xa4
+#define ENETC_RBICIR0 0xa8
+#define ENETC_RBICIR0_ICEN BIT(31)
+
+/* TX BDR reg offsets */
+#define ENETC_TBMR 0
+#define ENETC_TBSR_BUSY BIT(0)
+#define ENETC_TBMR_VIH BIT(9)
+#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0)
+#define ENETC_TBMR_PRIO_SET(val)	(val)
+#define ENETC_TBMR_EN BIT(31)
+#define ENETC_TBSR 0x4
+#define ENETC_TBBAR0 0x10
+#define ENETC_TBBAR1 0x14
+#define ENETC_TBPIR 0x18
+#define ENETC_TBCIR 0x1c
+#define ENETC_TBCIR_IDX_MASK 0xffff
+#define ENETC_TBLENR 0x20
+#define ENETC_TBIER 0xa0
+#define ENETC_TBIER_TXTIE BIT(0)
+#define ENETC_TBIDR 0xa4
+#define ENETC_TBICIR0 0xa8
+#define ENETC_TBICIR0_ICEN BIT(31)
+
+#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7)
+
+/* Port regs, offset: 1_0000h */
+#define ENETC_PORT_BASE 0x10000
+#define ENETC_PMR 0x0000
+#define ENETC_PMR_EN GENMASK(18, 16)
+#define ENETC_PSR 0x0004 /* RO */
+#define ENETC_PSIPMR 0x0018
+#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */
+#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16)
+#define ENETC_PSIPVMR 0x001c
+#define ENETC_VLAN_PROMISC_MAP_ALL 0x7
+#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7)
+#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16)
+#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */
+#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8)
+#define ENETC_PVCLCTR 0x0208
+#define ENETC_VLAN_TYPE_C BIT(0)
+#define ENETC_VLAN_TYPE_S BIT(1)
+#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */
+#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */
+#define ENETC_PSIVLAN_EN BIT(31)
+#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12)
+#define ENETC_PTXMBAR 0x0608
+#define ENETC_PCAPR0 0x0900
+#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
+#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
+#define ENETC_PCAPR1 0x0904
+#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
+#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
+#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16)
+#define ENETC_PSICFGR0_VTE BIT(12)
+#define ENETC_PSICFGR0_SIVIE BIT(14)
+#define ENETC_PSICFGR0_ASE BIT(15)
+#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */
+
+#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/
+#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
+#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
+#define ENETC_PSIVLANFMR 0x1700
+#define ENETC_PSIVLANFMR_VS BIT(0)
+#define ENETC_PRFSMR 0x1800
+#define ENETC_PRFSMR_RFSE BIT(31)
+#define ENETC_PRFSCAPR 0x1804
+#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16)
+#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
+#define ENETC_PFPMR 0x1900
+#define ENETC_PFPMR_PMACE BIT(1)
+#define ENETC_PFPMR_MWLM BIT(0)
+#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
+#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
+#define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10)
+#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10)
+#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
+#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
+#define ENETC_MMCSR 0x1f00
+#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
+
+#define ENETC_PM0_CMD_CFG 0x8008
+#define ENETC_PM1_CMD_CFG 0x9008
+#define ENETC_PM0_TX_EN BIT(0)
+#define ENETC_PM0_RX_EN BIT(1)
+#define ENETC_PM0_PROMISC BIT(4)
+#define ENETC_PM0_CMD_XGLP BIT(10)
+#define ENETC_PM0_CMD_TXP BIT(11)
+#define ENETC_PM0_CMD_PHY_TX_EN BIT(15)
+#define ENETC_PM0_CMD_SFD BIT(21)
+#define ENETC_PM0_MAXFRM 0x8014
+#define ENETC_SET_TX_MTU(val) ((val) << 16)
+#define ENETC_SET_MAXFRM(val) ((val) & 0xffff)
+#define ENETC_PM0_IF_MODE 0x8300
+#define ENETC_PMO_IFM_RG BIT(2)
+#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
+#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
+#define ENETC_PM0_IFM_XGMII BIT(12)
+
+/* MAC counters */
+#define ENETC_PM0_REOCT 0x8100
+#define ENETC_PM0_RALN 0x8110
+#define ENETC_PM0_RXPF 0x8118
+#define ENETC_PM0_RFRM 0x8120
+#define ENETC_PM0_RFCS 0x8128
+#define ENETC_PM0_RVLAN 0x8130
+#define ENETC_PM0_RERR 0x8138
+#define ENETC_PM0_RUCA 0x8140
+#define ENETC_PM0_RMCA 0x8148
+#define ENETC_PM0_RBCA 0x8150
+#define ENETC_PM0_RDRP 0x8158
+#define ENETC_PM0_RPKT 0x8160
+#define ENETC_PM0_RUND 0x8168
+#define ENETC_PM0_R64 0x8170
+#define ENETC_PM0_R127 0x8178
+#define ENETC_PM0_R255 0x8180
+#define ENETC_PM0_R511 0x8188
+#define ENETC_PM0_R1023 0x8190
+#define ENETC_PM0_R1518 0x8198
+#define ENETC_PM0_R1519X 0x81A0
+#define ENETC_PM0_ROVR 0x81A8
+#define ENETC_PM0_RJBR 0x81B0
+#define ENETC_PM0_RFRG 0x81B8
+#define ENETC_PM0_RCNP 0x81C0
+#define ENETC_PM0_RDRNTP 0x81C8
+#define ENETC_PM0_TEOCT 0x8200
+#define ENETC_PM0_TOCT 0x8208
+#define ENETC_PM0_TCRSE 0x8210
+#define ENETC_PM0_TXPF 0x8218
+#define ENETC_PM0_TFRM 0x8220
+#define ENETC_PM0_TFCS 0x8228
+#define ENETC_PM0_TVLAN 0x8230
+#define ENETC_PM0_TERR 0x8238
+#define ENETC_PM0_TUCA 0x8240
+#define ENETC_PM0_TMCA 0x8248
+#define ENETC_PM0_TBCA 0x8250
+#define ENETC_PM0_TPKT 0x8260
+#define ENETC_PM0_TUND 0x8268
+#define ENETC_PM0_T127 0x8278
+#define ENETC_PM0_T1023 0x8290
+#define ENETC_PM0_T1518 0x8298
+#define ENETC_PM0_TCNP 0x82C0
+#define ENETC_PM0_TDFR 0x82D0
+#define ENETC_PM0_TMCOL 0x82D8
+#define ENETC_PM0_TSCOL 0x82E0
+#define ENETC_PM0_TLCOL 0x82E8
+#define ENETC_PM0_TECOL 0x82F0
+
+/* Port counters */
+#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
+#define ENETC_PBFDSIR 0x0810
+#define ENETC_PFDMSAPR 0x0814
+#define ENETC_UFDMF 0x1680
+#define ENETC_MFDMF 0x1684
+#define ENETC_PUFDVFR 0x1780
+#define ENETC_PMFDVFR 0x1784
+#define ENETC_PBFDVFR 0x1788
+
+/** Global regs, offset: 2_0000h */
+#define ENETC_GLOBAL_BASE 0x20000
+#define ENETC_G_EIPBRR0 0x0bf8
+#define ENETC_G_EIPBRR1 0x0bfc
+#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
+#define ENETC_G_EPFBLPR1_XGMII 0x80000000
+
+/* PCI device info */
+struct enetc_hw {
+ /* SI registers, used by all PCI functions */
+ void __iomem *reg;
+ /* Port registers, PF only */
+ void __iomem *port;
+ /* IP global registers, PF only */
+ void __iomem *global;
+};
+
+/* general register accessors */
+#define enetc_rd_reg(reg) ioread32((reg))
+#define enetc_wr_reg(reg, val) iowrite32((val), (reg))
+#ifdef ioread64
+#define enetc_rd_reg64(reg) ioread64((reg))
+#else
+/* using this to read out stats on 32b systems */
+static inline u64 enetc_rd_reg64(void __iomem *reg)
+{
+ u32 low, high, tmp;
+
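+	/* re-read the high word until it is stable, so low and high form a
+	 * consistent 64-bit snapshot
+	 */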
+ do {
+ high = ioread32(reg + 4);
+ low = ioread32(reg);
+ tmp = ioread32(reg + 4);
+ } while (high != tmp);
+
+ return le64_to_cpu((__le64)high << 32 | low);
+}
+#endif
+
+#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
+#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
+#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off))
+/* port register accessors - PF only */
+#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
+#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+/* global register accessors - PF only */
+#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off))
+#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val)
+/* BDR register accessors, see ENETC_BDR() */
+#define enetc_bdr_rd(hw, t, n, off) \
+ enetc_rd(hw, ENETC_BDR(t, n, off))
+#define enetc_bdr_wr(hw, t, n, off, val) \
+ enetc_wr(hw, ENETC_BDR(t, n, off), val)
+#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off)
+#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off)
+#define enetc_txbdr_wr(hw, n, off, val) \
+ enetc_bdr_wr(hw, TX, n, off, val)
+#define enetc_rxbdr_wr(hw, n, off, val) \
+ enetc_bdr_wr(hw, RX, n, off, val)
+
+/* Buffer Descriptors (BD) */
+union enetc_tx_bd {
+ struct {
+ __le64 addr;
+ __le16 buf_len;
+ __le16 frm_len;
+ union {
+ struct {
+ __le16 l3_csoff;
+ u8 l4_csoff;
+ u8 flags;
+ }; /* default layout */
+ __le32 lstatus;
+ };
+ };
+ struct {
+ __le32 tstamp;
+ __le16 tpid;
+ __le16 vid;
+ u8 reserved[6];
+ u8 e_flags;
+ u8 flags;
+ } ext; /* Tx BD extension */
+};
+
+#define ENETC_TXBD_FLAGS_L4CS BIT(0)
+#define ENETC_TXBD_FLAGS_W BIT(2)
+#define ENETC_TXBD_FLAGS_CSUM BIT(3)
+#define ENETC_TXBD_FLAGS_EX BIT(6)
+#define ENETC_TXBD_FLAGS_F BIT(7)
+
+static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
+{
+ memset(txbd, 0, sizeof(*txbd));
+}
+
+/* L3 csum flags */
+#define ENETC_TXBD_L3_IPCS BIT(7)
+#define ENETC_TXBD_L3_IPV6 BIT(15)
+
+#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0)
+#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8)
+
+/* Extension flags */
+#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
+#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
+
+static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags)
+{
+ return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) |
+ (start & ENETC_TXBD_L3_START_MASK));
+}
+
+/* L4 csum flags */
+#define ENETC_TXBD_L4_UDP BIT(5)
+#define ENETC_TXBD_L4_TCP BIT(6)
+
+union enetc_rx_bd {
+ struct {
+ __le64 addr;
+ u8 reserved[8];
+ } w;
+ struct {
+ __le16 inet_csum;
+ __le16 parse_summary;
+ __le32 rss_hash;
+ __le16 buf_len;
+ __le16 vlan_opt;
+ union {
+ struct {
+ __le16 flags;
+ __le16 error;
+ };
+ __le32 lstatus;
+ };
+ } r;
+};
+
+#define ENETC_RXBD_LSTATUS_R BIT(30)
+#define ENETC_RXBD_LSTATUS_F BIT(31)
+#define ENETC_RXBD_ERR_MASK 0xff
+#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16)
+#define ENETC_RXBD_FLAG_VLAN BIT(9)
+#define ENETC_RXBD_FLAG_TSTMP BIT(10)
+
+#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */
+#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */
+#define ENETC_MAX_NUM_VFS 2
+
+struct enetc_cbd {
+ union {
+ struct {
+ __le32 addr[2];
+ __le32 opt[4];
+ };
+ __le32 data[6];
+ };
+ __le16 index;
+ __le16 length;
+ u8 cmd;
+ u8 cls;
+ u8 _res;
+ u8 status_flags;
+};
+
+#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
+#define ENETC_CBD_STATUS_MASK 0xf
+
+struct enetc_cmd_rfse {
+ u8 smac_h[6];
+ u8 smac_m[6];
+ u8 dmac_h[6];
+ u8 dmac_m[6];
+ u32 sip_h[4];
+ u32 sip_m[4];
+ u32 dip_h[4];
+ u32 dip_m[4];
+ u16 ethtype_h;
+ u16 ethtype_m;
+ u16 ethtype4_h;
+ u16 ethtype4_m;
+ u16 sport_h;
+ u16 sport_m;
+ u16 dport_h;
+ u16 dport_m;
+ u16 vlan_h;
+ u16 vlan_m;
+ u8 proto_h;
+ u8 proto_m;
+ u16 flags;
+ u16 result;
+ u16 mode;
+};
+
+#define ENETC_RFSE_EN BIT(15)
+#define ENETC_RFSE_MODE_BD 2
+
+static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+{
+ *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
+ *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+}
+
+#define ENETC_SI_INT_IDX 0
+/* base index for Rx/Tx interrupts */
+#define ENETC_BDR_INT_BASE_IDX 1
+
+/* Messaging */
+
+/* Command completion status */
+enum enetc_msg_cmd_status {
+ ENETC_MSG_CMD_STATUS_OK,
+ ENETC_MSG_CMD_STATUS_FAIL
+};
+
+/* VSI-PSI command message types */
+enum enetc_msg_cmd_type {
+ ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */
+ ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */
+ ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */
+};
+
+/* VSI-PSI command action types */
+enum enetc_msg_cmd_action_type {
+ ENETC_MSG_CMD_MNG_ADD = 1,
+ ENETC_MSG_CMD_MNG_REMOVE
+};
+
+/* PSI-VSI command header format */
+struct enetc_msg_cmd_header {
+ u16 type; /* command class type */
+ u16 id; /* denotes the specific required action */
+};
+
+/* Common H/W utility functions */
+
+static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
+ bool en)
+{
+ u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+
+ val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
+ enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+}
+
+static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
+ bool en)
+{
+ u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+
+ val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
+ enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
new file mode 100644
index 000000000000..77b9cd10ba2b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/mdio.h>
+#include <linux/of_mdio.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+
+#include "enetc_pf.h"
+
+struct enetc_mdio_regs {
+ u32 mdio_cfg; /* MDIO configuration and status */
+ u32 mdio_ctl; /* MDIO control */
+ u32 mdio_data; /* MDIO data */
+ u32 mdio_addr; /* MDIO address */
+};
+
+#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv)
+
+#define ENETC_MDIO_REG_OFFSET 0x1c00
+#define ENETC_MDC_DIV 258
+
+#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
+#define MDIO_CFG_BSY BIT(0)
+#define MDIO_CFG_RD_ER BIT(1)
+#define MDIO_CFG_ENC45 BIT(6)
+ /* external MDIO only - driven on neg MDC edge */
+#define MDIO_CFG_NEG BIT(23)
+
+#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
+#define MDIO_CTL_READ BIT(15)
+#define MDIO_DATA(x) ((x) & 0xffff)
+
+#define TIMEOUT 1000
+static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs)
+{
+ u32 val;
+
+ return readx_poll_timeout(enetc_rd_reg, &regs->mdio_cfg, val,
+ !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
+}
+
+static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
+{
+ struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ u32 mdio_ctl, mdio_cfg;
+ u16 dev_addr;
+ int ret;
+
+ mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+ if (regnum & MII_ADDR_C45) {
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_cfg |= MDIO_CFG_ENC45;
+ } else {
+ /* Clause 22 (legacy MDIO, i.e. 1G PHYs) */
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+ }
+
+ enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(regs);
+ if (ret)
+ return ret;
+
+ /* set port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+
+ /* set the register address */
+ if (regnum & MII_ADDR_C45) {
+ enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+
+ ret = enetc_mdio_wait_complete(regs);
+ if (ret)
+ return ret;
+ }
+
+ /* write the value */
+ enetc_wr_reg(&regs->mdio_data, MDIO_DATA(value));
+
+ ret = enetc_mdio_wait_complete(regs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ u32 mdio_ctl, mdio_cfg;
+ u16 dev_addr, value;
+ int ret;
+
+ mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG;
+ if (regnum & MII_ADDR_C45) {
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_cfg |= MDIO_CFG_ENC45;
+ } else {
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+ }
+
+ enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(regs);
+ if (ret)
+ return ret;
+
+ /* set port and device addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+
+ /* set the register address */
+ if (regnum & MII_ADDR_C45) {
+ enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+
+ ret = enetc_mdio_wait_complete(regs);
+ if (ret)
+ return ret;
+ }
+
+ /* initiate the read */
+ enetc_wr_reg(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+ ret = enetc_mdio_wait_complete(regs);
+ if (ret)
+ return ret;
+
+ /* return all Fs if nothing was there */
+ if (enetc_rd_reg(&regs->mdio_cfg) & MDIO_CFG_RD_ER) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%hhu\n",
+ phy_id, dev_addr, regnum);
+ return 0xffff;
+ }
+
+ value = enetc_rd_reg(&regs->mdio_data) & 0xffff;
+
+ return value;
+}
+
+int enetc_mdio_probe(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_regs __iomem *regs;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = mdiobus_alloc_size(sizeof(regs));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ /* store the enetc mdio base address for this bus */
+ regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET;
+ bus->priv = regs;
+
+ np = of_get_child_by_name(dev->of_node, "mdio");
+ if (!np) {
+ dev_err(dev, "MDIO node missing\n");
+ ret = -EINVAL;
+ goto err_registration;
+ }
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret) {
+ of_node_put(np);
+ dev_err(dev, "cannot register MDIO bus\n");
+ goto err_registration;
+ }
+
+ of_node_put(np);
+ pf->mdio = bus;
+
+ return 0;
+
+err_registration:
+ mdiobus_free(bus);
+
+ return ret;
+}
+
+void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio) {
+ mdiobus_unregister(pf->mdio);
+ mdiobus_free(pf->mdio);
+ }
+}
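
Both MDIO paths above branch on MII_ADDR_C45 in regnum: for Clause 45 the MMD device type sits in bits 20:16 and the 16-bit register address in the low bits, while Clause 22 only uses the low 5 bits. A hedged sketch of composing such a register number; MII_ADDR_C45 is taken as bit 30 per the UAPI mdio header of this era, and the device/register values are made up:

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45	(1u << 30)	/* assumption: mirrors include/uapi/linux/mdio.h */

/* Clause-45 register number: MMD device type in bits 20:16, register in 15:0. */
static uint32_t c45_regnum(unsigned int devad, unsigned int reg)
{
	return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (reg & 0xffff);
}

int main(void)
{
	/* e.g. PMA/PMD (MMD 1), register 0 -- values chosen only for the demo */
	printf("regnum = 0x%08x\n", c45_regnum(1, 0));
	return 0;
}
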
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_msg.c b/drivers/net/ethernet/freescale/enetc/enetc_msg.c
new file mode 100644
index 000000000000..40d22ebe9224
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_msg.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc_pf.h"
+
+static void enetc_msg_disable_mr_int(struct enetc_hw *hw)
+{
+ u32 psiier = enetc_rd(hw, ENETC_PSIIER);
+ /* disable MR int source(s) */
+ enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK);
+}
+
+static void enetc_msg_enable_mr_int(struct enetc_hw *hw)
+{
+ u32 psiier = enetc_rd(hw, ENETC_PSIIER);
+
+ enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK);
+}
+
+static irqreturn_t enetc_msg_psi_msix(int irq, void *data)
+{
+ struct enetc_si *si = (struct enetc_si *)data;
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ enetc_msg_disable_mr_int(&si->hw);
+ schedule_work(&pf->msg_task);
+
+ return IRQ_HANDLED;
+}
+
+static void enetc_msg_task(struct work_struct *work)
+{
+ struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task);
+ struct enetc_hw *hw = &pf->si->hw;
+ unsigned long mr_mask;
+ int i;
+
+ for (;;) {
+ mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK;
+ if (!mr_mask) {
+ /* re-arm MR interrupts, w1c the IDR reg */
+ enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK);
+ enetc_msg_enable_mr_int(hw);
+ return;
+ }
+
+ for (i = 0; i < pf->num_vfs; i++) {
+ u32 psimsgrr;
+ u16 msg_code;
+
+ if (!(ENETC_PSIMSGRR_MR(i) & mr_mask))
+ continue;
+
+ enetc_msg_handle_rxmsg(pf, i, &msg_code);
+
+ psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code);
+ psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */
+ enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr);
+ }
+ }
+}
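
enetc_msg_task() above keeps draining ENETC_PSIMSGRR until no MR bits are left, then acknowledges the latched events by writing ones into the detect register (the "w1c" comment: write-1-to-clear) before re-enabling the source. The same ack-then-re-arm idiom is sketched against a toy register model; the register names, offsets and helpers below are invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[64];	/* tiny register file standing in for MMIO */

#define IRQ_STATUS_REG	0x10u	/* w1c: writing 1 clears the corresponding bit */
#define IRQ_ENABLE_REG	0x14u

static uint32_t reg_read(uint32_t off) { return regs[off / 4]; }

static void reg_write(uint32_t off, uint32_t val)
{
	if (off == IRQ_STATUS_REG)
		regs[off / 4] &= ~val;	/* model write-1-to-clear semantics */
	else
		regs[off / 4] = val;
}

int main(void)
{
	regs[IRQ_STATUS_REG / 4] = 0x3;		/* two events latched */
	reg_write(IRQ_STATUS_REG, 0x3);		/* acknowledge them (w1c) */
	reg_write(IRQ_ENABLE_REG, reg_read(IRQ_ENABLE_REG) | 0x3);	/* re-arm */
	printf("status=0x%x enable=0x%x\n",
	       reg_read(IRQ_STATUS_REG), reg_read(IRQ_ENABLE_REG));
	return 0;
}
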
+
+/* Init */
+static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct enetc_msg_swbd *msg;
+ u32 val;
+
+ msg = &pf->rxmsg[idx];
+ /* allocate and set receive buffer */
+ msg->size = ENETC_DEFAULT_MSG_SIZE;
+
+ msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma,
+ GFP_KERNEL);
+ if (!msg->vaddr) {
+ dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n",
+ msg->size);
+ return -ENOMEM;
+ }
+
+ /* set multiple of 32 bytes */
+ val = lower_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val);
+ val = upper_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val);
+
+ return 0;
+}
+
+static void enetc_msg_free_mbx(struct enetc_si *si, int idx)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ struct enetc_msg_swbd *msg;
+
+ msg = &pf->rxmsg[idx];
+ dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma);
+ memset(msg, 0, sizeof(*msg));
+
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0);
+ enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0);
+}
+
+int enetc_msg_psi_init(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+ int vector, i, err;
+
+ /* register message passing interrupt handler */
+ snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), "%s-vfmsg",
+ si->ndev->name);
+ vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX);
+ err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si);
+ if (err) {
+ dev_err(&si->pdev->dev,
+ "PSI messaging: request_irq() failed!\n");
+ return err;
+ }
+
+ /* set one IRQ entry for PSI message receive notification (SI int) */
+ enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX);
+
+ /* initialize PSI mailbox */
+ INIT_WORK(&pf->msg_task, enetc_msg_task);
+
+ for (i = 0; i < pf->num_vfs; i++) {
+ err = enetc_msg_alloc_mbx(si, i);
+ if (err)
+ goto err_init_mbx;
+ }
+
+ /* enable MR interrupts */
+ enetc_msg_enable_mr_int(&si->hw);
+
+ return 0;
+
+err_init_mbx:
+ for (i--; i >= 0; i--)
+ enetc_msg_free_mbx(si, i);
+
+ free_irq(vector, si);
+
+ return err;
+}
+
+void enetc_msg_psi_free(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+ int i;
+
+ cancel_work_sync(&pf->msg_task);
+
+ /* disable MR interrupts */
+ enetc_msg_disable_mr_int(&si->hw);
+
+ for (i = 0; i < pf->num_vfs; i++)
+ enetc_msg_free_mbx(si, i);
+
+ /* de-register message passing interrupt handler */
+ free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
new file mode 100644
index 000000000000..15876a6e7598
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -0,0 +1,943 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include "enetc_pf.h"
+
+#define ENETC_DRV_VER_MAJ 1
+#define ENETC_DRV_VER_MIN 0
+
+#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
+ __stringify(ENETC_DRV_VER_MIN)
+static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
+#define ENETC_DRV_NAME_STR "ENETC PF driver"
+static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
+
+static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
+{
+ u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
+ u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));
+
+ *(u32 *)addr = upper;
+ *(u16 *)(addr + 4) = lower;
+}
+
+static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
+ const u8 *addr)
+{
+ u32 upper = *(const u32 *)addr;
+ u16 lower = *(const u16 *)(addr + 4);
+
+ __raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
+ __raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
+}
+
+static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
+ enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+
+ return 0;
+}
+
+static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
+{
+ u32 val = enetc_port_rd(hw, ENETC_PSIPVMR);
+
+ val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL);
+ enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
+}
+
+static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx)
+{
+ return pf->vlan_promisc_simap & BIT(si_idx);
+}
+
+static bool enetc_vlan_filter_is_on(struct enetc_pf *pf)
+{
+ int i;
+
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID)
+ return true;
+
+ return false;
+}
+
+static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+ pf->vlan_promisc_simap |= BIT(si_idx);
+ enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
+{
+ pf->vlan_promisc_simap &= ~BIT(si_idx);
+ enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap);
+}
+
+static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
+{
+ u32 val = 0;
+
+ if (vlan)
+ val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan;
+
+ enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
+}
+
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
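
enetc_mac_addr_hash_idx() above folds the 48-bit address into a 6-bit index for the 64-entry hash filter: the address is packed with byte 0 in the least-significant position (which is what __swab64(ether_addr_to_u64(addr)) >> 16 yields), treated as six interleaved columns of eight bits (bits i, i+6, i+12, ...), and result bit i is the parity of column i. A self-contained user-space sketch of the same computation; the sample address is arbitrary:

#include <stdint.h>
#include <stdio.h>

static int mac_hash_idx(const uint8_t *addr)
{
	uint64_t fold = 0, mask = 0;
	int res = 0;
	int i;

	/* pack addr[0] into the low byte .. addr[5] into bits 47:40 */
	for (i = 0; i < 6; i++)
		fold |= (uint64_t)addr[i] << (i * 8);

	/* one bit every 6 positions: 0, 6, 12, ... 42 */
	for (i = 0; i < 8; i++)
		mask |= 1ULL << (i * 6);

	/* result bit i = parity of column i (popcount stands in for hweight64) */
	for (i = 0; i < 6; i++)
		res |= (__builtin_popcountll(fold & (mask << i)) & 0x1) << i;

	return res;
}

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x04, 0x9f, 0x00, 0x01, 0x02 };	/* arbitrary */

	printf("hash index = %d\n", mac_hash_idx(addr));
	return 0;
}
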
+
+static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+
+static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ /* add exact match addr */
+ ether_addr_copy(filter->mac_addr, addr);
+ filter->mac_addr_cnt++;
+}
+
+static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+
+static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
+{
+ bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+ if (type == UC) {
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0);
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0);
+ } else { /* MC */
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 0);
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0);
+ }
+}
+
+static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
+ u32 *hash)
+{
+ bool err = si->errata & ENETC_ERR_UCMCSWP;
+
+ if (type == UC) {
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash);
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1));
+ } else { /* MC */
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash);
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1));
+ }
+}
+
+static void enetc_sync_mac_filters(struct enetc_pf *pf)
+{
+ struct enetc_mac_filter *f = pf->mac_filter;
+ struct enetc_si *si = pf->si;
+ int i, pos;
+
+ pos = EMETC_MAC_ADDR_FILT_RES;
+
+ for (i = 0; i < MADDR_TYPE; i++, f++) {
+ bool em = (f->mac_addr_cnt == 1) && (i == UC);
+ bool clear = !f->mac_addr_cnt;
+
+ if (clear) {
+ if (i == UC)
+ enetc_clear_mac_flt_entry(si, pos);
+
+ enetc_clear_mac_ht_flt(si, 0, i);
+ continue;
+ }
+
+ /* exact match filter */
+ if (em) {
+ int err;
+
+ enetc_clear_mac_ht_flt(si, 0, UC);
+
+ err = enetc_set_mac_flt_entry(si, pos, f->mac_addr,
+ BIT(0));
+ if (!err)
+ continue;
+
+ /* fallback to HT filtering */
+ dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n",
+ err);
+ }
+
+ /* hash table filter, clear EM filter for UC entries */
+ if (i == UC)
+ enetc_clear_mac_flt_entry(si, pos);
+
+ enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table);
+ }
+}
+
+static void enetc_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct enetc_hw *hw = &priv->si->hw;
+ bool uprom = false, mprom = false;
+ struct enetc_mac_filter *filter;
+ struct netdev_hw_addr *ha;
+ u32 psipmr = 0;
+ bool em;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* enable promisc mode for SI0 (PF) */
+ psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+ uprom = true;
+ mprom = true;
+ /* enable VLAN promisc mode for SI0 */
+ if (!enetc_si_vlan_promisc_is_on(pf, 0))
+ enetc_enable_si_vlan_promisc(pf, 0);
+
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ /* enable multicast promiscuous mode for SI0 (PF) */
+ psipmr = ENETC_PSIPMR_SET_MP(0);
+ mprom = true;
+ }
+
+ /* first 2 filter entries belong to PF */
+ if (!uprom) {
+ /* Update unicast filters */
+ filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(filter);
+
+ em = (netdev_uc_count(ndev) == 1);
+ netdev_for_each_uc_addr(ha, ndev) {
+ if (em) {
+ enetc_add_mac_addr_em_filter(filter, ha->addr);
+ break;
+ }
+
+ enetc_add_mac_addr_ht_filter(filter, ha->addr);
+ }
+ }
+
+ if (!mprom) {
+ /* Update multicast filters */
+ filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(filter);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (!is_multicast_ether_addr(ha->addr))
+ continue;
+
+ enetc_add_mac_addr_ht_filter(filter, ha->addr);
+ }
+ }
+
+ if (!uprom || !mprom)
+ /* update PF entries */
+ enetc_sync_mac_filters(pf);
+
+ psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) &
+ ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0));
+ enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
+}
+
+static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
+ u32 *hash)
+{
+ enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash);
+ enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1));
+}
+
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
+{
+ int i;
+
+ if (rehash) {
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+ }
+
+ enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter);
+}
+
+static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ if (enetc_si_vlan_promisc_is_on(pf, 0))
+ enetc_disable_si_vlan_promisc(pf, 0);
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_sync_vlan_ht_filter(pf, false);
+
+ return 0;
+}
+
+static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ __clear_bit(vid, pf->active_vlans);
+ enetc_sync_vlan_ht_filter(pf, true);
+
+ if (!enetc_vlan_filter_is_on(pf))
+ enetc_enable_si_vlan_promisc(pf, 0);
+
+ return 0;
+}
+
+static void enetc_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ if (reg & ENETC_PMO_IFM_RG) {
+ /* RGMII mode */
+ reg = (reg & ~ENETC_PM0_IFM_RLP) |
+ (en ? ENETC_PM0_IFM_RLP : 0);
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+ } else {
+ /* assume SGMII mode */
+ reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ reg = (reg & ~ENETC_PM0_CMD_XGLP) |
+ (en ? ENETC_PM0_CMD_XGLP : 0);
+ reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
+ (en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+ }
+}
+
+static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct enetc_vf_state *vf_state;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ vf_state = &pf->vf_state[vf];
+ vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC;
+ enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac);
+ return 0;
+}
+
+static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan,
+ u8 qos, __be16 proto)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (priv->si->errata & ENETC_ERR_VLAN_ISOL)
+ return -EOPNOTSUPP;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ /* only C-tags supported for now */
+ return -EPROTONOSUPPORT;
+
+ enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos);
+ return 0;
+}
+
+static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ u32 cfgr;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1));
+ cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? ENETC_PSICFGR0_ASE : 0);
+ enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr);
+
+ return 0;
+}
+
+static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
+{
+ unsigned char mac_addr[MAX_ADDR_LEN];
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ /* check MAC addresses for PF and all VFs; if any is zero, set it to a random one */
+ for (i = 0; i < pf->total_vfs + 1; i++) {
+ enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
+ if (!is_zero_ether_addr(mac_addr))
+ continue;
+ eth_random_addr(mac_addr);
+ dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
+ i, mac_addr);
+ enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
+ }
+}
+
+static void enetc_port_assign_rfs_entries(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int num_entries, vf_entries, i;
+ u32 val;
+
+ /* split RFS entries between functions */
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+ vf_entries = num_entries / (pf->total_vfs + 1);
+
+ for (i = 0; i < pf->total_vfs; i++)
+ enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries);
+ enetc_port_wr(hw, ENETC_PSIRFSCFGR(0),
+ num_entries - vf_entries * pf->total_vfs);
+
+ /* enable RFS on port */
+ enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
+}
+
+static void enetc_port_si_configure(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_hw *hw = &si->hw;
+ int num_rings, i;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PCAPR0);
+ num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
+
+ val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS);
+ val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS);
+
+ if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) {
+ val = ENETC_PSICFGR0_SET_TXBDR(num_rings);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+
+ dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n",
+ num_rings, ENETC_PF_NUM_RINGS);
+
+ num_rings = 0;
+ }
+
+ /* Add default one-time settings for SI0 (PF) */
+ val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+ enetc_port_wr(hw, ENETC_PSICFGR0(0), val);
+
+ if (num_rings)
+ num_rings -= ENETC_PF_NUM_RINGS;
+
+ /* Configure the SIs for each available VF */
+ val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+ val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+ if (num_rings) {
+ num_rings /= pf->total_vfs;
+ val |= ENETC_PSICFGR0_SET_TXBDR(num_rings);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rings);
+ }
+
+ for (i = 0; i < pf->total_vfs; i++)
+ enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val);
+
+ /* Port level VLAN settings */
+ val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+ enetc_port_wr(hw, ENETC_PVCLCTR, val);
+ /* use outer tag for VLAN filtering */
+ enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
+}
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+ enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
+ ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
+ ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ /* set auto-speed for RGMII */
+ if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG)
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
+ if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII)
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
+}
+
+static void enetc_configure_port_pmac(struct enetc_hw *hw)
+{
+ u32 temp;
+
+ /* Set pMAC step lock */
+ temp = enetc_port_rd(hw, ENETC_PFPMR);
+ enetc_port_wr(hw, ENETC_PFPMR,
+ temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
+
+ temp = enetc_port_rd(hw, ENETC_MMCSR);
+ enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+}
+
+static void enetc_configure_port(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
+ struct enetc_hw *hw = &pf->si->hw;
+
+ enetc_configure_port_pmac(hw);
+
+ enetc_configure_port_mac(hw);
+
+ enetc_port_si_configure(pf->si);
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(hw, hash_key);
+
+ /* split up RFS entries */
+ enetc_port_assign_rfs_entries(pf->si);
+
+ /* fix-up primary MAC addresses, if not set already */
+ enetc_port_setup_primary_mac_address(pf->si);
+
+ /* enforce VLAN promisc mode for all SIs */
+ pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
+ enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
+
+ enetc_port_wr(hw, ENETC_PSIPMR, 0);
+
+ /* enable port */
+ enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN);
+}
+
+/* Messaging */
+static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf,
+ int vf_id)
+{
+ struct enetc_vf_state *vf_state = &pf->vf_state[vf_id];
+ struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
+ struct enetc_msg_cmd_set_primary_mac *cmd;
+ struct device *dev = &pf->si->pdev->dev;
+ u16 cmd_id;
+ char *addr;
+
+ cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr;
+ cmd_id = cmd->header.id;
+ if (cmd_id != ENETC_MSG_CMD_MNG_ADD)
+ return ENETC_MSG_CMD_STATUS_FAIL;
+
+ addr = cmd->mac.sa_data;
+ if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC)
+ dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n",
+ vf_id);
+ else
+ enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr);
+
+ return ENETC_MSG_CMD_STATUS_OK;
+}
+
+void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status)
+{
+ struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id];
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_msg_cmd_header *cmd_hdr;
+ u16 cmd_type;
+
+ *status = ENETC_MSG_CMD_STATUS_OK;
+ cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr;
+ cmd_type = cmd_hdr->type;
+
+ switch (cmd_type) {
+ case ENETC_MSG_CMD_MNG_MAC:
+ *status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id);
+ break;
+ default:
+ dev_err(dev, "command not supported (cmd_type: 0x%x)\n",
+ cmd_type);
+ }
+}
+
+#ifdef CONFIG_PCI_IOV
+static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ int err;
+
+ if (!num_vfs) {
+ enetc_msg_psi_free(pf);
+ kfree(pf->vf_state);
+ pf->num_vfs = 0;
+ pci_disable_sriov(pdev);
+ } else {
+ pf->num_vfs = num_vfs;
+
+ pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
+ GFP_KERNEL);
+ if (!pf->vf_state) {
+ pf->num_vfs = 0;
+ return -ENOMEM;
+ }
+
+ err = enetc_msg_psi_init(pf);
+ if (err) {
+ dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
+ goto err_msg_psi;
+ }
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err);
+ goto err_en_sriov;
+ }
+ }
+
+ return num_vfs;
+
+err_en_sriov:
+ enetc_msg_psi_free(pf);
+err_msg_psi:
+ kfree(pf->vf_state);
+ pf->num_vfs = 0;
+
+ return err;
+}
+#else
+#define enetc_sriov_configure(pdev, num_vfs) (void)0
+#endif
+
+static int enetc_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ enetc_enable_rxvlan(&priv->si->hw, 0,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+ enetc_enable_txvlan(&priv->si->hw, 0,
+ !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ return enetc_set_features(ndev, features);
+}
+
+static const struct net_device_ops enetc_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc_pf_set_rx_mode,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
+ .ndo_set_vf_mac = enetc_pf_set_vf_mac,
+ .ndo_set_vf_vlan = enetc_pf_set_vf_vlan,
+ .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
+ .ndo_set_features = enetc_pf_set_features,
+};
+
+static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_LOOPBACK;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (si->num_rss)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ if (si->errata & ENETC_ERR_TXCSUM) {
+ ndev->hw_features &= ~NETIF_F_HW_CSUM;
+ ndev->features &= ~NETIF_F_HW_CSUM;
+ }
+
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* pick up primary MAC address from SI */
+ enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
+static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct device_node *np = priv->dev->of_node;
+ int err;
+
+ if (!np) {
+ dev_err(priv->dev, "missing ENETC port node\n");
+ return -ENODEV;
+ }
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!priv->phy_node) {
+ if (!of_phy_is_fixed_link(np)) {
+ dev_err(priv->dev, "PHY not specified\n");
+ return -ENODEV;
+ }
+
+ err = of_phy_register_fixed_link(np);
+ if (err < 0) {
+ dev_err(priv->dev, "fixed link registration failed\n");
+ return err;
+ }
+
+ priv->phy_node = of_node_get(np);
+ }
+
+ if (!of_phy_is_fixed_link(np)) {
+ err = enetc_mdio_probe(pf);
+ if (err) {
+ of_node_put(priv->phy_node);
+ return err;
+ }
+ }
+
+ priv->if_mode = of_get_phy_mode(np);
+ if (priv->if_mode < 0) {
+ dev_err(priv->dev, "missing phy type\n");
+ of_node_put(priv->phy_node);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ else
+ enetc_mdio_remove(pf);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void enetc_of_put_phy(struct enetc_ndev_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+
+ if (np && of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+}
+
+static int enetc_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+ dev_info(&pdev->dev, "device is disabled, skipping\n");
+ return -ENODEV;
+ }
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+ if (err) {
+ dev_err(&pdev->dev, "PCI probing failed\n");
+ return err;
+ }
+
+ si = pci_get_drvdata(pdev);
+ if (!si->hw.port || !si->hw.global) {
+ err = -ENODEV;
+ dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
+ goto err_map_pf_space;
+ }
+
+ pf = enetc_si_priv(si);
+ pf->si = si;
+ pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+
+ enetc_configure_port(pf);
+
+ enetc_get_si_caps(si);
+
+ ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+ if (!ndev) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "netdev creation failed\n");
+ goto err_alloc_netdev;
+ }
+
+ enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+ priv = netdev_priv(ndev);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_alloc_si_resources(priv);
+ if (err) {
+ dev_err(&pdev->dev, "SI resource alloc failed\n");
+ goto err_alloc_si_res;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(&pdev->dev, "MSIX alloc failed\n");
+ goto err_alloc_msix;
+ }
+
+ err = enetc_of_get_phy(priv);
+ if (err)
+ dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_reg_netdev;
+
+ netif_carrier_off(ndev);
+
+ netif_info(priv, probe, ndev, "%s v%s\n",
+ enetc_drv_name, enetc_drv_ver);
+
+ return 0;
+
+err_reg_netdev:
+ enetc_of_put_phy(priv);
+ enetc_free_msix(priv);
+err_alloc_msix:
+ enetc_free_si_resources(priv);
+err_alloc_si_res:
+ si->ndev = NULL;
+ free_netdev(ndev);
+err_alloc_netdev:
+err_map_pf_space:
+ enetc_pci_remove(pdev);
+
+ return err;
+}
+
+static void enetc_pf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct enetc_ndev_priv *priv;
+
+ if (pf->num_vfs)
+ enetc_sriov_configure(pdev, 0);
+
+ priv = netdev_priv(si->ndev);
+ netif_info(priv, drv, si->ndev, "%s v%s remove\n",
+ enetc_drv_name, enetc_drv_ver);
+
+ unregister_netdev(si->ndev);
+
+ enetc_mdio_remove(pf);
+ enetc_of_put_phy(priv);
+
+ enetc_free_msix(priv);
+
+ enetc_free_si_resources(priv);
+
+ free_netdev(si->ndev);
+
+ enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pf_id_table);
+
+static struct pci_driver enetc_pf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_pf_id_table,
+ .probe = enetc_pf_probe,
+ .remove = enetc_pf_remove,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = enetc_sriov_configure,
+#endif
+};
+module_pci_driver(enetc_pf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
new file mode 100644
index 000000000000..10dd1b53bb08
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+
+#define ENETC_PF_NUM_RINGS 8
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
+
+#define ENETC_MADDR_HASH_TBL_SZ 64
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
+#define ENETC_VLAN_HT_SIZE 64
+
+enum enetc_vf_flags {
+ ENETC_VF_FLAG_PF_SET_MAC = BIT(0),
+};
+
+struct enetc_vf_state {
+ enum enetc_vf_flags flags;
+};
+
+struct enetc_pf {
+ struct enetc_si *si;
+ int num_vfs; /* number of active VFs, after sriov_init */
+ int total_vfs; /* max number of VFs, set for PF at probe */
+ struct enetc_vf_state *vf_state;
+
+ struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT];
+
+ struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS];
+ struct work_struct msg_task;
+ char msg_int_name[ENETC_INT_NAME_MAX];
+
+ char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */
+ DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ DECLARE_BITMAP(active_vlans, VLAN_N_VID);
+
+ struct mii_bus *mdio; /* saved for cleanup */
+};
+
+int enetc_msg_psi_init(struct enetc_pf *pf);
+void enetc_msg_psi_free(struct enetc_pf *pf);
+void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
+
+/* MDIO */
+int enetc_mdio_probe(struct enetc_pf *pf);
+void enetc_mdio_remove(struct enetc_pf *pf);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
new file mode 100644
index 000000000000..8c1497e7d9c5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+#include "enetc.h"
+
+static struct ptp_clock_info enetc_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "ENETC PTP clock",
+ .max_adj = 512000,
+ .n_alarm = 0,
+ .n_ext_ts = 2,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfine = ptp_qoriq_adjfine,
+ .adjtime = ptp_qoriq_adjtime,
+ .gettime64 = ptp_qoriq_gettime,
+ .settime64 = ptp_qoriq_settime,
+ .enable = ptp_qoriq_enable,
+};
+
+static int enetc_ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct ptp_qoriq *ptp_qoriq;
+ void __iomem *base;
+ int err, len, n;
+
+ if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+ dev_info(&pdev->dev, "device is disabled, skipping\n");
+ return -ENODEV;
+ }
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "device enable failed\n");
+ return err;
+ }
+
+ /* set up for 64-bit DMA, fall back to 32-bit */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+ }
+
+ err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
+ goto err_pci_mem_reg;
+ }
+
+ pci_set_master(pdev);
+
+ ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL);
+ if (!ptp_qoriq) {
+ err = -ENOMEM;
+ goto err_alloc_ptp;
+ }
+
+ len = pci_resource_len(pdev, ENETC_BAR_REGS);
+
+ base = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
+ if (!base) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "ioremap() failed\n");
+ goto err_ioremap;
+ }
+
+ /* Allocate 1 interrupt */
+ n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ if (n != 1) {
+ err = -EPERM;
+ goto err_irq;
+ }
+
+ ptp_qoriq->irq = pci_irq_vector(pdev, 0);
+
+ err = request_irq(ptp_qoriq->irq, ptp_qoriq_isr, 0, DRIVER, ptp_qoriq);
+ if (err) {
+ dev_err(&pdev->dev, "request_irq() failed!\n");
+ goto err_irq;
+ }
+
+ ptp_qoriq->dev = &pdev->dev;
+
+ err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps);
+ if (err)
+ goto err_no_clock;
+
+ pci_set_drvdata(pdev, ptp_qoriq);
+
+ return 0;
+
+err_no_clock:
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
+err_irq:
+ iounmap(base);
+err_ioremap:
+ kfree(ptp_qoriq);
+err_alloc_ptp:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+err_dma:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void enetc_ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
+
+ ptp_qoriq_free(ptp_qoriq);
+ kfree(ptp_qoriq);
+
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_ptp_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PTP) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_ptp_id_table);
+
+static struct pci_driver enetc_ptp_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_ptp_id_table,
+ .probe = enetc_ptp_probe,
+ .remove = enetc_ptp_remove,
+};
+module_pci_driver(enetc_ptp_driver);
+
+MODULE_DESCRIPTION("ENETC PTP clock driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
new file mode 100644
index 000000000000..64bebee9f52a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include <linux/module.h>
+#include "enetc.h"
+
+#define ENETC_DRV_VER_MAJ 1
+#define ENETC_DRV_VER_MIN 0
+
+#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
+ __stringify(ENETC_DRV_VER_MIN)
+static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
+#define ENETC_DRV_NAME_STR "ENETC VF driver"
+static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
+
+/* Messaging */
+static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
+ struct enetc_msg_swbd *msg)
+{
+ u32 val;
+
+ val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma);
+ enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma));
+ enetc_wr(hw, ENETC_VSIMSGSNDAR0, val);
+}
+
+static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg)
+{
+ int timeout = 100;
+ u32 vsimsgsr;
+
+ enetc_msg_vsi_write_msg(&si->hw, msg);
+
+ do {
+ vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR);
+ if (!(vsimsgsr & ENETC_VSIMSGSR_MB))
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* check for message delivery error */
+ if (vsimsgsr & ENETC_VSIMSGSR_MS) {
+ dev_err(&si->pdev->dev, "VSI command execute error: %d\n",
+ ENETC_SIMSGSR_GET_MC(vsimsgsr));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv,
+ struct sockaddr *saddr)
+{
+ struct enetc_msg_cmd_set_primary_mac *cmd;
+ struct enetc_msg_swbd msg;
+ int err;
+
+ msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64);
+ msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma,
+ GFP_KERNEL);
+ if (!msg.vaddr) {
+ dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n",
+ msg.size);
+ return -ENOMEM;
+ }
+
+ cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr;
+ cmd->header.type = ENETC_MSG_CMD_MNG_MAC;
+ cmd->header.id = ENETC_MSG_CMD_MNG_ADD;
+ memcpy(&cmd->mac, saddr, sizeof(struct sockaddr));
+
+ /* send the command and wait */
+ err = enetc_msg_vsi_send(priv->si, &msg);
+
+ dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma);
+
+ return err;
+}
+
+static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int enetc_vf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ return enetc_set_features(ndev, features);
+}
+
+/* Probing / Init */
+static const struct net_device_ops enetc_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_vf_set_mac_addr,
+ .ndo_set_features = enetc_vf_set_features,
+};
+
+static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (si->num_rss)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ if (si->errata & ENETC_ERR_TXCSUM) {
+ ndev->hw_features &= ~NETIF_F_HW_CSUM;
+ ndev->features &= ~NETIF_F_HW_CSUM;
+ }
+
+ /* pick up primary MAC address from SI */
+ enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+}
+
+static int enetc_vf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ struct enetc_si *si;
+ int err;
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
+ if (err) {
+ dev_err(&pdev->dev, "PCI probing failed\n");
+ return err;
+ }
+
+ si = pci_get_drvdata(pdev);
+
+ enetc_get_si_caps(si);
+
+ ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS);
+ if (!ndev) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "netdev creation failed\n");
+ goto err_alloc_netdev;
+ }
+
+ enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops);
+
+ priv = netdev_priv(ndev);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_alloc_si_resources(priv);
+ if (err) {
+ dev_err(&pdev->dev, "SI resource alloc failed\n");
+ goto err_alloc_si_res;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(&pdev->dev, "MSIX alloc failed\n");
+ goto err_alloc_msix;
+ }
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_reg_netdev;
+
+ netif_carrier_off(ndev);
+
+ netif_info(priv, probe, ndev, "%s v%s\n",
+ enetc_drv_name, enetc_drv_ver);
+
+ return 0;
+
+err_reg_netdev:
+ enetc_free_msix(priv);
+err_alloc_msix:
+ enetc_free_si_resources(priv);
+err_alloc_si_res:
+ si->ndev = NULL;
+ free_netdev(ndev);
+err_alloc_netdev:
+ enetc_pci_remove(pdev);
+
+ return err;
+}
+
+static void enetc_vf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(si->ndev);
+ netif_info(priv, drv, si->ndev, "%s v%s remove\n",
+ enetc_drv_name, enetc_drv_ver);
+ unregister_netdev(si->ndev);
+
+ enetc_free_msix(priv);
+
+ enetc_free_si_resources(priv);
+
+ free_netdev(si->ndev);
+
+ enetc_pci_remove(pdev);
+}
+
+static const struct pci_device_id enetc_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_vf_id_table);
+
+static struct pci_driver enetc_vf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_vf_id_table,
+ .probe = enetc_vf_probe,
+ .remove = enetc_vf_remove,
+};
+module_pci_driver(enetc_vf_driver);
+
+MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ae0f88bce9aa..697c2427f2b7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = {
IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
+static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
@@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
u32 *buf = (u32 *)regbuf;
u32 i, off;
+ regs->version = fec_enet_register_version;
+
memset(buf, 0, regs->len);
for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
@@ -3467,7 +3471,7 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_clk_ipg;
- fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+ fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
if (ret) {
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index b90bab72efdb..c1968b3ecec8 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
DMA_TO_DEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
}
spin_unlock(&priv->lock);
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 71f4205f14e7..3c21486c6c84 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -855,9 +855,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (err < 0)
dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
- dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
- mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
- mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
+ dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
if (IS_ERR(priv->eth_dev)) {
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 241325c35cb4..27ed995f439a 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1492,7 +1492,7 @@ static int gfar_get_ts_info(struct net_device *dev,
struct gfar_private *priv = netdev_priv(dev);
struct platform_device *ptp_dev;
struct device_node *ptp_node;
- struct qoriq_ptp *ptp = NULL;
+ struct ptp_qoriq *ptp = NULL;
info->phc_index = -1;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c3d539e209ed..eb3e65e8868f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
u16 i, j;
u8 __iomem *bd;
+ netdev_reset_queue(ugeth->ndev);
+
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 471805ea363b..e5d853b7b454 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
for (i = 0; i < QUEUE_NUMS; i++) {
size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
- virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
- GFP_KERNEL);
+ virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
if (virt_addr == NULL)
goto error_free_pool;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ad1779fc410e..a78bfafd212c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
int i;
- vf_cb->mac_cb = NULL;
-
- kfree(vf_cb);
-
for (i = 0; i < handle->q_num; i++)
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+ kfree(vf_cb);
}
static int hns_ae_wait_flow_down(struct hnae_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 3b9e74be5fbd..ac55db065f16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
dsaf_dev = dev_get_drvdata(&pdev->dev);
if (!dsaf_dev) {
dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+ put_device(&pdev->dev);
return -ENODEV;
}
@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
dsaf_dev->ae_dev.name);
+ put_device(&pdev->dev);
return -ENODEV;
}
@@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
}
+
+ put_device(&pdev->dev);
+
return 0;
}
EXPORT_SYMBOL(hns_dsaf_roce_reset);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 0942e4916d9d..3d07c8a7639d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -83,8 +83,9 @@ static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
else
ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
- ppe_common = devm_kzalloc(dsaf_dev->dev, sizeof(*ppe_common) +
- ppe_num * sizeof(struct hns_ppe_cb), GFP_KERNEL);
+ ppe_common = devm_kzalloc(dsaf_dev->dev,
+ struct_size(ppe_common, ppe_cb, ppe_num),
+ GFP_KERNEL);
if (!ppe_common)
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 5d64519b9b1d..6bf346c11b25 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -788,8 +788,9 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
int ring_num = hns_rcb_get_ring_num(dsaf_dev);
rcb_common =
- devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
- ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
+ devm_kzalloc(dsaf_dev->dev,
+ struct_size(rcb_common, ring_pair_cb, ring_num),
+ GFP_KERNEL);
if (!rcb_common) {
dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 5748d3f722f6..60e7d7ae3787 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1170,6 +1170,13 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (!h->phy_dev)
return 0;
+ ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+ linkmode_copy(phy_dev->advertising, phy_dev->supported);
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+ phy_dev->autoneg = false;
+
if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
phy_dev->dev_flags = 0;
@@ -1181,16 +1188,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
- ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
- linkmode_and(phy_dev->supported, phy_dev->supported, supported);
- linkmode_copy(phy_dev->advertising, phy_dev->supported);
-
- if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
- phy_dev->autoneg = false;
-
- if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
- phy_stop(phy_dev);
-
return 0;
}
@@ -2421,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
out_notify_fail:
(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
+ /* safe for ACPI FW */
+ of_node_put(to_of_node(priv->fwnode));
free_netdev(ndev);
return ret;
}
@@ -2450,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
set_bit(NIC_STATE_REMOVING, &priv->state);
(void)cancel_work_sync(&priv->service_task);
+ /* safe for ACPI FW */
+ of_node_put(to_of_node(priv->fwnode));
+
free_netdev(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 8e9b95871d30..ce15d2350db9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
*/
static int hns_nic_nway_reset(struct net_device *netdev)
{
- int ret = 0;
struct phy_device *phy = netdev->phydev;
- if (netif_running(netdev)) {
- /* if autoneg is disabled, don't restart auto-negotiation */
- if (phy && phy->autoneg == AUTONEG_ENABLE)
- ret = genphy_restart_aneg(phy);
- }
+ if (!netif_running(netdev))
+ return 0;
- return ret;
+ if (!phy)
+ return -EOPNOTSUPP;
+
+ if (phy->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ return genphy_restart_aneg(phy);
}
static u32
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 691d12174902..299b277bc7ae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -21,6 +21,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */
HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
+ HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */
HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
@@ -40,6 +41,10 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
+ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
+ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
+
+ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
};
/* below are per-VF mac-vlan subcodes */
@@ -60,7 +65,7 @@ enum hclge_mbx_vlan_cfg_subcode {
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 781e5dee3c70..17ab4f4af6ad 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -32,6 +32,9 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, int inited)
{
+ if (!client || !ae_dev)
+ return;
+
switch (client->type) {
case HNAE3_CLIENT_KNIC:
hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
@@ -109,6 +112,9 @@ int hnae3_register_client(struct hnae3_client *client)
struct hnae3_ae_dev *ae_dev;
int ret = 0;
+ if (!client)
+ return -ENODEV;
+
mutex_lock(&hnae3_common_lock);
/* one system should only have one client for every type */
list_for_each_entry(client_tmp, &hnae3_client_list, node) {
@@ -141,6 +147,9 @@ void hnae3_unregister_client(struct hnae3_client *client)
{
struct hnae3_ae_dev *ae_dev;
+ if (!client)
+ return;
+
mutex_lock(&hnae3_common_lock);
/* un-initialize the client on every matched port */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
@@ -163,6 +172,9 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
struct hnae3_client *client;
int ret = 0;
+ if (!ae_algo)
+ return;
+
mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);
@@ -173,8 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
if (!id)
continue;
- /* ae_dev init should set flag */
+ if (!ae_algo->ops) {
+ dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+ continue;
+ }
ae_dev->ops = ae_algo->ops;
+
ret = ae_algo->ops->init_ae_dev(ae_dev);
if (ret) {
dev_err(&ae_dev->pdev->dev,
@@ -182,6 +198,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
continue;
}
+ /* ae_dev init should set flag */
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
/* check the client list for the match with this ae_dev type and
@@ -209,6 +226,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
+ if (!ae_algo)
+ return;
+
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
@@ -238,13 +258,16 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo);
* @ae_dev: the AE device
* NOTE: the duplicated name will not be checked
*/
-void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
+int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
int ret = 0;
+ if (!ae_dev)
+ return -ENODEV;
+
mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
@@ -255,14 +278,13 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!id)
continue;
- ae_dev->ops = ae_algo->ops;
-
- if (!ae_dev->ops) {
- dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+ if (!ae_algo->ops) {
+ dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+ ret = -EOPNOTSUPP;
goto out_err;
}
+ ae_dev->ops = ae_algo->ops;
- /* ae_dev init should set flag */
ret = ae_dev->ops->init_ae_dev(ae_dev);
if (ret) {
dev_err(&ae_dev->pdev->dev,
@@ -270,6 +292,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
goto out_err;
}
+ /* ae_dev init should set flag */
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
break;
}
@@ -285,8 +308,15 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
ret);
}
+ mutex_unlock(&hnae3_common_lock);
+
+ return 0;
+
out_err:
+ list_del(&ae_dev->node);
mutex_unlock(&hnae3_common_lock);
+
+ return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_dev);
@@ -299,6 +329,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
+ if (!ae_dev)
+ return;
+
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 36eab37d8a40..66d7a8b80e76 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -87,7 +87,8 @@ struct hnae3_queue {
struct hnae3_handle *handle;
int tqp_index; /* index in a handle */
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
- u16 desc_num; /* total number of desc */
+ u16 tx_desc_num;/* total number of tx desc */
+ u16 rx_desc_num;/* total number of rx desc */
};
/*hnae3 loop mode*/
@@ -124,6 +125,7 @@ enum hnae3_reset_notify_type {
HNAE3_DOWN_CLIENT,
HNAE3_INIT_CLIENT,
HNAE3_UNINIT_CLIENT,
+ HNAE3_RESTORE_CLIENT,
};
enum hnae3_reset_type {
@@ -432,7 +434,8 @@ struct hnae3_ae_ops {
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
u16 *alloc_tqps, u16 *max_rss_size);
- int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+ int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured);
void (*get_flowctrl_adv)(struct hnae3_handle *handle,
u32 *flowctrl_adv);
int (*set_led_id)(struct hnae3_handle *handle,
@@ -459,9 +462,11 @@ struct hnae3_ae_ops {
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
- int (*set_gro_en)(struct hnae3_handle *handle, int enable);
+ int (*set_gro_en)(struct hnae3_handle *handle, bool enable);
u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
+ int (*mac_connect_phy)(struct hnae3_handle *handle);
+ void (*mac_disconnect_phy)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
@@ -475,7 +480,6 @@ struct hnae3_dcb_ops {
u8 (*getdcbx)(struct hnae3_handle *);
u8 (*setdcbx)(struct hnae3_handle *, u8);
- int (*map_update)(struct hnae3_handle *);
int (*setup_tc)(struct hnae3_handle *, u8, u8 *);
};
@@ -500,8 +504,10 @@ struct hnae3_tc_info {
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
+ u16 req_rss_size;
u16 rx_buf_len;
- u16 num_desc;
+ u16 num_tx_desc;
+ u16 num_rx_desc;
u8 num_tc; /* Total number of enabled TCs */
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
@@ -533,7 +539,9 @@ struct hnae3_roce_private_info {
struct hnae3_unic_private_info {
struct net_device *netdev;
u16 rx_buf_len;
- u16 num_desc;
+ u16 num_tx_desc;
+ u16 num_rx_desc;
+
u16 num_tqps; /* total number of tqps in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
};
@@ -585,7 +593,7 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field((origin), (0x1 << (shift)), (shift))
-void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
+int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 07cd58798083..3cb43b1f1c2e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -21,6 +21,8 @@
#include "hnae3.h"
#include "hns3_enet.h"
+#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
+
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static void hns3_remove_hw_addr(struct net_device *netdev);
@@ -348,6 +350,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
return ret;
}
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
/* enable the vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
@@ -361,11 +365,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
goto out_start_err;
- clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
-
return 0;
out_start_err:
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
while (j--)
hns3_tqp_disable(h->kinfo.tqp[j]);
@@ -377,6 +380,29 @@ out_start_err:
return ret;
}
+static void hns3_config_xps(struct hns3_nic_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
+ struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
+
+ while (ring) {
+ int ret;
+
+ ret = netif_set_xps_queue(priv->netdev,
+ &tqp_vector->affinity_mask,
+ ring->tqp->tqp_index);
+ if (ret)
+ netdev_warn(priv->netdev,
+ "set xps queue failed: %d", ret);
+
+ ring = ring->next;
+ }
+ }
+}
+
static int hns3_nic_net_open(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -409,6 +435,7 @@ static int hns3_nic_net_open(struct net_device *netdev)
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
+ hns3_config_xps(priv);
return 0;
}
@@ -506,7 +533,7 @@ static u8 hns3_get_netdev_flags(struct net_device *netdev)
u8 flags = 0;
if (netdev->flags & IFF_PROMISC) {
- flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
+ flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
} else {
flags |= HNAE3_VLAN_FLTR;
if (netdev->flags & IFF_ALLMULTI)
@@ -541,13 +568,13 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
}
}
- hns3_update_promisc_mode(netdev, new_flags);
/* User mode Promisc mode enable and vlan filtering is disabled to
* let all packets in. MAC-VLAN Table overflow Promisc enabled and
* vlan fitering is enabled
*/
hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
h->netdev_flags = new_flags;
+ hns3_update_promisc_mode(netdev, new_flags);
}
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
@@ -594,7 +621,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
return 0;
ret = skb_cow_head(skb, 0);
- if (ret)
+ if (unlikely(ret))
return ret;
l3.hdr = skb_network_header(skb);
@@ -633,7 +660,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* normal or tunnel packet*/
l4_offset = l4.hdr - skb->data;
- hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ hdr_len = (l4.tcp->doff << 2) + l4_offset;
/* remove payload length from inner pseudo checksum when tso*/
l4_paylen = skb->len - l4_offset;
@@ -642,8 +669,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* find the txbd field values */
*paylen = skb->len - hdr_len;
- hnae3_set_bit(*type_cs_vlan_tso,
- HNS3_TXD_TSO_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
@@ -654,11 +680,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
u8 *il4_proto)
{
- union {
- struct iphdr *v4;
- struct ipv6hdr *v6;
- unsigned char *hdr;
- } l3;
+ union l3_hdr_info l3;
unsigned char *l4_hdr;
unsigned char *exthdr;
u8 l4_proto_tmp;
@@ -711,17 +733,8 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
{
- union {
- struct iphdr *v4;
- struct ipv6hdr *v6;
- unsigned char *hdr;
- } l3;
- union {
- struct tcphdr *tcp;
- struct udphdr *udp;
- struct gre_base_hdr *gre;
- unsigned char *hdr;
- } l4;
+ union l3_hdr_info l3;
+ union l4_hdr_info l4;
unsigned char *l2_hdr;
u8 l4_proto = ol4_proto;
u32 ol2_len;
@@ -735,21 +748,19 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute L2 header size for normal packet, defined in 2 Bytes */
l2_len = l3.hdr - skb->data;
- hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
/* tunnel packet*/
if (skb->encapsulation) {
/* compute OL2 header size, defined in 2 Bytes */
ol2_len = l2_len;
- hnae3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L2LEN_S, ol2_len >> 1);
/* compute OL3 header size, defined in 4 Bytes */
ol3_len = l4.hdr - l3.hdr;
- hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, ol3_len >> 2);
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
+ ol3_len >> 2);
/* MAC in UDP, MAC in GRE (0x6558)*/
if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -758,17 +769,16 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute OL4 header size, defined in 4 Bytes. */
ol4_len = l2_hdr - l4.hdr;
- hnae3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
- ol4_len >> 2);
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L4LEN_S, ol4_len >> 2);
/* switch IP header ptr from outer to inner header */
l3.hdr = skb_inner_network_header(skb);
/* compute inner l2 header size, defined in 2 Bytes. */
l2_len = l3.hdr - l2_hdr;
- hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S,
+ l2_len >> 1);
} else {
/* skb packet types not supported by hardware,
* txbd len fild doesn't be filled.
@@ -784,24 +794,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
l3_len = l4.hdr - l3.hdr;
- hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, l3_len >> 2);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
- hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, l4.tcp->doff);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ l4.tcp->doff);
break;
case IPPROTO_SCTP:
- hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S,
- (sizeof(struct sctphdr) >> 2));
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
case IPPROTO_UDP:
- hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S,
- (sizeof(struct udphdr) >> 2));
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
default:
/* skb packet types not supported by hardware,
@@ -820,12 +827,7 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT 4789
- union {
- struct tcphdr *tcp;
- struct udphdr *udp;
- struct gre_base_hdr *gre;
- unsigned char *hdr;
- } l4;
+ union l4_hdr_info l4;
l4.hdr = skb_transport_header(skb);
@@ -841,11 +843,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
u8 il4_proto, u32 *type_cs_vlan_tso,
u32 *ol_type_vlan_len_msec)
{
- union {
- struct iphdr *v4;
- struct ipv6hdr *v6;
- unsigned char *hdr;
- } l3;
+ union l3_hdr_info l3;
u32 l4_proto = ol4_proto;
l3.hdr = skb_network_header(skb);
@@ -855,34 +853,30 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
/* define outer network header type.*/
if (skb->protocol == htons(ETH_P_IP)) {
if (skb_is_gso(skb))
- hnae3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
else
- hnae3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV6);
}
/* define tunnel type(OL4).*/
switch (l4_proto) {
case IPPROTO_UDP:
- hnae3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
break;
case IPPROTO_GRE:
- hnae3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
break;
default:
/* drop the skb tunnel packet if hardware don't support,
@@ -903,43 +897,37 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
}
if (l3.v4->version == 4) {
- hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
+ HNS3_L3T_IPV4);
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
if (skb_is_gso(skb))
- hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
} else if (l3.v6->version == 6) {
- hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
+ HNS3_L3T_IPV6);
}
switch (l4_proto) {
case IPPROTO_TCP:
- hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
- hnae3_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_TCP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
break;
- hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
- hnae3_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_UDP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
break;
case IPPROTO_SCTP:
- hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
- hnae3_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_SCTP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
break;
default:
/* drop the skb tunnel packet if hardware don't support,
@@ -961,11 +949,8 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
- hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
- HNS3_TXD_BDTYPE_S, 0);
- hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
- hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
- hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+ hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+ hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -998,10 +983,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
} else {
- hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
}
} else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -1009,7 +994,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
int rc;
rc = skb_cow_head(skb, 0);
- if (rc < 0)
+ if (unlikely(rc < 0))
return rc;
vhdr = (struct vlan_ethhdr *)skb->data;
vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
@@ -1026,26 +1011,21 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- u32 ol_type_vlan_len_msec = 0;
u16 bdtp_fe_sc_vld_ra_ri = 0;
struct skb_frag_struct *frag;
unsigned int frag_buf_num;
- u32 type_cs_vlan_tso = 0;
- struct sk_buff *skb;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
- unsigned int k;
- int sizeoflast;
- u32 paylen = 0;
+ int k, sizeoflast;
dma_addr_t dma;
- u16 mss = 0;
- u8 ol4_proto;
- u8 il4_proto;
- int ret;
if (type == DESC_TYPE_SKB) {
- skb = (struct sk_buff *)priv;
- paylen = skb->len;
+ struct sk_buff *skb = (struct sk_buff *)priv;
+ u32 ol_type_vlan_len_msec = 0;
+ u32 type_cs_vlan_tso = 0;
+ u32 paylen = skb->len;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
+ u16 mss = 0;
+ int ret;
ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
&ol_type_vlan_len_msec,
@@ -1054,10 +1034,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return ret;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 ol4_proto, il4_proto;
+
skb_reset_mac_len(skb);
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (ret)
+ if (unlikely(ret))
return ret;
hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
&type_cs_vlan_tso,
@@ -1065,12 +1047,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
&type_cs_vlan_tso,
&ol_type_vlan_len_msec);
- if (ret)
+ if (unlikely(ret))
return ret;
ret = hns3_set_tso(skb, &paylen, &mss,
&type_cs_vlan_tso);
- if (ret)
+ if (unlikely(ret))
return ret;
}
@@ -1090,15 +1072,15 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
- if (dma_mapping_error(ring->dev, dma)) {
+ if (unlikely(dma_mapping_error(ring->dev, dma))) {
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
desc_cb->length = size;
- frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
- sizeoflast = size % HNS3_MAX_BD_SIZE;
+ frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+ sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
/* When frag size is bigger than hardware limit, split this frag */
@@ -1133,6 +1115,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
struct hns3_enet_ring *ring)
{
struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
struct skb_frag_struct *frag;
int bdnum_for_frag;
int frag_num;
@@ -1141,21 +1124,34 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
int i;
size = skb_headlen(skb);
- buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+ buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
frag_num = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frag_num; i++) {
frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
- bdnum_for_frag =
- (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
- if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
+ bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >>
+ HNS3_MAX_BD_SIZE_OFFSET;
+ if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
buf_num += bdnum_for_frag;
}
- if (buf_num > ring_space(ring))
+ if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
+ buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >>
+ HNS3_MAX_BD_SIZE_OFFSET;
+ if (ring_space(ring) < buf_num)
+ return -EBUSY;
+ /* manually split the packet to be sent */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+ }
+
+ if (unlikely(ring_space(ring) < buf_num))
return -EBUSY;
*bnum = buf_num;
@@ -1166,11 +1162,24 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
struct hns3_enet_ring *ring)
{
struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
int buf_num;
/* No. of segments (plus a header) */
buf_num = skb_shinfo(skb)->nr_frags + 1;
+ if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
+ buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+ if (ring_space(ring) < buf_num)
+ return -EBUSY;
+ /* manually split the packet to be sent */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+ }
+
if (unlikely(ring_space(ring) < buf_num))
return -EBUSY;
@@ -1252,9 +1261,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
- ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
- DESC_TYPE_SKB);
- if (ret)
+ ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
+ DESC_TYPE_SKB);
+ if (unlikely(ret))
goto head_fill_err;
next_to_use_frag = ring->next_to_use;
@@ -1263,11 +1272,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
size = skb_frag_size(frag);
- ret = priv->ops.fill_desc(ring, frag, size,
- seg_num - 1 == i ? 1 : 0,
- DESC_TYPE_PAGE);
+ ret = hns3_fill_desc(ring, frag, size,
+ seg_num - 1 == i ? 1 : 0,
+ DESC_TYPE_PAGE);
- if (ret)
+ if (unlikely(ret))
goto frag_fill_err;
}
@@ -1344,6 +1353,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t changed = netdev->features ^ features;
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ bool enable;
int ret;
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
@@ -1354,38 +1364,29 @@ static int hns3_nic_set_features(struct net_device *netdev,
}
if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
- if (features & NETIF_F_GRO_HW)
- ret = h->ae_algo->ops->set_gro_en(h, true);
- else
- ret = h->ae_algo->ops->set_gro_en(h, false);
+ enable = !!(features & NETIF_F_GRO_HW);
+ ret = h->ae_algo->ops->set_gro_en(h, enable);
if (ret)
return ret;
}
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
- if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
- h->ae_algo->ops->enable_vlan_filter(h, true);
- else
- h->ae_algo->ops->enable_vlan_filter(h, false);
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ h->ae_algo->ops->enable_vlan_filter(h, enable);
}
if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
h->ae_algo->ops->enable_hw_strip_rxvtag) {
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
- else
- ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
-
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
if (ret)
return ret;
}
if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
- if (features & NETIF_F_NTUPLE)
- h->ae_algo->ops->enable_fd(h, true);
- else
- h->ae_algo->ops->enable_fd(h, false);
+ enable = !!(features & NETIF_F_NTUPLE);
+ h->ae_algo->ops->enable_fd(h, enable);
}
netdev->features = features;
@@ -1399,7 +1400,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
int queue_num = priv->ae_handle->kinfo.num_tqps;
struct hnae3_handle *handle = priv->ae_handle;
struct hns3_enet_ring *ring;
+ u64 rx_length_errors = 0;
+ u64 rx_crc_errors = 0;
+ u64 rx_multicast = 0;
unsigned int start;
+ u64 tx_errors = 0;
+ u64 rx_errors = 0;
unsigned int idx;
u64 tx_bytes = 0;
u64 rx_bytes = 0;
@@ -1420,8 +1426,8 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
- tx_drop += ring->stats.tx_busy;
tx_drop += ring->stats.sw_err_cnt;
+ tx_errors += ring->stats.sw_err_cnt;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1431,8 +1437,13 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
rx_drop += ring->stats.non_vld_descs;
- rx_drop += ring->stats.err_pkt_len;
rx_drop += ring->stats.l2_err;
+ rx_errors += ring->stats.non_vld_descs;
+ rx_errors += ring->stats.l2_err;
+ rx_crc_errors += ring->stats.l2_err;
+ rx_crc_errors += ring->stats.l3l4_csum_err;
+ rx_multicast += ring->stats.rx_multicast;
+ rx_length_errors += ring->stats.err_pkt_len;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
@@ -1441,15 +1452,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
stats->rx_bytes = rx_bytes;
stats->rx_packets = rx_pkts;
- stats->rx_errors = netdev->stats.rx_errors;
- stats->multicast = netdev->stats.multicast;
- stats->rx_length_errors = netdev->stats.rx_length_errors;
- stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+ stats->rx_errors = rx_errors;
+ stats->multicast = rx_multicast;
+ stats->rx_length_errors = rx_length_errors;
+ stats->rx_crc_errors = rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
- stats->tx_errors = netdev->stats.tx_errors;
- stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
- stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
+ stats->tx_errors = tx_errors;
+ stats->rx_dropped = rx_drop;
+ stats->tx_dropped = tx_drop;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1472,8 +1483,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
u8 tc = mqprio_qopt->qopt.num_tc;
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
- bool if_running;
- int ret;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
@@ -1485,24 +1494,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (!netdev)
return -EINVAL;
- if_running = netif_running(netdev);
- if (if_running) {
- hns3_nic_net_stop(netdev);
- msleep(100);
- }
-
- ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
+ return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
- if (ret)
- goto out;
-
- ret = hns3_nic_set_real_num_queue(netdev);
-
-out:
- if (if_running)
- hns3_nic_net_open(netdev);
-
- return ret;
}
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -1755,9 +1748,13 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
- hnae3_register_ae_dev(ae_dev);
+ ret = hnae3_register_ae_dev(ae_dev);
+ if (ret) {
+ devm_kfree(&pdev->dev, ae_dev);
+ pci_set_drvdata(pdev, NULL);
+ }
- return 0;
+ return ret;
}
/* hns3_remove - Device removal routine
@@ -1771,6 +1768,7 @@ static void hns3_remove(struct pci_dev *pdev)
hns3_disable_sriov(pdev);
hnae3_unregister_ae_dev(ae_dev);
+ pci_set_drvdata(pdev, NULL);
}
/**
@@ -1935,8 +1933,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
if (pdev->revision >= 0x21) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_GRO_HW;
+ netdev->hw_features |= NETIF_F_GRO_HW;
netdev->features |= NETIF_F_GRO_HW;
if (!(h->flags & HNAE3_SUPPORT_VF)) {
@@ -2041,9 +2038,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
int size = ring->desc_num * sizeof(ring->desc[0]);
- ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
- &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
@@ -2322,13 +2318,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
}
/* check if hardware has done checksum */
- if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
- if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
- hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
- hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
- hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+ if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
+ BIT(HNS3_RXD_OL3E_B) |
+ BIT(HNS3_RXD_OL4E_B)))) {
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
u64_stats_update_end(&ring->syncp);
@@ -2336,11 +2331,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
- l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
- HNS3_RXD_L4ID_S);
-
ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
HNS3_RXD_OL4ID_S);
switch (ol4_type) {
@@ -2349,6 +2339,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
skb->csum_level = 1;
/* fall through */
case HNS3_OL4_TYPE_NO_TUN:
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
+
/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
if ((l3_type == HNS3_L3_TYPE_IPV4 ||
l3_type == HNS3_L3_TYPE_IPV6) &&
@@ -2473,11 +2468,11 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
}
- while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
return -ENXIO;
if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
@@ -2573,6 +2568,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ enum hns3_pkt_l2t_type l2_frame_type;
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
@@ -2590,7 +2586,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Check valid BD */
- if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
return -ENXIO;
if (!skb)
@@ -2653,7 +2649,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
vlan_tag);
}
- if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
u64_stats_update_begin(&ring->syncp);
ring->stats.non_vld_descs++;
u64_stats_update_end(&ring->syncp);
@@ -2663,25 +2659,26 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
if (unlikely((!desc->rx.pkt_len) ||
- hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+ (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+ BIT(HNS3_RXD_L2E_B))))) {
u64_stats_update_begin(&ring->syncp);
- ring->stats.err_pkt_len++;
+ if (l234info & BIT(HNS3_RXD_L2E_B))
+ ring->stats.l2_err++;
+ else
+ ring->stats.err_pkt_len++;
u64_stats_update_end(&ring->syncp);
dev_kfree_skb_any(skb);
return -EFAULT;
}
- if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.l2_err++;
- u64_stats_update_end(&ring->syncp);
-
- dev_kfree_skb_any(skb);
- return -EFAULT;
- }
+ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+ HNS3_RXD_DMAC_S);
u64_stats_update_begin(&ring->syncp);
+ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+ ring->stats.rx_multicast++;
+
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
u64_stats_update_end(&ring->syncp);
@@ -2770,7 +2767,7 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
u32 time_passed_ms;
u16 new_int_gl;
- if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
+ if (!tqp_vector->last_jiffies)
return false;
if (ring_group->total_packets == 0) {
@@ -2873,7 +2870,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
}
if (tx_group->coal.gl_adapt_enable) {
- tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
+ tx_update = hns3_get_new_int_gl(tx_group);
if (tx_update)
hns3_set_vector_coalesce_tx_gl(tqp_vector,
tx_group->coal.int_gl);
@@ -3176,25 +3173,23 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
group->count = 0;
}
-static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
+static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_ring_chain_node vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
- int i, ret;
+ int i;
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
- ret = hns3_get_vector_ring_chain(tqp_vector,
- &vector_ring_chain);
- if (ret)
- return ret;
+ if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
+ continue;
+
+ hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
- ret = h->ae_algo->ops->unmap_ring_from_vector(h,
+ h->ae_algo->ops->unmap_ring_from_vector(h,
tqp_vector->vector_irq, &vector_ring_chain);
- if (ret)
- return ret;
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
@@ -3206,13 +3201,10 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
}
- priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
netif_napi_del(&priv->tqp_vector[i].napi);
}
-
- return 0;
}
static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
@@ -3241,16 +3233,19 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
int queue_num = priv->ae_handle->kinfo.num_tqps;
struct pci_dev *pdev = priv->ae_handle->pdev;
struct hns3_enet_ring *ring;
+ int desc_num;
ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
if (!ring)
return -ENOMEM;
if (ring_type == HNAE3_RING_TYPE_TX) {
+ desc_num = priv->ae_handle->kinfo.num_tx_desc;
ring_data[q->tqp_index].ring = ring;
ring_data[q->tqp_index].queue_index = q->tqp_index;
ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
} else {
+ desc_num = priv->ae_handle->kinfo.num_rx_desc;
ring_data[q->tqp_index + queue_num].ring = ring;
ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
ring->io_base = q->io_base;
@@ -3264,7 +3259,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->dev = priv->dev;
ring->desc_dma_addr = 0;
ring->buf_size = q->buf_size;
- ring->desc_num = q->desc_num;
+ ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
@@ -3376,6 +3371,11 @@ static void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->desc_cb = NULL;
ring->next_to_clean = 0;
ring->next_to_use = 0;
+ ring->pending_buf = 0;
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ }
}
static int hns3_buf_size2type(u32 buf_size)
@@ -3516,6 +3516,25 @@ static int hns3_init_mac_addr(struct net_device *netdev, bool init)
return ret;
}
+static int hns3_init_phy(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = 0;
+
+ if (h->ae_algo->ops->mac_connect_phy)
+ ret = h->ae_algo->ops->mac_connect_phy(h);
+
+ return ret;
+}
+
+static void hns3_uninit_phy(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->mac_disconnect_phy)
+ h->ae_algo->ops->mac_disconnect_phy(h);
+}
+
static int hns3_restore_fd_rules(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -3539,7 +3558,6 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- priv->ops.fill_desc = hns3_fill_desc;
if ((netdev->features & NETIF_F_TSO) ||
(netdev->features & NETIF_F_TSO6))
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
@@ -3582,6 +3600,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->netdev = netdev;
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
@@ -3624,6 +3643,10 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_init_ring_data;
}
+ ret = hns3_init_phy(netdev);
+ if (ret)
+ goto out_init_phy;
+
ret = register_netdev(netdev);
if (ret) {
dev_err(priv->dev, "probe register netdev fail!\n");
@@ -3633,7 +3656,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
- goto out_reg_netdev_fail;
+ goto out_client_start;
}
hns3_dcbnl_setup(handle);
@@ -3647,9 +3670,14 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
+out_client_start:
+ unregister_netdev(netdev);
out_reg_netdev_fail:
+ hns3_uninit_phy(netdev);
+out_init_phy:
+ hns3_uninit_all_ring(priv);
out_init_ring_data:
- (void)hns3_nic_uninit_vector_data(priv);
+ hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
@@ -3682,9 +3710,9 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_force_clear_all_rx_ring(handle);
- ret = hns3_nic_uninit_vector_data(priv);
- if (ret)
- netdev_err(netdev, "uninit vector error\n");
+ hns3_uninit_phy(netdev);
+
+ hns3_nic_uninit_vector_data(priv);
ret = hns3_nic_dealloc_vector_data(priv);
if (ret)
@@ -3726,8 +3754,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
- bool if_running;
- int ret;
if (tc > HNAE3_MAX_TC)
return -EINVAL;
@@ -3735,25 +3761,7 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (!ndev)
return -ENODEV;
- if_running = netif_running(ndev);
-
- if (if_running) {
- (void)hns3_nic_net_stop(ndev);
- msleep(100);
- }
-
- ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
- kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
- if (ret)
- goto err_out;
-
- ret = hns3_nic_set_real_num_queue(ndev);
-
-err_out:
- if (if_running)
- (void)hns3_nic_net_open(ndev);
-
- return ret;
+ return hns3_nic_set_real_num_queue(ndev);
}
static int hns3_recover_hw_addr(struct net_device *ndev)
@@ -4014,41 +4022,18 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
- bool vlan_filter_enable;
int ret;
- ret = hns3_init_mac_addr(netdev, false);
- if (ret)
- return ret;
-
- ret = hns3_recover_hw_addr(netdev);
- if (ret)
- return ret;
-
- ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
- if (ret)
- return ret;
-
- vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
- hns3_enable_vlan_filter(netdev, vlan_filter_enable);
-
- /* Hardware table is only clear when pf resets */
- if (!(handle->flags & HNAE3_SUPPORT_VF)) {
- ret = hns3_restore_vlan(netdev);
- if (ret)
- return ret;
- }
+ /* Carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
- ret = hns3_restore_fd_rules(netdev);
+ ret = hns3_get_ring_config(priv);
if (ret)
return ret;
- /* Carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
-
ret = hns3_nic_alloc_vector_data(priv);
if (ret)
- return ret;
+ goto err_put_ring;
hns3_restore_coal(priv);
@@ -4069,10 +4054,44 @@ err_uninit_vector:
priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
+err_put_ring:
+ hns3_put_ring_config(priv);
+ priv->ring_data = NULL;
return ret;
}
+static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ bool vlan_filter_enable;
+ int ret;
+
+ ret = hns3_init_mac_addr(netdev, false);
+ if (ret)
+ return ret;
+
+ ret = hns3_recover_hw_addr(netdev);
+ if (ret)
+ return ret;
+
+ ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
+ if (ret)
+ return ret;
+
+ vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(netdev, vlan_filter_enable);
+
+ /* Hardware table is only cleared when the PF resets */
+ if (!(handle->flags & HNAE3_SUPPORT_VF)) {
+ ret = hns3_restore_vlan(netdev);
+ if (ret)
+ return ret;
+ }
+
+ return hns3_restore_fd_rules(netdev);
+}
+
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
@@ -4086,11 +4105,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_force_clear_all_rx_ring(handle);
- ret = hns3_nic_uninit_vector_data(priv);
- if (ret) {
- netdev_err(netdev, "uninit vector error\n");
- return ret;
- }
+ hns3_nic_uninit_vector_data(priv);
hns3_store_coal(priv);
@@ -4102,6 +4117,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
+ hns3_put_ring_config(priv);
+ priv->ring_data = NULL;
+
clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
@@ -4125,6 +4143,9 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
case HNAE3_UNINIT_CLIENT:
ret = hns3_reset_notify_uninit_enet(handle);
break;
+ case HNAE3_RESTORE_CLIENT:
+ ret = hns3_reset_notify_restore_enet(handle);
+ break;
default:
break;
}
@@ -4132,57 +4153,12 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
-static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
- int ret;
-
- ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
- if (ret)
- return ret;
-
- ret = hns3_get_ring_config(priv);
- if (ret)
- return ret;
-
- ret = hns3_nic_alloc_vector_data(priv);
- if (ret)
- goto err_alloc_vector;
-
- hns3_restore_coal(priv);
-
- ret = hns3_nic_init_vector_data(priv);
- if (ret)
- goto err_uninit_vector;
-
- ret = hns3_init_all_ring(priv);
- if (ret)
- goto err_put_ring;
-
- return 0;
-
-err_put_ring:
- hns3_put_ring_config(priv);
-err_uninit_vector:
- hns3_nic_uninit_vector_data(priv);
-err_alloc_vector:
- hns3_nic_dealloc_vector_data(priv);
- return ret;
-}
-
-static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
-{
- return (new_tqp_num / num_tc) * num_tc;
-}
-
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
- bool if_running = netif_running(netdev);
+ bool rxfh_configured = netif_is_rxfh_configured(netdev);
u32 new_tqp_num = ch->combined_count;
u16 org_tqp_num;
int ret;
@@ -4191,39 +4167,29 @@ int hns3_set_channels(struct net_device *netdev,
return -EINVAL;
if (new_tqp_num > hns3_get_max_available_channels(h) ||
- new_tqp_num < kinfo->num_tc) {
+ new_tqp_num < 1) {
dev_err(&netdev->dev,
- "Change tqps fail, the tqp range is from %d to %d",
- kinfo->num_tc,
+ "Change tqps fail, the tqp range is from 1 to %d",
hns3_get_max_available_channels(h));
return -EINVAL;
}
- new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
- if (kinfo->num_tqps == new_tqp_num)
+ if (kinfo->rss_size == new_tqp_num)
return 0;
- if (if_running)
- hns3_nic_net_stop(netdev);
-
- ret = hns3_nic_uninit_vector_data(priv);
- if (ret) {
- dev_err(&netdev->dev,
- "Unbind vector with tqp fail, nothing is changed");
- goto open_netdev;
- }
-
- hns3_store_coal(priv);
-
- hns3_nic_dealloc_vector_data(priv);
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
- hns3_uninit_all_ring(priv);
- hns3_put_ring_config(priv);
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
org_tqp_num = h->kinfo.num_tqps;
- ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+ ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
if (ret) {
- ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+ ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
+ rxfh_configured);
if (ret) {
/* If revert to old tqp failed, fatal error occurred */
dev_err(&netdev->dev,
@@ -4233,12 +4199,11 @@ int hns3_set_channels(struct net_device *netdev,
dev_info(&netdev->dev,
"Change tqp num fail, Revert to old tqp num");
}
+ ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
-open_netdev:
- if (if_running)
- hns3_nic_net_open(netdev);
-
- return ret;
+ return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}
static const struct hnae3_client_ops client_ops = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index e55995e93bb0..1db0bd41d209 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -74,7 +74,7 @@ enum hns3_nic_state {
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32768
-#define HNS3_RING_MIN_PENDING 8
+#define HNS3_RING_MIN_PENDING 24
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
#define HNS3_MAC_MAX_FRAME 9728
@@ -184,6 +184,8 @@ enum hns3_nic_state {
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
+#define HNS3_TX_LAST_SIZE_M 0xffff
+
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
@@ -191,6 +193,7 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
+#define HNS3_MAX_BD_SIZE_OFFSET 16
#define HNS3_MAX_BD_PER_FRAG 8
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
@@ -202,6 +205,13 @@ enum hns3_nic_state {
#define HNS3_RING_EN_B 0
+enum hns3_pkt_l2t_type {
+ HNS3_L2_TYPE_UNICAST,
+ HNS3_L2_TYPE_MULTICAST,
+ HNS3_L2_TYPE_BROADCAST,
+ HNS3_L2_TYPE_INVALID,
+};
+
enum hns3_pkt_l3t_type {
HNS3_L3T_NONE,
HNS3_L3T_IPV6,
@@ -376,6 +386,7 @@ struct ring_stats {
u64 err_bd_num;
u64 l2_err;
u64 l3l4_csum_err;
+ u64 rx_multicast;
};
};
};
@@ -412,7 +423,6 @@ struct hns3_enet_ring {
unsigned char *va; /* first buffer address for current packet */
u32 flag; /* ring attribute */
- int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
@@ -434,11 +444,8 @@ struct hns3_nic_ring_data {
};
struct hns3_nic_ops {
- int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
- int size, int frag_end, enum hns_desc_type type);
int (*maybe_stop_tx)(struct sk_buff **out_skb,
int *bnum, struct hns3_enet_ring *ring);
- void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
};
enum hns3_flow_level_range {
@@ -567,6 +574,7 @@ union l3_hdr_info {
union l4_hdr_info {
struct tcphdr *tcp;
struct udphdr *udp;
+ struct gre_base_hdr *gre;
unsigned char *hdr;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index e678b6939da3..359d4731fb2d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -47,6 +47,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("err_bd_num", err_bd_num),
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("multicast", rx_multicast),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -116,7 +117,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000);
- return 0;
+ return ret;
}
static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
@@ -333,10 +334,10 @@ static void hns3_self_test(struct net_device *ndev,
continue;
data[test_index] = hns3_lp_up(ndev, loop_type);
- if (!data[test_index]) {
+ if (!data[test_index])
data[test_index] = hns3_lp_run_test(ndev, loop_type);
- hns3_lp_down(ndev, loop_type);
- }
+
+ hns3_lp_down(ndev, loop_type);
if (data[test_index])
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -620,12 +621,11 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_COPPER:
- if (!netdev->phydev)
- return -EOPNOTSUPP;
-
cmd->base.port = PORT_TP;
- phy_ethtool_ksettings_get(netdev->phydev, cmd);
-
+ if (!netdev->phydev)
+ hns3_get_ksettings(h, cmd);
+ else
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
break;
default:
@@ -748,15 +748,19 @@ static int hns3_get_rxnfc(struct net_device *netdev,
}
static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
- u32 new_desc_num)
+ u32 tx_desc_num, u32 rx_desc_num)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
- h->kinfo.num_desc = new_desc_num;
+ h->kinfo.num_tx_desc = tx_desc_num;
+ h->kinfo.num_rx_desc = rx_desc_num;
- for (i = 0; i < h->kinfo.num_tqps * 2; i++)
- priv->ring_data[i].ring->desc_num = new_desc_num;
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ priv->ring_data[i].ring->desc_num = tx_desc_num;
+ priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
+ rx_desc_num;
+ }
return hns3_init_all_ring(priv);
}
@@ -767,7 +771,9 @@ static int hns3_set_ringparam(struct net_device *ndev,
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
bool if_running = netif_running(ndev);
- u32 old_desc_num, new_desc_num;
+ u32 old_tx_desc_num, new_tx_desc_num;
+ u32 old_rx_desc_num, new_rx_desc_num;
+ int queue_num = h->kinfo.num_tqps;
int ret;
if (hns3_nic_resetting(ndev))
@@ -776,43 +782,41 @@ static int hns3_set_ringparam(struct net_device *ndev,
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
- if (param->tx_pending != param->rx_pending) {
- netdev_err(ndev,
- "Descriptors of tx and rx must be equal");
- return -EINVAL;
- }
-
if (param->tx_pending > HNS3_RING_MAX_PENDING ||
- param->tx_pending < HNS3_RING_MIN_PENDING) {
- netdev_err(ndev,
- "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n",
- param->tx_pending, HNS3_RING_MIN_PENDING,
- HNS3_RING_MAX_PENDING);
+ param->tx_pending < HNS3_RING_MIN_PENDING ||
+ param->rx_pending > HNS3_RING_MAX_PENDING ||
+ param->rx_pending < HNS3_RING_MIN_PENDING) {
+ netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
+ HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
return -EINVAL;
}
- new_desc_num = param->tx_pending;
-
/* Hardware requires that its descriptors must be multiple of eight */
- new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE);
- old_desc_num = h->kinfo.num_desc;
- if (old_desc_num == new_desc_num)
+ new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
+ new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
+ old_tx_desc_num = priv->ring_data[0].ring->desc_num;
+ old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
+ if (old_tx_desc_num == new_tx_desc_num &&
+ old_rx_desc_num == new_rx_desc_num)
return 0;
netdev_info(ndev,
- "Changing descriptor count from %d to %d.\n",
- old_desc_num, new_desc_num);
+ "Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
+ old_tx_desc_num, old_rx_desc_num,
+ new_tx_desc_num, new_rx_desc_num);
if (if_running)
- dev_close(ndev);
+ ndev->netdev_ops->ndo_stop(ndev);
ret = hns3_uninit_all_ring(priv);
if (ret)
return ret;
- ret = hns3_change_all_ring_bd_num(priv, new_desc_num);
+ ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num,
+ new_rx_desc_num);
if (ret) {
- ret = hns3_change_all_ring_bd_num(priv, old_desc_num);
+ ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
+ old_rx_desc_num);
if (ret) {
netdev_err(ndev,
"Revert to old bd num fail, ret=%d.\n", ret);
@@ -821,7 +825,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
if (if_running)
- ret = dev_open(ndev, NULL);
+ ret = ndev->netdev_ops->ndo_open(ndev);
return ret;
}
@@ -1114,6 +1118,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_channels = hns3_get_channels,
.get_coalesce = hns3_get_coalesce,
.set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
.get_link = hns3_get_link,
};
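Note on the ringparam rework above: the requested Tx/Rx depths are rounded up to the hardware's required multiple of eight BDs before being compared with the current values. A minimal standalone sketch of that rounding, re-implementing the kernel's ALIGN() as ALIGN_UP(); the names and demo values are illustrative only:

#include <stdio.h>

#define RING_BD_MULTIPLE	8
/* same idea as the kernel's ALIGN(): round x up to a power-of-two multiple a */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int requested[] = { 500, 512, 1023 };
	unsigned int i;

	for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++)
		printf("tx_pending %u -> desc_num %u\n",
		       requested[i], ALIGN_UP(requested[i], RING_BD_MULTIPLE));
	/* prints: 500 -> 504, 512 -> 512, 1023 -> 1024 */
	return 0;
}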
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 8af0cef5609b..3a093a92eac5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
- size, &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
@@ -171,8 +170,12 @@ static bool hclge_is_special_opcode(u16 opcode)
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
- u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
- HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
+ u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -183,6 +186,38 @@ static bool hclge_is_special_opcode(u16 opcode)
return false;
}
+static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num, int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+ int retval;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc >= hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ if (likely(!hclge_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+
+ if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
+ retval = 0;
+ else if (desc_ret == HCLGE_CMD_NO_AUTH)
+ retval = -EPERM;
+ else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
+ retval = -EOPNOTSUPP;
+ else
+ retval = -EIO;
+ hw->cmq.last_status = desc_ret;
+
+ return retval;
+}
+
/**
* hclge_cmd_send - send command to command queue
* @hw: pointer to the hw struct
@@ -200,7 +235,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
u32 timeout = 0;
int handle = 0;
int retval = 0;
- u16 opcode, desc_ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -216,12 +250,11 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* which will be use for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- opcode = le16_to_cpu(desc[0].opcode);
while (handle < num) {
desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
*desc_to_use = desc[handle];
(hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
hw->cmq.csq.next_to_use = 0;
handle++;
}
@@ -247,27 +280,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
if (!complete) {
retval = -EAGAIN;
} else {
- handle = 0;
- while (handle < num) {
- /* Get the result of hardware write back */
- desc_to_use = &hw->cmq.csq.desc[ntc];
- desc[handle] = *desc_to_use;
-
- if (likely(!hclge_is_special_opcode(opcode)))
- desc_ret = le16_to_cpu(desc[handle].retval);
- else
- desc_ret = le16_to_cpu(desc[0].retval);
-
- if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
- retval = 0;
- else
- retval = -EIO;
- hw->cmq.last_status = desc_ret;
- ntc++;
- handle++;
- if (ntc == hw->cmq.csq.desc_num)
- ntc = 0;
- }
+ retval = hclge_cmd_check_retval(hw, desc, num, ntc);
}
/* Clean the command send queue */
@@ -377,6 +390,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
return 0;
}
+static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
+{
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
+}
+
static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
spin_lock(&ring->lock);
@@ -389,3 +416,15 @@ void hclge_destroy_cmd_queue(struct hclge_hw *hw)
hclge_destroy_queue(&hw->cmq.csq);
hclge_destroy_queue(&hw->cmq.crq);
}
+
+void hclge_cmd_uninit(struct hclge_dev *hdev)
+{
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hclge_cmd_uninit_regs(&hdev->hw);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ hclge_destroy_cmd_queue(&hdev->hw);
+}
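Note on the new hclge_cmd_check_retval() helper above: it centralizes the translation of firmware completion codes into errno values. A self-contained sketch of just that mapping, using simplified stand-in names rather than the driver's real types:

#include <errno.h>

enum fw_cmd_status {
	FW_CMD_EXEC_SUCCESS = 0,
	FW_CMD_NO_AUTH = 1,
	FW_CMD_NOT_SUPPORTED = 2,
	FW_CMD_QUEUE_FULL = 3,
};

/* mirrors the retval handling in hclge_cmd_check_retval() */
static int fw_status_to_errno(enum fw_cmd_status status)
{
	switch (status) {
	case FW_CMD_EXEC_SUCCESS:
		return 0;
	case FW_CMD_NO_AUTH:
		return -EPERM;		/* caller lacks permission */
	case FW_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;	/* firmware does not know this opcode */
	default:
		return -EIO;		/* e.g. queue full or unknown codes */
	}
}

int main(void)
{
	/* example: a "not supported" completion becomes -EOPNOTSUPP */
	return fw_status_to_errno(FW_CMD_NOT_SUPPORTED) == -EOPNOTSUPP ? 0 : 1;
}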
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f23042b24c09..3714733c96d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -39,7 +39,7 @@ struct hclge_cmq_ring {
enum hclge_cmd_return_status {
HCLGE_CMD_EXEC_SUCCESS = 0,
HCLGE_CMD_NO_AUTH = 1,
- HCLGE_CMD_NOT_EXEC = 2,
+ HCLGE_CMD_NOT_SUPPORTED = 2,
HCLGE_CMD_QUEUE_FULL = 3,
};
@@ -82,6 +82,8 @@ enum hclge_opcode_type {
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
+ HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
+ HCLGE_OPC_STATS_MAC_ALL = 0x0034,
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
@@ -310,16 +312,16 @@ struct hclge_ctrl_vector_chain_cmd {
u8 rsv;
};
-#define HCLGE_TC_NUM 8
+#define HCLGE_MAX_TC_NUM 8
#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */
#define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */
struct hclge_tx_buff_alloc_cmd {
- __le16 tx_pkt_buff[HCLGE_TC_NUM];
+ __le16 tx_pkt_buff[HCLGE_MAX_TC_NUM];
u8 tx_buff_rsv[8];
};
struct hclge_rx_priv_buff_cmd {
- __le16 buf_num[HCLGE_TC_NUM];
+ __le16 buf_num[HCLGE_MAX_TC_NUM];
__le16 shared_buf;
u8 rsv[6];
};
@@ -365,7 +367,6 @@ struct hclge_priv_buf {
u32 enable; /* Enable TC private buffer or not */
};
-#define HCLGE_MAX_TC_NUM 8
struct hclge_shared_buf {
struct hclge_waterline self;
struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM];
@@ -692,7 +693,9 @@ struct hclge_mac_vlan_remove_cmd {
struct hclge_vlan_filter_ctrl_cmd {
u8 vlan_type;
u8 vlan_fe;
- u8 rsv[22];
+ u8 rsv1[2];
+ u8 vf_id;
+ u8 rsv2[19];
};
struct hclge_vlan_filter_pf_cfg_cmd {
@@ -974,6 +977,6 @@ enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
struct hclge_desc *desc);
-void hclge_destroy_cmd_queue(struct hclge_hw *hw);
+void hclge_cmd_uninit(struct hclge_dev *hdev);
int hclge_cmd_queue_init(struct hclge_dev *hdev);
#endif
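Note on the hclge_vlan_filter_ctrl_cmd change above: a vf_id byte is carved out of the reserved area without growing the command payload. A quick compile-time check of that layout arithmetic, using stand-in structs rather than the driver's definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct vlan_filter_ctrl_old {	/* 1 + 1 + 22 = 24 bytes */
	uint8_t vlan_type;
	uint8_t vlan_fe;
	uint8_t rsv[22];
};

struct vlan_filter_ctrl_new {	/* 1 + 1 + 2 + 1 + 19 = 24 bytes */
	uint8_t vlan_type;
	uint8_t vlan_fe;
	uint8_t rsv1[2];
	uint8_t vf_id;
	uint8_t rsv2[19];
};

/* both layouts must keep the same 24-byte command data size */
static_assert(sizeof(struct vlan_filter_ctrl_old) ==
	      sizeof(struct vlan_filter_ctrl_new), "command size changed");

int main(void)
{
	printf("old=%zu new=%zu\n", sizeof(struct vlan_filter_ctrl_old),
	       sizeof(struct vlan_filter_ctrl_new));
	return 0;
}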
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index f6323b2501dc..1161361a973b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -93,13 +93,11 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
}
}
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- if (num_tc > hdev->vport[i].alloc_tqps) {
- dev_err(&hdev->pdev->dev,
- "allocated tqp(%u) checking failed, %u > tqp(%u)\n",
- i, num_tc, hdev->vport[i].alloc_tqps);
- return -EINVAL;
- }
+ if (num_tc > hdev->vport[0].alloc_tqps) {
+ dev_err(&hdev->pdev->dev,
+ "allocated tqp checking failed, %u > tqp(%u)\n",
+ num_tc, hdev->vport[0].alloc_tqps);
+ return -EINVAL;
}
return 0;
@@ -156,21 +154,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
return 0;
}
-static int hclge_map_update(struct hnae3_handle *h)
+static int hclge_map_update(struct hclge_dev *hdev)
{
- struct hclge_vport *vport = hclge_get_vport(h);
- struct hclge_dev *hdev = vport->back;
int ret;
- ret = hclge_tm_map_cfg(hdev);
+ ret = hclge_tm_schd_setup_hw(hdev);
if (ret)
return ret;
- ret = hclge_tm_schd_mode_hw(hdev);
- if (ret)
- return ret;
-
- ret = hclge_pause_setup_hw(hdev);
+ ret = hclge_pause_setup_hw(hdev, false);
if (ret)
return ret;
@@ -222,19 +214,51 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
return ret;
+ if (map_changed) {
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+ }
+
hclge_tm_schd_info_update(hdev, num_tc);
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
- return ret;
+ goto err_out;
if (map_changed) {
+ ret = hclge_map_update(hdev);
+ if (ret)
+ goto err_out;
+
ret = hclge_client_setup_tc(hdev);
if (ret)
+ goto err_out;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
return ret;
}
return hclge_tm_dwrr_cfg(hdev);
+
+err_out:
+ if (!map_changed)
+ return ret;
+
+ if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
+ return ret;
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return ret;
}
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
@@ -283,6 +307,9 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
return -EINVAL;
+ if (pfc->pfc_en == hdev->tm_info.pfc_en)
+ return 0;
+
prio_tc = hdev->tm_info.prio_tc;
pfc_map = 0;
@@ -295,12 +322,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
}
}
- if (pfc_map == hdev->tm_info.hw_pfc_map)
- return 0;
-
hdev->tm_info.hw_pfc_map = pfc_map;
+ hdev->tm_info.pfc_en = pfc->pfc_en;
- return hclge_pause_setup_hw(hdev);
+ return hclge_pause_setup_hw(hdev, false);
}
/* DCBX configuration */
@@ -345,12 +370,24 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
if (ret)
return -EINVAL;
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
hclge_tm_schd_info_update(hdev, tc);
hclge_tm_prio_tc_info_update(hdev, prio_tc);
- ret = hclge_tm_init_hw(hdev);
+ ret = hclge_tm_init_hw(hdev, false);
if (ret)
- return ret;
+ goto err_out;
+
+ ret = hclge_client_setup_tc(hdev);
+ if (ret)
+ goto err_out;
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
@@ -359,7 +396,18 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
else
hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
- return 0;
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+
+err_out:
+ if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
+ return ret;
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return ret;
}
static const struct hnae3_dcb_ops hns3_dcb_ops = {
@@ -369,7 +417,6 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setpfc = hclge_ieee_setpfc,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
- .map_update = hclge_map_update,
.setup_tc = hclge_setup_tc,
};
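Note on the DCB changes above: both hclge_ieee_setets() and hclge_setup_tc() now bracket the hardware reconfiguration with client DOWN/UNINIT notifications beforehand and INIT/UP afterwards, and try to bring the client back up even when the reconfiguration fails. A generic sketch of that pattern with placeholder names, not the driver's API:

enum notify_type { CLIENT_DOWN, CLIENT_UNINIT, CLIENT_INIT, CLIENT_UP };

/* stand-ins for hclge_notify_client() and the hw setup; always succeed here */
static int notify_client(enum notify_type type) { (void)type; return 0; }
static int reconfigure_hw(void) { return 0; }

static int set_config_with_rollback(void)
{
	int ret;

	/* quiesce and tear down the client before touching hardware state */
	ret = notify_client(CLIENT_DOWN);
	if (ret)
		return ret;
	ret = notify_client(CLIENT_UNINIT);
	if (ret)
		return ret;

	ret = reconfigure_hw();
	if (ret)
		goto err_out;

	/* bring the client back up on the new configuration */
	ret = notify_client(CLIENT_INIT);
	if (ret)
		return ret;
	return notify_client(CLIENT_UP);

err_out:
	/* best effort: restore the client so the device stays usable,
	 * but still report the original error
	 */
	if (notify_client(CLIENT_INIT))
		return ret;
	notify_client(CLIENT_UP);
	return ret;
}

int main(void)
{
	return set_config_with_rollback();
}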
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 26d80504c730..1192cf6f2321 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -691,7 +691,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
- for (i = 0; i < HCLGE_TC_NUM; i++)
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
tx_buf_cmd->tx_pkt_buff[i]);
@@ -703,7 +703,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
- for (i = 0; i < HCLGE_TC_NUM; i++)
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
rx_buf_cmd->buf_num[i]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index d0f654123b9b..1feceff1477c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -80,7 +80,7 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
@@ -219,6 +219,12 @@ static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
+ { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
+ { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
+ { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
+ { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
+ { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
{ /* sentinel */ }
};
@@ -277,6 +283,45 @@ static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
{ /* sentinel */ }
};
+#define HCLGE_SSU_MEM_ECC_ERR(x) \
+ { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }
+
+static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
+ HCLGE_SSU_MEM_ECC_ERR(0),
+ HCLGE_SSU_MEM_ECC_ERR(1),
+ HCLGE_SSU_MEM_ECC_ERR(2),
+ HCLGE_SSU_MEM_ECC_ERR(3),
+ HCLGE_SSU_MEM_ECC_ERR(4),
+ HCLGE_SSU_MEM_ECC_ERR(5),
+ HCLGE_SSU_MEM_ECC_ERR(6),
+ HCLGE_SSU_MEM_ECC_ERR(7),
+ HCLGE_SSU_MEM_ECC_ERR(8),
+ HCLGE_SSU_MEM_ECC_ERR(9),
+ HCLGE_SSU_MEM_ECC_ERR(10),
+ HCLGE_SSU_MEM_ECC_ERR(11),
+ HCLGE_SSU_MEM_ECC_ERR(12),
+ HCLGE_SSU_MEM_ECC_ERR(13),
+ HCLGE_SSU_MEM_ECC_ERR(14),
+ HCLGE_SSU_MEM_ECC_ERR(15),
+ HCLGE_SSU_MEM_ECC_ERR(16),
+ HCLGE_SSU_MEM_ECC_ERR(17),
+ HCLGE_SSU_MEM_ECC_ERR(18),
+ HCLGE_SSU_MEM_ECC_ERR(19),
+ HCLGE_SSU_MEM_ECC_ERR(20),
+ HCLGE_SSU_MEM_ECC_ERR(21),
+ HCLGE_SSU_MEM_ECC_ERR(22),
+ HCLGE_SSU_MEM_ECC_ERR(23),
+ HCLGE_SSU_MEM_ECC_ERR(24),
+ HCLGE_SSU_MEM_ECC_ERR(25),
+ HCLGE_SSU_MEM_ECC_ERR(26),
+ HCLGE_SSU_MEM_ECC_ERR(27),
+ HCLGE_SSU_MEM_ECC_ERR(28),
+ HCLGE_SSU_MEM_ECC_ERR(29),
+ HCLGE_SSU_MEM_ECC_ERR(30),
+ HCLGE_SSU_MEM_ECC_ERR(31),
+ { /* sentinel */ }
+};
+
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
@@ -835,13 +880,15 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*(desc_data + 2));
if (status) {
- dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n");
+ hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+ &hclge_ssu_mem_ecc_err_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
}
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
- dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n");
+ dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
+ status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
}
@@ -997,6 +1044,13 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
&hclge_igu_egu_tnl_int[0], status);
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+ &hclge_ppu_pf_abnormal_int[0], status);
+
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
@@ -1094,10 +1148,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
return 0;
}
-static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
+static enum hnae3_reset_type
+hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
{
- enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET;
- struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
unsigned int status;
@@ -1110,17 +1164,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
/* reset everything for now */
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
- return ret;
+ return HNAE3_GLOBAL_RESET;
}
status = le32_to_cpu(desc[0].data[0]);
- if (status & HCLGE_ROCEE_RERR_INT_MASK)
+ if (status & HCLGE_ROCEE_RERR_INT_MASK) {
dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+ reset_type = HNAE3_FUNC_RESET;
+ }
- if (status & HCLGE_ROCEE_BERR_INT_MASK)
+ if (status & HCLGE_ROCEE_BERR_INT_MASK) {
dev_warn(dev, "ROCEE RAS AXI bresp error\n");
+ reset_type = HNAE3_FUNC_RESET;
+ }
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
@@ -1132,9 +1189,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (ret) {
dev_err(dev, "failed(%d) to process ovf error\n", ret);
/* reset everything for now */
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
- return ret;
+ return HNAE3_GLOBAL_RESET;
}
+ reset_type = HNAE3_FUNC_RESET;
}
/* clear error status */
@@ -1143,12 +1200,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (ret) {
dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
/* reset everything for now */
- reset_type = HNAE3_GLOBAL_RESET;
+ return HNAE3_GLOBAL_RESET;
}
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
-
- return ret;
+ return reset_type;
}
static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
@@ -1178,15 +1233,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
+static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
+ enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
hdev->pdev->revision < 0x21)
- return HNAE3_NONE_RESET;
+ return;
- return hclge_log_and_clear_rocee_ras_error(hdev);
+ reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
+ if (reset_type != HNAE3_NONE_RESET)
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
}
static const struct hclge_hw_blk hw_blk[] = {
@@ -1332,14 +1390,13 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
}
- /* log PPU(RCB) errors */
+ /* log PPU(RCB) MPF errors */
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status) {
- dev_warn(dev,
- "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n",
- status);
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0], status);
set_bit(HNAE3_CORE_RESET, reset_requests);
}
@@ -1386,7 +1443,7 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
&hclge_ppp_pf_abnormal_int[0], status);
- /* PPU(RCB) PF errors */
+ /* log PPU(RCB) PF errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
if (status)
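Note on the HCLGE_SSU_MEM_ECC_ERR(x) macro above: it builds its 32 table entries by stringizing the bit index into the message text. A small standalone demo of the same trick and of how such a table is scanned, using simplified stand-in types rather than the driver's hclge_hw_error/hclge_log_error:

#include <stdint.h>
#include <stdio.h>

struct hw_error {
	uint32_t int_msk;
	const char *msg;
};

#define BIT(x)		(1U << (x))
/* SSU_MEM_ECC_ERR(3) expands to { .int_msk = BIT(3), .msg = "ssu_mem3_ecc_mbit_err" } */
#define SSU_MEM_ECC_ERR(x) \
	{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }

static const struct hw_error ssu_mem_ecc_err[] = {
	SSU_MEM_ECC_ERR(0),
	SSU_MEM_ECC_ERR(1),
	SSU_MEM_ECC_ERR(2),
	{ 0, NULL }	/* sentinel */
};

int main(void)
{
	uint32_t status = BIT(1) | BIT(2);	/* pretend these bits were reported */
	const struct hw_error *err;

	/* walk the table the way hclge_log_error() does: log every set bit */
	for (err = ssu_mem_ecc_err; err->msg; err++)
		if (status & err->int_msk)
			printf("%s found\n", err->msg);
	return 0;
}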
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 51a7d4eb066a..fc068280d391 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -45,8 +45,8 @@
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
-#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0)
-#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0)
+#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
+#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
@@ -79,6 +79,7 @@
#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
+#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27
#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f7637c08bb3a..deda606c51e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -118,6 +118,12 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
{"mac_rx_mac_pause_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
+ {"mac_tx_control_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
+ {"mac_rx_control_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
+ {"mac_tx_pfc_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
{"mac_tx_pfc_pri0_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
{"mac_tx_pfc_pri1_pkt_num",
@@ -134,6 +140,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
{"mac_tx_pfc_pri7_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
+ {"mac_rx_pfc_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
{"mac_rx_pfc_pri0_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
{"mac_rx_pfc_pri1_pkt_num",
@@ -287,10 +295,17 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static int hclge_mac_update_stats(struct hclge_dev *hdev)
+static const u8 hclge_hash_key[] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
+static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
-#define HCLGE_RTN_DATA_NUM 4
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
@@ -308,15 +323,18 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev)
}
for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
+ /* for special opcode 0032, only the first desc has the head */
if (unlikely(i == 0)) {
desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RTN_DATA_NUM - 2;
+ n = HCLGE_RD_FIRST_STATS_NUM;
} else {
desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RTN_DATA_NUM;
+ n = HCLGE_RD_OTHER_STATS_NUM;
}
+
for (k = 0; k < n; k++) {
- *data++ += le64_to_cpu(*desc_data);
+ *data += le64_to_cpu(*desc_data);
+ data++;
desc_data++;
}
}
@@ -324,6 +342,85 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev)
return 0;
}
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+{
+ u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ struct hclge_desc *desc;
+ __le64 *desc_data;
+ u16 i, k, n;
+ int ret;
+
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
+ if (ret) {
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < desc_num; i++) {
+ /* for special opcode 0034, only the first desc has the head */
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_RD_FIRST_STATS_NUM;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_RD_OTHER_STATS_NUM;
+ }
+
+ for (k = 0; k < n; k++) {
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
+ }
+ }
+
+ kfree(desc);
+
+ return 0;
+}
+
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+{
+ struct hclge_desc desc;
+ __le32 *desc_data;
+ u32 reg_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ desc_data = (__le32 *)(&desc.data[0]);
+ reg_num = le32_to_cpu(*desc_data);
+
+ *desc_num = 1 + ((reg_num - 3) >> 2) +
+ (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
+
+ return 0;
+}
+
+static int hclge_mac_update_stats(struct hclge_dev *hdev)
+{
+ u32 desc_num;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &desc_num);
+
+ /* The firmware supports the new statistics acquisition method */
+ if (!ret)
+ ret = hclge_mac_update_stats_complete(hdev, desc_num);
+ else if (ret == -EOPNOTSUPP)
+ ret = hclge_mac_update_stats_defective(hdev);
+ else
+ dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
+
+ return ret;
+}
+
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -461,26 +558,6 @@ static u8 *hclge_comm_get_strings(u32 stringset,
return (u8 *)buff;
}
-static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
- struct net_device_stats *net_stats)
-{
- net_stats->tx_dropped = 0;
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
-
- net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
- net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
-
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
- net_stats->rx_length_errors =
- hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_length_errors +=
- hw_stats->mac_stats.mac_rx_oversize_pkt_num;
- net_stats->rx_over_errors =
- hw_stats->mac_stats.mac_rx_oversize_pkt_num;
-}
-
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
struct hnae3_handle *handle;
@@ -500,8 +577,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
if (status)
dev_err(&hdev->pdev->dev,
"Update MAC stats fail, status = %d.\n", status);
-
- hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
static void hclge_update_stats(struct hnae3_handle *handle,
@@ -509,7 +584,6 @@ static void hclge_update_stats(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
int status;
if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
@@ -527,8 +601,6 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update TQPS stats fail, status = %d.\n",
status);
- hclge_update_netstat(hw_stats, net_stats);
-
clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
@@ -767,37 +839,67 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
unsigned long *supported = hdev->hw.mac.supported;
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
- set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ supported);
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ supported);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ supported);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ supported);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+}
+
+static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
+ u8 speed_ability)
+{
+ unsigned long *supported = hdev->hw.mac.supported;
+
+ /* default to support all speed for GE port */
+ if (!speed_ability)
+ speed_ability = HCLGE_SUPPORT_GE;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ supported);
+ }
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+ }
- set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
- set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
u8 media_type = hdev->hw.mac.media_type;
- if (media_type != HNAE3_MEDIA_TYPE_FIBER)
- return;
-
- hclge_parse_fiber_link_mode(hdev, speed_ability);
+ if (media_type == HNAE3_MEDIA_TYPE_FIBER)
+ hclge_parse_fiber_link_mode(hdev, speed_ability);
+ else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
+ hclge_parse_copper_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
@@ -931,12 +1033,16 @@ static int hclge_configure(struct hclge_dev *hdev)
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
hdev->hw.mac.phy_addr = cfg.phy_addr;
- hdev->num_desc = cfg.tqp_desc_num;
+ hdev->num_tx_desc = cfg.tqp_desc_num;
+ hdev->num_rx_desc = cfg.tqp_desc_num;
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
hdev->wanted_umv_size = cfg.umv_space;
+ if (hnae3_dev_fd_supported(hdev))
+ hdev->fd_en = true;
+
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
@@ -1035,7 +1141,8 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
tqp->q.ae_algo = &ae_algo;
tqp->q.buf_size = hdev->rx_buf_len;
- tqp->q.desc_num = hdev->num_desc;
+ tqp->q.tx_desc_num = hdev->num_tx_desc;
+ tqp->q.rx_desc_num = hdev->num_rx_desc;
tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
i * HCLGE_TQP_REG_SIZE;
@@ -1068,64 +1175,51 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
return ret;
}
-static int hclge_assign_tqp(struct hclge_vport *vport)
+static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int i, alloced;
for (i = 0, alloced = 0; i < hdev->num_tqps &&
- alloced < kinfo->num_tqps; i++) {
+ alloced < num_tqps; i++) {
if (!hdev->htqp[i].alloced) {
hdev->htqp[i].q.handle = &vport->nic;
hdev->htqp[i].q.tqp_index = alloced;
- hdev->htqp[i].q.desc_num = kinfo->num_desc;
+ hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
+ hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
kinfo->tqp[alloced] = &hdev->htqp[i].q;
hdev->htqp[i].alloced = true;
alloced++;
}
}
- vport->alloc_tqps = kinfo->num_tqps;
+ vport->alloc_tqps = alloced;
+ kinfo->rss_size = min_t(u16, hdev->rss_size_max,
+ vport->alloc_tqps / hdev->tm_info.num_tc);
return 0;
}
-static int hclge_knic_setup(struct hclge_vport *vport,
- u16 num_tqps, u16 num_desc)
+static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
+ u16 num_tx_desc, u16 num_rx_desc)
+
{
struct hnae3_handle *nic = &vport->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
struct hclge_dev *hdev = vport->back;
- int i, ret;
+ int ret;
- kinfo->num_desc = num_desc;
- kinfo->rx_buf_len = hdev->rx_buf_len;
- kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
- kinfo->rss_size
- = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
- kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
+ kinfo->num_tx_desc = num_tx_desc;
+ kinfo->num_rx_desc = num_rx_desc;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (hdev->hw_tc_map & BIT(i)) {
- kinfo->tc_info[i].enable = true;
- kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
- kinfo->tc_info[i].tqp_count = kinfo->rss_size;
- kinfo->tc_info[i].tc = i;
- } else {
- /* Set to default queue if TC is disable */
- kinfo->tc_info[i].enable = false;
- kinfo->tc_info[i].tqp_offset = 0;
- kinfo->tc_info[i].tqp_count = 1;
- kinfo->tc_info[i].tc = 0;
- }
- }
+ kinfo->rx_buf_len = hdev->rx_buf_len;
- kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
return -ENOMEM;
- ret = hclge_assign_tqp(vport);
+ ret = hclge_assign_tqp(vport, num_tqps);
if (ret)
dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
@@ -1140,7 +1234,7 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
u16 i;
kinfo = &nic->kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
+ for (i = 0; i < vport->alloc_tqps; i++) {
struct hclge_tqp *q =
container_of(kinfo->tqp[i], struct hclge_tqp, q);
bool is_pf;
@@ -1191,7 +1285,9 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->numa_node_mask = hdev->numa_node_mask;
if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
- ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
+ ret = hclge_knic_setup(vport, num_tqps,
+ hdev->num_tx_desc, hdev->num_rx_desc);
+
if (ret) {
dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
ret);
@@ -1241,6 +1337,9 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->back = hdev;
vport->vport_id = i;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ INIT_LIST_HEAD(&vport->vlan_list);
+ INIT_LIST_HEAD(&vport->uc_mac_list);
+ INIT_LIST_HEAD(&vport->mc_mac_list);
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1273,7 +1372,7 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
- for (i = 0; i < HCLGE_TC_NUM; i++) {
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
req->tx_pkt_buff[i] =
@@ -1448,13 +1547,14 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
- if (total_size < hdev->tx_buf_size)
- return -ENOMEM;
+ if (hdev->hw_tc_map & BIT(i)) {
+ if (total_size < hdev->tx_buf_size)
+ return -ENOMEM;
- if (hdev->hw_tc_map & BIT(i))
priv->tx_buf_size = hdev->tx_buf_size;
- else
+ } else {
priv->tx_buf_size = 0;
+ }
total_size -= priv->tx_buf_size;
}
@@ -1462,66 +1562,15 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
return 0;
}
-/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
- * @hdev: pointer to struct hclge_dev
- * @buf_alloc: pointer to buffer calculation data
- * @return: 0: calculate sucessful, negative: fail
- */
-static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
- struct hclge_pkt_buf_alloc *buf_alloc)
+static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
- u32 rx_all = hdev->pkt_buf_size, aligned_mps;
- int no_pfc_priv_num, pfc_priv_num;
- struct hclge_priv_buf *priv;
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
int i;
- aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
- rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
-
- /* When DCB is not supported, rx private
- * buffer is not allocated.
- */
- if (!hnae3_dev_dcb_supported(hdev)) {
- if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
- return -ENOMEM;
-
- return 0;
- }
-
- /* step 1, try to alloc private buffer for all enabled tc */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- priv = &buf_alloc->priv_buf[i];
- if (hdev->hw_tc_map & BIT(i)) {
- priv->enable = 1;
- if (hdev->tm_info.hw_pfc_map & BIT(i)) {
- priv->wl.low = aligned_mps;
- priv->wl.high =
- roundup(priv->wl.low + aligned_mps,
- HCLGE_BUF_SIZE_UNIT);
- priv->buf_size = priv->wl.high +
- hdev->dv_buf_size;
- } else {
- priv->wl.low = 0;
- priv->wl.high = 2 * aligned_mps;
- priv->buf_size = priv->wl.high +
- hdev->dv_buf_size;
- }
- } else {
- priv->enable = 0;
- priv->wl.low = 0;
- priv->wl.high = 0;
- priv->buf_size = 0;
- }
- }
-
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
- return 0;
-
- /* step 2, try to decrease the buffer size of
- * no pfc TC's private buffer
- */
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- priv = &buf_alloc->priv_buf[i];
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
priv->enable = 0;
priv->wl.low = 0;
@@ -1534,28 +1583,30 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
- priv->wl.low = 256;
- priv->wl.high = priv->wl.low + aligned_mps;
- priv->buf_size = priv->wl.high + hdev->dv_buf_size;
+ priv->wl.low = max ? aligned_mps : 256;
+ priv->wl.high = roundup(priv->wl.low + aligned_mps,
+ HCLGE_BUF_SIZE_UNIT);
} else {
priv->wl.low = 0;
- priv->wl.high = aligned_mps;
- priv->buf_size = priv->wl.high + hdev->dv_buf_size;
+ priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
}
+
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
}
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
- return 0;
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
- /* step 3, try to reduce the number of pfc disabled TCs,
- * which have private buffer
- */
- /* get the total no pfc enable TC number, which have private buffer */
- no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
+static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
+ int i;
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
- priv = &buf_alloc->priv_buf[i];
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
if (hdev->hw_tc_map & BIT(i) &&
!(hdev->tm_info.hw_pfc_map & BIT(i))) {
@@ -1572,17 +1623,19 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
break;
}
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
- return 0;
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
- /* step 4, try to reduce the number of pfc enabled TCs
- * which have private buffer.
- */
- pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
+static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
+ int i;
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
- priv = &buf_alloc->priv_buf[i];
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
if (hdev->hw_tc_map & BIT(i) &&
hdev->tm_info.hw_pfc_map & BIT(i)) {
@@ -1598,7 +1651,40 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
pfc_priv_num == 0)
break;
}
- if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
+
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
+
+/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
+ * @hdev: pointer to struct hclge_dev
+ * @buf_alloc: pointer to buffer calculation data
+ * @return: 0: calculation successful, negative: fail
+ */
+static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ /* When DCB is not supported, rx private buffer is not allocated. */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ u32 rx_all = hdev->pkt_buf_size;
+
+ rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
+ if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
+ return -ENOMEM;
+
+ return 0;
+ }
+
+ if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
+ return 0;
+
+ /* try to decrease the buffer size */
+ if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
+ return 0;
+
+ if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
+ return 0;
+
+ if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
return 0;
return -ENOMEM;
@@ -2122,7 +2208,9 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
+ struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
+ struct hnae3_handle *rhandle;
struct hnae3_handle *handle;
int state;
int i;
@@ -2134,6 +2222,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
handle = &hdev->vport[i].nic;
client->ops->link_status_change(handle, state);
+ rhandle = &hdev->vport[i].roce;
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
}
hdev->hw.mac.link = state;
}
@@ -2418,8 +2510,8 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
hclge_free_vector(hdev, 0);
}
-static int hclge_notify_client(struct hclge_dev *hdev,
- enum hnae3_reset_notify_type type)
+int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->nic_client;
u16 i;
@@ -2548,7 +2640,7 @@ static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
int i;
@@ -2883,6 +2975,10 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset_lock;
+ ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
hclge_clear_reset_cause(hdev);
ret = hclge_reset_prepare_up(hdev);
@@ -3597,8 +3693,11 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
+ int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
- int i;
+
+ if (hdev->pdev->revision >= 0x21)
+ rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
vport[i].rss_tuple_sets.ipv4_tcp_en =
@@ -3618,9 +3717,10 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+ vport[i].rss_algo = rss_algo;
- netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
+ memcpy(vport[i].rss_hash_key, hclge_hash_key,
+ HCLGE_RSS_KEY_SIZE);
}
hclge_rss_indir_init_cfg(hdev);
@@ -3788,8 +3888,16 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_promisc_param param;
+ bool en_bc_pmc = true;
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
+ /* For revision 0x20, if broadcast promisc is enabled, vlan filter is
+ * always bypassed. So broadcast promisc should be disabled until the
+ * user enables promisc mode
+ */
+ if (handle->pdev->revision == 0x20)
+ en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
+
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
vport->vport_id);
return hclge_cmd_set_promisc_mode(hdev, &param);
}
@@ -3898,7 +4006,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
return -EOPNOTSUPP;
}
- hdev->fd_cfg.fd_en = true;
hdev->fd_cfg.proto_support =
TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
@@ -4656,7 +4763,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
- if (!hdev->fd_cfg.fd_en) {
+ if (!hdev->fd_en) {
dev_warn(&hdev->pdev->dev,
"Please enable flow director first\n");
return -EOPNOTSUPP;
@@ -4809,7 +4916,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
return 0;
/* if fd is disabled, should not restore it when reset */
- if (!hdev->fd_cfg.fd_en)
+ if (!hdev->fd_en)
return 0;
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
@@ -5095,7 +5202,7 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- hdev->fd_cfg.fd_en = enable;
+ hdev->fd_en = enable;
if (!enable)
hclge_del_all_fd_entries(handle, false);
else
@@ -5174,8 +5281,15 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
{
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
+
+#define HCLGE_MAC_LINK_STATUS_MS 20
+#define HCLGE_MAC_LINK_STATUS_NUM 10
+#define HCLGE_MAC_LINK_STATUS_DOWN 0
+#define HCLGE_MAC_LINK_STATUS_UP 1
+
struct hclge_serdes_lb_cmd *req;
struct hclge_desc desc;
+ int mac_link_ret = 0;
int ret, i = 0;
u8 loop_mode_b;
@@ -5198,8 +5312,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
if (en) {
req->enable = loop_mode_b;
req->mask = loop_mode_b;
+ mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
} else {
req->mask = loop_mode_b;
+ mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -5231,7 +5347,19 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
}
hclge_cfg_mac_mode(hdev, en);
- return 0;
+
+ i = 0;
+ do {
+ /* serdes internal loopback, independent of the network cable. */
+ msleep(HCLGE_MAC_LINK_STATUS_MS);
+ ret = hclge_get_mac_link_status(hdev);
+ if (ret == mac_link_ret)
+ return 0;
+ } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+
+ dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
+
+ return -EBUSY;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
@@ -5258,6 +5386,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo;
struct hclge_dev *hdev = vport->back;
int i, ret;
@@ -5276,7 +5405,11 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
break;
}
- for (i = 0; i < vport->alloc_tqps; i++) {
+ if (ret)
+ return ret;
+
+ kinfo = &vport->nic.kinfo;
+ for (i = 0; i < kinfo->num_tqps; i++) {
ret = hclge_tqp_enable(hdev, i, 0, en);
if (ret)
return ret;
@@ -5288,11 +5421,13 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo;
struct hnae3_queue *queue;
struct hclge_tqp *tqp;
int i;
- for (i = 0; i < vport->alloc_tqps; i++) {
+ kinfo = &vport->nic.kinfo;
+ for (i = 0; i < kinfo->num_tqps; i++) {
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
@@ -5493,13 +5628,19 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
- const u8 *addr)
+ const u8 *addr, bool is_mc)
{
const unsigned char *mac_addr = addr;
u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
(mac_addr[0]) | (mac_addr[1] << 8);
u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
+ hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ if (is_mc) {
+ hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ }
+
new_req->mac_addr_hi32 = cpu_to_le32(high_val);
new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
@@ -5730,9 +5871,12 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
if (is_free) {
if (vport->used_umv_num > hdev->priv_umv_size)
hdev->share_umv_size++;
- vport->used_umv_num--;
+
+ if (vport->used_umv_num > 0)
+ vport->used_umv_num--;
} else {
- if (vport->used_umv_num >= hdev->priv_umv_size)
+ if (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size > 0)
hdev->share_umv_size--;
vport->used_umv_num++;
}
@@ -5770,14 +5914,13 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
req.egress_port = cpu_to_le16(egress_port);
- hclge_prepare_mac_addr(&req, addr);
+ hclge_prepare_mac_addr(&req, addr, false);
/* Lookup the mac address in the mac_vlan table, and add
* it if the entry is inexistent. Repeated unicast entry
@@ -5835,9 +5978,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hclge_prepare_mac_addr(&req, addr);
+ hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
if (!ret)
hclge_update_umv_space(vport, true);
@@ -5869,11 +6011,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hclge_prepare_mac_addr(&req, addr);
+ hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
/* This mac addr exist, update VFID for it */
@@ -5919,11 +6058,8 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hclge_prepare_mac_addr(&req, addr);
+ hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
/* This mac addr exist, remove this handle's VFID for it */
@@ -5949,6 +6085,103 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
return status;
}
+void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_vport_mac_addr_cfg *mac_cfg;
+ struct list_head *list;
+
+ if (!vport->vport_id)
+ return;
+
+ mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
+ if (!mac_cfg)
+ return;
+
+ mac_cfg->hd_tbl_status = true;
+ memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ list_add_tail(&mac_cfg->node, list);
+}
+
+void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
+ bool is_write_tbl,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
+ struct list_head *list;
+ bool uc_flag, mc_flag;
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
+ mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
+
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+ if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ if (uc_flag && mac_cfg->hd_tbl_status)
+ hclge_rm_uc_addr_common(vport, mac_addr);
+
+ if (mc_flag && mac_cfg->hd_tbl_status)
+ hclge_rm_mc_addr_common(vport, mac_addr);
+
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ break;
+ }
+ }
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
+ struct list_head *list;
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+ if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
+ hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
+
+ if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
+ hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
+
+ mac_cfg->hd_tbl_status = false;
+ if (is_del_list) {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ }
+}
+
+void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport_mac_addr_cfg *mac, *tmp;
+ struct hclge_vport *vport;
+ int i;
+
+ mutex_lock(&hdev->vport_cfg_mutex);
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
+ list_del(&mac->node);
+ kfree(mac);
+ }
+
+ list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
+ list_del(&mac->node);
+ kfree(mac);
+ }
+ }
+ mutex_unlock(&hdev->vport_cfg_mutex);
+}
+
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
u16 cmdq_resp, u8 resp_code)
{
@@ -6104,7 +6337,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
- u8 fe_type, bool filter_en)
+ u8 fe_type, bool filter_en, u8 vf_id)
{
struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc;
@@ -6115,6 +6348,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
req->vlan_fe = filter_en ? fe_type : 0;
+ req->vf_id = vf_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -6143,12 +6377,13 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
if (hdev->pdev->revision >= 0x21) {
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS, enable);
+ HCLGE_FILTER_FE_EGRESS, enable, 0);
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, enable);
+ HCLGE_FILTER_FE_INGRESS, enable, 0);
} else {
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B, enable);
+ HCLGE_FILTER_FE_EGRESS_V1_B, enable,
+ 0);
}
if (enable)
handle->netdev_flags |= HNAE3_VLAN_FLTR;
@@ -6456,19 +6691,27 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
int i;
if (hdev->pdev->revision >= 0x21) {
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS, true);
- if (ret)
- return ret;
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ ret = hclge_set_vlan_filter_ctrl(hdev,
+ HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS,
+ true,
+ vport->vport_id);
+ if (ret)
+ return ret;
+ }
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true);
+ HCLGE_FILTER_FE_INGRESS, true,
+ 0);
if (ret)
return ret;
} else {
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS_V1_B,
- true);
+ true, 0);
if (ret)
return ret;
}
@@ -6522,6 +6765,84 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
+void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
+{
+ struct hclge_vport_vlan_cfg *vlan;
+
+ /* vlan 0 is reserved */
+ if (!vlan_id)
+ return;
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return;
+
+ vlan->hd_tbl_status = true;
+ vlan->vlan_id = vlan_id;
+
+ list_add_tail(&vlan->node, &vport->vlan_list);
+}
+
+void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool is_write_tbl)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ if (is_write_tbl && vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan_id, 0,
+ true);
+
+ list_del(&vlan->node);
+ kfree(vlan);
+ break;
+ }
+ }
+}
+
+void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, 0,
+ true);
+
+ vlan->hd_tbl_status = false;
+ if (is_del_list) {
+ list_del(&vlan->node);
+ kfree(vlan);
+ }
+ }
+}
+
+void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_vport *vport;
+ int i;
+
+ mutex_lock(&hdev->vport_cfg_mutex);
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ list_del(&vlan->node);
+ kfree(vlan);
+ }
+ }
+ mutex_unlock(&hdev->vport_cfg_mutex);
+}
+
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6959,16 +7280,6 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
*tp_mdix = ETH_TP_MDI;
}
-static int hclge_init_instance_hw(struct hclge_dev *hdev)
-{
- return hclge_mac_connect_phy(hdev);
-}
-
-static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
-{
- hclge_mac_disconnect_phy(hdev);
-}
-
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -6988,13 +7299,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (ret)
goto clear_nic;
- ret = hclge_init_instance_hw(hdev);
- if (ret) {
- client->ops->uninit_instance(&vport->nic,
- 0);
- goto clear_nic;
- }
-
hnae3_set_client_init_flag(client, ae_dev, 1);
if (hdev->roce_client &&
@@ -7079,7 +7383,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (client->type == HNAE3_CLIENT_ROCE)
return;
if (hdev->nic_client && client->ops->uninit_instance) {
- hclge_uninit_instance_hw(hdev);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@@ -7222,6 +7525,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
+ mutex_init(&hdev->vport_cfg_mutex);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -7298,7 +7602,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_init_umv_space(hdev);
if (ret) {
dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
- goto err_msi_irq_uninit;
+ goto err_mdiobus_unreg;
}
ret = hclge_mac_init(hdev);
@@ -7383,7 +7687,7 @@ err_msi_irq_uninit:
err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
- hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_cmd_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.io_base);
pci_clear_master(pdev);
@@ -7456,7 +7760,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- ret = hclge_tm_init_hw(hdev);
+ ret = hclge_tm_init_hw(hdev, true);
if (ret) {
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
return ret;
@@ -7510,10 +7814,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
synchronize_irq(hdev->misc_vector.vector_irq);
hclge_hw_error_set_state(hdev, false);
- hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
+ hclge_uninit_vport_mac_table(hdev);
+ hclge_uninit_vport_vlan_table(hdev);
+ mutex_destroy(&hdev->vport_cfg_mutex);
ae_dev->priv = NULL;
}
@@ -7523,18 +7830,17 @@ static u32 hclge_get_max_channels(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+ return min_t(u32, hdev->rss_size_max,
+ vport->alloc_tqps / kinfo->num_tc);
}
static void hclge_get_channels(struct hnae3_handle *handle,
struct ethtool_channels *ch)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
-
ch->max_combined = hclge_get_max_channels(handle);
ch->other_count = 1;
ch->max_other = 1;
- ch->combined_count = vport->alloc_tqps;
+ ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
@@ -7547,26 +7853,8 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->rss_size_max;
}
-static void hclge_release_tqp(struct hclge_vport *vport)
-{
- struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- struct hclge_dev *hdev = vport->back;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp =
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
-
- tqp->q.handle = NULL;
- tqp->q.tqp_index = 0;
- tqp->alloced = false;
- }
-
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
- kinfo->tqp = NULL;
-}
-
-static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -7580,24 +7868,11 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
u32 *rss_indir;
int ret, i;
- /* Free old tqps, and reallocate with new tqp number when nic setup */
- hclge_release_tqp(vport);
-
- ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
- if (ret) {
- dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
- return ret;
- }
-
- ret = hclge_map_tqp_to_vport(hdev, vport);
- if (ret) {
- dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
- return ret;
- }
+ kinfo->req_rss_size = new_tqps_num;
- ret = hclge_tm_schd_init(hdev);
+ ret = hclge_tm_vport_map_update(hdev);
if (ret) {
- dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
return ret;
}
@@ -7618,6 +7893,10 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
if (ret)
return ret;
+ /* RSS indirection table has been configured by user */
+ if (rxfh_configured)
+ goto out;
+
/* Reinitializes the rss indirect table according to the new RSS size */
rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
if (!rss_indir)
@@ -7633,6 +7912,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
kfree(rss_indir);
+out:
if (!ret)
dev_info(&hdev->pdev->dev,
"Channels changed, rss_size from %d to %d, tqps from %d to %d",
@@ -7927,7 +8207,7 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
-static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -8012,6 +8292,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_gro_en = hclge_gro_en,
.get_global_queue_id = hclge_covert_handle_qid_global,
.set_timer_task = hclge_set_timer_task,
+ .mac_connect_phy = hclge_mac_connect_phy,
+ .mac_disconnect_phy = hclge_mac_disconnect_phy,
};
static struct hnae3_ae_algo ae_algo = {
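For reference, a minimal worked sketch of the reworked hclge_get_max_channels() limit above; the helper name and the numbers are illustrative only, not part of the patch:

/* e.g. a vport with 16 TQPs, 2 TCs and rss_size_max = 64 can expose at
 * most min(64, 16 / 2) = 8 combined channels, i.e. 8 queues per TC.
 */
static u32 example_max_channels(u32 rss_size_max, u16 alloc_tqps, u16 num_tc)
{
	return min_t(u32, rss_size_max, alloc_tqps / num_tc);
}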
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 6615b85a1c52..b57ac4beb313 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -16,6 +16,9 @@
#define HCLGE_MAX_PF_NUM 8
+#define HCLGE_RD_FIRST_STATS_NUM 2
+#define HCLGE_RD_OTHER_STATS_NUM 4
+
#define HCLGE_INVALID_VPORT 0xffff
#define HCLGE_PF_CFG_BLOCK_SIZE 32
@@ -185,6 +188,10 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
+#define HCLGE_SUPPORT_100M_BIT BIT(6)
+#define HCLGE_SUPPORT_10M_BIT BIT(7)
+#define HCLGE_SUPPORT_GE \
+ (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
@@ -322,6 +329,7 @@ struct hclge_tm_info {
struct hclge_tc_info tc_info[HNAE3_MAX_TC];
enum hclge_fc_mode fc_mode;
u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
+ u8 pfc_en; /* PFC enabled or not for user priority */
};
struct hclge_comm_stats_str {
@@ -415,6 +423,10 @@ struct hclge_mac_stats {
u64 mac_rx_fcs_err_pkt_num;
u64 mac_rx_send_app_good_pkt_num;
u64 mac_rx_send_app_bad_pkt_num;
+ u64 mac_tx_pfc_pause_pkt_num;
+ u64 mac_rx_pfc_pause_pkt_num;
+ u64 mac_tx_ctrl_pkt_num;
+ u64 mac_rx_ctrl_pkt_num;
};
#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
@@ -575,7 +587,6 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
- u8 fd_en;
u16 max_key_length;
u32 proto_support;
u32 rule_num[2]; /* rule entry number */
@@ -621,6 +632,23 @@ struct hclge_fd_ad_data {
u16 rule_id;
};
+struct hclge_vport_mac_addr_cfg {
+ struct list_head node;
+ int hd_tbl_status;
+ u8 mac_addr[ETH_ALEN];
+};
+
+enum HCLGE_MAC_ADDR_TYPE {
+ HCLGE_MAC_ADDR_UC,
+ HCLGE_MAC_ADDR_MC
+};
+
+struct hclge_vport_vlan_cfg {
+ struct list_head node;
+ int hd_tbl_status;
+ u16 vlan_id;
+};
+
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
@@ -678,7 +706,8 @@ struct hclge_dev {
u16 num_alloc_vport; /* Num vports this driver supports */
u32 numa_node_mask;
u16 rx_buf_len;
- u16 num_desc;
+ u16 num_tx_desc; /* desc num per tx queue */
+ u16 num_rx_desc; /* desc num per rx queue */
u8 hw_tc_map;
u8 tc_num_last_time;
enum hclge_fc_mode fc_mode_last_time;
@@ -750,6 +779,7 @@ struct hclge_dev {
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
u16 hclge_fd_rule_num;
+ u8 fd_en;
u16 wanted_umv_size;
/* max available unicast mac vlan space */
@@ -759,6 +789,8 @@ struct hclge_dev {
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
struct mutex umv_mutex; /* protect share_umv_size */
+
+ struct mutex vport_cfg_mutex; /* Protect stored vf table */
};
/* VPort level vlan tag configuration for TX direction */
@@ -826,6 +858,10 @@ struct hclge_vport {
unsigned long state;
unsigned long last_active_jiffies;
u32 mps; /* Max packet size */
+
+ struct list_head uc_mac_list; /* Store VF unicast table */
+ struct list_head mc_mac_list; /* Store VF multicast table */
+ struct list_head vlan_list; /* Store VF vlan table */
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -878,4 +914,19 @@ void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
+int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type);
+void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
+ enum HCLGE_MAC_ADDR_TYPE mac_type);
+void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
+ bool is_write_tbl,
+ enum HCLGE_MAC_ADDR_TYPE mac_type);
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type);
+void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
+void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
+void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool is_write_tbl);
+void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
+void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
#endif
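The per-vport shadow lists and helpers declared above are exercised by the mailbox handlers in hclge_mbx.c below; a condensed sketch of the lifecycle, derived from those call sites rather than from separate documentation:

/* add:    hardware add succeeds -> list entry appended (hd_tbl_status is
 *         set for entries currently programmed in hardware)
 * remove: matching entry found -> optional hardware delete, controlled by
 *         is_write_tbl and hd_tbl_status, then the entry is freed
 * VF FLR: hclge_rm_vport_all_{mac,vlan}_table(vport, true) flushes the
 *         whole list under hdev->vport_cfg_mutex
 */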
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a1de451a85df..306a23e486de 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -203,12 +203,11 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en_uc = req->msg[1] ? true : false;
- bool en_mc = req->msg[2] ? true : false;
+ bool en_bc = req->msg[1] ? true : false;
struct hclge_promisc_param param;
- /* always enable broadcast promisc bit */
- hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id);
+ /* vf is not allowed to enable unicast/multicast promisc mode */
+ hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id);
return hclge_cmd_set_promisc_mode(vport->back, &param);
}
@@ -225,12 +224,24 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
hclge_rm_uc_addr_common(vport, old_addr);
status = hclge_add_uc_addr_common(vport, mac_addr);
- if (status)
+ if (status) {
hclge_add_uc_addr_common(vport, old_addr);
+ } else {
+ hclge_rm_vport_mac_table(vport, mac_addr,
+ false, HCLGE_MAC_ADDR_UC);
+ hclge_add_vport_mac_table(vport, mac_addr,
+ HCLGE_MAC_ADDR_UC);
+ }
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
status = hclge_add_uc_addr_common(vport, mac_addr);
+ if (!status)
+ hclge_add_vport_mac_table(vport, mac_addr,
+ HCLGE_MAC_ADDR_UC);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
status = hclge_rm_uc_addr_common(vport, mac_addr);
+ if (!status)
+ hclge_rm_vport_mac_table(vport, mac_addr,
+ false, HCLGE_MAC_ADDR_UC);
} else {
dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %d\n",
@@ -256,8 +267,14 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
status = hclge_add_mc_addr_common(vport, mac_addr);
+ if (!status)
+ hclge_add_vport_mac_table(vport, mac_addr,
+ HCLGE_MAC_ADDR_MC);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr);
+ if (!status)
+ hclge_rm_vport_mac_table(vport, mac_addr,
+ false, HCLGE_MAC_ADDR_MC);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
@@ -288,6 +305,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
+ if (!status)
+ is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false)
+ : hclge_add_vport_vlan_table(vport, vlan);
} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = mbx_req->msg[2] ? true : false;
@@ -320,10 +340,14 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
- struct hclge_dev *hdev = vport->back;
- int ret;
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ u8 vf_tc_map = 0;
+ int i, ret;
- ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
+ for (i = 0; i < kinfo->num_tc; i++)
+ vf_tc_map |= BIT(i);
+
+ ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
sizeof(u8));
return ret;
@@ -333,35 +357,52 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
-#define HCLGE_TQPS_RSS_INFO_LEN 8
+#define HCLGE_TQPS_RSS_INFO_LEN 6
u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
struct hclge_dev *hdev = vport->back;
/* get the queue related info */
memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
- memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
- memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));
+ memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16));
return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
HCLGE_TQPS_RSS_INFO_LEN);
}
+static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+#define HCLGE_TQPS_DEPTH_INFO_LEN 4
+ u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN];
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue depth info */
+ memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16));
+ memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16));
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ HCLGE_TQPS_DEPTH_INFO_LEN);
+}
+
static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
struct hclge_dev *hdev = vport->back;
u16 link_status;
- u8 msg_data[8];
+ u8 msg_data[10];
+ u16 media_type;
u8 dest_vfid;
u16 duplex;
/* mac.link can only be 0 or 1 */
link_status = (u16)hdev->hw.mac.link;
duplex = hdev->hw.mac.duplex;
+ media_type = hdev->hw.mac.media_type;
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16));
+ memcpy(&msg_data[8], &media_type, sizeof(u16));
dest_vfid = mbx_req->mbx_src_vfid;
/* send this requested info to VF */
@@ -369,6 +410,29 @@ static int hclge_get_link_info(struct hclge_vport *vport,
HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}
+static void hclge_get_link_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_SUPPORTED 1
+ struct hclge_dev *hdev = vport->back;
+ unsigned long advertising;
+ unsigned long supported;
+ unsigned long send_data;
+ u8 msg_data[10];
+ u8 dest_vfid;
+
+ advertising = hdev->hw.mac.advertising[0];
+ supported = hdev->hw.mac.supported[0];
+ dest_vfid = mbx_req->mbx_src_vfid;
+ msg_data[0] = mbx_req->msg[2];
+
+ send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
+
+ memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
+ hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
+}
+
static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
@@ -426,6 +490,24 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
}
+static int hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_RSS_MBX_RESP_LEN 8
+ u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
+ struct hclge_dev *hdev = vport->back;
+ u8 index;
+
+ index = mbx_req->msg[2];
+
+ memcpy(&resp_data[0],
+ &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
+ HCLGE_RSS_MBX_RESP_LEN);
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ HCLGE_RSS_MBX_RESP_LEN);
+}
+
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
@@ -517,6 +599,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF failed(%d) to get Q info for VF\n",
ret);
break;
+ case HCLGE_MBX_GET_QDEPTH:
+ ret = hclge_get_vf_queue_depth(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get Q depth for VF\n",
+ ret);
+ break;
+
case HCLGE_MBX_GET_TCINFO:
ret = hclge_get_vf_tcinfo(vport, req, true);
if (ret)
@@ -553,6 +643,25 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF failed(%d) to get qid for VF\n",
ret);
break;
+ case HCLGE_MBX_GET_RSS_KEY:
+ ret = hclge_get_rss_key(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to get rss key for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_LINK_MODE:
+ hclge_get_link_mode(vport, req);
+ break;
+ case HCLGE_MBX_GET_VF_FLR_STATUS:
+ mutex_lock(&hdev->vport_cfg_mutex);
+ hclge_rm_vport_all_mac_table(vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(vport, true);
+ mutex_unlock(&hdev->vport_cfg_mutex);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index dabb8437f8dc..48eda2c6fdae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -8,12 +8,6 @@
#include "hclge_main.h"
#include "hclge_mdio.h"
-#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
- SUPPORTED_TP | \
- PHY_10BT_FEATURES | \
- PHY_100BT_FEATURES | \
- SUPPORTED_1000baseT_Full)
-
enum hclge_mdio_c22_op_seq {
HCLGE_MDIO_C22_WRITE = 1,
HCLGE_MDIO_C22_READ = 2
@@ -195,8 +189,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
netdev_err(netdev, "failed to configure flow control.\n");
}
-int hclge_mac_connect_phy(struct hclge_dev *hdev)
+int hclge_mac_connect_phy(struct hnae3_handle *handle)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@@ -215,22 +211,17 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
return ret;
}
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- mask);
+ linkmode_copy(mask, hdev->hw.mac.supported);
linkmode_and(phydev->supported, phydev->supported, mask);
- phy_support_asym_pause(phydev);
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
-void hclge_mac_disconnect_phy(struct hclge_dev *hdev)
+void hclge_mac_disconnect_phy(struct hnae3_handle *handle)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
struct phy_device *phydev = hdev->hw.mac.phydev;
if (!phydev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index 5fbf7dddb5d9..ef095d9c566f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -5,8 +5,8 @@
#define __HCLGE_MDIO_H
int hclge_mac_mdio_config(struct hclge_dev *hdev);
-int hclge_mac_connect_phy(struct hclge_dev *hdev);
-void hclge_mac_disconnect_phy(struct hclge_dev *hdev);
+int hclge_mac_connect_phy(struct hnae3_handle *handle);
+void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 00458da67503..aafc69f4bfdd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -517,20 +517,39 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
+ u16 max_rss_size;
u8 i;
- vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
- kinfo->num_tc =
- min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
- kinfo->rss_size
- = min_t(u16, hdev->rss_size_max,
- kinfo->num_tqps / kinfo->num_tc);
- vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
+ /* TC configuration is shared by PF/VF in one port; only allow
+ * one tc per VF for simplicity. VF's vport_id is non-zero.
+ */
+ kinfo->num_tc = vport->vport_id ? 1 :
+ min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+ vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
+ (vport->vport_id ? (vport->vport_id - 1) : 0);
+
+ max_rss_size = min_t(u16, hdev->rss_size_max,
+ vport->alloc_tqps / kinfo->num_tc);
+
+ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
+ kinfo->req_rss_size <= max_rss_size) {
+ dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+ kinfo->rss_size, kinfo->req_rss_size);
+ kinfo->rss_size = kinfo->req_rss_size;
+ } else if (kinfo->rss_size > max_rss_size ||
+ (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+ kinfo->rss_size, max_rss_size);
+ kinfo->rss_size = max_rss_size;
+ }
+
+ kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
vport->dwrr = 100; /* 100 percent as init */
vport->alloc_rss_size = kinfo->rss_size;
+ vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
- for (i = 0; i < kinfo->num_tc; i++) {
- if (hdev->hw_tc_map & BIT(i)) {
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
kinfo->tc_info[i].enable = true;
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
kinfo->tc_info[i].tqp_count = kinfo->rss_size;
@@ -753,13 +772,17 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
/* Cfg qs -> pri mapping, one by one mapping */
- for (k = 0; k < hdev->num_alloc_vport; k++)
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo =
+ &vport[k].nic.kinfo;
+
+ for (i = 0; i < kinfo->num_tc; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(
hdev, vport[k].qs_offset + i, i);
if (ret)
return ret;
}
+ }
} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
for (k = 0; k < hdev->num_alloc_vport; k++)
@@ -934,6 +957,36 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
return 0;
}
+static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
+{
+#define DEFAULT_TC_WEIGHT 1
+#define DEFAULT_TC_OFFSET 14
+
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ int i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hclge_pg_info *pg_info;
+
+ ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ pg_info =
+ &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+ ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
+ }
+
+ ets_weight->weight_offset = DEFAULT_TC_OFFSET;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -983,6 +1036,19 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
if (ret)
return ret;
+
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
+ ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "fw %08x does't support ets tc weight cmd\n",
+ hdev->fw_version);
+ ret = 0;
+ }
+
+ return ret;
} else {
ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
if (ret)
@@ -992,7 +1058,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
return 0;
}
-int hclge_tm_map_cfg(struct hclge_dev *hdev)
+static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
int ret;
@@ -1107,7 +1173,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
return 0;
}
-int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
+static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
int ret;
@@ -1118,7 +1184,7 @@ int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
return hclge_tm_lvl34_schd_mode_cfg(hdev);
}
-static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
int ret;
@@ -1159,7 +1225,7 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
HCLGE_RX_MAC_PAUSE_EN_MSK;
return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
- hdev->tm_info.hw_pfc_map);
+ hdev->tm_info.pfc_en);
}
/* Each Tc has a 1024 queue sets to backpress, it divides to
@@ -1228,10 +1294,23 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
-int hclge_pause_setup_hw(struct hclge_dev *hdev)
+static int hclge_tm_bp_setup(struct hclge_dev *hdev)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ ret = hclge_bp_setup_hw(hdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
int ret;
- u8 i;
ret = hclge_pause_param_setup_hw(hdev);
if (ret)
@@ -1245,18 +1324,17 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (!hnae3_dev_dcb_supported(hdev))
return 0;
- /* When MAC is GE Mode, hdev does not support pfc setting */
+ /* GE MAC does not support PFC. When the driver is initializing and MAC
+ * is in GE mode, ignore the error here; otherwise initialization
+ * will fail.
+ */
ret = hclge_pfc_setup_hw(hdev);
- if (ret)
- dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
-
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_bp_setup_hw(hdev, i);
- if (ret)
- return ret;
- }
+ if (init && ret == -EOPNOTSUPP)
+ dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
+ else
+ return ret;
- return 0;
+ return hclge_tm_bp_setup(hdev);
}
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
@@ -1294,7 +1372,7 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hclge_tm_schd_info_init(hdev);
}
-int hclge_tm_init_hw(struct hclge_dev *hdev)
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
int ret;
@@ -1306,7 +1384,7 @@ int hclge_tm_init_hw(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_pause_setup_hw(hdev);
+ ret = hclge_pause_setup_hw(hdev, init);
if (ret)
return ret;
@@ -1325,5 +1403,22 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
if (ret)
return ret;
- return hclge_tm_init_hw(hdev);
+ return hclge_tm_init_hw(hdev, true);
+}
+
+int hclge_tm_vport_map_update(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+
+ hclge_tm_vport_tc_info_update(vport);
+
+ ret = hclge_vport_q_to_qs_map(hdev, vport);
+ if (ret)
+ return ret;
+
+ if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+ return 0;
+
+ return hclge_tm_bp_setup(hdev);
}
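A minimal sketch of the queue-set offset rule introduced in hclge_tm_vport_tc_info_update() above; the helper name is illustrative:

/* The PF (vport_id 0) keeps queue sets 0..num_tc-1; each VF, limited to
 * a single TC here, takes one queue set after them.
 */
static u16 example_qs_offset(u16 vport_id, u8 num_tc)
{
	/* PF -> 0, VF1 -> num_tc, VF2 -> num_tc + 1, ... */
	return vport_id ? num_tc + (vport_id - 1) : 0;
}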
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index b6496a439304..f60e540c7a62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -142,13 +142,13 @@ struct hclge_port_shapping_cmd {
(HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev);
-int hclge_pause_setup_hw(struct hclge_dev *hdev);
-int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
+int hclge_tm_vport_map_update(struct hclge_dev *hdev);
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
-int hclge_tm_map_cfg(struct hclge_dev *hdev);
-int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d5765c8cf3a3..9441b453d38d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
- size, &ring->desc_dma_addr,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
@@ -363,8 +362,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
return 0;
}
+static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
+{
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+}
+
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
+ clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ hclgevf_cmd_uninit_regs(&hdev->hw);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 82103d5fa815..8bc28e6f465f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -21,6 +21,14 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
{0, }
};
+static const u8 hclgevf_hash_key[] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
@@ -78,7 +86,12 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
struct hnae3_handle *handle)
{
- return container_of(handle, struct hclgevf_dev, nic);
+ if (!handle->client)
+ return container_of(handle, struct hclgevf_dev, nic);
+ else if (handle->client->type == HNAE3_CLIENT_ROCE)
+ return container_of(handle, struct hclgevf_dev, roce);
+ else
+ return container_of(handle, struct hclgevf_dev, nic);
}
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
@@ -234,7 +247,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_TQPS_RSS_INFO_LEN 8
+#define HCLGEVF_TQPS_RSS_INFO_LEN 6
u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
int status;
@@ -250,8 +263,29 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
- memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
- memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
+ memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));
+
+ return 0;
+}
+
+static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4
+ u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
+ true, resp_msg,
+ HCLGEVF_TQPS_DEPTH_INFO_LEN);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp depth info from PF failed %d",
+ ret);
+ return ret;
+ }
+
+ memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
+ memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));
return 0;
}
@@ -291,7 +325,8 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
tqp->q.ae_algo = &ae_algovf;
tqp->q.buf_size = hdev->rx_buf_len;
- tqp->q.desc_num = hdev->num_desc;
+ tqp->q.tx_desc_num = hdev->num_tx_desc;
+ tqp->q.rx_desc_num = hdev->num_rx_desc;
tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
i * HCLGEVF_TQP_REG_SIZE;
@@ -310,7 +345,8 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
kinfo = &nic->kinfo;
kinfo->num_tc = 0;
- kinfo->num_desc = hdev->num_desc;
+ kinfo->num_tx_desc = hdev->num_tx_desc;
+ kinfo->num_rx_desc = hdev->num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
@@ -349,20 +385,40 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
+ struct hnae3_handle *rhandle = &hdev->roce;
struct hnae3_handle *handle = &hdev->nic;
+ struct hnae3_client *rclient;
struct hnae3_client *client;
client = handle->client;
+ rclient = hdev->roce_client;
link_state =
test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
if (link_state != hdev->hw.mac.link) {
client->ops->link_status_change(handle, !!link_state);
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle, !!link_state);
hdev->hw.mac.link = link_state;
}
}
+void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_ADVERTISING 0
+#define HCLGEVF_SUPPORTED 1
+ u8 send_msg;
+ u8 resp_msg;
+
+ send_msg = HCLGEVF_ADVERTISING;
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
+ sizeof(u8), false, &resp_msg, sizeof(u8));
+ send_msg = HCLGEVF_SUPPORTED;
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
+ sizeof(u8), false, &resp_msg, sizeof(u8));
+}
+
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
struct hnae3_handle *nic = &hdev->nic;
@@ -564,12 +620,50 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
return status;
}
+/* for revision 0x20, vf shares the same rss config with pf */
+static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_RSS_MBX_RESP_LEN 8
+
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
+ u16 msg_num, hash_key_index;
+ u8 index;
+ int ret;
+
+ msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+ HCLGEVF_RSS_MBX_RESP_LEN;
+ for (index = 0; index < msg_num; index++) {
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
+ &index, sizeof(index),
+ true, resp_msg,
+ HCLGEVF_RSS_MBX_RESP_LEN);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF get rss hash key from PF failed, ret=%d",
+ ret);
+ return ret;
+ }
+
+ hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
+ if (index == msg_num - 1)
+ memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+ &resp_msg[0],
+ HCLGEVF_RSS_KEY_SIZE - hash_key_index);
+ else
+ memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+ &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
+ }
+
+ return 0;
+}
+
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i;
+ int i, ret;
if (handle->pdev->revision >= 0x21) {
/* Get hash algorithm */
@@ -591,6 +685,16 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
if (key)
memcpy(key, rss_cfg->rss_hash_key,
HCLGEVF_RSS_KEY_SIZE);
+ } else {
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (key) {
+ ret = hclgevf_get_rss_hash_key(hdev);
+ if (ret)
+ return ret;
+ memcpy(key, rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
+ }
}
if (indir)
@@ -964,33 +1068,29 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
}
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
- bool en_uc_pmc, bool en_mc_pmc)
+ bool en_bc_pmc)
{
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
- int status;
+ int ret;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
- req->msg[1] = en_uc_pmc ? 1 : 0;
- req->msg[2] = en_mc_pmc ? 1 : 0;
+ req->msg[1] = en_bc_pmc ? 1 : 0;
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
dev_err(&hdev->pdev->dev,
- "Set promisc mode fail, status is %d.\n", status);
+ "Set promisc mode fail, status is %d.\n", ret);
- return status;
+ return ret;
}
-static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
- bool en_uc_pmc, bool en_mc_pmc)
+static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-
- return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
+ return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
@@ -1264,7 +1364,7 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- return 0;
+ return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -1610,6 +1710,10 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
int ret;
hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ return;
+
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
0, false, &respmsg, sizeof(u8));
if (ret)
@@ -1628,6 +1732,8 @@ static void hclgevf_service_task(struct work_struct *work)
*/
hclgevf_request_link_info(hdev);
+ hclgevf_update_link_mode(hdev);
+
hclgevf_deferred_task_schedule(hdev);
clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
@@ -1708,12 +1814,16 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
- hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;
-
/* get queue configuration from PF */
ret = hclgevf_get_queue_info(hdev);
if (ret)
return ret;
+
+ /* get queue depth info from PF */
+ ret = hclgevf_get_queue_depth(hdev);
+ if (ret)
+ return ret;
+
/* get tc configuration from PF */
return hclgevf_get_tc_info(hdev);
}
@@ -1788,9 +1898,9 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->rss_size_max;
if (hdev->pdev->revision >= 0x21) {
- rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- netdev_rss_key_fill(rss_cfg->rss_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
rss_cfg->rss_hash_key);
@@ -1862,6 +1972,8 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
hclgevf_request_link_info(hdev);
+ hclgevf_update_link_mode(hdev);
+
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
return 0;
@@ -2377,6 +2489,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_config;
+ /* vf is not allowed to enable unicast/multicast promisc mode.
+ * For revision 0x20, default to disable broadcast promisc mode;
+ * firmware makes sure broadcast packets can be accepted.
+ * For revision 0x21, default to enable broadcast promisc mode.
+ */
+ ret = hclgevf_set_promisc_mode(hdev, true);
+ if (ret)
+ goto err_config;
+
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2461,7 +2582,8 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
struct hnae3_handle *nic = &hdev->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
- return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+ return min_t(u32, hdev->rss_size_max,
+ hdev->num_tqps / kinfo->num_tc);
}
/**
@@ -2482,7 +2604,7 @@ static void hclgevf_get_channels(struct hnae3_handle *handle,
ch->max_combined = hclgevf_get_max_channels(hdev);
ch->other_count = 0;
ch->max_other = 0;
- ch->combined_count = hdev->num_tqps;
+ ch->combined_count = handle->kinfo.rss_size;
}
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
@@ -2522,7 +2644,7 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
-static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -2558,6 +2680,16 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
return hdev->reset_count;
}
+static void hclgevf_get_link_mode(struct hnae3_handle *handle,
+ unsigned long *supported,
+ unsigned long *advertising)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ *supported = hdev->hw.mac.supported;
+ *advertising = hdev->hw.mac.advertising;
+}
+
#define MAX_SEPARATE_NUM 4
#define SEPARATOR_VALUE 0xFFFFFFFF
#define REG_NUM_PER_LINE 4
@@ -2640,7 +2772,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_vector = hclgevf_get_vector,
.put_vector = hclgevf_put_vector,
.reset_queue = hclgevf_reset_tqp,
- .set_promisc_mode = hclgevf_set_promisc_mode,
.get_mac_addr = hclgevf_get_mac_addr,
.set_mac_addr = hclgevf_set_mac_addr,
.add_uc_addr = hclgevf_add_uc_addr,
@@ -2677,6 +2808,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_mtu = hclgevf_set_mtu,
.get_global_queue_id = hclgevf_get_qid_global,
.set_timer_task = hclgevf_set_timer_task,
+ .get_link_mode = hclgevf_get_link_mode,
};
static struct hnae3_ae_algo ae_algovf = {
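The VF-side key retrieval above (hclgevf_get_rss_hash_key()) pulls the hash key through 8-byte mailbox responses. A minimal sketch of the chunk arithmetic, assuming HCLGEVF_RSS_KEY_SIZE is the usual 40-byte Toeplitz key length:

static inline u16 example_rss_key_msg_num(void)
{
	/* (40 + 8 - 1) / 8 = 5 round trips; chunks 0..3 copy 8 bytes each
	 * and the final chunk copies the remaining 40 - 32 = 8 bytes.
	 */
	return (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
	       HCLGEVF_RSS_MBX_RESP_LEN;
}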
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 787bc06944e5..c128863ee7d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -145,6 +145,8 @@ struct hclgevf_mac {
int link;
u8 duplex;
u32 speed;
+ u64 supported;
+ u64 advertising;
};
struct hclgevf_hw {
@@ -237,7 +239,8 @@ struct hclgevf_dev {
u16 num_alloc_vport; /* num vports this driver supports */
u32 numa_node_mask;
u16 rx_buf_len;
- u16 num_desc;
+ u16 num_tx_desc; /* desc num per tx queue */
+ u16 num_rx_desc; /* desc num per rx queue */
u8 hw_tc_map;
u16 num_msi;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 84653f58b2d1..7dc3c9f79169 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -197,6 +197,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
break;
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
+ case HCLGE_MBX_LINK_STAT_MODE:
/* set this mbx event as pending. This is required as we
* might loose interrupt event when mbx task is busy
* handling. This shall be cleared when mbx task just
@@ -247,6 +248,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
u8 duplex;
u32 speed;
u32 tail;
+ u8 idx;
/* we can safely clear it now as we are at start of the async message
* processing
@@ -270,12 +272,22 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
link_status = le16_to_cpu(msg_q[1]);
memcpy(&speed, &msg_q[2], sizeof(speed));
duplex = (u8)le16_to_cpu(msg_q[4]);
+ hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
/* update upper layer with new link link status */
hclgevf_update_link_status(hdev, link_status);
hclgevf_update_speed_duplex(hdev, speed, duplex);
break;
+ case HCLGE_MBX_LINK_STAT_MODE:
+ idx = (u8)le16_to_cpu(msg_q[1]);
+ if (idx)
+ memcpy(&hdev->hw.mac.supported, &msg_q[2],
+ sizeof(unsigned long));
+ else
+ memcpy(&hdev->hw.mac.advertising, &msg_q[2],
+ sizeof(unsigned long));
+ break;
case HCLGE_MBX_ASSERTING_RESET:
/* PF has asserted reset hence VF should go in pending
* state and poll for the hardware reset status till it
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 017e08452d8c..baf5cc251f32 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
}
hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ MDIO_C45_READ, phy_id, devad);
}
/* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index c40603a183df..b4fefb4c3064 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
u8 *cmd_vaddr;
int err = 0;
- cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- &cmd_paddr, GFP_KERNEL);
+ cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
+ &cmd_paddr, GFP_KERNEL);
if (!cmd_vaddr) {
dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
return -ENOMEM;
@@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
dma_addr_t node_paddr;
int err;
- node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
- &node_paddr, GFP_KERNEL);
+ node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
+ GFP_KERNEL);
if (!node) {
dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
return -ENOMEM;
@@ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
if (!chain->cell_ctxt)
return -ENOMEM;
- chain->wb_status = dma_zalloc_coherent(&pdev->dev,
- sizeof(*chain->wb_status),
- &chain->wb_status_paddr,
- GFP_KERNEL);
+ chain->wb_status = dma_alloc_coherent(&pdev->dev,
+ sizeof(*chain->wb_status),
+ &chain->wb_status_paddr,
+ GFP_KERNEL);
if (!chain->wb_status) {
dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 6b19607a4caa..3875f39f43bb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -1008,3 +1008,16 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
&hw_ci, sizeof(hw_ci), NULL,
NULL, HINIC_MGMT_MSG_SYNC);
}
+
+/**
+ * hinic_hwdev_set_msix_state - set msix state
+ * @hwdev: the NIC HW device
+ * @msix_index: IRQ corresponding index number
+ * @flag: msix state
+ *
+ **/
+void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
+ enum hinic_msix_state flag)
+{
+ hinic_set_msix_state(hwdev->hwif, msix_index, flag);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index d1a7d2522d82..c9e621e19dd0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -240,4 +240,7 @@ int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
u8 pending_limit, u8 coalesc_timer);
+void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
+ enum hinic_msix_state flag);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 7cb8b9b94726..683e67515016 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq)
}
for (pg = 0; pg < eq->num_pages; pg++) {
- eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
- eq->page_size,
- &eq->dma_addr[pg],
- GFP_KERNEL);
+ eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
+ eq->page_size,
+ &eq->dma_addr[pg],
+ GFP_KERNEL);
if (!eq->virt_addr[pg]) {
err = -ENOMEM;
goto err_dma_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 823a17061a97..9b160f076904 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -168,6 +168,22 @@ void hinic_db_state_set(struct hinic_hwif *hwif,
hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
}
+void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
+ enum hinic_msix_state flag)
+{
+ u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE +
+ HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
+ u32 mask_bits;
+
+ mask_bits = readl(hwif->intr_regs_base + offset);
+ mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ if (flag)
+ mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ writel(mask_bits, hwif->intr_regs_base + offset);
+}
+
/**
* hwif_ready - test if the HW is ready for use
* @hwif: the HW interface of a pci function device
@@ -321,6 +337,13 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
return -ENOMEM;
}
+ hwif->intr_regs_base = pci_ioremap_bar(pdev, HINIC_PCI_INTR_REGS_BAR);
+ if (!hwif->intr_regs_base) {
+ dev_err(&pdev->dev, "Failed to map configuration regs\n");
+ err = -ENOMEM;
+ goto err_map_intr_bar;
+ }
+
err = hwif_ready(hwif);
if (err) {
dev_err(&pdev->dev, "HW interface is not ready\n");
@@ -337,7 +360,11 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
return 0;
err_hwif_ready:
+ iounmap(hwif->intr_regs_base);
+
+err_map_intr_bar:
iounmap(hwif->cfg_regs_bar);
+
return err;
}
@@ -347,5 +374,6 @@ err_hwif_ready:
**/
void hinic_free_hwif(struct hinic_hwif *hwif)
{
+ iounmap(hwif->intr_regs_base);
iounmap(hwif->cfg_regs_bar);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index 5b4760c0e9f5..22ec7f73e0a6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -152,6 +152,7 @@
#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
#define HINIC_PCI_CFG_REGS_BAR 0
+#define HINIC_PCI_INTR_REGS_BAR 2
#define HINIC_PCI_DB_BAR 4
#define HINIC_PCIE_ST_DISABLE 0
@@ -164,6 +165,10 @@
#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */
#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */
+#define HINIC_PCI_MSIX_ENTRY_SIZE 16
+#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+
enum hinic_pcie_nosnoop {
HINIC_PCIE_SNOOP = 0,
HINIC_PCIE_NO_SNOOP = 1,
@@ -207,6 +212,11 @@ enum hinic_db_state {
HINIC_DB_DISABLE = 1,
};
+enum hinic_msix_state {
+ HINIC_MSIX_ENABLE,
+ HINIC_MSIX_DISABLE,
+};
+
struct hinic_func_attr {
u16 func_idx;
u8 pf_idx;
@@ -226,6 +236,7 @@ struct hinic_func_attr {
struct hinic_hwif {
struct pci_dev *pdev;
void __iomem *cfg_regs_bar;
+ void __iomem *intr_regs_base;
struct hinic_func_attr attr;
};
@@ -251,6 +262,9 @@ int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
u8 *lli_timer, u8 *lli_credit_limit,
u8 *resend_timer);
+void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
+ enum hinic_msix_state flag);
+
int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index);
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action);
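A worked example of the vector-control offset used by hinic_set_msix_state(), built only from the #defines added above; the helper name is illustrative:

static inline u32 example_msix_ctrl_offset(u16 msix_idx)
{
	/* each MSI-X table entry is 16 bytes with its vector-control dword
	 * at byte 12, so e.g. entry 3 lives at 3 * 16 + 12 = offset 60;
	 * HINIC_MSIX_DISABLE sets the mask bit, HINIC_MSIX_ENABLE clears it.
	 */
	return msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE +
	       HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL;
}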
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 8e5897669a3a..a322a22d9357 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
goto err_sq_db;
}
- ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- &func_to_io->ci_dma_base,
- GFP_KERNEL);
+ ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
+ &func_to_io->ci_dma_base,
+ GFP_KERNEL);
if (!ci_addr_base) {
dev_err(&pdev->dev, "Failed to allocate CI area\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index bbf9bdd0ee3e..d62cf509646a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq)
goto err_cqe_dma_arr_alloc;
for (i = 0; i < wq->q_depth; i++) {
- rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
- sizeof(*rq->cqe[i]),
- &rq->cqe_dma[i], GFP_KERNEL);
+ rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
+ sizeof(*rq->cqe[i]),
+ &rq->cqe_dma[i], GFP_KERNEL);
if (!rq->cqe[i])
goto err_cqe_alloc;
}
@@ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
/* HW requirements: Must be at least 32 bit */
pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
- &rq->pi_dma_addr, GFP_KERNEL);
+ rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
+ &rq->pi_dma_addr, GFP_KERNEL);
if (!rq->pi_virt_addr) {
dev_err(&pdev->dev, "Failed to allocate PI address\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 1dfa7eb05c10..cb66e7024659 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
struct pci_dev *pdev = hwif->pdev;
dma_addr_t dma_addr;
- *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
- GFP_KERNEL);
+ *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
+ GFP_KERNEL);
if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
return -ENOMEM;
@@ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
u64 *paddr = &wq->block_vaddr[i];
dma_addr_t dma_addr;
- *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
- &dma_addr, GFP_KERNEL);
+ *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
+ &dma_addr, GFP_KERNEL);
if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate wq page\n");
goto err_alloc_wq_pages;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index da323b9e1f62..e64bc664f687 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -51,9 +51,10 @@ static unsigned int rx_weight = 64;
module_param(rx_weight, uint, 0644);
MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
-#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
-#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200
-#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201
+#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
+#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200
+#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205
+#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210
#define HINIC_WQ_NAME "hinic_dev"
@@ -1113,8 +1114,9 @@ static void hinic_shutdown(struct pci_dev *pdev)
static const struct pci_device_id hinic_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
- { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0},
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 0098b206e7e9..b6d218768ec1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -381,6 +381,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
static int rx_poll(struct napi_struct *napi, int budget)
{
struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
+ struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
struct hinic_rq *rq = rxq->rq;
int pkts;
@@ -389,7 +390,10 @@ static int rx_poll(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- enable_irq(rq->irq);
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_ENABLE);
+
return pkts;
}
@@ -414,7 +418,10 @@ static irqreturn_t rx_irq(int irq, void *data)
struct hinic_dev *nic_dev;
/* Disable the interrupt until napi will be completed */
- disable_irq_nosync(rq->irq);
+ nic_dev = netdev_priv(rxq->netdev);
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_DISABLE);
nic_dev = netdev_priv(rxq->netdev);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 11e73e67358d..e17bf33eba0c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -655,7 +655,9 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
if (pkts < budget) {
napi_complete(napi);
- enable_irq(sq->irq);
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ sq->msix_entry,
+ HINIC_MSIX_ENABLE);
return pkts;
}
@@ -682,7 +684,9 @@ static irqreturn_t tx_irq(int irq, void *data)
nic_dev = netdev_priv(txq->netdev);
/* Disable the interrupt until napi will be completed */
- disable_irq_nosync(txq->sq->irq);
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ txq->sq->msix_entry,
+ HINIC_MSIX_DISABLE);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index d719668a6684..92929750f832 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1310,7 +1310,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
dev->stats.tx_aborted_errors++;
}
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
tx_cmd->cmd.command = 0; /* Mark free */
break;
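
dev_consume_skb_irq() frees the skb exactly like dev_kfree_skb_irq() but records it as consumed rather than dropped, so successfully transmitted frames no longer show up in drop-monitor tooling built on the kfree_skb tracepoint. A minimal sketch of the distinction in a TX-completion path (hypothetical helper, not from this driver):

    #include <linux/netdevice.h>

    static void example_tx_complete(struct sk_buff *skb, bool sent_ok)
    {
            if (sent_ok)
                    dev_consume_skb_irq(skb);   /* normal completion, not a drop */
            else
                    dev_kfree_skb_irq(skb);     /* error path, counted as a drop */
    }
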
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 2f7ae118217f..1274ad24d6af 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1194,7 +1194,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
dma_unmap_single(dev->dev.parent,
tx_cmd->dma_addr,
skb->len, DMA_TO_DEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
tx_cmd->cmd.command = 0; /* Mark free */
break;
diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig
index 90d49191beb3..eacf7e141fdc 100644
--- a/drivers/net/ethernet/ibm/emac/Kconfig
+++ b/drivers/net/ethernet/ibm/emac/Kconfig
@@ -28,18 +28,6 @@ config IBM_EMAC_RX_COPY_THRESHOLD
depends on IBM_EMAC
default "256"
-config IBM_EMAC_RX_SKB_HEADROOM
- int "Additional RX skb headroom (bytes)"
- depends on IBM_EMAC
- default "0"
- help
- Additional receive skb headroom. Note, that driver
- will always reserve at least 2 bytes to make IP header
- aligned, so usually there is no need to add any additional
- headroom.
-
- If unsure, set to 0.
-
config IBM_EMAC_DEBUG
bool "Debugging"
depends on IBM_EMAC
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 209255495bc9..3c2a5759844a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1071,7 +1071,9 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
/* Second pass, allocate new skbs */
for (i = 0; i < NUM_RX_BUFF; ++i) {
- struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
if (!skb) {
ret = -ENOMEM;
goto oom;
@@ -1080,10 +1082,10 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
BUG_ON(!dev->rx_skb[i]);
dev_kfree_skb(dev->rx_skb[i]);
- skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
dev->rx_desc[i].data_ptr =
- dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
- DMA_FROM_DEVICE) + 2;
+ dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+ rx_sync_size, DMA_FROM_DEVICE)
+ + NET_IP_ALIGN;
dev->rx_skb[i] = skb;
}
skip:
@@ -1174,20 +1176,18 @@ static void emac_clean_rx_ring(struct emac_instance *dev)
}
}
-static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
- gfp_t flags)
+static int
+__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
{
- struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
if (unlikely(!skb))
return -ENOMEM;
dev->rx_skb[slot] = skb;
dev->rx_desc[slot].data_len = 0;
- skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
dev->rx_desc[slot].data_ptr =
- dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
- DMA_FROM_DEVICE) + 2;
+ dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+ dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
wmb();
dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
(slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
@@ -1195,6 +1195,27 @@ static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
return 0;
}
+static int
+emac_alloc_rx_skb(struct emac_instance *dev, int slot)
+{
+ struct sk_buff *skb;
+
+ skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
+ GFP_KERNEL);
+
+ return __emac_prepare_rx_skb(skb, dev, slot);
+}
+
+static int
+emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
+{
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);
+
+ return __emac_prepare_rx_skb(skb, dev, slot);
+}
+
static void emac_print_link_status(struct emac_instance *dev)
{
if (netif_carrier_ok(dev->ndev))
@@ -1225,7 +1246,7 @@ static int emac_open(struct net_device *ndev)
/* Allocate RX ring */
for (i = 0; i < NUM_RX_BUFF; ++i)
- if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
+ if (emac_alloc_rx_skb(dev, i)) {
printk(KERN_ERR "%s: failed to allocate RX ring\n",
ndev->name);
goto oom;
@@ -1660,8 +1681,9 @@ static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
DBG2(dev, "recycle %d %d" NL, slot, len);
if (len)
- dma_map_single(&dev->ofdev->dev, skb->data - 2,
- EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
+ dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+ SKB_DATA_ALIGN(len + NET_IP_ALIGN),
+ DMA_FROM_DEVICE);
dev->rx_desc[slot].data_len = 0;
wmb();
@@ -1713,7 +1735,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
int len = dev->rx_desc[slot].data_len;
int tot_len = dev->rx_sg_skb->len + len;
- if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
+ if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
++dev->estats.rx_dropped_mtu;
dev_kfree_skb(dev->rx_sg_skb);
dev->rx_sg_skb = NULL;
@@ -1769,16 +1791,18 @@ static int emac_poll_rx(void *param, int budget)
}
if (len && len < EMAC_RX_COPY_THRESH) {
- struct sk_buff *copy_skb =
- alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
+ struct sk_buff *copy_skb;
+
+ copy_skb = napi_alloc_skb(&dev->mal->napi, len);
if (unlikely(!copy_skb))
goto oom;
- skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
- memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
+ memcpy(copy_skb->data - NET_IP_ALIGN,
+ skb->data - NET_IP_ALIGN,
+ len + NET_IP_ALIGN);
emac_recycle_rx_skb(dev, slot, len);
skb = copy_skb;
- } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
+ } else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
goto oom;
skb_put(skb, len);
@@ -1799,7 +1823,7 @@ static int emac_poll_rx(void *param, int budget)
sg:
if (ctrl & MAL_RX_CTRL_FIRST) {
BUG_ON(dev->rx_sg_skb);
- if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
+ if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
DBG(dev, "rx OOM %d" NL, slot);
++dev->estats.rx_dropped_oom;
emac_recycle_rx_skb(dev, slot, 0);
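
The emac changes above drop the driver-private EMAC_RX_SKB_HEADROOM/EMAC_DMA_ALIGN sizing in favour of the generic NET_IP_ALIGN, NET_SKB_PAD and SKB_DATA_ALIGN helpers and the netdev/napi skb allocators, which already reserve the alignment bytes. The refill pattern they converge on looks roughly like this (hypothetical helper; dma_mapping_error() handling trimmed):

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    static int example_rx_refill(struct net_device *ndev, struct device *dma_dev,
                                 unsigned int buf_len, dma_addr_t *desc_ptr,
                                 struct sk_buff **slot)
    {
            /* skb->data already sits NET_IP_ALIGN bytes into the buffer */
            struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, buf_len);

            if (!skb)
                    return -ENOMEM;

            /* map from the real start of the buffer, then point the descriptor
             * NET_IP_ALIGN bytes in so the IP header stays 4-byte aligned
             */
            *desc_ptr = dma_map_single(dma_dev, skb->data - NET_IP_ALIGN,
                                       buf_len + NET_IP_ALIGN, DMA_FROM_DEVICE)
                        + NET_IP_ALIGN;
            *slot = skb;
            return 0;
    }
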
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 84caa4a3fc52..187689cd8212 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -68,22 +68,18 @@ static inline int emac_rx_size(int mtu)
return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
}
-#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
-
-#define EMAC_RX_SKB_HEADROOM \
- EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
-
/* Size of RX skb for the given MTU */
static inline int emac_rx_skb_size(int mtu)
{
int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
- return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
+
+ return SKB_DATA_ALIGN(size + NET_IP_ALIGN) + NET_SKB_PAD;
}
/* RX DMA sync size */
static inline int emac_rx_sync_size(int mtu)
{
- return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
+ return SKB_DATA_ALIGN(emac_rx_size(mtu) + NET_IP_ALIGN);
}
/* Driver statistics are split into two parts to make it more cache friendly:
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fff09dcf9e34..787d5aca5278 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL);
+ mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
goto fail_unmap;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 098d8764c0ea..dd71d5db7274 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1313,7 +1313,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
u16 mss = 0;
-restart_poll:
while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1401,7 +1400,6 @@ restart_poll:
napi_reschedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- goto restart_poll;
}
}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 31fb76ee9d82..a1246e89aad4 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -159,7 +159,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
select MDIO
- select MDIO_DEVICE
+ select PHYLIB
imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2569a168334c..a41008523c98 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 6;
goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 257bd59bc9c6..f86d55657959 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -696,11 +696,16 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ret_val =
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&kum_reg_data);
- if (ret_val)
- return ret_val;
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ e_dbg("Error disabling far-end loopback\n");
+ } else {
+ e_dbg("Error disabling far-end loopback\n");
+ }
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val)
@@ -754,11 +759,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
return ret_val;
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ e_dbg("Error disabling far-end loopback\n");
+ } else {
+ e_dbg("Error disabling far-end loopback\n");
+ }
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 308c006cb41d..7acc61e4f645 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2106,7 +2106,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
if (strlen(netdev->name) < (IFNAMSIZ - 5))
snprintf(adapter->rx_ring->name,
sizeof(adapter->rx_ring->name) - 1,
- "%s-rx-0", netdev->name);
+ "%.14s-rx-0", netdev->name);
else
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -2122,7 +2122,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
if (strlen(netdev->name) < (IFNAMSIZ - 5))
snprintf(adapter->tx_ring->name,
sizeof(adapter->tx_ring->name) - 1,
- "%s-tx-0", netdev->name);
+ "%.14s-tx-0", netdev->name);
else
memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
- ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
- GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
@@ -5309,8 +5309,13 @@ static void e1000_watchdog_task(struct work_struct *work)
/* 8000ES2LAN requires a Rx packet buffer work-around
* on link down event; reset the controller to flush
* the Rx packet buffer.
+ *
+ * If the link is lost the controller stops DMA, but
+ * if there is queued Tx work it cannot be done. So
+ * reset the controller to flush the Tx packet buffers.
*/
- if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+ e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
@@ -5333,14 +5338,6 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
- /* If the link is lost the controller stops DMA, but
- * if there is queued Tx work it cannot be done. So
- * reset the controller to flush the Tx packet buffers.
- */
- if (!netif_carrier_ok(netdev) &&
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
- adapter->flags |= FLAG_RESTART_NOW;
-
/* If reset is necessary, do it outside of interrupt context. */
if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
@@ -7351,6 +7348,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e1000_print_device_info(adapter);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
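
The "%.14s" precision in the MSI-X vector names above bounds how much of netdev->name is copied, so the "-rx-0"/"-tx-0" suffix and the terminating NUL provably fit in the ring name buffer and newer gcc versions stop warning about possible truncation (-Wformat-truncation). The pattern in isolation (hypothetical buffer, for illustration only):

    #include <linux/kernel.h>

    static void example_irq_name(char *buf, size_t len, const char *ifname)
    {
            /* copy at most 14 bytes of the interface name, then the suffix */
            snprintf(buf, len, "%.14s-rx-0", ifname);
    }
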
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 6fd15a734324..5a0419421511 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1603,14 +1603,12 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
{
struct fm10k_q_vector *q_vector;
struct fm10k_ring *ring;
- int ring_count, size;
+ int ring_count;
ring_count = txr_count + rxr_count;
- size = sizeof(struct fm10k_q_vector) +
- (sizeof(struct fm10k_ring) * ring_count);
/* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
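
struct_size(q_vector, ring, ring_count) replaces the open-coded sizeof arithmetic for a structure with a trailing ring[] array; the helper from <linux/overflow.h> also saturates instead of wrapping if the multiplication overflows. A self-contained sketch of the idiom (illustrative types, not the fm10k definitions):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_vec {
            int count;
            u64 ring[];             /* stand-in for the trailing ring array */
    };

    static struct example_vec *example_alloc(int nrings)
    {
            struct example_vec *v;

            /* size of the header plus nrings trailing elements */
            v = kzalloc(struct_size(v, ring, nrings), GFP_KERNEL);
            if (v)
                    v->count = nrings;
            return v;
    }
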
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 8f0a99b6a537..cb4d02629b86 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1148,7 +1148,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
* @results: Pointer array to message, results[0] is pointer to message
* @mbx: Pointer to mailbox information structure
*
- * This function is a default handler for MSI-X requests from the VF. The
+ * This function is a default handler for MSI-X requests from the VF. The
* assumption is that in this case it is acceptable to just directly
* hand off the message from the VF to the underlying shared code.
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 8de9085bba9e..d684998ba2b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
@@ -523,6 +524,8 @@ struct i40e_pf {
#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
+#define I40E_FLAG_RS_FEC BIT(25)
+#define I40E_FLAG_BASE_R_FEC BIT(26)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
@@ -787,11 +790,6 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
-
- /* AF_XDP zero-copy */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1091,6 +1089,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
return !!vsi->xdp_prog;
@@ -1104,10 +1104,10 @@ static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
if (ring_is_xdp(ring))
qid -= ring->vsi->alloc_queue_pairs;
- if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on)
+ if (!xdp_on)
return NULL;
- return ring->vsi->xsk_umems[qid];
+ return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
}
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index a20d1cf058ad..c67d485d6f99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1642,30 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
}
- if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
- if (cnt != 1) {
- dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
- goto netdev_ops_write_done;
- }
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "tx_timeout: VSI %d not found\n", vsi_seid);
- } else if (!vsi->netdev) {
- dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
- vsi_seid);
- } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
- dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
- vsi_seid);
- } else if (rtnl_trylock()) {
- vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
- rtnl_unlock();
- dev_info(&pf->pdev->dev, "tx_timeout called\n");
- } else {
- dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
- }
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
int mtu;
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
@@ -1733,7 +1710,6 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
i40e_dbg_netdev_ops_buf);
dev_info(&pf->pdev->dev, "available commands\n");
- dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a6bc7847346b..4c885801fa26 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -438,6 +438,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("disable-source-pruning",
I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
+ I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
+ I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -606,6 +608,24 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
}
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ }
+ }
/* need to add new 10G PHY types */
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
@@ -721,6 +741,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -825,6 +852,9 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -838,6 +868,10 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, advertising,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -855,6 +889,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
break;
case I40E_PHY_TYPE_25GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_ACC:
@@ -862,9 +903,15 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
-
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -1261,6 +1308,154 @@ done:
return err;
}
+static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status status = 0;
+ u32 flags = 0;
+ int err = 0;
+
+ flags = READ_ONCE(pf->flags);
+ i40e_set_fec_in_flags(fec_cfg, &flags);
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) {
+ struct i40e_aq_set_phy_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.phy_type = abilities.phy_type;
+ config.abilities = abilities.abilities;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status) {
+ netdev_info(netdev,
+ "Set phy config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ goto done;
+ }
+ pf->flags = flags;
+ status = i40e_update_link_info(hw);
+ if (status)
+ /* debug level message only due to relation to the link
+ * itself rather than to the FEC settings
+ * (e.g. no physical connection etc.)
+ */
+ netdev_dbg(netdev,
+ "Updating link info failed with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+
+done:
+ return err;
+}
+
+static int i40e_get_fec_param(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status status = 0;
+ int err = 0;
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ fecparam->fec = 0;
+ if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+ if ((abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_REQUEST_RS) ||
+ (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_ABILITY_RS))
+ fecparam->fec |= ETHTOOL_FEC_RS;
+ if ((abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_REQUEST_KR) ||
+ (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
+ fecparam->fec |= ETHTOOL_FEC_BASER;
+ if (abilities.fec_cfg_curr_mod_ext_info == 0)
+ fecparam->fec |= ETHTOOL_FEC_OFF;
+
+ if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+done:
+ return err;
+}
+
+static int i40e_set_fec_param(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u8 fec_cfg = 0;
+ int err = 0;
+
+ if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
+ hw->device_id != I40E_DEV_ID_25G_B) {
+ err = -EPERM;
+ goto done;
+ }
+
+ switch (fecparam->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS);
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR);
+ break;
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_NONE:
+ fec_cfg = 0;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
+ fecparam->fec);
+ err = -EINVAL;
+ goto done;
+ }
+
+ err = i40e_set_fec_cfg(netdev, fec_cfg);
+
+done:
+ return err;
+}
+
static int i40e_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
@@ -1376,7 +1571,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
else if (!pause->rx_pause && !pause->tx_pause)
hw->fc.requested_mode = I40E_FC_NONE;
else
- return -EINVAL;
+ return -EINVAL;
/* Tell the OS link is going down, the link will go back up when fw
* says it is ready asynchronously
@@ -2175,7 +2370,7 @@ static int i40e_get_ts_info(struct net_device *dev,
return 0;
}
-static int i40e_link_test(struct net_device *netdev, u64 *data)
+static u64 i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2198,7 +2393,7 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_reg_test(struct net_device *netdev, u64 *data)
+static u64 i40e_reg_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2209,7 +2404,7 @@ static int i40e_reg_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
+static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2223,7 +2418,7 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_intr_test(struct net_device *netdev, u64 *data)
+static u64 i40e_intr_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2440,10 +2635,10 @@ static int i40e_set_phys_id(struct net_device *netdev,
default:
break;
}
- if (ret)
- return -ENOENT;
- else
- return 0;
+ if (ret)
+ return -ENOENT;
+ else
+ return 0;
}
/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
@@ -4676,6 +4871,15 @@ flags_complete:
}
}
+ if (((changed_flags & I40E_FLAG_RS_FEC) ||
+ (changed_flags & I40E_FLAG_BASE_R_FEC)) &&
+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+ pf->hw.device_id != I40E_DEV_ID_25G_B) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FEC configuration\n");
+ return -EOPNOTSUPP;
+ }
+
/* Now that we've checked to ensure that the new flags are valid, load
* them into place. Since we only modify flags either (a) during
* initialization or (b) while holding the RTNL lock, we don't need
@@ -4714,6 +4918,24 @@ flags_complete:
}
}
+ if ((changed_flags & I40E_FLAG_RS_FEC) ||
+ (changed_flags & I40E_FLAG_BASE_R_FEC)) {
+ u8 fec_cfg = 0;
+
+ if (pf->flags & I40E_FLAG_RS_FEC &&
+ pf->flags & I40E_FLAG_BASE_R_FEC) {
+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
+ } else if (pf->flags & I40E_FLAG_RS_FEC) {
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS);
+ } else if (pf->flags & I40E_FLAG_BASE_R_FEC) {
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR);
+ }
+ if (i40e_set_fec_cfg(dev, fec_cfg))
+ dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
+ }
+
if ((changed_flags & pf->flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4948,6 +5170,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_per_queue_coalesce = i40e_set_per_queue_coalesce,
.get_link_ksettings = i40e_get_link_ksettings,
.set_link_ksettings = i40e_set_link_ksettings,
+ .get_fecparam = i40e_get_fec_param,
+ .set_fecparam = i40e_set_fec_param,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
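
The new get_fecparam/set_fecparam ethtool ops and the rs-fec/base-r-fec private flags expose the 25G FEC configuration. The reporting side boils down to translating the firmware's FEC bits into ETHTOOL_FEC_* values, roughly as in this simplified restatement of the active-FEC logic in i40e_get_fec_param() above:

    #include <linux/ethtool.h>

    /* sketch: map the firmware's active-FEC bits to the ethtool report */
    static u32 example_active_fec(u8 fec_info)
    {
            if (fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
                    return ETHTOOL_FEC_BASER;
            if (fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
                    return ETHTOOL_FEC_RS;
            return ETHTOOL_FEC_OFF;
    }
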
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4d40878e395a..da62218eb70a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,8 +26,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 7
-#define DRV_VERSION_BUILD 6
+#define DRV_VERSION_MINOR 8
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
struct i40e_pf *pf = (struct i40e_pf *)hw->back;
mem->size = ALIGN(size, alignment);
- mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
!i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
if (!ok) {
+ /* Log this in case the user has forgotten to give the kernel
+ * any buffers, even later in the application.
+ */
dev_info(&vsi->back->pdev->dev,
- "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
ring->xsk_umem ? "UMEM enabled " : "",
ring->queue_index, pf_q);
}
@@ -3610,7 +3613,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
(I40E_QUEUE_TYPE_TX
<< I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
}
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
- if (i40e_enabled_xdp_vsi(vsi))
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* Make sure that in-progress ndo_xdp_xmit
+ * calls are completed.
+ */
+ synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[i]);
+ }
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
@@ -7169,11 +7177,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
struct tc_cls_flower_offload *f,
struct i40e_cloud_filter *filter)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
struct i40e_pf *pf = vsi->back;
u8 field_flags = 0;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -7183,143 +7193,109 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
- f->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
- struct flow_dissector_key_keyid *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
-
- if (mask->keyid != 0)
+ flow_rule_match_enc_keyid(rule, &match);
+ if (match.mask->keyid != 0)
field_flags |= I40E_CLOUD_FIELD_TEN_ID;
- filter->tenant_id = be32_to_cpu(key->keyid);
+ filter->tenant_id = be32_to_cpu(match.key->keyid);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
-
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- n_proto_key = ntohs(key->n_proto);
- n_proto_mask = ntohs(mask->n_proto);
+ flow_rule_match_basic(rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
if (n_proto_key == ETH_P_ALL) {
n_proto_key = 0;
n_proto_mask = 0;
}
filter->n_proto = n_proto_key & n_proto_mask;
- filter->ip_proto = key->ip_proto;
+ filter->ip_proto = match.key->ip_proto;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
- struct flow_dissector_key_eth_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
+ flow_rule_match_eth_addrs(rule, &match);
/* use is_broadcast and is_zero to check for all 0xf or 0 */
- if (!is_zero_ether_addr(mask->dst)) {
- if (is_broadcast_ether_addr(mask->dst)) {
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (is_broadcast_ether_addr(match.mask->dst)) {
field_flags |= I40E_CLOUD_FIELD_OMAC;
} else {
dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
- mask->dst);
+ match.mask->dst);
return I40E_ERR_CONFIG;
}
}
- if (!is_zero_ether_addr(mask->src)) {
- if (is_broadcast_ether_addr(mask->src)) {
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (is_broadcast_ether_addr(match.mask->src)) {
field_flags |= I40E_CLOUD_FIELD_IMAC;
} else {
dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
- mask->src);
+ match.mask->src);
return I40E_ERR_CONFIG;
}
}
- ether_addr_copy(filter->dst_mac, key->dst);
- ether_addr_copy(filter->src_mac, key->src);
+ ether_addr_copy(filter->dst_mac, match.key->dst);
+ ether_addr_copy(filter->src_mac, match.key->src);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
- if (mask->vlan_id) {
- if (mask->vlan_id == VLAN_VID_MASK) {
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
field_flags |= I40E_CLOUD_FIELD_IVLAN;
} else {
dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
- mask->vlan_id);
+ match.mask->vlan_id);
return I40E_ERR_CONFIG;
}
}
- filter->vlan_id = cpu_to_be16(key->vlan_id);
+ filter->vlan_id = cpu_to_be16(match.key->vlan_id);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
-
- if (mask->dst) {
- if (mask->dst == cpu_to_be32(0xffffffff)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
- &mask->dst);
+ &match.mask->dst);
return I40E_ERR_CONFIG;
}
}
- if (mask->src) {
- if (mask->src == cpu_to_be32(0xffffffff)) {
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
- &mask->src);
+ &match.mask->src);
return I40E_ERR_CONFIG;
}
}
@@ -7328,70 +7304,60 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
return I40E_ERR_CONFIG;
}
- filter->dst_ipv4 = key->dst;
- filter->src_ipv4 = key->src;
+ filter->dst_ipv4 = match.key->dst;
+ filter->src_ipv4 = match.key->src;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
/* src and dest IPV6 address should not be LOOPBACK
* (0:0:0:0:0:0:0:1), which can be represented as ::1
*/
- if (ipv6_addr_loopback(&key->dst) ||
- ipv6_addr_loopback(&key->src)) {
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
dev_err(&pf->pdev->dev,
"Bad ipv6, addr is LOOPBACK\n");
return I40E_ERR_CONFIG;
}
- if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+ if (!ipv6_addr_any(&match.mask->dst) ||
+ !ipv6_addr_any(&match.mask->src))
field_flags |= I40E_CLOUD_FIELD_IIP;
- memcpy(&filter->src_ipv6, &key->src.s6_addr32,
+ memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
sizeof(filter->src_ipv6));
- memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
+ memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
sizeof(filter->dst_ipv6));
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
- if (mask->src) {
- if (mask->src == cpu_to_be16(0xffff)) {
+ flow_rule_match_ports(rule, &match);
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be16(0xffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
- be16_to_cpu(mask->src));
+ be16_to_cpu(match.mask->src));
return I40E_ERR_CONFIG;
}
}
- if (mask->dst) {
- if (mask->dst == cpu_to_be16(0xffff)) {
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be16(0xffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
- be16_to_cpu(mask->dst));
+ be16_to_cpu(match.mask->dst));
return I40E_ERR_CONFIG;
}
}
- filter->dst_port = key->dst;
- filter->src_port = key->src;
+ filter->dst_port = match.key->dst;
+ filter->src_port = match.key->src;
switch (filter->ip_proto) {
case IPPROTO_TCP:
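
The cls_flower parsing above is converted from paired skb_flow_dissector_target() lookups (separate key and mask pointers) to the flow_rule helpers, which hand back both in one flow_match_* structure. The per-key pattern, in isolation (hypothetical wrapper):

    #include <net/flow_offload.h>

    static void example_match_basic(struct flow_rule *rule, u16 *n_proto)
    {
            struct flow_match_basic match;

            if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
                    return;

            flow_rule_match_basic(rule, &match);
            /* key and mask now come from the same match structure */
            *n_proto = ntohs(match.key->n_proto) & ntohs(match.mask->n_proto);
    }
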
@@ -8131,8 +8097,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
- set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
- set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
exit:
@@ -11042,6 +11008,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
return 0;
+ queue_count = min_t(int, queue_count, num_online_cpus());
new_rss_size = min_t(int, queue_count, pf->rss_size_max);
if (queue_count != vsi->num_queue_pairs) {
@@ -11644,7 +11611,8 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_pf *pf = np->vsi->back;
@@ -11895,6 +11863,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
if (old_prog)
bpf_prog_put(old_prog);
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This so that receiving will start.
+ */
+ if (need_reset && prog)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i]->xsk_umem)
+ (void)i40e_xsk_async_xmit(vsi->netdev, i);
+
return 0;
}
@@ -11955,8 +11931,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
{
i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
- if (i40e_enabled_xdp_vsi(vsi))
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* Make sure that in-progress ndo_xdp_xmit calls are
+ * completed.
+ */
+ synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+ }
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
}
@@ -12168,9 +12149,6 @@ static int i40e_xdp(struct net_device *dev,
case XDP_QUERY_PROG:
xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
return 0;
- case XDP_QUERY_XSK_UMEM:
- return i40e_xsk_umem_query(vsi, &xdp->xsk.umem,
- xdp->xsk.queue_id);
case XDP_SETUP_XSK_UMEM:
return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
xdp->xsk.queue_id);
@@ -13859,6 +13837,29 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
}
/**
+ * i40e_set_fec_in_flags - helper function for setting FEC options in flags
+ * @fec_cfg: FEC option to set in flags
+ * @flags: ptr to flags in which we set FEC option
+ **/
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
+{
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
+ *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
+ *flags |= I40E_FLAG_RS_FEC;
+ *flags &= ~I40E_FLAG_BASE_R_FEC;
+ }
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
+ *flags |= I40E_FLAG_BASE_R_FEC;
+ *flags &= ~I40E_FLAG_RS_FEC;
+ }
+ if (fec_cfg == 0)
+ *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
+}
+
+/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
@@ -14349,6 +14350,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+ /* set the FEC config due to the board capabilities */
+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a7e14e98889f..6c97667d20ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct i40e_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_ring *xdp_ring;
int drops = 0;
int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+ if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 2ac23ebfbf31..831d52bc3c9a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2069,6 +2069,11 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
+ if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
@@ -3384,8 +3389,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_info(&pf->pdev->dev,
"VF %d: Invalid input/s, can't apply cloud filter\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
- goto err;
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
@@ -3656,7 +3661,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
int ret;
pf->vf_aq_requests++;
- if (local_vf_id >= pf->num_alloc_vfs)
+ if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
return -EINVAL;
vf = &(pf->vf[local_vf_id]);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..b5c182e688e3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -10,69 +10,6 @@
#include "i40e_xsk.h"
/**
- * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
- * @vsi: Current VSI
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
-{
- if (vsi->xsk_umems)
- return 0;
-
- vsi->num_xsk_umems_used = 0;
- vsi->num_xsk_umems = vsi->alloc_queue_pairs;
- vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
- GFP_KERNEL);
- if (!vsi->xsk_umems) {
- vsi->num_xsk_umems = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
- * @vsi: Current VSI
- * @umem: UMEM to store
- * @qid: Ring/qid to associate with the UMEM
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
- u16 qid)
-{
- int err;
-
- err = i40e_alloc_xsk_umems(vsi);
- if (err)
- return err;
-
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
-
- return 0;
-}
-
-/**
- * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
- * @vsi: Current VSI
- * @qid: Ring/qid associated with the UMEM
- **/
-static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
-{
- vsi->xsk_umems[qid] = NULL;
- vsi->num_xsk_umems_used--;
-
- if (vsi->num_xsk_umems == 0) {
- kfree(vsi->xsk_umems);
- vsi->xsk_umems = NULL;
- vsi->num_xsk_umems = 0;
- }
-}
-
-/**
* i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
* @vsi: Current VSI
* @umem: UMEM to DMA map
@@ -140,6 +77,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid)
{
+ struct net_device *netdev = vsi->netdev;
struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -150,12 +88,9 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (qid >= vsi->num_queue_pairs)
return -EINVAL;
- if (vsi->xsk_umems) {
- if (qid >= vsi->num_xsk_umems)
- return -EINVAL;
- if (vsi->xsk_umems[qid])
- return -EBUSY;
- }
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
if (!reuseq)
@@ -173,16 +108,15 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
err = i40e_queue_pair_disable(vsi, qid);
if (err)
return err;
- }
- err = i40e_add_xsk_umem(vsi, umem, qid);
- if (err)
- return err;
-
- if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
+
+ /* Kick start the NAPI context so that receiving will start */
+ err = i40e_xsk_async_xmit(vsi->netdev, qid);
+ if (err)
+ return err;
}
return 0;
@@ -197,11 +131,13 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
**/
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
+ struct net_device *netdev = vsi->netdev;
+ struct xdp_umem *umem;
bool if_running;
int err;
- if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
- !vsi->xsk_umems[qid])
+ umem = xdp_get_umem_from_qid(netdev, qid);
+ if (!umem)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -212,8 +148,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
- i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
- i40e_remove_xsk_umem(vsi, qid);
+ i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -225,36 +160,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
/**
- * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM
- * @vsi: Current VSI
- * @umem: UMEM associated to the ring, if any
- * @qid: Rx ring to associate UMEM to
- *
- * This function will store, if any, the UMEM associated to certain ring.
- *
- * Returns 0 on success, <0 on failure
- **/
-int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
- u16 qid)
-{
- if (vsi->type != I40E_VSI_MAIN)
- return -EINVAL;
-
- if (qid >= vsi->num_queue_pairs)
- return -EINVAL;
-
- if (vsi->xsk_umems) {
- if (qid >= vsi->num_xsk_umems)
- return -EINVAL;
- *umem = vsi->xsk_umems[qid];
- return 0;
- }
-
- *umem = NULL;
- return 0;
-}
-
-/**
* i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
* @vsi: Current VSI
* @umem: UMEM to enable/associate to a ring, or NULL to disable
@@ -945,13 +850,11 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
+ struct net_device *netdev = vsi->netdev;
int i;
- if (!vsi->xsk_umems)
- return false;
-
for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->xsk_umems[i])
+ if (xdp_get_umem_from_qid(netdev, i))
return true;
}
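
The i40e hunks above remove the driver-private vsi->xsk_umems[] bookkeeping and ask the XDP socket core instead. As a minimal sketch of the resulting pattern (not part of the patch; the wrapper name is hypothetical), a "does this queue have a UMEM attached?" test collapses into a single lookup:

/* Sketch only: query the XDP core rather than driver state. Assumes
 * xdp_get_umem_from_qid() behaves as used in the hunks above, returning
 * the attached UMEM for the given queue id, or NULL if none is attached.
 */
static bool example_qid_has_umem(struct net_device *netdev, u16 qid)
{
	return xdp_get_umem_from_qid(netdev, qid) != NULL;
}
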
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index 9038c5d5cf08..8cc0a2e7d9a2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -10,8 +10,6 @@ struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
- u16 qid);
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid);
void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 9f2b7b7adf6b..4569d69a2b55 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2439,6 +2439,8 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
struct tc_cls_flower_offload *f,
struct iavf_cloud_filter *filter)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0;
u16 n_proto_key = 0;
u8 field_flags = 0;
@@ -2447,7 +2449,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
int i = 0;
struct virtchnl_filter *vf = &filter->f;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -2457,32 +2459,24 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
- f->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
- if (mask->keyid != 0)
+ flow_rule_match_enc_keyid(rule, &match);
+ if (match.mask->keyid != 0)
field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- n_proto_key = ntohs(key->n_proto);
- n_proto_mask = ntohs(mask->n_proto);
+ flow_rule_match_basic(rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
if (n_proto_key == ETH_P_ALL) {
n_proto_key = 0;
@@ -2496,122 +2490,103 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
}
- if (key->ip_proto != IPPROTO_TCP) {
+ if (match.key->ip_proto != IPPROTO_TCP) {
dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
return -EINVAL;
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
- struct flow_dissector_key_eth_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
/* use is_broadcast and is_zero to check for all 0xf or 0 */
- if (!is_zero_ether_addr(mask->dst)) {
- if (is_broadcast_ether_addr(mask->dst)) {
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (is_broadcast_ether_addr(match.mask->dst)) {
field_flags |= IAVF_CLOUD_FIELD_OMAC;
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
- mask->dst);
+ match.mask->dst);
return I40E_ERR_CONFIG;
}
}
- if (!is_zero_ether_addr(mask->src)) {
- if (is_broadcast_ether_addr(mask->src)) {
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (is_broadcast_ether_addr(match.mask->src)) {
field_flags |= IAVF_CLOUD_FIELD_IMAC;
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
- mask->src);
+ match.mask->src);
return I40E_ERR_CONFIG;
}
}
- if (!is_zero_ether_addr(key->dst))
- if (is_valid_ether_addr(key->dst) ||
- is_multicast_ether_addr(key->dst)) {
+ if (!is_zero_ether_addr(match.key->dst))
+ if (is_valid_ether_addr(match.key->dst) ||
+ is_multicast_ether_addr(match.key->dst)) {
/* set the mask if a valid dst_mac address */
for (i = 0; i < ETH_ALEN; i++)
vf->mask.tcp_spec.dst_mac[i] |= 0xff;
ether_addr_copy(vf->data.tcp_spec.dst_mac,
- key->dst);
+ match.key->dst);
}
- if (!is_zero_ether_addr(key->src))
- if (is_valid_ether_addr(key->src) ||
- is_multicast_ether_addr(key->src)) {
+ if (!is_zero_ether_addr(match.key->src))
+ if (is_valid_ether_addr(match.key->src) ||
+ is_multicast_ether_addr(match.key->src)) {
/* set the mask if a valid dst_mac address */
for (i = 0; i < ETH_ALEN; i++)
vf->mask.tcp_spec.src_mac[i] |= 0xff;
ether_addr_copy(vf->data.tcp_spec.src_mac,
- key->src);
+ match.key->src);
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
- if (mask->vlan_id) {
- if (mask->vlan_id == VLAN_VID_MASK) {
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
field_flags |= IAVF_CLOUD_FIELD_IVLAN;
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
- mask->vlan_id);
+ match.mask->vlan_id);
return I40E_ERR_CONFIG;
}
}
vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
- vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
+ vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
-
- if (mask->dst) {
- if (mask->dst == cpu_to_be32(0xffffffff)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be32(0xffffffff)) {
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
- be32_to_cpu(mask->dst));
+ be32_to_cpu(match.mask->dst));
return I40E_ERR_CONFIG;
}
}
- if (mask->src) {
- if (mask->src == cpu_to_be32(0xffffffff)) {
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be32(0xffffffff)) {
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
- be32_to_cpu(mask->dst));
+ be32_to_cpu(match.mask->dst));
return I40E_ERR_CONFIG;
}
}
@@ -2620,28 +2595,23 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
return I40E_ERR_CONFIG;
}
- if (key->dst) {
+ if (match.key->dst) {
vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
- vf->data.tcp_spec.dst_ip[0] = key->dst;
+ vf->data.tcp_spec.dst_ip[0] = match.key->dst;
}
- if (key->src) {
+ if (match.key->src) {
vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
- vf->data.tcp_spec.src_ip[0] = key->src;
+ vf->data.tcp_spec.src_ip[0] = match.key->src;
}
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
/* validate mask, make sure it is not IPV6_ADDR_ANY */
- if (ipv6_addr_any(&mask->dst)) {
+ if (ipv6_addr_any(&match.mask->dst)) {
dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
IPV6_ADDR_ANY);
return I40E_ERR_CONFIG;
@@ -2650,61 +2620,56 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
/* src and dest IPv6 address should not be LOOPBACK
* (0:0:0:0:0:0:0:1) which can be represented as ::1
*/
- if (ipv6_addr_loopback(&key->dst) ||
- ipv6_addr_loopback(&key->src)) {
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
dev_err(&adapter->pdev->dev,
"ipv6 addr should not be loopback\n");
return I40E_ERR_CONFIG;
}
- if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+ if (!ipv6_addr_any(&match.mask->dst) ||
+ !ipv6_addr_any(&match.mask->src))
field_flags |= IAVF_CLOUD_FIELD_IIP;
for (i = 0; i < 4; i++)
vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
- memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
+ memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
sizeof(vf->data.tcp_spec.dst_ip));
for (i = 0; i < 4; i++)
vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
- memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
+ memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
sizeof(vf->data.tcp_spec.src_ip));
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
-
- if (mask->src) {
- if (mask->src == cpu_to_be16(0xffff)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be16(0xffff)) {
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
- be16_to_cpu(mask->src));
+ be16_to_cpu(match.mask->src));
return I40E_ERR_CONFIG;
}
}
- if (mask->dst) {
- if (mask->dst == cpu_to_be16(0xffff)) {
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be16(0xffff)) {
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
- be16_to_cpu(mask->dst));
+ be16_to_cpu(match.mask->dst));
return I40E_ERR_CONFIG;
}
}
- if (key->dst) {
+ if (match.key->dst) {
vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
- vf->data.tcp_spec.dst_port = key->dst;
+ vf->data.tcp_spec.dst_port = match.key->dst;
}
- if (key->src) {
+ if (match.key->src) {
vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
- vf->data.tcp_spec.src_port = key->src;
+ vf->data.tcp_spec.src_port = match.key->src;
}
}
vf->field_flags = field_flags;
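
The iavf_parse_cls_flower() conversion above replaces raw skb_flow_dissector_target() lookups with the flow_rule match helpers, and the pattern repeats for every key type: test for the key, fill a stack-allocated flow_match_* struct, then read key and mask through it. A minimal hedged sketch of that pattern (function name and error policy are illustrative, not from the patch):

/* Sketch of the flow_rule match pattern used throughout the hunk above. */
static int example_parse_basic(struct flow_rule *rule)
{
	struct flow_match_basic match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		return 0;			/* key not present in this rule */

	flow_rule_match_basic(rule, &match);	/* fills match.key and match.mask */
	if (match.key->ip_proto != IPPROTO_TCP)
		return -EOPNOTSUPP;		/* mirror the driver's TCP-only policy */
	return 0;
}
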
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a385575600f6..89440775aea1 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -26,6 +26,7 @@
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
+#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#include <linux/avf/virtchnl.h>
@@ -82,7 +83,7 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
- ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+ (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)))
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
@@ -110,6 +111,9 @@ extern const char ice_drv_ver[];
#define ice_for_each_alloc_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+#define ice_for_each_q_vector(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -129,6 +133,17 @@ struct ice_res_tracker {
u16 list[1];
};
+struct ice_qs_cfg {
+ struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
+ unsigned long *pf_map;
+ unsigned long pf_map_size;
+ unsigned int q_count;
+ unsigned int scatter_count;
+ u16 *vsi_map;
+ u16 vsi_map_offset;
+ u8 mapping_mode;
+};
+
struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
@@ -270,6 +285,7 @@ enum ice_pf_flags {
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
+ ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_PF_FLAGS_NBITS /* must be last */
};
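
One detail worth spelling out in the ice.h hunk: the ICE_MAX_MTU rewrite is not only about allowing two VLAN tags, it also fixes operator precedence in the old expression. A quick worked example with the usual Ethernet header sizes (the constants are standard; the arithmetic is illustrative and not part of the patch):

/* ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4
 *
 * old:  MAX - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN
 *     = MAX - 14 + 4 + 4                       = MAX - 6   (FCS/VLAN were added back)
 * new:  MAX - (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
 *     = MAX - (14 + 4 + 8)                     = MAX - 26  (header, FCS and two tags reserved)
 */
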
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index fcdcd80b18e7..242c78469181 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -657,8 +657,13 @@ struct ice_aqc_get_topo {
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
+ * Add TSE (indirect 0x0401)
+ * Delete TSE (indirect 0x040F)
+ * Move TSE (indirect 0x0408)
+ * Suspend Nodes (indirect 0x0409)
+ * Resume Nodes (indirect 0x040A)
*/
-struct ice_aqc_get_cfg_elem {
+struct ice_aqc_sched_elem_cmd {
__le16 num_elem_req; /* Used by commands */
__le16 num_elem_resp; /* Used by responses */
__le32 reserved;
@@ -674,18 +679,6 @@ struct ice_aqc_suspend_resume_elem {
__le32 teid[1];
};
-/* Add TSE (indirect 0x0401)
- * Delete TSE (indirect 0x040F)
- * Move TSE (indirect 0x0408)
- */
-struct ice_aqc_add_move_delete_elem {
- __le16 num_grps_req;
- __le16 num_grps_updated;
- __le32 reserved;
- __le32 addr_high;
- __le32 addr_low;
-};
-
struct ice_aqc_elem_info_bw {
__le16 bw_profile_idx;
__le16 bw_alloc;
@@ -854,11 +847,46 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33)
#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34)
#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35)
+#define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41)
+#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42)
+#define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43)
+#define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45)
+#define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48)
+#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49)
+#define ICE_PHY_TYPE_LOW_50G_AUI1 BIT_ULL(50)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52)
+#define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56)
+#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57)
+#define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60)
+#define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62)
+#define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63)
#define ICE_PHY_TYPE_LOW_MAX_INDEX 63
+/* The second set of defines is for phy_type_high. */
+#define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 reserved;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0)
#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1)
@@ -923,7 +951,7 @@ struct ice_aqc_set_phy_cfg {
/* Set PHY config command data structure */
struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 rsvd0;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
@@ -1032,10 +1060,12 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_SPEED_20GB BIT(6)
#define ICE_AQ_LINK_SPEED_25GB BIT(7)
#define ICE_AQ_LINK_SPEED_40GB BIT(8)
+#define ICE_AQ_LINK_SPEED_50GB BIT(9)
+#define ICE_AQ_LINK_SPEED_100GB BIT(10)
#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
__le32 reserved3; /* Aligns next field to 8-byte boundary */
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 reserved4;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
};
/* Set event mask command (direct 0x0613) */
@@ -1055,6 +1085,16 @@ struct ice_aqc_set_event_mask {
u8 reserved1[6];
};
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ice_aqc_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 ident_mode;
+#define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0)
+#define ICE_AQC_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1341,12 +1381,12 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
struct ice_aqc_get_topo get_topo;
- struct ice_aqc_get_cfg_elem get_update_elem;
+ struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
- struct ice_aqc_add_move_delete_elem add_move_delete_elem;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
@@ -1442,6 +1482,7 @@ enum ice_adminq_opc {
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
+ ice_aqc_opc_set_port_id_led = 0x06E9,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
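
The admin queue changes above split the PHY type space into a [phy_type_low, phy_type_high] pair of 64-bit words. Conceptually this is one contiguous bitmap; a small hypothetical helper (not in the patch) makes the indexing explicit:

/* Treat the pair as bits 0..63 in the low word and 64..127 in the high word. */
static inline bool example_phy_type_bit_set(u64 low, u64 high, unsigned int bit)
{
	return bit < 64 ? !!(low & BIT_ULL(bit)) : !!(high & BIT_ULL(bit - 64));
}
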
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 4c1d35da940d..63f003441300 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -165,8 +165,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
- if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
+ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ }
return status;
}
@@ -183,6 +185,9 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
return ICE_MEDIA_UNKNOWN;
hw_link_info = &pi->phy.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) {
switch (hw_link_info->phy_type_low) {
@@ -196,6 +201,15 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_100BASE_TX:
case ICE_PHY_TYPE_LOW_1000BASE_T:
@@ -209,6 +223,11 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
case ICE_PHY_TYPE_LOW_25GBASE_CR1:
case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
return ICE_MEDIA_DA;
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
@@ -219,10 +238,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25GBASE_KR1:
case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ return ICE_MEDIA_BACKPLANE;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
}
}
-
return ICE_MEDIA_UNKNOWN;
}
@@ -274,6 +301,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* update current link status information */
hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
hw_link_info->link_info = link_data.link_info;
hw_link_info->an_info = link_data.an_info;
@@ -750,6 +778,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
status = ICE_ERR_CFG;
goto err_unroll_sched;
}
+ INIT_LIST_HEAD(&hw->agg_list);
status = ice_init_fltr_mgmt_struct(hw);
if (status)
@@ -800,6 +829,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_cleanup_fltr_mgmt_struct(hw);
ice_sched_cleanup_all(hw);
+ ice_sched_clear_agg(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -1655,7 +1685,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
* This function is used to write MAC address to the NVM (0x0108).
*/
enum ice_status
-ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_write *cmd;
@@ -1667,8 +1697,8 @@ ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
cmd->flags = flags;
/* Prep values for flags, sah, sal */
- cmd->sah = htons(*((u16 *)mac_addr));
- cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
+ cmd->sah = htons(*((const u16 *)mac_addr));
+ cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -1705,16 +1735,20 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
+ * @phy_type_high: higher part of phy_type
*
- * This helper function will convert a phy_type_low to its corresponding link
+ * This helper function will convert an entry in phy type structure
+ * [phy_type_low, phy_type_high] to its corresponding link speed.
+ * Note: In the structure of [phy_type_low, phy_type_high], there should
+ * be one bit set, as this function will convert one phy type to its
* speed.
- * Note: In the structure of phy_type_low, there should be one bit set, as
- * this function will convert one phy type to its speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
*/
-static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
+ u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
switch (phy_type_low) {
@@ -1768,41 +1802,110 @@ static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
case ICE_PHY_TYPE_LOW_40G_XLAUI:
speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
break;
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
+ break;
default:
speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
}
- return speed_phy_type_low;
+ switch (phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ default:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return speed_phy_type_low;
+ else
+ return speed_phy_type_high;
}
/**
* ice_update_phy_type
* @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
* @link_speeds_bitmap: targeted link speeds bitmap
*
* Note: For the link_speeds_bitmap structure, you can check it at
* [ice_aqc_get_link_status->link_speed]. Caller can pass in a
* link_speeds_bitmap that includes multiple speeds.
*
- * The value of phy_type_low will present a certain link speed. This helper
- * function will turn on bits in the phy_type_low based on the value of
+ * Each entry in this [phy_type_low, phy_type_high] structure will
+ * present a certain link speed. This helper function will turn on bits
+ * in [phy_type_low, phy_type_high] structure based on the value of
* link_speeds_bitmap input parameter.
*/
-void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
+void
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
+ u16 link_speeds_bitmap)
{
u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+ u64 pt_high;
u64 pt_low;
int index;
/* We first check with low part of phy_type */
for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
pt_low = BIT_ULL(index);
- speed = ice_get_link_speed_based_on_phy_type(pt_low);
+ speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
if (link_speeds_bitmap & speed)
*phy_type_low |= BIT_ULL(index);
}
+
+ /* We then check with high part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
+ pt_high = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_high |= BIT_ULL(index);
+ }
}
/**
@@ -1934,6 +2037,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
if (ena_auto_link_update)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
/* Copy over all the old settings */
+ cfg.phy_type_high = pcaps->phy_type_high;
cfg.phy_type_low = pcaps->phy_type_low;
cfg.low_power_ctrl = pcaps->low_power_ctrl;
cfg.eee_cap = pcaps->eee_cap;
@@ -2032,6 +2136,34 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
+ * ice_aq_set_port_id_led
+ * @pi: pointer to the port information
+ * @is_orig_mode: is this LED set to original mode (by the net-list)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set LED value for the given port (0x06e9)
+ */
+enum ice_status
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_port_id_led *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
+
+ if (is_orig_mode)
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
@@ -2318,6 +2450,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
+ enum ice_status status;
u16 i, sz = 0;
cmd = &desc.params.dis_txqs;
@@ -2353,6 +2486,8 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
break;
}
+ /* flush pipe on time out */
+ cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
/* If no queue group info, we are in a reset flow. Issue the AQ */
if (!qg_list)
goto do_aq;
@@ -2378,7 +2513,17 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
return ICE_ERR_PARAM;
do_aq:
- return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
+ status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
+ if (status) {
+ if (!qg_list)
+ ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
+ vmvf_num, hw->adminq.sq_last_status);
+ else
+ ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n",
+ le16_to_cpu(qg_list[0].q_id[0]),
+ hw->adminq.sq_last_status);
+ }
+ return status;
}
/* End of FW Admin Queue command wrappers */
@@ -2664,8 +2809,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
/* add the lan q */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
- if (status)
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
+ le16_to_cpu(buf->txqs[0].txq_id),
+ hw->adminq.sq_last_status);
goto ena_txq_exit;
+ }
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
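
The four-way if/else added at the end of ice_get_link_speed_based_on_phy_type() above implements one rule: report a speed only when exactly one of the two halves decodes to something known. A compact sketch of that rule (illustrative only, not driver code):

/* Returns the decoded speed, or 'unknown' when neither or both halves decoded. */
static u16 example_resolve_speed(u16 low, u16 high, u16 unknown)
{
	if ((low == unknown) == (high == unknown))
		return unknown;			/* both unknown, or ambiguously both known */
	return (low != unknown) ? low : high;
}
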
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index cf760c24a6aa..d7c7c2ed8823 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -28,6 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
+enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
+ u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -70,9 +72,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
void
-ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap);
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
+ u16 link_speeds_bitmap);
enum ice_status
-ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
@@ -86,6 +89,10 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
+ struct ice_sq_cd *cd);
+
+enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cmd_details);
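
The new ice_read_sr_buf() prototype above is consumed by the ethtool EEPROM hunk in the next file, which converts a byte-granular ethtool request into 16-bit Shadow RAM words. A quick worked example of that conversion (the values are illustrative):

/* eeprom->offset = 3, eeprom->len = 4 (both in bytes)
 *   first_word = 3 >> 1             = 1
 *   last_word  = (3 + 4 - 1) >> 1   = 3
 *   nwords     = 3 - 1 + 1          = 3 words read (covering bytes 2..7)
 * The final memcpy() then skips (eeprom->offset & 1) = 1 leading byte of the
 * word buffer, so the caller gets back exactly the 4 bytes it asked for.
 */
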
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3b6e387f5440..eb8d149e317c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -63,45 +63,45 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
* is queried on the base PF netdev.
*/
static const struct ice_stats ice_gstrings_pf_stats[] = {
- ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
- ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
- ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
- ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
- ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
- ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
- ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
- ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
- ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
- ICE_PF_STAT("tx_size_64", stats.tx_size_64),
- ICE_PF_STAT("rx_size_64", stats.rx_size_64),
- ICE_PF_STAT("tx_size_127", stats.tx_size_127),
- ICE_PF_STAT("rx_size_127", stats.rx_size_127),
- ICE_PF_STAT("tx_size_255", stats.tx_size_255),
- ICE_PF_STAT("rx_size_255", stats.rx_size_255),
- ICE_PF_STAT("tx_size_511", stats.tx_size_511),
- ICE_PF_STAT("rx_size_511", stats.rx_size_511),
- ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
- ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
- ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
- ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
- ICE_PF_STAT("tx_size_big", stats.tx_size_big),
- ICE_PF_STAT("rx_size_big", stats.rx_size_big),
- ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
- ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
- ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
- ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
- ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
- ICE_PF_STAT("rx_undersize", stats.rx_undersize),
- ICE_PF_STAT("rx_fragments", stats.rx_fragments),
- ICE_PF_STAT("rx_oversize", stats.rx_oversize),
- ICE_PF_STAT("rx_jabber", stats.rx_jabber),
- ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
- ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
- ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
- ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
- ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
- ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
- ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+ ICE_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
+ ICE_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
+ ICE_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
+ ICE_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
+ ICE_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
+ ICE_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
+ ICE_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
+ ICE_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
+ ICE_PF_STAT("port.tx_errors", stats.eth.tx_errors),
+ ICE_PF_STAT("port.tx_size_64", stats.tx_size_64),
+ ICE_PF_STAT("port.rx_size_64", stats.rx_size_64),
+ ICE_PF_STAT("port.tx_size_127", stats.tx_size_127),
+ ICE_PF_STAT("port.rx_size_127", stats.rx_size_127),
+ ICE_PF_STAT("port.tx_size_255", stats.tx_size_255),
+ ICE_PF_STAT("port.rx_size_255", stats.rx_size_255),
+ ICE_PF_STAT("port.tx_size_511", stats.tx_size_511),
+ ICE_PF_STAT("port.rx_size_511", stats.rx_size_511),
+ ICE_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
+ ICE_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
+ ICE_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
+ ICE_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
+ ICE_PF_STAT("port.tx_size_big", stats.tx_size_big),
+ ICE_PF_STAT("port.rx_size_big", stats.rx_size_big),
+ ICE_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
+ ICE_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
+ ICE_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
+ ICE_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
+ ICE_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
+ ICE_PF_STAT("port.rx_undersize", stats.rx_undersize),
+ ICE_PF_STAT("port.rx_fragments", stats.rx_fragments),
+ ICE_PF_STAT("port.rx_oversize", stats.rx_oversize),
+ ICE_PF_STAT("port.rx_jabber", stats.rx_jabber),
+ ICE_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
+ ICE_PF_STAT("port.rx_length_errors", stats.rx_len_errors),
+ ICE_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
+ ICE_PF_STAT("port.rx_crc_errors", stats.crc_errors),
+ ICE_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
+ ICE_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
+ ICE_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
};
static const u32 ice_regs_dump_list[] = {
@@ -114,6 +114,22 @@ static const u32 ice_regs_dump_list[] = {
QRX_ITR(0),
};
+struct ice_priv_flag {
+ char name[ETH_GSTRING_LEN];
+ u32 bitno; /* bit position in pf->flags */
+};
+
+#define ICE_PRIV_FLAG(_name, _bitno) { \
+ .name = _name, \
+ .bitno = _bitno, \
+}
+
+static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
+ ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
+};
+
+#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
+
/**
* ice_nvm_version_str - format the NVM version strings
* @hw: ptr to the hardware info
@@ -152,6 +168,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
static int ice_get_regs_len(struct net_device __always_unused *netdev)
@@ -203,6 +220,55 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data)
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
+static int ice_get_eeprom_len(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ return (int)(pf->hw.nvm.sr_words * sizeof(u16));
+}
+
+static int
+ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ u16 first_word, last_word, nwords;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ struct device *dev;
+ int ret = 0;
+ u16 *buf;
+
+ dev = &pf->pdev->dev;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ nwords = last_word - first_word + 1;
+
+ buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ status = ice_read_sr_buf(hw, first_word, &nwords, buf);
+ if (status) {
+ dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ eeprom->len = sizeof(u16) * nwords;
+ ret = -EIO;
+ goto out;
+ }
+
+ memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len);
+out:
+ devm_kfree(dev, buf);
+ return ret;
+}
+
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -238,17 +304,105 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
return;
for (i = 0; i < ICE_PF_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ snprintf(p, ETH_GSTRING_LEN, "%s",
ice_gstrings_pf_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ ice_gstrings_priv_flags[i].name);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
default:
break;
}
}
+static int
+ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ bool led_active;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ led_active = true;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ led_active = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_get_priv_flags - report device private flags
+ * @netdev: network interface device structure
+ *
+ * The get string set count and the string set should be matched for each
+ * flag returned. Add new strings for each flag to the ice_gstrings_priv_flags
+ * array.
+ *
+ * Returns a u32 bitmap of flags.
+ */
+static u32 ice_get_priv_flags(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u32 i, ret_flags = 0;
+
+ for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
+ const struct ice_priv_flag *priv_flag;
+
+ priv_flag = &ice_gstrings_priv_flags[i];
+
+ if (test_bit(priv_flag->bitno, pf->flags))
+ ret_flags |= BIT(i);
+ }
+
+ return ret_flags;
+}
+
+/**
+ * ice_set_priv_flags - set private flags
+ * @netdev: network interface device structure
+ * @flags: bit flags to be set
+ */
+static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u32 i;
+
+ if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
+ return -EINVAL;
+
+ for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
+ const struct ice_priv_flag *priv_flag;
+
+ priv_flag = &ice_gstrings_priv_flags[i];
+
+ if (flags & BIT(i))
+ set_bit(priv_flag->bitno, pf->flags);
+ else
+ clear_bit(priv_flag->bitno, pf->flags);
+ }
+
+ return 0;
+}
+
static int ice_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
@@ -272,6 +426,8 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* not safe.
*/
return ICE_ALL_STATS_LEN(netdev);
+ case ETH_SS_PRIV_FLAGS:
+ return ICE_PRIV_FLAG_ARRAY_SIZE;
default:
return -EOPNOTSUPP;
}
@@ -337,16 +493,20 @@ ice_get_ethtool_stats(struct net_device *netdev,
* @netdev: network interface device structure
* @ks: ethtool link ksettings struct to fill out
*/
-static void ice_phy_type_to_ethtool(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+static void
+ice_phy_type_to_ethtool(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
+ bool need_add_adv_mode = false;
struct ice_vsi *vsi = np->vsi;
+ u64 phy_types_high;
u64 phy_types_low;
hw_link_info = &vsi->port_info->phy.link_info;
phy_types_low = vsi->port_info->phy.phy_type_low;
+ phy_types_high = vsi->port_info->phy.phy_type_high;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
@@ -495,6 +655,95 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseLR4_Full);
}
+ if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseCR2_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseKR2_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseKR2_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseSR2_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseSR2_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ need_add_adv_mode = true;
+ }
+ if (need_add_adv_mode) {
+ need_add_adv_mode = false;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseCR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseSR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ need_add_adv_mode = true;
+ }
+ if (need_add_adv_mode) {
+ need_add_adv_mode = false;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseSR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseLR4_ER4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ need_add_adv_mode = true;
+ }
+ if (need_add_adv_mode) {
+ need_add_adv_mode = false;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseLR4_ER4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ need_add_adv_mode = true;
+ }
+ if (need_add_adv_mode)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseKR4_Full);
/* Autoneg PHY types */
if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
@@ -520,6 +769,24 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, advertising,
Autoneg);
}
+ if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
+ phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ }
}
#define TEST_SET_BITS_TIMEOUT 50
@@ -531,13 +798,15 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
* @ks: ethtool ksettings to fill in
* @netdev: network interface device structure
*/
-static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
- struct net_device *netdev)
+static void
+ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
+ struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
+ bool unrecog_phy_high = false;
bool unrecog_phy_low = false;
link_info = &vsi->port_info->phy.link_info;
@@ -699,25 +968,133 @@ static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseKR4_Full);
break;
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseCR2_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseKR2_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 50000baseKR2_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseSR2_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseSR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseLR4_ER4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseKR4_Full);
+ break;
default:
unrecog_phy_low = true;
}
- if (unrecog_phy_low) {
+ switch (link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100000baseKR4_Full);
+ break;
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR4_Full);
+ break;
+ default:
+ unrecog_phy_high = true;
+ }
+
+ if (unrecog_phy_low && unrecog_phy_high) {
/* if we got here and link is up something bad is afoot */
- netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n",
+ netdev_info(netdev,
+ "WARNING: Unrecognized PHY_Low (0x%llx).\n",
(u64)link_info->phy_type_low);
+ netdev_info(netdev,
+ "WARNING: Unrecognized PHY_High (0x%llx).\n",
+ (u64)link_info->phy_type_high);
}
/* Now that we've worked out everything that could be supported by the
* current PHY type, get what is supported by the NVM and intersect
* them to get what is truly supported
*/
- memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
+ memset(&cap_ksettings, 0, sizeof(cap_ksettings));
ice_phy_type_to_ethtool(netdev, &cap_ksettings);
ethtool_intersect_link_masks(ks, &cap_ksettings);
switch (link_info->link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ ks->base.speed = SPEED_100000;
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ ks->base.speed = SPEED_50000;
+ break;
case ICE_AQ_LINK_SPEED_40GB:
ks->base.speed = SPEED_40000;
break;
@@ -911,6 +1288,23 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
ethtool_link_ksettings_test_link_mode(ks, advertising,
40000baseKR4_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseCR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseKR2_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseSR2_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseCR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseSR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseLR4_ER4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseKR4_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
return adv_link_speed;
}
@@ -981,8 +1375,9 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
*
* Set speed/duplex per media_types advertised/forced
*/
-static int ice_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *ks)
+static int
+ice_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
{
u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -994,6 +1389,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
struct ice_port_info *p;
u8 autoneg_changed = 0;
enum ice_status status;
+ u64 phy_type_high;
u64 phy_type_low;
int err = 0;
bool linkup;
@@ -1020,7 +1416,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
/* copy the ksettings to copy_ks to avoid modifying the original */
- memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
+ memcpy(&copy_ks, ks, sizeof(copy_ks));
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
@@ -1039,7 +1435,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
return -EINVAL;
/* get our own copy of the bits to check against */
- memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
+ memset(&safe_ks, 0, sizeof(safe_ks));
safe_ks.base.cmd = copy_ks.base.cmd;
safe_ks.base.link_mode_masks_nwords =
copy_ks.base.link_mode_masks_nwords;
@@ -1053,8 +1449,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
/* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support.
*/
- if (memcmp(&copy_ks.base, &safe_ks.base,
- sizeof(struct ethtool_link_settings)))
+ if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base)))
return -EOPNOTSUPP;
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
@@ -1078,7 +1473,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
}
/* Copy abilities to config in case autoneg is not set below */
- memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data));
+ memset(&config, 0, sizeof(config));
config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE;
if (abilities->caps & ICE_AQC_PHY_AN_MODE)
config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
@@ -1109,7 +1504,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
adv_link_speed = curr_link_speed;
/* Convert the advertise link speeds to their corresponded PHY_TYPE */
- ice_update_phy_type(&phy_type_low, adv_link_speed);
+ ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
if (!autoneg_changed && adv_link_speed == curr_link_speed) {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -1128,7 +1523,9 @@ static int ice_set_link_ksettings(struct net_device *netdev,
/* set link and auto negotiation so changes take effect */
config.caps |= ICE_AQ_PHY_ENA_LINK;
- if (phy_type_low) {
+ if (phy_type_low || phy_type_high) {
+ config.phy_type_high = cpu_to_le64(phy_type_high) &
+ abilities->phy_type_high;
config.phy_type_low = cpu_to_le64(phy_type_low) &
abilities->phy_type_low;
} else {
@@ -1270,7 +1667,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
vsi->tx_rings[0]->count, new_tx_cnt);
tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
- sizeof(struct ice_ring), GFP_KERNEL);
+ sizeof(*tx_rings), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
@@ -1302,7 +1699,7 @@ process_rx:
vsi->rx_rings[0]->count, new_rx_cnt);
rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
- sizeof(struct ice_ring), GFP_KERNEL);
+ sizeof(*rx_rings), GFP_KERNEL);
if (!rx_rings) {
err = -ENOMEM;
goto done;
@@ -1421,21 +1818,36 @@ static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_port_info *pi;
+ struct ice_port_info *pi = np->vsi->port_info;
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_vsi *vsi = np->vsi;
+ enum ice_status status;
- pi = np->vsi->port_info;
- pause->autoneg =
- ((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ /* Initialize pause params */
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
- if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
- pause->rx_pause = 1;
- } else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
+ pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
+ GFP_KERNEL);
+ if (!pcaps)
+ return;
+
+ /* Get current phy config */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status)
+ goto out;
+
+ pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
pause->tx_pause = 1;
- } else if (pi->fc.current_mode == ICE_FC_FULL) {
+ if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
+
+out:
+ devm_kfree(&vsi->back->pdev->dev, pcaps);
}
/**
@@ -1667,6 +2079,258 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
}
+enum ice_container_type {
+ ICE_RX_CONTAINER,
+ ICE_TX_CONTAINER,
+};
+
+/**
+ * ice_get_rc_coalesce - get ITR values for specific ring container
+ * @ec: ethtool structure to fill with driver's coalesce settings
+ * @c_type: container type, RX or TX
+ * @rc: ring container that the ITR values will come from
+ *
+ * Query the device for ice_ring_container specific ITR values. This is
+ * done per ice_ring_container because each q_vector can have 1 or more rings
+ * and all of said ring(s) will have the same ITR values.
+ *
+ * Returns 0 on success, negative otherwise.
+ */
+static int
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
+ struct ice_ring_container *rc)
+{
+ struct ice_pf *pf = rc->ring->vsi->back;
+
+ switch (c_type) {
+ case ICE_RX_CONTAINER:
+ ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
+ ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ break;
+ case ICE_TX_CONTAINER:
+ ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
+ ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ break;
+ default:
+ dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
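
The get path above relies on a packed encoding: a single itr_setting field carries both the adaptive-moderation flag and the interval in microseconds, so reporting it back through ethtool is a matter of testing the flag and masking it off. Below is a minimal userspace model of that decode; the flag value is an assumption made only so the sketch is self-contained (the real ICE_ITR_DYNAMIC constant lives in the driver headers, not in this diff).

    #include <stdint.h>
    #include <stdio.h>

    /* assumed stand-in for the driver's ICE_ITR_DYNAMIC flag bit */
    #define ITR_DYNAMIC_FLAG 0x8000u
    #define ITR_IS_DYNAMIC(setting) (!!((setting) & ITR_DYNAMIC_FLAG))

    int main(void)
    {
        uint16_t itr_setting = 50 | ITR_DYNAMIC_FLAG; /* 50 usecs, adaptive on */

        /* mirrors ice_get_rc_coalesce(): flag and usec value are reported separately */
        printf("use_adaptive_coalesce = %d\n", ITR_IS_DYNAMIC(itr_setting));
        printf("coalesce_usecs        = %u\n",
               (unsigned int)(itr_setting & ~ITR_DYNAMIC_FLAG));
        return 0;
    }
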
+
+/**
+ * __ice_get_coalesce - get ITR/INTRL values for the device
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool structure to fill with driver's coalesce settings
+ * @q_num: queue number to get the coalesce settings for
+ */
+static int
+__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
+ int q_num)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ int tx = -EINVAL, rx = -EINVAL;
+ struct ice_vsi *vsi = np->vsi;
+
+ if (q_num < 0) {
+ rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[0]->q_vector->rx);
+ tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[0]->q_vector->tx);
+
+ goto update_coalesced_frames;
+ }
+
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx);
+ tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx);
+ } else if (q_num < vsi->num_rxq) {
+ rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx);
+ } else if (q_num < vsi->num_txq) {
+ tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx);
+ } else {
+ /* q_num is invalid for both Rx and Tx queues */
+ return -EINVAL;
+ }
+
+update_coalesced_frames:
+ /* either q_num is invalid for both Rx and Tx queues or getting coalesce
+ * settings failed completely
+ */
+ if (tx && rx)
+ return -EINVAL;
+
+ if (q_num < vsi->num_txq)
+ ec->tx_max_coalesced_frames_irq = vsi->work_lmt;
+
+ if (q_num < vsi->num_rxq)
+ ec->rx_max_coalesced_frames_irq = vsi->work_lmt;
+
+ return 0;
+}
+
+static int
+ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+{
+ return __ice_get_coalesce(netdev, ec, -1);
+}
+
+static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
+{
+ return __ice_get_coalesce(netdev, ec, q_num);
+}
+
+/**
+ * ice_set_rc_coalesce - set ITR values for specific ring container
+ * @c_type: container type, RX or TX
+ * @ec: ethtool structure from user to update ITR settings
+ * @rc: ring container whose ITR values will be updated
+ * @vsi: VSI associated to the ring container
+ *
+ * Set specific ITR values. This is done per ice_ring_container because each
+ * q_vector can have 1 or more rings and all of said ring(s) will have the same
+ * ITR values.
+ *
+ * Returns 0 on success, negative otherwise.
+ */
+static int
+ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ struct ice_ring_container *rc, struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 itr_setting;
+
+ if (!rc->ring)
+ return -EINVAL;
+
+ itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+
+ switch (c_type) {
+ case ICE_RX_CONTAINER:
+ if (ec->rx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_rx_coalesce) {
+ netdev_info(vsi->netdev,
+ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs > ICE_ITR_MAX) {
+ netdev_info(vsi->netdev,
+ "Invalid value, rx-usecs range is 0-%d\n",
+ ICE_ITR_MAX);
+ return -EINVAL;
+ }
+
+ if (ec->use_adaptive_rx_coalesce) {
+ rc->itr_setting |= ICE_ITR_DYNAMIC;
+ } else {
+ rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ }
+ break;
+ case ICE_TX_CONTAINER:
+ if (ec->tx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_tx_coalesce) {
+ netdev_info(vsi->netdev,
+ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
+ return -EINVAL;
+ }
+
+ if (ec->tx_coalesce_usecs > ICE_ITR_MAX) {
+ netdev_info(vsi->netdev,
+ "Invalid value, tx-usecs range is 0-%d\n",
+ ICE_ITR_MAX);
+ return -EINVAL;
+ }
+
+ if (ec->use_adaptive_tx_coalesce) {
+ rc->itr_setting |= ICE_ITR_DYNAMIC;
+ } else {
+ rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ }
+ break;
+ default:
+ dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
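
Two rules gate the set path before anything is written back: a request that enables adaptive moderation while also changing the fixed interval is rejected, and the interval must not exceed ICE_ITR_MAX. A rough standalone model of those checks follows; the flag bit and maximum are assumed values chosen only so the sketch compiles on its own, and the register alignment and target_itr update done by the driver are left out.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* assumed stand-ins for ICE_ITR_DYNAMIC and ICE_ITR_MAX */
    #define ITR_DYNAMIC_FLAG 0x8000u
    #define ITR_MAX_USECS    8160u

    /* models the validation in ice_set_rc_coalesce() for one ring container */
    static int set_itr(uint16_t *setting, unsigned int usecs, bool adaptive)
    {
        unsigned int cur = *setting & ~ITR_DYNAMIC_FLAG;

        /* the fixed interval may not be changed while adaptive is requested */
        if (usecs != cur && adaptive)
            return -EINVAL;
        if (usecs > ITR_MAX_USECS)
            return -EINVAL;

        if (adaptive)
            *setting |= ITR_DYNAMIC_FLAG;
        else
            *setting = (uint16_t)usecs; /* driver additionally aligns this value */
        return 0;
    }

    int main(void)
    {
        uint16_t setting = 50; /* fixed 50 usec interval */

        printf("adaptive on, same usecs: %d\n", set_itr(&setting, 50, true));
        printf("adaptive on, new usecs:  %d\n", set_itr(&setting, 80, true));
        printf("fixed, new usecs:        %d\n", set_itr(&setting, 80, false));
        return 0;
    }
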
+
+static int
+__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
+ int q_num)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ int rx = -EINVAL, tx = -EINVAL;
+ struct ice_vsi *vsi = np->vsi;
+
+ if (q_num < 0) {
+ int i;
+
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &q_vector->rx, vsi) ||
+ ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &q_vector->tx, vsi))
+ return -EINVAL;
+ }
+
+ goto set_work_lmt;
+ }
+
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi);
+ tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi);
+ } else if (q_num < vsi->num_rxq) {
+ rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi);
+ } else if (q_num < vsi->num_txq) {
+ tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi);
+ }
+
+ /* either q_num is invalid for both Rx and Tx queues or setting coalesce
+ * failed completely
+ */
+ if (rx && tx)
+ return -EINVAL;
+
+set_work_lmt:
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq,
+ ec->rx_max_coalesced_frames_irq);
+
+ return 0;
+}
+
+static int
+ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+{
+ return __ice_set_coalesce(netdev, ec, -1);
+}
+
+static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
+{
+ return __ice_set_coalesce(netdev, ec, q_num);
+}
+
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
@@ -1676,8 +2340,15 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_eeprom_len = ice_get_eeprom_len,
+ .get_eeprom = ice_get_eeprom,
+ .get_coalesce = ice_get_coalesce,
+ .set_coalesce = ice_set_coalesce,
.get_strings = ice_get_strings,
+ .set_phys_id = ice_set_phys_id,
.get_ethtool_stats = ice_get_ethtool_stats,
+ .get_priv_flags = ice_get_priv_flags,
+ .set_priv_flags = ice_set_priv_flags,
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.get_ringparam = ice_get_ringparam,
@@ -1689,6 +2360,9 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_per_queue_coalesce = ice_get_per_q_coalesce,
+ .set_per_queue_coalesce = ice_set_per_q_coalesce,
};
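
With get_coalesce/set_coalesce and their per-queue counterparts now wired into ice_ethtool_ops, the ITR settings are reachable through the standard ethtool interfaces; per-queue readout is also available from the ethtool utility on versions that support --per-queue ... --show-coalesce. The sketch below reads the device-wide values back over the legacy SIOCETHTOOL ioctl using ETHTOOL_GCOALESCE; this is generic ethtool plumbing rather than anything ice-specific.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(int argc, char **argv)
    {
        struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
        struct ifreq ifr;
        int fd;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
            return 1;
        }

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0) {
            perror("socket");
            return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&ec;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
            perror("SIOCETHTOOL");
            close(fd);
            return 1;
        }

        /* these fields are filled by the driver's .get_coalesce callback */
        printf("rx-usecs %u (adaptive %u), tx-usecs %u (adaptive %u)\n",
               ec.rx_coalesce_usecs, ec.use_adaptive_rx_coalesce,
               ec.tx_coalesce_usecs, ec.use_adaptive_tx_coalesce);
        close(fd);
        return 0;
    }
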
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 5507928c8fbe..6bf5cc064270 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -30,6 +30,7 @@
#define PF_FW_ATQLEN_ATQVFE_M BIT(28)
#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
+#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
#define PF_MBX_ARQBAH 0x0022E400
@@ -110,6 +111,7 @@
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index bb51dd7defb5..ef4c79b5aa32 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -346,6 +346,7 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
};
@@ -488,5 +489,7 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
#define ICE_LINK_SPEED_20000MBPS 20000
#define ICE_LINK_SPEED_25000MBPS 25000
#define ICE_LINK_SPEED_40000MBPS 40000
+#define ICE_LINK_SPEED_50000MBPS 50000
+#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 29b1dcfd4331..fa61203bee26 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -249,12 +249,12 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
/* allocate memory for both Tx and Rx ring pointers */
vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
- sizeof(struct ice_ring *), GFP_KERNEL);
+ sizeof(*vsi->tx_rings), GFP_KERNEL);
if (!vsi->tx_rings)
goto err_txrings;
vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
- sizeof(struct ice_ring *), GFP_KERNEL);
+ sizeof(*vsi->rx_rings), GFP_KERNEL);
if (!vsi->rx_rings)
goto err_rxrings;
@@ -262,7 +262,7 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
/* allocate memory for q_vector pointers */
vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
vsi->num_q_vectors,
- sizeof(struct ice_q_vector *),
+ sizeof(*vsi->q_vectors),
GFP_KERNEL);
if (!vsi->q_vectors)
goto err_vectors;
@@ -348,19 +348,25 @@ static int ice_get_free_slot(void *array, int size, int curr)
void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- struct ice_vsi_ctx ctxt;
+ struct ice_vsi_ctx *ctxt;
enum ice_status status;
+ ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return;
+
if (vsi->type == ICE_VSI_VF)
- ctxt.vf_num = vsi->vf_id;
- ctxt.vsi_num = vsi->vsi_num;
+ ctxt->vf_num = vsi->vf_id;
+ ctxt->vsi_num = vsi->vsi_num;
- memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
+ memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
- status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
+ status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
vsi->vsi_num);
+
+ devm_kfree(&pf->pdev->dev, ctxt);
}
/**
@@ -514,110 +520,89 @@ unlock_pf:
}
/**
- * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @vsi: the VSI getting queues
+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
*
- * Return 0 on success and a negative value on error
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
*/
-static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
- struct ice_pf *pf = vsi->back;
- int offset, ret = 0;
+ int offset, i;
- mutex_lock(&pf->avail_q_mutex);
- /* look for contiguous block of queues for Tx */
- offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
- 0, vsi->alloc_txq, 0);
- if (offset < ICE_MAX_TXQS) {
- int i;
-
- bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
- for (i = 0; i < vsi->alloc_txq; i++)
- vsi->txq_map[i] = i + offset;
- } else {
- ret = -ENOMEM;
- vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+ mutex_lock(qs_cfg->qs_mutex);
+ offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
+ 0, qs_cfg->q_count, 0);
+ if (offset >= qs_cfg->pf_map_size) {
+ mutex_unlock(qs_cfg->qs_mutex);
+ return -ENOMEM;
}
- /* look for contiguous block of queues for Rx */
- offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
- 0, vsi->alloc_rxq, 0);
- if (offset < ICE_MAX_RXQS) {
- int i;
-
- bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
- for (i = 0; i < vsi->alloc_rxq; i++)
- vsi->rxq_map[i] = i + offset;
- } else {
- ret = -ENOMEM;
- vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
- }
- mutex_unlock(&pf->avail_q_mutex);
+ bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
+ for (i = 0; i < qs_cfg->q_count; i++)
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ mutex_unlock(qs_cfg->qs_mutex);
- return ret;
+ return 0;
}
/**
- * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI
- * @vsi: the VSI getting queues
+ * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
*
- * Return 0 on success and a negative value on error
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
*/
-static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
- struct ice_pf *pf = vsi->back;
int i, index = 0;
- mutex_lock(&pf->avail_q_mutex);
-
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
- for (i = 0; i < vsi->alloc_txq; i++) {
- index = find_next_zero_bit(pf->avail_txqs,
- ICE_MAX_TXQS, index);
- if (index < ICE_MAX_TXQS) {
- set_bit(index, pf->avail_txqs);
- vsi->txq_map[i] = index;
- } else {
- goto err_scatter_tx;
- }
- }
- }
-
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
- index = find_next_zero_bit(pf->avail_rxqs,
- ICE_MAX_RXQS, index);
- if (index < ICE_MAX_RXQS) {
- set_bit(index, pf->avail_rxqs);
- vsi->rxq_map[i] = index;
- } else {
- goto err_scatter_rx;
- }
- }
+ mutex_lock(qs_cfg->qs_mutex);
+ for (i = 0; i < qs_cfg->q_count; i++) {
+ index = find_next_zero_bit(qs_cfg->pf_map,
+ qs_cfg->pf_map_size, index);
+ if (index >= qs_cfg->pf_map_size)
+ goto err_scatter;
+ set_bit(index, qs_cfg->pf_map);
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
}
+ mutex_unlock(qs_cfg->qs_mutex);
- mutex_unlock(&pf->avail_q_mutex);
return 0;
-
-err_scatter_rx:
- /* unflag any queues we have grabbed (i is failed position) */
- for (index = 0; index < i; index++) {
- clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
- vsi->rxq_map[index] = 0;
- }
- i = vsi->alloc_txq;
-err_scatter_tx:
- /* i is either position of failed attempt or vsi->alloc_txq */
+err_scatter:
for (index = 0; index < i; index++) {
- clear_bit(vsi->txq_map[index], pf->avail_txqs);
- vsi->txq_map[index] = 0;
+ clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
+ qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
}
+ mutex_unlock(qs_cfg->qs_mutex);
- mutex_unlock(&pf->avail_q_mutex);
return -ENOMEM;
}
/**
+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * This is an internal helper for assigning queues from the PF to a VSI. It
+ * first tries to find a contiguous block of queues; if that fails, it falls
+ * back to the scatter approach.
+ *
+ * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
+{
+ int ret = 0;
+
+ ret = __ice_vsi_get_qs_contig(qs_cfg);
+ if (ret) {
+ /* contig failed, so try with scatter approach */
+ qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
+ qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->scatter_count);
+ ret = __ice_vsi_get_qs_sc(qs_cfg);
+ }
+ return ret;
+}
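
The refactor above collapses the old per-VSI Tx and Rx allocators into a single helper driven by struct ice_qs_cfg: try to carve a contiguous block out of the PF's queue bitmap first, and only fall back to picking up scattered free queues (capped at the scatter limit) when that fails. The toy program below models the two strategies with a plain bool array; the real helpers use the kernel bitmap API, hold avail_q_mutex while touching the map, and unwind partially grabbed queues on a scatter failure, all of which is omitted here.

    #include <stdbool.h>
    #include <stdio.h>

    #define PF_MAP_SIZE 16

    /* toy PF ownership map: true = queue already taken (chosen so that no run
     * of four adjacent queues is free and the scatter fallback is exercised)
     */
    static bool pf_map[PF_MAP_SIZE] = {
        [0] = true, [1] = true, [3] = true, [5] = true,
        [7] = true, [9] = true, [11] = true, [13] = true,
    };

    /* grab q_count adjacent free queues, mirroring __ice_vsi_get_qs_contig() */
    static int get_qs_contig(int q_count, int *vsi_map)
    {
        for (int base = 0; base + q_count <= PF_MAP_SIZE; base++) {
            int len = 0;

            while (len < q_count && !pf_map[base + len])
                len++;
            if (len != q_count)
                continue;
            for (int i = 0; i < q_count; i++) {
                pf_map[base + i] = true;
                vsi_map[i] = base + i;
            }
            return 0;
        }
        return -1;
    }

    /* grab q_count free queues wherever they sit, mirroring __ice_vsi_get_qs_sc() */
    static int get_qs_scatter(int q_count, int *vsi_map)
    {
        int found = 0;

        for (int q = 0; q < PF_MAP_SIZE && found < q_count; q++) {
            if (!pf_map[q]) {
                pf_map[q] = true;
                vsi_map[found++] = q;
            }
        }
        return found == q_count ? 0 : -1;
    }

    int main(void)
    {
        int vsi_map[4];
        int n = 4;

        /* contiguous first, scatter as the fallback, as in __ice_vsi_get_qs() */
        if (get_qs_contig(n, vsi_map) && get_qs_scatter(n, vsi_map))
            return 1;
        for (int i = 0; i < n; i++)
            printf("vsi queue %d -> pf queue %d\n", i, vsi_map[i]);
        return 0;
    }
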
+
+/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -625,25 +610,35 @@ err_scatter_tx:
*/
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
+ struct ice_pf *pf = vsi->back;
+ struct ice_qs_cfg tx_qs_cfg = {
+ .qs_mutex = &pf->avail_q_mutex,
+ .pf_map = pf->avail_txqs,
+ .pf_map_size = ICE_MAX_TXQS,
+ .q_count = vsi->alloc_txq,
+ .scatter_count = ICE_MAX_SCATTER_TXQS,
+ .vsi_map = vsi->txq_map,
+ .vsi_map_offset = 0,
+ .mapping_mode = vsi->tx_mapping_mode
+ };
+ struct ice_qs_cfg rx_qs_cfg = {
+ .qs_mutex = &pf->avail_q_mutex,
+ .pf_map = pf->avail_rxqs,
+ .pf_map_size = ICE_MAX_RXQS,
+ .q_count = vsi->alloc_rxq,
+ .scatter_count = ICE_MAX_SCATTER_RXQS,
+ .vsi_map = vsi->rxq_map,
+ .vsi_map_offset = 0,
+ .mapping_mode = vsi->rx_mapping_mode
+ };
int ret = 0;
vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
- /* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
- * modes individually to scatter if assigning contiguous queues
- * to Rx or Tx fails
- */
- ret = ice_vsi_get_qs_contig(vsi);
- if (ret < 0) {
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
- vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
- ICE_MAX_SCATTER_TXQS);
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
- vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
- ICE_MAX_SCATTER_RXQS);
- ret = ice_vsi_get_qs_scatter(vsi);
- }
+ ret = __ice_vsi_get_qs(&tx_qs_cfg);
+ if (!ret)
+ ret = __ice_vsi_get_qs(&rx_qs_cfg);
return ret;
}
@@ -919,37 +914,41 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
*/
static int ice_vsi_init(struct ice_vsi *vsi)
{
- struct ice_vsi_ctx ctxt = { 0 };
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi_ctx *ctxt;
int ret = 0;
+ ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
switch (vsi->type) {
case ICE_VSI_PF:
- ctxt.flags = ICE_AQ_VSI_TYPE_PF;
+ ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
case ICE_VSI_VF:
- ctxt.flags = ICE_AQ_VSI_TYPE_VF;
+ ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
- ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
break;
default:
return -ENODEV;
}
- ice_set_dflt_vsi_ctx(&ctxt);
+ ice_set_dflt_vsi_ctx(ctxt);
/* if the switch is in VEB mode, allow VSI loopback */
if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
- ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_set_rss_vsi_ctx(&ctxt, vsi);
+ ice_set_rss_vsi_ctx(ctxt, vsi);
- ctxt.info.sw_id = vsi->port_info->sw_id;
- ice_vsi_setup_q_map(vsi, &ctxt);
+ ctxt->info.sw_id = vsi->port_info->sw_id;
+ ice_vsi_setup_q_map(vsi, ctxt);
- ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
+ ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
"Add VSI failed, err %d\n", ret);
@@ -957,11 +956,12 @@ static int ice_vsi_init(struct ice_vsi *vsi)
}
/* keep context for update VSI operations */
- vsi->info = ctxt.info;
+ vsi->info = ctxt->info;
/* record VSI number returned */
- vsi->vsi_num = ctxt.vsi_num;
+ vsi->vsi_num = ctxt->vsi_num;
+ devm_kfree(&pf->pdev->dev, ctxt);
return ret;
}
@@ -1614,11 +1614,14 @@ setup_rings:
/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @offset: offset within vsi->txq_map
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
-int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq;
@@ -1626,9 +1629,9 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
u8 num_q_grps, q_idx = 0;
enum ice_status status;
u16 buf_len, i, pf_q;
- int err = 0, tc = 0;
+ int err = 0, tc;
- buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
+ buf_len = sizeof(*qg_buf);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
@@ -1644,9 +1647,8 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
struct ice_tlan_ctx tlan_ctx = { 0 };
- pf_q = vsi->txq_map[q_idx];
- ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
- pf_q);
+ pf_q = vsi->txq_map[q_idx + offset];
+ ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
@@ -1655,7 +1657,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
*/
- vsi->tx_rings[q_idx]->tail =
+ rings[q_idx]->tail =
pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
num_q_grps, qg_buf, buf_len,
@@ -1674,7 +1676,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
*/
txq = &qg_buf->txqs[0];
if (pf_q == le16_to_cpu(txq->txq_id))
- vsi->tx_rings[q_idx]->txq_teid =
+ rings[q_idx]->txq_teid =
le32_to_cpu(txq->q_teid);
q_idx++;
@@ -1686,6 +1688,18 @@ err_cfg_txqs:
}
/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
+}
+
+/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
* @gran: interrupt rate limit granularity in usecs
@@ -1714,22 +1728,34 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
{
- u8 itr_gran = hw->itr_gran;
-
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
- rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_RX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
if (q_vector->num_ring_tx) {
struct ice_ring_container *rc = &q_vector->tx;
- rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_TX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
}
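
The GLINT_ITR writes above turn a microsecond setting into the register's interval field by aligning it to the hardware granularity and shifting right by ICE_ITR_GRAN_S. Assuming ICE_ITR_GRAN_S is 1, i.e. the 2 microsecond granularity that ice_verify_itr_gran() further down in this patch warns about when absent, the conversion works out as below; the alignment mask is likewise an assumption used only for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define ITR_GRAN_S 1                                         /* assumed: 2 usec granularity */
    #define ITR_REG_ALIGN(usecs) ((usecs) & ~((1u << ITR_GRAN_S) - 1)) /* assumed mask */

    int main(void)
    {
        uint32_t usecs[] = { 0, 3, 50, 8160 };

        for (unsigned int i = 0; i < sizeof(usecs) / sizeof(usecs[0]); i++)
            printf("%4u usecs -> interval field %u (written to GLINT_ITR)\n",
                   usecs[i], ITR_REG_ALIGN(usecs[i]) >> ITR_GRAN_S);
        return 0;
    }

So a 50 usec setting is written as 25, and an odd request such as 3 usecs is first rounded down to the 2 usec grid.
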
@@ -1808,26 +1834,34 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx ctxt = { 0 };
+ struct ice_vsi_ctx *ctxt;
enum ice_status status;
+ int ret = 0;
+
+ ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
/* Here we are configuring the VSI to let the driver add VLAN tags by
* setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
* insertion happens in the Tx hot path, in ice_tx_map.
*/
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
- ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- vsi->info.vlan_flags = ctxt.info.vlan_flags;
- return 0;
+ vsi->info.vlan_flags = ctxt->info.vlan_flags;
+out:
+ devm_kfree(dev, ctxt);
+ return ret;
}
/**
@@ -1839,35 +1873,42 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx ctxt = { 0 };
+ struct ice_vsi_ctx *ctxt;
enum ice_status status;
+ int ret = 0;
+
+ ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
/* Here we are configuring what the VSI should do with the VLAN tag in
* the Rx packet. We can either leave the tag in the packet or put it in
* the Rx descriptor.
*/
- if (ena) {
+ if (ena)
/* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
- } else {
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
+ else
/* Disable stripping. Leave tag in packet */
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
- }
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
/* Allow all packets untagged/tagged */
- ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+ ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
- ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- vsi->info.vlan_flags = ctxt.info.vlan_flags;
- return 0;
+ vsi->info.vlan_flags = ctxt->info.vlan_flags;
+out:
+ devm_kfree(dev, ctxt);
+ return ret;
}
/**
@@ -1897,9 +1938,12 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
* @vsi: the VSI being configured
* @rst_src: reset source
* @rel_vmvf_num: Relative id of VF/VM
+ * @rings: Tx ring array to be stopped
+ * @offset: offset within vsi->txq_map
*/
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num)
+static int
+ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring **rings, int offset)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -1927,19 +1971,18 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
ice_for_each_txq(vsi, i) {
u16 v_idx;
- if (!vsi->tx_rings || !vsi->tx_rings[i] ||
- !vsi->tx_rings[i]->q_vector) {
+ if (!rings || !rings[i] || !rings[i]->q_vector) {
err = -EINVAL;
goto err_out;
}
- q_ids[i] = vsi->txq_map[i];
- q_teids[i] = vsi->tx_rings[i]->txq_teid;
+ q_ids[i] = vsi->txq_map[i + offset];
+ q_teids[i] = rings[i]->txq_teid;
/* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+ wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
/* software is expected to wait for 100 ns */
ndelay(100);
@@ -1947,7 +1990,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
/* trigger a software interrupt for the vector associated to
* the queue to schedule NAPI handler
*/
- v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+ v_idx = rings[i]->q_vector->v_idx;
wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
@@ -1977,6 +2020,19 @@ err_alloc_q_ids:
}
/**
+ * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative id of VF/VM
+ */
+int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
+ enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
+{
+ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
+ 0);
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -2462,13 +2518,15 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
+ struct ice_vf *vf = NULL;
struct ice_pf *pf;
- struct ice_vf *vf;
if (!vsi->back)
return -ENODEV;
pf = vsi->back;
- vf = &pf->vf[vsi->vf_id];
+
+ if (vsi->type == ICE_VSI_VF)
+ vf = &pf->vf[vsi->vf_id];
/* do not unregister and free netdevs while driver is in the reset
* recovery pending state. Since reset/rebuild happens through PF
* service task workqueue, its not a good idea to unregister netdev
@@ -2581,6 +2639,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
goto err_vectors;
ice_vsi_map_rings_to_vectors(vsi);
+ /* Do not exit if configuring RSS had an issue; we can still
+ * receive traffic on the first queue, so there is no need to
+ * capture the return value.
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 3831b4f0960a..7988a53729a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -15,7 +15,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-int ice_vsi_cfg_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+int
+ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8725569d11f0..47cc3f905b7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -609,7 +609,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
}
}
- ice_vc_notify_link_state(pf);
+ if (!new_link_same_as_old && pf->num_alloc_vfs)
+ ice_vc_notify_link_state(pf);
return 0;
}
@@ -1356,14 +1357,39 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
/**
+ * ice_dis_ctrlq_interrupts - disable control queue interrupts
+ * @hw: pointer to HW structure
+ */
+static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
+{
+ /* disable Admin queue Interrupt causes */
+ wr32(hw, PFINT_FW_CTL,
+ rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
+
+ /* disable Mailbox queue Interrupt causes */
+ wr32(hw, PFINT_MBX_CTL,
+ rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
+
+ /* disable Control queue Interrupt causes */
+ wr32(hw, PFINT_OICR_CTL,
+ rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
+
+ ice_flush(hw);
+}
+
+/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure
*/
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
+ struct ice_hw *hw = &pf->hw;
+
+ ice_dis_ctrlq_interrupts(hw);
+
/* disable OICR interrupt */
- wr32(&pf->hw, PFINT_OICR_ENA, 0);
- ice_flush(&pf->hw);
+ wr32(hw, PFINT_OICR_ENA, 0);
+ ice_flush(hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
@@ -1378,6 +1404,32 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
}
/**
+ * ice_ena_ctrlq_interrupts - enable control queue interrupts
+ * @hw: pointer to HW structure
+ * @v_idx: HW vector index to associate the control queue interrupts with
+ */
+static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx)
+{
+ u32 val;
+
+ val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ PFINT_OICR_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_OICR_CTL, val);
+
+ /* enable Admin queue Interrupt causes */
+ val = ((v_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ PFINT_FW_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_FW_CTL, val);
+
+ /* enable Mailbox queue Interrupt causes */
+ val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+ PFINT_MBX_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_MBX_CTL, val);
+
+ ice_flush(hw);
+}
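
Enable and disable are deliberately asymmetric here: enabling writes the MSI-X vector index and the cause-enable bit as one value, while disabling does a read-modify-write that clears only the cause-enable bit and leaves the programmed vector alone. The same pattern is applied to the AdminQ (PFINT_FW_CTL), Mailbox (PFINT_MBX_CTL) and OICR (PFINT_OICR_CTL) registers. A small model with an assumed register layout (the real masks live in ice_hw_autogen.h):

    #include <stdint.h>
    #include <stdio.h>

    /* assumed layout: low bits select the MSI-X vector, bit 30 enables the cause */
    #define CTL_MSIX_INDX_M 0x7FFu
    #define CTL_CAUSE_ENA_M (1u << 30)

    static uint32_t pfint_ctl; /* stand-in for one PFINT_*_CTL register */

    /* mirrors ice_ena_ctrlq_interrupts(): program the vector and set cause-enable */
    static void ena_ctrlq_intr(uint16_t v_idx)
    {
        pfint_ctl = (v_idx & CTL_MSIX_INDX_M) | CTL_CAUSE_ENA_M;
    }

    /* mirrors ice_dis_ctrlq_interrupts(): clear only the cause-enable bit */
    static void dis_ctrlq_intr(void)
    {
        pfint_ctl &= ~CTL_CAUSE_ENA_M;
    }

    int main(void)
    {
        ena_ctrlq_intr(3);
        printf("after enable:  0x%08x\n", (unsigned int)pfint_ctl);
        dis_ctrlq_intr();
        printf("after disable: 0x%08x\n", (unsigned int)pfint_ctl);
        return 0;
    }
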
+
+/**
* ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
* @pf: board private structure
*
@@ -1389,8 +1441,6 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
int oicr_idx, err = 0;
- u8 itr_gran;
- u32 val;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
@@ -1439,24 +1489,9 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
skip_req_irq:
ice_ena_misc_vector(pf);
- val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
- PFINT_OICR_CTL_CAUSE_ENA_M);
- wr32(hw, PFINT_OICR_CTL, val);
-
- /* This enables Admin queue Interrupt causes */
- val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
- PFINT_FW_CTL_CAUSE_ENA_M);
- wr32(hw, PFINT_FW_CTL, val);
-
- /* This enables Mailbox queue Interrupt causes */
- val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
- PFINT_MBX_CTL_CAUSE_ENA_M);
- wr32(hw, PFINT_MBX_CTL, val);
-
- itr_gran = hw->itr_gran;
-
+ ice_ena_ctrlq_interrupts(hw, pf->hw_oicr_idx);
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
- ITR_TO_REG(ICE_ITR_8K, itr_gran));
+ ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
ice_flush(hw);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1516,8 +1551,8 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
- netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
- vsi->alloc_txq, vsi->alloc_rxq);
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
if (!netdev)
return -ENOMEM;
@@ -1531,6 +1566,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
csumo_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CRC |
NETIF_F_IPV6_CSUM;
vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -1869,7 +1905,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= pf->num_lan_msix;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
- sizeof(struct msix_entry), GFP_KERNEL);
+ sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
err = -ENOMEM;
@@ -1957,7 +1993,6 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int vectors = 0, hw_vectors = 0;
- ssize_t size;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
vectors = ice_ena_msix_range(pf);
@@ -1968,9 +2003,9 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return vectors;
/* set up vector assignment tracking */
- size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
-
- pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ pf->sw_irq_tracker =
+ devm_kzalloc(&pf->pdev->dev, sizeof(*pf->sw_irq_tracker) +
+ (sizeof(u16) * vectors), GFP_KERNEL);
if (!pf->sw_irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
@@ -1982,9 +2017,9 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
/* set up HW vector assignment tracking */
hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors);
-
- pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ pf->hw_irq_tracker =
+ devm_kzalloc(&pf->pdev->dev, sizeof(*pf->hw_irq_tracker) +
+ (sizeof(u16) * hw_vectors), GFP_KERNEL);
if (!pf->hw_irq_tracker) {
ice_clear_interrupt_scheme(pf);
return -ENOMEM;
@@ -1998,6 +2033,23 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_verify_itr_gran - verify driver's assumption of ITR granularity
+ * @pf: pointer to the PF structure
+ *
+ * There is no error returned here because the driver will be able to handle a
+ * different ITR granularity, but interrupt moderation will not be accurate if
+ * the driver's assumptions are not verified. This assumption is made so we can
+ * use constants in the hot path instead of accessing structure members.
+ */
+static void ice_verify_itr_gran(struct ice_pf *pf)
+{
+ if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
+ dev_warn(&pf->pdev->dev,
+ "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
+ (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
+}
+
+/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
*
@@ -2101,7 +2153,7 @@ static int ice_probe(struct pci_dev *pdev,
}
pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
- sizeof(struct ice_vsi *), GFP_KERNEL);
+ sizeof(*pf->vsi), GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_init_pf_unroll;
@@ -2133,7 +2185,7 @@ static int ice_probe(struct pci_dev *pdev,
}
/* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
+ pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw),
GFP_KERNEL);
if (!pf->first_sw) {
err = -ENOMEM;
@@ -2163,6 +2215,7 @@ static int ice_probe(struct pci_dev *pdev,
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
ice_verify_cacheline_size(pf);
+ ice_verify_itr_gran(pf);
return 0;
@@ -2419,10 +2472,12 @@ static void ice_set_rx_mode(struct net_device *netdev)
* @addr: the MAC address entry being added
* @vid: VLAN id
* @flags: instructions from stack about fdb operation
+ * @extack: netlink extended ack
*/
-static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags)
+static int
+ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
+ struct net_device *dev, const unsigned char *addr, u16 vid,
+ u16 flags, struct netlink_ext_ack __always_unused *extack)
{
int err;
@@ -2546,7 +2601,8 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
- err = ice_vsi_cfg_txqs(vsi);
+
+ err = ice_vsi_cfg_lan_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -2945,12 +3001,91 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
+ * ice_force_phys_link_state - Force the physical link state
+ * @vsi: VSI to force the physical link state to up/down
+ * @link_up: true/false indicates to set the physical link to up/down
+ *
+ * Force the physical link state by getting the current PHY capabilities from
+ * hardware and setting the PHY config based on the determined capabilities. If
+ * the link changes, a link event will be triggered because both the Enable Automatic
+ * Link Update and LESM Enable bits are set when setting the PHY capabilities.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_port_info *pi;
+ struct device *dev;
+ int retcode;
+
+ if (!vsi || !vsi->port_info || !vsi->back)
+ return -EINVAL;
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ dev = &vsi->back->pdev->dev;
+
+ pi = vsi->port_info;
+
+ pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (retcode) {
+ dev_err(dev,
+ "Failed to get phy capabilities, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ goto out;
+ }
+
+ /* No change in link */
+ if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
+ link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
+ goto out;
+
+ cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ retcode = -ENOMEM;
+ goto out;
+ }
+
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ cfg->low_power_ctrl = pcaps->low_power_ctrl;
+ cfg->eee_cap = pcaps->eee_cap;
+ cfg->eeer_value = pcaps->eeer_value;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ if (link_up)
+ cfg->caps |= ICE_AQ_PHY_ENA_LINK;
+ else
+ cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
+
+ retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+ if (retcode) {
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ }
+
+ devm_kfree(dev, cfg);
+out:
+ devm_kfree(dev, pcaps);
+ return retcode;
+}
+
+/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err;
+ int i, tx_err, rx_err, link_err = 0;
/* Caller of this function is expected to set the
* vsi->state __ICE_DOWN bit
@@ -2961,7 +3096,8 @@ int ice_down(struct ice_vsi *vsi)
}
ice_vsi_dis_irq(vsi);
- tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
+
+ tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
@@ -2975,13 +3111,21 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi);
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+ link_err = ice_force_phys_link_state(vsi, false);
+ if (link_err)
+ netdev_err(vsi->netdev,
+ "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+ }
+
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err) {
+ if (tx_err || rx_err || link_err) {
netdev_err(vsi->netdev,
"Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
@@ -3601,30 +3745,39 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
struct device *dev = &vsi->back->pdev->dev;
struct ice_aqc_vsi_props *vsi_props;
struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx ctxt = { 0 };
+ struct ice_vsi_ctx *ctxt;
enum ice_status status;
+ int ret = 0;
vsi_props = &vsi->info;
- ctxt.info = vsi->info;
+
+ ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
if (bmode == BRIDGE_MODE_VEB)
/* change from VEPA to VEB mode */
- ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
else
/* change from VEB to VEPA mode */
- ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
- ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+ ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
/* Update sw flags for book keeping */
- vsi_props->sw_flags = ctxt.info.sw_flags;
+ vsi_props->sw_flags = ctxt->info.sw_flags;
- return 0;
+out:
+ devm_kfree(dev, ctxt);
+ return ret;
}
/**
@@ -3641,7 +3794,8 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
*/
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 __always_unused flags, struct netlink_ext_ack *extack)
+ u16 __always_unused flags,
+ struct netlink_ext_ack __always_unused *extack)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_pf *pf = np->vsi->back;
@@ -3814,8 +3968,14 @@ static int ice_open(struct net_device *netdev)
netif_carrier_off(netdev);
- err = ice_vsi_open(vsi);
+ err = ice_force_phys_link_state(vsi, true);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set physical link up, error %d\n", err);
+ return err;
+ }
+ err = ice_vsi_open(vsi);
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 3274c543283c..413fdbbcc4d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -125,6 +125,63 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
}
/**
+ * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq
+ * method. Ownership of the NVM is taken before reading the buffer and later
+ * released.
+ */
+static enum ice_status
+ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
+{
+ enum ice_status status;
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ do {
+ u16 read_size, off_w;
+
+ /* Calculate the number of words we should read in this step.
+ * It's not allowed to read more than one page at a time or
+ * to cross page boundaries.
+ */
+ off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;
+ read_size = off_w ?
+ min_t(u16, *words,
+ (ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) :
+ min_t(u16, (*words - words_read),
+ ICE_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is last command, if so set proper flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ status = ice_read_sr_aq(hw, offset, read_size,
+ data + words_read, last_cmd);
+ if (status)
+ goto read_nvm_buf_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buf_aq_exit:
+ *words = words_read;
+ return status;
+}
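
The loop above never lets one AdminQ read cross a Shadow RAM sector boundary: a request that starts mid-sector gets a short first chunk up to the boundary, then whole-sector (or remainder) chunks after that. The standalone program below replays that chunking for one example request; the sector size is an assumed stand-in for ICE_SR_SECTOR_SIZE_IN_WORDS.

    #include <stdio.h>

    #define SR_SECTOR_WORDS 0x800u /* assumed sector size, in 16-bit words */

    int main(void)
    {
        unsigned int offset = 0x7FA; /* six words short of a sector boundary */
        unsigned int words = 20;     /* total words requested by the caller */
        unsigned int words_read = 0;

        while (words_read < words) {
            unsigned int off_w = offset % SR_SECTOR_WORDS;
            unsigned int read_size;

            /* same rule as ice_read_sr_buf_aq(): stop at the sector boundary */
            if (off_w)
                read_size = words < SR_SECTOR_WORDS - off_w ?
                            words : SR_SECTOR_WORDS - off_w;
            else
                read_size = words - words_read < SR_SECTOR_WORDS ?
                            words - words_read : SR_SECTOR_WORDS;

            printf("AQ read: %2u words at word offset 0x%04x\n",
                   read_size, offset);
            words_read += read_size;
            offset += read_size;
        }
        return 0;
    }

For the example request this prints a 6-word read ending at the boundary followed by a 14-word read starting on it.
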
+
+/**
* ice_acquire_nvm - Generic request for acquiring the NVM ownership
* @hw: pointer to the HW structure
* @access: NVM access type (read or write)
@@ -234,3 +291,28 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
+
+/**
+ * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the SR using the ice_read_sr_buf_aq
+ * method. The buffer read is preceded by taking NVM ownership and followed
+ * by its release.
+ */
+enum ice_status
+ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
+{
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (!status) {
+ status = ice_read_sr_buf_aq(hw, offset, words, data);
+ ice_release_nvm(hw);
+ }
+
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index a1681853df2e..56049739a250 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -85,37 +85,59 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
- * ice_aq_query_sched_elems - query scheduler elements
+ * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the hw struct
- * @elems_req: number of elements to query
+ * @cmd_opc: cmd opcode
+ * @elems_req: number of elements to request
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
- * @elems_ret: returns total number of elements returned
+ * @elems_resp: returns total number of elements in the response
* @cd: pointer to command details structure or NULL
*
- * Query scheduling elements (0x0404)
+ * This function sends a scheduling elements cmd (cmd_opc)
*/
static enum ice_status
-ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_get_elem *buf, u16 buf_size,
- u16 *elems_ret, struct ice_sq_cd *cd)
+ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
+ u16 elems_req, void *buf, u16 buf_size,
+ u16 *elems_resp, struct ice_sq_cd *cd)
{
- struct ice_aqc_get_cfg_elem *cmd;
+ struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc;
enum ice_status status;
- cmd = &desc.params.get_update_elem;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems);
+ cmd = &desc.params.sched_elem_cmd;
+ ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
cmd->num_elem_req = cpu_to_le16(elems_req);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status && elems_ret)
- *elems_ret = le16_to_cpu(cmd->num_elem_resp);
+ if (!status && elems_resp)
+ *elems_resp = le16_to_cpu(cmd->num_elem_resp);
return status;
}
/**
+ * ice_aq_query_sched_elems - query scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to query
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements returned
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduling elements (0x0404)
+ */
+static enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
+}
+
+/**
* ice_sched_query_elem - query element information from hw
* @hw: pointer to the hw struct
* @node_teid: node teid to be queried
@@ -218,20 +240,9 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd)
{
- struct ice_aqc_add_move_delete_elem *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- cmd = &desc.params.add_move_delete_elem;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- cmd->num_grps_req = cpu_to_le16(grps_req);
-
- status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status && grps_del)
- *grps_del = le16_to_cpu(cmd->num_grps_updated);
-
- return status;
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_del, cd);
}
/**
@@ -442,52 +453,9 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd)
{
- struct ice_aqc_add_move_delete_elem *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- cmd = &desc.params.add_move_delete_elem;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- cmd->num_grps_req = cpu_to_le16(grps_req);
- status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status && grps_added)
- *grps_added = le16_to_cpu(cmd->num_grps_updated);
-
- return status;
-}
-
-/**
- * ice_suspend_resume_elems - suspend/resume scheduler elements
- * @hw: pointer to the hw struct
- * @elems_req: number of elements to suspend
- * @buf: pointer to buffer
- * @buf_size: buffer size in bytes
- * @elems_ret: returns total number of elements suspended
- * @cd: pointer to command details structure or NULL
- * @cmd_code: command code for suspend or resume
- *
- * suspend/resume scheduler elements
- */
-static enum ice_status
-ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
- u16 *elems_ret, struct ice_sq_cd *cd,
- enum ice_adminq_opc cmd_code)
-{
- struct ice_aqc_get_cfg_elem *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- cmd = &desc.params.get_update_elem;
- ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
- cmd->num_elem_req = cpu_to_le16(elems_req);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status && elems_ret)
- *elems_ret = le16_to_cpu(cmd->num_elem_resp);
- return status;
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_added, cd);
}
/**
@@ -506,8 +474,9 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
- return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
- cd, ice_aqc_opc_suspend_sched_elems);
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
}
/**
@@ -526,8 +495,9 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
- return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
- cd, ice_aqc_opc_resume_sched_elems);
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
}
/**
@@ -591,23 +561,18 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
}
/**
- * ice_sched_clear_tx_topo - clears the schduler tree nodes
- * @pi: port information structure
+ * ice_sched_clear_agg - clears the agg related information
+ * @hw: pointer to the hardware structure
*
- * This function removes all the nodes from HW as well as from SW DB.
+ * This function removes the aggregator list and frees up the aggregator
+ * related memory previously allocated.
*/
-static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
+void ice_sched_clear_agg(struct ice_hw *hw)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
- struct ice_hw *hw;
-
- if (!pi)
- return;
-
- hw = pi->hw;
- list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *vtmp;
@@ -616,8 +581,21 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
list_del(&agg_vsi_info->list_entry);
devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
}
+ list_del(&agg_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), agg_info);
}
+}
+/**
+ * ice_sched_clear_tx_topo - clears the scheduler tree nodes
+ * @pi: port information structure
+ *
+ * This function removes all the nodes from HW as well as from SW DB.
+ */
+static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
+{
+ if (!pi)
+ return;
if (pi->root) {
ice_free_sched_node(pi, pi->root);
pi->root = NULL;
@@ -1035,7 +1013,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
/* initialize the port for handling the scheduler tree */
pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock);
- INIT_LIST_HEAD(&pi->agg_list);
err_init_port:
if (status && pi->root) {
@@ -1089,11 +1066,10 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
hw->max_children[i] = le16_to_cpu(max_sibl);
}
- hw->layer_info = (struct ice_aqc_layer_props *)
- devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
- (hw->num_tx_sched_layers *
- sizeof(*hw->layer_info)),
- GFP_KERNEL);
+ hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
+ (hw->num_tx_sched_layers *
+ sizeof(*hw->layer_info)),
+ GFP_KERNEL);
if (!hw->layer_info) {
status = ICE_ERR_NO_MEMORY;
goto sched_query_out;
@@ -1367,9 +1343,14 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
node = node->sibling;
}
+ /* The tree has an intermediate node that can accommodate this new
+ * VSI, so there is no need to calculate supported nodes for the
+ * layers below.
+ */
+ if (node)
+ break;
/* all the nodes are full, allocate a new one */
- if (!node)
- num_nodes[i]++;
+ num_nodes[i]++;
}
}
@@ -1618,7 +1599,8 @@ ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
- list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
+ list_entry) {
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *vtmp;
@@ -1634,6 +1616,23 @@ ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
}
/**
+ * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
+ * @node: pointer to the sub-tree node
+ *
+ * This function checks for a leaf node presence in a given sub-tree node.
+ */
+static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
+{
+ u8 i;
+
+ for (i = 0; i < node->num_children; i++)
+ if (ice_sched_is_leaf_node_present(node->children[i]))
+ return true;
+ /* check for a leaf node */
+ return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
+}
+
+/**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1667,6 +1666,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_node)
continue;
+ if (ice_sched_is_leaf_node_present(vsi_node)) {
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "VSI has leaf nodes in TC %d\n", i);
+ status = ICE_ERR_IN_USE;
+ goto exit_sched_rm_vsi_cfg;
+ }
while (j < vsi_node->num_children) {
if (vsi_node->children[j]->owner == owner) {
ice_free_sched_node(pi, vsi_node->children[j]);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index da5b4c166da8..bee8221ad146 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -28,6 +28,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
+void ice_sched_clear_agg(struct ice_hw *hw);
+
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 533b989a23e1..d2db0d04e117 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -85,6 +85,12 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
case ICE_AQ_LINK_SPEED_40GB:
speed = ICE_LINK_SPEED_40000MBPS;
break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ speed = ICE_LINK_SPEED_50000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = ICE_LINK_SPEED_100000MBPS;
+ break;
default:
speed = ICE_LINK_SPEED_UNKNOWN;
break;
@@ -116,6 +122,9 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
break;
case ICE_AQ_LINK_SPEED_40GB:
/* fall through */
+ case ICE_AQ_LINK_SPEED_50GB:
+ /* fall through */
+ case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;
default:
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index f49f299ddf2c..683f48824a29 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -22,6 +22,7 @@ enum ice_status {
ICE_ERR_OUT_OF_RANGE = -13,
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
+ ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_BUF_TOO_SHORT = -52,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 2e5693107fa4..09d1c314b68f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -98,7 +98,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
u8 i;
recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
- sizeof(struct ice_sw_recipe), GFP_KERNEL);
+ sizeof(*recps), GFP_KERNEL);
if (!recps)
return ICE_ERR_NO_MEMORY;
@@ -1538,9 +1538,20 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
} else if (!list_elem->vsi_list_info) {
status = ICE_ERR_DOES_NOT_EXIST;
goto exit;
+ } else if (list_elem->vsi_list_info->ref_cnt > 1) {
+ /* a ref_cnt > 1 indicates that the vsi_list is being
+ * shared by multiple rules. Decrement the ref_cnt and
+ * remove this rule, but do not modify the list, as it
+ * is in-use by other rules.
+ */
+ list_elem->vsi_list_info->ref_cnt--;
+ remove_rule = true;
} else {
- if (list_elem->vsi_list_info->ref_cnt > 1)
- list_elem->vsi_list_info->ref_cnt--;
+ /* a ref_cnt of 1 indicates the vsi_list is only used
+ * by one rule. However, the original removal request is only
+ * for a single VSI. Update the vsi_list first, and only
+ * remove the rule if there are no further VSIs in this list.
+ */
vsi_handle = f_entry->fltr_info.vsi_handle;
status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 49fc38094185..c289d97f477d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -48,7 +48,6 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring)
*/
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
- unsigned long size;
u16 i;
/* ring already cleared, nothing to do */
@@ -59,8 +58,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
- size = sizeof(struct ice_tx_buf) * tx_ring->count;
- memset(tx_ring->tx_buf, 0, size);
+ memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
/* Zero out the descriptor ring */
memset(tx_ring->desc, 0, tx_ring->size);
@@ -226,21 +224,21 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
- int bi_size;
if (!dev)
return -ENOMEM;
/* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_buf);
- bi_size = sizeof(struct ice_tx_buf) * tx_ring->count;
- tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
+ tx_ring->tx_buf =
+ devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
+ GFP_KERNEL);
if (!tx_ring->tx_buf)
return -ENOMEM;
/* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ 4096);
tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
@@ -267,7 +265,6 @@ err:
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- unsigned long size;
u16 i;
/* ring already cleared, nothing to do */
@@ -292,8 +289,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
rx_buf->page_offset = 0;
}
- size = sizeof(struct ice_rx_buf) * rx_ring->count;
- memset(rx_ring->rx_buf, 0, size);
+ memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -331,15 +327,15 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int bi_size;
if (!dev)
return -ENOMEM;
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
- bi_size = sizeof(struct ice_rx_buf) * rx_ring->count;
- rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
+ rx_ring->rx_buf =
+ devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
+ GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
@@ -1053,6 +1049,69 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
}
/**
+ * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
+ * @itr_idx: interrupt throttling index
+ * @reg_itr: interrupt throttling value adjusted based on ITR granularity
+ */
+static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
+{
+ return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
+ (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
+}
+
+/**
+ * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
+ * @vsi: the VSI associated with the q_vector
+ * @q_vector: q_vector for which ITR is being updated and interrupt enabled
+ */
+static void
+ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_ring_container *rc;
+ u32 itr_val;
+
+ /* This block of logic allows us to get away with only updating
+ * one ITR value with each interrupt. The idea is to perform a
+ * pseudo-lazy update with the following criteria.
+ *
+ * 1. Rx is given higher priority than Tx if both are in the same state.
+ * 2. If we have to reduce an ITR, that reduction is given highest priority.
+ * 3. We then give priority to increasing the ITR based on amount.
+ */
+ if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+ rc = &q_vector->rx;
+ /* Rx ITR needs to be reduced, this is highest priority */
+ itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+ rc->current_itr = rc->target_itr;
+ } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+ ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
+ (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
+ rc = &q_vector->tx;
+ /* Tx ITR needs to be reduced, this is second priority
+ * Tx ITR needs to be increased more than Rx, fourth priority
+ */
+ itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+ rc->current_itr = rc->target_itr;
+ } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+ rc = &q_vector->rx;
+ /* Rx ITR needs to be increased, third priority */
+ itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
+ rc->current_itr = rc->target_itr;
+ } else {
+ /* Still have to re-enable the interrupts */
+ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ }
+
+ if (!test_bit(__ICE_DOWN, vsi->state)) {
+ int vector = vsi->hw_base_vector + q_vector->v_idx;
+
+ wr32(hw, GLINT_DYN_CTL(vector), itr_val);
+ }
+}
+
+/**
* ice_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1108,9 +1167,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+ ice_update_ena_itr(vsi, q_vector);
- return min(work_done, budget - 1);
+ return min_t(int, work_done, budget - 1);
}
/* helper function for building cmd/type/offset */
@@ -1402,6 +1461,12 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
break;
case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+ l4_len = sizeof(struct sctphdr) >> 2;
+ offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+
default:
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return -1;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 75d0eaf6c9dd..fc358ea81816 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -116,16 +116,17 @@ enum ice_rx_dtype {
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
-#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define ICE_ITR_8K 125
+#define ICE_ITR_8K 124
#define ICE_ITR_20K 50
-#define ICE_DFLT_TX_ITR ICE_ITR_20K
-#define ICE_DFLT_RX_ITR ICE_ITR_20K
-/* apply ITR granularity translation to program the register. itr_gran is either
- * 2 or 4 usecs so we need to divide by 2 first then shift by that value
- */
-#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
- ((itr_gran) / 2))
+#define ICE_ITR_MAX 8160
+#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
+#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
+#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
+#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
+#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
+#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
#define ICE_DFLT_INTRL 0
@@ -180,13 +181,20 @@ enum ice_latency_range {
};
struct ice_ring_container {
- /* array of pointers to rings */
+ /* head of linked-list of rings */
struct ice_ring *ring;
+ unsigned long next_update; /* jiffies value of next queue update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range;
- int itr_idx; /* index in the interrupt vector */
- u16 itr;
+ int itr_idx; /* index in the interrupt vector */
+ u16 target_itr; /* value in usecs divided by the hw->itr_gran */
+ u16 current_itr; /* value in usecs divided by the hw->itr_gran */
+ /* high bit set means dynamic ITR, rest is used to store user
+ * readable ITR value in usecs and must be converted before programming
+ * to a register.
+ */
+ u16 itr_setting;
};
/* iterator for handling rings in ring container */
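An editor's note on how the reworked ITR fields and macros above are meant to compose; this sketch is not part of the patch, and rc plus the final register programming are assumptions about the call sites elsewhere in the driver:

	struct ice_ring_container *rc = &q_vector->rx;		/* hypothetical */
	bool adaptive = ITR_IS_DYNAMIC(rc->itr_setting);	/* high bit = dynamic ITR */
	u16 usecs = ITR_TO_REG(rc->itr_setting);		/* strip the flag bit */
	u16 reg_val = usecs >> ICE_ITR_GRAN_S;			/* scale to 2 us register units */

Per the field comments above, target_itr/current_itr already hold granularity-scaled values like reg_val, which is what ice_buildreg_itr() in the ice_txrx.c hunk earlier shifts into GLINT_DYN_CTL.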
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 0ea428104215..17086d5b5c33 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -90,6 +90,7 @@ enum ice_vsi_type {
struct ice_link_status {
/* Refer to ice_aq_phy_type for bits definition */
u64 phy_type_low;
+ u64 phy_type_high;
u16 max_frame_size;
u16 link_speed;
u16 req_speeds;
@@ -118,6 +119,7 @@ struct ice_phy_info {
struct ice_link_status link_info;
struct ice_link_status link_info_old;
u64 phy_type_low;
+ u64 phy_type_high;
enum ice_media_type media_type;
u8 get_link_info;
};
@@ -272,7 +274,6 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
- struct list_head agg_list; /* lists all aggregator */
u8 lport;
#define ICE_LPORT_MASK 0xff
u8 is_vf;
@@ -326,6 +327,7 @@ struct ice_hw {
u8 max_cgds;
u8 sw_entry_point_layer;
u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ struct list_head agg_list; /* lists all aggregators */
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 05ff4f910649..57155b4a59dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -173,7 +173,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
- first = vf->first_vector_idx;
+ first = vf->first_vector_idx +
+ hw->func_caps.common_cap.msix_vector_first_id;
last = first + pf->num_vf_msix - 1;
for (v = first; v <= last; v++) {
u32 reg;
@@ -224,13 +225,15 @@ void ice_free_vfs(struct ice_pf *pf)
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct ice_vsi *vsi;
+
if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
continue;
+ vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
/* stop rings without wait time */
- ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
- ICE_NO_RESET, i);
- ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
+ ice_vsi_stop_rx_rings(vsi);
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
@@ -308,6 +311,11 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ /* Clear the VF's ARQLEN register. This is how the VF detects reset,
+ * since the VFGEN_RSTAT register doesn't stick at 0 after reset.
+ */
+ wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
+
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
*/
@@ -343,25 +351,33 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx ctxt = { 0 };
+ struct ice_vsi_ctx *ctxt;
enum ice_status status;
+ int ret = 0;
+
+ ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR;
- ctxt.info.pvid = cpu_to_le16(vid);
- ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR);
+ ctxt->info.pvid = cpu_to_le16(vid);
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- vsi->info.pvid = ctxt.info.pvid;
- vsi->info.vlan_flags = ctxt.info.vlan_flags;
- return 0;
+ vsi->info.pvid = ctxt->info.pvid;
+ vsi->info.vlan_flags = ctxt->info.vlan_flags;
+out:
+ devm_kfree(dev, ctxt);
+ return ret;
}
/**
@@ -508,7 +524,8 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
- first = vf->first_vector_idx;
+ first = vf->first_vector_idx +
+ hw->func_caps.common_cap.msix_vector_first_id;
last = (first + pf->num_vf_msix) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
@@ -831,6 +848,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
bool rsd = false;
u32 reg;
int i;
@@ -843,17 +861,18 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_trigger_vf_reset(vf, is_vflr);
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
- vf->vf_id);
- ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
+ ice_vsi_stop_rx_rings(vsi);
clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
} else {
/* Call Disable LAN Tx queue AQ call even when queues are not
* enabled. This is needed for successful completion of VFR
*/
- ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
- NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
+ ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
+ vf->vf_id, NULL);
}
/* poll VPGEN_VFRSTAT reg to make sure
@@ -1614,7 +1633,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
+ if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
dev_err(&vsi->back->pdev->dev,
"Failed to stop tx rings on VSI %d\n",
vsi->vsi_num);
@@ -1784,7 +1803,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->num_txq = qci->num_queue_pairs;
vsi->num_rxq = qci->num_queue_pairs;
- if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
+ if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
aq_ret = 0;
else
aq_ret = ICE_ERR_PARAM;
@@ -2475,11 +2494,12 @@ int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi_ctx ctx = { 0 };
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct ice_vsi_ctx *ctx;
+ enum ice_status status;
struct ice_vf *vf;
- int status;
+ int ret = 0;
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
@@ -2499,25 +2519,31 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
return 0;
}
- ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
if (ena) {
- ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
- ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
}
- status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL);
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) {
dev_dbg(&pf->pdev->dev,
"Error %d, failed to update VSI* parameters\n", status);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
vf->spoofchk = ena;
- vsi->info.sec_flags = ctx.info.sec_flags;
- vsi->info.sw_flags2 = ctx.info.sw_flags2;
-
- return status;
+ vsi->info.sec_flags = ctx->info.sec_flags;
+ vsi->info.sw_flags2 = ctx->info.sw_flags2;
+out:
+ devm_kfree(&pf->pdev->dev, ctx);
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index fe1592ae8769..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -515,7 +515,7 @@ struct igb_adapter {
/* OS defined structs */
struct pci_dev *pdev;
- struct mutex stats64_lock;
+ spinlock_t stats64_lock;
struct rtnl_link_stats64 stats64;
/* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7426060b678f..c57671068245 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
int i, j;
char *p;
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
}
static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 87bdf1604ae2..69b230c53fed 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -39,7 +39,7 @@
#include "igb.h"
#define MAJ 5
-#define MIN 4
+#define MIN 6
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
@@ -1189,15 +1189,15 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
{
struct igb_q_vector *q_vector;
struct igb_ring *ring;
- int ring_count, size;
+ int ring_count;
+ size_t size;
/* igb only supports 1 Tx and/or 1 Rx queue per vector */
if (txr_count > 1 || rxr_count > 1)
return -ENOMEM;
ring_count = txr_count + rxr_count;
- size = sizeof(struct igb_q_vector) +
- (sizeof(struct igb_ring) * ring_count);
+ size = struct_size(q_vector, ring, ring_count);
/* allocate q_vector and rings */
q_vector = adapter->q_vector[v_idx];
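struct_size() is the helper from <linux/overflow.h>: for a struct whose last member is a flexible/zero-length array (igb_q_vector ends in its ring[] array), it yields the same total as the old open-coded expression, but saturates instead of wrapping on overflow so the allocation fails cleanly rather than being undersized. Restating the line above:

	/* size == sizeof(struct igb_q_vector) +
	 *         ring_count * sizeof(struct igb_ring)
	 * or SIZE_MAX if that arithmetic would overflow
	 */
	size_t size = struct_size(q_vector, ring, ring_count);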
@@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -2486,7 +2486,8 @@ static int igb_set_features(struct net_device *netdev,
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
@@ -2580,9 +2581,11 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
int traffic_class,
struct igb_nfc_filter *input)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = f->common.extack;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -2592,78 +2595,60 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key, *mask;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
-
- if (!is_zero_ether_addr(mask->dst)) {
- if (!is_broadcast_ether_addr(mask->dst)) {
+ flow_rule_match_eth_addrs(rule, &match);
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (!is_broadcast_ether_addr(match.mask->dst)) {
NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
return -EINVAL;
}
input->filter.match_flags |=
IGB_FILTER_FLAG_DST_MAC_ADDR;
- ether_addr_copy(input->filter.dst_addr, key->dst);
+ ether_addr_copy(input->filter.dst_addr, match.key->dst);
}
- if (!is_zero_ether_addr(mask->src)) {
- if (!is_broadcast_ether_addr(mask->src)) {
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (!is_broadcast_ether_addr(match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
return -EINVAL;
}
input->filter.match_flags |=
IGB_FILTER_FLAG_SRC_MAC_ADDR;
- ether_addr_copy(input->filter.src_addr, key->src);
+ ether_addr_copy(input->filter.src_addr, match.key->src);
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- if (mask->n_proto) {
- if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ flow_rule_match_basic(rule, &match);
+ if (match.mask->n_proto) {
+ if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
return -EINVAL;
}
input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
- input->filter.etype = key->n_proto;
+ input->filter.etype = match.key->n_proto;
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
- if (mask->vlan_priority) {
- if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority) {
+ if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
return -EINVAL;
}
input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
- input->filter.vlan_tci = key->vlan_priority;
+ input->filter.vlan_tci = match.key->vlan_priority;
}
}
@@ -3840,7 +3825,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
- mutex_init(&adapter->stats64_lock);
+ spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -5406,9 +5391,9 @@ no_wait:
}
}
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -6235,10 +6220,10 @@ static void igb_get_stats64(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- mutex_lock(&adapter->stats64_lock);
+ spin_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
memcpy(stats, &adapter->stats64, sizeof(*stats));
- mutex_unlock(&adapter->stats64_lock);
+ spin_unlock(&adapter->stats64_lock);
}
/**
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 4387f6ba8e67..88c6f88baac5 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_IGC) += igc.o
-igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o
+igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
+igc_ethtool.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index b1039dd3dd13..80faccc34cda 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -13,19 +13,43 @@
#include "igc_hw.h"
-/* main */
+/* forward declarations */
+void igc_set_ethtool_ops(struct net_device *);
+
+struct igc_adapter;
+struct igc_ring;
+
+void igc_up(struct igc_adapter *adapter);
+void igc_down(struct igc_adapter *adapter);
+int igc_setup_tx_resources(struct igc_ring *ring);
+int igc_setup_rx_resources(struct igc_ring *ring);
+void igc_free_tx_resources(struct igc_ring *ring);
+void igc_free_rx_resources(struct igc_ring *ring);
+unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
+void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues);
+int igc_reinit_queues(struct igc_adapter *adapter);
+bool igc_has_link(struct igc_adapter *adapter);
+void igc_reset(struct igc_adapter *adapter);
+int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
+
extern char igc_driver_name[];
extern char igc_driver_version[];
+#define IGC_REGS_LEN 740
+#define IGC_RETA_SIZE 128
+
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI BIT(0)
-#define IGC_FLAG_QUEUE_PAIRS BIT(4)
+#define IGC_FLAG_QUEUE_PAIRS BIT(3)
+#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
+#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
@@ -60,6 +84,7 @@ extern char igc_driver_version[];
#define IGC_RXBUFFER_2048 2048
#define IGC_RXBUFFER_3072 3072
+#define AUTO_ALL_MODES 0
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* RX and TX descriptor control thresholds.
@@ -340,6 +365,8 @@ struct igc_adapter {
struct igc_mac_addr *mac_table;
+ u8 rss_indir_tbl[IGC_RETA_SIZE];
+
unsigned long link_check_timeout;
struct igc_info ei;
};
@@ -418,6 +445,9 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
return 0;
}
+/* forward declaration */
+void igc_reinit_locked(struct igc_adapter *);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index df40af759542..51a8b8769c67 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -54,22 +54,6 @@ out:
}
/**
- * igc_check_for_link_base - Check for link
- * @hw: pointer to the HW structure
- *
- * If sgmii is enabled, then use the pcs register to determine link, otherwise
- * use the generic interface for determining link.
- */
-static s32 igc_check_for_link_base(struct igc_hw *hw)
-{
- s32 ret_val = 0;
-
- ret_val = igc_check_for_copper_link(hw);
-
- return ret_val;
-}
-
-/**
* igc_reset_hw_base - Reset hardware
* @hw: pointer to the HW structure
*
@@ -124,22 +108,6 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
}
/**
- * igc_get_phy_id_base - Retrieve PHY addr and id
- * @hw: pointer to the HW structure
- *
- * Retrieves the PHY address and ID for both PHY's which do and do not use
- * sgmi interface.
- */
-static s32 igc_get_phy_id_base(struct igc_hw *hw)
-{
- s32 ret_val = 0;
-
- ret_val = igc_get_phy_id(hw);
-
- return ret_val;
-}
-
-/**
* igc_init_nvm_params_base - Init NVM func ptrs.
* @hw: pointer to the HW structure
*/
@@ -163,6 +131,7 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
if (size > 15)
size = 15;
+ nvm->type = igc_nvm_eeprom_spi;
nvm->word_size = BIT(size);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
@@ -261,11 +230,11 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
goto out;
}
- ret_val = igc_get_phy_id_base(hw);
+ ret_val = igc_get_phy_id(hw);
if (ret_val)
return ret_val;
- igc_check_for_link_base(hw);
+ igc_check_for_copper_link(hw);
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
@@ -350,26 +319,6 @@ static void igc_release_phy_base(struct igc_hw *hw)
}
/**
- * igc_get_link_up_info_base - Get link speed/duplex info
- * @hw: pointer to the HW structure
- * @speed: stores the current speed
- * @duplex: stores the current duplex
- *
- * This is a wrapper function, if using the serial gigabit media independent
- * interface, use PCS to retrieve the link speed and duplex information.
- * Otherwise, use the generic function to get the link speed and duplex info.
- */
-static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed,
- u16 *duplex)
-{
- s32 ret_val;
-
- ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex);
-
- return ret_val;
-}
-
-/**
* igc_init_hw_base - Initialize hardware
* @hw: pointer to the HW structure
*
@@ -408,19 +357,6 @@ static s32 igc_init_hw_base(struct igc_hw *hw)
}
/**
- * igc_read_mac_addr_base - Read device MAC address
- * @hw: pointer to the HW structure
- */
-static s32 igc_read_mac_addr_base(struct igc_hw *hw)
-{
- s32 ret_val = 0;
-
- ret_val = igc_read_mac_addr(hw);
-
- return ret_val;
-}
-
-/**
* igc_power_down_phy_copper_base - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -512,10 +448,10 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
static struct igc_mac_operations igc_mac_ops_base = {
.init_hw = igc_init_hw_base,
- .check_for_link = igc_check_for_link_base,
+ .check_for_link = igc_check_for_copper_link,
.rar_set = igc_rar_set,
- .read_mac_addr = igc_read_mac_addr_base,
- .get_speed_and_duplex = igc_get_link_up_info_base,
+ .read_mac_addr = igc_read_mac_addr,
+ .get_speed_and_duplex = igc_get_speed_and_duplex_copper,
};
static const struct igc_phy_operations igc_phy_ops_base = {
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index 35588fa7b8c5..76d4991d7284 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -36,28 +36,6 @@ union igc_adv_tx_desc {
#define IGC_RAR_ENTRIES 16
-struct igc_adv_data_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- u32 data;
- struct {
- u32 datalen:16; /* Data buffer length */
- u32 rsvd:4;
- u32 dtyp:4; /* Descriptor type */
- u32 dcmd:8; /* Descriptor command */
- } config;
- } lower;
- union {
- u32 data;
- struct {
- u32 status:4; /* Descriptor status */
- u32 idx:4;
- u32 popts:6; /* Packet Options */
- u32 paylen:18; /* Payload length */
- } options;
- } upper;
-};
-
/* Receive Descriptor - Advanced */
union igc_adv_rx_desc {
struct {
@@ -90,9 +68,6 @@ union igc_adv_rx_desc {
} wb; /* writeback */
};
-/* Adv Transmit Descriptor Config Masks */
-#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
-
/* Additional Transmit Descriptor Control definitions */
#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 8740754ea1fd..7d1bdcd1225a 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -4,6 +4,10 @@
#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
/* PCI Bus Info */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
new file mode 100644
index 000000000000..eff37a6c0afa
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -0,0 +1,1032 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+/* ethtool support for igc */
+#include <linux/pm_runtime.h>
+
+#include "igc.h"
+
+static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)
+
+static void igc_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igc_driver_version, sizeof(drvinfo->version));
+
+ /* add fw_version here */
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN;
+}
+
+static int igc_get_regs_len(struct net_device *netdev)
+{
+ return IGC_REGS_LEN * sizeof(u32);
+}
+
+static void igc_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u8 i;
+
+ memset(p, 0, IGC_REGS_LEN * sizeof(u32));
+
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = rd32(IGC_CTRL);
+ regs_buff[1] = rd32(IGC_STATUS);
+ regs_buff[2] = rd32(IGC_CTRL_EXT);
+ regs_buff[3] = rd32(IGC_MDIC);
+ regs_buff[4] = rd32(IGC_CONNSW);
+
+ /* NVM Register */
+ regs_buff[5] = rd32(IGC_EECD);
+
+ /* Interrupt */
+ /* Reading EICS for EICR because they read the
+ * same but EICS does not clear on read
+ */
+ regs_buff[6] = rd32(IGC_EICS);
+ regs_buff[7] = rd32(IGC_EICS);
+ regs_buff[8] = rd32(IGC_EIMS);
+ regs_buff[9] = rd32(IGC_EIMC);
+ regs_buff[10] = rd32(IGC_EIAC);
+ regs_buff[11] = rd32(IGC_EIAM);
+ /* Reading ICS for ICR because they read the
+ * same but ICS does not clear on read
+ */
+ regs_buff[12] = rd32(IGC_ICS);
+ regs_buff[13] = rd32(IGC_ICS);
+ regs_buff[14] = rd32(IGC_IMS);
+ regs_buff[15] = rd32(IGC_IMC);
+ regs_buff[16] = rd32(IGC_IAC);
+ regs_buff[17] = rd32(IGC_IAM);
+
+ /* Flow Control */
+ regs_buff[18] = rd32(IGC_FCAL);
+ regs_buff[19] = rd32(IGC_FCAH);
+ regs_buff[20] = rd32(IGC_FCTTV);
+ regs_buff[21] = rd32(IGC_FCRTL);
+ regs_buff[22] = rd32(IGC_FCRTH);
+ regs_buff[23] = rd32(IGC_FCRTV);
+
+ /* Receive */
+ regs_buff[24] = rd32(IGC_RCTL);
+ regs_buff[25] = rd32(IGC_RXCSUM);
+ regs_buff[26] = rd32(IGC_RLPML);
+ regs_buff[27] = rd32(IGC_RFCTL);
+
+ /* Transmit */
+ regs_buff[28] = rd32(IGC_TCTL);
+ regs_buff[29] = rd32(IGC_TIPG);
+
+ /* Wake Up */
+
+ /* MAC */
+
+ /* Statistics */
+ regs_buff[30] = adapter->stats.crcerrs;
+ regs_buff[31] = adapter->stats.algnerrc;
+ regs_buff[32] = adapter->stats.symerrs;
+ regs_buff[33] = adapter->stats.rxerrc;
+ regs_buff[34] = adapter->stats.mpc;
+ regs_buff[35] = adapter->stats.scc;
+ regs_buff[36] = adapter->stats.ecol;
+ regs_buff[37] = adapter->stats.mcc;
+ regs_buff[38] = adapter->stats.latecol;
+ regs_buff[39] = adapter->stats.colc;
+ regs_buff[40] = adapter->stats.dc;
+ regs_buff[41] = adapter->stats.tncrs;
+ regs_buff[42] = adapter->stats.sec;
+ regs_buff[43] = adapter->stats.htdpmc;
+ regs_buff[44] = adapter->stats.rlec;
+ regs_buff[45] = adapter->stats.xonrxc;
+ regs_buff[46] = adapter->stats.xontxc;
+ regs_buff[47] = adapter->stats.xoffrxc;
+ regs_buff[48] = adapter->stats.xofftxc;
+ regs_buff[49] = adapter->stats.fcruc;
+ regs_buff[50] = adapter->stats.prc64;
+ regs_buff[51] = adapter->stats.prc127;
+ regs_buff[52] = adapter->stats.prc255;
+ regs_buff[53] = adapter->stats.prc511;
+ regs_buff[54] = adapter->stats.prc1023;
+ regs_buff[55] = adapter->stats.prc1522;
+ regs_buff[56] = adapter->stats.gprc;
+ regs_buff[57] = adapter->stats.bprc;
+ regs_buff[58] = adapter->stats.mprc;
+ regs_buff[59] = adapter->stats.gptc;
+ regs_buff[60] = adapter->stats.gorc;
+ regs_buff[61] = adapter->stats.gotc;
+ regs_buff[62] = adapter->stats.rnbc;
+ regs_buff[63] = adapter->stats.ruc;
+ regs_buff[64] = adapter->stats.rfc;
+ regs_buff[65] = adapter->stats.roc;
+ regs_buff[66] = adapter->stats.rjc;
+ regs_buff[67] = adapter->stats.mgprc;
+ regs_buff[68] = adapter->stats.mgpdc;
+ regs_buff[69] = adapter->stats.mgptc;
+ regs_buff[70] = adapter->stats.tor;
+ regs_buff[71] = adapter->stats.tot;
+ regs_buff[72] = adapter->stats.tpr;
+ regs_buff[73] = adapter->stats.tpt;
+ regs_buff[74] = adapter->stats.ptc64;
+ regs_buff[75] = adapter->stats.ptc127;
+ regs_buff[76] = adapter->stats.ptc255;
+ regs_buff[77] = adapter->stats.ptc511;
+ regs_buff[78] = adapter->stats.ptc1023;
+ regs_buff[79] = adapter->stats.ptc1522;
+ regs_buff[80] = adapter->stats.mptc;
+ regs_buff[81] = adapter->stats.bptc;
+ regs_buff[82] = adapter->stats.tsctc;
+ regs_buff[83] = adapter->stats.iac;
+ regs_buff[84] = adapter->stats.rpthc;
+ regs_buff[85] = adapter->stats.hgptc;
+ regs_buff[86] = adapter->stats.hgorc;
+ regs_buff[87] = adapter->stats.hgotc;
+ regs_buff[88] = adapter->stats.lenerrs;
+ regs_buff[89] = adapter->stats.scvpc;
+ regs_buff[90] = adapter->stats.hrmpc;
+
+ for (i = 0; i < 4; i++)
+ regs_buff[91 + i] = rd32(IGC_SRRCTL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[95 + i] = rd32(IGC_PSRTYPE(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[99 + i] = rd32(IGC_RDBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[103 + i] = rd32(IGC_RDBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[107 + i] = rd32(IGC_RDLEN(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[111 + i] = rd32(IGC_RDH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[115 + i] = rd32(IGC_RDT(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[119 + i] = rd32(IGC_RXDCTL(i));
+
+ for (i = 0; i < 10; i++)
+ regs_buff[123 + i] = rd32(IGC_EITR(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[139 + i] = rd32(IGC_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[145 + i] = rd32(IGC_RAH(i));
+
+ for (i = 0; i < 4; i++)
+ regs_buff[149 + i] = rd32(IGC_TDBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[152 + i] = rd32(IGC_TDBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[156 + i] = rd32(IGC_TDLEN(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[160 + i] = rd32(IGC_TDH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[164 + i] = rd32(IGC_TDT(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
+}
+
+static u32 igc_get_msglevel(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void igc_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static int igc_nway_reset(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ igc_reinit_locked(adapter);
+ return 0;
+}
+
+static u32 igc_get_link(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_mac_info *mac = &adapter->hw.mac;
+
+ /* If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ mac->get_link_status = 1;
+
+ return igc_has_link(adapter);
+}
+
+static int igc_get_eeprom_len(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->hw.nvm.word_size * 2;
+}
+
+static int igc_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ int first_word, last_word;
+ u16 *eeprom_buff;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ if (hw->nvm.type == igc_nvm_eeprom_spi) {
+ ret_val = hw->nvm.ops.read(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ } else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int igc_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 *eeprom_buff;
+ void *ptr;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (hw->mac.type >= igc_i225 &&
+ !igc_get_flash_presence_i225(hw)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->nvm.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of the first changed EEPROM word;
+ * only the second byte of the word is being modified
+ */
+ ret_val = hw->nvm.ops.read(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) {
+ /* need read/modify/write of the last changed EEPROM word;
+ * only the first byte of the word is being modified
+ */
+ ret_val = hw->nvm.ops.read(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = hw->nvm.ops.write(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ /* Update the checksum if nvm write succeeded */
+ if (ret_val == 0)
+ hw->nvm.ops.update(hw);
+
+ /* TODO: check whether igc_set_fw_version(adapter) is needed here */
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void igc_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = IGC_MAX_RXD;
+ ring->tx_max_pending = IGC_MAX_TXD;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
+}
+
+static int igc_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_ring *temp_ring;
+ u16 new_rx_count, new_tx_count;
+ int i, err = 0;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD);
+ new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD);
+ new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if (new_tx_count == adapter->tx_ring_count &&
+ new_rx_count == adapter->rx_ring_count) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ if (adapter->num_tx_queues > adapter->num_rx_queues)
+ temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
+ adapter->num_tx_queues));
+ else
+ temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
+ adapter->num_rx_queues));
+
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ igc_down(adapter);
+
+ /* We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the Tx and Rx ring structs.
+ */
+ if (new_tx_count != adapter->tx_ring_count) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
+ sizeof(struct igc_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = igc_setup_tx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ igc_free_tx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ igc_free_tx_resources(adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct igc_ring));
+ }
+
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
+ sizeof(struct igc_ring));
+
+ temp_ring[i].count = new_rx_count;
+ err = igc_setup_rx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ igc_free_rx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_free_rx_resources(adapter->rx_ring[i]);
+
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct igc_ring));
+ }
+
+ adapter->rx_ring_count = new_rx_count;
+ }
+err_setup:
+ igc_up(adapter);
+ vfree(temp_ring);
+clear_reset:
+ clear_bit(__IGC_RESETTING, &adapter->state);
+ return err;
+}
+
+static void igc_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc.current_mode == igc_fc_rx_pause) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == igc_fc_tx_pause) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == igc_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int igc_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ hw->fc.requested_mode = igc_fc_default;
+ if (netif_running(adapter->netdev)) {
+ igc_down(adapter);
+ igc_up(adapter);
+ } else {
+ igc_reset(adapter);
+ }
+ } else {
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = igc_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = igc_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = igc_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = igc_fc_none;
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ retval = ((hw->phy.media_type == igc_media_type_copper) ?
+ igc_force_mac_fc(hw) : igc_setup_link(hw));
+ }
+
+ clear_bit(__IGC_RESETTING, &adapter->state);
+ return retval;
+}
+
+static int igc_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->rx_itr_setting <= 3)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
+ if (adapter->tx_itr_setting <= 3)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+ }
+
+ return 0;
+}
+
+static int igc_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_rx_coalesce ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -ENOTSUPP;
+
+ if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
+ (ec->rx_coalesce_usecs > 3 &&
+ ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
+ ec->rx_coalesce_usecs == 2)
+ return -EINVAL;
+
+ if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS ||
+ (ec->tx_coalesce_usecs > 3 &&
+ ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
+ ec->tx_coalesce_usecs == 2)
+ return -EINVAL;
+
+ if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ /* If ITR is disabled, disable DMAC */
+ if (ec->rx_coalesce_usecs == 0) {
+ if (adapter->flags & IGC_FLAG_DMAC)
+ adapter->flags &= ~IGC_FLAG_DMAC;
+ }
+
+ /* convert to rate of IRQs per second */
+ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+
+ /* convert to rate of IRQs per second */
+ if (adapter->flags & IGC_FLAG_QUEUE_PAIRS)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+ else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igc_q_vector *q_vector = adapter->q_vector[i];
+
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+ if (q_vector->rx.ring)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ else
+ q_vector->itr_val = adapter->tx_itr_setting;
+ if (q_vector->itr_val && q_vector->itr_val <= 3)
+ q_vector->itr_val = IGC_START_ITR;
+ q_vector->set_itr = 1;
+ }
+
+ return 0;
+}
+
+void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 reg = IGC_RETA(0);
+ u32 shift = 0;
+ int i = 0;
+
+ while (i < IGC_RETA_SIZE) {
+ u32 val = 0;
+ int j;
+
+ for (j = 3; j >= 0; j--) {
+ val <<= 8;
+ val |= adapter->rss_indir_tbl[i + j];
+ }
+
+ wr32(reg, val << shift);
+ reg += 4;
+ i += 4;
+ }
+}
+
+static u32 igc_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return IGC_RETA_SIZE;
+}
+
+static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+ for (i = 0; i < IGC_RETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+
+ return 0;
+}
+
+static int igc_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u32 num_queues;
+ int i;
+
+ /* We do not allow changes to unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+
+ num_queues = adapter->rss_queues;
+
+ /* Verify user input. */
+ for (i = 0; i < IGC_RETA_SIZE; i++)
+ if (indir[i] >= num_queues)
+ return -EINVAL;
+
+ for (i = 0; i < IGC_RETA_SIZE; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+
+ igc_write_rss_indir_tbl(adapter);
+
+ return 0;
+}
+
+static unsigned int igc_max_channels(struct igc_adapter *adapter)
+{
+ return igc_get_max_rss_queues(adapter);
+}
+
+static void igc_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = igc_max_channels(adapter);
+
+ /* Report info for other vector */
+ if (adapter->flags & IGC_FLAG_HAS_MSIX) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ ch->combined_count = adapter->rss_queues;
+}
+
+static int igc_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ unsigned int count = ch->combined_count;
+ unsigned int max_combined = 0;
+
+ /* Verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* Verify other_count is valid and has not been changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* Verify the number of channels doesn't exceed hw limits */
+ max_combined = igc_max_channels(adapter);
+ if (count > max_combined)
+ return -EINVAL;
+
+ if (count != adapter->rss_queues) {
+ adapter->rss_queues = count;
+ igc_set_flag_queue_pairs(adapter, max_combined);
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match the new configuration.
+ */
+ return igc_reinit_queues(adapter);
+ }
+
+ return 0;
+}
+
+static u32 igc_get_priv_flags(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags & IGC_FLAG_RX_LEGACY)
+ priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int igc_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags = adapter->flags;
+
+ flags &= ~IGC_FLAG_RX_LEGACY;
+ if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX)
+ flags |= IGC_FLAG_RX_LEGACY;
+
+ if (flags != adapter->flags) {
+ adapter->flags = flags;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ igc_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static int igc_ethtool_begin(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ return 0;
+}
+
+static void igc_ethtool_complete(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
+static int igc_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 status;
+ u32 speed;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ /* supported link modes */
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full);
+
+ /* twisted pair */
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
+
+ /* advertising link modes */
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+
+ /* set autoneg settings */
+ if (hw->mac.autoneg == 1) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Autoneg);
+ }
+
+ switch (hw->fc.requested_mode) {
+ case igc_fc_full:
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ break;
+ case igc_fc_rx_pause:
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+ break;
+ case igc_fc_tx_pause:
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+ break;
+ default:
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+ }
+
+ status = rd32(IGC_STATUS);
+
+ if (status & IGC_STATUS_LU) {
+ if (status & IGC_STATUS_SPEED_1000) {
+ /* For I225, STATUS will indicate 1G speed in both
+ * 1 Gbps and 2.5 Gbps link modes.
+ * An additional bit is used
+ * to differentiate between 1 Gbps and 2.5 Gbps.
+ */
+ if (hw->mac.type == igc_i225 &&
+ (status & IGC_STATUS_SPEED_2500)) {
+ speed = SPEED_2500;
+ hw_dbg("2500 Mbs, ");
+ } else {
+ speed = SPEED_1000;
+ hw_dbg("1000 Mbs, ");
+ }
+ } else if (status & IGC_STATUS_SPEED_100) {
+ speed = SPEED_100;
+ hw_dbg("100 Mbs, ");
+ } else {
+ speed = SPEED_10;
+ hw_dbg("10 Mbs, ");
+ }
+ if ((status & IGC_STATUS_FD) ||
+ hw->phy.media_type != igc_media_type_copper)
+ cmd->base.duplex = DUPLEX_FULL;
+ else
+ cmd->base.duplex = DUPLEX_HALF;
+ } else {
+ speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.speed = speed;
+ if (hw->mac.autoneg)
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ else
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ /* MDI-X => 2; MDI =>1; Invalid =>0 */
+ if (hw->phy.media_type == igc_media_type_copper)
+ cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
+
+ return 0;
+}
+
+static int igc_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 advertising;
+
+ /* When the adapter is in resetting mode, autoneg/speed/duplex
+ * cannot be changed
+ */
+ if (igc_check_reset_block(hw)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot change link characteristics when reset is active.\n");
+ return -EINVAL;
+ }
+
+ /* MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (cmd->base.eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO &&
+ cmd->base.autoneg != AUTONEG_ENABLE) {
+ dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_advertised = advertising;
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = igc_fc_default;
+ } else {
+ /* calling this overrides forced MDI setting */
+ dev_info(&adapter->pdev->dev,
+ "Force mode currently not supported\n");
+ }
+
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (cmd->base.eth_tp_mdix_ctrl) {
+ /* fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
+ }
+
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ igc_down(adapter);
+ igc_up(adapter);
+ } else {
+ igc_reset(adapter);
+ }
+
+ clear_bit(__IGC_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+static const struct ethtool_ops igc_ethtool_ops = {
+ .get_drvinfo = igc_get_drvinfo,
+ .get_regs_len = igc_get_regs_len,
+ .get_regs = igc_get_regs,
+ .get_msglevel = igc_get_msglevel,
+ .set_msglevel = igc_set_msglevel,
+ .nway_reset = igc_nway_reset,
+ .get_link = igc_get_link,
+ .get_eeprom_len = igc_get_eeprom_len,
+ .get_eeprom = igc_get_eeprom,
+ .set_eeprom = igc_set_eeprom,
+ .get_ringparam = igc_get_ringparam,
+ .set_ringparam = igc_set_ringparam,
+ .get_pauseparam = igc_get_pauseparam,
+ .set_pauseparam = igc_set_pauseparam,
+ .get_coalesce = igc_get_coalesce,
+ .set_coalesce = igc_set_coalesce,
+ .get_rxfh_indir_size = igc_get_rxfh_indir_size,
+ .get_rxfh = igc_get_rxfh,
+ .set_rxfh = igc_set_rxfh,
+ .get_channels = igc_get_channels,
+ .set_channels = igc_set_channels,
+ .get_priv_flags = igc_get_priv_flags,
+ .set_priv_flags = igc_set_priv_flags,
+ .begin = igc_ethtool_begin,
+ .complete = igc_ethtool_complete,
+ .get_link_ksettings = igc_get_link_ksettings,
+ .set_link_ksettings = igc_set_link_ksettings,
+};
+
+void igc_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &igc_ethtool_ops;
+}
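
For reference, igc_write_rss_indir_tbl() above packs four one-byte RSS indirection entries into each 32-bit RETA register, least significant byte first, advancing the register offset by 4 per group. A minimal stand-alone sketch of the same packing (RETA_SIZE, pack_reta() and the sample table are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

#define RETA_SIZE 128 /* illustrative; mirrors IGC_RETA_SIZE in the driver */

/* Pack tbl[] into 32-bit words the way igc_write_rss_indir_tbl() does:
 * byte i+0 lands in bits 7:0, byte i+3 in bits 31:24 of word i/4.
 */
static void pack_reta(const uint8_t *tbl, uint32_t *regs)
{
	for (int i = 0; i < RETA_SIZE; i += 4) {
		uint32_t val = 0;

		for (int j = 3; j >= 0; j--) {
			val <<= 8;
			val |= tbl[i + j];
		}
		regs[i / 4] = val; /* the driver would wr32(IGC_RETA(0) + i, val) */
	}
}

int main(void)
{
	uint8_t tbl[RETA_SIZE];
	uint32_t regs[RETA_SIZE / 4];

	for (int i = 0; i < RETA_SIZE; i++)
		tbl[i] = i % 4; /* spread entries across 4 queues */

	pack_reta(tbl, regs);
	printf("first RETA word: 0x%08x\n", regs[0]); /* prints 0x03020100 */
	return 0;
}

With the identity-modulo-4 table above, the first packed word is 0x03020100, which is exactly what the driver's wr32() loop writes for entry group 0.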
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index c50414f48f0d..7c88b7bd4799 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -55,6 +55,7 @@ enum igc_media_type {
enum igc_nvm_type {
igc_nvm_unknown = 0,
+ igc_nvm_eeprom_spi,
igc_nvm_flash_hw,
igc_nvm_invm,
};
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index f20183037fb2..87a11879bf2d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -12,6 +12,8 @@
#define DRV_VERSION "0.0.1-k"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
static int debug = -1;
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -66,7 +68,7 @@ enum latency_range {
latency_invalid = 255
};
-static void igc_reset(struct igc_adapter *adapter)
+void igc_reset(struct igc_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
@@ -150,7 +152,7 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
*
* Free all transmit software resources
*/
-static void igc_free_tx_resources(struct igc_ring *tx_ring)
+void igc_free_tx_resources(struct igc_ring *tx_ring)
{
igc_clean_tx_ring(tx_ring);
@@ -261,7 +263,7 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
*
* Return 0 on success, negative on failure
*/
-static int igc_setup_tx_resources(struct igc_ring *tx_ring)
+int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int size = 0;
@@ -381,7 +383,7 @@ static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
*
* Free all receive software resources
*/
-static void igc_free_rx_resources(struct igc_ring *rx_ring)
+void igc_free_rx_resources(struct igc_ring *rx_ring)
{
igc_clean_rx_ring(rx_ring);
@@ -418,7 +420,7 @@ static void igc_free_all_rx_resources(struct igc_adapter *adapter)
*
* Returns 0 on success, negative on failure
*/
-static int igc_setup_rx_resources(struct igc_ring *rx_ring)
+int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int size, desc_len;
@@ -1703,7 +1705,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
*/
-static void igc_up(struct igc_adapter *adapter)
+void igc_up(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
int i = 0;
@@ -1748,7 +1750,7 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter)
* igc_down - Close the interface
* @adapter: board private structure
*/
-static void igc_down(struct igc_adapter *adapter)
+void igc_down(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
@@ -1810,7 +1812,7 @@ static void igc_down(struct igc_adapter *adapter)
igc_clean_all_rx_rings(adapter);
}
-static void igc_reinit_locked(struct igc_adapter *adapter)
+void igc_reinit_locked(struct igc_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
@@ -1922,7 +1924,7 @@ static void igc_configure(struct igc_adapter *adapter)
/**
* igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
- * @adapter: Pointer to adapter structure
+ * @adapter: address of board private structure
* @index: Index of the RAR entry which need to be synced with MAC table
*/
static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
@@ -2298,7 +2300,7 @@ static void igc_update_phy_info(struct timer_list *t)
* igc_has_link - check shared code for link and determine up/down
* @adapter: pointer to driver private info
*/
-static bool igc_has_link(struct igc_adapter *adapter)
+bool igc_has_link(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
bool link_active = false;
@@ -2956,22 +2958,21 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter,
{
struct igc_q_vector *q_vector;
struct igc_ring *ring;
- int ring_count, size;
+ int ring_count;
/* igc only supports 1 Tx and/or 1 Rx queue per vector */
if (txr_count > 1 || rxr_count > 1)
return -ENOMEM;
ring_count = txr_count + rxr_count;
- size = sizeof(struct igc_q_vector) +
- (sizeof(struct igc_ring) * ring_count);
/* allocate q_vector and rings */
q_vector = adapter->q_vector[v_idx];
if (!q_vector)
- q_vector = kzalloc(size, GFP_KERNEL);
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL);
else
- memset(q_vector, 0, size);
+ memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
if (!q_vector)
return -ENOMEM;
@@ -3501,6 +3502,57 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
return value;
}
+int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_mac_info *mac = &adapter->hw.mac;
+
+ mac->autoneg = 0;
+
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work
+ */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
+ switch (spd + dplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_10_FULL;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_100_HALF;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_100_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ goto err_inval;
+ case SPEED_2500 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
+ break;
+ case SPEED_2500 + DUPLEX_HALF: /* not supported */
+ default:
+ goto err_inval;
+ }
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
+ return 0;
+
+err_inval:
+ dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+}
+
/**
* igc_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -3568,7 +3620,7 @@ static int igc_probe(struct pci_dev *pdev,
hw = &adapter->hw;
hw->back = adapter;
adapter->port_num = hw->bus.func;
- adapter->msg_enable = GENMASK(debug - 1, 0);
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
err = pci_save_state(pdev);
if (err)
@@ -3584,7 +3636,7 @@ static int igc_probe(struct pci_dev *pdev,
hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igc_netdev_ops;
-
+ igc_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
netdev->mem_start = pci_resource_start(pdev, 0);
@@ -3744,8 +3796,8 @@ static struct pci_driver igc_driver = {
.remove = igc_remove,
};
-static void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
- const u32 max_rss_queues)
+void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues)
{
/* Determine if we need to pair queues. */
/* If rss_queues > half of max_rss_queues, pair the queues in
@@ -3757,7 +3809,7 @@ static void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
}
-static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
+unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
{
unsigned int max_rss_queues;
@@ -3837,6 +3889,32 @@ static int igc_sw_init(struct igc_adapter *adapter)
}
/**
+ * igc_reinit_queues - reinitialize the adapter's queues and interrupt scheme
+ * @adapter: pointer to adapter structure
+ */
+int igc_reinit_queues(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (netif_running(netdev))
+ igc_close(netdev);
+
+ igc_reset_interrupt_capability(adapter);
+
+ if (igc_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ err = igc_open(netdev);
+
+ return err;
+}
+
+/**
* igc_get_hw_dev - return device
* @hw: pointer to hardware structure
*
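
The igc_set_spd_dplx() hunk above depends on speed/duplex pairs mapping to unique switch labels: ethtool speeds are even multiples of 10 and duplex is 0 or 1, and the `(spd & 1) || (dplx & ~1)` guard rejects anything else, so `spd + dplx` can never alias two different combinations. A small sketch of the same encoding outside the driver (describe() is illustrative; the SPEED_*/DUPLEX_* values mirror the ethtool UAPI):

#include <stdio.h>

#define SPEED_10	10
#define SPEED_100	100
#define SPEED_1000	1000
#define DUPLEX_HALF	0
#define DUPLEX_FULL	1

static const char *describe(unsigned int spd, unsigned int dplx)
{
	/* Reject anything that would break the unique-sum trick */
	if ((spd & 1) || (dplx & ~1))
		return "invalid";

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:	return "10 half";
	case SPEED_10 + DUPLEX_FULL:	return "10 full";
	case SPEED_100 + DUPLEX_HALF:	return "100 half";
	case SPEED_100 + DUPLEX_FULL:	return "100 full";
	case SPEED_1000 + DUPLEX_FULL:	return "1000 full";
	default:			return "unsupported";
	}
}

int main(void)
{
	printf("%s\n", describe(SPEED_100, DUPLEX_FULL));	/* 100 full */
	printf("%s\n", describe(SPEED_1000, DUPLEX_HALF));	/* unsupported */
	printf("%s\n", describe(55, DUPLEX_FULL));		/* invalid */
	return 0;
}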
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 38e43e6fc1c7..4c8f96a9a148 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -152,7 +152,6 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
s32 igc_check_downshift(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
- u16 phy_data, offset, mask;
s32 ret_val;
switch (phy->type) {
@@ -161,15 +160,8 @@ s32 igc_check_downshift(struct igc_hw *hw)
/* speed downshift not supported */
phy->speed_downgraded = false;
ret_val = 0;
- goto out;
}
- ret_val = phy->ops.read_reg(hw, offset, &phy_data);
-
- if (!ret_val)
- phy->speed_downgraded = (phy_data & mask) ? true : false;
-
-out:
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index a1bd3216c906..5afe7a8d3faf 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -80,6 +80,9 @@
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
+/* Redirection Table - RW Array */
+#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
+
/* Receive Register Descriptions */
#define IGC_RCTL 0x00100 /* Rx Control - RW */
#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40))
@@ -188,7 +191,6 @@
#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define IGC_LENERRS 0x04138 /* Length Errors Count */
-#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
/* Management registers */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1d4d1686909a..e5ac2d3fd816 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
return -ENOMEM;
@@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
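
The ixgb hunks above are part of the tree-wide dma_zalloc_coherent() removal: dma_alloc_coherent() already returns zeroed memory, so callers convert one-for-one without adding a memset(). A hedged kernel-side sketch of the usual descriptor-ring allocation/teardown pattern (struct demo_ring and its sizes are illustrative, not ixgb's layout):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative ring: 256 descriptors of 16 bytes each, page aligned. */
struct demo_ring {
	void *desc;		/* CPU address of the descriptor ring */
	dma_addr_t dma;		/* bus address programmed into the NIC */
	size_t size;
};

static int demo_ring_alloc(struct device *dev, struct demo_ring *ring)
{
	ring->size = ALIGN(256 * 16, 4096);

	/* dma_alloc_coherent() zeroes the buffer, so no memset() is needed */
	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void demo_ring_free(struct device *dev, struct demo_ring *ring)
{
	dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
	ring->desc = NULL;
}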
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1e49716f52bc..109f8de5a1c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1048,7 +1048,7 @@ mac_reset_top:
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
- hw->mac.num_rar_entries = 128;
+ hw->mac.num_rar_entries = IXGBE_82599_RAR_ENTRIES;
hw->mac.ops.init_rx_addrs(hw);
/* Store the permanent SAN mac address */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 62e6499e4146..cc3196ae5aea 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -836,12 +836,10 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring;
int node = NUMA_NO_NODE;
int cpu = -1;
- int ring_count, size;
+ int ring_count;
u8 tcs = adapter->hw_tcs;
ring_count = txr_count + rxr_count + xdp_count;
- size = sizeof(struct ixgbe_q_vector) +
- (sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
@@ -855,9 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
}
/* allocate q_vector and rings */
- q_vector = kzalloc_node(size, GFP_KERNEL, node);
+ q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL, node);
if (!q_vector)
- q_vector = kzalloc(size, GFP_KERNEL);
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
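
Both the igc and ixgbe q_vector hunks replace the open-coded `sizeof(struct ...) + count * sizeof(struct ...)` with struct_size(), which sizes a structure ending in a flexible array member and saturates rather than wrapping on overflow. A minimal sketch of the idiom (struct demo_vector and demo_vector_alloc() are illustrative):

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_vector {
	int itr;
	unsigned int nr_rings;
	struct { void *desc; int count; } ring[];	/* flexible array */
};

static struct demo_vector *demo_vector_alloc(unsigned int nr_rings)
{
	struct demo_vector *v;

	/* struct_size(v, ring, nr_rings) evaluates to
	 *   sizeof(*v) + nr_rings * sizeof(v->ring[0])
	 * with overflow checking folded in; only sizeof is used, so the
	 * pointer is never dereferenced here.
	 */
	v = kzalloc(struct_size(v, ring, nr_rings), GFP_KERNEL);
	if (!v)
		return NULL;

	v->nr_rings = nr_rings;
	return v;
}

The driver call sites pass the q_vector pointer itself, so the expression tracks the real ring element size automatically if the structure layout changes.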
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..e100054a3765 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -27,6 +27,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
+#include <linux/numa.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
@@ -3953,8 +3954,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
- /* Enable L3/L4 for Tx Switched packets */
- mrqc |= IXGBE_MRQC_L3L4TXSWEN;
+ /* Enable L3/L4 for Tx Switched packets only for X550 and later,
+ * as older devices do not support this feature
+ */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ mrqc |= IXGBE_MRQC_L3L4TXSWEN;
} else {
if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -6415,7 +6419,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int orig_node = dev_to_node(dev);
- int ring_node = -1;
+ int ring_node = NUMA_NO_NODE;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -6509,7 +6513,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
{
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
- int ring_node = -1;
+ int ring_node = NUMA_NO_NODE;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -9910,7 +9914,8 @@ static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
@@ -10225,6 +10230,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct bpf_prog *old_prog;
+ bool need_reset;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
return -EINVAL;
@@ -10247,9 +10253,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return -ENOMEM;
old_prog = xchg(&adapter->xdp_prog, prog);
+ need_reset = (!!prog != !!old_prog);
/* If transitioning XDP modes reconfigure rings */
- if (!!prog != !!old_prog) {
+ if (need_reset) {
int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
@@ -10265,6 +10272,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (old_prog)
bpf_prog_put(old_prog);
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This is done so that receiving will start.
+ */
+ if (need_reset && prog)
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->xdp_ring[i]->xsk_umem)
+ (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
+
return 0;
}
@@ -10279,9 +10294,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
- case XDP_QUERY_XSK_UMEM:
- return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem,
- xdp->xsk.queue_id);
case XDP_SETUP_XSK_UMEM:
return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
xdp->xsk.queue_id);
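
The ixgbe_ndo_fdb_add() prototype above grows a struct netlink_ext_ack argument as part of a netdev-wide change threading extended acks through the fdb ndo callbacks, letting a driver attach a human-readable reason to an error. A hedged sketch of how such a callback could use it (demo_fdb_add() and its single check are illustrative, not ixgbe's logic):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>

static int demo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			struct net_device *dev,
			const unsigned char *addr, u16 vid,
			u16 flags, struct netlink_ext_ack *extack)
{
	if (!is_unicast_ether_addr(addr)) {
		/* The reason string is returned to user space with the ack */
		NL_SET_ERR_MSG_MOD(extack, "Only unicast addresses are supported");
		return -EINVAL;
	}

	/* ... program the filter in hardware ... */
	return 0;
}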
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 53d4089f5644..d93a690aff74 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -30,8 +30,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring);
-int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
- u16 qid);
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
u16 qid);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65c3e2c979d4..bfe95ce0bd7f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
ixgbe_txrx_ring_disable(adapter, qid);
err = ixgbe_add_xsk_umem(adapter, umem, qid);
+ if (err)
+ return err;
- if (if_running)
+ if (if_running) {
ixgbe_txrx_ring_enable(adapter, qid);
- return err;
+ /* Kick start the NAPI context so that receiving will start */
+ err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -174,23 +182,6 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
return 0;
}
-int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
- u16 qid)
-{
- if (qid >= adapter->num_rx_queues)
- return -EINVAL;
-
- if (adapter->xsk_umems) {
- if (qid >= adapter->num_xsk_umems)
- return -EINVAL;
- *umem = adapter->xsk_umems[qid];
- return 0;
- }
-
- *umem = NULL;
- return 0;
-}
-
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
u16 qid)
{
@@ -634,7 +625,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
while (budget-- > 0) {
- if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
+ !netif_carrier_ok(xdp_ring->netdev)) {
work_done = false;
break;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index a5ab6f3403ae..763ee5281177 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2034,10 +2034,9 @@ static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
ctxbi->len,
PCI_DMA_TODEVICE);
- ctxbi->mapping = 0;
- ctxbi->len = 0;
+ ctxbi->mapping = 0;
+ ctxbi->len = 0;
}
-
}
static int
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 32ac9045cdae..f9bb890733b5 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -112,10 +112,12 @@ struct ltq_etop_priv {
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
+ struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+
ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc])
return -ENOMEM;
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
+ ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
DMA_FROM_DEVICE);
ch->dma.desc_base[ch->dma.desc].addr =
@@ -487,7 +489,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
netif_trans_update(dev);
spin_lock_irqsave(&priv->lock, flags);
- desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
+ desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len,
DMA_TO_DEVICE)) - byte_offset;
wmb();
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
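
The lantiq_etop change above stops passing a NULL device to dma_map_single(): the streaming DMA API needs the real struct device behind the NIC (here &priv->pdev->dev) so the correct DMA ops and masks are applied, and the result should be checked with dma_mapping_error(). A short hedged sketch of that pattern (demo_map_tx() is illustrative, not a driver helper):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

/* Map an skb's data for device TX; 'dev' must be a real device, never NULL. */
static int demo_map_tx(struct device *dev, struct sk_buff *skb,
		       dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;

	return 0;
}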
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 2d4d10a017e5..d29104de0d53 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -335,7 +335,6 @@ static const struct net_device_ops xrx200_netdev_ops = {
.ndo_start_xmit = xrx200_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
};
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2f427271a793..292a668ce88e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
- return ret;
+ goto err_put_clk;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
infer_hw_params(msp);
return 0;
+
+err_put_clk:
+ if (!IS_ERR(msp->clk))
+ clk_disable_unprepare(msp->clk);
+ return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d4568eb2297..c0a3718b2e2a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -27,6 +27,7 @@
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
@@ -436,6 +437,7 @@ struct mvneta_port {
struct device_node *dn;
unsigned int tx_csum_limit;
struct phylink *phylink;
+ struct phy *comphy;
struct mvneta_bm *bm_priv;
struct mvneta_bm_pool *pool_long;
@@ -2146,7 +2148,7 @@ err_drop_frame:
if (unlikely(!skb))
goto err_drop_frame_ret_pool;
- dma_sync_single_range_for_cpu(dev->dev.parent,
+ dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
rx_desc->buf_phys_addr,
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
@@ -3147,10 +3149,27 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
return 0;
}
+static int mvneta_comphy_init(struct mvneta_port *pp)
+{
+ int ret;
+
+ if (!pp->comphy)
+ return 0;
+
+ ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
+ pp->phy_interface);
+ if (ret)
+ return ret;
+
+ return phy_power_on(pp->comphy);
+}
+
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
+ WARN_ON(mvneta_comphy_init(pp));
+
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3213,6 +3232,8 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
mvneta_tx_reset(pp);
mvneta_rx_reset(pp);
+
+ WARN_ON(phy_power_off(pp->comphy));
}
static void mvneta_percpu_enable(void *arg)
@@ -3338,6 +3359,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
struct phylink_link_state *state)
{
+ struct mvneta_port *pp = netdev_priv(ndev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
@@ -3358,8 +3380,13 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
phylink_set(mask, Pause);
/* Half-duplex at speeds higher than 100Mbit is unsupported */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseX_Full);
+ }
if (!phy_interface_mode_is_8023z(state->interface)) {
/* 10M and 100M are only supported in non-802.3z mode */
@@ -3373,6 +3400,11 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
}
static int mvneta_mac_link_state(struct net_device *ndev,
@@ -3384,7 +3416,9 @@ static int mvneta_mac_link_state(struct net_device *ndev,
gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
if (gmac_stat & MVNETA_GMAC_SPEED_1000)
- state->speed = SPEED_1000;
+ state->speed =
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ?
+ SPEED_2500 : SPEED_1000;
else if (gmac_stat & MVNETA_GMAC_SPEED_100)
state->speed = SPEED_100;
else
@@ -3499,12 +3533,23 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
MVNETA_GMAC_FORCE_LINK_DOWN);
}
+
/* When at 2.5G, the link partner can send frames with shortened
* preambles.
*/
if (state->speed == SPEED_2500)
new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
+ if (pp->comphy && pp->phy_interface != state->interface &&
+ (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
+ pp->phy_interface = state->interface;
+
+ WARN_ON(phy_power_off(pp->comphy));
+ WARN_ON(mvneta_comphy_init(pp));
+ }
+
if (new_ctrl0 != gmac_ctrl0)
mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
if (new_ctrl2 != gmac_ctrl2)
@@ -4404,7 +4449,7 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
- phy_mode == PHY_INTERFACE_MODE_1000BASEX)
+ phy_interface_mode_is_8023z(phy_mode))
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
else if (!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
@@ -4421,6 +4466,7 @@ static int mvneta_probe(struct platform_device *pdev)
struct mvneta_port *pp;
struct net_device *dev;
struct phylink *phylink;
+ struct phy *comphy;
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
@@ -4446,6 +4492,14 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_irq;
}
+ comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
+ if (comphy == ERR_PTR(-EPROBE_DEFER)) {
+ err = -EPROBE_DEFER;
+ goto err_free_irq;
+ } else if (IS_ERR(comphy)) {
+ comphy = NULL;
+ }
+
phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
&mvneta_phylink_ops);
if (IS_ERR(phylink)) {
@@ -4462,6 +4516,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
spin_lock_init(&pp->lock);
pp->phylink = phylink;
+ pp->comphy = comphy;
pp->phy_interface = phy_mode;
pp->dn = dn;
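
The mvneta changes above drive the SerDes lane through the generic PHY framework: the lane is looked up with devm_of_phy_get(), put into Ethernet mode for the current interface with phy_set_mode_ext(), then powered on (and powered off again on stop or when the interface mode changes). A hedged sketch of that sequence in isolation (demo_comphy_up()/demo_comphy_down() are illustrative wrappers):

#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/bug.h>

/* Bring up an optional SerDes/comphy lane for a given MAC interface mode.
 * 'comphy' may be NULL when the platform has no controllable lane.
 */
static int demo_comphy_up(struct phy *comphy, phy_interface_t interface)
{
	int ret;

	if (!comphy)
		return 0;

	ret = phy_set_mode_ext(comphy, PHY_MODE_ETHERNET, interface);
	if (ret)
		return ret;

	return phy_power_on(comphy);
}

static void demo_comphy_down(struct phy *comphy)
{
	if (comphy)
		WARN_ON(phy_power_off(comphy));
}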
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 398328f10743..ff0f4c503f53 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -389,7 +389,7 @@
#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4)
-#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
@@ -402,8 +402,8 @@
#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1)
#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2)
#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3)
-#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6)
-#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7)
+#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(4)
+#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(5)
#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
@@ -430,6 +430,8 @@
#define MVPP22_XLG_CTRL0_REG 0x100
#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
+#define MVPP22_XLG_CTRL0_FORCE_LINK_DOWN BIT(2)
+#define MVPP22_XLG_CTRL0_FORCE_LINK_PASS BIT(3)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
@@ -481,6 +483,7 @@
/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0 0x0
+#define MVPP22_XPCS_CFG0_RESET_DIS BIT(0)
#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
@@ -549,8 +552,8 @@
#define MVPP2_MAX_TSO_SEGS 300
#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
-/* Dfault number of RXQs in use */
-#define MVPP2_DEFAULT_RXQ 1
+/* Max number of RXQs per port */
+#define MVPP2_PORT_MAX_RXQ 32
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD_MAX 1024
@@ -803,7 +806,7 @@ struct mvpp2_port {
u8 id;
/* Index of the port from the "group of ports" complex point
- * of view
+ * of view. This is specific to PPv2.2.
*/
int gop_id;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e0875476a780..25fbed2b8d94 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -965,6 +965,11 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
}
/* Port configuration routines */
+static bool mvpp2_is_xlg(phy_interface_t interface)
+{
+ return interface == PHY_INTERFACE_MODE_10GKR ||
+ interface == PHY_INTERFACE_MODE_XAUI;
+}
static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
@@ -1010,27 +1015,20 @@ static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
u32 val;
- /* XPCS */
val = readl(xpcs + MVPP22_XPCS_CFG0);
val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
writel(val, xpcs + MVPP22_XPCS_CFG0);
- /* MPCS */
val = readl(mpcs + MVPP22_MPCS_CTRL);
val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
writel(val, mpcs + MVPP22_MPCS_CTRL);
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
- val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
- MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+ val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
-
- val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
- val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
- writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
static int mvpp22_gop_init(struct mvpp2_port *port)
@@ -1090,9 +1088,8 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
u32 val;
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
/* Enable the GMAC link status irq for this port */
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
@@ -1102,7 +1099,7 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
if (port->gop_id == 0) {
/* Enable the XLG/GIG irqs for this port */
val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
- if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+ if (mvpp2_is_xlg(port->phy_interface))
val |= MVPP22_XLG_EXT_INT_MASK_XLG;
else
val |= MVPP22_XLG_EXT_INT_MASK_GIG;
@@ -1122,9 +1119,8 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
}
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
@@ -1135,10 +1131,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
u32 val;
- if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ if (port->phylink ||
+ phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_MASK);
val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_MASK);
@@ -1183,12 +1179,9 @@ static void mvpp2_port_enable(struct mvpp2_port *port)
u32 val;
/* Only GOP port 0 has an XLG MAC */
- if (port->gop_id == 0 &&
- (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+ if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val |= MVPP22_XLG_CTRL0_PORT_EN |
- MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+ val |= MVPP22_XLG_CTRL0_PORT_EN;
val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
} else {
@@ -1204,21 +1197,15 @@ static void mvpp2_port_disable(struct mvpp2_port *port)
u32 val;
/* Only GOP port 0 has an XLG MAC */
- if (port->gop_id == 0 &&
- (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
+ if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
val &= ~MVPP22_XLG_CTRL0_PORT_EN;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
-
- /* Disable & reset should be done separately */
- val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~(MVPP2_GMAC_PORT_EN_MASK);
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
@@ -1244,9 +1231,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port,
else
val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
+ if (phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII)
val |= MVPP2_GMAC_PCS_LB_EN_MASK;
else
val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
@@ -1371,22 +1357,75 @@ static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
return -EOPNOTSUPP;
}
-static void mvpp2_port_reset(struct mvpp2_port *port)
+static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
- u32 val;
unsigned int i;
+ u32 val;
/* Read the GOP statistics to reset the hardware counters */
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
- ~MVPP2_GMAC_PORT_RESET_MASK;
+ val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
+ MVPP2_GMAC_PORT_RESET_MASK;
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
- while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
- MVPP2_GMAC_PORT_RESET_MASK)
- continue;
+ if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
+ ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ }
+}
+
+static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *mpcs, *xpcs;
+ u32 val;
+
+ if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+ return;
+
+ mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+ xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+
+ val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+ val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
+ val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+}
+
+static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *mpcs, *xpcs;
+ u32 val;
+
+ if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+ return;
+
+ mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+ xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
+ val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
+ MAC_CLK_RESET_SD_TX;
+ val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+ break;
+ default:
+ break;
+ }
}
/* Change maximum receive size of the port */
@@ -2044,9 +2083,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
u32 txq_dma;
/* Allocate memory for TX descriptors */
- aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_dma, GFP_KERNEL);
+ aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ &aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
return -ENOMEM;
@@ -2462,8 +2501,7 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
mvpp22_gop_mask_irq(port);
- if (port->gop_id == 0 &&
- port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
+ if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
val = readl(port->base + MVPP22_XLG_INT_STAT);
if (val & MVPP22_XLG_INT_STAT_LINK) {
event = true;
@@ -2472,9 +2510,8 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
link = true;
}
} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_STAT);
if (val & MVPP22_GMAC_INT_STAT_LINK) {
event = true;
@@ -3143,19 +3180,26 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
u32 ctrl3;
+ /* Set the GMAC & XLG MAC in reset */
+ mvpp2_mac_reset_assert(port);
+
+ /* Set the MPCS and XPCS in reset */
+ mvpp22_pcs_reset_assert(port);
+
/* comphy reconfiguration */
mvpp22_comphy_init(port);
/* gop reconfiguration */
mvpp22_gop_init(port);
+ mvpp22_pcs_reset_deassert(port);
+
/* Only GOP port 0 has an XLG MAC */
if (port->gop_id == 0) {
ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
- if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+ if (mvpp2_is_xlg(port->phy_interface))
ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
else
ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
@@ -3163,9 +3207,7 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
}
- if (port->gop_id == 0 &&
- (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR))
+ if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface))
mvpp2_xlg_max_rx_size_set(port);
else
mvpp2_gmac_max_rx_size_set(port);
@@ -3483,6 +3525,9 @@ static int mvpp2_stop(struct net_device *dev)
cancel_delayed_work_sync(&port->stats_work);
+ mvpp2_mac_reset_assert(port);
+ mvpp22_pcs_reset_assert(port);
+
return 0;
}
@@ -4072,8 +4117,8 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
snprintf(irqname, sizeof(irqname), "hif%d", i);
if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
- v->first_rxq = i * MVPP2_DEFAULT_RXQ;
- v->nrxqs = MVPP2_DEFAULT_RXQ;
+ v->first_rxq = i;
+ v->nrxqs = 1;
} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
i == (port->nqvecs - 1)) {
v->first_rxq = 0;
@@ -4166,8 +4211,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
MVPP2_MAX_PORTS * priv->max_port_rxqs)
return -EINVAL;
- if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
- port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
+ if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
return -EINVAL;
/* Disable port */
@@ -4374,7 +4418,7 @@ static void mvpp2_phylink_validate(struct net_device *dev,
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (port->gop_id == 0)
+ if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
goto empty_set;
break;
default:
@@ -4414,6 +4458,7 @@ static void mvpp2_phylink_validate(struct net_device *dev,
case PHY_INTERFACE_MODE_2500BASEX:
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
break;
default:
@@ -4505,131 +4550,198 @@ static int mvpp2_phylink_mac_link_state(struct net_device *dev,
static void mvpp2_mac_an_restart(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
- u32 val;
-
- if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
- return;
+ u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- /* The RESTART_AN bit is cleared by the h/w after restarting the AN
- * process.
- */
- val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
- u32 ctrl0, ctrl4;
+ u32 old_ctrl0, ctrl0;
+ u32 old_ctrl4, ctrl4;
- ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
- ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
+ old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
+
+ ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS;
if (state->pause & MLO_PAUSE_TX)
ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
+ else
+ ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
+
if (state->pause & MLO_PAUSE_RX)
ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+ else
+ ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
- writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
- writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
+ if (old_ctrl0 != ctrl0)
+ writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
+ if (old_ctrl4 != ctrl4)
+ writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
+
+ if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) {
+ while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) &
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS))
+ continue;
+ }
}
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
- u32 an, ctrl0, ctrl2, ctrl4;
-
- an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+ u32 old_an, an;
+ u32 old_ctrl0, ctrl0;
+ u32 old_ctrl2, ctrl2;
+ u32 old_ctrl4, ctrl4;
- /* Force link down */
- an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- an |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- /* Set the GMAC in a reset state */
- ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
- writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+ old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+ old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
- MVPP2_GMAC_FORCE_LINK_DOWN);
+ MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
-
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
- * they negotiate duplex: they are always operating with a fixed
- * speed of 1000/2500Mbps in full duplex, so force 1000/2500
- * speed and full duplex here.
- */
- ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
- an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX;
- } else if (!phy_interface_mode_is_rgmii(state->interface)) {
- an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
+ MVPP2_GMAC_PCS_ENABLE_MASK);
+ ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+
+ /* Configure port type */
+ if (phy_interface_mode_is_8023z(state->interface)) {
+ ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
+ ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_DP_CLK_SEL |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
+ ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_DP_CLK_SEL |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ } else if (phy_interface_mode_is_rgmii(state->interface)) {
+ ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
+ ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
+ MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
}
- if (state->duplex)
- an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ /* Configure advertisement bits */
if (phylink_test(state->advertising, Pause))
an |= MVPP2_GMAC_FC_ADV_EN;
if (phylink_test(state->advertising, Asym_Pause))
an |= MVPP2_GMAC_FC_ADV_ASM_EN;
- if (state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- an |= MVPP2_GMAC_IN_BAND_AUTONEG;
- ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
+ /* Configure negotiation style */
+ if (!phylink_autoneg_inband(mode)) {
+ /* Phy or fixed speed - no in-band AN */
+ if (state->duplex)
+ an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
- ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
- MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
- ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
- MVPP22_CTRL4_DP_CLK_SEL |
- MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
+ an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ else if (state->speed == SPEED_100)
+ an |= MVPP2_GMAC_CONFIG_MII_SPEED;
if (state->pause & MLO_PAUSE_TX)
ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
if (state->pause & MLO_PAUSE_RX)
ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
- } else if (phy_interface_mode_is_rgmii(state->interface)) {
- an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII in-band mode receives the speed and duplex from
+ * the PHY. Flow control information is not received. */
+ an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
+ an |= MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_AN_DUPLEX_EN;
- if (state->speed == SPEED_1000)
- an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
- else if (state->speed == SPEED_100)
- an |= MVPP2_GMAC_CONFIG_MII_SPEED;
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
+ if (state->pause & MLO_PAUSE_RX)
+ ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+ } else if (phy_interface_mode_is_8023z(state->interface)) {
+ /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
+ * they negotiate duplex: they are always operating with a fixed
+ * speed of 1000/2500Mbps in full duplex, so force 1000/2500
+ * speed and full duplex here.
+ */
+ ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
+ an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
+ an |= MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
- ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
- ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
- MVPP22_CTRL4_SYNC_BYPASS_DIS |
- MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ if (state->pause & MLO_PAUSE_AN && state->an_enabled) {
+ an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+ } else {
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
+ if (state->pause & MLO_PAUSE_RX)
+ ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+ }
}
- writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
- writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
- writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
- writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+/* Some fields of the auto-negotiation register require the port to be
+ * down when their value is updated.
+ */
+#define MVPP2_GMAC_AN_PORT_DOWN_MASK \
+ (MVPP2_GMAC_IN_BAND_AUTONEG | \
+ MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
+ MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
+ MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
+ MVPP2_GMAC_AN_DUPLEX_EN)
+
+ if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
+ (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
+ (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
+ /* Force link down */
+ old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* Set the GMAC in a reset state - do this in a way that
+ * ensures we clear it below.
+ */
+ old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
+ writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+ }
+
+ if (old_ctrl0 != ctrl0)
+ writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
+ if (old_ctrl2 != ctrl2)
+ writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+ if (old_ctrl4 != ctrl4)
+ writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
+ if (old_an != an)
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
+ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+ MVPP2_GMAC_PORT_RESET_MASK)
+ continue;
+ }
}
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
const struct phylink_link_state *state)
{
struct mvpp2_port *port = netdev_priv(dev);
+ bool change_interface = port->phy_interface != state->interface;
/* Check for invalid configuration */
- if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
+ if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
netdev_err(dev, "Invalid mode on %s\n", dev->name);
return;
}
@@ -4637,8 +4749,9 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
- if (port->priv->hw_version == MVPP22 &&
- port->phy_interface != state->interface) {
+ if (port->priv->hw_version == MVPP22 && change_interface) {
+ mvpp22_gop_mask_irq(port);
+
port->phy_interface = state->interface;
/* Reconfigure the serdes lanes */
@@ -4647,17 +4760,19 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
}
/* mac (re)configuration */
- if (state->interface == PHY_INTERFACE_MODE_10GKR)
+ if (mvpp2_is_xlg(state->interface))
mvpp2_xlg_config(port, mode, state);
else if (phy_interface_mode_is_rgmii(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII)
mvpp2_gmac_config(port, mode, state);
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
+ if (port->priv->hw_version == MVPP22 && change_interface)
+ mvpp22_gop_unmask_irq(port);
+
mvpp2_port_enable(port);
}
@@ -4667,13 +4782,18 @@ static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
struct mvpp2_port *port = netdev_priv(dev);
u32 val;
- if (!phylink_autoneg_inband(mode) &&
- interface != PHY_INTERFACE_MODE_10GKR) {
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
- if (phy_interface_mode_is_rgmii(interface))
+ if (!phylink_autoneg_inband(mode)) {
+ if (mvpp2_is_xlg(interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
val |= MVPP2_GMAC_FORCE_LINK_PASS;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
}
mvpp2_port_enable(port);
@@ -4689,25 +4809,24 @@ static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
struct mvpp2_port *port = netdev_priv(dev);
u32 val;
- if (!phylink_autoneg_inband(mode) &&
- interface != PHY_INTERFACE_MODE_10GKR) {
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- val |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ if (!phylink_autoneg_inband(mode)) {
+ if (mvpp2_is_xlg(interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
}
netif_tx_stop_all_queues(dev);
mvpp2_egress_disable(port);
mvpp2_ingress_disable(port);
- /* When using link interrupts to notify phylink of a MAC state change,
- * we do not want the port to be disabled (we want to receive further
- * interrupts, to be notified when the port will have a link later).
- */
- if (!port->has_phy)
- return;
-
mvpp2_port_disable(port);
}
@@ -4749,10 +4868,18 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
ntxqs = MVPP2_MAX_TXQ;
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
- nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
- else
- nrxqs = MVPP2_DEFAULT_RXQ;
+ if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) {
+ nrxqs = 1;
+ } else {
+ /* According to the PPv2.2 datasheet and our experiments on
+ * PPv2.1, RX queues have an allocation granularity of 4 (when
+ * there is more than a single queue on PPv2.2).
+ * Round up to nearest multiple of 4.
+ */
+ nrxqs = (num_possible_cpus() + 3) & ~0x3;
+ if (nrxqs > MVPP2_PORT_MAX_RXQ)
+ nrxqs = MVPP2_PORT_MAX_RXQ;
+ }
dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
if (!dev)
@@ -4883,7 +5010,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mvpp2_port_periodic_xon_disable(port);
- mvpp2_port_reset(port);
+ mvpp2_mac_reset_assert(port);
+ mvpp22_pcs_reset_assert(port);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
if (!port->pcpu) {
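/* A minimal sketch of the RX queue sizing used in mvpp2_port_probe() above
 * (this helper does not exist in the driver; the name is illustrative only):
 * the CPU count is rounded up to the PPv2.2 allocation granularity of 4 and
 * then capped at the per-port maximum, MVPP2_PORT_MAX_RXQ.
 */
static unsigned int mvpp2_rxq_count(unsigned int ncpus, unsigned int max_rxqs)
{
	unsigned int nrxqs = (ncpus + 3) & ~0x3u;	/* round up to a multiple of 4 */

	return nrxqs > max_rxqs ? max_rxqs : nrxqs;	/* respect the per-port limit */
}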
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 742f0c1f60df..6d55e3d0b7ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -825,7 +825,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!cgx->cgx_cmd_workq) {
dev_err(dev, "alloc workqueue failed for cgx cmd");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_free_irq_vectors;
}
list_add(&cgx->cgx_list, &cgx_list);
@@ -841,6 +841,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_release_lmac:
cgx_lmac_exit(cgx);
list_del(&cgx->cgx_list);
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index ec50a21c5aaf..e332e82fc066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -64,7 +64,7 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q,
qmem->entry_sz = entry_sz;
qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
- qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
+ qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
&qmem->iova, GFP_KERNEL);
if (!qmem->base)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 0bd4351b2a49..35f2142aac5e 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -201,6 +201,7 @@ struct tx_desc {
};
struct pxa168_eth_private {
+ struct platform_device *pdev;
int port_num; /* User Ethernet port number */
int phy_addr;
int phy_speed;
@@ -331,7 +332,7 @@ static void rxq_refill(struct net_device *dev)
used_rx_desc = pep->rx_used_desc_q;
p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
size = skb_end_pointer(skb) - skb->data;
- p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+ p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
skb->data,
size,
DMA_FROM_DEVICE);
@@ -557,9 +558,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
* table is full.
*/
if (!pep->htpr) {
- pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma, GFP_KERNEL);
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
if (!pep->htpr)
return -ENOMEM;
} else {
@@ -743,7 +744,7 @@ static int txq_reclaim(struct net_device *dev, int force)
netdev_err(dev, "Error in TX\n");
dev->stats.tx_errors++;
}
- dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+ dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
if (skb)
dev_kfree_skb_irq(skb);
released++;
@@ -805,7 +806,7 @@ static int rxq_process(struct net_device *dev, int budget)
if (rx_next_curr_desc == rx_used_desc)
pep->rx_resource_err = 1;
pep->rx_desc_count--;
- dma_unmap_single(NULL, rx_desc->buf_ptr,
+ dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
rx_desc->buf_size,
DMA_FROM_DEVICE);
received_packets++;
@@ -1044,9 +1045,9 @@ static int rxq_init(struct net_device *dev)
pep->rx_desc_count = 0;
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
- pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma,
- GFP_KERNEL);
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_rx_desc_area)
goto out;
@@ -1103,9 +1104,9 @@ static int txq_init(struct net_device *dev)
pep->tx_desc_count = 0;
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
- pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma,
- GFP_KERNEL);
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_tx_desc_area)
goto out;
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
@@ -1274,7 +1275,8 @@ pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
length = skb->len;
pep->tx_skb[tx_index] = skb;
desc->byte_cnt = length;
- desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
+ DMA_TO_DEVICE);
skb_tx_timestamp(skb);
@@ -1528,6 +1530,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
if (err)
goto err_free_mdio;
+ pep->pdev = pdev;
SET_NETDEV_DEV(dev, &pdev->dev);
pxa168_init_hw(pep);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 04fd1f135011..654ac534b10e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
memset(p, 0, regs->len);
memcpy_fromio(p, io, B3_RAM_ADDR);
- memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
- regs->len - B3_RI_WTO_R1);
+ if (regs->len > B3_RI_WTO_R1) {
+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+ regs->len - B3_RI_WTO_R1);
+ }
}
/* Wake on Lan only supported on Yukon chips with rev 1 or above */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f3a5fa84860f..8b3495ee2b6e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -46,6 +46,7 @@
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/dmi.h>
#include <asm/irq.h>
@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
-static int disable_msi = 0;
+static int disable_msi = -1;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
@@ -4917,6 +4918,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
return buf;
}
+static const struct dmi_system_id msi_blacklist[] = {
+ {
+ .ident = "Dell Inspiron 1545",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
+ },
+ },
+ {
+ .ident = "Gateway P-79",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+ },
+ },
+ {}
+};
+
static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev, *dev1;
@@ -5028,6 +5047,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_pci;
}
+ if (disable_msi == -1)
+ disable_msi = !!dmi_check_system(msi_blacklist);
+
if (!disable_msi && pci_enable_msi(pdev) == 0) {
err = sky2_test_msi(hw);
if (err) {
@@ -5073,7 +5095,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 200;
+ pdev->d3_delay = 300;
return 0;
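/* A small sketch of the MSI auto-detection added to sky2 above (helper name
 * is hypothetical, not part of the driver): disable_msi now defaults to -1,
 * meaning "decide from the DMI quirk table at probe time", while an explicit
 * 0 or 1 given by the user is left untouched.
 */
static int sky2_resolve_disable_msi(int module_param, int dmi_blacklisted)
{
	if (module_param == -1)			/* auto: consult the quirk table */
		return dmi_blacklisted ? 1 : 0;

	return module_param;			/* explicit user setting wins */
}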
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index f9149d2a4694..43656f961891 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,6 +1,6 @@
config NET_VENDOR_MEDIATEK
bool "MediaTek ethernet driver"
- depends on ARCH_MEDIATEK
+ depends on ARCH_MEDIATEK || SOC_MT7621
---help---
If you have a Mediatek SoC with ethernet, say Y.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 399f565dd85a..549d36497b8c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -226,7 +226,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
case SPEED_100:
mcr |= MAC_MCR_SPEED_100;
break;
- };
+ }
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
!mac->id && !mac->trgmii)
@@ -258,11 +258,6 @@ static void mtk_phy_link_adjust(struct net_device *dev)
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
- if (dev->phydev->link)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
-
if (!of_phy_is_fixed_link(mac->of_node))
phy_print_status(dev->phydev);
}
@@ -347,17 +342,6 @@ static int mtk_phy_connect(struct net_device *dev)
if (mtk_phy_connect_node(eth, mac, np))
goto err_phy;
- dev->phydev->autoneg = AUTONEG_ENABLE;
- dev->phydev->speed = 0;
- dev->phydev->duplex = 0;
-
- phy_set_max_speed(dev->phydev, SPEED_1000);
- phy_support_asym_pause(dev->phydev);
- linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- dev->phydev->advertising);
- phy_start_aneg(dev->phydev);
-
of_node_put(np);
return 0;
@@ -598,10 +582,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_zalloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &eth->phy_scratch_ring,
- GFP_ATOMIC);
+ eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &eth->phy_scratch_ring,
+ GFP_ATOMIC);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -1213,8 +1197,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
@@ -1310,9 +1294,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
}
- ring->dma = dma_zalloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dev,
+ rx_dma_size * sizeof(*ring->dma),
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
@@ -1761,6 +1745,22 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
return IRQ_HANDLED;
}
+static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
+ if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
+ mtk_handle_irq_rx(irq, _eth);
+ }
+ if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT)
+ mtk_handle_irq_tx(irq, _eth);
+ }
+
+ return IRQ_HANDLED;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
@@ -2501,7 +2501,10 @@ static int mtk_probe(struct platform_device *pdev)
}
for (i = 0; i < 3; i++) {
- eth->irq[i] = platform_get_irq(pdev, i);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
+ eth->irq[i] = eth->irq[0];
+ else
+ eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
return -ENXIO;
@@ -2544,13 +2547,21 @@ static int mtk_probe(struct platform_device *pdev)
goto err_deinit_hw;
}
- err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
- dev_name(eth->dev), eth);
- if (err)
- goto err_free_dev;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+ err = devm_request_irq(eth->dev, eth->irq[0],
+ mtk_handle_irq, 0,
+ dev_name(eth->dev), eth);
+ } else {
+ err = devm_request_irq(eth->dev, eth->irq[1],
+ mtk_handle_irq_tx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
- err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
- dev_name(eth->dev), eth);
+ err = devm_request_irq(eth->dev, eth->irq[2],
+ mtk_handle_irq_rx, 0,
+ dev_name(eth->dev), eth);
+ }
if (err)
goto err_free_dev;
@@ -2623,6 +2634,12 @@ static const struct mtk_soc_data mt2701_data = {
.required_pctl = true,
};
+static const struct mtk_soc_data mt7621_data = {
+ .caps = MTK_SHARED_INT,
+ .required_clks = MT7621_CLKS_BITMAP,
+ .required_pctl = false,
+};
+
static const struct mtk_soc_data mt7622_data = {
.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
.required_clks = MT7622_CLKS_BITMAP,
@@ -2637,6 +2654,7 @@ static const struct mtk_soc_data mt7623_data = {
const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
+ { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{},
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 46819297fc3e..f7501997cea0 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -363,6 +363,7 @@
#define ETHSYS_CHIPID4_7 0x4
#define MT7623_ETH 7623
#define MT7622_ETH 7622
+#define MT7621_ETH 7621
/* ethernet subsystem config register */
#define ETHSYS_SYSCFG0 0x14
@@ -488,6 +489,8 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL))
+#define MT7621_CLKS_BITMAP (0)
+
enum mtk_dev_state {
MTK_HW_INIT,
MTK_RESETTING
@@ -567,6 +570,7 @@ struct mtk_rx_ring {
#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
MTK_GMAC2_SGMII)
#define MTK_HWLRO BIT(12)
+#define MTK_SHARED_INT BIT(13)
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
/* struct mtk_eth_data - This is the structure holding all differences
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index f200b8c420d5..ff8057ed97ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -4,7 +4,6 @@
config MLX4_EN
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
- depends on MAY_USE_DEVLINK
depends on PCI && NETDEVICES && ETHERNET && INET
select MLX4_CORE
imply PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 9af34e03892c..b330020dc0d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -185,8 +185,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
bitmap->avail = num - reserved_top - reserved_bot;
bitmap->effective_len = bitmap->avail;
spin_lock_init(&bitmap->lock);
- bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
- GFP_KERNEL);
+ bitmap->table = bitmap_zalloc(bitmap->max, GFP_KERNEL);
if (!bitmap->table)
return -ENOMEM;
@@ -197,7 +196,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
- kfree(bitmap->table);
+ bitmap_free(bitmap->table);
}
struct mlx4_zone_allocator {
@@ -584,8 +583,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf =
- dma_zalloc_coherent(&dev->persist->pdev->dev,
- size, &t, GFP_KERNEL);
+ dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
+ GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
@@ -624,8 +623,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE, &t, GFP_KERNEL);
+ dma_alloc_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE, &t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e65bc3c95630..c19e74e6ac94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -3274,7 +3274,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
link_state, slave, port);
return -EINVAL;
- };
+ }
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->link_state = link_state;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index db909b6069b5..65f8a4b6ed0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -306,14 +306,16 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
if (entries_per_copy < entries) {
for (i = 0; i < entries / entries_per_copy; i++) {
- err = copy_to_user(buf, init_ents, PAGE_SIZE);
+ err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
+ -EFAULT : 0;
if (err)
goto out;
buf += PAGE_SIZE;
}
} else {
- err = copy_to_user(buf, init_ents, entries * cqe_size);
+ err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
+ -EFAULT : 0;
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6b88881b8e35..c1438ae52a11 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
- en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9a0881cb7f51..6c01314e87b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
}
#endif
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
/* We reach this function only after checking that any of
* the (IPv4 | IPv6) bits are set in cqe->status.
*/
@@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
__wsum hw_checksum = 0;
+ void *hdr;
+
+ /* CQE csum doesn't cover padding octets in short ethernet
+ * frames. And the pad field is appended prior to calculating
+ * and appending the FCS field.
+ *
+ * Detecting these padded frames requires verifying and parsing
+ * IP headers, so we simply force all those small frames to skip
+ * checksum complete.
+ */
+ if (short_frame(skb->len))
+ return -EINVAL;
- void *hdr = (u8 *)va + sizeof(struct ethhdr);
-
+ hdr = (u8 *)va + sizeof(struct ethhdr);
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -819,6 +832,11 @@ xdp_drop_no_cnt:
skb_record_rx_queue(skb, cq_ring);
if (likely(dev->features & NETIF_F_RXCSUM)) {
+ /* TODO: For IP non TCP/UDP packets when csum complete is
+ * not an option (not supported or any other reason) we can
+ * actually check cqe IPOK status bit and report
+ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
+ */
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
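/* A stand-alone restatement of the short-frame rule above, assuming the
 * usual <linux/if_ether.h> values ETH_ZLEN (60) and ETH_FCS_LEN (4): frames
 * of at most 64 octets may carry link-layer padding that the CQE checksum
 * does not cover, so checksum complete is skipped for them and the driver
 * falls back to the other RX checksum paths. The helper name is made up.
 */
#include <linux/if_ether.h>

static bool csum_complete_usable(unsigned int frame_len)
{
	/* mirrors short_frame(): padded frames cannot use checksum complete */
	return frame_len > ETH_ZLEN + ETH_FCS_LEN;
}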
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2df92dbd38e1..a5be27772b8e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -100,7 +100,7 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
req_not << 31),
eq->doorbell);
/* We still want ordering, just not swabbing, so add a barrier */
- mb();
+ wmb();
}
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
@@ -558,6 +558,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
__func__, be32_to_cpu(eqe->event.srq.srqn),
eq->eqn);
+ /* fall through */
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -820,7 +821,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
!!(eqe->owner & 0x80) ^
!!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
- };
+ }
++eq->cons_index;
eqes_found = 1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7df728f1e5b5..6e501af0e532 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
+ u64 qword_field;
u32 dword_field;
- int err;
+ u16 word_field;
u8 byte_field;
+ int err;
static const u8 a0_dmfs_query_hw_steering[] = {
[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* QPC/EEC/CQC/EQC/RDMARC attributes */
- MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
- MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
- MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
- MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
- MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
- MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
- MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
- MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
- MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
- MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
- MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
- MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
- MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+ MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ param->qpc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+ param->log_num_qps = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ param->srqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ param->log_num_srqs = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ param->cqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ param->log_num_cqs = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ param->altc_base = qword_field;
+ MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ param->auxc_base = qword_field;
+ MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ param->eqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ param->log_num_eqs = byte_field & 0x1f;
+ MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+ param->num_sys_eqs = word_field & 0xfff;
+ MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ param->rdmarc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+ param->log_rd_per_qp = byte_field & 0x7;
MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
- MLX4_GET(byte_field, outbox,
- INIT_HCA_FS_A0_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ param->log_mc_entry_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ param->log_mc_table_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
param->dmfs_high_steer_mode =
a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
} else {
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_hash_sz, outbox,
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ param->log_mc_entry_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ param->log_mc_hash_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ param->log_mc_table_sz = byte_field & 0x1f;
}
/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
- MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
- MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+ param->mw_enabled = byte_field >> 7;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ param->log_mpt_sz = byte_field & 0x3f;
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
/* UAR attributes */
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
- MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ param->log_uar_sz = byte_field & 0xf;
/* phv_check enable */
MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 4b4351141b94..d89a3da89e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
int i;
if (chunk->nsg > 0)
- pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
+ DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
- __free_pages(sg_page(&chunk->mem[i]),
- get_order(chunk->mem[i].length));
+ __free_pages(sg_page(&chunk->sg[i]),
+ get_order(chunk->sg[i].length));
}
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&dev->persist->pdev->dev,
- chunk->mem[i].length,
- lowmem_page_address(sg_page(&chunk->mem[i])),
- sg_dma_address(&chunk->mem[i]));
+ chunk->buf[i].size,
+ chunk->buf[i].addr,
+ chunk->buf[i].dma_addr);
}
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
return 0;
}
-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
- int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
+ int order, gfp_t gfp_mask)
{
- void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
- &sg_dma_address(mem), gfp_mask);
- if (!buf)
+ buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
+ &buf->dma_addr, gfp_mask);
+ if (!buf->addr)
return -ENOMEM;
- if (offset_in_page(buf)) {
- dma_free_coherent(dev, PAGE_SIZE << order,
- buf, sg_dma_address(mem));
+ if (offset_in_page(buf->addr)) {
+ dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
+ buf->dma_addr);
return -ENOMEM;
}
- sg_set_buf(mem, buf, PAGE_SIZE << order);
- sg_dma_len(mem) = PAGE_SIZE << order;
+ buf->size = PAGE_SIZE << order;
return 0;
}
@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
while (npages > 0) {
if (!chunk) {
- chunk = kmalloc_node(sizeof(*chunk),
+ chunk = kzalloc_node(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN),
dev->numa_node);
if (!chunk) {
- chunk = kmalloc(sizeof(*chunk),
+ chunk = kzalloc(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN));
if (!chunk)
goto fail;
}
+ chunk->coherent = coherent;
- sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
- chunk->npages = 0;
- chunk->nsg = 0;
+ if (!coherent)
+ sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
list_add_tail(&chunk->list, &icm->chunk_list);
}
@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
if (coherent)
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
- &chunk->mem[chunk->npages],
- cur_order, mask);
+ &chunk->buf[chunk->npages],
+ cur_order, mask);
else
- ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+ ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
cur_order, mask,
dev->numa_node);
@@ -205,9 +204,9 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
+ chunk->sg, chunk->npages,
+ DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
@@ -220,9 +219,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
}
if (!coherent && chunk) {
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
+ chunk->npages, DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
@@ -320,7 +318,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
u64 idx;
struct mlx4_icm_chunk *chunk;
struct mlx4_icm *icm;
- struct page *page = NULL;
+ void *addr = NULL;
if (!table->lowmem)
return NULL;
@@ -336,28 +334,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
list_for_each_entry(chunk, &icm->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
+ dma_addr_t dma_addr;
+ size_t len;
+
+ if (table->coherent) {
+ len = chunk->buf[i].size;
+ dma_addr = chunk->buf[i].dma_addr;
+ addr = chunk->buf[i].addr;
+ } else {
+ struct page *page;
+
+ len = sg_dma_len(&chunk->sg[i]);
+ dma_addr = sg_dma_address(&chunk->sg[i]);
+
+ /* XXX: we should never do this for highmem
+ * allocation. This function either needs
+ * to be split, or the kernel virtual address
+ * return needs to be made optional.
+ */
+ page = sg_page(&chunk->sg[i]);
+ addr = lowmem_page_address(page);
+ }
+
if (dma_handle && dma_offset >= 0) {
- if (sg_dma_len(&chunk->mem[i]) > dma_offset)
- *dma_handle = sg_dma_address(&chunk->mem[i]) +
- dma_offset;
- dma_offset -= sg_dma_len(&chunk->mem[i]);
+ if (len > dma_offset)
+ *dma_handle = dma_addr + dma_offset;
+ dma_offset -= len;
}
+
/*
* DMA mapping can merge pages but not split them,
* so if we found the page, dma_handle has already
* been assigned to.
*/
- if (chunk->mem[i].length > offset) {
- page = sg_page(&chunk->mem[i]);
+ if (len > offset)
goto out;
- }
- offset -= chunk->mem[i].length;
+ offset -= len;
}
}
+ addr = NULL;
out:
mutex_unlock(&table->mutex);
- return page ? lowmem_page_address(page) + offset : NULL;
+ return addr ? addr + offset : NULL;
}
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index c9169a490557..d199874b1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -47,11 +47,21 @@ enum {
MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
};
+struct mlx4_icm_buf {
+ void *addr;
+ size_t size;
+ dma_addr_t dma_addr;
+};
+
struct mlx4_icm_chunk {
struct list_head list;
int npages;
int nsg;
- struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
+ bool coherent;
+ union {
+ struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
+ struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
+ };
};
struct mlx4_icm {
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
{
- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+ if (iter->chunk->coherent)
+ return iter->chunk->buf[iter->page_idx].dma_addr;
+ else
+ return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
}
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
{
- return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+ if (iter->chunk->coherent)
+ return iter->chunk->buf[iter->page_idx].size;
+ else
+ return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index bdb8dd161923..1f6e16d5ea6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3981,6 +3981,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_params_unregister;
+ devlink_params_publish(devlink);
pci_save_state(pdev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 37a551436e4a..6debffb8336b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,7 +4,6 @@
config MLX5_CORE
tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
- depends on MAY_USE_DEVLINK
depends on PCI
imply PTP_1588_CLOCK
imply VXLAN
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9de9abacf7f6..1a16f6d73cbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+ transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o
@@ -22,7 +22,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
- en_selftest.o en/port.o en/monitor_stats.o
+ en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o
#
# Netdev extra
@@ -30,12 +30,12 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o
#
# Core extra
#
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 456f30007ad6..9008e17126db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
mutex_lock(&priv->alloc_mutex);
original_node = dev_to_node(&dev->pdev->dev);
set_dev_node(&dev->pdev->dev, node);
- cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
- dma_handle, GFP_KERNEL);
+ cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
+ GFP_KERNEL);
set_dev_node(&dev->pdev->dev, original_node);
mutex_unlock(&priv->alloc_mutex);
return cpu_handle;
@@ -186,10 +186,7 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
if (!pgdir)
return NULL;
- pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
- sizeof(unsigned long),
- GFP_KERNEL);
-
+ pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL);
if (!pgdir->bitmap) {
kfree(pgdir);
return NULL;
@@ -200,7 +197,7 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
&pgdir->db_dma, node);
if (!pgdir->db_page) {
- kfree(pgdir->bitmap);
+ bitmap_free(pgdir->bitmap);
kfree(pgdir);
return NULL;
}
@@ -280,7 +277,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
- kfree(db->u.pgdir->bitmap);
+ bitmap_free(db->u.pgdir->bitmap);
kfree(db->u.pgdir);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d3125cdf69db..be48c6440251 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -316,6 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+ case MLX5_CMD_OP_QUERY_HOST_PARAMS:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -627,6 +628,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
+ MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
default: return "unknown command opcode";
}
}
@@ -1583,6 +1585,24 @@ no_trig:
spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}
+void mlx5_cmd_flush(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ int i;
+
+ for (i = 0; i < cmd->max_reg_cmds; i++)
+ while (down_trylock(&cmd->sem))
+ mlx5_cmd_trigger_completions(dev);
+
+ while (down_trylock(&cmd->pages_sem))
+ mlx5_cmd_trigger_completions(dev);
+
+ /* Unlock cmdif */
+ up(&cmd->pages_sem);
+ for (i = 0; i < cmd->max_reg_cmds; i++)
+ up(&cmd->sem);
+}
+
static int status_to_err(u8 status)
{
return status ? -1 : 0; /* TBD more meaningful codes */
@@ -1711,12 +1731,57 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
}
EXPORT_SYMBOL(mlx5_cmd_exec);
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
- void *out, int out_size, mlx5_cmd_cbk_t callback,
- void *context)
+void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+ struct mlx5_async_ctx *ctx)
+{
+ ctx->dev = dev;
+ /* Starts at 1 to avoid doing wake_up if we are not cleaning up */
+ atomic_set(&ctx->num_inflight, 1);
+ init_waitqueue_head(&ctx->wait);
+}
+EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
+
+/**
+ * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
+ * @ctx: The ctx to clean
+ *
+ * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
+ * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
+ * the call to mlx5_cmd_cleanup_async_ctx().
+ */
+void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
+{
+ atomic_dec(&ctx->num_inflight);
+ wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
+}
+EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
+
+static void mlx5_cmd_exec_cb_handler(int status, void *_work)
+{
+ struct mlx5_async_work *work = _work;
+ struct mlx5_async_ctx *ctx = work->ctx;
+
+ work->user_callback(status, work);
+ if (atomic_dec_and_test(&ctx->num_inflight))
+ wake_up(&ctx->wait);
+}
+
+int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+ void *out, int out_size, mlx5_async_cbk_t callback,
+ struct mlx5_async_work *work)
{
- return cmd_exec(dev, in, in_size, out, out_size, callback, context,
- false);
+ int ret;
+
+ work->ctx = ctx;
+ work->user_callback = callback;
+ if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
+ return -EIO;
+ ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
+ mlx5_cmd_exec_cb_handler, work, false);
+ if (ret && atomic_dec_and_test(&ctx->num_inflight))
+ wake_up(&ctx->wait);
+
+ return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
@@ -1789,8 +1854,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
struct device *ddev = &dev->pdev->dev;
- cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
- &cmd->alloc_dma, GFP_KERNEL);
+ cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+ &cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
@@ -1804,9 +1869,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
cmd->alloc_dma);
- cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
- 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
- &cmd->alloc_dma, GFP_KERNEL);
+ cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
+ 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+ &cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
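/* Rough usage sketch for the async command context introduced above, based
 * only on the signatures visible in this patch; the surrounding structure,
 * mailbox layout and function names below are hypothetical. A caller embeds
 * a struct mlx5_async_work, submits commands through mlx5_cmd_exec_cb(), and
 * mlx5_cmd_cleanup_async_ctx() then waits for every callback to finish. In
 * real users the context typically outlives many commands; it is created and
 * destroyed in one function here only to keep the sketch short.
 */
#include <linux/mlx5/driver.h>

struct my_async_query {
	struct mlx5_async_work work;	/* embedded so the callback can use container_of() */
	u32 out[16];			/* illustrative output mailbox */
};

static void my_query_done(int status, struct mlx5_async_work *work)
{
	struct my_async_query *query = container_of(work, struct my_async_query, work);

	if (!status)
		pr_debug("async command done, first out dword %u\n", query->out[0]);
}

static void my_issue_query(struct mlx5_core_dev *dev, void *in, int in_size,
			   struct my_async_query *query)
{
	struct mlx5_async_ctx ctx;
	int err;

	mlx5_cmd_init_async_ctx(dev, &ctx);

	err = mlx5_cmd_exec_cb(&ctx, in, in_size, query->out, sizeof(query->out),
			       my_query_done, &query->work);
	if (err)
		pr_warn("failed to queue async command: %d\n", err);

	/* Returns only after my_query_done() has run for every queued command */
	mlx5_cmd_cleanup_async_ctx(&ctx);
}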
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 424457ff9759..8ecac81a385d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -258,6 +258,8 @@ const char *parse_fs_dst(struct trace_seq *p,
return ret;
}
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_ft);
+EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_ft);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_fg);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_fg);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_set_fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index d027ce00c8ce..a4cf123e3f17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -61,6 +61,41 @@ const char *parse_fs_dst(struct trace_seq *p,
const struct mlx5_flow_destination *dst,
u32 counter_id);
+TRACE_EVENT(mlx5_fs_add_ft,
+ TP_PROTO(const struct mlx5_flow_table *ft),
+ TP_ARGS(ft),
+ TP_STRUCT__entry(
+ __field(const struct mlx5_flow_table *, ft)
+ __field(u32, id)
+ __field(u32, level)
+ __field(u32, type)
+ ),
+ TP_fast_assign(
+ __entry->ft = ft;
+ __entry->id = ft->id;
+ __entry->level = ft->level;
+ __entry->type = ft->type;
+ ),
+ TP_printk("ft=%p id=%u level=%u type=%u \n",
+ __entry->ft, __entry->id, __entry->level, __entry->type)
+ );
+
+TRACE_EVENT(mlx5_fs_del_ft,
+ TP_PROTO(const struct mlx5_flow_table *ft),
+ TP_ARGS(ft),
+ TP_STRUCT__entry(
+ __field(const struct mlx5_flow_table *, ft)
+ __field(u32, id)
+ ),
+ TP_fast_assign(
+ __entry->ft = ft;
+ __entry->id = ft->id;
+
+ ),
+ TP_printk("ft=%p id=%u\n",
+ __entry->ft, __entry->id)
+ );
+
TRACE_EVENT(mlx5_fs_add_fg,
TP_PROTO(const struct mlx5_flow_group *fg),
TP_ARGS(fg),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
new file mode 100644
index 000000000000..4746f2d28fb6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "ecpf.h"
+
+bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
+{
+ return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
+}
+
+static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
+
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ MLX5_SET(enable_hca_in, in, function_id, 0);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+}
+
+static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
+
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ MLX5_SET(disable_hca_in, in, function_id, 0);
+ MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_peer_pf_enable_hca(dev);
+ if (err)
+ mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
+ err);
+
+ return err;
+}
+
+static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_peer_pf_disable_hca(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
+ err);
+ return;
+ }
+
+ err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
+ if (err)
+ mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
+ err);
+}
+
+int mlx5_ec_init(struct mlx5_core_dev *dev)
+{
+ int err = 0;
+
+ if (!mlx5_core_is_ecpf(dev))
+ return 0;
+
+ /* ECPF shall enable HCA for peer PF in the same way a PF
+ * does this for its VFs.
+ */
+ err = mlx5_peer_pf_init(dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_is_ecpf(dev))
+ return;
+
+ mlx5_peer_pf_cleanup(dev);
+}
+
+static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
+
+ MLX5_SET(query_host_params_in, in, opcode,
+ MLX5_CMD_OP_QUERY_HOST_PARAMS);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
+{
+ u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
+ int err;
+
+ err = mlx5_query_host_params_context(dev, out, sizeof(out));
+ if (err)
+ return err;
+
+ *num_vf = MLX5_GET(query_host_params_out, out,
+ host_params_context.host_num_of_vfs);
+ mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
new file mode 100644
index 000000000000..346372df218f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_ECPF_H__
+#define __MLX5_ECPF_H__
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+enum {
+ MLX5_ECPU_BIT_NUM = 23,
+};
+
+bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
+int mlx5_ec_init(struct mlx5_core_dev *dev);
+void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
+int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf);
+
+#else /* CONFIG_MLX5_ESWITCH */
+
+static inline bool
+mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; }
+static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; }
+static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {}
+static inline int
+mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
+{ return -EOPNOTSUPP; }
+
+#endif /* CONFIG_MLX5_ESWITCH */
+
+#endif /* __MLX5_ECPF_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8fa8fdd30b85..71c65cc17904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -76,15 +76,14 @@ struct page_pool;
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MLX5E_RX_MAX_HEAD (256)
+
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
-#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
-#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
-#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
- (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
- MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
+#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
+ MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -119,8 +118,6 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5E_RX_MAX_HEAD (256)
-
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
@@ -309,16 +306,18 @@ struct mlx5e_cq {
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_cq_decomp {
/* cqe decompression */
struct mlx5_cqe64 title;
struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
u8 mini_arr_idx;
- u16 decmprs_left;
- u16 decmprs_wqe_counter;
-
- /* control */
- struct mlx5_core_dev *mdev;
- struct mlx5_wq_ctrl wq_ctrl;
+ u16 left;
+ u16 wqe_counter;
} ____cacheline_aligned_in_smp;
struct mlx5e_tx_wqe_info {
@@ -388,10 +387,7 @@ struct mlx5e_txqsq {
struct mlx5e_channel *channel;
int txq_ix;
u32 rate_limit;
- struct mlx5e_txqsq_recover {
- struct work_struct recover_work;
- u64 last_recover;
- } recover;
+ struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
@@ -581,6 +577,7 @@ struct mlx5e_rq {
struct net_device *netdev;
struct mlx5e_rq_stats *stats;
struct mlx5e_cq cq;
+ struct mlx5e_cq_decomp cqd;
struct mlx5e_page_cache page_cache;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
@@ -638,6 +635,7 @@ struct mlx5e_channel {
struct hwtstamp_config *tstamp;
int ix;
int cpu;
+ cpumask_var_t xps_cpumask;
};
struct mlx5e_channels {
@@ -657,6 +655,7 @@ struct mlx5e_channel_stats {
enum {
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
+ MLX5E_STATE_XDP_TX_ENABLED,
};
struct mlx5e_rqt {
@@ -682,6 +681,13 @@ struct mlx5e_rss_params {
u8 hfunc;
};
+struct mlx5e_modify_sq_param {
+ int curr_state;
+ int next_state;
+ int rl_update;
+ int rl_index;
+};
+
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
@@ -737,6 +743,7 @@ struct mlx5e_priv {
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_tls *tls;
#endif
+ struct devlink_health_reporter *tx_reporter;
};
struct mlx5e_profile {
@@ -803,6 +810,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
void mlx5e_update_stats(struct mlx5e_priv *priv);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
@@ -850,9 +858,9 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
* switching channels
*/
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
-void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *new_chs,
- mlx5e_fp_hw_modify hw_modify);
+int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
@@ -866,6 +874,11 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
+int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
+ struct mlx5e_modify_sq_param *p);
+void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
+void mlx5e_tx_disable_queue(struct netdev_queue *txq);
+
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 2ce420851e77..7cd5b02e0f10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -66,7 +66,7 @@ static int mlx5e_monitor_event_handler(struct notifier_block *nb,
return NOTIFY_OK;
}
-void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
+static void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
{
MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
MONITOR_COUNTER);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 4a37713023be..122927f3a600 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -63,66 +63,168 @@ static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
[MLX5E_50GBASE_KR2] = 50000,
};
-u32 mlx5e_port_ptys2speed(u32 eth_proto_oper)
+static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_SGMII_100M] = 100,
+ [MLX5E_1000BASE_X_SGMII] = 1000,
+ [MLX5E_5GBASE_R] = 5000,
+ [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
+ [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
+ [MLX5E_400GAUI_8] = 400000,
+};
+
+static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
+ const u32 **arr, u32 *size)
+{
+ bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
+ ARRAY_SIZE(mlx5e_link_speed);
+ *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
+}
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5e_port_eth_proto *eproto)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ if (!eproto)
+ return -EINVAL;
+
+ if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
+ return -EOPNOTSUPP;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
+ if (err)
+ return err;
+
+ eproto->cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
+ eth_proto_capability);
+ eproto->admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin);
+ eproto->oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
+ return 0;
+}
+
+void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
+ u8 *an_disable_cap, u8 *an_disable_admin)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+
+ *an_status = 0;
+ *an_disable_cap = 0;
+ *an_disable_admin = 0;
+
+ if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1))
+ return;
+
+ *an_status = MLX5_GET(ptys_reg, out, an_status);
+ *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
+ *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+}
+
+int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, bool ext)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u8 an_disable_admin;
+ u8 an_disable_cap;
+ u8 an_status;
+
+ mlx5_port_query_eth_autoneg(dev, &an_status, &an_disable_cap,
+ &an_disable_admin);
+ if (!an_disable_cap && an_disable)
+ return -EPERM;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
+ MLX5_SET(ptys_reg, in, proto_mask, MLX5_PTYS_EN);
+ if (ext)
+ MLX5_SET(ptys_reg, in, ext_eth_proto_admin, proto_admin);
+ else
+ MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+}
+
+u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
{
unsigned long temp = eth_proto_oper;
+ const u32 *table;
u32 speed = 0;
+ u32 max_size;
int i;
- i = find_first_bit(&temp, MLX5E_LINK_MODES_NUMBER);
- if (i < MLX5E_LINK_MODES_NUMBER)
- speed = mlx5e_link_speed[i];
-
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ i = find_first_bit(&temp, max_size);
+ if (i < max_size)
+ speed = table[i];
return speed;
}
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
- u32 eth_proto_oper;
+ struct mlx5e_port_eth_proto eproto;
+ bool ext;
int err;
- err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+ ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
- return err;
+ goto out;
- eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- *speed = mlx5e_port_ptys2speed(eth_proto_oper);
+ *speed = mlx5e_port_ptys2speed(mdev, eproto.oper);
if (!(*speed))
err = -EINVAL;
+out:
return err;
}
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
+ struct mlx5e_port_eth_proto eproto;
u32 max_speed = 0;
- u32 proto_cap;
+ const u32 *table;
+ u32 max_size;
+ bool ext;
int err;
int i;
- err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
+ ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
return err;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
- if (proto_cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, mlx5e_link_speed[i]);
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ for (i = 0; i < max_size; ++i)
+ if (eproto.cap & MLX5E_PROT_MASK(i))
+ max_speed = max(max_speed, table[i]);
*speed = max_speed;
return 0;
}
-u32 mlx5e_port_speed2linkmodes(u32 speed)
+u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed)
{
u32 link_modes = 0;
+ const u32 *table;
+ u32 max_size;
int i;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (mlx5e_link_speed[i] == speed)
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ for (i = 0; i < max_size; ++i) {
+ if (table[i] == speed)
link_modes |= MLX5E_PROT_MASK(i);
}
-
return link_modes;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index cd2160b8c9bf..70f536ec51c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -36,10 +36,22 @@
#include <linux/mlx5/driver.h>
#include "en.h"
-u32 mlx5e_port_ptys2speed(u32 eth_proto_oper);
+struct mlx5e_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5e_port_eth_proto *eproto);
+void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
+ u8 *an_disable_cap, u8 *an_disable_admin);
+int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, bool ext);
+u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper);
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
-u32 mlx5e_port_speed2linkmodes(u32 speed);
+u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
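For illustration only (not part of the diff): the extended-PTYS-aware query flow these new helpers enable. It mirrors mlx5e_port_linkspeed() above; the example_* name is a placeholder.

#include "en/port.h"

/* Query the operational protocol of port 1 using the layout the device
 * actually implements (legacy vs. extended PTYS), then map it to a
 * speed in Mb/s via the matching table.
 */
static int example_get_oper_speed(struct mlx5_core_dev *mdev, u32 *speed)
{
	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
	struct mlx5e_port_eth_proto eproto;
	int err;

	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
	if (err)
		return err;

	*speed = mlx5e_port_ptys2speed(mdev, eproto.oper);
	return *speed ? 0 : -EINVAL;
}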
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h
new file mode 100644
index 000000000000..e78e92753d73
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5E_EN_REPORTER_H
+#define __MLX5E_EN_REPORTER_H
+
+#include <linux/mlx5/driver.h>
+#include "en.h"
+
+int mlx5e_tx_reporter_create(struct mlx5e_priv *priv);
+void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv);
+void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq);
+int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
new file mode 100644
index 000000000000..9d38e62cdf24
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <net/devlink.h>
+#include "reporter.h"
+#include "lib/eq.h"
+
+#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256
+
+struct mlx5e_tx_err_ctx {
+ int (*recover)(struct mlx5e_txqsq *sq);
+ struct mlx5e_txqsq *sq;
+};
+
+static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+
+ while (time_before(jiffies, exp_time)) {
+ if (sq->cc == sq->pc)
+ return 0;
+
+ msleep(20);
+ }
+
+ netdev_err(sq->channel->netdev,
+ "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
+ sq->sqn, sq->cc, sq->pc);
+
+ return -ETIMEDOUT;
+}
+
+static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
+{
+ WARN_ONCE(sq->cc != sq->pc,
+ "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
+ sq->sqn, sq->cc, sq->pc);
+ sq->cc = 0;
+ sq->dma_fifo_cc = 0;
+ sq->pc = 0;
+}
+
+static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
+{
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+ struct net_device *dev = sq->channel->netdev;
+ struct mlx5e_modify_sq_param msp = {0};
+ int err;
+
+ msp.curr_state = curr_state;
+ msp.next_state = MLX5_SQC_STATE_RST;
+
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
+ return err;
+ }
+
+ memset(&msp, 0, sizeof(msp));
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+ struct net_device *dev = sq->channel->netdev;
+ u8 state;
+ int err;
+
+ if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ return 0;
+
+ err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
+ sq->sqn, err);
+ return err;
+ }
+
+ if (state != MLX5_SQC_STATE_ERR) {
+ netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
+ return -EINVAL;
+ }
+
+ mlx5e_tx_disable_queue(sq->txq);
+
+ err = mlx5e_wait_for_sq_flush(sq);
+ if (err)
+ return err;
+
+ /* At this point, no new packets will arrive from the stack as TXQ is
+ * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
+ * pending WQEs, so it is now safe to reset the SQ.
+ */
+
+ err = mlx5e_sq_to_ready(sq, state);
+ if (err)
+ return err;
+
+ mlx5e_reset_txqsq_cc_pc(sq);
+ sq->stats->recover++;
+ mlx5e_activate_txqsq(sq);
+
+ return 0;
+}
+
+static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
+ char *err_str,
+ struct mlx5e_tx_err_ctx *err_ctx)
+{
+ if (IS_ERR_OR_NULL(tx_reporter)) {
+ netdev_err(err_ctx->sq->channel->netdev, "%s", err_str);
+ return err_ctx->recover(err_ctx->sq);
+ }
+
+ return devlink_health_report(tx_reporter, err_str, err_ctx);
+}
+
+void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq)
+{
+ char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
+ struct mlx5e_tx_err_ctx err_ctx = {0};
+
+ err_ctx.sq = sq;
+ err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
+ sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
+
+ mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
+ &err_ctx);
+}
+
+static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
+ u32 eqe_count;
+ int ret;
+
+ netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+ ret = eqe_count ? false : true;
+ if (!eqe_count) {
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ return ret;
+ }
+
+ netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
+ eqe_count, eq->core.eqn);
+ sq->channel->stats->eq_rearm++;
+ return ret;
+}
+
+int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
+{
+ char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
+ struct mlx5e_tx_err_ctx err_ctx;
+
+ err_ctx.sq = sq;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+ sprintf(err_str,
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+ sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ jiffies_to_usecs(jiffies - sq->txq->trans_start));
+
+ return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
+ &err_ctx);
+}
+
+/* state lock cannot be grabbed within this function.
+ * It can cause a deadlock or a read-after-free.
+ */
+static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
+{
+ return err_ctx->recover(err_ctx->sq);
+}
+
+static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
+{
+ int err;
+
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+ mlx5e_close_locked(priv->netdev);
+ err = mlx5e_open_locked(priv->netdev);
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
+ void *context)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_tx_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) :
+ mlx5e_tx_reporter_recover_all(priv);
+}
+
+static int
+mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
+ u32 sqn, u8 state, bool stopped)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ int i, err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < priv->channels.num * priv->channels.params.num_tc;
+ i++) {
+ struct mlx5e_txqsq *sq = priv->txq2sq[i];
+ u8 state;
+
+ err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ if (err)
+ break;
+
+ err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
+ state,
+ netif_xmit_stopped(sq->txq));
+ if (err)
+ break;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
+ .name = "tx",
+ .recover = mlx5e_tx_reporter_recover,
+ .diagnose = mlx5e_tx_reporter_diagnose,
+};
+
+#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
+
+int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct devlink *devlink = priv_to_devlink(mdev);
+
+ priv->tx_reporter =
+ devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
+ MLX5_REPORTER_TX_GRACEFUL_PERIOD,
+ true, priv);
+ if (IS_ERR(priv->tx_reporter))
+ netdev_warn(priv->netdev,
+ "Failed to create tx reporter, err = %ld\n",
+ PTR_ERR(priv->tx_reporter));
+ return IS_ERR_OR_NULL(priv->tx_reporter);
+}
+
+void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)
+{
+ if (IS_ERR_OR_NULL(priv->tx_reporter))
+ return;
+
+ devlink_health_reporter_destroy(priv->tx_reporter);
+}
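For illustration only (not part of the diff): the expected producer side of this reporter. The exact hook point in the TX completion path is an assumption here; what the patch guarantees is that sq->recover_work runs mlx5e_tx_err_cqe_work(), which calls mlx5e_tx_reporter_err_cqe().

/* Sketch of how an error CQE on a TXQ SQ would feed the reporter:
 * mark the SQ as recovering once, then defer to the work item so the
 * devlink health machinery runs in process context.
 */
static void example_handle_tx_err_cqe(struct mlx5e_txqsq *sq)
{
	if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		queue_work(sq->channel->priv->wq, &sq->recover_work);
}

Administrative recovery, i.e. .recover invoked with a NULL context, falls back to mlx5e_tx_reporter_recover_all(), which closes and reopens the netdev under the state lock.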
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 046948ead152..fa2a3c444cdc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -25,7 +25,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
*/
- if (!switchdev_port_same_parent_id(priv->netdev, dev) ||
+ if (!netdev_port_same_parent_id(priv->netdev, dev) ||
dst_is_lag_dev) {
*route_dev = uplink_dev;
*out_dev = *route_dev;
@@ -54,12 +54,24 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct neighbour *n = NULL;
#if IS_ENABLED(CONFIG_INET)
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct net_device *uplink_dev;
int ret;
+ if (mlx5_lag_is_multipath(mdev)) {
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ fl4->flowi4_oif = uplink_dev->ifindex;
+ }
+
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
ret = PTR_ERR_OR_ZERO(rt);
if (ret)
return ret;
+
+ if (mlx5_lag_is_multipath(mdev) && !rt->rt_gateway)
+ return -ENETUNREACH;
#else
return -EOPNOTSUPP;
#endif
@@ -256,6 +268,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
+ e->route_dev = route_dev;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -294,7 +307,9 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
- err = -EAGAIN;
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
goto out;
}
@@ -369,6 +384,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
+ e->route_dev = route_dev;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -406,7 +422,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
- err = -EAGAIN;
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
goto out;
}
@@ -496,25 +514,21 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
void *headers_c,
void *headers_v)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- f->mask);
void *misc_c = MLX5_ADDR_OF(fte_match_param,
spec->match_criteria,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters);
+ struct flow_match_ports enc_ports;
+
+ flow_rule_match_enc_ports(rule, &enc_ports);
/* Full udp dst port must be given */
- if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
- memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
+ memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
NL_SET_ERR_MSG_MOD(extack,
"VXLAN decap filter must include enc_dst_port condition");
netdev_warn(priv->netdev,
@@ -523,12 +537,12 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
}
/* udp dst port must be known as a VXLAN port */
- if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
NL_SET_ERR_MSG_MOD(extack,
"Matched UDP port is not registered as a VXLAN port");
netdev_warn(priv->netdev,
"UDP port %d is not registered as a VXLAN port\n",
- be16_to_cpu(key->dst));
+ be16_to_cpu(enc_ports.key->dst));
return -EOPNOTSUPP;
}
@@ -536,26 +550,26 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
+ ntohs(enc_ports.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ ntohs(enc_ports.key->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
+ ntohs(enc_ports.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+ ntohs(enc_ports.key->src));
/* match on VNI */
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->key);
- struct flow_dissector_key_keyid *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid enc_keyid;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
- be32_to_cpu(mask->keyid));
+ be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
- be32_to_cpu(key->keyid));
+ be32_to_cpu(enc_keyid.key->keyid));
}
return 0;
}
@@ -570,6 +584,7 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
NL_SET_ERR_MSG_MOD(f->common.extack,
@@ -587,21 +602,14 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
/* gre key */
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *mask = NULL;
- struct flow_dissector_key_keyid *key = NULL;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid enc_keyid;
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
MLX5_SET(fte_match_set_misc, misc_c,
- gre_key.key, be32_to_cpu(mask->keyid));
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->key);
+ gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v,
- gre_key.key, be32_to_cpu(key->keyid));
+ gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
return 0;
@@ -612,16 +620,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
- void *headers_v)
+ void *headers_v, u8 *match_level)
{
int tunnel_type;
int err = 0;
tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ *match_level = MLX5_MATCH_L4;
err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
headers_c, headers_v);
} else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+ *match_level = MLX5_MATCH_L3;
err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
headers_c, headers_v);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 706ce7bf15e7..b63f15de899d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
- void *headers_v);
+ void *headers_v, u8 *match_level);
#endif //__MLX5_EN_TC_TUNNEL_H__
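For illustration only (not part of the diff): the flow_rule accessor pattern that tc_tun.c now uses instead of raw skb_flow_dissector_target() lookups. Optional keys are probed with flow_rule_match_key() before the typed helper fills the key/mask pointers; the example_* function is a standalone sketch, not driver code.

#include <net/pkt_cls.h>
#include <net/flow_offload.h>

static void example_parse_enc_ports(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_ports enc_ports;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))
		return;

	flow_rule_match_enc_ports(rule, &enc_ports);
	/* enc_ports.key->dst and enc_ports.mask->dst are now valid */
}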
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 3740177eed09..03b2a9f9c589 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
int sq_num;
int i;
- if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+ /* this flag is sufficient, no need to test internal sq state */
+ if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
return -ENETDOWN;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
sq = &priv->channels.c[sq_num]->xdpsq;
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return -ENETDOWN;
-
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
struct mlx5e_xdp_info xdpi;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 3a67cb3cd179..ee27a7c8cd87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
+static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
+{
+ clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+ /* let other devices' NAPI instances see our new state */
+ synchronize_rcu();
+}
+
+static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
+{
+ return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
if (sq->doorbell_cseg) {
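For illustration only (not part of the diff): how the new XDP-TX flag is expected to bracket channel activation. The example_* wrappers stand in for the real activate/deactivate paths in en_main.c; the key point is that disabling clears the flag and then synchronize_rcu() waits out any mlx5e_xdp_xmit() caller that raced with it.

static void example_activate(struct mlx5e_priv *priv)
{
	mlx5e_xdp_tx_enable(priv);
	/* ... activate channels and TX queues ... */
}

static void example_deactivate(struct mlx5e_priv *priv)
{
	/* Redirecting devices call mlx5e_xdp_xmit() from their own NAPI
	 * contexts; after this returns, none of them can still be inside
	 * it on our SQs.
	 */
	mlx5e_xdp_tx_disable(priv);
	/* ... deactivate channels and TX queues ... */
}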
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 722998d68564..554672edf8c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1126,9 +1126,7 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
priv->channels.params.tx_min_inline_mode)
goto out;
- if (mlx5e_open_channels(priv, &new_channels))
- goto out;
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ mlx5e_safe_switch_channels(priv, &new_channels, NULL);
out:
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c9df08133718..0804b478ad19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -63,76 +63,147 @@ struct ptys2ethtool_config {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
};
-static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
+static
+struct ptys2ethtool_config ptys2legacy_ethtool_table[MLX5E_LINK_MODES_NUMBER];
+static
+struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER];
-#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, ...) \
+#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, table, ...) \
({ \
struct ptys2ethtool_config *cfg; \
const unsigned int modes[] = { __VA_ARGS__ }; \
- unsigned int i; \
- cfg = &ptys2ethtool_table[reg_]; \
+ unsigned int i, bit, idx; \
+ cfg = &ptys2##table##_ethtool_table[reg_]; \
bitmap_zero(cfg->supported, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
bitmap_zero(cfg->advertised, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
- __set_bit(modes[i], cfg->supported); \
- __set_bit(modes[i], cfg->advertised); \
+ bit = modes[i] % 64; \
+ idx = modes[i] / 64; \
+ __set_bit(bit, &cfg->supported[idx]); \
+ __set_bit(bit, &cfg->advertised[idx]); \
} \
})
void mlx5e_build_ptys2ethtool_map(void)
{
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII,
+ memset(ptys2legacy_ethtool_table, 0, sizeof(ptys2legacy_ethtool_table));
+ memset(ptys2ext_ethtool_table, 0, sizeof(ptys2ext_ethtool_table));
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, legacy,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, legacy,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, legacy,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, legacy,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, legacy,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, legacy,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, legacy,
ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, legacy,
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, legacy,
ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, legacy,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, legacy,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, legacy,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, legacy,
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, legacy,
ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, legacy,
ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, legacy,
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, legacy,
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, legacy,
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, legacy,
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, legacy,
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, legacy,
ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
- MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2,
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, legacy,
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_SGMII_100M, ext,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_X_SGMII, ext,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_5GBASE_R, ext,
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_XFI_XAUI_1, ext,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_XLAUI_4_XLPPI_4, ext,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GAUI_1_25GBASE_CR_KR, ext,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
+ ext,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR, ext,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_CAUI_4_100GBASE_CR4_KR4, ext,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_2_100GBASE_CR2_KR2, ext,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_4_200GBASE_CR4_KR4, ext,
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
+}
+
+static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
+ struct ptys2ethtool_config **arr,
+ u32 *size)
+{
+ bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+
+ *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+ *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+ ARRAY_SIZE(ptys2legacy_ethtool_table);
}
typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
@@ -298,11 +369,7 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
goto unlock;
}
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- goto unlock;
-
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
unlock:
mutex_unlock(&priv->state_lock);
@@ -354,32 +421,29 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
- if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
goto out;
}
- /* Create fresh channels with new parameters */
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- goto out;
-
arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE;
if (arfs_enabled)
mlx5e_arfs_disable(priv);
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
+
/* Switch to new channels, set new parameters and close old ones */
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
if (arfs_enabled) {
- err = mlx5e_arfs_enable(priv);
- if (err)
+ int err2 = mlx5e_arfs_enable(priv);
+
+ if (err2)
netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
- __func__, err);
+ __func__, err2);
}
out:
@@ -505,12 +569,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
goto out;
}
- /* open fresh channels with new coal parameters */
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- goto out;
-
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
out:
mutex_unlock(&priv->state_lock);
@@ -525,27 +584,35 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
return mlx5e_ethtool_set_coalesce(priv, coal);
}
-static void ptys2ethtool_supported_link(unsigned long *supported_modes,
+static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
+ unsigned long *supported_modes,
u32 eth_proto_cap)
{
unsigned long proto_cap = eth_proto_cap;
+ struct ptys2ethtool_config *table;
+ u32 max_size;
int proto;
- for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
+ mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+ for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(supported_modes, supported_modes,
- ptys2ethtool_table[proto].supported,
+ table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
+ unsigned long *advertising_modes,
u32 eth_proto_cap)
{
unsigned long proto_cap = eth_proto_cap;
+ struct ptys2ethtool_config *table;
+ u32 max_size;
int proto;
- for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
+ mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+ for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(advertising_modes, advertising_modes,
- ptys2ethtool_table[proto].advertised,
+ table[proto].advertised,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
@@ -695,13 +762,14 @@ static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
struct ethtool_link_ksettings *link_ksettings)
{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5e_port_ptys2speed(eth_proto_oper);
+ speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper);
if (!speed) {
speed = SPEED_UNKNOWN;
goto out;
@@ -714,22 +782,22 @@ out:
link_ksettings->base.duplex = duplex;
}
-static void get_supported(u32 eth_proto_cap,
+static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *supported = link_ksettings->link_modes.supported;
+ ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
- ptys2ethtool_supported_link(supported, eth_proto_cap);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
- u8 rx_pause,
+static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
+ u8 tx_pause, u8 rx_pause,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
+ ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
- ptys2ethtool_adver_link(advertising, eth_proto_cap);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
@@ -779,12 +847,12 @@ static u8 get_connector_port(u32 eth_proto, u8 connector_type)
return PORT_OTHER;
}
-static void get_lp_advertising(u32 eth_proto_lp,
+static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
- ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
+ ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
}
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -801,6 +869,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
u8 an_disable_admin;
u8 an_status;
u8 connector_type;
+ bool ext;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
@@ -809,22 +878,25 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
__func__, err);
goto err_query_regs;
}
-
- eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
- eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
- eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
- an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
- an_status = MLX5_GET(ptys_reg, out, an_status);
- connector_type = MLX5_GET(ptys_reg, out, connector_type);
+ ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
+ eth_proto_capability);
+ eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
+ eth_proto_admin);
+ eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
+ eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+ an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+ an_status = MLX5_GET(ptys_reg, out, an_status);
+ connector_type = MLX5_GET(ptys_reg, out, connector_type);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- get_supported(eth_proto_cap, link_ksettings);
- get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+ get_supported(mdev, eth_proto_cap, link_ksettings);
+ get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -833,7 +905,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
connector_type);
ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
connector_type);
- get_lp_advertising(eth_proto_lp, link_ksettings);
+ get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
ethtool_link_ksettings_add_link_mode(link_ksettings,
@@ -844,9 +916,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
- if (get_fec_supported_advertised(mdev, link_ksettings))
+ err = get_fec_supported_advertised(mdev, link_ksettings);
+ if (err) {
netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
__func__, err);
+ err = 0; /* don't fail caps query because of FEC error */
+ }
if (!an_disable_admin)
ethtool_link_ksettings_add_link_mode(link_ksettings,
@@ -869,7 +944,9 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (bitmap_intersects(ptys2ethtool_table[i].advertised,
+ if (*ptys2legacy_ethtool_table[i].advertised == 0)
+ continue;
+ if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised,
link_modes,
__ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
@@ -878,13 +955,34 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
return ptys_modes;
}
+static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
+{
+ u32 i, ptys_modes = 0;
+ unsigned long modes[2];
+
+ for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) {
+ if (*ptys2ext_ethtool_table[i].advertised == 0)
+ continue;
+ memset(modes, 0, sizeof(modes));
+ bitmap_and(modes, ptys2ext_ethtool_table[i].advertised,
+ link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] &&
+ modes[1] == ptys2ext_ethtool_table[i].advertised[1])
+ ptys_modes |= MLX5E_PROT_MASK(i);
+ }
+ return ptys_modes;
+}
+
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 eth_proto_cap, eth_proto_admin;
+ struct mlx5e_port_eth_proto eproto;
bool an_changes = false;
u8 an_disable_admin;
+ bool ext_supported;
+ bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
@@ -892,20 +990,33 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
u32 speed;
int err;
- speed = link_ksettings->base.speed;
+ u32 (*ethtool2ptys_adver_func)(const unsigned long *adver);
- link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
- mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
- mlx5e_port_speed2linkmodes(speed);
+#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
- err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+ ext_requested = (link_ksettings->link_modes.advertising[0] >
+ MLX5E_PTYS_EXT);
+ ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+
+ /* when ptys_extended_ethernet is set, legacy link modes are deprecated */
+ if (ext_requested != ext_supported)
+ return -EPROTONOSUPPORT;
+
+ speed = link_ksettings->base.speed;
+ ethtool2ptys_adver_func = ext_requested ?
+ mlx5e_ethtool2ptys_ext_adver_link :
+ mlx5e_ethtool2ptys_adver_link;
+ err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
if (err) {
- netdev_err(priv->netdev, "%s: query port eth proto cap failed: %d\n",
+ netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
goto out;
}
+ link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
+ ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) :
+ mlx5e_port_speed2linkmodes(mdev, speed);
- link_modes = link_modes & eth_proto_cap;
+ link_modes = link_modes & eproto.cap;
if (!link_modes) {
netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
__func__);
@@ -913,24 +1024,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
- if (err) {
- netdev_err(priv->netdev, "%s: query port eth proto admin failed: %d\n",
- __func__, err);
- goto out;
- }
-
- mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
- &an_disable_cap, &an_disable_admin);
+ mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
+ &an_disable_admin);
an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
an_changes = ((!an_disable && an_disable_admin) ||
(an_disable && !an_disable_admin));
- if (!an_changes && link_modes == eth_proto_admin)
+ if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
+ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
mlx5_toggle_port_link(mdev);
out:
@@ -1518,7 +1622,6 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
struct mlx5e_channels new_channels = {};
bool mode_changed;
u8 cq_period_mode, current_cq_period_mode;
- int err = 0;
cq_period_mode = enable ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
@@ -1546,12 +1649,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
return 0;
}
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- return err;
-
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
- return 0;
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -1584,11 +1682,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return 0;
}
- err = mlx5e_open_channels(priv, &new_channels);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
if (err)
return err;
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
MLX5E_GET_PFLAG(&priv->channels.params,
MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
@@ -1621,7 +1718,6 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- int err;
if (enable) {
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
@@ -1643,12 +1739,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return 0;
}
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- return err;
-
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
- return 0;
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
@@ -1691,12 +1782,8 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
return 0;
}
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- return err;
-
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
- return 0;
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ return err;
}
static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
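For illustration only (not part of the diff): what the MLX5E_PTYS_EXT comparison in mlx5e_ethtool_set_link_ksettings() boils down to. On a 64-bit unsigned long, advertising[0] > MLX5E_PTYS_EXT holds exactly when some bit at or above ETHTOOL_LINK_MODE_50000baseKR_Full_BIT is advertised, i.e. when the request can only be expressed in the extended link-mode table. The helper below is an equivalent, standalone restatement under that 64-bit assumption.

#include <linux/ethtool.h>

/* Equivalent formulation of the ext_requested test, assuming 64-bit
 * unsigned long so the 50000baseKR_Full bit still lives in word 0.
 */
static bool example_ext_requested(const unsigned long *advertising)
{
	unsigned long legacy_bits =
		(1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1;

	return (advertising[0] & ~legacy_bits) != 0;
}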
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8cfd2ec7c0a2..b5fdbd3190d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
+#include <linux/if_bridge.h>
#include <net/page_pool.h>
#include "eswitch.h"
#include "en.h"
@@ -51,6 +52,7 @@
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
+#include "en/reporter.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -171,8 +173,7 @@ static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
- return MLX5E_MPWQE_STRIDE_SZ(mdev,
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
+ return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
@@ -950,7 +951,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (params->rx_dim_enabled)
__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
return 0;
@@ -1160,7 +1161,7 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
return 0;
}
-static void mlx5e_sq_recover(struct work_struct *work);
+static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
struct mlx5e_params *params,
@@ -1182,7 +1183,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
- INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
+ INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev))
@@ -1270,15 +1271,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
return err;
}
-struct mlx5e_modify_sq_param {
- int curr_state;
- int next_state;
- bool rl_update;
- int rl_index;
-};
-
-static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
- struct mlx5e_modify_sq_param *p)
+int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
+ struct mlx5e_modify_sq_param *p)
{
void *in;
void *sqc;
@@ -1376,17 +1370,7 @@ err_free_txqsq:
return err;
}
-static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
-{
- WARN_ONCE(sq->cc != sq->pc,
- "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
- sq->sqn, sq->cc, sq->pc);
- sq->cc = 0;
- sq->dma_fifo_cc = 0;
- sq->pc = 0;
-}
-
-static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
@@ -1395,7 +1379,7 @@ static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
netif_tx_start_queue(sq->txq);
}
-static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+void mlx5e_tx_disable_queue(struct netdev_queue *txq)
{
__netif_tx_lock_bh(txq);
netif_tx_stop_queue(txq);
@@ -1411,7 +1395,7 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
/* prevent netif_tx_wake_queue */
napi_synchronize(&c->napi);
- netif_tx_disable_queue(sq->txq);
+ mlx5e_tx_disable_queue(sq->txq);
/* last doorbell out, godspeed .. */
if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
@@ -1431,6 +1415,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_rate_limit rl = {0};
cancel_work_sync(&sq->dim.work);
+ cancel_work_sync(&sq->recover_work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
rl.rate = sq->rate_limit;
@@ -1440,105 +1425,12 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
mlx5e_free_txqsq(sq);
}
-static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
-{
- unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
-
- while (time_before(jiffies, exp_time)) {
- if (sq->cc == sq->pc)
- return 0;
-
- msleep(20);
- }
-
- netdev_err(sq->channel->netdev,
- "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
- sq->sqn, sq->cc, sq->pc);
-
- return -ETIMEDOUT;
-}
-
-static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
-{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
- struct net_device *dev = sq->channel->netdev;
- struct mlx5e_modify_sq_param msp = {0};
- int err;
-
- msp.curr_state = curr_state;
- msp.next_state = MLX5_SQC_STATE_RST;
-
- err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
- if (err) {
- netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
- return err;
- }
-
- memset(&msp, 0, sizeof(msp));
- msp.curr_state = MLX5_SQC_STATE_RST;
- msp.next_state = MLX5_SQC_STATE_RDY;
-
- err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
- if (err) {
- netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
- return err;
- }
-
- return 0;
-}
-
-static void mlx5e_sq_recover(struct work_struct *work)
+static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{
- struct mlx5e_txqsq_recover *recover =
- container_of(work, struct mlx5e_txqsq_recover,
- recover_work);
- struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
- recover);
- struct mlx5_core_dev *mdev = sq->channel->mdev;
- struct net_device *dev = sq->channel->netdev;
- u8 state;
- int err;
-
- err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
- if (err) {
- netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
- sq->sqn, err);
- return;
- }
-
- if (state != MLX5_RQC_STATE_ERR) {
- netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
- return;
- }
-
- netif_tx_disable_queue(sq->txq);
+ struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
+ recover_work);
- if (mlx5e_wait_for_sq_flush(sq))
- return;
-
- /* If the interval between two consecutive recovers per SQ is too
- * short, don't recover to avoid infinite loop of ERR_CQE -> recover.
- * If we reached this state, there is probably a bug that needs to be
- * fixed. let's keep the queue close and let tx timeout cleanup.
- */
- if (jiffies_to_msecs(jiffies - recover->last_recover) <
- MLX5E_SQ_RECOVER_MIN_INTERVAL) {
- netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
- sq->sqn);
- return;
- }
-
- /* At this point, no new packets will arrive from the stack as TXQ is
- * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
- * pending WQEs. SQ can safely reset the SQ.
- */
- if (mlx5e_sq_to_ready(sq, state))
- return;
-
- mlx5e_reset_txqsq_cc_pc(sq);
- sq->stats->recover++;
- recover->last_recover = jiffies;
- mlx5e_activate_txqsq(sq);
+ mlx5e_tx_reporter_err_cqe(sq);
}
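/*
 * Self-contained illustration (not part of the patch) of the container_of()
 * recovery used by mlx5e_tx_err_cqe_work() above: the work callback only gets
 * a pointer to the embedded member, and the owning SQ is recovered from the
 * member's offset.  container_of and the demo_* types are defined locally so
 * the snippet builds outside the kernel.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_work { int pending; };

struct demo_txqsq {
	unsigned int sqn;
	struct demo_work recover_work;	/* embedded member, like sq->recover_work */
};

/* the callback only receives the embedded work pointer */
static void demo_err_cqe_work(struct demo_work *recover_work)
{
	struct demo_txqsq *sq = container_of(recover_work, struct demo_txqsq,
					     recover_work);

	printf("recovering SQ 0x%x\n", sq->sqn);
}

int main(void)
{
	struct demo_txqsq sq = { .sqn = 0x2a };

	demo_err_cqe_work(&sq.recover_work);
	return 0;
}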
static int mlx5e_open_icosq(struct mlx5e_channel *c,
@@ -1950,6 +1842,29 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
+static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
+ struct mlx5e_params *params)
+{
+ int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
+ int irq;
+
+ if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
+
+ cpumask_set_cpu(cpu, c->xps_cpumask);
+ }
+
+ return 0;
+}
+
+static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
+{
+ free_cpumask_var(c->xps_cpumask);
+}
+
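/*
 * Standalone sketch of the IRQ striding used by mlx5e_alloc_xps_cpumask()
 * above: channel ix claims every num_channels-th completion vector starting
 * at ix, so all vectors are covered even when there are more vectors than
 * channels.  The vector and channel counts are made-up demo inputs, and plain
 * integers stand in for the per-IRQ cpumasks.
 */
#include <stdio.h>

int main(void)
{
	const int num_comp_vectors = 8;	/* assumed values for the demo */
	const int num_channels = 3;
	int ix, irq;

	for (ix = 0; ix < num_channels; ix++) {
		printf("channel %d gets IRQs:", ix);
		/* same stride loop as mlx5e_alloc_xps_cpumask() */
		for (irq = ix; irq < num_comp_vectors; irq += num_channels)
			printf(" %d", irq);
		printf("\n");
	}
	return 0;
}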
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -1982,9 +1897,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->num_tc = params->num_tc;
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
-
c->irq_desc = irq_to_desc(irq);
+ err = mlx5e_alloc_xps_cpumask(c, params);
+ if (err)
+ goto err_free_channel;
+
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
@@ -2067,6 +1985,9 @@ err_close_icosq_cq:
err_napi_del:
netif_napi_del(&c->napi);
+ mlx5e_free_xps_cpumask(c);
+
+err_free_channel:
kvfree(c);
return err;
@@ -2079,7 +2000,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_rq(&c->rq);
- netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
+ netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2107,6 +2028,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
+ mlx5e_free_xps_cpumask(c);
kvfree(c);
}
@@ -2380,6 +2302,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
+ if (!IS_ERR_OR_NULL(priv->tx_reporter))
+ devlink_health_reporter_state_update(priv->tx_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+
kvfree(cparam);
return 0;
@@ -2938,6 +2864,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_build_tx2sq_maps(priv);
mlx5e_activate_channels(&priv->channels);
+ mlx5e_xdp_tx_enable(priv);
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
@@ -2959,16 +2886,18 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
*/
netif_tx_stop_all_queues(priv->netdev);
netif_tx_disable(priv->netdev);
+ mlx5e_xdp_tx_disable(priv);
mlx5e_deactivate_channels(&priv->channels);
}
-void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *new_chs,
- mlx5e_fp_hw_modify hw_modify)
+static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_hw_modify hw_modify)
{
struct net_device *netdev = priv->netdev;
int new_num_txqs;
int carrier_ok;
+
new_num_txqs = new_chs->num * new_chs->params.num_tc;
carrier_ok = netif_carrier_ok(netdev);
@@ -2994,6 +2923,20 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
+int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_hw_modify hw_modify)
+{
+ int err;
+
+ err = mlx5e_open_channels(priv, new_chs);
+ if (err)
+ return err;
+
+ mlx5e_switch_priv_channels(priv, new_chs, hw_modify);
+ return 0;
+}
+
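/*
 * Illustrative, self-contained sketch (not part of the patch) of the
 * open-before-switch pattern that the new mlx5e_safe_switch_channels()
 * wrapper above captures: the new channel set is opened first and the active
 * set is swapped only on success, so a failure leaves the current
 * configuration untouched.  All demo_* names are invented for the sketch.
 */
#include <stdio.h>

struct demo_channels { int num; };

/* hypothetical stand-ins for mlx5e_open_channels()/mlx5e_switch_priv_channels() */
static int demo_open(struct demo_channels *chs)
{
	return chs->num > 0 ? 0 : -1;	/* pretend opening zero channels fails */
}

static void demo_switch(struct demo_channels *active, struct demo_channels *new_chs)
{
	*active = *new_chs;		/* the old set would be released here */
}

/* mirrors the structure of mlx5e_safe_switch_channels(): open first, switch on success */
static int demo_safe_switch(struct demo_channels *active, struct demo_channels *new_chs)
{
	int err = demo_open(new_chs);

	if (err)
		return err;		/* active channels stay untouched */

	demo_switch(active, new_chs);
	return 0;
}

int main(void)
{
	struct demo_channels active = { .num = 4 };
	struct demo_channels bad = { .num = 0 }, good = { .num = 8 };

	printf("bad switch: %d, num stays %d\n",
	       demo_safe_switch(&active, &bad), active.num);
	printf("good switch: %d, num now %d\n",
	       demo_safe_switch(&active, &good), active.num);
	return 0;
}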
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
@@ -3207,6 +3150,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
int tc;
+ mlx5e_tx_reporter_destroy(priv);
for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
@@ -3409,13 +3353,12 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
goto out;
}
- err = mlx5e_open_channels(priv, &new_channels);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
if (err)
goto out;
priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
new_channels.params.num_tc);
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -3492,11 +3435,32 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
+{
+ int i;
+
+ for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+ struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
+ struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
+ int j;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+
+ for (j = 0; j < priv->max_opened_tc; j++) {
+ struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ s->tx_dropped += sq_stats->dropped;
+ }
+ }
+}
+
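/*
 * Minimal sketch of the aggregation mlx5e_fold_sw_stats64() above performs:
 * per-channel RQ counters plus per-TC SQ counters are summed into one
 * rtnl_link_stats64-like total.  The demo_* structs and sample numbers are
 * invented for the example.
 */
#include <stdio.h>

#define DEMO_CHANNELS	2
#define DEMO_TCS	2

struct demo_sq_stats { unsigned long packets, bytes, dropped; };
struct demo_rq_stats { unsigned long packets, bytes; };
struct demo_channel_stats {
	struct demo_rq_stats rq;
	struct demo_sq_stats sq[DEMO_TCS];
};
struct demo_link_stats {
	unsigned long rx_packets, rx_bytes, tx_packets, tx_bytes, tx_dropped;
};

/* same folding structure as mlx5e_fold_sw_stats64() */
static void demo_fold(const struct demo_channel_stats *ch, int nch, int ntc,
		      struct demo_link_stats *s)
{
	int i, j;

	for (i = 0; i < nch; i++) {
		s->rx_packets += ch[i].rq.packets;
		s->rx_bytes   += ch[i].rq.bytes;
		for (j = 0; j < ntc; j++) {
			s->tx_packets += ch[i].sq[j].packets;
			s->tx_bytes   += ch[i].sq[j].bytes;
			s->tx_dropped += ch[i].sq[j].dropped;
		}
	}
}

int main(void)
{
	struct demo_channel_stats ch[DEMO_CHANNELS] = {
		{ .rq = { 10, 1000 }, .sq = { { 5, 500, 0 }, { 6, 600, 1 } } },
		{ .rq = { 20, 2000 }, .sq = { { 7, 700, 0 }, { 8, 800, 2 } } },
	};
	struct demo_link_stats s = { 0 };

	demo_fold(ch, DEMO_CHANNELS, DEMO_TCS, &s);
	printf("rx %lu/%lu tx %lu/%lu dropped %lu\n",
	       s.rx_packets, s.rx_bytes, s.tx_packets, s.tx_bytes, s.tx_dropped);
	return 0;
}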
void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_sw_stats *sstats = &priv->stats.sw;
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
@@ -3511,12 +3475,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
} else {
- mlx5e_grp_sw_update_stats(priv);
- stats->rx_packets = sstats->rx_packets;
- stats->rx_bytes = sstats->rx_bytes;
- stats->tx_packets = sstats->tx_packets;
- stats->tx_bytes = sstats->tx_bytes;
- stats->tx_dropped = sstats->tx_queue_dropped;
+ mlx5e_fold_sw_stats64(priv, stats);
}
stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
@@ -3609,11 +3568,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- goto out;
-
- mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -3831,11 +3786,10 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
goto out;
}
- err = mlx5e_open_channels(priv, &new_channels);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb);
if (err)
goto out;
- mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb);
netdev->mtu = new_channels.params.sw_mtu;
out:
@@ -4178,31 +4132,13 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
-static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
- struct mlx5e_txqsq *sq)
-{
- struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
- u32 eqe_count;
-
- netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
- eq->core.eqn, eq->core.cons_index, eq->core.irqn);
-
- eqe_count = mlx5_eq_poll_irq_disabled(eq);
- if (!eqe_count)
- return false;
-
- netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn);
- sq->channel->stats->eq_rearm++;
- return true;
-}
-
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
tx_timeout_work);
- struct net_device *dev = priv->netdev;
- bool reopen_channels = false;
- int i, err;
+ bool report_failed = false;
+ int err;
+ int i;
rtnl_lock();
mutex_lock(&priv->state_lock);
@@ -4211,31 +4147,22 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
goto unlock;
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
+ struct netdev_queue *dev_queue =
+ netdev_get_tx_queue(priv->netdev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
if (!netif_xmit_stopped(dev_queue))
continue;
- netdev_err(dev,
- "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
- i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
- jiffies_to_usecs(jiffies - dev_queue->trans_start));
-
- /* If we recover a lost interrupt, most likely TX timeout will
- * be resolved, skip reopening channels
- */
- if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- reopen_channels = true;
- }
+ if (mlx5e_tx_reporter_timeout(sq))
+ report_failed = true;
}
- if (!reopen_channels)
+ if (!report_failed)
goto unlock;
- mlx5e_close_locked(dev);
- err = mlx5e_open_locked(dev);
+ mlx5e_close_locked(priv->netdev);
+ err = mlx5e_open_locked(priv->netdev);
if (err)
netdev_err(priv->netdev,
"mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
@@ -4383,6 +4310,61 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 mode, setting;
+ int err;
+
+ err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
+ if (err)
+ return err;
+ mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ mode,
+ 0, 0, nlflags, filter_mask, NULL);
+}
+
+static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct nlattr *attr, *br_spec;
+ u16 mode = BRIDGE_MODE_UNDEF;
+ u8 setting;
+ int rem;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ if (nla_len(attr) < sizeof(mode))
+ return -EINVAL;
+
+ mode = nla_get_u16(attr);
+ if (mode > BRIDGE_MODE_VEPA)
+ return -EINVAL;
+
+ break;
+ }
+
+ if (mode == BRIDGE_MODE_UNDEF)
+ return -EINVAL;
+
+ setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
+ return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
+}
+#endif
+
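/*
 * Sketch of the mode handling in mlx5e_bridge_setlink() above: only VEB and
 * VEPA are accepted, and VEPA maps to setting = 1.  The BRIDGE_MODE_* values
 * are redefined locally (the kernel takes them from linux/if_bridge.h) so the
 * snippet builds in userspace.  With iproute2 this path is typically reached
 * by something like "bridge link set dev <uplink> hwmode vepa" (command given
 * as an assumption; check your iproute2 version).
 */
#include <stdio.h>
#include <errno.h>

/* local stand-ins; kernel values live in include/uapi/linux/if_bridge.h */
#define BRIDGE_MODE_VEB		0
#define BRIDGE_MODE_VEPA	1
#define BRIDGE_MODE_UNDEF	0xFFFF

/* mirrors the validation + mapping done in mlx5e_bridge_setlink() */
static int demo_set_bridge_mode(unsigned int mode, unsigned char *setting)
{
	if (mode == BRIDGE_MODE_UNDEF || mode > BRIDGE_MODE_VEPA)
		return -EINVAL;

	*setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
	return 0;
}

int main(void)
{
	unsigned int modes[] = { BRIDGE_MODE_VEB, BRIDGE_MODE_VEPA, 7 };
	unsigned char setting;
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		int err = demo_set_bridge_mode(modes[i], &setting);

		if (err)
			printf("mode %u rejected (%d)\n", modes[i], err);
		else
			printf("mode %u -> vepa setting %u\n", modes[i], setting);
	}
	return 0;
}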
const struct net_device_ops mlx5e_netdev_ops = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@@ -4409,6 +4391,9 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_MLX5_ESWITCH
+ .ndo_bridge_setlink = mlx5e_bridge_setlink,
+ .ndo_bridge_getlink = mlx5e_bridge_getlink,
+
/* SRIOV E-Switch NDOs */
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
@@ -4908,6 +4893,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_initialize(priv);
#endif
+ mlx5e_tx_reporter_create(priv);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 96cc0c6a4014..a1a3e2774989 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -44,6 +44,7 @@
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"
+#include "lib/port_tun.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -58,7 +59,8 @@ struct mlx5e_rep_indr_block_priv {
struct list_head list;
};
-static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);
+static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev);
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
@@ -152,7 +154,7 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- if (rep->vport == FDB_UPLINK_VPORT)
+ if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_uplink_rep_update_hw_counters(priv);
else
mlx5e_vf_rep_update_hw_counters(priv);
@@ -161,26 +163,16 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
- struct mlx5e_rq_stats *rq_stats;
- struct mlx5e_sq_stats *sq_stats;
- int i, j;
+ struct rtnl_link_stats64 stats64 = {};
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->channels.num; i++) {
- struct mlx5e_channel *c = priv->channels.c[i];
-
- rq_stats = c->rq.stats;
-
- s->rx_packets += rq_stats->packets;
- s->rx_bytes += rq_stats->bytes;
-
- for (j = 0; j < priv->channels.params.num_tc; j++) {
- sq_stats = c->sq[j].stats;
+ mlx5e_fold_sw_stats64(priv, &stats64);
- s->tx_packets += sq_stats->packets;
- s->tx_bytes += sq_stats->bytes;
- }
- }
+ s->rx_packets = stats64.rx_packets;
+ s->rx_bytes = stats64.rx_bytes;
+ s->tx_packets = stats64.tx_packets;
+ s->tx_bytes = stats64.tx_bytes;
+ s->tx_queue_dropped = stats64.tx_dropped;
}
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
@@ -193,8 +185,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
return;
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_rep_update_sw_counters(priv);
+ mlx5e_rep_update_sw_counters(priv);
mlx5e_rep_update_hw_counters(priv);
mutex_unlock(&priv->state_lock);
@@ -391,7 +382,8 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -408,20 +400,14 @@ static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
uplink_priv = netdev_priv(uplink_dev);
}
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = ETH_ALEN;
- if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
- ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr);
- } else {
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
+ ppid->id_len = ETH_ALEN;
+ if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
+ ether_addr_copy(ppid->id, uplink_upper->dev_addr);
+ } else {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
- ether_addr_copy(attr->u.ppid.id, rep->hw_id);
- }
- break;
- default:
- return -EOPNOTSUPP;
+ ether_addr_copy(ppid->id, rep->hw_id);
}
return 0;
@@ -594,6 +580,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
ether_addr_copy(e->h_dest, ha);
ether_addr_copy(eth->h_dest, ha);
+ /* Update the encap source mac, since the flows may be
+ * deleted when the encap source mac changes.
+ */
+ ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
mlx5e_tc_encap_flows_add(priv, e);
}
@@ -663,7 +653,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
list_for_each_entry_safe(cb_priv, temp, head, list) {
- mlx5e_rep_indr_unregister_block(cb_priv->netdev);
+ mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
kfree(cb_priv);
}
}
@@ -735,7 +725,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
err = tcf_block_cb_register(f->block,
mlx5e_rep_indr_setup_block_cb,
- netdev, indr_priv, f->extack);
+ indr_priv, indr_priv, f->extack);
if (err) {
list_del(&indr_priv->list);
kfree(indr_priv);
@@ -743,14 +733,15 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
return err;
case TC_BLOCK_UNBIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (!indr_priv)
+ return -ENOENT;
+
tcf_block_cb_unregister(f->block,
mlx5e_rep_indr_setup_block_cb,
- netdev);
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
- if (indr_priv) {
- list_del(&indr_priv->list);
- kfree(indr_priv);
- }
+ indr_priv);
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
return 0;
default:
@@ -779,7 +770,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
err = __tc_indr_block_cb_register(netdev, rpriv,
mlx5e_rep_indr_setup_tc_cb,
- netdev);
+ rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
@@ -789,10 +780,11 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
return err;
}
-static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
+static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
{
__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
- netdev);
+ rpriv);
}
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
@@ -811,7 +803,7 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
mlx5e_rep_indr_register_block(rpriv, netdev);
break;
case NETDEV_UNREGISTER:
- mlx5e_rep_indr_unregister_block(netdev);
+ mlx5e_rep_indr_unregister_block(rpriv, netdev);
break;
}
return NOTIFY_OK;
@@ -1053,14 +1045,23 @@ static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
struct mlx5e_neigh_hash_entry *nhe;
int err;
+ err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
+ if (err)
+ return err;
nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
if (!nhe) {
err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
- if (err)
+ if (err) {
+ mlx5_tun_entropy_refcount_dec(tun_entropy,
+ e->reformat_type);
return err;
+ }
}
list_add(&e->encap_list, &nhe->encap_list);
return 0;
@@ -1069,6 +1070,9 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
struct mlx5e_neigh_hash_entry *nhe;
list_del(&e->encap_list);
@@ -1076,6 +1080,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
if (list_empty(&nhe->encap_list))
mlx5e_rep_neigh_entry_destroy(priv, nhe);
+ mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
static int mlx5e_vf_rep_open(struct net_device *dev)
@@ -1092,7 +1097,8 @@ static int mlx5e_vf_rep_open(struct net_device *dev)
if (!mlx5_modify_vport_admin_state(priv->mdev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_VPORT_ADMIN_STATE_UP))
+ rep->vport, 1,
+ MLX5_VPORT_ADMIN_STATE_UP))
netif_carrier_on(dev);
unlock:
@@ -1110,7 +1116,8 @@ static int mlx5e_vf_rep_close(struct net_device *dev)
mutex_lock(&priv->state_lock);
mlx5_modify_vport_admin_state(priv->mdev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN);
+ rep->vport, 1,
+ MLX5_VPORT_ADMIN_STATE_DOWN);
ret = mlx5e_close_locked(dev);
mutex_unlock(&priv->state_lock);
return ret;
@@ -1122,9 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- int ret;
+ int ret, pf_num;
+
+ ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
+ if (ret)
+ return ret;
+
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ ret = snprintf(buf, len, "p%d", pf_num);
+ else
+ ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
- ret = snprintf(buf, len, "%d", rep->vport - 1);
if (ret >= len)
return -EOPNOTSUPP;
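/*
 * Userspace sketch of the new phys_port_name scheme above: the uplink
 * representor reports "p<pf>" and VF representors report "pf<pf>vf<vf>"
 * (vport - 1).  DEMO_VPORT_UPLINK, pf_num and the vport numbers are made-up
 * demo inputs, not the kernel's values.
 */
#include <stdio.h>

#define DEMO_VPORT_UPLINK	0xffff	/* stand-in for MLX5_VPORT_UPLINK */

static int demo_phys_port_name(char *buf, size_t len, int pf_num, int vport)
{
	int ret;

	if (vport == DEMO_VPORT_UPLINK)
		ret = snprintf(buf, len, "p%d", pf_num);
	else
		ret = snprintf(buf, len, "pf%dvf%d", pf_num, vport - 1);

	return (ret < 0 || (size_t)ret >= len) ? -1 : 0;
}

int main(void)
{
	int vports[] = { DEMO_VPORT_UPLINK, 1, 2 };
	char name[32];
	unsigned int i;

	for (i = 0; i < sizeof(vports) / sizeof(vports[0]); i++) {
		if (!demo_phys_port_name(name, sizeof(name), 0, vports[i]))
			printf("vport %#x -> %s\n", (unsigned int)vports[i], name);
	}
	return 0;
}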
@@ -1207,7 +1222,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
return false;
rep = rpriv->rep;
- return (rep->vport == FDB_UPLINK_VPORT);
+ return (rep->vport == MLX5_VPORT_UPLINK);
}
static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
@@ -1225,17 +1240,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_sw_stats *sstats = &priv->stats.sw;
-
- mlx5e_rep_update_sw_counters(priv);
-
- stats->rx_packets = sstats->rx_packets;
- stats->rx_bytes = sstats->rx_bytes;
- stats->tx_packets = sstats->tx_packets;
- stats->tx_bytes = sstats->tx_bytes;
-
- stats->tx_dropped = sstats->tx_queue_dropped;
+ mlx5e_fold_sw_stats64(priv, stats);
return 0;
}
@@ -1281,9 +1287,17 @@ static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
return 0;
}
-static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
- .switchdev_port_attr_get = mlx5e_attr_get,
-};
+static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
+
+ if (vlan != 0)
+ return -EOPNOTSUPP;
+
+ /* allow setting 0-vid for compatibility with libvirt */
+ return 0;
+}
static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
.ndo_open = mlx5e_vf_rep_open,
@@ -1295,6 +1309,7 @@ static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
.ndo_change_mtu = mlx5e_vf_rep_change_mtu,
+ .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};
static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
@@ -1315,6 +1330,8 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_set_vf_rate = mlx5e_set_vf_rate,
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
+ .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};
bool mlx5e_eswitch_rep(struct net_device *netdev)
@@ -1343,7 +1360,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->sw_mtu = netdev->mtu;
/* SQ */
- if (rep->vport == FDB_UPLINK_VPORT)
+ if (rep->vport == MLX5_VPORT_UPLINK)
params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
else
params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
@@ -1370,7 +1387,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_core_dev *mdev = priv->mdev;
- if (rep->vport == FDB_UPLINK_VPORT) {
+ if (rep->vport == MLX5_VPORT_UPLINK) {
SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
@@ -1389,8 +1406,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->watchdog_timeo = 15 * HZ;
- netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
-
netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
@@ -1402,7 +1417,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_TSO6;
netdev->hw_features |= NETIF_F_RXCSUM;
- if (rep->vport != FDB_UPLINK_VPORT)
+ if (rep->vport != MLX5_VPORT_UPLINK)
netdev->features |= NETIF_F_VLAN_CHALLENGED;
netdev->features |= netdev->hw_features;
@@ -1555,14 +1570,18 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return err;
}
- if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
uplink_priv = &rpriv->uplink_priv;
+ INIT_LIST_HEAD(&uplink_priv->unready_flows);
+
/* init shared tc flow table */
err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
if (err)
goto destroy_tises;
+ mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
+
/* init indirect block notifications */
INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
@@ -1591,7 +1610,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
- if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
/* clean indirect TC block notifications */
unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
mlx5e_rep_indr_clean_block_privs(rpriv);
@@ -1615,27 +1634,38 @@ static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
- struct mlx5_eqe *eqe = data;
- if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
- return NOTIFY_DONE;
+ if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
+ struct mlx5_eqe *eqe = data;
- switch (eqe->sub_type) {
- case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
- case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
- queue_work(priv->wq, &priv->update_carrier_work);
- break;
- default:
- return NOTIFY_DONE;
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ queue_work(priv->wq, &priv->update_carrier_work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
}
- return NOTIFY_OK;
+ if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);
+
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
}
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
u16 max_mtu;
netdev->min_mtu = ETH_MIN_MTU;
@@ -1643,6 +1673,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
mlx5e_set_dev_port_mtu(priv);
+ INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
+ mlx5e_tc_reoffload_flows_work);
+
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
@@ -1655,11 +1688,13 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
+ cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
mlx5_lag_remove(mdev);
}
@@ -1710,7 +1745,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rpriv->rep = rep;
nch = mlx5e_get_max_num_channels(dev);
- profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
+ profile = (rep->vport == MLX5_VPORT_UPLINK) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
@@ -1723,7 +1758,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rep->rep_if[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
- if (rep->vport == FDB_UPLINK_VPORT) {
+ if (rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_create_mdev_resources(dev);
if (err)
goto err_destroy_netdev;
@@ -1759,7 +1794,7 @@ err_detach_netdev:
mlx5e_detach_netdev(netdev_priv(netdev));
err_destroy_mdev_resources:
- if (rep->vport == FDB_UPLINK_VPORT)
+ if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(dev);
err_destroy_netdev:
@@ -1779,7 +1814,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
unregister_netdev(netdev);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
- if (rep->vport == FDB_UPLINK_VPORT)
+ if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
kfree(ppriv); /* mlx5e_rep_priv */
@@ -1797,25 +1832,18 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- int vport;
+ struct mlx5_eswitch_rep_if rep_if = {};
- for (vport = 0; vport < total_vfs; vport++) {
- struct mlx5_eswitch_rep_if rep_if = {};
+ rep_if.load = mlx5e_vport_rep_load;
+ rep_if.unload = mlx5e_vport_rep_unload;
+ rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
- rep_if.load = mlx5e_vport_rep_load;
- rep_if.unload = mlx5e_vport_rep_unload;
- rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
- mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
- }
+ mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
}
void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- int vport;
- for (vport = total_vfs - 1; vport >= 0; vport--)
- mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
+ mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index edd722824697..83b573b1abac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -37,6 +37,7 @@
#include <linux/rhashtable.h>
#include "eswitch.h"
#include "en.h"
+#include "lib/port_tun.h"
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5e_neigh_update_table {
@@ -71,6 +72,11 @@ struct mlx5_rep_uplink_priv {
*/
struct list_head tc_indr_block_priv_list;
struct notifier_block netdevice_nb;
+
+ struct mlx5_tun_entropy tun_entropy;
+
+ struct list_head unready_flows;
+ struct work_struct reoffload_flows_work;
};
struct mlx5e_rep_priv {
@@ -148,6 +154,7 @@ struct mlx5e_encap_entry {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
+ struct net_device *route_dev;
int tunnel_type;
int tunnel_hlen;
int reformat_type;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1d0bb5ff8c26..be396e5e4e39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,40 +52,45 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
-static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
- void *data)
+static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
+ u32 cqcc, void *data)
{
- u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc);
+ u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
- memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+ memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}
static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq, u32 cqcc)
+ struct mlx5_cqwq *wq,
+ u32 cqcc)
{
- mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
- cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);
- cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ struct mlx5_cqe64 *title = &cqd->title;
+
+ mlx5e_read_cqe_slot(wq, cqcc, title);
+ cqd->left = be32_to_cpu(title->byte_cnt);
+ cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
rq->stats->cqe_compress_blks++;
}
-static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
+ struct mlx5e_cq_decomp *cqd,
+ u32 cqcc)
{
- mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
- cq->mini_arr_idx = 0;
+ mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
+ cqd->mini_arr_idx = 0;
}
-static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
{
- struct mlx5_cqwq *wq = &cq->wq;
-
+ u32 cqcc = wq->cc;
u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
u32 wq_sz = mlx5_cqwq_get_size(wq);
u32 ci_top = min_t(u32, wq_sz, ci + n);
for (; ci < ci_top; ci++, n--) {
- struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
cqe->op_own = op_own;
}
@@ -93,7 +98,7 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
if (unlikely(ci == wq_sz)) {
op_own = !op_own;
for (ci = 0; ci < n; ci++) {
- struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
cqe->op_own = op_own;
}
@@ -101,68 +106,79 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
}
static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq, u32 cqcc)
+ struct mlx5_cqwq *wq,
+ u32 cqcc)
{
- cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
- cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
- cq->title.op_own &= 0xf0;
- cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
- cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
+ struct mlx5_cqe64 *title = &cqd->title;
+
+ title->byte_cnt = mini_cqe->byte_cnt;
+ title->check_sum = mini_cqe->checksum;
+ title->op_own &= 0xf0;
+ title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
+ title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- cq->decmprs_wqe_counter +=
- mpwrq_get_cqe_consumed_strides(&cq->title);
+ cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
else
- cq->decmprs_wqe_counter =
- mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1);
+ cqd->wqe_counter =
+ mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
}
static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq, u32 cqcc)
+ struct mlx5_cqwq *wq,
+ u32 cqcc)
{
- mlx5e_decompress_cqe(rq, cq, cqcc);
- cq->title.rss_hash_type = 0;
- cq->title.rss_hash_result = 0;
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+
+ mlx5e_decompress_cqe(rq, wq, cqcc);
+ cqd->title.rss_hash_type = 0;
+ cqd->title.rss_hash_result = 0;
}
static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq,
+ struct mlx5_cqwq *wq,
int update_owner_only,
int budget_rem)
{
- u32 cqcc = cq->wq.cc + update_owner_only;
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ u32 cqcc = wq->cc + update_owner_only;
u32 cqe_count;
u32 i;
- cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+ cqe_count = min_t(u32, cqd->left, budget_rem);
for (i = update_owner_only; i < cqe_count;
- i++, cq->mini_arr_idx++, cqcc++) {
- if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
- mlx5e_read_mini_arr_slot(cq, cqcc);
+ i++, cqd->mini_arr_idx++, cqcc++) {
+ if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+ mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
- mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
- rq->handle_rx_cqe(rq, &cq->title);
+ mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
+ rq->handle_rx_cqe(rq, &cqd->title);
}
- mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
- cq->wq.cc = cqcc;
- cq->decmprs_left -= cqe_count;
+ mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
+ wq->cc = cqcc;
+ cqd->left -= cqe_count;
rq->stats->cqe_compress_pkts += cqe_count;
return cqe_count;
}
static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq,
+ struct mlx5_cqwq *wq,
int budget_rem)
{
- mlx5e_read_title_slot(rq, cq, cq->wq.cc);
- mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
- mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
- rq->handle_rx_cqe(rq, &cq->title);
- cq->mini_arr_idx++;
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ u32 cc = wq->cc;
+
+ mlx5e_read_title_slot(rq, wq, cc);
+ mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
+ mlx5e_decompress_cqe(rq, wq, cc);
+ rq->handle_rx_cqe(rq, &cqd->title);
+ cqd->mini_arr_idx++;
- return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+ return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
static inline bool mlx5e_page_is_reserved(struct page *page)
@@ -369,7 +385,7 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
struct mlx5e_dma_info *dma_info,
- int offset_from, int offset_to, u32 headlen)
+ int offset_from, u32 headlen)
{
const void *from = page_address(dma_info->page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
@@ -377,24 +393,7 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
DMA_FROM_DEVICE);
- skb_copy_to_linear_data_offset(skb, offset_to, from, len);
-}
-
-static inline void
-mlx5e_copy_skb_header_mpwqe(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_dma_info *dma_info,
- u32 offset, u32 headlen)
-{
- u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
-
- mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg);
-
- if (unlikely(offset + headlen > PAGE_SIZE)) {
- dma_info++;
- mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg,
- headlen - headlen_pg);
- }
+ skb_copy_to_linear_data(skb, from, len);
}
static void
@@ -732,6 +731,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
((struct ipv6hdr *)ip_p)->nexthdr;
}
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -754,6 +755,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
goto csum_unnecessary;
+ /* CQE csum doesn't cover padding octets in short ethernet
+ * frames, and the pad field is appended prior to calculating
+ * and appending the FCS field.
+ *
+ * Detecting these padded frames requires verifying and parsing
+ * IP headers, so we simply force all those small frames to be
+ * CHECKSUM_UNNECESSARY even if they are not padded.
+ */
+ if (short_frame(skb->len))
+ goto csum_unnecessary;
+
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
goto csum_unnecessary;
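/*
 * Quick check of the short_frame() cut-off added above: with the standard
 * values ETH_ZLEN = 60 and ETH_FCS_LEN = 4, any frame of 64 bytes or less is
 * routed to CHECKSUM_UNNECESSARY.  The constants are defined locally so this
 * compiles outside the kernel.
 */
#include <stdio.h>

#define ETH_ZLEN	60	/* minimum Ethernet frame length, without FCS */
#define ETH_FCS_LEN	4
#define short_frame(size)	((size) <= ETH_ZLEN + ETH_FCS_LEN)

int main(void)
{
	unsigned int lens[] = { 42, 60, 64, 65, 1500 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4u -> %s\n", lens[i],
		       short_frame(lens[i]) ? "csum_unnecessary" : "csum_complete path");
	return 0;
}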
@@ -960,8 +972,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
}
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset,
- 0, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1083,8 +1094,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
di++;
}
/* copy header */
- mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di,
- head_offset, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1190,16 +1200,17 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ struct mlx5_cqwq *cqwq = &cq->wq;
struct mlx5_cqe64 *cqe;
int work_done = 0;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
- if (cq->decmprs_left)
- work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
+ if (rq->cqd.left)
+ work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
- cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ cqe = mlx5_cqwq_get_cqe(cqwq);
if (!cqe) {
if (unlikely(work_done))
goto out;
@@ -1209,21 +1220,21 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
do {
if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
work_done +=
- mlx5e_decompress_cqes_start(rq, cq,
+ mlx5e_decompress_cqes_start(rq, cqwq,
budget - work_done);
continue;
}
- mlx5_cqwq_pop(&cq->wq);
+ mlx5_cqwq_pop(cqwq);
rq->handle_rx_cqe(rq, cqe);
- } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
if (rq->xdp_prog)
mlx5e_xdp_rx_poll_complete(rq);
- mlx5_cqwq_update_db_record(&cq->wq);
+ mlx5_cqwq_update_db_record(cqwq);
/* ensure cq space is freed before enabling more cqes */
wmb();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index d3fe48ff9da9..1a78e05cbba8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -127,9 +127,9 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
-void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_sw_stats temp, *s = &temp;
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
int i;
memset(s, 0, sizeof(*s));
@@ -212,8 +212,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_cqes += sq_stats->cqes;
}
}
-
- memcpy(&priv->stats.sw, s, sizeof(*s));
}
static const struct counter_desc q_stats_desc[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index fe91ec06e3c7..4640d4f986f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -277,7 +277,6 @@ struct mlx5e_stats_grp {
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;
-void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cae6c6d48984..b4967a0ff8c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -38,7 +38,6 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
-#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -76,6 +75,7 @@ enum {
MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3),
MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4),
+ MLX5E_TC_FLOW_NOT_READY = BIT(MLX5E_TC_FLOW_BASE + 5),
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -117,6 +117,7 @@ struct mlx5e_tc_flow {
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
+ struct list_head unready; /* flows not ready to be offloaded (e.g. due to missing route) */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -128,6 +129,7 @@ struct mlx5e_tc_flow_parse_attr {
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
+ int max_mod_hdr_actions;
void *mod_hdr_actions;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
@@ -850,12 +852,12 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow, int out_index);
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
- struct ip_tunnel_info *tun_info,
- struct net_device *mirred_dev,
- struct net_device **encap_dev,
struct mlx5e_tc_flow *flow,
+ struct net_device *mirred_dev,
+ int out_index,
struct netlink_ext_ack *extack,
- int out_index);
+ struct net_device **encap_dev,
+ bool *encap_valid);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
@@ -927,21 +929,42 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}
+static void add_unready_flow(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+
+ esw = flow->priv->mdev->priv.eswitch;
+ rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &rpriv->uplink_priv;
+
+ flow->flags |= MLX5E_TC_FLOW_NOT_READY;
+ list_add_tail(&flow->unready, &uplink_priv->unready_flows);
+}
+
+static void remove_unready_flow(struct mlx5e_tc_flow *flow)
+{
+ list_del(&flow->unready);
+ flow->flags &= ~MLX5E_TC_FLOW_NOT_READY;
+}
+
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
u32 max_chain = mlx5_eswitch_get_chain_range(esw);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
u16 max_prio = mlx5_eswitch_get_prio_range(esw);
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
- int err = 0, encap_err = 0;
+ bool encap_valid = true;
+ int err = 0;
int out_index;
if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
@@ -967,17 +990,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
- mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index];
+ mirred_ifindex = parse_attr->mirred_ifindex[out_index];
out_dev = __dev_get_by_index(dev_net(priv->netdev),
mirred_ifindex);
- err = mlx5e_attach_encap(priv,
- &parse_attr->tun_info[out_index],
- out_dev, &encap_dev, flow,
- extack, out_index);
- if (err && err != -EAGAIN)
+ err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
+ extack, &encap_dev, &encap_valid);
+ if (err)
goto err_attach_encap;
- if (err == -EAGAIN)
- encap_err = err;
+
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
attr->dests[out_index].rep = rpriv->rep;
@@ -1005,10 +1025,11 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
attr->counter = counter;
}
- /* we get here if (1) there's no error or when
- * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
+ /* we get here if one of the following takes place:
+ * (1) there's no error
+ * (2) there's an encap action and we don't have a valid neigh
+ */
- if (encap_err == -EAGAIN) {
+ if (!encap_valid) {
/* continue with goto slow path rule instead */
struct mlx5_esw_flow_attr slow_attr;
@@ -1048,6 +1069,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr slow_attr;
int out_index;
+ if (flow->flags & MLX5E_TC_FLOW_NOT_READY) {
+ remove_unready_flow(flow);
+ kvfree(attr->parse_attr);
+ return;
+ }
+
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
if (flow->flags & MLX5E_TC_FLOW_SLOW)
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
@@ -1302,101 +1329,89 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
- struct net_device *filter_dev)
+ struct net_device *filter_dev, u8 *match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
-
- struct flow_dissector_key_control *enc_control =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- f->key);
- int err = 0;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_match_control enc_control;
+ int err;
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
- headers_c, headers_v);
+ headers_c, headers_v, match_level);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"failed to parse tunnel attributes");
return err;
}
- if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- f->mask);
+ flow_rule_match_enc_control(rule, &enc_control);
+
+ if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(mask->src));
+ ntohl(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(key->src));
+ ntohl(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(mask->dst));
+ ntohl(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(key->dst));
+ ntohl(match.key->dst));
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
- } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
- f->mask);
+ } else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
- struct flow_dissector_key_ip *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IP,
- f->key);
- struct flow_dissector_key_ip *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IP,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+ flow_rule_match_enc_ip(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
+ match.mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
+ match.key->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
+ match.mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
+ match.key->ttl);
- if (mask->ttl &&
+ if (match.mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB
(priv->mdev,
ft_field_support.outer_ipv4_ttl)) {
@@ -1426,7 +1441,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
struct net_device *filter_dev,
- u8 *match_level)
+ u8 *match_level, u8 *tunnel_match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1437,12 +1452,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 addr_type = 0;
u8 ip_proto = 0;
*match_level = MLX5_MATCH_NONE;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -1461,23 +1478,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
- f->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if ((dissector_uses_key(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
- dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
- dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
- dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- f->key);
- switch (key->addr_type) {
+ if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_enc_control(rule, &match);
+ switch (match.key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- if (parse_tunnel_attr(priv, spec, f, filter_dev))
+ if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
return -EOPNOTSUPP;
break;
default:
@@ -1493,35 +1508,27 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
inner_headers);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(mask->n_proto));
+ ntohs(match.mask->n_proto));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(key->n_proto));
+ ntohs(match.key->n_proto));
- if (mask->n_proto)
+ if (match.mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
- if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
- if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id ||
+ match.mask->vlan_priority ||
+ match.mask->vlan_tpid) {
+ if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
@@ -1533,11 +1540,15 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
cvlan_tag, 1);
}
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
+ match.mask->vlan_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
+ match.key->vlan_id);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
+ match.mask->vlan_priority);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
+ match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
}
@@ -1547,17 +1558,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CVLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CVLAN,
- f->mask);
- if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
- if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id ||
+ match.mask->vlan_priority ||
+ match.mask->vlan_tpid) {
+ if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
MLX5_SET(fte_match_set_misc, misc_v,
@@ -1570,69 +1578,58 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
- mask->vlan_id);
+ match.mask->vlan_id);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
- key->vlan_id);
+ match.key->vlan_id);
MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
- mask->vlan_priority);
+ match.mask->vlan_priority);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
- key->vlan_priority);
+ match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
- struct flow_dissector_key_eth_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+ flow_rule_match_eth_addrs(rule, &match);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
- mask->dst);
+ match.mask->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
- key->dst);
+ match.key->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
- mask->src);
+ match.mask->src);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
- key->src);
+ match.key->src);
- if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+ if (!is_zero_ether_addr(match.mask->src) ||
+ !is_zero_ether_addr(match.mask->dst))
*match_level = MLX5_MATCH_L2;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- struct flow_dissector_key_control *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->mask);
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
/* the HW doesn't support frag first/later */
- if (mask->flags & FLOW_DIS_FIRST_FRAG)
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
return -EOPNOTSUPP;
- if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
- key->flags & FLOW_DIS_IS_FRAGMENT);
+ match.key->flags & FLOW_DIS_IS_FRAGMENT);
/* the HW doesn't need L3 inline to match on frag=no */
- if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
+ if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
*match_level = MLX5_MATCH_L2;
/* *** L2 attributes parsing up to here *** */
else
@@ -1640,102 +1637,85 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- ip_proto = key->ip_proto;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ ip_proto = match.key->ip_proto;
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
- mask->ip_proto);
+ match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
- key->ip_proto);
+ match.key->ip_proto);
- if (mask->ip_proto)
+ if (match.mask->ip_proto)
*match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
+ struct flow_match_ipv4_addrs match;
+ flow_rule_match_ipv4_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &mask->src, sizeof(mask->src));
+ &match.mask->src, sizeof(match.mask->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &key->src, sizeof(key->src));
+ &match.key->src, sizeof(match.key->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &mask->dst, sizeof(mask->dst));
+ &match.mask->dst, sizeof(match.mask->dst));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &key->dst, sizeof(key->dst));
+ &match.key->dst, sizeof(match.key->dst));
- if (mask->src || mask->dst)
+ if (match.mask->src || match.mask->dst)
*match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ struct flow_match_ipv6_addrs match;
+ flow_rule_match_ipv6_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &mask->src, sizeof(mask->src));
+ &match.mask->src, sizeof(match.mask->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &key->src, sizeof(key->src));
+ &match.key->src, sizeof(match.key->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &mask->dst, sizeof(mask->dst));
+ &match.mask->dst, sizeof(match.mask->dst));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &key->dst, sizeof(key->dst));
+ &match.key->dst, sizeof(match.key->dst));
- if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
- ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
+ if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
+ ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
*match_level = MLX5_MATCH_L3;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->key);
- struct flow_dissector_key_ip *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+ flow_rule_match_ip(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
+ match.mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
+ match.key->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
+ match.mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
+ match.key->ttl);
- if (mask->ttl &&
+ if (match.mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
ft_field_support.outer_ipv4_ttl)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1743,44 +1723,39 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (mask->tos || mask->ttl)
+ if (match.mask->tos || match.mask->ttl)
*match_level = MLX5_MATCH_L3;
}
/* *** L3 attributes parsing up to here *** */
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
switch (ip_proto) {
case IPPROTO_TCP:
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- tcp_sport, ntohs(mask->src));
+ tcp_sport, ntohs(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- tcp_sport, ntohs(key->src));
+ tcp_sport, ntohs(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- tcp_dport, ntohs(mask->dst));
+ tcp_dport, ntohs(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- tcp_dport, ntohs(key->dst));
+ tcp_dport, ntohs(match.key->dst));
break;
case IPPROTO_UDP:
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- udp_sport, ntohs(mask->src));
+ udp_sport, ntohs(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- udp_sport, ntohs(key->src));
+ udp_sport, ntohs(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- udp_dport, ntohs(mask->dst));
+ udp_dport, ntohs(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- udp_dport, ntohs(key->dst));
+ udp_dport, ntohs(match.key->dst));
break;
default:
NL_SET_ERR_MSG_MOD(extack,
@@ -1790,26 +1765,20 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EINVAL;
}
- if (mask->src || mask->dst)
+ if (match.mask->src || match.mask->dst)
*match_level = MLX5_MATCH_L4;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
- struct flow_dissector_key_tcp *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- f->key);
- struct flow_dissector_key_tcp *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+ flow_rule_match_tcp(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
- ntohs(mask->flags));
+ ntohs(match.mask->flags));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
- ntohs(key->flags));
+ ntohs(match.key->flags));
- if (mask->flags)
+ if (match.mask->flags)
*match_level = MLX5_MATCH_L4;
}
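Every match hunk above follows the same conversion: instead of pulling raw key/mask pointers out of f->dissector with skb_flow_dissector_target(), the parser asks the generic flow_rule for a typed match. A minimal sketch of that pattern, assuming only the flow_offload helpers that already appear in this diff (the TOS/TTL handling mirrors the IP and ENC_IP hunks; the out parameters are illustrative only):

	#include <net/flow_offload.h>

	static void sketch_parse_ip(const struct flow_rule *rule,
				    u8 *ecn, u8 *dscp, u8 *ttl)
	{
		struct flow_match_ip match;

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
			return;

		flow_rule_match_ip(rule, &match);
		/* match.key/match.mask point straight into the rule's
		 * dissector storage, so no per-key target lookup is needed.
		 */
		*ecn  = match.key->tos & 0x3;	/* low two bits carry ECN  */
		*dscp = match.key->tos >> 2;	/* upper six bits are DSCP */
		*ttl  = match.key->ttl;
	}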
@@ -1826,15 +1795,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
struct mlx5_eswitch_rep *rep;
- u8 match_level;
int err;
- err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
+ err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
- if (rep->vport != FDB_UPLINK_VPORT &&
+ if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw->offloads.inline_mode < match_level)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1846,10 +1815,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
flow->esw_attr->match_level = match_level;
- else
+ flow->esw_attr->tunnel_match_level = tunnel_match_level;
+ } else {
flow->nic_attr->match_level = match_level;
+ }
return err;
}
@@ -1862,27 +1833,29 @@ struct pedit_headers {
struct udphdr udp;
};
+struct pedit_headers_action {
+ struct pedit_headers vals;
+ struct pedit_headers masks;
+ u32 pedits;
+};
+
static int pedit_header_offsets[] = {
- [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
- [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
- [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
- [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
- [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+ [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+ [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+ [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers *masks,
- struct pedit_headers *vals)
+ struct pedit_headers_action *hdrs)
{
u32 *curr_pmask, *curr_pval;
- if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
- goto out_err;
-
- curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
- curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
+ curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
+ curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
if (*curr_pmask & mask) /* disallow acting twice on the same location */
goto out_err;
@@ -1934,12 +1907,11 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
};
-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
- * max from the SW pedit action. On success, it says how many HW actions were
- * actually parsed.
+/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
+ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
+ * says how many HW actions were actually parsed.
*/
-static int offload_pedit_fields(struct pedit_headers *masks,
- struct pedit_headers *vals,
+static int offload_pedit_fields(struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
@@ -1954,15 +1926,17 @@ static int offload_pedit_fields(struct pedit_headers *masks,
__be16 mask_be16;
void *action;
- set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
- add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
- set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
- add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
+ set_masks = &hdrs[0].masks;
+ add_masks = &hdrs[1].masks;
+ set_vals = &hdrs[0].vals;
+ add_vals = &hdrs[1].vals;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- action = parse_attr->mod_hdr_actions;
- max_actions = parse_attr->num_mod_hdr_actions;
- nactions = 0;
+ action = parse_attr->mod_hdr_actions +
+ parse_attr->num_mod_hdr_actions * action_size;
+
+ max_actions = parse_attr->max_mod_hdr_actions;
+ nactions = parse_attr->num_mod_hdr_actions;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
f = &fields[i];
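The two renamed fields split the bookkeeping: max_mod_hdr_actions is the capacity sized by alloc_mod_hdr_actions(), while num_mod_hdr_actions is the write cursor, so repeated calls append HW actions instead of overwriting earlier ones. A hypothetical helper making the offset arithmetic above explicit (next_mod_hdr_action() does not exist in the driver; it is only a sketch of the same computation):

	static void *next_mod_hdr_action(struct mlx5e_tc_flow_parse_attr *parse_attr)
	{
		int action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

		if (parse_attr->num_mod_hdr_actions == parse_attr->max_mod_hdr_actions)
			return NULL;	/* capacity chosen by alloc_mod_hdr_actions() */

		return parse_attr->mod_hdr_actions +
		       parse_attr->num_mod_hdr_actions++ * action_size;
	}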
@@ -2053,12 +2027,14 @@ static int offload_pedit_fields(struct pedit_headers *masks,
}
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
- const struct tc_action *a, int namespace,
+ struct pedit_headers_action *hdrs,
+ int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
int nkeys, action_size, max_actions;
- nkeys = tcf_pedit_nkeys(a);
+ nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
+ hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
@@ -2073,62 +2049,67 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
if (!parse_attr->mod_hdr_actions)
return -ENOMEM;
- parse_attr->num_mod_hdr_actions = max_actions;
+ parse_attr->max_mod_hdr_actions = max_actions;
return 0;
}
static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
- const struct tc_action *a, int namespace,
+ const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
- struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
- int nkeys, i, err = -EOPNOTSUPP;
+ u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
+ int err = -EOPNOTSUPP;
u32 mask, val, offset;
- u8 cmd, htype;
+ u8 htype;
- nkeys = tcf_pedit_nkeys(a);
+ htype = act->mangle.htype;
+ err = -EOPNOTSUPP; /* can't be all optimistic */
- memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
- memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+ if (htype == FLOW_ACT_MANGLE_UNSPEC) {
+ NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
+ goto out_err;
+ }
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
- cmd = tcf_pedit_cmd(a, i);
- err = -EOPNOTSUPP; /* can't be all optimistic */
+ mask = act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
- if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
- NL_SET_ERR_MSG_MOD(extack,
- "legacy pedit isn't offloaded");
- goto out_err;
- }
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
+ if (err)
+ goto out_err;
- if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
- NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
- goto out_err;
- }
+ hdrs[cmd].pedits++;
+
+ return 0;
+out_err:
+ return err;
+}
- mask = tcf_pedit_mask(a, i);
- val = tcf_pedit_val(a, i);
- offset = tcf_pedit_offset(a, i);
+static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
+{
+ struct pedit_headers *cmd_masks;
+ int err;
+ u8 cmd;
- err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
+ if (!parse_attr->mod_hdr_actions) {
+ err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
if (err)
goto out_err;
}
- err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
- if (err)
- goto out_err;
-
- err = offload_pedit_fields(masks, vals, parse_attr, extack);
+ err = offload_pedit_fields(hdrs, parse_attr, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
- cmd_masks = &masks[cmd];
+ cmd_masks = &hdrs[cmd].masks;
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
NL_SET_ERR_MSG_MOD(extack,
"attempt to offload an unsupported field");
@@ -2178,17 +2159,22 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
- struct tcf_exts *exts,
+ struct flow_action *flow_action,
+ u32 actions,
struct netlink_ext_ack *extack)
{
- const struct tc_action *a;
+ const struct flow_action_entry *act;
bool modify_ip_header;
u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
- int nkeys, i;
+ int i;
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
+ else
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
@@ -2196,20 +2182,16 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
goto out_ok;
modify_ip_header = false;
- tcf_exts_for_each_action(i, a, exts) {
- int k;
-
- if (!is_tcf_pedit(a))
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id != FLOW_ACTION_MANGLE &&
+ act->id != FLOW_ACTION_ADD)
continue;
- nkeys = tcf_pedit_nkeys(a);
- for (k = 0; k < nkeys; k++) {
- htype = tcf_pedit_htype(a, k);
- if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
- htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
- modify_ip_header = true;
- break;
- }
+ htype = act->mangle.htype;
+ if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
+ htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ modify_ip_header = true;
+ break;
}
}
@@ -2227,7 +2209,7 @@ out_ok:
}
static bool actions_match_supported(struct mlx5e_priv *priv,
- struct tcf_exts *exts,
+ struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -2244,7 +2226,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(&parse_attr->spec, exts,
+ return modify_header_match_supported(&parse_attr->spec,
+ flow_action, actions,
extack);
return true;
@@ -2264,52 +2247,50 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
-static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+static int parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
- const struct tc_action *a;
+ struct pedit_headers_action hdrs[2] = {};
+ const struct flow_action_entry *act;
u32 action = 0;
int err, i;
- if (!tcf_exts_has_actions(exts))
+ if (!flow_action_has_entries(flow_action))
return -EINVAL;
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- tcf_exts_for_each_action(i, a, exts) {
- if (is_tcf_gact_shot(a)) {
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.flow_counter))
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- continue;
- }
-
- if (is_tcf_pedit(a)) {
- err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, extack);
+ break;
+ case FLOW_ACTION_MANGLE:
+ case FLOW_ACTION_ADD:
+ err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr, hdrs, extack);
if (err)
return err;
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- continue;
- }
-
- if (is_tcf_csum(a)) {
+ break;
+ case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a),
+ act->csum_flags,
extack))
- continue;
+ break;
return -EOPNOTSUPP;
- }
-
- if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *peer_dev = tcf_mirred_dev(a);
+ case FLOW_ACTION_REDIRECT: {
+ struct net_device *peer_dev = act->dev;
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
@@ -2324,11 +2305,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
peer_dev->name);
return -EINVAL;
}
- continue;
- }
-
- if (is_tcf_skbedit_mark(a)) {
- u32 mark = tcf_skbedit_mark(a);
+ }
+ break;
+ case FLOW_ACTION_MARK: {
+ u32 mark = act->mark;
if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(extack,
@@ -2338,14 +2318,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
attr->flow_tag = mark;
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- continue;
+ }
+ break;
+ default:
+ return -EINVAL;
}
+ }
- return -EINVAL;
+ if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
+ hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr, hdrs, extack);
+ if (err)
+ return err;
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -2371,31 +2360,37 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
peer_priv = netdev_priv(peer_netdev);
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
- (priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
- same_hw_devs(priv, peer_priv) &&
- MLX5_VPORT_MANAGER(peer_priv->mdev) &&
- (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
+ mlx5e_eswitch_rep(priv->netdev) &&
+ mlx5e_eswitch_rep(peer_netdev) &&
+ same_hw_devs(priv, peer_priv));
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
- struct ip_tunnel_info *tun_info,
- struct net_device *mirred_dev,
- struct net_device **encap_dev,
struct mlx5e_tc_flow *flow,
+ struct net_device *mirred_dev,
+ int out_index,
struct netlink_ext_ack *extack,
- int out_index)
+ struct net_device **encap_dev,
+ bool *encap_valid)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- unsigned short family = ip_tunnel_info_af(tun_info);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct ip_tunnel_key *key = &tun_info->key;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct ip_tunnel_info *tun_info;
+ struct ip_tunnel_key *key;
struct mlx5e_encap_entry *e;
+ unsigned short family;
uintptr_t hash_key;
bool found = false;
int err = 0;
+ parse_attr = attr->parse_attr;
+ tun_info = &parse_attr->tun_info[out_index];
+ family = ip_tunnel_info_af(tun_info);
+ key = &tun_info->key;
+
hash_key = hash_encap_info(key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
@@ -2426,7 +2421,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
else if (family == AF_INET6)
err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
- if (err && err != -EAGAIN)
+ if (err)
goto out_err;
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
@@ -2438,8 +2433,9 @@ attach_flow:
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
attr->dests[out_index].encap_id = e->encap_id;
attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ *encap_valid = true;
} else {
- err = -EAGAIN;
+ *encap_valid = false;
}
return err;
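Returning -EAGAIN to signal an unresolved neighbour is gone; the caller now learns about it through *encap_valid and can keep the flow rather than fail it. A caller-side sketch under that assumption, with variable names taken from the function's own parameters (the slow-path handling itself is outside this hunk):

	struct net_device *encap_dev;
	bool encap_valid;
	int err;

	err = mlx5e_attach_encap(priv, flow, mirred_dev, out_index,
				 extack, &encap_dev, &encap_valid);
	if (err)
		return err;

	if (!encap_valid) {
		/* neighbour not resolved yet: offload via the slow path and
		 * complete the encap later from the neigh-update handler.
		 */
	}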
@@ -2450,7 +2446,7 @@ out_err:
}
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
- const struct tc_action *a,
+ const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
u32 *action)
{
@@ -2459,7 +2455,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
return -EOPNOTSUPP;
- if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
MLX5_FS_VLAN_DEPTH))
@@ -2469,10 +2466,11 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
} else {
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
}
- } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
- attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
- attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ attr->vlan_vid[vlan_idx] = act->vlan.vid;
+ attr->vlan_prio[vlan_idx] = act->vlan.prio;
+ attr->vlan_proto[vlan_idx] = act->vlan.proto;
if (!attr->vlan_proto[vlan_idx])
attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
@@ -2484,13 +2482,15 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
- (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
- tcf_vlan_push_prio(a)))
+ (act->vlan.proto != htons(ETH_P_8021Q) ||
+ act->vlan.prio))
return -EOPNOTSUPP;
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
- } else { /* action is TCA_VLAN_ACT_MODIFY */
+ break;
+ default:
+ /* action is FLOW_ACTION_VLAN_MANGLE */
return -EOPNOTSUPP;
}
@@ -2499,58 +2499,56 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
+ struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct ip_tunnel_info *info = NULL;
- const struct tc_action *a;
+ const struct ip_tunnel_info *info = NULL;
+ const struct flow_action_entry *act;
bool encap = false;
u32 action = 0;
int err, i;
- if (!tcf_exts_has_actions(exts))
+ if (!flow_action_has_entries(flow_action))
return -EINVAL;
attr->in_rep = rpriv->rep;
attr->in_mdev = priv->mdev;
- tcf_exts_for_each_action(i, a, exts) {
- if (is_tcf_gact_shot(a)) {
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- continue;
- }
-
- if (is_tcf_pedit(a)) {
- err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, extack);
+ break;
+ case FLOW_ACTION_MANGLE:
+ case FLOW_ACTION_ADD:
+ err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr, hdrs, extack);
if (err)
return err;
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
attr->split_count = attr->out_count;
- continue;
- }
-
- if (is_tcf_csum(a)) {
+ break;
+ case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a),
- extack))
- continue;
+ act->csum_flags, extack))
+ break;
return -EOPNOTSUPP;
- }
-
- if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_MIRRED: {
struct mlx5e_priv *out_priv;
struct net_device *out_dev;
- out_dev = tcf_mirred_dev(a);
+ out_dev = act->dev;
if (!out_dev) {
/* out_dev is NULL when filters with
* non-existing mirred device are replayed to
@@ -2569,8 +2567,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (switchdev_port_same_parent_id(priv->netdev,
- out_dev) ||
+ if (netdev_port_same_parent_id(priv->netdev,
+ out_dev) ||
is_merged_eswitch_dev(priv, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
@@ -2615,35 +2613,29 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
priv->netdev->name, out_dev->name);
return -EINVAL;
}
- continue;
- }
-
- if (is_tcf_tunnel_set(a)) {
- info = tcf_tunnel_info(a);
+ }
+ break;
+ case FLOW_ACTION_TUNNEL_ENCAP:
+ info = act->tunnel;
if (info)
encap = true;
else
return -EOPNOTSUPP;
- continue;
- }
-
- if (is_tcf_vlan(a)) {
- err = parse_tc_vlan_action(priv, a, attr, &action);
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ err = parse_tc_vlan_action(priv, act, attr, &action);
if (err)
return err;
attr->split_count = attr->out_count;
- continue;
- }
-
- if (is_tcf_tunnel_release(a)) {
+ break;
+ case FLOW_ACTION_TUNNEL_DECAP:
action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
- continue;
- }
-
- if (is_tcf_gact_goto_chain(a)) {
- u32 dest_chain = tcf_gact_goto_chain_index(a);
+ break;
+ case FLOW_ACTION_GOTO: {
+ u32 dest_chain = act->chain_index;
u32 max_chain = mlx5_eswitch_get_chain_range(esw);
if (dest_chain <= attr->chain) {
@@ -2656,15 +2648,23 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = dest_chain;
-
- continue;
+ break;
+ }
+ default:
+ return -EINVAL;
}
+ }
- return -EINVAL;
+ if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
+ hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr, hdrs, extack);
+ if (err)
+ return err;
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
if (attr->dest_chain) {
@@ -2724,15 +2724,22 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT &&
+ bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
flow->flags & MLX5E_TC_FLOW_INGRESS;
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
MLX5_DEVCOM_ESW_OFFLOADS);
- return esw_paired && mlx5_lag_is_sriov(attr->in_mdev) &&
- (is_rep_ingress || act_is_encap);
+ if (!esw_paired)
+ return false;
+
+ if ((mlx5_lag_is_sriov(attr->in_mdev) ||
+ mlx5_lag_is_multipath(attr->in_mdev)) &&
+ (is_rep_ingress || act_is_encap))
+ return true;
+
+ return false;
}
static int
@@ -2767,17 +2774,40 @@ err_free:
return err;
}
-static int
+static void
+mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
+ struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct tc_cls_flower_offload *f,
+ struct mlx5_eswitch_rep *in_rep,
+ struct mlx5_core_dev *in_mdev)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ esw_attr->parse_attr = parse_attr;
+ esw_attr->chain = f->common.chain_index;
+ esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
+
+ esw_attr->in_rep = in_rep;
+ esw_attr->in_mdev = in_mdev;
+
+ if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
+ MLX5_COUNTER_SOURCE_ESWITCH)
+ esw_attr->counter_dev = in_mdev;
+ else
+ esw_attr->counter_dev = priv->mdev;
+}
+
+static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
u16 flow_flags,
struct net_device *filter_dev,
struct mlx5_eswitch_rep *in_rep,
- struct mlx5_core_dev *in_mdev,
- struct mlx5e_tc_flow **__flow)
+ struct mlx5_core_dev *in_mdev)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
int attr_size, err;
@@ -2788,45 +2818,41 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
&parse_attr, &flow);
if (err)
goto out;
+
parse_attr->filter_dev = filter_dev;
- flow->esw_attr->parse_attr = parse_attr;
+ mlx5e_flow_esw_attr_init(flow->esw_attr,
+ priv, parse_attr,
+ f, in_rep, in_mdev);
+
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
f, filter_dev);
if (err)
goto err_free;
- flow->esw_attr->chain = f->common.chain_index;
- flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
- err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
+ err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
if (err)
goto err_free;
- flow->esw_attr->in_rep = in_rep;
- flow->esw_attr->in_mdev = in_mdev;
-
- if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
- MLX5_COUNTER_SOURCE_ESWITCH)
- flow->esw_attr->counter_dev = in_mdev;
- else
- flow->esw_attr->counter_dev = priv->mdev;
-
- err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
- if (err)
- goto err_free;
+ err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
+ if (err) {
+ if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
+ goto err_free;
- *__flow = flow;
+ add_unready_flow(flow);
+ }
- return 0;
+ return flow;
err_free:
kfree(flow);
kvfree(parse_attr);
out:
- return err;
+ return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ u16 flow_flags)
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
@@ -2849,17 +2875,19 @@ static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
* original flow and packets redirected from uplink use the
* peer mdev.
*/
- if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT)
+ if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
parse_attr = flow->esw_attr->parse_attr;
- err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
- parse_attr->filter_dev,
- flow->esw_attr->in_rep, in_mdev, &peer_flow);
- if (err)
+ peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
+ parse_attr->filter_dev,
+ flow->esw_attr->in_rep, in_mdev);
+ if (IS_ERR(peer_flow)) {
+ err = PTR_ERR(peer_flow);
goto out;
+ }
flow->peer_flow = peer_flow;
flow->flags |= MLX5E_TC_FLOW_DUP;
@@ -2885,13 +2913,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int err;
- err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
- in_mdev, &flow);
- if (err)
- goto out;
+ flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
+ in_mdev);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
if (is_peer_flow_needed(flow)) {
- err = mlx5e_tc_add_fdb_peer_flow(f, flow);
+ err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
if (err) {
mlx5e_tc_del_fdb_flow(priv, flow);
goto out;
@@ -2913,6 +2941,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
@@ -2935,7 +2964,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
+ err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
if (err)
goto err_free;
@@ -3055,23 +3084,25 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
- u64 bytes;
- u64 packets;
- u64 lastuse;
+ u64 lastuse = 0;
+ u64 packets = 0;
+ u64 bytes = 0;
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
if (!flow || !same_flow_direction(flow, flags))
return -EINVAL;
- if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
- return 0;
-
- counter = mlx5e_tc_get_counter(flow);
- if (!counter)
- return 0;
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ counter = mlx5e_tc_get_counter(flow);
+ if (!counter)
+ return 0;
- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ }
+ /* Under multipath it's possible for one rule to be currently
+ * un-offloaded while the other rule is offloaded.
+ */
peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
if (!peer_esw)
goto out;
@@ -3083,6 +3114,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
u64 lastuse2;
counter = mlx5e_tc_get_counter(flow->peer_flow);
+ if (!counter)
+ goto no_peer_counter;
mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
bytes += bytes2;
@@ -3090,10 +3123,10 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
lastuse = max_t(u64, lastuse, lastuse2);
}
+no_peer_counter:
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-
out:
- tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse);
return 0;
}
@@ -3213,3 +3246,18 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
__mlx5e_tc_del_fdb_peer_flow(flow);
}
+
+void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
+{
+ struct mlx5_rep_uplink_priv *rpriv =
+ container_of(work, struct mlx5_rep_uplink_priv,
+ reoffload_flows_work);
+ struct mlx5e_tc_flow *flow, *tmp;
+
+ rtnl_lock();
+ list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
+ if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
+ remove_unready_flow(flow);
+ }
+ rtnl_unlock();
+}
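mlx5e_tc_reoffload_flows_work() closes the multipath loop: flows whose FDB offload failed with -ENETUNREACH were parked through add_unready_flow() earlier in this file, and this work item retries them. A minimal sketch of how the uplink representor side might wire it up, assuming a pointer to the mlx5_rep_uplink_priv referenced above (the actual scheduling site lives in en_rep.c, not in this diff):

	INIT_LIST_HEAD(&uplink_priv->unready_flows);
	INIT_WORK(&uplink_priv->reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);

	/* e.g. once a multipath port becomes reachable again: */
	queue_work(priv->wq, &uplink_priv->reoffload_flows_work);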
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index d2d87f978c06..f62e81902d27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -72,6 +72,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags);
+void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 598ad7e4d5c9..25a8f8260c14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -148,12 +148,8 @@ static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
- struct flow_keys keys;
-
if (skb_transport_header_was_set(skb))
return skb_transport_offset(skb);
- else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
- return keys.control.thoff;
else
return mlx5e_skb_l2_header_offset(skb);
}
@@ -172,15 +168,8 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
hlen += VLAN_HLEN;
break;
case MLX5_INLINE_MODE_IP:
- /* When transport header is set to zero, it means no transport
- * header. When transport header is set to 0xff's, it means
- * transport header wasn't set.
- */
- if (skb_transport_offset(skb)) {
- hlen = mlx5e_skb_l3_header_offset(skb);
- break;
- }
- /* fall through */
+ hlen = mlx5e_skb_l3_header_offset(skb);
+ break;
case MLX5_INLINE_MODE_L2:
default:
hlen = mlx5e_skb_l2_header_offset(skb);
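The removed fall-through special case is no longer needed because mlx5e_skb_l3_header_offset() itself degrades to the L2 header length when no transport header was set, so MLX5_INLINE_MODE_IP can call it unconditionally; roughly:

	case MLX5_INLINE_MODE_IP:
		/* skb_transport_header_was_set() inside the helper decides
		 * between the transport offset and the plain L2 length.
		 */
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;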
@@ -387,8 +376,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+#ifdef CONFIG_MLX5_EN_IPSEC
+ struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
+#endif
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
+#ifdef CONFIG_MLX5_EN_IPSEC
+ wqe->eth = cur_eth;
+#endif
}
/* fill wqe */
@@ -514,7 +509,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
mlx5e_dump_error_cqe(sq,
(struct mlx5_err_cqe *)cqe);
queue_work(cq->channel->priv->wq,
- &sq->recover.recover_work);
+ &sq->recover_work);
}
stats->cqe_err++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ee04aab65a9f..bb6e5b5d9681 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -34,6 +34,7 @@
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
@@ -114,11 +115,11 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *cq = NULL;
- spin_lock(&table->lock);
+ rcu_read_lock();
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
mlx5_cq_hold(cq);
- spin_unlock(&table->lock);
+ rcu_read_unlock();
return cq;
}
@@ -371,9 +372,9 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
struct mlx5_cq_table *table = &eq->cq_table;
int err;
- spin_lock_irq(&table->lock);
+ spin_lock(&table->lock);
err = radix_tree_insert(&table->tree, cq->cqn, cq);
- spin_unlock_irq(&table->lock);
+ spin_unlock(&table->lock);
return err;
}
@@ -383,9 +384,9 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *tmp;
- spin_lock_irq(&table->lock);
+ spin_lock(&table->lock);
tmp = radix_tree_delete(&table->tree, cq->cqn);
- spin_unlock_irq(&table->lock);
+ spin_unlock(&table->lock);
if (!tmp) {
mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", cq->cqn, eq->eqn);
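The CQ table hunks above change the locking model: the interrupt-path lookup now runs under rcu_read_lock() with a reference taken via mlx5_cq_hold(), so the writers only have to serialize against each other and can drop the irq-disabling lock variant. A sketch of the resulting reader/writer pairing, assuming radix_tree_insert/delete remain the only writers:

	/* reader (EQ completion handler): lock-free under RCU */
	rcu_read_lock();
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);	/* pins the CQ beyond the read section */
	rcu_read_unlock();

	/* writer: a plain spinlock suffices once the ISR stops taking it */
	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);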
@@ -530,6 +531,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
+ if (mlx5_core_is_ecpf_esw_manager(dev))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);
+
return async_event_mask;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index a44ea7b85614..d0b28251abf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -39,8 +39,7 @@
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
-
-#define UPLINK_VPORT 0xFFFF
+#include "ecpf.h"
enum {
MLX5_ACTION_NONE = 0,
@@ -52,7 +51,7 @@ enum {
struct vport_addr {
struct l2addr_node node;
u8 action;
- u32 vport;
+ u16 vport;
struct mlx5_flow_handle *flow_rule;
bool mpfs; /* UC MAC was added to MPFs */
/* A flag indicating that mac was added due to mc promiscuous vport */
@@ -65,11 +64,36 @@ enum {
PROMISC_CHANGE = BIT(3),
};
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
+
/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
+/* The vport getter/iterator are only valid after esw->total_vports
+ * and vport->vport are initialized in mlx5_eswitch_init.
+ */
+#define mlx5_esw_for_all_vports(esw, i, vport) \
+ for ((i) = MLX5_VPORT_PF; \
+ (vport) = &(esw)->vports[i], \
+ (i) < (esw)->total_vports; (i)++)
+
+#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
+ for ((i) = MLX5_VPORT_FIRST_VF; \
+ (vport) = &(esw)->vports[i], \
+ (i) <= (nvfs); (i)++)
+
+static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+
+ WARN_ON(idx > esw->total_vports - 1);
+ return &esw->vports[idx];
+}
+
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
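The new iterators hide the vport-number-to-array-index mapping behind mlx5_eswitch_get_vport(), so callers loop over struct mlx5_vport entries instead of raw indices. An illustrative use, valid only after mlx5_eswitch_init() has populated total_vports and vport->vport as the comment above notes:

	struct mlx5_vport *vport;
	u16 i;

	mlx5_esw_for_all_vports(esw, i, vport)
		esw_debug(esw->dev, "index %d -> vport num 0x%x\n",
			  i, vport->vport);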
@@ -115,7 +139,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
-static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
+static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
@@ -152,7 +176,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
/* E-Switch FDB */
static struct mlx5_flow_handle *
-__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
int match_header = (is_zero_ether_addr(mac_c) ? 0 :
@@ -188,7 +212,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
misc_parameters);
mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
- MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+ MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
}
@@ -215,7 +239,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
}
static struct mlx5_flow_handle *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
u8 mac_c[ETH_ALEN];
@@ -224,7 +248,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
}
static struct mlx5_flow_handle *
-esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
u8 mac_c[ETH_ALEN];
u8 mac_v[ETH_ALEN];
@@ -237,7 +261,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
}
static struct mlx5_flow_handle *
-esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
u8 mac_c[ETH_ALEN];
u8 mac_v[ETH_ALEN];
@@ -247,6 +271,37 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
+enum {
+ LEGACY_VEPA_PRIO = 0,
+ LEGACY_FDB_PRIO,
+};
+
+static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb;
+ int err;
+
+ root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* num FTE 2, num FG 2 */
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
+ 2, 2, 0, 0);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
+ return err;
+ }
+ esw->fdb_table.legacy.vepa_fdb = fdb;
+
+ return 0;
+}
+
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -275,8 +330,8 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
return -ENOMEM;
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
ft_attr.max_fte = table_size;
+ ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
@@ -335,41 +390,65 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
esw->fdb_table.legacy.promisc_grp = g;
out:
- if (err) {
- if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
- mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
- esw->fdb_table.legacy.allmulti_grp = NULL;
- }
- if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
- mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
- esw->fdb_table.legacy.addr_grp = NULL;
- }
- if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) {
- mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
- esw->fdb_table.legacy.fdb = NULL;
- }
- }
+ if (err)
+ esw_destroy_legacy_fdb_table(esw);
kvfree(flow_group_in);
return err;
}
+static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+ esw_debug(esw->dev, "Destroy VEPA Table\n");
+ if (!esw->fdb_table.legacy.vepa_fdb)
+ return;
+
+ mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
+ esw->fdb_table.legacy.vepa_fdb = NULL;
+}
+
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
+ esw_debug(esw->dev, "Destroy FDB Table\n");
if (!esw->fdb_table.legacy.fdb)
return;
- esw_debug(esw->dev, "Destroy FDB Table\n");
- mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
- mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
- mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+ if (esw->fdb_table.legacy.promisc_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+ if (esw->fdb_table.legacy.allmulti_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ if (esw->fdb_table.legacy.addr_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+
esw->fdb_table.legacy.fdb = NULL;
esw->fdb_table.legacy.addr_grp = NULL;
esw->fdb_table.legacy.allmulti_grp = NULL;
esw->fdb_table.legacy.promisc_grp = NULL;
}
+static int esw_create_legacy_table(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ err = esw_create_legacy_vepa_table(esw);
+ if (err)
+ return err;
+
+ err = esw_create_legacy_fdb_table(esw);
+ if (err)
+ esw_destroy_legacy_vepa_table(esw);
+
+ return err;
+}
+
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+ esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_fdb_table(esw);
+ esw_destroy_legacy_vepa_table(esw);
+}
+
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@@ -377,19 +456,19 @@ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
u8 *mac = vaddr->node.addr;
- u32 vport = vaddr->vport;
+ u16 vport = vaddr->vport;
int err;
- /* Skip mlx5_mpfs_add_mac for PFs,
- * it is already done by the PF netdev in mlx5e_execute_l2_action
+ /* Skip mlx5_mpfs_add_mac for eswitch_managers,
+ * it is already done by its netdev in mlx5e_execute_l2_action
*/
- if (!vport)
+ if (esw->manager_vport == vport)
goto fdb_add;
err = mlx5_mpfs_add_mac(esw->dev, mac);
if (err) {
esw_warn(esw->dev,
- "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n",
+ "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
mac, vport, err);
return err;
}
@@ -409,13 +488,13 @@ fdb_add:
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
u8 *mac = vaddr->node.addr;
- u32 vport = vaddr->vport;
+ u16 vport = vaddr->vport;
int err = 0;
- /* Skip mlx5_mpfs_del_mac for PFs,
- * it is already done by the PF netdev in mlx5e_execute_l2_action
+ /* Skip mlx5_mpfs_del_mac for eswitch managers,
+ * it is already done by its netdev in mlx5e_execute_l2_action
*/
- if (!vport || !vaddr->mpfs)
+ if (!vaddr->mpfs || esw->manager_vport == vport)
goto fdb_del;
err = mlx5_mpfs_del_mac(esw->dev, mac);
@@ -438,17 +517,18 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
struct esw_mc_addr *esw_mc)
{
u8 *mac = vaddr->node.addr;
- u32 vport_idx = 0;
+ struct mlx5_vport *vport;
+ u16 i, vport_num;
- for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
- struct mlx5_vport *vport = &esw->vports[vport_idx];
+ mlx5_esw_for_all_vports(esw, i, vport) {
struct hlist_head *vport_hash = vport->mc_list;
struct vport_addr *iter_vaddr =
l2addr_hash_find(vport_hash,
mac,
struct vport_addr);
+ vport_num = vport->vport;
if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
- vaddr->vport == vport_idx)
+ vaddr->vport == vport_num)
continue;
switch (vaddr->action) {
case MLX5_ACTION_ADD:
@@ -460,14 +540,14 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
if (!iter_vaddr) {
esw_warn(esw->dev,
"ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
- mac, vport_idx);
+ mac, vport_num);
continue;
}
- iter_vaddr->vport = vport_idx;
+ iter_vaddr->vport = vport_num;
iter_vaddr->flow_rule =
esw_fdb_set_vport_rule(esw,
mac,
- vport_idx);
+ vport_num);
iter_vaddr->mc_promisc = true;
break;
case MLX5_ACTION_DEL:
@@ -485,7 +565,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
struct hlist_head *hash = esw->mc_table;
struct esw_mc_addr *esw_mc;
u8 *mac = vaddr->node.addr;
- u32 vport = vaddr->vport;
+ u16 vport = vaddr->vport;
if (!esw->fdb_table.legacy.fdb)
return 0;
@@ -499,7 +579,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
return -ENOMEM;
esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
- esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+ esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
/* Add this multicast mac to all the mc promiscuous vports */
update_allmulti_vports(esw, vaddr, esw_mc);
@@ -525,7 +605,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
struct hlist_head *hash = esw->mc_table;
struct esw_mc_addr *esw_mc;
u8 *mac = vaddr->node.addr;
- u32 vport = vaddr->vport;
+ u16 vport = vaddr->vport;
if (!esw->fdb_table.legacy.fdb)
return 0;
@@ -564,9 +644,9 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
- u32 vport_num, int list_type)
+ u16 vport_num, int list_type)
{
- struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
vport_addr_action vport_addr_add;
vport_addr_action vport_addr_del;
@@ -599,9 +679,9 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
- u32 vport_num, int list_type)
+ u16 vport_num, int list_type)
{
- struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
u8 (*mac_list)[ETH_ALEN];
struct l2addr_node *node;
@@ -686,9 +766,9 @@ out:
/* Sync vport UC/MC list from vport context
* Must be called after esw_update_vport_addr_list
*/
-static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
{
- struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
struct l2addr_node *node;
struct vport_addr *addr;
struct hlist_head *hash;
@@ -721,11 +801,11 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
}
/* Apply vport rx mode to HW FDB table */
-static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
bool promisc, bool mc_promisc)
{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
- struct mlx5_vport *vport = &esw->vports[vport_num];
if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
goto promisc;
@@ -736,7 +816,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
if (!allmulti_addr->uplink_rule)
allmulti_addr->uplink_rule =
esw_fdb_set_vport_allmulti_rule(esw,
- UPLINK_VPORT);
+ MLX5_VPORT_UPLINK);
allmulti_addr->refcnt++;
} else if (vport->allmulti_rule) {
mlx5_del_flow_rules(vport->allmulti_rule);
@@ -764,9 +844,9 @@ promisc:
}
/* Sync vport rx mode from vport context */
-static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
{
- struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int promisc_all = 0;
int promisc_uc = 0;
int promisc_mc = 0;
@@ -1134,13 +1214,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
int err = 0;
u8 *smac_v;
- if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
- mlx5_core_warn(esw->dev,
- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
- vport->vport);
- return -EPERM;
- }
-
esw_vport_cleanup_ingress_rules(esw, vport);
if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1350,8 +1423,8 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
u32 initial_max_rate, u32 initial_bw_share)
{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
- struct mlx5_vport *vport = &esw->vports[vport_num];
struct mlx5_core_dev *dev = esw->dev;
void *vport_elem;
int err = 0;
@@ -1390,7 +1463,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
{
- struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int err = 0;
if (!vport->qos.enabled)
@@ -1409,8 +1482,8 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
u32 max_rate, u32 bw_share)
{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
- struct mlx5_vport *vport = &esw->vports[vport_num];
struct mlx5_core_dev *dev = esw->dev;
void *vport_elem;
u32 bitmask = 0;
@@ -1466,15 +1539,22 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
{
int vport_num = vport->vport;
- if (!vport_num)
+ if (esw->manager_vport == vport_num)
return;
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport_num,
+ vport_num, 1,
vport->info.link_state);
- mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
- mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
+
+ /* Host PF has its own mac/guid. */
+ if (vport_num) {
+ mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
+ vport->info.mac);
+ mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
+ vport->info.node_guid);
+ }
+
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
(vport->info.vlan || vport->info.qos));
@@ -1520,10 +1600,10 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
mlx5_fc_destroy(dev, vport->egress.drop_counter);
}
-static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
+static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
int enable_events)
{
- struct mlx5_vport *vport = &esw->vports[vport_num];
+ u16 vport_num = vport->vport;
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
@@ -1546,8 +1626,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
vport->enabled_events = enable_events;
vport->enabled = true;
- /* only PF is trusted by default */
- if (!vport_num)
+ /* The eswitch manager is trusted by default. The host PF (vport 0) is
+ * trusted as well on a SmartNIC, since it is a vport group manager.
+ */
+ if (esw->manager_vport == vport_num ||
+ (!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
esw_vport_change_handle_locked(vport);
@@ -1557,9 +1640,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
mutex_unlock(&esw->state_lock);
}
-static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
+static void esw_disable_vport(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport = &esw->vports[vport_num];
+ u16 vport_num = vport->vport;
if (!vport->enabled)
return;
@@ -1580,10 +1664,11 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport_num);
- if (vport_num && esw->mode == SRIOV_LEGACY) {
+ if (esw->manager_vport != vport_num &&
+ esw->mode == SRIOV_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport_num,
+ vport_num, 1,
MLX5_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
@@ -1602,7 +1687,7 @@ static int eswitch_vport_event(struct notifier_block *nb,
u16 vport_num;
vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
- vport = &esw->vports[vport_num];
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
if (vport->enabled)
queue_work(esw->work_queue, &vport->vport_change_handler);
@@ -1614,6 +1699,8 @@ static int eswitch_vport_event(struct notifier_block *nb,
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
+ int vf_nvports = 0, total_nvports = 0;
+ struct mlx5_vport *vport;
int err;
int i, enabled_events;
@@ -1631,16 +1718,30 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
+ if (mode == SRIOV_OFFLOADS) {
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
+ if (err)
+ return err;
+ total_nvports = esw->total_vports;
+ } else {
+ vf_nvports = nvfs;
+ total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
+ }
+ }
+
esw->mode = mode;
mlx5_lag_update(esw->dev);
if (mode == SRIOV_LEGACY) {
- err = esw_create_legacy_fdb_table(esw);
+ err = esw_create_legacy_table(esw);
+ if (err)
+ goto abort;
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw, nvfs + 1);
+ err = esw_offloads_init(esw, vf_nvports, total_nvports);
}
if (err)
@@ -1655,8 +1756,20 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
* 2. FDB/Eswitch is programmed by user space tools
*/
enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
- for (i = 0; i <= nvfs; i++)
- esw_enable_vport(esw, i, enabled_events);
+
+ /* Enable PF vport */
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ esw_enable_vport(esw, vport, enabled_events);
+
+ /* Enable ECPF vports */
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ esw_enable_vport(esw, vport, enabled_events);
+ }
+
+ /* Enable VF vports */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
+ esw_enable_vport(esw, vport, enabled_events);
if (mode == SRIOV_LEGACY) {
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
@@ -1681,8 +1794,8 @@ abort:
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
+ struct mlx5_vport *vport;
int old_mode;
- int nvports;
int i;
if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
@@ -1692,13 +1805,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw->enabled_vports, esw->mode);
mc_promisc = &esw->mc_promisc;
- nvports = esw->enabled_vports;
if (esw->mode == SRIOV_LEGACY)
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
- for (i = 0; i < esw->total_vports; i++)
- esw_disable_vport(esw, i);
+ mlx5_esw_for_all_vports(esw, i, vport)
+ esw_disable_vport(esw, vport);
if (mc_promisc && mc_promisc->uplink_rule)
mlx5_del_flow_rules(mc_promisc->uplink_rule);
@@ -1706,9 +1818,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_destroy_tsar(esw);
if (esw->mode == SRIOV_LEGACY)
- esw_destroy_legacy_fdb_table(esw);
+ esw_destroy_legacy_table(esw);
else if (esw->mode == SRIOV_OFFLOADS)
- esw_offloads_cleanup(esw, nvports);
+ esw_offloads_cleanup(esw);
old_mode = esw->mode;
esw->mode = SRIOV_NONE;
@@ -1725,10 +1837,10 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int total_vports = MLX5_TOTAL_VPORTS(dev);
struct mlx5_eswitch *esw;
- int vport_num;
- int err;
+ struct mlx5_vport *vport;
+ int err, i;
- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!MLX5_VPORT_MANAGER(dev))
return 0;
esw_info(dev,
@@ -1742,6 +1854,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
return -ENOMEM;
esw->dev = dev;
+ esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
@@ -1756,6 +1869,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ esw->total_vports = total_vports;
+
err = esw_offloads_init_reps(esw);
if (err)
goto abort;
@@ -1764,17 +1879,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
hash_init(esw->offloads.mod_hdr_tbl);
mutex_init(&esw->state_lock);
- for (vport_num = 0; vport_num < total_vports; vport_num++) {
- struct mlx5_vport *vport = &esw->vports[vport_num];
-
- vport->vport = vport_num;
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
}
- esw->total_vports = total_vports;
esw->enabled_vports = 0;
esw->mode = SRIOV_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
@@ -1797,7 +1909,7 @@ abort:
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
- if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
+ if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
return;
esw_info(esw->dev, "cleanup\n");
@@ -1827,13 +1939,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+ if (evport->info.spoofchk && !is_valid_ether_addr(mac))
mlx5_core_warn(esw->dev,
- "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+ "Set invalid MAC while spoofchk is on, vport(%d)\n",
vport);
- err = -EPERM;
- goto unlock;
- }
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
if (err) {
@@ -1876,7 +1985,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
err = mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport, link_state);
+ vport, 1, link_state);
if (err) {
mlx5_core_warn(esw->dev,
"Failed to set vport %d link state, err = %d",
@@ -1979,6 +2088,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
evport = &esw->vports[vport];
pschk = evport->info.spoofchk;
evport->info.spoofchk = spoofchk;
+ if (pschk && !is_valid_ether_addr(evport->info.mac))
+ mlx5_core_warn(esw->dev,
+ "Spoofchk in set while MAC is invalid, vport(%d)\n",
+ evport->vport);
if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
if (err)
@@ -1988,6 +2101,127 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
return err;
}
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
+{
+ if (esw->fdb_table.legacy.vepa_uplink_rule)
+ mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
+
+ if (esw->fdb_table.legacy.vepa_star_rule)
+ mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
+
+ esw->fdb_table.legacy.vepa_uplink_rule = NULL;
+ esw->fdb_table.legacy.vepa_star_rule = NULL;
+}
+
+static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
+ u8 setting)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+ void *misc;
+
+ if (!setting) {
+ esw_cleanup_vepa_rules(esw);
+ return 0;
+ }
+
+ if (esw->fdb_table.legacy.vepa_uplink_rule)
+ return 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Uplink rule forwards uplink traffic to the FDB */
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = esw->fdb_table.legacy.fdb;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+ &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ } else {
+ esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
+ }
+
+ /* Star rule to forward all traffic to uplink vport */
+ memset(spec, 0, sizeof(*spec));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = MLX5_VPORT_UPLINK;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+ &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ } else {
+ esw->fdb_table.legacy.vepa_star_rule = flow_rule;
+ }
+
+out:
+ kvfree(spec);
+ if (err)
+ esw_cleanup_vepa_rules(esw);
+ return err;
+}
+
+int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+{
+ int err = 0;
+
+ if (!esw)
+ return -EOPNOTSUPP;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != SRIOV_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = _mlx5_eswitch_set_vepa_locked(esw, setting);
+
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+{
+ int err = 0;
+
+ if (!esw)
+ return -EOPNOTSUPP;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->mode != SRIOV_LEGACY) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
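A minimal usage sketch for the two exported helpers above, assuming a caller that maps IFLA bridge modes onto them; the wrapper name and surrounding plumbing are illustrative and not part of this patch:

	#include <linux/if_bridge.h>	/* BRIDGE_MODE_VEB / BRIDGE_MODE_VEPA */

	/* Illustrative only: toggle legacy-mode VEPA from a bridge mode value. */
	static int example_set_bridge_mode(struct mlx5_eswitch *esw, u16 mode)
	{
		switch (mode) {
		case BRIDGE_MODE_VEPA:	/* hairpin all traffic via the uplink */
			return mlx5_eswitch_set_vepa(esw, 1);
		case BRIDGE_MODE_VEB:	/* default: switch locally in the legacy FDB */
			return mlx5_eswitch_set_vepa(esw, 0);
		default:
			return -EOPNOTSUPP;
		}
	}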
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport, bool setting)
{
@@ -2015,8 +2249,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
u32 max_guarantee = 0;
int i;
- for (i = 0; i < esw->total_vports; i++) {
- evport = &esw->vports[i];
+ mlx5_esw_for_all_vports(esw, i, evport) {
if (!evport->enabled || evport->info.min_rate < max_guarantee)
continue;
max_guarantee = evport->info.min_rate;
@@ -2035,8 +2268,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int err;
int i;
- for (i = 0; i < esw->total_vports; i++) {
- evport = &esw->vports[i];
+ mlx5_esw_for_all_vports(esw, i, evport) {
if (!evport->enabled)
continue;
vport_min_rate = evport->info.min_rate;
@@ -2051,7 +2283,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
if (bw_share == evport->qos.bw_share)
continue;
- err = esw_vport_qos_config(esw, i, vport_max_rate,
+ err = esw_vport_qos_config(esw, evport->vport, vport_max_rate,
bw_share);
if (!err)
evport->qos.bw_share = bw_share;
@@ -2134,7 +2366,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
return 0;
- err = mlx5_query_vport_down_stats(dev, vport_idx,
+ err = mlx5_query_vport_down_stats(dev, vport_idx, 1,
&rx_discard_vport_down,
&tx_discard_vport_down);
if (err)
@@ -2171,8 +2403,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
MLX5_SET(query_vport_counter_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_vport_counter_in, in, other_vport, 1);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 1);
memset(out, 0, outlen);
err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
@@ -2245,3 +2476,10 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
return false;
}
+
+bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
+ struct mlx5_core_dev *dev1)
+{
+ return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
+ dev1->priv.eswitch->mode == SRIOV_OFFLOADS);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 9c89eea9b2c3..3f3cd32ae60a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -38,6 +38,7 @@
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
@@ -49,8 +50,6 @@
#define MLX5_MAX_MC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
-#define FDB_UPLINK_VPORT 0xffff
-
#define MLX5_MIN_BW_SHARE 1
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
@@ -138,6 +137,9 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_group *addr_grp;
struct mlx5_flow_group *allmulti_grp;
struct mlx5_flow_group *promisc_grp;
+ struct mlx5_flow_table *vepa_fdb;
+ struct mlx5_flow_handle *vepa_uplink_rule;
+ struct mlx5_flow_handle *vepa_star_rule;
} legacy;
struct offloads_fdb {
@@ -183,6 +185,16 @@ struct esw_mc_addr { /* SRIOV only */
u32 refcnt;
};
+struct mlx5_host_work {
+ struct work_struct work;
+ struct mlx5_eswitch *esw;
+};
+
+struct mlx5_host_info {
+ struct mlx5_nb nb;
+ u16 num_vfs;
+};
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -206,10 +218,13 @@ struct mlx5_eswitch {
struct mlx5_esw_offload offloads;
int mode;
int nvports;
+ u16 manager_vport;
+ struct mlx5_host_info host_info;
};
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup(struct mlx5_eswitch *esw);
+int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
+ int total_nvports);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
@@ -230,6 +245,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
u32 max_rate, u32 min_rate);
+int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
+int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -312,6 +329,7 @@ struct mlx5_esw_flow_attr {
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
u8 match_level;
+ u8 tunnel_match_level;
struct mlx5_fc *counter;
u32 chain;
u16 prio;
@@ -353,6 +371,8 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
+bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
+ struct mlx5_core_dev *dev1);
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
@@ -364,6 +384,53 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
#define esw_debug(dev, format, ...) \
mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
+
+/* The returned number is valid only when the dev is the eswitch manager. */
+static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_ecpf_esw_manager(dev) ?
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF;
+}
+
+static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
+{
+ /* The uplink always resides at the last element of the array. */
+ return esw->total_vports - 1;
+}
+
+static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
+{
+ return esw->total_vports - 2;
+}
+
+static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ if (vport_num == MLX5_VPORT_ECPF) {
+ if (!mlx5_ecpf_vport_exists(esw->dev))
+ esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
+ return mlx5_eswitch_ecpf_idx(esw);
+ }
+
+ if (vport_num == MLX5_VPORT_UPLINK)
+ return mlx5_eswitch_uplink_idx(esw);
+
+ return vport_num;
+}
+
+static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
+ int index)
+{
+ if (index == mlx5_eswitch_ecpf_idx(esw) &&
+ mlx5_ecpf_vport_exists(esw->dev))
+ return MLX5_VPORT_ECPF;
+
+ if (index == mlx5_eswitch_uplink_idx(esw))
+ return MLX5_VPORT_UPLINK;
+
+ return index;
+}
+
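A worked illustration of the index layout these helpers assume; the total_vports arithmetic below is hypothetical, only the "ECPF at total_vports - 2, uplink at total_vports - 1" placement comes from the code above:

	/*
	 * Hypothetical device with 4 VFs plus ECPF and uplink, assuming the
	 * vport array holds PF + VFs + ECPF + uplink (total_vports == 7):
	 *
	 *   index 0     <-> MLX5_VPORT_PF     (vport 0)
	 *   index 1..4  <-> VF vports 1..4
	 *   index 5     <-> MLX5_VPORT_ECPF   (total_vports - 2)
	 *   index 6     <-> MLX5_VPORT_UPLINK (total_vports - 1)
	 *
	 * so the two conversions round-trip, e.g.
	 *   mlx5_eswitch_vport_num_to_index(esw,
	 *           mlx5_eswitch_index_to_vport_num(esw, 5)) == 5
	 */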
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 53065b6ae593..f2260391be5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -40,15 +40,59 @@
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
+#include "ecpf.h"
+#include "lib/eq.h"
enum {
FDB_FAST_PATH = 0,
FDB_SLOW_PATH
};
+/* There are two match-all miss flows, one for unicast dst mac and
+ * one for multicast.
+ */
+#define MLX5_ESW_MISS_FLOWS (2)
+
#define fdb_prio_table(esw, chain, prio, level) \
(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
+#define UPLINK_REP_INDEX 0
+
+/* The rep getters/iterators are only valid after esw->total_vports
+ * and vport->vport are initialized in mlx5_eswitch_init.
+ */
+#define mlx5_esw_for_all_reps(esw, i, rep) \
+ for ((i) = MLX5_VPORT_PF; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) < (esw)->total_vports; (i)++)
+
+#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
+ for ((i) = MLX5_VPORT_FIRST_VF; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) <= (nvfs); (i)++)
+
+#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
+ for ((i) = (nvfs); \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) >= MLX5_VPORT_FIRST_VF; (i)--)
+
+#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \
+ for ((vport) = MLX5_VPORT_FIRST_VF; \
+ (vport) <= (nvfs); (vport)++)
+
+#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \
+ for ((vport) = (nvfs); \
+ (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
+
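Unlike the struct-based iterators in eswitch.c, these walk bare vport numbers; a trivial usage sketch (MLX5_VPORT_FIRST_VF is assumed to be 1 here):

	u16 vport;

	/* Visits vport numbers 1, 2 and 3 when the host has three VFs. */
	mlx5_esw_for_each_vf_vport(esw, vport, 3)
		esw_debug(esw->dev, "VF vport 0x%x\n", vport);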
+static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+
+ WARN_ON(idx > esw->total_vports - 1);
+ return &esw->offloads.vport_reps[idx];
+}
+
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
@@ -160,14 +204,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
- if (attr->match_level == MLX5_MATCH_NONE)
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
- else
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS;
-
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
- spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (attr->tunnel_match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ if (attr->match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
+ } else if (attr->match_level != MLX5_MATCH_NONE) {
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ }
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
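The hunk above changes which header-match criteria are enabled for offloaded rules; summarized as a comment (the table is a restatement of that logic, not part of the patch):

	/*
	 * MISC_PARAMETERS is always enabled for the source-port match. Then:
	 *
	 *   DECAP set,   tunnel_match_level != NONE -> + OUTER_HEADERS (tunnel hdrs)
	 *   DECAP set,   match_level != NONE        -> + INNER_HEADERS (inner packet)
	 *   DECAP unset, match_level != NONE        -> + OUTER_HEADERS
	 */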
@@ -318,7 +363,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
- if (!rep->rep_if[REP_ETH].valid)
+ if (rep->rep_if[REP_ETH].state != REP_LOADED)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -359,15 +404,15 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
in_rep = attr->in_rep;
out_rep = attr->dests[0].rep;
- if (push && in_rep->vport == FDB_UPLINK_VPORT)
+ if (push && in_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
- if (pop && out_rep->vport == FDB_UPLINK_VPORT)
+ if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
if (!push && !pop && fwd)
- if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
+ if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
goto out_notsupp;
/* protects against (1) setting rules with different vlans to push and
@@ -409,7 +454,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
+ if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
attr->vlan_handled = true;
}
@@ -469,7 +514,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
+ if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
return 0;
@@ -516,7 +561,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
- MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
+ /* source vport is the esw manager */
+ MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
@@ -561,7 +607,7 @@ static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
source_eswitch_owner_vhca_id);
dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest->vport.num = 0;
+ dest->vport.num = peer_dev->priv.eswitch->manager_vport;
dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
@@ -595,14 +641,35 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- for (i = 1; i < nvports; i++) {
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF);
+ flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto add_pf_flow_err;
+ }
+ flows[MLX5_VPORT_PF] = flow;
+ }
+
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
+ flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto add_ecpf_flow_err;
+ }
+ flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
+ }
+
+ mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) {
MLX5_SET(fte_match_set_misc, misc, source_port, i);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
- esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
- goto add_flow_err;
+ goto add_vf_flow_err;
}
flows[i] = flow;
}
@@ -612,9 +679,18 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
kvfree(spec);
return 0;
-add_flow_err:
- for (i--; i > 0; i--)
+add_vf_flow_err:
+ nvports = --i;
+ mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports)
mlx5_del_flow_rules(flows[i]);
+
+ if (mlx5_ecpf_vport_exists(esw->dev))
+ mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
+add_ecpf_flow_err:
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev))
+ mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
+add_pf_flow_err:
+ esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
kvfree(flows);
alloc_flows_err:
kvfree(spec);
@@ -628,9 +704,15 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
flows = esw->fdb_table.offloads.peer_miss_rules;
- for (i = 1; i < esw->total_vports; i++)
+ mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev))
mlx5_del_flow_rules(flows[i]);
+ if (mlx5_ecpf_vport_exists(esw->dev))
+ mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
+
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev))
+ mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
+
kvfree(flows);
}
@@ -660,7 +742,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_c[0] = 0x01;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport.num = 0;
+ dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
@@ -904,8 +986,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
esw->fdb_table.offloads.fdb_left[i] =
ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
- table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
- esw->total_vports;
+ table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
+ MLX5_ESW_MISS_FLOWS + esw->total_vports;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -999,7 +1081,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
dmac[0] = 0x01;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ix + MLX5_ESW_MISS_FLOWS);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
@@ -1048,7 +1131,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_destroy_offloads_fast_fdb_tables(esw);
}
-static int esw_create_offloads_table(struct mlx5_eswitch *esw)
+static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -1062,7 +1145,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
- ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
+ ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
@@ -1082,16 +1165,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
mlx5_destroy_flow_table(offloads->ft_offloads);
}
-static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
+static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
- struct mlx5_priv *priv = &esw->dev->priv;
u32 *flow_group_in;
void *match_criteria, *misc;
int err = 0;
- int nvports = priv->sriov.num_vfs + 2;
+ nvports = nvports + MLX5_ESW_MISS_FLOWS;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1168,7 +1250,8 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
- if (esw->mode != SRIOV_LEGACY) {
+ if (esw->mode != SRIOV_LEGACY &&
+ !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set offloads mode, SRIOV legacy not enabled");
return -EINVAL;
@@ -1206,9 +1289,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_esw_offload *offloads;
struct mlx5_eswitch_rep *rep;
- u8 hw_id[ETH_ALEN];
+ u8 hw_id[ETH_ALEN], rep_type;
int vport;
esw->offloads.vport_reps = kcalloc(total_vfs,
@@ -1217,75 +1299,203 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
if (!esw->offloads.vport_reps)
return -ENOMEM;
- offloads = &esw->offloads;
mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
- for (vport = 0; vport < total_vfs; vport++) {
- rep = &offloads->vport_reps[vport];
-
- rep->vport = vport;
+ mlx5_esw_for_all_reps(esw, vport, rep) {
+ rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
ether_addr_copy(rep->hw_id, hw_id);
- }
- offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+ rep->rep_if[rep_type].state = REP_UNREGISTERED;
+ }
return 0;
}
-static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
+static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ if (rep->rep_if[rep_type].state != REP_LOADED)
+ return;
+
+ rep->rep_if[rep_type].unload(rep);
+ rep->rep_if[rep_type].state = REP_REGISTERED;
+}
+
+static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
- int vport;
- for (vport = nvports - 1; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->rep_if[rep_type].valid)
- continue;
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+ }
- rep->rep_if[rep_type].unload(rep);
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
+ __esw_offloads_unload_rep(esw, rep, rep_type);
}
+
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
+ __esw_offloads_unload_rep(esw, rep, rep_type);
}
-static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int i;
+
+ mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
+static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = NUM_REP_TYPES;
+
+ while (rep_type-- > 0)
+ __unload_reps_vf_vport(esw, nvports, rep_type);
+}
+
+static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
+{
+ __unload_reps_vf_vport(esw, nvports, rep_type);
+
+ /* Special vports must be the last to unload. */
+ __unload_reps_special_vport(esw, rep_type);
+}
+
+static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = NUM_REP_TYPES;
while (rep_type-- > 0)
- esw_offloads_unload_reps_type(esw, nvports, rep_type);
+ __unload_reps_all_vport(esw, nvports, rep_type);
+}
+
+static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ int err = 0;
+
+ if (rep->rep_if[rep_type].state != REP_REGISTERED)
+ return 0;
+
+ err = rep->rep_if[rep_type].load(esw->dev, rep);
+ if (err)
+ return err;
+
+ rep->rep_if[rep_type].state = REP_LOADED;
+
+ return 0;
}
-static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
+static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
- int vport;
int err;
- for (vport = 0; vport < nvports; vport++) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->rep_if[rep_type].valid)
- continue;
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ return err;
- err = rep->rep_if[rep_type].load(esw->dev, rep);
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
- goto err_reps;
+ goto err_pf;
+ }
+
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ goto err_ecpf;
+ }
+
+ return 0;
+
+err_ecpf:
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+ }
+
+err_pf:
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+ return err;
+}
+
+static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int err, i;
+
+ mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ goto err_vf;
}
return 0;
+err_vf:
+ __unload_reps_vf_vport(esw, --i, rep_type);
+ return err;
+}
+
+static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = 0;
+ int err;
+
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = __load_reps_vf_vport(esw, nvports, rep_type);
+ if (err)
+ goto err_reps;
+ }
+
+ return err;
+
err_reps:
- esw_offloads_unload_reps_type(esw, vport, rep_type);
+ while (rep_type-- > 0)
+ __unload_reps_vf_vport(esw, nvports, rep_type);
+ return err;
+}
+
+static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
+{
+ int err;
+
+ /* Special vports must be loaded first. */
+ err = __load_reps_special_vport(esw, rep_type);
+ if (err)
+ return err;
+
+ err = __load_reps_vf_vport(esw, nvports, rep_type);
+ if (err)
+ goto err_vfs;
+
+ return 0;
+
+err_vfs:
+ __unload_reps_special_vport(esw, rep_type);
return err;
}
-static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = 0;
int err;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
- err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+ err = __load_reps_all_vport(esw, nvports, rep_type);
if (err)
goto err_reps;
}
@@ -1294,7 +1504,7 @@ static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
err_reps:
while (rep_type-- > 0)
- esw_offloads_unload_reps_type(esw, nvports, rep_type);
+ __unload_reps_all_vport(esw, nvports, rep_type);
return err;
}
@@ -1397,7 +1607,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
@@ -1407,24 +1617,16 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
return err;
- err = esw_create_offloads_table(esw);
+ err = esw_create_offloads_table(esw, nvports);
if (err)
goto create_ft_err;
- err = esw_create_vport_rx_group(esw);
+ err = esw_create_vport_rx_group(esw, nvports);
if (err)
goto create_fg_err;
- err = esw_offloads_load_reps(esw, nvports);
- if (err)
- goto err_reps;
-
- esw_offloads_devcom_init(esw);
return 0;
-err_reps:
- esw_destroy_vport_rx_group(esw);
-
create_fg_err:
esw_destroy_offloads_table(esw);
@@ -1434,6 +1636,95 @@ create_ft_err:
return err;
}
+static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
+{
+ esw_destroy_vport_rx_group(esw);
+ esw_destroy_offloads_table(esw);
+ esw_destroy_offloads_fdb_tables(esw);
+}
+
+static void esw_host_params_event_handler(struct work_struct *work)
+{
+ struct mlx5_host_work *host_work;
+ struct mlx5_eswitch *esw;
+ int err, num_vf = 0;
+
+ host_work = container_of(work, struct mlx5_host_work, work);
+ esw = host_work->esw;
+
+ err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
+ if (err || num_vf == esw->host_info.num_vfs)
+ goto out;
+
+ /* Number of VFs can only change from "0 to x" or "x to 0". */
+ if (esw->host_info.num_vfs > 0) {
+ esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
+ } else {
+ err = esw_offloads_load_vf_reps(esw, num_vf);
+
+ if (err)
+ goto out;
+ }
+
+ esw->host_info.num_vfs = num_vf;
+
+out:
+ kfree(host_work);
+}
+
+static int esw_host_params_event(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_host_work *host_work;
+ struct mlx5_host_info *host_info;
+ struct mlx5_eswitch *esw;
+
+ host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
+ if (!host_work)
+ return NOTIFY_DONE;
+
+ host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
+ esw = container_of(host_info, struct mlx5_eswitch, host_info);
+
+ host_work->esw = esw;
+
+ INIT_WORK(&host_work->work, esw_host_params_event_handler);
+ queue_work(esw->work_queue, &host_work->work);
+
+ return NOTIFY_OK;
+}
+
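A note on the split between esw_host_params_event() and its work handler, assuming the EQ notifier chain is invoked from atomic context:

	/*
	 * esw_host_params_event() presumably runs from the EQ interrupt path,
	 * hence the GFP_ATOMIC allocation and the deferral to esw->work_queue;
	 * mlx5_query_host_params_num_vfs() issues a firmware command and may
	 * sleep, so it can only be called from the work handler.
	 */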
+int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
+ int total_nvports)
+{
+ int err;
+
+ mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
+
+ err = esw_offloads_steering_init(esw, total_nvports);
+ if (err)
+ return err;
+
+ err = esw_offloads_load_all_reps(esw, vf_nvports);
+ if (err)
+ goto err_reps;
+
+ esw_offloads_devcom_init(esw);
+
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
+ HOST_PARAMS_CHANGE);
+ mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
+ esw->host_info.num_vfs = vf_nvports;
+ }
+
+ return 0;
+
+err_reps:
+ esw_offloads_steering_cleanup(esw);
+ return err;
+}
+
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
@@ -1453,13 +1744,21 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
return err;
}
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
+ u16 num_vfs;
+
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
+ flush_workqueue(esw->work_queue);
+ num_vfs = esw->host_info.num_vfs;
+ } else {
+ num_vfs = esw->dev->priv.sriov.num_vfs;
+ }
+
esw_offloads_devcom_cleanup(esw);
- esw_offloads_unload_reps(esw, nvports);
- esw_destroy_vport_rx_group(esw);
- esw_destroy_offloads_table(esw);
- esw_destroy_offloads_fdb_tables(esw);
+ esw_offloads_unload_all_reps(esw, num_vfs);
+ esw_offloads_steering_cleanup(esw);
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
@@ -1548,7 +1847,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
if (!MLX5_ESWITCH_MANAGER(dev))
return -EPERM;
- if (dev->priv.eswitch->mode == SRIOV_NONE)
+ if (dev->priv.eswitch->mode == SRIOV_NONE &&
+ !mlx5_core_is_ecpf_esw_manager(dev))
return -EOPNOTSUPP;
return 0;
@@ -1760,47 +2060,45 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
return 0;
}
-void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
- int vport_index,
- struct mlx5_eswitch_rep_if *__rep_if,
- u8 rep_type)
+void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep_if *__rep_if,
+ u8 rep_type)
{
- struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep_if *rep_if;
+ struct mlx5_eswitch_rep *rep;
+ int i;
- rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
-
- rep_if->load = __rep_if->load;
- rep_if->unload = __rep_if->unload;
- rep_if->get_proto_dev = __rep_if->get_proto_dev;
- rep_if->priv = __rep_if->priv;
+ mlx5_esw_for_all_reps(esw, i, rep) {
+ rep_if = &rep->rep_if[rep_type];
+ rep_if->load = __rep_if->load;
+ rep_if->unload = __rep_if->unload;
+ rep_if->get_proto_dev = __rep_if->get_proto_dev;
+ rep_if->priv = __rep_if->priv;
- rep_if->valid = true;
+ rep_if->state = REP_REGISTERED;
+ }
}
-EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);
+EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
-void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index, u8 rep_type)
+void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
- struct mlx5_esw_offload *offloads = &esw->offloads;
+ u16 max_vf = mlx5_core_max_vfs(esw->dev);
struct mlx5_eswitch_rep *rep;
+ int i;
- rep = &offloads->vport_reps[vport_index];
-
- if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
- rep->rep_if[rep_type].unload(rep);
+ if (esw->mode == SRIOV_OFFLOADS)
+ __unload_reps_all_vport(esw, max_vf, rep_type);
- rep->rep_if[rep_type].valid = false;
+ mlx5_esw_for_all_reps(esw, i, rep)
+ rep->rep_if[rep_type].state = REP_UNREGISTERED;
}
-EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
+EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
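The representor life cycle implied by the register/load/unload/unregister paths above, written out as a summary (the enum names come from the patch; the diagram itself does not):

	/*
	 *   REP_UNREGISTERED --mlx5_eswitch_register_vport_reps()--->  REP_REGISTERED
	 *   REP_REGISTERED   --__esw_offloads_load_rep() / .load()-->  REP_LOADED
	 *   REP_LOADED       --__esw_offloads_unload_rep()/.unload()-> REP_REGISTERED
	 *   REP_REGISTERED   --mlx5_eswitch_unregister_vport_reps()->  REP_UNREGISTERED
	 */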
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
-#define UPLINK_REP_INDEX 0
- struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
- rep = &offloads->vport_reps[UPLINK_REP_INDEX];
+ rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
return rep->rep_if[rep_type].priv;
}
@@ -1808,15 +2106,11 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
int vport,
u8 rep_type)
{
- struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
- if (vport == FDB_UPLINK_VPORT)
- vport = UPLINK_REP_INDEX;
-
- rep = &offloads->vport_reps[vport];
+ rep = mlx5_eswitch_get_rep(esw, vport);
- if (rep->rep_if[rep_type].valid &&
+ if (rep->rep_if[rep_type].state == REP_LOADED &&
rep->rep_if[rep_type].get_proto_dev)
return rep->rep_if[rep_type].get_proto_dev(rep);
return NULL;
@@ -1825,13 +2119,13 @@ EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
- return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
+ return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
int vport)
{
- return &esw->offloads.vport_reps[vport];
+ return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index fbc42b7252a9..5d5864e8df3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -103,6 +103,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_STALL_EVENT";
case MLX5_EVENT_TYPE_CMD:
return "MLX5_EVENT_TYPE_CMD";
+ case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE:
+ return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
@@ -211,11 +213,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
enum port_module_event_status_type module_status;
enum port_module_event_error_type error_type;
struct mlx5_eqe_port_module *module_event_eqe;
- const char *status_str, *error_str;
+ const char *status_str;
u8 module_num;
module_event_eqe = &eqe->data.port_module;
- module_num = module_event_eqe->module;
module_status = module_event_eqe->module_status &
PORT_MODULE_EVENT_MODULE_STATUS_MASK;
error_type = module_event_eqe->error_type &
@@ -223,25 +224,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
if (module_status < MLX5_MODULE_STATUS_NUM)
events->pme_stats.status_counters[module_status]++;
- status_str = mlx5_pme_status_to_string(module_status);
- if (module_status == MLX5_MODULE_STATUS_ERROR) {
+ if (module_status == MLX5_MODULE_STATUS_ERROR)
if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
events->pme_stats.error_counters[error_type]++;
- error_str = mlx5_pme_error_to_string(error_type);
- }
if (!printk_ratelimit())
return NOTIFY_OK;
- if (module_status == MLX5_MODULE_STATUS_ERROR)
+ module_num = module_event_eqe->module;
+ status_str = mlx5_pme_status_to_string(module_status);
+ if (module_status == MLX5_MODULE_STATUS_ERROR) {
+ const char *error_str = mlx5_pme_error_to_string(error_type);
+
mlx5_core_err(events->dev,
"Port module event[error]: module %u, %s, %s\n",
module_num, status_str, error_str);
- else
+ } else {
mlx5_core_info(events->dev,
"Port module event: module %u, %s\n",
module_num, status_str);
+ }
return NOTIFY_OK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 27c5f6c7d36a..d046d1ec2a86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -317,7 +317,6 @@ static int mlx5_fpga_event(struct mlx5_fpga_device *fdev,
const char *event_name;
bool teardown = false;
unsigned long flags;
- u32 fpga_qpn;
u8 syndrome;
switch (event) {
@@ -328,7 +327,6 @@ static int mlx5_fpga_event(struct mlx5_fpga_device *fdev,
case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome);
event_name = mlx5_fpga_qp_syndrome_to_string(syndrome);
- fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn);
break;
default:
return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 79f122b45def..f2cfa012315e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
@@ -397,6 +398,7 @@ static void del_hw_flow_table(struct fs_node *node)
fs_get_obj(ft, node);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
+ trace_mlx5_fs_del_ft(ft);
if (node->active) {
err = root->cmds->destroy_flow_table(dev, ft);
@@ -618,7 +620,8 @@ static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steer
if (ret) {
kmem_cache_free(steering->fgs_cache, fg);
return ERR_PTR(ret);
-}
+ }
+
ida_init(&fg->fte_allocator);
fg->mask.match_criteria_enable = match_criteria_enable;
memcpy(&fg->mask.match_criteria, match_criteria,
@@ -1019,6 +1022,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
fs_prio->num_ft++;
up_write_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
+ trace_mlx5_fs_add_ft(ft);
return ft;
destroy_ft:
root->cmds->destroy_flow_table(root->dev, ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 196c07383082..cb9fa3430c53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
mlx5_core_err(dev, "start\n");
if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
- mlx5_cmd_trigger_completions(dev);
+ mlx5_cmd_flush(dev);
}
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index bfc0f6581729..4eac42555c7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -446,11 +446,11 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
- err = mlx5e_open_channels(priv, &new_channels);
+
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
if (err)
goto out;
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
netdev->mtu = new_channels.params.sw_mtu;
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 3a6baed722d8..48aa6e030bcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -35,37 +35,8 @@
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"
-
-enum {
- MLX5_LAG_FLAG_ROCE = 1 << 0,
- MLX5_LAG_FLAG_SRIOV = 1 << 1,
-};
-
-#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV)
-
-struct lag_func {
- struct mlx5_core_dev *dev;
- struct net_device *netdev;
-};
-
-/* Used for collection of netdev event info. */
-struct lag_tracker {
- enum netdev_lag_tx_type tx_type;
- struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
- bool is_bonded;
-};
-
-/* LAG data of a ConnectX card.
- * It serves both its phys functions.
- */
-struct mlx5_lag {
- u8 flags;
- u8 v2p_map[MLX5_MAX_PORTS];
- struct lag_func pf[MLX5_MAX_PORTS];
- struct lag_tracker tracker;
- struct delayed_work bond_work;
- struct notifier_block nb;
-};
+#include "lag.h"
+#include "lag_mp.h"
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -147,13 +118,8 @@ static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
-static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
-{
- return dev->priv.lag;
-}
-
-static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
- struct net_device *ndev)
+int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
+ struct net_device *ndev)
{
int i;
@@ -174,11 +140,6 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
}
-static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
-{
- return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
-}
-
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
u8 *port1, u8 *port2)
{
@@ -195,8 +156,8 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
*port2 = 1;
}
-static void mlx5_modify_lag(struct mlx5_lag *ldev,
- struct lag_tracker *tracker)
+void mlx5_modify_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
{
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
u8 v2p_port1, v2p_port2;
@@ -241,9 +202,9 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
return err;
}
-static int mlx5_activate_lag(struct mlx5_lag *ldev,
- struct lag_tracker *tracker,
- u8 flags)
+int mlx5_activate_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ u8 flags)
{
bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
@@ -343,6 +304,11 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
roce_lag = !mlx5_sriov_is_enabled(dev0) &&
!mlx5_sriov_is_enabled(dev1);
+#ifdef CONFIG_MLX5_ESWITCH
+ roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE &&
+ dev1->priv.eswitch->mode == SRIOV_NONE;
+#endif
+
if (roce_lag)
mlx5_lag_remove_ib_devices(ldev);
@@ -381,7 +347,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
- schedule_delayed_work(&ldev->bond_work, delay);
+ queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}
static void mlx5_do_bond_work(struct work_struct *work)
@@ -533,6 +499,12 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void)
if (!ldev)
return NULL;
+ ldev->wq = create_singlethread_workqueue("mlx5_lag");
+ if (!ldev->wq) {
+ kfree(ldev);
+ return NULL;
+ }
+
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
return ldev;
@@ -540,6 +512,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void)
static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
+ destroy_workqueue(ldev->wq);
kfree(ldev);
}
@@ -587,6 +560,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
+ int err;
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
@@ -614,6 +588,32 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
}
+
+ err = mlx5_lag_mp_init(ldev);
+ if (err)
+ mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
+ err);
+}
+
+int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
+{
+ struct mlx5_lag *ldev;
+ int n;
+
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev) {
+ mlx5_core_warn(dev, "no lag device, can't get pf num\n");
+ return -EINVAL;
+ }
+
+ for (n = 0; n < MLX5_MAX_PORTS; n++)
+ if (ldev->pf[n].dev == dev) {
+ *pf_num = n;
+ return 0;
+ }
+
+ mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
+ return -EINVAL;
}
/* Must be called with intf_mutex held */
@@ -638,6 +638,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
if (i == MLX5_MAX_PORTS) {
if (ldev->nb.notifier_call)
unregister_netdevice_notifier(&ldev->nb);
+ mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
new file mode 100644
index 000000000000..1dea0b1c9826
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_LAG_H__
+#define __MLX5_LAG_H__
+
+#include "mlx5_core.h"
+#include "lag_mp.h"
+
+enum {
+ MLX5_LAG_FLAG_ROCE = 1 << 0,
+ MLX5_LAG_FLAG_SRIOV = 1 << 1,
+ MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
+};
+
+#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
+ MLX5_LAG_FLAG_MULTIPATH)
+
+struct lag_func {
+ struct mlx5_core_dev *dev;
+ struct net_device *netdev;
+};
+
+/* Used for collection of netdev event info. */
+struct lag_tracker {
+ enum netdev_lag_tx_type tx_type;
+ struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
+ unsigned int is_bonded:1;
+};
+
+/* LAG data of a ConnectX card.
+ * It serves both its phys functions.
+ */
+struct mlx5_lag {
+ u8 flags;
+ u8 v2p_map[MLX5_MAX_PORTS];
+ struct lag_func pf[MLX5_MAX_PORTS];
+ struct lag_tracker tracker;
+ struct workqueue_struct *wq;
+ struct delayed_work bond_work;
+ struct notifier_block nb;
+ struct lag_mp lag_mp;
+};
+
+static inline struct mlx5_lag *
+mlx5_lag_dev_get(struct mlx5_core_dev *dev)
+{
+ return dev->priv.lag;
+}
+
+static inline bool
+__mlx5_lag_is_active(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
+}
+
+void mlx5_modify_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker);
+int mlx5_activate_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ u8 flags);
+int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
+ struct net_device *ndev);
+
+#endif /* __MLX5_LAG_H__ */
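
The header above centralizes the LAG mode bits and the small inline state helpers that lag.c and lag_mp.c now share. A minimal sketch of how a caller could test for one specific mode (the helper below is hypothetical, not part of the patch):

#include "lag.h"

/* Hypothetical check: LAG is active in exactly the multipath mode.
 * __mlx5_lag_is_active() answers "any mode bit set"; masking with
 * MLX5_LAG_MODE_FLAGS and comparing against a single flag narrows it
 * down to one mode.
 */
static bool mlx5_lag_is_multipath_only(struct mlx5_lag *ldev)
{
	return (ldev->flags & MLX5_LAG_MODE_FLAGS) == MLX5_LAG_FLAG_MULTIPATH;
}
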
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
new file mode 100644
index 000000000000..5633f8572800
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/netdevice.h>
+#include "lag.h"
+#include "lag_mp.h"
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "lib/mlx5.h"
+
+static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
+{
+ if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ return false;
+
+ return mlx5_esw_multipath_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+}
+
+static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+}
+
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ bool res;
+
+ ldev = mlx5_lag_dev_get(dev);
+ res = ldev && __mlx5_lag_is_multipath(ldev);
+
+ return res;
+}
+
+/**
+ * mlx5_lag_set_port_affinity - set lag port affinity
+ *
+ * @ldev: lag device
+ * @port: affinity to set:
+ *     0 - normal affinity (both ports).
+ *     1 - affinity to port 1.
+ *     2 - affinity to port 2.
+ *
+ */
+static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
+{
+ struct lag_tracker tracker;
+
+ if (!__mlx5_lag_is_multipath(ldev))
+ return;
+
+ switch (port) {
+ case 0:
+ tracker.netdev_state[0].tx_enabled = true;
+ tracker.netdev_state[1].tx_enabled = true;
+ tracker.netdev_state[0].link_up = true;
+ tracker.netdev_state[1].link_up = true;
+ break;
+ case 1:
+ tracker.netdev_state[0].tx_enabled = true;
+ tracker.netdev_state[0].link_up = true;
+ tracker.netdev_state[1].tx_enabled = false;
+ tracker.netdev_state[1].link_up = false;
+ break;
+ case 2:
+ tracker.netdev_state[0].tx_enabled = false;
+ tracker.netdev_state[0].link_up = false;
+ tracker.netdev_state[1].tx_enabled = true;
+ tracker.netdev_state[1].link_up = true;
+ break;
+ default:
+ mlx5_core_warn(ldev->pf[0].dev, "Invalid affinity port %d",
+ port);
+ return;
+ }
+
+ if (tracker.netdev_state[0].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[0].dev->priv.events,
+ MLX5_DEV_EVENT_PORT_AFFINITY,
+ (void *)0);
+
+ if (tracker.netdev_state[1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[1].dev->priv.events,
+ MLX5_DEV_EVENT_PORT_AFFINITY,
+ (void *)0);
+
+ mlx5_modify_lag(ldev, &tracker);
+}
+
+static void mlx5_lag_fib_event_flush(struct notifier_block *nb)
+{
+ struct lag_mp *mp = container_of(nb, struct lag_mp, fib_nb);
+ struct mlx5_lag *ldev = container_of(mp, struct mlx5_lag, lag_mp);
+
+ flush_workqueue(ldev->wq);
+}
+
+struct mlx5_fib_event_work {
+ struct work_struct work;
+ struct mlx5_lag *ldev;
+ unsigned long event;
+ union {
+ struct fib_entry_notifier_info fen_info;
+ struct fib_nh_notifier_info fnh_info;
+ };
+};
+
+static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
+ unsigned long event,
+ struct fib_info *fi)
+{
+ struct lag_mp *mp = &ldev->lag_mp;
+
+ /* Handle delete event */
+ if (event == FIB_EVENT_ENTRY_DEL) {
+ /* stop track */
+ if (mp->mfi == fi)
+ mp->mfi = NULL;
+ return;
+ }
+
+ /* Handle add/replace event */
+ if (fi->fib_nhs == 1) {
+ if (__mlx5_lag_is_active(ldev)) {
+ struct net_device *nh_dev = fi->fib_nh[0].nh_dev;
+ int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
+
+ mlx5_lag_set_port_affinity(ldev, ++i);
+ }
+ return;
+ }
+
+ if (fi->fib_nhs != 2)
+ return;
+
+ /* Verify next hops are ports of the same hca */
+ if (!(fi->fib_nh[0].nh_dev == ldev->pf[0].netdev &&
+ fi->fib_nh[1].nh_dev == ldev->pf[1].netdev) &&
+ !(fi->fib_nh[0].nh_dev == ldev->pf[1].netdev &&
+ fi->fib_nh[1].nh_dev == ldev->pf[0].netdev)) {
+ mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n");
+ return;
+ }
+
+ /* First time we see multipath route */
+ if (!mp->mfi && !__mlx5_lag_is_active(ldev)) {
+ struct lag_tracker tracker;
+
+ tracker = ldev->tracker;
+ mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH);
+ }
+
+ mlx5_lag_set_port_affinity(ldev, 0);
+ mp->mfi = fi;
+}
+
+static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
+ unsigned long event,
+ struct fib_nh *fib_nh,
+ struct fib_info *fi)
+{
+ struct lag_mp *mp = &ldev->lag_mp;
+
+ /* Check the nh event is related to the route */
+ if (!mp->mfi || mp->mfi != fi)
+ return;
+
+ /* nh added/removed */
+ if (event == FIB_EVENT_NH_DEL) {
+ int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->nh_dev);
+
+ if (i >= 0) {
+ i = (i + 1) % 2 + 1; /* peer port */
+ mlx5_lag_set_port_affinity(ldev, i);
+ }
+ } else if (event == FIB_EVENT_NH_ADD &&
+ fi->fib_nhs == 2) {
+ mlx5_lag_set_port_affinity(ldev, 0);
+ }
+}
+
+static void mlx5_lag_fib_update(struct work_struct *work)
+{
+ struct mlx5_fib_event_work *fib_work =
+ container_of(work, struct mlx5_fib_event_work, work);
+ struct mlx5_lag *ldev = fib_work->ldev;
+ struct fib_nh *fib_nh;
+
+ /* Protect internal structures from changes */
+ rtnl_lock();
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_DEL:
+ mlx5_lag_fib_route_event(ldev, fib_work->event,
+ fib_work->fen_info.fi);
+ fib_info_put(fib_work->fen_info.fi);
+ break;
+ case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_DEL:
+ fib_nh = fib_work->fnh_info.fib_nh;
+ mlx5_lag_fib_nexthop_event(ldev,
+ fib_work->event,
+ fib_work->fnh_info.fib_nh,
+ fib_nh->nh_parent);
+ fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
+ break;
+ }
+
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+static struct mlx5_fib_event_work *
+mlx5_lag_init_fib_work(struct mlx5_lag *ldev, unsigned long event)
+{
+ struct mlx5_fib_event_work *fib_work;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NULL;
+
+ INIT_WORK(&fib_work->work, mlx5_lag_fib_update);
+ fib_work->ldev = ldev;
+ fib_work->event = event;
+
+ return fib_work;
+}
+
+static int mlx5_lag_fib_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct lag_mp *mp = container_of(nb, struct lag_mp, fib_nb);
+ struct mlx5_lag *ldev = container_of(mp, struct mlx5_lag, lag_mp);
+ struct fib_notifier_info *info = ptr;
+ struct mlx5_fib_event_work *fib_work;
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_nh_notifier_info *fnh_info;
+ struct fib_info *fi;
+
+ if (info->family != AF_INET)
+ return NOTIFY_DONE;
+
+ if (!mlx5_lag_multipath_check_prereq(ldev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_DEL:
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ fi = fen_info->fi;
+ if (fi->fib_dev != ldev->pf[0].netdev &&
+ fi->fib_dev != ldev->pf[1].netdev) {
+ return NOTIFY_DONE;
+ }
+ fib_work = mlx5_lag_init_fib_work(ldev, event);
+ if (!fib_work)
+ return NOTIFY_DONE;
+ fib_work->fen_info = *fen_info;
+ /* Take reference on fib_info to prevent it from being
+ * freed while work is queued. Release it afterwards.
+ */
+ fib_info_hold(fib_work->fen_info.fi);
+ break;
+ case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_DEL:
+ fnh_info = container_of(info, struct fib_nh_notifier_info,
+ info);
+ fib_work = mlx5_lag_init_fib_work(ldev, event);
+ if (!fib_work)
+ return NOTIFY_DONE;
+ fib_work->fnh_info = *fnh_info;
+ fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ queue_work(ldev->wq, &fib_work->work);
+
+ return NOTIFY_DONE;
+}
+
+int mlx5_lag_mp_init(struct mlx5_lag *ldev)
+{
+ struct lag_mp *mp = &ldev->lag_mp;
+ int err;
+
+ if (mp->fib_nb.notifier_call)
+ return 0;
+
+ mp->fib_nb.notifier_call = mlx5_lag_fib_event;
+ err = register_fib_notifier(&mp->fib_nb,
+ mlx5_lag_fib_event_flush);
+ if (err)
+ mp->fib_nb.notifier_call = NULL;
+
+ return err;
+}
+
+void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
+{
+ struct lag_mp *mp = &ldev->lag_mp;
+
+ if (!mp->fib_nb.notifier_call)
+ return;
+
+ unregister_fib_notifier(&mp->fib_nb);
+ mp->fib_nb.notifier_call = NULL;
+}
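
The peer-port arithmetic in mlx5_lag_fib_nexthop_event() above turns the zero-based netdev index of the removed nexthop into the one-based affinity value of the surviving port. A standalone sketch of that mapping, assuming two ports (hypothetical helper, not part of the patch):

/* 0-based index of the removed port -> 1-based affinity of its peer:
 * idx 0 -> affinity 2, idx 1 -> affinity 1.
 */
static int mlx5_lag_peer_port_affinity(int idx)
{
	return (idx + 1) % 2 + 1;
}
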
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
new file mode 100644
index 000000000000..6d14b1100be9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_LAG_MP_H__
+#define __MLX5_LAG_MP_H__
+
+#include "lag.h"
+#include "mlx5_core.h"
+
+struct lag_mp {
+ struct notifier_block fib_nb;
+ struct fib_info *mfi; /* used in tracking fib events */
+};
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+int mlx5_lag_mp_init(struct mlx5_lag *ldev);
+void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
+
+#else /* CONFIG_MLX5_ESWITCH */
+
+static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
+static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
+
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_LAG_MP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 98359559c77e..a71d5b9c7ab2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -108,8 +108,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
mutex_init(&mpfs->lock);
mpfs->size = l2table_size;
- mpfs->bitmap = kcalloc(BITS_TO_LONGS(l2table_size),
- sizeof(uintptr_t), GFP_KERNEL);
+ mpfs->bitmap = bitmap_zalloc(l2table_size, GFP_KERNEL);
if (!mpfs->bitmap) {
kfree(mpfs);
return -ENOMEM;
@@ -127,7 +126,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
return;
WARN_ON(!hlist_empty(mpfs->hash));
- kfree(mpfs->bitmap);
+ bitmap_free(mpfs->bitmap);
kfree(mpfs);
}
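
The mpfs.c hunk above swaps an open-coded kcalloc(BITS_TO_LONGS(...), sizeof(uintptr_t), ...) allocation for the dedicated bitmap helpers, the same conversion applied to uar.c further down. The idiom in isolation, as a minimal sketch (the mydrv_* name is hypothetical):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/slab.h>

/* Allocate a zeroed bitmap of nbits bits, use it, and free it.
 * bitmap_zalloc()/bitmap_free() replace the older
 * kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), GFP_KERNEL)/kfree()
 * pair.
 */
static int mydrv_bitmap_demo(unsigned int nbits)
{
	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;

	set_bit(0, map);	/* mark the first entry as in use */
	bitmap_free(map);
	return 0;
}
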
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
new file mode 100644
index 000000000000..40f4a19b1ce1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/port.h>
+#include <linux/mlx5/cmd.h>
+#include "mlx5_core.h"
+#include "lib/port_tun.h"
+
+struct mlx5_port_tun_entropy_flags {
+ bool force_supported, force_enabled;
+ bool calc_supported, calc_enabled;
+ bool gre_calc_supported, gre_calc_enabled;
+};
+
+static void mlx5_query_port_tun_entropy(struct mlx5_core_dev *mdev,
+ struct mlx5_port_tun_entropy_flags *entropy_flags)
+{
+ u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+ /* Default values for FW that does not support MLX5_REG_PCMR */
+ entropy_flags->force_supported = false;
+ entropy_flags->calc_supported = false;
+ entropy_flags->gre_calc_supported = false;
+ entropy_flags->force_enabled = false;
+ entropy_flags->calc_enabled = true;
+ entropy_flags->gre_calc_enabled = true;
+
+ if (!MLX5_CAP_GEN(mdev, ports_check))
+ return;
+
+ if (mlx5_query_ports_check(mdev, out, sizeof(out)))
+ return;
+
+ entropy_flags->force_supported = !!(MLX5_GET(pcmr_reg, out, entropy_force_cap));
+ entropy_flags->calc_supported = !!(MLX5_GET(pcmr_reg, out, entropy_calc_cap));
+ entropy_flags->gre_calc_supported = !!(MLX5_GET(pcmr_reg, out, entropy_gre_calc_cap));
+ entropy_flags->force_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_force));
+ entropy_flags->calc_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_calc));
+ entropy_flags->gre_calc_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_gre_calc));
+}
+
+static int mlx5_set_port_tun_entropy_calc(struct mlx5_core_dev *mdev, u8 enable,
+ u8 force)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
+ int err;
+
+ err = mlx5_query_ports_check(mdev, in, sizeof(in));
+ if (err)
+ return err;
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+ MLX5_SET(pcmr_reg, in, entropy_force, force);
+ MLX5_SET(pcmr_reg, in, entropy_calc, enable);
+ return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
+static int mlx5_set_port_gre_tun_entropy_calc(struct mlx5_core_dev *mdev,
+ u8 enable, u8 force)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
+ int err;
+
+ err = mlx5_query_ports_check(mdev, in, sizeof(in));
+ if (err)
+ return err;
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+ MLX5_SET(pcmr_reg, in, entropy_force, force);
+ MLX5_SET(pcmr_reg, in, entropy_gre_calc, enable);
+ return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
+void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5_port_tun_entropy_flags entropy_flags;
+
+ tun_entropy->mdev = mdev;
+ mutex_init(&tun_entropy->lock);
+ mlx5_query_port_tun_entropy(mdev, &entropy_flags);
+ tun_entropy->num_enabling_entries = 0;
+ tun_entropy->num_disabling_entries = 0;
+ tun_entropy->enabled = entropy_flags.calc_enabled;
+ tun_entropy->enabled =
+ (entropy_flags.calc_supported) ?
+ entropy_flags.calc_enabled : true;
+}
+
+static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
+ int reformat_type, bool enable)
+{
+ struct mlx5_port_tun_entropy_flags entropy_flags;
+ int err;
+
+ mlx5_query_port_tun_entropy(tun_entropy->mdev, &entropy_flags);
+ /* Tunnel entropy calculation may be controlled either on a per-port
+ * basis for all tunneling protocols or specifically for the GRE
+ * protocol. Prioritize GRE protocol control (if capable) over the
+ * global port configuration.
+ */
+ if (entropy_flags.gre_calc_supported &&
+ reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) {
+ /* Other applications may change the global FW entropy
+ * calculation settings. Check that the current entropy value
+ * is the opposite of the value being set.
+ */
+ if (entropy_flags.force_enabled &&
+ enable == entropy_flags.gre_calc_enabled) {
+ mlx5_core_warn(tun_entropy->mdev,
+ "Unexpected GRE entropy calc setting - expected %d",
+ !entropy_flags.gre_calc_enabled);
+ return -EOPNOTSUPP;
+ }
+ err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, enable,
+ entropy_flags.force_supported);
+ if (err)
+ return err;
+ /* if we turn on the entropy we don't need to force it anymore */
+ if (entropy_flags.force_supported && enable) {
+ err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, 1, 0);
+ if (err)
+ return err;
+ }
+ } else if (entropy_flags.calc_supported) {
+ /* Other applications may change the global FW entropy
+ * calculation settings. Check that the current entropy value
+ * is the opposite of the value being set.
+ */
+ if (entropy_flags.force_enabled &&
+ enable == entropy_flags.calc_enabled) {
+ mlx5_core_warn(tun_entropy->mdev,
+ "Unexpected entropy calc setting - expected %d",
+ !entropy_flags.calc_enabled);
+ return -EOPNOTSUPP;
+ }
+ /* GRE requires disabling entropy calculation. If there are
+ * enabling entries (i.e., VXLAN) we cannot turn it off for them,
+ * thus fail.
+ */
+ if (tun_entropy->num_enabling_entries)
+ return -EOPNOTSUPP;
+ err = mlx5_set_port_tun_entropy_calc(tun_entropy->mdev, enable,
+ entropy_flags.force_supported);
+ if (err)
+ return err;
+ tun_entropy->enabled = enable;
+ /* if we turn on the entropy we don't need to force it anymore */
+ if (entropy_flags.force_supported && enable) {
+ err = mlx5_set_port_tun_entropy_calc(tun_entropy->mdev, 1, 0);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* The function manages the refcount for enabling/disabling tunnel types.
+ * The return value indicates whether the increment succeeded, depending on
+ * entropy capabilities and configuration.
+ */
+int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy,
+ int reformat_type)
+{
+ /* The default is an error for unknown (non-VXLAN/GRE) tunnel types */
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(&tun_entropy->lock);
+ if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN &&
+ tun_entropy->enabled) {
+ /* If entropy calculation is enabled for all tunneling
+ * types, it is OK for VXLAN, so approve.
+ * Otherwise keep the error default.
+ */
+ tun_entropy->num_enabling_entries++;
+ err = 0;
+ } else if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) {
+ /* Turn off the entropy only for the first GRE rule.
+ * For subsequent rules the entropy was already disabled
+ * successfully.
+ */
+ if (tun_entropy->num_disabling_entries == 0)
+ err = mlx5_set_entropy(tun_entropy, reformat_type, 0);
+ else
+ err = 0;
+ if (!err)
+ tun_entropy->num_disabling_entries++;
+ }
+ mutex_unlock(&tun_entropy->lock);
+
+ return err;
+}
+
+void mlx5_tun_entropy_refcount_dec(struct mlx5_tun_entropy *tun_entropy,
+ int reformat_type)
+{
+ mutex_lock(&tun_entropy->lock);
+ if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN)
+ tun_entropy->num_enabling_entries--;
+ else if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE &&
+ --tun_entropy->num_disabling_entries == 0)
+ mlx5_set_entropy(tun_entropy, reformat_type, 1);
+ mutex_unlock(&tun_entropy->lock);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h
new file mode 100644
index 000000000000..54c42a88705e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_PORT_TUN_H__
+#define __MLX5_PORT_TUN_H__
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_tun_entropy {
+ struct mlx5_core_dev *mdev;
+ u32 num_enabling_entries;
+ u32 num_disabling_entries;
+ u8 enabled;
+ struct mutex lock; /* lock the entropy fields */
+};
+
+void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy,
+ struct mlx5_core_dev *mdev);
+int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy,
+ int reformat_type);
+void mlx5_tun_entropy_refcount_dec(struct mlx5_tun_entropy *tun_entropy,
+ int reformat_type);
+
+#endif /* __MLX5_PORT_TUN_H__ */
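
A minimal sketch of how an offload path might consume the entropy refcount API declared above, assuming hypothetical encap-rule plumbing around it:

#include "lib/port_tun.h"

/* Hypothetical offload path: take the entropy refcount before installing
 * an encap rule and release it when the rule goes away.  For NVGRE this
 * turns port entropy calculation off on first use; for VXLAN it only
 * succeeds while entropy calculation is enabled.
 */
static int mydrv_add_encap_rule(struct mlx5_tun_entropy *tun_entropy,
				int reformat_type)
{
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, reformat_type);
	if (err)
		return err;

	/* ... install the actual encap/offload rule here ... */

	return 0;
}

static void mydrv_del_encap_rule(struct mlx5_tun_entropy *tun_entropy,
				 int reformat_type)
{
	/* ... remove the rule, then drop the entropy reference ... */
	mlx5_tun_entropy_refcount_dec(tun_entropy, reformat_type);
}

Taking the refcount before installing the rule keeps the failure path simple: if the requested entropy setting cannot be honoured, no hardware rule has been created yet.
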
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
deleted file mode 100644
index 3a3b0005fd2b..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
- u16 opmod, u8 port)
-{
- int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
- int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
- int err = -ENOMEM;
- void *data;
- void *resp;
- u32 *out;
- u32 *in;
-
- in = kzalloc(inlen, GFP_KERNEL);
- out = kzalloc(outlen, GFP_KERNEL);
- if (!in || !out)
- goto out;
-
- MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
- MLX5_SET(mad_ifc_in, in, op_mod, opmod);
- MLX5_SET(mad_ifc_in, in, port, port);
-
- data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
- memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
-
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
- if (err)
- goto out;
-
- resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
- memcpy(outb, resp,
- MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
-
-out:
- kfree(out);
- kfree(in);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index be81b319b0dc..8391dde869a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -65,6 +65,7 @@
#include "lib/vxlan.h"
#include "lib/devcom.h"
#include "diag/fw_tracer.h"
+#include "ecpf.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -459,6 +460,50 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
return err;
}
+static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
+{
+ void *set_hca_cap;
+ void *set_ctx;
+ int set_sz;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, pg))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
+ if (err)
+ return err;
+
+ if (!(MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive) ||
+ MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive) ||
+ MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive)))
+ return 0;
+
+ set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ set_ctx = kzalloc(set_sz, GFP_KERNEL);
+ if (!set_ctx)
+ return -ENOMEM;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
+ MLX5_ST_SZ_BYTES(odp_cap));
+
+ /* set ODP SRQ support for RC/UD and XRC transports */
+ MLX5_SET(odp_cap, set_hca_cap, ud_odp_caps.srq_receive,
+ MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive));
+
+ MLX5_SET(odp_cap, set_hca_cap, rc_odp_caps.srq_receive,
+ MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive));
+
+ MLX5_SET(odp_cap, set_hca_cap, xrc_odp_caps.srq_receive,
+ MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive));
+
+ err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+
+ kfree(set_ctx);
+ return err;
+}
+
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
void *set_ctx = NULL;
@@ -567,6 +612,8 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function,
+ dev->caps.embedded_cpu);
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
@@ -577,6 +624,8 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function,
+ dev->caps.embedded_cpu);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -693,6 +742,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_clr_master;
}
+ if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
+ pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
+ pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
+ mlx5_core_dbg(dev, "Enabling pci atomics failed\n");
+
dev->iseg_base = pci_resource_start(dev->pdev, 0);
dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
if (!dev->iseg) {
@@ -849,6 +903,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
struct pci_dev *pdev = dev->pdev;
int err;
+ dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
@@ -926,6 +981,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
+ err = handle_hca_cap_odp(dev);
+ if (err) {
+ dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+ goto reclaim_boot_pages;
+ }
+
err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
dev_err(&pdev->dev, "failed to allocate init pages\n");
@@ -1014,6 +1075,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_sriov;
}
+ err = mlx5_ec_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init embedded CPU\n");
+ goto err_ec;
+ }
+
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1031,6 +1098,9 @@ out:
return 0;
err_reg_dev:
+ mlx5_ec_cleanup(dev);
+
+err_ec:
mlx5_sriov_detach(dev);
err_sriov:
@@ -1105,6 +1175,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
+ mlx5_ec_cleanup(dev);
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);
@@ -1415,6 +1486,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */
{ PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */
{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
+ { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
+ { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ 0, }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index c68dcea5985b..9529cf9623e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -121,11 +121,12 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 modify_bitmask);
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
+int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts);
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
+void mlx5_cmd_flush(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
@@ -187,6 +188,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, lag_master);
}
+int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
+
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
void mlx5_lag_update(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 0670165afd5f..ea744d8466ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -51,9 +51,10 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
- u32 *in, int inlen,
- u32 *out, int outlen,
- mlx5_cmd_cbk_t callback, void *context)
+ struct mlx5_async_ctx *async_ctx, u32 *in,
+ int inlen, u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
{
struct mlx5_mkey_table *table = &dev->priv.mkey_table;
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
@@ -71,7 +72,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
MLX5_SET(mkc, mkc, mkey_7_0, key);
if (callback)
- return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
+ return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
callback, context);
err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
@@ -105,7 +106,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen)
{
- return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
+ return mlx5_core_create_mkey_cb(dev, mkey, NULL, in, inlen,
NULL, 0, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index a83b517b0714..41025387ff2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -48,6 +48,7 @@ enum {
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u16 func_id;
+ u8 ec_function;
s32 npages;
struct work_struct work;
};
@@ -143,6 +144,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
MLX5_SET(query_pages_in, in, op_mod, boot ?
MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
+ MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@@ -253,7 +255,8 @@ err_mapping:
return err;
}
-static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
+ bool ec_function)
{
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
@@ -262,6 +265,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
+ MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@@ -270,7 +274,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
- int notify_fail)
+ int notify_fail, bool ec_function)
{
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
@@ -305,6 +309,7 @@ retry:
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+ MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err) {
@@ -316,8 +321,11 @@ retry:
dev->priv.fw_pages += npages;
if (func_id)
dev->priv.vfs_pages += npages;
+ else if (mlx5_core_is_ecpf(dev) && !ec_function)
+ dev->priv.peer_pf_pages += npages;
- mlx5_core_dbg(dev, "err %d\n", err);
+ mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
+ npages, ec_function, func_id, err);
kvfree(in);
return 0;
@@ -328,7 +336,7 @@ out_4k:
out_free:
kvfree(in);
if (notify_fail)
- page_notify_fail(dev, func_id);
+ page_notify_fail(dev, func_id, ec_function);
return err;
}
@@ -364,7 +372,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
- int *nclaimed)
+ int *nclaimed, bool ec_function)
{
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
@@ -385,6 +393,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+ MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
@@ -410,6 +419,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
+ else if (mlx5_core_is_ecpf(dev) && !ec_function)
+ dev->priv.peer_pf_pages -= num_claimed;
out_free:
kvfree(out);
@@ -423,9 +434,10 @@ static void pages_work_handler(struct work_struct *work)
int err = 0;
if (req->npages < 0)
- err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
+ err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
+ req->ec_function);
else if (req->npages > 0)
- err = give_pages(dev, req->func_id, req->npages, 1);
+ err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
if (err)
mlx5_core_warn(dev, "%s fail %d\n",
@@ -434,6 +446,10 @@ static void pages_work_handler(struct work_struct *work)
kfree(req);
}
+enum {
+ EC_FUNCTION_MASK = 0x8000,
+};
+
static int req_pages_handler(struct notifier_block *nb,
unsigned long type, void *data)
{
@@ -441,6 +457,7 @@ static int req_pages_handler(struct notifier_block *nb,
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
+ bool ec_function;
u16 func_id;
s32 npages;
@@ -450,6 +467,7 @@ static int req_pages_handler(struct notifier_block *nb,
func_id = be16_to_cpu(eqe->data.req_pages.func_id);
npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+ ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
func_id, npages);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
@@ -461,6 +479,7 @@ static int req_pages_handler(struct notifier_block *nb,
req->dev = dev;
req->func_id = func_id;
req->npages = npages;
+ req->ec_function = ec_function;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
return NOTIFY_OK;
@@ -479,7 +498,7 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
- return give_pages(dev, func_id, npages, 0);
+ return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
enum {
@@ -513,7 +532,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
fwp = rb_entry(p, struct fw_page, rb_node);
err = reclaim_pages(dev, fwp->func_id,
optimal_reclaimed_pages(),
- &nclaimed);
+ &nclaimed, mlx5_core_is_ecpf(dev));
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
@@ -535,6 +554,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.vfs_pages,
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);
+ WARN(dev->priv.peer_pf_pages,
+ "Peer PF FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.peer_pf_pages);
return 0;
}
@@ -567,10 +589,10 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
flush_workqueue(dev->priv.pg_wq);
}
-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
+int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
- int prev_vfs_pages = dev->priv.vfs_pages;
+ int prev_pages = *pages;
/* In case of internal error we will free the pages manually later */
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -578,16 +600,16 @@ int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
return 0;
}
- mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
+ mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages,
dev->priv.name);
- while (dev->priv.vfs_pages) {
+ while (*pages) {
if (time_after(jiffies, end)) {
- mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
+ mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
return -ETIMEDOUT;
}
- if (dev->priv.vfs_pages < prev_vfs_pages) {
+ if (*pages < prev_pages) {
end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
- prev_vfs_pages = dev->priv.vfs_pages;
+ prev_pages = *pages;
}
msleep(50);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 2b82f35f4c35..21b7f05b16a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -30,10 +30,7 @@
* SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
@@ -157,44 +154,6 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
sizeof(out), MLX5_REG_MLCR, 0, 1);
}
-int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
- u32 *proto_cap, int proto_mask)
-{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
-
- err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
- if (err)
- return err;
-
- if (proto_mask == MLX5_PTYS_EN)
- *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
- else
- *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
-
-int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
- u32 *proto_admin, int proto_mask)
-{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
-
- err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
- if (err)
- return err;
-
- if (proto_mask == MLX5_PTYS_EN)
- *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
- else
- *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
-
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port)
{
@@ -211,23 +170,6 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
-int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
- u32 *proto_oper, u8 local_port)
-{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
-
- err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN,
- local_port);
- if (err)
- return err;
-
- *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
-
int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, u8 local_port)
{
@@ -245,35 +187,6 @@ int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
-int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
- u32 proto_admin, int proto_mask)
-{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- u32 in[MLX5_ST_SZ_DW(ptys_reg)];
- u8 an_disable_admin;
- u8 an_disable_cap;
- u8 an_status;
-
- mlx5_query_port_autoneg(dev, proto_mask, &an_status,
- &an_disable_cap, &an_disable_admin);
- if (!an_disable_cap && an_disable)
- return -EPERM;
-
- memset(in, 0, sizeof(in));
-
- MLX5_SET(ptys_reg, in, local_port, 1);
- MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
- MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
- if (proto_mask == MLX5_PTYS_EN)
- MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
- else
- MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
-
- return mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PTYS, 0, 1);
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
-
/* This function should be used after setting a port register only */
void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
{
@@ -606,25 +519,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
-void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
- u8 *an_status,
- u8 *an_disable_cap, u8 *an_disable_admin)
-{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-
- *an_status = 0;
- *an_disable_cap = 0;
- *an_disable_admin = 0;
-
- if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
- return;
-
- *an_status = MLX5_GET(ptys_reg, out, an_status);
- *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
- *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
-
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
@@ -870,8 +764,7 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
}
EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
-static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
- int outlen)
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
@@ -880,7 +773,7 @@ static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
outlen, MLX5_REG_PCMR, 0, 0);
}
-static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
@@ -891,7 +784,11 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
{
u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
+ int err;
+ err = mlx5_query_ports_check(mdev, in, sizeof(in));
+ if (err)
+ return err;
MLX5_SET(pcmr_reg, in, local_port, 1);
MLX5_SET(pcmr_reg, in, fcs_chk, enable);
return mlx5_set_ports_check(mdev, in, sizeof(in));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 388f205a497f..370ca94b6775 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
struct mlx5_core_rsc_common *common;
+ unsigned long flags;
- spin_lock(&table->lock);
+ spin_lock_irqsave(&table->lock, flags);
common = radix_tree_lookup(&table->tree, rsn);
if (common)
atomic_inc(&common->refcount);
- spin_unlock(&table->lock);
+ spin_unlock_irqrestore(&table->lock, flags);
return common;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 6e178030d8fb..7b23fa8d2d60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -147,7 +147,7 @@ out:
if (MLX5_ESWITCH_MANAGER(dev))
mlx5_eswitch_disable_sriov(dev->priv.eswitch);
- if (mlx5_wait_for_vf_pages(dev))
+ if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8b97066dd1f1..94464723ff77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -90,8 +90,8 @@ static void up_rel_func(struct kref *kref)
iounmap(up->map);
if (mlx5_cmd_free_uar(up->mdev, up->index))
mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
- kfree(up->reg_bitmap);
- kfree(up->fp_bitmap);
+ bitmap_free(up->reg_bitmap);
+ bitmap_free(up->fp_bitmap);
kfree(up);
}
@@ -110,11 +110,11 @@ static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
return ERR_PTR(err);
up->mdev = mdev;
- up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+ up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
if (!up->reg_bitmap)
goto error1;
- up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+ up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
if (!up->fp_bitmap)
goto error1;
@@ -157,8 +157,8 @@ error2:
if (mlx5_cmd_free_uar(mdev, up->index))
mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
- kfree(up->fp_bitmap);
- kfree(up->reg_bitmap);
+ bitmap_free(up->fp_bitmap);
+ bitmap_free(up->reg_bitmap);
kfree(up);
return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 9b150ce9d315..ef95feca9961 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -64,7 +64,7 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
}
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport, u8 state)
+ u16 vport, u8 other_vport, u8 state)
{
u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
@@ -73,8 +73,7 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
MLX5_CMD_OP_MODIFY_VPORT_STATE);
MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
MLX5_SET(modify_vport_state_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+ MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
MLX5_SET(modify_vport_state_in, in, admin_state, state);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
@@ -255,7 +254,7 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
- u32 vport,
+ u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size)
@@ -373,7 +372,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
- u32 vport,
+ u16 vport,
u16 vlans[],
int *size)
{
@@ -526,7 +525,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
- u32 vport, u64 node_guid)
+ u16 vport, u64 node_guid)
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *nic_vport_context;
@@ -827,7 +826,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
- u32 vport,
+ u16 vport,
int *promisc_uc,
int *promisc_mc,
int *promisc_all)
@@ -1057,7 +1056,7 @@ free:
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
- u64 *rx_discard_vport_down,
+ u8 other_vport, u64 *rx_discard_vport_down,
u64 *tx_discard_vport_down)
{
u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
@@ -1068,8 +1067,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
MLX5_CMD_OP_QUERY_VNIC_ENV);
MLX5_SET(query_vnic_env_in, in, op_mod, 0);
MLX5_SET(query_vnic_env_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_vnic_env_in, in, other_vport, 1);
+ MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index 7a712b6b09ec..14c0c62f8e73 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#ifndef _MLXFW_H
#define _MLXFW_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 2cf89126fb23..240c027e5f07 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) "mlxfw: " fmt
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 993cb5ba934e..544344ac4894 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) "mlxfw_mfa2: " fmt
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
index 20472aa139cd..5bba6ad79d34 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#ifndef _MLXFW_MFA2_H
#define _MLXFW_MFA2_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
index f667942b1ea3..874c0a2474ae 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#ifndef _MLXFW_MFA2_FILE_H
#define _MLXFW_MFA2_FILE_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
index dd66737c033d..b001e5258091 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
@@ -1,36 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXFW_MFA2_FORMAT_H
#define _MLXFW_MFA2_FORMAT_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
index cc013e77b326..33c971190bba 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#ifndef _MLXFW_MFA2_TLV_H
#define _MLXFW_MFA2_TLV_H
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
index 0094b92a233b..017d68f1e123 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
@@ -1,36 +1,5 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) "MFA2: " fmt
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
index 2c667894f3a2..633284eeded7 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
@@ -1,36 +1,6 @@
-/*
- * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */
+
#ifndef _MLXFW_MFA2_TLV_MULTI_H
#define _MLXFW_MFA2_TLV_MULTI_H
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 080ddd1942ec..9c195dfed031 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -4,7 +4,6 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
- depends on MAY_USE_DEVLINK
---help---
This driver supports Mellanox Technologies Switch ASICs family.
@@ -78,6 +77,7 @@ config MLXSW_SPECTRUM
depends on IPV6 || IPV6=n
depends on NET_IPGRE || NET_IPGRE=n
depends on IPV6_GRE || IPV6_GRE=n
+ depends on VXLAN || VXLAN=n
select GENERIC_ALLOCATOR
select PARMAN
select OBJAGG
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index bbf45f10c208..a01d15546e37 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
- core_acl_flex_actions.o
+ core_acl_flex_actions.o core_env.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index ddedf8ab5b64..d23d53c0e284 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1062,6 +1062,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_driver_init;
}
+ if (mlxsw_driver->params_register && !reload)
+ devlink_params_publish(devlink);
+
return 0;
err_driver_init:
@@ -1131,6 +1134,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
+ if (mlxsw_core->driver->params_unregister && !reload)
+ devlink_params_unpublish(devlink);
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
@@ -1460,13 +1465,17 @@ static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
if (trans->retries)
dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
- if (err)
+ if (err) {
dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
trans->tid, trans->reg->id,
mlxsw_reg_id_str(trans->reg->id),
mlxsw_core_reg_access_type_str(trans->type),
trans->emad_status,
mlxsw_emad_op_tlv_status_str(trans->emad_status));
+ trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
+ trans->emad_status,
+ mlxsw_emad_op_tlv_status_str(trans->emad_status));
+ }
list_del(&trans->bulk_list);
kfree_rcu(trans, rcu);
@@ -1908,6 +1917,43 @@ void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
+int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
+ struct mlxsw_res *res)
+{
+ int index, i;
+ u64 data;
+ u16 id;
+ int err;
+
+ if (!res)
+ return 0;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
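+	/* Walk the resource table in fixed-size queries until the end-of-table marker is returned. */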
+ for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
+ index++) {
+ err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
+ id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
+ data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
+
+ if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
+ return 0;
+
+ mlxsw_res_parse(res, id, data);
+ }
+ }
+
+	/* If after MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES queries we still did not
+	 * reach MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID, something went wrong in the FW.
+	 */
+ return -EIO;
+}
+EXPORT_SYMBOL(mlxsw_core_resources_query);
+
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 4e114f35ee0d..8ec53f027575 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -182,6 +182,8 @@ int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
void mlxsw_core_flush_owq(void);
+int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
+ struct mlxsw_res *res);
#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
@@ -344,6 +346,7 @@ struct mlxsw_bus_info {
struct mlxsw_fw_rev fw_rev;
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+ u8 low_frequency;
};
struct mlxsw_hwmon;
@@ -394,4 +397,9 @@ static inline void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
#endif
+enum mlxsw_devlink_param_id {
+ MLXSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+};
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index df78d23b3ec3..cb3e663b1d37 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -236,12 +236,10 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_element_usage *elusage)
{
struct mlxsw_afk_key_info *key_info;
- size_t alloc_size;
int err;
- alloc_size = sizeof(*key_info) +
- sizeof(key_info->blocks[0]) * mlxsw_afk->max_blocks;
- key_info = kzalloc(alloc_size, GFP_KERNEL);
+ key_info = kzalloc(struct_size(key_info, blocks, mlxsw_afk->max_blocks),
+ GFP_KERNEL);
if (!key_info)
return ERR_PTR(-ENOMEM);
err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
new file mode 100644
index 000000000000..7a15e932ed2f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "core.h"
+#include "core_env.h"
+#include "item.h"
+#include "reg.h"
+
+static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
+ bool *qsfp)
+{
+ char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ u8 ident;
+ int err;
+
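+	/* Read the first EEPROM byte, which holds the module identifier. */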
+ mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
+ MLXSW_REG_MCIA_I2C_ADDR_LOW);
+ err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+ ident = eeprom_tmp[0];
+ switch (ident) {
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
+ *qsfp = false;
+ break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ *qsfp = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
+ u16 offset, u16 size, void *data,
+ unsigned int *p_read_size)
+{
+ char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ u16 i2c_addr;
+ int status;
+ int err;
+
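+	/* A single MCIA query can return at most MLXSW_REG_MCIA_EEPROM_SIZE bytes. */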
+ size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE);
+
+ if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH &&
+ offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
+		/* Cross-page read: read only up to the end of the low page (offset 256). */
+ size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
+
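+	/* Offsets beyond the low page are reached through the high I2C address. */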
+ i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
+ if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
+ i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
+ offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
+ }
+
+ mlxsw_reg_mcia_pack(mcia_pl, module, 0, 0, offset, size, i2c_addr);
+
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+ if (err)
+ return err;
+
+ status = mlxsw_reg_mcia_status_get(mcia_pl);
+ if (status)
+ return -EIO;
+
+ mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+ memcpy(data, eeprom_tmp, size);
+ *p_read_size = size;
+
+ return 0;
+}
+
+int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
+ int off, int *temp)
+{
+ char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
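+	/* Raw threshold bytes from the EEPROM, accessed as a 16-bit value. */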
+ union {
+ u8 buf[MLXSW_REG_MCIA_TH_ITEM_SIZE];
+ u16 temp;
+ } temp_thresh;
+ char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
+ char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
+ u16 module_temp;
+ bool qsfp;
+ int err;
+
+ mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
+ 1);
+ err = mlxsw_reg_query(core, MLXSW_REG(mtbr), mtbr_pl);
+ if (err)
+ return err;
+
+	/* Don't read temperature thresholds for a module with no valid info. */
+ mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &module_temp, NULL);
+ switch (module_temp) {
+ case MLXSW_REG_MTBR_BAD_SENS_INFO: /* fall-through */
+ case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
+ case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
+ case MLXSW_REG_MTBR_INDEX_NA:
+ *temp = 0;
+ return 0;
+ default:
+ /* Do not consider thresholds for zero temperature. */
+ if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) {
+ *temp = 0;
+ return 0;
+ }
+ break;
+ }
+
+ /* Read Free Side Device Temperature Thresholds from page 03h
+ * (MSB at lower byte address).
+ * Bytes:
+ * 128-129 - Temp High Alarm (SFP_TEMP_HIGH_ALARM);
+ * 130-131 - Temp Low Alarm (SFP_TEMP_LOW_ALARM);
+ * 132-133 - Temp High Warning (SFP_TEMP_HIGH_WARN);
+ * 134-135 - Temp Low Warning (SFP_TEMP_LOW_WARN);
+ */
+
+ /* Validate module identifier value. */
+ err = mlxsw_env_validate_cable_ident(core, module, &qsfp);
+ if (err)
+ return err;
+
+ if (qsfp)
+ mlxsw_reg_mcia_pack(mcia_pl, module, 0,
+ MLXSW_REG_MCIA_TH_PAGE_NUM,
+ MLXSW_REG_MCIA_TH_PAGE_OFF + off,
+ MLXSW_REG_MCIA_TH_ITEM_SIZE,
+ MLXSW_REG_MCIA_I2C_ADDR_LOW);
+ else
+ mlxsw_reg_mcia_pack(mcia_pl, module, 0,
+ MLXSW_REG_MCIA_PAGE0_LO,
+ off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
+ MLXSW_REG_MCIA_I2C_ADDR_HIGH);
+
+ err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+ memcpy(temp_thresh.buf, eeprom_tmp, MLXSW_REG_MCIA_TH_ITEM_SIZE);
+ *temp = temp_thresh.temp * 1000;
+
+ return 0;
+}
+
+int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
+ struct ethtool_modinfo *modinfo)
+{
+ u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
+ u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE;
+ u8 module_rev_id, module_id;
+ unsigned int read_size;
+ int err;
+
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
+ module_info, &read_size);
+ if (err)
+ return err;
+
+ if (read_size < offset)
+ return -EIO;
+
+ module_rev_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID];
+ module_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID];
+
+ switch (module_id) {
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
+ if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 ||
+ module_rev_id >=
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_info);
+
+int mlxsw_env_get_module_eeprom(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, int module,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ int offset = ee->offset;
+ unsigned int read_size;
+ int i = 0;
+ int err;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
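+	/* Read the requested range in chunks, advancing by the number of bytes actually read. */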
+ while (i < ee->len) {
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset,
+ ee->len - i, data + i,
+ &read_size);
+ if (err) {
+ netdev_err(netdev, "Eeprom query failed\n");
+ return err;
+ }
+
+ i += read_size;
+ offset += read_size;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_eeprom);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
new file mode 100644
index 000000000000..064d0e770c01
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_CORE_ENV_H
+#define _MLXSW_CORE_ENV_H
+
+int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
+ int off, int *temp);
+
+int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
+ struct ethtool_modinfo *modinfo);
+
+int mlxsw_env_get_module_eeprom(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, int module,
+ struct ethtool_eeprom *ee, u8 *data);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index e04e8162aa14..6956bbebe2f1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -7,8 +7,10 @@
#include <linux/sysfs.h>
#include <linux/hwmon.h>
#include <linux/err.h>
+#include <linux/sfp.h>
#include "core.h"
+#include "core_env.h"
#define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127
#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \
@@ -30,6 +32,7 @@ struct mlxsw_hwmon {
struct attribute *attrs[MLXSW_HWMON_ATTR_COUNT + 1];
struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT];
unsigned int attrs_count;
+ u8 sensor_count;
};
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -121,6 +124,27 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
return sprintf(buf, "%u\n", mlxsw_reg_mfsm_rpm_get(mfsm_pl));
}
+static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ char fore_pl[MLXSW_REG_FORE_LEN];
+ bool fault;
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(fore), fore_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
+ return err;
+ }
+ mlxsw_reg_fore_unpack(fore_pl, mlwsw_hwmon_attr->type_index, &fault);
+
+ return sprintf(buf, "%u\n", fault);
+}
+
static ssize_t mlxsw_hwmon_pwm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -167,12 +191,160 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
return len;
}
+static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
+ u16 temp;
+ u8 module;
+ int err;
+
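+	/* Module sensors are indexed after the ASIC sensors exposed through MTMP. */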
+ module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
+ 1);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature sensor\n");
+ return err;
+ }
+
+ mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
+	/* Translate the raw sensor status into a temperature value. */
+ switch (temp) {
+ case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
+ case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
+ case MLXSW_REG_MTBR_INDEX_NA:
+ temp = 0;
+ break;
+ case MLXSW_REG_MTBR_BAD_SENS_INFO:
+		/* An untrusted cable is connected. Reading the temperature from
+		 * its sensor is unreliable.
+		 */
+ temp = 0;
+ break;
+ default:
+ temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ break;
+ }
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
+ u8 module, fault;
+ u16 temp;
+ int err;
+
+ module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
+ 1);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature sensor\n");
+ return err;
+ }
+
+ mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
+
+	/* Derive the fault indication from the sensor status. */
+ switch (temp) {
+ case MLXSW_REG_MTBR_BAD_SENS_INFO:
+		/* An untrusted cable is connected. Reading the temperature from
+		 * its sensor is unreliable.
+		 */
+ fault = 1;
+ break;
+ case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
+ case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
+ case MLXSW_REG_MTBR_INDEX_NA:
+ default:
+ fault = 0;
+ break;
+ }
+
+ return sprintf(buf, "%u\n", fault);
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_critical_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ int temp;
+ u8 module;
+ int err;
+
+ module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
+ SFP_TEMP_HIGH_WARN, &temp);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature thresholds\n");
+ return err;
+ }
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_emergency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ u8 module;
+ int temp;
+ int err;
+
+ module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
+ SFP_TEMP_HIGH_ALARM, &temp);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature thresholds\n");
+ return err;
+ }
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+
+ return sprintf(buf, "front panel %03u\n",
+ mlwsw_hwmon_attr->type_index);
+}
+
enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST,
MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
+ MLXSW_HWMON_ATTR_TYPE_FAN_FAULT,
MLXSW_HWMON_ATTR_TYPE_PWM,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
};
static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
@@ -209,6 +381,12 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"fan%u_input", num + 1);
break;
+ case MLXSW_HWMON_ATTR_TYPE_FAN_FAULT:
+ mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_fault_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "fan%u_fault", num + 1);
+ break;
case MLXSW_HWMON_ATTR_TYPE_PWM:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show;
mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store;
@@ -216,6 +394,40 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"pwm%u", num + 1);
break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE:
+ mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_module_temp_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_input", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_module_temp_fault_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_fault", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_module_temp_critical_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_crit", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_module_temp_emergency_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_emergency", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_module_temp_label_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_label", num + 1);
+ break;
default:
WARN_ON(1);
}
@@ -233,7 +445,6 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
char mtmp_pl[MLXSW_REG_MTMP_LEN];
- u8 sensor_count;
int i;
int err;
@@ -242,8 +453,8 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n");
return err;
}
- sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
- for (i = 0; i < sensor_count; i++) {
+ mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
+ for (i = 0; i < mlxsw_hwmon->sensor_count; i++) {
mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
MLXSW_REG(mtmp), mtmp_pl);
@@ -280,10 +491,14 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active);
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) {
- if (tacho_active & BIT(type_index))
+ if (tacho_active & BIT(type_index)) {
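+			/* Expose both a speed and a fault attribute for each active tachometer. */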
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
+ type_index, num);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_FAN_FAULT,
type_index, num++);
+ }
}
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) {
@@ -295,6 +510,53 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
+static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+ unsigned int module_count = mlxsw_core_max_ports(mlxsw_hwmon->core);
+ char pmlp_pl[MLXSW_REG_PMLP_LEN] = {0};
+ int i, index;
+ u8 width;
+ int err;
+
+	/* Add extra attributes for module temperature. Module sensor indices
+	 * start at sensor_count, since all indices below sensor_count are
+	 * already used by the sensors exposed through the MTMP register in
+	 * mlxsw_hwmon_temp_init().
+	 */
+ index = mlxsw_hwmon->sensor_count;
+ for (i = 1; i < module_count; i++) {
+ mlxsw_reg_pmlp_pack(pmlp_pl, i);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(pmlp),
+ pmlp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to read module index %d\n",
+ i);
+ return err;
+ }
+ width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ if (!width)
+ continue;
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, index,
+ index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
+ index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT,
+ index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
+ index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
+ index, index);
+ index++;
+ }
+
+ return 0;
+}
+
int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct mlxsw_hwmon **p_hwmon)
@@ -317,6 +579,10 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_fans_init;
+ err = mlxsw_hwmon_module_init(mlxsw_hwmon);
+ if (err)
+ goto err_temp_module_init;
+
mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
@@ -333,6 +599,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
return 0;
err_hwmon_register:
+err_temp_module_init:
err_fans_init:
err_temp_init:
kfree(mlxsw_hwmon);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 61f897b40f82..0b85c7252f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -9,11 +9,20 @@
#include <linux/sysfs.h>
#include <linux/thermal.h>
#include <linux/err.h>
+#include <linux/sfp.h>
#include "core.h"
+#include "core_env.h"
#define MLXSW_THERMAL_POLL_INT 1000 /* ms */
-#define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */
+#define MLXSW_THERMAL_SLOW_POLL_INT 20000 /* ms */
+#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
+#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
+#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
+#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */
+#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
+#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
+#define MLXSW_THERMAL_ZONE_MAX_NAME 16
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MAX_DUTY 255
/* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values
@@ -26,9 +35,22 @@
#define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2)
#define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */
+/* External cooling devices allowed for binding to mlxsw thermal zones. */
+static char * const mlxsw_thermal_external_allowed_cdev[] = {
+ "mlxreg_fan",
+};
+
+enum mlxsw_thermal_trips {
+ MLXSW_THERMAL_TEMP_TRIP_NORM,
+ MLXSW_THERMAL_TEMP_TRIP_HIGH,
+ MLXSW_THERMAL_TEMP_TRIP_HOT,
+ MLXSW_THERMAL_TEMP_TRIP_CRIT,
+};
+
struct mlxsw_thermal_trip {
int type;
int temp;
+ int hyst;
int min_state;
int max_state;
};
@@ -36,32 +58,29 @@ struct mlxsw_thermal_trip {
static const struct mlxsw_thermal_trip default_thermal_trips[] = {
{ /* In range - 0-40% PWM */
.type = THERMAL_TRIP_ACTIVE,
- .temp = 75000,
+ .temp = MLXSW_THERMAL_ASIC_TEMP_NORM,
+ .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
.min_state = 0,
.max_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
},
- { /* High - 40-100% PWM */
- .type = THERMAL_TRIP_ACTIVE,
- .temp = 80000,
- .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
- .max_state = MLXSW_THERMAL_MAX_STATE,
- },
{
- /* Very high - 100% PWM */
+ /* In range - 40-100% PWM */
.type = THERMAL_TRIP_ACTIVE,
- .temp = 85000,
- .min_state = MLXSW_THERMAL_MAX_STATE,
+ .temp = MLXSW_THERMAL_ASIC_TEMP_HIGH,
+ .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
+ .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
.max_state = MLXSW_THERMAL_MAX_STATE,
},
{ /* Warning */
.type = THERMAL_TRIP_HOT,
- .temp = 105000,
+ .temp = MLXSW_THERMAL_ASIC_TEMP_HOT,
+ .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
.min_state = MLXSW_THERMAL_MAX_STATE,
.max_state = MLXSW_THERMAL_MAX_STATE,
},
{ /* Critical - soft poweroff */
.type = THERMAL_TRIP_CRITICAL,
- .temp = MLXSW_THERMAL_MAX_TEMP,
+ .temp = MLXSW_THERMAL_ASIC_TEMP_CRIT,
.min_state = MLXSW_THERMAL_MAX_STATE,
.max_state = MLXSW_THERMAL_MAX_STATE,
}
@@ -72,14 +91,27 @@ static const struct mlxsw_thermal_trip default_thermal_trips[] = {
/* Make sure all trips are writable */
#define MLXSW_THERMAL_TRIP_MASK (BIT(MLXSW_THERMAL_NUM_TRIPS) - 1)
+struct mlxsw_thermal;
+
+struct mlxsw_thermal_module {
+ struct mlxsw_thermal *parent;
+ struct thermal_zone_device *tzdev;
+ struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ enum thermal_device_mode mode;
+ int module;
+};
+
struct mlxsw_thermal {
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
struct thermal_zone_device *tzdev;
+ int polling_delay;
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
+ struct mlxsw_thermal_module *tz_module_arr;
+ unsigned int tz_module_num;
};
static inline u8 mlxsw_state_to_duty(int state)
@@ -103,9 +135,67 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
if (thermal->cdevs[i] == cdev)
return i;
+ /* Allow mlxsw thermal zone binding to an external cooling device */
+ for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
+ if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i],
+ sizeof(cdev->type)))
+ return 0;
+ }
+
return -ENODEV;
}
+static void
+mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
+{
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = 0;
+}
+
+static int
+mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
+ struct mlxsw_thermal_module *tz)
+{
+ int crit_temp, emerg_temp;
+ int err;
+
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ SFP_TEMP_HIGH_WARN,
+ &crit_temp);
+ if (err)
+ return err;
+
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ SFP_TEMP_HIGH_ALARM,
+ &emerg_temp);
+ if (err)
+ return err;
+
+	/* According to the system thermal requirements, the thermal zones are
+	 * defined with four trip points. The critical and emergency
+	 * temperature thresholds provided by the QSFP module are set as the
+	 * "active" and "hot" trip points; the "normal" and "critical" trip
+	 * points are derived from "active" and "hot" by subtracting or adding
+	 * twice the hysteresis value.
+	 */
+ if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT)
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp -
+ MLXSW_THERMAL_MODULE_TEMP_SHIFT;
+ else
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
+ if (emerg_temp > crit_temp)
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
+ MLXSW_THERMAL_MODULE_TEMP_SHIFT;
+ else
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp;
+
+ return 0;
+}
+
static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
struct thermal_cooling_device *cdev)
{
@@ -172,7 +262,7 @@ static int mlxsw_thermal_set_mode(struct thermal_zone_device *tzdev,
mutex_lock(&tzdev->lock);
if (mode == THERMAL_DEVICE_ENABLED)
- tzdev->polling_delay = MLXSW_THERMAL_POLL_INT;
+ tzdev->polling_delay = thermal->polling_delay;
else
tzdev->polling_delay = 0;
@@ -237,13 +327,31 @@ static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
struct mlxsw_thermal *thermal = tzdev->devdata;
if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS ||
- temp > MLXSW_THERMAL_MAX_TEMP)
+ temp > MLXSW_THERMAL_ASIC_TEMP_CRIT)
return -EINVAL;
thermal->trips[trip].temp = temp;
return 0;
}
+static int mlxsw_thermal_get_trip_hyst(struct thermal_zone_device *tzdev,
+ int trip, int *p_hyst)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ *p_hyst = thermal->trips[trip].hyst;
+ return 0;
+}
+
+static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
+ int trip, int hyst)
+{
+ struct mlxsw_thermal *thermal = tzdev->devdata;
+
+ thermal->trips[trip].hyst = hyst;
+ return 0;
+}
+
static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.bind = mlxsw_thermal_bind,
.unbind = mlxsw_thermal_unbind,
@@ -253,6 +361,206 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.get_trip_type = mlxsw_thermal_get_trip_type,
.get_trip_temp = mlxsw_thermal_get_trip_temp,
.set_trip_temp = mlxsw_thermal_set_trip_temp,
+ .get_trip_hyst = mlxsw_thermal_get_trip_hyst,
+ .set_trip_hyst = mlxsw_thermal_set_trip_hyst,
+};
+
+static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
+ struct thermal_cooling_device *cdev)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ int i, j, err;
+
+ /* If the cooling device is one of ours bind it */
+ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
+ return 0;
+
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+ const struct mlxsw_thermal_trip *trip = &tz->trips[i];
+
+ err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+ trip->max_state,
+ trip->min_state,
+ THERMAL_WEIGHT_DEFAULT);
+ if (err < 0)
+ goto err_bind_cooling_device;
+ }
+ return 0;
+
+err_bind_cooling_device:
+ for (j = i - 1; j >= 0; j--)
+ thermal_zone_unbind_cooling_device(tzdev, j, cdev);
+ return err;
+}
+
+static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
+ struct thermal_cooling_device *cdev)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ int i;
+ int err;
+
+ /* If the cooling device is one of ours unbind it */
+ if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0)
+ return 0;
+
+ for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+ err = thermal_zone_unbind_cooling_device(tzdev, i, cdev);
+ WARN_ON(err);
+ }
+ return err;
+}
+
+static int mlxsw_thermal_module_mode_get(struct thermal_zone_device *tzdev,
+ enum thermal_device_mode *mode)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ *mode = tz->mode;
+
+ return 0;
+}
+
+static int mlxsw_thermal_module_mode_set(struct thermal_zone_device *tzdev,
+ enum thermal_device_mode mode)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+
+ mutex_lock(&tzdev->lock);
+
+ if (mode == THERMAL_DEVICE_ENABLED)
+ tzdev->polling_delay = thermal->polling_delay;
+ else
+ tzdev->polling_delay = 0;
+
+ mutex_unlock(&tzdev->lock);
+
+ tz->mode = mode;
+ thermal_zone_device_update(tzdev, THERMAL_EVENT_UNSPECIFIED);
+
+ return 0;
+}
+
+static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ struct device *dev = thermal->bus_info->dev;
+ char mtbr_pl[MLXSW_REG_MTBR_LEN];
+ u16 temp;
+ int err;
+
+ /* Read module temperature. */
+ mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX +
+ tz->module, 1);
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtbr), mtbr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
+ /* Update temperature. */
+ switch (temp) {
+ case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
+ case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
+ case MLXSW_REG_MTBR_INDEX_NA: /* fall-through */
+ case MLXSW_REG_MTBR_BAD_SENS_INFO:
+ temp = 0;
+ break;
+ default:
+ temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+		/* Reset all trip points. */
+ mlxsw_thermal_module_trips_reset(tz);
+ /* Update trip points. */
+ err = mlxsw_thermal_module_trips_update(dev, thermal->core,
+ tz);
+ if (err)
+ return err;
+ break;
+ }
+
+ *p_temp = (int) temp;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_type_get(struct thermal_zone_device *tzdev, int trip,
+ enum thermal_trip_type *p_type)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ *p_type = tz->trips[trip].type;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_temp_get(struct thermal_zone_device *tzdev,
+ int trip, int *p_temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ *p_temp = tz->trips[trip].temp;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev,
+ int trip, int temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS ||
+ temp > tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp)
+ return -EINVAL;
+
+ tz->trips[trip].temp = temp;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_hyst_get(struct thermal_zone_device *tzdev, int trip,
+ int *p_hyst)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ *p_hyst = tz->trips[trip].hyst;
+ return 0;
+}
+
+static int
+mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
+ int hyst)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+
+ tz->trips[trip].hyst = hyst;
+ return 0;
+}
+
+static struct thermal_zone_params mlxsw_thermal_module_params = {
+ .governor_name = "user_space",
+};
+
+static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+ .bind = mlxsw_thermal_module_bind,
+ .unbind = mlxsw_thermal_module_unbind,
+ .get_mode = mlxsw_thermal_module_mode_get,
+ .set_mode = mlxsw_thermal_module_mode_set,
+ .get_temp = mlxsw_thermal_module_temp_get,
+ .get_trip_type = mlxsw_thermal_module_trip_type_get,
+ .get_trip_temp = mlxsw_thermal_module_trip_temp_get,
+ .set_trip_temp = mlxsw_thermal_module_trip_temp_set,
+ .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
+ .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
@@ -355,6 +663,123 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
.set_cur_state = mlxsw_thermal_set_cur_state,
};
+static int
+mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
+{
+ char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ int err;
+
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ module_tz->module + 1);
+ module_tz->tzdev = thermal_zone_device_register(tz_name,
+ MLXSW_THERMAL_NUM_TRIPS,
+ MLXSW_THERMAL_TRIP_MASK,
+ module_tz,
+ &mlxsw_thermal_module_ops,
+ &mlxsw_thermal_module_params,
+ 0, 0);
+ if (IS_ERR(module_tz->tzdev)) {
+ err = PTR_ERR(module_tz->tzdev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
+{
+ thermal_zone_device_unregister(tzdev);
+}
+
+static int
+mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
+ struct mlxsw_thermal *thermal, u8 local_port)
+{
+ struct mlxsw_thermal_module *module_tz;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ u8 width, module;
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+
+ width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ if (!width)
+ return 0;
+
+ module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ module_tz = &thermal->tz_module_arr[module];
+ module_tz->module = module;
+ module_tz->parent = thermal;
+ memcpy(module_tz->trips, default_thermal_trips,
+ sizeof(thermal->trips));
+	/* Initialize all trip points. */
+ mlxsw_thermal_module_trips_reset(module_tz);
+	/* Update trip points according to the module data. */
+ err = mlxsw_thermal_module_trips_update(dev, core, module_tz);
+ if (err)
+ return err;
+
+ thermal->tz_module_num++;
+
+ return 0;
+}
+
+static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
+{
+ if (module_tz && module_tz->tzdev) {
+ mlxsw_thermal_module_tz_fini(module_tz->tzdev);
+ module_tz->tzdev = NULL;
+ }
+}
+
+static int
+mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
+ struct mlxsw_thermal *thermal)
+{
+ unsigned int module_count = mlxsw_core_max_ports(core);
+ int i, err;
+
+ thermal->tz_module_arr = kcalloc(module_count,
+ sizeof(*thermal->tz_module_arr),
+ GFP_KERNEL);
+ if (!thermal->tz_module_arr)
+ return -ENOMEM;
+
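+	/* Local port 0 is the CPU port and carries no module, so start from port 1. */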
+ for (i = 1; i < module_count; i++) {
+ err = mlxsw_thermal_module_init(dev, core, thermal, i);
+ if (err)
+ goto err_unreg_tz_module_arr;
+ }
+
+ for (i = 0; i < thermal->tz_module_num; i++) {
+ err = mlxsw_thermal_module_tz_init(&thermal->tz_module_arr[i]);
+ if (err)
+ goto err_unreg_tz_module_arr;
+ }
+
+ return 0;
+
+err_unreg_tz_module_arr:
+ for (i = module_count - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
+ kfree(thermal->tz_module_arr);
+ return err;
+}
+
+static void
+mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
+{
+ unsigned int module_count = mlxsw_core_max_ports(thermal->core);
+ int i;
+
+ for (i = module_count - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
+ kfree(thermal->tz_module_arr);
+}
+
int mlxsw_thermal_init(struct mlxsw_core *core,
const struct mlxsw_bus_info *bus_info,
struct mlxsw_thermal **p_thermal)
@@ -407,8 +832,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
if (pwm_active & BIT(i)) {
struct thermal_cooling_device *cdev;
- cdev = thermal_cooling_device_register("Fan", thermal,
- &mlxsw_cooling_ops);
+ cdev = thermal_cooling_device_register("mlxsw_fan",
+ thermal,
+ &mlxsw_cooling_ops);
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev, "Failed to register cooling device\n");
@@ -423,22 +849,36 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
i);
+ thermal->polling_delay = bus_info->low_frequency ?
+ MLXSW_THERMAL_SLOW_POLL_INT :
+ MLXSW_THERMAL_POLL_INT;
+
thermal->tzdev = thermal_zone_device_register("mlxsw",
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
thermal,
&mlxsw_thermal_ops,
NULL, 0,
- MLXSW_THERMAL_POLL_INT);
+ thermal->polling_delay);
if (IS_ERR(thermal->tzdev)) {
err = PTR_ERR(thermal->tzdev);
dev_err(dev, "Failed to register thermal zone\n");
goto err_unreg_cdevs;
}
+ err = mlxsw_thermal_modules_init(dev, core, thermal);
+ if (err)
+ goto err_unreg_tzdev;
+
thermal->mode = THERMAL_DEVICE_ENABLED;
*p_thermal = thermal;
return 0;
+
+err_unreg_tzdev:
+ if (thermal->tzdev) {
+ thermal_zone_device_unregister(thermal->tzdev);
+ thermal->tzdev = NULL;
+ }
err_unreg_cdevs:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
if (thermal->cdevs[i])
@@ -452,6 +892,7 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
{
int i;
+ mlxsw_thermal_modules_fini(thermal);
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
thermal->tzdev = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 798bd5aca384..06aea1999518 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -14,14 +14,17 @@
#include "cmd.h"
#include "core.h"
#include "i2c.h"
+#include "resources.h"
#define MLXSW_I2C_CIR2_BASE 0x72000
#define MLXSW_I2C_CIR_STATUS_OFF 0x18
#define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \
MLXSW_I2C_CIR_STATUS_OFF)
#define MLXSW_I2C_OPMOD_SHIFT 12
+#define MLXSW_I2C_EVENT_BIT_SHIFT 22
#define MLXSW_I2C_GO_BIT_SHIFT 23
#define MLXSW_I2C_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_I2C_EVENT_BIT BIT(MLXSW_I2C_EVENT_BIT_SHIFT)
#define MLXSW_I2C_GO_BIT BIT(MLXSW_I2C_GO_BIT_SHIFT)
#define MLXSW_I2C_GO_OPMODE BIT(MLXSW_I2C_OPMOD_SHIFT)
#define MLXSW_I2C_SET_IMM_CMD (MLXSW_I2C_GO_OPMODE | \
@@ -33,6 +36,9 @@
#define MLXSW_I2C_TLV_HDR_SIZE 0x10
#define MLXSW_I2C_ADDR_WIDTH 4
#define MLXSW_I2C_PUSH_CMD_SIZE (MLXSW_I2C_ADDR_WIDTH + 4)
+#define MLXSW_I2C_SET_EVENT_CMD (MLXSW_I2C_EVENT_BIT)
+#define MLXSW_I2C_PUSH_EVENT_CMD (MLXSW_I2C_GO_BIT | \
+ MLXSW_I2C_SET_EVENT_CMD)
#define MLXSW_I2C_READ_SEMA_SIZE 4
#define MLXSW_I2C_PREP_SIZE (MLXSW_I2C_ADDR_WIDTH + 28)
#define MLXSW_I2C_MBOX_SIZE 20
@@ -44,6 +50,7 @@
#define MLXSW_I2C_BLK_MAX 32
#define MLXSW_I2C_RETRY 5
#define MLXSW_I2C_TIMEOUT_MSECS 5000
+#define MLXSW_I2C_MAX_DATA_SIZE 256
/**
* struct mlxsw_i2c - device private data:
@@ -167,7 +174,7 @@ static int mlxsw_i2c_wait_go_bit(struct i2c_client *client,
return err > 0 ? 0 : err;
}
-/* Routine posts a command to ASIC though mail box. */
+/* Routine posts a command to ASIC through mail box. */
static int mlxsw_i2c_write_cmd(struct i2c_client *client,
struct mlxsw_i2c *mlxsw_i2c,
int immediate)
@@ -213,6 +220,66 @@ static int mlxsw_i2c_write_cmd(struct i2c_client *client,
return 0;
}
+/* Routine posts initialization command to ASIC through mail box. */
+static int
+mlxsw_i2c_write_init_cmd(struct i2c_client *client,
+ struct mlxsw_i2c *mlxsw_i2c, u16 opcode, u32 in_mod)
+{
+ __be32 push_cmd_buf[MLXSW_I2C_PUSH_CMD_SIZE / 4] = {
+ 0, cpu_to_be32(MLXSW_I2C_PUSH_EVENT_CMD)
+ };
+ __be32 prep_cmd_buf[MLXSW_I2C_PREP_SIZE / 4] = {
+ 0, 0, 0, 0, 0, 0,
+ cpu_to_be32(client->adapter->nr & 0xffff),
+ cpu_to_be32(MLXSW_I2C_SET_EVENT_CMD)
+ };
+ struct i2c_msg push_cmd =
+ MLXSW_I2C_WRITE_MSG(client, push_cmd_buf,
+ MLXSW_I2C_PUSH_CMD_SIZE);
+ struct i2c_msg prep_cmd =
+ MLXSW_I2C_WRITE_MSG(client, prep_cmd_buf, MLXSW_I2C_PREP_SIZE);
+ u8 status;
+ int err;
+
+ push_cmd_buf[1] = cpu_to_be32(MLXSW_I2C_PUSH_EVENT_CMD | opcode);
+ prep_cmd_buf[3] = cpu_to_be32(in_mod);
+ prep_cmd_buf[7] = cpu_to_be32(MLXSW_I2C_GO_BIT | opcode);
+ mlxsw_i2c_set_slave_addr((u8 *)prep_cmd_buf,
+ MLXSW_I2C_CIR2_BASE);
+ mlxsw_i2c_set_slave_addr((u8 *)push_cmd_buf,
+ MLXSW_I2C_CIR2_OFF_STATUS);
+
+ /* Prepare Command Interface Register for transaction */
+ err = i2c_transfer(client->adapter, &prep_cmd, 1);
+ if (err < 0)
+ return err;
+ else if (err != 1)
+ return -EIO;
+
+ /* Write out Command Interface Register GO bit to push transaction */
+ err = i2c_transfer(client->adapter, &push_cmd, 1);
+ if (err < 0)
+ return err;
+ else if (err != 1)
+ return -EIO;
+
+ /* Wait until go bit is cleared. */
+ err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, &status);
+ if (err) {
+ dev_err(&client->dev, "HW semaphore is not released");
+ return err;
+ }
+
+ /* Validate transaction completion status. */
+ if (status) {
+ dev_err(&client->dev, "Bad transaction completion status %x\n",
+ status);
+ return -EIO;
+ }
+
+ return 0;
+}
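
mlxsw_i2c_write_init_cmd() follows the driver's usual post-then-poll shape: prepare the command words, set the GO (and here also the EVENT) bit, wait for the device to clear GO, then check the completion status byte. The toy model below only illustrates that control flow; the bit positions mirror the defines above, while fake_device_run() is a made-up stand-in for the real i2c transfers and polling:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define GO_BIT    (1u << 23)   /* mirrors MLXSW_I2C_GO_BIT_SHIFT */
#define EVENT_BIT (1u << 22)   /* mirrors MLXSW_I2C_EVENT_BIT_SHIFT */

/* Hypothetical device model: clears the GO bit and reports a status. */
static uint8_t fake_device_run(uint32_t *ctrl_word)
{
	*ctrl_word &= ~GO_BIT;
	return 0;                           /* 0 means success */
}

static int post_init_cmd(uint16_t opcode)
{
	uint32_t ctrl = GO_BIT | EVENT_BIT | opcode;
	uint8_t status;

	status = fake_device_run(&ctrl);    /* stands in for transfer + poll */
	if (ctrl & GO_BIT)                  /* device never released the cmd */
		return -EBUSY;
	if (status) {
		fprintf(stderr, "bad completion status %x\n", status);
		return -EIO;
	}
	return 0;
}

int main(void)
{
	return post_init_cmd(0x8001) ? 1 : 0;   /* 0x8001: arbitrary opcode */
}
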
+
/* Routine obtains mail box offsets from ASIC register space. */
static int mlxsw_i2c_get_mbox(struct i2c_client *client,
struct mlxsw_i2c *mlxsw_i2c)
@@ -310,8 +377,8 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
/* Routine executes I2C command. */
static int
-mlxsw_i2c_cmd(struct device *dev, size_t in_mbox_size, u8 *in_mbox,
- size_t out_mbox_size, u8 *out_mbox, u8 *status)
+mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
+ u8 *in_mbox, size_t out_mbox_size, u8 *out_mbox, u8 *status)
{
struct i2c_client *client = to_i2c_client(dev);
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
@@ -326,24 +393,40 @@ mlxsw_i2c_cmd(struct device *dev, size_t in_mbox_size, u8 *in_mbox,
WARN_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
- reg_size = mlxsw_i2c_get_reg_size(in_mbox);
- num = reg_size / MLXSW_I2C_BLK_MAX;
- if (reg_size % MLXSW_I2C_BLK_MAX)
- num++;
+ if (in_mbox) {
+ reg_size = mlxsw_i2c_get_reg_size(in_mbox);
+ num = reg_size / MLXSW_I2C_BLK_MAX;
+ if (reg_size % MLXSW_I2C_BLK_MAX)
+ num++;
- if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
- dev_err(&client->dev, "Could not acquire lock");
- return -EINVAL;
- }
+ if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
+ dev_err(&client->dev, "Could not acquire lock");
+ return -EINVAL;
+ }
+
+ err = mlxsw_i2c_write(dev, reg_size, in_mbox, num, status);
+ if (err)
+ goto cmd_fail;
+
+ /* No out mailbox is the case of a write transaction. */
+ if (!out_mbox) {
+ mutex_unlock(&mlxsw_i2c->cmd.lock);
+ return 0;
+ }
+ } else {
+ /* No input mailbox is the case of an initialization query command. */
+ reg_size = MLXSW_I2C_MAX_DATA_SIZE;
+ num = reg_size / MLXSW_I2C_BLK_MAX;
- err = mlxsw_i2c_write(dev, reg_size, in_mbox, num, status);
- if (err)
- goto cmd_fail;
+ if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
+ dev_err(&client->dev, "Could not acquire lock");
+ return -EINVAL;
+ }
- /* No out mailbox is case of write transaction. */
- if (!out_mbox) {
- mutex_unlock(&mlxsw_i2c->cmd.lock);
- return 0;
+ err = mlxsw_i2c_write_init_cmd(client, mlxsw_i2c, opcode,
+ in_mod);
+ if (err)
+ goto cmd_fail;
}
/* Send read transaction to get output mailbox content. */
@@ -395,8 +478,8 @@ static int mlxsw_i2c_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
{
struct mlxsw_i2c *mlxsw_i2c = bus_priv;
- return mlxsw_i2c_cmd(mlxsw_i2c->dev, in_mbox_size, in_mbox,
- out_mbox_size, out_mbox, status);
+ return mlxsw_i2c_cmd(mlxsw_i2c->dev, opcode, in_mod, in_mbox_size,
+ in_mbox, out_mbox_size, out_mbox, status);
}
static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv,
@@ -414,13 +497,22 @@ static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb,
static int
mlxsw_i2c_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
- struct mlxsw_res *resources)
+ struct mlxsw_res *res)
{
struct mlxsw_i2c *mlxsw_i2c = bus_priv;
+ char *mbox;
+ int err;
mlxsw_i2c->core = mlxsw_core;
- return 0;
+ mbox = mlxsw_cmd_mbox_alloc();
+ if (!mbox)
+ return -ENOMEM;
+
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+
+ mlxsw_cmd_mbox_free(mbox);
+ return err;
}
static void mlxsw_i2c_fini(void *bus_priv)
@@ -503,6 +595,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->bus_info.device_kind = id->name;
mlxsw_i2c->bus_info.device_name = client->name;
mlxsw_i2c->bus_info.dev = &client->dev;
+ mlxsw_i2c->bus_info.low_frequency = true;
mlxsw_i2c->dev = &client->dev;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 5a6c4457fb55..68bee9572a1b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
+/* Copyright (c) 2016-2019 Mellanox Technologies. All rights reserved */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -8,59 +11,377 @@
#include <linux/types.h>
#include "core.h"
+#include "core_env.h"
#include "i2c.h"
-static const char mlxsw_minimal_driver_name[] = "mlxsw_minimal";
+static const char mlxsw_m_driver_name[] = "mlxsw_minimal";
-static const struct mlxsw_config_profile mlxsw_minimal_config_profile;
+struct mlxsw_m_port;
-static struct mlxsw_driver mlxsw_minimal_driver = {
- .kind = mlxsw_minimal_driver_name,
- .priv_size = 1,
- .profile = &mlxsw_minimal_config_profile,
+struct mlxsw_m {
+ struct mlxsw_m_port **ports;
+ int *module_to_port;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 base_mac[ETH_ALEN];
+ u8 max_ports;
};
-static const struct i2c_device_id mlxsw_minimal_i2c_id[] = {
+struct mlxsw_m_port {
+ struct net_device *dev;
+ struct mlxsw_m *mlxsw_m;
+ u8 local_port;
+ u8 module;
+};
+
+static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
+{
+ return 0;
+}
+
+static int
+mlxsw_m_port_get_phys_port_name(struct net_device *dev, char *name, size_t len)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+ u8 local_port = mlxsw_m_port->local_port;
+
+ return mlxsw_core_port_get_phys_port_name(core, local_port, name, len);
+}
+
+static int mlxsw_m_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ ppid->id_len = sizeof(mlxsw_m->base_mac);
+ memcpy(&ppid->id, &mlxsw_m->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops mlxsw_m_port_netdev_ops = {
+ .ndo_open = mlxsw_m_port_dummy_open_stop,
+ .ndo_stop = mlxsw_m_port_dummy_open_stop,
+ .ndo_get_phys_port_name = mlxsw_m_port_get_phys_port_name,
+ .ndo_get_port_parent_id = mlxsw_m_port_get_port_parent_id,
+};
+
+static int mlxsw_m_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_info(core, mlxsw_m_port->module, modinfo);
+}
+
+static int
+mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_eeprom(netdev, core, mlxsw_m_port->module,
+ ee, data);
+}
+
+static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
+ .get_module_info = mlxsw_m_get_module_info,
+ .get_module_eeprom = mlxsw_m_get_module_eeprom,
+};
+
+static int
+mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u8 local_port,
+ u8 *p_module, u8 *p_width)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+
+ return 0;
+}
+
+static int
+mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+ struct net_device *dev = mlxsw_m_port->dev;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+ int err;
+
+ mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(ppad), ppad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+ /* The last byte value in the base MAC address is guaranteed
+ * to be such that it does not overflow when the module number
+ * is added below.
+ */
+ dev->dev_addr[ETH_ALEN - 1] += mlxsw_m_port->module + 1;
+ return 0;
+}
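
mlxsw_m_port_dev_addr_get() derives each port's MAC address by adding the module number plus one to the last byte of the switch's base MAC. A standalone sketch of that derivation; the base address below is illustrative, not read from the PPAD register:

#include <stdio.h>

#define ETH_ALEN 6

static void derive_port_mac(const unsigned char *base, unsigned char *out,
			    unsigned char module)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		out[i] = base[i];
	/* Firmware guarantees the last byte cannot overflow here. */
	out[ETH_ALEN - 1] += module + 1;
}

int main(void)
{
	unsigned char base[ETH_ALEN] = { 0x7c, 0xfe, 0x90, 0x12, 0x34, 0x00 };
	unsigned char mac[ETH_ALEN];

	derive_port_mac(base, mac, 2);      /* third module */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
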
+
+static int
+mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
+{
+ struct mlxsw_m_port *mlxsw_m_port;
+ struct net_device *dev;
+ int err;
+
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n",
+ local_port);
+ return err;
+ }
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_m_port));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(dev, mlxsw_m->bus_info->dev);
+ mlxsw_m_port = netdev_priv(dev);
+ mlxsw_m_port->dev = dev;
+ mlxsw_m_port->mlxsw_m = mlxsw_m;
+ mlxsw_m_port->local_port = local_port;
+ mlxsw_m_port->module = module;
+
+ dev->netdev_ops = &mlxsw_m_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_m_port_ethtool_ops;
+
+ err = mlxsw_m_port_dev_addr_get(mlxsw_m_port);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Port %d: Unable to get port mac address\n",
+ mlxsw_m_port->local_port);
+ goto err_dev_addr_get;
+ }
+
+ netif_carrier_off(dev);
+ mlxsw_m->ports[local_port] = mlxsw_m_port;
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_m_port->local_port);
+ goto err_register_netdev;
+ }
+
+ mlxsw_core_port_eth_set(mlxsw_m->core, mlxsw_m_port->local_port,
+ mlxsw_m_port, dev, module + 1, false, 0);
+
+ return 0;
+
+err_register_netdev:
+ mlxsw_m->ports[local_port] = NULL;
+ free_netdev(dev);
+err_dev_addr_get:
+err_alloc_etherdev:
+ mlxsw_core_port_fini(mlxsw_m->core, local_port);
+ return err;
+}
+
+static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
+{
+ struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port];
+
+ mlxsw_core_port_clear(mlxsw_m->core, local_port, mlxsw_m);
+ unregister_netdev(mlxsw_m_port->dev); /* This calls ndo_stop */
+ mlxsw_m->ports[local_port] = NULL;
+ free_netdev(mlxsw_m_port->dev);
+ mlxsw_core_port_fini(mlxsw_m->core, local_port);
+}
+
+static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
+ u8 *last_module)
+{
+ u8 module, width;
+ int err;
+
+ /* Fill out the module to local port mapping array */
+ err = mlxsw_m_port_module_info_get(mlxsw_m, local_port, &module,
+ &width);
+ if (err)
+ return err;
+
+ if (!width)
+ return 0;
+ /* Skip if the port belongs to the same module (cluster) */
+ if (module == *last_module)
+ return 0;
+ *last_module = module;
+ mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
+
+ return 0;
+}
+
+static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
+{
+ mlxsw_m->module_to_port[module] = -1;
+}
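
mlxsw_m_port_module_map() assigns local port numbers sequentially, one per module, skips further lanes of the same module, and leaves unused slots at -1. A standalone model of that bookkeeping, with a made-up lane-to-module table standing in for the PMLP queries:

#include <stdio.h>

#define MAX_PORTS 8

int main(void)
{
	/* Hypothetical lane -> module table (what PMLP would report). */
	int lane_module[MAX_PORTS] = { -1, 0, 0, 1, 1, 2, 2, 3 };
	int module_to_port[MAX_PORTS];
	int last_module = MAX_PORTS;        /* sentinel: no module seen yet */
	int max_ports = 0;
	int i;

	for (i = 0; i < MAX_PORTS; i++)
		module_to_port[i] = -1;     /* invalidate all entries */

	for (i = 1; i < MAX_PORTS; i++) {
		int module = lane_module[i];

		if (module == last_module)  /* another lane of same module */
			continue;
		last_module = module;
		module_to_port[module] = ++max_ports;
	}

	for (i = 0; i < max_ports; i++)
		if (module_to_port[i] > 0)
			printf("module %d -> local port %d\n",
			       i, module_to_port[i]);
	return 0;
}
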
+
+static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ u8 last_module = max_ports;
+ int i;
+ int err;
+
+ mlxsw_m->ports = kcalloc(max_ports, sizeof(*mlxsw_m->ports),
+ GFP_KERNEL);
+ if (!mlxsw_m->ports)
+ return -ENOMEM;
+
+ mlxsw_m->module_to_port = kmalloc_array(max_ports, sizeof(int),
+ GFP_KERNEL);
+ if (!mlxsw_m->module_to_port) {
+ err = -ENOMEM;
+ goto err_module_to_port_alloc;
+ }
+
+ /* Invalidate the entries of module to local port mapping array */
+ for (i = 0; i < max_ports; i++)
+ mlxsw_m->module_to_port[i] = -1;
+
+ /* Fill out module to local port mapping array */
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module);
+ if (err)
+ goto err_module_to_port_map;
+ }
+
+ /* Create port objects for each valid entry */
+ for (i = 0; i < mlxsw_m->max_ports; i++) {
+ if (mlxsw_m->module_to_port[i] > 0) {
+ err = mlxsw_m_port_create(mlxsw_m,
+ mlxsw_m->module_to_port[i],
+ i);
+ if (err)
+ goto err_module_to_port_create;
+ }
+ }
+
+ return 0;
+
+err_module_to_port_create:
+ for (i--; i >= 0; i--) {
+ if (mlxsw_m->module_to_port[i] > 0)
+ mlxsw_m_port_remove(mlxsw_m,
+ mlxsw_m->module_to_port[i]);
+ }
+ i = max_ports;
+err_module_to_port_map:
+ for (i--; i > 0; i--)
+ mlxsw_m_port_module_unmap(mlxsw_m, i);
+ kfree(mlxsw_m->module_to_port);
+err_module_to_port_alloc:
+ kfree(mlxsw_m->ports);
+ return err;
+}
+
+static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_m->max_ports; i++) {
+ if (mlxsw_m->module_to_port[i] > 0) {
+ mlxsw_m_port_remove(mlxsw_m,
+ mlxsw_m->module_to_port[i]);
+ mlxsw_m_port_module_unmap(mlxsw_m, i);
+ }
+ }
+
+ kfree(mlxsw_m->module_to_port);
+ kfree(mlxsw_m->ports);
+}
+
+static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ int err;
+
+ mlxsw_m->core = mlxsw_core;
+ mlxsw_m->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_m_ports_create(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_m_ports_remove(mlxsw_m);
+}
+
+static const struct mlxsw_config_profile mlxsw_m_config_profile;
+
+static struct mlxsw_driver mlxsw_m_driver = {
+ .kind = mlxsw_m_driver_name,
+ .priv_size = sizeof(struct mlxsw_m),
+ .init = mlxsw_m_init,
+ .fini = mlxsw_m_fini,
+ .profile = &mlxsw_m_config_profile,
+ .res_query_enabled = true,
+};
+
+static const struct i2c_device_id mlxsw_m_i2c_id[] = {
{ "mlxsw_minimal", 0},
{ },
};
-static struct i2c_driver mlxsw_minimal_i2c_driver = {
+static struct i2c_driver mlxsw_m_i2c_driver = {
.driver.name = "mlxsw_minimal",
.class = I2C_CLASS_HWMON,
- .id_table = mlxsw_minimal_i2c_id,
+ .id_table = mlxsw_m_i2c_id,
};
-static int __init mlxsw_minimal_module_init(void)
+static int __init mlxsw_m_module_init(void)
{
int err;
- err = mlxsw_core_driver_register(&mlxsw_minimal_driver);
+ err = mlxsw_core_driver_register(&mlxsw_m_driver);
if (err)
return err;
- err = mlxsw_i2c_driver_register(&mlxsw_minimal_i2c_driver);
+ err = mlxsw_i2c_driver_register(&mlxsw_m_i2c_driver);
if (err)
goto err_i2c_driver_register;
return 0;
err_i2c_driver_register:
- mlxsw_core_driver_unregister(&mlxsw_minimal_driver);
+ mlxsw_core_driver_unregister(&mlxsw_m_driver);
return err;
}
-static void __exit mlxsw_minimal_module_exit(void)
+static void __exit mlxsw_m_module_exit(void)
{
- mlxsw_i2c_driver_unregister(&mlxsw_minimal_i2c_driver);
- mlxsw_core_driver_unregister(&mlxsw_minimal_driver);
+ mlxsw_i2c_driver_unregister(&mlxsw_m_i2c_driver);
+ mlxsw_core_driver_unregister(&mlxsw_m_driver);
}
-module_init(mlxsw_minimal_module_init);
-module_exit(mlxsw_minimal_module_exit);
+module_init(mlxsw_m_module_init);
+module_exit(mlxsw_m_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox minimal driver");
-MODULE_DEVICE_TABLE(i2c, mlxsw_minimal_i2c_id);
+MODULE_DEVICE_TABLE(i2c, mlxsw_m_i2c_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 66b8098c6fd2..b40455f8293d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+ char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+ memcpy(ncqe, cqe, q->elem_size);
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
if (sendq) {
struct mlxsw_pci_queue *sdq;
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, cqe);
+ wqe_counter, ncqe);
q->u.cq.comp_sdq_count++;
} else {
struct mlxsw_pci_queue *rdq;
rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
- wqe_counter, q->u.cq.v, cqe);
+ wqe_counter, q->u.cq.v, ncqe);
q->u.cq.comp_rdq_count++;
}
if (++items == credits)
break;
}
- if (items) {
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ if (items)
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- }
}
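
The tasklet change copies each completion entry to an on-stack buffer and rings the consumer doorbell before handling it, so the hardware is free to reuse the ring slot while only the private copy is still in use. A standalone sketch of that copy-then-release pattern, with a plain array standing in for the completion queue:

#include <stdio.h>
#include <string.h>

#define ELEM_SIZE 32
#define Q_COUNT   4

static char ring[Q_COUNT][ELEM_SIZE];          /* shared with "hardware" */
static int consumer;

static void ring_consumer_doorbell(void)
{
	/* After this point the slot may be overwritten by the device. */
	consumer = (consumer + 1) % Q_COUNT;
}

static void process_completion(const char *cqe)
{
	printf("handling completion %.8s\n", cqe);
}

int main(void)
{
	char ncqe[ELEM_SIZE];

	strcpy(ring[consumer], "cqe-0000");
	memcpy(ncqe, ring[consumer], ELEM_SIZE);   /* copy first ... */
	ring_consumer_doorbell();                  /* ... then release slot */
	process_completion(ncqe);                  /* safe: uses the copy */
	return 0;
}
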
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
@@ -1037,42 +1039,6 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}
-static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
- struct mlxsw_res *res)
-{
- int index, i;
- u64 data;
- u16 id;
- int err;
-
- if (!res)
- return 0;
-
- mlxsw_cmd_mbox_zero(mbox);
-
- for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
- index++) {
- err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
- if (err)
- return err;
-
- for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
- id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
- data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
-
- if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
- return 0;
-
- mlxsw_res_parse(res, id, data);
- }
- }
-
- /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
- * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
- */
- return -EIO;
-}
-
static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_config_profile *profile,
@@ -1365,10 +1331,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
- break;
+ return 0;
cond_resched();
} while (time_before(jiffies, end));
- return 0;
+ return -EBUSY;
}
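
With this fix the reset wait loop reports -EBUSY when the firmware-ready magic never shows up within the timeout, instead of falling through and returning success. A standalone model of that bounded polling, using an attempt counter instead of jiffies and a hypothetical read_fw_ready() in place of the register read:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define FW_READY_MAGIC 0x5E
#define MAX_ATTEMPTS   130              /* stands in for the 13 s timeout */

static uint32_t read_fw_ready(int attempt)
{
	/* Hypothetical register read: becomes ready after a few polls. */
	return attempt >= 5 ? FW_READY_MAGIC : 0;
}

static int wait_fw_ready(void)
{
	int attempt;

	for (attempt = 0; attempt < MAX_ATTEMPTS; attempt++) {
		if (read_fw_ready(attempt) == FW_READY_MAGIC)
			return 0;
		/* cond_resched()/msleep() would go here in the driver */
	}
	return -EBUSY;                  /* timed out: report the failure */
}

int main(void)
{
	printf("wait_fw_ready() = %d\n", wait_fw_ready());
	return 0;
}
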
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
@@ -1457,7 +1423,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_boardinfo;
- err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res);
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
if (err)
goto err_query_resources;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index bb99f6d41fe0..ffee38e36ce8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
@@ -53,6 +53,7 @@
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 9b48dffc9f63..eb4c5e8964cd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2199,6 +2199,14 @@ MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8);
*/
MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16);
+/* reg_pagt_multi
+ * Multi-ACL
+ * 0 - This ACL is the last ACL in the multi-ACL
+ * 1 - This ACL is part of a multi-ACL
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pagt, multi, 0x30, 31, 1, 0x04, 0x00, false);
+
/* reg_pagt_acl_id
* ACL identifier
* Access: RW
@@ -2212,12 +2220,13 @@ static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id)
}
static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index,
- u16 acl_id)
+ u16 acl_id, bool multi)
{
u8 size = mlxsw_reg_pagt_size_get(payload);
if (index >= size)
mlxsw_reg_pagt_size_set(payload, index + 1);
+ mlxsw_reg_pagt_multi_set(payload, index, multi);
mlxsw_reg_pagt_acl_id_set(payload, index, acl_id);
}
@@ -3962,6 +3971,25 @@ enum {
*/
MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M BIT(0)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII BIT(1)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII BIT(2)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R BIT(3)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G BIT(4)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G BIT(5)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR BIT(6)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 BIT(7)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR BIT(8)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
+
+/* reg_ptys_ext_eth_proto_cap
+ * Extended Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
+
#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
@@ -4016,6 +4044,12 @@ MLXSW_ITEM32(reg, ptys, ib_link_width_cap, 0x10, 16, 16);
*/
MLXSW_ITEM32(reg, ptys, ib_proto_cap, 0x10, 0, 16);
+/* reg_ptys_ext_eth_proto_admin
+ * Extended speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, ext_eth_proto_admin, 0x14, 0, 32);
+
/* reg_ptys_eth_proto_admin
* Speed and protocol to set port to.
* Access: RW
@@ -4034,6 +4068,12 @@ MLXSW_ITEM32(reg, ptys, ib_link_width_admin, 0x1C, 16, 16);
*/
MLXSW_ITEM32(reg, ptys, ib_proto_admin, 0x1C, 0, 16);
+/* reg_ptys_ext_eth_proto_oper
+ * The extended current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, ext_eth_proto_oper, 0x20, 0, 32);
+
/* reg_ptys_eth_proto_oper
* The current speed and protocol configured for the port.
* Access: RO
@@ -4052,12 +4092,23 @@ MLXSW_ITEM32(reg, ptys, ib_link_width_oper, 0x28, 16, 16);
*/
MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16);
-/* reg_ptys_eth_proto_lp_advertise
- * The protocols that were advertised by the link partner during
- * autonegotiation.
+enum mlxsw_reg_ptys_connector_type {
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA,
+ MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER,
+};
+
+/* reg_ptys_connector_type
+ * Connector type indication.
* Access: RO
*/
-MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
+MLXSW_ITEM32(reg, ptys, connector_type, 0x2C, 0, 4);
static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
u32 proto_admin, bool autoneg)
@@ -4069,17 +4120,46 @@ static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
}
+static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u8 local_port,
+ u32 proto_admin, bool autoneg)
+{
+ MLXSW_REG_ZERO(ptys, payload);
+ mlxsw_reg_ptys_local_port_set(payload, local_port);
+ mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+ mlxsw_reg_ptys_ext_eth_proto_admin_set(payload, proto_admin);
+ mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
+}
+
static inline void mlxsw_reg_ptys_eth_unpack(char *payload,
u32 *p_eth_proto_cap,
- u32 *p_eth_proto_adm,
+ u32 *p_eth_proto_admin,
u32 *p_eth_proto_oper)
{
if (p_eth_proto_cap)
- *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
- if (p_eth_proto_adm)
- *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+ *p_eth_proto_cap =
+ mlxsw_reg_ptys_eth_proto_cap_get(payload);
+ if (p_eth_proto_admin)
+ *p_eth_proto_admin =
+ mlxsw_reg_ptys_eth_proto_admin_get(payload);
+ if (p_eth_proto_oper)
+ *p_eth_proto_oper =
+ mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
+
+static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload,
+ u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper)
+{
+ if (p_eth_proto_cap)
+ *p_eth_proto_cap =
+ mlxsw_reg_ptys_ext_eth_proto_cap_get(payload);
+ if (p_eth_proto_admin)
+ *p_eth_proto_admin =
+ mlxsw_reg_ptys_ext_eth_proto_admin_get(payload);
if (p_eth_proto_oper)
- *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+ *p_eth_proto_oper =
+ mlxsw_reg_ptys_ext_eth_proto_oper_get(payload);
}
static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port,
@@ -5666,6 +5746,8 @@ enum mlxsw_reg_ritr_loopback_protocol {
MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4,
/* IPinIP IPv6 underlay Unicast */
MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6,
+ /* IPinIP generic - used for Spectrum-2 underlay RIF */
+ MLXSW_REG_RITR_LOOPBACK_GENERIC,
};
/* reg_ritr_loopback_protocol
@@ -5706,6 +5788,13 @@ MLXSW_ITEM32(reg, ritr, loopback_ipip_options, 0x10, 20, 4);
*/
MLXSW_ITEM32(reg, ritr, loopback_ipip_uvr, 0x10, 0, 16);
+/* reg_ritr_loopback_ipip_underlay_rif
+ * Underlay ingress router interface.
+ * Reserved for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, loopback_ipip_underlay_rif, 0x14, 0, 16);
+
/* reg_ritr_loopback_ipip_usip*
* Encapsulation Underlay source IP.
* Access: RW
@@ -5821,11 +5910,12 @@ static inline void
mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload,
enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
enum mlxsw_reg_ritr_loopback_ipip_options options,
- u16 uvr_id, u32 gre_key)
+ u16 uvr_id, u16 underlay_rif, u32 gre_key)
{
mlxsw_reg_ritr_loopback_ipip_type_set(payload, ipip_type);
mlxsw_reg_ritr_loopback_ipip_options_set(payload, options);
mlxsw_reg_ritr_loopback_ipip_uvr_set(payload, uvr_id);
+ mlxsw_reg_ritr_loopback_ipip_underlay_rif_set(payload, underlay_rif);
mlxsw_reg_ritr_loopback_ipip_gre_key_set(payload, gre_key);
}
@@ -5833,12 +5923,12 @@ static inline void
mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
enum mlxsw_reg_ritr_loopback_ipip_options options,
- u16 uvr_id, u32 usip, u32 gre_key)
+ u16 uvr_id, u16 underlay_rif, u32 usip, u32 gre_key)
{
mlxsw_reg_ritr_loopback_protocol_set(payload,
MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4);
mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options,
- uvr_id, gre_key);
+ uvr_id, underlay_rif, gre_key);
mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
}
@@ -7200,6 +7290,13 @@ MLXSW_ITEM32(reg, rtdp, type, 0x00, 28, 4);
*/
MLXSW_ITEM32(reg, rtdp, tunnel_index, 0x00, 0, 24);
+/* reg_rtdp_egress_router_interface
+ * Underlay egress router interface.
+ * Valid range is from 0 to cap_max_router_interfaces - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rtdp, egress_router_interface, 0x40, 0, 16);
+
/* IPinIP */
/* reg_rtdp_ipip_irif
@@ -7849,6 +7946,35 @@ static inline void mlxsw_reg_mfsl_unpack(char *payload, u8 tacho,
*p_tach_max = mlxsw_reg_mfsl_tach_max_get(payload);
}
+/* FORE - Fan Out of Range Event Register
+ * --------------------------------------
+ * This register reports the status of the controlled fans compared to the
+ * range defined by the MFSL register.
+ */
+#define MLXSW_REG_FORE_ID 0x9007
+#define MLXSW_REG_FORE_LEN 0x0C
+
+MLXSW_REG_DEFINE(fore, MLXSW_REG_FORE_ID, MLXSW_REG_FORE_LEN);
+
+/* fan_under_limit
+ * Fan speed is below the low limit defined in the MFSL register. Each bit
+ * relates to a single tachometer and indicates that the corresponding
+ * tachometer reading is below the threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, fore, fan_under_limit, 0x00, 16, 10);
+
+static inline void mlxsw_reg_fore_unpack(char *payload, u8 tacho,
+ bool *fault)
+{
+ u16 limit;
+
+ if (fault) {
+ limit = mlxsw_reg_fore_fan_under_limit_get(payload);
+ *fault = limit & BIT(tacho);
+ }
+}
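
mlxsw_reg_fore_unpack() reduces the FORE payload to one fault bit per tachometer, taken from the fan_under_limit field. A standalone sketch of that bit test over all ten tachometers; the mask value is illustrative rather than read from hardware:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TACHO_COUNT 10

static bool fan_under_limit(uint16_t limit_mask, unsigned int tacho)
{
	return limit_mask & (1u << tacho);
}

int main(void)
{
	uint16_t limit_mask = 0x024;    /* illustrative: tachos 2 and 5 slow */
	unsigned int tacho;

	for (tacho = 0; tacho < TACHO_COUNT; tacho++)
		if (fan_under_limit(limit_mask, tacho))
			printf("tacho %u below MFSL low limit\n", tacho);
	return 0;
}
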
+
/* MTCAP - Management Temperature Capabilities
* -------------------------------------------
* This register exposes the capabilities of the device and
@@ -7975,6 +8101,80 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
+/* MTBR - Management Temperature Bulk Register
+ * -------------------------------------------
+ * This register is used for bulk temperature reading.
+ */
+#define MLXSW_REG_MTBR_ID 0x900F
+#define MLXSW_REG_MTBR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_MTBR_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_MTBR_REC_MAX_COUNT 47 /* firmware limitation */
+#define MLXSW_REG_MTBR_LEN (MLXSW_REG_MTBR_BASE_LEN + \
+ MLXSW_REG_MTBR_REC_LEN * \
+ MLXSW_REG_MTBR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN);
+
+/* reg_mtbr_base_sensor_index
+ * Base sensor index to access (0 - ASIC sensor, 1-63 - ambient sensors,
+ * 64-127 - mapped to the SFP+/QSFP modules sequentially).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtbr, base_sensor_index, 0x00, 0, 7);
+
+/* reg_mtbr_num_rec
+ * Request: Number of records to read
+ * Response: Number of records read
+ * See above description for more details.
+ * Range 1..255
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtbr, num_rec, 0x04, 0, 8);
+
+/* reg_mtbr_rec_max_temp
+ * The highest measured temperature from the sensor.
+ * When the bit mte is cleared, the field max_temperature is reserved.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16,
+ 16, MLXSW_REG_MTBR_REC_LEN, 0x00, false);
+
+/* reg_mtbr_rec_temp
+ * Temperature reading from the sensor. Reading is in 0.125 degree
+ * Celsius units.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
+ MLXSW_REG_MTBR_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_mtbr_pack(char *payload, u8 base_sensor_index,
+ u8 num_rec)
+{
+ MLXSW_REG_ZERO(mtbr, payload);
+ mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index);
+ mlxsw_reg_mtbr_num_rec_set(payload, num_rec);
+}
+
+/* Error codes from temperature reading */
+enum mlxsw_reg_mtbr_temp_status {
+ MLXSW_REG_MTBR_NO_CONN = 0x8000,
+ MLXSW_REG_MTBR_NO_TEMP_SENS = 0x8001,
+ MLXSW_REG_MTBR_INDEX_NA = 0x8002,
+ MLXSW_REG_MTBR_BAD_SENS_INFO = 0x8003,
+};
+
+/* Base index for reading modules temperature */
+#define MLXSW_REG_MTBR_BASE_MODULE_INDEX 64
+
+static inline void mlxsw_reg_mtbr_temp_unpack(char *payload, int rec_ind,
+ u16 *p_temp, u16 *p_max_temp)
+{
+ if (p_temp)
+ *p_temp = mlxsw_reg_mtbr_rec_temp_get(payload, rec_ind);
+ if (p_max_temp)
+ *p_max_temp = mlxsw_reg_mtbr_rec_max_temp_get(payload, rec_ind);
+}
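
A record returned through MTBR can carry one of the error codes above instead of a temperature, so callers should check the value before converting it. A standalone sketch of that check; the sample values are made up, and the 0.125 degree Celsius step is an assumption carried over from the other mlxsw temperature registers:

#include <stdint.h>
#include <stdio.h>

/* Mirrors enum mlxsw_reg_mtbr_temp_status */
#define MTBR_NO_CONN        0x8000
#define MTBR_NO_TEMP_SENS   0x8001
#define MTBR_INDEX_NA       0x8002
#define MTBR_BAD_SENS_INFO  0x8003

static void report_module_temp(int module, uint16_t temp)
{
	switch (temp) {
	case MTBR_NO_CONN:
	case MTBR_NO_TEMP_SENS:
	case MTBR_INDEX_NA:
	case MTBR_BAD_SENS_INFO:
		printf("module %d: no valid reading (code 0x%04x)\n",
		       module, temp);
		return;
	default:
		/* Assumed step of 0.125 degrees Celsius per unit. */
		printf("module %d: %d.%03d C\n", module,
		       temp / 8, (temp % 8) * 125);
	}
}

int main(void)
{
	report_module_temp(0, 8 * 45);       /* 45.000 C */
	report_module_temp(1, MTBR_NO_CONN); /* no cable plugged in */
	return 0;
}
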
+
/* MCIA - Management Cable Info Access
* -----------------------------------
 * MCIA register is used to access the SFP+ and QSFP connector's EEPROM.
@@ -8029,13 +8229,41 @@ MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
*/
MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
-#define MLXSW_SP_REG_MCIA_EEPROM_SIZE 48
+#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256
+#define MLXSW_REG_MCIA_EEPROM_SIZE 48
+#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50
+#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51
+#define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0
+#define MLXSW_REG_MCIA_TH_ITEM_SIZE 2
+#define MLXSW_REG_MCIA_TH_PAGE_NUM 3
+#define MLXSW_REG_MCIA_PAGE0_LO 0
+#define MLXSW_REG_MCIA_TH_PAGE_OFF 0x80
+
+enum mlxsw_reg_mcia_eeprom_module_info_rev_id {
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
+};
+
+enum mlxsw_reg_mcia_eeprom_module_info_id {
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP = 0x03,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD = 0x18,
+};
+
+enum mlxsw_reg_mcia_eeprom_module_info {
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE,
+};
/* reg_mcia_eeprom
* Bytes to read/write.
* Access: RW
*/
-MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
+MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
u8 page_number, u16 device_addr,
@@ -9723,8 +9951,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
MLXSW_REG(mfsl),
+ MLXSW_REG(fore),
MLXSW_REG(mtcap),
MLXSW_REG(mtmp),
+ MLXSW_REG(mtbr),
MLXSW_REG(mcia),
MLXSW_REG(mpat),
MLXSW_REG(mpar),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index b8b3a01c2a9e..773ef7fdb285 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -26,6 +26,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_MAX_BUFFER_SIZE,
MLXSW_RES_ID_CELL_SIZE,
+ MLXSW_RES_ID_MAX_HEADROOM_SIZE,
MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
MLXSW_RES_ID_ACL_MAX_TCAM_RULES,
MLXSW_RES_ID_ACL_MAX_REGIONS,
@@ -79,6 +80,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
+ [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
[MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
[MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902,
[MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index eed1045e4d96..9eb63300c1d3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -32,6 +32,7 @@
#include "spectrum.h"
#include "pci.h"
#include "core.h"
+#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
@@ -852,8 +853,12 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
u16 delay = !!my_pfc ? my_pfc->delay : 0;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ u32 taken_headroom_cells = 0;
+ u32 max_headroom_cells;
int i, j, err;
+ max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
+
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
if (err)
@@ -862,8 +867,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
bool configure = false;
bool pfc = false;
+ u16 thres_cells;
+ u16 delay_cells;
+ u16 total_cells;
bool lossy;
- u16 thres;
for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
if (prio_tc[j] == i) {
@@ -877,10 +884,17 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
continue;
lossy = !(pfc || pause_en);
- thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
- pause_en);
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
+ thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+ delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
+ pfc, pause_en);
+ total_cells = thres_cells + delay_cells;
+
+ taken_headroom_cells += total_cells;
+ if (taken_headroom_cells > max_headroom_cells)
+ return -ENOBUFS;
+
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
+ thres_cells, lossy);
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
@@ -1700,6 +1714,18 @@ static int mlxsw_sp_set_features(struct net_device *dev,
mlxsw_sp_feature_hw_tc);
}
+static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ ppid->id_len = sizeof(mlxsw_sp->base_mac);
+ memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
@@ -1715,6 +1741,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
.ndo_set_features = mlxsw_sp_set_features,
+ .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -2103,7 +2130,7 @@ static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
int i;
for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
- snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+ snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
mlxsw_sp_port_hw_prio_stats[i].str, prio);
*p += ETH_GSTRING_LEN;
}
@@ -2114,7 +2141,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
int i;
for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
- snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+ snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
mlxsw_sp_port_hw_tc_stats[i].str, tc);
*p += ETH_GSTRING_LEN;
}
@@ -2310,13 +2337,13 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
}
}
-struct mlxsw_sp_port_link_mode {
+struct mlxsw_sp1_port_link_mode {
enum ethtool_link_mode_bit_indices mask_ethtool;
u32 mask;
u32 speed;
};
-static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
+static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
.mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
@@ -2388,11 +2415,6 @@ static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
.speed = SPEED_25000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
- .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- .speed = SPEED_25000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
.mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
.speed = SPEED_50000,
@@ -2449,11 +2471,12 @@ static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
},
};
-#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
+#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
static void
-mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd)
+mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
@@ -2471,19 +2494,23 @@ mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}
-static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
+static void
+mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
+ unsigned long *mode)
{
int i;
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
+ __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
mode);
}
}
-static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd)
+static void
+mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
@@ -2492,9 +2519,9 @@ static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
if (!carrier_ok)
goto out;
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
- speed = mlxsw_sp_port_link_mode[i].speed;
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
+ speed = mlxsw_sp1_port_link_mode[i].speed;
duplex = DUPLEX_FULL;
break;
}
@@ -2504,129 +2531,559 @@ out:
cmd->base.duplex = duplex;
}
-static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
+static u32
+mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
+ const struct ethtool_link_ksettings *cmd)
{
- if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_SGMII))
- return PORT_FIBRE;
+ u32 ptys_proto = 0;
+ int i;
- if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
- return PORT_DA;
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
+ cmd->link_modes.advertising))
+ ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
- if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
- MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
- return PORT_NONE;
+static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
+{
+ u32 ptys_proto = 0;
+ int i;
- return PORT_OTHER;
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sp1_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
+ }
+ return ptys_proto;
}
static u32
-mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
+mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
u32 ptys_proto = 0;
int i;
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
- cmd->link_modes.advertising))
- ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
+ ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
}
return ptys_proto;
}
-static u32 mlxsw_sp_to_ptys_speed(u32 speed)
+static int
+mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u32 *base_speed)
+{
+ *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
+ return 0;
+}
+
+static void
+mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u8 local_port, u32 proto_admin, bool autoneg)
+{
+ mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
+}
+
+static void
+mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper)
+{
+ mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
+ p_eth_proto_oper);
+}
+
+static const struct mlxsw_sp_port_type_speed_ops
+mlxsw_sp1_port_type_speed_ops = {
+ .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
+ .from_ptys_link = mlxsw_sp1_from_ptys_link,
+ .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
+ .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
+ .to_ptys_speed = mlxsw_sp1_to_ptys_speed,
+ .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
+ .port_speed_base = mlxsw_sp1_port_speed_base,
+ .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
+ .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
+};
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_5gbase_r[] = {
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
+
+struct mlxsw_sp2_port_link_mode {
+ const enum ethtool_link_mode_bit_indices *mask_ethtool;
+ int m_ethtool_len;
+ u32 mask;
+ u32 speed;
+};
+
+static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
+ .speed = SPEED_100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
+ .speed = SPEED_1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
+ .speed = SPEED_2500,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
+ .speed = SPEED_5000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
+ .speed = SPEED_10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
+ .speed = SPEED_40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
+ .speed = SPEED_200000,
+ },
+};
+
+#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
+
+static void
+mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
+{
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
+}
+
+static void
+mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
+ unsigned long *mode)
+{
+ int i;
+
+ for (i = 0; i < link_mode->m_ethtool_len; i++)
+ __set_bit(link_mode->mask_ethtool[i], mode);
+}
+
+static void
+mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
+ unsigned long *mode)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
+ mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+ mode);
+ }
+}
+
+static void
+mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ if (!carrier_ok)
+ goto out;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
+ speed = mlxsw_sp2_port_link_mode[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
+}
+
+static bool
+mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
+ const unsigned long *mode)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < link_mode->m_ethtool_len; i++) {
+ if (test_bit(link_mode->mask_ethtool[i], mode))
+ cnt++;
+ }
+
+ return cnt == link_mode->m_ethtool_len;
+}
+
+static u32
+mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
+ const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
int i;
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (speed == mlxsw_sp_port_link_mode[i].speed)
- ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+ cmd->link_modes.advertising))
+ ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
}
return ptys_proto;
}
-static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
+static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
{
u32 ptys_proto = 0;
int i;
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
- ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sp2_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
}
return ptys_proto;
}
-static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
- struct ethtool_link_ksettings *cmd)
+static u32
+mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
+ ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static int
+mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u32 *base_speed)
+{
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ int err;
+
+ /* In Spectrum-2, the base speed of a single lane (1x) can differ from
+ * port to port, so query it from the firmware.
+ */
+ mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+
+ if (eth_proto_cap &
+ MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) {
+ *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G;
+ return 0;
+ }
+
+ if (eth_proto_cap &
+ MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) {
+ *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static void
+mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u8 local_port, u32 proto_admin,
+ bool autoneg)
+{
+ mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
+}
+
+static void
+mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper)
+{
+ mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
+ p_eth_proto_admin, p_eth_proto_oper);
+}
+
+static const struct mlxsw_sp_port_type_speed_ops
+mlxsw_sp2_port_type_speed_ops = {
+ .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
+ .from_ptys_link = mlxsw_sp2_from_ptys_link,
+ .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
+ .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
+ .to_ptys_speed = mlxsw_sp2_to_ptys_speed,
+ .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed,
+ .port_speed_base = mlxsw_sp2_port_speed_base,
+ .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
+ .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
+};
+
+static void
+mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
- mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
- mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
+ ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
+ ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
}
-static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
- struct ethtool_link_ksettings *cmd)
+static void
+mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
+ u32 eth_proto_admin, bool autoneg,
+ struct ethtool_link_ksettings *cmd)
{
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
if (!autoneg)
return;
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
- mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
+ ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
+ cmd->link_modes.advertising);
}
-static void
-mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
- struct ethtool_link_ksettings *cmd)
+static u8
+mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
{
- if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
- return;
-
- ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
- mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
+ switch (connector_type) {
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
+ return PORT_OTHER;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
+ return PORT_NONE;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
+ return PORT_TP;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
+ return PORT_AUI;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
+ return PORT_BNC;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
+ return PORT_MII;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
+ return PORT_FIBRE;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
+ return PORT_DA;
+ case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
+ return PORT_OTHER;
+ default:
+ WARN_ON_ONCE(1);
+ return PORT_OTHER;
+ }
}
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
- u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
- u8 autoneg_status;
+ u8 connector_type;
bool autoneg;
int err;
+ ops = mlxsw_sp->port_type_speed_ops;
+
autoneg = mlxsw_sp_port->link.autoneg;
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ 0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
- mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
- &eth_proto_oper);
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
- mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
+ mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);
- mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
-
- eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
- autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
- mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
+ mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
+ cmd);
cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
- cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
- mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
- cmd);
+ connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
+ cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
+ ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
return 0;
}
@@ -2637,21 +3094,25 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
u32 eth_proto_cap, eth_proto_new;
bool autoneg;
int err;
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ 0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
- mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);
autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
eth_proto_new = autoneg ?
- mlxsw_sp_to_ptys_advert_link(cmd) :
- mlxsw_sp_to_ptys_speed(cmd->base.speed);
+ ops->to_ptys_advert_link(mlxsw_sp, cmd) :
+ ops->to_ptys_speed(mlxsw_sp, cmd->base.speed);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
@@ -2659,8 +3120,8 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
return -EINVAL;
}
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_new, autoneg);
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ eth_proto_new, autoneg);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
@@ -2701,117 +3162,18 @@ out:
return err;
}
-#define MLXSW_SP_I2C_ADDR_LOW 0x50
-#define MLXSW_SP_I2C_ADDR_HIGH 0x51
-#define MLXSW_SP_EEPROM_PAGE_LENGTH 256
-
-static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 offset, u16 size, void *data,
- unsigned int *p_read_size)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
- char mcia_pl[MLXSW_REG_MCIA_LEN];
- u16 i2c_addr;
- int status;
- int err;
-
- size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
-
- if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
- offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
- /* Cross pages read, read until offset 256 in low page */
- size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;
-
- i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
- if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
- i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
- offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
- }
-
- mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
- 0, 0, offset, size, i2c_addr);
-
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
- if (err)
- return err;
-
- status = mlxsw_reg_mcia_status_get(mcia_pl);
- if (status)
- return -EIO;
-
- mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
- memcpy(data, eeprom_tmp, size);
- *p_read_size = size;
-
- return 0;
-}
-
-enum mlxsw_sp_eeprom_module_info_rev_id {
- MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
- MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
- MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
-};
-
-enum mlxsw_sp_eeprom_module_info_id {
- MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
- MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
- MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
- MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
-};
-
-enum mlxsw_sp_eeprom_module_info {
- MLXSW_SP_EEPROM_MODULE_INFO_ID,
- MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
- MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
-};
-
static int mlxsw_sp_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
- u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
- u8 module_rev_id, module_id;
- unsigned int read_size;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
- err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
- MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
- module_info, &read_size);
- if (err)
- return err;
-
- if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
- return -EIO;
-
- module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
- module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
+ err = mlxsw_env_get_module_info(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module,
+ modinfo);
- switch (module_id) {
- case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- break;
- case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
- case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
- if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
- module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
- modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
- } else {
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- }
- break;
- case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
- modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return err;
}
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
@@ -2819,30 +3181,14 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
u8 *data)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
- int offset = ee->offset;
- unsigned int read_size;
- int i = 0;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
- if (!ee->len)
- return -EINVAL;
-
- memset(data, 0, ee->len);
+ err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module, ee,
+ data);
- while (i < ee->len) {
- err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
- ee->len - i, data + i,
- &read_size);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
- return err;
- }
-
- i += read_size;
- offset += read_size;
- }
-
- return 0;
+ return err;
}
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
@@ -2865,13 +3211,24 @@ static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
u32 eth_proto_admin;
+ u32 upper_speed;
+ u32 base_speed;
+ int err;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
+ &base_speed);
+ if (err)
+ return err;
+ upper_speed = base_speed * width;
- eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_admin, mlxsw_sp_port->link.autoneg);
+ eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ eth_proto_admin, mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -3207,7 +3564,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
- mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
if (err) {
@@ -3224,7 +3580,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
- mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
@@ -3267,7 +3622,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
- mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
mlxsw_sp_port_nve_fini(mlxsw_sp_port);
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
@@ -3744,8 +4098,8 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
burst_size = 7;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- rate = 4 * 1024;
- burst_size = 4;
+ rate = 1024;
+ burst_size = 7;
break;
default:
continue;
@@ -4094,6 +4448,9 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
+ mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
+ mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -4110,6 +4467,9 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -4398,6 +4758,71 @@ static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
ARRAY_SIZE(mlxsw_sp_devlink_params));
}
+static int
+mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
+ return 0;
+}
+
+static int
+mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
+}
+
+static const struct devlink_param mlxsw_sp2_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+ "acl_region_rehash_interval",
+ DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlxsw_sp_params_acl_region_rehash_intrvl_get,
+ mlxsw_sp_params_acl_region_rehash_intrvl_set,
+ NULL),
+};
+
+static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ union devlink_param_value value;
+ int err;
+
+ err = mlxsw_sp_params_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
+ ARRAY_SIZE(mlxsw_sp2_devlink_params));
+ if (err)
+ goto err_devlink_params_register;
+
+ value.vu32 = 0;
+ devlink_param_driverinit_value_set(devlink,
+ MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+ value);
+ return 0;
+
+err_devlink_params_register:
+ mlxsw_sp_params_unregister(mlxsw_core);
+ return err;
+}
+
+static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ devlink_params_unregister(priv_to_devlink(mlxsw_core),
+ mlxsw_sp2_devlink_params,
+ ARRAY_SIZE(mlxsw_sp2_devlink_params));
+ mlxsw_sp_params_unregister(mlxsw_core);
+}
+
static struct mlxsw_driver mlxsw_sp1_driver = {
.kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -4446,8 +4871,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp_params_register,
- .params_unregister = mlxsw_sp_params_unregister,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
@@ -4691,9 +5116,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
if (err)
goto err_col_port_add;
- err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
- if (err)
- goto err_col_port_enable;
mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
mlxsw_sp_port->local_port);
@@ -4707,8 +5129,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
-err_col_port_enable:
- mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
if (!lag->ref_count)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -4727,7 +5147,6 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
WARN_ON(lag->ref_count == 0);
- mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
/* Any VLANs configured on the port are no longer valid */
@@ -4772,21 +5191,56 @@ static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
-static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool lag_tx_enabled)
+static int
+mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
- if (lag_tx_enabled)
- return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
- mlxsw_sp_port->lag_id);
- else
- return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
- mlxsw_sp_port->lag_id);
+ int err;
+
+ err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
+ mlxsw_sp_port->lag_id);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+ if (err)
+ goto err_dist_port_add;
+
+ return 0;
+
+err_dist_port_add:
+ mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+ return err;
+}
+
+static int
+mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
+ mlxsw_sp_port->lag_id);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
+ mlxsw_sp_port->lag_id);
+ if (err)
+ goto err_col_port_disable;
+
+ return 0;
+
+err_col_port_disable:
+ mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
+ return err;
}
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
struct netdev_lag_lower_state_info *info)
{
- return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
+ if (info->tx_enabled)
+ return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
+ else
+ return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -5005,12 +5459,14 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
lower_dev,
upper_dev);
} else if (netif_is_lag_master(upper_dev)) {
- if (info->linking)
+ if (info->linking) {
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
upper_dev);
- else
+ } else {
+ mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
upper_dev);
+ }
} else if (netif_is_ovs_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
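The spectrum.c changes above stop calling the Spectrum-1 PTYS helpers directly and instead dispatch through mlxsw_sp->port_type_speed_ops, so the ethtool paths stay generation-agnostic while Spectrum-2 goes through the extended PTYS pack/unpack helpers. Below is a minimal, standalone sketch of that ops-dispatch pattern; the asic1_*/asic2_* names, the toy speed-to-mask mapping and the string "payload" are illustrative stand-ins, not the driver's actual types or register format.

/* Standalone sketch of the per-ASIC ops dispatch used above.
 * All names and the payload format are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

struct port_speed_ops {
	uint32_t (*to_ptys_speed)(uint32_t speed_mbps);
	void (*reg_eth_pack)(char *payload, uint32_t proto_admin);
};

static uint32_t asic1_to_ptys_speed(uint32_t speed_mbps)
{
	/* Generation 1: one fixed bit per speed (toy mapping). */
	return speed_mbps == 25000 ? 0x1 : 0;
}

static uint32_t asic2_to_ptys_speed(uint32_t speed_mbps)
{
	/* Generation 2: extended mask layout (toy mapping). */
	return speed_mbps == 25000 ? 0x10 : 0;
}

static void asic1_reg_eth_pack(char *payload, uint32_t proto_admin)
{
	snprintf(payload, 32, "ptys:%x", proto_admin);
}

static void asic2_reg_eth_pack(char *payload, uint32_t proto_admin)
{
	snprintf(payload, 32, "ptys_ext:%x", proto_admin);
}

static const struct port_speed_ops asic1_ops = {
	.to_ptys_speed = asic1_to_ptys_speed,
	.reg_eth_pack = asic1_reg_eth_pack,
};

static const struct port_speed_ops asic2_ops = {
	.to_ptys_speed = asic2_to_ptys_speed,
	.reg_eth_pack = asic2_reg_eth_pack,
};

/* Common code never branches on the ASIC generation; it only calls ops. */
static void set_speed(const struct port_speed_ops *ops, uint32_t speed_mbps)
{
	char payload[32];

	ops->reg_eth_pack(payload, ops->to_ptys_speed(speed_mbps));
	printf("%s\n", payload);
}

int main(void)
{
	set_speed(&asic1_ops, 25000);	/* prints "ptys:1" */
	set_speed(&asic2_ops, 25000);	/* prints "ptys_ext:10" */
	return 0;
}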
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a1c32a81b011..da6278b0caa4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -33,7 +33,8 @@
#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
-#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
+#define MLXSW_SP_PORT_BASE_SPEED_25G 25000 /* Mb/s */
+#define MLXSW_SP_PORT_BASE_SPEED_50G 50000 /* Mb/s */
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
@@ -75,6 +76,11 @@ enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_MAX,
};
+struct mlxsw_sp_rif_ops;
+
+extern const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[];
+extern const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[];
+
enum mlxsw_sp_fid_type {
MLXSW_SP_FID_TYPE_8021Q,
MLXSW_SP_FID_TYPE_8021D,
@@ -128,6 +134,8 @@ struct mlxsw_sp_kvdl_ops;
struct mlxsw_sp_mr_tcam_ops;
struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_nve_ops;
+struct mlxsw_sp_sb_vals;
+struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -161,6 +169,9 @@ struct mlxsw_sp {
const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
+ const struct mlxsw_sp_rif_ops **rif_ops_arr;
+ const struct mlxsw_sp_sb_vals *sb_vals;
+ const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
};
static inline struct mlxsw_sp_upper *
@@ -250,6 +261,29 @@ struct mlxsw_sp_port {
struct mlxsw_sp_acl_block *eg_acl_block;
};
+struct mlxsw_sp_port_type_speed_ops {
+ void (*from_ptys_supported_port)(struct mlxsw_sp *mlxsw_sp,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd);
+ void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
+ unsigned long *mode);
+ void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
+ bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd);
+ u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp,
+ const struct ethtool_link_ksettings *cmd);
+ u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 speed);
+ u32 (*to_ptys_upper_speed)(struct mlxsw_sp *mlxsw_sp, u32 upper_speed);
+ int (*port_speed_base)(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u32 *base_speed);
+ void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u8 local_port, u32 proto_admin, bool autoneg);
+ void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
+ u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper);
+};
+
static inline struct net_device *
mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
{
@@ -365,12 +399,14 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
u32 *p_cur, u32 *p_max);
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
+u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp);
+
+extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
+extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
/* spectrum_switchdev.c */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
-void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
-void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
void
@@ -501,6 +537,9 @@ void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
const union mlxsw_sp_l3addr *ul_sip);
int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
u16 *vr_id);
+int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ u16 *ul_rif_index);
+void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index);
/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
@@ -681,6 +720,8 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
+u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val);
/* spectrum_acl_tcam.c */
struct mlxsw_sp_acl_tcam;
@@ -695,10 +736,13 @@ struct mlxsw_sp_acl_tcam_ops {
size_t region_priv_size;
int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
void *tcam_priv,
- struct mlxsw_sp_acl_tcam_region *region);
+ struct mlxsw_sp_acl_tcam_region *region,
+ void *hints_priv);
void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region);
+ void * (*region_rehash_hints_get)(void *region_priv);
+ void (*region_rehash_hints_put)(void *hints_priv);
size_t chunk_priv_size;
void (*chunk_init)(void *region_priv, void *chunk_priv,
unsigned int priority);
@@ -712,8 +756,7 @@ struct mlxsw_sp_acl_tcam_ops {
void *region_priv, void *chunk_priv,
void *entry_priv);
int (*entry_action_replace)(struct mlxsw_sp *mlxsw_sp,
- void *region_priv, void *chunk_priv,
- void *entry_priv,
+ void *region_priv, void *entry_priv,
struct mlxsw_sp_acl_rule_info *rulei);
int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *entry_priv,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
index fe270c1a26a6..3a636f753607 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -112,7 +112,8 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
void *tcam_priv,
- struct mlxsw_sp_acl_tcam_region *_region)
+ struct mlxsw_sp_acl_tcam_region *_region,
+ void *hints_priv)
{
struct mlxsw_sp1_acl_tcam_region *region = region_priv;
int err;
@@ -194,8 +195,7 @@ static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp1_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
- void *region_priv, void *chunk_priv,
- void *entry_priv,
+ void *region_priv, void *entry_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 234ab51916db..6c66a0f1b79e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -139,7 +139,8 @@ static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
static int
mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
void *tcam_priv,
- struct mlxsw_sp_acl_tcam_region *_region)
+ struct mlxsw_sp_acl_tcam_region *_region,
+ void *hints_priv)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
struct mlxsw_sp2_acl_tcam *tcam = tcam_priv;
@@ -147,7 +148,8 @@ mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
region->region = _region;
return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam,
- &region->aregion, _region,
+ &region->aregion,
+ _region, hints_priv,
&mlxsw_sp2_acl_ctcam_region_ops);
}
@@ -166,6 +168,18 @@ mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
}
+static void *mlxsw_sp2_acl_tcam_region_rehash_hints_get(void *region_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+
+ return mlxsw_sp_acl_atcam_rehash_hints_get(&region->aregion);
+}
+
+static void mlxsw_sp2_acl_tcam_region_rehash_hints_put(void *hints_priv)
+{
+ mlxsw_sp_acl_atcam_rehash_hints_put(hints_priv);
+}
+
static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
unsigned int priority)
{
@@ -212,18 +226,15 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp2_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
- void *region_priv, void *chunk_priv,
- void *entry_priv,
+ void *region_priv, void *entry_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
- struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
entry->act_block = rulei->act_block;
return mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
&region->aregion,
- &chunk->achunk,
&entry->aentry, rulei);
}
@@ -246,6 +257,8 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
.region_init = mlxsw_sp2_acl_tcam_region_init,
.region_fini = mlxsw_sp2_acl_tcam_region_fini,
.region_associate = mlxsw_sp2_acl_tcam_region_associate,
+ .region_rehash_hints_get = mlxsw_sp2_acl_tcam_region_rehash_hints_get,
+ .region_rehash_hints_put = mlxsw_sp2_acl_tcam_region_rehash_hints_put,
.chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk),
.chunk_init = mlxsw_sp2_acl_tcam_chunk_init,
.chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 695d33358988..a146a44634e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -588,7 +588,7 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
{
u8 ethertype;
- if (action == TCA_VLAN_ACT_MODIFY) {
+ if (action == FLOW_ACTION_VLAN_MANGLE) {
switch (proto) {
case ETH_P_8021Q:
ethertype = 0;
@@ -640,7 +640,7 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
- rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp),
+ rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
GFP_KERNEL);
if (!rule) {
err = -ENOMEM;
@@ -742,8 +742,7 @@ int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
rulei = mlxsw_sp_acl_rule_rulei(rule);
rulei->act_block = afa_block;
- return ops->rule_action_replace(mlxsw_sp, ruleset->priv, rule->priv,
- rule->rulei);
+ return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}
struct mlxsw_sp_acl_rule *
@@ -806,7 +805,7 @@ static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
msecs_to_jiffies(interval));
}
-static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work)
+static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
rule_activity_update.dw.work);
@@ -885,7 +884,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
/* Create the delayed work for the rule activity_update */
INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
- mlxsw_sp_acl_rul_activity_update_work);
+ mlxsw_sp_acl_rule_activity_update_work);
acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
return 0;
@@ -913,3 +912,19 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_afk_destroy(acl->afk);
kfree(acl);
}
+
+u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+
+ return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
+ &acl->tcam);
+}
+
+int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
+{
+ struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+
+ return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
+ &acl->tcam, val);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 80fb268d51a5..ded4cf658680 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -7,6 +7,8 @@
#include <linux/gfp.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mlxsw.h>
#include "reg.h"
#include "core.h"
@@ -316,6 +318,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_tcam_region *region,
+ void *hints_priv,
const struct mlxsw_sp_acl_ctcam_region_ops *ops)
{
int err;
@@ -332,7 +335,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
err = aregion->ops->init(aregion);
if (err)
goto err_ops_init;
- err = mlxsw_sp_acl_erp_region_init(aregion);
+ err = mlxsw_sp_acl_erp_region_init(aregion, hints_priv);
if (err)
goto err_erp_region_init;
err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion,
@@ -390,8 +393,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
- erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -399,7 +401,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
- aentry->ht_key.enc_key, erp_id,
+ aentry->enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -424,12 +426,11 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
- char *enc_key = aentry->ht_key.enc_key;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
region->tcam_region_info,
- enc_key, erp_id,
+ aentry->enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -458,7 +459,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
priority, region->tcam_region_info,
- aentry->ht_key.enc_key, erp_id,
+ aentry->enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -481,15 +482,15 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->full_enc_key, mask);
+ aentry->ht_key.full_enc_key, mask);
erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
if (IS_ERR(erp_mask))
return PTR_ERR(erp_mask);
aentry->erp_mask = erp_mask;
aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
- memcpy(aentry->ht_key.enc_key, aentry->full_enc_key,
- sizeof(aentry->ht_key.enc_key));
+ memcpy(aentry->enc_key, aentry->ht_key.full_enc_key,
+ sizeof(aentry->enc_key));
/* Compute all needed delta information and clear the delta bits
* from the encrypted key.
@@ -498,8 +499,9 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
aentry->delta_info.value =
- mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key);
- mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
+ mlxsw_sp_acl_erp_delta_value(delta,
+ aentry->ht_key.full_enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key);
/* Add rule to the list of A-TCAM rules, assuming this
* rule is intended to A-TCAM. In case this rule does
@@ -579,6 +581,7 @@ int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
/* It is possible we failed to add the rule to the A-TCAM due to
* exceeded number of masks. Try to spill into C-TCAM.
*/
+ trace_mlxsw_sp_acl_atcam_entry_add_ctcam_spill(mlxsw_sp, aregion);
err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion,
&achunk->cchunk, &aentry->centry,
rulei, true);
@@ -603,7 +606,6 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
int
mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry,
struct mlxsw_sp_acl_rule_info *rulei)
{
@@ -612,7 +614,6 @@ mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_acl_atcam_is_centry(aentry))
err = mlxsw_sp_acl_ctcam_entry_action_replace(mlxsw_sp,
&aregion->cregion,
- &achunk->cchunk,
&aentry->centry,
rulei);
else
@@ -634,3 +635,14 @@ void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam);
}
+
+void *
+mlxsw_sp_acl_atcam_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ return mlxsw_sp_acl_erp_rehash_hints_get(aregion);
+}
+
+void mlxsw_sp_acl_atcam_rehash_hints_put(void *hints_priv)
+{
+ mlxsw_sp_acl_erp_rehash_hints_put(hints_priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 505b87846acc..3a2de13fcb68 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -5,11 +5,13 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
+#include <linux/mutex.h>
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
struct mlxsw_sp_acl_bf {
+ struct mutex lock; /* Protects Bloom Filter updates. */
unsigned int bank_size;
refcount_t refcnt[0];
};
@@ -133,7 +135,7 @@ mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id,
sizeof(erp_region_id));
memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET,
- &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
+ &aentry->enc_key[chunk_key_offsets[chunk_index]],
MLXSW_BLOOM_CHUNK_KEY_BYTES);
chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES;
}
@@ -172,26 +174,36 @@ mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
u16 bf_index;
int err;
+ mutex_lock(&bf->lock);
+
bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
bf_index);
- if (refcount_inc_not_zero(&bf->refcnt[rule_index]))
- return 0;
+ if (refcount_inc_not_zero(&bf->refcnt[rule_index])) {
+ err = 0;
+ goto unlock;
+ }
peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
- if (!peabfe_pl)
- return -ENOMEM;
+ if (!peabfe_pl) {
+ err = -ENOMEM;
+ goto unlock;
+ }
mlxsw_reg_peabfe_pack(peabfe_pl);
mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 1, erp_bank, bf_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
kfree(peabfe_pl);
if (err)
- return err;
+ goto unlock;
refcount_set(&bf->refcnt[rule_index], 1);
- return 0;
+ err = 0;
+
+unlock:
+ mutex_unlock(&bf->lock);
+ return err;
}
void
@@ -205,6 +217,8 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
char *peabfe_pl;
u16 bf_index;
+ mutex_lock(&bf->lock);
+
bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
bf_index);
@@ -212,13 +226,16 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
if (refcount_dec_and_test(&bf->refcnt[rule_index])) {
peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
if (!peabfe_pl)
- return;
+ goto unlock;
mlxsw_reg_peabfe_pack(peabfe_pl);
mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 0, erp_bank, bf_index);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
kfree(peabfe_pl);
}
+
+unlock:
+ mutex_unlock(&bf->lock);
}
struct mlxsw_sp_acl_bf *
@@ -234,16 +251,19 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
* is 2^ACL_MAX_BF_LOG
*/
bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
- bf = kzalloc(sizeof(*bf) + bf_bank_size * num_erp_banks *
- sizeof(*bf->refcnt), GFP_KERNEL);
+ bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
+ GFP_KERNEL);
if (!bf)
return ERR_PTR(-ENOMEM);
bf->bank_size = bf_bank_size;
+ mutex_init(&bf->lock);
+
return bf;
}
void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf)
{
+ mutex_destroy(&bf->lock);
kfree(bf);
}
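The Bloom filter hunk above serializes the reference-counted add/del paths with a dedicated mutex (bf->lock) and switches the trailing refcnt[] allocation to struct_size(). The following is a standalone userspace analogue of that lock-plus-refcount pattern; program_hw() and the plain unsigned counters are hypothetical stand-ins for the PEABFE register write and the kernel's refcount_t.

/* Standalone sketch of the mutex-protected, refcounted add/del shape
 * used by the Bloom filter code above. program_hw() is a stand-in.
 */
#include <pthread.h>
#include <stdlib.h>
#include <stddef.h>

struct bf {
	pthread_mutex_t lock;	/* serializes refcnt[] and HW updates */
	size_t bank_size;
	unsigned int refcnt[];	/* flexible array, one counter per index */
};

static int program_hw(size_t index, int set)
{
	(void)index; (void)set;
	return 0;		/* pretend the register write succeeded */
}

static int bf_entry_add(struct bf *bf, size_t index)
{
	int err = 0;

	pthread_mutex_lock(&bf->lock);
	if (bf->refcnt[index]++ == 0) {
		/* First user of this index: program the filter bit. */
		err = program_hw(index, 1);
		if (err)
			bf->refcnt[index]--;	/* roll back on failure */
	}
	pthread_mutex_unlock(&bf->lock);
	return err;
}

static void bf_entry_del(struct bf *bf, size_t index)
{
	pthread_mutex_lock(&bf->lock);
	if (bf->refcnt[index] > 0 && --bf->refcnt[index] == 0)
		program_hw(index, 0);	/* last user: clear the bit */
	pthread_mutex_unlock(&bf->lock);
}

static struct bf *bf_create(size_t bank_size)
{
	/* The kernel code uses struct_size() for this same computation. */
	struct bf *bf = calloc(1, sizeof(*bf) +
			       bank_size * sizeof(bf->refcnt[0]));

	if (!bf)
		return NULL;
	bf->bank_size = bank_size;
	pthread_mutex_init(&bf->lock, NULL);
	return bf;
}

int main(void)
{
	struct bf *bf = bf_create(16);

	if (!bf)
		return 1;
	bf_entry_add(bf, 3);	/* programs index 3 */
	bf_entry_add(bf, 3);	/* only bumps the refcount */
	bf_entry_del(bf, 3);	/* still referenced, HW untouched */
	bf_entry_del(bf, 3);	/* last user, clears index 3 */
	free(bf);
	return 0;
}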
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index b0f2d8e8ded0..05680a7e6c56 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -72,7 +72,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
act_set = mlxsw_afa_block_first_set(rulei->act_block);
mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ goto err_ptce2_write;
+
+ return 0;
+
+err_ptce2_write:
+ cregion->ops->entry_remove(cregion, centry);
+ return err;
}
static void
@@ -215,7 +223,6 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
- struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry,
struct mlxsw_sp_acl_rule_info *rulei)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 1c19feefa5f2..c1a9cc9a3292 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -7,6 +7,7 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/objagg.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@@ -63,6 +64,7 @@ struct mlxsw_sp_acl_erp_table {
unsigned int num_ctcam_erps;
unsigned int num_deltas;
struct objagg *objagg;
+ struct mutex objagg_lock; /* guards objagg manipulation */
};
struct mlxsw_sp_acl_erp_table_ops {
@@ -1001,17 +1003,15 @@ struct mlxsw_sp_acl_erp_mask *
mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
const char *mask, bool ctcam)
{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key key;
struct objagg_obj *objagg_obj;
- /* eRPs are allocated from a shared resource, but currently all
- * allocations are done under RTNL.
- */
- ASSERT_RTNL();
-
memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
key.ctcam = ctcam;
- objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key);
+ mutex_lock(&erp_table->objagg_lock);
+ objagg_obj = objagg_obj_get(erp_table->objagg, &key);
+ mutex_unlock(&erp_table->objagg_lock);
if (IS_ERR(objagg_obj))
return ERR_CAST(objagg_obj);
return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
@@ -1021,9 +1021,11 @@ void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp_mask *erp_mask)
{
struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
- ASSERT_RTNL();
- objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
+ mutex_lock(&erp_table->objagg_lock);
+ objagg_obj_put(erp_table->objagg, objagg_obj);
+ mutex_unlock(&erp_table->objagg_lock);
}
int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
@@ -1035,7 +1037,6 @@ int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
unsigned int erp_bank;
- ASSERT_RTNL();
if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
return 0;
@@ -1054,7 +1055,6 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
unsigned int erp_bank;
- ASSERT_RTNL();
if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
return;
@@ -1202,6 +1202,32 @@ mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
return 0;
}
+static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj,
+ const void *obj)
+{
+ const struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
+ const struct mlxsw_sp_acl_erp_key *key = obj;
+ u16 delta_start;
+ u8 delta_mask;
+ int err;
+
+ err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
+ &delta_start, &delta_mask);
+ return err ? false : true;
+}
+
+static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2)
+{
+ const struct mlxsw_sp_acl_erp_key *key1 = obj1;
+ const struct mlxsw_sp_acl_erp_key *key2 = obj2;
+
+ /* For hints purposes, two objects are considered equal
+ * if their masks are the same; the "ctcam" value does not
+ * matter.
+ */
+ return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
+}
+
static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
void *obj)
{
@@ -1256,12 +1282,17 @@ static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv)
kfree(delta);
}
-static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj)
+static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj,
+ unsigned int root_id)
{
struct mlxsw_sp_acl_atcam_region *aregion = priv;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key *key = obj;
+ if (!key->ctcam &&
+ root_id != OBJAGG_OBJ_ROOT_ID_INVALID &&
+ root_id >= MLXSW_SP_ACL_ERP_MAX_PER_REGION)
+ return ERR_PTR(-ENOBUFS);
return erp_table->ops->erp_create(erp_table, key);
}
@@ -1275,6 +1306,8 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
.obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+ .delta_check = mlxsw_sp_acl_erp_delta_check,
+ .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp,
.delta_create = mlxsw_sp_acl_erp_delta_create,
.delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
.root_create = mlxsw_sp_acl_erp_root_create,
@@ -1282,7 +1315,8 @@ static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
};
static struct mlxsw_sp_acl_erp_table *
-mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
+mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct objagg_hints *hints)
{
struct mlxsw_sp_acl_erp_table *erp_table;
int err;
@@ -1292,7 +1326,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
return ERR_PTR(-ENOMEM);
erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
- aregion);
+ hints, aregion);
if (IS_ERR(erp_table->objagg)) {
err = PTR_ERR(erp_table->objagg);
goto err_objagg_create;
@@ -1302,6 +1336,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
erp_table->ops = &erp_no_mask_ops;
INIT_LIST_HEAD(&erp_table->atcam_erps_list);
erp_table->aregion = aregion;
+ mutex_init(&erp_table->objagg_lock);
return erp_table;
@@ -1314,6 +1349,7 @@ static void
mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
{
WARN_ON(!list_empty(&erp_table->atcam_erps_list));
+ mutex_destroy(&erp_table->objagg_lock);
objagg_destroy(erp_table->objagg);
kfree(erp_table);
}
@@ -1339,12 +1375,93 @@ mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
}
-int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion)
+static int
+mlxsw_sp_acl_erp_hints_check(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct objagg_hints *hints, bool *p_rehash_needed)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ const struct objagg_stats *ostats;
+ const struct objagg_stats *hstats;
+ int err;
+
+ *p_rehash_needed = false;
+
+ mutex_lock(&erp_table->objagg_lock);
+ ostats = objagg_stats_get(erp_table->objagg);
+ mutex_unlock(&erp_table->objagg_lock);
+ if (IS_ERR(ostats)) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP stats\n");
+ return PTR_ERR(ostats);
+ }
+
+ hstats = objagg_hints_stats_get(hints);
+ if (IS_ERR(hstats)) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP hints stats\n");
+ err = PTR_ERR(hstats);
+ goto err_hints_stats_get;
+ }
+
+ /* Very basic criterion for now. */
+ if (hstats->root_count < ostats->root_count)
+ *p_rehash_needed = true;
+
+ err = 0;
+
+ objagg_stats_put(hstats);
+err_hints_stats_get:
+ objagg_stats_put(ostats);
+ return err;
+}
+
+void *
+mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ struct objagg_hints *hints;
+ bool rehash_needed;
+ int err;
+
+ mutex_lock(&erp_table->objagg_lock);
+ hints = objagg_hints_get(erp_table->objagg,
+ OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ mutex_unlock(&erp_table->objagg_lock);
+ if (IS_ERR(hints)) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to create ERP hints\n");
+ return ERR_CAST(hints);
+ }
+ err = mlxsw_sp_acl_erp_hints_check(mlxsw_sp, aregion, hints,
+ &rehash_needed);
+ if (err)
+ goto errout;
+
+ if (!rehash_needed) {
+ err = -EAGAIN;
+ goto errout;
+ }
+ return hints;
+
+errout:
+ objagg_hints_put(hints);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_erp_rehash_hints_put(void *hints_priv)
+{
+ struct objagg_hints *hints = hints_priv;
+
+ objagg_hints_put(hints);
+}
+
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ void *hints_priv)
{
struct mlxsw_sp_acl_erp_table *erp_table;
+ struct objagg_hints *hints = hints_priv;
int err;
- erp_table = mlxsw_sp_acl_erp_table_create(aregion);
+ erp_table = mlxsw_sp_acl_erp_table_create(aregion, hints);
if (IS_ERR(erp_table))
return PTR_ERR(erp_table);
aregion->erp_table = erp_table;
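The ERP changes above let the rehash logic compute a candidate aggregation off-line (objagg hints) and adopt it only when it would strictly reduce the number of roots, otherwise returning -EAGAIN to tell the caller there is nothing to gain. A reduced standalone sketch of that decision, with hypothetical candidate_hints_compute()/current_root_count() helpers standing in for the objagg statistics:

/* Standalone sketch of the "keep the hints only if they are strictly
 * better" decision made above. Helpers and numbers are illustrative.
 */
#include <errno.h>
#include <stdlib.h>

struct hints {
	unsigned int root_count;	/* roots the candidate layout would need */
};

static unsigned int current_root_count(void)
{
	return 8;			/* pretend the live table uses 8 roots */
}

static struct hints *candidate_hints_compute(void)
{
	struct hints *h = malloc(sizeof(*h));

	if (!h)
		return NULL;
	h->root_count = 6;		/* pretend aggregation can do better */
	return h;
}

/* Returns the hints on success, NULL with errno set otherwise.
 * EAGAIN means "nothing to gain, do not rehash now" - the same
 * convention the driver code above uses.
 */
static struct hints *rehash_hints_get(void)
{
	struct hints *h = candidate_hints_compute();

	if (!h) {
		errno = ENOMEM;
		return NULL;
	}
	/* Very basic criterion, mirroring the driver: rehash only if
	 * the candidate layout needs fewer roots than the live one.
	 */
	if (h->root_count >= current_root_count()) {
		free(h);
		errno = EAGAIN;
		return NULL;
	}
	return h;
}

int main(void)
{
	struct hints *h = rehash_hints_get();

	if (!h)
		return errno == EAGAIN ? 0 : 1;	/* nothing to improve */
	/* ...a rehash would be scheduled here... */
	free(h);
	return 0;
}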
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index fe230acf92a9..8811f6513e36 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -8,6 +8,8 @@
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <trace/events/mlxsw.h>
#include "reg.h"
#include "core.h"
@@ -23,6 +25,10 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
return ops->priv_size;
}
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
+
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam)
{
@@ -33,6 +39,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
size_t alloc_size;
int err;
+ mutex_init(&tcam->lock);
+ tcam->vregion_rehash_intrvl =
+ MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
+ INIT_LIST_HEAD(&tcam->vregion_list);
+
max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_TCAM_REGIONS);
max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
@@ -76,6 +87,7 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ mutex_destroy(&tcam->lock);
ops->fini(mlxsw_sp, tcam->priv);
kfree(tcam->used_groups);
kfree(tcam->used_regions);
@@ -153,37 +165,100 @@ struct mlxsw_sp_acl_tcam_pattern {
struct mlxsw_sp_acl_tcam_group {
struct mlxsw_sp_acl_tcam *tcam;
u16 id;
+ struct mutex lock; /* guards region list updates */
struct list_head region_list;
unsigned int region_count;
- struct rhashtable chunk_ht;
- struct mlxsw_sp_acl_tcam_group_ops *ops;
+};
+
+struct mlxsw_sp_acl_tcam_vgroup {
+ struct mlxsw_sp_acl_tcam_group group;
+ struct list_head vregion_list;
+ struct rhashtable vchunk_ht;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
bool tmplt_elusage_set;
struct mlxsw_afk_element_usage tmplt_elusage;
+ bool vregion_rehash_enabled;
};
-struct mlxsw_sp_acl_tcam_chunk {
- struct list_head list; /* Member of a TCAM region */
- struct rhash_head ht_node; /* Member of a chunk HT */
- unsigned int priority; /* Priority within the region and group */
- struct mlxsw_sp_acl_tcam_group *group;
+struct mlxsw_sp_acl_tcam_rehash_ctx {
+ void *hints_priv;
+ bool this_is_rollback;
+ struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk currently
+ * being migrated.
+ */
+ struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry from which to
+ * start migration in the
+ * vchunk currently being
+ * migrated.
+ */
+ struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry at which to
+ * stop migration in the
+ * vchunk currently being
+ * migrated.
+ */
+};
+
+struct mlxsw_sp_acl_tcam_vregion {
+ struct mutex lock; /* Protects consistency of region, region2 pointers
+ * and vchunk_list.
+ */
struct mlxsw_sp_acl_tcam_region *region;
+ struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
+ struct list_head list; /* Member of a TCAM group */
+ struct list_head tlist; /* Member of a TCAM */
+ struct list_head vchunk_list; /* List of vchunks under this vregion */
+ struct mlxsw_afk_key_info *key_info;
+ struct mlxsw_sp_acl_tcam *tcam;
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup;
+ struct {
+ struct delayed_work dw;
+ struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
+ } rehash;
+ struct mlxsw_sp *mlxsw_sp;
+ bool failed_rollback; /* Indicates failed rollback during migration */
unsigned int ref_count;
+};
+
+struct mlxsw_sp_acl_tcam_vchunk;
+
+struct mlxsw_sp_acl_tcam_chunk {
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct mlxsw_sp_acl_tcam_region *region;
unsigned long priv[0];
/* priv has to be always the last item */
};
+struct mlxsw_sp_acl_tcam_vchunk {
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
+ struct list_head list; /* Member of a TCAM vregion */
+ struct rhash_head ht_node; /* Member of a chunk HT */
+ struct list_head ventry_list;
+ unsigned int priority; /* Priority within the vregion and group */
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ unsigned int ref_count;
+};
+
struct mlxsw_sp_acl_tcam_entry {
+ struct mlxsw_sp_acl_tcam_ventry *ventry;
struct mlxsw_sp_acl_tcam_chunk *chunk;
unsigned long priv[0];
/* priv has to be always the last item */
};
-static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
+struct mlxsw_sp_acl_tcam_ventry {
+ struct mlxsw_sp_acl_tcam_entry *entry;
+ struct list_head list; /* Member of a TCAM vchunk */
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct mlxsw_sp_acl_rule_info *rulei;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
.key_len = sizeof(unsigned int),
- .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
- .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
+ .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
+ .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
.automatic_shrinking = true,
};
@@ -195,55 +270,90 @@ static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
int acl_index = 0;
mlxsw_reg_pagt_pack(pagt_pl, group->id);
- list_for_each_entry(region, &group->region_list, list)
- mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
+ list_for_each_entry(region, &group->region_list, list) {
+ bool multi = false;
+
+ /* Check if the next entry in the list has the same vregion. */
+ if (region->list.next != &group->region_list &&
+ list_next_entry(region, list)->vregion == region->vregion)
+ multi = true;
+ mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
+ region->id, multi);
+ }
mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
static int
-mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam,
- struct mlxsw_sp_acl_tcam_group *group,
- const struct mlxsw_sp_acl_tcam_pattern *patterns,
- unsigned int patterns_count,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
+ struct mlxsw_sp_acl_tcam_group *group)
{
int err;
group->tcam = tcam;
- group->patterns = patterns;
- group->patterns_count = patterns_count;
- if (tmplt_elusage) {
- group->tmplt_elusage_set = true;
- memcpy(&group->tmplt_elusage, tmplt_elusage,
- sizeof(group->tmplt_elusage));
- }
+ mutex_init(&group->lock);
INIT_LIST_HEAD(&group->region_list);
+
err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
if (err)
return err;
- err = rhashtable_init(&group->chunk_ht,
- &mlxsw_sp_acl_tcam_chunk_ht_params);
+ return 0;
+}
+
+static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
+{
+ struct mlxsw_sp_acl_tcam *tcam = group->tcam;
+
+ mutex_destroy(&group->lock);
+ mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+ WARN_ON(!list_empty(&group->region_list));
+}
+
+static int
+mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ const struct mlxsw_sp_acl_tcam_pattern *patterns,
+ unsigned int patterns_count,
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ bool vregion_rehash_enabled)
+{
+ int err;
+
+ vgroup->patterns = patterns;
+ vgroup->patterns_count = patterns_count;
+ vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
+
+ if (tmplt_elusage) {
+ vgroup->tmplt_elusage_set = true;
+ memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
+ sizeof(vgroup->tmplt_elusage));
+ }
+ INIT_LIST_HEAD(&vgroup->vregion_list);
+
+ err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&vgroup->vchunk_ht,
+ &mlxsw_sp_acl_tcam_vchunk_ht_params);
if (err)
goto err_rhashtable_init;
return 0;
err_rhashtable_init:
- mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+ mlxsw_sp_acl_tcam_group_del(&vgroup->group);
return err;
}
-static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group)
+static void
+mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
- struct mlxsw_sp_acl_tcam *tcam = group->tcam;
-
- rhashtable_destroy(&group->chunk_ht);
- mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
- WARN_ON(!list_empty(&group->region_list));
+ rhashtable_destroy(&vgroup->vchunk_ht);
+ mlxsw_sp_acl_tcam_group_del(&vgroup->group);
+ WARN_ON(!list_empty(&vgroup->vregion_list));
}
static int
@@ -283,146 +393,194 @@ mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
}
static unsigned int
-mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
+mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
- if (list_empty(&region->chunk_list))
+ if (list_empty(&vregion->vchunk_list))
return 0;
- /* As a priority of a region, return priority of the first chunk */
- chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
- return chunk->priority;
+ /* As a priority of a vregion, return priority of the first vchunk */
+ vchunk = list_first_entry(&vregion->vchunk_list,
+ typeof(*vchunk), list);
+ return vchunk->priority;
}
static unsigned int
-mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
+mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
- if (list_empty(&region->chunk_list))
+ if (list_empty(&vregion->vchunk_list))
return 0;
- chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
- return chunk->priority;
+ vchunk = list_last_entry(&vregion->vchunk_list,
+ typeof(*vchunk), list);
+ return vchunk->priority;
}
-static void
-mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
- struct mlxsw_sp_acl_tcam_region *region)
+static int
+mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int priority,
+ struct mlxsw_sp_acl_tcam_region *next_region)
{
struct mlxsw_sp_acl_tcam_region *region2;
struct list_head *pos;
+ int err;
- /* Position the region inside the list according to priority */
- list_for_each(pos, &group->region_list) {
- region2 = list_entry(pos, typeof(*region2), list);
- if (mlxsw_sp_acl_tcam_region_prio(region2) >
- mlxsw_sp_acl_tcam_region_prio(region))
- break;
+ mutex_lock(&group->lock);
+ if (group->region_count == group->tcam->max_group_size) {
+ err = -ENOBUFS;
+ goto err_region_count_check;
+ }
+
+ if (next_region) {
+ /* If the next region is defined, place the new one
+ * before it. The next one is a sibling.
+ */
+ pos = &next_region->list;
+ } else {
+ /* Position the region inside the list according to priority */
+ list_for_each(pos, &group->region_list) {
+ region2 = list_entry(pos, typeof(*region2), list);
+ if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
+ priority)
+ break;
+ }
}
list_add_tail(&region->list, pos);
+ region->group = group;
+
+ err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ if (err)
+ goto err_group_update;
+
group->region_count++;
+ mutex_unlock(&group->lock);
+ return 0;
+
+err_group_update:
+ list_del(&region->list);
+err_region_count_check:
+ mutex_unlock(&group->lock);
+ return err;
}
static void
-mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
- struct mlxsw_sp_acl_tcam_region *region)
+mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
{
- group->region_count--;
+ struct mlxsw_sp_acl_tcam_group *group = region->group;
+
+ mutex_lock(&group->lock);
list_del(&region->list);
+ group->region_count--;
+ mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ mutex_unlock(&group->lock);
}
static int
-mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group,
- struct mlxsw_sp_acl_tcam_region *region)
+mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ unsigned int priority)
{
+ struct mlxsw_sp_acl_tcam_vregion *vregion2;
+ struct list_head *pos;
int err;
- if (group->region_count == group->tcam->max_group_size)
- return -ENOBUFS;
-
- mlxsw_sp_acl_tcam_group_list_add(group, region);
+ /* Position the vregion inside the list according to priority */
+ list_for_each(pos, &vgroup->vregion_list) {
+ vregion2 = list_entry(pos, typeof(*vregion2), list);
+ if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
+ break;
+ }
+ list_add_tail(&vregion->list, pos);
- err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
+ vregion->region,
+ priority, NULL);
if (err)
- goto err_group_update;
- region->group = group;
+ goto err_region_attach;
return 0;
-err_group_update:
- mlxsw_sp_acl_tcam_group_list_del(group, region);
- mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+err_region_attach:
+ list_del(&vregion->list);
return err;
}
static void
-mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
+mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- struct mlxsw_sp_acl_tcam_group *group = region->group;
-
- mlxsw_sp_acl_tcam_group_list_del(group, region);
- mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+ list_del(&vregion->list);
+ if (vregion->region2)
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
+ vregion->region2);
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}
-static struct mlxsw_sp_acl_tcam_region *
-mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
- unsigned int priority,
- struct mlxsw_afk_element_usage *elusage,
- bool *p_need_split)
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage,
+ bool *p_need_split)
{
- struct mlxsw_sp_acl_tcam_region *region, *region2;
+ struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
struct list_head *pos;
bool issubset;
- list_for_each(pos, &group->region_list) {
- region = list_entry(pos, typeof(*region), list);
+ list_for_each(pos, &vgroup->vregion_list) {
+ vregion = list_entry(pos, typeof(*vregion), list);
/* First, check if the requested priority does not rather belong
- * under some of the next regions.
+ * under some of the next vregions.
*/
- if (pos->next != &group->region_list) { /* not last */
- region2 = list_entry(pos->next, typeof(*region2), list);
- if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
+ if (pos->next != &vgroup->vregion_list) { /* not last */
+ vregion2 = list_entry(pos->next, typeof(*vregion2),
+ list);
+ if (priority >=
+ mlxsw_sp_acl_tcam_vregion_prio(vregion2))
continue;
}
- issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);
+ issubset = mlxsw_afk_key_info_subset(vregion->key_info,
+ elusage);
/* If requested element usage would not fit and the priority
- * is lower than the currently inspected region we cannot
- * use this region, so return NULL to indicate new region has
+ * is lower than the currently inspected vregion we cannot
+ * use this vregion, so return NULL to indicate a new vregion has
* to be created.
*/
if (!issubset &&
- priority < mlxsw_sp_acl_tcam_region_prio(region))
+ priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
return NULL;
/* If requested element usage would not fit and the priority
- * is higher than the currently inspected region we cannot
- * use this region. There is still some hope that the next
- * region would be the fit. So let it be processed and
+ * is higher than the currently inspected vregion we cannot
+ * use this vregion. There is still some hope that the next
+ * vregion would be the fit. So let it be processed and
* eventually break at the check right above this.
*/
if (!issubset &&
- priority > mlxsw_sp_acl_tcam_region_max_prio(region))
+ priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
continue;
- /* Indicate if the region needs to be split in order to add
+ /* Indicate if the vregion needs to be split in order to add
* the requested priority. Split is needed when requested
- * element usage won't fit into the found region.
+ * element usage won't fit into the found vregion.
*/
*p_need_split = !issubset;
- return region;
+ return vregion;
}
- return NULL; /* New region has to be created. */
+ return NULL; /* New vregion has to be created. */
}
static void
-mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
- struct mlxsw_afk_element_usage *elusage,
- struct mlxsw_afk_element_usage *out)
+mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ struct mlxsw_afk_element_usage *elusage,
+ struct mlxsw_afk_element_usage *out)
{
const struct mlxsw_sp_acl_tcam_pattern *pattern;
int i;
@@ -430,14 +588,14 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
/* In case the template is set, we don't have to look up the pattern
* and just use the template.
*/
- if (group->tmplt_elusage_set) {
- memcpy(out, &group->tmplt_elusage, sizeof(*out));
+ if (vgroup->tmplt_elusage_set) {
+ memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
return;
}
- for (i = 0; i < group->patterns_count; i++) {
- pattern = &group->patterns[i];
+ for (i = 0; i < vgroup->patterns_count; i++) {
+ pattern = &vgroup->patterns[i];
mlxsw_afk_element_usage_fill(out, pattern->elements,
pattern->elements_count);
if (mlxsw_afk_element_usage_subset(elusage, out))
@@ -511,24 +669,19 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
- struct mlxsw_afk_element_usage *elusage)
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ void *hints_priv)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_tcam_region *region;
int err;
region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
if (!region)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&region->chunk_list);
region->mlxsw_sp = mlxsw_sp;
-
- region->key_info = mlxsw_afk_key_info_get(afk, elusage);
- if (IS_ERR(region->key_info)) {
- err = PTR_ERR(region->key_info);
- goto err_key_info_get;
- }
+ region->vregion = vregion;
+ region->key_info = vregion->key_info;
err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
if (err)
@@ -547,7 +700,8 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tcam_region_enable;
- err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
+ err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
+ region, hints_priv);
if (err)
goto err_tcam_region_init;
@@ -561,8 +715,6 @@ err_tcam_region_alloc:
err_tcam_region_associate:
mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
- mlxsw_afk_key_info_put(region->key_info);
-err_key_info_get:
kfree(region);
return ERR_PTR(err);
}
@@ -576,220 +728,415 @@ mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
- mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
- mlxsw_afk_key_info_put(region->key_info);
+ mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
+ region->id);
kfree(region);
}
-static int
-mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group,
- unsigned int priority,
- struct mlxsw_afk_element_usage *elusage,
- struct mlxsw_sp_acl_tcam_chunk *chunk)
+static void
+mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- struct mlxsw_sp_acl_tcam_region *region;
- bool region_created = false;
- bool need_split;
- int err;
+ unsigned long interval = vregion->tcam->vregion_rehash_intrvl;
+
+ if (!interval)
+ return;
+ mlxsw_core_schedule_dw(&vregion->rehash.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ int *credits);
- region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
- &need_split);
- if (region && need_split) {
- /* According to priority, the chunk should belong to an
- * existing region. However, this chunk needs elements
- * that region does not contain. We need to split the existing
- * region into two and create a new region for this chunk
- * in between. This is not supported now.
+static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion =
+ container_of(work, struct mlxsw_sp_acl_tcam_vregion,
+ rehash.dw.work);
+ int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+
+ mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ if (credits < 0)
+ /* Rehash ran out of credits, so it was interrupted.
+ * Schedule the work as soon as possible to continue.
*/
- return -EOPNOTSUPP;
- }
- if (!region) {
- struct mlxsw_afk_element_usage region_elusage;
-
- mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
- &region_elusage);
- region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
- &region_elusage);
- if (IS_ERR(region))
- return PTR_ERR(region);
- region_created = true;
+ mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+ else
+ mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+
+ /* If a rule was added or deleted from vchunk which is currently
+ * under rehash migration, we have to reset the ventry pointers
+ * to make sure all rules are properly migrated.
+ */
+ if (vregion->rehash.ctx.current_vchunk == vchunk) {
+ vregion->rehash.ctx.start_ventry = NULL;
+ vregion->rehash.ctx.stop_ventry = NULL;
}
+}
- chunk->region = region;
- list_add_tail(&chunk->list, &region->chunk_list);
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ /* If a vchunk was added or deleted from the vregion we have to reset
+ * the current vchunk pointer to make sure all vchunks
+ * are properly migrated.
+ */
+ vregion->rehash.ctx.current_vchunk = NULL;
+}
- if (!region_created)
- return 0;
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ int err;
+
+ vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
+ if (!vregion)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&vregion->vchunk_list);
+ mutex_init(&vregion->lock);
+ vregion->tcam = tcam;
+ vregion->mlxsw_sp = mlxsw_sp;
+ vregion->vgroup = vgroup;
+ vregion->ref_count = 1;
+
+ vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
+ if (IS_ERR(vregion->key_info)) {
+ err = PTR_ERR(vregion->key_info);
+ goto err_key_info_get;
+ }
- err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
+ vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
+ vregion, NULL);
+ if (IS_ERR(vregion->region)) {
+ err = PTR_ERR(vregion->region);
+ goto err_region_create;
+ }
+
+ err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
+ priority);
if (err)
- goto err_group_region_attach;
+ goto err_vgroup_vregion_attach;
+
+ if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ /* Create the delayed work for vregion periodic rehash */
+ INIT_DELAYED_WORK(&vregion->rehash.dw,
+ mlxsw_sp_acl_tcam_vregion_rehash_work);
+ mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+ mutex_lock(&tcam->lock);
+ list_add_tail(&vregion->tlist, &tcam->vregion_list);
+ mutex_unlock(&tcam->lock);
+ }
- return 0;
+ return vregion;
-err_group_region_attach:
- mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
- return err;
+err_vgroup_vregion_attach:
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
+err_region_create:
+ mlxsw_afk_key_info_put(vregion->key_info);
+err_key_info_get:
+ kfree(vregion);
+ return ERR_PTR(err);
}
static void
-mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_chunk *chunk)
+mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
+ struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
+
+ if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ mutex_lock(&tcam->lock);
+ list_del(&vregion->tlist);
+ mutex_unlock(&tcam->lock);
+ cancel_delayed_work_sync(&vregion->rehash.dw);
+ }
+ mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
+ if (vregion->region2)
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
+ mlxsw_afk_key_info_put(vregion->key_info);
+ mutex_destroy(&vregion->lock);
+ kfree(vregion);
+}
+
+u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ u32 vregion_rehash_intrvl;
+
+ if (WARN_ON(!ops->region_rehash_hints_get))
+ return 0;
+ vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
+ return vregion_rehash_intrvl;
+}
+
+int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ u32 val)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+
+ if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
+ return -EINVAL;
+ if (WARN_ON(!ops->region_rehash_hints_get))
+ return -EOPNOTSUPP;
+ tcam->vregion_rehash_intrvl = val;
+ mutex_lock(&tcam->lock);
+ list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
+ if (val)
+ mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+ else
+ cancel_delayed_work_sync(&vregion->rehash.dw);
+ }
+ mutex_unlock(&tcam->lock);
+ return 0;
+}
+
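The setter just above accepts either 0, which disables the periodic rehash and cancels any delayed work already queued, or a value no smaller than MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN milliseconds. A minimal standalone sketch of that validation rule, using a hypothetical INTERVAL_MIN_MS in place of the driver's constant:

#include <stdbool.h>
#include <stdio.h>

#define INTERVAL_MIN_MS 3000 /* hypothetical stand-in for the driver's minimum */

/* 0 disables the periodic work; any other value must be at least the minimum. */
static bool rehash_interval_valid(unsigned int val_ms)
{
	return val_ms == 0 || val_ms >= INTERVAL_MIN_MS;
}

int main(void)
{
	unsigned int tests[] = { 0, 100, 3000, 60000 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%u ms -> %s\n", tests[i],
		       rehash_interval_valid(tests[i]) ? "ok" : "-EINVAL");
	return 0;
}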
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
{
- struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+ struct mlxsw_afk_element_usage vregion_elusage;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ bool need_split;
- list_del(&chunk->list);
- if (list_empty(&region->chunk_list)) {
- mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
- mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
+ vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
+ elusage, &need_split);
+ if (vregion) {
+ if (need_split) {
+ /* According to priority, new vchunk should belong to
+ * an existing vregion. However, this vchunk needs
+ * elements that vregion does not contain. We need
+ * to split the existing vregion into two and create
+ * a new vregion for the new vchunk in between.
+ * This is not supported now.
+ */
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ vregion->ref_count++;
+ return vregion;
}
+
+ mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
+ &vregion_elusage);
+
+ return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
+ &vregion_elusage);
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+ if (--vregion->ref_count)
+ return;
+ mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group,
- unsigned int priority,
- struct mlxsw_afk_element_usage *elusage)
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+ struct mlxsw_sp_acl_tcam_region *region)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
+
+ chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
+ if (!chunk)
+ return ERR_PTR(-ENOMEM);
+ chunk->vchunk = vchunk;
+ chunk->region = region;
+
+ ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
+ return chunk;
+}
+
+static void
+mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->chunk_fini(chunk->priv);
+ kfree(chunk);
+}
+
+static struct mlxsw_sp_acl_tcam_vchunk *
+mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
return ERR_PTR(-EINVAL);
- chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
- if (!chunk)
+ vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
+ if (!vchunk)
return ERR_PTR(-ENOMEM);
- chunk->priority = priority;
- chunk->group = group;
- chunk->ref_count = 1;
-
- err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
- elusage, chunk);
- if (err)
- goto err_chunk_assoc;
+ INIT_LIST_HEAD(&vchunk->ventry_list);
+ vchunk->priority = priority;
+ vchunk->vgroup = vgroup;
+ vchunk->ref_count = 1;
+
+ vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
+ priority, elusage);
+ if (IS_ERR(vregion)) {
+ err = PTR_ERR(vregion);
+ goto err_vregion_get;
+ }
- ops->chunk_init(chunk->region->priv, chunk->priv, priority);
+ vchunk->vregion = vregion;
- err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
- mlxsw_sp_acl_tcam_chunk_ht_params);
+ err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+ mlxsw_sp_acl_tcam_vchunk_ht_params);
if (err)
goto err_rhashtable_insert;
- return chunk;
+ mutex_lock(&vregion->lock);
+ vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
+ vchunk->vregion->region);
+ if (IS_ERR(vchunk->chunk)) {
+ mutex_unlock(&vregion->lock);
+ err = PTR_ERR(vchunk->chunk);
+ goto err_chunk_create;
+ }
+ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
+ list_add_tail(&vchunk->list, &vregion->vchunk_list);
+ mutex_unlock(&vregion->lock);
+
+ return vchunk;
+
+err_chunk_create:
+ rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+ mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
- ops->chunk_fini(chunk->priv);
- mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
-err_chunk_assoc:
- kfree(chunk);
+ mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
+err_vregion_get:
+ kfree(vchunk);
return ERR_PTR(err);
}
static void
-mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_chunk *chunk)
+mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_sp_acl_tcam_group *group = chunk->group;
-
- rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
- mlxsw_sp_acl_tcam_chunk_ht_params);
- ops->chunk_fini(chunk->priv);
- mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
- kfree(chunk);
+ struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
+
+ mutex_lock(&vregion->lock);
+ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
+ list_del(&vchunk->list);
+ if (vchunk->chunk2)
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
+ mutex_unlock(&vregion->lock);
+ rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+ mlxsw_sp_acl_tcam_vchunk_ht_params);
+ mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
+ kfree(vchunk);
}
-static struct mlxsw_sp_acl_tcam_chunk *
-mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group,
- unsigned int priority,
- struct mlxsw_afk_element_usage *elusage)
+static struct mlxsw_sp_acl_tcam_vchunk *
+mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ unsigned int priority,
+ struct mlxsw_afk_element_usage *elusage)
{
- struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
- chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
- mlxsw_sp_acl_tcam_chunk_ht_params);
- if (chunk) {
- if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
+ vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
+ mlxsw_sp_acl_tcam_vchunk_ht_params);
+ if (vchunk) {
+ if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
elusage)))
return ERR_PTR(-EINVAL);
- chunk->ref_count++;
- return chunk;
+ vchunk->ref_count++;
+ return vchunk;
}
- return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
- priority, elusage);
+ return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
+ priority, elusage);
}
-static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_chunk *chunk)
+static void
+mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
- if (--chunk->ref_count)
+ if (--vchunk->ref_count)
return;
- mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
-}
-
-static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-
- return ops->entry_priv_size;
+ mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
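The vchunk_get()/vchunk_put() pair above is a get-or-create with reference counting: the priority lookup either bumps ref_count on an existing vchunk or creates a new one holding the first reference, and only the final put tears it down. A condensed userspace sketch of that lifetime rule, with a toy array standing in for the rhashtable:

#include <stdio.h>
#include <stdlib.h>

struct vchunk {
	unsigned int priority;
	unsigned int ref_count;
};

static struct vchunk *table[16]; /* toy stand-in for the priority-keyed rhashtable */

static struct vchunk *vchunk_get(unsigned int priority)
{
	struct vchunk *vchunk = table[priority % 16];

	if (vchunk) {
		vchunk->ref_count++;	/* existing object, just take a reference */
		return vchunk;
	}
	vchunk = calloc(1, sizeof(*vchunk));
	if (!vchunk)
		return NULL;
	vchunk->priority = priority;
	vchunk->ref_count = 1;		/* creator holds the first reference */
	table[priority % 16] = vchunk;
	return vchunk;
}

static void vchunk_put(struct vchunk *vchunk)
{
	if (--vchunk->ref_count)
		return;			/* still referenced elsewhere */
	table[vchunk->priority % 16] = NULL;
	free(vchunk);
}

int main(void)
{
	struct vchunk *a = vchunk_get(7);
	struct vchunk *b = vchunk_get(7);	/* same priority, same object */

	printf("shared=%d refs=%u\n", a == b, a->ref_count);
	vchunk_put(b);
	vchunk_put(a);				/* last put frees the object */
	return 0;
}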
-static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group,
- struct mlxsw_sp_acl_tcam_entry *entry,
- struct mlxsw_sp_acl_rule_info *rulei)
+static struct mlxsw_sp_acl_tcam_entry *
+mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ struct mlxsw_sp_acl_tcam_chunk *chunk)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_sp_acl_tcam_chunk *chunk;
- struct mlxsw_sp_acl_tcam_region *region;
+ struct mlxsw_sp_acl_tcam_entry *entry;
int err;
- chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
- &rulei->values.elusage);
- if (IS_ERR(chunk))
- return PTR_ERR(chunk);
-
- region = chunk->region;
+ entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+ entry->ventry = ventry;
+ entry->chunk = chunk;
- err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
- entry->priv, rulei);
+ err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
+ entry->priv, ventry->rulei);
if (err)
goto err_entry_add;
- entry->chunk = chunk;
- return 0;
+ return entry;
err_entry_add:
- mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
- return err;
+ kfree(entry);
+ return ERR_PTR(err);
}
-static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_entry *entry)
+static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_entry *entry)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
- struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
- mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
+ ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
+ entry->chunk->priv, entry->priv);
+ kfree(entry);
}
static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_region *region,
struct mlxsw_sp_acl_tcam_entry *entry,
struct mlxsw_sp_acl_rule_info *rulei)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
- struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- return ops->entry_action_replace(mlxsw_sp, region->priv, chunk->priv,
+ return ops->entry_action_replace(mlxsw_sp, region->priv,
entry->priv, rulei);
}
@@ -799,13 +1146,377 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
bool *activity)
{
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
- struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- return ops->entry_activity_get(mlxsw_sp, region->priv,
+ return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
entry->priv, activity);
}
+static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ int err;
+
+ vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
+ &rulei->values.elusage);
+ if (IS_ERR(vchunk))
+ return PTR_ERR(vchunk);
+
+ ventry->vchunk = vchunk;
+ ventry->rulei = rulei;
+ vregion = vchunk->vregion;
+
+ mutex_lock(&vregion->lock);
+ ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
+ vchunk->chunk);
+ if (IS_ERR(ventry->entry)) {
+ mutex_unlock(&vregion->lock);
+ err = PTR_ERR(ventry->entry);
+ goto err_entry_create;
+ }
+
+ list_add_tail(&ventry->list, &vchunk->ventry_list);
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
+ mutex_unlock(&vregion->lock);
+
+ return 0;
+
+err_entry_create:
+ mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
+ return err;
+}
+
+static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
+ struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+
+ mutex_lock(&vregion->lock);
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
+ list_del(&ventry->list);
+ mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
+ mutex_unlock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
+
+ return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
+ vchunk->vregion->region,
+ ventry->entry, rulei);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ bool *activity)
+{
+ return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
+ ventry->entry, activity);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ struct mlxsw_sp_acl_tcam_chunk *chunk,
+ int *credits)
+{
+ struct mlxsw_sp_acl_tcam_entry *new_entry;
+
+ /* First check if the entry is not already where we want it to be. */
+ if (ventry->entry->chunk == chunk)
+ return 0;
+
+ if (--(*credits) < 0)
+ return 0;
+
+ new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
+ if (IS_ERR(new_entry))
+ return PTR_ERR(new_entry);
+ mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
+ ventry->entry = new_entry;
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+ struct mlxsw_sp_acl_tcam_region *region,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+
+ new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
+ if (IS_ERR(new_chunk)) {
+ if (ctx->this_is_rollback)
+ vchunk->vregion->failed_rollback = true;
+ return PTR_ERR(new_chunk);
+ }
+ vchunk->chunk2 = vchunk->chunk;
+ vchunk->chunk = new_chunk;
+ ctx->current_vchunk = vchunk;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+ vchunk->chunk2 = NULL;
+ ctx->current_vchunk = NULL;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+ struct mlxsw_sp_acl_tcam_region *region,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+ int *credits)
+{
+ struct mlxsw_sp_acl_tcam_ventry *ventry;
+ int err;
+
+ if (vchunk->chunk->region != region) {
+ err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
+ region, ctx);
+ if (err)
+ return err;
+ } else if (!vchunk->chunk2) {
+ /* The chunk is already as it should be, nothing to do. */
+ return 0;
+ }
+
+ /* If the migration got interrupted, we have the ventry to start from
+ * stored in context.
+ */
+ if (ctx->start_ventry)
+ ventry = ctx->start_ventry;
+ else
+ ventry = list_first_entry(&vchunk->ventry_list,
+ typeof(*ventry), list);
+
+ list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
+ /* During rollback, once we reach the ventry that failed
+ * to migrate, we are done.
+ */
+ if (ventry == ctx->stop_ventry)
+ break;
+
+ err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
+ vchunk->chunk, credits);
+ if (err) {
+ if (ctx->this_is_rollback)
+ return err;
+ /* Swap the chunk and chunk2 pointers so the follow-up
+ * rollback call will see the original chunk pointer
+ * in vchunk->chunk.
+ */
+ swap(vchunk->chunk, vchunk->chunk2);
+ /* The rollback has to be done from the beginning of the
+ * chunk, that is why we have to null the start_ventry.
+ * However, we know where to stop the rollback,
+ * at the current ventry.
+ */
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = ventry;
+ return err;
+ } else if (*credits < 0) {
+ /* We are out of credits, the rest of the ventries
+ * will be migrated later. Save the ventry
+ * which we ended with.
+ */
+ ctx->start_ventry = ventry;
+ return 0;
+ }
+ }
+
+ mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+ int *credits)
+{
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ int err;
+
+ /* If the migration got interrupted, we have the vchunk
+ * we are working on stored in context.
+ */
+ if (ctx->current_vchunk)
+ vchunk = ctx->current_vchunk;
+ else
+ vchunk = list_first_entry(&vregion->vchunk_list,
+ typeof(*vchunk), list);
+
+ list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
+ err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
+ vregion->region,
+ ctx, credits);
+ if (err || *credits < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+ int *credits)
+{
+ int err, err2;
+
+ trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err) {
+ /* In case migration was not successful, we need to swap
+ * so the original region pointer is assigned again
+ * to vregion->region.
+ */
+ swap(vregion->region, vregion->region2);
+ ctx->current_vchunk = NULL;
+ ctx->this_is_rollback = true;
+ err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err2)
+ vregion->failed_rollback = true;
+ }
+ mutex_unlock(&vregion->lock);
+ trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
+ return err;
+}
+
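mlxsw_sp_acl_tcam_vregion_migrate() above recovers from a failed migration by swapping the region and region2 pointers and replaying the same migrate-all pass in the opposite direction, stopping at the entry that failed; only a failure of that second pass marks failed_rollback. A self-contained sketch of the swap-and-replay idea, with plain integer arrays standing in for TCAM regions:

#include <stdbool.h>
#include <stdio.h>

#define N 5

/* Copy up to count entries from src to dst; fail_at injects an error and the
 * return value reports how many entries actually moved.
 */
static int migrate(int *src, int *dst, int count, int fail_at)
{
	int i;

	for (i = 0; i < count; i++) {
		if (i == fail_at)
			break;
		dst[i] = src[i];
		src[i] = 0;
	}
	return i;
}

int main(void)
{
	int old_region[N] = { 1, 2, 3, 4, 5 };
	int new_region[N] = { 0 };
	bool failed_rollback = false;
	int moved;

	moved = migrate(old_region, new_region, N, 3);
	if (moved < N) {
		/* Forward pass failed part way: move the already-migrated
		 * entries back, stopping where the forward pass stopped
		 * (the driver's stop_ventry). A failure here is what would
		 * set failed_rollback.
		 */
		if (migrate(new_region, old_region, moved, -1) != moved)
			failed_rollback = true;
	}
	printf("moved=%d failed_rollback=%d old[0]=%d old[3]=%d\n",
	       moved, failed_rollback, old_region[0], old_region[3]);
	return 0;
}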
+static bool
+mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ return ctx->hints_priv;
+}
+
+static int
+mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+ struct mlxsw_sp_acl_tcam_region *new_region;
+ void *hints_priv;
+ int err;
+
+ trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
+ if (vregion->failed_rollback)
+ return -EBUSY;
+
+ hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
+ if (IS_ERR(hints_priv))
+ return PTR_ERR(hints_priv);
+
+ new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
+ vregion, hints_priv);
+ if (IS_ERR(new_region)) {
+ err = PTR_ERR(new_region);
+ goto err_region_create;
+ }
+
+ /* vregion->region contains the pointer to the new region
+ * we are going to migrate to.
+ */
+ vregion->region2 = vregion->region;
+ vregion->region = new_region;
+ err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
+ vregion->region2->group,
+ new_region, priority,
+ vregion->region2);
+ if (err)
+ goto err_group_region_attach;
+
+ ctx->hints_priv = hints_priv;
+ ctx->this_is_rollback = false;
+
+ return 0;
+
+err_group_region_attach:
+ vregion->region = vregion->region2;
+ vregion->region2 = NULL;
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
+err_region_create:
+ ops->region_rehash_hints_put(hints_priv);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ if (!vregion->failed_rollback) {
+ vregion->region2 = NULL;
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
+ }
+ ops->region_rehash_hints_put(ctx->hints_priv);
+ ctx->hints_priv = NULL;
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion,
+ int *credits)
+{
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+ int err;
+
+ /* Check if the previous rehash work was interrupted
+ * which means we have to continue it now.
+ * If not, start a new rehash.
+ */
+ if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
+ err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
+ vregion, ctx);
+ if (err) {
+ if (err != -EAGAIN)
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
+ return;
+ }
+ }
+
+ err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ if (vregion->failed_rollback) {
+ trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp,
+ vregion);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration failure\n");
+ }
+ }
+
+ if (*credits >= 0)
+ mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
+}
+
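The rehash worker above runs on a fixed credit budget (MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS): each migrated entry consumes one credit, and when the budget goes negative the work reschedules itself with zero delay to continue, otherwise it re-arms after the configured interval. A small standalone model of that scheduling decision, with a hypothetical budget of three credits per pass:

#include <stdio.h>

#define CREDITS_PER_PASS 3	/* hypothetical stand-in for the driver's budget */

/* Consume one credit per processed entry; return how many entries remain. */
static int do_some_work(int remaining, int *credits)
{
	while (remaining && --(*credits) >= 0)
		remaining--;
	return remaining;
}

int main(void)
{
	int remaining = 8;
	int pass = 0;

	while (remaining) {
		int credits = CREDITS_PER_PASS;

		remaining = do_some_work(remaining, &credits);
		pass++;
		if (credits < 0)
			printf("pass %d: out of credits, reschedule immediately\n", pass);
		else
			printf("pass %d: done, re-arm after the configured interval\n", pass);
	}
	return 0;
}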
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
MLXSW_AFK_ELEMENT_DMAC_32_47,
@@ -856,20 +1567,11 @@ static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
struct mlxsw_sp_acl_tcam_flower_ruleset {
- struct mlxsw_sp_acl_tcam_group group;
+ struct mlxsw_sp_acl_tcam_vgroup vgroup;
};
struct mlxsw_sp_acl_tcam_flower_rule {
- struct mlxsw_sp_acl_tcam_entry entry;
-};
-
-struct mlxsw_sp_acl_tcam_mr_ruleset {
- struct mlxsw_sp_acl_tcam_chunk *chunk;
- struct mlxsw_sp_acl_tcam_group group;
-};
-
-struct mlxsw_sp_acl_tcam_mr_rule {
- struct mlxsw_sp_acl_tcam_entry entry;
+ struct mlxsw_sp_acl_tcam_ventry ventry;
};
static int
@@ -880,10 +1582,10 @@ mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
- mlxsw_sp_acl_tcam_patterns,
- MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage);
+ return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
+ mlxsw_sp_acl_tcam_patterns,
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage, true);
}
static void
@@ -892,7 +1594,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+ mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}
static int
@@ -903,7 +1605,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
+ return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
mlxsw_sp_port, ingress);
}
@@ -915,7 +1617,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
mlxsw_sp_port, ingress);
}
@@ -924,13 +1626,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
-}
-
-static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
-{
- return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
- mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+ return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}
static int
@@ -941,8 +1637,8 @@ mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
- return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
- &rule->entry, rulei);
+ return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
+ &rule->ventry, rulei);
}
static void
@@ -950,12 +1646,11 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
- mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
+ mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}
static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv,
void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
@@ -968,8 +1663,8 @@ mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
- activity);
+ return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
+ activity);
}
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
@@ -979,13 +1674,22 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
- .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
+ .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
.rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
+struct mlxsw_sp_acl_tcam_mr_ruleset {
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ struct mlxsw_sp_acl_tcam_vgroup vgroup;
+};
+
+struct mlxsw_sp_acl_tcam_mr_rule {
+ struct mlxsw_sp_acl_tcam_ventry ventry;
+};
+
static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
@@ -995,10 +1699,10 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
int err;
- err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
- mlxsw_sp_acl_tcam_patterns,
- MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage);
+ err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
+ mlxsw_sp_acl_tcam_patterns,
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage, false);
if (err)
return err;
@@ -1008,17 +1712,18 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
* specific ACL Group ID which must exist in HW before multicast router
* is initialized.
*/
- ruleset->chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, &ruleset->group,
- 1, tmplt_elusage);
- if (IS_ERR(ruleset->chunk)) {
- err = PTR_ERR(ruleset->chunk);
+ ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
+ &ruleset->vgroup, 1,
+ tmplt_elusage);
+ if (IS_ERR(ruleset->vchunk)) {
+ err = PTR_ERR(ruleset->vchunk);
goto err_chunk_get;
}
return 0;
err_chunk_get:
- mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+ mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
return err;
}
@@ -1027,8 +1732,8 @@ mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
- mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, ruleset->chunk);
- mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+ mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
+ mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}
static int
@@ -1053,13 +1758,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
- return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
-}
-
-static size_t mlxsw_sp_acl_tcam_mr_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
-{
- return sizeof(struct mlxsw_sp_acl_tcam_mr_rule) +
- mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+ return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}
static int
@@ -1070,8 +1769,8 @@ mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
- return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
- &rule->entry, rulei);
+ return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
+ &rule->ventry, rulei);
}
static void
@@ -1079,19 +1778,18 @@ mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
- mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
+ mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}
static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv, void *rule_priv,
+ void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei)
{
- struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
- return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &ruleset->group,
- &rule->entry, rulei);
+ return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
+ rulei);
}
static int
@@ -1100,8 +1798,8 @@ mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
- activity);
+ return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
+ activity);
}
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
@@ -1111,7 +1809,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
.ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
.ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
.ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
- .rule_priv_size = mlxsw_sp_acl_tcam_mr_rule_priv_size,
+ .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
.rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
.rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
.rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 0f1a9dee63de..5965913565a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -17,6 +17,9 @@ struct mlxsw_sp_acl_tcam {
unsigned long *used_groups; /* bit array */
unsigned int max_groups;
unsigned int max_group_size;
+ struct mutex lock; /* guards vregion list */
+ struct list_head vregion_list;
+ u32 vregion_rehash_intrvl; /* ms */
unsigned long priv[0];
/* priv has to be always the last item */
};
@@ -26,6 +29,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
+u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ u32 val);
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority);
@@ -43,13 +51,12 @@ struct mlxsw_sp_acl_profile_ops {
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
u16 (*ruleset_group_id)(void *ruleset_priv);
- size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp);
+ size_t rule_priv_size;
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
- int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv, void *rule_priv,
+ int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
bool *activity);
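Because entries are now created per chunk on demand, the per-flavour rule private size no longer needs to fold in ops->entry_priv_size, so rule_priv_size turns from a callback into a plain size_t that the flower and MR profiles fill with sizeof() of their rule structs. A hedged sketch of how a core allocator could consume such a constant; the structure names are illustrative, not the driver's actual layout:

#include <stdlib.h>

struct profile_ops {
	size_t rule_priv_size;	/* a constant now, previously a callback */
};

struct rule {
	int core_state;		/* placeholder for the core's bookkeeping */
	unsigned long priv[];	/* flavour-private area, sized by the ops */
};

/* Size the rule allocation straight from the ops field, no indirect call. */
static struct rule *rule_alloc(const struct profile_ops *ops)
{
	return calloc(1, sizeof(struct rule) + ops->rule_priv_size);
}

int main(void)
{
	struct profile_ops ops = { .rule_priv_size = 64 };
	struct rule *rule = rule_alloc(&ops);

	free(rule);
	return 0;
}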
@@ -68,11 +75,12 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
(MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE)
struct mlxsw_sp_acl_tcam_group;
+struct mlxsw_sp_acl_tcam_vregion;
struct mlxsw_sp_acl_tcam_region {
- struct list_head list; /* Member of a TCAM group */
- struct list_head chunk_list; /* List of chunks under this region */
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
struct mlxsw_sp_acl_tcam_group *group;
+ struct list_head list; /* Member of a TCAM group */
enum mlxsw_reg_ptar_key_type key_type;
u16 id; /* ACL ID and region ID - they are same */
char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
@@ -126,7 +134,6 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_entry *centry);
int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
- struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry,
struct mlxsw_sp_acl_rule_info *rulei);
static inline unsigned int
@@ -163,9 +170,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
- * minus delta bits.
- */
+ char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded
+ * key.
+ */
u8 erp_id;
};
@@ -177,7 +184,9 @@ struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct list_head list; /* Member in entries_list */
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
- char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+ * minus delta bits.
+ */
struct {
u16 start;
u8 mask;
@@ -207,6 +216,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_tcam_region *region,
+ void *hints_priv,
const struct mlxsw_sp_acl_ctcam_region_ops *ops);
void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
@@ -224,13 +234,15 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_entry *aentry);
int mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry,
struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
+void *
+mlxsw_sp_acl_atcam_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_atcam_rehash_hints_put(void *hints_priv);
struct mlxsw_sp_acl_erp_delta;
@@ -261,7 +273,11 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp_mask *erp_mask,
struct mlxsw_sp_acl_atcam_entry *aentry);
-int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
+void *
+mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_erp_rehash_hints_put(void *hints_priv);
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ void *hints_priv);
void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 12c61e0cc570..9a79b5e11597 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -37,13 +37,19 @@ struct mlxsw_sp_sb_pm {
struct mlxsw_cp_sb_occ occ;
};
+struct mlxsw_sp_sb_mm {
+ u32 min_buff;
+ u32 max_buff;
+ u16 pool_index;
+};
+
struct mlxsw_sp_sb_pool_des {
enum mlxsw_reg_sbxx_dir dir;
u8 pool;
};
/* Order ingress pools before egress pools. */
-static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = {
+static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_INGRESS, 0},
{MLXSW_REG_SBXX_DIR_INGRESS, 1},
{MLXSW_REG_SBXX_DIR_INGRESS, 2},
@@ -55,7 +61,16 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_EGRESS, 15},
};
-#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess)
+static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
+ {MLXSW_REG_SBXX_DIR_INGRESS, 0},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 1},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 2},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 0},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 1},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 2},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 3},
+};
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16
@@ -63,16 +78,32 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = {
struct mlxsw_sp_sb_port {
struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
- struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN];
+ struct mlxsw_sp_sb_pm *pms;
};
struct mlxsw_sp_sb {
- struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN];
+ struct mlxsw_sp_sb_pr *prs;
struct mlxsw_sp_sb_port *ports;
u32 cell_size;
+ u32 max_headroom_cells;
u64 sb_size;
};
+struct mlxsw_sp_sb_vals {
+ unsigned int pool_count;
+ const struct mlxsw_sp_sb_pool_des *pool_dess;
+ const struct mlxsw_sp_sb_pm *pms;
+ const struct mlxsw_sp_sb_pr *prs;
+ const struct mlxsw_sp_sb_mm *mms;
+ const struct mlxsw_sp_sb_cm *cms_ingress;
+ const struct mlxsw_sp_sb_cm *cms_egress;
+ const struct mlxsw_sp_sb_cm *cms_cpu;
+ unsigned int mms_count;
+ unsigned int cms_ingress_count;
+ unsigned int cms_egress_count;
+ unsigned int cms_cpu_count;
+};
+
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
return mlxsw_sp->sb->cell_size * cells;
@@ -83,6 +114,11 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
+u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp->sb->max_headroom_cells;
+}
+
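/* Illustrative sketch, not part of the patch: the shared-buffer code in
 * this file converts between bytes and device cells with two different
 * roundings.  Quotas are rounded up (DIV_ROUND_UP() in
 * mlxsw_sp_bytes_cells()) so a request is never undershot, while the new
 * MAX_HEADROOM_SIZE limit is divided with plain integer division, i.e.
 * rounded down, so the limit is never overstepped.  The 96-byte cell
 * size below is only an assumed example; the real value comes from the
 * CELL_SIZE device resource.  Standalone userspace check:
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cell_size = 96;		/* assumed example value */
	unsigned int quota_bytes = 10000;	/* assumed example quota */
	unsigned int max_headroom_bytes = 1000000; /* assumed example limit */

	/* Round up: reserve at least quota_bytes worth of cells. */
	printf("quota cells:    %u\n", DIV_ROUND_UP(quota_bytes, cell_size));
	/* Round down: never allow more cells than the byte limit covers. */
	printf("headroom cells: %u\n", max_headroom_bytes / cell_size);
	return 0;
}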
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
u16 pool_index)
{
@@ -121,7 +157,7 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
u32 size, bool infi_size)
{
const struct mlxsw_sp_sb_pool_des *des =
- &mlxsw_sp_sb_pool_dess[pool_index];
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbpr_pl[MLXSW_REG_SBPR_LEN];
struct mlxsw_sp_sb_pr *pr;
int err;
@@ -145,7 +181,7 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool infi_max, u16 pool_index)
{
const struct mlxsw_sp_sb_pool_des *des =
- &mlxsw_sp_sb_pool_dess[pool_index];
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbcm_pl[MLXSW_REG_SBCM_LEN];
struct mlxsw_sp_sb_cm *cm;
int err;
@@ -174,7 +210,7 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u16 pool_index, u32 min_buff, u32 max_buff)
{
const struct mlxsw_sp_sb_pool_des *des =
- &mlxsw_sp_sb_pool_dess[pool_index];
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
struct mlxsw_sp_sb_pm *pm;
int err;
@@ -195,7 +231,7 @@ static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u16 pool_index, struct list_head *bulk_list)
{
const struct mlxsw_sp_sb_pool_des *des =
- &mlxsw_sp_sb_pool_dess[pool_index];
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
@@ -217,7 +253,7 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u16 pool_index, struct list_head *bulk_list)
{
const struct mlxsw_sp_sb_pool_des *des =
- &mlxsw_sp_sb_pool_dess[pool_index];
+ &mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
struct mlxsw_sp_sb_pm *pm;
@@ -230,24 +266,24 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
(unsigned long) pm);
}
-static const u16 mlxsw_sp_pbs[] = {
- [0] = 2 * ETH_FRAME_LEN,
- [9] = 2 * MLXSW_PORT_MAX_MTU,
-};
-
-#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
+#define MLXSW_SP_PB_HEADROOM 25632
#define MLXSW_SP_PB_UNUSED 8
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ const u32 pbs[] = {
+ [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
+ [9] = 2 * MLXSW_PORT_MAX_MTU,
+ };
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
int i;
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
0xffff, 0xffff / 2);
- for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
- u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);
+ for (i = 0; i < ARRAY_SIZE(pbs); i++) {
+ u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
if (i == MLXSW_SP_PB_UNUSED)
continue;
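/* Illustrative sketch, not part of the patch: with this change the
 * priority-group-0 port buffer scales with the port's width instead of
 * being a fixed 2 * ETH_FRAME_LEN.  MLXSW_SP_PB_HEADROOM is documented
 * above as a quarter of the headroom needed by a 100Gbps port over a
 * 100m cable, so a port using all of its lanes (assumed here to be
 * mapping.width == 4) gets the full amount.  Arithmetic only:
 */
#include <stdio.h>

#define MLXSW_SP_PB_HEADROOM 25632	/* bytes, from the patch above */

int main(void)
{
	unsigned int width;

	for (width = 1; width <= 4; width++)
		printf("width %u -> %6u bytes\n",
		       width, MLXSW_SP_PB_HEADROOM * width);
	/* width 4 -> 102528 bytes, i.e. the full 100G/100m headroom. */
	return 0;
}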
@@ -280,50 +316,119 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
+static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_sb_port *sb_port)
+{
+ struct mlxsw_sp_sb_pm *pms;
+
+ pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
+ GFP_KERNEL);
+ if (!pms)
+ return -ENOMEM;
+ sb_port->pms = pms;
+ return 0;
+}
+
+static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
+{
+ kfree(sb_port->pms);
+}
+
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_sb_pr *prs;
+ int i;
+ int err;
mlxsw_sp->sb->ports = kcalloc(max_ports,
sizeof(struct mlxsw_sp_sb_port),
GFP_KERNEL);
if (!mlxsw_sp->sb->ports)
return -ENOMEM;
+
+ prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
+ GFP_KERNEL);
+ if (!prs) {
+ err = -ENOMEM;
+ goto err_alloc_prs;
+ }
+ mlxsw_sp->sb->prs = prs;
+
+ for (i = 0; i < max_ports; i++) {
+ err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
+ if (err)
+ goto err_sb_port_init;
+ }
+
return 0;
+
+err_sb_port_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
+ kfree(mlxsw_sp->sb->prs);
+err_alloc_prs:
+ kfree(mlxsw_sp->sb->ports);
+ return err;
}
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
+ int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ int i;
+
+ for (i = max_ports - 1; i >= 0; i--)
+ mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
+ kfree(mlxsw_sp->sb->prs);
kfree(mlxsw_sp->sb->ports);
}
-#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
-#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
-#define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000
-
#define MLXSW_SP_SB_PR(_mode, _size) \
{ \
.mode = _mode, \
.size = _size, \
}
-static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = {
+#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
+#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
+#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
+
+static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
/* Ingress pools. */
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP_SB_PR_INGRESS_SIZE),
+ MLXSW_SP1_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
+ MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE),
/* Egress pools. */
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_EGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
};
-#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs)
+#define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000
+#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
+#define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000
+
+static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
+ /* Ingress pools. */
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_INGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE),
+ /* Egress pools. */
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_EGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+};
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_sb_pr *prs,
@@ -357,7 +462,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
.pool_index = _pool, \
}
-static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
+static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
MLXSW_SP_SB_CM(10000, 8, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
@@ -370,9 +475,20 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
MLXSW_SP_SB_CM(20000, 1, 3),
};
-#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
+static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
+ MLXSW_SP_SB_CM(0, 7, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(20000, 1, 3),
+};
-static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
+static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
MLXSW_SP_SB_CM(1500, 9, 4),
MLXSW_SP_SB_CM(1500, 9, 4),
MLXSW_SP_SB_CM(1500, 9, 4),
@@ -392,7 +508,25 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
MLXSW_SP_SB_CM(1, 0xff, 4),
};
-#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
+static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(1, 0xff, 4),
+};
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)
@@ -431,9 +565,6 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
};
-#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
- ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
-
static bool
mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
{
@@ -447,6 +578,7 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const struct mlxsw_sp_sb_cm *cms,
size_t cms_len)
{
+ const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
int i;
int err;
@@ -458,7 +590,7 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
continue; /* PG number 8 does not exist, skip it */
cm = &cms[i];
- if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir))
+ if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
continue;
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
@@ -484,27 +616,28 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
- err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
mlxsw_sp_port->local_port,
MLXSW_REG_SBXX_DIR_INGRESS,
- mlxsw_sp_sb_cms_ingress,
- MLXSW_SP_SB_CMS_INGRESS_LEN);
+ mlxsw_sp->sb_vals->cms_ingress,
+ mlxsw_sp->sb_vals->cms_ingress_count);
if (err)
return err;
return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->local_port,
MLXSW_REG_SBXX_DIR_EGRESS,
- mlxsw_sp_sb_cms_egress,
- MLXSW_SP_SB_CMS_EGRESS_LEN);
+ mlxsw_sp->sb_vals->cms_egress,
+ mlxsw_sp->sb_vals->cms_egress_count);
}
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
- mlxsw_sp_cpu_port_sb_cms,
- MLXSW_SP_CPU_PORT_SB_MCS_LEN);
+ mlxsw_sp->sb_vals->cms_cpu,
+ mlxsw_sp->sb_vals->cms_cpu_count);
}
#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
@@ -513,7 +646,7 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
.max_buff = _max_buff, \
}
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
+static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
/* Ingress pools. */
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
@@ -527,7 +660,18 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
MLXSW_SP_SB_PM(10000, 90000),
};
-#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
+ /* Ingress pools. */
+ MLXSW_SP_SB_PM(0, 7),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+ /* Egress pools. */
+ MLXSW_SP_SB_PM(0, 7),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+};
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -535,8 +679,8 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
int i;
int err;
- for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
- const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i];
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i];
u32 max_buff;
u32 min_buff;
@@ -552,12 +696,6 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-struct mlxsw_sp_sb_mm {
- u32 min_buff;
- u32 max_buff;
- u16 pool_index;
-};
-
#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
{ \
.min_buff = _min_buff, \
@@ -583,21 +721,19 @@ static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
MLXSW_SP_SB_MM(0, 6, 4),
};
-#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
-
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
char sbmm_pl[MLXSW_REG_SBMM_LEN];
int i;
int err;
- for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+ for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
const struct mlxsw_sp_sb_pool_des *des;
const struct mlxsw_sp_sb_mm *mc;
u32 min_buff;
- mc = &mlxsw_sp_sb_mms[i];
- des = &mlxsw_sp_sb_pool_dess[mc->pool_index];
+ mc = &mlxsw_sp->sb_vals->mms[i];
+ des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
/* All pools used by sb_mm's are initialized using dynamic
* thresholds, therefore 'max_buff' isn't specified in cells.
*/
@@ -611,22 +747,55 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len)
+static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
+ u16 *p_ingress_len, u16 *p_egress_len)
{
int i;
- for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i)
- if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS)
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i)
+ if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
+ MLXSW_REG_SBXX_DIR_EGRESS)
goto out;
WARN(1, "No egress pools\n");
out:
*p_ingress_len = i;
- *p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i;
+ *p_egress_len = mlxsw_sp->sb_vals->pool_count - i;
}
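/* Illustrative sketch, not part of the patch: mlxsw_sp_pool_count() now
 * derives the ingress/egress split from the per-ASIC descriptor table
 * instead of a compile-time constant, relying on the "ingress pools
 * before egress pools" ordering.  The index of the first egress
 * descriptor is the ingress count; the remainder is the egress count.
 * Modelled below with the Spectrum-2 table from this patch
 * (4 ingress + 4 egress descriptors):
 */
#include <stdio.h>

enum dir { DIR_INGRESS, DIR_EGRESS };

int main(void)
{
	const enum dir dess[] = {
		DIR_INGRESS, DIR_INGRESS, DIR_INGRESS, DIR_INGRESS,
		DIR_EGRESS, DIR_EGRESS, DIR_EGRESS, DIR_EGRESS,
	};
	unsigned int count = sizeof(dess) / sizeof(dess[0]);
	unsigned int i;

	for (i = 0; i < count; i++)
		if (dess[i] == DIR_EGRESS)
			break;
	/* Prints "ingress pools: 4, egress pools: 4". */
	printf("ingress pools: %u, egress pools: %u\n", i, count - i);
	return 0;
}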
+const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
+ .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
+ .pool_dess = mlxsw_sp1_sb_pool_dess,
+ .pms = mlxsw_sp1_sb_pms,
+ .prs = mlxsw_sp1_sb_prs,
+ .mms = mlxsw_sp_sb_mms,
+ .cms_ingress = mlxsw_sp1_sb_cms_ingress,
+ .cms_egress = mlxsw_sp1_sb_cms_egress,
+ .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
+ .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
+ .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
+ .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
+ .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
+};
+
+const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
+ .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
+ .pool_dess = mlxsw_sp2_sb_pool_dess,
+ .pms = mlxsw_sp2_sb_pms,
+ .prs = mlxsw_sp2_sb_prs,
+ .mms = mlxsw_sp_sb_mms,
+ .cms_ingress = mlxsw_sp2_sb_cms_ingress,
+ .cms_egress = mlxsw_sp2_sb_cms_egress,
+ .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
+ .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
+ .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
+ .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
+ .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
+};
+
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
+ u32 max_headroom_size;
u16 ing_pool_count;
u16 eg_pool_count;
int err;
@@ -637,18 +806,26 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
return -EIO;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
+ return -EIO;
+
mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
if (!mlxsw_sp->sb)
return -ENOMEM;
mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_BUFFER_SIZE);
+ max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_HEADROOM_SIZE);
+ /* Round down, because this limit must not be overstepped. */
+ mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
+ mlxsw_sp->sb->cell_size;
err = mlxsw_sp_sb_ports_init(mlxsw_sp);
if (err)
goto err_sb_ports_init;
- err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs,
- MLXSW_SP_SB_PRS_LEN);
+ err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
+ mlxsw_sp->sb_vals->pool_count);
if (err)
goto err_sb_prs_init;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
@@ -657,7 +834,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
if (err)
goto err_sb_mms_init;
- mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count);
+ mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
mlxsw_sp->sb->sb_size,
ing_pool_count,
@@ -705,14 +882,16 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
- enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir;
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ enum mlxsw_reg_sbxx_dir dir;
struct mlxsw_sp_sb_pr *pr;
+ dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
pool_info->pool_type = (enum devlink_sb_pool_type) dir;
pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
+ pool_info->cell_size = mlxsw_sp->sb->cell_size;
return 0;
}
@@ -833,7 +1012,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
u32 max_buff;
int err;
- if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir)
+ if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir)
return -EINVAL;
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
@@ -931,7 +1110,7 @@ next_batch:
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
- for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
&bulk_list);
if (err)
@@ -990,7 +1169,7 @@ next_batch:
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
- for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
&bulk_list);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 41e607a14846..49933818c6f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -220,7 +220,7 @@ start_again:
for (; i < rif_count; i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
- if (!rif)
+ if (!rif || !mlxsw_sp_rif_dev(rif))
continue;
err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif,
counters_enabled);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 055cc6943b34..46baf3b44309 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -997,8 +997,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = MLXSW_SP_RFID_BASE - 1,
- .end_index = MLXSW_SP_RFID_BASE - 1,
+ .start_index = VLAN_N_VID - 1,
+ .end_index = VLAN_N_VID - 1,
.ops = &mlxsw_sp_fid_dummy_ops,
};
@@ -1188,8 +1188,7 @@ static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
fid_family->mlxsw_sp = mlxsw_sp;
INIT_LIST_HEAD(&fid_family->fids_list);
- fid_family->fids_bitmap = kcalloc(BITS_TO_LONGS(nr_fids),
- sizeof(unsigned long), GFP_KERNEL);
+ fid_family->fids_bitmap = bitmap_zalloc(nr_fids, GFP_KERNEL);
if (!fid_family->fids_bitmap) {
err = -ENOMEM;
goto err_alloc_fids_bitmap;
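/* Illustrative sketch, not part of the patch: bitmap_zalloc(nbits, flags)
 * allocates the same zeroed storage the removed open-coded kcalloc() did
 * -- BITS_TO_LONGS(nbits) longs -- but states the intent ("a bitmap of
 * nbits bits") directly and pairs with bitmap_free().  Userspace model
 * of the size calculation only; the FID count is an assumed example:
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int nr_fids = 4096;	/* assumed example FID count */
	size_t longs = BITS_TO_LONGS(nr_fids);

	printf("%u bits -> %zu longs (%zu bytes)\n",
	       nr_fids, longs, longs * sizeof(long));
	return 0;
}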
@@ -1206,7 +1205,7 @@ static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
return 0;
err_fid_flood_tables_init:
- kfree(fid_family->fids_bitmap);
+ bitmap_free(fid_family->fids_bitmap);
err_alloc_fids_bitmap:
kfree(fid_family);
return err;
@@ -1217,7 +1216,7 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid_family *fid_family)
{
mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
- kfree(fid_family->fids_bitmap);
+ bitmap_free(fid_family->fids_bitmap);
WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
kfree(fid_family);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ff072358d950..15f804453cd6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -17,13 +17,13 @@
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tcf_exts *exts,
+ struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
- const struct tc_action *a;
+ const struct flow_action_entry *act;
int err, i;
- if (!tcf_exts_has_actions(exts))
+ if (!flow_action_has_entries(flow_action))
return 0;
/* Count action is inserted first */
@@ -31,27 +31,31 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- tcf_exts_for_each_action(i, a, exts) {
- if (is_tcf_gact_ok(a)) {
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
err = mlxsw_sp_acl_rulei_act_terminate(rulei);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
return err;
}
- } else if (is_tcf_gact_shot(a)) {
+ break;
+ case FLOW_ACTION_DROP:
err = mlxsw_sp_acl_rulei_act_drop(rulei);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
}
- } else if (is_tcf_gact_trap(a)) {
+ break;
+ case FLOW_ACTION_TRAP:
err = mlxsw_sp_acl_rulei_act_trap(rulei);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
return err;
}
- } else if (is_tcf_gact_goto_chain(a)) {
- u32 chain_index = tcf_gact_goto_chain_index(a);
+ break;
+ case FLOW_ACTION_GOTO: {
+ u32 chain_index = act->chain_index;
struct mlxsw_sp_acl_ruleset *ruleset;
u16 group_id;
@@ -67,7 +71,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
return err;
}
- } else if (is_tcf_mirred_egress_redirect(a)) {
+ }
+ break;
+ case FLOW_ACTION_REDIRECT: {
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
u16 fid_index;
@@ -79,29 +85,33 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- out_dev = tcf_mirred_dev(a);
+ out_dev = act->dev;
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
out_dev, extack);
if (err)
return err;
- } else if (is_tcf_mirred_egress_mirror(a)) {
- struct net_device *out_dev = tcf_mirred_dev(a);
+ }
+ break;
+ case FLOW_ACTION_MIRRED: {
+ struct net_device *out_dev = act->dev;
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
block, out_dev,
extack);
if (err)
return err;
- } else if (is_tcf_vlan(a)) {
- u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
- u32 action = tcf_vlan_action(a);
- u8 prio = tcf_vlan_push_prio(a);
- u16 vid = tcf_vlan_push_vid(a);
+ }
+ break;
+ case FLOW_ACTION_VLAN_MANGLE: {
+ u16 proto = be16_to_cpu(act->vlan.proto);
+ u8 prio = act->vlan.prio;
+ u16 vid = act->vlan.vid;
return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
- action, vid,
+ act->id, vid,
proto, prio, extack);
- } else {
+ }
+ default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
return -EOPNOTSUPP;
@@ -113,59 +123,49 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(f->rule, &match);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
- (char *) &key->src,
- (char *) &mask->src, 4);
+ (char *) &match.key->src,
+ (char *) &match.mask->src, 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
- (char *) &key->dst,
- (char *) &mask->dst, 4);
+ (char *) &match.key->dst,
+ (char *) &match.mask->dst, 4);
}
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(f->rule, &match);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
- &key->src.s6_addr[0x0],
- &mask->src.s6_addr[0x0], 4);
+ &match.key->src.s6_addr[0x0],
+ &match.mask->src.s6_addr[0x0], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
- &key->src.s6_addr[0x4],
- &mask->src.s6_addr[0x4], 4);
+ &match.key->src.s6_addr[0x4],
+ &match.mask->src.s6_addr[0x4], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
- &key->src.s6_addr[0x8],
- &mask->src.s6_addr[0x8], 4);
+ &match.key->src.s6_addr[0x8],
+ &match.mask->src.s6_addr[0x8], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
- &key->src.s6_addr[0xC],
- &mask->src.s6_addr[0xC], 4);
+ &match.key->src.s6_addr[0xC],
+ &match.mask->src.s6_addr[0xC], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
- &key->dst.s6_addr[0x0],
- &mask->dst.s6_addr[0x0], 4);
+ &match.key->dst.s6_addr[0x0],
+ &match.mask->dst.s6_addr[0x0], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
- &key->dst.s6_addr[0x4],
- &mask->dst.s6_addr[0x4], 4);
+ &match.key->dst.s6_addr[0x4],
+ &match.mask->dst.s6_addr[0x4], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
- &key->dst.s6_addr[0x8],
- &mask->dst.s6_addr[0x8], 4);
+ &match.key->dst.s6_addr[0x8],
+ &match.mask->dst.s6_addr[0x8], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
- &key->dst.s6_addr[0xC],
- &mask->dst.s6_addr[0xC], 4);
+ &match.key->dst.s6_addr[0xC],
+ &match.mask->dst.s6_addr[0xC], 4);
}
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
@@ -173,9 +173,10 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
struct tc_cls_flower_offload *f,
u8 ip_proto)
{
- struct flow_dissector_key_ports *key, *mask;
+ const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_match_ports match;
- if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
return 0;
if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
@@ -184,16 +185,13 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
}
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
+ flow_rule_match_ports(rule, &match);
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
- ntohs(key->dst), ntohs(mask->dst));
+ ntohs(match.key->dst),
+ ntohs(match.mask->dst));
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
- ntohs(key->src), ntohs(mask->src));
+ ntohs(match.key->src),
+ ntohs(match.mask->src));
return 0;
}
@@ -202,9 +200,10 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
struct tc_cls_flower_offload *f,
u8 ip_proto)
{
- struct flow_dissector_key_tcp *key, *mask;
+ const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_match_tcp match;
- if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
return 0;
if (ip_proto != IPPROTO_TCP) {
@@ -213,14 +212,11 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
}
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- f->mask);
+ flow_rule_match_tcp(rule, &match);
+
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
- ntohs(key->flags), ntohs(mask->flags));
+ ntohs(match.key->flags),
+ ntohs(match.mask->flags));
return 0;
}
@@ -229,9 +225,10 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
struct tc_cls_flower_offload *f,
u16 n_proto)
{
- struct flow_dissector_key_ip *key, *mask;
+ const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_match_ip match;
- if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
return 0;
if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
@@ -240,20 +237,18 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
}
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->mask);
+ flow_rule_match_ip(rule, &match);
+
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
- key->ttl, mask->ttl);
+ match.key->ttl, match.mask->ttl);
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
- key->tos & 0x3, mask->tos & 0x3);
+ match.key->tos & 0x3,
+ match.mask->tos & 0x3);
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
- key->tos >> 6, mask->tos >> 6);
+ match.key->tos >> 6,
+ match.mask->tos >> 6);
return 0;
}
@@ -263,13 +258,15 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0;
u16 n_proto_key = 0;
u16 addr_type = 0;
u8 ip_proto = 0;
int err;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -286,25 +283,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
- addr_type = key->addr_type;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- n_proto_key = ntohs(key->n_proto);
- n_proto_mask = ntohs(mask->n_proto);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
if (n_proto_key == ETH_P_ALL) {
n_proto_key = 0;
@@ -314,60 +305,53 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
MLXSW_AFK_ELEMENT_ETHERTYPE,
n_proto_key, n_proto_mask);
- ip_proto = key->ip_proto;
+ ip_proto = match.key->ip_proto;
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_IP_PROTO,
- key->ip_proto, mask->ip_proto);
+ match.key->ip_proto,
+ match.mask->ip_proto);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
- struct flow_dissector_key_eth_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+ flow_rule_match_eth_addrs(rule, &match);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
MLXSW_AFK_ELEMENT_DMAC_32_47,
- key->dst, mask->dst, 2);
+ match.key->dst,
+ match.mask->dst, 2);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
MLXSW_AFK_ELEMENT_DMAC_0_31,
- key->dst + 2, mask->dst + 2, 4);
+ match.key->dst + 2,
+ match.mask->dst + 2, 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
MLXSW_AFK_ELEMENT_SMAC_32_47,
- key->src, mask->src, 2);
+ match.key->src,
+ match.mask->src, 2);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
MLXSW_AFK_ELEMENT_SMAC_0_31,
- key->src + 2, mask->src + 2, 4);
+ match.key->src + 2,
+ match.mask->src + 2, 4);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+ flow_rule_match_vlan(rule, &match);
if (mlxsw_sp_acl_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
- if (mask->vlan_id != 0)
+ if (match.mask->vlan_id != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VID,
- key->vlan_id,
- mask->vlan_id);
- if (mask->vlan_priority != 0)
+ match.key->vlan_id,
+ match.mask->vlan_id);
+ if (match.mask->vlan_priority != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_PCP,
- key->vlan_priority,
- mask->vlan_priority);
+ match.key->vlan_priority,
+ match.mask->vlan_priority);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -387,7 +371,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
+ &f->rule->action,
f->common.extack);
}
@@ -486,7 +471,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rule_get_stats;
- tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 00db26c96bf5..6400cd644b7a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -145,6 +145,7 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
char rtdp_pl[MLXSW_REG_RTDP_LEN];
struct ip_tunnel_parm parms;
unsigned int type_check;
@@ -157,6 +158,7 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
ikey = mlxsw_sp_ipip_parms4_ikey(parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
type_check = has_ikey ?
MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 0a31fff2516e..1df164a4b06d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -267,8 +267,8 @@ mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nve_mc_record *mc_record;
int err;
- mc_record = kzalloc(sizeof(*mc_record) + num_max_entries *
- sizeof(struct mlxsw_sp_nve_mc_entry), GFP_KERNEL);
+ mc_record = kzalloc(struct_size(mc_record, entries, num_max_entries),
+ GFP_KERNEL);
if (!mc_record)
return ERR_PTR(-ENOMEM);
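/* Illustrative sketch, not part of the patch: struct_size(p, member, n)
 * from <linux/overflow.h> computes sizeof(*p) plus n elements of the
 * flexible array 'member', and saturates instead of wrapping on
 * overflow, which is why it replaces the open-coded multiplication
 * here.  Simplified userspace model of the non-overflow case, using a
 * stand-in record type:
 */
#include <stdio.h>
#include <stddef.h>

struct entry { unsigned int val; };

struct record {
	unsigned int num_entries;
	struct entry entries[];		/* flexible array member */
};

/* Simplified: the kernel macro additionally checks for overflow. */
#define STRUCT_SIZE(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct record *rec = NULL;	/* only used inside sizeof */
	size_t n = 8;			/* assumed example entry count */

	printf("allocation size: %zu bytes\n", STRUCT_SIZE(rec, entries, n));
	return 0;
}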
@@ -816,14 +816,14 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
ops = nve->nve_ops_arr[params->type];
if (!ops->can_offload(nve, params->dev, extack))
- return -EOPNOTSUPP;
+ return -EINVAL;
memset(&config, 0, sizeof(config));
ops->nve_config(nve, params->dev, &config);
if (nve->num_nve_tunnels &&
memcmp(&config, &nve->config, sizeof(config))) {
NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
- return -EOPNOTSUPP;
+ return -EINVAL;
}
err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
@@ -841,11 +841,9 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
nve->config = config;
- err = ops->fdb_replay(params->dev, params->vni);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to offload the FDB");
+ err = ops->fdb_replay(params->dev, params->vni, extack);
+ if (err)
goto err_fdb_replay;
- }
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 02937ea95bc3..0035640156a1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -28,6 +28,7 @@ struct mlxsw_sp_nve {
unsigned int num_nve_tunnels; /* Protected by RTNL */
unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
u32 tunnel_index;
+ u16 ul_rif_index; /* Reserved for Spectrum */
};
struct mlxsw_sp_nve_ops {
@@ -41,7 +42,8 @@ struct mlxsw_sp_nve_ops {
int (*init)(struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_config *config);
void (*fini)(struct mlxsw_sp_nve *nve);
- int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni);
+ int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni,
+ struct netlink_ext_ack *extack);
void (*fdb_clear_offload)(const struct net_device *nve_dev, __be32 vni);
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 74e564c4ac19..93ccd9fc2266 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -7,6 +7,7 @@
#include <net/vxlan.h>
#include "reg.h"
+#include "spectrum.h"
#include "spectrum_nve.h"
/* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B)
@@ -20,9 +21,9 @@
#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
VXLAN_F_LEARN)
-static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
- struct netlink_ext_ack *extack)
+static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_config *cfg = &vxlan->cfg;
@@ -112,13 +113,30 @@ static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
}
+static void
+mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
+ const struct mlxsw_sp_nve_config *config)
+{
+ u8 udp_sport;
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
+ config->ttl);
+ /* VxLAN driver's default UDP source port range is 32768 (0x8000)
+ * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
+ * to a random number between 0x80 and 0xee
+ */
+ get_random_bytes(&udp_sport, sizeof(udp_sport));
+ udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+ mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
+}
+
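/* Illustrative sketch, not part of the patch: the helper factored out
 * above keeps the original UDP source-port trick -- map a random byte
 * into [0x80, 0xee] and program it as the upper 8 bits of the source
 * port, so the programmed prefix corresponds to port bases 0x8000
 * (32768) through 0xee00 (60928), matching the start of the VxLAN
 * driver's default 32768-60999 range.  Standalone check of the mapping:
 */
#include <stdio.h>

int main(void)
{
	const unsigned char samples[] = { 0x00, 0x6f, 0xff }; /* assumed inputs */
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++) {
		unsigned char sport = (samples[i] % (0xee - 0x80 + 1)) + 0x80;

		printf("0x%02x -> prefix 0x%02x (port base %u)\n",
		       samples[i], sport, (unsigned int)sport << 8);
	}
	/* Every result lands in 0x80..0xee, i.e. port bases 32768..60928. */
	return 0;
}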
static int
mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nve_config *config)
{
char tngcr_pl[MLXSW_REG_TNGCR_LEN];
u16 ul_vr_id;
- u8 udp_sport;
int err;
err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
@@ -126,18 +144,9 @@ mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
- config->ttl);
- /* VxLAN driver's default UDP source port range is 32768 (0x8000)
- * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
- * to a random number between 0x80 and 0xee
- */
- get_random_bytes(&udp_sport, sizeof(udp_sport));
- udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
- mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+ mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
- mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
}
@@ -212,11 +221,13 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
}
static int
-mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni)
+mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni,
+ struct netlink_ext_ack *extack)
{
if (WARN_ON(!netif_is_vxlan(nve_dev)))
return -EINVAL;
- return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier);
+ return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier,
+ extack);
}
static void
@@ -229,7 +240,7 @@ mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.type = MLXSW_SP_NVE_TYPE_VXLAN,
- .can_offload = mlxsw_sp1_nve_vxlan_can_offload,
+ .can_offload = mlxsw_sp_nve_vxlan_can_offload,
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp1_nve_vxlan_init,
.fini = mlxsw_sp1_nve_vxlan_fini,
@@ -237,26 +248,126 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
-static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
- const struct net_device *dev,
- struct netlink_ext_ack *extack)
+static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+ bool learning_en)
{
- return false;
+ char tnpc_pl[MLXSW_REG_TNPC_LEN];
+
+ mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
+ learning_en);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
+}
+
+static int
+mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_config *config)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ u16 ul_rif_index;
+ int err;
+
+ err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
+ &ul_rif_index);
+ if (err)
+ return err;
+ mlxsw_sp->nve->ul_rif_index = ul_rif_index;
+
+ err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
+ if (err)
+ goto err_vxlan_learning_set;
+
+ mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
+ mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+ if (err)
+ goto err_tngcr_write;
+
+ return 0;
+
+err_tngcr_write:
+ mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
+err_vxlan_learning_set:
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
+ return err;
+}
+
+static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+ mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
+}
+
+static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int tunnel_index,
+ u16 ul_rif_index)
+{
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_config *config)
{
- return -EOPNOTSUPP;
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+ int err;
+
+ err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
+ MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
+ config->udp_dport);
+ if (err)
+ return err;
+
+ err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
+ if (err)
+ goto err_config_set;
+
+ err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
+ nve->ul_rif_index);
+ if (err)
+ goto err_rtdp_set;
+
+ err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto,
+ &config->ul_sip,
+ nve->tunnel_index);
+ if (err)
+ goto err_promote_decap;
+
+ return 0;
+
+err_promote_decap:
+err_rtdp_set:
+ mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
+err_config_set:
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+ config->udp_dport);
+ return err;
}
static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
+ struct mlxsw_sp_nve_config *config = &nve->config;
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+
+ mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto, &config->ul_sip);
+ mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+ config->udp_dport);
}
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
.type = MLXSW_SP_NVE_TYPE_VXLAN,
- .can_offload = mlxsw_sp2_nve_vxlan_can_offload,
+ .can_offload = mlxsw_sp_nve_vxlan_can_offload,
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp2_nve_vxlan_init,
.fini = mlxsw_sp2_nve_vxlan_fini,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 98e5ffd71b91..52fed8c7bf1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -80,7 +80,7 @@ struct mlxsw_sp_router {
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
- struct net_device *dev;
+ struct net_device *dev; /* NULL for underlay RIF */
struct mlxsw_sp_fid *fid;
unsigned char addr[ETH_ALEN];
int mtu;
@@ -120,6 +120,7 @@ struct mlxsw_sp_rif_ipip_lb {
struct mlxsw_sp_rif common;
struct mlxsw_sp_rif_ipip_lb_config lb_config;
u16 ul_vr_id; /* Reserved for Spectrum-2. */
+ u16 ul_rif_id; /* Reserved for Spectrum. */
};
struct mlxsw_sp_rif_params_ipip_lb {
@@ -363,6 +364,7 @@ enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
+ MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
/* This is a special case of local delivery, where a packet should be
* decapsulated on reception. Note that there is no corresponding ENCAP,
@@ -440,6 +442,8 @@ struct mlxsw_sp_vr {
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
+ struct mlxsw_sp_rif *ul_rif;
+ refcount_t ul_rif_refcnt;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
@@ -1437,8 +1441,8 @@ mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
- struct mlxsw_sp_vr *ul_vr, bool enable)
+mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
+ u16 ul_rif_id, bool enable)
{
struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
struct mlxsw_sp_rif *rif = &lb_rif->common;
@@ -1453,7 +1457,7 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
- ul_vr->id, saddr4, lb_cf.okey);
+ ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
break;
case MLXSW_SP_L3_PROTO_IPV6:
@@ -1468,14 +1472,13 @@ static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif_ipip_lb *lb_rif;
- struct mlxsw_sp_vr *ul_vr;
int err = 0;
ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
if (ipip_entry) {
lb_rif = ipip_entry->ol_lb;
- ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
- err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
+ lb_rif->ul_rif_id, true);
if (err)
goto out;
lb_rif->common.mtu = ol_dev->mtu;
@@ -3811,13 +3814,11 @@ mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
struct mlxsw_sp_nexthop_group *nh_grp;
struct mlxsw_sp_nexthop *nh;
struct fib_nh *fib_nh;
- size_t alloc_size;
int i;
int err;
- alloc_size = sizeof(*nh_grp) +
- fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
- nh_grp = kzalloc(alloc_size, GFP_KERNEL);
+ nh_grp = kzalloc(struct_size(nh_grp, nexthops, fi->fib_nhs),
+ GFP_KERNEL);
if (!nh_grp)
return ERR_PTR(-ENOMEM);
nh_grp->priv = fi;
@@ -3926,6 +3927,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
return !!nh_group->adj_index_valid;
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
return !!nh_group->nh_rif;
+ case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
return true;
@@ -3961,6 +3963,7 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
int i;
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
+ fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
@@ -4002,7 +4005,8 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
+ if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
+ fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
return;
@@ -4170,6 +4174,19 @@ static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
+static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -4209,6 +4226,8 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
fib_entry, op);
@@ -4277,8 +4296,10 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
case RTN_BROADCAST:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
+ case RTN_BLACKHOLE:
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
+ return 0;
case RTN_UNREACHABLE: /* fall through */
- case RTN_BLACKHOLE: /* fall through */
case RTN_PROHIBIT:
/* Packets hitting these routes need to be trapped, but
* can do so with a lower priority than packets directed
@@ -5043,13 +5064,11 @@ mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
struct mlxsw_sp_nexthop *nh;
- size_t alloc_size;
int i = 0;
int err;
- alloc_size = sizeof(*nh_grp) +
- fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
- nh_grp = kzalloc(alloc_size, GFP_KERNEL);
+ nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
+ GFP_KERNEL);
if (!nh_grp)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&nh_grp->fib_list);
@@ -5227,6 +5246,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
*/
if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ else if (rt->fib6_type == RTN_BLACKHOLE)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
else if (rt->fib6_flags & RTF_REJECT)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
@@ -6121,7 +6142,7 @@ static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
- if (WARN_ON_ONCE(err))
+ if (err)
return err;
mlxsw_reg_ritr_enable_set(ritr_pl, false);
@@ -6224,10 +6245,12 @@ static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
INIT_LIST_HEAD(&rif->nexthop_list);
INIT_LIST_HEAD(&rif->neigh_list);
- ether_addr_copy(rif->addr, l3_dev->dev_addr);
- rif->mtu = l3_dev->mtu;
+ if (l3_dev) {
+ ether_addr_copy(rif->addr, l3_dev->dev_addr);
+ rif->mtu = l3_dev->mtu;
+ rif->dev = l3_dev;
+ }
rif->vr_id = vr_id;
- rif->dev = l3_dev;
rif->rif_index = rif_index;
return rif;
@@ -6251,7 +6274,19 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
- return lb_rif->ul_vr_id;
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
+ struct mlxsw_sp_vr *ul_vr;
+
+ ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
+ if (WARN_ON(IS_ERR(ul_vr)))
+ return 0;
+
+ return ul_vr->id;
+}
+
+u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
+{
+ return lb_rif->ul_rif_id;
}
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
@@ -6284,7 +6319,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
int i, err;
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
- ops = mlxsw_sp->router->rif_ops_arr[type];
+ ops = mlxsw_sp->rif_ops_arr[type];
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
if (IS_ERR(vr))
@@ -6303,6 +6338,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_rif_alloc;
}
dev_hold(rif->dev);
+ mlxsw_sp->router->rifs[rif_index] = rif;
rif->mlxsw_sp = mlxsw_sp;
rif->ops = ops;
@@ -6329,7 +6365,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_rif_counters_alloc(rif);
- mlxsw_sp->router->rifs[rif_index] = rif;
return rif;
@@ -6341,6 +6376,7 @@ err_configure:
if (fid)
mlxsw_sp_fid_put(fid);
err_fid_get:
+ mlxsw_sp->router->rifs[rif_index] = NULL;
dev_put(rif->dev);
kfree(rif);
err_rif_alloc:
@@ -6361,7 +6397,6 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
@@ -6369,6 +6404,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
if (fid)
/* Loopback RIFs are not associated with a FID. */
mlxsw_sp_fid_put(fid);
+ mlxsw_sp->router->rifs[rif->rif_index] = NULL;
dev_put(rif->dev);
kfree(rif);
vr->rif_count--;
@@ -6750,7 +6786,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
rif = mlxsw_sp->router->rifs[i];
- if (rif && rif->dev != dev &&
+ if (rif && rif->dev && rif->dev != dev &&
!ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
mlxsw_sp->mac_mask)) {
NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
@@ -7294,7 +7330,8 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
info.addr = mac;
info.vid = vid;
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
+ NULL);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
@@ -7381,7 +7418,8 @@ static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
info.addr = mac;
info.vid = 0;
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
+ NULL);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
@@ -7422,7 +7460,7 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
}
static int
-mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
+mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
@@ -7434,11 +7472,12 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
if (IS_ERR(ul_vr))
return PTR_ERR(ul_vr);
- err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
if (err)
goto err_loopback_op;
lb_rif->ul_vr_id = ul_vr->id;
+ lb_rif->ul_rif_id = 0;
++ul_vr->rif_count;
return 0;
@@ -7447,32 +7486,213 @@ err_loopback_op:
return err;
}
-static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
+static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_vr *ul_vr;
ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
- mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
+ mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
--ul_vr->rif_count;
mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
+ .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
+ .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
+ .setup = mlxsw_sp_rif_ipip_lb_setup,
+ .configure = mlxsw_sp1_rif_ipip_lb_configure,
+ .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
+};
+
+const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
+ [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
+ [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
+};
+
+static int
+mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
+ mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
+ MLXSW_REG_RITR_LOOPBACK_GENERIC);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif *ul_rif;
+ u16 rif_index;
+ int err;
+
+ err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
+ return ERR_PTR(err);
+ }
+
+ ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
+ if (!ul_rif)
+ return ERR_PTR(-ENOMEM);
+
+ mlxsw_sp->router->rifs[rif_index] = ul_rif;
+ ul_rif->mlxsw_sp = mlxsw_sp;
+ err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
+ if (err)
+ goto ul_rif_op_err;
+
+ return ul_rif;
+
+ul_rif_op_err:
+ mlxsw_sp->router->rifs[rif_index] = NULL;
+ kfree(ul_rif);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+
+ mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
+ mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
+ kfree(ul_rif);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
+ return vr->ul_rif;
+
+ vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
+ if (IS_ERR(vr->ul_rif)) {
+ err = PTR_ERR(vr->ul_rif);
+ goto err_ul_rif_create;
+ }
+
+ vr->rif_count++;
+ refcount_set(&vr->ul_rif_refcnt, 1);
+
+ return vr->ul_rif;
+
+err_ul_rif_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
+{
+ struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ struct mlxsw_sp_vr *vr;
+
+ vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
+
+ if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
+ return;
+
+ vr->rif_count--;
+ mlxsw_sp_ul_rif_destroy(ul_rif);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ u16 *ul_rif_index)
+{
+ struct mlxsw_sp_rif *ul_rif;
+
+ ASSERT_RTNL();
+
+ ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+ if (IS_ERR(ul_rif))
+ return PTR_ERR(ul_rif);
+ *ul_rif_index = ul_rif->rif_index;
+
+ return 0;
+}
+
+void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
+{
+ struct mlxsw_sp_rif *ul_rif;
+
+ ASSERT_RTNL();
+
+ ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
+ if (WARN_ON(!ul_rif))
+ return;
+
+ mlxsw_sp_ul_rif_put(ul_rif);
+}
+
+static int
+mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif *ul_rif;
+ int err;
+
+ ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+ if (IS_ERR(ul_rif))
+ return PTR_ERR(ul_rif);
+
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
+ if (err)
+ goto err_loopback_op;
+
+ lb_rif->ul_vr_id = 0;
+ lb_rif->ul_rif_id = ul_rif->rif_index;
+
+ return 0;
+
+err_loopback_op:
+ mlxsw_sp_ul_rif_put(ul_rif);
+ return err;
+}
+
+static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif *ul_rif;
+
+ ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
+ mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
+ mlxsw_sp_ul_rif_put(ul_rif);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
.setup = mlxsw_sp_rif_ipip_lb_setup,
- .configure = mlxsw_sp_rif_ipip_lb_configure,
- .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
+ .configure = mlxsw_sp2_rif_ipip_lb_configure,
+ .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
};
-static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
+const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
- [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
+ [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
@@ -7485,8 +7705,6 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->router->rifs)
return -ENOMEM;
- mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
-
return 0;
}
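
The new mlxsw_sp_ul_rif_get()/mlxsw_sp_ul_rif_put() pair above implements a
lazily created, reference-counted object per virtual router. Stripped of the
mlxsw specifics, the idiom looks roughly like this (object and field names
are placeholders, not driver API):

        #include <linux/refcount.h>
        #include <linux/err.h>

        static struct obj *obj_get(struct parent *p)
        {
                /* Fast path: the object already exists, just take a reference. */
                if (refcount_inc_not_zero(&p->obj_refcnt))
                        return p->obj;

                /* First user creates the object and sets the count to one. */
                p->obj = obj_create(p);
                if (IS_ERR(p->obj))
                        return p->obj;

                refcount_set(&p->obj_refcnt, 1);
                return p->obj;
        }

        static void obj_put(struct parent *p)
        {
                /* Last user tears the object down again. */
                if (refcount_dec_and_test(&p->obj_refcnt))
                        obj_destroy(p->obj);
        }
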
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 3dbafdeaab2b..cc1de91e8217 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -29,6 +29,7 @@ struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
+u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index ad5a9b9e1466..536c23c578c3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -305,7 +305,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, parms.link, tun->fwmark);
+ 0, 0, parms.link, tun->fwmark, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1bd2c6e15f8d..f6ce386c3036 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -431,46 +431,6 @@ static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}
-static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
- struct net_device *dev,
- unsigned long *brport_flags)
-{
- struct mlxsw_sp_bridge_port *bridge_port;
-
- bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
- if (WARN_ON(!bridge_port))
- return;
-
- memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
-}
-
-static int mlxsw_sp_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
- memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
- attr->u.ppid.id_len);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
- &attr->u.brport_flags);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
- attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
- BR_MCAST_FLOOD;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
@@ -620,6 +580,17 @@ err_port_bridge_vlan_learning_set:
return err;
}
+static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
+ *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ unsigned long brport_flags)
+{
+ if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
@@ -866,6 +837,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
attr->orig_dev,
attr->u.stp_state);
break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
+ trans,
+ attr->u.brport_flags);
+ break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
attr->orig_dev,
@@ -1078,8 +1054,7 @@ static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
u16 vid, bool is_untagged, bool is_pvid,
- struct netlink_ext_ack *extack,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
@@ -1095,9 +1070,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_vlan->bridge_port != bridge_port)
return -EEXIST;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (!mlxsw_sp_port_vlan) {
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
vid);
@@ -1188,6 +1160,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
+ if (switchdev_trans_ph_commit(trans))
+ return 0;
+
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
@@ -1200,7 +1175,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
vid, flag_untagged,
- flag_pvid, extack, trans);
+ flag_pvid, extack);
if (err)
return err;
}
@@ -1234,7 +1209,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
- MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
}
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
@@ -1291,7 +1266,7 @@ out:
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
- bool dynamic)
+ enum mlxsw_reg_sfd_rec_policy policy)
{
char *sfd_pl;
u8 num_rec;
@@ -1302,8 +1277,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1322,7 +1296,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+ MLXSW_REG_SFD_REC_ACTION_NOP,
+ mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
@@ -1330,7 +1305,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
- false);
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
@@ -1808,7 +1783,7 @@ static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
- u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
+ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -1963,11 +1938,6 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
- .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
- .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
-};
-
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
@@ -2028,6 +1998,7 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
return 0;
if (mlxsw_sp_fid_vni_is_set(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
err = -EINVAL;
goto err_vni_exists;
}
@@ -2214,10 +2185,13 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
int err;
fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
- if (!fid)
+ if (!fid) {
+ NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID");
return -EINVAL;
+ }
if (mlxsw_sp_fid_vni_is_set(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
err = -EINVAL;
goto err_vni_exists;
}
@@ -2444,7 +2418,7 @@ static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
ether_addr_copy(info.eth_addr, mac);
info.vni = vni;
info.offloaded = adding;
- call_switchdev_notifiers(type, dev, &info.info);
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
}
static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
@@ -2469,7 +2443,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
info.addr = mac;
info.vid = vid;
info.offloaded = offloaded;
- call_switchdev_notifiers(type, dev, &info.info);
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
}
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
@@ -2820,7 +2794,7 @@ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
return;
vxlan_fdb_info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
- &vxlan_fdb_info.info);
+ &vxlan_fdb_info.info, NULL);
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
vxlan_fdb_info.eth_addr,
fdb_info->vid, dev, true);
@@ -2833,7 +2807,7 @@ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
false);
vxlan_fdb_info.offloaded = false;
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
- &vxlan_fdb_info.info);
+ &vxlan_fdb_info.info, NULL);
break;
}
}
@@ -2978,7 +2952,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
}
vxlan_fdb_info->offloaded = true;
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
- &vxlan_fdb_info->info);
+ &vxlan_fdb_info->info, NULL);
mlxsw_sp_fid_put(fid);
return;
}
@@ -2999,7 +2973,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
goto err_fdb_tunnel_uc_op;
vxlan_fdb_info->offloaded = true;
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
- &vxlan_fdb_info->info);
+ &vxlan_fdb_info->info, NULL);
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
vxlan_fdb_info->eth_addr, vid, dev, true);
@@ -3100,23 +3074,34 @@ mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
struct vxlan_config *cfg = &vxlan->cfg;
+ struct netlink_ext_ack *extack;
+ extack = switchdev_notifier_info_to_extack(info);
vxlan_fdb_info = container_of(info,
struct switchdev_notifier_vxlan_fdb_info,
info);
- if (vxlan_fdb_info->remote_port != cfg->dst_port)
- return -EOPNOTSUPP;
- if (vxlan_fdb_info->remote_vni != cfg->vni)
+ if (vxlan_fdb_info->remote_port != cfg->dst_port) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
return -EOPNOTSUPP;
- if (vxlan_fdb_info->vni != cfg->vni)
+ }
+ if (vxlan_fdb_info->remote_vni != cfg->vni ||
+ vxlan_fdb_info->vni != cfg->vni) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
return -EOPNOTSUPP;
- if (vxlan_fdb_info->remote_ifindex)
+ }
+ if (vxlan_fdb_info->remote_ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
return -EOPNOTSUPP;
- if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
+ }
+ if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
return -EOPNOTSUPP;
- if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
+ }
+ if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
return -EOPNOTSUPP;
+ }
switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
@@ -3134,6 +3119,13 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
struct net_device *br_dev;
int err;
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
/* Tunnel devices are not our uppers, so check their master instead */
br_dev = netdev_master_upper_dev_get_rcu(dev);
if (!br_dev)
@@ -3207,7 +3199,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device,
const struct net_device *vxlan_dev, u16 vid,
bool flag_untagged, bool flag_pvid,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
@@ -3222,11 +3213,10 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
* the lookup function to return 'vxlan_dev'
*/
if (flag_untagged && flag_pvid &&
- mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
+ mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
return -EINVAL;
-
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ }
if (!netif_running(vxlan_dev))
return 0;
@@ -3345,6 +3335,9 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
port_obj_info->handled = true;
+ if (switchdev_trans_ph_commit(trans))
+ return 0;
+
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)
return -EINVAL;
@@ -3358,8 +3351,7 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
vxlan_dev, vid,
flag_untagged,
- flag_pvid, trans,
- extack);
+ flag_pvid, extack);
if (err)
return err;
}
@@ -3457,6 +3449,11 @@ static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
mlxsw_sp_port_dev_check,
mlxsw_sp_port_obj_del);
return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_attr_set);
+ return notifier_from_errno(err);
}
return NOTIFY_DONE;
@@ -3544,11 +3541,3 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->bridge);
}
-void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
-}
-
-void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
-{
-}
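
This file also shows the recurring theme of the series: switchdev_ops and
switchdev_port_attr_get() are removed, PORT_PARENT_ID moves to the
ndo_get_port_parent_id() netdev op, and attribute sets are delivered through
the switchdev notifier chain instead. A driver-side handler for that event
now looks roughly like this (the example_* callbacks stand in for the
driver's own port check and attr_set functions):

        static int example_switchdev_blocking_event(struct notifier_block *nb,
                                                    unsigned long event, void *ptr)
        {
                struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
                int err;

                switch (event) {
                case SWITCHDEV_PORT_ATTR_SET:
                        /* Dispatch only to ports owned by this driver. */
                        err = switchdev_handle_port_attr_set(dev, ptr,
                                                             example_port_dev_check,
                                                             example_port_attr_set);
                        return notifier_from_errno(err);
                }

                return NOTIFY_DONE;
        }
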
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 2d4f213e154d..533fe6235b7c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
-#include <net/switchdev.h>
#include "pci.h"
#include "core.h"
@@ -390,6 +389,18 @@ static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
name, len);
}
+static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ ppid->id_len = sizeof(mlxsw_sx->hw_id);
+ memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
.ndo_open = mlxsw_sx_port_open,
.ndo_stop = mlxsw_sx_port_stop,
@@ -397,6 +408,7 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
.ndo_change_mtu = mlxsw_sx_port_change_mtu,
.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
.ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
+ .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id,
};
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
@@ -901,28 +913,6 @@ static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
.set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
};
-static int mlxsw_sx_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
- memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
- .switchdev_port_attr_get = mlxsw_sx_port_attr_get,
-};
-
static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
{
char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
@@ -1034,7 +1024,6 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
- dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
if (err) {
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index b881f5d4a7f9..6006d47707cb 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -391,7 +391,7 @@ ks8695_tx_irq(int irq, void *dev_id)
ksp->tx_buffers[buff_n].dma_ptr,
ksp->tx_buffers[buff_n].length,
DMA_TO_DEVICE);
- dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
+ dev_consume_skb_irq(ksp->tx_buffers[buff_n].skb);
ksp->tx_buffers[buff_n].skb = NULL;
ksp->tx_ring_used--;
}
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index f6ecfa778660..8f72587b5a2c 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1681,5 +1681,5 @@ MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
MODULE_LICENSE("GPL");
module_param_named(debug, debug.msg_enable, int, 0);
-MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)");
+MODULE_PARM_DESC(debug, "Debug verbosity level in amount of bits set (0=none, ..., 31=all)");
MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 07c1eb63415a..3a0b289d9771 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -14,61 +14,138 @@
#define EEPROM_INDICATOR_1 (0xA5)
#define EEPROM_INDICATOR_2 (0xAA)
#define EEPROM_MAC_OFFSET (0x01)
-#define MAX_EEPROM_SIZE 512
+#define MAX_EEPROM_SIZE (512)
+#define MAX_OTP_SIZE (1024)
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
-static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
- u32 length, u8 *data)
+static int lan743x_otp_power_up(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, OTP_PWR_DN);
+
+ if (reg_value & OTP_PWR_DN_PWRDN_N_) {
+ /* clear it and wait to be cleared */
+ reg_value &= ~OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, OTP_PWR_DN, reg_value);
+
+ usleep_range(100, 20000);
+ }
+
+ return 0;
+}
+
+static void lan743x_otp_power_down(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, OTP_PWR_DN);
+ if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) {
+ /* set power down bit */
+ reg_value |= OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, OTP_PWR_DN, reg_value);
+ }
+}
+
+static void lan743x_otp_set_address(struct lan743x_adapter *adapter,
+ u32 address)
+{
+ lan743x_csr_write(adapter, OTP_ADDR_HIGH, (address >> 8) & 0x03);
+ lan743x_csr_write(adapter, OTP_ADDR_LOW, address & 0xFF);
+}
+
+static void lan743x_otp_read_go(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
+}
+
+static int lan743x_otp_wait_till_not_busy(struct lan743x_adapter *adapter)
{
unsigned long timeout;
- u32 buf;
+ u32 reg_val;
+
+ timeout = jiffies + HZ;
+ do {
+ if (time_after(jiffies, timeout)) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Timeout on OTP_STATUS completion\n");
+ return -EIO;
+ }
+ udelay(1);
+ reg_val = lan743x_csr_read(adapter, OTP_STATUS);
+ } while (reg_val & OTP_STATUS_BUSY_);
+
+ return 0;
+}
+
+static int lan743x_otp_read(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
int i;
- buf = lan743x_csr_read(adapter, OTP_PWR_DN);
+ if (offset + length > MAX_OTP_SIZE)
+ return -EINVAL;
- if (buf & OTP_PWR_DN_PWRDN_N_) {
- /* clear it and wait to be cleared */
- lan743x_csr_write(adapter, OTP_PWR_DN, 0);
-
- timeout = jiffies + HZ;
- do {
- udelay(1);
- buf = lan743x_csr_read(adapter, OTP_PWR_DN);
- if (time_after(jiffies, timeout)) {
- netif_warn(adapter, drv, adapter->netdev,
- "timeout on OTP_PWR_DN completion\n");
- return -EIO;
- }
- } while (buf & OTP_PWR_DN_PWRDN_N_);
+ ret = lan743x_otp_power_up(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < length; i++) {
+ lan743x_otp_set_address(adapter, offset + i);
+
+ lan743x_otp_read_go(adapter);
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ data[i] = lan743x_csr_read(adapter, OTP_READ_DATA);
}
+ lan743x_otp_power_down(adapter);
+
+ return 0;
+}
+
+static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ if (offset + length > MAX_OTP_SIZE)
+ return -EINVAL;
+
+ ret = lan743x_otp_power_up(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
/* set to BYTE program mode */
lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
for (i = 0; i < length; i++) {
- lan743x_csr_write(adapter, OTP_ADDR1,
- ((offset + i) >> 8) &
- OTP_ADDR1_15_11_MASK_);
- lan743x_csr_write(adapter, OTP_ADDR2,
- ((offset + i) &
- OTP_ADDR2_10_3_MASK_));
+ lan743x_otp_set_address(adapter, offset + i);
+
lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]);
lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_);
- timeout = jiffies + HZ;
- do {
- udelay(1);
- buf = lan743x_csr_read(adapter, OTP_STATUS);
- if (time_after(jiffies, timeout)) {
- netif_warn(adapter, drv, adapter->netdev,
- "Timeout on OTP_STATUS completion\n");
- return -EIO;
- }
- } while (buf & OTP_STATUS_BUSY_);
+ ret = lan743x_otp_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
}
+ lan743x_otp_power_down(adapter);
+
return 0;
}
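
The refactored OTP helpers above share a single bounded busy-wait instead of
duplicating the polling loop in every caller. The general shape of that
idiom, with a hypothetical register accessor and status bit:

        /* Poll a BUSY bit for at most about one second (illustrative only). */
        static int example_wait_not_busy(struct example_adapter *adapter)
        {
                unsigned long timeout = jiffies + HZ;
                u32 status;

                do {
                        if (time_after(jiffies, timeout))
                                return -EIO;    /* hardware never went idle */
                        udelay(1);
                        status = example_csr_read(adapter, EXAMPLE_STATUS);
                } while (status & EXAMPLE_STATUS_BUSY);

                return 0;
        }
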
@@ -120,6 +197,9 @@ static int lan743x_eeprom_read(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_eeprom_confirm_not_busy(adapter);
if (retval)
return retval;
@@ -148,6 +228,9 @@ static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_eeprom_confirm_not_busy(adapter);
if (retval)
return retval;
@@ -207,6 +290,11 @@ static void lan743x_ethtool_set_msglevel(struct net_device *netdev,
static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
+ return MAX_OTP_SIZE;
+
return MAX_EEPROM_SIZE;
}
@@ -214,8 +302,14 @@ static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
+ ret = lan743x_otp_read(adapter, ee->offset, ee->len, data);
+ else
+ ret = lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
- return lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+ return ret;
}
static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
@@ -224,17 +318,18 @@ static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
struct lan743x_adapter *adapter = netdev_priv(netdev);
int ret = -EINVAL;
- if (ee->magic == LAN743X_EEPROM_MAGIC)
- ret = lan743x_eeprom_write(adapter, ee->offset, ee->len,
- data);
- /* Beware! OTP is One Time Programming ONLY!
- * So do some strict condition check before messing up
- */
- else if ((ee->magic == LAN743X_OTP_MAGIC) &&
- (ee->offset == 0) &&
- (ee->len == MAX_EEPROM_SIZE) &&
- (data[0] == OTP_INDICATOR_1))
- ret = lan743x_otp_write(adapter, ee->offset, ee->len, data);
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
+ /* Beware! OTP is One Time Programming ONLY! */
+ if (ee->magic == LAN743X_OTP_MAGIC) {
+ ret = lan743x_otp_write(adapter, ee->offset,
+ ee->len, data);
+ }
+ } else {
+ if (ee->magic == LAN743X_EEPROM_MAGIC) {
+ ret = lan743x_eeprom_write(adapter, ee->offset,
+ ee->len, data);
+ }
+ }
return ret;
}
@@ -360,6 +455,10 @@ static const u32 lan743x_set2_hw_cnt_addr[] = {
STAT_TX_COUNTER_ROLLOVER_STATUS
};
+static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "OTP_ACCESS",
+};
+
static void lan743x_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
@@ -375,6 +474,10 @@ static void lan743x_ethtool_get_strings(struct net_device *netdev,
lan743x_set2_hw_cnt_strings,
sizeof(lan743x_set2_hw_cnt_strings));
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, lan743x_priv_flags_strings,
+ sizeof(lan743x_priv_flags_strings));
+ break;
}
}
@@ -399,6 +502,22 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
}
}
+static u32 lan743x_ethtool_get_priv_flags(struct net_device *netdev)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->flags;
+}
+
+static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ adapter->flags = flags;
+
+ return 0;
+}
+
static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
@@ -411,6 +530,8 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
return ret;
}
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(lan743x_priv_flags_strings);
default:
return -EOPNOTSUPP;
}
@@ -705,6 +826,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.set_eeprom = lan743x_ethtool_set_eeprom,
.get_strings = lan743x_ethtool_get_strings,
.get_ethtool_stats = lan743x_ethtool_get_ethtool_stats,
+ .get_priv_flags = lan743x_ethtool_get_priv_flags,
+ .set_priv_flags = lan743x_ethtool_set_priv_flags,
.get_sset_count = lan743x_ethtool_get_sset_count,
.get_rxnfc = lan743x_ethtool_get_rxnfc,
.get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 20c9377e99cb..4d1b4a24907f 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
memset(&ksettings, 0, sizeof(ksettings));
phy_ethtool_get_link_ksettings(netdev, &ksettings);
- local_advertisement = phy_read(phydev, MII_ADVERTISE);
- if (local_advertisement < 0)
- return;
-
- remote_advertisement = phy_read(phydev, MII_LPA);
- if (remote_advertisement < 0)
- return;
+ local_advertisement =
+ linkmode_adv_to_mii_adv_t(phydev->advertising);
+ remote_advertisement =
+ linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
lan743x_phy_update_flowcontrol(adapter,
ksettings.base.duplex,
@@ -1403,7 +1400,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
}
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
- unsigned int frame_length)
+ unsigned int frame_length,
+ int nr_frags)
{
/* called only from within lan743x_tx_xmit_frame.
* assuming tx->ring_lock has already been acquired.
@@ -1413,6 +1411,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
/* wrap up previous descriptor */
tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+ if (nr_frags <= 0) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = tx->frame_data0;
@@ -1517,8 +1519,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
u32 tx_tail_flags = 0;
/* wrap up previous descriptor */
- tx->frame_data0 |= TX_DESC_DATA0_LS_;
- tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+ TX_DESC_DATA0_DTYPE_DATA_) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
@@ -1603,7 +1608,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
if (gso)
- lan743x_tx_frame_add_lso(tx, frame_length);
+ lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
if (nr_frags <= 0)
goto finish;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 2d6eea18973e..3b02eeae5f45 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -26,6 +26,8 @@
#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF)
#define HW_CFG (0x010)
+#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
+#define HW_CFG_EE_OTP_RELOAD_ BIT(4)
#define HW_CFG_LRST_ BIT(1)
#define PMT_CTL (0x014)
@@ -453,17 +455,19 @@
#define OTP_PWR_DN (0x1000)
#define OTP_PWR_DN_PWRDN_N_ BIT(0)
-#define OTP_ADDR1 (0x1004)
-#define OTP_ADDR1_15_11_MASK_ (0x1F)
-
-#define OTP_ADDR2 (0x1008)
-#define OTP_ADDR2_10_3_MASK_ (0xFF)
+#define OTP_ADDR_HIGH (0x1004)
+#define OTP_ADDR_LOW (0x1008)
#define OTP_PRGM_DATA (0x1010)
#define OTP_PRGM_MODE (0x1014)
#define OTP_PRGM_MODE_BYTE_ BIT(0)
+#define OTP_READ_DATA (0x1018)
+
+#define OTP_FUNC_CMD (0x1020)
+#define OTP_FUNC_CMD_READ_ BIT(0)
+
#define OTP_TST_CMD (0x1024)
#define OTP_TST_CMD_PRGVRFY_ BIT(3)
@@ -713,6 +717,9 @@ struct lan743x_adapter {
struct lan743x_phy phy;
struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS];
struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS];
+
+#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
+ u32 flags;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index b34055ac476f..e1651756bf9d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -81,11 +81,13 @@ static void moxart_mac_free_memory(struct net_device *ndev)
priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
- dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
+ dma_free_coherent(&priv->pdev->dev,
+ TX_REG_DESC_SIZE * TX_DESC_NUM,
priv->tx_desc_base, priv->tx_base);
if (priv->rx_desc_base)
- dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
+ dma_free_coherent(&priv->pdev->dev,
+ RX_REG_DESC_SIZE * RX_DESC_NUM,
priv->rx_desc_base, priv->rx_base);
kfree(priv->tx_buf_base);
@@ -298,7 +300,7 @@ static void moxart_tx_finished(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
- dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
+ dev_consume_skb_irq(priv->tx_skb[tx_tail]);
priv->tx_skb[tx_tail] = NULL;
tx_tail = TX_NEXT(tx_tail);
@@ -476,6 +478,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
+ priv->pdev = pdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ndev->base_addr = res->start;
@@ -491,7 +494,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -499,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index bee608b547d1..bf4c3029cd0c 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -292,6 +292,7 @@
#define LINK_STATUS 0x4
struct moxart_mac_priv_t {
+ struct platform_device *pdev;
void __iomem *base;
unsigned int reg_maccr;
unsigned int reg_imr;
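
The moxart change above is needed because dma_alloc_coherent() and
dma_free_coherent() expect the struct device that actually performs the DMA;
the old NULL-device special case was on its way out around this time. With
the platform device stashed in the private struct, both calls become
straightforward (a rough recap of the pattern used in the patch):

        /* Coherent descriptor ring tied to the platform device. */
        priv->tx_desc_base = dma_alloc_coherent(&priv->pdev->dev,
                                                TX_REG_DESC_SIZE * TX_DESC_NUM,
                                                &priv->tx_base, GFP_KERNEL);
        if (!priv->tx_desc_base)
                return -ENOMEM;

        /* ... and the matching release against the same device. */
        dma_free_coherent(&priv->pdev->dev, TX_REG_DESC_SIZE * TX_DESC_NUM,
                          priv->tx_desc_base, priv->tx_base);
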
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 215a45374d7b..a1d0d6e42533 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -721,7 +721,8 @@ static void ocelot_get_stats64(struct net_device *dev,
static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags)
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
@@ -915,6 +916,18 @@ static int ocelot_set_features(struct net_device *dev,
return 0;
}
+static int ocelot_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ ppid->id_len = sizeof(ocelot->base_mac);
+ memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -929,6 +942,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
+ .ndo_get_port_parent_id = ocelot_get_port_parent_id,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -1012,25 +1026,6 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int ocelot_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
- struct ocelot *ocelot = ocelot_port->ocelot;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(ocelot->base_mac);
- memcpy(&attr->u.ppid.id, &ocelot->base_mac,
- attr->u.ppid.id_len);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
struct switchdev_trans *trans,
u8 state)
@@ -1329,11 +1324,6 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-static const struct switchdev_ops ocelot_port_switchdev_ops = {
- .switchdev_port_attr_get = ocelot_port_attr_get,
- .switchdev_port_attr_set = ocelot_port_attr_set,
-};
-
static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
struct net_device *bridge)
{
@@ -1588,6 +1578,28 @@ struct notifier_block ocelot_netdevice_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_netdevice_nb);
+static int ocelot_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct notifier_block ocelot_switchdev_nb __read_mostly = {
+ .notifier_call = ocelot_switchdev_event,
+};
+EXPORT_SYMBOL(ocelot_switchdev_nb);
+
static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -1606,6 +1618,11 @@ static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
ocelot_netdevice_dev_check,
ocelot_port_obj_del);
return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_attr_set);
+ return notifier_from_errno(err);
}
return NOTIFY_DONE;
@@ -1639,7 +1656,6 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->netdev_ops = &ocelot_port_netdev_ops;
dev->ethtool_ops = &ocelot_ethtool_ops;
- dev->switchdev_ops = &ocelot_port_switchdev_ops;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 086775f7b52f..ba3b3380b4d0 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -499,6 +499,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
struct phy_device *phy);
extern struct notifier_block ocelot_netdevice_nb;
+extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index ca3ea2fbfcd0..e7f90101d2e0 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -267,6 +267,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct phy *serdes;
void __iomem *regs;
char res_name[8];
+ int phy_mode;
u32 port;
if (of_property_read_u32(portnp, "reg", &port))
@@ -292,11 +293,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
return err;
- err = of_get_phy_mode(portnp);
- if (err < 0)
+ phy_mode = of_get_phy_mode(portnp);
+ if (phy_mode < 0)
ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
else
- ocelot->ports[port]->phy_mode = err;
+ ocelot->ports[port]->phy_mode = phy_mode;
switch (ocelot->ports[port]->phy_mode) {
case PHY_INTERFACE_MODE_NA:
@@ -304,6 +305,13 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
case PHY_INTERFACE_MODE_SGMII:
break;
case PHY_INTERFACE_MODE_QSGMII:
+ /* Ensure clock signals and speed is set on all
+ * QSGMII links
+ */
+ ocelot_port_writel(ocelot->ports[port],
+ DEV_CLOCK_CFG_LINK_SPEED
+ (OCELOT_SPEED_1000),
+ DEV_CLOCK_CFG);
break;
default:
dev_err(ocelot->dev,
@@ -329,6 +337,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
register_netdevice_notifier(&ocelot_netdevice_nb);
+ register_switchdev_notifier(&ocelot_switchdev_nb);
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
dev_info(&pdev->dev, "Ocelot switch probed\n");
@@ -345,6 +354,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
return 0;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 5f384f73007d..e0340f778d8f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1408,7 +1408,7 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
if (skb) {
ss->stats.tx_bytes += skb->len;
ss->stats.tx_packets++;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
if (len)
pci_unmap_single(pdev,
dma_unmap_addr(&tx->info[idx],
@@ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
- ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
- &ss->rx_done.bus,
- GFP_KERNEL);
+ ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+ &ss->rx_done.bus,
+ GFP_KERNEL);
if (ss->rx_done.entry == NULL)
goto abort;
bytes = sizeof(*ss->fw_stats);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index b9a1a9f999ea..1a2634cbbb69 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2173,7 +2173,7 @@ static void netdev_tx_done(struct net_device *dev)
np->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
/* Free the original skb. */
- dev_kfree_skb_irq(np->tx_skbuff[entry]);
+ dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
}
if (netif_queue_stopped(dev) &&
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 958fced4dacf..9098ee7fe0d1 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1003,7 +1003,7 @@ static void do_tx_done(struct net_device *ndev)
addr,
len,
PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
} else
pci_unmap_page(dev->pci_dev,
@@ -1869,56 +1869,28 @@ static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigne
static void ns83820_probe_phy(struct net_device *ndev)
{
struct ns83820 *dev = PRIV(ndev);
- static int first;
- int i;
-#define MII_PHYIDR1 0x02
-#define MII_PHYIDR2 0x03
-
-#if 0
- if (!first) {
- unsigned tmp;
- ns83820_mii_read_reg(dev, 1, 0x09);
- ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e);
-
- tmp = ns83820_mii_read_reg(dev, 1, 0x00);
- ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000);
- udelay(1300);
- ns83820_mii_read_reg(dev, 1, 0x09);
- }
-#endif
- first = 1;
-
- for (i=1; i<2; i++) {
- int j;
- unsigned a, b;
- a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1);
- b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2);
-
- //printk("%s: phy %d: 0x%04x 0x%04x\n",
- // ndev->name, i, a, b);
-
- for (j=0; j<0x16; j+=4) {
- dprintk("%s: [0x%02x] %04x %04x %04x %04x\n",
- ndev->name, j,
- ns83820_mii_read_reg(dev, i, 0 + j),
- ns83820_mii_read_reg(dev, i, 1 + j),
- ns83820_mii_read_reg(dev, i, 2 + j),
- ns83820_mii_read_reg(dev, i, 3 + j)
- );
- }
- }
- {
- unsigned a, b;
- /* read firmware version: memory addr is 0x8402 and 0x8403 */
- ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
- ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
- a = ns83820_mii_read_reg(dev, 1, 0x1d);
-
- ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
- ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
- b = ns83820_mii_read_reg(dev, 1, 0x1d);
- dprintk("version: 0x%04x 0x%04x\n", a, b);
+ int j;
+ unsigned a, b;
+
+ for (j = 0; j < 0x16; j += 4) {
+ dprintk("%s: [0x%02x] %04x %04x %04x %04x\n",
+ ndev->name, j,
+ ns83820_mii_read_reg(dev, 1, 0 + j),
+ ns83820_mii_read_reg(dev, 1, 1 + j),
+ ns83820_mii_read_reg(dev, 1, 2 + j),
+ ns83820_mii_read_reg(dev, 1, 3 + j)
+ );
}
+
+ /* read firmware version: memory addr is 0x8402 and 0x8403 */
+ ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
+ ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
+ a = ns83820_mii_read_reg(dev, 1, 0x1d);
+
+ ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
+ ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
+ b = ns83820_mii_read_reg(dev, 1, 0x1d);
+ dprintk("version: 0x%04x 0x%04x\n", a, b);
}
#endif
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index c805dcbebd02..aaec00912ea0 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -328,7 +328,7 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
}
/* We must free the original skb */
- dev_kfree_skb_irq(lp->tx_skb[entry]);
+ dev_consume_skb_irq(lp->tx_skb[entry]);
lp->tx_skb[entry] = NULL;
/* and unmap DMA buffer */
dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 82be90075695..feda9644289d 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3055,7 +3055,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
/* Updating the statistics block */
swstats->mem_freed += skb->truesize;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
get_info.offset++;
if (get_info.offset == get_info.fifo_len + 1)
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 5ae3fa82909f..b877acec5cde 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -114,7 +114,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
/* free SKBs */
for (temp = completed; temp != skb_ptr; temp++)
- dev_kfree_skb_irq(*temp);
+ dev_consume_skb_irq(*temp);
} while (more);
}
@@ -2553,7 +2553,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
vxge_debug_init(VXGE_ERR,
"%s: Defaulting to INTA",
vdev->ndev->name);
- goto INTA_MODE;
+ goto INTA_MODE;
}
msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 66f15b05b65e..549898d5d450 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -19,7 +19,6 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
- depends on MAY_USE_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
 cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..f272247d1708 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1266,7 +1266,7 @@ wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u64 imm = insn->imm; /* sign extend */
if (skip) {
- meta->skip = true;
+ meta->flags |= FLAG_INSN_SKIP_NOOP;
return 0;
}
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum alu_op alu_op, bool skip)
+ enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
- if (skip) {
- meta->skip = true;
- return 0;
- }
-
wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
@@ -1334,8 +1329,9 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
- wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
- insn->src_reg * 2 + 1, br_mask, insn->off);
+ if (is_mbpf_jmp64(meta))
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
+ insn->src_reg * 2 + 1, br_mask, insn->off);
return 0;
}
@@ -1390,13 +1386,15 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
else
emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
- tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
- if (!code->swap)
- emit_alu(nfp_prog, reg_none(),
- reg_a(reg + 1), carry_op, tmp_reg);
- else
- emit_alu(nfp_prog, reg_none(),
- tmp_reg, carry_op, reg_a(reg + 1));
+ if (is_mbpf_jmp64(meta)) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ if (!code->swap)
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(reg + 1), carry_op, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(),
+ tmp_reg, carry_op, reg_a(reg + 1));
+ }
emit_br(nfp_prog, code->br_mask, insn->off, 0);
@@ -1423,8 +1421,9 @@ static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
- emit_alu(nfp_prog, reg_none(),
- reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
+ if (is_mbpf_jmp64(meta))
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
@@ -1963,6 +1962,9 @@ static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
*/
static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
+ if (!shift_amt)
+ return 0;
+
if (shift_amt < 32) {
emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1),
SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF,
@@ -2075,6 +2077,9 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
*/
static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
+ if (!shift_amt)
+ return 0;
+
if (shift_amt < 32) {
emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_DSHF, shift_amt);
@@ -2176,6 +2181,9 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
*/
static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
+ if (!shift_amt)
+ return 0;
+
if (shift_amt < 32) {
emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_DSHF, shift_amt);
@@ -2309,7 +2317,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
}
static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2319,7 +2327,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
}
static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2329,7 +2337,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
}
static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2339,7 +2347,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
}
static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2349,7 +2357,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
}
static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2384,10 +2392,13 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
- /* Set signedness bit (MSB of result). */
- emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0));
- emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst),
- SHF_SC_R_SHF, shift_amt);
+ if (shift_amt) {
+ /* Set signedness bit (MSB of result). */
+ emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR,
+ reg_imm(0));
+ emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
+ reg_b(dst), SHF_SC_R_SHF, shift_amt);
+ }
wrp_immed(nfp_prog, reg_both(dst + 1), 0);
return 0;
@@ -2425,18 +2436,75 @@ static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return __ashr_imm(nfp_prog, dst, insn->imm);
}
+static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+{
+ if (shift_amt)
+ emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_R_SHF, shift_amt);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ return 0;
+}
+
+static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
+
+ return __shr_imm(nfp_prog, dst, insn->imm);
+}
+
+static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 umin, umax;
+ u8 dst, src;
+
+ dst = insn->dst_reg * 2;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
+ if (umin == umax)
+ return __shr_imm(nfp_prog, dst, umin);
+
+ src = insn->src_reg * 2;
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
+ emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_R_SHF);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ return 0;
+}
+
+static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+{
+ if (shift_amt)
+ emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
+ reg_b(dst), SHF_SC_L_SHF, shift_amt);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ return 0;
+}
+
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
- if (!insn->imm)
- return 1; /* TODO: zero shift means indirect */
+ return __shl_imm(nfp_prog, dst, insn->imm);
+}
- emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
- reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
- SHF_SC_L_SHF, insn->imm);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 umin, umax;
+ u8 dst, src;
+
+ dst = insn->dst_reg * 2;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
+ if (umin == umax)
+ return __shl_imm(nfp_prog, dst, umin);
+ src = insn->src_reg * 2;
+ shl_reg64_lt32_low(nfp_prog, dst, src);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
return 0;
}
@@ -3048,6 +3116,19 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ swreg tmp_reg;
+
+ tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -3061,9 +3142,10 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
/* Upper word of the mask can only be 0 or ~0 from sign extension,
* so either ignore it or OR the whole thing in.
*/
- if (imm >> 32)
+ if (is_mbpf_jmp64(meta) && imm >> 32) {
emit_alu(nfp_prog, reg_none(),
reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
+ }
emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
@@ -3073,11 +3155,16 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
+ bool is_jmp32 = is_mbpf_jmp32(meta);
swreg tmp_reg;
if (!imm) {
- emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
- ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
+ if (is_jmp32)
+ emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE,
+ reg_b(insn->dst_reg * 2));
+ else
+ emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
+ ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
}
@@ -3087,6 +3174,9 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ if (is_jmp32)
+ return 0;
+
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
emit_alu(nfp_prog, reg_none(),
reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
@@ -3101,10 +3191,13 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
- emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
- ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
- emit_alu(nfp_prog, reg_none(),
- imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
+ if (is_mbpf_jmp64(meta)) {
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR,
+ reg_b(insn->src_reg * 2 + 1));
+ emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR,
+ imm_b(nfp_prog));
+ }
emit_br(nfp_prog, BR_BEQ, insn->off, 0);
return 0;
@@ -3182,7 +3275,7 @@ bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
} else {
ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
- emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
+ emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1);
offset_br = nfp_prog_current_offset(nfp_prog);
}
wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
@@ -3321,7 +3414,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_DIV | BPF_X] = div_reg,
[BPF_ALU | BPF_DIV | BPF_K] = div_imm,
[BPF_ALU | BPF_NEG] = neg_reg,
+ [BPF_ALU | BPF_LSH | BPF_X] = shl_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_ALU | BPF_RSH | BPF_X] = shr_reg,
+ [BPF_ALU | BPF_RSH | BPF_K] = shr_imm,
[BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg,
[BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm,
[BPF_ALU | BPF_END | BPF_X] = end_reg32,
@@ -3369,6 +3465,28 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm,
+ [BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSGT | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSGE | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSLT | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSLE | BPF_K] = cmp_imm,
+ [BPF_JMP32 | BPF_JSET | BPF_K] = jset_imm,
+ [BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm,
+ [BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg,
+ [BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JLT | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSGT | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSGE | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSLT | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSLE | BPF_X] = cmp_reg,
+ [BPF_JMP32 | BPF_JSET | BPF_X] = jset_reg,
+ [BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
[BPF_JMP | BPF_EXIT] = jmp_exit,
};
@@ -3395,9 +3513,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
int err;
list_for_each_entry(meta, &nfp_prog->insns, l) {
- if (meta->skip)
+ if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
- if (BPF_CLASS(meta->insn.code) != BPF_JMP)
+ if (!is_mbpf_jmp(meta))
continue;
if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
!nfp_is_main_function(meta))
@@ -3439,7 +3557,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
jmp_dst = meta->jmp_dst;
- if (jmp_dst->skip) {
+ if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) {
pr_err("Branch landing on removed instruction!!\n");
return -ELOOP;
}
@@ -3689,7 +3807,7 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
return nfp_prog->error;
}
- if (meta->skip) {
+ if (meta->flags & FLAG_INSN_SKIP_MASK) {
nfp_prog->n_translated++;
continue;
}
@@ -3737,10 +3855,10 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
/* Programs start with R6 = R1 but we ignore the skb pointer */
if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
insn.src_reg == 1 && insn.dst_reg == 6)
- meta->skip = true;
+ meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
/* Return as soon as something doesn't match */
- if (!meta->skip)
+ if (!(meta->flags & FLAG_INSN_SKIP_MASK))
return;
}
}
@@ -3755,19 +3873,17 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
list_for_each_entry(meta, &nfp_prog->insns, l) {
struct bpf_insn insn = meta->insn;
- if (meta->skip)
+ if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
- if (BPF_CLASS(insn.code) != BPF_ALU &&
- BPF_CLASS(insn.code) != BPF_ALU64 &&
- BPF_CLASS(insn.code) != BPF_JMP)
+ if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta))
continue;
if (BPF_SRC(insn.code) != BPF_K)
continue;
if (insn.imm >= 0)
continue;
- if (BPF_CLASS(insn.code) == BPF_JMP) {
+ if (is_mbpf_jmp(meta)) {
switch (BPF_OP(insn.code)) {
case BPF_JGE:
case BPF_JSGE:
@@ -3829,7 +3945,7 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
continue;
- meta2->skip = true;
+ meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
}
@@ -3869,8 +3985,8 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
meta3->flags & FLAG_INSN_IS_JUMP_DST)
continue;
- meta2->skip = true;
- meta3->skip = true;
+ meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
+ meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
}
@@ -4065,7 +4181,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
}
head_ld_meta->paired_st = &head_st_meta->insn;
- head_st_meta->skip = true;
+ head_st_meta->flags |=
+ FLAG_INSN_SKIP_PREC_DEPENDENT;
} else {
head_ld_meta->ldst_gather_len = 0;
}
@@ -4098,8 +4215,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
head_ld_meta = meta1;
head_st_meta = meta2;
} else {
- meta1->skip = true;
- meta2->skip = true;
+ meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
+ meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
@@ -4124,7 +4241,7 @@ static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
if (meta->flags & FLAG_INSN_IS_JUMP_DST)
cache_avail = false;
- if (meta->skip)
+ if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
insn = &meta->insn;
@@ -4210,7 +4327,7 @@ start_new:
}
list_for_each_entry(meta, &nfp_prog->insns, l) {
- if (meta->skip)
+ if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
@@ -4246,7 +4363,8 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
u32 id;
nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
- if (meta1->skip || meta2->skip)
+ if (meta1->flags & FLAG_INSN_SKIP_MASK ||
+ meta2->flags & FLAG_INSN_SKIP_MASK)
continue;
if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
@@ -4325,7 +4443,7 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
return ret;
}
-void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
@@ -4336,7 +4454,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
unsigned int dst_idx;
bool pseudo_call;
- if (BPF_CLASS(code) != BPF_JMP)
+ if (!is_mbpf_jmp(meta))
continue;
if (BPF_OP(code) == BPF_EXIT)
continue;
@@ -4353,7 +4471,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
else
dst_idx = meta->n + 1 + meta->insn.off;
- dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);
+ dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx);
if (pseudo_call)
dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;
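
The jit.c hunks above gate every high-word comparison step on is_mbpf_jmp64(), because a BPF_JMP32 conditional only considers the low 32 bits of its operands. The following is a minimal, standalone user-space C sketch of that semantic difference; it is illustrative only, not part of the diff, and all names in it are local to the example.

/* Sketch: 64-bit vs 32-bit conditional comparison semantics. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 64-bit conditional (BPF_JMP): both halves of the registers matter. */
static bool jeq64(uint64_t dst, uint64_t src)
{
	return dst == src;
}

/* 32-bit conditional (BPF_JMP32): only the low words take part, so a
 * JIT may skip the extra ALU step on the high word entirely.
 */
static bool jeq32(uint64_t dst, uint64_t src)
{
	return (uint32_t)dst == (uint32_t)src;
}

int main(void)
{
	uint64_t a = 0x100000001ULL, b = 0x200000001ULL;

	/* The values differ only in the high word: jeq32 is taken, jeq64 is not. */
	printf("jeq64: %d, jeq32: %d\n", jeq64(a, b), jeq32(a, b));
	return 0;
}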
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index dccae0319204..275de9f4c61c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app)
app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
}
- bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops);
+ bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf);
err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
if (err)
goto err_free_neutral_maps;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 941277936475..b25a48218bcf 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -243,6 +243,16 @@ struct nfp_bpf_reg_state {
#define FLAG_INSN_IS_JUMP_DST BIT(0)
#define FLAG_INSN_IS_SUBPROG_START BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2)
+/* Instruction is pointless, noop even on its own */
+#define FLAG_INSN_SKIP_NOOP BIT(3)
+/* Instruction is optimized out based on preceding instructions */
+#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
+/* Instruction is optimized by the verifier */
+#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5)
+
+#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \
+ FLAG_INSN_SKIP_PREC_DEPENDENT | \
+ FLAG_INSN_SKIP_VERIFIER_OPT)
/**
* struct nfp_insn_meta - BPF instruction wrapper
@@ -271,7 +281,6 @@ struct nfp_bpf_reg_state {
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
* @subprog_idx: index of subprogram to which the instruction belongs
- * @skip: skip this instruction (optimized out)
* @double_cb: callback for second part of the instruction
* @l: link on nfp_prog->insns list
*/
@@ -319,7 +328,6 @@ struct nfp_insn_meta {
unsigned short n;
unsigned short flags;
unsigned short subprog_idx;
- bool skip;
instr_cb_t double_cb;
struct list_head l;
@@ -357,6 +365,21 @@ static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}
+static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
+{
+ return mbpf_class(meta) == BPF_JMP32;
+}
+
+static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
+{
+ return mbpf_class(meta) == BPF_JMP;
+}
+
+static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
+{
+ return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
+}
+
static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
@@ -407,6 +430,20 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
+static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
+{
+ u8 op;
+
+ if (is_mbpf_jmp32(meta))
+ return true;
+
+ if (!is_mbpf_jmp64(meta))
+ return false;
+
+ op = mbpf_op(meta);
+ return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
+}
+
static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
struct bpf_insn insn = meta->insn;
@@ -457,6 +494,7 @@ struct nfp_bpf_subprog_info {
* @subprog_cnt: number of sub-programs, including main function
* @map_records: the map record pointers from bpf->maps_neutral
* @subprog: pointer to an array of objects holding info about sub-programs
+ * @n_insns: number of instructions on @insns list
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -489,6 +527,7 @@ struct nfp_prog {
struct nfp_bpf_neutral_map **map_records;
struct nfp_bpf_subprog_info *subprog;
+ unsigned int n_insns;
struct list_head insns;
};
@@ -505,7 +544,7 @@ struct nfp_bpf_vnic {
};
bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
-void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
@@ -513,6 +552,10 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);
+int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+
extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
struct netdev_bpf;
@@ -526,7 +569,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- unsigned int insn_idx, unsigned int n_insns);
+ unsigned int insn_idx);
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
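
The main.h hunks above replace the single boolean @skip with three FLAG_INSN_SKIP_* bits plus a combined FLAG_INSN_SKIP_MASK, so the loops in jit.c can test one mask while still recording why an instruction was dropped. Below is a standalone sketch of that pattern; it is illustrative only, and the flag names are local to the example.

/* Sketch: consolidating several skip reasons behind one mask test. */
#include <stdbool.h>
#include <stdio.h>

#define SKIP_NOOP          (1u << 0)  /* a no-op even on its own */
#define SKIP_PREC_DEP      (1u << 1)  /* removed based on preceding insns */
#define SKIP_VERIFIER_OPT  (1u << 2)  /* eliminated by the verifier */
#define SKIP_MASK          (SKIP_NOOP | SKIP_PREC_DEP | SKIP_VERIFIER_OPT)

struct insn_meta {
	unsigned int flags;
};

static bool must_skip(const struct insn_meta *meta)
{
	/* One test covers every skip reason, yet the reason is preserved. */
	return meta->flags & SKIP_MASK;
}

int main(void)
{
	struct insn_meta m = { .flags = 0 };

	m.flags |= SKIP_VERIFIER_OPT;  /* e.g. dead code removed upstream */
	printf("skip: %d\n", must_skip(&m));
	return 0;
}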
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index f0283854fade..15dce97650a5 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -163,8 +163,9 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
list_add_tail(&meta->l, &nfp_prog->insns);
}
+ nfp_prog->n_insns = cnt;
- nfp_bpf_jit_prepare(nfp_prog, cnt);
+ nfp_bpf_jit_prepare(nfp_prog);
return 0;
}
@@ -184,8 +185,6 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
- struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
- struct nfp_app *app = nn->app;
struct nfp_prog *nfp_prog;
int ret;
@@ -196,7 +195,7 @@ static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
- nfp_prog->bpf = app->priv;
+ nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);
ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
if (ret)
@@ -219,6 +218,10 @@ static int nfp_bpf_translate(struct bpf_prog *prog)
unsigned int max_instr;
int err;
+ /* We depend on dead code elimination succeeding */
+ if (prog->aux->offload->opt_failed)
+ return -EINVAL;
+
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
@@ -591,6 +594,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
.insn_hook = nfp_verify_insn,
.finalize = nfp_bpf_finalize,
+ .replace_insn = nfp_bpf_opt_replace_insn,
+ .remove_insns = nfp_bpf_opt_remove_insns,
.prepare = nfp_bpf_verifier_prep,
.translate = nfp_bpf_translate,
.destroy = nfp_bpf_destroy,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 337bb862ec1d..36f56eb4cbe2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -18,15 +18,15 @@
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- unsigned int insn_idx, unsigned int n_insns)
+ unsigned int insn_idx)
{
unsigned int forward, backward, i;
backward = meta->n - insn_idx;
forward = insn_idx - meta->n;
- if (min(forward, backward) > n_insns - insn_idx - 1) {
- backward = n_insns - insn_idx - 1;
+ if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) {
+ backward = nfp_prog->n_insns - insn_idx - 1;
meta = nfp_prog_last_meta(nfp_prog);
}
if (min(forward, backward) > insn_idx && backward > insn_idx) {
@@ -629,7 +629,7 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
- meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx);
nfp_prog->verifier_meta = meta;
if (!nfp_bpf_supported_opcode(meta->insn.code)) {
@@ -690,8 +690,7 @@ nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
return 0;
}
-static unsigned int
-nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
+static unsigned int nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
unsigned int max_depth = 0, depth = 0, frame = 0;
@@ -726,7 +725,7 @@ continue_subprog:
/* Find the callee and start processing it. */
meta = nfp_bpf_goto_meta(nfp_prog, meta,
- meta->n + 1 + meta->insn.imm, cnt);
+ meta->n + 1 + meta->insn.imm);
idx = meta->subprog_idx;
frame++;
goto process_subprog;
@@ -778,8 +777,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
nn = netdev_priv(env->prog->aux->offload->netdev);
max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
- nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog,
- env->prog->len);
+ nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog);
if (nfp_prog->stack_size > max_stack) {
pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
nfp_prog->stack_size, max_stack);
@@ -788,3 +786,61 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
return 0;
}
+
+int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn)
+{
+ struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
+
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
+ nfp_prog->verifier_meta = meta;
+
+ /* conditional jump to jump conversion */
+ if (is_mbpf_cond_jump(meta) &&
+ insn->code == (BPF_JMP | BPF_JA | BPF_K)) {
+ unsigned int tgt_off;
+
+ tgt_off = off + insn->off + 1;
+
+ if (!insn->off) {
+ meta->jmp_dst = list_next_entry(meta, l);
+ meta->jump_neg_op = false;
+ } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
+ pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n",
+ off, meta->jmp_dst->n,
+ aux_data[tgt_off].orig_idx);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n",
+ meta->insn.code, insn->code);
+ return -EINVAL;
+}
+
+int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
+{
+ struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
+ unsigned int i;
+
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
+
+ for (i = 0; i < cnt; i++) {
+ if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns))
+ return -EINVAL;
+
+ /* doesn't count if it already has the flag */
+ if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT)
+ i--;
+
+ meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT;
+ meta = list_next_entry(meta, l);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 8d54b36afee8..eeda4ed98333 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -3,7 +3,6 @@
#include <linux/bitfield.h>
#include <net/pkt_cls.h>
-#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
@@ -37,7 +36,7 @@ static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
- const struct tc_action *action)
+ const struct flow_action_entry *act)
{
size_t act_size = sizeof(struct nfp_fl_push_vlan);
u16 tmp_push_vlan_tci;
@@ -45,17 +44,17 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
push_vlan->reserved = 0;
- push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
+ push_vlan->vlan_tpid = act->vlan.proto;
tmp_push_vlan_tci =
- FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
NFP_FL_PUSH_VLAN_CFI;
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
static int
-nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
+nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_flow, int act_len)
{
size_t act_size = sizeof(struct nfp_fl_pre_lag);
@@ -63,7 +62,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
struct net_device *out_dev;
int err;
- out_dev = tcf_mirred_dev(action);
+ out_dev = act->dev;
if (!out_dev || !netif_is_lag_master(out_dev))
return 0;
@@ -92,7 +91,8 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
- const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
+ const struct flow_action_entry *act,
+ struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
@@ -104,7 +104,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- out_dev = tcf_mirred_dev(action);
+ out_dev = act->dev;
if (!out_dev)
return -EOPNOTSUPP;
@@ -137,7 +137,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
if (nfp_netdev_is_nfp_repr(in_dev)) {
/* Confirm ingress and egress are on same device. */
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ if (!netdev_port_same_parent_id(in_dev, out_dev))
return -EOPNOTSUPP;
}
@@ -155,9 +155,9 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
- const struct tc_action *action)
+ const struct flow_action_entry *act)
{
- struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+ const struct ip_tunnel_info *tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
switch (tun->key.tp_dst) {
@@ -195,9 +195,9 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
- const struct tc_action *action)
+ const struct flow_action_entry *act)
{
- struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
int opt_len, opt_cnt, act_start, tot_push_len;
u8 *src = ip_tunnel_info_opts(ip_tun);
@@ -259,13 +259,13 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
struct nfp_fl_set_ipv4_udp_tun *set_tun,
- const struct tc_action *action,
+ const struct flow_action_entry *act,
struct nfp_fl_pre_tunnel *pre_tun,
enum nfp_flower_tun_type tun_type,
struct net_device *netdev)
{
size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
- struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ const struct ip_tunnel_info *ip_tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
@@ -345,7 +345,7 @@ static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
}
static int
-nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_eth *set_eth)
{
u32 exact, mask;
@@ -353,8 +353,8 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
if (off + 4 > ETH_ALEN * 2)
return -EOPNOTSUPP;
- mask = ~tcf_pedit_mask(action, idx);
- exact = tcf_pedit_val(action, idx);
+ mask = ~act->mangle.mask;
+ exact = act->mangle.val;
if (exact & ~mask)
return -EOPNOTSUPP;
@@ -376,7 +376,7 @@ struct ipv4_ttl_word {
};
static int
-nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_ip4_addrs *set_ip_addr,
struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
@@ -387,8 +387,8 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
__be32 exact, mask;
/* We are expecting tcf_pedit to return a big endian value */
- mask = (__force __be32)~tcf_pedit_mask(action, idx);
- exact = (__force __be32)tcf_pedit_val(action, idx);
+ mask = (__force __be32)~act->mangle.mask;
+ exact = (__force __be32)act->mangle.val;
if (exact & ~mask)
return -EOPNOTSUPP;
@@ -505,7 +505,7 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
}
static int
-nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
struct nfp_fl_set_ipv6_addr *ip_src,
struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
@@ -515,8 +515,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
u8 word;
/* We are expecting tcf_pedit to return a big endian value */
- mask = (__force __be32)~tcf_pedit_mask(action, idx);
- exact = (__force __be32)tcf_pedit_val(action, idx);
+ mask = (__force __be32)~act->mangle.mask;
+ exact = (__force __be32)act->mangle.val;
if (exact & ~mask)
return -EOPNOTSUPP;
@@ -541,7 +541,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
}
static int
-nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_tport *set_tport, int opcode)
{
u32 exact, mask;
@@ -549,8 +549,8 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
if (off)
return -EOPNOTSUPP;
- mask = ~tcf_pedit_mask(action, idx);
- exact = tcf_pedit_val(action, idx);
+ mask = ~act->mangle.mask;
+ exact = act->mangle.val;
if (exact & ~mask)
return -EOPNOTSUPP;
@@ -584,20 +584,22 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
}
static int
-nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
+nfp_fl_pedit(const struct flow_action_entry *act,
+ struct tc_cls_flower_offload *flow,
char *nfp_action, int *a_len, u32 *csum_updated)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
+ enum flow_action_mangle_base htype;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
- enum pedit_header_type htype;
- int idx, nkeys, err;
size_t act_size = 0;
- u32 offset, cmd;
u8 ip_proto = 0;
+ u32 offset;
+ int err;
memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
@@ -606,50 +608,41 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
memset(&set_ip_addr, 0, sizeof(set_ip_addr));
memset(&set_tport, 0, sizeof(set_tport));
memset(&set_eth, 0, sizeof(set_eth));
- nkeys = tcf_pedit_nkeys(action);
-
- for (idx = 0; idx < nkeys; idx++) {
- cmd = tcf_pedit_cmd(action, idx);
- htype = tcf_pedit_htype(action, idx);
- offset = tcf_pedit_offset(action, idx);
- if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
- return -EOPNOTSUPP;
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- err = nfp_fl_set_eth(action, idx, offset, &set_eth);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
- &set_ip_ttl_tos);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
- &set_ip6_src, &set_ip6_tc_hl_fl);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- err = nfp_fl_set_tport(action, idx, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_TCP);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- err = nfp_fl_set_tport(action, idx, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_UDP);
- break;
- default:
- return -EOPNOTSUPP;
- }
- if (err)
- return err;
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ err = nfp_fl_set_eth(act, offset, &set_eth);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
+ &set_ip_ttl_tos);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
+ &set_ip6_src, &set_ip6_tc_hl_fl);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ err = nfp_fl_set_tport(act, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ err = nfp_fl_set_tport(act, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
+ if (err)
+ return err;
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *basic;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
- ip_proto = basic->ip_proto;
+ flow_rule_match_basic(rule, &match);
+ ip_proto = match.key->ip_proto;
}
if (set_eth.head.len_lw) {
@@ -733,7 +726,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
}
static int
-nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
+nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -753,7 +746,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
+ err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
tun_out_cnt);
if (err)
return err;
@@ -764,7 +757,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
/* nfp_fl_pre_lag returns -err or size of prelag action added.
* This will be 0 if it is not egressing to a lag dev.
*/
- prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
+ prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len);
if (prelag_size < 0)
return prelag_size;
else if (prelag_size > 0 && (!last || *out_cnt))
@@ -778,7 +771,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
}
static int
-nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
+nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
@@ -791,23 +784,25 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_pop_vlan *pop_v;
int err;
- if (is_tcf_gact_shot(a)) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
- } else if (is_tcf_mirred_egress_redirect(a)) {
- err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
+ break;
+ case FLOW_ACTION_REDIRECT:
+ err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
out_cnt, csum_updated);
if (err)
return err;
-
- } else if (is_tcf_mirred_egress_mirror(a)) {
- err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
+ break;
+ case FLOW_ACTION_MIRRED:
+ err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
out_cnt, csum_updated);
if (err)
return err;
-
- } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ break;
+ case FLOW_ACTION_VLAN_POP:
if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
@@ -816,19 +811,21 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl_pop_vlan(pop_v);
*a_len += sizeof(struct nfp_fl_pop_vlan);
- } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
- nfp_fl_push_vlan(psh_v, a);
+ nfp_fl_push_vlan(psh_v, act);
*a_len += sizeof(struct nfp_fl_push_vlan);
- } else if (is_tcf_tunnel_set(a)) {
- struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
+ break;
+ case FLOW_ACTION_TUNNEL_ENCAP: {
+ const struct ip_tunnel_info *ip_tun = act->tunnel;
- *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
@@ -847,32 +844,36 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, act);
if (err)
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
+ err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun,
*tun_type, netdev);
if (err)
return err;
*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
- } else if (is_tcf_tunnel_release(a)) {
+ }
+ break;
+ case FLOW_ACTION_TUNNEL_DECAP:
/* Tunnel decap is handled by default so accept action. */
return 0;
- } else if (is_tcf_pedit(a)) {
- if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
+ case FLOW_ACTION_MANGLE:
+ if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
a_len, csum_updated))
return -EOPNOTSUPP;
- } else if (is_tcf_csum(a)) {
+ break;
+ case FLOW_ACTION_CSUM:
/* csum action requests recalc of something we have not fixed */
- if (tcf_csum_update_flags(a) & ~*csum_updated)
+ if (act->csum_flags & ~*csum_updated)
return -EOPNOTSUPP;
/* If we will correctly fix the csum we can remove it from the
* csum update list. Which will later be used to check support.
*/
- *csum_updated &= ~tcf_csum_update_flags(a);
- } else {
+ *csum_updated &= ~act->csum_flags;
+ break;
+ default:
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
}
@@ -887,7 +888,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
enum nfp_flower_tun_type tun_type;
- const struct tc_action *a;
+ struct flow_action_entry *act;
u32 csum_updated = 0;
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -898,8 +899,8 @@ int nfp_flower_compile_action(struct nfp_app *app,
tun_out_cnt = 0;
out_cnt = 0;
- tcf_exts_for_each_action(i, a, flow->exts) {
- err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+ flow_action_for_each(i, act, &flow->rule->action) {
+ err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated);
if (err)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 4c5eaf36d5bb..cf9e1118ee8f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -45,11 +45,9 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
{
struct nfp_flower_cmsg_mac_repr *msg;
struct sk_buff *skb;
- unsigned int size;
- size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
- skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
- GFP_KERNEL);
+ skb = nfp_flower_cmsg_alloc(app, struct_size(msg, ports, num_ports),
+ NFP_FLOWER_CMSG_TYPE_MAC_REPR, GFP_KERNEL);
if (!skb)
return NULL;
@@ -203,7 +201,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
}
atomic_inc(&priv->reify_replies);
- wake_up_interruptible(&priv->reify_wait_queue);
+ wake_up(&priv->reify_wait_queue);
}
static void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 15f41cfef9f1..4fcaf11ed56e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -97,6 +97,9 @@
#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
+/* Cmesg reply (empirical) timeout */
+#define NFP_FL_REPLY_TIMEOUT msecs_to_jiffies(40)
+
#define nfp_flower_cmsg_warn(app, fmt, args...) \
do { \
if (net_ratelimit()) \
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 5059110a1768..408089133599 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -32,6 +32,71 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
+static struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_non_repr_priv *entry;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(entry, &priv->non_repr_priv, list)
+ if (entry->netdev == netdev)
+ return entry;
+
+ return NULL;
+}
+
+void
+__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
+{
+ non_repr_priv->ref_count++;
+}
+
+struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_non_repr_priv *entry;
+
+ entry = nfp_flower_non_repr_priv_lookup(app, netdev);
+ if (entry)
+ goto inc_ref;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->netdev = netdev;
+ list_add(&entry->list, &priv->non_repr_priv);
+
+inc_ref:
+ __nfp_flower_non_repr_priv_get(entry);
+ return entry;
+}
+
+void
+__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
+{
+ if (--non_repr_priv->ref_count)
+ return;
+
+ list_del(&non_repr_priv->list);
+ kfree(non_repr_priv);
+}
+
+void
+nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_non_repr_priv *entry;
+
+ entry = nfp_flower_non_repr_priv_lookup(app, netdev);
+ if (!entry)
+ return;
+
+ __nfp_flower_non_repr_priv_put(entry);
+}
+
static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
@@ -107,16 +172,14 @@ static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
struct nfp_flower_priv *priv = app->priv;
- int err;
if (!tot_repl)
return 0;
lockdep_assert_held(&app->pf->lock);
- err = wait_event_interruptible_timeout(priv->reify_wait_queue,
- atomic_read(replies) >= tot_repl,
- msecs_to_jiffies(10));
- if (err <= 0) {
+ if (!wait_event_timeout(priv->reify_wait_queue,
+ atomic_read(replies) >= tot_repl,
+ NFP_FL_REPLY_TIMEOUT)) {
nfp_warn(app->cpp, "Not all reprs responded to reify\n");
return -EIO;
}
@@ -223,6 +286,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv;
+ repr_priv->nfp_repr = nfp_repr;
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
@@ -337,6 +401,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv;
+ repr_priv->nfp_repr = nfp_repr;
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
@@ -476,8 +541,8 @@ err_clear_nn:
static int nfp_flower_init(struct nfp_app *app)
{
+ u64 version, features, ctx_count, num_mems;
const struct nfp_pf *pf = app->pf;
- u64 version, features, ctx_count;
struct nfp_flower_priv *app_priv;
int err;
@@ -502,6 +567,23 @@ static int nfp_flower_init(struct nfp_app *app)
return err;
}
+ num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
+ &err);
+ if (err) {
+ nfp_warn(app->cpp,
+ "FlowerNIC: unsupported host context memory: %d\n",
+ err);
+ err = 0;
+ num_mems = 1;
+ }
+
+ if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
+ nfp_warn(app->cpp,
+ "FlowerNIC: invalid host context memory: %llu\n",
+ num_mems);
+ return -EINVAL;
+ }
+
ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
&err);
if (err) {
@@ -522,6 +604,8 @@ static int nfp_flower_init(struct nfp_app *app)
if (!app_priv)
return -ENOMEM;
+ app_priv->total_mem_units = num_mems;
+ app_priv->active_mem_unit = 0;
app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
app->priv = app_priv;
app_priv->app = app;
@@ -533,7 +617,7 @@ static int nfp_flower_init(struct nfp_app *app)
init_waitqueue_head(&app_priv->mtu_conf.wait_q);
spin_lock_init(&app_priv->mtu_conf.lock);
- err = nfp_flower_metadata_init(app, ctx_count);
+ err = nfp_flower_metadata_init(app, ctx_count, num_mems);
if (err)
goto err_free_app_priv;
@@ -558,6 +642,7 @@ static int nfp_flower_init(struct nfp_app *app)
}
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+ INIT_LIST_HEAD(&app_priv->non_repr_priv);
return 0;
@@ -601,7 +686,7 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
{
struct nfp_flower_priv *app_priv = app->priv;
struct nfp_repr *repr = netdev_priv(netdev);
- int err, ack;
+ int err;
/* Only need to config FW for physical port MTU change. */
if (repr->port->type != NFP_PORT_PHYS_PORT)
@@ -628,11 +713,9 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
}
/* Wait for fw to ack the change. */
- ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
- nfp_flower_check_ack(app_priv),
- msecs_to_jiffies(10));
-
- if (!ack) {
+ if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
+ nfp_flower_check_ack(app_priv),
+ NFP_FL_REPLY_TIMEOUT)) {
spin_lock_bh(&app_priv->mtu_conf.lock);
app_priv->mtu_conf.requested_val = 0;
spin_unlock_bh(&app_priv->mtu_conf.lock);
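
The flower/main.c hunks above also introduce the nfp_flower_non_repr_priv_{get,put} helpers, which keep one reference-counted entry per non-representor netdev. The standalone sketch below models that get-or-create/put life cycle with a plain linked list; it is illustrative only, and every name in it is local to the example.

/* Sketch: look up by key, allocate on first use, refcount on get/put. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	const void *key;       /* stands in for the net_device pointer */
	int ref_count;
};

static struct entry *head;

static struct entry *entry_get(const void *key)
{
	struct entry *e;

	for (e = head; e; e = e->next)
		if (e->key == key)
			goto found;

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	e->key = key;
	e->next = head;
	head = e;
found:
	e->ref_count++;
	return e;
}

static void entry_put(const void *key)
{
	struct entry **p, *e;

	for (p = &head; (e = *p); p = &e->next) {
		if (e->key != key)
			continue;
		if (--e->ref_count)
			return;
		*p = e->next;   /* last reference gone: unlink and free */
		free(e);
		return;
	}
}

int main(void)
{
	int dummy;

	entry_get(&dummy);
	entry_get(&dummy);      /* second user of the same key */
	entry_put(&dummy);
	entry_put(&dummy);      /* entry is freed here */
	printf("entries left: %s\n", head ? "some" : "none");
	return 0;
}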
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index b858bac47621..c0945a5fd1a4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -20,6 +20,9 @@ struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
+#define NFP_FL_STAT_ID_MU_NUM GENMASK(31, 22)
+#define NFP_FL_STAT_ID_STAT GENMASK(21, 0)
+
#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
@@ -54,6 +57,26 @@ struct nfp_fl_stats_id {
};
/**
+ * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
+ * @offloaded_macs: Hashtable of the offloaded MAC addresses
+ * @ipv4_off_list: List of IPv4 addresses to offload
+ * @neigh_off_list: List of neighbour offloads
+ * @ipv4_off_lock: Lock for the IPv4 address list
+ * @neigh_off_lock: Lock for the neighbour address list
+ * @mac_off_ids: IDA to manage id assignment for offloaded MACs
+ * @neigh_nb: Notifier to monitor neighbour state
+ */
+struct nfp_fl_tunnel_offloads {
+ struct rhashtable offloaded_macs;
+ struct list_head ipv4_off_list;
+ struct list_head neigh_off_list;
+ struct mutex ipv4_off_lock;
+ spinlock_t neigh_off_lock;
+ struct ida mac_off_ids;
+ struct notifier_block neigh_nb;
+};
+
+/**
* struct nfp_mtu_conf - manage MTU setting
* @portnum: NFP port number of repr with requested MTU change
* @requested_val: MTU value requested for repr
@@ -113,23 +136,16 @@ struct nfp_fl_lag {
* processing
* @cmsg_skbs_low: List of lower priority skbs for control message
* processing
- * @nfp_mac_off_list: List of MAC addresses to offload
- * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs
- * @nfp_ipv4_off_list: List of IPv4 addresses to offload
- * @nfp_neigh_off_list: List of neighbour offloads
- * @nfp_mac_off_lock: Lock for the MAC address list
- * @nfp_mac_index_lock: Lock for the MAC index list
- * @nfp_ipv4_off_lock: Lock for the IPv4 address list
- * @nfp_neigh_off_lock: Lock for the neighbour address list
- * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs
- * @nfp_mac_off_count: Number of MACs in address list
- * @nfp_tun_neigh_nb: Notifier to monitor neighbour state
+ * @tun: Tunnel offload data
* @reify_replies: atomically stores the number of replies received
* from firmware for repr reify
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
* @indr_block_cb_priv: List of priv data passed to indirect block cbs
+ * @non_repr_priv: List of offloaded non-repr ports and their priv data
+ * @active_mem_unit: Current active memory unit for flower rules
+ * @total_mem_units: Total number of available memory units for flower rules
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -147,30 +163,47 @@ struct nfp_flower_priv {
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
- struct list_head nfp_mac_off_list;
- struct list_head nfp_mac_index_list;
- struct list_head nfp_ipv4_off_list;
- struct list_head nfp_neigh_off_list;
- struct mutex nfp_mac_off_lock;
- struct mutex nfp_mac_index_lock;
- struct mutex nfp_ipv4_off_lock;
- spinlock_t nfp_neigh_off_lock;
- struct ida nfp_mac_off_ids;
- int nfp_mac_off_count;
- struct notifier_block nfp_tun_neigh_nb;
+ struct nfp_fl_tunnel_offloads tun;
atomic_t reify_replies;
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
struct list_head indr_block_cb_priv;
+ struct list_head non_repr_priv;
+ unsigned int active_mem_unit;
+ unsigned int total_mem_units;
};
/**
* struct nfp_flower_repr_priv - Flower APP per-repr priv data
+ * @nfp_repr: Back pointer to nfp_repr
* @lag_port_flags: Extended port flags to record lag state of repr
+ * @mac_offloaded: Flag indicating a MAC address is offloaded for repr
+ * @offloaded_mac_addr: MAC address that has been offloaded for repr
+ * @mac_list: List entry of reprs that share the same offloaded MAC
*/
struct nfp_flower_repr_priv {
+ struct nfp_repr *nfp_repr;
unsigned long lag_port_flags;
+ bool mac_offloaded;
+ u8 offloaded_mac_addr[ETH_ALEN];
+ struct list_head mac_list;
+};
+
+/**
+ * struct nfp_flower_non_repr_priv - Priv data for non-repr offloaded ports
+ * @list: List entry of offloaded reprs
+ * @netdev: Pointer to non-repr net_device
+ * @ref_count: Number of references held for this priv data
+ * @mac_offloaded: Flag indicating a MAC address is offloaded for device
+ * @offloaded_mac_addr: MAC address that has been offloaded for dev
+ */
+struct nfp_flower_non_repr_priv {
+ struct list_head list;
+ struct net_device *netdev;
+ int ref_count;
+ bool mac_offloaded;
+ u8 offloaded_mac_addr[ETH_ALEN];
};
struct nfp_fl_key_ls {
@@ -217,7 +250,8 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
-int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count);
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
@@ -252,7 +286,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app);
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event, void *ptr);
-void nfp_tunnel_write_macs(struct nfp_app *app);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
@@ -273,4 +306,12 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event);
+void
+__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
+struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev);
+void
+__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
+void
+nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index cdf75595f627..e03c8ef2c28c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -8,31 +8,41 @@
#include "main.h"
static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
- struct tc_cls_flower_offload *flow, u8 key_type,
- bool mask_version)
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk,
+ struct tc_cls_flower_offload *flow, u8 key_type)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_vlan *flow_vlan;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
u16 tmp_tci;
- memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
+ memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
+ memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
+
/* Populate the metadata frame. */
- frame->nfp_flow_key_layer = key_type;
- frame->mask_id = ~0;
+ ext->nfp_flow_key_layer = key_type;
+ ext->mask_id = ~0;
+
+ msk->nfp_flow_key_layer = key_type;
+ msk->mask_id = ~0;
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- flow_vlan = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- target);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
+ if (match.key->vlan_id || match.key->vlan_priority) {
tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- flow_vlan->vlan_priority) |
+ match.key->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- flow_vlan->vlan_id) |
+ match.key->vlan_id) |
NFP_FLOWER_MASK_VLAN_CFI;
- frame->tci = cpu_to_be16(tmp_tci);
+ ext->tci = cpu_to_be16(tmp_tci);
+ tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.mask->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.mask->vlan_id) |
+ NFP_FLOWER_MASK_VLAN_CFI;
+ msk->tci = cpu_to_be16(tmp_tci);
}
}
}
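The conversion above sets the pattern used for every key type in this file: each flow_rule_match_*() helper hands back both key and mask pointers, so the exact (ext) and mask (msk) outputs are filled in one pass instead of two mask_version calls. A stripped-down sketch of the idiom, for illustration only:

static void example_compile_ports(struct nfp_flower_tp_ports *ext,
				  struct nfp_flower_tp_ports *msk,
				  struct flow_rule *rule)
{
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return;

	flow_rule_match_ports(rule, &match);
	ext->port_src = match.key->src;		/* exact value */
	msk->port_src = match.mask->src;	/* matching mask bits */
}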
@@ -64,231 +74,249 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
}
static void
-nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_eth_addrs *addr;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+
+ memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
+ memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
- memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- addr = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- target);
+ flow_rule_match_eth_addrs(rule, &match);
/* Populate mac frame. */
- ether_addr_copy(frame->mac_dst, &addr->dst[0]);
- ether_addr_copy(frame->mac_src, &addr->src[0]);
+ ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
+ ether_addr_copy(ext->mac_src, &match.key->src[0]);
+ ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
+ ether_addr_copy(msk->mac_src, &match.mask->src[0]);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
- struct flow_dissector_key_mpls *mpls;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match;
u32 t_mpls;
- mpls = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_MPLS,
- target);
-
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
+ flow_rule_match_mpls(rule, &match);
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
-
- frame->mpls_lse = cpu_to_be32(t_mpls);
- } else if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC)) {
+ ext->mpls_lse = cpu_to_be32(t_mpls);
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+ msk->mpls_lse = cpu_to_be32(t_mpls);
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
* bit, which indicates an mpls ether type but without any
* mpls fields.
*/
- struct flow_dissector_key_basic *key_basic;
-
- key_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
- if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
- key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
- frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+ match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
+ ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ }
}
}
static void
-nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
+ struct nfp_flower_tp_ports *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ports *tp;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+
+ memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
+ memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
- memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- tp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- target);
- frame->port_src = tp->src;
- frame->port_dst = tp->dst;
+ flow_rule_match_ports(rule, &match);
+ ext->port_src = match.key->src;
+ ext->port_dst = match.key->dst;
+ msk->port_src = match.mask->src;
+ msk->port_dst = match.mask->dst;
}
}
static void
-nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
+ struct nfp_flower_ip_ext *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *basic;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
- frame->proto = basic->ip_proto;
+ flow_rule_match_basic(rule, &match);
+ ext->proto = match.key->ip_proto;
+ msk->proto = match.mask->ip_proto;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *flow_ip;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
- flow_ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IP,
- target);
- frame->tos = flow_ip->tos;
- frame->ttl = flow_ip->ttl;
+ flow_rule_match_ip(rule, &match);
+ ext->tos = match.key->tos;
+ ext->ttl = match.key->ttl;
+ msk->tos = match.mask->tos;
+ msk->ttl = match.mask->ttl;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
- struct flow_dissector_key_tcp *tcp;
- u32 tcp_flags;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ u16 tcp_flags, tcp_flags_mask;
+ struct flow_match_tcp match;
- tcp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_TCP, target);
- tcp_flags = be16_to_cpu(tcp->flags);
+ flow_rule_match_tcp(rule, &match);
+ tcp_flags = be16_to_cpu(match.key->flags);
+ tcp_flags_mask = be16_to_cpu(match.mask->flags);
if (tcp_flags & TCPHDR_FIN)
- frame->flags |= NFP_FL_TCP_FLAG_FIN;
+ ext->flags |= NFP_FL_TCP_FLAG_FIN;
+ if (tcp_flags_mask & TCPHDR_FIN)
+ msk->flags |= NFP_FL_TCP_FLAG_FIN;
+
if (tcp_flags & TCPHDR_SYN)
- frame->flags |= NFP_FL_TCP_FLAG_SYN;
+ ext->flags |= NFP_FL_TCP_FLAG_SYN;
+ if (tcp_flags_mask & TCPHDR_SYN)
+ msk->flags |= NFP_FL_TCP_FLAG_SYN;
+
if (tcp_flags & TCPHDR_RST)
- frame->flags |= NFP_FL_TCP_FLAG_RST;
+ ext->flags |= NFP_FL_TCP_FLAG_RST;
+ if (tcp_flags_mask & TCPHDR_RST)
+ msk->flags |= NFP_FL_TCP_FLAG_RST;
+
if (tcp_flags & TCPHDR_PSH)
- frame->flags |= NFP_FL_TCP_FLAG_PSH;
+ ext->flags |= NFP_FL_TCP_FLAG_PSH;
+ if (tcp_flags_mask & TCPHDR_PSH)
+ msk->flags |= NFP_FL_TCP_FLAG_PSH;
+
if (tcp_flags & TCPHDR_URG)
- frame->flags |= NFP_FL_TCP_FLAG_URG;
+ ext->flags |= NFP_FL_TCP_FLAG_URG;
+ if (tcp_flags_mask & TCPHDR_URG)
+ msk->flags |= NFP_FL_TCP_FLAG_URG;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key;
-
- key = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- target);
- if (key->flags & FLOW_DIS_IS_FRAGMENT)
- frame->flags |= NFP_FL_IP_FRAGMENTED;
- if (key->flags & FLOW_DIS_FIRST_FRAG)
- frame->flags |= NFP_FL_IP_FRAG_FIRST;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
+ ext->flags |= NFP_FL_IP_FRAGMENTED;
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
+ msk->flags |= NFP_FL_IP_FRAGMENTED;
+ if (match.key->flags & FLOW_DIS_FIRST_FRAG)
+ ext->flags |= NFP_FL_IP_FRAG_FIRST;
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
+ msk->flags |= NFP_FL_IP_FRAG_FIRST;
}
}
static void
-nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
+ struct nfp_flower_ipv4 *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *addr;
-
- memset(frame, 0, sizeof(struct nfp_flower_ipv4));
-
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- addr = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- target);
- frame->ipv4_src = addr->src;
- frame->ipv4_dst = addr->dst;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_match_ipv4_addrs match;
+
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4));
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ flow_rule_match_ipv4_addrs(rule, &match);
+ ext->ipv4_src = match.key->src;
+ ext->ipv4_dst = match.key->dst;
+ msk->ipv4_src = match.mask->src;
+ msk->ipv4_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
static void
-nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
+ struct nfp_flower_ipv6 *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv6_addrs *addr;
-
- memset(frame, 0, sizeof(struct nfp_flower_ipv6));
-
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- addr = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- target);
- frame->ipv6_src = addr->src;
- frame->ipv6_dst = addr->dst;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6));
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ ext->ipv6_src = match.key->src;
+ ext->ipv6_dst = match.key->dst;
+ msk->ipv6_src = match.mask->src;
+ msk->ipv6_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
static int
-nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_geneve_opt(void *ext, void *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_enc_opts *opts;
+ struct flow_match_enc_opts match;
- opts = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_OPTS,
- target);
- memcpy(key_buf, opts->data, opts->len);
+ flow_rule_match_enc_opts(flow->rule, &match);
+ memcpy(ext, match.key->data, match.key->len);
+ memcpy(msk, match.mask->data, match.mask->len);
return 0;
}
static void
-nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
+ struct nfp_flower_ipv4_udp_tun *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *tun_ips;
- struct flow_dissector_key_keyid *vni;
- struct flow_dissector_key_ip *ip;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
- memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
u32 temp_vni;
- vni = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- target);
- temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
- frame->tun_id = cpu_to_be32(temp_vni);
+ flow_rule_match_enc_keyid(rule, &match);
+ temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ ext->tun_id = cpu_to_be32(temp_vni);
+ temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ msk->tun_id = cpu_to_be32(temp_vni);
}
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- tun_ips =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- target);
- frame->ip_src = tun_ips->src;
- frame->ip_dst = tun_ips->dst;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ ext->ip_src = match.key->src;
+ ext->ip_dst = match.key->dst;
+ msk->ip_src = match.mask->src;
+ msk->ip_dst = match.mask->dst;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
- ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_IP,
- target);
- frame->tos = ip->tos;
- frame->ttl = ip->ttl;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ ext->tos = match.key->tos;
+ ext->ttl = match.key->ttl;
+ msk->tos = match.mask->tos;
+ msk->ttl = match.mask->ttl;
}
}
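As an aside, the key ID handling above only shifts the 24-bit VNI into its place within the 32-bit tun_id word; a tiny illustrative helper (NFP_FL_TUN_VNI_OFFSET is the driver-defined bit offset, reused here rather than restated):

/* Illustrative only: pack a tunnel key ID the same way the compile
 * helper does for both the exact and mask values.
 */
static __be32 example_pack_tun_id(__be32 keyid)
{
	u32 tmp_vni = be32_to_cpu(keyid) << NFP_FL_TUN_VNI_OFFSET;

	return cpu_to_be32(tmp_vni);
}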
@@ -313,12 +341,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
- /* Populate Exact Metadata. */
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
- flow, key_ls->key_layer, false);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
- flow, key_ls->key_layer, true);
+ (struct nfp_flower_meta_tci *)msk,
+ flow, key_ls->key_layer);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -348,45 +373,33 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_in_port);
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
- /* Populate Exact MAC Data. */
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
- flow, false);
- /* Populate Mask MAC Data. */
- nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
- flow, true);
+ (struct nfp_flower_mac_mpls *)msk,
+ flow);
ext += sizeof(struct nfp_flower_mac_mpls);
msk += sizeof(struct nfp_flower_mac_mpls);
}
if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
- /* Populate Exact TP Data. */
nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
- flow, false);
- /* Populate Mask TP Data. */
- nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
- flow, true);
+ (struct nfp_flower_tp_ports *)msk,
+ flow);
ext += sizeof(struct nfp_flower_tp_ports);
msk += sizeof(struct nfp_flower_tp_ports);
}
if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
- /* Populate Exact IPv4 Data. */
nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
- flow, false);
- /* Populate Mask IPv4 Data. */
- nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
- flow, true);
+ (struct nfp_flower_ipv4 *)msk,
+ flow);
ext += sizeof(struct nfp_flower_ipv4);
msk += sizeof(struct nfp_flower_ipv4);
}
if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
- /* Populate Exact IPv4 Data. */
nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
- flow, false);
- /* Populate Mask IPv4 Data. */
- nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
- flow, true);
+ (struct nfp_flower_ipv6 *)msk,
+ flow);
ext += sizeof(struct nfp_flower_ipv6);
msk += sizeof(struct nfp_flower_ipv6);
}
@@ -395,17 +408,11 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
__be32 tun_dst;
- /* Populate Exact VXLAN Data. */
- nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
- /* Populate Mask VXLAN Data. */
- nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
+ nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
- /* Configure tunnel end point MAC. */
- nfp_tunnel_write_macs(app);
-
/* Store the tunnel destination in the rule data.
* This must be present and be an exact match.
*/
@@ -413,11 +420,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
nfp_tunnel_add_ipv4_off(app, tun_dst);
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
- err = nfp_flower_compile_geneve_opt(ext, flow, false);
- if (err)
- return err;
-
- err = nfp_flower_compile_geneve_opt(msk, flow, true);
+ err = nfp_flower_compile_geneve_opt(ext, msk, flow);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 573a4400a26c..492837b852b6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -4,6 +4,7 @@
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
+#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
@@ -52,8 +53,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
freed_stats_id = priv->stats_ring_size;
/* Check for unallocated entries first. */
if (priv->stats_ids.init_unalloc > 0) {
- *stats_context_id = priv->stats_ids.init_unalloc - 1;
- priv->stats_ids.init_unalloc--;
+ if (priv->active_mem_unit == priv->total_mem_units) {
+ priv->stats_ids.init_unalloc--;
+ priv->active_mem_unit = 0;
+ }
+
+ *stats_context_id =
+ FIELD_PREP(NFP_FL_STAT_ID_STAT,
+ priv->stats_ids.init_unalloc - 1) |
+ FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
+ priv->active_mem_unit);
+ priv->active_mem_unit++;
return 0;
}
@@ -381,10 +391,11 @@ const struct rhashtable_params nfp_flower_table_params = {
.automatic_shrinking = true,
};
-int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ unsigned int host_num_mems)
{
struct nfp_flower_priv *priv = app->priv;
- int err;
+ int err, stats_size;
hash_init(priv->mask_table);
@@ -417,10 +428,12 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
- priv->stats_ids.init_unalloc = host_ctx_count;
+ priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);
- priv->stats = kvmalloc_array(priv->stats_ring_size,
- sizeof(struct nfp_fl_stats), GFP_KERNEL);
+ stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
+ FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
+ priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
+ GFP_KERNEL);
if (!priv->stats)
goto err_free_ring_buf;
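A short worked sketch of the new memory-unit split, with purely illustrative numbers; NFP_FL_STAT_ID_STAT and NFP_FL_STAT_ID_MU_NUM are the field masks introduced elsewhere in this series:

/* Illustrative only: with host_ctx_count = 8192 and host_num_mems = 2,
 * each memory unit hands out 4096 stats IDs, and a context ID packs the
 * per-unit stat index together with the memory-unit number.
 */
u32 init_unalloc = div_u64(8192, 2);			/* 4096 per unit */
u32 ctx_id = FIELD_PREP(NFP_FL_STAT_ID_STAT, init_unalloc - 1) |
	     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, 1);	/* memory unit 1 */

/* Both halves can be recovered with the matching FIELD_GET()s. */
u32 stat_idx = FIELD_GET(NFP_FL_STAT_ID_STAT, ctx_id);
u32 mem_unit = FIELD_GET(NFP_FL_STAT_ID_MU_NUM, ctx_id);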
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2cdbf29ecbe7..450d7296fd57 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -102,23 +102,22 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
- return dissector_uses_key(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
- dissector_uses_key(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
- dissector_uses_key(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS) ||
- dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+
+ return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
static int
-nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
u32 *key_layer_two, int *key_size)
{
- if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+ if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
return -EOPNOTSUPP;
- if (enc_opts->len > 0) {
+ if (enc_opts->key->len > 0) {
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
*key_size += sizeof(struct nfp_flower_geneve_options);
}
@@ -133,20 +132,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
enum nfp_flower_tun_type *tun_type)
{
- struct flow_dissector_key_basic *mask_basic = NULL;
- struct flow_dissector_key_basic *key_basic = NULL;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_basic basic = { NULL, NULL};
struct nfp_flower_priv *priv = app->priv;
u32 key_layer_two;
u8 key_layer;
int key_size;
int err;
- if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+ if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
/* If any tun dissector is used then the required set must be used. */
- if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
- (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
!= NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
return -EOPNOTSUPP;
@@ -155,76 +155,52 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_size = sizeof(struct nfp_flower_meta_tci) +
sizeof(struct nfp_flower_in_port);
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
- dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
key_layer |= NFP_FLOWER_LAYER_MAC;
key_size += sizeof(struct nfp_flower_mac_mpls);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *flow_vlan;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan vlan;
- flow_vlan = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- flow->mask);
+ flow_rule_match_vlan(rule, &vlan);
if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
- flow_vlan->vlan_priority)
+ vlan.key->vlan_priority)
return -EOPNOTSUPP;
}
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
- struct flow_dissector_key_ports *mask_enc_ports = NULL;
- struct flow_dissector_key_enc_opts *enc_op = NULL;
- struct flow_dissector_key_ports *enc_ports = NULL;
- struct flow_dissector_key_control *mask_enc_ctl =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- flow->mask);
- struct flow_dissector_key_control *enc_ctl =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- flow->key);
-
- if (mask_enc_ctl->addr_type != 0xffff ||
- enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_enc_opts enc_op = { NULL, NULL };
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_control enc_ctl;
+ struct flow_match_ports enc_ports;
+
+ flow_rule_match_enc_control(rule, &enc_ctl);
+
+ if (enc_ctl.mask->addr_type != 0xffff ||
+ enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
return -EOPNOTSUPP;
/* These fields are already verified as used. */
- mask_ipv4 =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- flow->mask);
- if (mask_ipv4->dst != cpu_to_be32(~0))
+ flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
return -EOPNOTSUPP;
- mask_enc_ports =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- flow->mask);
- enc_ports =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- flow->key);
-
- if (mask_enc_ports->dst != cpu_to_be16(~0))
+ flow_rule_match_enc_ports(rule, &enc_ports);
+ if (enc_ports.mask->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_OPTS)) {
- enc_op = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_OPTS,
- flow->key);
- }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
+ flow_rule_match_enc_opts(rule, &enc_op);
- switch (enc_ports->dst) {
+ switch (enc_ports.key->dst) {
case htons(NFP_FL_VXLAN_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
- if (enc_op)
+ if (enc_op.key)
return -EOPNOTSUPP;
break;
case htons(NFP_FL_GENEVE_PORT):
@@ -236,11 +212,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
- if (!enc_op)
+ if (!enc_op.key)
break;
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
return -EOPNOTSUPP;
- err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
+ err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
&key_size);
if (err)
return err;
@@ -254,19 +230,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
return -EOPNOTSUPP;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- mask_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->mask);
-
- key_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
- }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
+ flow_rule_match_basic(rule, &basic);
- if (mask_basic && mask_basic->n_proto) {
+ if (basic.mask && basic.mask->n_proto) {
/* Ethernet type is present in the key. */
- switch (key_basic->n_proto) {
+ switch (basic.key->n_proto) {
case cpu_to_be16(ETH_P_IP):
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
@@ -305,9 +274,9 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
}
- if (mask_basic && mask_basic->ip_proto) {
+ if (basic.mask && basic.mask->ip_proto) {
/* Ethernet type is present in the key. */
- switch (key_basic->ip_proto) {
+ switch (basic.key->ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_SCTP:
@@ -324,14 +293,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
- struct flow_dissector_key_tcp *tcp;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp tcp;
u32 tcp_flags;
- tcp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- flow->key);
- tcp_flags = be16_to_cpu(tcp->flags);
+ flow_rule_match_tcp(rule, &tcp);
+ tcp_flags = be16_to_cpu(tcp.key->flags);
if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
return -EOPNOTSUPP;
@@ -347,12 +314,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
* space, thus we need to ensure we include a IPv4/IPv6 key
* layer if we have not done so already.
*/
- if (!key_basic)
+ if (!basic.key)
return -EOPNOTSUPP;
if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
!(key_layer & NFP_FLOWER_LAYER_IPV6)) {
- switch (key_basic->n_proto) {
+ switch (basic.key->n_proto) {
case cpu_to_be16(ETH_P_IP):
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
@@ -369,14 +336,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key_ctl;
-
- key_ctl = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- flow->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control ctl;
- if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+ flow_rule_match_control(rule, &ctl);
+ if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
return -EOPNOTSUPP;
}
@@ -589,9 +553,8 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
- tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
- priv->stats[ctx_id].pkts,
- priv->stats[ctx_id].used);
+ flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
priv->stats[ctx_id].pkts = 0;
priv->stats[ctx_id].bytes = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 2d9f26a725c2..4d78be4ec4e9 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -98,47 +98,51 @@ struct nfp_ipv4_addr_entry {
struct list_head list;
};
-/**
- * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
- * @reserved: reserved for future use
- * @count: number of MAC addresses in the message
- * @addresses.index: index of MAC address in the lookup table
- * @addresses.addr: interface MAC address
- * @addresses: series of MACs to offload
- */
-struct nfp_tun_mac_addr {
- __be16 reserved;
- __be16 count;
- struct index_mac_addr {
- __be16 index;
- u8 addr[ETH_ALEN];
- } addresses[];
-};
+#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
/**
- * struct nfp_tun_mac_offload_entry - list of MACs to offload
- * @index: index of MAC address for offloading
+ * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
+ * @flags: MAC address offload options
+ * @count: number of MAC addresses in the message (should be 1)
+ * @index: index of MAC address in the lookup table
* @addr: interface MAC address
- * @list: list pointer
*/
-struct nfp_tun_mac_offload_entry {
+struct nfp_tun_mac_addr_offload {
+ __be16 flags;
+ __be16 count;
__be16 index;
u8 addr[ETH_ALEN];
- struct list_head list;
+};
+
+enum nfp_flower_mac_offload_cmd {
+ NFP_TUNNEL_MAC_OFFLOAD_ADD = 0,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL = 1,
+ NFP_TUNNEL_MAC_OFFLOAD_MOD = 2,
};
#define NFP_MAX_MAC_INDEX 0xff
/**
- * struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id
- * @ifindex: netdev ifindex of the device
- * @index: index of netdevs mac on NFP
- * @list: list pointer
+ * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
+ * @ht_node: Hashtable entry
+ * @addr: Offloaded MAC address
+ * @index: Offloaded index for given MAC address
+ * @ref_count: Number of devs using this MAC address
+ * @repr_list: List of reprs sharing this MAC address
*/
-struct nfp_tun_mac_non_nfp_idx {
- int ifindex;
- u8 index;
- struct list_head list;
+struct nfp_tun_offloaded_mac {
+ struct rhash_head ht_node;
+ u8 addr[ETH_ALEN];
+ u16 index;
+ int ref_count;
+ struct list_head repr_list;
+};
+
+static const struct rhashtable_params offloaded_macs_params = {
+ .key_offset = offsetof(struct nfp_tun_offloaded_mac, addr),
+ .head_offset = offsetof(struct nfp_tun_offloaded_mac, ht_node),
+ .key_len = ETH_ALEN,
+ .automatic_shrinking = true,
};
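For illustration, the table above is keyed directly on the 6-byte MAC address; a minimal lookup-or-insert sketch against it, with error handling trimmed:

static struct nfp_tun_offloaded_mac *
example_get_offloaded_mac(struct nfp_flower_priv *priv, const u8 *mac)
{
	struct nfp_tun_offloaded_mac *entry;

	entry = rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				       offloaded_macs_params);
	if (entry)
		return entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	ether_addr_copy(entry->addr, mac);
	INIT_LIST_HEAD(&entry->repr_list);
	if (rhashtable_insert_fast(&priv->tun.offloaded_macs, &entry->ht_node,
				   offloaded_macs_params)) {
		kfree(entry);
		return NULL;
	}

	return entry;
}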
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
@@ -205,15 +209,15 @@ static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
- spin_lock_bh(&priv->nfp_neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ spin_lock_bh(&priv->tun.neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
return true;
}
}
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
return false;
}
@@ -223,24 +227,24 @@ static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
- spin_lock_bh(&priv->nfp_neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ spin_lock_bh(&priv->tun.neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
return;
}
}
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
return;
}
entry->ipv4_addr = ipv4_addr;
- list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ list_add_tail(&entry->list, &priv->tun.neigh_off_list);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
}
static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
@@ -249,8 +253,8 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
struct nfp_ipv4_route_entry *entry;
struct list_head *ptr, *storage;
- spin_lock_bh(&priv->nfp_neigh_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ spin_lock_bh(&priv->tun.neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
if (entry->ipv4_addr == ipv4_addr) {
list_del(&entry->list);
@@ -258,7 +262,7 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
break;
}
}
- spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ spin_unlock_bh(&priv->tun.neigh_off_lock);
}
static void
@@ -326,7 +330,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
if (!nfp_netdev_is_nfp_repr(n->dev))
return NOTIFY_DONE;
- app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
+ app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
/* Only concerned with changes to routes already added to NFP. */
@@ -401,11 +405,11 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app)
int count;
memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
- mutex_lock(&priv->nfp_ipv4_off_lock);
+ mutex_lock(&priv->tun.ipv4_off_lock);
count = 0;
- list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
if (count >= NFP_FL_IPV4_ADDRS_MAX) {
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
return;
}
@@ -413,7 +417,7 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app)
payload.ipv4_addr[count++] = entry->ipv4_addr;
}
payload.count = cpu_to_be32(count);
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
sizeof(struct nfp_tun_ipv4_addr),
@@ -426,26 +430,26 @@ void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
struct nfp_ipv4_addr_entry *entry;
struct list_head *ptr, *storage;
- mutex_lock(&priv->nfp_ipv4_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ mutex_lock(&priv->tun.ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
if (entry->ipv4_addr == ipv4) {
entry->ref_count++;
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
return;
}
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
return;
}
entry->ipv4_addr = ipv4;
entry->ref_count = 1;
- list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_tun_write_ipv4_list(app);
}
@@ -456,8 +460,8 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
struct nfp_ipv4_addr_entry *entry;
struct list_head *ptr, *storage;
- mutex_lock(&priv->nfp_ipv4_off_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ mutex_lock(&priv->tun.ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
if (entry->ipv4_addr == ipv4) {
entry->ref_count--;
@@ -468,191 +472,357 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
break;
}
}
- mutex_unlock(&priv->nfp_ipv4_off_lock);
+ mutex_unlock(&priv->tun.ipv4_off_lock);
nfp_tun_write_ipv4_list(app);
}
-void nfp_tunnel_write_macs(struct nfp_app *app)
+static int
+__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_tun_mac_offload_entry *entry;
- struct nfp_tun_mac_addr *payload;
- struct list_head *ptr, *storage;
- int mac_count, err, pay_size;
+ struct nfp_tun_mac_addr_offload payload;
- mutex_lock(&priv->nfp_mac_off_lock);
- if (!priv->nfp_mac_off_count) {
- mutex_unlock(&priv->nfp_mac_off_lock);
- return;
- }
+ memset(&payload, 0, sizeof(payload));
- pay_size = sizeof(struct nfp_tun_mac_addr) +
- sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
+ if (del)
+ payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
- payload = kzalloc(pay_size, GFP_KERNEL);
- if (!payload) {
- mutex_unlock(&priv->nfp_mac_off_lock);
- return;
- }
+ /* FW supports multiple MACs per cmsg but we restrict to a single entry. */
+ payload.count = cpu_to_be16(1);
+ payload.index = cpu_to_be16(idx);
+ ether_addr_copy(payload.addr, mac);
- payload->count = cpu_to_be16(priv->nfp_mac_off_count);
+ return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
+ sizeof(struct nfp_tun_mac_addr_offload),
+ &payload, GFP_KERNEL);
+}
- mac_count = 0;
- list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
- entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
- list);
- payload->addresses[mac_count].index = entry->index;
- ether_addr_copy(payload->addresses[mac_count].addr,
- entry->addr);
- mac_count++;
- }
+static bool nfp_tunnel_port_is_phy_repr(int port)
+{
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
+ return true;
- err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
- pay_size, payload, GFP_KERNEL);
+ return false;
+}
- kfree(payload);
+static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
+{
+ return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
+}
- if (err) {
- mutex_unlock(&priv->nfp_mac_off_lock);
- /* Write failed so retain list for future retry. */
- return;
- }
+static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
+{
+ return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+}
+
+static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
+{
+ return nfp_mac_idx >> 8;
+}
+
+static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
+{
+ return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+}
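A quick worked round trip of the 16-bit MAC index encoding defined by the helpers above, with illustrative port/ida values:

/* Illustrative only: port and ida values chosen arbitrarily. */
u16 phys_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(3);
u16 glob_idx = nfp_tunnel_get_global_mac_idx_from_ida(7);

/* Only the ida-backed index reports as global, and the id comes back out. */
bool phys_is_global = nfp_tunnel_is_mac_idx_global(phys_idx);	/* false */
bool glob_is_global = nfp_tunnel_is_mac_idx_global(glob_idx);	/* true */
int ida_id = nfp_tunnel_get_ida_from_global_mac_idx(glob_idx);	/* 7 */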
+
+static struct nfp_tun_offloaded_mac *
+nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
+ offloaded_macs_params);
+}
+
+static void
+nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
+ struct net_device *netdev, bool mod)
+{
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *repr;
- /* If list was successfully offloaded, flush it. */
- list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
- entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
- list);
- list_del(&entry->list);
- kfree(entry);
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+
+ /* If modifying the MAC, remove the repr from its old list first. */
+ if (mod)
+ list_del(&repr_priv->mac_list);
+
+ list_add_tail(&repr_priv->mac_list, &entry->repr_list);
}
- priv->nfp_mac_off_count = 0;
- mutex_unlock(&priv->nfp_mac_off_lock);
+ entry->ref_count++;
}
-static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
+static int
+nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
+ int port, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
- struct nfp_tun_mac_non_nfp_idx *entry;
- struct list_head *ptr, *storage;
- int idx;
-
- mutex_lock(&priv->nfp_mac_index_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
- entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
- if (entry->ifindex == ifindex) {
- idx = entry->index;
- mutex_unlock(&priv->nfp_mac_index_lock);
- return idx;
- }
+ int ida_idx = NFP_MAX_MAC_INDEX, err;
+ struct nfp_tun_offloaded_mac *entry;
+ u16 nfp_mac_idx = 0;
+
+ entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
+ if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
+ return 0;
}
- idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
- if (idx < 0) {
- mutex_unlock(&priv->nfp_mac_index_lock);
- return idx;
+ /* Assign a global index if the netdev is not a repr or its MAC is now shared. */
+ if (entry || !port) {
+ ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (ida_idx < 0)
+ return ida_idx;
+
+ nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+ } else {
+ nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
}
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
- mutex_unlock(&priv->nfp_mac_index_lock);
- return -ENOMEM;
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto err_free_ida;
+ }
+
+ ether_addr_copy(entry->addr, netdev->dev_addr);
+ INIT_LIST_HEAD(&entry->repr_list);
+
+ if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
+ &entry->ht_node,
+ offloaded_macs_params)) {
+ err = -ENOMEM;
+ goto err_free_entry;
+ }
+ }
+
+ err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
+ nfp_mac_idx, false);
+ if (err) {
+ /* If not shared then free. */
+ if (!entry->ref_count)
+ goto err_remove_hash;
+ goto err_free_ida;
}
- entry->ifindex = ifindex;
- entry->index = idx;
- list_add_tail(&entry->list, &priv->nfp_mac_index_list);
- mutex_unlock(&priv->nfp_mac_index_lock);
- return idx;
+ entry->index = nfp_mac_idx;
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
+
+ return 0;
+
+err_remove_hash:
+ rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
+ offloaded_macs_params);
+err_free_entry:
+ kfree(entry);
+err_free_ida:
+ if (ida_idx != NFP_MAX_MAC_INDEX)
+ ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+
+ return err;
}
-static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
+static int
+nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
+ u8 *mac, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
- struct nfp_tun_mac_non_nfp_idx *entry;
- struct list_head *ptr, *storage;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_tun_offloaded_mac *entry;
+ struct nfp_repr *repr;
+ int ida_idx;
+
+ entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
+ if (!entry)
+ return 0;
+
+ entry->ref_count--;
+ /* If the del is part of a mod, the mac_list is still in use elsewhere. */
+ if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+ list_del(&repr_priv->mac_list);
+ }
- mutex_lock(&priv->nfp_mac_index_lock);
- list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
- entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
- if (entry->ifindex == ifindex) {
- ida_simple_remove(&priv->nfp_mac_off_ids,
- entry->index);
- list_del(&entry->list);
- kfree(entry);
- break;
+ /* If MAC is now used by only 1 repr, set the offloaded MAC index to port. */
+ if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
+ u16 nfp_mac_idx;
+ int port, err;
+
+ repr_priv = list_first_entry(&entry->repr_list,
+ struct nfp_flower_repr_priv,
+ mac_list);
+ repr = repr_priv->nfp_repr;
+ port = nfp_repr_get_port_id(repr->netdev);
+ nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
+ if (err) {
+ nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+ netdev_name(netdev));
+ return 0;
}
+
+ ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
+ ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ entry->index = nfp_mac_idx;
+ return 0;
}
- mutex_unlock(&priv->nfp_mac_index_lock);
-}
-static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
- struct nfp_app *app)
-{
- struct nfp_flower_priv *priv = app->priv;
- struct nfp_tun_mac_offload_entry *entry;
- u16 nfp_mac_idx;
- int port = 0;
+ if (entry->ref_count)
+ return 0;
- /* Check if MAC should be offloaded. */
- if (!is_valid_ether_addr(netdev->dev_addr))
- return;
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
+ &entry->ht_node,
+ offloaded_macs_params));
+ /* If MAC has global ID then extract and free the ida entry. */
+ if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+ ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
+ ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ }
- if (nfp_netdev_is_nfp_repr(netdev))
+ kfree(entry);
+
+ return __nfp_tunnel_offload_mac(app, mac, 0, true);
+}
+
+static int
+nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
+ enum nfp_flower_mac_offload_cmd cmd)
+{
+ struct nfp_flower_non_repr_priv *nr_priv = NULL;
+ bool non_repr = false, *mac_offloaded;
+ u8 *off_mac = NULL;
+ int err, port = 0;
+
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *repr;
+
+ repr = netdev_priv(netdev);
+ if (repr->app != app)
+ return 0;
+
+ repr_priv = repr->app_priv;
+ mac_offloaded = &repr_priv->mac_offloaded;
+ off_mac = &repr_priv->offloaded_mac_addr[0];
port = nfp_repr_get_port_id(netdev);
- else if (!nfp_fl_is_netdev_to_offload(netdev))
- return;
+ if (!nfp_tunnel_port_is_phy_repr(port))
+ return 0;
+ } else if (nfp_fl_is_netdev_to_offload(netdev)) {
+ nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
+ if (!nr_priv)
+ return -ENOMEM;
+
+ mac_offloaded = &nr_priv->mac_offloaded;
+ off_mac = &nr_priv->offloaded_mac_addr[0];
+ non_repr = true;
+ } else {
+ return 0;
+ }
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
- return;
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ err = -EINVAL;
+ goto err_put_non_repr_priv;
}
- if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
- NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
- nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
- } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
- NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
- port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
- nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
- } else {
- /* Must assign our own unique 8-bit index. */
- int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
+ if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
+ cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;
- if (idx < 0) {
- nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
- kfree(entry);
- return;
- }
- nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+ switch (cmd) {
+ case NFP_TUNNEL_MAC_OFFLOAD_ADD:
+ err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
+ if (err)
+ goto err_put_non_repr_priv;
+
+ if (non_repr)
+ __nfp_flower_non_repr_priv_get(nr_priv);
+
+ *mac_offloaded = true;
+ ether_addr_copy(off_mac, netdev->dev_addr);
+ break;
+ case NFP_TUNNEL_MAC_OFFLOAD_DEL:
+ /* Only attempt delete if add was successful. */
+ if (!*mac_offloaded)
+ break;
+
+ if (non_repr)
+ __nfp_flower_non_repr_priv_put(nr_priv);
+
+ *mac_offloaded = false;
+
+ err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
+ false);
+ if (err)
+ goto err_put_non_repr_priv;
+
+ break;
+ case NFP_TUNNEL_MAC_OFFLOAD_MOD:
+ /* Ignore if changing to the same address. */
+ if (ether_addr_equal(netdev->dev_addr, off_mac))
+ break;
+
+ err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
+ if (err)
+ goto err_put_non_repr_priv;
+
+ /* Delete the previous MAC address. */
+ err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
+ netdev_name(netdev));
+
+ ether_addr_copy(off_mac, netdev->dev_addr);
+ break;
+ default:
+ err = -EINVAL;
+ goto err_put_non_repr_priv;
}
- entry->index = cpu_to_be16(nfp_mac_idx);
- ether_addr_copy(entry->addr, netdev->dev_addr);
+ if (non_repr)
+ __nfp_flower_non_repr_priv_put(nr_priv);
+
+ return 0;
+
+err_put_non_repr_priv:
+ if (non_repr)
+ __nfp_flower_non_repr_priv_put(nr_priv);
- mutex_lock(&priv->nfp_mac_off_lock);
- priv->nfp_mac_off_count++;
- list_add_tail(&entry->list, &priv->nfp_mac_off_list);
- mutex_unlock(&priv->nfp_mac_off_lock);
+ return err;
}
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event, void *ptr)
{
- if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
- /* If non-nfp netdev then free its offload index. */
- if (nfp_fl_is_netdev_to_offload(netdev))
- nfp_tun_del_mac_idx(app, netdev->ifindex);
- } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
- event == NETDEV_REGISTER) {
- nfp_tun_add_to_mac_offload_list(netdev, app);
-
- /* Force a list write to keep NFP up to date. */
- nfp_tunnel_write_macs(app);
+ int err;
+
+ if (event == NETDEV_DOWN) {
+ err = nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
+ netdev_name(netdev));
+ } else if (event == NETDEV_UP) {
+ err = nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_ADD);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
+ netdev_name(netdev));
+ } else if (event == NETDEV_CHANGEADDR) {
+ /* Only offload addr change if netdev is already up. */
+ if (!(netdev->flags & IFF_UP))
+ return NOTIFY_OK;
+
+ err = nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_MOD);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
+ netdev_name(netdev));
}
return NOTIFY_OK;
}
@@ -660,68 +830,62 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ /* Initialise rhash for MAC offload tracking. */
+ err = rhashtable_init(&priv->tun.offloaded_macs,
+ &offloaded_macs_params);
+ if (err)
+ return err;
- /* Initialise priv data for MAC offloading. */
- priv->nfp_mac_off_count = 0;
- mutex_init(&priv->nfp_mac_off_lock);
- INIT_LIST_HEAD(&priv->nfp_mac_off_list);
- mutex_init(&priv->nfp_mac_index_lock);
- INIT_LIST_HEAD(&priv->nfp_mac_index_list);
- ida_init(&priv->nfp_mac_off_ids);
+ ida_init(&priv->tun.mac_off_ids);
/* Initialise priv data for IPv4 offloading. */
- mutex_init(&priv->nfp_ipv4_off_lock);
- INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+ mutex_init(&priv->tun.ipv4_off_lock);
+ INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
/* Initialise priv data for neighbour offloading. */
- spin_lock_init(&priv->nfp_neigh_off_lock);
- INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
- priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+ spin_lock_init(&priv->tun.neigh_off_lock);
+ INIT_LIST_HEAD(&priv->tun.neigh_off_list);
+ priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+
+ err = register_netevent_notifier(&priv->tun.neigh_nb);
+ if (err) {
+ rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
+ nfp_check_rhashtable_empty, NULL);
+ return err;
+ }
- return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
+ return 0;
}
void nfp_tunnel_config_stop(struct nfp_app *app)
{
- struct nfp_tun_mac_offload_entry *mac_entry;
struct nfp_flower_priv *priv = app->priv;
struct nfp_ipv4_route_entry *route_entry;
- struct nfp_tun_mac_non_nfp_idx *mac_idx;
struct nfp_ipv4_addr_entry *ip_entry;
struct list_head *ptr, *storage;
- unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
-
- /* Free any memory that may be occupied by MAC list. */
- list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
- mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
- list);
- list_del(&mac_entry->list);
- kfree(mac_entry);
- }
-
- /* Free any memory that may be occupied by MAC index list. */
- list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
- mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
- list);
- list_del(&mac_idx->list);
- kfree(mac_idx);
- }
+ unregister_netevent_notifier(&priv->tun.neigh_nb);
- ida_destroy(&priv->nfp_mac_off_ids);
+ ida_destroy(&priv->tun.mac_off_ids);
/* Free any memory that may be occupied by ipv4 list. */
- list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
list_del(&ip_entry->list);
kfree(ip_entry);
}
/* Free any memory that may be occupied by the route list. */
- list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
list);
list_del(&route_entry->list);
kfree(route_entry);
}
+
+ /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
+ rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
+ nfp_check_rhashtable_empty, NULL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index d578d856a009..f8d422713705 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -433,4 +433,6 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id);
+struct devlink *nfp_devlink_get_devlink(struct net_device *netdev);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 808647ec3573..e9eca99cf493 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -4,6 +4,7 @@
#include <linux/rtnetlink.h>
#include <net/devlink.h>
+#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
@@ -171,6 +172,173 @@ static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
return ret;
}
+static const struct nfp_devlink_versions_simple {
+ const char *key;
+ const char *hwinfo;
+} nfp_devlink_versions_hwinfo[] = {
+ { DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, "assembly.partno", },
+ { DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, "assembly.revision", },
+ { DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE, "assembly.vendor", },
+ { "board.model", /* code name */ "assembly.model", },
+};
+
+static int
+nfp_devlink_versions_get_hwinfo(struct nfp_pf *pf, struct devlink_info_req *req)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(nfp_devlink_versions_hwinfo); i++) {
+ const struct nfp_devlink_versions_simple *info;
+ const char *val;
+
+ info = &nfp_devlink_versions_hwinfo[i];
+
+ val = nfp_hwinfo_lookup(pf->hwinfo, info->hwinfo);
+ if (!val)
+ continue;
+
+ err = devlink_info_version_fixed_put(req, info->key, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct nfp_devlink_versions {
+ enum nfp_nsp_versions id;
+ const char *key;
+} nfp_devlink_versions_nsp[] = {
+ { NFP_VERSIONS_BUNDLE, "fw.bundle_id", },
+ { NFP_VERSIONS_BSP, DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, },
+ { NFP_VERSIONS_CPLD, "fw.cpld", },
+ { NFP_VERSIONS_APP, DEVLINK_INFO_VERSION_GENERIC_FW_APP, },
+ { NFP_VERSIONS_UNDI, DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, },
+ { NFP_VERSIONS_NCSI, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, },
+ { NFP_VERSIONS_CFGR, "chip.init", },
+};
+
+static int
+nfp_devlink_versions_get_nsp(struct devlink_info_req *req, bool flash,
+ const u8 *buf, unsigned int size)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(nfp_devlink_versions_nsp); i++) {
+ const struct nfp_devlink_versions *info;
+ const char *version;
+
+ info = &nfp_devlink_versions_nsp[i];
+
+ version = nfp_nsp_versions_get(info->id, flash, buf, size);
+ if (IS_ERR(version)) {
+ if (PTR_ERR(version) == -ENOENT)
+ continue;
+ else
+ return PTR_ERR(version);
+ }
+
+ if (flash)
+ err = devlink_info_version_stored_put(req, info->key,
+ version);
+ else
+ err = devlink_info_version_running_put(req, info->key,
+ version);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+nfp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ const char *sn, *vendor, *part;
+ struct nfp_nsp *nsp;
+ char *buf = NULL;
+ int err;
+
+ err = devlink_info_driver_name_put(req, "nfp");
+ if (err)
+ return err;
+
+ vendor = nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor");
+ part = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
+ sn = nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial");
+ if (vendor && part && sn) {
+ char *buf;
+
+ buf = kmalloc(strlen(vendor) + strlen(part) + strlen(sn) + 1,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = '\0';
+ strcat(buf, vendor);
+ strcat(buf, part);
+ strcat(buf, sn);
+
+ err = devlink_info_serial_number_put(req, buf);
+ kfree(buf);
+ if (err)
+ return err;
+ }
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp)) {
+ NL_SET_ERR_MSG_MOD(extack, "can't access NSP");
+ return PTR_ERR(nsp);
+ }
+
+ if (nfp_nsp_has_versions(nsp)) {
+ buf = kzalloc(NFP_NSP_VERSION_BUFSZ, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_close_nsp;
+ }
+
+ err = nfp_nsp_versions(nsp, buf, NFP_NSP_VERSION_BUFSZ);
+ if (err)
+ goto err_free_buf;
+
+ err = nfp_devlink_versions_get_nsp(req, false,
+ buf, NFP_NSP_VERSION_BUFSZ);
+ if (err)
+ goto err_free_buf;
+
+ err = nfp_devlink_versions_get_nsp(req, true,
+ buf, NFP_NSP_VERSION_BUFSZ);
+ if (err)
+ goto err_free_buf;
+
+ kfree(buf);
+ }
+
+ nfp_nsp_close(nsp);
+
+ return nfp_devlink_versions_get_hwinfo(pf, req);
+
+err_free_buf:
+ kfree(buf);
+err_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
+static int
+nfp_devlink_flash_update(struct devlink *devlink, const char *path,
+ const char *component, struct netlink_ext_ack *extack)
+{
+ if (component)
+ return -EOPNOTSUPP;
+ return nfp_flash_update_common(devlink_priv(devlink), path, extack);
+}
+
const struct devlink_ops nfp_devlink_ops = {
.port_split = nfp_devlink_port_split,
.port_unsplit = nfp_devlink_port_unsplit,
@@ -178,6 +346,8 @@ const struct devlink_ops nfp_devlink_ops = {
.sb_pool_set = nfp_devlink_sb_pool_set,
.eswitch_mode_get = nfp_devlink_eswitch_mode_get,
.eswitch_mode_set = nfp_devlink_eswitch_mode_set,
+ .info_get = nfp_devlink_info_get,
+ .flash_update = nfp_devlink_flash_update,
};
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
@@ -206,3 +376,14 @@ void nfp_devlink_port_unregister(struct nfp_port *port)
{
devlink_port_unregister(&port->dl_port);
}
+
+struct devlink *nfp_devlink_get_devlink(struct net_device *netdev)
+{
+ struct nfp_app *app;
+
+ app = nfp_app_from_netdev(netdev);
+ if (!app)
+ return NULL;
+
+ return priv_to_devlink(app->pf);
+}
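For reference, the devlink info API used by the new callback distinguishes fixed versions (tied to the board), running versions (what the device executes now), and stored versions (what sits in flash for the next reset). A stripped-down sketch of an ->info_get() implementation with placeholder values; only the devlink_info_*_put() helpers and the generic version keys are real API here:

#include <net/devlink.h>

static int example_devlink_info_get(struct devlink *devlink,
				    struct devlink_info_req *req,
				    struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_info_driver_name_put(req, "example");
	if (err)
		return err;

	err = devlink_info_serial_number_put(req, "SN-0001");
	if (err)
		return err;

	/* Fixed: board property, never changes at runtime. */
	err = devlink_info_version_fixed_put(req,
					     DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
					     "EXAMPLE-0001");
	if (err)
		return err;

	/* Running: currently active firmware. */
	err = devlink_info_version_running_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
					       "1.2.3");
	if (err)
		return err;

	/* Stored: flashed image that takes effect after the next reset. */
	return devlink_info_version_stored_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
					       "1.2.4");
}

static const struct devlink_ops example_devlink_ops = {
	.info_get = example_devlink_info_get,
};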
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 6c10e8d119e4..f4c8776e42b6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -300,6 +300,47 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
return nfp_pcie_sriov_enable(pdev, num_vfs);
}
+int nfp_flash_update_common(struct nfp_pf *pf, const char *path,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = &pf->pdev->dev;
+ const struct firmware *fw;
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ if (extack)
+ NL_SET_ERR_MSG_MOD(extack, "can't access NSP");
+ else
+ dev_err(dev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ err = request_firmware_direct(&fw, path, dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unable to read flash file from disk");
+ goto exit_close_nsp;
+ }
+
+ dev_info(dev, "Please be patient while writing flash image: %s\n",
+ path);
+
+ err = nfp_nsp_write_flash(nsp, fw);
+ if (err < 0)
+ goto exit_release_fw;
+ dev_info(dev, "Finished writing flash image\n");
+ err = 0;
+
+exit_release_fw:
+ release_firmware(fw);
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
static const struct firmware *
nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name)
{
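nfp_flash_update_common() exists so the same flashing routine can serve both the devlink flash_update path (which carries an extack) and internal callers (which do not). A hedged sketch of that shape; example_write_flash() is a stand-in for the device-specific programming step, not a real API:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/netlink.h>

/* Hypothetical device-specific step; a real driver would program its NVM here. */
static int example_write_flash(struct device *dev, const void *data, size_t size)
{
	dev_info(dev, "would write %zu bytes of flash image\n", size);
	return 0;
}

static int example_flash_common(struct device *dev, const char *path,
				struct netlink_ext_ack *extack)
{
	const struct firmware *fw;
	int err;

	err = request_firmware_direct(&fw, path, dev);
	if (err) {
		/* Report via extack when called from devlink, else via dev_err. */
		if (extack)
			NL_SET_ERR_MSG_MOD(extack,
					   "unable to read flash file from disk");
		else
			dev_err(dev, "unable to read flash file %s\n", path);
		return err;
	}

	err = example_write_flash(dev, fw->data, fw->size);

	release_firmware(fw);
	return err;
}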
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index a3613a2e0aa5..b7211f200d22 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -164,6 +164,8 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area);
int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
void *out_data, u64 out_length);
+int nfp_flash_update_common(struct nfp_pf *pf, const char *path,
+ struct netlink_ext_ack *extack);
enum nfp_dump_diag {
NFP_DUMP_NSP_DIAG = 0,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e97636d2e6ee..6d1b8816552e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -36,7 +36,6 @@
#include <linux/vmalloc.h>
#include <linux/ktime.h>
-#include <net/switchdev.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
@@ -2170,9 +2169,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring->cnt = dp->txd_cnt;
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
- tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
+ tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
if (!tx_ring->txds) {
netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
tx_ring->cnt);
@@ -2328,9 +2327,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
- rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
+ rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
if (!rx_ring->rxds) {
netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
rx_ring->cnt);
@@ -3531,6 +3530,8 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_bpf = nfp_net_xdp,
+ .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
+ .ndo_get_devlink = nfp_devlink_get_devlink,
};
/**
@@ -3815,8 +3816,6 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->netdev_ops = &nfp_net_netdev_ops;
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
- SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
-
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
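The dma_zalloc_coherent() hunks in this file (and in several drivers further down) are part of a tree-wide cleanup: by this point dma_alloc_coherent() already returns zeroed memory, so the zeroing wrapper adds nothing. Minimal sketch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_desc_ring(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* The returned buffer is already zero-filled; no extra memset needed. */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL | __GFP_NOWARN);
}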
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index cb9c512abc76..690b62718dbb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1234,57 +1234,6 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
-static int
-nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash)
-{
- const struct firmware *fw;
- struct nfp_app *app;
- struct nfp_nsp *nsp;
- struct device *dev;
- int err;
-
- if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
- return -EOPNOTSUPP;
-
- app = nfp_app_from_netdev(netdev);
- if (!app)
- return -EOPNOTSUPP;
-
- dev = &app->pdev->dev;
-
- nsp = nfp_nsp_open(app->cpp);
- if (IS_ERR(nsp)) {
- err = PTR_ERR(nsp);
- dev_err(dev, "Failed to access the NSP: %d\n", err);
- return err;
- }
-
- err = request_firmware_direct(&fw, flash->data, dev);
- if (err)
- goto exit_close_nsp;
-
- dev_info(dev, "Please be patient while writing flash image: %s\n",
- flash->data);
- dev_hold(netdev);
- rtnl_unlock();
-
- err = nfp_nsp_write_flash(nsp, fw);
- if (err < 0) {
- dev_err(dev, "Flash write failed: %d\n", err);
- goto exit_rtnl_lock;
- }
- dev_info(dev, "Finished writing flash image\n");
-
-exit_rtnl_lock:
- rtnl_lock();
- dev_put(netdev);
- release_firmware(fw);
-
-exit_close_nsp:
- nfp_nsp_close(nsp);
- return err;
-}
-
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1295,7 +1244,6 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_sset_count = nfp_net_get_sset_count,
.get_rxnfc = nfp_net_get_rxnfc,
.set_rxnfc = nfp_net_set_rxnfc,
- .flash_device = nfp_net_flash_device,
.get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
@@ -1321,7 +1269,6 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
.get_sset_count = nfp_port_get_sset_count,
- .flash_device = nfp_net_flash_device,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 69d7aebda09b..d2c803bb4e56 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -5,7 +5,6 @@
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/lockdep.h>
#include <net/dst_metadata.h>
-#include <net/switchdev.h>
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
@@ -273,6 +272,8 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_fix_features = nfp_repr_fix_features,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
+ .ndo_get_devlink = nfp_devlink_get_devlink,
};
void
@@ -336,8 +337,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->max_mtu = pf_netdev->max_mtu;
- SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
-
/* Set features the lower device can support with representors */
if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 86bc149ca231..93c5bfc0510b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -3,7 +3,6 @@
#include <linux/lockdep.h>
#include <linux/netdevice.h>
-#include <net/switchdev.h>
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
@@ -31,34 +30,22 @@ struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
return NULL;
}
-static int
-nfp_port_attr_get(struct net_device *netdev, struct switchdev_attr *attr)
+int nfp_port_get_port_parent_id(struct net_device *netdev,
+ struct netdev_phys_item_id *ppid)
{
struct nfp_port *port;
+ const u8 *serial;
port = nfp_port_from_netdev(netdev);
if (!port)
return -EOPNOTSUPP;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: {
- const u8 *serial;
- /* N.B: attr->u.ppid.id is binary data */
- attr->u.ppid.id_len = nfp_cpp_serial(port->app->cpp, &serial);
- memcpy(&attr->u.ppid.id, serial, attr->u.ppid.id_len);
- break;
- }
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = nfp_cpp_serial(port->app->cpp, &serial);
+ memcpy(&ppid->id, serial, ppid->id_len);
return 0;
}
-const struct switchdev_ops nfp_port_switchdev_ops = {
- .switchdev_port_attr_get = nfp_port_attr_get,
-};
-
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
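With switchdev_ops removed, the switch-wide parent ID is now reported directly through the ndo_get_port_parent_id hook. A hedged sketch of such a callback in isolation; the 8-byte serial below is fabricated, any stable device-wide identifier would do:

#include <linux/netdevice.h>
#include <linux/string.h>

static int example_get_port_parent_id(struct net_device *netdev,
				      struct netdev_phys_item_id *ppid)
{
	static const u8 serial[8] = {
		0x00, 0x15, 0x4d, 0x00, 0x00, 0x00, 0x00, 0x01
	};

	ppid->id_len = sizeof(serial);
	memcpy(ppid->id, serial, ppid->id_len);
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_port_parent_id	= example_get_port_parent_id,
};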
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index b2479a2a49e5..90ae053f5c07 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -7,6 +7,7 @@
#include <net/devlink.h>
struct net_device;
+struct netdev_phys_item_id;
struct nfp_app;
struct nfp_pf;
struct nfp_port;
@@ -90,7 +91,6 @@ struct nfp_port {
};
extern const struct ethtool_ops nfp_port_ethtool_ops;
-extern const struct switchdev_ops nfp_port_switchdev_ops;
__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...);
@@ -106,6 +106,8 @@ int
nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
+int nfp_port_get_port_parent_id(struct net_device *netdev,
+ struct netdev_phys_item_id *ppid);
struct nfp_port *
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
index 814360ed3a20..ea2e3f829aba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
@@ -48,6 +48,7 @@ int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index,
pool_info->pool_type = le32_to_cpu(get_data.pool_type);
pool_info->threshold_type = le32_to_cpu(get_data.threshold_type);
pool_info->size = le32_to_cpu(get_data.size) * unit_size;
+ pool_info->cell_size = unit_size;
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index ce1577bbbd2a..3a4e224a64b7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -7,11 +7,13 @@
* Jason McMullan <jason.mcmullan@netronome.com>
*/
+#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
+#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -36,6 +38,7 @@
#define NSP_COMMAND 0x08
#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32)
#define NSP_COMMAND_CODE GENMASK_ULL(31, 16)
+#define NSP_COMMAND_DMA_BUF BIT_ULL(1)
#define NSP_COMMAND_START BIT_ULL(0)
/* CPP address to retrieve the data from */
@@ -48,8 +51,12 @@
#define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0)
#define NSP_DFLT_BUFFER_CONFIG 0x20
+#define NSP_DFLT_BUFFER_DMA_CHUNK_ORDER GENMASK_ULL(63, 58)
+#define NSP_DFLT_BUFFER_SIZE_4KB GENMASK_ULL(15, 8)
#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0)
+#define NFP_CAP_CMD_DMA_SG 0x28
+
#define NSP_MAGIC 0xab10
#define NSP_MAJOR 0
#define NSP_MINOR 8
@@ -62,6 +69,16 @@
#define NFP_HWINFO_LOOKUP_SIZE GENMASK(11, 0)
+#define NFP_VERSIONS_SIZE GENMASK(11, 0)
+#define NFP_VERSIONS_CNT_OFF 0
+#define NFP_VERSIONS_BSP_OFF 2
+#define NFP_VERSIONS_CPLD_OFF 6
+#define NFP_VERSIONS_APP_OFF 10
+#define NFP_VERSIONS_BUNDLE_OFF 14
+#define NFP_VERSIONS_UNDI_OFF 18
+#define NFP_VERSIONS_NCSI_OFF 22
+#define NFP_VERSIONS_CFGR_OFF 26
+
enum nfp_nsp_cmd {
SPCODE_NOOP = 0, /* No operation */
SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
@@ -77,6 +94,17 @@ enum nfp_nsp_cmd {
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */
SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */
+ SPCODE_VERSIONS = 21, /* Report FW versions */
+};
+
+struct nfp_nsp_dma_buf {
+ __le32 chunk_cnt;
+ __le32 reserved[3];
+ struct {
+ __le32 size;
+ __le32 reserved;
+ __le64 addr;
+ } descs[];
};
static const struct {
@@ -107,18 +135,18 @@ struct nfp_nsp {
/**
* struct nfp_nsp_command_arg - NFP command argument structure
* @code: NFP SP Command Code
+ * @dma: @buf points to a host buffer, not NSP buffer
* @timeout_sec:Timeout value to wait for completion in seconds
* @option: NFP SP Command Argument
- * @buff_cpp: NFP SP Buffer CPP Address info
- * @buff_addr: NFP SP Buffer Host address
+ * @buf: NFP SP Buffer Address
* @error_cb: Callback for interpreting option if error occurred
*/
struct nfp_nsp_command_arg {
u16 code;
+ bool dma;
unsigned int timeout_sec;
u32 option;
- u32 buff_cpp;
- u64 buff_addr;
+ u64 buf;
void (*error_cb)(struct nfp_nsp *state, u32 ret_val);
};
@@ -332,22 +360,14 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg)
if (err)
return err;
- if (!FIELD_FIT(NSP_BUFFER_CPP, arg->buff_cpp >> 8) ||
- !FIELD_FIT(NSP_BUFFER_ADDRESS, arg->buff_addr)) {
- nfp_err(cpp, "Host buffer out of reach %08x %016llx\n",
- arg->buff_cpp, arg->buff_addr);
- return -EINVAL;
- }
-
- err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
- FIELD_PREP(NSP_BUFFER_CPP, arg->buff_cpp >> 8) |
- FIELD_PREP(NSP_BUFFER_ADDRESS, arg->buff_addr));
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, arg->buf);
if (err < 0)
return err;
err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
FIELD_PREP(NSP_COMMAND_OPTION, arg->option) |
FIELD_PREP(NSP_COMMAND_CODE, arg->code) |
+ FIELD_PREP(NSP_COMMAND_DMA_BUF, arg->dma) |
FIELD_PREP(NSP_COMMAND_START, 1));
if (err < 0)
return err;
@@ -399,36 +419,14 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code)
}
static int
-nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg)
+nfp_nsp_command_buf_def(struct nfp_nsp *nsp,
+ struct nfp_nsp_command_buf_arg *arg)
{
struct nfp_cpp *cpp = nsp->cpp;
- unsigned int max_size;
u64 reg, cpp_buf;
- int ret, err;
+ int err, ret;
u32 cpp_id;
- if (nsp->ver.minor < 13) {
- nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n",
- arg->arg.code, nsp->ver.major, nsp->ver.minor);
- return -EOPNOTSUPP;
- }
-
- err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
- nfp_resource_address(nsp->res) +
- NSP_DFLT_BUFFER_CONFIG,
- &reg);
- if (err < 0)
- return err;
-
- max_size = max(arg->in_size, arg->out_size);
- if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
- nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n",
- arg->arg.code,
- FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
- max_size);
- return -EINVAL;
- }
-
err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
nfp_resource_address(nsp->res) +
NSP_DFLT_BUFFER,
@@ -447,15 +445,21 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg)
}
/* Zero out remaining part of the buffer */
if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size) {
- memset(arg->out_buf, 0, arg->out_size - arg->in_size);
err = nfp_cpp_write(cpp, cpp_id, cpp_buf + arg->in_size,
arg->out_buf, arg->out_size - arg->in_size);
if (err < 0)
return err;
}
- arg->arg.buff_cpp = cpp_id;
- arg->arg.buff_addr = cpp_buf;
+ if (!FIELD_FIT(NSP_BUFFER_CPP, cpp_id >> 8) ||
+ !FIELD_FIT(NSP_BUFFER_ADDRESS, cpp_buf)) {
+ nfp_err(cpp, "Buffer out of reach %08x %016llx\n",
+ cpp_id, cpp_buf);
+ return -EINVAL;
+ }
+
+ arg->arg.buf = FIELD_PREP(NSP_BUFFER_CPP, cpp_id >> 8) |
+ FIELD_PREP(NSP_BUFFER_ADDRESS, cpp_buf);
ret = __nfp_nsp_command(nsp, &arg->arg);
if (ret < 0)
return ret;
@@ -470,6 +474,210 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg)
return ret;
}
+static int
+nfp_nsp_command_buf_dma_sg(struct nfp_nsp *nsp,
+ struct nfp_nsp_command_buf_arg *arg,
+ unsigned int max_size, unsigned int chunk_order,
+ unsigned int dma_order)
+{
+ struct nfp_cpp *cpp = nsp->cpp;
+ struct nfp_nsp_dma_buf *desc;
+ struct {
+ dma_addr_t dma_addr;
+ unsigned long len;
+ void *chunk;
+ } *chunks;
+ size_t chunk_size, dma_size;
+ dma_addr_t dma_desc;
+ struct device *dev;
+ unsigned long off;
+ int i, ret, nseg;
+ size_t desc_sz;
+
+ chunk_size = BIT_ULL(chunk_order);
+ dma_size = BIT_ULL(dma_order);
+ nseg = DIV_ROUND_UP(max_size, chunk_size);
+
+ chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL);
+ if (!chunks)
+ return -ENOMEM;
+
+ off = 0;
+ ret = -ENOMEM;
+ for (i = 0; i < nseg; i++) {
+ unsigned long coff;
+
+ chunks[i].chunk = kmalloc(chunk_size,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!chunks[i].chunk)
+ goto exit_free_prev;
+
+ chunks[i].len = min_t(u64, chunk_size, max_size - off);
+
+ coff = 0;
+ if (arg->in_size > off) {
+ coff = min_t(u64, arg->in_size - off, chunk_size);
+ memcpy(chunks[i].chunk, arg->in_buf + off, coff);
+ }
+ memset(chunks[i].chunk + coff, 0, chunk_size - coff);
+
+ off += chunks[i].len;
+ }
+
+ dev = nfp_cpp_device(cpp)->parent;
+
+ for (i = 0; i < nseg; i++) {
+ dma_addr_t addr;
+
+ addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len,
+ DMA_BIDIRECTIONAL);
+ chunks[i].dma_addr = addr;
+
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ goto exit_unmap_prev;
+
+ if (WARN_ONCE(round_down(addr, dma_size) !=
+ round_down(addr + chunks[i].len - 1, dma_size),
+ "unaligned DMA address: %pad %lu %zd\n",
+ &addr, chunks[i].len, dma_size)) {
+ ret = -EFAULT;
+ i++;
+ goto exit_unmap_prev;
+ }
+ }
+
+ desc_sz = struct_size(desc, descs, nseg);
+ desc = kmalloc(desc_sz, GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto exit_unmap_all;
+ }
+
+ desc->chunk_cnt = cpu_to_le32(nseg);
+ for (i = 0; i < nseg; i++) {
+ desc->descs[i].size = cpu_to_le32(chunks[i].len);
+ desc->descs[i].addr = cpu_to_le64(chunks[i].dma_addr);
+ }
+
+ dma_desc = dma_map_single(dev, desc, desc_sz, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, dma_desc);
+ if (ret)
+ goto exit_free_desc;
+
+ arg->arg.dma = true;
+ arg->arg.buf = dma_desc;
+ ret = __nfp_nsp_command(nsp, &arg->arg);
+ if (ret < 0)
+ goto exit_unmap_desc;
+
+ i = 0;
+ off = 0;
+ while (off < arg->out_size) {
+ unsigned int len;
+
+ len = min_t(u64, chunks[i].len, arg->out_size - off);
+ memcpy(arg->out_buf + off, chunks[i].chunk, len);
+ off += len;
+ i++;
+ }
+
+exit_unmap_desc:
+ dma_unmap_single(dev, dma_desc, desc_sz, DMA_TO_DEVICE);
+exit_free_desc:
+ kfree(desc);
+exit_unmap_all:
+ i = nseg;
+exit_unmap_prev:
+ while (--i >= 0)
+ dma_unmap_single(dev, chunks[i].dma_addr, chunks[i].len,
+ DMA_BIDIRECTIONAL);
+ i = nseg;
+exit_free_prev:
+ while (--i >= 0)
+ kfree(chunks[i].chunk);
+ kfree(chunks);
+ if (ret < 0)
+ nfp_err(cpp, "NSP: SG DMA failed for command 0x%04x: %d (sz:%d cord:%d)\n",
+ arg->arg.code, ret, max_size, chunk_order);
+ return ret;
+}
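Two size parameters drive the scatter-gather path above: each chunk is at most 2^chunk_order bytes, and once mapped it must not straddle a 2^dma_order-aligned window, which is exactly what the WARN_ONCE round_down comparison enforces. A small sketch of just that arithmetic:

#include <linux/kernel.h>
#include <linux/types.h>

/* Number of chunks needed to cover max_size bytes. */
static unsigned int chunk_count(unsigned int max_size, unsigned int chunk_order)
{
	return DIV_ROUND_UP(max_size, 1U << chunk_order);
}

/* True if [addr, addr + len) stays inside one 2^dma_order aligned window. */
static bool chunk_fits_window(dma_addr_t addr, unsigned long len,
			      unsigned int dma_order)
{
	u64 win = 1ULL << dma_order;

	return round_down(addr, win) == round_down(addr + len - 1, win);
}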
+
+static int
+nfp_nsp_command_buf_dma(struct nfp_nsp *nsp,
+ struct nfp_nsp_command_buf_arg *arg,
+ unsigned int max_size, unsigned int dma_order)
+{
+ unsigned int chunk_order, buf_order;
+ struct nfp_cpp *cpp = nsp->cpp;
+ bool sg_ok;
+ u64 reg;
+ int err;
+
+ buf_order = order_base_2(roundup_pow_of_two(max_size));
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) + NFP_CAP_CMD_DMA_SG,
+ &reg);
+ if (err < 0)
+ return err;
+ sg_ok = reg & BIT_ULL(arg->arg.code - 1);
+
+ if (!sg_ok) {
+ if (buf_order > dma_order) {
+ nfp_err(cpp, "NSP: can't service non-SG DMA for command 0x%04x\n",
+ arg->arg.code);
+ return -ENOMEM;
+ }
+ chunk_order = buf_order;
+ } else {
+ chunk_order = min_t(unsigned int, dma_order, PAGE_SHIFT);
+ }
+
+ return nfp_nsp_command_buf_dma_sg(nsp, arg, max_size, chunk_order,
+ dma_order);
+}
+
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg)
+{
+ unsigned int dma_order, def_size, max_size;
+ struct nfp_cpp *cpp = nsp->cpp;
+ u64 reg;
+ int err;
+
+ if (nsp->ver.minor < 13) {
+ nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n",
+ arg->arg.code, nsp->ver.major, nsp->ver.minor);
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER_CONFIG,
+ &reg);
+ if (err < 0)
+ return err;
+
+ /* Zero out undefined part of the out buffer */
+ if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size)
+ memset(arg->out_buf, 0, arg->out_size - arg->in_size);
+
+ max_size = max(arg->in_size, arg->out_size);
+ def_size = FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M +
+ FIELD_GET(NSP_DFLT_BUFFER_SIZE_4KB, reg) * SZ_4K;
+ dma_order = FIELD_GET(NSP_DFLT_BUFFER_DMA_CHUNK_ORDER, reg);
+ if (def_size >= max_size) {
+ return nfp_nsp_command_buf_def(nsp, arg);
+ } else if (!dma_order) {
+ nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%u < %u)\n",
+ arg->arg.code, def_size, max_size);
+ return -EINVAL;
+ }
+
+ return nfp_nsp_command_buf_dma(nsp, arg, max_size, dma_order);
+}
+
int nfp_nsp_wait(struct nfp_nsp *state)
{
const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ;
@@ -591,10 +799,7 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw)
{
.code = SPCODE_NSP_WRITE_FLASH,
.option = fw->size,
- /* The flash time is specified to take a maximum of 70s
- * so we add an additional factor to this spec time.
- */
- .timeout_sec = 2.5 * 70,
+ .timeout_sec = 900,
},
.in_buf = fw->data,
.in_size = fw->size,
@@ -711,3 +916,52 @@ int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size)
return 0;
}
+
+int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg versions = {
+ {
+ .code = SPCODE_VERSIONS,
+ .option = min_t(u32, size, NFP_VERSIONS_SIZE),
+ },
+ .out_buf = buf,
+ .out_size = min_t(u32, size, NFP_VERSIONS_SIZE),
+ };
+
+ return nfp_nsp_command_buf(state, &versions);
+}
+
+const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
+ const u8 *buf, unsigned int size)
+{
+ static const u32 id2off[] = {
+ [NFP_VERSIONS_BSP] = NFP_VERSIONS_BSP_OFF,
+ [NFP_VERSIONS_CPLD] = NFP_VERSIONS_CPLD_OFF,
+ [NFP_VERSIONS_APP] = NFP_VERSIONS_APP_OFF,
+ [NFP_VERSIONS_BUNDLE] = NFP_VERSIONS_BUNDLE_OFF,
+ [NFP_VERSIONS_UNDI] = NFP_VERSIONS_UNDI_OFF,
+ [NFP_VERSIONS_NCSI] = NFP_VERSIONS_NCSI_OFF,
+ [NFP_VERSIONS_CFGR] = NFP_VERSIONS_CFGR_OFF,
+ };
+ unsigned int field, buf_field_cnt, buf_off;
+
+ if (id >= ARRAY_SIZE(id2off) || !id2off[id])
+ return ERR_PTR(-EINVAL);
+
+ field = id * 2 + flash;
+
+ buf_field_cnt = get_unaligned_le16(buf);
+ if (buf_field_cnt <= field)
+ return ERR_PTR(-ENOENT);
+
+ buf_off = get_unaligned_le16(buf + id2off[id] + flash * 2);
+ if (!buf_off)
+ return ERR_PTR(-ENOENT);
+
+ if (buf_off >= size)
+ return ERR_PTR(-EINVAL);
+ if (strnlen(&buf[buf_off], size - buf_off) == size - buf_off)
+ return ERR_PTR(-EINVAL);
+
+ return (const char *)&buf[buf_off];
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index ff33ac54097a..bd9c358c646f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -38,12 +38,18 @@ static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state)
return nfp_nsp_get_abi_ver_minor(state) > 24;
}
+static inline bool nfp_nsp_has_versions(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 27;
+}
+
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
NFP_INTERFACE_SFP = 1,
NFP_INTERFACE_SFPP = 10,
NFP_INTERFACE_SFP28 = 28,
NFP_INTERFACE_QSFP = 40,
+ NFP_INTERFACE_RJ45 = 45,
NFP_INTERFACE_CXP = 100,
NFP_INTERFACE_QSFP28 = 112,
};
@@ -208,4 +214,19 @@ enum nfp_nsp_sensor_id {
int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
long *val);
+#define NFP_NSP_VERSION_BUFSZ 1024 /* reasonable size, not in the ABI */
+
+enum nfp_nsp_versions {
+ NFP_VERSIONS_BSP,
+ NFP_VERSIONS_CPLD,
+ NFP_VERSIONS_APP,
+ NFP_VERSIONS_BUNDLE,
+ NFP_VERSIONS_UNDI,
+ NFP_VERSIONS_NCSI,
+ NFP_VERSIONS_CFGR,
+};
+
+int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size);
+const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
+ const u8 *buf, unsigned int size);
#endif
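Putting the two new NSP helpers together from a caller's point of view (a trimmed-down mirror of what nfp_devlink_info_get() does above; assumes "nfpcore/nfp_nsp.h" is included and an open nsp handle is available):

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/slab.h>

static void example_print_bsp_version(struct nfp_nsp *nsp)
{
	const char *ver;
	u8 *buf;

	buf = kzalloc(NFP_NSP_VERSION_BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	if (!nfp_nsp_versions(nsp, buf, NFP_NSP_VERSION_BUFSZ)) {
		/* flash=false returns the running version; true would return
		 * the version stored in flash.
		 */
		ver = nfp_nsp_versions_get(NFP_VERSIONS_BSP, false,
					   buf, NFP_NSP_VERSION_BUFSZ);
		if (!IS_ERR(ver))
			pr_info("running BSP version: %s\n", ver);
	}

	kfree(buf);
}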
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 802c9224bb32..311a5be25acb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -206,6 +206,9 @@ nfp_eth_calc_port_type(struct nfp_cpp *cpp, struct nfp_eth_table_port *entry)
if (entry->interface == NFP_INTERFACE_NONE) {
entry->port_type = PORT_NONE;
return;
+ } else if (entry->interface == NFP_INTERFACE_RJ45) {
+ entry->port_type = PORT_TP;
+ return;
}
if (entry->media == NFP_MEDIA_FIBRE)
@@ -269,8 +272,7 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
goto err;
}
- table = kzalloc(sizeof(*table) +
- sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
+ table = kzalloc(struct_size(table, ports, cnt), GFP_KERNEL);
if (!table)
goto err;
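struct_size(table, ports, cnt) expands to sizeof(*table) plus cnt elements of table->ports[], with the multiply-and-add checked for overflow (it saturates to SIZE_MAX so the allocation simply fails), which is why the open-coded arithmetic was replaced. Sketch with an invented structure:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct port_table {
	unsigned int count;
	struct {
		u32 speed;
		u32 index;
	} ports[];			/* flexible array member */
};

static struct port_table *alloc_port_table(unsigned int cnt)
{
	struct port_table *table;

	table = kzalloc(struct_size(table, ports, cnt), GFP_KERNEL);
	if (table)
		table->count = cnt;
	return table;
}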
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 0611f2335b4a..96f7a9818294 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -105,6 +105,12 @@
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
+enum nixge_version {
+ NIXGE_V2,
+ NIXGE_V3,
+ NIXGE_VERSION_COUNT
+};
+
struct nixge_hw_dma_bd {
u32 next_lo;
u32 next_hi;
@@ -287,9 +293,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
priv->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*priv->tx_bd_v) * TX_BD_NUM,
- &priv->tx_bd_p, GFP_KERNEL);
+ priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+ &priv->tx_bd_p, GFP_KERNEL);
if (!priv->tx_bd_v)
goto out;
@@ -299,9 +305,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
if (!priv->tx_skb)
goto out;
- priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*priv->rx_bd_v) * RX_BD_NUM,
- &priv->rx_bd_p, GFP_KERNEL);
+ priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+ &priv->rx_bd_p, GFP_KERNEL);
if (!priv->rx_bd_v)
goto out;
@@ -1225,11 +1231,60 @@ static void *nixge_get_nvmem_address(struct device *dev)
return mac;
}
+/* Match table for of_platform binding */
+static const struct of_device_id nixge_dt_ids[] = {
+ { .compatible = "ni,xge-enet-2.00", .data = (void *)NIXGE_V2 },
+ { .compatible = "ni,xge-enet-3.00", .data = (void *)NIXGE_V3 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nixge_dt_ids);
+
+static int nixge_of_get_resources(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ enum nixge_version version;
+ struct resource *ctrlres;
+ struct resource *dmares;
+ struct net_device *ndev;
+ struct nixge_priv *priv;
+
+ ndev = platform_get_drvdata(pdev);
+ priv = netdev_priv(ndev);
+ of_id = of_match_node(nixge_dt_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -ENODEV;
+
+ version = (enum nixge_version)of_id->data;
+ if (version <= NIXGE_V2)
+ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ else
+ dmares = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dma");
+
+ priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
+ if (IS_ERR(priv->dma_regs)) {
+ netdev_err(ndev, "failed to map dma regs\n");
+ return PTR_ERR(priv->dma_regs);
+ }
+ if (version <= NIXGE_V2) {
+ priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
+ } else {
+ ctrlres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ctrl");
+ priv->ctrl_regs = devm_ioremap_resource(&pdev->dev, ctrlres);
+ }
+ if (IS_ERR(priv->ctrl_regs)) {
+ netdev_err(ndev, "failed to map ctrl regs\n");
+ return PTR_ERR(priv->ctrl_regs);
+ }
+ return 0;
+}
+
static int nixge_probe(struct platform_device *pdev)
{
+ struct device_node *mn, *phy_node;
struct nixge_priv *priv;
struct net_device *ndev;
- struct resource *dmares;
const u8 *mac_addr;
int err;
@@ -1261,14 +1316,9 @@ static int nixge_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
-
- dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
- if (IS_ERR(priv->dma_regs)) {
- netdev_err(ndev, "failed to map dma regs\n");
- return PTR_ERR(priv->dma_regs);
- }
- priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
+ err = nixge_of_get_resources(pdev);
+ if (err)
+ return err;
__nixge_hw_set_mac_address(ndev);
priv->tx_irq = platform_get_irq_byname(pdev, "tx");
@@ -1286,10 +1336,14 @@ static int nixge_probe(struct platform_device *pdev)
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- err = nixge_mdio_setup(priv, pdev->dev.of_node);
- if (err) {
- netdev_err(ndev, "error registering mdio bus");
- goto free_netdev;
+ mn = of_get_child_by_name(pdev->dev.of_node, "mdio");
+ if (mn) {
+ err = nixge_mdio_setup(priv, mn);
+ of_node_put(mn);
+ if (err) {
+ netdev_err(ndev, "error registering mdio bus");
+ goto free_netdev;
+ }
}
priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
@@ -1299,23 +1353,33 @@ static int nixge_probe(struct platform_device *pdev)
goto unregister_mdio;
}
- priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (!priv->phy_node) {
- netdev_err(ndev, "not find \"phy-handle\" property\n");
- err = -EINVAL;
- goto unregister_mdio;
+ phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(pdev->dev.of_node)) {
+ err = of_phy_register_fixed_link(pdev->dev.of_node);
+ if (err < 0) {
+ netdev_err(ndev, "broken fixed-link specification\n");
+ goto unregister_mdio;
+ }
+ phy_node = of_node_get(pdev->dev.of_node);
}
+ priv->phy_node = phy_node;
err = register_netdev(priv->ndev);
if (err) {
netdev_err(ndev, "register_netdev() error (%i)\n", err);
- goto unregister_mdio;
+ goto free_phy;
}
return 0;
+free_phy:
+ if (of_phy_is_fixed_link(pdev->dev.of_node))
+ of_phy_deregister_fixed_link(pdev->dev.of_node);
+ of_node_put(phy_node);
+
unregister_mdio:
- mdiobus_unregister(priv->mii_bus);
+ if (priv->mii_bus)
+ mdiobus_unregister(priv->mii_bus);
free_netdev:
free_netdev(ndev);
@@ -1330,20 +1394,18 @@ static int nixge_remove(struct platform_device *pdev)
unregister_netdev(ndev);
- mdiobus_unregister(priv->mii_bus);
+ if (of_phy_is_fixed_link(pdev->dev.of_node))
+ of_phy_deregister_fixed_link(pdev->dev.of_node);
+ of_node_put(priv->phy_node);
+
+ if (priv->mii_bus)
+ mdiobus_unregister(priv->mii_bus);
free_netdev(ndev);
return 0;
}
-/* Match table for of_platform binding */
-static const struct of_device_id nixge_dt_ids[] = {
- { .compatible = "ni,xge-enet-2.00", },
- {},
-};
-MODULE_DEVICE_TABLE(of, nixge_dt_ids);
-
static struct platform_driver nixge_driver = {
.probe = nixge_probe,
.remove = nixge_remove,
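The probe and remove changes above add the usual fixed-link fallback: when the node has no phy-handle but carries a fixed-link description, register it and use the device node itself as the PHY node, then deregister it before dropping the reference on teardown. A condensed sketch of both halves:

#include <linux/of.h>
#include <linux/of_mdio.h>

static struct device_node *example_get_phy_node(struct device_node *np)
{
	struct device_node *phy_node;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		if (of_phy_register_fixed_link(np) < 0)
			return NULL;
		/* The node itself describes the fixed link. */
		phy_node = of_node_get(np);
	}
	return phy_node;
}

static void example_put_phy_node(struct device_node *np,
				 struct device_node *phy_node)
{
	/* Undo the fixed-link registration before dropping the reference. */
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
}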
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index c662c6f5bee3..67bf02b0763a 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -630,7 +630,7 @@ static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
ether->skb = skb;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
return 0;
}
return -EAGAIN;
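dev_kfree_skb_irq() and dev_consume_skb_irq() free the skb the same way from hard-IRQ context; they differ only in tracing, where the former is accounted as a drop and the latter as normal consumption. Converting successful TX completions keeps drop-monitoring tools (dropwatch, perf trace on kfree_skb) from flagging healthy traffic. Sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_tx_complete(struct sk_buff *skb, bool success)
{
	if (success)
		dev_consume_skb_irq(skb);	/* delivered, not a drop */
	else
		dev_kfree_skb_irq(skb);		/* counted as a drop */
}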
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 43c0c10dfeb7..552d930e3940 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring->rx_buff_pool =
- dma_zalloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
+ dma_alloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
if (!rx_ring->rx_buff_pool)
return -ENOMEM;
@@ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
- tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
return -ENOMEM;
@@ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
return -ENOMEM;
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
- rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
vfree(rx_ring->buffer_info);
return -ENOMEM;
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index c9529c29a0a7..eee883a2aa8d 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1337,7 +1337,7 @@ static irqreturn_t hamachi_interrupt(int irq, void *dev_instance)
leXX_to_cpu(hmp->tx_ring[entry].addr),
skb->len,
PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 54224d1822e3..6f8d6584f809 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -925,7 +925,7 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
/* Free the original skb. */
pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
yp->tx_skbuff[entry] = NULL;
}
if (yp->tx_full &&
@@ -983,7 +983,7 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
pci_unmap_single(yp->pci_dev,
yp->tx_ring[entry<<1].addr, skb->len,
PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
yp->tx_skbuff[entry] = 0;
/* Mark status as empty. */
yp->tx_status[entry].tx_errs = 0;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 8a31a02c9f47..d21041554507 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
goto out_ring_desc;
- ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
- RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma, GFP_KERNEL);
+ ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+ RX_RING_SIZE * sizeof(u64),
+ &ring->buf_dma, GFP_KERNEL);
if (!ring->buffers)
goto out_ring_desc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 24a90163775e..43a57ec296fd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -53,7 +53,7 @@
extern const struct qed_common_ops qed_common_ops_pass;
#define QED_MAJOR_VERSION 8
-#define QED_MINOR_VERSION 33
+#define QED_MINOR_VERSION 37
#define QED_REVISION_VERSION 0
#define QED_ENGINEERING_VERSION 20
@@ -554,7 +554,6 @@ struct qed_hwfn {
u8 dp_level;
char name[NAME_SIZE];
- bool first_on_engine;
bool hw_init_done;
u8 num_funcs_on_engine;
@@ -754,6 +753,7 @@ struct qed_dev {
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
+ u8 num_ports;
u8 num_ports_in_engine;
u8 num_funcs_in_port;
@@ -805,6 +805,9 @@ struct qed_dev {
u32 mcp_nvm_resp;
+ /* Recovery */
+ bool recov_in_prog;
+
/* Linux specific here */
struct qede_dev *edev;
struct pci_dev *pdev;
@@ -890,7 +893,6 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_device_num_engines(struct qed_dev *cdev);
-int qed_device_get_port_id(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
@@ -937,6 +939,10 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
writel((u32)val, (void __iomem *)((u8 __iomem *)\
(cdev->doorbells) + (db_addr)))
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ qed_device_num_ports((_p_hwfn)->cdev))
+int qed_device_num_ports(struct qed_dev *cdev);
+
/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info);
@@ -944,6 +950,7 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
+void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index dc1c1b616084..e61d1d905415 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
u32 size = min_t(u32, total_size, psz);
void **p_virt = &p_mngr->t2[i].p_virt;
- *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
- size, &p_mngr->t2[i].p_phys,
- GFP_KERNEL);
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_mngr->t2[i].p_phys,
+ GFP_KERNEL);
if (!p_mngr->t2[i].p_virt) {
rc = -ENOMEM;
goto t2_fail;
@@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 size;
size = min_t(u32, sz_left, p_blk->real_size_in_page);
- p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
- &p_phys, GFP_KERNEL);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
@@ -2129,17 +2129,18 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
rdma_tasks);
/* no need for break since RoCE coexist with Ethernet */
}
+ /* fall through */
case QED_PCI_ETH:
{
struct qed_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
- if (!p_params->num_vf_cons)
- p_params->num_vf_cons =
- ETH_PF_PARAMS_VF_CONS_DEFAULT;
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons,
- p_params->num_vf_cons);
+ if (!p_params->num_vf_cons)
+ p_params->num_vf_cons =
+ ETH_PF_PARAMS_VF_CONS_DEFAULT;
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons,
+ p_params->num_vf_cons);
p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
break;
}
@@ -2306,9 +2307,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
goto out0;
}
- p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_blk->real_size_in_page, &p_phys,
- GFP_KERNEL);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page, &p_phys,
+ GFP_KERNEL);
if (!p_virt) {
rc = -ENOMEM;
goto out1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 8f6551421945..9df8c4b3b54e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
/* get pq index according to PQ_FLAGS */
static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
- u32 pq_flags)
+ unsigned long pq_flags)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
/* Can't have multiple flags set here */
- if (bitmap_weight((unsigned long *)&pq_flags,
+ if (bitmap_weight(&pq_flags,
sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
- DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+ DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
goto err;
}
if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
- DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
+ DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
goto err;
}
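The switch to an unsigned long parameter matters because the bitmap helpers operate on arrays of unsigned long: casting the address of a u32 makes bitmap_weight() load a full long, reading past the variable and, on 64-bit big-endian, examining the wrong half of the word. Passing a genuine unsigned long keeps the call well defined. Sketch:

#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/types.h>

/* True if more than one flag bit is set in the word. */
static bool multiple_flags_set(unsigned long flags)
{
	return bitmap_weight(&flags, sizeof(flags) * BITS_PER_BYTE) > 1;
}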
@@ -1959,11 +1959,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
(p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
- /* Cleanup chip from previous driver if such remains exist */
- rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc)
- return rc;
-
/* Sanity check before the PF init sequence that uses DMAE */
rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
if (rc)
@@ -2007,17 +2002,15 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 enable)
+int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool b_enable)
{
- u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+ u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
- /* Change PF in PXP */
- qed_wr(p_hwfn, p_ptt,
- PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+ /* Configure the PF's internal FID_enable for master transactions */
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
- /* wait until value is set - try for 1 second every 50us */
+ /* Wait until value is set - try for 1 second every 50us */
for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
val = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
@@ -2071,13 +2064,19 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ BIT(p_hwfn->abs_pf_id));
+}
+
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
struct qed_load_req_params load_req_params;
u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
- int rc = 0, mfw_rc, i;
+ int rc = 0, i;
u16 ether_type;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
@@ -2092,7 +2091,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
}
for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ p_hwfn = &cdev->hwfns[i];
/* If management didn't provide a default, set one of our own */
if (!p_hwfn->hw_info.mtu) {
@@ -2105,9 +2104,6 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
continue;
}
- /* Enable DMAE in PXP */
- rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
-
rc = qed_calc_hw_mode(p_hwfn);
if (rc)
return rc;
@@ -2144,12 +2140,43 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
"Load request was sent. Load code: 0x%x\n",
load_code);
+ /* Only relevant for recovery:
+ * Clear the indication after LOAD_REQ is responded by the MFW.
+ */
+ cdev->recov_in_prog = false;
+
qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
- p_hwfn->first_on_engine = (load_code ==
- FW_MSG_CODE_DRV_LOAD_ENGINE);
+ /* Clean up chip from previous driver if such remains exist.
+ * This is not needed when the PF is the first one on the
+ * engine, since afterwards we are going to init the FW.
+ */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
+ rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->rel_pf_id, false);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Final cleanup failed\n");
+ goto load_err;
+ }
+ }
+
+ /* Log and clear previous pglue_b errors if such exist */
+ qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* Enable the PF's internal FID_enable in the PXP */
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ true);
+ if (rc)
+ goto load_err;
+
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
@@ -2180,39 +2207,29 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
break;
}
- if (rc)
+ if (rc) {
DP_NOTICE(p_hwfn,
"init phase failed for loadcode 0x%x (rc %d)\n",
- load_code, rc);
+ load_code, rc);
+ goto load_err;
+ }
- /* ACK mfw regardless of success or failure of initialization */
- mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_LOAD_DONE,
- 0, &load_code, &param);
+ rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
return rc;
- if (mfw_rc) {
- DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
- return mfw_rc;
- }
-
- /* Check if there is a DID mismatch between nvm-cfg/efuse */
- if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
- DP_NOTICE(p_hwfn,
- "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn,
QED_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
- mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
- &load_code, &param);
- if (mfw_rc) {
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &resp, &param);
+ if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send DCBX attention request\n");
- return mfw_rc;
+ return rc;
}
p_hwfn->hw_init_done = true;
@@ -2261,6 +2278,12 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
}
return 0;
+
+load_err:
+ /* The MFW load lock should be released also when initialization fails.
+ */
+ qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ return rc;
}
#define QED_HW_STOP_RETRY_LIMIT (10)
@@ -2273,6 +2296,9 @@ static void qed_hw_timers_stop(struct qed_dev *cdev,
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ if (cdev->recov_in_prog)
+ return;
+
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
@@ -2335,12 +2361,14 @@ int qed_hw_stop(struct qed_dev *cdev)
p_hwfn->hw_init_done = false;
/* Send unload command to MCP */
- rc = qed_mcp_unload_req(p_hwfn, p_ptt);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed sending a UNLOAD_REQ command. rc = %d.\n",
- rc);
- rc2 = -EINVAL;
+ if (!cdev->recov_in_prog) {
+ rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
}
qed_slowpath_irq_sync(p_hwfn);
@@ -2382,27 +2410,31 @@ int qed_hw_stop(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
- qed_mcp_unload_done(p_hwfn, p_ptt);
- if (rc) {
- DP_NOTICE(p_hwfn,
- "Failed sending a UNLOAD_DONE command. rc = %d.\n",
- rc);
- rc2 = -EINVAL;
+ if (!cdev->recov_in_prog) {
+ rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
}
}
- if (IS_PF(cdev)) {
+ if (IS_PF(cdev) && !cdev->recov_in_prog) {
p_hwfn = QED_LEADING_HWFN(cdev);
p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
+ /* Clear the PF's internal FID_enable in the PXP.
+ * In CMT this should only be done for first hw-function, and
+ * only after all transactions have stopped for all active
+ * hw-functions.
*/
- rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
if (rc) {
DP_NOTICE(p_hwfn,
- "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+ "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
rc2 = -EINVAL;
}
}
@@ -2502,9 +2534,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
}
- /* Clean Previous errors if such exist */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+ /* Clean previous pglue_b errors if such exist */
+ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -3238,55 +3269,43 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
-static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
-{
- u32 port_mode;
-
- port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
-
- if (port_mode < 3) {
- p_hwfn->cdev->num_ports_in_engine = 1;
- } else if (port_mode <= 5) {
- p_hwfn->cdev->num_ports_in_engine = 2;
- } else {
- DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
- p_hwfn->cdev->num_ports_in_engine);
-
- /* Default num_ports_in_engine to something */
- p_hwfn->cdev->num_ports_in_engine = 1;
- }
-}
-
-static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 port;
- int i;
-
- p_hwfn->cdev->num_ports_in_engine = 0;
+ u32 addr, global_offsize, global_addr, port_mode;
+ struct qed_dev *cdev = p_hwfn->cdev;
- for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
- port = qed_rd(p_hwfn, p_ptt,
- CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
- if (port & 1)
- p_hwfn->cdev->num_ports_in_engine++;
+ /* In CMT there is always only one port */
+ if (cdev->num_hwfns > 1) {
+ cdev->num_ports_in_engine = 1;
+ cdev->num_ports = 1;
+ return;
}
- if (!p_hwfn->cdev->num_ports_in_engine) {
- DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
-
- /* Default num_ports_in_engine to something */
- p_hwfn->cdev->num_ports_in_engine = 1;
+ /* Determine the number of ports per engine */
+ port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE);
+ switch (port_mode) {
+ case 0x0:
+ cdev->num_ports_in_engine = 1;
+ break;
+ case 0x1:
+ cdev->num_ports_in_engine = 2;
+ break;
+ case 0x2:
+ cdev->num_ports_in_engine = 4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode);
+ cdev->num_ports_in_engine = 1; /* Default to something */
+ break;
}
-}
-static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- if (QED_IS_BB(p_hwfn->cdev))
- qed_hw_info_port_num_bb(p_hwfn, p_ptt);
- else
- qed_hw_info_port_num_ah(p_hwfn, p_ptt);
+ /* Get the total number of ports of the device */
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + offsetof(struct public_global, max_ports);
+ cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr);
}
static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -3324,7 +3343,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
return rc;
}
- qed_hw_info_port_num(p_hwfn, p_ptt);
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_hw_info_port_num(p_hwfn, p_ptt);
qed_mcp_get_capabilities(p_hwfn, p_ptt);
@@ -3440,6 +3460,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
void __iomem *p_doorbells,
enum qed_pci_personality personality)
{
+ struct qed_dev *cdev = p_hwfn->cdev;
int rc = 0;
/* Split PCI bars evenly between hwfns */
@@ -3492,7 +3513,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
* is called as it sets the ports number in an engine.
*/
- if (IS_LEAD_HWFN(p_hwfn)) {
+ if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) {
rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
@@ -4728,23 +4749,9 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}
-int qed_device_num_engines(struct qed_dev *cdev)
-{
- return QED_IS_BB(cdev) ? 2 : 1;
-}
-
-static int qed_device_num_ports(struct qed_dev *cdev)
-{
- /* in CMT always only one port */
- if (cdev->num_hwfns > 1)
- return 1;
-
- return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
-}
-
-int qed_device_get_port_id(struct qed_dev *cdev)
+int qed_device_num_ports(struct qed_dev *cdev)
{
- return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
+ return cdev->num_ports;
}
void qed_set_fw_mac_addr(__le16 *fw_msb,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index acccd85170aa..e4b4e3b78e8a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -473,6 +473,18 @@ int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
/**
+ * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_enable - true/false
+ *
+ * @return int
+ */
+int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool b_enable);
+
+/**
* @brief db_recovery_add - add doorbell information to the doorbell
* recovery mechanism.
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index b13cfb449d8f..37edaa847512 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12796,6 +12796,7 @@ struct public_drv_mb {
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
/* get MFW feature support response */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
@@ -12827,7 +12828,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LLDP_DATA_UPDATED,
MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_RESERVED4,
+ MFW_DRV_MSG_ERROR_RECOVERY,
MFW_DRV_MSG_BW_UPDATE,
MFW_DRV_MSG_S_TAG_UPDATE,
MFW_DRV_MSG_GET_LAN_STATS,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 70504dcf4087..72ec1c6bdf70 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -703,6 +703,17 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
int qed_status = 0;
u32 offset = 0;
+ if (p_hwfn->cdev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
+ src_addr, src_type, dst_addr, dst_type,
+ size_in_dwords);
+
+ /* Let the flow complete w/o any error handling */
+ return 0;
+ }
+
qed_dmae_opcode(p_hwfn,
(src_type == QED_DMAE_ADDRESS_GRC),
(dst_type == QED_DMAE_ADDRESS_GRC),
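
The qed_hw.c hunk adds a guard at the top of the DMAE engine: while recovery is in progress the transaction is skipped and reported as successful, so callers finish without triggering error handling. The same skip-and-report-success pattern guards SPQ posts in qed_spq.c further below. A tiny, self-contained model of the guard (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

static int execute_command(bool recov_in_prog)
{
	if (recov_in_prog) {
		printf("recovery in progress, skipping transaction\n");
		return 0;	/* let the caller's flow complete cleanly */
	}

	/* ... the real transaction would run here ... */
	return 0;
}

int main(void)
{
	return execute_command(true);
}
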
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 92340919d852..e23980e301b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -255,112 +255,114 @@ out:
#define PGLUE_ATTENTION_ICPL_VALID (1 << 23)
#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
-static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
u32 tmp;
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
if (tmp & PGLUE_ATTENTION_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
- addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
- details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal write by chip to [%08x:%08x] blocked.\n"
- "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
- "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
- GET_FIELD(details,
- PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
- tmp,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+ DP_NOTICE(p_hwfn,
+ "Illegal write by chip to [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
}
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
if (tmp & PGLUE_ATTENTION_RD_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
- addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
- details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal read by chip from [%08x:%08x] blocked.\n"
- " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
- " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
- GET_FIELD(details,
- PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
- tmp,
- GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
- : 0,
- GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
- GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
- : 0);
+ DP_NOTICE(p_hwfn,
+ "Illegal read by chip from [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
}
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
if (tmp & PGLUE_ATTENTION_ICPL_VALID)
- DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+ DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
u32 addr_hi, addr_lo;
- addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
- addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
- DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n",
- tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
}
- tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
if (tmp & PGLUE_ATTENTION_ILT_VALID) {
u32 addr_hi, addr_lo, details;
- addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
- addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
- details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_DETAILS);
- DP_INFO(p_hwfn,
- "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
- details, tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
}
/* Clear the indications */
- qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));
return 0;
}
+static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+}
+
#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
@@ -540,7 +542,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{"PGLUE misc_flr", ATTENTION_SINGLE,
NULL, MAX_BLOCK_ID},
{"PGLUE B RBC", ATTENTION_PAR_INT,
- qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
+ qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
{"PGLUE misc_mctp", ATTENTION_SINGLE,
NULL, MAX_BLOCK_ID},
{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index d81a62ebd524..1f356ed4f761 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -431,4 +431,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index beb8e5d6401a..ded556b7bab5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
+ if (!ether_addr_equal(ethh->h_dest,
+ p_hwfn->p_rdma_info->iwarp.mac_addr)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "Got unexpected mac %pM instead of %pM\n",
+ ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
+ return -EINVAL;
+ }
+
ether_addr_copy(remote_mac_addr, ethh->h_source);
ether_addr_copy(local_mac_addr, ethh->h_dest);
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
struct qed_iwarp_info *iwarp_info;
struct qed_ll2_acquire_data data;
struct qed_ll2_cbs cbs;
- u32 mpa_buff_size;
+ u32 buff_size;
u16 n_ooo_bufs;
int rc = 0;
int i;
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
- data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+ data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
goto err;
}
+ buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
QED_IWARP_LL2_SYN_RX_SIZE,
- QED_IWARP_MAX_SYN_PKT_SIZE,
+ buff_size,
iwarp_info->ll2_syn_handle);
if (rc)
goto err;
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
- mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
data.input.rx_num_desc,
- mpa_buff_size,
+ buff_size,
iwarp_info->ll2_mpa_handle);
if (rc)
goto err;
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
- iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
+ iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
if (!iwarp_info->mpa_intermediate_buf)
goto err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index b8f612d00241..7ac959038324 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
#define QED_IWARP_LL2_SYN_TX_SIZE (128)
#define QED_IWARP_LL2_SYN_RX_SIZE (256)
-#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
#define QED_IWARP_MAX_OOO (16)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 67c02ea93906..57641728df69 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
!!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & QED_ACCEPT_BCAST));
@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
return rc;
}
+ if (p_params->update_ctl_frame_check) {
+ p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+ p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+ }
+
/* Update mcast bins for VFs, PF doesn't use this functionality */
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
@@ -1889,6 +1898,7 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
: NULL;
+ bool b_get_port_stats;
if (IS_PF(cdev)) {
/* The main vport index is relative first */
@@ -1903,8 +1913,9 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
continue;
}
+ b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn);
__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
- IS_PF(cdev) ? true : false);
+ b_get_port_stats);
out:
if (IS_PF(cdev) && p_ptt)
@@ -2207,7 +2218,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
u16 num_queues = 0;
/* Since the feature controls only queue-zones,
- * make sure we have the contexts [rx, tx, xdp] to
+ * make sure we have the contexts [rx, xdp, tcs] to
* match.
*/
for_each_hwfn(cdev, i) {
@@ -2217,7 +2228,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
u16 cids;
cids = hwfn->pf_params.eth_pf_params.num_cons;
- num_queues += min_t(u16, l2_queues, cids / 3);
+ cids /= (2 + info->num_tc);
+ num_queues += min_t(u16, l2_queues, cids);
}
/* queues might theoretically be >256, but interrupts'
@@ -2688,7 +2700,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
QED_ACCEPT_MCAST_UNMATCHED;
- accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+ accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
@@ -2860,7 +2873,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
p_hwfn = p_cid->p_owner;
rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
if (rc)
- DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "Unable to read queue coalescing\n");
return rc;
}
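
The qed_fill_eth_dev_info() change above replaces the fixed divide-by-3 (rx, tx, xdp) with a divide by (2 + num_tc): each queue-zone needs an rx context, an xdp context and one tx context per traffic class. A small, self-contained model of the arithmetic, with example values that are illustrative only:

#include <stdio.h>

static unsigned short queues_for_hwfn(unsigned short l2_queues,
				      unsigned short num_cons,
				      unsigned char num_tc)
{
	unsigned short cids = num_cons / (2 + num_tc);

	return l2_queues < cids ? l2_queues : cids;
}

int main(void)
{
	/* e.g. 64 L2 queue-zones, 192 connections, 4 TCs -> 32 queues */
	printf("%u queues\n", queues_for_hwfn(64, 192, 4));
	return 0;
}
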
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8d80f1095d17..7127d5aaac42 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;
+ u8 update_ctl_frame_check;
+ u8 mac_chk_en;
+ u8 ethtype_chk_en;
};
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 90afd514ffe1..b5f419b71287 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1619,6 +1619,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+
+ /* Make sure chain element is updated before ringing the doorbell */
+ dma_wmb();
+
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
@@ -2447,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
{
struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
+ u8 flags = 0, nr_frags;
int rc = -EINVAL, i;
dma_addr_t mapping;
u16 vlan = 0;
- u8 flags = 0;
if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
return -EINVAL;
}
- if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ /* Cache number of fragments from SKB since SKB may be freed by
+ * the completion routine after calling qed_ll2_prepare_tx_packet()
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
- 1 + skb_shinfo(skb)->nr_frags);
+ 1 + nr_frags);
return -EINVAL;
}
@@ -2481,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
}
memset(&pkt, 0, sizeof(pkt));
- pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+ pkt.num_of_bds = 1 + nr_frags;
pkt.vlan = vlan;
pkt.bd_flags = flags;
pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2492,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
pkt.remove_stag = true;
+ /* qed_ll2_prepare_tx_packet() may actually send the packet if
+ * there are no fragments in the skb, in which case the completion
+ * routine may run and free the SKB. Do not dereference the SKB
+ * beyond this point unless it has fragments.
+ */
rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
&pkt, 1);
if (rc)
goto err;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 6adf5bda9811..f164d4acebcb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -281,6 +281,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
dev_info->wol_support = true;
+ dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
+
dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
@@ -359,6 +361,8 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
qed_init_dp(cdev, params->dp_module, params->dp_level);
+ cdev->recov_in_prog = params->recov_in_prog;
+
rc = qed_init_pci(cdev, pdev);
if (rc) {
DP_ERR(cdev, "init pci failed\n");
@@ -2203,6 +2207,15 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
+void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
+{
+ struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+
+ if (ops && ops->schedule_recovery_handler)
+ ops->schedule_recovery_handler(cookie);
+}
+
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
void *handle)
{
@@ -2226,6 +2239,23 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
+static int qed_recovery_process(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ rc = qed_start_recovery_process(p_hwfn, p_ptt);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -2380,6 +2410,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.nvm_get_image = &qed_nvm_get_image,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
+ .recovery_process = &qed_recovery_process,
+ .recovery_prolog = &qed_recovery_prolog,
.update_drv_state = &qed_update_drv_state,
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
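
qed_main.c is where the recovery machinery becomes visible to the protocol driver: the core only schedules recovery via the schedule_recovery_handler callback, while recovery_process and recovery_prolog are exported through qed_common_ops for the protocol driver to drive the actual sequence. A minimal, self-contained model of that callback indirection, using stand-in types rather than the kernel structures:

#include <stdio.h>

struct common_cb_ops {
	void (*schedule_recovery_handler)(void *cookie);
};

/* Protocol-driver side: a real driver would set a flag and queue
 * delayed work here rather than recover inline.
 */
static void proto_schedule_recovery(void *cookie)
{
	printf("recovery scheduled for %s\n", (const char *)cookie);
}

/* Core side: only notify, never act on behalf of the protocol driver */
static void core_schedule_recovery(const struct common_cb_ops *ops, void *cookie)
{
	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

int main(void)
{
	struct common_cb_ops ops = {
		.schedule_recovery_handler = proto_schedule_recovery,
	};

	core_schedule_recovery(&ops, "qede0");
	return 0;
}
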
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index e7f18e34ff0d..cc27fd60d689 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1070,6 +1070,27 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ &param);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return 0;
+}
+
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_mb_params mb_params;
@@ -1528,6 +1549,60 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
return 0;
}
+u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
+ path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
+
+ proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path, process_kill)) &
+ PROCESS_KILL_COUNTER_MASK;
+
+ return proc_kill_cnt;
+}
+
+static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 proc_kill_cnt;
+
+ /* Prevent possible attentions/interrupts during the recovery handling
+ * and till its load phase, during which they will be re-enabled.
+ */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ DP_NOTICE(p_hwfn, "Received a process kill indication\n");
+
+ /* The following operations should be done once, and thus in CMT mode
+ * are carried out by only the first HW function.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(cdev))
+ return;
+
+ if (cdev->recov_in_prog) {
+ DP_NOTICE(p_hwfn,
+ "Ignoring the indication since a recovery process is already in progress\n");
+ return;
+ }
+
+ cdev->recov_in_prog = true;
+
+ proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
+ DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
+
+ qed_schedule_recovery_handler(p_hwfn);
+}
+
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum MFW_DRV_MSG_TYPE type)
@@ -1758,6 +1833,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_ERROR_RECOVERY:
+ qed_mcp_handle_process_kill(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_GET_LAN_STATS:
case MFW_DRV_MSG_GET_FCOE_STATS:
case MFW_DRV_MSG_GET_ISCSI_STATS:
@@ -2303,6 +2381,43 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ if (cdev->recov_in_prog) {
+ DP_NOTICE(p_hwfn,
+ "Avoid triggering a recovery since such a process is already in progress\n");
+ return -EAGAIN;
+ }
+
+ DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+ return 0;
+}
+
+#define QED_RECOVERY_PROLOG_SLEEP_MS 100
+
+int qed_recovery_prolog(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ int rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
+}
+
static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num)
@@ -3539,6 +3654,12 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
}
}
+bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
+}
+
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 mcp_resp;
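
Taken together, the qed_mcp.c additions form the entry point of the recovery flow: the MFW raises MFW_DRV_MSG_ERROR_RECOVERY, qed_mcp_handle_process_kill() masks further attentions, lets only the leading function latch recov_in_prog, reads the process-kill counter and schedules the handler; qed_recovery_prolog() later clears the PF's FID_enable after a short drain delay. A tiny, self-contained model of the latch-and-gate step (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

struct dev_state { bool recov_in_prog; };

static void handle_process_kill(struct dev_state *dev, bool is_leading)
{
	if (!is_leading)
		return;			/* CMT: only the leading function acts */

	if (dev->recov_in_prog) {
		printf("recovery already in progress, ignoring\n");
		return;
	}

	dev->recov_in_prog = true;
	printf("recovery scheduled\n");
}

int main(void)
{
	struct dev_state dev = { .recov_in_prog = false };

	handle_process_kill(&dev, true);	/* schedules */
	handle_process_kill(&dev, true);	/* ignored   */
	return 0;
}
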
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index eddf67798d6f..261c1a392e2c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -441,6 +441,38 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_mcp_drv_version *p_ver);
/**
+ * @brief Read the MFW process kill counter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Trigger a recovery process
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief A recovery handler must call this function as its first step.
+ * It is assumed that the handler is not run from an interrupt context.
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_recovery_prolog(struct qed_dev *cdev);
+
+/**
* @brief Notify MFW about the change in base device properties
*
* @param p_hwfn
@@ -659,10 +691,6 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
-#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->cdev->num_ports_in_engine * \
- qed_device_num_engines((_p_hwfn)->cdev)))
-
struct qed_mcp_info {
/* List for mailbox commands which were sent and wait for a response */
struct list_head cmd_list;
@@ -801,6 +829,16 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_load_req_params *p_params);
/**
+ * @brief Sends a LOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
* @param p_hwfn
@@ -1106,6 +1144,16 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
struct qed_resc_unlock_params *p_unlock,
enum qed_resc_lock
resource, bool b_is_permanent);
+
+/**
+ * @brief - Return whether management firmware supports smart AN
+ *
+ * @param p_hwfn
+ *
+ * @return bool - true if feature is supported.
+ */
+bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);
+
/**
* @brief Learn of supported MFW features; To be done during early init
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 5a90d69dc2f8..1302b308bd87 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -47,7 +47,7 @@
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
- switch (qed_device_get_port_id(p_hwfn->cdev)) {
+ switch (MFW_PORT(p_hwfn)) {
case 0:
return QED_RESC_LOCK_PTP_PORT0;
case 1:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 8939ed6e08b7..5ce825ca5f24 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -518,6 +518,8 @@
0x180824UL
#define MISC_REG_AEU_GENERAL_ATTN_0 \
0x008400UL
+#define MISC_REG_AEU_GENERAL_ATTN_35 \
+ 0x00848cUL
#define CAU_REG_SB_ADDR_MEMORY \
0x1c8000UL
#define CAU_REG_SB_VAR_MEMORY \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 4179c9013fc6..96ab77ae6af5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
* @param p_hwfn
*/
void qed_consq_free(struct qed_hwfn *p_hwfn);
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
/**
* @file
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 888274fa208b..5a495fda9e9d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ p_ent->ramrod.pf_update.mf_vlan |=
+ cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
return qed_spq_post(p_hwfn, p_ent, NULL);
}
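
For UFP, the pf_update ramrod above now carries the traffic class in the upper (PCP) bits of the 16-bit mf_vlan field, on top of the 12-bit outer VLAN id. A self-contained sketch of that packing as plain C, not the ramrod structure itself:

#include <stdio.h>

static unsigned short pack_mf_vlan(unsigned short ovlan, unsigned char tc)
{
	/* outer VLAN id in bits 11:0, traffic class in the PCP bits 15:13 */
	return (unsigned short)(ovlan | ((unsigned short)tc << 13));
}

int main(void)
{
	printf("0x%04x\n", pack_mf_vlan(0x0065, 5));	/* prints 0xa065 */
	return 0;
}
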
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index eb88bbc6b193..79b311b86f66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+ /* Attempt to post pending requests */
+ spin_lock_bh(&p_hwfn->p_spq->lock);
+ rc = qed_spq_pend_post(p_hwfn);
+ spin_unlock_bh(&p_hwfn->p_spq->lock);
+
return rc;
}
@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
return 0;
}
-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
@@ -790,6 +795,17 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
+static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ if (!fw_return_code)
+ return;
+
+ if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
+ p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
+ *fw_return_code = RDMA_RETURN_OK;
+}
+
/* Avoid overriding of SPQ entries when getting out-of-order completions, by
* marking the completions in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
@@ -825,6 +841,17 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+ if (p_hwfn->cdev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
+ p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+
+ /* Let the flow complete w/o any error handling */
+ qed_spq_recov_set_ret_code(p_ent, fw_return_code);
+ return 0;
+ }
+
/* Complete the entry */
rc = qed_spq_fill_entry(p_hwfn, p_ent);
@@ -905,7 +932,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_spq_entry *tmp;
struct qed_spq_entry *found = NULL;
- int rc;
if (!p_hwfn)
return -EINVAL;
@@ -963,12 +989,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
*/
qed_spq_return_entry(p_hwfn, found);
- /* Attempt to post pending requests */
- spin_lock_bh(&p_spq->lock);
- rc = qed_spq_pend_post(p_hwfn);
- spin_unlock_bh(&p_spq->lock);
-
- return rc;
+ return 0;
}
int qed_consq_alloc(struct qed_hwfn *p_hwfn)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ca6290fa0f30..9faaa6df78ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
- params.check_mac = true;
+
+ /* Non-trusted VFs should enable control frame filtering */
+ params.check_mac = !vf->p_vf_info.is_trusted_configured;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) {
@@ -4447,6 +4449,13 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
pci_disable_sriov(cdev->pdev);
+ if (cdev->recov_in_prog) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "Skip SRIOV disable operations in the device since a recovery is in progress\n");
+ goto out;
+ }
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
@@ -4486,7 +4495,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
qed_ptt_release(hwfn, ptt);
}
-
+out:
qed_iov_set_vfs_to_disable(cdev, false);
return 0;
@@ -5130,6 +5139,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
+ params.update_ctl_frame_check = 1;
+ params.mac_chk_en = !vf_info->is_trusted_configured;
+
if (vf_info->rx_accept_mode & mask) {
flags->update_rx_mode_config = 1;
flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5159,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
}
if (flags->update_rx_mode_config ||
- flags->update_tx_mode_config)
+ flags->update_tx_mode_config ||
+ params.update_ctl_frame_check)
qed_sp_vport_update(hwfn, &params,
QED_SPQ_MODE_EBLOCK, NULL);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index b6cccf44bf40..5dda547772c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct vf_pf_resc_request *p_resc;
+ u8 retry_cnt = VF_ACQUIRE_THRESH;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ /* Re-try acquire in case of vf-pf hw channel timeout */
+ if (retry_cnt && rc == -EBUSY) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF retrying to acquire due to VPC timeout\n");
+ retry_cnt--;
+ continue;
+ }
+
if (rc)
goto exit;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 613249d1e967..63a78162cfaf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -56,7 +56,7 @@
#include <net/tc_act/tc_gact.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 33
+#define QEDE_MINOR_VERSION 37
#define QEDE_REVISION_VERSION 0
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -162,6 +162,7 @@ struct qede_rdma_dev {
struct list_head entry;
struct list_head rdma_event_list;
struct workqueue_struct *rdma_wq;
+ bool exp_recovery;
};
struct qede_ptp;
@@ -264,6 +265,7 @@ struct qede_dev {
enum QEDE_STATE {
QEDE_STATE_CLOSED,
QEDE_STATE_OPEN,
+ QEDE_STATE_RECOVERY,
};
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
@@ -462,6 +464,7 @@ struct qede_fastpath {
#define QEDE_CSUM_UNNECESSARY BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
+#define QEDE_SP_RECOVERY 0
#define QEDE_SP_RX_MODE 1
#ifdef CONFIG_RFS_ACCEL
@@ -494,6 +497,9 @@ struct qede_reload_args {
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 16331c6c6fa7..c6238083e898 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -186,11 +186,13 @@ static const struct {
enum {
QEDE_PRI_FLAG_CMT,
+ QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
QEDE_PRI_FLAG_LEN,
};
static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
+ "SmartAN capable",
};
enum qede_ethtool_tests {
@@ -404,8 +406,15 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
static u32 qede_get_priv_flags(struct net_device *dev)
{
struct qede_dev *edev = netdev_priv(dev);
+ u32 flags = 0;
- return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
+ if (edev->dev_info.common.num_hwfns > 1)
+ flags |= BIT(QEDE_PRI_FLAG_CMT);
+
+ if (edev->dev_info.common.smart_an)
+ flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);
+
+ return flags;
}
struct qede_link_mode_mapping {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index b16ce7d93caf..add922b93d2c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1665,198 +1665,6 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
return 0;
}
-static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- if ((fs->h_u.tcp_ip4_spec.ip4src &
- fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) {
- DP_INFO(edev, "Don't support IP-masks\n");
- return -EOPNOTSUPP;
- }
-
- if ((fs->h_u.tcp_ip4_spec.ip4dst &
- fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) {
- DP_INFO(edev, "Don't support IP-masks\n");
- return -EOPNOTSUPP;
- }
-
- if ((fs->h_u.tcp_ip4_spec.psrc &
- fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) {
- DP_INFO(edev, "Don't support port-masks\n");
- return -EOPNOTSUPP;
- }
-
- if ((fs->h_u.tcp_ip4_spec.pdst &
- fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) {
- DP_INFO(edev, "Don't support port-masks\n");
- return -EOPNOTSUPP;
- }
-
- if (fs->h_u.tcp_ip4_spec.tos) {
- DP_INFO(edev, "Don't support tos\n");
- return -EOPNOTSUPP;
- }
-
- t->eth_proto = htons(ETH_P_IP);
- t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
- t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
- t->src_port = fs->h_u.tcp_ip4_spec.psrc;
- t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
-
- return qede_set_v4_tuple_to_profile(edev, t);
-}
-
-static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- t->ip_proto = IPPROTO_TCP;
-
- if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
- return -EINVAL;
-
- return 0;
-}
-
-static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- t->ip_proto = IPPROTO_UDP;
-
- if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
- return -EINVAL;
-
- return 0;
-}
-
-static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- struct in6_addr zero_addr;
-
- memset(&zero_addr, 0, sizeof(zero_addr));
-
- if ((fs->h_u.tcp_ip6_spec.psrc &
- fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
- DP_INFO(edev, "Don't support port-masks\n");
- return -EOPNOTSUPP;
- }
-
- if ((fs->h_u.tcp_ip6_spec.pdst &
- fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) {
- DP_INFO(edev, "Don't support port-masks\n");
- return -EOPNOTSUPP;
- }
-
- if (fs->h_u.tcp_ip6_spec.tclass) {
- DP_INFO(edev, "Don't support tclass\n");
- return -EOPNOTSUPP;
- }
-
- t->eth_proto = htons(ETH_P_IPV6);
- memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
- sizeof(struct in6_addr));
- memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst,
- sizeof(struct in6_addr));
- t->src_port = fs->h_u.tcp_ip6_spec.psrc;
- t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
-
- return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
-}
-
-static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- t->ip_proto = IPPROTO_TCP;
-
- if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
- return -EINVAL;
-
- return 0;
-}
-
-static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- t->ip_proto = IPPROTO_UDP;
-
- if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
- return -EINVAL;
-
- return 0;
-}
-
-static int qede_flow_spec_to_tuple(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- memset(t, 0, sizeof(*t));
-
- if (qede_flow_spec_validate_unused(edev, fs))
- return -EOPNOTSUPP;
-
- switch ((fs->flow_type & ~FLOW_EXT)) {
- case TCP_V4_FLOW:
- return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
- case UDP_V4_FLOW:
- return qede_flow_spec_to_tuple_udpv4(edev, t, fs);
- case TCP_V6_FLOW:
- return qede_flow_spec_to_tuple_tcpv6(edev, t, fs);
- case UDP_V6_FLOW:
- return qede_flow_spec_to_tuple_udpv6(edev, t, fs);
- default:
- DP_VERBOSE(edev, NETIF_MSG_IFUP,
- "Can't support flow of type %08x\n", fs->flow_type);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int qede_flow_spec_validate(struct qede_dev *edev,
- struct ethtool_rx_flow_spec *fs,
- struct qede_arfs_tuple *t)
-{
- if (fs->location >= QEDE_RFS_MAX_FLTR) {
- DP_INFO(edev, "Location out-of-bounds\n");
- return -EINVAL;
- }
-
- /* Check location isn't already in use */
- if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) {
- DP_INFO(edev, "Location already in use\n");
- return -EINVAL;
- }
-
- /* Check if the filtering-mode could support the filter */
- if (edev->arfs->filter_count &&
- edev->arfs->mode != t->mode) {
- DP_INFO(edev,
- "flow_spec would require filtering mode %08x, but %08x is configured\n",
- t->mode, edev->arfs->filter_count);
- return -EINVAL;
- }
-
- /* If drop requested then no need to validate other data */
- if (fs->ring_cookie == RX_CLS_FLOW_DISC)
- return 0;
-
- if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
- return 0;
-
- if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
- DP_INFO(edev, "Queue out-of-bounds\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/* Must be called while qede lock is held */
static struct qede_arfs_fltr_node *
qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
@@ -1896,72 +1704,6 @@ static void qede_flow_set_destination(struct qede_dev *edev,
"Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}
-int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
-{
- struct ethtool_rx_flow_spec *fsp = &info->fs;
- struct qede_arfs_fltr_node *n;
- struct qede_arfs_tuple t;
- int min_hlen, rc;
-
- __qede_lock(edev);
-
- if (!edev->arfs) {
- rc = -EPERM;
- goto unlock;
- }
-
- /* Translate the flow specification into something fittign our DB */
- rc = qede_flow_spec_to_tuple(edev, &t, fsp);
- if (rc)
- goto unlock;
-
- /* Make sure location is valid and filter isn't already set */
- rc = qede_flow_spec_validate(edev, fsp, &t);
- if (rc)
- goto unlock;
-
- if (qede_flow_find_fltr(edev, &t)) {
- rc = -EINVAL;
- goto unlock;
- }
-
- n = kzalloc(sizeof(*n), GFP_KERNEL);
- if (!n) {
- rc = -ENOMEM;
- goto unlock;
- }
-
- min_hlen = qede_flow_get_min_header_size(&t);
- n->data = kzalloc(min_hlen, GFP_KERNEL);
- if (!n->data) {
- kfree(n);
- rc = -ENOMEM;
- goto unlock;
- }
-
- n->sw_id = fsp->location;
- set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
- n->buf_len = min_hlen;
-
- memcpy(&n->tuple, &t, sizeof(n->tuple));
-
- qede_flow_set_destination(edev, n, fsp);
-
- /* Build a minimal header according to the flow */
- n->tuple.build_hdr(&n->tuple, n->data);
-
- rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
- if (rc)
- goto unlock;
-
- qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
- rc = qede_poll_arfs_filter_config(edev, n);
-unlock:
- __qede_unlock(edev);
-
- return rc;
-}
-
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
struct qede_arfs_fltr_node *fltr = NULL;
@@ -2004,190 +1746,172 @@ unlock:
}
static int qede_parse_actions(struct qede_dev *edev,
- struct tcf_exts *exts)
+ struct flow_action *flow_action)
{
- int rc = -EINVAL, num_act = 0, i;
- const struct tc_action *a;
- bool is_drop = false;
+ const struct flow_action_entry *act;
+ int i;
- if (!tcf_exts_has_actions(exts)) {
- DP_NOTICE(edev, "No tc actions received\n");
- return rc;
+ if (!flow_action_has_entries(flow_action)) {
+ DP_NOTICE(edev, "No actions received\n");
+ return -EINVAL;
}
- tcf_exts_for_each_action(i, a, exts) {
- num_act++;
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ break;
+ case FLOW_ACTION_QUEUE:
+ if (act->queue.vf)
+ break;
- if (is_tcf_gact_shot(a))
- is_drop = true;
+ if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
+ DP_INFO(edev, "Queue out-of-bounds\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
}
- if (num_act == 1 && is_drop)
- return 0;
-
- return rc;
+ return 0;
}
static int
-qede_tc_parse_ports(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
- struct qede_arfs_tuple *t)
-{
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
-
- if ((key->src && mask->src != U16_MAX) ||
- (key->dst && mask->dst != U16_MAX)) {
+qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *t)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if ((match.key->src && match.mask->src != U16_MAX) ||
+ (match.key->dst && match.mask->dst != U16_MAX)) {
DP_NOTICE(edev, "Do not support ports masks\n");
return -EINVAL;
}
- t->src_port = key->src;
- t->dst_port = key->dst;
+ t->src_port = match.key->src;
+ t->dst_port = match.key->dst;
}
return 0;
}
static int
-qede_tc_parse_v6_common(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
- struct qede_arfs_tuple *t)
+qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *t)
{
struct in6_addr zero_addr, addr;
memset(&zero_addr, 0, sizeof(addr));
memset(&addr, 0xff, sizeof(addr));
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- struct flow_dissector_key_ipv6_addrs *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
- if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
- memcmp(&mask->src, &addr, sizeof(addr))) ||
- (memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
- memcmp(&mask->dst, &addr, sizeof(addr)))) {
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
+ memcmp(&match.mask->src, &addr, sizeof(addr))) ||
+ (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
+ memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
DP_NOTICE(edev,
"Do not support IPv6 address prefix/mask\n");
return -EINVAL;
}
- memcpy(&t->src_ipv6, &key->src, sizeof(addr));
- memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
+ memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
+ memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
}
- if (qede_tc_parse_ports(edev, f, t))
+ if (qede_flow_parse_ports(edev, rule, t))
return -EINVAL;
return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}
static int
-qede_tc_parse_v4_common(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *t)
{
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- struct flow_dissector_key_ipv4_addrs *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
- if ((key->src && mask->src != U32_MAX) ||
- (key->dst && mask->dst != U32_MAX)) {
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if ((match.key->src && match.mask->src != U32_MAX) ||
+ (match.key->dst && match.mask->dst != U32_MAX)) {
DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
return -EINVAL;
}
- t->src_ipv4 = key->src;
- t->dst_ipv4 = key->dst;
+ t->src_ipv4 = match.key->src;
+ t->dst_ipv4 = match.key->dst;
}
- if (qede_tc_parse_ports(edev, f, t))
+ if (qede_flow_parse_ports(edev, rule, t))
return -EINVAL;
return qede_set_v4_tuple_to_profile(edev, t);
}
static int
-qede_tc_parse_tcp_v6(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_tc_parse_v6_common(edev, f, tuple);
+ return qede_flow_parse_v6_common(edev, rule, tuple);
}
static int
-qede_tc_parse_tcp_v4(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_tc_parse_v4_common(edev, f, tuple);
+ return qede_flow_parse_v4_common(edev, rule, tuple);
}
static int
-qede_tc_parse_udp_v6(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_tc_parse_v6_common(edev, f, tuple);
+ return qede_flow_parse_v6_common(edev, rule, tuple);
}
static int
-qede_tc_parse_udp_v4(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_tc_parse_v4_common(edev, f, tuple);
+ return qede_flow_parse_v4_common(edev, rule, tuple);
}
static int
-qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
- struct tc_cls_flower_offload *f,
- struct qede_arfs_tuple *tuple)
+qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
+ struct flow_rule *rule, struct qede_arfs_tuple *tuple)
{
+ struct flow_dissector *dissector = rule->match.dissector;
int rc = -EINVAL;
u8 ip_proto = 0;
memset(tuple, 0, sizeof(*tuple));
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
DP_NOTICE(edev, "Unsupported key set:0x%x\n",
- f->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -2197,25 +1921,23 @@ qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
return -EPROTONOSUPPORT;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- ip_proto = key->ip_proto;
+ flow_rule_match_basic(rule, &match);
+ ip_proto = match.key->ip_proto;
}
if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
- rc = qede_tc_parse_tcp_v4(edev, f, tuple);
+ rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
- rc = qede_tc_parse_tcp_v6(edev, f, tuple);
+ rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
- rc = qede_tc_parse_udp_v4(edev, f, tuple);
+ rc = qede_flow_parse_udp_v4(edev, rule, tuple);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
- rc = qede_tc_parse_udp_v6(edev, f, tuple);
+ rc = qede_flow_parse_udp_v6(edev, rule, tuple);
else
- DP_NOTICE(edev, "Invalid tc protocol request\n");
+ DP_NOTICE(edev, "Invalid protocol request\n");
return rc;
}
@@ -2235,7 +1957,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- if (qede_parse_flower_attr(edev, proto, f, &t))
+ if (qede_parse_flow_attr(edev, proto, f->rule, &t))
goto unlock;
/* Validate profile mode and number of filters */
@@ -2248,7 +1970,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, f->exts))
+ if (qede_parse_actions(edev, &f->rule->action))
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -2290,3 +2012,141 @@ unlock:
__qede_unlock(edev);
return rc;
}
+
+static int qede_flow_spec_validate(struct qede_dev *edev,
+ struct flow_action *flow_action,
+ struct qede_arfs_tuple *t,
+ __u32 location)
+{
+ if (location >= QEDE_RFS_MAX_FLTR) {
+ DP_INFO(edev, "Location out-of-bounds\n");
+ return -EINVAL;
+ }
+
+ /* Check location isn't already in use */
+ if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
+ DP_INFO(edev, "Location already in use\n");
+ return -EINVAL;
+ }
+
+ /* Check if the filtering-mode could support the filter */
+ if (edev->arfs->filter_count &&
+ edev->arfs->mode != t->mode) {
+ DP_INFO(edev,
+ "flow_spec would require filtering mode %08x, but %08x is configured\n",
+ t->mode, edev->arfs->mode);
+ return -EINVAL;
+ }
+
+ if (qede_parse_actions(edev, flow_action))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qede_flow_spec_to_rule(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_rx_flow_spec_input input = {};
+ struct ethtool_rx_flow_rule *flow;
+ __be16 proto;
+ int err = 0;
+
+ if (qede_flow_spec_validate_unused(edev, fs))
+ return -EOPNOTSUPP;
+
+ switch ((fs->flow_type & ~FLOW_EXT)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ proto = htons(ETH_P_IP);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Can't support flow of type %08x\n", fs->flow_type);
+ return -EOPNOTSUPP;
+ }
+
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ /* Make sure location is valid and filter isn't already set */
+ err = qede_flow_spec_validate(edev, &flow->rule->action, t,
+ fs->location);
+err_out:
+ ethtool_rx_flow_rule_destroy(flow);
+ return err;
+}
+
+int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct ethtool_rx_flow_spec *fsp = &info->fs;
+ struct qede_arfs_fltr_node *n;
+ struct qede_arfs_tuple t;
+ int min_hlen, rc;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ /* Translate the flow specification into something fitting our DB */
+ rc = qede_flow_spec_to_rule(edev, &t, fsp);
+ if (rc)
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ min_hlen = qede_flow_get_min_header_size(&t);
+ n->data = kzalloc(min_hlen, GFP_KERNEL);
+ if (!n->data) {
+ kfree(n);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ n->sw_id = fsp->location;
+ set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
+ n->buf_len = min_hlen;
+
+ memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+ qede_flow_set_destination(edev, n, fsp);
+
+ /* Build a minimal header according to the flow */
+ n->tuple.build_hdr(&n->tuple, n->data);
+
+ rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+ if (rc)
+ goto unlock;
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+ rc = qede_poll_arfs_filter_config(edev, n);
+unlock:
+ __qede_unlock(edev);
+
+ return rc;
+}
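
The qede_filter.c rework drops the driver's private ethtool flow-spec parsers: the ethtool spec is converted into a struct flow_rule via ethtool_rx_flow_rule_create(), so the tc-flower and ethtool n-tuple paths share qede_parse_flow_attr() and qede_parse_actions(). A minimal sketch of that one-parser-two-front-ends shape, with stand-in types rather than the kernel API:

#include <stdio.h>

struct rule  { int proto; };
struct tuple { int proto; };

static int parse_flow_attr(const struct rule *r, struct tuple *t)
{
	t->proto = r->proto;		/* single parser for both paths */
	return 0;
}

static int ethtool_path(int proto, struct tuple *t)
{
	struct rule r = { .proto = proto };	/* spec -> rule conversion */

	return parse_flow_attr(&r, t);
}

static int flower_path(const struct rule *r, struct tuple *t)
{
	return parse_flow_attr(r, t);		/* rule arrives ready-made */
}

int main(void)
{
	struct rule r = { .proto = 0x0800 };
	struct tuple t;

	ethtool_path(0x86DD, &t);
	flower_path(&r, &t);
	printf("last parsed proto 0x%04x\n", t.proto);
	return 0;
}
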
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index bdf816fe5a16..31b046e24565 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int total_txq;
+
+ total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
+
+ return QEDE_TSS_COUNT(edev) ?
+ fallback(dev, skb, NULL) % total_txq : 0;
+}
+
/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48
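
qede_select_queue() above folds the stack's fallback queue pick into the full TX range, which is QEDE_TSS_COUNT() multiplied by the number of traffic classes. A self-contained sketch of the arithmetic with illustrative values:

#include <stdio.h>

static unsigned short select_txq(unsigned short fallback_pick,
				 unsigned short tss_count,
				 unsigned char num_tc)
{
	unsigned short total_txq = tss_count * num_tc;

	return tss_count ? fallback_pick % total_txq : 0;
}

int main(void)
{
	/* e.g. fallback picked 37 with 8 TSS queues and 4 TCs -> queue 5 */
	printf("%u\n", select_txq(37, 8, 4));
	return 0;
}
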
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 5a74fcbdbc2b..02a97c659e29 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -133,23 +133,12 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
+static void qede_schedule_recovery_handler(void *dev);
+static void qede_recovery_handler(struct qede_dev *edev);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
-/* The qede lock is used to protect driver state change and driver flows that
- * are not reentrant.
- */
-void __qede_lock(struct qede_dev *edev)
-{
- mutex_lock(&edev->qede_lock);
-}
-
-void __qede_unlock(struct qede_dev *edev)
-{
- mutex_unlock(&edev->qede_lock);
-}
-
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -231,6 +220,7 @@ static struct qed_eth_cb_ops qede_ll_ops = {
.arfs_filter_op = qede_arfs_filter_op,
#endif
.link_update = qede_link_update,
+ .schedule_recovery_handler = qede_schedule_recovery_handler,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
@@ -631,6 +621,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -666,6 +657,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -684,6 +676,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -950,11 +943,57 @@ err:
return -ENOMEM;
}
+/* The qede lock is used to protect driver state change and driver flows that
+ * are not reentrant.
+ */
+void __qede_lock(struct qede_dev *edev)
+{
+ mutex_lock(&edev->qede_lock);
+}
+
+void __qede_unlock(struct qede_dev *edev)
+{
+ mutex_unlock(&edev->qede_lock);
+}
+
+/* This version of the lock should be used when acquiring the RTNL lock is also
+ * needed in addition to the internal qede lock.
+ */
+void qede_lock(struct qede_dev *edev)
+{
+ rtnl_lock();
+ __qede_lock(edev);
+}
+
+void qede_unlock(struct qede_dev *edev)
+{
+ __qede_unlock(edev);
+ rtnl_unlock();
+}
+
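A brief usage sketch of the ordering these helpers encode (illustrative function, not part of the patch): any flow that needs both locks must take RTNL first and release it last, which is what keeps such flows from deadlocking against the recovery worker below.

/* Illustrative only: combined-lock usage mirroring qede_lock()/qede_unlock().
 * Taking __qede_lock() first and rtnl_lock() second elsewhere would invert
 * the order and risk a deadlock.
 */
static void example_locked_reconfig(struct qede_dev *edev)
{
	qede_lock(edev);	/* rtnl_lock(), then __qede_lock() */
	/* ... change netdev state and driver state atomically ... */
	qede_unlock(edev);	/* __qede_unlock(), then rtnl_unlock() */
}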
static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ /* The locking scheme depends on the specific flag:
+ * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
+ * ensure that ongoing flows are ended and new ones are not started.
+ * In other cases - only the internal qede lock should be acquired.
+ */
+
+ if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ qede_lock(edev);
+ qede_recovery_handler(edev);
+ qede_unlock(edev);
+ }
+
__qede_lock(edev);
if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
@@ -1031,6 +1070,7 @@ static void qede_log_probe(struct qede_dev *edev)
enum qede_probe_mode {
QEDE_PROBE_NORMAL,
+ QEDE_PROBE_RECOVERY,
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
@@ -1051,6 +1091,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
probe_params.dp_module = dp_module;
probe_params.dp_level = dp_level;
probe_params.is_vf = is_vf;
+ probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
@@ -1078,11 +1119,20 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (rc)
goto err2;
- edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
- dp_level);
- if (!edev) {
- rc = -ENOMEM;
- goto err2;
+ if (mode != QEDE_PROBE_RECOVERY) {
+ edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+ dp_level);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+ } else {
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ edev = netdev_priv(ndev);
+ edev->cdev = cdev;
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
}
if (is_vf)
@@ -1090,28 +1140,31 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_init_ndev(edev);
- rc = qede_rdma_dev_add(edev);
+ rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
if (rc)
goto err3;
- /* Prepare the lock prior to the registration of the netdev,
- * as once it's registered we might reach flows requiring it
- * [it's even possible to reach a flow needing it directly
- * from there, although it's unlikely].
- */
- INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
- mutex_init(&edev->qede_lock);
- rc = register_netdev(edev->ndev);
- if (rc) {
- DP_NOTICE(edev, "Cannot register net-device\n");
- goto err4;
+ if (mode != QEDE_PROBE_RECOVERY) {
+ /* Prepare the lock prior to the registration of the netdev,
+ * as once it's registered we might reach flows requiring it
+ * [it's even possible to reach a flow needing it directly
+ * from there, although it's unlikely].
+ */
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register net-device\n");
+ goto err4;
+ }
}
edev->ops->common->set_name(cdev, edev->ndev->name);
/* PTP not supported on VFs */
if (!is_vf)
- qede_ptp_enable(edev, true);
+ qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -1126,7 +1179,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
err4:
- qede_rdma_dev_remove(edev);
+ qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
free_netdev(edev->ndev);
err2:
@@ -1162,6 +1215,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
enum qede_remove_mode {
QEDE_REMOVE_NORMAL,
+ QEDE_REMOVE_RECOVERY,
};
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
@@ -1172,15 +1226,19 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
- qede_rdma_dev_remove(edev);
- unregister_netdev(ndev);
- cancel_delayed_work_sync(&edev->sp_task);
+ qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
- qede_ptp_disable(edev);
+ if (mode != QEDE_REMOVE_RECOVERY) {
+ unregister_netdev(ndev);
- edev->ops->common->set_power_state(cdev, PCI_D0);
+ cancel_delayed_work_sync(&edev->sp_task);
- pci_set_drvdata(pdev, NULL);
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+ pci_set_drvdata(pdev, NULL);
+ }
+
+ qede_ptp_disable(edev);
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
@@ -1194,7 +1252,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
* [e.g., QED register callbacks] won't break anything when
* accessing the netdevice.
*/
- free_netdev(ndev);
+ if (mode != QEDE_REMOVE_RECOVERY)
+ free_netdev(ndev);
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -1539,6 +1598,58 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
return 0;
}
+static void qede_empty_tx_queue(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct netdev_queue *netdev_txq;
+ int rc, len = 0;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
+
+ while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl)) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
+ txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev,
+ "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
+ txq->index,
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+}
+
+static void qede_empty_tx_queues(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_queue(i)
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_fastpath *fp;
+
+ fp = &edev->fp_array[i];
+ qede_empty_tx_queue(edev,
+ &fp->txq[cos]);
+ }
+ }
+}
+
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
@@ -2053,6 +2164,7 @@ out:
enum qede_unload_mode {
QEDE_UNLOAD_NORMAL,
+ QEDE_UNLOAD_RECOVERY,
};
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
@@ -2068,7 +2180,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
- edev->state = QEDE_STATE_CLOSED;
+ if (mode != QEDE_UNLOAD_RECOVERY)
+ edev->state = QEDE_STATE_CLOSED;
qede_rdma_dev_event_close(edev);
@@ -2076,17 +2189,20 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
- /* Reset the link */
- memset(&link_params, 0, sizeof(link_params));
- link_params.link_up = false;
- edev->ops->common->set_link(edev->cdev, &link_params);
- rc = qede_stop_queues(edev);
- if (rc) {
- qede_sync_free_irqs(edev);
- goto out;
- }
+ if (mode != QEDE_UNLOAD_RECOVERY) {
+ /* Reset the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
- DP_INFO(edev, "Stopped Queues\n");
+ rc = qede_stop_queues(edev);
+ if (rc) {
+ qede_sync_free_irqs(edev);
+ goto out;
+ }
+
+ DP_INFO(edev, "Stopped Queues\n");
+ }
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
@@ -2102,18 +2218,26 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_napi_disable_remove(edev);
+ if (mode == QEDE_UNLOAD_RECOVERY)
+ qede_empty_tx_queues(edev);
+
qede_free_mem_load(edev);
qede_free_fp_array(edev);
out:
if (!is_locked)
__qede_unlock(edev);
+
+ if (mode != QEDE_UNLOAD_RECOVERY)
+ DP_NOTICE(edev, "Link is down\n");
+
DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
QEDE_LOAD_NORMAL,
QEDE_LOAD_RELOAD,
+ QEDE_LOAD_RECOVERY,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
@@ -2293,6 +2417,77 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
}
}
+static void qede_schedule_recovery_handler(void *dev)
+{
+ struct qede_dev *edev = dev;
+
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev,
+ "Avoid scheduling a recovery handling since already in recovery state\n");
+ return;
+ }
+
+ set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a recovery handler\n");
+}
+
+static void qede_recovery_failed(struct qede_dev *edev)
+{
+ netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
+
+ netif_device_detach(edev->ndev);
+
+ if (edev->cdev)
+ edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
+}
+
+static void qede_recovery_handler(struct qede_dev *edev)
+{
+ u32 curr_state = edev->state;
+ int rc;
+
+ DP_NOTICE(edev, "Starting a recovery process\n");
+
+ /* No need to acquire the qede_lock here since it is taken by qede_sp_task
+ * before calling this function.
+ */
+ edev->state = QEDE_STATE_RECOVERY;
+
+ edev->ops->common->recovery_prolog(edev->cdev);
+
+ if (curr_state == QEDE_STATE_OPEN)
+ qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
+
+ __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
+
+ rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
+ IS_VF(edev), QEDE_PROBE_RECOVERY);
+ if (rc) {
+ edev->cdev = NULL;
+ goto err;
+ }
+
+ if (curr_state == QEDE_STATE_OPEN) {
+ rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
+ if (rc)
+ goto err;
+
+ qede_config_rx_mode(edev->ndev);
+ udp_tunnel_get_rx_info(edev->ndev);
+ }
+
+ edev->state = curr_state;
+
+ DP_NOTICE(edev, "Recovery handling is done\n");
+
+ return;
+
+err:
+ qede_recovery_failed(edev);
+}
+
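For completeness, the producer side of this callback lives in the qed core, which is outside this hunk. A hypothetical sketch of how it would be invoked follows, assuming the handler sits in the common callback ops as the registration in qede_ll_ops above suggests; the function and struct names here are illustrative only.

/* Hypothetical sketch: the qed core reports a fatal HW error by invoking the
 * registered callback with the cookie from register_ops(), i.e. the qede_dev
 * pointer. All heavy lifting then runs in qede's slow-path worker under the
 * locking scheme described earlier.
 */
static void qed_example_report_fatal_error(struct qed_common_cb_ops *ops,
					   void *cookie)
{
	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}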
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 1900bf7e67d1..ffabc2d2f082 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -50,6 +50,8 @@ static void _qede_rdma_dev_add(struct qede_dev *edev)
if (!qedr_drv)
return;
+ /* Leftovers from previous error recovery */
+ edev->rdma_info.exp_recovery = false;
edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
edev->ndev);
}
@@ -87,21 +89,26 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
destroy_workqueue(edev->rdma_info.rdma_wq);
}
-int qede_rdma_dev_add(struct qede_dev *edev)
+int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
- int rc = 0;
+ int rc;
- if (qede_rdma_supported(edev)) {
- rc = qede_rdma_create_wq(edev);
- if (rc)
- return rc;
+ if (!qede_rdma_supported(edev))
+ return 0;
- INIT_LIST_HEAD(&edev->rdma_info.entry);
- mutex_lock(&qedr_dev_list_lock);
- list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
- _qede_rdma_dev_add(edev);
- mutex_unlock(&qedr_dev_list_lock);
- }
+ /* Cannot start qedr while recovering since it wasn't fully stopped */
+ if (recovery)
+ return 0;
+
+ rc = qede_rdma_create_wq(edev);
+ if (rc)
+ return rc;
+
+ INIT_LIST_HEAD(&edev->rdma_info.entry);
+ mutex_lock(&qedr_dev_list_lock);
+ list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+ _qede_rdma_dev_add(edev);
+ mutex_unlock(&qedr_dev_list_lock);
return rc;
}
@@ -110,19 +117,30 @@ static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
qedr_drv->remove(edev->rdma_info.qedr_dev);
- edev->rdma_info.qedr_dev = NULL;
}
-void qede_rdma_dev_remove(struct qede_dev *edev)
+void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
if (!qede_rdma_supported(edev))
return;
- qede_rdma_destroy_wq(edev);
- mutex_lock(&qedr_dev_list_lock);
- _qede_rdma_dev_remove(edev);
- list_del(&edev->rdma_info.entry);
- mutex_unlock(&qedr_dev_list_lock);
+ /* Cannot remove qedr while recovering since it wasn't fully stopped */
+ if (!recovery) {
+ qede_rdma_destroy_wq(edev);
+ mutex_lock(&qedr_dev_list_lock);
+ if (!edev->rdma_info.exp_recovery)
+ _qede_rdma_dev_remove(edev);
+ edev->rdma_info.qedr_dev = NULL;
+ list_del(&edev->rdma_info.entry);
+ mutex_unlock(&qedr_dev_list_lock);
+ } else {
+ if (!edev->rdma_info.exp_recovery) {
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_rdma_dev_remove(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+ }
+ edev->rdma_info.exp_recovery = true;
+ }
}
static void _qede_rdma_dev_open(struct qede_dev *edev)
@@ -204,7 +222,8 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv)
mutex_lock(&qedr_dev_list_lock);
list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
- if (edev->rdma_info.qedr_dev)
+ /* If device has experienced recovery it was already removed */
+ if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
_qede_rdma_dev_remove(edev);
}
qedr_drv = NULL;
@@ -284,6 +303,10 @@ static void qede_rdma_add_event(struct qede_dev *edev,
{
struct qede_rdma_event_work *event_node;
+ /* If a recovery was experienced avoid adding the event */
+ if (edev->rdma_info.exp_recovery)
+ return;
+
if (!edev->rdma_info.qedr_dev)
return;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d344e9d43832..af38d3d73291 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -434,14 +434,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
*(tx_ring->hw_consumer) = 0;
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
- rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
- rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
@@ -855,8 +855,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args cmd;
size_t nic_size = sizeof(struct qlcnic_info_le);
- nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -909,8 +909,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
- nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -964,8 +964,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
void *pci_info_addr;
int err = 0, i;
- pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t, GFP_KERNEL);
+ pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
if (!pci_info_addr)
return -ENOMEM;
@@ -1078,8 +1078,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
return -EIO;
}
- stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
@@ -1134,8 +1134,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
if (mac_stats == NULL)
return -ENOMEM;
- stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
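
The qlcnic hunks above are part of the tree-wide removal of dma_zalloc_coherent(): dma_alloc_coherent() now returns zeroed memory, so the conversion is a mechanical rename. Roughly, the retired wrapper looked like the sketch below (reconstructed for illustration, not taken from this patch).

/* Sketch of the retired helper: it only added __GFP_ZERO, which
 * dma_alloc_coherent() now applies unconditionally.
 */
static inline void *dma_zalloc_coherent_legacy(struct device *dev, size_t size,
					       dma_addr_t *dma_handle,
					       gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}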
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 16d0479f6891..7a873002e626 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -396,7 +396,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
- const unsigned char *addr, u16 vid, u16 flags)
+ const unsigned char *addr, u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 5edbd532127d..a6886cc5654c 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -249,8 +249,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
spin_lock(&qdev->stats_lock);
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get xgmac sem.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get xgmac sem.\n");
goto quit;
}
/*
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 059ba9429e51..096515c27263 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1159,10 +1159,10 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
- dma_unmap_addr_set(lbq_desc, mapaddr, map);
+ dma_unmap_addr_set(lbq_desc, mapaddr, map);
dma_unmap_len_set(lbq_desc, maplen,
rx_ring->lbq_buf_size);
- *lbq_desc->addr = cpu_to_le64(map);
+ *lbq_desc->addr = cpu_to_le64(map);
pci_dma_sync_single_for_device(qdev->pdev, map,
rx_ring->lbq_buf_size,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 031f6e6ee9c1..20d2400ad300 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */
ring_header->used = 0;
- ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size,
+ ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
&ring_header->dma_addr,
GFP_KERNEL);
if (!ring_header->v_addr)
@@ -1204,7 +1204,7 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
if (tpbuf->skb) {
pkts_compl++;
bytes_compl += tpbuf->skb->len;
- dev_kfree_skb_irq(tpbuf->skb);
+ dev_consume_skb_irq(tpbuf->skb);
tpbuf->skb = NULL;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 44f6e4873aad..4f910c4f67b0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -691,7 +691,7 @@ static void cp_tx (struct cp_private *cp)
}
bytes_compl += skb->len;
pkts_compl++;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
}
cp->tx_skb[tx_tail] = NULL;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 7e011c1c1e6e..cfb67b746595 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -454,14 +454,14 @@ static void hardware_init(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- int i;
+ int i;
/* Turn off the printer multiplexer on the 8012. */
for (i = 0; i < 8; i++)
outb(mux_8012[i], ioaddr + PAR_DATA);
write_reg_high(ioaddr, CMR1, CMR1h_RESET);
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++)
write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
write_reg_high(ioaddr, CMR2, lp->addr_mode);
@@ -471,15 +471,15 @@ static void hardware_init(struct net_device *dev)
(read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
}
- write_reg(ioaddr, CMR2, CMR2_IRQOUT);
- write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
/* Enable the interrupt line from the serial port. */
outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
/* Unmask the interesting interrupts. */
- write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
- write_reg_high(ioaddr, IMR, ISRh_RxErr);
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
lp->tx_unit_busy = 0;
lp->pac_cnt_in_tx_buf = 0;
@@ -610,10 +610,12 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
write_reg(ioaddr, CMR2, CMR2_NULL);
write_reg(ioaddr, IMR, 0);
- if (net_debug > 5) printk(KERN_DEBUG "%s: In interrupt ", dev->name);
- while (--boguscount > 0) {
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: In interrupt ", dev->name);
+ while (--boguscount > 0) {
int status = read_nibble(ioaddr, ISR);
- if (net_debug > 5) printk("loop status %02x..", status);
+ if (net_debug > 5)
+ printk("loop status %02x..", status);
if (status & (ISR_RxOK<<3)) {
handled = 1;
@@ -640,7 +642,8 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
} while (--boguscount > 0);
} else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
handled = 1;
- if (net_debug > 6) printk("handling Tx done..");
+ if (net_debug > 6)
+ printk("handling Tx done..");
/* Clear the Tx interrupt. We should check for too many failures
and reinitialize the adapter. */
write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
@@ -680,7 +683,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
break;
} else
break;
- }
+ }
/* This following code fixes a rare (and very difficult to track down)
problem where the adapter forgets its ethernet address. */
@@ -694,7 +697,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
}
/* Tell the adapter that it can go back to using the output line as IRQ. */
- write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
/* Enable the physical interrupt line, which is sure to be low until.. */
outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
/* .. we enable the interrupt sources. */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 298930d39b79..c29dde064078 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -205,6 +205,8 @@ enum cfg_version {
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
+ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
{ PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 },
{ PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 },
{ PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 },
@@ -227,7 +229,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static int use_dac = -1;
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -637,6 +638,7 @@ struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
struct net_device *dev;
+ struct phy_device *phydev;
struct napi_struct napi;
u32 msg_enable;
u16 mac_version;
@@ -677,12 +679,12 @@ struct rtl8169_private {
} wk;
unsigned supports_gmii:1;
- struct mii_bus *mii_bus;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
+ const char *fw_name;
struct rtl_fw {
const struct firmware *fw;
@@ -695,17 +697,15 @@ struct rtl8169_private {
size_t size;
} phy_action;
} *rtl_fw;
-#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
u32 ocp_base;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
-module_param(use_dac, int, 0);
-MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_SOFTDEP("pre: realtek");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
@@ -742,6 +742,16 @@ static void rtl_unlock_work(struct rtl8169_private *tp)
mutex_unlock(&tp->wk.mutex);
}
+static void rtl_lock_config_regs(struct rtl8169_private *tp)
+{
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+}
+
+static void rtl_unlock_config_regs(struct rtl8169_private *tp)
+{
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+}
+
static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
{
pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
@@ -1275,11 +1285,6 @@ static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
-static u16 rtl_get_events(struct rtl8169_private *tp)
-{
- return RTL_R16(tp, IntrStatus);
-}
-
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
RTL_W16(tp, IntrStatus, bits);
@@ -1310,7 +1315,7 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
- struct phy_device *phydev = dev->phydev;
+ struct phy_device *phydev = tp->phydev;
if (!netif_running(dev))
return;
@@ -1366,41 +1371,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
-static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
-{
- u8 options;
- u32 wolopts = 0;
-
- options = RTL_R8(tp, Config1);
- if (!(options & PMEnable))
- return 0;
-
- options = RTL_R8(tp, Config3);
- if (options & LinkUp)
- wolopts |= WAKE_PHY;
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
- wolopts |= WAKE_MAGIC;
- break;
- default:
- if (options & MagicPacket)
- wolopts |= WAKE_MAGIC;
- break;
- }
-
- options = RTL_R8(tp, Config5);
- if (options & UWF)
- wolopts |= WAKE_UCAST;
- if (options & BWF)
- wolopts |= WAKE_BCAST;
- if (options & MWF)
- wolopts |= WAKE_MCAST;
-
- return wolopts;
-}
-
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1428,7 +1398,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
};
u8 options;
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
@@ -1476,7 +1446,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
}
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
}
@@ -1505,11 +1475,6 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
-{
- return rtl_chip_infos[tp->mac_version].fw_name;
-}
-
static void rtl8169_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1519,7 +1484,7 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
strlcpy(info->driver, MODULENAME, sizeof(info->driver));
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
- if (!IS_ERR_OR_NULL(rtl_fw))
+ if (rtl_fw)
strlcpy(info->fw_version, rtl_fw->version,
sizeof(info->fw_version));
}
@@ -1679,11 +1644,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
static bool rtl8169_update_counters(struct rtl8169_private *tp)
{
+ u8 val = RTL_R8(tp, ChipCmd);
+
/*
* Some chips are unable to dump tally counters when the receiver
- * is disabled.
+ * is disabled. If the value is 0xff, the chip may be in a PCI
+ * power-save state.
*/
- if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
+ if (!(val & CmdRxEnb) || val == 0xff)
return true;
return rtl8169_do_counters(tp, CounterDump);
@@ -1982,6 +1949,196 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
return 0;
}
+static int rtl_get_eee_supp(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+ int ret;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ phy_write(phydev, 0x1f, 0x0a5c);
+ ret = phy_read(phydev, 0x12);
+ phy_write(phydev, 0x1f, 0x0000);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+ int ret;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ phy_write(phydev, 0x1f, 0x0a5d);
+ ret = phy_read(phydev, 0x11);
+ phy_write(phydev, 0x1f, 0x0000);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl_get_eee_adv(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+ int ret;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ phy_write(phydev, 0x1f, 0x0a5d);
+ ret = phy_read(phydev, 0x10);
+ phy_write(phydev, 0x1f, 0x0000);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
+{
+ struct phy_device *phydev = tp->phydev;
+ int ret = 0;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ phy_write(phydev, 0x1f, 0x0a5d);
+ phy_write(phydev, 0x10, val);
+ phy_write(phydev, 0x1f, 0x0000);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ break;
+ }
+
+ return ret;
+}
+
+static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = tp_to_dev(tp);
+ int ret;
+
+ pm_runtime_get_noresume(d);
+
+ if (!pm_runtime_active(d)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Get Supported EEE */
+ ret = rtl_get_eee_supp(tp);
+ if (ret < 0)
+ goto out;
+ data->supported = mmd_eee_cap_to_ethtool_sup_t(ret);
+
+ /* Get advertisement EEE */
+ ret = rtl_get_eee_adv(tp);
+ if (ret < 0)
+ goto out;
+ data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
+ data->eee_enabled = !!data->advertised;
+
+ /* Get LP advertisement EEE */
+ ret = rtl_get_eee_lpadv(tp);
+ if (ret < 0)
+ goto out;
+ data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
+ data->eee_active = !!(data->advertised & data->lp_advertised);
+out:
+ pm_runtime_put_noidle(d);
+ return ret < 0 ? ret : 0;
+}
+
+static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = tp_to_dev(tp);
+ int old_adv, adv = 0, cap, ret;
+
+ pm_runtime_get_noresume(d);
+
+ if (!dev->phydev || !pm_runtime_active(d)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (dev->phydev->autoneg == AUTONEG_DISABLE ||
+ dev->phydev->duplex != DUPLEX_FULL) {
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ /* Get Supported EEE */
+ ret = rtl_get_eee_supp(tp);
+ if (ret < 0)
+ goto out;
+ cap = ret;
+
+ ret = rtl_get_eee_adv(tp);
+ if (ret < 0)
+ goto out;
+ old_adv = ret;
+
+ if (data->eee_enabled) {
+ adv = !data->advertised ? cap :
+ ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
+ /* Mask prohibited EEE modes */
+ adv &= ~dev->phydev->eee_broken_modes;
+ }
+
+ if (old_adv != adv) {
+ ret = rtl_set_eee_adv(tp, adv);
+ if (ret < 0)
+ goto out;
+
+ /* Restart autonegotiation so the new modes get sent to the
+ * link partner.
+ */
+ ret = phy_restart_aneg(dev->phydev);
+ }
+
+out:
+ pm_runtime_put_noidle(d);
+ return ret < 0 ? ret : 0;
+}
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
@@ -1998,10 +2155,20 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = phy_ethtool_nway_reset,
+ .get_eee = rtl8169_get_eee,
+ .set_eee = rtl8169_set_eee,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
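
With .get_eee/.set_eee wired up, the new EEE state becomes reachable through the standard ETHTOOL_GEEE/ETHTOOL_SEEE ioctls. A minimal userspace sketch, assuming an illustrative interface name "eth0":

/* Sketch: read the EEE state that rtl8169_get_eee() reports, then enable
 * EEE advertisement for everything the PHY supports.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

int main(void)
{
	struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eee;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("EEE enabled=%u active=%u lp_adv=0x%x\n",
		       eee.eee_enabled, eee.eee_active, eee.lp_advertised);

	eee.cmd = ETHTOOL_SEEE;
	eee.eee_enabled = 1;
	eee.advertised = eee.supported;	/* advertise all supported modes */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SEEE");

	close(fd);
	return 0;
}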
+static void rtl_enable_eee(struct rtl8169_private *tp)
+{
+ int supported = rtl_get_eee_supp(tp);
+
+ if (supported > 0)
+ rtl_set_eee_adv(tp, supported);
+}
+
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{
/*
@@ -2194,7 +2361,7 @@ static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
if (fw->size % FW_OPCODE_SIZE)
goto out;
- strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
+ strlcpy(version, tp->fw_name, RTL_VER_SIZE);
pa->code = (__le32 *)fw->data;
pa->size = fw->size / FW_OPCODE_SIZE;
@@ -2369,20 +2536,18 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
static void rtl_release_firmware(struct rtl8169_private *tp)
{
- if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
+ if (tp->rtl_fw) {
release_firmware(tp->rtl_fw->fw);
kfree(tp->rtl_fw);
+ tp->rtl_fw = NULL;
}
- tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
}
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
- struct rtl_fw *rtl_fw = tp->rtl_fw;
-
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- if (!IS_ERR_OR_NULL(rtl_fw))
- rtl_phy_write_fw(tp, rtl_fw);
+ if (tp->rtl_fw)
+ rtl_phy_write_fw(tp, tp->rtl_fw);
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2393,6 +2558,33 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
rtl_apply_firmware(tp);
}
+static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
+{
+ rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
+}
+
+static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ phy_write(phydev, 0x1f, 0x0007);
+ phy_write(phydev, 0x1e, 0x0020);
+ phy_set_bits(phydev, 0x15, BIT(8));
+
+ phy_write(phydev, 0x1f, 0x0005);
+ phy_write(phydev, 0x05, 0x8b85);
+ phy_set_bits(phydev, 0x06, BIT(13));
+
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
+static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
+{
+ phy_write(tp->phydev, 0x1f, 0x0a43);
+ phy_set_bits(tp->phydev, 0x11, BIT(4));
+ phy_write(tp->phydev, 0x1f, 0x0000);
+}
+
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3160,22 +3352,8 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* EEE setting */
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0002);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0007);
- rtl_writephy(tp, 0x0e, 0x003c);
- rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0006);
- rtl_writephy(tp, 0x0d, 0x0000);
+ rtl8168f_config_eee_phy(tp);
+ rtl_enable_eee(tp);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
@@ -3210,6 +3388,9 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x05, 0x8b86);
rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl8168f_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3343,22 +3524,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* eee setting */
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
- rtl_writephy(tp, 0x1f, 0x0005);
- rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0004);
- rtl_writephy(tp, 0x1f, 0x0007);
- rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0007);
- rtl_writephy(tp, 0x0e, 0x003c);
- rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
- rtl_writephy(tp, 0x0d, 0x0000);
-
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
@@ -3366,6 +3531,30 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
+{
+ phy_write(tp->phydev, 0x1f, 0x0a43);
+ phy_clear_bits(tp->phydev, 0x10, BIT(2));
+}
+
+static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ phy_write(phydev, 0x1f, 0x0bcc);
+ phy_clear_bits(phydev, 0x14, BIT(8));
+
+ phy_write(phydev, 0x1f, 0x0a44);
+ phy_set_bits(phydev, 0x11, BIT(7) | BIT(6));
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x8084);
+ phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
+ phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0));
+
+ phy_write(phydev, 0x1f, 0x0000);
+}
+
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
rtl_apply_firmware(tp);
@@ -3392,14 +3581,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0a44);
rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8084);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
- rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
+ rtl8168g_phy_adjust_10m_aldps(tp);
/* EEE auto-fallback function */
rtl_writephy(tp, 0x1f, 0x0a4b);
@@ -3424,17 +3606,16 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x14, 0x9065);
rtl_writephy(tp, 0x14, 0x1065);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
{
rtl_apply_firmware(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3539,12 +3720,9 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
rtl_writephy(tp, 0x1f, 0x0000);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3612,12 +3790,9 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
rtl_writephy(tp, 0x1f, 0x0000);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3627,16 +3802,7 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* patch 10M & ALDPS */
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8084);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
- rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable EEE auto-fallback function */
rtl_writephy(tp, 0x1f, 0x0a4b);
@@ -3654,26 +3820,14 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
- /* patch 10M & ALDPS */
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100);
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0a43);
- rtl_writephy(tp, 0x13, 0x8084);
- rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
- rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable UC LPF tune function */
rtl_writephy(tp, 0x1f, 0x0a43);
@@ -3745,12 +3899,9 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x14, 0x1065);
rtl_writephy(tp, 0x1f, 0x0000);
- /* Check ALDPS bit, disable it if enabled */
- rtl_writephy(tp, 0x1f, 0x0a43);
- if (rtl_readphy(tp, 0x10) & 0x0004)
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
-
- rtl_writephy(tp, 0x1f, 0x0000);
+ rtl8168g_disable_aldps(tp);
+ rtl8168g_config_eee_phy(tp);
+ rtl_enable_eee(tp);
}
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
@@ -3989,24 +4140,24 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
}
/* We may have called phy_speed_down before */
- phy_speed_up(dev->phydev);
+ phy_speed_up(tp->phydev);
- genphy_soft_reset(dev->phydev);
+ genphy_soft_reset(tp->phydev);
/* It was reported that several chips end up with 10MBit/Half on a
* 1GBit link after resuming from S3. For whatever reason the PHY on
* these chips doesn't properly start a renegotiation when soft-reset.
* Explicitly requesting a renegotiation fixes this.
*/
- if (dev->phydev->autoneg == AUTONEG_ENABLE)
- phy_restart_aneg(dev->phydev);
+ if (tp->phydev->autoneg == AUTONEG_ENABLE)
+ phy_restart_aneg(tp->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
rtl_lock_work(tp);
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
RTL_R32(tp, MAC4);
@@ -4017,7 +4168,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
rtl_rar_exgmac_set(tp, addr);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
rtl_unlock_work(tp);
}
@@ -4044,10 +4195,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
if (!netif_running(dev))
return -ENODEV;
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return phy_mii_ioctl(tp->phydev, ifr, cmd);
}
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
@@ -4094,22 +4247,6 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
}
}
-static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
-{
- struct phy_device *phydev;
-
- if (!__rtl8169_get_wol(tp))
- return false;
-
- /* phydev may not be attached to netdevice */
- phydev = mdiobus_get_phy(tp->mii_bus, 0);
-
- phy_speed_down(phydev, false);
- rtl_wol_suspend_quirk(tp);
-
- return true;
-}
-
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
if (r8168_check_dash(tp))
@@ -4119,8 +4256,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(tp, 0x19, 0xff64);
- if (rtl_wol_pll_power_down(tp))
+ if (device_may_wakeup(tp_to_dev(tp))) {
+ phy_speed_down(tp->phydev, false);
+ rtl_wol_suspend_quirk(tp);
return;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
@@ -4173,7 +4313,7 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
break;
}
- phy_resume(tp->dev->phydev);
+ phy_resume(tp->phydev);
/* give MAC/PHY some time to resume */
msleep(20);
}
@@ -4229,18 +4369,18 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
if (tp->jumbo_ops.enable) {
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
tp->jumbo_ops.enable(tp);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
}
}
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
if (tp->jumbo_ops.disable) {
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
tp->jumbo_ops.disable(tp);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
}
}
@@ -4373,21 +4513,20 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
-static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+static void rtl_request_firmware(struct rtl8169_private *tp)
{
struct rtl_fw *rtl_fw;
- const char *name;
int rc = -ENOMEM;
- name = rtl_lookup_firmware_name(tp);
- if (!name)
- goto out_no_firmware;
+ /* firmware loaded already or no firmware available */
+ if (tp->rtl_fw || !tp->fw_name)
+ return;
rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
if (!rtl_fw)
goto err_warn;
- rc = request_firmware(&rtl_fw->fw, name, tp_to_dev(tp));
+ rc = request_firmware(&rtl_fw->fw, tp->fw_name, tp_to_dev(tp));
if (rc < 0)
goto err_free;
@@ -4396,7 +4535,7 @@ static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
goto err_release_firmware;
tp->rtl_fw = rtl_fw;
-out:
+
return;
err_release_firmware:
@@ -4405,16 +4544,7 @@ err_free:
kfree(rtl_fw);
err_warn:
netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
- name, rc);
-out_no_firmware:
- tp->rtl_fw = NULL;
- goto out;
-}
-
-static void rtl_request_firmware(struct rtl8169_private *tp)
-{
- if (IS_ERR(tp->rtl_fw))
- rtl_request_uncached_firmware(tp);
+ tp->fw_name, rc);
}
static void rtl_rx_close(struct rtl8169_private *tp)
@@ -4561,13 +4691,13 @@ static void rtl_set_rx_mode(struct net_device *dev)
static void rtl_hw_start(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
tp->hw_start(tp);
rtl_set_rx_max_size(tp);
rtl_set_rx_tx_desc_registers(tp);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
@@ -4691,18 +4821,10 @@ static void rtl_enable_clock_request(struct rtl8169_private *tp)
PCI_EXP_LNKCTL_CLKREQ_EN);
}
-static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
{
- u8 data;
-
- data = RTL_R8(tp, Config3);
-
- if (enable)
- data |= Rdy_to_L23;
- else
- data &= ~Rdy_to_L23;
-
- RTL_W8(tp, Config3, data);
+ /* work around an issue when PCI reset occurs during L2/L3 state */
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
}
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
@@ -4960,6 +5082,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl8168_config_eee_mac(tp);
+
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
@@ -4992,6 +5116,8 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+
+ rtl8168_config_eee_mac(tp);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5023,7 +5149,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
};
rtl_hw_start_8168f(tp);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
@@ -5054,10 +5180,12 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl8168_config_eee_mac(tp);
+
rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
}
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
@@ -5156,6 +5284,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl8168_config_eee_mac(tp);
+
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -5163,7 +5293,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
rtl_writephy(tp, 0x1f, 0x0c42);
rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
@@ -5236,11 +5366,13 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl8168_config_eee_mac(tp);
+
rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
}
static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
@@ -5511,7 +5643,7 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5546,7 +5678,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
}
static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5560,7 +5692,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
- rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_pcie_state_l2l3_disable(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5660,11 +5792,6 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
}
-static inline void *rtl8169_align(void *data)
-{
- return (void *)ALIGN((long)data, 16);
-}
-
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
struct RxDesc *desc)
{
@@ -5677,15 +5804,13 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
if (!data)
return NULL;
- if (rtl8169_align(data) != data) {
- kfree(data);
- data = kmalloc_node(R8169_RX_BUF_SIZE + 15, GFP_KERNEL, node);
- if (!data)
- return NULL;
+ /* Memory should be properly aligned, but better to check. */
+ if (!IS_ALIGNED((unsigned long)data, 8)) {
+ netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n");
+ goto err_out;
}
- mapping = dma_map_single(d, rtl8169_align(data), R8169_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
if (net_ratelimit())
netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
@@ -6067,7 +6192,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct device *d = tp_to_dev(tp);
dma_addr_t mapping;
u32 opts[2], len;
- bool stop_queue;
int frags;
if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -6109,6 +6233,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
+ netdev_sent_queue(dev, skb->len);
+
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
@@ -6121,14 +6247,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
- if (unlikely(stop_queue))
- netif_stop_queue(dev);
-
- if (__netdev_sent_queue(dev, skb->len, skb->xmit_more))
- RTL_W8(tp, TxPoll, NPQ);
+ RTL_W8(tp, TxPoll, NPQ);
- if (unlikely(stop_queue)) {
+ if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+ /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
+ * not miss a ring update when it notices a stopped queue.
+ */
+ smp_wmb();
+ netif_stop_queue(dev);
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
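
Schematic of the stop/wake handshake the comments above rely on, split across the transmit and completion paths. The names below are illustrative (slots_available() is a hypothetical helper standing in for rtl_tx_slots_avail()); the point is the barrier placement, not the exact code.

/* xmit tail: stop the queue only after publishing ring state, then re-check
 * so a completion that raced the stop can be undone immediately.
 */
static void example_xmit_tail(struct net_device *dev)
{
	if (!slots_available(dev)) {
		smp_wmb();			/* publish ring indices first */
		netif_stop_queue(dev);
		smp_mb();			/* pairs with completion side */
		if (slots_available(dev))	/* completion raced us */
			netif_start_queue(dev);
	}
}

/* completion side: advance dirty_tx, then check the stopped flag with a full
 * barrier in between so a just-stopped queue is never left asleep.
 */
static void example_tx_completion(struct net_device *dev)
{
	/* ... advance dirty_tx ... */
	smp_mb();
	if (netif_queue_stopped(dev) && slots_available(dev))
		netif_wake_queue(dev);
}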
@@ -6188,16 +6314,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
- /* The infamous DAC f*ckup only happens at boot time */
- if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
- netif_info(tp, intr, dev, "disabling PCI DAC\n");
- tp->cp_cmd &= ~PCIDAC;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- dev->features &= ~NETIF_F_HIGHDMA;
- }
-
- rtl8169_hw_reset(tp);
-
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
@@ -6293,7 +6409,6 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
struct sk_buff *skb;
struct device *d = tp_to_dev(tp);
- data = rtl8169_align(data);
dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
prefetch(data);
skb = napi_alloc_skb(&tp->napi, pkt_size);
@@ -6404,7 +6519,7 @@ release_descriptor:
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
- u16 status = rtl_get_events(tp);
+ u16 status = RTL_R16(tp, IntrStatus);
u16 irq_mask = RTL_R16(tp, IntrMask);
if (status == 0xffff || !(status & irq_mask))
@@ -6415,8 +6530,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
goto out;
}
- if (status & LinkChg && tp->dev->phydev)
- phy_mac_interrupt(tp->dev->phydev);
+ if (status & LinkChg)
+ phy_mac_interrupt(tp->phydev);
if (unlikely(status & RxFIFOOver &&
tp->mac_version == RTL_GIGA_MAC_VER_11)) {
@@ -6507,12 +6622,12 @@ static void r8169_phylink_handler(struct net_device *ndev)
}
if (net_ratelimit())
- phy_print_status(ndev->phydev);
+ phy_print_status(tp->phydev);
}
static int r8169_phy_connect(struct rtl8169_private *tp)
{
- struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0);
+ struct phy_device *phydev = tp->phydev;
phy_interface_t phy_mode;
int ret;
@@ -6539,7 +6654,7 @@ static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- phy_stop(dev->phydev);
+ phy_stop(tp->phydev);
napi_disable(&tp->napi);
netif_stop_queue(dev);
@@ -6581,7 +6696,7 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
- phy_disconnect(dev->phydev);
+ phy_disconnect(tp->phydev);
pci_free_irq(pdev, 0, tp);
@@ -6632,10 +6747,6 @@ static int rtl_open(struct net_device *dev)
if (retval < 0)
goto err_free_rx_1;
- INIT_WORK(&tp->wk.work, rtl_task);
-
- smp_mb();
-
rtl_request_firmware(tp);
retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
@@ -6662,7 +6773,7 @@ static int rtl_open(struct net_device *dev)
if (!rtl8169_init_counter_offsets(tp))
netif_warn(tp, hw, dev, "counter reset/update failed\n");
- phy_start(dev->phydev);
+ phy_start(tp->phydev);
netif_start_queue(dev);
rtl_unlock_work(tp);
@@ -6751,7 +6862,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
if (!netif_running(dev))
return;
- phy_stop(dev->phydev);
+ phy_stop(tp->phydev);
netif_device_detach(dev);
rtl_lock_work(tp);
@@ -6786,14 +6897,13 @@ static void __rtl8169_resume(struct net_device *dev)
rtl_pll_power_up(tp);
rtl8169_init_phy(dev, tp);
- phy_start(tp->dev->phydev);
+ phy_start(tp->phydev);
rtl_lock_work(tp);
napi_enable(&tp->napi);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ rtl_reset_work(tp);
rtl_unlock_work(tp);
-
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
static int rtl8169_resume(struct device *device)
@@ -6930,7 +7040,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
netif_napi_del(&tp->napi);
unregister_netdev(dev);
- mdiobus_unregister(tp->mii_bus);
+ mdiobus_unregister(tp->phydev->mdio.bus);
rtl_release_firmware(tp);
@@ -6990,9 +7100,9 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
unsigned int flags;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ rtl_unlock_config_regs(tp);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ rtl_lock_config_regs(tp);
flags = PCI_IRQ_LEGACY;
} else {
flags = PCI_IRQ_ALL_TYPES;
@@ -7001,6 +7111,30 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
}
+static void rtl_read_mac_address(struct rtl8169_private *tp,
+ u8 mac_addr[ETH_ALEN])
+{
+ u32 value;
+
+ /* Get MAC address */
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ value = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
+ mac_addr[0] = (value >> 0) & 0xff;
+ mac_addr[1] = (value >> 8) & 0xff;
+ mac_addr[2] = (value >> 16) & 0xff;
+ mac_addr[3] = (value >> 24) & 0xff;
+
+ value = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
+ mac_addr[4] = (value >> 0) & 0xff;
+ mac_addr[5] = (value >> 8) & 0xff;
+ break;
+ default:
+ break;
+ }
+}
+
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
return RTL_R8(tp, MCU) & LINK_LIST_RDY;
@@ -7037,7 +7171,6 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
static int r8169_mdio_register(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
- struct phy_device *phydev;
struct mii_bus *new_bus;
int ret;
@@ -7059,16 +7192,14 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
if (ret)
return ret;
- phydev = mdiobus_get_phy(new_bus, 0);
- if (!phydev) {
+ tp->phydev = mdiobus_get_phy(new_bus, 0);
+ if (!tp->phydev) {
mdiobus_unregister(new_bus);
return -ENODEV;
}
/* PHY will be woken up in rtl_open() */
- phy_suspend(phydev);
-
- tp->mii_bus = new_bus;
+ phy_suspend(tp->phydev);
return 0;
}
@@ -7166,9 +7297,37 @@ static void rtl_disable_clk(void *data)
clk_disable_unprepare(data);
}
+static int rtl_get_ether_clk(struct rtl8169_private *tp)
+{
+ struct device *d = tp_to_dev(tp);
+ struct clk *clk;
+ int rc;
+
+ clk = devm_clk_get(d, "ether_clk");
+ if (IS_ERR(clk)) {
+ rc = PTR_ERR(clk);
+ if (rc == -ENOENT)
+ /* clk-core allows NULL (for suspend / resume) */
+ rc = 0;
+ else if (rc != -EPROBE_DEFER)
+ dev_err(d, "failed to get clk: %d\n", rc);
+ } else {
+ tp->clk = clk;
+ rc = clk_prepare_enable(clk);
+ if (rc)
+ dev_err(d, "failed to enable clk: %d\n", rc);
+ else
+ rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
+ }
+
+ return rc;
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ /* align to u16 for is_valid_ether_addr() */
+ u8 mac_addr[ETH_ALEN] __aligned(2) = {};
struct rtl8169_private *tp;
struct net_device *dev;
int chipset, region, i;
@@ -7187,30 +7346,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->supports_gmii = cfg->has_gmii;
/* Get the *optional* external "ether_clk" used on some boards */
- tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
- if (IS_ERR(tp->clk)) {
- rc = PTR_ERR(tp->clk);
- if (rc == -ENOENT) {
- /* clk-core allows NULL (for suspend / resume) */
- tp->clk = NULL;
- } else if (rc == -EPROBE_DEFER) {
- return rc;
- } else {
- dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
- return rc;
- }
- } else {
- rc = clk_prepare_enable(tp->clk);
- if (rc) {
- dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
- return rc;
- }
-
- rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
- tp->clk);
- if (rc)
- return rc;
- }
+ rc = rtl_get_ether_clk(tp);
+ if (rc)
+ return rc;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -7255,13 +7393,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
- if (sizeof(dma_addr_t) > 4 && (use_dac == 1 || (use_dac == -1 &&
- tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
+ if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
-
- /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
- if (!pci_is_pcie(pdev))
- tp->cp_cmd |= PCIDAC;
dev->features |= NETIF_F_HIGHDMA;
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -7292,26 +7425,19 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
- tp->saved_wolopts = __rtl8169_get_wol(tp);
-
mutex_init(&tp->wk.mutex);
+ INIT_WORK(&tp->wk.work, rtl_task);
u64_stats_init(&tp->rx_stats.syncp);
u64_stats_init(&tp->tx_stats.syncp);
- /* Get MAC address */
- switch (tp->mac_version) {
- u8 mac_addr[ETH_ALEN] __aligned(4);
- case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
- *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
+ /* get MAC address */
+ rc = eth_platform_get_mac_address(&pdev->dev, mac_addr);
+ if (rc)
+ rtl_read_mac_address(tp, mac_addr);
+
+ if (is_valid_ether_addr(mac_addr))
+ rtl_rar_set(tp, mac_addr);
- if (is_valid_ether_addr(mac_addr))
- rtl_rar_set(tp, mac_addr);
- break;
- default:
- break;
- }
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
@@ -7360,7 +7486,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask;
tp->coalesce_info = cfg->coalesce_info;
- tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+ tp->fw_name = rtl_chip_infos[chipset].fw_name;
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
&tp->counters_phys_addr,
@@ -7401,7 +7527,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_mdio_unregister:
- mdiobus_unregister(tp->mii_bus);
+ mdiobus_unregister(tp->phydev->mdio.bus);
return rc;
}
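The netdev_sent_queue() call added to the r8169 transmit path above is one half of byte queue limits (BQL) accounting; the matching netdev_completed_queue() belongs in the TX-completion path, which is outside this excerpt. A minimal sketch of the pairing, with hypothetical foo_* names rather than the actual r8169 code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: bytes reported at xmit time must be released again once
 * the hardware has sent them, so the stack can bound the amount of data
 * queued in the ring.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...map the skb and fill a TX descriptor here... */
	netdev_sent_queue(dev, skb->len);	/* account bytes handed to HW */
	/* ...ring the doorbell... */
	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev,
			    unsigned int pkts, unsigned int bytes)
{
	/* release the same byte count once transmission has completed */
	netdev_completed_queue(dev, pkts, bytes);
}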
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ffc1ada4e6da..d28c8f9ca55b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -343,7 +343,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
int i;
priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
- ETH_HLEN + VLAN_HLEN;
+ ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -524,13 +524,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;
- /* The hardware checksum is 2 bytes appended to packet data */
- if (unlikely(skb->len < 2))
+ /* The hardware checksum is sizeof(__sum16) (2) bytes appended to
+ * the packet data
+ */
+ if (unlikely(skb->len < sizeof(__sum16)))
return;
- hw_csum = skb_tail_pointer(skb) - 2;
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
skb->ip_summed = CHECKSUM_COMPLETE;
- skb_trim(skb, skb->len - 2);
+ skb_trim(skb, skb->len - sizeof(__sum16));
}
/* Packet receive function for Ethernet AVB */
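The ravb_rx_csum() change above replaces the literal 2 with sizeof(__sum16); the underlying operation is simply peeling a little-endian 16-bit checksum off the tail of the frame and trimming the length. A stand-alone sketch of that pattern, with hypothetical names and without the skb/csum_unfold helpers:

#include <stdint.h>
#include <stddef.h>

/* Returns the trailing little-endian 16-bit checksum and shrinks *len,
 * or returns 0 and leaves *len untouched if the frame is too short.
 */
static uint16_t pull_trailing_csum(const uint8_t *frame, size_t *len)
{
	uint16_t csum;

	if (*len < sizeof(csum))
		return 0;
	csum = (uint16_t)frame[*len - 2] | ((uint16_t)frame[*len - 1] << 8);
	*len -= sizeof(csum);
	return csum;
}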
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f27a0dc8c563..339b2eae2100 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -555,7 +555,7 @@ static int sh_eth_soft_reset_gether(struct net_device *ndev)
sh_eth_write(ndev, 0, RDFFR);
/* Reset HW CRC register */
- if (mdp->cd->hw_checksum)
+ if (mdp->cd->csmr)
sh_eth_write(ndev, 0, CSMR);
/* Select MII mode */
@@ -619,7 +619,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
- .hw_checksum = 1,
+ .csmr = 1,
+ .rx_csum = 1,
.tsu = 1,
.no_tx_cntrs = 1,
};
@@ -668,7 +669,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
- .hw_checksum = 1,
+ .csmr = 1,
+ .rx_csum = 1,
.tsu = 1,
.select_mii = 1,
.magic = 1,
@@ -793,7 +795,8 @@ static struct sh_eth_cpu_data r8a77980_data = {
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
- .hw_checksum = 1,
+ .csmr = 1,
+ .rx_csum = 1,
.select_mii = 1,
.magic = 1,
.cexcr = 1,
@@ -1045,7 +1048,8 @@ static struct sh_eth_cpu_data sh7734_data = {
.no_ade = 1,
.xdfar_rw = 1,
.tsu = 1,
- .hw_checksum = 1,
+ .csmr = 1,
+ .rx_csum = 1,
.select_mii = 1,
.magic = 1,
.cexcr = 1,
@@ -1088,6 +1092,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.irq_flags = IRQF_SHARED,
.magic = 1,
.cexcr = 1,
+ .rx_csum = 1,
.dual_port = 1,
};
@@ -1532,8 +1537,9 @@ static int sh_eth_dev_init(struct net_device *ndev)
mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- /* PAUSE Prohibition */
+ /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
+ (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
if (mdp->cd->set_rate)
@@ -1592,6 +1598,19 @@ static void sh_eth_dev_exit(struct net_device *ndev)
update_mac_address(ndev);
}
+static void sh_eth_rx_csum(struct sk_buff *skb)
+{
+ u8 *hw_csum;
+
+ /* The hardware checksum is 2 bytes appended to packet data */
+ if (unlikely(skb->len < sizeof(__sum16)))
+ return;
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb_trim(skb, skb->len - sizeof(__sum16));
+}
+
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
@@ -1633,7 +1652,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
* the RFS bits are from bit 25 to bit 16. So, the
* driver needs right shifting by 16.
*/
- if (mdp->cd->hw_checksum)
+ if (mdp->cd->csmr)
desc_status >>= 16;
skb = mdp->rx_skbuff[entry];
@@ -1666,6 +1685,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ sh_eth_rx_csum(skb);
netif_receive_skb(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
@@ -2173,7 +2194,7 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(MAFCR);
if (cd->rtrate)
add_reg(RTRATE);
- if (cd->hw_checksum)
+ if (cd->csmr)
add_reg(CSMR);
if (cd->select_mii)
add_reg(RMII_MII);
@@ -2921,6 +2942,39 @@ static void sh_eth_set_rx_mode(struct net_device *ndev)
spin_unlock_irqrestore(&mdp->lock, flags);
}
+static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+
+ /* Disable TX and RX */
+ sh_eth_rcv_snd_disable(ndev);
+
+ /* Modify RX Checksum setting */
+ sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
+
+ /* Enable TX and RX */
+ sh_eth_rcv_snd_enable(ndev);
+
+ spin_unlock_irqrestore(&mdp->lock, flags);
+}
+
+static int sh_eth_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
+ sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
+
+ ndev->features = features;
+
+ return 0;
+}
+
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
if (!mdp->port)
@@ -3102,6 +3156,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = sh_eth_set_features,
};
static const struct net_device_ops sh_eth_netdev_ops_tsu = {
@@ -3117,6 +3172,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = sh_eth_set_features,
};
#ifdef CONFIG_OF
@@ -3245,6 +3301,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
+ if (mdp->cd->rx_csum) {
+ ndev->features = NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_RXCSUM;
+ }
+
/* set function */
if (mdp->cd->tsu)
ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
@@ -3294,7 +3355,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
mdp->port = port;
- ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
/* Need to init only the first port of the two sharing a TSU */
if (port == 0) {
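sh_eth_set_features() above follows the usual .ndo_set_features pattern: XOR the requested set against the current one and only touch the hardware for bits that actually changed. A minimal sketch of that pattern with hypothetical foo_* names:

#include <linux/netdevice.h>
#include <linux/types.h>

/* Hypothetical: poke the controller's RX-checksum enable bit. */
static void foo_hw_set_rx_csum(struct net_device *ndev, bool enable)
{
}

/* Sketch only: react to NETIF_F_RXCSUM changes, ignore the rest. */
static int foo_set_features(struct net_device *ndev, netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		foo_hw_set_rx_csum(ndev, !!(features & NETIF_F_RXCSUM));

	ndev->features = features;
	return 0;
}

From user space the same bit is flipped with "ethtool -K <dev> rx on|off".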
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 0c18650bbfe6..850726301e1c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -499,7 +499,8 @@ struct sh_eth_cpu_data {
unsigned no_ade:1; /* E-DMAC DOES NOT have ADE bit in EESR */
unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */
unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */
- unsigned hw_checksum:1; /* E-DMAC has CSMR */
+ unsigned csmr:1; /* E-DMAC has CSMR */
+ unsigned rx_csum:1; /* EtherC has ECMR.RCSC */
unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */
unsigned rmiimode:1; /* EtherC has RMIIMODE register */
unsigned rtrate:1; /* EtherC has RTRATE register */
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 748fb12260a6..2b2e1c4f0dc3 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -109,8 +109,6 @@ struct rocker_world_ops {
int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
unsigned long brport_flags,
struct switchdev_trans *trans);
- int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port,
- unsigned long *p_brport_flags);
int (*port_attr_bridge_flags_support_get)(const struct rocker_port *
rocker_port,
unsigned long *
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 6213827e3956..c883aa89b7ca 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1566,45 +1566,57 @@ static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
}
static int
-rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
+ rocker_port,
+ unsigned long *
+ p_brport_flags_support)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
- if (!wops->port_attr_bridge_flags_set)
+ if (!wops->port_attr_bridge_flags_support_get)
return -EOPNOTSUPP;
-
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
- trans);
+ return wops->port_attr_bridge_flags_support_get(rocker_port,
+ p_brport_flags_support);
}
static int
-rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
- unsigned long *p_brport_flags)
+rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
+ unsigned long brport_flags,
+ struct switchdev_trans *trans)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
+ unsigned long brport_flags_s;
+ int err;
- if (!wops->port_attr_bridge_flags_get)
+ if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
- return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
+
+ err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
+ &brport_flags_s);
+ if (err)
+ return err;
+
+ if (brport_flags & ~brport_flags_s)
+ return -EINVAL;
+
+ return 0;
}
static int
-rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
- rocker_port,
- unsigned long *
- p_brport_flags_support)
+rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
+ unsigned long brport_flags,
+ struct switchdev_trans *trans)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
- if (!wops->port_attr_bridge_flags_support_get)
+ if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
- return wops->port_attr_bridge_flags_support_get(rocker_port,
- p_brport_flags_support);
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
+ trans);
}
static int
@@ -2026,6 +2038,18 @@ static void rocker_port_neigh_destroy(struct net_device *dev,
err);
}
+static int rocker_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct rocker *rocker = rocker_port->rocker;
+
+ ppid->id_len = sizeof(rocker->hw.id);
+ memcpy(&ppid->id, &rocker->hw.id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_open = rocker_port_open,
.ndo_stop = rocker_port_stop,
@@ -2035,39 +2059,13 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
.ndo_change_proto_down = rocker_port_change_proto_down,
.ndo_neigh_destroy = rocker_port_neigh_destroy,
+ .ndo_get_port_parent_id = rocker_port_get_port_parent_id,
};
/********************
* swdev interface
********************/
-static int rocker_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- const struct rocker_port *rocker_port = netdev_priv(dev);
- const struct rocker *rocker = rocker_port->rocker;
- int err = 0;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(rocker->hw.id);
- memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = rocker_world_port_attr_bridge_flags_get(rocker_port,
- &attr->u.brport_flags);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
- err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
- &attr->u.brport_flags_support);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return err;
-}
-
static int rocker_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
@@ -2081,6 +2079,11 @@ static int rocker_port_attr_set(struct net_device *dev,
attr->u.stp_state,
trans);
break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
+ attr->u.brport_flags,
+ trans);
+ break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = rocker_world_port_attr_bridge_flags_set(rocker_port,
attr->u.brport_flags,
@@ -2139,11 +2142,6 @@ static int rocker_port_obj_del(struct net_device *dev,
return err;
}
-static const struct switchdev_ops rocker_port_switchdev_ops = {
- .switchdev_port_attr_get = rocker_port_attr_get,
- .switchdev_port_attr_set = rocker_port_attr_set,
-};
-
struct rocker_fib_event_work {
struct work_struct work;
union {
@@ -2597,7 +2595,6 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port_dev_addr_init(rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
- dev->switchdev_ops = &rocker_port_switchdev_ops;
netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
NAPI_POLL_WEIGHT);
netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
@@ -2708,6 +2705,19 @@ static bool rocker_port_dev_check(const struct net_device *dev)
return dev->netdev_ops == &rocker_port_netdev_ops;
}
+static int
+rocker_switchdev_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info *port_attr_info)
+{
+ int err;
+
+ err = rocker_port_attr_set(netdev, port_attr_info->attr,
+ port_attr_info->trans);
+
+ port_attr_info->handled = true;
+ return notifier_from_errno(err);
+}
+
struct rocker_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
@@ -2725,7 +2735,7 @@ rocker_fdb_offload_notify(struct rocker_port *rocker_port,
info.vid = recv_info->vid;
info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
- rocker_port->dev, &info.info);
+ rocker_port->dev, &info.info, NULL);
}
static void rocker_switchdev_event_work(struct work_struct *work)
@@ -2777,6 +2787,9 @@ static int rocker_switchdev_event(struct notifier_block *unused,
if (!rocker_port_dev_check(dev))
return NOTIFY_DONE;
+ if (event == SWITCHDEV_PORT_ATTR_SET)
+ return rocker_switchdev_port_attr_set_event(dev, ptr);
+
rocker_port = netdev_priv(dev);
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (WARN_ON(!switchdev_work))
@@ -2839,6 +2852,8 @@ static int rocker_switchdev_blocking_event(struct notifier_block *unused,
case SWITCHDEV_PORT_OBJ_ADD:
case SWITCHDEV_PORT_OBJ_DEL:
return rocker_switchdev_port_obj_event(event, dev, ptr);
+ case SWITCHDEV_PORT_ATTR_SET:
+ return rocker_switchdev_port_attr_set_event(dev, ptr);
}
return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 6473cc68c2d5..fa296a7c255d 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1833,10 +1833,10 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
rtnl_lock();
if (learned && removing)
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
- lw->ofdpa_port->dev, &info.info);
+ lw->ofdpa_port->dev, &info.info, NULL);
else if (learned && !removing)
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
- lw->ofdpa_port->dev, &info.info);
+ lw->ofdpa_port->dev, &info.info, NULL);
rtnl_unlock();
kfree(work);
@@ -2512,16 +2512,6 @@ static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
}
static int
-ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
- unsigned long *p_brport_flags)
-{
- const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-
- *p_brport_flags = ofdpa_port->brport_flags;
- return 0;
-}
-
-static int
ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
rocker_port,
unsigned long *
@@ -2823,7 +2813,6 @@ struct rocker_world_ops rocker_ofdpa_ops = {
.port_stop = ofdpa_port_stop,
.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
- .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 690aee88f0eb..6d22dd500790 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -400,9 +400,9 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
}
/* allocate memory for TX descriptors */
- tx_ring->dma_tx = dma_zalloc_coherent(dev,
- tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
- &tx_ring->dma_tx_phy, GFP_KERNEL);
+ tx_ring->dma_tx = dma_alloc_coherent(dev,
+ tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ &tx_ring->dma_tx_phy, GFP_KERNEL);
if (!tx_ring->dma_tx)
return -ENOMEM;
@@ -479,9 +479,9 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
rx_ring->queue_no = queue_no;
/* allocate memory for RX descriptors */
- rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
- rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
- &rx_ring->dma_rx_phy, GFP_KERNEL);
+ rx_ring->dma_rx = dma_alloc_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ &rx_ring->dma_rx_phy, GFP_KERNEL);
if (rx_ring->dma_rx == NULL)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index b6a50058bb8d..e888b479c596 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6041,27 +6041,33 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
- /* MUM and SUC firmware share the same partition type */
{ NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" },
{ NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
- { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }
+ { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" },
+ { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" },
+ { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" },
+ { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" },
+ { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" },
};
+#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types)
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
struct efx_mcdi_mtd_partition *part,
- unsigned int type)
+ unsigned int type,
+ unsigned long *found)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
const struct efx_ef10_nvram_type_info *info;
size_t size, erase_size, outlen;
+ int type_idx = 0;
bool protected;
int rc;
- for (info = efx_ef10_nvram_types; ; info++) {
- if (info ==
- efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+ for (type_idx = 0; ; type_idx++) {
+ if (type_idx == EF10_NVRAM_PARTITION_COUNT)
return -ENODEV;
+ info = efx_ef10_nvram_types + type_idx;
if ((type & ~info->type_mask) == info->type)
break;
}
@@ -6071,8 +6077,22 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
if (rc)
return rc;
+ if (protected &&
+ (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS &&
+ type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS))
+ /* Hide protected partitions that don't provide defaults. */
+ return -ENODEV;
+
if (protected)
- return -ENODEV; /* hide it */
+ /* Protected partitions are read only. */
+ erase_size = 0;
+
+ /* If we've already exposed a partition of this type, hide this
+ * duplicate. All operations on MTDs are keyed by the type anyway,
+ * so we can't act on the duplicate.
+ */
+ if (__test_and_set_bit(type_idx, found))
+ return -EEXIST;
part->nvram_type = type;
@@ -6105,6 +6125,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+ DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
struct efx_mcdi_mtd_partition *parts;
size_t outlen, n_parts_total, i, n_parts;
unsigned int type;
@@ -6133,11 +6154,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
for (i = 0; i < n_parts_total; i++) {
type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
i);
- rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
- if (rc == 0)
- n_parts++;
- else if (rc != -ENODEV)
+ rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
+ found);
+ if (rc == -EEXIST || rc == -ENODEV)
+ continue;
+ if (rc)
goto fail;
+ n_parts++;
}
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
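The 'found' bitmap threaded through efx_ef10_mtd_probe_partition() above exists purely to expose each NVRAM partition type once and return -EEXIST for duplicates. Reduced to its essentials, the test-and-set idiom looks like this (plain C sketch, assuming fewer than 64 table entries, not the sfc code itself):

#include <stdbool.h>
#include <stdint.h>

/* Claim a table index; true on first use, false for duplicates. */
static bool claim_index(uint64_t *found, unsigned int idx)
{
	uint64_t bit = 1ULL << idx;
	bool already_set = (*found & bit) != 0;

	*found |= bit;
	return !already_set;
}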
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3643015a55cf..bc655ffc9e02 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -915,7 +915,7 @@ rollback:
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
- mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}
static bool efx_default_channel_want_txqs(struct efx_channel *channel)
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index a8ecb33390da..9c07b5175581 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -33,8 +33,8 @@
int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, gfp_flags);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index dfad93fca0a6..295ec1787b9f 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -2074,22 +2074,26 @@ fail:
static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+ MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
+ NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+ 1);
BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
NULL, 0, NULL);
+
return rc;
}
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
MCDI_DECLARE_BUF(outbuf,
MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
size_t outlen;
@@ -2098,6 +2102,8 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
+ MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -2147,15 +2153,51 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
- int rc;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
+ size_t outlen;
+ int rc, rc2;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+ /* Always set this flag. Old firmware ignores it. */
+ MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+ 1);
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ outbuf, sizeof(outbuf), &outlen);
+ if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ netif_err(efx, drv, efx->net_dev,
+ "NVRAM update failed verification with code 0x%x\n",
+ rc2);
+ switch (rc2) {
+ case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
+ case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
+ rc = -EIO;
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
+ case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
+ rc = -EINVAL;
+ break;
+ case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
+ case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
+ case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ rc = -EPERM;
+ break;
+ default:
+ netif_err(efx, drv, efx->net_dev,
+ "Unknown response to NVRAM_UPDATE_FINISH\n");
+ rc = -EIO;
+ }
+ }
+
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 3839eec783ea..20a5523bf9f3 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -6784,6 +6784,14 @@
* subset of the information stored in this partition.
*/
#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
+/* enum: Bundle image partition */
+#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00
+/* enum: Bundle metadata partition that holds additional information related to
+ * a bundle update in TLV format
+ */
+#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01
+/* enum: Bundle update non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 4ac30b6e5dab..0d03e0577d85 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -66,6 +66,9 @@ int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
part->mtd.writesize = 1;
+ if (!(part->mtd.flags & MTD_NO_ERASE))
+ part->mtd.flags |= MTD_WRITEABLE;
+
part->mtd.owner = THIS_MODULE;
part->mtd.priv = efx;
part->mtd.name = part->name;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aa1945a858d5..c2d45a40eb48 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -34,8 +34,8 @@
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, gfp_flags);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 396ff01298cd..8702ab44d80b 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -360,8 +360,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
rc = efx_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
- if (rx_queue->added_count == rx_queue->removed_count)
- efx_schedule_slow_fill(rx_queue);
+ efx_schedule_slow_fill(rx_queue);
goto out;
}
} while ((space -= batch_size) >= batch_size);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 22eb059086f7..06c8f282263f 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -471,7 +471,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
if (IS_ERR(segments))
return PTR_ERR(segments);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
skb = segments;
while (skb) {
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 3140999642ba..358e66b81926 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -666,7 +666,7 @@ static inline void ioc3_tx(struct net_device *dev)
packets++;
skb = ip->tx_skbs[o_entry];
bytes += skb->len;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
ip->tx_skbs[o_entry] = NULL;
o_entry = (o_entry + 1) & 127; /* Next */
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 703fbbefea44..f1271402ca21 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -68,6 +68,8 @@ module_param(timeout, int, 0);
* packets in and out, so there is place for a packet
*/
struct meth_private {
+ struct platform_device *pdev;
+
/* in-memory copy of MAC Control register */
u64 mac_ctrl;
@@ -211,8 +213,8 @@ static void meth_check_link(struct net_device *dev)
static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
- priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma, GFP_ATOMIC);
+ priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev,
+ TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_ATOMIC);
if (!priv->tx_ring)
return -ENOMEM;
@@ -236,7 +238,7 @@ static int meth_init_rx_ring(struct meth_private *priv)
priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head);
/* I'll need to re-sync it after each RX */
priv->rx_ring_dmas[i] =
- dma_map_single(NULL, priv->rx_ring[i],
+ dma_map_single(&priv->pdev->dev, priv->rx_ring[i],
METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
mace->eth.rx_fifo = priv->rx_ring_dmas[i];
}
@@ -253,7 +255,7 @@ static void meth_free_tx_ring(struct meth_private *priv)
dev_kfree_skb(priv->tx_skbs[i]);
priv->tx_skbs[i] = NULL;
}
- dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
+ dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
priv->tx_ring_dma);
}
@@ -263,7 +265,7 @@ static void meth_free_rx_ring(struct meth_private *priv)
int i;
for (i = 0; i < RX_RING_ENTRIES; i++) {
- dma_unmap_single(NULL, priv->rx_ring_dmas[i],
+ dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i],
METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
priv->rx_ring[i] = 0;
priv->rx_ring_dmas[i] = 0;
@@ -393,7 +395,8 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
fifo_rptr = (fifo_rptr - 1) & 0x0f;
}
while (priv->rx_write != fifo_rptr) {
- dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
+ dma_unmap_single(&priv->pdev->dev,
+ priv->rx_ring_dmas[priv->rx_write],
METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
status = priv->rx_ring[priv->rx_write]->status.raw;
#if MFE_DEBUG
@@ -454,7 +457,8 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
priv->rx_ring[priv->rx_write]->status.raw = 0;
priv->rx_ring_dmas[priv->rx_write] =
- dma_map_single(NULL, priv->rx_ring[priv->rx_write],
+ dma_map_single(&priv->pdev->dev,
+ priv->rx_ring[priv->rx_write],
METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
ADVANCE_RX_PTR(priv->rx_write);
@@ -521,7 +525,7 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
DPRINTK("RPTR points us here, but packet not done?\n");
break;
}
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
priv->tx_skbs[priv->tx_read] = NULL;
priv->tx_ring[priv->tx_read].header.raw = 0;
priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1);
@@ -637,7 +641,7 @@ static void meth_tx_1page_prepare(struct meth_private *priv,
}
/* first page */
- catbuf = dma_map_single(NULL, buffer_data, buffer_len,
+ catbuf = dma_map_single(&priv->pdev->dev, buffer_data, buffer_len,
DMA_TO_DEVICE);
desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
desc->data.cat_buf[0].form.len = buffer_len - 1;
@@ -663,12 +667,12 @@ static void meth_tx_2page_prepare(struct meth_private *priv,
}
/* first page */
- catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
+ catbuf1 = dma_map_single(&priv->pdev->dev, buffer1_data, buffer1_len,
DMA_TO_DEVICE);
desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
desc->data.cat_buf[0].form.len = buffer1_len - 1;
/* second page */
- catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
+ catbuf2 = dma_map_single(&priv->pdev->dev, buffer2_data, buffer2_len,
DMA_TO_DEVICE);
desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
desc->data.cat_buf[1].form.len = buffer2_len - 1;
@@ -840,6 +844,7 @@ static int meth_probe(struct platform_device *pdev)
memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
priv = netdev_priv(dev);
+ priv->pdev = pdev;
spin_lock_init(&priv->meth_lock);
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 808cf9816673..5b351beb78cb 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -714,7 +714,7 @@ static void sis190_tx_interrupt(struct net_device *dev,
sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
tp->Tx_skbuff[entry] = NULL;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
}
if (tp->dirty_tx != dirty_tx) {
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 4bb89f74742c..6073387511f8 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1927,7 +1927,7 @@ static void sis900_finish_xmit (struct net_device *net_dev)
pci_unmap_single(sis_priv->pci_dev,
sis_priv->tx_ring[entry].bufptr, skb->len,
PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
sis_priv->tx_skbuff[entry] = NULL;
sis_priv->tx_ring[entry].bufptr = 0;
sis_priv->tx_ring[entry].cmdsts = 0;
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 15c62c160953..be47d864f8b9 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
skb = ep->tx_skbuff[entry];
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
}
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8355dfbb8ec3..b550e624500d 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1188,7 +1188,7 @@ smc911x_tx_dma_irq(void *data)
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
BUG_ON(skb == NULL);
- dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
+ dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
netif_trans_update(dev);
dev_kfree_skb_irq(skb);
lp->current_tx_skb = NULL;
@@ -1219,7 +1219,7 @@ smc911x_rx_dma_irq(void *data)
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
- dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
+ dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
BUG_ON(skb == NULL);
lp->current_rx_skb = NULL;
PRINT_PKT(skb->data, skb->len);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 05a0948ad929..a18149720aa2 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1029,8 +1029,8 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
struct netsec_desc_ring *dring = &priv->desc_ring[id];
int i;
- dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
- &dring->desc_dma, GFP_KERNEL);
+ dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_dma, GFP_KERNEL);
if (!dring->vaddr)
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6209cc1fb305..f194235153f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -105,6 +105,16 @@ config DWMAC_OXNAS
This selects the Oxford Semiconductor OXNAS SoC glue layer support for
the stmmac device driver. This driver is used for OX820.
+config DWMAC_QCOM_ETHQOS
+ tristate "Qualcomm ETHQOS support"
+ default ARCH_QCOM
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ help
+ Support for the Qualcomm ETHQOS core.
+
+ This selects the Qualcomm ETHQOS glue layer support for the
+ stmmac device driver.
+
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index bf09701d2623..c529c21e9bdd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
+obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
new file mode 100644
index 000000000000..7ec895407d23
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-19, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define RGMII_IO_MACRO_CONFIG 0x0
+#define SDCC_HC_REG_DLL_CONFIG 0x4
+#define SDCC_HC_REG_DDR_CONFIG 0xC
+#define SDCC_HC_REG_DLL_CONFIG2 0x10
+#define SDC4_STATUS 0x14
+#define SDCC_USR_CTL 0x18
+#define RGMII_IO_MACRO_CONFIG2 0x1C
+#define RGMII_IO_MACRO_DEBUG1 0x20
+#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
+
+/* RGMII_IO_MACRO_CONFIG fields */
+#define RGMII_CONFIG_FUNC_CLK_EN BIT(30)
+#define RGMII_CONFIG_POS_NEG_DATA_SEL BIT(23)
+#define RGMII_CONFIG_GPIO_CFG_RX_INT GENMASK(21, 20)
+#define RGMII_CONFIG_GPIO_CFG_TX_INT GENMASK(19, 17)
+#define RGMII_CONFIG_MAX_SPD_PRG_9 GENMASK(16, 8)
+#define RGMII_CONFIG_MAX_SPD_PRG_2 GENMASK(7, 6)
+#define RGMII_CONFIG_INTF_SEL GENMASK(5, 4)
+#define RGMII_CONFIG_BYPASS_TX_ID_EN BIT(3)
+#define RGMII_CONFIG_LOOPBACK_EN BIT(2)
+#define RGMII_CONFIG_PROG_SWAP BIT(1)
+#define RGMII_CONFIG_DDR_MODE BIT(0)
+
+/* SDCC_HC_REG_DLL_CONFIG fields */
+#define SDCC_DLL_CONFIG_DLL_RST BIT(30)
+#define SDCC_DLL_CONFIG_PDN BIT(29)
+#define SDCC_DLL_CONFIG_MCLK_FREQ GENMASK(26, 24)
+#define SDCC_DLL_CONFIG_CDR_SELEXT GENMASK(23, 20)
+#define SDCC_DLL_CONFIG_CDR_EXT_EN BIT(19)
+#define SDCC_DLL_CONFIG_CK_OUT_EN BIT(18)
+#define SDCC_DLL_CONFIG_CDR_EN BIT(17)
+#define SDCC_DLL_CONFIG_DLL_EN BIT(16)
+#define SDCC_DLL_MCLK_GATING_EN BIT(5)
+#define SDCC_DLL_CDR_FINE_PHASE GENMASK(3, 2)
+
+/* SDCC_HC_REG_DDR_CONFIG fields */
+#define SDCC_DDR_CONFIG_PRG_DLY_EN BIT(31)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY GENMASK(26, 21)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE GENMASK(29, 27)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN BIT(30)
+#define SDCC_DDR_CONFIG_PRG_RCLK_DLY GENMASK(8, 0)
+
+/* SDCC_HC_REG_DLL_CONFIG2 fields */
+#define SDCC_DLL_CONFIG2_DLL_CLOCK_DIS BIT(21)
+#define SDCC_DLL_CONFIG2_MCLK_FREQ_CALC GENMASK(17, 10)
+#define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL GENMASK(3, 2)
+#define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW BIT(1)
+#define SDCC_DLL_CONFIG2_DDR_CAL_EN BIT(0)
+
+/* SDC4_STATUS bits */
+#define SDC4_STATUS_DLL_LOCK BIT(7)
+
+/* RGMII_IO_MACRO_CONFIG2 fields */
+#define RGMII_CONFIG2_RSVD_CONFIG15 GENMASK(31, 17)
+#define RGMII_CONFIG2_RGMII_CLK_SEL_CFG BIT(16)
+#define RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN BIT(13)
+#define RGMII_CONFIG2_CLK_DIVIDE_SEL BIT(12)
+#define RGMII_CONFIG2_RX_PROG_SWAP BIT(7)
+#define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6)
+#define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5)
+
+struct ethqos_emac_por {
+ unsigned int offset;
+ unsigned int value;
+};
+
+struct qcom_ethqos {
+ struct platform_device *pdev;
+ void __iomem *rgmii_base;
+
+ unsigned int rgmii_clk_rate;
+ struct clk *rgmii_clk;
+ unsigned int speed;
+
+ const struct ethqos_emac_por *por;
+ unsigned int num_por;
+};
+
+static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+{
+ return readl(ethqos->rgmii_base + offset);
+}
+
+static void rgmii_writel(struct qcom_ethqos *ethqos,
+ int value, unsigned int offset)
+{
+ writel(value, ethqos->rgmii_base + offset);
+}
+
+static void rgmii_updatel(struct qcom_ethqos *ethqos,
+ int mask, int val, unsigned int offset)
+{
+ unsigned int temp;
+
+ temp = rgmii_readl(ethqos, offset);
+ temp = (temp & ~(mask)) | val;
+ rgmii_writel(ethqos, temp, offset);
+}
+
+static void rgmii_dump(struct qcom_ethqos *ethqos)
+{
+ dev_dbg(&ethqos->pdev->dev, "RGMII register dump\n");
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DDR_CONFIG: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DDR_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG2: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG2));
+ dev_dbg(&ethqos->pdev->dev, "SDC4_STATUS: %x\n",
+ rgmii_readl(ethqos, SDC4_STATUS));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_USR_CTL: %x\n",
+ rgmii_readl(ethqos, SDCC_USR_CTL));
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG2: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG2));
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_DEBUG1: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_DEBUG1));
+ dev_dbg(&ethqos->pdev->dev, "EMAC_SYSTEM_LOW_POWER_DEBUG: %x\n",
+ rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
+}
+
+/* Clock rates */
+#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
+#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
+#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
+
+static void
+ethqos_update_rgmii_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ ethqos->rgmii_clk_rate = RGMII_1000_NOM_CLK_FREQ;
+ break;
+
+ case SPEED_100:
+ ethqos->rgmii_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
+ break;
+
+ case SPEED_10:
+ ethqos->rgmii_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
+ break;
+ }
+
+ clk_set_rate(ethqos->rgmii_clk, ethqos->rgmii_clk_rate);
+}
+
+static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
+{
+ rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
+ RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+}
+
+static const struct ethqos_emac_por emac_v2_3_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x00C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
+{
+ unsigned int val;
+ int retry = 1000;
+
+ /* Set CDR_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
+ SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set CDR_EXT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
+ SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set DLL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Wait for CK_OUT_EN clear */
+ do {
+ val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
+ val &= SDCC_DLL_CONFIG_CK_OUT_EN;
+ if (!val)
+ break;
+ mdelay(1);
+ retry--;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev, "Clear CK_OUT_EN timed out\n");
+
+ /* Set CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Wait for CK_OUT_EN set */
+ retry = 1000;
+ do {
+ val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
+ val &= SDCC_DLL_CONFIG_CK_OUT_EN;
+ if (val)
+ break;
+ mdelay(1);
+ retry--;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev, "Set CK_OUT_EN timed out\n");
+
+ /* Set DDR_CAL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
+ SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ 0, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
+ 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
+ BIT(2), SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_HC_REG_DLL_CONFIG2);
+
+ return 0;
+}
+
+static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
+{
+ /* Disable loopback mode */
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG2);
+
+ /* Select RGMII, write 0 to interface select */
+ rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+
+ switch (ethqos->speed) {
+ case SPEED_1000:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
+
+ /* Set PRG_RCLK_DLY to 57 for 1.8 ns delay */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
+ 57, SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
+ SDCC_DDR_CONFIG_PRG_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ break;
+
+ case SPEED_100:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
+ BIT(6), RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ /* Write 0x5 to PRG_RCLK_DLY_CODE */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
+ (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ break;
+
+ case SPEED_10:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
+ BIT(12) | GENMASK(9, 8),
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ /* Write 0x5 to PRG_RCLK_DLY_CODE */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
+ (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ break;
+ default:
+ dev_err(&ethqos->pdev->dev,
+ "Invalid speed %d\n", ethqos->speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ethqos_configure(struct qcom_ethqos *ethqos)
+{
+ volatile unsigned int dll_lock;
+ unsigned int i, retry = 1000;
+
+ /* Reset to POR values and enable clk */
+ for (i = 0; i < ethqos->num_por; i++)
+ rgmii_writel(ethqos, ethqos->por[i].value,
+ ethqos->por[i].offset);
+ ethqos_set_func_clk_en(ethqos);
+
+ /* Initialize the DLL first */
+
+ /* Set DLL_RST */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
+ SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set PDN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
+ SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear DLL_RST */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
+ SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear PDN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
+ SDCC_HC_REG_DLL_CONFIG);
+
+ if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
+ /* Set DLL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set USR_CTL bit 26 with mask of 3 bits */
+ rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), SDCC_USR_CTL);
+
+ /* wait for DLL LOCK */
+ do {
+ mdelay(1);
+ dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
+ if (dll_lock & SDC4_STATUS_DLL_LOCK)
+ break;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev,
+ "Timeout while waiting for DLL lock\n");
+ }
+
+ if (ethqos->speed == SPEED_1000)
+ ethqos_dll_configure(ethqos);
+
+ ethqos_rgmii_macro_init(ethqos);
+
+ return 0;
+}
+
+static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct qcom_ethqos *ethqos = priv;
+
+ ethqos->speed = speed;
+ ethqos_update_rgmii_clk(ethqos, speed);
+ ethqos_configure(ethqos);
+}
+
+static int qcom_ethqos_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct qcom_ethqos *ethqos;
+ struct resource *res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+
+ ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
+ if (!ethqos) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ ethqos->pdev = pdev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii");
+ ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ethqos->rgmii_base)) {
+ dev_err(&pdev->dev, "Can't get rgmii base\n");
+ ret = PTR_ERR(ethqos->rgmii_base);
+ goto err_mem;
+ }
+
+ ethqos->por = of_device_get_match_data(&pdev->dev);
+
+ ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
+ if (IS_ERR(ethqos->rgmii_clk)) {
+ ret = PTR_ERR(ethqos->rgmii_clk);
+ goto err_mem;
+ }
+
+ ret = clk_prepare_enable(ethqos->rgmii_clk);
+ if (ret)
+ goto err_mem;
+
+ ethqos->speed = SPEED_1000;
+ ethqos_update_rgmii_clk(ethqos, SPEED_1000);
+ ethqos_set_func_clk_en(ethqos);
+
+ plat_dat->bsp_priv = ethqos;
+ plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
+ plat_dat->has_gmac4 = 1;
+ plat_dat->pmt = 1;
+ plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_clk;
+
+ rgmii_dump(ethqos);
+
+ return ret;
+
+err_clk:
+ clk_disable_unprepare(ethqos->rgmii_clk);
+
+err_mem:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int qcom_ethqos_remove(struct platform_device *pdev)
+{
+ struct qcom_ethqos *ethqos;
+ int ret;
+
+ ethqos = get_stmmac_bsp_priv(&pdev->dev);
+ if (!ethqos)
+ return -ENODEV;
+
+ ret = stmmac_pltfr_remove(pdev);
+ clk_disable_unprepare(ethqos->rgmii_clk);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_ethqos_match[] = {
+ { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
+
+static struct platform_driver qcom_ethqos_driver = {
+ .probe = qcom_ethqos_probe,
+ .remove = qcom_ethqos_remove,
+ .driver = {
+ .name = "qcom-ethqos",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(qcom_ethqos_match),
+ },
+};
+module_platform_driver(qcom_ethqos_driver);
+
+MODULE_DESCRIPTION("Qualcomm ETHQOS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7b923362ee55..3b174eae77c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
}
ret = phy_power_on(bsp_priv, true);
- if (ret)
+ if (ret) {
+ gmac_clk_enable(bsp_priv, false);
return ret;
+ }
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 20299f6f65fc..7fbb6a4dbf51 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
static int dwmac4_rx_check_timestamp(void *desc)
{
struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
+ unsigned int rdes1 = le32_to_cpu(p->des1);
+ unsigned int rdes3 = le32_to_cpu(p->des3);
u32 own, ctxt;
int ret = 1;
- own = p->des3 & RDES3_OWN;
- ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+ own = rdes3 & RDES3_OWN;
+ ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
if (likely(!own && ctxt)) {
- if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
/* Corrupted value */
ret = -EINVAL;
else
@@ -268,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
int ret = -EINVAL;
/* Get the status from normal w/b descriptor */
- if (likely(p->des3 & TDES3_RS1V)) {
+ if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) {
if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
int i = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 49f5687879df..545cb9c47433 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -124,9 +124,9 @@ void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
int dwmac4_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan)
{
- int ret = 0;
-
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
+ u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+ int ret = 0;
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
@@ -151,16 +151,11 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
x->normal_irq_n++;
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u32 value;
-
- value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
- /* to schedule NAPI on real RIE event. */
- if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
- x->rx_normal_irq_n++;
- ret |= handle_rx;
- }
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
}
- if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
+ if (likely(intr_status & (DMA_CHAN_STATUS_TI |
+ DMA_CHAN_STATUS_TBU))) {
x->tx_normal_irq_n++;
ret |= handle_tx;
}
@@ -168,12 +163,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
x->rx_early_irq++;
}
- /* Clear the interrupt by writing a logic 1 to the chanX interrupt
- * status [21-0] expect reserved bits [5-3]
- */
- writel((intr_status & 0x3fffc7),
- ioaddr + DMA_CHAN_STATUS(chan));
-
+ writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan));
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index d6bb953685fa..37d5e6fe7473 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -193,9 +193,10 @@
#define XGMAC_AIE BIT(14)
#define XGMAC_RBUE BIT(7)
#define XGMAC_RIE BIT(6)
+#define XGMAC_TBUE BIT(2)
#define XGMAC_TIE BIT(0)
#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
- XGMAC_RIE | XGMAC_TIE)
+ XGMAC_RIE | XGMAC_TBUE | XGMAC_TIE)
#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
#define XGMAC_RWT GENMASK(7, 0)
#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
@@ -204,6 +205,7 @@
#define XGMAC_FBE BIT(12)
#define XGMAC_RBU BIT(7)
#define XGMAC_RI BIT(6)
+#define XGMAC_TBU BIT(2)
#define XGMAC_TPS BIT(1)
#define XGMAC_TI BIT(0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 6c5092e7771c..2ba712b48a89 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan)
{
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
/* ABNORMAL interrupts */
@@ -282,20 +283,17 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
x->normal_irq_n++;
if (likely(intr_status & XGMAC_RI)) {
- u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
- if (likely(value & XGMAC_RIE)) {
- x->rx_normal_irq_n++;
- ret |= handle_rx;
- }
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
}
- if (likely(intr_status & XGMAC_TI)) {
+ if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
x->tx_normal_irq_n++;
ret |= handle_tx;
}
}
/* Clear interrupts */
- writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 63e1064b27a2..dd95d959c1ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include "common.h"
#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
#include <linux/reset.h>
struct stmmac_resources {
@@ -78,11 +79,10 @@ struct stmmac_rx_queue {
};
struct stmmac_channel {
- struct napi_struct napi ____cacheline_aligned_in_smp;
+ struct napi_struct rx_napi ____cacheline_aligned_in_smp;
+ struct napi_struct tx_napi ____cacheline_aligned_in_smp;
struct stmmac_priv *priv_data;
u32 index;
- int has_rx;
- int has_tx;
};
struct stmmac_tc_entry {
@@ -174,6 +174,7 @@ struct stmmac_priv {
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
+ struct hwtstamp_config tstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1f61c25d82b..3c749c327cbd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -696,33 +696,38 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_eee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
- priv->eee_enabled = edata->eee_enabled;
-
- if (!priv->eee_enabled)
+ if (!edata->eee_enabled) {
stmmac_disable_eee_mode(priv);
- else {
+ } else {
/* We are asking for enabling the EEE but it is safe
* to verify all by invoking the eee_init function.
* In case of failure it will return an error.
*/
- priv->eee_enabled = stmmac_eee_init(priv);
- if (!priv->eee_enabled)
+ edata->eee_enabled = stmmac_eee_init(priv);
+ if (!edata->eee_enabled)
return -EOPNOTSUPP;
-
- /* Do not change tx_lpi_timer in case of failure */
- priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(dev->phydev, edata);
+ ret = phy_ethtool_set_eee(dev->phydev, edata);
+ if (ret)
+ return ret;
+
+ priv->eee_enabled = edata->eee_enabled;
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ return 0;
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
- if (!clk)
- return 0;
+ if (!clk) {
+ clk = priv->plat->clk_ref_rate;
+ if (!clk)
+ return 0;
+ }
return (usec * (clk / 1000000)) / 256;
}
@@ -731,8 +736,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
- if (!clk)
- return 0;
+ if (!clk) {
+ clk = priv->plat->clk_ref_rate;
+ if (!clk)
+ return 0;
+ }
return (riwt * 256) / (clk / 1000000);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0e0a0789c2ed..e2a13ec2e30b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -155,7 +155,10 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
for (queue = 0; queue < maxq; queue++) {
struct stmmac_channel *ch = &priv->channel[queue];
- napi_disable(&ch->napi);
+ if (queue < rx_queues_cnt)
+ napi_disable(&ch->rx_napi);
+ if (queue < tx_queues_cnt)
+ napi_disable(&ch->tx_napi);
}
}
@@ -173,7 +176,10 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
for (queue = 0; queue < maxq; queue++) {
struct stmmac_channel *ch = &priv->channel[queue];
- napi_enable(&ch->napi);
+ if (queue < rx_queues_cnt)
+ napi_enable(&ch->rx_napi);
+ if (queue < tx_queues_cnt)
+ napi_enable(&ch->tx_napi);
}
}
@@ -534,7 +540,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
}
/**
- * stmmac_hwtstamp_ioctl - control hardware timestamping.
+ * stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
* @ifr: An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
@@ -544,7 +550,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
* Return Value:
* 0 on success and an appropriate -ve integer on failure.
*/
-static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
@@ -573,7 +579,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
}
if (copy_from_user(&config, ifr->ifr_data,
- sizeof(struct hwtstamp_config)))
+ sizeof(config)))
return -EFAULT;
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
@@ -597,12 +603,13 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
/* PTP v1, UDP, any kind of event packet */
config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- /* take time stamp for all event messages */
- if (xmac)
- snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
- else
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-
+ /* 'xmac' hardware can support Sync, Pdelay_Req and
+ * Pdelay_Resp by setting bit 14 and bits 17/16 to 01.
+ * This leaves Delay_Req timestamps out.
+ * Enable all events *and* general purpose message
+ * timestamping
+ */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
break;
@@ -633,10 +640,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (xmac)
- snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
- else
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -669,12 +673,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* PTP v2/802.AS1 any layer, any kind of event packet */
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
- /* take time stamp for all event messages */
- if (xmac)
- snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
- else
- snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -765,8 +764,31 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
(u32)now.tv_sec, now.tv_nsec);
}
+ memcpy(&priv->tstamp_config, &config, sizeof(config));
+
return copy_to_user(ifr->ifr_data, &config,
- sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+/**
+ * stmmac_hwtstamp_get - read hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function obtains the current hardware timestamping settings
+ * as requested.
+ */
+static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config *config = &priv->tstamp_config;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
}
/**
@@ -1549,22 +1571,18 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
if (priv->extend_desc) {
- rx_q->dma_erx = dma_zalloc_coherent(priv->device,
- DMA_RX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
+ rx_q->dma_erx = dma_alloc_coherent(priv->device,
+ DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
if (!rx_q->dma_erx)
goto err_dma;
} else {
- rx_q->dma_rx = dma_zalloc_coherent(priv->device,
- DMA_RX_SIZE *
- sizeof(struct
- dma_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
+ rx_q->dma_rx = dma_alloc_coherent(priv->device,
+ DMA_RX_SIZE * sizeof(struct dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
if (!rx_q->dma_rx)
goto err_dma;
}
@@ -1612,21 +1630,17 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
if (priv->extend_desc) {
- tx_q->dma_etx = dma_zalloc_coherent(priv->device,
- DMA_TX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
+ tx_q->dma_etx = dma_alloc_coherent(priv->device,
+ DMA_TX_SIZE * sizeof(struct dma_extended_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
if (!tx_q->dma_etx)
goto err_dma;
} else {
- tx_q->dma_tx = dma_zalloc_coherent(priv->device,
- DMA_TX_SIZE *
- sizeof(struct
- dma_desc),
- &tx_q->dma_tx_phy,
- GFP_KERNEL);
+ tx_q->dma_tx = dma_alloc_coherent(priv->device,
+ DMA_TX_SIZE * sizeof(struct dma_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
if (!tx_q->dma_tx)
goto err_dma;
}
@@ -1947,6 +1961,10 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
+ /* We still have pending packets, let's call for a new scheduling */
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
+
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
return count;
@@ -2037,23 +2055,15 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan);
struct stmmac_channel *ch = &priv->channel[chan];
- bool needs_work = false;
-
- if ((status & handle_rx) && ch->has_rx) {
- needs_work = true;
- } else {
- status &= ~handle_rx;
- }
- if ((status & handle_tx) && ch->has_tx) {
- needs_work = true;
- } else {
- status &= ~handle_tx;
+ if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ napi_schedule_irqoff(&ch->rx_napi);
}
- if (needs_work && napi_schedule_prep(&ch->napi)) {
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
- __napi_schedule(&ch->napi);
+ napi_schedule_irqoff(&ch->tx_napi);
}
return status;
@@ -2249,8 +2259,14 @@ static void stmmac_tx_timer(struct timer_list *t)
ch = &priv->channel[tx_q->queue_index];
- if (likely(napi_schedule_prep(&ch->napi)))
- __napi_schedule(&ch->napi);
+ /*
+ * If NAPI is already running we can miss some events. Let's rearm
+ * the timer and try again.
+ */
+ if (likely(napi_schedule_prep(&ch->tx_napi)))
+ __napi_schedule(&ch->tx_napi);
+ else
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
}
/**
@@ -3031,10 +3047,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q = &priv->tx_queue[queue];
+ if (priv->tx_path_in_lpi_mode)
+ stmmac_disable_eee_mode(priv);
+
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ /*
+ * There is no way to determine the number of TSO
+ * capable Queues. Let's always use Queue 0,
+ * because if TSO is supported then at least this
+ * one will be capable.
+ */
+ skb_set_queue_mapping(skb, 0);
+
return stmmac_tso_xmit(skb, dev);
+ }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3049,9 +3077,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (priv->tx_path_in_lpi_mode)
- stmmac_disable_eee_mode(priv);
-
entry = tx_q->cur_tx;
first_entry = entry;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -3497,7 +3522,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&ch->napi, skb);
+ napi_gro_receive(&ch->rx_napi, skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
@@ -3512,41 +3537,47 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
return count;
}
-/**
- * stmmac_poll - stmmac poll method (NAPI)
- * @napi : pointer to the napi structure.
- * @budget : maximum number of packets that the current CPU can receive from
- * all interfaces.
- * Description :
- * To look at the incoming frames and clear the tx resources.
- */
-static int stmmac_napi_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
struct stmmac_channel *ch =
- container_of(napi, struct stmmac_channel, napi);
+ container_of(napi, struct stmmac_channel, rx_napi);
struct stmmac_priv *priv = ch->priv_data;
- int work_done = 0, work_rem = budget;
u32 chan = ch->index;
+ int work_done;
priv->xstats.napi_poll++;
- if (ch->has_tx) {
- int done = stmmac_tx_clean(priv, work_rem, chan);
+ work_done = stmmac_rx(priv, budget, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ return work_done;
+}
- work_done += done;
- work_rem -= done;
- }
+static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, tx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_tx_queue *tx_q;
+ u32 chan = ch->index;
+ int work_done;
- if (ch->has_rx) {
- int done = stmmac_rx(priv, work_rem, chan);
+ priv->xstats.napi_poll++;
- work_done += done;
- work_rem -= done;
- }
+ work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
+ work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done))
stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ /* Force transmission restart */
+ tx_q = &priv->tx_queue[chan];
+ if (tx_q->cur_tx != tx_q->dirty_tx) {
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
+ chan);
+ }
+
return work_done;
}
@@ -3774,7 +3805,10 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = phy_mii_ioctl(dev->phydev, rq, cmd);
break;
case SIOCSHWTSTAMP:
- ret = stmmac_hwtstamp_ioctl(dev, rq);
+ ret = stmmac_hwtstamp_set(dev, rq);
+ break;
+ case SIOCGHWTSTAMP:
+ ret = stmmac_hwtstamp_get(dev, rq);
break;
default:
break;
@@ -4168,6 +4202,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
return ret;
}
+ /* Rx Watchdog is available in the COREs newer than the 3.40.
+ * In some cases, for example on buggy HW, this feature
+ * has to be disabled and this can be done by passing the
+ * riwt_off field from the platform.
+ */
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ priv->use_riwt = 1;
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
return 0;
}
@@ -4300,18 +4346,6 @@ int stmmac_dvr_probe(struct device *device,
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
- /* Rx Watchdog is available in the COREs newer than the 3.40.
- * In some case, for example on bugged HW this feature
- * has to be disable and this can be done by passing the
- * riwt_off field from the platform.
- */
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
- priv->use_riwt = 1;
- dev_info(priv->device,
- "Enable RX Mitigation via HW Watchdog Timer\n");
- }
-
/* Setup channels NAPI */
maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
@@ -4321,13 +4355,14 @@ int stmmac_dvr_probe(struct device *device,
ch->priv_data = priv;
ch->index = queue;
- if (queue < priv->plat->rx_queues_to_use)
- ch->has_rx = true;
- if (queue < priv->plat->tx_queues_to_use)
- ch->has_tx = true;
-
- netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
- NAPI_POLL_WEIGHT);
+ if (queue < priv->plat->rx_queues_to_use) {
+ netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->tx_queues_to_use) {
+ netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
+ NAPI_POLL_WEIGHT);
+ }
}
mutex_init(&priv->lock);
@@ -4383,7 +4418,10 @@ error_mdio_register:
for (queue = 0; queue < maxq; queue++) {
struct stmmac_channel *ch = &priv->channel[queue];
- netif_napi_del(&ch->napi);
+ if (queue < priv->plat->rx_queues_to_use)
+ netif_napi_del(&ch->rx_napi);
+ if (queue < priv->plat->tx_queues_to_use)
+ netif_napi_del(&ch->tx_napi);
}
error_hw_init:
destroy_workqueue(priv->wq);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index c54a50dbd5ac..d819e8eaba12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
+ int i;
+
stmmac_dvr_remove(&pdev->dev);
+
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index ecccf895fd7e..e852821289cf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -59,9 +59,14 @@
#define PTP_TCR_TSEVNTENA BIT(14)
/* Enable Snapshot for Messages Relevant to Master */
#define PTP_TCR_TSMSTRENA BIT(15)
-/* Select PTP packets for Taking Snapshots */
+/* Select PTP packets for Taking Snapshots
+ * On gmac4 specifically:
+ * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled.
+ * or
+ * Enable SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp,
+ * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled
+ */
#define PTP_TCR_SNAPTYPSEL_1 BIT(16)
-#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
/* Enable MAC address for PTP Frame Filtering */
#define PTP_TCR_TSENMACADDR BIT(18)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 531294f4978b..58ea18af9813 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
/* Queue 0 is not AVB capable */
if (queue <= 0 || queue >= tx_queues_count)
return -EINVAL;
+ if (!priv->dma_cap.av)
+ return -EOPNOTSUPP;
if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 9020b084b953..6fc05c106afc 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1,22 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
*
* Copyright (C) 2004 Sun Microsystems Inc.
* Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* This driver uses the sungem driver (c) David Miller
* (davem@redhat.com) as its basis.
*
@@ -1911,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
cp->net_stats[ring].tx_packets++;
cp->net_stats[ring].tx_bytes += skb->len;
spin_unlock(&cp->stat_lock[ring]);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
}
cp->tx_old[ring] = entry;
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index 13f3860496a8..ae5f05f03f88 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -1,23 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
* cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
*
* Copyright (C) 2004 Sun Microsystems Inc.
* Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* vendor id: 0x108E (Sun Microsystems, Inc.)
* device id: 0xabba (Cassini)
* revision ids: 0x01 = Cassini
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 720b7ac77f3b..e9b757b03b56 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp)
DTX(("skb(%p) ", skb));
bp->tx_skbs[elem] = NULL;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
elem = NEXT_TX(elem);
}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index b9221fc1674d..3e7631160384 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2760,7 +2760,7 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
void __iomem *p = pci_map_rom(pdev, &size);
if (p) {
- int found;
+ int found;
found = readb(p) == 0x55 &&
readb(p + 1) == 0xaa &&
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ff641cf30a4e..d007dfeba5c3 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp)
this = &txbase[elem];
}
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
dev->stats.tx_packets++;
}
hp->tx_old = elem;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dc966ddb6d81..b24c11187017 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
tx_level -= db->rptr->len; /* '-' koz len is negative */
/* now should come skb pointer - free it */
- dev_kfree_skb_irq(db->rptr->addr.skb);
+ dev_consume_skb_irq(db->rptr->addr.skb);
bdx_tx_db_inc_rptr(db);
}
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index bb126be1eb72..8b21b40a9fe5 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,10 +49,11 @@ config TI_DAVINCI_CPDMA
will be called davinci_cpdma. This is recommended.
config TI_CPSW_PHY_SEL
- bool
+ bool "TI CPSW Phy mode Selection (DEPRECATED)"
+ default n
---help---
This driver supports configuring of the phy mode connected to
- the CPSW.
+ the CPSW. DEPRECATED: use PHY_TI_GMII_SEL.
config TI_CPSW_ALE
tristate "TI CPSW ALE Support"
@@ -64,7 +65,6 @@ config TI_CPSW
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
- select TI_CPSW_PHY_SEL
select TI_CPSW_ALE
select MFD_SYSCON
select REGMAP
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 810dfc7de1f9..e2d47b24a869 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -608,7 +608,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
netdev_dbg(dev, "sent 0x%p, len=%d\n",
desc->skb, desc->skb->len);
- dev_kfree_skb_irq(desc->skb);
+ dev_consume_skb_irq(desc->skb);
desc->skb = NULL;
if (__netif_subqueue_stopped(dev, queue))
netif_wake_subqueue(dev, queue);
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 396e1cd10667..fec275e2208d 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -78,7 +78,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
- };
+ }
mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
mask |= BIT(slave + 4);
@@ -133,7 +133,7 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
- };
+ }
switch (slave) {
case 0:
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index cf111db3dc27..907e05fc22e4 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -21,7 +21,13 @@
((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+#if IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL)
void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+#else
+static inline
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{}
+#endif
int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1f612268c998..d847f672a705 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
const char *name;
char node_name[32];
- if (of_property_read_string(node, "label", &name) < 0) {
+ if (of_property_read_string(child, "label", &name) < 0) {
snprintf(node_name, sizeof(node_name), "%pOFn", child);
name = node_name;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index edcd1e60b30d..37925a1d58de 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1311,13 +1311,13 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}
- data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size,
- &data->rxdma, GFP_KERNEL);
+ data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
+ &data->rxdma, GFP_KERNEL);
if (!data->rxring)
return -ENOMEM;
- data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size,
- &data->txdma, GFP_KERNEL);
+ data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
+ &data->txdma, GFP_KERNEL);
if (!data->txring) {
dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
data->rxdma);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 82412691ee66..27f6cf140845 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
le16_to_cpu(pktlen), DMA_TO_DEVICE);
}
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
tdinfo->skb = NULL;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2241f9897092..44efffbe7970 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
@@ -630,7 +630,7 @@ static void temac_start_xmit_done(struct net_device *ndev)
dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
DMA_TO_DEVICE);
if (cur_p->app4)
- dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
+ dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 12a14609ec47..ec7e7ec24ff9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -199,15 +199,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
@@ -595,7 +595,7 @@ static void axienet_start_xmit_done(struct net_device *ndev)
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
if (cur_p->app4)
- dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
+ dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
/*cur_p->phys = 0;*/
cur_p->app0 = 0;
cur_p->app1 = 0;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 639e3e99af46..b03a417d0073 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -581,7 +581,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
return;
dev->stats.tx_bytes += lp->deferred_skb->len;
- dev_kfree_skb_irq(lp->deferred_skb);
+ dev_consume_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index aee55c03def0..ed6623a9801e 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -139,7 +139,7 @@
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
-#define free_buffer_irq dev_kfree_skb_irq
+#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 61fceee73c1b..56b7791911bf 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1139,9 +1139,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
#endif
sizeof(PI_CONSUMER_BLOCK) +
(PI_ALIGN_K_DESC_BLK - 1);
- bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
- &bp->kmalloced_dma,
- GFP_ATOMIC);
+ bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
+ &bp->kmalloced_dma,
+ GFP_ATOMIC);
if (top_v == NULL)
return DFX_K_FAILURE;
@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp)
bp->descr_block_virt->xmt_data[comp].long_1,
p_xmt_drv_descr->p_skb->len,
DMA_TO_DEVICE);
- dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
+ dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
/*
* Move to start of next packet by updating completion index
diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c
index 6ef44c480bd5..f29f5a6a45ab 100644
--- a/drivers/net/fddi/skfp/pcmplc.c
+++ b/drivers/net/fddi/skfp/pcmplc.c
@@ -851,6 +851,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
case ACTIONS(PC5_SIGNAL) :
ACTIONS_DONE() ;
+ /* fall through */
case PC5_SIGNAL :
if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT))
break ;
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 72433f3efc74..5d661f60b101 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -409,10 +409,10 @@ static int skfp_driver_init(struct net_device *dev)
if (bp->SharedMemSize > 0) {
bp->SharedMemSize += 16; // for descriptor alignment
- bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev,
- bp->SharedMemSize,
- &bp->SharedMemDMA,
- GFP_ATOMIC);
+ bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
+ bp->SharedMemSize,
+ &bp->SharedMemDMA,
+ GFP_ATOMIC);
if (!bp->SharedMemAddr) {
printk("could not allocate mem for ");
printk("hardware module: %ld byte\n",
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 58bbba8582b0..5583d993480d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -692,15 +692,20 @@ out:
static int geneve_open(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
- bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
bool metadata = geneve->collect_md;
+ bool ipv4, ipv6;
int ret = 0;
+ ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+ ipv4 = !ipv6 || metadata;
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6 || metadata)
+ if (ipv6) {
ret = geneve_sock_add(geneve, true);
+ if (ret < 0 && ret != -EAFNOSUPPORT)
+ ipv4 = false;
+ }
#endif
- if (!ret && (!ipv6 || metadata))
+ if (ipv4)
ret = geneve_sock_add(geneve, false);
if (ret < 0)
geneve_sock_release(geneve);
@@ -1512,9 +1517,13 @@ static void geneve_link_config(struct net_device *dev,
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
- struct rt6_info *rt = rt6_lookup(geneve->net,
- &info->key.u.ipv6.dst, NULL, 0,
- NULL, 0);
+ struct rt6_info *rt;
+
+ if (!__in6_dev_get(dev))
+ break;
+
+ rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
+ NULL, 0);
if (rt && rt->dst.dev)
ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 190f66c88479..ed0841630990 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -203,32 +203,6 @@ static inline void ser12_set_divisor(struct net_device *dev,
*/
}
-/* --------------------------------------------------------------------- */
-
-#if 0
-static inline unsigned int hweight16(unsigned int w)
- __attribute__ ((unused));
-static inline unsigned int hweight8(unsigned int w)
- __attribute__ ((unused));
-
-static inline unsigned int hweight16(unsigned int w)
-{
- unsigned short res = (w & 0x5555) + ((w >> 1) & 0x5555);
- res = (res & 0x3333) + ((res >> 2) & 0x3333);
- res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F);
- return (res & 0x00FF) + ((res >> 8) & 0x00FF);
-}
-
-static inline unsigned int hweight8(unsigned int w)
-{
- unsigned short res = (w & 0x55) + ((w >> 1) & 0x55);
- res = (res & 0x33) + ((res >> 2) & 0x33);
- return (res & 0x0F) + ((res >> 4) & 0x0F);
-}
-#endif
-
-/* --------------------------------------------------------------------- */
-
static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timespec64 *ts, unsigned char curs)
{
int timediff;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ef6f766f6389..e859ae2e42d5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -144,6 +144,8 @@ struct hv_netvsc_packet {
u32 total_data_buflen;
};
+#define NETVSC_HASH_KEYLEN 40
+
struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
u32 num_chn;
@@ -151,6 +153,8 @@ struct netvsc_device_info {
u32 recv_sections;
u32 send_section_size;
u32 recv_section_size;
+
+ u8 rss_key[NETVSC_HASH_KEYLEN];
};
enum rndis_device_state {
@@ -160,8 +164,6 @@ enum rndis_device_state {
RNDIS_DEV_DATAINITIALIZED,
};
-#define NETVSC_HASH_KEYLEN 40
-
struct rndis_device {
struct net_device *ndev;
@@ -209,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
+int rndis_set_subchannel(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct netvsc_device_info *dev_info);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
@@ -1177,7 +1181,7 @@ enum ndis_per_pkt_info_type {
enum rndis_per_pkt_info_interal_type {
RNDIS_PKTINFO_ID = 1,
- /* Add more memebers here */
+ /* Add more members here */
RNDIS_PKTINFO_MAX
};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 922054c1d544..813d195bbd57 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w)
rdev = nvdev->extension;
if (rdev) {
- ret = rndis_set_subchannel(rdev->ndev, nvdev);
+ ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
if (ret == 0) {
netif_device_attach(rdev->ndev);
} else {
@@ -1331,7 +1331,7 @@ void netvsc_channel_cb(void *context)
prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
if (napi_schedule_prep(&nvchan->napi)) {
- /* disable interupts from host */
+ /* disable interrupts from host */
hv_begin_read(rbi);
__napi_schedule_irqoff(&nvchan->napi);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 91ed15ea5883..cf4897043e83 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -370,7 +370,7 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
{
int j = 0;
- /* Deal with compund pages by ignoring unused part
+ /* Deal with compound pages by ignoring unused part
* of the page.
*/
page += (offset >> PAGE_SHIFT);
@@ -744,6 +744,14 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph->check = 0;
+ iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct netvsc_channel *nvchan)
{
@@ -770,9 +778,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
/* skb is already created with CHECKSUM_NONE */
skb_checksum_none_assert(skb);
- /*
- * In Linux, the IP checksum is always checked.
- * Do L4 checksum offload if enabled and present.
+ /* Incoming packets may have IP header checksum verified by the host.
+ * They may not have IP header checksum computed after coalescing.
+ * We compute it here if the flags are set, because on Linux, the IP
+ * checksum is always checked.
+ */
+ if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+ csum_info->receive.ip_checksum_succeeded &&
+ skb->protocol == htons(ETH_P_IP))
+ netvsc_comp_ipcsum(skb);
+
+ /* Do L4 checksum offload if enabled and present.
*/
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
@@ -858,6 +874,39 @@ static void netvsc_get_channels(struct net_device *net,
}
}
+/* Alloc struct netvsc_device_info, and initialize it from either existing
+ * struct netvsc_device, or from default values.
+ */
+static struct netvsc_device_info *netvsc_devinfo_get
+ (struct netvsc_device *nvdev)
+{
+ struct netvsc_device_info *dev_info;
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
+
+ if (!dev_info)
+ return NULL;
+
+ if (nvdev) {
+ dev_info->num_chn = nvdev->num_chn;
+ dev_info->send_sections = nvdev->send_section_cnt;
+ dev_info->send_section_size = nvdev->send_section_size;
+ dev_info->recv_sections = nvdev->recv_section_cnt;
+ dev_info->recv_section_size = nvdev->recv_section_size;
+
+ memcpy(dev_info->rss_key, nvdev->extension->rss_key,
+ NETVSC_HASH_KEYLEN);
+ } else {
+ dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+ dev_info->send_sections = NETVSC_DEFAULT_TX;
+ dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
+ dev_info->recv_sections = NETVSC_DEFAULT_RX;
+ dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
+ }
+
+ return dev_info;
+}
+
static int netvsc_detach(struct net_device *ndev,
struct netvsc_device *nvdev)
{
@@ -909,7 +958,7 @@ static int netvsc_attach(struct net_device *ndev,
return PTR_ERR(nvdev);
if (nvdev->num_chn > 1) {
- ret = rndis_set_subchannel(ndev, nvdev);
+ ret = rndis_set_subchannel(ndev, nvdev, dev_info);
/* if unavailable, just proceed with one queue */
if (ret) {
@@ -943,7 +992,7 @@ static int netvsc_set_channels(struct net_device *net,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
unsigned int orig, count = channels->combined_count;
- struct netvsc_device_info device_info;
+ struct netvsc_device_info *device_info;
int ret;
/* We do not support separate count for rx, tx, or other */
@@ -962,24 +1011,26 @@ static int netvsc_set_channels(struct net_device *net,
orig = nvdev->num_chn;
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = count;
- device_info.send_sections = nvdev->send_section_cnt;
- device_info.send_section_size = nvdev->send_section_size;
- device_info.recv_sections = nvdev->recv_section_cnt;
- device_info.recv_section_size = nvdev->recv_section_size;
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
+ device_info->num_chn = count;
ret = netvsc_detach(net, nvdev);
if (ret)
- return ret;
+ goto out;
- ret = netvsc_attach(net, &device_info);
+ ret = netvsc_attach(net, device_info);
if (ret) {
- device_info.num_chn = orig;
- if (netvsc_attach(net, &device_info))
+ device_info->num_chn = orig;
+ if (netvsc_attach(net, device_info))
netdev_err(net, "restoring channel setting failed\n");
}
+out:
+ kfree(device_info);
return ret;
}
@@ -1048,48 +1099,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
int orig_mtu = ndev->mtu;
- struct netvsc_device_info device_info;
+ struct netvsc_device_info *device_info;
int ret = 0;
if (!nvdev || nvdev->destroy)
return -ENODEV;
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
/* Change MTU of underlying VF netdev first. */
if (vf_netdev) {
ret = dev_set_mtu(vf_netdev, mtu);
if (ret)
- return ret;
+ goto out;
}
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = nvdev->num_chn;
- device_info.send_sections = nvdev->send_section_cnt;
- device_info.send_section_size = nvdev->send_section_size;
- device_info.recv_sections = nvdev->recv_section_cnt;
- device_info.recv_section_size = nvdev->recv_section_size;
-
ret = netvsc_detach(ndev, nvdev);
if (ret)
goto rollback_vf;
ndev->mtu = mtu;
- ret = netvsc_attach(ndev, &device_info);
- if (ret)
- goto rollback;
-
- return 0;
+ ret = netvsc_attach(ndev, device_info);
+ if (!ret)
+ goto out;
-rollback:
/* Attempt rollback to original MTU */
ndev->mtu = orig_mtu;
- if (netvsc_attach(ndev, &device_info))
+ if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
if (vf_netdev)
dev_set_mtu(vf_netdev, orig_mtu);
+out:
+ kfree(device_info);
return ret;
}
@@ -1674,7 +1722,7 @@ static int netvsc_set_ringparam(struct net_device *ndev,
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
- struct netvsc_device_info device_info;
+ struct netvsc_device_info *device_info;
struct ethtool_ringparam orig;
u32 new_tx, new_rx;
int ret = 0;
@@ -1694,26 +1742,29 @@ static int netvsc_set_ringparam(struct net_device *ndev,
new_rx == orig.rx_pending)
return 0; /* no change */
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = nvdev->num_chn;
- device_info.send_sections = new_tx;
- device_info.send_section_size = nvdev->send_section_size;
- device_info.recv_sections = new_rx;
- device_info.recv_section_size = nvdev->recv_section_size;
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
+ device_info->send_sections = new_tx;
+ device_info->recv_sections = new_rx;
ret = netvsc_detach(ndev, nvdev);
if (ret)
- return ret;
+ goto out;
- ret = netvsc_attach(ndev, &device_info);
+ ret = netvsc_attach(ndev, device_info);
if (ret) {
- device_info.send_sections = orig.tx_pending;
- device_info.recv_sections = orig.rx_pending;
+ device_info->send_sections = orig.tx_pending;
+ device_info->recv_sections = orig.rx_pending;
- if (netvsc_attach(ndev, &device_info))
+ if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring ringparam failed");
}
+out:
+ kfree(device_info);
return ret;
}
@@ -2088,7 +2139,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
return NOTIFY_DONE;
- /* if syntihetic interface is a different namespace,
+ /* if synthetic interface is a different namespace,
* then move the VF to that namespace; join will be
* done again in that context.
*/
@@ -2167,7 +2218,7 @@ static int netvsc_probe(struct hv_device *dev,
{
struct net_device *net = NULL;
struct net_device_context *net_device_ctx;
- struct netvsc_device_info device_info;
+ struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
int ret = -ENOMEM;
@@ -2214,21 +2265,21 @@ static int netvsc_probe(struct hv_device *dev,
netif_set_real_num_rx_queues(net, 1);
/* Notify the netvsc driver of the new device */
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = VRSS_CHANNEL_DEFAULT;
- device_info.send_sections = NETVSC_DEFAULT_TX;
- device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
- device_info.recv_sections = NETVSC_DEFAULT_RX;
- device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
-
- nvdev = rndis_filter_device_add(dev, &device_info);
+ device_info = netvsc_devinfo_get(NULL);
+
+ if (!device_info) {
+ ret = -ENOMEM;
+ goto devinfo_failed;
+ }
+
+ nvdev = rndis_filter_device_add(dev, device_info);
if (IS_ERR(nvdev)) {
ret = PTR_ERR(nvdev);
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
goto rndis_failed;
}
- memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2236,7 +2287,7 @@ static int netvsc_probe(struct hv_device *dev,
* netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
* -> ... -> device_add() -> ... -> __device_attach() can't get
* the device lock, so all the subchannels can't be processed --
- * finally netvsc_subchan_work() hangs for ever.
+ * finally netvsc_subchan_work() hangs forever.
*/
rtnl_lock();
@@ -2266,12 +2317,16 @@ static int netvsc_probe(struct hv_device *dev,
list_add(&net_device_ctx->list, &netvsc_dev_list);
rtnl_unlock();
+
+ kfree(device_info);
return 0;
register_failed:
rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
+ kfree(device_info);
+devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
no_stats:
hv_set_drvdata(dev, NULL);
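
The netvsc_drv.c hunks above replace the on-stack netvsc_device_info with a heap copy obtained from netvsc_devinfo_get(), whose definition lands earlier in this patch and is not part of this excerpt. A minimal sketch of what such a helper plausibly looks like, built only from the fields and defaults visible in the removed lines above; the real helper most likely also snapshots the current RSS key, since rndis_set_subchannel() below consumes dev_info->rss_key:

static struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;

	/* Sketch only: allocate a devinfo and seed it either from the live
	 * device or from the driver defaults, so every caller can simply
	 * kfree() it on its exit path as the hunks above do.
	 */
	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
	if (!dev_info)
		return NULL;

	if (nvdev) {
		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;
		/* the real helper presumably also copies the active RSS key here */
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}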
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8b537a049c1e..73b60592de06 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -774,8 +774,8 @@ cleanup:
return ret;
}
-int rndis_filter_set_rss_param(struct rndis_device *rdev,
- const u8 *rss_key)
+static int rndis_set_rss_param_msg(struct rndis_device *rdev,
+ const u8 *rss_key, u16 flag)
{
struct net_device *ndev = rdev->ndev;
struct rndis_request *request;
@@ -804,7 +804,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
- rssp->flag = 0;
+ rssp->flag = flag;
rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
NDIS_HASH_TCP_IPV6;
@@ -829,9 +829,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status == RNDIS_STATUS_SUCCESS)
- memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
- else {
+ if (set_complete->status == RNDIS_STATUS_SUCCESS) {
+ if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
+ !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
+ memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+
+ } else {
netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
set_complete->status);
ret = -EINVAL;
@@ -842,6 +845,16 @@ cleanup:
return ret;
}
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+ const u8 *rss_key)
+{
+ /* Disable RSS before change */
+ rndis_set_rss_param_msg(rdev, rss_key,
+ NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
+
+ return rndis_set_rss_param_msg(rdev, rss_key, 0);
+}
+
static int rndis_filter_query_device_link_status(struct rndis_device *dev,
struct netvsc_device *net_device)
{
@@ -1121,7 +1134,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
* This breaks overlap of processing the host message for the
* new primary channel with the initialization of sub-channels.
*/
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
+int rndis_set_subchannel(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct netvsc_device_info *dev_info)
{
struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1161,8 +1176,11 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
wait_event(nvdev->subchan_open,
atomic_read(&nvdev->open_chn) == nvdev->num_chn);
- /* ignore failues from setting rss parameters, still have channels */
- rndis_filter_set_rss_param(rdev, netvsc_hash_key);
+ /* ignore failures from setting rss parameters, still have channels */
+ if (dev_info)
+ rndis_filter_set_rss_param(rdev, dev_info->rss_key);
+ else
+ rndis_filter_set_rss_param(rdev, netvsc_hash_key);
netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
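
With the rndis_filter.c refactor above, rndis_filter_set_rss_param() first sends a message with NDIS_RSS_PARAM_FLAG_DISABLE_RSS and only then programs the new key, and the low-level helper honours the flag word when deciding whether to cache the key. Purely as an illustration (not part of the patch), the same static helper could also pause and resume spreading without touching the cached key; such a caller would have to live in rndis_filter.c since rndis_set_rss_param_msg() is static:

static int rndis_rss_pause_resume(struct rndis_device *rdev)
{
	int ret;

	/* stop the host from spreading traffic across the channels */
	ret = rndis_set_rss_param_msg(rdev, rdev->rss_key,
				      NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
	if (ret)
		return ret;

	/* ... reconfigure queues or the indirection table here ... */

	/* re-enable RSS with the unchanged, already-cached key */
	return rndis_set_rss_param_msg(rdev, rdev->rss_key, 0);
}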
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 44de81e5f140..c589f5ae75bb 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context)
}
break;
case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
- /* rx is starting */
- dev_dbg(printdev(lp), "RX is starting\n");
- mcr20a_handle_rx(lp);
+ /* rx is starting */
+ dev_dbg(printdev(lp), "RX is starting\n");
+ mcr20a_handle_rx(lp);
break;
case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile
index 8a2c64dc9641..3ee95367a994 100644
--- a/drivers/net/ipvlan/Makefile
+++ b/drivers/net/ipvlan/Makefile
@@ -5,4 +5,5 @@
obj-$(CONFIG_IPVLAN) += ipvlan.o
obj-$(CONFIG_IPVTAP) += ipvtap.o
-ipvlan-objs := ipvlan_core.o ipvlan_main.o
+ipvlan-objs-$(CONFIG_IPVLAN_L3S) += ipvlan_l3s.o
+ipvlan-objs := ipvlan_core.o ipvlan_main.o $(ipvlan-objs-y)
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index adb826f55e60..b906d2f6bd04 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -165,10 +165,9 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6);
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
void ipvlan_ht_addr_del(struct ipvl_addr *addr);
-struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
- u16 proto);
-unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state);
+struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ int addr_type, bool use_dest);
+void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type);
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
unsigned int len, bool success, bool mcast);
int ipvlan_link_new(struct net *src_net, struct net_device *dev,
@@ -177,6 +176,36 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
void ipvlan_link_delete(struct net_device *dev, struct list_head *head);
void ipvlan_link_setup(struct net_device *dev);
int ipvlan_link_register(struct rtnl_link_ops *ops);
+#ifdef CONFIG_IPVLAN_L3S
+int ipvlan_l3s_register(struct ipvl_port *port);
+void ipvlan_l3s_unregister(struct ipvl_port *port);
+void ipvlan_migrate_l3s_hook(struct net *oldnet, struct net *newnet);
+int ipvlan_l3s_init(void);
+void ipvlan_l3s_cleanup(void);
+#else
+static inline int ipvlan_l3s_register(struct ipvl_port *port)
+{
+ return -ENOTSUPP;
+}
+
+static inline void ipvlan_l3s_unregister(struct ipvl_port *port)
+{
+}
+
+static inline void ipvlan_migrate_l3s_hook(struct net *oldnet,
+ struct net *newnet)
+{
+}
+
+static inline int ipvlan_l3s_init(void)
+{
+ return 0;
+}
+
+static inline void ipvlan_l3s_cleanup(void)
+{
+}
+#endif /* CONFIG_IPVLAN_L3S */
static inline bool netif_is_ipvlan_port(const struct net_device *dev)
{
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 1a8132eb2a3e..e0f5bc82b10c 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -138,7 +138,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
return ret;
}
-static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
+void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
void *lyr3h = NULL;
@@ -355,9 +355,8 @@ out:
return ret;
}
-static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
- void *lyr3h, int addr_type,
- bool use_dest)
+struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ int addr_type, bool use_dest)
{
struct ipvl_addr *addr = NULL;
@@ -647,7 +646,9 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
case IPVLAN_MODE_L2:
return ipvlan_xmit_mode_l2(skb, dev);
case IPVLAN_MODE_L3:
+#ifdef CONFIG_IPVLAN_L3S
case IPVLAN_MODE_L3S:
+#endif
return ipvlan_xmit_mode_l3(skb, dev);
}
@@ -743,8 +744,10 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
return ipvlan_handle_mode_l2(pskb, port);
case IPVLAN_MODE_L3:
return ipvlan_handle_mode_l3(pskb, port);
+#ifdef CONFIG_IPVLAN_L3S
case IPVLAN_MODE_L3S:
return RX_HANDLER_PASS;
+#endif
}
/* Should not reach here */
@@ -753,97 +756,3 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
-
-static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct ipvl_addr *addr = NULL;
- struct ipvl_port *port;
- void *lyr3h;
- int addr_type;
-
- if (!dev || !netif_is_ipvlan_port(dev))
- goto out;
-
- port = ipvlan_port_get_rcu(dev);
- if (!port || port->mode != IPVLAN_MODE_L3S)
- goto out;
-
- lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
- if (!lyr3h)
- goto out;
-
- addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
-out:
- return addr;
-}
-
-struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
- u16 proto)
-{
- struct ipvl_addr *addr;
- struct net_device *sdev;
-
- addr = ipvlan_skb_to_addr(skb, dev);
- if (!addr)
- goto out;
-
- sdev = addr->master->dev;
- switch (proto) {
- case AF_INET:
- {
- int err;
- struct iphdr *ip4h = ip_hdr(skb);
-
- err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
- ip4h->tos, sdev);
- if (unlikely(err))
- goto out;
- break;
- }
-#if IS_ENABLED(CONFIG_IPV6)
- case AF_INET6:
- {
- struct dst_entry *dst;
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
- int flags = RT6_LOOKUP_F_HAS_SADDR;
- struct flowi6 fl6 = {
- .flowi6_iif = sdev->ifindex,
- .daddr = ip6h->daddr,
- .saddr = ip6h->saddr,
- .flowlabel = ip6_flowinfo(ip6h),
- .flowi6_mark = skb->mark,
- .flowi6_proto = ip6h->nexthdr,
- };
-
- skb_dst_drop(skb);
- dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6,
- skb, flags);
- skb_dst_set(skb, dst);
- break;
- }
-#endif
- default:
- break;
- }
-
-out:
- return skb;
-}
-
-unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct ipvl_addr *addr;
- unsigned int len;
-
- addr = ipvlan_skb_to_addr(skb, skb->dev);
- if (!addr)
- goto out;
-
- skb->dev = addr->master->dev;
- len = skb->len + ETH_HLEN;
- ipvlan_count_rx(addr->master, len, true, false);
-out:
- return NF_ACCEPT;
-}
diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
new file mode 100644
index 000000000000..d17480a911a3
--- /dev/null
+++ b/drivers/net/ipvlan/ipvlan_l3s.c
@@ -0,0 +1,227 @@
+/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include "ipvlan.h"
+
+static unsigned int ipvlan_netid __read_mostly;
+
+struct ipvlan_netns {
+ unsigned int ipvl_nf_hook_refcnt;
+};
+
+static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ipvl_addr *addr = NULL;
+ struct ipvl_port *port;
+ int addr_type;
+ void *lyr3h;
+
+ if (!dev || !netif_is_ipvlan_port(dev))
+ goto out;
+
+ port = ipvlan_port_get_rcu(dev);
+ if (!port || port->mode != IPVLAN_MODE_L3S)
+ goto out;
+
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
+ if (!lyr3h)
+ goto out;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
+out:
+ return addr;
+}
+
+static struct sk_buff *ipvlan_l3_rcv(struct net_device *dev,
+ struct sk_buff *skb, u16 proto)
+{
+ struct ipvl_addr *addr;
+ struct net_device *sdev;
+
+ addr = ipvlan_skb_to_addr(skb, dev);
+ if (!addr)
+ goto out;
+
+ sdev = addr->master->dev;
+ switch (proto) {
+ case AF_INET:
+ {
+ struct iphdr *ip4h = ip_hdr(skb);
+ int err;
+
+ err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
+ ip4h->tos, sdev);
+ if (unlikely(err))
+ goto out;
+ break;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ {
+ struct dst_entry *dst;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ int flags = RT6_LOOKUP_F_HAS_SADDR;
+ struct flowi6 fl6 = {
+ .flowi6_iif = sdev->ifindex,
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
+
+ skb_dst_drop(skb);
+ dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6,
+ skb, flags);
+ skb_dst_set(skb, dst);
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+out:
+ return skb;
+}
+
+static const struct l3mdev_ops ipvl_l3mdev_ops = {
+ .l3mdev_l3_rcv = ipvlan_l3_rcv,
+};
+
+static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct ipvl_addr *addr;
+ unsigned int len;
+
+ addr = ipvlan_skb_to_addr(skb, skb->dev);
+ if (!addr)
+ goto out;
+
+ skb->dev = addr->master->dev;
+ len = skb->len + ETH_HLEN;
+ ipvlan_count_rx(addr->master, len, true, false);
+out:
+ return NF_ACCEPT;
+}
+
+static const struct nf_hook_ops ipvl_nfops[] = {
+ {
+ .hook = ipvlan_nf_input,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = INT_MAX,
+ },
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .hook = ipvlan_nf_input,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = INT_MAX,
+ },
+#endif
+};
+
+static int ipvlan_register_nf_hook(struct net *net)
+{
+ struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
+ int err = 0;
+
+ if (!vnet->ipvl_nf_hook_refcnt) {
+ err = nf_register_net_hooks(net, ipvl_nfops,
+ ARRAY_SIZE(ipvl_nfops));
+ if (!err)
+ vnet->ipvl_nf_hook_refcnt = 1;
+ } else {
+ vnet->ipvl_nf_hook_refcnt++;
+ }
+
+ return err;
+}
+
+static void ipvlan_unregister_nf_hook(struct net *net)
+{
+ struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
+
+ if (WARN_ON(!vnet->ipvl_nf_hook_refcnt))
+ return;
+
+ vnet->ipvl_nf_hook_refcnt--;
+ if (!vnet->ipvl_nf_hook_refcnt)
+ nf_unregister_net_hooks(net, ipvl_nfops,
+ ARRAY_SIZE(ipvl_nfops));
+}
+
+void ipvlan_migrate_l3s_hook(struct net *oldnet, struct net *newnet)
+{
+ struct ipvlan_netns *old_vnet;
+
+ ASSERT_RTNL();
+
+ old_vnet = net_generic(oldnet, ipvlan_netid);
+ if (!old_vnet->ipvl_nf_hook_refcnt)
+ return;
+
+ ipvlan_register_nf_hook(newnet);
+ ipvlan_unregister_nf_hook(oldnet);
+}
+
+static void ipvlan_ns_exit(struct net *net)
+{
+ struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
+
+ if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) {
+ vnet->ipvl_nf_hook_refcnt = 0;
+ nf_unregister_net_hooks(net, ipvl_nfops,
+ ARRAY_SIZE(ipvl_nfops));
+ }
+}
+
+static struct pernet_operations ipvlan_net_ops = {
+ .id = &ipvlan_netid,
+ .size = sizeof(struct ipvlan_netns),
+ .exit = ipvlan_ns_exit,
+};
+
+int ipvlan_l3s_init(void)
+{
+ return register_pernet_subsys(&ipvlan_net_ops);
+}
+
+void ipvlan_l3s_cleanup(void)
+{
+ unregister_pernet_subsys(&ipvlan_net_ops);
+}
+
+int ipvlan_l3s_register(struct ipvl_port *port)
+{
+ struct net_device *dev = port->dev;
+ int ret;
+
+ ASSERT_RTNL();
+
+ ret = ipvlan_register_nf_hook(read_pnet(&port->pnet));
+ if (!ret) {
+ dev->l3mdev_ops = &ipvl_l3mdev_ops;
+ dev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
+ }
+
+ return ret;
+}
+
+void ipvlan_l3s_unregister(struct ipvl_port *port)
+{
+ struct net_device *dev = port->dev;
+
+ ASSERT_RTNL();
+
+ dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+ ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
+ dev->l3mdev_ops = NULL;
+}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 19bdde60680c..bbeb1623e2d5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -9,73 +9,10 @@
#include "ipvlan.h"
-static unsigned int ipvlan_netid __read_mostly;
-
-struct ipvlan_netns {
- unsigned int ipvl_nf_hook_refcnt;
-};
-
-static const struct nf_hook_ops ipvl_nfops[] = {
- {
- .hook = ipvlan_nf_input,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = INT_MAX,
- },
-#if IS_ENABLED(CONFIG_IPV6)
- {
- .hook = ipvlan_nf_input,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = INT_MAX,
- },
-#endif
-};
-
-static const struct l3mdev_ops ipvl_l3mdev_ops = {
- .l3mdev_l3_rcv = ipvlan_l3_rcv,
-};
-
-static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
-{
- ipvlan->dev->mtu = dev->mtu;
-}
-
-static int ipvlan_register_nf_hook(struct net *net)
-{
- struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
- int err = 0;
-
- if (!vnet->ipvl_nf_hook_refcnt) {
- err = nf_register_net_hooks(net, ipvl_nfops,
- ARRAY_SIZE(ipvl_nfops));
- if (!err)
- vnet->ipvl_nf_hook_refcnt = 1;
- } else {
- vnet->ipvl_nf_hook_refcnt++;
- }
-
- return err;
-}
-
-static void ipvlan_unregister_nf_hook(struct net *net)
-{
- struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
-
- if (WARN_ON(!vnet->ipvl_nf_hook_refcnt))
- return;
-
- vnet->ipvl_nf_hook_refcnt--;
- if (!vnet->ipvl_nf_hook_refcnt)
- nf_unregister_net_hooks(net, ipvl_nfops,
- ARRAY_SIZE(ipvl_nfops));
-}
-
static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
struct netlink_ext_ack *extack)
{
struct ipvl_dev *ipvlan;
- struct net_device *mdev = port->dev;
unsigned int flags;
int err;
@@ -97,17 +34,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
}
if (nval == IPVLAN_MODE_L3S) {
/* New mode is L3S */
- err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
- if (!err) {
- mdev->l3mdev_ops = &ipvl_l3mdev_ops;
- mdev->priv_flags |= IFF_L3MDEV_MASTER;
- } else
+ err = ipvlan_l3s_register(port);
+ if (err)
goto fail;
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
- mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
- ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
- mdev->l3mdev_ops = NULL;
+ ipvlan_l3s_unregister(port);
}
port->mode = nval;
}
@@ -166,11 +98,8 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
struct sk_buff *skb;
- if (port->mode == IPVLAN_MODE_L3S) {
- dev->priv_flags &= ~IFF_L3MDEV_MASTER;
- ipvlan_unregister_nf_hook(dev_net(dev));
- dev->l3mdev_ops = NULL;
- }
+ if (port->mode == IPVLAN_MODE_L3S)
+ ipvlan_l3s_unregister(port);
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
@@ -446,6 +375,11 @@ static const struct header_ops ipvlan_header_ops = {
.cache_update = eth_header_cache_update,
};
+static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
+{
+ ipvlan->dev->mtu = dev->mtu;
+}
+
static bool netif_is_ipvlan(const struct net_device *dev)
{
/* both ipvlan and ipvtap devices use the same netdev_ops */
@@ -499,6 +433,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
if (!data)
return 0;
+ if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
if (data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -601,6 +537,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct ipvl_dev *tmp = netdev_priv(phy_dev);
phy_dev = tmp->phy_dev;
+ if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
} else if (!netif_is_ipvlan_port(phy_dev)) {
/* Exit early if the underlying link is invalid or busy */
if (phy_dev->type != ARPHRD_ETHER ||
@@ -781,7 +719,6 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_REGISTER: {
struct net *oldnet, *newnet = dev_net(dev);
- struct ipvlan_netns *old_vnet;
oldnet = read_pnet(&port->pnet);
if (net_eq(newnet, oldnet))
@@ -789,12 +726,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
write_pnet(&port->pnet, newnet);
- old_vnet = net_generic(oldnet, ipvlan_netid);
- if (!old_vnet->ipvl_nf_hook_refcnt)
- break;
-
- ipvlan_register_nf_hook(newnet);
- ipvlan_unregister_nf_hook(oldnet);
+ ipvlan_migrate_l3s_hook(oldnet, newnet);
break;
}
case NETDEV_UNREGISTER:
@@ -1068,23 +1000,6 @@ static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = {
};
#endif
-static void ipvlan_ns_exit(struct net *net)
-{
- struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
-
- if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) {
- vnet->ipvl_nf_hook_refcnt = 0;
- nf_unregister_net_hooks(net, ipvl_nfops,
- ARRAY_SIZE(ipvl_nfops));
- }
-}
-
-static struct pernet_operations ipvlan_net_ops = {
- .id = &ipvlan_netid,
- .size = sizeof(struct ipvlan_netns),
- .exit = ipvlan_ns_exit,
-};
-
static int __init ipvlan_init_module(void)
{
int err;
@@ -1099,13 +1014,13 @@ static int __init ipvlan_init_module(void)
register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);
- err = register_pernet_subsys(&ipvlan_net_ops);
+ err = ipvlan_l3s_init();
if (err < 0)
goto error;
err = ipvlan_link_register(&ipvlan_link_ops);
if (err < 0) {
- unregister_pernet_subsys(&ipvlan_net_ops);
+ ipvlan_l3s_cleanup();
goto error;
}
@@ -1126,7 +1041,7 @@ error:
static void __exit ipvlan_cleanup_module(void)
{
rtnl_link_unregister(&ipvlan_link_ops);
- unregister_pernet_subsys(&ipvlan_net_ops);
+ ipvlan_l3s_cleanup();
unregister_netdevice_notifier(&ipvlan_notifier_block);
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
unregister_inetaddr_validator_notifier(
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fc726ce4c164..0c0f105657d3 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -119,8 +119,6 @@ static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->rx_handler_data);
}
-#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
-
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
@@ -337,7 +335,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
if (src)
dev_put(src->dev);
- kfree_skb(skb);
+ consume_skb(skb);
}
}
@@ -963,7 +961,8 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
struct macvlan_dev *vlan = netdev_priv(dev);
int err = -EINVAL;
@@ -1123,6 +1122,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
#endif
.ndo_get_iflink = macvlan_dev_get_iflink,
.ndo_features_check = passthru_features_check,
+ .ndo_change_proto_down = dev_change_proto_down_generic,
};
void macvlan_common_setup(struct net_device *dev)
@@ -1377,7 +1377,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
- if (!macvlan_port_exists(lowerdev)) {
+ if (!netif_is_macvlan_port(lowerdev)) {
err = macvlan_port_create(lowerdev);
if (err < 0)
return err;
@@ -1637,7 +1637,7 @@ static int macvlan_device_event(struct notifier_block *unused,
struct macvlan_port *port;
LIST_HEAD(list_kill);
- if (!macvlan_port_exists(dev))
+ if (!netif_is_macvlan_port(dev))
return NOTIFY_DONE;
port = macvlan_port_get_rtnl(dev);
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 172b271c8bd2..f92c43453ec6 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -248,7 +248,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
- struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev);
+ struct netdevsim *ns = bpf_offload_dev_priv(prog->aux->offload->offdev);
if (!ns->bpf_bind_accept)
return -EOPNOTSUPP;
@@ -589,7 +589,8 @@ int nsim_bpf_init(struct netdevsim *ns)
if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
return -ENOMEM;
- ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops);
+ ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops,
+ ns);
err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
if (err)
return err;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 8d8e2b3f263e..75a50b59cb8f 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -22,7 +22,6 @@
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
-#include <net/switchdev.h>
#include "netdevsim.h"
@@ -148,26 +147,16 @@ static struct device_type nsim_dev_type = {
.release = nsim_dev_release,
};
-static int
-nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int nsim_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct netdevsim *ns = netdev_priv(dev);
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(ns->sdev->switch_id);
- memcpy(&attr->u.ppid.id, &ns->sdev->switch_id,
- attr->u.ppid.id_len);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = sizeof(ns->sdev->switch_id);
+ memcpy(&ppid->id, &ns->sdev->switch_id, ppid->id_len);
+ return 0;
}
-static const struct switchdev_ops nsim_switchdev_ops = {
- .switchdev_port_attr_get = nsim_port_attr_get,
-};
-
static int nsim_init(struct net_device *dev)
{
char sdev_ddir_name[10], sdev_link_name[32];
@@ -214,7 +203,6 @@ static int nsim_init(struct net_device *dev)
goto err_bpf_uninit;
SET_NETDEV_DEV(dev, &ns->dev);
- SWITCHDEV_SET_OPS(dev, &nsim_switchdev_ops);
err = nsim_devlink_setup(ns);
if (err)
@@ -493,6 +481,7 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
+ .ndo_get_port_parent_id = nsim_get_port_parent_id,
};
static void nsim_setup(struct net_device *dev)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3d187cd50eb0..071869db44cf 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -87,6 +87,18 @@ config MDIO_BUS_MUX_MMIOREG
Currently, only 8/16/32 bits registers are supported.
+config MDIO_BUS_MUX_MULTIPLEXER
+ tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
+ depends on OF_MDIO
+ select MULTIPLEXER
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexer
+ that is controlled via the kernel multiplexer subsystem. The
+ bus multiplexer connects one of several child MDIO busses to
+ a parent bus. Child bus selection is under the control of
+ the kernel multiplexer subsystem.
+
config MDIO_CAVIUM
tristate
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 5805c0b7d60e..ece5dae67174 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
@@ -45,6 +46,10 @@ sfp-obj-$(CONFIG_SFP) += sfp-bus.o
obj-y += $(sfp-obj-y) $(sfp-obj-m)
obj-$(CONFIG_AMD_PHY) += amd.o
+aquantia-objs += aquantia_main.o
+ifdef CONFIG_HWMON
+aquantia-objs += aquantia_hwmon.o
+endif
obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
obj-$(CONFIG_ASIX_PHY) += asix.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 9d0504f3e3b2..65b4b0960b1e 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for AMD am79c PHYs
*
* Author: Heiko Schocher <hs@denx.de>
*
* Copyright (c) 2011 DENX Software Engineering GmbH
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
deleted file mode 100644
index beb3309bb0f0..000000000000
--- a/drivers/net/phy/aquantia.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Driver for Aquantia PHY
- *
- * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
- *
- * Copyright 2015 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/mdio.h>
-
-#define PHY_ID_AQ1202 0x03a1b445
-#define PHY_ID_AQ2104 0x03a1b460
-#define PHY_ID_AQR105 0x03a1b4a2
-#define PHY_ID_AQR106 0x03a1b4d0
-#define PHY_ID_AQR107 0x03a1b4e0
-#define PHY_ID_AQR405 0x03a1b4b0
-
-static int aquantia_config_aneg(struct phy_device *phydev)
-{
- linkmode_copy(phydev->supported, phy_10gbit_features);
- linkmode_copy(phydev->advertising, phydev->supported);
-
- return 0;
-}
-
-static int aquantia_config_intr(struct phy_device *phydev)
-{
- int err;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
- } else {
- err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
- }
-
- return err;
-}
-
-static int aquantia_ack_interrupt(struct phy_device *phydev)
-{
- int reg;
-
- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
- return (reg < 0) ? reg : 0;
-}
-
-static int aquantia_read_status(struct phy_device *phydev)
-{
- int reg;
-
- reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
- reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
- if (reg & MDIO_STAT1_LSTATUS)
- phydev->link = 1;
- else
- phydev->link = 0;
-
- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
- mdelay(10);
- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
-
- switch (reg) {
- case 0x9:
- phydev->speed = SPEED_2500;
- break;
- case 0x5:
- phydev->speed = SPEED_1000;
- break;
- case 0x3:
- phydev->speed = SPEED_100;
- break;
- case 0x7:
- default:
- phydev->speed = SPEED_10000;
- break;
- }
- phydev->duplex = DUPLEX_FULL;
-
- return 0;
-}
-
-static struct phy_driver aquantia_driver[] = {
-{
- .phy_id = PHY_ID_AQ1202,
- .phy_id_mask = 0xfffffff0,
- .name = "Aquantia AQ1202",
- .features = PHY_10GBIT_FULL_FEATURES,
- .aneg_done = genphy_c45_aneg_done,
- .config_aneg = aquantia_config_aneg,
- .config_intr = aquantia_config_intr,
- .ack_interrupt = aquantia_ack_interrupt,
- .read_status = aquantia_read_status,
-},
-{
- .phy_id = PHY_ID_AQ2104,
- .phy_id_mask = 0xfffffff0,
- .name = "Aquantia AQ2104",
- .features = PHY_10GBIT_FULL_FEATURES,
- .aneg_done = genphy_c45_aneg_done,
- .config_aneg = aquantia_config_aneg,
- .config_intr = aquantia_config_intr,
- .ack_interrupt = aquantia_ack_interrupt,
- .read_status = aquantia_read_status,
-},
-{
- .phy_id = PHY_ID_AQR105,
- .phy_id_mask = 0xfffffff0,
- .name = "Aquantia AQR105",
- .features = PHY_10GBIT_FULL_FEATURES,
- .aneg_done = genphy_c45_aneg_done,
- .config_aneg = aquantia_config_aneg,
- .config_intr = aquantia_config_intr,
- .ack_interrupt = aquantia_ack_interrupt,
- .read_status = aquantia_read_status,
-},
-{
- .phy_id = PHY_ID_AQR106,
- .phy_id_mask = 0xfffffff0,
- .name = "Aquantia AQR106",
- .features = PHY_10GBIT_FULL_FEATURES,
- .aneg_done = genphy_c45_aneg_done,
- .config_aneg = aquantia_config_aneg,
- .config_intr = aquantia_config_intr,
- .ack_interrupt = aquantia_ack_interrupt,
- .read_status = aquantia_read_status,
-},
-{
- .phy_id = PHY_ID_AQR107,
- .phy_id_mask = 0xfffffff0,
- .name = "Aquantia AQR107",
- .features = PHY_10GBIT_FULL_FEATURES,
- .aneg_done = genphy_c45_aneg_done,
- .config_aneg = aquantia_config_aneg,
- .config_intr = aquantia_config_intr,
- .ack_interrupt = aquantia_ack_interrupt,
- .read_status = aquantia_read_status,
-},
-{
- .phy_id = PHY_ID_AQR405,
- .phy_id_mask = 0xfffffff0,
- .name = "Aquantia AQR405",
- .features = PHY_10GBIT_FULL_FEATURES,
- .aneg_done = genphy_c45_aneg_done,
- .config_aneg = aquantia_config_aneg,
- .config_intr = aquantia_config_intr,
- .ack_interrupt = aquantia_ack_interrupt,
- .read_status = aquantia_read_status,
-},
-};
-
-module_phy_driver(aquantia_driver);
-
-static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
- { PHY_ID_AQ1202, 0xfffffff0 },
- { PHY_ID_AQ2104, 0xfffffff0 },
- { PHY_ID_AQR105, 0xfffffff0 },
- { PHY_ID_AQR106, 0xfffffff0 },
- { PHY_ID_AQR107, 0xfffffff0 },
- { PHY_ID_AQR405, 0xfffffff0 },
- { }
-};
-
-MODULE_DEVICE_TABLE(mdio, aquantia_tbl);
-
-MODULE_DESCRIPTION("Aquantia PHY driver");
-MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/aquantia.h b/drivers/net/phy/aquantia.h
new file mode 100644
index 000000000000..5a16caab7b2f
--- /dev/null
+++ b/drivers/net/phy/aquantia.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * HWMON driver for Aquantia PHY
+ *
+ * Author: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+ * Author: Andrew Lunn <andrew@lunn.ch>
+ * Author: Heiner Kallweit <hkallweit1@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/phy.h>
+
+#if IS_REACHABLE(CONFIG_HWMON)
+int aqr_hwmon_probe(struct phy_device *phydev);
+#else
+static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
+#endif
diff --git a/drivers/net/phy/aquantia_hwmon.c b/drivers/net/phy/aquantia_hwmon.c
new file mode 100644
index 000000000000..19c4c280a6cd
--- /dev/null
+++ b/drivers/net/phy/aquantia_hwmon.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/* HWMON driver for Aquantia PHY
+ *
+ * Author: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+ * Author: Andrew Lunn <andrew@lunn.ch>
+ * Author: Heiner Kallweit <hkallweit1@gmail.com>
+ */
+
+#include <linux/phy.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/hwmon.h>
+
+#include "aquantia.h"
+
+/* Vendor specific 1, MDIO_MMD_VEND2 */
+#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421
+#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422
+#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423
+#define VEND1_THERMAL_PROV_LOW_TEMP_WARN 0xc424
+#define VEND1_THERMAL_STAT1 0xc820
+#define VEND1_THERMAL_STAT2 0xc821
+#define VEND1_THERMAL_STAT2_VALID BIT(0)
+#define VEND1_GENERAL_STAT1 0xc830
+#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL BIT(14)
+#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL BIT(13)
+#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN BIT(12)
+#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN BIT(11)
+
+#if IS_REACHABLE(CONFIG_HWMON)
+
+static umode_t aqr_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_lcrit_alarm:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ case hwmon_temp_lcrit:
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int aqr_hwmon_get(struct phy_device *phydev, int reg, long *value)
+{
+ int temp = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
+
+ if (temp < 0)
+ return temp;
+
+ /* 16 bit value is 2's complement with LSB = 1/256th degree Celsius */
+ *value = (s16)temp * 1000 / 256;
+
+ return 0;
+}
+
+static int aqr_hwmon_set(struct phy_device *phydev, int reg, long value)
+{
+ int temp;
+
+ if (value >= 128000 || value < -128000)
+ return -ERANGE;
+
+ temp = value * 256 / 1000;
+
+ /* temp is in s16 range and we're interested in lower 16 bits only */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1, reg, (u16)temp);
+}
+
+static int aqr_hwmon_test_bit(struct phy_device *phydev, int reg, int bit)
+{
+ int val = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
+
+ if (val < 0)
+ return val;
+
+ return !!(val & bit);
+}
+
+static int aqr_hwmon_status1(struct phy_device *phydev, int bit, long *value)
+{
+ int val = aqr_hwmon_test_bit(phydev, VEND1_GENERAL_STAT1, bit);
+
+ if (val < 0)
+ return val;
+
+ *value = val;
+
+ return 0;
+}
+
+static int aqr_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int reg;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = aqr_hwmon_test_bit(phydev, VEND1_THERMAL_STAT2,
+ VEND1_THERMAL_STAT2_VALID);
+ if (reg < 0)
+ return reg;
+ if (!reg)
+ return -EBUSY;
+
+ return aqr_hwmon_get(phydev, VEND1_THERMAL_STAT1, value);
+
+ case hwmon_temp_lcrit:
+ return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
+ value);
+ case hwmon_temp_min:
+ return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
+ value);
+ case hwmon_temp_max:
+ return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
+ value);
+ case hwmon_temp_crit:
+ return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
+ value);
+ case hwmon_temp_lcrit_alarm:
+ return aqr_hwmon_status1(phydev,
+ VEND1_GENERAL_STAT1_LOW_TEMP_FAIL,
+ value);
+ case hwmon_temp_min_alarm:
+ return aqr_hwmon_status1(phydev,
+ VEND1_GENERAL_STAT1_LOW_TEMP_WARN,
+ value);
+ case hwmon_temp_max_alarm:
+ return aqr_hwmon_status1(phydev,
+ VEND1_GENERAL_STAT1_HIGH_TEMP_WARN,
+ value);
+ case hwmon_temp_crit_alarm:
+ return aqr_hwmon_status1(phydev,
+ VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL,
+ value);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aqr_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_temp_lcrit:
+ return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
+ value);
+ case hwmon_temp_min:
+ return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
+ value);
+ case hwmon_temp_max:
+ return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
+ value);
+ case hwmon_temp_crit:
+ return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
+ value);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops aqr_hwmon_ops = {
+ .is_visible = aqr_hwmon_is_visible,
+ .read = aqr_hwmon_read,
+ .write = aqr_hwmon_write,
+};
+
+static u32 aqr_hwmon_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0,
+};
+
+static const struct hwmon_channel_info aqr_hwmon_chip = {
+ .type = hwmon_chip,
+ .config = aqr_hwmon_chip_config,
+};
+
+static u32 aqr_hwmon_temp_config[] = {
+ HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
+ 0,
+};
+
+static const struct hwmon_channel_info aqr_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = aqr_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *aqr_hwmon_info[] = {
+ &aqr_hwmon_chip,
+ &aqr_hwmon_temp,
+ NULL,
+};
+
+static const struct hwmon_chip_info aqr_hwmon_chip_info = {
+ .ops = &aqr_hwmon_ops,
+ .info = aqr_hwmon_info,
+};
+
+int aqr_hwmon_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device *hwmon_dev;
+ char *hwmon_name;
+ int i, j;
+
+ hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!hwmon_name)
+ return -ENOMEM;
+
+ for (i = j = 0; hwmon_name[i]; i++) {
+ if (isalnum(hwmon_name[i])) {
+ if (i != j)
+ hwmon_name[j] = hwmon_name[i];
+ j++;
+ }
+ }
+ hwmon_name[j] = '\0';
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
+ phydev, &aqr_hwmon_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+#endif
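
The register format handled by aqr_hwmon_get()/aqr_hwmon_set() above is a signed 16-bit value with an LSB of 1/256 degC, while hwmon reports millidegrees. A small standalone check of that scaling, illustrative only and independent of the driver:

#include <stdio.h>

/* Worked example of the 1/256 degC conversion used in aquantia_hwmon.c:
 * raw register values are s16 with LSB = 1/256 degC.
 */
int main(void)
{
	short raw = 0x1980;			/* 6528 raw units */
	long mdeg = (long)raw * 1000 / 256;	/* -> 25500, i.e. 25.5 degC */

	printf("%#x raw -> %ld mC\n", (unsigned short)raw, mdeg);

	mdeg = -10500;				/* -10.5 degC written back */
	raw = (short)(mdeg * 256 / 1000);	/* -> -2688 raw units */
	printf("%ld mC -> %d raw\n", -10500L, raw);

	return 0;
}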
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
new file mode 100644
index 000000000000..37218e5d7cc9
--- /dev/null
+++ b/drivers/net/phy/aquantia_main.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Aquantia PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+
+#include "aquantia.h"
+
+#define PHY_ID_AQ1202 0x03a1b445
+#define PHY_ID_AQ2104 0x03a1b460
+#define PHY_ID_AQR105 0x03a1b4a2
+#define PHY_ID_AQR106 0x03a1b4d0
+#define PHY_ID_AQR107 0x03a1b4e0
+#define PHY_ID_AQCS109 0x03a1b5c2
+#define PHY_ID_AQR405 0x03a1b4b0
+
+#define MDIO_AN_VEND_PROV 0xc400
+#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
+#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
+
+#define MDIO_AN_TX_VEND_STATUS1 0xc800
+#define MDIO_AN_TX_VEND_STATUS1_10BASET (0x0 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_100BASETX (0x1 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_1000BASET (0x2 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_10GBASET (0x3 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_2500BASET (0x4 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_5000BASET (0x5 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK (0x7 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX BIT(0)
+
+#define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01
+
+#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
+#define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0)
+
+#define MDIO_AN_RX_LP_STAT1 0xe820
+#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL BIT(15)
+#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF BIT(14)
+
+/* Vendor specific 1, MDIO_MMD_VEND1 */
+#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
+#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
+
+#define VEND1_GLOBAL_INT_STD_MASK 0xff00
+#define VEND1_GLOBAL_INT_STD_MASK_PMA1 BIT(15)
+#define VEND1_GLOBAL_INT_STD_MASK_PMA2 BIT(14)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS1 BIT(13)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS2 BIT(12)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS3 BIT(11)
+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1 BIT(10)
+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2 BIT(9)
+#define VEND1_GLOBAL_INT_STD_MASK_AN1 BIT(8)
+#define VEND1_GLOBAL_INT_STD_MASK_AN2 BIT(7)
+#define VEND1_GLOBAL_INT_STD_MASK_GBE BIT(6)
+#define VEND1_GLOBAL_INT_STD_MASK_ALL BIT(0)
+
+#define VEND1_GLOBAL_INT_VEND_MASK 0xff01
+#define VEND1_GLOBAL_INT_VEND_MASK_PMA BIT(15)
+#define VEND1_GLOBAL_INT_VEND_MASK_PCS BIT(14)
+#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS BIT(13)
+#define VEND1_GLOBAL_INT_VEND_MASK_AN BIT(12)
+#define VEND1_GLOBAL_INT_VEND_MASK_GBE BIT(11)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1 BIT(2)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+
+static int aqr_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u16 reg;
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return genphy_c45_pma_setup_forced(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ /* Clause 45 has no standardized support for 1000BaseT, therefore
+ * use vendor registers for this mode.
+ */
+ reg = 0;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_1000BASET_HALF |
+ MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int aqr_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_write_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_TX_VEND_INT_MASK2,
+ MDIO_AN_TX_VEND_INT_MASK2_LINK);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_INT_STD_MASK,
+ VEND1_GLOBAL_INT_STD_MASK_ALL);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_INT_VEND_MASK,
+ VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
+ VEND1_GLOBAL_INT_VEND_MASK_AN);
+ } else {
+ err = phy_write_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_TX_VEND_INT_MASK2, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_INT_STD_MASK, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_INT_VEND_MASK, 0);
+ }
+
+ return err;
+}
+
+static int aqr_ack_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_TX_VEND_INT_STATUS2);
+ return (reg < 0) ? reg : 0;
+}
+
+static int aqr_read_status(struct phy_device *phydev)
+{
+ int val;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->lp_advertising,
+ val & MDIO_AN_RX_LP_STAT1_1000BASET_FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->lp_advertising,
+ val & MDIO_AN_RX_LP_STAT1_1000BASET_HALF);
+ }
+
+ return genphy_c45_read_status(phydev);
+}
+
+static int aqcs109_config_init(struct phy_device *phydev)
+{
+ /* AQCS109 belongs to a chip family partially supporting 10G and 5G.
+ * PMA speed ability bits are the same for all members of the family,
+ * AQCS109 however supports speeds up to 2.5G only.
+ */
+ return phy_set_max_speed(phydev, SPEED_2500);
+}
+
+static struct phy_driver aqr_driver[] = {
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
+ .name = "Aquantia AQ1202",
+ .aneg_done = genphy_c45_aneg_done,
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .ack_interrupt = aqr_ack_interrupt,
+ .read_status = aqr_read_status,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104),
+ .name = "Aquantia AQ2104",
+ .aneg_done = genphy_c45_aneg_done,
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .ack_interrupt = aqr_ack_interrupt,
+ .read_status = aqr_read_status,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
+ .name = "Aquantia AQR105",
+ .aneg_done = genphy_c45_aneg_done,
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .ack_interrupt = aqr_ack_interrupt,
+ .read_status = aqr_read_status,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
+ .name = "Aquantia AQR106",
+ .aneg_done = genphy_c45_aneg_done,
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .ack_interrupt = aqr_ack_interrupt,
+ .read_status = aqr_read_status,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
+ .name = "Aquantia AQR107",
+ .aneg_done = genphy_c45_aneg_done,
+ .get_features = genphy_c45_pma_read_abilities,
+ .probe = aqr_hwmon_probe,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .ack_interrupt = aqr_ack_interrupt,
+ .read_status = aqr_read_status,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
+ .name = "Aquantia AQCS109",
+ .aneg_done = genphy_c45_aneg_done,
+ .get_features = genphy_c45_pma_read_abilities,
+ .probe = aqr_hwmon_probe,
+ .config_init = aqcs109_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .ack_interrupt = aqr_ack_interrupt,
+ .read_status = aqr_read_status,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
+ .name = "Aquantia AQR405",
+ .aneg_done = genphy_c45_aneg_done,
+ .get_features = genphy_c45_pma_read_abilities,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .ack_interrupt = aqr_ack_interrupt,
+ .read_status = aqr_read_status,
+},
+};
+
+module_phy_driver(aqr_driver);
+
+static struct mdio_device_id __maybe_unused aqr_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR106) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, aqr_tbl);
+
+MODULE_DESCRIPTION("Aquantia PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c
index 8ebe7f5484ae..f14ba5366b91 100644
--- a/drivers/net/phy/asix.c
+++ b/drivers/net/phy/asix.c
@@ -1,13 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/* Driver for Asix PHYs
*
* Author: Michael Schmitz <schmitzmic@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f9432d053a22..f3e96191eb6f 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/at803x.c
*
* Driver for Atheros 803x PHY
*
* Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/phy.h>
@@ -39,9 +35,6 @@
#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
-#define AT803X_MMD_ACCESS_CONTROL 0x0D
-#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
-#define AT803X_FUNC_DATA 0x4003
#define AT803X_REG_CHIP_CONFIG 0x1f
#define AT803X_BT_BX_REG_SEL 0x8000
@@ -110,16 +103,28 @@ static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
return phy_write(phydev, AT803X_DEBUG_DATA, val);
}
-static inline int at803x_enable_rx_delay(struct phy_device *phydev)
+static int at803x_enable_rx_delay(struct phy_device *phydev)
{
return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0,
- AT803X_DEBUG_RX_CLK_DLY_EN);
+ AT803X_DEBUG_RX_CLK_DLY_EN);
}
-static inline int at803x_enable_tx_delay(struct phy_device *phydev)
+static int at803x_enable_tx_delay(struct phy_device *phydev)
{
return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0,
- AT803X_DEBUG_TX_CLK_DLY_EN);
+ AT803X_DEBUG_TX_CLK_DLY_EN);
+}
+
+static int at803x_disable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0,
+ AT803X_DEBUG_RX_CLK_DLY_EN, 0);
+}
+
+static int at803x_disable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5,
+ AT803X_DEBUG_TX_CLK_DLY_EN, 0);
}
/* save relevant PHY registers to private copy */
@@ -168,16 +173,9 @@ static int at803x_set_wol(struct phy_device *phydev,
if (!is_valid_ether_addr(mac))
return -EINVAL;
- for (i = 0; i < 3; i++) {
- phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
- AT803X_DEVICE_ADDR);
- phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
- offsets[i]);
- phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
- AT803X_FUNC_DATA);
- phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
- mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
- }
+ for (i = 0; i < 3; i++)
+ phy_write_mmd(phydev, AT803X_DEVICE_ADDR, offsets[i],
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
value = phy_read(phydev, AT803X_INTR_ENABLE);
value |= AT803X_INTR_ENABLE_WOL;
@@ -255,21 +253,42 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+ /* The RX and TX delay default is:
+ * after HW reset: RX delay enabled and TX delay disabled
+ * after SW reset: RX delay enabled, while TX delay retains the
+ * value before reset.
+ *
+ * So let's first disable the RX and TX delays in PHY and enable
+ * them based on the mode selected (this also takes care of RGMII
+ * mode where we expect delays to be disabled)
+ */
+
+ ret = at803x_disable_rx_delay(phydev);
+ if (ret < 0)
+ return ret;
+ ret = at803x_disable_tx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ /* If RGMII_ID or RGMII_RXID are specified enable RX delay,
+ * otherwise keep it disabled
+ */
ret = at803x_enable_rx_delay(phydev);
if (ret < 0)
return ret;
}
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* If RGMII_ID or RGMII_TXID are specified enable TX delay,
+ * otherwise keep it disabled
+ */
ret = at803x_enable_tx_delay(phydev);
- if (ret < 0)
- return ret;
}
- return 0;
+ return ret;
}
static int at803x_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index e757b09f1889..ab8e12922bf9 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/* Broadcom Cygnus SoC internal transceivers support. */
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index e10e7b54ec4b..a75642051b8b 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "bcm-phy-lib.h"
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 81cceaa412fe..17faaefcfd60 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_BCM_PHY_LIB_H
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index a88dd14a25c0..44e6cff419a0 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Broadcom 63xx SOCs integrated PHYs
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "bcm-phy-lib.h"
#include <linux/module.h>
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 712224cc442d..b8415f8fae14 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Broadcom BCM7xxx internal transceivers support.
*
* Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 1b350183bffb..f0c0eefe2202 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2011 - 2012 Cavium, Inc.
*/
@@ -197,6 +194,7 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
+ .features = PHY_10GBIT_FEC_FEATURES,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
@@ -208,6 +206,7 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
+ .features = PHY_10GBIT_FEC_FEATURES,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
@@ -219,4 +218,4 @@ static struct phy_driver bcm87xx_driver[] = {
module_phy_driver(bcm87xx_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index aa73c5cc5f86..9605d4fe540b 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/broadcom.c
*
@@ -7,11 +8,6 @@
* Copyright (c) 2006 Maciej W. Rozycki
*
* Inspired by code written by Amy Fong.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "bcm-phy-lib.h"
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index fea61c81bda9..108ed24f8489 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/cicada.c
*
@@ -6,12 +7,6 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 8022cd317f62..856cdc36aacd 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2017 NXP
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* CORTINA is a registered trademark of Cortina Systems, Inc.
*
*/
@@ -88,10 +79,10 @@ static struct phy_driver cortina_driver[] = {
.phy_id = PHY_ID_CS4340,
.phy_id_mask = 0xffffffff,
.name = "Cortina CS4340",
- .config_init = gen10g_config_init,
+ .features = PHY_10GBIT_FEATURES,
.config_aneg = gen10g_config_aneg,
.read_status = cortina_read_status,
- .soft_reset = gen10g_no_soft_reset,
+ .soft_reset = genphy_no_soft_reset,
.probe = cortina_probe,
},
};
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 97162008f42b..bf39baa7f2c8 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/davicom.c
*
@@ -6,12 +7,6 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 18b41bc345ab..2fe2ebaf62d1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the National Semiconductor DP83640 PHYTER
*
* Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -898,14 +885,14 @@ static void decode_txts(struct dp83640_private *dp83640,
struct phy_txts *phy_txts)
{
struct skb_shared_hwtstamps shhwtstamps;
+ struct dp83640_skb_info *skb_info;
struct sk_buff *skb;
- u64 ns;
u8 overflow;
+ u64 ns;
/* We must already have the skb that triggered this. */
-
+again:
skb = skb_dequeue(&dp83640->tx_queue);
-
if (!skb) {
pr_debug("have timestamp but tx_queue empty\n");
return;
@@ -920,6 +907,11 @@ static void decode_txts(struct dp83640_private *dp83640,
}
return;
}
+ skb_info = (struct dp83640_skb_info *)skb->cb;
+ if (time_after(jiffies, skb_info->tmo)) {
+ kfree_skb(skb);
+ goto again;
+ }
ns = phy2txts(phy_txts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1472,6 +1464,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
static void dp83640_txtstamp(struct phy_device *phydev,
struct sk_buff *skb, int type)
{
+ struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
struct dp83640_private *dp83640 = phydev->priv;
switch (dp83640->hwts_tx_en) {
@@ -1484,6 +1477,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
/* fall through */
case HWTSTAMP_TX_ON:
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
skb_queue_tail(&dp83640->tx_queue, skb);
break;
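The dp83640 change above pairs each skb queued for a hardware TX timestamp with a deadline stored in skb->cb and drops anything stale before matching it against a PHY timestamp. A minimal sketch of that guard, assuming the driver's dp83640_skb_info carries the tmo field shown in the hunk and SKB_TIMESTAMP_TIMEOUT is its jiffies-based limit:

static bool dp83640_skb_expired(const struct sk_buff *skb)
{
        const struct dp83640_skb_info *skb_info =
                (const struct dp83640_skb_info *)skb->cb;

        /* time_after() is wrap-safe, so the check stays correct
         * across a jiffies rollover.
         */
        return time_after(jiffies, skb_info->tmo);
}

decode_txts() frees an expired skb and loops back to the queue, so a lost hardware timestamp can no longer pin an skb there indefinitely.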
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 24c7f149f3e6..bbd8c22067f3 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Texas Instruments DP83822 PHY
*
* Copyright (C) 2017 Texas Instruments Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/ethtool.h>
@@ -338,4 +330,4 @@ MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
MODULE_DESCRIPTION("Texas Instruments DP83822 PHY driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index a6b55909d1dc..f55dc907c2f3 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Texas Instruments DP83848 PHY
*
* Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -133,4 +125,4 @@ module_phy_driver(dp83848_driver);
MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver");
MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index da6a67d47ce9..8448d01819ef 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Texas Instruments DP83867 PHY
*
* Copyright (C) 2015 Texas Instruments Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/ethtool.h>
@@ -19,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/delay.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -135,17 +128,13 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev)
{
struct dp83867_private *dp83867 =
(struct dp83867_private *)phydev->priv;
- u16 val;
-
- val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN)
- val |= DP83867_CFG4_PORT_MIRROR_EN;
+ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ DP83867_CFG4_PORT_MIRROR_EN);
else
- val &= ~DP83867_CFG4_PORT_MIRROR_EN;
-
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
-
+ phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ DP83867_CFG4_PORT_MIRROR_EN);
return 0;
}
@@ -230,11 +219,9 @@ static int dp83867_config_init(struct phy_device *phydev)
}
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
- if (dp83867->rxctrl_strap_quirk) {
- val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
- val &= ~BIT(7);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
- }
+ if (dp83867->rxctrl_strap_quirk)
+ phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ BIT(7));
if (phy_interface_is_rgmii(phydev)) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
@@ -283,17 +270,11 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
delay);
- if (dp83867->io_impedance >= 0) {
- val = phy_read_mmd(phydev, DP83867_DEVADDR,
- DP83867_IO_MUX_CFG);
-
- val &= ~DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
- val |= dp83867->io_impedance &
- DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
-
- phy_write_mmd(phydev, DP83867_DEVADDR,
- DP83867_IO_MUX_CFG, val);
- }
+ if (dp83867->io_impedance >= 0)
+ phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+ dp83867->io_impedance &
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
}
/* Enable Interrupt output INT_OE in CFG3 register */
@@ -307,12 +288,11 @@ static int dp83867_config_init(struct phy_device *phydev)
dp83867_config_port_mirroring(phydev);
/* Clock output selection if muxing property is set */
- if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK) {
- val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG);
- val &= ~DP83867_IO_MUX_CFG_CLK_O_SEL_MASK;
- val |= (dp83867->clk_output_sel << DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, val);
- }
+ if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK)
+ phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
+ DP83867_IO_MUX_CFG_CLK_O_SEL_MASK,
+ dp83867->clk_output_sel <<
+ DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT);
return 0;
}
@@ -325,6 +305,8 @@ static int dp83867_phy_reset(struct phy_device *phydev)
if (err < 0)
return err;
+ usleep_range(10, 20);
+
return dp83867_config_init(phydev);
}
@@ -357,4 +339,4 @@ MODULE_DEVICE_TABLE(mdio, dp83867_tbl);
MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
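All of the dp83867 hunks above follow the same conversion: an open-coded read/modify/write of an MMD register becomes a single phy_modify_mmd() / phy_set_bits_mmd() / phy_clear_bits_mmd() call, which also holds the MDIO bus lock across the sequence. A sketch of the pattern with placeholder register and mask values (the EXAMPLE_* names are not driver symbols):

#define EXAMPLE_DEVAD   MDIO_MMD_VEND1
#define EXAMPLE_REG     0x0170
#define EXAMPLE_MASK    GENMASK(4, 0)

static int example_set_field_mmd(struct phy_device *phydev, u16 value)
{
        /* Read, mask, merge and write back in one locked helper call;
         * a negative return is an errno from the MDIO accessors.
         */
        return phy_modify_mmd(phydev, EXAMPLE_DEVAD, EXAMPLE_REG,
                              EXAMPLE_MASK, value & EXAMPLE_MASK);
}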
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index da13356999e5..e9704af1d239 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -144,11 +144,8 @@ static int dp83811_set_wol(struct phy_device *phydev,
phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
value);
} else {
- value = phy_read_mmd(phydev, DP83811_DEVADDR,
- MII_DP83811_WOL_CFG);
- value &= ~DP83811_WOL_EN;
- phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
- value);
+ phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+ DP83811_WOL_EN);
}
return 0;
@@ -328,14 +325,10 @@ static int dp83811_suspend(struct phy_device *phydev)
static int dp83811_resume(struct phy_device *phydev)
{
- int value;
-
genphy_resume(phydev);
- value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG);
-
- phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, value |
- DP83811_WOL_CLR_INDICATION);
+ phy_set_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+ DP83811_WOL_CLR_INDICATION);
return 0;
}
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 565e49e7f76f..2aa367c04a8e 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/et1011c.c
*
@@ -6,12 +7,6 @@
* Author: Chaithrika U S
*
* Copyright (c) 2008 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 72d43c88e6ff..1acd8bfdb3bc 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Fixed MDIO bus (MDIO bus emulation with fixed PHYs)
*
@@ -5,11 +6,6 @@
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* Copyright (c) 2006-2007 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
@@ -22,10 +18,11 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/seqlock.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
+#include <linux/linkmode.h>
#include "swphy.h"
@@ -42,7 +39,7 @@ struct fixed_phy {
bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
- int link_gpio;
+ struct gpio_desc *link_gpiod;
};
static struct platform_device *pdev;
@@ -71,8 +68,8 @@ EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
static void fixed_phy_update(struct fixed_phy *fp)
{
- if (!fp->no_carrier && gpio_is_valid(fp->link_gpio))
- fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
+ if (!fp->no_carrier && fp->link_gpiod)
+ fp->status.link = !!gpiod_get_value_cansleep(fp->link_gpiod);
}
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
@@ -89,11 +86,11 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
s = read_seqcount_begin(&fp->seqcount);
fp->status.link = !fp->no_carrier;
/* Issue callback if user registered it. */
- if (fp->link_update) {
+ if (fp->link_update)
fp->link_update(fp->phydev->attached_dev,
&fp->status);
- fixed_phy_update(fp);
- }
+ /* Check the GPIO for change in status */
+ fixed_phy_update(fp);
state = fp->status;
} while (read_seqcount_retry(&fp->seqcount, s));
@@ -137,9 +134,9 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-int fixed_phy_add(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status,
- int link_gpio)
+static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
+ struct fixed_phy_status *status,
+ struct gpio_desc *gpiod)
{
int ret;
struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -160,24 +157,19 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
fp->addr = phy_addr;
fp->status = *status;
- fp->link_gpio = link_gpio;
-
- if (gpio_is_valid(fp->link_gpio)) {
- ret = gpio_request_one(fp->link_gpio, GPIOF_DIR_IN,
- "fixed-link-gpio-link");
- if (ret)
- goto err_regs;
- }
+ fp->link_gpiod = gpiod;
fixed_phy_update(fp);
list_add_tail(&fp->node, &fmb->phys);
return 0;
+}
-err_regs:
- kfree(fp);
- return ret;
+int fixed_phy_add(unsigned int irq, int phy_addr,
+ struct fixed_phy_status *status) {
+
+ return fixed_phy_add_gpiod(irq, phy_addr, status, NULL);
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
@@ -191,8 +183,8 @@ static void fixed_phy_del(int phy_addr)
list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
if (fp->addr == phy_addr) {
list_del(&fp->node);
- if (gpio_is_valid(fp->link_gpio))
- gpio_free(fp->link_gpio);
+ if (fp->link_gpiod)
+ gpiod_put(fp->link_gpiod);
kfree(fp);
ida_simple_remove(&phy_fixed_ida, phy_addr);
return;
@@ -200,10 +192,48 @@ static void fixed_phy_del(int phy_addr)
}
}
-struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- int link_gpio,
- struct device_node *np)
+#ifdef CONFIG_OF_GPIO
+static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
+{
+ struct device_node *fixed_link_node;
+ struct gpio_desc *gpiod;
+
+ if (!np)
+ return NULL;
+
+ fixed_link_node = of_get_child_by_name(np, "fixed-link");
+ if (!fixed_link_node)
+ return NULL;
+
+ /*
+ * As the fixed link is just a device tree node without any
+ * Linux device associated with it, we simply have to obtain
+ * the GPIO descriptor from the device tree like this.
+ */
+ gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0,
+ GPIOD_IN, "mdio");
+ of_node_put(fixed_link_node);
+ if (IS_ERR(gpiod)) {
+ if (PTR_ERR(gpiod) == -EPROBE_DEFER)
+ return gpiod;
+ pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
+ fixed_link_node);
+ gpiod = NULL;
+ }
+
+ return gpiod;
+}
+#else
+static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
+static struct phy_device *__fixed_phy_register(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct device_node *np,
+ struct gpio_desc *gpiod)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
struct phy_device *phy;
@@ -213,12 +243,19 @@ struct phy_device *fixed_phy_register(unsigned int irq,
if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED)
return ERR_PTR(-EPROBE_DEFER);
+ /* Check if we have a GPIO associated with this fixed phy */
+ if (!gpiod) {
+ gpiod = fixed_phy_get_gpiod(np);
+ if (IS_ERR(gpiod))
+ return ERR_CAST(gpiod);
+ }
+
/* Get the next available PHY address, up to PHY_MAX_ADDR */
phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
- ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
+ ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod);
if (ret < 0) {
ida_simple_remove(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
@@ -264,6 +301,8 @@ struct phy_device *fixed_phy_register(unsigned int irq,
phy->supported);
}
+ linkmode_copy(phy->advertising, phy->supported);
+
ret = phy_device_register(phy);
if (ret) {
phy_device_free(phy);
@@ -274,8 +313,24 @@ struct phy_device *fixed_phy_register(unsigned int irq,
return phy;
}
+
+struct phy_device *fixed_phy_register(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct device_node *np)
+{
+ return __fixed_phy_register(irq, status, np, NULL);
+}
EXPORT_SYMBOL_GPL(fixed_phy_register);
+struct phy_device *
+fixed_phy_register_with_gpiod(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct gpio_desc *gpiod)
+{
+ return __fixed_phy_register(irq, status, NULL, gpiod);
+}
+EXPORT_SYMBOL_GPL(fixed_phy_register_with_gpiod);
+
void fixed_phy_unregister(struct phy_device *phy)
{
phy_device_remove(phy);
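fixed_phy_register_with_gpiod(), added above, lets a caller that already owns a link-status GPIO descriptor hand it straight to the fixed PHY instead of describing it under a fixed-link device tree node. A usage sketch under that assumption; the surrounding MAC-driver code and the "link" GPIO name are illustrative only:

static struct phy_device *example_attach_fixed_phy(struct device *dev)
{
        struct fixed_phy_status status = {
                .link = 1,
                .speed = SPEED_1000,
                .duplex = DUPLEX_FULL,
        };
        struct gpio_desc *link_gpiod;

        /* Optional GPIO: a NULL descriptor leaves the link forced up. */
        link_gpiod = devm_gpiod_get_optional(dev, "link", GPIOD_IN);
        if (IS_ERR(link_gpiod))
                return ERR_CAST(link_gpiod);

        return fixed_phy_register_with_gpiod(PHY_POLL, &status, link_gpiod);
}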
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 7d5938b87660..ebef8354bc81 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for ICPlus PHYs
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index fc0f5024a29e..02d9713318b6 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
* Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/mdio.h>
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index c8bb29ae1a2a..a93d673baf35 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/lxt.c
*
@@ -6,12 +7,6 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a9c7c7f41b0c..3ccba37bd6dd 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/marvell.c
*
@@ -8,12 +9,6 @@
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*
* Copyright (c) 2013 Michael Stapelberg <michael@stapelberg.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
@@ -847,7 +842,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
-
/* Select page 18 */
err = marvell_set_page(phydev, 18);
if (err < 0)
@@ -870,21 +864,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
-
- /* There appears to be a bug in the 88e1512 when used in
- * SGMII to copper mode, where the AN advertisement register
- * clears the pause bits each time a negotiation occurs.
- * This means we can never be truely sure what was advertised,
- * so disable Pause support.
- */
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->advertising);
}
return m88e1318_config_init(phydev);
@@ -1046,6 +1025,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
return 0;
}
+/* The VOD can be out of specification on link up. Poke an
+ * undocumented register, in an undocumented page, with a magic value
+ * to fix this.
+ */
+static int m88e6390_errata(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_BMCR,
+ BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (err)
+ return err;
+
+ usleep_range(300, 400);
+
+ err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
+ if (err)
+ return err;
+
+ return genphy_soft_reset(phydev);
+}
+
+static int m88e6390_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ err = m88e6390_errata(phydev);
+ if (err)
+ return err;
+
+ return m88e1510_config_aneg(phydev);
+}
+
/**
* fiber_lpa_mod_linkmode_lpa_t
* @advertising: the linkmode advertisement settings
@@ -1402,7 +1414,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
* before enabling it if !phy_interrupt_is_valid()
*/
if (!phy_interrupt_is_valid(phydev))
- phy_read(phydev, MII_M1011_IEVENT);
+ __phy_read(phydev, MII_M1011_IEVENT);
/* Enable the WOL interrupt */
err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
@@ -2283,7 +2295,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
- .config_aneg = &m88e1510_config_aneg,
+ .config_aneg = &m88e6390_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
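The m88e6390 errata above uses phy_write_paged() so the magic write to page 0xf8 cannot leave the PHY on the wrong page. As a sketch of what that helper amounts to, built from the exported phylib paged-access primitives (phy_select_page() takes the bus lock, phy_restore_page() drops it and keeps the first error):

static int example_write_paged(struct phy_device *phydev, int page,
                               u32 regnum, u16 val)
{
        int oldpage, ret = 0;

        /* Select the target page (this also locks the MDIO bus)... */
        oldpage = phy_select_page(phydev, page);
        if (oldpage >= 0)
                ret = __phy_write(phydev, regnum, val);

        /* ...then restore the previous page and unlock. */
        return phy_restore_page(phydev, oldpage, ret);
}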
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 82ab6ed3b74e..100b401b1f4a 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell 10G 88x3310 PHY driver
*
@@ -26,6 +27,9 @@
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
+#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
+
enum {
MV_PCS_BASE_T = 0x0000,
MV_PCS_BASE_R = 0x1000,
@@ -57,24 +61,6 @@ struct mv3310_priv {
char *hwmon_name;
};
-static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
- u16 mask, u16 bits)
-{
- int old, val, ret;
-
- old = phy_read_mmd(phydev, devad, reg);
- if (old < 0)
- return old;
-
- val = (old & ~mask) | (bits & mask);
- if (val == old)
- return 0;
-
- ret = phy_write_mmd(phydev, devad, reg, val);
-
- return ret < 0 ? ret : 1;
-}
-
#ifdef CONFIG_HWMON
static umode_t mv3310_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type,
@@ -158,10 +144,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
return ret;
val = enable ? MV_V2_TEMP_CTRL_SAMPLE : MV_V2_TEMP_CTRL_DISABLE;
- ret = mv3310_modify(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL,
- MV_V2_TEMP_CTRL_MASK, val);
- return ret < 0 ? ret : 0;
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL,
+ MV_V2_TEMP_CTRL_MASK, val);
}
static void mv3310_hwmon_disable(void *data)
@@ -249,95 +234,58 @@ static int mv3310_resume(struct phy_device *phydev)
return mv3310_hwmon_config(phydev, true);
}
-static int mv3310_config_init(struct phy_device *phydev)
+/* Some PHYs in the Alaska family such as the 88X3310 and the 88E2010
+ * don't set bit 14 in PMA Extended Abilities (1.11), although they do
+ * support 2.5GBASET and 5GBASET. For these models, we can still read their
+ * 2.5G/5G extended abilities register (1.21). We detect these models based on
+ * the PMA device identifier, with a mask matching models known to have this
+ * issue
+ */
+static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
- int val;
+ if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_PMAPMD))
+ return false;
+ /* Only some revisions of the 88X3310 family PMA seem to be impacted */
+ return (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
+ MV_PHY_ALASKA_NBT_QUIRK_MASK) == MV_PHY_ALASKA_NBT_QUIRK_REV;
+}
+
+static int mv3310_config_init(struct phy_device *phydev)
+{
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
phydev->interface != PHY_INTERFACE_MODE_10GKR)
return -ENODEV;
- __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
- __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
-
- if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
- if (val < 0)
- return val;
+ return 0;
+}
- if (val & MDIO_AN_STAT1_ABLE)
- __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
- }
+static int mv3310_get_features(struct phy_device *phydev)
+{
+ int ret, val;
- val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT2);
- if (val < 0)
- return val;
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
- /* Ethtool does not support the WAN mode bits */
- if (val & (MDIO_PMA_STAT2_10GBSR | MDIO_PMA_STAT2_10GBLR |
- MDIO_PMA_STAT2_10GBER | MDIO_PMA_STAT2_10GBLX4 |
- MDIO_PMA_STAT2_10GBSW | MDIO_PMA_STAT2_10GBLW |
- MDIO_PMA_STAT2_10GBEW))
- __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
- if (val & MDIO_PMA_STAT2_10GBSR)
- __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported);
- if (val & MDIO_PMA_STAT2_10GBLR)
- __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported);
- if (val & MDIO_PMA_STAT2_10GBER)
- __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported);
-
- if (val & MDIO_PMA_STAT2_EXTABLE) {
- val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
+ if (mv3310_has_pma_ngbaset_quirk(phydev)) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ MDIO_PMA_NG_EXTABLE);
if (val < 0)
return val;
- if (val & (MDIO_PMA_EXTABLE_10GBT | MDIO_PMA_EXTABLE_1000BT |
- MDIO_PMA_EXTABLE_100BTX | MDIO_PMA_EXTABLE_10BT))
- __set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
- if (val & MDIO_PMA_EXTABLE_10GBLRM)
- __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
- if (val & (MDIO_PMA_EXTABLE_10GBKX4 | MDIO_PMA_EXTABLE_10GBKR |
- MDIO_PMA_EXTABLE_1000BKX))
- __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, supported);
- if (val & MDIO_PMA_EXTABLE_10GBLRM)
- __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
- supported);
- if (val & MDIO_PMA_EXTABLE_10GBT)
- __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- supported);
- if (val & MDIO_PMA_EXTABLE_10GBKX4)
- __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- supported);
- if (val & MDIO_PMA_EXTABLE_10GBKR)
- __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- supported);
- if (val & MDIO_PMA_EXTABLE_1000BT)
- __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- supported);
- if (val & MDIO_PMA_EXTABLE_1000BKX)
- __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- supported);
- if (val & MDIO_PMA_EXTABLE_100BTX) {
- __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- supported);
- __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- supported);
- }
- if (val & MDIO_PMA_EXTABLE_10BT) {
- __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- supported);
- __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- supported);
- }
- }
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_NG_EXTABLE_2_5GBT);
- linkmode_copy(phydev->supported, supported);
- linkmode_and(phydev->advertising, phydev->advertising,
- phydev->supported);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_NG_EXTABLE_5GBT);
+ }
return 0;
}
@@ -351,52 +299,27 @@ static int mv3310_config_aneg(struct phy_device *phydev)
/* We don't support manual MDI control */
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- if (phydev->autoneg == AUTONEG_DISABLE) {
- ret = genphy_c45_pma_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- return genphy_c45_an_disable_aneg(phydev);
- }
-
- linkmode_and(phydev->advertising, phydev->advertising,
- phydev->supported);
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return genphy_c45_pma_setup_forced(phydev);
- ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
- ADVERTISE_ALL | ADVERTISE_100BASE4 |
- ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
- linkmode_adv_to_mii_adv_t(phydev->advertising));
+ ret = genphy_c45_an_config_aneg(phydev);
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
+ /* Clause 45 has no standardized support for 1000BaseT, therefore
+ * use vendor registers for this mode.
+ */
reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
- ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
- ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg);
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg);
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
- /* 10G control register */
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->advertising))
- reg = MDIO_AN_10GBT_CTRL_ADV10G;
- else
- reg = 0;
-
- ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV10G, reg);
- if (ret < 0)
- return ret;
- if (ret > 0)
- changed = true;
-
- if (changed)
- ret = genphy_c45_restart_aneg(phydev);
-
- return ret;
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
}
static int mv3310_aneg_done(struct phy_device *phydev)
@@ -416,18 +339,29 @@ static int mv3310_aneg_done(struct phy_device *phydev)
static void mv3310_update_interface(struct phy_device *phydev)
{
if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_2500BASEX ||
phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
/* The PHY automatically switches its serdes interface (and
- * active PHYXS instance) between Cisco SGMII and 10GBase-KR
- * modes according to the speed. Florian suggests setting
- * phydev->interface to communicate this to the MAC. Only do
- * this if we are already in either SGMII or 10GBase-KR mode.
+ * active PHYXS instance) between Cisco SGMII, 10GBase-KR and
+ * 2500BaseX modes according to the speed. Florian suggests
+ * setting phydev->interface to communicate this to the MAC.
+ * Only do this if we are already in one of the above modes.
*/
- if (phydev->speed == SPEED_10000)
+ switch (phydev->speed) {
+ case SPEED_10000:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
- else if (phydev->speed >= SPEED_10 &&
- phydev->speed < SPEED_10000)
+ break;
+ case SPEED_2500:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case SPEED_1000:
+ case SPEED_100:
+ case SPEED_10:
phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ default:
+ break;
+ }
}
}
@@ -445,16 +379,8 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev)
static int mv3310_read_status(struct phy_device *phydev)
{
- u32 mmd_mask = phydev->c45_ids.devices_in_package;
int val;
- /* The vendor devads do not report link status. Avoid the PHYXS
- * instance as there are three, and its status depends on the MAC
- * being appropriately configured for the negotiated speed.
- */
- mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2) |
- BIT(MDIO_MMD_PHYXS));
-
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
linkmode_zero(phydev->lp_advertising);
@@ -470,12 +396,10 @@ static int mv3310_read_status(struct phy_device *phydev)
if (val & MDIO_STAT1_LSTATUS)
return mv3310_read_10gbr_status(phydev);
- val = genphy_c45_read_link(phydev, mmd_mask);
+ val = genphy_c45_read_link(phydev);
if (val < 0)
return val;
- phydev->link = val > 0 ? 1 : 0;
-
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
if (val < 0)
return val;
@@ -531,11 +455,11 @@ static int mv3310_read_status(struct phy_device *phydev)
static struct phy_driver mv3310_drivers[] = {
{
- .phy_id = 0x002b09aa,
+ .phy_id = MARVELL_PHY_ID_88X3310,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
- .features = PHY_10GBIT_FEATURES,
- .soft_reset = gen10g_no_soft_reset,
+ .get_features = mv3310_get_features,
+ .soft_reset = genphy_no_soft_reset,
.config_init = mv3310_config_init,
.probe = mv3310_probe,
.suspend = mv3310_suspend,
@@ -544,12 +468,25 @@ static struct phy_driver mv3310_drivers[] = {
.aneg_done = mv3310_aneg_done,
.read_status = mv3310_read_status,
},
+ {
+ .phy_id = MARVELL_PHY_ID_88E2110,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "mv88x2110",
+ .get_features = genphy_c45_pma_read_abilities,
+ .probe = mv3310_probe,
+ .soft_reset = genphy_no_soft_reset,
+ .config_init = mv3310_config_init,
+ .config_aneg = mv3310_config_aneg,
+ .aneg_done = mv3310_aneg_done,
+ .read_status = mv3310_read_status,
+ },
};
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
- { 0x002b09aa, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
};
MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
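The NBASE-T quirk above is keyed off the PMA device identifier rather than a capability bit: MV_PHY_ALASKA_NBT_QUIRK_MASK (0xfffffffe) drops the lowest revision bit, so the comparison against MV_PHY_ALASKA_NBT_QUIRK_REV matches two adjacent revisions of the 88X3310 PMA. A standalone restatement of that test (the function name is illustrative):

static bool example_id_has_nbt_quirk(u32 pma_id)
{
        /* Clearing bit 0 of the ID before comparing means both the
         * ...a and ...b revisions of the affected PMA are caught.
         */
        return (pma_id & MV_PHY_ALASKA_NBT_QUIRK_MASK) ==
               MV_PHY_ALASKA_NBT_QUIRK_REV;
}

When the quirk applies, mv3310_get_features() reads the 2.5G/5G extended-abilities register directly and sets the 2500baseT/5000baseT link modes from the MDIO_PMA_NG_EXTABLE_* bits, as shown in the hunk.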
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index 46fe1ae919a3..7d0f388d8db8 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index df75efa96a7d..8295bc7c8c20 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Broadcom UniMAC MDIO bus controller driver
*
* Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 15352f987bdf..5136275c8e73 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Bitbanged MDIO support.
*
@@ -11,10 +12,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -232,4 +229,4 @@ void free_mdio_bitbang(struct mii_bus *bus)
}
EXPORT_SYMBOL(free_mdio_bitbang);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 863496fa5d13..d9b54c67ef9f 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mdio-boardinfo - Collect pre-declarations for MDIO devices
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/phy/mdio-cavium.c
index 6df2fa755bb4..1afd6fc1a351 100644
--- a/drivers/net/phy/mdio-cavium.c
+++ b/drivers/net/phy/mdio-cavium.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2009-2016 Cavium, Inc.
*/
@@ -150,4 +147,4 @@ EXPORT_SYMBOL(cavium_mdiobus_write);
MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers");
MODULE_AUTHOR("David Daney");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h
index 4bccd45d24e2..ed5f9bb5448d 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/phy/mdio-cavium.h
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2009-2016 Cavium, Inc.
*/
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index ea9a0e339778..1b00235d7dc5 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO based MDIO bitbang driver.
* Supports OpenFirmware.
@@ -14,10 +15,6 @@
*
* 2005 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/module.h>
@@ -216,5 +213,5 @@ module_platform_driver(mdio_gpio_driver);
MODULE_ALIAS("platform:mdio-gpio");
MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic driver for MDIO bus emulation using GPIO");
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c
index b03fedd6c1d8..287f3ccf1da1 100644
--- a/drivers/net/phy/mdio-hisi-femac.c
+++ b/drivers/net/phy/mdio-hisi-femac.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Hisilicon Fast Ethernet MDIO Bus Driver
*
* Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
@@ -163,4 +151,4 @@ module_platform_driver(hisi_femac_mdio_driver);
MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver");
MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c
index 6d24fd13ca86..0dce67672548 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/phy/mdio-i2c.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MDIO I2C bridge
*
* Copyright (C) 2015-2016 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Network PHYs can appear on I2C buses when they are part of SFP module.
* This driver exposes these PHYs to the networking PHY code, allowing
* our PHY drivers access to these PHYs, and so allowing configuration
diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h
index 889ab57d7f3e..751dab281f57 100644
--- a/drivers/net/phy/mdio-i2c.h
+++ b/drivers/net/phy/mdio-i2c.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MDIO I2C bridge
*
* Copyright (C) 2015 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MDIO_I2C_H
#define MDIO_I2C_H
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
index 5bb56d126693..af3910fe8ec7 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/phy/mdio-moxart.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/* MOXA ART Ethernet (RTL8201CP) MDIO interface driver
*
* Copyright (C) 2013 Jonas Jensen <jonas.jensen@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/delay.h>
@@ -190,4 +187,4 @@ module_platform_driver(moxart_mdio_driver);
MODULE_DESCRIPTION("MOXA ART MDIO interface driver");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 696bdf1e4576..88d409e48c1f 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation (the "GPL").
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 (GPLv2) for more details.
- *
- * You should have received a copy of the GNU General Public License
- * version 2 (GPLv2) along with this source code.
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index fe34576262bd..6c8960df43b0 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
@@ -103,4 +100,4 @@ module_platform_driver(mdio_mux_gpio_driver);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("David Daney");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 70f6115530af..d1a8780e24d8 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Simple memory-mapped device MDIO MUX driver
*
* Author: Timur Tabi <timur@freescale.com>
*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/platform_device.h>
diff --git a/drivers/net/phy/mdio-mux-multiplexer.c b/drivers/net/phy/mdio-mux-multiplexer.c
new file mode 100644
index 000000000000..d6564381aa3e
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-multiplexer.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* MDIO bus multiplexer using kernel multiplexer subsystem
+ *
+ * Copyright 2019 NXP
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+
+struct mdio_mux_multiplexer_state {
+ struct mux_control *muxc;
+ bool do_deselect;
+ void *mux_handle;
+};
+
+/**
+ * mdio_mux_multiplexer_switch_fn - This function is called by the mdio-mux
+ * layer when it thinks the mdio bus
+ * multiplexer needs to switch.
+ * @current_child: current value of the mux register.
+ * @desired_child: value of the 'reg' property of the target child MDIO node.
+ * @data: Private data used by this switch_fn passed to mdio_mux_init function
+ * via mdio_mux_init(.., .., .., .., data, ..).
+ *
+ * The first time this function is called, current_child == -1.
+ * If current_child == desired_child, then the mux is already set to the
+ * correct bus.
+ */
+static int mdio_mux_multiplexer_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct platform_device *pdev;
+ struct mdio_mux_multiplexer_state *s;
+ int ret = 0;
+
+ pdev = (struct platform_device *)data;
+ s = platform_get_drvdata(pdev);
+
+ if (!(current_child ^ desired_child))
+ return 0;
+
+ if (s->do_deselect)
+ ret = mux_control_deselect(s->muxc);
+ if (ret) {
+ dev_err(&pdev->dev, "mux_control_deselect failed in %s: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = mux_control_select(s->muxc, desired_child);
+ if (!ret) {
+ dev_dbg(&pdev->dev, "%s %d -> %d\n", __func__, current_child,
+ desired_child);
+ s->do_deselect = true;
+ } else {
+ s->do_deselect = false;
+ }
+
+ return ret;
+}
+
+static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mdio_mux_multiplexer_state *s;
+ int ret = 0;
+
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->muxc = devm_mux_control_get(dev, NULL);
+ if (IS_ERR(s->muxc)) {
+ ret = PTR_ERR(s->muxc);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, s);
+
+ ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
+ mdio_mux_multiplexer_switch_fn, &s->mux_handle,
+ pdev, NULL);
+
+ return ret;
+}
+
+static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
+{
+ struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev);
+
+ mdio_mux_uninit(s->mux_handle);
+
+ if (s->do_deselect)
+ mux_control_deselect(s->muxc);
+
+ return 0;
+}
+
+static const struct of_device_id mdio_mux_multiplexer_match[] = {
+ { .compatible = "mdio-mux-multiplexer", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mdio_mux_multiplexer_match);
+
+static struct platform_driver mdio_mux_multiplexer_driver = {
+ .driver = {
+ .name = "mdio-mux-multiplexer",
+ .of_match_table = mdio_mux_multiplexer_match,
+ },
+ .probe = mdio_mux_multiplexer_probe,
+ .remove = mdio_mux_multiplexer_remove,
+};
+
+module_platform_driver(mdio_mux_multiplexer_driver);
+
+MODULE_DESCRIPTION("MDIO bus multiplexer using kernel multiplexer subsystem");
+MODULE_AUTHOR("Pankaj Bansal <pankaj.bansal@nxp.com>");
+MODULE_LICENSE("GPL");
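The switch_fn contract documented in the new driver above is shared by every mdio-mux back end: the core passes -1 as current_child on the first call, and the callback only has to act when the desired child differs. A minimal sketch of a conforming callback; struct example_mux and example_select_child() are placeholders for whatever actually flips the mux hardware:

static int example_switch_fn(int current_child, int desired_child,
                             void *data)
{
        struct example_mux *mux = data;

        /* Nothing to do if the mux already points at the right bus;
         * this also covers repeated selections of the same child.
         */
        if (current_child == desired_child)
                return 0;

        return example_select_child(mux, desired_child);
}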
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 0a86f1e4c02f..6a1d3540210b 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
@@ -210,4 +207,4 @@ EXPORT_SYMBOL_GPL(mdio_mux_uninit);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index ab6914f8bd50..8327382aa568 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2009-2015 Cavium, Inc.
*/
@@ -122,4 +119,4 @@ module_platform_driver(octeon_mdiobus_driver);
MODULE_DESCRIPTION("Cavium OCTEON MDIO bus driver");
MODULE_AUTHOR("David Daney");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 6425ce04d3f9..20ffd8fb79ce 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Allwinner EMAC MDIO interface driver
*
@@ -6,10 +7,6 @@
*
* Based on the Linux driver provided by Allwinner:
* Copyright (C) 1997 Sten Wang
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/delay.h>
@@ -179,4 +176,4 @@ module_platform_driver(sun4i_mdio_driver);
MODULE_DESCRIPTION("Allwinner EMAC MDIO interface driver");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c
index 1546f6398831..b6128ae7f14f 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/phy/mdio-thunder.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2009-2016 Cavium, Inc.
*/
@@ -151,4 +148,4 @@ static struct pci_driver thunder_mdiobus_driver = {
module_pci_driver(thunder_mdiobus_driver);
MODULE_DESCRIPTION("Cavium ThunderX MDIO bus driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 07c6048200c6..717cc2a056e8 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Applied Micro X-Gene SoC MDIO Driver
*
* Copyright (c) 2016, Applied Micro Circuits Corporation
* Author: Iyappan Subramanian <isubramanian@apm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index 3c85f3e30baa..b1f5ccb4ad9c 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Applied Micro X-Gene SoC MDIO Driver
*
* Copyright (c) 2016, Applied Micro Circuits Corporation
* Author: Iyappan Subramanian <isubramanian@apm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDIO_XGENE_H__
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e59a8419b17..4be4cc09eb90 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/* MDIO Bus interface
*
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -39,8 +34,6 @@
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <asm/irq.h>
-
#define CREATE_TRACE_POINTS
#include <trace/events/mdio.h>
@@ -55,11 +48,12 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode,
"reset-gpios", 0, GPIOD_OUT_LOW,
"PHY reset");
- if (PTR_ERR(gpiod) == -ENOENT ||
- PTR_ERR(gpiod) == -ENOSYS)
- gpiod = NULL;
- else if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
+ if (IS_ERR(gpiod)) {
+ if (PTR_ERR(gpiod) == -ENOENT || PTR_ERR(gpiod) == -ENOSYS)
+ gpiod = NULL;
+ else
+ return PTR_ERR(gpiod);
+ }
mdiodev->reset = gpiod;
@@ -379,7 +373,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
- put_device(&bus->dev);
return -EINVAL;
}
@@ -390,6 +383,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
if (IS_ERR(gpiod)) {
dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
bus->id);
+ device_del(&bus->dev);
return PTR_ERR(gpiod);
} else if (gpiod) {
bus->reset_gpiod = gpiod;
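The reworked branch above treats -ENOENT and -ENOSYS from the reset-GPIO lookup as "no reset line wired up" and carries on with a NULL descriptor, while any other error is propagated (and, in the later hunk, now also unwinds the registered device with device_del()). Below is a small user-space model of that decision, built on simplified ERR_PTR()/IS_ERR()/PTR_ERR() helpers; fake_get_reset_gpiod() and its outcome table are invented for illustration.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int dummy_desc;

/* Stand-in for the GPIO lookup: a descriptor on success, ERR_PTR() on error */
static void *fake_get_reset_gpiod(int outcome)
{
    switch (outcome) {
    case 0: return &dummy_desc;          /* reset line is wired up */
    case 1: return ERR_PTR(-ENOENT);     /* no "reset-gpios" property */
    case 2: return ERR_PTR(-ENOSYS);     /* GPIO support not built in */
    default: return ERR_PTR(-EINVAL);    /* genuine lookup failure */
    }
}

/* Mirrors the shape of the restructured branch above */
static int register_reset_gpiod(int outcome, void **reset_out)
{
    void *gpiod = fake_get_reset_gpiod(outcome);

    if (IS_ERR(gpiod)) {
        if (PTR_ERR(gpiod) == -ENOENT || PTR_ERR(gpiod) == -ENOSYS)
            gpiod = NULL;               /* optional: carry on without reset */
        else
            return PTR_ERR(gpiod);      /* hard error: propagate */
    }
    *reset_out = gpiod;
    return 0;
}

int main(void)
{
    int outcome;

    for (outcome = 0; outcome < 4; outcome++) {
        void *reset = NULL;
        int ret = register_reset_gpiod(outcome, &reset);

        printf("outcome %d: ret=%d reset=%s\n",
               outcome, ret, reset ? "set" : "NULL");
    }
    return 0;
}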
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index c924700cf37b..887076292e50 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Framework for MDIO devices, other than PHYs.
*
* Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index b03bcf2c388a..a238388eb1a5 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Amlogic Meson GXL Internal PHY Driver
*
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
* Copyright (C) 2016 BayLibre, SAS. All rights reserved.
* Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -233,6 +223,7 @@ static struct phy_driver meson_gxl_phy[] = {
.name = "Meson GXL Internal PHY",
.features = PHY_BASIC_FEATURES,
.flags = PHY_IS_INTERNAL,
+ .soft_reset = genphy_soft_reset,
.config_init = meson_gxl_config_init,
.aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c33384710d26..352da24f1f33 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/micrel.c
*
@@ -8,11 +9,6 @@
* Copyright (c) 2010-2013 Micrel, Inc.
* Copyright (c) 2014 Johan Hovold <johan@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Support : Micrel Phys:
* Giga phys: ksz9021, ksz9031, ksz9131
* 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041
@@ -344,6 +340,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz8061_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ return kszphy_config_init(phydev);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
@@ -426,9 +433,6 @@ static int ksz9021_config_init(struct phy_device *phydev)
return 0;
}
-#define MII_KSZ9031RN_MMD_CTRL_REG 0x0d
-#define MII_KSZ9031RN_MMD_REGDATA_REG 0x0e
-#define OP_DATA 1
#define KSZ9031_PS_TO_REG 60
/* Extended registers */
@@ -446,24 +450,6 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define MII_KSZ9031RN_EDPD 0x23
#define MII_KSZ9031RN_EDPD_ENABLE BIT(0)
-static int ksz9031_extended_write(struct phy_device *phydev,
- u8 mode, u32 dev_addr, u32 regnum, u16 val)
-{
- phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
- phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
- phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
- return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
-}
-
-static int ksz9031_extended_read(struct phy_device *phydev,
- u8 mode, u32 dev_addr, u32 regnum)
-{
- phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
- phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
- phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
- return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
-}
-
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg, size_t field_sz,
@@ -484,7 +470,7 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
return 0;
if (matches < numfields)
- newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ newval = phy_read_mmd(phydev, 2, reg);
else
newval = 0;
@@ -498,7 +484,7 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
<< (field_sz * i));
}
- return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+ return phy_write_mmd(phydev, 2, reg, newval);
}
/* Center KSZ9031RNX FLP timing at 16ms. */
@@ -506,13 +492,13 @@ static int ksz9031_center_flp_timing(struct phy_device *phydev)
{
int result;
- result = ksz9031_extended_write(phydev, OP_DATA, 0,
- MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+ result = phy_write_mmd(phydev, 0, MII_KSZ9031RN_FLP_BURST_TX_HI,
+ 0x0006);
if (result)
return result;
- result = ksz9031_extended_write(phydev, OP_DATA, 0,
- MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
+ result = phy_write_mmd(phydev, 0, MII_KSZ9031RN_FLP_BURST_TX_LO,
+ 0x1A80);
if (result)
return result;
@@ -524,11 +510,11 @@ static int ksz9031_enable_edpd(struct phy_device *phydev)
{
int reg;
- reg = ksz9031_extended_read(phydev, OP_DATA, 0x1C, MII_KSZ9031RN_EDPD);
+ reg = phy_read_mmd(phydev, 0x1C, MII_KSZ9031RN_EDPD);
if (reg < 0)
return reg;
- return ksz9031_extended_write(phydev, OP_DATA, 0x1C, MII_KSZ9031RN_EDPD,
- reg | MII_KSZ9031RN_EDPD_ENABLE);
+ return phy_write_mmd(phydev, 0x1C, MII_KSZ9031RN_EDPD,
+ reg | MII_KSZ9031RN_EDPD_ENABLE);
}
static int ksz9031_config_init(struct phy_device *phydev)
@@ -654,7 +640,7 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
return 0;
if (matches < numfields)
- newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ newval = phy_read_mmd(phydev, 2, reg);
else
newval = 0;
@@ -668,7 +654,7 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
<< (field_sz * i));
}
- return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+ return phy_write_mmd(phydev, 2, reg, newval);
}
static int ksz9131_config_init(struct phy_device *phydev)
@@ -1040,7 +1026,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .config_init = kszphy_config_init,
+ .config_init = ksz8061_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
@@ -1070,6 +1056,7 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
+ .soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1098,6 +1085,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
+ .features = PHY_BASIC_FEATURES,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
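The micrel.c hunks above replace the driver-private ksz9031_extended_read()/ksz9031_extended_write() helpers with the core phy_read_mmd()/phy_write_mmd() accessors, which carry out the same Clause 22 indirect MMD sequence (control register 0x0d, address/data register 0x0e, function bit at position 14) under the bus lock. The sketch below models that sequence against a fake register file; the register sizes and the example register number are invented, only the access protocol follows the removed helpers.

#include <stdio.h>
#include <stdint.h>

#define MII_MMD_CTRL   0x0d          /* MMD access control: function + devad */
#define MII_MMD_DATA   0x0e          /* MMD access address/data */
#define MMD_CTRL_ADDR  (0u << 14)    /* next MII_MMD_DATA write latches the address */
#define MMD_CTRL_DATA  (1u << 14)    /* next MII_MMD_DATA access is data */

/* Tiny fake PHY: 32 Clause 22 registers plus a few MMD registers per devad */
static uint16_t c22[32];
static uint16_t mmd[32][64];
static unsigned int cur_devad, cur_addr;

static void c22_write(unsigned int reg, uint16_t val)
{
    if (reg == MII_MMD_CTRL) {
        cur_devad = val & 0x1f;
        c22[reg] = val;
    } else if (reg == MII_MMD_DATA) {
        if ((c22[MII_MMD_CTRL] & 0xc000) == MMD_CTRL_ADDR)
            cur_addr = val % 64;             /* latch the MMD register number */
        else
            mmd[cur_devad][cur_addr] = val;  /* data write */
    } else {
        c22[reg] = val;
    }
}

static uint16_t c22_read(unsigned int reg)
{
    if (reg == MII_MMD_DATA &&
        (c22[MII_MMD_CTRL] & 0xc000) == MMD_CTRL_DATA)
        return mmd[cur_devad][cur_addr];
    return c22[reg];
}

/* What both the removed ksz9031 helpers and the core accessors boil down to */
static void mmd_write(unsigned int devad, unsigned int regnum, uint16_t val)
{
    c22_write(MII_MMD_CTRL, devad);                 /* select devad, address mode */
    c22_write(MII_MMD_DATA, regnum);                /* latch register number */
    c22_write(MII_MMD_CTRL, MMD_CTRL_DATA | devad); /* switch to data mode */
    c22_write(MII_MMD_DATA, val);                   /* write the value */
}

static uint16_t mmd_read(unsigned int devad, unsigned int regnum)
{
    c22_write(MII_MMD_CTRL, devad);
    c22_write(MII_MMD_DATA, regnum);
    c22_write(MII_MMD_CTRL, MMD_CTRL_DATA | devad);
    return c22_read(MII_MMD_DATA);
}

int main(void)
{
    mmd_write(2, 8, 0x1A80);    /* arbitrary MMD register, value from the hunk above */
    printf("MMD 2, reg 8 = 0x%04x\n", mmd_read(2, 8));
    return 0;
}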
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 7557bebd5d7f..c6cbb3aa8ae0 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Microchip Technology
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 3949fe299b18..db50efb30df5 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Driver for Microsemi VSC85xx PHYs
*
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 139bed2c8ab4..42282a86b680 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/national.c
*
@@ -7,12 +8,6 @@
* Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*
* Copyright (c) 2008 STMicroelectronics Limited
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 03af927fa5ad..9e24d9569424 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -47,6 +47,16 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
/* Assume 1000base-T */
ctrl2 |= MDIO_PMA_CTRL2_1000BT;
break;
+ case SPEED_2500:
+ ctrl1 |= MDIO_CTRL1_SPEED2_5G;
+ /* Assume 2.5Gbase-T */
+ ctrl2 |= MDIO_PMA_CTRL2_2_5GBT;
+ break;
+ case SPEED_5000:
+ ctrl1 |= MDIO_CTRL1_SPEED5G;
+ /* Assume 5Gbase-T */
+ ctrl2 |= MDIO_PMA_CTRL2_5GBT;
+ break;
case SPEED_10000:
ctrl1 |= MDIO_CTRL1_SPEED10G;
/* Assume 10Gbase-T */
@@ -60,11 +70,60 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev)
if (ret < 0)
return ret;
- return phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
+ if (ret < 0)
+ return ret;
+
+ return genphy_c45_an_disable_aneg(phydev);
}
EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
/**
+ * genphy_c45_an_config_aneg - configure advertisement registers
+ * @phydev: target phy_device struct
+ *
+ * Configure advertisement registers based on modes set in phydev->advertising
+ *
+ * Returns negative errno code on failure, 0 if advertisement didn't change,
+ * or 1 if advertised modes changed.
+ */
+int genphy_c45_an_config_aneg(struct phy_device *phydev)
+{
+ int changed, ret;
+ u32 adv;
+
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ changed = genphy_config_eee_advert(phydev);
+
+ adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_100BASE4 |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
+ adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = 1;
+
+ adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV10G |
+ MDIO_AN_10GBT_CTRL_ADV5G |
+ MDIO_AN_10GBT_CTRL_ADV2_5G, adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = 1;
+
+ return changed;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg);
+
+/**
* genphy_c45_an_disable_aneg - disable auto-negotiation
* @phydev: target phy_device struct
*
@@ -75,15 +134,9 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
*/
int genphy_c45_an_disable_aneg(struct phy_device *phydev)
{
- int val;
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
- if (val < 0)
- return val;
-
- val &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
-
- return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val);
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
}
EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
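genphy_c45_an_config_aneg() in the hunk above converts the advertising linkmode mask into register words with linkmode_adv_to_mii_adv_t() and friends before applying them with phy_modify_mmd_changed(). The user-space sketch below shows only the packing step, using the standard MII advertisement bit values for a handful of modes; the plain bitmask standing in for the kernel's linkmode arrays is a simplification.

#include <stdio.h>
#include <stdint.h>

/* Standard MII advertisement register bits (Clause 22 register 4; the
 * Clause 45 BASE-T AN advertisement register reuses this layout) */
#define ADVERTISE_10HALF     0x0020
#define ADVERTISE_10FULL     0x0040
#define ADVERTISE_100HALF    0x0080
#define ADVERTISE_100FULL    0x0100
#define ADVERTISE_PAUSE_CAP  0x0400
#define ADVERTISE_PAUSE_ASYM 0x0800

/* Stand-in for the kernel's linkmode bits (normally a bitmap, not a u32) */
enum {
    LM_10_HALF  = 1 << 0,
    LM_10_FULL  = 1 << 1,
    LM_100_HALF = 1 << 2,
    LM_100_FULL = 1 << 3,
    LM_PAUSE    = 1 << 4,
    LM_ASYM     = 1 << 5,
};

/* Rough equivalent of linkmode_adv_to_mii_adv_t() for these few modes */
static uint16_t adv_to_mii(unsigned int linkmodes)
{
    uint16_t adv = 0;

    if (linkmodes & LM_10_HALF)  adv |= ADVERTISE_10HALF;
    if (linkmodes & LM_10_FULL)  adv |= ADVERTISE_10FULL;
    if (linkmodes & LM_100_HALF) adv |= ADVERTISE_100HALF;
    if (linkmodes & LM_100_FULL) adv |= ADVERTISE_100FULL;
    if (linkmodes & LM_PAUSE)    adv |= ADVERTISE_PAUSE_CAP;
    if (linkmodes & LM_ASYM)     adv |= ADVERTISE_PAUSE_ASYM;

    return adv;
}

int main(void)
{
    unsigned int advertising = LM_100_FULL | LM_100_HALF | LM_PAUSE;

    printf("MII advertisement word: 0x%04x\n", adv_to_mii(advertising));
    return 0;
}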
@@ -97,17 +150,40 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
*/
int genphy_c45_restart_aneg(struct phy_device *phydev)
{
- int val;
+ return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
- if (val < 0)
- return val;
+/**
+ * genphy_c45_check_and_restart_aneg - Enable and restart auto-negotiation
+ * @phydev: target phy_device struct
+ * @restart: whether aneg restart is requested
+ *
+ * This assumes that the auto-negotiation MMD is present.
+ *
+ * Check, and restart auto-negotiation if needed.
+ */
+int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart)
+{
+ int ret = 0;
+
+ if (!restart) {
+ /* Restart aneg if it was not already enabled */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
- val |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
+ if (!(ret & MDIO_AN_CTRL1_ENABLE))
+ restart = true;
+ }
+
+ if (restart)
+ ret = genphy_c45_restart_aneg(phydev);
- return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val);
+ return ret;
}
-EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
+EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg);
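genphy_c45_check_and_restart_aneg() only writes MDIO_CTRL1 when needed: it restarts when the caller asks for it, or when it finds the auto-negotiation enable bit clear. A compact user-space model of that decision over a fake control register follows; the bit values mirror the usual aneg enable/restart positions but should be read as illustrative.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define AN_ENABLE  0x1000   /* illustrative: auto-negotiation enable */
#define AN_RESTART 0x0200   /* illustrative: auto-negotiation restart */

static uint16_t an_ctrl1;   /* fake MDIO_MMD_AN / MDIO_CTRL1 register */

static int restart_aneg(void)
{
    an_ctrl1 |= AN_ENABLE | AN_RESTART;  /* like genphy_c45_restart_aneg() */
    return 0;
}

/* Restart if asked for, or if auto-negotiation turns out to be disabled */
static int check_and_restart_aneg(bool restart)
{
    if (!restart && !(an_ctrl1 & AN_ENABLE))
        restart = true;

    return restart ? restart_aneg() : 0;
}

int main(void)
{
    an_ctrl1 = 0;
    check_and_restart_aneg(false);       /* not enabled -> restart anyway */
    printf("ctrl1 after first call:  0x%04x\n", an_ctrl1);

    an_ctrl1 = AN_ENABLE;                /* already enabled, no restart asked */
    check_and_restart_aneg(false);
    printf("ctrl1 after second call: 0x%04x\n", an_ctrl1);
    return 0;
}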
/**
* genphy_c45_aneg_done - return auto-negotiation complete status
@@ -131,25 +207,33 @@ EXPORT_SYMBOL_GPL(genphy_c45_aneg_done);
/**
* genphy_c45_read_link - read the overall link status from the MMDs
* @phydev: target phy_device struct
- * @mmd_mask: MMDs to read status from
*
* Read the link status from the specified MMDs, and if they all indicate
- * that the link is up, return positive. If an error is encountered,
+ * that the link is up, set phydev->link to 1. If an error is encountered,
* a negative errno will be returned, otherwise zero.
*/
-int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
+int genphy_c45_read_link(struct phy_device *phydev)
{
+ u32 mmd_mask = MDIO_DEVS_PMAPMD;
int val, devad;
bool link = true;
- while (mmd_mask) {
+ while (mmd_mask && link) {
devad = __ffs(mmd_mask);
mmd_mask &= ~BIT(devad);
/* The link state is latched low so that momentary link
- * drops can be detected. Do not double-read the status
- * register if the link is down.
+ * drops can be detected. Do not double-read the status
+ * in polling mode to detect such short link drops.
*/
+ if (!phy_polling_mode(phydev)) {
+ val = phy_read_mmd(phydev, devad, MDIO_STAT1);
+ if (val < 0)
+ return val;
+ else if (val & MDIO_STAT1_LSTATUS)
+ continue;
+ }
+
val = phy_read_mmd(phydev, devad, MDIO_STAT1);
if (val < 0)
return val;
@@ -158,7 +242,9 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
link = false;
}
- return link;
+ phydev->link = link;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_read_link);
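The genphy_c45_read_link() rework above leans on the fact that the link-status bit is latched low: after a drop the register keeps reporting "down" until it is read, even if the link has already recovered. The model below shows why the non-polling path may read the register twice while the polling path deliberately reads it once, so a short drop is still reported for one poll cycle; the latch model and names are illustrative.

#include <stdio.h>
#include <stdbool.h>

/* A latched-low link bit: reading returns the latched value and re-arms
 * the latch with the live link state. */
static bool live_link = true;
static bool latched_link = true;

static bool read_lstatus(void)
{
    bool val = latched_link;

    latched_link = live_link;   /* latch clears on read */
    return val;
}

static void momentary_drop(void)
{
    latched_link = false;       /* the drop is captured by the latch... */
    live_link = true;           /* ...even though the link came back */
}

int main(void)
{
    bool link;

    /* Polling mode: single read, so the short drop is visible once */
    momentary_drop();
    printf("poll sees link: %d\n", read_lstatus());   /* 0 */
    printf("next poll sees: %d\n", read_lstatus());   /* 1 */

    /* Interrupt mode: a first read that says "up" can be trusted;
     * if it says "down", read again for the current state. */
    momentary_drop();
    link = read_lstatus();
    if (!link)
        link = read_lstatus();
    printf("irq path resolves link to: %d\n", link);  /* 1 */
    return 0;
}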
@@ -181,7 +267,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
if (val < 0)
return val;
- mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, val);
+ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, val);
phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
@@ -190,9 +276,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
if (val < 0)
return val;
- if (val & MDIO_AN_10GBT_STAT_LP10G)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->lp_advertising);
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, val);
return 0;
}
@@ -220,6 +304,12 @@ int genphy_c45_read_pma(struct phy_device *phydev)
case MDIO_PMA_CTRL1_SPEED1000:
phydev->speed = SPEED_1000;
break;
+ case MDIO_CTRL1_SPEED2_5G:
+ phydev->speed = SPEED_2500;
+ break;
+ case MDIO_CTRL1_SPEED5G:
+ phydev->speed = SPEED_5000;
+ break;
case MDIO_CTRL1_SPEED10G:
phydev->speed = SPEED_10000;
break;
@@ -267,75 +357,162 @@ int genphy_c45_read_mdix(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
-/* The gen10g_* functions are the old Clause 45 stub */
-
-int gen10g_config_aneg(struct phy_device *phydev)
+/**
+ * genphy_c45_pma_read_abilities - read supported link modes from PMA
+ * @phydev: target phy_device struct
+ *
+ * Read the supported link modes from the PMA Status 2 (1.8) register. If bit
+ * 1.8.9 is set, the list of supported modes is build using the values in the
+ * PMA Extended Abilities (1.11) register, indicating 1000BASET an 10G related
+ * modes. If bit 1.11.14 is set, then the list is also extended with the modes
+ * in the 2.5G/5G PMA Extended register (1.21), indicating if 2.5GBASET and
+ * 5GBASET are supported.
+ */
+int genphy_c45_pma_read_abilities(struct phy_device *phydev)
{
- return 0;
-}
-EXPORT_SYMBOL_GPL(gen10g_config_aneg);
+ int val;
-int gen10g_read_status(struct phy_device *phydev)
-{
- u32 mmd_mask = phydev->c45_ids.devices_in_package;
- int ret;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
- /* For now just lie and say it's 10G all the time */
- phydev->speed = SPEED_10000;
- phydev->duplex = DUPLEX_FULL;
+ if (val & MDIO_AN_STAT1_ABLE)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported);
+ }
- /* Avoid reading the vendor MMDs */
- mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2));
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT2);
+ if (val < 0)
+ return val;
- ret = genphy_c45_read_link(phydev, mmd_mask);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_STAT2_10GBSR);
- phydev->link = ret > 0 ? 1 : 0;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_STAT2_10GBLR);
- return 0;
-}
-EXPORT_SYMBOL_GPL(gen10g_read_status);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_STAT2_10GBER);
+
+ if (val & MDIO_PMA_STAT2_EXTABLE) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10GBLRM);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10GBT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10GBKX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10GBKR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_1000BT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_1000BKX);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_100BTX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_100BTX);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10BT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10BT);
+
+ if (val & MDIO_PMA_EXTABLE_NBT) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ MDIO_PMA_NG_EXTABLE);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_NG_EXTABLE_2_5GBT);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_NG_EXTABLE_5GBT);
+ }
+ }
-int gen10g_no_soft_reset(struct phy_device *phydev)
-{
- /* Do nothing for now */
return 0;
}
-EXPORT_SYMBOL_GPL(gen10g_no_soft_reset);
+EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities);
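genphy_c45_pma_read_abilities() translates individual ability bits into linkmode capabilities with linkmode_mod_bit(), which sets or clears a bit depending on a condition. Below is a user-space equivalent over a plain bitmask; the register layout and bit positions are invented, only the set-or-clear pattern matches the kernel helper.

#include <stdio.h>
#include <stdint.h>

/* Illustrative capability bits in our "supported" mask */
enum {
    CAP_10GBASE_T  = 1u << 0,
    CAP_1000BASE_T = 1u << 1,
    CAP_100BASE_TX = 1u << 2,
};

/* Same idea as linkmode_mod_bit(): set the bit if cond, otherwise clear it */
static void mod_bit(uint32_t *mask, uint32_t bit, int cond)
{
    if (cond)
        *mask |= bit;
    else
        *mask &= ~bit;
}

int main(void)
{
    /* Invented "extended abilities" register with two feature bits set */
    uint16_t extable = 0x0005;   /* bit 0: 10GBASE-T, bit 2: 100BASE-TX */
    uint32_t supported = 0;

    mod_bit(&supported, CAP_10GBASE_T,  extable & 0x0001);
    mod_bit(&supported, CAP_1000BASE_T, extable & 0x0002);
    mod_bit(&supported, CAP_100BASE_TX, extable & 0x0004);

    printf("supported mask: 0x%02x\n", (unsigned int)supported);  /* 0x05 */
    return 0;
}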
-int gen10g_config_init(struct phy_device *phydev)
+/**
+ * genphy_c45_read_status - read PHY status
+ * @phydev: target phy_device struct
+ *
+ * Reads status from PHY and sets phy_device members accordingly.
+ */
+int genphy_c45_read_status(struct phy_device *phydev)
{
- /* Temporarily just say we support everything */
- linkmode_zero(phydev->supported);
+ int ret;
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->supported);
- linkmode_copy(phydev->advertising, phydev->supported);
+ ret = genphy_c45_read_link(phydev);
+ if (ret)
+ return ret;
- return 0;
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+ } else {
+ ret = genphy_c45_read_pma(phydev);
+ }
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(gen10g_config_init);
+EXPORT_SYMBOL_GPL(genphy_c45_read_status);
-int gen10g_suspend(struct phy_device *phydev)
+/* The gen10g_* functions are the old Clause 45 stub */
+
+int gen10g_config_aneg(struct phy_device *phydev)
{
return 0;
}
-EXPORT_SYMBOL_GPL(gen10g_suspend);
+EXPORT_SYMBOL_GPL(gen10g_config_aneg);
-int gen10g_resume(struct phy_device *phydev)
+static int gen10g_read_status(struct phy_device *phydev)
{
- return 0;
+ /* For now just lie and say it's 10G all the time */
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+
+ return genphy_c45_read_link(phydev);
}
-EXPORT_SYMBOL_GPL(gen10g_resume);
struct phy_driver genphy_10g_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic 10G PHY",
- .soft_reset = gen10g_no_soft_reset,
- .config_init = gen10g_config_init,
+ .soft_reset = genphy_no_soft_reset,
.features = PHY_10GBIT_FEATURES,
.config_aneg = gen10g_config_aneg,
.read_status = gen10g_read_status,
- .suspend = gen10g_suspend,
- .resume = gen10g_resume,
};
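With the old gen10g_* stubs reduced to the generic 10G driver above, a Clause 45 PHY driver is expected to wire the new genphy_c45_* helpers into its phy_driver ops itself. The fragment below is an illustrative sketch of that wiring, not buildable outside a kernel tree; the PHY ID, the example_* names and the exact selection of ops are assumptions, only the helper functions come from this patch.

/* Sketch only: a minimal Clause 45 driver built on the helpers above */
#include <linux/module.h>
#include <linux/phy.h>

static int example_config_aneg(struct phy_device *phydev)
{
    int ret;

    if (phydev->autoneg == AUTONEG_DISABLE)
        return genphy_c45_pma_setup_forced(phydev);

    ret = genphy_c45_an_config_aneg(phydev);
    if (ret < 0)
        return ret;

    /* ret > 0 means the advertisement changed and aneg must be restarted */
    return genphy_c45_check_and_restart_aneg(phydev, ret > 0);
}

static struct phy_driver example_c45_driver[] = {
    {
        .phy_id      = 0x00112233,           /* made-up ID */
        .phy_id_mask = 0xfffffff0,
        .name        = "Example 10G PHY (sketch)",
        .features    = PHY_10GBIT_FEATURES,
        .soft_reset  = genphy_no_soft_reset,
        .config_aneg = example_config_aneg,
        .aneg_done   = genphy_c45_aneg_done,
        .read_status = genphy_c45_read_status,
    },
};

module_phy_driver(example_c45_driver);
MODULE_LICENSE("GPL v2");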
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 20fbd5eb56fd..5016cd5fd7c7 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Core PHY library, taken from phy.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/export.h>
#include <linux/phy.h>
+#include <linux/of.h>
const char *phy_speed_to_str(int speed)
{
@@ -342,6 +339,77 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
return count;
}
+static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+{
+ const struct phy_setting *p;
+ int i;
+
+ for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
+ if (p->speed > max_speed)
+ linkmode_clear_bit(p->bit, phydev->supported);
+ else
+ break;
+ }
+
+ return 0;
+}
+
+int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
+{
+ int err;
+
+ err = __set_phy_supported(phydev, max_speed);
+ if (err)
+ return err;
+
+ linkmode_copy(phydev->advertising, phydev->supported);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_set_max_speed);
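__set_phy_supported() above relies on the shared settings[] table being sorted by descending speed, so it can clear every link mode faster than max_speed and stop at the first allowed entry. A user-space model of that clamp follows; the table is a toy subset and the bit numbering is invented.

#include <stdio.h>
#include <stdint.h>

struct setting {
    unsigned int speed;   /* Mb/s */
    unsigned int bit;     /* position in the "supported" mask */
    const char *name;
};

/* Like the kernel's settings[]: sorted by descending speed */
static const struct setting settings[] = {
    { 10000, 4, "10000baseT/Full" },
    {  1000, 3, "1000baseT/Full"  },
    {   100, 2, "100baseT/Full"   },
    {    10, 1, "10baseT/Full"    },
};
#define N_SETTINGS (sizeof(settings) / sizeof(settings[0]))

static void set_max_speed(uint32_t *supported, unsigned int max_speed)
{
    unsigned int i;

    for (i = 0; i < N_SETTINGS; i++) {
        if (settings[i].speed > max_speed)
            *supported &= ~(1u << settings[i].bit);
        else
            break;   /* sorted descending: all remaining entries are allowed */
    }
}

int main(void)
{
    uint32_t supported = (1u << 4) | (1u << 3) | (1u << 2) | (1u << 1);
    unsigned int i;

    set_max_speed(&supported, 100);   /* e.g. a "max-speed = <100>" DT limit */

    for (i = 0; i < N_SETTINGS; i++)
        if (supported & (1u << settings[i].bit))
            printf("still supported: %s\n", settings[i].name);
    return 0;
}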
+
+void of_set_phy_supported(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ u32 max_speed;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return;
+
+ if (!node)
+ return;
+
+ if (!of_property_read_u32(node, "max-speed", &max_speed))
+ __set_phy_supported(phydev, max_speed);
+}
+
+void of_set_phy_eee_broken(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ u32 broken = 0;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return;
+
+ if (!node)
+ return;
+
+ if (of_property_read_bool(node, "eee-broken-100tx"))
+ broken |= MDIO_EEE_100TX;
+ if (of_property_read_bool(node, "eee-broken-1000t"))
+ broken |= MDIO_EEE_1000T;
+ if (of_property_read_bool(node, "eee-broken-10gt"))
+ broken |= MDIO_EEE_10GT;
+ if (of_property_read_bool(node, "eee-broken-1000kx"))
+ broken |= MDIO_EEE_1000KX;
+ if (of_property_read_bool(node, "eee-broken-10gkx4"))
+ broken |= MDIO_EEE_10GKX4;
+ if (of_property_read_bool(node, "eee-broken-10gkr"))
+ broken |= MDIO_EEE_10GKR;
+
+ phydev->eee_broken_modes = broken;
+}
+
/**
* phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
* @phydev: The phy_device struct
@@ -353,45 +421,16 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
void phy_resolve_aneg_linkmode(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ int i;
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) {
- phydev->speed = SPEED_10000;
- phydev->duplex = DUPLEX_FULL;
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- common)) {
- phydev->speed = SPEED_5000;
- phydev->duplex = DUPLEX_FULL;
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- common)) {
- phydev->speed = SPEED_2500;
- phydev->duplex = DUPLEX_FULL;
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- common)) {
- phydev->speed = SPEED_1000;
- phydev->duplex = DUPLEX_FULL;
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- common)) {
- phydev->speed = SPEED_1000;
- phydev->duplex = DUPLEX_HALF;
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- common)) {
- phydev->speed = SPEED_100;
- phydev->duplex = DUPLEX_FULL;
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- common)) {
- phydev->speed = SPEED_100;
- phydev->duplex = DUPLEX_HALF;
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- common)) {
- phydev->speed = SPEED_10;
- phydev->duplex = DUPLEX_FULL;
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- common)) {
- phydev->speed = SPEED_10;
- phydev->duplex = DUPLEX_HALF;
- }
+ for (i = 0; i < ARRAY_SIZE(settings); i++)
+ if (test_bit(settings[i].bit, common)) {
+ phydev->speed = settings[i].speed;
+ phydev->duplex = settings[i].duplex;
+ break;
+ }
if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
@@ -418,15 +457,15 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
}
/**
- * phy_read_mmd - Convenience function for reading a register
+ * __phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
* @phydev: The phy_device struct
* @devad: The MMD to read from (0..31)
* @regnum: The register on the MMD to read (0..65535)
*
- * Same rules as for phy_read();
+ * Same rules as for __phy_read();
*/
-int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
+int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
{
int val;
@@ -438,33 +477,52 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
- val = mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
+ val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
} else {
struct mii_bus *bus = phydev->mdio.bus;
int phy_addr = phydev->mdio.addr;
- mutex_lock(&bus->mdio_lock);
mmd_phy_indirect(bus, phy_addr, devad, regnum);
/* Read the content of the MMD's selected register */
val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
- mutex_unlock(&bus->mdio_lock);
}
return val;
}
+EXPORT_SYMBOL(__phy_read_mmd);
+
+/**
+ * phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Same rules as for phy_read();
+ */
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_read_mmd(phydev, devad, regnum);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(phy_read_mmd);
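The point of splitting the accessors into unlocked __phy_read_mmd()/__phy_write_mmd() plus locked wrappers is that compound operations (such as the __phy_modify_mmd*() helpers later in this file) can take the MDIO bus lock once and call the unlocked variants for each step. Below is a pthread-based sketch of the same pattern (build with -pthread); the register store and names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t regs[32];

/* Unlocked helpers: the caller must already hold bus_lock */
static uint16_t __reg_read(unsigned int reg)
{
    return regs[reg];
}

static void __reg_write(unsigned int reg, uint16_t val)
{
    regs[reg] = val;
}

/* Locked convenience wrapper for simple one-shot accesses */
static uint16_t reg_read(unsigned int reg)
{
    uint16_t val;

    pthread_mutex_lock(&bus_lock);
    val = __reg_read(reg);
    pthread_mutex_unlock(&bus_lock);
    return val;
}

/* A compound read-modify-write takes the lock once around both steps,
 * so no other bus user can sneak in between the read and the write. */
static void reg_modify(unsigned int reg, uint16_t mask, uint16_t set)
{
    pthread_mutex_lock(&bus_lock);
    __reg_write(reg, (__reg_read(reg) & ~mask) | set);
    pthread_mutex_unlock(&bus_lock);
}

int main(void)
{
    reg_modify(0, 0x00f0, 0x0050);
    printf("reg0 = 0x%04x\n", reg_read(0));
    return 0;
}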
/**
- * phy_write_mmd - Convenience function for writing a register
+ * __phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
* @phydev: The phy_device struct
* @devad: The MMD to read from
* @regnum: The register on the MMD to read
* @val: value to write to @regnum
*
- * Same rules as for phy_write();
+ * Same rules as for __phy_write();
*/
-int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
+int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
{
int ret;
@@ -476,27 +534,47 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
- ret = mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
- addr, val);
+ ret = __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
+ addr, val);
} else {
struct mii_bus *bus = phydev->mdio.bus;
int phy_addr = phydev->mdio.addr;
- mutex_lock(&bus->mdio_lock);
mmd_phy_indirect(bus, phy_addr, devad, regnum);
/* Write the data into MMD's selected register */
__mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
- mutex_unlock(&bus->mdio_lock);
ret = 0;
}
return ret;
}
+EXPORT_SYMBOL(__phy_write_mmd);
+
+/**
+ * phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ * @val: value to write to @regnum
+ *
+ * Same rules as for phy_write();
+ */
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_write_mmd(phydev, devad, regnum, val);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(phy_write_mmd);
/**
- * __phy_modify() - Convenience function for modifying a PHY register
+ * __phy_modify_changed() - Convenience function for modifying a PHY register
* @phydev: a pointer to a &struct phy_device
* @regnum: register number
* @mask: bit mask of bits to clear
@@ -504,16 +582,69 @@ EXPORT_SYMBOL(phy_write_mmd);
*
* Unlocked helper function which allows a PHY register to be modified as
* new register value = (old register value & ~mask) | set
+ *
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
*/
-int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
+ u16 set)
{
- int ret;
+ int new, ret;
ret = __phy_read(phydev, regnum);
if (ret < 0)
return ret;
- ret = __phy_write(phydev, regnum, (ret & ~mask) | set);
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ ret = __phy_write(phydev, regnum, new);
+
+ return ret < 0 ? ret : 1;
+}
+EXPORT_SYMBOL_GPL(__phy_modify_changed);
+
+/**
+ * phy_modify_changed - Function for modifying a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ *
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
+ */
+int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_modify_changed(phydev, regnum, mask, set);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify_changed);
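__phy_modify_changed() and the *_mmd_changed() variants introduce a three-way return convention: negative errno, 0 when the register already held the value, and 1 when it was actually rewritten. Callers such as genphy_config_advert() accumulate the 1s to decide whether auto-negotiation must be restarted. A user-space model of that convention follows; the fake register access never fails, which is the simplification here.

#include <stdio.h>
#include <stdint.h>

static uint16_t regs[32];

/* Returns <0 on error, 0 if the register already held the value, 1 if the
 * register was rewritten: the same contract as the *_changed() helpers. */
static int reg_modify_changed(unsigned int reg, uint16_t mask, uint16_t set)
{
    uint16_t old = regs[reg];
    uint16_t new = (old & ~mask) | set;

    if (new == old)
        return 0;

    regs[reg] = new;
    return 1;
}

int main(void)
{
    int changed = 0, ret;

    regs[4] = 0x01e0;   /* pretend this is the current advertisement */

    ret = reg_modify_changed(4, 0x0de0, 0x0100);   /* new advertisement */
    if (ret < 0)
        return 1;
    if (ret > 0)
        changed = 1;

    ret = reg_modify_changed(9, 0x0300, 0x0000);   /* 1000BASE-T bits */
    if (ret < 0)
        return 1;
    if (ret > 0)
        changed = 1;

    printf("advertisement changed: %d -> %s restart aneg\n",
           changed, changed ? "do" : "don't");
    return 0;
}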
+
+/**
+ * __phy_modify - Convenience function for modifying a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ ret = __phy_modify_changed(phydev, regnum, mask, set);
return ret < 0 ? ret : 0;
}
@@ -542,6 +673,113 @@ int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
}
EXPORT_SYMBOL_GPL(phy_modify);
+/**
+ * __phy_modify_mmd_changed - Function for modifying a register on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * Unlocked helper function which allows a MMD register to be modified as
+ * new register value = (old register value & ~mask) | set
+ *
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
+ */
+int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set)
+{
+ int new, ret;
+
+ ret = __phy_read_mmd(phydev, devad, regnum);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ ret = __phy_write_mmd(phydev, devad, regnum, new);
+
+ return ret < 0 ? ret : 1;
+}
+EXPORT_SYMBOL_GPL(__phy_modify_mmd_changed);
+
+/**
+ * phy_modify_mmd_changed - Function for modifying a register on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ *
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
+ */
+int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_modify_mmd_changed(phydev, devad, regnum, mask, set);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify_mmd_changed);
+
+/**
+ * __phy_modify_mmd - Convenience function for modifying a register on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ ret = __phy_modify_mmd_changed(phydev, devad, regnum, mask, set);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(__phy_modify_mmd);
+
+/**
+ * phy_modify_mmd - Convenience function for modifying a register on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_modify_mmd(phydev, devad, regnum, mask, set);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify_mmd);
+
static int __phy_read_page(struct phy_device *phydev)
{
return phydev->drv->read_page(phydev);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d33e7b3caf03..3745220c5c98 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Framework for configuring and reading PHY devices
* Based on code in sungem_phy.c and gianfar_phy.c
*
@@ -5,16 +6,8 @@
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
* Copyright (c) 2006, 2007 Maciej W. Rozycki
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -36,8 +29,6 @@
#include <linux/uaccess.h>
#include <linux/atomic.h>
-#include <asm/irq.h>
-
#define PHY_STATE_STR(_state) \
case PHY_##_state: \
return __stringify(_state); \
@@ -51,7 +42,6 @@ static const char *phy_state_to_str(enum phy_state st)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
PHY_STATE_STR(FORCING)
- PHY_STATE_STR(CHANGELINK)
PHY_STATE_STR(HALTED)
PHY_STATE_STR(RESUMING)
}
@@ -154,14 +144,10 @@ int phy_aneg_done(struct phy_device *phydev)
{
if (phydev->drv && phydev->drv->aneg_done)
return phydev->drv->aneg_done(phydev);
-
- /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
- * implement Clause 22 registers
- */
- if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
- return -EINVAL;
-
- return genphy_aneg_done(phydev);
+ else if (phydev->is_c45)
+ return genphy_c45_aneg_done(phydev);
+ else
+ return genphy_aneg_done(phydev);
}
EXPORT_SYMBOL(phy_aneg_done);
@@ -543,13 +529,6 @@ int phy_start_aneg(struct phy_device *phydev)
mutex_lock(&phydev->lock);
- if (!__phy_is_started(phydev)) {
- WARN(1, "called from state %s\n",
- phy_state_to_str(phydev->state));
- err = -EBUSY;
- goto out_unlock;
- }
-
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
@@ -560,11 +539,13 @@ int phy_start_aneg(struct phy_device *phydev)
if (err < 0)
goto out_unlock;
- if (phydev->autoneg == AUTONEG_ENABLE) {
- err = phy_check_link_status(phydev);
- } else {
- phydev->state = PHY_FORCING;
- phydev->link_timeout = PHY_FORCE_TIMEOUT;
+ if (phy_is_started(phydev)) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ err = phy_check_link_status(phydev);
+ } else {
+ phydev->state = PHY_FORCING;
+ phydev->link_timeout = PHY_FORCE_TIMEOUT;
+ }
}
out_unlock:
@@ -714,7 +695,7 @@ void phy_stop_machine(struct phy_device *phydev)
cancel_delayed_work_sync(&phydev->state_queue);
mutex_lock(&phydev->lock);
- if (__phy_is_started(phydev))
+ if (phy_is_started(phydev))
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
}
@@ -767,9 +748,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
- if (!phy_is_started(phydev))
- return IRQ_NONE; /* It can't be ours. */
-
if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
return IRQ_NONE;
@@ -800,46 +778,27 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
- * phy_start_interrupts - request and enable interrupts for a PHY device
+ * phy_request_interrupt - request interrupt for a PHY device
* @phydev: target phy_device struct
*
* Description: Request the interrupt for the given PHY.
* If this fails, then we set irq to PHY_POLL.
- * Otherwise, we enable the interrupts in the PHY.
* This should only be called with a valid IRQ number.
- * Returns 0 on success or < 0 on error.
*/
-int phy_start_interrupts(struct phy_device *phydev)
+void phy_request_interrupt(struct phy_device *phydev)
{
- if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
- IRQF_ONESHOT | IRQF_SHARED,
- phydev_name(phydev), phydev) < 0) {
- pr_warn("%s: Can't get IRQ %d (PHY)\n",
- phydev->mdio.bus->name, phydev->irq);
+ int err;
+
+ err = request_threaded_irq(phydev->irq, NULL, phy_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED,
+ phydev_name(phydev), phydev);
+ if (err) {
+ phydev_warn(phydev, "Error %d requesting IRQ %d, falling back to polling\n",
+ err, phydev->irq);
phydev->irq = PHY_POLL;
- return 0;
}
-
- return phy_enable_interrupts(phydev);
}
-EXPORT_SYMBOL(phy_start_interrupts);
-
-/**
- * phy_stop_interrupts - disable interrupts from a PHY device
- * @phydev: target phy_device struct
- */
-int phy_stop_interrupts(struct phy_device *phydev)
-{
- int err = phy_disable_interrupts(phydev);
-
- if (err)
- phy_error(phydev);
-
- free_irq(phydev->irq, phydev);
-
- return err;
-}
-EXPORT_SYMBOL(phy_stop_interrupts);
+EXPORT_SYMBOL(phy_request_interrupt);
/**
* phy_stop - Bring down the PHY link, and stop checking the status
@@ -847,15 +806,14 @@ EXPORT_SYMBOL(phy_stop_interrupts);
*/
void phy_stop(struct phy_device *phydev)
{
- mutex_lock(&phydev->lock);
-
- if (!__phy_is_started(phydev)) {
+ if (!phy_is_started(phydev)) {
WARN(1, "called from state %s\n",
phy_state_to_str(phydev->state));
- mutex_unlock(&phydev->lock);
return;
}
+ mutex_lock(&phydev->lock);
+
if (phy_interrupt_is_valid(phydev))
phy_disable_interrupts(phydev);
@@ -864,6 +822,7 @@ void phy_stop(struct phy_device *phydev)
mutex_unlock(&phydev->lock);
phy_state_machine(&phydev->state_queue.work);
+ phy_stop_machine(phydev);
/* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but PHY_HALTED shall guarantee irq handler
@@ -884,33 +843,34 @@ EXPORT_SYMBOL(phy_stop);
*/
void phy_start(struct phy_device *phydev)
{
- int err = 0;
+ int err;
mutex_lock(&phydev->lock);
- switch (phydev->state) {
- case PHY_READY:
- phydev->state = PHY_UP;
- break;
- case PHY_HALTED:
- /* if phy was suspended, bring the physical link up again */
- __phy_resume(phydev);
+ if (phydev->state != PHY_READY && phydev->state != PHY_HALTED) {
+ WARN(1, "called from state %s\n",
+ phy_state_to_str(phydev->state));
+ goto out;
+ }
- /* make sure interrupts are re-enabled for the PHY */
- if (phy_interrupt_is_valid(phydev)) {
- err = phy_enable_interrupts(phydev);
- if (err < 0)
- break;
- }
+ /* if phy was suspended, bring the physical link up again */
+ __phy_resume(phydev);
- phydev->state = PHY_RESUMING;
- break;
- default:
- break;
+ /* make sure interrupts are enabled for the PHY */
+ if (phy_interrupt_is_valid(phydev)) {
+ err = phy_enable_interrupts(phydev);
+ if (err < 0)
+ goto out;
}
- mutex_unlock(&phydev->lock);
- phy_trigger_machine(phydev);
+ if (phydev->state == PHY_READY)
+ phydev->state = PHY_UP;
+ else
+ phydev->state = PHY_RESUMING;
+
+ phy_start_machine(phydev);
+out:
+ mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_start);
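phy_start() above now warns unless it is called from PHY_READY or PHY_HALTED, and phy_stop()/phy_disconnect() take care of halting the state machine and freeing the PHY interrupt. The fragment below sketches the calling convention a MAC driver is expected to follow; it is not buildable standalone, and the bus id, interface mode and all example_* names are assumptions.

/* Sketch only: typical open/stop paths of a MAC driver using phylib */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct example_priv {
    struct phy_device *phydev;
};

static void example_adjust_link(struct net_device *ndev)
{
    /* react to phydev->link / speed / duplex changes here */
}

static int example_open(struct net_device *ndev)
{
    struct example_priv *priv = netdev_priv(ndev);
    struct phy_device *phydev;

    phydev = phy_connect(ndev, "mdio-bus:00", example_adjust_link,
                         PHY_INTERFACE_MODE_RGMII);
    if (IS_ERR(phydev))
        return PTR_ERR(phydev);

    priv->phydev = phydev;
    phy_start(phydev);    /* only legal from PHY_READY or PHY_HALTED now */
    return 0;
}

static int example_stop(struct net_device *ndev)
{
    struct example_priv *priv = netdev_priv(ndev);

    phy_stop(priv->phydev);        /* halts the state machine */
    phy_disconnect(priv->phydev);  /* also frees the PHY interrupt now */
    return 0;
}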
@@ -944,7 +904,6 @@ void phy_state_machine(struct work_struct *work)
break;
case PHY_NOLINK:
case PHY_RUNNING:
- case PHY_CHANGELINK:
case PHY_RESUMING:
err = phy_check_link_status(phydev);
break;
@@ -994,8 +953,10 @@ void phy_state_machine(struct work_struct *work)
* state machine would be pointless and possibly error prone when
* called from phy_disconnect() synchronously.
*/
+ mutex_lock(&phydev->lock);
if (phy_polling_mode(phydev) && phy_is_started(phydev))
phy_queue_state_machine(phydev, PHY_STATE_TIME);
+ mutex_unlock(&phydev->lock);
}
/**
@@ -1093,17 +1054,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (!phy_check_valid(phydev->speed, phydev->duplex, common))
goto eee_exit_err;
- if (clk_stop_enable) {
+ if (clk_stop_enable)
/* Configure the PHY to stop receiving xMII
* clock while it is signaling LPI.
*/
- int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (val < 0)
- return val;
-
- val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
- }
+ phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_CLKSTOP_EN);
return 0; /* EEE supported */
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 51990002d495..49fdd1ee798e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Framework for finding and configuring PHYs.
* Also contains generic PHY driver
*
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -35,9 +30,6 @@
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <linux/of.h>
-
-#include <asm/irq.h>
MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
@@ -61,6 +53,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_features);
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
+
static const int phy_basic_ports_array[] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
@@ -109,6 +104,11 @@ const int phy_10gbit_features_array[1] = {
};
EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
+const int phy_10gbit_fec_features_array[1] = {
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+};
+EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
@@ -191,6 +191,10 @@ static void features_init(void)
linkmode_set_bit_array(phy_10gbit_full_features_array,
ARRAY_SIZE(phy_10gbit_full_features_array),
phy_10gbit_full_features);
+ /* 10G FEC only */
+ linkmode_set_bit_array(phy_10gbit_fec_features_array,
+ ARRAY_SIZE(phy_10gbit_fec_features_array),
+ phy_10gbit_fec_features);
}
void phy_device_free(struct phy_device *phydev)
@@ -548,12 +552,33 @@ static const struct device_type mdio_bus_phy_type = {
.pm = MDIO_BUS_PHY_PM_OPS,
};
+static int phy_request_driver_module(struct phy_device *dev, int phy_id)
+{
+ int ret;
+
+ ret = request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phy_id));
+ /* We only check for failures in executing the usermode binary,
+ * not whether a PHY driver module exists for the PHY ID.
+ * Accept -ENOENT because this may occur when no initramfs exists and
+ * modprobe is therefore not available.
+ */
+ if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) {
+ phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n",
+ ret, phy_id);
+ return ret;
+ }
+
+ return 0;
+}
+
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids)
{
struct phy_device *dev;
struct mdio_device *mdiodev;
+ int ret = 0;
/* We allocate the device, and initialize the default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -610,15 +635,21 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
if (!(c45_ids->devices_in_package & (1 << i)))
continue;
- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
- MDIO_ID_ARGS(c45_ids->device_ids[i]));
+ ret = phy_request_driver_module(dev,
+ c45_ids->device_ids[i]);
+ if (ret)
+ break;
}
} else {
- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
- MDIO_ID_ARGS(phy_id));
+ ret = phy_request_driver_module(dev, phy_id);
}
- device_initialize(&mdiodev->dev);
+ if (!ret) {
+ device_initialize(&mdiodev->dev);
+ } else {
+ kfree(dev);
+ dev = ERR_PTR(ret);
+ }
return dev;
}
@@ -644,13 +675,16 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
phy_reg = mdiobus_read(bus, addr, reg_addr);
if (phy_reg < 0)
return -EIO;
- *devices_in_package = (phy_reg & 0xffff) << 16;
+ *devices_in_package = phy_reg << 16;
reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1;
phy_reg = mdiobus_read(bus, addr, reg_addr);
if (phy_reg < 0)
return -EIO;
- *devices_in_package |= (phy_reg & 0xffff);
+ *devices_in_package |= phy_reg;
+
+ /* Bit 0 doesn't represent a device, it indicates c22 regs presence */
+ *devices_in_package &= ~BIT(0);
return 0;
}
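The get_phy_c45_devs_in_pkg() change drops the redundant & 0xffff masking (mdiobus_read() returns either a 16-bit value or a negative error, and errors are checked first) and clears bit 0, which only flags the presence of Clause 22 registers rather than an actual MMD. A short user-space model of assembling the 32-bit devices-in-package word; the register values are invented.

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the two reads of MDIO_DEVS2 / MDIO_DEVS1 */
static int read_devs2(void) { return 0x0000; }
static int read_devs1(void) { return 0x008b; }   /* illustrative bit pattern */

static int get_devs_in_pkg(uint32_t *devs)
{
    int hi = read_devs2();
    int lo = read_devs1();

    if (hi < 0 || lo < 0)
        return -EIO;

    *devs = ((uint32_t)hi << 16) | (uint32_t)lo;

    /* Bit 0 only says "Clause 22 registers present", not a real device */
    *devs &= ~1u;
    return 0;
}

int main(void)
{
    uint32_t devs;

    if (!get_devs_in_pkg(&devs))
        printf("devices in package: 0x%08x\n", devs);   /* 0x0000008a */
    return 0;
}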
@@ -711,13 +745,13 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
phy_reg = mdiobus_read(bus, addr, reg_addr);
if (phy_reg < 0)
return -EIO;
- c45_ids->device_ids[i] = (phy_reg & 0xffff) << 16;
+ c45_ids->device_ids[i] = phy_reg << 16;
reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2;
phy_reg = mdiobus_read(bus, addr, reg_addr);
if (phy_reg < 0)
return -EIO;
- c45_ids->device_ids[i] |= (phy_reg & 0xffff);
+ c45_ids->device_ids[i] |= phy_reg;
}
*phy_id = 0;
return 0;
@@ -750,25 +784,18 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
/* Grab the bits from PHYIR1, and put them in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
if (phy_reg < 0) {
- /* if there is no device, return without an error so scanning
- * the bus works properly
- */
- if (phy_reg == -EIO || phy_reg == -ENODEV) {
- *phy_id = 0xffffffff;
- return 0;
- }
-
- return -EIO;
+ /* returning -ENODEV doesn't stop bus scanning */
+ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
}
- *phy_id = (phy_reg & 0xffff) << 16;
+ *phy_id = phy_reg << 16;
/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
if (phy_reg < 0)
return -EIO;
- *phy_id |= (phy_reg & 0xffff);
+ *phy_id |= phy_reg;
return 0;
}
@@ -819,13 +846,13 @@ int phy_device_register(struct phy_device *phydev)
/* Run all of the fixups for this PHY */
err = phy_scan_fixups(phydev);
if (err) {
- pr_err("PHY %d failed to initialize\n", phydev->mdio.addr);
+ phydev_err(phydev, "failed to initialize\n");
goto out;
}
err = device_add(&phydev->mdio.dev);
if (err) {
- pr_err("PHY %d failed to add\n", phydev->mdio.addr);
+ phydev_err(phydev, "failed to add\n");
goto out;
}
@@ -926,9 +953,8 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
return rc;
phy_prepare_link(phydev, handler);
- phy_start_machine(phydev);
- if (phydev->irq > 0)
- phy_start_interrupts(phydev);
+ if (phy_interrupt_is_valid(phydev))
+ phy_request_interrupt(phydev);
return 0;
}
@@ -983,10 +1009,11 @@ EXPORT_SYMBOL(phy_connect);
*/
void phy_disconnect(struct phy_device *phydev)
{
- if (phydev->irq > 0)
- phy_stop_interrupts(phydev);
+ if (phy_is_started(phydev))
+ phy_stop(phydev);
- phy_stop_machine(phydev);
+ if (phy_interrupt_is_valid(phydev))
+ free_irq(phydev->irq, phydev);
phydev->adjust_link = NULL;
@@ -1041,7 +1068,7 @@ int phy_init_hw(struct phy_device *phydev)
/* Deassert the reset signal */
phy_device_reset(phydev, 0);
- if (!phydev->drv || !phydev->drv->config_init)
+ if (!phydev->drv)
return 0;
if (phydev->drv->soft_reset)
@@ -1054,7 +1081,10 @@ int phy_init_hw(struct phy_device *phydev)
if (ret < 0)
return ret;
- return phydev->drv->config_init(phydev);
+ if (phydev->drv->config_init)
+ ret = phydev->drv->config_init(phydev);
+
+ return ret;
}
EXPORT_SYMBOL(phy_init_hw);
@@ -1279,6 +1309,36 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
}
EXPORT_SYMBOL(phy_attach);
+static bool phy_driver_is_genphy_kind(struct phy_device *phydev,
+ struct device_driver *driver)
+{
+ struct device *d = &phydev->mdio.dev;
+ bool ret = false;
+
+ if (!phydev->drv)
+ return ret;
+
+ get_device(d);
+ ret = d->driver == driver;
+ put_device(d);
+
+ return ret;
+}
+
+bool phy_driver_is_genphy(struct phy_device *phydev)
+{
+ return phy_driver_is_genphy_kind(phydev,
+ &genphy_driver.mdiodrv.driver);
+}
+EXPORT_SYMBOL_GPL(phy_driver_is_genphy);
+
+bool phy_driver_is_genphy_10g(struct phy_device *phydev)
+{
+ return phy_driver_is_genphy_kind(phydev,
+ &genphy_10g_driver.mdiodrv.driver);
+}
+EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
+
/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
@@ -1310,8 +1370,8 @@ void phy_detach(struct phy_device *phydev)
* from the generic driver so that there's a chance a
* real driver could be loaded
*/
- if (phydev->mdio.dev.driver == &genphy_10g_driver.mdiodrv.driver ||
- phydev->mdio.dev.driver == &genphy_driver.mdiodrv.driver)
+ if (phy_driver_is_genphy(phydev) ||
+ phy_driver_is_genphy_10g(phydev))
device_release_driver(&phydev->mdio.dev);
/*
@@ -1455,7 +1515,7 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable);
static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
- int oldadv, adv, bmsr;
+ int bmsr, adv;
int err, changed = 0;
/* Only allow advertising what this PHY supports */
@@ -1468,22 +1528,14 @@ static int genphy_config_advert(struct phy_device *phydev)
phydev->advertising);
/* Setup standard advertisement */
- adv = phy_read(phydev, MII_ADVERTISE);
- if (adv < 0)
- return adv;
-
- oldadv = adv;
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
- adv |= ethtool_adv_to_mii_adv_t(advertise);
-
- if (adv != oldadv) {
- err = phy_write(phydev, MII_ADVERTISE, adv);
-
- if (err < 0)
- return err;
+ err = phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_100BASE4 |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
+ ethtool_adv_to_mii_adv_t(advertise));
+ if (err < 0)
+ return err;
+ if (err > 0)
changed = 1;
- }
bmsr = phy_read(phydev, MII_BMSR);
if (bmsr < 0)
@@ -1497,25 +1549,20 @@ static int genphy_config_advert(struct phy_device *phydev)
return changed;
/* Configure gigabit if it's supported */
- adv = phy_read(phydev, MII_CTRL1000);
- if (adv < 0)
- return adv;
-
- oldadv = adv;
- adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-
+ adv = 0;
if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
phydev->supported) ||
linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
phydev->supported))
- adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-
- if (adv != oldadv)
- changed = 1;
+ adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- err = phy_write(phydev, MII_CTRL1000, adv);
+ err = phy_modify_changed(phydev, MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF,
+ adv);
if (err < 0)
return err;
+ if (err > 0)
+ changed = 1;
return changed;
}
@@ -1528,34 +1575,20 @@ static int genphy_config_advert(struct phy_device *phydev)
 * efficient ethernet modes. Returns 0 if the PHY's advertisement hasn't
* changed, and 1 if it has changed.
*/
-static int genphy_config_eee_advert(struct phy_device *phydev)
+int genphy_config_eee_advert(struct phy_device *phydev)
{
- int broken = phydev->eee_broken_modes;
- int old_adv, adv;
+ int err;
/* Nothing to disable */
- if (!broken)
- return 0;
-
- /* If the following call fails, we assume that EEE is not
- * supported by the phy. If we read 0, EEE is not advertised
- * In both case, we don't need to continue
- */
- adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- if (adv <= 0)
+ if (!phydev->eee_broken_modes)
return 0;
- old_adv = adv;
- adv &= ~broken;
-
- /* Advertising remains unchanged with the broken mask */
- if (old_adv == adv)
- return 0;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
-
- return 1;
+ err = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ phydev->eee_broken_modes, 0);
+ /* If the call failed, we assume that EEE is not supported */
+ return err < 0 ? 0 : err;
}
+EXPORT_SYMBOL(genphy_config_eee_advert);
/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -1671,10 +1704,19 @@ int genphy_update_link(struct phy_device *phydev)
{
int status;
- /* Do a fake read */
- status = phy_read(phydev, MII_BMSR);
- if (status < 0)
- return status;
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+ * in polling mode to detect such short link drops.
+ */
+ if (!phy_polling_mode(phydev)) {
+ status = phy_read(phydev, MII_BMSR);
+ if (status < 0) {
+ return status;
+ } else if (status & BMSR_LSTATUS) {
+ phydev->link = 1;
+ return 0;
+ }
+ }
/* Read link and autonegotiation status */
status = phy_read(phydev, MII_BMSR);
@@ -1705,8 +1747,6 @@ int genphy_read_status(struct phy_device *phydev)
int err;
int lpa;
int lpagb = 0;
- int common_adv;
- int common_adv_gb = 0;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
@@ -1738,7 +1778,6 @@ int genphy_read_status(struct phy_device *phydev)
mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
lpagb);
- common_adv_gb = lpagb & adv << 2;
}
lpa = phy_read(phydev, MII_LPA);
@@ -1747,35 +1786,12 @@ int genphy_read_status(struct phy_device *phydev)
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
- adv = phy_read(phydev, MII_ADVERTISE);
- if (adv < 0)
- return adv;
-
- common_adv = lpa & adv;
-
- phydev->speed = SPEED_10;
- phydev->duplex = DUPLEX_HALF;
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
- if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) {
- phydev->speed = SPEED_1000;
-
- if (common_adv_gb & LPA_1000FULL)
- phydev->duplex = DUPLEX_FULL;
- } else if (common_adv & (LPA_100FULL | LPA_100HALF)) {
- phydev->speed = SPEED_100;
-
- if (common_adv & LPA_100FULL)
- phydev->duplex = DUPLEX_FULL;
- } else
- if (common_adv & LPA_10FULL)
- phydev->duplex = DUPLEX_FULL;
-
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
+ phy_resolve_aneg_linkmode(phydev);
} else {
int bmcr = phy_read(phydev, MII_BMCR);
@@ -1907,44 +1923,6 @@ int genphy_loopback(struct phy_device *phydev, bool enable)
}
EXPORT_SYMBOL(genphy_loopback);
-static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
-{
- switch (max_speed) {
- case SPEED_10:
- linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- phydev->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- phydev->supported);
- /* fall through */
- case SPEED_100:
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->supported);
- break;
- case SPEED_1000:
- break;
- default:
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
-{
- int err;
-
- err = __set_phy_supported(phydev, max_speed);
- if (err)
- return err;
-
- linkmode_copy(phydev->advertising, phydev->supported);
-
- return 0;
-}
-EXPORT_SYMBOL(phy_set_max_speed);
-
/**
* phy_remove_link_mode - Remove a supported link mode
* @phydev: phy_device structure to remove link mode from
@@ -2075,48 +2053,6 @@ bool phy_validate_pause(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_validate_pause);
-static void of_set_phy_supported(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- u32 max_speed;
-
- if (!IS_ENABLED(CONFIG_OF_MDIO))
- return;
-
- if (!node)
- return;
-
- if (!of_property_read_u32(node, "max-speed", &max_speed))
- __set_phy_supported(phydev, max_speed);
-}
-
-static void of_set_phy_eee_broken(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- u32 broken = 0;
-
- if (!IS_ENABLED(CONFIG_OF_MDIO))
- return;
-
- if (!node)
- return;
-
- if (of_property_read_bool(node, "eee-broken-100tx"))
- broken |= MDIO_EEE_100TX;
- if (of_property_read_bool(node, "eee-broken-1000t"))
- broken |= MDIO_EEE_1000T;
- if (of_property_read_bool(node, "eee-broken-10gt"))
- broken |= MDIO_EEE_10GT;
- if (of_property_read_bool(node, "eee-broken-1000kx"))
- broken |= MDIO_EEE_1000KX;
- if (of_property_read_bool(node, "eee-broken-10gkx4"))
- broken |= MDIO_EEE_10GKX4;
- if (of_property_read_bool(node, "eee-broken-10gkr"))
- broken |= MDIO_EEE_10GKR;
-
- phydev->eee_broken_modes = broken;
-}
-
static bool phy_drv_supports_irq(struct phy_driver *phydrv)
{
return phydrv->config_intr && phydrv->ack_interrupt;
@@ -2150,11 +2086,30 @@ static int phy_probe(struct device *dev)
mutex_lock(&phydev->lock);
+ if (phydev->drv->probe) {
+ /* Deassert the reset signal */
+ phy_device_reset(phydev, 0);
+
+ err = phydev->drv->probe(phydev);
+ if (err) {
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+ goto out;
+ }
+ }
+
/* Start out supporting everything. Eventually,
* a controller will attach, and may modify one
* or both of these values
*/
- linkmode_copy(phydev->supported, phydrv->features);
+ if (phydrv->features) {
+ linkmode_copy(phydev->supported, phydrv->features);
+ } else {
+ err = phydrv->get_features(phydev);
+ if (err)
+ goto out;
+ }
+
of_set_phy_supported(phydev);
linkmode_copy(phydev->advertising, phydev->supported);
@@ -2174,20 +2129,8 @@ static int phy_probe(struct device *dev)
* (e.g. hardware erratum) where the driver wants to set only one
* of these bits.
*/
- if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) ||
- test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) {
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->supported);
- if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features))
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->supported);
- if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydrv->features))
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->supported);
- } else {
+ if (!test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported) &&
+ !test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported)) {
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
@@ -2197,17 +2140,7 @@ static int phy_probe(struct device *dev)
/* Set the state to READY by default */
phydev->state = PHY_READY;
- if (phydev->drv->probe) {
- /* Deassert the reset signal */
- phy_device_reset(phydev, 0);
-
- err = phydev->drv->probe(phydev);
- if (err) {
- /* Assert the reset signal */
- phy_device_reset(phydev, 1);
- }
- }
-
+out:
mutex_unlock(&phydev->lock);
return err;
@@ -2243,6 +2176,15 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
{
int retval;
+ /* Either the features are hard coded, or dynamically
+	 * determined. It cannot be both or neither
+ */
+ if (WARN_ON((!new_driver->features && !new_driver->get_features) ||
+ (new_driver->features && new_driver->get_features))) {
+ pr_err("%s: Driver features are missing\n", new_driver->name);
+ return -EINVAL;
+ }
+
new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
new_driver->mdiodrv.driver.name = new_driver->name;
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
@@ -2250,14 +2192,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.remove = phy_remove;
new_driver->mdiodrv.driver.owner = owner;
- /* The following works around an issue where the PHY driver doesn't bind
- * to the device, resulting in the genphy driver being used instead of
- * the dedicated driver. The root cause of the issue isn't known yet
- * and seems to be in the base driver core. Once this is fixed we may
- * remove this workaround.
- */
- new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
-
retval = driver_register(&new_driver->mdiodrv.driver);
if (retval) {
pr_err("%s: Error %d in registering driver\n",
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 263385b75bba..b86a4b2116f8 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -1,15 +1,5 @@
-/* Copyright (C) 2016 National Instruments Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2016 National Instruments Corp. */
#include <linux/leds.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e7becc7379d7..89750c7dfd6f 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* phylink models the MAC to optional PHY connection, supporting
* technologies such as SFP cages where the PHY is hot-pluggable.
*
* Copyright (C) 2015 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/ethtool.h>
#include <linux/export.h>
@@ -305,6 +302,13 @@ static void phylink_mac_config(struct phylink *pl,
pl->ops->mac_config(pl->netdev, pl->link_an_mode, state);
}
+static void phylink_mac_config_up(struct phylink *pl,
+ const struct phylink_link_state *state)
+{
+ if (state->link)
+ phylink_mac_config(pl, state);
+}
+
static void phylink_mac_an_restart(struct phylink *pl)
{
if (pl->link_config.an_enabled &&
@@ -320,6 +324,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ state->pause = MLO_PAUSE_NONE;
+ state->an_complete = 0;
state->link = 1;
return pl->ops->mac_link_state(ndev, state);
@@ -404,12 +412,12 @@ static void phylink_resolve(struct work_struct *w)
case MLO_AN_PHY:
link_state = pl->phy_state;
phylink_resolve_flow(pl, &link_state);
- phylink_mac_config(pl, &link_state);
+ phylink_mac_config_up(pl, &link_state);
break;
case MLO_AN_FIXED:
phylink_get_fixed_state(pl, &link_state);
- phylink_mac_config(pl, &link_state);
+ phylink_mac_config_up(pl, &link_state);
break;
case MLO_AN_INBAND:
@@ -474,6 +482,17 @@ static void phylink_run_resolve(struct phylink *pl)
queue_work(system_power_efficient_wq, &pl->resolve);
}
+static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
+{
+ unsigned long state = pl->phylink_disable_state;
+
+ set_bit(bit, &pl->phylink_disable_state);
+ if (state == 0) {
+ queue_work(system_power_efficient_wq, &pl->resolve);
+ flush_work(&pl->resolve);
+ }
+}
+
static void phylink_fixed_poll(struct timer_list *t)
{
struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -679,9 +698,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
- phy_start_machine(phy);
- if (phy->irq > 0)
- phy_start_interrupts(phy);
+ if (phy_interrupt_is_valid(phy))
+ phy_request_interrupt(phy);
return 0;
}
@@ -924,9 +942,7 @@ void phylink_stop(struct phylink *pl)
if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
del_timer_sync(&pl->link_poll);
- set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
- queue_work(system_power_efficient_wq, &pl->resolve);
- flush_work(&pl->resolve);
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
}
EXPORT_SYMBOL_GPL(phylink_stop);
@@ -1269,6 +1285,24 @@ int phylink_get_eee_err(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_get_eee_err);
/**
+ * phylink_init_eee() - init and check the EEE features
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @clk_stop_enable: allow PHY to stop receive clock
+ *
+ * Must be called either with RTNL held or within mac_link_up()
+ */
+int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (pl->phydev)
+ ret = phy_init_eee(pl->phydev, clk_stop_enable);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phylink_init_eee);
+
+/**
* phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @eee: a pointer to a &struct ethtool_eee for the read parameters
@@ -1632,9 +1666,7 @@ static void phylink_sfp_link_down(void *upstream)
ASSERT_RTNL();
- set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
- queue_work(system_power_efficient_wq, &pl->resolve);
- flush_work(&pl->resolve);
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
}
static void phylink_sfp_link_up(void *upstream)
@@ -1697,4 +1729,4 @@ void phylink_helper_basex_speed(struct phylink_link_state *state)
}
EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index cfe2313dbefd..5486f6fb2ab2 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/qsemi.c
*
@@ -6,12 +7,6 @@
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c6010fb1aa0f..10df52ccddfe 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/realtek.c
*
@@ -6,12 +7,6 @@
* Author: Johnson Leung <r58129@freescale.com>
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/bitops.h>
#include <linux/phy.h>
@@ -278,10 +273,26 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(0x001cc800),
+ .name = "Generic Realtek PHY",
+ .features = PHY_GBIT_FEATURES,
+ .config_init = genphy_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.features = PHY_GBIT_FEATURES,
.config_init = &rtl8366rb_config_init,
+ /* These interrupts are handled by the irq controller
+ * embedded inside the RTL8366RB, they get unmasked when the
+ * irq is requested and ACKed by reading the status register,
+ * which is done by the irqchip code.
+ */
+ .ack_interrupt = genphy_no_ack_interrupt,
+ .config_intr = genphy_no_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index f1da70b9b55f..95abf7072f32 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/**
* drivers/net/phy/rockchip.c
*
@@ -6,12 +7,6 @@
* Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
*
* David Wu <david.wu@rock-chips.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/ethtool.h>
@@ -229,4 +224,4 @@ MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl);
MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip Ethernet PHY driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index ad9db652874d..fef701bfad62 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
return ret;
}
}
+ bus->socket_ops->attach(bus->sfp);
if (bus->started)
bus->socket_ops->start(bus->sfp);
bus->netdev->sfp_bus = bus;
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
if (bus->registered) {
if (bus->started)
bus->socket_ops->stop(bus->sfp);
+ bus->socket_ops->detach(bus->sfp);
if (bus->phydev && ops && ops->disconnect_phy)
ops->disconnect_phy(bus->upstream);
}
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index fd8bb998ae52..d4635c2178d1 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -184,6 +185,7 @@ struct sfp {
struct gpio_desc *gpio[GPIO_MAX];
+ bool attached;
unsigned int state;
struct delayed_work poll;
struct delayed_work timeout;
@@ -1475,7 +1477,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
*/
switch (sfp->sm_mod_state) {
default:
- if (event == SFP_E_INSERT) {
+ if (event == SFP_E_INSERT && sfp->attached) {
sfp_module_tx_disable(sfp);
sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
}
@@ -1607,6 +1609,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
mutex_unlock(&sfp->sm_mutex);
}
+static void sfp_attach(struct sfp *sfp)
+{
+ sfp->attached = true;
+ if (sfp->state & SFP_F_PRESENT)
+ sfp_sm_event(sfp, SFP_E_INSERT);
+}
+
+static void sfp_detach(struct sfp *sfp)
+{
+ sfp->attached = false;
+ sfp_sm_event(sfp, SFP_E_REMOVE);
+}
+
static void sfp_start(struct sfp *sfp)
{
sfp_sm_event(sfp, SFP_E_DEV_UP);
@@ -1667,6 +1682,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
}
static const struct sfp_socket_ops sfp_module_ops = {
+ .attach = sfp_attach,
+ .detach = sfp_detach,
.start = sfp_start,
.stop = sfp_stop,
.module_info = sfp_module_info,
@@ -1834,10 +1851,6 @@ static int sfp_probe(struct platform_device *pdev)
dev_info(sfp->dev, "Host maximum power %u.%uW\n",
sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
- sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
- if (!sfp->sfp_bus)
- return -ENOMEM;
-
/* Get the initial state, and always signal TX disable,
* since the network interface will not be up.
*/
@@ -1848,10 +1861,6 @@ static int sfp_probe(struct platform_device *pdev)
sfp->state |= SFP_F_RATE_SELECT;
sfp_set_state(sfp, sfp->state);
sfp_module_tx_disable(sfp);
- rtnl_lock();
- if (sfp->state & SFP_F_PRESENT)
- sfp_sm_event(sfp, SFP_E_INSERT);
- rtnl_unlock();
for (i = 0; i < GPIO_MAX; i++) {
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1884,6 +1893,10 @@ static int sfp_probe(struct platform_device *pdev)
dev_warn(sfp->dev,
"No tx_disable pin: SFP modules will always be emitting.\n");
+ sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
+ if (!sfp->sfp_bus)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 31b0acf337e2..64f54b0bbd8c 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -7,6 +7,8 @@
struct sfp;
struct sfp_socket_ops {
+ void (*attach)(struct sfp *sfp);
+ void (*detach)(struct sfp *sfp);
void (*start)(struct sfp *sfp);
void (*stop)(struct sfp *sfp);
int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
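The sfp/sfp-bus changes above register the socket only after probe has finished and add attach()/detach() socket ops; sfp_attach() then replays a pending module-insert event once the upstream is ready, instead of firing SFP_E_INSERT during probe. A rough standalone sketch of that "hold back the event, replay it on attach" shape, with hypothetical names rather than the kernel API:

/* Illustration only: holding back a hardware event until a consumer has
 * attached, then replaying it -- the shape of the sfp attach()/detach() hooks.
 */
#include <stdbool.h>
#include <stdio.h>

struct socket {
        bool attached;   /* consumer (bus upstream) ready for events */
        bool present;    /* module currently plugged in */
};

static void deliver_insert(struct socket *s)
{
        printf("INSERT delivered (attached=%d)\n", s->attached);
}

/* Hardware notification: only deliver if someone is listening. */
static void on_module_present(struct socket *s)
{
        s->present = true;
        if (s->attached)
                deliver_insert(s);
        /* otherwise remember 'present' and wait for attach() */
}

static void attach(struct socket *s)
{
        s->attached = true;
        if (s->present)          /* replay the event we held back */
                deliver_insert(s);
}

int main(void)
{
        struct socket s = { 0 };

        on_module_present(&s);   /* too early: nothing delivered */
        attach(&s);              /* now the pending insert is replayed */
        return 0;
}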
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index f9477ff55545..c94d3bfbc772 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/smsc.c
*
@@ -7,11 +8,6 @@
*
* Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@shawell.net
*
*/
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index f17b3441779b..92b64e254b44 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
*
@@ -5,10 +6,6 @@
*
* This file was based on: drivers/spi/at25.c
* Copyright (C) 2006 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 33d733684f5b..5b6acf431f98 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/net/phy/ste10Xp.c
*
@@ -6,12 +7,6 @@
* Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*
* Copyright (c) 2008 STMicroelectronics Limited
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
index 34f58f2349e9..dad22481d9c1 100644
--- a/drivers/net/phy/swphy.c
+++ b/drivers/net/phy/swphy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Software PHY emulation
*
@@ -7,11 +8,6 @@
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* Copyright (c) 2006-2007 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/export.h>
#include <linux/mii.h>
@@ -23,7 +19,6 @@
#define MII_REGS_NUM 29
struct swmii_regs {
- u16 bmcr;
u16 bmsr;
u16 lpa;
u16 lpagb;
@@ -44,16 +39,13 @@ enum {
*/
static const struct swmii_regs speed[] = {
[SWMII_SPEED_10] = {
- .bmcr = BMCR_FULLDPLX,
.lpa = LPA_10FULL | LPA_10HALF,
},
[SWMII_SPEED_100] = {
- .bmcr = BMCR_FULLDPLX | BMCR_SPEED100,
.bmsr = BMSR_100FULL | BMSR_100HALF,
.lpa = LPA_100FULL | LPA_100HALF,
},
[SWMII_SPEED_1000] = {
- .bmcr = BMCR_FULLDPLX | BMCR_SPEED1000,
.bmsr = BMSR_ESTATEN,
.lpagb = LPA_1000FULL | LPA_1000HALF,
},
@@ -61,13 +53,11 @@ static const struct swmii_regs speed[] = {
static const struct swmii_regs duplex[] = {
[SWMII_DUPLEX_HALF] = {
- .bmcr = ~BMCR_FULLDPLX,
.bmsr = BMSR_ESTATEN | BMSR_100HALF,
.lpa = LPA_10HALF | LPA_100HALF,
.lpagb = LPA_1000HALF,
},
[SWMII_DUPLEX_FULL] = {
- .bmcr = ~0,
.bmsr = BMSR_ESTATEN | BMSR_100FULL,
.lpa = LPA_10FULL | LPA_100FULL,
.lpagb = LPA_1000FULL,
@@ -122,7 +112,6 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
{
int speed_index, duplex_index;
u16 bmsr = BMSR_ANEGCAPABLE;
- u16 bmcr = 0;
u16 lpagb = 0;
u16 lpa = 0;
@@ -140,7 +129,6 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
if (state->link) {
bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
- bmcr |= speed[speed_index].bmcr & duplex[duplex_index].bmcr;
lpa |= speed[speed_index].lpa & duplex[duplex_index].lpa;
lpagb |= speed[speed_index].lpagb & duplex[duplex_index].lpagb;
@@ -153,7 +141,7 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
switch (reg) {
case MII_BMCR:
- return bmcr;
+ return BMCR_ANENABLE;
case MII_BMSR:
return bmsr;
case MII_PHYSID1:
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 22f3bdd8206c..beb054b931ee 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Teranetics PHY
*
* Author: Shaohui Xie <Shaohui.Xie@freescale.com>
*
* Copyright 2015 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/kernel.h>
@@ -80,9 +77,9 @@ static struct phy_driver teranetics_driver[] = {
.phy_id = PHY_ID_TN2020,
.phy_id_mask = 0xffffffff,
.name = "Teranetics TN2020",
- .soft_reset = gen10g_no_soft_reset,
+ .features = PHY_10GBIT_FEATURES,
+ .soft_reset = genphy_no_soft_reset,
.aneg_done = teranetics_aneg_done,
- .config_init = gen10g_config_init,
.config_aneg = gen10g_config_aneg,
.read_status = teranetics_read_status,
.match_phy_device = teranetics_match_phy_device,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 1e4fc42e4629..219fc7cdc2b3 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the Renesas PHY uPD60620.
*
* Copyright (C) 2015 Softing Industrial Automation GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 0646af458f6a..dc0dd87a6694 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -1,15 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Vitesse PHYs
*
* Author: Kriston Carson
- *
- * Copyright (c) 2005, 2009, 2011 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 74a8782313cf..2d1449345959 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Xilinx GMII2RGMII Converter driver
*
* Copyright (C) 2016 Xilinx, Inc.
@@ -8,16 +9,6 @@
*
* Description:
* This driver is developed for Xilinx GMII2RGMII Converter
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -44,7 +35,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
u16 val = 0;
int err;
- err = priv->phy_drv->read_status(phydev);
+ if (priv->phy_drv->read_status)
+ err = priv->phy_drv->read_status(phydev);
+ else
+ err = genphy_read_status(phydev);
if (err < 0)
return err;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 62dc564b251d..f22639f0116a 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
if (pskb_trim_rcsum(skb, len))
goto drop;
+ ph = pppoe_hdr(skb);
pn = pppoe_pernet(dev_net(dev));
/* Note that get_item does a sock_hold(), so sk_pppox(po)
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 7820fced33f6..941cfa8f1c2a 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -535,17 +535,20 @@ sb1000_activate(const int ioaddr[], const char* name)
int status;
ssleep(1);
- if ((status = card_send_command(ioaddr, name, Command0, st)))
+ status = card_send_command(ioaddr, name, Command0, st);
+ if (status)
return status;
- if ((status = card_send_command(ioaddr, name, Command1, st)))
+ status = card_send_command(ioaddr, name, Command1, st);
+ if (status)
return status;
if (st[3] != 0xf1) {
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
+ status = sb1000_start_get_set_command(ioaddr, name);
+ if (status)
return status;
return -EIO;
}
udelay(1000);
- return sb1000_start_get_set_command(ioaddr, name);
+ return sb1000_start_get_set_command(ioaddr, name);
}
/* get SB1000 firmware version */
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index c0b52e48f0e6..2ea9b4976f4a 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -712,7 +712,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
goto err_kfree;
}
- skb_probe_transport_header(skb, ETH_HLEN);
+ skb_probe_transport_header(skb);
/* Move network header to the right position for VLAN tagged packets */
if ((skb->protocol == htons(ETH_P_8021Q) ||
@@ -1187,7 +1187,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
tap = rcu_dereference(q->tap);
if (tap) {
skb->dev = tap->dev;
- skb_probe_transport_header(skb, ETH_HLEN);
+ skb_probe_transport_header(skb);
dev_queue_xmit(skb);
} else {
kfree_skb(skb);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index afd9d25d1992..6ed96fdfd96d 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -28,7 +28,6 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
-#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>
@@ -256,17 +255,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
-static bool __team_option_inst_tmp_find(const struct list_head *opts,
- const struct team_option_inst *needle)
-{
- struct team_option_inst *opt_inst;
-
- list_for_each_entry(opt_inst, opts, tmp_list)
- if (opt_inst == needle)
- return true;
- return false;
-}
-
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -1267,7 +1255,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
list_add_tail_rcu(&port->list, &team->port_list);
team_port_enable(team, port);
__team_compute_features(team);
- __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
+ __team_port_change_port_added(port, !!netif_oper_up(port_dev));
__team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname);
@@ -2460,7 +2448,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
int err = 0;
int i;
struct nlattr *nl_option;
- LIST_HEAD(opt_inst_list);
rtnl_lock();
@@ -2480,6 +2467,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
struct nlattr *attr;
struct nlattr *attr_data;
+ LIST_HEAD(opt_inst_list);
enum team_option_type opt_type;
int opt_port_ifindex = 0; /* != 0 for per-port options */
u32 opt_array_index = 0;
@@ -2584,23 +2572,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
-
- /* dumb/evil user-space can send us duplicate opt,
- * keep only the last one
- */
- if (__team_option_inst_tmp_find(&opt_inst_list,
- opt_inst))
- continue;
-
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
err = -ENOENT;
goto team_put;
}
- }
- err = team_nl_send_event_options_get(team, &opt_inst_list);
+ err = team_nl_send_event_options_get(team, &opt_inst_list);
+ if (err)
+ break;
+ }
team_put:
team_nl_team_put(team);
@@ -2932,7 +2914,7 @@ static int team_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
- if (netif_carrier_ok(dev))
+ if (netif_oper_up(dev))
team_port_change_check(port, true);
break;
case NETDEV_DOWN:
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index a5ef97010eb3..5541e1c19936 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -325,6 +325,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
return 0;
}
+static void lb_bpf_func_free(struct team *team)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct bpf_prog *fp;
+
+ if (!lb_priv->ex->orig_fprog)
+ return;
+
+ __fprog_destroy(lb_priv->ex->orig_fprog);
+ fp = rcu_dereference_protected(lb_priv->fp,
+ lockdep_is_held(&team->lock));
+ bpf_prog_destroy(fp);
+}
+
static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
@@ -639,6 +653,7 @@ static void lb_exit(struct team *team)
team_options_unregister(team, lb_options,
ARRAY_SIZE(lb_options));
+ lb_bpf_func_free(team);
cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
free_percpu(lb_priv->pcpu_stats);
kfree(lb_priv->ex);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a4fdad475594..1d68921723dc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -856,10 +856,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
err = 0;
}
- rcu_assign_pointer(tfile->tun, tun);
- rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
- tun->numqueues++;
-
if (tfile->detached) {
tun_enable_queue(tfile);
} else {
@@ -870,12 +866,18 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
if (rtnl_dereference(tun->xdp_prog))
sock_set_flag(&tfile->sk, SOCK_XDP);
- tun_set_real_num_queues(tun);
-
/* device is allowed to go away first, so no need to hold extra
* refcnt.
*/
+ /* Publish tfile->tun and tun->tfiles only after we've fully
+	 * initialized tfile; otherwise we risk using a half-initialized
+ * object.
+ */
+ rcu_assign_pointer(tfile->tun, tun);
+ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ tun->numqueues++;
+ tun_set_real_num_queues(tun);
out:
return err;
}
@@ -1927,7 +1929,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
skb_reset_network_header(skb);
- skb_probe_transport_header(skb, 0);
+ skb_probe_transport_header(skb);
if (skb_xdp) {
struct bpf_prog *xdp_prog;
@@ -2165,9 +2167,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
}
add_wait_queue(&tfile->wq.wait, &wait);
- current->state = TASK_INTERRUPTIBLE;
while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
ptr = ptr_ring_consume(&tfile->tx_ring);
if (ptr)
break;
@@ -2183,7 +2185,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
schedule();
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tfile->wq.wait, &wait);
out:
@@ -2480,7 +2482,7 @@ build:
skb->protocol = eth_type_trans(skb, tun->dev);
skb_reset_network_header(skb);
- skb_probe_transport_header(skb, 0);
+ skb_probe_transport_header(skb);
if (skb_xdp) {
err = do_xdp_generic(xdp_prog, skb);
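The tun_attach() hunk above is a publish-after-initialization fix: tfile->tun and tun->tfiles[] are assigned only once tfile is fully set up, as the added comment explains. The same ordering can be sketched in portable C11, with a release store standing in for rcu_assign_pointer() (illustration only, not the kernel primitives):

/* Illustration only: initialize an object completely, then publish the
 * pointer with release semantics so concurrent readers never observe a
 * half-initialized object (the role rcu_assign_pointer() plays in tun_attach).
 */
#include <stdatomic.h>
#include <stdio.h>

struct queue_file {
        int queue_index;
        int flags;
};

static _Atomic(struct queue_file *) published;

static void attach(struct queue_file *qf, int index)
{
        /* 1. Fully initialize the object first. */
        qf->queue_index = index;
        qf->flags = 0x1;

        /* 2. Only then publish it; release pairs with readers' acquire. */
        atomic_store_explicit(&published, qf, memory_order_release);
}

static void reader(void)
{
        struct queue_file *qf =
                atomic_load_explicit(&published, memory_order_acquire);

        if (qf)
                printf("saw queue %d flags %#x\n", qf->queue_index, qf->flags);
}

int main(void)
{
        static struct queue_file qf;

        attach(&qf, 3);
        reader();
        return 0;
}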
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 57f1c94fca0b..820a2fe7d027 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1287,6 +1287,20 @@ static const struct driver_info asix112_info = {
#undef ASIX112_DESC
+static const struct driver_info trendnet_info = {
+ .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter",
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1440,6 +1454,7 @@ static const struct usb_device_id products[] = {
{AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
{AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
+ {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
{ },/* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b654f05b2ccd..3d93993e74da 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
chipcode &= AX_CHIPCODE_MASK;
- (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
- ax88772a_hw_reset(dev, 0);
+ ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
+ ax88772a_hw_reset(dev, 0);
+
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
+ return ret;
+ }
/* Read PHYID register *AFTER* the PHY was reset properly */
phyid = asix_get_phyid(dev);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 78b16eb9e58c..63aaae487995 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -361,8 +361,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
else
return -EINVAL;
- dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size,
- ifname, NET_NAME_UNKNOWN, usbpn_setup);
+ dev = alloc_netdev(struct_size(pnd, urbs, rxq_size), ifname,
+ NET_NAME_UNKNOWN, usbpn_setup);
if (!dev)
return -ENOMEM;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index b3b3c05903a1..5512a1038721 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -179,10 +179,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* probed with) and a slave/data interface; union
* descriptors sort this all out.
*/
- info->control = usb_ifnum_to_if(dev->udev,
- info->u->bMasterInterface0);
- info->data = usb_ifnum_to_if(dev->udev,
- info->u->bSlaveInterface0);
+ info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0);
+ info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0);
if (!info->control || !info->data) {
dev_dbg(&intf->dev,
"master #%u/%p slave #%u/%p\n",
@@ -216,18 +214,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
/* a data interface altsetting does the real i/o */
d = &info->data->cur_altsetting->desc;
if (d->bInterfaceClass != USB_CLASS_CDC_DATA) {
- dev_dbg(&intf->dev, "slave class %u\n",
- d->bInterfaceClass);
+ dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass);
goto bad_desc;
}
skip:
- if ( rndis &&
- header.usb_cdc_acm_descriptor &&
- header.usb_cdc_acm_descriptor->bmCapabilities) {
- dev_dbg(&intf->dev,
- "ACM capabilities %02x, not really RNDIS?\n",
- header.usb_cdc_acm_descriptor->bmCapabilities);
- goto bad_desc;
+ if (rndis && header.usb_cdc_acm_descriptor &&
+ header.usb_cdc_acm_descriptor->bmCapabilities) {
+ dev_dbg(&intf->dev,
+ "ACM capabilities %02x, not really RNDIS?\n",
+ header.usb_cdc_acm_descriptor->bmCapabilities);
+ goto bad_desc;
}
if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
@@ -238,7 +234,7 @@ skip:
}
if (header.usb_cdc_mdlm_desc &&
- memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) {
+ memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) {
dev_dbg(&intf->dev, "GUID doesn't match\n");
goto bad_desc;
}
@@ -302,7 +298,7 @@ skip:
if (info->control->cur_altsetting->desc.bNumEndpoints == 1) {
struct usb_endpoint_descriptor *desc;
- dev->status = &info->control->cur_altsetting->endpoint [0];
+ dev->status = &info->control->cur_altsetting->endpoint[0];
desc = &dev->status->desc;
if (!usb_endpoint_is_int_in(desc) ||
(le16_to_cpu(desc->wMaxPacketSize)
@@ -847,6 +843,14 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index e96bc0c6140f..3d92ea6fcc02 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2051,8 +2051,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
phydev = phy_find_first(dev->mdiobus);
if (!phydev) {
netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
- NULL);
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (IS_ERR(phydev)) {
netdev_err(dev->net, "No PHY/fixed_PHY found\n");
return NULL;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f4247b275e09..63e44e746ccc 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1011,6 +1011,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCDEVPRIVATE:
data[0] = pegasus->phy;
+ /* fall through */
case SIOCDEVPRIVATE + 1:
read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
res = 0;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 774e1ff01c9a..74bebbdb4b15 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev)
dev->addr_len = 0;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->netdev_ops = &qmimux_netdev_ops;
+ dev->mtu = 1500;
dev->needs_free_netdev = true;
}
@@ -975,6 +976,13 @@ static const struct usb_device_id products[] = {
0xff),
.driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
},
+ { /* Quectel EG12/EM12 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC,
+ 0xff),
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+ },
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1200,8 +1208,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
- {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */
- {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
{QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
@@ -1342,17 +1350,20 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
return false;
}
-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+static bool quectel_diag_detected(struct usb_interface *intf)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+ u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
+ u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
- if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
- le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
- intf_desc.bNumEndpoints == 2)
- return true;
+ if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
+ return false;
- return false;
+ if (id_product == 0x0306 || id_product == 0x0512)
+ return true;
+ else
+ return false;
}
static int qmi_wwan_probe(struct usb_interface *intf,
@@ -1389,13 +1400,13 @@ static int qmi_wwan_probe(struct usb_interface *intf,
return -ENODEV;
}
- /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+	/* Several Quectel modems support dynamic interface configuration, so
* we need to match on class/subclass/protocol. These values are
* identical for the diagnostic- and QMI-interface, but bNumEndpoints is
	 * different. Ignore the current interface if the number of endpoints equals
* the number for the diag interface (two).
*/
- if (quectel_ep06_diag_detected(intf))
+ if (quectel_diag_detected(intf))
return -ENODEV;
return usbnet_probe(intf, id);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 60dd1ec1665f..86c8c64fbb0f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -557,6 +557,7 @@ enum spd_duplex {
/* MAC PASSTHRU */
#define AD_MASK 0xfee0
#define BND_MASK 0x0004
+#define BD_MASK 0x0001
#define EFUSE 0xcfdb
#define PASS_THRU_MASK 0x1
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
return -ENODEV;
}
} else {
- /* test for RTL8153-BND */
+ /* test for RTL8153-BND and RTL8153-BD */
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
- if ((ocp_data & BND_MASK) == 0) {
+ if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
netif_dbg(tp, probe, tp->netdev,
"Invalid variant for MAC pass through\n");
return -ENODEV;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 80373a9171dd..59dbdbb5feff 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -388,7 +388,6 @@ static void read_bulk_callback(struct urb *urb)
unsigned pkt_len, res;
struct sk_buff *skb;
struct net_device *netdev;
- u16 rx_stat;
int status = urb->status;
int result;
unsigned long flags;
@@ -424,7 +423,6 @@ static void read_bulk_callback(struct urb *urb)
goto goon;
res = urb->actual_length;
- rx_stat = le16_to_cpu(*(__le16 *)(urb->transfer_buffer + res - 4));
pkt_len = res - 4;
skb_put(dev->rx_skb, pkt_len);
@@ -849,6 +847,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCDEVPRIVATE:
data[0] = dev->phy;
+ /* fall through */
case SIOCDEVPRIVATE + 1:
read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]);
break;
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 6ac232e52bf7..e04c8054c2cf 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -434,7 +434,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
usbnet_skb_return(dev, sr_skb);
skb_pull(skb, len + SR_RX_OVERHEAD);
- };
+ }
return 0;
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f412ea1cef18..569e87a51a33 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -115,7 +115,8 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
p += sizeof(ethtool_stats_keys);
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
+ snprintf(p, ETH_GSTRING_LEN,
+ "rx_queue_%u_%.11s",
i, veth_rq_stats_desc[j].desc);
p += ETH_GSTRING_LEN;
}
@@ -540,8 +541,10 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
goto err_xdp;
}
@@ -661,8 +664,10 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
goto drop;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 023725086046..7eb38ea9ba56 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)
+#define VIRTIO_XDP_FLAG BIT(0)
+
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -252,6 +254,21 @@ struct padded_vnet_hdr {
char padding[4];
};
+static bool is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+}
+
+static void *xdp_to_ptr(struct xdp_frame *ptr)
+{
+ return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
+}
+
+static struct xdp_frame *ptr_to_xdp(void *ptr)
+{
+ return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
sg_init_one(sq->sg, xdpf->data, xdpf->len);
- err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
+ GFP_ATOMIC);
if (unlikely(err))
return -ENOSPC; /* Caller handle free/refcnt */
@@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
struct receive_queue *rq = vi->rq;
- struct xdp_frame *xdpf_sent;
struct bpf_prog *xdp_prog;
struct send_queue *sq;
unsigned int len;
+ int packets = 0;
+ int bytes = 0;
int drops = 0;
int kicks = 0;
int ret, err;
+ void *ptr;
int i;
- sq = virtnet_xdp_sq(vi);
-
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
- ret = -EINVAL;
- drops = n;
- goto out;
- }
-
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates that XDP resources have been successfully allocated.
*/
xdp_prog = rcu_dereference(rq->xdp_prog);
- if (!xdp_prog) {
- ret = -ENXIO;
+ if (!xdp_prog)
+ return -ENXIO;
+
+ sq = virtnet_xdp_sq(vi);
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+ ret = -EINVAL;
drops = n;
goto out;
}
/* Free up any pending old buffers before queueing new ones. */
- while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
- xdp_return_frame(xdpf_sent);
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ if (likely(is_xdp_frame(ptr))) {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+ bytes += frame->len;
+ xdp_return_frame(frame);
+ } else {
+ struct sk_buff *skb = ptr;
+
+ bytes += skb->len;
+ napi_consume_skb(skb, false);
+ }
+ packets++;
+ }
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
out:
u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.bytes += bytes;
+ sq->stats.packets += packets;
sq->stats.xdp_tx += n;
sq->stats.xdp_tx_drops += drops;
sq->stats.kicks += kicks;
@@ -1035,6 +1066,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
goto frame_err;
}
+ skb_record_rx_queue(skb, vq2rxq(rq->vq));
skb->protocol = eth_type_trans(skb, dev);
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
@@ -1330,20 +1362,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
return stats.packets;
}
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
- struct sk_buff *skb;
unsigned int len;
unsigned int packets = 0;
unsigned int bytes = 0;
+ void *ptr;
- while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- pr_debug("Sent skb %p\n", skb);
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ if (likely(!is_xdp_frame(ptr))) {
+ struct sk_buff *skb = ptr;
- bytes += skb->len;
- packets++;
+ pr_debug("Sent skb %p\n", skb);
+
+ bytes += skb->len;
+ napi_consume_skb(skb, in_napi);
+ } else {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
- dev_consume_skb_any(skb);
+ bytes += frame->len;
+ xdp_return_frame(frame);
+ }
+ packets++;
}
/* Avoid overhead when no packets have been processed
@@ -1358,6 +1398,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
u64_stats_update_end(&sq->stats.syncp);
}
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+ if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+ return false;
+ else if (q < vi->curr_queue_pairs)
+ return true;
+ else
+ return false;
+}
+
static void virtnet_poll_cleantx(struct receive_queue *rq)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1365,11 +1415,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
struct send_queue *sq = &vi->sq[index];
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
- if (!sq->napi.weight)
+ if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
return;
if (__netif_tx_trylock(txq)) {
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
}
@@ -1442,10 +1492,18 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
struct send_queue *sq = container_of(napi, struct send_queue, napi);
struct virtnet_info *vi = sq->vq->vdev->priv;
- struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+ unsigned int index = vq2txq(sq->vq);
+ struct netdev_queue *txq;
+ if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+ /* We don't need to enable cb for XDP */
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1514,7 +1572,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, false);
if (use_napi && kick)
virtqueue_enable_cb_delayed(sq->vq);
@@ -1557,7 +1615,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!use_napi &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq);
+ free_old_xmit_skbs(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
@@ -2395,6 +2453,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
return -ENOMEM;
}
+ old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
+ if (!prog && !old_prog)
+ return 0;
+
if (prog) {
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
if (IS_ERR(prog))
@@ -2402,36 +2464,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
/* Make sure NAPI is not using any XDP TX queues for RX. */
- if (netif_running(dev))
- for (i = 0; i < vi->max_queue_pairs; i++)
+ if (netif_running(dev)) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ virtnet_napi_tx_disable(&vi->sq[i].napi);
+ }
+ }
+
+ if (!prog) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+ if (i == 0)
+ virtnet_restore_guest_offloads(vi);
+ }
+ synchronize_net();
+ }
- netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err)
goto err;
+ netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
vi->xdp_queue_pairs = xdp_qp;
- for (i = 0; i < vi->max_queue_pairs; i++) {
- old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
- rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
- if (i == 0) {
- if (!old_prog)
+ if (prog) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+ if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
- if (!prog)
- virtnet_restore_guest_offloads(vi);
}
+ }
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
- if (netif_running(dev))
+ if (netif_running(dev)) {
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+ &vi->sq[i].napi);
+ }
}
return 0;
err:
- for (i = 0; i < vi->max_queue_pairs; i++)
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ if (!prog) {
+ virtnet_clear_guest_offloads(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
+ }
+
+ if (netif_running(dev)) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+ &vi->sq[i].napi);
+ }
+ }
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
return err;
@@ -2613,16 +2701,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
- if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
- return false;
- else if (q < vi->curr_queue_pairs)
- return true;
- else
- return false;
-}
-
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -2631,10 +2709,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_raw_buffer_queue(vi, i))
+ if (!is_xdp_frame(buf))
dev_kfree_skb(buf);
else
- put_page(virt_to_head_page(buf));
+ xdp_return_frame(ptr_to_xdp(buf));
}
}
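
A hedged, generic sketch of the pointer-tagging idea behind the is_xdp_frame()/ptr_to_xdp() calls in the virtio_net hunks above: the TX virtqueue token can now hold either an sk_buff or an xdp_frame, and a spare low bit in the (aligned) pointer marks which one it is, so completion and teardown paths free it correctly. The flag name and helpers below are illustrative, not the driver's exact definitions.

#include <linux/types.h>

struct xdp_frame;				/* opaque for this sketch */

#define EXAMPLE_XDP_FLAG	0x1UL		/* assumed spare bit; frames are word-aligned */

static inline void *example_xdp_to_ptr(struct xdp_frame *frame)
{
	return (void *)((unsigned long)frame | EXAMPLE_XDP_FLAG);
}

static inline bool example_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & EXAMPLE_XDP_FLAG;
}

static inline struct xdp_frame *example_ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~EXAMPLE_XDP_FLAG);
}
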
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e454dfc9ad8f..89984fcab01e 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -535,8 +535,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
}
sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
- tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
- &tq->buf_info_pa, GFP_KERNEL);
+ tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &tq->buf_info_pa, GFP_KERNEL);
if (!tq->buf_info)
goto err;
@@ -1815,8 +1815,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
- bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
- GFP_KERNEL);
+ bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
+ GFP_KERNEL);
if (!bi)
goto err;
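
The dma_zalloc_coherent() -> dma_alloc_coherent() conversions in the vmxnet3 hunks above (and in several drivers later in this diff) are mechanical: dma_alloc_coherent() already returns zeroed memory, so the zalloc wrapper became redundant. A minimal sketch of the resulting call pattern, with illustrative names:

#include <linux/dma-mapping.h>

static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	/* returns zero-filled, DMA-coherent memory; no memset() needed */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}
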
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95909e262ba4..7c1430ed0244 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev)
/* default to no qdisc; user can add if desired */
dev->priv_flags |= IFF_NO_QUEUE;
+
+ dev->min_mtu = 0;
+ dev->max_mtu = 0;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5209ee9aac47..a3c46d78d216 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -361,10 +361,11 @@ errout:
static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
const struct vxlan_rdst *rd,
+ struct netlink_ext_ack *extack,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
fdb_info->info.dev = vxlan->dev;
- fdb_info->info.extack = NULL;
+ fdb_info->info.extack = extack;
fdb_info->remote_ip = rd->remote_ip;
fdb_info->remote_port = rd->remote_port;
fdb_info->remote_vni = rd->remote_vni;
@@ -375,41 +376,50 @@ static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
}
-static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
- struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd,
- bool adding)
+static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
+ struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd,
+ bool adding,
+ struct netlink_ext_ack *extack)
{
struct switchdev_notifier_vxlan_fdb_info info;
enum switchdev_notifier_type notifier_type;
+ int ret;
if (WARN_ON(!rd))
- return;
+ return 0;
notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE
: SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE;
- vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, &info);
- call_switchdev_notifiers(notifier_type, vxlan->dev,
- &info.info);
+ vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info);
+ ret = call_switchdev_notifiers(notifier_type, vxlan->dev,
+ &info.info, extack);
+ return notifier_to_errno(ret);
}
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd, int type, bool swdev_notify)
+static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd, int type, bool swdev_notify,
+ struct netlink_ext_ack *extack)
{
+ int err;
+
if (swdev_notify) {
switch (type) {
case RTM_NEWNEIGH:
- vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
- true);
+ err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
+ true, extack);
+ if (err)
+ return err;
break;
case RTM_DELNEIGH:
vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
- false);
+ false, extack);
break;
}
}
__vxlan_fdb_notify(vxlan, fdb, rd, type);
+ return 0;
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
@@ -423,7 +433,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
.remote_vni = cpu_to_be32(VXLAN_N_VID),
};
- vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
+ vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -435,7 +445,7 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
memcpy(f.eth_addr, eth_addr, ETH_ALEN);
- vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
+ vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
/* Hash Ethernet address */
@@ -545,7 +555,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
}
rdst = first_remote_rcu(f);
- vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, fdb_info);
+ vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info);
out:
rcu_read_unlock();
@@ -556,19 +566,21 @@ EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc);
static int vxlan_fdb_notify_one(struct notifier_block *nb,
const struct vxlan_dev *vxlan,
const struct vxlan_fdb *f,
- const struct vxlan_rdst *rdst)
+ const struct vxlan_rdst *rdst,
+ struct netlink_ext_ack *extack)
{
struct switchdev_notifier_vxlan_fdb_info fdb_info;
int rc;
- vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, &fdb_info);
+ vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info);
rc = nb->notifier_call(nb, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
&fdb_info);
return notifier_to_errno(rc);
}
int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
- struct notifier_block *nb)
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
@@ -586,7 +598,8 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
if (f->vni == vni) {
list_for_each_entry(rdst, &f->remotes, list) {
rc = vxlan_fdb_notify_one(nb, vxlan,
- f, rdst);
+ f, rdst,
+ extack);
if (rc)
goto out;
}
@@ -625,7 +638,7 @@ EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __be32 vni,
- __u32 ifindex)
+ __u32 ifindex, struct vxlan_rdst *oldrd)
{
struct vxlan_rdst *rd;
@@ -637,6 +650,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
if (!rd)
return 0;
+ *oldrd = *rd;
dst_cache_reset(&rd->dst_cache);
rd->remote_ip = *ip;
rd->remote_port = port;
@@ -826,92 +840,6 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return 0;
}
-/* Add new entry to forwarding table -- assumes lock held */
-static int vxlan_fdb_update(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __u16 flags,
- __be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u16 ndm_flags,
- bool swdev_notify)
-{
- __u16 fdb_flags = (ndm_flags & ~NTF_USE);
- struct vxlan_rdst *rd = NULL;
- struct vxlan_fdb *f;
- int notify = 0;
- int rc;
-
- f = __vxlan_find_mac(vxlan, mac, src_vni);
- if (f) {
- if (flags & NLM_F_EXCL) {
- netdev_dbg(vxlan->dev,
- "lost race to create %pM\n", mac);
- return -EEXIST;
- }
-
- /* Do not allow an externally learned entry to take over an
- * entry added by the user.
- */
- if (!(fdb_flags & NTF_EXT_LEARNED) ||
- !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
- if (f->state != state) {
- f->state = state;
- f->updated = jiffies;
- notify = 1;
- }
- if (f->flags != fdb_flags) {
- f->flags = fdb_flags;
- f->updated = jiffies;
- notify = 1;
- }
- }
-
- if ((flags & NLM_F_REPLACE)) {
- /* Only change unicasts */
- if (!(is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
- notify |= vxlan_fdb_replace(f, ip, port, vni,
- ifindex);
- } else
- return -EOPNOTSUPP;
- }
- if ((flags & NLM_F_APPEND) &&
- (is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
- rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
-
- if (rc < 0)
- return rc;
- notify |= rc;
- }
-
- if (ndm_flags & NTF_USE)
- f->used = jiffies;
- } else {
- if (!(flags & NLM_F_CREATE))
- return -ENOENT;
-
- /* Disallow replace to add a multicast entry */
- if ((flags & NLM_F_REPLACE) &&
- (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
- return -EOPNOTSUPP;
-
- netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
- vni, ifindex, fdb_flags, &f);
- if (rc < 0)
- return rc;
- notify = 1;
- }
-
- if (notify) {
- if (rd == NULL)
- rd = first_remote_rtnl(f);
- vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify);
- }
-
- return 0;
-}
-
static void vxlan_fdb_free(struct rcu_head *head)
{
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
@@ -929,14 +857,13 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
{
struct vxlan_rdst *rd;
- netdev_dbg(vxlan->dev,
- "delete %pM\n", f->eth_addr);
+ netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr);
--vxlan->addrcnt;
if (do_notify)
list_for_each_entry(rd, &f->remotes, list)
vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
- swdev_notify);
+ swdev_notify, NULL);
hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);
@@ -950,11 +877,157 @@ static void vxlan_dst_free(struct rcu_head *head)
kfree(rd);
}
+static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
+ union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags,
+ struct vxlan_fdb *f,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
+{
+ __u16 fdb_flags = (ndm_flags & ~NTF_USE);
+ struct vxlan_rdst *rd = NULL;
+ struct vxlan_rdst oldrd;
+ int notify = 0;
+ int rc = 0;
+ int err;
+
+ /* Do not allow an externally learned entry to take over an entry added
+ * by the user.
+ */
+ if (!(fdb_flags & NTF_EXT_LEARNED) ||
+ !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
+ if (f->state != state) {
+ f->state = state;
+ f->updated = jiffies;
+ notify = 1;
+ }
+ if (f->flags != fdb_flags) {
+ f->flags = fdb_flags;
+ f->updated = jiffies;
+ notify = 1;
+ }
+ }
+
+ if ((flags & NLM_F_REPLACE)) {
+ /* Only change unicasts */
+ if (!(is_multicast_ether_addr(f->eth_addr) ||
+ is_zero_ether_addr(f->eth_addr))) {
+ rc = vxlan_fdb_replace(f, ip, port, vni,
+ ifindex, &oldrd);
+ notify |= rc;
+ } else {
+ return -EOPNOTSUPP;
+ }
+ }
+ if ((flags & NLM_F_APPEND) &&
+ (is_multicast_ether_addr(f->eth_addr) ||
+ is_zero_ether_addr(f->eth_addr))) {
+ rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+
+ if (rc < 0)
+ return rc;
+ notify |= rc;
+ }
+
+ if (ndm_flags & NTF_USE)
+ f->used = jiffies;
+
+ if (notify) {
+ if (rd == NULL)
+ rd = first_remote_rtnl(f);
+
+ err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH,
+ swdev_notify, extack);
+ if (err)
+ goto err_notify;
+ }
+
+ return 0;
+
+err_notify:
+ if ((flags & NLM_F_REPLACE) && rc)
+ *rd = oldrd;
+ else if ((flags & NLM_F_APPEND) && rc) {
+ list_del_rcu(&rd->list);
+ call_rcu(&rd->rcu, vxlan_dst_free);
+ }
+ return err;
+}
+
+static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
+{
+ __u16 fdb_flags = (ndm_flags & ~NTF_USE);
+ struct vxlan_fdb *f;
+ int rc;
+
+ /* Disallow replace to add a multicast entry */
+ if ((flags & NLM_F_REPLACE) &&
+ (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
+ return -EOPNOTSUPP;
+
+ netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+ rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
+ vni, ifindex, fdb_flags, &f);
+ if (rc < 0)
+ return rc;
+
+ rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
+ swdev_notify, extack);
+ if (rc)
+ goto err_notify;
+
+ return 0;
+
+err_notify:
+ vxlan_fdb_destroy(vxlan, f, false, false);
+ return rc;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_fdb *f;
+
+ f = __vxlan_find_mac(vxlan, mac, src_vni);
+ if (f) {
+ if (flags & NLM_F_EXCL) {
+ netdev_dbg(vxlan->dev,
+ "lost race to create %pM\n", mac);
+ return -EEXIST;
+ }
+
+ return vxlan_fdb_update_existing(vxlan, ip, state, flags, port,
+ vni, ifindex, ndm_flags, f,
+ swdev_notify, extack);
+ } else {
+ if (!(flags & NLM_F_CREATE))
+ return -ENOENT;
+
+ return vxlan_fdb_update_create(vxlan, mac, ip, state, flags,
+ port, src_vni, vni, ifindex,
+ ndm_flags, swdev_notify, extack);
+ }
+}
+
static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
struct vxlan_rdst *rd, bool swdev_notify)
{
list_del_rcu(&rd->list);
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL);
call_rcu(&rd->rcu, vxlan_dst_free);
}
@@ -1025,7 +1098,8 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid, u16 flags)
+ const unsigned char *addr, u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */
@@ -1055,7 +1129,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
- true);
+ true, extack);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -1223,7 +1297,7 @@ static bool vxlan_snoop(struct net_device *dev,
rdst->remote_ip = *src_ip;
f->updated = jiffies;
- vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true);
+ vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
@@ -1236,7 +1310,7 @@ static bool vxlan_snoop(struct net_device *dev,
vxlan->cfg.dst_port,
vni,
vxlan->default_dst.remote_vni,
- ifindex, NTF_SELF, true);
+ ifindex, NTF_SELF, true, NULL);
spin_unlock(&vxlan->hash_lock);
}
@@ -2219,7 +2293,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
- struct net_device *dev = skb->dev;
+ struct net_device *dev;
int len = skb->len;
tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -2239,9 +2313,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
#endif
}
+ rcu_read_lock();
+ dev = skb->dev;
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ goto drop;
+ }
+
if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
- vni);
+ vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
@@ -2254,8 +2334,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
rx_stats->rx_bytes += len;
u64_stats_update_end(&rx_stats->syncp);
} else {
+drop:
dev->stats.rx_dropped++;
}
+ rcu_read_unlock();
}
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
@@ -2841,6 +2923,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_fdb_dump = vxlan_fdb_dump,
.ndo_fdb_get = vxlan_fdb_get,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
+ .ndo_change_proto_down = dev_change_proto_down_generic,
};
static const struct net_device_ops vxlan_netdev_raw_ops = {
@@ -3478,9 +3561,12 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
goto errout;
/* notify default fdb entry */
- if (f)
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
- true);
+ if (f) {
+ err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
+ RTM_NEWNEIGH, true, extack);
+ if (err)
+ goto errout;
+ }
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
@@ -3497,11 +3583,40 @@ errout:
return err;
}
+/* Set/clear flags based on attribute */
+static int vxlan_nl2flag(struct vxlan_config *conf, struct nlattr *tb[],
+ int attrtype, unsigned long mask, bool changelink,
+ bool changelink_supported,
+ struct netlink_ext_ack *extack)
+{
+ unsigned long flags;
+
+ if (!tb[attrtype])
+ return 0;
+
+ if (changelink && !changelink_supported) {
+ vxlan_flag_attr_error(attrtype, extack);
+ return -EOPNOTSUPP;
+ }
+
+ if (vxlan_policy[attrtype].type == NLA_FLAG)
+ flags = conf->flags | mask;
+ else if (nla_get_u8(tb[attrtype]))
+ flags = conf->flags | mask;
+ else
+ flags = conf->flags & ~mask;
+
+ conf->flags = flags;
+
+ return 0;
+}
+
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
- bool changelink)
+ bool changelink, struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ int err = 0;
memset(conf, 0, sizeof(*conf));
@@ -3512,40 +3627,54 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_VXLAN_ID]) {
__be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
- if (changelink && (vni != conf->vni))
+ if (changelink && (vni != conf->vni)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], "Cannot change VNI");
return -EOPNOTSUPP;
+ }
conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
}
if (data[IFLA_VXLAN_GROUP]) {
- if (changelink && (conf->remote_ip.sa.sa_family != AF_INET))
+ if (changelink && (conf->remote_ip.sa.sa_family != AF_INET)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP], "New group address family does not match old group");
return -EOPNOTSUPP;
+ }
conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
conf->remote_ip.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_GROUP6]) {
- if (!IS_ENABLED(CONFIG_IPV6))
+ if (!IS_ENABLED(CONFIG_IPV6)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
+ }
- if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6))
+ if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "New group address family does not match old group");
return -EOPNOTSUPP;
+ }
conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
conf->remote_ip.sa.sa_family = AF_INET6;
}
if (data[IFLA_VXLAN_LOCAL]) {
- if (changelink && (conf->saddr.sa.sa_family != AF_INET))
+ if (changelink && (conf->saddr.sa.sa_family != AF_INET)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL], "New local address family does not match old");
return -EOPNOTSUPP;
+ }
conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
conf->saddr.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_LOCAL6]) {
- if (!IS_ENABLED(CONFIG_IPV6))
+ if (!IS_ENABLED(CONFIG_IPV6)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "IPv6 support not enabled in the kernel");
return -EPFNOSUPPORT;
+ }
- if (changelink && (conf->saddr.sa.sa_family != AF_INET6))
+ if (changelink && (conf->saddr.sa.sa_family != AF_INET6)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "New local address family does not match old");
return -EOPNOTSUPP;
+ }
/* TODO: respect scope id */
conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
@@ -3562,9 +3691,12 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
if (data[IFLA_VXLAN_TTL_INHERIT]) {
- if (changelink)
- return -EOPNOTSUPP;
- conf->flags |= VXLAN_F_TTL_INHERIT;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_TTL_INHERIT,
+ VXLAN_F_TTL_INHERIT, changelink, false,
+ extack);
+ if (err)
+ return err;
+
}
if (data[IFLA_VXLAN_LABEL])
@@ -3572,10 +3704,11 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
IPV6_FLOWLABEL_MASK;
if (data[IFLA_VXLAN_LEARNING]) {
- if (nla_get_u8(data[IFLA_VXLAN_LEARNING]))
- conf->flags |= VXLAN_F_LEARN;
- else
- conf->flags &= ~VXLAN_F_LEARN;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_LEARNING,
+ VXLAN_F_LEARN, changelink, true,
+ extack);
+ if (err)
+ return err;
} else if (!changelink) {
/* default to learn on a new device */
conf->flags |= VXLAN_F_LEARN;
@@ -3585,44 +3718,52 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
if (data[IFLA_VXLAN_PROXY]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_PROXY]))
- conf->flags |= VXLAN_F_PROXY;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_PROXY,
+ VXLAN_F_PROXY, changelink, false,
+ extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_RSC]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_RSC]))
- conf->flags |= VXLAN_F_RSC;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_RSC,
+ VXLAN_F_RSC, changelink, false,
+ extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_L2MISS]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_L2MISS]))
- conf->flags |= VXLAN_F_L2MISS;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L2MISS,
+ VXLAN_F_L2MISS, changelink, false,
+ extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_L3MISS]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_L3MISS]))
- conf->flags |= VXLAN_F_L3MISS;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L3MISS,
+ VXLAN_F_L3MISS, changelink, false,
+ extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_LIMIT]) {
- if (changelink)
+ if (changelink) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LIMIT],
+ "Cannot change limit");
return -EOPNOTSUPP;
+ }
conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
}
if (data[IFLA_VXLAN_COLLECT_METADATA]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
- conf->flags |= VXLAN_F_COLLECT_METADATA;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_COLLECT_METADATA,
+ VXLAN_F_COLLECT_METADATA, changelink, false,
+ extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_PORT_RANGE]) {
@@ -3632,72 +3773,92 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
conf->port_min = ntohs(p->low);
conf->port_max = ntohs(p->high);
} else {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
+ "Cannot change port range");
return -EOPNOTSUPP;
}
}
if (data[IFLA_VXLAN_PORT]) {
- if (changelink)
+ if (changelink) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT],
+ "Cannot change port");
return -EOPNOTSUPP;
+ }
conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
}
if (data[IFLA_VXLAN_UDP_CSUM]) {
- if (changelink)
+ if (changelink) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_UDP_CSUM],
+ "Cannot change UDP_CSUM flag");
return -EOPNOTSUPP;
+ }
if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
}
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
- conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
+ VXLAN_F_UDP_ZERO_CSUM6_TX, changelink,
+ false, extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
- conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+ VXLAN_F_UDP_ZERO_CSUM6_RX, changelink,
+ false, extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_REMCSUM_TX]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
- conf->flags |= VXLAN_F_REMCSUM_TX;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_TX,
+ VXLAN_F_REMCSUM_TX, changelink, false,
+ extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_REMCSUM_RX]) {
- if (changelink)
- return -EOPNOTSUPP;
- if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
- conf->flags |= VXLAN_F_REMCSUM_RX;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_RX,
+ VXLAN_F_REMCSUM_RX, changelink, false,
+ extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_GBP]) {
- if (changelink)
- return -EOPNOTSUPP;
- conf->flags |= VXLAN_F_GBP;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GBP,
+ VXLAN_F_GBP, changelink, false, extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_GPE]) {
- if (changelink)
- return -EOPNOTSUPP;
- conf->flags |= VXLAN_F_GPE;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GPE,
+ VXLAN_F_GPE, changelink, false,
+ extack);
+ if (err)
+ return err;
}
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
- if (changelink)
- return -EOPNOTSUPP;
- conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_NOPARTIAL,
+ VXLAN_F_REMCSUM_NOPARTIAL, changelink,
+ false, extack);
+ if (err)
+ return err;
}
if (tb[IFLA_MTU]) {
- if (changelink)
+ if (changelink) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
+ "Cannot change mtu");
return -EOPNOTSUPP;
+ }
conf->mtu = nla_get_u32(tb[IFLA_MTU]);
}
@@ -3714,7 +3875,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct vxlan_config conf;
int err;
- err = vxlan_nl2conf(tb, data, dev, &conf, false);
+ err = vxlan_nl2conf(tb, data, dev, &conf, false, extack);
if (err)
return err;
@@ -3727,56 +3888,51 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
- unsigned long old_age_interval;
- struct vxlan_rdst old_dst;
+ struct net_device *lowerdev;
struct vxlan_config conf;
int err;
- err = vxlan_nl2conf(tb, data,
- dev, &conf, true);
+ err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
if (err)
return err;
- old_age_interval = vxlan->cfg.age_interval;
- memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
-
- err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
+ err = vxlan_config_validate(vxlan->net, &conf, &lowerdev,
+ vxlan, extack);
if (err)
return err;
- if (old_age_interval != vxlan->cfg.age_interval)
- mod_timer(&vxlan->age_timer, jiffies);
-
/* handle default dst entry */
- if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
+ if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
spin_lock_bh(&vxlan->hash_lock);
- if (!vxlan_addr_any(&old_dst.remote_ip))
- __vxlan_fdb_delete(vxlan, all_zeros_mac,
- old_dst.remote_ip,
- vxlan->cfg.dst_port,
- old_dst.remote_vni,
- old_dst.remote_vni,
- old_dst.remote_ifindex,
- true);
-
- if (!vxlan_addr_any(&dst->remote_ip)) {
+ if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
- &dst->remote_ip,
+ &conf.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_APPEND | NLM_F_CREATE,
vxlan->cfg.dst_port,
- dst->remote_vni,
- dst->remote_vni,
- dst->remote_ifindex,
- NTF_SELF, true);
+ conf.vni, conf.vni,
+ conf.remote_ifindex,
+ NTF_SELF, true, extack);
if (err) {
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
}
+ if (!vxlan_addr_any(&dst->remote_ip))
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ dst->remote_ip,
+ vxlan->cfg.dst_port,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
+ true);
spin_unlock_bh(&vxlan->hash_lock);
}
+ if (conf.age_interval != vxlan->cfg.age_interval)
+ mod_timer(&vxlan->age_timer, jiffies);
+
+ vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
return 0;
}
@@ -4051,8 +4207,11 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
int err;
+ extack = switchdev_notifier_info_to_extack(&fdb_info->info);
+
spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
@@ -4062,7 +4221,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
fdb_info->remote_vni,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
- false);
+ false, extack);
spin_unlock_bh(&vxlan->hash_lock);
return err;
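
The vxlan changes above thread a struct netlink_ext_ack through the FDB and changelink paths so a rejected request can carry a human-readable reason tied to the offending attribute instead of a bare errno. A hedged sketch of that validation pattern, using a real attribute (IFLA_MTU) but an illustrative function name:

#include <linux/netlink.h>
#include <linux/if_link.h>

static int example_reject_immutable(struct nlattr *tb[], bool changelink,
				    struct netlink_ext_ack *extack)
{
	if (changelink && tb[IFLA_MTU]) {
		/* message is attached to the bad attribute, not just -EOPNOTSUPP */
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU], "Cannot change mtu");
		return -EOPNOTSUPP;
	}
	return 0;
}
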
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f6b000ddcd15..55f76e422fa0 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -769,7 +769,7 @@ static int cosa_net_tx_done(struct channel_data *chan, int size)
chan->netdev->stats.tx_aborted_errors++;
return 1;
}
- dev_kfree_skb_irq(chan->tx_skb);
+ dev_consume_skb_irq(chan->tx_skb);
chan->tx_skb = NULL;
chan->netdev->stats.tx_packets++;
chan->netdev->stats.tx_bytes += size;
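
Several WAN drivers below switch dev_kfree_skb_irq() to dev_consume_skb_irq() on their TX-completion paths: both free an skb from hard-IRQ context, but the consume variant does not report the packet as dropped to the drop-monitor/tracepoint machinery, which matches a successful transmit. An illustrative sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_tx_done(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_consume_skb_irq(skb);	/* successful TX, not a drop */
}
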
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c0b0f525c87c..fa78d2b14136 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -456,16 +456,16 @@ static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
int ret = 0;
if (debug > 1) {
- if (SOURCE_ID(state) != dpriv->dev_id) {
- printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
- dev->name, msg, SOURCE_ID(state), state );
+ if (SOURCE_ID(state) != dpriv->dev_id) {
+ printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
+ dev->name, msg, SOURCE_ID(state), state);
ret = -1;
- }
- if (state & 0x0df80c00) {
- printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
- dev->name, msg, state);
+ }
+ if (state & 0x0df80c00) {
+ printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
+ dev->name, msg, state);
ret = -1;
- }
+ }
}
return ret;
}
@@ -1575,7 +1575,7 @@ try:
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
}
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
dpriv->tx_skbuff[cur] = NULL;
++dpriv->tx_dirty;
} else {
@@ -1760,25 +1760,25 @@ try:
} else { /* SccEvt */
if (debug > 1) {
			//FIXME: check that all the events are present
- static struct {
- u32 mask;
- const char *irq_name;
- } evts[] = {
- { 0x00008000, "TIN"},
- { 0x00000020, "RSC"},
- { 0x00000010, "PCE"},
- { 0x00000008, "PLLA"},
- { 0, NULL}
- }, *evt;
-
- for (evt = evts; evt->irq_name; evt++) {
- if (state & evt->mask) {
+ static struct {
+ u32 mask;
+ const char *irq_name;
+ } evts[] = {
+ { 0x00008000, "TIN"},
+ { 0x00000020, "RSC"},
+ { 0x00000010, "PCE"},
+ { 0x00000008, "PLLA"},
+ { 0, NULL}
+ }, *evt;
+
+ for (evt = evts; evt->irq_name; evt++) {
+ if (state & evt->mask) {
printk(KERN_DEBUG "%s: %s\n",
- dev->name, evt->irq_name);
- if (!(state &= ~evt->mask))
- goto try;
+ dev->name, evt->irq_name);
+ if (!(state &= ~evt->mask))
+ goto try;
+ }
}
- }
} else {
if (!(state &= ~0x0000c03c))
goto try;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 839fa7715709..a08f04c3f644 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -279,10 +279,9 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
/* Get BD buffer */
- bd_buffer = dma_zalloc_coherent(priv->dev,
- (RX_BD_RING_LEN + TX_BD_RING_LEN) *
- MAX_RX_BUF_LENGTH,
- &bd_dma_addr, GFP_KERNEL);
+ bd_buffer = dma_alloc_coherent(priv->dev,
+ (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
+ &bd_dma_addr, GFP_KERNEL);
if (!bd_buffer) {
dev_err(priv->dev, "Could not allocate buffer descriptors\n");
@@ -483,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
memset(priv->tx_buffer +
(be32_to_cpu(bd->buf) - priv->dma_tx_addr),
0, skb->len);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
priv->tx_skbuff[priv->skb_dirtytx] = NULL;
priv->skb_dirtytx =
@@ -1057,6 +1056,54 @@ static const struct net_device_ops uhdlc_ops = {
.ndo_tx_timeout = uhdlc_tx_timeout,
};
+static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+ struct resource *res;
+ static int siram_init_flag;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, name);
+ if (!np)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ pr_err("%pOFn: failed to lookup pdev\n", np);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ of_node_put(np);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+ goto error_put_device;
+ }
+ *ptr = ioremap(res->start, resource_size(res));
+ if (!*ptr) {
+ ret = -ENOMEM;
+ goto error_put_device;
+ }
+
+ /* We've remapped the addresses, and we don't need the device any
+ * more, so we should release it.
+ */
+ put_device(&pdev->dev);
+
+ if (init_flag && siram_init_flag == 0) {
+ memset_io(*ptr, 0, resource_size(res));
+ siram_init_flag = 1;
+ }
+ return 0;
+
+error_put_device:
+ put_device(&pdev->dev);
+
+ return ret;
+}
+
static int ucc_hdlc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1151,6 +1198,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
ret = ucc_of_parse_tdm(np, utdm, ut_info);
if (ret)
goto free_utdm;
+
+ ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
+ (void __iomem **)&utdm->si_regs);
+ if (ret)
+ goto free_utdm;
+ ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
+ (void __iomem **)&utdm->siram);
+ if (ret)
+ goto unmap_si_regs;
}
if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
@@ -1159,7 +1215,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
ret = uhdlc_init(uhdlc_priv);
if (ret) {
dev_err(&pdev->dev, "Failed to init uhdlc\n");
- goto free_utdm;
+ goto undo_uhdlc_init;
}
dev = alloc_hdlcdev(uhdlc_priv);
@@ -1188,6 +1244,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
free_dev:
free_netdev(dev);
undo_uhdlc_init:
+ iounmap(utdm->siram);
+unmap_si_regs:
+ iounmap(utdm->si_regs);
free_utdm:
if (uhdlc_priv->tsa)
kfree(utdm);
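
The fsl_ucc_hdlc hunks above add hdlc_map_iomem(), which resolves a device by compatible string, maps its first MEM resource, and drops the references taken along the way. A hedged sketch of that lookup/cleanup order with illustrative names:

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *example_map_by_compat(const char *compat)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	void __iomem *base = NULL;

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return NULL;

	pdev = of_find_device_by_node(np);
	of_node_put(np);			/* node reference no longer needed */
	if (!pdev)
		return NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		base = ioremap(res->start, resource_size(res));

	put_device(&pdev->dev);			/* drop the reference from the lookup */
	return base;
}
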
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 6a505c26a3e7..5c60dc60a8e6 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -246,7 +246,7 @@
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
-#define free_buffer_irq dev_kfree_skb_irq
+#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
index 609710d64eb5..247f60c401ef 100644
--- a/drivers/net/wan/lmc/Makefile
+++ b/drivers/net/wan/lmc/Makefile
@@ -14,4 +14,4 @@ lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
# -DDEBUG \
# -DLMC_PACKET_LOG
-ccflags-y := -I. $(DBGDEF)
+ccflags-y := $(DBGDEF)
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4907453f17f5..22b065ff6d39 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1320,8 +1320,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
sc->lmc_device->stats.tx_packets++;
}
- // dev_kfree_skb(sc->lmc_txq[i]);
- dev_kfree_skb_irq(sc->lmc_txq[i]);
+ dev_consume_skb_irq(sc->lmc_txq[i]);
sc->lmc_txq[i] = NULL;
badtx++;
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 8e8c4c0e1b64..40c04ea1200a 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -761,7 +761,7 @@ send_complete( struct net_device *dev )
dev->stats.tx_packets++;
dev->stats.tx_bytes += nl->tx_buf_p->len;
#endif
- dev_kfree_skb_irq( nl->tx_buf_p );
+ dev_consume_skb_irq(nl->tx_buf_p);
nl->tx_buf_p = NULL;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index d573a57bc301..10d5333b7a88 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -185,7 +185,7 @@ static inline void wanxl_tx_intr(struct port *port)
desc->stat = PACKET_EMPTY; /* Free descriptor */
pci_unmap_single(port->card->pdev, desc->address, skb->len,
PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
}
}
@@ -565,7 +565,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
u32 plx_phy; /* PLX PCI base address */
u32 mem_phy; /* memory PCI base addr */
u8 __iomem *mem; /* memory virtual base addr */
- int i, ports, alloc_size;
+ int i, ports;
#ifndef MODULE
pr_info_once("%s\n", version);
@@ -601,8 +601,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
default: ports = 4;
}
- alloc_size = sizeof(struct card) + ports * sizeof(struct port);
- card = kzalloc(alloc_size, GFP_KERNEL);
+ card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
if (card == NULL) {
pci_release_regions(pdev);
pci_disable_device(pdev);
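
The wanxl change above replaces open-coded "sizeof(struct card) + ports * sizeof(struct port)" with struct_size(), which computes the size of a structure with a trailing flexible array and saturates instead of wrapping on overflow. A hedged sketch with illustrative types:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_port {
	int id;
};

struct example_card {
	int nports;
	struct example_port ports[];	/* flexible array member */
};

static struct example_card *example_card_alloc(int nports)
{
	struct example_card *card;

	/* struct_size(card, ports, nports) ==
	 *	sizeof(*card) + nports * sizeof(card->ports[0]),
	 * with overflow checking. */
	card = kzalloc(struct_size(card, ports, nports), GFP_KERNEL);
	if (card)
		card->nports = nports;
	return card;
}
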
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index deea41e96f01..0e56ab0aed9a 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1536,7 +1536,7 @@ static void z8530_tx_done(struct z8530_channel *c)
z8530_tx_begin(c);
c->netdevice->stats.tx_packets++;
c->netdevice->stats.tx_bytes += skb->len;
- dev_kfree_skb_irq(skb);
+ dev_consume_skb_irq(skb);
}
/**
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 0b602951ff6b..d28b96d06919 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1260,8 +1260,8 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
goto error_msg_hdr_check;
result = -EIO;
num_pls = le16_to_cpu(msg_hdr->num_pls);
- pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
- num_pls * sizeof(msg_hdr->pld[0]);
+ /* Check payload descriptor(s) */
+ pl_itr = struct_size(msg_hdr, pld, num_pls);
pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
if (pl_itr > skb_len) { /* got all the payload descriptors? */
dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index f8eb66ef2944..73842a830645 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -210,6 +210,7 @@ retry:
msleep(10); /* give the device some time */
goto retry;
}
+ /* fall through */
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 66326b949ab1..142c777b287f 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_ATH10K) += ath10k_core.o
ath10k_core-y += mac.o \
debug.o \
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 4cd69aca75e2..0bf726c55736 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/of.h>
@@ -661,7 +650,8 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
ath10k_pci_flush(ar);
}
-static int ath10k_ahb_hif_power_up(struct ath10k *ar)
+static int ath10k_ahb_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
{
int ret;
diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h
index d43e375215c8..cee11a3ae2a5 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.h
+++ b/drivers/net/wireless/ath/ath10k/ahb.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _AHB_H_
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 1750b182209b..95dc4be82e5c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "bmi.h"
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 725c9afc63f2..ef3bdba43bed 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BMI_H_
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f6d3ecbdd3a3..24b983edb357 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hif.h"
@@ -228,11 +217,31 @@ ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
}
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
- u32 ce_ctrl_addr,
- unsigned int addr)
+ u32 ce_id,
+ u64 addr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ u32 addr_lo = lower_32_bits(addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_base_addr_lo, addr_lo);
+
+ if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
+ ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
+ addr);
+ }
+}
+
+static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr)
{
+ u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
+
ath10k_ce_write32(ar, ce_ctrl_addr +
- ar->hw_ce_regs->sr_base_addr, addr);
+ ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
}
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
@@ -313,11 +322,36 @@ static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
}
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
- u32 ce_ctrl_addr,
- u32 addr)
+ u32 ce_id,
+ u64 addr)
{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ u32 addr_lo = lower_32_bits(addr);
+
ath10k_ce_write32(ar, ce_ctrl_addr +
- ar->hw_ce_regs->dr_base_addr, addr);
+ ar->hw_ce_regs->dr_base_addr_lo, addr_lo);
+
+ if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
+ ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
+ addr);
+ }
+}
+
+static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr)
+{
+ u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
+ u32 reg_value;
+
+ reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_hi);
+ reg_value &= ~CE_DESC_ADDR_HI_MASK;
+ reg_value |= addr_hi;
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_hi, reg_value);
}
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
@@ -500,14 +534,8 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
/* WORKAROUND */
- if (!(flags & CE_SEND_FLAG_GATHER)) {
- if (ar->hw_params.shadow_reg_support)
- ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
- write_index);
- else
- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
- write_index);
- }
+ if (!(flags & CE_SEND_FLAG_GATHER))
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
src_ring->write_index = write_index;
exit:
@@ -563,7 +591,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
addr = (__le32 *)&sdesc.addr;
- flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+ flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
addr[0] = __cpu_to_le32(buffer);
addr[1] = __cpu_to_le32(flags);
if (flags & CE_SEND_FLAG_GATHER)
@@ -581,8 +609,14 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
/* Update Source Ring Write Index */
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
- if (!(flags & CE_SEND_FLAG_GATHER))
- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+ if (!(flags & CE_SEND_FLAG_GATHER)) {
+ if (ar->hw_params.shadow_reg_support)
+ ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
+ write_index);
+ else
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ }
src_ring->write_index = write_index;
exit:
@@ -731,7 +765,7 @@ static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
return -ENOSPC;
desc->addr = __cpu_to_le64(paddr);
- desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+ desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);
desc->nbytes = 0;
@@ -1032,8 +1066,8 @@ EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp)
+static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
@@ -1084,6 +1118,66 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
+
+static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int read_index;
+ struct ce_desc_64 *desc;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+ * The SW completion index has caught up with the cached
+ * version of the HW completion index.
+ * Update the cached HW completion index to see whether
+ * the SW has really caught up to the HW, or if the cached
+ * value of the HW index has become stale.
+ */
+
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ if (read_index == 0xffffffff)
+ return -ENODEV;
+
+ read_index &= nentries_mask;
+ src_ring->hw_index = read_index;
+ }
+
+ if (ar->hw_params.rri_on_ddr)
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ else
+ read_index = src_ring->hw_index;
+
+ if (read_index == sw_index)
+ return -EIO;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+ desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+ sw_index);
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ return ce_state->ops->ce_completed_send_next_nolock(ce_state,
+ per_transfer_contextp);
+}
EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
static void ath10k_ce_extract_desc_data(struct ath10k *ar,
@@ -1346,7 +1440,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ ath10k_ce_src_ring_base_addr_set(ar, ce_id,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
@@ -1385,7 +1479,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
@@ -1404,12 +1498,12 @@ static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
u32 nentries)
{
src_ring->shadow_base_unaligned = kcalloc(nentries,
- sizeof(struct ce_desc),
+ sizeof(struct ce_desc_64),
GFP_KERNEL);
if (!src_ring->shadow_base_unaligned)
return -ENOMEM;
- src_ring->shadow_base = (struct ce_desc *)
+ src_ring->shadow_base = (struct ce_desc_64 *)
PTR_ALIGN(src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
return 0;
@@ -1461,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) {
dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
+ (nentries * sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned,
base_addr);
@@ -1553,10 +1647,10 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
- dma_zalloc_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr, GFP_KERNEL);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(dest_ring);
return ERR_PTR(-ENOMEM);
@@ -1661,7 +1755,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
@@ -1671,7 +1765,7 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}
@@ -1803,6 +1897,9 @@ static const struct ath10k_ce_ops ce_ops = {
.ce_extract_desc_data = ath10k_ce_extract_desc_data,
.ce_free_pipe = _ath10k_ce_free_pipe,
.ce_send_nolock = _ath10k_ce_send_nolock,
+ .ce_set_src_ring_base_addr_hi = NULL,
+ .ce_set_dest_ring_base_addr_hi = NULL,
+ .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
};
static const struct ath10k_ce_ops ce_64_ops = {
@@ -1815,6 +1912,9 @@ static const struct ath10k_ce_ops ce_64_ops = {
.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
.ce_free_pipe = _ath10k_ce_free_pipe_64,
.ce_send_nolock = _ath10k_ce_send_nolock_64,
+ .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
+ .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
+ .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
};
static void ath10k_ce_set_ops(struct ath10k *ar,
@@ -1910,7 +2010,7 @@ void ath10k_ce_alloc_rri(struct ath10k *ar)
lower_32_bits(ce->paddr_rri));
ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
(upper_32_bits(ce->paddr_rri) &
- CE_DESC_FLAGS_GET_MASK));
+ CE_DESC_ADDR_HI_MASK));
for (i = 0; i < CE_COUNT; i++) {
ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
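
The ath10k copy-engine changes above push more per-descriptor-format behaviour behind the ath10k_ce_ops table (the new base_addr_hi hooks and ce_completed_send_next_nolock) so 32-bit and 64-bit descriptor targets share one call site. A generic, hedged sketch of the dispatch shape, with illustrative names:

struct example_pipe;

struct example_ops {
	int (*completed_send_next_nolock)(struct example_pipe *pipe,
					  void **ctxp);
};

struct example_pipe {
	const struct example_ops *ops;
};

/* common entry point; the ops table selects the 32- or 64-bit variant */
static int example_completed_send_next_nolock(struct example_pipe *pipe,
					      void **ctxp)
{
	return pipe->ops->completed_send_next_nolock(pipe, ctxp);
}
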
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index ead9987c3259..a7478c240f78 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _CE_H_
@@ -39,8 +28,8 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
-#define CE_DESC_FLAGS_GET_MASK GENMASK(4, 0)
-#define CE_DESC_37BIT_ADDR_MASK GENMASK_ULL(37, 0)
+#define CE_DESC_ADDR_MASK GENMASK_ULL(34, 0)
+#define CE_DESC_ADDR_HI_MASK GENMASK(4, 0)
/* Following desc flags are used in QCA99X0 */
#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
@@ -104,7 +93,7 @@ struct ath10k_ce_ring {
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
- u32 base_addr_ce_space_unaligned;
+ dma_addr_t base_addr_ce_space_unaligned;
/*
* Actual start of descriptors.
@@ -115,10 +104,10 @@ struct ath10k_ce_ring {
void *base_addr_owner_space;
/* CE address space */
- u32 base_addr_ce_space;
+ dma_addr_t base_addr_ce_space;
char *shadow_base_unaligned;
- struct ce_desc *shadow_base;
+ struct ce_desc_64 *shadow_base;
/* keep last */
void *per_transfer_context[0];
@@ -334,6 +323,14 @@ struct ath10k_ce_ops {
void *per_transfer_context,
dma_addr_t buffer, u32 nbytes,
u32 transfer_id, u32 flags);
+ void (*ce_set_src_ring_base_addr_hi)(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr);
+ void (*ce_set_dest_ring_base_addr_hi)(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr);
+ int (*ce_completed_send_next_nolock)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp);
};
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
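Annotation: the 32-bit ce_ops table above deliberately leaves the new base-address-high hooks NULL, so callers have to treat them as optional. A standalone sketch of that call pattern, using local stand-in types rather than the driver's structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ce_ops_sketch {
        void (*set_src_ring_base_addr_hi)(uint32_t ctrl_addr, uint64_t addr);
};

static void program_src_ring(const struct ce_ops_sketch *ops,
                             uint32_t ctrl_addr, uint64_t addr)
{
        printf("lo word 0x%08x -> reg 0x%x\n", (uint32_t)addr, ctrl_addr);
        if (ops->set_src_ring_base_addr_hi)      /* hook is NULL on 32-bit targets */
                ops->set_src_ring_base_addr_hi(ctrl_addr, addr);
}

static void wcn3990_like_set_hi(uint32_t ctrl_addr, uint64_t addr)
{
        printf("hi word 0x%x -> reg 0x%x\n",
               (uint32_t)(addr >> 32) & 0x1f, ctrl_addr + 4);
}

int main(void)
{
        struct ce_ops_sketch ce32 = { 0 };
        struct ce_ops_sketch ce64 = { .set_src_ring_base_addr_hi = wcn3990_like_set_hi };

        program_src_ring(&ce32, 0x0, 0x1000);            /* high-word write skipped */
        program_src_ring(&ce64, 0x0, 0x3c0001000ull);    /* both words written */
        return 0;
}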
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 399b501f3c3c..835b8de92d55 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
@@ -548,7 +537,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
.id = WCN3990_HW_1_0_DEV_VERSION,
.dev_id = 0,
- .bus = ATH10K_BUS_PCI,
+ .bus = ATH10K_BUS_SNOC,
.name = "wcn3990 hw1.0",
.continuous_frag_desc = true,
.tx_chain_mask = 0x7,
@@ -560,10 +549,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
- .num_peers = TARGET_HL_10_TLV_NUM_PEERS,
- .n_cipher_suites = 8,
- .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
- .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
+ .num_peers = TARGET_HL_TLV_NUM_PEERS,
+ .n_cipher_suites = 11,
+ .ast_skid_limit = TARGET_HL_TLV_AST_SKID_LIMIT,
+ .num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
.per_ce_irq = true,
@@ -648,11 +637,24 @@ static void ath10k_init_sdio(struct ath10k *ar)
ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
ath10k_bmi_read32(ar, hi_acs_flags, &param);
- param |= (HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET |
- HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET |
- HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE);
+ /* Data transfer is not initiated when reduced Tx completion
+ * is used for SDIO. Disable it until fixed
+ */
+ param &= ~HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
+ /* Alternate credit size of 1544 as used by SDIO firmware is
+ * not big enough for mac80211 / native wifi frames. Disable it
+ */
+ param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+ param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
ath10k_bmi_write32(ar, hi_acs_flags, param);
+
+ /* Explicitly set fwlog prints to zero as target may turn it on
+ * based on scratch registers.
+ */
+ ath10k_bmi_read32(ar, hi_option_flag, &param);
+ param |= HI_OPTION_DISABLE_DBGLOG;
+ ath10k_bmi_write32(ar, hi_option_flag, param);
}
static int ath10k_init_configure_target(struct ath10k *ar)
@@ -2309,10 +2311,14 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
- ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ if (ar->hif.bus == ATH10K_BUS_SDIO)
+ ar->htt.max_num_pending_tx =
+ TARGET_TLV_NUM_MSDU_DESC_HL;
+ else
+ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
- ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
- WMI_STAT_PEER;
+ ar->fw_stats_req_mask = WMI_TLV_STAT_PDEV | WMI_TLV_STAT_VDEV |
+ WMI_TLV_STAT_PEER | WMI_TLV_STAT_PEER_EXTD;
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
break;
@@ -2556,6 +2562,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ status = ath10k_hif_swap_mailbox(ar);
+ if (status) {
+ ath10k_err(ar, "failed to swap mailbox: %d\n", status);
+ goto err_hif_stop;
+ }
+
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_htt_connect(&ar->htt);
if (status) {
@@ -2621,6 +2633,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->wmi.svc_map))
val |= WMI_10_4_TX_DATA_ACK_RSSI;
+ if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
+ val |= WMI_10_4_REPORT_AIRTIME;
+
status = ath10k_mac_ext_resource_config(ar, val);
if (status) {
ath10k_err(ar,
@@ -2649,6 +2664,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ status = ath10k_wmi_pdev_set_base_macaddr(ar, ar->mac_addr);
+ if (status && status != -EOPNOTSUPP) {
+ ath10k_err(ar,
+ "failed to set base mac address: %d\n", status);
+ goto err_hif_stop;
+ }
+
/* Some firmware revisions do not properly set up hardware rx filter
* registers.
*
@@ -2763,7 +2785,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
struct bmi_target_info target_info;
int ret = 0;
- ret = ath10k_hif_power_up(ar);
+ ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath10k_err(ar, "could not power on hif bus (%d)\n", ret);
return ret;
@@ -2977,8 +2999,8 @@ err:
int ath10k_core_register(struct ath10k *ar,
const struct ath10k_bus_params *bus_params)
{
- ar->chip_id = bus_params->chip_id;
- ar->dev_type = bus_params->dev_type;
+ ar->bus_param = *bus_params;
+
queue_work(ar->workqueue, &ar->register_work);
return 0;
@@ -3098,9 +3120,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
- spin_lock_init(&ar->txqs_lock);
- INIT_LIST_HEAD(&ar->txqs);
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
init_waitqueue_head(&ar->htt.empty_tx_wq);
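Annotation: the new pdev_set_base_macaddr call in core_start above treats -EOPNOTSUPP as non-fatal, the usual way of issuing a command that older firmware may not implement. A standalone sketch of that error-handling pattern (function names are illustrative):

#include <errno.h>
#include <stdio.h>

static int fw_set_base_macaddr(int supported)
{
        return supported ? 0 : -EOPNOTSUPP;
}

static int start_sketch(int fw_supports_cmd)
{
        int status = fw_set_base_macaddr(fw_supports_cmd);

        if (status && status != -EOPNOTSUPP) {
                fprintf(stderr, "failed to set base mac address: %d\n", status);
                return status;     /* fatal, mirrors the goto err_hif_stop path */
        }
        return 0;                  /* unsupported firmware is tolerated */
}

int main(void)
{
        printf("%d %d\n", start_sketch(1), start_sketch(0));  /* 0 0 */
        return 0;
}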
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 46e9c8c97a4d..e08a17b01e03 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _CORE_H_
@@ -90,6 +79,9 @@
/* The magic used by QCA spec */
#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
+/* Default Airtime weight multiplier (tuned for multi-client performance) */
+#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4
+
struct ath10k;
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -116,6 +108,7 @@ enum ath10k_skb_flags {
ATH10K_SKB_F_DELIVER_CAB = BIT(2),
ATH10K_SKB_F_MGMT = BIT(3),
ATH10K_SKB_F_QOS = BIT(4),
+ ATH10K_SKB_F_RAW_TX = BIT(5),
};
struct ath10k_skb_cb {
@@ -123,6 +116,7 @@ struct ath10k_skb_cb {
u8 flags;
u8 eid;
u16 msdu_id;
+ u16 airtime_est;
struct ieee80211_vif *vif;
struct ieee80211_txq *txq;
} __packed;
@@ -195,7 +189,7 @@ struct ath10k_fw_stats_peer {
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
- u32 rx_duration;
+ u64 rx_duration;
};
struct ath10k_fw_extd_stats_peer {
@@ -443,14 +437,14 @@ enum ath10k_amsdu_subfrm_num {
};
struct ath10k_sta_tid_stats {
- unsigned long int rx_pkt_from_fw;
- unsigned long int rx_pkt_unchained;
- unsigned long int rx_pkt_drop_chained;
- unsigned long int rx_pkt_drop_filter;
- unsigned long int rx_pkt_err[ATH10K_PKT_RX_ERR_MAX];
- unsigned long int rx_pkt_queued_for_mac;
- unsigned long int rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MAX];
- unsigned long int rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX];
+ unsigned long rx_pkt_from_fw;
+ unsigned long rx_pkt_unchained;
+ unsigned long rx_pkt_drop_chained;
+ unsigned long rx_pkt_drop_filter;
+ unsigned long rx_pkt_err[ATH10K_PKT_RX_ERR_MAX];
+ unsigned long rx_pkt_queued_for_mac;
+ unsigned long rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MAX];
+ unsigned long rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX];
};
enum ath10k_counter_type {
@@ -495,6 +489,7 @@ struct ath10k_sta {
u16 peer_id;
struct rate_info txrate;
struct ieee80211_tx_info tx_info;
+ u32 last_tx_bitrate;
struct work_struct update_wk;
u64 rx_duration;
@@ -571,6 +566,7 @@ struct ath10k_vif {
bool nohwcrypt;
int num_legacy_stations;
int txpower;
+ bool ftm_responder;
struct wmi_wmm_params_all_arg wmm_params;
struct work_struct ap_csa_work;
struct delayed_work connection_loss_work;
@@ -922,6 +918,7 @@ enum ath10k_dev_type {
struct ath10k_bus_params {
u32 chip_id;
enum ath10k_dev_type dev_type;
+ bool link_can_suspend;
};
struct ath10k {
@@ -1068,10 +1065,7 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
- /* protects: ar->txqs, artxq->list */
- spinlock_t txqs_lock;
- struct list_head txqs;
struct list_head arvifs;
struct list_head peers;
struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
@@ -1182,6 +1176,7 @@ struct ath10k {
u32 ampdu_reference;
+ const u8 *wmi_key_cipher;
void *ce_priv;
u32 sta_tid_stats_mask;
@@ -1190,6 +1185,7 @@ struct ath10k {
enum ath10k_radar_confirmation_state radar_conf_state;
struct ath10k_radar_found_info last_radar_info;
struct work_struct radar_confirmation_work;
+ struct ath10k_bus_params bus_param;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
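Annotation: core.h above replaces the separate chip_id/dev_type fields with a wholesale copy of struct ath10k_bus_params, so a bus driver fills one structure before registering. A standalone sketch of the pattern with local stand-in types and made-up field values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bus_params_sketch {
        uint32_t chip_id;
        int dev_type;
        bool link_can_suspend;   /* new field introduced by this hunk */
};

struct ar_sketch {
        struct bus_params_sketch bus_param;   /* replaces ar->chip_id and ar->dev_type */
};

static void core_register_sketch(struct ar_sketch *ar,
                                 const struct bus_params_sketch *bp)
{
        ar->bus_param = *bp;     /* single struct copy, as in the core.c hunk */
}

int main(void)
{
        struct bus_params_sketch bp = { .chip_id = 0x043222ff,   /* example value */
                                        .dev_type = 0,
                                        .link_can_suspend = true };
        struct ar_sketch ar;

        core_register_sketch(&ar, &bp);
        printf("chip_id 0x%08x\n", ar.bus_param.chip_id);
        return 0;
}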
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index eadae2f9206b..33838d9c1cb6 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "coredump.h"
@@ -1167,7 +1156,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
guid_copy(&dump_data->guid, &crash_data->guid);
- dump_data->chip_id = cpu_to_le32(ar->chip_id);
+ dump_data->chip_id = cpu_to_le32(ar->bus_param.chip_id);
dump_data->bus_type = cpu_to_le32(0);
dump_data->target_version = cpu_to_le32(ar->target_version);
dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 5dac653e1649..09de41922f97 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _COREDUMP_H_
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 02988fc378a1..32d967a31c65 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
@@ -58,7 +47,7 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x",
ar->hw_params.name,
ar->target_version,
- ar->chip_id,
+ ar->bus_param.chip_id,
ar->id.subsystem_vendor, ar->id.subsystem_device);
ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n",
@@ -625,7 +614,7 @@ static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
size_t len;
char buf[50];
- len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->bus_param.chip_id);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1263,6 +1252,9 @@ static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
return -EINVAL;
+ if (ar->hw_params.cal_data_len == 0)
+ return -EOPNOTSUPP;
+
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 5cf16d690724..db78e855a80f 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _DEBUG_H_
@@ -213,12 +202,12 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void ath10k_sta_update_rx_duration(struct ath10k *ar,
struct ath10k_fw_stats *stats);
void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
- unsigned long int num_msdus,
+ unsigned long num_msdus,
enum ath10k_pkt_rx_err err,
- unsigned long int unchain_cnt,
- unsigned long int drop_cnt,
- unsigned long int drop_cnt_filter,
- unsigned long int queued_msdus);
+ unsigned long unchain_cnt,
+ unsigned long drop_cnt,
+ unsigned long drop_cnt_filter,
+ unsigned long queued_msdus);
void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar,
u16 peer_id, u8 tid,
struct htt_rx_indication_mpdu_range *ranges,
@@ -232,12 +221,12 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar,
static inline
void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
- unsigned long int num_msdus,
+ unsigned long num_msdus,
enum ath10k_pkt_rx_err err,
- unsigned long int unchain_cnt,
- unsigned long int drop_cnt,
- unsigned long int drop_cnt_filter,
- unsigned long int queued_msdus)
+ unsigned long unchain_cnt,
+ unsigned long drop_cnt,
+ unsigned long drop_cnt_filter,
+ unsigned long queued_msdus)
{
}
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 4778a455d81a..c704ae371c4d 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
@@ -87,12 +76,12 @@ out:
}
void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
- unsigned long int num_msdus,
+ unsigned long num_msdus,
enum ath10k_pkt_rx_err err,
- unsigned long int unchain_cnt,
- unsigned long int drop_cnt,
- unsigned long int drop_cnt_filter,
- unsigned long int queued_msdus)
+ unsigned long unchain_cnt,
+ unsigned long drop_cnt,
+ unsigned long drop_cnt_filter,
+ unsigned long queued_msdus)
{
struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
@@ -696,11 +685,12 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
" %llu ", stats->ht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
- " BW %s (20,40,80,160 MHz)\n", str[j]);
+ " BW %s (20,5,10,40,80,160 MHz)\n", str[j]);
len += scnprintf(buf + len, size - len,
- " %llu %llu %llu %llu\n",
+ " %llu %llu %llu %llu %llu %llu\n",
stats->bw[j][0], stats->bw[j][1],
- stats->bw[j][2], stats->bw[j][3]);
+ stats->bw[j][2], stats->bw[j][3],
+ stats->bw[j][4], stats->bw[j][5]);
len += scnprintf(buf + len, size - len,
" NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
len += scnprintf(buf + len, size - len,
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 1a59ea0068c2..fe5417962f40 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HIF_H_
@@ -59,6 +48,8 @@ struct ath10k_hif_ops {
*/
void (*stop)(struct ath10k *ar);
+ int (*swap_mailbox)(struct ath10k *ar);
+
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
@@ -81,7 +72,7 @@ struct ath10k_hif_ops {
void (*write32)(struct ath10k *ar, u32 address, u32 value);
/* Power up the device and enter BMI transfer mode for FW download */
- int (*power_up)(struct ath10k *ar);
+ int (*power_up)(struct ath10k *ar, enum ath10k_firmware_mode fw_mode);
/* Power down the device and free up resources. stop() must be called
* before this if start() was called earlier
@@ -139,6 +130,13 @@ static inline void ath10k_hif_stop(struct ath10k *ar)
return ar->hif.ops->stop(ar);
}
+static inline int ath10k_hif_swap_mailbox(struct ath10k *ar)
+{
+ if (ar->hif.ops->swap_mailbox)
+ return ar->hif.ops->swap_mailbox(ar);
+ return 0;
+}
+
static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
@@ -165,9 +163,10 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}
-static inline int ath10k_hif_power_up(struct ath10k *ar)
+static inline int ath10k_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
{
- return ar->hif.ops->power_up(ar);
+ return ar->hif.ops->power_up(ar, fw_mode);
}
static inline void ath10k_hif_power_down(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 28daed5981a1..805a7f8a04f2 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
@@ -53,7 +42,7 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- if (htc->ar->dev_type != ATH10K_DEV_TYPE_HL)
+ if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
@@ -88,7 +77,8 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
- hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+ if (ep->tx_credit_flow_enabled)
+ hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
@@ -138,7 +128,7 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
skb_cb->eid = eid;
- if (ar->dev_type != ATH10K_DEV_TYPE_HL) {
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
@@ -161,7 +151,7 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return 0;
err_unmap:
- if (ar->dev_type != ATH10K_DEV_TYPE_HL)
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 51fda6c23f69..f55d3caec61f 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HTC_H_
@@ -51,7 +40,6 @@ struct ath10k;
*/
#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
-#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 21a67f82f037..d235ff3098e8 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
@@ -268,7 +257,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
return status;
}
- status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+ status = htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
htt->max_num_ampdu,
htt->max_num_amsdu);
if (status) {
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index a76f7c9e2199..4cee5492abc8 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HTT_H_
@@ -357,6 +346,13 @@ struct htt_aggr_conf {
u8 max_num_amsdu_subframes;
} __packed;
+struct htt_aggr_conf_v2 {
+ u8 max_num_ampdu_subframes;
+ /* amsdu_subframes is limited by 0x1F mask */
+ u8 max_num_amsdu_subframes;
+ u8 reserved;
+} __packed;
+
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
struct htt_mgmt_tx_desc_qca99x0 {
__le32 rate;
@@ -564,6 +560,7 @@ struct htt_mgmt_tx_completion {
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5)
#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
+#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7)
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
@@ -576,7 +573,14 @@ struct htt_mgmt_tx_completion {
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
-#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
+#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
+#define HTT_TX_CMPL_FLAG_PPID_PRESENT BIT(1)
+#define HTT_TX_CMPL_FLAG_PA_PRESENT BIT(2)
+#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3)
+
+#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3)
+#define HTT_TX_DATA_APPEND_RETRIES BIT(0)
+#define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1)
struct htt_rx_indication_hdr {
u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
@@ -852,6 +856,88 @@ enum htt_data_tx_flags {
#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
+struct htt_append_retries {
+ __le16 msdu_id;
+ u8 tx_retries;
+ u8 flag;
+} __packed;
+
+struct htt_data_tx_completion_ext {
+ struct htt_append_retries a_retries;
+ __le32 t_stamp;
+ __le16 msdus_rssi[0];
+} __packed;
+
+/**
+ * @brief target -> host TX completion indication message definition
+ *
+ * @details
+ * The following diagram shows the format of the TX completion indication sent
+ * from the target to the host
+ *
+ * |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0|
+ * |-------------------------------------------------------------|
+ * header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type |
+ * |-------------------------------------------------------------|
+ * payload: | MSDU1 ID | MSDU0 ID |
+ * |-------------------------------------------------------------|
+ * : MSDU3 ID : MSDU2 ID :
+ * |-------------------------------------------------------------|
+ * | struct htt_tx_compl_ind_append_retries |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | struct htt_tx_compl_ind_append_tx_tstamp |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | MSDU1 ACK RSSI | MSDU0 ACK RSSI |
+ * |-------------------------------------------------------------|
+ * : MSDU3 ACK RSSI : MSDU2 ACK RSSI :
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * -msg_type
+ * Bits 7:0
+ * Purpose: identifies this as HTT TX completion indication
+ * -status
+ * Bits 10:8
+ * Purpose: the TX completion status of the payload fragmentation descriptors
+ * Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
+ * -tid
+ * Bits 14:11
+ * Purpose: the tid associated with those fragmentation descriptors. It is
+ * valid or not, depending on the tid_invalid bit.
+ * Value: 0 to 15
+ * -tid_invalid
+ * Bits 15:15
+ * Purpose: this bit indicates whether the tid field is valid or not
+ * Value: 0 indicates valid, 1 indicates invalid
+ * -num
+ * Bits 23:16
+ * Purpose: the number of payloads in this indication
+ * Value: 1 to 255
+ * -A0 = append
+ * Bits 24:24
+ * Purpose: append the struct htt_tx_compl_ind_append_retries which contains
+ * the number of tx retries for one MSDU at the end of this message
+ * Value: 0 indicates no appending, 1 indicates appending
+ * -A1 = append1
+ * Bits 25:25
+ * Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
+ * contains the timestamp info for each TX msdu id in payload.
+ * Value: 0 indicates no appending, 1 indicates appending
+ * -TP = MSDU tx power presence
+ * Bits 26:26
+ * Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
+ * for each MSDU referenced by the TX_COMPL_IND message.
+ * The order of the per-MSDU tx power reports matches the order
+ * of the MSDU IDs.
+ * Value: 0 indicates not appending, 1 indicates appending
+ * -A2 = append2
+ * Bits 27:27
+ * Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
+ * TX_COMP_IND message. The order of the per-MSDU ACK RSSI report
+ * matches the order of the MSDU IDs.
+ * The ACK RSSI values are valid when status is COMPLETE_OK (and
+ * this append2 bit is set).
+ * Value: 0 indicates not appending, 1 indicates appending
+ */
+
struct htt_data_tx_completion {
union {
u8 flags;
@@ -866,6 +952,21 @@ struct htt_data_tx_completion {
__le16 msdus[0]; /* variable length based on %num_msdus */
} __packed;
+#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0)
+#define HTT_TX_PPDU_DUR_INFO0_TID_MASK GENMASK(20, 16)
+
+struct htt_data_tx_ppdu_dur {
+ __le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */
+ __le32 tx_duration; /* in usecs */
+} __packed;
+
+#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK GENMASK(7, 0)
+
+struct htt_data_tx_compl_ppdu_dur {
+ __le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
+ struct htt_data_tx_ppdu_dur ppdu_dur[0];
+} __packed;
+
struct htt_tx_compl_ind_base {
u32 hdr;
u16 payload[1/*or more*/];
@@ -1650,6 +1751,7 @@ struct htt_cmd {
struct htt_stats_req stats_req;
struct htt_oob_sync_req oob_sync_req;
struct htt_aggr_conf aggr_conf;
+ struct htt_aggr_conf_v2 aggr_conf_v2;
struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
struct htt_tx_fetch_resp tx_fetch_resp;
@@ -1716,14 +1818,14 @@ struct ath10k_htt_txbuf_32 {
struct ath10k_htc_hdr htc_hdr;
struct htt_cmd_hdr cmd_hdr;
struct htt_data_tx_desc cmd_tx;
-} __packed;
+} __packed __aligned(4);
struct ath10k_htt_txbuf_64 {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
struct htt_cmd_hdr cmd_hdr;
struct htt_data_tx_desc_64 cmd_tx;
-} __packed;
+} __packed __aligned(4);
struct ath10k_htt {
struct ath10k *ar;
@@ -1890,6 +1992,9 @@ struct ath10k_htt_tx_ops {
struct sk_buff *msdu);
int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
void (*htt_free_txbuff)(struct ath10k_htt *htt);
+ int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu);
};
static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
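Annotation: the comment block and masks added to htt.h above describe how a TX completion indication can carry appended PPDU-duration records. A standalone sketch of decoding one such append with the same bit layout (struct names, flag handling and the sample values are simplified stand-ins for illustration):

#include <stdint.h>
#include <stdio.h>

#define PEER_ID_MASK     0x0000ffffu   /* HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK, GENMASK(15, 0)  */
#define TID_MASK         0x001f0000u   /* HTT_TX_PPDU_DUR_INFO0_TID_MASK, GENMASK(20, 16)     */
#define NUM_ENTRIES_MASK 0x000000ffu   /* HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK        */

struct ppdu_dur {                      /* mirrors struct htt_data_tx_ppdu_dur */
        uint32_t info0;
        uint32_t tx_duration;          /* usecs */
};

int main(void)
{
        uint32_t hdr_info0 = 2;        /* header word: two records follow */
        struct ppdu_dur recs[2] = {
                { .info0 = (5u << 16) | 0x0042, .tx_duration = 1280 },
                { .info0 = (0u << 16) | 0x0043, .tx_duration =  640 },
        };
        unsigned int n = hdr_info0 & NUM_ENTRIES_MASK;

        for (unsigned int i = 0; i < n; i++) {
                unsigned int peer_id = recs[i].info0 & PEER_ID_MASK;
                unsigned int tid = (recs[i].info0 & TID_MASK) >> 16;

                printf("peer %u tid %u airtime %u us\n",
                       peer_id, tid, recs[i].tx_duration);
        }
        return 0;
}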
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index f42bac204ef8..a20ea270d519 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
@@ -265,7 +254,7 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
struct ath10k_htt *htt = &ar->htt;
int ret;
- if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return 0;
spin_lock_bh(&htt->rx_ring.lock);
@@ -282,7 +271,7 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
- if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL)
+ if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
@@ -760,7 +749,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
- if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return 0;
htt->rx_confused = false;
@@ -1905,7 +1894,7 @@ static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
}
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
- unsigned long int *unchain_cnt)
+ unsigned long *unchain_cnt)
{
struct sk_buff *skb, *first;
int space;
@@ -1954,8 +1943,8 @@ static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
struct sk_buff_head *amsdu,
- unsigned long int *drop_cnt,
- unsigned long int *unchain_cnt)
+ unsigned long *drop_cnt,
+ unsigned long *unchain_cnt)
{
struct sk_buff *first;
struct htt_rx_desc *rxd;
@@ -2005,7 +1994,7 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status,
- unsigned long int *drop_cnt)
+ unsigned long *drop_cnt)
{
if (skb_queue_empty(amsdu))
return;
@@ -2025,10 +2014,10 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
int ret;
- unsigned long int drop_cnt = 0;
- unsigned long int unchain_cnt = 0;
- unsigned long int drop_cnt_filter = 0;
- unsigned long int msdus_to_queue, num_msdus;
+ unsigned long drop_cnt = 0;
+ unsigned long unchain_cnt = 0;
+ unsigned long drop_cnt_filter = 0;
+ unsigned long msdus_to_queue, num_msdus;
enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
@@ -2130,9 +2119,15 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
hdr = (struct ieee80211_hdr *)skb->data;
rx_status = IEEE80211_SKB_RXCB(skb);
rx_status->chains |= BIT(0);
- rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
- rx->ppdu.combined_rssi;
- rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+ if (rx->ppdu.combined_rssi == 0) {
+ /* SDIO firmware does not provide signal */
+ rx_status->signal = 0;
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ } else {
+ rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rx->ppdu.combined_rssi;
+ rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+ }
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
@@ -2220,8 +2215,12 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
__le16 msdu_id, *msdus;
bool rssi_enabled = false;
- u8 msdu_count = 0;
- int i;
+ u8 msdu_count = 0, num_airtime_records, tid;
+ int i, htt_pad = 0;
+ struct htt_data_tx_compl_ppdu_dur *ppdu_info;
+ struct ath10k_peer *peer;
+ u16 ppdu_info_offset = 0, peer_id;
+ u32 tx_duration;
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
@@ -2245,12 +2244,14 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
resp->data_tx_completion.num_msdus);
msdu_count = resp->data_tx_completion.num_msdus;
+ msdus = resp->data_tx_completion.msdus;
+ rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
- if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI)
- rssi_enabled = true;
+ if (rssi_enabled)
+ htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
+ resp);
for (i = 0; i < msdu_count; i++) {
- msdus = resp->data_tx_completion.msdus;
msdu_id = msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
@@ -2260,10 +2261,10 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
* last msdu id with 0xffff
*/
if (msdu_count & 0x01) {
- msdu_id = msdus[msdu_count + i + 1];
+ msdu_id = msdus[msdu_count + i + 1 + htt_pad];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
} else {
- msdu_id = msdus[msdu_count + i];
+ msdu_id = msdus[msdu_count + i + htt_pad];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
}
}
@@ -2282,6 +2283,50 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
ath10k_txrx_tx_unref(htt, &tx_done);
}
}
+
+ if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
+ return;
+
+ ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
+
+ if (rssi_enabled)
+ ppdu_info_offset += ppdu_info_offset;
+
+ if (resp->data_tx_completion.flags2 &
+ (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
+ ppdu_info_offset += 2;
+
+ ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
+ num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
+ __le32_to_cpu(ppdu_info->info0));
+
+ for (i = 0; i < num_airtime_records; i++) {
+ struct htt_data_tx_ppdu_dur *ppdu_dur;
+ u32 info0;
+
+ ppdu_dur = &ppdu_info->ppdu_dur[i];
+ info0 = __le32_to_cpu(ppdu_dur->info0);
+
+ peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
+ info0);
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+ continue;
+ }
+
+ tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0);
+ tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
+
+ ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
+
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+ }
}
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
@@ -2596,6 +2641,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
u8 tid;
int ret;
int i;
+ bool may_tx;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
@@ -2668,8 +2714,13 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
num_msdus = 0;
num_bytes = 0;
+ ieee80211_txq_schedule_start(hw, txq->ac);
+ may_tx = ieee80211_txq_may_transmit(hw, txq);
while (num_msdus < max_num_msdus &&
num_bytes < max_num_bytes) {
+ if (!may_tx)
+ break;
+
ret = ath10k_mac_tx_push_txq(hw, txq);
if (ret < 0)
break;
@@ -2677,6 +2728,8 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
num_msdus++;
num_bytes += ret;
}
+ ieee80211_return_txq(hw, txq);
+ ieee80211_txq_schedule_end(hw, txq->ac);
record->num_msdus = cpu_to_le16(num_msdus);
record->num_bytes = cpu_to_le32(num_bytes);
@@ -2868,17 +2921,19 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
struct rate_info *txrate = &arsta->txrate;
struct ath10k_htt_tx_stats *tx_stats;
int idx, ht_idx, gi, mcs, bw, nss;
+ unsigned long flags;
if (!arsta->tx_stats)
return;
tx_stats = arsta->tx_stats;
- gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
- ht_idx = txrate->mcs + txrate->nss * 8;
- mcs = txrate->mcs;
+ flags = txrate->flags;
+ gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
+ mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
bw = txrate->bw;
nss = txrate->nss;
- idx = mcs * 8 + 8 * 10 * nss;
+ ht_idx = mcs + (nss - 1) * 8;
+ idx = mcs * 8 + 8 * 10 * (nss - 1);
idx += bw * 2 + gi;
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
@@ -2924,7 +2979,7 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
}
STATS_OP_FMT(AMPDU).bw[0][bw] +=
pstats->succ_bytes + pstats->retry_bytes;
- STATS_OP_FMT(AMPDU).nss[0][nss] +=
+ STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
pstats->succ_bytes + pstats->retry_bytes;
@@ -2932,7 +2987,7 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
pstats->succ_pkts + pstats->retry_pkts;
- STATS_OP_FMT(AMPDU).nss[1][nss] +=
+ STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
pstats->succ_pkts + pstats->retry_pkts;
@@ -2944,27 +2999,27 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
}
STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
- STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
- STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
+ STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
- STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
- STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts;
+ STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
- STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
- STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
+ STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
@@ -2975,6 +3030,8 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
}
+
+ tx_stats->tx_duration += pstats->duration;
}
static void
@@ -3070,6 +3127,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->txrate.nss = txrate.nss;
arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
+ arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
@@ -3141,6 +3199,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
+ p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
}
@@ -3234,7 +3293,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return ath10k_htt_rx_proc_rx_ind_hl(htt,
&resp->rx_ind_hl,
skb);
@@ -3530,7 +3589,7 @@ void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
htt->rx_ops = &htt_rx_ops_hl;
else if (ar->hw_params.target_64bit)
htt->rx_ops = &htt_rx_ops_64;
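Annotation: the per-peer stats hunks above switch the nss-indexed arrays to zero-based indexing (nss - 1) and derive the MCS from the rate code. A quick standalone check of the resulting index arithmetic with arbitrary sample values:

#include <stdio.h>

int main(void)
{
        int mcs = 7, nss = 2, bw = 1, gi = 1;   /* e.g. MCS7, 2x2, one bw step up, SGI */
        int ht_idx = mcs + (nss - 1) * 8;                       /* as in the hunk */
        int idx = mcs * 8 + 8 * 10 * (nss - 1) + bw * 2 + gi;   /* rate-table slot */

        printf("ht_idx=%d idx=%d\n", ht_idx, idx);   /* ht_idx=15 idx=139 */
        return 0;
}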
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index ad05ab714c9b..d8e9cc0bb772 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
@@ -495,7 +484,7 @@ int ath10k_htt_tx_start(struct ath10k_htt *htt)
if (htt->tx_mem_allocated)
return 0;
- if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return 0;
ret = ath10k_htt_tx_alloc_buf(htt);
@@ -1035,6 +1024,53 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
return 0;
}
+static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_aggr_conf_v2 *aggr_conf;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len;
+ int ret;
+
+ /* Firmware defaults are: amsdu = 3 and ampdu = 64 */
+
+ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
+ return -EINVAL;
+
+ if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
+ return -EINVAL;
+
+ len = sizeof(cmd->hdr);
+ len += sizeof(cmd->aggr_conf_v2);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
+
+ aggr_conf = &cmd->aggr_conf_v2;
+ aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
+ aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
+ aggr_conf->max_num_amsdu_subframes,
+ aggr_conf->max_num_ampdu_subframes);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
__le32 token,
__le16 fetch_seq_num,
@@ -1177,7 +1213,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
return 0;
err_unmap_msdu:
- if (ar->dev_type != ATH10K_DEV_TYPE_HL)
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
@@ -1498,7 +1534,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
u16 msdu_id, flags1 = 0;
u16 freq = 0;
dma_addr_t frags_paddr = 0;
- u32 txbuf_paddr;
+ dma_addr_t txbuf_paddr;
struct htt_msdu_ext_desc_64 *ext_desc = NULL;
struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
@@ -1692,6 +1728,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
.htt_tx = ath10k_htt_tx_32,
.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
+ .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg,
};
static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
@@ -1702,6 +1739,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
.htt_tx = ath10k_htt_tx_64,
.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
+ .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
};
static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
@@ -1714,7 +1752,7 @@ void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
htt->tx_ops = &htt_tx_ops_hl;
else if (ar->hw_params.target_64bit)
htt->tx_ops = &htt_tx_ops_64;
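Annotation: the v2 aggregation config helper above bounds the requested subframe counts to at most 64 A-MPDU and 31 A-MSDU subframes, since the A-MSDU field only carries five bits. A minimal standalone sketch of just that validation (error values mirror the hunk, everything else is illustrative):

#include <errno.h>
#include <stdio.h>

static int check_aggr_cfg_v2(unsigned int ampdu, unsigned int amsdu)
{
        if (ampdu == 0 || ampdu > 64)        /* firmware default is 64 */
                return -EINVAL;
        if (amsdu == 0 || amsdu > 31)        /* field limited by the 0x1F mask */
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("%d %d %d\n",
               check_aggr_cfg_v2(64, 3),     /* 0: firmware defaults */
               check_aggr_cfg_v2(65, 3),     /* -EINVAL */
               check_aggr_cfg_v2(64, 32));   /* -EINVAL */
        return 0;
}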
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 61ecf931ba4d..ad082b7d7643 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
@@ -318,9 +307,11 @@ static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
};
const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
- .sr_base_addr = 0x00000000,
+ .sr_base_addr_lo = 0x00000000,
+ .sr_base_addr_hi = 0x00000004,
.sr_size_addr = 0x00000008,
- .dr_base_addr = 0x0000000c,
+ .dr_base_addr_lo = 0x0000000c,
+ .dr_base_addr_hi = 0x00000010,
.dr_size_addr = 0x00000014,
.misc_ie_addr = 0x00000034,
.sr_wr_index_addr = 0x0000003c,
@@ -464,9 +455,9 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
};
const struct ath10k_hw_ce_regs qcax_ce_regs = {
- .sr_base_addr = 0x00000000,
+ .sr_base_addr_lo = 0x00000000,
.sr_size_addr = 0x00000004,
- .dr_base_addr = 0x00000008,
+ .dr_base_addr_lo = 0x00000008,
.dr_size_addr = 0x0000000c,
.ce_cmd_addr = 0x00000018,
.misc_ie_addr = 0x00000034,
@@ -1109,6 +1100,32 @@ int ath10k_hw_diag_fast_download(struct ath10k *ar,
return ret;
}
+static int ath10k_htt_tx_rssi_enable(struct htt_resp *resp)
+{
+ return (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI);
+}
+
+static int ath10k_htt_tx_rssi_enable_wcn3990(struct htt_resp *resp)
+{
+ return (resp->data_tx_completion.flags2 &
+ HTT_TX_DATA_RSSI_ENABLE_WCN3990);
+}
+
+static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp)
+{
+ struct htt_data_tx_completion_ext extd;
+ int pad_bytes = 0;
+
+ if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_RETRIES)
+ pad_bytes += sizeof(extd.a_retries) /
+ sizeof(extd.msdus_rssi[0]);
+
+ if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_TIMESTAMP)
+ pad_bytes += sizeof(extd.t_stamp) / sizeof(extd.msdus_rssi[0]);
+
+ return pad_bytes;
+}
+
const struct ath10k_hw_ops qca988x_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};
@@ -1133,6 +1150,10 @@ const struct ath10k_hw_ops qca99x0_ops = {
const struct ath10k_hw_ops qca6174_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
-const struct ath10k_hw_ops wcn3990_ops = {};
+const struct ath10k_hw_ops wcn3990_ops = {
+ .tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990,
+};
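Annotation: ath10k_get_htt_tx_data_rssi_pad() above counts how many 16-bit RSSI slots are displaced by the optional retry and timestamp appends; with the htt.h layouts that is two slots each. A standalone arithmetic check (flag values and struct names are local stand-ins):

#include <stdint.h>
#include <stdio.h>

struct append_retries {        /* mirrors struct htt_append_retries: 4 bytes, no padding */
        uint16_t msdu_id;
        uint8_t tx_retries;
        uint8_t flag;
};

#define APPEND_RETRIES   (1u << 0)   /* stands in for HTT_TX_DATA_APPEND_RETRIES   */
#define APPEND_TIMESTAMP (1u << 1)   /* stands in for HTT_TX_DATA_APPEND_TIMESTAMP */

static int rssi_pad(uint8_t flags2)
{
        int pad = 0;

        if (flags2 & APPEND_RETRIES)
                pad += sizeof(struct append_retries) / sizeof(uint16_t); /* 2 slots */
        if (flags2 & APPEND_TIMESTAMP)
                pad += sizeof(uint32_t) / sizeof(uint16_t);              /* 2 slots */
        return pad;
}

int main(void)
{
        printf("%d %d\n", rssi_pad(APPEND_RETRIES),
               rssi_pad(APPEND_RETRIES | APPEND_TIMESTAMP));   /* 2 4 */
        return 0;
}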
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index e50a8dc5b093..71314999aa24 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HW_H_
@@ -353,9 +342,11 @@ struct ath10k_hw_ce_ctrl1_upd {
};
struct ath10k_hw_ce_regs {
- u32 sr_base_addr;
+ u32 sr_base_addr_lo;
+ u32 sr_base_addr_hi;
u32 sr_size_addr;
- u32 dr_base_addr;
+ u32 dr_base_addr_lo;
+ u32 dr_base_addr_hi;
u32 dr_size_addr;
u32 ce_cmd_addr;
u32 misc_ie_addr;
@@ -618,6 +609,8 @@ struct ath10k_hw_params {
};
struct htt_rx_desc;
+struct htt_resp;
+struct htt_data_tx_completion_ext;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
@@ -625,6 +618,8 @@ struct ath10k_hw_ops {
void (*set_coverage_class)(struct ath10k *ar, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
+ int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
+ int (*is_rssi_enable)(struct htt_resp *resp);
};
extern const struct ath10k_hw_ops qca988x_ops;
@@ -652,6 +647,24 @@ ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
return false;
}
+static inline int
+ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw,
+ struct htt_resp *htt)
+{
+ if (hw->hw_ops->tx_data_rssi_pad_bytes)
+ return hw->hw_ops->tx_data_rssi_pad_bytes(htt);
+ return 0;
+}
+
+static inline int
+ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
+ struct htt_resp *resp)
+{
+ if (hw->hw_ops->is_rssi_enable)
+ return hw->hw_ops->is_rssi_enable(resp);
+ return 0;
+}
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -734,13 +747,14 @@ ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
+#define TARGET_TLV_NUM_MSDU_DESC_HL 64
#define TARGET_TLV_NUM_WOW_PATTERNS 22
#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
/* Target specific defines for WMI-HL-1.0 firmware */
-#define TARGET_HL_10_TLV_NUM_PEERS 14
-#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6
-#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2
+#define TARGET_HL_TLV_NUM_PEERS 33
+#define TARGET_HL_TLV_AST_SKID_LIMIT 16
+#define TARGET_HL_TLV_NUM_WDS_ENTRIES 2
/* Diagnostic Window */
#define CE_DIAG_PIPE 7
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e49b36752ba2..b73c23d4ce86 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "mac.h"
@@ -250,24 +239,24 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
- arg.key_cipher = WMI_CIPHER_AES_CCM;
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM];
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
case WLAN_CIPHER_SUITE_TKIP:
- arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP];
arg.key_txmic_len = 8;
arg.key_rxmic_len = 8;
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- arg.key_cipher = WMI_CIPHER_WEP;
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP];
break;
case WLAN_CIPHER_SUITE_CCMP_256:
- arg.key_cipher = WMI_CIPHER_AES_CCM;
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM];
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- arg.key_cipher = WMI_CIPHER_AES_GCM;
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM];
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
@@ -284,7 +273,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (cmd == DISABLE_KEY) {
- arg.key_cipher = WMI_CIPHER_NONE;
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
arg.key_data = NULL;
}
@@ -3397,6 +3386,7 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
struct sk_buff *skb)
{
const struct ieee80211_hdr *hdr = (void *)skb->data;
+ const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
__le16 fc = hdr->frame_control;
if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
@@ -3438,7 +3428,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
if (ieee80211_is_data_present(fc) && sta && sta->tdls)
return ATH10K_HW_TXRX_ETHERNET;
- if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) ||
+ skb_cb->flags & ATH10K_SKB_F_RAW_TX)
return ATH10K_HW_TXRX_RAW;
return ATH10K_HW_TXRX_NATIVE_WIFI;
@@ -3544,10 +3535,13 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_txq *txq,
- struct sk_buff *skb)
+ struct sk_buff *skb, u16 airtime)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool is_data = ieee80211_is_data(hdr->frame_control) ||
+ ieee80211_is_data_qos(hdr->frame_control);
cb->flags = 0;
if (!ath10k_tx_h_use_hwcrypto(vif, skb))
@@ -3559,8 +3553,19 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
if (ieee80211_is_data_qos(hdr->frame_control))
cb->flags |= ATH10K_SKB_F_QOS;
+ /* Data frames encrypted in software will be posted to firmware
+ * with tx encap mode set to RAW. For example, multicast traffic
+ * generated for a specific VLAN group will always be encrypted in
+ * software.
+ */
+ if (is_data && ieee80211_has_protected(hdr->frame_control) &&
+ !info->control.hw_key) {
+ cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+ cb->flags |= ATH10K_SKB_F_RAW_TX;
+ }
+
cb->vif = vif;
cb->txq = txq;
+ cb->airtime_est = airtime;
}
bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
@@ -3667,6 +3672,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
int ret;
/* We should disable CCK RATE due to P2P */
@@ -3684,7 +3690,8 @@ static int ath10k_mac_tx(struct ath10k *ar,
ath10k_tx_h_8023(skb);
break;
case ATH10K_HW_TXRX_RAW:
- if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) &&
+ !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) {
WARN_ON_ONCE(1);
ieee80211_free_txskb(hw, skb);
return -ENOTSUPP;
@@ -3863,7 +3870,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
ret);
dma_unmap_single(ar->dev, paddr, skb->len,
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
ieee80211_free_txskb(ar->hw, skb);
}
} else {
@@ -3890,7 +3897,6 @@ static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
{
- struct ath10k_txq *artxq;
struct ath10k_skb_cb *cb;
struct sk_buff *msdu;
int msdu_id;
@@ -3898,12 +3904,6 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
if (!txq)
return;
- artxq = (void *)txq->drv_priv;
- spin_lock_bh(&ar->txqs_lock);
- if (!list_empty(&artxq->list))
- list_del_init(&artxq->list);
- spin_unlock_bh(&ar->txqs_lock);
-
spin_lock_bh(&ar->htt.tx_lock);
idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
cb = ATH10K_SKB_CB(msdu);
@@ -3943,7 +3943,6 @@ static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
struct ath10k_txq *artxq = (void *)txq->drv_priv;
/* No need to get locks */
-
if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
return true;
@@ -3956,6 +3955,52 @@ static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
return false;
}
+/* Return the estimated airtime in microseconds, calculated from the last
+ * reported TX rate. This is only a rough estimate because the host driver has
+ * no knowledge of the actual transmit rate, retries or aggregation. If the
+ * actual airtime can be reported by the firmware, the delta between the
+ * estimated and actual airtime can be adjusted from the deficit.
+ */
+#define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */
+#define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */
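+/* Example: with pktlen = 1538 bytes and a last reported rate of 54 Mbps
+ * (last_tx_bitrate == 540), the estimate below is 1538 * 8 * 10 / 540,
+ * i.e. roughly 227 us, plus IEEE80211_ATF_OVERHEAD_IFS for about 243 us.
+ */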
+static u16 ath10k_mac_update_airtime(struct ath10k *ar,
+ struct ieee80211_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ath10k_sta *arsta;
+ u32 pktlen;
+ u16 airtime = 0;
+
+ if (!txq || !txq->sta)
+ return airtime;
+
+ if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
+ return airtime;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta = (struct ath10k_sta *)txq->sta->drv_priv;
+
+ pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most cases */
+ if (arsta->last_tx_bitrate) {
+ /* airtime in us, last_tx_bitrate in 100kbps */
+ airtime = (pktlen * 8 * (1000 / 100))
+ / arsta->last_tx_bitrate;
+ /* overhead for media access time and IFS */
+ airtime += IEEE80211_ATF_OVERHEAD_IFS;
+ } else {
+ /* This is mostly for throttling excessive BC/MC frames, and the
+ * airtime/rate doesn't need to be exact. Airtime of BC/MC frames
+ * in 2G gets some discount, which helps prevent very low rate
+ * frames from being blocked for too long.
+ */
+ airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */
+ airtime += IEEE80211_ATF_OVERHEAD;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return airtime;
+}
+
int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
@@ -3971,6 +4016,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
size_t skb_len;
bool is_mgmt, is_presp;
int ret;
+ u16 airtime;
spin_lock_bh(&ar->htt.tx_lock);
ret = ath10k_htt_tx_inc_pending(htt);
@@ -3988,7 +4034,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
return -ENOENT;
}
- ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+ airtime = ath10k_mac_update_airtime(ar, txq, skb);
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime);
skb_len = skb->len;
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
@@ -4030,48 +4077,45 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
return skb_len;
}
-void ath10k_mac_tx_push_pending(struct ath10k *ar)
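+/* Push frames from the mac80211 TXQs for the given access category until
+ * no schedulable TXQ remains or the transport returns -EBUSY.
+ */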
+static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
{
- struct ieee80211_hw *hw = ar->hw;
struct ieee80211_txq *txq;
- struct ath10k_txq *artxq;
- struct ath10k_txq *last;
- int ret;
- int max;
-
- if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
- return;
-
- spin_lock_bh(&ar->txqs_lock);
- rcu_read_lock();
-
- last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
- while (!list_empty(&ar->txqs)) {
- artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
- txq = container_of((void *)artxq, struct ieee80211_txq,
- drv_priv);
+ int ret = 0;
- /* Prevent aggressive sta/tid taking over tx queue */
- max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
- ret = 0;
- while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+ ieee80211_txq_schedule_start(hw, ac);
+ while ((txq = ieee80211_next_txq(hw, ac))) {
+ while (ath10k_mac_tx_can_push(hw, txq)) {
ret = ath10k_mac_tx_push_txq(hw, txq);
if (ret < 0)
break;
}
+ ieee80211_return_txq(hw, txq);
+ ath10k_htt_tx_txq_update(hw, txq);
+ if (ret == -EBUSY)
+ break;
+ }
+ ieee80211_txq_schedule_end(hw, ac);
- list_del_init(&artxq->list);
- if (ret != -ENOENT)
- list_add_tail(&artxq->list, &ar->txqs);
+ return ret;
+}
- ath10k_htt_tx_txq_update(hw, txq);
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ u32 ac;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
+ return;
+
+ if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
+ return;
- if (artxq == last || (ret < 0 && ret != -ENOENT))
+ rcu_read_lock();
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY)
break;
}
-
rcu_read_unlock();
- spin_unlock_bh(&ar->txqs_lock);
}
EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
@@ -4258,8 +4302,10 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
bool is_mgmt;
bool is_presp;
int ret;
+ u16 airtime;
- ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+ airtime = ath10k_mac_update_airtime(ar, txq, skb);
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime);
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
@@ -4310,31 +4356,28 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
struct ath10k *ar = hw->priv;
- struct ath10k_txq *artxq = (void *)txq->drv_priv;
- struct ieee80211_txq *f_txq;
- struct ath10k_txq *f_artxq;
- int ret = 0;
- int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
+ int ret;
+ u8 ac;
- spin_lock_bh(&ar->txqs_lock);
- if (list_empty(&artxq->list))
- list_add_tail(&artxq->list, &ar->txqs);
+ ath10k_htt_tx_txq_update(hw, txq);
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
+ return;
- f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
- f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
- list_del_init(&f_artxq->list);
+ ac = txq->ac;
+ ieee80211_txq_schedule_start(hw, ac);
+ txq = ieee80211_next_txq(hw, ac);
+ if (!txq)
+ goto out;
- while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
- ret = ath10k_mac_tx_push_txq(hw, f_txq);
+ while (ath10k_mac_tx_can_push(hw, txq)) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
if (ret < 0)
break;
}
- if (ret != -ENOENT)
- list_add_tail(&f_artxq->list, &ar->txqs);
- spin_unlock_bh(&ar->txqs_lock);
-
- ath10k_htt_tx_txq_update(hw, f_txq);
+ ieee80211_return_txq(hw, txq);
ath10k_htt_tx_txq_update(hw, txq);
+out:
+ ieee80211_txq_schedule_end(hw, ac);
}
/* Must not be called with conf_mutex held as workers can use that also. */
@@ -4549,7 +4592,8 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
ht_cap.cap |= stbc;
}
- if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
+ if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info &
+ WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC)))
ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
@@ -4704,7 +4748,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err;
}
- ret = ath10k_hif_power_up(ar);
+ ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath10k_err(ar, "Could not init hif: %d\n", ret);
goto err_off;
@@ -5169,10 +5213,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_AP) {
- arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
- IEEE80211_MAX_FRAME_LEN,
- &arvif->beacon_paddr,
- GFP_ATOMIC);
+ arvif->beacon_buf = dma_alloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
if (!arvif->beacon_buf) {
ret = -ENOMEM;
ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
@@ -5341,6 +5385,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_peer_delete;
}
+ if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) {
+ vdev_param = ar->wmi.vdev_param->rtt_responder_role;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->ftm_responder);
+
+ /* It is harmless not to set the FTM role. Do not warn */
+ if (ret && ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to set vdev %i FTM Responder: %d\n",
+ arvif->vdev_id, ret);
+ }
+
if (vif->type == NL80211_IFTYPE_MONITOR) {
ar->monitor_arvif = arvif;
ret = ath10k_monitor_recalc(ar);
@@ -5615,6 +5670,20 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
ether_addr_copy(arvif->bssid, info->bssid);
+ if (changed & BSS_CHANGED_FTM_RESPONDER &&
+ arvif->ftm_responder != info->ftm_responder &&
+ test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) {
+ arvif->ftm_responder = info->ftm_responder;
+
+ vdev_param = ar->wmi.vdev_param->rtt_responder_role;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->ftm_responder);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d ftm_responder %d:ret %d\n",
+ arvif->vdev_id, arvif->ftm_responder, ret);
+ }
+
if (changed & BSS_CHANGED_BEACON_ENABLED)
ath10k_control_beaconing(arvif, info);
@@ -8617,6 +8686,15 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
+ if (ath10k_peer_stats_enabled(ar) ||
+ test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
+
+ if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
@@ -8726,12 +8804,19 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;
+
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
goto err_dfs_detector_exit;
}
+ if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) {
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+ ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
+ }
+
if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
ret = regulatory_hint(ar->hw->wiphy,
ar->ath_common.regulatory.alpha2);
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 570493d2d648..1fe84948b868 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _MAC_H_
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
index 7e621ee194e3..29c737b2f432 100644
--- a/drivers/net/wireless/ath/ath10k/p2p.c
+++ b/drivers/net/wireless/ath/ath10k/p2p.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
index 7be616e2e121..7d7f44809fbb 100644
--- a/drivers/net/wireless/ath/ath10k/p2p.h
+++ b/drivers/net/wireless/ath/ath10k/p2p.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _P2P_H
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 01b4edb00e9e..271f92c24d44 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/pci.h>
@@ -913,7 +902,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
int nbytes)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret = 0;
u32 *buf;
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
@@ -924,8 +912,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
void *data_buf = NULL;
int i;
- spin_lock_bh(&ce->ce_lock);
-
+ mutex_lock(&ar_pci->ce_diag_mutex);
ce_diag = ar_pci->ce_diag;
/*
@@ -936,8 +923,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
*/
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
- data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
- alloc_nbytes,
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
@@ -961,19 +947,17 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
+ ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
/* Request CE to send from Target(!) address to Host buffer */
- ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
- 0);
+ ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
if (ret)
goto done;
i = 0;
- while (ath10k_ce_completed_send_next_nolock(ce_diag,
- NULL) != 0) {
+ while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
@@ -984,10 +968,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag,
- (void **)&buf,
- &completed_nbytes)
- != 0) {
+ while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
+ &completed_nbytes) != 0) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
@@ -1020,7 +1002,7 @@ done:
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
- spin_unlock_bh(&ce->ce_lock);
+ mutex_unlock(&ar_pci->ce_diag_mutex);
return ret;
}
@@ -1068,7 +1050,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
const void *data, int nbytes)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret = 0;
u32 *buf;
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
@@ -1077,8 +1058,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
dma_addr_t ce_data_base = 0;
int i;
- spin_lock_bh(&ce->ce_lock);
-
+ mutex_lock(&ar_pci->ce_diag_mutex);
ce_diag = ar_pci->ce_diag;
/*
@@ -1119,7 +1099,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
memcpy(data_buf, data, nbytes);
/* Set up to receive directly into Target(!) address */
- ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
+ ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@@ -1127,14 +1107,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
- ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
- nbytes, 0, 0);
+ ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
if (ret != 0)
goto done;
i = 0;
- while (ath10k_ce_completed_send_next_nolock(ce_diag,
- NULL) != 0) {
+ while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
@@ -1145,10 +1123,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag,
- (void **)&buf,
- &completed_nbytes)
- != 0) {
+ while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
+ &completed_nbytes) != 0) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
@@ -1183,7 +1159,7 @@ done:
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
address, ret);
- spin_unlock_bh(&ce->ce_lock);
+ mutex_unlock(&ar_pci->ce_diag_mutex);
return ret;
}
@@ -2284,7 +2260,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
return 1;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
- switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
+ switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV:
case QCA6174_HW_1_1_CHIP_ID_REV:
case QCA6174_HW_2_1_CHIP_ID_REV:
@@ -2807,7 +2783,8 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
return ar_pci->pci_hard_reset(ar);
}
-static int ath10k_pci_hif_power_up(struct ath10k *ar)
+static int ath10k_pci_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
@@ -3463,6 +3440,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
spin_lock_init(&ce->ce_lock);
spin_lock_init(&ar_pci->ps_lock);
+ mutex_init(&ar_pci->ce_diag_mutex);
timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
@@ -3554,7 +3532,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
- pci_soft_reset = NULL;
+ pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
@@ -3637,6 +3615,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.link_can_suspend = true;
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (bus_params.chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index e8d86331c539..3773c79f322f 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -1,24 +1,14 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _PCI_H_
#define _PCI_H_
#include <linux/interrupt.h>
+#include <linux/mutex.h>
#include "hw.h"
#include "ce.h"
@@ -128,6 +118,8 @@ struct ath10k_pci {
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;
+ /* For protecting ce_diag */
+ struct mutex ce_diag_mutex;
struct ath10k_ce ce;
struct timer_list rx_post_retry;
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 37b3bd629f48..a7bc2c70d076 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/completion.h>
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index 1efe1d22fc2f..e4aa20445666 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _ATH10K_QMI_H_
#define _ATH10K_QMI_H_
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
index ba79c2e4aed6..1fe05c6218c3 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/soc/qcom/qmi.h>
@@ -1763,14 +1752,239 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
daemon_support_valid),
},
{
- .data_type = QMI_UNSIGNED_1_BYTE,
+ .data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
- .elem_size = sizeof(u8),
+ .elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
daemon_support),
},
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
{}
};
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
index c5e3870b8871..bca1186e1560 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WCN3990_QMI_SVC_V01_H
@@ -553,12 +542,38 @@ struct wlfw_mac_addr_resp_msg_v01 {
#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
struct wlfw_host_cap_req_msg_v01 {
u8 daemon_support_valid;
- u8 daemon_support;
-};
-
-#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+ u32 daemon_support;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index dfbfe674e11e..dec1582005b9 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _RX_DESC_H_
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 983ecfef1d28..fae56c67766f 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
@@ -1381,7 +1370,8 @@ static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
return ret;
}
-static int ath10k_sdio_hif_power_up(struct ath10k *ar)
+static int ath10k_sdio_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
@@ -1392,6 +1382,12 @@ static int ath10k_sdio_hif_power_up(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
+ ret = ath10k_sdio_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to config sdio: %d\n", ret);
+ return ret;
+ }
+
sdio_claim_host(func);
ret = sdio_enable_func(func);
@@ -1429,11 +1425,19 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
/* Disable the card */
sdio_claim_host(ar_sdio->func);
+
ret = sdio_disable_func(ar_sdio->func);
- sdio_release_host(ar_sdio->func);
+ if (ret) {
+ ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
+ sdio_release_host(ar_sdio->func);
+ return;
+ }
+ ret = mmc_hw_reset(ar_sdio->func->card->host);
if (ret)
- ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
+ ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
ar_sdio->is_disabled = true;
}
@@ -1615,12 +1619,33 @@ static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
return 0;
}
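+/* Read hi_acs_flags from the target and enable the mailbox swap if the
+ * firmware has acknowledged the swap service.
+ */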
+static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 addr, val;
+ int ret = 0;
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read hi_acs_flags: %d\n", ret);
+ return ret;
+ }
+
+ if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio mailbox swap service enabled\n");
+ ar_sdio->swap_mbox = true;
+ }
+ return 0;
+}
+
/* HIF start/stop */
static int ath10k_sdio_hif_start(struct ath10k *ar)
{
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
- u32 addr, val;
int ret;
/* Sleep 20 ms before HIF interrupts are disabled.
@@ -1654,20 +1679,6 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
if (ret)
ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
- addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
-
- ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
- if (ret) {
- ath10k_warn(ar, "unable to read hi_acs_flags address: %d\n", ret);
- return ret;
- }
-
- if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
- ath10k_dbg(ar, ATH10K_DBG_SDIO,
- "sdio mailbox swap service enabled\n");
- ar_sdio->swap_mbox = true;
- }
-
/* Enable sleep and then disable it again */
ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
if (ret)
@@ -1898,6 +1909,7 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
.exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
.start = ath10k_sdio_hif_start,
.stop = ath10k_sdio_hif_stop,
+ .swap_mailbox = ath10k_sdio_hif_swap_mailbox,
.map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
.get_default_pipe = ath10k_sdio_hif_get_default_pipe,
.send_complete_check = ath10k_sdio_hif_send_complete_check,
@@ -2030,12 +2042,6 @@ static int ath10k_sdio_probe(struct sdio_func *func,
ath10k_sdio_set_mbox_info(ar);
- ret = ath10k_sdio_config(ar);
- if (ret) {
- ath10k_err(ar, "failed to config sdio: %d\n", ret);
- goto err_free_wq;
- }
-
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with SDIO */
bus_params.chip_id = 0;
@@ -2088,7 +2094,10 @@ static struct sdio_driver ath10k_sdio_driver = {
.id_table = ath10k_sdio_devices,
.probe = ath10k_sdio_probe,
.remove = ath10k_sdio_remove,
- .drv.pm = ATH10K_SDIO_PM_OPS,
+ .drv = {
+ .owner = THIS_MODULE,
+ .pm = ATH10K_SDIO_PM_OPS,
+ },
};
static int __init ath10k_sdio_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index 453eb6263143..b8c7ac0330bd 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _SDIO_H_
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 54efe6be8f1d..873cb4ce419b 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/clk.h>
@@ -30,6 +19,7 @@
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
+#define ATH10K_SNOC_WAKE_IRQ 2
static char *const ce_name[] = {
"WLAN_CE_0",
@@ -66,7 +56,7 @@ static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static const struct ath10k_snoc_drv_priv drv_priv = {
.hw_rev = ATH10K_HW_WCN3990,
- .dma_mask = DMA_BIT_MASK(37),
+ .dma_mask = DMA_BIT_MASK(35),
.msa_size = 0x100000,
};
@@ -875,13 +865,11 @@ static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
- struct ath10k_snoc *ar_snoc;
struct sk_buff *skb;
struct ath10k *ar;
int i;
ar = snoc_pipe->hif_ce_state;
- ar_snoc = ath10k_snoc_priv(ar);
ce_pipe = snoc_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;
@@ -958,7 +946,8 @@ static int ath10k_snoc_init_pipes(struct ath10k *ar)
return 0;
}
-static int ath10k_snoc_wlan_enable(struct ath10k *ar)
+static int ath10k_snoc_wlan_enable(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
{
struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
struct ath10k_qmi_wlan_enable_cfg cfg;
@@ -992,7 +981,17 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar)
cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
- mode = QMI_WLFW_MISSION_V01;
+ switch (fw_mode) {
+ case ATH10K_FIRMWARE_MODE_NORMAL:
+ mode = QMI_WLFW_MISSION_V01;
+ break;
+ case ATH10K_FIRMWARE_MODE_UTF:
+ mode = QMI_WLFW_FTM_V01;
+ break;
+ default:
+ ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
+ return -EINVAL;
+ }
return ath10k_qmi_wlan_enable(ar, &cfg, mode,
NULL);
@@ -1000,7 +999,16 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar)
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
- if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ /* If both the ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY
+ * flags are not set, the driver has restarted due to a crash
+ * injected via debugfs. In this case, the driver needs to restart
+ * the firmware and hence must send the QMI WLAN disable request
+ * during the driver restart sequence.
+ */
+ if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
+ !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
ath10k_qmi_wlan_disable(ar);
}
@@ -1012,14 +1020,15 @@ static void ath10k_snoc_hif_power_down(struct ath10k *ar)
ath10k_ce_free_rri(ar);
}
-static int ath10k_snoc_hif_power_up(struct ath10k *ar)
+static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
__func__, ar->state);
- ret = ath10k_snoc_wlan_enable(ar);
+ ret = ath10k_snoc_wlan_enable(ar, fw_mode);
if (ret) {
ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
return ret;
@@ -1041,6 +1050,46 @@ err_wlan_enable:
return ret;
}
+#ifdef CONFIG_PM
+static int ath10k_snoc_hif_suspend(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return -EPERM;
+
+ ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
+ if (ret) {
+ ath10k_err(ar, "failed to enable wakeup irq: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");
+
+ return ret;
+}
+
+static int ath10k_snoc_hif_resume(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return -EPERM;
+
+ ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
+ if (ret) {
+ ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");
+
+ return ret;
+}
+#endif
+
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
.read32 = ath10k_snoc_read32,
.write32 = ath10k_snoc_write32,
@@ -1054,6 +1103,10 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
.send_complete_check = ath10k_snoc_hif_send_complete_check,
.get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
.get_target_info = ath10k_snoc_hif_get_target_info,
+#ifdef CONFIG_PM
+ .suspend = ath10k_snoc_hif_suspend,
+ .resume = ath10k_snoc_hif_resume,
+#endif
};
static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index 2b2f23cf7c5d..d62f53501fbb 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _SNOC_H_
@@ -90,7 +79,7 @@ struct ath10k_snoc {
struct ath10k_vreg_info *vreg;
struct ath10k_clk_info *clk;
struct ath10k_qmi *qmi;
- unsigned long int flags;
+ unsigned long flags;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 653b6d013207..5db6bff5193b 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/relay.h>
@@ -494,6 +483,9 @@ static struct dentry *create_buf_file_handler(const char *filename,
buf_file = debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
+ if (IS_ERR(buf_file))
+ return NULL;
+
*is_global = 1;
return buf_file;
}
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index 13276f4dc12c..5f481f11c6e5 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef SPECTRAL_H
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index e7f57efadae1..4dddeee684b4 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file has implementation for code swap logic. With code swap feature,
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index fa602f15fa93..25e0ad36ddb1 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _SWAP_H_
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index b11a1c3d87b4..dff6c8ac9dba 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __TARGADDRS_H__
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index c24ee616833c..6433ff10d80e 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "testmode.h"
@@ -270,7 +259,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
- ret = ath10k_hif_power_up(ar);
+ ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_UTF);
if (ret) {
ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
diff --git a/drivers/net/wireless/ath/ath10k/testmode.h b/drivers/net/wireless/ath/ath10k/testmode.h
index 9cdd150815db..6488fd514ae3 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.h
+++ b/drivers/net/wireless/ath/ath10k/testmode.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
index 6514d1a14242..ee1cb27c1d60 100644
--- a/drivers/net/wireless/ath/ath10k/testmode_i.h
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* "API" level of the ath10k testmode interface. Bump it after every
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index fe35edcd3ec8..36c9a1364253 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/device.h>
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index 65e2419543f9..5fdb020f4da3 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _THERMAL_
#define _THERMAL_
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
index 4a31e2c6fbd4..3ecdff17f64e 100644
--- a/drivers/net/wireless/ath/ath10k/trace.c
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 7d2fac342150..ba977bbe6291 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 23606b6972d0..c5818d28f55a 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
@@ -95,7 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
- if (ar->dev_type != ATH10K_DEV_TYPE_HL)
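+ /* Report the frame's estimated airtime to mac80211's airtime
+ * fairness accounting for the destination station and TID.
+ */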
+ if (txq && txq->sta && skb_cb->airtime_est)
+ ieee80211_sta_register_airtime(txq->sta, txq->tid,
+ skb_cb->airtime_est, 0);
+
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
ath10k_report_offchan_tx(htt->ar, msdu);
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index 2bf401e436d3..ecac441d83a7 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _TXRX_H_
#define _TXRX_H_
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index f731d35ee76d..970cf69ac35f 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
@@ -706,7 +695,8 @@ static void ath10k_usb_hif_send_complete_check(struct ath10k *ar,
{
}
-static int ath10k_usb_hif_power_up(struct ath10k *ar)
+static int ath10k_usb_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
{
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h
index f60a3cc7d712..34d683e8fc18 100644
--- a/drivers/net/wireless/ath/ath10k/usb.h
+++ b/drivers/net/wireless/ath/ath10k/usb.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _USB_H_
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 04663076d27a..1491c25518bb 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _WMI_OPS_H_
@@ -33,6 +22,9 @@ struct wmi_ops {
struct wmi_mgmt_rx_ev_arg *arg);
int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
+ int (*pull_mgmt_tx_bundle_compl)(
+ struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg);
int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
@@ -66,6 +58,8 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
+ const u8 macaddr[ETH_ALEN]);
struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg);
@@ -280,6 +274,16 @@ ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
+}
+
+static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
{
@@ -507,6 +511,22 @@ ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
}
static inline int
+ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
+}
+
+static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
struct sk_buff *skb;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 892bd8c30dd9..582fb11f648a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "debug.h"
@@ -24,6 +13,7 @@
#include "wmi-tlv.h"
#include "p2p.h"
#include "testmode.h"
+#include <linux/bitfield.h>
/***************/
/* TLV helpers */
@@ -620,6 +610,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
ath10k_wmi_event_mgmt_tx_compl(ar, skb);
break;
+ case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
+ ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
+ break;
default:
ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
break;
@@ -681,11 +674,88 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
arg->desc_id = ev->desc_id;
arg->status = ev->status;
arg->pdev_id = ev->pdev_id;
+ arg->ppdu_id = ev->ppdu_id;
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ arg->ack_rssi = ev->ack_rssi;
kfree(tb);
return 0;
}
+struct wmi_tlv_tx_bundle_compl_parse {
+ const __le32 *num_reports;
+ const __le32 *desc_ids;
+ const __le32 *status;
+ const __le32 *ppdu_ids;
+ const __le32 *ack_rssi;
+ bool desc_ids_done;
+ bool status_done;
+ bool ppdu_ids_done;
+ bool ack_rssi_done;
+};
+
+static int
+ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
+ bundle_tx_compl->num_reports = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_UINT32:
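+ /* The UINT32 arrays are expected in a fixed order (desc_ids,
+ * status, ppdu_ids, ack_rssi), so record each one as it is seen.
+ */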
+ if (!bundle_tx_compl->desc_ids_done) {
+ bundle_tx_compl->desc_ids_done = true;
+ bundle_tx_compl->desc_ids = ptr;
+ } else if (!bundle_tx_compl->status_done) {
+ bundle_tx_compl->status_done = true;
+ bundle_tx_compl->status = ptr;
+ } else if (!bundle_tx_compl->ppdu_ids_done) {
+ bundle_tx_compl->ppdu_ids_done = true;
+ bundle_tx_compl->ppdu_ids = ptr;
+ } else if (!bundle_tx_compl->ack_rssi_done) {
+ bundle_tx_compl->ack_rssi_done = true;
+ bundle_tx_compl->ack_rssi = ptr;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
+ struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
+{
+ struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
+ &bundle_tx_compl);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
+ !bundle_tx_compl.status)
+ return -EPROTO;
+
+ arg->num_reports = *bundle_tx_compl.num_reports;
+ arg->desc_ids = bundle_tx_compl.desc_ids;
+ arg->status = bundle_tx_compl.status;
+ arg->ppdu_ids = bundle_tx_compl.ppdu_ids;
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ arg->ack_rssi = bundle_tx_compl.ack_rssi;
+
+ return 0;
+}
+
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
@@ -1227,6 +1297,7 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
{
const void **tb;
const struct wmi_tlv_stats_ev *ev;
+ u32 num_peer_stats_extd;
const void *data;
u32 num_pdev_stats;
u32 num_vdev_stats;
@@ -1234,6 +1305,7 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
u32 num_bcnflt_stats;
u32 num_chan_stats;
size_t data_len;
+ u32 stats_id;
int ret;
int i;
@@ -1258,11 +1330,13 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
+ stats_id = __le32_to_cpu(ev->stats_id);
+ num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
+ "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
num_pdev_stats, num_vdev_stats, num_peer_stats,
- num_bcnflt_stats, num_chan_stats);
+ num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_pdev_stats *src;
@@ -1327,6 +1401,28 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
ath10k_wmi_pull_peer_stats(&src->old, dst);
dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
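+ /* Extended peer stats follow the regular peer stats in the
+ * payload; rx_duration_high supplies the upper bits of the
+ * 64-bit rx duration when its valid bit is set.
+ */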
+ if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
+ const struct wmi_tlv_peer_stats_extd *extd;
+ unsigned long rx_duration_high;
+
+ extd = data + sizeof(*src) * (num_peer_stats - i - 1)
+ + sizeof(*extd) * i;
+
+ dst->rx_duration = __le32_to_cpu(extd->rx_duration);
+ rx_duration_high =
+ __le32_to_cpu(extd->rx_duration_high);
+
+ if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
+ &rx_duration_high)) {
+ rx_duration_high =
+ FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
+ rx_duration_high);
+ dst->rx_duration |= (u64)rx_duration_high <<
+ WMI_TLV_PEER_RX_DURATION_SHIFT;
+ }
+ }
+
list_add_tail(&dst->list, &stats->peers);
}
@@ -1514,21 +1610,55 @@ ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(param_value);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
+ param_id, param_value);
return skb;
}
+static void
+ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
+{
+ struct host_memory_chunk *chunk;
+ struct wmi_tlv *tlv;
+ int i;
+ __le16 tlv_len, tlv_tag;
+
+ tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
+ tlv_len = __cpu_to_le16(sizeof(*chunk));
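+ /* Emit one TLV header plus host_memory_chunk entry per chunk. */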
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ tlv = host_mem_chunks;
+ tlv->tag = tlv_tag;
+ tlv->len = tlv_len;
+ chunk = (void *)tlv->value;
+
+ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
+ i,
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr,
+ ar->wmi.mem_chunks[i].req_id);
+
+ host_mem_chunks += sizeof(*tlv);
+ host_mem_chunks += sizeof(*chunk);
+ }
+}
+
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
struct sk_buff *skb;
struct wmi_tlv *tlv;
struct wmi_tlv_init_cmd *cmd;
struct wmi_tlv_resource_config *cfg;
- struct wmi_host_mem_chunks *chunks;
+ void *chunks;
size_t len, chunks_len;
void *ptr;
- chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
+ chunks_len = ar->wmi.num_mem_chunks *
+ (sizeof(struct host_memory_chunk) + sizeof(*tlv));
len = (sizeof(*tlv) + sizeof(*cmd)) +
(sizeof(*tlv) + sizeof(*cfg)) +
(sizeof(*tlv) + chunks_len);
@@ -1611,7 +1741,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
cfg->vow_config = __cpu_to_le32(0);
cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
- cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
+ cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
cfg->max_frag_entries = __cpu_to_le32(2);
cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
@@ -1626,9 +1756,12 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_ocb_vdevs = __cpu_to_le32(0);
cfg->num_ocb_channels = __cpu_to_le32(0);
cfg->num_ocb_schedules = __cpu_to_le32(0);
- cfg->host_capab = __cpu_to_le32(0);
+ cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);
- ath10k_wmi_put_host_mem_chunks(ar, chunks);
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);
+
+ ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
return skb;
@@ -1984,7 +2117,8 @@ ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(param_value);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
+ vdev_id, param_id, param_value);
return skb;
}
@@ -1998,9 +2132,11 @@ ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
size_t len;
void *ptr;
- if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+ if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
+ arg->key_data)
return ERR_PTR(-EINVAL);
- if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+ if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
+ !arg->key_data)
return ERR_PTR(-EINVAL);
len = sizeof(*tlv) + sizeof(*cmd) +
@@ -2298,7 +2434,9 @@ ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
cmd->param_value = __cpu_to_le32(param_value);
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
+ vdev_id, peer_addr, param_id, param_value);
return skb;
}
@@ -2692,7 +2830,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
arvif = (void *)cb->vif->drv_priv;
vdev_id = arvif->vdev_id;
- if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
+ (!(ieee80211_is_nullfunc(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)))))
return ERR_PTR(-EINVAL);
len = sizeof(*cmd) + 2 * sizeof(*tlv);
@@ -2700,10 +2840,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control)) {
- len += IEEE80211_CCMP_MIC_LEN;
+ ieee80211_has_protected(hdr->frame_control))
buf_len += IEEE80211_CCMP_MIC_LEN;
- }
buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
buf_len = round_up(buf_len, 4);
@@ -3259,6 +3397,8 @@ ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
cmd = (void *)tlv->value;
cmd->enable = __cpu_to_le32(1);
+ if (!ar->bus_param.link_can_suspend)
+ cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
return skb;
@@ -4093,6 +4233,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
+ .pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index e07e9907e355..65e6aa520b06 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
@@ -25,6 +14,8 @@
#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64
+#define WMI_RSRC_CFG_FLAG_TX_ACK_RSSI BIT(18)
+
enum wmi_tlv_grp_id {
WMI_TLV_GRP_START = 0x3,
WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
@@ -321,6 +312,7 @@ enum wmi_tlv_event_id {
WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
+ WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
WMI_TLV_BA_RSP_SSN_EVENTID,
@@ -1394,6 +1386,25 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_AP_TWT = 153,
WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
WMI_TLV_MAX_EXT_SERVICE = 256,
};
@@ -1567,6 +1578,8 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
WMI_SERVICE_THERM_THROT,
WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
+ WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE);
}
#undef SVCMAP
@@ -1592,10 +1605,14 @@ struct chan_info_params {
u32 mac_clk_mhz;
};
+#define WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL BIT(9)
+
struct wmi_tlv_mgmt_tx_compl_ev {
__le32 desc_id;
__le32 status;
__le32 pdev_id;
+ __le32 ppdu_id;
+ __le32 ack_rssi;
};
#define WMI_TLV_MGMT_RX_NUM_RSSI 4
@@ -1872,6 +1889,22 @@ struct wmi_tlv_req_stats_cmd {
struct wmi_mac_addr peer_macaddr;
} __packed;
+#define WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT 31
+#define WMI_TLV_PEER_RX_DURATION_HIGH_MASK GENMASK(30, 0)
+#define WMI_TLV_PEER_RX_DURATION_SHIFT 32
+
+struct wmi_tlv_peer_stats_extd {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 rx_duration;
+ __le32 peer_tx_bytes;
+ __le32 peer_rx_bytes;
+ __le32 last_tx_rate_code;
+ __le32 last_tx_power;
+ __le32 rx_mc_bc_cnt;
+ __le32 rx_duration_high;
+ __le32 reserved[2];
+} __packed;
+
struct wmi_tlv_vdev_stats {
__le32 vdev_id;
__le32 beacon_snr;
@@ -1965,6 +1998,10 @@ struct wmi_tlv_stats_ev {
__le32 num_peer_stats;
__le32 num_bcnflt_stats;
__le32 num_chan_stats;
+ __le32 num_mib_stats;
+ __le32 pdev_id;
+ __le32 num_bcn_stats;
+ __le32 num_peer_stats_extd;
} __packed;
struct wmi_tlv_p2p_noa_ev {
@@ -1998,8 +2035,15 @@ struct wmi_tlv_set_quiet_cmd {
__le32 enabled;
} __packed;
+enum wmi_tlv_wow_interface_cfg {
+ WOW_IFACE_PAUSE_ENABLED,
+ WOW_IFACE_PAUSE_DISABLED
+};
+
struct wmi_tlv_wow_enable_cmd {
__le32 enable;
+ __le32 pause_iface_config;
+ __le32 flags;
} __packed;
struct wmi_tlv_wow_host_wakeup_ind {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ba837403e266..98a90e49d666 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/skbuff.h>
@@ -827,6 +816,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -903,6 +893,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -978,6 +969,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1056,6 +1048,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
.disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
+ .rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -1606,6 +1599,30 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
.enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
};
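+/*
+ * Per-firmware cipher tables: index with the generic WMI_CIPHER_* value to
+ * obtain the value the firmware interface expects. The TLV firmware numbers
+ * its ciphers differently (it defines an extra CIPHER_ANY entry), hence the
+ * separate table.
+ */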
+static const u8 wmi_key_cipher_suites[] = {
+ [WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
+ [WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
+ [WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
+ [WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
+ [WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
+ [WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
+ [WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
+ [WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
+ [WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
+};
+
+static const u8 wmi_tlv_key_cipher_suites[] = {
+ [WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
+ [WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
+ [WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
+ [WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
+ [WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
+ [WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
+ [WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
+ [WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
+ [WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
+};
+
static const struct wmi_peer_flags_map wmi_peer_flags_map = {
.auth = WMI_PEER_AUTH,
.qos = WMI_PEER_QOS,
@@ -2325,8 +2342,8 @@ static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
return true;
}
-static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
- u32 status)
+static int
+wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
{
struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
struct ath10k_wmi *wmi = &ar->wmi;
@@ -2336,30 +2353,34 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
spin_lock_bh(&ar->data_lock);
- pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
+ pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
if (!pkt_addr) {
ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
- desc_id);
+ param->desc_id);
ret = -ENOENT;
goto out;
}
msdu = pkt_addr->vaddr;
dma_unmap_single(ar->dev, pkt_addr->paddr,
- msdu->len, DMA_FROM_DEVICE);
+ msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
- if (status)
+ if (param->status) {
info->flags &= ~IEEE80211_TX_STAT_ACK;
- else
+ } else {
info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ param->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ }
ieee80211_tx_status_irqsafe(ar->hw, msdu);
ret = 0;
out:
- idr_remove(&wmi->mgmt_pending_tx, desc_id);
+ idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
spin_unlock_bh(&ar->data_lock);
return ret;
}
@@ -2367,6 +2388,7 @@ out:
int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
+ struct mgmt_tx_compl_params param;
int ret;
ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
@@ -2375,14 +2397,50 @@ int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
return ret;
}
- wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id),
- __le32_to_cpu(arg.status));
+ memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
+ param.desc_id = __le32_to_cpu(arg.desc_id);
+ param.status = __le32_to_cpu(arg.status);
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
+
+ wmi_process_mgmt_tx_comp(ar, &param);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n");
return 0;
}
+int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
+ struct mgmt_tx_compl_params param;
+ u32 num_reports;
+ int i, ret;
+
+ ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
+ return ret;
+ }
+
+ num_reports = __le32_to_cpu(arg.num_reports);
+
+ for (i = 0; i < num_reports; i++) {
+ memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
+ param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
+ param.status = __le32_to_cpu(arg.status[i]);
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
+ wmi_process_mgmt_tx_comp(ar, &param);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
+
+ return 0;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -5193,7 +5251,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
void *vaddr;
pool_size = num_units * round_up(unit_len, 4);
- vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
+ vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
@@ -6240,6 +6298,25 @@ int ath10k_wmi_connect(struct ath10k *ar)
}
static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct wmi_pdev_set_base_macaddr_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
+ ether_addr_copy(cmd->mac_addr.addr, macaddr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev basemac %pM\n", macaddr);
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg)
@@ -8245,7 +8322,7 @@ ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
"Peer TX rate", peer->peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RX rate", peer->peer_rx_rate);
- len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
"Peer RX duration", peer->rx_duration);
len += scnprintf(buf + len, buf_len - len, "\n");
@@ -9048,6 +9125,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.gen_peer_create = ath10k_wmi_op_gen_peer_create,
.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
@@ -9166,6 +9244,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
.gen_init = ath10k_wmi_10_4_op_gen_init,
@@ -9229,6 +9308,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
ar->wmi.cmd = &wmi_10_2_4_cmd_map;
@@ -9236,6 +9316,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
case ATH10K_FW_WMI_OP_VERSION_10_2:
ar->wmi.cmd = &wmi_10_2_cmd_map;
@@ -9243,6 +9324,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
ar->wmi.cmd = &wmi_10x_cmd_map;
@@ -9250,6 +9332,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->wmi.cmd = &wmi_cmd_map;
@@ -9257,9 +9340,11 @@ int ath10k_wmi_attach(struct ath10k *ar)
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
ar->wmi.peer_flags = &wmi_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ath10k_wmi_tlv_attach(ar);
+ ar->wmi_key_cipher = wmi_tlv_key_cipher_suites;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2034ccc7cc72..e1c40bb69932 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -1,26 +1,15 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _WMI_H_
#define _WMI_H_
#include <linux/types.h>
-#include <net/mac80211.h>
+#include <linux/ieee80211.h>
/*
* This file specifies the WMI interface for the Unified Software
@@ -208,6 +197,11 @@ enum wmi_service {
WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT,
WMI_SERVICE_THERM_THROT,
+ WMI_SERVICE_RTT_RESPONDER_ROLE,
+ WMI_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_SERVICE_REPORT_AIRTIME,
+
+ /* Remember to add the new value to wmi_service_name()! */
/* keep last */
WMI_SERVICE_MAX,
@@ -368,9 +362,14 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_HTT_ASSERT_TRIGGER_SUPPORT,
WMI_10_4_SERVICE_VDEV_FILTER_NEIGHBOR_RX_PACKETS,
WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
+ WMI_10_4_SERVICE_PEER_CHWIDTH_CHANGE,
+ WMI_10_4_SERVICE_RX_FILTER_OUT_COUNT,
+ WMI_10_4_SERVICE_RTT_RESPONDER_ROLE,
+ WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ WMI_10_4_SERVICE_REPORT_AIRTIME,
};
-static inline char *wmi_service_name(int service_id)
+static inline char *wmi_service_name(enum wmi_service service_id)
{
#define SVCSTR(x) case x: return #x
@@ -467,6 +466,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
SVCSTR(WMI_SERVICE_VDEV_RX_FILTER);
+ SVCSTR(WMI_SERVICE_BTCOEX);
SVCSTR(WMI_SERVICE_CHECK_CAL_VERSION);
SVCSTR(WMI_SERVICE_DBGLOG_WARN2);
SVCSTR(WMI_SERVICE_BTCOEX_DUTY_CYCLE);
@@ -476,18 +476,29 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
+ SVCSTR(WMI_SERVICE_MGMT_TX_WMI);
SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS);
SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
SVCSTR(WMI_SERVICE_RESET_CHIP);
+ SVCSTR(WMI_SERVICE_SPOOF_MAC_SUPPORT);
SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI);
SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT);
- default:
+ SVCSTR(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT);
+ SVCSTR(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT);
+ SVCSTR(WMI_SERVICE_THERM_THROT);
+ SVCSTR(WMI_SERVICE_RTT_RESPONDER_ROLE);
+ SVCSTR(WMI_SERVICE_PER_PACKET_SW_ENCRYPT);
+ SVCSTR(WMI_SERVICE_REPORT_AIRTIME);
+
+ case WMI_SERVICE_MAX:
return NULL;
}
#undef SVCSTR
+
+ return NULL;
}
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
@@ -579,6 +590,8 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len);
SVCMAP(WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT,
WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -799,6 +812,12 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_RTT_RESPONDER_ROLE,
+ WMI_SERVICE_RTT_RESPONDER_ROLE, len);
+ SVCMAP(WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len);
+ SVCMAP(WMI_10_4_SERVICE_REPORT_AIRTIME,
+ WMI_SERVICE_REPORT_AIRTIME, len);
}
#undef SVCMAP
@@ -1986,7 +2005,7 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
/* no default handler to allow compiler to check that the
* enum is fully handled
*/
- };
+ }
return "<unknown>";
}
@@ -2075,6 +2094,8 @@ enum wmi_channel_change_cause {
#define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */
#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000 /* LDPC RX support */
+#define WMI_HT_CAP_TX_LDPC 0x2000 /* LDPC TX support */
#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
WMI_HT_CAP_HT20_SGI | \
@@ -2972,6 +2993,8 @@ enum wmi_10_4_feature_mask {
WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11),
WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12),
WMI_10_4_TX_DATA_ACK_RSSI = BIT(16),
+ WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT = BIT(17),
+ WMI_10_4_REPORT_AIRTIME = BIT(18),
};
@@ -4083,6 +4106,10 @@ struct wmi_pdev_set_param_cmd {
__le32 param_value;
} __packed;
+struct wmi_pdev_set_base_macaddr_cmd {
+ struct wmi_mac_addr mac_addr;
+} __packed;
+
/* valid period is 1 ~ 60000ms, unit in millisecond */
#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
@@ -4507,6 +4534,13 @@ enum wmi_10_4_stats_id {
WMI_10_4_STAT_VDEV_EXTD = BIT(4),
};
+enum wmi_tlv_stats_id {
+ WMI_TLV_STAT_PDEV = BIT(0),
+ WMI_TLV_STAT_VDEV = BIT(1),
+ WMI_TLV_STAT_PEER = BIT(2),
+ WMI_TLV_STAT_PEER_EXTD = BIT(10),
+};
+
struct wlan_inst_rssi_args {
__le16 cfg_retry_count;
__le16 retry_count;
@@ -4929,15 +4963,30 @@ struct wmi_key_seq_counter {
__le32 key_seq_counter_h;
} __packed;
-#define WMI_CIPHER_NONE 0x0 /* clear key */
-#define WMI_CIPHER_WEP 0x1
-#define WMI_CIPHER_TKIP 0x2
-#define WMI_CIPHER_AES_OCB 0x3
-#define WMI_CIPHER_AES_CCM 0x4
-#define WMI_CIPHER_WAPI 0x5
-#define WMI_CIPHER_CKIP 0x6
-#define WMI_CIPHER_AES_CMAC 0x7
-#define WMI_CIPHER_AES_GCM 0x8
+enum wmi_cipher_suites {
+ WMI_CIPHER_NONE,
+ WMI_CIPHER_WEP,
+ WMI_CIPHER_TKIP,
+ WMI_CIPHER_AES_OCB,
+ WMI_CIPHER_AES_CCM,
+ WMI_CIPHER_WAPI,
+ WMI_CIPHER_CKIP,
+ WMI_CIPHER_AES_CMAC,
+ WMI_CIPHER_AES_GCM,
+};
+
+enum wmi_tlv_cipher_suites {
+ WMI_TLV_CIPHER_NONE,
+ WMI_TLV_CIPHER_WEP,
+ WMI_TLV_CIPHER_TKIP,
+ WMI_TLV_CIPHER_AES_OCB,
+ WMI_TLV_CIPHER_AES_CCM,
+ WMI_TLV_CIPHER_WAPI,
+ WMI_TLV_CIPHER_CKIP,
+ WMI_TLV_CIPHER_AES_CMAC,
+ WMI_TLV_CIPHER_ANY,
+ WMI_TLV_CIPHER_AES_GCM,
+};
struct wmi_vdev_install_key_cmd {
__le32 vdev_id;
@@ -5003,12 +5052,13 @@ enum wmi_rate_preamble {
#define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1)
#define ATH10K_VHT_MCS_NUM 10
-#define ATH10K_BW_NUM 4
+#define ATH10K_BW_NUM 6
#define ATH10K_NSS_NUM 4
#define ATH10K_LEGACY_NUM 12
#define ATH10K_GI_NUM 2
#define ATH10K_HT_MCS_NUM 32
#define ATH10K_RATE_TABLE_NUM 320
+#define ATH10K_RATE_INFO_FLAGS_SGI_BIT 2
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@@ -5083,6 +5133,7 @@ struct wmi_vdev_param_map {
u32 inc_tsf;
u32 dec_tsf;
u32 disable_4addr_src_lrn;
+ u32 rtt_responder_role;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -6682,10 +6733,27 @@ struct wmi_scan_ev_arg {
__le32 vdev_id;
};
+struct mgmt_tx_compl_params {
+ u32 desc_id;
+ u32 status;
+ u32 ppdu_id;
+ int ack_rssi;
+};
+
struct wmi_tlv_mgmt_tx_compl_ev_arg {
__le32 desc_id;
__le32 status;
__le32 pdev_id;
+ __le32 ppdu_id;
+ __le32 ack_rssi;
+};
+
+struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg {
+ __le32 num_reports;
+ const __le32 *desc_ids;
+ const __le32 *status;
+ const __le32 *ppdu_ids;
+ const __le32 *ack_rssi;
};
struct wmi_mgmt_rx_ev_arg {
@@ -7244,6 +7312,7 @@ int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 36d4245c308e..8c26adddd034 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mac.h"
@@ -77,7 +66,7 @@ static int ath10k_wow_cleanup(struct ath10k *ar)
return 0;
}
-/**
+/*
* Convert a 802.3 format to a 802.11 format.
* +------------+-----------+--------+----------------+
* 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
@@ -88,9 +77,8 @@ static int ath10k_wow_cleanup(struct ath10k *ar)
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
-static void ath10k_wow_convert_8023_to_80211
- (struct cfg80211_pkt_pattern *new,
- const struct cfg80211_pkt_pattern *old)
+static void ath10k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
{
u8 hdr_8023_pattern[ETH_HLEN] = {};
u8 hdr_8023_bit_mask[ETH_HLEN] = {};
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
index 6e810105b775..14ea4e1e925e 100644
--- a/drivers/net/wireless/ath/ath10k/wow.h
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _WOW_H_
#define _WOW_H_
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 54132af70094..aa1c71a76ef7 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1140,7 +1140,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
len -= ie_len;
data += ie_len;
- };
+ }
ret = 0;
out:
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 9d7ac1ab2d02..68854c45d0a4 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -776,10 +776,8 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
- ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
NO_SYNC_WMIFLAG);
-
- return 0;
}
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index f019a20e5a1f..2b29bf4730f6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3457,9 +3457,9 @@ static u32 ar9003_dump_cal_data(struct ath_hw *ah, char *buf, u32 len, u32 size,
if (!((pBase->txrxMask >> i) & 1))
continue;
- len += snprintf(buf + len, size - len, "Chain %d\n", i);
+ len += scnprintf(buf + len, size - len, "Chain %d\n", i);
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"Freq\t ref\tvolt\ttemp\tnf_cal\tnf_pow\trx_temp\n");
for (j = 0; j < cal_pier_nr; j++) {
@@ -3471,10 +3471,10 @@ static u32 ar9003_dump_cal_data(struct ath_hw *ah, char *buf, u32 len, u32 size,
freq = 4800 + eep->calFreqPier5G[j] * 5;
}
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"%d\t", freq);
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"%d\t%d\t%d\t%d\t%d\t%d\n",
cal_pier->refPower,
cal_pier->voltMeas,
@@ -3505,12 +3505,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
len += scnprintf(buf + len, size - len, "Calibration data\n");
len = ar9003_dump_cal_data(ah, buf, len, size, true);
- len += snprintf(buf + len, size - len,
+ len += scnprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
- len += snprintf(buf + len, size - len, "Calibration data\n");
+ len += scnprintf(buf + len, size - len, "Calibration data\n");
len = ar9003_dump_cal_data(ah, buf, len, size, false);
goto out;
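The snprintf to scnprintf conversions above matter because the two return different things: snprintf returns the length the output would have taken, possibly larger than the remaining space, while scnprintf returns the bytes actually written. When the return value is accumulated into `len` and fed back as `size - len`, the snprintf form can push `len` past `size`. A minimal sketch of the safe accumulation pattern, under the usual kernel semantics:

/* Sketch of the pattern fixed above: scnprintf never advances `len`
 * beyond `size`, so `size - len` stays a valid remaining-space
 * argument on every iteration.
 */
static u32 demo_dump_lines(char *buf, u32 size)
{
	u32 len = 0;

	len += scnprintf(buf + len, size - len, "line 1\n");
	len += scnprintf(buf + len, size - len, "line 2\n");
	len += scnprintf(buf + len, size - len, "line 3\n");

	return len;
}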
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 0fca44e91a71..a412b352182c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -112,8 +112,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_TXFIFO_DEPTH 8
#define ATH_TX_ERROR 0x01
-#define ATH_AIRTIME_QUANTUM 300 /* usec */
-
/* Stop tx traffic 1ms before the GO goes away */
#define ATH_P2P_PS_STOP_TIME 1000
@@ -246,10 +244,8 @@ struct ath_atx_tid {
s8 bar_index;
bool active;
bool clear_ps_filter;
- bool has_queued;
};
-void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
struct ath_node {
@@ -263,12 +259,9 @@ struct ath_node {
bool sleeping;
bool no_ps_filter;
- s64 airtime_deficit[IEEE80211_NUM_ACS];
- u32 airtime_rx_start;
#ifdef CONFIG_ATH9K_STATION_STATISTICS
struct ath_rx_rate_stats rx_rate_stats;
- struct ath_airtime_stats airtime_stats;
#endif
u8 key_idx[4];
@@ -986,11 +979,6 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH9K_NUM_CHANCTX 2 /* supports 2 operating channels */
-#define AIRTIME_USE_TX BIT(0)
-#define AIRTIME_USE_RX BIT(1)
-#define AIRTIME_USE_NEW_QUEUES BIT(2)
-#define AIRTIME_ACTIVE(flags) (!!(flags & (AIRTIME_USE_TX|AIRTIME_USE_RX)))
-
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
@@ -1034,8 +1022,6 @@ struct ath_softc {
short nbcnvifs;
unsigned long ps_usecount;
- u16 airtime_flags; /* AIRTIME_* */
-
struct ath_rx rx;
struct ath_tx tx;
struct ath_beacon beacon;
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 6aa3ec024ffa..21191955a7c1 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -1039,6 +1039,9 @@ static struct dentry *create_buf_file_handler(const char *filename,
buf_file = debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
+ if (IS_ERR(buf_file))
+ return NULL;
+
*is_global = 1;
return buf_file;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 4399e9ad058f..26ea51a72156 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -148,7 +148,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
{ "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
{ "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
{ "SPUR UP", ah->stats.ast_ani_spurup },
- { "SPUR DOWN", ah->stats.ast_ani_spurup },
+ { "SPUR DOWN", ah->stats.ast_ani_spurdown },
{ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
{ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
{ "MRC-CCK ON", ah->stats.ast_ani_ccklow },
@@ -1443,9 +1443,6 @@ int ath9k_init_debug(struct ath_hw *ah)
#endif
debugfs_create_file("tpc", 0600, sc->debug.debugfs_phy, sc, &fops_tpc);
- debugfs_create_u16("airtime_flags", 0600,
- sc->debug.debugfs_phy, &sc->airtime_flags);
-
debugfs_create_file("nf_override", 0600,
sc->debug.debugfs_phy, sc, &fops_nf_override);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 79607db14387..33826aa13687 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -319,20 +319,12 @@ ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
void ath_debug_rate_stats(struct ath_softc *sc,
struct ath_rx_status *rs,
struct sk_buff *skb);
-void ath_debug_airtime(struct ath_softc *sc,
- struct ath_node *an,
- u32 rx, u32 tx);
#else
static inline void ath_debug_rate_stats(struct ath_softc *sc,
struct ath_rx_status *rs,
struct sk_buff *skb)
{
}
-static inline void ath_debug_airtime(struct ath_softc *sc,
- struct ath_node *an,
- u32 rx, u32 tx)
-{
-}
#endif /* CONFIG_ATH9K_STATION_STATISTICS */
#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index e8fcd3e1c470..d95cabddce33 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -242,75 +242,6 @@ static const struct file_operations fops_node_recv = {
.llseek = default_llseek,
};
-void ath_debug_airtime(struct ath_softc *sc,
- struct ath_node *an,
- u32 rx,
- u32 tx)
-{
- struct ath_airtime_stats *astats = &an->airtime_stats;
-
- astats->rx_airtime += rx;
- astats->tx_airtime += tx;
-}
-
-static ssize_t read_airtime(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_node *an = file->private_data;
- struct ath_airtime_stats *astats;
- static const char *qname[4] = {
- "VO", "VI", "BE", "BK"
- };
- u32 len = 0, size = 256;
- char *buf;
- size_t retval;
- int i;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- astats = &an->airtime_stats;
-
- len += scnprintf(buf + len, size - len, "RX: %u us\n", astats->rx_airtime);
- len += scnprintf(buf + len, size - len, "TX: %u us\n", astats->tx_airtime);
- len += scnprintf(buf + len, size - len, "Deficit: ");
- for (i = 0; i < 4; i++)
- len += scnprintf(buf+len, size - len, "%s: %lld us ", qname[i], an->airtime_deficit[i]);
- if (len < size)
- buf[len++] = '\n';
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static ssize_t
-write_airtime_reset_stub(struct file *file, const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath_node *an = file->private_data;
- struct ath_airtime_stats *astats;
- int i;
-
- astats = &an->airtime_stats;
- astats->rx_airtime = 0;
- astats->tx_airtime = 0;
- for (i = 0; i < 4; i++)
- an->airtime_deficit[i] = ATH_AIRTIME_QUANTUM;
- return count;
-}
-
-static const struct file_operations fops_airtime = {
- .read = read_airtime,
- .write = write_airtime_reset_stub,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-
void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -320,5 +251,4 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
debugfs_create_file("node_aggr", 0444, dir, an, &fops_node_aggr);
debugfs_create_file("node_recv", 0444, dir, an, &fops_node_recv);
- debugfs_create_file("airtime", 0644, dir, an, &fops_airtime);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 799010ed04e0..4e8e80ac8341 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -574,12 +574,12 @@ void ath9k_tx_failed_tasklet(unsigned long data)
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
- spin_lock_bh(&priv->tx.tx_lock);
+ spin_lock(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
- spin_unlock_bh(&priv->tx.tx_lock);
+ spin_unlock(&priv->tx.tx_lock);
return;
}
- spin_unlock_bh(&priv->tx.tx_lock);
+ spin_unlock(&priv->tx.tx_lock);
ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
}
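The spin_lock_bh to spin_lock change above rests on context: ath9k_tx_failed_tasklet() runs as a tasklet, already in softirq context with bottom halves disabled, so re-disabling BHs around tx_lock is redundant as long as no hard-IRQ path takes the same lock. An illustrative sketch of that rule (names are invented, not from the driver):

/* Illustrative only: inside a tasklet the plain spin_lock() form is
 * sufficient for a lock that is otherwise taken from process context
 * with spin_lock_bh().
 */
struct demo_priv {
	spinlock_t lock;
	bool draining;
};

static void demo_tasklet(unsigned long data)
{
	struct demo_priv *p = (struct demo_priv *)data;

	spin_lock(&p->lock);	/* softirq context: BHs already disabled */
	p->draining = false;
	spin_unlock(&p->lock);
}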
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c070a9e51ebf..98141b699c88 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc)
ret = ath9k_eeprom_request(sc, eeprom_name);
if (ret)
return ret;
+
+ ah->ah_flags &= ~AH_USE_EEPROM;
+ ah->ah_flags |= AH_NO_EEP_SWAP;
}
mac = of_get_mac_address(np);
if (mac)
ether_addr_copy(common->macaddr, mac);
- ah->ah_flags &= ~AH_USE_EEPROM;
- ah->ah_flags |= AH_NO_EEP_SWAP;
-
return 0;
}
@@ -676,8 +676,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
/* Will be cleared in ath9k_start() */
set_bit(ATH_OP_INVALID, &common->op_flags);
- sc->airtime_flags = (AIRTIME_USE_TX | AIRTIME_USE_RX |
- AIRTIME_USE_NEW_QUEUES);
sc->sc_ah = ah;
sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
@@ -1013,6 +1011,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
}
int ath9k_init_device(u16 devid, struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 30d1bd832d90..4e97f7f3b2a3 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1006,9 +1006,6 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
struct ath_rx_status *rs,
struct sk_buff *skb)
{
- struct ath_node *an;
- struct ath_acq *acq;
- struct ath_vif *avp;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -1019,7 +1016,7 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
int phy;
u16 len = rs->rs_datalen;
u32 airtime = 0;
- u8 tidno, acno;
+ u8 tidno;
if (!ieee80211_is_data(hdr->frame_control))
return;
@@ -1029,11 +1026,7 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
if (!sta)
goto exit;
- an = (struct ath_node *) sta->drv_priv;
- avp = (struct ath_vif *) an->vif->drv_priv;
tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- acno = TID_TO_WME_AC(tidno);
- acq = &avp->chanctx->acq[acno];
rxs = IEEE80211_SKB_RXCB(skb);
@@ -1054,14 +1047,7 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
len, rxs->rate_idx, is_sp);
}
- if (!!(sc->airtime_flags & AIRTIME_USE_RX)) {
- spin_lock_bh(&acq->lock);
- an->airtime_deficit[acno] -= airtime;
- if (an->airtime_deficit[acno] <= 0)
- __ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno));
- spin_unlock_bh(&acq->lock);
- }
- ath_debug_airtime(sc, an, airtime, 0);
+ ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
exit:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f448d5716639..773d428ff1b0 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -113,44 +113,14 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
ath_tx_status(hw, skb);
}
-void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
-{
- struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
- struct ath_chanctx *ctx = avp->chanctx;
- struct ath_acq *acq;
- struct list_head *tid_list;
- u8 acno = TID_TO_WME_AC(tid->tidno);
-
- if (!ctx || !list_empty(&tid->list))
- return;
-
-
- acq = &ctx->acq[acno];
- if ((sc->airtime_flags & AIRTIME_USE_NEW_QUEUES) &&
- tid->an->airtime_deficit[acno] > 0)
- tid_list = &acq->acq_new;
- else
- tid_list = &acq->acq_old;
-
- list_add_tail(&tid->list, tid_list);
-}
-
void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
- struct ath_chanctx *ctx = avp->chanctx;
- struct ath_acq *acq;
+ struct ieee80211_txq *queue =
+ container_of((void *)tid, struct ieee80211_txq, drv_priv);
- if (!ctx || !list_empty(&tid->list))
- return;
-
- acq = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
- spin_lock_bh(&acq->lock);
- __ath_tx_queue_tid(sc, tid);
- spin_unlock_bh(&acq->lock);
+ ieee80211_schedule_txq(sc->hw, queue);
}
-
void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
{
struct ath_softc *sc = hw->priv;
@@ -163,11 +133,7 @@ void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
tid->tidno);
ath_txq_lock(sc, txq);
-
- tid->has_queued = true;
- ath_tx_queue_tid(sc, tid);
ath_txq_schedule(sc, txq);
-
ath_txq_unlock(sc, txq);
}
@@ -217,8 +183,8 @@ ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
return ATH_AN_2_TID(an, tidno);
}
-static struct sk_buff *
-ath_tid_pull(struct ath_atx_tid *tid)
+static int
+ath_tid_pull(struct ath_atx_tid *tid, struct sk_buff **skbuf)
{
struct ieee80211_txq *txq = container_of((void*)tid, struct ieee80211_txq, drv_priv);
struct ath_softc *sc = tid->an->sc;
@@ -229,20 +195,16 @@ ath_tid_pull(struct ath_atx_tid *tid)
};
struct sk_buff *skb;
struct ath_frame_info *fi;
- int q;
-
- if (!tid->has_queued)
- return NULL;
+ int q, ret;
skb = ieee80211_tx_dequeue(hw, txq);
- if (!skb) {
- tid->has_queued = false;
- return NULL;
- }
+ if (!skb)
+ return -ENOENT;
- if (ath_tx_prepare(hw, skb, &txctl)) {
+ ret = ath_tx_prepare(hw, skb, &txctl);
+ if (ret) {
ieee80211_free_txskb(hw, skb);
- return NULL;
+ return ret;
}
q = skb_get_queue_mapping(skb);
@@ -252,24 +214,19 @@ ath_tid_pull(struct ath_atx_tid *tid)
++tid->txq->pending_frames;
}
- return skb;
-}
-
-
-static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
-{
- return !skb_queue_empty(&tid->retry_q) || tid->has_queued;
+ *skbuf = skb;
+ return 0;
}
-static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
+static int ath_tid_dequeue(struct ath_atx_tid *tid,
+ struct sk_buff **skb)
{
- struct sk_buff *skb;
+ int ret = 0;
+ *skb = __skb_dequeue(&tid->retry_q);
+ if (!*skb)
+ ret = ath_tid_pull(tid, skb);
- skb = __skb_dequeue(&tid->retry_q);
- if (!skb)
- skb = ath_tid_pull(tid);
-
- return skb;
+ return ret;
}
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
@@ -365,11 +322,12 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
+ int ret;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- while ((skb = ath_tid_dequeue(tid))) {
+ while ((ret = ath_tid_dequeue(tid, &skb)) == 0) {
fi = get_frame_info(skb);
bf = fi->bf;
@@ -681,7 +639,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
skb_queue_splice_tail(&bf_pending, &tid->retry_q);
if (!an->sleeping) {
ath_tx_queue_tid(sc, tid);
-
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
}
@@ -708,11 +665,11 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}
-static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_node *an,
- struct ath_atx_tid *tid, struct ath_buf *bf,
+static void ath_tx_count_airtime(struct ath_softc *sc,
+ struct ieee80211_sta *sta,
+ struct ath_buf *bf,
struct ath_tx_status *ts)
{
- struct ath_txq *txq = tid->txq;
u32 airtime = 0;
int i;
@@ -722,17 +679,7 @@ static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_node *an,
airtime += rate_dur * bf->rates[i].count;
}
- if (sc->airtime_flags & AIRTIME_USE_TX) {
- int q = txq->mac80211_qnum;
- struct ath_acq *acq = &sc->cur_chan->acq[q];
-
- spin_lock_bh(&acq->lock);
- an->airtime_deficit[q] -= airtime;
- if (an->airtime_deficit[q] <= 0)
- __ath_tx_queue_tid(sc, tid);
- spin_unlock_bh(&acq->lock);
- }
- ath_debug_airtime(sc, an, 0, airtime);
+ ieee80211_sta_register_airtime(sta, ts->tid, airtime, 0);
}
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
@@ -762,7 +709,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (sta) {
struct ath_node *an = (struct ath_node *)sta->drv_priv;
tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
- ath_tx_count_airtime(sc, an, tid, bf, ts);
+ ath_tx_count_airtime(sc, sta, bf, ts);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
}
@@ -947,20 +894,21 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
return ndelim;
}
-static struct ath_buf *
+static int
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid)
+ struct ath_atx_tid *tid, struct ath_buf **buf)
{
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
- struct sk_buff *skb, *first_skb = NULL;
struct ath_buf *bf;
+ struct sk_buff *skb, *first_skb = NULL;
u16 seqno;
+ int ret;
while (1) {
- skb = ath_tid_dequeue(tid);
- if (!skb)
- break;
+ ret = ath_tid_dequeue(tid, &skb);
+ if (ret < 0)
+ return ret;
fi = get_frame_info(skb);
bf = fi->bf;
@@ -992,7 +940,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
bf->bf_state.bf_type = 0;
- return bf;
+ break;
}
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
@@ -1011,7 +959,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
first_skb = skb;
continue;
}
- break;
+ return -EINPROGRESS;
}
if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
@@ -1028,10 +976,11 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
if (bf_isampdu(bf))
ath_tx_addto_baw(sc, tid, bf);
- return bf;
+ break;
}
- return NULL;
+ *buf = bf;
+ return 0;
}
static int
@@ -1041,7 +990,7 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
struct ath_buf *bf = bf_first, *bf_prev = NULL;
- int nframes = 0, ndelim;
+ int nframes = 0, ndelim, ret;
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
struct ieee80211_tx_info *tx_info;
@@ -1093,7 +1042,9 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf_prev = bf;
- bf = ath_tx_get_tid_subframe(sc, txq, tid);
+ ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
+ if (ret < 0)
+ break;
}
goto finish;
stop:
@@ -1490,7 +1441,7 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf_first)
{
struct ath_buf *bf = bf_first, *bf_prev = NULL;
- int nframes = 0;
+ int nframes = 0, ret;
do {
struct ieee80211_tx_info *tx_info;
@@ -1504,8 +1455,8 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
if (nframes >= 2)
break;
- bf = ath_tx_get_tid_subframe(sc, txq, tid);
- if (!bf)
+ ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
+ if (ret < 0)
break;
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
@@ -1518,30 +1469,27 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
} while (1);
}
-static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid)
+static int ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid)
{
- struct ath_buf *bf;
+ struct ath_buf *bf = NULL;
struct ieee80211_tx_info *tx_info;
struct list_head bf_q;
- int aggr_len = 0;
+ int aggr_len = 0, ret;
bool aggr;
- if (!ath_tid_has_buffered(tid))
- return false;
-
INIT_LIST_HEAD(&bf_q);
- bf = ath_tx_get_tid_subframe(sc, txq, tid);
- if (!bf)
- return false;
+ ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
+ if (ret < 0)
+ return ret;
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
(!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
__skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
- return false;
+ return -EBUSY;
}
ath_set_rates(tid->an->vif, tid->an->sta, bf);
@@ -1551,7 +1499,7 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_form_burst(sc, txq, tid, &bf_q, bf);
if (list_empty(&bf_q))
- return false;
+ return -EAGAIN;
if (tid->clear_ps_filter || tid->an->no_ps_filter) {
tid->clear_ps_filter = false;
@@ -1560,7 +1508,7 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_fill_desc(sc, bf, txq, aggr_len);
ath_tx_txqaddbuf(sc, txq, &bf_q, false);
- return true;
+ return 0;
}
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1623,28 +1571,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_tid *tid;
- struct ath_txq *txq;
int tidno;
ath_dbg(common, XMIT, "%s called\n", __func__);
for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
tid = ath_node_to_tid(an, tidno);
- txq = tid->txq;
-
- ath_txq_lock(sc, txq);
-
- if (list_empty(&tid->list)) {
- ath_txq_unlock(sc, txq);
- continue;
- }
if (!skb_queue_empty(&tid->retry_q))
ieee80211_sta_set_buffered(sta, tid->tidno, true);
- list_del_init(&tid->list);
-
- ath_txq_unlock(sc, txq);
}
}
@@ -1663,11 +1599,12 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_lock(sc, txq);
tid->clear_ps_filter = true;
- if (ath_tid_has_buffered(tid)) {
+ if (!skb_queue_empty(&tid->retry_q)) {
ath_tx_queue_tid(sc, tid);
ath_txq_schedule(sc, txq);
}
ath_txq_unlock_complete(sc, txq);
+
}
}
@@ -1698,9 +1635,9 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ath_txq *txq = sc->tx.uapsdq;
struct ieee80211_tx_info *info;
struct list_head bf_q;
- struct ath_buf *bf_tail = NULL, *bf;
+ struct ath_buf *bf_tail = NULL, *bf = NULL;
int sent = 0;
- int i;
+ int i, ret;
INIT_LIST_HEAD(&bf_q);
for (i = 0; tids && nframes; i++, tids >>= 1) {
@@ -1713,8 +1650,9 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
ath_txq_lock(sc, tid->txq);
while (nframes > 0) {
- bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
- if (!bf)
+ ret = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq,
+ tid, &bf);
+ if (ret < 0)
break;
ath9k_set_moredata(sc, bf, true);
@@ -1980,11 +1918,11 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
+ struct ieee80211_hw *hw = sc->hw;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ieee80211_txq *queue;
struct ath_atx_tid *tid;
- struct list_head *tid_list;
- struct ath_acq *acq;
- bool active = AIRTIME_ACTIVE(sc->airtime_flags);
+ int ret;
if (txq->mac80211_qnum < 0)
return;
@@ -1992,58 +1930,26 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
return;
+ ieee80211_txq_schedule_start(hw, txq->mac80211_qnum);
spin_lock_bh(&sc->chan_lock);
rcu_read_lock();
- acq = &sc->cur_chan->acq[txq->mac80211_qnum];
if (sc->cur_chan->stopped)
goto out;
-begin:
- tid_list = &acq->acq_new;
- if (list_empty(tid_list)) {
- tid_list = &acq->acq_old;
- if (list_empty(tid_list))
- goto out;
- }
- tid = list_first_entry(tid_list, struct ath_atx_tid, list);
-
- if (active && tid->an->airtime_deficit[txq->mac80211_qnum] <= 0) {
- spin_lock_bh(&acq->lock);
- tid->an->airtime_deficit[txq->mac80211_qnum] += ATH_AIRTIME_QUANTUM;
- list_move_tail(&tid->list, &acq->acq_old);
- spin_unlock_bh(&acq->lock);
- goto begin;
- }
-
- if (!ath_tid_has_buffered(tid)) {
- spin_lock_bh(&acq->lock);
- if ((tid_list == &acq->acq_new) && !list_empty(&acq->acq_old))
- list_move_tail(&tid->list, &acq->acq_old);
- else {
- list_del_init(&tid->list);
- }
- spin_unlock_bh(&acq->lock);
- goto begin;
- }
+ while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+ tid = (struct ath_atx_tid *)queue->drv_priv;
+ ret = ath_tx_sched_aggr(sc, txq, tid);
+ ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
- /*
- * If we succeed in scheduling something, immediately restart to make
- * sure we keep the HW busy.
- */
- if(ath_tx_sched_aggr(sc, txq, tid)) {
- if (!active) {
- spin_lock_bh(&acq->lock);
- list_move_tail(&tid->list, &acq->acq_old);
- spin_unlock_bh(&acq->lock);
- }
- goto begin;
+ ieee80211_return_txq(hw, queue);
}
out:
rcu_read_unlock();
spin_unlock_bh(&sc->chan_lock);
+ ieee80211_txq_schedule_end(hw, txq->mac80211_qnum);
}
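The rewritten ath_txq_schedule() is the core of this series: the driver-private airtime deficit queues (acq_new/acq_old, ATH_AIRTIME_QUANTUM) are dropped and scheduling is delegated to mac80211's airtime-fairness TXQ API. The shape used above is schedule_start, a next_txq loop, return_txq, then schedule_end. A stripped-down sketch of that loop, with the driver-specific locking and aggregation elided; drv_try_to_fill_hw_queue() is a hypothetical stand-in for ath_tx_sched_aggr():

/* Minimal sketch of the mac80211 TXQ scheduling round used above. */
static void demo_schedule(struct ieee80211_hw *hw, int ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac))) {
		drv_try_to_fill_hw_queue(hw, txq);	/* hypothetical */
		ieee80211_return_txq(hw, txq);
	}
	ieee80211_txq_schedule_end(hw, ac);
}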
void ath_txq_schedule_all(struct ath_softc *sc)
@@ -2646,6 +2552,9 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
}
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+ /* we report airtime in ath_tx_count_airtime(), don't report twice */
+ tx_info->status.tx_time = 0;
}
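With the switch to ieee80211_sta_register_airtime(), the driver reports TX airtime itself, summed per rate as duration times attempt count in ath_tx_count_airtime(), and RX airtime from ath_rx_count_airtime(); tx_info->status.tx_time is therefore zeroed above so mac80211 does not account the same airtime twice. A hedged sketch of the TX-side accumulation shape (demo names only; the duration lookup stands in for the driver's rate tables):

/* Sketch of the TX airtime report: per-rate duration times the
 * number of attempts at that rate, handed to mac80211 once.
 */
static void demo_report_tx_airtime(struct ieee80211_sta *sta, u8 tid,
				   const struct ieee80211_tx_rate *rates,
				   const u32 *rate_dur, int nrates)
{
	u32 airtime = 0;
	int i;

	for (i = 0; i < nrates; i++)
		airtime += rate_dur[i] * rates[i].count;

	ieee80211_sta_register_airtime(sta, tid, airtime, 0);
}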
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
@@ -2887,9 +2796,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
struct ath_atx_tid *tid;
int tidno, acno;
- for (acno = 0; acno < IEEE80211_NUM_ACS; acno++)
- an->airtime_deficit[acno] = ATH_AIRTIME_QUANTUM;
-
for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
tid = ath_node_to_tid(an, tidno);
tid->an = an;
@@ -2899,7 +2805,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
tid->baw_head = tid->baw_tail = 0;
tid->active = false;
tid->clear_ps_filter = true;
- tid->has_queued = false;
__skb_queue_head_init(&tid->retry_q);
INIT_LIST_HEAD(&tid->list);
acno = TID_TO_WME_AC(tidno);
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index f7c2f19e81c1..8e154f6364a3 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -427,7 +427,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
if (head->plcp[6] & 0x80)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- status->rate_idx = clamp(0, 75, head->plcp[3] & 0x7f);
+ status->rate_idx = clamp(head->plcp[3] & 0x7f, 0, 75);
status->encoding = RX_ENC_HT;
break;
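The carl9170 change above is a pure argument-order fix: the kernel's clamp() is clamp(val, lo, hi), so the old clamp(0, 75, head->plcp[3] & 0x7f) clamped the constant 0 rather than the rate index. A small hypothetical illustration of the corrected call:

/* Hypothetical helper showing the fixed argument order. */
static u8 demo_rate_idx(u8 plcp3)
{
	return clamp(plcp3 & 0x7f, 0, 75);	/* value first, then bounds */
}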
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index d73e45e26547..75ddaefdd049 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -185,7 +185,9 @@ enum CountryCode {
CTRY_UKRAINE = 804,
CTRY_UNITED_KINGDOM = 826,
CTRY_UNITED_STATES = 840,
+ CTRY_UNITED_STATES2 = 841,
CTRY_UNITED_STATES_FCC49 = 842,
+ CTRY_UNITED_STATES3 = 843,
CTRY_URUGUAY = 858,
CTRY_UZBEKISTAN = 860,
CTRY_VENEZUELA = 862,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 4021e37a225a..c4bd26e65949 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -483,6 +483,8 @@ static struct country_code_to_enum_rd allCountries[] = {
{CTRY_UAE, NULL1_WORLD, "AE"},
{CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"},
{CTRY_UNITED_STATES, FCC3_FCCA, "US"},
+ {CTRY_UNITED_STATES2, FCC3_FCCA, "US"},
+ {CTRY_UNITED_STATES3, FCC3_FCCA, "US"},
/* This "PS" is for US public safety actually... to support this we
* would need to assign new special alpha2 to CRDA db as with the world
* regdomain and use another alpha2 */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 5ab3e31c9ffa..bab30f7a443c 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -174,9 +174,8 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
int i;
size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
- wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size,
- &wcn_ch->dma_addr,
- GFP_KERNEL);
+ wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
+ GFP_KERNEL);
if (!wcn_ch->cpu_addr)
return -ENOMEM;
@@ -627,9 +626,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
- cpu_addr = dma_zalloc_coherent(wcn->dev, s,
- &wcn->mgmt_mem_pool.phy_addr,
- GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(wcn->dev, s,
+ &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
if (!cpu_addr)
goto out_err;
@@ -642,9 +641,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
- cpu_addr = dma_zalloc_coherent(wcn->dev, s,
- &wcn->data_mem_pool.phy_addr,
- GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(wcn->dev, s,
+ &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
if (!cpu_addr)
goto out_err;
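The dma_zalloc_coherent() to dma_alloc_coherent() changes above are part of the tree-wide removal of the zalloc wrapper; by this point dma_alloc_coherent() already returns zeroed memory, so the conversion should be behavior-neutral. A sketch of the resulting allocation pattern (demo name only):

/* Sketch of the converted allocation: dma_alloc_coherent() zeroes the
 * returned buffer, so no separate zalloc variant or memset is needed.
 */
static void *demo_alloc_ring(struct device *dev, size_t size,
			     dma_addr_t *pa)
{
	return dma_alloc_coherent(dev, size, pa, GFP_KERNEL);
}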
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 9b2f9f543952..a1e226652b4a 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -395,7 +395,7 @@ static int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx)
{
int i;
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (wil->sta[i].mid != mid)
@@ -1580,6 +1580,12 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
u8 *buf, *dpos;
const u8 *spos;
+ if (!ies1)
+ ies1_len = 0;
+
+ if (!ies2)
+ ies2_len = 0;
+
if (ies1_len == 0 && ies2_len == 0) {
*merged_ies = NULL;
*merged_len = 0;
@@ -1589,17 +1595,19 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy(buf, ies1, ies1_len);
+ if (ies1)
+ memcpy(buf, ies1, ies1_len);
dpos = buf + ies1_len;
spos = ies2;
- while (spos + 1 < ies2 + ies2_len) {
+ while (spos && (spos + 1 < ies2 + ies2_len)) {
/* IE tag at offset 0, length at offset 1 */
u16 ielen = 2 + spos[1];
if (spos + ielen > ies2 + ies2_len)
break;
if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
- !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
+ (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
+ spos, ielen))) {
memcpy(dpos, spos, ielen);
dpos += ielen;
}
@@ -3007,7 +3015,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy,
wil, vif->mid, WMI_INVALID_RF_SECTOR_INDEX,
sector_type, WIL_CID_ALL);
if (rc == -EINVAL) {
- for (i = 0; i < WIL6210_MAX_CID; i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].mid != vif->mid)
continue;
rc = wil_rf_sector_wmi_set_selected(
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 835c902b84c1..7ad4e5328439 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -162,7 +162,7 @@ static int ring_show(struct seq_file *s, void *data)
snprintf(name, sizeof(name), "tx_%2d", i);
- if (cid < WIL6210_MAX_CID)
+ if (cid < max_assoc_sta)
seq_printf(s,
"\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
@@ -792,14 +792,14 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
"BACK: del_rx require at least 2 params\n");
return -EINVAL;
}
- if (p1 < 0 || p1 >= WIL6210_MAX_CID) {
+ if (p1 < 0 || p1 >= max_assoc_sta) {
wil_err(wil, "BACK: invalid CID %d\n", p1);
return -EINVAL;
}
if (rc < 4)
p3 = WLAN_REASON_QSTA_LEAVE_QBSS;
sta = &wil->sta[p1];
- wmi_delba_rx(wil, sta->mid, mk_cidxtid(p1, p2), p3);
+ wmi_delba_rx(wil, sta->mid, p1, p2, p3);
} else {
wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd);
return -EINVAL;
@@ -1243,7 +1243,7 @@ static int bf_show(struct seq_file *s, void *data)
memset(&reply, 0, sizeof(reply));
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
u32 status;
cmd.cid = i;
@@ -1340,7 +1340,7 @@ static int link_show(struct seq_file *s, void *data)
if (!sinfo)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
struct wil6210_vif *vif;
@@ -1542,7 +1542,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
struct wil6210_priv *wil = s->private;
int i, tid, mcs;
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
u8 aid = 0;
@@ -1651,7 +1651,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
struct wil6210_priv *wil = s->private;
int i, bin;
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
u8 aid = 0;
@@ -1740,7 +1740,7 @@ static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf,
size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS;
wil->tx_latency_res = val;
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
struct wil_sta_info *sta = &wil->sta[i];
kfree(sta->tx_latency_bins);
@@ -1825,7 +1825,7 @@ static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif,
}
seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf);
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (wil->sta[i].mid != vif->mid)
@@ -2386,6 +2386,7 @@ static const struct dbg_off dbg_statics[] = {
{"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
{"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
{"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
+ {"drop_if_ring_full", 0644, (ulong)&drop_if_ring_full, doff_u8},
{},
};
@@ -2439,7 +2440,7 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
wil->debug = NULL;
kfree(wil->dbg_data.data_arr);
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++)
+ for (i = 0; i < max_assoc_sta; i++)
kfree(wil->sta[i].tx_latency_bins);
/* free pmc memory without sending command to fw, as it will
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 5d287a8e1b45..3f5bd177d55f 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -575,10 +575,14 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
}
if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
- wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
- wil6210_mask_halp(wil);
isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
- complete(&wil->halp.comp);
+ if (wil->halp.handle_icr) {
+ /* no need to handle HALP ICRs until next vote */
+ wil->halp.handle_icr = false;
+ wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
+ wil6210_mask_halp(wil);
+ complete(&wil->halp.comp);
+ }
}
wil->isr_misc = isr;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 5b7de00affe2..277abfdf3322 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -219,7 +219,7 @@ static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
{
int i;
- for (i = 0; i < WIL6210_MAX_CID; i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].mid == mid &&
wil->sta[i].status == wil_sta_connected)
return true;
@@ -322,7 +322,7 @@ static void _wil6210_disconnect_complete(struct wil6210_vif *vif,
wil_disconnect_cid_complete(vif, cid, reason_code);
} else { /* all */
wil_dbg_misc(wil, "Disconnect complete all\n");
- for (cid = 0; cid < WIL6210_MAX_CID; cid++)
+ for (cid = 0; cid < max_assoc_sta; cid++)
wil_disconnect_cid_complete(vif, cid, reason_code);
}
@@ -434,7 +434,7 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
wil_disconnect_cid(vif, cid, reason_code);
} else { /* all */
wil_dbg_misc(wil, "Disconnect all\n");
- for (cid = 0; cid < WIL6210_MAX_CID; cid++)
+ for (cid = 0; cid < max_assoc_sta; cid++)
wil_disconnect_cid(vif, cid, reason_code);
}
@@ -1895,7 +1895,7 @@ int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac)
int i;
int rc = -ENOENT;
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ for (i = 0; i < max_assoc_sta; i++) {
if (wil->sta[i].mid == mid &&
wil->sta[i].status != wil_sta_unused &&
ether_addr_equal(wil->sta[i].addr, mac)) {
@@ -1919,11 +1919,14 @@ void wil_halp_vote(struct wil6210_priv *wil)
if (++wil->halp.ref_cnt == 1) {
reinit_completion(&wil->halp.comp);
+ /* mark to IRQ context to handle HALP ICR */
+ wil->halp.handle_icr = true;
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
wil_err(wil, "HALP vote timed out\n");
/* Mask HALP as done in case the interrupt is raised */
+ wil->halp.handle_icr = false;
wil6210_mask_halp(wil);
} else {
wil_dbg_irq(wil,
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 983bd001b53b..32b14fc33a59 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -307,8 +307,8 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
}
/* Block Ack - Rx side (recipient) */
-int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
- u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+ u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
@@ -316,7 +316,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
u16 agg_timeout = le16_to_cpu(ba_timeout);
u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
struct wil_sta_info *sta;
- u8 cid, tid;
u16 agg_wsize = 0;
/* bit 0: A-MSDU supported
* bit 1: policy (should be 0 for us)
@@ -335,10 +334,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
int rc = 0;
might_sleep();
- parse_cidxtid(cidxtid, &cid, &tid);
/* sanity checks */
- if (cid >= WIL6210_MAX_CID) {
+ if (cid >= max_assoc_sta) {
wil_err(wil, "BACK: invalid CID %d\n", cid);
rc = -EINVAL;
goto out;
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 853abc3a73e4..36ebfcf9ef30 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -181,7 +182,7 @@ TRACE_EVENT(wil6210_rx,
__entry->seq = wil_rxdesc_seq(d);
__entry->mcs = wil_rxdesc_mcs(d);
),
- TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x"
+ TP_printk("index %d len %d mid %d cid (%%8) %d tid %d mcs %d seq 0x%03x"
" type 0x%1x subtype 0x%1x", __entry->index, __entry->len,
__entry->mid, __entry->cid, __entry->tid, __entry->mcs,
__entry->seq, __entry->type, __entry->subtype)
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 3e1c831ab2fb..4ccfd1404458 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -30,11 +30,6 @@
#include "trace.h"
#include "txrx_edma.h"
-static bool rtap_include_phy_info;
-module_param(rtap_include_phy_info, bool, 0444);
-MODULE_PARM_DESC(rtap_include_phy_info,
- " Include PHY info in the radiotap header, default - no");
-
bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
@@ -43,6 +38,9 @@ bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
+/* Drop Tx packets in case Tx ring is full */
+bool drop_if_ring_full;
+
static inline uint wil_rx_snaplen(void)
{
return rx_align_2 ? 6 : 0;
@@ -332,87 +330,34 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
u8 mcs_flags;
u8 mcs_index;
} __packed;
- struct wil6210_rtap_vendor {
- struct wil6210_rtap rtap;
- /* vendor */
- u8 vendor_oui[3] __aligned(2);
- u8 vendor_ns;
- __le16 vendor_skip;
- u8 vendor_data[0];
- } __packed;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- struct wil6210_rtap_vendor *rtap_vendor;
+ struct wil6210_rtap *rtap;
int rtap_len = sizeof(struct wil6210_rtap);
- int phy_length = 0; /* phy info header size, bytes */
- static char phy_data[128];
struct ieee80211_channel *ch = wil->monitor_chandef.chan;
- if (rtap_include_phy_info) {
- rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
- /* calculate additional length */
- if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
- /**
- * PHY info starts from 8-byte boundary
- * there are 8-byte lines, last line may be partially
- * written (HW bug), thus FW configures for last line
- * to be excessive. Driver skips this last line.
- */
- int len = min_t(int, 8 + sizeof(phy_data),
- wil_rxdesc_phy_length(d));
-
- if (len > 8) {
- void *p = skb_tail_pointer(skb);
- void *pa = PTR_ALIGN(p, 8);
-
- if (skb_tailroom(skb) >= len + (pa - p)) {
- phy_length = len - 8;
- memcpy(phy_data, pa, phy_length);
- }
- }
- }
- rtap_len += phy_length;
- }
-
if (skb_headroom(skb) < rtap_len &&
pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
return;
}
- rtap_vendor = skb_push(skb, rtap_len);
- memset(rtap_vendor, 0, rtap_len);
+ rtap = skb_push(skb, rtap_len);
+ memset(rtap, 0, rtap_len);
- rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
- rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
- rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
- (1 << IEEE80211_RADIOTAP_FLAGS) |
+ rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ rtap->rthdr.it_len = cpu_to_le16(rtap_len);
+ rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
(1 << IEEE80211_RADIOTAP_CHANNEL) |
(1 << IEEE80211_RADIOTAP_MCS));
if (d->dma.status & RX_DMA_STATUS_ERROR)
- rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
-
- rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
- rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
-
- rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
- rtap_vendor->rtap.mcs_flags = 0;
- rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
-
- if (rtap_include_phy_info) {
- rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
- IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
- /* OUI for Wilocity 04:ce:14 */
- rtap_vendor->vendor_oui[0] = 0x04;
- rtap_vendor->vendor_oui[1] = 0xce;
- rtap_vendor->vendor_oui[2] = 0x14;
- rtap_vendor->vendor_ns = 1;
- /* Rx descriptor + PHY data */
- rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
- phy_length);
- memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
- memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
- phy_length);
- }
+ rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+ rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+ rtap->chnl_flags = cpu_to_le16(0);
+
+ rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+ rtap->mcs_flags = 0;
+ rtap->mcs_index = wil_rxdesc_mcs(d);
}
static bool wil_is_rx_idle(struct wil6210_priv *wil)
@@ -427,6 +372,76 @@ static bool wil_is_rx_idle(struct wil6210_priv *wil)
return true;
}
+static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int mid = wil_rxdesc_mid(d);
+ struct wil6210_vif *vif = wil->vifs[mid];
+ /* cid from DMA descriptor is limited to 3 bits.
+ * In case of cid>=8, the value would be cid modulo 8 and we need to
+ * find real cid by locating the transmitter (ta) inside sta array
+ */
+ int cid = wil_rxdesc_cid(d);
+ unsigned int snaplen = wil_rx_snaplen();
+ struct ieee80211_hdr_3addr *hdr;
+ int i;
+ unsigned char *ta;
+ u8 ftype;
+
+ /* in monitor mode there are no connections */
+ if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
+ return cid;
+
+ ftype = wil_rxdesc_ftype(d) << 2;
+ if (likely(ftype == IEEE80211_FTYPE_DATA)) {
+ if (unlikely(skb->len < ETH_HLEN + snaplen)) {
+ wil_err_ratelimited(wil,
+ "Short data frame, len = %d\n",
+ skb->len);
+ return -ENOENT;
+ }
+ ta = wil_skb_get_sa(skb);
+ } else {
+ if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
+ wil_err_ratelimited(wil, "Short frame, len = %d\n",
+ skb->len);
+ return -ENOENT;
+ }
+ hdr = (void *)skb->data;
+ ta = hdr->addr2;
+ }
+
+ if (max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
+ return cid;
+
+ /* assuming no concurrency between AP interfaces and STA interfaces.
+ * multista is used only in P2P_GO or AP mode. In other modes return
+ * cid from the rx descriptor
+ */
+ if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
+ vif->wdev.iftype != NL80211_IFTYPE_AP)
+ return cid;
+
+ /* For Rx packets cid from rx descriptor is limited to 3 bits (0..7),
+ * to find the real cid, compare transmitter address with the stored
+ * stations mac address in the driver sta array
+ */
+ for (i = cid; i < max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
+ if (wil->sta[i].status != wil_sta_unused &&
+ ether_addr_equal(wil->sta[i].addr, ta)) {
+ cid = i;
+ break;
+ }
+ }
+ if (i >= max_assoc_sta) {
+ wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
+ ta, vif->wdev.iftype, ftype, skb->len);
+ cid = -ENOENT;
+ }
+
+ return cid;
+}
+
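The helper added above recovers the full CID when more than eight stations are allowed: the RX descriptor only carries cid modulo 8, so the real CID must be one of cid, cid + 8, cid + 16, and so on below max_assoc_sta, disambiguated by matching the transmitter address against the sta array. A worked sketch of that candidate walk, with hypothetical numbers (descriptor cid 3 and max_assoc_sta 20 give candidates 3, 11 and 19):

/* Illustrative only: enumerate the CIDs a 3-bit descriptor cid could
 * correspond to, mirroring the stride-8 loop in the patch.
 */
static int demo_cid_candidates(int desc_cid, int max_sta, int *out)
{
	int i, n = 0;

	for (i = desc_cid; i < max_sta; i += 8)	/* WIL6210_RX_DESC_MAX_CID */
		out[n++] = i;

	return n;
}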
/**
* reap 1 frame from @swhead
*
@@ -452,7 +467,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
int i;
struct wil_net_stats *stats;
- BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
+ BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
again:
if (unlikely(wil_ring_is_empty(vring)))
@@ -484,7 +499,6 @@ again:
wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
- cid = wil_rxdesc_cid(d);
mid = wil_rxdesc_mid(d);
vif = wil->vifs[mid];
@@ -495,11 +509,9 @@ again:
goto again;
}
ndev = vif_to_ndev(vif);
- stats = &wil->sta[cid].stats;
-
if (unlikely(dmalen > sz)) {
- wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
- stats->rx_large_frame++;
+ wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
+ dmalen);
kfree_skb(skb);
goto again;
}
@@ -510,6 +522,14 @@ again:
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
+ cid = wil_rx_get_cid_by_skb(wil, skb);
+ if (cid == -ENOENT) {
+ kfree_skb(skb);
+ goto again;
+ }
+ wil_skb_set_cid(skb, (u8)cid);
+ stats = &wil->sta[cid].stats;
+
stats->last_mcs_rx = wil_rxdesc_mcs(d);
if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
stats->rx_per_mcs[stats->last_mcs_rx]++;
@@ -556,13 +576,6 @@ again:
goto again;
}
- if (unlikely(skb->len < ETH_HLEN + snaplen)) {
- wil_err(wil, "Short frame, len = %d\n", skb->len);
- stats->rx_short_frame++;
- kfree_skb(skb);
- goto again;
- }
-
/* L4 IDENT is on when HW calculated checksum, check status
* and in case of error drop the packet
* higher stack layers will handle retransmission (if required)
@@ -659,7 +672,7 @@ int reverse_memcmp(const void *cs, const void *ct, size_t count)
static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- int cid = wil_rxdesc_cid(d);
+ int cid = wil_skb_get_cid(skb);
int tid = wil_rxdesc_tid(d);
int key_id = wil_rxdesc_key_id(d);
int mc = wil_rxdesc_mcast(d);
@@ -707,7 +720,7 @@ static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
{
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
- *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+ *cid = wil_skb_get_cid(skb);
*security = wil_rxdesc_security(d);
}
@@ -724,11 +737,11 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
unsigned int len = skb->len;
int cid;
int security;
- struct ethhdr *eth = (void *)skb->data;
+ u8 *sa, *da = wil_skb_get_da(skb);
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
* is not suitable, need to look at data
*/
- int mcast = is_multicast_ether_addr(eth->h_dest);
+ int mcast = is_multicast_ether_addr(da);
struct wil_net_stats *stats;
struct sk_buff *xmit_skb = NULL;
static const char * const gro_res_str[] = {
@@ -759,7 +772,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
}
if (wdev->iftype == NL80211_IFTYPE_STATION) {
- if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) {
+ sa = wil_skb_get_sa(skb);
+ if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
/* mcast packet looped back to us */
rc = GRO_DROP;
dev_kfree_skb(skb);
@@ -772,8 +786,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
*/
xmit_skb = skb_copy(skb, GFP_ATOMIC);
} else {
- int xmit_cid = wil_find_cid(wil, vif->mid,
- eth->h_dest);
+ int xmit_cid = wil_find_cid(wil, vif->mid, da);
if (xmit_cid >= 0) {
/* The destination station is associated to
@@ -971,7 +984,6 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
.ring_size = cpu_to_le16(size),
},
.ringid = id,
- .cidxtid = mk_cidxtid(cid, tid),
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.mac_ctrl = 0,
.to_resolution = 0,
@@ -991,6 +1003,14 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
struct wil_ring *vring = &wil->ring_tx[id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
+ if (cid >= WIL6210_RX_DESC_MAX_CID) {
+ cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
+ cmd.vring_cfg.cid = cid;
+ cmd.vring_cfg.tid = tid;
+ } else {
+ cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
+ }
+
wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
@@ -1043,7 +1063,7 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring);
- wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[id][0] = max_assoc_sta;
wil->ring2cid_tid[id][1] = 0;
out:
@@ -1128,7 +1148,7 @@ fail:
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
- wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[ring_id][0] = max_assoc_sta;
wil->ring2cid_tid[ring_id][1] = 0;
return rc;
}
@@ -1175,7 +1195,7 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
if (rc)
goto out;
- wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+ wil->ring2cid_tid[id][0] = max_assoc_sta; /* CID */
wil->ring2cid_tid[id][1] = 0; /* TID */
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@@ -1217,12 +1237,13 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct sk_buff *skb)
{
- int i;
- struct ethhdr *eth = (void *)skb->data;
- int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
+ int i, cid;
+ const u8 *da = wil_skb_get_da(skb);
int min_ring_id = wil_get_min_tx_ring_id(wil);
- if (cid < 0)
+ cid = wil_find_cid(wil, vif->mid, da);
+
+ if (cid < 0 || cid >= max_assoc_sta)
return NULL;
/* TODO: fix for multiple TID */
@@ -1235,7 +1256,7 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
- eth->h_dest, i);
+ da, i);
if (v->va && txdata->enabled) {
return v;
} else {
@@ -1274,7 +1295,7 @@ static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
continue;
cid = wil->ring2cid_tid[i][0];
- if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ if (cid >= max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
@@ -1326,10 +1347,10 @@ static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
static void wil_set_da_for_vring(struct wil6210_priv *wil,
struct sk_buff *skb, int vring_index)
{
- struct ethhdr *eth = (void *)skb->data;
+ u8 *da = wil_skb_get_da(skb);
int cid = wil->ring2cid_tid[vring_index][0];
- ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
+ ether_addr_copy(da, wil->sta[cid].addr);
}
static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
@@ -1340,8 +1361,7 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
struct sk_buff *skb2;
int i;
u8 cid;
- struct ethhdr *eth = (void *)skb->data;
- char *src = eth->h_source;
+ const u8 *src = wil_skb_get_sa(skb);
struct wil_ring_tx_data *txdata, *txdata2;
int min_ring_id = wil_get_min_tx_ring_id(wil);
@@ -1353,7 +1373,7 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
continue;
cid = wil->ring2cid_tid[i][0];
- if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ if (cid >= max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
@@ -1381,7 +1401,7 @@ found:
if (!v2->va || txdata2->mid != vif->mid)
continue;
cid = wil->ring2cid_tid[i][0];
- if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ if (cid >= max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
@@ -2032,6 +2052,10 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
check_stop, vif->mid, vif->net_queue_stopped);
+ if (ring && drop_if_ring_full)
+ /* no need to stop/wake net queues */
+ return;
+
if (check_stop == vif->net_queue_stopped)
/* net queues already in desired state */
return;
@@ -2095,8 +2119,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = vif_to_wil(vif);
- struct ethhdr *eth = (void *)skb->data;
- bool bcast = is_multicast_ether_addr(eth->h_dest);
+ const u8 *da = wil_skb_get_da(skb);
+ bool bcast = is_multicast_ether_addr(da);
struct wil_ring *ring;
static bool pr_once_fw;
int rc;
@@ -2143,7 +2167,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ring = wil_find_tx_ucast(wil, vif, skb);
}
if (unlikely(!ring)) {
- wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
+ wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
goto drop;
}
/* set up vring entry */
@@ -2157,6 +2181,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
case -ENOMEM:
+ if (drop_if_ring_full)
+ goto drop;
return NETDEV_TX_BUSY;
default:
break; /* goto drop; */
@@ -2228,7 +2254,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
used_before_complete = wil_ring_used_tx(vring);
- if (cid < WIL6210_MAX_CID)
+ if (cid < max_assoc_sta)
stats = &wil->sta[cid].stats;
while (!wil_ring_is_empty(vring)) {
@@ -2337,7 +2363,7 @@ static void wil_get_reorder_params(struct wil6210_priv *wil,
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
*tid = wil_rxdesc_tid(d);
- *cid = wil_rxdesc_cid(d);
+ *cid = wil_skb_get_cid(skb);
*mid = wil_rxdesc_mid(d);
*seq = wil_rxdesc_seq(d);
*mcast = wil_rxdesc_mcast(d);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 9d83be481839..c0da1340c2d2 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -458,6 +458,18 @@ union wil_ring_desc {
union wil_rx_desc rx;
} __packed;
+struct packet_rx_info {
+ u8 cid;
+};
+
+/* this struct will be stored in the skb cb buffer
+ * max length of the struct is limited to 48 bytes
+ */
+struct skb_rx_info {
+ struct vring_rx_desc rx_desc;
+ struct packet_rx_info rx_info;
+};
+
static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->mac.d0, 0, 3);
@@ -530,11 +542,6 @@ static inline int wil_rxdesc_mcast(struct vring_rx_desc *d)
return WIL_GET_BITS(d->mac.d1, 13, 14);
}
-static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d)
-{
- return WIL_GET_BITS(d->dma.d0, 16, 29);
-}
-
static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
{
return (void *)skb->cb;
@@ -560,11 +567,25 @@ static inline int wil_ring_is_full(struct wil_ring *ring)
return wil_ring_next_tail(ring) == ring->swhead;
}
-static inline bool wil_need_txstat(struct sk_buff *skb)
+static inline u8 *wil_skb_get_da(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (void *)skb->data;
+
+ return eth->h_dest;
+}
+
+static inline u8 *wil_skb_get_sa(struct sk_buff *skb)
{
struct ethhdr *eth = (void *)skb->data;
- return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
+ return eth->h_source;
+}
+
+static inline bool wil_need_txstat(struct sk_buff *skb)
+{
+ const u8 *da = wil_skb_get_da(skb);
+
+ return is_unicast_ether_addr(da) && skb->sk &&
(skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}
@@ -610,6 +631,20 @@ static inline bool wil_val_in_range(int val, int min, int max)
return val >= min && val < max;
}
+static inline u8 wil_skb_get_cid(struct sk_buff *skb)
+{
+ struct skb_rx_info *skb_rx_info = (void *)skb->cb;
+
+ return skb_rx_info->rx_info.cid;
+}
+
+static inline void wil_skb_set_cid(struct sk_buff *skb, u8 cid)
+{
+ struct skb_rx_info *skb_rx_info = (void *)skb->cb;
+
+ skb_rx_info->rx_info.cid = cid;
+}
+
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
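Note (illustrative, not part of the patch): the struct skb_rx_info kept in skb->cb above relies on skb->cb being the fixed 48-byte per-packet scratch area of struct sk_buff, which is why wil_vring_reap_rx() now checks sizeof(struct skb_rx_info) against sizeof(skb->cb) at build time. A minimal guard in the same spirit, with a hypothetical accessor name:

#include <linux/skbuff.h>
#include <linux/build_bug.h>

/* Hypothetical accessor, for illustration only: fails the build if the
 * per-packet rx metadata ever outgrows the 48-byte skb->cb area.
 */
static inline struct skb_rx_info *wil_skb_rx_info(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
	return (struct skb_rx_info *)skb->cb;
}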
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 05a8348bd7b9..c38773878ae3 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -99,7 +99,7 @@ static int wil_sring_alloc(struct wil6210_priv *wil,
/* Status messages are allocated and initialized to 0. This is necessary
* since DR bit should be initialized to 0.
*/
- sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
+ sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
if (!sring->va)
return -ENOMEM;
@@ -381,15 +381,15 @@ static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
if (!ring->ctx)
goto err;
- ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
+ ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
if (!ring->va)
goto err_free_ctx;
if (ring->is_rx) {
sz = sizeof(*ring->edma_rx_swtail.va);
ring->edma_rx_swtail.va =
- dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
- GFP_KERNEL);
+ dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
+ GFP_KERNEL);
if (!ring->edma_rx_swtail.va)
goto err_free_va;
}
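Aside, assuming current DMA-API semantics: dma_alloc_coherent() always returns zeroed memory, so replacing the removed dma_zalloc_coherent() here (and in the b43/b43legacy ring allocations further below) is a drop-in change; the status ring is still zero-initialized, which the comment above says the DR bit requires.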
@@ -727,7 +727,7 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_ring_free_edma(wil, ring);
- wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[ring_id][0] = max_assoc_sta;
wil->ring2cid_tid[ring_id][1] = 0;
out:
@@ -932,7 +932,7 @@ again:
eop = wil_rx_status_get_eop(msg);
cid = wil_rx_status_get_cid(msg);
- if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
+ if (unlikely(!wil_val_in_range(cid, 0, max_assoc_sta))) {
wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
cid, sring->swhead);
rxdata->skipping = true;
@@ -1137,7 +1137,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
/* Total number of completed descriptors in all descriptor rings */
int desc_cnt = 0;
int cid;
- struct wil_net_stats *stats = NULL;
+ struct wil_net_stats *stats;
struct wil_tx_enhanced_desc *_d;
unsigned int ring_id;
unsigned int num_descs;
@@ -1187,8 +1187,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
ndev = vif_to_ndev(vif);
cid = wil->ring2cid_tid[ring_id][0];
- if (cid < WIL6210_MAX_CID)
- stats = &wil->sta[cid].stats;
+ stats = (cid < max_assoc_sta ? &wil->sta[cid].stats : NULL);
wil_dbg_txrx(wil,
"tx_status: completed desc_ring (%d), num_descs (%d)\n",
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 0f3be3ffc6a2..e1b1039b13ab 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -38,6 +38,8 @@ extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
extern bool ftm_mode;
+extern bool drop_if_ring_full;
+extern uint max_assoc_sta;
struct wil6210_priv;
struct wil6210_vif;
@@ -89,7 +91,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_RING_SIZE_ORDER_MIN (5)
#define WIL_RING_SIZE_ORDER_MAX (15)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
-#define WIL6210_MAX_CID (8) /* HW limit */
+#define WIL6210_MAX_CID (20) /* max number of stations */
+#define WIL6210_RX_DESC_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
@@ -457,7 +460,7 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
*/
static inline bool wil_cid_valid(u8 cid)
{
- return cid < WIL6210_MAX_CID;
+ return (cid >= 0 && cid < max_assoc_sta);
}
struct wil6210_mbox_ring {
@@ -791,6 +794,7 @@ struct wil_halp {
struct mutex lock; /* protect halp ref_cnt */
unsigned int ref_cnt;
struct completion comp;
+ u8 handle_icr;
};
struct wil_blob_wrapper {
@@ -1235,7 +1239,7 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
int wmi_addba(struct wil6210_priv *wil, u8 mid,
u8 ringid, u8 size, u16 timeout);
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason);
-int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason);
+int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason);
int wmi_addba_rx_resp(struct wil6210_priv *wil,
u8 mid, u8 cid, u8 tid, u8 token,
u16 status, bool amsdu, u16 agg_wsize, u16 timeout);
@@ -1248,8 +1252,8 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid,
const u8 *mac, enum nl80211_iftype iftype);
int wmi_port_delete(struct wil6210_priv *wil, u8 mid);
int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval);
-int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
- u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
+ u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 345f05969190..bda4a9712f91 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -24,8 +24,9 @@
#include "wmi.h"
#include "trace.h"
-static uint max_assoc_sta = WIL6210_MAX_CID;
-module_param(max_assoc_sta, uint, 0644);
+/* set the default max assoc sta to max supported by driver */
+uint max_assoc_sta = WIL6210_MAX_CID;
+module_param(max_assoc_sta, uint, 0444);
MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
int agg_wsize; /* = 0; */
@@ -770,6 +771,7 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
struct wil6210_priv *wil = vif_to_wil(vif);
struct wiphy *wiphy = wil_to_wiphy(wil);
struct wmi_ready_event *evt = d;
+ u8 fw_max_assoc_sta;
wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
wil->fw_version, le32_to_cpu(evt->sw_version),
@@ -787,6 +789,25 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
evt->rfc_read_calib_result);
wil->fw_calib_result = evt->rfc_read_calib_result;
}
+
+ fw_max_assoc_sta = WIL6210_RX_DESC_MAX_CID;
+ if (len > offsetof(struct wmi_ready_event, max_assoc_sta) &&
+ evt->max_assoc_sta > 0) {
+ fw_max_assoc_sta = evt->max_assoc_sta;
+ wil_dbg_wmi(wil, "fw reported max assoc sta %d\n",
+ fw_max_assoc_sta);
+
+ if (fw_max_assoc_sta > WIL6210_MAX_CID) {
+ wil_dbg_wmi(wil,
+ "fw max assoc sta %d exceeds max driver supported %d\n",
+ fw_max_assoc_sta, WIL6210_MAX_CID);
+ fw_max_assoc_sta = WIL6210_MAX_CID;
+ }
+ }
+
+ max_assoc_sta = min_t(uint, max_assoc_sta, fw_max_assoc_sta);
+ wil_dbg_wmi(wil, "setting max assoc sta to %d\n", max_assoc_sta);
+
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
/* let the reset sequence continue */
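A worked example of the clamping above, with assumed values: leaving the module parameter at its default of WIL6210_MAX_CID (20) while the firmware ready event reports max_assoc_sta = 10 gives min(20, 10) = 10; an older firmware whose event lacks the field, or reports 0, falls back to WIL6210_RX_DESC_MAX_CID (8); and a firmware reporting more than 20 is first capped to 20 before the min() is applied.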
@@ -952,7 +973,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
evt->assoc_req_len, evt->assoc_resp_len);
return;
}
- if (evt->cid >= WIL6210_MAX_CID) {
+ if (evt->cid >= max_assoc_sta) {
wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
return;
}
@@ -1271,9 +1292,16 @@ static void wmi_evt_addba_rx_req(struct wil6210_vif *vif, int id,
void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
+ u8 cid, tid;
struct wmi_rcp_addba_req_event *evt = d;
- wil_addba_rx_request(wil, vif->mid, evt->cidxtid, evt->dialog_token,
+ if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) {
+ parse_cidxtid(evt->cidxtid, &cid, &tid);
+ } else {
+ cid = evt->cid;
+ tid = evt->tid;
+ }
+ wil_addba_rx_request(wil, vif->mid, cid, tid, evt->dialog_token,
evt->ba_param_set, evt->ba_timeout,
evt->ba_seq_ctrl);
}
@@ -1289,7 +1317,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
struct wil_tid_ampdu_rx *r;
might_sleep();
- parse_cidxtid(evt->cidxtid, &cid, &tid);
+
+ if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) {
+ parse_cidxtid(evt->cidxtid, &cid, &tid);
+ } else {
+ cid = evt->cid;
+ tid = evt->tid;
+ }
wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n",
vif->mid, cid, tid,
evt->from_initiator ? "originator" : "recipient",
@@ -1404,7 +1438,7 @@ static void wil_link_stats_store_basic(struct wil6210_vif *vif,
u8 cid = basic->cid;
struct wil_sta_info *sta;
- if (cid < 0 || cid >= WIL6210_MAX_CID) {
+ if (cid < 0 || cid >= max_assoc_sta) {
wil_err(wil, "invalid cid %d\n", cid);
return;
}
@@ -1554,7 +1588,7 @@ static int wil_find_cid_ringid_sta(struct wil6210_priv *wil,
continue;
lcid = wil->ring2cid_tid[i][0];
- if (lcid >= WIL6210_MAX_CID) /* skip BCAST */
+ if (lcid >= max_assoc_sta) /* skip BCAST */
continue;
wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid);
@@ -2120,10 +2154,9 @@ int wmi_pcp_start(struct wil6210_vif *vif,
if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||
(cmd.pcp_max_assoc_sta <= 0)) {
- wil_info(wil,
- "Requested connection limit %u, valid values are 1 - %d. Setting to %d\n",
- max_assoc_sta, WIL6210_MAX_CID, WIL6210_MAX_CID);
- cmd.pcp_max_assoc_sta = WIL6210_MAX_CID;
+ wil_err(wil, "unexpected max_assoc_sta %d\n",
+ cmd.pcp_max_assoc_sta);
+ return -EOPNOTSUPP;
}
if (disable_ap_sme &&
@@ -2516,7 +2549,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
if (ch)
cmd.sniffer_cfg.channel = ch->hw_value - 1;
cmd.sniffer_cfg.phy_info_mode =
- cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+ cpu_to_le32(WMI_SNIFFER_PHY_INFO_DISABLED);
cmd.sniffer_cfg.phy_support =
cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS);
@@ -2651,15 +2684,22 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
}
-int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason)
+int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason)
{
struct wmi_rcp_delba_cmd cmd = {
- .cidxtid = cidxtid,
.reason = cpu_to_le16(reason),
};
- wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf,
- (cidxtid >> 4) & 0xf, reason);
+ if (cid >= WIL6210_RX_DESC_MAX_CID) {
+ cmd.cidxtid = CIDXTID_EXTENDED_CID_TID;
+ cmd.cid = cid;
+ cmd.tid = tid;
+ } else {
+ cmd.cidxtid = mk_cidxtid(cid, tid);
+ }
+
+ wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cid,
+ tid, reason);
return wmi_send(wil, WMI_RCP_DELBA_CMDID, mid, &cmd, sizeof(cmd));
}
@@ -2670,7 +2710,6 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
{
int rc;
struct wmi_rcp_addba_resp_cmd cmd = {
- .cidxtid = mk_cidxtid(cid, tid),
.dialog_token = token,
.status_code = cpu_to_le16(status),
/* bit 0: A-MSDU supported
@@ -2689,6 +2728,14 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
.evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
};
+ if (cid >= WIL6210_RX_DESC_MAX_CID) {
+ cmd.cidxtid = CIDXTID_EXTENDED_CID_TID;
+ cmd.cid = cid;
+ cmd.tid = tid;
+ } else {
+ cmd.cidxtid = mk_cidxtid(cid, tid);
+ }
+
wil_dbg_wmi(wil,
"ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
mid, cid, tid, agg_wsize,
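The if/else blocks repeated above (wil_vring_init_tx(), wmi_delba_rx(), wmi_addba_rx_resp(), and the ADDBA/DELBA event handlers) all follow one convention: the legacy cidxtid byte packs CID in its low nibble and TID in its high nibble and is only used for CIDs below WIL6210_RX_DESC_MAX_CID (8); larger CIDs are sent with the CIDXTID_EXTENDED_CID_TID marker plus explicit cid/tid fields. A minimal sketch of that convention; the helper name is hypothetical, not part of the driver:

/* Hypothetical helper, for illustration only */
static void wil_fill_cidxtid(u8 cid, u8 tid,
			     u8 *cidxtid, u8 *ext_cid, u8 *ext_tid)
{
	if (cid < WIL6210_RX_DESC_MAX_CID) {
		/* legacy packing: low nibble = CID, high nibble = TID */
		*cidxtid = mk_cidxtid(cid, tid);
	} else {
		/* extended encoding for CIDs that do not fit the legacy field */
		*cidxtid = CIDXTID_EXTENDED_CID_TID;
		*ext_cid = cid;
		*ext_tid = tid;
	}
}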
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index 77046384dd80..976c8ec4e992 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -668,15 +668,13 @@ static void b43_remove_dynamic_debug(struct b43_wldev *dev)
static void b43_add_dynamic_debug(struct b43_wldev *dev)
{
struct b43_dfsentry *e = dev->dfsentry;
- struct dentry *d;
-#define add_dyn_dbg(name, id, initstate) do { \
- e->dyn_debug[id] = (initstate); \
- d = debugfs_create_bool(name, 0600, e->subdir, \
- &(e->dyn_debug[id])); \
- if (!IS_ERR(d)) \
- e->dyn_debug_dentries[id] = d; \
- } while (0)
+#define add_dyn_dbg(name, id, initstate) do { \
+ e->dyn_debug[id] = (initstate); \
+ e->dyn_debug_dentries[id] = \
+ debugfs_create_bool(name, 0600, e->subdir, \
+ &(e->dyn_debug[id])); \
+ } while (0)
add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, false);
add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, false);
@@ -718,19 +716,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy));
e->subdir = debugfs_create_dir(devdir, rootdir);
- if (!e->subdir || IS_ERR(e->subdir)) {
- if (e->subdir == ERR_PTR(-ENODEV)) {
- b43dbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not "
- "enabled in kernel config\n");
- } else {
- b43err(dev->wl, "debugfs: cannot create %s directory\n",
- devdir);
- }
- dev->dfsentry = NULL;
- kfree(log->log);
- kfree(e);
- return;
- }
e->mmio16read_next = 0xFFFF; /* invalid address */
e->mmio32read_next = 0xFFFF; /* invalid address */
@@ -741,13 +726,10 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
- struct dentry *d; \
- d = debugfs_create_file(__stringify(name), \
+ e->file_##name.dentry = \
+ debugfs_create_file(__stringify(name), \
mode, e->subdir, dev, \
&fops_##name.fops); \
- e->file_##name.dentry = NULL; \
- if (!IS_ERR(d)) \
- e->file_##name.dentry = d; \
} while (0)
@@ -818,8 +800,6 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
void b43_debugfs_init(void)
{
rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR(rootdir))
- rootdir = NULL;
}
void b43_debugfs_exit(void)
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index dfc4c34298d4..b34e51933257 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
- ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
- ring_mem_size, &(ring->dmabase),
- GFP_KERNEL);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ ring_mem_size, &(ring->dmabase),
+ GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index 82ef56ed7ca1..8150adee3e34 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -361,15 +361,13 @@ static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev)
static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev)
{
struct b43legacy_dfsentry *e = dev->dfsentry;
- struct dentry *d;
-#define add_dyn_dbg(name, id, initstate) do { \
- e->dyn_debug[id] = (initstate); \
- d = debugfs_create_bool(name, 0600, e->subdir, \
- &(e->dyn_debug[id])); \
- if (!IS_ERR(d)) \
- e->dyn_debug_dentries[id] = d; \
- } while (0)
+#define add_dyn_dbg(name, id, initstate) do { \
+ e->dyn_debug[id] = (initstate); \
+ e->dyn_debug_dentries[id] = \
+ debugfs_create_bool(name, 0600, e->subdir, \
+ &(e->dyn_debug[id])); \
+ } while (0)
add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, false);
add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, false);
@@ -408,29 +406,14 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev)
snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy));
e->subdir = debugfs_create_dir(devdir, rootdir);
- if (!e->subdir || IS_ERR(e->subdir)) {
- if (e->subdir == ERR_PTR(-ENODEV)) {
- b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not "
- "enabled in kernel config\n");
- } else {
- b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n",
- devdir);
- }
- dev->dfsentry = NULL;
- kfree(log->log);
- kfree(e);
- return;
- }
#define ADD_FILE(name, mode) \
do { \
- struct dentry *d; \
- d = debugfs_create_file(__stringify(name), \
+ e->file_##name.dentry = \
+ debugfs_create_file(__stringify(name), \
mode, e->subdir, dev, \
&fops_##name.fops); \
e->file_##name.dentry = NULL; \
- if (!IS_ERR(d)) \
- e->file_##name.dentry = d; \
} while (0)
@@ -492,8 +475,6 @@ void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev,
void b43legacy_debugfs_init(void)
{
rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR(rootdir))
- rootdir = NULL;
}
void b43legacy_debugfs_exit(void)
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 1b1da7d83652..2ce1537d983c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -331,9 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
/* GFP flags must match the flags in free_ringmemory()! */
- ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
- B43legacy_DMA_RINGMEMSIZE,
- &(ring->dmabase), GFP_KERNEL);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ B43legacy_DMA_RINGMEMSIZE,
+ &(ring->dmabase), GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index 22fd95a736a8..f7cf3e5f4849 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -16,8 +16,8 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y += \
- -Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \
- -Idrivers/net/wireless/broadcom/brcm80211/include
+ -I $(srctree)/$(src) \
+ -I $(srctree)/$(src)/../include
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 1068a2a4494c..73d3c1a0a7c9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -178,8 +178,8 @@ brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
*fwerr = 0;
ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
if (ret < 0) {
- brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
- ret);
+ bphy_err(drvr, "brcmf_proto_bcdc_msg failed w/status %d\n",
+ ret);
goto done;
}
@@ -195,9 +195,9 @@ retry:
if ((id < bcdc->reqid) && (++retries < RETRIES))
goto retry;
if (id != bcdc->reqid) {
- brcmf_err("%s: unexpected request id %d (expected %d)\n",
- brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id,
- bcdc->reqid);
+ bphy_err(drvr, "%s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id,
+ bcdc->reqid);
ret = -EINVAL;
goto done;
}
@@ -245,9 +245,9 @@ brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
if (id != bcdc->reqid) {
- brcmf_err("%s: unexpected request id %d (expected %d)\n",
- brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id,
- bcdc->reqid);
+ bphy_err(drvr, "%s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id,
+ bcdc->reqid);
ret = -EINVAL;
goto done;
}
@@ -312,8 +312,8 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws,
}
if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) !=
BCDC_PROTO_VER) {
- brcmf_err("%s: non-BCDC packet received, flags 0x%x\n",
- brcmf_ifname(tmp_if), h->flags);
+ bphy_err(drvr, "%s: non-BCDC packet received, flags 0x%x\n",
+ brcmf_ifname(tmp_if), h->flags);
return -EBADE;
}
@@ -460,7 +460,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
/* ensure that the msg buf directly follows the cdc msg struct */
if ((unsigned long)(&bcdc->msg + 1) != (unsigned long)bcdc->buf) {
- brcmf_err("struct brcmf_proto_bcdc is not correctly defined\n");
+ bphy_err(drvr, "struct brcmf_proto_bcdc is not correctly defined\n");
goto fail;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index d64bf233b12c..ec129864cc9c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -315,7 +315,7 @@ static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
/* bail out as things are really fishy here */
WARN(1, "invalid sdio function number: %d\n", func->num);
err = -ENOMEDIUM;
- };
+ }
if (err == -ENOMEDIUM)
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index c4965184cdf3..3d441c5c745c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -90,6 +90,7 @@ struct brcmf_bus_ops {
int (*get_memdump)(struct device *dev, void *data, size_t len);
int (*get_fwname)(struct device *dev, const char *ext,
unsigned char *fw_name);
+ void (*debugfs_create)(struct device *dev);
};
@@ -235,6 +236,15 @@ int brcmf_bus_get_fwname(struct brcmf_bus *bus, const char *ext,
return bus->ops->get_fwname(bus->dev, ext, fw_name);
}
+static inline
+void brcmf_bus_debugfs_create(struct brcmf_bus *bus)
+{
+ if (!bus->ops->debugfs_create)
+ return;
+
+ return bus->ops->debugfs_create(bus->dev);
+}
+
/*
* interface functions from common layer
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 35301237d435..e92f6351bd22 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -457,6 +457,7 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
static int
send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key)
{
+ struct brcmf_pub *drvr = ifp->drvr;
int err;
struct brcmf_wsec_key_le key_le;
@@ -468,7 +469,7 @@ send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key)
sizeof(key_le));
if (err)
- brcmf_err("wsec_key error (%d)\n", err);
+ bphy_err(drvr, "wsec_key error (%d)\n", err);
return err;
}
@@ -508,6 +509,7 @@ static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr)
static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_mbss_ssid_le mbss_ssid_le;
int bsscfgidx;
int err;
@@ -524,7 +526,7 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le,
sizeof(mbss_ssid_le));
if (err < 0)
- brcmf_err("setting ssid failed %d\n", err);
+ bphy_err(drvr, "setting ssid failed %d\n", err);
return err;
}
@@ -542,6 +544,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_vif *vif;
int err;
@@ -567,7 +570,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
BRCMF_VIF_EVENT_TIMEOUT);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
if (!err) {
- brcmf_err("timeout occurred\n");
+ bphy_err(drvr, "timeout occurred\n");
err = -EIO;
goto fail;
}
@@ -575,7 +578,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
/* interface created in firmware */
ifp = vif->ifp;
if (!ifp) {
- brcmf_err("no if pointer provided\n");
+ bphy_err(drvr, "no if pointer provided\n");
err = -ENOENT;
goto fail;
}
@@ -583,7 +586,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
err = brcmf_net_attach(ifp, true);
if (err) {
- brcmf_err("Registering netdevice failed\n");
+ bphy_err(drvr, "Registering netdevice failed\n");
free_netdev(ifp->ndev);
goto fail;
}
@@ -614,13 +617,15 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
enum nl80211_iftype type,
struct vif_params *params)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct wireless_dev *wdev;
int err;
brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
err = brcmf_vif_add_validate(wiphy_to_cfg(wiphy), type);
if (err) {
- brcmf_err("iface validation failed: err=%d\n", err);
+ bphy_err(drvr, "iface validation failed: err=%d\n", err);
return ERR_PTR(err);
}
switch (type) {
@@ -645,8 +650,8 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
}
if (IS_ERR(wdev))
- brcmf_err("add iface %s type %d failed: err=%d\n",
- name, type, (int)PTR_ERR(wdev));
+ bphy_err(drvr, "add iface %s type %d failed: err=%d\n", name,
+ type, (int)PTR_ERR(wdev));
else
brcmf_cfg80211_update_proto_addr_mode(wdev);
@@ -661,12 +666,13 @@ static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc)
void brcmf_set_mpc(struct brcmf_if *ifp, int mpc)
{
+ struct brcmf_pub *drvr = ifp->drvr;
s32 err = 0;
if (check_vif_up(ifp->vif)) {
err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
if (err) {
- brcmf_err("fail to set mpc\n");
+ bphy_err(drvr, "fail to set mpc\n");
return;
}
brcmf_dbg(INFO, "MPC : %d\n", mpc);
@@ -677,6 +683,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp, bool aborted,
bool fw_abort)
{
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_scan_params_le params_le;
struct cfg80211_scan_request *scan_request;
u64 reqid;
@@ -711,7 +718,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
&params_le, sizeof(params_le));
if (err)
- brcmf_err("Scan abort failed\n");
+ bphy_err(drvr, "Scan abort failed\n");
}
brcmf_scan_config_mpc(ifp, 1);
@@ -756,6 +763,7 @@ static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = wdev->netdev;
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
int ret;
int err;
@@ -763,7 +771,7 @@ static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy,
err = brcmf_fil_bsscfg_data_set(ifp, "interface_remove", NULL, 0);
if (err) {
- brcmf_err("interface_remove failed %d\n", err);
+ bphy_err(drvr, "interface_remove failed %d\n", err);
goto err_unarm;
}
@@ -771,7 +779,7 @@ static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy,
ret = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_DEL,
BRCMF_VIF_EVENT_TIMEOUT);
if (!ret) {
- brcmf_err("timeout occurred\n");
+ bphy_err(drvr, "timeout occurred\n");
err = -EIO;
goto err_unarm;
}
@@ -834,6 +842,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_vif *vif = ifp->vif;
+ struct brcmf_pub *drvr = cfg->pub;
s32 infra = 0;
s32 ap = 0;
s32 err = 0;
@@ -873,14 +882,14 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type);
if (err) {
- brcmf_err("iface validation failed: err=%d\n", err);
+ bphy_err(drvr, "iface validation failed: err=%d\n", err);
return err;
}
switch (type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_WDS:
- brcmf_err("type (%d) : currently we do not support this type\n",
- type);
+ bphy_err(drvr, "type (%d) : currently we do not support this type\n",
+ type);
return -EOPNOTSUPP;
case NL80211_IFTYPE_ADHOC:
infra = 0;
@@ -908,7 +917,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
} else {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra);
if (err) {
- brcmf_err("WLC_SET_INFRA error (%d)\n", err);
+ bphy_err(drvr, "WLC_SET_INFRA error (%d)\n", err);
err = -EAGAIN;
goto done;
}
@@ -999,6 +1008,7 @@ static s32
brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
struct cfg80211_scan_request *request)
{
+ struct brcmf_pub *drvr = cfg->pub;
s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
offsetof(struct brcmf_escan_params_le, params_le);
struct brcmf_escan_params_le *params;
@@ -1030,7 +1040,7 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
if (err == -EBUSY)
brcmf_dbg(INFO, "system busy : escan canceled\n");
else
- brcmf_err("error (%d)\n", err);
+ bphy_err(drvr, "error (%d)\n", err);
}
kfree(params);
@@ -1067,6 +1077,7 @@ static s32
brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_vif *vif;
s32 err = 0;
@@ -1076,21 +1087,22 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return -EIO;
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
- brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
+ bphy_err(drvr, "Scanning already: status (%lu)\n",
+ cfg->scan_status);
return -EAGAIN;
}
if (test_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status)) {
- brcmf_err("Scanning being aborted: status (%lu)\n",
- cfg->scan_status);
+ bphy_err(drvr, "Scanning being aborted: status (%lu)\n",
+ cfg->scan_status);
return -EAGAIN;
}
if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
- brcmf_err("Scanning suppressed: status (%lu)\n",
- cfg->scan_status);
+ bphy_err(drvr, "Scanning suppressed: status (%lu)\n",
+ cfg->scan_status);
return -EAGAIN;
}
if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state)) {
- brcmf_err("Connecting: status (%lu)\n", vif->sme_state);
+ bphy_err(drvr, "Connecting: status (%lu)\n", vif->sme_state);
return -EAGAIN;
}
@@ -1124,7 +1136,7 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return 0;
scan_out:
- brcmf_err("scan error (%d)\n", err);
+ bphy_err(drvr, "scan error (%d)\n", err);
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
cfg->scan_request = NULL;
return err;
@@ -1132,36 +1144,41 @@ scan_out:
static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
s32 err = 0;
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "rtsthresh",
- rts_threshold);
+ err = brcmf_fil_iovar_int_set(ifp, "rtsthresh", rts_threshold);
if (err)
- brcmf_err("Error (%d)\n", err);
+ bphy_err(drvr, "Error (%d)\n", err);
return err;
}
static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
s32 err = 0;
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "fragthresh",
+ err = brcmf_fil_iovar_int_set(ifp, "fragthresh",
frag_threshold);
if (err)
- brcmf_err("Error (%d)\n", err);
+ bphy_err(drvr, "Error (%d)\n", err);
return err;
}
static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
s32 err = 0;
u32 cmd = (l ? BRCMF_C_SET_LRL : BRCMF_C_SET_SRL);
- err = brcmf_fil_cmd_int_set(netdev_priv(ndev), cmd, retry);
+ err = brcmf_fil_cmd_int_set(ifp, cmd, retry);
if (err) {
- brcmf_err("cmd (%d) , error (%d)\n", cmd, err);
+ bphy_err(drvr, "cmd (%d) , error (%d)\n", cmd, err);
return err;
}
return err;
@@ -1237,6 +1254,7 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_wsec_pmk_le pmk;
int i, err;
@@ -1250,8 +1268,8 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
&pmk, sizeof(pmk));
if (err < 0)
- brcmf_err("failed to change PSK in firmware (len=%u)\n",
- pmk_len);
+ bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
+ pmk_len);
return err;
}
@@ -1259,6 +1277,7 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1268,7 +1287,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
err = brcmf_fil_cmd_data_set(vif->ifp,
BRCMF_C_DISASSOC, NULL, 0);
if (err) {
- brcmf_err("WLC_DISASSOC failed (%d)\n", err);
+ bphy_err(drvr, "WLC_DISASSOC failed (%d)\n", err);
}
if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) ||
(vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT))
@@ -1292,6 +1311,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_join_params join_params;
size_t join_params_size = 0;
s32 err = 0;
@@ -1356,7 +1376,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_iovar_int_set(ifp, "wsec", wsec);
if (err) {
- brcmf_err("wsec failed (%d)\n", err);
+ bphy_err(drvr, "wsec failed (%d)\n", err);
goto done;
}
@@ -1368,7 +1388,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, bcnprd);
if (err) {
- brcmf_err("WLC_SET_BCNPRD failed (%d)\n", err);
+ bphy_err(drvr, "WLC_SET_BCNPRD failed (%d)\n", err);
goto done;
}
@@ -1413,7 +1433,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_CHANNEL,
target_channel);
if (err) {
- brcmf_err("WLC_SET_CHANNEL failed (%d)\n", err);
+ bphy_err(drvr, "WLC_SET_CHANNEL failed (%d)\n", err);
goto done;
}
} else
@@ -1425,7 +1445,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, join_params_size);
if (err) {
- brcmf_err("WLC_SET_SSID failed (%d)\n", err);
+ bphy_err(drvr, "WLC_SET_SSID failed (%d)\n", err);
goto done;
}
@@ -1461,7 +1481,9 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
static s32 brcmf_set_wpa_version(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1473,9 +1495,9 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
else
val = WPA_AUTH_DISABLED;
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", val);
if (err) {
- brcmf_err("set wpa_auth failed (%d)\n", err);
+ bphy_err(drvr, "set wpa_auth failed (%d)\n", err);
return err;
}
sec = &profile->sec;
@@ -1486,7 +1508,9 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
static s32 brcmf_set_auth_type(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1506,9 +1530,9 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
break;
}
- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
+ err = brcmf_fil_bsscfg_int_set(ifp, "auth", val);
if (err) {
- brcmf_err("set auth failed (%d)\n", err);
+ bphy_err(drvr, "set auth failed (%d)\n", err);
return err;
}
sec = &profile->sec;
@@ -1520,7 +1544,9 @@ static s32
brcmf_set_wsec_mode(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_cfg80211_security *sec;
s32 pval = 0;
s32 gval = 0;
@@ -1543,8 +1569,8 @@ brcmf_set_wsec_mode(struct net_device *ndev,
pval = AES_ENABLED;
break;
default:
- brcmf_err("invalid cipher pairwise (%d)\n",
- sme->crypto.ciphers_pairwise[0]);
+ bphy_err(drvr, "invalid cipher pairwise (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
return -EINVAL;
}
}
@@ -1564,8 +1590,8 @@ brcmf_set_wsec_mode(struct net_device *ndev,
gval = AES_ENABLED;
break;
default:
- brcmf_err("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
}
@@ -1578,9 +1604,9 @@ brcmf_set_wsec_mode(struct net_device *ndev,
pval = AES_ENABLED;
wsec = pval | gval;
- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
if (err) {
- brcmf_err("error (%d)\n", err);
+ bphy_err(drvr, "error (%d)\n", err);
return err;
}
@@ -1596,6 +1622,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct brcmf_pub *drvr = ifp->drvr;
s32 val;
s32 err;
const struct brcmf_tlv *rsn_ie;
@@ -1613,7 +1640,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val);
if (err) {
- brcmf_err("could not get wpa_auth (%d)\n", err);
+ bphy_err(drvr, "could not get wpa_auth (%d)\n", err);
return err;
}
if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
@@ -1627,8 +1654,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
val = WPA_AUTH_PSK;
break;
default:
- brcmf_err("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -1658,8 +1685,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
break;
default:
- brcmf_err("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
}
@@ -1705,7 +1732,7 @@ skip_mfp_config:
brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
if (err) {
- brcmf_err("could not set wpa_auth (%d)\n", err);
+ bphy_err(drvr, "could not set wpa_auth (%d)\n", err);
return err;
}
@@ -1716,6 +1743,8 @@ static s32
brcmf_set_sharedkey(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
struct brcmf_wsec_key key;
@@ -1742,7 +1771,7 @@ brcmf_set_sharedkey(struct net_device *ndev,
key.len = (u32) sme->key_len;
key.index = (u32) sme->key_idx;
if (key.len > sizeof(key.data)) {
- brcmf_err("Too long key length (%u)\n", key.len);
+ bphy_err(drvr, "Too long key length (%u)\n", key.len);
return -EINVAL;
}
memcpy(key.data, sme->key, key.len);
@@ -1755,24 +1784,24 @@ brcmf_set_sharedkey(struct net_device *ndev,
key.algo = CRYPTO_ALGO_WEP128;
break;
default:
- brcmf_err("Invalid algorithm (%d)\n",
- sme->crypto.ciphers_pairwise[0]);
+ bphy_err(drvr, "Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
return -EINVAL;
}
/* Set the new key/index */
brcmf_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n",
key.len, key.index, key.algo);
brcmf_dbg(CONN, "key \"%s\"\n", key.data);
- err = send_key_to_dongle(netdev_priv(ndev), &key);
+ err = send_key_to_dongle(ifp, &key);
if (err)
return err;
if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
brcmf_dbg(CONN, "set auth_type to shared key\n");
val = WL_AUTH_SHARED_KEY; /* shared key */
- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
+ err = brcmf_fil_bsscfg_int_set(ifp, "auth", val);
if (err)
- brcmf_err("set auth failed (%d)\n", err);
+ bphy_err(drvr, "set auth failed (%d)\n", err);
}
return err;
}
@@ -1792,6 +1821,7 @@ enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
static void brcmf_set_join_pref(struct brcmf_if *ifp,
struct cfg80211_bss_selection *bss_select)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_join_pref_params join_pref_params[2];
enum nl80211_band band;
int err, i = 0;
@@ -1830,7 +1860,7 @@ static void brcmf_set_join_pref(struct brcmf_if *ifp,
err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
sizeof(join_pref_params));
if (err)
- brcmf_err("Set join_pref error (%d)\n", err);
+ bphy_err(drvr, "Set join_pref error (%d)\n", err);
}
static s32
@@ -1841,6 +1871,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct ieee80211_channel *chan = sme->channel;
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_join_params join_params;
size_t join_params_size;
const struct brcmf_tlv *rsn_ie;
@@ -1857,7 +1888,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
return -EIO;
if (!sme->ssid) {
- brcmf_err("Invalid ssid\n");
+ bphy_err(drvr, "Invalid ssid\n");
return -EOPNOTSUPP;
}
@@ -1886,7 +1917,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
sme->ie, sme->ie_len);
if (err)
- brcmf_err("Set Assoc REQ IE Failed\n");
+ bphy_err(drvr, "Set Assoc REQ IE Failed\n");
else
brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
@@ -1907,32 +1938,32 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_set_wpa_version(ndev, sme);
if (err) {
- brcmf_err("wl_set_wpa_version failed (%d)\n", err);
+ bphy_err(drvr, "wl_set_wpa_version failed (%d)\n", err);
goto done;
}
sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type);
err = brcmf_set_auth_type(ndev, sme);
if (err) {
- brcmf_err("wl_set_auth_type failed (%d)\n", err);
+ bphy_err(drvr, "wl_set_auth_type failed (%d)\n", err);
goto done;
}
err = brcmf_set_wsec_mode(ndev, sme);
if (err) {
- brcmf_err("wl_set_set_cipher failed (%d)\n", err);
+ bphy_err(drvr, "wl_set_set_cipher failed (%d)\n", err);
goto done;
}
err = brcmf_set_key_mgmt(ndev, sme);
if (err) {
- brcmf_err("wl_set_key_mgmt failed (%d)\n", err);
+ bphy_err(drvr, "wl_set_key_mgmt failed (%d)\n", err);
goto done;
}
err = brcmf_set_sharedkey(ndev, sme);
if (err) {
- brcmf_err("brcmf_set_sharedkey failed (%d)\n", err);
+ bphy_err(drvr, "brcmf_set_sharedkey failed (%d)\n", err);
goto done;
}
@@ -1949,7 +1980,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
/* enable firmware supplicant for this interface */
err = brcmf_fil_iovar_int_set(ifp, "sup_wpa", 1);
if (err < 0) {
- brcmf_err("failed to enable fw supplicant\n");
+ bphy_err(drvr, "failed to enable fw supplicant\n");
goto done;
}
}
@@ -2044,7 +2075,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, join_params_size);
if (err)
- brcmf_err("BRCMF_C_SET_SSID failed (%d)\n", err);
+ bphy_err(drvr, "BRCMF_C_SET_SSID failed (%d)\n", err);
done:
if (err)
@@ -2057,8 +2088,10 @@ static s32
brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
u16 reason_code)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_scb_val_le scbval;
s32 err = 0;
@@ -2075,7 +2108,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_DISASSOC,
&scbval, sizeof(scbval));
if (err)
- brcmf_err("error (%d)\n", err);
+ bphy_err(drvr, "error (%d)\n", err);
brcmf_dbg(TRACE, "Exit\n");
return err;
@@ -2088,6 +2121,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
s32 err;
s32 disable;
u32 qdbm = 127;
@@ -2102,7 +2136,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
case NL80211_TX_POWER_LIMITED:
case NL80211_TX_POWER_FIXED:
if (mbm < 0) {
- brcmf_err("TX_POWER_FIXED - dbm is negative\n");
+ bphy_err(drvr, "TX_POWER_FIXED - dbm is negative\n");
err = -EINVAL;
goto done;
}
@@ -2112,7 +2146,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
qdbm |= WL_TXPWR_OVERRIDE;
break;
default:
- brcmf_err("Unsupported type %d\n", type);
+ bphy_err(drvr, "Unsupported type %d\n", type);
err = -EINVAL;
goto done;
}
@@ -2120,11 +2154,11 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
disable = WL_RADIO_SW_DISABLE << 16;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_RADIO, disable);
if (err)
- brcmf_err("WLC_SET_RADIO error (%d)\n", err);
+ bphy_err(drvr, "WLC_SET_RADIO error (%d)\n", err);
err = brcmf_fil_iovar_int_set(ifp, "qtxpower", qdbm);
if (err)
- brcmf_err("qtxpower error (%d)\n", err);
+ bphy_err(drvr, "qtxpower error (%d)\n", err);
done:
brcmf_dbg(TRACE, "Exit %d (qdbm)\n", qdbm & ~WL_TXPWR_OVERRIDE);
@@ -2135,7 +2169,9 @@ static s32
brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
s32 *dbm)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
+ struct brcmf_pub *drvr = cfg->pub;
s32 qdbm = 0;
s32 err;
@@ -2145,7 +2181,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
err = brcmf_fil_iovar_int_get(vif->ifp, "qtxpower", &qdbm);
if (err) {
- brcmf_err("error (%d)\n", err);
+ bphy_err(drvr, "error (%d)\n", err);
goto done;
}
*dbm = (qdbm & ~WL_TXPWR_OVERRIDE) / 4;
@@ -2160,6 +2196,7 @@ brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool unicast, bool multicast)
{
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
u32 index;
u32 wsec;
s32 err = 0;
@@ -2171,7 +2208,7 @@ brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
- brcmf_err("WLC_GET_WSEC error (%d)\n", err);
+ bphy_err(drvr, "WLC_GET_WSEC error (%d)\n", err);
goto done;
}
@@ -2181,7 +2218,7 @@ brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp,
BRCMF_C_SET_KEY_PRIMARY, index);
if (err)
- brcmf_err("error (%d)\n", err);
+ bphy_err(drvr, "error (%d)\n", err);
}
done:
brcmf_dbg(TRACE, "Exit\n");
@@ -2230,7 +2267,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
struct key_params *params)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_wsec_key *key;
s32 val;
s32 wsec;
@@ -2245,7 +2284,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
/* we ignore this key index in this case */
- brcmf_err("invalid key index (%d)\n", key_idx);
+ bphy_err(drvr, "invalid key index (%d)\n", key_idx);
return -EINVAL;
}
@@ -2254,7 +2293,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
mac_addr);
if (params->key_len > sizeof(key->data)) {
- brcmf_err("Too long key length (%u)\n", params->key_len);
+ bphy_err(drvr, "Too long key length (%u)\n", params->key_len);
return -EINVAL;
}
@@ -2308,7 +2347,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
break;
default:
- brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
+ bphy_err(drvr, "Invalid cipher (0x%x)\n", params->cipher);
err = -EINVAL;
goto done;
}
@@ -2319,13 +2358,13 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
- brcmf_err("get wsec error (%d)\n", err);
+ bphy_err(drvr, "get wsec error (%d)\n", err);
goto done;
}
wsec |= val;
err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
if (err) {
- brcmf_err("set wsec error (%d)\n", err);
+ bphy_err(drvr, "set wsec error (%d)\n", err);
goto done;
}
@@ -2340,9 +2379,11 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx,
void (*callback)(void *cookie,
struct key_params *params))
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct key_params params;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_security *sec;
s32 wsec;
s32 err = 0;
@@ -2356,7 +2397,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx,
err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
- brcmf_err("WLC_GET_WSEC error (%d)\n", err);
+ bphy_err(drvr, "WLC_GET_WSEC error (%d)\n", err);
/* Ignore this error, may happen during DISASSOC */
err = -EAGAIN;
goto done;
@@ -2377,7 +2418,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx,
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
} else {
- brcmf_err("Invalid algo (0x%x)\n", wsec);
+ bphy_err(drvr, "Invalid algo (0x%x)\n", wsec);
err = -EINVAL;
goto done;
}
@@ -2407,6 +2448,7 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
static void
brcmf_cfg80211_reconfigure_wep(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
s32 err;
u8 key_idx;
struct brcmf_wsec_key *key;
@@ -2423,18 +2465,18 @@ brcmf_cfg80211_reconfigure_wep(struct brcmf_if *ifp)
err = send_key_to_dongle(ifp, key);
if (err) {
- brcmf_err("Setting WEP key failed (%d)\n", err);
+ bphy_err(drvr, "Setting WEP key failed (%d)\n", err);
return;
}
err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
- brcmf_err("get wsec error (%d)\n", err);
+ bphy_err(drvr, "get wsec error (%d)\n", err);
return;
}
wsec |= WEP_ENABLED;
err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
if (err)
- brcmf_err("set wsec error (%d)\n", err);
+ bphy_err(drvr, "set wsec error (%d)\n", err);
}
static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si)
@@ -2460,6 +2502,7 @@ static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si)
static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct {
__le32 len;
struct brcmf_bss_info_le bss_le;
@@ -2475,7 +2518,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, buf,
WL_BSS_INFO_MAX);
if (err) {
- brcmf_err("Failed to get bss info (%d)\n", err);
+ bphy_err(drvr, "Failed to get bss info (%d)\n", err);
goto out_kfree;
}
si->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
@@ -2497,6 +2540,7 @@ static s32
brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
struct station_info *sinfo)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_scb_val_le scbval;
struct brcmf_pktcnt_le pktcnt;
s32 err;
@@ -2506,7 +2550,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
/* Get the current tx rate */
err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
if (err < 0) {
- brcmf_err("BRCMF_C_GET_RATE error (%d)\n", err);
+ bphy_err(drvr, "BRCMF_C_GET_RATE error (%d)\n", err);
return err;
}
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
@@ -2516,7 +2560,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, &scbval,
sizeof(scbval));
if (err) {
- brcmf_err("BRCMF_C_GET_RSSI error (%d)\n", err);
+ bphy_err(drvr, "BRCMF_C_GET_RSSI error (%d)\n", err);
return err;
}
rssi = le32_to_cpu(scbval.val);
@@ -2526,7 +2570,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_GET_PKTCNTS, &pktcnt,
sizeof(pktcnt));
if (err) {
- brcmf_err("BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err);
+ bphy_err(drvr, "BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err);
return err;
}
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS) |
@@ -2545,7 +2589,9 @@ static s32
brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_scb_val_le scb_val;
s32 err = 0;
struct brcmf_sta_info_le sta_info_le;
@@ -2574,7 +2620,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
&sta_info_le,
sizeof(sta_info_le));
if (err < 0) {
- brcmf_err("GET STA INFO failed, %d\n", err);
+ bphy_err(drvr, "GET STA INFO failed, %d\n", err);
goto done;
}
}
@@ -2643,7 +2689,8 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
&scb_val, sizeof(scb_val));
if (err) {
- brcmf_err("Could not get rssi (%d)\n", err);
+ bphy_err(drvr, "Could not get rssi (%d)\n",
+ err);
goto done;
} else {
rssi = le32_to_cpu(scb_val.val);
@@ -2664,6 +2711,7 @@ brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
s32 err;
brcmf_dbg(TRACE, "Enter, idx %d\n", idx);
@@ -2674,8 +2722,8 @@ brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
&cfg->assoclist,
sizeof(cfg->assoclist));
if (err) {
- brcmf_err("BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
- err);
+ bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
+ err);
cfg->assoclist.count = 0;
return -EOPNOTSUPP;
}
@@ -2695,6 +2743,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
brcmf_dbg(TRACE, "Enter\n");
@@ -2723,9 +2772,9 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm);
if (err) {
if (err == -ENODEV)
- brcmf_err("net_device is not ready yet\n");
+ bphy_err(drvr, "net_device is not ready yet\n");
else
- brcmf_err("error (%d)\n", err);
+ bphy_err(drvr, "error (%d)\n", err);
}
done:
brcmf_dbg(TRACE, "Exit\n");
@@ -2736,6 +2785,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg);
+ struct brcmf_pub *drvr = cfg->pub;
struct cfg80211_bss *bss;
enum nl80211_band band;
struct brcmu_chan ch;
@@ -2748,7 +2798,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
struct cfg80211_inform_bss bss_data = {};
if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
- brcmf_err("Bss info is larger than buffer. Discarding\n");
+ bphy_err(drvr, "Bss info is larger than buffer. Discarding\n");
return 0;
}
@@ -2807,6 +2857,7 @@ next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
{
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_scan_results *bss_list;
struct brcmf_bss_info_le *bi = NULL; /* must be initialized */
s32 err = 0;
@@ -2815,8 +2866,8 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
bss_list = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
if (bss_list->count != 0 &&
bss_list->version != BRCMF_BSS_INFO_VERSION) {
- brcmf_err("Version %d != WL_BSS_INFO_VERSION\n",
- bss_list->version);
+ bphy_err(drvr, "Version %d != WL_BSS_INFO_VERSION\n",
+ bss_list->version);
return -EOPNOTSUPP;
}
brcmf_dbg(SCAN, "scanned AP count (%d)\n", bss_list->count);
@@ -2833,6 +2884,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev, const u8 *bssid)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg);
+ struct brcmf_pub *drvr = cfg->pub;
struct ieee80211_channel *notify_channel;
struct brcmf_bss_info_le *bi = NULL;
struct ieee80211_supported_band *band;
@@ -2860,7 +2912,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
err = brcmf_fil_cmd_data_get(netdev_priv(ndev), BRCMF_C_GET_BSS_INFO,
buf, WL_BSS_INFO_MAX);
if (err) {
- brcmf_err("WLC_GET_BSS_INFO failed: %d\n", err);
+ bphy_err(drvr, "WLC_GET_BSS_INFO failed: %d\n", err);
goto CleanUp;
}
@@ -2914,6 +2966,7 @@ CleanUp:
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_bss_info_le *bi;
const struct brcmf_tlv *tim;
u16 beacon_interval;
@@ -2930,7 +2983,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
cfg->extra_buf, WL_EXTRA_BUF_MAX);
if (err) {
- brcmf_err("Could not get bss info %d\n", err);
+ bphy_err(drvr, "Could not get bss info %d\n", err);
goto update_bss_info_out;
}
@@ -2955,7 +3008,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
u32 var;
err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
if (err) {
- brcmf_err("wl dtim_assoc failed (%d)\n", err);
+ bphy_err(drvr, "wl dtim_assoc failed (%d)\n", err);
goto update_bss_info_out;
}
dtim_period = (u8)var;
@@ -2993,9 +3046,10 @@ static void brcmf_escan_timeout(struct timer_list *t)
{
struct brcmf_cfg80211_info *cfg =
from_timer(cfg, t, escan_timeout);
+ struct brcmf_pub *drvr = cfg->pub;
if (cfg->int_escan_map || cfg->scan_request) {
- brcmf_err("timer expired\n");
+ bphy_err(drvr, "timer expired\n");
schedule_work(&cfg->escan_timeout_work);
}
}
@@ -3043,7 +3097,8 @@ static s32
brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
- struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_cfg80211_info *cfg = drvr->config;
s32 status;
struct brcmf_escan_result_le *escan_result_le;
u32 escan_buflen;
@@ -3060,32 +3115,33 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
goto exit;
if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
- brcmf_err("scan not ready, bsscfgidx=%d\n", ifp->bsscfgidx);
+ bphy_err(drvr, "scan not ready, bsscfgidx=%d\n",
+ ifp->bsscfgidx);
return -EPERM;
}
if (status == BRCMF_E_STATUS_PARTIAL) {
brcmf_dbg(SCAN, "ESCAN Partial result\n");
if (e->datalen < sizeof(*escan_result_le)) {
- brcmf_err("invalid event data length\n");
+ bphy_err(drvr, "invalid event data length\n");
goto exit;
}
escan_result_le = (struct brcmf_escan_result_le *) data;
if (!escan_result_le) {
- brcmf_err("Invalid escan result (NULL pointer)\n");
+ bphy_err(drvr, "Invalid escan result (NULL pointer)\n");
goto exit;
}
escan_buflen = le32_to_cpu(escan_result_le->buflen);
if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
escan_buflen > e->datalen ||
escan_buflen < sizeof(*escan_result_le)) {
- brcmf_err("Invalid escan buffer length: %d\n",
- escan_buflen);
+ bphy_err(drvr, "Invalid escan buffer length: %d\n",
+ escan_buflen);
goto exit;
}
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
- brcmf_err("Invalid bss_count %d: ignoring\n",
- escan_result_le->bss_count);
+ bphy_err(drvr, "Invalid bss_count %d: ignoring\n",
+ escan_result_le->bss_count);
goto exit;
}
bss_info_le = &escan_result_le->bss_info_le;
@@ -3100,8 +3156,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
bi_length = le32_to_cpu(bss_info_le->length);
if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
- brcmf_err("Ignoring invalid bss_info length: %d\n",
- bi_length);
+ bphy_err(drvr, "Ignoring invalid bss_info length: %d\n",
+ bi_length);
goto exit;
}
@@ -3109,7 +3165,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
BIT(NL80211_IFTYPE_ADHOC))) {
if (le16_to_cpu(bss_info_le->capability) &
WLAN_CAPABILITY_IBSS) {
- brcmf_err("Ignoring IBSS result\n");
+ bphy_err(drvr, "Ignoring IBSS result\n");
goto exit;
}
}
@@ -3117,7 +3173,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
list = (struct brcmf_scan_results *)
cfg->escan_info.escan_buf;
if (bi_length > BRCMF_ESCAN_BUF_SIZE - list->buflen) {
- brcmf_err("Buffer is too small: ignoring\n");
+ bphy_err(drvr, "Buffer is too small: ignoring\n");
goto exit;
}
@@ -3276,7 +3332,8 @@ static s32
brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
- struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_cfg80211_info *cfg = drvr->config;
struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
struct cfg80211_scan_request *request = NULL;
struct wiphy *wiphy = cfg_to_wiphy(cfg);
@@ -3309,14 +3366,14 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count);
if (!result_count) {
- brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
+ bphy_err(drvr, "FALSE PNO Event. (pfn_count == 0)\n");
goto out_err;
}
netinfo_start = brcmf_get_netinfo_array(pfn_result);
datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result);
if (datalen < result_count * sizeof(*netinfo)) {
- brcmf_err("insufficient event data\n");
+ bphy_err(drvr, "insufficient event data\n");
goto out_err;
}
@@ -3363,15 +3420,16 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_sched_scan_request *req)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
brcmf_dbg(SCAN, "Enter: n_match_sets=%d n_ssids=%d\n",
req->n_match_sets, req->n_ssids);
if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
- brcmf_err("Scanning suppressed: status=%lu\n",
- cfg->scan_status);
+ bphy_err(drvr, "Scanning suppressed: status=%lu\n",
+ cfg->scan_status);
return -EAGAIN;
}
@@ -3449,7 +3507,8 @@ static s32
brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
void *data)
{
- struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_cfg80211_info *cfg = drvr->config;
struct brcmf_pno_scanresults_le *pfn_result;
struct brcmf_pno_net_info_le *netinfo;
@@ -3468,12 +3527,14 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
}
if (le32_to_cpu(pfn_result->count) < 1) {
- brcmf_err("Invalid result count, expected 1 (%d)\n",
- le32_to_cpu(pfn_result->count));
+ bphy_err(drvr, "Invalid result count, expected 1 (%d)\n",
+ le32_to_cpu(pfn_result->count));
return -EINVAL;
}
netinfo = brcmf_get_netinfo_array(pfn_result);
+ if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
+ netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len);
cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len;
cfg->wowl.nd->n_channels = 1;
@@ -3496,6 +3557,7 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_wowl_wakeind_le wake_ind_le;
struct cfg80211_wowlan_wakeup wakeup_data;
struct cfg80211_wowlan_wakeup *wakeup;
@@ -3506,7 +3568,7 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
err = brcmf_fil_iovar_data_get(ifp, "wowl_wakeind", &wake_ind_le,
sizeof(wake_ind_le));
if (err) {
- brcmf_err("Get wowl_wakeind failed, err = %d\n", err);
+ bphy_err(drvr, "Get wowl_wakeind failed, err = %d\n", err);
return;
}
@@ -3547,7 +3609,7 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
cfg->wowl.nd_data_completed,
BRCMF_ND_INFO_TIMEOUT);
if (!timeout)
- brcmf_err("No result for wowl net detect\n");
+ bphy_err(drvr, "No result for wowl net detect\n");
else
wakeup_data.net_detect = cfg->wowl.nd_info;
}
@@ -3736,6 +3798,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pmksa *pmk = &cfg->pmk_list.pmk[0];
+ struct brcmf_pub *drvr = cfg->pub;
s32 err;
u32 npmk, i;
@@ -3755,7 +3818,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
cfg->pmk_list.npmk = cpu_to_le32(npmk);
}
} else {
- brcmf_err("Too many PMKSA entries cached %d\n", npmk);
+ bphy_err(drvr, "Too many PMKSA entries cached %d\n", npmk);
return -EINVAL;
}
@@ -3778,6 +3841,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pmksa *pmk = &cfg->pmk_list.pmk[0];
+ struct brcmf_pub *drvr = cfg->pub;
s32 err;
u32 npmk, i;
@@ -3801,7 +3865,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
memset(&pmk[i], 0, sizeof(*pmk));
cfg->pmk_list.npmk = cpu_to_le32(npmk - 1);
} else {
- brcmf_err("Cache entry not found\n");
+ bphy_err(drvr, "Cache entry not found\n");
return -EINVAL;
}
@@ -3833,19 +3897,20 @@ brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
s32 err;
s32 wpa_val;
/* set auth */
err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0);
if (err < 0) {
- brcmf_err("auth error %d\n", err);
+ bphy_err(drvr, "auth error %d\n", err);
return err;
}
/* set wsec */
err = brcmf_fil_bsscfg_int_set(ifp, "wsec", 0);
if (err < 0) {
- brcmf_err("wsec error %d\n", err);
+ bphy_err(drvr, "wsec error %d\n", err);
return err;
}
/* set upper-layer auth */
@@ -3855,7 +3920,7 @@ static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp)
wpa_val = WPA_AUTH_DISABLED;
err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_val);
if (err < 0) {
- brcmf_err("wpa_auth error %d\n", err);
+ bphy_err(drvr, "wpa_auth error %d\n", err);
return err;
}
@@ -3875,6 +3940,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
const struct brcmf_vs_tlv *wpa_ie,
bool is_rsn_ie)
{
+ struct brcmf_pub *drvr = ifp->drvr;
u32 auth = 0; /* d11 open authentication */
u16 count;
s32 err = 0;
@@ -3905,13 +3971,13 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
/* check for multicast cipher suite */
if (offset + WPA_IE_MIN_OUI_LEN > len) {
err = -EINVAL;
- brcmf_err("no multicast cipher suite\n");
+ bphy_err(drvr, "no multicast cipher suite\n");
goto exit;
}
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- brcmf_err("ivalid OUI\n");
+ bphy_err(drvr, "ivalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -3933,7 +3999,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
break;
default:
err = -EINVAL;
- brcmf_err("Invalid multi cast cipher info\n");
+ bphy_err(drvr, "Invalid multi cast cipher info\n");
goto exit;
}
@@ -3944,13 +4010,13 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
/* Check for unicast suite(s) */
if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
err = -EINVAL;
- brcmf_err("no unicast cipher suite\n");
+ bphy_err(drvr, "no unicast cipher suite\n");
goto exit;
}
for (i = 0; i < count; i++) {
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- brcmf_err("ivalid OUI\n");
+ bphy_err(drvr, "ivalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -3968,7 +4034,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
pval |= AES_ENABLED;
break;
default:
- brcmf_err("Invalid unicast security info\n");
+ bphy_err(drvr, "Invalid unicast security info\n");
}
offset++;
}
@@ -3978,13 +4044,13 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
/* Check for auth key management suite(s) */
if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
err = -EINVAL;
- brcmf_err("no auth key mgmt suite\n");
+ bphy_err(drvr, "no auth key mgmt suite\n");
goto exit;
}
for (i = 0; i < count; i++) {
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- brcmf_err("ivalid OUI\n");
+ bphy_err(drvr, "ivalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -4012,7 +4078,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
wpa_auth |= WPA2_AUTH_1X_SHA256;
break;
default:
- brcmf_err("Invalid key mgmt info\n");
+ bphy_err(drvr, "Invalid key mgmt info\n");
}
offset++;
}
@@ -4054,7 +4120,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable",
wme_bss_disable);
if (err < 0) {
- brcmf_err("wme_bss_disable error %d\n", err);
+ bphy_err(drvr, "wme_bss_disable error %d\n", err);
goto exit;
}
@@ -4068,7 +4134,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
&data[offset],
WPA_IE_MIN_OUI_LEN);
if (err < 0) {
- brcmf_err("bip error %d\n", err);
+ bphy_err(drvr, "bip error %d\n", err);
goto exit;
}
}
@@ -4079,13 +4145,13 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
/* set auth */
err = brcmf_fil_bsscfg_int_set(ifp, "auth", auth);
if (err < 0) {
- brcmf_err("auth error %d\n", err);
+ bphy_err(drvr, "auth error %d\n", err);
goto exit;
}
/* set wsec */
err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
if (err < 0) {
- brcmf_err("wsec error %d\n", err);
+ bphy_err(drvr, "wsec error %d\n", err);
goto exit;
}
/* Configure MFP, this needs to go after wsec otherwise the wsec command
@@ -4094,14 +4160,14 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) {
err = brcmf_fil_bsscfg_int_set(ifp, "mfp", mfp);
if (err < 0) {
- brcmf_err("mfp error %d\n", err);
+ bphy_err(drvr, "mfp error %d\n", err);
goto exit;
}
}
/* set upper-layer auth */
err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth);
if (err < 0) {
- brcmf_err("wpa_auth error %d\n", err);
+ bphy_err(drvr, "wpa_auth error %d\n", err);
goto exit;
}
@@ -4187,6 +4253,7 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len)
{
+ struct brcmf_pub *drvr;
struct brcmf_if *ifp;
struct vif_saved_ie *saved_ie;
s32 err = 0;
@@ -4208,6 +4275,7 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
if (!vif)
return -ENODEV;
ifp = vif->ifp;
+ drvr = ifp->drvr;
saved_ie = &vif->saved_ie;
brcmf_dbg(TRACE, "bsscfgidx %d, pktflag : 0x%02X\n", ifp->bsscfgidx,
@@ -4239,13 +4307,13 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
break;
default:
err = -EPERM;
- brcmf_err("not suitable type\n");
+ bphy_err(drvr, "not suitable type\n");
goto exit;
}
if (vndr_ie_len > mgmt_ie_buf_len) {
err = -ENOMEM;
- brcmf_err("extra IE size too big\n");
+ bphy_err(drvr, "extra IE size too big\n");
goto exit;
}
@@ -4306,8 +4374,8 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
/* verify remained buf size before copy data */
if (remained_buf_len < (vndrie_info->vndrie.len +
VNDR_IE_VSIE_OFFSET)) {
- brcmf_err("no space in mgmt_ie_buf: len left %d",
- remained_buf_len);
+ bphy_err(drvr, "no space in mgmt_ie_buf: len left %d",
+ remained_buf_len);
break;
}
remained_buf_len -= (vndrie_info->ie_len +
@@ -4338,7 +4406,7 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
err = brcmf_fil_bsscfg_data_set(ifp, "vndr_ie", iovar_ie_buf,
total_ie_buf_len);
if (err)
- brcmf_err("vndr ie set error : %d\n", err);
+ bphy_err(drvr, "vndr ie set error : %d\n", err);
}
exit:
@@ -4366,13 +4434,14 @@ static s32
brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
struct cfg80211_beacon_data *beacon)
{
+ struct brcmf_pub *drvr = vif->ifp->drvr;
s32 err;
/* Set Beacon IEs to FW */
err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_BEACON_FLAG,
beacon->tail, beacon->tail_len);
if (err) {
- brcmf_err("Set Beacon IE Failed\n");
+ bphy_err(drvr, "Set Beacon IE Failed\n");
return err;
}
brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
@@ -4382,7 +4451,7 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
beacon->proberesp_ies,
beacon->proberesp_ies_len);
if (err)
- brcmf_err("Set Probe Resp IE Failed\n");
+ bphy_err(drvr, "Set Probe Resp IE Failed\n");
else
brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
@@ -4396,6 +4465,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
s32 ie_offset;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
const struct brcmf_tlv *ssid_ie;
const struct brcmf_tlv *country_ie;
struct brcmf_ssid_le ssid_le;
@@ -4491,7 +4561,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
is_11d);
if (err < 0) {
- brcmf_err("Regulatory Set Error, %d\n", err);
+ bphy_err(drvr, "Regulatory Set Error, %d\n",
+ err);
goto exit;
}
}
@@ -4499,8 +4570,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
settings->beacon_interval);
if (err < 0) {
- brcmf_err("Beacon Interval Set Error, %d\n",
- err);
+ bphy_err(drvr, "Beacon Interval Set Error, %d\n",
+ err);
goto exit;
}
}
@@ -4508,7 +4579,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
settings->dtim_period);
if (err < 0) {
- brcmf_err("DTIM Interval Set Error, %d\n", err);
+ bphy_err(drvr, "DTIM Interval Set Error, %d\n",
+ err);
goto exit;
}
}
@@ -4518,7 +4590,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0) {
- brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ bphy_err(drvr, "BRCMF_C_DOWN error %d\n",
+ err);
goto exit;
}
brcmf_fil_iovar_int_set(ifp, "apsta", 0);
@@ -4526,7 +4599,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
- brcmf_err("SET INFRA error %d\n", err);
+ bphy_err(drvr, "SET INFRA error %d\n", err);
goto exit;
}
} else if (WARN_ON(supports_11d && (is_11d != ifp->vif->is_11d))) {
@@ -4542,7 +4615,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
if (err < 0) {
- brcmf_err("setting AP mode failed %d\n", err);
+ bphy_err(drvr, "setting AP mode failed %d\n",
+ err);
goto exit;
}
if (!mbss) {
@@ -4551,14 +4625,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
*/
err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
if (err < 0) {
- brcmf_err("Set Channel failed: chspec=%d, %d\n",
- chanspec, err);
+ bphy_err(drvr, "Set Channel failed: chspec=%d, %d\n",
+ chanspec, err);
goto exit;
}
}
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0) {
- brcmf_err("BRCMF_C_UP error (%d)\n", err);
+ bphy_err(drvr, "BRCMF_C_UP error (%d)\n", err);
goto exit;
}
/* On DOWN the firmware removes the WEP keys, reconfigure
@@ -4573,14 +4647,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, sizeof(join_params));
if (err < 0) {
- brcmf_err("SET SSID error (%d)\n", err);
+ bphy_err(drvr, "SET SSID error (%d)\n", err);
goto exit;
}
if (settings->hidden_ssid) {
err = brcmf_fil_iovar_int_set(ifp, "closednet", 1);
if (err) {
- brcmf_err("closednet error (%d)\n", err);
+ bphy_err(drvr, "closednet error (%d)\n", err);
goto exit;
}
}
@@ -4589,14 +4663,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
} else if (dev_role == NL80211_IFTYPE_P2P_GO) {
err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
if (err < 0) {
- brcmf_err("Set Channel failed: chspec=%d, %d\n",
- chanspec, err);
+ bphy_err(drvr, "Set Channel failed: chspec=%d, %d\n",
+ chanspec, err);
goto exit;
}
err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
sizeof(ssid_le));
if (err < 0) {
- brcmf_err("setting ssid failed %d\n", err);
+ bphy_err(drvr, "setting ssid failed %d\n", err);
goto exit;
}
bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx);
@@ -4604,7 +4678,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
sizeof(bss_enable));
if (err < 0) {
- brcmf_err("bss_enable config failed %d\n", err);
+ bphy_err(drvr, "bss_enable config failed %d\n", err);
goto exit;
}
@@ -4627,7 +4701,9 @@ exit:
static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = cfg->pub;
s32 err;
struct brcmf_fil_bss_enable_le bss_enable;
struct brcmf_join_params join_params;
@@ -4652,13 +4728,13 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, sizeof(join_params));
if (err < 0)
- brcmf_err("SET SSID error (%d)\n", err);
+ bphy_err(drvr, "SET SSID error (%d)\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0)
- brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ bphy_err(drvr, "BRCMF_C_DOWN error %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0)
- brcmf_err("setting AP mode failed %d\n", err);
+ bphy_err(drvr, "setting AP mode failed %d\n", err);
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
brcmf_fil_iovar_int_set(ifp, "mbss", 0);
brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
@@ -4666,7 +4742,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* Bring device back up so it can be used again */
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0)
- brcmf_err("BRCMF_C_UP error %d\n", err);
+ bphy_err(drvr, "BRCMF_C_UP error %d\n", err);
brcmf_vif_clear_mgmt_ies(ifp->vif);
} else {
@@ -4675,7 +4751,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
sizeof(bss_enable));
if (err < 0)
- brcmf_err("bss_enable config failed %d\n", err);
+ bphy_err(drvr, "bss_enable config failed %d\n", err);
}
brcmf_set_mpc(ifp, 1);
brcmf_configure_arp_nd_offload(ifp, true);
@@ -4704,6 +4780,7 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
struct station_del_parameters *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_scb_val_le scbval;
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
@@ -4723,7 +4800,8 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scbval));
if (err)
- brcmf_err("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
+ bphy_err(drvr, "SCB_DEAUTHENTICATE_FOR_REASON failed %d\n",
+ err);
brcmf_dbg(TRACE, "Exit\n");
return err;
@@ -4733,6 +4811,8 @@ static int
brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac, struct station_parameters *params)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
@@ -4753,7 +4833,7 @@ brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SCB_DEAUTHORIZE,
(void *)mac, ETH_ALEN);
if (err < 0)
- brcmf_err("Setting SCB (de-)authorize failed, %d\n", err);
+ bphy_err(drvr, "Setting SCB (de-)authorize failed, %d\n", err);
return err;
}
@@ -4783,6 +4863,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct ieee80211_channel *chan = params->chan;
+ struct brcmf_pub *drvr = cfg->pub;
const u8 *buf = params->buf;
size_t len = params->len;
const struct ieee80211_mgmt *mgmt;
@@ -4803,7 +4884,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mgmt = (const struct ieee80211_mgmt *)buf;
if (!ieee80211_is_mgmt(mgmt->frame_control)) {
- brcmf_err("Driver only allows MGMT packet type\n");
+ bphy_err(drvr, "Driver only allows MGMT packet type\n");
return -EPERM;
}
@@ -4834,13 +4915,13 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
GFP_KERNEL);
} else if (ieee80211_is_action(mgmt->frame_control)) {
if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
- brcmf_err("invalid action frame length\n");
+ bphy_err(drvr, "invalid action frame length\n");
err = -EINVAL;
goto exit;
}
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
if (af_params == NULL) {
- brcmf_err("unable to allocate frame\n");
+ bphy_err(drvr, "unable to allocate frame\n");
err = -ENOMEM;
goto exit;
}
@@ -4891,6 +4972,7 @@ brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
u64 cookie)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_vif *vif;
int err = 0;
@@ -4898,7 +4980,7 @@ brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
if (vif == NULL) {
- brcmf_err("No p2p device available for probe response\n");
+ bphy_err(drvr, "No p2p device available for probe response\n");
err = -ENODEV;
goto exit;
}
@@ -4913,6 +4995,7 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = wdev->netdev;
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_if *ifp;
struct brcmu_chan ch;
enum nl80211_band band = 0;
@@ -4926,7 +5009,7 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec);
if (err) {
- brcmf_err("chanspec failed (%d)\n", err);
+ bphy_err(drvr, "chanspec failed (%d)\n", err);
return err;
}
@@ -5048,6 +5131,8 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
struct net_device *ndev, const u8 *peer,
enum nl80211_tdls_operation oper)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_if *ifp;
struct brcmf_tdls_iovar_le info;
int ret = 0;
@@ -5065,7 +5150,7 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint",
&info, sizeof(info));
if (ret < 0)
- brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret);
+ bphy_err(drvr, "tdls_endpoint iovar failed: ret=%d\n", ret);
return ret;
}
@@ -5076,6 +5161,8 @@ brcmf_cfg80211_update_conn_params(struct wiphy *wiphy,
struct cfg80211_connect_params *sme,
u32 changed)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_if *ifp;
int err;
@@ -5086,7 +5173,7 @@ brcmf_cfg80211_update_conn_params(struct wiphy *wiphy,
err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
sme->ie, sme->ie_len);
if (err)
- brcmf_err("Set Assoc REQ IE Failed\n");
+ bphy_err(drvr, "Set Assoc REQ IE Failed\n");
else
brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
@@ -5098,6 +5185,8 @@ static int
brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_gtk_rekey_data *gtk)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_gtk_keyinfo_le gtk_le;
int ret;
@@ -5112,7 +5201,7 @@ brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev,
ret = brcmf_fil_iovar_data_set(ifp, "gtk_key_info", &gtk_le,
sizeof(gtk_le));
if (ret < 0)
- brcmf_err("gtk_key_info iovar failed: ret=%d\n", ret);
+ bphy_err(drvr, "gtk_key_info iovar failed: ret=%d\n", ret);
return ret;
}
@@ -5344,6 +5433,7 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
u32 req_len;
@@ -5355,7 +5445,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
err = brcmf_fil_iovar_data_get(ifp, "assoc_info",
cfg->extra_buf, WL_ASSOC_INFO_MAX);
if (err) {
- brcmf_err("could not get assoc info (%d)\n", err);
+ bphy_err(drvr, "could not get assoc info (%d)\n", err);
return err;
}
assoc_info =
@@ -5367,7 +5457,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
cfg->extra_buf,
WL_ASSOC_INFO_MAX);
if (err) {
- brcmf_err("could not get assoc req (%d)\n", err);
+ bphy_err(drvr, "could not get assoc req (%d)\n", err);
return err;
}
conn_info->req_ie_len = req_len;
@@ -5383,7 +5473,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
cfg->extra_buf,
WL_ASSOC_INFO_MAX);
if (err) {
- brcmf_err("could not get assoc resp (%d)\n", err);
+ bphy_err(drvr, "could not get assoc resp (%d)\n", err);
return err;
}
conn_info->resp_ie_len = resp_len;
@@ -5510,6 +5600,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_pub *drvr = cfg->pub;
static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
@@ -5527,7 +5618,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
(reason == BRCMF_E_STATUS_SUCCESS)) {
if (!data) {
- brcmf_err("No IEs present in ASSOC/REASSOC_IND");
+ bphy_err(drvr, "No IEs present in ASSOC/REASSOC_IND\n");
return -EINVAL;
}
@@ -5819,6 +5910,7 @@ static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
s32 err;
u32 bcn_timeout;
__le32 roamtrigger[2];
@@ -5831,7 +5923,7 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
bcn_timeout = BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_ON;
err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
if (err) {
- brcmf_err("bcn_timeout error (%d)\n", err);
+ bphy_err(drvr, "bcn_timeout error (%d)\n", err);
goto roam_setup_done;
}
@@ -5843,7 +5935,7 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
err = brcmf_fil_iovar_int_set(ifp, "roam_off",
ifp->drvr->settings->roamoff);
if (err) {
- brcmf_err("roam_off error (%d)\n", err);
+ bphy_err(drvr, "roam_off error (%d)\n", err);
goto roam_setup_done;
}
@@ -5852,7 +5944,7 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
(void *)roamtrigger, sizeof(roamtrigger));
if (err) {
- brcmf_err("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
+ bphy_err(drvr, "WLC_SET_ROAM_TRIGGER error (%d)\n", err);
goto roam_setup_done;
}
@@ -5861,7 +5953,7 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
(void *)roam_delta, sizeof(roam_delta));
if (err) {
- brcmf_err("WLC_SET_ROAM_DELTA error (%d)\n", err);
+ bphy_err(drvr, "WLC_SET_ROAM_DELTA error (%d)\n", err);
goto roam_setup_done;
}
@@ -5872,25 +5964,26 @@ roam_setup_done:
static s32
brcmf_dongle_scantime(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
s32 err = 0;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
BRCMF_SCAN_CHANNEL_TIME);
if (err) {
- brcmf_err("Scan assoc time error (%d)\n", err);
+ bphy_err(drvr, "Scan assoc time error (%d)\n", err);
goto dongle_scantime_out;
}
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
BRCMF_SCAN_UNASSOC_TIME);
if (err) {
- brcmf_err("Scan unassoc time error (%d)\n", err);
+ bphy_err(drvr, "Scan unassoc time error (%d)\n", err);
goto dongle_scantime_out;
}
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_PASSIVE_TIME,
BRCMF_SCAN_PASSIVE_TIME);
if (err) {
- brcmf_err("Scan passive time error (%d)\n", err);
+ bphy_err(drvr, "Scan passive time error (%d)\n", err);
goto dongle_scantime_out;
}
@@ -5922,10 +6015,11 @@ static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
u32 bw_cap[])
{
- struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg);
+ struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
struct ieee80211_supported_band *band;
struct ieee80211_channel *channel;
- struct wiphy *wiphy;
struct brcmf_chanspec_list *list;
struct brcmu_chan ch;
int err;
@@ -5944,11 +6038,10 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf,
BRCMF_DCMD_MEDLEN);
if (err) {
- brcmf_err("get chanspecs error (%d)\n", err);
+ bphy_err(drvr, "get chanspecs error (%d)\n", err);
goto fail_pbuf;
}
- wiphy = cfg_to_wiphy(cfg);
band = wiphy->bands[NL80211_BAND_2GHZ];
if (band)
for (i = 0; i < band->n_channels; i++)
@@ -5968,7 +6061,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
} else if (ch.band == BRCMU_CHAN_BAND_5G) {
band = wiphy->bands[NL80211_BAND_5GHZ];
} else {
- brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
+ bphy_err(drvr, "Invalid channel Spec. 0x%x.\n",
+ ch.chspec);
continue;
}
if (!band)
@@ -5991,8 +6085,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
/* It seems firmware supports some channel we never
* considered. Something new in IEEE standard?
*/
- brcmf_err("Ignoring unexpected firmware channel %d\n",
- ch.control_ch_num);
+ bphy_err(drvr, "Ignoring unexpected firmware channel %d\n",
+ ch.control_ch_num);
continue;
}
@@ -6002,11 +6096,21 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
/* assuming the chanspecs order is HT20,
* HT40 upper, HT40 lower, and VHT80.
*/
- if (ch.bw == BRCMU_CHAN_BW_80) {
+ switch (ch.bw) {
+ case BRCMU_CHAN_BW_160:
+ channel->flags &= ~IEEE80211_CHAN_NO_160MHZ;
+ break;
+ case BRCMU_CHAN_BW_80:
channel->flags &= ~IEEE80211_CHAN_NO_80MHZ;
- } else if (ch.bw == BRCMU_CHAN_BW_40) {
+ break;
+ case BRCMU_CHAN_BW_40:
brcmf_update_bw40_channel_flag(channel, &ch);
- } else {
+ break;
+ default:
+ wiphy_warn(wiphy, "Firmware reported unsupported bandwidth %d\n",
+ ch.bw);
+ /* fall through */
+ case BRCMU_CHAN_BW_20:
/* enable the channel and disable other bandwidths
* for now as mentioned order assure they are enabled
* for subsequent chanspecs.
@@ -6038,7 +6142,8 @@ fail_pbuf:
static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
+ struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
struct ieee80211_supported_band *band;
struct brcmf_fil_bwcap_le band_bwcap;
struct brcmf_chanspec_list *list;
@@ -6084,7 +6189,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf,
BRCMF_DCMD_MEDLEN);
if (err) {
- brcmf_err("get chanspecs error (%d)\n", err);
+ bphy_err(drvr, "get chanspecs error (%d)\n", err);
kfree(pbuf);
return err;
}
@@ -6115,6 +6220,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
{
+ struct brcmf_pub *drvr = ifp->drvr;
u32 band, mimo_bwcap;
int err;
@@ -6150,7 +6256,7 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
break;
default:
- brcmf_err("invalid mimo_bw_cap value\n");
+ bphy_err(drvr, "invalid mimo_bw_cap value\n");
}
}
@@ -6225,8 +6331,9 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
- struct wiphy *wiphy;
+ struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg);
u32 nmode = 0;
u32 vhtmode = 0;
u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
@@ -6242,7 +6349,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
(void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
if (err) {
- brcmf_err("nmode error (%d)\n", err);
+ bphy_err(drvr, "nmode error (%d)\n", err);
} else {
brcmf_get_bwcap(ifp, bw_cap);
}
@@ -6252,7 +6359,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
if (err) {
- brcmf_err("rxchain error (%d)\n", err);
+ bphy_err(drvr, "rxchain error (%d)\n", err);
nchain = 1;
} else {
for (nchain = 0; rxchain; nchain++)
@@ -6262,7 +6369,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
err = brcmf_construct_chaninfo(cfg, bw_cap);
if (err) {
- brcmf_err("brcmf_construct_chaninfo failed (%d)\n", err);
+ bphy_err(drvr, "brcmf_construct_chaninfo failed (%d)\n", err);
return err;
}
@@ -6274,7 +6381,6 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
&txbf_bfr_cap);
}
- wiphy = cfg_to_wiphy(cfg);
for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) {
band = wiphy->bands[i];
if (band == NULL)
@@ -6470,12 +6576,13 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp)
{
#ifdef CONFIG_PM
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_pub *drvr = cfg->pub;
struct wiphy_wowlan_support *wowl;
wowl = kmemdup(&brcmf_wowlan_support, sizeof(brcmf_wowlan_support),
GFP_KERNEL);
if (!wowl) {
- brcmf_err("only support basic wowlan features\n");
+ bphy_err(drvr, "only support basic wowlan features\n");
wiphy->wowlan = &brcmf_wowlan_support;
return;
}
@@ -6572,7 +6679,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
sizeof(bandlist));
if (err) {
- brcmf_err("could not obtain band info: err=%d\n", err);
+ bphy_err(drvr, "could not obtain band info: err=%d\n", err);
return err;
}
/* first entry in bandlist is number of bands */
@@ -6621,6 +6728,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
{
+ struct brcmf_pub *drvr = cfg->pub;
struct net_device *ndev;
struct wireless_dev *wdev;
struct brcmf_if *ifp;
@@ -6658,7 +6766,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_FAKEFRAG, 1);
if (err) {
- brcmf_err("failed to set frameburst mode\n");
+ bphy_err(drvr, "failed to set frameburst mode\n");
goto default_conf_out;
}
@@ -6839,6 +6947,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_fil_country_le ccreq;
s32 err;
int i;
@@ -6850,8 +6959,8 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
/* ignore non-ISO3166 country codes */
for (i = 0; i < 2; i++)
if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
- brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n",
- req->alpha2[0], req->alpha2[1]);
+ bphy_err(drvr, "not an ISO3166 code (0x%02x 0x%02x)\n",
+ req->alpha2[0], req->alpha2[1]);
return;
}
@@ -6860,7 +6969,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
err = brcmf_fil_iovar_data_get(ifp, "country", &ccreq, sizeof(ccreq));
if (err) {
- brcmf_err("Country code iovar returned err = %d\n", err);
+ bphy_err(drvr, "Country code iovar returned err = %d\n", err);
return;
}
@@ -6870,7 +6979,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
err = brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
if (err) {
- brcmf_err("Firmware rejected country setting\n");
+ bphy_err(drvr, "Firmware rejected country setting\n");
return;
}
brcmf_setup_wiphybands(cfg);
@@ -6916,13 +7025,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
u16 *cap = NULL;
if (!ndev) {
- brcmf_err("ndev is invalid\n");
+ bphy_err(drvr, "ndev is invalid\n");
return NULL;
}
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
- brcmf_err("Could not allocate wiphy device\n");
+ bphy_err(drvr, "Could not allocate wiphy device\n");
return NULL;
}
@@ -6943,7 +7052,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
err = wl_init_priv(cfg);
if (err) {
- brcmf_err("Failed to init iwm_priv (%d)\n", err);
+ bphy_err(drvr, "Failed to init iwm_priv (%d)\n", err);
brcmf_free_vif(vif);
goto wiphy_out;
}
@@ -6952,7 +7061,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
/* determine d11 io type before wiphy setup */
err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION, &io_type);
if (err) {
- brcmf_err("Failed to get D11 version (%d)\n", err);
+ bphy_err(drvr, "Failed to get D11 version (%d)\n", err);
goto priv_out;
}
cfg->d11inf.io_type = (u8)io_type;
@@ -6986,13 +7095,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
#endif
err = wiphy_register(wiphy);
if (err < 0) {
- brcmf_err("Could not register wiphy device (%d)\n", err);
+ bphy_err(drvr, "Could not register wiphy device (%d)\n", err);
goto priv_out;
}
err = brcmf_setup_wiphybands(cfg);
if (err) {
- brcmf_err("Setting wiphy bands failed (%d)\n", err);
+ bphy_err(drvr, "Setting wiphy bands failed (%d)\n", err);
goto wiphy_unreg_out;
}
@@ -7010,24 +7119,24 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
err = brcmf_fweh_activate_events(ifp);
if (err) {
- brcmf_err("FWEH activation failed (%d)\n", err);
+ bphy_err(drvr, "FWEH activation failed (%d)\n", err);
goto wiphy_unreg_out;
}
err = brcmf_p2p_attach(cfg, p2pdev_forced);
if (err) {
- brcmf_err("P2P initialisation failed (%d)\n", err);
+ bphy_err(drvr, "P2P initialisation failed (%d)\n", err);
goto wiphy_unreg_out;
}
err = brcmf_btcoex_attach(cfg);
if (err) {
- brcmf_err("BT-coex initialisation failed (%d)\n", err);
+ bphy_err(drvr, "BT-coex initialisation failed (%d)\n", err);
brcmf_p2p_detach(&cfg->p2p);
goto wiphy_unreg_out;
}
err = brcmf_pno_attach(cfg);
if (err) {
- brcmf_err("PNO initialisation failed (%d)\n", err);
+ bphy_err(drvr, "PNO initialisation failed (%d)\n", err);
brcmf_btcoex_detach(cfg);
brcmf_p2p_detach(&cfg->p2p);
goto wiphy_unreg_out;
@@ -7047,7 +7156,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
/* (re-) activate FWEH event handling */
err = brcmf_fweh_activate_events(ifp);
if (err) {
- brcmf_err("FWEH activation failed (%d)\n", err);
+ bphy_err(drvr, "FWEH activation failed (%d)\n", err);
goto detach;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 1f1e95a15a17..96b8d5b3aeed 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -90,6 +90,7 @@ struct brcmf_mp_global_t brcmf_mp_global;
void brcmf_c_set_joinpref_default(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_join_pref_params join_pref_params[2];
int err;
@@ -106,7 +107,7 @@ void brcmf_c_set_joinpref_default(struct brcmf_if *ifp)
err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
sizeof(join_pref_params));
if (err)
- brcmf_err("Set join_pref error (%d)\n", err);
+ bphy_err(drvr, "Set join_pref error (%d)\n", err);
}
static int brcmf_c_download(struct brcmf_if *ifp, u16 flag,
@@ -129,7 +130,8 @@ static int brcmf_c_download(struct brcmf_if *ifp, u16 flag,
static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
{
- struct brcmf_bus *bus = ifp->drvr->bus_if;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_bus *bus = drvr->bus_if;
struct brcmf_dload_data_le *chunk_buf;
const struct firmware *clm = NULL;
u8 clm_name[BRCMF_FW_NAME_LEN];
@@ -145,11 +147,11 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
memset(clm_name, 0, sizeof(clm_name));
err = brcmf_bus_get_fwname(bus, ".clm_blob", clm_name);
if (err) {
- brcmf_err("get CLM blob file name failed (%d)\n", err);
+ bphy_err(drvr, "get CLM blob file name failed (%d)\n", err);
return err;
}
- err = request_firmware(&clm, clm_name, bus->dev);
+ err = firmware_request_nowarn(&clm, clm_name, bus->dev);
if (err) {
brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
err);
@@ -182,12 +184,12 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
} while ((datalen > 0) && (err == 0));
if (err) {
- brcmf_err("clmload (%zu byte file) failed (%d); ",
- clm->size, err);
+ bphy_err(drvr, "clmload (%zu byte file) failed (%d)\n",
+ clm->size, err);
/* Retrieve clmload_status and print */
err = brcmf_fil_iovar_int_get(ifp, "clmload_status", &status);
if (err)
- brcmf_err("get clmload_status failed (%d)\n", err);
+ bphy_err(drvr, "get clmload_status failed (%d)\n", err);
else
brcmf_dbg(INFO, "clmload_status=%d\n", status);
err = -EIO;
@@ -201,6 +203,7 @@ done:
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
u8 buf[BRCMF_DCMD_SMLEN];
struct brcmf_bus *bus;
@@ -214,7 +217,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
sizeof(ifp->mac_addr));
if (err < 0) {
- brcmf_err("Retrieving cur_etheraddr failed, %d\n", err);
+ bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
goto done;
}
memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN);
@@ -226,7 +229,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_REVINFO,
&revinfo, sizeof(revinfo));
if (err < 0) {
- brcmf_err("retrieving revision info failed, %d\n", err);
+ bphy_err(drvr, "retrieving revision info failed, %d\n", err);
strlcpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname));
} else {
ri->vendorid = le32_to_cpu(revinfo.vendorid);
@@ -260,7 +263,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* Do any CLM downloading */
err = brcmf_c_process_clm_blob(ifp);
if (err < 0) {
- brcmf_err("download CLM blob file failed, %d\n", err);
+ bphy_err(drvr, "download CLM blob file failed, %d\n", err);
goto done;
}
@@ -269,8 +272,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
strcpy(buf, "ver");
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
if (err < 0) {
- brcmf_err("Retrieving version information failed, %d\n",
- err);
+ bphy_err(drvr, "Retrieving version information failed, %d\n",
+ err);
goto done;
}
ptr = (char *)buf;
@@ -304,7 +307,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* set mpc */
err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
if (err) {
- brcmf_err("failed setting mpc\n");
+ bphy_err(drvr, "failed setting mpc\n");
goto done;
}
@@ -314,14 +317,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
BRCMF_EVENTING_MASK_LEN);
if (err) {
- brcmf_err("Get event_msgs error (%d)\n", err);
+ bphy_err(drvr, "Get event_msgs error (%d)\n", err);
goto done;
}
setbit(eventmask, BRCMF_E_IF);
err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
BRCMF_EVENTING_MASK_LEN);
if (err) {
- brcmf_err("Set event_msgs error (%d)\n", err);
+ bphy_err(drvr, "Set event_msgs error (%d)\n", err);
goto done;
}
@@ -329,8 +332,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
if (err) {
- brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
- err);
+ bphy_err(drvr, "BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
+ err);
goto done;
}
@@ -338,8 +341,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
if (err) {
- brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
- err);
+ bphy_err(drvr, "BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
+ err);
goto done;
}
@@ -350,7 +353,7 @@ done:
}
#ifndef CONFIG_BRCM_TRACING
-void __brcmf_err(const char *func, const char *fmt, ...)
+void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -359,7 +362,10 @@ void __brcmf_err(const char *func, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- pr_err("%s: %pV", func, &vaf);
+ if (bus)
+ dev_err(bus->dev, "%s: %pV", func, &vaf);
+ else
+ pr_err("%s: %pV", func, &vaf);
va_end(args);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 860a4372cb56..4fbe8791f674 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -43,6 +43,36 @@
#define BRCMF_BSSIDX_INVALID -1
+#define RXS_PBPRES BIT(2)
+
+#define D11_PHY_HDR_LEN 6
+
+struct d11rxhdr_le {
+ __le16 RxFrameSize;
+ u16 PAD;
+ __le16 PhyRxStatus_0;
+ __le16 PhyRxStatus_1;
+ __le16 PhyRxStatus_2;
+ __le16 PhyRxStatus_3;
+ __le16 PhyRxStatus_4;
+ __le16 PhyRxStatus_5;
+ __le16 RxStatus1;
+ __le16 RxStatus2;
+ __le16 RxTSFTime;
+ __le16 RxChan;
+ u8 unknown[12];
+} __packed;
+
+struct wlc_d11rxhdr {
+ struct d11rxhdr_le rxhdr;
+ __le32 tsf_l;
+ s8 rssi;
+ s8 rxpwr0;
+ s8 rxpwr1;
+ s8 do_rssi_ma;
+ s8 rxpwr[4];
+} __packed;
+
char *brcmf_ifname(struct brcmf_if *ifp)
{
if (!ifp)
@@ -60,7 +90,7 @@ struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx)
s32 bsscfgidx;
if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
- brcmf_err("ifidx %d out of range\n", ifidx);
+ bphy_err(drvr, "ifidx %d out of range\n", ifidx);
return NULL;
}
@@ -111,7 +141,9 @@ void brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable)
static void _brcmf_set_multicast_list(struct work_struct *work)
{
- struct brcmf_if *ifp;
+ struct brcmf_if *ifp = container_of(work, struct brcmf_if,
+ multicast_work);
+ struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
struct netdev_hw_addr *ha;
u32 cmd_value, cnt;
@@ -120,8 +152,6 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
u32 buflen;
s32 err;
- ifp = container_of(work, struct brcmf_if, multicast_work);
-
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
ndev = ifp->ndev;
@@ -151,7 +181,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
if (err < 0) {
- brcmf_err("Setting mcast_list failed, %d\n", err);
+ bphy_err(drvr, "Setting mcast_list failed, %d\n", err);
cmd_value = cnt ? true : cmd_value;
}
@@ -164,25 +194,25 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
*/
err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
if (err < 0)
- brcmf_err("Setting allmulti failed, %d\n", err);
+ bphy_err(drvr, "Setting allmulti failed, %d\n", err);
/*Finally, pick up the PROMISC flag */
cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
if (err < 0)
- brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
- err);
+ bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, %d\n",
+ err);
brcmf_configure_arp_nd_offload(ifp, !cmd_value);
}
#if IS_ENABLED(CONFIG_IPV6)
static void _brcmf_update_ndtable(struct work_struct *work)
{
- struct brcmf_if *ifp;
+ struct brcmf_if *ifp = container_of(work, struct brcmf_if,
+ ndoffload_work);
+ struct brcmf_pub *drvr = ifp->drvr;
int i, ret;
- ifp = container_of(work, struct brcmf_if, ndoffload_work);
-
/* clear the table in firmware */
ret = brcmf_fil_iovar_data_set(ifp, "nd_hostip_clear", NULL, 0);
if (ret) {
@@ -195,7 +225,7 @@ static void _brcmf_update_ndtable(struct work_struct *work)
&ifp->ipv6_addr_tbl[i],
sizeof(struct in6_addr));
if (ret)
- brcmf_err("add nd ip err %d\n", ret);
+ bphy_err(drvr, "add nd ip err %d\n", ret);
}
}
#else
@@ -208,6 +238,7 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct sockaddr *sa = (struct sockaddr *)addr;
+ struct brcmf_pub *drvr = ifp->drvr;
int err;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
@@ -215,7 +246,7 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data,
ETH_ALEN);
if (err < 0) {
- brcmf_err("Setting cur_etheraddr failed, %d\n", err);
+ bphy_err(drvr, "Setting cur_etheraddr failed, %d\n", err);
} else {
brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
@@ -275,7 +306,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
/* Can the device send data? */
if (drvr->bus_if->state != BRCMF_BUS_UP) {
- brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
+ bphy_err(drvr, "xmit rejected state=%d\n", drvr->bus_if->state);
netif_stop_queue(ndev);
dev_kfree_skb(skb);
ret = -ENODEV;
@@ -309,8 +340,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
GFP_ATOMIC);
if (ret < 0) {
- brcmf_err("%s: failed to expand headroom\n",
- brcmf_ifname(ifp));
+ bphy_err(drvr, "%s: failed to expand headroom\n",
+ brcmf_ifname(ifp));
atomic_inc(&drvr->bus_if->stats.pktcow_failed);
goto done;
}
@@ -409,6 +440,31 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_RADIOTAP)) {
/* Do nothing */
+ } else if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_HW_RX_HDR)) {
+ struct wlc_d11rxhdr *wlc_rxhdr = (struct wlc_d11rxhdr *)skb->data;
+ struct ieee80211_radiotap_header *radiotap;
+ unsigned int offset;
+ u16 RxStatus1;
+
+ RxStatus1 = le16_to_cpu(wlc_rxhdr->rxhdr.RxStatus1);
+
+ offset = sizeof(struct wlc_d11rxhdr);
+ /* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU
+ * subframes
+ */
+ if (RxStatus1 & RXS_PBPRES)
+ offset += 2;
+ offset += D11_PHY_HDR_LEN;
+
+ skb_pull(skb, offset);
+
+ /* TODO: use RX header to fill some radiotap data */
+ radiotap = skb_push(skb, sizeof(*radiotap));
+ memset(radiotap, 0, sizeof(*radiotap));
+ radiotap->it_len = cpu_to_le16(sizeof(*radiotap));
+
+ /* TODO: 4 bytes with receive status? */
+ skb->len -= 4;
} else {
struct ieee80211_radiotap_header *radiotap;
@@ -464,7 +520,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
} else {
/* Process special event packets */
if (handle_event)
- brcmf_fweh_process_skb(ifp->drvr, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb,
+ BCMILCP_SUBTYPE_VENDOR_LONG);
brcmf_netif_rx(ifp, skb);
}
@@ -481,7 +538,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
if (brcmf_rx_hdrpull(drvr, skb, &ifp))
return;
- brcmf_fweh_process_skb(ifp->drvr, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb, 0);
brcmu_pkt_buf_free_skb(skb);
}
@@ -551,7 +608,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
/* If bus is not ready, can't continue */
if (bus_if->state != BRCMF_BUS_UP) {
- brcmf_err("failed bus is not ready\n");
+ bphy_err(drvr, "failed bus is not ready\n");
return -EAGAIN;
}
@@ -565,7 +622,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
ndev->features &= ~NETIF_F_IP_CSUM;
if (brcmf_cfg80211_up(ndev)) {
- brcmf_err("failed to bring up cfg80211\n");
+ bphy_err(drvr, "failed to bring up cfg80211\n");
return -EIO;
}
@@ -610,7 +667,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
else
err = register_netdev(ndev);
if (err != 0) {
- brcmf_err("couldn't register the net device\n");
+ bphy_err(drvr, "couldn't register the net device\n");
goto fail;
}
@@ -687,6 +744,7 @@ static const struct net_device_ops brcmf_netdev_ops_p2p = {
static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d mac=%pM\n", ifp->bsscfgidx,
@@ -699,7 +757,7 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
if (register_netdev(ndev) != 0) {
- brcmf_err("couldn't register the p2p net device\n");
+ bphy_err(drvr, "couldn't register the p2p net device\n");
goto fail;
}
@@ -728,8 +786,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
*/
if (ifp) {
if (ifidx) {
- brcmf_err("ERROR: netdev:%s already exists\n",
- ifp->ndev->name);
+ bphy_err(drvr, "ERROR: netdev:%s already exists\n",
+ ifp->ndev->name);
netif_stop_queue(ifp->ndev);
brcmf_net_detach(ifp->ndev, false);
drvr->iflist[bsscfgidx] = NULL;
@@ -787,7 +845,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
ifp = drvr->iflist[bsscfgidx];
drvr->iflist[bsscfgidx] = NULL;
if (!ifp) {
- brcmf_err("Null interface, bsscfgidx=%d\n", bsscfgidx);
+ bphy_err(drvr, "Null interface, bsscfgidx=%d\n", bsscfgidx);
return;
}
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
@@ -837,16 +895,17 @@ static int brcmf_psm_watchdog_notify(struct brcmf_if *ifp,
const struct brcmf_event_msg *evtmsg,
void *data)
{
+ struct brcmf_pub *drvr = ifp->drvr;
int err;
brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx);
- brcmf_err("PSM's watchdog has fired!\n");
+ bphy_err(drvr, "PSM's watchdog has fired!\n");
err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
evtmsg->datalen);
if (err)
- brcmf_err("Failed to get memory dump, %d\n", err);
+ bphy_err(drvr, "Failed to get memory dump, %d\n", err);
return err;
}
@@ -890,7 +949,7 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb,
ret = brcmf_fil_iovar_data_get(ifp, "arp_hostip", addr_table,
sizeof(addr_table));
if (ret) {
- brcmf_err("fail to get arp ip table err:%d\n", ret);
+ bphy_err(drvr, "fail to get arp ip table err:%d\n", ret);
return NOTIFY_OK;
}
@@ -907,7 +966,7 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb,
ret = brcmf_fil_iovar_data_set(ifp, "arp_hostip",
&ifa->ifa_address, sizeof(ifa->ifa_address));
if (ret)
- brcmf_err("add arp ip err %d\n", ret);
+ bphy_err(drvr, "add arp ip err %d\n", ret);
}
break;
case NETDEV_DOWN:
@@ -919,8 +978,8 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb,
ret = brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear",
NULL, 0);
if (ret) {
- brcmf_err("fail to clear arp ip table err:%d\n",
- ret);
+ bphy_err(drvr, "fail to clear arp ip table err:%d\n",
+ ret);
return NOTIFY_OK;
}
for (i = 0; i < ARPOL_MAX_ENTRIES; i++) {
@@ -930,8 +989,8 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb,
&addr_table[i],
sizeof(addr_table[i]));
if (ret)
- brcmf_err("add arp ip err %d\n",
- ret);
+ bphy_err(drvr, "add arp ip err %d\n",
+ ret);
}
}
break;
@@ -1100,11 +1159,12 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
brcmf_feat_debugfs_create(drvr);
brcmf_proto_debugfs_create(drvr);
+ brcmf_bus_debugfs_create(bus_if);
return 0;
fail:
- brcmf_err("failed: %d\n", ret);
+ bphy_err(drvr, "failed: %d\n", ret);
if (drvr->config) {
brcmf_cfg80211_detach(drvr->config);
drvr->config = NULL;
@@ -1156,7 +1216,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
/* Attach and link in the protocol */
ret = brcmf_proto_attach(drvr);
if (ret != 0) {
- brcmf_err("brcmf_prot_attach failed\n");
+ bphy_err(drvr, "brcmf_prot_attach failed\n");
goto fail;
}
@@ -1169,7 +1229,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
ret = brcmf_bus_started(drvr, ops);
if (ret != 0) {
- brcmf_err("dongle is not responding: err=%d\n", ret);
+ bphy_err(drvr, "dongle is not responding: err=%d\n", ret);
goto fail;
}
@@ -1269,6 +1329,7 @@ static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
int err;
err = wait_event_timeout(ifp->pend_8021x_wait,
@@ -1276,7 +1337,7 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
MAX_WAIT_FOR_8021X_TX);
if (!err)
- brcmf_err("Timed out waiting for no pending 802.1x packets\n");
+ bphy_err(drvr, "Timed out waiting for no pending 802.1x packets\n");
return !err;
}
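The new monitor branch in brcmf_netif_mon_rx() strips the firmware-supplied d11 receive header before pushing a minimal radiotap header. Its offset arithmetic, condensed here into a single helper for readability (the helper name is illustrative and does not exist in the patch):

        static unsigned int mon_rx_hdr_offset(const struct wlc_d11rxhdr *wlc_rxhdr)
        {
                u16 rxstatus1 = le16_to_cpu(wlc_rxhdr->rxhdr.RxStatus1);
                unsigned int offset = sizeof(struct wlc_d11rxhdr);

                /* MAC inserts 2 pad bytes for a4 headers, QoS or A-MSDU subframes */
                if (rxstatus1 & RXS_PBPRES)
                        offset += 2;

                /* the PHY header follows the ucode rx header */
                return offset + D11_PHY_HDR_LEN;
        }

The skb is then pulled by this amount, a zeroed radiotap header is pushed in front, and four trailing bytes (tentatively receive status, per the TODO in the hunk) are trimmed off.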
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index dcf6e27cc16f..d8085ce579f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -36,7 +36,7 @@
#define BRCMF_DCMD_MEDLEN 1536
#define BRCMF_DCMD_MAXLEN 8192
-/* IOCTL from host to device are limited in lenght. A device can only handle
+/* IOCTL from host to device are limited in length. A device can only handle
* ethernet frame size. This limitation is to be applied by protocol layer.
*/
#define BRCMF_TX_IOCTL_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index cfed0626bf5a..2998726b62c3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -45,17 +45,30 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-__printf(2, 3)
-void __brcmf_err(const char *func, const char *fmt, ...);
+struct brcmf_bus;
+
+__printf(3, 4)
+void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...);
/* Macro for error messages. When debugging / tracing the driver all error
* messages are important to us.
*/
+#ifndef brcmf_err
#define brcmf_err(fmt, ...) \
do { \
if (IS_ENABLED(CONFIG_BRCMDBG) || \
IS_ENABLED(CONFIG_BRCM_TRACING) || \
net_ratelimit()) \
- __brcmf_err(__func__, fmt, ##__VA_ARGS__); \
+ __brcmf_err(NULL, __func__, fmt, ##__VA_ARGS__);\
+ } while (0)
+#endif
+
+#define bphy_err(drvr, fmt, ...) \
+ do { \
+ if (IS_ENABLED(CONFIG_BRCMDBG) || \
+ IS_ENABLED(CONFIG_BRCM_TRACING) || \
+ net_ratelimit()) \
+ wiphy_err((drvr)->wiphy, "%s: " fmt, __func__, \
+ ##__VA_ARGS__); \
} while (0)
#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
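bphy_err() routes through wiphy_err(), so its messages are prefixed with the wiphy name (phy0, phy1, ...), which disambiguates output on systems with more than one adapter; brcmf_err() remains for code that runs before a wiphy exists. The new #ifndef guard additionally allows a bus driver to supply its own brcmf_err() that forwards a bus pointer, as pcie.c does further down in this patch. Typical call sites, for illustration only:

        /* wiphy-aware layers (cfg80211, p2p, fwsignal, msgbuf, ...) */
        bphy_err(drvr, "scan request failed, %d\n", err);

        /* early or bus-level code with no wiphy yet */
        brcmf_err("firmware download failed, %d\n", err);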
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 51d76ac45075..7535cb0d4ac0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -43,6 +43,10 @@ static const struct brcmf_dmi_data meegopad_t08_data = {
BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08"
};
+static const struct brcmf_dmi_data pov_tab_p1006w_data = {
+ BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data"
+};
+
static const struct dmi_system_id dmi_platform_data[] = {
{
/* Match for the GPDwin which unfortunately uses somewhat
@@ -81,6 +85,17 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&meegopad_t08_data,
},
+ {
+ /* Point of View TAB-P1006W-232 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ /* Note 105b is Foxcon's USB/PCI vendor id */
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
+ },
+ .driver_data = (void *)&pov_tab_p1006w_data,
+ },
{}
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 4c5a3995dc35..acca719b3907 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -103,6 +103,10 @@ static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = {
{ "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) },
/* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */
{ "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) },
+ /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 211de1679a68 */
+ { "01-801fb449", BIT(BRCMF_FEAT_MONITOR_FMT_HW_RX_HDR) },
+ /* brcmfmac4366c-pcie.bin from linux-firmware.git commit 211de1679a68 */
+ { "01-d2cbb8fd", BIT(BRCMF_FEAT_MONITOR_FMT_HW_RX_HDR) },
};
static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv)
@@ -181,13 +185,14 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
#define MAX_CAPS_BUFFER_SIZE 768
static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
char caps[MAX_CAPS_BUFFER_SIZE];
enum brcmf_feat_id id;
int i, err;
err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
if (err) {
- brcmf_err("could not get firmware cap (%d)\n", err);
+ bphy_err(drvr, "could not get firmware cap (%d)\n", err);
return;
}
@@ -212,14 +217,15 @@ static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
static int brcmf_feat_fwcap_debugfs_read(struct seq_file *seq, void *data)
{
struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
- struct brcmf_if *ifp = brcmf_get_ifp(bus_if->drvr, 0);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
char caps[MAX_CAPS_BUFFER_SIZE + 1] = { };
char *tmp;
int err;
err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
if (err) {
- brcmf_err("could not get firmware cap (%d)\n", err);
+ bphy_err(drvr, "could not get firmware cap (%d)\n", err);
return err;
}
@@ -268,9 +274,15 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
BIT(BRCMF_FEAT_WOWL_GTK);
}
}
- /* MBSS does not work for 43362 */
- if (drvr->bus_if->chip == BRCM_CC_43362_CHIP_ID)
+ /* MBSS does not work for all chips */
+ switch (drvr->bus_if->chip) {
+ case BRCM_CC_4330_CHIP_ID:
+ case BRCM_CC_43362_CHIP_ID:
ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_MBSS);
+ break;
+ default:
+ break;
+ }
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_RSDB, "rsdb_mode");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_TDLS, "tdls_enable");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MFP, "mfp");
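The brcmf_feat_fwfeat_map table force-enables feature bits for firmware builds that do not advertise them through the "cap" iovar, keyed on the firmware version string the dongle reports; the two new entries mark the 4366b/4366c PCIe images as delivering monitor frames with the hw/ucode rx header. A further entry would follow the same shape (the id below is hypothetical, shown only to illustrate the pattern):

        /* hypothetical: force the feature bit for one more firmware build */
        { "01-deadbeef", BIT(BRCMF_FEAT_MONITOR_FMT_HW_RX_HDR) },

The MBSS check was likewise turned from a single-chip comparison into a switch so additional chips can be blacklisted simply by adding case labels.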
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 0b4974df353a..5e88a7f16ad2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -35,6 +35,7 @@
* FWSUP: Firmware supplicant.
* MONITOR: firmware can pass monitor packets to host.
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
+ * MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -52,7 +53,8 @@
BRCMF_FEAT_DEF(GSCAN) \
BRCMF_FEAT_DEF(FWSUP) \
BRCMF_FEAT_DEF(MONITOR) \
- BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP)
+ BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
+ BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 14b948917a1a..8209a42dea72 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -47,7 +47,7 @@ enum nvram_parser_state {
* @state: current parser state.
* @data: input buffer being parsed.
* @nvram: output buffer with parse result.
- * @nvram_len: lenght of parse result.
+ * @nvram_len: length of parse result.
* @line: current line.
* @column: current column in line.
* @pos: byte offset in input buffer.
@@ -719,8 +719,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
break;
}
+ brcmf_chip_name(chip, chiprev, chipname, sizeof(chipname));
+
if (i == table_size) {
- brcmf_err("Unknown chipid %d [%d]\n", chip, chiprev);
+ brcmf_err("Unknown chip %s\n", chipname);
return NULL;
}
@@ -729,8 +731,6 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
if (!fwreq)
return NULL;
- brcmf_chip_name(chip, chiprev, chipname, sizeof(chipname));
-
brcmf_info("using %s for chip %s\n",
mapping_table[i].fw_base, chipname);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index e7eaa57d11d9..63e98fd583ab 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -102,7 +102,8 @@ static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
schedule_work(&fweh->event_work);
}
-static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp,
+static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
+ struct brcmf_if *ifp,
enum brcmf_fweh_event_code code,
struct brcmf_event_msg *emsg,
void *data)
@@ -117,9 +118,9 @@ static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp,
if (fweh->evt_handler[code])
err = fweh->evt_handler[code](ifp, emsg, data);
else
- brcmf_err("unhandled event %d ignored\n", code);
+ bphy_err(drvr, "unhandled event %d ignored\n", code);
} else {
- brcmf_err("no interface object\n");
+ bphy_err(drvr, "no interface object\n");
}
return err;
}
@@ -158,7 +159,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
}
if (ifevent->ifidx >= BRCMF_MAX_IFS) {
- brcmf_err("invalid interface index: %u\n", ifevent->ifidx);
+ bphy_err(drvr, "invalid interface index: %u\n", ifevent->ifidx);
return;
}
@@ -181,7 +182,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
brcmf_proto_reset_if(drvr, ifp);
- err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
+ err = brcmf_fweh_call_event_handler(drvr, ifp, emsg->event_code, emsg,
+ data);
if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
bool armed = brcmf_cfg80211_vif_event_armed(drvr->config);
@@ -268,11 +270,11 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
ifp = drvr->iflist[0];
else
ifp = drvr->iflist[emsg.bsscfgidx];
- err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
- event->data);
+ err = brcmf_fweh_call_event_handler(drvr, ifp, event->code,
+ &emsg, event->data);
if (err) {
- brcmf_err("event handler failed (%d)\n",
- event->code);
+ bphy_err(drvr, "event handler failed (%d)\n",
+ event->code);
err = 0;
}
event_free:
@@ -339,7 +341,7 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
brcmf_fweh_handler_t handler)
{
if (drvr->fweh.evt_handler[code]) {
- brcmf_err("event code %d already registered\n", code);
+ bphy_err(drvr, "event code %d already registered\n", code);
return -ENOSPC;
}
drvr->fweh.evt_handler[code] = handler;
@@ -369,6 +371,7 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
*/
int brcmf_fweh_activate_events(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
int i, err;
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
@@ -388,7 +391,7 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
eventmask, BRCMF_EVENTING_MASK_LEN);
if (err)
- brcmf_err("Set event_msgs error (%d)\n", err);
+ bphy_err(drvr, "Set event_msgs error (%d)\n", err);
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 816f80ea925b..7027243db17e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -211,7 +211,7 @@ enum brcmf_fweh_event_code {
*/
#define BRCM_OUI "\x00\x10\x18"
#define BCMILCP_BCM_SUBTYPE_EVENT 1
-
+#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
/**
* struct brcm_ethhdr - broadcom specific ether header.
@@ -266,7 +266,7 @@ struct brcmf_event {
* @status: status information.
* @reason: reason code.
* @auth_type: authentication type.
- * @datalen: lenght of event data buffer.
+ * @datalen: length of event data buffer.
* @addr: ether address.
* @ifname: interface name.
* @ifidx: interface index.
@@ -334,10 +334,10 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
- struct sk_buff *skb)
+ struct sk_buff *skb, u16 stype)
{
struct brcmf_event *event_packet;
- u16 usr_stype;
+ u16 subtype, usr_stype;
/* only process events when protocol matches */
if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
@@ -346,8 +346,16 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
if ((skb->len + ETH_HLEN) < sizeof(*event_packet))
return;
- /* check for BRCM oui match */
event_packet = (struct brcmf_event *)skb_mac_header(skb);
+
+ /* check subtype if needed */
+ if (unlikely(stype)) {
+ subtype = get_unaligned_be16(&event_packet->hdr.subtype);
+ if (subtype != stype)
+ return;
+ }
+
+ /* check for BRCM oui match */
if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0],
sizeof(event_packet->hdr.oui)))
return;
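The new stype parameter lets callers of brcmf_fweh_process_skb() require a specific BCMILCP subtype before a frame is treated as an event: the data-RX path passes BCMILCP_SUBTYPE_VENDOR_LONG so only long-format vendor frames are accepted, while the dedicated event paths pass 0 to skip the check, matching the call sites updated in core.c and msgbuf.c:

        /* data path: reject anything that is not a long vendor event frame */
        brcmf_fweh_process_skb(ifp->drvr, skb, BCMILCP_SUBTYPE_VENDOR_LONG);

        /* dedicated event-buffer path: stype == 0, no subtype filtering */
        brcmf_fweh_process_skb(ifp->drvr, skb, 0);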
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index 802d7cb73b80..8ea27489734e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -110,7 +110,7 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
s32 err, fwerr;
if (drvr->bus_if->state != BRCMF_BUS_UP) {
- brcmf_err("bus is down. we have nothing to do.\n");
+ bphy_err(drvr, "bus is down. we have nothing to do.\n");
return -EIO;
}
@@ -242,7 +242,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
buflen, true);
} else {
err = -EPERM;
- brcmf_err("Creating iovar failed\n");
+ bphy_err(drvr, "Creating iovar failed\n");
}
mutex_unlock(&drvr->proto_block);
@@ -268,7 +268,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
memcpy(data, drvr->proto_buf, len);
} else {
err = -EPERM;
- brcmf_err("Creating iovar failed\n");
+ bphy_err(drvr, "Creating iovar failed\n");
}
brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len);
@@ -366,7 +366,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
buflen, true);
} else {
err = -EPERM;
- brcmf_err("Creating bsscfg failed\n");
+ bphy_err(drvr, "Creating bsscfg failed\n");
}
mutex_unlock(&drvr->proto_block);
@@ -392,7 +392,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
memcpy(data, drvr->proto_buf, len);
} else {
err = -EPERM;
- brcmf_err("Creating bsscfg failed\n");
+ bphy_err(drvr, "Creating bsscfg failed\n");
}
brcmf_dbg(FIL, "ifidx=%d, bsscfgidx=%d, name=%s, len=%d\n", ifp->ifidx,
ifp->bsscfgidx, name, len);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 02759ebd207c..abeb305492e0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -1255,6 +1255,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
enum brcmf_fws_skb_state state, int fifo,
struct sk_buff *p)
{
+ struct brcmf_pub *drvr = fws->drvr;
int prec = 2 * fifo;
u32 *qfull_stat = &fws->stats.delayq_full_error;
struct brcmf_fws_mac_descriptor *entry;
@@ -1267,7 +1268,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
entry = brcmf_skbcb(p)->mac;
if (entry == NULL) {
- brcmf_err("no mac descriptor found for skb %p\n", p);
+ bphy_err(drvr, "no mac descriptor found for skb %p\n", p);
return -ENOENT;
}
@@ -1457,6 +1458,7 @@ static int
brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
u32 genbit, u16 seq, u8 compcnt)
{
+ struct brcmf_pub *drvr = fws->drvr;
u32 fifo;
u8 cnt = 0;
int ret;
@@ -1481,14 +1483,14 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
fws->stats.txs_host_tossed += compcnt;
else
- brcmf_err("unexpected txstatus\n");
+ bphy_err(drvr, "unexpected txstatus\n");
while (cnt < compcnt) {
ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
remove_from_hanger);
if (ret != 0) {
- brcmf_err("no packet in hanger slot: hslot=%d\n",
- hslot);
+ bphy_err(drvr, "no packet in hanger slot: hslot=%d\n",
+ hslot);
goto cont;
}
@@ -1612,12 +1614,13 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data)
{
- struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_fws_info *fws = drvr_to_fws(drvr);
int i;
u8 *credits = data;
if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
- brcmf_err("event payload too small (%d)\n", e->datalen);
+ bphy_err(drvr, "event payload too small (%d)\n", e->datalen);
return -EINVAL;
}
@@ -1681,6 +1684,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
{
+ struct brcmf_pub *drvr = ifp->drvr;
u8 *reorder_data;
u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
struct brcmf_ampdu_rx_reorder *rfi;
@@ -1695,7 +1699,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
/* validate flags and flow id */
if (flags == 0xFF) {
- brcmf_err("invalid flags...so ignore this packet\n");
+ bphy_err(drvr, "invalid flags...so ignore this packet\n");
brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1732,7 +1736,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
flow_id, max_idx);
rfi = kzalloc(buf_size, GFP_ATOMIC);
if (rfi == NULL) {
- brcmf_err("failed to alloc buffer\n");
+ bphy_err(drvr, "failed to alloc buffer\n");
brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1996,6 +2000,7 @@ static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
struct sk_buff *skb, int fifo)
{
+ struct brcmf_pub *drvr = fws->drvr;
struct brcmf_fws_mac_descriptor *entry;
struct sk_buff *pktout;
int qidx, hslot;
@@ -2009,11 +2014,11 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb);
if (pktout == NULL) {
- brcmf_err("%s queue %d full\n", entry->name, qidx);
+ bphy_err(drvr, "%s queue %d full\n", entry->name, qidx);
rc = -ENOSPC;
}
} else {
- brcmf_err("%s entry removed\n", entry->name);
+ bphy_err(drvr, "%s entry removed\n", entry->name);
rc = -ENOENT;
}
@@ -2118,7 +2123,8 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
{
- struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_fws_info *fws = drvr_to_fws(drvr);
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct ethhdr *eh = (struct ethhdr *)(skb->data);
int fifo = BRCMF_FWS_FIFO_BCMC;
@@ -2146,7 +2152,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
brcmf_fws_schedule_deq(fws);
} else {
- brcmf_err("drop skb: no hanger slot\n");
+ bphy_err(drvr, "drop skb: no hanger slot\n");
brcmf_txfinalize(ifp, skb, false);
rc = -ENOMEM;
}
@@ -2365,7 +2371,7 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
if (fws->fws_wq == NULL) {
- brcmf_err("workqueue creation failed\n");
+ bphy_err(drvr, "workqueue creation failed\n");
rc = -EBADF;
goto fail;
}
@@ -2381,13 +2387,13 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
brcmf_fws_notify_credit_map);
if (rc < 0) {
- brcmf_err("register credit map handler failed\n");
+ bphy_err(drvr, "register credit map handler failed\n");
goto fail;
}
rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT,
brcmf_fws_notify_bcmc_credit_support);
if (rc < 0) {
- brcmf_err("register bcmc credit handler failed\n");
+ bphy_err(drvr, "register bcmc credit handler failed\n");
brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
goto fail;
}
@@ -2399,7 +2405,7 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
fws->fw_signals = true;
ifp = brcmf_get_ifp(drvr, 0);
if (brcmf_fil_iovar_int_set(ifp, "tlv", tlv)) {
- brcmf_err("failed to set bdcv2 tlv signaling\n");
+ bphy_err(drvr, "failed to set bdcv2 tlv signaling\n");
fws->fcmode = BRCMF_FWS_FCMODE_NONE;
fws->fw_signals = false;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 4e8397a0cbc8..d3780eae7f19 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -134,6 +134,22 @@ struct msgbuf_completion_hdr {
__le16 flow_ring_id;
};
+/* Data struct for the MSGBUF_TYPE_GEN_STATUS */
+struct msgbuf_gen_status {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le16 write_idx;
+ __le32 rsvd0[3];
+};
+
+/* Data struct for the MSGBUF_TYPE_RING_STATUS */
+struct msgbuf_ring_status {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le16 write_idx;
+ __le16 rsvd0[5];
+};
+
struct msgbuf_rx_event {
struct msgbuf_common_hdr msg;
struct msgbuf_completion_hdr compl_hdr;
@@ -431,7 +447,7 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
brcmf_commonring_lock(commonring);
ret_ptr = brcmf_commonring_reserve_for_write(commonring);
if (!ret_ptr) {
- brcmf_err("Failed to reserve space in commonring\n");
+ bphy_err(drvr, "Failed to reserve space in commonring\n");
brcmf_commonring_unlock(commonring);
return -ENOMEM;
}
@@ -495,7 +511,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
if (!timeout) {
- brcmf_err("Timeout on response for query command\n");
+ bphy_err(drvr, "Timeout on response for query command\n");
return -EIO;
}
@@ -572,6 +588,7 @@ static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
struct brcmf_msgbuf_work_item *work)
{
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct msgbuf_tx_flowring_create_req *create;
struct brcmf_commonring *commonring;
void *ret_ptr;
@@ -587,7 +604,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
&msgbuf->flowring_dma_handle[flowid],
GFP_KERNEL);
if (!dma_buf) {
- brcmf_err("dma_alloc_coherent failed\n");
+ bphy_err(drvr, "dma_alloc_coherent failed\n");
brcmf_flowring_delete(msgbuf->flow, flowid);
return BRCMF_FLOWRING_INVALID_ID;
}
@@ -600,7 +617,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
brcmf_commonring_lock(commonring);
ret_ptr = brcmf_commonring_reserve_for_write(commonring);
if (!ret_ptr) {
- brcmf_err("Failed to reserve space in commonring\n");
+ bphy_err(drvr, "Failed to reserve space in commonring\n");
brcmf_commonring_unlock(commonring);
brcmf_msgbuf_remove_flowring(msgbuf, flowid);
return BRCMF_FLOWRING_INVALID_ID;
@@ -627,7 +644,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
err = brcmf_commonring_write_complete(commonring);
brcmf_commonring_unlock(commonring);
if (err) {
- brcmf_err("Failed to write commonring\n");
+ bphy_err(drvr, "Failed to write commonring\n");
brcmf_msgbuf_remove_flowring(msgbuf, flowid);
return BRCMF_FLOWRING_INVALID_ID;
}
@@ -686,6 +703,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
struct brcmf_flowring *flow = msgbuf->flow;
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct brcmf_commonring *commonring;
void *ret_ptr;
u32 count;
@@ -705,8 +723,8 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
while (brcmf_flowring_qlen(flow, flowid)) {
skb = brcmf_flowring_dequeue(flow, flowid);
if (skb == NULL) {
- brcmf_err("No SKB, but qlen %d\n",
- brcmf_flowring_qlen(flow, flowid));
+ bphy_err(drvr, "No SKB, but qlen %d\n",
+ brcmf_flowring_qlen(flow, flowid));
break;
}
skb_orphan(skb);
@@ -714,7 +732,7 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
msgbuf->tx_pktids, skb, ETH_HLEN,
&physaddr, &pktid)) {
brcmf_flowring_reinsert(flow, flowid, skb);
- brcmf_err("No PKTID available !!\n");
+ bphy_err(drvr, "No PKTID available !!\n");
break;
}
ret_ptr = brcmf_commonring_reserve_for_write(commonring);
@@ -885,6 +903,7 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct brcmf_commonring *commonring;
void *ret_ptr;
struct sk_buff *skb;
@@ -912,7 +931,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
if (skb == NULL) {
- brcmf_err("Failed to alloc SKB\n");
+ bphy_err(drvr, "Failed to alloc SKB\n");
brcmf_commonring_write_cancel(commonring, alloced - i);
break;
}
@@ -922,7 +941,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
msgbuf->rx_pktids, skb, 0,
&physaddr, &pktid)) {
dev_kfree_skb_any(skb);
- brcmf_err("No PKTID available !!\n");
+ bphy_err(drvr, "No PKTID available !!\n");
brcmf_commonring_write_cancel(commonring, alloced - i);
break;
}
@@ -992,6 +1011,7 @@ static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
u32 count)
{
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct brcmf_commonring *commonring;
void *ret_ptr;
struct sk_buff *skb;
@@ -1009,7 +1029,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
count,
&alloced);
if (!ret_ptr) {
- brcmf_err("Failed to reserve space in commonring\n");
+ bphy_err(drvr, "Failed to reserve space in commonring\n");
brcmf_commonring_unlock(commonring);
return 0;
}
@@ -1021,7 +1041,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
if (skb == NULL) {
- brcmf_err("Failed to alloc SKB\n");
+ bphy_err(drvr, "Failed to alloc SKB\n");
brcmf_commonring_write_cancel(commonring, alloced - i);
break;
}
@@ -1031,7 +1051,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
msgbuf->rx_pktids, skb, 0,
&physaddr, &pktid)) {
dev_kfree_skb_any(skb);
- brcmf_err("No PKTID available !!\n");
+ bphy_err(drvr, "No PKTID available !!\n");
brcmf_commonring_write_cancel(commonring, alloced - i);
break;
}
@@ -1083,6 +1103,7 @@ static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct msgbuf_rx_event *event;
u32 idx;
u16 buflen;
@@ -1109,14 +1130,14 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
if (!ifp || !ifp->ndev) {
- brcmf_err("Received pkt for invalid ifidx %d\n",
- event->msg.ifidx);
+ bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
+ event->msg.ifidx);
goto exit;
}
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_fweh_process_skb(ifp->drvr, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb, 0);
exit:
brcmu_pkt_buf_free_skb(skb);
@@ -1126,6 +1147,7 @@ exit:
static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct msgbuf_rx_complete *rx_complete;
struct sk_buff *skb;
u16 data_offset;
@@ -1159,7 +1181,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
ifp = msgbuf->drvr->mon_if;
if (!ifp) {
- brcmf_err("Received unexpected monitor pkt\n");
+ bphy_err(drvr, "Received unexpected monitor pkt\n");
brcmu_pkt_buf_free_skb(skb);
return;
}
@@ -1170,8 +1192,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
if (!ifp || !ifp->ndev) {
- brcmf_err("Received pkt for invalid ifidx %d\n",
- rx_complete->msg.ifidx);
+ bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
+ rx_complete->msg.ifidx);
brcmu_pkt_buf_free_skb(skb);
return;
}
@@ -1180,11 +1202,39 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
brcmf_netif_rx(ifp, skb);
}
+static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
+ void *buf)
+{
+ struct msgbuf_gen_status *gen_status = buf;
+ struct brcmf_pub *drvr = msgbuf->drvr;
+ int err;
+
+ err = le16_to_cpu(gen_status->compl_hdr.status);
+ if (err)
+ bphy_err(drvr, "Firmware reported general error: %d\n", err);
+}
+
+static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
+ void *buf)
+{
+ struct msgbuf_ring_status *ring_status = buf;
+ struct brcmf_pub *drvr = msgbuf->drvr;
+ int err;
+
+ err = le16_to_cpu(ring_status->compl_hdr.status);
+ if (err) {
+ int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);
+
+ bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
+ err);
+ }
+}
static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
void *buf)
{
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct msgbuf_flowring_create_resp *flowring_create_resp;
u16 status;
u16 flowid;
@@ -1196,7 +1246,7 @@ brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
status = le16_to_cpu(flowring_create_resp->compl_hdr.status);
if (status) {
- brcmf_err("Flowring creation failed, code %d\n", status);
+ bphy_err(drvr, "Flowring creation failed, code %d\n", status);
brcmf_msgbuf_remove_flowring(msgbuf, flowid);
return;
}
@@ -1213,6 +1263,7 @@ static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
void *buf)
{
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct msgbuf_flowring_delete_resp *flowring_delete_resp;
u16 status;
u16 flowid;
@@ -1224,7 +1275,7 @@ brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);
if (status) {
- brcmf_err("Flowring deletion failed, code %d\n", status);
+ bphy_err(drvr, "Flowring deletion failed, code %d\n", status);
brcmf_flowring_delete(msgbuf->flow, flowid);
return;
}
@@ -1237,10 +1288,19 @@ brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
+ struct brcmf_pub *drvr = msgbuf->drvr;
struct msgbuf_common_hdr *msg;
msg = (struct msgbuf_common_hdr *)buf;
switch (msg->msgtype) {
+ case MSGBUF_TYPE_GEN_STATUS:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
+ brcmf_msgbuf_process_gen_status(msgbuf, buf);
+ break;
+ case MSGBUF_TYPE_RING_STATUS:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
+ brcmf_msgbuf_process_ring_status(msgbuf, buf);
+ break;
case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
@@ -1269,7 +1329,7 @@ static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
brcmf_msgbuf_process_rx_complete(msgbuf, buf);
break;
default:
- brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
+ bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
break;
}
}
@@ -1352,7 +1412,7 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
brcmf_commonring_lock(commonring);
ret_ptr = brcmf_commonring_reserve_for_write(commonring);
if (!ret_ptr) {
- brcmf_err("FW unaware, flowring will be removed !!\n");
+ bphy_err(drvr, "FW unaware, flowring will be removed !!\n");
brcmf_commonring_unlock(commonring);
brcmf_msgbuf_remove_flowring(msgbuf, flowid);
return;
@@ -1376,7 +1436,7 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
err = brcmf_commonring_write_complete(commonring);
brcmf_commonring_unlock(commonring);
if (err) {
- brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
+ bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n");
brcmf_msgbuf_remove_flowring(msgbuf, flowid);
}
}
@@ -1451,8 +1511,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
if_msgbuf = drvr->bus_if->msgbuf;
if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
- brcmf_err("driver not configured for this many flowrings %d\n",
- if_msgbuf->max_flowrings);
+ bphy_err(drvr, "driver not configured for this many flowrings %d\n",
+ if_msgbuf->max_flowrings);
if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
}
@@ -1462,7 +1522,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
if (msgbuf->txflow_wq == NULL) {
- brcmf_err("workqueue creation failed\n");
+ bphy_err(drvr, "workqueue creation failed\n");
goto fail;
}
INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
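MSGBUF_TYPE_GEN_STATUS and MSGBUF_TYPE_RING_STATUS used to fall through to the "Unsupported msgtype" default; they are now decoded and logged. Both message types carry the common completion header, so the two handlers boil down to reading its status field, roughly (condensing the hunks above; compl_hdr stands for the respective struct member):

        int err = le16_to_cpu(compl_hdr->status);

        if (err) {
                /* the ring-status variant additionally identifies the affected ring */
                int ring = le16_to_cpu(compl_hdr->flow_ring_id);

                bphy_err(drvr, "Firmware reported ring %d error: %d\n",
                         ring, err);
        }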
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 456a1bf008b3..73a0e550f2b2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -434,6 +434,7 @@ static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
*/
static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
{
+ struct brcmf_pub *drvr = ifp->drvr;
s32 ret = 0;
brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
@@ -450,7 +451,7 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac,
ETH_ALEN);
if (ret)
- brcmf_err("failed to update device address ret %d\n", ret);
+ bphy_err(drvr, "failed to update device address ret %d\n", ret);
return ret;
}
@@ -570,13 +571,14 @@ static s32 brcmf_p2p_deinit_discovery(struct brcmf_p2p_info *p2p)
*/
static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p)
{
+ struct brcmf_pub *drvr = p2p->cfg->pub;
struct brcmf_cfg80211_vif *vif;
s32 ret = 0;
brcmf_dbg(TRACE, "enter\n");
vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
if (!vif) {
- brcmf_err("P2P config device not available\n");
+ bphy_err(drvr, "P2P config device not available\n");
ret = -EPERM;
goto exit;
}
@@ -590,13 +592,13 @@ static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p)
vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
ret = brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 1);
if (ret < 0) {
- brcmf_err("set p2p_disc error\n");
+ bphy_err(drvr, "set p2p_disc error\n");
goto exit;
}
vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
ret = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
if (ret < 0) {
- brcmf_err("unable to set WL_P2P_DISC_ST_SCAN\n");
+ bphy_err(drvr, "unable to set WL_P2P_DISC_ST_SCAN\n");
goto exit;
}
@@ -608,7 +610,7 @@ static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p)
*/
ret = brcmf_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED);
if (ret < 0) {
- brcmf_err("wsec error %d\n", ret);
+ bphy_err(drvr, "wsec error %d\n", ret);
goto exit;
}
@@ -630,6 +632,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
u16 chanspecs[], s32 search_state,
enum p2p_bss_type bss_type)
{
+ struct brcmf_pub *drvr = p2p->cfg->pub;
s32 ret = 0;
s32 memsize = offsetof(struct brcmf_p2p_scan_le,
eparams.params_le.channel_list);
@@ -648,7 +651,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
vif = p2p->bss_idx[bss_type].vif;
if (vif == NULL) {
- brcmf_err("no vif for bss type %d\n", bss_type);
+ bphy_err(drvr, "no vif for bss type %d\n", bss_type);
ret = -EINVAL;
goto exit;
}
@@ -676,7 +679,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
BRCMF_P2P_WILDCARD_SSID_LEN);
break;
default:
- brcmf_err(" invalid search state %d\n", search_state);
+ bphy_err(drvr, " invalid search state %d\n", search_state);
ret = -EINVAL;
goto exit;
}
@@ -760,6 +763,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
struct cfg80211_scan_request *request)
{
struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_pub *drvr = cfg->pub;
s32 err = 0;
s32 search_state = WL_P2P_DISC_ST_SCAN;
struct brcmf_cfg80211_vif *vif;
@@ -822,7 +826,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
}
exit:
if (err)
- brcmf_err("error (%d)\n", err);
+ bphy_err(drvr, "error (%d)\n", err);
return err;
}
@@ -917,19 +921,20 @@ int brcmf_p2p_scan_prep(struct wiphy *wiphy,
static s32
brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration)
{
+ struct brcmf_pub *drvr = p2p->cfg->pub;
struct brcmf_cfg80211_vif *vif;
struct brcmu_chan ch;
s32 err = 0;
vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
if (!vif) {
- brcmf_err("Discovery is not set, so we have nothing to do\n");
+ bphy_err(drvr, "Discovery is not set, so we have nothing to do\n");
err = -EPERM;
goto exit;
}
if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) {
- brcmf_err("Previous LISTEN is not completed yet\n");
+ bphy_err(drvr, "Previous LISTEN is not completed yet\n");
/* WAR: prevent cookie mismatch in wpa_supplicant return OK */
goto exit;
}
@@ -1046,6 +1051,7 @@ void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp)
*/
static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
{
+ struct brcmf_pub *drvr = p2p->cfg->pub;
s32 err;
u32 channel_cnt;
u16 *default_chan_list;
@@ -1061,7 +1067,7 @@ static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
default_chan_list = kcalloc(channel_cnt, sizeof(*default_chan_list),
GFP_KERNEL);
if (default_chan_list == NULL) {
- brcmf_err("channel list allocation failed\n");
+ bphy_err(drvr, "channel list allocation failed\n");
err = -ENOMEM;
goto exit;
}
@@ -1103,6 +1109,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work)
struct brcmf_p2p_info *p2p = container_of(afx_hdl,
struct brcmf_p2p_info,
afx_hdl);
+ struct brcmf_pub *drvr = p2p->cfg->pub;
s32 err;
if (!afx_hdl->is_active)
@@ -1116,7 +1123,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work)
err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
if (err) {
- brcmf_err("ERROR occurred! value is (%d)\n", err);
+ bphy_err(drvr, "ERROR occurred! value is (%d)\n", err);
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status))
complete(&afx_hdl->act_frm_scan);
@@ -1338,7 +1345,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data)
{
- struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_cfg80211_info *cfg = drvr->config;
struct brcmf_p2p_info *p2p = &cfg->p2p;
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
struct wireless_dev *wdev;
@@ -1409,7 +1417,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) +
mgmt_frame_len, GFP_KERNEL);
if (!mgmt_frame) {
- brcmf_err("No memory available for action frame\n");
+ bphy_err(drvr, "No memory available for action frame\n");
return -ENOMEM;
}
memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN);
@@ -1492,6 +1500,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
struct brcmf_fil_af_params_le *af_params)
{
+ struct brcmf_pub *drvr = p2p->cfg->pub;
struct brcmf_cfg80211_vif *vif;
s32 err = 0;
s32 timeout = 0;
@@ -1506,7 +1515,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
sizeof(*af_params));
if (err) {
- brcmf_err(" sending action frame has failed\n");
+ bphy_err(drvr, " sending action frame has failed\n");
goto exit;
}
@@ -1556,6 +1565,7 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
struct brcmf_config_af_params *config_af_params)
{
struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_fil_action_frame_le *action_frame;
struct brcmf_p2p_pub_act_frame *act_frm;
s32 err = 0;
@@ -1634,8 +1644,8 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
config_af_params->extra_listen = false;
break;
default:
- brcmf_err("Unknown p2p pub act frame subtype: %d\n",
- act_frm->subtype);
+ bphy_err(drvr, "Unknown p2p pub act frame subtype: %d\n",
+ act_frm->subtype);
err = -EINVAL;
}
return err;
@@ -1657,6 +1667,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
struct brcmf_fil_action_frame_le *action_frame;
struct brcmf_config_af_params config_af_params;
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct brcmf_pub *drvr = cfg->pub;
u16 action_frame_len;
bool ack = false;
u8 category;
@@ -1692,7 +1703,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
if (brcmf_p2p_pub_af_tx(cfg, af_params, &config_af_params)) {
/* Just send unknown subtype frame with */
/* default parameters. */
- brcmf_err("P2P Public action frame, unknown subtype.\n");
+ bphy_err(drvr, "P2P Public action frame, unknown subtype.\n");
}
} else if (brcmf_p2p_is_gas_action(action_frame->data,
action_frame_len)) {
@@ -1714,7 +1725,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
af_params->dwell_time =
cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
} else {
- brcmf_err("Unknown action type: %d\n", action);
+ bphy_err(drvr, "Unknown action type: %d\n", action);
goto exit;
}
} else if (brcmf_p2p_is_p2p_action(action_frame->data,
@@ -1722,8 +1733,8 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
/* do not configure anything. it will be */
/* sent with a default configuration */
} else {
- brcmf_err("Unknown Frame: category 0x%x, action 0x%x\n",
- category, action);
+ bphy_err(drvr, "Unknown Frame: category 0x%x, action 0x%x\n",
+ category, action);
return false;
}
@@ -1761,7 +1772,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
if (brcmf_p2p_af_searching_channel(p2p) ==
P2P_INVALID_CHANNEL) {
- brcmf_err("Couldn't find peer's channel.\n");
+ bphy_err(drvr, "Couldn't find peer's channel.\n");
goto exit;
}
@@ -1783,7 +1794,8 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
tx_retry++;
}
if (ack == false) {
- brcmf_err("Failed to send Action Frame(retry %d)\n", tx_retry);
+ bphy_err(drvr, "Failed to send Action Frame(retry %d)\n",
+ tx_retry);
clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
}
@@ -1965,6 +1977,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
enum brcmf_fil_p2p_if_types if_type)
{
struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_vif *vif;
struct brcmf_fil_p2p_if_le if_request;
s32 err;
@@ -1974,13 +1987,13 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
if (!vif) {
- brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n");
+ bphy_err(drvr, "vif for P2PAPI_BSSCFG_PRIMARY does not exist\n");
return -EPERM;
}
brcmf_notify_escan_complete(cfg, vif->ifp, true, true);
vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
if (!vif) {
- brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n");
+ bphy_err(drvr, "vif for P2PAPI_BSSCFG_CONNECTION does not exist\n");
return -EPERM;
}
brcmf_set_mpc(vif->ifp, 0);
@@ -1998,7 +2011,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
sizeof(if_request));
if (err) {
- brcmf_err("p2p_ifupd FAILED, err=%d\n", err);
+ bphy_err(drvr, "p2p_ifupd FAILED, err=%d\n", err);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
return err;
}
@@ -2006,7 +2019,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
BRCMF_VIF_EVENT_TIMEOUT);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
if (!err) {
- brcmf_err("No BRCMF_E_IF_CHANGE event received\n");
+ bphy_err(drvr, "No BRCMF_E_IF_CHANGE event received\n");
return -EIO;
}
@@ -2069,6 +2082,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
struct wiphy *wiphy,
u8 *addr)
{
+ struct brcmf_pub *drvr = p2p->cfg->pub;
struct brcmf_cfg80211_vif *p2p_vif;
struct brcmf_if *p2p_ifp;
struct brcmf_if *pri_ifp;
@@ -2080,7 +2094,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE);
if (IS_ERR(p2p_vif)) {
- brcmf_err("could not create discovery vif\n");
+ bphy_err(drvr, "could not create discovery vif\n");
return (struct wireless_dev *)p2p_vif;
}
@@ -2088,7 +2102,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
/* firmware requires unique mac address for p2pdev interface */
if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) {
- brcmf_err("discovery vif must be different from primary interface\n");
+ bphy_err(drvr, "discovery vif must be different from primary interface\n");
return ERR_PTR(-EINVAL);
}
@@ -2101,7 +2115,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
/* Initialize P2P Discovery in the firmware */
err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
if (err < 0) {
- brcmf_err("set p2p_disc error\n");
+ bphy_err(drvr, "set p2p_disc error\n");
brcmf_fweh_p2pdev_setup(pri_ifp, false);
brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
goto fail;
@@ -2113,7 +2127,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
brcmf_fweh_p2pdev_setup(pri_ifp, false);
if (!err) {
- brcmf_err("timeout occurred\n");
+ bphy_err(drvr, "timeout occurred\n");
err = -EIO;
goto fail;
}
@@ -2127,7 +2141,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
/* verify bsscfg index for P2P discovery */
err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bsscfgidx);
if (err < 0) {
- brcmf_err("retrieving discover bsscfg index failed\n");
+ bphy_err(drvr, "retrieving discover bsscfg index failed\n");
goto fail;
}
@@ -2161,6 +2175,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_cfg80211_vif *vif;
enum brcmf_fil_p2p_if_types iftype;
int err;
@@ -2201,7 +2216,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
BRCMF_VIF_EVENT_TIMEOUT);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
if (!err) {
- brcmf_err("timeout occurred\n");
+ bphy_err(drvr, "timeout occurred\n");
err = -EIO;
goto fail;
}
@@ -2209,7 +2224,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
/* interface created in firmware */
ifp = vif->ifp;
if (!ifp) {
- brcmf_err("no if pointer provided\n");
+ bphy_err(drvr, "no if pointer provided\n");
err = -ENOENT;
goto fail;
}
@@ -2218,7 +2233,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
ifp->ndev->name_assign_type = name_assign_type;
err = brcmf_net_attach(ifp, true);
if (err) {
- brcmf_err("Registering netdevice failed\n");
+ bphy_err(drvr, "Registering netdevice failed\n");
free_netdev(ifp->ndev);
goto fail;
}
@@ -2373,6 +2388,7 @@ void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev)
*/
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
{
+ struct brcmf_pub *drvr = cfg->pub;
struct brcmf_p2p_info *p2p;
struct brcmf_if *pri_ifp;
s32 err = 0;
@@ -2387,7 +2403,7 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
if (p2pdev_forced) {
err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
if (IS_ERR(err_ptr)) {
- brcmf_err("P2P device creation failed.\n");
+ bphy_err(drvr, "P2P device creation failed.\n");
err = PTR_ERR(err_ptr);
}
} else {
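Note: the brcmf_err() -> bphy_err() conversions above (and in pno.c/proto.c further below) switch these messages from a driver-global print to one attributed to the wiphy owned by the given brcmf_pub. A minimal sketch of what such a wiphy-scoped macro can look like, assuming a drvr->wiphy pointer and the same ratelimit gating this series uses elsewhere (illustrative only, not the exact in-tree definition; bphy_err_sketch is a made-up name):

	/* needs <net/cfg80211.h> for wiphy_err() and <linux/net.h> for net_ratelimit() */
	#define bphy_err_sketch(drvr, fmt, ...)					\
		do {								\
			if (IS_ENABLED(CONFIG_BRCMDBG) ||			\
			    IS_ENABLED(CONFIG_BRCM_TRACING) ||			\
			    net_ratelimit())					\
				wiphy_err((drvr)->wiphy, "%s: " fmt,		\
					  __func__, ##__VA_ARGS__);		\
		} while (0)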
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 16d7dda965d8..58a6bc379358 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -30,6 +30,15 @@
#include <brcmu_wifi.h>
#include <brcm_hw_ids.h>
+/* Custom brcmf_err() that takes bus arg and passes it further */
+#define brcmf_err(bus, fmt, ...) \
+ do { \
+ if (IS_ENABLED(CONFIG_BRCMDBG) || \
+ IS_ENABLED(CONFIG_BRCM_TRACING) || \
+ net_ratelimit()) \
+ __brcmf_err(bus, __func__, fmt, ##__VA_ARGS__); \
+ } while (0)
+
#include "debug.h"
#include "bus.h"
#include "commonring.h"
@@ -531,6 +540,7 @@ static void
brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
{
const struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
struct brcmf_core *core;
u32 bar0_win;
@@ -548,7 +558,7 @@ brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
}
}
} else {
- brcmf_err("Unsupported core selected %x\n", coreid);
+ brcmf_err(bus, "Unsupported core selected %x\n", coreid);
}
}
@@ -848,9 +858,8 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
{
- struct pci_dev *pdev;
-
- pdev = devinfo->pdev;
+ struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
brcmf_pcie_intr_disable(devinfo);
@@ -861,7 +870,7 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
brcmf_pcie_isr_thread, IRQF_SHARED,
"brcmf_pcie_intr", devinfo)) {
pci_disable_msi(pdev);
- brcmf_err("Failed to request IRQ %d\n", pdev->irq);
+ brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq);
return -EIO;
}
devinfo->irq_allocated = true;
@@ -871,15 +880,14 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
u32 status;
u32 count;
if (!devinfo->irq_allocated)
return;
- pdev = devinfo->pdev;
-
brcmf_pcie_intr_disable(devinfo);
free_irq(pdev->irq, devinfo);
pci_disable_msi(pdev);
@@ -891,7 +899,7 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
count++;
}
if (devinfo->in_irq)
- brcmf_err("Still in IRQ (processing) !!!\n");
+ brcmf_err(bus, "Still in IRQ (processing) !!!\n");
status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
@@ -1102,6 +1110,7 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
+ struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
struct brcmf_pcie_ringbuf *ring;
struct brcmf_pcie_ringbuf *rings;
u32 d2h_w_idx_ptr;
@@ -1254,7 +1263,7 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
return 0;
fail:
- brcmf_err("Allocating ring buffers failed\n");
+ brcmf_err(bus, "Allocating ring buffers failed\n");
brcmf_pcie_release_ringbuffers(devinfo);
return -ENOMEM;
}
@@ -1277,14 +1286,15 @@ brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
{
+ struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
u64 address;
u32 addr;
devinfo->shared.scratch =
- dma_zalloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
- &devinfo->shared.scratch_dmahandle,
- GFP_KERNEL);
+ dma_alloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+ &devinfo->shared.scratch_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.scratch)
goto fail;
@@ -1298,10 +1308,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
devinfo->shared.ringupd =
- dma_zalloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
- &devinfo->shared.ringupd_dmahandle,
- GFP_KERNEL);
+ dma_alloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+ &devinfo->shared.ringupd_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.ringupd)
goto fail;
@@ -1316,7 +1326,7 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
return 0;
fail:
- brcmf_err("Allocating scratch buffers failed\n");
+ brcmf_err(bus, "Allocating scratch buffers failed\n");
brcmf_pcie_release_scratchbuffers(devinfo);
return -ENOMEM;
}
@@ -1437,6 +1447,7 @@ static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
u32 sharedram_addr)
{
+ struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
struct brcmf_pcie_shared_info *shared;
u32 addr;
@@ -1448,7 +1459,8 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
(shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
- brcmf_err("Unsupported PCIE version %d\n", shared->version);
+ brcmf_err(bus, "Unsupported PCIE version %d\n",
+ shared->version);
return -EINVAL;
}
@@ -1490,6 +1502,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
const struct firmware *fw, void *nvram,
u32 nvram_len)
{
+ struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
u32 sharedram_addr;
u32 sharedram_addr_written;
u32 loop_counter;
@@ -1544,7 +1557,13 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
loop_counter--;
}
if (sharedram_addr == sharedram_addr_written) {
- brcmf_err("FW failed to initialize\n");
+ brcmf_err(bus, "FW failed to initialize\n");
+ return -ENODEV;
+ }
+ if (sharedram_addr < devinfo->ci->rambase ||
+ sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
+ brcmf_err(bus, "Invalid shared RAM address 0x%08x\n",
+ sharedram_addr);
return -ENODEV;
}
brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
@@ -1555,16 +1574,15 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
int err;
phys_addr_t bar0_addr, bar1_addr;
ulong bar1_size;
- pdev = devinfo->pdev;
-
err = pci_enable_device(pdev);
if (err) {
- brcmf_err("pci_enable_device failed err=%d\n", err);
+ brcmf_err(bus, "pci_enable_device failed err=%d\n", err);
return err;
}
@@ -1577,7 +1595,7 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
/* read Bar-1 mapped memory range */
bar1_size = pci_resource_len(pdev, 2);
if ((bar1_size == 0) || (bar1_addr == 0)) {
- brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
+ brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
bar1_size, (unsigned long long)bar1_addr);
return -EINVAL;
}
@@ -1586,7 +1604,7 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
if (!devinfo->regs || !devinfo->tcm) {
- brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
+ brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
devinfo->tcm);
return -EINVAL;
}
@@ -1873,7 +1891,7 @@ fail_bus:
kfree(bus->msgbuf);
kfree(bus);
fail:
- brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
+ brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device);
brcmf_pcie_release_resource(devinfo);
if (devinfo->ci)
brcmf_chip_detach(devinfo->ci);
@@ -1947,7 +1965,7 @@ static int brcmf_pcie_pm_enter_D3(struct device *dev)
wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
BRCMF_PCIE_MBDATA_TIMEOUT);
if (!devinfo->mbdata_completed) {
- brcmf_err("Timeout on response for entering D3 substate\n");
+ brcmf_err(bus, "Timeout on response for entering D3 substate\n");
brcmf_bus_change_state(bus, BRCMF_BUS_UP);
return -EIO;
}
@@ -1993,7 +2011,7 @@ cleanup:
err = brcmf_pcie_probe(pdev, NULL);
if (err)
- brcmf_err("probe after resume failed, err=%d\n", err);
+ brcmf_err(bus, "probe after resume failed, err=%d\n", err);
return err;
}
@@ -2064,7 +2082,8 @@ void brcmf_pcie_register(void)
brcmf_dbg(PCIE, "Enter\n");
err = pci_register_driver(&brcmf_pciedrvr);
if (err)
- brcmf_err("PCIE driver registration failed, err=%d\n", err);
+ brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
+ err);
}
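Two patterns recur in the pcie.c hunks above. First, dma_zalloc_coherent() is replaced by dma_alloc_coherent(); dma_alloc_coherent() already returns zeroed memory in current kernels, so the conversion does not change behaviour. Second, error prints recover the bus object from the PCI device's drvdata so they can be attributed to the device; a small sketch of that lookup (example_report() is a made-up helper name):

	static void example_report(struct brcmf_pciedev_info *devinfo, int err)
	{
		struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);

		brcmf_err(bus, "operation failed, err=%d\n", err);
	}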
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index ffa243e2e2d0..0fb97f7dd5a2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -109,6 +109,7 @@ static int brcmf_pno_channel_config(struct brcmf_if *ifp,
static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
u32 mscan, u32 bestn)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_pno_param_le pfn_param;
u16 flags;
u32 pfnmem;
@@ -132,13 +133,13 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
/* set bestn in firmware */
err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
if (err < 0) {
- brcmf_err("failed to set pfnmem\n");
+ bphy_err(drvr, "failed to set pfnmem\n");
goto exit;
}
/* get max mscan which the firmware supports */
err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
if (err < 0) {
- brcmf_err("failed to get pfnmem\n");
+ bphy_err(drvr, "failed to get pfnmem\n");
goto exit;
}
mscan = min_t(u32, mscan, pfnmem);
@@ -152,7 +153,7 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
sizeof(pfn_param));
if (err)
- brcmf_err("pfn_set failed, err=%d\n", err);
+ bphy_err(drvr, "pfn_set failed, err=%d\n", err);
exit:
return err;
@@ -160,6 +161,7 @@ exit:
static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_pno_macaddr_le pfn_mac;
u8 *mac_addr = NULL;
u8 *mac_mask = NULL;
@@ -194,7 +196,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
- brcmf_err("pfn_macaddr failed, err=%d\n", err);
+ bphy_err(drvr, "pfn_macaddr failed, err=%d\n", err);
return err;
}
@@ -202,6 +204,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
bool active)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_pno_net_param_le pfn;
int err;
@@ -218,12 +221,13 @@ static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
if (err < 0)
- brcmf_err("adding failed: err=%d\n", err);
+ bphy_err(drvr, "adding failed: err=%d\n", err);
return err;
}
static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_pno_bssid_le bssid_cfg;
int err;
@@ -234,7 +238,7 @@ static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
sizeof(bssid_cfg));
if (err < 0)
- brcmf_err("adding failed: err=%d\n", err);
+ bphy_err(drvr, "adding failed: err=%d\n", err);
return err;
}
@@ -258,6 +262,7 @@ static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
static int brcmf_pno_clean(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
int ret;
/* Disable pfn */
@@ -267,7 +272,7 @@ static int brcmf_pno_clean(struct brcmf_if *ifp)
ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
}
if (ret < 0)
- brcmf_err("failed code %d\n", ret);
+ bphy_err(drvr, "failed code %d\n", ret);
return ret;
}
@@ -392,6 +397,7 @@ static int brcmf_pno_config_networks(struct brcmf_if *ifp,
static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_pno_info *pi;
struct brcmf_gscan_config *gscan_cfg;
struct brcmf_gscan_bucket_config *buckets;
@@ -416,7 +422,7 @@ static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
/* clean up everything */
err = brcmf_pno_clean(ifp);
if (err < 0) {
- brcmf_err("failed error=%d\n", err);
+ bphy_err(drvr, "failed error=%d\n", err);
goto free_gscan;
}
@@ -496,6 +502,11 @@ int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
pi = ifp_to_pno(ifp);
+
+ /* No PNO request */
+ if (!pi->n_reqs)
+ return 0;
+
err = brcmf_pno_remove_request(pi, reqid);
if (err)
return err;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
index c5ff551ec659..024c643052bc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
@@ -47,8 +47,8 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
if (brcmf_proto_msgbuf_attach(drvr))
goto fail;
} else {
- brcmf_err("Unsupported proto type %d\n",
- drvr->bus_if->proto_type);
+ bphy_err(drvr, "Unsupported proto type %d\n",
+ drvr->bus_if->proto_type);
goto fail;
}
if (!proto->tx_queue_data || (proto->hdrpull == NULL) ||
@@ -56,7 +56,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
(proto->configure_addr_mode == NULL) ||
(proto->delete_peer == NULL) || (proto->add_tdls_peer == NULL) ||
(proto->debugfs_create == NULL)) {
- brcmf_err("Not all proto handlers have been installed\n");
+ bphy_err(drvr, "Not all proto handlers have been installed\n");
goto fail;
}
return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 0cd5b8d970d7..4d104ab80fd8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2999,21 +2999,35 @@ static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus,
if (error < 0)
return error;
- seq_printf(seq,
- "dongle trap info: type 0x%x @ epc 0x%08x\n"
- " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
- " lr 0x%08x pc 0x%08x offset 0x%x\n"
- " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
- " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
- le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
- le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
- le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
- le32_to_cpu(tr.pc), sh->trap_addr,
- le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
- le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
- le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
- le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
-
+ if (seq)
+ seq_printf(seq,
+ "dongle trap info: type 0x%x @ epc 0x%08x\n"
+ " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
+ " lr 0x%08x pc 0x%08x offset 0x%x\n"
+ " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
+ " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
+ le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
+ le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
+ le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
+ le32_to_cpu(tr.pc), sh->trap_addr,
+ le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
+ le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
+ le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
+ le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
+ else
+ pr_debug("dongle trap info: type 0x%x @ epc 0x%08x\n"
+ " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
+ " lr 0x%08x pc 0x%08x offset 0x%x\n"
+ " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
+ " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
+ le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
+ le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
+ le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
+ le32_to_cpu(tr.pc), sh->trap_addr,
+ le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
+ le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
+ le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
+ le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
return 0;
}
@@ -3067,8 +3081,10 @@ static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
else if (sh.flags & SDPCM_SHARED_ASSERT)
brcmf_err("assertion in dongle\n");
- if (sh.flags & SDPCM_SHARED_TRAP)
+ if (sh.flags & SDPCM_SHARED_TRAP) {
brcmf_err("firmware trap in dongle\n");
+ brcmf_sdio_trap_info(NULL, bus, &sh);
+ }
return 0;
}
@@ -3143,9 +3159,12 @@ static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data)
return 0;
}
-static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
+static void brcmf_sdio_debugfs_create(struct device *dev)
{
- struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
if (IS_ERR_OR_NULL(dentry))
@@ -3165,7 +3184,7 @@ static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
return 0;
}
-static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
+static void brcmf_sdio_debugfs_create(struct device *dev)
{
}
#endif /* DEBUG */
@@ -3477,8 +3496,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
if (bus->rxbuf)
bus->rxblen = value;
- brcmf_sdio_debugfs_create(bus);
-
/* the commands below use the terms tx and rx from
* a device perspective, ie. bus:txglom affects the
* bus transfers from device to host.
@@ -4088,6 +4105,7 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.get_ramsize = brcmf_sdio_bus_get_ramsize,
.get_memdump = brcmf_sdio_bus_get_memdump,
.get_fwname = brcmf_sdio_get_fwname,
+ .debugfs_create = brcmf_sdio_debugfs_create
};
#define BRCMF_SDIO_FW_CODE 0
@@ -4197,7 +4215,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
} else {
/* Disable F2 again */
sdio_disable_func(sdiod->func2);
- goto release;
+ goto checkdied;
}
if (brcmf_chip_sr_capable(bus->ci)) {
@@ -4218,8 +4236,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
}
/* If we didn't come up, turn off backplane clock */
- if (err != 0)
+ if (err != 0) {
brcmf_sdio_clkctl(bus, CLK_NONE, false);
+ goto checkdied;
+ }
sdio_release_host(sdiod->func1);
@@ -4233,12 +4253,15 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
err = brcmf_attach(sdiod->dev, sdiod->settings);
if (err != 0) {
brcmf_err("brcmf_attach failed\n");
- goto fail;
+ sdio_claim_host(sdiod->func1);
+ goto checkdied;
}
/* ready */
return;
+checkdied:
+ brcmf_sdio_checkdied(bus);
release:
sdio_release_host(sdiod->func1);
fail:
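The trap-info change above lets brcmf_sdio_trap_info() run without a seq_file, so brcmf_sdio_checkdied() can dump the same trap registers to the kernel log when firmware bring-up fails. A compact sketch of the seq-or-log idiom it uses (dump_word() is a made-up helper; needs <linux/seq_file.h> and <linux/printk.h>):

	static void dump_word(struct seq_file *seq, const char *name, u32 val)
	{
		if (seq)
			seq_printf(seq, "%s 0x%08x\n", name, val);
		else
			pr_debug("%s 0x%08x\n", name, val);
	}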
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
index fe6755944b7b..a5c271bff446 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
@@ -14,14 +14,16 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/device.h>
#include <linux/module.h> /* bug in tracepoint.h, it should include this */
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
+#include "bus.h"
#include "tracepoint.h"
#include "debug.h"
-void __brcmf_err(const char *func, const char *fmt, ...)
+void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -30,7 +32,10 @@ void __brcmf_err(const char *func, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
- pr_err("%s: %pV", func, &vaf);
+ if (bus)
+ dev_err(bus->dev, "%s: %pV", func, &vaf);
+ else
+ pr_err("%s: %pV", func, &vaf);
trace_brcmf_err(func, &vaf);
va_end(args);
}
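With the reworked __brcmf_err() above, a known bus routes the message through dev_err() so it carries the device name, while a NULL bus (for example the driver-registration failures in pcie.c and usb.c) falls back to pr_err(). Callers normally go through the brcmf_err()/bphy_err() macros rather than calling it directly; a direct-call sketch just to show both paths (example_log() is a made-up name):

	static void example_log(struct brcmf_bus *bus)
	{
		__brcmf_err(bus, __func__, "probe failed, err=%d\n", -ENODEV);	/* dev_err() path */
		__brcmf_err(NULL, __func__, "registration failed\n");		/* pr_err() fallback */
	}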
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index a4308c6e72d7..e9cbfd077710 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -508,7 +508,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
skb = req->skb;
req->skb = NULL;
- /* zero lenght packets indicate usb "failure". Do not refill */
+ /* zero length packets indicate usb "failure". Do not refill */
if (urb->status != 0 || !urb->actual_length) {
brcmu_pkt_buf_free_skb(skb);
brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
@@ -575,7 +575,6 @@ static void
brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
{
struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus;
- int old_state;
brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n",
devinfo->bus_pub.state, state);
@@ -583,7 +582,6 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
if (devinfo->bus_pub.state == state)
return;
- old_state = devinfo->bus_pub.state;
devinfo->bus_pub.state = state;
/* update state of upper layer */
@@ -1550,6 +1548,10 @@ void brcmf_usb_exit(void)
void brcmf_usb_register(void)
{
+ int ret;
+
brcmf_dbg(USB, "Enter\n");
- usb_register(&brcmf_usbdrvr);
+ ret = usb_register(&brcmf_usbdrvr);
+ if (ret)
+ brcmf_err("usb_register failed %d\n", ret);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index ed83f33aceb7..482d7737764d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -16,9 +16,9 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
- -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \
- -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \
- -Idrivers/net/wireless/broadcom/brcm80211/include
+ -I $(srctree)/$(src) \
+ -I $(srctree)/$(src)/phy \
+ -I $(srctree)/$(src)/../include
brcmsmac-y := \
mac80211_if.o \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
index 3bd54f125776..6d776ef6ff54 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
@@ -37,27 +37,18 @@ static struct dentry *root_folder;
void brcms_debugfs_init(void)
{
root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR(root_folder))
- root_folder = NULL;
}
void brcms_debugfs_exit(void)
{
- if (!root_folder)
- return;
-
debugfs_remove_recursive(root_folder);
root_folder = NULL;
}
-int brcms_debugfs_attach(struct brcms_pub *drvr)
+void brcms_debugfs_attach(struct brcms_pub *drvr)
{
- if (!root_folder)
- return -ENODEV;
-
drvr->dbgfs_dir = debugfs_create_dir(
dev_name(&drvr->wlc->hw->d11core->dev), root_folder);
- return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
}
void brcms_debugfs_detach(struct brcms_pub *drvr)
@@ -195,7 +186,7 @@ static const struct file_operations brcms_debugfs_def_ops = {
.llseek = seq_lseek
};
-static int
+static void
brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data))
{
@@ -203,27 +194,18 @@ brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn,
struct dentry *dentry = drvr->dbgfs_dir;
struct brcms_debugfs_entry *entry;
- if (IS_ERR_OR_NULL(dentry))
- return -ENOENT;
-
entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
if (!entry)
- return -ENOMEM;
+ return;
entry->read = read_fn;
entry->drvr = drvr;
- dentry = debugfs_create_file(fn, 0444, dentry, entry,
- &brcms_debugfs_def_ops);
-
- return PTR_ERR_OR_ZERO(dentry);
+ debugfs_create_file(fn, 0444, dentry, entry, &brcms_debugfs_def_ops);
}
void brcms_debugfs_create_files(struct brcms_pub *drvr)
{
- if (IS_ERR_OR_NULL(drvr->dbgfs_dir))
- return;
-
brcms_debugfs_add_entry(drvr, "hardware", brcms_debugfs_hardware_read);
brcms_debugfs_add_entry(drvr, "macstat", brcms_debugfs_macstat_read);
}
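The brcmsmac debugfs changes above follow the kernel-wide convention that debugfs creation helpers handle their own failures (including an error-valued parent dentry), so callers neither check return values nor propagate -ENODEV/-ENOMEM. A minimal sketch of the resulting style (example names, not driver code):

	static void example_debugfs_setup(struct dentry *parent, u32 *counter)
	{
		struct dentry *dir = debugfs_create_dir("example", parent);

		debugfs_create_u32("counter", 0444, dir, counter);
	}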
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h
index 822781cf15d4..56898e6d789d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h
@@ -68,7 +68,7 @@ void __brcms_dbg(struct device *dev, u32 level, const char *func,
struct brcms_pub;
void brcms_debugfs_init(void);
void brcms_debugfs_exit(void);
-int brcms_debugfs_attach(struct brcms_pub *drvr);
+void brcms_debugfs_attach(struct brcms_pub *drvr);
void brcms_debugfs_detach(struct brcms_pub *drvr);
struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr);
void brcms_debugfs_create_files(struct brcms_pub *drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index e78a93a45741..c6e107f41948 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1199,8 +1199,6 @@ wlc_lcnphy_rx_iq_est(struct brcms_phy *pi,
{
int wait_count = 0;
bool result = true;
- u8 phybw40;
- phybw40 = CHSPEC_IS40(pi->radio_chanspec);
mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5);
@@ -3082,7 +3080,7 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
u8 bbmult;
struct phytbl_info tab;
s32 a1, b0, b1;
- s32 tssi, pwr, maxtargetpwr, mintargetpwr;
+ s32 tssi, pwr, mintargetpwr;
bool suspend;
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
@@ -3119,7 +3117,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
b0 = pi->txpa_2g[0];
b1 = pi->txpa_2g[1];
a1 = pi->txpa_2g[2];
- maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
@@ -4212,7 +4209,7 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
s8 index;
struct phytbl_info tab;
s32 a1, b0, b1;
- s32 tssi, pwr, maxtargetpwr, mintargetpwr;
+ s32 tssi, pwr, mintargetpwr;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
pi->phy_lastcal = pi->sh->now;
@@ -4249,7 +4246,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
b0 = pi->txpa_2g[0];
b1 = pi->txpa_2g[1];
a1 = pi->txpa_2g[2];
- maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
@@ -4622,13 +4618,10 @@ static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
{
uint idx;
- u8 phybw40;
struct phytbl_info tab;
const struct phytbl_info *tb;
u32 val;
- phybw40 = CHSPEC_IS40(pi->radio_chanspec);
-
for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++)
wlc_lcnphy_write_table(pi, &dot11lcnphytbl_info_rev0[idx]);
@@ -4831,9 +4824,7 @@ static void wlc_lcnphy_baseband_init(struct brcms_phy *pi)
void wlc_phy_init_lcnphy(struct brcms_phy *pi)
{
- u8 phybw40;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- phybw40 = CHSPEC_IS40(pi->radio_chanspec);
pi_lcn->lcnphy_cal_counter = 0;
pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
index 256c91f9ac4b..bb02c6220a88 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
@@ -15,9 +15,7 @@
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-ccflags-y := \
- -Idrivers/net/wireless/broadcom/brcm80211/brcmutil \
- -Idrivers/net/wireless/broadcom/brcm80211/include
+ccflags-y := -I $(srctree)/$(src)/../include
obj-$(CONFIG_BRCMUTIL) += brcmutil.o
brcmutil-objs = utils.o d11.o
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 57e3b6cca234..271977f7fbb0 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3756,10 +3756,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_remove_sysfs;
- err = il_dbgfs_register(il, DRV_NAME);
- if (err)
- IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
- err);
+ il_dbgfs_register(il, DRV_NAME);
/* Start monitoring the killswitch */
queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 6b4488a178a7..94222ae464ae 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4988,10 +4988,7 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
if (err)
goto out_unbind;
- err = il_dbgfs_register(il, DRV_NAME);
- if (err)
- IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
- err);
+ il_dbgfs_register(il, DRV_NAME);
err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
if (err) {
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index dc6a74a05983..b079c64ca014 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2974,13 +2974,11 @@ il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
#endif /* CONFIG_IWLEGACY_DEBUG */
#ifdef CONFIG_IWLEGACY_DEBUGFS
-int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_register(struct il_priv *il, const char *name);
void il_dbgfs_unregister(struct il_priv *il);
#else
-static inline int
-il_dbgfs_register(struct il_priv *il, const char *name)
+static inline void il_dbgfs_register(struct il_priv *il, const char *name)
{
- return 0;
}
static inline void
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
index d76073def677..fa211412e0ac 100644
--- a/drivers/net/wireless/intel/iwlegacy/debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -128,23 +128,12 @@ EXPORT_SYMBOL(il_update_stats);
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, il, \
- &il_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, il, \
+ &il_dbgfs_##name##_ops); \
} while (0)
#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_bool(#name, 0600, parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_x32(#name, 0600, parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
+ debugfs_create_bool(#name, 0600, parent, ptr); \
} while (0)
/* file operation */
@@ -1341,27 +1330,18 @@ DEBUGFS_WRITE_FILE_OPS(wd_timeout);
* Create the debugfs files and directories
*
*/
-int
+void
il_dbgfs_register(struct il_priv *il, const char *name)
{
struct dentry *phyd = il->hw->wiphy->debugfsdir;
struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
dir_drv = debugfs_create_dir(name, phyd);
- if (!dir_drv)
- return -ENOMEM;
-
il->debugfs_dir = dir_drv;
dir_data = debugfs_create_dir("data", dir_drv);
- if (!dir_data)
- goto err;
dir_rf = debugfs_create_dir("rf", dir_drv);
- if (!dir_rf)
- goto err;
dir_debug = debugfs_create_dir("debug", dir_drv);
- if (!dir_debug)
- goto err;
DEBUGFS_ADD_FILE(nvm, dir_data, 0400);
DEBUGFS_ADD_FILE(sram, dir_data, 0600);
@@ -1399,12 +1379,6 @@ il_dbgfs_register(struct il_priv *il, const char *name)
DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&il->disable_chain_noise_cal);
DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
- return 0;
-
-err:
- IL_ERR("Can't create the debugfs directory\n");
- il_dbgfs_unregister(il);
- return -ENOMEM;
}
EXPORT_SYMBOL(il_dbgfs_register);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 491ca3c8b43c..83d5bceea08f 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -1,6 +1,6 @@
config IWLWIFI
tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
- depends on PCI && HAS_IOMEM
+ depends on PCI && HAS_IOMEM && CFG80211
select FW_LOADER
---help---
Select to build the driver supporting the:
@@ -47,6 +47,7 @@ if IWLWIFI
config IWLWIFI_LEDS
bool
depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+ depends on IWLMVM || IWLDVM
select LEDS_TRIGGERS
select MAC80211_LEDS
default y
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 7e65073834b7..fdc56f821b5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,7 +56,7 @@
#include "iwl-config.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 43
+#define IWL_22000_UCODE_API_MAX 46
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -79,10 +79,15 @@
#define IWL_22000_HR_B_F0_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
-#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
+#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
+#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
+#define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
+#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
+#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -96,14 +101,26 @@
IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
+ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
+ IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_CC_A_MODULE_FIRMWARE(api) \
+ IWL_CC_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(api) \
+ IWL_22000_SO_A_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_22000_SO_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -164,6 +181,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.d3_debug_data_base_addr = 0x401000, \
.d3_debug_data_length = 60 * 1024
+#define IWL_DEVICE_AX200_COMMON \
+ IWL_DEVICE_22000_COMMON, \
+ .umac_prph_offset = 0x300000
+
#define IWL_DEVICE_22500 \
IWL_DEVICE_22000_COMMON, \
.device_family = IWL_DEVICE_FAMILY_22000, \
@@ -176,6 +197,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.base_params = &iwl_22560_base_params, \
.csr = &iwl_csr_v2
+#define IWL_DEVICE_AX210 \
+ IWL_DEVICE_AX200_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_AX210, \
+ .base_params = &iwl_22000_base_params, \
+ .csr = &iwl_csr_v1, \
+ .min_txq_size = 128
+
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
@@ -195,8 +223,8 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
IWL_DEVICE_22500,
};
-const struct iwl_cfg iwl22000_2ax_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
+const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -207,6 +235,45 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl22260_2ax_cfg = {
+ .name = "Intel(R) Wireless-AX 22260",
+ .fw_name_pre = IWL_CC_A_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .bisr_workaround = 1,
+};
+
+const struct iwl_cfg killer1650x_2ax_cfg = {
+ .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+ .fw_name_pre = IWL_CC_A_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .bisr_workaround = 1,
+};
+
+const struct iwl_cfg killer1650w_2ax_cfg = {
+ .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+ .fw_name_pre = IWL_CC_A_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .bisr_workaround = 1,
+};
+
/*
* All JF radio modules are part of the 9000 series, but the MAC part
* looks more like 22000. That's why this device is here, but called
@@ -230,6 +297,24 @@ const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
IWL_DEVICE_22500,
};
+const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
@@ -242,9 +327,9 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
IWL_DEVICE_22500,
};
-const struct iwl_cfg iwl22000_2ax_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
+ .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -254,9 +339,21 @@ const struct iwl_cfg iwl22000_2ax_cfg_jf = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
+const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
+ .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -266,9 +363,9 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_B_FW_PRE,
+ .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -278,9 +375,9 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_JF_B0_FW_PRE,
+ .fw_name_pre = IWL_22000_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -315,12 +412,41 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
+ IWL_DEVICE_AX210,
+};
+
+const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
+ IWL_DEVICE_AX210,
+};
+
+const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX211 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
+ IWL_DEVICE_AX210,
+};
+
+const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
+ .fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
+ IWL_DEVICE_AX210,
+};
+
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
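Worked example of the firmware-name macros added above, using values from this hunk (IWL_22000_UCODE_API_MAX is 46): IWL_CC_A_MODULE_FIRMWARE(46) expands to IWL_CC_A_FW_PRE __stringify(46) ".ucode", i.e. "iwlwifi-cc-a0-" "46" ".ucode", so the file requested from userspace is:

	static const char example_cc_fw_name[] = "iwlwifi-cc-a0-46.ucode";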
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index f2114137c13f..3225b64eb845 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -57,7 +57,7 @@
#include "fw/file.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 43
+#define IWL9000_UCODE_API_MAX 46
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
@@ -73,21 +73,12 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
-#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
-#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
-#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
-#define IWL9000A_MODULE_FIRMWARE(api) \
- IWL9000A_FW_PRE __stringify(api) ".ucode"
-#define IWL9000B_MODULE_FIRMWARE(api) \
- IWL9000B_FW_PRE __stringify(api) ".ucode"
-#define IWL9000RFB_MODULE_FIRMWARE(api) \
- IWL9000RFB_FW_PRE __stringify(api) ".ucode"
-#define IWL9260A_MODULE_FIRMWARE(api) \
- IWL9260A_FW_PRE __stringify(api) ".ucode"
-#define IWL9260B_MODULE_FIRMWARE(api) \
- IWL9260B_FW_PRE __stringify(api) ".ucode"
+#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
+#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
+#define IWL9000_MODULE_FIRMWARE(api) \
+ IWL9000_FW_PRE __stringify(api) ".ucode"
+#define IWL9260_MODULE_FIRMWARE(api) \
+ IWL9260_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -162,81 +153,87 @@ static const struct iwl_tt_params iwl9000_tt_params = {
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+};
+
+const struct iwl_cfg iwl9260_2ac_160_cfg = {
+ .name = "Intel(R) Wireless-AC 9260 160MHz",
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9260_killer_2ac_cfg = {
.name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9460_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9460_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9461_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9462_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9560_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg_soc = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -244,9 +241,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -254,9 +249,7 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -264,9 +257,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -275,9 +266,7 @@ const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -286,9 +275,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -297,9 +284,16 @@ const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -308,9 +302,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -319,17 +311,12 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
-MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 702d42b2d452..0486b17d7c41 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -11,4 +11,4 @@ iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
-ccflags-y += -I$(src)/../
+ccflags-y += -I $(srctree)/$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 431e13c6ee35..254a5ce52456 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -439,13 +439,10 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
+void iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
#else
-static inline int iwl_dbgfs_register(struct iwl_priv *priv,
- struct dentry *dbgfs_dir)
-{
- return 0;
-}
+static inline void iwl_dbgfs_register(struct iwl_priv *priv,
+ struct dentry *dbgfs_dir) { }
#endif /* CONFIG_IWLWIFI_DEBUGFS */
#ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 3d2e44a642de..d4b19673b06a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -3,6 +3,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -36,31 +37,8 @@
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, priv, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_bool(#name, 0600, parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_x32(#name, 0600, parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_u32(#name, mode, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, priv, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
/* file operation */
@@ -2238,7 +2216,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
- if (sscanf(buf, "%d", &event_log_flag) != 1)
+ if (sscanf(buf, "%u", &event_log_flag) != 1)
return -EFAULT;
if (event_log_flag == 1)
iwl_dump_nic_event_log(priv, true, NULL);
@@ -2347,21 +2325,15 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
* Create the debugfs files and directories
*
*/
-int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
+void iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
{
struct dentry *dir_data, *dir_rf, *dir_debug;
priv->debugfs_dir = dbgfs_dir;
dir_data = debugfs_create_dir("data", dbgfs_dir);
- if (!dir_data)
- goto err;
dir_rf = debugfs_create_dir("rf", dbgfs_dir);
- if (!dir_rf)
- goto err;
dir_debug = debugfs_create_dir("debug", dbgfs_dir);
- if (!dir_debug)
- goto err;
DEBUGFS_ADD_FILE(nvm, dir_data, 0400);
DEBUGFS_ADD_FILE(sram, dir_data, 0600);
@@ -2421,13 +2393,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
snprintf(buf, 100, "../../%pd2", dev_dir);
- if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
- goto err;
+ debugfs_create_symlink("iwlwifi", mac80211_dir, buf);
}
-
- return 0;
-
-err:
- IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
- return -ENOMEM;
}
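
As a minimal sketch (not part of this patch; all names hypothetical), the pattern the debugfs cleanup above relies on is that debugfs creation failures are no longer treated as fatal, so return values are simply ignored and the registration helper can be void:

#include <linux/debugfs.h>

struct example_priv {
	u32 counter;
};

static void example_dbgfs_register(struct example_priv *priv,
				   struct dentry *parent)
{
	/* create a sub-directory; the result is used but never error-checked */
	struct dentry *dir = debugfs_create_dir("example", parent);

	/* a missing debugfs entry is not fatal, so the return value is dropped */
	debugfs_create_u32("counter", 0600, dir, &priv->counter);
}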
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 49b71dbf8490..54b759cec8b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -710,24 +711,6 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
-{
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
- return false;
- return true;
-}
-
-static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
-{
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
- return false;
- if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
- return true;
-
- /* disabled by default */
- return false;
-}
-
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
@@ -752,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (!iwl_enable_rx_ampdu(priv->cfg))
+ if (!iwl_enable_rx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Rx\n");
ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -764,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
if (!priv->trans->ops->txq_enable)
break;
- if (!iwl_enable_tx_ampdu(priv->cfg))
+ if (!iwl_enable_tx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
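
The deleted helpers above never used their cfg argument, which is why the callers now pass no argument at all. A sketch equivalent to the removed bodies (the actual shared helpers introduced elsewhere by this series may differ) would be:

static inline bool iwl_enable_rx_ampdu(void)
{
	/* Rx aggregation is on unless explicitly disabled by module param */
	return !(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG);
}

static inline bool iwl_enable_tx_ampdu(void)
{
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
		return false;
	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
		return true;

	/* Tx aggregation is disabled by default */
	return false;
}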
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index c219bca5cff4..7c68a86ed9e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1054,7 +1054,7 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
else
IWL_ERR(priv,
- "Cannot request restart before registrating with mac80211\n");
+ "Cannot request restart before registering with mac80211\n");
} else {
WARN_ON(1);
}
@@ -1509,13 +1509,10 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
goto out_destroy_workqueue;
- if (iwl_dbgfs_register(priv, dbgfs_dir))
- goto out_mac80211_unregister;
+ iwl_dbgfs_register(priv, dbgfs_dir);
return op_mode;
-out_mac80211_unregister:
- iwlagn_mac_unregister(priv);
out_destroy_workqueue:
iwl_tt_exit(priv);
iwl_cancel_deferred_work(priv);
@@ -1881,7 +1878,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
return pos;
}
- if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
+ if (!(iwl_have_debug_level(IWL_DL_FW)) && !full_log)
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
@@ -1897,7 +1894,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
if (!*buf)
return -ENOMEM;
}
- if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
+ if (iwl_have_debug_level(IWL_DL_FW) || full_log) {
/*
* if uCode has wrapped back to top of log,
* start at the oldest entry,
@@ -1927,7 +1924,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
unsigned int reload_msec;
unsigned long reload_jiffies;
- if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
+ if (iwl_have_debug_level(IWL_DL_FW))
iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
/* uCode is no longer loaded. */
@@ -1965,12 +1962,12 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
if (iwlwifi_mod_params.fw_restart) {
- IWL_DEBUG_FW_ERRORS(priv,
- "Restarting adapter due to uCode error.\n");
+ IWL_DEBUG_FW(priv,
+ "Restarting adapter due to uCode error.\n");
queue_work(priv->workqueue, &priv->restart);
} else
- IWL_DEBUG_FW_ERRORS(priv,
- "Detected FW error, but not restarting\n");
+ IWL_DEBUG_FW(priv,
+ "Detected FW error, but not restarting\n");
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 6f17a5e24e82..e224b23f0ba8 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -2,6 +2,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
@@ -592,7 +593,7 @@ static int iwlagn_set_decrypted_flag(struct iwl_priv *priv,
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_BAD_KEY_TTAK)
break;
-
+ /* fall through */
case RX_RES_STATUS_SEC_TYPE_WEP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_BAD_ICV_MIC) {
@@ -601,6 +602,7 @@ static int iwlagn_set_decrypted_flag(struct iwl_priv *priv,
IWL_DEBUG_RX(priv, "Packet destroyed\n");
return -1;
}
+ /* fall through */
case RX_RES_STATUS_SEC_TYPE_CCMP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_DECRYPT_OK) {
@@ -729,7 +731,7 @@ static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
- /* fall through if TTAK OK */
+ /* fall through */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 8d7aafb4d9e9..f190f7beb3a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -3,6 +3,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -418,7 +419,7 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
limit /= 2;
dwell_time = min(limit, dwell_time);
- /* fall through to limit further */
+ /* fall through */
case 1:
limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
limit /= n_active;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 4de2727ac63e..a156dcf5b7d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -325,9 +326,9 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
iwl_prepare_ct_kill_task(priv);
tt->state = old_state;
}
- } else if (old_state == IWL_TI_CT_KILL &&
- tt->state != IWL_TI_CT_KILL)
+ } else if (old_state == IWL_TI_CT_KILL) {
iwl_perform_ct_kill_task(priv, false);
+ }
IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n",
tt->state);
IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 32d000cffe9f..405038ce98d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -205,3 +207,33 @@ out:
return dflt_pwr_limit;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
+
+int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret;
+
+ data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ *extl_clk = wifi_pkg->package.elements[1].integer.value;
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
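
A hypothetical caller sketch (names assumed, not from this patch) for the new getter; the !CONFIG_ACPI stub and a missing ACPI object both surface as an error code, so callers can fall back silently:

static void example_read_extl_clk(struct device *dev)
{
	u32 extl_clk = 0;

	/* -ENOENT (stub) or another error simply means no ECKV information */
	if (iwl_acpi_get_eckv(dev, &extl_clk))
		return;

	/* extl_clk now holds the external clock validation value */
}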
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 7492dfb6729b..f5704e16643f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,6 +67,7 @@
#define ACPI_WGDS_METHOD "WGDS"
#define ACPI_WRDD_METHOD "WRDD"
#define ACPI_SPLC_METHOD "SPLC"
+#define ACPI_ECKV_METHOD "ECKV"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -86,6 +87,7 @@
#define ACPI_WGDS_WIFI_DATA_SIZE 19
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
+#define ACPI_ECKV_WIFI_DATA_SIZE 2
#define ACPI_WGDS_NUM_BANDS 2
#define ACPI_WGDS_TABLE_SIZE 3
@@ -109,6 +111,17 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc);
u64 iwl_acpi_get_pwr_limit(struct device *dev);
+/*
+ * iwl_acpi_get_eckv - read external clock validation from ACPI, if available
+ *
+ * @dev: the struct device
+ * @extl_clk: output var (2 bytes) that will get the clk indication.
+ *
+ * This function tries to read the external clock indication
+ * from ACPI if available.
+ */
+int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -133,5 +146,10 @@ static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
return 0;
}
+static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+{
+ return -ENOENT;
+}
+
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 08d3d8a190f6..df1bd0d2450e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -96,14 +96,7 @@ enum {
#define IWL_ALIVE_FLG_RFKILL BIT(0)
-struct iwl_lmac_alive {
- __le32 ucode_major;
- __le32 ucode_minor;
- u8 ver_subtype;
- u8 ver_type;
- u8 mac;
- u8 opt;
- __le32 timestamp;
+struct iwl_lmac_debug_addrs {
__le32 error_event_table_ptr; /* SRAM address for error log */
__le32 log_event_table_ptr; /* SRAM address for LMAC event log */
__le32 cpu_register_ptr;
@@ -112,13 +105,28 @@ struct iwl_lmac_alive {
__le32 scd_base_ptr; /* SRAM address for SCD */
__le32 st_fwrd_addr; /* pointer to Store and forward */
__le32 st_fwrd_size;
+} __packed; /* UCODE_DEBUG_ADDRS_API_S_VER_2 */
+
+struct iwl_lmac_alive {
+ __le32 ucode_major;
+ __le32 ucode_minor;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le32 timestamp;
+ struct iwl_lmac_debug_addrs dbg_ptrs;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+struct iwl_umac_debug_addrs {
+ __le32 error_info_addr; /* SRAM address for UMAC error log */
+ __le32 dbg_print_buff_addr;
+} __packed; /* UMAC_DEBUG_ADDRS_API_S_VER_1 */
+
struct iwl_umac_alive {
__le32 umac_major; /* UMAC version: major */
__le32 umac_minor; /* UMAC version: minor */
- __le32 error_info_addr; /* SRAM address for UMAC error log */
- __le32 dbg_print_buff_addr;
+ struct iwl_umac_debug_addrs dbg_ptrs;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
struct mvm_alive_resp_v3 {
@@ -189,4 +197,24 @@ struct iwl_card_state_notif {
__le32 flags;
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+/**
+ * enum iwl_error_recovery_flags - flags for error recovery cmd
+ * @ERROR_RECOVERY_UPDATE_DB: update db from blob sent
+ * @ERROR_RECOVERY_END_OF_RECOVERY: end of recovery
+ */
+enum iwl_error_recovery_flags {
+ ERROR_RECOVERY_UPDATE_DB = BIT(0),
+ ERROR_RECOVERY_END_OF_RECOVERY = BIT(1),
+};
+
+/**
+ * struct iwl_fw_error_recovery_cmd - recovery cmd sent upon assert
+ * @flags: &enum iwl_error_recovery_flags
+ * @buf_size: db buffer size in bytes
+ */
+struct iwl_fw_error_recovery_cmd {
+ __le32 flags;
+ __le32 buf_size;
+} __packed; /* ERROR_RECOVERY_CMD_HDR_API_S_VER_1 */
+
#endif /* __iwl_fw_api_alive_h__ */
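
For illustration only, filling the new recovery command for the "update db" phase; the struct and flag come from the hunk above, while the surrounding flow (which host command carries it, how the blob follows) is not shown here and is assumed:

static void example_fill_recovery_cmd(struct iwl_fw_error_recovery_cmd *cmd,
				      u32 blob_size)
{
	/* ask the firmware to update its recovery db from the sent blob */
	cmd->flags = cpu_to_le32(ERROR_RECOVERY_UPDATE_DB);
	cmd->buf_size = cpu_to_le32(blob_size);
}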
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 8b4922bbe139..4d2274bcc0b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -77,7 +77,8 @@
* @DATA_PATH_GROUP: data path group, uses command IDs from
* &enum iwl_data_path_subcmd_ids
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
- * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids
+ * @LOCATION_GROUP: location group, uses command IDs from
+ * &enum iwl_location_subcmd_ids
* @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
* &enum iwl_prot_offload_subcmd_ids
* @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
@@ -92,7 +93,7 @@ enum iwl_mvm_command_groups {
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
NAN_GROUP = 0x7,
- TOF_GROUP = 0x8,
+ LOCATION_GROUP = 0x8,
PROT_OFFLOAD_GROUP = 0xb,
REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
@@ -353,16 +354,6 @@ enum iwl_legacy_cmds {
PHY_DB_CMD = 0x6c,
/**
- * @TOF_CMD: &struct iwl_tof_config_cmd
- */
- TOF_CMD = 0x10,
-
- /**
- * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd
- */
- TOF_NOTIFICATION = 0x11,
-
- /**
* @POWER_TABLE_CMD: &struct iwl_device_power_cmd
*/
POWER_TABLE_CMD = 0x77,
@@ -415,7 +406,11 @@ enum iwl_legacy_cmds {
TX_ANT_CONFIGURATION_CMD = 0x98,
/**
- * @STATISTICS_CMD: &struct iwl_statistics_cmd
+ * @STATISTICS_CMD:
+ * one of &struct iwl_statistics_cmd,
+ * &struct iwl_notif_statistics_v11,
+ * &struct iwl_notif_statistics_v10,
+ * &struct iwl_notif_statistics
*/
STATISTICS_CMD = 0x9c,
@@ -423,7 +418,7 @@ enum iwl_legacy_cmds {
* @STATISTICS_NOTIFICATION:
* one of &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics_v11,
- * &struct iwl_notif_statistics_cdb
+ * &struct iwl_notif_statistics
*/
STATISTICS_NOTIFICATION = 0x9d,
@@ -648,6 +643,11 @@ enum iwl_system_subcmd_ids {
* @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
*/
INIT_EXTENDED_CFG_CMD = 0x03,
+
+ /**
+ * @FW_ERROR_RECOVERY_CMD: &struct iwl_fw_error_recovery_cmd
+ */
+ FW_ERROR_RECOVERY_CMD = 0x7,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 6fae02fa4cad..86ea0784e1a3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -224,8 +224,18 @@ struct iwl_wowlan_pattern {
#define IWL_WOWLAN_MAX_PATTERNS 20
+/**
+ * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
+ */
struct iwl_wowlan_patterns_cmd {
+ /**
+ * @n_patterns: number of patterns
+ */
__le32 n_patterns;
+
+ /**
+ * @patterns: the patterns, array length in @n_patterns
+ */
struct iwl_wowlan_pattern patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
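
Since @patterns is a flexible array, the wire size of the command follows directly from @n_patterns; a small helper sketch (hypothetical, for illustration):

static inline size_t example_wowlan_patterns_cmd_size(u32 n_patterns)
{
	/* fixed header plus one iwl_wowlan_pattern per requested pattern */
	return sizeof(struct iwl_wowlan_patterns_cmd) +
	       n_patterns * sizeof(struct iwl_wowlan_pattern);
}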
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index fdc54a5dc9de..93c06e6c1ced 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -105,6 +105,12 @@ enum iwl_data_path_subcmd_ids {
HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
/**
+ * @CHEST_COLLECTOR_FILTER_CONFIG_CMD: Configure the CSI
+ * matrix collection, uses &struct iwl_channel_estimation_cfg
+ */
+ CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
+
+ /**
* @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
*/
RX_NO_DATA_NOTIF = 0xF5,
@@ -156,4 +162,53 @@ struct iwl_mu_group_mgmt_notif {
__le32 user_position[4];
} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
+enum iwl_channel_estimation_flags {
+ IWL_CHANNEL_ESTIMATION_ENABLE = BIT(0),
+ IWL_CHANNEL_ESTIMATION_TIMER = BIT(1),
+ IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2),
+};
+
+/**
+ * struct iwl_channel_estimation_cfg - channel estimation reporting config
+ */
+struct iwl_channel_estimation_cfg {
+ /**
+ * @flags: flags, see &enum iwl_channel_estimation_flags
+ */
+ __le32 flags;
+ /**
+ * @timer: if enabled via flags, automatically disable after this many
+ * microseconds
+ */
+ __le32 timer;
+ /**
+ * @count: if enabled via flags, automatically disable after this many
+ * frames with channel estimation matrix were captured
+ */
+ __le32 count;
+ /**
+ * @rate_n_flags_mask: only try to record the channel estimation matrix
+ * if the rate_n_flags value for the received frame (let's call
+ * that rx_rnf) matches the mask/value given here like this:
+ * (rx_rnf & rate_n_flags_mask) == rate_n_flags_val.
+ */
+ __le32 rate_n_flags_mask;
+ /**
+ * @rate_n_flags_val: see @rate_n_flags_mask
+ */
+ __le32 rate_n_flags_val;
+ /**
+ * @reserved: reserved (for alignment)
+ */
+ __le32 reserved;
+ /**
+ * @frame_types: bitmap of frame types to capture, the received frame's
+ * subtype|type takes 6 bits in the frame and the corresponding bit
+ * in this field must be set to 1 to capture channel estimation for
+ * that frame type. Set to all-ones to enable capturing for all
+ * frame types.
+ */
+ __le64 frame_types;
+} __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */
+
#endif /* __iwl_fw_api_datapath_h__ */
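
The filter rule documented for @rate_n_flags_mask above can be written out directly; a sketch with a hypothetical helper name and an rx_rnf parameter standing for the received frame's rate_n_flags:

static bool example_csi_rate_match(const struct iwl_channel_estimation_cfg *cfg,
				   u32 rx_rnf)
{
	/* capture CSI only for frames whose rate_n_flags match mask/value */
	return (rx_rnf & le32_to_cpu(cfg->rate_n_flags_mask)) ==
	       le32_to_cpu(cfg->rate_n_flags_val);
}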
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index ab82b7a67967..33858787817b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -25,7 +25,7 @@
*
* BSD LICENSE
*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,7 +70,7 @@ struct iwl_fw_ini_header {
__le32 tlv_version;
__le32 apply_point;
u8 data[];
-} __packed; /* FW_INI_HEADER_TLV_S */
+} __packed; /* FW_DEBUG_TLV_HEADER_S */
/**
* struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION)
@@ -92,7 +92,7 @@ struct iwl_fw_ini_allocation_tlv {
__le32 size;
__le32 max_fragments;
__le32 min_frag_size;
-} __packed; /* FW_INI_BUFFER_ALLOCATION_TLV_S_VER_1 */
+} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */
/**
* struct iwl_fw_ini_hcmd (IWL_FW_INI_TLV_TYPE_HCMD)
@@ -108,7 +108,7 @@ struct iwl_fw_ini_hcmd {
u8 group;
__le16 padding;
u8 data[0];
-} __packed; /* FW_INI_HCMD_S */
+} __packed; /* FW_DEBUG_TLV_HCMD_DATA_S */
/**
* struct iwl_fw_ini_hcmd_tlv
@@ -118,7 +118,7 @@ struct iwl_fw_ini_hcmd {
struct iwl_fw_ini_hcmd_tlv {
struct iwl_fw_ini_header header;
struct iwl_fw_ini_hcmd hcmd;
-} __packed; /* FW_INI_HCMD_TLV_S_VER_1 */
+} __packed; /* FW_DEBUG_TLV_HCMD_S_VER_1 */
/*
* struct iwl_fw_ini_debug_flow_tlv (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW)
@@ -129,20 +129,50 @@ struct iwl_fw_ini_hcmd_tlv {
struct iwl_fw_ini_debug_flow_tlv {
struct iwl_fw_ini_header header;
__le32 debug_flow_cfg;
-} __packed; /* FW_INI_DEBUG_FLOW_TLV_S_VER_1 */
+} __packed; /* FW_DEBUG_TLV_FLOW_TLV_S_VER_1 */
-#define IWL_FW_INI_MAX_REGION_ID 20
+#define IWL_FW_INI_MAX_REGION_ID 64
#define IWL_FW_INI_MAX_NAME 32
+
+/**
+ * struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region
+ * @num_of_ranges: number of ranges in the region
+ * @range_data_size: size of the data to read per range, in bytes.
+ */
+struct iwl_fw_ini_region_cfg_internal {
+ __le32 num_of_ranges;
+ __le32 range_data_size;
+} __packed; /* FW_DEBUG_TLV_REGION_NIC_INTERNAL_RANGES_S */
+
+/**
+ * struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region
+ * @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region
+ * @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region.
+ * It is unused for tx.
+ * @num_of_registers: number of prph registers in the region, each register is
+ * 4 bytes in size.
+ * @header_only: a non-zero value indicates that this region does not include
+ * fifo data and includes only the given registers.
+ */
+struct iwl_fw_ini_region_cfg_fifos {
+ __le32 fid1;
+ __le32 fid2;
+ __le32 num_of_registers;
+ __le32 header_only;
+} __packed; /* FW_DEBUG_TLV_REGION_FIFOS_S */
+
/**
* struct iwl_fw_ini_region_cfg
* @region_id: ID of this dump configuration
* @region_type: &enum iwl_fw_ini_region_type
* @num_regions: amount of regions in the address array.
- * @allocation_id: For DRAM type field substitutes for allocation_id.
* @name_len: name length
* @name: file name to use for this region
- * @size: size of the data, in bytes.(unused for IWL_FW_INI_REGION_DRAM_BUFFER)
- * @start_addr: array of addresses. (unused for IWL_FW_INI_REGION_DRAM_BUFFER)
+ * @internal: used in case the region uses internal memory.
+ * @allocation_id: For DRAM type field substitutes for allocation_id
+ * @fifos: used in case of fifos region.
+ * @offset: offset to use for each memory base address
+ * @start_addr: array of addresses.
*/
struct iwl_fw_ini_region_cfg {
__le32 region_id;
@@ -150,32 +180,38 @@ struct iwl_fw_ini_region_cfg {
__le32 name_len;
u8 name[IWL_FW_INI_MAX_NAME];
union {
- __le32 num_regions;
+ struct iwl_fw_ini_region_cfg_internal internal;
__le32 allocation_id;
+ struct iwl_fw_ini_region_cfg_fifos fifos;
};
- __le32 size;
+ __le32 offset;
__le32 start_addr[];
-} __packed; /* FW_INI_REGION_CONFIG_S */
+} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_S */
/**
* struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG)
* DUMP sections define IDs and triggers that use those IDs TLV
* @header: header
* @num_regions: how many different region section and IDs are coming next
- * @iwl_fw_ini_dump dump_config: list of dump configurations
+ * @region_config: list of dump configurations
*/
struct iwl_fw_ini_region_tlv {
struct iwl_fw_ini_header header;
__le32 num_regions;
struct iwl_fw_ini_region_cfg region_config[];
-} __packed; /* FW_INI_REGION_CFG_S */
+} __packed; /* FW_DEBUG_TLV_REGIONS_S_VER_1 */
/**
* struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG)
* Region sections define IDs and triggers that use those IDs TLV
*
* @trigger_id: enum &iwl_fw_ini_tigger_id
- * @ignore_default: override FW TLV with binary TLV
+ * @override_trig: determines how to apply the trigger when a trigger with the
+ * same id is already in use. Using the first 2 bytes:
+ * Byte 0: if 0, override trigger configuration, otherwise use the
+ * existing configuration.
+ * Byte 1: if 0, override trigger regions, otherwise append regions to
+ * existing trigger.
* @dump_delay: delay from trigger fire to dump, in usec
* @occurrences: max amount of times to be fired
* @ignore_consec: ignore consecutive triggers, in usec
@@ -187,7 +223,7 @@ struct iwl_fw_ini_region_tlv {
*/
struct iwl_fw_ini_trigger {
__le32 trigger_id;
- __le32 ignore_default;
+ __le32 override_trig;
__le32 dump_delay;
__le32 occurrences;
__le32 ignore_consec;
@@ -196,7 +232,7 @@ struct iwl_fw_ini_trigger {
__le32 trigger_data;
__le32 num_regions;
__le32 data[];
-} __packed; /* FW_INI_TRIGGER_CONFIG_S */
+} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_S */
/**
* struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG)
@@ -210,20 +246,17 @@ struct iwl_fw_ini_trigger_tlv {
struct iwl_fw_ini_header header;
__le32 num_triggers;
struct iwl_fw_ini_trigger trigger_config[];
-} __packed; /* FW_INI_TRIGGER_CFG_S */
+} __packed; /* FW_TLV_DEBUG_TRIGGERS_S_VER_1 */
/**
* enum iwl_fw_ini_trigger_id
* @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
- * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
* @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
- * @IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR: FW error notification
- * @IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING: FW warning notification
- * @IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO: FW info notification
- * @IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG: FW debug notification
+ * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
+ * @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification
+ * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION: FW generic notification
* @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
* @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
- * @FW_DEBUG_TLV_TRIGGER_ID_HOST_DID_INITIATED_EVENT: undefined
* @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
* threshold was crossed
* @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed
@@ -257,50 +290,53 @@ struct iwl_fw_ini_trigger_tlv {
* @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs
*/
enum iwl_fw_ini_trigger_id {
+ IWL_FW_TRIGGER_ID_INVALID = 0,
+
/* Errors triggers */
IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
- IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 2,
- IWL_FW_TRIGGER_ID_FW_HW_ERROR = 3,
- /* Generic triggers */
- IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR = 4,
- IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING = 5,
- IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO = 6,
- IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG = 7,
- /* User Trigger */
- IWL_FW_TRIGGER_ID_USER_TRIGGER = 8,
+ IWL_FW_TRIGGER_ID_FW_HW_ERROR = 2,
+ IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 3,
+
+ /* FW triggers */
+ IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4,
+ IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION = 5,
+
+ /* User trigger */
+ IWL_FW_TRIGGER_ID_USER_TRIGGER = 6,
+
/* Host triggers */
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 9,
- IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT = 10,
- IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 11,
- IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 12,
- IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 13,
- IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 14,
- IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 15,
- IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 16,
- IWL_FW_TRIGGER_ID_HOST_SCAN_START = 17,
- IWL_FW_TRIGGER_ID_HOST_SCAN_SUBITTED = 18,
- IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 19,
- IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 20,
- IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 21,
- IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 22,
- IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 23,
- IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 24,
- IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 25,
- IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 26,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 27,
- IWL_FW_TRIGGER_ID_HOST_D3_START = 28,
- IWL_FW_TRIGGER_ID_HOST_D3_END = 29,
- IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 30,
- IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 31,
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 32,
- IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 33,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 34,
- IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 35,
- IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 36,
- IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 37,
- IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 38,
+ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 7,
+ IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 8,
+ IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 9,
+ IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 10,
+ IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 11,
+ IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 12,
+ IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 13,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_START = 14,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 15,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 16,
+ IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 17,
+ IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 18,
+ IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 19,
+ IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 20,
+ IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 21,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 22,
+ IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 23,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 24,
+ IWL_FW_TRIGGER_ID_HOST_D3_START = 25,
+ IWL_FW_TRIGGER_ID_HOST_D3_END = 26,
+ IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 27,
+ IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 28,
+ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 29,
+ IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 30,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 31,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 32,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 33,
+ IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 34,
+ IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 35,
+
IWL_FW_TRIGGER_ID_NUM,
-}; /* FW_INI_TRIGGER_ID_E_VER_1 */
+}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */
/**
* enum iwl_fw_ini_apply_point
@@ -320,7 +356,7 @@ enum iwl_fw_ini_apply_point {
IWL_FW_INI_APPLY_MISSED_BEACONS,
IWL_FW_INI_APPLY_SCAN_COMPLETE,
IWL_FW_INI_APPLY_NUM,
-}; /* FW_INI_APPLY_POINT_E_VER_1 */
+}; /* FW_DEBUG_TLV_APPLY_POINT_E_VER_1 */
/**
* enum iwl_fw_ini_allocation_id
@@ -340,7 +376,7 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_SDFX,
IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
-}; /* FW_INI_ALLOCATION_ID_E_VER_1 */
+}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
/**
* enum iwl_fw_ini_buffer_location
@@ -349,10 +385,10 @@ enum iwl_fw_ini_allocation_id {
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
*/
enum iwl_fw_ini_buffer_location {
- IWL_FW_INI_LOCATION_SRAM_INVALID,
+ IWL_FW_INI_LOCATION_INVALID,
IWL_FW_INI_LOCATION_SRAM_PATH,
IWL_FW_INI_LOCATION_DRAM_PATH,
-}; /* FW_INI_BUFFER_LOCATION_E_VER_1 */
+}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**
* enum iwl_fw_ini_debug_flow
@@ -364,7 +400,7 @@ enum iwl_fw_ini_debug_flow {
IWL_FW_INI_DEBUG_INVALID,
IWL_FW_INI_DEBUG_DBTR_FLOW,
IWL_FW_INI_DEBUG_TB2DTF_FLOW,
-}; /* FW_INI_DEBUG_FLOW_E_VER_1 */
+}; /* FW_DEBUG_TLV_FLOW_E_VER_1 */
/**
* enum iwl_fw_ini_region_type
@@ -396,6 +432,6 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_PAGING,
IWL_FW_INI_REGION_CSR,
IWL_FW_INI_REGION_NUM
-}; /* FW_INI_REGION_TYPE_E_VER_1*/
+}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
#endif
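
A sketch of the @override_trig byte semantics documented above (helper names hypothetical): byte 0 decides whether the existing trigger configuration is overridden, byte 1 whether regions are appended rather than overridden:

static bool example_trig_overrides_cfg(const struct iwl_fw_ini_trigger *trig)
{
	/* byte 0 == 0 means: override the existing trigger configuration */
	return (le32_to_cpu(trig->override_trig) & 0xff) == 0;
}

static bool example_trig_appends_regions(const struct iwl_fw_ini_trigger *trig)
{
	/* byte 1 != 0 means: append regions to the existing trigger */
	return ((le32_to_cpu(trig->override_trig) >> 8) & 0xff) != 0;
}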
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index dc1fa377087a..988584973aba 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -335,29 +335,11 @@ struct iwl_dbg_mem_access_rsp {
__le32 data[];
} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
-#define CONT_REC_COMMAND_SIZE 80
-#define ENABLE_CONT_RECORDING 0x15
-#define DISABLE_CONT_RECORDING 0x16
+#define LDBG_CFG_COMMAND_SIZE 80
#define BUFFER_ALLOCATION 0x27
#define START_DEBUG_RECORDING 0x29
#define STOP_DEBUG_RECORDING 0x2A
-/*
- * struct iwl_continuous_record_mode - recording mode
- */
-struct iwl_continuous_record_mode {
- __le16 enable_recording;
-} __packed;
-
-/*
- * struct iwl_continuous_record_cmd - enable/disable continuous recording
- */
-struct iwl_continuous_record_cmd {
- struct iwl_continuous_record_mode record_mode;
- u8 pad[CONT_REC_COMMAND_SIZE -
- sizeof(struct iwl_continuous_record_mode)];
-} __packed;
-
/* maximum fragments to be allocated per target of allocationId */
#define IWL_BUFFER_LOCATION_MAX_FRAGS 2
@@ -385,4 +367,17 @@ struct iwl_buffer_allocation_cmd {
struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS];
} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */
+/**
+ * struct iwl_ldbg_config_cmd - LDBG config command
+ * @type: configuration type
+ * @pad: reserved space for type-dependent data
+ */
+struct iwl_ldbg_config_cmd {
+ __le32 type;
+ union {
+ u8 pad[LDBG_CFG_COMMAND_SIZE - sizeof(__le32)];
+ struct iwl_buffer_allocation_cmd buffer_allocation;
+ }; /* LDBG_CFG_BODY_API_U_VER_2 (partially) */
+} __packed; /* LDBG_CFG_CMD_API_S_VER_2 */
+
#endif /* __iwl_fw_api_debug_h__ */
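
An illustrative (assumed, not taken from this patch) use of the unified LDBG config command: pick one of the type constants kept above and let the union pad the body out to LDBG_CFG_COMMAND_SIZE; how the command is queued to the firmware is outside this sketch:

static void example_fill_ldbg_start(struct iwl_ldbg_config_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	/* request the firmware to start debug recording */
	cmd->type = cpu_to_le32(START_DEBUG_RECORDING);
}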
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
new file mode 100644
index 000000000000..5dddb21c1c4d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -0,0 +1,878 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_location_h__
+#define __iwl_fw_api_location_h__
+
+/**
+ * enum iwl_location_subcmd_ids - location group command IDs
+ */
+enum iwl_location_subcmd_ids {
+ /**
+ * @TOF_RANGE_REQ_CMD: TOF ranging request,
+ * uses &struct iwl_tof_range_req_cmd
+ */
+ TOF_RANGE_REQ_CMD = 0x0,
+ /**
+ * @TOF_CONFIG_CMD: TOF configuration, uses &struct iwl_tof_config_cmd
+ */
+ TOF_CONFIG_CMD = 0x1,
+ /**
+ * @TOF_RANGE_ABORT_CMD: abort ongoing ranging, uses
+ * &struct iwl_tof_range_abort_cmd
+ */
+ TOF_RANGE_ABORT_CMD = 0x2,
+ /**
+ * @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config,
+ * uses &struct iwl_tof_range_request_ext_cmd
+ */
+ TOF_RANGE_REQ_EXT_CMD = 0x3,
+ /**
+ * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration,
+ * uses &struct iwl_tof_responder_config_cmd
+ */
+ TOF_RESPONDER_CONFIG_CMD = 0x4,
+ /**
+ * @TOF_RESPONDER_DYN_CONFIG_CMD: FTM dynamic configuration,
+ * uses &struct iwl_tof_responder_dyn_config_cmd
+ */
+ TOF_RESPONDER_DYN_CONFIG_CMD = 0x5,
+ /**
+ * @CSI_HEADER_NOTIFICATION: CSI header
+ */
+ CSI_HEADER_NOTIFICATION = 0xFA,
+ /**
+ * @CSI_CHUNKS_NOTIFICATION: CSI chunk,
+ * uses &struct iwl_csi_chunk_notification
+ */
+ CSI_CHUNKS_NOTIFICATION = 0xFB,
+ /**
+ * @TOF_LC_NOTIF: used for LCI/civic location, contains just
+ * the action frame
+ */
+ TOF_LC_NOTIF = 0xFC,
+ /**
+ * @TOF_RESPONDER_STATS: FTM responder statistics notification,
+ * uses &struct iwl_ftm_responder_stats
+ */
+ TOF_RESPONDER_STATS = 0xFD,
+ /**
+ * @TOF_MCSI_DEBUG_NOTIF: MCSI debug notification, uses
+ * &struct iwl_tof_mcsi_notif
+ */
+ TOF_MCSI_DEBUG_NOTIF = 0xFE,
+ /**
+ * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using
+ * &struct iwl_tof_range_rsp_ntfy
+ */
+ TOF_RANGE_RESPONSE_NOTIF = 0xFF,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: indicates if ToF is disabled (or not)
+ * @one_sided_disabled: indicates if one-sided is disabled (or not)
+ * @is_debug_mode: indicates if debug mode is active
+ * @is_buf_required: indicates if channel estimation buffer is required
+ */
+struct iwl_tof_config_cmd {
+ u8 tof_disabled;
+ u8 one_sided_disabled;
+ u8 is_debug_mode;
+ u8 is_buf_required;
+} __packed;
+
+/**
+ * enum iwl_tof_bandwidth - values for iwl_tof_range_req_ap_entry.bandwidth
+ * @IWL_TOF_BW_20_LEGACY: 20 MHz non-HT
+ * @IWL_TOF_BW_20_HT: 20 MHz HT
+ * @IWL_TOF_BW_40: 40 MHz
+ * @IWL_TOF_BW_80: 80 MHz
+ * @IWL_TOF_BW_160: 160 MHz
+ */
+enum iwl_tof_bandwidth {
+ IWL_TOF_BW_20_LEGACY,
+ IWL_TOF_BW_20_HT,
+ IWL_TOF_BW_40,
+ IWL_TOF_BW_80,
+ IWL_TOF_BW_160,
+}; /* LOCAT_BW_TYPE_E */
+
+/*
+ * enum iwl_tof_algo_type - Algorithm type for range measurement request
+ */
+enum iwl_tof_algo_type {
+ IWL_TOF_ALGO_TYPE_MAX_LIKE = 0,
+ IWL_TOF_ALGO_TYPE_LINEAR_REG = 1,
+ IWL_TOF_ALGO_TYPE_FFT = 2,
+
+ /* Keep last */
+ IWL_TOF_ALGO_TYPE_INVALID,
+}; /* ALGO_TYPE_E */
+
+/*
+ * enum iwl_tof_mcsi_ntfy - Enable/Disable MCSI notifications
+ */
+enum iwl_tof_mcsi_enable {
+ IWL_TOF_MCSI_DISABLED = 0,
+ IWL_TOF_MCSI_ENABLED = 1,
+}; /* MCSI_ENABLE_E */
+
+/**
+ * enum iwl_tof_responder_cmd_valid_field - valid fields in the responder cfg
+ * @IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO: channel info is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET: ToA offset is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB: common calibration mode is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB: specific calibration mode is
+ * valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_BSSID: BSSID is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_TX_ANT: TX antenna is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE: algorithm type is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT: non-ASAP support is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT: statistics report
+ * support is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT: MCSI notification support
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT: fast algorithm support
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid
+ */
+enum iwl_tof_responder_cmd_valid_field {
+ IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0),
+ IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = BIT(1),
+ IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = BIT(2),
+ IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = BIT(3),
+ IWL_TOF_RESPONDER_CMD_VALID_BSSID = BIT(4),
+ IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = BIT(5),
+ IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = BIT(6),
+ IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = BIT(7),
+ IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = BIT(8),
+ IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = BIT(9),
+ IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10),
+ IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11),
+ IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12),
+};
+
+/**
+ * enum iwl_tof_responder_cfg_flags - responder configuration flags
+ * @IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT: non-ASAP support
+ * @IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS: report statistics
+ * @IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI: report MCSI
+ * @IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE: algorithm type
+ * @IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE: ToA offset mode
+ * @IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE: common calibration mode
+ * @IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE: specific calibration mode
+ * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support
+ * @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail
+ * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask
+ */
+enum iwl_tof_responder_cfg_flags {
+ IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0),
+ IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS = BIT(1),
+ IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI = BIT(2),
+ IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE = BIT(3) | BIT(4) | BIT(5),
+ IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE = BIT(6),
+ IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE = BIT(7),
+ IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8),
+ IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9),
+ IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10),
+ IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK,
+};
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @reserved2: reserved
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 bandwidth;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
+
+#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
+
+/**
+ * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
+ * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer
+ * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer
+ * @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if
+ * needed, 0-padding such that the next part is dword-aligned, then CIVIC
+ * data (if exists) follows, and then 0-padding again to complete a
+ * 4-multiple long buffer.
+ */
+struct iwl_tof_responder_dyn_config_cmd {
+ __le32 lci_len;
+ __le32 civic_len;
+ u8 lci_civic[];
+} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */
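
The @lci_civic layout described above (LCI first, dword-padded, then CIVIC, padded again) makes the total command length easy to compute; a hypothetical helper sketch:

static size_t example_dyn_config_len(u32 lci_len, u32 civic_len)
{
	/* fixed header plus each sub-buffer rounded up to a dword boundary */
	return sizeof(struct iwl_tof_responder_dyn_config_cmd) +
	       ALIGN(lci_len, 4) + ALIGN(civic_len, 4);
}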
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @reserved: reserved
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ * in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ * value be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ * value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ * value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+ __le16 tsf_timer_offset_msec;
+ __le16 reserved;
+ u8 min_delta_ftm;
+ u8 ftm_format_and_bw20M;
+ u8 ftm_format_and_bw40M;
+ u8 ftm_format_and_bw80M;
+} __packed;
+
+/**
+ * enum iwl_tof_location_query - values for query bitmap
+ * @IWL_TOF_LOC_LCI: query LCI
+ * @IWL_TOF_LOC_CIVIC: query civic
+ */
+enum iwl_tof_location_query {
+ IWL_TOF_LOC_LCI = 0x01,
+ IWL_TOF_LOC_CIVIC = 0x02,
+};
+
+ /**
+ * struct iwl_tof_range_req_ap_entry_v2 - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth. One of iwl_tof_bandwidth.
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @bssid: AP's BSSID
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
+ * number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity in units of 100ms; ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31);
+ * 1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ * in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ * The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR;
+ * Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ * 0: Initiator interact with regular AP;
+ * 1: Initiator interact with Responder machine: need to send the
+ * Initiator Acks with HT 40MHz / 80MHz, since the Responder should
+ * use it for its ch est measurement (this flag will be set when we
+ * configure the opposite machine to be Responder).
+ * @rssi: Last received value
+ * legal values: -128 to 0; values above 0 (e.g. 0x7f) indicate an invalid value.
+ * @algo_type: &enum iwl_tof_algo_type
+ * @notify_mcsi: &enum iwl_tof_mcsi_ntfy.
+ * @reserved: For alignment and future use
+ */
+struct iwl_tof_range_req_ap_entry_v2 {
+ u8 channel_num;
+ u8 bandwidth;
+ u8 tsf_delta_direction;
+ u8 ctrl_ch_position;
+ u8 bssid[ETH_ALEN];
+ u8 measure_type;
+ u8 num_of_bursts;
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 retries_per_sample;
+ __le32 tsf_delta;
+ u8 location_req;
+ u8 asap_mode;
+ u8 enable_dyn_ack;
+ s8 rssi;
+ u8 algo_type;
+ u8 notify_mcsi;
+ __le16 reserved;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_2 */
+
+/**
+ * enum iwl_initiator_ap_flags - per responder FTM configuration flags
+ * @IWL_INITIATOR_AP_FLAGS_ASAP: Request for ASAP measurement.
+ * @IWL_INITIATOR_AP_FLAGS_LCI_REQUEST: Request for LCI information
+ * @IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST: Request for CIVIC information
+ * @IWL_INITIATOR_AP_FLAGS_DYN_ACK: Send HT/VHT ack for FTM frames. If not set,
+ * 20MHz dup ACKs will be sent.
+ * @IWL_INITIATOR_AP_FLAGS_ALGO_LR: Use LR algo type for rtt calculation.
+ * Default algo type is ML.
+ * @IWL_INITIATOR_AP_FLAGS_ALGO_FFT: Use FFT algo type for rtt calculation.
+ * Default algo type is ML.
+ * @IWL_INITIATOR_AP_FLAGS_MCSI_REPORT: Send the MCSI for each FTM frame to the
+ * driver.
+ */
+enum iwl_initiator_ap_flags {
+ IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
+ IWL_INITIATOR_AP_FLAGS_LCI_REQUEST = BIT(2),
+ IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST = BIT(3),
+ IWL_INITIATOR_AP_FLAGS_DYN_ACK = BIT(4),
+ IWL_INITIATOR_AP_FLAGS_ALGO_LR = BIT(5),
+ IWL_INITIATOR_AP_FLAGS_ALGO_FFT = BIT(6),
+ IWL_INITIATOR_AP_FLAGS_MCSI_REPORT = BIT(8),
+};
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @bandwidth: AP bandwidth. One of iwl_tof_bandwidth.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity in units of 100ms; ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @reserved: For alignment and future use
+ * @tsf_delta: not in use
+ */
+struct iwl_tof_range_req_ap_entry {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 bandwidth;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ __le16 reserved;
+ __le32 tsf_delta;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
+ * possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon
+ * timeout expiration
+ * @IWL_MVM_TOF_RESPONSE_COMPLETE: report all AP measurements as a batch at the
+ * earlier of: measurements completion / timeout
+ * expiration.
+ */
+enum iwl_tof_response_mode {
+ IWL_MVM_TOF_RESPONSE_ASAP,
+ IWL_MVM_TOF_RESPONSE_TIMEOUT,
+ IWL_MVM_TOF_RESPONSE_COMPLETE,
+};
+
+/**
+ * enum iwl_tof_initiator_flags
+ *
+ * @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run
+ * the algo on ant A+B, instead of only one of them.
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A: open RX antenna A for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B: open RX antenna B for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C: open RX antenna C for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A: use antenna A for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B: use antenna B for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C: use antenna C for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM: use random mac address for FTM
+ * @IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB: use the specific calib value from
+ * the range request command
+ * @IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB: use the common calib value from the
+ * range request command
+ * @IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT: support non-asap measurements
+ */
+enum iwl_tof_initiator_flags {
+ IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = BIT(0),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = BIT(1),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = BIT(2),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = BIT(3),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = BIT(4),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = BIT(5),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = BIT(6),
+ IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM = BIT(7),
+ IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB = BIT(15),
+ IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB = BIT(16),
+ IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20),
+}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
+
+#define IWL_MVM_TOF_MAX_APS 5
+#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5
+
+/**
+ * struct iwl_tof_range_req_cmd_v5 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @initiator: 0- NW initiated, 1 - Client Initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ * '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ * This is equivalent to the session time configured to the
+ * LMAC in Initiator Request
+ * @report_policy: Supported partially for this release: For current release -
+ * the range report will be uploaded as a batch when ready or
+ * when the session is done (successfully / partially).
+ * one of iwl_tof_response_mode.
+ * @reserved0: reserved
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ * '1' Use MAC Address randomization according to the below
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @ftm_rx_chains: Rx chain to open to receive Responder's FTMs (XVT)
+ * @ftm_tx_chains: Tx chain to send the ack to the Responder FTM (XVT)
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data
+ */
+struct iwl_tof_range_req_cmd_v5 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 initiator;
+ u8 one_sided_los_disable;
+ u8 req_timeout;
+ u8 report_policy;
+ u8 reserved0;
+ u8 num_of_ap;
+ u8 macaddr_random;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 ftm_rx_chains;
+ u8 ftm_tx_chains;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2.
+ */
+struct iwl_tof_range_req_cmd {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
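For illustration, the template/mask rule documented above (a mask bit of 0 takes the bit from the template, a mask bit of 1 randomizes it) could be expressed as the sketch below. The helper name is hypothetical and the use of get_random_bytes() is only illustrative; per the kernel-doc the actual randomization is performed by the UMAC, not the driver.

/*
 * Illustrative only: combine a MAC address template with a randomization
 * mask as described in the range request kernel-doc above.
 */
static void ftm_apply_macaddr_mask(u8 addr[ETH_ALEN],
				   const u8 tmpl[ETH_ALEN],
				   const u8 mask[ETH_ALEN])
{
	u8 rnd[ETH_ALEN];
	int i;

	get_random_bytes(rnd, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = (tmpl[i] & ~mask[i]) | (rnd[i] & mask[i]);
}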
+
+/*
+ * enum iwl_tof_range_request_status - status of the sent request
+ * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS - FW successfully received the
+ * request
+ * @IWL_TOF_RANGE_REQUEST_STATUS_BUSY - FW is busy with a previous request, the
+ * sent request will not be handled
+ */
+enum iwl_tof_range_request_status {
+ IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS,
+ IWL_TOF_RANGE_REQUEST_STATUS_BUSY,
+};
+
+/**
+ * enum iwl_tof_entry_status
+ *
+ * @IWL_TOF_ENTRY_SUCCESS: successful measurement.
+ * @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure.
+ * @IWL_TOF_ENTRY_NO_RESPONSE: Responder didn't reply to the request.
+ * @IWL_TOF_ENTRY_REQUEST_REJECTED: Responder rejected the request.
+ * @IWL_TOF_ENTRY_NOT_SCHEDULED: Time event was scheduled but not called yet.
+ * @IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: Time event triggered but no
+ * measurement was completed.
+ * @IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE: No range due to inability to
+ * switch from the primary channel.
+ * @IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED: Device doesn't support FTM.
+ * @IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON: Request aborted due to unknown
+ * reason.
+ * @IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP: Failure due to invalid
+ * T1/T4.
+ * @IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE: Failure due to invalid FTM frame
+ * structure.
+ * @IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED: Request cannot be scheduled.
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE: Responder cannot serve the
+ * initiator for some period, period supplied in @refusal_period.
+ * @IWL_TOF_ENTRY_BAD_REQUEST_ARGS: Bad request arguments.
+ * @IWL_TOF_ENTRY_WIFI_NOT_ENABLED: Wifi not enabled.
+ * @IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS: The responder overrode the
+ * original parameters within the current session.
+ */
+enum iwl_tof_entry_status {
+ IWL_TOF_ENTRY_SUCCESS = 0,
+ IWL_TOF_ENTRY_GENERAL_FAILURE = 1,
+ IWL_TOF_ENTRY_NO_RESPONSE = 2,
+ IWL_TOF_ENTRY_REQUEST_REJECTED = 3,
+ IWL_TOF_ENTRY_NOT_SCHEDULED = 4,
+ IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5,
+ IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6,
+ IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7,
+ IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8,
+ IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9,
+ IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10,
+ IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11,
+ IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12,
+ IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13,
+ IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14,
+ IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15,
+}; /* LOCATION_RANGE_RSP_AP_ENTRY_NTFY_API_S_VER_2 */
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v3 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time measured in the last measurement for the
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @reserved: reserved
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Common calib value that was used for this AP measurement
+ * @specific_calib: Specific calib value that was used for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 reserved;
+ u8 refusal_period;
+ __le32 range;
+ __le32 range_variance;
+ __le32 timestamp;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time measured in the last measurement for the
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @rx_rate_n_flags: rate and flags of the last FTM frame received from this
+ * responder
+ * @tx_rate_n_flags: rate and flags of the last ack sent to this responder
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Common calib value that was used for this AP measurement
+ * @specific_calib: Specific calib value that was used for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 rx_rate_n_flags;
+ __le32 tx_rate_n_flags;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
+
+/**
+ * enum iwl_tof_response_status - tof response status
+ *
+ * @IWL_TOF_RESPONSE_SUCCESS: successful range.
+ * @IWL_TOF_RESPONSE_TIMEOUT: request aborted due to timeout expiration.
+ * partial result of ranges done so far is included in the response.
+ * @IWL_TOF_RESPONSE_ABORTED: Measurement aborted by command.
+ * @IWL_TOF_RESPONSE_FAILED: Measurement request command failed.
+ */
+enum iwl_tof_response_status {
+ IWL_TOF_RESPONSE_SUCCESS = 0,
+ IWL_TOF_RESPONSE_TIMEOUT = 1,
+ IWL_TOF_RESPONSE_ABORTED = 4,
+ IWL_TOF_RESPONSE_FAILED = 5,
+}; /* LOCATION_RNG_RSP_STATUS */
+
+/**
+ * struct iwl_tof_range_rsp_ntfy_v5 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session, one of
+ * &enum iwl_tof_response_status.
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v5 {
+ u8 request_id;
+ u8 request_status;
+ u8 last_in_batch;
+ u8 num_of_aps;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */
+
+/**
+ * struct iwl_tof_range_rsp_ntfy - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of AP results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
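A hedged sketch of how a notification handler might walk the per-AP entries of this v6 response. The function name is hypothetical and error handling is reduced to a defensive clamp on num_of_aps.

/* Illustrative only: iterate per-AP results of a v6 range response. */
static void ftm_walk_range_rsp(const struct iwl_tof_range_rsp_ntfy *rsp)
{
	u8 i, n = rsp->num_of_aps;

	/* more entries than the array holds would indicate a firmware bug */
	if (n > IWL_MVM_TOF_MAX_APS)
		n = IWL_MVM_TOF_MAX_APS;

	for (i = 0; i < n; i++) {
		const struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &rsp->ap[i];

		if (ap->measure_status != IWL_TOF_ENTRY_SUCCESS)
			continue;

		pr_debug("%pM: rtt %u psec, rssi %d\n",
			 ap->bssid, le32_to_cpu(ap->rtt), ap->rssi);
	}
}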
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @reserved: reserved
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+ u8 token;
+ u8 role;
+ __le16 reserved;
+ u8 initiator_bssid[ETH_ALEN];
+ u8 responder_bssid[ETH_ALEN];
+ u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ * @reserved: reserved
+ */
+struct iwl_tof_range_abort_cmd {
+ u8 request_id;
+ u8 reserved[3];
+} __packed;
+
+enum ftm_responder_stats_flags {
+ FTM_RESP_STAT_NON_ASAP_STARTED = BIT(0),
+ FTM_RESP_STAT_NON_ASAP_IN_WIN = BIT(1),
+ FTM_RESP_STAT_NON_ASAP_OUT_WIN = BIT(2),
+ FTM_RESP_STAT_TRIGGER_DUP = BIT(3),
+ FTM_RESP_STAT_DUP = BIT(4),
+ FTM_RESP_STAT_DUP_IN_WIN = BIT(5),
+ FTM_RESP_STAT_DUP_OUT_WIN = BIT(6),
+ FTM_RESP_STAT_SCHED_SUCCESS = BIT(7),
+ FTM_RESP_STAT_ASAP_REQ = BIT(8),
+ FTM_RESP_STAT_NON_ASAP_REQ = BIT(9),
+ FTM_RESP_STAT_ASAP_RESP = BIT(10),
+ FTM_RESP_STAT_NON_ASAP_RESP = BIT(11),
+ FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = BIT(12),
+ FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = BIT(13),
+ FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = BIT(14),
+ FTM_RESP_STAT_FAIL_NEXT_SERVED = BIT(15),
+ FTM_RESP_STAT_FAIL_TRIGGER_ERR = BIT(16),
+ FTM_RESP_STAT_FAIL_GC = BIT(17),
+ FTM_RESP_STAT_SUCCESS = BIT(18),
+ FTM_RESP_STAT_INTEL_IE = BIT(19),
+ FTM_RESP_STAT_INITIATOR_ACTIVE = BIT(20),
+ FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = BIT(21),
+ FTM_RESP_STAT_TRIGGER_UNKNOWN = BIT(22),
+ FTM_RESP_STAT_PROCESS_FAIL = BIT(23),
+ FTM_RESP_STAT_ACK = BIT(24),
+ FTM_RESP_STAT_NACK = BIT(25),
+ FTM_RESP_STAT_INVALID_INITIATOR_ID = BIT(26),
+ FTM_RESP_STAT_TIMER_MIN_DELTA = BIT(27),
+ FTM_RESP_STAT_INITIATOR_REMOVED = BIT(28),
+ FTM_RESP_STAT_INITIATOR_ADDED = BIT(29),
+ FTM_RESP_STAT_ERR_LIST_FULL = BIT(30),
+ FTM_RESP_STAT_INITIATOR_SCHED_NOW = BIT(31),
+}; /* RESP_IND_E */
+
+/**
+ * struct iwl_ftm_responder_stats - FTM responder statistics
+ * @addr: initiator address
+ * @success_ftm: number of successful FTM frames
+ * @ftm_per_burst: number of FTM frames that were received
+ * @flags: &enum ftm_responder_stats_flags
+ * @duration: actual duration of FTM
+ * @allocated_duration: time that was allocated for this FTM session
+ * @bw: FTM request bandwidth
+ * @rate: FTM request rate
+ * @reserved: for alignment and future use
+ */
+struct iwl_ftm_responder_stats {
+ u8 addr[ETH_ALEN];
+ u8 success_ftm;
+ u8 ftm_per_burst;
+ __le32 flags;
+ __le32 duration;
+ __le32 allocated_duration;
+ u8 bw;
+ u8 rate;
+ __le16 reserved;
+} __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */
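The @flags word carries bits from enum ftm_responder_stats_flags. A minimal sketch of decoding a few of them for debug output; the helper is hypothetical and only shows the byte-order handling.

/* Illustrative only: dump a few responder statistics fields. */
static void ftm_resp_stats_dbg(const struct iwl_ftm_responder_stats *st)
{
	u32 flags = le32_to_cpu(st->flags);

	pr_debug("%pM: ftm %u/%u, dur %u/%u, %s\n",
		 st->addr, st->success_ftm, st->ftm_per_burst,
		 le32_to_cpu(st->duration),
		 le32_to_cpu(st->allocated_duration),
		 (flags & FTM_RESP_STAT_SUCCESS) ? "success" : "failed");
}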
+
+#define IWL_CSI_CHUNK_CTL_NUM_MASK 0x3
+#define IWL_CSI_CHUNK_CTL_IDX_MASK 0xc
+
+struct iwl_csi_chunk_notification {
+ __le32 token;
+ __le16 seq;
+ __le16 ctl;
+ __le32 size;
+ u8 data[];
+} __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1 */
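The two IWL_CSI_CHUNK_CTL_* masks pack a chunk count and the index of this chunk into @ctl. A sketch of decoding them is below; the exact encoding (whether NUM is the total count or count minus one) is an assumption here, not something this header states.

/* Illustrative only: decode the ctl field of a CSI chunk notification. */
#include <linux/bitfield.h>

static void csi_chunk_decode(const struct iwl_csi_chunk_notification *c)
{
	u16 ctl = le16_to_cpu(c->ctl);
	u8 num = FIELD_GET(IWL_CSI_CHUNK_CTL_NUM_MASK, ctl); /* assumed: total chunks */
	u8 idx = FIELD_GET(IWL_CSI_CHUNK_CTL_IDX_MASK, ctl); /* assumed: this chunk */

	pr_debug("CSI token %u: chunk %u of %u, %u bytes\n",
		 le32_to_cpu(c->token), idx, num, le32_to_cpu(c->size));
}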
+
+#endif /* __iwl_fw_api_location_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index ca49db786ed6..6b4d59daacd6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -74,6 +74,10 @@ enum iwl_mac_conf_subcmd_ids {
*/
LOW_LATENCY_CMD = 0x3,
/**
+ * @CHANNEL_SWITCH_TIME_EVENT_CMD: &struct iwl_chan_switch_te_cmd
+ */
+ CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4,
+ /**
* @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
*/
PROBE_RESPONSE_DATA_NOTIF = 0xFC,
@@ -136,6 +140,29 @@ struct iwl_channel_switch_noa_notif {
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
/**
+ * struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
+ *
+ * @mac_id: MAC ID for channel switch
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @tsf: beacon tsf
+ * @cs_count: channel switch count from CSA/eCSA IE
+ * @cs_delayed_bcn_count: if set to N (!= 0) GO/AP can delay N beacon intervals
+ * at the new channel after the channel switch, otherwise (N == 0) expect
+ * beacon right after the channel switch.
+ * @cs_mode: 1 - quiet, 0 - otherwise
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_chan_switch_te_cmd {
+ __le32 mac_id;
+ __le32 action;
+ __le32 tsf;
+ u8 cs_count;
+ u8 cs_delayed_bcn_count;
+ u8 cs_mode;
+ u8 reserved;
+} __packed; /* MAC_CHANNEL_SWITCH_TIME_EVENT_S_VER_2 */
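A hedged sketch of filling and sending the new command. The mvm pointer, mac_id, and the CSA parameters are placeholders, and the WIDE_ID()/iwl_mvm_send_cmd_pdu() usage simply mirrors the usual MAC_CONF_GROUP pattern rather than anything mandated by this header.

/* Illustrative only: build and send a channel switch time event command. */
static int example_send_cs_te(struct iwl_mvm *mvm, u32 mac_id, u32 bcn_tsf,
			      u8 count, bool quiet)
{
	struct iwl_chan_switch_te_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.tsf = cpu_to_le32(bcn_tsf),
		.cs_count = count,
		.cs_mode = quiet ? 1 : 0,
	};

	return iwl_mvm_send_cmd_pdu(mvm,
				    WIDE_ID(MAC_CONF_GROUP,
					    CHANNEL_SWITCH_TIME_EVENT_CMD),
				    0, sizeof(cmd), &cmd);
}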
+
+/**
* struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode'
*
* @mac_id: MAC ID to whom to apply the low-latency configurations
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 7a3f7b7e6358..941c50477003 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -433,6 +433,28 @@ struct iwl_he_backoff_conf {
__le16 mu_time;
} __packed; /* AC_QOS_DOT11AX_API_S */
+/**
+ * enum iwl_he_pkt_ext_constellations - PPE constellation indices
+ * @IWL_HE_PKT_EXT_BPSK: BPSK
+ * @IWL_HE_PKT_EXT_QPSK: QPSK
+ * @IWL_HE_PKT_EXT_16QAM: 16-QAM
+ * @IWL_HE_PKT_EXT_64QAM: 64-QAM
+ * @IWL_HE_PKT_EXT_256QAM: 256-QAM
+ * @IWL_HE_PKT_EXT_1024QAM: 1024-QAM
+ * @IWL_HE_PKT_EXT_RESERVED: reserved value
+ * @IWL_HE_PKT_EXT_NONE: not defined
+ */
+enum iwl_he_pkt_ext_constellations {
+ IWL_HE_PKT_EXT_BPSK = 0,
+ IWL_HE_PKT_EXT_QPSK,
+ IWL_HE_PKT_EXT_16QAM,
+ IWL_HE_PKT_EXT_64QAM,
+ IWL_HE_PKT_EXT_256QAM,
+ IWL_HE_PKT_EXT_1024QAM,
+ IWL_HE_PKT_EXT_RESERVED,
+ IWL_HE_PKT_EXT_NONE,
+};
+
#define MAX_HE_SUPP_NSS 2
#define MAX_HE_CHANNEL_BW_INDX 4
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 45f61c6af14e..b833b80ea3d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -95,17 +97,36 @@
#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
/*
+ * struct iwl_fw_channel_info_v1 - channel information
+ *
* @band: PHY_BAND_*
* @channel: channel number
* @width: PHY_[VHT|LEGACY]_CHANNEL_*
* @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
*/
-struct iwl_fw_channel_info {
+struct iwl_fw_channel_info_v1 {
u8 band;
u8 channel;
u8 width;
u8 ctrl_pos;
-} __packed;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */
+
+/*
+ * struct iwl_fw_channel_info - channel information
+ *
+ * @channel: channel number
+ * @band: PHY_BAND_*
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ * @reserved: for future use and alignment
+ */
+struct iwl_fw_channel_info {
+ __le32 channel;
+ u8 band;
+ u8 width;
+ u8 ctrl_pos;
+ u8 reserved;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_2 */
#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
@@ -134,6 +155,22 @@ struct iwl_fw_channel_info {
/* TODO: complete missing documentation */
/**
+ * struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with
+ * various channel structures.
+ *
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwl_phy_context_cmd_tail {
+ __le32 txchain_info;
+ __le32 rxchain_info;
+ __le32 acquisition_data;
+ __le32 dsp_cfg_flags;
+} __packed;
+
+/**
* struct iwl_phy_context_cmd - config of the PHY context
* ( PHY_CONTEXT_CMD = 0x8 )
* @id_and_color: ID and color of the relevant Binding
@@ -142,10 +179,7 @@ struct iwl_fw_channel_info {
* other value means apply new params after X usecs
* @tx_param_color: ???
* @ci: channel info
- * @txchain_info: ???
- * @rxchain_info: ???
- * @acquisition_data: ???
- * @dsp_cfg_flags: set to 0
+ * @tail: command tail
*/
struct iwl_phy_context_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@@ -155,10 +189,7 @@ struct iwl_phy_context_cmd {
__le32 apply_time;
__le32 tx_param_color;
struct iwl_fw_channel_info ci;
- __le32 txchain_info;
- __le32 rxchain_info;
- __le32 acquisition_data;
- __le32 dsp_cfg_flags;
+ struct iwl_phy_context_cmd_tail tail;
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
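The motivation for splitting the tail out of struct iwl_phy_context_cmd is that struct iwl_fw_channel_info changed size between API v1 and v2, so everything after @ci sits at a version-dependent offset. A sketch of a helper that locates the tail either way; the firmware capability check is abstracted into a bool here, so this is an assumption about usage rather than an API defined by the header.

/* Illustrative only: find the tail after a fixed- or grown-size channel info. */
static struct iwl_phy_context_cmd_tail *
phy_ctxt_cmd_tail(struct iwl_phy_context_cmd *cmd, bool new_channel_info_api)
{
	size_t ci_size = new_channel_info_api ?
		sizeof(struct iwl_fw_channel_info) :
		sizeof(struct iwl_fw_channel_info_v1);

	return (void *)((u8 *)&cmd->ci + ci_size);
}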
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 286a22da232d..01f003c6cff9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -200,9 +200,16 @@ struct iwl_powertable_cmd {
* @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK:
* '1' Allow to save power by turning off
* receiver and transmitter. '0' - does not allow.
+ * @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK:
+ * Device Retention indication, '1' indicates retention is enabled.
+ * @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK:
+ * 32 kHz external slow clock valid indication, '1' indicates the clock is
+ * valid.
*/
enum iwl_device_power_flags {
- DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+ DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+ DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1),
+ DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12),
};
/**
@@ -470,6 +477,13 @@ struct iwl_geo_tx_power_profiles_resp {
* @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
* for a longer period of time then this escape-timeout. Units: Beacons.
* @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ * @bf_threshold_absolute_low: See below.
+ * @bf_threshold_absolute_high: Send the beacon to the driver if the energy
+ * value calculated for this beacon crossed this absolute threshold. For the
+ * 'Increase' direction bf_threshold_absolute_low[i] is used, for the
+ * 'Decrease' direction bf_threshold_absolute_high[i] is used. A zero value
+ * means that this specific threshold is ignored for beacon filtering, and
+ * the beacon will not be forced to be sent to the driver due to this setting.
*/
struct iwl_beacon_filter_cmd {
__le32 bf_energy_delta;
@@ -483,7 +497,9 @@ struct iwl_beacon_filter_cmd {
__le32 bf_escape_timer;
__le32 ba_escape_timer;
__le32 ba_enable_beacon_abort;
-} __packed;
+ __le32 bf_threshold_absolute_low[2];
+ __le32 bf_threshold_absolute_high[2];
+} __packed; /* BEACON_FILTER_CONFIG_API_S_VER_4 */
/* Beacon filtering and beacon abort */
#define IWL_BF_ENERGY_DELTA_DEFAULT 5
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 0791a854fc8f..6e8224ce8906 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -209,8 +209,6 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask
* @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift
- * @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status
- * @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2
*/
enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
@@ -238,8 +236,6 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24,
RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT,
- RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
- RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
};
/* 9000 series API */
@@ -337,6 +333,8 @@ enum iwl_rx_mpdu_phy_info {
IWL_RX_MPDU_PHY_AMPDU = BIT(5),
IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6),
IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7),
+ /* short preamble is only for CCK, for non-CCK it is overridden by this */
+ IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = BIT(7),
IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8),
};
@@ -723,6 +721,9 @@ struct iwl_rx_mpdu_desc {
#define RX_NO_DATA_FRAME_TIME_POS 0
#define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS)
+#define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000
+#define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000
+
/**
* struct iwl_rx_no_data - RX no data descriptor
* @info: 7:0 frame type, 15:8 RX error type
@@ -743,7 +744,7 @@ struct iwl_rx_no_data {
__le32 fr_time;
__le32 rate;
__le32 phy_info[2];
- __le32 rx_vec[3];
+ __le32 rx_vec[2];
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
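A sketch of pulling the NSTS field out of rx_vec[0] with the two new masks. Whether the HE or VHT mask applies would normally be decided from the frame type in @info, which is glossed over here, and the "field stores NSTS - 1" convention is an assumption.

/* Illustrative only: extract the number of space-time streams. */
static u8 rx_no_data_nsts(const struct iwl_rx_no_data *desc, bool is_he)
{
	u32 vec0 = le32_to_cpu(desc->rx_vec[0]);

	/* FIELD_GET() comes from <linux/bitfield.h> */
	if (is_he)
		return FIELD_GET(RX_NO_DATA_RX_VEC0_HE_NSTS_MSK, vec0) + 1;

	return FIELD_GET(RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK, vec0) + 1;
}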
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 18741889ec30..890a939c463d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -434,7 +434,7 @@ struct iwl_periodic_scan_complete {
/* The maximum of either of these cannot exceed 8, because we use an
* 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
*/
-#define IWL_MVM_MAX_UMAC_SCANS 8
+#define IWL_MVM_MAX_UMAC_SCANS 4
#define IWL_MVM_MAX_LMAC_SCANS 1
enum scan_config_flags {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 53cb622aa9ab..318843138490 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,6 +30,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -363,14 +365,7 @@ struct mvm_statistics_general_v8 {
u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
-struct mvm_statistics_general_cdb_v9 {
- struct mvm_statistics_general_common_v19 common;
- __le32 beacon_counter[NUM_MAC_INDEX_CDB];
- u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
- u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
-} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */
-
-struct mvm_statistics_general_cdb {
+struct mvm_statistics_general {
struct mvm_statistics_general_common common;
__le32 beacon_counter[MAC_INDEX_AUX];
u8 beacon_average_energy[MAC_INDEX_AUX];
@@ -435,11 +430,11 @@ struct iwl_notif_statistics_v11 {
struct mvm_statistics_load_v1 load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
-struct iwl_notif_statistics_cdb {
+struct iwl_notif_statistics {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
- struct mvm_statistics_general_cdb general;
+ struct mvm_statistics_general general;
struct mvm_statistics_load load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_13 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index 7c6c2462d0e8..b089285ac466 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -111,6 +113,17 @@ struct iwl_tdls_channel_switch_frame {
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
/**
+ * struct iwl_tdls_channel_switch_cmd_tail - tail of iwl_tdls_channel_switch_cmd
+ *
+ * @timing: timing related data for command
+ * @frame: channel-switch request/response template, depending on switch_type
+ */
+struct iwl_tdls_channel_switch_cmd_tail {
+ struct iwl_tdls_channel_switch_timing timing;
+ struct iwl_tdls_channel_switch_frame frame;
+} __packed;
+
+/**
* struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
*
* The command is sent to initiate a channel switch and also in response to
@@ -119,15 +132,13 @@ struct iwl_tdls_channel_switch_frame {
* @switch_type: see &enum iwl_tdls_channel_switch_type
* @peer_sta_id: station id of TDLS peer
* @ci: channel we switch to
- * @timing: timing related data for command
- * @frame: channel-switch request/response template, depending to switch_type
+ * @tail: command tail
*/
struct iwl_tdls_channel_switch_cmd {
u8 switch_type;
__le32 peer_sta_id;
struct iwl_fw_channel_info ci;
- struct iwl_tdls_channel_switch_timing timing;
- struct iwl_tdls_channel_switch_frame frame;
+ struct iwl_tdls_channel_switch_cmd_tail tail;
} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index f824bebceb06..4621ef93a2cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -318,6 +320,25 @@ struct iwl_time_event_notif {
} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
/*
+ * struct iwl_hs20_roc_req_tail - tail of iwl_hs20_roc_req
+ *
+ * @node_addr: Our MAC Address
+ * @reserved: reserved for alignment
+ * @apply_time: GP2 value to start (should always be the current GP2 value)
+ * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
+ * time by which start of the event is allowed to be postponed.
+ * @duration: event duration in TU. To calculate event duration:
+ * timeEventDuration = min(duration, remainingQuota)
+ */
+struct iwl_hs20_roc_req_tail {
+ u8 node_addr[ETH_ALEN];
+ __le16 reserved;
+ __le32 apply_time;
+ __le32 apply_time_max_delay;
+ __le32 duration;
+} __packed;
+
+/*
* Aux ROC command
*
* Command requests the firmware to create a time event for a certain duration
@@ -336,13 +357,6 @@ struct iwl_time_event_notif {
* @sta_id_and_color: station id and color, resumed during "Remain On Channel"
* activity.
* @channel_info: channel info
- * @node_addr: Our MAC Address
- * @reserved: reserved for alignment
- * @apply_time: GP2 value to start (should always be the current GP2 value)
- * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
- * time by which start of the event is allowed to be postponed.
- * @duration: event duration in TU To calculate event duration:
- * timeEventDuration = min(duration, remainingQuota)
*/
struct iwl_hs20_roc_req {
/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
@@ -351,11 +365,7 @@ struct iwl_hs20_roc_req {
__le32 event_unique_id;
__le32 sta_id_and_color;
struct iwl_fw_channel_info channel_info;
- u8 node_addr[ETH_ALEN];
- __le16 reserved;
- __le32 apply_time;
- __le32 apply_time_max_delay;
- __le32 duration;
+ struct iwl_hs20_roc_req_tail tail;
} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h
deleted file mode 100644
index 7328a1606146..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h
+++ /dev/null
@@ -1,393 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_fw_api_tof_h__
-#define __iwl_fw_api_tof_h__
-
-/* ToF sub-group command IDs */
-enum iwl_mvm_tof_sub_grp_ids {
- TOF_RANGE_REQ_CMD = 0x1,
- TOF_CONFIG_CMD = 0x2,
- TOF_RANGE_ABORT_CMD = 0x3,
- TOF_RANGE_REQ_EXT_CMD = 0x4,
- TOF_RESPONDER_CONFIG_CMD = 0x5,
- TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
- TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
- TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
- TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
- TOF_RANGE_RESPONSE_NOTIF = 0xFE,
- TOF_MCSI_DEBUG_NOTIF = 0xFB,
-};
-
-/**
- * struct iwl_tof_config_cmd - ToF configuration
- * @tof_disabled: 0 enabled, 1 - disabled
- * @one_sided_disabled: 0 enabled, 1 - disabled
- * @is_debug_mode: 1 debug mode, 0 - otherwise
- * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise
- */
-struct iwl_tof_config_cmd {
- __le32 sub_grp_cmd_id;
- u8 tof_disabled;
- u8 one_sided_disabled;
- u8 is_debug_mode;
- u8 is_buf_required;
-} __packed;
-
-/**
- * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
- * @burst_period: future use: (currently hard coded in the LMAC)
- * The interval between two sequential bursts.
- * @min_delta_ftm: future use: (currently hard coded in the LMAC)
- * The minimum delay between two sequential FTM Responses
- * in the same burst.
- * @burst_duration: future use: (currently hard coded in the LMAC)
- * The total time for all FTMs handshake in the same burst.
- * Affect the time events duration in the LMAC.
- * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
- * The number of bursts for the current ToF request. Affect
- * the number of events allocations in the current iteration.
- * @get_ch_est: for xVT only, NA for driver
- * @abort_responder: when set to '1' - Responder will terminate its activity
- * (all other fields in the command are ignored)
- * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
- * params and use the recomended Initiator params.
- * 0 - otherwise
- * @channel_num: current AP Channel
- * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
- * @rate: current AP rate
- * @ctrl_ch_position: coding of the control channel position relative to
- * the center frequency:
- *
- * 40 MHz
- * 0 below center, 1 above center
- *
- * 80 MHz
- * bits [0..1]
- * * 0 the near 20MHz to the center,
- * * 1 the far 20MHz to the center
- * bit[2]
- * as above 40MHz
- * @ftm_per_burst: FTMs per Burst
- * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
- * '1' - we measure over the Initial FTM Response
- * @asap_mode: ASAP / Non ASAP mode for the current WLS station
- * @sta_id: index of the AP STA when in AP mode
- * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
- * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
- * purposes, simulating station movement by adding various values
- * to this field
- * @bssid: Current AP BSSID
- */
-struct iwl_tof_responder_config_cmd {
- __le32 sub_grp_cmd_id;
- __le16 burst_period;
- u8 min_delta_ftm;
- u8 burst_duration;
- u8 num_of_burst_exp;
- u8 get_ch_est;
- u8 abort_responder;
- u8 recv_sta_req_params;
- u8 channel_num;
- u8 bandwidth;
- u8 rate;
- u8 ctrl_ch_position;
- u8 ftm_per_burst;
- u8 ftm_resp_ts_avail;
- u8 asap_mode;
- u8 sta_id;
- __le16 tsf_timer_offset_msecs;
- __le16 toa_offset;
- u8 bssid[ETH_ALEN];
-} __packed;
-
-/**
- * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
- * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
- * @reserved: reserved
- * @min_delta_ftm: Minimal time between two consecutive measurements,
- * in units of 100us. 0 means no preference by station
- * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
- * value be sent to the AP
- * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
- * value to be sent to the AP
- * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
- * value to be sent to the AP
- */
-struct iwl_tof_range_req_ext_cmd {
- __le32 sub_grp_cmd_id;
- __le16 tsf_timer_offset_msec;
- __le16 reserved;
- u8 min_delta_ftm;
- u8 ftm_format_and_bw20M;
- u8 ftm_format_and_bw40M;
- u8 ftm_format_and_bw80M;
-} __packed;
-
-#define IWL_MVM_TOF_MAX_APS 21
-
-/**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
- * @channel_num: Current AP Channel
- * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
- * @tsf_delta_direction: TSF relatively to the subject AP
- * @ctrl_ch_position: Coding of the control channel position relative to the
- * center frequency.
- * 40MHz 0 below center, 1 above center
- * 80MHz bits [0..1]: 0 the near 20MHz to the center,
- * 1 the far 20MHz to the center
- * bit[2] as above 40MHz
- * @bssid: AP's bss id
- * @measure_type: Measurement type: 0 - two sided, 1 - One sided
- * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
- * number of measurement iterations (min 2^0 = 1, max 2^14)
- * @burst_period: Recommended value to be sent to the AP. Measurement
- * periodicity In units of 100ms. ignored if num_of_bursts = 0
- * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
- * 1-sided: how many rts/cts pairs should be used per burst.
- * @retries_per_sample: Max number of retries that the LMAC should send
- * in case of no replies by the AP.
- * @tsf_delta: TSF Delta in units of microseconds.
- * The difference between the AP TSF and the device local clock.
- * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
- * Bit[1] Civic should be sent in the FTMR
- * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
- * @enable_dyn_ack: Enable Dynamic ACK BW.
- * 0 Initiator interact with regular AP
- * 1 Initiator interact with Responder machine: need to send the
- * Initiator Acks with HT 40MHz / 80MHz, since the Responder should
- * use it for its ch est measurement (this flag will be set when we
- * configure the opposite machine to be Responder).
- * @rssi: Last received value
- * leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value.
- */
-struct iwl_tof_range_req_ap_entry {
- u8 channel_num;
- u8 bandwidth;
- u8 tsf_delta_direction;
- u8 ctrl_ch_position;
- u8 bssid[ETH_ALEN];
- u8 measure_type;
- u8 num_of_bursts;
- __le16 burst_period;
- u8 samples_per_burst;
- u8 retries_per_sample;
- __le32 tsf_delta;
- u8 location_req;
- u8 asap_mode;
- u8 enable_dyn_ack;
- s8 rssi;
-} __packed;
-
-/**
- * enum iwl_tof_response_mode
- * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
- * possible (not supported for this release)
- * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
- * timeout expiration
- * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
- * earlier of: measurements completion / timeout
- * expiration.
- */
-enum iwl_tof_response_mode {
- IWL_MVM_TOF_RESPOSE_ASAP = 1,
- IWL_MVM_TOF_RESPOSE_TIMEOUT,
- IWL_MVM_TOF_RESPOSE_COMPLETE,
-};
-
-/**
- * struct iwl_tof_range_req_cmd - start measurement cmd
- * @request_id: A Token incremented per request. The same Token will be
- * sent back in the range response
- * @initiator: 0- NW initiated, 1 - Client Initiated
- * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
- * '1' - run ML-Algo for ToF only
- * @req_timeout: Requested timeout of the response in units of 100ms.
- * This is equivalent to the session time configured to the
- * LMAC in Initiator Request
- * @report_policy: Supported partially for this release: For current release -
- * the range report will be uploaded as a batch when ready or
- * when the session is done (successfully / partially).
- * one of iwl_tof_response_mode.
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
- * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
- * '1' Use MAC Address randomization according to the below
- * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
- * Bits set to 1 shall be randomized by the UMAC
- * @ap: per-AP request data
- */
-struct iwl_tof_range_req_cmd {
- __le32 sub_grp_cmd_id;
- u8 request_id;
- u8 initiator;
- u8 one_sided_los_disable;
- u8 req_timeout;
- u8 report_policy;
- u8 los_det_disable;
- u8 num_of_ap;
- u8 macaddr_random;
- u8 macaddr_template[ETH_ALEN];
- u8 macaddr_mask[ETH_ALEN];
- struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
-} __packed;
-
-/**
- * struct iwl_tof_gen_resp_cmd - generic ToF response
- */
-struct iwl_tof_gen_resp_cmd {
- __le32 sub_grp_cmd_id;
- u8 data[];
-} __packed;
-
-/**
- * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
- * @bssid: BSSID of the AP
- * @measure_status: current APs measurement status, one of
- * &enum iwl_tof_entry_status.
- * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
- * @rtt: The Round Trip Time that took for the last measurement for
- * current AP [nSec]
- * @rtt_variance: The Variance of the RTT values measured for current AP
- * @rtt_spread: The Difference between the maximum and the minimum RTT
- * values measured for current AP in the current session [nsec]
- * @rssi: RSSI as uploaded in the Channel Estimation notification
- * @rssi_spread: The Difference between the maximum and the minimum RSSI values
- * measured for current AP in the current session
- * @reserved: reserved
- * @range: Measured range [cm]
- * @range_variance: Measured range variance [cm]
- * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
- * uploaded by the LMAC
- */
-struct iwl_tof_range_rsp_ap_entry_ntfy {
- u8 bssid[ETH_ALEN];
- u8 measure_status;
- u8 measure_bw;
- __le32 rtt;
- __le32 rtt_variance;
- __le32 rtt_spread;
- s8 rssi;
- u8 rssi_spread;
- __le16 reserved;
- __le32 range;
- __le32 range_variance;
- __le32 timestamp;
-} __packed;
-
-/**
- * struct iwl_tof_range_rsp_ntfy -
- * @request_id: A Token ID of the corresponding Range request
- * @request_status: status of current measurement session
- * @last_in_batch: reprot policy (when not all responses are uploaded at once)
- * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
- * @ap: per-AP data
- */
-struct iwl_tof_range_rsp_ntfy {
- u8 request_id;
- u8 request_status;
- u8 last_in_batch;
- u8 num_of_aps;
- struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
-} __packed;
-
-#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
-/**
- * struct iwl_tof_mcsi_notif - used for debug
- * @token: token ID for the current session
- * @role: '0' - initiator, '1' - responder
- * @reserved: reserved
- * @initiator_bssid: initiator machine
- * @responder_bssid: responder machine
- * @mcsi_buffer: debug data
- */
-struct iwl_tof_mcsi_notif {
- u8 token;
- u8 role;
- __le16 reserved;
- u8 initiator_bssid[ETH_ALEN];
- u8 responder_bssid[ETH_ALEN];
- u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
-} __packed;
-
-/**
- * struct iwl_tof_neighbor_report_notif
- * @bssid: BSSID of the AP which sent the report
- * @request_token: same token as the corresponding request
- * @status:
- * @report_ie_len: the length of the response frame starting from the Element ID
- * @data: the IEs
- */
-struct iwl_tof_neighbor_report {
- u8 bssid[ETH_ALEN];
- u8 request_token;
- u8 status;
- __le16 report_ie_len;
- u8 data[];
-} __packed;
-
-/**
- * struct iwl_tof_range_abort_cmd
- * @request_id: corresponds to a range request
- * @reserved: reserved
- */
-struct iwl_tof_range_abort_cmd {
- __le32 sub_grp_cmd_id;
- u8 request_id;
- u8 reserved[3];
-} __packed;
-
-#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 358bdf051e83..8511e735c374 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -847,13 +847,13 @@ struct iwl_beacon_notif {
} __packed;
/**
- * struct iwl_extended_beacon_notif - notifies about beacon transmission
+ * struct iwl_extended_beacon_notif_v5 - notifies about beacon transmission
* @beacon_notify_hdr: tx response command associated with the beacon
* @tsf: last beacon tsf
* @ibss_mgr_status: whether IBSS is manager
* @gp2: last beacon time in gp2
*/
-struct iwl_extended_beacon_notif {
+struct iwl_extended_beacon_notif_v5 {
struct iwl_mvm_tx_resp beacon_notify_hdr;
__le64 tsf;
__le32 ibss_mgr_status;
@@ -861,6 +861,20 @@ struct iwl_extended_beacon_notif {
} __packed; /* BEACON_NTFY_API_S_VER_5 */
/**
+ * struct iwl_extended_beacon_notif - notifies about beacon transmission
+ * @status: the status of the Tx response of the beacon
+ * @tsf: last beacon tsf
+ * @ibss_mgr_status: whether IBSS is manager
+ * @gp2: last beacon time in gp2
+ */
+struct iwl_extended_beacon_notif {
+ __le32 status;
+ __le64 tsf;
+ __le32 ibss_mgr_status;
+ __le32 gp2;
+} __packed; /* BEACON_NTFY_API_S_VER_6_ */
+
+/**
* enum iwl_dump_control - dump (flush) control flags
* @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty
* and the TFD queues are empty.
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 2a19b178c5e8..f119c49cd39c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -8,7 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -242,7 +242,8 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
cfg->lmac[0].rxfifo1_size, 0, 0);
/* Pull RXF2 */
iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
- RXF_DIFF_FROM_PREV, 1);
+ RXF_DIFF_FROM_PREV +
+ fwrt->trans->cfg->umac_prph_offset, 1);
/* Pull LMAC2 RXF1 */
if (fwrt->smem_cfg.num_lmacs > 1)
iwl_fwrt_dump_rxf(fwrt, dump_data,
@@ -469,6 +470,93 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a02400, .end = 0x00a02758 },
};
+static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
+ { .start = 0x00a00000, .end = 0x00a00000 },
+ { .start = 0x00a0000c, .end = 0x00a00024 },
+ { .start = 0x00a0002c, .end = 0x00a00034 },
+ { .start = 0x00a0003c, .end = 0x00a0003c },
+ { .start = 0x00a00410, .end = 0x00a00418 },
+ { .start = 0x00a00420, .end = 0x00a00420 },
+ { .start = 0x00a00428, .end = 0x00a00428 },
+ { .start = 0x00a00430, .end = 0x00a0043c },
+ { .start = 0x00a00444, .end = 0x00a00444 },
+ { .start = 0x00a00840, .end = 0x00a00840 },
+ { .start = 0x00a00850, .end = 0x00a00858 },
+ { .start = 0x00a01004, .end = 0x00a01008 },
+ { .start = 0x00a01010, .end = 0x00a01010 },
+ { .start = 0x00a01018, .end = 0x00a01018 },
+ { .start = 0x00a01024, .end = 0x00a01024 },
+ { .start = 0x00a0102c, .end = 0x00a01034 },
+ { .start = 0x00a0103c, .end = 0x00a01040 },
+ { .start = 0x00a01048, .end = 0x00a01050 },
+ { .start = 0x00a01058, .end = 0x00a01058 },
+ { .start = 0x00a01060, .end = 0x00a01070 },
+ { .start = 0x00a0108c, .end = 0x00a0108c },
+ { .start = 0x00a01c20, .end = 0x00a01c28 },
+ { .start = 0x00a01d10, .end = 0x00a01d10 },
+ { .start = 0x00a01e28, .end = 0x00a01e2c },
+ { .start = 0x00a01e60, .end = 0x00a01e60 },
+ { .start = 0x00a01e80, .end = 0x00a01e80 },
+ { .start = 0x00a01ea0, .end = 0x00a01ea0 },
+ { .start = 0x00a02000, .end = 0x00a0201c },
+ { .start = 0x00a02024, .end = 0x00a02024 },
+ { .start = 0x00a02040, .end = 0x00a02048 },
+ { .start = 0x00a020c0, .end = 0x00a020e0 },
+ { .start = 0x00a02400, .end = 0x00a02404 },
+ { .start = 0x00a0240c, .end = 0x00a02414 },
+ { .start = 0x00a0241c, .end = 0x00a0243c },
+ { .start = 0x00a02448, .end = 0x00a024bc },
+ { .start = 0x00a024c4, .end = 0x00a024cc },
+ { .start = 0x00a02508, .end = 0x00a02508 },
+ { .start = 0x00a02510, .end = 0x00a02514 },
+ { .start = 0x00a0251c, .end = 0x00a0251c },
+ { .start = 0x00a0252c, .end = 0x00a0255c },
+ { .start = 0x00a02564, .end = 0x00a025a0 },
+ { .start = 0x00a025a8, .end = 0x00a025b4 },
+ { .start = 0x00a025c0, .end = 0x00a025c0 },
+ { .start = 0x00a025e8, .end = 0x00a025f4 },
+ { .start = 0x00a02c08, .end = 0x00a02c18 },
+ { .start = 0x00a02c2c, .end = 0x00a02c38 },
+ { .start = 0x00a02c68, .end = 0x00a02c78 },
+ { .start = 0x00a03000, .end = 0x00a03000 },
+ { .start = 0x00a03010, .end = 0x00a03014 },
+ { .start = 0x00a0301c, .end = 0x00a0302c },
+ { .start = 0x00a03034, .end = 0x00a03038 },
+ { .start = 0x00a03040, .end = 0x00a03044 },
+ { .start = 0x00a03060, .end = 0x00a03068 },
+ { .start = 0x00a03070, .end = 0x00a03070 },
+ { .start = 0x00a0307c, .end = 0x00a03084 },
+ { .start = 0x00a0308c, .end = 0x00a03090 },
+ { .start = 0x00a03098, .end = 0x00a03098 },
+ { .start = 0x00a030a0, .end = 0x00a030a0 },
+ { .start = 0x00a030a8, .end = 0x00a030b4 },
+ { .start = 0x00a030bc, .end = 0x00a030c0 },
+ { .start = 0x00a030c8, .end = 0x00a030f4 },
+ { .start = 0x00a03100, .end = 0x00a0312c },
+ { .start = 0x00a03c00, .end = 0x00a03c5c },
+ { .start = 0x00a04400, .end = 0x00a04454 },
+ { .start = 0x00a04460, .end = 0x00a04474 },
+ { .start = 0x00a044c0, .end = 0x00a044ec },
+ { .start = 0x00a04500, .end = 0x00a04504 },
+ { .start = 0x00a04510, .end = 0x00a04538 },
+ { .start = 0x00a04540, .end = 0x00a04548 },
+ { .start = 0x00a04560, .end = 0x00a04560 },
+ { .start = 0x00a04570, .end = 0x00a0457c },
+ { .start = 0x00a04590, .end = 0x00a04590 },
+ { .start = 0x00a04598, .end = 0x00a04598 },
+ { .start = 0x00a045c0, .end = 0x00a045f4 },
+ { .start = 0x00a0c000, .end = 0x00a0c018 },
+ { .start = 0x00a0c020, .end = 0x00a0c028 },
+ { .start = 0x00a0c038, .end = 0x00a0c094 },
+ { .start = 0x00a0c0c0, .end = 0x00a0c104 },
+ { .start = 0x00a0c10c, .end = 0x00a0c118 },
+ { .start = 0x00a0c150, .end = 0x00a0c174 },
+ { .start = 0x00a0c17c, .end = 0x00a0c188 },
+ { .start = 0x00a0c190, .end = 0x00a0c198 },
+ { .start = 0x00a0c1a0, .end = 0x00a0c1a8 },
+ { .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
+};
+
static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
u32 len_bytes, __le32 *data)
{
@@ -478,15 +566,20 @@ static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
}
-static void iwl_dump_prph(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data,
+static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
const struct iwl_prph_range *iwl_prph_dump_addr,
- u32 range_len)
+ u32 range_len, void *ptr)
{
struct iwl_fw_error_dump_prph *prph;
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_fw_error_dump_data **data =
+ (struct iwl_fw_error_dump_data **)ptr;
unsigned long flags;
u32 i;
+ if (!data)
+ return;
+
IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
if (!iwl_trans_grab_nic_access(trans, &flags))
@@ -552,37 +645,49 @@ static struct scatterlist *alloc_sgtable(int size)
return table;
}
-static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
+static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
+ const struct iwl_prph_range *iwl_prph_dump_addr,
+ u32 range_len, void *ptr)
{
- u32 prph_len = 0;
- int i;
+ u32 *prph_len = (u32 *)ptr;
+ int i, num_bytes_in_chunk;
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
- i++) {
+ if (!prph_len)
+ return;
+
+ for (i = 0; i < range_len; i++) {
/* The range includes both boundaries */
- int num_bytes_in_chunk =
- iwl_prph_dump_addr_comm[i].end -
- iwl_prph_dump_addr_comm[i].start + 4;
+ num_bytes_in_chunk =
+ iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4;
- prph_len += sizeof(struct iwl_fw_error_dump_data) +
+ *prph_len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_prph) +
num_bytes_in_chunk;
}
+}
+
+static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
+ void (*handler)(struct iwl_fw_runtime *,
+ const struct iwl_prph_range *,
+ u32, void *))
+{
+ u32 range_len;
- if (fwrt->trans->cfg->mq_rx_supported) {
- for (i = 0; i <
- ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
- /* The range includes both boundaries */
- int num_bytes_in_chunk =
- iwl_prph_dump_addr_9000[i].end -
- iwl_prph_dump_addr_9000[i].start + 4;
-
- prph_len += sizeof(struct iwl_fw_error_dump_data) +
- sizeof(struct iwl_fw_error_dump_prph) +
- num_bytes_in_chunk;
+ if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* TODO */
+ } else if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
+ handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
+ } else {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
+ handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
+
+ if (fwrt->trans->cfg->mq_rx_supported) {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
+ handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
}
}
- return prph_len;
}
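The refactor above turns the PRPH range tables into data selected by device family and lets one walker serve two purposes: iwl_fw_get_prph_len() accumulates a size when ptr points to a u32, and iwl_dump_prph() writes the registers when ptr is a dump-data cursor. A sketch of the resulting two-pass usage, mirroring the call sites changed later in this patch; the wrapper function itself is hypothetical.

/* Illustrative only: size the PRPH section, then fill it. */
static u32 example_prph_two_pass(struct iwl_fw_runtime *fwrt,
				 struct iwl_fw_error_dump_data **dump_data)
{
	u32 prph_len = 0;

	iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len);

	if (prph_len)
		iwl_fw_prph_handler(fwrt, dump_data, iwl_dump_prph);

	return prph_len;
}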
static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
@@ -605,28 +710,6 @@ static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
}
-static void iwl_fw_dump_named_mem(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_error_dump_data **dump_data,
- u32 len, u32 ofs, u8 *name, u8 name_len)
-{
- struct iwl_fw_error_dump_named_mem *dump_mem;
-
- if (!len)
- return;
-
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
- dump_mem = (void *)(*dump_data)->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_NAMED_MEM);
- dump_mem->offset = cpu_to_le32(ofs);
- dump_mem->name_len = name_len;
- memcpy(dump_mem->name, name, name_len);
- iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
- *dump_data = iwl_fw_error_next_data(*dump_data);
-
- IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
-}
-
#define ADD_LEN(len, item_len, const_len) \
do {size_t item = item_len; len += (!!item) * const_len + item; } \
while (0)
@@ -646,6 +729,9 @@ static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
/* Count RXF1 sizes */
+ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
+ mem_cfg->num_lmacs = MAX_NUM_LMAC;
+
for (i = 0; i < mem_cfg->num_lmacs; i++)
ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
@@ -664,6 +750,9 @@ static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
goto dump_internal_txf;
/* Count TXF sizes */
+ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
+ mem_cfg->num_lmacs = MAX_NUM_LMAC;
+
for (i = 0; i < mem_cfg->num_lmacs; i++) {
int j;
@@ -707,6 +796,9 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
DMA_BIDIRECTIONAL);
memcpy(paging->data, page_address(pages),
PAGING_BLOCK_SIZE);
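+ /* hand the paging block back to the device once its contents have
+ * been copied into the dump
+ */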
+ dma_sync_single_for_device(fwrt->trans->dev, addr,
+ PAGING_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
(*data) = iwl_fw_error_next_data(*data);
}
}
@@ -733,6 +825,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
+ if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
+ return NULL;
img = &fwrt->fw->img[fwrt->cur_fw_img];
sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
@@ -747,9 +841,9 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
/* Make room for PRPH registers */
- if (!fwrt->trans->cfg->gen2 &&
- iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
- prph_len += iwl_fw_get_prph_len(fwrt);
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
+ iwl_fw_prph_handler(fwrt, &prph_len,
+ iwl_fw_get_prph_len);
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
@@ -828,7 +922,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
sizeof(dump_info->dev_human_readable) - 1);
strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable) - 1);
- dump_info->rt_status = cpu_to_le32(fwrt->dump.rt_status);
+ dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
+ dump_info->lmac_err_id[0] =
+ cpu_to_le32(fwrt->dump.lmac_err_id[0]);
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ dump_info->lmac_err_id[1] =
+ cpu_to_le32(fwrt->dump.lmac_err_id[1]);
+ dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);
dump_data = iwl_fw_error_next_data(dump_data);
}
@@ -935,133 +1035,653 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (iwl_fw_dbg_is_paging_enabled(fwrt))
iwl_dump_paging(fwrt, &dump_data);
- if (prph_len) {
- iwl_dump_prph(fwrt->trans, &dump_data,
- iwl_prph_dump_addr_comm,
- ARRAY_SIZE(iwl_prph_dump_addr_comm));
-
- if (fwrt->trans->cfg->mq_rx_supported)
- iwl_dump_prph(fwrt->trans, &dump_data,
- iwl_prph_dump_addr_9000,
- ARRAY_SIZE(iwl_prph_dump_addr_9000));
- }
+ if (prph_len)
+ iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph);
out:
dump_file->file_len = cpu_to_le32(file_len);
return dump_file;
}
-static void iwl_dump_prph_ini(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data,
- struct iwl_fw_ini_region_cfg *reg)
+static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *range_ptr, int idx)
{
- struct iwl_fw_error_dump_prph *prph;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 addr, prph_val, offset = le32_to_cpu(reg->offset);
+ int i;
+
+ range->start_addr = reg->start_addr[idx];
+ range->range_data_size = reg->internal.range_data_size;
+ for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
+ addr = le32_to_cpu(range->start_addr) + i;
+ prph_val = iwl_read_prph(fwrt->trans, addr + offset);
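+ /* a read of 0x5a5a5a5a means the periphery register could not be
+ * accessed, so give up on this range
+ */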
+ if (prph_val == 0x5a5a5a5a)
+ return -EBUSY;
+ *val++ = cpu_to_le32(prph_val);
+ }
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 addr, offset = le32_to_cpu(reg->offset);
+ int i;
+
+ range->start_addr = reg->start_addr[idx];
+ range->range_data_size = reg->internal.range_data_size;
+ for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
+ addr = le32_to_cpu(range->start_addr) + i;
+ *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans,
+ addr + offset));
+ }
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(range->start_addr);
+ u32 offset = le32_to_cpu(reg->offset);
+
+ range->start_addr = reg->start_addr[idx];
+ range->range_data_size = reg->internal.range_data_size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr + offset, range->data,
+ le32_to_cpu(reg->internal.range_data_size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int
+iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 page_size = fwrt->trans->init_dram.paging[idx].size;
+
+ range->start_addr = cpu_to_le32(idx);
+ range->range_data_size = cpu_to_le32(page_size);
+ memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
+ page_size);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *range_ptr, int idx)
+{
+ /* increase idx by 1 since the paging blocks to dump are stored at
+ * indices 1 to fwrt->num_of_paging_blk
+ */
+ struct page *page = fwrt->fw_paging_db[++idx].fw_paging_block;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
+ u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
+
+ range->start_addr = cpu_to_le32(idx);
+ range->range_data_size = cpu_to_le32(page_size);
+ dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size,
+ DMA_BIDIRECTIONAL);
+ memcpy(range->data, page_address(page), page_size);
+ dma_sync_single_for_device(fwrt->trans->dev, addr, page_size,
+ DMA_BIDIRECTIONAL);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static int
+iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg, void *range_ptr,
+ int idx)
+{
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 start_addr = iwl_read_umac_prph(fwrt->trans,
+ MON_BUFF_BASE_ADDR_VER2);
+
+ if (start_addr == 0x5a5a5a5a)
+ return -EBUSY;
+
+ range->start_addr = cpu_to_le32(start_addr);
+ range->range_data_size = cpu_to_le32(fwrt->trans->fw_mon[idx].size);
+
+ memcpy(range->data, fwrt->trans->fw_mon[idx].block,
+ fwrt->trans->fw_mon[idx].size);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+struct iwl_ini_txf_iter_data {
+ int fifo;
+ int lmac;
+ u32 fifo_size;
+ bool internal_txf;
+ bool init;
+};
+
+static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ struct iwl_ini_txf_iter_data *iter = fwrt->dump.fifo_iter;
+ struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
+ int txf_num = cfg->num_txfifo_entries;
+ int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size);
+ u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid1);
+
+ if (!iter)
+ return false;
+
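+ /* on the first call, reset the iterator state and choose the lmac
+ * according to the region offset
+ */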
+ if (iter->init) {
+ if (le32_to_cpu(reg->offset) &&
+ WARN_ONCE(cfg->num_lmacs == 1,
+ "Invalid lmac offset: 0x%x\n",
+ le32_to_cpu(reg->offset)))
+ return false;
+
+ iter->init = false;
+ iter->internal_txf = false;
+ iter->fifo_size = 0;
+ iter->fifo = -1;
+ if (le32_to_cpu(reg->offset))
+ iter->lmac = 1;
+ else
+ iter->lmac = 0;
+ }
+
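+ /* walk the regular TX FIFOs of the chosen lmac first, then fall
+ * through to the internal TX FIFOs
+ */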
+ if (!iter->internal_txf)
+ for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
+ iter->fifo_size =
+ cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
+ if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
+ return true;
+ }
+
+ iter->internal_txf = true;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+ return false;
+
+ for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) {
+ iter->fifo_size =
+ cfg->internal_txfifo_size[iter->fifo - txf_num];
+ if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
+ return true;
+ }
+
+ return false;
+}
+
+static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
+ struct iwl_ini_txf_iter_data *iter;
+ u32 offs = le32_to_cpu(reg->offset), addr;
+ u32 registers_size =
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
+ __le32 *val = range->data;
unsigned long flags;
- u32 i, size = le32_to_cpu(reg->num_regions);
+ int i;
- IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
+ if (!iwl_ini_txf_iter(fwrt, reg))
+ return -EIO;
- if (!iwl_trans_grab_nic_access(trans, &flags))
+ if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ return -EBUSY;
+
+ iter = fwrt->dump.fifo_iter;
+
+ range->fifo_num = cpu_to_le32(iter->fifo);
+ range->num_of_registers = reg->fifos.num_of_registers;
+ range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
+
+ iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
+
+ /* read txf registers */
+ for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
+ addr = le32_to_cpu(reg->start_addr[i]) + offs;
+
+ *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ }
+
+ if (reg->fifos.header_only) {
+ range->range_data_size = cpu_to_le32(registers_size);
+ goto out;
+ }
+
+ /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+ iwl_write_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_ADDR + offs,
+ TXF_WR_PTR + offs);
+
+ /* Dummy-read to advance the read pointer to the head */
+ iwl_read_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_DATA + offs);
+
+ /* Read FIFO */
+ addr = TXF_READ_MODIFY_DATA + offs;
+ for (i = 0; i < iter->fifo_size; i += sizeof(__le32))
+ *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+
+out:
+ iwl_trans_release_nic_access(fwrt->trans, &flags);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+struct iwl_ini_rxf_data {
+ u32 fifo_num;
+ u32 size;
+ u32 offset;
+};
+
+static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_ini_rxf_data *data)
+{
+ u32 fid1 = le32_to_cpu(reg->fifos.fid1);
+ u32 fid2 = le32_to_cpu(reg->fifos.fid2);
+ u32 fifo_idx;
+
+ if (!data)
return;
- for (i = 0; i < size; i++) {
- (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
- (*data)->len = cpu_to_le32(le32_to_cpu(reg->size) +
- sizeof(*prph));
- prph = (void *)(*data)->data;
- prph->prph_start = reg->start_addr[i];
- prph->data[0] = cpu_to_le32(iwl_read_prph_no_grab(trans,
- le32_to_cpu(prph->prph_start)));
- *data = iwl_fw_error_next_data(*data);
+ memset(data, 0, sizeof(*data));
+
+ if (WARN_ON_ONCE((fid1 && fid2) || (!fid1 && !fid2)))
+ return;
+
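+ /* fid1 selects one of the lmac RXF1 fifos, fid2 selects the shared
+ * (umac) RXF2; exactly one of them may be set
+ */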
+ fifo_idx = ffs(fid1) - 1;
+ if (fid1 && !WARN_ON_ONCE((~BIT(fifo_idx) & fid1) ||
+ fifo_idx >= MAX_NUM_LMAC)) {
+ data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size;
+ data->fifo_num = fifo_idx;
+ return;
+ }
+
+ fifo_idx = ffs(fid2) - 1;
+ if (fid2 && !WARN_ON_ONCE(fifo_idx != 0)) {
+ data->size = fwrt->smem_cfg.rxfifo2_size;
+ data->offset = RXF_DIFF_FROM_PREV;
+ /* use bit 31 to distinguish between umac and lmac rxf while
+ * parsing the dump
+ */
+ data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT;
+ return;
}
- iwl_trans_release_nic_access(trans, &flags);
}
-static void iwl_dump_csr_ini(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data,
- struct iwl_fw_ini_region_cfg *reg)
+static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *range_ptr, int idx)
{
- int i, num = le32_to_cpu(reg->num_regions);
- u32 size = le32_to_cpu(reg->size);
+ struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
+ struct iwl_ini_rxf_data rxf_data;
+ u32 offs = le32_to_cpu(reg->offset), addr;
+ u32 registers_size =
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
+ __le32 *val = range->data;
+ unsigned long flags;
+ int i;
- IWL_DEBUG_INFO(trans, "WRT CSR dump\n");
+ iwl_ini_get_rxf_data(fwrt, reg, &rxf_data);
+ if (!rxf_data.size)
+ return -EIO;
- for (i = 0; i < num; i++) {
- u32 add = le32_to_cpu(reg->start_addr[i]);
- __le32 *val;
- int j;
+ if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ return -EBUSY;
- (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
- (*data)->len = cpu_to_le32(size);
- val = (void *)(*data)->data;
+ offs += rxf_data.offset;
- for (j = 0; j < size; j += 4)
- *val++ = cpu_to_le32(iwl_trans_read32(trans, j + add));
+ range->fifo_num = cpu_to_le32(rxf_data.fifo_num);
+ range->num_of_registers = reg->fifos.num_of_registers;
+ range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
- *data = iwl_fw_error_next_data(*data);
+ /* read rxf registers */
+ for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
+ addr = le32_to_cpu(reg->start_addr[i]) + offs;
+
+ *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ }
+
+ if (reg->fifos.header_only) {
+ range->range_data_size = cpu_to_le32(registers_size);
+ goto out;
+ }
+
+ /* Lock fence */
+ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
+ /* Set fence pointer to the same place as the WR pointer */
+ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_WR2FENCE + offs, 0x1);
+ /* Set fence offset */
+ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offs,
+ 0x0);
+
+ /* Read FIFO */
+ addr = RXF_FIFO_RD_FENCE_INC + offs;
+ for (i = 0; i < rxf_data.size; i += sizeof(__le32))
+ *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+
+out:
+ iwl_trans_release_nic_access(fwrt->trans, &flags);
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
+static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *data)
+{
+ struct iwl_fw_ini_error_dump *dump = data;
+
+ return dump->ranges;
+}
+
+static void
+*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *data)
+{
+ struct iwl_fw_ini_monitor_dram_dump *mon_dump = (void *)data;
+ u32 write_ptr, cycle_cnt;
+ unsigned long flags;
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
+ IWL_ERR(fwrt, "Failed to get DRAM monitor header\n");
+ return NULL;
+ }
+ write_ptr = iwl_read_umac_prph_no_grab(fwrt->trans,
+ MON_BUFF_WRPTR_VER2);
+ cycle_cnt = iwl_read_umac_prph_no_grab(fwrt->trans,
+ MON_BUFF_CYCLE_CNT_VER2);
+ iwl_trans_release_nic_access(fwrt->trans, &flags);
+
+ mon_dump->write_ptr = cpu_to_le32(write_ptr);
+ mon_dump->cycle_cnt = cpu_to_le32(cycle_cnt);
+
+ return mon_dump->ranges;
+}
+
+static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *data)
+{
+ struct iwl_fw_ini_fifo_error_dump *dump = data;
+
+ return dump->ranges;
+}
+
+static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ return le32_to_cpu(reg->internal.num_of_ranges);
+}
+
+static u32 iwl_dump_ini_paging_gen2_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ return fwrt->trans->init_dram.paging_cnt;
+}
+
+static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ return fwrt->num_of_paging_blk;
+}
+
+static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ return 1;
+}
+
+static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ struct iwl_ini_txf_iter_data iter = { .init = true };
+ void *fifo_iter = fwrt->dump.fifo_iter;
+ u32 num_of_fifos = 0;
+
+ fwrt->dump.fifo_iter = &iter;
+ while (iwl_ini_txf_iter(fwrt, reg))
+ num_of_fifos++;
+
+ fwrt->dump.fifo_iter = fifo_iter;
+
+ return num_of_fifos;
+}
+
+static u32 iwl_dump_ini_rxf_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ /* Each Rx fifo needs a different offset and therefore, its
+ * region can contain only one fifo, i.e. 1 memory range.
+ */
+ return 1;
+}
+
+static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ return sizeof(struct iwl_fw_ini_error_dump) +
+ iwl_dump_ini_mem_ranges(fwrt, reg) *
+ (sizeof(struct iwl_fw_ini_error_dump_range) +
+ le32_to_cpu(reg->internal.range_data_size));
+}
+
+static u32 iwl_dump_ini_paging_gen2_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ int i;
+ u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
+ u32 size = sizeof(struct iwl_fw_ini_error_dump);
+
+ for (i = 0; i < iwl_dump_ini_paging_gen2_ranges(fwrt, reg); i++)
+ size += range_header_len +
+ fwrt->trans->init_dram.paging[i].size;
+
+ return size;
+}
+
+static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ int i;
+ u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
+ u32 size = sizeof(struct iwl_fw_ini_error_dump);
+
+ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg); i++)
+ size += range_header_len + fwrt->fw_paging_db[i].fw_paging_size;
+
+ return size;
+}
+
+static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ u32 size = sizeof(struct iwl_fw_ini_monitor_dram_dump);
+
+ if (fwrt->trans->num_blocks)
+ size += fwrt->trans->fw_mon[0].size;
+
+ return size;
+}
+
+static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ struct iwl_ini_txf_iter_data iter = { .init = true };
+ void *fifo_iter = fwrt->dump.fifo_iter;
+ u32 size = 0;
+ u32 fifo_hdr = sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
+
+ fwrt->dump.fifo_iter = &iter;
+ while (iwl_ini_txf_iter(fwrt, reg)) {
+ size += fifo_hdr;
+ if (!reg->fifos.header_only)
+ size += iter.fifo_size;
+ }
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_fifo_error_dump);
+
+ fwrt->dump.fifo_iter = fifo_iter;
+
+ return size;
+}
+
+static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ struct iwl_ini_rxf_data rx_data;
+ u32 size = sizeof(struct iwl_fw_ini_fifo_error_dump) +
+ sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
+
+ if (reg->fifos.header_only)
+ return size;
+
+ iwl_ini_get_rxf_data(fwrt, reg, &rx_data);
+ size += rx_data.size;
+
+ return size;
+}
+
+/**
+ * struct iwl_dump_ini_mem_ops - ini memory dump operations
+ * @get_num_of_ranges: returns the number of memory ranges in the region.
+ * @get_size: returns the total size of the region.
+ * @fill_mem_hdr: fills the region type specific headers and returns a
+ * pointer to the first range, or NULL if it failed to fill the headers.
+ * @fill_range: copies a given memory range into the dump.
+ * Returns the size of the range, or a negative error value on failure.
+ */
+struct iwl_dump_ini_mem_ops {
+ u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg);
+ u32 (*get_size)(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg);
+ void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg, void *data);
+ int (*fill_range)(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg, void *range,
+ int idx);
+};
+
+/**
+ * iwl_dump_ini_mem - copy a memory region into the dump
+ * @fwrt: fw runtime struct.
+ * @type: region type.
+ * @data: dump memory data.
+ * @reg: region to copy to the dump.
+ * @ops: memory dump operations for this region type.
+ */
+static void
+iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_region_type type,
+ struct iwl_fw_error_dump_data **data,
+ struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_dump_ini_mem_ops *ops)
+{
+ struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data;
+ void *range;
+ u32 num_of_ranges, i;
+
+ if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size ||
+ !ops->fill_mem_hdr || !ops->fill_range))
+ return;
+
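+ /* write the generic dump data header, then the region header, then
+ * fill each memory range through the type specific ops
+ */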
+ num_of_ranges = ops->get_num_of_ranges(fwrt, reg);
+
+ (*data)->type = cpu_to_le32(type | INI_DUMP_BIT);
+ (*data)->len = cpu_to_le32(ops->get_size(fwrt, reg));
+
+ header->num_of_ranges = cpu_to_le32(num_of_ranges);
+ header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME,
+ le32_to_cpu(reg->name_len)));
+ memcpy(header->name, reg->name, le32_to_cpu(header->name_len));
+
+ range = ops->fill_mem_hdr(fwrt, reg, header);
+ if (!range) {
+ IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
+ le32_to_cpu(reg->region_id), type);
+ return;
}
+
+ for (i = 0; i < num_of_ranges; i++) {
+ int range_size = ops->fill_range(fwrt, reg, range, i);
+
+ if (range_size < 0) {
+ IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
+ le32_to_cpu(reg->region_id), type);
+ return;
+ }
+ range = range + range_size;
+ }
+ *data = iwl_fw_error_next_data(*data);
}
static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger *trigger)
{
- int i, num, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data);
+ int i, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data);
if (!trigger || !trigger->num_regions)
return 0;
- num = le32_to_cpu(trigger->num_regions);
- for (i = 0; i < num; i++) {
+ for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) {
u32 reg_id = le32_to_cpu(trigger->data[i]);
struct iwl_fw_ini_region_cfg *reg;
enum iwl_fw_ini_region_type type;
- u32 num_entries;
if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
continue;
- reg = fwrt->dump.active_regs[reg_id].reg;
+ reg = fwrt->dump.active_regs[reg_id];
if (WARN(!reg, "Unassigned region %d\n", reg_id))
continue;
type = le32_to_cpu(reg->region_type);
- num_entries = le32_to_cpu(reg->num_regions);
-
switch (type) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:
- size += hdr_len +
- sizeof(struct iwl_fw_error_dump_named_mem) +
- le32_to_cpu(reg->size);
- break;
case IWL_FW_INI_REGION_PERIPHERY_MAC:
case IWL_FW_INI_REGION_PERIPHERY_PHY:
case IWL_FW_INI_REGION_PERIPHERY_AUX:
- size += num_entries *
- (hdr_len +
- sizeof(struct iwl_fw_error_dump_prph) +
- sizeof(u32));
+ case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ case IWL_FW_INI_REGION_CSR:
+ size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg);
break;
case IWL_FW_INI_REGION_TXF:
- size += iwl_fw_txf_len(fwrt, &fwrt->smem_cfg);
+ size += hdr_len + iwl_dump_ini_txf_get_size(fwrt, reg);
break;
case IWL_FW_INI_REGION_RXF:
- size += iwl_fw_rxf_len(fwrt, &fwrt->smem_cfg);
+ size += hdr_len + iwl_dump_ini_rxf_get_size(fwrt, reg);
break;
- case IWL_FW_INI_REGION_PAGING:
- if (!iwl_fw_dbg_is_paging_enabled(fwrt))
- break;
- size += fwrt->num_of_paging_blk *
- (hdr_len +
- sizeof(struct iwl_fw_error_dump_paging) +
- PAGING_BLOCK_SIZE);
- break;
- case IWL_FW_INI_REGION_CSR:
- size += num_entries *
- (hdr_len + le32_to_cpu(reg->size));
+ case IWL_FW_INI_REGION_PAGING: {
+ size += hdr_len;
+ if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
+ size += iwl_dump_ini_paging_get_size(fwrt, reg);
+ } else {
+ size += iwl_dump_ini_paging_gen2_get_size(fwrt,
+ reg);
+ }
break;
+ }
case IWL_FW_INI_REGION_DRAM_BUFFER:
- /* Transport takes care of DRAM dumping */
- case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ if (!fwrt->trans->num_blocks)
+ break;
+ size += hdr_len +
+ iwl_dump_ini_mon_dram_get_size(fwrt, reg);
+ break;
case IWL_FW_INI_REGION_DRAM_IMR:
/* Undefined yet */
default:
@@ -1073,8 +1693,7 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger *trigger,
- struct iwl_fw_error_dump_data **data,
- u32 *dump_mask)
+ struct iwl_fw_error_dump_data **data)
{
int i, num = le32_to_cpu(trigger->num_regions);
@@ -1082,11 +1701,12 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
u32 reg_id = le32_to_cpu(trigger->data[i]);
enum iwl_fw_ini_region_type type;
struct iwl_fw_ini_region_cfg *reg;
+ struct iwl_dump_ini_mem_ops ops;
if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))
continue;
- reg = fwrt->dump.active_regs[reg_id].reg;
+ reg = fwrt->dump.active_regs[reg_id];
/* Don't warn, get_trigger_len already warned */
if (!reg)
continue;
@@ -1094,39 +1714,75 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
type = le32_to_cpu(reg->region_type);
switch (type) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:
- if (WARN_ON(le32_to_cpu(reg->num_regions) > 1))
- continue;
- iwl_fw_dump_named_mem(fwrt, data,
- le32_to_cpu(reg->size),
- le32_to_cpu(reg->start_addr[0]),
- reg->name,
- le32_to_cpu(reg->name_len));
+ case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
+ ops.get_size = iwl_dump_ini_mem_get_size;
+ ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
+ ops.fill_range = iwl_dump_ini_dev_mem_iter;
+ iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
break;
case IWL_FW_INI_REGION_PERIPHERY_MAC:
case IWL_FW_INI_REGION_PERIPHERY_PHY:
case IWL_FW_INI_REGION_PERIPHERY_AUX:
- iwl_dump_prph_ini(fwrt->trans, data, reg);
+ ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
+ ops.get_size = iwl_dump_ini_mem_get_size;
+ ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
+ ops.fill_range = iwl_dump_ini_prph_iter;
+ iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
break;
case IWL_FW_INI_REGION_DRAM_BUFFER:
- *dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR;
+ ops.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges;
+ ops.get_size = iwl_dump_ini_mon_dram_get_size;
+ ops.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header;
+ ops.fill_range = iwl_dump_ini_mon_dram_iter;
+ iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
break;
- case IWL_FW_INI_REGION_PAGING:
- if (iwl_fw_dbg_is_paging_enabled(fwrt))
- iwl_dump_paging(fwrt, data);
- else
- *dump_mask |= IWL_FW_ERROR_DUMP_PAGING;
+ case IWL_FW_INI_REGION_PAGING: {
+ ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
+ if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
+ ops.get_num_of_ranges =
+ iwl_dump_ini_paging_ranges;
+ ops.get_size = iwl_dump_ini_paging_get_size;
+ ops.fill_range = iwl_dump_ini_paging_iter;
+ } else {
+ ops.get_num_of_ranges =
+ iwl_dump_ini_paging_gen2_ranges;
+ ops.get_size =
+ iwl_dump_ini_paging_gen2_get_size;
+ ops.fill_range = iwl_dump_ini_paging_gen2_iter;
+ }
+
+ iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
break;
- case IWL_FW_INI_REGION_TXF:
- iwl_fw_dump_txf(fwrt, data);
+ }
+ case IWL_FW_INI_REGION_TXF: {
+ struct iwl_ini_txf_iter_data iter = { .init = true };
+ void *fifo_iter = fwrt->dump.fifo_iter;
+
+ fwrt->dump.fifo_iter = &iter;
+ ops.get_num_of_ranges = iwl_dump_ini_txf_ranges;
+ ops.get_size = iwl_dump_ini_txf_get_size;
+ ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
+ ops.fill_range = iwl_dump_ini_txf_iter;
+ iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ fwrt->dump.fifo_iter = fifo_iter;
break;
+ }
case IWL_FW_INI_REGION_RXF:
- iwl_fw_dump_rxf(fwrt, data);
+ ops.get_num_of_ranges = iwl_dump_ini_rxf_ranges;
+ ops.get_size = iwl_dump_ini_rxf_get_size;
+ ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
+ ops.fill_range = iwl_dump_ini_rxf_iter;
+ iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
break;
case IWL_FW_INI_REGION_CSR:
- iwl_dump_csr_ini(fwrt->trans, data, reg);
+ ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
+ ops.get_size = iwl_dump_ini_mem_get_size;
+ ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
+ ops.fill_range = iwl_dump_ini_csr_iter;
+ iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
break;
case IWL_FW_INI_REGION_DRAM_IMR:
- case IWL_FW_INI_REGION_INTERNAL_BUFFER:
/* This is undefined yet */
default:
break;
@@ -1136,30 +1792,23 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
static struct iwl_fw_error_dump_file *
_iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dump_ptrs *fw_error_dump,
- u32 *dump_mask)
+ struct iwl_fw_dump_ptrs *fw_error_dump)
{
int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type);
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_file *dump_file;
- struct iwl_fw_ini_trigger *trigger, *ext;
+ struct iwl_fw_ini_trigger *trigger;
if (id == FW_DBG_TRIGGER_FW_ASSERT)
id = IWL_FW_TRIGGER_ID_FW_ASSERT;
- else if (id == FW_DBG_TRIGGER_USER)
- id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
- else if (id < FW_DBG_TRIGGER_MAX)
- return NULL;
- if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
+ if (!iwl_fw_ini_trigger_on(fwrt, id))
return NULL;
- trigger = fwrt->dump.active_trigs[id].conf;
- ext = fwrt->dump.active_trigs[id].conf_ext;
+ trigger = fwrt->dump.active_trigs[id].trig;
size = sizeof(*dump_file);
size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
- size += iwl_fw_ini_get_trigger_len(fwrt, ext);
if (!size)
return NULL;
@@ -1174,11 +1823,7 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
dump_data = (void *)dump_file->data;
dump_file->file_len = cpu_to_le32(size);
- *dump_mask = 0;
- if (trigger)
- iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data, dump_mask);
- if (ext)
- iwl_fw_ini_dump_trigger(fwrt, ext, &dump_data, dump_mask);
+ iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data);
return dump_file;
}
@@ -1204,8 +1849,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
goto out;
if (fwrt->trans->ini_valid)
- dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump,
- &dump_mask);
+ dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump);
else
dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
@@ -1217,7 +1861,10 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
- fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
+ if (!fwrt->trans->ini_valid)
+ fw_error_dump->trans_ptr =
+ iwl_trans_dump_data(fwrt->trans, dump_mask);
+
file_len = le32_to_cpu(dump_file->file_len);
fw_error_dump->fwrt_len = file_len;
if (fw_error_dump->trans_ptr) {
@@ -1258,61 +1905,12 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
-void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt)
-{
- IWL_INFO(fwrt, "error dump due to fw assert\n");
- fwrt->dump.desc = &iwl_dump_desc_assert;
- iwl_fw_error_dump(fwrt);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_assert_error_dump);
-
-void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
-{
- struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
- kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
-
- if (!iwl_dump_desc_no_alive)
- return;
-
- iwl_dump_desc_no_alive->trig_desc.type =
- cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
- iwl_dump_desc_no_alive->len = 0;
-
- if (WARN_ON(fwrt->dump.desc))
- iwl_fw_free_dump_desc(fwrt);
-
- IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
- FW_DBG_TRIGGER_NO_ALIVE);
-
- fwrt->dump.desc = iwl_dump_desc_no_alive;
- iwl_fw_error_dump(fwrt);
- clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
-
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
bool monitor_only,
unsigned int delay)
{
- /*
- * If the loading of the FW completed successfully, the next step is to
- * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
- * zero, the FW was already loaded successully. If the state is "NO_FW"
- * in such a case - exit, since FW may be dead. Otherwise, we
- * can try to collect the data, since FW might just not be fully
- * loaded (no "ALIVE" yet), and the debug data is accessible.
- *
- * Corner case: got the FW alive but crashed before getting the SMEM
- * config. In such a case, due to HW access problems, we might
- * collect garbage.
- */
- if (fwrt->trans->state == IWL_TRANS_NO_FW &&
- fwrt->smem_cfg.num_lmacs)
- return -EIO;
-
- if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
- test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status))
+ if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return -EBUSY;
if (WARN_ON(fwrt->dump.desc))
@@ -1324,12 +1922,39 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
fwrt->dump.desc = desc;
fwrt->dump.monitor_only = monitor_only;
- schedule_delayed_work(&fwrt->dump.wk, delay);
+ schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay));
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
+int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig_type)
+{
+ int ret;
+ struct iwl_fw_dump_desc *iwl_dump_error_desc =
+ kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
+
+ if (!iwl_dump_error_desc)
+ return -ENOMEM;
+
+ iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
+ iwl_dump_error_desc->len = 0;
+
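+ /* schedule the dump collection and, if it was queued, NMI the FW so
+ * it halts before the data is read out
+ */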
+ ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
+ if (ret) {
+ kfree(iwl_dump_error_desc);
+ } else {
+ set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
+
+ /* trigger nmi to halt the fw */
+ iwl_force_nmi(fwrt->trans);
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
+
int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
@@ -1353,8 +1978,10 @@ int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
}
trigger->occurrences = cpu_to_le16(occurrences);
- delay = le16_to_cpu(trigger->trig_dis_ms);
monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
+
+ /* convert msec to usec */
+ delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
}
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
@@ -1374,6 +2001,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
u32 id, const char *str, size_t len)
{
struct iwl_fw_dump_desc *desc;
+ struct iwl_fw_ini_active_triggers *active;
u32 occur, delay;
if (!fwrt->trans->ini_valid)
@@ -1382,15 +2010,17 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
if (id == FW_DBG_TRIGGER_USER)
id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
- if (WARN_ON(!fwrt->dump.active_trigs[id].active))
+ active = &fwrt->dump.active_trigs[id];
+
+ if (WARN_ON(!active->active))
return -EINVAL;
- delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec);
- occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences);
+ delay = le32_to_cpu(active->trig->dump_delay);
+ occur = le32_to_cpu(active->trig->occurrences);
if (!occur)
return 0;
- if (le32_to_cpu(fwrt->dump.active_trigs[id].conf->force_restart)) {
+ if (le32_to_cpu(active->trig->force_restart)) {
IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
iwl_force_nmi(fwrt->trans);
return 0;
@@ -1400,8 +2030,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
if (!desc)
return -ENOMEM;
- occur--;
- fwrt->dump.active_trigs[id].conf->occurrences = cpu_to_le32(occur);
+ active->trig->occurrences = cpu_to_le32(--occur);
desc->len = len;
desc->trig_desc.type = cpu_to_le32(id);
@@ -1566,55 +2195,81 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
static void
-iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_allocation_tlv *alloc)
+iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size)
{
struct iwl_trans *trans = fwrt->trans;
- struct iwl_continuous_record_cmd cont_rec = {};
- struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0];
- struct iwl_host_cmd hcmd = {
- .id = LDBG_CONFIG_CMD,
- .flags = CMD_ASYNC,
- .data[0] = &cont_rec,
- .len[0] = sizeof(cont_rec),
- };
void *virtual_addr = NULL;
- u32 size = le32_to_cpu(alloc->size);
dma_addr_t phys_addr;
- cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION);
-
- if (!trans->num_blocks &&
- le32_to_cpu(alloc->buffer_location) !=
- IWL_FW_INI_LOCATION_DRAM_PATH)
+ if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon)))
return;
- virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size,
- &phys_addr, GFP_KERNEL);
+ virtual_addr =
+ dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO |
+ __GFP_COMP);
/* TODO: alloc fragments if needed */
if (!virtual_addr)
IWL_ERR(fwrt, "Failed to allocate debug memory\n");
- if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon)))
- return;
-
trans->fw_mon[trans->num_blocks].block = virtual_addr;
trans->fw_mon[trans->num_blocks].physical = phys_addr;
trans->fw_mon[trans->num_blocks].size = size;
trans->num_blocks++;
IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
+}
+
+static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_allocation_data *alloc,
+ enum iwl_fw_ini_apply_point pnt)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_ldbg_config_cmd ldbg_cmd = {
+ .type = cpu_to_le32(BUFFER_ALLOCATION),
+ };
+ struct iwl_buffer_allocation_cmd *cmd = &ldbg_cmd.buffer_allocation;
+ struct iwl_host_cmd hcmd = {
+ .id = LDBG_CONFIG_CMD,
+ .flags = CMD_ASYNC,
+ .data[0] = &ldbg_cmd,
+ .len[0] = sizeof(ldbg_cmd),
+ };
+ int block_idx = trans->num_blocks;
+ u32 buf_location = le32_to_cpu(alloc->tlv.buffer_location);
+
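+ /* SMEM buffers only need the SRAM monitor bit to be set; DRAM
+ * buffers are allocated once and, except for the first block, are
+ * reported to the FW with a BUFFER_ALLOCATION command
+ */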
+ if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) {
+ if (!WARN(pnt != IWL_FW_INI_APPLY_EARLY,
+ "Invalid apply point %d for SMEM buffer allocation",
+ pnt))
+ /* set sram monitor by enabling bit 7 */
+ iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+ return;
+ }
+
+ if (buf_location != IWL_FW_INI_LOCATION_DRAM_PATH)
+ return;
+
+ if (!alloc->is_alloc) {
+ iwl_fw_dbg_buffer_allocation(fwrt,
+ le32_to_cpu(alloc->tlv.size));
+ if (block_idx == trans->num_blocks)
+ return;
+ alloc->is_alloc = 1;
+ }
/* First block is assigned via registers / context info */
if (trans->num_blocks == 1)
return;
cmd->num_frags = cpu_to_le32(1);
- cmd->fragments[0].address = cpu_to_le64(phys_addr);
- cmd->fragments[0].size = alloc->size;
- cmd->allocation_id = alloc->allocation_id;
- cmd->buffer_location = alloc->buffer_location;
+ cmd->fragments[0].address =
+ cpu_to_le64(trans->fw_mon[block_idx].physical);
+ cmd->fragments[0].size = alloc->tlv.size;
+ cmd->allocation_id = alloc->tlv.allocation_id;
+ cmd->buffer_location = alloc->tlv.buffer_location;
iwl_trans_send_cmd(trans, &hcmd);
}
@@ -1643,9 +2298,9 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
int i, size = le32_to_cpu(tlv->num_regions);
for (i = 0; i < size; i++) {
- struct iwl_fw_ini_region_cfg *reg = iter;
+ struct iwl_fw_ini_region_cfg *reg = iter, **active;
int id = le32_to_cpu(reg->region_id);
- struct iwl_fw_ini_active_regs *active;
+ u32 type = le32_to_cpu(reg->region_type);
if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
"Invalid region id %d for apply point %d\n", id, pnt))
@@ -1653,26 +2308,47 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
active = &fwrt->dump.active_regs[id];
- if (ext && active->apply_point == pnt)
- IWL_WARN(fwrt->trans,
- "External region TLV overrides FW default %x\n",
- id);
+ if (*active)
+ IWL_WARN(fwrt->trans, "region TLV %d override\n", id);
IWL_DEBUG_FW(fwrt,
"%s: apply point %d, activating region ID %d\n",
__func__, pnt, id);
- active->reg = reg;
- active->apply_point = pnt;
+ *active = reg;
- if (le32_to_cpu(reg->region_type) !=
- IWL_FW_INI_REGION_DRAM_BUFFER)
- iter += le32_to_cpu(reg->num_regions) * sizeof(__le32);
+ if (type == IWL_FW_INI_REGION_TXF ||
+ type == IWL_FW_INI_REGION_RXF)
+ iter += le32_to_cpu(reg->fifos.num_of_registers) *
+ sizeof(__le32);
+ else if (type != IWL_FW_INI_REGION_DRAM_BUFFER)
+ iter += le32_to_cpu(reg->internal.num_of_ranges) *
+ sizeof(__le32);
iter += sizeof(*reg);
}
}
+static int iwl_fw_dbg_trig_realloc(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_active_triggers *active,
+ u32 id, int size)
+{
+ void *ptr;
+
+ if (size <= active->size)
+ return 0;
+
+ ptr = krealloc(active->trig, size, GFP_KERNEL);
+ if (!ptr) {
+ IWL_ERR(fwrt, "Failed to allocate memory for trigger %d\n", id);
+ return -ENOMEM;
+ }
+ active->trig = ptr;
+ active->size = size;
+
+ return 0;
+}
+
static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger_tlv *tlv,
bool ext,
@@ -1685,43 +2361,61 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger *trig = iter;
struct iwl_fw_ini_active_triggers *active;
int id = le32_to_cpu(trig->trigger_id);
- u32 num;
+ u32 trig_regs_size = le32_to_cpu(trig->num_regions) *
+ sizeof(__le32);
if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
break;
active = &fwrt->dump.active_trigs[id];
- if (active->apply_point != apply_point) {
- active->conf = NULL;
- active->conf_ext = NULL;
- }
+ if (!active->active) {
+ size_t trig_size = sizeof(*trig) + trig_regs_size;
+
+ if (iwl_fw_dbg_trig_realloc(fwrt, active, id,
+ trig_size))
+ goto next;
- num = le32_to_cpu(trig->num_regions);
+ memcpy(active->trig, trig, trig_size);
- if (ext && active->apply_point == apply_point) {
- num += le32_to_cpu(active->conf->num_regions);
- if (trig->ignore_default) {
- active->conf_ext = active->conf;
- active->conf = trig;
+ } else {
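+ /* a trigger with this id is already active: override_trig selects
+ * whether the new TLV replaces the existing configuration and/or
+ * region list, or appends its regions to it
+ */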
+ u32 conf_override =
+ !(le32_to_cpu(trig->override_trig) & 0xff);
+ u32 region_override =
+ !(le32_to_cpu(trig->override_trig) & 0xff00);
+ u32 offset = 0;
+ u32 active_regs =
+ le32_to_cpu(active->trig->num_regions);
+ u32 new_regs = le32_to_cpu(trig->num_regions);
+ int mem_to_add = trig_regs_size;
+
+ if (region_override) {
+ mem_to_add -= active_regs * sizeof(__le32);
} else {
- active->conf_ext = trig;
+ offset += active_regs;
+ new_regs += active_regs;
}
- } else {
- active->conf = trig;
+
+ if (iwl_fw_dbg_trig_realloc(fwrt, active, id,
+ active->size + mem_to_add))
+ goto next;
+
+ if (conf_override)
+ memcpy(active->trig, trig, sizeof(*trig));
+
+ memcpy(active->trig->data + offset, trig->data,
+ trig_regs_size);
+ active->trig->num_regions = cpu_to_le32(new_regs);
}
/* Since zero means infinity - just set to -1 */
- if (!le32_to_cpu(trig->occurrences))
- trig->occurrences = cpu_to_le32(-1);
- if (!le32_to_cpu(trig->ignore_consec))
- trig->ignore_consec = cpu_to_le32(-1);
+ if (!le32_to_cpu(active->trig->occurrences))
+ active->trig->occurrences = cpu_to_le32(-1);
- iter += sizeof(*trig) +
- le32_to_cpu(trig->num_regions) * sizeof(__le32);
+ active->active = true;
+next:
+ iter += sizeof(*trig) + trig_regs_size;
- active->active = num;
- active->apply_point = apply_point;
}
}
@@ -1738,9 +2432,13 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
u32 type = le32_to_cpu(tlv->type);
switch (type) {
- case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
- iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv);
+ case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: {
+ struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv;
+
+ iwl_fw_dbg_buffer_apply(fwrt, ini_tlv, pnt);
+ iter += sizeof(buf_alloc->is_alloc);
break;
+ }
case IWL_UCODE_TLV_TYPE_HCMD:
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
IWL_ERR(fwrt,
@@ -1771,6 +2469,16 @@ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_apply_point apply_point)
{
void *data = &fwrt->trans->apply_points[apply_point];
+ int i;
+
+ if (apply_point == IWL_FW_INI_APPLY_EARLY) {
+ for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++)
+ fwrt->dump.active_regs[i] = NULL;
+
+ /* disable the triggers, used in recovery flow */
+ for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++)
+ fwrt->dump.active_trigs[i].active = false;
+ }
_iwl_fw_dbg_apply_point(fwrt, data, apply_point, false);
@@ -1778,3 +2486,27 @@ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_apply_point(fwrt, data, apply_point, true);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
+
+void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
+{
+ /* if the wait event times out instead of being woken up, the driver
+ * did not receive the NMI interrupt and cannot assume that the FW is
+ * halted
+ */
+ int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
+ !test_bit(STATUS_FW_WAIT_DUMP,
+ &fwrt->trans->status),
+ msecs_to_jiffies(2000));
+ if (!ret) {
+ /* failed to receive NMI interrupt, assuming the FW is stuck */
+ set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
+
+ clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
+ }
+
+ /* Assuming the op mode mutex is held at this point */
+ iwl_fw_dbg_collect_sync(fwrt);
+
+ iwl_trans_stop_device(fwrt->trans);
+}
+IWL_EXPORT_SYMBOL(iwl_fwrt_stop_device);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 6aabbdd72326..a199056234d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -8,7 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -102,13 +102,18 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
- fwrt->dump.rt_status = 0;
+ fwrt->dump.lmac_err_id[0] = 0;
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ fwrt->dump.lmac_err_id[1] = 0;
+ fwrt->dump.umac_err_id = 0;
}
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
bool monitor_only, unsigned int delay);
+int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig_type);
int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
@@ -157,9 +162,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
}
static inline bool
-iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms)
+iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_usec)
{
- unsigned long wind_jiff = msecs_to_jiffies(dis_ms);
+ unsigned long wind_jiff = usecs_to_jiffies(dis_usec);
/* If this is the first event checked, jump to update start ts */
if (fwrt->dump.non_collect_ts_start[id] &&
@@ -176,11 +181,12 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
struct iwl_fw_dbg_trigger_tlv *trig)
{
+ u32 usec = le16_to_cpu(trig->trig_dis_ms) * USEC_PER_MSEC;
+
if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
return false;
- if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id),
- le16_to_cpu(trig->trig_dis_ms))) {
+ if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), usec)) {
IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n",
trig->id);
return false;
@@ -217,23 +223,22 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
})
static inline bool
-_iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
- const enum iwl_fw_dbg_trigger id)
+iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_trigger_id id)
{
- struct iwl_fw_ini_active_triggers *trig = &fwrt->dump.active_trigs[id];
- u32 ms;
+ struct iwl_fw_ini_trigger *trig;
+ u32 usec;
- if (!fwrt->trans->ini_valid)
- return false;
- if (!trig || !trig->active)
+
+ if (!fwrt->trans->ini_valid || id >= IWL_FW_TRIGGER_ID_NUM ||
+ !fwrt->dump.active_trigs[id].active)
return false;
- ms = le32_to_cpu(trig->conf->ignore_consec);
- if (ms)
- ms /= USEC_PER_MSEC;
+ trig = fwrt->dump.active_trigs[id].trig;
+ usec = le32_to_cpu(trig->ignore_consec);
- if (iwl_fw_dbg_no_trig_window(fwrt, id, ms)) {
+ if (iwl_fw_dbg_no_trig_window(fwrt, id, usec)) {
IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id);
return false;
}
@@ -241,12 +246,6 @@ _iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
return true;
}
-#define iwl_fw_ini_trigger_on(fwrt, wdev, id) ({ \
- BUILD_BUG_ON(!__builtin_constant_p(id)); \
- BUILD_BUG_ON((id) >= IWL_FW_TRIGGER_ID_NUM); \
- _iwl_fw_ini_trigger_on((fwrt), (wdev), (id)); \
-})
-
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@@ -266,20 +265,20 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
-static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
+static inline int
+iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
{
- struct iwl_continuous_record_cmd cont_rec = {};
+ struct iwl_ldbg_config_cmd cmd = {
+ .type = start ? cpu_to_le32(START_DEBUG_RECORDING) :
+ cpu_to_le32(STOP_DEBUG_RECORDING),
+ };
struct iwl_host_cmd hcmd = {
.id = LDBG_CONFIG_CMD,
.flags = CMD_ASYNC,
- .data[0] = &cont_rec,
- .len[0] = sizeof(cont_rec),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
};
- cont_rec.record_mode.enable_recording = start ?
- cpu_to_le16(START_DEBUG_RECORDING) :
- cpu_to_le16(STOP_DEBUG_RECORDING);
-
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
@@ -293,13 +292,13 @@ _iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
}
if (params) {
- params->in_sample = iwl_read_prph(trans, DBGC_IN_SAMPLE);
- params->out_ctrl = iwl_read_prph(trans, DBGC_OUT_CTRL);
+ params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE);
+ params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL);
}
- iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+ iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0);
udelay(100);
- iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+ iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0);
#ifdef CONFIG_IWLWIFI_DEBUGFS
trans->dbg_rec_on = false;
#endif
@@ -327,9 +326,9 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
} else {
- iwl_write_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
+ iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
udelay(100);
- iwl_write_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
+ iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
}
}
@@ -370,7 +369,9 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
{
return fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
- fwrt->trans->cfg->d3_debug_data_length &&
+ fwrt->trans->cfg->d3_debug_data_length && fwrt->ops &&
+ fwrt->ops->d3_debug_enable &&
+ fwrt->ops->d3_debug_enable(fwrt->ops_ctx) &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
}
@@ -378,6 +379,7 @@ static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
{
return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
+ fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block;
}
@@ -430,10 +432,33 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
-void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt);
-void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_apply_point apply_point);
+void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt);
+
+static inline void iwl_fw_lmac1_set_alive_err_table(struct iwl_trans *trans,
+ u32 lmac_error_event_table)
+{
+ if (!(trans->error_event_table_tlv_status &
+ IWL_ERROR_EVENT_TABLE_LMAC1) ||
+ WARN_ON(trans->lmac_error_event_table[0] !=
+ lmac_error_event_table))
+ trans->lmac_error_event_table[0] = lmac_error_event_table;
+}
+
+static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
+ u32 umac_error_event_table)
+{
+ if (!(trans->error_event_table_tlv_status &
+ IWL_ERROR_EVENT_TABLE_UMAC) ||
+ WARN_ON(trans->umac_error_event_table !=
+ umac_error_event_table))
+ trans->umac_error_event_table = umac_error_event_table;
+}
+
+/* This bit is used to differentiate the legacy dump from the ini dump */
+#define INI_DUMP_BIT BIT(31)
+
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 3e120dd47305..c1aa4360736b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -173,9 +173,8 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
_FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
- if (!debugfs_create_file(alias, mode, parent, fwrt, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(alias, mode, parent, fwrt, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
@@ -321,14 +320,10 @@ out:
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
-int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
- return 0;
-err:
- IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
- return -ENOMEM;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
index 88255035e8ef..fde40ff88451 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -63,14 +63,11 @@
#include "runtime.h"
#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir);
#else
-static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
- struct dentry *dbgfs_dir)
-{
- return 0;
-}
+static inline void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir) { }
#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 65faecf552cd..9b5077bd46c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -8,7 +8,7 @@
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -180,6 +180,8 @@ enum iwl_fw_error_dump_family {
IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
};
+#define MAX_NUM_LMAC 2
+
/**
* struct iwl_fw_error_dump_info - info on the device / firmware
* @device_family: the family of the device (7 / 8)
@@ -187,7 +189,10 @@ enum iwl_fw_error_dump_family {
* @fw_human_readable: human readable FW version
* @dev_human_readable: name of the device
* @bus_human_readable: name of the bus used
- * @rt_status: the error_id/rt_status that that triggered the latest dump
+ * @num_of_lmacs: the number of lmacs
+ * @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest dump
+ * if the dump collection was not initiated by an assert, the value is 0
+ * @umac_err_id: the umac error_id/rt_status that triggered the latest dump
* if the dump collection was not initiated by an assert, the value is 0
*/
struct iwl_fw_error_dump_info {
@@ -196,7 +201,9 @@ struct iwl_fw_error_dump_info {
u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
u8 dev_human_readable[64];
u8 bus_human_readable[8];
- __le32 rt_status;
+ u8 num_of_lmacs;
+ __le32 umac_err_id;
+ __le32 lmac_err_id[MAX_NUM_LMAC];
} __packed;
/**
@@ -268,22 +275,68 @@ struct iwl_fw_error_dump_mem {
};
/**
- * struct iwl_fw_error_dump_named_mem - chunk of memory
- * @type: &enum iwl_fw_error_dump_mem_type
- * @offset: the offset from which the memory was read
- * @name_len: name length
- * @name: file name
- * @data: the content of the memory
+ * struct iwl_fw_ini_error_dump_range - range of memory
+ * @start_addr: the start address of this range
+ * @range_data_size: the size of this range, in bytes
+ * @data: the actual memory
*/
-struct iwl_fw_error_dump_named_mem {
- __le32 type;
- __le32 offset;
- u8 name_len;
- u8 name[32];
- u8 data[];
+struct iwl_fw_ini_error_dump_range {
+ __le32 start_addr;
+ __le32 range_data_size;
+ __le32 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_ini_error_dump_header - ini region dump header
+ * @num_of_ranges: number of ranges in this region
+ * @name_len: number of bytes allocated to the name string of this region
+ * @name: name of the region
+ */
+struct iwl_fw_ini_error_dump_header {
+ __le32 num_of_ranges;
+ __le32 name_len;
+ u8 name[IWL_FW_INI_MAX_NAME];
};
/**
+ * struct iwl_fw_ini_error_dump - ini region dump
+ * @header: the header of this region
+ * @ranges: the memory ranges of this region
+ */
+struct iwl_fw_ini_error_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ struct iwl_fw_ini_error_dump_range ranges[];
+} __packed;
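
For orientation, a hedged sketch of how a consumer might walk the variable-length ranges declared above; the helper name is illustrative only:

/* Each range is a fixed header followed by range_data_size bytes of
 * payload, so the next range begins immediately after the current
 * range's data.
 */
static struct iwl_fw_ini_error_dump_range *
example_next_range(struct iwl_fw_ini_error_dump_range *range)
{
	return (void *)range->data + le32_to_cpu(range->range_data_size);
}
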
+
+/* This bit is used to differentiate between lmac and umac rxf */
+#define IWL_RXF_UMAC_BIT BIT(31)
+
+/**
+ * struct iwl_fw_ini_fifo_error_dump_range - ini fifo range dump
+ * @fifo_num: the fifo num. In case of rxf and umac rxf, set BIT(31) to
+ * distinguish between lmac and umac
+ * @num_of_registers: num of registers to dump, dword size each
+ * @range_data_size: the size of the registers and fifo data
+ * @data: fifo data
+ */
+struct iwl_fw_ini_fifo_error_dump_range {
+ __le32 fifo_num;
+ __le32 num_of_registers;
+ __le32 range_data_size;
+ __le32 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_ini_fifo_error_dump - ini fifo region dump
+ * @header: the header of this region
+ * @ranges: the memory ranges of this region
+ */
+struct iwl_fw_ini_fifo_error_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ struct iwl_fw_ini_fifo_error_dump_range ranges[];
+} __packed;
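
A short sketch of how the IWL_RXF_UMAC_BIT convention above could be consumed (illustrative helper, not part of the patch):

static bool
example_fifo_is_umac_rxf(struct iwl_fw_ini_fifo_error_dump_range *range)
{
	/* BIT(31) of fifo_num marks a UMAC RXF; the low bits carry the
	 * fifo number itself
	 */
	return le32_to_cpu(range->fifo_num) & IWL_RXF_UMAC_BIT;
}
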
+
+/**
* struct iwl_fw_error_dump_rb - content of an Receive Buffer
* @index: the index of the Receive Buffer in the Rx queue
* @rxq: the RB's Rx queue
@@ -298,6 +351,20 @@ struct iwl_fw_error_dump_rb {
};
/**
+ * struct iwl_fw_ini_monitor_dram_dump - ini dram monitor dump
+ * @header - header of the region
+ * @write_ptr - write pointer position in the dram
+ * @cycle_cnt - cycles count
+ * @ranges - the memory ranges of this region
+ */
+struct iwl_fw_ini_monitor_dram_dump {
+ struct iwl_fw_ini_error_dump_header header;
+ __le32 write_ptr;
+ __le32 cycle_cnt;
+ struct iwl_fw_ini_error_dump_range ranges[];
+} __packed;
+
+/**
* struct iwl_fw_error_dump_paging - content of the UMAC's image page
* block on DRAM
* @index: the index of the page block
@@ -348,7 +415,9 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
- * @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails
+ * @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if the alive flow times out
+ * @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
+ * in the driver.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@@ -366,7 +435,8 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_TX_LATENCY,
FW_DBG_TRIGGER_TDLS,
FW_DBG_TRIGGER_TX_STATUS,
- FW_DBG_TRIGGER_NO_ALIVE,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT,
+ FW_DBG_TRIGGER_DRIVER,
/* must be last */
FW_DBG_TRIGGER_MAX,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 81f557c0b58d..641c95d03b15 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -9,6 +9,7 @@
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +33,7 @@
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -143,6 +145,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
+ IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
+ IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
+ IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
@@ -263,6 +268,12 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS
* @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of
* the REDUCE_TX_POWER_CMD.
+ * @IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF: This ucode supports the short
+ * version of the beacon notification.
+ * @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of
+ * BEACON_FILTER_CONFIG_API_S_VER_4.
+ * @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of
+ * LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -287,6 +298,9 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42,
IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44,
IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45,
+ IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46,
+ IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -303,7 +317,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
- * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
@@ -334,6 +347,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
* @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2
* @IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command
+ * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
+ * (6 GHz).
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -357,19 +373,24 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
* command size (command version 4) that supports toggling ACK TX
* power reduction.
- * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
* @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
* @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax
* capability.
+ * @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured
+ * to report the CSI information with (certain) RX frames
+ * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both
+ * initiator and responder
+ *
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
enum iwl_ucode_tlv_capa {
+ /* set 0 */
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
- IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
@@ -387,6 +408,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
+
+ /* set 1 */
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
@@ -394,6 +417,11 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46,
+ IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48,
+ IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47,
+
+ /* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
@@ -412,6 +440,9 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
+ IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90,
+
+ /* set 3 */
IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 12333167ea23..f4c5a4d73206 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -105,6 +105,8 @@ struct iwl_ucode_capabilities {
u32 n_scan_channels;
u32 standard_phy_calibration_size;
u32 flags;
+ u32 error_log_addr;
+ u32 error_log_size;
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
};
@@ -223,27 +225,24 @@ struct iwl_fw_dbg {
};
/**
+ * @tlv: the buffer allocation tlv
+ * @is_alloc: indicates if the buffer was already allocated
+ */
+struct iwl_fw_ini_allocation_data {
+ struct iwl_fw_ini_allocation_tlv tlv;
+ u32 is_alloc;
+} __packed;
+
+/**
* struct iwl_fw_ini_active_triggers
* @active: is this trigger active
- * @apply_point: last apply point that updated this trigger
- * @conf: active trigger
- * @conf_ext: second trigger, contains extra regions to dump
+ * @size: allocated memory size of the trigger
+ * @trig: trigger
*/
struct iwl_fw_ini_active_triggers {
bool active;
- enum iwl_fw_ini_apply_point apply_point;
- struct iwl_fw_ini_trigger *conf;
- struct iwl_fw_ini_trigger *conf_ext;
-};
-
-/**
- * struct iwl_fw_ini_active_regs
- * @reg: active region from TLV
- * @apply_point: apply point where it became active
- */
-struct iwl_fw_ini_active_regs {
- struct iwl_fw_ini_region_cfg *reg;
- enum iwl_fw_ini_apply_point apply_point;
+ size_t size;
+ struct iwl_fw_ini_trigger *trig;
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 2efac307909e..7adf4e4e841a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,6 +76,7 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
+ init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 4f7090f88cb0..a5fe1a8ca426 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,6 +73,7 @@ struct iwl_fw_runtime_ops {
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
+ bool (*d3_debug_enable)(void *ctx);
};
#define MAX_NUM_LMAC 2
@@ -90,7 +91,6 @@ struct iwl_fwrt_shared_mem_cfg {
enum iwl_fw_runtime_status {
IWL_FWRT_STATUS_DUMPING = 0,
- IWL_FWRT_STATUS_WAIT_ALIVE,
};
/**
@@ -138,11 +138,13 @@ struct iwl_fw_runtime {
u8 conf;
/* ts of the beginning of a non-collect fw dbg data period */
- unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM - 1];
+ unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM];
u32 *d3_debug_data;
- struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID];
+ struct iwl_fw_ini_region_cfg *active_regs[IWL_FW_INI_MAX_REGION_ID];
struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
- u32 rt_status;
+ u32 lmac_err_id[MAX_NUM_LMAC];
+ u32 umac_err_id;
+ void *fifo_iter;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
@@ -160,8 +162,20 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
{
+ int i;
+
kfree(fwrt->dump.d3_debug_data);
fwrt->dump.d3_debug_data = NULL;
+
+ for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) {
+ struct iwl_fw_ini_active_triggers *active =
+ &fwrt->dump.active_trigs[i];
+
+ active->active = false;
+ active->size = 0;
+ kfree(active->trig);
+ active->trig = NULL;
+ }
}
void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 91861a9cbe57..f5f87773667b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,6 +89,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
IWL_DEVICE_FAMILY_22560,
+ IWL_DEVICE_FAMILY_AX210,
};
/*
@@ -335,10 +336,6 @@ struct iwl_csr_params {
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
- * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
- * (if supported)
- * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
- * next step. Supported only in integrated solutions.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section (only DVM)
@@ -383,6 +380,9 @@ struct iwl_csr_params {
* @nvm_type: see &enum iwl_nvm_type
* @d3_debug_data_base_addr: base address where D3 debug data is stored
* @d3_debug_data_length: length of the D3 debug data
+ * @bisr_workaround: BISR hardware workaround (for 22260 series devices)
+ * @min_txq_size: minimum number of slots required in a TX queue
+ * @umac_prph_offset: offset to add to UMAC periphery address
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -392,8 +392,6 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
- const char *fw_name_pre_b_or_c_step;
- const char *fw_name_pre_rf_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
@@ -434,7 +432,8 @@ struct iwl_cfg {
use_tfh:1,
gen2:1,
cdb:1,
- dbgc_supported:1;
+ dbgc_supported:1,
+ bisr_workaround:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@@ -449,37 +448,12 @@ struct iwl_cfg {
u32 extra_phy_cfg_flags;
u32 d3_debug_data_base_addr;
u32 d3_debug_data_length;
+ u32 min_txq_size;
+ u32 umac_prph_offset;
};
-static const struct iwl_csr_params iwl_csr_v1 = {
- .flag_mac_clock_ready = 0,
- .flag_val_mac_access_en = 0,
- .flag_init_done = 2,
- .flag_mac_access_req = 3,
- .flag_sw_reset = 7,
- .flag_master_dis = 8,
- .flag_stop_master = 9,
- .addr_sw_reset = (CSR_BASE + 0x020),
- .mac_addr0_otp = 0x380,
- .mac_addr1_otp = 0x384,
- .mac_addr0_strap = 0x388,
- .mac_addr1_strap = 0x38C
-};
-
-static const struct iwl_csr_params iwl_csr_v2 = {
- .flag_init_done = 6,
- .flag_mac_clock_ready = 20,
- .flag_val_mac_access_en = 20,
- .flag_mac_access_req = 21,
- .flag_master_dis = 28,
- .flag_stop_master = 29,
- .flag_sw_reset = 31,
- .addr_sw_reset = (CSR_BASE + 0x024),
- .mac_addr0_otp = 0x30,
- .mac_addr1_otp = 0x34,
- .mac_addr0_strap = 0x38,
- .mac_addr1_strap = 0x3C
-};
+extern const struct iwl_csr_params iwl_csr_v1;
+extern const struct iwl_csr_params iwl_csr_v2;
/*
* This list declares the config structures for all devices.
@@ -551,38 +525,53 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260_2ac_160_cfg;
extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
+extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
+extern const struct iwl_cfg killer1650x_2ax_cfg;
+extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
+extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
+extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
+extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
+extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index caa5806acd81..aea6d03e545a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -180,6 +180,7 @@
/* Bits for CSR_HW_IF_CONFIG_REG */
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
+#define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM (0x00000080)
#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
@@ -325,12 +326,16 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
+#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
+#define CSR_HW_REV_TYPE_SO (0x0000370)
+#define CSR_HW_REV_TYPE_TY (0x0000420)
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105100)
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
+#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
@@ -592,6 +597,7 @@ enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_IML = BIT(2),
MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 43d815cb3ce9..5798f434f68f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -71,6 +71,7 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
u32 apply_point = le32_to_cpu(header->apply_point);
int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv);
+ int offset_size = copy_size;
if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM,
"Invalid apply point id %d\n", apply_point))
@@ -81,17 +82,25 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
else
data = &trans->apply_points[apply_point];
+ /* add room for is_alloc field in &iwl_fw_ini_allocation_data struct */
+ if (le32_to_cpu(tlv->type) == IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) {
+ struct iwl_fw_ini_allocation_data *buf_alloc =
+ (void *)tlv->data;
+
+ offset_size += sizeof(buf_alloc->is_alloc);
+ }
+
/*
* Make sure we still have room to copy this TLV. Offset points to the
* location the last copy ended.
*/
- if (WARN_ONCE(data->offset + copy_size > data->size,
+ if (WARN_ONCE(data->offset + offset_size > data->size,
"Not enough memory for apply point %d\n",
apply_point))
return;
memcpy(data->data + data->offset, (void *)tlv, copy_size);
- data->offset += copy_size;
+ data->offset += offset_size;
}
void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
@@ -129,6 +138,16 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM))
continue;
+ /* add room for is_alloc field in &iwl_fw_ini_allocation_data
+ * struct
+ */
+ if (tlv_type == IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) {
+ struct iwl_fw_ini_allocation_data *buf_alloc =
+ (void *)tlv->data;
+
+ size[apply] += sizeof(buf_alloc->is_alloc);
+ }
+
size[apply] += sizeof(*tlv) + tlv_len;
}
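
A compact sketch of the sizing rule the two hunks above implement (helper name assumed for illustration):

static u32 example_buf_alloc_copy_size(const struct iwl_ucode_tlv *tlv)
{
	u32 size = le32_to_cpu(tlv->length) + sizeof(*tlv);

	/* buffer-allocation TLVs are stored as struct
	 * iwl_fw_ini_allocation_data, so reserve room for the trailing
	 * driver-internal is_alloc word as well
	 */
	if (le32_to_cpu(tlv->type) == IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION)
		size += sizeof(u32);

	return size;
}
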
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index a2af68a0d34b..655ff5694560 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -159,7 +160,7 @@ do { \
/* 0x000F0000 - 0x00010000 */
#define IWL_DL_FW 0x00010000
#define IWL_DL_RF_KILL 0x00020000
-#define IWL_DL_FW_ERRORS 0x00040000
+#define IWL_DL_TPT 0x00040000
/* 0x00F00000 - 0x00100000 */
#define IWL_DL_RATE 0x00100000
#define IWL_DL_CALIB 0x00200000
@@ -193,7 +194,6 @@ do { \
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
-#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a)
#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
@@ -215,6 +215,7 @@ do { \
#define IWL_DEBUG_DEV_RADIO(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index bf1be985f36b..689a65b11cc3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -8,6 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,6 +77,7 @@
#include "iwl-dbg-tlv.h"
#include "iwl-config.h"
#include "iwl-modparams.h"
+#include "fw/api/alive.h"
/******************************************************************************
*
@@ -210,18 +213,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const struct iwl_cfg *cfg = drv->trans->cfg;
char tag[8];
- const char *fw_pre_name;
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
- CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
- fw_pre_name = cfg->fw_name_pre_b_or_c_step;
- else if (drv->trans->cfg->integrated &&
- CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
- cfg->fw_name_pre_rf_next_step)
- fw_pre_name = cfg->fw_name_pre_rf_next_step;
- else
- fw_pre_name = cfg->fw_name_pre;
+ (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP &&
+ CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) {
+ IWL_ERR(drv,
+ "Only HW steps B and C are currently supported (0x%0x)\n",
+ drv->trans->hw_rev);
+ return -EINVAL;
+ }
if (first) {
drv->fw_index = cfg->ucode_api_max;
@@ -235,15 +235,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
IWL_ERR(drv, "no suitable firmware found!\n");
if (cfg->ucode_api_min == cfg->ucode_api_max) {
- IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
+ IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre,
cfg->ucode_api_max);
} else {
IWL_ERR(drv, "minimum version required: %s%d\n",
- fw_pre_name,
- cfg->ucode_api_min);
+ cfg->fw_name_pre, cfg->ucode_api_min);
IWL_ERR(drv, "maximum version supported: %s%d\n",
- fw_pre_name,
- cfg->ucode_api_max);
+ cfg->fw_name_pre, cfg->ucode_api_max);
}
IWL_ERR(drv,
@@ -252,7 +250,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
}
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
- fw_pre_name, tag);
+ cfg->fw_name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
@@ -593,6 +591,8 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
return 0;
}
+#define FW_ADDR_CACHE_CONTROL 0xC0000000
+
static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces,
@@ -1090,6 +1090,52 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -ENOMEM;
break;
}
+ case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
+ struct {
+ __le32 buf_addr;
+ __le32 buf_size;
+ } *recov_info = (void *)tlv_data;
+
+ if (tlv_len != sizeof(*recov_info))
+ goto invalid_tlv_len;
+ capa->error_log_addr =
+ le32_to_cpu(recov_info->buf_addr);
+ capa->error_log_size =
+ le32_to_cpu(recov_info->buf_size);
+ }
+ break;
+ case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
+ struct iwl_umac_debug_addrs *dbg_ptrs =
+ (void *)tlv_data;
+
+ if (tlv_len != sizeof(*dbg_ptrs))
+ goto invalid_tlv_len;
+ if (drv->trans->cfg->device_family <
+ IWL_DEVICE_FAMILY_22000)
+ break;
+ drv->trans->umac_error_event_table =
+ le32_to_cpu(dbg_ptrs->error_info_addr) &
+ ~FW_ADDR_CACHE_CONTROL;
+ drv->trans->error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_UMAC;
+ break;
+ }
+ case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
+ struct iwl_lmac_debug_addrs *dbg_ptrs =
+ (void *)tlv_data;
+
+ if (tlv_len != sizeof(*dbg_ptrs))
+ goto invalid_tlv_len;
+ if (drv->trans->cfg->device_family <
+ IWL_DEVICE_FAMILY_22000)
+ break;
+ drv->trans->lmac_error_event_table[0] =
+ le32_to_cpu(dbg_ptrs->error_event_table_ptr) &
+ ~FW_ADDR_CACHE_CONTROL;
+ drv->trans->error_event_table_tlv_status |=
+ IWL_ERROR_EVENT_TABLE_LMAC1;
+ break;
+ }
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
case IWL_UCODE_TLV_TYPE_REGIONS:
@@ -1207,11 +1253,6 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
#ifdef CONFIG_IWLWIFI_DEBUGFS
drv->dbgfs_op_mode = debugfs_create_dir(op->name,
drv->dbgfs_drv);
- if (!drv->dbgfs_op_mode) {
- IWL_ERR(drv,
- "failed to create opmode debugfs directory\n");
- return op_mode;
- }
dbgfs_dir = drv->dbgfs_op_mode;
#endif
@@ -1267,8 +1308,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
- /* dump all fw memory areas by default except d3 debug data */
- fw->dbg.dump_mask = 0xfffdffff;
+ /* dump all fw memory areas by default */
+ fw->dbg.dump_mask = 0xffffffff;
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
@@ -1574,20 +1615,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
iwl_dbgfs_root);
- if (!drv->dbgfs_drv) {
- IWL_ERR(drv, "failed to create debugfs directory\n");
- ret = -ENOMEM;
- goto err_free_tlv;
- }
-
/* Create transport layer debugfs dir */
drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
-
- if (!drv->trans->dbgfs_dir) {
- IWL_ERR(drv, "failed to create transport debugfs directory\n");
- ret = -ENOMEM;
- goto err_free_dbgfs;
- }
#endif
ret = iwl_request_firmware(drv, true);
@@ -1600,9 +1629,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
-err_free_dbgfs:
debugfs_remove_recursive(drv->dbgfs_drv);
-err_free_tlv:
iwl_fw_dbg_free(drv->trans);
#endif
kfree(drv);
@@ -1713,9 +1740,6 @@ static int __init iwl_drv_init(void)
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the root of iwlwifi debugfs subsystem. */
iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
-
- if (!iwl_dbgfs_root)
- return -EFAULT;
#endif
return iwl_pci_register_driver();
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 75940ac406b9..04338c3a6205 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -850,8 +850,7 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
if (WARN_ON(!cfg || !cfg->eeprom_params))
return NULL;
- data = kzalloc(sizeof(*data) +
- sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+ data = kzalloc(struct_size(data, channels, IWL_NUM_CHANNELS),
GFP_KERNEL);
if (!data)
return NULL;
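
The struct_size() conversions in this and the following hunks are equivalent to the open-coded sums they replace, with overflow checking added. A hedged illustration (assuming struct iwl_nvm_data and its channels[] flexible array are in scope):

static struct iwl_nvm_data *example_alloc_nvm_data(size_t n_channels)
{
	struct iwl_nvm_data *data;

	/* sizeof(*data) + n_channels * sizeof(data->channels[0]),
	 * saturating to SIZE_MAX on overflow so kzalloc() fails cleanly
	 */
	data = kzalloc(struct_size(data, channels, n_channels), GFP_KERNEL);
	return data;
}
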
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
index a6db6a814257..82e87192119e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -193,34 +193,25 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
{
int ret;
- /* Enable 40MHz radio clock */
- iwl_write32(trans, CSR_GP_CNTRL,
- iwl_read32(trans, CSR_GP_CNTRL) |
- BIT(trans->cfg->csr->flag_init_done));
-
- /* wait for clock to be ready */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- 25000);
- if (ret < 0) {
- IWL_ERR(trans, "Time out access OTP\n");
- } else {
- iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- /*
- * CSR auto clock gate disable bit -
- * this is only applicable for HW with OTP shadow RAM
- */
- if (trans->cfg->base_params->shadow_ram_support)
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
- }
- return ret;
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
+ return ret;
+
+ iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+
+ /*
+ * CSR auto clock gate disable bit -
+ * this is only applicable for HW with OTP shadow RAM
+ */
+ if (trans->cfg->base_params->shadow_ram_support)
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
+ return 0;
}
static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 4f10914f6048..a704e25af810 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -1,9 +1,13 @@
/******************************************************************************
*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- *
- * Portions of this file are derived from the ipw3945 project.
+ * Copyright(C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -15,12 +19,45 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#include <linux/delay.h>
#include <linux/device.h>
@@ -33,6 +70,36 @@
#include "iwl-prph.h"
#include "iwl-fh.h"
+const struct iwl_csr_params iwl_csr_v1 = {
+ .flag_mac_clock_ready = 0,
+ .flag_val_mac_access_en = 0,
+ .flag_init_done = 2,
+ .flag_mac_access_req = 3,
+ .flag_sw_reset = 7,
+ .flag_master_dis = 8,
+ .flag_stop_master = 9,
+ .addr_sw_reset = CSR_BASE + 0x020,
+ .mac_addr0_otp = 0x380,
+ .mac_addr1_otp = 0x384,
+ .mac_addr0_strap = 0x388,
+ .mac_addr1_strap = 0x38C
+};
+
+const struct iwl_csr_params iwl_csr_v2 = {
+ .flag_init_done = 6,
+ .flag_mac_clock_ready = 20,
+ .flag_val_mac_access_en = 20,
+ .flag_mac_access_req = 21,
+ .flag_master_dis = 28,
+ .flag_stop_master = 29,
+ .flag_sw_reset = 31,
+ .addr_sw_reset = CSR_BASE + 0x024,
+ .mac_addr0_otp = 0x30,
+ .mac_addr1_otp = 0x34,
+ .mac_addr0_strap = 0x38,
+ .mac_addr1_strap = 0x3C
+};
+
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
@@ -240,9 +307,12 @@ void iwl_force_nmi(struct iwl_trans *trans)
if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000)
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV);
+ else if (trans->cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
+ UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK);
else
- iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER,
- UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK);
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_NMI_BIT);
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
@@ -421,3 +491,43 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
return 0;
}
+
+int iwl_finish_nic_init(struct iwl_trans *trans)
+{
+ int err;
+
+ if (trans->cfg->bisr_workaround) {
+ /* ensure the TOP FSM isn't still in previous reset */
+ mdelay(2);
+ }
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_init_done));
+
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ err = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ 25000);
+ if (err < 0)
+ IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
+
+ if (trans->cfg->bisr_workaround) {
+ /* ensure BISR shift has finished */
+ udelay(200);
+ }
+
+ return err < 0 ? err : 0;
+}
+IWL_EXPORT_SYMBOL(iwl_finish_nic_init);
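
A usage sketch for the new iwl_finish_nic_init() helper; the wrapper below is hypothetical, but it mirrors how iwl_init_otp_access() above consumes it:

static int example_wake_nic(struct iwl_trans *trans)
{
	int ret = iwl_finish_nic_init(trans);

	if (ret)
		return ret;	/* MAC clock never became ready */

	/* device-internal resources (prph, uCode SRAM) are now accessible */
	return 0;
}
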
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 38085850a2d3..920e2146ea3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -1,8 +1,11 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
*
- * Portions of this file are derived from the ipw3945 project.
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -14,14 +17,44 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright (C) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
-
#ifndef __iwl_io_h__
#define __iwl_io_h__
@@ -66,7 +99,48 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_force_nmi(struct iwl_trans *trans);
+int iwl_finish_nic_init(struct iwl_trans *trans);
+
/* Error handling */
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
+/*
+ * UMAC periphery address space changed from 0xA00000 to 0xD00000 starting from
+ * device family AX200. So peripheries used in families above and below AX200
+ * should go through iwl_..._umac_..._prph.
+ */
+static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
+{
+ return ofs + trans->cfg->umac_prph_offset;
+}
+
+static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
+{
+ return iwl_read_prph_no_grab(trans, ofs + trans->cfg->umac_prph_offset);
+}
+
+static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
+{
+ return iwl_read_prph(trans, ofs + trans->cfg->umac_prph_offset);
+}
+
+static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
+ u32 val)
+{
+ iwl_write_prph_no_grab(trans, ofs + trans->cfg->umac_prph_offset, val);
+}
+
+static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
+ u32 val)
+{
+ iwl_write_prph(trans, ofs + trans->cfg->umac_prph_offset, val);
+}
+
+static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ return iwl_poll_prph_bit(trans, addr + trans->cfg->umac_prph_offset,
+ bits, mask, timeout);
+}
+
#endif
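
A hedged usage sketch for the UMAC periphery wrappers above; the register and mask are borrowed from the NMI path in iwl-io.c purely as an example:

static void example_force_umac_nmi(struct iwl_trans *trans)
{
	/* the wrapper adds trans->cfg->umac_prph_offset, so the same
	 * register constant works both before and after the AX200-era
	 * 0xA00000 -> 0xD00000 relocation
	 */
	iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
			    UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK);
}
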
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 73b1c46f1158..0cae2ef9b9df 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -152,4 +152,22 @@ struct iwl_mod_params {
bool enable_ini;
};
+static inline bool iwl_enable_rx_ampdu(void)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(void)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* enabled by default */
+ return true;
+}
+
#endif /* #__iwl_modparams_h__ */
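
A brief sketch of how the new modparam helpers might gate aggregation; the handler shape is illustrative, not the mvm code:

static int example_ampdu_action(bool tx)
{
	if (tx && !iwl_enable_tx_ampdu())
		return -EINVAL;	/* TX aggregation disabled by module parameter */
	if (!tx && !iwl_enable_rx_ampdu())
		return -EINVAL;	/* RX aggregation disabled by module parameter */
	return 0;
}
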
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index d9afedc3d1d9..87d6de7efdd2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -8,7 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -463,6 +463,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
}
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
+
+ vht_cap->vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
@@ -479,7 +482,6 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
- IEEE80211_HE_MAC_CAP2_MU_CASCADING |
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
@@ -490,7 +492,9 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.mac_cap_info[5] =
IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 |
IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 |
- IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
+ IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS |
+ IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
@@ -498,18 +502,13 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
.phy_cap_info[2] =
- IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
- IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
.phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[4] =
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@@ -517,16 +516,8 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
.phy_cap_info[5] =
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
- IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK,
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
- IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
- IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
- IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
- IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
@@ -537,11 +528,12 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ,
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
.phy_cap_info[9] =
IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
},
/*
* Set default Tx/Rx HE MCS NSS Support field.
@@ -569,35 +561,32 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE |
- IEEE80211_HE_MAC_CAP0_TWT_RES,
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_BSR |
- IEEE80211_HE_MAC_CAP2_MU_CASCADING |
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
.mac_cap_info[4] =
IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .mac_cap_info[5] =
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
.phy_cap_info[1] =
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
.phy_cap_info[2] =
- IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
.phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[4] =
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@@ -605,12 +594,8 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
.phy_cap_info[5] =
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
- IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK,
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
@@ -620,10 +605,11 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ,
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
.phy_cap_info[9] =
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
},
/*
* Set default Tx/Rx HE MCS NSS Support field.
@@ -947,15 +933,13 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const __le16 *ch_section;
if (cfg->nvm_type != IWL_NVM_EXT)
- data = kzalloc(sizeof(*data) +
- sizeof(struct ieee80211_channel) *
- IWL_NVM_NUM_CHANNELS,
- GFP_KERNEL);
+ data = kzalloc(struct_size(data, channels,
+ IWL_NVM_NUM_CHANNELS),
+ GFP_KERNEL);
else
- data = kzalloc(sizeof(*data) +
- sizeof(struct ieee80211_channel) *
- IWL_NVM_NUM_CHANNELS_EXT,
- GFP_KERNEL);
+ data = kzalloc(struct_size(data, channels,
+ IWL_NVM_NUM_CHANNELS_EXT),
+ GFP_KERNEL);
if (!data)
return NULL;
@@ -1103,12 +1087,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
- if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
- return ERR_PTR(-EINVAL);
-
if (WARN_ON(num_of_ch > max_num_ch))
num_of_ch = max_num_ch;
+ if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
+ return ERR_PTR(-EINVAL);
+
IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
num_of_ch);
@@ -1196,14 +1180,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd_to_copy = sizeof(struct ieee80211_regdomain) +
valid_rules * sizeof(struct ieee80211_reg_rule);
- copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
+ copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
if (!copy_rd) {
copy_rd = ERR_PTR(-ENOMEM);
goto out;
}
- memcpy(copy_rd, regd, regd_to_copy);
-
out:
kfree(regdb_ptrs);
kfree(regd);
@@ -1444,9 +1426,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
if (empty_otp)
IWL_INFO(trans, "OTP is empty\n");
- nvm = kzalloc(sizeof(*nvm) +
- sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
- GFP_KERNEL);
+ nvm = kzalloc(struct_size(nvm, channels, IWL_NUM_CHANNELS), GFP_KERNEL);
if (!nvm) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 9d89b7d7f9fa..1af9f9e1ecd4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -8,7 +8,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -358,12 +358,12 @@
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
-#define MON_BUFF_BASE_ADDR (0xa03c3c)
+#define MON_BUFF_BASE_ADDR (0xa03c1c)
#define MON_BUFF_END_ADDR (0xa03c40)
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
/* FW monitor family 8000 and on */
-#define MON_BUFF_BASE_ADDR_VER2 (0xa03c3c)
+#define MON_BUFF_BASE_ADDR_VER2 (0xa03c1c)
#define MON_BUFF_END_ADDR_VER2 (0xa03c20)
#define MON_BUFF_WRPTR_VER2 (0xa03c24)
#define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28)
@@ -433,4 +433,7 @@ enum {
#define HPM_DEBUG 0xA03440
#define PERSISTENCE_BIT BIT(12)
#define PREG_WFPM_ACCESS BIT(12)
+
+#define UREG_DOORBELL_TO_ISR6 0xA05C04
+#define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0)
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index a7009cd4232d..bbebbf3efd57 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -8,6 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -230,6 +232,12 @@ enum iwl_hcmd_dataflag {
IWL_HCMD_DFL_DUP = BIT(1),
};
+enum iwl_error_event_table_status {
+ IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
+ IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
+ IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
+};
+
/**
* struct iwl_host_cmd - Host command to the uCode
*
@@ -330,6 +338,7 @@ enum iwl_d3_status {
* are sent
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
+ * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -342,6 +351,7 @@ enum iwl_trans_status {
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
+ STATUS_FW_WAIT_DUMP,
};
static inline int
@@ -684,6 +694,9 @@ enum iwl_plat_pm_mode {
*/
#define IWL_TRANS_IDLE_TIMEOUT 2000
+/* Max time to wait for nmi interrupt */
+#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
+
/**
* struct iwl_dram_data
* @physical: page phy pointer
@@ -697,6 +710,20 @@ struct iwl_dram_data {
};
/**
+ * struct iwl_self_init_dram - dram data used by self init process
+ * @fw: lmac and umac dram data
+ * @fw_cnt: total number of items in array
+ * @paging: paging dram data
+ * @paging_cnt: total number of items in array
+ */
+struct iwl_self_init_dram {
+ struct iwl_dram_data *fw;
+ int fw_cnt;
+ struct iwl_dram_data *paging;
+ int paging_cnt;
+};
+
+/**
* struct iwl_trans - transport common data
*
* @ops - pointer to iwl_trans_ops
@@ -738,6 +765,10 @@ struct iwl_dram_data {
* mode is set during the initialization phase and is not
* supposed to change during runtime.
* @dbg_rec_on: true iff there is a fw debug recording currently active
+ * @lmac_error_event_table: addrs of lmacs error tables
+ * @umac_error_event_table: addr of umac error table
 + * @error_event_table_tlv_status: bitmap that indicates which error table
 + *	pointers were received via TLV. Use enum &iwl_error_event_table_status
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -790,12 +821,18 @@ struct iwl_trans {
u8 dbg_n_dest_reg;
int num_blocks;
struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];
+ struct iwl_self_init_dram init_dram;
enum iwl_plat_pm_mode system_pm_mode;
enum iwl_plat_pm_mode runtime_pm_mode;
bool suspending;
bool dbg_rec_on;
+ u32 lmac_error_event_table[2];
+ u32 umac_error_event_table;
+ unsigned int error_event_table_tlv_status;
+ wait_queue_head_t fw_halt_waitq;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -1202,6 +1239,10 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
iwl_op_mode_nic_error(trans->op_mode);
+
+ if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
+ wake_up(&trans->fw_halt_waitq);
+
}
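
The wake_up() added above is the producer side of the new STATUS_FW_WAIT_DUMP / fw_halt_waitq pair: a waiter sets the bit, sleeps on the wait queue, and is released either by the firmware error path or by the IWL_TRANS_NMI_TIMEOUT expiring. A sketch of what such a waiter could look like, assuming only the fields added in this header; this illustrates the wait/wake pairing, not the driver's actual dump code:

#include <linux/wait.h>
#include <linux/bitops.h>
#include "iwl-trans.h"	/* assumed: struct iwl_trans with status/fw_halt_waitq */

/* hypothetical helper: block until the FW error path releases us */
static void ex_wait_for_fw_halt(struct iwl_trans *trans)
{
	set_bit(STATUS_FW_WAIT_DUMP, &trans->status);

	/* trigger an NMI here, then wait for iwl_trans_fw_error() to wake us */
	wait_event_timeout(trans->fw_halt_waitq,
			   !test_bit(STATUS_FW_WAIT_DUMP, &trans->status),
			   IWL_TRANS_NMI_TIMEOUT);

	/* on timeout the bit may still be set; clear it ourselves */
	clear_bit(STATUS_FW_WAIT_DUMP, &trans->status);
}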
/*****************************************************
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 9ffd21918b5a..dd268c4bd371 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -5,9 +5,9 @@ iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
+iwlmvm-y += ftm-responder.o ftm-initiator.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
-iwlmvm-y += tof.o
iwlmvm-$(CONFIG_PM) += d3.o
-ccflags-y += -I$(src)/../
+ccflags-y += -I $(srctree)/$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 730e37744dc0..3d2abbc5c76c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -241,7 +241,6 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
struct iwl_mvm_sta *mvmsta;
u32 value;
- int ret;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
@@ -262,10 +261,8 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
cmd.reduced_txp = cpu_to_le32(value);
mvmsta->bt_reduced_txpower = enable;
- ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC,
- sizeof(cmd), &cmd);
-
- return ret;
+ return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP,
+ CMD_ASYNC, sizeof(cmd), &cmd);
}
struct iwl_bt_iterator_data {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index d96ada3c06fc..dff14f1ec55f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -8,6 +8,7 @@
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,6 +65,7 @@
#define __MVM_CONSTANTS_H
#include <linux/ieee80211.h>
+#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
@@ -114,6 +117,7 @@
#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */
#define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */
#define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM
+#define IWL_MVM_NON_TRANSMITTING_AP 0
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
@@ -145,5 +149,8 @@
#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */
#define IWL_MVM_RS_TPC_TX_POWER_STEP 3
#define IWL_MVM_ENABLE_EBS 1
+#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
+#define IWL_MVM_FTM_INITIATOR_DYNACK true
+#define IWL_MVM_D3_DEBUG false
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 01b5338201d6..808bc6f363d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1899,7 +1899,7 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- u32 base = mvm->error_event_table[0];
+ u32 base = mvm->trans->lmac_error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
@@ -2125,7 +2125,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
file->private_data = inode->i_private;
- ieee80211_stop_queues(mvm->hw);
synchronize_net();
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
@@ -2140,10 +2139,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
rtnl_unlock();
if (err > 0)
err = -EINVAL;
- if (err) {
- ieee80211_wake_queues(mvm->hw);
+ if (err)
return err;
- }
+
mvm->d3_test_active = true;
mvm->keep_vif = NULL;
return 0;
@@ -2223,8 +2221,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
- ieee80211_wake_queues(mvm->hw);
-
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 33b0af24a537..2453ceabf00d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -60,7 +60,6 @@
*
*****************************************************************************/
#include "mvm.h"
-#include "fw/api/tof.h"
#include "debugfs.h"
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -523,753 +522,30 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
- char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int ret = -EINVAL;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("tof_disabled=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.tof_cfg.tof_disabled = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.tof_cfg.one_sided_disabled = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("is_debug_mode=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.tof_cfg.is_debug_mode = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("is_buf=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.tof_cfg.is_buf_required = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value) {
- ret = iwl_mvm_tof_config_cmd(mvm);
- goto out;
- }
- }
-
-out:
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_tof_config_cmd *cmd;
-
- cmd = &mvm->tof_data.tof_cfg;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
- cmd->tof_disabled);
- pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
- cmd->one_sided_disabled);
- pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
- cmd->is_debug_mode);
- pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
- cmd->is_buf_required);
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
- char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int ret = 0;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("burst_period=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (!ret)
- mvm->tof_data.responder_cfg.burst_period =
- cpu_to_le16(value);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.min_delta_ftm = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("burst_duration=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.burst_duration = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.num_of_burst_exp = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("abort_responder=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.abort_responder = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("get_ch_est=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.get_ch_est = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.recv_sta_req_params = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("channel_num=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.channel_num = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("bandwidth=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.bandwidth = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("rate=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.rate = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("bssid=", buf);
- if (data) {
- u8 *mac = mvm->tof_data.responder_cfg.bssid;
-
- if (!mac_pton(data, mac)) {
- ret = -EINVAL;
- goto out;
- }
- }
-
- data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
- cpu_to_le16(value);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("toa_offset=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.toa_offset =
- cpu_to_le16(value);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("center_freq=", buf);
- if (data) {
- struct iwl_tof_responder_config_cmd *cmd =
- &mvm->tof_data.responder_cfg;
-
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value) {
- enum nl80211_band band = (cmd->channel_num <= 14) ?
- NL80211_BAND_2GHZ :
- NL80211_BAND_5GHZ;
- struct ieee80211_channel chn = {
- .band = band,
- .center_freq = ieee80211_channel_to_frequency(
- cmd->channel_num, band),
- };
- struct cfg80211_chan_def chandef = {
- .chan = &chn,
- .center_freq1 =
- ieee80211_channel_to_frequency(value,
- band),
- };
-
- cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
- }
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.ftm_per_burst = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("asap_mode=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.asap_mode = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value) {
- ret = iwl_mvm_tof_responder_cmd(mvm, vif);
- goto out;
- }
- }
-
-out:
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_tof_responder_config_cmd *cmd;
-
- cmd = &mvm->tof_data.responder_cfg;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
- le16_to_cpu(cmd->burst_period));
- pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
- cmd->burst_duration);
- pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
- cmd->bandwidth);
- pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
- cmd->channel_num);
- pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
- cmd->ctrl_ch_position);
- pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
- cmd->bssid);
- pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
- cmd->min_delta_ftm);
- pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
- cmd->num_of_burst_exp);
- pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
- pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
- cmd->abort_responder);
- pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
- cmd->get_ch_est);
- pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
- cmd->recv_sta_req_params);
- pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
- cmd->ftm_per_burst);
- pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
- cmd->ftm_resp_ts_avail);
- pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
- cmd->asap_mode);
- pos += scnprintf(buf + pos, bufsz - pos,
- "tsf_timer_offset_msecs = %d\n",
- le16_to_cpu(cmd->tsf_timer_offset_msecs));
- pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
- le16_to_cpu(cmd->toa_offset));
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int ret = 0;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("request_id=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.request_id = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("initiator=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.initiator = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.one_sided_los_disable = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("req_timeout=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.req_timeout = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("report_policy=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.report_policy = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("macaddr_random=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.macaddr_random = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("num_of_ap=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.num_of_ap = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("macaddr_template=", buf);
- if (data) {
- u8 mac[ETH_ALEN];
-
- if (!mac_pton(data, mac)) {
- ret = -EINVAL;
- goto out;
- }
- memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("macaddr_mask=", buf);
- if (data) {
- u8 mac[ETH_ALEN];
-
- if (!mac_pton(data, mac)) {
- ret = -EINVAL;
- goto out;
- }
- memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ap=", buf);
- if (data) {
- struct iwl_tof_range_req_ap_entry ap = {};
- int size = sizeof(struct iwl_tof_range_req_ap_entry);
- u16 burst_period;
- u8 *mac = ap.bssid;
- unsigned int i;
-
- if (sscanf(data, "%u %hhd %hhd %hhd"
- "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
- "%hhd %hhd %hd"
- "%hhd %hhd %d"
- "%hhx %hhd %hhd %hhd",
- &i, &ap.channel_num, &ap.bandwidth,
- &ap.ctrl_ch_position,
- mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
- &ap.measure_type, &ap.num_of_bursts,
- &burst_period,
- &ap.samples_per_burst, &ap.retries_per_sample,
- &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
- &ap.enable_dyn_ack, &ap.rssi) != 20) {
- ret = -EINVAL;
- goto out;
- }
- if (i >= IWL_MVM_TOF_MAX_APS) {
- IWL_ERR(mvm, "Invalid AP index %d\n", i);
- ret = -EINVAL;
- goto out;
- }
-
- ap.burst_period = cpu_to_le16(burst_period);
-
- memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_range_request=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value)
- ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
- goto out;
- }
-
- ret = -EINVAL;
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[512];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_tof_range_req_cmd *cmd;
- int i;
-
- cmd = &mvm->tof_data.range_req;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
- cmd->request_id);
- pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
- cmd->initiator);
- pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
- cmd->one_sided_los_disable);
- pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
- cmd->req_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
- cmd->report_policy);
- pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
- cmd->macaddr_random);
- pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
- cmd->macaddr_template);
- pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
- cmd->macaddr_mask);
- pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
- cmd->num_of_ap);
- for (i = 0; i < cmd->num_of_ap; i++) {
- struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "ap %.2d: channel_num=%hhd bw=%hhd"
- " control=%hhd bssid=%pM type=%hhd"
- " num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
- " retries=%hhd tsf_delta=%d"
- " tsf_delta_direction=%hhd location_req=0x%hhx "
- " asap=%hhd enable=%hhd rssi=%hhd\n",
- i, ap->channel_num, ap->bandwidth,
- ap->ctrl_ch_position, ap->bssid,
- ap->measure_type, ap->num_of_bursts,
- ap->burst_period, ap->samples_per_burst,
- ap->retries_per_sample, ap->tsf_delta,
- ap->tsf_delta_direction,
- ap->location_req, ap->asap_mode,
- ap->enable_dyn_ack, ap->rssi);
- }
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
- char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int ret = 0;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
- cpu_to_le16(value);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.min_delta_ftm = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
- value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
- value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
- value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value)
- ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
- goto out;
- }
-
- ret = -EINVAL;
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_tof_range_req_ext_cmd *cmd;
-
- cmd = &mvm->tof_data.range_req_ext;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "tsf_timer_offset_msec = %hd\n",
- cmd->tsf_timer_offset_msec);
- pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
- cmd->min_delta_ftm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ftm_format_and_bw20M = %hhd\n",
- cmd->ftm_format_and_bw20M);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ftm_format_and_bw40M = %hhd\n",
- cmd->ftm_format_and_bw40M);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ftm_format_and_bw80M = %hhd\n",
- cmd->ftm_format_and_bw80M);
-
- mutex_unlock(&mvm->mutex);
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
- char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int abort_id, ret = 0;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("abort_id=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.last_abort_id = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_range_abort=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value) {
- abort_id = mvm->tof_data.last_abort_id;
- ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
- goto out;
- }
- }
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[32];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- int last_abort_id;
-
- mutex_lock(&mvm->mutex);
- last_abort_id = mvm->tof_data.last_abort_id;
- mutex_unlock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
- last_abort_id);
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
{
- struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
- char *buf;
- int pos = 0;
- const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
- struct iwl_tof_range_rsp_ntfy *cmd;
- int i, ret;
+ u8 value;
+ int ret;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
mutex_lock(&mvm->mutex);
- cmd = &mvm->tof_data.range_resp;
-
- pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
- cmd->request_id);
- pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
- cmd->request_status);
- pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
- cmd->last_in_batch);
- pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
- cmd->num_of_aps);
- for (i = 0; i < cmd->num_of_aps; i++) {
- struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "ap %.2d: bssid=%pM status=%hhd bw=%hhd"
- " rtt=%d rtt_var=%d rtt_spread=%d"
- " rssi=%hhd rssi_spread=%hhd"
- " range=%d range_var=%d"
- " time_stamp=%d\n",
- i, ap->bssid, ap->measure_status,
- ap->measure_bw,
- ap->rtt, ap->rtt_variance, ap->rtt_spread,
- ap->rssi, ap->rssi_spread, ap->range,
- ap->range_variance, ap->timestamp);
- }
+ iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
mutex_unlock(&mvm->mutex);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return count;
}
-static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
- size_t count, loff_t *ppos)
+static ssize_t
+iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
@@ -1279,13 +555,24 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
ret = kstrtou8(buf, 0, &value);
if (ret)
return ret;
- if (value > 1)
+
+ if (value > NUM_LOW_LATENCY_FORCE)
return -EINVAL;
mutex_lock(&mvm->mutex);
- iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
+ if (value == LOW_LATENCY_FORCE_UNSET) {
+ iwl_mvm_update_low_latency(mvm, vif, false,
+ LOW_LATENCY_DEBUGFS_FORCE);
+ iwl_mvm_update_low_latency(mvm, vif, false,
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
+ } else {
+ iwl_mvm_update_low_latency(mvm, vif,
+ value == LOW_LATENCY_FORCE_ON,
+ LOW_LATENCY_DEBUGFS_FORCE);
+ iwl_mvm_update_low_latency(mvm, vif, true,
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
+ }
mutex_unlock(&mvm->mutex);
-
return count;
}
@@ -1295,15 +582,25 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[30] = {};
+ char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n"
+ "dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n";
+
+ /*
 +	 * all the values in the format are boolean, so the size of the format
 +	 * string is enough to hold the result string
+ */
+ char buf[sizeof(format) + 1] = {};
int len;
- len = scnprintf(buf, sizeof(buf) - 1,
- "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
+ len = scnprintf(buf, sizeof(buf) - 1, format,
!!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
!!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
!!(mvmvif->low_latency & LOW_LATENCY_VCMD),
- !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));
+ !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE),
+ !!(mvmvif->low_latency &
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE),
+ !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE),
+ !!(mvmvif->low_latency_actual));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
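
Sizing buf as sizeof(format) + 1 works because every "%d" (two characters) in the format expands to a single 0/1 digit, so the formatted output can never be longer than the format string itself. A small worked illustration of that reasoning, using a shortened hypothetical format:

#include <stdio.h>

int main(void)
{
	/* each "%d" (2 chars) prints a boolean, i.e. one char: "0" or "1" */
	char format[] = "traffic=%d\ndbgfs=%d\n";
	char buf[sizeof(format) + 1];	/* always large enough */
	int n;

	n = snprintf(buf, sizeof(buf), format, 1, 0);
	/* prints: format is 21 bytes, output is 18 bytes */
	printf("format is %zu bytes, output is %d bytes\n", sizeof(format), n);
	return 0;
}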
@@ -1456,14 +753,9 @@ MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
-MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
@@ -1497,6 +789,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
@@ -1506,24 +799,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif == mvm->bf_allowed_vif)
MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
- !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
- if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
- MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
- mvmvif->dbgfs_dir, 0600);
-
- MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
- 0600);
- MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
- 0600);
- MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
- 0600);
- MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
- 0600);
- MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
- 0400);
- }
-
/*
* Create symlink for convenience pointing to interface specific
* debugfs entries for the driver. For example, under
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 52c361a6124c..776b24f54200 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,6 +69,7 @@
#include "sta.h"
#include "iwl-io.h"
#include "debugfs.h"
+#include "iwl-modparams.h"
#include "fw/error-dump.h"
static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
@@ -1187,64 +1188,128 @@ out:
return ret ?: count;
}
-static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
{
- struct iwl_mvm *mvm = file->private_data;
- int conf;
- char buf[8];
- const size_t bufsz = sizeof(buf);
- int pos = 0;
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
+ struct sk_buff *beacon;
+ struct ieee80211_tx_info *info;
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
+ u8 rate;
+ u16 flags;
+ int i;
+
+ len /= 2;
+
+ /* Element len should be represented by u8 */
+ if (len >= U8_MAX)
+ return -EINVAL;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ if (!iwl_mvm_has_new_tx_api(mvm) &&
+ !fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE))
+ return -EINVAL;
+
+ rcu_read_lock();
+
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, true);
+ if (!vif)
+ continue;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ break;
+ }
+
+ if (i == NUM_MAC_INDEX_DRIVER || !vif)
+ goto out_err;
+
+ mvm->hw->extra_beacon_tailroom = len;
+
+ beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
+ if (!beacon)
+ goto out_err;
+
+ if (len && hex2bin(skb_put_zero(beacon, len), bin, len)) {
+ dev_kfree_skb(beacon);
+ goto out_err;
+ }
+
+ mvm->beacon_inject_active = true;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ info = IEEE80211_SKB_CB(beacon);
+ rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+ flags = iwl_mvm_mac80211_idx_to_hwrate(rate);
+
+ if (rate == IWL_FIRST_CCK_RATE)
+ flags |= IWL_MAC_BEACON_CCK;
+
+ beacon_cmd.flags = cpu_to_le16(flags);
+ beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
+ beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+ &beacon_cmd.tim_size,
+ beacon->data, beacon->len);
mutex_lock(&mvm->mutex);
- conf = mvm->fwrt.dump.conf;
+ iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+ sizeof(beacon_cmd));
mutex_unlock(&mvm->mutex);
- pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
+ dev_kfree_skb(beacon);
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ rcu_read_unlock();
+ return 0;
+
+out_err:
+ rcu_read_unlock();
+ return -EINVAL;
}
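
The beacon-injection path above receives the extra IEs as a hex string and converts them with hex2bin(), which consumes two input characters per output byte (hence the earlier len /= 2) and reports a malformed digit through its return value. A minimal sketch of that conversion step, with hypothetical buffer names:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

/* hypothetical helper: parse "dd0411223344"-style hex into out[] */
static int ex_parse_hex_ies(const char *hex, u8 *out, size_t out_len)
{
	/* two input characters are needed per output byte */
	if (strlen(hex) < out_len * 2)
		return -EINVAL;

	/* hex2bin() returns 0 on success, non-zero on a bad digit */
	return hex2bin(out, hex, out_len);
}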
-/*
- * Enable / Disable continuous recording.
- * Cause the FW to start continuous recording, by sending the relevant hcmd.
- * Enable: input of every integer larger than 0, ENABLE_CONT_RECORDING.
- * Disable: for 0 as input, DISABLE_CONT_RECORDING.
- */
-static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
+static ssize_t iwl_dbgfs_inject_beacon_ie_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
{
- struct iwl_trans *trans = mvm->trans;
- const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
- struct iwl_continuous_record_cmd cont_rec = {};
- int ret, rec_mode;
+ int ret = _iwl_dbgfs_inject_beacon_ie(mvm, buf, count);
- if (!iwl_mvm_firmware_running(mvm))
- return -EIO;
-
- if (!dest)
- return -EOPNOTSUPP;
+ mvm->hw->extra_beacon_tailroom = 0;
+ return ret ?: count;
+}
- if (dest->monitor_mode != SMEM_MODE ||
- trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
- return -EOPNOTSUPP;
+static ssize_t iwl_dbgfs_inject_beacon_ie_restore_write(struct iwl_mvm *mvm,
+ char *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = _iwl_dbgfs_inject_beacon_ie(mvm, NULL, 0);
- ret = kstrtoint(buf, 0, &rec_mode);
- if (ret)
- return ret;
+ mvm->hw->extra_beacon_tailroom = 0;
+ mvm->beacon_inject_active = false;
+ return ret ?: count;
+}
- cont_rec.record_mode.enable_recording = rec_mode ?
- cpu_to_le16(ENABLE_CONT_RECORDING) :
- cpu_to_le16(DISABLE_CONT_RECORDING);
+static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int conf;
+ char buf[8];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0,
- sizeof(cont_rec), &cont_rec);
+ conf = mvm->fwrt.dump.conf;
mutex_unlock(&mvm->mutex);
- return ret ?: count;
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
@@ -1722,11 +1787,36 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
+struct iwl_mvm_sniffer_apply {
+ struct iwl_mvm *mvm;
+ u8 *bssid;
+ u16 aid;
+};
+
+static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm_sniffer_apply *apply = data;
+
+ apply->mvm->cur_aid = cpu_to_le16(apply->aid);
+ memcpy(apply->mvm->cur_bssid, apply->bssid,
+ sizeof(apply->mvm->cur_bssid));
+
+ return true;
+}
+
static ssize_t
iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
+ struct iwl_notification_wait wait;
struct iwl_he_monitor_cmd he_mon_cmd = {};
+ struct iwl_mvm_sniffer_apply apply = {
+ .mvm = mvm,
+ };
+ u16 wait_cmds[] = {
+ iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
+ };
u32 aid;
int ret;
@@ -1742,16 +1832,54 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
he_mon_cmd.aid = cpu_to_le16(aid);
+ apply.aid = aid;
+ apply.bssid = (void *)he_mon_cmd.bssid;
+
mutex_lock(&mvm->mutex);
+
+ /*
+ * Use the notification waiter to get our function triggered
+ * in sequence with other RX. This ensures that frames we get
+ * on the RX queue _before_ the new configuration is applied
+ * still have mvm->cur_aid pointing to the old AID, and that
+ * frames on the RX queue _after_ the firmware processed the
+ * new configuration (and sent the response, synchronously)
+ * get mvm->cur_aid correctly set to the new AID.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait,
+ wait_cmds, ARRAY_SIZE(wait_cmds),
+ iwl_mvm_sniffer_apply, &apply);
+
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
DATA_PATH_GROUP, 0), 0,
sizeof(he_mon_cmd), &he_mon_cmd);
+
+ /* no need to really wait, we already did anyway */
+ iwl_remove_notification(&mvm->notif_wait, &wait);
+
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
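
The notification-wait usage above is the generic iwlwifi pattern: register a waiter for a command ID before sending, let the callback run in RX order when the synchronous response arrives, then unregister. A condensed sketch of the same flow, reusing the signatures visible in this hunk; the context struct, callback, and ex_send_the_command() helper are illustrative only:

#include "mvm.h"	/* assumed: struct iwl_mvm, notif_wait, cur_aid */

struct ex_apply_ctx {
	struct iwl_mvm *mvm;
	u16 new_aid;
};

static bool ex_apply(struct iwl_notif_wait_data *notif_data,
		     struct iwl_rx_packet *pkt, void *data)
{
	struct ex_apply_ctx *ctx = data;

	/* runs in RX order, so frames queued earlier still see the old state */
	ctx->mvm->cur_aid = cpu_to_le16(ctx->new_aid);
	return true;	/* done waiting */
}

static int ex_send_config(struct iwl_mvm *mvm, u16 aid)
{
	struct iwl_notification_wait wait;
	struct ex_apply_ctx ctx = { .mvm = mvm, .new_aid = aid };
	u16 cmds[] = {
		iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
	};
	int ret;

	iwl_init_notification_wait(&mvm->notif_wait, &wait,
				   cmds, ARRAY_SIZE(cmds),
				   ex_apply, &ctx);

	ret = ex_send_the_command(mvm, aid);	/* hypothetical send helper */

	/* the response already arrived synchronously; just unregister */
	iwl_remove_notification(&mvm->notif_wait, &wait);
	return ret;
}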
static ssize_t
+iwl_dbgfs_he_sniffer_params_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ u8 buf[32];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf),
+ "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
+ le16_to_cpu(mvm->cur_aid), mvm->cur_bssid[0],
+ mvm->cur_bssid[1], mvm->cur_bssid[2], mvm->cur_bssid[3],
+ mvm->cur_bssid[4], mvm->cur_bssid[5]);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1800,11 +1928,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
-MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512);
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
@@ -1820,7 +1949,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
-MVM_DEBUGFS_WRITE_FILE_OPS(he_sniffer_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32);
static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2004,13 +2133,14 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200);
#ifdef CONFIG_ACPI
MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
#endif
- MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600);
if (!debugfs_create_bool("enable_scan_iteration_notif",
0600,
@@ -2071,6 +2201,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (!debugfs_create_blob("nvm_phy_sku", 0400,
mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
goto err;
+ if (!debugfs_create_blob("nvm_reg", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_reg_blob))
+ goto err;
debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
new file mode 100644
index 000000000000..e9822a3ec373
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -0,0 +1,654 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/math64.h>
+#include <net/cfg80211.h>
+#include "mvm.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "constants.h"
+
+struct iwl_mvm_loc_entry {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 lci_len, civic_len;
+ u8 buf[];
+};
+
+static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_loc_entry *e, *t;
+
+ mvm->ftm_initiator.req = NULL;
+ mvm->ftm_initiator.req_wdev = NULL;
+ memset(mvm->ftm_initiator.responses, 0,
+ sizeof(mvm->ftm_initiator.responses));
+ list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
+void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
+{
+ struct cfg80211_pmsr_result result = {
+ .status = NL80211_PMSR_STATUS_FAILURE,
+ .final = 1,
+ .host_time = ktime_get_boot_ns(),
+ .type = NL80211_PMSR_TYPE_FTM,
+ };
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->ftm_initiator.req)
+ return;
+
+ for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
+ memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
+ ETH_ALEN);
+ result.ftm.burst_index = mvm->ftm_initiator.responses[i];
+
+ cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
+ mvm->ftm_initiator.req,
+ &result, GFP_KERNEL);
+ }
+
+ cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
+ mvm->ftm_initiator.req, GFP_KERNEL);
+ iwl_mvm_ftm_reset(mvm);
+}
+
+static int
+iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
+{
+ switch (s) {
+ case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
+ return 0;
+ case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
+ return -EBUSY;
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+
+static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_cmd_v5 *cmd,
+ struct cfg80211_pmsr_request *req)
+{
+ int i;
+
+ cmd->request_id = req->cookie;
+ cmd->num_of_ap = req->n_peers;
+
+ /* use maximum for "no timeout" or bigger than what we can do */
+ if (!req->timeout || req->timeout > 255 * 100)
+ cmd->req_timeout = 255;
+ else
+ cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);
+
+ /*
+ * We treat it always as random, since if not we'll
+ * have filled our local address there instead.
+ */
+ cmd->macaddr_random = 1;
+ memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
+
+ if (vif->bss_conf.assoc)
+ memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
+ else
+ eth_broadcast_addr(cmd->range_req_bssid);
+}
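
Two conversions in iwl_mvm_ftm_cmd_v5() are easy to misread: the cfg80211 timeout (milliseconds) is rounded up into what appear to be 100 ms firmware units and clamped to the u8 maximum of 255, and the cfg80211 MAC-address mask is inverted, since cfg80211 sets bits that are taken from mac_addr while the firmware evidently expects the opposite polarity. A small worked illustration; the units and mask polarity are inferred from the code above:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int timeout_ms = 1234;
	unsigned char req_timeout;
	unsigned char cfg_mask = 0xff;	/* cfg80211: every address bit fixed */

	/* 1234 ms -> 13 units of 100 ms -> 1.3 s effective timeout */
	if (!timeout_ms || timeout_ms > 255 * 100)
		req_timeout = 255;
	else
		req_timeout = DIV_ROUND_UP(timeout_ms, 100);

	/* inverted mask: 0x00 here would mean "no bits randomized" */
	printf("req_timeout=%u fw_mask=0x%02x\n",
	       req_timeout, (unsigned char)~cfg_mask);
	return 0;
}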
+
+static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_cmd *cmd,
+ struct cfg80211_pmsr_request *req)
+{
+ int i;
+
+ cmd->initiator_flags =
+ cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
+ IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
+ cmd->request_id = req->cookie;
+ cmd->num_of_ap = req->n_peers;
+
+ /*
+ * Use a large value for "no timeout". Don't use the maximum value
+ * because of fw limitations.
+ */
+ if (req->timeout)
+ cmd->req_timeout_ms = cpu_to_le32(req->timeout);
+ else
+ cmd->req_timeout_ms = cpu_to_le32(0xfffff);
+
+ memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
+
+ if (vif->bss_conf.assoc)
+ memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
+ else
+ eth_broadcast_addr(cmd->range_req_bssid);
+
+ /* TODO: fill in tsf_mac_id if needed */
+ cmd->tsf_mac_id = cpu_to_le32(0xff);
+}
+
+static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *bandwidth,
+ u8 *ctrl_ch_position)
+{
+ u32 freq = peer->chandef.chan->center_freq;
+
+ *channel = ieee80211_frequency_to_channel(freq);
+
+ switch (peer->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *bandwidth = IWL_TOF_BW_20_LEGACY;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *bandwidth = IWL_TOF_BW_20_HT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *bandwidth = IWL_TOF_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *bandwidth = IWL_TOF_BW_80;
+ break;
+ default:
+ IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
+ peer->chandef.width);
+ return -EINVAL;
+ }
+
+ *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+ iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
+
+ return 0;
+}
+
+static int
+iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v2 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ target->measure_type = 0; /* regular two-sided FTM */
+ target->retries_per_sample = peer->ftm.ftmr_retries;
+ target->asap_mode = peer->ftm.asap;
+ target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;
+
+ if (peer->ftm.request_lci)
+ target->location_req |= IWL_TOF_LOC_LCI;
+ if (peer->ftm.request_civicloc)
+ target->location_req |= IWL_TOF_LOC_CIVIC;
+
+ target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;
+
+ return 0;
+}
+
+#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
+ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
+
+static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ target->ftmr_max_retries = peer->ftm.ftmr_retries;
+ target->initiator_ap_flags = cpu_to_le32(0);
+
+ if (peer->ftm.asap)
+ FTM_PUT_FLAG(ASAP);
+
+ if (peer->ftm.request_lci)
+ FTM_PUT_FLAG(LCI_REQUEST);
+
+ if (peer->ftm.request_civicloc)
+ FTM_PUT_FLAG(CIVIC_REQUEST);
+
+ if (IWL_MVM_FTM_INITIATOR_DYNACK)
+ FTM_PUT_FLAG(DYN_ACK);
+
+ if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
+ FTM_PUT_FLAG(ALGO_LR);
+ else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
+ FTM_PUT_FLAG(ALGO_FFT);
+
+ return 0;
+}
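
FTM_PUT_FLAG() relies on preprocessor token pasting (##) so each call site names only the suffix: FTM_PUT_FLAG(ASAP) expands to target->initiator_ap_flags |= cpu_to_le32(IWL_INITIATOR_AP_FLAGS_ASAP). A stripped-down sketch of the same macro technique, with hypothetical flag names:

#include <stdio.h>

#define EX_FLAG_ASAP	0x1
#define EX_FLAG_LCI	0x2

/* paste the short name onto the common prefix at the call site */
#define EX_PUT_FLAG(flags, name)	((flags) |= EX_FLAG_##name)

int main(void)
{
	unsigned int flags = 0;

	EX_PUT_FLAG(flags, ASAP);
	EX_PUT_FLAG(flags, LCI);
	printf("flags=0x%x\n", flags);	/* prints flags=0x3 */
	return 0;
}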
+
+int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v5 cmd_v5;
+ struct iwl_tof_range_req_cmd cmd;
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
+ u8 num_of_ap;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
+ u32 status = 0;
+ int err, i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->ftm_initiator.req)
+ return -EBUSY;
+
+ if (new_api) {
+ iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
+ hcmd.data[0] = &cmd;
+ hcmd.len[0] = sizeof(cmd);
+ num_of_ap = cmd.num_of_ap;
+ } else {
+ iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
+ hcmd.data[0] = &cmd_v5;
+ hcmd.len[0] = sizeof(cmd_v5);
+ num_of_ap = cmd_v5.num_of_ap;
+ }
+
+ for (i = 0; i < num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ if (new_api)
+ err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]);
+ else
+ err = iwl_mvm_ftm_put_target_v2(mvm, peer,
+ &cmd_v5.ap[i]);
+
+ if (err)
+ return err;
+ }
+
+ err = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
+ if (!err && status) {
+ IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
+ status);
+ err = iwl_ftm_range_request_status_to_err(status);
+ }
+
+ if (!err) {
+ mvm->ftm_initiator.req = req;
+ mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
+ }
+
+ return err;
+}
+
+void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_abort_cmd cmd = {
+ .request_id = req->cookie,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (req != mvm->ftm_initiator.req)
+ return;
+
+ if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
+ LOCATION_GROUP, 0),
+ 0, sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "failed to abort FTM process\n");
+}
+
+static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
+ const u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < req->n_peers; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ if (ether_addr_equal_unaligned(peer->addr, addr))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
+{
+ u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
+ u32 curr_gp2, diff;
+ u64 now_from_boot_ns;
+
+ iwl_mvm_get_sync_time(mvm, &curr_gp2, &now_from_boot_ns);
+
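+ /*
+ * GP2 is a 32-bit microsecond counter, so account for wraparound
+ * when computing the age of the firmware timestamp; the delta is
+ * converted to nanoseconds of boottime below.
+ */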
+ if (curr_gp2 >= gp2_ts)
+ diff = curr_gp2 - gp2_ts;
+ else
+ diff = curr_gp2 + (U32_MAX - gp2_ts + 1);
+
+ return now_from_boot_ns - (u64)diff * 1000;
+}
+
+static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_result *res)
+{
+ struct iwl_mvm_loc_entry *entry;
+
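+ /* LCI and civic blobs are stored back to back in entry->buf, LCI first */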
+ list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
+ if (!ether_addr_equal_unaligned(res->addr, entry->addr))
+ continue;
+
+ if (entry->lci_len) {
+ res->ftm.lci_len = entry->lci_len;
+ res->ftm.lci = entry->buf;
+ }
+
+ if (entry->civic_len) {
+ res->ftm.civicloc_len = entry->civic_len;
+ res->ftm.civicloc = entry->buf + entry->lci_len;
+ }
+
+ /* we found the entry we needed */
+ break;
+ }
+}
+
+static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
+ u8 num_of_aps)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
+ IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
+ request_id, (u8)mvm->ftm_initiator.req->cookie);
+ return -EINVAL;
+ }
+
+ if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
+ IWL_ERR(mvm, "FTM range response invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
+ struct cfg80211_pmsr_result *res)
+{
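+ /* rtt_avg is in picoseconds; 100/6666 approximates c/2 in cm/ps,
+ * so this yields the estimated distance in cm
+ */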
+ s64 rtt_avg = res->ftm.rtt_avg * 100;
+
+ do_div(rtt_avg, 6666);
+
+ IWL_DEBUG_INFO(mvm, "entry %d\n", index);
+ IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
+ IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
+ IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
+ IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index);
+ IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
+ IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
+ IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread);
+ IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
+ IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
+ IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
+ IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
+}
+
+void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
+ int i;
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
+ u8 num_of_aps, last_in_batch;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->ftm_initiator.req) {
+ IWL_ERR(mvm, "Got FTM response but have no request?\n");
+ return;
+ }
+
+ if (new_api) {
+ if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp->request_id,
+ fw_resp->num_of_aps))
+ return;
+
+ num_of_aps = fw_resp->num_of_aps;
+ last_in_batch = fw_resp->last_report;
+ } else {
+ if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
+ fw_resp_v5->num_of_aps))
+ return;
+
+ num_of_aps = fw_resp_v5->num_of_aps;
+ last_in_batch = fw_resp_v5->last_in_batch;
+ }
+
+ IWL_DEBUG_INFO(mvm, "Range response received\n");
+ IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n",
+ mvm->ftm_initiator.req->cookie, num_of_aps);
+
+ for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
+ struct cfg80211_pmsr_result result = {};
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
+ int peer_idx;
+
+ if (new_api) {
+ fw_ap = &fw_resp->ap[i];
+ result.final = fw_resp->ap[i].last_burst;
+ } else {
+ /* the first part is the same for old and new APIs */
+ fw_ap = (void *)&fw_resp_v5->ap[i];
+ /*
+ * FIXME: the firmware needs to report this, we don't
+ * even know the number of bursts the responder picked
+ * (if we asked it to)
+ */
+ result.final = 0;
+ }
+
+ peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
+ fw_ap->bssid);
+ if (peer_idx < 0) {
+ IWL_WARN(mvm,
+ "Unknown address (%pM, target #%d) in FTM response\n",
+ fw_ap->bssid, i);
+ continue;
+ }
+
+ switch (fw_ap->measure_status) {
+ case IWL_TOF_ENTRY_SUCCESS:
+ result.status = NL80211_PMSR_STATUS_SUCCESS;
+ break;
+ case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
+ result.status = NL80211_PMSR_STATUS_TIMEOUT;
+ break;
+ case IWL_TOF_ENTRY_NO_RESPONSE:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
+ break;
+ case IWL_TOF_ENTRY_REQUEST_REJECTED:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
+ result.ftm.busy_retry_time = fw_ap->refusal_period;
+ break;
+ default:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
+ break;
+ }
+ memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
+ result.host_time = iwl_mvm_ftm_get_host_time(mvm,
+ fw_ap->timestamp);
+ result.type = NL80211_PMSR_TYPE_FTM;
+ result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
+ mvm->ftm_initiator.responses[peer_idx]++;
+ result.ftm.rssi_avg = fw_ap->rssi;
+ result.ftm.rssi_avg_valid = 1;
+ result.ftm.rssi_spread = fw_ap->rssi_spread;
+ result.ftm.rssi_spread_valid = 1;
+ result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
+ result.ftm.rtt_avg_valid = 1;
+ result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
+ result.ftm.rtt_variance_valid = 1;
+ result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
+ result.ftm.rtt_spread_valid = 1;
+
+ iwl_mvm_ftm_get_lci_civic(mvm, &result);
+
+ cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
+ mvm->ftm_initiator.req,
+ &result, GFP_KERNEL);
+
+ iwl_mvm_debug_range_resp(mvm, i, &result);
+ }
+
+ if (last_in_batch) {
+ cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
+ mvm->ftm_initiator.req,
+ GFP_KERNEL);
+ iwl_mvm_ftm_reset(mvm);
+ }
+}
+
+void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
+ size_t len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mvm_loc_entry *entry;
+ const u8 *ies, *lci, *civic, *msr_ie;
+ size_t ies_len, lci_len = 0, civic_len = 0;
+ size_t baselen = IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.ftm);
+ static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
+ static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;
+
+ if (len <= baselen)
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ies = mgmt->u.action.u.ftm.variable;
+ ies_len = len - baselen;
+
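+ /*
+ * Look for Measurement Report elements of the wanted type; the
+ * measurement type byte is at offset 4 in the element, after the
+ * EID, length, token and mode bytes.
+ */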
+ msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
+ &rprt_type_lci, 1, 4);
+ if (msr_ie) {
+ lci = msr_ie + 2;
+ lci_len = msr_ie[1];
+ }
+
+ msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
+ &rprt_type_civic, 1, 4);
+ if (msr_ie) {
+ civic = msr_ie + 2;
+ civic_len = msr_ie[1];
+ }
+
+ entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
+ if (!entry)
+ return;
+
+ memcpy(entry->addr, mgmt->bssid, ETH_ALEN);
+
+ entry->lci_len = lci_len;
+ if (lci_len)
+ memcpy(entry->buf, lci, lci_len);
+
+ entry->civic_len = civic_len;
+ if (civic_len)
+ memcpy(entry->buf + lci_len, civic, civic_len);
+
+ list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
new file mode 100644
index 000000000000..1513b8b4062f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -0,0 +1,244 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/cfg80211.h>
+#include <linux/etherdevice.h>
+#include "mvm.h"
+#include "constants.h"
+
+static int
+iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_tof_responder_config_cmd cmd = {
+ .channel_num = chandef->chan->hw_value,
+ .cmd_valid_fields =
+ cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO |
+ IWL_TOF_RESPONDER_CMD_VALID_BSSID |
+ IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
+ .sta_id = mvmvif->bcast_sta.sta_id,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ cmd.bandwidth = IWL_TOF_BW_20_LEGACY;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ cmd.bandwidth = IWL_TOF_BW_20_HT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ cmd.bandwidth = IWL_TOF_BW_40;
+ cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ cmd.bandwidth = IWL_TOF_BW_80;
+ cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ memcpy(cmd.bssid, vif->addr, ETH_ALEN);
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
+ LOCATION_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+}
+
+static int
+iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params)
+{
+ struct iwl_tof_responder_dyn_config_cmd cmd = {
+ .lci_len = cpu_to_le32(params->lci_len + 2),
+ .civic_len = cpu_to_le32(params->civicloc_len + 2),
+ };
+ u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
+ LOCATION_GROUP, 0),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ .data[1] = &data,
+ /* .len[1] set later */
+ /* may not be able to DMA from stack */
+ .dataflags[1] = IWL_HCMD_DFL_DUP,
+ };
+ u32 aligned_lci_len = ALIGN(params->lci_len + 2, 4);
+ u32 aligned_civicloc_len = ALIGN(params->civicloc_len + 2, 4);
+ u8 *pos = data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (aligned_lci_len + aligned_civicloc_len > sizeof(data)) {
+ IWL_ERR(mvm, "LCI/civicloc data too big (%zd + %zd)\n",
+ params->lci_len, params->civicloc_len);
+ return -ENOBUFS;
+ }
+
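+ /*
+ * Each blob is wrapped in a Measurement Report element (EID +
+ * length + data, hence the "+ 2" above); the LCI element is padded
+ * to a 4-byte boundary before the civic element follows.
+ */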
+ pos[0] = WLAN_EID_MEASURE_REPORT;
+ pos[1] = params->lci_len;
+ memcpy(pos + 2, params->lci, params->lci_len);
+
+ pos += aligned_lci_len;
+ pos[0] = WLAN_EID_MEASURE_REPORT;
+ pos[1] = params->civicloc_len;
+ memcpy(pos + 2, params->civicloc, params->civicloc_len);
+
+ hcmd.len[1] = aligned_lci_len + aligned_civicloc_len;
+
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
+
+int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_ftm_responder_params *params;
+ struct ieee80211_chanctx_conf ctx, *pctx;
+ u16 *phy_ctxt_id;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ int ret;
+
+ params = vif->bss_conf.ftmr_params;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!vif->bss_conf.ftm_responder))
+ return -EINVAL;
+
+ if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
+ !mvmvif->ap_ibss_active) {
+ IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+ return -EIO;
+ }
+
+ rcu_read_lock();
+ pctx = rcu_dereference(vif->chanctx_conf);
+ /* Copy the ctx so the RCU read lock can be released before sending the
+ * PHY context. We don't care about changes to the ctx after releasing
+ * the lock because the driver is still protected by the mutex. */
+ ctx = *pctx;
+ phy_ctxt_id = (u16 *)pctx->drv_priv;
+ rcu_read_unlock();
+
+ phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def,
+ ctx.rx_chains_static,
+ ctx.rx_chains_dynamic);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def);
+ if (ret)
+ return ret;
+
+ if (params)
+ ret = iwl_mvm_ftm_responder_dyn_cfg_cmd(mvm, vif, params);
+
+ return ret;
+}
+
+void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ if (!vif->bss_conf.ftm_responder)
+ return;
+
+ iwl_mvm_ftm_start_responder(mvm, vif);
+}
+
+void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_ftm_responder_stats *resp = (void *)pkt->data;
+ struct cfg80211_ftm_responder_stats *stats = &mvm->ftm_resp_stats;
+ u32 flags = le32_to_cpu(resp->flags);
+
+ if (resp->success_ftm == resp->ftm_per_burst)
+ stats->success_num++;
+ else if (resp->success_ftm >= 2)
+ stats->partial_num++;
+ else
+ stats->failed_num++;
+
+ if ((flags & FTM_RESP_STAT_ASAP_REQ) &&
+ (flags & FTM_RESP_STAT_ASAP_RESP))
+ stats->asap_num++;
+
+ if (flags & FTM_RESP_STAT_NON_ASAP_RESP)
+ stats->non_asap_num++;
+
+ stats->total_duration_ms += le32_to_cpu(resp->duration) / USEC_PER_MSEC;
+
+ if (flags & FTM_RESP_STAT_TRIGGER_UNKNOWN)
+ stats->unknown_triggers_num++;
+
+ if (flags & FTM_RESP_STAT_DUP)
+ stats->reschedule_requests_num++;
+
+ if (flags & FTM_RESP_STAT_NON_ASAP_OUT_WIN)
+ stats->out_of_window_triggers_num++;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 143c7fcaea41..e3eb812e0248 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,7 +91,7 @@
#include "fw/api/sf.h"
#include "fw/api/sta.h"
#include "fw/api/stats.h"
-#include "fw/api/tof.h"
+#include "fw/api/location.h"
#include "fw/api/tx.h"
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 0d6c313b6669..00a47f6f1d81 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -105,12 +105,12 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
int i;
struct iwl_rss_config_cmd cmd = {
.flags = cpu_to_le32(IWL_RSS_ENABLE),
- .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
- IWL_RSS_HASH_TYPE_IPV4_UDP |
- IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
- IWL_RSS_HASH_TYPE_IPV6_TCP |
- IWL_RSS_HASH_TYPE_IPV6_UDP |
- IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+ .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
};
if (mvm->trans->num_rx_queues == 1)
@@ -127,13 +127,17 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
static int iwl_configure_rxq(struct iwl_mvm *mvm)
{
- int i, num_queues, size;
+ int i, num_queues, size, ret;
struct iwl_rfh_queue_config *cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
/* Do not configure default queue, it is configured via context info */
num_queues = mvm->trans->num_rx_queues - 1;
- size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
+ size = struct_size(cmd, data, num_queues);
cmd = kzalloc(size, GFP_KERNEL);
if (!cmd)
@@ -154,10 +158,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm)
cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
}
- return iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(DATA_PATH_GROUP,
- RFH_QUEUE_CONFIG_CMD),
- 0, size, cmd);
+ hcmd.data[0] = cmd;
+ hcmd.len[0] = size;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+
+ kfree(cmd);
+
+ return ret;
}
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
@@ -210,7 +218,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_lmac_alive *lmac1;
struct iwl_lmac_alive *lmac2 = NULL;
u16 status;
- u32 umac_error_event_table;
+ u32 lmac_error_event_table, umac_error_event_table;
if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
@@ -225,30 +233,35 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
status = le16_to_cpu(palive3->status);
}
- mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
+ lmac_error_event_table =
+ le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
+ iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);
+
if (lmac2)
- mvm->error_event_table[1] =
- le32_to_cpu(lmac2->error_event_table_ptr);
- mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
+ mvm->trans->lmac_error_event_table[1] =
+ le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
- umac_error_event_table = le32_to_cpu(umac->error_info_addr);
+ umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
if (!umac_error_event_table) {
mvm->support_umac_log = false;
} else if (umac_error_event_table >=
mvm->trans->cfg->min_umac_error_event_table) {
mvm->support_umac_log = true;
- mvm->umac_error_event_table = umac_error_event_table;
} else {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
- mvm->umac_error_event_table,
+ umac_error_event_table,
(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
"Init" : "RT");
mvm->support_umac_log = false;
}
- alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
+ if (mvm->support_umac_log)
+ iwl_fw_umac_set_alive_err_table(mvm->trans,
+ umac_error_event_table);
+
+ alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
alive_data->valid = status == IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
@@ -293,13 +306,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
- struct iwl_mvm_alive_data alive_data;
+ struct iwl_mvm_alive_data alive_data = {};
const struct fw_img *fw;
- int ret, i;
+ int ret;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
- set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
if (ucode_type == IWL_UCODE_REGULAR &&
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
@@ -332,11 +344,16 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
+ if (ret == -ETIMEDOUT)
+ iwl_fw_dbg_error_collect(&mvm->fwrt,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT);
+
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
- iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
- iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
+ iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
+ iwl_read_umac_prph(trans,
+ UMAG_SB_CPU_2_STATUS));
else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
@@ -373,14 +390,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
BIT(IWL_MAX_TID_COUNT + 2);
- for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
- atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
-
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_fw_set_dbg_rec_on(&mvm->fwrt);
#endif
- clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
return 0;
}
@@ -406,13 +419,15 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
iwl_wait_init_complete,
NULL);
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);
+
/* Will also start the device */
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- iwl_fw_assert_error_dump(&mvm->fwrt);
goto error;
}
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);
/* Send init config command to mark that we are sending NVM access
* commands
@@ -631,10 +646,10 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
}
#ifdef CONFIG_ACPI
-static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
- union acpi_object *table,
- struct iwl_mvm_sar_profile *profile,
- bool enabled)
+static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
+ union acpi_object *table,
+ struct iwl_mvm_sar_profile *profile,
+ bool enabled)
{
int i;
@@ -965,6 +980,57 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
}
#endif /* CONFIG_ACPI */
+void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
+{
+ u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
+ int ret;
+ u32 resp;
+
+ struct iwl_fw_error_recovery_cmd recovery_cmd = {
+ .flags = cpu_to_le32(flags),
+ .buf_size = 0,
+ };
+ struct iwl_host_cmd host_cmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
+ .flags = CMD_WANT_SKB,
+ .data = {&recovery_cmd, },
+ .len = {sizeof(recovery_cmd), },
+ };
+
+ /* no error log was defined in TLV */
+ if (!error_log_size)
+ return;
+
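+ /*
+ * For ERROR_RECOVERY_UPDATE_DB, attach the previously saved error
+ * log buffer as a second, NOCOPY fragment of the command.
+ */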
+ if (flags & ERROR_RECOVERY_UPDATE_DB) {
+ /* no buffer was allocated during the HW reset */
+ if (!mvm->error_recovery_buf)
+ return;
+
+ host_cmd.data[1] = mvm->error_recovery_buf;
+ host_cmd.len[1] = error_log_size;
+ host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+ recovery_cmd.buf_size = cpu_to_le32(error_log_size);
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &host_cmd);
+ kfree(mvm->error_recovery_buf);
+ mvm->error_recovery_buf = NULL;
+
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
+ return;
+ }
+
+ /* the SKB response is only relevant for ERROR_RECOVERY_UPDATE_DB */
+ if (flags & ERROR_RECOVERY_UPDATE_DB) {
+ resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
+ if (resp)
+ IWL_ERR(mvm,
+ "Failed to send recovery cmd blob was invalid %d\n",
+ resp);
+ }
+}
+
static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
int ret;
@@ -1055,7 +1121,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- iwl_fw_assert_error_dump(&mvm->fwrt);
+ iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
goto error;
}
@@ -1201,6 +1267,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);
+
+ if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
+ IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
+
ret = iwl_mvm_sar_init(mvm);
if (ret == 0) {
ret = iwl_mvm_sar_geo_init(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index 9bb1de1cad64..4348bb00e761 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -113,6 +115,7 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
switch (mode) {
case IWL_LED_BLINK:
IWL_ERR(mvm, "Blink led mode not supported, used default\n");
+ /* fall through */
case IWL_LED_DEFAULT:
case IWL_LED_RF_STATE:
mode = IWL_LED_RF_STATE;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 7779951a9533..6a70dece447d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -97,11 +97,6 @@ struct iwl_mvm_mac_iface_iterator_data {
bool found_vif;
};
-struct iwl_mvm_hw_queues_iface_iterator_data {
- struct ieee80211_vif *exclude_vif;
- unsigned long used_hw_queues;
-};
-
static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -208,61 +203,6 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
data->preferred_tsf = NUM_TSF_IDS;
}
-/*
- * Get the mask of the queues used by the vif
- */
-u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
-{
- u32 qmask = 0, ac;
-
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
-
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
- qmask |= BIT(vif->hw_queue[ac]);
- }
-
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)
- qmask |= BIT(vif->cab_queue);
-
- return qmask;
-}
-
-static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
-
- /* exclude the given vif */
- if (vif == data->exclude_vif)
- return;
-
- data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
-}
-
-unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
- struct ieee80211_vif *exclude_vif)
-{
- struct iwl_mvm_hw_queues_iface_iterator_data data = {
- .exclude_vif = exclude_vif,
- .used_hw_queues =
- BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(mvm->aux_queue) |
- BIT(IWL_MVM_DQA_GCAST_QUEUE),
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- /* mark all VIF used hw queues */
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
- iwl_mvm_iface_hw_queues_iter, &data);
-
- return data.used_hw_queues;
-}
-
static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -360,8 +300,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_mac_iface_iterator, &data);
- used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
-
/*
* In the case we're getting here during resume, it's similar to
* firmware restart, and with RESUME_ALL the iterator will find
@@ -416,9 +354,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* the ones here - no real limit
*/
queue_limit = IEEE80211_MAX_QUEUES;
- BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
- BITS_PER_BYTE *
- sizeof(mvm->hw_queue_to_mac80211[0]));
/*
* Find available queues, and allocate them to the ACs. When in
@@ -446,9 +381,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* queue value (when queue is enabled).
*/
mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
- vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
- } else {
- vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
@@ -462,8 +394,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
exit_fail:
memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
- memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
- vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
return ret;
}
@@ -776,29 +706,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
- if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) {
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u8 sta_id = mvmvif->ap_sta_id;
-
+ if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
- if (sta_id != IWL_MVM_INVALID_STA) {
- struct ieee80211_sta *sta;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /*
- * TODO: we should check the ext cap IE but it is
- * unclear why the spec requires two bits (one in HE
- * cap IE, and one in the ext cap IE). In the meantime
- * rely on the HE cap IE only.
- */
- if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] &
- IEEE80211_HE_MAC_CAP0_TWT_RES))
- ctxt_sta->data_policy |=
- cpu_to_le32(TWT_SUPPORTED);
- }
+ if (vif->bss_conf.twt_requester)
+ ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
}
@@ -881,8 +792,6 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
- cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
-
/* Override the filter flags to accept only probe requests */
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
@@ -902,9 +811,9 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
-static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
- __le32 *tim_index, __le32 *tim_size,
- u8 *beacon, u32 frame_size)
+void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+ __le32 *tim_index, __le32 *tim_size,
+ u8 *beacon, u32 frame_size)
{
u32 tim_idx;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
@@ -944,8 +853,8 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
return ie - beacon;
}
-static u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
- struct ieee80211_vif *vif)
+u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif)
{
u8 rate;
@@ -995,9 +904,9 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
}
-static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
- struct sk_buff *beacon,
- void *data, int len)
+int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
+ struct sk_buff *beacon,
+ void *data, int len)
{
struct iwl_host_cmd cmd = {
.id = BEACON_TEMPLATE_CMD,
@@ -1100,13 +1009,16 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
sizeof(beacon_cmd));
}
-static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct sk_buff *beacon)
+int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
{
if (WARN_ON(!beacon))
return -EINVAL;
+ if (IWL_MVM_NON_TRANSMITTING_AP)
+ return 0;
+
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD))
return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon);
@@ -1132,6 +1044,11 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
if (!beacon)
return -ENOMEM;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->beacon_inject_active)
+ return -EBUSY;
+#endif
+
ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
dev_kfree_skb(beacon);
return ret;
@@ -1203,7 +1120,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
- ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+ ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
/*
* Only set the beacon time when the MAC is being added, when we
@@ -1420,7 +1337,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
- struct iwl_mvm_tx_resp *beacon_notify_hdr;
+ struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data;
struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif;
struct agg_tx_status *agg_status;
@@ -1428,18 +1345,29 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- beacon_notify_hdr = &beacon->beacon_notify_hdr;
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
- mvm->ibss_manager = beacon->ibss_mgr_status != 0;
- agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
- status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
- IWL_DEBUG_RX(mvm,
- "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
- status, beacon_notify_hdr->failure_frame,
- le64_to_cpu(beacon->tsf),
- mvm->ap_last_beacon_gp2,
- le32_to_cpu(beacon_notify_hdr->initial_rate));
+ if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) {
+ struct iwl_mvm_tx_resp *beacon_notify_hdr =
+ &beacon_v5->beacon_notify_hdr;
+
+ mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0;
+ agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+ status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
+ IWL_DEBUG_RX(mvm,
+ "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n",
+ status, beacon_notify_hdr->failure_frame,
+ le64_to_cpu(beacon->tsf),
+ mvm->ap_last_beacon_gp2,
+ le32_to_cpu(beacon_notify_hdr->initial_rate));
+ } else {
+ mvm->ibss_manager = beacon->ibss_mgr_status != 0;
+ status = le32_to_cpu(beacon->status) & TX_STATUS_MSK;
+ IWL_DEBUG_RX(mvm,
+ "beacon status %#x tsf:0x%016llX gp2:0x%X\n",
+ status, le64_to_cpu(beacon->tsf),
+ mvm->ap_last_beacon_gp2);
+ }
csa_vif = rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
@@ -1472,35 +1400,48 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
}
}
-static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
- struct iwl_missed_beacons_notif *missed_beacons = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
struct iwl_fw_dbg_trigger_tlv *trigger;
u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
u32 rx_missed_bcon, rx_missed_bcon_since_rx;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(mb->mac_id);
- if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
- return;
+ IWL_DEBUG_INFO(mvm,
+ "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
+ le32_to_cpu(mb->mac_id),
+ le32_to_cpu(mb->consec_missed_beacons),
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
+ le32_to_cpu(mb->num_recvd_beacons),
+ le32_to_cpu(mb->num_expected_beacons));
+
+ rcu_read_lock();
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (!vif)
+ goto out;
- rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
+ rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons);
rx_missed_bcon_since_rx =
- le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx);
/*
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
*/
- if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
- IWL_MVM_MISSED_BEACONS_THRESHOLD)
+ if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
+ iwl_mvm_connection_loss(mvm, vif, "missed beacons");
+ else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
if (!trigger)
- return;
+ goto out;
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
@@ -1512,28 +1453,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
-}
-
-void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
-
- IWL_DEBUG_INFO(mvm,
- "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
- le32_to_cpu(mb->mac_id),
- le32_to_cpu(mb->consec_missed_beacons),
- le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
- le32_to_cpu(mb->num_recvd_beacons),
- le32_to_cpu(mb->num_expected_beacons));
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_beacon_loss_iterator,
- mb);
iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS);
+
+out:
+ rcu_read_unlock();
}
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
@@ -1575,16 +1499,29 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}
-static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
- struct iwl_probe_resp_data_notif *notif = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
struct iwl_probe_resp_data *old_data, *new_data;
+ int len = iwl_rx_packet_payload_len(pkt);
+ u32 id = le32_to_cpu(notif->mac_id);
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
+
+ if (WARN_ON_ONCE(len < sizeof(*notif)))
+ return;
+
+ IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
+ notif->noa_active, notif->csa_counter);
- if (mvmvif->id != (u16)le32_to_cpu(notif->mac_id))
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
+ if (!vif)
return;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
if (!new_data)
return;
@@ -1615,66 +1552,63 @@ static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac,
ieee80211_csa_set_counter(vif, notif->csa_counter);
}
-void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
- int len = iwl_rx_packet_payload_len(pkt);
-
- if (WARN_ON_ONCE(len < sizeof(*notif)))
- return;
-
- IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
- notif->noa_active, notif->csa_counter);
-
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_ACTIVE,
- iwl_mvm_probe_resp_data_iter,
- notif);
-}
-
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
- struct ieee80211_vif *csa_vif;
+ struct ieee80211_vif *csa_vif, *vif;
struct iwl_mvm_vif *mvmvif;
int len = iwl_rx_packet_payload_len(pkt);
- u32 id_n_color;
+ u32 id_n_color, csa_id, mac_id;
if (WARN_ON_ONCE(len < sizeof(*notif)))
return;
- rcu_read_lock();
-
- csa_vif = rcu_dereference(mvm->csa_vif);
- if (WARN_ON(!csa_vif || !csa_vif->csa_active))
- goto out_unlock;
-
id_n_color = le32_to_cpu(notif->id_and_color);
+ mac_id = id_n_color & FW_CTXT_ID_MSK;
+
+ if (WARN_ON_ONCE(mac_id >= NUM_MAC_INDEX_DRIVER))
+ return;
- mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
- if (WARN(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color) != id_n_color,
- "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
- FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color), id_n_color))
- goto out_unlock;
+ rcu_read_lock();
+ vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]);
- IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ csa_vif = rcu_dereference(mvm->csa_vif);
+ if (WARN_ON(!csa_vif || !csa_vif->csa_active ||
+ csa_vif != vif))
+ goto out_unlock;
- schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
- msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
- csa_vif->bss_conf.beacon_int));
+ mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
+ csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+ if (WARN(csa_id != id_n_color,
+ "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
+ csa_id, id_n_color))
+ goto out_unlock;
- ieee80211_csa_finish(csa_vif);
+ IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
- rcu_read_unlock();
+ schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
+ msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
+ csa_vif->bss_conf.beacon_int));
- RCU_INIT_POINTER(mvm->csa_vif, NULL);
+ ieee80211_csa_finish(csa_vif);
- return;
+ rcu_read_unlock();
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+ return;
+ case NL80211_IFTYPE_STATION:
+ iwl_mvm_csa_client_absent(mvm, vif);
+ ieee80211_chswitch_done(vif, true);
+ break;
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+ break;
+ }
out_unlock:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 97dc464379d2..3a92c09d4692 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -184,6 +184,29 @@ static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
};
#endif
+static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
+ .max_peers = IWL_MVM_TOF_MAX_APS,
+ .report_ap_tsf = 1,
+ .randomize_mac_addr = 1,
+
+ .ftm = {
+ .supported = 1,
+ .asap = 1,
+ .non_asap = 1,
+ .request_lci = 1,
+ .request_civicloc = 1,
+ .max_bursts_exponent = -1, /* all supported */
+ .max_ftms_per_burst = 0, /* no limits */
+ .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ .preambles = BIT(NL80211_PREAMBLE_LEGACY) |
+ BIT(NL80211_PREAMBLE_HT) |
+ BIT(NL80211_PREAMBLE_VHT),
+ },
+};
+
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!iwl_mvm_is_d0i3_supported(mvm))
@@ -395,6 +418,21 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
return ret;
}
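+/* each index is the (zero-based) octet of the Extended Capabilities element */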
+static const u8 he_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = he_if_types_ext_capa_sta,
+ .extended_capabilities_mask = he_if_types_ext_capa_sta,
+ .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
+ },
+};
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -405,12 +443,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
};
+#ifdef CONFIG_PM_SLEEP
+ bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+#endif
/* Tell mac80211 our characteristics */
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SPECTRUM_MGMT);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(hw, QUEUE_CONTROL);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
@@ -424,6 +465,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+ ieee80211_hw_set(hw, STA_MMPDU_TXQ);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
if (iwl_mvm_has_tlc_offload(mvm)) {
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@@ -469,6 +514,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+ hw->max_tx_fragments = mvm->trans->max_skb_frags;
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
@@ -525,6 +571,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+ hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
+ }
+
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@@ -534,6 +587,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
hw->chanctx_data_size = sizeof(u16);
+ hw->txq_data_size = sizeof(struct iwl_mvm_txq);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -673,6 +727,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
}
+ if (mvm->nvm_data->sku_cap_11ax_enable &&
+ !iwlwifi_mod_params.disable_11ax) {
+ hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(he_iftypes_ext_capa);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@@ -682,7 +743,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->wowlan = &mvm->wowlan;
}
- if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
+ if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
mvm->trans->ops->d3_suspend &&
mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
@@ -735,15 +796,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
}
- ret = ieee80211_register_hw(mvm->hw);
- if (ret)
- iwl_mvm_leds_exit(mvm);
- mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
-
if (mvm->cfg->vht_mu_mimo_supported)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+ ret = ieee80211_register_hw(mvm->hw);
+ if (ret)
+ iwl_mvm_leds_exit(mvm);
+
return ret;
}
@@ -776,7 +837,6 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
goto out;
__skb_queue_tail(&mvm->d0i3_tx, skb);
- ieee80211_stop_queues(mvm->hw);
/* trigger wakeup */
iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
@@ -796,13 +856,15 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_sta *sta = control->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
+ bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+ IEEE80211_TX_CTL_TX_OFFCHAN;
if (iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
goto drop;
}
- if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ if (offchannel &&
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
goto drop;
@@ -815,8 +877,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
sta = NULL;
/* If there is no sta, and it's not offchannel - send through AP */
- if (info->control.vif->type == NL80211_IFTYPE_STATION &&
- info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
+ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
+ !offchannel) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info->control.vif);
u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
@@ -844,22 +906,101 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
ieee80211_free_txskb(hw, skb);
}
-static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
- return false;
- return true;
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ struct sk_buff *skb = NULL;
+
+ /*
+ * No need for threads to be pending here; they can leave all the
+ * work to the first taker.
+ *
+ * mvmtxq->tx_request logic:
+ *
+ * If 0, no one is currently TXing, set to 1 to indicate current thread
+ * will now start TX and other threads should quit.
+ *
+ * If 1, another thread is currently TXing, set to 2 to indicate to
+ * that thread that there was another request. Since that request may
+ * have raced with the check whether the queue is empty, the TXing
+ * thread should check the queue's status one more time before leaving.
+ * This check is done in order to not leave any TX hanging in the queue
+ * until the next TX invocation (which may not even happen).
+ *
+ * If 2, another thread is currently TXing, and it will already double
+ * check the queue, so do nothing.
+ */
+ if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
+ return;
+
+ rcu_read_lock();
+ do {
+ while (likely(!mvmtxq->stopped &&
+ (mvm->trans->system_pm_mode ==
+ IWL_PLAT_PM_MODE_DISABLED))) {
+ skb = ieee80211_tx_dequeue(hw, txq);
+
+ if (!skb) {
+ if (txq->sta)
+ IWL_DEBUG_TX(mvm,
+ "TXQ of sta %pM tid %d is now empty\n",
+ txq->sta->addr,
+ txq->tid);
+ break;
+ }
+
+ if (!txq->sta)
+ iwl_mvm_tx_skb_non_sta(mvm, skb);
+ else
+ iwl_mvm_tx_skb(mvm, skb, txq->sta);
+ }
+ } while (atomic_dec_return(&mvmtxq->tx_request));
+ rcu_read_unlock();
}
-static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
{
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
- return false;
- if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
- return true;
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
- /* enabled by default */
- return true;
+ /*
+ * Please note that racing is handled very carefully here:
+ * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
+ * deleted afterwards.
+ * This means that if:
+ * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
+ * queue is allocated and we can TX.
+ * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
+ * a race, should defer the frame.
+ * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
+ * need to allocate the queue and defer the frame.
+ * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
+ * queue is already scheduled for allocation, no need to allocate,
+ * should defer the frame.
+ */
+
+ /* If the queue is already allocated, TX and return. */
+ if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
+ /*
+ * Check that list is empty to avoid a race where txq_id is
+ * already updated, but the queue allocation work wasn't
+ * finished
+ */
+ if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
+ return;
+
+ iwl_mvm_mac_itxq_xmit(hw, txq);
+ return;
+ }
+
+ /* The list is being deleted only after the queue is fully allocated. */
+ if (!list_empty(&mvmtxq->list))
+ return;
+
+ list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+ schedule_work(&mvm->add_stream_wk);
}
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
@@ -974,7 +1115,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
}
- if (!iwl_enable_rx_ampdu(mvm->cfg)) {
+ if (!iwl_enable_rx_ampdu()) {
ret = -EINVAL;
break;
}
@@ -986,7 +1127,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
timeout);
break;
case IEEE80211_AMPDU_TX_START:
- if (!iwl_enable_tx_ampdu(mvm->cfg)) {
+ if (!iwl_enable_tx_ampdu()) {
ret = -EINVAL;
break;
}
@@ -1066,6 +1207,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_stop_device(mvm);
+ mvm->cur_aid = 0;
+
mvm->scan_status = 0;
mvm->ps_disabled = false;
mvm->calibrating = false;
@@ -1074,6 +1217,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_cleanup_roc_te(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_ftm_restart(mvm);
+
/*
* cleanup all interfaces, even inactive ones, as some might have
* gone down during the HW restart
@@ -1085,7 +1230,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
- memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -1188,6 +1332,8 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
/* allow transport/FW low power modes */
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY);
+
/*
* If we have TDLS peers, remove them. We don't know the last seqno/PN
* of packets the FW sent out, so we must reconnect.
@@ -1391,6 +1537,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_unlock;
+ rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
+
/* Counting number of interfaces is needed for legacy PM */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count++;
@@ -1549,6 +1697,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI);
}
+ if (vif->bss_conf.ftm_responder)
+ memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats));
+
iwl_mvm_vif_dbgfs_clean(mvm, vif);
/*
@@ -1582,6 +1733,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
+ RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
+
if (vif->type == NL80211_IFTYPE_MONITOR)
mvm->monitor_on = false;
@@ -2076,6 +2229,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
}
flags |= STA_CTXT_HE_PACKET_EXT;
+ } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) !=
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) {
+ int low_th = -1;
+ int high_th = -1;
+
+ /* Take the PPE thresholds from the nominal padding info */
+ switch (sta->he_cap.he_cap_elem.phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) {
+ case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US:
+ low_th = IWL_HE_PKT_EXT_BPSK;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_BPSK;
+ break;
+ }
+
+ /* Set the PPE thresholds accordingly */
+ if (low_th >= 0 && high_th >= 0) {
+ u8 ***pkt_ext_qam =
+ (void *)sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th;
+
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ u8 bw;
+
+ for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
+ bw++) {
+ pkt_ext_qam[i][bw][0] = low_th;
+ pkt_ext_qam[i][bw][1] = high_th;
+ }
+ }
+
+ flags |= STA_CTXT_HE_PACKET_EXT;
+ }
}
rcu_read_unlock();
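The new else-if branch above derives PPE thresholds from the peer's nominal packet padding when no explicit PPE thresholds are advertised. The sketch below isolates that mapping; the numeric constants are illustrative placeholders (the authoritative IEEE80211_HE_PHY_CAP9_* and IWL_HE_PKT_EXT_* definitions live in the kernel headers), and only the switch mirrors the code above.

#include <stdio.h>

/* Illustrative values for the nominal packet padding field in HE PHY
 * capability byte 9; the authoritative macros live in
 * include/linux/ieee80211.h. */
#define NOMINAL_PKT_PADDING_MASK	0xc0
#define NOMINAL_PKT_PADDING_0US		0x00
#define NOMINAL_PKT_PADDING_8US		0x40
#define NOMINAL_PKT_PADDING_16US	0x80

/* Placeholder threshold codes standing in for IWL_HE_PKT_EXT_*. */
enum { PKT_EXT_NONE = 0, PKT_EXT_BPSK = 1 };

/* Fill the low/high PPE thresholds for a given padding value; returns
 * -1 for the reserved encoding, in which case nothing is configured. */
static int padding_to_ppe_thresholds(unsigned char phy_cap9,
				     int *low_th, int *high_th)
{
	switch (phy_cap9 & NOMINAL_PKT_PADDING_MASK) {
	case NOMINAL_PKT_PADDING_0US:
		*low_th = PKT_EXT_NONE;
		*high_th = PKT_EXT_NONE;
		return 0;
	case NOMINAL_PKT_PADDING_8US:
		*low_th = PKT_EXT_BPSK;
		*high_th = PKT_EXT_NONE;
		return 0;
	case NOMINAL_PKT_PADDING_16US:
		*low_th = PKT_EXT_NONE;
		*high_th = PKT_EXT_BPSK;
		return 0;
	default:
		return -1;	/* reserved */
	}
}

int main(void)
{
	int lo = -1, hi = -1;

	if (!padding_to_ppe_thresholds(NOMINAL_PKT_PADDING_8US, &lo, &hi))
		printf("low=%d high=%d\n", lo, hi);	/* low=1 high=0 */
	return 0;
}

For the reserved encoding the thresholds stay at -1, which matches the driver's check that leaves STA_CTXT_HE_PACKET_EXT unset in that case.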
@@ -2146,6 +2339,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
}
+ /* Update MU EDCA params */
+ if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
+ bss_conf->assoc && vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
/*
* If we're not associated yet, take the (new) BSSID before associating
* so the firmware knows. If we're already associated, then use the old
@@ -2211,7 +2410,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
*/
- WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
"Failed to update SF upon disassociation\n");
/*
@@ -2432,6 +2634,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (iwl_mvm_phy_ctx_count(mvm) > 1)
iwl_mvm_teardown_tdls_peers(mvm);
+ iwl_mvm_ftm_restart_responder(mvm, vif);
+
goto out_unlock;
out_quota_failed:
@@ -2543,6 +2747,15 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
+
+ if (changes & BSS_CHANGED_FTM_RESPONDER) {
+ int ret = iwl_mvm_ftm_start_responder(mvm, vif);
+
+ if (ret)
+ IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n",
+ ret);
+ }
+
}
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -2672,7 +2885,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
return;
spin_lock_bh(&mvmsta->lock);
- for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
@@ -2861,32 +3074,6 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
peer_addr, action);
}
-static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta)
-{
- struct iwl_mvm_tid_data *tid_data;
- struct sk_buff *skb;
- int i;
-
- spin_lock_bh(&mvm_sta->lock);
- for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
- tid_data = &mvm_sta->tid_data[i];
-
- while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- /*
- * The first deferred frame should've stopped the MAC
- * queues, so we should never get a second deferred
- * frame for the RA/TID.
- */
- iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
- ieee80211_free_txskb(mvm->hw, skb);
- }
- }
- spin_unlock_bh(&mvm_sta->lock);
-}
-
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2920,7 +3107,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
*/
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST) {
- iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
flush_work(&mvm->add_stream_wk);
/*
@@ -2967,6 +3153,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
NL80211_TDLS_SETUP);
}
+
+ sta->max_rc_amsdu_len = 1;
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
/*
@@ -2979,11 +3167,15 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
if (vif->type == NL80211_IFTYPE_AP) {
+ vif->bss_conf.he_support = sta->he_cap.has_he;
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ vif->bss_conf.he_support = sta->he_cap.has_he;
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
@@ -2991,6 +3183,24 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_update_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
+ /* if wep is used, need to set the key for the station now */
+ if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
+ mvm_sta->wep_key =
+ kmemdup(mvmvif->ap_wep_key,
+ sizeof(*mvmvif->ap_wep_key) +
+ mvmvif->ap_wep_key->keylen,
+ GFP_KERNEL);
+ if (!mvm_sta->wep_key) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta,
+ mvm_sta->wep_key,
+ STA_KEY_IDX_INVALID);
+ } else {
+ ret = 0;
+ }
/* we don't support TDLS during DCM */
if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3005,18 +3215,13 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
-
- /* if wep is used, need to set the key for the station now */
- if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key)
- ret = iwl_mvm_set_sta_key(mvm, vif, sta,
- mvmvif->ap_wep_key,
- STA_KEY_IDX_INVALID);
- else
- ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
- WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ WARN_ON(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@@ -3036,6 +3241,22 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
NL80211_TDLS_DISABLE_LINK);
}
+
+ /* Remove STA key if this is an AP using WEP */
+ if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
+ int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
+ mvm_sta->wep_key);
+
+ if (!ret)
+ ret = rm_ret;
+ kfree(mvm_sta->wep_key);
+ mvm_sta->wep_key = NULL;
+ }
+
+ if (unlikely(ret &&
+ test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status)))
+ ret = 0;
} else {
ret = -EIO;
}
@@ -3431,14 +3652,20 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
- /* Set the channel info data */
- .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
- PHY_BAND_24 : PHY_BAND_5,
- .channel_info.channel = channel->hw_value,
- .channel_info.width = PHY_VHT_CHANNEL_MODE20,
- /* Set the time and duration */
- .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
- };
+ };
+ struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm,
+ &aux_roc_req.channel_info);
+ u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
+ (channel->band == NL80211_BAND_2GHZ) ?
+ PHY_BAND_24 : PHY_BAND_5,
+ PHY_VHT_CHANNEL_MODE20,
+ 0);
+
+ /* Set the time and duration */
+ tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg));
delay = AUX_ROC_MIN_DELAY;
req_dur = MSEC_TO_TU(duration);
@@ -3463,15 +3690,15 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
}
}
- aux_roc_req.duration = cpu_to_le32(req_dur);
- aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
+ tail->duration = cpu_to_le32(req_dur);
+ tail->apply_time_max_delay = cpu_to_le32(delay);
IWL_DEBUG_TE(mvm,
"ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
channel->hw_value, req_dur, duration, delay,
dtim_interval);
/* Set the node address */
- memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
+ memcpy(tail->node_addr, vif->addr, ETH_ALEN);
lockdep_assert_held(&mvm->mutex);
@@ -3502,7 +3729,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_rx_aux_roc, te_data);
- res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
+ res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len,
&aux_roc_req);
if (res) {
@@ -3673,11 +3900,43 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
return 0;
}
+struct iwl_mvm_ftm_responder_iter_data {
+ bool responder;
+ struct ieee80211_chanctx_conf *ctx;
+};
+
+static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_ftm_responder_iter_data *data = _data;
+
+ if (rcu_access_pointer(vif->chanctx_conf) == data->ctx &&
+ vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params)
+ data->responder = true;
+}
+
+static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm_ftm_responder_iter_data data = {
+ .responder = false,
+ .ctx = ctx,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_ftm_responder_chanctx_iter,
+ &data);
+ return data.responder;
+}
+
static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx)
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt;
+ bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
+ struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -3690,7 +3949,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
goto out;
}
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
if (ret) {
@@ -3745,6 +4004,8 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+ bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
+ struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
if (WARN_ONCE((phy_ctxt->ref > 1) &&
(changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
@@ -3759,17 +4020,17 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
/* we are only changing the min_width, may be a noop */
if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
- if (phy_ctxt->width == ctx->min_def.width)
+ if (phy_ctxt->width == def->width)
goto out_unlock;
/* we are just toggling between 20_NOHT and 20 */
if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
- ctx->min_def.width <= NL80211_CHAN_WIDTH_20)
+ def->width <= NL80211_CHAN_WIDTH_20)
goto out_unlock;
}
iwl_mvm_bt_coex_vif_change(mvm);
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
@@ -3798,6 +4059,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
mvmvif->ap_ibss_active = true;
break;
}
+ /* fall through */
case NL80211_IFTYPE_ADHOC:
/*
* The AP binding flow is handled as part of the start_ap flow
@@ -3850,25 +4112,30 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
}
if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
- u32 duration = 3 * vif->bss_conf.beacon_int;
+ mvmvif->csa_bcn_pending = true;
- /* iwl_mvm_protect_session() reads directly from the
- * device (the system time), so make sure it is
- * available.
- */
- ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
- if (ret)
- goto out_remove_binding;
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
+ u32 duration = 3 * vif->bss_conf.beacon_int;
- /* Protect the session to make sure we hear the first
- * beacon on the new channel.
- */
- mvmvif->csa_bcn_pending = true;
- iwl_mvm_protect_session(mvm, vif, duration, duration,
- vif->bss_conf.beacon_int / 2,
- true);
- iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
+ /* iwl_mvm_protect_session() reads directly from the
+ * device (the system time), so make sure it is
+ * available.
+ */
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
+ if (ret)
+ goto out_remove_binding;
+
+ /* Protect the session to make sure we hear the first
+ * beacon on the new channel.
+ */
+ iwl_mvm_protect_session(mvm, vif, duration, duration,
+ vif->bss_conf.beacon_int / 2,
+ true);
+
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
+ }
iwl_mvm_update_quotas(mvm, false, NULL);
}
@@ -3938,7 +4205,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
disabled_vif = vif;
- iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
+ iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
break;
default:
break;
@@ -4189,6 +4458,27 @@ static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
"dummy channel switch op\n");
}
+static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_chan_switch_te_cmd cmd = {
+ .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .tsf = cpu_to_le32(chsw->timestamp),
+ .cs_count = chsw->count,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ 0, sizeof(cmd), &cmd);
+}
+
static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
@@ -4256,14 +4546,19 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
if (chsw->block_tx)
iwl_mvm_csa_client_absent(mvm, vif);
- iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
- apply_time);
if (mvmvif->bf_data.bf_enabled) {
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
if (ret)
goto out_unlock;
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
+ iwl_mvm_schedule_client_csa(mvm, vif, chsw);
+ else
+ iwl_mvm_schedule_csa_period(mvm, vif,
+ vif->bss_conf.beacon_int,
+ apply_time);
break;
default:
break;
@@ -4656,8 +4951,89 @@ static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
mutex_unlock(&mvm->mutex);
}
+static int
+iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_ftm_responder_stats *stats)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
+ !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ *stats = mvm->ftm_resp_stats;
+ mutex_unlock(&mvm->mutex);
+
+ stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) |
+ BIT(NL80211_FTM_STATS_PARTIAL_NUM) |
+ BIT(NL80211_FTM_STATS_FAILED_NUM) |
+ BIT(NL80211_FTM_STATS_ASAP_NUM) |
+ BIT(NL80211_FTM_STATS_NON_ASAP_NUM) |
+ BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) |
+ BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) |
+ BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) |
+ BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM);
+
+ return 0;
+}
+
+static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_ftm_start(mvm, vif, request);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_ftm_abort(mvm, request);
+ mutex_unlock(&mvm->mutex);
+}
+
+static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
+{
+ u8 protocol = ip_hdr(skb)->protocol;
+
+ if (!IS_ENABLED(CONFIG_INET))
+ return false;
+
+ return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
+}
+
+static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* For now don't aggregate IPv6 in AMSDU */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ if (!iwl_mvm_is_csum_supported(mvm))
+ return true;
+
+ return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
+}
+
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
+ .wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.start = iwl_mvm_mac_start,
.reconfig_complete = iwl_mvm_mac_reconfig_complete,
@@ -4731,6 +5107,11 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
#endif
.get_survey = iwl_mvm_mac_get_survey,
.sta_statistics = iwl_mvm_mac_sta_statistics,
+ .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats,
+ .start_pmsr = iwl_mvm_start_pmsr,
+ .abort_pmsr = iwl_mvm_abort_pmsr,
+
+ .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1aa690e081ff..bca6f6b536d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -83,7 +83,6 @@
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
-#include "tof.h"
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
@@ -95,6 +94,8 @@
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
+
/* A TimeUnit is 1024 microsecond */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
@@ -299,17 +300,38 @@ enum iwl_bt_force_ant_mode {
};
/**
+ * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs
+ * @LOW_LATENCY_FORCE_UNSET: force mode not set
+ * @LOW_LATENCY_FORCE_ON: force low latency on
+ * @LOW_LATENCY_FORCE_OFF: force low latency off
+ * @NUM_LOW_LATENCY_FORCE: number of force modes
+ */
+enum iwl_mvm_low_latency_force {
+ LOW_LATENCY_FORCE_UNSET,
+ LOW_LATENCY_FORCE_ON,
+ LOW_LATENCY_FORCE_OFF,
+ NUM_LOW_LATENCY_FORCE
+};
+
+/**
* struct iwl_mvm_low_latency_cause - low latency set causes
* @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
* @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
* @LOW_LATENCY_VCMD: low latency mode set from vendor command
* @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap)
+ * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicates that force mode is enabled;
+ *	the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE
+ * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs;
+ *	set this together with the LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag
+ *	in low_latency
*/
enum iwl_mvm_low_latency_cause {
LOW_LATENCY_TRAFFIC = BIT(0),
LOW_LATENCY_DEBUGFS = BIT(1),
LOW_LATENCY_VCMD = BIT(2),
LOW_LATENCY_VIF_TYPE = BIT(3),
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4),
+ LOW_LATENCY_DEBUGFS_FORCE = BIT(5),
};
/**
@@ -360,8 +382,10 @@ struct iwl_probe_resp_data {
* @pm_enabled - Indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
- * @low_latency: indicates low latency is set, see
- * enum &iwl_mvm_low_latency_cause for causes.
+ * @low_latency: bit flags for low latency; see
+ *	enum &iwl_mvm_low_latency_cause for causes
+ * @low_latency_actual: boolean, indicates whether low latency is set,
+ *	derived from the low_latency bit flags, taking force into account
* @ps_disabled: indicates that this interface requires PS to be disabled
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
@@ -393,7 +417,8 @@ struct iwl_mvm_vif {
bool ap_ibss_active;
bool pm_enabled;
bool monitor_active;
- u8 low_latency;
+ u8 low_latency: 6;
+ u8 low_latency_actual: 1;
bool ps_disabled;
struct iwl_mvm_vif_bf_data bf_data;
@@ -778,6 +803,39 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
+struct iwl_mvm_txq {
+ struct list_head list;
+ u16 txq_id;
+ atomic_t tx_request;
+ bool stopped;
+};
+
+static inline struct iwl_mvm_txq *
+iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
+{
+ return (void *)txq->drv_priv;
+}
+
+static inline struct iwl_mvm_txq *
+iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
+{
+ if (tid == IWL_MAX_TID_COUNT)
+ tid = IEEE80211_NUM_TIDS;
+
+ return (void *)sta->txq[tid]->drv_priv;
+}
+
+/**
+ * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
+ *
+ * @sta_id: sta id
+ * @txq_tid: txq tid
+ */
+struct iwl_mvm_tvqm_txq_info {
+ u8 sta_id;
+ u8 txq_tid;
+};
+
struct iwl_mvm_dqa_txq_info {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
@@ -821,9 +879,6 @@ struct iwl_mvm {
bool hw_registered;
bool calibrating;
- u32 error_event_table[2];
- u32 log_event_table;
- u32 umac_error_event_table;
bool support_umac_log;
u32 ampdu_ref;
@@ -843,13 +898,13 @@ struct iwl_mvm {
u64 on_time_scan;
} radio_stats, accu_radio_stats;
- u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
-
- struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
+ struct list_head add_stream_txqs;
+ union {
+ struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
+ struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
+ };
struct work_struct add_stream_wk; /* To add streams to queues */
- atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
-
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
/* NVM sections */
@@ -863,7 +918,6 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
- unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -924,6 +978,7 @@ struct iwl_mvm {
u32 dbgfs_prph_reg_addr;
bool disable_power_off;
bool disable_power_off_d3;
+ bool beacon_inject_active;
bool scan_iter_notif_enabled;
@@ -932,6 +987,7 @@ struct iwl_mvm {
struct debugfs_blob_wrapper nvm_calib_blob;
struct debugfs_blob_wrapper nvm_prod_blob;
struct debugfs_blob_wrapper nvm_phy_sku_blob;
+ struct debugfs_blob_wrapper nvm_reg_blob;
struct iwl_mvm_frame_stats drv_rx_stats;
spinlock_t drv_stats_lock;
@@ -955,9 +1011,11 @@ struct iwl_mvm {
u8 refs[IWL_MVM_REF_COUNT];
u8 vif_count;
+ struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
/* -1 for always, 0 for never, >0 for that many times */
s8 fw_restart;
+ u8 *error_recovery_buf;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@@ -1047,6 +1105,8 @@ struct iwl_mvm {
/* Indicate if device power save is allowed */
u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
+ /* Indicate if 32Khz external clock is valid */
+ u32 ext_clock_valid;
unsigned int max_amsdu_len; /* used for debugfs only */
struct ieee80211_vif __rcu *csa_vif;
@@ -1090,7 +1150,14 @@ struct iwl_mvm {
u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
- struct iwl_mvm_tof_data tof_data;
+
+ struct cfg80211_ftm_responder_stats ftm_resp_stats;
+ struct {
+ struct cfg80211_pmsr_request *req;
+ struct wireless_dev *req_wdev;
+ struct list_head loc_list;
+ int responses[IWL_MVM_TOF_MAX_APS];
+ } ftm_initiator;
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
@@ -1106,6 +1173,11 @@ struct iwl_mvm {
/* does a monitor vif exist (only one can exist hence bool) */
bool monitor_on;
+
+ /* sniffer data to include in radiotap */
+ __le16 cur_aid;
+ u8 cur_bssid[ETH_ALEN];
+
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
@@ -1149,8 +1221,6 @@ enum iwl_mvm_status {
enum iwl_mvm_init_status {
IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
- IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2),
- IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3),
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -1207,6 +1277,19 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
return iwl_mvm_sta_from_mac80211(sta);
}
+static inline struct ieee80211_vif *
+iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
+{
+ if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac)))
+ return NULL;
+
+ if (rcu)
+ return rcu_dereference(mvm->vif_id_to_mac[vif_id]);
+
+ return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id],
+ lockdep_is_held(&mvm->mutex));
+}
+
static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
{
return !iwlwifi_mod_params.d0i3_disable &&
@@ -1237,6 +1320,12 @@ static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm)
return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS);
}
+static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF);
+}
+
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between
@@ -1470,6 +1559,11 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
+void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ unsigned int tid);
+
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
@@ -1567,6 +1661,7 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags);
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -1599,9 +1694,19 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool force_assoc_off, const u8 *bssid_override);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon);
+int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
+ struct sk_buff *beacon,
+ void *data, int len);
+u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif);
+void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+ __le32 *tim_index, __le32 *tim_size,
+ u8 *beacon, u32 frame_size);
void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
@@ -1615,8 +1720,6 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
-unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
- struct ieee80211_vif *exclude_vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
@@ -1870,17 +1973,43 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
* binding, so this has no real impact. For now, just return
* the current desired low-latency state.
*/
- return mvmvif->low_latency;
+ return mvmvif->low_latency_actual;
}
static inline
void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
enum iwl_mvm_low_latency_cause cause)
{
+ u8 new_state;
+
if (set)
mvmvif->low_latency |= cause;
else
mvmvif->low_latency &= ~cause;
+
+ /*
+	 * If LOW_LATENCY_DEBUGFS_FORCE_ENABLE is set, no changes to the
+	 * actual mode are allowed.
+ */
+ if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE &&
+ cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE)
+ return;
+
+ if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set)
+ /*
+		 * Entering force mode: take the forced value
+ */
+ new_state = !!(mvmvif->low_latency &
+ LOW_LATENCY_DEBUGFS_FORCE);
+ else
+ /*
+		 * Check whether any other cause set low latency
+ */
+ new_state = !!(mvmvif->low_latency &
+ ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE |
+ LOW_LATENCY_DEBUGFS_FORCE));
+
+ mvmvif->low_latency_actual = new_state;
}
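A standalone sketch of the steady-state rule behind the new low_latency_actual bit, reusing the BIT() layout from the enum above; this is an illustrative reimplementation of the outcome, not the driver's inline helper itself.

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Cause bits as laid out in enum iwl_mvm_low_latency_cause above. */
#define LL_TRAFFIC		BIT(0)
#define LL_DEBUGFS		BIT(1)
#define LL_VCMD			BIT(2)
#define LL_VIF_TYPE		BIT(3)
#define LL_DBGFS_FORCE_ENABLE	BIT(4)
#define LL_DBGFS_FORCE		BIT(5)

/* Steady-state value of low_latency_actual for a given cause bitmask:
 * while force mode is enabled only the forced value counts; otherwise
 * any ordinary cause bit turns low latency on. */
static bool low_latency_actual(unsigned int causes)
{
	if (causes & LL_DBGFS_FORCE_ENABLE)
		return causes & LL_DBGFS_FORCE;

	return causes & ~(LL_DBGFS_FORCE_ENABLE | LL_DBGFS_FORCE);
}

int main(void)
{
	/* Traffic asks for low latency, but debugfs forces it off. */
	printf("%d\n", low_latency_actual(LL_TRAFFIC | LL_DBGFS_FORCE_ENABLE));

	/* An AP-type vif turns it on with no force mode active. */
	printf("%d\n", low_latency_actual(LL_VIF_TYPE));
	return 0;
}

While force mode is enabled, ordinary causes such as detected traffic are still recorded in the bitmask but stop influencing the actual state until the force flags are cleared.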
/* Return a bitmask with all the hw supported queues, except for the
@@ -1895,21 +2024,24 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
- /* calling this function without using dump_start/end since at this
- * point we already hold the op mode mutex
+	/* If the IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set, we received
+	 * an assert. Since we failed to bring the interface up, mac80211
+	 * will not attempt to reconfigure the device; that path normally
+	 * handles dump collection in the assert flow, so trigger the dump
+	 * collection here.
*/
- iwl_fw_dbg_collect_sync(&mvm->fwrt);
+ if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ false, 0);
+
iwl_fw_cancel_timestamp(&mvm->fwrt);
- iwl_free_fw_paging(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ iwl_fwrt_stop_device(&mvm->fwrt);
+ iwl_free_fw_paging(&mvm->fwrt);
iwl_fw_dump_conf_clear(&mvm->fwrt);
- iwl_trans_stop_device(mvm->trans);
}
-/* Stop/start all mac queues in a given bitmap */
-void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
-void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
-
/* Re-configure the SCD for a queue that has already been configured */
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn);
@@ -1949,6 +2081,23 @@ void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool added_vif);
+/* FTM responder */
+int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* FTM initiator */
+void iwl_mvm_ftm_restart(struct iwl_mvm *mvm);
+void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req);
+
/* TDLS */
/*
@@ -2015,4 +2164,59 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
+/* Channel info utils */
+static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS);
+}
+
+static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci)
+{
+ return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ?
+ sizeof(struct iwl_fw_channel_info) :
+ sizeof(struct iwl_fw_channel_info_v1));
+}
+
+static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 :
+ sizeof(struct iwl_fw_channel_info) -
+ sizeof(struct iwl_fw_channel_info_v1);
+}
+
+static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci,
+ u32 chan, u8 band, u8 width,
+ u8 ctrl_pos)
+{
+ if (iwl_mvm_has_ultra_hb_channel(mvm)) {
+ ci->channel = cpu_to_le32(chan);
+ ci->band = band;
+ ci->width = width;
+ ci->ctrl_pos = ctrl_pos;
+ } else {
+ struct iwl_fw_channel_info_v1 *ci_v1 =
+ (struct iwl_fw_channel_info_v1 *)ci;
+
+ ci_v1->channel = chan;
+ ci_v1->band = band;
+ ci_v1->width = width;
+ ci_v1->ctrl_pos = ctrl_pos;
+ }
+}
+
+static inline void
+iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci,
+ struct cfg80211_chan_def *chandef)
+{
+ iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
+ (chandef->chan->band == NL80211_BAND_2GHZ ?
+ PHY_BAND_24 : PHY_BAND_5),
+ iwl_mvm_get_channel_width(chandef),
+ iwl_mvm_get_ctrl_pos(chandef));
+}
+
#endif /* __IWL_MVM_H__ */
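The channel-info helpers above boil down to two pieces of arithmetic: without ultra-high-band support the firmware expects the v1 layout, so any command embedding a channel info shrinks by the size difference, and the command tail starts right after the v1 portion. Below is a compilable sketch with mock layouts; the struct names, fields, and sizes are placeholders, and only the arithmetic mirrors iwl_mvm_chan_info_padding() and iwl_mvm_chan_info_cmd_tail().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mock layouts: v1 carries an 8-bit channel number, the newer format
 * widens the channel field for ultra-high-band support.  Names and
 * sizes are placeholders; only the arithmetic mirrors the helpers. */
struct chan_info_v1 {
	uint8_t channel, band, width, ctrl_pos;
};

struct chan_info {
	uint32_t channel;
	uint8_t band, width, ctrl_pos, reserved;
};

struct roc_cmd_tail {
	uint32_t apply_time;
};

/* A command embedding the channel info followed by a tail. */
struct roc_cmd {
	uint32_t id_and_color;
	struct chan_info channel_info;
	struct roc_cmd_tail tail;
};

static size_t chan_info_padding(bool ultra_hb)
{
	return ultra_hb ? 0 :
	       sizeof(struct chan_info) - sizeof(struct chan_info_v1);
}

static void *chan_info_cmd_tail(bool ultra_hb, struct chan_info *ci)
{
	return (uint8_t *)ci + (ultra_hb ? sizeof(struct chan_info) :
					   sizeof(struct chan_info_v1));
}

int main(void)
{
	struct roc_cmd cmd = { 0 };
	bool ultra_hb = false;	/* firmware without ultra-high-band channels */

	/* Older firmware expects the v1 layout: the command shrinks by the
	 * size difference and the tail starts right after the v1 part. */
	size_t len = sizeof(cmd) - chan_info_padding(ultra_hb);
	struct roc_cmd_tail *tail =
		chan_info_cmd_tail(ultra_hb, &cmd.channel_info);

	tail->apply_time = 1234;
	printf("send %zu of %zu bytes\n", len, sizeof(cmd));
	return 0;
}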
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 6fc5cc1f2b5b..7bdbd010ae6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -179,7 +179,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
ret, mvm->cfg->name);
- ret = -EIO;
+ ret = -ENODATA;
}
goto exit;
}
@@ -380,8 +380,12 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* we override the constness for initial read */
ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
size_read);
- if (ret < 0)
+ if (ret == -ENODATA) {
+ ret = 0;
continue;
+ }
+ if (ret < 0)
+ break;
size_read += ret;
temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
if (!temp) {
@@ -412,6 +416,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_phy_sku_blob.data = temp;
mvm->nvm_phy_sku_blob.size = ret;
break;
+ case NVM_SECTION_TYPE_REGULATORY_SDP:
+ case NVM_SECTION_TYPE_REGULATORY:
+ mvm->nvm_reg_blob.data = temp;
+ mvm->nvm_reg_blob.size = ret;
+ break;
default:
if (section == mvm->cfg->nvm_hw_section_num) {
mvm->nvm_hw_blob.data = temp;
@@ -454,7 +463,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
mvm->nvm_data->nvm_version);
- return 0;
+ return ret < 0 ? ret : 0;
}
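The nvm.c hunks above separate a merely absent section (-ENODATA: skip it and keep going) from a real read failure (break out and propagate the error from iwl_nvm_init()). A small standalone sketch of that classification follows; the numeric errno values and the fake reader are placeholders.

#include <stdio.h>

#define ENODATA_ERR	-61	/* placeholder for -ENODATA */
#define EIO_ERR		-5	/* placeholder for -EIO */

/* Stand-in reader: section 2 is simply absent, section 4 hits a real
 * I/O error, everything else returns the number of bytes read. */
static int read_section_sketch(int section)
{
	if (section == 2)
		return ENODATA_ERR;
	if (section == 4)
		return EIO_ERR;
	return 128;
}

int main(void)
{
	int section, ret = 0;

	for (section = 0; section < 6; section++) {
		ret = read_section_sketch(section);

		/* A missing section is not fatal: clear the error and
		 * move on to the next section. */
		if (ret == ENODATA_ERR) {
			ret = 0;
			continue;
		}

		/* Any other failure aborts the whole parse... */
		if (ret < 0)
			break;

		printf("section %d: %d bytes\n", section, ret);
	}

	/* ...and the final error, if any, is what gets returned instead
	 * of unconditionally reporting success. */
	return ret < 0 ? 1 : 0;
}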
struct iwl_mcc_update_resp *
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 30c5127034a0..ba27dce4c2bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -82,7 +82,6 @@
#include "fw/api/scan.h"
#include "time-event.h"
#include "fw-api.h"
-#include "fw/api/scan.h"
#include "fw/acpi.h"
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
@@ -301,8 +300,14 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
RX_HANDLER_SYNC),
- RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
- RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
+ iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED),
+
+ RX_HANDLER_GRP(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
+ iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(LOCATION_GROUP, TOF_LC_NOTIF,
+ iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
+
RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
@@ -329,8 +334,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(SCAN_REQ_UMAC),
HCMD_NAME(SCAN_ABORT_UMAC),
HCMD_NAME(SCAN_COMPLETE_UMAC),
- HCMD_NAME(TOF_CMD),
- HCMD_NAME(TOF_NOTIFICATION),
HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
HCMD_NAME(ADD_STA_KEY),
HCMD_NAME(ADD_STA),
@@ -419,12 +422,14 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
+ HCMD_NAME(FW_ERROR_RECOVERY_CMD),
};
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+ HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};
@@ -449,6 +454,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+ HCMD_NAME(TLC_MNG_CONFIG_CMD),
+ HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -464,6 +471,22 @@ static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
+ HCMD_NAME(TOF_RANGE_REQ_CMD),
+ HCMD_NAME(TOF_CONFIG_CMD),
+ HCMD_NAME(TOF_RANGE_ABORT_CMD),
+ HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
+ HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
+ HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
+ HCMD_NAME(TOF_LC_NOTIF),
+ HCMD_NAME(TOF_RESPONDER_STATS),
+ HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
+ HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
HCMD_NAME(STORED_BEACON_NTF),
};
@@ -483,6 +506,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+ [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
@@ -577,11 +601,17 @@ static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
return ret;
}
+static bool iwl_mvm_d3_debug_enable(void *ctx)
+{
+ return IWL_MVM_D3_DEBUG;
+}
+
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
.fw_running = iwl_mvm_fwrt_fw_running,
.send_hcmd = iwl_mvm_fwrt_send_hcmd,
+ .d3_debug_enable = iwl_mvm_d3_debug_enable,
};
static struct iwl_op_mode *
@@ -676,6 +706,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->aux_roc_te_list);
INIT_LIST_HEAD(&mvm->async_handlers_list);
spin_lock_init(&mvm->time_event_lock);
+ INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -685,6 +716,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+ INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock);
@@ -736,6 +768,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.rx_buf_size = rb_size_default;
}
+ BUILD_BUG_ON(sizeof(struct iwl_ldbg_config_cmd) !=
+ LDBG_CFG_COMMAND_SIZE);
+
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
@@ -799,8 +834,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
- if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status))
- iwl_fw_alive_error_dump(&mvm->fwrt);
+ if (err)
+ iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -842,8 +877,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (iwl_mvm_is_d0i3_supported(mvm))
iwl_trans_unref(mvm->trans);
- iwl_mvm_tof_init(mvm);
-
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
return op_mode;
@@ -886,15 +919,15 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_thermal_exit(mvm);
- if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
- ieee80211_unregister_hw(mvm->hw);
- mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
- }
+ ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
+ kfree(mvm->error_recovery_buf);
+ mvm->error_recovery_buf = NULL;
+
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
#endif
@@ -909,8 +942,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
cancel_delayed_work_sync(&mvm->tcm.work);
- iwl_mvm_tof_clean(mvm);
-
iwl_fw_runtime_free(&mvm->fwrt);
mutex_destroy(&mvm->mutex);
mutex_destroy(&mvm->d0i3_suspend_mutex);
@@ -1079,24 +1110,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_common(mvm, rxb, pkt);
}
-void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
-{
- int q;
-
- if (WARN_ON_ONCE(!mq))
- return;
-
- for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
- if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "mac80211 %d already stopped\n", q);
- continue;
- }
-
- ieee80211_stop_queue(mvm->hw, q);
- }
-}
-
static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
const struct iwl_device_cmd *cmd)
{
@@ -1109,38 +1122,81 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
iwl_trans_block_txq_ptrs(mvm->trans, false);
}
-static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
-
- iwl_mvm_stop_mac_queues(mvm, mq);
+ return queue == mvm->aux_queue || queue == mvm->probe_queue ||
+ queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
}
-void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+ int hw_queue, bool start)
{
- int q;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_sta *sta;
+ struct ieee80211_txq *txq;
+ struct iwl_mvm_txq *mvmtxq;
+ int i;
+ unsigned long tid_bitmap;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+
+ sta_id = iwl_mvm_has_new_tx_api(mvm) ?
+ mvm->tvqm_info[hw_queue].sta_id :
+ mvm->queue_info[hw_queue].ra_sta_id;
- if (WARN_ON_ONCE(!mq))
+ if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
return;
- for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
- if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "mac80211 %d still stopped\n", q);
- continue;
- }
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ goto out;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
+ if (!start)
+ ieee80211_stop_queues(mvm->hw);
+ else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+ ieee80211_wake_queues(mvm->hw);
+
+ goto out;
+ }
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int tid = mvm->tvqm_info[hw_queue].txq_tid;
+
+ tid_bitmap = BIT(tid);
+ } else {
+ tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
+ }
+
+ for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ int tid = i;
+
+ if (tid == IWL_MAX_TID_COUNT)
+ tid = IEEE80211_NUM_TIDS;
- ieee80211_wake_queue(mvm->hw, q);
+ txq = sta->txq[tid];
+ mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ mvmtxq->stopped = !start;
+
+ if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+ iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
}
+
+out:
+ rcu_read_unlock();
}
-static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
+ iwl_mvm_queue_state_change(op_mode, hw_queue, false);
+}
- iwl_mvm_start_mac_queues(mvm, mq);
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ iwl_mvm_queue_state_change(op_mode, hw_queue, true);
}
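The reworked stop/wake path maps a hardware queue back to its per-TID mac80211 TXQs, remapping the driver's management TID onto the last mac80211 TXQ slot. Here is a small sketch of that remapping loop; the constants 8 and 16 are assumed to correspond to IWL_MAX_TID_COUNT and IEEE80211_NUM_TIDS in this kernel.

#include <stdio.h>

#define MAX_TID_COUNT		8	/* assumed IWL_MAX_TID_COUNT */
#define NUM_MAC80211_TIDS	16	/* assumed IEEE80211_NUM_TIDS */

/* Walk the TIDs scheduled on one hardware queue and report which
 * sta->txq[] slot each one maps to; the driver's management TID
 * (MAX_TID_COUNT) lives in the extra slot at NUM_MAC80211_TIDS. */
static void for_each_txq_on_hw_queue(unsigned long tid_bitmap)
{
	int tid;

	for (tid = 0; tid <= MAX_TID_COUNT; tid++) {
		int txq_index = tid;

		if (!(tid_bitmap & (1ul << tid)))
			continue;

		if (tid == MAX_TID_COUNT)
			txq_index = NUM_MAC80211_TIDS;

		printf("tid %d -> sta->txq[%d]\n", tid, txq_index);
	}
}

int main(void)
{
	/* Best-effort traffic (tid 0) and the management TID share a queue. */
	for_each_txq_on_hw_queue((1ul << 0) | (1ul << MAX_TID_COUNT));
	return 0;
}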
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
@@ -1261,12 +1317,29 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
reprobe->dev = mvm->trans->dev;
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
+ } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status)) {
+ IWL_ERR(mvm, "HW restart already requested, but not started\n");
} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
mvm->hw_registered &&
!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
/* don't let the transport/FW power down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ if (mvm->fw->ucode_capa.error_log_size) {
+ u32 src_size = mvm->fw->ucode_capa.error_log_size;
+ u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
+ u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC);
+
+ if (recover_buf) {
+ mvm->error_recovery_buf = recover_buf;
+ iwl_trans_read_mem_bytes(mvm->trans,
+ src_addr,
+ recover_buf,
+ src_size);
+ }
+ }
+
if (fw_error && mvm->fw_restart > 0)
mvm->fw_restart--;
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 7f5434b34d0d..86e40bae57e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -109,6 +109,7 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
return PHY_VHT_CTRL_POS_4_ABOVE;
default:
WARN(1, "Invalid channel definition");
+ /* fall through */
case 0:
/*
* The FW is expected to check the control channel position only
@@ -143,14 +144,11 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
u8 chains_static, u8 chains_dynamic)
{
u8 active_cnt, idle_cnt;
+ struct iwl_phy_context_cmd_tail *tail =
+ iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
/* Set the channel info data */
- cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
- PHY_BAND_24 : PHY_BAND_5);
-
- cmd->ci.channel = chandef->chan->hw_value;
- cmd->ci.width = iwl_mvm_get_channel_width(chandef);
- cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
/* Set rx the chains */
idle_cnt = chains_static;
@@ -168,17 +166,17 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
active_cnt = 2;
}
- cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
- cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
- cmd->rxchain_info |= cpu_to_le32(active_cnt <<
+ tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+ tail->rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (unlikely(mvm->dbgfs_rx_phyinfo))
- cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+ tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
#endif
- cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}
/*
@@ -195,6 +193,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
{
struct iwl_phy_context_cmd cmd;
int ret;
+ u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
/* Set the command header fields */
iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
@@ -203,9 +202,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
chains_static, chains_dynamic);
- ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
- sizeof(struct iwl_phy_context_cmd),
- &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd);
if (ret)
IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 5a0a28fd762d..36f5fa1ee793 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -79,6 +81,8 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
struct iwl_beacon_filter_cmd *cmd,
u32 flags)
{
+ u16 len;
+
IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
le32_to_cpu(cmd->ba_enable_beacon_abort));
IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
@@ -101,9 +105,23 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
le32_to_cpu(cmd->bf_temp_fast_filter));
IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
le32_to_cpu(cmd->bf_temp_slow_filter));
+ IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_low is: %d, %d\n",
+ le32_to_cpu(cmd->bf_threshold_absolute_low[0]),
+ le32_to_cpu(cmd->bf_threshold_absolute_low[1]));
+
+ IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_high is: %d, %d\n",
+ le32_to_cpu(cmd->bf_threshold_absolute_high[0]),
+ le32_to_cpu(cmd->bf_threshold_absolute_high[1]));
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BEACON_FILTER_V4))
+ len = sizeof(struct iwl_beacon_filter_cmd);
+ else
+ len = offsetof(struct iwl_beacon_filter_cmd,
+ bf_threshold_absolute_low);
return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
- sizeof(struct iwl_beacon_filter_cmd), cmd);
+ len, cmd);
}
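The length selection above follows the usual pattern for growing a firmware command: new fields are appended, and pre-V4 firmware simply receives the command truncated at the first new field via offsetof(). A minimal sketch with a mock layout; apart from the two bf_threshold_absolute_* arrays added in this patch, the field names are placeholders.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mock command layout: the V4 API appended the absolute-threshold
 * arrays, so older firmware just gets the command truncated before
 * them.  Except for the bf_threshold_absolute_* arrays, field names
 * here are placeholders for the real iwl_beacon_filter_cmd. */
struct beacon_filter_cmd_sketch {
	uint32_t bf_energy_delta;
	uint32_t bf_enable_beacon_filter;
	/* ... remaining pre-V4 fields ... */
	uint32_t bf_threshold_absolute_low[2];	/* new in V4 */
	uint32_t bf_threshold_absolute_high[2];	/* new in V4 */
};

static size_t beacon_filter_cmd_len(int has_v4_api)
{
	if (has_v4_api)
		return sizeof(struct beacon_filter_cmd_sketch);

	/* Everything up to, but not including, the first V4 field. */
	return offsetof(struct beacon_filter_cmd_sketch,
			bf_threshold_absolute_low);
}

int main(void)
{
	printf("v4 len = %zu, legacy len = %zu\n",
	       beacon_filter_cmd_len(1), beacon_filter_cmd_len(0));
	return 0;
}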
static
@@ -526,6 +544,9 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
cmd.flags &=
cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
+ if (mvm->ext_clock_valid)
+ cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK);
+
IWL_DEBUG_POWER(mvm,
"Sending device power command with flags = 0x%X\n",
cmd.flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index dabbc04853ac..a28283ff7295 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -149,14 +149,9 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
if (he_cap && he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) {
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
- if (he_cap->he_cap_elem.phy_cap_info[3] &
- IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2)
- flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK;
- }
-
return flags;
}
@@ -320,12 +315,26 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
u16 size = le32_to_cpu(notif->amsdu_size);
+ int i;
if (WARN_ON(sta->max_amsdu_len < size))
goto out;
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
+ sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ if (mvmsta->amsdu_enabled & BIT(i))
+ sta->max_tid_amsdu_len[i] =
+ iwl_mvm_max_amsdu_size(mvm, sta, i);
+ else
+ /*
+ * Not so elegant, but this will effectively
+ * prevent AMSDU on this TID
+ */
+ sta->max_tid_amsdu_len[i] = 1;
+ }
IWL_DEBUG_RATE(mvm,
"AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 089972280daa..e231a44d2423 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -3,7 +3,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1643,8 +1643,26 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
{
+ struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->vht_cap;
+ struct ieee80211_vht_cap vht_cap = {
+ .vht_cap_info = cpu_to_le32(sta_vht_cap->cap),
+ .supp_mcs = sta_vht_cap->vht_mcs,
+ };
+
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
+ /*
+ * Don't use 160 MHz if VHT extended NSS support
+ * says we cannot use 2 streams, we don't want to
+ * deal with this.
+ * We only check MCS 0 - they will support that if
+ * we got here at all and we don't care which MCS,
+ * we want to determine a more global state.
+ */
+ if (ieee80211_get_vht_max_nss(&vht_cap,
+ IEEE80211_VHT_CHANWIDTH_160MHZ,
+ 0, true) < sta->rx_nss)
+ return RATE_MCS_CHAN_WIDTH_80;
return RATE_MCS_CHAN_WIDTH_160;
case IEEE80211_STA_RX_BW_80:
return RATE_MCS_CHAN_WIDTH_80;
@@ -1744,6 +1762,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum rs_action scale_action)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int i;
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
@@ -1756,7 +1775,25 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
else
mvmsta->amsdu_enabled = 0xFFFF;
- mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ if (mvmsta->vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ else
+ mvmsta->max_amsdu_len = min_t(int, sta->max_amsdu_len, 8500);
+
+ sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ if (mvmsta->amsdu_enabled)
+ sta->max_tid_amsdu_len[i] =
+ iwl_mvm_max_amsdu_size(mvm, sta, i);
+ else
+ /*
+ * Not so elegant, but this will effectively
+ * prevent AMSDU on this TID
+ */
+ sta->max_tid_amsdu_len[i] = 1;
+ }
}
/*
@@ -1777,7 +1814,7 @@ static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
struct iwl_scale_tbl_info *tbl,
enum rs_action scale_action)
{
- if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
+ if (rs_bw_from_sta_bw(sta) != RATE_MCS_CHAN_WIDTH_80)
return false;
if (!is_vht_siso(&tbl->rate))
@@ -3332,12 +3369,12 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
/* Building the rate table is non trivial. When we're in MIMO2/VHT/80Mhz/SGI
* column the rate table should look like this:
*
- * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
- * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
- * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
- * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
- * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
- * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[0] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[1] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[2] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[3] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[4] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[5] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
* rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
* rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
@@ -4108,6 +4145,7 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
.add_sta_debugfs = rs_drv_add_sta_debugfs,
.remove_sta_debugfs = rs_remove_sta_debugfs,
#endif
+ .capa = RATE_CTRL_CAPA_VHT_EXT_NSS_BW,
};
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 6653a238f32e..fbd3014e8b82 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -222,7 +222,7 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
*crypt_len = IEEE80211_TKIP_IV_LEN;
- /* fall through if TTAK OK */
+ /* fall through */
case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
@@ -599,8 +599,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
- if (iwl_mvm_is_cdb_supported(mvm)) {
- struct mvm_statistics_general_cdb *general =
+ if (iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_general *general =
data->general;
mvmvif->beacon_stats.num_beacons =
@@ -723,7 +723,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else
expected_size = sizeof(struct iwl_notif_statistics_v10);
} else {
- expected_size = sizeof(struct iwl_notif_statistics_cdb);
+ expected_size = sizeof(struct iwl_notif_statistics);
}
if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
@@ -753,7 +753,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
flags = stats->flag;
} else {
- struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
+ struct iwl_notif_statistics *stats = (void *)&pkt->data;
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
@@ -792,7 +792,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
- struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
+ struct iwl_notif_statistics *stats = (void *)&pkt->data;
energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 7bd8676508f5..1e03acf30762 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -66,11 +66,37 @@
#include "mvm.h"
#include "fw-api.h"
+static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ u8 *data = skb->data;
+
+ /* Alignment concerns */
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4);
+ BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4);
+
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+ data += sizeof(struct ieee80211_radiotap_he);
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ data += sizeof(struct ieee80211_radiotap_he_mu);
+ if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG)
+ data += sizeof(struct ieee80211_radiotap_lsig);
+ if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
+ struct ieee80211_vendor_radiotap *radiotap = (void *)data;
+
+ data += sizeof(*radiotap) + radiotap->len + radiotap->pad;
+ }
+
+ return data;
+}
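/*
 * Note on the helper above: the RX path now leaves the radiotap
 * HE/HE-MU/L-SIG/vendor structures at the head of skb->data, in the
 * order tested here, followed by the 802.11 header. Code that used to
 * assume "hdr == skb->data" (the PN check and the reorder logic in the
 * hunks below) must go through this accessor instead. The BUILD_BUG_ONs
 * assert that every prefix struct is a multiple of 4 bytes, so skipping
 * them preserves the header's alignment.
 */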
+
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
int queue, struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
struct iwl_mvm_key_pn *ptk_pn;
int res;
@@ -192,27 +218,50 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
}
}
+static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_vendor_radiotap *radiotap;
+ const int size = sizeof(*radiotap) + sizeof(__le16);
+
+ if (!mvm->cur_aid)
+ return;
+
+ /* ensure alignment */
+ BUILD_BUG_ON((size + 2) % 4);
+
+ radiotap = skb_put(skb, size + 2);
+ radiotap->align = 1;
+ /* Intel OUI */
+ radiotap->oui[0] = 0xf6;
+ radiotap->oui[1] = 0x54;
+ radiotap->oui[2] = 0x25;
+ /* radiotap sniffer config sub-namespace */
+ radiotap->subns = 1;
+ radiotap->present = 0x1;
+ radiotap->len = size - sizeof(*radiotap);
+ radiotap->pad = 2;
+
+ /* fill the data now */
+ memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid));
+ /* and clear the padding */
+ memset(radiotap->data + sizeof(__le16), 0, radiotap->pad);
+
+ rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
+}
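/*
 * Size arithmetic for the sniffer element built above, assuming the
 * usual layout of struct ieee80211_vendor_radiotap (present + align +
 * oui[3] + subns + pad + len + data[] = 12 bytes of header):
 *
 *   size            = 12 (header) + 2 (__le16 AID payload) = 14
 *   skb_put() bytes = size + 2 bytes of zero padding        = 16
 *
 * 16 is a multiple of 4, which is exactly what the BUILD_BUG_ON checks,
 * and radiotap->len / radiotap->pad describe only the payload and
 * padding that follow the header (2 and 2 bytes respectively).
 */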
+
/* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ bool csi)
{
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
-
- if (!(rx_status->flag & RX_FLAG_NO_PSDU) &&
- iwl_mvm_check_pn(mvm, skb, queue, sta)) {
+ if (iwl_mvm_check_pn(mvm, skb, queue, sta))
kfree_skb(skb);
- } else {
- unsigned int radiotap_len = 0;
-
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
- radiotap_len += sizeof(struct ieee80211_radiotap_he);
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
- radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
- __skb_push(skb, radiotap_len);
+ else
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
- }
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@@ -289,7 +338,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
stats->flag |= RX_FLAG_MMIC_ERROR;
*crypt_len = IEEE80211_TKIP_IV_LEN;
- /* fall through if TTAK OK */
+ /* fall through */
case IWL_RX_MPDU_STATUS_SEC_WEP:
if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
return -1;
@@ -473,7 +522,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
- sta);
+ sta, false);
reorder_buf->num_stored--;
}
}
@@ -642,7 +691,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
@@ -666,6 +715,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* This also covers the case of receiving a Block Ack Request
* outside a BA session; we'll pass it to mac80211 and that
* then sends a delBA action frame.
+ * This also covers pure monitor mode, in which case we won't
+ * have any BA sessions.
*/
if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
return false;
@@ -937,6 +988,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
* the TSF/timers are not transmitted in HE-MU.
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
@@ -979,19 +1031,27 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
- if (he_mu) {
#define CHECK_BW(bw) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
- CHECK_BW(20);
- CHECK_BW(40);
- CHECK_BW(80);
- CHECK_BW(160);
+ CHECK_BW(20);
+ CHECK_BW(40);
+ CHECK_BW(80);
+ CHECK_BW(160);
+
+ if (he_mu)
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
- }
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ he->data6 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ rate_n_flags),
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}
static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
@@ -1014,16 +1074,16 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
- he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
- he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
- he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
- he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
/* fall through */
@@ -1033,7 +1093,6 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
case IWL_RX_PHY_INFO_TYPE_HE_TB:
/* HE common */
he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
@@ -1053,9 +1112,6 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
- he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
- IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
- IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
@@ -1076,6 +1132,20 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
switch (phy_data->info_type) {
case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+ break;
+ default:
+ /* nothing here */
+ break;
+ }
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
he_mu->flags1 |=
le16_encode_bits(le16_get_bits(phy_data->d4,
IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
@@ -1142,30 +1212,22 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
};
- unsigned int radiotap_len = 0;
he = skb_put_data(skb, &known, sizeof(known));
- radiotap_len += sizeof(known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE;
if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
- radiotap_len += sizeof(mu_known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
}
- /* temporarily hide the radiotap data */
- __skb_pull(skb, radiotap_len);
-
- if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_SU) {
- /* report the AMPDU-EOF bit on single frames */
- if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
- rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
- if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
- }
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
@@ -1178,9 +1240,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
/* toggle is switched whenever new aggregation starts */
- if (toggle_bit != mvm->ampdu_toggle &&
- (he_type == RATE_MCS_HE_TYPE_MU ||
- he_type == RATE_MCS_HE_TYPE_SU)) {
+ if (toggle_bit != mvm->ampdu_toggle) {
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
@@ -1314,6 +1374,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
.d4 = desc->phy_data4,
.info_type = IWL_RX_PHY_INFO_TYPE_NONE,
};
+ bool csi = false;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@@ -1412,7 +1473,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* set the preamble flag if appropriate */
- if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ if (rate_n_flags & RATE_MCS_CCK_MSK &&
+ phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
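/*
 * Note on the check above: short vs. long preamble is a DSSS/CCK
 * (802.11b) PHY concept, so the SHORTPRE flag is now reported only for
 * frames actually received at a CCK rate rather than for every frame
 * the firmware happened to tag.
 */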
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
@@ -1441,14 +1503,23 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
- rx_status->ampdu_reference = mvm->ampdu_ref;
- /* toggle is switched whenever new aggregation starts */
+ /*
+ * Toggle is switched whenever new aggregation starts. Make
+ * sure ampdu_reference is never 0 so we can later use it to
+ * see if the frame was really part of an A-MPDU or not.
+ */
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
+ if (mvm->ampdu_ref == 0)
+ mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
}
+ rx_status->ampdu_reference = mvm->ampdu_ref;
}
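/*
 * The reference update above boils down to an unsigned counter that may
 * wrap but must skip 0, because 0 is reserved to mean "not part of an
 * A-MPDU". A minimal sketch of the same idiom:
 */
static u32 next_ampdu_ref(u32 ref)
{
	if (++ref == 0)		/* skip 0 on wrap-around */
		ref++;
	return ref;
}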
+ if (unlikely(mvm->monitor_on))
+ iwl_mvm_add_rtap_sniffer_config(mvm, skb);
+
rcu_read_lock();
if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
@@ -1602,7 +1673,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
+ sta, csi);
out:
rcu_read_unlock();
}
@@ -1705,15 +1777,24 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->nss =
- ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
- RATE_VHT_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status->encoding = RX_ENC_VHT;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
- } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
+ /*
+ * Take the NSS from the rx_vec: rate_n_flags has only
+ * 2 bits for the NSS, which allows at most 4 spatial
+ * streams, but there may be up to 8.
+ */
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
+ } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
+ } else {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
@@ -1726,7 +1807,7 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->rate_idx = rate;
}
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
out:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 86d598d5b68f..78694bc38e76 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1580,6 +1580,11 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
* scheduled scan before starting a normal scan.
*/
+ /* FW supports only a single periodic scan */
+ if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
+ mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT))
+ return -EBUSY;
+
if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
return 0;
@@ -1616,10 +1621,10 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
true);
-
- /* fall through, something is wrong if no scan was
- * running but we ran out of scans.
+ /* Something is wrong if no scan was running but we
+ * ran out of scans.
*/
+ /* fall through */
default:
WARN_ON(1);
break;
@@ -1976,9 +1981,8 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
return ret;
}
- ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-
- return ret;
+ return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
+ 1 * HZ);
}
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index d1d76bb9a750..9da0dae78510 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,7 +66,7 @@ struct iwl_mvm_active_iface_iterator_data {
struct ieee80211_vif *ignore_vif;
u8 sta_vif_ap_sta_id;
enum iwl_sf_state sta_vif_state;
- int num_active_macs;
+ u32 num_active_macs;
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index e28009832da0..498c315291cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -314,7 +314,6 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
struct iwl_mvm_sta *mvmsta;
u32 status;
u8 sta_id;
- int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
@@ -349,31 +348,21 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
/* Notify FW of queue removal from the STA queues */
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
- iwl_mvm_add_sta_cmd_size(mvm),
- &cmd, &status);
-
- return ret;
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
}
-static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u8 tid, u8 flags)
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int queue, u8 tid, u8 flags)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
- bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
int ret;
- if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
- return -EINVAL;
-
if (iwl_mvm_has_new_tx_api(mvm)) {
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
-
iwl_trans_txq_free(mvm->trans, queue);
return 0;
@@ -384,36 +373,15 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
- /*
- * If there is another TID with the same AC - don't remove the MAC queue
- * from the mapping
- */
- if (tid < IWL_MAX_TID_COUNT) {
- unsigned long tid_bitmap =
- mvm->queue_info[queue].tid_bitmap;
- int ac = tid_to_mac80211_ac[tid];
- int i;
-
- for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
- if (tid_to_mac80211_ac[i] == ac)
- remove_mac_queue = false;
- }
- }
-
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
-
cmd.action = mvm->queue_info[queue].tid_bitmap ?
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
if (cmd.action == SCD_CFG_DISABLE_QUEUE)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
- "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+ "Disabling TXQ #%d tids=0x%x\n",
queue,
- mvm->queue_info[queue].tid_bitmap,
- mvm->hw_queue_to_mac80211[queue]);
+ mvm->queue_info[queue].tid_bitmap);
/* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE)
@@ -423,15 +391,19 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
cmd.tid = mvm->queue_info[queue].txq_tid;
/* Make sure queue info is correct even though we overwrite it */
- WARN(mvm->queue_info[queue].tid_bitmap ||
- mvm->hw_queue_to_mac80211[queue],
- "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
- queue, mvm->hw_queue_to_mac80211[queue],
- mvm->queue_info[queue].tid_bitmap);
+ WARN(mvm->queue_info[queue].tid_bitmap,
+ "TXQ #%d info out-of-sync - tids=0x%x\n",
+ queue, mvm->queue_info[queue].tid_bitmap);
/* If we are here - the queue is freed and we can zero out these vals */
mvm->queue_info[queue].tid_bitmap = 0;
- mvm->hw_queue_to_mac80211[queue] = 0;
+
+ if (sta) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ }
/* Regardless of whether this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
@@ -517,9 +489,14 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
spin_lock_bh(&mvmsta->lock);
/* Unmap MAC queues and TIDs from this queue */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
disable_agg_tids |= BIT(tid);
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -541,10 +518,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
+ struct ieee80211_sta *old_sta,
u8 new_sta_id)
{
struct iwl_mvm_sta *mvmsta;
- u8 txq_curr_ac, sta_id, tid;
+ u8 sta_id, tid;
unsigned long disable_agg_tids = 0;
bool same_sta;
int ret;
@@ -554,7 +532,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
- txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
tid = mvm->queue_info[queue].txq_tid;
@@ -570,9 +547,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
- ret = iwl_mvm_disable_txq(mvm, queue,
- mvmsta->vif->hw_queue[txq_curr_ac],
- tid, 0);
+ ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
@@ -662,16 +637,15 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* in such a case, otherwise - if no redirection required - it does nothing,
* unless the %force param is true.
*/
-static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force)
+static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force, struct iwl_mvm_txq *txq)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool shared_queue;
- unsigned long mq;
int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -695,14 +669,14 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
- mq = mvm->hw_queue_to_mac80211[queue];
shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]);
- /* Stop MAC queues and wait for this queue to empty */
- iwl_mvm_stop_mac_queues(mvm, mq);
+ /* Stop the queue and wait for it to empty */
+ txq->stopped = true;
+
ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
if (ret) {
IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
@@ -743,8 +717,8 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
out:
- /* Continue using the MAC queues */
- iwl_mvm_start_mac_queues(mvm, mq);
+ /* Continue using the queue */
+ txq->stopped = false;
return ret;
}
@@ -769,14 +743,15 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
return -ENOSPC;
}
-static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
int queue, size = IWL_DEFAULT_QUEUE_SIZE;
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
- size = IWL_MGMT_QUEUE_SIZE;
+ size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+ mvm->trans->cfg->min_txq_size);
}
queue = iwl_trans_txq_alloc(mvm->trans,
cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
@@ -792,10 +767,7 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d (mac80211 map:0x%x)\n",
- queue, mvm->hw_queue_to_mac80211[queue]);
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
return queue;
}
@@ -805,9 +777,10 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
- u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
lockdep_assert_held(&mvm->mutex);
@@ -815,11 +788,14 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Allocating queue for sta %d on tid %d\n",
mvmsta->sta_id, tid);
- queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
- wdg_timeout);
+ queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
if (queue < 0)
return queue;
+ mvmtxq->txq_id = queue;
+ mvm->tvqm_info[queue].txq_tid = tid;
+ mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
+
IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
spin_lock_bh(&mvmsta->lock);
@@ -829,8 +805,9 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
return 0;
}
-static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u8 sta_id, u8 tid)
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ int queue, u8 sta_id, u8 tid)
{
bool enable_queue = true;
@@ -845,14 +822,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
if (mvm->queue_info[queue].tid_bitmap)
enable_queue = false;
- if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
- WARN(mac80211_queue >=
- BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
- "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
- mac80211_queue, queue, sta_id, tid);
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- }
-
mvm->queue_info[queue].tid_bitmap |= BIT(tid);
mvm->queue_info[queue].ra_sta_id = sta_id;
@@ -866,16 +835,22 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
mvm->queue_info[queue].txq_tid = tid;
}
+ if (sta) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
+ mvmtxq->txq_id = queue;
+ }
+
IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
- queue, mvm->queue_info[queue].tid_bitmap,
- mvm->hw_queue_to_mac80211[queue]);
+ "Enabling TXQ #%d tids=0x%x\n",
+ queue, mvm->queue_info[queue].tid_bitmap);
return enable_queue;
}
-static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u16 ssn,
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
@@ -895,8 +870,7 @@ static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
return false;
/* Send the enabling command if we need to */
- if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
- cfg->sta_id, cfg->tid))
+ if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
return false;
inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
@@ -989,9 +963,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
- tid_to_mac80211_ac[tid], ssn,
- wdg_timeout, true);
+ ret = iwl_mvm_redirect_queue(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true,
+ iwl_mvm_txq_from_tid(sta, tid));
if (ret) {
IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
return;
@@ -1068,11 +1043,9 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
u16 tid_bitmap;
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
- mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -1105,10 +1078,6 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* sure all TIDs have existing corresponding mac queues enabled
*/
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- mvm->hw_queue_to_mac80211[queue] |=
- BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
- }
/* If the queue is marked as shared - "unshare" it */
if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
@@ -1136,6 +1105,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
unsigned long unshare_queues = 0;
unsigned long changetid_queues = 0;
int i, ret, free_queue = -ENOSPC;
+ struct ieee80211_sta *queue_owner = NULL;
lockdep_assert_held(&mvm->mutex);
@@ -1201,13 +1171,14 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
inactive_tid_bitmap,
&unshare_queues,
&changetid_queues);
- if (ret >= 0 && free_queue < 0)
+ if (ret >= 0 && free_queue < 0) {
+ queue_owner = sta;
free_queue = ret;
+ }
/* only unlock sta lock - we still need the queue info lock */
spin_unlock_bh(&mvmsta->lock);
}
- rcu_read_unlock();
/* Reconfigure queues requiring reconfiguration */
for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
@@ -1216,18 +1187,21 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
iwl_mvm_change_queue_tid(mvm, i);
if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
- ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
+ ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
alloc_for_sta);
- if (ret)
+ if (ret) {
+ rcu_read_unlock();
return ret;
+ }
}
+ rcu_read_unlock();
+
return free_queue;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta, u8 ac, int tid,
- struct ieee80211_hdr *hdr)
+ struct ieee80211_sta *sta, u8 ac, int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_trans_txq_scd_cfg cfg = {
@@ -1238,7 +1212,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
};
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
- u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
@@ -1257,12 +1230,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
spin_unlock_bh(&mvmsta->lock);
- /*
- * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
- * exists
- */
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ if (tid == IWL_MAX_TID_COUNT) {
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_MGMT_QUEUE,
IWL_MVM_DQA_MAX_MGMT_QUEUE);
@@ -1341,8 +1309,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
}
- inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
- ssn, &cfg, wdg_timeout);
+ inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
/*
* Mark queue as shared in transport if shared
@@ -1384,8 +1351,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
} else {
/* Redirect queue, if needed */
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
- wdg_timeout, false);
+ ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
+ wdg_timeout, false,
+ iwl_mvm_txq_from_tid(sta, tid));
if (ret)
goto out_err;
}
@@ -1393,7 +1361,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
return 0;
out_err:
- iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
+ iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
return ret;
}
@@ -1406,87 +1374,32 @@ static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
return tid_to_mac80211_ac[tid];
}
-static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta, int tid)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- struct sk_buff_head deferred_tx;
- u8 mac_queue;
- bool no_queue = false; /* Marks if there is a problem with the queue */
- u8 ac;
-
- lockdep_assert_held(&mvm->mutex);
-
- skb = skb_peek(&tid_data->deferred_tx_frames);
- if (!skb)
- return;
- hdr = (void *)skb->data;
-
- ac = iwl_mvm_tid_to_ac_queue(tid);
- mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
-
- if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
- iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
- IWL_ERR(mvm,
- "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
- mvmsta->sta_id, tid);
-
- /*
- * Mark queue as problematic so later the deferred traffic is
- * freed, as we can do nothing with it
- */
- no_queue = true;
- }
-
- __skb_queue_head_init(&deferred_tx);
-
- /* Disable bottom-halves when entering TX path */
- local_bh_disable();
- spin_lock(&mvmsta->lock);
- skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
- mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
- spin_unlock(&mvmsta->lock);
-
- while ((skb = __skb_dequeue(&deferred_tx)))
- if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
- ieee80211_free_txskb(mvm->hw, skb);
- local_bh_enable();
-
- /* Wake queue */
- iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
-}
-
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
add_stream_wk);
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- unsigned long deferred_tid_traffic;
- int sta_id, tid;
mutex_lock(&mvm->mutex);
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
- /* Go over all stations with deferred traffic */
- for_each_set_bit(sta_id, mvm->sta_deferred_frames,
- IWL_MVM_STATION_COUNT) {
- clear_bit(sta_id, mvm->sta_deferred_frames);
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
- continue;
+ while (!list_empty(&mvm->add_stream_txqs)) {
+ struct iwl_mvm_txq *mvmtxq;
+ struct ieee80211_txq *txq;
+ u8 tid;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
- deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+ mvmtxq = list_first_entry(&mvm->add_stream_txqs,
+ struct iwl_mvm_txq, list);
+
+ txq = container_of((void *)mvmtxq, struct ieee80211_txq,
+ drv_priv);
+ tid = txq->tid;
+ if (tid == IEEE80211_NUM_TIDS)
+ tid = IWL_MAX_TID_COUNT;
- for_each_set_bit(tid, &deferred_tid_traffic,
- IWL_MAX_TID_COUNT + 1)
- iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+ iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
+ list_del_init(&mvmtxq->list);
+ iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
}
mutex_unlock(&mvm->mutex);
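/*
 * Sketch of the mac80211 <-> driver TXQ mapping the new worker above
 * relies on (iwl_mvm_txq_from_mac80211() is the driver's own inline;
 * these examples only illustrate that drv_priv is driver-owned storage
 * embedded in struct ieee80211_txq, so the two pointers convert both
 * ways without any lookup table):
 */
static inline struct iwl_mvm_txq *
example_txq_to_priv(struct ieee80211_txq *txq)
{
	return (struct iwl_mvm_txq *)txq->drv_priv;
}

static inline struct ieee80211_txq *
example_priv_to_txq(struct iwl_mvm_txq *mvmtxq)
{
	return container_of((void *)mvmtxq, struct ieee80211_txq, drv_priv);
}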
@@ -1542,10 +1455,11 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
* Note that re-enabling aggregations isn't done in this function.
*/
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta)
+ struct ieee80211_sta *sta)
{
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned int wdg =
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
int i;
struct iwl_trans_txq_scd_cfg cfg = {
.sta_id = mvm_sta->sta_id,
@@ -1561,23 +1475,18 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
int txq_id = tid_data->txq_id;
int ac;
- u8 mac_queue;
if (txq_id == IWL_MVM_INVALID_QUEUE)
continue;
- skb_queue_head_init(&tid_data->deferred_tx_frames);
-
ac = tid_to_mac80211_ac[i];
- mac_queue = mvm_sta->vif->hw_queue[ac];
if (iwl_mvm_has_new_tx_api(mvm)) {
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d\n",
mvm_sta->sta_id, i);
- txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
- mvm_sta->sta_id,
- i, wdg_timeout);
+ txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
+ i, wdg);
tid_data->txq_id = txq_id;
/*
@@ -1600,8 +1509,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
"Re-mapping sta %d tid %d to queue %d\n",
mvm_sta->sta_id, i, txq_id);
- iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
- wdg_timeout);
+ iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
}
}
@@ -1691,7 +1599,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (ret)
goto err;
- iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
+ iwl_mvm_realloc_queues_after_restart(mvm, sta);
sta_update = true;
sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
goto update_fw;
@@ -1724,9 +1632,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
* frames until the queue is allocated
*/
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
- skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
}
- mvm_sta->deferred_traffic_tid_map = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ INIT_LIST_HEAD(&mvmtxq->list);
+ atomic_set(&mvmtxq->tx_request, 0);
+ }
+
mvm_sta->agg_tids = 0;
if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -1861,9 +1777,9 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mvm_sta *mvm_sta)
+ struct ieee80211_sta *sta)
{
- int ac;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int i;
lockdep_assert_held(&mvm->mutex);
@@ -1872,11 +1788,17 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
- ac = iwl_mvm_tid_to_ac_queue(i);
- iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
- vif->hw_queue[ac], i, 0);
+ iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
+ 0);
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ }
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
@@ -1938,7 +1860,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
- iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+ iwl_mvm_disable_sta_queues(mvm, vif, sta);
/* If there is a TXQ still marked as reserved - free it */
if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
@@ -2044,7 +1966,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
if (iwl_mvm_has_new_tx_api(mvm)) {
int tvqm_queue =
- iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
+ iwl_mvm_tvqm_enable_txq(mvm, sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
*queue = tvqm_queue;
@@ -2057,7 +1979,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
.frame_limit = IWL_FRAME_LIMIT,
};
- iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
+ iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
}
}
@@ -2135,8 +2057,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
- IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2195,8 +2116,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
bsta->tfd_queue_msk |= BIT(queue);
- iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
- &cfg, wdg_timeout);
+ iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -2215,8 +2135,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
- queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
- bsta->sta_id,
+ queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
@@ -2254,7 +2173,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
return;
}
- iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
if (iwl_mvm_has_new_tx_api(mvm))
return;
@@ -2377,10 +2296,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* Note that this is done here as we want to avoid making DQA
* changes in mac80211 layer.
*/
- if (vif->type == NL80211_IFTYPE_ADHOC) {
- vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
- mvmvif->cab_queue = vif->cab_queue;
- }
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
/*
* While in previous FWs we had to exclude cab queue from TFD queue
@@ -2388,9 +2305,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
*/
if (!iwl_mvm_has_new_tx_api(mvm) &&
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
- &cfg, timeout);
- msta->tfd_queue_msk |= BIT(vif->cab_queue);
+ iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
+ timeout);
+ msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
}
ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
mvmvif->id, mvmvif->color);
@@ -2407,24 +2324,25 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* tfd_queue_mask.
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
- int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
- msta->sta_id,
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
0,
timeout);
mvmvif->cab_queue = queue;
} else if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
- &cfg, timeout);
+ iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
+ timeout);
if (mvmvif->ap_wep_key) {
u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
+ __set_bit(key_offset, mvm->fw_key_table);
+
if (key_offset == STA_KEY_IDX_INVALID)
return -ENOSPC;
ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
- mvmvif->ap_wep_key, 1, 0, NULL, 0,
+ mvmvif->ap_wep_key, true, 0, NULL, 0,
key_offset, 0);
if (ret)
return ret;
@@ -2433,6 +2351,59 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return 0;
}
+static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
+ struct ieee80211_key_conf *keyconf,
+ bool mcast)
+{
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+ __le16 key_flags;
+ int ret, size;
+ u32 status;
+
+ /* This is a valid situation for GTK removal */
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return 0;
+
+ key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+ key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+ if (mcast)
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ /*
+ * The fields assigned here are in the same location at the start
+ * of the command, so we can do this union trick.
+ */
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.key_offset = keyconf->hw_key_idx;
+ u.cmd.common.sta_id = sta_id;
+
+ size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
+ &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
/*
* Send the FW a request to remove the station from its internal data
* structures, and in addition remove it from the local data structure.
@@ -2446,8 +2417,29 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
- iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
- 0, 0);
+ iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
+
+ if (mvmvif->ap_wep_key) {
+ int i;
+
+ if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
+ mvm->fw_key_table)) {
+ IWL_ERR(mvm, "offset %d not used in fw key table.\n",
+ mvmvif->ap_wep_key->hw_key_idx);
+ return -ENOENT;
+ }
+
+ /* track which key was deleted last */
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (mvm->fw_key_deleted[i] < U8_MAX)
+ mvm->fw_key_deleted[i]++;
+ }
+ mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
+ ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
+ mvmvif->ap_wep_key, true);
+ if (ret)
+ return ret;
+ }
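/*
 * The aging loop above keeps a small "time since deletion" table: every
 * entry is incremented (saturating at U8_MAX) on each deletion and the
 * slot just freed is reset to 0, so the key-offset allocator
 * (iwl_mvm_set_fw_key_idx(), used earlier in this file) can prefer the
 * unused offset that was freed longest ago.
 */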
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@@ -2781,7 +2773,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data;
u16 normalized_ssn;
- int txq_id;
+ u16 txq_id;
int ret;
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
@@ -2823,17 +2815,24 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
txq_id = mvmsta->tid_data[tid].txq_id;
if (txq_id == IWL_MVM_INVALID_QUEUE) {
- txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
- IWL_MVM_DQA_MIN_DATA_QUEUE,
- IWL_MVM_DQA_MAX_DATA_QUEUE);
- if (txq_id < 0) {
- ret = txq_id;
+ ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (ret < 0) {
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto out;
}
+ txq_id = ret;
+
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+ } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
+ ret = -ENXIO;
+ IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
+ tid, IWL_MAX_HW_QUEUES - 1);
+ goto out;
+
} else if (unlikely(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_SHARED)) {
ret = -ENXIO;
@@ -2976,8 +2975,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
if (alloc_queue)
- iwl_mvm_enable_txq(mvm, queue,
- vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
+ iwl_mvm_enable_txq(mvm, sta, queue, ssn,
&cfg, wdg_timeout);
/* Send ADD_STA command to enable aggs only if the queue isn't shared */
@@ -3474,59 +3472,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
return ret;
}
-static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
- struct ieee80211_key_conf *keyconf,
- bool mcast)
-{
- union {
- struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
- struct iwl_mvm_add_sta_key_cmd cmd;
- } u = {};
- bool new_api = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
- __le16 key_flags;
- int ret, size;
- u32 status;
-
- /* This is a valid situation for GTK removal */
- if (sta_id == IWL_MVM_INVALID_STA)
- return 0;
-
- key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
- STA_KEY_FLG_KEYID_MSK);
- key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
- key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
-
- if (mcast)
- key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
-
- /*
- * The fields assigned here are in the same location at the start
- * of the command, so we can do this union trick.
- */
- u.cmd.common.key_flags = key_flags;
- u.cmd.common.key_offset = keyconf->hw_key_idx;
- u.cmd.common.sta_id = sta_id;
-
- size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
-
- status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
- &status);
-
- switch (status) {
- case ADD_STA_SUCCESS:
- IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
- break;
- }
-
- return ret;
-}
-
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index d52cd888f77d..79700c7310a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -297,7 +297,6 @@ enum iwl_mvm_agg_state {
/**
* struct iwl_mvm_tid_data - holds the states for each RA / TID
- * @deferred_tx_frames: deferred TX frames for this RA/TID
* @seq_number: the next WiFi sequence number to use
* @next_reclaimed: the WiFi sequence number of the next packet to be acked.
* This is basically (last acked packet++).
@@ -318,7 +317,6 @@ enum iwl_mvm_agg_state {
* tpt_meas_start
*/
struct iwl_mvm_tid_data {
- struct sk_buff_head deferred_tx_frames;
u16 seq_number;
u16 next_reclaimed;
/* The rest is Tx AGG related */
@@ -396,6 +394,7 @@ struct iwl_mvm_rxq_dup_data {
* the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
+ * @wep_key: used in AP mode. Is a duplicate of the WEP key.
* @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
@@ -427,7 +426,7 @@ struct iwl_mvm_sta {
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
- u16 deferred_traffic_tid_map;
+ struct ieee80211_key_conf *wep_key;
u8 reserved_queue;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index e02f4eb20359..859aa5a4e6b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -399,6 +399,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct iwl_tdls_channel_switch_cmd cmd = {0};
+ struct iwl_tdls_channel_switch_cmd_tail *tail =
+ iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
+ u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -414,9 +417,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
}
cmd.switch_type = type;
- cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
- cmd.timing.switch_time = cpu_to_le32(switch_time);
- cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
+ tail->timing.frame_timestamp = cpu_to_le32(timestamp);
+ tail->timing.switch_time = cpu_to_le32(switch_time);
+ tail->timing.switch_timeout = cpu_to_le32(switch_timeout);
rcu_read_lock();
sta = ieee80211_find_sta(vif, peer);
@@ -448,21 +451,16 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
}
}
- if (chandef) {
- cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
- PHY_BAND_24 : PHY_BAND_5);
- cmd.ci.channel = chandef->chan->hw_value;
- cmd.ci.width = iwl_mvm_get_channel_width(chandef);
- cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
- }
+ if (chandef)
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);
/* keep quota calculation simple for now - 50% of DTIM for TDLS */
- cmd.timing.max_offchan_duration =
+ tail->timing.max_offchan_duration =
cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
vif->bss_conf.beacon_int) / 2);
/* Switch time is the first element in the switch-timing IE. */
- cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
+ tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
info = IEEE80211_SKB_CB(skb);
hdr = (void *)skb->data;
@@ -472,20 +470,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
ret = -EINVAL;
goto out;
}
- iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+ iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
}
- iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
+ iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
mvmsta->sta_id);
- iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
+ iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
hdr->frame_control);
rcu_read_unlock();
- memcpy(cmd.frame.data, skb->data, skb->len);
+ memcpy(tail->frame.data, skb->data, skb->len);
- ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
ret);
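/*
 * Pattern used throughout this series for commands embedding the (now
 * larger) channel-info structure: the command is declared with the
 * full-size layout, iwl_mvm_chan_info_cmd_tail() locates the variable
 * part behind the channel info, and the length actually sent is trimmed
 * by iwl_mvm_chan_info_padding() so that firmware expecting the older,
 * smaller layout still receives a command of the size it knows.
 */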
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index e1a6f4e22253..9693fa4cdc39 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -85,7 +85,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
{
lockdep_assert_held(&mvm->time_event_lock);
- if (!te_data->vif)
+ if (!te_data || !te_data->vif)
return;
list_del(&te_data->list);
@@ -334,6 +334,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
@@ -686,6 +687,8 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data)
{
struct iwl_hs20_roc_req aux_cmd = {};
+ u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
+
u32 uid;
int ret;
@@ -699,7 +702,7 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
le32_to_cpu(aux_cmd.event_unique_id));
ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
- sizeof(aux_cmd), &aux_cmd);
+ len, &aux_cmd);
if (WARN_ON(ret))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
deleted file mode 100644
index 01e0a999063b..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include "mvm.h"
-#include "fw/api/tof.h"
-
-#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
-
-void iwl_mvm_tof_init(struct iwl_mvm *mvm)
-{
- struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return;
-
- memset(tof_data, 0, sizeof(*tof_data));
-
- tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (IWL_MVM_TOF_IS_RESPONDER) {
- tof_data->responder_cfg.sub_grp_cmd_id =
- cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
- tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
- }
-#endif
-
- tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
- tof_data->range_req.req_timeout = 1;
- tof_data->range_req.initiator = 1;
- tof_data->range_req.report_policy = 3;
-
- tof_data->range_req_ext.sub_grp_cmd_id =
- cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
-
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
- mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
-}
-
-void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
-{
- struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
-
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
- !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE))
- return;
-
- memset(tof_data, 0, sizeof(*tof_data));
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
- mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
-}
-
-static void iwl_tof_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- bool *enabled = _data;
-
- /* non bss vif exists */
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
- *enabled = false;
-}
-
-int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
-{
- struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
- bool enabled;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_tof_iterator, &enabled);
- if (!enabled) {
- IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
- return -EINVAL;
- }
-
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(*cmd), cmd);
-}
-
-int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
-{
- struct iwl_tof_range_abort_cmd cmd = {
- .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
- .request_id = id,
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- if (id != mvm->tof_data.active_range_request) {
- IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
- id, mvm->tof_data.active_range_request);
- return -EINVAL;
- }
-
- /* after abort is sent there's no active request anymore */
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
-
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(cmd), &cmd);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
- !mvmvif->ap_ibss_active) {
- IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
- return -EIO;
- }
-
- cmd->sta_id = mvmvif->bcast_sta.sta_id;
- memcpy(cmd->bssid, vif->addr, ETH_ALEN);
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(*cmd), cmd);
-}
-#endif
-
-int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
- .len = { sizeof(mvm->tof_data.range_req), },
- /* no copy because of the command size */
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
- IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
- return -EIO;
- }
-
- /* nesting of range requests is not supported in FW */
- if (mvm->tof_data.active_range_request !=
- IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
- IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
- mvm->tof_data.active_range_request);
- return -EIO;
- }
-
- mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
-
- cmd.data[0] = &mvm->tof_data.range_req;
- return iwl_mvm_send_cmd(mvm, &cmd);
-}
-
-int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
- IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
- return -EIO;
- }
-
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(mvm->tof_data.range_req_ext),
- &mvm->tof_data.range_req_ext);
-}
-
-static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
-{
- struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
-
- if (resp->request_id != mvm->tof_data.active_range_request) {
- IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
- resp->request_id, mvm->tof_data.active_range_request);
- return -EIO;
- }
-
- memcpy(&mvm->tof_data.range_resp, resp,
- sizeof(struct iwl_tof_range_rsp_ntfy));
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
-
- return 0;
-}
-
-static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
-{
- struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
-
- IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
- return 0;
-}
-
-static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
-{
- struct iwl_tof_neighbor_report *report =
- (struct iwl_tof_neighbor_report *)data;
-
- IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
- report->bssid, report->request_token, report->status);
- return 0;
-}
-
-void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
-
- lockdep_assert_held(&mvm->mutex);
-
- switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
- case TOF_RANGE_RESPONSE_NOTIF:
- iwl_mvm_tof_range_resp(mvm, resp->data);
- break;
- case TOF_MCSI_DEBUG_NOTIF:
- iwl_mvm_tof_mcsi_notif(mvm, resp->data);
- break;
- case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
- iwl_mvm_tof_nb_report_notif(mvm, resp->data);
- break;
- default:
- IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
- resp->sub_grp_cmd_id);
- break;
- }
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
deleted file mode 100644
index 8138d0606c52..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __tof_h__
-#define __tof_h__
-
-#include "fw/api/tof.h"
-
-struct iwl_mvm_tof_data {
- struct iwl_tof_config_cmd tof_cfg;
- struct iwl_tof_range_req_cmd range_req;
- struct iwl_tof_range_req_ext_cmd range_req_ext;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct iwl_tof_responder_config_cmd responder_cfg;
-#endif
- struct iwl_tof_range_rsp_ntfy range_resp;
- u8 last_abort_id;
- u16 active_range_request;
-};
-
-void iwl_mvm_tof_init(struct iwl_mvm *mvm);
-void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
-int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
-int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
-int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
-int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-#endif
-#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 995fe2a6abbb..0c2aabc842f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -209,7 +209,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 offload_assist = 0;
u8 ac;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (ieee80211_is_probe_resp(fc) &&
+ !is_multicast_ether_addr(hdr->addr1)))
tx_flags |= TX_CMD_FLG_ACK;
else
tx_flags &= ~TX_CMD_FLG_ACK;
@@ -278,7 +280,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
}
if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
- !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+ !is_multicast_ether_addr(hdr->addr1))
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -533,10 +535,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/*
* For data packets rate info comes from the fw. Only
- * set rate/antenna during connection establishment.
+ * set rate/antenna during connection establishment or in case
+ * no station is given.
*/
- if (sta && (!ieee80211_is_data(hdr->frame_control) ||
- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
+ if (!sta || !ieee80211_is_data(hdr->frame_control) ||
+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
@@ -602,11 +605,12 @@ static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
}
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info, __le16 fc)
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr)
{
- struct iwl_mvm_vif *mvmvif;
-
- mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ __le16 fc = hdr->frame_control;
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
@@ -625,7 +629,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
(!ieee80211_is_bufferable_mmpdu(fc) ||
ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
return mvm->probe_queue;
- if (info->hw_queue == info->control.vif->cab_queue)
+
+ if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
+ is_multicast_ether_addr(hdr->addr1))
return mvmvif->cab_queue;
WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
@@ -634,8 +640,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
return mvm->p2p_dev_queue;
- if (info->hw_queue == info->control.vif->cab_queue)
- return mvmvif->cab_queue;
WARN_ON_ONCE(1);
return mvm->p2p_dev_queue;
@@ -713,18 +717,18 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
+ bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+ IEEE80211_TX_CTL_TX_OFFCHAN;
int queue = -1;
+ if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
+ return -1;
+
memcpy(&info, skb->cb, sizeof(info));
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
return -1;
- if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
- (!info.control.vif ||
- info.hw_queue != info.control.vif->cab_queue)))
- return -1;
-
if (info.control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info.control.vif);
@@ -737,14 +741,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
else
sta_id = mvmvif->mcast_sta.sta_id;
- queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
- hdr->frame_control);
-
+ queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
+ offchannel) {
/*
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
* that can be used in 2 different types of vifs, P2P &
@@ -758,8 +760,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
}
}
- if (queue < 0)
+ if (queue < 0) {
+ IWL_ERR(mvm, "No queue was found. Dropping TX\n");
return -1;
+ }
if (unlikely(ieee80211_is_probe_resp(fc)))
iwl_mvm_probe_resp_set_noa(mvm, skb);
@@ -781,6 +785,35 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return 0;
}
+unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, unsigned int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
+ u8 ac = tid_to_mac80211_ac[tid];
+ unsigned int txf;
+ int lmac = IWL_LMAC_24G_INDEX;
+
+ if (iwl_mvm_is_cdb_supported(mvm) &&
+ band == NL80211_BAND_5GHZ)
+ lmac = IWL_LMAC_5G_INDEX;
+
+ /* For HE redirect to trigger based fifos */
+ if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+ ac += 4;
+
+ txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+
+ /*
+ * Don't send an AMSDU that will be longer than the TXF.
+ * Add a security margin of 256 for the TX command + headers.
+ * We also want to have the start of the next packet inside the
+ * fifo to be able to send bursts.
+ */
+ return min_t(unsigned int, mvmsta->max_amsdu_len,
+ mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
#ifdef CONFIG_INET
static int
@@ -850,36 +883,6 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
return 0;
}
-static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- unsigned int tid)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
- u8 ac = tid_to_mac80211_ac[tid];
- unsigned int txf;
- int lmac = IWL_LMAC_24G_INDEX;
-
- if (iwl_mvm_is_cdb_supported(mvm) &&
- band == NL80211_BAND_5GHZ)
- lmac = IWL_LMAC_5G_INDEX;
-
- /* For HE redirect to trigger based fifos */
- if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
- ac += 4;
-
- txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
-
- /*
- * Don't send an AMSDU that will be longer than the TXF.
- * Add a security margin of 256 for the TX command + headers.
- * We also want to have the start of the next packet inside the
- * fifo to be able to send bursts.
- */
- return min_t(unsigned int, mvmsta->max_amsdu_len,
- mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
-}
-
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
@@ -1002,34 +1005,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
#endif
-static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta, u8 tid,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u8 mac_queue = info->hw_queue;
- struct sk_buff_head *deferred_tx_frames;
-
- lockdep_assert_held(&mvm_sta->lock);
-
- mvm_sta->deferred_traffic_tid_map |= BIT(tid);
- set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
-
- deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
-
- skb_queue_tail(deferred_tx_frames, skb);
-
- /*
- * The first deferred frame should've stopped the MAC queues, so we
- * should never get a second deferred frame for the RA/TID.
- * In case of GSO the first packet may have been split, so don't warn.
- */
- if (skb_queue_len(deferred_tx_frames) == 1) {
- iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
- schedule_work(&mvm->add_stream_wk);
- }
-}
-
/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
@@ -1054,7 +1029,12 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
int airtime)
{
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
- struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+ struct iwl_mvm_tcm_mac *mdata;
+
+ if (mac >= NUM_MAC_INDEX_DRIVER)
+ return;
+
+ mdata = &mvm->tcm.data[mac];
if (mvm->tcm.paused)
return;
@@ -1065,14 +1045,21 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
mdata->tx.airtime += airtime;
}
-static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvmsta, int tid)
+static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int tid)
{
u32 ac = tid_to_mac80211_ac[tid];
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
- struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+ struct iwl_mvm_tcm_mac *mdata;
+
+ if (mac >= NUM_MAC_INDEX_DRIVER)
+ return -EINVAL;
+
+ mdata = &mvm->tcm.data[mac];
mdata->tx.pkts[ac]++;
+
+ return 0;
}
/*
@@ -1088,7 +1075,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
- u16 txq_id = info->hw_queue;
+ u16 txq_id;
bool is_ampdu = false;
int hdrlen;
@@ -1096,6 +1083,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
fc = hdr->frame_control;
hdrlen = ieee80211_hdrlen(fc);
+ if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
+ return -1;
+
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -1125,12 +1115,14 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
*/
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
tid = ieee80211_get_tid(hdr);
- if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
goto drop_unlock_sta;
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
- if (WARN_ON_ONCE(is_ampdu &&
- mvmsta->tid_data[tid].state != IWL_AGG_ON))
+ if (WARN_ONCE(is_ampdu &&
+ mvmsta->tid_data[tid].state != IWL_AGG_ON,
+ "Invalid internal agg state %d for TID %d",
+ mvmsta->tid_data[tid].state, tid))
goto drop_unlock_sta;
seq_number = mvmsta->tid_data[tid].seq_number;
@@ -1152,14 +1144,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
- /* Check if TXQ needs to be allocated or re-activated */
- if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
- iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
-
- /*
- * The frame is now deferred, and the worker scheduled
- * will re-allocate it, so we can free it for now.
- */
+ if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
@@ -1199,7 +1184,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
- iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
+ if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
+ tid == IWL_MAX_TID_COUNT ? 0 : tid))
+ goto drop;
return 0;
@@ -1207,6 +1194,7 @@ drop_unlock_sta:
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
drop:
+ IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid);
return -1;
}
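
The relocated iwl_mvm_max_amsdu_size() above caps each A-MSDU at the smaller of the peer's advertised limit and the transmit FIFO size minus a 256-byte margin for the TX command and headers, so the start of the next packet still fits in the FIFO. A small sketch of that calculation; the FIFO size and advertised limit used below are made-up example numbers, not values from the driver:

#include <stdio.h>

/* Smaller of the station's advertised A-MSDU limit and what fits in
 * the TX FIFO once 256 bytes are reserved for the TX command/headers. */
static unsigned int max_amsdu_size(unsigned int sta_max_amsdu_len,
				   unsigned int txfifo_size)
{
	unsigned int fifo_limit = txfifo_size - 256;

	return sta_max_amsdu_len < fifo_limit ? sta_max_amsdu_len : fifo_limit;
}

int main(void)
{
	/* e.g. a peer advertising 7935-byte A-MSDUs and a 5 KiB FIFO */
	printf("A-MSDU cap: %u bytes\n", max_amsdu_size(7935, 5 * 1024));
	return 0;
}
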
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index d116c6ae18ff..4649327abb45 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -248,7 +248,7 @@ void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
le16_to_cpu(err_resp->bad_cmd_seq_num),
le32_to_cpu(err_resp->error_service));
- IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
+ IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
le64_to_cpu(err_resp->timestamp));
}
@@ -294,6 +294,7 @@ static const struct {
{ "SYSASSERT", 0x35 },
{ "UCODE_VERSION_MISMATCH", 0x37 },
{ "BAD_COMMAND", 0x38 },
+ { "BAD_COMMAND", 0x39 },
{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
{ "FATAL_ERROR", 0x3D },
{ "NMI_TRM_HW_ERR", 0x46 },
@@ -456,12 +457,17 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_umac_error_event_table table;
+ u32 base = mvm->trans->umac_error_event_table;
- if (!mvm->support_umac_log)
+ if (!mvm->support_umac_log &&
+ !(mvm->trans->error_event_table_tlv_status &
+ IWL_ERROR_EVENT_TABLE_UMAC))
return;
- iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
- sizeof(table));
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (table.valid)
+ mvm->fwrt.dump.umac_err_id = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -486,11 +492,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
-static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
+static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
- u32 val;
+ u32 val, base = mvm->trans->lmac_error_event_table[lmac_num];
if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
if (!base)
@@ -519,29 +525,15 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
/* reset the device */
iwl_trans_sw_reset(trans);
- /* set INIT_DONE flag */
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
-
- /* and wait for clock stabilization */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- udelay(2);
-
- err = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- 25000);
- if (err < 0) {
- IWL_DEBUG_INFO(trans,
- "Failed to reset the card for the dump\n");
+ err = iwl_finish_nic_init(trans);
+ if (err)
return;
- }
}
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (table.valid)
- mvm->fwrt.dump.rt_status = table.error_id;
+ mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -598,10 +590,10 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
return;
}
- iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
+ iwl_mvm_dump_lmac_error_log(mvm, 0);
- if (mvm->error_event_table[1])
- iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
+ if (mvm->trans->lmac_error_event_table[1])
+ iwl_mvm_dump_lmac_error_log(mvm, 1);
iwl_mvm_dump_umac_error_log(mvm);
}
@@ -1133,19 +1125,14 @@ static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
"AP isn't using AMPDU with uAPSD enabled");
}
-static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- int *mac_id = data;
if (vif->type != NL80211_IFTYPE_STATION)
return;
- if (mvmvif->id != *mac_id)
- return;
-
if (!vif->bss_conf.assoc)
return;
@@ -1155,10 +1142,10 @@ static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
!mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
return;
- if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected)
+ if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
return;
- mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true;
+ mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
IWL_INFO(mvm,
"detected AP should do aggregation but isn't, likely due to U-APSD\n");
schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
@@ -1171,6 +1158,7 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
u64 tpt;
unsigned long rate;
+ struct ieee80211_vif *vif;
rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
@@ -1199,9 +1187,11 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
return;
}
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_uapsd_agg_disconnect_iter, &mac);
+ rcu_read_lock();
+ vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
+ if (vif)
+ iwl_mvm_uapsd_agg_disconnect(mvm, vif);
+ rcu_read_unlock();
}
static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
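
The format-string change in iwl_mvm_rx_fw_error() above affects how the firmware error timestamp reads in the log: "%16llX" pads the 64-bit value on the left with spaces, while "%016llX" zero-pads it to a fixed 16-digit hex field. A tiny sketch of the difference; the timestamp value is an arbitrary example:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t timestamp = 0x1A2BULL;	/* arbitrary example value */

	/* width 16, space padded vs. width 16, zero padded */
	printf("old: 0x%16"  PRIX64 "\n", timestamp);
	printf("new: 0x%016" PRIX64 "\n", timestamp);
	return 0;
}
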
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index ceb3aa03d561..1e36459948db 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
void *iml_img;
u32 control_flags = 0;
int ret;
+ int cmdq_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
@@ -151,7 +152,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
- cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
+ cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
ctxt_info_gen3->mcr_size =
cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
@@ -175,8 +176,13 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_write64(trans, CSR_IML_DATA_ADDR,
trans_pcie->iml_dma_addr);
iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
- iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
- iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+
+ iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
+ CSR_AUTO_FUNC_BOOT_ENA);
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+ else
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 7f4aaa810ea1..9274e317cc77 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -59,8 +59,7 @@
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ struct iwl_self_init_dram *dram = &trans->init_dram;
int i;
if (!dram->paging) {
@@ -83,8 +82,7 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
struct iwl_context_info_dram *ctxt_dram)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ struct iwl_self_init_dram *dram = &trans->init_dram;
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
if (WARN(dram->paging,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 353581ccc01e..2b94e4cef56c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -8,7 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -513,10 +513,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -525,23 +527,26 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -550,26 +555,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
@@ -593,24 +600,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -619,6 +630,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
@@ -628,17 +640,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -656,17 +668,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)},
@@ -684,18 +696,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
@@ -704,24 +717,25 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -739,17 +753,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -758,6 +773,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
@@ -767,19 +783,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -805,18 +821,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -825,6 +842,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
@@ -834,17 +852,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -862,41 +880,98 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)},
/* 22000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+
+ {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+
+ {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
+
+ {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_so_hr_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_so_hr_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax210_2ax_cfg_so_hr_a0)},
#endif /* CONFIG_IWLMVM */
@@ -949,7 +1024,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rf_id_chp == jf_chp_id) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
- cfg = &iwl22000_2ax_cfg_qnj_jf_b0;
+ cfg = &iwl9560_2ac_cfg_qnj_jf_b0;
else
cfg = &iwl22000_2ac_cfg_jf;
} else if (rf_id_chp == hr_chp_id) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d6fc6ce73e0a..bf8b61a476c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,12 +1,14 @@
/******************************************************************************
*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__
@@ -364,6 +400,8 @@ struct iwl_txq {
u32 id;
int low_mark;
int high_mark;
+
+ bool overflow_tx;
};
static inline dma_addr_t
@@ -418,20 +456,6 @@ enum iwl_image_response_code {
};
/**
- * struct iwl_self_init_dram - dram data used by self init process
- * @fw: lmac and umac dram data
- * @fw_cnt: total number of items in array
- * @paging: paging dram data
- * @paging_cnt: total number of items in array
- */
-struct iwl_self_init_dram {
- struct iwl_dram_data *fw;
- int fw_cnt;
- struct iwl_dram_data *paging;
- int paging_cnt;
-};
-
-/**
* struct cont_rec: continuous recording data structure
* @prev_wr_ptr: the last address that was read in monitor_data
* debugfs file
@@ -502,6 +526,8 @@ struct cont_rec {
* @fh_mask: current unmasked fh causes
* @hw_mask: current unmasked hw causes
* @in_rescan: true if we have triggered a device rescan
+ * @base_rb_stts: base virtual address of receive buffer status for all queues
+ * @base_rb_stts_dma: base physical address of receive buffer status
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -518,7 +544,6 @@ struct iwl_trans_pcie {
dma_addr_t prph_info_dma_addr;
dma_addr_t prph_scratch_dma_addr;
dma_addr_t iml_dma_addr;
- struct iwl_self_init_dram init_dram;
struct iwl_trans *trans;
struct net_device napi_dev;
@@ -594,6 +619,9 @@ struct iwl_trans_pcie {
cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
u16 tx_cmd_queue_size;
bool in_rescan;
+
+ void *base_rb_stts;
+ dma_addr_t base_rb_stts_dma;
};
static inline struct iwl_trans_pcie *
@@ -777,8 +805,7 @@ static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ struct iwl_self_init_dram *dram = &trans->init_dram;
int i;
if (!dram->fw) {
@@ -1016,6 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
+void iwl_trans_sync_nmi(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
@@ -1029,8 +1057,6 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
-void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
-
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common functions that are used by gen2 transport */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e965cc588850..8d4f0628622b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,12 +1,14 @@
/******************************************************************************
*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
@@ -166,9 +202,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
/* TODO: remove this for 22560 once fw does it */
- iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
- return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
- RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+ return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
} else if (trans->cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
@@ -211,7 +247,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
iwl_write32(trans, HBUS_TARG_WRPTR,
(rxq->write_actual |
((FIRST_RX_QUEUE + rxq->id) << 16)));
@@ -256,6 +292,9 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
}
+
+ IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
+ (u32)rxb->vid, rxq->id, rxq->write);
}
/*
@@ -499,9 +538,9 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct list_head local_empty;
- int pending = atomic_xchg(&rba->req_pending, 0);
+ int pending = atomic_read(&rba->req_pending);
- IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+ IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
/* If we were scheduled - there is at least one request */
spin_lock(&rba->lock);
@@ -554,12 +593,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
i++;
}
+ atomic_dec(&rba->req_pending);
pending--;
+
if (!pending) {
- pending = atomic_xchg(&rba->req_pending, 0);
- IWL_DEBUG_RX(trans,
- "Pending allocation requests = %d\n",
- pending);
+ pending = atomic_read(&rba->req_pending);
+ if (pending)
+ IWL_DEBUG_TPT(trans,
+ "Got more pending allocation requests = %d\n",
+ pending);
}
spin_lock(&rba->lock);
@@ -570,12 +612,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
spin_unlock(&rba->lock);
atomic_inc(&rba->req_ready);
+
}
spin_lock(&rba->lock);
/* return unused rbds to the allocator empty list */
list_splice_tail(&local_empty, &rba->rbd_empty);
spin_unlock(&rba->lock);
+
+ IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
/*
@@ -657,11 +702,6 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
rxq->bd_dma = 0;
rxq->bd = NULL;
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- use_rx_td ? sizeof(__le16) :
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
rxq->rb_stts_dma = 0;
rxq->rb_stts = NULL;
@@ -698,6 +738,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
int free_size;
bool use_rx_td = (trans->cfg->device_family >=
IWL_DEVICE_FAMILY_22560);
+ size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
+ sizeof(struct iwl_rb_status);
spin_lock_init(&rxq->lock);
if (trans->cfg->mq_rx_supported)
@@ -711,47 +753,36 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
* Allocate the circular buffer of Read Buffer Descriptors
* (RBDs)
*/
- rxq->bd = dma_zalloc_coherent(dev,
- free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
+ rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
if (!rxq->bd)
goto err;
if (trans->cfg->mq_rx_supported) {
- rxq->used_bd = dma_zalloc_coherent(dev,
- (use_rx_td ?
- sizeof(*rxq->cd) :
- sizeof(__le32)) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
+ rxq->used_bd = dma_alloc_coherent(dev,
+ (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+ &rxq->used_bd_dma,
+ GFP_KERNEL);
if (!rxq->used_bd)
goto err;
}
- /* Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
- sizeof(__le16) :
- sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma,
- GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err;
+ rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+ rxq->rb_stts_dma =
+ trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
if (!use_rx_td)
return 0;
/* Allocate the driver's pointer to TR tail */
- rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
- &rxq->tr_tail_dma,
- GFP_KERNEL);
+ rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+ &rxq->tr_tail_dma, GFP_KERNEL);
if (!rxq->tr_tail)
goto err;
/* Allocate the driver's pointer to CR tail */
- rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
- &rxq->cr_tail_dma,
- GFP_KERNEL);
+ rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
+ &rxq->cr_tail_dma, GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
/*
@@ -768,7 +799,6 @@ err:
iwl_pcie_free_rxq_dma(trans, rxq);
}
- kfree(trans_pcie->rxq);
return -ENOMEM;
}
@@ -778,6 +808,9 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, ret;
+ size_t rb_stts_size = trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560 ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status);
if (WARN_ON(trans_pcie->rxq))
return -EINVAL;
@@ -785,18 +818,46 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans)
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL);
if (!trans_pcie->rxq)
- return -EINVAL;
+ return -ENOMEM;
spin_lock_init(&rba->lock);
+ /*
+ * Allocate the driver's pointer to receive buffer status.
+ * Allocate for all queues contiguously (HW requirement).
+ */
+ trans_pcie->base_rb_stts =
+ dma_alloc_coherent(trans->dev,
+ rb_stts_size * trans->num_rx_queues,
+ &trans_pcie->base_rb_stts_dma,
+ GFP_KERNEL);
+ if (!trans_pcie->base_rb_stts) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+ rxq->id = i;
ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
if (ret)
- return ret;
+ goto err;
}
return 0;
+
+err:
+ if (trans_pcie->base_rb_stts) {
+ dma_free_coherent(trans->dev,
+ rb_stts_size * trans->num_rx_queues,
+ trans_pcie->base_rb_stts,
+ trans_pcie->base_rb_stts_dma);
+ trans_pcie->base_rb_stts = NULL;
+ trans_pcie->base_rb_stts_dma = 0;
+ }
+ kfree(trans_pcie->rxq);
+
+ return ret;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
@@ -868,30 +929,6 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
-void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
-{
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
- return;
-
- if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
- return;
-
- if (!trans->cfg->integrated)
- return;
-
- /*
- * Turn on the chicken-bits that cause MAC wakeup for RX-related
- * values.
- * This costs some power, but needed for W/A 9000 integrated A-step
- * bug where shadow registers are not in the retention list and their
- * value is lost when NIC powers down
- */
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
- CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
- CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
-}
-
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -979,8 +1016,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- iwl_pcie_enable_rx_wake(trans, true);
}
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -1031,8 +1066,6 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- rxq->id = i;
-
spin_lock(&rxq->lock);
/*
* Set read write pointer to reflect that we have processed
@@ -1119,6 +1152,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
+ size_t rb_stts_size = trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560 ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status);
/*
* if rxq is NULL, it means that nothing has been allocated,
@@ -1133,6 +1169,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
iwl_pcie_free_rbs_pool(trans);
+ if (trans_pcie->base_rb_stts) {
+ dma_free_coherent(trans->dev,
+ rb_stts_size * trans->num_rx_queues,
+ trans_pcie->base_rb_stts,
+ trans_pcie->base_rb_stts_dma);
+ trans_pcie->base_rb_stts = NULL;
+ trans_pcie->base_rb_stts_dma = 0;
+ }
+
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
@@ -1368,6 +1413,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
if (rxb->invalid)
goto out_err;
+ IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
+
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
@@ -1417,13 +1464,17 @@ restart:
!emergency)) {
iwl_pcie_rx_move_to_allocator(rxq, rba);
emergency = true;
+ IWL_DEBUG_TPT(trans,
+ "RX path is in emergency. Pending allocations %d\n",
+ rb_pending_alloc);
}
+ IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
+
rxb = iwl_pcie_get_rxb(trans, rxq, i);
if (!rxb)
goto out;
- IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
i = (i + 1) & (rxq->queue_size - 1);
@@ -1445,8 +1496,12 @@ restart:
count++;
if (count == 8) {
count = 0;
- if (rb_pending_alloc < rxq->queue_size / 3)
+ if (rb_pending_alloc < rxq->queue_size / 3) {
+ IWL_DEBUG_TPT(trans,
+ "RX path exited emergency. Pending allocations %d\n",
+ rb_pending_alloc);
emergency = false;
+ }
rxq->read = i;
spin_unlock(&rxq->lock);
@@ -1947,9 +2002,8 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_zalloc_coherent(trans->dev, ICT_SIZE,
- &trans_pcie->ict_tbl_dma,
- GFP_KERNEL);
+ dma_alloc_coherent(trans->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma, GFP_KERNEL);
if (!trans_pcie->ict_tbl)
return -ENOMEM;
@@ -2113,7 +2167,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
/* Reflect IML transfer status */
int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
@@ -2132,6 +2186,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
isr_stats->wakeup++;
}
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
+ /* Reflect IML transfer status */
+ int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+
+ IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
+ if (res == IWL_IMAGE_RESP_FAIL) {
+ isr_stats->sw++;
+ iwl_pcie_irq_handle_error(trans);
+ }
+ }
+
/* Chip got too hot and stopped itself */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
IWL_ERR(trans, "Microcode CT kill error detected.\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 77f3610e5ca9..9c203ca75de9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -92,26 +92,9 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_write_prph()
- * and accesses to uCode SRAM.
- */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- 25000);
- if (ret < 0) {
- IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
return ret;
- }
set_bit(STATUS_DEVICE_ENABLED, &trans->status);
@@ -188,7 +171,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
}
iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
iwl_pcie_ctxt_info_gen3_free(trans);
else
iwl_pcie_ctxt_info_free(trans);
@@ -251,6 +234,7 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
/* TODO: most of the logic can be removed in A0 - but not in Z0 */
spin_lock(&trans_pcie->irq_lock);
@@ -264,7 +248,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, TFD_CMD_SLOTS))
+ if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@@ -349,7 +333,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
else
ret = iwl_pcie_ctxt_info_init(trans, fw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f97aea5ffc44..fe8269d023de 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -8,7 +8,7 @@
* Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -364,26 +364,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
if (trans->cfg->base_params->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_write_prph()
- * and accesses to uCode SRAM.
- */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- 25000);
- if (ret < 0) {
- IWL_ERR(trans, "Failed to init the card\n");
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
return ret;
- }
if (trans->cfg->host_interrupt_operation_mode) {
/*
@@ -453,23 +436,8 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
iwl_trans_pcie_sw_reset(trans);
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is possible.
- */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- 25000);
- if (WARN_ON(ret < 0)) {
- IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+ ret = iwl_finish_nic_init(trans);
+ if (WARN_ON(ret)) {
/* Release XTAL ON request */
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
@@ -928,13 +896,13 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
if (!trans->num_blocks)
return;
- iwl_write_prph(trans, MON_BUFF_BASE_ADDR_VER2,
- trans->fw_mon[0].physical >>
- MON_BUFF_SHIFT_VER2);
- iwl_write_prph(trans, MON_BUFF_END_ADDR_VER2,
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size - 256) >>
- MON_BUFF_SHIFT_VER2);
+ iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
+ trans->fw_mon[0].physical >>
+ MON_BUFF_SHIFT_VER2);
+ iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
+ (trans->fw_mon[0].physical +
+ trans->fw_mon[0].size - 256) >>
+ MON_BUFF_SHIFT_VER2);
return;
}
@@ -1126,6 +1094,7 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
{MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+ {MSIX_HW_INT_CAUSES_REG_IML, CSR_MSIX_HW_INT_MASK_AD, 0x12},
{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
@@ -1158,7 +1127,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
int i, arr_size =
- (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
/*
@@ -1168,7 +1137,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
*/
for (i = 0; i < arr_size; i++) {
struct iwl_causes_list *causes =
- (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
causes_list : causes_list_v2;
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
@@ -1214,8 +1183,8 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- iwl_write_prph(trans, UREG_CHICK,
- UREG_CHICK_MSI_ENABLE);
+ iwl_write_umac_prph(trans, UREG_CHICK,
+ UREG_CHICK_MSI_ENABLE);
return;
}
/*
@@ -1224,7 +1193,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
* prph.
*/
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
+ iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
* Each cause from the causes list above and the RX causes is
@@ -1530,8 +1499,6 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_init_done));
- iwl_pcie_enable_rx_wake(trans, false);
-
if (reset) {
/*
* reset TX queues -- some of their registers reset during S3
@@ -1558,24 +1525,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
- iwl_pcie_enable_rx_wake(trans, true);
-
iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
- udelay(2);
-
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- 25000);
- if (ret < 0) {
- IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
return ret;
- }
/*
* Reconfigure IVAR table in case of MSIX or reset ict table in
@@ -1606,7 +1561,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
}
IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
- iwl_read_prph(trans, WFPM_GP2));
+ iwl_read_umac_prph(trans, WFPM_GP2));
val = iwl_read32(trans, CSR_RESET);
if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
@@ -1755,15 +1710,18 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
return err;
}
- hpm = iwl_trans_read_prph(trans, HPM_DEBUG);
+ hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
- if (iwl_trans_read_prph(trans, PREG_PRPH_WPROT_0) &
- PREG_WFPM_ACCESS) {
+ int wfpm_val = iwl_read_umac_prph_no_grab(trans,
+ PREG_PRPH_WPROT_0);
+
+ if (wfpm_val & PREG_WFPM_ACCESS) {
IWL_ERR(trans,
"Error, can not clear persistence bit\n");
return -EPERM;
}
- iwl_trans_write_prph(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT);
+ iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
+ hpm & ~PERSISTENCE_BIT);
}
iwl_trans_pcie_sw_reset(trans);
@@ -1968,7 +1926,7 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
struct iwl_trans_pcie_removal *removal =
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
- char *prop[] = {"EVENT=INACCESSIBLE", NULL};
+ static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
dev_err(&pdev->dev, "Device gone - attempting removal\n");
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
@@ -2285,6 +2243,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
unsigned long now = jiffies;
+ bool overflow_tx;
u8 wr_ptr;
/* Make sure the NIC is still alive in the bus */
@@ -2296,18 +2255,37 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
txq = trans_pcie->txq[txq_idx];
+
+ spin_lock_bh(&txq->lock);
+ overflow_tx = txq->overflow_tx ||
+ !skb_queue_empty(&txq->overflow_q);
+ spin_unlock_bh(&txq->lock);
+
wr_ptr = READ_ONCE(txq->write_ptr);
- while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
+ while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
+ overflow_tx) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
u8 write_ptr = READ_ONCE(txq->write_ptr);
- if (WARN_ONCE(wr_ptr != write_ptr,
+ /*
+ * If write pointer moved during the wait, warn only
+ * if the TX came from op mode. In case TX came from
+ * trans layer (overflow TX) don't warn.
+ */
+ if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
"WR pointer moved while flushing %d -> %d\n",
wr_ptr, write_ptr))
return -ETIMEDOUT;
+ wr_ptr = write_ptr;
+
usleep_range(1000, 2000);
+
+ spin_lock_bh(&txq->lock);
+ overflow_tx = txq->overflow_tx ||
+ !skb_queue_empty(&txq->overflow_q);
+ spin_unlock_bh(&txq->lock);
}
if (txq->read_ptr != txq->write_ptr) {
@@ -2993,7 +2971,8 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
i += sizeof(u32))
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
else
- for (i = FH_MEM_LOWER_BOUND_GEN2; i < FH_MEM_UPPER_BOUND_GEN2;
+ for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
+ i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
i += sizeof(u32))
*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
i));
@@ -3018,11 +2997,11 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
if (!iwl_trans_grab_nic_access(trans, &flags))
return 0;
- iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
+ iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
for (i = 0; i < buf_size_in_dwords; i++)
- buffer[i] = iwl_read_prph_no_grab(trans,
- MON_DMARB_RD_DATA_ADDR);
- iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
+ buffer[i] = iwl_read_umac_prph_no_grab(trans,
+ MON_DMARB_RD_DATA_ADDR);
+ iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
iwl_trans_release_nic_access(trans, &flags);
@@ -3037,9 +3016,9 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
/* If there was a dest TLV - use the values from there */
if (trans->ini_valid) {
- base = MON_BUFF_BASE_ADDR_VER2;
- write_ptr = MON_BUFF_WRPTR_VER2;
- wrap_cnt = MON_BUFF_CYCLE_CNT_VER2;
+ base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2);
+ write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2);
+ wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2);
} else if (trans->dbg_dest_tlv) {
write_ptr = le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
@@ -3118,7 +3097,7 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
return len;
}
-static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
+static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
if (trans->num_blocks) {
*len += sizeof(struct iwl_fw_error_dump_data) +
@@ -3173,8 +3152,7 @@ static struct iwl_trans_dump_data
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
- u32 len, num_rbs = 0;
- u32 monitor_len;
+ u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
!trans->cfg->mq_rx_supported &&
@@ -3191,19 +3169,8 @@ static struct iwl_trans_dump_data
cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
- monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
-
- if (dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) {
- dump_data = vzalloc(len);
- if (!dump_data)
- return NULL;
-
- data = (void *)dump_data->data;
- len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
- dump_data->len = len;
-
- return dump_data;
- }
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
/* CSR registers */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
@@ -3213,8 +3180,8 @@ static struct iwl_trans_dump_data
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
if (trans->cfg->gen2)
len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND_GEN2 -
- FH_MEM_LOWER_BOUND_GEN2);
+ (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
+ iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
else
len += sizeof(*data) +
(FH_MEM_UPPER_BOUND -
@@ -3236,10 +3203,10 @@ static struct iwl_trans_dump_data
/* Paged memory for gen2 HW */
if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
- for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
+ for (i = 0; i < trans->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
- trans_pcie->init_dram.paging[i].size;
+ trans->init_dram.paging[i].size;
dump_data = vzalloc(len);
if (!dump_data)
@@ -3291,20 +3258,16 @@ static struct iwl_trans_dump_data
/* Paged memory for gen2 HW */
if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
- for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
+ for (i = 0; i < trans->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
- dma_addr_t addr =
- trans_pcie->init_dram.paging[i].physical;
- u32 page_len = trans_pcie->init_dram.paging[i].size;
+ u32 page_len = trans->init_dram.paging[i].size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
data->len = cpu_to_le32(sizeof(*paging) + page_len);
paging = (void *)data->data;
paging->index = cpu_to_le32(i);
- dma_sync_single_for_cpu(trans->dev, addr, page_len,
- DMA_BIDIRECTIONAL);
memcpy(paging->data,
- trans_pcie->init_dram.paging[i].block, page_len);
+ trans->init_dram.paging[i].block, page_len);
data = iwl_fw_error_next_data(data);
len += sizeof(*data) + sizeof(*paging) + page_len;
@@ -3541,25 +3504,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* in-order to recognize C step driver should read chip version
* id located at the AUX bus MISC address space.
*/
- iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
- udelay(2);
-
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- BIT(trans->cfg->csr->flag_mac_clock_ready),
- 25000);
- if (ret < 0) {
- IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
goto out_no_pci;
- }
if (iwl_trans_grab_nic_access(trans, &flags)) {
u32 hw_step;
- hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
+ hw_step = iwl_read_umac_prph_no_grab(trans,
+ WFPM_CTRL_REG);
hw_step |= ENABLE_WFPM;
- iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
+ iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG,
+ hw_step);
hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
if (hw_step == 0x3)
@@ -3569,24 +3525,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
- /*
- * 9000-series integrated A-step has a problem with suspend/resume
- * and sometimes even causes the whole platform to get stuck. This
- * workaround makes the hardware not go into the problematic state.
- */
- if (trans->cfg->integrated &&
- trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)
- iwl_set_bit(trans, CSR_HOST_CHICKEN,
- CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME);
+ IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
- if (cfg == &iwl22000_2ax_cfg_hr) {
+ if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
+ if (trans->hw_rev == CSR_HW_REV_TYPE_TY) {
+ trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
+ trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
+ trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
+ }
+ } else if (cfg == &iwl_ax101_cfg_qu_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- trans->cfg = &iwl22000_2ax_cfg_hr;
+ trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
trans->cfg = &iwl22000_2ax_cfg_jf;
@@ -3602,7 +3559,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ (trans->cfg != &iwl22260_2ax_cfg ||
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
@@ -3677,3 +3636,28 @@ out_no_pci:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
+
+void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+ unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+
+ iwl_disable_interrupts(trans);
+ iwl_force_nmi(trans);
+ while (time_after(timeout, jiffies)) {
+ u32 inta_hw = iwl_read32(trans,
+ CSR_MSIX_HW_INT_CAUSES_AD);
+
+ /* Error detected by uCode */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) {
+ /* Clear causes register */
+ iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
+ inta_hw &
+ MSIX_HW_INT_CAUSES_REG_SW_ERR);
+ break;
+ }
+
+ mdelay(1);
+ }
+ iwl_enable_interrupts(trans);
+ iwl_trans_fw_error(trans);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 156ca1b1f621..88530d9f4a54 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -214,7 +214,11 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+ struct iwl_tfh_tb *tb;
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
/* Each TFD can point to a maximum max_tbs Tx buffers */
if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
@@ -408,7 +412,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
goto out_err;
/* building the A-MSDU might have changed this data, memcpy it now */
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
return tfd;
out_err:
@@ -469,7 +473,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
/* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
@@ -834,14 +838,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
- memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
+ memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
tb0_size);
/* map first command fragment, if any remains */
if (copy_size > tb0_size) {
phys_addr = dma_map_single(trans->dev,
- ((u8 *)&out_cmd->hdr) + tb0_size,
+ (u8 *)out_cmd + tb0_size,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
@@ -961,9 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
cmd_str);
ret = -ETIMEDOUT;
- iwl_force_nmi(trans);
- iwl_trans_fw_error(trans);
-
+ iwl_trans_sync_nmi(trans);
goto cancel;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index ee990a7a5411..9fbd37d23e85 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,12 +1,14 @@
/******************************************************************************
*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
@@ -959,7 +995,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
- slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ if (cmd_queue)
+ slots_num = max_t(u32, TFD_CMD_SLOTS,
+ trans->cfg->min_txq_size);
+ else
+ slots_num = TFD_TX_CMD_SLOTS;
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
@@ -1008,7 +1048,11 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
- slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ if (cmd_queue)
+ slots_num = max_t(u32, TFD_CMD_SLOTS,
+ trans->cfg->min_txq_size);
+ else
+ slots_num = TFD_TX_CMD_SLOTS;
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
if (ret) {
@@ -1138,6 +1182,15 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
/*
+ * We are going to transmit from the overflow queue.
+ * Remember this state so that wait_for_txq_empty will know we
+ * are adding more packets to the TFD queue. It cannot rely on
+ * the state of &txq->overflow_q, as we just emptied it, but
+ * haven't TXed the content yet.
+ */
+ txq->overflow_tx = true;
+
+ /*
* This is tricky: we are in the reclaim path, which is not
* re-entrant, so no one will try to access the
* txq data from that path. We stopped tx, so we can't
@@ -1165,6 +1218,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_wake_queue(trans, txq);
spin_lock_bh(&txq->lock);
+ txq->overflow_tx = false;
}
if (txq->read_ptr == txq->write_ptr) {
@@ -1906,9 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id));
ret = -ETIMEDOUT;
- iwl_force_nmi(trans);
- iwl_trans_fw_error(trans);
-
+ iwl_trans_sync_nmi(trans);
goto cancel;
}
@@ -2438,8 +2490,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* building the A-MSDU might have changed this data, so memcpy it now */
- memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index 709d9ab3e7bc..67b0c05afbdb 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -18,16 +18,16 @@ int orinoco_mic_init(struct orinoco_private *priv)
{
priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->tx_tfm_mic)) {
- printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
- "crypto API michael_mic\n");
+ printk(KERN_DEBUG "%s: could not allocate "
+ "crypto API michael_mic\n", __func__);
priv->tx_tfm_mic = NULL;
return -ENOMEM;
}
priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->rx_tfm_mic)) {
- printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
- "crypto API michael_mic\n");
+ printk(KERN_DEBUG "%s: could not allocate "
+ "crypto API michael_mic\n", __func__);
priv->rx_tfm_mic = NULL;
return -ENOMEM;
}
@@ -52,7 +52,7 @@ int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
int err;
if (tfm_michael == NULL) {
- printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
+ printk(KERN_WARNING "%s: tfm_michael == NULL\n", __func__);
return -1;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3a4b8786f7ea..0838af04d681 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1273,10 +1273,12 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
* probably doesn't really matter.
*/
if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))
+ ieee80211_is_probe_resp(hdr->frame_control)) {
+ rx_status.boottime_ns = ktime_get_boot_ns();
now = data->abs_bcn_ts;
- else
+ } else {
now = mac80211_hwsim_get_tsf_raw();
+ }
/* Copy skb to all enabled radios that are on the current frequency */
spin_lock(&hwsim_radio_lock);
@@ -2761,6 +2763,11 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
BIT(NL80211_CHAN_WIDTH_160);
}
+ if (!n_limits) {
+ err = -EINVAL;
+ goto failed_hw;
+ }
+
data->if_combination.n_limits = n_limits;
data->if_combination.max_interfaces = 2048;
data->if_combination.limits = data->if_limits;
@@ -2794,6 +2801,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, TDLS_WIDER_BW);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
@@ -3549,7 +3557,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
goto out_err;
}
- genlmsg_reply(skb, info);
+ res = genlmsg_reply(skb, info);
break;
}
diff --git a/drivers/net/wireless/marvell/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c
index c83f44f9ddf1..fe14814af300 100644
--- a/drivers/net/wireless/marvell/libertas/debugfs.c
+++ b/drivers/net/wireless/marvell/libertas/debugfs.c
@@ -708,8 +708,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
goto exit;
priv->debugfs_dir = debugfs_create_dir(dev->name, lbs_dir);
- if (!priv->debugfs_dir)
- goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_files); i++) {
files = &debugfs_files[i];
@@ -721,8 +719,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
}
priv->events_dir = debugfs_create_dir("subscribed_events", priv->debugfs_dir);
- if (!priv->events_dir)
- goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_events_files); i++) {
files = &debugfs_events_files[i];
@@ -734,8 +730,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
}
priv->regs_dir = debugfs_create_dir("registers", priv->debugfs_dir);
- if (!priv->regs_dir)
- goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_regs_files); i++) {
files = &debugfs_regs_files[i];
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index b0cb16ef8d1d..2315fdff56c2 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -797,7 +797,12 @@ static void lbs_persist_config_init(struct net_device *dev)
{
int ret;
ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
+ if (ret)
+ pr_err("failed to create boot_opts_group.\n");
+
ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
+ if (ret)
+ pr_err("failed to create mesh_ie_group.\n");
}
static void lbs_persist_config_remove(struct net_device *dev)
diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c
index 909ac3685010..130f578daafd 100644
--- a/drivers/net/wireless/marvell/libertas_tf/cmd.c
+++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c
@@ -256,7 +256,7 @@ static void lbtf_submit_command(struct lbtf_private *priv,
command, le16_to_cpu(cmd->seqnum), cmdsize);
lbtf_deb_hex(LBTF_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize);
- ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
+ ret = priv->ops->hw_host_to_card(priv, MVMS_CMD, (u8 *)cmd, cmdsize);
spin_unlock_irqrestore(&priv->driver_lock, flags);
if (ret) {
@@ -737,10 +737,9 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
respcmd = le16_to_cpu(resp->command);
result = le16_to_cpu(resp->result);
- if (net_ratelimit())
- pr_info("libertastf: cmd response 0x%04x, seq %d, size %d\n",
- respcmd, le16_to_cpu(resp->seqnum),
- le16_to_cpu(resp->size));
+ lbtf_deb_cmd("libertastf: cmd response 0x%04x, seq %d, size %d\n",
+ respcmd, le16_to_cpu(resp->seqnum),
+ le16_to_cpu(resp->size));
if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
spin_unlock_irqrestore(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 789337ea676a..a4b9ede70705 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -42,14 +42,14 @@ MODULE_DEVICE_TABLE(usb, if_usb_table);
static void if_usb_receive(struct urb *urb);
static void if_usb_receive_fwload(struct urb *urb);
-static int if_usb_prog_firmware(struct if_usb_card *cardp);
+static int if_usb_prog_firmware(struct lbtf_private *priv);
static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
uint8_t *payload, uint16_t nb);
static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
uint16_t nb, u8 data);
static void if_usb_free(struct if_usb_card *cardp);
static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
-static int if_usb_reset_device(struct if_usb_card *cardp);
+static int if_usb_reset_device(struct lbtf_private *priv);
/**
* if_usb_write_bulk_callback - callback to handle URB status
@@ -131,6 +131,12 @@ static void if_usb_fw_timeo(struct timer_list *t)
lbtf_deb_leave(LBTF_DEB_USB);
}
+static const struct lbtf_ops if_usb_ops = {
+ .hw_host_to_card = if_usb_host_to_card,
+ .hw_prog_firmware = if_usb_prog_firmware,
+ .hw_reset_device = if_usb_reset_device,
+};
+
/**
* if_usb_probe - sets the configuration values
*
@@ -216,17 +222,11 @@ static int if_usb_probe(struct usb_interface *intf,
goto dealloc;
}
- priv = lbtf_add_card(cardp, &udev->dev);
+ cardp->boot2_version = udev->descriptor.bcdDevice;
+ priv = lbtf_add_card(cardp, &udev->dev, &if_usb_ops);
if (!priv)
goto dealloc;
- cardp->priv = priv;
-
- priv->hw_host_to_card = if_usb_host_to_card;
- priv->hw_prog_firmware = if_usb_prog_firmware;
- priv->hw_reset_device = if_usb_reset_device;
- cardp->boot2_version = udev->descriptor.bcdDevice;
-
usb_get_dev(udev);
usb_set_intfdata(intf, cardp);
@@ -251,7 +251,7 @@ static void if_usb_disconnect(struct usb_interface *intf)
lbtf_deb_enter(LBTF_DEB_MAIN);
- if_usb_reset_device(cardp);
+ if_usb_reset_device(priv);
if (priv)
lbtf_remove_card(priv);
@@ -334,8 +334,9 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
return 0;
}
-static int if_usb_reset_device(struct if_usb_card *cardp)
+static int if_usb_reset_device(struct lbtf_private *priv)
{
+ struct if_usb_card *cardp = priv->card;
struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
int ret;
@@ -433,8 +434,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
skb_tail_pointer(skb),
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
- cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
-
lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
cardp->rx_urb);
ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
@@ -808,14 +807,17 @@ static int check_fwfile_format(const u8 *data, u32 totlen)
}
-static int if_usb_prog_firmware(struct if_usb_card *cardp)
+static int if_usb_prog_firmware(struct lbtf_private *priv)
{
+ struct if_usb_card *cardp = priv->card;
int i = 0;
static int reset_count = 10;
int ret = 0;
lbtf_deb_enter(LBTF_DEB_USB);
+ cardp->priv = priv;
+
kernel_param_lock(THIS_MODULE);
ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
if (ret < 0) {
@@ -851,7 +853,7 @@ restart:
if (cardp->bootcmdresp <= 0) {
if (--reset_count >= 0) {
- if_usb_reset_device(cardp);
+ if_usb_reset_device(priv);
goto restart;
}
return -1;
@@ -880,7 +882,7 @@ restart:
if (!cardp->fwdnldover) {
pr_info("failed to load fw, resetting device!\n");
if (--reset_count >= 0) {
- if_usb_reset_device(cardp);
+ if_usb_reset_device(priv);
goto restart;
}
@@ -889,8 +891,6 @@ restart:
goto release_fw;
}
- cardp->priv->fw_ready = 1;
-
release_fw:
release_firmware(cardp->fw);
cardp->fw = NULL;
diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
index ad77b92d0b41..3ed1fbe28798 100644
--- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
@@ -173,10 +173,19 @@ struct channel_range {
struct if_usb_card;
+struct lbtf_ops {
+ /** Hardware access */
+ int (*hw_host_to_card)(struct lbtf_private *priv, u8 type,
+ u8 *payload, u16 nb);
+ int (*hw_prog_firmware)(struct lbtf_private *priv);
+ int (*hw_reset_device)(struct lbtf_private *priv);
+};
+
/** Private structure for the MV device */
struct lbtf_private {
void *card;
struct ieee80211_hw *hw;
+ const struct lbtf_ops *ops;
/* Command response buffer */
u8 cmd_resp_buff[LBS_UPLD_SIZE];
@@ -188,11 +197,6 @@ struct lbtf_private {
struct work_struct cmd_work;
struct work_struct tx_work;
- /** Hardware access */
- int (*hw_host_to_card) (struct lbtf_private *priv, u8 type, u8 *payload, u16 nb);
- int (*hw_prog_firmware) (struct if_usb_card *cardp);
- int (*hw_reset_device) (struct if_usb_card *cardp);
-
/** Wlan adapter data structure*/
/** STATUS variables */
@@ -250,7 +254,6 @@ struct lbtf_private {
struct ieee80211_supported_band band;
struct lbtf_offset_value offsetvalue;
- u8 fw_ready;
u8 surpriseremoved;
struct sk_buff_head bc_ps_buf;
@@ -486,7 +489,8 @@ void lbtf_cmd_response_rx(struct lbtf_private *priv);
/* main.c */
struct chan_freq_power *lbtf_get_region_cfp_table(u8 region,
int *cfp_no);
-struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev);
+struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev,
+ const struct lbtf_ops *ops);
int lbtf_remove_card(struct lbtf_private *priv);
int lbtf_start_card(struct lbtf_private *priv);
int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 1d45da187b9b..5799e9886d83 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -118,11 +118,6 @@ static void lbtf_cmd_work(struct work_struct *work)
priv->cmd_timed_out = 0;
spin_unlock_irq(&priv->driver_lock);
- if (!priv->fw_ready) {
- lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready");
- return;
- }
-
/* Execute the next command */
if (!priv->cur_cmd)
lbtf_execute_next_command(priv);
@@ -131,37 +126,6 @@ static void lbtf_cmd_work(struct work_struct *work)
}
/**
- * lbtf_setup_firmware: initialize firmware.
- *
- * @priv A pointer to struct lbtf_private structure
- *
- * Returns: 0 on success.
- */
-static int lbtf_setup_firmware(struct lbtf_private *priv)
-{
- int ret = -1;
-
- lbtf_deb_enter(LBTF_DEB_FW);
- /*
- * Read priv address from HW
- */
- eth_broadcast_addr(priv->current_addr);
- ret = lbtf_update_hw_spec(priv);
- if (ret) {
- ret = -1;
- goto done;
- }
-
- lbtf_set_mac_control(priv);
- lbtf_set_radio_control(priv);
-
- ret = 0;
-done:
- lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret);
- return ret;
-}
-
-/**
* This function handles the timeout of command sending.
* It will re-send the same command again.
*/
@@ -281,7 +245,7 @@ static void lbtf_tx_work(struct work_struct *work)
BUG_ON(priv->tx_skb);
spin_lock_irq(&priv->driver_lock);
priv->tx_skb = skb;
- err = priv->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len);
+ err = priv->ops->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len);
spin_unlock_irq(&priv->driver_lock);
if (err) {
dev_kfree_skb_any(skb);
@@ -294,38 +258,17 @@ static void lbtf_tx_work(struct work_struct *work)
static int lbtf_op_start(struct ieee80211_hw *hw)
{
struct lbtf_private *priv = hw->priv;
- void *card = priv->card;
- int ret = -1;
lbtf_deb_enter(LBTF_DEB_MACOPS);
- if (!priv->fw_ready)
- /* Upload firmware */
- if (priv->hw_prog_firmware(card))
- goto err_prog_firmware;
-
- /* poke the firmware */
priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
priv->radioon = RADIO_ON;
priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
- ret = lbtf_setup_firmware(priv);
- if (ret)
- goto err_prog_firmware;
-
- if ((priv->fwrelease < LBTF_FW_VER_MIN) ||
- (priv->fwrelease > LBTF_FW_VER_MAX)) {
- ret = -1;
- goto err_prog_firmware;
- }
+ lbtf_set_mac_control(priv);
+ lbtf_set_radio_control(priv);
- printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
-
-err_prog_firmware:
- priv->hw_reset_device(card);
- lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programming fw; ret=%d", ret);
- return ret;
}
static void lbtf_op_stop(struct ieee80211_hw *hw)
@@ -551,11 +494,15 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
struct ieee80211_rx_status stats;
struct rxpd *prxpd;
int need_padding;
- unsigned int flags;
struct ieee80211_hdr *hdr;
lbtf_deb_enter(LBTF_DEB_RX);
+ if (priv->radioon != RADIO_ON) {
+ lbtf_deb_rx("rx before we turned on the radio");
+ goto done;
+ }
+
prxpd = (struct rxpd *) skb->data;
memset(&stats, 0, sizeof(stats));
@@ -563,7 +510,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
stats.flag |= RX_FLAG_FAILED_FCS_CRC;
stats.freq = priv->cur_freq;
stats.band = NL80211_BAND_2GHZ;
- stats.signal = prxpd->snr;
+ stats.signal = prxpd->snr - prxpd->nf;
priv->noise = prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
@@ -572,7 +519,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
skb_pull(skb, sizeof(struct rxpd));
hdr = (struct ieee80211_hdr *)skb->data;
- flags = le32_to_cpu(*(__le32 *)(skb->data + 4));
need_padding = ieee80211_is_data_qos(hdr->frame_control);
need_padding ^= ieee80211_has_a4(hdr->frame_control);
@@ -594,19 +540,21 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
ieee80211_rx_irqsafe(priv->hw, skb);
+done:
lbtf_deb_leave(LBTF_DEB_RX);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_rx);
/**
- * lbtf_add_card: Add and initialize the card, no fw upload yet.
+ * lbtf_add_card: Add and initialize the card.
*
* @card A pointer to card
*
* Returns: pointer to struct lbtf_priv.
*/
-struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
+struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev,
+ const struct lbtf_ops *ops)
{
struct ieee80211_hw *hw;
struct lbtf_private *priv = NULL;
@@ -623,10 +571,13 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->hw = hw;
priv->card = card;
+ priv->ops = ops;
priv->tx_skb = NULL;
+ priv->radioon = RADIO_OFF;
hw->queues = 1;
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
hw->extra_tx_headroom = sizeof(struct txpd);
memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
@@ -646,9 +597,31 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
INIT_WORK(&priv->cmd_work, lbtf_cmd_work);
INIT_WORK(&priv->tx_work, lbtf_tx_work);
+
+ if (priv->ops->hw_prog_firmware(priv)) {
+ lbtf_deb_usbd(dmdev, "Error programming the firmware\n");
+ priv->ops->hw_reset_device(priv);
+ goto err_init_adapter;
+ }
+
+ eth_broadcast_addr(priv->current_addr);
+ if (lbtf_update_hw_spec(priv))
+ goto err_init_adapter;
+
+ if (priv->fwrelease < LBTF_FW_VER_MIN ||
+ priv->fwrelease > LBTF_FW_VER_MAX) {
+ goto err_init_adapter;
+ }
+
+ /* The firmware seems to start with the radio enabled. Turn it
+ * off before an actual mac80211 start callback is invoked.
+ */
+ lbtf_set_radio_control(priv);
+
if (ieee80211_register_hw(hw))
goto err_init_adapter;
+ dev_info(dmdev, "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
goto done;
err_init_adapter:
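/* Summary of the reordered init path, for readability (not part of the
 * patch; error handling omitted -- the hunk above is authoritative):
 *
 *	priv->ops->hw_prog_firmware(priv);	// was done lazily in op_start
 *	lbtf_update_hw_spec(priv);		// MAC address, fwrelease
 *	// reject out-of-range priv->fwrelease
 *	lbtf_set_radio_control(priv);		// radio off until op_start
 *	ieee80211_register_hw(hw);
 */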
@@ -676,7 +649,7 @@ int lbtf_remove_card(struct lbtf_private *priv)
ieee80211_unregister_hw(hw);
ieee80211_free_hw(hw);
- lbtf_deb_leave(LBTF_DEB_MAIN);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_remove_card);
diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig
index 279167ddd293..524fd565cb2a 100644
--- a/drivers/net/wireless/marvell/mwifiex/Kconfig
+++ b/drivers/net/wireless/marvell/mwifiex/Kconfig
@@ -9,7 +9,7 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 1467af22e394..c46f0a54a0c7 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -376,11 +376,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
struct mwifiex_power_cfg power_cfg;
int dbm = MBM_TO_DBM(mbm);
- if (type == NL80211_TX_POWER_FIXED) {
+ switch (type) {
+ case NL80211_TX_POWER_FIXED:
power_cfg.is_power_auto = 0;
+ power_cfg.is_power_fixed = 1;
power_cfg.power_level = dbm;
- } else {
+ break;
+ case NL80211_TX_POWER_LIMITED:
+ power_cfg.is_power_auto = 0;
+ power_cfg.is_power_fixed = 0;
+ power_cfg.power_level = dbm;
+ break;
+ case NL80211_TX_POWER_AUTOMATIC:
power_cfg.is_power_auto = 1;
+ break;
}
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -4310,11 +4319,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP);
+ if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
if (adapter->config_bands & BAND_A)
wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
@@ -4374,11 +4385,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
- wiphy->features |= NL80211_FEATURE_HT_IBSS |
- NL80211_FEATURE_INACTIVITY_TIMER |
+ wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+ wiphy->features |= NL80211_FEATURE_HT_IBSS;
+
if (ISSUPP_RANDOM_MAC(adapter->fw_cap_info))
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index cbe4493b3266..8ab114cf3467 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -922,9 +922,8 @@ mwifiex_reset_write(struct file *file,
}
#define MWIFIEX_DFS_ADD_FILE(name) do { \
- if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \
- priv, &mwifiex_dfs_##name##_fops)) \
- return; \
+ debugfs_create_file(#name, 0644, priv->dfs_dev_dir, priv, \
+ &mwifiex_dfs_##name##_fops); \
} while (0);
#define MWIFIEX_DFS_FILE_OPS(name) \
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 48e154e1865d..0dd592ea6e83 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -267,6 +267,7 @@ struct mwifiex_ds_encrypt_key {
struct mwifiex_power_cfg {
u32 is_power_auto;
+ u32 is_power_fixed;
u32 power_level;
};
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index d49fbd58afa7..a85648342d15 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -489,6 +489,8 @@ static void mwifiex_sdio_coredump(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135)
/* Device ID for SD8801 */
#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
+/* Device ID for SD8977 */
+#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145)
/* Device ID for SD8997 */
#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)
@@ -507,6 +509,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8977},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
@@ -2726,4 +2730,5 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index dccf7fd1aef3..912de2cde8d9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -36,6 +36,7 @@
#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
#define BLOCK_MODE 1
@@ -371,6 +372,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
0x59, 0x5c, 0x5d},
};
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xe8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+ 0x60, 0x61, 0x62, 0x64,
+ 0x65, 0x66, 0x68, 0x69,
+ 0x6a},
+};
+
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
.start_rd_port = 0,
.start_wr_port = 0,
@@ -532,6 +586,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.can_ext_scan = true,
};
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
+ .firmware = SD8977_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8977,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.firmware = SD8997_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd8997,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index b454b5f85503..ebc0e41e5d3b 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -688,6 +688,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
if (!power_cfg->is_power_auto) {
+ u16 dbm_min = power_cfg->is_power_fixed ?
+ dbm : priv->min_tx_power_level;
+
txp_cfg->mode = cpu_to_le32(1);
pg_tlv = (struct mwifiex_types_power_group *)
(buf + sizeof(struct host_cmd_ds_txpwr_cfg));
@@ -702,7 +705,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->last_rate_code = 0x03;
pg->modulation_class = MOD_CLASS_HR_DSSS;
pg->power_step = 0;
- pg->power_min = (s8) dbm;
+ pg->power_min = (s8) dbm_min;
pg->power_max = (s8) dbm;
pg++;
/* Power group for modulation class OFDM */
@@ -710,7 +713,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->last_rate_code = 0x07;
pg->modulation_class = MOD_CLASS_OFDM;
pg->power_step = 0;
- pg->power_min = (s8) dbm;
+ pg->power_min = (s8) dbm_min;
pg->power_max = (s8) dbm;
pg++;
/* Power group for modulation class HTBW20 */
@@ -718,7 +721,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->last_rate_code = 0x20;
pg->modulation_class = MOD_CLASS_HT;
pg->power_step = 0;
- pg->power_min = (s8) dbm;
+ pg->power_min = (s8) dbm_min;
pg->power_max = (s8) dbm;
pg->ht_bandwidth = HT_BW_20;
pg++;
@@ -727,7 +730,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->last_rate_code = 0x20;
pg->modulation_class = MOD_CLASS_HT;
pg->power_step = 0;
- pg->power_min = (s8) dbm;
+ pg->power_min = (s8) dbm_min;
pg->power_max = (s8) dbm;
pg->ht_bandwidth = HT_BW_40;
}
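/* Not part of the patch: the net effect of is_power_fixed on the power
 * groups built above, as I read the two hunks together:
 *
 *	NL80211_TX_POWER_FIXED:     power_min = dbm                       power_max = dbm
 *	NL80211_TX_POWER_LIMITED:   power_min = priv->min_tx_power_level  power_max = dbm
 *	NL80211_TX_POWER_AUTOMATIC: no power group TLV (is_power_auto)
 *
 * i.e. "fixed" pins the firmware to one level, while "limited" only caps
 * the upper bound and leaves rate-dependent backoff to the firmware.
 */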
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index e86217a6b9ca..ca759d9c0253 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -300,7 +300,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_11h_handle_radar_detected(priv, adapter->event_skb);
break;
case EVENT_BT_COEX_WLAN_PARA_CHANGE:
- dev_err(adapter->dev, "EVENT: BT coex wlan param update\n");
+ mwifiex_dbg(adapter, EVENT, "event: BT coex wlan param update\n");
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index c30d8f5bbf2a..dbe8c70a8f73 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -21,3 +21,4 @@ config MT76x02_USB
source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 1a45cb30f39f..3fd1b64b4aa7 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -4,9 +4,10 @@ obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
mt76-y := \
- mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
+ tx.o agg-rx.o mcu.o
-mt76-usb-y := usb.o usb_trace.o usb_mcu.o
+mt76-usb-y := usb.o usb_trace.o
CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)
@@ -21,3 +22,4 @@ mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
+obj-$(CONFIG_MT7603E) += mt7603/
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index e2ba26378575..6eedc0ec7661 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -242,6 +242,30 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
iowrite32(q->head, &q->regs->cpu_idx);
}
+static int
+mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, u32 tx_info)
+{
+ struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue_buf buf;
+ dma_addr_t addr;
+
+ addr = dma_map_single(dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr))
+ return -ENOMEM;
+
+ buf.addr = addr;
+ buf.len = skb->len;
+
+ spin_lock_bh(&q->lock);
+ mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+ mt76_dma_kick_queue(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ return 0;
+}
+
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
@@ -300,7 +324,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
goto unmap;
- return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+ return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
unmap:
ret = -ENOMEM;
@@ -318,7 +342,7 @@ free:
EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
dma_addr_t addr;
void *buf;
@@ -392,7 +416,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q, false);
+ mt76_dma_rx_fill(dev, q);
}
static void
@@ -417,10 +441,9 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
+ int len, data_len, done = 0;
struct sk_buff *skb;
unsigned char *data;
- int len;
- int done = 0;
bool more;
while (done < budget) {
@@ -430,6 +453,19 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (!data)
break;
+ if (q->rx_head)
+ data_len = q->buf_size;
+ else
+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ if (data_len < len + q->buf_offset) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+
+ skb_free_frag(data);
+ continue;
+ }
+
if (q->rx_head) {
mt76_add_fragment(dev, q, data, len, more);
continue;
@@ -440,12 +476,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
skb_free_frag(data);
continue;
}
-
skb_reserve(skb, q->buf_offset);
- if (skb->tail + len > skb->end) {
- dev_kfree_skb(skb);
- continue;
- }
if (q == &dev->q_rx[MT_RXQ_MCU]) {
u32 *rxfce = (u32 *) skb->cb;
@@ -463,7 +494,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
- mt76_dma_rx_fill(dev, q, true);
+ mt76_dma_rx_fill(dev, q);
return done;
}
@@ -504,7 +535,7 @@ mt76_dma_init(struct mt76_dev *dev)
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
- mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+ mt76_dma_rx_fill(dev, &dev->q_rx[i]);
skb_queue_head_init(&dev->rx_skb[i]);
napi_enable(&dev->napi[i]);
}
@@ -515,17 +546,16 @@ mt76_dma_init(struct mt76_dev *dev)
static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
- .add_buf = mt76_dma_add_buf,
+ .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
.tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
};
-int mt76_dma_attach(struct mt76_dev *dev)
+void mt76_dma_attach(struct mt76_dev *dev)
{
dev->queue_ops = &mt76_dma_ops;
- return 0;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 357cc356342d..e3292df5e9b2 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -54,7 +54,7 @@ enum mt76_mcu_evt_type {
EVT_EVENT_DFS_DETECT_RSP,
};
-int mt76_dma_attach(struct mt76_dev *dev);
+void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 530e5593765c..a1529920d877 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -54,22 +54,30 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
part = np->name;
mtd = get_mtd_device_nm(part);
- if (IS_ERR(mtd))
- return PTR_ERR(mtd);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto out_put_node;
+ }
- if (size <= sizeof(*list))
- return -EINVAL;
+ if (size <= sizeof(*list)) {
+ ret = -EINVAL;
+ goto out_put_node;
+ }
offset = be32_to_cpup(list);
ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
put_mtd_device(mtd);
if (ret)
- return ret;
+ goto out_put_node;
- if (retlen < len)
- return -EINVAL;
+ if (retlen < len) {
+ ret = -EINVAL;
+ goto out_put_node;
+ }
- return 0;
+out_put_node:
+ of_node_put(np);
+ return ret;
#else
return -ENOENT;
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 7b926dfa6b97..a033745adb2f 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -124,7 +124,7 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
bool vht)
{
struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
- int i, nstream = __sw_hweight8(dev->antenna_mask);
+ int i, nstream = hweight8(dev->antenna_mask);
struct ieee80211_sta_vht_cap *vht_cap;
u16 mcs_map = 0;
@@ -269,7 +269,9 @@ mt76_check_sband(struct mt76_dev *dev, int band)
}
struct mt76_dev *
-mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
+mt76_alloc_device(struct device *pdev, unsigned int size,
+ const struct ieee80211_ops *ops,
+ const struct mt76_driver_ops *drv_ops)
{
struct ieee80211_hw *hw;
struct mt76_dev *dev;
@@ -280,6 +282,9 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
dev = hw->priv;
dev->hw = hw;
+ dev->dev = pdev;
+ dev->drv = drv_ops;
+
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
@@ -328,6 +333,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -547,7 +553,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
}
static void
-mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
+mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -566,6 +572,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv);
+ if (status->signal <= 0)
+ ewma_signal_add(&wcid->rssi, -status->signal);
+
+ wcid->inactive_count = 0;
+
if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
return;
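/* Illustration only: status->signal is a (negative) dBm value, so the
 * average above accumulates its magnitude. With DECLARE_EWMA(signal, 10, 8)
 * added in mt76.h, linux/average.h keeps the mean with 2^10 fractional
 * scaling and folds each sample in with weight 1/8:
 *
 *	new_avg = old_avg + (sample - old_avg) / 8
 *
 * e.g. an average of 60 (i.e. -60 dBm) moves to ~61.25 after one -70 dBm
 * sample. Numbers are illustrative, based on my reading of average.h.
 */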
@@ -625,7 +636,7 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
__skb_queue_head_init(&frames);
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
- mt76_check_ps(dev, skb);
+ mt76_check_sta(dev, skb);
mt76_rx_aggr_reorder(skb, &frames);
}
@@ -659,6 +670,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
mt76_txq_init(dev, sta->txq[i]);
}
+ ewma_signal_init(&wcid->rssi);
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
out:
@@ -702,6 +714,11 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(dev, vif, sta);
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ dev->drv->sta_assoc)
+ dev->drv->sta_assoc(dev, vif, sta);
+
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
mt76_sta_remove(dev, vif, sta);
@@ -709,3 +726,60 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
+
+int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct mt76_dev *dev = hw->priv;
+ int n_chains = hweight8(dev->antenna_mask);
+
+ *dbm = dev->txpower_cur / 2;
+
+ /* convert from per-chain power to combined
+ * output on 2x2 devices
+ */
+ if (n_chains > 1)
+ *dbm += 3;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_txpower);
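/* Worked example, not from the driver: the division by two suggests
 * dev->txpower_cur is kept in 0.5 dB steps, and the "+3" approximates
 * 10*log10(2) ~= 3.01 dB for two equal-power chains:
 *
 *	txpower_cur = 34  ->  17 dBm per chain
 *	n_chains    = 2   ->  reported txpower = 17 + 3 = 20 dBm
 *
 * The same arithmetic as a standalone helper (name is made up):
 */
static int mt76_example_combined_dbm(int txpower_half_db, int n_chains)
{
	int dbm = txpower_half_db / 2;

	if (n_chains > 1)
		dbm += 3;	/* doubling TX power adds ~3 dB */

	return dbm;
}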
+
+static void
+__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ ieee80211_csa_finish(vif);
+}
+
+void mt76_csa_finish(struct mt76_dev *dev)
+{
+ if (!dev->csa_complete)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(dev->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ __mt76_csa_finish, dev);
+
+ dev->csa_complete = 0;
+}
+EXPORT_SYMBOL_GPL(mt76_csa_finish);
+
+static void
+__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76_dev *dev = priv;
+
+ if (!vif->csa_active)
+ return;
+
+ dev->csa_complete |= ieee80211_csa_is_complete(vif);
+}
+
+void mt76_csa_check(struct mt76_dev *dev)
+{
+ ieee80211_iterate_active_interfaces_atomic(dev->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ __mt76_csa_check, dev);
+}
+EXPORT_SYMBOL_GPL(mt76_csa_check);
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
new file mode 100644
index 000000000000..dbb57b593a87
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+
+struct sk_buff *
+mt76_mcu_msg_alloc(const void *data, int head_len,
+ int data_len, int tail_len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(head_len + data_len + tail_len,
+ GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, head_len);
+ if (data && data_len)
+ skb_put_data(skb, data, data_len);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
+
+/* mmio */
+struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
+ unsigned long expires)
+{
+ unsigned long timeout;
+
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ timeout = expires - jiffies;
+ wait_event_timeout(dev->mmio.mcu.wait,
+ !skb_queue_empty(&dev->mmio.mcu.res_q),
+ timeout);
+ return skb_dequeue(&dev->mmio.mcu.res_q);
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_get_response);
+
+void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ skb_queue_tail(&dev->mmio.mcu.res_q, skb);
+ wake_up(&dev->mmio.mcu.wait);
+}
+EXPORT_SYMBOL_GPL(mt76_mcu_rx_event);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 5cd508a68609..5dfb0601f101 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -23,6 +23,7 @@
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
+#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"
@@ -86,6 +87,7 @@ struct mt76u_buf {
struct mt76_dev *dev;
struct urb *urb;
size_t len;
+ void *buf;
bool done;
};
@@ -156,6 +158,9 @@ struct mt76_queue_ops {
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
+ int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, u32 tx_info);
+
void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more);
@@ -174,6 +179,8 @@ enum mt76_wcid_flags {
#define MT76_N_WCIDS 128
+DECLARE_EWMA(signal, 10, 8);
+
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
@@ -181,6 +188,9 @@ struct mt76_wcid {
unsigned long flags;
+ struct ewma_signal rssi;
+ int inactive_count;
+
u8 idx;
u8 hw_key_idx;
@@ -239,7 +249,9 @@ struct mt76_rx_tid {
#define MT_TX_CB_TXS_FAILED BIT(2)
#define MT_PACKET_ID_MASK GENMASK(7, 0)
-#define MT_PACKET_ID_NO_ACK MT_PACKET_ID_MASK
+#define MT_PACKET_ID_NO_ACK 0
+#define MT_PACKET_ID_NO_SKB 1
+#define MT_PACKET_ID_FIRST 2
#define MT_TX_STATUS_SKB_TIMEOUT HZ
@@ -292,6 +304,9 @@ struct mt76_driver_ops {
int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+ void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
};
@@ -368,11 +383,11 @@ struct mt76_usb {
u16 out_max_packet;
u8 in_ep[__MT_EP_IN_MAX];
u16 in_max_packet;
+ bool sg_en;
struct mt76u_mcu {
struct mutex mutex;
- struct completion cmpl;
- struct mt76u_buf res;
+ u8 *data;
u32 msg_seq;
/* multiple reads */
@@ -421,6 +436,7 @@ struct mt76_dev {
struct mt76_queue q_tx[__MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
+ int tx_dma_idx[4];
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
@@ -454,6 +470,8 @@ struct mt76_dev {
bool led_al;
u8 led_pin;
+ u8 csa_complete;
+
u32 rxfilter;
union {
@@ -488,7 +506,7 @@ struct mt76_rx_status {
u8 rate_idx;
u8 nss;
u8 band;
- u8 signal;
+ s8 signal;
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
@@ -551,7 +569,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
-#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
@@ -571,8 +589,9 @@ mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
return &msband->chan[idx];
}
-struct mt76_dev *mt76_alloc_device(unsigned int size,
- const struct ieee80211_ops *ops);
+struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
+ const struct ieee80211_ops *ops,
+ const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
@@ -677,6 +696,14 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev);
+
+int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm);
+
+void mt76_csa_check(struct mt76_dev *dev);
+void mt76_csa_finish(struct mt76_dev *dev);
+
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
@@ -703,14 +730,21 @@ static inline u8 q2ep(u8 qid)
return qid + 1;
}
-static inline bool mt76u_check_sg(struct mt76_dev *dev)
+static inline int
+mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
+ int timeout)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76_usb *usb = &dev->usb;
+ unsigned int pipe;
+
+ if (actual_len)
+ pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
+ else
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
- return (udev->bus->sg_tablesize > 0 &&
- (udev->bus->no_sg_constraint ||
- udev->speed == USB_SPEED_WIRELESS));
+ return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
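/* Usage convention implied by the helper above (sketch only; the timeout
 * values and buffers are illustrative, not taken from the driver):
 *
 *	mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
 *		NULL actual_len -> bulk-out on MT_EP_OUT_INBAND_CMD (send cmd)
 *	mt76u_bulk_msg(dev, usb->mcu.data, len, &rx_len, 300);
 *		non-NULL actual_len -> bulk-in on MT_EP_IN_CMD_RESP (read reply)
 */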
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
@@ -719,21 +753,17 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
-void mt76u_deinit(struct mt76_dev *dev);
-int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
- int nsgs, int len, int sglen, gfp_t gfp);
-void mt76u_buf_free(struct mt76u_buf *buf);
-int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
- struct mt76u_buf *buf, gfp_t gfp,
- usb_complete_t complete_fn, void *context);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
-void mt76u_mcu_complete_urb(struct urb *urb);
-int mt76u_mcu_init_rx(struct mt76_dev *dev);
-void mt76u_mcu_deinit(struct mt76_dev *dev);
+struct sk_buff *
+mt76_mcu_msg_alloc(const void *data, int head_len,
+ int data_len, int tail_len);
+void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
+struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
+ unsigned long expires);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
new file mode 100644
index 000000000000..087945c3d8f3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
@@ -0,0 +1,9 @@
+config MT7603E
+ tristate "MediaTek MT7603E (PCIe) and MT76x8 WLAN support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7603E wireless PCIe devices and the WLAN core on
+ MT7628/MT7688 SoC devices.
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Makefile b/drivers/net/wireless/mediatek/mt76/mt7603/Makefile
new file mode 100644
index 000000000000..d95a30421c62
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MT7603E) += mt7603e.o
+
+mt7603e-y := \
+ pci.o soc.o main.o init.o mcu.o \
+ core.o dma.o mac.o eeprom.o \
+ beacon.o debugfs.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
new file mode 100644
index 000000000000..afcd86f735b4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: ISC */
+
+#include "mt7603.h"
+
+struct beacon_bc_data {
+ struct mt7603_dev *dev;
+ struct sk_buff_head q;
+ struct sk_buff *tail[MT7603_MAX_INTERFACES];
+ int count[MT7603_MAX_INTERFACES];
+};
+
+static void
+mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7603_dev *dev = (struct mt7603_dev *)priv;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
+
+ if (!(dev->beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb,
+ &mvif->sta.wcid, NULL);
+
+ spin_lock_bh(&dev->ps_lock);
+ mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+ FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
+ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
+ dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) |
+ FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
+ FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
+
+ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
+ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
+
+ spin_unlock_bh(&dev->ps_lock);
+}
+
+static void
+mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct beacon_bc_data *data = priv;
+ struct mt7603_dev *dev = data->dev;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ if (!(dev->beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ mt76_skb_set_moredata(skb, true);
+ __skb_queue_tail(&data->q, skb);
+ data->tail[mvif->idx] = skb;
+ data->count[mvif->idx]++;
+}
+
+void mt7603_pre_tbtt_tasklet(unsigned long arg)
+{
+ struct mt7603_dev *dev = (struct mt7603_dev *)arg;
+ struct mt76_queue *q;
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int i, nframes;
+
+ data.dev = dev;
+ __skb_queue_head_init(&data.q);
+
+ q = &dev->mt76.q_tx[MT_TXQ_BEACON];
+ spin_lock_bh(&q->lock);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7603_update_beacon_iter, dev);
+ mt76_queue_kick(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ /* Flush all previous CAB queue packets */
+ mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_CAB, false);
+
+ mt76_csa_check(&dev->mt76);
+ if (dev->mt76.csa_complete)
+ goto out;
+
+ q = &dev->mt76.q_tx[MT_TXQ_CAB];
+ do {
+ nframes = skb_queue_len(&data.q);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7603_add_buffered_bc, &data);
+ } while (nframes != skb_queue_len(&data.q) &&
+ skb_queue_len(&data.q) < 8);
+
+ if (skb_queue_empty(&data.q))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+ continue;
+
+ mt76_skb_set_moredata(data.tail[i], false);
+ }
+
+ spin_lock_bh(&q->lock);
+ while ((skb = __skb_dequeue(&data.q)) != NULL) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+
+ mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid,
+ NULL);
+ }
+ mt76_queue_kick(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ for (i = 0; i < ARRAY_SIZE(data.count); i++)
+ mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i),
+ data.count[i] << MT_WF_ARB_CAB_COUNT_B0_SHIFT(i));
+
+ mt76_wr(dev, MT_WF_ARB_CAB_START,
+ MT_WF_ARB_CAB_START_BSSn(0) |
+ (MT_WF_ARB_CAB_START_BSS0n(1) *
+ ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
+
+out:
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
+ if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
+ __sw_hweight8(dev->beacon_mask))
+ dev->beacon_check++;
+}
+
+void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
+{
+ u32 pre_tbtt = MT7603_PRE_TBTT_TIME / 64;
+
+ if (idx >= 0) {
+ if (intval)
+ dev->beacon_mask |= BIT(idx);
+ else
+ dev->beacon_mask &= ~BIT(idx);
+ }
+
+ if (!dev->beacon_mask || (!intval && idx < 0)) {
+ mt7603_irq_disable(dev, MT_INT_MAC_IRQ3);
+ mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK);
+ mt76_wr(dev, MT_HW_INT_MASK(3), 0);
+ return;
+ }
+
+ dev->beacon_int = intval;
+ mt76_wr(dev, MT_TBTT,
+ FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE);
+
+ mt76_wr(dev, MT_TBTT_TIMER_CFG, 0x99); /* start timer */
+
+ mt76_rmw_field(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK,
+ MT_BCNQ_OPMODE_AP);
+ mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCN_PRIO);
+ mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCAST_PRIO);
+
+ mt76_wr(dev, MT_PRE_TBTT, pre_tbtt);
+
+ mt76_set(dev, MT_HW_INT_MASK(3),
+ MT_HW_INT3_PRE_TBTT0 | MT_HW_INT3_TBTT0);
+
+ mt76_set(dev, MT_WF_ARB_BCN_START,
+ MT_WF_ARB_BCN_START_BSSn(0) |
+ ((dev->beacon_mask >> 1) * MT_WF_ARB_BCN_START_BSS0n(1)));
+ mt7603_irq_enable(dev, MT_INT_MAC_IRQ3);
+
+ if (dev->beacon_mask & ~BIT(0))
+ mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
+ else
+ mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
new file mode 100644
index 000000000000..1086dcd376a0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: ISC */
+
+#include "mt7603.h"
+
+void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+ dev->mt76.mmio.irqmask &= ~clear;
+ dev->mt76.mmio.irqmask |= set;
+ mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
+ spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
+}
+
+void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+ mt7603_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7603_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return IRQ_NONE;
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_MAC_IRQ3) {
+ u32 hwintr = mt76_rr(dev, MT_HW_INT_STATUS(3));
+
+ mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
+ if (hwintr & MT_HW_INT3_PRE_TBTT0)
+ tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+ if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
+ mt76_csa_finish(&dev->mt76);
+ }
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
+{
+ u32 base = addr & GENMASK(31, 19);
+ u32 offset = addr & GENMASK(18, 0);
+
+ dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
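/* Worked example of the remap above (address chosen arbitrarily):
 * for addr = 0x81234567,
 *
 *	base   = addr & GENMASK(31, 19) = 0x81200000   (written to MT_MCU_PCIE_REMAP_2)
 *	offset = addr & GENMASK(18, 0)  = 0x00034567
 *
 * so the register is then accessed at MT_PCIE_REMAP_BASE_2 + 0x34567,
 * i.e. the 512 KiB window selected by the remap register plus the
 * in-window offset.
 */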
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
new file mode 100644
index 000000000000..f8b3b6ab6297
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: ISC */
+
+#include "mt7603.h"
+
+static int
+mt7603_reset_read(struct seq_file *s, void *data)
+{
+ struct mt7603_dev *dev = dev_get_drvdata(s->private);
+ static const char * const reset_cause_str[] = {
+ [RESET_CAUSE_TX_HANG] = "TX hang",
+ [RESET_CAUSE_TX_BUSY] = "TX DMA busy stuck",
+ [RESET_CAUSE_RX_BUSY] = "RX DMA busy stuck",
+ [RESET_CAUSE_RX_PSE_BUSY] = "RX PSE busy stuck",
+ [RESET_CAUSE_BEACON_STUCK] = "Beacon stuck",
+ [RESET_CAUSE_MCU_HANG] = "MCU hang",
+ [RESET_CAUSE_RESET_FAILED] = "PSE reset failed",
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reset_cause_str); i++) {
+ if (!reset_cause_str[i])
+ continue;
+
+ seq_printf(s, "%20s: %u\n", reset_cause_str[i],
+ dev->reset_cause[i]);
+ }
+
+ return 0;
+}
+
+static int
+mt7603_radio_read(struct seq_file *s, void *data)
+{
+ struct mt7603_dev *dev = dev_get_drvdata(s->private);
+
+ seq_printf(s, "Sensitivity: %d\n", dev->sensitivity);
+ seq_printf(s, "False CCA: ofdm=%d cck=%d\n",
+ dev->false_cca_ofdm, dev->false_cca_cck);
+
+ return 0;
+}
+
+void mt7603_init_debugfs(struct mt7603_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return;
+
+ debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir,
+ mt7603_reset_read);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
+ mt7603_radio_read);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
new file mode 100644
index 000000000000..d69e82c66ab2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: ISC */
+
+#include "mt7603.h"
+#include "mac.h"
+#include "../dma.h"
+
+static int
+mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc)
+{
+ int ret;
+
+ q->hw_idx = idx;
+ q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+
+ ret = mt76_queue_alloc(dev, q);
+ if (ret)
+ return ret;
+
+ mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+ return 0;
+}
+
+static void
+mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+{
+ __le32 *txd = (__le32 *)skb->data;
+ struct mt7603_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx;
+ u32 val;
+
+ if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
+ goto free;
+
+ val = le32_to_cpu(txd[1]);
+ idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
+ skb->priority = FIELD_GET(MT_TXD1_TID, val);
+
+ if (idx >= MT7603_WTBL_STA - 1)
+ goto free;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (!wcid)
+ goto free;
+
+ msta = container_of(wcid, struct mt7603_sta, wcid);
+ val = le32_to_cpu(txd[0]);
+ skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
+
+ spin_lock_bh(&dev->ps_lock);
+ __skb_queue_tail(&msta->psq, skb);
+ if (skb_queue_len(&msta->psq) >= 64) {
+ skb = __skb_dequeue(&msta->psq);
+ dev_kfree_skb(skb);
+ }
+ spin_unlock_bh(&dev->ps_lock);
+ return;
+
+free:
+ dev_kfree_skb(skb);
+}
+
+void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+ if (q == MT_RXQ_MCU) {
+ if (type == PKT_TYPE_RX_EVENT)
+ mt76_mcu_rx_event(&dev->mt76, skb);
+ else
+ mt7603_rx_loopback_skb(dev, skb);
+ return;
+ }
+
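+ /* Dispatch remaining frames by RX descriptor packet type */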
+ switch (type) {
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 5 <= end; rxd += 5)
+ mt7603_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt76_mcu_rx_event(&dev->mt76, skb);
+ return;
+ case PKT_TYPE_NORMAL:
+ if (mt7603_mac_fill_rx(dev, skb) == 0) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ /* fall through */
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+static int
+mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize)
+{
+ int ret;
+
+ q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+
+ ret = mt76_queue_alloc(dev, q);
+ if (ret)
+ return ret;
+
+ mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+ return 0;
+}
+
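+/*
+ * Reap completed TX descriptors on all queues, highest index (MCU) first,
+ * then re-enable the TX done interrupts.
+ */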
+static void
+mt7603_tx_tasklet(unsigned long data)
+{
+ struct mt7603_dev *dev = (struct mt7603_dev *)data;
+ int i;
+
+ dev->tx_dma_check = 0;
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt7603_dma_init(struct mt7603_dev *dev)
+{
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+ int ret;
+ int i;
+
+ mt76_dma_attach(&dev->mt76);
+
+ init_waitqueue_head(&dev->mt76.mmio.mcu.wait);
+ skb_queue_head_init(&dev->mt76.mmio.mcu.res_q);
+
+ tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+ mt7603_pse_client_reset(dev);
+
+ for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
+ wmm_queue_map[i],
+ MT_TX_RING_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON],
+ MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB],
+ MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+ MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_DELAY_INT_CFG, 0);
+ return mt76_init_queues(dev);
+}
+
+void mt7603_dma_cleanup(struct mt7603_dev *dev)
+{
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+ tasklet_kill(&dev->tx_tasklet);
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
new file mode 100644
index 000000000000..8c120e4461b0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7603.h"
+#include "eeprom.h"
+
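+/*
+ * Read one 16-byte block from the efuse. An all-ones AOUT field or a missing
+ * VALID bit marks the block as blank, in which case it is filled with 0xff.
+ */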
+static int
+mt7603_efuse_read(struct mt7603_dev *dev, u32 base, u16 addr, u8 *data)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, base + MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
+ WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int
+mt7603_efuse_init(struct mt7603_dev *dev)
+{
+ u32 base = mt7603_reg_map(dev, MT_EFUSE_BASE);
+ int len = MT7603_EEPROM_SIZE;
+ void *buf;
+ int ret, i;
+
+ if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
+ return 0;
+
+ dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+ dev->mt76.otp.size = len;
+ if (!dev->mt76.otp.data)
+ return -ENOMEM;
+
+ buf = dev->mt76.otp.data;
+ for (i = 0; i + 16 <= len; i += 16) {
+ ret = mt7603_efuse_read(dev, base, i, buf + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+mt7603_has_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
+{
+ if (!efuse[MT_EE_TEMP_SENSOR_CAL])
+ return false;
+
+ if (get_unaligned_le16(efuse + MT_EE_TX_POWER_0_START_2G) == 0)
+ return false;
+
+ if (get_unaligned_le16(efuse + MT_EE_TX_POWER_1_START_2G) == 0)
+ return false;
+
+ if (!efuse[MT_EE_CP_FT_VERSION])
+ return false;
+
+ if (!efuse[MT_EE_XTAL_FREQ_OFFSET])
+ return false;
+
+ if (!efuse[MT_EE_XTAL_WF_RFCAL])
+ return false;
+
+ return true;
+}
+
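+/*
+ * Override selected EEPROM bytes with per-chip calibration data from the
+ * efuse. MT7628 keeps the TX power values from its EEPROM, so the last four
+ * entries are skipped there.
+ */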
+static void
+mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse)
+{
+ static const u8 cal_free_bytes[] = {
+ MT_EE_TEMP_SENSOR_CAL,
+ MT_EE_CP_FT_VERSION,
+ MT_EE_XTAL_FREQ_OFFSET,
+ MT_EE_XTAL_WF_RFCAL,
+ /* Skip for MT7628 */
+ MT_EE_TX_POWER_0_START_2G,
+ MT_EE_TX_POWER_0_START_2G + 1,
+ MT_EE_TX_POWER_1_START_2G,
+ MT_EE_TX_POWER_1_START_2G + 1,
+ };
+ u8 *eeprom = dev->mt76.eeprom.data;
+ int n = ARRAY_SIZE(cal_free_bytes);
+ int i;
+
+ if (!mt7603_has_cal_free_data(dev, efuse))
+ return;
+
+ if (is_mt7628(dev))
+ n -= 4;
+
+ for (i = 0; i < n; i++) {
+ int offset = cal_free_bytes[i];
+
+ eeprom[offset] = efuse[offset];
+ }
+}
+
+static int
+mt7603_eeprom_load(struct mt7603_dev *dev)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7603_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return mt7603_efuse_init(dev);
+}
+
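+/* Validate the EEPROM contents by checking the chip ID word at the start */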
+static int mt7603_check_eeprom(struct mt76_dev *dev)
+{
+ u16 val = get_unaligned_le16(dev->eeprom.data);
+
+ switch (val) {
+ case 0x7628:
+ case 0x7603:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int mt7603_eeprom_init(struct mt7603_dev *dev)
+{
+ int ret;
+
+ ret = mt7603_eeprom_load(dev);
+ if (ret < 0)
+ return ret;
+
+ if (dev->mt76.otp.data) {
+ if (mt7603_check_eeprom(&dev->mt76) == 0)
+ mt7603_apply_cal_free_data(dev, dev->mt76.otp.data);
+ else
+ memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
+ MT7603_EEPROM_SIZE);
+ }
+
+ dev->mt76.cap.has_2ghz = true;
+ memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+
+ mt76_eeprom_override(&dev->mt76);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
new file mode 100644
index 000000000000..f27b99b7e359
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_EEPROM_H
+#define __MT7603_EEPROM_H
+
+#include "mt7603.h"
+
+enum mt7603_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_NIC_CONF_0 = 0x034,
+ MT_EE_NIC_CONF_1 = 0x036,
+ MT_EE_NIC_CONF_2 = 0x042,
+
+ MT_EE_XTAL_TRIM_1 = 0x03a,
+
+ MT_EE_RSSI_OFFSET_2G = 0x046,
+ MT_EE_WIFI_RF_SETTING = 0x048,
+ MT_EE_RSSI_OFFSET_5G = 0x04a,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x050,
+ MT_EE_TX_POWER_DELTA_BW80 = 0x052,
+
+ MT_EE_TX_POWER_EXT_PA_5G = 0x054,
+
+ MT_EE_TEMP_SENSOR_CAL = 0x055,
+
+ MT_EE_TX_POWER_0_START_2G = 0x056,
+ MT_EE_TX_POWER_1_START_2G = 0x05c,
+
+ /* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G 5
+#define MT_TX_POWER_GROUPS_5G 6
+ MT_EE_TX_POWER_0_START_5G = 0x062,
+
+ MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
+ MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
+
+ MT_EE_TX_POWER_1_START_5G = 0x080,
+
+ MT_EE_TX_POWER_CCK = 0x0a0,
+ MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2,
+ MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4,
+ MT_EE_TX_POWER_OFDM_2G_54M = 0x0a6,
+ MT_EE_TX_POWER_HT_BPSK_QPSK = 0x0a8,
+ MT_EE_TX_POWER_HT_16_64_QAM = 0x0aa,
+ MT_EE_TX_POWER_HT_64_QAM = 0x0ac,
+
+ MT_EE_ELAN_RX_MODE_GAIN = 0x0c0,
+ MT_EE_ELAN_RX_MODE_NF = 0x0c1,
+ MT_EE_ELAN_RX_MODE_P1DB = 0x0c2,
+
+ MT_EE_ELAN_BYPASS_MODE_GAIN = 0x0c3,
+ MT_EE_ELAN_BYPASS_MODE_NF = 0x0c4,
+ MT_EE_ELAN_BYPASS_MODE_P1DB = 0x0c5,
+
+ MT_EE_STEP_NUM_NEG_6_7 = 0x0c6,
+ MT_EE_STEP_NUM_NEG_4_5 = 0x0c8,
+ MT_EE_STEP_NUM_NEG_2_3 = 0x0ca,
+ MT_EE_STEP_NUM_NEG_0_1 = 0x0cc,
+
+ MT_EE_REF_STEP_24G = 0x0ce,
+
+ MT_EE_STEP_NUM_PLUS_1_2 = 0x0d0,
+ MT_EE_STEP_NUM_PLUS_3_4 = 0x0d2,
+ MT_EE_STEP_NUM_PLUS_5_6 = 0x0d4,
+ MT_EE_STEP_NUM_PLUS_7 = 0x0d6,
+
+ MT_EE_CP_FT_VERSION = 0x0f0,
+
+ MT_EE_XTAL_FREQ_OFFSET = 0x0f4,
+ MT_EE_XTAL_TRIM_2_COMP = 0x0f5,
+ MT_EE_XTAL_TRIM_3_COMP = 0x0f6,
+ MT_EE_XTAL_WF_RFCAL = 0x0f7,
+
+ __MT_EE_MAX
+};
+
+enum mt7603_eeprom_source {
+ MT_EE_SRC_PROM,
+ MT_EE_SRC_EFUSE,
+ MT_EE_SRC_FLASH,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
new file mode 100644
index 000000000000..15cc8f33b34d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/etherdevice.h>
+#include "mt7603.h"
+#include "mac.h"
+#include "eeprom.h"
+
+const struct mt76_driver_ops mt7603_drv_ops = {
+ .txwi_size = MT_TXD_SIZE,
+ .tx_prepare_skb = mt7603_tx_prepare_skb,
+ .tx_complete_skb = mt7603_tx_complete_skb,
+ .rx_skb = mt7603_queue_rx_skb,
+ .rx_poll_complete = mt7603_rx_poll_complete,
+ .sta_ps = mt7603_sta_ps,
+ .sta_add = mt7603_sta_add,
+ .sta_assoc = mt7603_sta_assoc,
+ .sta_remove = mt7603_sta_remove,
+ .update_survey = mt7603_update_channel,
+};
+
+static void
+mt7603_set_tmac_template(struct mt7603_dev *dev)
+{
+ u32 desc[5] = {
+ [1] = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 0xf),
+ [3] = MT_TXD5_SW_POWER_MGMT
+ };
+ u32 addr;
+ int i;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR);
+ addr += MT_CLIENT_TMAC_INFO_TEMPLATE;
+ for (i = 0; i < ARRAY_SIZE(desc); i++)
+ mt76_wr(dev, addr + 4 * i, desc[i]);
+}
+
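+/*
+ * Set up the PSE DMA scheduler: distribute page quotas between the TX queues
+ * (AC, MCU, beacon, multicast) and program queue priorities and airtime
+ * thresholds.
+ */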
+static void
+mt7603_dma_sched_init(struct mt7603_dev *dev)
+{
+ int page_size = 128;
+ int page_count;
+ int max_len = 1792;
+ int max_amsdu_pages = 4096 / page_size;
+ int max_mcu_len = 4096;
+ int max_beacon_len = 512 * 4 + max_len;
+ int max_mcast_pages = 4 * max_len / page_size;
+ int reserved_count = 0;
+ int beacon_pages;
+ int mcu_pages;
+ int i;
+
+ page_count = mt76_get_field(dev, MT_PSE_FC_P0,
+ MT_PSE_FC_P0_MAX_QUOTA);
+ beacon_pages = 4 * (max_beacon_len / page_size);
+ mcu_pages = max_mcu_len / page_size;
+
+ mt76_wr(dev, MT_PSE_FRP,
+ FIELD_PREP(MT_PSE_FRP_P0, 7) |
+ FIELD_PREP(MT_PSE_FRP_P1, 6) |
+ FIELD_PREP(MT_PSE_FRP_P2_RQ2, 4));
+
+ mt76_wr(dev, MT_HIGH_PRIORITY_1, 0x55555553);
+ mt76_wr(dev, MT_HIGH_PRIORITY_2, 0x78555555);
+
+ mt76_wr(dev, MT_QUEUE_PRIORITY_1, 0x2b1a096e);
+ mt76_wr(dev, MT_QUEUE_PRIORITY_2, 0x785f4d3c);
+
+ mt76_wr(dev, MT_PRIORITY_MASK, 0xffffffff);
+
+ mt76_wr(dev, MT_SCH_1, page_count | (2 << 28));
+ mt76_wr(dev, MT_SCH_2, max_amsdu_pages);
+
+ for (i = 0; i <= 4; i++)
+ mt76_wr(dev, MT_PAGE_COUNT(i), max_amsdu_pages);
+ reserved_count += 5 * max_amsdu_pages;
+
+ mt76_wr(dev, MT_PAGE_COUNT(5), mcu_pages);
+ reserved_count += mcu_pages;
+
+ mt76_wr(dev, MT_PAGE_COUNT(7), beacon_pages);
+ reserved_count += beacon_pages;
+
+ mt76_wr(dev, MT_PAGE_COUNT(8), max_mcast_pages);
+ reserved_count += max_mcast_pages;
+
+ if (is_mt7603(dev))
+ reserved_count = 0;
+
+ mt76_wr(dev, MT_RSV_MAX_THRESH, page_count - reserved_count);
+
+ if (is_mt7603(dev) && mt76xx_rev(dev) >= MT7603_REV_E2) {
+ mt76_wr(dev, MT_GROUP_THRESH(0),
+ page_count - beacon_pages - mcu_pages);
+ mt76_wr(dev, MT_GROUP_THRESH(1), beacon_pages);
+ mt76_wr(dev, MT_BMAP_0, 0x0080ff5f);
+ mt76_wr(dev, MT_GROUP_THRESH(2), mcu_pages);
+ mt76_wr(dev, MT_BMAP_1, 0x00000020);
+ } else {
+ mt76_wr(dev, MT_GROUP_THRESH(0), page_count);
+ mt76_wr(dev, MT_BMAP_0, 0xffff);
+ }
+
+ mt76_wr(dev, MT_SCH_4, 0);
+
+ for (i = 0; i <= 15; i++)
+ mt76_wr(dev, MT_TXTIME_THRESH(i), 0xfffff);
+
+ mt76_set(dev, MT_SCH_4, BIT(6));
+}
+
+static void
+mt7603_phy_init(struct mt7603_dev *dev)
+{
+ int rx_chains = dev->mt76.antenna_mask;
+ int tx_chains = hweight8(rx_chains) - 1;
+
+ mt76_rmw(dev, MT_WF_RMAC_RMCR,
+ (MT_WF_RMAC_RMCR_SMPS_MODE |
+ MT_WF_RMAC_RMCR_RX_STREAMS),
+ (FIELD_PREP(MT_WF_RMAC_RMCR_SMPS_MODE, 3) |
+ FIELD_PREP(MT_WF_RMAC_RMCR_RX_STREAMS, rx_chains)));
+
+ mt76_rmw_field(dev, MT_TMAC_TCR, MT_TMAC_TCR_TX_STREAMS,
+ tx_chains);
+
+ dev->agc0 = mt76_rr(dev, MT_AGC(0));
+ dev->agc3 = mt76_rr(dev, MT_AGC(3));
+}
+
+static void
+mt7603_mac_init(struct mt7603_dev *dev)
+{
+ u8 bc_addr[ETH_ALEN];
+ u32 addr;
+ int i;
+
+ mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_0,
+ (MT_AGG_SIZE_LIMIT(0) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(1) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(2) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(3) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT));
+
+ mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_1,
+ (MT_AGG_SIZE_LIMIT(4) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(5) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(6) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) |
+ (MT_AGG_SIZE_LIMIT(7) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT));
+
+ mt76_wr(dev, MT_AGG_LIMIT,
+ FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(3), 24));
+
+ mt76_wr(dev, MT_AGG_LIMIT_1,
+ FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) |
+ FIELD_PREP(MT_AGG_LIMIT_AC(3), 24));
+
+ mt76_wr(dev, MT_AGG_CONTROL,
+ FIELD_PREP(MT_AGG_CONTROL_BAR_RATE, 0x4b) |
+ FIELD_PREP(MT_AGG_CONTROL_CFEND_RATE, 0x69) |
+ MT_AGG_CONTROL_NO_BA_AR_RULE);
+
+ mt76_wr(dev, MT_AGG_RETRY_CONTROL,
+ FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) |
+ FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15));
+
+ mt76_rmw(dev, MT_DMA_DCR0, ~0xfffc, 4096);
+
+ mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13));
+ mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13));
+
+ mt76_clear(dev, MT_WF_RMAC_TMR_PA, BIT(31));
+
+ mt76_set(dev, MT_WF_RMACDR, MT_WF_RMACDR_MAXLEN_20BIT);
+ mt76_rmw(dev, MT_WF_RMAC_MAXMINLEN, 0xffffff, 0x19000);
+
+ mt76_wr(dev, MT_WF_RFCR1, 0);
+
+ mt76_set(dev, MT_TMAC_TCR, MT_TMAC_TCR_RX_RIFS_MODE);
+
+ mt7603_set_tmac_template(dev);
+
+ /* Enable RX group to HIF */
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR);
+ mt76_set(dev, addr + MT_CLIENT_RXINF, MT_CLIENT_RXINF_RXSH_GROUPS);
+
+ /* Enable RX group to MCU */
+ mt76_set(dev, MT_DMA_DCR1, GENMASK(13, 11));
+
+ mt76_rmw_field(dev, MT_AGG_PCR_RTS, MT_AGG_PCR_RTS_PKT_THR, 3);
+ mt76_set(dev, MT_TMAC_PCR, MT_TMAC_PCR_SPE_EN);
+
+ /* include preamble detection in CCA trigger signal */
+ mt76_rmw_field(dev, MT_TXREQ, MT_TXREQ_CCA_SRC_SEL, 2);
+
+ mt76_wr(dev, MT_RXREQ, 4);
+
+ /* Configure all rx packets to HIF */
+ mt76_wr(dev, MT_DMA_RCFR0, 0xc0000000);
+
+ /* Configure MCU txs selection with aggregation */
+ mt76_wr(dev, MT_DMA_TCFR0,
+ FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */
+ MT_DMA_TCFR_TXS_AGGR_COUNT);
+
+ /* Configure HIF txs selection with aggregation */
+ mt76_wr(dev, MT_DMA_TCFR1,
+ FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */
+ MT_DMA_TCFR_TXS_AGGR_COUNT | /* Maximum count */
+ MT_DMA_TCFR_TXS_BIT_MAP);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_1, MT_PSE_WTBL_2_PHYS_ADDR);
+
+ for (i = 0; i < MT7603_WTBL_SIZE; i++)
+ mt7603_wtbl_clear(dev, i);
+
+ eth_broadcast_addr(bc_addr);
+ mt7603_wtbl_init(dev, MT7603_WTBL_RESERVED, -1, bc_addr);
+ dev->global_sta.wcid.idx = MT7603_WTBL_RESERVED;
+ rcu_assign_pointer(dev->mt76.wcid[MT7603_WTBL_RESERVED],
+ &dev->global_sta.wcid);
+
+ mt76_rmw_field(dev, MT_LPON_BTEIR, MT_LPON_BTEIR_MBSS_MODE, 2);
+ mt76_rmw_field(dev, MT_WF_RMACDR, MT_WF_RMACDR_MBSSID_MASK, 2);
+
+ mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7));
+ mt76_wr(dev, MT_AGG_ARDCR,
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1),
+ max_t(int, 0, MT7603_RATE_RETRY - 2)) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7603_RATE_RETRY - 1));
+
+ mt76_wr(dev, MT_AGG_ARCR,
+ (MT_AGG_ARCR_INIT_RATE1 |
+ FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+ MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+ FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+ FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
+
+ mt76_set(dev, MT_WTBL_RMVTCR, MT_WTBL_RMVTCR_RX_MV_MODE);
+
+ mt76_clear(dev, MT_SEC_SCR, MT_SEC_SCR_MASK_ORDER);
+ mt76_clear(dev, MT_SEC_SCR, BIT(18));
+
+ /* Set secondary beacon time offsets */
+ for (i = 0; i <= 4; i++)
+ mt76_rmw_field(dev, MT_LPON_SBTOR(i), MT_LPON_SBTOR_TIME_OFFSET,
+ (i + 1) * (20 + 4096));
+}
+
+static int
+mt7603_init_hardware(struct mt7603_dev *dev)
+{
+ int i, ret;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ ret = mt7603_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7603_dma_init(dev);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850);
+ mt7603_mac_dma_start(dev);
+ dev->rxfilter = mt76_rr(dev, MT_WF_RFCR);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ for (i = 0; i < MT7603_WTBL_SIZE; i++) {
+ mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE |
+ FIELD_PREP(MT_PSE_RTA_TAG_ID, i));
+ mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);
+ }
+
+ ret = mt7603_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ mt7603_dma_sched_init(dev);
+ mt7603_mcu_set_eeprom(dev);
+ mt7603_phy_init(dev);
+ mt7603_mac_init(dev);
+
+ return 0;
+}
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+static struct ieee80211_rate mt7603_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = MT7603_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ }
+};
+
+static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+ u8 delay_off)
+{
+ struct mt7603_dev *dev = container_of(mt76, struct mt7603_dev,
+ mt76);
+ u32 val, addr;
+
+ val = MT_LED_STATUS_DURATION(0xffff) |
+ MT_LED_STATUS_OFF(delay_off) |
+ MT_LED_STATUS_ON(delay_on);
+
+ addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+ addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ addr = mt7603_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+}
+
+static int mt7603_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7603_led_set_config(mt76, delta_on, delta_off);
+ return 0;
+}
+
+static void mt7603_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+
+ if (!brightness)
+ mt7603_led_set_config(mt76, 0, 0xff);
+ else
+ mt7603_led_set_config(mt76, 0xff, 0);
+}
+
+static u32 __mt7603_reg_addr(struct mt7603_dev *dev, u32 addr)
+{
+ if (addr < 0x100000)
+ return addr;
+
+ return mt7603_reg_map(dev, addr);
+}
+
+static u32 mt7603_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ u32 addr = __mt7603_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7603_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ u32 addr = __mt7603_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7603_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ u32 addr = __mt7603_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
+static void
+mt7603_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt7603_dev *dev = hw->priv;
+
+ dev->ed_monitor = request->dfs_region == NL80211_DFS_ETSI;
+}
+
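+/*
+ * Decode a TX power offset byte: bit 7 acts as a valid flag, bits 5:0 carry
+ * the magnitude and a cleared bit 6 makes the value negative.
+ */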
+static int
+mt7603_txpower_signed(int val)
+{
+ bool sign = val & BIT(6);
+
+ if (!(val & BIT(7)))
+ return 0;
+
+ val &= GENMASK(5, 0);
+ if (!sign)
+ val = -val;
+
+ return val;
+}
+
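+/*
+ * Derive the channel power limit from the EEPROM 2.4 GHz target power plus
+ * the largest per-rate offset and apply it to all channels of the band.
+ */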
+static void
+mt7603_init_txpower(struct mt7603_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ u8 *eeprom = (u8 *)dev->mt76.eeprom.data;
+ int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
+ u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
+ int max_offset, cur_offset;
+ int i;
+
+ if (target_power & BIT(6))
+ target_power = -(target_power & GENMASK(5, 0));
+
+ max_offset = 0;
+ for (i = 0; i < 14; i++) {
+ cur_offset = mt7603_txpower_signed(rate_power[i]);
+ max_offset = max(max_offset, cur_offset);
+ }
+
+ target_power += max_offset;
+
+ dev->tx_power_limit = target_power;
+ dev->mt76.txpower_cur = target_power;
+
+ target_power = DIV_ROUND_UP(target_power, 2);
+
+ /* add 3 dBm for 2SS devices (combined output) */
+ if (dev->mt76.antenna_mask & BIT(1))
+ target_power += 3;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ chan->max_power = target_power;
+ }
+}
+
+int mt7603_register_device(struct mt7603_dev *dev)
+{
+ struct mt76_bus_ops *bus_ops;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7603_rr;
+ bus_ops->wr = mt7603_wr;
+ bus_ops->rmw = mt7603_rmw;
+ dev->mt76.bus = bus_ops;
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
+ tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
+ (unsigned long)dev);
+
+ /* Check for 7688, which only has 1SS */
+ dev->mt76.antenna_mask = 3;
+ if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
+ dev->mt76.antenna_mask = 1;
+
+ dev->slottime = 9;
+
+ ret = mt7603_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ hw->queues = 4;
+ hw->max_rates = 3;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 11;
+
+ hw->sta_data_size = sizeof(struct mt7603_sta);
+ hw->vif_data_size = sizeof(struct mt7603_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7603_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
+ }
+
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ wiphy->reg_notifier = mt7603_regd_notifier;
+
+ ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
+ ARRAY_SIZE(mt7603_rates));
+ if (ret)
+ return ret;
+
+ mt7603_init_debugfs(dev);
+ mt7603_init_txpower(dev, &dev->mt76.sband_2g.sband);
+
+ return 0;
+}
+
+void mt7603_unregister_device(struct mt7603_dev *dev)
+{
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ mt76_unregister_device(&dev->mt76);
+ mt7603_mcu_exit(dev);
+ mt7603_dma_cleanup(dev);
+ ieee80211_free_hw(mt76_hw(dev));
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
new file mode 100644
index 000000000000..0a0115861b51
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -0,0 +1,1749 @@
+// SPDX-License-Identifier: ISC
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7603.h"
+#include "mac.h"
+
+#define MT_PSE_PAGE_SIZE 128
+
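+/*
+ * Expand a per-AC bitmask into the bit layout used by the WF_ARB TX
+ * start/stop registers (four queue bits per AC).
+ */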
+static u32
+mt7603_ac_queue_mask0(u32 mask)
+{
+ u32 ret = 0;
+
+ ret |= GENMASK(3, 0) * !!(mask & BIT(0));
+ ret |= GENMASK(8, 5) * !!(mask & BIT(1));
+ ret |= GENMASK(13, 10) * !!(mask & BIT(2));
+ ret |= GENMASK(19, 16) * !!(mask & BIT(3));
+ return ret;
+}
+
+static void
+mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
+{
+ mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
+}
+
+static void
+mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
+{
+ mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
+}
+
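+/*
+ * Program the CCK/OFDM timeouts and interframe spacing based on coverage
+ * class and slot time, with TX/RX arbitration paused during the update.
+ */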
+void mt7603_mac_set_timing(struct mt7603_dev *dev)
+{
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
+ int offset = 3 * dev->coverage_class;
+ u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+ int sifs;
+ u32 val;
+
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+ sifs = 16;
+ else
+ sifs = 10;
+
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ udelay(1);
+
+ mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
+ mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
+ mt76_wr(dev, MT_IFS,
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, dev->slottime));
+
+ if (dev->slottime < 20)
+ val = MT7603_CFEND_RATE_DEFAULT;
+ else
+ val = MT7603_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);
+
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+}
+
+static void
+mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+}
+
+static u32
+mt7603_wtbl1_addr(int idx)
+{
+ return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
+}
+
+static u32
+mt7603_wtbl2_addr(int idx)
+{
+ /* Mapped to WTBL2 */
+ return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
+}
+
+static u32
+mt7603_wtbl3_addr(int idx)
+{
+ u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);
+
+ return base + idx * MT_WTBL3_SIZE;
+}
+
+static u32
+mt7603_wtbl4_addr(int idx)
+{
+ u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);
+
+ return base + idx * MT_WTBL4_SIZE;
+}
+
+void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
+ const u8 *mac_addr)
+{
+ const void *_mac = mac_addr;
+ u32 addr = mt7603_wtbl1_addr(idx);
+ u32 w0 = 0, w1 = 0;
+ int i;
+
+ if (_mac) {
+ w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
+ get_unaligned_le16(_mac + 4));
+ w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
+ get_unaligned_le32(_mac));
+ }
+
+ if (vif < 0)
+ vif = 0;
+ else
+ w0 |= MT_WTBL1_W0_RX_CHECK_A1;
+ w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);
+
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ mt76_set(dev, addr + 0 * 4, w0);
+ mt76_set(dev, addr + 1 * 4, w1);
+ mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);
+
+ mt76_stop_tx_ac(dev, GENMASK(3, 0));
+ addr = mt7603_wtbl2_addr(idx);
+ for (i = 0; i < MT_WTBL2_SIZE; i += 4)
+ mt76_wr(dev, addr + i, 0);
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
+ mt76_start_tx_ac(dev, GENMASK(3, 0));
+
+ addr = mt7603_wtbl3_addr(idx);
+ for (i = 0; i < MT_WTBL3_SIZE; i += 4)
+ mt76_wr(dev, addr + i, 0);
+
+ addr = mt7603_wtbl4_addr(idx);
+ for (i = 0; i < MT_WTBL4_SIZE; i += 4)
+ mt76_wr(dev, addr + i, 0);
+}
+
+static void
+mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
+{
+ u32 addr = mt7603_wtbl1_addr(idx);
+ u32 val = mt76_rr(dev, addr + 3 * 4);
+
+ val &= ~MT_WTBL1_W3_SKIP_TX;
+ val |= enabled * MT_WTBL1_W3_SKIP_TX;
+
+ mt76_wr(dev, addr + 3 * 4, val);
+}
+
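+/*
+ * Flush pending TX frames for a station entry: either drop them via the PSE
+ * free queue (abort) or redirect them to the MCU queue.
+ */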
+void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
+{
+ int i, port, queue;
+
+ if (abort) {
+ port = 3; /* PSE */
+ queue = 8; /* free queue */
+ } else {
+ port = 0; /* HIF */
+ queue = 1; /* MCU queue */
+ }
+
+ mt7603_wtbl_set_skip_tx(dev, idx, true);
+
+ mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
+ FIELD_PREP(MT_TX_ABORT_WCID, idx));
+
+ for (i = 0; i < 4; i++) {
+ mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+ FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
+ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
+ FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
+ FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));
+
+ WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
+ 0, 5000));
+ }
+
+ mt76_wr(dev, MT_TX_ABORT, 0);
+
+ mt7603_wtbl_set_skip_tx(dev, idx, false);
+}
+
+void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ bool enabled)
+{
+ u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);
+
+ if (sta->smps == enabled)
+ return;
+
+ mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
+ sta->smps = enabled;
+}
+
+void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ bool enabled)
+{
+ int idx = sta->wcid.idx;
+ u32 addr;
+
+ spin_lock_bh(&dev->ps_lock);
+
+ if (sta->ps == enabled)
+ goto out;
+
+ mt76_wr(dev, MT_PSE_RTA,
+ FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
+ FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
+ FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
+ FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
+ MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);
+
+ mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);
+
+ if (enabled)
+ mt7603_filter_tx(dev, idx, false);
+
+ addr = mt7603_wtbl1_addr(idx);
+ mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+ mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
+ enabled * MT_WTBL1_W3_POWER_SAVE);
+ mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+ sta->ps = enabled;
+
+out:
+ spin_unlock_bh(&dev->ps_lock);
+}
+
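+/*
+ * Reset a WTBL1 entry to its default state, link it to its WTBL2/3/4 slots
+ * and clear the BA state and the RX/TX/admission counters.
+ */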
+void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
+{
+ int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
+ int wtbl2_frame = idx / wtbl2_frame_size;
+ int wtbl2_entry = idx % wtbl2_frame_size;
+
+ int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
+ int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
+ int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
+ int wtbl3_entry = (idx % wtbl3_frame_size) * 2;
+
+ int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
+ int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
+ int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
+ int wtbl4_entry = idx % wtbl4_frame_size;
+
+ u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
+ int i;
+
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ mt76_wr(dev, addr + 0 * 4,
+ MT_WTBL1_W0_RX_CHECK_A1 |
+ MT_WTBL1_W0_RX_CHECK_A2 |
+ MT_WTBL1_W0_RX_VALID);
+ mt76_wr(dev, addr + 1 * 4, 0);
+ mt76_wr(dev, addr + 2 * 4, 0);
+
+ mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+
+ mt76_wr(dev, addr + 3 * 4,
+ FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
+ FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
+ FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
+ MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
+ mt76_wr(dev, addr + 4 * 4,
+ FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
+ FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
+ FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));
+
+ mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
+
+ addr = mt7603_wtbl2_addr(idx);
+
+ /* Clear BA information */
+ mt76_wr(dev, addr + (15 * 4), 0);
+
+ mt76_stop_tx_ac(dev, GENMASK(3, 0));
+ for (i = 2; i <= 4; i++)
+ mt76_wr(dev, addr + (i * 4), 0);
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
+ mt76_start_tx_ac(dev, GENMASK(3, 0));
+
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+ mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+}
+
+void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
+{
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ int idx = msta->wcid.idx;
+ u32 addr;
+ u32 val;
+
+ addr = mt7603_wtbl1_addr(idx);
+
+ val = mt76_rr(dev, addr + 2 * 4);
+ val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
+ val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
+ FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
+ MT_WTBL1_W2_TXS_BAF_REPORT;
+
+ if (sta->ht_cap.cap)
+ val |= MT_WTBL1_W2_HT;
+ if (sta->vht_cap.cap)
+ val |= MT_WTBL1_W2_VHT;
+
+ mt76_wr(dev, addr + 2 * 4, val);
+
+ addr = mt7603_wtbl2_addr(idx);
+ val = mt76_rr(dev, addr + 9 * 4);
+ val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
+ MT_WTBL2_W9_SHORT_GI_80);
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ val |= MT_WTBL2_W9_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ val |= MT_WTBL2_W9_SHORT_GI_40;
+ mt76_wr(dev, addr + 9 * 4, val);
+}
+
+void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
+{
+ mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
+ mt76_wr(dev, MT_BA_CONTROL_1,
+ (get_unaligned_le16(addr + 4) |
+ FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
+ MT_BA_CONTROL_1_RESET));
+}
+
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+ int ba_size)
+{
+ u32 addr = mt7603_wtbl2_addr(wcid);
+ u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
+ (MT_WTBL2_W15_BA_WIN_SIZE <<
+ (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
+ u32 tid_val;
+ int i;
+
+ if (ba_size < 0) {
+ /* disable */
+ mt76_clear(dev, addr + (15 * 4), tid_mask);
+ return;
+ }
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ mt7603_mac_stop(dev);
+ switch (tid) {
+ case 0:
+ mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
+ break;
+ case 1:
+ mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
+ break;
+ case 2:
+ mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
+ ssn);
+ mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
+ ssn >> 8);
+ break;
+ case 3:
+ mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
+ break;
+ case 4:
+ mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
+ break;
+ case 5:
+ mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
+ ssn);
+ mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
+ ssn >> 4);
+ break;
+ case 6:
+ mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
+ break;
+ case 7:
+ mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
+ break;
+ }
+ mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
+ mt7603_mac_start(dev);
+
+ for (i = 7; i > 0; i--) {
+ if (ba_size >= MT_AGG_SIZE_LIMIT(i))
+ break;
+ }
+
+ tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
+ i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);
+
+ mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
+}
+
+static int
+mt7603_get_rate(struct mt7603_dev *dev, struct ieee80211_supported_band *sband,
+ int idx, bool cck)
+{
+ int offset = 0;
+ int len = sband->n_bitrates;
+ int i;
+
+ if (cck) {
+ if (sband == &dev->mt76.sband_5g.sband)
+ return 0;
+
+ idx &= ~BIT(2); /* short preamble */
+ } else if (sband == &dev->mt76.sband_2g.sband) {
+ offset = 4;
+ }
+
+ for (i = offset; i < len; i++) {
+ if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
+ return i;
+ }
+
+ return 0;
+}
+
+static struct mt76_wcid *
+mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
+{
+ struct mt7603_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7603_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
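+/*
+ * Re-insert the 8-byte CCMP header (PN + key index) that the hardware
+ * stripped from a decrypted frame and clear the IV_STRIPPED flag.
+ */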
+static void
+mt7603_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u8 *pn = status->iv;
+ u8 *hdr;
+
+ __skb_push(skb, 8);
+ memmove(skb->data, skb->data + 8, hdr_len);
+ hdr = skb->data + hdr_len;
+
+ hdr[0] = pn[5];
+ hdr[1] = pn[4];
+ hdr[2] = 0;
+ hdr[3] = 0x20 | (key_id << 6);
+ hdr[4] = pn[3];
+ hdr[5] = pn[2];
+ hdr[6] = pn[1];
+ hdr[7] = pn[0];
+
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+}
+
+int
+mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hdr *hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
+ bool insert_ccmp_hdr = false;
+ bool remove_pad;
+ int idx;
+ int i;
+
+ memset(status, 0, sizeof(*status));
+
+ i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
+ sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
+ i >>= 1;
+
+ idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
+ status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);
+
+ status->band = sband->band;
+ if (i < sband->n_channels)
+ status->freq = sband->channels[i].center_freq;
+
+ if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
+ !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ rxd += 4;
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ }
+
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg0 = le32_to_cpu(rxd[0]);
+ u32 rxdg3 = le32_to_cpu(rxd[3]);
+ bool cck = false;
+
+ i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
+ switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ /* fall through */
+ case MT_PHY_TYPE_OFDM:
+ i = mt7603_get_rate(dev, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 15)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rxdg0 & MT_RXV1_HT_SHORT_GI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rxdg0 & MT_RXV1_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
+ FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
+
+ status->rate_idx = i;
+
+ status->chains = dev->mt76.antenna_mask;
+ status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
+ dev->rssi_offset[0];
+ status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
+ dev->rssi_offset[1];
+
+ status->signal = status->chain_signal[0];
+ if (status->chains & BIT(1))
+ status->signal = max(status->signal,
+ status->chain_signal[1]);
+
+ if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
+ status->bw = RATE_INFO_BW_40;
+
+ rxd += 6;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt7603_insert_ccmp_hdr(skb, key_id);
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control);
+ status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->seqno = le16_to_cpu(hdr->seq_ctrl) >> 4;
+
+ return 0;
+}
+
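+/*
+ * Convert a mac80211 TX rate entry into the hardware rate value (PHY mode,
+ * rate index, STBC) and report the selected bandwidth via *bw.
+ */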
+static u16
+mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
+ const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
+{
+ u8 phy, nss, rate_idx;
+ u16 rateval;
+
+ *bw = 0;
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ nss = 1;
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ }
+
+ rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
+ FIELD_PREP(MT_TX_RATE_MODE, phy));
+
+ if (stbc && nss == 1)
+ rateval |= MT_TX_RATE_STBC;
+
+ return rateval;
+}
+
+void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates)
+{
+ int wcid = sta->wcid.idx;
+ u32 addr = mt7603_wtbl2_addr(wcid);
+ bool stbc = false;
+ int n_rates = sta->n_rates;
+ u8 bw, bw_prev, bw_idx = 0;
+ u16 val[4];
+ u16 probe_val;
+ u32 w9 = mt76_rr(dev, addr + 9 * 4);
+ int i;
+
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return;
+
+ for (i = n_rates; i < 4; i++)
+ rates[i] = rates[n_rates - 1];
+
+ w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
+ MT_WTBL2_W9_SHORT_GI_80;
+
+ val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+ bw_prev = bw;
+
+ if (probe_rate) {
+ probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+ if (bw)
+ bw_idx = 1;
+ else
+ bw_prev = 0;
+ } else {
+ probe_val = val[0];
+ }
+
+ w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
+ w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);
+
+ val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+ if (bw_prev) {
+ bw_idx = 3;
+ bw_prev = bw;
+ }
+
+ val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+ if (bw_prev) {
+ bw_idx = 5;
+ bw_prev = bw;
+ }
+
+ val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+ if (bw_prev)
+ bw_idx = 7;
+
+ w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
+ bw_idx ? bw_idx - 1 : 7);
+
+ mt76_wr(dev, MT_WTBL_RIUCR0, w9);
+
+ mt76_wr(dev, MT_WTBL_RIUCR1,
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR2,
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR3,
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
+
+ mt76_wr(dev, MT_WTBL_UPDATE,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
+ MT_WTBL_UPDATE_RATE_UPDATE |
+ MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+ if (!sta->wcid.tx_rate_set)
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
+ sta->wcid.tx_rate_set = true;
+}
+
+static enum mt7603_cipher_type
+mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(key_data + 16, key->key + 24, 8);
+ memcpy(key_data + 24, key->key + 16, 8);
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
+ struct ieee80211_key_conf *key)
+{
+ enum mt7603_cipher_type cipher;
+ u32 addr = mt7603_wtbl3_addr(wcid);
+ u8 key_data[32];
+ int key_len = sizeof(key_data);
+
+ cipher = mt7603_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
+ addr += key->keyidx * 16;
+ key_len = 16;
+ }
+
+ mt76_wr_copy(dev, addr, key_data, key_len);
+
+ addr = mt7603_wtbl1_addr(wcid);
+ mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
+ if (key)
+ mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
+ mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);
+
+ return 0;
+}
+
+static int
+mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ int pid, struct ieee80211_key_conf *key)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7603_vif *mvif;
+ int wlan_idx;
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ int tx_count = 8;
+ u8 frame_type, frame_subtype;
+ u16 fc = le16_to_cpu(hdr->frame_control);
+ u8 vif_idx = 0;
+ u32 val;
+ u8 bw;
+
+ if (vif) {
+ mvif = (struct mt7603_vif *)vif->drv_priv;
+ vif_idx = mvif->idx;
+ if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
+ vif_idx += 0x10;
+ }
+
+ if (sta) {
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+
+ tx_count = msta->rate_count;
+ }
+
+ if (wcid)
+ wlan_idx = wcid->idx;
+ else
+ wlan_idx = MT7603_WTBL_RESERVED;
+
+ frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
+ frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
+ FIELD_PREP(MT_TXD1_TID,
+ skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
+ FIELD_PREP(MT_TXD1_PROTECTED, !!key);
+ txwi[1] = cpu_to_le32(val);
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
+ FIELD_PREP(MT_TXD2_MULTICAST,
+ is_multicast_ether_addr(hdr->addr1));
+ txwi[2] = cpu_to_le32(val);
+
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ txwi[4] = 0;
+
+ val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
+ FIELD_PREP(MT_TXD5_PID, pid);
+ txwi[5] = cpu_to_le32(val);
+
+ txwi[6] = 0;
+
+ if (rate->idx >= 0 && rate->count &&
+ !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+ bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
+ u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);
+
+ txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_BW, bw) |
+ FIELD_PREP(MT_TXD6_TX_RATE, rateval);
+ txwi[6] |= cpu_to_le32(val);
+
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
+
+ if (!(rate->flags & IEEE80211_TX_RC_MCS))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ tx_count = rate->count;
+ }
+
+ /* use maximum tx count for beacons and buffered multicast */
+ if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
+ tx_count = 0x1f;
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
+ FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+ txwi[3] = cpu_to_le32(val);
+
+ if (key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+
+ txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
+ txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
+ txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
+ }
+
+ txwi[7] = 0;
+
+ return 0;
+}
+
+int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ int pid;
+
+ if (!wcid)
+ wcid = &dev->global_sta.wcid;
+
+ if (sta) {
+ msta = (struct mt7603_sta *)sta->drv_priv;
+
+ if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
+ IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
+ (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
+ mt7603_wtbl_set_ps(dev, msta, false);
+ }
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ spin_lock_bh(&dev->mt76.lock);
+ msta->rate_probe = true;
+ mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
+ msta->rates);
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key);
+
+ return 0;
+}
+
+static bool
+mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ struct ieee80211_tx_info *info, __le32 *txs_data)
+{
+ struct ieee80211_supported_band *sband;
+ int final_idx = 0;
+ u32 final_rate;
+ u32 final_rate_flags;
+ bool final_mpdu;
+ bool ack_timeout;
+ bool fixed_rate;
+ bool probe;
+ bool ampdu;
+ bool cck = false;
+ int count;
+ u32 txs;
+ u8 pid;
+ int idx;
+ int i;
+
+ fixed_rate = info->status.rates[0].count;
+ probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+
+ txs = le32_to_cpu(txs_data[4]);
+ final_mpdu = txs & MT_TXS4_ACKED_MPDU;
+ ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
+ pid = FIELD_GET(MT_TXS4_PID, txs);
+ count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
+
+ txs = le32_to_cpu(txs_data[0]);
+ final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+ ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
+
+ if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
+ return false;
+
+ if (txs & MT_TXS0_QUEUE_TIMEOUT)
+ return false;
+
+ if (!ack_timeout)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
+
+ if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
+ info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
+
+ if (fixed_rate && !probe) {
+ info->status.rates[0].count = count;
+ goto out;
+ }
+
+ for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+ int cur_count = min_t(int, count, 2 * MT7603_RATE_RETRY);
+
+ if (!i && probe) {
+ cur_count = 1;
+ } else {
+ info->status.rates[i] = sta->rates[idx];
+ idx++;
+ }
+
+ if (i && info->status.rates[i].idx < 0) {
+ info->status.rates[i - 1].count += count;
+ break;
+ }
+
+ if (!count) {
+ info->status.rates[i].idx = -1;
+ break;
+ }
+
+ info->status.rates[i].count = cur_count;
+ final_idx = i;
+ count -= cur_count;
+ }
+
+out:
+ final_rate_flags = info->status.rates[final_idx].flags;
+
+ switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ /* fall through */
+ case MT_PHY_TYPE_OFDM:
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &dev->mt76.sband_5g.sband;
+ else
+ sband = &dev->mt76.sband_2g.sband;
+ final_rate &= GENMASK(5, 0);
+ final_rate = mt7603_get_rate(dev, sband, final_rate, cck);
+ final_rate_flags = 0;
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ final_rate_flags |= IEEE80211_TX_RC_MCS;
+ final_rate &= GENMASK(5, 0);
+ if (final_rate > 15)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ info->status.rates[final_idx].idx = final_rate;
+ info->status.rates[final_idx].flags = final_rate_flags;
+
+ return true;
+}
+
+static bool
+mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
+ __le32 *txs_data)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ if (pid < MT_PACKET_ID_FIRST)
+ return false;
+
+ mt76_tx_status_lock(mdev, &list);
+ skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ spin_lock_bh(&dev->mt76.lock);
+ if (sta->rate_probe) {
+ mt7603_wtbl_set_rates(dev, sta, NULL,
+ sta->rates);
+ sta->rate_probe = false;
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ }
+
+ mt76_tx_status_skb_done(mdev, skb, &list);
+ }
+ mt76_tx_status_unlock(mdev, &list);
+
+ return !!skb;
+}
+
+void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt7603_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ __le32 *txs_data = data;
+ u32 txs;
+ u8 wcidx;
+ u8 pid;
+
+ txs = le32_to_cpu(txs_data[4]);
+ pid = FIELD_GET(MT_TXS4_PID, txs);
+ txs = le32_to_cpu(txs_data[3]);
+ wcidx = FIELD_GET(MT_TXS3_WCID, txs);
+
+ if (pid == MT_PACKET_ID_NO_ACK)
+ return;
+
+ if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
+
+ msta = container_of(wcid, struct mt7603_sta, wcid);
+ sta = wcid_to_sta(wcid);
+
+ if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
+ goto out;
+
+ if (wcidx >= MT7603_WTBL_STA || !sta)
+ goto out;
+
+ if (mt7603_fill_txs(dev, msta, &info, txs_data))
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct sk_buff *skb = e->skb;
+
+ if (!e->txwi) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (q - dev->mt76.q_tx < 4)
+ dev->tx_hang_check = 0;
+
+ mt76_tx_complete_skb(mdev, skb);
+}
+
+static bool
+wait_for_wpdma(struct mt7603_dev *dev)
+{
+ return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000);
+}
+
+static void mt7603_pse_reset(struct mt7603_dev *dev)
+{
+ /* Clear previous reset result */
+ if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+ mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);
+
+ /* Reset PSE */
+ mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
+
+ if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
+ MT_MCU_DEBUG_RESET_PSE_S,
+ MT_MCU_DEBUG_RESET_PSE_S, 500)) {
+ dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
+ mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
+ } else {
+ dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
+ mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
+ }
+
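+ /* Clear the failure counter after three consecutive failed attempts,
+  * presumably so that the next watchdog reset retries the full sequence.
+  */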
+ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
+ dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
+}
+
+void mt7603_mac_dma_start(struct mt7603_dev *dev)
+{
+ mt7603_mac_start(dev);
+
+ wait_for_wpdma(dev);
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ (MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));
+
+ mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+}
+
+void mt7603_mac_start(struct mt7603_dev *dev)
+{
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
+ mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
+}
+
+void mt7603_mac_stop(struct mt7603_dev *dev)
+{
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
+ mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
+}
+
+void mt7603_pse_client_reset(struct mt7603_dev *dev)
+{
+ u32 addr;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
+ MT_CLIENT_RESET_TX);
+
+ /* Clear previous reset state */
+ mt76_clear(dev, addr,
+ MT_CLIENT_RESET_TX_R_E_1 |
+ MT_CLIENT_RESET_TX_R_E_2 |
+ MT_CLIENT_RESET_TX_R_E_1_S |
+ MT_CLIENT_RESET_TX_R_E_2_S);
+
+ /* Start PSE client TX abort */
+ mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
+ mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
+ MT_CLIENT_RESET_TX_R_E_1_S, 500);
+
+ mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
+
+ /* Wait for PSE client to clear TX FIFO */
+ mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
+ MT_CLIENT_RESET_TX_R_E_2_S, 500);
+
+ /* Clear PSE client TX abort state */
+ mt76_clear(dev, addr,
+ MT_CLIENT_RESET_TX_R_E_1 |
+ MT_CLIENT_RESET_TX_R_E_2);
+}
+
+static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
+{
+ if (!is_mt7628(dev))
+ return;
+
+ mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
+ mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
+}
+
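+/* Full recovery path: stop mac80211 queues, tasklets and NAPI, reset the
+ * PSE and/or DMA engines depending on the recorded reset cause, flush the
+ * TX/RX rings, then restart DMA, beacons and the queues.
+ */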
+static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+{
+ int beacon_int = dev->beacon_int;
+ u32 mask = dev->mt76.mmio.irqmask;
+ int i;
+
+ ieee80211_stop_queues(dev->mt76.hw);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mt76);
+
+ tasklet_disable(&dev->tx_tasklet);
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7603_beacon_set_timer(dev, -1, 0);
+
+ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
+ dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
+ dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
+ dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
+ mt7603_pse_reset(dev);
+
+ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+ goto skip_dma_reset;
+
+ mt7603_mac_stop(dev);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+ usleep_range(1000, 2000);
+
+ mt7603_irq_disable(dev, mask);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+
+ mt7603_pse_client_reset(dev);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_queue_rx_reset(dev, i);
+
+ mt7603_dma_sched_reset(dev);
+
+ mt7603_mac_dma_start(dev);
+
+ mt7603_irq_enable(dev, mask);
+
+skip_dma_reset:
+ clear_bit(MT76_RESET, &dev->mt76.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ tasklet_enable(&dev->tx_tasklet);
+ tasklet_schedule(&dev->tx_tasklet);
+
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+ mt7603_beacon_set_timer(dev, -1, beacon_int);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ ieee80211_wake_queues(dev->mt76.hw);
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
+{
+ u32 val;
+
+ mt76_wr(dev, MT_WPDMA_DEBUG,
+ FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
+ MT_WPDMA_DEBUG_SEL);
+
+ val = mt76_rr(dev, MT_WPDMA_DEBUG);
+ return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
+}
+
+static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
+{
+ if (is_mt7628(dev))
+ return mt7603_dma_debug(dev, 9) & BIT(9);
+
+ return mt7603_dma_debug(dev, 2) & BIT(8);
+}
+
+static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
+{
+ if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
+ return false;
+
+ return mt7603_rx_fifo_busy(dev);
+}
+
+static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
+{
+ u32 val;
+
+ if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
+ return false;
+
+ val = mt7603_dma_debug(dev, 9);
+ return (val & BIT(8)) && (val & 0xf) != 0xf;
+}
+
+static bool mt7603_tx_hang(struct mt7603_dev *dev)
+{
+ struct mt76_queue *q;
+ u32 dma_idx, prev_dma_idx;
+ int i;
+
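+ /* A data queue is considered hung when its DMA index has not advanced
+  * since the previous watchdog check while it still differs from the CPU
+  * index, i.e. descriptors remain pending.
+  */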
+ for (i = 0; i < 4; i++) {
+ q = &dev->mt76.q_tx[i];
+
+ if (!q->queued)
+ continue;
+
+ prev_dma_idx = dev->tx_dma_idx[i];
+ dma_idx = ioread32(&q->regs->dma_idx);
+ dev->tx_dma_idx[i] = dma_idx;
+
+ if (dma_idx == prev_dma_idx &&
+ dma_idx != ioread32(&q->regs->cpu_idx))
+ break;
+ }
+
+ return i < 4;
+}
+
+static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
+{
+ u32 addr, val;
+
+ if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
+ return true;
+
+ if (mt7603_rx_fifo_busy(dev))
+ return false;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
+ mt76_wr(dev, addr, 3);
+ val = mt76_rr(dev, addr) >> 16;
+
+ if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
+ return true;
+
+ return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
+}
+
+static bool
+mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
+ enum mt7603_reset_cause cause,
+ bool (*check)(struct mt7603_dev *dev))
+{
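+ /* Generic watchdog helper: each reset cause has its own counter, which
+  * is bumped while the (optional) check callback reports a stuck condition
+  * and triggers a reset once it reaches MT7603_WATCHDOG_TIMEOUT.
+  * dev->reset_test can force a specific cause for testing.
+  */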
+ if (dev->reset_test == cause + 1) {
+ dev->reset_test = 0;
+ goto trigger;
+ }
+
+ if (check) {
+ if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
+ *counter = 0;
+ return false;
+ }
+
+ (*counter)++;
+ }
+
+ if (*counter < MT7603_WATCHDOG_TIMEOUT)
+ return false;
+trigger:
+ dev->cur_reset_cause = cause;
+ dev->reset_cause[cause]++;
+ return true;
+}
+
+void mt7603_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt76_channel_state *state;
+ ktime_t cur_time;
+ u32 busy;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return;
+
+ state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+ busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ cur_time = ktime_get_boottime();
+ state->cc_busy += busy;
+ state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time));
+ dev->survey_time = cur_time;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+void
+mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
+{
+ u32 rxtd_6 = 0xd7c80000;
+
+ if (val == dev->ed_strict_mode)
+ return;
+
+ dev->ed_strict_mode = val;
+
+ /* Ensure that ED/CCA does not trigger if disabled */
+ if (!dev->ed_monitor)
+ rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
+ else
+ rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);
+
+ if (dev->ed_monitor && !dev->ed_strict_mode)
+ rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
+ else
+ rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);
+
+ mt76_wr(dev, MT_RXTD(6), rxtd_6);
+
+ mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
+ dev->ed_monitor && !dev->ed_strict_mode);
+}
+
+static void
+mt7603_edcca_check(struct mt7603_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_AGC(41));
+ ktime_t cur_time;
+ int rssi0, rssi1;
+ u32 active;
+ u32 ed_busy;
+
+ if (!dev->ed_monitor)
+ return;
+
+ rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
+ if (rssi0 > 128)
+ rssi0 -= 256;
+
+ rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
+ if (rssi1 > 128)
+ rssi1 -= 256;
+
+ if (max(rssi0, rssi1) >= -40 &&
+ dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
+ dev->ed_strong_signal++;
+ else if (dev->ed_strong_signal > 0)
+ dev->ed_strong_signal--;
+
+ cur_time = ktime_get_boottime();
+ ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;
+
+ active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
+ dev->ed_time = cur_time;
+
+ if (!active)
+ return;
+
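+ /* Count an ED/CCA trigger when energy-detect busy time exceeds 90% of
+  * the interval since the last check; sustained triggers enable strict
+  * mode, sustained idle intervals disable it again.
+  */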
+ if (100 * ed_busy / active > 90) {
+ if (dev->ed_trigger < 0)
+ dev->ed_trigger = 0;
+ dev->ed_trigger++;
+ } else {
+ if (dev->ed_trigger > 0)
+ dev->ed_trigger = 0;
+ dev->ed_trigger--;
+ }
+
+ if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
+ dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
+ mt7603_edcca_set_strict(dev, true);
+ } else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
+ mt7603_edcca_set_strict(dev, false);
+ }
+
+ if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
+ dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
+ else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
+ dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
+}
+
+void mt7603_cca_stats_reset(struct mt7603_dev *dev)
+{
+ mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
+ mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
+ mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
+}
+
+static void
+mt7603_adjust_sensitivity(struct mt7603_dev *dev)
+{
+ u32 agc0 = dev->agc0, agc3 = dev->agc3;
+ u32 adj;
+
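+ /* This appears to select one of three AGC base values depending on the
+  * target sensitivity range and fold the per-step adjustment into the
+  * gain fields; the constants themselves are undocumented chip tuning
+  * values.
+  */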
+ if (!dev->sensitivity || dev->sensitivity < -100) {
+ dev->sensitivity = 0;
+ } else if (dev->sensitivity <= -84) {
+ adj = 7 + (dev->sensitivity + 92) / 2;
+
+ agc0 = 0x56f0076f;
+ agc0 |= adj << 12;
+ agc0 |= adj << 16;
+ agc3 = 0x81d0d5e3;
+ } else if (dev->sensitivity <= -72) {
+ adj = 7 + (dev->sensitivity + 80) / 2;
+
+ agc0 = 0x6af0006f;
+ agc0 |= adj << 8;
+ agc0 |= adj << 12;
+ agc0 |= adj << 16;
+
+ agc3 = 0x8181d5e3;
+ } else {
+ if (dev->sensitivity > -54)
+ dev->sensitivity = -54;
+
+ adj = 7 + (dev->sensitivity + 80) / 2;
+
+ agc0 = 0x7ff0000f;
+ agc0 |= adj << 4;
+ agc0 |= adj << 8;
+ agc0 |= adj << 12;
+ agc0 |= adj << 16;
+
+ agc3 = 0x818181e3;
+ }
+
+ mt76_wr(dev, MT_AGC(0), agc0);
+ mt76_wr(dev, MT_AGC1(0), agc0);
+
+ mt76_wr(dev, MT_AGC(3), agc3);
+ mt76_wr(dev, MT_AGC1(3), agc3);
+}
+
+static void
+mt7603_false_cca_check(struct mt7603_dev *dev)
+{
+ int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
+ int false_cca;
+ int min_signal;
+ u32 val;
+
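+ /* Estimate false CCA events as PHY packet-detect (PD) counts that never
+  * resulted in an MDRDY (successfully decoded header), tracked separately
+  * for CCK and OFDM.
+  */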
+ val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
+ pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
+ pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);
+
+ val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
+ mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
+ mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);
+
+ dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
+ dev->false_cca_cck = pd_cck - mdrdy_cck;
+
+ mt7603_cca_stats_reset(dev);
+
+ min_signal = mt76_get_min_avg_rssi(&dev->mt76);
+ if (!min_signal) {
+ dev->sensitivity = 0;
+ dev->last_cca_adj = jiffies;
+ goto out;
+ }
+
+ min_signal -= 15;
+
+ false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
+ if (false_cca > 600) {
+ if (!dev->sensitivity)
+ dev->sensitivity = -92;
+ else
+ dev->sensitivity += 2;
+ dev->last_cca_adj = jiffies;
+ } else if (false_cca < 100 ||
+ time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
+ dev->last_cca_adj = jiffies;
+ if (!dev->sensitivity)
+ goto out;
+
+ dev->sensitivity -= 2;
+ }
+
+ if (dev->sensitivity && dev->sensitivity > min_signal) {
+ dev->sensitivity = min_signal;
+ dev->last_cca_adj = jiffies;
+ }
+
+out:
+ mt7603_adjust_sensitivity(dev);
+}
+
+void mt7603_mac_work(struct work_struct *work)
+{
+ struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
+ mac_work.work);
+ bool reset = false;
+
+ mt76_tx_status_check(&dev->mt76, NULL, false);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ dev->mac_work_count++;
+ mt7603_update_channel(&dev->mt76);
+ mt7603_edcca_check(dev);
+
+ if (dev->mac_work_count == 10)
+ mt7603_false_cca_check(dev);
+
+ if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
+ RESET_CAUSE_RX_PSE_BUSY,
+ mt7603_rx_pse_busy) ||
+ mt7603_watchdog_check(dev, &dev->beacon_check,
+ RESET_CAUSE_BEACON_STUCK,
+ NULL) ||
+ mt7603_watchdog_check(dev, &dev->tx_hang_check,
+ RESET_CAUSE_TX_HANG,
+ mt7603_tx_hang) ||
+ mt7603_watchdog_check(dev, &dev->tx_dma_check,
+ RESET_CAUSE_TX_BUSY,
+ mt7603_tx_dma_busy) ||
+ mt7603_watchdog_check(dev, &dev->rx_dma_check,
+ RESET_CAUSE_RX_BUSY,
+ mt7603_rx_dma_busy) ||
+ mt7603_watchdog_check(dev, &dev->mcu_hang,
+ RESET_CAUSE_MCU_HANG,
+ NULL) ||
+ dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
+ dev->beacon_check = 0;
+ dev->tx_dma_check = 0;
+ dev->tx_hang_check = 0;
+ dev->rx_dma_check = 0;
+ dev->rx_pse_check = 0;
+ dev->mcu_hang = 0;
+ dev->rx_dma_idx = ~0;
+ memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
+ reset = true;
+ dev->mac_work_count = 0;
+ }
+
+ if (dev->mac_work_count >= 10)
+ dev->mac_work_count = 0;
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (reset)
+ mt7603_mac_watchdog_reset(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ msecs_to_jiffies(MT7603_WATCHDOG_TIME));
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.h b/drivers/net/wireless/mediatek/mt76/mt7603/mac.h
new file mode 100644
index 000000000000..17e34ecf2bfb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_MAC_H
+#define __MT7603_MAC_H
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+#define MT_RXD0_NORMAL_GROUP_1 BIT(25)
+#define MT_RXD0_NORMAL_GROUP_2 BIT(26)
+#define MT_RXD0_NORMAL_GROUP_3 BIT(27)
+#define MT_RXD0_NORMAL_GROUP_4 BIT(28)
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS = 0,
+ PKT_TYPE_TXRXV = 1,
+ PKT_TYPE_NORMAL = 2,
+ PKT_TYPE_RX_DUP_RFB = 3,
+ PKT_TYPE_RX_TMR = 4,
+ PKT_TYPE_RETRIEVE = 5,
+ PKT_TYPE_RX_EVENT = 7,
+};
+
+#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
+#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
+#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
+#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
+#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
+#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6)
+#define MT_RXD1_NORMAL_BEACON_UC BIT(5)
+#define MT_RXD1_NORMAL_BEACON_MC BIT(4)
+#define MT_RXD1_NORMAL_BCAST BIT(3)
+#define MT_RXD1_NORMAL_MCAST BIT(2)
+#define MT_RXD1_NORMAL_U2M BIT(1)
+#define MT_RXD1_NORMAL_HTC_VLD BIT(0)
+
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(31)
+#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_UDF_VALID BIT(26)
+#define MT_RXD2_NORMAL_LLC_MIS BIT(25)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22)
+#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21)
+#define MT_RXD2_NORMAL_ICV_ERR BIT(20)
+#define MT_RXD2_NORMAL_CLM BIT(19)
+#define MT_RXD2_NORMAL_CM BIT(18)
+#define MT_RXD2_NORMAL_FCS_ERR BIT(17)
+#define MT_RXD2_NORMAL_SW_BIT BIT(16)
+#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12)
+#define MT_RXD2_NORMAL_TID GENMASK(11, 8)
+#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0)
+
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD3_NORMAL_CLS BIT(10)
+#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+
+#define MT_RXV1_VHTA1_B5_B4 GENMASK(31, 30)
+#define MT_RXV1_VHTA2_B8_B1 GENMASK(29, 22)
+#define MT_RXV1_HT_NO_SOUND BIT(21)
+#define MT_RXV1_HT_SMOOTH BIT(20)
+#define MT_RXV1_HT_SHORT_GI BIT(19)
+#define MT_RXV1_HT_AGGR BIT(18)
+#define MT_RXV1_VHTA1_B22 BIT(17)
+#define MT_RXV1_FRAME_MODE GENMASK(16, 15)
+#define MT_RXV1_TX_MODE GENMASK(14, 12)
+#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10)
+#define MT_RXV1_HT_AD_CODE BIT(9)
+#define MT_RXV1_HT_STBC GENMASK(8, 7)
+#define MT_RXV1_TX_RATE GENMASK(6, 0)
+
+#define MT_RXV2_VHTA1_B16_B6 GENMASK(31, 21)
+#define MT_RXV2_LENGTH GENMASK(20, 0)
+
+#define MT_RXV3_F_AGC1_CAL_GAIN GENMASK(31, 29)
+#define MT_RXV3_F_AGC1_EQ_CAL BIT(28)
+#define MT_RXV3_RCPI1 GENMASK(27, 20)
+#define MT_RXV3_F_AGC0_CAL_GAIN GENMASK(19, 17)
+#define MT_RXV3_F_AGC0_EQ_CAL BIT(16)
+#define MT_RXV3_RCPI0 GENMASK(15, 8)
+#define MT_RXV3_SEL_ANT BIT(7)
+#define MT_RXV3_ACI_DET_X BIT(6)
+#define MT_RXV3_OFDM_FREQ_TRANS_DETECT BIT(5)
+#define MT_RXV3_VHTA1_B21_B17 GENMASK(4, 0)
+
+#define MT_RXV4_F_AGC_CAL_GAIN GENMASK(31, 29)
+#define MT_RXV4_F_AGC2_EQ_CAL BIT(28)
+#define MT_RXV4_IB_RSSI1 GENMASK(27, 20)
+#define MT_RXV4_F_AGC_LPF_GAIN_X GENMASK(19, 16)
+#define MT_RXV4_WB_RSSI_X GENMASK(15, 8)
+#define MT_RXV4_IB_RSSI0 GENMASK(7, 0)
+
+#define MT_RXV5_LTF_SNR0 GENMASK(31, 26)
+#define MT_RXV5_LTF_PROC_TIME GENMASK(25, 19)
+#define MT_RXV5_FOE GENMASK(18, 7)
+#define MT_RXV5_C_AGC_SATE GENMASK(6, 4)
+#define MT_RXV5_F_AGC_LNA_GAIN_0 GENMASK(3, 2)
+#define MT_RXV5_F_AGC_LNA_GAIN_1 GENMASK(1, 0)
+
+#define MT_RXV6_C_AGC_STATE GENMASK(30, 28)
+#define MT_RXV6_NS_TS_FIELD GENMASK(27, 25)
+#define MT_RXV6_RX_VALID BIT(24)
+#define MT_RXV6_NF2 GENMASK(23, 16)
+#define MT_RXV6_NF1 GENMASK(15, 8)
+#define MT_RXV6_NF0 GENMASK(7, 0)
+
+enum mt7603_tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_TXD0_P_IDX BIT(31)
+#define MT_TXD0_Q_IDX GENMASK(30, 27)
+#define MT_TXD0_UTXB BIT(26)
+#define MT_TXD0_UNXV BIT(25)
+#define MT_TXD0_UDP_TCP_SUM BIT(24)
+#define MT_TXD0_IP_SUM BIT(23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_OWN_MAC GENMASK(31, 26)
+#define MT_TXD1_PROTECTED BIT(23)
+#define MT_TXD1_TID GENMASK(22, 20)
+#define MT_TXD1_NO_ACK BIT(19)
+#define MT_TXD1_HDR_PAD GENMASK(18, 16)
+#define MT_TXD1_LONG_FORMAT BIT(15)
+#define MT_TXD1_HDR_FORMAT GENMASK(14, 13)
+#define MT_TXD1_HDR_INFO GENMASK(12, 8)
+#define MT_TXD1_WLAN_IDX GENMASK(7, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_TIMING_MEASURE BIT(30)
+#define MT_TXD2_BA_DISABLE BIT(29)
+#define MT_TXD2_POWER_OFFSET GENMASK(28, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_SW_POWER_MGMT BIT(13)
+#define MT_TXD5_BA_SEQ_CTRL BIT(12)
+#define MT_TXD5_DA_SELECT BIT(11)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_SGI BIT(31)
+#define MT_TXD6_LDPC BIT(30)
+#define MT_TXD6_TX_RATE GENMASK(29, 18)
+#define MT_TXD6_I_TXBF BIT(17)
+#define MT_TXD6_E_TXBF BIT(16)
+#define MT_TXD6_DYN_BW BIT(15)
+#define MT_TXD6_ANT_PRI GENMASK(14, 12)
+#define MT_TXD6_SPE_EN BIT(11)
+#define MT_TXD6_FIXED_BW BIT(10)
+#define MT_TXD6_BW GENMASK(9, 8)
+#define MT_TXD6_ANT_ID GENMASK(7, 2)
+#define MT_TXD6_FIXED_RATE BIT(0)
+
+#define MT_TX_RATE_STBC BIT(11)
+#define MT_TX_RATE_NSS GENMASK(10, 9)
+#define MT_TX_RATE_MODE GENMASK(8, 6)
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXS0_ANTENNA GENMASK(31, 26)
+#define MT_TXS0_TID GENMASK(25, 22)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TXS_FORMAT BIT(13)
+#define MT_TXS0_FIXED_RATE BIT(12)
+#define MT_TXS0_TX_RATE GENMASK(11, 0)
+
+#define MT_TXS1_F0_TIMESTAMP GENMASK(31, 0)
+#define MT_TXS1_F1_NOISE_2 GENMASK(23, 16)
+#define MT_TXS1_F1_NOISE_1 GENMASK(15, 8)
+#define MT_TXS1_F1_NOISE_0 GENMASK(7, 0)
+
+#define MT_TXS2_F0_FRONT_TIME GENMASK(24, 0)
+#define MT_TXS2_F1_RCPI_2 GENMASK(23, 16)
+#define MT_TXS2_F1_RCPI_1 GENMASK(15, 8)
+#define MT_TXS2_F1_RCPI_0 GENMASK(7, 0)
+
+#define MT_TXS3_WCID GENMASK(31, 24)
+#define MT_TXS3_RXV_SEQNO GENMASK(23, 16)
+#define MT_TXS3_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS4_LAST_TX_RATE GENMASK(31, 29)
+#define MT_TXS4_TX_COUNT GENMASK(28, 24)
+#define MT_TXS4_AMPDU BIT(23)
+#define MT_TXS4_ACKED_MPDU BIT(22)
+#define MT_TXS4_PID GENMASK(21, 14)
+#define MT_TXS4_BW GENMASK(13, 12)
+#define MT_TXS4_F0_SEQNO GENMASK(11, 0)
+#define MT_TXS4_F1_TSSI GENMASK(11, 0)
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
new file mode 100644
index 000000000000..b10775ed92e6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -0,0 +1,709 @@
+/* SPDX-License-Identifier: ISC */
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7603.h"
+#include "eeprom.h"
+
+static int
+mt7603_start(struct ieee80211_hw *hw)
+{
+ struct mt7603_dev *dev = hw->priv;
+
+ mt7603_mac_start(dev);
+ dev->survey_time = ktime_get_boottime();
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt7603_mac_work(&dev->mac_work.work);
+
+ return 0;
+}
+
+static void
+mt7603_stop(struct ieee80211_hw *hw)
+{
+ struct mt7603_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt7603_mac_stop(dev);
+}
+
+static int
+mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct mt7603_dev *dev = hw->priv;
+ struct mt76_txq *mtxq;
+ u8 bc_addr[ETH_ALEN];
+ int idx;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mvif->idx = ffs(~dev->vif_mask) - 1;
+ if (mvif->idx >= MT7603_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR0(mvif->idx),
+ get_unaligned_le32(vif->addr));
+ mt76_wr(dev, MT_MAC_ADDR1(mvif->idx),
+ (get_unaligned_le16(vif->addr + 4) |
+ MT_MAC_ADDR1_VALID));
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ mt76_wr(dev, MT_BSSID0(mvif->idx),
+ get_unaligned_le32(vif->addr));
+ mt76_wr(dev, MT_BSSID1(mvif->idx),
+ (get_unaligned_le16(vif->addr + 4) |
+ MT_BSSID1_VALID));
+ }
+
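+ /* Use one of the WTBL entries reserved at the top of the table as the
+  * per-interface broadcast/multicast wcid.
+  */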
+ idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
+ dev->vif_mask |= BIT(mvif->idx);
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+
+ eth_broadcast_addr(bc_addr);
+ mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
+
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ mt76_txq_init(&dev->mt76, vif->txq);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void
+mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct mt7603_dev *dev = hw->priv;
+ int idx = mvif->sta.wcid.idx;
+
+ mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0);
+ mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0);
+ mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
+ mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
+ mt7603_beacon_set_timer(dev, mvif->idx, 0);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ mt76_txq_remove(&dev->mt76, vif->txq);
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->vif_mask &= ~BIT(mvif->idx);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7603_init_edcca(struct mt7603_dev *dev)
+{
+ /* Set lower signal level to -65dBm */
+ mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23);
+
+ /* clear previous energy detect monitor results */
+ mt76_rr(dev, MT_MIB_STAT_ED);
+
+ if (dev->ed_monitor)
+ mt76_set(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
+ else
+ mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
+
+ dev->ed_strict_mode = 0xff;
+ dev->ed_strong_signal = 0;
+ dev->ed_time = ktime_get_boottime();
+
+ mt7603_edcca_set_strict(dev, false);
+}
+
+static int
+mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
+{
+ u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
+ int idx, ret;
+ u8 bw = MT_BW_20;
+ bool failed = false;
+
+ cancel_delayed_work_sync(&dev->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_set_channel(&dev->mt76);
+ mt7603_mac_stop(dev);
+
+ if (def->width == NL80211_CHAN_WIDTH_40)
+ bw = MT_BW_40;
+
+ dev->mt76.chandef = *def;
+ mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
+ ret = mt7603_mcu_set_channel(dev);
+ if (ret) {
+ failed = true;
+ goto out;
+ }
+
+ if (def->chan->band == NL80211_BAND_5GHZ) {
+ idx = 1;
+ rssi_data += MT_EE_RSSI_OFFSET_5G;
+ } else {
+ idx = 0;
+ rssi_data += MT_EE_RSSI_OFFSET_2G;
+ }
+
+ memcpy(dev->rssi_offset, rssi_data, sizeof(dev->rssi_offset));
+
+ idx |= (def->chan -
+ mt76_hw(dev)->wiphy->bands[def->chan->band]->channels) << 1;
+ mt76_wr(dev, MT_WF_RMAC_CH_FREQ, idx);
+ mt7603_mac_set_timing(dev);
+ mt7603_mac_start(dev);
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_txq_schedule_all(&dev->mt76);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ MT7603_WATCHDOG_TIME);
+
+ /* reset channel stats */
+ mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
+ mt76_set(dev, MT_MIB_CTL,
+ MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
+ mt76_rr(dev, MT_MIB_STAT_PSCCA);
+ mt7603_cca_stats_reset(dev);
+
+ dev->survey_time = ktime_get_boottime();
+
+ mt7603_init_edcca(dev);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (failed)
+ mt7603_mac_work(&dev->mac_work.work);
+
+ return ret;
+}
+
+static int
+mt7603_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7603_dev *dev = hw->priv;
+ int ret = 0;
+
+ if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER))
+ ret = mt7603_set_channel(dev, &hw->conf.chandef);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ dev->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
+
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
+ return ret;
+}
+
+static void
+mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt7603_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ dev->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
+}
+
+static void
+mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) {
+ if (info->assoc || info->ibss_joined) {
+ mt76_wr(dev, MT_BSSID0(mvif->idx),
+ get_unaligned_le32(info->bssid));
+ mt76_wr(dev, MT_BSSID1(mvif->idx),
+ (get_unaligned_le16(info->bssid + 4) |
+ MT_BSSID1_VALID));
+ } else {
+ mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
+ mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != dev->slottime) {
+ dev->slottime = slottime;
+ mt7603_mac_set_timing(dev);
+ }
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
+ int beacon_int = !!info->enable_beacon * info->beacon_int;
+
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+int
+mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ int idx;
+ int ret = 0;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7603_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
+
+ __skb_queue_head_init(&msta->psq);
+ msta->ps = ~0;
+ msta->smps = ~0;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ mt7603_wtbl_init(dev, idx, mvif->idx, sta->addr);
+ mt7603_wtbl_set_ps(dev, msta, false);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+ return ret;
+}
+
+void
+mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+ mt7603_wtbl_update_cap(dev, sta);
+}
+
+void
+mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ spin_lock_bh(&dev->ps_lock);
+ __skb_queue_purge(&msta->psq);
+ mt7603_filter_tx(dev, wcid->idx, true);
+ spin_unlock_bh(&dev->ps_lock);
+
+ mt7603_wtbl_clear(dev, wcid->idx);
+}
+
+static void
+mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(list)) != NULL)
+ mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb),
+ skb, 0);
+}
+
+void
+mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct sk_buff_head list;
+
+ mt76_stop_tx_queues(&dev->mt76, sta, false);
+ mt7603_wtbl_set_ps(dev, msta, ps);
+ if (ps)
+ return;
+
+ __skb_queue_head_init(&list);
+
+ spin_lock_bh(&dev->ps_lock);
+ skb_queue_splice_tail_init(&msta->psq, &list);
+ spin_unlock_bh(&dev->ps_lock);
+
+ mt7603_ps_tx_list(dev, &list);
+}
+
+static void
+mt7603_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct sk_buff_head list;
+ struct sk_buff *skb, *tmp;
+
+ __skb_queue_head_init(&list);
+
+ spin_lock_bh(&dev->ps_lock);
+ skb_queue_walk_safe(&msta->psq, skb, tmp) {
+ if (!nframes)
+ break;
+
+ if (!(tids & BIT(skb->priority)))
+ continue;
+
+ skb_set_queue_mapping(skb, MT_TXQ_PSD);
+ __skb_unlink(skb, &msta->psq);
+ __skb_queue_tail(&list, skb);
+ nframes--;
+ }
+ spin_unlock_bh(&dev->ps_lock);
+
+ mt7603_ps_tx_list(dev, &list);
+
+ if (nframes)
+ mt76_release_buffered_frames(hw, sta, tids, nframes, reason,
+ more_data);
+}
+
+static int
+mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct mt7603_sta *msta = sta ? (struct mt7603_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ int idx = key->keyidx;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * The hardware does not support per-STA RX GTK, fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ return mt7603_wtbl_set_key(dev, wcid->idx, key);
+}
+
+static int
+mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7603_dev *dev = hw->priv;
+ u16 cw_min = (1 << 5) - 1;
+ u16 cw_max = (1 << 10) - 1;
+ u32 val;
+
+ queue = dev->mt76.q_tx[queue].hw_idx;
+
+ if (params->cw_min)
+ cw_min = params->cw_min;
+ if (params->cw_max)
+ cw_max = params->cw_max;
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7603_mac_stop(dev);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(queue));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
+ mt76_wr(dev, MT_WMM_TXOP(queue), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX(queue));
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
+ mt76_wr(dev, MT_WMM_CWMAX(queue), val);
+
+ mt7603_mac_start(dev);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void
+mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt7603_dev *dev = hw->priv;
+
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+ mt7603_beacon_set_timer(dev, -1, 0);
+}
+
+static void
+mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7603_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+ mt7603_beacon_set_timer(dev, -1, dev->beacon_int);
+}
+
+static void
+mt7603_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+}
+
+static int
+mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7603_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 ba_size = params->buf_size;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
+ params->buf_size);
+ mt7603_mac_rx_ba_reset(dev, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7603_dev *dev = hw->priv;
+ struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
+ struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
+ int i;
+
+ spin_lock_bh(&dev->mt76.lock);
+ for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
+ msta->rates[i].idx = sta_rates->rate[i].idx;
+ msta->rates[i].count = sta_rates->rate[i].count;
+ msta->rates[i].flags = sta_rates->rate[i].flags;
+
+ if (msta->rates[i].idx < 0 || !msta->rates[i].count)
+ break;
+ }
+ msta->n_rates = i;
+ mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
+ msta->rate_probe = false;
+ mt7603_wtbl_set_smps(dev, msta,
+ sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
+ spin_unlock_bh(&dev->mt76.lock);
+}
+
+static void
+mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7603_dev *dev = hw->priv;
+
+ dev->coverage_class = coverage_class;
+ mt7603_mac_set_timing(dev);
+}
+
+static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7603_dev *dev = hw->priv;
+ struct mt76_wcid *wcid = &dev->global_sta.wcid;
+
+ if (control->sta) {
+ struct mt7603_sta *msta;
+
+ msta = (struct mt7603_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif) {
+ struct mt7603_vif *mvif;
+
+ mvif = (struct mt7603_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+static int
+mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ return 0;
+}
+
+const struct ieee80211_ops mt7603_ops = {
+ .tx = mt7603_tx,
+ .start = mt7603_start,
+ .stop = mt7603_stop,
+ .add_interface = mt7603_add_interface,
+ .remove_interface = mt7603_remove_interface,
+ .config = mt7603_config,
+ .configure_filter = mt7603_configure_filter,
+ .bss_info_changed = mt7603_bss_info_changed,
+ .sta_state = mt76_sta_state,
+ .set_key = mt7603_set_key,
+ .conf_tx = mt7603_conf_tx,
+ .sw_scan_start = mt7603_sw_scan,
+ .sw_scan_complete = mt7603_sw_scan_complete,
+ .flush = mt7603_flush,
+ .ampdu_action = mt7603_ampdu_action,
+ .get_txpower = mt76_get_txpower,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
+ .release_buffered_frames = mt7603_release_buffered_frames,
+ .set_coverage_class = mt7603_set_coverage_class,
+ .set_tim = mt7603_set_tim,
+ .get_survey = mt76_get_survey,
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int __init mt7603_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mt76_wmac_driver);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_PCI
+ ret = pci_register_driver(&mt7603_pci_driver);
+ if (ret)
+ platform_driver_unregister(&mt76_wmac_driver);
+#endif
+ return ret;
+}
+
+static void __exit mt7603_exit(void)
+{
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&mt7603_pci_driver);
+#endif
+ platform_driver_unregister(&mt76_wmac_driver);
+}
+
+module_init(mt7603_init);
+module_exit(mt7603_exit);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
new file mode 100644
index 000000000000..4b0713f1fd5e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -0,0 +1,483 @@
+/* SPDX-License-Identifier: ISC */
+
+#include <linux/firmware.h>
+#include "mt7603.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+#define MCU_SKB_RESERVE 8
+
+struct mt7603_fw_trailer {
+ char fw_ver[10];
+ char build_date[15];
+ __le32 dl_len;
+} __packed;
+
+static int
+__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
+ int query, int *wait_seq)
+{
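+ /* Before the firmware is up, commands apparently use a short 12-byte
+  * download header instead of the full MCU TX descriptor.
+  */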
+ int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt7603_mcu_txd *txd;
+ u8 seq;
+
+ if (!skb)
+ return -EINVAL;
+
+ seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+
+ txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen);
+ memset(txd, 0, hdrlen);
+
+ txd->len = cpu_to_le16(skb->len);
+ if (cmd == -MCU_CMD_FW_SCATTER)
+ txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE_FW);
+ else
+ txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE);
+ txd->pkt_type = MCU_PKT_ID;
+ txd->seq = seq;
+
+ if (cmd < 0) {
+ txd->cid = -cmd;
+ } else {
+ txd->cid = MCU_CMD_EXT_CID;
+ txd->ext_cid = cmd;
+ if (query != MCU_Q_NA)
+ txd->ext_cid_ack = 1;
+ }
+
+ txd->set_query = query;
+
+ if (wait_seq)
+ *wait_seq = seq;
+
+ return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
+}
+
+static int
+mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
+ int query)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned long expires = jiffies + 3 * HZ;
+ struct mt7603_mcu_rxd *rxd;
+ int ret, seq;
+
+ mutex_lock(&mdev->mmio.mcu.mutex);
+
+ ret = __mt7603_mcu_msg_send(dev, skb, cmd, query, &seq);
+ if (ret)
+ goto out;
+
+ while (1) {
+ bool check_seq = false;
+
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
+ if (!skb) {
+ dev_err(mdev->dev,
+ "MCU message %d (seq %d) timed out\n",
+ cmd, seq);
+ dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT;
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ rxd = (struct mt7603_mcu_rxd *)skb->data;
+ if (seq == rxd->seq)
+ check_seq = true;
+
+ dev_kfree_skb(skb);
+
+ if (check_seq)
+ break;
+ }
+
+out:
+ mutex_unlock(&mdev->mmio.mcu.mutex);
+
+ return ret;
+}
+
+static int
+mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
+{
+ struct {
+ __le32 addr;
+ __le32 len;
+ __le32 mode;
+ } req = {
+ .addr = cpu_to_le32(addr),
+ .len = cpu_to_le32(len),
+ .mode = cpu_to_le32(BIT(31)),
+ };
+ struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ MCU_Q_NA);
+}
+
+static int
+mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
+{
+ struct sk_buff *skb;
+ int ret = 0;
+
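+ /* Push the firmware image in scatter chunks sized so that each chunk
+  * plus the MCU TX descriptor fits in 4096 bytes.
+  */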
+ while (len > 0) {
+ int cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
+ len);
+
+ skb = mt7603_mcu_msg_alloc(data, cur_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
+ MCU_Q_NA, NULL);
+ if (ret)
+ break;
+
+ data += cur_len;
+ len -= cur_len;
+ }
+
+ return ret;
+}
+
+static int
+mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
+{
+ struct {
+ __le32 override;
+ __le32 addr;
+ } req = {
+ .override = cpu_to_le32(addr ? 1 : 0),
+ .addr = cpu_to_le32(addr),
+ };
+ struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
+ MCU_Q_NA);
+}
+
+static int
+mt7603_mcu_restart(struct mt7603_dev *dev)
+{
+ struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0);
+
+ return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
+ MCU_Q_NA);
+}
+
+static int
+mt7603_load_firmware(struct mt7603_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt7603_fw_trailer *hdr;
+ const char *firmware;
+ int dl_len;
+ u32 addr, val;
+ int ret;
+
+ if (is_mt7628(dev)) {
+ if (mt76xx_rev(dev) == MT7628_REV_E1)
+ firmware = MT7628_FIRMWARE_E1;
+ else
+ firmware = MT7628_FIRMWARE_E2;
+ } else {
+ if (mt76xx_rev(dev) < MT7603_REV_E2)
+ firmware = MT7603_FIRMWARE_E1;
+ else
+ firmware = MT7603_FIRMWARE_E2;
+ }
+
+ ret = request_firmware(&fw, firmware, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7603_fw_trailer *)(fw->data + fw->size -
+ sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "Firmware Version: %.10s\n", hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Build Time: %.15s\n", hdr->build_date);
+
+ addr = mt7603_reg_map(dev, 0x50012498);
+ mt76_wr(dev, addr, 0x5);
+ mt76_wr(dev, addr, 0x5);
+ udelay(1);
+
+ /* switch to bypass mode */
+ mt76_rmw(dev, MT_SCH_4, MT_SCH_4_FORCE_QID,
+ MT_SCH_4_BYPASS | FIELD_PREP(MT_SCH_4_FORCE_QID, 5));
+
+ val = mt76_rr(dev, MT_TOP_MISC2);
+ if (val & BIT(1)) {
+ dev_info(dev->mt76.dev, "Firmware already running...\n");
+ goto running;
+ }
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(0) | BIT(1), BIT(0), 500)) {
+ dev_err(dev->mt76.dev, "Timeout waiting for ROM code to become ready\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ dl_len = le32_to_cpu(hdr->dl_len) + 4;
+ ret = mt7603_mcu_init_download(dev, MCU_FIRMWARE_ADDRESS, dl_len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ ret = mt7603_mcu_start_firmware(dev, MCU_FIRMWARE_ADDRESS);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start firmware\n");
+ goto out;
+ }
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(1), BIT(1), 500)) {
+ dev_err(dev->mt76.dev, "Timeout waiting for firmware to initialize\n");
+ ret = -EIO;
+ goto out;
+ }
+
+running:
+ mt76_clear(dev, MT_SCH_4, MT_SCH_4_FORCE_QID | MT_SCH_4_BYPASS);
+
+ mt76_set(dev, MT_SCH_4, BIT(8));
+ mt76_clear(dev, MT_SCH_4, BIT(8));
+
+ dev->mcu_running = true;
+ dev_info(dev->mt76.dev, "firmware init done\n");
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+int mt7603_mcu_init(struct mt7603_dev *dev)
+{
+ mutex_init(&dev->mt76.mmio.mcu.mutex);
+
+ return mt7603_load_firmware(dev);
+}
+
+void mt7603_mcu_exit(struct mt7603_dev *dev)
+{
+ mt7603_mcu_restart(dev);
+ skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+}
+
+int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
+{
+ static const u16 req_fields[] = {
+#define WORD(_start) \
+ _start, \
+ _start + 1
+#define GROUP_2G(_start) \
+ WORD(_start), \
+ WORD(_start + 2), \
+ WORD(_start + 4)
+
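+ /* WORD(x) expands to the two consecutive EEPROM offsets x and x + 1;
+  * GROUP_2G(x) covers the six consecutive offsets of a 2G TX power group.
+  */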
+ MT_EE_NIC_CONF_0 + 1,
+ WORD(MT_EE_NIC_CONF_1),
+ MT_EE_WIFI_RF_SETTING,
+ MT_EE_TX_POWER_DELTA_BW40,
+ MT_EE_TX_POWER_DELTA_BW80 + 1,
+ MT_EE_TX_POWER_EXT_PA_5G,
+ MT_EE_TEMP_SENSOR_CAL,
+ GROUP_2G(MT_EE_TX_POWER_0_START_2G),
+ GROUP_2G(MT_EE_TX_POWER_1_START_2G),
+ WORD(MT_EE_TX_POWER_CCK),
+ WORD(MT_EE_TX_POWER_OFDM_2G_6M),
+ WORD(MT_EE_TX_POWER_OFDM_2G_24M),
+ WORD(MT_EE_TX_POWER_OFDM_2G_54M),
+ WORD(MT_EE_TX_POWER_HT_BPSK_QPSK),
+ WORD(MT_EE_TX_POWER_HT_16_64_QAM),
+ WORD(MT_EE_TX_POWER_HT_64_QAM),
+ MT_EE_ELAN_RX_MODE_GAIN,
+ MT_EE_ELAN_RX_MODE_NF,
+ MT_EE_ELAN_RX_MODE_P1DB,
+ MT_EE_ELAN_BYPASS_MODE_GAIN,
+ MT_EE_ELAN_BYPASS_MODE_NF,
+ MT_EE_ELAN_BYPASS_MODE_P1DB,
+ WORD(MT_EE_STEP_NUM_NEG_6_7),
+ WORD(MT_EE_STEP_NUM_NEG_4_5),
+ WORD(MT_EE_STEP_NUM_NEG_2_3),
+ WORD(MT_EE_STEP_NUM_NEG_0_1),
+ WORD(MT_EE_REF_STEP_24G),
+ WORD(MT_EE_STEP_NUM_PLUS_1_2),
+ WORD(MT_EE_STEP_NUM_PLUS_3_4),
+ WORD(MT_EE_STEP_NUM_PLUS_5_6),
+ MT_EE_STEP_NUM_PLUS_7,
+ MT_EE_XTAL_FREQ_OFFSET,
+ MT_EE_XTAL_TRIM_2_COMP,
+ MT_EE_XTAL_TRIM_3_COMP,
+ MT_EE_XTAL_WF_RFCAL,
+
+ /* unknown fields below */
+ WORD(0x24),
+ 0x34,
+ 0x39,
+ 0x3b,
+ WORD(0x42),
+ WORD(0x9e),
+ 0xf2,
+ WORD(0xf8),
+ 0xfa,
+ 0x12e,
+ WORD(0x130), WORD(0x132), WORD(0x134), WORD(0x136),
+ WORD(0x138), WORD(0x13a), WORD(0x13c), WORD(0x13e),
+
+#undef GROUP_2G
+#undef WORD
+
+ };
+ struct req_data {
+ u16 addr;
+ u8 val;
+ u8 pad;
+ } __packed;
+ struct {
+ u8 buffer_mode;
+ u8 len;
+ u8 pad[2];
+ } req_hdr = {
+ .buffer_mode = 1,
+ .len = ARRAY_SIZE(req_fields) - 1,
+ };
+ struct sk_buff *skb;
+ struct req_data *data;
+ const int size = 0xff * sizeof(struct req_data);
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size);
+
+ skb = mt7603_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+ data = (struct req_data *)skb_put(skb, size);
+ memset(data, 0, size);
+
+ for (i = 0; i < ARRAY_SIZE(req_fields); i++) {
+ data[i].addr = cpu_to_le16(req_fields[i]);
+ data[i].val = eep[req_fields[i]];
+ data[i].pad = 0;
+ }
+
+ return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ MCU_Q_SET);
+}
+
+static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
+{
+ struct {
+ u8 center_channel;
+ u8 tssi;
+ u8 temp_comp;
+ u8 target_power[2];
+ u8 rate_power_delta[14];
+ u8 bw_power_delta;
+ u8 ch_power_delta[6];
+ u8 temp_comp_power[17];
+ u8 reserved;
+ } req = {
+ .center_channel = dev->mt76.chandef.chan->hw_value,
+#define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n]
+ .tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1),
+ .temp_comp = EEP_VAL(MT_EE_NIC_CONF_1),
+ .target_power = {
+ EEP_VAL(MT_EE_TX_POWER_0_START_2G + 2),
+ EEP_VAL(MT_EE_TX_POWER_1_START_2G + 2)
+ },
+ .bw_power_delta = EEP_VAL(MT_EE_TX_POWER_DELTA_BW40),
+ .ch_power_delta = {
+ EEP_VAL(MT_EE_TX_POWER_0_START_2G + 3),
+ EEP_VAL(MT_EE_TX_POWER_0_START_2G + 4),
+ EEP_VAL(MT_EE_TX_POWER_0_START_2G + 5),
+ EEP_VAL(MT_EE_TX_POWER_1_START_2G + 3),
+ EEP_VAL(MT_EE_TX_POWER_1_START_2G + 4),
+ EEP_VAL(MT_EE_TX_POWER_1_START_2G + 5)
+ },
+#undef EEP_VAL
+ };
+ struct sk_buff *skb;
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+
+ memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
+ sizeof(req.rate_power_delta));
+
+ memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
+ sizeof(req.temp_comp_power));
+
+ skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
+ return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_TX_POWER_CTRL,
+ MCU_Q_SET);
+}
+
+int mt7603_mcu_set_channel(struct mt7603_dev *dev)
+{
+ struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
+ struct {
+ u8 control_chan;
+ u8 center_chan;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 _res0[7];
+ u8 txpower[21];
+ u8 _res1[3];
+ } req = {
+ .control_chan = chandef->chan->hw_value,
+ .center_chan = chandef->chan->hw_value,
+ .bw = MT_BW_20,
+ .tx_streams = n_chains,
+ .rx_streams = n_chains,
+ };
+ struct sk_buff *skb;
+ s8 tx_power;
+ int ret;
+ int i;
+
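+ /* For 40 MHz operation the centre channel is two channel numbers
+  * (10 MHz) above or below the control channel.
+  */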
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
+ req.bw = MT_BW_40;
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ req.center_chan += 2;
+ else
+ req.center_chan -= 2;
+ }
+
+ tx_power = hw->conf.power_level * 2;
+ if (dev->mt76.antenna_mask == 3)
+ tx_power -= 6;
+ tx_power = min(tx_power, dev->tx_power_limit);
+
+ dev->mt76.txpower_cur = tx_power;
+
+ for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
+ req.txpower[i] = tx_power;
+
+ skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
+ ret = mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
+ MCU_Q_SET);
+ if (ret)
+ return ret;
+
+ return mt7603_mcu_set_tx_power(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
new file mode 100644
index 000000000000..1bba369d5c8a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_MCU_H
+#define __MT7603_MCU_H
+
+struct mt7603_mcu_txd {
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+ u8 set_query;
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 uc_d2b2_rev;
+ u8 ext_cid_ack;
+
+ u32 au4_d3_to_d7_rev[5];
+} __packed __aligned(4);
+
+struct mt7603_mcu_rxd {
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ __le16 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[3];
+};
+
+#define MCU_PKT_ID 0xa0
+#define MCU_PORT_QUEUE 0x8000
+#define MCU_PORT_QUEUE_FW 0xc000
+
+#define MCU_FIRMWARE_ADDRESS 0x100000
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+ MCU_CMD_FW_START_REQ = 0x02,
+ MCU_CMD_INIT_ACCESS_REG = 0x3,
+ MCU_CMD_PATCH_START_REQ = 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_HIF_LOOPBACK = 0x20,
+ MCU_CMD_CH_PRIVILEGE = 0x20,
+ MCU_CMD_ACCESS_REG = 0xC2,
+ MCU_CMD_EXT_CID = 0xED,
+ MCU_CMD_FW_SCATTER = 0xEE,
+ MCU_CMD_RESTART_DL_REQ = 0xEF,
+};
+
+enum {
+ MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
+ MCU_EXT_CMD_RF_TEST = 0x04,
+ MCU_EXT_CMD_RADIO_ON_OFF_CTRL = 0x05,
+ MCU_EXT_CMD_WIFI_RX_DISABLE = 0x06,
+ MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+ MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_NIC_CAPABILITY = 0x09,
+ MCU_EXT_CMD_PWR_SAVING = 0x0A,
+ MCU_EXT_CMD_MULTIPLE_REG_ACCESS = 0x0E,
+ MCU_EXT_CMD_AP_PWR_SAVING_CAPABILITY = 0xF,
+ MCU_EXT_CMD_SEC_ADDREMOVE_KEY = 0x10,
+ MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_CMD_PS_RETRIEVE_START = 0x14,
+ MCU_EXT_CMD_LED_CTRL = 0x17,
+ MCU_EXT_CMD_PACKET_FILTER = 0x18,
+ MCU_EXT_CMD_PWR_MGT_BIT_WIFI = 0x1B,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_THERMAL_PROTECT = 0x23,
+ MCU_EXT_CMD_EDCA_SET = 0x27,
+ MCU_EXT_CMD_SLOT_TIME_SET = 0x28,
+ MCU_EXT_CMD_CONFIG_INTERNAL_SETTING = 0x29,
+ MCU_EXT_CMD_NOA_OFFLOAD_CTRL = 0x2B,
+ MCU_EXT_CMD_GET_THEMAL_SENSOR = 0x2C,
+ MCU_EXT_CMD_WAKEUP_OPTION = 0x2E,
+ MCU_EXT_CMD_AC_QUEUE_CONTROL = 0x31,
+ MCU_EXT_CMD_BCN_UPDATE = 0x33
+};
+
+enum {
+ MCU_EXT_EVENT_CMD_RESULT = 0x0,
+ MCU_EXT_EVENT_RF_REG_ACCESS = 0x2,
+ MCU_EXT_EVENT_MULTI_CR_ACCESS = 0x0E,
+ MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
+ MCU_EXT_EVENT_BEACON_LOSS = 0x1A,
+ MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
+ MCU_EXT_EVENT_BCN_UPDATE = 0x31,
+};
+
+static inline struct sk_buff *
+mt7603_mcu_msg_alloc(const void *data, int len)
+{
+ return mt76_mcu_msg_alloc(data, sizeof(struct mt7603_mcu_txd),
+ len, 0);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
new file mode 100644
index 000000000000..79f332429432
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_H
+#define __MT7603_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7603_MAX_INTERFACES 4
+#define MT7603_WTBL_SIZE 128
+#define MT7603_WTBL_RESERVED (MT7603_WTBL_SIZE - 1)
+#define MT7603_WTBL_STA (MT7603_WTBL_RESERVED - MT7603_MAX_INTERFACES)
+
+#define MT7603_RATE_RETRY 2
+
+#define MT7603_RX_RING_SIZE 128
+
+#define MT7603_FIRMWARE_E1 "mt7603_e1.bin"
+#define MT7603_FIRMWARE_E2 "mt7603_e2.bin"
+#define MT7628_FIRMWARE_E1 "mt7628_e1.bin"
+#define MT7628_FIRMWARE_E2 "mt7628_e2.bin"
+
+#define MT7603_EEPROM_SIZE 1024
+
+#define MT_AGG_SIZE_LIMIT(_n) (((_n) + 1) * 4)
+
+#define MT7603_PRE_TBTT_TIME 5000 /* ms */
+
+#define MT7603_WATCHDOG_TIME 100 /* ms */
+#define MT7603_WATCHDOG_TIMEOUT 10 /* number of checks */
+
+#define MT7603_EDCCA_BLOCK_TH 10
+
+#define MT7603_CFEND_RATE_DEFAULT 0x69 /* chip default (24M) */
+#define MT7603_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+
+struct mt7603_vif;
+struct mt7603_sta;
+
+enum {
+ MT7603_REV_E1 = 0x00,
+ MT7603_REV_E2 = 0x10,
+ MT7628_REV_E1 = 0x8a00,
+};
+
+enum mt7603_bw {
+ MT_BW_20,
+ MT_BW_40,
+ MT_BW_80,
+};
+
+struct mt7603_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7603_vif *vif;
+
+ struct sk_buff_head psq;
+
+ struct ieee80211_tx_rate rates[8];
+ u8 rate_count;
+ u8 n_rates;
+
+ u8 rate_probe;
+ u8 smps;
+
+ u8 ps;
+};
+
+struct mt7603_vif {
+ struct mt7603_sta sta; /* must be first */
+
+ u8 idx;
+};
+
+enum mt7603_reset_cause {
+ RESET_CAUSE_TX_HANG,
+ RESET_CAUSE_TX_BUSY,
+ RESET_CAUSE_RX_BUSY,
+ RESET_CAUSE_BEACON_STUCK,
+ RESET_CAUSE_RX_PSE_BUSY,
+ RESET_CAUSE_MCU_HANG,
+ RESET_CAUSE_RESET_FAILED,
+ __RESET_CAUSE_MAX
+};
+
+struct mt7603_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ const struct mt76_bus_ops *bus_ops;
+
+ u32 rxfilter;
+
+ u8 vif_mask;
+
+ struct mt7603_sta global_sta;
+
+ u32 agc0, agc3;
+ u32 false_cca_ofdm, false_cca_cck;
+ unsigned long last_cca_adj;
+
+ u8 rssi_offset[3];
+
+ u8 slottime;
+ s16 coverage_class;
+
+ s8 tx_power_limit;
+
+ ktime_t survey_time;
+ ktime_t ed_time;
+ int beacon_int;
+
+ struct mt76_queue q_rx;
+
+ spinlock_t ps_lock;
+
+ u8 mac_work_count;
+
+ u8 mcu_running;
+ u8 ed_monitor;
+
+ s8 ed_trigger;
+ u8 ed_strict_mode;
+ u8 ed_strong_signal;
+
+ s8 sensitivity;
+
+ u8 beacon_mask;
+
+ u8 beacon_check;
+ u8 tx_hang_check;
+ u8 tx_dma_check;
+ u8 rx_dma_check;
+ u8 rx_pse_check;
+ u8 mcu_hang;
+
+ enum mt7603_reset_cause cur_reset_cause;
+
+ u16 tx_dma_idx[4];
+ u16 rx_dma_idx;
+
+ u32 reset_test;
+
+ unsigned int reset_cause[__RESET_CAUSE_MAX];
+
+ struct delayed_work mac_work;
+ struct tasklet_struct tx_tasklet;
+ struct tasklet_struct pre_tbtt_tasklet;
+};
+
+extern const struct mt76_driver_ops mt7603_drv_ops;
+extern const struct ieee80211_ops mt7603_ops;
+extern struct pci_driver mt7603_pci_driver;
+extern struct platform_driver mt76_wmac_driver;
+
+static inline bool is_mt7603(struct mt7603_dev *dev)
+{
+ return mt76xx_chip(dev) == 0x7603;
+}
+
+static inline bool is_mt7628(struct mt7603_dev *dev)
+{
+ return mt76xx_chip(dev) == 0x7628;
+}
+
+/* need offset to prevent conflict with ampdu_ack_len */
+#define MT_RATE_DRIVER_DATA_OFFSET 4
+
+u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr);
+
+irqreturn_t mt7603_irq_handler(int irq, void *dev_instance);
+
+int mt7603_register_device(struct mt7603_dev *dev);
+void mt7603_unregister_device(struct mt7603_dev *dev);
+int mt7603_eeprom_init(struct mt7603_dev *dev);
+int mt7603_dma_init(struct mt7603_dev *dev);
+void mt7603_dma_cleanup(struct mt7603_dev *dev);
+int mt7603_mcu_init(struct mt7603_dev *dev);
+void mt7603_init_debugfs(struct mt7603_dev *dev);
+
+void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set);
+
+static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
+{
+ mt7603_set_irq_mask(dev, 0, mask);
+}
+
+static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
+{
+ mt7603_set_irq_mask(dev, mask, 0);
+}
+
+void mt7603_mac_dma_start(struct mt7603_dev *dev);
+void mt7603_mac_start(struct mt7603_dev *dev);
+void mt7603_mac_stop(struct mt7603_dev *dev);
+void mt7603_mac_work(struct work_struct *work);
+void mt7603_mac_set_timing(struct mt7603_dev *dev);
+void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
+int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
+void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
+void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+ int ba_size);
+
+void mt7603_pse_client_reset(struct mt7603_dev *dev);
+
+int mt7603_mcu_set_channel(struct mt7603_dev *dev);
+int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
+void mt7603_mcu_exit(struct mt7603_dev *dev);
+
+void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
+ const u8 *mac_addr);
+void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx);
+void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta);
+void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates);
+int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
+ struct ieee80211_key_conf *key);
+void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ bool enabled);
+void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ bool enabled);
+void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
+
+int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+
+void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt7603_pre_tbtt_tasklet(unsigned long arg);
+
+void mt7603_update_channel(struct mt76_dev *mdev);
+
+void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
+void mt7603_cca_stats_reset(struct mt7603_dev *dev);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
new file mode 100644
index 000000000000..4acdbf5d8968
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: ISC */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7603.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7603) },
+ { },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mt7603_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
+ &mt7603_drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7603_dev, mt76);
+ mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7603_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+ mt7603_unregister_device(dev);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7603_FIRMWARE_E1);
+MODULE_FIRMWARE(MT7603_FIRMWARE_E2);
+
+struct pci_driver mt7603_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76pci_device_table,
+ .probe = mt76pci_probe,
+ .remove = mt76pci_remove,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
new file mode 100644
index 000000000000..da6827ae6cee
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -0,0 +1,774 @@
+/* SPDX-License-Identifier: ISC */
+
+#ifndef __MT7603_REGS_H
+#define __MT7603_REGS_H
+
+#define MT_HW_REV 0x1000
+#define MT_HW_CHIPID 0x1008
+#define MT_TOP_MISC2 0x1134
+
+#define MT_MCU_BASE 0x2000
+#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
+
+#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500)
+#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0)
+#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
+
+#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504)
+#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
+#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
+
+#define MT_HIF_BASE 0x4000
+#define MT_HIF(ofs) (MT_HIF_BASE + (ofs))
+
+#define MT_INT_SOURCE_CSR MT_HIF(0x200)
+#define MT_INT_MASK_CSR MT_HIF(0x204)
+#define MT_DELAY_INT_CFG MT_HIF(0x210)
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+
+#define MT_INT_RX_COHERENT BIT(20)
+#define MT_INT_TX_COHERENT BIT(21)
+#define MT_INT_MAC_IRQ3 BIT(27)
+
+#define MT_INT_MCU_CMD BIT(30)
+
+#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MT_WPDMA_GLO_CFG_FORCE_TX_EOF BIT(25)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
+
+#define MT_WPDMA_DEBUG MT_HIF(0x244)
+#define MT_WPDMA_DEBUG_VALUE GENMASK(17, 0)
+#define MT_WPDMA_DEBUG_SEL BIT(27)
+#define MT_WPDMA_DEBUG_IDX GENMASK(31, 28)
+
+#define MT_TX_RING_BASE MT_HIF(0x300)
+#define MT_RX_RING_BASE MT_HIF(0x400)
+
+#define MT_TXTIME_THRESH_BASE MT_HIF(0x500)
+#define MT_TXTIME_THRESH(n) (MT_TXTIME_THRESH_BASE + ((n) * 4))
+
+#define MT_PAGE_COUNT_BASE MT_HIF(0x540)
+#define MT_PAGE_COUNT(n) (MT_PAGE_COUNT_BASE + ((n) * 4))
+
+#define MT_SCH_1 MT_HIF(0x588)
+#define MT_SCH_2 MT_HIF(0x58c)
+#define MT_SCH_3 MT_HIF(0x590)
+
+#define MT_SCH_4 MT_HIF(0x594)
+#define MT_SCH_4_FORCE_QID GENMASK(4, 0)
+#define MT_SCH_4_BYPASS BIT(5)
+#define MT_SCH_4_RESET BIT(8)
+
+#define MT_GROUP_THRESH_BASE MT_HIF(0x598)
+#define MT_GROUP_THRESH(n) (MT_GROUP_THRESH_BASE + ((n) * 4))
+
+#define MT_QUEUE_PRIORITY_1 MT_HIF(0x580)
+#define MT_QUEUE_PRIORITY_2 MT_HIF(0x584)
+
+#define MT_BMAP_0 MT_HIF(0x5b0)
+#define MT_BMAP_1 MT_HIF(0x5b4)
+#define MT_BMAP_2 MT_HIF(0x5b8)
+
+#define MT_HIGH_PRIORITY_1 MT_HIF(0x5bc)
+#define MT_HIGH_PRIORITY_2 MT_HIF(0x5c0)
+
+#define MT_PRIORITY_MASK MT_HIF(0x5c4)
+
+#define MT_RSV_MAX_THRESH MT_HIF(0x5c8)
+
+#define MT_PSE_BASE 0x8000
+#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
+
+#define MT_MCU_DEBUG_RESET MT_PSE(0x16c)
+#define MT_MCU_DEBUG_RESET_PSE BIT(0)
+#define MT_MCU_DEBUG_RESET_PSE_S BIT(1)
+#define MT_MCU_DEBUG_RESET_QUEUES GENMASK(6, 2)
+
+#define MT_PSE_FC_P0 MT_PSE(0x120)
+#define MT_PSE_FC_P0_MIN_RESERVE GENMASK(11, 0)
+#define MT_PSE_FC_P0_MAX_QUOTA GENMASK(27, 16)
+
+#define MT_PSE_FRP MT_PSE(0x138)
+#define MT_PSE_FRP_P0 GENMASK(2, 0)
+#define MT_PSE_FRP_P1 GENMASK(5, 3)
+#define MT_PSE_FRP_P2_RQ0 GENMASK(8, 6)
+#define MT_PSE_FRP_P2_RQ1 GENMASK(11, 9)
+#define MT_PSE_FRP_P2_RQ2 GENMASK(14, 12)
+
+#define MT_FC_RSV_COUNT_0 MT_PSE(0x13c)
+#define MT_FC_RSV_COUNT_0_P0 GENMASK(11, 0)
+#define MT_FC_RSV_COUNT_0_P1 GENMASK(27, 16)
+
+#define MT_FC_SP2_Q0Q1 MT_PSE(0x14c)
+#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q0 GENMASK(11, 0)
+#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q1 GENMASK(27, 16)
+
+#define MT_PSE_FW_SHARED MT_PSE(0x17c)
+
+#define MT_PSE_RTA MT_PSE(0x194)
+#define MT_PSE_RTA_QUEUE_ID GENMASK(4, 0)
+#define MT_PSE_RTA_PORT_ID GENMASK(6, 5)
+#define MT_PSE_RTA_REDIRECT_EN BIT(7)
+#define MT_PSE_RTA_TAG_ID GENMASK(15, 8)
+#define MT_PSE_RTA_WRITE BIT(16)
+#define MT_PSE_RTA_BUSY BIT(31)
+
+#define MT_WF_PHY_BASE 0x10000
+#define MT_WF_PHY_OFFSET 0x1000
+#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
+
+#define MT_AGC_BASE MT_WF_PHY(0x500)
+#define MT_AGC(n) (MT_AGC_BASE + ((n) * 4))
+
+#define MT_AGC1_BASE MT_WF_PHY(0x1500)
+#define MT_AGC1(n) (MT_AGC1_BASE + ((n) * 4))
+
+#define MT_AGC_41_RSSI_0 GENMASK(23, 16)
+#define MT_AGC_41_RSSI_1 GENMASK(7, 0)
+
+#define MT_RXTD_BASE MT_WF_PHY(0x600)
+#define MT_RXTD(n) (MT_RXTD_BASE + ((n) * 4))
+
+#define MT_RXTD_6_ACI_TH GENMASK(4, 0)
+#define MT_RXTD_6_CCAED_TH GENMASK(14, 8)
+
+#define MT_RXTD_8_LOWER_SIGNAL GENMASK(5, 0)
+
+#define MT_RXTD_13_ACI_TH_EN BIT(0)
+
+#define MT_WF_PHY_CR_TSSI_BASE MT_WF_PHY(0xd00)
+#define MT_WF_PHY_CR_TSSI(phy, n) (MT_WF_PHY_CR_TSSI_BASE + \
+ ((phy) * MT_WF_PHY_OFFSET) + \
+ ((n) * 4))
+
+#define MT_PHYCTRL_BASE MT_WF_PHY(0x4100)
+#define MT_PHYCTRL(n) (MT_PHYCTRL_BASE + ((n) * 4))
+
+#define MT_PHYCTRL_2_STATUS_RESET BIT(6)
+#define MT_PHYCTRL_2_STATUS_EN BIT(7)
+
+#define MT_PHYCTRL_STAT_PD MT_PHYCTRL(3)
+#define MT_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
+#define MT_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
+
+#define MT_PHYCTRL_STAT_MDRDY MT_PHYCTRL(8)
+#define MT_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
+#define MT_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
+
+#define MT_WF_AGG_BASE 0x21200
+#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
+
+#define MT_AGG_ARCR MT_WF_AGG(0x010)
+#define MT_AGG_ARCR_INIT_RATE1 BIT(0)
+#define MT_AGG_ARCR_FB_SGI_DISABLE BIT(1)
+#define MT_AGG_ARCR_RATE8_DOWN_WRAP BIT(2)
+#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
+#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
+#define MT_AGG_ARCR_SPE_DIS_TH GENMASK(27, 24)
+
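+/* per-AC aggregation limit fields: 3 bits per access category, spaced 4 bits apart */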
+#define MT_AGG_ARUCR MT_WF_AGG(0x014)
+#define MT_AGG_ARDCR MT_WF_AGG(0x018)
+#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
+#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+
+#define MT_AGG_LIMIT MT_WF_AGG(0x040)
+#define MT_AGG_LIMIT_1 MT_WF_AGG(0x044)
+#define MT_AGG_LIMIT_AC(_n) GENMASK(((_n) + 1) * 8 - 1, (_n) * 8)
+
+#define MT_AGG_BA_SIZE_LIMIT_0 MT_WF_AGG(0x048)
+#define MT_AGG_BA_SIZE_LIMIT_1 MT_WF_AGG(0x04c)
+#define MT_AGG_BA_SIZE_LIMIT_SHIFT 8
+
+#define MT_AGG_PCR MT_WF_AGG(0x050)
+#define MT_AGG_PCR_MM BIT(16)
+#define MT_AGG_PCR_GF BIT(17)
+#define MT_AGG_PCR_BW40 BIT(18)
+#define MT_AGG_PCR_RIFS BIT(19)
+#define MT_AGG_PCR_BW80 BIT(20)
+#define MT_AGG_PCR_BW160 BIT(21)
+#define MT_AGG_PCR_ERP BIT(22)
+
+#define MT_AGG_PCR_RTS MT_WF_AGG(0x054)
+#define MT_AGG_PCR_RTS_THR GENMASK(19, 0)
+#define MT_AGG_PCR_RTS_PKT_THR GENMASK(31, 25)
+
+#define MT_AGG_CONTROL MT_WF_AGG(0x070)
+#define MT_AGG_CONTROL_NO_BA_RULE BIT(0)
+#define MT_AGG_CONTROL_NO_BA_AR_RULE BIT(1)
+#define MT_AGG_CONTROL_CFEND_SPE_EN BIT(3)
+#define MT_AGG_CONTROL_CFEND_RATE GENMASK(15, 4)
+#define MT_AGG_CONTROL_BAR_SPE_EN BIT(19)
+#define MT_AGG_CONTROL_BAR_RATE GENMASK(31, 20)
+
+#define MT_AGG_TMP MT_WF_AGG(0x0d8)
+
+#define MT_AGG_BWCR MT_WF_AGG(0x0ec)
+#define MT_AGG_BWCR_BW GENMASK(3, 2)
+
+#define MT_AGG_RETRY_CONTROL MT_WF_AGG(0x0f4)
+#define MT_AGG_RETRY_CONTROL_RTS_LIMIT GENMASK(11, 7)
+#define MT_AGG_RETRY_CONTROL_BAR_LIMIT GENMASK(15, 12)
+
+#define MT_WF_DMA_BASE 0x21c00
+#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR1 MT_WF_DMA(0x004)
+
+#define MT_DMA_FQCR0 MT_WF_DMA(0x008)
+#define MT_DMA_FQCR0_TARGET_WCID GENMASK(7, 0)
+#define MT_DMA_FQCR0_TARGET_BSS GENMASK(13, 8)
+#define MT_DMA_FQCR0_TARGET_QID GENMASK(20, 16)
+#define MT_DMA_FQCR0_DEST_PORT_ID GENMASK(23, 22)
+#define MT_DMA_FQCR0_DEST_QUEUE_ID GENMASK(28, 24)
+#define MT_DMA_FQCR0_MODE BIT(29)
+#define MT_DMA_FQCR0_STATUS BIT(30)
+#define MT_DMA_FQCR0_BUSY BIT(31)
+
+#define MT_DMA_RCFR0 MT_WF_DMA(0x070)
+#define MT_DMA_VCFR0 MT_WF_DMA(0x07c)
+
+#define MT_DMA_TCFR0 MT_WF_DMA(0x080)
+#define MT_DMA_TCFR1 MT_WF_DMA(0x084)
+#define MT_DMA_TCFR_TXS_AGGR_TIMEOUT GENMASK(27, 16)
+#define MT_DMA_TCFR_TXS_QUEUE BIT(14)
+#define MT_DMA_TCFR_TXS_AGGR_COUNT GENMASK(12, 8)
+#define MT_DMA_TCFR_TXS_BIT_MAP GENMASK(6, 0)
+
+#define MT_DMA_TMCFR0 MT_WF_DMA(0x088)
+
+#define MT_WF_ARB_BASE 0x21400
+#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs))
+
+#define MT_WMM_AIFSN MT_WF_ARB(0x020)
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
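+/* two 16-bit CWMAX values per register, so two WMM queues share one register */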
+#define MT_WMM_CWMAX_BASE MT_WF_ARB(0x028)
+#define MT_WMM_CWMAX(_n) (MT_WMM_CWMAX_BASE + (((_n) / 2) << 2))
+#define MT_WMM_CWMAX_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_CWMAX_MASK GENMASK(15, 0)
+
+#define MT_WMM_CWMIN MT_WF_ARB(0x040)
+#define MT_WMM_CWMIN_MASK GENMASK(7, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 8)
+
+#define MT_WF_ARB_RQCR MT_WF_ARB(0x070)
+#define MT_WF_ARB_RQCR_RX_START BIT(0)
+#define MT_WF_ARB_RQCR_RXV_START BIT(4)
+#define MT_WF_ARB_RQCR_RXV_R_EN BIT(7)
+#define MT_WF_ARB_RQCR_RXV_T_EN BIT(8)
+
+#define MT_ARB_SCR MT_WF_ARB(0x080)
+#define MT_ARB_SCR_BCNQ_OPMODE_MASK GENMASK(1, 0)
+#define MT_ARB_SCR_BCNQ_OPMODE_SHIFT(n) ((n) * 2)
+#define MT_ARB_SCR_TX_DISABLE BIT(8)
+#define MT_ARB_SCR_RX_DISABLE BIT(9)
+#define MT_ARB_SCR_BCNQ_EMPTY_SKIP BIT(28)
+#define MT_ARB_SCR_TTTT_BTIM_PRIO BIT(29)
+#define MT_ARB_SCR_TBTT_BCN_PRIO BIT(30)
+#define MT_ARB_SCR_TBTT_BCAST_PRIO BIT(31)
+
+enum {
+ MT_BCNQ_OPMODE_STA = 0,
+ MT_BCNQ_OPMODE_AP = 1,
+ MT_BCNQ_OPMODE_ADHOC = 2,
+};
+
+#define MT_WF_ARB_TX_START_0 MT_WF_ARB(0x100)
+#define MT_WF_ARB_TX_START_1 MT_WF_ARB(0x104)
+#define MT_WF_ARB_TX_FLUSH_0 MT_WF_ARB(0x108)
+#define MT_WF_ARB_TX_FLUSH_1 MT_WF_ARB(0x10c)
+#define MT_WF_ARB_TX_STOP_0 MT_WF_ARB(0x110)
+#define MT_WF_ARB_TX_STOP_1 MT_WF_ARB(0x114)
+
+#define MT_WF_ARB_BCN_START MT_WF_ARB(0x118)
+#define MT_WF_ARB_BCN_START_BSSn(n) BIT(0 + (n))
+#define MT_WF_ARB_BCN_START_T_PRE_TTTT BIT(10)
+#define MT_WF_ARB_BCN_START_T_TTTT BIT(11)
+#define MT_WF_ARB_BCN_START_T_PRE_TBTT BIT(12)
+#define MT_WF_ARB_BCN_START_T_TBTT BIT(13)
+#define MT_WF_ARB_BCN_START_T_SLOT_IDLE BIT(14)
+#define MT_WF_ARB_BCN_START_T_TX_START BIT(15)
+#define MT_WF_ARB_BCN_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_BCN_FLUSH MT_WF_ARB(0x11c)
+#define MT_WF_ARB_BCN_FLUSH_BSSn(n) BIT(0 + (n))
+#define MT_WF_ARB_BCN_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_CAB_START MT_WF_ARB(0x120)
+#define MT_WF_ARB_CAB_START_BSSn(n) BIT(0 + (n))
+#define MT_WF_ARB_CAB_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
+
+#define MT_WF_ARB_CAB_FLUSH MT_WF_ARB(0x124)
+#define MT_WF_ARB_CAB_FLUSH_BSSn(n) BIT(0 + (n))
+#define MT_WF_ARB_CAB_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0)
+
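+/* 4-bit CAB counter per BSS, packed across the CAB_COUNT registers */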
+#define MT_WF_ARB_CAB_COUNT(n) MT_WF_ARB(0x128 + (n) * 4)
+#define MT_WF_ARB_CAB_COUNT_SHIFT 4
+#define MT_WF_ARB_CAB_COUNT_MASK GENMASK(3, 0)
+#define MT_WF_ARB_CAB_COUNT_B0_REG(n) MT_WF_ARB_CAB_COUNT(((n) > 12 ? 2 : \
+ ((n) > 4 ? 1 : 0)))
+#define MT_WF_ARB_CAB_COUNT_B0_SHIFT(n) (((n) > 12 ? (n) - 12 : \
+ ((n) > 4 ? (n) - 4 : \
+ (n) ? (n) + 3 : 0)) * 4)
+
+#define MT_TX_ABORT MT_WF_ARB(0x134)
+#define MT_TX_ABORT_EN BIT(0)
+#define MT_TX_ABORT_WCID GENMASK(15, 8)
+
+#define MT_WF_TMAC_BASE 0x21600
+#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
+
+#define MT_TMAC_TCR MT_WF_TMAC(0x000)
+#define MT_TMAC_TCR_BLINK_SEL GENMASK(7, 6)
+#define MT_TMAC_TCR_PRE_RTS_GUARD GENMASK(11, 8)
+#define MT_TMAC_TCR_PRE_RTS_SEC_IDLE GENMASK(13, 12)
+#define MT_TMAC_TCR_RTS_SIGTA BIT(14)
+#define MT_TMAC_TCR_LDPC_OFS BIT(15)
+#define MT_TMAC_TCR_TX_STREAMS GENMASK(17, 16)
+#define MT_TMAC_TCR_SCH_IDLE_SEL GENMASK(19, 18)
+#define MT_TMAC_TCR_SCH_DET_PER_IOD BIT(20)
+#define MT_TMAC_TCR_DCH_DET_DISABLE BIT(21)
+#define MT_TMAC_TCR_TX_RIFS BIT(22)
+#define MT_TMAC_TCR_RX_RIFS_MODE BIT(23)
+#define MT_TMAC_TCR_TXOP_TBTT_CTL BIT(24)
+#define MT_TMAC_TCR_TBTT_TX_STOP_CTL BIT(25)
+#define MT_TMAC_TCR_TXOP_BURST_STOP BIT(26)
+#define MT_TMAC_TCR_RDG_RA_MODE BIT(27)
+#define MT_TMAC_TCR_RDG_RESP BIT(29)
+#define MT_TMAC_TCR_RDG_NO_PENDING BIT(30)
+#define MT_TMAC_TCR_SMOOTHING BIT(31)
+
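+/* two 16-bit TXOP values per register; the XOR in the index swaps the register pair order */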
+#define MT_WMM_TXOP_BASE MT_WF_TMAC(0x010)
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + \
+ ((((_n) / 2) ^ 0x1) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_TIMEOUT_CCK MT_WF_TMAC(0x090)
+#define MT_TIMEOUT_OFDM MT_WF_TMAC(0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TXREQ MT_WF_TMAC(0x09c)
+#define MT_TXREQ_CCA_SRC_SEL GENMASK(31, 30)
+
+#define MT_RXREQ MT_WF_TMAC(0x0a0)
+#define MT_RXREQ_DELAY GENMASK(8, 0)
+
+#define MT_IFS MT_WF_TMAC(0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
+#define MT_TMAC_PCR MT_WF_TMAC(0x0b4)
+#define MT_TMAC_PCR_RATE GENMASK(8, 0)
+#define MT_TMAC_PCR_RATE_FIXED BIT(15)
+#define MT_TMAC_PCR_ANT_ID GENMASK(21, 16)
+#define MT_TMAC_PCR_ANT_ID_SEL BIT(22)
+#define MT_TMAC_PCR_SPE_EN BIT(23)
+#define MT_TMAC_PCR_ANT_PRI GENMASK(26, 24)
+#define MT_TMAC_PCR_ANT_PRI_SEL BIT(27)
+
+#define MT_WF_RMAC_BASE 0x21800
+#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
+
+#define MT_WF_RFCR MT_WF_RMAC(0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_BSSID0(idx) MT_WF_RMAC(0x004 + (idx) * 8)
+#define MT_BSSID1(idx) MT_WF_RMAC(0x008 + (idx) * 8)
+#define MT_BSSID1_VALID BIT(16)
+
+#define MT_MAC_ADDR0(idx) MT_WF_RMAC(0x024 + (idx) * 8)
+#define MT_MAC_ADDR1(idx) MT_WF_RMAC(0x028 + (idx) * 8)
+#define MT_MAC_ADDR1_ADDR GENMASK(15, 0)
+#define MT_MAC_ADDR1_VALID BIT(16)
+
+#define MT_BA_CONTROL_0 MT_WF_RMAC(0x068)
+#define MT_BA_CONTROL_1 MT_WF_RMAC(0x06c)
+#define MT_BA_CONTROL_1_ADDR GENMASK(15, 0)
+#define MT_BA_CONTROL_1_TID GENMASK(19, 16)
+#define MT_BA_CONTROL_1_IGNORE_TID BIT(20)
+#define MT_BA_CONTROL_1_IGNORE_ALL BIT(21)
+#define MT_BA_CONTROL_1_RESET BIT(22)
+
+#define MT_WF_RMACDR MT_WF_RMAC(0x078)
+#define MT_WF_RMACDR_TSF_PROBERSP_DIS BIT(0)
+#define MT_WF_RMACDR_TSF_TIM BIT(4)
+#define MT_WF_RMACDR_MBSSID_MASK GENMASK(25, 24)
+#define MT_WF_RMACDR_CHECK_HTC_BY_RATE BIT(26)
+#define MT_WF_RMACDR_MAXLEN_20BIT BIT(30)
+
+#define MT_WF_RMAC_RMCR MT_WF_RMAC(0x080)
+#define MT_WF_RMAC_RMCR_SMPS_MODE GENMASK(21, 20)
+#define MT_WF_RMAC_RMCR_RX_STREAMS GENMASK(24, 22)
+#define MT_WF_RMAC_RMCR_SMPS_RTS BIT(25)
+
+#define MT_WF_RMAC_CH_FREQ MT_WF_RMAC(0x090)
+#define MT_WF_RMAC_MAXMINLEN MT_WF_RMAC(0x098)
+#define MT_WF_RFCR1 MT_WF_RMAC(0x0a4)
+#define MT_WF_RMAC_TMR_PA MT_WF_RMAC(0x0e0)
+
+#define MT_WF_SEC_BASE 0x21a00
+#define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs))
+
+#define MT_SEC_SCR MT_WF_SEC(0x004)
+#define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0)
+
+#define MT_WTBL_OFF_BASE 0x23000
+#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
+
+#define MT_WTBL_UPDATE MT_WTBL_OFF(0x000)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
+#define MT_WTBL_UPDATE_WTBL2 BIT(11)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
+#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
+#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
+#define MT_WTBL_UPDATE_RX_COUNT_CLEAR BIT(15)
+#define MT_WTBL_UPDATE_BUSY BIT(16)
+
+#define MT_WTBL_RMVTCR MT_WTBL_OFF(0x008)
+#define MT_WTBL_RMVTCR_RX_MV_MODE BIT(23)
+
+#define MT_LPON_BASE 0x24000
+#define MT_LPON(n) (MT_LPON_BASE + (n))
+
+#define MT_LPON_BTEIR MT_LPON(0x020)
+#define MT_LPON_BTEIR_MBSS_MODE GENMASK(31, 29)
+
+#define MT_PRE_TBTT MT_LPON(0x030)
+#define MT_PRE_TBTT_MASK GENMASK(7, 0)
+#define MT_PRE_TBTT_SHIFT 8
+
+#define MT_TBTT MT_LPON(0x034)
+#define MT_TBTT_PERIOD GENMASK(15, 0)
+#define MT_TBTT_DTIM_PERIOD GENMASK(23, 16)
+#define MT_TBTT_TBTT_WAKE_PERIOD GENMASK(27, 24)
+#define MT_TBTT_DTIM_WAKE_PERIOD GENMASK(30, 28)
+#define MT_TBTT_CAL_ENABLE BIT(31)
+
+#define MT_TBTT_TIMER_CFG MT_LPON(0x05c)
+
+#define MT_LPON_SBTOR(n) MT_LPON(0x0a0)
+#define MT_LPON_SBTOR_SUB_BSS_EN BIT(29)
+#define MT_LPON_SBTOR_TIME_OFFSET GENMASK(19, 0)
+
+#define MT_INT_WAKEUP_BASE 0x24400
+#define MT_INT_WAKEUP(n) (MT_INT_WAKEUP_BASE + (n))
+
+#define MT_HW_INT_STATUS(n) MT_INT_WAKEUP(0x3c + (n) * 8)
+#define MT_HW_INT_MASK(n) MT_INT_WAKEUP(0x40 + (n) * 8)
+
+#define MT_HW_INT3_TBTT0 BIT(15)
+#define MT_HW_INT3_PRE_TBTT0 BIT(31)
+
+#define MT_WTBL1_BASE 0x28000
+
+#define MT_WTBL_ON_BASE (MT_WTBL1_BASE + 0x2000)
+#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
+
+#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x200)
+
+#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x204)
+#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0)
+#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12)
+#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24)
+
+#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x208)
+#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0)
+#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4)
+#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16)
+#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28)
+
+#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x20c)
+#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0)
+#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
+#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
+
+#define MT_MIB_BASE 0x2c000
+#define MT_MIB(_n) (MT_MIB_BASE + (_n))
+
+#define MT_MIB_CTL MT_MIB(0x00)
+#define MT_MIB_CTL_PSCCA_TIME GENMASK(13, 11)
+#define MT_MIB_CTL_CCA_NAV_TX GENMASK(16, 14)
+#define MT_MIB_CTL_ED_TIME GENMASK(30, 28)
+#define MT_MIB_CTL_READ_CLR_DIS BIT(31)
+
+#define MT_MIB_STAT(_n) MT_MIB(0x08 + (_n) * 4)
+
+#define MT_MIB_STAT_CCA MT_MIB_STAT(9)
+#define MT_MIB_STAT_CCA_MASK GENMASK(23, 0)
+
+#define MT_MIB_STAT_PSCCA MT_MIB_STAT(16)
+#define MT_MIB_STAT_PSCCA_MASK GENMASK(23, 0)
+
+#define MT_MIB_STAT_ED MT_MIB_STAT(18)
+#define MT_MIB_STAT_ED_MASK GENMASK(23, 0)
+
+#define MT_PCIE_REMAP_BASE_1 0x40000
+#define MT_PCIE_REMAP_BASE_2 0x80000
+
+#define MT_TX_HW_QUEUE_MGMT 4
+#define MT_TX_HW_QUEUE_MCU 5
+#define MT_TX_HW_QUEUE_BCN 7
+#define MT_TX_HW_QUEUE_BMC 8
+
+#define MT_LED_BASE_PHYS 0x80024000
+#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n))
+
+#define MT_LED_CTRL MT_LED_PHYS(0x00)
+
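+/* each LED index owns an 8-bit slice of MT_LED_CTRL */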
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
+#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v) (((_v) << \
+ __ffs(MT_LED_STATUS_OFF_MASK)) & \
+ MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v) (((_v) << \
+ __ffs(MT_LED_STATUS_ON_MASK)) & \
+ MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 0)
+#define MT_LED_STATUS_DURATION(_v) (((_v) << \
+ __ffs(MT_LED_STATUS_DURATION_MASK)) &\
+ MT_LED_STATUS_DURATION_MASK)
+
+#define MT_CLIENT_BASE_PHYS_ADDR 0x800c0000
+
+#define MT_CLIENT_TMAC_INFO_TEMPLATE 0x040
+
+#define MT_CLIENT_STATUS 0x06c
+
+#define MT_CLIENT_RESET_TX 0x070
+#define MT_CLIENT_RESET_TX_R_E_1 BIT(16)
+#define MT_CLIENT_RESET_TX_R_E_2 BIT(17)
+#define MT_CLIENT_RESET_TX_R_E_1_S BIT(20)
+#define MT_CLIENT_RESET_TX_R_E_2_S BIT(21)
+
+#define MT_EFUSE_BASE 0x81070000
+
+#define MT_EFUSE_BASE_CTRL 0x000
+#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
+
+#define MT_EFUSE_CTRL 0x008
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_VALID BIT(29)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
+#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
+
+#define MT_CLIENT_RXINF 0x068
+#define MT_CLIENT_RXINF_RXSH_GROUPS GENMASK(2, 0)
+
+#define MT_PSE_BASE_PHYS_ADDR 0xa0000000
+
+#define MT_PSE_WTBL_2_PHYS_ADDR 0xa5000000
+
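+/* byte sizes of the per-entry WTBL segments; the WTBL3 and WTBL4 regions start after the full WTBL2 array */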
+#define MT_WTBL1_SIZE (8 * 4)
+#define MT_WTBL2_SIZE (16 * 4)
+#define MT_WTBL3_OFFSET (MT7603_WTBL_SIZE * MT_WTBL2_SIZE)
+#define MT_WTBL3_SIZE (16 * 4)
+#define MT_WTBL4_OFFSET (MT7603_WTBL_SIZE * MT_WTBL3_SIZE + \
+ MT_WTBL3_OFFSET)
+#define MT_WTBL4_SIZE (8 * 4)
+
+#define MT_WTBL1_W0_ADDR_HI GENMASK(15, 0)
+#define MT_WTBL1_W0_MUAR_IDX GENMASK(21, 16)
+#define MT_WTBL1_W0_RX_CHECK_A1 BIT(22)
+#define MT_WTBL1_W0_KEY_IDX GENMASK(24, 23)
+#define MT_WTBL1_W0_RX_CHECK_KEY_IDX BIT(25)
+#define MT_WTBL1_W0_RX_KEY_VALID BIT(26)
+#define MT_WTBL1_W0_RX_IK_VALID BIT(27)
+#define MT_WTBL1_W0_RX_VALID BIT(28)
+#define MT_WTBL1_W0_RX_CHECK_A2 BIT(29)
+#define MT_WTBL1_W0_RX_DATA_VALID BIT(30)
+#define MT_WTBL1_W0_WRITE_BURST BIT(31)
+
+#define MT_WTBL1_W1_ADDR_LO GENMASK(31, 0)
+
+#define MT_WTBL1_W2_MPDU_DENSITY GENMASK(2, 0)
+#define MT_WTBL1_W2_KEY_TYPE GENMASK(6, 3)
+#define MT_WTBL1_W2_EVEN_PN BIT(7)
+#define MT_WTBL1_W2_TO_DS BIT(8)
+#define MT_WTBL1_W2_FROM_DS BIT(9)
+#define MT_WTBL1_W2_HEADER_TRANS BIT(10)
+#define MT_WTBL1_W2_AMPDU_FACTOR GENMASK(13, 11)
+#define MT_WTBL1_W2_PWR_MGMT BIT(14)
+#define MT_WTBL1_W2_RDG BIT(15)
+#define MT_WTBL1_W2_RTS BIT(16)
+#define MT_WTBL1_W2_CFACK BIT(17)
+#define MT_WTBL1_W2_RDG_BA BIT(18)
+#define MT_WTBL1_W2_SMPS BIT(19)
+#define MT_WTBL1_W2_TXS_BAF_REPORT BIT(20)
+#define MT_WTBL1_W2_DYN_BW BIT(21)
+#define MT_WTBL1_W2_LDPC BIT(22)
+#define MT_WTBL1_W2_ITXBF BIT(23)
+#define MT_WTBL1_W2_ETXBF BIT(24)
+#define MT_WTBL1_W2_TXOP_PS BIT(25)
+#define MT_WTBL1_W2_MESH BIT(26)
+#define MT_WTBL1_W2_QOS BIT(27)
+#define MT_WTBL1_W2_HT BIT(28)
+#define MT_WTBL1_W2_VHT BIT(29)
+#define MT_WTBL1_W2_ADMISSION_CONTROL BIT(30)
+#define MT_WTBL1_W2_GROUP_ID BIT(31)
+
+#define MT_WTBL1_W3_WTBL2_FRAME_ID GENMASK(10, 0)
+#define MT_WTBL1_W3_WTBL2_ENTRY_ID GENMASK(15, 11)
+#define MT_WTBL1_W3_WTBL4_FRAME_ID GENMASK(26, 16)
+#define MT_WTBL1_W3_CHECK_PER BIT(27)
+#define MT_WTBL1_W3_KEEP_I_PSM BIT(28)
+#define MT_WTBL1_W3_I_PSM BIT(29)
+#define MT_WTBL1_W3_POWER_SAVE BIT(30)
+#define MT_WTBL1_W3_SKIP_TX BIT(31)
+
+#define MT_WTBL1_W4_WTBL3_FRAME_ID GENMASK(10, 0)
+#define MT_WTBL1_W4_WTBL3_ENTRY_ID GENMASK(16, 11)
+#define MT_WTBL1_W4_WTBL4_ENTRY_ID GENMASK(22, 17)
+#define MT_WTBL1_W4_PARTIAL_AID GENMASK(31, 23)
+
+#define MT_WTBL2_W0_PN_LO GENMASK(31, 0)
+
+#define MT_WTBL2_W1_PN_HI GENMASK(15, 0)
+#define MT_WTBL2_W1_NON_QOS_SEQNO GENMASK(27, 16)
+
+#define MT_WTBL2_W2_TID0_SN GENMASK(11, 0)
+#define MT_WTBL2_W2_TID1_SN GENMASK(23, 12)
+#define MT_WTBL2_W2_TID2_SN_LO GENMASK(31, 24)
+
+#define MT_WTBL2_W3_TID2_SN_HI GENMASK(3, 0)
+#define MT_WTBL2_W3_TID3_SN GENMASK(15, 4)
+#define MT_WTBL2_W3_TID4_SN GENMASK(27, 16)
+#define MT_WTBL2_W3_TID5_SN_LO GENMASK(31, 28)
+
+#define MT_WTBL2_W4_TID5_SN_HI GENMASK(7, 0)
+#define MT_WTBL2_W4_TID6_SN GENMASK(19, 8)
+#define MT_WTBL2_W4_TID7_SN GENMASK(31, 20)
+
+#define MT_WTBL2_W5_TX_COUNT_RATE1 GENMASK(15, 0)
+#define MT_WTBL2_W5_FAIL_COUNT_RATE1 GENMASK(31, 16)
+
+#define MT_WTBL2_W6_TX_COUNT_RATE2 GENMASK(7, 0)
+#define MT_WTBL2_W6_TX_COUNT_RATE3 GENMASK(15, 8)
+#define MT_WTBL2_W6_TX_COUNT_RATE4 GENMASK(23, 16)
+#define MT_WTBL2_W6_TX_COUNT_RATE5 GENMASK(31, 24)
+
+#define MT_WTBL2_W7_TX_COUNT_CUR_BW GENMASK(15, 0)
+#define MT_WTBL2_W7_FAIL_COUNT_CUR_BW GENMASK(31, 16)
+
+#define MT_WTBL2_W8_TX_COUNT_OTHER_BW GENMASK(15, 0)
+#define MT_WTBL2_W8_FAIL_COUNT_OTHER_BW GENMASK(31, 16)
+
+#define MT_WTBL2_W9_POWER_OFFSET GENMASK(4, 0)
+#define MT_WTBL2_W9_SPATIAL_EXT BIT(5)
+#define MT_WTBL2_W9_ANT_PRIORITY GENMASK(8, 6)
+#define MT_WTBL2_W9_CC_BW_SEL GENMASK(10, 9)
+#define MT_WTBL2_W9_CHANGE_BW_RATE GENMASK(13, 11)
+#define MT_WTBL2_W9_BW_CAP GENMASK(15, 14)
+#define MT_WTBL2_W9_SHORT_GI_20 BIT(16)
+#define MT_WTBL2_W9_SHORT_GI_40 BIT(17)
+#define MT_WTBL2_W9_SHORT_GI_80 BIT(18)
+#define MT_WTBL2_W9_SHORT_GI_160 BIT(19)
+#define MT_WTBL2_W9_MPDU_FAIL_COUNT GENMASK(25, 23)
+#define MT_WTBL2_W9_MPDU_OK_COUNT GENMASK(28, 26)
+#define MT_WTBL2_W9_RATE_IDX GENMASK(31, 29)
+
+#define MT_WTBL2_W10_RATE1 GENMASK(11, 0)
+#define MT_WTBL2_W10_RATE2 GENMASK(23, 12)
+#define MT_WTBL2_W10_RATE3_LO GENMASK(31, 24)
+
+#define MT_WTBL2_W11_RATE3_HI GENMASK(3, 0)
+#define MT_WTBL2_W11_RATE4 GENMASK(15, 4)
+#define MT_WTBL2_W11_RATE5 GENMASK(27, 16)
+#define MT_WTBL2_W11_RATE6_LO GENMASK(31, 28)
+
+#define MT_WTBL2_W12_RATE6_HI GENMASK(7, 0)
+#define MT_WTBL2_W12_RATE7 GENMASK(19, 8)
+#define MT_WTBL2_W12_RATE8 GENMASK(31, 20)
+
+#define MT_WTBL2_W13_AVG_RCPI0 GENMASK(7, 0)
+#define MT_WTBL2_W13_AVG_RCPI1 GENMASK(15, 8)
+#define MT_WTBL2_W13_AVG_RCPI2 GENMASK(23, 16)
+
+#define MT_WTBL2_W14_CC_NOISE_1S GENMASK(6, 0)
+#define MT_WTBL2_W14_CC_NOISE_2S GENMASK(13, 7)
+#define MT_WTBL2_W14_CC_NOISE_3S GENMASK(20, 14)
+#define MT_WTBL2_W14_CHAN_EST_RMS GENMASK(24, 21)
+#define MT_WTBL2_W14_CC_NOISE_SEL BIT(15)
+#define MT_WTBL2_W14_ANT_SEL GENMASK(31, 26)
+
+#define MT_WTBL2_W15_BA_WIN_SIZE GENMASK(2, 0)
+#define MT_WTBL2_W15_BA_WIN_SIZE_SHIFT 3
+#define MT_WTBL2_W15_BA_EN_TIDS GENMASK(31, 24)
+
+#define MT_WTBL1_OR (MT_WTBL1_BASE + 0x2300)
+#define MT_WTBL1_OR_PSM_WRITE BIT(31)
+
+enum mt7603_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_TKIP_NO_MIC,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_BIP_CMAC_128,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
new file mode 100644
index 000000000000..e13fea80d970
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: ISC */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mt7603.h"
+
+static int
+mt76_wmac_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct mt7603_dev *dev;
+ void __iomem *mem_base;
+ struct mt76_dev *mdev;
+ int irq;
+ int ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get device IRQ\n");
+ return irq;
+ }
+
+ mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
+ &mt7603_drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7603_dev, mt76);
+ mt76_mmio_init(mdev, mem_base);
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7603_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static int
+mt76_wmac_remove(struct platform_device *pdev)
+{
+ struct mt76_dev *mdev = platform_get_drvdata(pdev);
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+
+ mt7603_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id of_wmac_match[] = {
+ { .compatible = "mediatek,mt7628-wmac" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_wmac_match);
+MODULE_FIRMWARE(MT7628_FIRMWARE_E1);
+MODULE_FIRMWARE(MT7628_FIRMWARE_E2);
+
+struct platform_driver mt76_wmac_driver = {
+ .probe = mt76_wmac_probe,
+ .remove = mt76_wmac_remove,
+ .driver = {
+ .name = "mt76_wmac",
+ .of_match_table = of_wmac_match,
+ },
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 497e762978cc..ab6dfc026acb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -152,11 +152,11 @@ static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
return mt76x02_rate_power_val(val);
}
-void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
+ struct ieee80211_channel *chan,
+ struct mt76_rate_power *t)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
- struct mt76_rate_power *t = &dev->mt76.rate_power;
u16 val, addr;
s8 delta;
@@ -189,7 +189,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
val = mt76x02_eeprom_get(dev, addr);
t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
- t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
+ t->ht[6] = t->ht[7] = t->vht[6] = t->vht[7] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
@@ -205,31 +205,31 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
/* vht mcs 8, 9 5GHz */
val = mt76x02_eeprom_get(dev, 0x132);
- t->vht[7] = s6_to_s8(val);
- t->vht[8] = s6_to_s8(val >> 8);
+ t->vht[8] = s6_to_s8(val);
+ t->vht[9] = s6_to_s8(val >> 8);
delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev);
mt76x02_add_rate_power_offset(t, delta);
}
-void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
+void mt76x0_get_power_info(struct mt76x02_dev *dev,
+ struct ieee80211_channel *chan, s8 *tp)
{
struct mt76x0_chan_map {
u8 chan;
u8 offset;
} chan_map[] = {
- { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 },
- { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 },
- { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 },
- { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 },
- { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 },
- { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 },
- { 167, 17 }, { 171, 18 }, { 173, 19 },
+ { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 },
+ { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 },
+ { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 },
+ { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 },
+ { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 },
+ { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
+ { 167, 34 }, { 171, 36 }, { 175, 38 },
};
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u8 offset, addr;
+ int i, idx = 0;
u16 data;
- int i;
if (mt76x0_tssi_enabled(dev)) {
s8 target_power;
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
else
data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
- info[0] = target_power + mt76x0_get_delta(dev);
- info[1] = 0;
+ *tp = target_power + mt76x0_get_delta(dev);
return;
}
for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
- if (chan_map[i].chan <= chan->hw_value) {
+ if (chan->hw_value <= chan_map[i].chan) {
+ idx = (chan->hw_value == chan_map[i].chan);
offset = chan_map[i].offset;
break;
}
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
} else {
switch (chan->hw_value) {
+ case 42:
+ offset = 2;
+ break;
case 58:
offset = 8;
break;
case 106:
offset = 14;
break;
- case 112:
+ case 122:
offset = 20;
break;
case 155:
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
}
data = mt76x02_eeprom_get(dev, addr);
-
- info[0] = data;
- if (!info[0] || info[0] > 0x3f)
- info[0] = 5;
-
- info[1] = data >> 8;
- if (!info[1] || info[1] > 0x3f)
- info[1] = 5;
+ *tp = data >> (8 * idx);
+ if (*tp < 0 || *tp > 0x3f)
+ *tp = 5;
}
static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index ee9ade9f3c8b..7f73034a23b1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -25,8 +25,11 @@ struct mt76x02_dev;
int mt76x0_eeprom_init(struct mt76x02_dev *dev);
void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
-void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
-void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info);
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
+ struct ieee80211_channel *chan,
+ struct mt76_rate_power *t);
+void mt76x0_get_power_info(struct mt76x02_dev *dev,
+ struct ieee80211_channel *chan, s8 *tp);
static inline s8 s6_to_s8(u32 val)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 87b575fe1c74..bcb72e019fd2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -18,6 +18,7 @@
#include "eeprom.h"
#include "mcu.h"
#include "initvals.h"
+#include "../mt76x02_phy.h"
static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
{
@@ -186,6 +187,8 @@ void mt76x0_mac_stop(struct mt76x02_dev *dev)
{
int i = 200, ok = 0;
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+
/* Page count on TxQ */
while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
(mt76_rr(dev, 0x0a30) & 0x000000ff) ||
@@ -262,27 +265,24 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x0_init_hardware);
-struct mt76x02_dev *
-mt76x0_alloc_device(struct device *pdev,
- const struct mt76_driver_ops *drv_ops,
- const struct ieee80211_ops *ops)
+static void
+mt76x0_init_txpower(struct mt76x02_dev *dev,
+ struct ieee80211_supported_band *sband)
{
- struct mt76x02_dev *dev;
- struct mt76_dev *mdev;
-
- mdev = mt76_alloc_device(sizeof(*dev), ops);
- if (!mdev)
- return NULL;
+ struct ieee80211_channel *chan;
+ struct mt76_rate_power t;
+ s8 tp;
+ int i;
- mdev->dev = pdev;
- mdev->drv = drv_ops;
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
- dev = container_of(mdev, struct mt76x02_dev, mt76);
- mutex_init(&dev->phy_mutex);
+ mt76x0_get_tx_power_per_rate(dev, chan, &t);
+ mt76x0_get_power_info(dev, chan, &tp);
- return dev;
+ chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2;
+ }
}
-EXPORT_SYMBOL_GPL(mt76x0_alloc_device);
int mt76x0_register_device(struct mt76x02_dev *dev)
{
@@ -296,9 +296,14 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
if (ret)
return ret;
- /* overwrite unsupported features */
- if (dev->mt76.cap.has_5ghz)
+ if (dev->mt76.cap.has_5ghz) {
+ /* overwrite unsupported features */
mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
+ mt76x0_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ }
+
+ if (dev->mt76.cap.has_2ghz)
+ mt76x0_init_txpower(dev, &dev->mt76.sband_2g.sband);
mt76x02_init_debugfs(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index a1657922758e..0290ba5869a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -88,6 +88,7 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
{ MT_TX_PROT_CFG6, 0xe3f42004 },
{ MT_TX_PROT_CFG7, 0xe3f42084 },
{ MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
};
static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index a803a9b6a4c5..fee16ab21edb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -34,6 +34,8 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_rr(dev, MT_CH_IDLE);
mt76_rr(dev, MT_CH_BUSY);
+ mt76x02_edcca_init(dev, true);
+
if (mt76_is_mmio(dev)) {
mt76x02_dfs_init_params(dev);
tasklet_enable(&dev->pre_tbtt_tasklet);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 46629f61673b..51fbd75dff31 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -50,10 +50,6 @@ static inline bool is_mt7630(struct mt76x02_dev *dev)
}
/* Init */
-struct mt76x02_dev *
-mt76x0_alloc_device(struct device *pdev,
- const struct mt76_driver_ops *drv_ops,
- const struct ieee80211_ops *ops);
int mt76x0_init_hardware(struct mt76x02_dev *dev);
int mt76x0_register_device(struct mt76x02_dev *dev);
void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index d895b6f3dc44..f302162036d0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -30,7 +30,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
mt76x02_mac_start(dev);
mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
- MT_CALIBRATE_INTERVAL);
+ MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
@@ -99,7 +99,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.wake_tx_queue = mt76_wake_tx_queue,
.get_survey = mt76_get_survey,
- .get_txpower = mt76x02_get_txpower,
+ .get_txpower = mt76_get_txpower,
.flush = mt76x0e_flush,
.set_tim = mt76x0e_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
@@ -141,6 +141,15 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+ mt76_wr(dev, MT_CH_TIME_CFG,
+ MT_CH_TIME_CFG_TIMER_EN |
+ MT_CH_TIME_CFG_TX_AS_BUSY |
+ MT_CH_TIME_CFG_RX_AS_BUSY |
+ MT_CH_TIME_CFG_NAV_AS_BUSY |
+ MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ MT_CH_CCA_RC_EN |
+ FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
err = mt76x0_register_device(dev);
if (err < 0)
return err;
@@ -165,6 +174,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
int ret;
ret = pcim_enable_device(pdev);
@@ -181,16 +191,20 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops);
- if (!dev)
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
+ &drv_ops);
+ if (!mdev)
return -ENOMEM;
- mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mutex_init(&dev->phy_mutex);
+
+ mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
- dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
- dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
- ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 1eb1a802ed20..1fd22eb841c3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -845,17 +845,18 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
{
struct mt76_rate_power *t = &dev->mt76.rate_power;
- u8 info[2];
+ s8 info;
- mt76x0_get_tx_power_per_rate(dev);
- mt76x0_get_power_info(dev, info);
+ mt76x0_get_tx_power_per_rate(dev, dev->mt76.chandef.chan, t);
+ mt76x0_get_power_info(dev, dev->mt76.chandef.chan, &info);
- mt76x02_add_rate_power_offset(t, info[0]);
+ mt76x02_add_rate_power_offset(t, info);
mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
- mt76x02_add_rate_power_offset(t, -info[0]);
+ mt76x02_add_rate_power_offset(t, -info);
- mt76x02_phy_set_txpower(dev, info[0], info[1]);
+ dev->target_power = info;
+ mt76x02_phy_set_txpower(dev, info, info);
}
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
@@ -1075,7 +1076,9 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
u8 gain_delta;
int low_gain;
- dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ if (!dev->cal.avg_rssi_all)
+ dev->cal.avg_rssi_all = -75;
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0e6b43bb4678..91718647da02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -79,7 +79,6 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
mt76x0_chip_onoff(dev, false, false);
mt76u_queues_deinit(&dev->mt76);
- mt76u_mcu_deinit(&dev->mt76);
}
static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
@@ -118,7 +117,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
- MT_CALIBRATE_INTERVAL);
+ MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
@@ -155,48 +154,54 @@ static const struct ieee80211_ops mt76x0u_ops = {
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.set_rts_threshold = mt76x02_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
- .get_txpower = mt76x02_get_txpower,
+ .get_txpower = mt76_get_txpower,
};
-static int mt76x0u_register_device(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
{
- struct ieee80211_hw *hw = dev->mt76.hw;
int err;
- err = mt76u_alloc_queues(&dev->mt76);
- if (err < 0)
- goto out_err;
-
- err = mt76u_mcu_init_rx(&dev->mt76);
- if (err < 0)
- goto out_err;
-
mt76x0_chip_onoff(dev, true, true);
- if (!mt76x02_wait_for_mac(&dev->mt76)) {
- err = -ETIMEDOUT;
- goto out_err;
- }
+
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
err = mt76x0u_mcu_init(dev);
if (err < 0)
- goto out_err;
+ return err;
mt76x0_init_usb_dma(dev);
err = mt76x0_init_hardware(dev);
if (err < 0)
- goto out_err;
+ return err;
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG,
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+ return 0;
+}
+
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ int err;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto out_err;
+
+ err = mt76x0u_init_hardware(dev);
+ if (err < 0)
+ goto out_err;
+
err = mt76x0_register_device(dev);
if (err < 0)
goto out_err;
/* check hw sg support in order to enable AMSDU */
- if (mt76u_check_sg(&dev->mt76))
+ if (dev->mt76.usb.sg_en)
hw->max_tx_fragments = MT_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;
@@ -223,14 +228,18 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
};
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
u32 asic_rev, mac_rev;
int ret;
- dev = mt76x0_alloc_device(&usb_intf->dev, &drv_ops,
- &mt76x0u_ops);
- if (!dev)
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
+ &drv_ops);
+ if (!mdev)
return -ENOMEM;
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mutex_init(&dev->phy_mutex);
+
/* Quirk for Archer T1U */
if (id->driver_info)
dev->no_2ghz = true;
@@ -240,27 +249,27 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
- mt76x02u_init_mcu(&dev->mt76);
- ret = mt76u_init(&dev->mt76, usb_intf);
+ mt76x02u_init_mcu(mdev);
+ ret = mt76u_init(mdev, usb_intf);
if (ret)
goto err;
/* Disable the HW, otherwise MCU fail to initalize on hot reboot */
mt76x0_chip_onoff(dev, false, false);
- if (!mt76x02_wait_for_mac(&dev->mt76)) {
+ if (!mt76x02_wait_for_mac(mdev)) {
ret = -ETIMEDOUT;
goto err;
}
asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
mac_rev = mt76_rr(dev, MT_MAC_CSR0);
- dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n",
+ dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
asic_rev, mac_rev);
/* Note: vendor driver skips this check for MT76X0U */
if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
- dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
+ dev_warn(mdev->dev, "Warning: eFUSE not present\n");
ret = mt76x0u_register_device(dev);
if (ret < 0)
@@ -272,7 +281,7 @@ err:
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
- ieee80211_free_hw(dev->mt76.hw);
+ ieee80211_free_hw(mdev->hw);
return ret;
}
@@ -297,11 +306,11 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
pm_message_t state)
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
- struct mt76_usb *usb = &dev->mt76.usb;
mt76u_stop_queues(&dev->mt76);
mt76x0u_mac_stop(dev);
- usb_kill_urb(usb->mcu.res.urb);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ mt76x0_chip_onoff(dev, false, false);
return 0;
}
@@ -312,15 +321,6 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
struct mt76_usb *usb = &dev->mt76.usb;
int ret;
- reinit_completion(&usb->mcu.cmpl);
- ret = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
- MT_EP_IN_CMD_RESP,
- &usb->mcu.res, GFP_KERNEL,
- mt76u_mcu_complete_urb,
- &usb->mcu.cmpl);
- if (ret < 0)
- goto err;
-
ret = mt76u_submit_rx_buffers(&dev->mt76);
if (ret < 0)
goto err;
@@ -328,7 +328,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
tasklet_enable(&usb->rx_tasklet);
tasklet_enable(&usb->tx_tasklet);
- ret = mt76x0_init_hardware(dev);
+ ret = mt76x0u_init_hardware(dev);
if (ret)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index 9d7585029df9..4a282761ca58 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
+#include <linux/module.h>
#include "mt76x0.h"
#include "mcu.h"
@@ -139,12 +140,6 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 6000);
-/*
- mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
- MT_PBF_CFG_TX1Q_EN |
- MT_PBF_CFG_TX2Q_EN |
- MT_PBF_CFG_TX3Q_EN));
-*/
mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 6782665049dd..6915cce5def9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -15,8 +15,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __MT76X02_UTIL_H
-#define __MT76X02_UTIL_H
+#ifndef __MT76x02_H
+#define __MT76x02_H
#include <linux/kfifo.h>
@@ -27,6 +27,10 @@
#include "mt76x02_dma.h"
#define MT_CALIBRATE_INTERVAL HZ
+#define MT_MAC_WORK_INTERVAL (HZ / 10)
+
+#define MT_WATCHDOG_TIME (HZ / 10)
+#define MT_TX_HANG_TH 10
#define MT_MAX_CHAINS 2
struct mt76x02_rx_freq_cal {
@@ -70,6 +74,8 @@ struct mt76x02_dev {
struct mutex phy_mutex;
+ u16 vif_mask;
+
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
@@ -79,6 +85,7 @@ struct mt76x02_dev {
struct tasklet_struct pre_tbtt_tasklet;
struct delayed_work cal_work;
struct delayed_work mac_work;
+ struct delayed_work wdt_work;
u32 aggr_stats[32];
@@ -89,6 +96,10 @@ struct mt76x02_dev {
u8 tbtt_count;
u16 beacon_int;
+ u32 tx_hang_reset;
+ u8 tx_hang_check;
+ u8 mcu_timeout;
+
struct mt76x02_calibration cal;
s8 target_power;
@@ -101,6 +112,13 @@ struct mt76x02_dev {
u8 slottime;
struct mt76x02_dfs_pattern_detector dfs_pd;
+
+ /* edcca monitor */
+ bool ed_tx_blocked;
+ bool ed_monitor;
+ u8 ed_trigger;
+ u8 ed_silent;
+ ktime_t ed_time;
};
extern struct ieee80211_rate mt76x02_rates[12];
@@ -115,8 +133,7 @@ void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev);
-void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
- unsigned int idx);
+
int mt76x02_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
@@ -136,6 +153,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate);
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
s8 max_txpwr_adj);
+void mt76x02_wdt_work(struct work_struct *work);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
@@ -158,8 +176,6 @@ void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-int mt76x02_get_txpower(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int *dbm);
void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -224,4 +240,4 @@ mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
return &sta->vif->group_wcid;
}
-#endif
+#endif /* __MT76x02_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index a9d52ba1e270..7580c5c986ff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -133,5 +133,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
read_txpower);
debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
+
+ debugfs_create_u32("tx_hang_reset", 0400, dir, &dev->tx_hang_reset);
}
EXPORT_SYMBOL_GPL(mt76x02_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 054609c634a2..e4649103efd4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -881,12 +881,18 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ mutex_lock(&dev->mt76.mutex);
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
+
+ dev->ed_monitor = region == NL80211_DFS_ETSI;
+ mt76x02_edcca_init(dev, true);
+
dfs_pd->region = region;
mt76x02_dfs_init_params(dev);
tasklet_enable(&dfs_pd->dfs_tasklet);
}
+ mutex_unlock(&dev->mt76.mutex);
}
void mt76x02_regd_notifier(struct wiphy *wiphy,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index c08bf371e527..91ff6598eccf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -130,10 +130,8 @@ static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
+ u8 phy, rate_idx, nss, bw = 0;
u16 rateval;
- u8 phy, rate_idx;
- u8 nss = 1;
- u8 bw = 0;
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
rate_idx = rate->idx;
@@ -164,7 +162,7 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
phy = val >> 8;
rate_idx = val & 0xff;
- bw = 0;
+ nss = 1;
}
rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
@@ -293,6 +291,13 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
memset(txwi, 0, sizeof(*txwi));
+ if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ wcid = NULL;
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+ }
+
if (wcid)
txwi->wcid = wcid->idx;
else
@@ -309,7 +314,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
ccmp_pn[6] = pn >> 32;
ccmp_pn[7] = pn >> 40;
txwi->iv = *((__le32 *)&ccmp_pn[0]);
- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+ txwi->eiv = *((__le32 *)&ccmp_pn[4]);
}
spin_lock_bh(&dev->mt76.lock);
@@ -435,7 +440,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
}
if (wcid) {
- if (stat->pktid)
+ if (stat->pktid >= MT_PACKET_ID_FIRST)
status.skb = mt76_tx_status_skb_get(mdev, wcid,
stat->pktid, &list);
if (status.skb)
@@ -478,7 +483,9 @@ out:
}
static int
-mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+mt76x02_mac_process_rate(struct mt76x02_dev *dev,
+ struct mt76_rx_status *status,
+ u16 rate)
{
u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
@@ -510,11 +517,15 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
status->encoding = RX_ENC_HT;
status->rate_idx = idx;
break;
- case MT_PHY_TYPE_VHT:
+ case MT_PHY_TYPE_VHT: {
+ u8 n_rxstream = dev->mt76.chainmask & 0xf;
+
status->encoding = RX_ENC_VHT;
status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
- status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ status->nss = min_t(u8, n_rxstream,
+ FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
break;
+ }
default:
return -EINVAL;
}
@@ -544,8 +555,11 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
return 0;
}
-void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
+ static const u8 null_addr[ETH_ALEN] = {};
+ int i;
+
ether_addr_copy(dev->mt76.macaddr, addr);
if (!is_valid_ether_addr(dev->mt76.macaddr)) {
@@ -559,6 +573,16 @@ void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
mt76_wr(dev, MT_MAC_ADDR_DW1,
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+ mt76_wr(dev, MT_MAC_BSSID_DW0,
+ get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_BSSID_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
+ MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+ for (i = 0; i < 16; i++)
+ mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
@@ -584,7 +608,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
u16 rate = le16_to_cpu(rxwi->rate);
u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int i, pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
+ int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
s8 signal;
u8 pn_len;
u8 wcid;
@@ -644,12 +668,13 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->chains = BIT(0);
signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
- for (i = 1; i < nstreams; i++) {
- status->chains |= BIT(i);
- status->chain_signal[i] = mt76x02_mac_get_rssi(dev,
- rxwi->rssi[i],
- i);
- signal = max_t(s8, signal, status->chain_signal[i]);
+ status->chain_signal[0] = signal;
+ if (nstreams > 1) {
+ status->chains |= BIT(1);
+ status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
+ rxwi->rssi[1],
+ 1);
+ signal = max_t(s8, signal, status->chain_signal[1]);
}
status->signal = signal;
status->freq = dev->mt76.chandef.chan->center_freq;
@@ -658,12 +683,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
- if (sta) {
- ewma_signal_add(&sta->rssi, status->signal);
- sta->inactive_count = 0;
- }
-
- return mt76x02_mac_process_rate(status, rate);
+ return mt76x02_mac_process_rate(dev, status, rate);
}
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
@@ -715,7 +735,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
-void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
+void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
u32 data = 0;
@@ -729,20 +749,89 @@ void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_OFDM_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_MM20_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_MM40_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_GF20_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_GF40_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG6,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG7,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG8,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
+
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ u32 vht_prot[3];
+ int i;
+ u16 rts_thr;
+
+ for (i = 0; i < ARRAY_SIZE(prot); i++) {
+ prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
+ prot[i] &= ~MT_PROT_CFG_CTRL;
+ if (i >= 2)
+ prot[i] &= ~MT_PROT_CFG_RATE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
+ vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
+ vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
+ }
+
+ rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);
+
+ if (rts_thr != 0xffff)
+ prot[0] |= MT_PROT_CTRL_RTS_CTS;
+
+ if (legacy_prot) {
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+
+ vht_prot[0] |= MT_PROT_RATE_CCK_11;
+ vht_prot[1] |= MT_PROT_RATE_CCK_11;
+ vht_prot[2] |= MT_PROT_RATE_CCK_11;
+ } else {
+ if (rts_thr != 0xffff)
+ prot[1] |= MT_PROT_CTRL_RTS_CTS;
+
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+
+ vht_prot[0] |= MT_PROT_RATE_OFDM_24;
+ vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
+ vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ prot[3] |= MT_PROT_CTRL_RTS_CTS;
+ prot[4] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ prot[3] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ break;
+ }
+
+ if (non_gf) {
+ prot[4] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(prot); i++)
+ mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
+ mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}
void mt76x02_update_channel(struct mt76_dev *mdev)
@@ -774,8 +863,100 @@ static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
- mt76_clear(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
+{
+ if (enable) {
+ u32 data;
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
+ /* enable pa-lna */
+ data = mt76_rr(dev, MT_TX_PIN_CFG);
+ data |= MT_TX_PIN_CFG_TXANT |
+ MT_TX_PIN_CFG_RXANT |
+ MT_TX_PIN_RFTR_EN |
+ MT_TX_PIN_TRSW_EN;
+ mt76_wr(dev, MT_TX_PIN_CFG, data);
+ } else {
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
+ /* disable pa-lna */
+ mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
+ mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
+ }
+ dev->ed_tx_blocked = !enable;
+}
+
+void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
+{
+ dev->ed_trigger = 0;
+ dev->ed_silent = 0;
+
+ if (dev->ed_monitor && enable) {
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
+
+ mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
+ mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
+ ed_th << 8 | ed_th);
+ mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
+ } else {
+ mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ if (is_mt76x2(dev)) {
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_set(dev, MT_TXOP_HLDR_ET,
+ MT_TXOP_HLDR_TX40M_BLK_EN);
+ } else {
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
+ mt76_clear(dev, MT_TXOP_HLDR_ET,
+ MT_TXOP_HLDR_TX40M_BLK_EN);
+ }
+ }
+ mt76x02_edcca_tx_enable(dev, true);
+
+ /* clear previous CCA timer value */
+ mt76_rr(dev, MT_ED_CCA_TIMER);
+ dev->ed_time = ktime_get_boottime();
+}
+EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
+
+#define MT_EDCCA_TH 92
+#define MT_EDCCA_BLOCK_TH 2
+static void mt76x02_edcca_check(struct mt76x02_dev *dev)
+{
+ ktime_t cur_time;
+ u32 active, val, busy;
+
+ cur_time = ktime_get_boottime();
+ val = mt76_rr(dev, MT_ED_CCA_TIMER);
+
+ active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
+ dev->ed_time = cur_time;
+
+ busy = (val * 100) / active;
+ busy = min_t(u32, busy, 100);
+
+ if (busy > MT_EDCCA_TH) {
+ dev->ed_trigger++;
+ dev->ed_silent = 0;
+ } else {
+ dev->ed_silent++;
+ dev->ed_trigger = 0;
+ }
+
+ if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
+ !dev->ed_tx_blocked)
+ mt76x02_edcca_tx_enable(dev, false);
+ else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
+ dev->ed_tx_blocked)
+ mt76x02_edcca_tx_enable(dev, true);
}
void mt76x02_mac_work(struct work_struct *work)
@@ -784,6 +965,8 @@ void mt76x02_mac_work(struct work_struct *work)
mac_work.work);
int i, idx;
+ mutex_lock(&dev->mt76.mutex);
+
mt76x02_update_channel(&dev->mt76);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
@@ -792,14 +975,18 @@ void mt76x02_mac_work(struct work_struct *work)
dev->aggr_stats[idx++] += val >> 16;
}
- /* XXX: check beacon stuck for ap mode */
if (!dev->beacon_mask)
mt76x02_check_mac_err(dev);
+ if (dev->ed_monitor)
+ mt76x02_edcca_check(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
mt76_tx_status_check(&dev->mt76, NULL, false);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
- MT_CALIBRATE_INTERVAL);
+ MT_MAC_WORK_INTERVAL);
}
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
@@ -891,8 +1078,9 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
return 0;
}
-void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
- u8 vif_idx, bool val)
+static void
+__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
+ bool val, struct sk_buff *skb)
{
u8 old_mask = dev->beacon_mask;
bool en;
@@ -900,6 +1088,8 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
if (val) {
dev->beacon_mask |= BIT(vif_idx);
+ if (skb)
+ mt76x02_mac_set_beacon(dev, vif_idx, skb);
} else {
dev->beacon_mask &= ~BIT(vif_idx);
mt76x02_mac_set_beacon(dev, vif_idx, NULL);
@@ -910,14 +1100,37 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
en = dev->beacon_mask;
- mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
reg = MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_TIMER_EN;
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+ if (mt76_is_usb(dev))
+ return;
+
+ mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
if (en)
mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
else
mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
+
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ struct ieee80211_vif *vif, bool val)
+{
+ u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
+ struct sk_buff *skb = NULL;
+
+ if (mt76_is_mmio(dev))
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ else if (val)
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+
+ if (!dev->beacon_mask)
+ dev->tbtt_count = 0;
+
+ __mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
+
+ if (mt76_is_mmio(dev))
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 4e597004c445..6b1f25d2f64c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -18,8 +18,6 @@
#ifndef __MT76X02_MAC_H
#define __MT76X02_MAC_H
-#include <linux/average.h>
-
struct mt76x02_dev;
struct mt76x02_tx_status {
@@ -41,8 +39,6 @@ struct mt76x02_vif {
u8 idx;
};
-DECLARE_EWMA(signal, 10, 8);
-
struct mt76x02_sta {
struct mt76_wcid wcid; /* must be first */
@@ -50,8 +46,6 @@ struct mt76x02_sta {
struct mt76x02_tx_status status;
int n_frames;
- struct ewma_signal rssi;
- int inactive_count;
};
#define MT_RXINFO_BA BIT(0)
@@ -194,8 +188,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat, u8 *update);
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void *rxi);
-void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
-void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val);
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr);
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
@@ -208,6 +204,8 @@ void mt76x02_mac_work(struct work_struct *work);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb);
-void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
- bool val);
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ struct ieee80211_vif *vif, bool val);
+
+void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index b7f4edb729e3..6501b853b65c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -21,70 +21,13 @@
#include "mt76x02_mcu.h"
-static struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
-{
- struct sk_buff *skb;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return NULL;
- memcpy(skb_put(skb, len), data, len);
-
- return skb;
-}
-
-static struct sk_buff *
-mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
-{
- unsigned long timeout;
-
- if (!time_is_after_jiffies(expires))
- return NULL;
-
- timeout = expires - jiffies;
- wait_event_timeout(dev->mt76.mmio.mcu.wait,
- !skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
- timeout);
- return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
-}
-
-static int
-mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
- struct sk_buff *skb, int cmd, int seq)
-{
- struct mt76_queue *q = &dev->mt76.q_tx[qid];
- struct mt76_queue_buf buf;
- dma_addr_t addr;
- u32 tx_info;
-
- tx_info = MT_MCU_MSG_TYPE_CMD |
- FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
- FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
- FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
- FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
-
- addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->mt76.dev, addr))
- return -ENOMEM;
-
- buf.addr = addr;
- buf.len = skb->len;
-
- spin_lock_bh(&q->lock);
- mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
- mt76_queue_kick(dev, q);
- spin_unlock_bh(&q->lock);
-
- return 0;
-}
-
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
unsigned long expires = jiffies + HZ;
struct sk_buff *skb;
+ u32 tx_info;
int ret;
u8 seq;
@@ -98,7 +41,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!seq)
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
- ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+ tx_info = MT_MCU_MSG_TYPE_CMD |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+ ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, tx_info);
if (ret)
goto out;
@@ -106,12 +55,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
u32 *rxfce;
bool check_seq = false;
- skb = mt76x02_mcu_get_response(dev, expires);
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
+ dev->mcu_timeout = 1;
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
index 7e4004120102..a7b0d3e5df1d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -96,6 +96,12 @@ struct mt76x02_patch_header {
u8 pad[2];
};
+static inline struct sk_buff *
+mt76x02_mcu_msg_alloc(const void *data, int len)
+{
+ return mt76_mcu_msg_alloc(data, 0, len, 0);
+}
+
int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param);
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 66315410aebe..1229f19f2b02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -79,24 +79,24 @@ mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
* Beacon timer drifts by 1us every tick, the timer is configured
* in 1/16 TU (64us) units.
*/
- if (dev->tbtt_count < 62)
+ if (dev->tbtt_count < 63)
return;
- if (dev->tbtt_count >= 64) {
- dev->tbtt_count = 0;
- return;
- }
-
/*
* The updated beacon interval takes effect after two TBTT, because
* at this point the original interval has already been loaded into
* the next TBTT_TIMER value
*/
- if (dev->tbtt_count == 62)
+ if (dev->tbtt_count == 63)
timer_val -= 1;
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL, timer_val);
+
+ if (dev->tbtt_count >= 64) {
+ dev->tbtt_count = 0;
+ return;
+ }
}
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
@@ -116,14 +116,20 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
+ mt76_csa_check(&dev->mt76);
+
+ if (dev->mt76.csa_complete)
+ return;
+
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_add_buffered_bc, &data);
- } while (nframes != skb_queue_len(&data.q));
+ } while (nframes != skb_queue_len(&data.q) &&
+ skb_queue_len(&data.q) < 8);
- if (!nframes)
+ if (!skb_queue_len(&data.q))
return;
for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
@@ -308,8 +314,12 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
tasklet_schedule(&dev->pre_tbtt_tasklet);
/* send buffered multicast frames now */
- if (intr & MT_INT_TBTT)
- mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+ if (intr & MT_INT_TBTT) {
+ if (dev->mt76.csa_complete)
+ mt76_csa_finish(&dev->mt76);
+ else
+ mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+ }
if (intr & MT_INT_TX_STAT) {
mt76x02_mac_poll_tx_status(dev, true);
@@ -384,3 +394,137 @@ void mt76x02_mac_start(struct mt76x02_dev *dev)
MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);
+
+static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
+{
+ u32 dma_idx, prev_dma_idx;
+ struct mt76_queue *q;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ q = &dev->mt76.q_tx[i];
+
+ if (!q->queued)
+ continue;
+
+ prev_dma_idx = dev->mt76.tx_dma_idx[i];
+ dma_idx = ioread32(&q->regs->dma_idx);
+ dev->mt76.tx_dma_idx[i] = dma_idx;
+
+ if (prev_dma_idx == dma_idx)
+ break;
+ }
+
+ return i < 4;
+}
+
+static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
+{
+ u32 mask = dev->mt76.mmio.irqmask;
+ int i;
+
+ ieee80211_stop_queues(dev->mt76.hw);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->tx_tasklet);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
+ napi_disable(&dev->mt76.napi[i]);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->beacon_mask)
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ mt76x02_irq_disable(dev, mask);
+
+ /* perform device reset */
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ usleep_range(5000, 10000);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);
+
+ /* let fw reset DMA */
+ mt76_set(dev, 0x734, 0x3);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_queue_rx_reset(dev, i);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ if (dev->ed_monitor)
+ mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+
+ if (dev->beacon_mask)
+ mt76_set(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ mt76x02_irq_enable(dev, mask);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+
+ tasklet_enable(&dev->tx_tasklet);
+ tasklet_schedule(&dev->tx_tasklet);
+
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+
+ ieee80211_wake_queues(dev->mt76.hw);
+
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
+{
+ if (mt76x02_tx_hang(dev)) {
+ if (++dev->tx_hang_check >= MT_TX_HANG_TH)
+ goto restart;
+ } else {
+ dev->tx_hang_check = 0;
+ }
+
+ if (dev->mcu_timeout)
+ goto restart;
+
+ return;
+
+restart:
+ mt76x02_watchdog_reset(dev);
+
+ mutex_lock(&dev->mt76.mmio.mcu.mutex);
+ dev->mcu_timeout = 0;
+ mutex_unlock(&dev->mt76.mmio.mcu.mutex);
+
+ dev->tx_hang_reset++;
+ dev->tx_hang_check = 0;
+ memset(dev->mt76.tx_dma_idx, 0xff,
+ sizeof(dev->mt76.tx_dma_idx));
+}
+
+void mt76x02_wdt_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ wdt_work.work);
+
+ mt76x02_check_tx_hang(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
+ MT_WATCHDOG_TIME);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index 977a8e7e26df..a020c757ba5c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -132,53 +132,6 @@ void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
-int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev)
-{
- struct mt76x02_sta *sta;
- struct mt76_wcid *wcid;
- int i, j, min_rssi = 0;
- s8 cur_rssi;
-
- local_bh_disable();
- rcu_read_lock();
-
- for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
- unsigned long mask = dev->mt76.wcid_mask[i];
-
- if (!mask)
- continue;
-
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
- if (!(mask & 1))
- continue;
-
- wcid = rcu_dereference(dev->mt76.wcid[j]);
- if (!wcid)
- continue;
-
- sta = container_of(wcid, struct mt76x02_sta, wcid);
- spin_lock(&dev->mt76.rx_lock);
- if (sta->inactive_count++ < 5)
- cur_rssi = ewma_signal_read(&sta->rssi);
- else
- cur_rssi = 0;
- spin_unlock(&dev->mt76.rx_lock);
-
- if (cur_rssi < min_rssi)
- min_rssi = cur_rssi;
- }
- }
-
- rcu_read_unlock();
- local_bh_enable();
-
- if (!min_rssi)
- return -75;
-
- return min_rssi;
-}
-EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
-
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
{
int core_val, agc_val;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index 2b316cf7c70c..d2971db06f13 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -51,7 +51,6 @@ void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
-int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev);
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
bool primary_upper);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index f7de77d09d28..7401cb94fb72 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -230,6 +230,29 @@
#define MT_COM_REG2 0x0738
#define MT_COM_REG3 0x073C
+#define MT_LED_CTRL 0x0770
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0 0x0774
+#define MT_LED_TX_BLINK_1 0x0778
+
+#define MT_LED_S0_BASE 0x077C
+#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE 0x0780
+#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+ MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+ MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+ MT_LED_STATUS_DURATION_MASK)
+
#define MT_FCE_PSE_CTRL 0x0800
#define MT_FCE_PARAMETERS 0x0804
#define MT_FCE_CSO 0x0808
@@ -318,6 +341,7 @@
#define MT_CH_TIME_CFG_NAV_AS_BUSY BIT(3)
#define MT_CH_TIME_CFG_EIFS_AS_BUSY BIT(4)
#define MT_CH_TIME_CFG_MDRDY_CNT_EN BIT(5)
+#define MT_CH_CCA_RC_EN BIT(6)
#define MT_CH_TIME_CFG_CH_TIMER_CLR GENMASK(9, 8)
#define MT_CH_TIME_CFG_MDRDY_CLR GENMASK(11, 10)
@@ -378,6 +402,9 @@
#define MT_TX_PWR_CFG_4 0x1324
#define MT_TX_PIN_CFG 0x1328
#define MT_TX_PIN_CFG_TXANT GENMASK(3, 0)
+#define MT_TX_PIN_CFG_RXANT GENMASK(11, 8)
+#define MT_TX_PIN_RFTR_EN BIT(16)
+#define MT_TX_PIN_TRSW_EN BIT(18)
#define MT_TX_BAND_CFG 0x132c
#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
@@ -398,6 +425,7 @@
#define MT_TXOP_CTRL_CFG 0x1340
#define MT_TXOP_TRUN_EN GENMASK(5, 0)
#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+#define MT_TXOP_ED_CCA_EN BIT(20)
#define MT_TX_RTS_CFG 0x1344
#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
@@ -409,6 +437,7 @@
#define MT_TX_RETRY_CFG 0x134c
#define MT_TX_LINK_CFG 0x1350
+#define MT_TX_CFACK_EN BIT(12)
#define MT_VHT_HT_FBK_CFG0 0x1354
#define MT_VHT_HT_FBK_CFG1 0x1358
#define MT_LG_FBK_CFG0 0x135c
@@ -440,9 +469,10 @@
#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
#define MT_PROT_RTS_THR_EN BIT(26)
#define MT_PROT_RATE_CCK_11 0x0003
-#define MT_PROT_RATE_OFDM_6 0x4000
-#define MT_PROT_RATE_OFDM_24 0x4004
-#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_RATE_OFDM_6 0x2000
+#define MT_PROT_RATE_OFDM_24 0x2004
+#define MT_PROT_RATE_DUP_OFDM_24 0x2084
+#define MT_PROT_RATE_SGI_OFDM_24 0x2104
#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
~MT_PROT_TXOP_ALLOW_MM40 & \
@@ -511,6 +541,7 @@
#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
#define MT_AUTO_RSP_CFG 0x1404
+#define MT_AUTO_RSP_EN BIT(0)
#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
#define MT_LEGACY_BASIC_RATE 0x1408
#define MT_HT_BASIC_RATE 0x140c
@@ -532,6 +563,7 @@
#define MT_PN_PAD_MODE 0x150c
#define MT_TXOP_HLDR_ET 0x1608
+#define MT_TXOP_HLDR_TX40M_BLK_EN BIT(1)
#define MT_PROT_AUTO_TX_CFG 0x1648
#define MT_PROT_AUTO_TX_CFG_PROT_PADJ GENMASK(11, 8)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 4598cb2cc3ff..94f47248c59f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -22,7 +22,6 @@
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76x02_dev *dev = hw->priv;
struct ieee80211_vif *vif = info->control.vif;
@@ -33,13 +32,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
msta = (struct mt76x02_sta *)control->sta->drv_priv;
wcid = &msta->wcid;
- /* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != 0xff &&
- ieee80211_has_protected(hdr->frame_control))
- control->sta = NULL;
- }
-
- if (vif && !control->sta) {
+ } else if (vif) {
struct mt76x02_vif *mvif;
mvif = (struct mt76x02_vif *)vif->drv_priv;
@@ -58,8 +51,7 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
if (q == MT_RXQ_MCU) {
/* this is used just by mmio code */
- skb_queue_tail(&mdev->mmio.mcu.res_q, skb);
- wake_up(&mdev->mmio.mcu.wait);
+ mt76_mcu_rx_event(&dev->mt76, skb);
return;
}
@@ -177,7 +169,7 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (ret < 0)
return ret;
- if (pid && pid != MT_PACKET_ID_NO_ACK)
+ if (pid >= MT_PACKET_ID_FIRST)
qsel = MT_QSEL_MGMT;
*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 81970cf777c0..43f07461c8d3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -49,7 +49,12 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ /* Add zero padding of 4 to 7 bytes */
pad = round_up(skb->len, 4) + 4 - skb->len;
+
+ /* The first packet of an A-MSDU burst keeps track of the whole burst
+ * length, so update the length of both the first and the last packet.
+ */
skb_walk_frags(skb, iter) {
last = iter;
if (!iter->next) {
@@ -59,11 +64,10 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
}
}
- if (unlikely(pad)) {
- if (skb_pad(last, pad))
- return -ENOMEM;
- __skb_put(last, pad);
- }
+ if (skb_pad(last, pad))
+ return -ENOMEM;
+ __skb_put(last, pad);
+
return 0;
}
@@ -87,8 +91,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;
- if ((pid && pid != MT_PACKET_ID_NO_ACK) ||
- q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
+ if (pid >= MT_PACKET_ID_FIRST || q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
qsel = MT_QSEL_MGMT;
else
qsel = MT_QSEL_EDCA;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index 6db789f90269..0cb8751321a1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -28,21 +28,6 @@
#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
-static struct sk_buff *
-mt76x02u_mcu_msg_alloc(const void *data, int len)
-{
- struct sk_buff *skb;
-
- skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, MT_CMD_HDR_LEN);
- skb_put_data(skb, data, len);
-
- return skb;
-}
-
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
@@ -76,34 +61,21 @@ mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
struct mt76_usb *usb = &dev->usb;
- struct mt76u_buf *buf = &usb->mcu.res;
- struct urb *urb = buf->urb;
- int i, ret;
+ u8 *data = usb->mcu.data;
+ int i, len, ret;
u32 rxfce;
- u8 *data;
for (i = 0; i < 5; i++) {
- if (!wait_for_completion_timeout(&usb->mcu.cmpl,
- msecs_to_jiffies(300)))
+ ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300);
+ if (ret == -ETIMEDOUT)
continue;
+ if (ret)
+ goto out;
- if (urb->status)
- return -EIO;
-
- data = sg_virt(&urb->sg[0]);
if (usb->mcu.rp)
- mt76x02u_multiple_mcu_reads(dev, data + 4,
- urb->actual_length - 8);
+ mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);
rxfce = get_unaligned_le32(data);
- ret = mt76u_submit_buf(dev, USB_DIR_IN,
- MT_EP_IN_CMD_RESP,
- buf, GFP_KERNEL,
- mt76u_mcu_complete_urb,
- &usb->mcu.cmpl);
- if (ret)
- return ret;
-
if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
return 0;
@@ -112,27 +84,23 @@ static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
}
-
- dev_err(dev->dev, "error: %s timed out\n", __func__);
- return -ETIMEDOUT;
+out:
+ dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
+ return ret;
}
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
- unsigned int pipe;
- int ret, sent;
+ int ret;
u8 seq = 0;
u32 info;
if (test_bit(MT76_REMOVED, &dev->state))
return 0;
- pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
if (wait_resp) {
seq = ++usb->mcu.msg_seq & 0xf;
if (!seq)
@@ -146,7 +114,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
if (ret)
return ret;
- ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
+ ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
if (ret)
return ret;
@@ -166,7 +134,7 @@ mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
struct sk_buff *skb;
int err;
- skb = mt76x02u_mcu_msg_alloc(data, len);
+ skb = mt76_mcu_msg_alloc(data, MT_CMD_HDR_LEN, len, 8);
if (!skb)
return -ENOMEM;
@@ -268,14 +236,12 @@ void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
static int
-__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
+__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
const void *fw_data, int len, u32 dst_addr)
{
- u8 *data = sg_virt(&buf->urb->sg[0]);
- DECLARE_COMPLETION_ONSTACK(cmpl);
__le32 info;
u32 val;
- int err;
+ int err, data_len;
info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, len) |
@@ -291,25 +257,12 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
MT_FCE_DMA_LEN, len << 16);
- buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
- err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
- MT_EP_OUT_INBAND_CMD,
- buf, GFP_KERNEL,
- mt76u_mcu_complete_urb, &cmpl);
- if (err < 0)
- return err;
-
- if (!wait_for_completion_timeout(&cmpl,
- msecs_to_jiffies(1000))) {
- dev_err(dev->mt76.dev, "firmware upload timed out\n");
- usb_kill_urb(buf->urb);
- return -ETIMEDOUT;
- }
+ data_len = MT_CMD_HDR_LEN + len + sizeof(info);
- if (mt76u_urb_error(buf->urb)) {
- dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
- buf->urb->status);
- return buf->urb->status;
+ err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000);
+ if (err) {
+ dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
+ return err;
}
val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
@@ -322,17 +275,16 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset)
{
- int err, len, pos = 0, max_len = max_payload - 8;
- struct mt76u_buf buf;
+ int len, err = 0, pos = 0, max_len = max_payload - 8;
+ u8 *buf;
- err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
- GFP_KERNEL);
- if (err < 0)
- return err;
+ buf = kmalloc(max_payload, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
while (data_len > 0) {
len = min_t(int, data_len, max_len);
- err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
+ err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
len, offset + pos);
if (err < 0)
break;
@@ -341,7 +293,7 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
pos += len;
usleep_range(5000, 10000);
}
- mt76u_buf_free(&buf);
+ kfree(buf);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 38bd466cff16..a48c261b0c63 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -75,6 +75,58 @@ static const struct ieee80211_iface_combination mt76x02_if_comb[] = {
}
};
+static void
+mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
+ u8 delay_off)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev,
+ mt76);
+ u32 val;
+
+ val = MT_LED_STATUS_DURATION(0xff) |
+ MT_LED_STATUS_OFF(delay_off) |
+ MT_LED_STATUS_ON(delay_on);
+
+ mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
+ mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
+
+ val = MT_LED_CTRL_REPLAY(mdev->led_pin) |
+ MT_LED_CTRL_KICK(mdev->led_pin);
+ if (mdev->led_al)
+ val |= MT_LED_CTRL_POLARITY(mdev->led_pin);
+ mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int
+mt76x02_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt76x02_led_set_config(mdev, delta_on, delta_off);
+
+ return 0;
+}
+
+static void
+mt76x02_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+
+ if (!brightness)
+ mt76x02_led_set_config(mdev, 0, 0xff);
+ else
+ mt76x02_led_set_config(mdev, 0xff, 0);
+}
+
void mt76x02_init_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -88,27 +140,37 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
+
if (mt76_is_usb(dev)) {
hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
MT_DMA_HDR_LEN;
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
} else {
+ INIT_DELAYED_WORK(&dev->wdt_work, mt76x02_wdt_work);
+
mt76x02_dfs_init_detector(dev);
wiphy->reg_notifier = mt76x02_regd_notifier;
wiphy->iface_combinations = mt76x02_if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set =
+ mt76x02_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt76x02_led_set_blink;
+ }
}
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);
@@ -189,8 +251,6 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_AP)
set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
- ewma_signal_init(&msta->rssi);
-
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_add);
@@ -207,8 +267,9 @@ void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
-void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
- unsigned int idx)
+static void
+mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ unsigned int idx)
{
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76_txq *mtxq;
@@ -221,7 +282,6 @@ void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mt76_txq_init(&dev->mt76, vif->txq);
}
-EXPORT_SYMBOL_GPL(mt76x02_vif_init);
int
mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
@@ -248,6 +308,15 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_STATION)
idx += 8;
+ if (dev->vif_mask & BIT(idx))
+ return -EBUSY;
+
+ /* Allow changing the MAC address in HW when creating the first interface. */
+ if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
+ mt76x02_mac_setaddr(dev, vif->addr);
+
+ dev->vif_mask |= BIT(idx);
+
mt76x02_vif_init(dev, vif, idx);
return 0;
}
@@ -257,8 +326,10 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
mt76_txq_remove(&dev->mt76, vif->txq);
+ dev->vif_mask &= ~BIT(mvif->idx);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
@@ -360,7 +431,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
} else {
if (idx == wcid->hw_key_idx) {
wcid->hw_key_idx = -1;
- wcid->sw_iv = true;
+ wcid->sw_iv = false;
}
key = NULL;
@@ -463,7 +534,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
return -EINVAL;
mutex_lock(&dev->mt76.mutex);
- mt76x02_mac_set_tx_protection(dev, val);
+ mt76x02_mac_set_rts_thresh(dev, val);
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -546,24 +617,6 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete);
-int mt76x02_get_txpower(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int *dbm)
-{
- struct mt76x02_dev *dev = hw->priv;
- u8 nstreams = dev->mt76.chainmask & 0xf;
-
- *dbm = dev->mt76.txpower_cur / 2;
-
- /* convert from per-chain power to combined
- * output on 2x2 devices
- */
- if (nstreams > 1)
- *dbm += 3;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x02_get_txpower);
-
void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
bool ps)
{
@@ -614,29 +667,26 @@ static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
{
- static const u8 null_addr[ETH_ALEN] = {};
int i;
- mt76_wr(dev, MT_MAC_BSSID_DW0,
- get_unaligned_le32(dev->mt76.macaddr));
- mt76_wr(dev, MT_MAC_BSSID_DW1,
- get_unaligned_le16(dev->mt76.macaddr + 4) |
- FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
- MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
-
- /* Fire a pre-TBTT interrupt 8 ms before TBTT */
- mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
- 8 << 4);
- mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
- MT_DFS_GP_INTERVAL);
- mt76_wr(dev, MT_INT_TIMER_EN, 0);
+ if (mt76_is_mmio(dev)) {
+ /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+ 8 << 4);
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+ MT_DFS_GP_INTERVAL);
+ mt76_wr(dev, MT_INT_TIMER_EN, 0);
+ }
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+ mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
- for (i = 0; i < 8; i++) {
- mt76x02_mac_set_bssid(dev, i, null_addr);
+ for (i = 0; i < 8; i++)
mt76x02_mac_set_beacon(dev, i, NULL);
- }
+
mt76x02_set_beacon_offsets(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
@@ -654,21 +704,20 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID)
mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid);
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- tasklet_disable(&dev->pre_tbtt_tasklet);
- mt76x02_mac_set_beacon_enable(dev, mvif->idx,
- info->enable_beacon);
- tasklet_enable(&dev->pre_tbtt_tasklet);
- }
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt76x02_mac_set_tx_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
if (changed & BSS_CHANGED_BEACON_INT) {
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
dev->beacon_int = info->beacon_int;
- dev->tbtt_count = 0;
}
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon);
+
if (changed & BSS_CHANGED_ERP_PREAMBLE)
mt76x02_mac_set_short_preamble(dev, info->use_short_preamble);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index 54a9b5fac787..f8534362e2c8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -143,6 +143,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_VHT_HT_FBK_CFG1, 0xedcba980 },
{ MT_PROT_AUTO_TX_CFG, 0x00830083 },
{ MT_HT_CTRL_CFG, 0x000001ff },
+ { MT_TX_LINK_CFG, 0x00001020 },
};
struct mt76_reg_pair prot_vals[] = {
{ MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
index e25905c91ee2..e99d4c9bd428 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
@@ -23,6 +23,9 @@ void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force)
u32 rts_cfg;
int i;
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
+
mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
index 4c8e20bce920..42ff221d7706 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -25,6 +25,12 @@ struct mt76x02_vif;
int mt76x2_mac_start(struct mt76x02_dev *dev);
void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
-void mt76x2_mac_resume(struct mt76x02_dev *dev);
+
+static inline void mt76x2_mac_resume(struct mt76x02_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
index acfa2b570c7c..40ef43926c06 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
@@ -26,29 +26,6 @@
#define MT_MCU_PCIE_REMAP_BASE2 0x0744
#define MT_MCU_PCIE_REMAP_BASE3 0x0748
-#define MT_LED_CTRL 0x0770
-#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
-#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
-#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
-#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
-
-#define MT_LED_TX_BLINK_0 0x0774
-#define MT_LED_TX_BLINK_1 0x0778
-
-#define MT_LED_S0_BASE 0x077C
-#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
-#define MT_LED_S1_BASE 0x0780
-#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
-#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
- MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
- MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
-#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
- MT_LED_STATUS_DURATION_MASK)
-
#define MT_MCU_ROM_PATCH_OFFSET 0x80000
#define MT_MCU_ROM_PATCH_ADDR 0x90000
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index b259e4b50f1e..6c619f1c65c9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -49,11 +49,9 @@ static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
extern const struct ieee80211_ops mt76x2_ops;
-struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x02_dev *dev);
void mt76x2_phy_power_on(struct mt76x02_dev *dev);
-int mt76x2_init_hardware(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);
int mt76x2_eeprom_init(struct mt76x02_dev *dev);
int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
index 0b0075411b34..76cb1f84eff5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
@@ -29,14 +29,12 @@
extern const struct ieee80211_ops mt76x2u_ops;
-struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev);
int mt76x2u_register_device(struct mt76x02_dev *dev);
int mt76x2u_init_hardware(struct mt76x02_dev *dev);
void mt76x2u_cleanup(struct mt76x02_dev *dev);
void mt76x2u_stop_hw(struct mt76x02_dev *dev);
int mt76x2u_mac_reset(struct mt76x02_dev *dev);
-void mt76x2u_mac_resume(struct mt76x02_dev *dev);
int mt76x2u_mac_start(struct mt76x02_dev *dev);
int mt76x2u_mac_stop(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 92432fe97312..6274655e1f7e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -30,7 +30,19 @@ static const struct pci_device_id mt76pci_device_table[] = {
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = sizeof(struct mt76x02_txwi),
+ .update_survey = mt76x02_update_channel,
+ .tx_prepare_skb = mt76x02_tx_prepare_skb,
+ .tx_complete_skb = mt76x02_tx_complete_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .rx_poll_complete = mt76x02_rx_poll_complete,
+ .sta_ps = mt76x02_sta_ps,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ };
struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
int ret;
ret = pcim_enable_device(pdev);
@@ -47,17 +59,19 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- dev = mt76x2_alloc_device(&pdev->dev);
- if (!dev)
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x2_ops,
+ &drv_ops);
+ if (!mdev)
return -ENOMEM;
- mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
mt76x2_reset_wlan(dev, false);
- dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
- dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
- ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 7f4ea2d00f42..984d9c4c2e1a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -119,9 +119,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
- mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
- mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
-
+ mt76x02_mac_setaddr(dev, macaddr);
mt76x02_init_beacon_config(dev);
if (!hard)
return 0;
@@ -151,6 +149,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
MT_CH_TIME_CFG_RX_AS_BUSY |
MT_CH_TIME_CFG_NAV_AS_BUSY |
MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ MT_CH_CCA_RC_EN |
FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
mt76x02_set_tx_ackto(dev);
@@ -174,13 +173,6 @@ int mt76x2_mac_start(struct mt76x02_dev *dev)
return 0;
}
-void mt76x2_mac_resume(struct mt76x02_dev *dev)
-{
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX |
- MT_MAC_SYS_CTRL_ENABLE_RX);
-}
-
static void
mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
{
@@ -260,7 +252,7 @@ mt76x2_power_on(struct mt76x02_dev *dev)
mt76x2_power_on_rf(dev, 1);
}
-int mt76x2_init_hardware(struct mt76x02_dev *dev)
+static int mt76x2_init_hardware(struct mt76x02_dev *dev)
{
int ret;
@@ -300,6 +292,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->wdt_work);
mt76x02_mcu_set_radio_state(dev, false);
mt76x2_mac_stop(dev, false);
}
@@ -313,81 +306,6 @@ void mt76x2_cleanup(struct mt76x02_dev *dev)
mt76x02_mcu_cleanup(dev);
}
-struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
-{
- static const struct mt76_driver_ops drv_ops = {
- .txwi_size = sizeof(struct mt76x02_txwi),
- .update_survey = mt76x02_update_channel,
- .tx_prepare_skb = mt76x02_tx_prepare_skb,
- .tx_complete_skb = mt76x02_tx_complete_skb,
- .rx_skb = mt76x02_queue_rx_skb,
- .rx_poll_complete = mt76x02_rx_poll_complete,
- .sta_ps = mt76x02_sta_ps,
- .sta_add = mt76x02_sta_add,
- .sta_remove = mt76x02_sta_remove,
- };
- struct mt76x02_dev *dev;
- struct mt76_dev *mdev;
-
- mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops);
- if (!mdev)
- return NULL;
-
- dev = container_of(mdev, struct mt76x02_dev, mt76);
- mdev->dev = pdev;
- mdev->drv = &drv_ops;
-
- return dev;
-}
-
-static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
- u8 delay_off)
-{
- struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev,
- mt76);
- u32 val;
-
- val = MT_LED_STATUS_DURATION(0xff) |
- MT_LED_STATUS_OFF(delay_off) |
- MT_LED_STATUS_ON(delay_on);
-
- mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
- mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
-
- val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
- MT_LED_CTRL_KICK(mt76->led_pin);
- if (mt76->led_al)
- val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
- mt76_wr(dev, MT_LED_CTRL, val);
-}
-
-static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
- led_cdev);
- u8 delta_on, delta_off;
-
- delta_off = max_t(u8, *delay_off / 10, 1);
- delta_on = max_t(u8, *delay_on / 10, 1);
-
- mt76x2_led_set_config(mt76, delta_on, delta_off);
- return 0;
-}
-
-static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
- led_cdev);
-
- if (!brightness)
- mt76x2_led_set_config(mt76, 0, 0xff);
- else
- mt76x2_led_set_config(mt76, 0xff, 0);
-}
-
int mt76x2_register_device(struct mt76x02_dev *dev)
{
int ret;
@@ -402,12 +320,6 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
mt76x02_config_mac_addr_list(dev);
- /* init led callbacks */
- if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
- }
-
ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index b54a32397486..878ce92405ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -33,7 +33,9 @@ mt76x2_start(struct ieee80211_hw *hw)
goto out;
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
- MT_CALIBRATE_INTERVAL);
+ MT_MAC_WORK_INTERVAL);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
+ MT_WATCHDOG_TIME);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
@@ -189,7 +191,7 @@ const struct ieee80211_ops mt76x2_ops = {
.sw_scan_complete = mt76x02_sw_scan_complete,
.flush = mt76x2_flush,
.ampdu_action = mt76x02_ampdu_action,
- .get_txpower = mt76x02_get_txpower,
+ .get_txpower = mt76_get_txpower,
.wake_tx_queue = mt76_wake_tx_queue,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.release_buffered_frames = mt76_release_buffered_frames,
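
The pci_main.c hunk above reschedules the MAC maintenance work at MT_MAC_WORK_INTERVAL and starts an additional wdt_work watchdog on mac80211's workqueue, matching the cancel_delayed_work_sync(&dev->wdt_work) added to mt76x2_stop_hardware() earlier in this patch. A sketch of a self-rearming watchdog work item under those assumptions, with illustrative names (my_priv, MY_WATCHDOG_TIME):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <net/mac80211.h>

#define MY_WATCHDOG_TIME        (HZ / 10)       /* placeholder interval */

struct my_priv {
        struct ieee80211_hw *hw;
        struct delayed_work wdt_work;
};

static void my_wdt_work(struct work_struct *work)
{
        struct my_priv *priv = container_of(work, struct my_priv, wdt_work.work);

        /* ... inspect tx queues, trigger recovery if they look stuck ... */

        /* re-arm for the next pass */
        ieee80211_queue_delayed_work(priv->hw, &priv->wdt_work, MY_WATCHDOG_TIME);
}

static void my_start_watchdog(struct my_priv *priv)
{
        INIT_DELAYED_WORK(&priv->wdt_work, my_wdt_work);
        ieee80211_queue_delayed_work(priv->hw, &priv->wdt_work, MY_WATCHDOG_TIME);
}

static void my_stop_watchdog(struct my_priv *priv)
{
        /* mirrors the cancel_delayed_work_sync() added to the stop path above */
        cancel_delayed_work_sync(&priv->wdt_work);
}
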
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index da7cd40f56ff..cc1aebcb0696 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -74,6 +74,7 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x2_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
+ mt76x02_edcca_init(dev, true);
dev->cal.channel_cal_done = true;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index c9634a774705..1848e8ab2e21 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -241,7 +241,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
t.offset1 = txp.chain[1].tssi_offset;
mt76x2_mcu_tssi_comp(dev, &t);
- if (t.pa_mode || dev->cal.dpd_cal_done)
+ if (t.pa_mode || dev->cal.dpd_cal_done || dev->ed_tx_blocked)
return;
usleep_range(10000, 20000);
@@ -284,7 +284,9 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ if (!dev->cal.avg_rssi_all)
+ dev->cal.avg_rssi_all = -75;
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
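
The phy.c hunk above switches to the shared mt76_get_min_avg_rssi() helper and substitutes a fixed default when no per-station average is available (the helper returns 0 in that case, as its implementation later in this patch shows). A tiny sketch of the fallback; the -75 dBm value comes from the patch, the function name is illustrative:

static int effective_min_rssi(int avg_rssi_all)
{
        /* 0 means "no samples yet"; fall back to a conservative default */
        return avg_rssi_all ? avg_rssi_all : -75;
}
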
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 4d1788eb3812..ddb6b2c48e01 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -36,24 +36,36 @@ static const struct usb_device_id mt76x2u_device_table[] = {
static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ static const struct mt76_driver_ops drv_ops = {
+ .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+ .tx_complete_skb = mt76x02u_tx_complete_skb,
+ .tx_status_data = mt76x02_tx_status_data,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ };
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
int err;
- dev = mt76x2u_alloc_device(&intf->dev);
- if (!dev)
+ mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
+ &drv_ops);
+ if (!mdev)
return -ENOMEM;
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+
udev = usb_get_dev(udev);
usb_reset_device(udev);
- mt76x02u_init_mcu(&dev->mt76);
- err = mt76u_init(&dev->mt76, intf);
+ mt76x02u_init_mcu(mdev);
+ err = mt76u_init(mdev, intf);
if (err < 0)
goto err;
- dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
- dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+ mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
err = mt76x2u_register_device(dev);
if (err < 0)
@@ -88,11 +100,9 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
pm_message_t state)
{
struct mt76x02_dev *dev = usb_get_intfdata(intf);
- struct mt76_usb *usb = &dev->mt76.usb;
mt76u_stop_queues(&dev->mt76);
mt76x2u_stop_hw(dev);
- usb_kill_urb(usb->mcu.res.urb);
return 0;
}
@@ -103,15 +113,6 @@ static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
struct mt76_usb *usb = &dev->mt76.usb;
int err;
- reinit_completion(&usb->mcu.cmpl);
- err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
- MT_EP_IN_CMD_RESP,
- &usb->mcu.res, GFP_KERNEL,
- mt76u_mcu_complete_urb,
- &usb->mcu.cmpl);
- if (err < 0)
- goto err;
-
err = mt76u_submit_rx_buffers(&dev->mt76);
if (err < 0)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 0be3784f44fb..1da90e58d942 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -134,30 +134,6 @@ static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
return 0;
}
-struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
-{
- static const struct mt76_driver_ops drv_ops = {
- .tx_prepare_skb = mt76x02u_tx_prepare_skb,
- .tx_complete_skb = mt76x02u_tx_complete_skb,
- .tx_status_data = mt76x02_tx_status_data,
- .rx_skb = mt76x02_queue_rx_skb,
- .sta_add = mt76x02_sta_add,
- .sta_remove = mt76x02_sta_remove,
- };
- struct mt76x02_dev *dev;
- struct mt76_dev *mdev;
-
- mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
- if (!mdev)
- return NULL;
-
- dev = container_of(mdev, struct mt76x02_dev, mt76);
- mdev->dev = pdev;
- mdev->drv = &drv_ops;
-
- return dev;
-}
-
int mt76x2u_init_hardware(struct mt76x02_dev *dev)
{
int i, k, err;
@@ -207,11 +183,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
mt76x02_mac_shared_key_setup(dev, i, k, NULL);
}
- mt76_clear(dev, MT_BEACON_TIME_CFG,
- MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_SYNC_MODE |
- MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_BEACON_TX);
+ mt76x02_init_beacon_config(dev);
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
@@ -242,10 +214,6 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto fail;
- err = mt76u_mcu_init_rx(&dev->mt76);
- if (err < 0)
- goto fail;
-
err = mt76x2u_init_hardware(dev);
if (err < 0)
goto fail;
@@ -256,7 +224,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
goto fail;
/* check hw sg support in order to enable AMSDU */
- if (mt76u_check_sg(&dev->mt76))
+ if (dev->mt76.usb.sg_en)
hw->max_tx_fragments = MT_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;
@@ -287,5 +255,4 @@ void mt76x2u_cleanup(struct mt76x02_dev *dev)
mt76x02_mcu_set_radio_state(dev, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
- mt76u_mcu_deinit(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index db2194a92e67..5e84b4535cb1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -143,8 +143,8 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev)
rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
- mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
- mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
/* wait tx dma to stop */
for (i = 0; i < 2000; i++) {
@@ -211,12 +211,3 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev)
return 0;
}
-
-void mt76x2u_mac_resume(struct mt76x02_dev *dev)
-{
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX |
- MT_MAC_SYS_CTRL_ENABLE_RX);
- mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
- mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 2b48cc51a30d..2ac78e4dc41a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -28,7 +28,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
goto out;
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
- MT_CALIBRATE_INTERVAL);
+ MT_MAC_WORK_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
out:
@@ -46,19 +46,6 @@ static void mt76x2u_stop(struct ieee80211_hw *hw)
mutex_unlock(&dev->mt76.mutex);
}
-static int mt76x2u_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct mt76x02_dev *dev = hw->priv;
- unsigned int idx = 8;
-
- if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
- mt76x02_mac_setaddr(dev, vif->addr);
-
- mt76x02_vif_init(dev, vif, idx);
- return 0;
-}
-
static int
mt76x2u_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
@@ -70,13 +57,12 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76_set_channel(&dev->mt76);
- mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
- mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
mt76x2_mac_stop(dev, false);
err = mt76x2u_phy_set_channel(dev, chandef);
- mt76x2u_mac_resume(dev);
+ mt76x2_mac_resume(dev);
+ mt76x02_edcca_init(dev, true);
clear_bit(MT76_RESET, &dev->mt76.state);
mt76_txq_schedule_all(&dev->mt76);
@@ -125,7 +111,7 @@ const struct ieee80211_ops mt76x2u_ops = {
.tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
- .add_interface = mt76x2u_add_interface,
+ .add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
.sta_state = mt76_sta_state,
.set_key = mt76x02_set_key,
@@ -138,5 +124,5 @@ const struct ieee80211_ops mt76x2u_ops = {
.sw_scan_start = mt76x02_sw_scan,
.sw_scan_complete = mt76x02_sw_scan_complete,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
- .get_txpower = mt76x02_get_txpower,
+ .get_txpower = mt76_get_txpower,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
index 45a95ee3a415..152d41fe9ff5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -39,7 +39,7 @@ static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev)
{
struct mt76_usb *usb = &dev->mt76.usb;
- const u8 data[] = {
+ static const u8 data[] = {
0x6f, 0xfc, 0x08, 0x01,
0x20, 0x04, 0x00, 0x00,
0x00, 0x09, 0x00,
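
The usb_mcu.c change above marks the MCU patch-trigger blob static const, so it lives in read-only data instead of being rebuilt on the stack on every call. The same idiom in isolation, with illustrative bytes:

#include <linux/kernel.h>
#include <linux/types.h>

static const u8 patch_trigger[] = {
        0x6f, 0xfc, 0x08, 0x01,                 /* placeholder payload */
};

static size_t patch_trigger_len(void)
{
        return ARRAY_SIZE(patch_trigger);       /* blob resides in .rodata */
}
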
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index 11d414d86c68..07f67cb6854c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -43,8 +43,9 @@ mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
if (!mac_stopped)
- mt76x2u_mac_resume(dev);
+ mt76x2_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
+ mt76x02_edcca_init(dev, true);
dev->cal.channel_cal_done = true;
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 7b711058807d..5a349fe3e576 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -170,21 +170,22 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
int pid;
if (!wcid)
- return 0;
+ return MT_PACKET_ID_NO_ACK;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
return MT_PACKET_ID_NO_ACK;
if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
- return 0;
+ return MT_PACKET_ID_NO_SKB;
spin_lock_bh(&dev->status_list.lock);
memset(cb, 0, sizeof(*cb));
wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
- if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK)
- wcid->packet_id = 1;
+ if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
+ wcid->packet_id == MT_PACKET_ID_NO_SKB)
+ wcid->packet_id = MT_PACKET_ID_FIRST;
pid = wcid->packet_id;
cb->wcid = wcid->idx;
@@ -204,9 +205,6 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
{
struct sk_buff *skb, *tmp;
- if (pktid == MT_PACKET_ID_NO_ACK)
- return NULL;
-
skb_queue_walk_safe(&dev->status_list, skb, tmp) {
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
@@ -216,7 +214,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
if (cb->pktid == pktid)
return skb;
- if (!pktid &&
+ if (pktid >= 0 &&
!time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
continue;
@@ -330,7 +328,8 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
if (last)
- info->flags |= IEEE80211_TX_STATUS_EOSP;
+ info->flags |= IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
mt76_skb_set_moredata(skb, !last);
dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
@@ -394,6 +393,11 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
bool probe;
int idx;
+ if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
+ *empty = true;
+ return 0;
+ }
+
skb = mt76_txq_dequeue(dev, mtxq, false);
if (!skb) {
*empty = true;
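
The tx.c hunks above rework packet-id handling so that 0 is no longer used as a catch-all: MT_PACKET_ID_NO_ACK and MT_PACKET_ID_NO_SKB become reserved markers and real ids start at MT_PACKET_ID_FIRST, wrapping within the id mask. A hedged sketch of that allocation scheme; the numeric values and helper name below are placeholders, not the driver's actual definitions:

#include <linux/bits.h>
#include <linux/types.h>

#define PKT_ID_MASK             GENMASK(6, 0)   /* placeholder width */
#define PKT_ID_NO_ACK           0
#define PKT_ID_NO_SKB           1
#define PKT_ID_FIRST            2

static u8 pkt_id_next(u8 *last_id)
{
        u8 id = (*last_id + 1) & PKT_ID_MASK;

        /* skip the reserved markers when the counter wraps */
        if (id == PKT_ID_NO_ACK || id == PKT_ID_NO_SKB)
                id = PKT_ID_FIRST;

        *last_id = id;
        return id;
}
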
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index b061263453d4..ae6ada370597 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -22,6 +22,10 @@
#define MT_VEND_REQ_MAX_RETRY 10
#define MT_VEND_REQ_TOUT_MS 300
+static bool disable_usb_sg;
+module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
+MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
+
/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
@@ -241,6 +245,16 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
return mt76u_req_rd_rp(dev, base, data, n);
}
+static bool mt76u_check_sg(struct mt76_dev *dev)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
+ (udev->bus->no_sg_constraint ||
+ udev->speed == USB_SPEED_WIRELESS));
+}
+
static int
mt76u_set_endpoints(struct usb_interface *intf,
struct mt76_usb *usb)
@@ -309,42 +323,66 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
return i ? : -ENOMEM;
}
-int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
- int nsgs, int len, int sglen, gfp_t gfp)
+static int
+mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76u_buf *buf, int nsgs, gfp_t gfp)
+{
+ if (dev->usb.sg_en) {
+ return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
+ SKB_WITH_OVERHEAD(q->buf_size));
+ } else {
+ buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+ return buf->buf ? 0 : -ENOMEM;
+ }
+}
+
+static int
+mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf)
{
- buf->urb = usb_alloc_urb(0, gfp);
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+
+ buf->len = SKB_WITH_OVERHEAD(q->buf_size);
+ buf->dev = dev;
+
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!buf->urb)
return -ENOMEM;
- buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
- gfp);
- if (!buf->urb->sg)
- return -ENOMEM;
+ if (dev->usb.sg_en) {
+ buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
+ sizeof(*buf->urb->sg),
+ GFP_KERNEL);
+ if (!buf->urb->sg)
+ return -ENOMEM;
- sg_init_table(buf->urb->sg, nsgs);
- buf->dev = dev;
+ sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE);
+ }
- return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
+ return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL);
}
-EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
-void mt76u_buf_free(struct mt76u_buf *buf)
+static void mt76u_buf_free(struct mt76u_buf *buf)
{
struct urb *urb = buf->urb;
int i;
for (i = 0; i < urb->num_sgs; i++)
skb_free_frag(sg_virt(&urb->sg[i]));
+
+ if (buf->buf)
+ skb_free_frag(buf->buf);
+
usb_free_urb(buf->urb);
}
-EXPORT_SYMBOL_GPL(mt76u_buf_free);
-int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
- struct mt76u_buf *buf, gfp_t gfp,
- usb_complete_t complete_fn, void *context)
+static void
+mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, usb_complete_t complete_fn,
+ void *context)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
+ u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
unsigned int pipe;
if (dir == USB_DIR_IN)
@@ -352,13 +390,21 @@ int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
else
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
- usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
+ usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
complete_fn, context);
+}
+
+static int
+mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn,
+ context);
trace_submit_urb(dev, buf->urb);
return usb_submit_urb(buf->urb, gfp);
}
-EXPORT_SYMBOL_GPL(mt76u_submit_buf);
static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
@@ -393,10 +439,11 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
}
static int
-mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- u8 *data = sg_virt(&urb->sg[0]);
+ struct urb *urb = buf->urb;
+ u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
int data_len, len, nsgs = 1;
struct sk_buff *skb;
@@ -407,21 +454,20 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
if (len < 0)
return 0;
+ data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
+ data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
+ if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
+ return 0;
+
skb = build_skb(data, q->buf_size);
if (!skb)
return 0;
- data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
skb_reserve(skb, MT_DMA_HDR_LEN);
- if (skb->tail + data_len > skb->end) {
- dev_kfree_skb(skb);
- return 1;
- }
-
__skb_put(skb, data_len);
len -= data_len;
- while (len > 0) {
+ while (len > 0 && nsgs < urb->num_sgs) {
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
sg_page(&urb->sg[nsgs]),
@@ -449,7 +495,8 @@ static void mt76u_complete_rx(struct urb *urb)
case -ENOENT:
return;
default:
- dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
+ dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+ urb->status);
/* fall through */
case 0:
break;
@@ -470,8 +517,8 @@ static void mt76u_rx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int err, nsgs, buf_len = q->buf_size;
struct mt76u_buf *buf;
+ int err, count;
rcu_read_lock();
@@ -480,11 +527,10 @@ static void mt76u_rx_tasklet(unsigned long data)
if (!buf)
break;
- nsgs = mt76u_process_rx_entry(dev, buf->urb);
- if (nsgs > 0) {
- err = mt76u_fill_rx_sg(dev, buf, nsgs,
- buf_len,
- SKB_WITH_OVERHEAD(buf_len));
+ count = mt76u_process_rx_entry(dev, buf);
+ if (count > 0) {
+ err = mt76u_refill_rx(dev, q, buf, count,
+ GFP_ATOMIC);
if (err < 0)
break;
}
@@ -521,8 +567,13 @@ EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
+ struct mt76_usb *usb = &dev->usb;
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i, err, nsgs;
+ int i, err;
+
+ usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock);
@@ -532,23 +583,13 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
if (!q->entry)
return -ENOMEM;
- if (mt76u_check_sg(dev)) {
- q->buf_size = MT_RX_BUF_SIZE;
- nsgs = MT_SG_MAX_SIZE;
- } else {
- q->buf_size = PAGE_SIZE;
- nsgs = 1;
- }
-
- for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
- err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
- nsgs, q->buf_size,
- SKB_WITH_OVERHEAD(q->buf_size),
- GFP_KERNEL);
+ q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
+ q->ndesc = MT_NUM_RX_ENTRIES;
+ for (i = 0; i < q->ndesc; i++) {
+ err = mt76u_buf_alloc(dev, &q->entry[i].ubuf);
if (err < 0)
return err;
}
- q->ndesc = MT_NUM_RX_ENTRIES;
return mt76u_submit_rx_buffers(dev);
}
@@ -585,6 +626,7 @@ static void mt76u_stop_rx(struct mt76_dev *dev)
static void mt76u_tx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue_entry entry;
struct mt76u_buf *buf;
struct mt76_queue *q;
bool wake;
@@ -599,17 +641,18 @@ static void mt76u_tx_tasklet(unsigned long data)
if (!buf->done || !q->queued)
break;
- dev->drv->tx_complete_skb(dev, q,
- &q->entry[q->head],
- false);
-
if (q->entry[q->head].schedule) {
q->entry[q->head].schedule = false;
q->swq_queued--;
}
+ entry = q->entry[q->head];
q->head = (q->head + 1) % q->ndesc;
q->queued--;
+
+ spin_unlock_bh(&q->lock);
+ dev->drv->tx_complete_skb(dev, q, &entry, false);
+ spin_lock_bh(&q->lock);
}
mt76_txq_schedule(dev, q);
wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
@@ -667,21 +710,15 @@ static void mt76u_complete_tx(struct urb *urb)
}
static int
-mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
+mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
+ struct urb *urb)
{
- int nsgs = 1 + skb_shinfo(skb)->nr_frags;
- struct sk_buff *iter;
-
- skb_walk_frags(skb, iter)
- nsgs += 1 + skb_shinfo(iter)->nr_frags;
-
- memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
-
- nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
- sg_init_marker(urb->sg, nsgs);
- urb->num_sgs = nsgs;
+ if (!dev->usb.sg_en)
+ return 0;
- return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
+ sg_init_table(urb->sg, MT_SG_MAX_SIZE);
+ urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
+ return urb->num_sgs;
}
static int
@@ -689,12 +726,8 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
- u8 ep = q2ep(q->hw_idx);
struct mt76u_buf *buf;
u16 idx = q->tail;
- unsigned int pipe;
int err;
if (q->queued == q->ndesc)
@@ -706,15 +739,16 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return err;
buf = &q->entry[idx].ubuf;
+ buf->buf = skb->data;
+ buf->len = skb->len;
buf->done = false;
- err = mt76u_tx_build_sg(skb, buf->urb);
+ err = mt76u_tx_build_sg(dev, skb, buf->urb);
if (err < 0)
return err;
- pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
- usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
- mt76u_complete_tx, buf);
+ mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
+ buf, mt76u_complete_tx, buf);
q->tail = (q->tail + 1) % q->ndesc;
q->entry[idx].skb = skb;
@@ -749,10 +783,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
{
struct mt76u_buf *buf;
struct mt76_queue *q;
- size_t size;
int i, j;
- size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = &dev->q_tx[i];
spin_lock_init(&q->lock);
@@ -774,9 +806,15 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
if (!buf->urb)
return -ENOMEM;
- buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
- if (!buf->urb->sg)
- return -ENOMEM;
+ if (dev->usb.sg_en) {
+ size_t size = MT_SG_MAX_SIZE *
+ sizeof(struct scatterlist);
+
+ buf->urb->sg = devm_kzalloc(dev->dev, size,
+ GFP_KERNEL);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+ }
}
}
return 0;
@@ -838,16 +876,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev)
err = mt76u_alloc_rx(dev);
if (err < 0)
- goto err;
-
- err = mt76u_alloc_tx(dev);
- if (err < 0)
- goto err;
+ return err;
- return 0;
-err:
- mt76u_queues_deinit(dev);
- return err;
+ return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
@@ -875,13 +906,14 @@ int mt76u_init(struct mt76_dev *dev,
INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
- init_completion(&usb->mcu.cmpl);
mutex_init(&usb->mcu.mutex);
mutex_init(&usb->usb_ctrl_mtx);
dev->bus = &mt76u_ops;
dev->queue_ops = &usb_queue_ops;
+ usb->sg_en = mt76u_check_sg(dev);
+
return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);
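
Among the usb.c changes above, the tx tasklet now copies the queue entry and drops the queue lock before calling tx_complete_skb(), presumably so the completion path can take other locks (a status list lock, for instance) without nesting them under the queue lock. A sketch of that dequeue-then-complete pattern, assuming illustrative types:

#include <linux/spinlock.h>

struct txq_entry {
        void *skb;
};

struct txq {
        spinlock_t lock;
        struct txq_entry *entries;
        int head, ndesc, queued;
};

static void txq_complete_one(struct txq *q, void (*complete)(struct txq_entry *e))
{
        struct txq_entry e;

        spin_lock_bh(&q->lock);
        if (!q->queued) {
                spin_unlock_bh(&q->lock);
                return;
        }

        e = q->entries[q->head];                /* copy while still protected */
        q->head = (q->head + 1) % q->ndesc;
        q->queued--;
        spin_unlock_bh(&q->lock);

        complete(&e);                           /* runs without the queue lock held */
}
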
diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
deleted file mode 100644
index 036be4163e69..000000000000
--- a/drivers/net/wireless/mediatek/mt76/usb_mcu.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76.h"
-
-void mt76u_mcu_complete_urb(struct urb *urb)
-{
- struct completion *cmpl = urb->context;
-
- complete(cmpl);
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
-
-int mt76u_mcu_init_rx(struct mt76_dev *dev)
-{
- struct mt76_usb *usb = &dev->usb;
- int err;
-
- err = mt76u_buf_alloc(dev, &usb->mcu.res, 1,
- MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE,
- GFP_KERNEL);
- if (err < 0)
- return err;
-
- err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
- &usb->mcu.res, GFP_KERNEL,
- mt76u_mcu_complete_urb,
- &usb->mcu.cmpl);
- if (err < 0)
- mt76u_buf_free(&usb->mcu.res);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
-
-void mt76u_mcu_deinit(struct mt76_dev *dev)
-{
- struct mt76_usb *usb = &dev->usb;
-
- usb_kill_urb(usb->mcu.res.urb);
- mt76u_buf_free(&usb->mcu.res);
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_deinit);
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 0c35b8db58cd..69270c1a9091 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -75,4 +75,46 @@ int mt76_wcid_alloc(unsigned long *mask, int size)
}
EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev)
+{
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ unsigned long mask = dev->wcid_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ wcid = rcu_dereference(dev->wcid[j]);
+ if (!wcid)
+ continue;
+
+ spin_lock(&dev->rx_lock);
+ if (wcid->inactive_count++ < 5)
+ cur_rssi = -ewma_signal_read(&wcid->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi);
+
MODULE_LICENSE("Dual BSD/GPL");
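
mt76_get_min_avg_rssi() above walks the wcid allocation bitmap word by word and then bit by bit under RCU. The same traversal can be written with the generic for_each_set_bit() helper; a sketch with an illustrative visitor callback and table size:

#include <linux/bitops.h>
#include <linux/types.h>

#define NR_IDS  128                             /* placeholder table size */

static void walk_allocated_ids(const unsigned long *mask, void (*visit)(int id))
{
        int id;

        for_each_set_bit(id, mask, NR_IDS)
                visit(id);
}
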
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 7f3e3983b781..f7edeffb2b19 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -124,9 +124,9 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
u16 dma_len = get_unaligned_le16(data);
if (data_len < min_seg_len ||
- WARN_ON(!dma_len) ||
- WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
- WARN_ON(dma_len & 0x3))
+ WARN_ON_ONCE(!dma_len) ||
+ WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON_ONCE(dma_len & 0x3))
return 0;
return MT_DMA_HDRS + dma_len;
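
The mt7601u change above downgrades the per-frame sanity checks from WARN_ON() to WARN_ON_ONCE(), so a device streaming malformed RX descriptors produces a single backtrace rather than flooding the log. The same guard as a standalone helper, with illustrative names:

#include <linux/bug.h>
#include <linux/types.h>

static bool rx_seg_len_valid(u32 dma_len, u32 data_len, u32 hdr_len)
{
        /* warn (once) and reject obviously bogus descriptor lengths */
        if (WARN_ON_ONCE(!dma_len) ||
            WARN_ON_ONCE(dma_len + hdr_len > data_len) ||
            WARN_ON_ONCE(dma_len & 0x3))
                return false;

        return true;
}
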
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
index 662d12703b69..57b503ae63f1 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -17,7 +17,7 @@
struct mt7601u_dev;
-#define MT7601U_EE_MAX_VER 0x0c
+#define MT7601U_EE_MAX_VER 0x0d
#define MT7601U_EEPROM_SIZE 256
#define MT7601U_DEFAULT_TX_POWER 6
diff --git a/drivers/net/wireless/quantenna/Makefile b/drivers/net/wireless/quantenna/Makefile
index baebfbde119e..cea83d178d2e 100644
--- a/drivers/net/wireless/quantenna/Makefile
+++ b/drivers/net/wireless/quantenna/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2015-2016 Quantenna Communications, Inc.
# All rights reserved.
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 528ca7f5e070..14b569b6d1b5 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015 Quantenna Communications
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015 Quantenna Communications. All rights reserved. */
#ifndef QTNFMAC_BUS_H
#define QTNFMAC_BUS_H
@@ -135,7 +122,5 @@ static __always_inline void qtnf_bus_unlock(struct qtnf_bus *bus)
int qtnf_core_attach(struct qtnf_bus *bus);
void qtnf_core_detach(struct qtnf_bus *bus);
-void qtnf_txflowblock(struct device *dev, bool state);
-void qtnf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
#endif /* QTNFMAC_BUS_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 51b33ec78fac..dcb0991432f4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2012-2012 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/kernel.h>
#include <linux/etherdevice.h>
@@ -66,9 +53,11 @@ static const u32 qtnf_cipher_suites[] = {
static const struct ieee80211_txrx_stypes
qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
- .tx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
},
[NL80211_IFTYPE_AP] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4),
@@ -122,7 +111,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
struct vif_params *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
- u8 *mac_addr;
+ u8 *mac_addr = NULL;
+ int use4addr = 0;
int ret;
ret = qtnf_validate_iface_combinations(wiphy, vif, type);
@@ -132,14 +122,14 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
return ret;
}
- if (params)
+ if (params) {
mac_addr = params->macaddr;
- else
- mac_addr = NULL;
+ use4addr = params->use_4addr;
+ }
qtnf_scan_done(vif->mac, true);
- ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr);
+ ret = qtnf_cmd_send_change_intf_type(vif, type, use4addr, mac_addr);
if (ret) {
pr_err("VIF%u.%u: failed to change type to %d\n",
vif->mac->macid, vif->vifid, type);
@@ -190,6 +180,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
struct qtnf_wmac *mac;
struct qtnf_vif *vif;
u8 *mac_addr = NULL;
+ int use4addr = 0;
int ret;
mac = wiphy_priv(wiphy);
@@ -225,10 +216,12 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-ENOTSUPP);
}
- if (params)
+ if (params) {
mac_addr = params->macaddr;
+ use4addr = params->use_4addr;
+ }
- ret = qtnf_cmd_send_add_intf(vif, type, mac_addr);
+ ret = qtnf_cmd_send_add_intf(vif, type, use4addr, mac_addr);
if (ret) {
pr_err("VIF%u.%u: failed to add VIF %pM\n",
mac->macid, vif->vifid, mac_addr);
@@ -359,11 +352,6 @@ static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed)
return -EFAULT;
}
- if (changed & (WIPHY_PARAM_RETRY_LONG | WIPHY_PARAM_RETRY_SHORT)) {
- pr_err("MAC%u: can't modify retry params\n", mac->macid);
- return -EOPNOTSUPP;
- }
-
ret = qtnf_cmd_send_update_phy_params(mac, changed);
if (ret)
pr_err("MAC%u: failed to update PHY params\n", mac->macid);
@@ -650,6 +638,12 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
+ if (sme->auth_type == NL80211_AUTHTYPE_SAE &&
+ !(sme->flags & CONNECT_REQ_EXTERNAL_AUTH_SUPPORT)) {
+ pr_err("can not offload authentication to userspace\n");
+ return -EOPNOTSUPP;
+ }
+
if (sme->bssid)
ether_addr_copy(vif->bssid, sme->bssid);
else
@@ -667,6 +661,30 @@ out:
}
static int
+qtnf_external_auth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *auth)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (!ether_addr_equal(vif->bssid, auth->bssid))
+ pr_warn("unexpected bssid: %pM", auth->bssid);
+
+ ret = qtnf_cmd_send_external_auth(vif, auth);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to report external auth\n",
+ vif->mac->macid, vif->vifid);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int
qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code)
{
@@ -960,6 +978,7 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.set_default_mgmt_key = qtnf_set_default_mgmt_key,
.scan = qtnf_scan,
.connect = qtnf_connect,
+ .external_auth = qtnf_external_auth,
.disconnect = qtnf_disconnect,
.dump_survey = qtnf_dump_survey,
.get_channel = qtnf_get_channel,
@@ -1107,7 +1126,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
- WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_4ADDR_STATION;
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
@@ -1138,6 +1158,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN))
wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_SAE)
+ wiphy->features |= NL80211_FEATURE_SAE;
+
#ifdef CONFIG_PM
if (macinfo->wowlan)
wiphy->wowlan = macinfo->wowlan;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index b73425122a10..c374857283ac 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_CFG80211_H_
#define _QTN_FMAC_CFG80211_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 659e7649fe22..85a2a58f4c16 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -1,17 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/types.h>
#include <linux/skbuff.h>
@@ -72,6 +60,8 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode)
return -EADDRINUSE;
case QLINK_CMD_RESULT_EADDRNOTAVAIL:
return -EADDRNOTAVAIL;
+ case QLINK_CMD_RESULT_EBUSY:
+ return -EBUSY;
default:
return -EFAULT;
}
@@ -97,14 +87,12 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
vif_id = cmd->vifid;
cmd->mhdr.len = cpu_to_le16(cmd_skb->len);
- pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
- le16_to_cpu(cmd->cmd_id));
+ pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, cmd_id);
if (bus->fw_state != QTNF_FW_STATE_ACTIVE &&
- le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) {
+ cmd_id != QLINK_CMD_FW_INIT) {
pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
- mac_id, vif_id, le16_to_cpu(cmd->cmd_id),
- bus->fw_state);
+ mac_id, vif_id, cmd_id, bus->fw_state);
dev_kfree_skb(cmd_skb);
return -ENODEV;
}
@@ -138,7 +126,7 @@ out:
return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
- mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret);
+ mac_id, vif_id, cmd_id, ret);
return ret;
}
@@ -732,6 +720,7 @@ out:
static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
enum nl80211_iftype iftype,
+ int use4addr,
u8 *mac_addr,
enum qlink_cmd_type cmd_type)
{
@@ -749,6 +738,7 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
qtnf_bus_lock(vif->mac->bus);
cmd = (struct qlink_cmd_manage_intf *)cmd_skb->data;
+ cmd->intf_info.use4addr = use4addr;
switch (iftype) {
case NL80211_IFTYPE_AP:
@@ -784,17 +774,19 @@ out:
return ret;
}
-int qtnf_cmd_send_add_intf(struct qtnf_vif *vif,
- enum nl80211_iftype iftype, u8 *mac_addr)
+int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype,
+ int use4addr, u8 *mac_addr)
{
- return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr,
+ return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
QLINK_CMD_ADD_INTF);
}
int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
- enum nl80211_iftype iftype, u8 *mac_addr)
+ enum nl80211_iftype iftype,
+ int use4addr,
+ u8 *mac_addr)
{
- return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr,
+ return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
QLINK_CMD_CHANGE_INTF);
}
@@ -914,9 +906,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
return -E2BIG;
- hwinfo->rd = kzalloc(sizeof(*hwinfo->rd)
- + sizeof(struct ieee80211_reg_rule)
- * resp->n_reg_rules, GFP_KERNEL);
+ hwinfo->rd = kzalloc(struct_size(hwinfo->rd, reg_rules,
+ resp->n_reg_rules), GFP_KERNEL);
if (!hwinfo->rd)
return -ENOMEM;
@@ -1558,11 +1549,11 @@ static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac,
switch (tlv_type) {
case QTN_TLV_ID_FRAG_THRESH:
phy_thr = (void *)tlv;
- mac_info->frag_thr = (u32)le16_to_cpu(phy_thr->thr);
+ mac_info->frag_thr = le32_to_cpu(phy_thr->thr);
break;
case QTN_TLV_ID_RTS_THRESH:
phy_thr = (void *)tlv;
- mac_info->rts_thr = (u32)le16_to_cpu(phy_thr->thr);
+ mac_info->rts_thr = le32_to_cpu(phy_thr->thr);
break;
case QTN_TLV_ID_SRETRY_LIMIT:
limit = (void *)tlv;
@@ -1810,15 +1801,23 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
qtnf_bus_lock(mac->bus);
if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
- qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_FRAG_THRESH,
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_FRAG_THRESH,
wiphy->frag_threshold);
if (changed & WIPHY_PARAM_RTS_THRESHOLD)
- qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_RTS_THRESH,
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_RTS_THRESH,
wiphy->rts_threshold);
if (changed & WIPHY_PARAM_COVERAGE_CLASS)
qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
wiphy->coverage_class);
+ if (changed & WIPHY_PARAM_RETRY_LONG)
+ qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT,
+ wiphy->retry_long);
+
+ if (changed & WIPHY_PARAM_RETRY_SHORT)
+ qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT,
+ wiphy->retry_short);
+
ret = qtnf_cmd_send(mac->bus, cmd_skb);
if (ret)
goto out;
@@ -2323,6 +2322,35 @@ out:
return ret;
}
+int qtnf_cmd_send_external_auth(struct qtnf_vif *vif,
+ struct cfg80211_external_auth_params *auth)
+{
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_external_auth *cmd;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_EXTERNAL_AUTH,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_external_auth *)cmd_skb->data;
+
+ ether_addr_copy(cmd->bssid, auth->bssid);
+ cmd->status = cpu_to_le16(auth->status);
+
+ qtnf_bus_lock(vif->mac->bus);
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
+ goto out;
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+
+ return ret;
+}
+
int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
{
struct sk_buff *cmd_skb;
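
The qtnf_cmd_resp_proc_hw_info() hunk above replaces an open-coded size computation with struct_size(), which sizes a structure that ends in a flexible array member and saturates on overflow. A sketch of the idiom with illustrative types:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct rule {
        u32 lo, hi;
};

struct rule_table {
        unsigned int n_rules;
        struct rule rules[];                    /* flexible array member */
};

static struct rule_table *rule_table_alloc(unsigned int n)
{
        struct rule_table *tbl;

        tbl = kzalloc(struct_size(tbl, rules, n), GFP_KERNEL);
        if (tbl)
                tbl->n_rules = n;

        return tbl;
}
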
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 1ac41156c192..64f0b9dc8a14 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -1,17 +1,5 @@
-/*
- * Copyright (c) 2016 Quantenna Communications, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016 Quantenna Communications. All rights reserved. */
#ifndef QLINK_COMMANDS_H_
#define QLINK_COMMANDS_H_
@@ -26,9 +14,11 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus);
int qtnf_cmd_get_hw_info(struct qtnf_bus *bus);
int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac);
int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype,
- u8 *mac_addr);
+ int use4addr, u8 *mac_addr);
int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
- enum nl80211_iftype iftype, u8 *mac_addr);
+ enum nl80211_iftype iftype,
+ int use4addr,
+ u8 *mac_addr);
int qtnf_cmd_send_del_intf(struct qtnf_vif *vif);
int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
struct ieee80211_supported_band *band);
@@ -61,6 +51,8 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
int qtnf_cmd_send_scan(struct qtnf_wmac *mac);
int qtnf_cmd_send_connect(struct qtnf_vif *vif,
struct cfg80211_connect_params *sme);
+int qtnf_cmd_send_external_auth(struct qtnf_vif *vif,
+ struct cfg80211_external_auth_params *auth);
int qtnf_cmd_send_disconnect(struct qtnf_vif *vif,
u16 reason_code);
int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 5d18a4a917c9..ee1b75fda1dd 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -195,6 +182,7 @@ static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr)
qtnf_scan_done(vif->mac, true);
ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype,
+ vif->wdev.use_4addr,
sa->sa_data);
if (ret)
@@ -545,7 +533,8 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
- ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr);
+ ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype,
+ vif->wdev.use_4addr, vif->mac_addr);
if (ret) {
pr_err("MAC%u: failed to add VIF\n", macid);
goto error;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 293055049caa..a31cff46e964 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_CORE_H_
#define _QTN_FMAC_CORE_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.c b/drivers/net/wireless/quantenna/qtnfmac/debug.c
index 9f826b9ef5d9..598ece753a4b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/debug.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/debug.c
@@ -1,32 +1,11 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include "debug.h"
-#undef pr_fmt
-#define pr_fmt(fmt) "qtnfmac dbg: %s: " fmt, __func__
-
void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name)
{
bus->dbg_dir = debugfs_create_dir(name, NULL);
-
- if (IS_ERR_OR_NULL(bus->dbg_dir)) {
- pr_warn("failed to create debugfs root dir\n");
- bus->dbg_dir = NULL;
- }
}
void qtnf_debugfs_remove(struct qtnf_bus *bus)
@@ -38,9 +17,5 @@ void qtnf_debugfs_remove(struct qtnf_bus *bus)
void qtnf_debugfs_add_entry(struct qtnf_bus *bus, const char *name,
int (*fn)(struct seq_file *seq, void *data))
{
- struct dentry *entry;
-
- entry = debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn);
- if (IS_ERR_OR_NULL(entry))
- pr_warn("failed to add entry (%s)\n", name);
+ debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn);
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.h b/drivers/net/wireless/quantenna/qtnfmac/debug.h
index d6dd12b5d434..61b45536b83a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/debug.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/debug.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_DEBUG_H_
#define _QTN_FMAC_DEBUG_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 8b542b431b75..6c1b886339ac 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -158,6 +145,19 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
const struct qlink_event_bss_join *join_info,
u16 len)
{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ enum ieee80211_statuscode status = le16_to_cpu(join_info->status);
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_bss *bss = NULL;
+ u8 *ie = NULL;
+ size_t payload_len;
+ u16 tlv_type;
+ u16 tlv_value_len;
+ size_t tlv_full_len;
+ const struct qlink_tlv_hdr *tlv;
+ const u8 *rsp_ies = NULL;
+ size_t rsp_ies_len = 0;
+
if (unlikely(len < sizeof(*join_info))) {
pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
vif->mac->macid, vif->vifid, len,
@@ -171,15 +171,131 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
return -EPROTO;
}
- pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid,
- join_info->bssid);
+ pr_debug("VIF%u.%u: BSSID:%pM status:%u\n",
+ vif->mac->macid, vif->vifid, join_info->bssid, status);
+
+ if (status != WLAN_STATUS_SUCCESS)
+ goto done;
+
+ qlink_chandef_q2cfg(wiphy, &join_info->chan, &chandef);
+ if (!cfg80211_chandef_valid(&chandef)) {
+ pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+ vif->mac->macid, vif->vifid,
+ chandef.chan->center_freq,
+ chandef.center_freq1,
+ chandef.center_freq2,
+ chandef.width);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+
+ bss = cfg80211_get_bss(wiphy, chandef.chan, join_info->bssid,
+ NULL, 0, IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
+ if (!bss) {
+ pr_warn("VIF%u.%u: add missing BSS:%pM chan:%u\n",
+ vif->mac->macid, vif->vifid,
+ join_info->bssid, chandef.chan->hw_value);
+
+ if (!vif->wdev.ssid_len) {
+ pr_warn("VIF%u.%u: SSID unknown for BSS:%pM\n",
+ vif->mac->macid, vif->vifid,
+ join_info->bssid);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+
+ ie = kzalloc(2 + vif->wdev.ssid_len, GFP_KERNEL);
+ if (!ie) {
+ pr_warn("VIF%u.%u: IE alloc failed for BSS:%pM\n",
+ vif->mac->macid, vif->vifid,
+ join_info->bssid);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+
+ ie[0] = WLAN_EID_SSID;
+ ie[1] = vif->wdev.ssid_len;
+ memcpy(ie + 2, vif->wdev.ssid, vif->wdev.ssid_len);
+
+ bss = cfg80211_inform_bss(wiphy, chandef.chan,
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ join_info->bssid, 0,
+ WLAN_CAPABILITY_ESS, 100,
+ ie, 2 + vif->wdev.ssid_len,
+ 0, GFP_KERNEL);
+ if (!bss) {
+ pr_warn("VIF%u.%u: can't connect to unknown BSS: %pM\n",
+ vif->mac->macid, vif->vifid,
+ join_info->bssid);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+ }
+
+ payload_len = len - sizeof(*join_info);
+ tlv = (struct qlink_tlv_hdr *)join_info->ies;
+
+ while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+ tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
+
+ if (payload_len < tlv_full_len) {
+ pr_warn("invalid %u TLV\n", tlv_type);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+
+ if (tlv_type == QTN_TLV_ID_IE_SET) {
+ const struct qlink_tlv_ie_set *ie_set;
+ unsigned int ie_len;
+
+ if (payload_len < sizeof(*ie_set)) {
+ pr_warn("invalid IE_SET TLV\n");
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+
+ ie_set = (const struct qlink_tlv_ie_set *)tlv;
+ ie_len = tlv_value_len -
+ (sizeof(*ie_set) - sizeof(ie_set->hdr));
+
+ switch (ie_set->type) {
+ case QLINK_IE_SET_ASSOC_RESP:
+ if (ie_len) {
+ rsp_ies = ie_set->ie_data;
+ rsp_ies_len = ie_len;
+ }
+ break;
+ default:
+ pr_warn("unexpected IE type: %u\n",
+ ie_set->type);
+ break;
+ }
+ }
- cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL,
- 0, le16_to_cpu(join_info->status), GFP_KERNEL);
+ payload_len -= tlv_full_len;
+ tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+ }
- if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS)
+ if (payload_len)
+ pr_warn("VIF%u.%u: unexpected remaining payload: %zu\n",
+ vif->mac->macid, vif->vifid, payload_len);
+
+done:
+ cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, rsp_ies,
+ rsp_ies_len, status, GFP_KERNEL);
+ if (bss) {
+ if (!ether_addr_equal(vif->bssid, join_info->bssid))
+ ether_addr_copy(vif->bssid, join_info->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ }
+
+ if (status == WLAN_STATUS_SUCCESS)
netif_carrier_on(vif->netdev);
+ kfree(ie);
return 0;
}
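
The join handler above walks a variable-length run of qlink TLVs appended to the fixed event header. As a minimal, self-contained sketch of that walk pattern (buf/len stand for a hypothetical event payload; the header layout mirrors struct qlink_tlv_hdr with __le16 type, __le16 len, u8 val[] — this is not part of the patch):

/* Hedged sketch of the TLV walk used above. */
static int walk_qlink_tlvs(const u8 *buf, size_t len)
{
	const struct qlink_tlv_hdr *tlv = (const struct qlink_tlv_hdr *)buf;

	while (len >= sizeof(*tlv)) {
		u16 type = le16_to_cpu(tlv->type);
		size_t full = sizeof(*tlv) + le16_to_cpu(tlv->len);

		if (len < full)
			return -EPROTO;	/* truncated TLV */

		pr_debug("TLV type %u, %zu bytes\n", type, full);
		len -= full;
		tlv = (const struct qlink_tlv_hdr *)((const u8 *)tlv + full);
	}

	return len ? -EPROTO : 0;	/* reject trailing garbage */
}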
@@ -458,6 +574,43 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
return 0;
}
+static int
+qtnf_event_handle_external_auth(struct qtnf_vif *vif,
+ const struct qlink_event_external_auth *ev,
+ u16 len)
+{
+ struct cfg80211_external_auth_params auth = {0};
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ int ret;
+
+ if (len < sizeof(*ev)) {
+ pr_err("MAC%u: payload is too short\n", vif->mac->macid);
+ return -EINVAL;
+ }
+
+ if (!wiphy->registered || !vif->netdev)
+ return 0;
+
+ if (ev->ssid_len) {
+ memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
+ auth.ssid.ssid_len = ev->ssid_len;
+ }
+
+ auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
+ ether_addr_copy(auth.bssid, ev->bssid);
+ auth.action = ev->action;
+
+ pr_info("%s: external auth bss=%pM action=%u akm=%u\n",
+ vif->netdev->name, auth.bssid, auth.action,
+ auth.key_mgmt_suite);
+
+ ret = cfg80211_external_auth_request(vif->netdev, &auth, GFP_KERNEL);
+ if (ret)
+ pr_warn("failed to offload external auth request\n");
+
+ return ret;
+}
+
static int qtnf_event_parse(struct qtnf_wmac *mac,
const struct sk_buff *event_skb)
{
@@ -516,6 +669,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
ret = qtnf_event_handle_radar(vif, (const void *)event,
event_len);
break;
+ case QLINK_EVENT_EXTERNAL_AUTH:
+ ret = qtnf_event_handle_external_auth(vif, (const void *)event,
+ event_len);
+ break;
default:
pr_warn("unknown event type: %x\n", event_id);
break;
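
For context, cfg80211_external_auth_request() only hands the SAE exchange off to user space; the result is expected back through the wiphy's .external_auth op. A hedged sketch of that peer callback is shown below — field names follow struct cfg80211_external_auth_params, while qtnf_netdev_get_priv() and qtnf_cmd_send_external_auth() are assumed helper names, not guaranteed by this patch:

/* Hedged sketch, not part of this patch: user space reports the SAE outcome
 * back via the .external_auth cfg80211 op; the driver would then forward
 * params->status and params->bssid to firmware (e.g. QLINK_CMD_EXTERNAL_AUTH).
 */
static int qtnf_external_auth_sketch(struct wiphy *wiphy,
				     struct net_device *dev,
				     struct cfg80211_external_auth_params *params)
{
	struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);	/* assumed helper */

	pr_debug("%s: SAE status %u for %pM\n", dev->name,
		 params->status, params->bssid);

	return qtnf_cmd_send_external_auth(vif, params);	/* assumed helper */
}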
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.h b/drivers/net/wireless/quantenna/qtnfmac/event.h
index ae759b602c2a..533ad99d045d 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_EVENT_H_
#define _QTN_FMAC_EVENT_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 598edb814421..cbcda57105f3 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -559,6 +559,9 @@ static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
return IRQ_NONE;
+ if (!priv->msi_enabled)
+ qtnf_deassert_intx(ts);
+
priv->pcie_irq_count++;
qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
@@ -571,9 +574,6 @@ static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
tasklet_hi_schedule(&priv->reclaim_tq);
- if (!priv->msi_enabled)
- qtnf_deassert_intx(ts);
-
return IRQ_HANDLED;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 8d62addea895..7798edcf7980 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -1,25 +1,12 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_QLINK_H_
#define _QTN_QLINK_H_
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 11
+#define QLINK_PROTO_VER 13
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -81,6 +68,7 @@ enum qlink_hw_capab {
QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
QLINK_HW_CAPAB_OBSS_SCAN = BIT(5),
QLINK_HW_CAPAB_SCAN_DWELL = BIT(6),
+ QLINK_HW_CAPAB_SAE = BIT(8),
};
enum qlink_iface_type {
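
The new QLINK_HW_CAPAB_SAE bit is how firmware advertises SAE offload to the host. A hedged sketch of gating WPA3/SAE support on it during wiphy setup follows; hw_capab is an assumed copy of the firmware-reported capability mask, not a field defined here:

/* Hedged sketch, not from the patch: only advertise SAE-by-userspace when
 * firmware sets the capability bit. */
static void sketch_apply_sae_capab(struct wiphy *wiphy, u32 hw_capab)
{
	if (hw_capab & QLINK_HW_CAPAB_SAE)
		wiphy->features |= NL80211_FEATURE_SAE;
}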
@@ -105,7 +93,8 @@ struct qlink_intf_info {
__le16 if_type;
__le16 vlanid;
u8 mac_addr[ETH_ALEN];
- u8 rsvd[2];
+ u8 use4addr;
+ u8 rsvd[1];
} __packed;
enum qlink_sta_flags {
@@ -262,6 +251,7 @@ enum qlink_cmd_type {
QLINK_CMD_DISCONNECT = 0x0061,
QLINK_CMD_PM_SET = 0x0062,
QLINK_CMD_WOWLAN_SET = 0x0063,
+ QLINK_CMD_EXTERNAL_AUTH = 0x0066,
};
/**
@@ -493,6 +483,20 @@ struct qlink_cmd_connect {
} __packed;
/**
+ * struct qlink_cmd_external_auth - data for QLINK_CMD_EXTERNAL_AUTH command
+ *
+ * @bssid: BSSID of the BSS to connect to
+ * @status: authentication status code
+ * @payload: variable portion of the command.
+ */
+struct qlink_cmd_external_auth {
+ struct qlink_cmd chdr;
+ u8 bssid[ETH_ALEN];
+ __le16 status;
+ u8 payload[0];
+} __packed;
+
+/**
* struct qlink_cmd_disconnect - data for QLINK_CMD_DISCONNECT command
*
* @reason: code of the reason of disconnect, see &enum ieee80211_reasoncode.
@@ -733,6 +737,7 @@ enum qlink_cmd_result {
QLINK_CMD_RESULT_EALREADY,
QLINK_CMD_RESULT_EADDRINUSE,
QLINK_CMD_RESULT_EADDRNOTAVAIL,
+ QLINK_CMD_RESULT_EBUSY,
};
/**
@@ -936,6 +941,7 @@ enum qlink_event_type {
QLINK_EVENT_BSS_LEAVE = 0x0027,
QLINK_EVENT_FREQ_CHANGE = 0x0028,
QLINK_EVENT_RADAR = 0x0029,
+ QLINK_EVENT_EXTERNAL_AUTH = 0x0030,
};
/**
@@ -986,13 +992,16 @@ struct qlink_event_sta_deauth {
/**
* struct qlink_event_bss_join - data for QLINK_EVENT_BSS_JOIN event
*
+ * @chan: new operating channel definition
* @bssid: BSSID of the BSS the interface tried to join.
* @status: status of joining attempt, see &enum ieee80211_statuscode.
*/
struct qlink_event_bss_join {
struct qlink_event ehdr;
+ struct qlink_chandef chan;
u8 bssid[ETH_ALEN];
__le16 status;
+ u8 ies[0];
} __packed;
/**
@@ -1108,6 +1117,24 @@ struct qlink_event_radar {
u8 rsvd[3];
} __packed;
+/**
+ * struct qlink_event_external_auth - data for QLINK_EVENT_EXTERNAL_AUTH event
+ *
+ * @ssid: SSID announced by BSS
+ * @ssid_len: SSID length
+ * @bssid: BSSID of the BSS to connect to
+ * @akm_suite: AKM suite for external authentication
+ * @action: action type/trigger for external authentication
+ */
+struct qlink_event_external_auth {
+ struct qlink_event ehdr;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 bssid[ETH_ALEN];
+ __le32 akm_suite;
+ u8 action;
+} __packed;
+
/* QLINK TLVs (Type-Length Values) definitions
*/
@@ -1182,7 +1209,7 @@ struct qlink_iface_limit_record {
struct qlink_tlv_frag_rts_thr {
struct qlink_tlv_hdr hdr;
- __le16 thr;
+ __le32 thr;
} __packed;
struct qlink_tlv_rlimit {
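
qlink_cmd_external_auth ends in a zero-length payload[] member, so the command's total length determines how much trailing data it carries. A hedged sketch of sizing and filling such a command is shown below; the command-header (chdr) setup and the transport path are deliberately left out and assumed to live elsewhere in the driver:

/* Hedged sketch: build a variable-length command that ends in payload[]. */
static struct sk_buff *sketch_alloc_ext_auth(const u8 *auth_data, size_t auth_len)
{
	struct qlink_cmd_external_auth *cmd;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*cmd) + auth_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	cmd = skb_put_zero(skb, sizeof(*cmd));
	/* cmd->chdr would be filled by the command layer */
	cmd->status = cpu_to_le16(WLAN_STATUS_SUCCESS);
	skb_put_data(skb, auth_data, auth_len);	/* trailing payload[] */

	return skb;
}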
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index aeeda81b09ea..72bfd17cb687 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -1,17 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/nl80211.h>
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 960d5d97492f..781ea7fe79f2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_QLINK_UTIL_H_
#define _QTN_FMAC_QLINK_UTIL_H_
@@ -69,6 +56,17 @@ static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb,
memcpy(hdr->val, &tmp, sizeof(tmp));
}
+static inline void qtnf_cmd_skb_put_tlv_u32(struct sk_buff *skb,
+ u16 tlv_id, u32 value)
+{
+ struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
+ __le32 tmp = cpu_to_le32(value);
+
+ hdr->type = cpu_to_le16(tlv_id);
+ hdr->len = cpu_to_le16(sizeof(value));
+ memcpy(hdr->val, &tmp, sizeof(tmp));
+}
+
u16 qlink_iface_type_to_nl_mask(u16 qlink_type);
u8 qlink_chan_width_mask_to_nl(u16 qlink_mask);
void qlink_chandef_q2cfg(struct wiphy *wiphy,
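
The new qtnf_cmd_skb_put_tlv_u32() mirrors the existing u16 helper: it reserves a TLV header plus four value bytes and stores the value little-endian. A hedged usage sketch (QTN_TLV_ID_EXAMPLE_U32 is a placeholder id, not defined in qlink.h):

/* Hedged usage sketch: append one 32-bit TLV to an already-built command skb. */
qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_EXAMPLE_U32, 0x12345678);

Going through a local __le32 and memcpy(), as the helper does, also keeps the store into hdr->val safe on platforms that dislike unaligned accesses, since preceding TLVs can leave the value bytes at any alignment.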
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
index 40295a511224..82d879950b62 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_HW_IDS_H_
#define _QTN_HW_IDS_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
index 2ec334199c2b..ff678951d3b2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/types.h>
#include <linux/io.h>
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
index c2a3702a9ee7..52cac5439b03 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_SHM_IPC_H_
#define _QTN_FMAC_SHM_IPC_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h
index 95a5f89a8b1a..78be70df1218 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_SHM_IPC_DEFS_H_
#define _QTN_FMAC_SHM_IPC_DEFS_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.c b/drivers/net/wireless/quantenna/qtnfmac/trans.c
index 345f34ec9750..95356e280e23 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/trans.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/trans.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/types.h>
#include <linux/export.h>
diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.h b/drivers/net/wireless/quantenna/qtnfmac/trans.h
index 9a473e07af0f..c0b76f871b31 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/trans.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/trans.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_TRANS_H_
#define _QTN_FMAC_TRANS_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
index 3bc96b264769..cda6f5f3f38a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include "util.h"
#include "qtn_hw_ids.h"
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h
index b8744baac332..a14b7078a9c7 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015 Quantenna Communications
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015 Quantenna Communications. All rights reserved. */
#ifndef QTNFMAC_UTIL_H
#define QTNFMAC_UTIL_H
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 0e95555aec62..a03b5284a050 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -2966,6 +2966,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
struct channel_info *info)
{
u8 rfcsr;
+ int idx = rf->channel-1;
rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
@@ -3003,60 +3004,56 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_freq_cal_mode1(rt2x00dev);
- if (rf->channel <= 14) {
- int idx = rf->channel-1;
-
- if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
- /* r55/r59 value array of channel 1~14 */
- static const char r55_bt_rev[] = {0x83, 0x83,
- 0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
- 0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
- static const char r59_bt_rev[] = {0x0e, 0x0e,
- 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
-
- rt2800_rfcsr_write(rt2x00dev, 55,
- r55_bt_rev[idx]);
- rt2800_rfcsr_write(rt2x00dev, 59,
- r59_bt_rev[idx]);
- } else {
- static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
- 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
- 0x88, 0x88, 0x86, 0x85, 0x84};
-
- rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
- }
+ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+ /* r55/r59 value array of channel 1~14 */
+ static const char r55_bt_rev[] = {0x83, 0x83,
+ 0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
+ 0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
+ static const char r59_bt_rev[] = {0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
+
+ rt2800_rfcsr_write(rt2x00dev, 55,
+ r55_bt_rev[idx]);
+ rt2800_rfcsr_write(rt2x00dev, 59,
+ r59_bt_rev[idx]);
} else {
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
- static const char r55_nonbt_rev[] = {0x23, 0x23,
- 0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
- static const char r59_nonbt_rev[] = {0x07, 0x07,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
-
- rt2800_rfcsr_write(rt2x00dev, 55,
- r55_nonbt_rev[idx]);
- rt2800_rfcsr_write(rt2x00dev, 59,
- r59_nonbt_rev[idx]);
- } else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT6352)) {
- static const char r59_non_bt[] = {0x8f, 0x8f,
- 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
- 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
-
- rt2800_rfcsr_write(rt2x00dev, 59,
- r59_non_bt[idx]);
- } else if (rt2x00_rt(rt2x00dev, RT5350)) {
- static const char r59_non_bt[] = {0x0b, 0x0b,
- 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a,
- 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06};
-
- rt2800_rfcsr_write(rt2x00dev, 59,
- r59_non_bt[idx]);
- }
+ static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
+ 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
+ 0x88, 0x88, 0x86, 0x85, 0x84};
+
+ rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
+ }
+ } else {
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+ static const char r55_nonbt_rev[] = {0x23, 0x23,
+ 0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
+ static const char r59_nonbt_rev[] = {0x07, 0x07,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
+
+ rt2800_rfcsr_write(rt2x00dev, 55,
+ r55_nonbt_rev[idx]);
+ rt2800_rfcsr_write(rt2x00dev, 59,
+ r59_nonbt_rev[idx]);
+ } else if (rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
+ static const char r59_non_bt[] = {0x8f, 0x8f,
+ 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
+ 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
+
+ rt2800_rfcsr_write(rt2x00dev, 59,
+ r59_non_bt[idx]);
+ } else if (rt2x00_rt(rt2x00dev, RT5350)) {
+ static const char r59_non_bt[] = {0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a,
+ 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06};
+
+ rt2800_rfcsr_write(rt2x00dev, 59,
+ r59_non_bt[idx]);
}
}
}
@@ -3861,10 +3858,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_rfcsr_write(rt2x00dev, 8, 0);
- if (rt2x00_rt(rt2x00dev, RT6352))
+ if (rt2x00_rt(rt2x00dev, RT6352)) {
tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
- else
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFRX_EN, 1);
+ } else {
tx_pin = 0;
+ }
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
@@ -3896,24 +3895,29 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
/* Turn on tertiary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
+ rf->channel > 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
+ rf->channel <= 14);
/* fall-through */
case 2:
/* Turn on secondary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
+ rf->channel > 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
+ rf->channel <= 14);
/* fall-through */
case 1:
/* Turn on primary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
+ rf->channel > 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
+ rf->channel <= 14);
break;
}
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFRX_EN, 1); /* mt7620 */
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
@@ -3985,13 +3989,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 195, 141);
rt2800_bbp_write(rt2x00dev, 196, reg);
- /* AGC init */
- if (rt2x00_rt(rt2x00dev, RT6352))
- reg = 0x04;
- else
- reg = rf->channel <= 14 ? 0x1c : 0x24;
-
- reg += 2 * rt2x00dev->lna_gain;
+ /* AGC init.
+ * Despite the vendor driver using different values here for the
+ * RT6352 chip, we use 0x1c for now. This may have to be changed
+ * once TSSI is implemented.
+ */
+ reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2*rt2x00dev->lna_gain;
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
rt2800_iq_calibrate(rt2x00dev, rf->channel);
@@ -5477,7 +5480,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
- rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x06060606);
+ rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);
rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C666C);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index 61ba573e8bf1..05a2e8da412c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -656,36 +656,24 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf->driver_folder =
debugfs_create_dir(intf->rt2x00dev->ops->name,
rt2x00dev->hw->wiphy->debugfsdir);
- if (IS_ERR(intf->driver_folder) || !intf->driver_folder)
- goto exit;
intf->driver_entry =
rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob);
- if (IS_ERR(intf->driver_entry) || !intf->driver_entry)
- goto exit;
intf->chipset_entry =
rt2x00debug_create_file_chipset("chipset",
intf, &intf->chipset_blob);
- if (IS_ERR(intf->chipset_entry) || !intf->chipset_entry)
- goto exit;
intf->dev_flags = debugfs_create_file("dev_flags", 0400,
intf->driver_folder, intf,
&rt2x00debug_fop_dev_flags);
- if (IS_ERR(intf->dev_flags) || !intf->dev_flags)
- goto exit;
intf->cap_flags = debugfs_create_file("cap_flags", 0400,
intf->driver_folder, intf,
&rt2x00debug_fop_cap_flags);
- if (IS_ERR(intf->cap_flags) || !intf->cap_flags)
- goto exit;
intf->register_folder =
debugfs_create_dir("register", intf->driver_folder);
- if (IS_ERR(intf->register_folder) || !intf->register_folder)
- goto exit;
#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \
({ \
@@ -695,9 +683,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
0600, \
(__intf)->register_folder, \
&(__intf)->offset_##__name); \
- if (IS_ERR((__intf)->__name##_off_entry) || \
- !(__intf)->__name##_off_entry) \
- goto exit; \
\
(__intf)->__name##_val_entry = \
debugfs_create_file(__stringify(__name) "_value", \
@@ -705,9 +690,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
(__intf)->register_folder, \
(__intf), \
&rt2x00debug_fop_##__name); \
- if (IS_ERR((__intf)->__name##_val_entry) || \
- !(__intf)->__name##_val_entry) \
- goto exit; \
} \
})
@@ -721,15 +703,10 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf->queue_folder =
debugfs_create_dir("queue", intf->driver_folder);
- if (IS_ERR(intf->queue_folder) || !intf->queue_folder)
- goto exit;
intf->queue_frame_dump_entry =
debugfs_create_file("dump", 0400, intf->queue_folder,
intf, &rt2x00debug_fop_queue_dump);
- if (IS_ERR(intf->queue_frame_dump_entry)
- || !intf->queue_frame_dump_entry)
- goto exit;
skb_queue_head_init(&intf->frame_dump_skbqueue);
init_waitqueue_head(&intf->frame_dump_waitqueue);
@@ -747,10 +724,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
#endif
return;
-
-exit:
- rt2x00debug_deregister(rt2x00dev);
- rt2x00_err(rt2x00dev, "Failed to register debug handler\n");
}
void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
index 528cb0401df1..4956a54151cb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
@@ -119,9 +119,9 @@ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
/*
* Allocate DMA memory for descriptor and buffer.
*/
- addr = dma_zalloc_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size, &dma,
- GFP_KERNEL);
+ addr = dma_alloc_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size, &dma,
+ GFP_KERNEL);
if (!addr)
return -ENOMEM;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 4c5de8fc8f12..52b9fc480f8b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -321,97 +321,12 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key)
{
- struct hw_key_entry key_entry;
- struct rt2x00_field32 field;
- u32 mask;
- u32 reg;
-
- if (crypto->cmd == SET_KEY) {
- /*
- * rt2x00lib can't determine the correct free
- * key_idx for shared keys. We have 1 register
- * with key valid bits. The goal is simple, read
- * the register, if that is full we have no slots
- * left.
- * Note that each BSS is allowed to have up to 4
- * shared keys, so put a mask over the allowed
- * entries.
- */
- mask = (0xf << crypto->bssidx);
-
- reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0);
- reg &= mask;
-
- if (reg && reg == mask)
- return -ENOSPC;
-
- key->hw_key_idx += reg ? ffz(reg) : 0;
-
- /*
- * Upload key to hardware
- */
- memcpy(key_entry.key, crypto->key,
- sizeof(key_entry.key));
- memcpy(key_entry.tx_mic, crypto->tx_mic,
- sizeof(key_entry.tx_mic));
- memcpy(key_entry.rx_mic, crypto->rx_mic,
- sizeof(key_entry.rx_mic));
-
- reg = SHARED_KEY_ENTRY(key->hw_key_idx);
- rt2x00mmio_register_multiwrite(rt2x00dev, reg,
- &key_entry, sizeof(key_entry));
-
- /*
- * The cipher types are stored over 2 registers.
- * bssidx 0 and 1 keys are stored in SEC_CSR1 and
- * bssidx 1 and 2 keys are stored in SEC_CSR5.
- * Using the correct defines correctly will cause overhead,
- * so just calculate the correct offset.
- */
- if (key->hw_key_idx < 8) {
- field.bit_offset = (3 * key->hw_key_idx);
- field.bit_mask = 0x7 << field.bit_offset;
-
- reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR1);
- rt2x00_set_field32(&reg, field, crypto->cipher);
- rt2x00mmio_register_write(rt2x00dev, SEC_CSR1, reg);
- } else {
- field.bit_offset = (3 * (key->hw_key_idx - 8));
- field.bit_mask = 0x7 << field.bit_offset;
-
- reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR5);
- rt2x00_set_field32(&reg, field, crypto->cipher);
- rt2x00mmio_register_write(rt2x00dev, SEC_CSR5, reg);
- }
-
- /*
- * The driver does not support the IV/EIV generation
- * in hardware. However it doesn't support the IV/EIV
- * inside the ieee80211 frame either, but requires it
- * to be provided separately for the descriptor.
- * rt2x00lib will cut the IV/EIV data out of all frames
- * given to us by mac80211, but we must tell mac80211
- * to generate the IV/EIV data.
- */
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- }
-
/*
- * SEC_CSR0 contains only single-bit fields to indicate
- * a particular key is valid. Because using the FIELD32()
- * defines directly will cause a lot of overhead, we use
- * a calculation to determine the correct bit directly.
+ * Let software handle the shared keys: hardware decryption
+ * is not reliable because the firmware does not know the
+ * key's keyidx.
*/
- mask = 1 << key->hw_key_idx;
-
- reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0);
- if (crypto->cmd == SET_KEY)
- reg |= mask;
- else if (crypto->cmd == DISABLE_KEY)
- reg &= ~mask;
- rt2x00mmio_register_write(rt2x00dev, SEC_CSR0, reg);
-
- return 0;
+ return -EOPNOTSUPP;
}
static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 33ad87528d9a..44a943d18b84 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -959,7 +959,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
if (proto == htons(ETH_P_AARP) || proto == htons(ETH_P_IPX)) {
/* This is the selective translation table, only 2 entries */
writeb(0xf8,
- &((struct snaphdr_t __iomem *)ptx->var)->org[3]);
+ &((struct snaphdr_t __iomem *)ptx->var)->org[2]);
}
/* Copy body of ethernet packet without ethernet header */
memcpy_toio((void __iomem *)&ptx->var +
@@ -2211,7 +2211,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
untranslate(local, skb, total_len);
}
} else { /* sniffer mode, so just pass whole packet */
- };
+ }
/************************/
/* Now pick up the rest of the fragments if any */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
index 2966681efaef..5d6b06d3c02c 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
@@ -2,4 +2,4 @@ rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
obj-$(CONFIG_RTL8180) += rtl818x_pci.o
-ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
+ccflags-y += -I $(srctree)/$(src)/..
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 225c1c8851cc..d5f65372356b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -803,7 +803,7 @@ static void rtl8180_config_cardbus(struct ieee80211_hw *dev)
rtl818x_iowrite16(priv, FEMR_SE, 0xffff);
} else {
reg16 = rtl818x_ioread16(priv, &priv->map->FEMR);
- reg16 |= (1 << 15) | (1 << 14) | (1 << 4);
+ reg16 |= (1 << 15) | (1 << 14) | (1 << 4);
rtl818x_iowrite16(priv, &priv->map->FEMR, reg16);
}
@@ -1723,8 +1723,8 @@ static int rtl8180_probe(struct pci_dev *pdev,
{
struct ieee80211_hw *dev;
struct rtl8180_priv *priv;
- unsigned long mem_addr, mem_len;
- unsigned int io_addr, io_len;
+ unsigned long mem_len;
+ unsigned int io_len;
int err;
const char *chip_name, *rf_name = NULL;
u32 reg;
@@ -1743,9 +1743,7 @@ static int rtl8180_probe(struct pci_dev *pdev,
goto err_disable_dev;
}
- io_addr = pci_resource_start(pdev, 0);
io_len = pci_resource_len(pdev, 0);
- mem_addr = pci_resource_start(pdev, 1);
mem_len = pci_resource_len(pdev, 1);
if (mem_len < sizeof(struct rtl818x_csr) ||
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
index ff074912a095..95bac73ece7c 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
@@ -2,4 +2,4 @@ rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o
obj-$(CONFIG_RTL8187) += rtl8187.o
-ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
+ccflags-y += -I $(srctree)/$(src)/..
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ef9b502ce576..217d2a7a43c7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "wifi.h"
#include "rc.h"
@@ -452,6 +430,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
} else {
u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+
get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
}
@@ -481,7 +460,6 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
(void *)rtl_fwevt_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
(void *)rtl_c2hcmd_wq_callback);
-
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@@ -640,6 +618,7 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
u8 rate_flag = info->control.rates[0].flags;
u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0;
u8 sgi_80 = 0, bw_80 = 0;
+
tcb_desc->use_shortgi = false;
if (sta == NULL)
@@ -1872,6 +1851,7 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
return 0;
}
+
int rtl_tx_agg_oper(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid)
{
@@ -2095,7 +2075,6 @@ void rtl_watchdog_wq_callback(void *data)
* busytraffic we don't change channel
*/
if (mac->link_state >= MAC80211_LINKED) {
-
/* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
for (idx = 0; idx <= 2; idx++) {
rtlpriv->link_info.num_rx_in4period[idx] =
@@ -2172,8 +2151,6 @@ label_lps_done:
;
}
- rtlpriv->link_info.num_rx_inperiod = 0;
- rtlpriv->link_info.num_tx_inperiod = 0;
for (tid = 0; tid <= 7; tid++)
rtlpriv->link_info.tidtx_inperiod[tid] = 0;
@@ -2236,6 +2213,8 @@ label_lps_done:
rtlpriv->btcoexist.btc_info.in_4way = false;
}
+ rtlpriv->link_info.num_rx_inperiod = 0;
+ rtlpriv->link_info.num_tx_inperiod = 0;
rtlpriv->link_info.bcn_rx_inperiod = 0;
/* <6> scan list */
@@ -2255,6 +2234,7 @@ void rtl_watch_dog_timer_callback(struct timer_list *t)
mod_timer(&rtlpriv->works.watchdog_timer,
jiffies + MSECS(RTL_WATCH_DOG_TIME));
}
+
void rtl_fwevt_wq_callback(void *data)
{
struct rtl_works *rtlworks =
@@ -2311,11 +2291,10 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops;
const struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
- u8 cmd_id, cmd_seq, cmd_len;
+ u8 cmd_id, cmd_len;
u8 *cmd_buf = NULL;
cmd_id = GET_C2H_CMD_ID(skb->data);
- cmd_seq = GET_C2H_SEQ(skb->data);
cmd_len = skb->len - C2H_DATA_OFFSET;
cmd_buf = GET_C2H_DATA_PTR(skb->data);
@@ -2407,6 +2386,7 @@ void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t)
rtlpriv->cfg->ops->dualmac_easy_concurrent(hw);
}
+
/*********************************************************
*
* frame process functions
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index a7ae40eaa3cd..99565ad09cdc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_BASE_H__
#define __RTL_BASE_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
index 02dff4c3f664..30a25480752d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2007-2011 Realtek Corporation.*/
#ifndef __HALBT_PRECOMP_H__
#define __HALBT_PRECOMP_H__
@@ -49,8 +28,6 @@
#include "halbtc8821a2ant.h"
#include "halbtc8821a1ant.h"
-#define GetDefaultAdapter(padapter) padapter
-
#define BIT0 0x00000001
#define BIT1 0x00000002
#define BIT2 0x00000004
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index f22fec093f1d..3ebc7c93b1e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -1,36 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
-/**************************************************************
- * Description:
- *
- * This file is for RTL8192E Co-exist mechanism
- *
- * History
- * 2012/11/15 Cosa first check in.
- *
- **************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2012 Realtek Corporation.*/
/**************************************************************
* include files
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
index b8c95c7afc06..41ac0d5dcc9c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2012 Realtek Corporation.*/
+
/*****************************************************************
* The following is for 8192E 2Ant BT Co-exist definition
*****************************************************************/
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 59553db020ef..5f5739904edf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2012 Realtek Corporation.*/
/***************************************************************
* Description:
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
index 934f27893c16..9d41e11388ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2012 Realtek Corporation.*/
+
/**********************************************************************
* The following is for 8723B 1ANT BT Co-exist definition
**********************************************************************/
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 6597f7cb3411..9f7b9af5bdcd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2012 Realtek Corporation.*/
+
/***************************************************************
* Description:
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
index aa24da402fb9..08aad6ef4046 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2012 Realtek Corporation.*/
+
#ifndef _HAL8723B_2_ANT
#define _HAL8723B_2_ANT
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index b5d65872db84..fa5b73f81c57 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2012 Realtek Corporation.*/
/**************************************************************
* Description:
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
index a498ff5b1b9c..a63fb79a971a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2012 Realtek Corporation.*/
/*===========================================
* The following is for 8821A 1ANT BT Co-exist definition
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 01a9d303603b..e9e211fda264 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2012 Realtek Corporation.*/
/************************************************************
* Description:
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
index ce3e58c4e660..3df0ee8bd37b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2012 Realtek Corporation.*/
/*===========================================
* The following is for 8821A 2Ant BT Co-exist definition
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c
index 951b8c1e0153..145d6f9c017a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c
@@ -1,17 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2016-2017 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-2017 Realtek Corporation.*/
+
#include "halbt_precomp.h"
void ex_hal8822b_wifi_only_hw_config(struct wifi_only_cfg *wifionlycfg)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h
index 6ec356542eea..5fc66cea74b2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h
@@ -1,17 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2016-2017 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2016-2017 Realtek Corporation.*/
+
#ifndef __INC_HAL8822BWIFIONLYHWCFG_H
#define __INC_HAL8822BWIFIONLYHWCFG_H
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index d748aab66aa2..2ac0481b29ef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2007-2013 Realtek Corporation.*/
#include "halbt_precomp.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 9eae87d19120..ee9aeddf1ebc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __HALBTC_OUT_SRC_H__
#define __HALBTC_OUT_SRC_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index cce4a37ea408..0e509c33e9e6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
+
#include "../wifi.h"
#include <linux/vmalloc.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
index 8c996055de71..bf2cf8505aca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL_BTC_H__
#define __RTL_BTC_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index f7a7dcbf945e..bf0e0bb1f99b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#include "wifi.h"
#include "cam.h"
#include <linux/export.h>
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h
index e2e647d511c1..2461fa9afda0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.h
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_CAM_H_
#define __RTL_CAM_H_
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 4bf7967590ca..f73e690bbe8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "wifi.h"
#include "core.h"
@@ -210,6 +188,7 @@ static void rtl_op_tx(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_tcb_desc tcb_desc;
+
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
@@ -368,12 +347,14 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtlpriv->locks.conf_mutex);
}
+
static int rtl_op_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype new_type, bool p2p)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int ret;
+
rtl_op_remove_interface(hw, vif);
vif->type = new_type;
@@ -903,6 +884,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *)(&mac->rx_conf));
}
+
static int rtl_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -955,6 +937,7 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_sta_info *sta_entry;
+
if (sta) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
"Remove sta addr is %pM\n", sta->addr);
@@ -967,6 +950,7 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
}
return 0;
}
+
static int _rtl_get_hal_qnum(u16 queue)
{
int qnum;
@@ -1088,6 +1072,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
/*TODO: reference to enum ieee80211_bss_change */
if (changed & BSS_CHANGED_ASSOC) {
u8 mstatus;
+
if (bss_conf->assoc) {
struct ieee80211_sta *sta = NULL;
u8 keep_alive = 10;
@@ -1316,6 +1301,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
* set in sta_add, and will be NULL here */
if (vif->type == NL80211_IFTYPE_STATION) {
struct rtl_sta_info *sta_entry;
+
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
sta_entry->wireless_mode = mac->mode;
}
@@ -1957,5 +1943,7 @@ void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igvalue)
dm_digtable->bt30_cur_igi = 0x32;
dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+ dm_digtable->pre_cck_fa_state = 0;
+ dm_digtable->cur_cck_fa_state = 0;
}
EXPORT_SYMBOL(rtl_dm_diginit);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h
index 782ac2fc4b28..7447ff456710 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.h
+++ b/drivers/net/wireless/realtek/rtlwifi/core.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_CORE_H__
#define __RTL_CORE_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index d70385be9976..a051de16284d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "wifi.h"
#include "cam.h"
@@ -463,12 +442,9 @@ static const struct file_operations file_ops_common_write = {
#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \
do { \
rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \
- if (!debugfs_create_file(#name, mode, \
- parent, &rtl_debug_priv_ ##name, \
- &file_ops_ ##fopname)) \
- pr_err("Unable to initialize debugfs:%s/%s\n", \
- rtlpriv->dbg.debugfs_name, \
- #name); \
+ debugfs_create_file(#name, mode, parent, \
+ &rtl_debug_priv_ ##name, \
+ &file_ops_ ##fopname); \
} while (0)
#define RTL_DEBUGFS_ADD(name) \
@@ -486,11 +462,6 @@ void rtl_debug_add_one(struct ieee80211_hw *hw)
rtlpriv->dbg.debugfs_dir =
debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir);
- if (!rtlpriv->dbg.debugfs_dir) {
- pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name,
- rtlpriv->dbg.debugfs_name);
- return;
- }
parent = rtlpriv->dbg.debugfs_dir;
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index ad6834af618b..69f169d4d4ae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_DEBUG_H__
#define __RTL_DEBUG_H__
@@ -157,7 +136,7 @@ enum dbgp_flag_e {
FEEPROM = 11,
FPWR = 12,
FDM = 13,
- FDBGCtrl = 14,
+ FDBGCTRL = 14,
FC2H = 15,
FBT = 16,
FINIT = 17,
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 9729e51fce38..e68340dfd980 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * Tmis program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * Tmis program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * Tme full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#include "wifi.h"
#include "efuse.h"
#include "pci.h"
@@ -386,20 +365,20 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 section_idx, i, Base;
+ u8 section_idx, i, base;
u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
bool wordchanged, result = true;
for (section_idx = 0; section_idx < 16; section_idx++) {
- Base = section_idx * 8;
+ base = section_idx * 8;
wordchanged = false;
for (i = 0; i < 8; i = i + 2) {
- if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] !=
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) ||
- (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] !=
- rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i +
- 1])) {
+ if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i] ||
+ rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i + 1] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i +
+ 1]) {
words_need++;
wordchanged = true;
}
@@ -495,6 +474,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
if (word_en != 0x0F) {
u8 tmpdata[8];
+
memcpy(tmpdata,
&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
8);
@@ -508,7 +488,6 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
break;
}
}
-
}
efuse_power_switch(hw, true, false);
@@ -540,7 +519,7 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(rtl_efuse_shadow_map_update);
-void efuse_force_write_vendor_Id(struct ieee80211_hw *hw)
+void efuse_force_write_vendor_id(struct ieee80211_hw *hw)
{
u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -683,6 +662,7 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+
efuse_power_switch(hw, false, true);
read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse);
efuse_power_switch(hw, false, false);
@@ -833,6 +813,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
if (0x0F != (badworden & 0x0F)) {
u8 reorg_offset = offset;
u8 reorg_worden = badworden;
+
efuse_pg_packet_write(hw, reorg_offset,
reorg_worden,
originaldata);
@@ -922,6 +903,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
if (0x0F != (badworden & 0x0F)) {
u8 reorg_offset = tmp_pkt.offset;
u8 reorg_worden = badworden;
+
efuse_pg_packet_write(hw, reorg_offset,
reorg_worden,
originaldata);
@@ -978,7 +960,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
while (continual && (efuse_addr < (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
-
if (write_state == PG_STATE_HEADER) {
dataempty = true;
badworden = 0x0F;
@@ -1132,40 +1113,39 @@ void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 tempval;
- u16 tmpV16;
+ u16 tmpv16;
if (pwrstate && (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)) {
-
if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192CE &&
rtlhal->hw_type != HARDWARE_TYPE_RTL8192DE) {
rtl_write_byte(rtlpriv,
rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69);
} else {
- tmpV16 =
+ tmpv16 =
rtl_read_word(rtlpriv,
rtlpriv->cfg->maps[SYS_ISO_CTRL]);
- if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
- tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
+ if (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
+ tmpv16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
rtl_write_word(rtlpriv,
rtlpriv->cfg->maps[SYS_ISO_CTRL],
- tmpV16);
+ tmpv16);
}
}
- tmpV16 = rtl_read_word(rtlpriv,
+ tmpv16 = rtl_read_word(rtlpriv,
rtlpriv->cfg->maps[SYS_FUNC_EN]);
- if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
- tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
+ if (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
+ tmpv16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
rtl_write_word(rtlpriv,
- rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
+ rtlpriv->cfg->maps[SYS_FUNC_EN], tmpv16);
}
- tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
- if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
- (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
- tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
+ tmpv16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
+ if ((!(tmpv16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
+ (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
+ tmpv16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
rtlpriv->cfg->maps[EFUSE_ANA8M]);
rtl_write_word(rtlpriv,
- rtlpriv->cfg->maps[SYS_CLK], tmpV16);
+ rtlpriv->cfg->maps[SYS_CLK], tmpv16);
}
}
@@ -1240,6 +1220,7 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw)
static u8 efuse_calculate_word_cnts(u8 word_en)
{
u8 word_cnts = 0;
+
if (!(word_en & BIT(0)))
word_cnts++;
if (!(word_en & BIT(1)))
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index dfa31c13fc7a..1ec59f439382 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_EFUSE_H_
#define __RTL_EFUSE_H_
@@ -107,7 +85,7 @@ void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
bool efuse_shadow_update(struct ieee80211_hw *hw);
bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+void efuse_force_write_vendor_id(struct ieee80211_hw *hw);
void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 5d1fda16fc8c..48ca52102cef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "wifi.h"
#include "core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index 3fb56c845a61..866861626a0a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_PCI_H__
#define __RTL_PCI_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 479a4cfc245d..70f04c2f5b17 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "wifi.h"
#include "base.h"
@@ -740,6 +718,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
u8 noa_num, index , i, noa_index = 0;
bool find_p2p_ie = false , find_p2p_ps_ie = false;
+
pos = (u8 *)mgmt->u.beacon.variable;
end = data + len;
ie = NULL;
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h
index 0df2b5203030..aaa2ed2bbe16 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.h
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __REALTEK_RTL_PCI_PS_H__
#define __REALTEK_RTL_PCI_PS_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
index 17ce0cb2c35c..db1765c6fef6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_PWRSEQCMD_H__
#define __RTL8723E_PWRSEQCMD_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 6c78c6dabbdf..cf8e42a01015 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "wifi.h"
#include "base.h"
@@ -258,6 +236,7 @@ static void rtl_tx_status(void *ppriv,
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
+
if (_rtl_tx_aggr_check(rtlpriv, sta_entry,
tid)) {
sta_entry->tids[tid].agg.agg_state =
@@ -315,6 +294,7 @@ static void rtl_rate_free_sta(void *rtlpriv,
struct ieee80211_sta *sta, void *priv_sta)
{
struct rtl_rate_priv *rate_priv = priv_sta;
+
kfree(rate_priv);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.h b/drivers/net/wireless/realtek/rtlwifi/rc.h
index f29643d60d6b..1c0a17370093 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_RC_H__
#define __RTL_RC_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 1bf3eb25c1da..6ccb5b93a595 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -1,32 +1,10 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "wifi.h"
#include "regd.h"
-static struct country_code_to_enum_rd allCountries[] = {
+static struct country_code_to_enum_rd all_countries[] = {
{COUNTRY_CODE_FCC, "US"},
{COUNTRY_CODE_IC, "US"},
{COUNTRY_CODE_ETSI, "EC"},
@@ -63,7 +41,6 @@ static struct country_code_to_enum_rd allCountries[] = {
NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_NO_OFDM)
-
/* 5G chan 36 - chan 64*/
#define RTL819x_5GHZ_5150_5350 \
REG_RULE(5150-10, 5350+10, 80, 0, 30, 0)
@@ -391,9 +368,9 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
{
int i;
- for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
- if (allCountries[i].countrycode == countrycode)
- return &allCountries[i];
+ for (i = 0; i < ARRAY_SIZE(all_countries); i++) {
+ if (all_countries[i].countrycode == countrycode)
+ return &all_countries[i];
}
return NULL;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.h b/drivers/net/wireless/realtek/rtlwifi/regd.h
index f7f15bce35dd..3ba068511e34 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.h
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_REGD_H__
#define __RTL_REGD_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
index 45c866d3ca88..fa2e1b063f68 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92C_DEF_H__
#define __RTL92C_DEF_H__
@@ -123,9 +101,6 @@
#define IS_92C_SERIAL(version) \
((IS_81XXC(version) && IS_2T2R(version)) ? true : false)
-#define IS_81xxC_VENDOR_UMC_A_CUT(version) \
- (IS_81XXC(version) ? ((IS_CHIP_VENDOR_UMC(version)) ? \
- ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) : false)
#define IS_81XXC_VENDOR_UMC_B_CUT(version) \
(IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index e05af7d60830..85360353f557 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
index 50f26a9a97db..eb8090caeec2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL88E_DM_H__
#define __RTL88E_DM_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index 63874512598b..203e7b574e84 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
index b884c30c7b37..39ddb7afea9d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92C__FW__H__
#define __RTL92C__FW__H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index cfc8762c55f4..454bab38b165 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
index 214cd2a32018..fd09b0712d17 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92CE_HW_H__
#define __RTL92CE_HW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
index df3e214460db..4ef6d5907521 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
index 4b325b75faaf..67d3dc389ba0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92CE_LED_H__
#define __RTL92CE_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 14a256062614..96d8f25b120f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
index b29bd77210f4..8157ef419eeb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92C_PHY_H__
#define __RTL92C_PHY_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
index 02013df968a0..d69497bf5d19 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../pwrseqcmd.h"
#include "pwrseq.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
index 8379a3e5198c..42e222c1795f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL8723E_PWRSEQ_H__
#define __RTL8723E_PWRSEQ_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
index 0c0d64aea651..0fc8db8916fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92C_REG_H__
#define __RTL92C_REG_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
index 30798b12a363..0f401ad92c2e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
index 0eca030e3238..05e27b40b2a9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92C_RF_H__
#define __RTL92C_RF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 8c15ffd3568b..eab48fed61ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
index 22398c3753a6..1407151503f8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92CE_SW_H__
#define __RTL92CE_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
index 68bcb7fe6a65..a3c312c3ed9a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "table.h"
u32 RTL8188EEPHY_REG_1TARRAY[] = {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
index 403c4ddd236f..df6065602401 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92CE_TABLE__H_
#define __RTL92CE_TABLE__H_
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 14dcb0816bc0..106011a24827 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -415,7 +393,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
(GET_RX_DESC_FAGGR(pdesc) == 1));
if (status->packet_report_type == NORMAL_RX)
status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate);
@@ -442,7 +420,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
if (status->crc)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (status->rx_is40Mhzpacket)
+ if (status->rx_is40mhzpacket)
rx_status->bw = RATE_INFO_BW_40;
if (status->is_ht)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
index 127ba977206f..c29d9bfa5bd4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2013 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2013 Realtek Corporation.*/
#ifndef __RTL92CE_TRX_H__
#define __RTL92CE_TRX_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 0b5a06ffa482..f2908ee5f860 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include <linux/export.h>
#include "dm_common.h"
@@ -447,7 +425,6 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
-
if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
dm_digtable->rssi_val_min =
rtl92c_dm_initial_gain_min_pwdb(hw);
@@ -526,7 +503,6 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
rtl92c_dm_cck_packet_detection_thresh(hw);
dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
-
}
static void rtl92c_dm_dig(struct ieee80211_hw *hw)
@@ -629,6 +605,7 @@ static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+
rtlpriv->dm.current_turbo_edca = false;
rtlpriv->dm.is_any_nonbepkts = false;
rtlpriv->dm.is_cur_rdlstate = false;
@@ -682,7 +659,6 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
(!rtlpriv->dm.disable_framebursting))) {
-
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
@@ -707,6 +683,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
+
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
&tmp);
rtlpriv->dm.current_turbo_edca = false;
@@ -1657,7 +1634,6 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
-
/* Only enable HW BT coexist when BT in "Busy" state. */
if (rtlpriv->mac80211.vendor == PEER_CISCO &&
rtlpriv->btcoexist.bt_service == BT_OTHER_ACTION) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
index 441604ff5858..c4ce9fc96163 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92COMMON_DM_H__
#define __RTL92COMMON_DM_H__
@@ -49,7 +27,7 @@
#define DM_DIG_FA_TH1 0x100
#define DM_DIG_FA_TH2 0x200
-#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_SS_TH_LOW 30
#define RXPATHSELECTION_DIFF_TH 18
#define DM_RATR_STA_INIT 0
@@ -60,7 +38,7 @@
#define CTS2SELF_THVAL 30
#define REGC38_TH 20
-#define WAIOTTHVal 25
+#define WAIOTTHVAL 25
#define TXHIGHPWRLEVEL_NORMAL 0
#define TXHIGHPWRLEVEL_LEVEL1 1
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index f3bff66e85d0..18c76990a089 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -40,6 +18,7 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) {
u32 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+
if (enable)
value32 |= MCUFWDL_EN;
else
@@ -47,8 +26,8 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
} else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) {
u8 tmp;
- if (enable) {
+ if (enable) {
tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1,
tmp | 0x04);
@@ -59,7 +38,6 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
} else {
-
tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
@@ -79,27 +57,27 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
is_version_b = IS_NORMAL_CHIP(version);
if (is_version_b) {
- u32 pageNums, remainsize;
+ u32 pagenums, remainsize;
u32 page, offset;
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
rtl_fill_dummy(bufferptr, &size);
- pageNums = size / FW_8192C_PAGE_SIZE;
+ pagenums = size / FW_8192C_PAGE_SIZE;
remainsize = size % FW_8192C_PAGE_SIZE;
- if (pageNums > 4)
+ if (pagenums > 4)
pr_err("Page numbers should not greater then 4\n");
- for (page = 0; page < pageNums; page++) {
+ for (page = 0; page < pagenums; page++) {
offset = page * FW_8192C_PAGE_SIZE;
rtl_fw_page_write(hw, page, (bufferptr + offset),
FW_8192C_PAGE_SIZE);
}
if (remainsize) {
- offset = pageNums * FW_8192C_PAGE_SIZE;
- page = pageNums;
+ offset = pagenums * FW_8192C_PAGE_SIZE;
+ page = pagenums;
rtl_fw_page_write(hw, page, (bufferptr + offset),
remainsize);
}
@@ -118,7 +96,7 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
do {
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
} while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
- (!(value32 & FWDL_ChkSum_rpt)));
+ (!(value32 & FWDL_CHKSUM_RPT)));
if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
pr_err("chksum report fail! REG_MCUFWDL:0x%08x .\n",
@@ -644,7 +622,6 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
"rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
u1rsvdpageloc, 3);
-
skb = dev_alloc_skb(totalpacketlen);
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
index c5fa14bda387..888d9fc94bc2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C__FW__COMMON__H__
#define __RTL92C__FW__COMMON__H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
index 889bd1301154..97ad21c3964f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
@@ -1,32 +1,9 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include <linux/module.h>
-
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Georgia <georgia@realtek.com>");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 7c6e5d91439d..0efd19aa4fe5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../rtl8192ce/reg.h"
@@ -239,7 +217,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile);
-void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+void _rtl92c_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
u32 regaddr, u32 bitmask,
u32 data)
{
@@ -393,7 +371,7 @@ void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
rtlphy->pwrgroup_cnt++;
}
}
-EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset);
+EXPORT_SYMBOL(_rtl92c_store_pwrindex_diffrate_offset);
void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
{
@@ -452,10 +430,10 @@ void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
RFPGA0_XB_LSSIPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER;
rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
@@ -769,6 +747,7 @@ static void _rtl92c_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) {
if (channel == 6 &&
rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
@@ -1120,19 +1099,19 @@ static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
u32 *addareg, bool is_patha_on, bool is2t)
{
- u32 pathOn;
+ u32 pathon;
u32 i;
- pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+ pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
if (false == is2t) {
- pathOn = 0x0bdb25a0;
+ pathon = 0x0bdb25a0;
rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
} else {
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
}
for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
}
static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
@@ -1361,7 +1340,7 @@ static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
if (is_hal_stop(rtlhal)) {
rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
- rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
}
if (is2t) {
if (bmain)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
index d11261e05a2e..75afa6253ad0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_PHY_COMMON_H__
#define __RTL92C_PHY_COMMON_H__
@@ -44,9 +22,9 @@
#define LOOP_LIMIT 5
#define MAX_STALL_TIME 50
-#define AntennaDiversityValue 0x80
+#define ANTENNADIVERSITYVALUE 0x80
#define MAX_TXPWR_IDX_NMODE_92S 63
-#define Reset_Cnt_Limit 3
+#define RESET_CNT_LIMIT 3
#define IQK_ADDA_REG_NUM 16
#define IQK_MAC_REG_NUM 4
@@ -242,7 +220,7 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
-void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+void _rtl92c_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
u32 regaddr, u32 bitmask,
u32 data);
bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
index d2005d7e9ad2..147bf2407f8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_DEF_H__
#define __RTL92C_DEF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
index 2c8205e46be4..a3e2c8a60967 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
index 9761d0ca31b0..eab42c1bd470 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_DM_H__
#define __RTL92C_DM_H__
@@ -44,7 +22,7 @@
#define DM_DIG_FA_TH1 0x100
#define DM_DIG_FA_TH2 0x200
-#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_SS_TH_LOW 30
#define RXPATHSELECTION_DIFF_TH 18
#define DM_RATR_STA_INIT 0
@@ -55,7 +33,7 @@
#define CTS2SELF_THVAL 30
#define REGC38_TH 20
-#define WAIOTTHVal 25
+#define WAIOTTHVAL 25
#define TXHIGHPWRLEVEL_NORMAL 0
#define TXHIGHPWRLEVEL_LEVEL1 1
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 4a81e0ef4b8e..a52dd64d528d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
@@ -104,13 +82,13 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
break;
case HW_VAR_FWLPS_RF_ON:{
- enum rf_pwrstate rfState;
+ enum rf_pwrstate rfstate;
u32 val_rcr;
rtlpriv->cfg->ops->get_hw_reg(hw,
HW_VAR_RF_STATE,
- (u8 *) (&rfState));
- if (rfState == ERFOFF) {
+ (u8 *)(&rfstate));
+ if (rfstate == ERFOFF) {
*((bool *) (val)) = true;
} else {
val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
@@ -166,6 +144,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_BASIC_RATE:{
u16 rate_cfg = ((u16 *) val)[0];
u8 rate_index = 0;
+
rate_cfg &= 0x15f;
rate_cfg |= 0x01;
rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
@@ -219,6 +198,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
u8 short_preamble = (bool)*val;
+
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -315,6 +295,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_AC_PARAM:{
u8 e_aci = *(val);
+
rtl92c_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != EACMWAY2_SW)
@@ -336,13 +317,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
if (acm) {
switch (e_aci) {
case AC0_BE:
- acm_ctrl |= AcmHw_BeqEn;
+ acm_ctrl |= ACMHW_BEQEN;
break;
case AC2_VI:
- acm_ctrl |= AcmHw_ViqEn;
+ acm_ctrl |= ACMHW_VIQEN;
break;
case AC3_VO:
- acm_ctrl |= AcmHw_VoqEn;
+ acm_ctrl |= ACMHW_VOQEN;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -353,13 +334,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
} else {
switch (e_aci) {
case AC0_BE:
- acm_ctrl &= (~AcmHw_BeqEn);
+ acm_ctrl &= (~ACMHW_BEQEN);
break;
case AC2_VI:
- acm_ctrl &= (~AcmHw_ViqEn);
+ acm_ctrl &= (~ACMHW_VIQEN);
break;
case AC3_VO:
- acm_ctrl &= (~AcmHw_VoqEn);
+ acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
pr_err("switch case %#x not processed\n",
@@ -478,6 +459,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
case HW_VAR_AID:{
u16 u2btmp;
+
u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
u2btmp &= 0xC000;
rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
@@ -584,23 +566,23 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned short i;
u8 txpktbuf_bndy;
- u8 maxPage;
+ u8 maxpage;
bool status;
#if LLT_CONFIG == 1
- maxPage = 255;
+ maxpage = 255;
txpktbuf_bndy = 252;
#elif LLT_CONFIG == 2
- maxPage = 127;
+ maxpage = 127;
txpktbuf_bndy = 124;
#elif LLT_CONFIG == 3
- maxPage = 255;
+ maxpage = 255;
txpktbuf_bndy = 174;
#elif LLT_CONFIG == 4
- maxPage = 255;
+ maxpage = 255;
txpktbuf_bndy = 246;
#elif LLT_CONFIG == 5
- maxPage = 255;
+ maxpage = 255;
txpktbuf_bndy = 246;
#endif
@@ -639,13 +621,13 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
if (true != status)
return status;
- for (i = txpktbuf_bndy; i < maxPage; i++) {
+ for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl92ce_llt_write(hw, i, (i + 1));
if (true != status)
return status;
}
- status = _rtl92ce_llt_write(hw, maxPage, txpktbuf_bndy);
+ status = _rtl92ce_llt_write(hw, maxpage, txpktbuf_bndy);
if (true != status)
return status;
@@ -683,6 +665,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
if (rtlpriv->btcoexist.bt_coexistence) {
u32 value32;
+
value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO);
value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK);
rtl_write_dword(rtlpriv, REG_APS_FSMCO, value32);
@@ -908,11 +891,11 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
return;
}
- sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+ sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
if (rtlpriv->sec.use_defaultkey) {
- sec_reg_value |= SCR_TxUseDK;
- sec_reg_value |= SCR_RxUseDK;
+ sec_reg_value |= SCR_TXUSEDK;
+ sec_reg_value |= SCR_RXUSEDK;
}
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
@@ -1267,6 +1250,7 @@ int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+
rtl92c_dm_init_edca_turbo(hw);
switch (aci) {
case AC1_BK:
@@ -2301,7 +2285,6 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw)
rtlpriv->btcoexist.reg_bt_sco = 0;
}
-
void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
index 6711ea1a75d9..fa1049d16cb7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CE_HW_H__
#define __RTL92CE_HW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
index 7edf5af9046e..d6933d36ada2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
index f6edb9cd9b67..97ab1e00af5f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CE_LED_H__
#define __RTL92CE_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index 7c6d7fc1ef9a..f6574f31fa3b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -104,7 +82,7 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
- FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ FEN_BB_GLB_RSTN | FEN_BBRSTB);
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
@@ -236,7 +214,7 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
rtl_addr_delay(phy_regarray_table_pg[i]);
- _rtl92c_store_pwrIndex_diffrate_offset(hw,
+ _rtl92c_store_pwrindex_diffrate_offset(hw,
phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
phy_regarray_table_pg[i + 2]);
@@ -464,13 +442,14 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
if ((ppsc->rfpwr_state == ERFOFF) &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
- u32 InitializeCount = 0;
+ u32 initializecount = 0;
+
do {
- InitializeCount++;
+ initializecount++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
"IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
- } while (!rtstatus && (InitializeCount < 10));
+ } while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
index 93f3bc0197b4..7582a162bd11 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_PHY_H__
#define __RTL92C_PHY_H__
@@ -44,9 +22,9 @@
#define LOOP_LIMIT 5
#define MAX_STALL_TIME 50
-#define AntennaDiversityValue 0x80
+#define ANTENNADIVERSITYVALUE 0x80
#define MAX_TXPWR_IDX_NMODE_92S 63
-#define Reset_Cnt_Limit 3
+#define RESET_CNT_LIMIT 3
#define IQK_ADDA_REG_NUM 16
#define IQK_MAC_REG_NUM 4
@@ -122,7 +100,7 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+void _rtl92c_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
u32 regaddr, u32 bitmask, u32 data);
bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
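A note on the license conversion running through these files: the .c sources above take the SPDX tag as a C++-style comment on the first line, while the headers keep a C-style comment, which is the usual kernel convention for the two file types. A minimal illustrative pair (split into example.h and example.c; the names are invented for the demo):

/* SPDX-License-Identifier: GPL-2.0 */
/* example.h - headers conventionally carry the tag in a C-style comment */
#ifndef EXAMPLE_H
#define EXAMPLE_H

int example_answer(void);

#endif /* EXAMPLE_H */

// SPDX-License-Identifier: GPL-2.0
/* example.c - C sources conventionally carry the tag as a // comment */
#include <stdio.h>
#include "example.h"

int example_answer(void)
{
	return 42;
}

int main(void)
{
	printf("%d\n", example_answer());
	return 0;
}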
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
index 9e3b58a5d2bb..431277ef1c98 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_REG_H__
#define __RTL92C_REG_H__
@@ -702,7 +680,7 @@
#define PWC_EV12V BIT(15)
#define FEN_BBRSTB BIT(0)
-#define FEN_BB_GLB_RSTn BIT(1)
+#define FEN_BB_GLB_RSTN BIT(1)
#define FEN_USBA BIT(2)
#define FEN_UPLL BIT(3)
#define FEN_USBD BIT(4)
@@ -722,7 +700,7 @@
#define PFM_ALDN BIT(1)
#define PFM_LDKP BIT(2)
#define PFM_WOWL BIT(3)
-#define EnPDN BIT(4)
+#define ENPDN BIT(4)
#define PDN_PL BIT(5)
#define APFM_ONMAC BIT(8)
#define APFM_OFF BIT(9)
@@ -837,19 +815,19 @@
#define LDOE25_EN BIT(31)
#define RSM_EN BIT(0)
-#define Timer_EN BIT(4)
+#define TIMER_EN BIT(4)
#define TRSW0EN BIT(2)
#define TRSW1EN BIT(3)
#define EROM_EN BIT(4)
-#define EnBT BIT(5)
-#define EnUart BIT(8)
-#define Uart_910 BIT(9)
-#define EnPMAC BIT(10)
+#define ENBT BIT(5)
+#define ENUART BIT(8)
+#define UART_910 BIT(9)
+#define ENPMAC BIT(10)
#define SIC_SWRST BIT(11)
-#define EnSIC BIT(12)
+#define ENSIC BIT(12)
#define SIC_23 BIT(13)
-#define EnHDP BIT(14)
+#define ENHDP BIT(14)
#define SIC_LBK BIT(15)
#define LED0PL BIT(4)
@@ -858,7 +836,7 @@
#define MCUFWDL_EN BIT(0)
#define MCUFWDL_RDY BIT(1)
-#define FWDL_ChkSum_rpt BIT(2)
+#define FWDL_CHKSUM_RPT BIT(2)
#define MACINI_RDY BIT(3)
#define BBINI_RDY BIT(4)
#define RFINI_RDY BIT(5)
@@ -1076,13 +1054,13 @@
#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
-#define AcmHw_HwEn BIT(0)
-#define AcmHw_BeqEn BIT(1)
-#define AcmHw_ViqEn BIT(2)
-#define AcmHw_VoqEn BIT(3)
-#define AcmHw_BeqStatus BIT(4)
-#define AcmHw_ViqStatus BIT(5)
-#define AcmHw_VoqStatus BIT(6)
+#define ACMHW_HWEN BIT(0)
+#define ACMHW_BEQEN BIT(1)
+#define ACMHW_VIQEN BIT(2)
+#define ACMHW_VOQEN BIT(3)
+#define ACMHW_BEQSTATUS BIT(4)
+#define ACMHW_VIQSTATUS BIT(5)
+#define ACMHW_VOQSTATUS BIT(6)
#define APSDOFF BIT(6)
#define APSDOFF_STATUS BIT(7)
@@ -1121,7 +1099,7 @@
#define BM_DATA_EN BIT(17)
#define MFBEN BIT(22)
#define LSIGEN BIT(23)
-#define EnMBID BIT(24)
+#define ENMBID BIT(24)
#define APP_BASSN BIT(27)
#define APP_PHYSTS BIT(28)
#define APP_ICV BIT(29)
@@ -1150,12 +1128,12 @@
#define RXERR_RPT_RST BIT(27)
#define _RXERR_RPT_SEL(type) ((type) << 28)
-#define SCR_TxUseDK BIT(0)
-#define SCR_RxUseDK BIT(1)
-#define SCR_TxEncEnable BIT(2)
-#define SCR_RxDecEnable BIT(3)
-#define SCR_SKByA2 BIT(4)
-#define SCR_NoSKMC BIT(5)
+#define SCR_TXUSEDK BIT(0)
+#define SCR_RXUSEDK BIT(1)
+#define SCR_TXENCENABLE BIT(2)
+#define SCR_RXDECENABLE BIT(3)
+#define SCR_SKBYA2 BIT(4)
+#define SCR_NOSKMC BIT(5)
#define SCR_TXBCUSEDK BIT(6)
#define SCR_RXBCUSEDK BIT(7)
@@ -1208,7 +1186,7 @@
#define RPMAC_CCKPLCPHEADER 0x144
#define RPMAC_CCKCRC16 0x148
#define RPMAC_OFDMRXCRC32OK 0x170
-#define RPMAC_OFDMRXCRC32Er 0x174
+#define RPMAC_OFDMRXCRC32ER 0x174
#define RPMAC_OFDMRXPARITYER 0x178
#define RPMAC_OFDMRXCRC8ER 0x17c
#define RPMAC_CCKCRXRC16ER 0x180
@@ -1246,8 +1224,8 @@
#define RFPGA0_XAB_RFINTERFACESW 0x870
#define RFPGA0_XCD_RFINTERFACESW 0x874
-#define rFPGA0_XAB_RFPARAMETER 0x878
-#define rFPGA0_XCD_RFPARAMETER 0x87c
+#define RFPGA0_XAB_RFPARAMETER 0x878
+#define RFPGA0_XCD_RFPARAMETER 0x87c
#define RFPGA0_ANALOGPARAMETER1 0x880
#define RFPGA0_ANALOGPARAMETER2 0x884
@@ -1521,8 +1499,8 @@
#define BCCKTXCRC16 0xffff
#define BCCKTXSTATUS 0x1
#define BOFDMTXSTATUS 0x2
-#define IS_BB_REG_OFFSET_92S(_Offset) \
- ((_Offset >= 0x800) && (_Offset <= 0xfff))
+#define IS_BB_REG_OFFSET_92S(_offset) \
+ (((_offset) >= 0x800) && ((_offset) <= 0xfff))
#define BRFMOD 0x1
#define BJAPANMODE 0x2
@@ -1715,7 +1693,6 @@
#define BCCK_RF_EXTEND 0x20000000
#define BCCK_RXAGC_SATLEVEL 0x1f000000
#define BCCK_RXAGC_SATCOUNT 0xe0
-#define bCCKRxRFSettle 0x1f
#define BCCK_FIXED_RXAGC 0x8000
#define BCCK_ANTENNA_POLARITY 0x2000
#define BCCK_TXFILTER_TYPE 0x0c00
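The IS_BB_REG_OFFSET_92S change a few hunks above is the classic macro-hygiene fix: every use of the argument gets its own parentheses so an expression passed as _offset cannot be torn apart by operator precedence. A stand-alone illustration of why that matters (userspace C; the macro names and values are invented for the demo):

#include <stdio.h>

/* Unsafe form: the bare argument is spliced straight into the comparisons. */
#define IN_BB_RANGE_BAD(off)	(off >= 0x800 && off <= 0xfff)
/* Safe form, mirroring the fixed kernel macro. */
#define IN_BB_RANGE(off)	(((off) >= 0x800) && ((off) <= 0xfff))

int main(void)
{
	unsigned int use_high = 1;

	/*
	 * With a ?: expression as the argument, the unsafe expansion is
	 * re-parsed so the whole macro collapses to 0x2000 (nonzero, i.e.
	 * "in range"), while the parenthesized form correctly reports 0.
	 */
	printf("bad : %d\n", IN_BB_RANGE_BAD(use_high ? 0x2000 : 0x900) ? 1 : 0);
	printf("good: %d\n", IN_BB_RANGE(use_high ? 0x2000 : 0x900) ? 1 : 0);
	return 0;
}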
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
index e68ed7f37c79..713859488744 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
@@ -150,18 +128,18 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u32 powerBase0, powerBase1;
+ u32 powerbase0, powerbase1;
u8 legacy_pwrdiff, ht20_pwrdiff;
u8 i, powerlevel[2];
for (i = 0; i < 2; i++) {
powerlevel[i] = ppowerlevel[i];
legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
- powerBase0 = powerlevel[i] + legacy_pwrdiff;
+ powerbase0 = powerlevel[i] + legacy_pwrdiff;
- powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
- (powerBase0 << 8) | powerBase0;
- *(ofdmbase + i) = powerBase0;
+ powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+ (powerbase0 << 8) | powerbase0;
+ *(ofdmbase + i) = powerbase0;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
" [OFDM power base index rf(%c) = 0x%x]\n",
i == 0 ? 'A' : 'B', *(ofdmbase + i));
@@ -172,11 +150,11 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
powerlevel[i] += ht20_pwrdiff;
}
- powerBase1 = powerlevel[i];
- powerBase1 = (powerBase1 << 24) |
- (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+ powerbase1 = powerlevel[i];
+ powerbase1 = (powerbase1 << 24) |
+ (powerbase1 << 16) | (powerbase1 << 8) | powerbase1;
- *(mcsbase + i) = powerBase1;
+ *(mcsbase + i) = powerbase1;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
" [MCS power base index rf(%c) = 0x%x]\n",
@@ -186,37 +164,37 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
u8 channel, u8 index,
- u32 *powerBase0,
- u32 *powerBase1,
+ u32 *powerbase0,
+ u32 *powerbase1,
u32 *p_outwriteval)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 i, chnlgroup = 0, pwr_diff_limit[4];
- u32 writeVal, customer_limit, rf;
+ u32 writeval, customer_limit, rf;
for (rf = 0; rf < 2; rf++) {
switch (rtlefuse->eeprom_regulatory) {
case 0:
chnlgroup = 0;
- writeVal = rtlphy->mcs_offset[chnlgroup][index +
+ writeval = rtlphy->mcs_offset[chnlgroup][index +
(rf ? 8 : 0)]
- + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "RTK better performance, writeVal(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "RTK better performance, writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
case 1:
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- writeVal = ((index < 2) ? powerBase0[rf] :
- powerBase1[rf]);
+ writeval = ((index < 2) ? powerbase0[rf] :
+ powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Realtek regulatory, 40MHz, writeVal(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "Realtek regulatory, 40MHz, writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
} else {
if (rtlphy->pwrgroup_cnt == 1)
chnlgroup = 0;
@@ -231,23 +209,23 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
chnlgroup++;
}
- writeVal = rtlphy->mcs_offset[chnlgroup]
+ writeval = rtlphy->mcs_offset[chnlgroup]
[index + (rf ? 8 : 0)] + ((index < 2) ?
- powerBase0[rf] :
- powerBase1[rf]);
+ powerbase0[rf] :
+ powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
}
break;
case 2:
- writeVal =
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ writeval =
+ ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Better regulatory, writeVal(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "Better regulatory, writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
case 3:
chnlgroup = 0;
@@ -297,36 +275,36 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
"Customer's limit rf(%c) = 0x%x\n",
rf == 0 ? 'A' : 'B', customer_limit);
- writeVal = customer_limit +
- ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ writeval = customer_limit +
+ ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Customer, writeVal rf(%c)= 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "Customer, writeval rf(%c)= 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
default:
chnlgroup = 0;
- writeVal = rtlphy->mcs_offset[chnlgroup]
+ writeval = rtlphy->mcs_offset[chnlgroup]
[index + (rf ? 8 : 0)]
- + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "RTK better performance, writeVal rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "RTK better performance, writeval rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
}
if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
- writeVal = writeVal - 0x06060606;
+ writeval = writeval - 0x06060606;
else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
TXHIGHPWRLEVEL_BT2)
- writeVal = writeVal - 0x0c0c0c0c;
- *(p_outwriteval + rf) = writeVal;
+ writeval = writeval - 0x0c0c0c0c;
+ *(p_outwriteval + rf) = writeval;
}
}
static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
- u8 index, u32 *pValue)
+ u8 index, u32 *value)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -342,29 +320,29 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
};
u8 i, rf, pwr_val[4];
- u32 writeVal;
+ u32 writeval;
u16 regoffset;
for (rf = 0; rf < 2; rf++) {
- writeVal = pValue[rf];
+ writeval = value[rf];
for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8) ((writeVal & (0x7f <<
+ pwr_val[i] = (u8)((writeval & (0x7f <<
(i * 8))) >> (i * 8));
if (pwr_val[i] > RF6052_MAX_TX_PWR)
pwr_val[i] = RF6052_MAX_TX_PWR;
}
- writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
(pwr_val[1] << 8) | pwr_val[0];
if (rf == 0)
regoffset = regoffset_a[index];
else
regoffset = regoffset_b[index];
- rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Set 0x%x = %08x\n", regoffset, writeVal);
+ "Set 0x%x = %08x\n", regoffset, writeval);
if (((get_rf_type(rtlphy) == RF_2T2R) &&
(regoffset == RTXAGC_A_MCS15_MCS12 ||
@@ -373,7 +351,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
(regoffset == RTXAGC_A_MCS07_MCS04 ||
regoffset == RTXAGC_B_MCS07_MCS04))) {
- writeVal = pwr_val[3];
+ writeval = pwr_val[3];
if (regoffset == RTXAGC_A_MCS15_MCS12 ||
regoffset == RTXAGC_A_MCS07_MCS04)
regoffset = 0xc90;
@@ -382,9 +360,9 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset = 0xc98;
for (i = 0; i < 3; i++) {
- writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+ writeval = (writeval > 6) ? (writeval - 6) : 0;
rtl_write_byte(rtlpriv, (u32) (regoffset + i),
- (u8) writeVal);
+ (u8)writeval);
}
}
}
@@ -393,20 +371,20 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel, u8 channel)
{
- u32 writeVal[2], powerBase0[2], powerBase1[2];
+ u32 writeval[2], powerbase0[2], powerbase1[2];
u8 index;
rtl92c_phy_get_power_base(hw, ppowerlevel,
- channel, &powerBase0[0], &powerBase1[0]);
+ channel, &powerbase0[0], &powerbase1[0]);
for (index = 0; index < 6; index++) {
_rtl92c_get_txpower_writeval_by_regulatory(hw,
channel, index,
- &powerBase0[0],
- &powerBase1[0],
- &writeVal[0]);
+ &powerbase0[0],
+ &powerbase1[0],
+ &writeval[0]);
- _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
+ _rtl92c_write_ofdm_power_reg(hw, index, &writeval[0]);
}
}
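For orientation on the rf.c hunks above: rtl92c_phy_get_power_base() replicates one per-path power index into all four bytes of a 32-bit register image, and _rtl92c_write_ofdm_power_reg() later unpacks the four byte lanes, clamps each one to RF6052_MAX_TX_PWR and repacks them before the register write. A stand-alone sketch of that arithmetic (the 0x3f limit and the sample indexes are assumptions for the demo):

#include <stdint.h>
#include <stdio.h>

#define MAX_TX_PWR	0x3f	/* stand-in for RF6052_MAX_TX_PWR */

/* Replicate one power index into all four byte lanes, as the
 * power-base calculation above does for the OFDM/MCS bases. */
static uint32_t replicate_byte(uint8_t idx)
{
	uint32_t v = idx;

	return (v << 24) | (v << 16) | (v << 8) | v;
}

/* Unpack, clamp each lane, and repack, mirroring the loop in
 * _rtl92c_write_ofdm_power_reg(). */
static uint32_t clamp_lanes(uint32_t writeval)
{
	uint8_t pwr_val[4];
	int i;

	for (i = 0; i < 4; i++) {
		pwr_val[i] = (uint8_t)((writeval >> (i * 8)) & 0x7f);
		if (pwr_val[i] > MAX_TX_PWR)
			pwr_val[i] = MAX_TX_PWR;
	}
	return ((uint32_t)pwr_val[3] << 24) | ((uint32_t)pwr_val[2] << 16) |
	       ((uint32_t)pwr_val[1] << 8) | pwr_val[0];
}

int main(void)
{
	uint32_t base = replicate_byte(0x2e);	/* 0x2e2e2e2e */
	uint32_t boosted = base + 0x14141414;	/* push each lane past the limit */

	printf("base    = 0x%08x\n", base);
	printf("clamped = 0x%08x\n", clamp_lanes(boosted));
	return 0;
}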
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
index 22c5e6f51331..6fa70224d2d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_RF_H__
#define __RTL92C_RF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 71a6761d3648..a9c0111444bc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
index 9a1c89cbbda1..f2d121a60159 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CE_SW_H__
#define __RTL92CE_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
index 98b06d48a2dd..58878db404ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
@@ -1,33 +1,8 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "table.h"
-
u32 RTL8192CEPHY_REG_2TARRAY[PHY_REG_2TARRAY_LENGTH] = {
0x024, 0x0011800f,
0x028, 0x00ffdb83,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
index 51e4e07396a6..473af27f80d9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CE_TABLE__H_
#define __RTL92CE_TABLE__H_
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index d36e0060cc7a..18a0ab59631a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -58,6 +36,7 @@ static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
static u8 _rtl92c_evm_db_to_percentage(s8 value)
{
s8 ret_val;
+
ret_val = value;
if (ret_val >= 0)
@@ -131,6 +110,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
if (is_cck_rate) {
u8 report, cck_highpwr;
+
cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
if (ppsc->rfpwr_state == ERFON)
@@ -142,6 +122,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
if (!cck_highpwr) {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+
report = cck_buf->cck_agc_rpt & 0xc0;
report = report >> 6;
switch (report) {
@@ -160,6 +141,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
}
} else {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+
report = p_drvinfo->cfosho[0] & 0x60;
report = report >> 5;
switch (report) {
@@ -204,6 +186,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
/* (3) Get Signal Quality (EVM) */
if (packet_match_bssid) {
u8 sq;
+
if (pstats->rx_pwdb_all > 40)
sq = 100;
else {
@@ -340,6 +323,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
struct ieee80211_hdr *hdr;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
@@ -354,7 +338,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
&& (GET_RX_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
stats->is_cck = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
@@ -368,7 +352,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
if (stats->crc)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (stats->rx_is40Mhzpacket)
+ if (stats->rx_is40mhzpacket)
rx_status->bw = RATE_INFO_BW_40;
if (stats->is_ht)
@@ -519,6 +503,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
+
SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
}
@@ -755,6 +740,7 @@ bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw,
void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+
if (hw_queue == BEACON_QUEUE) {
rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
} else {
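Context for the rx_is40mhzpacket rename in the trx.c hunks above: the GET_RX_DESC_*() helpers extract individual fields from the receive descriptor words with shift-and-mask accessors, and the bandwidth bit is then mapped to mac80211's RATE_INFO_BW_40. A minimal userspace illustration of that accessor pattern (the field positions here are invented for the demo and are not the real descriptor layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy descriptor word; the field positions below are made up for the demo. */
#define DEMO_GET_FIELD(dword, shift, mask)	(((dword) >> (shift)) & (mask))
#define DEMO_GET_PKT_LEN(d)	DEMO_GET_FIELD(d, 0, 0x3fff)	/* bits 13..0 */
#define DEMO_GET_BW(d)		DEMO_GET_FIELD(d, 14, 0x1)	/* bit 14     */
#define DEMO_GET_CRC_ERR(d)	DEMO_GET_FIELD(d, 15, 0x1)	/* bit 15     */

int main(void)
{
	uint32_t desc0 = (1u << 14) | 1500;	/* 40 MHz, 1500-byte frame, CRC ok */
	bool is_40mhz = DEMO_GET_BW(desc0);

	printf("len=%u bytes, %s MHz, crc %s\n",
	       (unsigned int)DEMO_GET_PKT_LEN(desc0),
	       is_40mhz ? "40" : "20",
	       DEMO_GET_CRC_ERR(desc0) ? "bad" : "ok");
	return 0;
}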
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
index 91f0bd6b752f..fb1d4444a52f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CE_TRX_H__
#define __RTL92CE_TRX_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
index 316fe9990b6d..91e4427ab022 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../rtl8192ce/def.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
index 00fc0685317a..9d1167ff3b50 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
index ce71433792e3..2befc2f4e3fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../rtl8192ce/dm.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 1e60f70481f5..56cc3bc30860 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
@@ -53,9 +31,9 @@ static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY;
if (IS_HIGHT_PA(rtlefuse->board_type)) {
rtlphy->hwparam_tables[PHY_REG_PG].length =
- RTL8192CUPHY_REG_Array_PG_HPLength;
+ RTL8192CUPHY_REG_ARRAY_PG_HPLENGTH;
rtlphy->hwparam_tables[PHY_REG_PG].pdata =
- RTL8192CUPHY_REG_Array_PG_HP;
+ RTL8192CUPHY_REG_ARRAY_PG_HP;
} else {
rtlphy->hwparam_tables[PHY_REG_PG].length =
RTL8192CUPHY_REG_ARRAY_PGLENGTH;
@@ -82,21 +60,21 @@ static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
/* 1T */
if (IS_HIGHT_PA(rtlefuse->board_type)) {
rtlphy->hwparam_tables[PHY_REG_1T].length =
- RTL8192CUPHY_REG_1T_HPArrayLength;
+ RTL8192CUPHY_REG_1T_HPARRAYLENGTH;
rtlphy->hwparam_tables[PHY_REG_1T].pdata =
- RTL8192CUPHY_REG_1T_HPArray;
+ RTL8192CUPHY_REG_1T_HPARRAY;
rtlphy->hwparam_tables[RADIOA_1T].length =
- RTL8192CURadioA_1T_HPArrayLength;
+ RTL8192CURADIOA_1T_HPARRAYLENGTH;
rtlphy->hwparam_tables[RADIOA_1T].pdata =
- RTL8192CURadioA_1T_HPArray;
+ RTL8192CURADIOA_1T_HPARRAY;
rtlphy->hwparam_tables[RADIOB_1T].length =
RTL8192CURADIOB_1TARRAYLENGTH;
rtlphy->hwparam_tables[RADIOB_1T].pdata =
RTL8192CU_RADIOB_1TARRAY;
rtlphy->hwparam_tables[AGCTAB_1T].length =
- RTL8192CUAGCTAB_1T_HPArrayLength;
+ RTL8192CUAGCTAB_1T_HPARRAYLENGTH;
rtlphy->hwparam_tables[AGCTAB_1T].pdata =
- Rtl8192CUAGCTAB_1T_HPArray;
+ RTL8192CUAGCTAB_1T_HPARRAY;
} else {
rtlphy->hwparam_tables[PHY_REG_1T].length =
RTL8192CUPHY_REG_1TARRAY_LENGTH;
@@ -323,16 +301,16 @@ static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
{
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 boardType;
+ u8 boardtype;
if (IS_NORMAL_CHIP(rtlhal->version)) {
- boardType = ((contents[EEPROM_RF_OPT1]) &
+ boardtype = ((contents[EEPROM_RF_OPT1]) &
BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/
} else {
- boardType = contents[EEPROM_RF_OPT4];
- boardType &= BOARD_TYPE_TEST_MASK;
+ boardtype = contents[EEPROM_RF_OPT4];
+ boardtype &= BOARD_TYPE_TEST_MASK;
}
- rtlefuse->board_type = boardType;
+ rtlefuse->board_type = boardtype;
if (IS_HIGHT_PA(rtlefuse->board_type))
rtlefuse->external_pa = 1;
pr_info("Board Type %x\n", rtlefuse->board_type);
@@ -442,7 +420,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
u16 value16;
u8 value8;
/* polling autoload done. */
- u32 pollingCount = 0;
+ u32 pollingcount = 0;
do {
if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
@@ -450,7 +428,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
"Autoload Done!\n");
break;
}
- if (pollingCount++ > 100) {
+ if (pollingcount++ > 100) {
pr_err("Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n");
return -ENODEV;
}
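The pollingcount rename above lives inside the usual poll-a-status-bit-with-a-bailout idiom: keep reading the register until the autoload-done bit is set and give up with -ENODEV after a bounded number of attempts. A self-contained sketch of that shape (fake_read_status() stands in for the real register read; the bit value is arbitrary):

#include <errno.h>
#include <stdio.h>

#define PFM_ALDN_DEMO	(1u << 1)	/* stand-in for the autoload-done bit */

/* Pretend register read: reports "done" from the 40th poll onwards. */
static unsigned int fake_read_status(unsigned int poll)
{
	return poll >= 40 ? PFM_ALDN_DEMO : 0;
}

static int wait_autoload_done(void)
{
	unsigned int pollingcount = 0;

	do {
		if (fake_read_status(pollingcount) & PFM_ALDN_DEMO) {
			printf("Autoload done after %u polls\n", pollingcount);
			return 0;
		}
		if (pollingcount++ > 100) {
			fprintf(stderr, "timed out waiting for autoload\n");
			return -ENODEV;
		}
	} while (1);
}

int main(void)
{
	return wait_autoload_done() ? 1 : 0;
}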
@@ -474,7 +452,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8);
}
/* auto enable WLAN */
- pollingCount = 0;
+ pollingcount = 0;
value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO);
value16 |= APFM_ONMAC;
rtl_write_word(rtlpriv, REG_APS_FSMCO, value16);
@@ -483,7 +461,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
pr_info("MAC auto ON okay!\n");
break;
}
- if (pollingCount++ > 1000) {
+ if (pollingcount++ > 1000) {
pr_err("Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
return -ENODEV;
}
@@ -495,12 +473,12 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
value16 &= ~ISO_DIOR;
rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16);
/* Reconsider when to do this operation after asking HWSD. */
- pollingCount = 0;
+ pollingcount = 0;
rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv,
REG_APSD_CTRL) & ~BIT(6)));
do {
- pollingCount++;
- } while ((pollingCount < 200) &&
+ pollingcount++;
+ } while ((pollingcount < 200) &&
(rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7)));
/* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
value16 = rtl_read_word(rtlpriv, REG_CR);
@@ -517,60 +495,60 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- bool isChipN = IS_NORMAL_CHIP(rtlhal->version);
- u32 outEPNum = (u32)out_ep_num;
- u32 numHQ = 0;
- u32 numLQ = 0;
- u32 numNQ = 0;
- u32 numPubQ;
+ bool ischipn = IS_NORMAL_CHIP(rtlhal->version);
+ u32 outepnum = (u32)out_ep_num;
+ u32 numhq = 0;
+ u32 numlq = 0;
+ u32 numnq = 0;
+ u32 numpubq;
u32 value32;
u8 value8;
- u32 txQPageNum, txQPageUnit, txQRemainPage;
+ u32 txqpagenum, txqpageunit, txqremaininpage;
if (!wmm_enable) {
- numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ :
+ numpubq = (ischipn) ? CHIP_B_PAGE_NUM_PUBQ :
CHIP_A_PAGE_NUM_PUBQ;
- txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ;
+ txqpagenum = TX_TOTAL_PAGE_NUMBER - numpubq;
- txQPageUnit = txQPageNum/outEPNum;
- txQRemainPage = txQPageNum % outEPNum;
+ txqpageunit = txqpagenum / outepnum;
+ txqremaininpage = txqpagenum % outepnum;
if (queue_sel & TX_SELE_HQ)
- numHQ = txQPageUnit;
+ numhq = txqpageunit;
if (queue_sel & TX_SELE_LQ)
- numLQ = txQPageUnit;
+ numlq = txqpageunit;
/* HIGH priority queue is always present in the configuration of
* 2 out-eps. Remainder pages are assigned to the high queue */
- if ((outEPNum > 1) && (txQRemainPage))
- numHQ += txQRemainPage;
+ if (outepnum > 1 && txqremaininpage)
+ numhq += txqremaininpage;
/* NOTE: This step is done before writing REG_RQPN. */
- if (isChipN) {
+ if (ischipn) {
if (queue_sel & TX_SELE_NQ)
- numNQ = txQPageUnit;
- value8 = (u8)_NPQ(numNQ);
+ numnq = txqpageunit;
+ value8 = (u8)_NPQ(numnq);
rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
}
} else {
/* for WMM, the number of out-eps must be greater than or equal to 2! */
- numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ :
+ numpubq = ischipn ? WMM_CHIP_B_PAGE_NUM_PUBQ :
WMM_CHIP_A_PAGE_NUM_PUBQ;
if (queue_sel & TX_SELE_HQ) {
- numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ :
+ numhq = ischipn ? WMM_CHIP_B_PAGE_NUM_HPQ :
WMM_CHIP_A_PAGE_NUM_HPQ;
}
if (queue_sel & TX_SELE_LQ) {
- numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ :
+ numlq = ischipn ? WMM_CHIP_B_PAGE_NUM_LPQ :
WMM_CHIP_A_PAGE_NUM_LPQ;
}
/* NOTE: This step is done before writing REG_RQPN. */
- if (isChipN) {
+ if (ischipn) {
if (queue_sel & TX_SELE_NQ)
- numNQ = WMM_CHIP_B_PAGE_NUM_NPQ;
- value8 = (u8)_NPQ(numNQ);
+ numnq = WMM_CHIP_B_PAGE_NUM_NPQ;
+ value8 = (u8)_NPQ(numnq);
rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
}
}
/* TX DMA */
- value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+ value32 = _HPQ(numhq) | _LPQ(numlq) | _PUBQ(numpubq) | LD_RQPN;
rtl_write_dword(rtlpriv, REG_RQPN, value32);
}
@@ -597,20 +575,20 @@ static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
rtl_write_byte(rtlpriv, REG_PBP, value8);
}
-static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ,
- u16 bkQ, u16 viQ, u16 voQ,
- u16 mgtQ, u16 hiQ)
+static void _rtl92c_init_chipn_reg_priority(struct ieee80211_hw *hw, u16 beq,
+ u16 bkq, u16 viq, u16 voq,
+ u16 mgtq, u16 hiq)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7);
- value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
- _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
- _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
+ value16 |= _TXDMA_BEQ_MAP(beq) | _TXDMA_BKQ_MAP(bkq) |
+ _TXDMA_VIQ_MAP(viq) | _TXDMA_VOQ_MAP(voq) |
+ _TXDMA_MGQ_MAP(mgtq) | _TXDMA_HIQ_MAP(hiq);
rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16);
}
-static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
+static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
bool wmm_enable,
u8 queue_sel)
{
@@ -630,96 +608,96 @@ static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
WARN_ON(1); /* Shall not reach here! */
break;
}
- _rtl92c_init_chipN_reg_priority(hw, value, value, value, value,
+ _rtl92c_init_chipn_reg_priority(hw, value, value, value, value,
value, value);
pr_info("Tx queue select: 0x%02x\n", queue_sel);
}
-static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
- u8 queue_sel)
+static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 queue_sel)
{
- u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
- u16 uninitialized_var(valueHi);
- u16 uninitialized_var(valueLow);
+ u16 beq, bkq, viq, voq, mgtq, hiq;
+ u16 uninitialized_var(valuehi);
+ u16 uninitialized_var(valuelow);
switch (queue_sel) {
case (TX_SELE_HQ | TX_SELE_LQ):
- valueHi = QUEUE_HIGH;
- valueLow = QUEUE_LOW;
+ valuehi = QUEUE_HIGH;
+ valuelow = QUEUE_LOW;
break;
case (TX_SELE_NQ | TX_SELE_LQ):
- valueHi = QUEUE_NORMAL;
- valueLow = QUEUE_LOW;
+ valuehi = QUEUE_NORMAL;
+ valuelow = QUEUE_LOW;
break;
case (TX_SELE_HQ | TX_SELE_NQ):
- valueHi = QUEUE_HIGH;
- valueLow = QUEUE_NORMAL;
+ valuehi = QUEUE_HIGH;
+ valuelow = QUEUE_NORMAL;
break;
default:
WARN_ON(1);
break;
}
if (!wmm_enable) {
- beQ = valueLow;
- bkQ = valueLow;
- viQ = valueHi;
- voQ = valueHi;
- mgtQ = valueHi;
- hiQ = valueHi;
+ beq = valuelow;
+ bkq = valuelow;
+ viq = valuehi;
+ voq = valuehi;
+ mgtq = valuehi;
+ hiq = valuehi;
} else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
- beQ = valueHi;
- bkQ = valueLow;
- viQ = valueLow;
- voQ = valueHi;
- mgtQ = valueHi;
- hiQ = valueHi;
+ beq = valuehi;
+ bkq = valuelow;
+ viq = valuelow;
+ voq = valuehi;
+ mgtq = valuehi;
+ hiq = valuehi;
}
- _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+ _rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
pr_info("Tx queue select: 0x%02x\n", queue_sel);
}
-static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
+static void _rtl92cu_init_chipn_three_out_ep_priority(struct ieee80211_hw *hw,
bool wmm_enable,
u8 queue_sel)
{
- u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+ u16 beq, bkq, viq, voq, mgtq, hiq;
if (!wmm_enable) { /* typical setting */
- beQ = QUEUE_LOW;
- bkQ = QUEUE_LOW;
- viQ = QUEUE_NORMAL;
- voQ = QUEUE_HIGH;
- mgtQ = QUEUE_HIGH;
- hiQ = QUEUE_HIGH;
+ beq = QUEUE_LOW;
+ bkq = QUEUE_LOW;
+ viq = QUEUE_NORMAL;
+ voq = QUEUE_HIGH;
+ mgtq = QUEUE_HIGH;
+ hiq = QUEUE_HIGH;
} else { /* for WMM */
- beQ = QUEUE_LOW;
- bkQ = QUEUE_NORMAL;
- viQ = QUEUE_NORMAL;
- voQ = QUEUE_HIGH;
- mgtQ = QUEUE_HIGH;
- hiQ = QUEUE_HIGH;
+ beq = QUEUE_LOW;
+ bkq = QUEUE_NORMAL;
+ viq = QUEUE_NORMAL;
+ voq = QUEUE_HIGH;
+ mgtq = QUEUE_HIGH;
+ hiq = QUEUE_HIGH;
}
- _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+ _rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
pr_info("Tx queue select :0x%02x..\n", queue_sel);
}
-static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
+static void _rtl92cu_init_chipn_queue_priority(struct ieee80211_hw *hw,
bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
switch (out_ep_num) {
case 1:
- _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable,
+ _rtl92cu_init_chipn_one_out_ep_priority(hw, wmm_enable,
queue_sel);
break;
case 2:
- _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable,
+ _rtl92cu_init_chipn_two_out_ep_priority(hw, wmm_enable,
queue_sel);
break;
case 3:
- _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable,
+ _rtl92cu_init_chipn_three_out_ep_priority(hw, wmm_enable,
queue_sel);
break;
default:
@@ -728,7 +706,7 @@ static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
}
}
-static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
+static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw,
bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
@@ -769,11 +747,12 @@ static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
u8 queue_sel)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
if (IS_NORMAL_CHIP(rtlhal->version))
- _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num,
+ _rtl92cu_init_chipn_queue_priority(hw, wmm_enable, out_ep_num,
queue_sel);
else
- _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num,
+ _rtl92cu_init_chipt_queue_priority(hw, wmm_enable, out_ep_num,
queue_sel);
}
@@ -835,6 +814,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
u8 wmm_enable = false; /* TODO */
u8 out_ep_nums = rtlusb->out_ep_nums;
u8 queue_sel = rtlusb->out_queue_sel;
+
err = _rtl92cu_init_power_on(hw);
if (err) {
@@ -889,10 +869,10 @@ void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
"not open sw encryption\n");
return;
}
- sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+ sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
if (rtlpriv->sec.use_defaultkey) {
- sec_reg_value |= SCR_TxUseDK;
- sec_reg_value |= SCR_RxUseDK;
+ sec_reg_value |= SCR_TXUSEDK;
+ sec_reg_value |= SCR_RXUSEDK;
}
if (IS_NORMAL_CHIP(rtlhal->version))
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
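For reference on the SCR_* renames above: rtl92cu_enable_hw_security_config() composes the security configuration value by OR-ing single-bit flags (TX encryption and RX decryption on, optionally the default-key bits, and on normal chips the broadcast default-key bits). The same compose-then-write pattern in a stand-alone form (bit positions copied from the reg.h hunk earlier; the hardware write is replaced by a printf):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define SCR_TXUSEDK		BIT(0)
#define SCR_RXUSEDK		BIT(1)
#define SCR_TXENCENABLE		BIT(2)
#define SCR_RXDECENABLE		BIT(3)
#define SCR_TXBCUSEDK		BIT(6)
#define SCR_RXBCUSEDK		BIT(7)

static uint8_t build_sec_cfg(bool use_defaultkey, bool normal_chip)
{
	uint8_t sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;

	if (use_defaultkey)
		sec_reg_value |= SCR_TXUSEDK | SCR_RXUSEDK;
	if (normal_chip)
		sec_reg_value |= SCR_RXBCUSEDK | SCR_TXBCUSEDK;
	return sec_reg_value;
}

int main(void)
{
	/* Hardware encryption on, per-station keys, normal chip. */
	printf("security config value: 0x%02x\n", build_sec_cfg(false, true));
	return 0;
}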
@@ -921,7 +901,7 @@ static void _rtl92cu_hw_configure(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
}
-static void _InitPABias(struct ieee80211_hw *hw)
+static void _initpabias(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1017,14 +997,14 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
rtl92c_phy_lc_calibrate(hw);
}
_rtl92cu_hw_configure(hw);
- _InitPABias(hw);
+ _initpabias(hw);
rtl92c_dm_init(hw);
exit:
local_irq_restore(flags);
return err;
}
-static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw)
+static void disable_rfafeandresetbb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/**************************************
@@ -1034,20 +1014,21 @@ c. APSD_CTRL 0x600[7:0] = 0x40
d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
***************************************/
- u8 eRFPath = 0, value8 = 0;
+ u8 erfpath = 0, value8 = 0;
+
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
- rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0);
+ rtl_set_rfreg(hw, (enum radio_path)erfpath, 0x0, MASKBYTE0, 0x0);
value8 |= APSDOFF;
rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/
value8 = 0;
- value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
+ value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTN);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/
- value8 &= (~FEN_BB_GLB_RSTn);
+ value8 &= (~FEN_BB_GLB_RSTN);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/
}
-static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
+static void _resetdigitalprocedure1(struct ieee80211_hw *hw, bool withouthwsm)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1098,7 +1079,7 @@ static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
}
- if (bWithoutHWSM) {
+ if (withouthwsm) {
/*****************************
Without HW auto state machine
g.SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock
@@ -1113,7 +1094,7 @@ static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
}
}
-static void _ResetDigitalProcedure2(struct ieee80211_hw *hw)
+static void _resetdigitalprocedure2(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/*****************************
@@ -1125,7 +1106,7 @@ m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON
rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82);
}
-static void _DisableGPIO(struct ieee80211_hw *hw)
+static void _disablegpio(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/***************************************
@@ -1155,13 +1136,13 @@ n. LEDCFG 0x4C[15:0] = 0x8080
rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
}
-static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM)
+static void disable_analog(struct ieee80211_hw *hw, bool withouthwsm)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u16 value16 = 0;
u8 value8 = 0;
- if (bWithoutHWSM) {
+ if (withouthwsm) {
/*****************************
n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power
o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
@@ -1184,30 +1165,30 @@ i. APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
}
-static void _CardDisableHWSM(struct ieee80211_hw *hw)
+static void carddisable_hwsm(struct ieee80211_hw *hw)
{
/* ==== RF Off Sequence ==== */
- _DisableRFAFEAndResetBB(hw);
+ disable_rfafeandresetbb(hw);
/* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure1(hw, false);
+ _resetdigitalprocedure1(hw, false);
/* ==== Pull GPIO PIN to balance level and LED control ====== */
- _DisableGPIO(hw);
+ _disablegpio(hw);
/* ==== Disable analog sequence === */
- _DisableAnalog(hw, false);
+ disable_analog(hw, false);
}
-static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw)
+static void carddisablewithout_hwsm(struct ieee80211_hw *hw)
{
/*==== RF Off Sequence ==== */
- _DisableRFAFEAndResetBB(hw);
+ disable_rfafeandresetbb(hw);
/* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure1(hw, true);
+ _resetdigitalprocedure1(hw, true);
/* ==== Pull GPIO PIN to balance level and LED control ====== */
- _DisableGPIO(hw);
+ _disablegpio(hw);
/* ==== Reset digital sequence ====== */
- _ResetDigitalProcedure2(hw);
+ _resetdigitalprocedure2(hw);
/* ==== Disable analog sequence === */
- _DisableAnalog(hw, true);
+ disable_analog(hw, true);
}
static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
@@ -1226,6 +1207,7 @@ static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 tmp1byte = 0;
+
if (IS_NORMAL_CHIP(rtlhal->version)) {
tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
@@ -1353,10 +1335,10 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
_rtl92cu_set_media_status(hw, opmode);
rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
- if (rtlusb->disableHWSM)
- _CardDisableHWSM(hw);
+ if (rtlusb->disablehwsm)
+ carddisable_hwsm(hw);
else
- _CardDisableWithoutHWSM(hw);
+ carddisablewithout_hwsm(hw);
/* after power off we should do iqk again */
rtlpriv->phy.iqk_initialized = false;
@@ -1375,6 +1357,7 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
if (check_bssid) {
u8 tmp;
+
if (IS_NORMAL_CHIP(rtlhal->version)) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
tmp = BIT(4);
@@ -1387,6 +1370,7 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
_rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
} else {
u8 tmp;
+
if (IS_NORMAL_CHIP(rtlhal->version)) {
reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
tmp = BIT(4);
@@ -1498,12 +1482,12 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
break;
case HW_VAR_FWLPS_RF_ON:{
- enum rf_pwrstate rfState;
+ enum rf_pwrstate rfstate;
u32 val_rcr;
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
- (u8 *)(&rfState));
- if (rfState == ERFOFF) {
+ (u8 *)(&rfstate));
+ if (rfstate == ERFOFF) {
*((bool *) (val)) = true;
} else {
val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
@@ -1630,7 +1614,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
&e_aci);
} else {
u8 sifstime = 0;
- u8 u1bAIFS;
+ u8 u1baifs;
if (IS_WIRELESS_MODE_A(wirelessmode) ||
IS_WIRELESS_MODE_N_24G(wirelessmode) ||
@@ -1638,21 +1622,22 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
sifstime = 16;
else
sifstime = 10;
- u1bAIFS = sifstime + (2 * val[0]);
+ u1baifs = sifstime + (2 * val[0]);
rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
- u1bAIFS);
+ u1baifs);
rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
- u1bAIFS);
+ u1baifs);
rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
- u1bAIFS);
+ u1baifs);
rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
- u1bAIFS);
+ u1baifs);
}
break;
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
u8 short_preamble = (bool)*val;
+
reg_tmp = 0;
if (short_preamble)
reg_tmp |= 0x80;
@@ -1903,6 +1888,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
case HW_VAR_KEEP_ALIVE:{
u8 array[2];
+
array[0] = 0xff;
array[1] = *((u8 *)val);
rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2,
@@ -1985,7 +1971,6 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
if (nmode && ((curtxbw_40mhz &&
curshortgi_40mhz) || (!curtxbw_40mhz &&
curshortgi_20mhz))) {
-
ratr_value |= 0x10000000;
tmp_ratr_value = (ratr_value >> 12);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
index ebd168400d45..5c48c3fd45e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CU_HW_H__
#define __RTL92CU_HW_H__
@@ -36,14 +14,12 @@
#define TX_TOTAL_PAGE_NUMBER 0xF8
#define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1)
-
#define CHIP_B_PAGE_NUM_PUBQ 0xE7
/* For Test Chip Setting
* (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
#define CHIP_A_PAGE_NUM_PUBQ 0x7E
-
/* For Chip A Setting */
#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER 0xF5
#define WMM_CHIP_A_TX_PAGE_BOUNDARY \
@@ -53,8 +29,6 @@
#define WMM_CHIP_A_PAGE_NUM_HPQ 0x29
#define WMM_CHIP_A_PAGE_NUM_LPQ 0x29
-
-
/* Note: For Chip B Setting ,modify later */
#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER 0xF5
#define WMM_CHIP_B_TX_PAGE_BOUNDARY \
@@ -71,14 +45,14 @@
/* should be renamed and moved to another file */
enum _BOARD_TYPE_8192CUSB {
BOARD_USB_DONGLE = 0, /* USB dongle */
- BOARD_USB_High_PA = 1, /* USB dongle - high power PA */
+ BOARD_USB_HIGH_PA = 1, /* USB dongle - high power PA */
BOARD_MINICARD = 2, /* Minicard */
BOARD_USB_SOLO = 3, /* USB solo-Slim module */
BOARD_USB_COMBO = 4, /* USB Combo-Slim module */
};
#define IS_HIGHT_PA(boardtype) \
- ((boardtype == BOARD_USB_High_PA) ? true : false)
+ ((boardtype == BOARD_USB_HIGH_PA) ? true : false)
#define RTL92C_DRIVER_INFO_SIZE 4
void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
index 66d2784de67d..cc13a4a8f856 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
@@ -1,25 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../usb.h"
@@ -34,7 +14,7 @@ static void _rtl92cu_init_led(struct ieee80211_hw *hw,
pled->ledon = false;
}
-static void _rtl92cu_deInit_led(struct rtl_led *pled)
+static void rtl92cu_deinit_led(struct rtl_led *pled)
{
}
@@ -108,8 +88,8 @@ void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led0);
- _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led1);
+ rtl92cu_deinit_led(&rtlpriv->ledctl.sw_led0);
+ rtl92cu_deinit_led(&rtlpriv->ledctl.sw_led1);
}
static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
index 551deb8afb6f..3fc1e7c8f78b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
@@ -1,25 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CU_LED_H__
#define __RTL92CU_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 5657b1e34ad0..b3ce8000d52d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
-****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -46,7 +24,6 @@
#define RX_EVM rx_evm_percentage
#define RX_SIGQ rx_mimo_sig_qual
-
void rtl92c_read_chip_version(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -165,6 +142,7 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
} while (++count);
return status;
}
+
/**
* rtl92c_init_LLT_table - Init LLT table
* @io: io callback
@@ -211,6 +189,7 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
}
return rst;
}
+
void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 *p_macaddr, bool is_group, u8 enc_algo,
bool is_wepkey, bool clear_all)
@@ -392,6 +371,7 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+
rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
}
@@ -669,6 +649,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->RX_SIGQ[1] = -1;
if (is_cck_rate) {
u8 report, cck_highpwr;
+
cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
if (!in_powersavemode)
cck_highpwr = rtlphy->cck_high_power;
@@ -676,6 +657,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
cck_highpwr = false;
if (!cck_highpwr) {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+
report = cck_buf->cck_agc_rpt & 0xc0;
report = report >> 6;
switch (report) {
@@ -694,6 +676,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
}
} else {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+
report = p_drvinfo->cfosho[0] & 0x60;
report = report >> 5;
switch (report) {
@@ -716,6 +699,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
pstats->recvsignalpower = rx_pwr_all;
if (packet_match_bssid) {
u8 sq;
+
if (pstats->rx_pwdb_all > 40)
sq = 100;
else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
index 8573b7e257d9..dd76a05829d5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_MAC_H__
#define __RTL92C_MAC_H__
@@ -40,7 +18,6 @@ void rtl92c_enable_interrupt(struct ieee80211_hw *hw);
void rtl92c_disable_interrupt(struct ieee80211_hw *hw);
void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
-
/*---------------------------------------------------------------
* Hardware init functions
*---------------------------------------------------------------*/
@@ -152,6 +129,4 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
* Card disable functions
*---------------------------------------------------------------*/
-
-
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index f068dd5317a7..9cd028cb2239 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -125,7 +103,7 @@ bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
- FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ FEN_BB_GLB_RSTN | FEN_BBRSTB);
regval32 = rtl_read_dword(rtlpriv, 0x87c);
rtl_write_dword(rtlpriv, 0x87c, regval32 & (~BIT(31)));
rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
@@ -143,7 +121,7 @@ bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_ARRAY\n");
arraylength = rtlphy->hwparam_tables[MAC_REG].length ;
ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CUMAC_2T_ARRAY\n");
@@ -181,7 +159,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
phy_regarray_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
phy_regarray_table[i],
phy_regarray_table[i + 1]);
}
@@ -191,7 +169,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
agctab_array_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
agctab_array_table[i],
agctab_array_table[i + 1]);
}
@@ -214,7 +192,7 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
rtl_addr_delay(phy_regarray_table_pg[i]);
- _rtl92c_store_pwrIndex_diffrate_offset(hw,
+ _rtl92c_store_pwrindex_diffrate_offset(hw,
phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
phy_regarray_table_pg[i + 2]);
@@ -408,14 +386,14 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
if ((ppsc->rfpwr_state == ERFOFF) &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
- u32 InitializeCount = 0;
+ u32 init_count = 0;
do {
- InitializeCount++;
+ init_count++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
"IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
- } while (!rtstatus && (InitializeCount < 10));
+ } while (!rtstatus && (init_count < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
index a422c4db1a41..a3cc980c42fe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../rtl8192ce/phy.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
index 8185886daa8e..b4b6cde23341 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
@@ -1,26 +1,4 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../rtl8192ce/reg.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index cf551785eb08..f3a336e0ea98 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
@@ -148,17 +126,17 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u32 powerBase0, powerBase1;
+ u32 powerbase0, powerbase1;
u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0;
u8 i, powerlevel[2];
for (i = 0; i < 2; i++) {
powerlevel[i] = ppowerlevel[i];
legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
- powerBase0 = powerlevel[i] + legacy_pwrdiff;
- powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
- (powerBase0 << 8) | powerBase0;
- *(ofdmbase + i) = powerBase0;
+ powerbase0 = powerlevel[i] + legacy_pwrdiff;
+ powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+ (powerbase0 << 8) | powerbase0;
+ *(ofdmbase + i) = powerbase0;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
" [OFDM power base index rf(%c) = 0x%x]\n",
i == 0 ? 'A' : 'B', *(ofdmbase + i));
@@ -168,10 +146,10 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
powerlevel[i] += ht20_pwrdiff;
}
- powerBase1 = powerlevel[i];
- powerBase1 = (powerBase1 << 24) |
- (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
- *(mcsbase + i) = powerBase1;
+ powerbase1 = powerlevel[i];
+ powerbase1 = (powerbase1 << 24) |
+ (powerbase1 << 16) | (powerbase1 << 8) | powerbase1;
+ *(mcsbase + i) = powerbase1;
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
" [MCS power base index rf(%c) = 0x%x]\n",
i == 0 ? 'A' : 'B', *(mcsbase + i));
@@ -180,26 +158,26 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
u8 channel, u8 index,
- u32 *powerBase0,
- u32 *powerBase1,
+ u32 *powerbase0,
+ u32 *powerbase1,
u32 *p_outwriteval)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 i, chnlgroup = 0, pwr_diff_limit[4];
- u32 writeVal, customer_limit, rf;
+ u32 writeval, customer_limit, rf;
for (rf = 0; rf < 2; rf++) {
switch (rtlefuse->eeprom_regulatory) {
case 0:
chnlgroup = 0;
- writeVal = rtlphy->mcs_offset
+ writeval = rtlphy->mcs_offset
[chnlgroup][index + (rf ? 8 : 0)]
- + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "RTK better performance,writeVal(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "RTK better performance,writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
case 1:
if (rtlphy->pwrgroup_cnt == 1)
@@ -217,20 +195,20 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
else
chnlgroup += 4;
}
- writeVal = rtlphy->mcs_offset[chnlgroup][index +
+ writeval = rtlphy->mcs_offset[chnlgroup][index +
(rf ? 8 : 0)] +
- ((index < 2) ? powerBase0[rf] :
- powerBase1[rf]);
+ ((index < 2) ? powerbase0[rf] :
+ powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
case 2:
- writeVal = ((index < 2) ? powerBase0[rf] :
- powerBase1[rf]);
+ writeval = ((index < 2) ? powerbase0[rf] :
+ powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Better regulatory,writeVal(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "Better regulatory,writeval(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
case 3:
chnlgroup = 0;
@@ -275,36 +253,36 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
"Customer's limit rf(%c) = 0x%x\n",
rf == 0 ? 'A' : 'B', customer_limit);
- writeVal = customer_limit + ((index < 2) ?
- powerBase0[rf] : powerBase1[rf]);
+ writeval = customer_limit + ((index < 2) ?
+ powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Customer, writeVal rf(%c)= 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "Customer, writeval rf(%c)= 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
default:
chnlgroup = 0;
- writeVal = rtlphy->mcs_offset[chnlgroup]
+ writeval = rtlphy->mcs_offset[chnlgroup]
[index + (rf ? 8 : 0)] + ((index < 2) ?
- powerBase0[rf] : powerBase1[rf]);
+ powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "RTK better performance, writeValrf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeVal);
+ "RTK better performance, writevalrf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
break;
}
if (rtlpriv->dm.dynamic_txhighpower_lvl ==
TXHIGHPWRLEVEL_LEVEL1)
- writeVal = 0x14141414;
+ writeval = 0x14141414;
else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
TXHIGHPWRLEVEL_LEVEL2)
- writeVal = 0x00000000;
+ writeval = 0x00000000;
if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
- writeVal = writeVal - 0x06060606;
- *(p_outwriteval + rf) = writeVal;
+ writeval = writeval - 0x06060606;
+ *(p_outwriteval + rf) = writeval;
}
}
static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
- u8 index, u32 *pValue)
+ u8 index, u32 *value)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -319,33 +297,33 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
};
u8 i, rf, pwr_val[4];
- u32 writeVal;
+ u32 writeval;
u16 regoffset;
for (rf = 0; rf < 2; rf++) {
- writeVal = pValue[rf];
+ writeval = value[rf];
for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >>
+ pwr_val[i] = (u8)((writeval & (0x7f << (i * 8))) >>
(i * 8));
if (pwr_val[i] > RF6052_MAX_TX_PWR)
pwr_val[i] = RF6052_MAX_TX_PWR;
}
- writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
(pwr_val[1] << 8) | pwr_val[0];
if (rf == 0)
regoffset = regoffset_a[index];
else
regoffset = regoffset_b[index];
- rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Set 0x%x = %08x\n", regoffset, writeVal);
+ "Set 0x%x = %08x\n", regoffset, writeval);
if (((get_rf_type(rtlphy) == RF_2T2R) &&
(regoffset == RTXAGC_A_MCS15_MCS12 ||
regoffset == RTXAGC_B_MCS15_MCS12)) ||
((get_rf_type(rtlphy) != RF_2T2R) &&
(regoffset == RTXAGC_A_MCS07_MCS04 ||
regoffset == RTXAGC_B_MCS07_MCS04))) {
- writeVal = pwr_val[3];
+ writeval = pwr_val[3];
if (regoffset == RTXAGC_A_MCS15_MCS12 ||
regoffset == RTXAGC_A_MCS07_MCS04)
regoffset = 0xc90;
@@ -354,13 +332,13 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset = 0xc98;
for (i = 0; i < 3; i++) {
if (i != 2)
- writeVal = (writeVal > 8) ?
- (writeVal - 8) : 0;
+ writeval = (writeval > 8) ?
+ (writeval - 8) : 0;
else
- writeVal = (writeVal > 6) ?
- (writeVal - 6) : 0;
+ writeval = (writeval > 6) ?
+ (writeval - 6) : 0;
rtl_write_byte(rtlpriv, (u32)(regoffset + i),
- (u8)writeVal);
+ (u8)writeval);
}
}
}
@@ -369,18 +347,18 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
u8 *ppowerlevel, u8 channel)
{
- u32 writeVal[2], powerBase0[2], powerBase1[2];
+ u32 writeval[2], powerbase0[2], powerbase1[2];
u8 index = 0;
rtl92c_phy_get_power_base(hw, ppowerlevel,
- channel, &powerBase0[0], &powerBase1[0]);
+ channel, &powerbase0[0], &powerbase1[0]);
for (index = 0; index < 6; index++) {
_rtl92c_get_txpower_writeval_by_regulatory(hw,
channel, index,
- &powerBase0[0],
- &powerBase1[0],
- &writeVal[0]);
- _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
+ &powerbase0[0],
+ &powerbase1[0],
+ &writeval[0]);
+ _rtl92c_write_ofdm_power_reg(hw, index, &writeval[0]);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
index 07aec0b20cc9..2661e5f8f6da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CU_RF_H__
#define __RTL92CU_RF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 43e021b49260..c1c34dca39d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
index 4ea2cb225580..662440c4cdc9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CU_SW_H__
#define __RTL92CU_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
index b3ac981d88c6..addeac90ee0c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "table.h"
@@ -1269,7 +1247,7 @@ u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = {
0xc78, 0x621f001e,
};
-u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = {
+u32 RTL8192CUPHY_REG_1T_HPARRAY[RTL8192CUPHY_REG_1T_HPARRAYLENGTH] = {
0x024, 0x0011800f,
0x028, 0x00ffdb83,
0x040, 0x000c0004,
@@ -1461,7 +1439,7 @@ u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = {
0xf00, 0x00000300,
};
-u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = {
+u32 RTL8192CUPHY_REG_ARRAY_PG_HP[RTL8192CUPHY_REG_ARRAY_PG_HPLENGTH] = {
0xe00, 0xffffffff, 0x06080808,
0xe04, 0xffffffff, 0x00040406,
0xe08, 0x0000ff00, 0x00000000,
@@ -1576,7 +1554,7 @@ u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = {
0x868, 0xffffffff, 0x00000000,
};
-u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = {
+u32 RTL8192CURADIOA_1T_HPARRAY[RTL8192CURADIOA_1T_HPARRAYLENGTH] = {
0x000, 0x00030159,
0x001, 0x00031284,
0x002, 0x00098000,
@@ -1720,7 +1698,7 @@ u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = {
0x000, 0x00030159,
};
-u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = {
+u32 RTL8192CUAGCTAB_1T_HPARRAY[RTL8192CUAGCTAB_1T_HPARRAYLENGTH] = {
0xc78, 0x7b000001,
0xc78, 0x7b010001,
0xc78, 0x7b020001,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
index 851bf53d246c..efc89f7db80f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CU_TABLE__H_
#define __RTL92CU_TABLE__H_
@@ -53,15 +31,15 @@ extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH];
#define RTL8192CUAGCTAB_1TARRAYLENGTH 320
extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH];
-#define RTL8192CUPHY_REG_1T_HPArrayLength 378
-extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength];
+#define RTL8192CUPHY_REG_1T_HPARRAYLENGTH 378
+extern u32 RTL8192CUPHY_REG_1T_HPARRAY[RTL8192CUPHY_REG_1T_HPARRAYLENGTH];
-#define RTL8192CUPHY_REG_Array_PG_HPLength 336
-extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength];
+#define RTL8192CUPHY_REG_ARRAY_PG_HPLENGTH 336
+extern u32 RTL8192CUPHY_REG_ARRAY_PG_HP[RTL8192CUPHY_REG_ARRAY_PG_HPLENGTH];
-#define RTL8192CURadioA_1T_HPArrayLength 282
-extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength];
-#define RTL8192CUAGCTAB_1T_HPArrayLength 320
-extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength];
+#define RTL8192CURADIOA_1T_HPARRAYLENGTH 282
+extern u32 RTL8192CURADIOA_1T_HPARRAY[RTL8192CURADIOA_1T_HPARRAYLENGTH];
+#define RTL8192CUAGCTAB_1T_HPARRAYLENGTH 320
+extern u32 RTL8192CUAGCTAB_1T_HPARRAY[RTL8192CUAGCTAB_1T_HPARRAYLENGTH];
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 9ab56827124e..0020adc004a5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../usb.h"
@@ -36,7 +14,7 @@
#include "trx.h"
#include "../rtl8192c/fw_common.h"
-static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
+static int configvertoutep(struct ieee80211_hw *hw)
{
u8 ep_cfg, txqsele;
u8 ep_nums = 0;
@@ -69,7 +47,7 @@ static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
}
-static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
+static int configvernoutep(struct ieee80211_hw *hw)
{
u8 ep_cfg;
u8 ep_nums = 0;
@@ -98,7 +76,7 @@ static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
}
-static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
+static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
bool bwificfg, struct rtl_ep_map *ep_map)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -126,10 +104,11 @@ static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
}
}
-static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
+static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_ep_map *ep_map)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+
if (bwificfg) { /* for WMM */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for WMM.....\n");
@@ -153,7 +132,7 @@ static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
}
}
-static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
+static void oneoutepmapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
{
ep_map->ep_mapping[RTL_TXQ_BE] = 2;
ep_map->ep_mapping[RTL_TXQ_BK] = 2;
@@ -163,30 +142,31 @@ static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
ep_map->ep_mapping[RTL_TXQ_HI] = 2;
}
+
static int _out_ep_mapping(struct ieee80211_hw *hw)
{
int err = 0;
- bool bIsChipN, bwificfg = false;
+ bool ischipn, bwificfg = false;
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_ep_map *ep_map = &(rtlusb->ep_map);
- bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
+ ischipn = IS_NORMAL_CHIP(rtlhal->version);
switch (rtlusb->out_ep_nums) {
case 2:
- _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
+ twooutepmapping(hw, ischipn, bwificfg, ep_map);
break;
case 3:
/* Test chip doesn't support three out EPs. */
- if (!bIsChipN) {
+ if (!ischipn) {
err = -EINVAL;
goto err_out;
}
- _ThreeOutEpMapping(hw, bIsChipN, ep_map);
+ threeoutepmapping(hw, ischipn, ep_map);
break;
case 1:
- _OneOutEpMapping(hw, ep_map);
+ oneoutepmapping(hw, ep_map);
break;
default:
err = -EINVAL;
@@ -196,15 +176,17 @@ err_out:
return err;
}
+
/* endpoint mapping */
int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
int error = 0;
+
if (likely(IS_NORMAL_CHIP(rtlhal->version)))
- error = _ConfigVerNOutEP(hw);
+ error = configvernoutep(hw);
else
- error = _ConfigVerTOutEP(hw);
+ error = configvertoutep(hw);
if (error)
goto err_out;
error = _out_ep_mapping(hw);
@@ -320,7 +302,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
stats->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1)
&& (GET_RX_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
stats->is_ht = (bool)GET_RX_DESC_RX_HT(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -387,7 +369,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1)
&& (GET_RX_DESC_FAGGR(rxdesc) == 1));
stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
- stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
+ stats.rx_is40mhzpacket = (bool)GET_RX_DESC_BW(rxdesc);
stats.is_ht = (bool)GET_RX_DESC_RX_HT(rxdesc);
/* TODO: is center_freq changed when doing scan? */
/* TODO: Shall we add protection or just skip those two step? */
@@ -464,6 +446,7 @@ static void _rtl_fill_usb_tx_desc(u8 *txdesc)
SET_TX_DESC_LAST_SEG(txdesc, 1);
SET_TX_DESC_FIRST_SEG(txdesc, 1);
}
+
/**
* For HW recovery information
*/
@@ -553,11 +536,13 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
sta = ieee80211_find_sta(mac->vif, mac->bssid);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
+
SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
}
rcu_read_unlock();
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -610,28 +595,28 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
}
-void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
- u32 buffer_len, bool bIsPsPoll)
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc,
+ u32 buffer_len, bool is_pspoll)
{
/* Clear all status */
- memset(pDesc, 0, RTL_TX_HEADER_SIZE);
- SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */
- SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */
- SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
- SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */
- SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
+ memset(pdesc, 0, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_FIRST_SEG(pdesc, 1); /* bFirstSeg; */
+ SET_TX_DESC_LAST_SEG(pdesc, 1); /* bLastSeg; */
+ SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
+ SET_TX_DESC_PKT_SIZE(pdesc, buffer_len); /* Buffer size + command hdr */
+ SET_TX_DESC_QUEUE_SEL(pdesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
/* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error
* vlaue by Hw. */
- if (bIsPsPoll) {
- SET_TX_DESC_NAV_USE_HDR(pDesc, 1);
+ if (is_pspoll) {
+ SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
} else {
- SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */
- SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. */
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1); /* Hw set sequence number */
+ SET_TX_DESC_PKT_ID(pdesc, 0x100); /* set bit3 to 1. */
}
- SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
- SET_TX_DESC_OWN(pDesc, 1);
- SET_TX_DESC_TX_RATE(pDesc, DESC_RATE1M);
- _rtl_tx_desc_checksum(pDesc);
+ SET_TX_DESC_USE_RATE(pdesc, 1); /* use data rate which is set by Sw */
+ SET_TX_DESC_OWN(pdesc, 1);
+ SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+ _rtl_tx_desc_checksum(pdesc);
}
void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
index 15a66c547287..ae2e8aa212de 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CU_TRX_H__
#define __RTL92CU_TRX_H__
@@ -220,7 +198,6 @@ struct rx_drv_info_92c {
#define SET_TX_DESC_OWN(__txdesc, __value) \
SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value)
-
/* Dword 1 */
#define SET_TX_DESC_MACID(__txdesc, __value) \
SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value)
@@ -377,7 +354,6 @@ struct rx_drv_info_92c {
#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \
SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value)
-
int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
@@ -397,8 +373,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct sk_buff *skb,
u8 queue_index,
struct rtl_tcb_desc *tcb_desc);
-void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
- u32 buffer_len, bool bIsPsPoll);
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc,
+ u32 buffer_len, bool ispspoll);
void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 *pdesc, bool b_firstseg,
bool b_lastseg, struct sk_buff *skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
index cb7b9b727e3a..fa33b05db052 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92D_DEF_H__
#define __RTL92D_DEF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
index ac6d554b67c8..7cc86bb387a1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
@@ -864,7 +842,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
else
rf = 1;
if (thermalvalue) {
- ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
@@ -872,13 +850,13 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XATxIQIMBALANCE,
+ ROFDM0_XATXIQIMBALANCE,
ele_d, ofdm_index_old[0]);
break;
}
}
if (is2t) {
- ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d ==
@@ -887,7 +865,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
"Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
- ROFDM0_XBTxIQIMBALANCE, ele_d,
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
ofdm_index_old[1]);
break;
}
@@ -1059,11 +1037,11 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
* regC94, element B is always 0 */
value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
16) | ele_a;
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
MASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
@@ -1071,11 +1049,11 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
value32);
} else {
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
MASKDWORD,
ofdmswing_table
[(u8)ofdm_index[0]]);
- rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(24), 0x00);
@@ -1172,21 +1150,21 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
((ele_c & 0x3F) << 16) |
ele_a;
rtl_set_bbreg(hw,
- ROFDM0_XBTxIQIMBALANCE,
+ ROFDM0_XBTXIQIMBALANCE,
MASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
MASKH4BITS, value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), value32);
} else {
rtl_set_bbreg(hw,
- ROFDM0_XBTxIQIMBALANCE,
+ ROFDM0_XBTXIQIMBALANCE,
MASKDWORD,
ofdmswing_table
[(u8) ofdm_index[1]]);
- rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
MASKH4BITS, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), 0x00);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
index 5d346ec366ce..939cc45bfebd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_DM_H__
#define __RTL92C_DM_H__
@@ -44,7 +22,7 @@
#define DM_DIG_FA_TH1 0x400
#define DM_DIG_FA_TH2 0x600
-#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_SS_TH_LOW 30
#define RXPATHSELECTION_DIFF_TH 18
#define DM_RATR_STA_INIT 0
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 75bfa9dfef4a..2064813f9381 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -97,7 +75,7 @@ static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
do {
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
} while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) &&
- (!(value32 & FWDL_ChkSum_rpt)));
+ (!(value32 & FWDL_CHKSUM_RPT)));
if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n",
value32);
@@ -621,7 +599,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
struct sk_buff *skb = NULL;
u32 totalpacketlen;
bool rtstatus;
- u8 u1RsvdPageLoc[3] = { 0 };
+ u8 u1rsvdpageloc[3] = { 0 };
bool dlok = false;
u8 *beacon;
u8 *p_pspoll;
@@ -640,7 +618,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
- SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
/*--------------------------------------------------------
(3) null data
---------------------------------------------------------*/
@@ -648,7 +626,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
/*---------------------------------------------------------
(4) probe response
----------------------------------------------------------*/
@@ -656,14 +634,14 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
totalpacketlen = TOTAL_RESERVED_PKT_LEN;
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL",
&reserved_page_packet[0], totalpacketlen);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL",
- u1RsvdPageLoc, 3);
+ u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
if (!skb) {
dlok = false;
@@ -678,9 +656,9 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"Set RSVD page location to Fw\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "H2C_RSVDPAGE", u1RsvdPageLoc, 3);
+ "H2C_RSVDPAGE", u1rsvdpageloc, 3);
rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE,
- sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Set RSVD page location to Fw FAIL!!!!!!\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
index 6b435236a28e..bd8ea6d66dff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92D__FW__H__
#define __RTL92D__FW__H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 80123fd97221..c7f29a9be50d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
@@ -124,12 +102,12 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
break;
case HW_VAR_FWLPS_RF_ON:{
- enum rf_pwrstate rfState;
+ enum rf_pwrstate rfstate;
u32 val_rcr;
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
- (u8 *) (&rfState));
- if (rfState == ERFOFF) {
+ (u8 *)(&rfstate));
+ if (rfstate == ERFOFF) {
*((bool *) (val)) = true;
} else {
val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
@@ -280,23 +258,23 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_AMPDU_FACTOR: {
u8 factor_toset;
- u32 regtoSet;
+ u32 regtoset;
u8 *ptmp_byte = NULL;
u8 index;
if (rtlhal->macphymode == DUALMAC_DUALPHY)
- regtoSet = 0xb9726641;
+ regtoset = 0xb9726641;
else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
- regtoSet = 0x66626641;
+ regtoset = 0x66626641;
else
- regtoSet = 0xb972a841;
+ regtoset = 0xb972a841;
factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
factor_toset = 0xf;
for (index = 0; index < 4; index++) {
- ptmp_byte = (u8 *) (&regtoSet) + index;
+ ptmp_byte = (u8 *)(&regtoset) + index;
if ((*ptmp_byte & 0xf0) >
(factor_toset << 4))
*ptmp_byte = (*ptmp_byte & 0x0f)
@@ -305,7 +283,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*ptmp_byte = (*ptmp_byte & 0xf0)
| (factor_toset);
}
- rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoSet);
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
"Set HW_VAR_AMPDU_FACTOR: %#x\n",
factor_toset);
@@ -531,18 +509,18 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned short i;
u8 txpktbuf_bndy;
- u8 maxPage;
+ u8 maxpage;
bool status;
u32 value32; /* High+low page number */
u8 value8; /* normal page number */
if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
- maxPage = 255;
+ maxpage = 255;
txpktbuf_bndy = 246;
value8 = 0;
value32 = 0x80bf0d29;
} else {
- maxPage = 127;
+ maxpage = 127;
txpktbuf_bndy = 123;
value8 = 0;
value32 = 0x80750005;
@@ -598,14 +576,14 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* This ring buffer is used as beacon buffer if we */
/* config this MAC as two MAC transfer. */
/* Otherwise used as local loopback buffer. */
- for (i = txpktbuf_bndy; i < maxPage; i++) {
+ for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl92de_llt_write(hw, i, (i + 1));
if (true != status)
return status;
}
/* Let last entry point to the start entry of ring buffer */
- status = _rtl92de_llt_write(hw, maxPage, txpktbuf_bndy);
+ status = _rtl92de_llt_write(hw, maxpage, txpktbuf_bndy);
if (true != status)
return status;
@@ -1415,13 +1393,13 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
}
static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo,
- u8 *rom_content, bool autoLoadfail)
+ u8 *rom_content, bool autoloadfail)
{
u32 rfpath, eeaddr, group, offset1, offset2;
u8 i;
memset(pwrinfo, 0, sizeof(struct txpower_info));
- if (autoLoadfail) {
+ if (autoloadfail) {
for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
if (group < CHANNEL_GROUP_MAX_2G) {
@@ -1563,7 +1541,7 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct txpower_info pwrinfo;
u8 tempval[2], i, pwr, diff;
- u32 ch, rfPath, group;
+ u32 ch, rfpath, group;
_rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail);
if (!autoload_fail) {
@@ -1643,25 +1621,25 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
"Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
rtlefuse->delta_iqk, rtlefuse->delta_lck);
- for (rfPath = 0; rfPath < RF6052_MAX_PATH; rfPath++) {
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
group = rtl92d_get_chnlgroup_fromarray((u8) ch);
if (ch < CHANNEL_MAX_NUMBER_2G)
- rtlefuse->txpwrlevel_cck[rfPath][ch] =
- pwrinfo.cck_index[rfPath][group];
- rtlefuse->txpwrlevel_ht40_1s[rfPath][ch] =
- pwrinfo.ht40_1sindex[rfPath][group];
- rtlefuse->txpwr_ht20diff[rfPath][ch] =
- pwrinfo.ht20indexdiff[rfPath][group];
- rtlefuse->txpwr_legacyhtdiff[rfPath][ch] =
- pwrinfo.ofdmindexdiff[rfPath][group];
- rtlefuse->pwrgroup_ht20[rfPath][ch] =
- pwrinfo.ht20maxoffset[rfPath][group];
- rtlefuse->pwrgroup_ht40[rfPath][ch] =
- pwrinfo.ht40maxoffset[rfPath][group];
- pwr = pwrinfo.ht40_1sindex[rfPath][group];
- diff = pwrinfo.ht40_2sindexdiff[rfPath][group];
- rtlefuse->txpwrlevel_ht40_2s[rfPath][ch] =
+ rtlefuse->txpwrlevel_cck[rfpath][ch] =
+ pwrinfo.cck_index[rfpath][group];
+ rtlefuse->txpwrlevel_ht40_1s[rfpath][ch] =
+ pwrinfo.ht40_1sindex[rfpath][group];
+ rtlefuse->txpwr_ht20diff[rfpath][ch] =
+ pwrinfo.ht20indexdiff[rfpath][group];
+ rtlefuse->txpwr_legacyhtdiff[rfpath][ch] =
+ pwrinfo.ofdmindexdiff[rfpath][group];
+ rtlefuse->pwrgroup_ht20[rfpath][ch] =
+ pwrinfo.ht20maxoffset[rfpath][group];
+ rtlefuse->pwrgroup_ht40[rfpath][ch] =
+ pwrinfo.ht40maxoffset[rfpath][group];
+ pwr = pwrinfo.ht40_1sindex[rfpath][group];
+ diff = pwrinfo.ht40_2sindexdiff[rfpath][group];
+ rtlefuse->txpwrlevel_ht40_2s[rfpath][ch] =
(pwr > diff) ? (pwr - diff) : 0;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index e6c702e69ecf..ea495216d394 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92DE_HW_H__
#define __RTL92DE_HW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index 8851038c9eba..2b76a025deb8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
index 9874519704d3..7599c7e5ecc3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CE_LED_H__
#define __RTL92CE_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index de98d88199d6..0ae6371b6318 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -506,16 +484,16 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
/* Tx AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATxIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTxIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTxIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTxIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
/* Tx AFE control 2 */
- rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATxAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTxAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTxAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTxAFE;
+ rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
/* Tranceiver LSSI Readback SI mode */
rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
@@ -764,7 +742,7 @@ bool rtl92d_phy_bb_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RF_CTRL, value | RF_EN | RF_RSTB |
RF_SDMRSTB);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
- FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ FEN_DIO_PCIE | FEN_BB_GLB_RSTN | FEN_BBRSTB);
rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
if (!(IS_92D_SINGLEPHY(rtlpriv->rtlhal.version))) {
regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
@@ -1480,11 +1458,11 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
u8 result = 0;
u8 i;
u8 retrycount = 2;
- u32 TxOKBit = BIT(28), RxOKBit = BIT(27);
+ u32 TXOKBIT = BIT(28), RXOKBIT = BIT(27);
if (rtlhal->interfaceindex == 1) { /* PHY1 */
- TxOKBit = BIT(31);
- RxOKBit = BIT(30);
+ TXOKBIT = BIT(31);
+ RXOKBIT = BIT(30);
}
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK!\n");
/* path-A IQK setting */
@@ -1526,7 +1504,7 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
- if (!(regeac & TxOKBit) &&
+ if (!(regeac & TXOKBIT) &&
(((rege94 & 0x03FF0000) >> 16) != 0x142)) {
result |= 0x01;
} else { /* if Tx not OK, ignore Rx */
@@ -1536,7 +1514,7 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
}
/* if Tx is OK, check whether Rx is OK */
- if (!(regeac & RxOKBit) &&
+ if (!(regeac & RXOKBIT) &&
(((regea4 & 0x03FF0000) >> 16) != 0x132)) {
result |= 0x02;
break;
@@ -2165,7 +2143,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
if (final_candidate == 0xFF) {
return;
} else if (iqk_ok) {
- oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
MASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */
val_x = result[final_candidate][0];
if ((val_x & 0x00000200) != 0)
@@ -2174,7 +2152,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"X = 0x%x, tx0_a = 0x%x, oldval_0 0x%x\n",
val_x, tx0_a, oldval_0);
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 0x3FF, tx0_a);
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
((val_x * oldval_0 >> 7) & 0x1));
val_y = result[final_candidate][1];
@@ -2188,15 +2166,15 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Y = 0x%lx, tx0_c = 0x%lx\n",
val_y, tx0_c);
- rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000,
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
((tx0_c & 0x3C0) >> 6));
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 0x003F0000,
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
(tx0_c & 0x3F));
if (is2t)
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26),
((val_y * oldval_0 >> 7) & 0x1));
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n",
- rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
MASKDWORD));
if (txonly) {
RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
@@ -2224,7 +2202,7 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
if (final_candidate == 0xFF) {
return;
} else if (iqk_ok) {
- oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
+ oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
MASKDWORD) >> 22) & 0x3FF;
val_x = result[final_candidate][4];
if ((val_x & 0x00000200) != 0)
@@ -2232,7 +2210,7 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
tx1_a = (val_x * oldval_1) >> 8;
RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x, tx1_a = 0x%x\n",
val_x, tx1_a);
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 0x3FF, tx1_a);
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28),
((val_x * oldval_1 >> 7) & 0x1));
val_y = result[final_candidate][5];
@@ -2243,9 +2221,9 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
tx1_c = (val_y * oldval_1) >> 8;
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%lx, tx1_c = 0x%lx\n",
val_y, tx1_c);
- rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 0xF0000000,
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
((tx1_c & 0x3C0) >> 6));
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 0x003F0000,
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
(tx1_c & 0x3F));
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30),
((val_y * oldval_1 >> 7) & 0x1));
@@ -3086,13 +3064,13 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
if ((ppsc->rfpwr_state == ERFOFF) &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
- u32 InitializeCount = 0;
+ u32 initializecount = 0;
do {
- InitializeCount++;
+ initializecount++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
"IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
- } while (!rtstatus && (InitializeCount < 10));
+ } while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
@@ -3387,9 +3365,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
/* 5G LAN ON */
rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa);
/* TX BB gain shift*1,Just for testchip,0xc80,0xc88 */
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
0x40000100);
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD,
0x40000100);
if (rtlhal->macphymode == DUALMAC_DUALPHY) {
rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3443,16 +3421,16 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0);
/* TX BB gain shift,Just for testchip,0xc80,0xc88 */
if (rtlefuse->internal_pa_5g[0])
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
0x2d4000b5);
else
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
0x20000080);
if (rtlefuse->internal_pa_5g[1])
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD,
0x2d4000b5);
else
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD,
0x20000080);
if (rtlhal->macphymode == DUALMAC_DUALPHY) {
rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3481,10 +3459,10 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
/* update IQK related settings */
rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
- rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) |
BIT(26) | BIT(24), 0x00);
- rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, 0x00);
rtl_set_bbreg(hw, 0xca0, 0xF0000000, 0x00);
rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, 0x00);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
index 58b56b523dbe..8d07c783a023 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92D_PHY_H__
#define __RTL92D_PHY_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
index d4c4e76a9244..2783d7e7b227 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92D_REG_H__
#define __RTL92D_REG_H__
@@ -752,7 +730,7 @@
/* SYS_FUNC_EN */
#define FEN_BBRSTB BIT(0)
-#define FEN_BB_GLB_RSTn BIT(1)
+#define FEN_BB_GLB_RSTN BIT(1)
#define FEN_USBA BIT(2)
#define FEN_UPLL BIT(3)
#define FEN_USBD BIT(4)
@@ -773,7 +751,7 @@
#define PFM_ALDN BIT(1)
#define PFM_LDKP BIT(2)
#define PFM_WOWL BIT(3)
-#define EnPDN BIT(4)
+#define ENPDN BIT(4)
#define PDN_PL BIT(5)
#define APFM_ONMAC BIT(8)
#define APFM_OFF BIT(9)
@@ -910,7 +888,7 @@
/* MCUFWDL */
#define MCUFWDL_EN BIT(0)
#define MCUFWDL_RDY BIT(1)
-#define FWDL_ChkSum_rpt BIT(2)
+#define FWDL_CHKSUM_RPT BIT(2)
#define MACINI_RDY BIT(3)
#define BBINI_RDY BIT(4)
#define RFINI_RDY BIT(5)
@@ -1033,7 +1011,7 @@
#define RFPGA0_XA_LSSIPARAMETER 0x840
#define RFPGA0_XB_LSSIPARAMETER 0x844
-#define RFPGA0_RFWAkEUPPARAMETER 0x850
+#define RFPGA0_RFWAKEUPPARAMETER 0x850
#define RFPGA0_RFSLEEPUPPARAMETER 0x854
#define RFPGA0_XAB_SWITCHCONTROL 0x858
@@ -1135,14 +1113,14 @@
#define ROFDM0_AGCRSSITABLE 0xc78
#define ROFDM0_HTSTFAGC 0xc7c
-#define ROFDM0_XATxIQIMBALANCE 0xc80
-#define ROFDM0_XATxAFE 0xc84
-#define ROFDM0_XBTxIQIMBALANCE 0xc88
-#define ROFDM0_XBTxAFE 0xc8c
-#define ROFDM0_XCTxIQIMBALANCE 0xc90
-#define ROFDM0_XCTxAFE 0xc94
-#define ROFDM0_XDTxIQIMBALANCE 0xc98
-#define ROFDM0_XDTxAFE 0xc9c
+#define ROFDM0_XATXIQIMBALANCE 0xc80
+#define ROFDM0_XATXAFE 0xc84
+#define ROFDM0_XBTXIQIMBALANCE 0xc88
+#define ROFDM0_XBTXAFE 0xc8c
+#define ROFDM0_XCTXIQIMBALANCE 0xc90
+#define ROFDM0_XCTXAFE 0xc94
+#define ROFDM0_XDTXIQIMBALANCE 0xc98
+#define ROFDM0_XDTXAFE 0xc9c
#define ROFDM0_RXHPPARAMETER 0xce0
#define ROFDM0_TXPSEUDONOISEWGT 0xce4
@@ -1186,7 +1164,7 @@
#define ROFDM_AGCREPORT 0xdd0
#define ROFDM_RXSNR 0xdd4
#define ROFDM_RXEVMCSI 0xdd8
-#define ROFDM_SIGReport 0xddc
+#define ROFDM_SIGREPORT 0xddc
/* 8. PageE(0xE00) */
#define RTXAGC_A_RATE18_06 0xe00
@@ -1228,7 +1206,7 @@
#define RF_IPA 0x15
#define RF_POW_ABILITY 0x17
#define RF_MODE_AG 0x18
-#define rRfChannel 0x18
+#define rfchannel 0x18
#define RF_CHNLBW 0x18
#define RF_TOP 0x19
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index 021d3c538ac2..915a36f7af5e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
index c650a8dcdb26..4e646cc9ebc0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92D_RF_H__
#define __RTL92D_RF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index d5ba2bace79b..99e5cd9a5c86 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
index fd7d036e9abc..19db4ce30e44 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92DE_SW_H__
#define __RTL92DE_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
index 4badb183cf35..9b35c65d91f2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
@@ -1,28 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- * Created on 2010/12/23, 6:38
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
index 7fefc483ec28..2a45082f441f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
@@ -1,28 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- * Created on 2010/ 5/18, 1:41
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92DE_TABLE__H_
#define __RTL92DE_TABLE__H_
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index d7b023cf7400..d162884a9e00 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -494,7 +472,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
&& (GET_RX_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index f7f776539438..36820070fd76 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92DE_TRX_H__
#define __RTL92DE_TRX_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
index 9f7e7bb8610b..fe1b7cdab1d5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_DEF_H__
#define __RTL92E_DEF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index faed6e2dedf6..648f9108ed4b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
index 107d5a488fa8..ec48e5671b5b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_DM_H__
#define __RTL92E_DM_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 84a0d0eb72e1..7c5b54b71a92 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
index 6a2fbf20d579..cbfecea232a3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E__FW__H__
#define __RTL92E__FW__H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 3f2295fdb02d..53011c2a44f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
index 3a63bec9b0cc..fc224392615f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_HW_H__
#define __RTL92E_HW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
index 96c64785108b..78202ad4036e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
index 8ef640a2ef7f..6d775e14846f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_LED_H__
#define __RTL92E_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 8b072ee8e0d5..222abc41669c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
index 49bd0e554c65..1a5dbc628379 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_PHY_H__
#define __RTL92E_PHY_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
index 1a701d007f0c..515c1c3d6b43 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "pwrseq.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
index c570801508cc..2ae8347f7ebb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_PWRSEQ_H__
#define __RTL92E_PWRSEQ_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
index 1eaa1fab550d..0164e006f43e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_REG_H__
#define __RTL92E_REG_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
index bc76a91da762..6b8ef680dc57 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
index 039c0133ad6b..d7b391599926 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_RF_H__
#define __RTL92E_RF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index 9ea62599ecbb..b6ee7dae5943 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
index 21433d0332d0..36e29a2da0fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_SW_H__
#define __RTL92E_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
index abcdd0670fd8..fb66f610bf5d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "table.h"
u32 RTL8192EE_PHY_REG_ARRAY[] = {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
index bff9df88815d..0bc7ef58c3b9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_TABLE__H_
#define __RTL92E_TABLE__H_
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 14d6e3fc5767..09cf8180e4ff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -393,7 +371,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
if (status->crc)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (status->rx_is40Mhzpacket)
+ if (status->rx_is40mhzpacket)
rx_status->bw = RATE_INFO_BW_40;
if (status->is_ht)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
index 45df3e79f490..a9e5e620a653 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL92E_TRX_H__
#define __RTL92E_TRX_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
index b5ba0554a0cd..bb6b60814762 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __REALTEK_92S_DEF_H__
#define __REALTEK_92S_DEF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
index 44f510a94b09..a6e4384ceea1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
index 3af07efed73a..b9c5a92c2bd0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __RTL_92S_DM_H__
#define __RTL_92S_DM_H__
@@ -79,7 +58,7 @@ enum dm_ratr_sta {
#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
#define DM_DIG_HIGH_PWR_THRESH_LOW 70
-#define DM_DIG_MIN_Netcore 0x12
+#define DM_DIG_MIN_NETCORE 0x12
void rtl92s_dm_watchdog(struct ieee80211_hw *hw);
void rtl92s_dm_init(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index e7b1d7c0f542..541b7881735e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -158,7 +136,6 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct sk_buff *skb;
struct rtl_tcb_desc *tcb_desc;
- unsigned char *seg_ptr;
u16 frag_threshold = MAX_FIRMWARE_CODE_SIZE;
u16 frag_length, frag_offset = 0;
u16 extra_descoffset = 0;
@@ -188,9 +165,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
if (!skb)
return false;
skb_reserve(skb, extra_descoffset);
- seg_ptr = skb_put_data(skb,
- code_virtual_address + frag_offset,
- (u32)(frag_length - extra_descoffset));
+ skb_put_data(skb, code_virtual_address + frag_offset,
+ (u32)(frag_length - extra_descoffset));
tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
tcb_desc->queue_index = TXCMD_QUEUE;
@@ -569,14 +545,14 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
return true;
}
-void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 Mode)
+void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct h2c_set_pwrmode_parm pwrmode;
u16 max_wakeup_period = 0;
- pwrmode.mode = Mode;
+ pwrmode.mode = mode;
pwrmode.flag_low_traffic_en = 0;
pwrmode.flag_lpnav_en = 0;
pwrmode.flag_rf_low_snr_en = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
index 5827aa32cef0..99c6f7eefd85 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __REALTEK_FIRMWARE92S_H__
#define __REALTEK_FIRMWARE92S_H__
@@ -297,7 +276,7 @@ enum fw_h2c_cmd {
H2C_JOINBSS_CMD,
H2C_DISCONNECT_CMD, /*15*/
H2C_CREATEBSS_CMD,
- H2C_SETOPMode_CMD,
+ H2C_SETOPMODE_CMD,
H2C_SITESURVEY_CMD,
H2C_SETAUTH_CMD,
H2C_SETKEY_CMD, /*20*/
@@ -336,10 +315,10 @@ enum fw_h2c_cmd {
/* The following macros are used for FW
* CMD map and parameter updated. */
-#define FW_CMD_IO_CLR(rtlpriv, _Bit) \
+#define FW_CMD_IO_CLR(rtlpriv, _bit) \
do { \
udelay(1000); \
- rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit); \
+ rtlpriv->rtlhal.fwcmd_iomap &= (~_bit); \
} while (0)
#define FW_CMD_IO_UPDATE(rtlpriv, _val) \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 30dea7b9bc17..6d6e8994460d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
@@ -258,7 +236,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(
mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
- u8 acm_ctrl = rtl_read_byte(rtlpriv, AcmHwCtrl);
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, ACMHWCTRL);
acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ?
0x0 : 0x1);
@@ -266,13 +244,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
if (acm) {
switch (e_aci) {
case AC0_BE:
- acm_ctrl |= AcmHw_BeqEn;
+ acm_ctrl |= ACMHW_BEQEN;
break;
case AC2_VI:
- acm_ctrl |= AcmHw_ViqEn;
+ acm_ctrl |= ACMHW_VIQEN;
break;
case AC3_VO:
- acm_ctrl |= AcmHw_VoqEn;
+ acm_ctrl |= ACMHW_VOQEN;
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -283,13 +261,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
} else {
switch (e_aci) {
case AC0_BE:
- acm_ctrl &= (~AcmHw_BeqEn);
+ acm_ctrl &= (~ACMHW_BEQEN);
break;
case AC2_VI:
- acm_ctrl &= (~AcmHw_ViqEn);
+ acm_ctrl &= (~ACMHW_VIQEN);
break;
case AC3_VO:
- acm_ctrl &= (~AcmHw_VoqEn);
+ acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
pr_err("switch case %#x not processed\n",
@@ -300,7 +278,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
"HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl);
- rtl_write_byte(rtlpriv, AcmHwCtrl, acm_ctrl);
+ rtl_write_byte(rtlpriv, ACMHWCTRL, acm_ctrl);
break;
}
case HW_VAR_RCR:{
@@ -869,7 +847,7 @@ static void _rtl92se_macconfig_after_fwdownload(struct ieee80211_hw *hw)
/* 10. Power Save Control Register (Offset: 0x0260 - 0x02DF) */
/* 11. General Purpose Register (Offset: 0x02E0 - 0x02FF) */
/* 12. Host Interrupt Status Register (Offset: 0x0300 - 0x030F) */
- /* 13. Test Mode and Debug Control Register (Offset: 0x0310 - 0x034F) */
+ /* 13. Test mode and Debug Control Register (Offset: 0x0310 - 0x034F) */
/* 14. Set driver info, we only accept PHY status now. */
rtl_write_byte(rtlpriv, RXDRVINFO_SZ, 4);
@@ -1641,7 +1619,7 @@ void rtl92se_update_interrupt_mask(struct ieee80211_hw *hw,
rtl92se_enable_interrupt(hw);
}
-static void _rtl8192se_get_IC_Inferiority(struct ieee80211_hw *hw)
+static void _rtl8192se_get_ic_inferiority(struct ieee80211_hw *hw)
{
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1704,7 +1682,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->autoload_failflag)
return;
- _rtl8192se_get_IC_Inferiority(hw);
+ _rtl8192se_get_ic_inferiority(hw);
/* Read IC Version && Channel Plan */
/* VID, DID SE 0xA-D */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
index fa836ceefc8f..5edbc1ecd261 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __REALTEK_PCI92SE_HW_H__
#define __REALTEK_PCI92SE_HW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
index 33c307aca911..2d18bc1ee480 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
index 90e265d9ffc6..c9e481a8d943 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __REALTEK_PCI92SE_LED_H__
#define __REALTEK_PCI92SE_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 86cb853f7169..d5c0eb462315 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -549,13 +527,13 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
- u32 InitializeCount = 0;
+ u32 initializecount = 0;
do {
- InitializeCount++;
+ initializecount++;
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
"IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
- } while (!rtstatus && (InitializeCount < 10));
+ } while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
@@ -935,7 +913,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
if (!rtstatus) {
pr_err("Write BB Reg Fail!!\n");
- goto phy_BB8190_Config_ParaFile_Fail;
+ goto phy_bb8190_config_parafile_fail;
}
/* 2. If EEPROM or EFUSE autoload OK, We must config by
@@ -948,7 +926,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
pr_err("_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n");
- goto phy_BB8190_Config_ParaFile_Fail;
+ goto phy_bb8190_config_parafile_fail;
}
/* 3. BB AGC table Initialization */
@@ -956,7 +934,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
if (!rtstatus) {
pr_err("%s(): AGC Table Fail\n", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
+ goto phy_bb8190_config_parafile_fail;
}
/* Check if the CCK HighPower is turned ON. */
@@ -964,7 +942,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
rtlphy->cck_high_power = (bool)(rtl92s_phy_query_bb_reg(hw,
RFPGA0_XA_HSSIPARAMETER2, 0x200));
-phy_BB8190_Config_ParaFile_Fail:
+phy_bb8190_config_parafile_fail:
return rtstatus;
}
@@ -1029,13 +1007,13 @@ bool rtl92s_phy_mac_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 i;
u32 arraylength;
- u32 *ptraArray;
+ u32 *ptrarray;
arraylength = MAC_2T_ARRAYLENGTH;
- ptraArray = rtl8192semac_2t_array;
+ ptrarray = rtl8192semac_2t_array;
for (i = 0; i < arraylength; i = i + 2)
- rtl_write_byte(rtlpriv, ptraArray[i], (u8)ptraArray[i + 1]);
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]);
return true;
}
@@ -1128,7 +1106,7 @@ void rtl92s_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
}
static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
- u8 *cckpowerlevel, u8 *ofdmpowerLevel)
+ u8 *cckpowerlevel, u8 *ofdmpowerlevel)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -1144,15 +1122,15 @@ static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
/* 2. OFDM for 1T or 2T */
if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) {
/* Read HT 40 OFDM TX power */
- ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_1s[0][index];
- ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_1s[1][index];
+ ofdmpowerlevel[0] = rtlefuse->txpwrlevel_ht40_1s[0][index];
+ ofdmpowerlevel[1] = rtlefuse->txpwrlevel_ht40_1s[1][index];
} else if (rtlphy->rf_type == RF_2T2R) {
/* Read HT 40 OFDM TX power */
- ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index];
- ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index];
+ ofdmpowerlevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index];
+ ofdmpowerlevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index];
} else {
- ofdmpowerLevel[0] = 0;
- ofdmpowerLevel[1] = 0;
+ ofdmpowerlevel[0] = 0;
+ ofdmpowerlevel[1] = 0;
}
}
@@ -1171,7 +1149,7 @@ void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
/* [0]:RF-A, [1]:RF-B */
- u8 cckpowerlevel[2], ofdmpowerLevel[2];
+ u8 cckpowerlevel[2], ofdmpowerlevel[2];
if (!rtlefuse->txpwr_fromeprom)
return;
@@ -1183,18 +1161,18 @@ void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel)
* 1. For CCK.
* 2. For OFDM 1T or 2T */
_rtl92s_phy_get_txpower_index(hw, channel, &cckpowerlevel[0],
- &ofdmpowerLevel[0]);
+ &ofdmpowerlevel[0]);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"Channel-%d, cckPowerLevel (A / B) = 0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n",
channel, cckpowerlevel[0], cckpowerlevel[1],
- ofdmpowerLevel[0], ofdmpowerLevel[1]);
+ ofdmpowerlevel[0], ofdmpowerlevel[1]);
_rtl92s_phy_ccxpower_indexcheck(hw, channel, &cckpowerlevel[0],
- &ofdmpowerLevel[0]);
+ &ofdmpowerlevel[0]);
rtl92s_phy_rf6052_set_ccktxpower(hw, cckpowerlevel[0]);
- rtl92s_phy_rf6052_set_ofdmtxpower(hw, &ofdmpowerLevel[0], channel);
+ rtl92s_phy_rf6052_set_ofdmtxpower(hw, &ofdmpowerlevel[0], channel);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
index 7a3b6b623872..b8b5f097b67b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __RTL92S_PHY_H__
#define __RTL92S_PHY_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
index 5d445c2afcf3..45f968e0e57c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __REALTEK_92S_REG_H__
#define __REALTEK_92S_REG_H__
@@ -190,7 +169,7 @@
#define BCNTCFG 0x01E0
#define CWRR 0x01E2
#define ACMAVG 0x01E4
-#define AcmHwCtrl 0x01E7
+#define ACMHWCTRL 0x01E7
#define VO_ADMTM 0x01E8
#define VI_ADMTM 0x01EC
#define BE_ADMTM 0x01F0
@@ -256,7 +235,7 @@
#define INTA_MASK 0x0300
#define ISR 0x0308
-/* 13. Test Mode and Debug Control Registers */
+/* 13. Test mode and Debug Control Registers */
#define DBG_PORT_SWITCH 0x003A
#define BIST 0x0310
#define DBS 0x0314
@@ -346,9 +325,9 @@
#define SYS_SWHW_SEL BIT(14)
#define SYS_FWHW_SEL BIT(15)
-#define CmdEEPROM_En BIT(5)
-#define CmdEERPOMSEL BIT(4)
-#define Cmd9346CR_9356SEL BIT(4)
+#define CMDEEPROM_EN BIT(5)
+#define CMDEERPOMSEL BIT(4)
+#define CMD9346CR_9356SEL BIT(4)
#define AFE_MBEN BIT(1)
#define AFE_BGEN BIT(0)
@@ -369,9 +348,9 @@
#define APLL_EN BIT(0)
-#define AFR_CardBEn BIT(0)
+#define AFR_CARDBEN BIT(0)
#define AFR_CLKRUN_SEL BIT(1)
-#define AFR_FuncRegEn BIT(2)
+#define AFR_FUNCREGEN BIT(2)
#define APSDOFF_STATUS BIT(15)
#define APSDOFF BIT(14)
@@ -387,13 +366,13 @@
#define HCI_RXDMA_EN BIT(3)
#define HCI_TXDMA_EN BIT(2)
-#define StopHCCA BIT(6)
-#define StopHigh BIT(5)
-#define StopMgt BIT(4)
-#define StopVO BIT(3)
-#define StopVI BIT(2)
-#define StopBE BIT(1)
-#define StopBK BIT(0)
+#define STOPHCCA BIT(6)
+#define STOPHIGH BIT(5)
+#define STOPMGT BIT(4)
+#define STOPVO BIT(3)
+#define STOPVI BIT(2)
+#define STOPBE BIT(1)
+#define STOPBK BIT(0)
#define LBK_NORMAL 0x00
#define LBK_MAC_LB (BIT(0) | BIT(1) | BIT(3))
@@ -405,7 +384,7 @@
#define TXDMAPRE2FULL BIT(23)
#define DISCW BIT(20)
#define TCRICV BIT(19)
-#define CfendForm BIT(17)
+#define cfendform BIT(17)
#define TCRCRC BIT(16)
#define FAKE_IMEM_EN BIT(15)
#define TSFRST BIT(9)
@@ -530,7 +509,7 @@
#define RRSR_MCS5 BIT(17)
#define RRSR_MCS6 BIT(18)
#define RRSR_MCS7 BIT(19)
-#define BRSR_AckShortPmb BIT(23)
+#define BRSR_ACKSHORTPMB BIT(23)
#define RATR_1M 0x00000001
#define RATR_2M 0x00000002
@@ -581,13 +560,13 @@
#define AC_PARAM_ECW_MIN_OFFSET 8
#define AC_PARAM_AIFS_OFFSET 0
-#define AcmHw_HwEn BIT(0)
-#define AcmHw_BeqEn BIT(1)
-#define AcmHw_ViqEn BIT(2)
-#define AcmHw_VoqEn BIT(3)
-#define AcmHw_BeqStatus BIT(4)
-#define AcmHw_ViqStatus BIT(5)
-#define AcmHw_VoqStatus BIT(6)
+#define ACMHW_HWEN BIT(0)
+#define ACMHW_BEQEN BIT(1)
+#define ACMHW_VIQEN BIT(2)
+#define ACMHW_VOQEN BIT(3)
+#define ACMHW_BEQSTATUS BIT(4)
+#define ACMHW_VIQSTATUS BIT(5)
+#define ACMHW_VOQSTATUS BIT(6)
#define RETRY_LIMIT_SHORT_SHIFT 8
#define RETRY_LIMIT_LONG_SHIFT 0
@@ -845,7 +824,7 @@
#define TCR_SAT BIT(24)
#define RCR_MXDMA_OFFSET 8
#define RCR_FIFO_OFFSET 13
-#define RCR_OnlyErlPkt BIT(31)
+#define RCR_ONLYERLPKT BIT(31)
#define CWR 0xDC
#define RETRYCTR 0xDE
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index ea5b8ec45ec9..a37855f57e76 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
index e9ba283d05ad..a5959a228a35 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __INC_RTL92S_RF_H
#define __INC_RTL92S_RF_H
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index d55554b7fa9a..d1d84e7d47a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
index af449d6714e6..a31efba0e6b5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
@@ -1,25 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __REALTEK_PCI92SE_SW_H__
#define __REALTEK_PCI92SE_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
index 162578f05c85..776e28e99d53 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
@@ -1,28 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- * Created on 2010/ 5/18, 1:41
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "table.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
index aa3c7687d226..4fd09f11f5bc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
@@ -1,20 +1,6 @@
-/******************************************************************************
- * Copyright(c) 2008 - 2012 Realtek Corporation. All rights reserved.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __INC_HAL8192SE_FW_IMG_H
#define __INC_HAL8192SE_FW_IMG_H
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index e1904c39f147..efb432c6d785 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -275,7 +253,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
stats->isfirst_ampdu = (bool) ((GET_RX_STATUS_DESC_PAGGR(pdesc) == 1)
&& (GET_RX_STATUS_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_STATUS_DESC_TSFL(pdesc);
- stats->rx_is40Mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc);
+ stats->rx_is40mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc);
stats->is_ht = (bool)GET_RX_STATUS_DESC_RX_HT(pdesc);
stats->is_cck = SE_RX_HAL_IS_CCK_RATE(pdesc);
@@ -288,7 +266,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
if (stats->crc)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (stats->rx_is40Mhzpacket)
+ if (stats->rx_is40mhzpacket)
rx_status->bw = RATE_INFO_BW_40;
if (stats->is_ht)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
index 81a5445c04a3..90aa12fc6a7f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#ifndef __REALTEK_PCI92SE_TRX_H__
#define __REALTEK_PCI92SE_TRX_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
index 06c448c010fd..20a67dcc59cb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- **
- ** Copyright(c) 2009-2012 Realtek Corporation.
- **
- ** This program is free software; you can redistribute it and/or modify it
- ** under the terms of version 2 of the GNU General Public License as
- ** published by the Free Software Foundation.
- **
- ** This program is distributed in the hope that it will be useful, but WITHOUT
- ** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- ** FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- ** more details.
- **
- ** The full GNU General Public License is included in this distribution in the
- ** file called LICENSE.
- **
- ** Contact Information:
- ** wlanfae <wlanfae@realtek.com>
- ** Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- ** Hsinchu 300, Taiwan.
- ** Larry Finger <Larry.Finger@lwfinger.net>
- **
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_BTC_H__
#define __RTL8723E_BTC_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
index 847544817549..42958df6b5d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_DEF_H__
#define __RTL8723E_DEF_H__
@@ -116,7 +94,7 @@
#define IS_VENDOR_8723A_B_CUT(version) ((IS_8723_SERIES(version))\
? ((GET_CVID_CUT_VERSION(version) == \
B_CUT_VERSION) ? true : false) : false)
-#define IS_81xxC_VENDOR_UMC_B_CUT(version) ((IS_CHIP_VENDOR_UMC(version))\
+#define IS_81XXC_VENDOR_UMC_B_CUT(version) ((IS_CHIP_VENDOR_UMC(version))\
? ((GET_CVID_CUT_VERSION(version) == \
B_CUT_VERSION) ? true : false) : false)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index 42a6fba90ba9..514891ea2c64 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
@@ -151,8 +129,14 @@ static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
long rssi_val_min = 0;
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION &&
+ rtlpriv->link_info.bcn_rx_inperiod == 0)
+ return 0;
+
if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
(dm_digtable->cursta_cstate == DIG_STA_CONNECT)) {
if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
@@ -417,6 +401,8 @@ static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
} else {
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
+ dm_digtable->pre_cck_fa_state = 0;
+ dm_digtable->cur_cck_fa_state = 0;
}
dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
@@ -665,7 +651,7 @@ void rtl8723e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rate_adaptive *p_ra = &(rtlpriv->ra);
+ struct rate_adaptive *p_ra = &rtlpriv->ra;
p_ra->ratr_state = DM_RATR_STA_INIT;
p_ra->pre_ratr_state = DM_RATR_STA_INIT;
@@ -677,6 +663,89 @@ void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
}
+void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rate_adaptive *p_ra = &rtlpriv->ra;
+ u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
+ struct ieee80211_sta *sta = NULL;
+
+ if (is_hal_stop(rtlhal)) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ " driver is going to unload\n");
+ return;
+ }
+
+ if (!rtlpriv->dm.useramask) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ " driver does not control rate adaptive mask\n");
+ return;
+ }
+
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION) {
+ switch (p_ra->pre_ratr_state) {
+ case DM_RATR_STA_HIGH:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 20;
+ break;
+ case DM_RATR_STA_MIDDLE:
+ high_rssithresh_for_ra = 55;
+ low_rssithresh_for_ra = 20;
+ break;
+ case DM_RATR_STA_LOW:
+ high_rssithresh_for_ra = 60;
+ low_rssithresh_for_ra = 25;
+ break;
+ default:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 20;
+ break;
+ }
+
+ if (rtlpriv->link_info.bcn_rx_inperiod == 0)
+ switch (p_ra->pre_ratr_state) {
+ case DM_RATR_STA_HIGH:
+ default:
+ p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+ break;
+ case DM_RATR_STA_MIDDLE:
+ case DM_RATR_STA_LOW:
+ p_ra->ratr_state = DM_RATR_STA_LOW;
+ break;
+ }
+ else if (rtlpriv->dm.undec_sm_pwdb > high_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_HIGH;
+ else if (rtlpriv->dm.undec_sm_pwdb > low_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+ else
+ p_ra->ratr_state = DM_RATR_STA_LOW;
+
+ if (p_ra->pre_ratr_state != p_ra->ratr_state) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
+
+ rcu_read_lock();
+ sta = rtl_find_sta(hw, mac->bssid);
+ if (sta)
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
+ p_ra->ratr_state,
+ true);
+ rcu_read_unlock();
+
+ p_ra->pre_ratr_state = p_ra->ratr_state;
+ }
+ }
+}
+
void rtl8723e_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -826,7 +895,7 @@ void rtl8723e_dm_watchdog(struct ieee80211_hw *hw)
rtl8723e_dm_dynamic_bb_powersaving(hw);
rtl8723e_dm_dynamic_txpower(hw);
rtl8723e_dm_check_txpower_tracking(hw);
- /* rtl92c_dm_refresh_rate_adaptive_mask(hw); */
+ rtl8723e_dm_refresh_rate_adaptive_mask(hw);
rtl8723e_dm_bt_coexist(hw);
rtl8723e_dm_check_edca_turbo(hw);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
index a113780af08a..bcad516f0d28 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_DM_H__
#define __RTL8723E_DM_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index bf9859f74b6f..be451a6f7dbe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
index 2e668fcfc5c2..5c843736de8d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C__FW__H__
#define __RTL92C__FW__H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 5aac45d5a974..3ac31ec26517 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "hal_bt_coexist.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
index 45719fdcb067..0455a3712f3e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_HAL_BT_COEXIST_H__
#define __RTL8723E_HAL_BT_COEXIST_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 788de88ab1ee..680198280f8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#include "hal_btc.h"
#include "../pci.h"
#include "phy.h"
@@ -1426,7 +1405,6 @@ static void rtl8723e_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw)
static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 bt_retry_cnt;
u8 bt_info_original;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex] Get bt info by fw!!\n");
@@ -1438,7 +1416,6 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
"[BTCoex] c2h for bt_info not rcvd yet!!\n");
}
- bt_retry_cnt = hal_coex_8723.bt_retry_cnt;
bt_info_original = hal_coex_8723.c2h_bt_info_original;
/* when bt inquiry or page scan, we have to set h2c 0x25 */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
index 756868897d8b..620677128f11 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_HAL_BTC_H__
#define __RTL8723E_HAL_BTC_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index f783e4a8083d..6bab162e1bb8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
@@ -698,7 +676,7 @@ static bool _rtl8712e_init_mac(struct ieee80211_hw *hw)
/* HW Power on sequence */
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_PCI_MSK, Rtl8723_NIC_ENABLE_FLOW))
+ PWR_INTF_PCI_MSK, RTL8723_NIC_ENABLE_FLOW))
return false;
bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+2);
@@ -988,7 +966,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
if (IS_VENDOR_UMC_A_CUT(rtlhal->version)) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
- } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+ } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE);
rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31);
rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425);
@@ -1284,7 +1262,7 @@ static void _rtl8723e_poweroff_adapter(struct ieee80211_hw *hw)
/* Combo (PCIe + USB) Card and PCIe-MF Card */
/* 1. Run LPS WL RFOFF flow */
rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_PCI_MSK, Rtl8723_NIC_LPS_ENTER_FLOW);
+ PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW);
/* 2. 0x1F[7:0] = 0 */
/* turn off RF */
@@ -1304,7 +1282,7 @@ static void _rtl8723e_poweroff_adapter(struct ieee80211_hw *hw)
/* HW card disable configuration. */
rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_PCI_MSK, Rtl8723_NIC_DISABLE_FLOW);
+ PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW);
/* Reset MCU IO Wrapper */
u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
index c76e453f4f43..cf55f45d0779 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_HW_H__
#define __RTL8723E_HW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
index d567b0df0e9f..5e503dbc463b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
index c22b19f542a6..9f85845d23cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92CE_LED_H__
#define __RTL92CE_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 3f3327878b51..54a3aec1dfa7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -905,7 +883,7 @@ static void _rtl8723e_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel)
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+ if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) {
if (channel == 6 && rtlphy->current_chan_bw ==
HT_CHANNEL_WIDTH_20)
rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
index b85f5c7c5c01..98bfe02f66d5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL92C_PHY_H__
#define __RTL92C_PHY_H__
@@ -48,7 +26,7 @@
#define MAX_STALL_TIME 50
#define ANTENNADIVERSITYVALUE 0x80
#define MAX_TXPWR_IDX_NMODE_92S 63
-#define Reset_Cnt_Limit 3
+#define reset_cnt_limit 3
#define IQK_ADDA_REG_NUM 16
#define IQK_MAC_REG_NUM 4
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
index 2f7f81af8a55..041e3113a500 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../pwrseqcmd.h"
#include "pwrseq.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
index e6c3aac3e9fd..d9247a8f3039 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_PWRSEQ_H__
#define __RTL8723E_PWRSEQ_H__
@@ -327,14 +305,14 @@ extern struct wlan_pwr_cfg rtl8723A_leave_lps_flow
[RTL8723A_TRANS_LPS_TO_ACT_STEPS + RTL8723A_TRANS_END_STEPS];
/* RTL8723 Power Configuration CMDs for PCIe interface */
-#define Rtl8723_NIC_PWR_ON_FLOW rtl8723A_power_on_flow
-#define Rtl8723_NIC_RF_OFF_FLOW rtl8723A_radio_off_flow
-#define Rtl8723_NIC_DISABLE_FLOW rtl8723A_card_disable_flow
-#define Rtl8723_NIC_ENABLE_FLOW rtl8723A_card_enable_flow
-#define Rtl8723_NIC_SUSPEND_FLOW rtl8723A_suspend_flow
-#define Rtl8723_NIC_RESUME_FLOW rtl8723A_resume_flow
-#define Rtl8723_NIC_PDN_FLOW rtl8723A_hwpdn_flow
-#define Rtl8723_NIC_LPS_ENTER_FLOW rtl8723A_enter_lps_flow
-#define Rtl8723_NIC_LPS_LEAVE_FLOW rtl8723A_leave_lps_flow
+#define RTL8723_NIC_PWR_ON_FLOW rtl8723A_power_on_flow
+#define RTL8723_NIC_RF_OFF_FLOW rtl8723A_radio_off_flow
+#define RTL8723_NIC_DISABLE_FLOW rtl8723A_card_disable_flow
+#define RTL8723_NIC_ENABLE_FLOW rtl8723A_card_enable_flow
+#define RTL8723_NIC_SUSPEND_FLOW rtl8723A_suspend_flow
+#define RTL8723_NIC_RESUME_FLOW rtl8723A_resume_flow
+#define RTL8723_NIC_PDN_FLOW rtl8723A_hwpdn_flow
+#define RTL8723_NIC_LPS_ENTER_FLOW rtl8723A_enter_lps_flow
+#define RTL8723_NIC_LPS_LEAVE_FLOW rtl8723A_leave_lps_flow
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
index 30938cd9fce5..8696614c7cdd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_REG_H__
#define __RTL8723E_REG_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
index 89958b64b52d..9058527a7f94 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
index 7b44ebc0fac9..b445cfe65bf4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_RF_H__
#define __RTL8723E_RF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 07b82700d1de..4b370410c83c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
@@ -175,7 +153,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+ if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
fw_name = "rtlwifi/rtl8723fw_B.bin";
rtlpriv->max_fw_size = 0x6000;
@@ -266,8 +244,8 @@ static struct rtl_hal_ops rtl8723e_hal_ops = {
static struct rtl_mod_params rtl8723e_mod_params = {
.sw_crypto = false,
.inactiveps = true,
- .swctrl_lps = false,
- .fwctrl_lps = true,
+ .swctrl_lps = true,
+ .fwctrl_lps = false,
.aspm_support = 1,
.debug_level = 0,
.debug_mask = 0,
@@ -395,8 +373,8 @@ module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
index 46478780d262..200ce39a0dd7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_SW_H__
#define __RTL8723E_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
index 1bbee0bfac23..d895694be023 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "table.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
index a044f3c456fa..4897dbc73b9f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_TABLE__H_
#define __RTL8723E_TABLE__H_
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index d461d0c9631f..90dc91b0d35b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -302,7 +280,7 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
status->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) &&
(GET_RX_DESC_FAGGR(pdesc) == 1));
status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- status->rx_is40Mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
+ status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
status->is_cck = RX_HAL_IS_CCK_RATE(status->rate);
@@ -316,7 +294,7 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
if (status->crc)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (status->rx_is40Mhzpacket)
+ if (status->rx_is40mhzpacket)
rx_status->bw = RATE_INFO_BW_40;
if (status->is_ht)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
index d592b08d4ac8..4a19ea76b290 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL8723E_TRX_H__
#define __RTL8723E_TRX_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
index 5e5403d69220..f4886c868df8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_DEF_H__
#define __RTL8723BE_DEF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
index 47e87a21ae27..b13fd3c0c832 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
@@ -1017,12 +995,9 @@ static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw)
u32 edca_be = 0x5ea42b;
u32 iot_peer = 0;
bool b_is_cur_rdlstate;
- bool b_last_is_cur_rdlstate = false;
bool b_bias_on_rx = false;
bool b_edca_turbo_on = false;
- b_last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate;
-
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
index f752a2cad63d..c4f36e951747 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
@@ -1,24 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_DM_H__
#define __RTL8723BE_DM_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index f2441fbb92f1..4d7fa27f55ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
index 948d28646364..97ea77464139 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE__FW__H__
#define __RTL8723BE__FW__H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index b4f3f91b590e..979e5bfe5f45 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
@@ -319,12 +297,12 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
break;
case HW_VAR_FWLPS_RF_ON:{
- enum rf_pwrstate rfState;
+ enum rf_pwrstate rfstate;
u32 val_rcr;
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
- (u8 *)(&rfState));
- if (rfState == ERFOFF) {
+ (u8 *)(&rfstate));
+ if (rfstate == ERFOFF) {
*((bool *)(val)) = true;
} else {
val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
@@ -764,10 +742,10 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned short i;
u8 txpktbuf_bndy;
- u8 maxPage;
+ u8 maxpage;
bool status;
- maxPage = 255;
+ maxpage = 255;
txpktbuf_bndy = 245;
rtl_write_dword(rtlpriv, REG_TRXFF_BNDY,
@@ -792,13 +770,13 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw)
if (!status)
return status;
- for (i = txpktbuf_bndy; i < maxPage; i++) {
+ for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl8723be_llt_write(hw, i, (i + 1));
if (!status)
return status;
}
- status = _rtl8723be_llt_write(hw, maxPage, txpktbuf_bndy);
+ status = _rtl8723be_llt_write(hw, maxpage, txpktbuf_bndy);
if (!status)
return status;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
index ae856a19e81a..eb797e9bb931 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_HW_H__
#define __RTL8723BE_HW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
index 4f7890d62c21..525f2c47da5b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
index c57de379ee8d..8ac59374b632 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_LED_H__
#define __RTL8723BE_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 1263b12db5dc..aa8a0950fcea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -332,7 +310,7 @@ static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
@@ -374,7 +352,7 @@ static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
@@ -694,7 +672,7 @@ static u8 _rtl8723be_get_rate_section_index(u32 regaddr)
else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
index = (u8)((regaddr - 0xE20) / 4);
break;
- };
+ }
return index;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
index 9021d4745ab7..a59813ea7c72 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_PHY_H__
#define __RTL8723BE_PHY_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
index a1bb1f6116fb..95adac66676e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../pwrseqcmd.h"
#include "pwrseq.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
index 3367cfbc9502..57eac4864184 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_PWRSEQ_H__
#define __RTL8723BE_PWRSEQ_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
index 95c4f8e206c7..28fd22b2a792 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_REG_H__
#define __RTL8723BE_REG_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index 48491454b878..af72e489e31c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
index f423e157020f..81537a1a4fdf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_RF_H__
#define __RTL8723BE_RF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index c9f7b042d9c6..00e6254bf82b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
index a7b25e769950..6ecacf9fbfd7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_SW_H__
#define __RTL8723BE_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
index 160fee8333ae..5864be89d187 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include <linux/kernel.h>
#include "table.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
index 1deaffe22251..cf0c8d58cea1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_TABLE__H_
#define __RTL8723BE_TABLE__H_
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 9f8dfb5af774..9ada9a06c6ea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -338,7 +316,7 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
status->isfirst_ampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- status->rx_is40Mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
+ status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
status->bandwidth = (u8)GET_RX_DESC_BW(pdesc);
status->macid = GET_RX_DESC_MACID(pdesc);
status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
@@ -372,7 +350,7 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
if (status->crc)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (status->rx_is40Mhzpacket)
+ if (status->rx_is40mhzpacket)
rx_status->bw = RATE_INFO_BW_40;
if (status->is_ht)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
index 609f7ad7f787..11e75a4e68bd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __RTL8723BE_TRX_H__
#define __RTL8723BE_TRX_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
index 064340641913..46ab90332eb7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "dm_common.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
index 5c1b94ce2f86..6300be65aba0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __DM_COMMON_H__
#define __DM_COMMON_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index 521039c5d4ce..18ce2856a91b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
index 77c25a976233..b527fcbbdf08 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __FW_COMMON_H__
#define __FW_COMMON_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
index 9014a94fac6a..f5a9ecbf7363 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include <linux/module.h>
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
index 43d24e1ee5e6..aae14c68bf69 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#include "../wifi.h"
#include "phy_common.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
index 83b891a9adb8..edf1c52f0ee2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2014 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2014 Realtek Corporation.*/
#ifndef __PHY_COMMON__
#define __PHY_COMMON__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
index 3fe3aaa5fe3c..827bc5f35d2a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_DEF_H__
#define __RTL8821AE_DEF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 3be8c88971e2..49d05b631ba1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../wifi.h"
#include "../base.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
index 625a6bbb21fc..137ed735d80d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_DM_H__
#define __RTL8821AE_DM_H__
@@ -92,11 +70,11 @@
#define DM_REG_CCK_CCA_CNT_11N 0xA60
#define DM_REG_BB_PWR_SAV4_11N 0xA74
/*PAGE B */
-#define DM_REG_LNA_SWITCH_11N 0xB2C
-#define DM_REG_PATH_SWITCH_11N 0xB30
-#define DM_REG_RSSI_CTRL_11N 0xB38
-#define DM_REG_CONFIG_ANTA_11N 0xB68
-#define DM_REG_RSSI_BT_11N 0xB9C
+#define DM_REG_LNA_SWITCH_11N 0XB2C
+#define DM_REG_PATH_SWITCH_11N 0XB30
+#define DM_REG_RSSI_CTRL_11N 0XB38
+#define DM_REG_CONFIG_ANTA_11N 0XB68
+#define DM_REG_RSSI_BT_11N 0XB9C
/*PAGE C */
#define DM_REG_OFDM_FA_HOLDC_11N 0xC00
#define DM_REG_RX_PATH_11N 0xC04
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index d868a034659f..dc0eb692088f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -844,9 +822,9 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* page 3: qos null data */
- 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7,
+ 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -877,9 +855,9 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* page 4: BT qos null data */
- 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7,
+ 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -911,9 +889,9 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* page 5~7 is for wowlan */
/* page 5: ARP resp */
- 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7,
+ 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00,
0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x06,
0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x02,
0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x00, 0x00,
@@ -1015,7 +993,7 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
/* page 0: beacon */
0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x60, 0x00,
+ 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x60, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x00, 0x20, 0x04, 0x00, 0x03, 0x32, 0x31,
0x35, 0x01, 0x08, 0x82, 0x84, 0x8B, 0x96, 0x0C,
@@ -1078,8 +1056,8 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* page 1: ps-poll */
- 0xA4, 0x10, 0x09, 0xC0, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0xA4, 0x10, 0x09, 0xC0, 0x84, 0xC9, 0XB2, 0xA7,
+ 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1143,9 +1121,9 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* page 2: null data */
- 0x48, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x48, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7,
+ 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1208,9 +1186,9 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* page 3: Qos null data */
- 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7,
+ 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1273,9 +1251,9 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* page 4: BT Qos null data */
- 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7,
+ 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1339,9 +1317,9 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* page 5~7 is for wowlan */
/* page 5: ARP resp */
- 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
- 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
- 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7,
+ 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00,
0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x06,
0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x02,
0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x00, 0x00,
@@ -1543,8 +1521,8 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
struct sk_buff *skb = NULL;
u32 totalpacketlen;
bool rtstatus;
- u8 u1RsvdPageLoc[5] = { 0 };
- u8 u1RsvdPageLoc2[7] = { 0 };
+ u8 u1rsvdpageloc[5] = { 0 };
+ u8 u1rsvdpageloc2[7] = { 0 };
bool b_dlok = false;
u8 *beacon;
u8 *p_pspoll;
@@ -1574,7 +1552,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
- SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
/*--------------------------------------------------------
* (3) null data
@@ -1585,7 +1563,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
/*---------------------------------------------------------
* (4) Qos null data
@@ -1596,7 +1574,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr);
SET_80211_HDR_ADDRESS3(qosnull, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOSNULL_PG);
/*---------------------------------------------------------
* (5) BT Qos null data
@@ -1607,7 +1585,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOSNULL_PG);
if (!dl_whole_packets) {
totalpacketlen = 512 * (BT_QOSNULL_PG + 1) - 40;
@@ -1622,20 +1600,20 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_HDR_ADDRESS2(arpresp, mac->mac_addr);
SET_80211_HDR_ADDRESS3(arpresp, mac->bssid);
- SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG);
+ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1rsvdpageloc2, ARPRESP_PG);
/*---------------------------------------------------------
* (7) Remote Wake Ctrl
*----------------------------------------------------------
*/
- SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2,
+ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1rsvdpageloc2,
REMOTE_PG);
/*---------------------------------------------------------
* (8) GTK Ext Memory
*----------------------------------------------------------
*/
- SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG);
+ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1rsvdpageloc2, GTKEXT_PG);
totalpacketlen = TOTAL_RESERVED_PKT_LEN_8812 - 40;
@@ -1654,14 +1632,14 @@ out:
if (!b_dl_finished && b_dlok) {
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 5);
+ "H2C_RSVDPAGE:\n", u1rsvdpageloc, 5);
rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
- sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ sizeof(u1rsvdpageloc), u1rsvdpageloc);
if (dl_whole_packets) {
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "wowlan H2C_RSVDPAGE:\n", u1RsvdPageLoc2, 7);
+ "wowlan H2C_RSVDPAGE:\n", u1rsvdpageloc2, 7);
rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AOAC_RSVDPAGE,
- sizeof(u1RsvdPageLoc2), u1RsvdPageLoc2);
+ sizeof(u1rsvdpageloc2), u1rsvdpageloc2);
}
}
@@ -1678,8 +1656,8 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
struct sk_buff *skb = NULL;
u32 totalpacketlen;
bool rtstatus;
- u8 u1RsvdPageLoc[5] = { 0 };
- u8 u1RsvdPageLoc2[7] = { 0 };
+ u8 u1rsvdpageloc[5] = { 0 };
+ u8 u1rsvdpageloc2[7] = { 0 };
bool b_dlok = false;
u8 *beacon;
u8 *p_pspoll;
@@ -1709,7 +1687,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
- SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
/*--------------------------------------------------------
* (3) null data
@@ -1720,7 +1698,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
/*---------------------------------------------------------
* (4) Qos null data
@@ -1731,7 +1709,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr);
SET_80211_HDR_ADDRESS3(qosnull, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOSNULL_PG);
/*---------------------------------------------------------
* (5) Qos null data
@@ -1742,7 +1720,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG);
+ SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOSNULL_PG);
if (!dl_whole_packets) {
totalpacketlen = 256 * (BT_QOSNULL_PG + 1) - 40;
@@ -1757,20 +1735,20 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_80211_HDR_ADDRESS2(arpresp, mac->mac_addr);
SET_80211_HDR_ADDRESS3(arpresp, mac->bssid);
- SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG);
+ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1rsvdpageloc2, ARPRESP_PG);
/*---------------------------------------------------------
* (7) Remote Wake Ctrl
*----------------------------------------------------------
*/
- SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2,
+ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1rsvdpageloc2,
REMOTE_PG);
/*---------------------------------------------------------
* (8) GTK Ext Memory
*----------------------------------------------------------
*/
- SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG);
+ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1rsvdpageloc2, GTKEXT_PG);
totalpacketlen = TOTAL_RESERVED_PKT_LEN_8821 - 40;
@@ -1792,16 +1770,16 @@ out:
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 5);
+ "H2C_RSVDPAGE:\n", u1rsvdpageloc, 5);
rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
- sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ sizeof(u1rsvdpageloc), u1rsvdpageloc);
if (dl_whole_packets) {
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"wowlan H2C_RSVDPAGE:\n",
- u1RsvdPageLoc2, 7);
+ u1rsvdpageloc2, 7);
rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AOAC_RSVDPAGE,
- sizeof(u1RsvdPageLoc2),
- u1RsvdPageLoc2);
+ sizeof(u1rsvdpageloc2),
+ u1rsvdpageloc2);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index 99c902ff0b84..e11e496b7277 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -1,26 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE__FW__H__
#define __RTL8821AE__FW__H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index ba258318ee9f..198d419ebb9c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../wifi.h"
#include "../efuse.h"
@@ -2606,50 +2584,50 @@ static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw,
u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 rfPath, eeAddr = EEPROM_TX_PWR_INX, group, TxCount = 0;
+ u32 rfpath, eeaddr = EEPROM_TX_PWR_INX, group, txcount = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"hal_ReadPowerValueFromPROM8821ae(): hwinfo[0x%x]=0x%x\n",
- (eeAddr+1), hwinfo[eeAddr+1]);
- if (0xFF == hwinfo[eeAddr+1]) /*YJ,add,120316*/
+ (eeaddr + 1), hwinfo[eeaddr + 1]);
+ if (hwinfo[eeaddr + 1] == 0xFF) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"auto load fail : Use Default value!\n");
- for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+ for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) {
/*2.4G default value*/
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
- pwrinfo24g->index_cck_base[rfPath][group] = 0x2D;
- pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D;
+ pwrinfo24g->index_cck_base[rfpath][group] = 0x2D;
+ pwrinfo24g->index_bw40_base[rfpath][group] = 0x2D;
}
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- if (TxCount == 0) {
- pwrinfo24g->bw20_diff[rfPath][0] = 0x02;
- pwrinfo24g->ofdm_diff[rfPath][0] = 0x04;
+ for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) {
+ if (txcount == 0) {
+ pwrinfo24g->bw20_diff[rfpath][0] = 0x02;
+ pwrinfo24g->ofdm_diff[rfpath][0] = 0x04;
} else {
- pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE;
- pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE;
- pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE;
- pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE;
+ pwrinfo24g->bw20_diff[rfpath][txcount] = 0xFE;
+ pwrinfo24g->bw40_diff[rfpath][txcount] = 0xFE;
+ pwrinfo24g->cck_diff[rfpath][txcount] = 0xFE;
+ pwrinfo24g->ofdm_diff[rfpath][txcount] = 0xFE;
}
}
/*5G default value*/
for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++)
- pwrinfo5g->index_bw40_base[rfPath][group] = 0x2A;
-
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- if (TxCount == 0) {
- pwrinfo5g->ofdm_diff[rfPath][0] = 0x04;
- pwrinfo5g->bw20_diff[rfPath][0] = 0x00;
- pwrinfo5g->bw80_diff[rfPath][0] = 0xFE;
- pwrinfo5g->bw160_diff[rfPath][0] = 0xFE;
+ pwrinfo5g->index_bw40_base[rfpath][group] = 0x2A;
+
+ for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) {
+ if (txcount == 0) {
+ pwrinfo5g->ofdm_diff[rfpath][0] = 0x04;
+ pwrinfo5g->bw20_diff[rfpath][0] = 0x00;
+ pwrinfo5g->bw80_diff[rfpath][0] = 0xFE;
+ pwrinfo5g->bw160_diff[rfpath][0] = 0xFE;
} else {
- pwrinfo5g->ofdm_diff[rfPath][0] = 0xFE;
- pwrinfo5g->bw20_diff[rfPath][0] = 0xFE;
- pwrinfo5g->bw40_diff[rfPath][0] = 0xFE;
- pwrinfo5g->bw80_diff[rfPath][0] = 0xFE;
- pwrinfo5g->bw160_diff[rfPath][0] = 0xFE;
+ pwrinfo5g->ofdm_diff[rfpath][0] = 0xFE;
+ pwrinfo5g->bw20_diff[rfpath][0] = 0xFE;
+ pwrinfo5g->bw40_diff[rfpath][0] = 0xFE;
+ pwrinfo5g->bw80_diff[rfpath][0] = 0xFE;
+ pwrinfo5g->bw160_diff[rfpath][0] = 0xFE;
}
}
}
@@ -2658,112 +2636,112 @@ static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw,
rtl_priv(hw)->efuse.txpwr_fromeprom = true;
- for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+ for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) {
/*2.4G default value*/
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
- pwrinfo24g->index_cck_base[rfPath][group] = hwinfo[eeAddr++];
- if (pwrinfo24g->index_cck_base[rfPath][group] == 0xFF)
- pwrinfo24g->index_cck_base[rfPath][group] = 0x2D;
+ pwrinfo24g->index_cck_base[rfpath][group] = hwinfo[eeaddr++];
+ if (pwrinfo24g->index_cck_base[rfpath][group] == 0xFF)
+ pwrinfo24g->index_cck_base[rfpath][group] = 0x2D;
}
for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) {
- pwrinfo24g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++];
- if (pwrinfo24g->index_bw40_base[rfPath][group] == 0xFF)
- pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D;
+ pwrinfo24g->index_bw40_base[rfpath][group] = hwinfo[eeaddr++];
+ if (pwrinfo24g->index_bw40_base[rfpath][group] == 0xFF)
+ pwrinfo24g->index_bw40_base[rfpath][group] = 0x2D;
}
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- if (TxCount == 0) {
- pwrinfo24g->bw40_diff[rfPath][TxCount] = 0;
+ for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) {
+ if (txcount == 0) {
+ pwrinfo24g->bw40_diff[rfpath][txcount] = 0;
/*bit sign number to 8 bit sign number*/
- pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
- if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3))
- pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo24g->bw20_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4;
+ if (pwrinfo24g->bw20_diff[rfpath][txcount] & BIT(3))
+ pwrinfo24g->bw20_diff[rfpath][txcount] |= 0xF0;
/*bit sign number to 8 bit sign number*/
- pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
- if (pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3))
- pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo24g->ofdm_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f);
+ if (pwrinfo24g->ofdm_diff[rfpath][txcount] & BIT(3))
+ pwrinfo24g->ofdm_diff[rfpath][txcount] |= 0xF0;
- pwrinfo24g->cck_diff[rfPath][TxCount] = 0;
- eeAddr++;
+ pwrinfo24g->cck_diff[rfpath][txcount] = 0;
+ eeaddr++;
} else {
- pwrinfo24g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr]&0xf0) >> 4;
- if (pwrinfo24g->bw40_diff[rfPath][TxCount] & BIT(3))
- pwrinfo24g->bw40_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo24g->bw40_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4;
+ if (pwrinfo24g->bw40_diff[rfpath][txcount] & BIT(3))
+ pwrinfo24g->bw40_diff[rfpath][txcount] |= 0xF0;
- pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
- if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3))
- pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo24g->bw20_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f);
+ if (pwrinfo24g->bw20_diff[rfpath][txcount] & BIT(3))
+ pwrinfo24g->bw20_diff[rfpath][txcount] |= 0xF0;
- eeAddr++;
+ eeaddr++;
- pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
- if (pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3))
- pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo24g->ofdm_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4;
+ if (pwrinfo24g->ofdm_diff[rfpath][txcount] & BIT(3))
+ pwrinfo24g->ofdm_diff[rfpath][txcount] |= 0xF0;
- pwrinfo24g->cck_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
- if (pwrinfo24g->cck_diff[rfPath][TxCount] & BIT(3))
- pwrinfo24g->cck_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo24g->cck_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f);
+ if (pwrinfo24g->cck_diff[rfpath][txcount] & BIT(3))
+ pwrinfo24g->cck_diff[rfpath][txcount] |= 0xF0;
- eeAddr++;
+ eeaddr++;
}
}
/*5G default value*/
for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) {
- pwrinfo5g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++];
- if (pwrinfo5g->index_bw40_base[rfPath][group] == 0xFF)
- pwrinfo5g->index_bw40_base[rfPath][group] = 0xFE;
+ pwrinfo5g->index_bw40_base[rfpath][group] = hwinfo[eeaddr++];
+ if (pwrinfo5g->index_bw40_base[rfpath][group] == 0xFF)
+ pwrinfo5g->index_bw40_base[rfpath][group] = 0xFE;
}
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- if (TxCount == 0) {
- pwrinfo5g->bw40_diff[rfPath][TxCount] = 0;
+ for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) {
+ if (txcount == 0) {
+ pwrinfo5g->bw40_diff[rfpath][txcount] = 0;
- pwrinfo5g->bw20_diff[rfPath][0] = (hwinfo[eeAddr] & 0xf0) >> 4;
- if (pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3))
- pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo5g->bw20_diff[rfpath][0] = (hwinfo[eeaddr] & 0xf0) >> 4;
+ if (pwrinfo5g->bw20_diff[rfpath][txcount] & BIT(3))
+ pwrinfo5g->bw20_diff[rfpath][txcount] |= 0xF0;
- pwrinfo5g->ofdm_diff[rfPath][0] = (hwinfo[eeAddr] & 0x0f);
- if (pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3))
- pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo5g->ofdm_diff[rfpath][0] = (hwinfo[eeaddr] & 0x0f);
+ if (pwrinfo5g->ofdm_diff[rfpath][txcount] & BIT(3))
+ pwrinfo5g->ofdm_diff[rfpath][txcount] |= 0xF0;
- eeAddr++;
+ eeaddr++;
} else {
- pwrinfo5g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
- if (pwrinfo5g->bw40_diff[rfPath][TxCount] & BIT(3))
- pwrinfo5g->bw40_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo5g->bw40_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4;
+ if (pwrinfo5g->bw40_diff[rfpath][txcount] & BIT(3))
+ pwrinfo5g->bw40_diff[rfpath][txcount] |= 0xF0;
- pwrinfo5g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
- if (pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3))
- pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo5g->bw20_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f);
+ if (pwrinfo5g->bw20_diff[rfpath][txcount] & BIT(3))
+ pwrinfo5g->bw20_diff[rfpath][txcount] |= 0xF0;
- eeAddr++;
+ eeaddr++;
}
}
- pwrinfo5g->ofdm_diff[rfPath][1] = (hwinfo[eeAddr] & 0xf0) >> 4;
- pwrinfo5g->ofdm_diff[rfPath][2] = (hwinfo[eeAddr] & 0x0f);
+ pwrinfo5g->ofdm_diff[rfpath][1] = (hwinfo[eeaddr] & 0xf0) >> 4;
+ pwrinfo5g->ofdm_diff[rfpath][2] = (hwinfo[eeaddr] & 0x0f);
- eeAddr++;
+ eeaddr++;
- pwrinfo5g->ofdm_diff[rfPath][3] = (hwinfo[eeAddr] & 0x0f);
+ pwrinfo5g->ofdm_diff[rfpath][3] = (hwinfo[eeaddr] & 0x0f);
- eeAddr++;
+ eeaddr++;
- for (TxCount = 1; TxCount < MAX_TX_COUNT; TxCount++) {
- if (pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3))
- pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0;
+ for (txcount = 1; txcount < MAX_TX_COUNT; txcount++) {
+ if (pwrinfo5g->ofdm_diff[rfpath][txcount] & BIT(3))
+ pwrinfo5g->ofdm_diff[rfpath][txcount] |= 0xF0;
}
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- pwrinfo5g->bw80_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4;
+ for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) {
+ pwrinfo5g->bw80_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4;
/* 4bit sign number to 8 bit sign number */
- if (pwrinfo5g->bw80_diff[rfPath][TxCount] & BIT(3))
- pwrinfo5g->bw80_diff[rfPath][TxCount] |= 0xF0;
+ if (pwrinfo5g->bw80_diff[rfpath][txcount] & BIT(3))
+ pwrinfo5g->bw80_diff[rfpath][txcount] |= 0xF0;
/* 4bit sign number to 8 bit sign number */
- pwrinfo5g->bw160_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f);
- if (pwrinfo5g->bw160_diff[rfPath][TxCount] & BIT(3))
- pwrinfo5g->bw160_diff[rfPath][TxCount] |= 0xF0;
+ pwrinfo5g->bw160_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f);
+ if (pwrinfo5g->bw160_diff[rfpath][txcount] & BIT(3))
+ pwrinfo5g->bw160_diff[rfpath][txcount] |= 0xF0;
- eeAddr++;
+ eeaddr++;
}
}
}
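The power-index loops above repeatedly widen a 4-bit two's-complement field read from the efuse map into an 8-bit signed offset by OR-ing in 0xF0 whenever bit 3 is set. A standalone sketch of that idiom, using an illustrative function name and stdint types rather than the driver's own:

	#include <stdint.h>

	/* Widen a 4-bit two's-complement nibble (bit 3 = sign) to 8 bits. */
	static uint8_t sign_extend_nibble(uint8_t nibble)
	{
		uint8_t val = nibble & 0x0F;

		if (val & 0x08)		/* BIT(3): the nibble is negative */
			val |= 0xF0;	/* propagate the sign into bits 7..4 */
		return val;
	}

	/* Example: 0xE (-2 as a 4-bit value) becomes 0xFE (-2 as an s8). */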
@@ -2930,8 +2908,8 @@ static void _rtl8812ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
if (!autoload_fail) {
- rtlhal->pa_type_2g = hwinfo[0xBC];
- rtlhal->lna_type_2g = hwinfo[0xBD];
+ rtlhal->pa_type_2g = hwinfo[0XBC];
+ rtlhal->lna_type_2g = hwinfo[0XBD];
if (rtlhal->pa_type_2g == 0xFF && rtlhal->lna_type_2g == 0xFF) {
rtlhal->pa_type_2g = 0;
rtlhal->lna_type_2g = 0;
@@ -2943,8 +2921,8 @@ static void _rtl8812ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo,
(rtlhal->lna_type_2g & BIT(3))) ?
1 : 0;
- rtlhal->pa_type_5g = hwinfo[0xBC];
- rtlhal->lna_type_5g = hwinfo[0xBF];
+ rtlhal->pa_type_5g = hwinfo[0XBC];
+ rtlhal->lna_type_5g = hwinfo[0XBF];
if (rtlhal->pa_type_5g == 0xFF && rtlhal->lna_type_5g == 0xFF) {
rtlhal->pa_type_5g = 0;
rtlhal->lna_type_5g = 0;
@@ -2969,18 +2947,18 @@ static void _rtl8812ae_read_amplifier_type(struct ieee80211_hw *hw, u8 *hwinfo,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u8 ext_type_pa_2g_a = (hwinfo[0xBD] & BIT(2)) >> 2; /* 0xBD[2] */
- u8 ext_type_pa_2g_b = (hwinfo[0xBD] & BIT(6)) >> 6; /* 0xBD[6] */
- u8 ext_type_pa_5g_a = (hwinfo[0xBF] & BIT(2)) >> 2; /* 0xBF[2] */
- u8 ext_type_pa_5g_b = (hwinfo[0xBF] & BIT(6)) >> 6; /* 0xBF[6] */
- /* 0xBD[1:0] */
- u8 ext_type_lna_2g_a = (hwinfo[0xBD] & (BIT(1) | BIT(0))) >> 0;
- /* 0xBD[5:4] */
- u8 ext_type_lna_2g_b = (hwinfo[0xBD] & (BIT(5) | BIT(4))) >> 4;
- /* 0xBF[1:0] */
- u8 ext_type_lna_5g_a = (hwinfo[0xBF] & (BIT(1) | BIT(0))) >> 0;
- /* 0xBF[5:4] */
- u8 ext_type_lna_5g_b = (hwinfo[0xBF] & (BIT(5) | BIT(4))) >> 4;
+ u8 ext_type_pa_2g_a = (hwinfo[0XBD] & BIT(2)) >> 2; /* 0XBD[2] */
+ u8 ext_type_pa_2g_b = (hwinfo[0XBD] & BIT(6)) >> 6; /* 0XBD[6] */
+ u8 ext_type_pa_5g_a = (hwinfo[0XBF] & BIT(2)) >> 2; /* 0XBF[2] */
+ u8 ext_type_pa_5g_b = (hwinfo[0XBF] & BIT(6)) >> 6; /* 0XBF[6] */
+ /* 0XBD[1:0] */
+ u8 ext_type_lna_2g_a = (hwinfo[0XBD] & (BIT(1) | BIT(0))) >> 0;
+ /* 0XBD[5:4] */
+ u8 ext_type_lna_2g_b = (hwinfo[0XBD] & (BIT(5) | BIT(4))) >> 4;
+ /* 0XBF[1:0] */
+ u8 ext_type_lna_5g_a = (hwinfo[0XBF] & (BIT(1) | BIT(0))) >> 0;
+ /* 0XBF[5:4] */
+ u8 ext_type_lna_5g_b = (hwinfo[0XBF] & (BIT(5) | BIT(4))) >> 4;
_rtl8812ae_read_pa_type(hw, hwinfo, autoload_fail);
@@ -3008,8 +2986,8 @@ static void _rtl8821ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
if (!autoload_fail) {
- rtlhal->pa_type_2g = hwinfo[0xBC];
- rtlhal->lna_type_2g = hwinfo[0xBD];
+ rtlhal->pa_type_2g = hwinfo[0XBC];
+ rtlhal->lna_type_2g = hwinfo[0XBD];
if (rtlhal->pa_type_2g == 0xFF && rtlhal->lna_type_2g == 0xFF) {
rtlhal->pa_type_2g = 0;
rtlhal->lna_type_2g = 0;
@@ -3017,8 +2995,8 @@ static void _rtl8821ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo,
rtlhal->external_pa_2g = (rtlhal->pa_type_2g & BIT(5)) ? 1 : 0;
rtlhal->external_lna_2g = (rtlhal->lna_type_2g & BIT(7)) ? 1 : 0;
- rtlhal->pa_type_5g = hwinfo[0xBC];
- rtlhal->lna_type_5g = hwinfo[0xBF];
+ rtlhal->pa_type_5g = hwinfo[0XBC];
+ rtlhal->lna_type_5g = hwinfo[0XBF];
if (rtlhal->pa_type_5g == 0xFF && rtlhal->lna_type_5g == 0xFF) {
rtlhal->pa_type_5g = 0;
rtlhal->lna_type_5g = 0;
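The PA/LNA hunks above pick single bits and two-bit fields out of efuse bytes with mask-and-shift expressions such as (hwinfo[0XBD] & BIT(2)) >> 2 and (hwinfo[0XBD] & (BIT(5) | BIT(4))) >> 4. A self-contained sketch of that pattern; the helper name and types are illustrative only:

	#include <stdint.h>

	#define BIT(n)	(1U << (n))

	/* Return the field selected by mask, shifted down to bit 0. */
	static uint8_t field_get(uint8_t byte, unsigned int shift, uint8_t mask)
	{
		return (byte & mask) >> shift;
	}

	/* e.g. ext_type_lna_2g_b above is field_get(hwinfo[0XBD], 4, BIT(5) | BIT(4)) */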
@@ -4033,10 +4011,10 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL, RXPKT_BUF_SELECT);
for (addr = 0; addr < WKFMCAM_ADDR_NUM; addr++) {
/* Set Rx packet buffer offset.
- * RxBufer pointer increases 1,
+ * RXBufer pointer increases 1,
* we can access 8 bytes in Rx packet buffer.
* CAM start offset (unit: 1 byte) = index*WKFMCAM_SIZE
- * RxBufer addr = (CAM start offset +
+ * RXBufer addr = (CAM start offset +
* per entry offset of a WKFM CAM)/8
* * index: The index of the wake up frame mask
* * WKFMCAM_SIZE: the total size of one WKFM CAM
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
index e2ab783a2ad9..fb0fb3a501d9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_HW_H__
#define __RTL8821AE_HW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
index 405c7541b386..dd7553e80ab1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
index 038e64e18ae8..249a37a8d9db 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_LED_H__
#define __RTL8821AE_LED_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index a75451c246fd..408af144098e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
@@ -475,7 +453,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
const s8 auto_temp = -1;
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n",
+ "===> PHY_GetTXBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n",
(int)swing_2g, (int)swing_5g,
(int)rtlefuse->autoload_failflag);
@@ -556,7 +534,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
swing_a = (swing & 0x3) >> 0; /* 0xC6/C7[1:0] */
swing_b = (swing & 0xC) >> 2; /* 0xC6/C7[3:2] */
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "===> PHY_GetTxBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
+ "===> PHY_GetTXBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
swing_a, swing_b);
/* 3 Path-A */
@@ -614,7 +592,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
}
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out);
+ "<=== PHY_GetTXBBSwing_8812A, out = 0x%X\n", out);
return out;
}
@@ -1078,52 +1056,52 @@ static void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
- u16 rawValue = 0;
+ u16 rawvalue = 0;
u8 base = 0, path = 0;
for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, CCK, RF_1TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, OFDM, RF_1TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base);
- rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF;
- base = (rawValue >> 4) * 10 + (rawValue & 0xF);
+ rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF;
+ base = (rawvalue >> 4) * 10 + (rawvalue & 0xF);
_rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base);
}
}
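Each rawvalue byte above is treated as packed BCD: the high nibble holds tens and the low nibble units, so the base power is recovered with (rawvalue >> 4) * 10 + (rawvalue & 0xF). A runnable sketch of just that decode (names are illustrative, not the driver's):

	#include <stdint.h>
	#include <stdio.h>

	/* Decode a packed-BCD power byte: high nibble = tens, low nibble = units. */
	static uint8_t bcd_power_base(uint16_t raw)
	{
		return (uint8_t)((raw >> 4) * 10 + (raw & 0xF));
	}

	int main(void)
	{
		printf("%u\n", bcd_power_base(0x36));	/* prints 36 */
		return 0;
	}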
@@ -1380,7 +1358,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "TxPwrLimit_2_4G[regulation %d][bw %d][rateSection %d][channel %d] = %d\n(TxPwrLimit in dBm %d - BW40PwrLmt2_4G[channel %d][rfPath %d] %d)\n",
+ "TxPwrLimit_2_4G[regulation %d][bw %d][rateSection %d][channel %d] = %d\n(TxPwrLimit in dBm %d - BW40PwrLmt2_4G[channel %d][rfpath %d] %d)\n",
regulation, bw, rate_section, channel,
rtlphy->txpwr_limit_2_4g[regulation][bw]
[rate_section][channel][rf_path], (temp_pwrlmt == 63)
@@ -1445,7 +1423,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "TxPwrLimit_5G[regulation %d][bw %d][rateSection %d][channel %d] =%d\n(TxPwrLimit in dBm %d - BW40PwrLmt5G[chnl group %d][rfPath %d] %d)\n",
+ "TxPwrLimit_5G[regulation %d][bw %d][rateSection %d][channel %d] =%d\n(TxPwrLimit in dBm %d - BW40PwrLmt5G[chnl group %d][rfpath %d] %d)\n",
regulation, bw, rate_section,
channel, rtlphy->txpwr_limit_5g[regulation]
[bw][rate_section][channel][rf_path],
@@ -1495,106 +1473,106 @@ static void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee8021
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 base = 0, rfPath = 0;
+ u8 base = 0, rfpath = 0;
- for (rfPath = RF90_PATH_A; rfPath <= RF90_PATH_B; ++rfPath) {
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, CCK);
+ for (rfpath = RF90_PATH_A; rfpath <= RF90_PATH_B; ++rfpath) {
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_1TX, CCK);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][0],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][0],
0, 3, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, OFDM);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_1TX, OFDM);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][1],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][1],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][2],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][2],
0, 3, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_1TX, HT_MCS0_MCS7);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][3],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][3],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][4],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][4],
0, 3, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_2TX, HT_MCS8_MCS15);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][5],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][5],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][6],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][6],
0, 3, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_1TX, VHT_1SSMCS0_1SSMCS9);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][7],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][7],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][8],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][8],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][9],
0, 1, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_2TX, VHT_2SSMCS0_2SSMCS9);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][9],
2, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][10],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][10],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][11],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][11],
0, 3, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, OFDM);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_1TX, OFDM);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][1],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][1],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][2],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][2],
0, 3, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, HT_MCS0_MCS7);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_1TX, HT_MCS0_MCS7);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][3],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][3],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][4],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][4],
0, 3, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, HT_MCS8_MCS15);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_2TX, HT_MCS8_MCS15);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][5],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_2TX][5],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][6],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_2TX][6],
0, 3, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_1TX, VHT_1SSMCS0_1SSMCS9);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][7],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][7],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][8],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][8],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][9],
0, 1, base);
- base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9);
+ base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_2TX, VHT_2SSMCS0_2SSMCS9);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][9],
2, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][10],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_2TX][10],
0, 3, base);
_phy_convert_txpower_dbm_to_relative_value(
- &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][11],
+ &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_2TX][11],
0, 3, base);
}
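The body of _phy_convert_txpower_dbm_to_relative_value sits outside this hunk; the calls above hand it a packed 32-bit word holding one BCD power byte per rate, a byte range [start, end], and the base computed earlier. A hedged sketch of what such a per-byte conversion could look like -- the function name, byte handling, and offset arithmetic below are assumptions, not the driver's code:

	#include <stdint.h>

	static void dbm_to_relative(uint32_t *word, int start, int end, uint8_t base)
	{
		uint32_t out = 0;
		int i;

		for (i = 3; i >= 0; i--) {
			uint8_t byte = (*word >> (i * 8)) & 0xFF;

			if (i >= start && i <= end) {
				/* decode the BCD dBm value, keep the delta from base */
				uint8_t dbm = (byte >> 4) * 10 + (byte & 0xF);

				byte = (uint8_t)(dbm - base);
			}
			out = (out << 8) | byte;	/* reassemble in place */
		}
		*word = out;
	}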
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
index 1285e1adfe9d..35b7d0f70125 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_PHY_H__
#define __RTL8821AE_PHY_H__
@@ -59,9 +37,9 @@
#define LOOP_LIMIT 5
#define MAX_STALL_TIME 50
-#define AntennaDiversityValue 0x80
+#define ANTENNADIVERSITYVALUE 0x80
#define MAX_TXPWR_IDX_NMODE_92S 63
-#define Reset_Cnt_Limit 3
+#define RESET_CNT_LIMIT 3
#define IQK_ADDA_REG_NUM 16
#define IQK_MAC_REG_NUM 4
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
index 9ddf78a187dd..1e7b3c770ac2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../pwrseqcmd.h"
#include "pwrseq.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
index 6dd575435c63..d6f3cbab4abc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_PWRSEQ_H__
#define __RTL8821AE_PWRSEQ_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
index db8bc8a2de61..7d833b72c7ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_REG_H__
#define __RTL8821AE_REG_H__
@@ -696,7 +674,7 @@
#define EEPROM_CHANNEL_PLAN_TELEC 0x8
#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
-#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_CHANNEL_PLAN_NCC 0XB
#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
#define EEPROM_CID_DEFAULT 0x0
@@ -718,10 +696,10 @@
#define EEPROM_TX_PWR_INX 0x10
-#define EEPROM_CHANNELPLAN 0xB8
-#define EEPROM_XTAL_8821AE 0xB9
-#define EEPROM_THERMAL_METER 0xBA
-#define EEPROM_IQK_LCK_88E 0xBB
+#define EEPROM_CHANNELPLAN 0XB8
+#define EEPROM_XTAL_8821AE 0XB9
+#define EEPROM_THERMAL_METER 0XBA
+#define EEPROM_IQK_LCK_88E 0XBB
#define EEPROM_RF_BOARD_OPTION 0xC1
#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
@@ -1015,7 +993,7 @@
#define _LBMODE(x) (((x) & 0xF) << 24)
#define MASK_LBMODE 0xF000000
#define LOOPBACK_NORMAL 0x0
-#define LOOPBACK_IMMEDIATELY 0xB
+#define LOOPBACK_IMMEDIATELY 0XB
#define LOOPBACK_MAC_DELAY 0x3
#define LOOPBACK_PHY 0x1
#define LOOPBACK_DMA 0x7
@@ -1430,7 +1408,7 @@
#define RCCK0_FACOUNTERUPPER 0xa58
#define RCCK0_CCA_CNT 0xa60
-/* PageB(0xB00) */
+/* PageB(0XB00) */
#define RPDP_ANTA 0xb00
#define RPDP_ANTA_4 0xb04
#define RPDP_ANTA_8 0xb08
@@ -1477,16 +1455,16 @@
#define RPM_RX3_ANTB 0xbf8
/*RSSI Dump*/
-#define RA_RSSI_DUMP 0xBF0
-#define RB_RSSI_DUMP 0xBF1
-#define RS1_RX_EVM_DUMP 0xBF4
-#define RS2_RX_EVM_DUMP 0xBF5
-#define RA_RX_SNR_DUMP 0xBF6
-#define RB_RX_SNR_DUMP 0xBF7
-#define RA_CFO_SHORT_DUMP 0xBF8
-#define RB_CFO_SHORT_DUMP 0xBFA
-#define RA_CFO_LONG_DUMP 0xBEC
-#define RB_CFO_LONG_DUMP 0xBEE
+#define RA_RSSI_DUMP 0XBF0
+#define RB_RSSI_DUMP 0XBF1
+#define RS1_RX_EVM_DUMP 0XBF4
+#define RS2_RX_EVM_DUMP 0XBF5
+#define RA_RX_SNR_DUMP 0XBF6
+#define RB_RX_SNR_DUMP 0XBF7
+#define RA_CFO_SHORT_DUMP 0XBF8
+#define RB_CFO_SHORT_DUMP 0XBFA
+#define RA_CFO_LONG_DUMP 0XBEC
+#define RB_CFO_LONG_DUMP 0XBEE
/*Page C*/
#define ROFDM0_LSTF 0xc00
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
index 95489f41f8a0..a6e56872e063 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../wifi.h"
#include "reg.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
index efd22bd0b139..6e3c8bfb2048 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_RF_H__
#define __RTL8821AE_RF_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 77f6401021c9..eec7c4ecf3ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../wifi.h"
#include "../core.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
index d001e7ce3052..9d7610f84b20 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_SW_H__
#define __RTL8821AE_SW_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index f87f9d03b9fa..85093b3e5373 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -1,29 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
+
#include <linux/kernel.h>
#include "table.h"
u32 RTL8812AE_PHY_REG_ARRAY[] = {
@@ -134,30 +111,30 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0xA7C, 0x225B0606,
0xA80, 0x218075B2,
0xA84, 0x001F8C80,
- 0xB00, 0x03100000,
- 0xB04, 0x0000B000,
- 0xB08, 0xAE0201EB,
- 0xB0C, 0x01003207,
- 0xB10, 0x00009807,
- 0xB14, 0x01000000,
- 0xB18, 0x00000002,
- 0xB1C, 0x00000002,
- 0xB20, 0x0000001F,
- 0xB24, 0x03020100,
- 0xB28, 0x07060504,
- 0xB2C, 0x0B0A0908,
- 0xB30, 0x0F0E0D0C,
- 0xB34, 0x13121110,
- 0xB38, 0x17161514,
- 0xB3C, 0x0000003A,
- 0xB40, 0x00000000,
- 0xB44, 0x00000000,
- 0xB48, 0x13000032,
- 0xB4C, 0x48080000,
- 0xB50, 0x00000000,
- 0xB54, 0x00000000,
- 0xB58, 0x00000000,
- 0xB5C, 0x00000000,
+ 0XB00, 0x03100000,
+ 0XB04, 0x0000B000,
+ 0XB08, 0xAE0201EB,
+ 0XB0C, 0x01003207,
+ 0XB10, 0x00009807,
+ 0XB14, 0x01000000,
+ 0XB18, 0x00000002,
+ 0XB1C, 0x00000002,
+ 0XB20, 0x0000001F,
+ 0XB24, 0x03020100,
+ 0XB28, 0x07060504,
+ 0XB2C, 0x0B0A0908,
+ 0XB30, 0x0F0E0D0C,
+ 0XB34, 0x13121110,
+ 0XB38, 0x17161514,
+ 0XB3C, 0x0000003A,
+ 0XB40, 0x00000000,
+ 0XB44, 0x00000000,
+ 0XB48, 0x13000032,
+ 0XB4C, 0x48080000,
+ 0XB50, 0x00000000,
+ 0XB54, 0x00000000,
+ 0XB58, 0x00000000,
+ 0XB5C, 0x00000000,
0xC00, 0x00000007,
0xC04, 0x00042020,
0xC08, 0x80410231,
@@ -197,7 +174,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0xC68, 0x59791979,
0xA0000000, 0x00000000,
0xC68, 0x59799979,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0xC6C, 0x59795979,
0xC70, 0x19795979,
0xC74, 0x19795979,
@@ -367,30 +344,30 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
0xA7C, 0x225B0606,
0xA80, 0x21805490,
0xA84, 0x001F0000,
- 0xB00, 0x03100040,
- 0xB04, 0x0000B000,
- 0xB08, 0xAE0201EB,
- 0xB0C, 0x01003207,
- 0xB10, 0x00009807,
- 0xB14, 0x01000000,
- 0xB18, 0x00000002,
- 0xB1C, 0x00000002,
- 0xB20, 0x0000001F,
- 0xB24, 0x03020100,
- 0xB28, 0x07060504,
- 0xB2C, 0x0B0A0908,
- 0xB30, 0x0F0E0D0C,
- 0xB34, 0x13121110,
- 0xB38, 0x17161514,
- 0xB3C, 0x0000003A,
- 0xB40, 0x00000000,
- 0xB44, 0x00000000,
- 0xB48, 0x13000032,
- 0xB4C, 0x48080000,
- 0xB50, 0x00000000,
- 0xB54, 0x00000000,
- 0xB58, 0x00000000,
- 0xB5C, 0x00000000,
+ 0XB00, 0x03100040,
+ 0XB04, 0x0000B000,
+ 0XB08, 0xAE0201EB,
+ 0XB0C, 0x01003207,
+ 0XB10, 0x00009807,
+ 0XB14, 0x01000000,
+ 0XB18, 0x00000002,
+ 0XB1C, 0x00000002,
+ 0XB20, 0x0000001F,
+ 0XB24, 0x03020100,
+ 0XB28, 0x07060504,
+ 0XB2C, 0x0B0A0908,
+ 0XB30, 0x0F0E0D0C,
+ 0XB34, 0x13121110,
+ 0XB38, 0x17161514,
+ 0XB3C, 0x0000003A,
+ 0XB40, 0x00000000,
+ 0XB44, 0x00000000,
+ 0XB48, 0x13000032,
+ 0XB4C, 0x48080000,
+ 0XB50, 0x00000000,
+ 0XB54, 0x00000000,
+ 0XB58, 0x00000000,
+ 0XB5C, 0x00000000,
0xC00, 0x00000007,
0xC04, 0x00042020,
0xC08, 0x80410231,
@@ -521,12 +498,12 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x086, 0x00014B3A,
0xA0000000, 0x00000000,
0x086, 0x00014B38,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x80000004, 0x00000000, 0x40000000, 0x00000000,
0x08B, 0x00080180,
0xA0000000, 0x00000000,
0x08B, 0x00087180,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0B1, 0x0001FC1A,
0x0B3, 0x000F0810,
0x0B4, 0x0001A78D,
@@ -557,7 +534,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x03B, 0x00018248,
0x03B, 0x00010240,
0x03B, 0x00008240,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000100,
0x80000002, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A4EE,
@@ -583,7 +560,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x000024E7,
0x034, 0x0000146B,
0x034, 0x0000006D,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x000020A2,
0x0DF, 0x00000080,
@@ -712,7 +689,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x000428C5,
0x034, 0x000418C2,
0x034, 0x000408C0,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0002A0B2,
0x034, 0x000290AF,
@@ -749,7 +726,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x000228C5,
0x034, 0x000218C2,
0x034, 0x000208C0,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A0B2,
0x034, 0x000090AF,
@@ -786,7 +763,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x000028C9,
0x034, 0x000018C6,
0x034, 0x000008C3,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
@@ -824,7 +801,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x035, 0x000401D8,
0x035, 0x000481D8,
0x035, 0x000501D8,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
@@ -871,7 +848,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x036, 0x000CCC35,
0x036, 0x000D4C35,
0x036, 0x000DCC35,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x00000008,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
@@ -886,7 +863,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x03C, 0x000002A8,
0x03C, 0x000005A2,
0x03C, 0x00000880,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000002,
@@ -910,7 +887,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x063, 0x000114EB,
0x064, 0x000196AC,
0x065, 0x000911D7,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x008, 0x00008400,
0x01C, 0x000739D2,
0x0B4, 0x0001E78D,
@@ -935,12 +912,12 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x086, 0x00014B3A,
0xA0000000, 0x00000000,
0x086, 0x00014B38,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x80000004, 0x00000000, 0x40000000, 0x00000000,
0x08B, 0x00080180,
0xA0000000, 0x00000000,
0x08B, 0x00087180,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x018, 0x00000006,
0x0EF, 0x00002000,
0x80000001, 0x00000000, 0x40000000, 0x00000000,
@@ -967,7 +944,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x03B, 0x00018248,
0x03B, 0x00010240,
0x03B, 0x00008240,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000100,
0x80000002, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A4EE,
@@ -993,7 +970,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x000024E7,
0x034, 0x0000146B,
0x034, 0x0000006D,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x000020A2,
0x0DF, 0x00000080,
@@ -1122,7 +1099,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x000428C5,
0x034, 0x000418C2,
0x034, 0x000408C0,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0002A0B2,
0x034, 0x000290AF,
@@ -1159,7 +1136,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x000228C5,
0x034, 0x000218C2,
0x034, 0x000208C0,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A0B2,
0x034, 0x000090AF,
@@ -1196,7 +1173,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x000028C9,
0x034, 0x000018C6,
0x034, 0x000008C3,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
@@ -1237,7 +1214,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x035, 0x000481D8,
0x035, 0x000501D8,
0x0EF, 0x00000000,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
@@ -1283,7 +1260,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x036, 0x000CCC35,
0x036, 0x000D4C35,
0x036, 0x000DCC35,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x00000008,
0x80000008, 0x00000000, 0x40000000, 0x00000000,
@@ -1298,7 +1275,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x03C, 0x000002A8,
0x03C, 0x000005A2,
0x03C, 0x00000880,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000002,
@@ -1327,7 +1304,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x063, 0x000114EB,
0x064, 0x000196AC,
0x065, 0x000911D7,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x008, 0x00008400,
};
@@ -1933,7 +1910,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
0x011, 0x00000066,
0xA0000000, 0x00000000,
0x011, 0x0000005A,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x025, 0x0000000F,
0x072, 0x00000000,
0x420, 0x00000080,
@@ -2337,7 +2314,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x81C, 0x417A0001,
0x81C, 0x417C0001,
0x81C, 0x417E0001,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0x80000004, 0x00000000, 0x40000000, 0x00000000,
0x81C, 0xFC800001,
0x81C, 0xFB820001,
@@ -2468,7 +2445,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x81C, 0x01FA0001,
0x81C, 0x01FC0001,
0x81C, 0x01FE0001,
- 0xB0000000, 0x00000000,
+ 0XB0000000, 0x00000000,
0xC50, 0x00000022,
0xC50, 0x00000020,
0xE50, 0x00000022,
@@ -2478,24 +2455,24 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
u32 RTL8812AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_AGC_TAB_ARRAY);
u32 RTL8821AE_AGC_TAB_ARRAY[] = {
- 0x81C, 0xBF000001,
- 0x81C, 0xBF020001,
- 0x81C, 0xBF040001,
- 0x81C, 0xBF060001,
- 0x81C, 0xBE080001,
- 0x81C, 0xBD0A0001,
- 0x81C, 0xBC0C0001,
- 0x81C, 0xBA0E0001,
- 0x81C, 0xB9100001,
- 0x81C, 0xB8120001,
- 0x81C, 0xB7140001,
- 0x81C, 0xB6160001,
- 0x81C, 0xB5180001,
- 0x81C, 0xB41A0001,
- 0x81C, 0xB31C0001,
- 0x81C, 0xB21E0001,
- 0x81C, 0xB1200001,
- 0x81C, 0xB0220001,
+ 0x81C, 0XBF000001,
+ 0x81C, 0XBF020001,
+ 0x81C, 0XBF040001,
+ 0x81C, 0XBF060001,
+ 0x81C, 0XBE080001,
+ 0x81C, 0XBD0A0001,
+ 0x81C, 0XBC0C0001,
+ 0x81C, 0XBA0E0001,
+ 0x81C, 0XB9100001,
+ 0x81C, 0XB8120001,
+ 0x81C, 0XB7140001,
+ 0x81C, 0XB6160001,
+ 0x81C, 0XB5180001,
+ 0x81C, 0XB41A0001,
+ 0x81C, 0XB31C0001,
+ 0x81C, 0XB21E0001,
+ 0x81C, 0XB1200001,
+ 0x81C, 0XB0220001,
0x81C, 0xAF240001,
0x81C, 0xAE260001,
0x81C, 0xAD280001,
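RTL8812AE_PHY_REG_ARRAY and the radio/AGC tables above are flat {address, value} pairs; words whose top nibble is 0x8, 0xA or 0xB appear to act as condition / else / endif markers around board-specific blocks. A minimal walker for the unconditional pairs, with the marker evaluation deliberately left out and a placeholder write callback (both are assumptions, not the driver's loader):

	#include <stdint.h>
	#include <stddef.h>

	/* Placeholder standing in for the driver's PHY register write. */
	typedef void (*phy_write_fn)(uint32_t addr, uint32_t value);

	static void walk_reg_pairs(const uint32_t *table, size_t len, phy_write_fn write)
	{
		size_t i;

		for (i = 0; i + 1 < len; i += 2) {
			uint32_t addr = table[i];
			uint32_t value = table[i + 1];

			/* Skip marker words; the real loader evaluates them. */
			if ((addr & 0xF0000000) != 0)
				continue;
			write(addr, value);
		}
	}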
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
index 36c2388b60bc..540159c25078 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
@@ -1,29 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Created on 2010/ 5/18, 1:41
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_TABLE__H_
#define __RTL8821AE_TABLE__H_
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 0f2b7c619918..db5e628b17ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#include "../wifi.h"
#include "../pci.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
index 4ff0968dba81..a3feecad645d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2010 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2010 Realtek Corporation.*/
#ifndef __RTL8821AE_TRX_H__
#define __RTL8821AE_TRX_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/stats.c b/drivers/net/wireless/realtek/rtlwifi/stats.c
index 61700fa05570..504ca587a16a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/stats.c
+++ b/drivers/net/wireless/realtek/rtlwifi/stats.c
@@ -1,27 +1,6 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
#include "wifi.h"
#include "stats.h"
#include <linux/export.h>
diff --git a/drivers/net/wireless/realtek/rtlwifi/stats.h b/drivers/net/wireless/realtek/rtlwifi/stats.h
index bd0108f93182..581590729b0f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/stats.h
+++ b/drivers/net/wireless/realtek/rtlwifi/stats.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_STATS_H__
#define __RTL_STATS_H__
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 2ac5004d7a40..e24fda5e9087 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1,25 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- *****************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "wifi.h"
#include "core.h"
@@ -214,7 +194,7 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
_usb_write_async(to_usb_device(dev), addr, val, 4);
}
-static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
+static void _usb_writen_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
u16 len)
{
struct device *dev = rtlpriv->io.dev;
@@ -249,7 +229,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.read8_sync = _usb_read8_sync;
rtlpriv->io.read16_sync = _usb_read16_sync;
rtlpriv->io.read32_sync = _usb_read32_sync;
- rtlpriv->io.writeN_sync = _usb_writeN_sync;
+ rtlpriv->io.writen_sync = _usb_writen_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
@@ -287,6 +267,7 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
for (i = 0; i < __RTL_TXQ_NUM; i++) {
u32 ep_num = rtlusb->ep_map.ep_mapping[i];
+
if (!ep_num) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"Invalid endpoint map setting!\n");
@@ -351,6 +332,7 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0;
for (epidx = 0; epidx < epnums; epidx++) {
struct usb_endpoint_descriptor *pep_desc;
+
pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
if (usb_endpoint_dir_in(pep_desc))
@@ -413,7 +395,7 @@ static void rtl_usb_init_sw(struct ieee80211_hw *hw)
rtlusb->irq_mask[0] = 0xFFFFFFFF;
/* HIMR_EX - turn all on */
rtlusb->irq_mask[1] = 0xFFFFFFFF;
- rtlusb->disableHWSM = true;
+ rtlusb->disablehwsm = true;
}
static void _rtl_rx_completed(struct urb *urb);
@@ -773,6 +755,7 @@ static int rtl_usb_start(struct ieee80211_hw *hw)
return err;
}
+
/**
*
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index c91cec04bfaf..3bf85b23eec1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -1,25 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_USB_H__
#define __RTL_USB_H__
@@ -37,7 +17,6 @@
#define USB_HIGH_SPEED_BULK_SIZE 512
#define USB_FULL_SPEED_BULK_SIZE 64
-
#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */
#define RTL_USB_MAX_EP_NUM 6 /* max ep number */
#define RTL_USB_MAX_TX_URBS_NUM 8
@@ -73,11 +52,11 @@ static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb,
u32 ep_num)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
info->rate_driver_data[0] = rtlusb;
info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num;
}
-
/* Add suspend/resume later */
enum rtl_usb_state {
USB_STATE_STOP = 0,
@@ -104,7 +83,7 @@ struct rtl_usb {
/* Bcn control register setting */
u32 reg_bcn_ctrl_val;
/* for 88/92cu card disable */
- u8 disableHWSM;
+ u8 disablehwsm;
/*QOS & EDCA */
enum acm_method acm_method;
/* irq . HIMR,HIMR_EX */
@@ -153,8 +132,6 @@ struct rtl_usb_priv {
#define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
#define rtl_usbdev(usbpriv) (&((usbpriv)->dev))
-
-
int rtl_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id,
struct rtl_hal_cfg *rtl92cu_hal_cfg);
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 87bc21bb5e8b..e32e9ffa3192 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1,27 +1,5 @@
-/******************************************************************************
- *
- * Copyright(c) 2009-2012 Realtek Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
- * Hsinchu 300, Taiwan.
- *
- * Larry Finger <Larry.Finger@lwfinger.net>
- *
- *****************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
#ifndef __RTL_WIFI_H__
#define __RTL_WIFI_H__
@@ -263,7 +241,7 @@ struct rtlwifi_firmware_header {
u8 date;
u8 hour;
u8 minute;
- __le16 ramcodeSize;
+ __le16 ramcodesize;
__le16 rsvd2;
__le32 svnindex;
__le32 rsvd3;
@@ -435,8 +413,8 @@ enum hw_variables {
HW_VAR_MULTICAST_REG = 0x1,
HW_VAR_BASIC_RATE = 0x2,
HW_VAR_BSSID = 0x3,
- HW_VAR_MEDIA_STATUS= 0x4,
- HW_VAR_SECURITY_CONF= 0x5,
+ HW_VAR_MEDIA_STATUS = 0x4,
+ HW_VAR_SECURITY_CONF = 0x5,
HW_VAR_BEACON_INTERVAL = 0x6,
HW_VAR_ATIM_WINDOW = 0x7,
HW_VAR_LISTEN_INTERVAL = 0x8,
@@ -453,7 +431,7 @@ enum hw_variables {
HW_VAR_ACK_PREAMBLE = 0x13,
HW_VAR_CW_CONFIG = 0x14,
HW_VAR_CW_VALUES = 0x15,
- HW_VAR_RATE_FALLBACK_CONTROL= 0x16,
+ HW_VAR_RATE_FALLBACK_CONTROL = 0x16,
HW_VAR_CONTENTION_WINDOW = 0x17,
HW_VAR_RETRY_COUNT = 0x18,
HW_VAR_TR_SWITCH = 0x19,
@@ -465,11 +443,11 @@ enum hw_variables {
HW_VAR_MCS_RATE_AVAILABLE = 0x1f,
HW_VAR_AC_PARAM = 0x20,
HW_VAR_ACM_CTRL = 0x21,
- HW_VAR_DIS_Req_Qsize = 0x22,
+ HW_VAR_DIS_REQ_QSIZE = 0x22,
HW_VAR_CCX_CHNL_LOAD = 0x23,
HW_VAR_CCX_NOISE_HISTOGRAM = 0x24,
HW_VAR_CCX_CLM_NHM = 0x25,
- HW_VAR_TxOPLimit = 0x26,
+ HW_VAR_TXOPLIMIT = 0x26,
HW_VAR_TURBO_MODE = 0x27,
HW_VAR_RF_STATE = 0x28,
HW_VAR_RF_OFF_BY_HW = 0x29,
@@ -522,7 +500,7 @@ enum hw_variables {
HW_VAR_BCN_VALID = 0x55,
HW_VAR_FWLPS_RF_ON = 0x56,
HW_VAR_DUAL_TSF_RST = 0x57,
- HW_VAR_SWITCH_EPHY_WoWLAN = 0x58,
+ HW_VAR_SWITCH_EPHY_WOWLAN = 0x58,
HW_VAR_INT_MIGRATION = 0x59,
HW_VAR_INT_AC = 0x5a,
HW_VAR_RF_TIMING = 0x5b,
@@ -620,7 +598,8 @@ enum ht_channel_width {
};
/* Ref: 802.11i sepc D10.0 7.3.2.25.1
-Cipher Suites Encryption Algorithms */
+ * Cipher Suites Encryption Algorithms
+ */
enum rt_enc_alg {
NO_ENCRYPTION = 0,
WEP40_ENCRYPTION = 1,
@@ -770,7 +749,8 @@ enum rtl_var_map {
RTL_IMR_ROK, /*Receive DMA OK Interrupt */
RTL_IMR_HSISR_IND, /*HSISR Interrupt*/
RTL_IBSS_INT_MASKS, /*(RTL_IMR_BCNINT | RTL_IMR_TBDOK |
- * RTL_IMR_TBDER) */
+ * RTL_IMR_TBDER)
+ */
RTL_IMR_C2HCMD, /*fw interrupt*/
/*CCK Rates, TxHT = 0 */
@@ -814,8 +794,8 @@ enum _fw_ps_mode {
FW_PS_UAPSD_MODE = 6,
FW_PS_IBSS_MODE = 7,
FW_PS_WWLAN_MODE = 8,
- FW_PS_PM_Radio_Off = 9,
- FW_PS_PM_Card_Disable = 10,
+ FW_PS_PM_RADIO_OFF = 9,
+ FW_PS_PM_CARD_DISABLE = 10,
};
enum rt_psmode {
@@ -849,8 +829,8 @@ enum rtl_led_pin {
/*QoS related.*/
/*acm implementation method.*/
enum acm_method {
- eAcmWay0_SwAndHw = 0,
- eAcmWay1_HW = 1,
+ EACMWAY0_SWANDHW = 0,
+ EACMWAY1_HW = 1,
EACMWAY2_SW = 2,
};
@@ -867,8 +847,9 @@ enum band_type {
BANDMAX
};
-/*aci/aifsn Field.
-Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
+/* aci/aifsn Field.
+ * Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
+ */
union aci_aifsn {
u8 char_data;
@@ -1084,7 +1065,8 @@ struct rtl_probe_rsp {
__le16 beacon_interval;
__le16 capability;
/*SSID, supported rates, FH params, DS params,
- CF params, IBSS params, TIM (if beacon), RSN */
+ * CF params, IBSS params, TIM (if beacon), RSN
+ */
struct rtl_info_element info_element[0];
} __packed;
@@ -1158,7 +1140,8 @@ struct wireless_stats {
long rx_snr_db[4];
/*Correct smoothed ss in Dbm, only used
- in driver to report real power now. */
+ * in driver to report real power now.
+ */
long recv_signal_power;
long signal_quality;
long last_sigstrength_inpercent;
@@ -1166,8 +1149,9 @@ struct wireless_stats {
u32 rssi_calculate_cnt;
u32 pwdb_all_cnt;
- /*Transformed, in dbm. Beautified signal
- strength for UI, not correct. */
+ /* Transformed, in dbm. Beautified signal
+ * strength for UI, not correct.
+ */
long signal_strength;
u8 rx_rssi_percentage[4];
@@ -1478,15 +1462,15 @@ struct rtl_io {
/*PCI IO map */
unsigned long pci_base_addr; /*device I/O address */
- void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
- void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
- void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
- u16 len);
+ void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
+ void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*writen_sync)(struct rtl_priv *rtlpriv, u32 addr, void *buf,
+ u16 len);
- u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
- u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
- u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
+ u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr);
};
@@ -1711,7 +1695,8 @@ struct rtl_hal {
bool during_mac1init_radioa;
bool reloadtxpowerindex;
/* True if IMR or IQK have done
- for 2.4G in scan progress */
+ * for 2.4G in scan progress
+ */
bool load_imrandiqk_setting_for2g;
bool disable_amsdu_8k;
@@ -1750,12 +1735,14 @@ struct rtl_security {
u32 hwsec_cam_bitmap;
u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN];
/*local Key buffer, indx 0 is for
- pairwise key 1-4 is for agoup key. */
+ * pairwise key, 1-4 is for group key.
+ */
u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN];
u8 key_len[KEY_BUF_SIZE];
/*The pointer of Pairwise Key,
- it always points to KeyBuf[4] */
+ * it always points to KeyBuf[4]
+ */
u8 *pairwise_key;
};
@@ -1898,7 +1885,7 @@ struct rtl_dm {
struct rtl_efuse {
const struct rtl_efuse_ops *efuse_ops;
- bool autoLoad_ok;
+ bool autoload_ok;
bool bootfromefuse;
u16 max_physical_size;
@@ -2019,11 +2006,10 @@ struct rtl_ps_ctl {
bool rfchange_inprogress;
bool swrf_processing;
bool hwradiooff;
- /*
- * just for PCIE ASPM
+ /* just for PCIE ASPM
* If it supports ASPM, Offset[560h] = 0x40,
* otherwise Offset[560h] = 0x00.
- * */
+ */
bool support_aspm;
bool support_backdoor;
@@ -2103,10 +2089,9 @@ struct rtl_stats {
u8 nic_type;
u16 length;
u8 signalquality; /*in 0-100 index. */
- /*
- * Real power in dBm for this packet,
+ /* Real power in dBm for this packet,
* no beautification and aggregation.
- * */
+ */
s32 recvsignalpower;
s8 rxpower; /*in dBm Translate from PWdB */
u8 signalstrength; /*in 0-100 index. */
@@ -2125,7 +2110,7 @@ struct rtl_stats {
u8 rx_bufshift;
bool isampdu;
bool isfirst_ampdu;
- bool rx_is40Mhzpacket;
+ bool rx_is40mhzpacket;
u8 rx_packet_bw;
u32 rx_pwdb_all;
u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
@@ -2158,7 +2143,6 @@ struct rtl_stats {
u32 macid_valid_entry[2];
};
-
struct rt_link_detect {
/* count for roaming */
u32 bcn_rx_inperiod;
@@ -2232,114 +2216,114 @@ struct rtl_int {
};
struct rtl_hal_ops {
- int (*init_sw_vars) (struct ieee80211_hw *hw);
- void (*deinit_sw_vars) (struct ieee80211_hw *hw);
+ int (*init_sw_vars)(struct ieee80211_hw *hw);
+ void (*deinit_sw_vars)(struct ieee80211_hw *hw);
void (*read_chip_version)(struct ieee80211_hw *hw);
- void (*read_eeprom_info) (struct ieee80211_hw *hw);
- void (*interrupt_recognized) (struct ieee80211_hw *hw,
- struct rtl_int *intvec);
- int (*hw_init) (struct ieee80211_hw *hw);
- void (*hw_disable) (struct ieee80211_hw *hw);
- void (*hw_suspend) (struct ieee80211_hw *hw);
- void (*hw_resume) (struct ieee80211_hw *hw);
- void (*enable_interrupt) (struct ieee80211_hw *hw);
- void (*disable_interrupt) (struct ieee80211_hw *hw);
- int (*set_network_type) (struct ieee80211_hw *hw,
- enum nl80211_iftype type);
+ void (*read_eeprom_info)(struct ieee80211_hw *hw);
+ void (*interrupt_recognized)(struct ieee80211_hw *hw,
+ struct rtl_int *intvec);
+ int (*hw_init)(struct ieee80211_hw *hw);
+ void (*hw_disable)(struct ieee80211_hw *hw);
+ void (*hw_suspend)(struct ieee80211_hw *hw);
+ void (*hw_resume)(struct ieee80211_hw *hw);
+ void (*enable_interrupt)(struct ieee80211_hw *hw);
+ void (*disable_interrupt)(struct ieee80211_hw *hw);
+ int (*set_network_type)(struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
void (*set_chk_bssid)(struct ieee80211_hw *hw,
- bool check_bssid);
- void (*set_bw_mode) (struct ieee80211_hw *hw,
- enum nl80211_channel_type ch_type);
- u8(*switch_channel) (struct ieee80211_hw *hw);
- void (*set_qos) (struct ieee80211_hw *hw, int aci);
- void (*set_bcn_reg) (struct ieee80211_hw *hw);
- void (*set_bcn_intv) (struct ieee80211_hw *hw);
- void (*update_interrupt_mask) (struct ieee80211_hw *hw,
- u32 add_msr, u32 rm_msr);
- void (*get_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
- void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
- void (*update_rate_tbl) (struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_leve,
- bool update_bw);
+ bool check_bssid);
+ void (*set_bw_mode)(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+ u8 (*switch_channel)(struct ieee80211_hw *hw);
+ void (*set_qos)(struct ieee80211_hw *hw, int aci);
+ void (*set_bcn_reg)(struct ieee80211_hw *hw);
+ void (*set_bcn_intv)(struct ieee80211_hw *hw);
+ void (*update_interrupt_mask)(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+ void (*get_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val);
+ void (*set_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val);
+ void (*update_rate_tbl)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_leve,
+ bool update_bw);
void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
u8 *desc, u8 queue_index,
struct sk_buff *skb, dma_addr_t addr);
- void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level);
+ void (*update_rate_mask)(struct ieee80211_hw *hw, u8 rssi_level);
u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw,
u8 queue_index);
void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc,
u8 queue_index);
- void (*fill_tx_desc) (struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- u8 *pbd_desc_tx,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- struct sk_buff *skb, u8 hw_queue,
- struct rtl_tcb_desc *ptcb_desc);
- void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
- u32 buffer_len, bool bIsPsPoll);
- void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
- bool firstseg, bool lastseg,
- struct sk_buff *skb);
+ void (*fill_tx_desc)(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ u8 *pbd_desc_tx,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, u8 hw_queue,
+ struct rtl_tcb_desc *ptcb_desc);
+ void (*fill_fake_txdesc)(struct ieee80211_hw *hw, u8 *pdesc,
+ u32 buffer_len, bool bsspspoll);
+ void (*fill_tx_cmddesc)(struct ieee80211_hw *hw, u8 *pdesc,
+ bool firstseg, bool lastseg,
+ struct sk_buff *skb);
void (*fill_tx_special_desc)(struct ieee80211_hw *hw,
u8 *pdesc, u8 *pbd_desc,
struct sk_buff *skb, u8 hw_queue);
- bool (*query_rx_desc) (struct ieee80211_hw *hw,
- struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status,
- u8 *pdesc, struct sk_buff *skb);
- void (*set_channel_access) (struct ieee80211_hw *hw);
- bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
- void (*dm_watchdog) (struct ieee80211_hw *hw);
- void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation);
- bool (*set_rf_power_state) (struct ieee80211_hw *hw,
- enum rf_pwrstate rfpwr_state);
- void (*led_control) (struct ieee80211_hw *hw,
- enum led_ctl_mode ledaction);
+ bool (*query_rx_desc)(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+ void (*set_channel_access)(struct ieee80211_hw *hw);
+ bool (*radio_onoff_checking)(struct ieee80211_hw *hw, u8 *valid);
+ void (*dm_watchdog)(struct ieee80211_hw *hw);
+ void (*scan_operation_backup)(struct ieee80211_hw *hw, u8 operation);
+ bool (*set_rf_power_state)(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+ void (*led_control)(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction);
void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
u64 (*get_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name);
- bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
- u8 hw_queue, u16 index);
- void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue);
- void (*enable_hw_sec) (struct ieee80211_hw *hw);
- void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
- u8 *macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all);
- void (*init_sw_leds) (struct ieee80211_hw *hw);
- void (*deinit_sw_leds) (struct ieee80211_hw *hw);
- u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
- void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
- u32 data);
- u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask);
- void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data);
- void (*linked_set_reg) (struct ieee80211_hw *hw);
- void (*chk_switch_dmdp) (struct ieee80211_hw *hw);
- void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
- void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
- bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
- void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
- u8 *powerlevel);
- void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
- bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw,
- u8 configtype);
- bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw,
- u8 configtype);
- void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
- void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
- void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
- void (*c2h_command_handle) (struct ieee80211_hw *hw);
- void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
- bool mstate);
- void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
- void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
- u32 cmd_len, u8 *p_cmdbuffer);
+ bool (*is_tx_desc_closed)(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
+ void (*tx_polling)(struct ieee80211_hw *hw, u8 hw_queue);
+ void (*enable_hw_sec)(struct ieee80211_hw *hw);
+ void (*set_key)(struct ieee80211_hw *hw, u32 key_index,
+ u8 *macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+ void (*init_sw_leds)(struct ieee80211_hw *hw);
+ void (*deinit_sw_leds)(struct ieee80211_hw *hw);
+ u32 (*get_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+ void (*set_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+ u32 data);
+ u32 (*get_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+ void (*set_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+ void (*linked_set_reg)(struct ieee80211_hw *hw);
+ void (*chk_switch_dmdp)(struct ieee80211_hw *hw);
+ void (*dualmac_easy_concurrent)(struct ieee80211_hw *hw);
+ void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw);
+ bool (*phy_rf6052_config)(struct ieee80211_hw *hw);
+ void (*phy_rf6052_set_cck_txpower)(struct ieee80211_hw *hw,
+ u8 *powerlevel);
+ void (*phy_rf6052_set_ofdm_txpower)(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+ bool (*config_bb_with_headerfile)(struct ieee80211_hw *hw,
+ u8 configtype);
+ bool (*config_bb_with_pgheaderfile)(struct ieee80211_hw *hw,
+ u8 configtype);
+ void (*phy_lc_calibrate)(struct ieee80211_hw *hw, bool is2t);
+ void (*phy_set_bw_mode_callback)(struct ieee80211_hw *hw);
+ void (*dm_dynamic_txpower)(struct ieee80211_hw *hw);
+ void (*c2h_command_handle)(struct ieee80211_hw *hw);
+ void (*bt_wifi_media_status_notify)(struct ieee80211_hw *hw,
+ bool mstate);
+ void (*bt_coex_off_before_lps)(struct ieee80211_hw *hw);
+ void (*fill_h2c_cmd)(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
void (*set_default_port_id_cmd)(struct ieee80211_hw *hw);
- bool (*get_btc_status) (void);
+ bool (*get_btc_status)(void);
bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
struct rtl_wow_pattern *rtl_pattern,
@@ -2352,24 +2336,24 @@ struct rtl_hal_ops {
struct rtl_intf_ops {
/*com */
void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
- int (*adapter_start) (struct ieee80211_hw *hw);
- void (*adapter_stop) (struct ieee80211_hw *hw);
+ int (*adapter_start)(struct ieee80211_hw *hw);
+ void (*adapter_stop)(struct ieee80211_hw *hw);
bool (*check_buddy_priv)(struct ieee80211_hw *hw,
struct rtl_priv **buddy_priv);
- int (*adapter_tx) (struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct sk_buff *skb,
- struct rtl_tcb_desc *ptcb_desc);
+ int (*adapter_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc);
void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
- int (*reset_trx_ring) (struct ieee80211_hw *hw);
- bool (*waitq_insert) (struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct sk_buff *skb);
+ int (*reset_trx_ring)(struct ieee80211_hw *hw);
+ bool (*waitq_insert)(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb);
/*pci */
- void (*disable_aspm) (struct ieee80211_hw *hw);
- void (*enable_aspm) (struct ieee80211_hw *hw);
+ void (*disable_aspm)(struct ieee80211_hw *hw);
+ void (*enable_aspm)(struct ieee80211_hw *hw);
/*usb */
};
@@ -2447,7 +2431,8 @@ struct rtl_hal_cfg {
enum rtl_spec_ver spec_ver;
/*this map used for some registers or vars
- defined int HAL but used in MAIN */
+ * defined in HAL but used in MAIN
+ */
u32 maps[RTL_VAR_MAP_MAX];
};
@@ -2609,7 +2594,8 @@ struct dig_t {
struct rtl_global_var {
/* from this list we can get
- * other adapter's rtl_priv */
+ * other adapter's rtl_priv
+ */
struct list_head glb_priv_list;
spinlock_t glb_list_lock;
};
@@ -2688,30 +2674,30 @@ struct bt_coexist_info {
};
struct rtl_btc_ops {
- void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+ void (*btc_init_variables)(struct rtl_priv *rtlpriv);
void (*btc_init_variables_wifi_only)(struct rtl_priv *rtlpriv);
void (*btc_deinit_variables)(struct rtl_priv *rtlpriv);
- void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hal_vars)(struct rtl_priv *rtlpriv);
void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
- void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hw_config)(struct rtl_priv *rtlpriv);
void (*btc_init_hw_config_wifi_only)(struct rtl_priv *rtlpriv);
- void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
+ void (*btc_ips_notify)(struct rtl_priv *rtlpriv, u8 type);
void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
- void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+ void (*btc_scan_notify)(struct rtl_priv *rtlpriv, u8 scantype);
void (*btc_scan_notify_wifi_only)(struct rtl_priv *rtlpriv,
u8 scantype);
- void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
- void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
- enum rt_media_status mstatus);
- void (*btc_periodical) (struct rtl_priv *rtlpriv);
+ void (*btc_connect_notify)(struct rtl_priv *rtlpriv, u8 action);
+ void (*btc_mediastatus_notify)(struct rtl_priv *rtlpriv,
+ enum rt_media_status mstatus);
+ void (*btc_periodical)(struct rtl_priv *rtlpriv);
void (*btc_halt_notify)(struct rtl_priv *rtlpriv);
- void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
- u8 *tmp_buf, u8 length);
+ void (*btc_btinfo_notify)(struct rtl_priv *rtlpriv,
+ u8 *tmp_buf, u8 length);
void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv,
u8 *tmp_buf, u8 length);
- bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv);
- bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv);
- bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
+ bool (*btc_is_limited_dig)(struct rtl_priv *rtlpriv);
+ bool (*btc_is_disable_edca_turbo)(struct rtl_priv *rtlpriv);
+ bool (*btc_is_bt_disabled)(struct rtl_priv *rtlpriv);
void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv,
u8 pkt_type);
void (*btc_switch_band_notify)(struct rtl_priv *rtlpriv, u8 type,
@@ -2797,16 +2783,16 @@ struct rtl_priv {
struct rtl_debug dbg;
int max_fw_size;
- /*
- *hal_cfg : for diff cards
- *intf_ops : for diff interrface usb/pcie
+ /* hal_cfg : for diff cards
+ * intf_ops : for diff interface usb/pcie
*/
struct rtl_hal_cfg *cfg;
const struct rtl_intf_ops *intf_ops;
- /*this var will be set by set_bit,
- and was used to indicate status of
- interface or hardware */
+ /* this var will be set by set_bit,
+ * and was used to indicate status of
+ * interface or hardware
+ */
unsigned long status;
/* tables for dm */
@@ -2842,10 +2828,11 @@ struct rtl_priv {
#ifdef CONFIG_PM
struct wiphy_wowlan_support wowlan;
#endif
- /*This must be the last item so
- that it points to the data allocated
- beyond this structure like:
- rtl_pci_priv or rtl_usb_priv */
+ /* This must be the last item so
+ * that it points to the data allocated
+ * beyond this structure like:
+ * rtl_pci_priv or rtl_usb_priv
+ */
u8 priv[0] __aligned(sizeof(void *));
};
@@ -2855,10 +2842,7 @@ struct rtl_priv {
#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
-
-/***************************************
- Bluetooth Co-existence Related
-****************************************/
+/* Bluetooth Co-existence Related */
enum bt_ant_num {
ANT_X2 = 0,
@@ -2907,14 +2891,13 @@ enum bt_radio_shared {
BT_RADIO_INDIVIDUAL = 1,
};
-
/****************************************
- mem access macro define start
- Call endian free function when
- 1. Read/write packet content.
- 2. Before write integer to IO.
- 3. After read integer from IO.
-****************************************/
+ * mem access macro define start
+ * Call endian free function when
+ * 1. Read/write packet content.
+ * 2. Before write integer to IO.
+ * 3. After read integer from IO.
+ ****************************************/
/* Convert little data endian to host ordering */
#define EF1BYTE(_val) \
((u8)(_val))
@@ -2970,8 +2953,9 @@ enum bt_radio_shared {
(EF1BYTE(*((u8 *)(__pstart))))
/*Description:
-Translate subfield (continuous bits in little-endian) of 4-byte
-value to host byte ordering.*/
+ * Translate subfield (continuous bits in little-endian) of 4-byte
+ * value to host byte ordering.
+ */
#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
( \
(LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
@@ -3033,9 +3017,7 @@ value to host byte ordering.*/
#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
(__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
-/****************************************
- mem access macro define end
-****************************************/
+/* mem access macro define end */
#define byte(x, n) ((x >> (8 * n)) & 0xff)
@@ -3170,7 +3152,7 @@ static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
}
static inline void rtl_set_bbreg_with_dwmask(struct ieee80211_hw *hw,
- u32 regaddr, u32 data)
+ u32 regaddr, u32 data)
{
rtl_set_bbreg(hw, regaddr, 0xffffffff, data);
}
@@ -3241,9 +3223,10 @@ static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
}
static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
- u8 *mac_addr)
+ u8 *mac_addr)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
return ieee80211_find_sta(mac->vif, mac_addr);
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
index 8c6ca8e689e4..c71b41e45423 100644
--- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -59,7 +59,7 @@ static int rsi_sdio_stats_read(struct seq_file *seq, void *data)
}
/**
- * rsi_sdio_stats_open() - This funtion calls single open function of seq_file
+ * rsi_sdio_stats_open() - This function calls single open function of seq_file
* to open file and read contents from it.
* @inode: Pointer to the inode structure.
* @file: Pointer to the file structure.
@@ -93,7 +93,7 @@ static int rsi_version_read(struct seq_file *seq, void *data)
}
/**
- * rsi_version_open() - This funtion calls single open function of seq_file to
+ * rsi_version_open() - This function calls single open function of seq_file to
* open file and read contents from it.
* @inode: Pointer to the inode structure.
* @file: Pointer to the file structure.
@@ -178,7 +178,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data)
}
/**
- * rsi_stats_open() - This funtion calls single open function of seq_file to
+ * rsi_stats_open() - This function calls single open function of seq_file to
* open file and read contents from it.
* @inode: Pointer to the inode structure.
* @file: Pointer to the file structure.
@@ -207,7 +207,7 @@ static int rsi_debug_zone_read(struct seq_file *seq, void *data)
}
/**
- * rsi_debug_read() - This funtion calls single open function of seq_file to
+ * rsi_debug_read() - This function calls single open function of seq_file to
* open file and read contents from it.
* @inode: Pointer to the inode structure.
* @file: Pointer to the file structure.
@@ -297,11 +297,6 @@ int rsi_init_dbgfs(struct rsi_hw *adapter)
dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
- if (!dev_dbgfs->subdir) {
- kfree(dev_dbgfs);
- return -ENOMEM;
- }
-
for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
files = &dev_debugfs_files[ii];
dev_dbgfs->rsi_files[ii] =
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 182b06629371..1dbaab2a96b7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -100,6 +100,9 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
mgmt_desc->frame_type = TX_DOT11_MGMT;
mgmt_desc->header_len = MIN_802_11_HDR_LEN;
mgmt_desc->xtend_desc_size = header_size - FRAME_DESC_SZ;
+
+ if (ieee80211_is_probe_req(wh->frame_control))
+ mgmt_desc->frame_info = cpu_to_le16(RSI_INSERT_SEQ_IN_FW);
mgmt_desc->frame_info |= cpu_to_le16(RATE_INFO_ENABLE);
if (is_broadcast_ether_addr(wh->addr1))
mgmt_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index e56fc83faf0e..831046e760f8 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -229,6 +229,69 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
/* sbands->ht_cap.mcs.rx_highest = 0x82; */
}
+static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct cfg80211_scan_request *scan_req = &hw_req->req;
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
+
+ rsi_dbg(INFO_ZONE, "***** Hardware scan start *****\n");
+ common->mac_ops_resumed = false;
+
+ if (common->fsm_state != FSM_MAC_INIT_DONE)
+ return -ENODEV;
+
+ if ((common->wow_flags & RSI_WOW_ENABLED) ||
+ scan_req->n_channels == 0)
+ return -EINVAL;
+
+ /* Scan already in progress. So return */
+ if (common->bgscan_en)
+ return -EBUSY;
+
+ /* If STA is not connected, return with special value 1, in order
+ * to start sw_scan in mac80211
+ */
+ if (!bss->assoc)
+ return 1;
+
+ mutex_lock(&common->mutex);
+ common->hwscan = scan_req;
+ if (!rsi_send_bgscan_params(common, RSI_START_BGSCAN)) {
+ if (!rsi_send_bgscan_probe_req(common, vif)) {
+ rsi_dbg(INFO_ZONE, "Background scan started...\n");
+ common->bgscan_en = true;
+ }
+ }
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+static void rsi_mac80211_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct cfg80211_scan_info info;
+
+ rsi_dbg(INFO_ZONE, "***** Hardware scan stop *****\n");
+ mutex_lock(&common->mutex);
+
+ if (common->bgscan_en) {
+ if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN))
+ common->bgscan_en = false;
+ info.aborted = false;
+ ieee80211_scan_completed(adapter->hw, &info);
+ rsi_dbg(INFO_ZONE, "Back ground scan cancelled\n");
+ }
+ common->hwscan = NULL;
+ mutex_unlock(&common->mutex);
+}
+
/**
* rsi_mac80211_detach() - This function is used to de-initialize the
* Mac80211 stack.
@@ -308,6 +371,10 @@ static void rsi_mac80211_tx(struct ieee80211_hw *hw,
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
+ struct ieee80211_hdr *wlh = (struct ieee80211_hdr *)skb->data;
+
+ if (ieee80211_is_auth(wlh->frame_control))
+ common->mac_ops_resumed = false;
rsi_core_xmit(common, skb);
}
@@ -615,7 +682,8 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
}
/* Power save parameters */
- if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if ((changed & IEEE80211_CONF_CHANGE_PS) &&
+ !common->mac_ops_resumed) {
struct ieee80211_vif *vif, *sta_vif = NULL;
unsigned long flags;
int i, set_ps = 1;
@@ -748,15 +816,15 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
adapter->ps_info.dtim_interval_duration = bss->dtim_period;
adapter->ps_info.listen_interval = conf->listen_interval;
- /* If U-APSD is updated, send ps parameters to firmware */
- if (bss->assoc) {
- if (common->uapsd_bitmap) {
- rsi_dbg(INFO_ZONE, "Configuring UAPSD\n");
- rsi_conf_uapsd(adapter, vif);
+ /* If U-APSD is updated, send ps parameters to firmware */
+ if (bss->assoc) {
+ if (common->uapsd_bitmap) {
+ rsi_dbg(INFO_ZONE, "Configuring UAPSD\n");
+ rsi_conf_uapsd(adapter, vif);
+ }
+ } else {
+ common->uapsd_bitmap = 0;
}
- } else {
- common->uapsd_bitmap = 0;
- }
}
if (changed & BSS_CHANGED_CQM) {
@@ -1270,7 +1338,7 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
}
/**
- * rsi_indicate_pkt_to_os() - This function sends recieved packet to mac80211.
+ * rsi_indicate_pkt_to_os() - This function sends received packet to mac80211.
* @common: Pointer to the driver private structure.
* @skb: Pointer to the socket buffer structure.
*
@@ -1833,6 +1901,10 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan)
return 0;
}
rsi_dbg(INFO_ZONE, "TRIGGERS %x\n", triggers);
+
+ if (common->coex_mode > 1)
+ rsi_disable_ps(adapter, adapter->vifs[0]);
+
rsi_send_wowlan_request(common, triggers, 1);
/**
@@ -1876,8 +1948,13 @@ static int rsi_mac80211_resume(struct ieee80211_hw *hw)
rsi_dbg(INFO_ZONE, "%s: mac80211 resume\n", __func__);
- if (common->hibernate_resume)
- return 0;
+ if (common->hibernate_resume) {
+ common->mac_ops_resumed = true;
+ /* Device needs a complete restart of all MAC operations.
+ * Returning 1 will serve this purpose.
+ */
+ return 1;
+ }
mutex_lock(&common->mutex);
rsi_send_wowlan_request(common, 0, 0);
@@ -1917,6 +1994,8 @@ static const struct ieee80211_ops mac80211_ops = {
.suspend = rsi_mac80211_suspend,
.resume = rsi_mac80211_resume,
#endif
+ .hw_scan = rsi_mac80211_hw_scan_start,
+ .cancel_hw_scan = rsi_mac80211_cancel_hw_scan,
};
/**
@@ -1999,6 +2078,9 @@ int rsi_mac80211_attach(struct rsi_common *common)
common->max_stations = wiphy->max_ap_assoc_sta;
rsi_dbg(ERR_ZONE, "Max Stations Allowed = %d\n", common->max_stations);
hw->sta_data_size = sizeof(struct rsi_sta);
+
+ wiphy->max_scan_ssids = RSI_MAX_SCAN_SSIDS;
+ wiphy->max_scan_ie_len = RSI_MAX_SCAN_IE_LEN;
wiphy->flags = WIPHY_FLAG_REPORTS_OBSS;
wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 01d99ed985ee..29d83049c5f5 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -121,11 +121,8 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
u32 pkt_len,
u8 extended_desc)
{
- struct ieee80211_tx_info *info;
struct sk_buff *skb = NULL;
u8 payload_offset;
- struct ieee80211_vif *vif;
- struct ieee80211_hdr *wh;
if (WARN(!pkt_len, "%s: Dummy pkt received", __func__))
return NULL;
@@ -144,10 +141,7 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
payload_offset = (extended_desc + FRAME_DESC_SZ);
skb_put(skb, pkt_len);
memcpy((skb->data), (buffer + payload_offset), skb->len);
- wh = (struct ieee80211_hdr *)skb->data;
- vif = rsi_get_vif(common->priv, wh->addr1);
- info = IEEE80211_SKB_CB(skb);
return skb;
}
@@ -328,6 +322,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
}
rsi_default_ps_params(adapter);
+ init_bgscan_params(common);
spin_lock_init(&adapter->ps_lock);
timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
init_completion(&common->wlan_init_completion);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 1095df7d9573..844f2fac298f 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -15,6 +15,7 @@
*/
#include <linux/etherdevice.h>
+#include <linux/timer.h>
#include "rsi_mgmt.h"
#include "rsi_common.h"
#include "rsi_ps.h"
@@ -236,6 +237,18 @@ static void rsi_set_default_parameters(struct rsi_common *common)
common->dtim_cnt = RSI_DTIM_COUNT;
}
+void init_bgscan_params(struct rsi_common *common)
+{
+ memset((u8 *)&common->bgscan, 0, sizeof(struct rsi_bgscan_params));
+ common->bgscan.bgscan_threshold = RSI_DEF_BGSCAN_THRLD;
+ common->bgscan.roam_threshold = RSI_DEF_ROAM_THRLD;
+ common->bgscan.bgscan_periodicity = RSI_BGSCAN_PERIODICITY;
+ common->bgscan.num_bgscan_channels = 0;
+ common->bgscan.two_probe = 1;
+ common->bgscan.active_scan_duration = RSI_ACTIVE_SCAN_TIME;
+ common->bgscan.passive_scan_duration = RSI_PASSIVE_SCAN_TIME;
+}
+
/**
* rsi_set_contention_vals() - This function sets the contention values for the
* backoff procedure.
@@ -396,8 +409,8 @@ static int rsi_load_radio_caps(struct rsi_common *common)
* rsi_mgmt_pkt_to_core() - This function is the entry point for Mgmt module.
* @common: Pointer to the driver private structure.
* @msg: Pointer to received packet.
- * @msg_len: Length of the recieved packet.
- * @type: Type of recieved packet.
+ * @msg_len: Length of the received packet.
+ * @type: Type of received packet.
*
* Return: 0 on success, -1 on failure.
*/
@@ -1534,7 +1547,7 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
}
/**
- * rsi_set_antenna() - This fuction send antenna configuration request
+ * rsi_set_antenna() - This function send antenna configuration request
* to device
*
* @common: Pointer to the driver private structure.
@@ -1628,6 +1641,111 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
}
#endif
+int rsi_send_bgscan_params(struct rsi_common *common, int enable)
+{
+ struct rsi_bgscan_params *params = &common->bgscan;
+ struct cfg80211_scan_request *scan_req = common->hwscan;
+ struct rsi_bgscan_config *bgscan;
+ struct sk_buff *skb;
+ u16 frame_len = sizeof(*bgscan);
+ u8 i;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending bgscan params frame\n", __func__);
+
+ skb = dev_alloc_skb(frame_len);
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, frame_len);
+
+ bgscan = (struct rsi_bgscan_config *)skb->data;
+ rsi_set_len_qno(&bgscan->desc_dword0.len_qno,
+ (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q);
+ bgscan->desc_dword0.frame_type = BG_SCAN_PARAMS;
+ bgscan->bgscan_threshold = cpu_to_le16(params->bgscan_threshold);
+ bgscan->roam_threshold = cpu_to_le16(params->roam_threshold);
+ if (enable)
+ bgscan->bgscan_periodicity =
+ cpu_to_le16(params->bgscan_periodicity);
+ bgscan->active_scan_duration =
+ cpu_to_le16(params->active_scan_duration);
+ bgscan->passive_scan_duration =
+ cpu_to_le16(params->passive_scan_duration);
+ bgscan->two_probe = params->two_probe;
+
+ bgscan->num_bgscan_channels = scan_req->n_channels;
+ for (i = 0; i < bgscan->num_bgscan_channels; i++)
+ bgscan->channels2scan[i] =
+ cpu_to_le16(scan_req->channels[i]->hw_value);
+
+ skb_put(skb, frame_len);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/* This function sends the probe request to be used by firmware in
+ * background scan
+ */
+int rsi_send_bgscan_probe_req(struct rsi_common *common,
+ struct ieee80211_vif *vif)
+{
+ struct cfg80211_scan_request *scan_req = common->hwscan;
+ struct rsi_bgscan_probe *bgscan;
+ struct sk_buff *skb;
+ struct sk_buff *probereq_skb;
+ u16 frame_len = sizeof(*bgscan);
+ size_t ssid_len = 0;
+ u8 *ssid = NULL;
+
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Sending bgscan probe req frame\n", __func__);
+
+ if (common->priv->sc_nvifs <= 0)
+ return -ENODEV;
+
+ if (scan_req->n_ssids) {
+ ssid = scan_req->ssids[0].ssid;
+ ssid_len = scan_req->ssids[0].ssid_len;
+ }
+
+ skb = dev_alloc_skb(frame_len + MAX_BGSCAN_PROBE_REQ_LEN);
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, frame_len + MAX_BGSCAN_PROBE_REQ_LEN);
+
+ bgscan = (struct rsi_bgscan_probe *)skb->data;
+ bgscan->desc_dword0.frame_type = BG_SCAN_PROBE_REQ;
+ bgscan->flags = cpu_to_le16(HOST_BG_SCAN_TRIG);
+ if (common->band == NL80211_BAND_5GHZ) {
+ bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_6);
+ bgscan->def_chan = cpu_to_le16(40);
+ } else {
+ bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_1);
+ bgscan->def_chan = cpu_to_le16(11);
+ }
+ bgscan->channel_scan_time = cpu_to_le16(RSI_CHANNEL_SCAN_TIME);
+
+ probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr, ssid,
+ ssid_len, scan_req->ie_len);
+ if (!probereq_skb) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len);
+
+ bgscan->probe_req_length = cpu_to_le16(probereq_skb->len);
+
+ rsi_set_len_qno(&bgscan->desc_dword0.len_qno,
+ (frame_len - FRAME_DESC_SZ + probereq_skb->len),
+ RSI_WIFI_MGMT_Q);
+
+ skb_put(skb, frame_len + probereq_skb->len);
+
+ dev_kfree_skb(probereq_skb);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
/**
* rsi_handle_ta_confirm_type() - This function handles the confirm frames.
* @common: Pointer to the driver private structure.
@@ -1771,9 +1889,28 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
return 0;
}
break;
+
+ case SCAN_REQUEST:
+ rsi_dbg(INFO_ZONE, "Set channel confirm\n");
+ break;
+
case WAKEUP_SLEEP_REQUEST:
rsi_dbg(INFO_ZONE, "Wakeup/Sleep confirmation.\n");
return rsi_handle_ps_confirm(adapter, msg);
+
+ case BG_SCAN_PROBE_REQ:
+ rsi_dbg(INFO_ZONE, "BG scan complete event\n");
+ if (common->bgscan_en) {
+ struct cfg80211_scan_info info;
+
+ if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN))
+ common->bgscan_en = 0;
+ info.aborted = false;
+ ieee80211_scan_completed(adapter->hw, &info);
+ }
+ rsi_dbg(INFO_ZONE, "Background scan completed\n");
+ break;
+
default:
rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n",
__func__);
@@ -1822,7 +1959,7 @@ int rsi_handle_card_ready(struct rsi_common *common, u8 *msg)
/**
* rsi_mgmt_pkt_recv() - This function processes the management packets
- * recieved from the hardware.
+ * received from the hardware.
* @common: Pointer to the driver private structure.
* @msg: Pointer to the received packet.
*
@@ -1870,6 +2007,35 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
return -1;
rsi_send_beacon(common);
break;
+ case WOWLAN_WAKEUP_REASON:
+ rsi_dbg(ERR_ZONE, "\n\nWakeup Type: %x\n", msg[15]);
+ switch (msg[15]) {
+ case RSI_UNICAST_MAGIC_PKT:
+ rsi_dbg(ERR_ZONE,
+ "*** Wakeup for Unicast magic packet ***\n");
+ break;
+ case RSI_BROADCAST_MAGICPKT:
+ rsi_dbg(ERR_ZONE,
+ "*** Wakeup for Broadcast magic packet ***\n");
+ break;
+ case RSI_EAPOL_PKT:
+ rsi_dbg(ERR_ZONE,
+ "*** Wakeup for GTK renewal ***\n");
+ break;
+ case RSI_DISCONNECT_PKT:
+ rsi_dbg(ERR_ZONE,
+ "*** Wakeup for Disconnect ***\n");
+ break;
+ case RSI_HW_BMISS_PKT:
+ rsi_dbg(ERR_ZONE,
+ "*** Wakeup for HW Beacon miss ***\n");
+ break;
+ default:
+ rsi_dbg(ERR_ZONE,
+ "##### Un-intentional Wakeup #####\n");
+ break;
+ }
+ break;
case RX_DOT11_MGMT:
return rsi_mgmt_pkt_to_core(common, msg, msg_len);
default:
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 5733e440ecaf..3430d7a0899e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -138,7 +138,7 @@ static int rsi_issue_sdiocommand(struct sdio_func *func,
}
/**
- * rsi_handle_interrupt() - This function is called upon the occurence
+ * rsi_handle_interrupt() - This function is called upon the occurrence
* of an interrupt.
* @function: Pointer to the sdio_func structure.
*
@@ -230,16 +230,19 @@ static void rsi_reset_card(struct sdio_func *pfunction)
rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
/* Issue CMD5, arg = 0 */
- err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
- (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
- if (err)
- rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err);
- card->ocr = resp;
+ if (!host->ocr_avail) {
+ err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
+ (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
+ if (err)
+ rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+ __func__, err);
+ host->ocr_avail = resp;
+ }
/* Issue CMD5, arg = ocr. Wait till card is ready */
for (i = 0; i < 100; i++) {
err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND,
- card->ocr,
+ host->ocr_avail,
(MMC_RSP_R4 | MMC_CMD_BCR), &resp);
if (err) {
rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
@@ -872,7 +875,7 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter,
goto fail;
}
- rsi_dbg(INIT_ZONE, "%s: Setup card succesfully\n", __func__);
+ rsi_dbg(INIT_ZONE, "%s: Setup card successfully\n", __func__);
status = rsi_init_sdio_slave_regs(adapter);
if (status) {
@@ -1129,6 +1132,12 @@ static void rsi_disconnect(struct sdio_func *pfunction)
rsi_mac80211_detach(adapter);
mdelay(10);
+ if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 &&
+ adapter->priv->bt_adapter) {
+ rsi_bt_ops.detach(adapter->priv->bt_adapter);
+ adapter->priv->bt_adapter = NULL;
+ }
+
/* Reset Chip */
rsi_reset_chip(adapter);
@@ -1305,6 +1314,12 @@ static int rsi_freeze(struct device *dev)
rsi_dbg(ERR_ZONE,
"##### Device can not wake up through WLAN\n");
+ if (IS_ENABLED(CONFIG_RSI_COEX) && common->coex_mode > 1 &&
+ common->bt_adapter) {
+ rsi_bt_ops.detach(common->bt_adapter);
+ common->bt_adapter = NULL;
+ }
+
ret = rsi_sdio_disable_interrupts(pfunction);
if (sdev->write_fail)
@@ -1352,6 +1367,12 @@ static void rsi_shutdown(struct device *dev)
if (rsi_config_wowlan(adapter, wowlan))
rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 &&
+ adapter->priv->bt_adapter) {
+ rsi_bt_ops.detach(adapter->priv->bt_adapter);
+ adapter->priv->bt_adapter = NULL;
+ }
+
rsi_sdio_disable_interrupts(sdev->pfunction);
if (sdev->write_fail)
@@ -1375,7 +1396,7 @@ static int rsi_restore(struct device *dev)
common->iface_down = true;
adapter->sc_nvifs = 0;
- ieee80211_restart_hw(adapter->hw);
+ adapter->ps_state = PS_NONE;
common->wow_flags = 0;
common->iface_down = false;
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index f360690396dd..ac0ef5ea6ffb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -252,7 +252,7 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
/**
* rsi_rx_done_handler() - This function is called when a packet is received
- * from USB stack. This is callback to recieve done.
+ * from USB stack. This is callback to receive done.
* @urb: Received URB.
*
* Return: None.
@@ -816,6 +816,13 @@ static void rsi_disconnect(struct usb_interface *pfunction)
return;
rsi_mac80211_detach(adapter);
+
+ if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 &&
+ adapter->priv->bt_adapter) {
+ rsi_bt_ops.detach(adapter->priv->bt_adapter);
+ adapter->priv->bt_adapter = NULL;
+ }
+
rsi_reset_card(adapter);
rsi_deinit_usb_interface(adapter);
rsi_91x_deinit(adapter);
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index a084f224bb03..35d13f35e9b0 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -164,6 +164,24 @@ struct transmit_q_stats {
u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 2];
};
+#define MAX_BGSCAN_CHANNELS_DUAL_BAND 38
+#define MAX_BGSCAN_PROBE_REQ_LEN 0x64
+#define RSI_DEF_BGSCAN_THRLD 0x0
+#define RSI_DEF_ROAM_THRLD 0xa
+#define RSI_BGSCAN_PERIODICITY 0x1e
+#define RSI_ACTIVE_SCAN_TIME 0x14
+#define RSI_PASSIVE_SCAN_TIME 0x46
+#define RSI_CHANNEL_SCAN_TIME 20
+struct rsi_bgscan_params {
+ u16 bgscan_threshold;
+ u16 roam_threshold;
+ u16 bgscan_periodicity;
+ u8 num_bgscan_channels;
+ u8 two_probe;
+ u16 active_scan_duration;
+ u16 passive_scan_duration;
+};
+
struct vif_priv {
bool is_ht;
bool sgi;
@@ -289,6 +307,11 @@ struct rsi_common {
bool eapol4_confirm;
void *bt_adapter;
+
+ struct cfg80211_scan_request *hwscan;
+ struct rsi_bgscan_params bgscan;
+ u8 bgscan_en;
+ u8 mac_ops_resumed;
};
struct eepromrw_info {
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 359fbdf85739..ea83faa15c7e 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -228,6 +228,9 @@
#define RSI_MAX_TX_AGGR_FRMS 8
#define RSI_MAX_RX_AGGR_FRMS 8
+#define RSI_MAX_SCAN_SSIDS 16
+#define RSI_MAX_SCAN_IE_LEN 256
+
enum opmode {
RSI_OPMODE_UNSUPPORTED = -1,
RSI_OPMODE_AP = 0,
@@ -623,6 +626,34 @@ struct rsi_wowlan_req {
u16 host_sleep_status;
} __packed;
+#define RSI_START_BGSCAN 1
+#define RSI_STOP_BGSCAN 0
+#define HOST_BG_SCAN_TRIG BIT(4)
+struct rsi_bgscan_config {
+ struct rsi_cmd_desc_dword0 desc_dword0;
+ __le64 reserved;
+ __le32 reserved1;
+ __le16 bgscan_threshold;
+ __le16 roam_threshold;
+ __le16 bgscan_periodicity;
+ u8 num_bgscan_channels;
+ u8 two_probe;
+ __le16 active_scan_duration;
+ __le16 passive_scan_duration;
+ __le16 channels2scan[MAX_BGSCAN_CHANNELS_DUAL_BAND];
+} __packed;
+
+struct rsi_bgscan_probe {
+ struct rsi_cmd_desc_dword0 desc_dword0;
+ __le64 reserved;
+ __le32 reserved1;
+ __le16 mgmt_rate;
+ __le16 flags;
+ __le16 def_chan;
+ __le16 channel_scan_time;
+ __le16 probe_req_length;
+} __packed;
+
static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
{
return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
@@ -694,4 +725,8 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
#endif
int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
struct ieee80211_vif *vif);
+void init_bgscan_params(struct rsi_common *common);
+int rsi_send_bgscan_params(struct rsi_common *common, int enable);
+int rsi_send_bgscan_probe_req(struct rsi_common *common,
+ struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/st/cw1200/debug.c b/drivers/net/wireless/st/cw1200/debug.c
index 2231ba08bc1f..d94266d9d0b8 100644
--- a/drivers/net/wireless/st/cw1200/debug.c
+++ b/drivers/net/wireless/st/cw1200/debug.c
@@ -371,28 +371,14 @@ int cw1200_debug_init(struct cw1200_common *priv)
d->debugfs_phy = debugfs_create_dir("cw1200",
priv->hw->wiphy->debugfsdir);
- if (!d->debugfs_phy)
- goto err;
-
- if (!debugfs_create_file("status", 0400, d->debugfs_phy,
- priv, &cw1200_status_fops))
- goto err;
-
- if (!debugfs_create_file("counters", 0400, d->debugfs_phy,
- priv, &cw1200_counters_fops))
- goto err;
-
- if (!debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy,
- priv, &fops_wsm_dumps))
- goto err;
+ debugfs_create_file("status", 0400, d->debugfs_phy, priv,
+ &cw1200_status_fops);
+ debugfs_create_file("counters", 0400, d->debugfs_phy, priv,
+ &cw1200_counters_fops);
+ debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy, priv,
+ &fops_wsm_dumps);
return 0;
-
-err:
- priv->debug = NULL;
- debugfs_remove_recursive(d->debugfs_phy);
- kfree(d);
- return ret;
}
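This hunk (and the wl1251/wl12xx/wl18xx/wlcore conversions further down) follows the current debugfs convention: callers are not expected to check the return values of debugfs_create_dir()/debugfs_create_file(), because later debugfs calls fail gracefully when handed an error-pointer parent and a missing debug file is not a reason to fail driver init. A minimal sketch of that style; my_dir, my_stats, my_show() and my_debugfs_init() are hypothetical names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "example\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_show);

static void my_debugfs_init(void)
{
	struct dentry *dir;

	/* no return-value checks and no unwind path are needed */
	dir = debugfs_create_dir("my_dir", NULL);
	debugfs_create_file("my_stats", 0400, dir, NULL, &my_show_fops);
}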
void cw1200_debug_release(struct cw1200_common *priv)
diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c
index 30e7646d04af..b7881232499c 100644
--- a/drivers/net/wireless/st/cw1200/fwio.c
+++ b/drivers/net/wireless/st/cw1200/fwio.c
@@ -465,8 +465,8 @@ int cw1200_load_firmware(struct cw1200_common *priv)
if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) {
pr_err("Device is already in QUEUE mode!\n");
- ret = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto out;
}
switch (priv->hw_type) {
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 7c31b63b8258..7895efefa95d 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -283,7 +283,6 @@ int cw1200_queue_put(struct cw1200_queue *queue,
struct cw1200_txpriv *txpriv)
{
int ret = 0;
- LIST_HEAD(gc_list);
struct cw1200_queue_stats *stats = queue->stats;
if (txpriv->link_id >= queue->stats->map_capacity)
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 0a9eac93dd01..71e9b91cf15b 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -84,8 +84,11 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
- if (!frame.skb)
+ if (!frame.skb) {
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
return -ENOMEM;
+ }
if (req->ie_len)
skb_put_data(frame.skb, req->ie, req->ie_len);
diff --git a/drivers/net/wireless/ti/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c
index 448da1f8c22f..c99b23aaa70e 100644
--- a/drivers/net/wireless/ti/wl1251/debugfs.c
+++ b/drivers/net/wireless/ti/wl1251/debugfs.c
@@ -54,11 +54,6 @@ static const struct file_operations name## _ops = { \
#define DEBUGFS_ADD(name, parent) \
wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \
wl, &name## _ops); \
- if (IS_ERR(wl->debugfs.name)) { \
- ret = PTR_ERR(wl->debugfs.name); \
- wl->debugfs.name = NULL; \
- goto out; \
- }
#define DEBUGFS_DEL(name) \
do { \
@@ -354,10 +349,8 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
DEBUGFS_DEL(excessive_retries);
}
-static int wl1251_debugfs_add_files(struct wl1251 *wl)
+static void wl1251_debugfs_add_files(struct wl1251 *wl)
{
- int ret = 0;
-
DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
@@ -453,12 +446,6 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
-
-out:
- if (ret < 0)
- wl1251_debugfs_delete_files(wl);
-
- return ret;
}
void wl1251_debugfs_reset(struct wl1251 *wl)
@@ -471,56 +458,20 @@ void wl1251_debugfs_reset(struct wl1251 *wl)
int wl1251_debugfs_init(struct wl1251 *wl)
{
- int ret;
+ wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), GFP_KERNEL);
+ if (!wl->stats.fw_stats)
+ return -ENOMEM;
wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR(wl->debugfs.rootdir)) {
- ret = PTR_ERR(wl->debugfs.rootdir);
- wl->debugfs.rootdir = NULL;
- goto err;
- }
-
wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics",
wl->debugfs.rootdir);
- if (IS_ERR(wl->debugfs.fw_statistics)) {
- ret = PTR_ERR(wl->debugfs.fw_statistics);
- wl->debugfs.fw_statistics = NULL;
- goto err_root;
- }
-
- wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
- GFP_KERNEL);
-
- if (!wl->stats.fw_stats) {
- ret = -ENOMEM;
- goto err_fw;
- }
-
wl->stats.fw_stats_update = jiffies;
- ret = wl1251_debugfs_add_files(wl);
-
- if (ret < 0)
- goto err_file;
+ wl1251_debugfs_add_files(wl);
return 0;
-
-err_file:
- kfree(wl->stats.fw_stats);
- wl->stats.fw_stats = NULL;
-
-err_fw:
- debugfs_remove(wl->debugfs.fw_statistics);
- wl->debugfs.fw_statistics = NULL;
-
-err_root:
- debugfs_remove(wl->debugfs.rootdir);
- wl->debugfs.rootdir = NULL;
-
-err:
- return ret;
}
void wl1251_debugfs_exit(struct wl1251 *wl)
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.c b/drivers/net/wireless/ti/wl12xx/debugfs.c
index 0521cbf858cf..6c3c04eca8fd 100644
--- a/drivers/net/wireless/ti/wl12xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.c
@@ -125,20 +125,10 @@ WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
int wl12xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
- int ret = 0;
- struct dentry *entry, *stats, *moddir;
+ struct dentry *stats, *moddir;
moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
- if (!moddir || IS_ERR(moddir)) {
- entry = moddir;
- goto err;
- }
-
stats = debugfs_create_dir("fw_stats", moddir);
- if (!stats || IS_ERR(stats)) {
- entry = stats;
- goto err;
- }
DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
@@ -232,12 +222,4 @@ int wl12xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
return 0;
-
-err:
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- else
- ret = -ENOMEM;
-
- return ret;
}
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 597e934c4630..5f4ec997ca59 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -422,20 +422,10 @@ static const struct file_operations radar_debug_mode_ops = {
int wl18xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
- int ret = 0;
- struct dentry *entry, *stats, *moddir;
+ struct dentry *stats, *moddir;
moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
- if (!moddir || IS_ERR(moddir)) {
- entry = moddir;
- goto err;
- }
-
stats = debugfs_create_dir("fw_stats", moddir);
- if (!stats || IS_ERR(stats)) {
- entry = stats;
- goto err;
- }
DEBUGFS_ADD(clear_fw_stats, stats);
@@ -590,12 +580,4 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(dynamic_fw_traces, moddir);
return 0;
-
-err:
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- else
- ret = -ENOMEM;
-
- return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 903968735a74..348be0aed97e 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1427,7 +1427,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_warning("could not set keys");
- goto out;
+ goto out;
}
out:
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index aeb74e74698e..68acd901d384 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -1301,11 +1301,10 @@ static const struct file_operations fw_logger_ops = {
.llseek = default_llseek,
};
-static int wl1271_debugfs_add_files(struct wl1271 *wl,
- struct dentry *rootdir)
+static void wl1271_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
{
- int ret = 0;
- struct dentry *entry, *streaming;
+ struct dentry *streaming;
DEBUGFS_ADD(tx_queue_len, rootdir);
DEBUGFS_ADD(retry_count, rootdir);
@@ -1330,23 +1329,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(fw_logger, rootdir);
streaming = debugfs_create_dir("rx_streaming", rootdir);
- if (!streaming || IS_ERR(streaming))
- goto err;
DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming);
DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming);
DEBUGFS_ADD_PREFIX(dev, mem, rootdir);
-
- return 0;
-
-err:
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- else
- ret = -ENOMEM;
-
- return ret;
}
void wl1271_debugfs_reset(struct wl1271 *wl)
@@ -1367,11 +1354,6 @@ int wl1271_debugfs_init(struct wl1271 *wl)
rootdir = debugfs_create_dir(KBUILD_MODNAME,
wl->hw->wiphy->debugfsdir);
- if (IS_ERR(rootdir)) {
- ret = PTR_ERR(rootdir);
- goto out;
- }
-
wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL);
if (!wl->stats.fw_stats) {
ret = -ENOMEM;
@@ -1380,9 +1362,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
wl->stats.fw_stats_update = jiffies;
- ret = wl1271_debugfs_add_files(wl, rootdir);
- if (ret < 0)
- goto out_exit;
+ wl1271_debugfs_add_files(wl, rootdir);
ret = wlcore_debugfs_init(wl, rootdir);
if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index bf14676e6515..a4952c4f587e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -53,19 +53,15 @@ static const struct file_operations name## _ops = { \
#define DEBUGFS_ADD(name, parent) \
do { \
- entry = debugfs_create_file(#name, 0400, parent, \
- wl, &name## _ops); \
- if (!entry || IS_ERR(entry)) \
- goto err; \
+ debugfs_create_file(#name, 0400, parent, \
+ wl, &name## _ops); \
} while (0)
#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
do { \
- entry = debugfs_create_file(#name, 0400, parent, \
+ debugfs_create_file(#name, 0400, parent, \
wl, &prefix## _## name## _ops); \
- if (!entry || IS_ERR(entry)) \
- goto err; \
} while (0)
#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 26b187336875..2e12de813a5b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1085,8 +1085,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
goto out;
ret = wl12xx_fetch_firmware(wl, plt);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ kfree(wl->fw_status);
+ kfree(wl->raw_fw_status);
+ kfree(wl->tx_res_if);
+ }
out:
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index bd10165d7eec..4d4b07701149 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
}
sdio_claim_host(func);
+ /*
+ * To guarantee that the SDIO card is power cycled, as required to make
+ * the FW programming to succeed, let's do a brute force HW reset.
+ */
+ mmc_hw_reset(card->host);
+
sdio_enable_func(func);
sdio_release_host(func);
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
struct sdio_func *func = dev_to_sdio_func(glue->dev);
struct mmc_card *card = func->card;
- int error;
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
/* Let runtime PM know the card is powered off */
- error = pm_runtime_put(&card->dev);
- if (error < 0 && error != -EBUSY) {
- dev_err(&card->dev, "%s failed: %i\n", __func__, error);
-
- return error;
- }
-
+ pm_runtime_put(&card->dev);
return 0;
}
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 64b218699656..606999f102eb 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -14,11 +14,6 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
-#include <net/cfg80211.h>
-#include <net/rtnetlink.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-
static struct wiphy *common_wiphy;
struct virt_wifi_wiphy_priv {
@@ -365,7 +360,6 @@ static struct wiphy *virt_wifi_make_wiphy(void)
wiphy->bands[NL80211_BAND_5GHZ] = &band_5ghz;
wiphy->bands[NL80211_BAND_60GHZ] = NULL;
- wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
priv = wiphy_priv(wiphy);
@@ -429,13 +423,11 @@ static int virt_wifi_net_device_open(struct net_device *dev)
static int virt_wifi_net_device_stop(struct net_device *dev)
{
struct virt_wifi_netdev_priv *n_priv = netdev_priv(dev);
- struct virt_wifi_wiphy_priv *w_priv;
n_priv->is_up = false;
if (!dev->ieee80211_ptr)
return 0;
- w_priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy);
virt_wifi_cancel_connect(dev);
@@ -530,8 +522,10 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
- if (!dev->ieee80211_ptr)
+ if (!dev->ieee80211_ptr) {
+ err = -ENOMEM;
goto remove_handler;
+ }
dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
dev->ieee80211_ptr->wiphy = common_wiphy;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 0ccb021f1e78..10d580c3dea3 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif)
if (xenvif_hash_cache_size == 0)
return;
+ BUG_ON(vif->hash.cache.count);
+
spin_lock_init(&vif->hash.cache.lock);
INIT_LIST_HEAD(&vif->hash.cache.list);
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 182d6770f102..6da12518e693 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
+ unsigned int num_queues;
+
+ /* If queues are not set up internally - always return 0
+ * as the packet going to be dropped anyway */
+ num_queues = READ_ONCE(vif->num_queues);
+ if (num_queues < 1)
+ return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
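The added num_queues check snapshots vif->num_queues with READ_ONCE() because the select-queue path runs without the locks that protect queue setup and teardown, so the value must be read once and then used consistently. A small sketch of the idiom; struct txq_owner and pick_queue() are illustrative names only:

#include <linux/compiler.h>

struct txq_owner {
	unsigned int num_queues;
};

static unsigned int pick_queue(struct txq_owner *owner, unsigned int hash)
{
	/* one snapshot of a field that can change concurrently */
	unsigned int num_queues = READ_ONCE(owner->num_queues);

	if (num_queues < 1)
		return 0;	/* queues not set up yet; packet will be dropped */

	return hash % num_queues;
}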
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 80aae3a32c2a..1d9940d4e8c7 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
skb_frag_size_set(&frags[i], len);
}
- /* Copied all the bits from the frag list -- free it. */
- skb_frag_list_init(skb);
- xenvif_skb_zerocopy_prepare(queue, nskb);
- kfree_skb(nskb);
-
/* Release all the original (foreign) frags. */
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
xenvif_fill_frags(queue, skb);
if (unlikely(skb_has_frag_list(skb))) {
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+ xenvif_skb_zerocopy_prepare(queue, nskb);
if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
kfree_skb(skb);
continue;
}
+ /* Copied all the bits from the frag list -- free it. */
+ skb_frag_list_init(skb);
+ kfree_skb(nskb);
}
skb->dev = queue->vif->dev;
@@ -1169,15 +1169,24 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
continue;
}
- skb_probe_transport_header(skb, 0);
+ skb_probe_transport_header(skb);
/* If the packet is GSO then we will have just set up the
* transport header offset in checksum_setup so it's now
* straightforward to calculate gso_segs.
*/
if (skb_is_gso(skb)) {
- int mss = skb_shinfo(skb)->gso_size;
- int hdrlen = skb_transport_header(skb) -
+ int mss, hdrlen;
+
+ /* GSO implies having the L4 header. */
+ WARN_ON_ONCE(!skb_transport_header_was_set(skb));
+ if (unlikely(!skb_transport_header_was_set(skb))) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ mss = skb_shinfo(skb)->gso_size;
+ hdrlen = skb_transport_header(skb) -
skb_mac_header(skb) +
tcp_hdrlen(skb);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 2625740bdc4a..330ddb64930f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -655,7 +655,7 @@ static void frontend_changed(struct xenbus_device *dev,
set_backend_state(be, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through if not online */
+ /* fall through - if not online */
case XenbusStateUnknown:
set_backend_state(be, XenbusStateClosed);
device_unregister(&dev->dev);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index c49ff8970ce3..e071e28bca3f 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -53,6 +53,7 @@
#include <linux/ntb.h>
#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
/* PCI device IDs */
#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
@@ -218,33 +219,4 @@ static inline int pdev_is_gen3(struct pci_dev *pdev)
return 0;
}
-#ifndef ioread64
-#ifdef readq
-#define ioread64 readq
-#else
-#define ioread64 _ioread64
-static inline u64 _ioread64(void __iomem *mmio)
-{
- u64 low, high;
-
- low = ioread32(mmio);
- high = ioread32(mmio + sizeof(u32));
- return low | (high << 32);
-}
-#endif
-#endif
-
-#ifndef iowrite64
-#ifdef writeq
-#define iowrite64 writeq
-#else
-#define iowrite64 _iowrite64
-static inline void _iowrite64(u64 val, void __iomem *mmio)
-{
- iowrite32(val, mmio);
- iowrite32(val >> 32, mmio + sizeof(u32));
-}
-#endif
-#endif
-
#endif
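Here (and in the Switchtec driver below) the open-coded ioread64()/iowrite64() fallbacks are dropped in favour of linux/io-64-nonatomic-lo-hi.h, which provides the same semantics generically: where the platform has no native 64-bit MMIO accessor, the access is split into two 32-bit operations, low word first. A hedged usage sketch; example_read_counter() is a made-up helper:

#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>

static u64 example_read_counter(void __iomem *mmio)
{
	/*
	 * Expands to a native 64-bit read where available, otherwise to two
	 * 32-bit reads (offset 0 then offset 4), matching the helpers that
	 * were deleted above.
	 */
	return ioread64(mmio);
}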
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 5ee5f40b4dfc..f2df2d39c65b 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -13,13 +13,14 @@
*
*/
-#include <linux/switchtec.h>
-#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/delay.h>
#include <linux/kthread.h>
-#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/pci.h>
+#include <linux/switchtec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
@@ -36,35 +37,6 @@ module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
"Enable the use of the LUT based memory windows");
-#ifndef ioread64
-#ifdef readq
-#define ioread64 readq
-#else
-#define ioread64 _ioread64
-static inline u64 _ioread64(void __iomem *mmio)
-{
- u64 low, high;
-
- low = ioread32(mmio);
- high = ioread32(mmio + sizeof(u32));
- return low | (high << 32);
-}
-#endif
-#endif
-
-#ifndef iowrite64
-#ifdef writeq
-#define iowrite64 writeq
-#else
-#define iowrite64 _iowrite64
-static inline void _iowrite64(u64 val, void __iomem *mmio)
-{
- iowrite32(val, mmio);
- iowrite32(val >> 32, mmio + sizeof(u32));
-}
-#endif
-#endif
-
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128
@@ -1339,10 +1311,10 @@ static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
int rc;
sndev->nr_rsvd_luts++;
- sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
- LUT_SIZE,
- &sndev->self_shared_dma,
- GFP_KERNEL);
+ sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
+ LUT_SIZE,
+ &sndev->self_shared_dma,
+ GFP_KERNEL);
if (!sndev->self_shared) {
dev_err(&sndev->stdev->dev,
"unable to allocate memory for shared mw\n");
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index dca5f7a805cb..7bbff0af29b2 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -23,6 +23,7 @@
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -534,11 +535,15 @@ void __nd_device_register(struct device *dev)
set_dev_node(dev, to_nd_region(dev)->numa_node);
dev->bus = &nvdimm_bus_type;
- if (dev->parent)
+ if (dev->parent) {
get_device(dev->parent);
+ if (dev_to_node(dev) == NUMA_NO_NODE)
+ set_dev_node(dev, dev_to_node(dev->parent));
+ }
get_device(dev);
- async_schedule_domain(nd_async_device_register, dev,
- &nd_async_domain);
+
+ async_schedule_dev_domain(nd_async_device_register, dev,
+ &nd_async_domain);
}
void nd_device_register(struct device *dev)
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 0cf58cabc9ed..3cf50274fadb 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -26,6 +26,12 @@ static int nvdimm_probe(struct device *dev)
struct nvdimm_drvdata *ndd;
int rc;
+ rc = nvdimm_security_setup_events(dev);
+ if (rc < 0) {
+ dev_err(dev, "security event setup failed: %d\n", rc);
+ return rc;
+ }
+
rc = nvdimm_check_config_data(dev);
if (rc) {
/* not required for non-aliased nvdimm, ex. NVDIMM-N */
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 4890310df874..efe412a6b5b9 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -578,13 +578,25 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(__nvdimm_create);
-int nvdimm_security_setup_events(struct nvdimm *nvdimm)
+static void shutdown_security_notify(void *data)
{
- nvdimm->sec.overwrite_state = sysfs_get_dirent(nvdimm->dev.kobj.sd,
- "security");
+ struct nvdimm *nvdimm = data;
+
+ sysfs_put(nvdimm->sec.overwrite_state);
+}
+
+int nvdimm_security_setup_events(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ if (nvdimm->sec.state < 0 || !nvdimm->sec.ops
+ || !nvdimm->sec.ops->overwrite)
+ return 0;
+ nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
if (!nvdimm->sec.overwrite_state)
- return -ENODEV;
- return 0;
+ return -ENOMEM;
+
+ return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
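nvdimm_security_setup_events() now pairs the sysfs_get_dirent() reference with devm_add_action_or_reset(), so the reference is released automatically when the device is unbound, or immediately if registering the action fails. A minimal sketch of that pattern; struct foo_state, foo_setup() and foo_release() are hypothetical names:

#include <linux/device.h>

struct foo_state {
	void *handle;
};

static void foo_release(void *data)
{
	struct foo_state *st = data;

	/* undo whatever foo_setup() acquired */
	st->handle = NULL;
}

static int foo_setup(struct device *dev, struct foo_state *st)
{
	st->handle = dev;	/* stands in for the acquired reference */

	/* runs foo_release() right away if registration fails */
	return devm_add_action_or_reset(dev, foo_release, st);
}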
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 2b2cf4e554d3..e5ffd5733540 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -54,12 +54,12 @@ struct nvdimm {
};
static inline enum nvdimm_security_state nvdimm_security_state(
- struct nvdimm *nvdimm, bool master)
+ struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
if (!nvdimm->sec.ops)
return -ENXIO;
- return nvdimm->sec.ops->state(nvdimm, master);
+ return nvdimm->sec.ops->state(nvdimm, ptype);
}
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index cfde992684e7..379bf4305e61 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -250,6 +250,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
+int nvdimm_security_setup_events(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 08f2c92602f4..6a9dd68c0f4f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1253,6 +1253,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
* effects say only one namespace is affected.
*/
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ mutex_lock(&ctrl->scan_lock);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
}
@@ -1281,8 +1282,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
*/
if (effects & NVME_CMD_EFFECTS_LBCC)
nvme_update_formats(ctrl);
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
nvme_unfreeze(ctrl);
+ mutex_unlock(&ctrl->scan_lock);
+ }
if (effects & NVME_CMD_EFFECTS_CCC)
nvme_init_identify(ctrl);
if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -2173,18 +2176,20 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
size_t nqnlen;
int off;
- nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
- if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
- return;
- }
+ if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
+ nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
+ if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
+ strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+ return;
+ }
- if (ctrl->vs >= NVME_VS(1, 2, 1))
- dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
+ if (ctrl->vs >= NVME_VS(1, 2, 1))
+ dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
+ }
/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
- "nqn.2014.08.org.nvmexpress:%4x%4x",
+ "nqn.2014.08.org.nvmexpress:%04x%04x",
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
off += sizeof(id->sn);
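The format-string change from "%4x%4x" to "%04x%04x" matters because, without the zero flag, short vendor/subsystem IDs are space-padded and those spaces land inside the generated NQN. A tiny userspace illustration with arbitrary example IDs:

#include <stdio.h>

int main(void)
{
	char old_nqn[64], new_nqn[64];

	snprintf(old_nqn, sizeof(old_nqn),
		 "nqn.2014.08.org.nvmexpress:%4x%4x", 0x8086, 0x2);
	snprintf(new_nqn, sizeof(new_nqn),
		 "nqn.2014.08.org.nvmexpress:%04x%04x", 0x8086, 0x2);

	printf("%s\n", old_nqn);	/* ...nvmexpress:8086   2 */
	printf("%s\n", new_nqn);	/* ...nvmexpress:80860002 */
	return 0;
}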
@@ -2500,7 +2505,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->oaes = le32_to_cpu(id->oaes);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
- ctrl->cntlid = le16_to_cpup(&id->cntlid);
if (id->mdts)
max_hw_sectors = 1 << (id->mdts + page_shift - 9);
else
@@ -3400,6 +3404,7 @@ static void nvme_scan_work(struct work_struct *work)
if (nvme_identify_ctrl(ctrl, &id))
return;
+ mutex_lock(&ctrl->scan_lock);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3408,6 +3413,7 @@ static void nvme_scan_work(struct work_struct *work)
}
nvme_scan_ns_sequential(ctrl, nn);
out_free_id:
+ mutex_unlock(&ctrl->scan_lock);
kfree(id);
down_write(&ctrl->namespaces_rwsem);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3651,6 +3657,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->state = NVME_CTRL_NEW;
spin_lock_init(&ctrl->lock);
+ mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index b2ab213f43de..3eb908c50e1a 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -874,6 +874,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (opts->discovery_nqn) {
opts->kato = 0;
opts->nr_io_queues = 0;
+ opts->nr_write_queues = 0;
+ opts->nr_poll_queues = 0;
opts->duplicate_connect = true;
}
if (ctrl_loss_tmo < 0)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 183ec17ba067..b9fff3b8ed1b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
- if (!(ctrl->anacap & (1 << 6)))
- ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+ ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
dev_err(ctrl->device,
@@ -570,6 +569,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
return 0;
out_free_ana_log_buf:
kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = NULL;
out:
return error;
}
@@ -577,5 +577,6 @@ out:
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = NULL;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2b36ac922596..c4a1bb41abf0 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -90,6 +90,11 @@ enum nvme_quirks {
* Set MEDIUM priority on SQ creation
*/
NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
+
+ /*
+ * Ignore device provided subnqn.
+ */
+ NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
};
/*
@@ -149,6 +154,7 @@ struct nvme_ctrl {
enum nvme_ctrl_state state;
bool identified;
spinlock_t lock;
+ struct mutex scan_lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
struct request_queue *connect_q;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a0bf6a24d50..e905861186e3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -95,6 +95,7 @@ struct nvme_dev;
struct nvme_queue;
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
@@ -1019,9 +1020,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
- if (++nvmeq->cq_head == nvmeq->q_depth) {
+ if (nvmeq->cq_head == nvmeq->q_depth - 1) {
nvmeq->cq_head = 0;
nvmeq->cq_phase = !nvmeq->cq_phase;
+ } else {
+ nvmeq->cq_head++;
}
}
@@ -1420,6 +1423,14 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
return 0;
}
+static void nvme_suspend_io_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+ nvme_suspend_queue(&dev->queues[i]);
+}
+
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
struct nvme_queue *nvmeq = &dev->queues[0];
@@ -1485,8 +1496,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
if (dev->ctrl.queue_count > qid)
return 0;
- nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
- &nvmeq->cq_dma_addr, GFP_KERNEL);
+ nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
if (!nvmeq->cqes)
goto free_nvmeq;
@@ -1885,8 +1896,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
- dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
- le64_to_cpu(desc->addr));
+ dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
+ le64_to_cpu(desc->addr),
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}
kfree(dev->host_mem_desc_bufs);
@@ -1915,8 +1927,8 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
max_entries = dev->ctrl.hmmaxd;
- descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
- &descs_dma, GFP_KERNEL);
+ descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
+ &descs_dma, GFP_KERNEL);
if (!descs)
goto out;
@@ -1952,8 +1964,9 @@ out_free_bufs:
while (--i >= 0) {
size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
- dma_free_coherent(dev->dev, size, bufs[i],
- le64_to_cpu(descs[i].addr));
+ dma_free_attrs(dev->dev, size, bufs[i],
+ le64_to_cpu(descs[i].addr),
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}
kfree(bufs);
@@ -2028,49 +2041,52 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
return ret;
}
-static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
+/*
+ * nirqs is the number of interrupts available for write and read
+ * queues. The core already reserved an interrupt for the admin queue.
+ */
+static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
{
- unsigned int this_w_queues = write_queues;
-
- /*
- * Setup read/write queue split
- */
- if (irq_queues == 1) {
- dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
- dev->io_queues[HCTX_TYPE_READ] = 0;
- return;
- }
-
- /*
- * If 'write_queues' is set, ensure it leaves room for at least
- * one read queue
- */
- if (this_w_queues >= irq_queues)
- this_w_queues = irq_queues - 1;
+ struct nvme_dev *dev = affd->priv;
+ unsigned int nr_read_queues;
/*
- * If 'write_queues' is set to zero, reads and writes will share
- * a queue set.
+ * If there is no interrupt available for queues, ensure that
+ * the default queue is set to 1. The affinity set size is
+ * also set to one, but the irq core ignores it for this case.
+ *
+ * If only one interrupt is available or 'write_queue' == 0, combine
+ * write and read queues.
+ *
+ * If 'write_queues' > 0, ensure it leaves room for at least one read
+ * queue.
*/
- if (!this_w_queues) {
- dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues;
- dev->io_queues[HCTX_TYPE_READ] = 0;
+ if (!nrirqs) {
+ nrirqs = 1;
+ nr_read_queues = 0;
+ } else if (nrirqs == 1 || !write_queues) {
+ nr_read_queues = 0;
+ } else if (write_queues >= nrirqs) {
+ nr_read_queues = 1;
} else {
- dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
- dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues;
+ nr_read_queues = nrirqs - write_queues;
}
+
+ dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+ affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+ dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
+ affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
+ affd->nr_sets = nr_read_queues ? 2 : 1;
}
static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int irq_sets[2];
struct irq_affinity affd = {
- .pre_vectors = 1,
- .nr_sets = ARRAY_SIZE(irq_sets),
- .sets = irq_sets,
+ .pre_vectors = 1,
+ .calc_sets = nvme_calc_irq_sets,
+ .priv = dev,
};
- int result = 0;
unsigned int irq_queues, this_p_queues;
/*
@@ -2082,54 +2098,22 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
this_p_queues = nr_io_queues - 1;
irq_queues = 1;
} else {
- irq_queues = nr_io_queues - this_p_queues;
+ irq_queues = nr_io_queues - this_p_queues + 1;
}
dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
- /*
- * For irq sets, we have to ask for minvec == maxvec. This passes
- * any reduction back to us, so we can adjust our queue counts and
- * IRQ vector needs.
- */
- do {
- nvme_calc_io_queues(dev, irq_queues);
- irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT];
- irq_sets[1] = dev->io_queues[HCTX_TYPE_READ];
- if (!irq_sets[1])
- affd.nr_sets = 1;
-
- /*
- * If we got a failure and we're down to asking for just
- * 1 + 1 queues, just ask for a single vector. We'll share
- * that between the single IO queue and the admin queue.
- */
- if (result >= 0 && irq_queues > 1)
- irq_queues = irq_sets[0] + irq_sets[1] + 1;
-
- result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
- irq_queues,
- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ /* Initialize for the single interrupt case */
+ dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
+ dev->io_queues[HCTX_TYPE_READ] = 0;
- /*
- * Need to reduce our vec counts. If we get ENOSPC, the
- * platform should support mulitple vecs, we just need
- * to decrease our ask. If we get EINVAL, the platform
- * likely does not. Back down to ask for just one vector.
- */
- if (result == -ENOSPC) {
- irq_queues--;
- if (!irq_queues)
- return result;
- continue;
- } else if (result == -EINVAL) {
- irq_queues = 1;
- continue;
- } else if (result <= 0)
- return -EIO;
- break;
- } while (1);
+ return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+}
- return result;
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+ if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
+ __nvme_disable_io_queues(dev, nvme_admin_delete_cq);
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
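To make the new nvme_calc_irq_sets() split above concrete, here is a small userspace sketch that mirrors its branch structure and prints the resulting default/read queue counts for a few sample inputs; it is an illustration only, not the driver code:

#include <stdio.h>

static void split(unsigned int nrirqs, unsigned int write_queues)
{
	unsigned int nr_read_queues;

	if (!nrirqs) {
		nrirqs = 1;
		nr_read_queues = 0;
	} else if (nrirqs == 1 || !write_queues) {
		nr_read_queues = 0;
	} else if (write_queues >= nrirqs) {
		nr_read_queues = 1;
	} else {
		nr_read_queues = nrirqs - write_queues;
	}

	printf("nrirqs=%u write_queues=%u -> default=%u read=%u\n",
	       nrirqs, write_queues, nrirqs - nr_read_queues, nr_read_queues);
}

int main(void)
{
	split(8, 0);	/* default=8 read=0: reads and writes share one set */
	split(8, 3);	/* default=3 read=5 */
	split(4, 6);	/* default=3 read=1: always leave one read queue */
	return 0;
}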
@@ -2168,6 +2152,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
} while (1);
adminq->q_db = dev->dbs;
+ retry:
/* Deregister the admin queue's interrupt */
pci_free_irq(pdev, 0, adminq);
@@ -2185,25 +2170,34 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
result = max(result - 1, 1);
dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
- dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
- dev->io_queues[HCTX_TYPE_DEFAULT],
- dev->io_queues[HCTX_TYPE_READ],
- dev->io_queues[HCTX_TYPE_POLL]);
-
/*
* Should investigate if there's a performance win from allocating
* more queues than interrupt vectors; it might allow the submission
* path to scale better, even if the receive path is limited by the
* number of interrupts.
*/
-
result = queue_request_irq(adminq);
if (result) {
adminq->cq_vector = -1;
return result;
}
set_bit(NVMEQ_ENABLED, &adminq->flags);
- return nvme_create_io_queues(dev);
+
+ result = nvme_create_io_queues(dev);
+ if (result || dev->online_queues < 2)
+ return result;
+
+ if (dev->online_queues - 1 < dev->max_qid) {
+ nr_io_queues = dev->online_queues - 1;
+ nvme_disable_io_queues(dev);
+ nvme_suspend_io_queues(dev);
+ goto retry;
+ }
+ dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
+ dev->io_queues[HCTX_TYPE_DEFAULT],
+ dev->io_queues[HCTX_TYPE_READ],
+ dev->io_queues[HCTX_TYPE_POLL]);
+ return 0;
}
static void nvme_del_queue_end(struct request *req, blk_status_t error)
@@ -2248,7 +2242,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
+static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
{
int nr_queues = dev->online_queues - 1, sent = 0;
unsigned long timeout;
@@ -2294,7 +2288,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.nr_maps = 2; /* default + read */
if (dev->io_queues[HCTX_TYPE_POLL])
dev->tagset.nr_maps++;
- dev->tagset.nr_maps = HCTX_MAX_TYPES;
dev->tagset.timeout = NVME_IO_TIMEOUT;
dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
@@ -2410,7 +2403,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i;
bool dead = true;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -2437,13 +2429,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_stop_queues(&dev->ctrl);
if (!dead && dev->ctrl.queue_count > 0) {
- if (nvme_disable_io_queues(dev, nvme_admin_delete_sq))
- nvme_disable_io_queues(dev, nvme_admin_delete_cq);
+ nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
- for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
- nvme_suspend_queue(&dev->queues[i]);
-
+ nvme_suspend_io_queues(dev);
+ nvme_suspend_queue(&dev->queues[0]);
nvme_pci_disable(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2527,27 +2517,18 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
- /*
- * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
- * initializing procedure here.
- */
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
- dev_warn(dev->ctrl.device,
- "failed to mark controller CONNECTING\n");
- goto out;
- }
-
+ mutex_lock(&dev->shutdown_lock);
result = nvme_pci_enable(dev);
if (result)
- goto out;
+ goto out_unlock;
result = nvme_pci_configure_admin_queue(dev);
if (result)
- goto out;
+ goto out_unlock;
result = nvme_alloc_admin_tags(dev);
if (result)
- goto out;
+ goto out_unlock;
/*
* Limit the max command size to prevent iod->sg allocations going
@@ -2555,6 +2536,17 @@ static void nvme_reset_work(struct work_struct *work)
*/
dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
dev->ctrl.max_segments = NVME_MAX_SEGS;
+ mutex_unlock(&dev->shutdown_lock);
+
+ /*
+ * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
+ * initializing procedure here.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller CONNECTING\n");
+ goto out;
+ }
result = nvme_init_identify(&dev->ctrl);
if (result)
@@ -2619,6 +2611,8 @@ static void nvme_reset_work(struct work_struct *work)
nvme_start_ctrl(&dev->ctrl);
return;
+ out_unlock:
+ mutex_unlock(&dev->shutdown_lock);
out:
nvme_remove_dead_ctrl(dev, result);
}
@@ -2946,6 +2940,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_MEDIUM_PRIO_SQ },
+ { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
@@ -2988,6 +2984,7 @@ static struct pci_driver nvme_driver = {
static int __init nvme_init(void)
{
+ BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
return pci_register_driver(&nvme_driver);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0a2fd2949ad7..52abc3a6de12 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -119,6 +119,7 @@ struct nvme_rdma_ctrl {
struct nvme_ctrl ctrl;
bool use_inline_data;
+ u32 io_queues[HCTX_MAX_TYPES];
};
static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
{
return nvme_rdma_queue_idx(queue) >
- queue->ctrl->ctrl.opts->nr_io_queues +
- queue->ctrl->ctrl.opts->nr_write_queues;
+ queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ queue->ctrl->io_queues[HCTX_TYPE_READ];
}
static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
@@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
nr_io_queues = min_t(unsigned int, nr_io_queues,
ibdev->num_comp_vectors);
- nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
- nr_io_queues += min(opts->nr_poll_queues, num_online_cpus());
+ if (opts->nr_write_queues) {
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_write_queues, nr_io_queues);
+ nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
+ }
+
+ ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
+
+ if (opts->nr_poll_queues) {
+ ctrl->io_queues[HCTX_TYPE_POLL] =
+ min(opts->nr_poll_queues, num_online_cpus());
+ nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
+ }
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret)
@@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- dev_warn(req->queue->ctrl->ctrl.device,
- "I/O %d QID %d timeout, reset controller\n",
- rq->tag, nvme_rdma_queue_idx(req->queue));
+ dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+ rq->tag, nvme_rdma_queue_idx(queue));
- /* queue error recovery */
- nvme_rdma_error_recovery(req->queue->ctrl);
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+ /*
+ * Teardown immediately if controller times out while starting
+ * or we are already started error recovery. all outstanding
+ * requests are completed on shutdown, so we return BLK_EH_DONE.
+ */
+ flush_work(&ctrl->err_work);
+ nvme_rdma_teardown_io_queues(ctrl, false);
+ nvme_rdma_teardown_admin_queue(ctrl, false);
+ return BLK_EH_DONE;
+ }
- /* fail with DNR on cmd timeout */
- nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+ dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+ nvme_rdma_error_recovery(ctrl);
- return BLK_EH_DONE;
+ return BLK_EH_RESET_TIMER;
}
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
struct nvme_rdma_ctrl *ctrl = set->driver_data;
set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
if (ctrl->ctrl.opts->nr_write_queues) {
/* separate read/write queues */
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->ctrl.opts->nr_write_queues;
set->map[HCTX_TYPE_READ].queue_offset =
- ctrl->ctrl.opts->nr_write_queues;
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
} else {
/* mixed read/write queues */
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->ctrl.opts->nr_io_queues;
set->map[HCTX_TYPE_READ].queue_offset = 0;
}
blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
if (ctrl->ctrl.opts->nr_poll_queues) {
set->map[HCTX_TYPE_POLL].nr_queues =
- ctrl->ctrl.opts->nr_poll_queues;
+ ctrl->io_queues[HCTX_TYPE_POLL];
set->map[HCTX_TYPE_POLL].queue_offset =
- ctrl->ctrl.opts->nr_io_queues;
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
if (ctrl->ctrl.opts->nr_write_queues)
set->map[HCTX_TYPE_POLL].queue_offset +=
- ctrl->ctrl.opts->nr_write_queues;
+ ctrl->io_queues[HCTX_TYPE_READ];
blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
}
return 0;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index de174912445e..5f0a00425242 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1565,8 +1565,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_io_queues(ctrl);
if (remove) {
- if (ctrl->ops->flags & NVME_F_FABRICS)
- blk_cleanup_queue(ctrl->connect_q);
+ blk_cleanup_queue(ctrl->connect_q);
blk_mq_free_tag_set(ctrl->tagset);
}
nvme_tcp_free_io_queues(ctrl);
@@ -1587,12 +1586,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_free_io_queues;
}
- if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ctrl->connect_q)) {
- ret = PTR_ERR(ctrl->connect_q);
- goto out_free_tag_set;
- }
+ ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
+ if (IS_ERR(ctrl->connect_q)) {
+ ret = PTR_ERR(ctrl->connect_q);
+ goto out_free_tag_set;
}
} else {
blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -1606,7 +1603,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
return 0;
out_cleanup_connect_q:
- if (new && (ctrl->ops->flags & NVME_F_FABRICS))
+ if (new)
blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
if (new)
@@ -1620,7 +1617,6 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_queue(ctrl, 0);
if (remove) {
- free_opal_dev(ctrl->opal_dev);
blk_cleanup_queue(ctrl->admin_q);
blk_mq_free_tag_set(ctrl->admin_tagset);
}
@@ -1952,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
- dev_dbg(ctrl->ctrl.device,
+ dev_warn(ctrl->ctrl.device,
"queue %d: timeout request %#x type %d\n",
- nvme_tcp_queue_id(req->queue), rq->tag,
- pdu->hdr.type);
+ nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
- union nvme_result res = {};
-
- nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
- nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res);
+ /*
+ * Teardown immediately if controller times out while starting
+ * or we are already started error recovery. all outstanding
+ * requests are completed on shutdown, so we return BLK_EH_DONE.
+ */
+ flush_work(&ctrl->err_work);
+ nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
+ nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
return BLK_EH_DONE;
}
- /* queue error recovery */
+ dev_warn(ctrl->ctrl.device, "starting error recovery\n");
nvme_tcp_error_recovery(&ctrl->ctrl);
return BLK_EH_RESET_TIMER;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index a8d23eb80192..a884e3a0e8af 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r);
static const struct nvmet_fabrics_ops nvmet_rdma_ops;
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
spin_unlock_irqrestore(&queue->rsps_lock, flags);
if (unlikely(!rsp)) {
- rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+ int ret;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
if (unlikely(!rsp))
return NULL;
+ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+ if (unlikely(ret)) {
+ kfree(rsp);
+ return NULL;
+ }
+
rsp->allocated = true;
}
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
unsigned long flags;
if (unlikely(rsp->allocated)) {
+ nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
kfree(rsp);
return;
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 44b37b202e39..ad0df786fe93 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1089,7 +1089,7 @@ out:
static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
- int result;
+ int result = 0;
if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
return 0;
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 0a7a470ee859..530d570724c9 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -26,7 +26,7 @@ config NVMEM_IMX_IIM
config NVMEM_IMX_OCOTP
tristate "i.MX6 On-Chip OTP Controller support"
- depends on SOC_IMX6 || COMPILE_TEST
+ depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST
depends on HAS_IOMEM
help
This is a driver for the On-Chip OTP Controller (OCOTP) available on
@@ -192,4 +192,14 @@ config SC27XX_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem-sc27xx-efuse.
+config NVMEM_ZYNQMP
+ bool "Xilinx ZYNQMP SoC nvmem firmware support"
+ depends on ARCH_ZYNQMP
+ help
+ This is a driver to access hardware related data like
+ soc revision, IDCODE... etc by using the firmware
+ interface.
+
+ If sure, say yes. If unsure, say no.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 4e8c61628f1a..2ece8ffffdda 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -41,3 +41,5 @@ obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
nvmem-sc27xx-efuse-y := sc27xx-efuse.o
+obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
+nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 4159b3f41d79..a8097511582a 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -11,13 +11,14 @@
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
/*
@@ -78,9 +79,9 @@ static struct otpc_map otp_map_v2 = {
};
struct otpc_priv {
- struct device *dev;
- void __iomem *base;
- struct otpc_map *map;
+ struct device *dev;
+ void __iomem *base;
+ const struct otpc_map *map;
struct nvmem_config *config;
};
@@ -237,16 +238,22 @@ static struct nvmem_config bcm_otpc_nvmem_config = {
};
static const struct of_device_id bcm_otpc_dt_ids[] = {
- { .compatible = "brcm,ocotp" },
- { .compatible = "brcm,ocotp-v2" },
+ { .compatible = "brcm,ocotp", .data = &otp_map },
+ { .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
+static const struct acpi_device_id bcm_otpc_acpi_ids[] = {
+ { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
+ { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids);
+
static int bcm_otpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *dn = dev->of_node;
struct resource *res;
struct otpc_priv *priv;
struct nvmem_device *nvmem;
@@ -257,14 +264,9 @@ static int bcm_otpc_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- if (of_device_is_compatible(dev->of_node, "brcm,ocotp"))
- priv->map = &otp_map;
- else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2"))
- priv->map = &otp_map_v2;
- else {
- dev_err(dev, "%s otpc config map not defined\n", __func__);
- return -EINVAL;
- }
+ priv->map = device_get_match_data(dev);
+ if (!priv->map)
+ return -ENODEV;
/* Get OTP base address register. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -281,7 +283,7 @@ static int bcm_otpc_probe(struct platform_device *pdev)
reset_start_bit(priv->base);
/* Read size of memory in words. */
- err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words);
+ err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words);
if (err) {
dev_err(dev, "size parameter not specified\n");
return -EINVAL;
@@ -294,7 +296,7 @@ static int bcm_otpc_probe(struct platform_device *pdev)
bcm_otpc_nvmem_config.dev = dev;
bcm_otpc_nvmem_config.priv = priv;
- if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) {
+ if (priv->map == &otp_map_v2) {
bcm_otpc_nvmem_config.word_size = 8;
bcm_otpc_nvmem_config.stride = 8;
}
@@ -315,6 +317,7 @@ static struct platform_driver bcm_otpc_driver = {
.driver = {
.name = "brcm-otpc",
.of_match_table = bcm_otpc_dt_ids,
+ .acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids),
},
};
module_platform_driver(bcm_otpc_driver);
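The probe path above now pulls the per-variant map from the match tables via device_get_match_data(), which works for both the OF compatibles and the new ACPI IDs. A generic sketch of that pattern; the vendor,demo compatibles, struct variant_cfg and demo_probe() are illustrative names:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct variant_cfg {
	unsigned int word_size;
};

static const struct variant_cfg cfg_v1 = { .word_size = 4 };
static const struct variant_cfg cfg_v2 = { .word_size = 8 };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo",    .data = &cfg_v1 },
	{ .compatible = "vendor,demo-v2", .data = &cfg_v2 },
	{ }
};

static int demo_probe(struct platform_device *pdev)
{
	/* returns the .data of whichever OF/ACPI entry matched the device */
	const struct variant_cfg *cfg = device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;

	return 0;
}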
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index f7301bb4ef3b..f24008b66826 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -525,12 +525,14 @@ out:
static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
- struct nvmem_cell *cell = NULL;
+ struct nvmem_cell *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
- list_for_each_entry(cell, &nvmem->cells, node) {
- if (strcmp(cell_id, cell->name) == 0)
+ list_for_each_entry(iter, &nvmem->cells, node) {
+ if (strcmp(cell_id, iter->name) == 0) {
+ cell = iter;
break;
+ }
}
mutex_unlock(&nvmem_mutex);
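The switch to a separate iter variable in nvmem_find_cell_by_name() (and in nvmem_find_cell_by_node() below) avoids a classic list_for_each_entry() pitfall: if the loop runs to completion, the loop cursor points at a bogus container_of() of the list head rather than NULL, so it must not be returned directly. A small sketch of the safe shape; struct item and find_item() are illustrative names:

#include <linux/list.h>
#include <linux/string.h>

struct item {
	struct list_head node;
	const char *name;
};

static struct item *find_item(struct list_head *items, const char *name)
{
	struct item *iter, *found = NULL;

	list_for_each_entry(iter, items, node) {
		if (!strcmp(iter->name, name)) {
			found = iter;	/* only assign on a real match */
			break;
		}
	}

	return found;	/* NULL when nothing matched */
}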
@@ -646,8 +648,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
config->name ? config->id : nvmem->id);
}
- nvmem->read_only = device_property_present(config->dev, "read-only") |
- config->read_only;
+ nvmem->read_only = device_property_present(config->dev, "read-only") ||
+ config->read_only || !nvmem->reg_write;
if (config->root_only)
nvmem->dev.groups = nvmem->read_only ?
@@ -686,9 +688,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
- rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
- if (rval)
- goto err_remove_cells;
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
@@ -809,6 +809,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
"could not increase module refcount for cell %s\n",
nvmem_dev_name(nvmem));
+ put_device(&nvmem->dev);
return ERR_PTR(-EINVAL);
}
@@ -819,6 +820,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
+ put_device(&nvmem->dev);
module_put(nvmem->owner);
kref_put(&nvmem->refcnt, nvmem_device_release);
}
@@ -837,13 +839,14 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
struct device_node *nvmem_np;
- int index;
+ int index = 0;
- index = of_property_match_string(np, "nvmem-names", id);
+ if (id)
+ index = of_property_match_string(np, "nvmem-names", id);
nvmem_np = of_parse_phandle(np, "nvmem", index);
if (!nvmem_np)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOENT);
return __nvmem_device_get(nvmem_np, NULL);
}
@@ -871,7 +874,7 @@ struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
}
- return nvmem_find(dev_name);
+ return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
@@ -972,7 +975,7 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
if (IS_ERR(nvmem)) {
/* Provider may not be registered yet. */
cell = ERR_CAST(nvmem);
- goto out;
+ break;
}
cell = nvmem_find_cell_by_name(nvmem,
@@ -980,12 +983,11 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
if (!cell) {
__nvmem_device_put(nvmem);
cell = ERR_PTR(-ENOENT);
- goto out;
}
+ break;
}
}
-out:
mutex_unlock(&nvmem_lookup_mutex);
return cell;
}
@@ -994,12 +996,14 @@ out:
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
- struct nvmem_cell *cell = NULL;
+ struct nvmem_cell *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
- list_for_each_entry(cell, &nvmem->cells, node) {
- if (np == cell->np)
+ list_for_each_entry(iter, &nvmem->cells, node) {
+ if (np == iter->np) {
+ cell = iter;
break;
+ }
}
mutex_unlock(&nvmem_mutex);
@@ -1031,7 +1035,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
cell_np = of_parse_phandle(np, "nvmem-cells", index);
if (!cell_np)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOENT);
nvmem_np = of_get_next_parent(cell_np);
if (!nvmem_np)
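The nvmem_find_cell_by_name() and nvmem_find_cell_by_node() rewrites above exist because the list_for_each_entry() cursor is not a valid entry once the loop terminates without a match; at that point it aliases the list head. A minimal sketch of the safe shape, with a hypothetical struct cell:

#include <linux/list.h>
#include <linux/string.h>

struct cell {				/* hypothetical element type */
	struct list_head node;
	const char *name;
};

static struct cell *find_cell(struct list_head *cells, const char *id)
{
	struct cell *iter, *found = NULL;

	list_for_each_entry(iter, cells, node) {
		if (!strcmp(iter->name, id)) {
			found = iter;	/* record the match explicitly */
			break;
		}
	}
	/* If the loop ran to completion, 'iter' is not a real entry;
	 * 'found' is either a matching cell or NULL. */
	return found;
}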
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index afb429a417fe..08a9b1ef8ae4 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -427,19 +427,32 @@ static const struct ocotp_params imx6ul_params = {
.set_timing = imx_ocotp_set_imx6_timing,
};
+static const struct ocotp_params imx6ull_params = {
+ .nregs = 64,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
static const struct ocotp_params imx7d_params = {
.nregs = 64,
.bank_address_words = 4,
.set_timing = imx_ocotp_set_imx7_timing,
};
+static const struct ocotp_params imx7ulp_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
{ .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
{ .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
+ { .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params },
{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
+ { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index 33185d8d82cf..c6ee21018d80 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -106,10 +106,12 @@ static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits)
static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
{
struct sc27xx_efuse *efuse = context;
- u32 buf;
+ u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH;
+ u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
int ret;
- if (offset > SC27XX_EFUSE_BLOCK_MAX || bytes > SC27XX_EFUSE_BLOCK_WIDTH)
+ if (blk_index > SC27XX_EFUSE_BLOCK_MAX ||
+ bytes > SC27XX_EFUSE_BLOCK_WIDTH)
return -EINVAL;
ret = sc27xx_efuse_lock(efuse);
@@ -133,7 +135,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
/* Set the block address to be read. */
ret = regmap_write(efuse->regmap,
efuse->base + SC27XX_EFUSE_BLOCK_INDEX,
- offset & SC27XX_EFUSE_BLOCK_MASK);
+ blk_index & SC27XX_EFUSE_BLOCK_MASK);
if (ret)
goto disable_efuse;
@@ -171,8 +173,10 @@ disable_efuse:
unlock_efuse:
sc27xx_efuse_unlock(efuse);
- if (!ret)
+ if (!ret) {
+ buf >>= blk_offset;
memcpy(val, &buf, bytes);
+ }
return ret;
}
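The sc27xx read path above now splits the byte offset into a block index and an intra-block shift instead of treating the offset itself as a block number. Assuming a block width of 2 bytes (an assumption for this example, not taken from the driver), offset 5 reads block 5 / 2 = 2 and shifts the returned word right by (5 % 2) * 8 = 8 bits before copying out the requested bytes. The arithmetic in isolation:

/* Sketch: map a byte offset onto (block index, bit shift); the block
 * width of 2 bytes is assumed for illustration. */
#define EXAMPLE_BLOCK_WIDTH	2

static void offset_to_block(unsigned int offset,
			    unsigned int *blk, unsigned int *shift)
{
	*blk = offset / EXAMPLE_BLOCK_WIDTH;		/* which block to address */
	*shift = (offset % EXAMPLE_BLOCK_WIDTH) * 8;	/* bits to drop from the read word */
}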
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
new file mode 100644
index 000000000000..490c8fcaec80
--- /dev/null
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define SILICON_REVISION_MASK 0xF
+
+struct zynqmp_nvmem_data {
+ struct device *dev;
+ struct nvmem_device *nvmem;
+};
+
+static int zynqmp_nvmem_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ int ret;
+ int idcode, version;
+ struct zynqmp_nvmem_data *priv = context;
+
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->get_chipid)
+ return -ENXIO;
+
+ ret = eemi_ops->get_chipid(&idcode, &version);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+
+ return 0;
+}
+
+static struct nvmem_config econfig = {
+ .name = "zynqmp-nvmem",
+ .owner = THIS_MODULE,
+ .word_size = 1,
+ .size = 1,
+ .read_only = true,
+};
+
+static const struct of_device_id zynqmp_nvmem_match[] = {
+ { .compatible = "xlnx,zynqmp-nvmem-fw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
+
+static int zynqmp_nvmem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynqmp_nvmem_data *priv;
+
+ priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ econfig.dev = dev;
+ econfig.reg_read = zynqmp_nvmem_read;
+ econfig.priv = priv;
+
+ priv->nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(priv->nvmem);
+}
+
+static struct platform_driver zynqmp_nvmem_driver = {
+ .probe = zynqmp_nvmem_probe,
+ .driver = {
+ .name = "zynqmp-nvmem",
+ .of_match_table = zynqmp_nvmem_match,
+ },
+};
+
+module_platform_driver(zynqmp_nvmem_driver);
+
+MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>, Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("ZynqMP NVMEM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index a09c1c3cf831..49b16f76d78e 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np)
if (!of_node_check_flag(np, OF_OVERLAY)) {
np->name = __of_get_property(np, "name", NULL);
- np->type = __of_get_property(np, "device_type", NULL);
if (!np->name)
np->name = "<NULL>";
- if (!np->type)
- np->type = "<NULL>";
phandle = __of_get_property(np, "phandle", &sz);
if (!phandle)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 7099c652c6a5..9cc1461aac7d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,12 +314,8 @@ static bool populate_node(const void *blob,
populate_properties(blob, offset, mem, np, pathp, dryrun);
if (!dryrun) {
np->name = of_get_property(np, "name", NULL);
- np->type = of_get_property(np, "device_type", NULL);
-
if (!np->name)
np->name = "<NULL>";
- if (!np->type)
- np->type = "<NULL>";
}
*pnp = np;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5ad1342f5682..de6157357e26 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -16,7 +16,6 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
@@ -463,7 +462,6 @@ int of_phy_register_fixed_link(struct device_node *np)
struct device_node *fixed_link_node;
u32 fixed_link_prop[5];
const char *managed;
- int link_gpio = -1;
if (of_property_read_string(np, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
@@ -485,11 +483,7 @@ int of_phy_register_fixed_link(struct device_node *np)
status.pause = of_property_read_bool(fixed_link_node, "pause");
status.asym_pause = of_property_read_bool(fixed_link_node,
"asym-pause");
- link_gpio = of_get_named_gpio_flags(fixed_link_node,
- "link-gpios", 0, NULL);
of_node_put(fixed_link_node);
- if (link_gpio == -EPROBE_DEFER)
- return -EPROBE_DEFER;
goto register_phy;
}
@@ -508,8 +502,7 @@ int of_phy_register_fixed_link(struct device_node *np)
return -ENODEV;
register_phy:
- return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, link_gpio,
- np));
+ return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np));
}
EXPORT_SYMBOL(of_phy_register_fixed_link);
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 2b5ac43a5690..c423e94baf0f 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
tchild->parent = target->np;
tchild->name = __of_get_property(node, "name", NULL);
- tchild->type = __of_get_property(node, "device_type", NULL);
if (!tchild->name)
tchild->name = "<NULL>";
- if (!tchild->type)
- tchild->type = "<NULL>";
/* ignore obsolete "linux,phandle" */
phandle = __of_get_property(node, "phandle", &size);
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index d3185063d369..7eda43c66c91 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
dp->parent = parent;
dp->name = of_pdt_get_one_property(node, "name");
- dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
dp->properties = of_pdt_build_prop_list(node);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 08430031bd28..8631efa1daa1 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
if (!of_device_is_available(remote)) {
pr_debug("not available for remote node\n");
+ of_node_put(remote);
return NULL;
}
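The one-line of_graph_get_remote_node() fix above restores refcount balance: the remote node obtained earlier in that function carries a reference, so the not-available early return must drop it like every other exit path. The discipline in general form, using a hypothetical "remote" phandle property:

#include <linux/of.h>

/* Sketch of the reference-count discipline: any device_node obtained from
 * a get-style OF helper must be released with of_node_put() on all paths. */
static int example_check_remote(struct device_node *np)
{
	struct device_node *remote = of_parse_phandle(np, "remote", 0);

	if (!remote)
		return -ENODEV;

	if (!of_device_is_available(remote)) {
		of_node_put(remote);	/* drop the reference on the error path too */
		return -ENODEV;
	}

	of_node_put(remote);
	return 0;
}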
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e5507add8f04..d7f97167cac3 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -131,6 +131,24 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
+ * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
+ * @opp: opp for which the level value has to be returned
+ *
+ * Return: level read from the device tree corresponding to the opp, else
+ * return 0.
+ */
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+ if (IS_ERR_OR_NULL(opp) || !opp->available) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ return opp->level;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
+
+/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
*
@@ -533,9 +551,8 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return ret;
}
-static inline int
-_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
- unsigned long old_freq, unsigned long freq)
+static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+ unsigned long freq)
{
int ret;
@@ -572,7 +589,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
}
/* Change frequency */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
if (ret)
goto restore_voltage;
@@ -586,7 +603,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
return 0;
restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
+ if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
__func__, old_freq);
restore_voltage:
@@ -759,7 +776,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
opp->supplies);
} else {
/* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ ret = _generic_set_opp_clk_only(dev, clk, freq);
}
/* Scaling down? Configure required OPPs after frequency */
@@ -793,7 +810,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
struct opp_table *opp_table)
{
struct opp_device *opp_dev;
- int ret;
opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
if (!opp_dev)
@@ -805,10 +821,7 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
list_add(&opp_dev->node, &opp_table->dev_list);
/* Create debugfs entries for the opp_table */
- ret = opp_debug_register(opp_dev, opp_table);
- if (ret)
- dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
- __func__, ret);
+ opp_debug_register(opp_dev, opp_table);
return opp_dev;
}
@@ -988,11 +1001,9 @@ void _opp_free(struct dev_pm_opp *opp)
kfree(opp);
}
-static void _opp_kref_release(struct kref *kref)
+static void _opp_kref_release(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
{
- struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
- struct opp_table *opp_table = opp->opp_table;
-
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
@@ -1002,7 +1013,22 @@ static void _opp_kref_release(struct kref *kref)
opp_debug_remove_one(opp);
list_del(&opp->node);
kfree(opp);
+}
+
+static void _opp_kref_release_unlocked(struct kref *kref)
+{
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+ struct opp_table *opp_table = opp->opp_table;
+ _opp_kref_release(opp, opp_table);
+}
+
+static void _opp_kref_release_locked(struct kref *kref)
+{
+ struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+ struct opp_table *opp_table = opp->opp_table;
+
+ _opp_kref_release(opp, opp_table);
mutex_unlock(&opp_table->lock);
}
@@ -1013,10 +1039,16 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
- kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
+ kref_put_mutex(&opp->kref, _opp_kref_release_locked,
+ &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
+static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
+{
+ kref_put(&opp->kref, _opp_kref_release_unlocked);
+}
+
/**
* dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
@@ -1060,6 +1092,40 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+/**
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
+ * @dev: device for which we do this operation
+ *
+ * This function removes all dynamically created OPPs from the opp table.
+ */
+void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp, *temp;
+ int count = 0;
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return;
+
+ mutex_lock(&opp_table->lock);
+ list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
+ if (opp->dynamic) {
+ dev_pm_opp_put_unlocked(opp);
+ count++;
+ }
+ }
+ mutex_unlock(&opp_table->lock);
+
+ /* Drop the references taken by dev_pm_opp_add() */
+ while (count--)
+ dev_pm_opp_put_opp_table(opp_table);
+
+ /* Drop the reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
+
struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
struct dev_pm_opp *opp;
@@ -1176,10 +1242,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
new_opp->opp_table = opp_table;
kref_init(&new_opp->kref);
- ret = opp_debug_create_one(new_opp, opp_table);
- if (ret)
- dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
- __func__, ret);
+ opp_debug_create_one(new_opp, opp_table);
if (!_opp_supported_by_regulators(new_opp, opp_table)) {
new_opp->available = false;
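The _opp_kref_release() split above is needed because dev_pm_opp_put() acquires the table lock through kref_put_mutex(), while the new dev_pm_opp_remove_all_dynamic() walks the list with the lock already held and must drop references with a plain kref_put(). A condensed sketch of the two call shapes (obj, obj_put and friends are hypothetical):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {				/* hypothetical refcounted object */
	struct kref kref;
	struct mutex *lock;		/* protects the list the object sits on */
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, kref));
}

static void obj_release_locked(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, kref);
	struct mutex *lock = o->lock;	/* save before the object is freed */

	obj_release(kref);
	mutex_unlock(lock);		/* kref_put_mutex() left the lock held */
}

/* Caller does not hold the lock. */
static void obj_put(struct obj *o)
{
	kref_put_mutex(&o->kref, obj_release_locked, o->lock);
}

/* Caller already holds the lock, e.g. while iterating the list. */
static void obj_put_locked(struct obj *o)
{
	kref_put(&o->kref, obj_release);
}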
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index e6828e5f81b0..a1c57fe14de4 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -35,7 +35,7 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
-static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
{
@@ -50,30 +50,21 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);
- if (!d)
- return false;
+ debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+ &opp->supplies[i].u_volt);
- if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
- &opp->supplies[i].u_volt))
- return false;
+ debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+ &opp->supplies[i].u_volt_min);
- if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
- &opp->supplies[i].u_volt_min))
- return false;
+ debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+ &opp->supplies[i].u_volt_max);
- if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
- &opp->supplies[i].u_volt_max))
- return false;
-
- if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
- &opp->supplies[i].u_amp))
- return false;
+ debugfs_create_ulong("u_amp", S_IRUGO, d,
+ &opp->supplies[i].u_amp);
}
-
- return true;
}
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
struct dentry *pdentry = opp_table->dentry;
struct dentry *d;
@@ -95,40 +86,23 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);
- if (!d)
- return -ENOMEM;
-
- if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
- return -ENOMEM;
-
- if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
- return -ENOMEM;
-
- if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
- return -ENOMEM;
-
- if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
- return -ENOMEM;
-
- if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate))
- return -ENOMEM;
- if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
- return -ENOMEM;
+ debugfs_create_bool("available", S_IRUGO, d, &opp->available);
+ debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic);
+ debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
+ debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
+ debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
+ debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
+ debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+ &opp->clock_latency_ns);
- if (!opp_debug_create_supplies(opp, opp_table, d))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
- &opp->clock_latency_ns))
- return -ENOMEM;
+ opp_debug_create_supplies(opp, opp_table, d);
opp->dentry = d;
- return 0;
}
-static int opp_list_debug_create_dir(struct opp_device *opp_dev,
- struct opp_table *opp_table)
+static void opp_list_debug_create_dir(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
const struct device *dev = opp_dev->dev;
struct dentry *d;
@@ -137,36 +111,21 @@ static int opp_list_debug_create_dir(struct opp_device *opp_dev,
/* Create device specific directory */
d = debugfs_create_dir(opp_table->dentry_name, rootdir);
- if (!d) {
- dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
- return -ENOMEM;
- }
opp_dev->dentry = d;
opp_table->dentry = d;
-
- return 0;
}
-static int opp_list_debug_create_link(struct opp_device *opp_dev,
- struct opp_table *opp_table)
+static void opp_list_debug_create_link(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- const struct device *dev = opp_dev->dev;
char name[NAME_MAX];
- struct dentry *d;
opp_set_dev_name(opp_dev->dev, name);
/* Create device specific directory link */
- d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
- if (!d) {
- dev_err(dev, "%s: Failed to create link\n", __func__);
- return -ENOMEM;
- }
-
- opp_dev->dentry = d;
-
- return 0;
+ opp_dev->dentry = debugfs_create_symlink(name, rootdir,
+ opp_table->dentry_name);
}
/**
@@ -177,20 +136,13 @@ static int opp_list_debug_create_link(struct opp_device *opp_dev,
* Dynamically adds device specific directory in debugfs 'opp' directory. If the
* device-opp is shared with other devices, then links will be created for all
* devices except the first.
- *
- * Return: 0 on success, otherwise negative error.
*/
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
{
- if (!rootdir) {
- pr_debug("%s: Uninitialized rootdir\n", __func__);
- return -EINVAL;
- }
-
if (opp_table->dentry)
- return opp_list_debug_create_link(opp_dev, opp_table);
-
- return opp_list_debug_create_dir(opp_dev, opp_table);
+ opp_list_debug_create_link(opp_dev, opp_table);
+ else
+ opp_list_debug_create_dir(opp_dev, opp_table);
}
static void opp_migrate_dentry(struct opp_device *opp_dev,
@@ -252,10 +204,6 @@ static int __init opp_debug_init(void)
{
/* Create /sys/kernel/debug/opp directory */
rootdir = debugfs_create_dir("opp", NULL);
- if (!rootdir) {
- pr_err("%s: Failed to create root directory\n", __func__);
- return -ENOMEM;
- }
return 0;
}
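The debugfs conversion above follows the rule that debugfs creation failures are not worth propagating: the entries are diagnostic only, so the helpers become void and callers stop checking. In sketch form (names hypothetical):

#include <linux/debugfs.h>
#include <linux/stat.h>

static bool example_flag;

static void example_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	/* Failures are deliberately ignored; if debugfs is unavailable the
	 * driver keeps working, it just loses its diagnostics. */
	debugfs_create_bool("flag", S_IRUGO, dir, &example_flag);
}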
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 06f0f632ec47..62504b18f198 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -20,6 +20,7 @@
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/energy_model.h>
#include "opp.h"
@@ -594,6 +595,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
new_opp->rate = (unsigned long)rate;
}
+ of_property_read_u32(np, "opp-level", &new_opp->level);
+
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
@@ -1047,3 +1050,101 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
return of_node_get(opp->np);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
+
+/*
+ * Callback function provided to the Energy Model framework upon registration.
+ * This computes the power estimated by @cpu at @kHz if it is the frequency
+ * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
+ * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
+ * frequency and @mW to the associated power. The power is estimated as
+ * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
+ * the voltage and frequency of the OPP.
+ *
+ * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
+ * calculation failed because of missing parameters, 0 otherwise.
+ */
+static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
+ int cpu)
+{
+ struct device *cpu_dev;
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long mV, Hz;
+ u32 cap;
+ u64 tmp;
+ int ret;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+ of_node_put(np);
+ if (ret)
+ return -EINVAL;
+
+ Hz = *kHz * 1000;
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+ if (IS_ERR(opp))
+ return -EINVAL;
+
+ mV = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
+ if (!mV)
+ return -EINVAL;
+
+ tmp = (u64)cap * mV * mV * (Hz / 1000000);
+ do_div(tmp, 1000000000);
+
+ *mW = (unsigned long)tmp;
+ *kHz = Hz / 1000;
+
+ return 0;
+}
+
+/**
+ * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
+ * @cpus : CPUs for which an Energy Model has to be registered
+ *
+ * This checks whether the "dynamic-power-coefficient" devicetree property has
+ * been specified, and tries to register an Energy Model with it if it has.
+ */
+void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
+ int ret, nr_opp, cpu = cpumask_first(cpus);
+ struct device *cpu_dev;
+ struct device_node *np;
+ u32 cap;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return;
+
+ nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+ if (nr_opp <= 0)
+ return;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return;
+
+ /*
+ * Register an EM only if the 'dynamic-power-coefficient' property is
+ * set in devicetree. It is assumed that the voltage values are known if
+ * that property is set, since the coefficient is useless otherwise. If
+ * voltages are not known, just let the EM registration fail with an error
+ * to alert the user about the inconsistent configuration.
+ */
+ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+ of_node_put(np);
+ if (ret || !cap)
+ return;
+
+ em_register_perf_domain(cpus, nr_opp, &em_cb);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
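To make the estimate in _get_cpu_power() concrete: assuming a dynamic-power-coefficient of 100 and an OPP at 1.2 GHz and 900 mV (illustrative numbers, not from any real platform), the fixed-point computation is 100 * 900 * 900 * 1200 / 10^9, about 97 mW. The same arithmetic transcribed directly:

#include <linux/types.h>
#include <asm/div64.h>

/* Worked example of P = C * V^2 * f with the same scaling as
 * _get_cpu_power(); all numbers are illustrative. */
static unsigned long example_cpu_power_mw(void)
{
	u32 cap = 100;			/* dynamic-power-coefficient */
	unsigned long mV = 900;		/* OPP voltage in millivolts */
	unsigned long Hz = 1200000000UL;/* OPP frequency, 1.2 GHz */
	u64 tmp = (u64)cap * mV * mV * (Hz / 1000000);

	do_div(tmp, 1000000000);
	return (unsigned long)tmp;	/* 97, i.e. roughly 97 mW */
}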
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index e24d81497375..569b3525aa67 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -60,6 +60,7 @@ extern struct list_head opp_tables;
* @suspend: true if suspend OPP
* @pstate: Device's power domain's performance state.
* @rate: Frequency in hertz
+ * @level: Performance level
* @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
@@ -80,6 +81,7 @@ struct dev_pm_opp {
bool suspend;
unsigned int pstate;
unsigned long rate;
+ unsigned int level;
struct dev_pm_opp_supply *supplies;
@@ -236,18 +238,17 @@ static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
#ifdef CONFIG_DEBUG_FS
void opp_debug_remove_one(struct dev_pm_opp *opp);
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
#else
static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
-static inline int opp_debug_create_one(struct dev_pm_opp *opp,
- struct opp_table *opp_table)
-{ return 0; }
-static inline int opp_debug_register(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{ return 0; }
+static inline void opp_debug_create_one(struct dev_pm_opp *opp,
+ struct opp_table *opp_table) { }
+
+static inline void opp_debug_register(struct opp_device *opp_dev,
+ struct opp_table *opp_table) { }
static inline void opp_debug_unregister(struct opp_device *opp_dev,
struct opp_table *opp_table)
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 8d2fc84119c6..097b0d43d13c 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -55,6 +55,8 @@
#include <asm/hardware.h> /* for register_module() */
#include <asm/parisc-device.h>
+#include "iommu.h"
+
/*
** Choose "ccio" since that's what HP-UX calls it.
** Make it easier for folks to migrate from one to the other :^)
@@ -1517,6 +1519,7 @@ static int __init ccio_probe(struct parisc_device *dev)
{
int i;
struct ioc *ioc, **ioc_p = &ioc_list;
+ struct pci_hba_data *hba;
ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
if (ioc == NULL) {
@@ -1543,11 +1546,13 @@ static int __init ccio_probe(struct parisc_device *dev)
ccio_ioc_init(ioc);
ccio_init_resources(ioc);
hppa_dma_ops = &ccio_ops;
- dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL);
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL);
/* if this fails, no I/O cards will work, so may as well bug */
- BUG_ON(dev->dev.platform_data == NULL);
- HBA_DATA(dev->dev.platform_data)->iommu = ioc;
+ BUG_ON(hba == NULL);
+
+ hba->iommu = ioc;
+ dev->dev.platform_data = hba;
#ifdef CONFIG_PROC_FS
if (ioc_count == 0) {
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index dfeea458a789..846b59d15999 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -59,6 +59,7 @@
#include <asm/hardware.h>
#include "gsc.h"
+#include "iommu.h"
#undef DINO_DEBUG
@@ -153,12 +154,10 @@ struct dino_device
#endif
};
-/* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ({ \
- void *__pdata = d; \
- BUG_ON(!__pdata); \
- (struct dino_device *)__pdata; })
-
+static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
+{
+ return container_of(hba, struct dino_device, hba);
+}
/*
* Dino Configuration Space Accessor Functions
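The DINO_DEV() and LBA_DEV() conversions above replace an unchecked cast of platform_data with a container_of() accessor, which recovers the enclosing device structure from its embedded pci_hba_data member and is type-checked by the compiler. The general shape, with hypothetical types:

#include <linux/kernel.h>

struct hba {				/* hypothetical embedded member */
	unsigned long base;
};

struct bridge {				/* hypothetical container */
	int flags;
	struct hba hba;
};

static inline struct bridge *to_bridge(struct hba *hba)
{
	/* Walk back from the embedded member to the structure containing it. */
	return container_of(hba, struct bridge, hba);
}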
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 9ff434f354bd..5657a1d3eb2b 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -45,6 +45,8 @@
#include <asm/eisa_bus.h>
#include <asm/eisa_eeprom.h>
+#include "iommu.h"
+
#if 0
#define EISA_DBG(msg, arg...) printk(KERN_DEBUG "eisa: " msg, ## arg)
#else
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index ebc7b617e5d0..3b3481c0d81d 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -23,6 +23,8 @@
#include <asm/hardware.h>
#include <asm/parisc-device.h>
+#include "iommu.h"
+
struct hppb_card {
unsigned long hpa;
struct resource mmio_region;
diff --git a/drivers/parisc/iommu.h b/drivers/parisc/iommu.h
new file mode 100644
index 000000000000..240059cd8185
--- /dev/null
+++ b/drivers/parisc/iommu.h
@@ -0,0 +1,55 @@
+#ifndef _IOMMU_H
+#define _IOMMU_H 1
+
+#include <linux/pci.h>
+
+struct parisc_device;
+struct ioc;
+
+static inline struct pci_hba_data *parisc_walk_tree(struct device *dev)
+{
+ struct device *otherdev;
+
+ if (likely(dev->platform_data))
+ return dev->platform_data;
+
+ /* OK, just traverse the bus to find it */
+ for (otherdev = dev->parent;
+ otherdev;
+ otherdev = otherdev->parent) {
+ if (otherdev->platform_data) {
+ dev->platform_data = otherdev->platform_data;
+ break;
+ }
+ }
+
+ return dev->platform_data;
+}
+
+static inline struct ioc *GET_IOC(struct device *dev)
+{
+ struct pci_hba_data *pdata = parisc_walk_tree(dev);
+
+ if (!pdata)
+ return NULL;
+ return pdata->iommu;
+}
+
+#ifdef CONFIG_IOMMU_CCIO
+void *ccio_get_iommu(const struct parisc_device *dev);
+int ccio_request_resource(const struct parisc_device *dev,
+ struct resource *res);
+int ccio_allocate_resource(const struct parisc_device *dev,
+ struct resource *res, unsigned long size,
+ unsigned long min, unsigned long max, unsigned long align);
+#else /* !CONFIG_IOMMU_CCIO */
+#define ccio_get_iommu(dev) NULL
+#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
+#define ccio_allocate_resource(dev, res, size, min, max, align) \
+ allocate_resource(&iomem_resource, res, size, min, max, \
+ align, NULL, NULL)
+#endif /* !CONFIG_IOMMU_CCIO */
+
+void *sba_get_iommu(struct parisc_device *dev);
+
+#endif /* _IOMMU_H */
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 144c77dfe4b1..1be571c20062 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -126,21 +126,10 @@
** o disable IRdT - call disable_irq(vector[line]->processor_irq)
*/
-
-/* FIXME: determine which include files are really needed */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <asm/byteorder.h> /* get in-line asm for swab */
#include <asm/pdc.h>
#include <asm/pdcpat.h>
-#include <asm/page.h>
-#include <asm/io.h> /* read/write functions */
#ifdef CONFIG_SUPERIO
#include <asm/superio.h>
#endif
@@ -168,12 +157,8 @@
#define DBG_IRT(x...)
#endif
-#ifdef CONFIG_64BIT
-#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa))
-#else
#define COMPARE_IRTE_ADDR(irte, hpa) \
- ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
-#endif
+ ((irte)->dest_iosapic_addr == F_EXTEND(hpa))
#define IOSAPIC_REG_SELECT 0x00
#define IOSAPIC_REG_WINDOW 0x10
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 69bd98421eb1..d4701589bc8c 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -49,6 +49,8 @@
#include <asm/parisc-device.h>
#include <asm/io.h> /* read/write stuff */
+#include "iommu.h"
+
#undef DEBUG_LBA /* general stuff */
#undef DEBUG_LBA_PORT /* debug I/O Port access */
#undef DEBUG_LBA_CFG /* debug Config Space Access (ie PCI Bus walk) */
@@ -109,12 +111,10 @@ static u32 lba_t32;
#define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)
-
-/* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ({ \
- void *__pdata = d; \
- BUG_ON(!__pdata); \
- (struct lba_device *)__pdata; })
+static inline struct lba_device *LBA_DEV(struct pci_hba_data *hba)
+{
+ return container_of(hba, struct lba_device, hba);
+}
/*
** Only allow 8 subsidiary busses per LBA
@@ -1275,7 +1275,7 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
r->flags = IORESOURCE_MEM;
/* mmio_mask also clears Enable bit */
r->start &= mmio_mask;
- r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
+ r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start);
rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);
/*
@@ -1321,7 +1321,7 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
r->flags = IORESOURCE_MEM;
/* mmio_mask also clears Enable bit */
r->start &= mmio_mask;
- r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
+ r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start);
rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
r->end = r->start + ~rsize;
}
@@ -1562,7 +1562,7 @@ lba_driver_probe(struct parisc_device *dev)
/* ------------ Second : initialize common stuff ---------- */
pci_bios = &lba_bios_ops;
- pcibios_register_hba(HBA_DATA(lba_dev));
+ pcibios_register_hba(&lba_dev->hba);
spin_lock_init(&lba_dev->lba_lock);
if (lba_hw_init(lba_dev))
@@ -1743,3 +1743,15 @@ static void quirk_diva_aux_disable(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
quirk_diva_aux_disable);
+
+static void quirk_tosca_aux_disable(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+ dev->subsystem_device != 0x104a)
+ return;
+
+ dev_info(&dev->dev, "Hiding Tosca secondary built-in AUX serial device");
+ dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA,
+ quirk_tosca_aux_disable);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 42172eb32235..afaf8e6aefe6 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -49,6 +49,8 @@
#include <asm/pdcpat.h> /* for is_pdc_pat() */
#include <asm/parisc-device.h>
+#include "iommu.h"
+
#define MODULE_NAME "SBA"
/*
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 5484a46dafda..56dd83a45e55 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -213,10 +213,12 @@ void parport_daisy_fini(struct parport *port)
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
+ struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
+ memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
@@ -230,7 +232,7 @@ struct pardevice *parport_open(int devnum, const char *name)
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
- dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
+ dev = parport_register_dev_model(port, name, &par_cb, devnum);
parport_put_port(port);
if (!dev)
return NULL;
@@ -480,3 +482,31 @@ static int assign_addrs(struct parport *port)
kfree(deviceid);
return detected;
}
+
+static int daisy_drv_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+
+ if (strcmp(drv->name, "daisy_drv"))
+ return -ENODEV;
+ if (strcmp(par_dev->name, daisy_dev_name))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver daisy_driver = {
+ .name = "daisy_drv",
+ .probe = daisy_drv_probe,
+ .devmodel = true,
+};
+
+int daisy_drv_init(void)
+{
+ return parport_register_driver(&daisy_driver);
+}
+
+void daisy_drv_exit(void)
+{
+ parport_unregister_driver(&daisy_driver);
+}
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 9c8249f74479..6296dbb83d47 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
{
int i;
for (i = 0; i < NR_SUPERIOS; i++)
- if (superios[i].io != p->base)
+ if (superios[i].io == p->base)
return &superios[i];
return NULL;
}
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e035174ba205..e5e6a463a941 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
- struct pardevice *dev = parport_open (devnum, "Device ID probe");
+ struct pardevice *dev = parport_open(devnum, daisy_dev_name);
if (!dev)
return -ENXIO;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 5dc53d420ca8..0171b8dbcdcd 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -137,11 +137,19 @@ static struct bus_type parport_bus_type = {
int parport_bus_init(void)
{
- return bus_register(&parport_bus_type);
+ int retval;
+
+ retval = bus_register(&parport_bus_type);
+ if (retval)
+ return retval;
+ daisy_drv_init();
+
+ return 0;
}
void parport_bus_exit(void)
{
+ daisy_drv_exit();
bus_unregister(&parport_bus_type);
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 4310c7a4212e..2ab92409210a 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,13 +21,14 @@ menuconfig PCI
support for PCI-X and the foundations for PCI Express support.
Say 'Y' here unless you know what you are doing.
+if PCI
+
config PCI_DOMAINS
bool
depends on PCI
config PCI_DOMAINS_GENERIC
bool
- depends on PCI
select PCI_DOMAINS
config PCI_SYSCALL
@@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig"
config PCI_MSI
bool "Message Signaled Interrupts (MSI and MSI-X)"
- depends on PCI
select GENERIC_MSI_IRQ
help
This allows device drivers to enable MSI (Message Signaled
@@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN
config PCI_QUIRKS
default y
bool "Enable PCI quirk workarounds" if EXPERT
- depends on PCI
help
This enables workarounds for various PCI chipset bugs/quirks.
Disable this only if your target machine is unaffected by PCI
@@ -67,7 +66,7 @@ config PCI_QUIRKS
config PCI_DEBUG
bool "PCI Debugging"
- depends on PCI && DEBUG_KERNEL
+ depends on DEBUG_KERNEL
help
Say Y here if you want the PCI core to produce a bunch of debug
messages to the system log. Select this if you are having a
@@ -77,7 +76,6 @@ config PCI_DEBUG
config PCI_REALLOC_ENABLE_AUTO
bool "Enable PCI resource re-allocation detection"
- depends on PCI
depends on PCI_IOV
help
Say Y here if you want the PCI core to detect if PCI resource
@@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO
config PCI_STUB
tristate "PCI Stub driver"
- depends on PCI
help
Say Y or M here if you want be able to reserve a PCI device
when it is going to be assigned to a guest operating system.
@@ -99,7 +96,6 @@ config PCI_STUB
config PCI_PF_STUB
tristate "PCI PF Stub driver"
- depends on PCI
depends on PCI_IOV
help
Say Y or M here if you want to enable support for devices that
@@ -111,7 +107,7 @@ config PCI_PF_STUB
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
- depends on PCI && X86 && XEN
+ depends on X86 && XEN
select PCI_XEN
select XEN_XENBUS_FRONTEND
default y
@@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL
config PCI_IOV
bool "PCI IOV support"
- depends on PCI
select PCI_ATS
help
I/O Virtualization is a PCI feature supported by some devices
@@ -144,7 +139,6 @@ config PCI_IOV
config PCI_PRI
bool "PCI PRI support"
- depends on PCI
select PCI_ATS
help
PRI is the PCI Page Request Interface. It allows PCI devices that are
@@ -154,7 +148,6 @@ config PCI_PRI
config PCI_PASID
bool "PCI PASID support"
- depends on PCI
select PCI_ATS
help
Process Address Space Identifiers (PASIDs) can be used by PCI devices
@@ -167,7 +160,7 @@ config PCI_PASID
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
- depends on PCI && ZONE_DEVICE
+ depends on ZONE_DEVICE
select GENERIC_ALLOCATOR
help
Enables drivers to do PCI peer-to-peer transactions to and from
@@ -184,12 +177,11 @@ config PCI_P2PDMA
config PCI_LABEL
def_bool y if (DMI || ACPI)
- depends on PCI
select NLS
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
- depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+ depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
help
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
@@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
source "drivers/pci/switch/Kconfig"
+
+endif
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 52e47dac028f..80f843030e36 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev)
imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
if (IS_ERR(imx6_pcie->pd_pcie))
return PTR_ERR(imx6_pcie->pd_pcie);
+ /* Do nothing when the power domain is missing */
+ if (!imx6_pcie->pd_pcie)
+ return 0;
link = device_link_add(dev, imx6_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
@@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev)
if (IS_ERR(imx6_pcie->pd_pcie_phy))
return PTR_ERR(imx6_pcie->pd_pcie_phy);
- device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (IS_ERR(link)) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link));
- return PTR_ERR(link);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ return -EINVAL;
}
return 0;
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 241ebe0c4505..e35e9eaa50ee 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b171b6bc15c8..0c389a30ef5d 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -22,7 +22,6 @@
#include <linux/resource.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>
-#include <linux/gpio/consumer.h>
#include "pcie-designware.h"
@@ -30,7 +29,6 @@ struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
struct clk *clk_reg;
- struct gpio_desc *reset_gpio;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
- if (pcie->reset_gpio) {
- /* assert and then deassert the reset signal */
- gpiod_set_value_cansleep(pcie->reset_gpio, 1);
- msleep(100);
- gpiod_set_value_cansleep(pcie->reset_gpio, 0);
- }
dw_pcie_setup_rc(pp);
armada8k_pcie_establish_link(pcie);
@@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail_clkreg;
}
- /* Get reset gpio signal and hold asserted (logically high) */
- pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->reset_gpio)) {
- ret = PTR_ERR(pcie->reset_gpio);
- goto fail_clkreg;
- }
-
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 9deb56989d72..cb3401a931f8 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -602,9 +602,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
}
/* Reserve memory for event queue and make sure memories are zeroed */
- msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
- msi->nr_eq_region * EQ_MEM_REGION_SIZE,
- &msi->eq_dma, GFP_KERNEL);
+ msi->eq_cpu = dma_alloc_coherent(pcie->dev,
+ msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+ &msi->eq_dma, GFP_KERNEL);
if (!msi->eq_cpu) {
ret = -ENOMEM;
goto free_irqs;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a1c8a09efa5..73986825d221 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -532,7 +532,7 @@ error_attrs:
}
static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
+msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
{
struct irq_affinity_desc *masks = NULL;
struct msi_desc *entry;
@@ -597,7 +597,7 @@ static int msi_verify_entries(struct pci_dev *dev)
* which could have been allocated.
*/
static int msi_capability_init(struct pci_dev *dev, int nvec,
- const struct irq_affinity *affd)
+ struct irq_affinity *affd)
{
struct msi_desc *entry;
int ret;
@@ -669,7 +669,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec,
- const struct irq_affinity *affd)
+ struct irq_affinity *affd)
{
struct irq_affinity_desc *curmsk, *masks = NULL;
struct msi_desc *entry;
@@ -736,7 +736,7 @@ static void msix_program_entries(struct pci_dev *dev,
* requested MSI-X entries with allocated irqs or non-zero for otherwise.
**/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, const struct irq_affinity *affd)
+ int nvec, struct irq_affinity *affd)
{
int ret;
u16 control;
@@ -932,7 +932,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, const struct irq_affinity *affd)
+ int nvec, struct irq_affinity *affd)
{
int nr_entries;
int i, j;
@@ -1018,7 +1018,7 @@ int pci_msi_enabled(void)
EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
- const struct irq_affinity *affd)
+ struct irq_affinity *affd)
{
int nvec;
int rc;
@@ -1035,13 +1035,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (maxvec < minvec)
return -ERANGE;
- /*
- * If the caller is passing in sets, we can't support a range of
- * vectors. The caller needs to handle that.
- */
- if (affd && affd->nr_sets && minvec != maxvec)
- return -EINVAL;
-
if (WARN_ON_ONCE(dev->msi_enabled))
return -EINVAL;
@@ -1086,20 +1079,13 @@ EXPORT_SYMBOL(pci_enable_msi);
static int __pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries, int minvec,
- int maxvec, const struct irq_affinity *affd)
+ int maxvec, struct irq_affinity *affd)
{
int rc, nvec = maxvec;
if (maxvec < minvec)
return -ERANGE;
- /*
- * If the caller is passing in sets, we can't support a range of
- * supported vectors. The caller needs to handle that.
- */
- if (affd && affd->nr_sets && minvec != maxvec)
- return -EINVAL;
-
if (WARN_ON_ONCE(dev->msix_enabled))
return -EINVAL;
@@ -1165,10 +1151,11 @@ EXPORT_SYMBOL(pci_enable_msix_range);
*/
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
- const struct irq_affinity *affd)
+ struct irq_affinity *affd)
{
- static const struct irq_affinity msi_default_affd;
- int vecs = -ENOSPC;
+ struct irq_affinity msi_default_affd = {0};
+ int msix_vecs = -ENOSPC;
+ int msi_vecs = -ENOSPC;
if (flags & PCI_IRQ_AFFINITY) {
if (!affd)
@@ -1179,27 +1166,37 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
}
if (flags & PCI_IRQ_MSIX) {
- vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
- affd);
- if (vecs > 0)
- return vecs;
+ msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
+ max_vecs, affd);
+ if (msix_vecs > 0)
+ return msix_vecs;
}
if (flags & PCI_IRQ_MSI) {
- vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
- if (vecs > 0)
- return vecs;
+ msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
+ affd);
+ if (msi_vecs > 0)
+ return msi_vecs;
}
/* use legacy irq if allowed */
if (flags & PCI_IRQ_LEGACY) {
if (min_vecs == 1 && dev->irq) {
+ /*
+ * Invoke the affinity spreading logic to ensure that
+ * the device driver can adjust queue configuration
+ * for the single interrupt case.
+ */
+ if (affd)
+ irq_create_affinity_masks(1, affd);
pci_intx(dev, 1);
return 1;
}
}
- return vecs;
+ if (msix_vecs == -ENOSPC)
+ return -ENOSPC;
+ return msi_vecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
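A typical consumer of the interface reworked above requests MSI-X, MSI and legacy INTx in one call and lets the core fall back in that order; the irq_affinity argument is optional. A hedged sketch of such a caller (example_setup_irqs is hypothetical):

#include <linux/pci.h>

/* Sketch: request between 1 and 8 vectors, preferring MSI-X, then MSI,
 * then the legacy INTx line. */
static int example_setup_irqs(struct pci_dev *pdev)
{
	int nvecs;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, 1, 8,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY,
			NULL);
	if (nvecs < 0)
		return nvecs;

	/* pci_irq_vector(pdev, i) then maps vector i to a Linux IRQ number. */
	return nvecs;
}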
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9ecfe13157c0..25794c27c7a4 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -412,8 +412,7 @@ static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(msi_bus);
-static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
- size_t count)
+static ssize_t rescan_store(struct bus_type *bus, const char *buf, size_t count)
{
unsigned long val;
struct pci_bus *b = NULL;
@@ -429,7 +428,7 @@ static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
}
return count;
}
-static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);
+static BUS_ATTR_WO(rescan);
static struct attribute *pci_bus_attrs[] = {
&bus_attr_rescan.attr,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c9d8e3c837de..db55acf32a7e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6034,19 +6034,18 @@ static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
return count;
}
-static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
+static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}
-static ssize_t pci_resource_alignment_store(struct bus_type *bus,
+static ssize_t resource_alignment_store(struct bus_type *bus,
const char *buf, size_t count)
{
return pci_set_resource_alignment_param(buf, count);
}
-static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
- pci_resource_alignment_store);
+static BUS_ATTR_RW(resource_alignment);
static int __init pci_resource_alignment_sysfs_init(void)
{
@@ -6195,7 +6194,8 @@ static int __init pci_setup(char *str)
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
- disable_acs_redir_param = str + 18;
+ disable_acs_redir_param =
+ kstrdup(str + 18, GFP_KERNEL);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b0a413f3f7ca..e2a879e93d86 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev)
break;
}
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
- quirk_synopsys_haps);
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB_XHCI, 0,
+ quirk_synopsys_haps);
/*
* Let's make the southbridge information explicit instead of having to
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 6c5536d3d42a..e22766c79fe9 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1373,10 +1373,10 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
return 0;
- stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev,
- sizeof(*stdev->dma_mrpc),
- &stdev->dma_mrpc_dma_addr,
- GFP_KERNEL);
+ stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
+ sizeof(*stdev->dma_mrpc),
+ &stdev->dma_mrpc_dma_addr,
+ GFP_KERNEL);
if (stdev->dma_mrpc == NULL)
return -ENOMEM;
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 1bfeb160c5b1..bfd03e023308 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1327,15 +1327,6 @@ static int cci_pmu_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
- /* We have no filtering of any kind */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
/*
* Following the example set by other "uncore" PMUs, we accept any CPU
* and rewrite its affinity dynamically rather than having perf core
@@ -1433,6 +1424,7 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
.stop = cci_pmu_stop,
.read = pmu_read,
.attr_groups = pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
cci_pmu->plat_device = pdev;
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 7dd850e02f19..2ae76026e947 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -741,10 +741,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
- if (has_branch_stack(event) || event->attr.exclude_user ||
- event->attr.exclude_kernel || event->attr.exclude_hv ||
- event->attr.exclude_idle || event->attr.exclude_host ||
- event->attr.exclude_guest) {
+ if (has_branch_stack(event)) {
dev_dbg(ccn->dev, "Can't exclude execution levels!\n");
return -EINVAL;
}
@@ -1290,6 +1287,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
.read = arm_ccn_pmu_event_read,
.pmu_enable = arm_ccn_pmu_enable,
.pmu_disable = arm_ccn_pmu_disable,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
/* No overflow interrupt? Have to use a timer instead. */
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 660cb8ac886a..5851de56bbd0 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -562,13 +562,7 @@ static int dsu_pmu_event_init(struct perf_event *event)
return -EINVAL;
}
- if (has_branch_stack(event) ||
- event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest) {
+ if (has_branch_stack(event)) {
dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
return -EINVAL;
}
@@ -735,6 +729,7 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
.read = dsu_pmu_read,
.attr_groups = dsu_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d0b7dd8fb184..eec75b97e7ea 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -357,13 +357,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
}
static int
-event_requires_mode_exclusion(struct perf_event_attr *attr)
-{
- return attr->exclude_idle || attr->exclude_user ||
- attr->exclude_kernel || attr->exclude_hv;
-}
-
-static int
__hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
@@ -393,9 +386,8 @@ __hw_perf_event_init(struct perf_event *event)
/*
* Check whether we need to exclude the counter from certain modes.
*/
- if ((!armpmu->set_event_filter ||
- armpmu->set_event_filter(hwc, &event->attr)) &&
- event_requires_mode_exclusion(&event->attr)) {
+ if (armpmu->set_event_filter &&
+ armpmu->set_event_filter(hwc, &event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
return -EOPNOTSUPP;
@@ -867,6 +859,9 @@ int armpmu_register(struct arm_pmu *pmu)
if (ret)
return ret;
+ if (!pmu->set_event_filter)
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
+
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
if (ret)
goto out_destroy;
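The perf changes above move exclusion filtering out of each driver's event_init(): a PMU that cannot honour the exclude_* attributes advertises PERF_PMU_CAP_NO_EXCLUDE and the core rejects such events up front. In sketch form (example_pmu is hypothetical and only the relevant fields are shown):

#include <linux/perf_event.h>

static struct pmu example_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	/* No privilege-level filtering in hardware, so let the core refuse
	 * exclude_user/exclude_kernel/... requests on our behalf. */
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	/* .event_init, .add, .del, .start, .stop, .read as usual */
};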
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 8e46a9dad2fa..7cb766dafe85 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -824,10 +824,10 @@ static void arm_spe_pmu_read(struct perf_event *event)
{
}
-static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
- bool snapshot)
+static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
{
- int i;
+ int i, cpu = event->cpu;
struct page **pglist;
struct arm_spe_pmu_buf *buf;
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 69372e2bc93c..0eba947c2ee9 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -396,6 +396,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
.attr_groups = hisi_ddrc_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 443906e0aff3..2553a844ebf6 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -407,6 +407,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
.attr_groups = hisi_hha_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 0bde5d919b2e..cf1cc34f402a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -397,6 +397,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
.attr_groups = hisi_l3c_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 9efd2413240c..f028cbc3443c 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -142,15 +142,6 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
- /* counters do not have these bits */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle)
- return -EINVAL;
-
/*
	 * The uncore counters are not specific to any CPU, so they cannot
	 * support per-task
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 842135cf35a3..091b4d7d32c4 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -509,14 +509,6 @@ static int l2_cache_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
- /* We cannot filter accurately so we just don't allow it. */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_hv || event->attr.exclude_idle) {
- dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
- "Can't exclude execution levels\n");
- return -EOPNOTSUPP;
- }
-
if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
((event->attr.config & ~L2_EVT_MASK) != 0)) &&
(event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
@@ -982,6 +974,7 @@ static int l2_cache_pmu_probe(struct platform_device *pdev)
.stop = l2_cache_event_stop,
.read = l2_cache_event_read,
.attr_groups = l2_cache_pmu_attr_grps,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
l2cache_pmu->num_counters = get_num_counters();
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 2dc63d61f2ea..5d70646da8c7 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -495,13 +495,6 @@ static int qcom_l3_cache__event_init(struct perf_event *event)
return -ENOENT;
/*
- * There are no per-counter mode filters in the PMU.
- */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_hv || event->attr.exclude_idle)
- return -EINVAL;
-
- /*
* Sampling not supported since these events are not core-attributable.
*/
if (hwc->sample_period)
@@ -777,6 +770,7 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
.read = qcom_l3_cache__event_read,
.attr_groups = qcom_l3_cache_pmu_attr_grps,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index c9a1701d3e54..43d76c85da56 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -424,15 +424,6 @@ static int tx2_uncore_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- /* We have no filtering of any kind */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
if (event->cpu < 0)
return -EINVAL;
@@ -572,6 +563,7 @@ static int tx2_uncore_pmu_register(
.start = tx2_uncore_event_start,
.stop = tx2_uncore_event_stop,
.read = tx2_uncore_event_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 0dc9ff0f8894..d4ec04868d59 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -917,11 +917,6 @@ static int xgene_perf_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- /* SOC counters do not have usr/os/guest/host bits */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_host || event->attr.exclude_guest)
- return -EINVAL;
-
if (event->cpu < 0)
return -EINVAL;
/*
@@ -1136,6 +1131,7 @@ static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
.start = xgene_perf_start,
.stop = xgene_perf_stop,
.read = xgene_perf_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
/* Hardware counter init */
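Seen from userspace, the effect of PERF_PMU_CAP_NO_EXCLUDE is that perf core
itself returns -EINVAL when any exclude_* bit is set for these PMUs, rather than
each driver policing it. A hedged illustration (not part of the patch; the PMU
type value is a placeholder that real code would read from
/sys/bus/event_source/devices/<pmu>/type):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = 42;			/* placeholder uncore PMU type */
	attr.config = 0x0;
	attr.exclude_kernel = 1;	/* unsupported by these uncore PMUs */

	if (syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0) < 0)
		perror("perf_event_open");	/* expected: EINVAL */
	return 0;
}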
diff --git a/drivers/phy/broadcom/phy-bcm-sr-pcie.c b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
index c10e95f86de5..96a3af126a78 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
@@ -78,8 +78,8 @@ struct sr_pcie_phy_core {
static const u8 pipemux_table[] = {
/* PIPEMUX = 0, EP 1x16 */
0x00,
- /* PIPEMUX = 1, EP 2x8 */
- 0x00,
+ /* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */
+ 0x80,
/* PIPEMUX = 2, EP 4x4 */
0x00,
/* PIPEMUX = 3, RC 2x8, cores 0, 7 */
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
index 2b8c0851ff33..31f18b67dd7c 100644
--- a/drivers/phy/cadence/Kconfig
+++ b/drivers/phy/cadence/Kconfig
@@ -1,6 +1,7 @@
#
# Phy drivers for Cadence PHYs
#
+
config PHY_CADENCE_DP
tristate "Cadence MHDP DisplayPort PHY driver"
depends on OF
@@ -9,9 +10,19 @@ config PHY_CADENCE_DP
help
Support for Cadence MHDP DisplayPort PHY.
+config PHY_CADENCE_DPHY
+ tristate "Cadence D-PHY Support"
+ depends on HAS_IOMEM && OF
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Choose this option if you have a Cadence D-PHY in your
+ system. If M is selected, the module will be called
+ cdns-dphy.
+
config PHY_CADENCE_SIERRA
tristate "Cadence Sierra PHY Driver"
depends on OF && HAS_IOMEM && RESET_CONTROLLER
select GENERIC_PHY
help
- Enable this to support the Cadence Sierra PHY driver \ No newline at end of file
+ Enable this to support the Cadence Sierra PHY driver
diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile
index 412349af0492..2f9e3457b954 100644
--- a/drivers/phy/cadence/Makefile
+++ b/drivers/phy/cadence/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_PHY_CADENCE_DP) += phy-cadence-dp.o
+obj-$(CONFIG_PHY_CADENCE_DPHY) += cdns-dphy.o
obj-$(CONFIG_PHY_CADENCE_SIERRA) += phy-cadence-sierra.o
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
new file mode 100644
index 000000000000..90c4e9b5aac8
--- /dev/null
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright: 2017-2018 Cadence Design Systems, Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+
+#define REG_WAKEUP_TIME_NS 800
+#define DPHY_PLL_RATE_HZ 108000000
+
+/* DPHY registers */
+#define DPHY_PMA_CMN(reg) (reg)
+#define DPHY_PMA_LCLK(reg) (0x100 + (reg))
+#define DPHY_PMA_LDATA(lane, reg) (0x200 + ((lane) * 0x100) + (reg))
+#define DPHY_PMA_RCLK(reg) (0x600 + (reg))
+#define DPHY_PMA_RDATA(lane, reg) (0x700 + ((lane) * 0x100) + (reg))
+#define DPHY_PCS(reg) (0xb00 + (reg))
+
+#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
+#define DPHY_CMN_SSM_EN BIT(0)
+#define DPHY_CMN_TX_MODE_EN BIT(9)
+
+#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
+#define DPHY_CMN_PWM_DIV(x) ((x) << 20)
+#define DPHY_CMN_PWM_LOW(x) ((x) << 10)
+#define DPHY_CMN_PWM_HIGH(x) (x)
+
+#define DPHY_CMN_FBDIV DPHY_PMA_CMN(0x4c)
+#define DPHY_CMN_FBDIV_VAL(low, high) (((high) << 11) | ((low) << 22))
+#define DPHY_CMN_FBDIV_FROM_REG (BIT(10) | BIT(21))
+
+#define DPHY_CMN_OPIPDIV DPHY_PMA_CMN(0x50)
+#define DPHY_CMN_IPDIV_FROM_REG BIT(0)
+#define DPHY_CMN_IPDIV(x) ((x) << 1)
+#define DPHY_CMN_OPDIV_FROM_REG BIT(6)
+#define DPHY_CMN_OPDIV(x) ((x) << 7)
+
+#define DPHY_PSM_CFG DPHY_PCS(0x4)
+#define DPHY_PSM_CFG_FROM_REG BIT(0)
+#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
+
+#define DSI_HBP_FRAME_OVERHEAD 12
+#define DSI_HSA_FRAME_OVERHEAD 14
+#define DSI_HFP_FRAME_OVERHEAD 6
+#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
+#define DSI_BLANKING_FRAME_OVERHEAD 6
+#define DSI_NULL_FRAME_OVERHEAD 6
+#define DSI_EOT_PKT_SIZE 4
+
+struct cdns_dphy_cfg {
+ u8 pll_ipdiv;
+ u8 pll_opdiv;
+ u16 pll_fbdiv;
+ unsigned int nlanes;
+};
+
+enum cdns_dphy_clk_lane_cfg {
+ DPHY_CLK_CFG_LEFT_DRIVES_ALL = 0,
+ DPHY_CLK_CFG_LEFT_DRIVES_RIGHT = 1,
+ DPHY_CLK_CFG_LEFT_DRIVES_LEFT = 2,
+ DPHY_CLK_CFG_RIGHT_DRIVES_ALL = 3,
+};
+
+struct cdns_dphy;
+struct cdns_dphy_ops {
+ int (*probe)(struct cdns_dphy *dphy);
+ void (*remove)(struct cdns_dphy *dphy);
+ void (*set_psm_div)(struct cdns_dphy *dphy, u8 div);
+ void (*set_clk_lane_cfg)(struct cdns_dphy *dphy,
+ enum cdns_dphy_clk_lane_cfg cfg);
+ void (*set_pll_cfg)(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg);
+ unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
+};
+
+struct cdns_dphy {
+ struct cdns_dphy_cfg cfg;
+ void __iomem *regs;
+ struct clk *psm_clk;
+ struct clk *pll_ref_clk;
+ const struct cdns_dphy_ops *ops;
+ struct phy *phy;
+};
+
+static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
+ struct cdns_dphy_cfg *cfg,
+ struct phy_configure_opts_mipi_dphy *opts,
+ unsigned int *dsi_hfp_ext)
+{
+ unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);
+ u64 dlane_bps;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ if (pll_ref_hz < 9600000 || pll_ref_hz >= 150000000)
+ return -EINVAL;
+ else if (pll_ref_hz < 19200000)
+ cfg->pll_ipdiv = 1;
+ else if (pll_ref_hz < 38400000)
+ cfg->pll_ipdiv = 2;
+ else if (pll_ref_hz < 76800000)
+ cfg->pll_ipdiv = 4;
+ else
+ cfg->pll_ipdiv = 8;
+
+ dlane_bps = opts->hs_clk_rate;
+
+ if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
+ return -EINVAL;
+ else if (dlane_bps >= 1250000000)
+ cfg->pll_opdiv = 1;
+ else if (dlane_bps >= 630000000)
+ cfg->pll_opdiv = 2;
+ else if (dlane_bps >= 320000000)
+ cfg->pll_opdiv = 4;
+ else if (dlane_bps >= 160000000)
+ cfg->pll_opdiv = 8;
+
+ cfg->pll_fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
+ cfg->pll_ipdiv,
+ pll_ref_hz);
+
+ return 0;
+}
+
+static int cdns_dphy_setup_psm(struct cdns_dphy *dphy)
+{
+ unsigned long psm_clk_hz = clk_get_rate(dphy->psm_clk);
+ unsigned long psm_div;
+
+ if (!psm_clk_hz || psm_clk_hz > 100000000)
+ return -EINVAL;
+
+ psm_div = DIV_ROUND_CLOSEST(psm_clk_hz, 1000000);
+ if (dphy->ops->set_psm_div)
+ dphy->ops->set_psm_div(dphy, psm_div);
+
+ return 0;
+}
+
+static void cdns_dphy_set_clk_lane_cfg(struct cdns_dphy *dphy,
+ enum cdns_dphy_clk_lane_cfg cfg)
+{
+ if (dphy->ops->set_clk_lane_cfg)
+ dphy->ops->set_clk_lane_cfg(dphy, cfg);
+}
+
+static void cdns_dphy_set_pll_cfg(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg)
+{
+ if (dphy->ops->set_pll_cfg)
+ dphy->ops->set_pll_cfg(dphy, cfg);
+}
+
+static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+ return dphy->ops->get_wakeup_time_ns(dphy);
+}
+
+static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+ /* Default wakeup time is 800 ns (in a simulated environment). */
+ return 800;
+}
+
+static void cdns_dphy_ref_set_pll_cfg(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg)
+{
+ u32 fbdiv_low, fbdiv_high;
+
+ fbdiv_low = (cfg->pll_fbdiv / 4) - 2;
+ fbdiv_high = cfg->pll_fbdiv - fbdiv_low - 2;
+
+ writel(DPHY_CMN_IPDIV_FROM_REG | DPHY_CMN_OPDIV_FROM_REG |
+ DPHY_CMN_IPDIV(cfg->pll_ipdiv) |
+ DPHY_CMN_OPDIV(cfg->pll_opdiv),
+ dphy->regs + DPHY_CMN_OPIPDIV);
+ writel(DPHY_CMN_FBDIV_FROM_REG |
+ DPHY_CMN_FBDIV_VAL(fbdiv_low, fbdiv_high),
+ dphy->regs + DPHY_CMN_FBDIV);
+ writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
+ DPHY_CMN_PWM_DIV(0x8),
+ dphy->regs + DPHY_CMN_PWM);
+}
+
+static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
+{
+ writel(DPHY_PSM_CFG_FROM_REG | DPHY_PSM_CLK_DIV(div),
+ dphy->regs + DPHY_PSM_CFG);
+}
+
+/*
+ * This is the reference implementation of the DPHY hooks. Specific
+ * integrations of this IP may have to re-implement some of them, depending
+ * on how things were wired in the SoC.
+ */
+static const struct cdns_dphy_ops ref_dphy_ops = {
+ .get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
+ .set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
+ .set_psm_div = cdns_dphy_ref_set_psm_div,
+};
+
+static int cdns_dphy_config_from_opts(struct phy *phy,
+ struct phy_configure_opts_mipi_dphy *opts,
+ struct cdns_dphy_cfg *cfg)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ unsigned int dsi_hfp_ext = 0;
+ int ret;
+
+ ret = phy_mipi_dphy_config_validate(opts);
+ if (ret)
+ return ret;
+
+ ret = cdns_dsi_get_dphy_pll_cfg(dphy, cfg,
+ opts, &dsi_hfp_ext);
+ if (ret)
+ return ret;
+
+ opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
+
+ return 0;
+}
+
+static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts)
+{
+ struct cdns_dphy_cfg cfg = { 0 };
+
+ if (mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
+
+ return cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+}
+
+static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ struct cdns_dphy_cfg cfg = { 0 };
+ int ret;
+
+ ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the internal PSM clk divider so that the DPHY has a
+ * 1MHz clk (or something close).
+ */
+ ret = cdns_dphy_setup_psm(dphy);
+ if (ret)
+ return ret;
+
+ /*
+	 * Configure the clk lane to data lane attachment: the DPHY has 2 clk
+	 * lanes and 8 data lanes; each clk lane can be attached to a
+	 * different set of data lanes. The 2 groups are named 'left' and
+	 * 'right', so here we just say that we want the 'left' clk lane to
+	 * drive the 'left' data lanes.
+ */
+ cdns_dphy_set_clk_lane_cfg(dphy, DPHY_CLK_CFG_LEFT_DRIVES_LEFT);
+
+ /*
+ * Configure the DPHY PLL that will be used to generate the TX byte
+ * clk.
+ */
+ cdns_dphy_set_pll_cfg(dphy, &cfg);
+
+ return 0;
+}
+
+static int cdns_dphy_power_on(struct phy *phy)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+
+ clk_prepare_enable(dphy->psm_clk);
+ clk_prepare_enable(dphy->pll_ref_clk);
+
+ /* Start TX state machine. */
+ writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+ dphy->regs + DPHY_CMN_SSM);
+
+ return 0;
+}
+
+static int cdns_dphy_power_off(struct phy *phy)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(dphy->pll_ref_clk);
+ clk_disable_unprepare(dphy->psm_clk);
+
+ return 0;
+}
+
+static const struct phy_ops cdns_dphy_ops = {
+ .configure = cdns_dphy_configure,
+ .validate = cdns_dphy_validate,
+ .power_on = cdns_dphy_power_on,
+ .power_off = cdns_dphy_power_off,
+};
+
+static int cdns_dphy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct cdns_dphy *dphy;
+ struct resource *res;
+ int ret;
+
+ dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
+ if (!dphy)
+ return -ENOMEM;
+ dev_set_drvdata(&pdev->dev, dphy);
+
+ dphy->ops = of_device_get_match_data(&pdev->dev);
+ if (!dphy->ops)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dphy->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dphy->regs))
+ return PTR_ERR(dphy->regs);
+
+ dphy->psm_clk = devm_clk_get(&pdev->dev, "psm");
+ if (IS_ERR(dphy->psm_clk))
+ return PTR_ERR(dphy->psm_clk);
+
+ dphy->pll_ref_clk = devm_clk_get(&pdev->dev, "pll_ref");
+ if (IS_ERR(dphy->pll_ref_clk))
+ return PTR_ERR(dphy->pll_ref_clk);
+
+ if (dphy->ops->probe) {
+ ret = dphy->ops->probe(dphy);
+ if (ret)
+ return ret;
+ }
+
+ dphy->phy = devm_phy_create(&pdev->dev, NULL, &cdns_dphy_ops);
+ if (IS_ERR(dphy->phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ if (dphy->ops->remove)
+ dphy->ops->remove(dphy);
+ return PTR_ERR(dphy->phy);
+ }
+
+ phy_set_drvdata(dphy->phy, dphy);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static int cdns_dphy_remove(struct platform_device *pdev)
+{
+ struct cdns_dphy *dphy = dev_get_drvdata(&pdev->dev);
+
+ if (dphy->ops->remove)
+ dphy->ops->remove(dphy);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_dphy_of_match[] = {
+ { .compatible = "cdns,dphy", .data = &ref_dphy_ops },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cdns_dphy_of_match);
+
+static struct platform_driver cdns_dphy_platform_driver = {
+ .probe = cdns_dphy_probe,
+ .remove = cdns_dphy_remove,
+ .driver = {
+ .name = "cdns-mipi-dphy",
+ .of_match_table = cdns_dphy_of_match,
+ },
+};
+module_platform_driver(cdns_dphy_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_DESCRIPTION("Cadence MIPI D-PHY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index f050bd4e97e0..832670b4952b 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -2,4 +2,4 @@ config PHY_FSL_IMX8MQ_USB
tristate "Freescale i.MX8M USB3 PHY"
depends on OF && HAS_IOMEM
select GENERIC_PHY
- default SOC_IMX8MQ
+ default ARCH_MXC && ARM64
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 6fb4b56e4c14..9ba872325dad 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -21,6 +21,37 @@ config PHY_BERLIN_USB
help
Enable this to support the USB PHY on Marvell Berlin SoCs.
+config PHY_MVEBU_A3700_COMPHY
+ tristate "Marvell A3700 comphy driver"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
+ depends on HAVE_ARM_SMCCC
+ default y
+ select GENERIC_PHY
+ help
+	  This driver allows controlling the comphy, a hardware block providing
+	  shared serdes PHYs on Marvell Armada 3700. Its serdes lanes can be
+	  used by various controllers: Ethernet, SATA, USB3, PCIe.
+
+config PHY_MVEBU_A3700_UTMI
+ tristate "Marvell A3700 UTMI driver"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
+ default y
+ select GENERIC_PHY
+ help
+	  Enable this to support the Marvell A3700 UTMI PHY driver.
+
+config PHY_MVEBU_A38X_COMPHY
+ tristate "Marvell Armada 38x comphy driver"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+	  This driver allows controlling the comphy, a hardware block providing
+	  shared serdes PHYs on Marvell Armada 38x. Its serdes lanes can be
+	  used by various controllers (Ethernet, SATA, USB, PCIe...).
+
config PHY_MVEBU_CP110_COMPHY
tristate "Marvell CP110 comphy driver"
depends on ARCH_MVEBU || COMPILE_TEST
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 3975b144f8ec..434eb9ca6cc3 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -2,6 +2,9 @@
obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o
obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o
obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o
+obj-$(CONFIG_PHY_MVEBU_A3700_COMPHY) += phy-mvebu-a3700-comphy.o
+obj-$(CONFIG_PHY_MVEBU_A3700_UTMI) += phy-mvebu-a3700-utmi.o
+obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY) += phy-armada38x-comphy.o
obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
diff --git a/drivers/phy/marvell/phy-armada375-usb2.c b/drivers/phy/marvell/phy-armada375-usb2.c
index 1a3db288c0a9..fa5dc9462d09 100644
--- a/drivers/phy/marvell/phy-armada375-usb2.c
+++ b/drivers/phy/marvell/phy-armada375-usb2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB cluster support for Armada 375 platform.
*
@@ -5,10 +6,6 @@
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2 or later. This program is licensed "as is"
- * without any warranty of any kind, whether express or implied.
- *
 * Armada 375 comes with a USB2 host and device controller and a
 * USB3 controller. The USB cluster control register allows managing
 * common features of both USB controllers.
@@ -18,7 +15,6 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -142,7 +138,6 @@ static const struct of_device_id of_usb_cluster_table[] = {
{ .compatible = "marvell,armada-375-usb-cluster", },
{ /* end of list */ },
};
-MODULE_DEVICE_TABLE(of, of_usb_cluster_table);
static struct platform_driver armada375_usb_phy_driver = {
.probe = armada375_usb_phy_probe,
@@ -151,8 +146,4 @@ static struct platform_driver armada375_usb_phy_driver = {
.name = "armada-375-usb-cluster",
}
};
-module_platform_driver(armada375_usb_phy_driver);
-
-MODULE_DESCRIPTION("Armada 375 USB cluster driver");
-MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(armada375_usb_phy_driver);
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
new file mode 100644
index 000000000000..3e00bc679d4e
--- /dev/null
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Russell King, Deep Blue Solutions Ltd.
+ *
+ * Partly derived from CP110 comphy driver by Antoine Tenart
+ * <antoine.tenart@bootlin.com>
+ */
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#define MAX_A38X_COMPHY 6
+#define MAX_A38X_PORTS 3
+
+#define COMPHY_CFG1 0x00
+#define COMPHY_CFG1_GEN_TX(x) ((x) << 26)
+#define COMPHY_CFG1_GEN_TX_MSK COMPHY_CFG1_GEN_TX(15)
+#define COMPHY_CFG1_GEN_RX(x) ((x) << 22)
+#define COMPHY_CFG1_GEN_RX_MSK COMPHY_CFG1_GEN_RX(15)
+#define GEN_SGMII_1_25GBPS 6
+#define GEN_SGMII_3_125GBPS 8
+
+#define COMPHY_STAT1 0x18
+#define COMPHY_STAT1_PLL_RDY_TX BIT(3)
+#define COMPHY_STAT1_PLL_RDY_RX BIT(2)
+
+#define COMPHY_SELECTOR 0xfc
+
+struct a38x_comphy;
+
+struct a38x_comphy_lane {
+ void __iomem *base;
+ struct a38x_comphy *priv;
+ unsigned int n;
+
+ int port;
+};
+
+struct a38x_comphy {
+ void __iomem *base;
+ struct device *dev;
+ struct a38x_comphy_lane lane[MAX_A38X_COMPHY];
+};
+
+static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = {
+ { 0, 0, 0 },
+ { 4, 5, 0 },
+ { 0, 4, 0 },
+ { 0, 0, 4 },
+ { 0, 3, 0 },
+ { 0, 0, 3 },
+};
+
+static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane,
+ unsigned int offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(lane->base + offset) & ~mask;
+ writel(val | value, lane->base + offset);
+}
+
+static void a38x_comphy_set_speed(struct a38x_comphy_lane *lane,
+ unsigned int gen_tx, unsigned int gen_rx)
+{
+ a38x_comphy_set_reg(lane, COMPHY_CFG1,
+ COMPHY_CFG1_GEN_TX_MSK | COMPHY_CFG1_GEN_RX_MSK,
+ COMPHY_CFG1_GEN_TX(gen_tx) |
+ COMPHY_CFG1_GEN_RX(gen_rx));
+}
+
+static int a38x_comphy_poll(struct a38x_comphy_lane *lane,
+ unsigned int offset, u32 mask, u32 value)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout_atomic(lane->base + offset, val,
+ (val & mask) == value,
+ 1000, 150000);
+
+ if (ret)
+ dev_err(lane->priv->dev,
+ "comphy%u: timed out waiting for status\n", lane->n);
+
+ return ret;
+}
+
+/*
+ * We only support changing the speed for comphys configured for GBE.
+ * Since that is all we do, we only poll for PLL ready status.
+ */
+static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
+{
+ struct a38x_comphy_lane *lane = phy_get_drvdata(phy);
+ unsigned int gen;
+
+ if (mode != PHY_MODE_ETHERNET)
+ return -EINVAL;
+
+ switch (sub) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ gen = GEN_SGMII_1_25GBPS;
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ gen = GEN_SGMII_3_125GBPS;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ a38x_comphy_set_speed(lane, gen, gen);
+
+ return a38x_comphy_poll(lane, COMPHY_STAT1,
+ COMPHY_STAT1_PLL_RDY_TX |
+ COMPHY_STAT1_PLL_RDY_RX,
+ COMPHY_STAT1_PLL_RDY_TX |
+ COMPHY_STAT1_PLL_RDY_RX);
+}
+
+static const struct phy_ops a38x_comphy_ops = {
+ .set_mode = a38x_comphy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *a38x_comphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct a38x_comphy_lane *lane;
+ struct phy *phy;
+ u32 val;
+
+ if (WARN_ON(args->args[0] >= MAX_A38X_PORTS))
+ return ERR_PTR(-EINVAL);
+
+ phy = of_phy_simple_xlate(dev, args);
+ if (IS_ERR(phy))
+ return phy;
+
+ lane = phy_get_drvdata(phy);
+ if (lane->port >= 0)
+ return ERR_PTR(-EBUSY);
+
+ lane->port = args->args[0];
+
+ val = readl_relaxed(lane->priv->base + COMPHY_SELECTOR);
+ val = (val >> (4 * lane->n)) & 0xf;
+
+ if (!gbe_mux[lane->n][lane->port] ||
+ val != gbe_mux[lane->n][lane->port]) {
+ dev_warn(lane->priv->dev,
+ "comphy%u: not configured for GBE\n", lane->n);
+ phy = ERR_PTR(-EINVAL);
+ }
+
+ return phy;
+}
+
+static int a38x_comphy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *provider;
+ struct device_node *child;
+ struct a38x_comphy *priv;
+ struct resource *res;
+ void __iomem *base;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->dev = &pdev->dev;
+ priv->base = base;
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ struct phy *phy;
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(child, "reg", &val);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "missing 'reg' property (%d)\n",
+ ret);
+ continue;
+ }
+
+ if (val >= MAX_A38X_COMPHY || priv->lane[val].base) {
+ dev_err(&pdev->dev, "invalid 'reg' property\n");
+ continue;
+ }
+
+ phy = devm_phy_create(&pdev->dev, child, &a38x_comphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ priv->lane[val].base = base + 0x28 * val;
+ priv->lane[val].priv = priv;
+ priv->lane[val].n = val;
+ priv->lane[val].port = -1;
+ phy_set_drvdata(phy, &priv->lane[val]);
+ }
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ provider = devm_of_phy_provider_register(&pdev->dev, a38x_comphy_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id a38x_comphy_of_match_table[] = {
+ { .compatible = "marvell,armada-380-comphy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, a38x_comphy_of_match_table);
+
+static struct platform_driver a38x_comphy_driver = {
+ .probe = a38x_comphy_probe,
+ .driver = {
+ .name = "armada-38x-comphy",
+ .of_match_table = a38x_comphy_of_match_table,
+ },
+};
+module_platform_driver(a38x_comphy_driver);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
+MODULE_DESCRIPTION("Common PHY driver for Armada 38x SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index a91fc67fc4e0..d70ba9bc42d9 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -32,7 +32,7 @@
/* register 0x01 */
#define REF_FREF_SEL_25 BIT(0)
-#define PHY_MODE_SATA (0x0 << 5)
+#define PHY_BERLIN_MODE_SATA (0x0 << 5)
/* register 0x02 */
#define USE_MAX_PLL_RATE BIT(12)
@@ -102,7 +102,8 @@ static int phy_berlin_sata_power_on(struct phy *phy)
/* set PHY mode and ref freq to 25 MHz */
phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01,
- 0x00ff, REF_FREF_SEL_25 | PHY_MODE_SATA);
+ 0x00ff,
+ REF_FREF_SEL_25 | PHY_BERLIN_MODE_SATA);
/* set PHY up to 6 Gbps */
phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25,
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
new file mode 100644
index 000000000000..8812a104c233
--- /dev/null
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell
+ *
+ * Authors:
+ * Evan Wang <xswang@marvell.com>
+ * Miquèl Raynal <miquel.raynal@bootlin.com>
+ *
+ * Structure inspired from phy-mvebu-cp110-comphy.c written by Antoine Tenart.
+ * SMC call initial support done by Grzegorz Jaszczyk.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define MVEBU_A3700_COMPHY_LANES 3
+#define MVEBU_A3700_COMPHY_PORTS 2
+
+/* COMPHY Fast SMC function identifiers */
+#define COMPHY_SIP_POWER_ON 0x82000001
+#define COMPHY_SIP_POWER_OFF 0x82000002
+#define COMPHY_SIP_PLL_LOCK 0x82000003
+
+#define COMPHY_FW_MODE_SATA 0x1
+#define COMPHY_FW_MODE_SGMII 0x2
+#define COMPHY_FW_MODE_HS_SGMII 0x3
+#define COMPHY_FW_MODE_USB3H 0x4
+#define COMPHY_FW_MODE_USB3D 0x5
+#define COMPHY_FW_MODE_PCIE 0x6
+#define COMPHY_FW_MODE_RXAUI 0x7
+#define COMPHY_FW_MODE_XFI 0x8
+#define COMPHY_FW_MODE_SFI 0x9
+#define COMPHY_FW_MODE_USB3 0xa
+
+#define COMPHY_FW_SPEED_1_25G 0 /* SGMII 1G */
+#define COMPHY_FW_SPEED_2_5G 1
+#define COMPHY_FW_SPEED_3_125G 2 /* SGMII 2.5G */
+#define COMPHY_FW_SPEED_5G 3
+#define COMPHY_FW_SPEED_5_15625G 4 /* XFI 5G */
+#define COMPHY_FW_SPEED_6G 5
+#define COMPHY_FW_SPEED_10_3125G 6 /* XFI 10G */
+#define COMPHY_FW_SPEED_MAX 0x3F
+
+#define COMPHY_FW_MODE(mode) ((mode) << 12)
+#define COMPHY_FW_NET(mode, idx, speed) (COMPHY_FW_MODE(mode) | \
+ ((idx) << 8) | \
+ ((speed) << 2))
+#define COMPHY_FW_PCIE(mode, idx, speed, width) (COMPHY_FW_NET(mode, idx, speed) | \
+ ((width) << 18))
+
+struct mvebu_a3700_comphy_conf {
+ unsigned int lane;
+ enum phy_mode mode;
+ int submode;
+ unsigned int port;
+ u32 fw_mode;
+};
+
+#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode, _port, _fw) \
+ { \
+ .lane = _lane, \
+ .mode = _mode, \
+ .submode = _smode, \
+ .port = _port, \
+ .fw_mode = _fw, \
+ }
+
+#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode, _port, _fw) \
+ MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA, _port, _fw)
+
+#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode, _port, _fw) \
+ MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode, _port, _fw)
+
+static const struct mvebu_a3700_comphy_conf mvebu_a3700_comphy_modes[] = {
+ /* lane 0 */
+ MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS, 0,
+ COMPHY_FW_MODE_USB3H),
+ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, 1,
+ COMPHY_FW_MODE_SGMII),
+ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, 1,
+ COMPHY_FW_MODE_HS_SGMII),
+ /* lane 1 */
+ MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, 0,
+ COMPHY_FW_MODE_PCIE),
+ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, 0,
+ COMPHY_FW_MODE_SGMII),
+ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, 0,
+ COMPHY_FW_MODE_HS_SGMII),
+ /* lane 2 */
+ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, 0,
+ COMPHY_FW_MODE_SATA),
+ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS, 0,
+ COMPHY_FW_MODE_USB3H),
+};
+
+struct mvebu_a3700_comphy_lane {
+ struct device *dev;
+ unsigned int id;
+ enum phy_mode mode;
+ int submode;
+ int port;
+};
+
+static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane,
+ unsigned long mode)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(function, lane, mode, 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static int mvebu_a3700_comphy_get_fw_mode(int lane, int port,
+ enum phy_mode mode,
+ int submode)
+{
+ int i, n = ARRAY_SIZE(mvebu_a3700_comphy_modes);
+
+ /* Unused PHY mux value is 0x0 */
+ if (mode == PHY_MODE_INVALID)
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ if (mvebu_a3700_comphy_modes[i].lane == lane &&
+ mvebu_a3700_comphy_modes[i].port == port &&
+ mvebu_a3700_comphy_modes[i].mode == mode &&
+ mvebu_a3700_comphy_modes[i].submode == submode)
+ break;
+ }
+
+ if (i == n)
+ return -EINVAL;
+
+ return mvebu_a3700_comphy_modes[i].fw_mode;
+}
+
+static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
+ int fw_mode;
+
+ if (submode == PHY_INTERFACE_MODE_1000BASEX)
+ submode = PHY_INTERFACE_MODE_SGMII;
+
+ fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port, mode,
+ submode);
+ if (fw_mode < 0) {
+ dev_err(lane->dev, "invalid COMPHY mode\n");
+ return fw_mode;
+ }
+
+ /* Just remember the mode, ->power_on() will do the real setup */
+ lane->mode = mode;
+ lane->submode = submode;
+
+ return 0;
+}
+
+static int mvebu_a3700_comphy_power_on(struct phy *phy)
+{
+ struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
+ u32 fw_param;
+ int fw_mode;
+
+ fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port,
+ lane->mode, lane->submode);
+ if (fw_mode < 0) {
+ dev_err(lane->dev, "invalid COMPHY mode\n");
+ return fw_mode;
+ }
+
+ switch (lane->mode) {
+ case PHY_MODE_USB_HOST_SS:
+ dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id);
+ fw_param = COMPHY_FW_MODE(fw_mode);
+ break;
+ case PHY_MODE_SATA:
+ dev_dbg(lane->dev, "set lane %d to SATA mode\n", lane->id);
+ fw_param = COMPHY_FW_MODE(fw_mode);
+ break;
+ case PHY_MODE_ETHERNET:
+ switch (lane->submode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ dev_dbg(lane->dev, "set lane %d to SGMII mode\n",
+ lane->id);
+ fw_param = COMPHY_FW_NET(fw_mode, lane->port,
+ COMPHY_FW_SPEED_1_25G);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ dev_dbg(lane->dev, "set lane %d to HS SGMII mode\n",
+ lane->id);
+ fw_param = COMPHY_FW_NET(fw_mode, lane->port,
+ COMPHY_FW_SPEED_3_125G);
+ break;
+ default:
+ dev_err(lane->dev, "unsupported PHY submode (%d)\n",
+ lane->submode);
+ return -ENOTSUPP;
+ }
+ break;
+ case PHY_MODE_PCIE:
+ dev_dbg(lane->dev, "set lane %d to PCIe mode\n", lane->id);
+ fw_param = COMPHY_FW_PCIE(fw_mode, lane->port,
+ COMPHY_FW_SPEED_5G,
+ phy->attrs.bus_width);
+ break;
+ default:
+ dev_err(lane->dev, "unsupported PHY mode (%d)\n", lane->mode);
+ return -ENOTSUPP;
+ }
+
+ return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
+}
+
+static int mvebu_a3700_comphy_power_off(struct phy *phy)
+{
+ struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
+
+ return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_OFF, lane->id, 0);
+}
+
+static const struct phy_ops mvebu_a3700_comphy_ops = {
+ .power_on = mvebu_a3700_comphy_power_on,
+ .power_off = mvebu_a3700_comphy_power_off,
+ .set_mode = mvebu_a3700_comphy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *mvebu_a3700_comphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct mvebu_a3700_comphy_lane *lane;
+ struct phy *phy;
+
+ if (WARN_ON(args->args[0] >= MVEBU_A3700_COMPHY_PORTS))
+ return ERR_PTR(-EINVAL);
+
+ phy = of_phy_simple_xlate(dev, args);
+ if (IS_ERR(phy))
+ return phy;
+
+ lane = phy_get_drvdata(phy);
+ lane->port = args->args[0];
+
+ return phy;
+}
+
+static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *provider;
+ struct device_node *child;
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ struct mvebu_a3700_comphy_lane *lane;
+ struct phy *phy;
+ int ret;
+ u32 lane_id;
+
+ ret = of_property_read_u32(child, "reg", &lane_id);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "missing 'reg' property (%d)\n",
+ ret);
+ continue;
+ }
+
+ if (lane_id >= MVEBU_A3700_COMPHY_LANES) {
+ dev_err(&pdev->dev, "invalid 'reg' property\n");
+ continue;
+ }
+
+ lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL);
+ if (!lane)
+ return -ENOMEM;
+
+ phy = devm_phy_create(&pdev->dev, child,
+ &mvebu_a3700_comphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ lane->dev = &pdev->dev;
+ lane->mode = PHY_MODE_INVALID;
+ lane->submode = PHY_INTERFACE_MODE_NA;
+ lane->id = lane_id;
+ lane->port = -1;
+ phy_set_drvdata(phy, lane);
+ }
+
+ provider = devm_of_phy_provider_register(&pdev->dev,
+ mvebu_a3700_comphy_xlate);
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id mvebu_a3700_comphy_of_match_table[] = {
+ { .compatible = "marvell,comphy-a3700" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mvebu_a3700_comphy_of_match_table);
+
+static struct platform_driver mvebu_a3700_comphy_driver = {
+ .probe = mvebu_a3700_comphy_probe,
+ .driver = {
+ .name = "mvebu-a3700-comphy",
+ .of_match_table = mvebu_a3700_comphy_of_match_table,
+ },
+};
+module_platform_driver(mvebu_a3700_comphy_driver);
+
+MODULE_AUTHOR("Miquèl Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Common PHY driver for A3700");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
new file mode 100644
index 000000000000..94a29dea57af
--- /dev/null
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell
+ *
+ * Authors:
+ * Igal Liberman <igall@marvell.com>
+ * Miquèl Raynal <miquel.raynal@bootlin.com>
+ *
+ * Marvell A3700 UTMI PHY driver
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* Armada 3700 UTMI PHY registers */
+#define USB2_PHY_PLL_CTRL_REG0 0x0
+#define PLL_REF_DIV_OFF 0
+#define PLL_REF_DIV_MASK GENMASK(6, 0)
+#define PLL_REF_DIV_5 5
+#define PLL_FB_DIV_OFF 16
+#define PLL_FB_DIV_MASK GENMASK(24, 16)
+#define PLL_FB_DIV_96 96
+#define PLL_SEL_LPFR_OFF 28
+#define PLL_SEL_LPFR_MASK GENMASK(29, 28)
+#define PLL_READY BIT(31)
+#define USB2_PHY_CAL_CTRL 0x8
+#define PHY_PLLCAL_DONE BIT(31)
+#define PHY_IMPCAL_DONE BIT(23)
+#define USB2_RX_CHAN_CTRL1 0x18
+#define USB2PHY_SQCAL_DONE BIT(31)
+#define USB2_PHY_OTG_CTRL 0x34
+#define PHY_PU_OTG BIT(4)
+#define USB2_PHY_CHRGR_DETECT 0x38
+#define PHY_CDP_EN BIT(2)
+#define PHY_DCP_EN BIT(3)
+#define PHY_PD_EN BIT(4)
+#define PHY_PU_CHRG_DTC BIT(5)
+#define PHY_CDP_DM_AUTO BIT(7)
+#define PHY_ENSWITCH_DP BIT(12)
+#define PHY_ENSWITCH_DM BIT(13)
+
+/* Armada 3700 USB miscellaneous registers */
+#define USB2_PHY_CTRL(usb32) (usb32 ? 0x20 : 0x4)
+#define RB_USB2PHY_PU BIT(0)
+#define USB2_DP_PULLDN_DEV_MODE BIT(5)
+#define USB2_DM_PULLDN_DEV_MODE BIT(6)
+#define RB_USB2PHY_SUSPM(usb32) (usb32 ? BIT(14) : BIT(7))
+
+#define PLL_LOCK_DELAY_US 10000
+#define PLL_LOCK_TIMEOUT_US 1000000
+
+/**
+ * struct mvebu_a3700_utmi_caps - PHY capabilities
+ *
+ * @usb32: Flag indicating which PHY is in use (impacts the register map):
+ * - The UTMI PHY wired to the USB3/USB2 controller (otg)
+ * - The UTMI PHY wired to the USB2 controller (host only)
+ * @ops: PHY operations
+ */
+struct mvebu_a3700_utmi_caps {
+ int usb32;
+ const struct phy_ops *ops;
+};
+
+/**
+ * struct mvebu_a3700_utmi - PHY driver data
+ *
+ * @regs: PHY registers
+ * @usb_misc: Regmap with USB miscellaneous registers including PHY ones
+ * @caps: PHY capabilities
+ * @phy: PHY handle
+ */
+struct mvebu_a3700_utmi {
+ void __iomem *regs;
+ struct regmap *usb_misc;
+ const struct mvebu_a3700_utmi_caps *caps;
+ struct phy *phy;
+};
+
+static int mvebu_a3700_utmi_phy_power_on(struct phy *phy)
+{
+ struct mvebu_a3700_utmi *utmi = phy_get_drvdata(phy);
+ struct device *dev = &phy->dev;
+ int usb32 = utmi->caps->usb32;
+ int ret = 0;
+ u32 reg;
+
+ /*
+	 * Set up the PLL. The 40MHz clock used to be the default; it is 25MHz now.
+ * See "PLL Settings for Typical REFCLK" table.
+ */
+ reg = readl(utmi->regs + USB2_PHY_PLL_CTRL_REG0);
+ reg &= ~(PLL_REF_DIV_MASK | PLL_FB_DIV_MASK | PLL_SEL_LPFR_MASK);
+ reg |= (PLL_REF_DIV_5 << PLL_REF_DIV_OFF) |
+ (PLL_FB_DIV_96 << PLL_FB_DIV_OFF);
+ writel(reg, utmi->regs + USB2_PHY_PLL_CTRL_REG0);
+
+ /* Enable PHY pull up and disable USB2 suspend */
+ regmap_update_bits(utmi->usb_misc, USB2_PHY_CTRL(usb32),
+ RB_USB2PHY_SUSPM(usb32) | RB_USB2PHY_PU,
+ RB_USB2PHY_SUSPM(usb32) | RB_USB2PHY_PU);
+
+ if (usb32) {
+ /* Power up OTG module */
+ reg = readl(utmi->regs + USB2_PHY_OTG_CTRL);
+ reg |= PHY_PU_OTG;
+ writel(reg, utmi->regs + USB2_PHY_OTG_CTRL);
+
+ /* Disable PHY charger detection */
+ reg = readl(utmi->regs + USB2_PHY_CHRGR_DETECT);
+ reg &= ~(PHY_CDP_EN | PHY_DCP_EN | PHY_PD_EN | PHY_PU_CHRG_DTC |
+ PHY_CDP_DM_AUTO | PHY_ENSWITCH_DP | PHY_ENSWITCH_DM);
+ writel(reg, utmi->regs + USB2_PHY_CHRGR_DETECT);
+
+ /* Disable PHY DP/DM pull-down (used for device mode) */
+ regmap_update_bits(utmi->usb_misc, USB2_PHY_CTRL(usb32),
+ USB2_DP_PULLDN_DEV_MODE |
+ USB2_DM_PULLDN_DEV_MODE, 0);
+ }
+
+ /* Wait for PLL calibration */
+ ret = readl_poll_timeout(utmi->regs + USB2_PHY_CAL_CTRL, reg,
+ reg & PHY_PLLCAL_DONE,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Failed to end USB2 PLL calibration\n");
+ return ret;
+ }
+
+ /* Wait for impedance calibration */
+ ret = readl_poll_timeout(utmi->regs + USB2_PHY_CAL_CTRL, reg,
+ reg & PHY_IMPCAL_DONE,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Failed to end USB2 impedance calibration\n");
+ return ret;
+ }
+
+ /* Wait for squelch calibration */
+ ret = readl_poll_timeout(utmi->regs + USB2_RX_CHAN_CTRL1, reg,
+ reg & USB2PHY_SQCAL_DONE,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+		dev_err(dev, "Failed to end USB2 squelch calibration\n");
+ return ret;
+ }
+
+ /* Wait for PLL to be locked */
+ ret = readl_poll_timeout(utmi->regs + USB2_PHY_PLL_CTRL_REG0, reg,
+ reg & PLL_READY,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret)
+ dev_err(dev, "Failed to lock USB2 PLL\n");
+
+ return ret;
+}
+
+static int mvebu_a3700_utmi_phy_power_off(struct phy *phy)
+{
+ struct mvebu_a3700_utmi *utmi = phy_get_drvdata(phy);
+ int usb32 = utmi->caps->usb32;
+ u32 reg;
+
+ /* Disable PHY pull-up and enable USB2 suspend */
+ reg = readl(utmi->regs + USB2_PHY_CTRL(usb32));
+ reg &= ~(RB_USB2PHY_PU | RB_USB2PHY_SUSPM(usb32));
+ writel(reg, utmi->regs + USB2_PHY_CTRL(usb32));
+
+ /* Power down OTG module */
+ if (usb32) {
+ reg = readl(utmi->regs + USB2_PHY_OTG_CTRL);
+ reg &= ~PHY_PU_OTG;
+ writel(reg, utmi->regs + USB2_PHY_OTG_CTRL);
+ }
+
+ return 0;
+}
+
+static const struct phy_ops mvebu_a3700_utmi_phy_ops = {
+ .power_on = mvebu_a3700_utmi_phy_power_on,
+ .power_off = mvebu_a3700_utmi_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct mvebu_a3700_utmi_caps mvebu_a3700_utmi_otg_phy_caps = {
+ .usb32 = true,
+ .ops = &mvebu_a3700_utmi_phy_ops,
+};
+
+static const struct mvebu_a3700_utmi_caps mvebu_a3700_utmi_host_phy_caps = {
+ .usb32 = false,
+ .ops = &mvebu_a3700_utmi_phy_ops,
+};
+
+static const struct of_device_id mvebu_a3700_utmi_of_match[] = {
+ {
+ .compatible = "marvell,a3700-utmi-otg-phy",
+ .data = &mvebu_a3700_utmi_otg_phy_caps,
+ },
+ {
+ .compatible = "marvell,a3700-utmi-host-phy",
+ .data = &mvebu_a3700_utmi_host_phy_caps,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mvebu_a3700_utmi_of_match);
+
+static int mvebu_a3700_utmi_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mvebu_a3700_utmi *utmi;
+ struct phy_provider *provider;
+ struct resource *res;
+
+ utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL);
+ if (!utmi)
+ return -ENOMEM;
+
+ /* Get UTMI memory region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Missing UTMI PHY memory resource\n");
+ return -ENODEV;
+ }
+
+ utmi->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(utmi->regs))
+ return PTR_ERR(utmi->regs);
+
+ /* Get miscellaneous Host/PHY region */
+ utmi->usb_misc = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "marvell,usb-misc-reg");
+ if (IS_ERR(utmi->usb_misc)) {
+ dev_err(dev,
+ "Missing USB misc purpose system controller\n");
+ return PTR_ERR(utmi->usb_misc);
+ }
+
+ /* Retrieve PHY capabilities */
+ utmi->caps = of_device_get_match_data(dev);
+
+ /* Instantiate the PHY */
+ utmi->phy = devm_phy_create(dev, NULL, utmi->caps->ops);
+ if (IS_ERR(utmi->phy)) {
+ dev_err(dev, "Failed to create the UTMI PHY\n");
+ return PTR_ERR(utmi->phy);
+ }
+
+ phy_set_drvdata(utmi->phy, utmi);
+
+ /* Ensure the PHY is powered off */
+ utmi->caps->ops->power_off(utmi->phy);
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static struct platform_driver mvebu_a3700_utmi_driver = {
+ .probe = mvebu_a3700_utmi_phy_probe,
+ .driver = {
+ .name = "mvebu-a3700-utmi-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = mvebu_a3700_utmi_of_match,
+ },
+};
+module_platform_driver(mvebu_a3700_utmi_driver);
+
+MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Marvell EBU A3700 UTMI PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 187cccde53b5..d98e0451f6a1 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -580,8 +580,6 @@ static struct phy *mvebu_comphy_xlate(struct device *dev,
return phy;
lane = phy_get_drvdata(phy);
- if (lane->port >= 0)
- return ERR_PTR(-EBUSY);
lane->port = args->args[0];
return phy;
diff --git a/drivers/phy/marvell/phy-mvebu-sata.c b/drivers/phy/marvell/phy-mvebu-sata.c
index 768ce92e81ce..369fece2be7a 100644
--- a/drivers/phy/marvell/phy-mvebu-sata.c
+++ b/drivers/phy/marvell/phy-mvebu-sata.c
@@ -10,7 +10,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/io.h>
@@ -122,7 +122,6 @@ static const struct of_device_id phy_mvebu_sata_of_match[] = {
{ .compatible = "marvell,mvebu-sata-phy" },
{ },
};
-MODULE_DEVICE_TABLE(of, phy_mvebu_sata_of_match);
static struct platform_driver phy_mvebu_sata_driver = {
.probe = phy_mvebu_sata_probe,
@@ -131,8 +130,4 @@ static struct platform_driver phy_mvebu_sata_driver = {
.of_match_table = phy_mvebu_sata_of_match,
}
};
-module_platform_driver(phy_mvebu_sata_driver);
-
-MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
-MODULE_DESCRIPTION("Marvell MVEBU SATA PHY driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(phy_mvebu_sata_driver);
diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c
index 465fa1b91a5f..14e0551cd319 100644
--- a/drivers/phy/phy-core-mipi-dphy.c
+++ b/drivers/phy/phy-core-mipi-dphy.c
@@ -65,12 +65,12 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
*/
cfg->hs_trail = max(4 * 8 * ui, 60000 + 4 * 4 * ui);
- cfg->init = 100000000;
+ cfg->init = 100;
cfg->lpx = 60000;
cfg->ta_get = 5 * cfg->lpx;
cfg->ta_go = 4 * cfg->lpx;
cfg->ta_sure = 2 * cfg->lpx;
- cfg->wakeup = 1000000000;
+ cfg->wakeup = 1000;
cfg->hs_clk_rate = hs_clk_rate;
cfg->lanes = lanes;
@@ -143,7 +143,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg)
if (cfg->hs_trail < max(8 * ui, 60000 + 4 * ui))
return -EINVAL;
- if (cfg->init < 100000000)
+ if (cfg->init < 100)
return -EINVAL;
if (cfg->lpx < 50000)
@@ -158,7 +158,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg)
if (cfg->ta_sure < cfg->lpx || cfg->ta_sure > (2 * cfg->lpx))
return -EINVAL;
- if (cfg->wakeup < 1000000000)
+ if (cfg->wakeup < 1000)
return -EINVAL;
return 0;
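The hunks above switch cfg->init and cfg->wakeup from picosecond to microsecond
units (100 us and 1000 us respectively). A hedged consumer sketch (not part of
the patch; the helper name is an assumption) of how a DSI host driver would
obtain, validate and apply such a configuration:

#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>

static int example_dphy_setup(struct phy *dphy, unsigned long px_clk,
			      unsigned int bpp, unsigned int lanes)
{
	union phy_configure_opts opts = { };
	int ret;

	ret = phy_mipi_dphy_get_default_config(px_clk, bpp, lanes,
					       &opts.mipi_dphy);
	if (ret)
		return ret;

	/* After this patch: opts.mipi_dphy.init == 100 (us),
	 * opts.mipi_dphy.wakeup == 1000 (us). */
	ret = phy_validate(dphy, PHY_MODE_MIPI_DPHY, 0, &opts);
	if (ret)
		return ret;

	return phy_configure(dphy, &opts);
}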
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 19b05e824ee4..cb38f6e8614c 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -1112,14 +1112,4 @@ static int __init phy_core_init(void)
return 0;
}
-module_init(phy_core_init);
-
-static void __exit phy_core_exit(void)
-{
- class_destroy(phy_class);
-}
-module_exit(phy_core_exit);
-
-MODULE_DESCRIPTION("Generic PHY Framework");
-MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
-MODULE_LICENSE("GPL v2");
+device_initcall(phy_core_init);
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 6fd6e07ab345..09a77e556ece 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy)
err = reset_control_deassert(priv->reset);
if (err && priv->no_suspend_override)
- reset_control_assert(priv->no_suspend_override);
+ reset_control_deassert(priv->no_suspend_override);
return err;
}
@@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy");
+ priv->reset = devm_reset_control_get(&pdev->dev, "phy");
if (IS_ERR(priv->reset))
return PTR_ERR(priv->reset);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index b4006818e1b6..08d6f6f7f039 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -687,6 +687,116 @@ static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
};
+static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
/* phy-type - PCIE/UFS/USB */
@@ -1036,6 +1146,33 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.no_pcs_sw_reset = true,
};
+static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8998_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
+ .tx_tbl = msm8998_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl),
+ .rx_tbl = msm8998_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl),
+ .pcs_tbl = msm8998_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -1736,6 +1873,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,msm8996-qmp-usb3-phy",
.data = &msm8996_usb3phy_cfg,
}, {
+ .compatible = "qcom,msm8998-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
+ }, {
.compatible = "qcom,ipq8074-qmp-pcie-phy",
.data = &ipq8074_pciephy_cfg,
}, {
@@ -1747,6 +1887,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,sdm845-qmp-ufs-phy",
.data = &sdm845_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,msm8998-qmp-usb3-phy",
+ .data = &msm8998_usb3phy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index d201cc307151..a1b6cdee9a08 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -174,6 +174,7 @@
#define QSERDES_V3_COM_DIV_FRAC_START1_MODE1 0x0c4
#define QSERDES_V3_COM_DIV_FRAC_START2_MODE1 0x0c8
#define QSERDES_V3_COM_DIV_FRAC_START3_MODE1 0x0cc
+#define QSERDES_V3_COM_INTEGLOOP_INITVAL 0x0d0
#define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0 0x0d8
#define QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0 0x0dc
#define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1 0x0e0
@@ -201,6 +202,7 @@
#define QSERDES_V3_COM_DEBUG_BUS2 0x170
#define QSERDES_V3_COM_DEBUG_BUS3 0x174
#define QSERDES_V3_COM_DEBUG_BUS_SEL 0x178
+#define QSERDES_V3_COM_CMN_MODE 0x184
/* Only for QMP V3 PHY - TX registers */
#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX 0x044
@@ -211,6 +213,7 @@
#define QSERDES_V3_TX_RCV_DETECT_LVL_2 0x0a4
/* Only for QMP V3 PHY - RX registers */
+#define QSERDES_V3_RX_UCDR_FO_GAIN 0x008
#define QSERDES_V3_RX_UCDR_SO_GAIN_HALF 0x00c
#define QSERDES_V3_RX_UCDR_SO_GAIN 0x014
#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF 0x024
@@ -219,6 +222,7 @@
#define QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN 0x030
#define QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
#define QSERDES_V3_RX_UCDR_PI_CONTROLS 0x044
#define QSERDES_V3_RX_RX_TERM_BW 0x07c
#define QSERDES_V3_RX_VGA_CAL_CNTRL1 0x0bc
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 9177989f22d1..8fd7ce139772 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -152,6 +152,31 @@ static const struct qusb2_phy_init_tbl msm8996_init_tbl[] = {
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
};
+static const unsigned int msm8998_regs_layout[] = {
+ [QUSB2PHY_PLL_CORE_INPUT_OVERRIDE] = 0xa8,
+ [QUSB2PHY_PLL_STATUS] = 0x1a0,
+ [QUSB2PHY_PORT_TUNE1] = 0x23c,
+ [QUSB2PHY_PORT_TUNE2] = 0x240,
+ [QUSB2PHY_PORT_TUNE3] = 0x244,
+ [QUSB2PHY_PORT_TUNE4] = 0x248,
+ [QUSB2PHY_PORT_TEST1] = 0x24c,
+ [QUSB2PHY_PORT_TEST2] = 0x250,
+ [QUSB2PHY_PORT_POWERDOWN] = 0x210,
+ [QUSB2PHY_INTR_CTRL] = 0x22c,
+};
+
+static const struct qusb2_phy_init_tbl msm8998_init_tbl[] = {
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_ANALOG_CONTROLS_TWO, 0x13),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CLOCK_INVERTERS, 0x7c),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CMODE, 0x80),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_LOCK_DELAY, 0x0a),
+
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0xa5),
+ QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0x09),
+
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_DIGITAL_TIMERS_TWO, 0x19),
+};
+
static const unsigned int sdm845_regs_layout[] = {
[QUSB2PHY_PLL_CORE_INPUT_OVERRIDE] = 0xa8,
[QUSB2PHY_PLL_STATUS] = 0x1a0,
@@ -221,6 +246,18 @@ static const struct qusb2_phy_cfg msm8996_phy_cfg = {
.autoresume_en = BIT(3),
};
+static const struct qusb2_phy_cfg msm8998_phy_cfg = {
+ .tbl = msm8998_init_tbl,
+ .tbl_num = ARRAY_SIZE(msm8998_init_tbl),
+ .regs = msm8998_regs_layout,
+
+ .disable_ctrl = POWER_DOWN,
+ .mask_core_ready = CORE_READY_STATUS,
+ .has_pll_override = true,
+ .autoresume_en = BIT(0),
+ .update_tune1_with_efuse = true,
+};
+
static const struct qusb2_phy_cfg sdm845_phy_cfg = {
.tbl = sdm845_init_tbl,
.tbl_num = ARRAY_SIZE(sdm845_init_tbl),
@@ -734,6 +771,9 @@ static const struct of_device_id qusb2_phy_of_match_table[] = {
.compatible = "qcom,msm8996-qusb2-phy",
.data = &msm8996_phy_cfg,
}, {
+ .compatible = "qcom,msm8998-qusb2-phy",
+ .data = &msm8998_phy_cfg,
+ }, {
.compatible = "qcom,sdm845-qusb2-phy",
.data = &sdm845_phy_cfg,
},
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 681644e43248..f798fb64de94 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -23,24 +23,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
-
-#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
-({ \
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
- might_sleep_if(timeout_us); \
- for (;;) { \
- (val) = readl(addr); \
- if (cond) \
- break; \
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- (val) = readl(addr); \
- break; \
- } \
- if (sleep_us) \
- usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
- } \
- (cond) ? 0 : -ETIMEDOUT; \
-})
+#include <linux/iopoll.h>
#define UFS_QCOM_PHY_CAL_ENTRY(reg, val) \
{ \
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 91fba60267a0..ba07121c3eff 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -55,16 +55,16 @@ enum rockchip_usb2phy_host_state {
};
/**
- * Different states involved in USB charger detection.
- * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * enum usb_chg_state - Different states involved in USB charger detection.
+ * @USB_CHG_STATE_UNDEFINED: USB charger is not connected or detection
* process is not yet started.
- * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
- * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
- * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * @USB_CHG_STATE_WAIT_FOR_DCD: Waiting for Data pins contact.
+ * @USB_CHG_STATE_DCD_DONE: Data pin contact is detected.
+ * @USB_CHG_STATE_PRIMARY_DONE: Primary detection is completed (Detects
* between SDP and DCP/CDP).
- * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
- * between DCP and CDP).
- * USB_CHG_STATE_DETECTED USB charger type is determined.
+ * @USB_CHG_STATE_SECONDARY_DONE: Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * @USB_CHG_STATE_DETECTED: USB charger type is determined.
*/
enum usb_chg_state {
USB_CHG_STATE_UNDEFINED = 0,
@@ -94,7 +94,7 @@ struct usb2phy_reg {
};
/**
- * struct rockchip_chg_det_reg: usb charger detect registers
+ * struct rockchip_chg_det_reg - usb charger detect registers
* @cp_det: charging port detected successfully.
* @dcp_det: dedicated charging port detected successfully.
* @dp_det: assert data pin connect successfully.
@@ -120,7 +120,7 @@ struct rockchip_chg_det_reg {
};
/**
- * struct rockchip_usb2phy_port_cfg: usb-phy port configuration.
+ * struct rockchip_usb2phy_port_cfg - usb-phy port configuration.
* @phy_sus: phy suspend register.
* @bvalid_det_en: vbus valid rise detection enable register.
* @bvalid_det_st: vbus valid rise detection status register.
@@ -148,10 +148,11 @@ struct rockchip_usb2phy_port_cfg {
};
/**
- * struct rockchip_usb2phy_cfg: usb-phy configuration.
+ * struct rockchip_usb2phy_cfg - usb-phy configuration.
* @reg: the address offset of grf for usb-phy config.
* @num_ports: specify how many ports that the phy has.
* @clkout_ctl: keep on/turn off output clk of phy.
+ * @port_cfgs: usb-phy port configurations.
* @chg_det: charger detection registers.
*/
struct rockchip_usb2phy_cfg {
@@ -163,12 +164,10 @@ struct rockchip_usb2phy_cfg {
};
/**
- * struct rockchip_usb2phy_port: usb-phy port data.
+ * struct rockchip_usb2phy_port - usb-phy port data.
+ * @phy: generic phy.
* @port_id: flag for otg port or host port.
* @suspended: phy suspended flag.
- * @utmi_avalid: utmi avalid status usage flag.
- * true - use avalid to get vbus status
- * flase - use bvalid to get vbus status
* @vbus_attached: otg device vbus status.
* @bvalid_irq: IRQ number assigned for vbus valid rise detection.
* @ls_irq: IRQ number assigned for linestate detection.
@@ -178,7 +177,7 @@ struct rockchip_usb2phy_cfg {
* @chg_work: charge detect work.
* @otg_sm_work: OTG state machine work.
* @sm_work: HOST state machine work.
- * @phy_cfg: port register configuration, assigned by driver data.
+ * @port_cfg: port register configuration, assigned by driver data.
* @event_nb: hold event notification callback.
* @state: define OTG enumeration states before device reset.
* @mode: the dr_mode of the controller.
@@ -187,7 +186,6 @@ struct rockchip_usb2phy_port {
struct phy *phy;
unsigned int port_id;
bool suspended;
- bool utmi_avalid;
bool vbus_attached;
int bvalid_irq;
int ls_irq;
@@ -203,12 +201,13 @@ struct rockchip_usb2phy_port {
};
/**
- * struct rockchip_usb2phy: usb2.0 phy driver data.
+ * struct rockchip_usb2phy - usb2.0 phy driver data.
+ * @dev: pointer to device.
* @grf: General Register Files regmap.
* @usbgrf: USB General Register Files regmap.
* @clk: clock struct of phy input clk.
* @clk480m: clock struct of phy output clk.
- * @clk_hw: clock struct of phy output clk management.
+ * @clk480m_hw: clock struct of phy output clk management.
* @chg_state: states involved in USB charger detection.
* @chg_type: USB charger types.
* @dcd_retries: The retry count used to track Data contact
@@ -542,12 +541,8 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
unsigned long delay;
bool vbus_attach, sch_work, notify_charger;
- if (rport->utmi_avalid)
- vbus_attach = property_enabled(rphy->grf,
- &rport->port_cfg->utmi_avalid);
- else
- vbus_attach = property_enabled(rphy->grf,
- &rport->port_cfg->utmi_bvalid);
+ vbus_attach = property_enabled(rphy->grf,
+ &rport->port_cfg->utmi_bvalid);
sch_work = false;
notify_charger = false;
@@ -1021,9 +1016,6 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work);
INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work);
- rport->utmi_avalid =
- of_property_read_bool(child_np, "rockchip,utmi-avalid");
-
/*
* Some SoCs use one interrupt with otg-id/otg-bvalid/linestate
* interrupts muxed together, so probe the otg-mux interrupt first,
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index f137e0107764..103efc456a12 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -33,12 +33,11 @@ config OMAP_CONTROL_PHY
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || ARCH_K3
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
- select OMAP_CONTROL_PHY
- depends on OMAP_OCP2SCP
+ select OMAP_CONTROL_PHY if ARCH_OMAP2PLUS
help
Enable this to support the transceiver that is part of SOC. This
driver takes care of all the PHY functionality apart from comparator.
@@ -50,7 +49,6 @@ config TI_PIPE3
depends on ARCH_OMAP2PLUS || COMPILE_TEST
select GENERIC_PHY
select OMAP_CONTROL_PHY
- depends on OMAP_OCP2SCP
help
Enable this to support the PIPE3 PHY that is part of TI SOCs. This
driver takes care of all the PHY functionality apart from comparator.
@@ -82,6 +80,7 @@ config PHY_TI_GMII_SEL
default y if TI_CPSW=y
depends on TI_CPSW || COMPILE_TEST
select GENERIC_PHY
+ select REGMAP
default m
help
This driver supports configuring of the TI CPSW Port mode depending on
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 77fdaa551977..a52c5bb35033 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -204,11 +204,11 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
if (args->args_count < 1)
return ERR_PTR(-EINVAL);
+ if (!priv || !priv->if_phys)
+ return ERR_PTR(-ENODEV);
if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) &&
args->args_count < 2)
return ERR_PTR(-EINVAL);
- if (!priv || !priv->if_phys)
- return ERR_PTR(-ENODEV);
if (phy_id > priv->soc_data->num_ports)
return ERR_PTR(-EINVAL);
if (phy_id != priv->if_phys[phy_id - 1].id)
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index fe909fd8144f..e871f2983a0e 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -36,6 +36,10 @@
#define USB2PHY_DISCON_BYP_LATCH (1 << 31)
#define USB2PHY_ANA_CONFIG1 0x4c
+#define AM654_USB2_OTG_PD BIT(8)
+#define AM654_USB2_VBUS_DET_EN BIT(5)
+#define AM654_USB2_VBUSVALID_DET_EN BIT(4)
+
/**
 * omap_usb2_set_comparator - links the comparator present in the system with
* this phy
@@ -135,9 +139,9 @@ static int omap_usb_power_on(struct phy *x)
static int omap_usb2_disable_clocks(struct omap_usb *phy)
{
- clk_disable(phy->wkupclk);
+ clk_disable_unprepare(phy->wkupclk);
if (!IS_ERR(phy->optclk))
- clk_disable(phy->optclk);
+ clk_disable_unprepare(phy->optclk);
return 0;
}
@@ -146,14 +150,14 @@ static int omap_usb2_enable_clocks(struct omap_usb *phy)
{
int ret;
- ret = clk_enable(phy->wkupclk);
+ ret = clk_prepare_enable(phy->wkupclk);
if (ret < 0) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
goto err0;
}
if (!IS_ERR(phy->optclk)) {
- ret = clk_enable(phy->optclk);
+ ret = clk_prepare_enable(phy->optclk);
if (ret < 0) {
dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
goto err1;
@@ -245,6 +249,15 @@ static const struct usb_phy_data am437x_usb2_data = {
.power_off = AM437X_USB2_PHY_PD | AM437X_USB2_OTG_PD,
};
+static const struct usb_phy_data am654_usb2_data = {
+ .label = "am654_usb2",
+ .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT,
+ .mask = AM654_USB2_OTG_PD | AM654_USB2_VBUS_DET_EN |
+ AM654_USB2_VBUSVALID_DET_EN,
+ .power_on = AM654_USB2_VBUS_DET_EN | AM654_USB2_VBUSVALID_DET_EN,
+ .power_off = AM654_USB2_OTG_PD,
+};
+
static const struct of_device_id omap_usb2_id_table[] = {
{
.compatible = "ti,omap-usb2",
@@ -266,6 +279,10 @@ static const struct of_device_id omap_usb2_id_table[] = {
.compatible = "ti,am437x-usb2",
.data = &am437x_usb2_data,
},
+ {
+ .compatible = "ti,am654-usb2",
+ .data = &am654_usb2_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
@@ -346,63 +363,72 @@ static int omap_usb2_probe(struct platform_device *pdev)
}
}
- otg->set_host = omap_usb_set_host;
- otg->set_peripheral = omap_usb_set_peripheral;
- if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS)
- otg->set_vbus = omap_usb_set_vbus;
- if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
- otg->start_srp = omap_usb_start_srp;
- otg->usb_phy = &phy->phy;
-
- platform_set_drvdata(pdev, phy);
- pm_runtime_enable(phy->dev);
-
- generic_phy = devm_phy_create(phy->dev, NULL, &ops);
- if (IS_ERR(generic_phy)) {
- pm_runtime_disable(phy->dev);
- return PTR_ERR(generic_phy);
- }
-
- phy_set_drvdata(generic_phy, phy);
- omap_usb_power_off(generic_phy);
-
- phy_provider = devm_of_phy_provider_register(phy->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- pm_runtime_disable(phy->dev);
- return PTR_ERR(phy_provider);
- }
phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
- dev_warn(&pdev->dev, "unable to get wkupclk, trying old name\n");
+ if (PTR_ERR(phy->wkupclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "unable to get wkupclk %ld, trying old name\n",
+ PTR_ERR(phy->wkupclk));
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
+
if (IS_ERR(phy->wkupclk)) {
- dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
- pm_runtime_disable(phy->dev);
+ if (PTR_ERR(phy->wkupclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
return PTR_ERR(phy->wkupclk);
} else {
dev_warn(&pdev->dev,
"found usb_phy_cm_clk32k, please fix DTS\n");
}
}
- clk_prepare(phy->wkupclk);
phy->optclk = devm_clk_get(phy->dev, "refclk");
if (IS_ERR(phy->optclk)) {
+ if (PTR_ERR(phy->optclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
dev_dbg(&pdev->dev, "unable to get refclk, trying old name\n");
phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
+
if (IS_ERR(phy->optclk)) {
- dev_dbg(&pdev->dev,
- "unable to get usb_otg_ss_refclk960m\n");
+ if (PTR_ERR(phy->optclk) != -EPROBE_DEFER) {
+ dev_dbg(&pdev->dev,
+ "unable to get usb_otg_ss_refclk960m\n");
+ }
} else {
dev_warn(&pdev->dev,
"found usb_otg_ss_refclk960m, please fix DTS\n");
}
}
- if (!IS_ERR(phy->optclk))
- clk_prepare(phy->optclk);
+ otg->set_host = omap_usb_set_host;
+ otg->set_peripheral = omap_usb_set_peripheral;
+ if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS)
+ otg->set_vbus = omap_usb_set_vbus;
+ if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
+ otg->start_srp = omap_usb_start_srp;
+ otg->usb_phy = &phy->phy;
+
+ platform_set_drvdata(pdev, phy);
+ pm_runtime_enable(phy->dev);
+
+ generic_phy = devm_phy_create(phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy)) {
+ pm_runtime_disable(phy->dev);
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, phy);
+ omap_usb_power_off(generic_phy);
+
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ pm_runtime_disable(phy->dev);
+ return PTR_ERR(phy_provider);
+ }
+
usb_add_phy_dev(&phy->phy);
@@ -413,9 +439,6 @@ static int omap_usb2_remove(struct platform_device *pdev)
{
struct omap_usb *phy = platform_get_drvdata(pdev);
- clk_unprepare(phy->wkupclk);
- if (!IS_ERR(phy->optclk))
- clk_unprepare(phy->optclk);
usb_remove_phy(&phy->phy);
pm_runtime_disable(phy->dev);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 05044e323ea5..03ec7a5d9d0b 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{}
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 1817786ab6aa..a005cbccb4f7 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -45,12 +45,14 @@ config PINCTRL_MT2701
config PINCTRL_MT7623
bool "Mediatek MT7623 pin control with generic binding"
depends on MACH_MT7623 || COMPILE_TEST
+ depends on OF
default MACH_MT7623
select PINCTRL_MTK_MOORE
config PINCTRL_MT7629
bool "Mediatek MT7629 pin control"
depends on MACH_MT7629 || COMPILE_TEST
+ depends on OF
default MACH_MT7629
select PINCTRL_MTK_MOORE
@@ -92,6 +94,7 @@ config PINCTRL_MT6797
config PINCTRL_MT7622
bool "MediaTek MT7622 pin control"
+ depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_MOORE
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index c69ca95b1ad5..0f140a802137 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = {
static const char * const sdxc_a_groups[] = {
"sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
- "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
+ "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
};
static const char * const pcm_a_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index b03481ef99a1..98905d4a79ca 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
break;
case MCP_TYPE_S18:
+ one_regmap_config =
+ devm_kmemdup(dev, &mcp23x17_regmap,
+ sizeof(struct regmap_config), GFP_KERNEL);
+ if (!one_regmap_config)
+ return -ENOMEM;
mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
- &mcp23x17_regmap);
+ one_regmap_config);
mcp->reg_shift = 1;
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23s18";
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index 7aae52a09ff0..4ffd56ff809e 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -79,7 +79,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
- .tile = NORTH, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
index aa8b58125568..ef4268cc6227 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
.pins = h6_pins,
.npins = ARRAY_SIZE(h6_pins),
- .irq_banks = 3,
+ .irq_banks = 4,
.irq_bank_map = h6_irq_bank_map,
.irq_read_needs_mux = true,
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 5d9184d18c16..0e7fa69e93df 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
unsigned short bank = offset / PINS_PER_BANK;
- struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank];
- struct regulator *reg;
+ unsigned short bank_offset = bank - pctl->desc->pin_base /
+ PINS_PER_BANK;
+ struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
+ struct regulator *reg = s_reg->regulator;
+ char supply[16];
int ret;
- reg = s_reg->regulator;
- if (!reg) {
- char supply[16];
-
- snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
- reg = regulator_get(pctl->dev, supply);
- if (IS_ERR(reg)) {
- dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
- 'A' + bank);
- return PTR_ERR(reg);
- }
-
- s_reg->regulator = reg;
- refcount_set(&s_reg->refcount, 1);
- } else {
+ if (reg) {
refcount_inc(&s_reg->refcount);
+ return 0;
+ }
+
+ snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
+ reg = regulator_get(pctl->dev, supply);
+ if (IS_ERR(reg)) {
+ dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
+ 'A' + bank);
+ return PTR_ERR(reg);
}
ret = regulator_enable(reg);
@@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
goto out;
}
+ s_reg->regulator = reg;
+ refcount_set(&s_reg->refcount, 1);
+
return 0;
out:
- if (refcount_dec_and_test(&s_reg->refcount)) {
- regulator_put(s_reg->regulator);
- s_reg->regulator = NULL;
- }
+ regulator_put(s_reg->regulator);
return ret;
}
@@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
unsigned short bank = offset / PINS_PER_BANK;
- struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank];
+ unsigned short bank_offset = bank - pctl->desc->pin_base /
+ PINS_PER_BANK;
+ struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
if (!refcount_dec_and_test(&s_reg->refcount))
return 0;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e340d2a24b44..034c0317c8d6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -136,7 +136,7 @@ struct sunxi_pinctrl {
struct gpio_chip *chip;
const struct sunxi_pinctrl_desc *desc;
struct device *dev;
- struct sunxi_pinctrl_regulator regulators[12];
+ struct sunxi_pinctrl_regulator regulators[9];
struct irq_domain *domain;
struct sunxi_pinctrl_function *functions;
unsigned nfunctions;
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 479031aa4f88..74fdfa68d1f2 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -2,7 +2,7 @@ menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
depends on HAS_IOMEM
- ---help---
+ help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
@@ -12,7 +12,7 @@ if GOLDFISH
config GOLDFISH_PIPE
tristate "Goldfish virtual device for QEMU pipes"
- ---help---
+ help
This is a virtual device to drive the QEMU pipe interface used by
the Goldfish Android Virtual Device.
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e3b62c2ee8d1..b5e9db85e881 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -905,6 +905,7 @@ config TOSHIBA_WMI
config ACPI_CMPC
tristate "CMPC Laptop Extras"
depends on ACPI && INPUT
+ depends on BACKLIGHT_LCD_SUPPORT
depends on RFKILL || RFKILL=n
select BACKLIGHT_CLASS_DEVICE
help
@@ -1009,7 +1010,7 @@ config INTEL_MFLD_THERMAL
config INTEL_IPS
tristate "Intel Intelligent Power Sharing"
- depends on ACPI
+ depends on ACPI && PCI
---help---
Intel Calpella platforms support dynamic power sharing between the
CPU and GPU, maximizing performance in a given TDP. This driver,
@@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL
config SAMSUNG_Q10
tristate "Samsung Q10 Extras"
depends on ACPI
+ depends on BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
---help---
This driver provides support for backlight control on Samsung Q10
@@ -1135,7 +1137,7 @@ config SAMSUNG_Q10
config APPLE_GMUX
tristate "Apple Gmux Driver"
- depends on ACPI
+ depends on ACPI && PCI
depends on PNP
depends on BACKLIGHT_CLASS_DEVICE
depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
@@ -1174,7 +1176,7 @@ config INTEL_SMARTCONNECT
config INTEL_PMC_IPC
tristate "Intel PMC IPC Driver"
- depends on ACPI
+ depends on ACPI && PCI
---help---
This driver provides support for PMC control on some Intel platforms.
The PMC is an ARC processor which defines IPC commands for communication
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index 02bc74608cf3..6fa3cced6f8e 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -32,7 +32,7 @@ struct cht_int33fe_data {
struct i2c_client *fusb302;
struct i2c_client *pi3usb30532;
/* Contain a list-head must be per device */
- struct device_connection connections[5];
+ struct device_connection connections[4];
};
/*
@@ -174,16 +174,13 @@ static int cht_int33fe_probe(struct platform_device *pdev)
data->connections[0].endpoint[0] = "port0";
data->connections[0].endpoint[1] = "i2c-pi3usb30532";
- data->connections[0].id = "typec-switch";
+ data->connections[0].id = "orientation-switch";
data->connections[1].endpoint[0] = "port0";
data->connections[1].endpoint[1] = "i2c-pi3usb30532";
- data->connections[1].id = "typec-mux";
- data->connections[2].endpoint[0] = "port0";
- data->connections[2].endpoint[1] = "i2c-pi3usb30532";
- data->connections[2].id = "idff01m01";
- data->connections[3].endpoint[0] = "i2c-fusb302";
- data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch";
- data->connections[3].id = "usb-role-switch";
+ data->connections[1].id = "mode-switch";
+ data->connections[2].endpoint[0] = "i2c-fusb302";
+ data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch";
+ data->connections[2].id = "usb-role-switch";
device_connections_add(data->connections);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index b205b037fd61..4bfbfa3f78e6 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4392,7 +4392,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
list_add(&interrupt->list, &dev->interrupts);
interrupt->irq.triggering = p->triggering;
interrupt->irq.polarity = p->polarity;
- interrupt->irq.sharable = p->sharable;
+ interrupt->irq.shareable = p->shareable;
interrupt->irq.interrupt_count = 1;
interrupt->irq.interrupts[0] = p->interrupts[i];
}
@@ -4546,7 +4546,7 @@ static int sony_pic_enable(struct acpi_device *device,
memcpy(&resource->res3.data.irq, &irq->irq,
sizeof(struct acpi_resource_irq));
/* we requested a shared irq */
- resource->res3.data.irq.sharable = ACPI_SHARED;
+ resource->res3.data.irq.shareable = ACPI_SHARED;
resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
resource->res4.length = sizeof(struct acpi_resource);
@@ -4565,7 +4565,7 @@ static int sony_pic_enable(struct acpi_device *device,
memcpy(&resource->res2.data.irq, &irq->irq,
sizeof(struct acpi_resource_irq));
/* we requested a shared irq */
- resource->res2.data.irq.sharable = ACPI_SHARED;
+ resource->res2.data.irq.shareable = ACPI_SHARED;
resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
resource->res3.length = sizeof(struct acpi_resource);
@@ -4779,7 +4779,7 @@ static int sony_pic_add(struct acpi_device *device)
irq->irq.interrupts[0],
irq->irq.triggering,
irq->irq.polarity,
- irq->irq.sharable);
+ irq->irq.shareable);
spic_dev.cur_irq = irq;
break;
}
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 43d8ed577e70..c79417ca1b3c 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -215,7 +215,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
if (i >= 0) {
flags = acpi_dev_irq_flags(gpio->triggering,
gpio->polarity,
- gpio->sharable);
+ gpio->shareable);
} else {
flags = IORESOURCE_DISABLED;
}
@@ -324,7 +324,7 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
- flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -348,7 +348,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
}
}
- flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -681,7 +681,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev,
decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
irq->triggering = triggering;
irq->polarity = polarity;
- irq->sharable = shareable;
+ irq->shareable = shareable;
irq->interrupt_count = 1;
irq->interrupts[0] = p->start;
@@ -689,7 +689,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev,
(int) p->start,
triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
polarity == ACPI_ACTIVE_LOW ? "low" : "high",
- irq->sharable == ACPI_SHARED ? "shared" : "exclusive",
+ irq->shareable == ACPI_SHARED ? "shared" : "exclusive",
irq->descriptor_length);
}
@@ -711,14 +711,14 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
extended_irq->producer_consumer = ACPI_CONSUMER;
extended_irq->triggering = triggering;
extended_irq->polarity = polarity;
- extended_irq->sharable = shareable;
+ extended_irq->shareable = shareable;
extended_irq->interrupt_count = 1;
extended_irq->interrupts[0] = p->start;
pnp_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start,
triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
polarity == ACPI_ACTIVE_LOW ? "low" : "high",
- extended_irq->sharable == ACPI_SHARED ? "shared" : "exclusive");
+ extended_irq->shareable == ACPI_SHARED ? "shared" : "exclusive");
}
static void pnpacpi_encode_dma(struct pnp_dev *dev,
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 6cdb2c14eee4..4347f15165f8 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1156,6 +1156,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core),
INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core),
INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core),
+ INTEL_CPU_FAM6(ICELAKE_MOBILE, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
@@ -1164,6 +1165,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_TREMONT_X, rapl_defaults_core),
INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index d137c480db46..7fe18636915a 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -43,7 +43,7 @@ config PTP_1588_CLOCK_DTE
config PTP_1588_CLOCK_QORIQ
tristate "Freescale QorIQ 1588 timer as PTP clock"
- depends on GIANFAR || FSL_DPAA_ETH
+ depends on GIANFAR || FSL_DPAA_ETH || FSL_ENETC || FSL_ENETC_VF
depends on PTP_1588_CLOCK
default y
help
@@ -53,7 +53,7 @@ config PTP_1588_CLOCK_QORIQ
packets using the SO_TIMESTAMPING API.
To compile this driver as a module, choose M here: the module
- will be called ptp_qoriq.
+ will be called ptp-qoriq.
config PTP_1588_CLOCK_IXP46X
tristate "Intel IXP46x as PTP clock"
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 19efa9cfa950..677d1d178a3e 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -9,4 +9,6 @@ obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o
obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
-obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o
+obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
+ptp-qoriq-y += ptp_qoriq.o
+ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 797fab33bb98..7cbea796652a 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -224,7 +224,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
extoff = NULL;
break;
}
- if (extoff->n_samples > PTP_MAX_SAMPLES) {
+ if (extoff->n_samples > PTP_MAX_SAMPLES
+ || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
err = -EINVAL;
break;
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 48f3594a7458..79bd102c9bbc 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -124,7 +124,7 @@ static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
return err;
}
-static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
+static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock_info *ops;
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index fdd49c26bbcc..53775362aac6 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -22,7 +22,6 @@
#include <linux/device.h>
#include <linux/hrtimer.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -37,152 +36,188 @@
* Register access functions
*/
-/* Caller must hold qoriq_ptp->lock. */
-static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
+/* Caller must hold ptp_qoriq->lock. */
+static u64 tmr_cnt_read(struct ptp_qoriq *ptp_qoriq)
{
- struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u64 ns;
u32 lo, hi;
- lo = qoriq_read(&regs->ctrl_regs->tmr_cnt_l);
- hi = qoriq_read(&regs->ctrl_regs->tmr_cnt_h);
+ lo = ptp_qoriq->read(&regs->ctrl_regs->tmr_cnt_l);
+ hi = ptp_qoriq->read(&regs->ctrl_regs->tmr_cnt_h);
ns = ((u64) hi) << 32;
ns |= lo;
return ns;
}
-/* Caller must hold qoriq_ptp->lock. */
-static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns)
+/* Caller must hold ptp_qoriq->lock. */
+static void tmr_cnt_write(struct ptp_qoriq *ptp_qoriq, u64 ns)
{
- struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u32 hi = ns >> 32;
u32 lo = ns & 0xffffffff;
- qoriq_write(&regs->ctrl_regs->tmr_cnt_l, lo);
- qoriq_write(&regs->ctrl_regs->tmr_cnt_h, hi);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_cnt_l, lo);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_cnt_h, hi);
}
-/* Caller must hold qoriq_ptp->lock. */
-static void set_alarm(struct qoriq_ptp *qoriq_ptp)
+/* Caller must hold ptp_qoriq->lock. */
+static void set_alarm(struct ptp_qoriq *ptp_qoriq)
{
- struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
u64 ns;
u32 lo, hi;
- ns = tmr_cnt_read(qoriq_ptp) + 1500000000ULL;
+ ns = tmr_cnt_read(ptp_qoriq) + 1500000000ULL;
ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
- ns -= qoriq_ptp->tclk_period;
+ ns -= ptp_qoriq->tclk_period;
hi = ns >> 32;
lo = ns & 0xffffffff;
- qoriq_write(&regs->alarm_regs->tmr_alarm1_l, lo);
- qoriq_write(&regs->alarm_regs->tmr_alarm1_h, hi);
+ ptp_qoriq->write(&regs->alarm_regs->tmr_alarm1_l, lo);
+ ptp_qoriq->write(&regs->alarm_regs->tmr_alarm1_h, hi);
}
-/* Caller must hold qoriq_ptp->lock. */
-static void set_fipers(struct qoriq_ptp *qoriq_ptp)
+/* Caller must hold ptp_qoriq->lock. */
+static void set_fipers(struct ptp_qoriq *ptp_qoriq)
{
- struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- set_alarm(qoriq_ptp);
- qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
- qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
+ set_alarm(ptp_qoriq);
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+}
+
+static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index,
+ bool update_event)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ struct ptp_clock_event event;
+ void __iomem *reg_etts_l;
+ void __iomem *reg_etts_h;
+ u32 valid, stat, lo, hi;
+
+ switch (index) {
+ case 0:
+ valid = ETS1_VLD;
+ reg_etts_l = &regs->etts_regs->tmr_etts1_l;
+ reg_etts_h = &regs->etts_regs->tmr_etts1_h;
+ break;
+ case 1:
+ valid = ETS2_VLD;
+ reg_etts_l = &regs->etts_regs->tmr_etts2_l;
+ reg_etts_h = &regs->etts_regs->tmr_etts2_h;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = index;
+
+ do {
+ lo = ptp_qoriq->read(reg_etts_l);
+ hi = ptp_qoriq->read(reg_etts_h);
+
+ if (update_event) {
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ ptp_clock_event(ptp_qoriq->clock, &event);
+ }
+
+ stat = ptp_qoriq->read(&regs->ctrl_regs->tmr_stat);
+ } while (ptp_qoriq->extts_fifo_support && (stat & valid));
+
+ return 0;
}
/*
* Interrupt service routine
*/
-static irqreturn_t isr(int irq, void *priv)
+irqreturn_t ptp_qoriq_isr(int irq, void *priv)
{
- struct qoriq_ptp *qoriq_ptp = priv;
- struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ struct ptp_qoriq *ptp_qoriq = priv;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
struct ptp_clock_event event;
u64 ns;
- u32 ack = 0, lo, hi, mask, val;
+ u32 ack = 0, lo, hi, mask, val, irqs;
+
+ spin_lock(&ptp_qoriq->lock);
+
+ val = ptp_qoriq->read(&regs->ctrl_regs->tmr_tevent);
+ mask = ptp_qoriq->read(&regs->ctrl_regs->tmr_temask);
+
+ spin_unlock(&ptp_qoriq->lock);
- val = qoriq_read(&regs->ctrl_regs->tmr_tevent);
+ irqs = val & mask;
- if (val & ETS1) {
+ if (irqs & ETS1) {
ack |= ETS1;
- hi = qoriq_read(&regs->etts_regs->tmr_etts1_h);
- lo = qoriq_read(&regs->etts_regs->tmr_etts1_l);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
- event.timestamp = ((u64) hi) << 32;
- event.timestamp |= lo;
- ptp_clock_event(qoriq_ptp->clock, &event);
+ extts_clean_up(ptp_qoriq, 0, true);
}
- if (val & ETS2) {
+ if (irqs & ETS2) {
ack |= ETS2;
- hi = qoriq_read(&regs->etts_regs->tmr_etts2_h);
- lo = qoriq_read(&regs->etts_regs->tmr_etts2_l);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 1;
- event.timestamp = ((u64) hi) << 32;
- event.timestamp |= lo;
- ptp_clock_event(qoriq_ptp->clock, &event);
+ extts_clean_up(ptp_qoriq, 1, true);
}
- if (val & ALM2) {
+ if (irqs & ALM2) {
ack |= ALM2;
- if (qoriq_ptp->alarm_value) {
+ if (ptp_qoriq->alarm_value) {
event.type = PTP_CLOCK_ALARM;
event.index = 0;
- event.timestamp = qoriq_ptp->alarm_value;
- ptp_clock_event(qoriq_ptp->clock, &event);
+ event.timestamp = ptp_qoriq->alarm_value;
+ ptp_clock_event(ptp_qoriq->clock, &event);
}
- if (qoriq_ptp->alarm_interval) {
- ns = qoriq_ptp->alarm_value + qoriq_ptp->alarm_interval;
+ if (ptp_qoriq->alarm_interval) {
+ ns = ptp_qoriq->alarm_value + ptp_qoriq->alarm_interval;
hi = ns >> 32;
lo = ns & 0xffffffff;
- spin_lock(&qoriq_ptp->lock);
- qoriq_write(&regs->alarm_regs->tmr_alarm2_l, lo);
- qoriq_write(&regs->alarm_regs->tmr_alarm2_h, hi);
- spin_unlock(&qoriq_ptp->lock);
- qoriq_ptp->alarm_value = ns;
+ ptp_qoriq->write(&regs->alarm_regs->tmr_alarm2_l, lo);
+ ptp_qoriq->write(&regs->alarm_regs->tmr_alarm2_h, hi);
+ ptp_qoriq->alarm_value = ns;
} else {
- qoriq_write(&regs->ctrl_regs->tmr_tevent, ALM2);
- spin_lock(&qoriq_ptp->lock);
- mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
+ spin_lock(&ptp_qoriq->lock);
+ mask = ptp_qoriq->read(&regs->ctrl_regs->tmr_temask);
mask &= ~ALM2EN;
- qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
- spin_unlock(&qoriq_ptp->lock);
- qoriq_ptp->alarm_value = 0;
- qoriq_ptp->alarm_interval = 0;
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, mask);
+ spin_unlock(&ptp_qoriq->lock);
+ ptp_qoriq->alarm_value = 0;
+ ptp_qoriq->alarm_interval = 0;
}
}
- if (val & PP1) {
+ if (irqs & PP1) {
ack |= PP1;
event.type = PTP_CLOCK_PPS;
- ptp_clock_event(qoriq_ptp->clock, &event);
+ ptp_clock_event(ptp_qoriq->clock, &event);
}
if (ack) {
- qoriq_write(&regs->ctrl_regs->tmr_tevent, ack);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_tevent, ack);
return IRQ_HANDLED;
} else
return IRQ_NONE;
}
+EXPORT_SYMBOL_GPL(ptp_qoriq_isr);
/*
* PTP clock operations
*/
-static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
u64 adj, diff;
u32 tmr_add;
int neg_adj = 0;
- struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
- struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
if (scaled_ppm < 0) {
neg_adj = 1;
scaled_ppm = -scaled_ppm;
}
- tmr_add = qoriq_ptp->tmr_add;
+ tmr_add = ptp_qoriq->tmr_add;
adj = tmr_add;
/* calculate diff as adj*(scaled_ppm/65536)/1000000
@@ -194,73 +229,76 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
- qoriq_write(&regs->ctrl_regs->tmr_add, tmr_add);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add);
return 0;
}
+EXPORT_SYMBOL_GPL(ptp_qoriq_adjfine);
-static int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta)
+int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
s64 now;
unsigned long flags;
- struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
- spin_lock_irqsave(&qoriq_ptp->lock, flags);
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
- now = tmr_cnt_read(qoriq_ptp);
+ now = tmr_cnt_read(ptp_qoriq);
now += delta;
- tmr_cnt_write(qoriq_ptp, now);
- set_fipers(qoriq_ptp);
+ tmr_cnt_write(ptp_qoriq, now);
+ set_fipers(ptp_qoriq);
- spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(ptp_qoriq_adjtime);
-static int ptp_qoriq_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
- struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
- spin_lock_irqsave(&qoriq_ptp->lock, flags);
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
- ns = tmr_cnt_read(qoriq_ptp);
+ ns = tmr_cnt_read(ptp_qoriq);
- spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
*ts = ns_to_timespec64(ns);
return 0;
}
+EXPORT_SYMBOL_GPL(ptp_qoriq_gettime);
-static int ptp_qoriq_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
+int ptp_qoriq_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
{
u64 ns;
unsigned long flags;
- struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
ns = timespec64_to_ns(ts);
- spin_lock_irqsave(&qoriq_ptp->lock, flags);
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
- tmr_cnt_write(qoriq_ptp, ns);
- set_fipers(qoriq_ptp);
+ tmr_cnt_write(ptp_qoriq, ns);
+ set_fipers(ptp_qoriq);
- spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(ptp_qoriq_settime);
-static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+int ptp_qoriq_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
- struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
- struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
unsigned long flags;
- u32 bit, mask;
+ u32 bit, mask = 0;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
@@ -274,33 +312,34 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
default:
return -EINVAL;
}
- spin_lock_irqsave(&qoriq_ptp->lock, flags);
- mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
- if (on)
- mask |= bit;
- else
- mask &= ~bit;
- qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
- spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
- return 0;
- case PTP_CLK_REQ_PPS:
- spin_lock_irqsave(&qoriq_ptp->lock, flags);
- mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
if (on)
- mask |= PP1EN;
- else
- mask &= ~PP1EN;
- qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
- spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
- return 0;
+ extts_clean_up(ptp_qoriq, rq->extts.index, false);
- default:
break;
+ case PTP_CLK_REQ_PPS:
+ bit = PP1EN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ mask = ptp_qoriq->read(&regs->ctrl_regs->tmr_temask);
+ if (on) {
+ mask |= bit;
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_tevent, bit);
+ } else {
+ mask &= ~bit;
}
- return -EOPNOTSUPP;
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, mask);
+
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
+ return 0;
}
+EXPORT_SYMBOL_GPL(ptp_qoriq_enable);
static const struct ptp_clock_info ptp_qoriq_caps = {
.owner = THIS_MODULE,
@@ -319,7 +358,7 @@ static const struct ptp_clock_info ptp_qoriq_caps = {
};
/**
- * qoriq_ptp_nominal_freq - calculate nominal frequency according to
+ * ptp_qoriq_nominal_freq - calculate nominal frequency according to
* reference clock frequency
*
* @clk_src: reference clock frequency
@@ -330,7 +369,7 @@ static const struct ptp_clock_info ptp_qoriq_caps = {
*
* Return the nominal frequency
*/
-static u32 qoriq_ptp_nominal_freq(u32 clk_src)
+static u32 ptp_qoriq_nominal_freq(u32 clk_src)
{
u32 remainder = 0;
@@ -350,9 +389,9 @@ static u32 qoriq_ptp_nominal_freq(u32 clk_src)
}
/**
- * qoriq_ptp_auto_config - calculate a set of default configurations
+ * ptp_qoriq_auto_config - calculate a set of default configurations
*
- * @qoriq_ptp: pointer to qoriq_ptp
+ * @ptp_qoriq: pointer to ptp_qoriq
* @node: pointer to device_node
*
* If below dts properties are not provided, this function will be
@@ -366,7 +405,7 @@ static u32 qoriq_ptp_nominal_freq(u32 clk_src)
*
* Return 0 if success
*/
-static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp,
+static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
struct device_node *node)
{
struct clk *clk;
@@ -376,7 +415,7 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp,
u32 remainder = 0;
u32 clk_src = 0;
- qoriq_ptp->cksel = DEFAULT_CKSEL;
+ ptp_qoriq->cksel = DEFAULT_CKSEL;
clk = of_clk_get(node, 0);
if (!IS_ERR(clk)) {
@@ -389,12 +428,12 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp,
return -EINVAL;
}
- nominal_freq = qoriq_ptp_nominal_freq(clk_src);
+ nominal_freq = ptp_qoriq_nominal_freq(clk_src);
if (!nominal_freq)
return -EINVAL;
- qoriq_ptp->tclk_period = 1000000000UL / nominal_freq;
- qoriq_ptp->tmr_prsc = DEFAULT_TMR_PRSC;
+ ptp_qoriq->tclk_period = 1000000000UL / nominal_freq;
+ ptp_qoriq->tmr_prsc = DEFAULT_TMR_PRSC;
/* Calculate initial frequency compensation value for TMR_ADD register.
* freq_comp = ceil(2^32 / freq_ratio)
@@ -405,164 +444,193 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp,
if (remainder)
freq_comp++;
- qoriq_ptp->tmr_add = freq_comp;
- qoriq_ptp->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - qoriq_ptp->tclk_period;
- qoriq_ptp->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - qoriq_ptp->tclk_period;
+ ptp_qoriq->tmr_add = freq_comp;
+ ptp_qoriq->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - ptp_qoriq->tclk_period;
+ ptp_qoriq->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - ptp_qoriq->tclk_period;
/* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
* freq_ratio = reference_clock_freq / nominal_freq
*/
max_adj = 1000000000ULL * (clk_src - nominal_freq);
max_adj = div_u64(max_adj, nominal_freq) - 1;
- qoriq_ptp->caps.max_adj = max_adj;
+ ptp_qoriq->caps.max_adj = max_adj;
return 0;
}
-static int qoriq_ptp_probe(struct platform_device *dev)
+int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ const struct ptp_clock_info *caps)
{
- struct device_node *node = dev->dev.of_node;
- struct qoriq_ptp *qoriq_ptp;
- struct qoriq_ptp_registers *regs;
+ struct device_node *node = ptp_qoriq->dev->of_node;
+ struct ptp_qoriq_registers *regs;
struct timespec64 now;
- int err = -ENOMEM;
- u32 tmr_ctrl;
unsigned long flags;
- void __iomem *base;
-
- qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL);
- if (!qoriq_ptp)
- goto no_memory;
+ u32 tmr_ctrl;
- err = -EINVAL;
+ ptp_qoriq->base = base;
+ ptp_qoriq->caps = *caps;
- qoriq_ptp->caps = ptp_qoriq_caps;
+ if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel))
+ ptp_qoriq->cksel = DEFAULT_CKSEL;
- if (of_property_read_u32(node, "fsl,cksel", &qoriq_ptp->cksel))
- qoriq_ptp->cksel = DEFAULT_CKSEL;
+ if (of_property_read_bool(node, "fsl,extts-fifo"))
+ ptp_qoriq->extts_fifo_support = true;
+ else
+ ptp_qoriq->extts_fifo_support = false;
if (of_property_read_u32(node,
- "fsl,tclk-period", &qoriq_ptp->tclk_period) ||
+ "fsl,tclk-period", &ptp_qoriq->tclk_period) ||
of_property_read_u32(node,
- "fsl,tmr-prsc", &qoriq_ptp->tmr_prsc) ||
+ "fsl,tmr-prsc", &ptp_qoriq->tmr_prsc) ||
of_property_read_u32(node,
- "fsl,tmr-add", &qoriq_ptp->tmr_add) ||
+ "fsl,tmr-add", &ptp_qoriq->tmr_add) ||
of_property_read_u32(node,
- "fsl,tmr-fiper1", &qoriq_ptp->tmr_fiper1) ||
+ "fsl,tmr-fiper1", &ptp_qoriq->tmr_fiper1) ||
of_property_read_u32(node,
- "fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) ||
+ "fsl,tmr-fiper2", &ptp_qoriq->tmr_fiper2) ||
of_property_read_u32(node,
- "fsl,max-adj", &qoriq_ptp->caps.max_adj)) {
+ "fsl,max-adj", &ptp_qoriq->caps.max_adj)) {
pr_warn("device tree node missing required elements, try automatic configuration\n");
- if (qoriq_ptp_auto_config(qoriq_ptp, node))
- goto no_config;
+ if (ptp_qoriq_auto_config(ptp_qoriq, node))
+ return -ENODEV;
}
- err = -ENODEV;
+ if (of_property_read_bool(node, "little-endian")) {
+ ptp_qoriq->read = qoriq_read_le;
+ ptp_qoriq->write = qoriq_write_le;
+ } else {
+ ptp_qoriq->read = qoriq_read_be;
+ ptp_qoriq->write = qoriq_write_be;
+ }
+
+ /* The eTSEC uses a different memory map from DPAA/ENETC */
+ if (of_device_is_compatible(node, "fsl,etsec-ptp")) {
+ ptp_qoriq->regs.ctrl_regs = base + ETSEC_CTRL_REGS_OFFSET;
+ ptp_qoriq->regs.alarm_regs = base + ETSEC_ALARM_REGS_OFFSET;
+ ptp_qoriq->regs.fiper_regs = base + ETSEC_FIPER_REGS_OFFSET;
+ ptp_qoriq->regs.etts_regs = base + ETSEC_ETTS_REGS_OFFSET;
+ } else {
+ ptp_qoriq->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
+ ptp_qoriq->regs.alarm_regs = base + ALARM_REGS_OFFSET;
+ ptp_qoriq->regs.fiper_regs = base + FIPER_REGS_OFFSET;
+ ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
+ }
+
+ ktime_get_real_ts64(&now);
+ ptp_qoriq_settime(&ptp_qoriq->caps, &now);
+
+ tmr_ctrl =
+ (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
+ (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
+
+ spin_lock_init(&ptp_qoriq->lock);
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ regs = &ptp_qoriq->regs;
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_add, ptp_qoriq->tmr_add);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_prsc, ptp_qoriq->tmr_prsc);
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+ set_alarm(ptp_qoriq);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl,
+ tmr_ctrl|FIPERST|RTPE|TE|FRD);
+
+ spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
+
+ ptp_qoriq->clock = ptp_clock_register(&ptp_qoriq->caps, ptp_qoriq->dev);
+ if (IS_ERR(ptp_qoriq->clock))
+ return PTR_ERR(ptp_qoriq->clock);
+
+ ptp_qoriq->phc_index = ptp_clock_index(ptp_qoriq->clock);
+ ptp_qoriq_create_debugfs(ptp_qoriq);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_init);
- qoriq_ptp->irq = platform_get_irq(dev, 0);
+void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, 0);
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, 0);
+
+ ptp_qoriq_remove_debugfs(ptp_qoriq);
+ ptp_clock_unregister(ptp_qoriq->clock);
+ iounmap(ptp_qoriq->base);
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
+}
+EXPORT_SYMBOL_GPL(ptp_qoriq_free);
+
+static int ptp_qoriq_probe(struct platform_device *dev)
+{
+ struct ptp_qoriq *ptp_qoriq;
+ int err = -ENOMEM;
+ void __iomem *base;
+
+ ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL);
+ if (!ptp_qoriq)
+ goto no_memory;
- if (qoriq_ptp->irq < 0) {
+ ptp_qoriq->dev = &dev->dev;
+
+ err = -ENODEV;
+
+ ptp_qoriq->irq = platform_get_irq(dev, 0);
+ if (ptp_qoriq->irq < 0) {
pr_err("irq not in device tree\n");
goto no_node;
}
- if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) {
+ if (request_irq(ptp_qoriq->irq, ptp_qoriq_isr, IRQF_SHARED,
+ DRIVER, ptp_qoriq)) {
pr_err("request_irq failed\n");
goto no_node;
}
- qoriq_ptp->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!qoriq_ptp->rsrc) {
+ ptp_qoriq->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!ptp_qoriq->rsrc) {
pr_err("no resource\n");
goto no_resource;
}
- if (request_resource(&iomem_resource, qoriq_ptp->rsrc)) {
+ if (request_resource(&iomem_resource, ptp_qoriq->rsrc)) {
pr_err("resource busy\n");
goto no_resource;
}
- spin_lock_init(&qoriq_ptp->lock);
-
- base = ioremap(qoriq_ptp->rsrc->start,
- resource_size(qoriq_ptp->rsrc));
+ base = ioremap(ptp_qoriq->rsrc->start,
+ resource_size(ptp_qoriq->rsrc));
if (!base) {
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
}
- qoriq_ptp->base = base;
-
- if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) {
- qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET;
- qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET;
- qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET;
- qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET;
- } else {
- qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
- qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET;
- qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET;
- qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET;
- }
-
- ktime_get_real_ts64(&now);
- ptp_qoriq_settime(&qoriq_ptp->caps, &now);
-
- tmr_ctrl =
- (qoriq_ptp->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
- (qoriq_ptp->cksel & CKSEL_MASK) << CKSEL_SHIFT;
-
- spin_lock_irqsave(&qoriq_ptp->lock, flags);
-
- regs = &qoriq_ptp->regs;
- qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
- qoriq_write(&regs->ctrl_regs->tmr_add, qoriq_ptp->tmr_add);
- qoriq_write(&regs->ctrl_regs->tmr_prsc, qoriq_ptp->tmr_prsc);
- qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
- qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
- set_alarm(qoriq_ptp);
- qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
-
- spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
-
- qoriq_ptp->clock = ptp_clock_register(&qoriq_ptp->caps, &dev->dev);
- if (IS_ERR(qoriq_ptp->clock)) {
- err = PTR_ERR(qoriq_ptp->clock);
+ err = ptp_qoriq_init(ptp_qoriq, base, &ptp_qoriq_caps);
+ if (err)
goto no_clock;
- }
- qoriq_ptp->phc_index = ptp_clock_index(qoriq_ptp->clock);
-
- platform_set_drvdata(dev, qoriq_ptp);
+ platform_set_drvdata(dev, ptp_qoriq);
return 0;
no_clock:
- iounmap(qoriq_ptp->base);
+ iounmap(ptp_qoriq->base);
no_ioremap:
- release_resource(qoriq_ptp->rsrc);
+ release_resource(ptp_qoriq->rsrc);
no_resource:
- free_irq(qoriq_ptp->irq, qoriq_ptp);
-no_config:
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
no_node:
- kfree(qoriq_ptp);
+ kfree(ptp_qoriq);
no_memory:
return err;
}
-static int qoriq_ptp_remove(struct platform_device *dev)
+static int ptp_qoriq_remove(struct platform_device *dev)
{
- struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev);
- struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
-
- qoriq_write(&regs->ctrl_regs->tmr_temask, 0);
- qoriq_write(&regs->ctrl_regs->tmr_ctrl, 0);
-
- ptp_clock_unregister(qoriq_ptp->clock);
- iounmap(qoriq_ptp->base);
- release_resource(qoriq_ptp->rsrc);
- free_irq(qoriq_ptp->irq, qoriq_ptp);
- kfree(qoriq_ptp);
+ struct ptp_qoriq *ptp_qoriq = platform_get_drvdata(dev);
+ ptp_qoriq_free(ptp_qoriq);
+ release_resource(ptp_qoriq->rsrc);
+ kfree(ptp_qoriq);
return 0;
}
@@ -573,16 +641,16 @@ static const struct of_device_id match_table[] = {
};
MODULE_DEVICE_TABLE(of, match_table);
-static struct platform_driver qoriq_ptp_driver = {
+static struct platform_driver ptp_qoriq_driver = {
.driver = {
.name = "ptp_qoriq",
.of_match_table = match_table,
},
- .probe = qoriq_ptp_probe,
- .remove = qoriq_ptp_remove,
+ .probe = ptp_qoriq_probe,
+ .remove = ptp_qoriq_remove,
};
-module_platform_driver(qoriq_ptp_driver);
+module_platform_driver(ptp_qoriq_driver);
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer");
diff --git a/drivers/ptp/ptp_qoriq_debugfs.c b/drivers/ptp/ptp_qoriq_debugfs.c
new file mode 100644
index 000000000000..e8dddcedf288
--- /dev/null
+++ b/drivers/ptp/ptp_qoriq_debugfs.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2019 NXP
+ */
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/fsl/ptp_qoriq.h>
+
+static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val)
+{
+ struct ptp_qoriq *ptp_qoriq = data;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 ctrl;
+
+ ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ *val = ctrl & PP1L ? 1 : 0;
+
+ return 0;
+}
+
+static int ptp_qoriq_fiper1_lpbk_set(void *data, u64 val)
+{
+ struct ptp_qoriq *ptp_qoriq = data;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 ctrl;
+
+ ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ if (val == 0)
+ ctrl &= ~PP1L;
+ else
+ ctrl |= PP1L;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper1_fops, ptp_qoriq_fiper1_lpbk_get,
+ ptp_qoriq_fiper1_lpbk_set, "%llu\n");
+
+static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val)
+{
+ struct ptp_qoriq *ptp_qoriq = data;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 ctrl;
+
+ ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ *val = ctrl & PP2L ? 1 : 0;
+
+ return 0;
+}
+
+static int ptp_qoriq_fiper2_lpbk_set(void *data, u64 val)
+{
+ struct ptp_qoriq *ptp_qoriq = data;
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 ctrl;
+
+ ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ if (val == 0)
+ ctrl &= ~PP2L;
+ else
+ ctrl |= PP2L;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper2_fops, ptp_qoriq_fiper2_lpbk_get,
+ ptp_qoriq_fiper2_lpbk_set, "%llu\n");
+
+void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(dev_name(ptp_qoriq->dev), NULL);
+ if (IS_ERR(root))
+ return;
+ if (!root)
+ goto err_root;
+
+ ptp_qoriq->debugfs_root = root;
+
+ if (!debugfs_create_file_unsafe("fiper1-loopback", 0600, root,
+ ptp_qoriq, &ptp_qoriq_fiper1_fops))
+ goto err_node;
+ if (!debugfs_create_file_unsafe("fiper2-loopback", 0600, root,
+ ptp_qoriq, &ptp_qoriq_fiper2_fops))
+ goto err_node;
+ return;
+
+err_node:
+ debugfs_remove_recursive(root);
+ ptp_qoriq->debugfs_root = NULL;
+err_root:
+ dev_err(ptp_qoriq->dev, "failed to initialize debugfs\n");
+}
+
+void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
+{
+ debugfs_remove_recursive(ptp_qoriq->debugfs_root);
+ ptp_qoriq->debugfs_root = NULL;
+}
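The new debugfs helper follows the stock DEFINE_DEBUGFS_ATTRIBUTE() pattern: a simple get/set pair over a u64 is wrapped into file operations, and debugfs_create_file_unsafe() is the matching creation call because the generated read/write wrappers already take debugfs_file_get()/put() references themselves. A self-contained sketch of the same pattern, with made-up names:

// SPDX-License-Identifier: GPL-2.0
/* Minimal DEFINE_DEBUGFS_ATTRIBUTE() example -- illustrative only. */
#include <linux/debugfs.h>
#include <linux/module.h>

static u64 example_value;

static int example_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int example_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

static struct dentry *example_dir;

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_file_unsafe("value", 0600, example_dir,
				   &example_value, &example_fops);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");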
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index bb655854713d..b64c56c33c3b 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -1382,9 +1382,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
- IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
- &priv->idb_dma, GFP_KERNEL);
+ priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
+ &priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
@@ -1447,9 +1447,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
/* Allocate space for DMA descriptors */
- bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
- bd_num * sizeof(struct tsi721_dma_desc),
- &bd_phys, GFP_KERNEL);
+ bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
if (!bd_ptr)
return -ENOMEM;
@@ -1464,7 +1464,7 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
@@ -1939,10 +1939,10 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
- priv->omsg_ring[mbox].sts_size *
- sizeof(struct tsi721_dma_sts),
- &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
+ priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
+ &priv->omsg_ring[mbox].sts_phys,
+ GFP_KERNEL);
if (priv->omsg_ring[mbox].sts_base == NULL) {
tsi_debug(OMSG, &priv->pdev->dev,
"ENOMEM for OB_MSG_%d status FIFO", mbox);
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 006ea5a45020..7f5d4436f594 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -90,9 +90,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
* Allocate space for DMA descriptors
* (add an extra element for link descriptor)
*/
- bd_ptr = dma_zalloc_coherent(dev,
- (bd_num + 1) * sizeof(struct tsi721_dma_desc),
- &bd_phys, GFP_ATOMIC);
+ bd_ptr = dma_alloc_coherent(dev,
+ (bd_num + 1) * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_ATOMIC);
if (!bd_ptr)
return -ENOMEM;
@@ -108,7 +108,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
(bd_num + 1) : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_zalloc_coherent(dev,
+ sts_ptr = dma_alloc_coherent(dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_ATOMIC);
if (!sts_ptr) {
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 73e4b407f162..ad5e303dda05 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -290,8 +290,7 @@ const struct attribute_group *rio_dev_groups[] = {
NULL,
};
-static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
- size_t count)
+static ssize_t scan_store(struct bus_type *bus, const char *buf, size_t count)
{
long val;
int rc;
@@ -314,7 +313,7 @@ exit:
return rc;
}
-static BUS_ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store);
+static BUS_ATTR_WO(scan);
static struct attribute *rio_bus_attrs[] = {
&bus_attr_scan.attr,
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 28f55248eb90..753a6a1b30c3 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
@@ -22,12 +21,7 @@
struct pm8607_regulator_info {
struct regulator_desc desc;
- struct pm860x_chip *chip;
- struct regulator_dev *regulator;
- struct i2c_client *i2c;
- struct i2c_client *i2c_8606;
- unsigned int *vol_table;
unsigned int *vol_suspend;
int slope_double;
@@ -210,13 +204,15 @@ static const unsigned int LDO14_suspend_table[] = {
static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- int ret = -EINVAL;
+ int ret;
+
+ ret = regulator_list_voltage_table(rdev, index);
+ if (ret < 0)
+ return ret;
+
+ if (info->slope_double)
+ ret <<= 1;
- if (info->vol_table && (index < rdev->desc->n_voltages)) {
- ret = info->vol_table[index];
- if (info->slope_double)
- ret <<= 1;
- }
return ret;
}
@@ -257,6 +253,7 @@ static const struct regulator_ops pm8606_preg_ops = {
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_##vreg, \
.owner = THIS_MODULE, \
+ .volt_table = vreg##_table, \
.n_voltages = ARRAY_SIZE(vreg##_table), \
.vsel_reg = PM8607_##vreg, \
.vsel_mask = ARRAY_SIZE(vreg##_table) - 1, \
@@ -266,7 +263,6 @@ static const struct regulator_ops pm8606_preg_ops = {
.enable_mask = 1 << (ebit), \
}, \
.slope_double = (0), \
- .vol_table = (unsigned int *)&vreg##_table, \
.vol_suspend = (unsigned int *)&vreg##_suspend_table, \
}
@@ -278,6 +274,7 @@ static const struct regulator_ops pm8606_preg_ops = {
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_LDO##_id, \
.owner = THIS_MODULE, \
+ .volt_table = LDO##_id##_table, \
.n_voltages = ARRAY_SIZE(LDO##_id##_table), \
.vsel_reg = PM8607_##vreg, \
.vsel_mask = (ARRAY_SIZE(LDO##_id##_table) - 1) << (shift), \
@@ -285,7 +282,6 @@ static const struct regulator_ops pm8606_preg_ops = {
.enable_mask = 1 << (ebit), \
}, \
.slope_double = (0), \
- .vol_table = (unsigned int *)&LDO##_id##_table, \
.vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \
}
@@ -349,6 +345,7 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
struct pm8607_regulator_info *info = NULL;
struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
struct regulator_config config = { };
+ struct regulator_dev *rdev;
struct resource *res;
int i;
@@ -371,13 +368,9 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
/* i is used to check regulator ID */
i = -1;
}
- info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
- info->i2c_8606 = (chip->id == CHIP_PM8607) ? chip->companion :
- chip->client;
- info->chip = chip;
/* check DVC ramp slope double */
- if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double)
+ if ((i == PM8607_ID_BUCK3) && chip->buck3_double)
info->slope_double = 1;
config.dev = &pdev->dev;
@@ -392,12 +385,11 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
else
config.regmap = chip->regmap_companion;
- info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
- &config);
- if (IS_ERR(info->regulator)) {
+ rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- return PTR_ERR(info->regulator);
+ return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, info);
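Setting .volt_table in the descriptor lets this driver defer to the core table helper and keep only the slope-doubling quirk in its list_voltage callback. regulator_list_voltage_table() is essentially a bounds-checked array lookup; paraphrased below for illustration (not the verbatim core implementation):

/* Paraphrase of the core helper -- illustrative only. */
int regulator_list_voltage_table(struct regulator_dev *rdev,
				 unsigned int selector)
{
	if (!rdev->desc->volt_table || selector >= rdev->desc->n_voltages)
		return -EINVAL;

	return rdev->desc->volt_table[selector];
}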
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index ee60a222f5eb..b7f249ee5e68 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -180,6 +180,17 @@ config REGULATOR_BCM590XX
BCM590xx PMUs. This will enable support for the software
controllable LDO/Switching regulators.
+config REGULATOR_BD70528
+ tristate "ROHM BD70528 Power Regulator"
+ depends on MFD_ROHM_BD70528
+ help
+ This driver supports voltage regulators on ROHM BD70528 PMIC.
+ This will enable support for the software controllable buck
+ and LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd70528-regulator.
+
config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
depends on MFD_ROHM_BD718XX
@@ -457,6 +468,14 @@ config REGULATOR_MAX77620
chip to control Step-Down DC-DC and LDOs. Say Y here to
enable the regulator driver.
+config REGULATOR_MAX77650
+ tristate "Maxim MAX77650/77651 regulator support"
+ depends on MFD_MAX77650
+ help
+ Regulator driver for MAX77650/77651 PMIC from Maxim
+ Semiconductor. This device has a SIMO with three independent
+ power rails and an LDO.
+
config REGULATOR_MAX8649
tristate "Maxim 8649 voltage regulator"
depends on I2C
@@ -484,7 +503,7 @@ config REGULATOR_MAX8925
tristate "Maxim MAX8925 Power Management IC"
depends on MFD_MAX8925
help
- Say y here to support the voltage regulaltor of Maxim MAX8925 PMIC.
+ Say y here to support the voltage regulator of Maxim MAX8925 PMIC.
config REGULATOR_MAX8952
tristate "Maxim MAX8952 Power Management IC"
@@ -501,7 +520,7 @@ config REGULATOR_MAX8973
select REGMAP_I2C
help
The MAXIM MAX8973 high-efficiency. three phase, DC-DC step-down
- switching regulator delievers up to 9A of output current. Each
+ switching regulator delivers up to 9A of output current. Each
phase operates at a 2MHz fixed frequency with a 120 deg shift
from the adjacent phase, allowing the use of small magnetic component.
@@ -646,7 +665,7 @@ config REGULATOR_PCF50633
tristate "NXP PCF50633 regulator driver"
depends on MFD_PCF50633
help
- Say Y here to support the voltage regulators and convertors
+ Say Y here to support the voltage regulators and converters
on PCF50633
config REGULATOR_PFUZE100
@@ -924,7 +943,7 @@ config REGULATOR_TPS65132
select REGMAP_I2C
help
This driver supports TPS65132 single inductor - dual output
- power supply specifcally designed for display panels.
+ power supply specifically designed for display panels.
config REGULATOR_TPS65217
tristate "TI TPS65217 Power regulators"
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b12e1c9b2118..1169f8a27d91 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
+obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
@@ -60,6 +61,7 @@ obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o
obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
+obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 21e20483bd91..e0239cf3f56d 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -131,7 +131,7 @@
* ACT8865 voltage number
*/
#define ACT8865_VOLTAGE_NUM 64
-#define ACT8600_SUDCDC_VOLTAGE_NUM 255
+#define ACT8600_SUDCDC_VOLTAGE_NUM 256
struct act8865 {
struct regmap *regmap;
@@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
- REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000),
+ REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000),
+ REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
};
static struct regulator_ops act8865_ops = {
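The ACT8600 SUDCDC fix above matters because regulator_list_voltage_linear_range() evaluates uV = min_uV + (sel - min_sel) * uV_step within whichever range contains the selector: the old table started its last range at selector 191 (already the end of the 200 mV range) and let selectors run to 255 in 400 mV steps, well past the part's 41.4 V ceiling, while bumping ACT8600_SUDCDC_VOLTAGE_NUM to 256 makes selector 255 addressable at all. A worked walk over the new table, with the helper semantics paraphrased:

/* Illustrative selector-to-voltage walk over the new ranges:
 *
 *   sel 191 -> 12600000 + (191 - 160) * 200000 = 18800000 uV
 *   sel 192 -> 19000000 + (192 - 192) * 400000 = 19000000 uV
 *   sel 247 -> 19000000 + (247 - 192) * 400000 = 41000000 uV
 *   sel 248..255 -> 41400000 uV (zero-step range pins the top selectors)
 */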
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index 603db77723b6..caa61d306a69 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -87,7 +87,8 @@ static const struct regulator_linear_range act8945a_voltage_ranges[] = {
static int act8945a_set_suspend_state(struct regulator_dev *rdev, bool enable)
{
struct regmap *regmap = rdev->regmap;
- int id = rdev->desc->id, reg, val;
+ int id = rdev_get_id(rdev);
+ int reg, val;
switch (id) {
case ACT8945A_ID_DCDC1:
@@ -159,7 +160,7 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev);
struct regmap *regmap = rdev->regmap;
- int id = rdev->desc->id;
+ int id = rdev_get_id(rdev);
int reg, ret, val = 0;
switch (id) {
@@ -190,11 +191,11 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_STANDBY:
- if (rdev->desc->id > ACT8945A_ID_DCDC3)
+ if (id > ACT8945A_ID_DCDC3)
val = BIT(5);
break;
case REGULATOR_MODE_NORMAL:
- if (rdev->desc->id <= ACT8945A_ID_DCDC3)
+ if (id <= ACT8945A_ID_DCDC3)
val = BIT(5);
break;
default:
@@ -213,7 +214,7 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
static unsigned int act8945a_get_mode(struct regulator_dev *rdev)
{
struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev);
- int id = rdev->desc->id;
+ int id = rdev_get_id(rdev);
if (id < ACT8945A_ID_DCDC1 || id >= ACT8945A_ID_MAX)
return -EINVAL;
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index b9a93049e41e..bf3ab405eed1 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -40,35 +40,10 @@ struct arizona_ldo1 {
struct gpio_desc *ena_gpiod;
};
-static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector >= rdev->desc->n_voltages)
- return -EINVAL;
-
- if (selector == rdev->desc->n_voltages - 1)
- return 1800000;
- else
- return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
-}
-
-static int arizona_ldo1_hc_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int sel;
-
- sel = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
- if (sel >= rdev->desc->n_voltages)
- sel = rdev->desc->n_voltages - 1;
-
- return sel;
-}
-
static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
- struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev);
- struct regmap *regmap = ldo->regmap;
+ struct regmap *regmap = rdev_get_regmap(rdev);
unsigned int val;
int ret;
@@ -85,16 +60,12 @@ static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev,
if (val)
return 0;
- val = sel << ARIZONA_LDO1_VSEL_SHIFT;
-
- return regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_1,
- ARIZONA_LDO1_VSEL_MASK, val);
+ return regulator_set_voltage_sel_regmap(rdev, sel);
}
static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
{
- struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev);
- struct regmap *regmap = ldo->regmap;
+ struct regmap *regmap = rdev_get_regmap(rdev);
unsigned int val;
int ret;
@@ -105,32 +76,35 @@ static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
if (val & ARIZONA_LDO1_HI_PWR)
return rdev->desc->n_voltages - 1;
- ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_1, &val);
- if (ret != 0)
- return ret;
-
- return (val & ARIZONA_LDO1_VSEL_MASK) >> ARIZONA_LDO1_VSEL_SHIFT;
+ return regulator_get_voltage_sel_regmap(rdev);
}
static const struct regulator_ops arizona_ldo1_hc_ops = {
- .list_voltage = arizona_ldo1_hc_list_voltage,
- .map_voltage = arizona_ldo1_hc_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = arizona_ldo1_hc_get_voltage_sel,
.set_voltage_sel = arizona_ldo1_hc_set_voltage_sel,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
};
+static const struct regulator_linear_range arizona_ldo1_hc_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0, 0x6, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x7, 0x7, 0),
+};
+
static const struct regulator_desc arizona_ldo1_hc = {
.name = "LDO1",
.supply_name = "LDOVDD",
.type = REGULATOR_VOLTAGE,
.ops = &arizona_ldo1_hc_ops,
+ .vsel_reg = ARIZONA_LDO1_CONTROL_1,
+ .vsel_mask = ARIZONA_LDO1_VSEL_MASK,
.bypass_reg = ARIZONA_LDO1_CONTROL_1,
.bypass_mask = ARIZONA_LDO1_BYPASS,
- .min_uV = 900000,
- .uV_step = 50000,
+ .linear_ranges = arizona_ldo1_hc_ranges,
+ .n_linear_ranges = ARRAY_SIZE(arizona_ldo1_hc_ranges),
.n_voltages = 8,
.enable_time = 1500,
.ramp_delay = 24000,
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 66337e12719b..e5fed289b52d 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -886,7 +886,7 @@ static int as3722_regulator_probe(struct platform_device *pdev)
as3722_regs->desc[id].min_uV = 410000;
} else {
as3722_regs->desc[id].n_voltages =
- AS3722_SD0_VSEL_MAX + 1,
+ AS3722_SD0_VSEL_MAX + 1;
as3722_regs->desc[id].min_uV = 610000;
}
as3722_regs->desc[id].uV_step = 10000;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 48af859fd053..fba8f58ab769 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -367,13 +367,12 @@ static const int axp209_dcdc2_ldo3_slew_rates[] = {
static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
- const struct regulator_desc *desc = rdev->desc;
+ const struct regulator_desc *desc;
u8 reg, mask, enable, cfg = 0xff;
const int *slew_rates;
int rate_count = 0;
- if (!rdev)
- return -EINVAL;
+ desc = rdev->desc;
switch (axp20x->variant) {
case AXP209_ID:
@@ -436,11 +435,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
static int axp20x_regulator_enable_regmap(struct regulator_dev *rdev)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
- const struct regulator_desc *desc = rdev->desc;
+ const struct regulator_desc *desc;
if (!rdev)
return -EINVAL;
+ desc = rdev->desc;
+
switch (axp20x->variant) {
case AXP209_ID:
if ((desc->id == AXP20X_LDO3) &&
@@ -573,7 +574,7 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP22X_DCDC3_V_OUT, AXP22X_DCDC3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC3_MASK),
AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
- AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT,
+ AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC4_MASK),
AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
AXP22X_DCDC5_V_OUT, AXP22X_DCDC5_V_OUT_MASK,
@@ -719,7 +720,7 @@ static const struct regulator_desc axp803_regulators[] = {
AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP803, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
- AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT,
+ AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP803, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
@@ -729,7 +730,7 @@ static const struct regulator_desc axp803_regulators[] = {
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC_RANGES(AXP803, DLDO2, "dldo2", "dldoin",
axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
- AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT,
+ AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP803, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
@@ -744,7 +745,7 @@ static const struct regulator_desc axp803_regulators[] = {
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP803, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
- AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT,
+ AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
AXP_DESC(AXP803, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
AXP803_FLDO1_V_OUT, AXP803_FLDO1_V_OUT_MASK,
@@ -791,7 +792,7 @@ static const struct regulator_desc axp806_regulators[] = {
AXP806_DCDCA_V_CTRL, AXP806_DCDCA_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCA_MASK),
AXP_DESC(AXP806, DCDCB, "dcdcb", "vinb", 1000, 2550, 50,
- AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL,
+ AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCB_MASK),
AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc",
axp806_dcdca_ranges, AXP806_DCDCA_NUM_VOLTAGES,
@@ -817,7 +818,7 @@ static const struct regulator_desc axp806_regulators[] = {
AXP806_BLDO1_V_CTRL, AXP806_BLDO1_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO1_MASK),
AXP_DESC(AXP806, BLDO2, "bldo2", "bldoin", 700, 1900, 100,
- AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL,
+ AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO2_MASK),
AXP_DESC(AXP806, BLDO3, "bldo3", "bldoin", 700, 1900, 100,
AXP806_BLDO3_V_CTRL, AXP806_BLDO3_V_CTRL_MASK,
@@ -952,7 +953,7 @@ static const struct regulator_desc axp813_regulators[] = {
AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
- AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT,
+ AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
@@ -962,7 +963,7 @@ static const struct regulator_desc axp813_regulators[] = {
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin",
axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
- AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT,
+ AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
@@ -977,7 +978,7 @@ static const struct regulator_desc axp813_regulators[] = {
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
- AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT,
+ AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
/* to do / check ... */
AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 92d6d7b10cf7..e49c0a7d5dd5 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -242,8 +242,12 @@ static int bcm590xx_get_enable_register(int id)
case BCM590XX_REG_SDSR2:
reg = BCM590XX_SDSR2PMCTRL1;
break;
+ case BCM590XX_REG_VSR:
+ reg = BCM590XX_VSRPMCTRL1;
+ break;
case BCM590XX_REG_VBUS:
reg = BCM590XX_OTG_CTRL;
+ break;
}
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
new file mode 100644
index 000000000000..30e3ed430a8a
--- /dev/null
+++ b/drivers/regulator/bd70528-regulator.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 ROHM Semiconductors
+// bd70528-regulator.c ROHM BD70528MWV regulator driver
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd70528.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+#define BUCK_RAMPRATE_250MV 0
+#define BUCK_RAMPRATE_125MV 1
+#define BUCK_RAMP_MAX 250
+
+static const struct regulator_linear_range bd70528_buck1_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 600000),
+ REGULATOR_LINEAR_RANGE(2750000, 0x2, 0xf, 50000),
+};
+static const struct regulator_linear_range bd70528_buck2_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 300000),
+ REGULATOR_LINEAR_RANGE(1550000, 0x2, 0xd, 50000),
+ REGULATOR_LINEAR_RANGE(3000000, 0xe, 0xf, 300000),
+};
+static const struct regulator_linear_range bd70528_buck3_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0xd, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 0xe, 0xf, 0),
+};
+
+/* All LDOs have same voltage ranges */
+static const struct regulator_linear_range bd70528_ldo_volts[] = {
+ REGULATOR_LINEAR_RANGE(1650000, 0x0, 0x07, 50000),
+ REGULATOR_LINEAR_RANGE(2100000, 0x8, 0x0f, 100000),
+ REGULATOR_LINEAR_RANGE(2850000, 0x10, 0x19, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x19, 0x1f, 0),
+};
+
+/* Also both LEDs support same voltages */
+static const unsigned int led_volts[] = {
+ 20000, 30000
+};
+
+static int bd70528_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ if (ramp_delay > 0 && ramp_delay <= BUCK_RAMP_MAX) {
+ unsigned int ramp_value = BUCK_RAMPRATE_250MV;
+
+ if (ramp_delay <= 125)
+ ramp_value = BUCK_RAMPRATE_125MV;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ BD70528_MASK_BUCK_RAMP,
+ ramp_value << BD70528_SIFT_BUCK_RAMP);
+ }
+ dev_err(&rdev->dev, "%s: ramp_delay: %d not supported\n",
+ rdev->desc->name, ramp_delay);
+ return -EINVAL;
+}
+
+static int bd70528_led_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ int ret;
+
+ ret = regulator_is_enabled_regmap(rdev);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ return regulator_set_voltage_sel_regmap(rdev, sel);
+
+ dev_err(&rdev->dev,
+ "LED voltage change not allowed when led is enabled\n");
+
+ return -EBUSY;
+}
+
+static const struct regulator_ops bd70528_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd70528_set_ramp_delay,
+};
+
+static const struct regulator_ops bd70528_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd70528_set_ramp_delay,
+};
+
+static const struct regulator_ops bd70528_led_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = bd70528_led_set_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_desc bd70528_desc[] = {
+ {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_BUCK1,
+ .ops = &bd70528_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_buck1_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_buck1_volts),
+ .n_voltages = BD70528_BUCK_VOLTS,
+ .enable_reg = BD70528_REG_BUCK1_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_BUCK1_VOLT,
+ .vsel_mask = BD70528_MASK_BUCK_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_BUCK2,
+ .ops = &bd70528_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_buck2_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_buck2_volts),
+ .n_voltages = BD70528_BUCK_VOLTS,
+ .enable_reg = BD70528_REG_BUCK2_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_BUCK2_VOLT,
+ .vsel_mask = BD70528_MASK_BUCK_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_BUCK3,
+ .ops = &bd70528_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_buck3_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_buck3_volts),
+ .n_voltages = BD70528_BUCK_VOLTS,
+ .enable_reg = BD70528_REG_BUCK3_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_BUCK3_VOLT,
+ .vsel_mask = BD70528_MASK_BUCK_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LDO1,
+ .ops = &bd70528_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
+ .n_voltages = BD70528_LDO_VOLTS,
+ .enable_reg = BD70528_REG_LDO1_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_LDO1_VOLT,
+ .vsel_mask = BD70528_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LDO2,
+ .ops = &bd70528_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
+ .n_voltages = BD70528_LDO_VOLTS,
+ .enable_reg = BD70528_REG_LDO2_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_LDO2_VOLT,
+ .vsel_mask = BD70528_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LDO3,
+ .ops = &bd70528_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
+ .n_voltages = BD70528_LDO_VOLTS,
+ .enable_reg = BD70528_REG_LDO3_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_LDO3_VOLT,
+ .vsel_mask = BD70528_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo_led1",
+ .of_match = of_match_ptr("LDO_LED1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LED1,
+ .ops = &bd70528_led_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &led_volts[0],
+ .n_voltages = ARRAY_SIZE(led_volts),
+ .enable_reg = BD70528_REG_LED_EN,
+ .enable_mask = BD70528_MASK_LED1_EN,
+ .vsel_reg = BD70528_REG_LED_VOLT,
+ .vsel_mask = BD70528_MASK_LED1_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo_led2",
+ .of_match = of_match_ptr("LDO_LED2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LED2,
+ .ops = &bd70528_led_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &led_volts[0],
+ .n_voltages = ARRAY_SIZE(led_volts),
+ .enable_reg = BD70528_REG_LED_EN,
+ .enable_mask = BD70528_MASK_LED2_EN,
+ .vsel_reg = BD70528_REG_LED_VOLT,
+ .vsel_mask = BD70528_MASK_LED2_VOLT,
+ .owner = THIS_MODULE,
+ },
+
+};
+
+static int bd70528_probe(struct platform_device *pdev)
+{
+ struct rohm_regmap_dev *bd70528;
+ int i;
+ struct regulator_config config = {
+ .dev = pdev->dev.parent,
+ };
+
+ bd70528 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd70528) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+
+ config.regmap = bd70528->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(bd70528_desc); i++) {
+ struct regulator_dev *rdev;
+
+ rdev = devm_regulator_register(&pdev->dev, &bd70528_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ bd70528_desc[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver bd70528_regulator = {
+ .driver = {
+ .name = "bd70528-pmic"
+ },
+ .probe = bd70528_probe,
+};
+
+module_platform_driver(bd70528_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD70528 voltage regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index b8dcdc21dc22..b2191be49670 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -79,7 +79,7 @@ static int bd718xx_set_voltage_sel_pickable_restricted(
return regulator_set_voltage_sel_pickable_regmap(rdev, sel);
}
-static struct regulator_ops bd718xx_pickable_range_ldo_ops = {
+static const struct regulator_ops bd718xx_pickable_range_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -88,7 +88,7 @@ static struct regulator_ops bd718xx_pickable_range_ldo_ops = {
.get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
};
-static struct regulator_ops bd718xx_pickable_range_buck_ops = {
+static const struct regulator_ops bd718xx_pickable_range_buck_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -98,7 +98,7 @@ static struct regulator_ops bd718xx_pickable_range_buck_ops = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops bd718xx_ldo_regulator_ops = {
+static const struct regulator_ops bd718xx_ldo_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -107,7 +107,7 @@ static struct regulator_ops bd718xx_ldo_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
+static const struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -116,7 +116,7 @@ static struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_ops bd718xx_buck_regulator_ops = {
+static const struct regulator_ops bd718xx_buck_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -126,7 +126,7 @@ static struct regulator_ops bd718xx_buck_regulator_ops = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
+static const struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -137,7 +137,7 @@ static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops bd718xx_dvs_buck_regulator_ops = {
+static const struct regulator_ops bd718xx_dvs_buck_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -350,6 +350,135 @@ static const struct reg_init bd71837_ldo6_inits[] = {
},
};
+#define NUM_DVS_BUCKS 4
+
+struct of_dvs_setting {
+ const char *prop;
+ unsigned int reg;
+};
+
+static int set_dvs_levels(const struct of_dvs_setting *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap)
+{
+ int ret, i;
+ unsigned int uv;
+
+ ret = of_property_read_u32(np, dvs->prop, &uv);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ return 0;
+ }
+
+ for (i = 0; i < desc->n_voltages; i++) {
+ ret = regulator_desc_list_voltage_linear_range(desc, i);
+ if (ret < 0)
+ continue;
+ if (ret == uv) {
+ i <<= ffs(desc->vsel_mask) - 1;
+ ret = regmap_update_bits(regmap, dvs->reg,
+ DVS_BUCK_RUN_MASK, i);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int buck4_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ const struct of_dvs_setting dvs[] = {
+ {
+ .prop = "rohm,dvs-run-voltage",
+ .reg = BD71837_REG_BUCK4_VOLT_RUN,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dvs); i++) {
+ ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+static int buck3_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ const struct of_dvs_setting dvs[] = {
+ {
+ .prop = "rohm,dvs-run-voltage",
+ .reg = BD71837_REG_BUCK3_VOLT_RUN,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dvs); i++) {
+ ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int buck2_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ const struct of_dvs_setting dvs[] = {
+ {
+ .prop = "rohm,dvs-run-voltage",
+ .reg = BD718XX_REG_BUCK2_VOLT_RUN,
+ },
+ {
+ .prop = "rohm,dvs-idle-voltage",
+ .reg = BD718XX_REG_BUCK2_VOLT_IDLE,
+ },
+ };
+
+
+
+ for (i = 0; i < ARRAY_SIZE(dvs); i++) {
+ ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int buck1_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ const struct of_dvs_setting dvs[] = {
+ {
+ .prop = "rohm,dvs-run-voltage",
+ .reg = BD718XX_REG_BUCK1_VOLT_RUN,
+ },
+ {
+ .prop = "rohm,dvs-idle-voltage",
+ .reg = BD718XX_REG_BUCK1_VOLT_IDLE,
+ },
+ {
+ .prop = "rohm,dvs-suspend-voltage",
+ .reg = BD718XX_REG_BUCK1_VOLT_SUSP,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dvs); i++) {
+ ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
static const struct bd718xx_regulator_data bd71847_regulators[] = {
{
.desc = {
@@ -368,6 +497,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck1_set_hw_dvs_levels,
},
.init = {
.reg = BD718XX_REG_BUCK1_CTRL,
@@ -391,6 +521,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck2_set_hw_dvs_levels,
},
.init = {
.reg = BD718XX_REG_BUCK2_CTRL,
@@ -662,6 +793,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck1_set_hw_dvs_levels,
},
.init = {
.reg = BD718XX_REG_BUCK1_CTRL,
@@ -685,6 +817,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck2_set_hw_dvs_levels,
},
.init = {
.reg = BD718XX_REG_BUCK2_CTRL,
@@ -708,6 +841,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck3_set_hw_dvs_levels,
},
.init = {
.reg = BD71837_REG_BUCK3_CTRL,
@@ -731,6 +865,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck4_set_hw_dvs_levels,
},
.init = {
.reg = BD71837_REG_BUCK4_CTRL,
@@ -1029,6 +1164,7 @@ static int bd718xx_probe(struct platform_device *pdev)
};
int i, j, err;
+ bool use_snvs;
mfd = dev_get_drvdata(pdev->dev.parent);
if (!mfd) {
@@ -1055,27 +1191,28 @@ static int bd718xx_probe(struct platform_device *pdev)
BD718XX_REG_REGLOCK);
}
- /* At poweroff transition PMIC HW disables EN bit for regulators but
- * leaves SEL bit untouched. So if state transition from POWEROFF
- * is done to SNVS - then all power rails controlled by SW (having
- * SEL bit set) stay disabled as EN is cleared. This may result boot
- * failure if any crucial systems are powered by these rails.
- *
+ use_snvs = of_property_read_bool(pdev->dev.parent->of_node,
+ "rohm,reset-snvs-powered");
+
+ /*
* Change the next stage from poweroff to be READY instead of SNVS
* for all reset types because OTP loading at READY will clear SEL
* bit allowing HW defaults for power rails to be used
*/
- err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1,
- BD718XX_ON_REQ_POWEROFF_MASK |
- BD718XX_SWRESET_POWEROFF_MASK |
- BD718XX_WDOG_POWEROFF_MASK |
- BD718XX_KEY_L_POWEROFF_MASK,
- BD718XX_POWOFF_TO_RDY);
- if (err) {
- dev_err(&pdev->dev, "Failed to change reset target\n");
- goto err;
- } else {
- dev_dbg(&pdev->dev, "Changed all resets from SVNS to READY\n");
+ if (!use_snvs) {
+ err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1,
+ BD718XX_ON_REQ_POWEROFF_MASK |
+ BD718XX_SWRESET_POWEROFF_MASK |
+ BD718XX_WDOG_POWEROFF_MASK |
+ BD718XX_KEY_L_POWEROFF_MASK,
+ BD718XX_POWOFF_TO_RDY);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to change reset target\n");
+ goto err;
+ } else {
+ dev_dbg(&pdev->dev,
+				"Changed all resets from SNVS to READY\n");
+ }
}
for (i = 0; i < pmic_regulators[mfd->chip_type].r_amount; i++) {
@@ -1098,19 +1235,33 @@ static int bd718xx_probe(struct platform_device *pdev)
err = PTR_ERR(rdev);
goto err;
}
- /* Regulator register gets the regulator constraints and
+
+ /*
+ * Regulator register gets the regulator constraints and
* applies them (set_machine_constraints). This should have
* turned the control register(s) to correct values and we
* can now switch the control from PMIC state machine to the
* register interface
+ *
+ * At poweroff transition PMIC HW disables EN bit for
+ * regulators but leaves SEL bit untouched. So if state
+ * transition from POWEROFF is done to SNVS - then all power
+ * rails controlled by SW (having SEL bit set) stay disabled
+		 * as EN is cleared. This will result in boot failure if any
+		 * crucial systems are powered by these rails. We don't
+		 * enable SW control for crucial regulators if the SNVS
+		 * state is used.
*/
- err = regmap_update_bits(mfd->regmap, r->init.reg,
- r->init.mask, r->init.val);
- if (err) {
- dev_err(&pdev->dev,
- "Failed to write BUCK/LDO SEL bit for (%s)\n",
- desc->name);
- goto err;
+ if (!use_snvs || !rdev->constraints->always_on ||
+ !rdev->constraints->boot_on) {
+ err = regmap_update_bits(mfd->regmap, r->init.reg,
+ r->init.mask, r->init.val);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to take control for (%s)\n",
+ desc->name);
+ goto err;
+ }
}
for (j = 0; j < r->additional_init_amnt; j++) {
err = regmap_update_bits(mfd->regmap,
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index e12dd1f750f3..e690c2ce5b3c 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -100,7 +100,7 @@ static int bd9571mwv_reg_set_voltage_sel_regmap(struct regulator_dev *rdev,
}
/* Operations permitted on AVS voltage regulator */
-static struct regulator_ops avs_ops = {
+static const struct regulator_ops avs_ops = {
.set_voltage_sel = bd9571mwv_avs_set_voltage_sel_regmap,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = bd9571mwv_avs_get_voltage_sel_regmap,
@@ -108,7 +108,7 @@ static struct regulator_ops avs_ops = {
};
/* Operations permitted on voltage regulators */
-static struct regulator_ops reg_ops = {
+static const struct regulator_ops reg_ops = {
.set_voltage_sel = bd9571mwv_reg_set_voltage_sel_regmap,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -116,13 +116,13 @@ static struct regulator_ops reg_ops = {
};
/* Operations permitted on voltage monitors */
-static struct regulator_ops vid_ops = {
+static const struct regulator_ops vid_ops = {
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_desc regulators[] = {
+static const struct regulator_desc regulators[] = {
BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
0x80, 600000, 10000, 0x3c),
BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b9d7b45c7295..68473d0cc57e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -23,7 +23,6 @@
#include <linux/mutex.h>
#include <linux/suspend.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/regmap.h>
@@ -82,7 +81,6 @@ struct regulator_enable_gpio {
struct gpio_desc *gpiod;
u32 enable_count; /* a number of enabled shared GPIO */
u32 request_count; /* a number of requested shared GPIO */
- unsigned int ena_gpio_invert:1;
};
/*
@@ -145,14 +143,6 @@ static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops)
return false;
}
-static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
-{
- if (rdev && rdev->supply)
- return rdev->supply->rdev;
-
- return NULL;
-}
-
/**
* regulator_lock_nested - lock a single regulator
* @rdev: regulator source
@@ -326,7 +316,7 @@ err_unlock:
* @rdev: regulator source
* @ww_ctx: w/w mutex acquire context
*
- * Unlock all regulators related with rdev by coupling or suppling.
+ * Unlock all regulators related with rdev by coupling or supplying.
*/
static void regulator_unlock_dependent(struct regulator_dev *rdev,
struct ww_acquire_ctx *ww_ctx)
@@ -341,7 +331,7 @@ static void regulator_unlock_dependent(struct regulator_dev *rdev,
* @ww_ctx: w/w mutex acquire context
*
* This function as a wrapper on regulator_lock_recursive(), which locks
- * all regulators related with rdev by coupling or suppling.
+ * all regulators related with rdev by coupling or supplying.
*/
static void regulator_lock_dependent(struct regulator_dev *rdev,
struct ww_acquire_ctx *ww_ctx)
@@ -924,14 +914,14 @@ static int drms_uA_update(struct regulator_dev *rdev)
int current_uA = 0, output_uV, input_uV, err;
unsigned int mode;
- lockdep_assert_held_once(&rdev->mutex.base);
-
/*
* first check to see if we can set modes at all, otherwise just
* tell the consumer everything is OK.
*/
- if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS)) {
+ rdev_dbg(rdev, "DRMS operation not allowed\n");
return 0;
+ }
if (!rdev->desc->ops->get_optimum_mode &&
!rdev->desc->ops->set_load)
@@ -1003,7 +993,7 @@ static int suspend_set_state(struct regulator_dev *rdev,
if (rstate == NULL)
return 0;
- /* If we have no suspend mode configration don't set anything;
+ /* If we have no suspend mode configuration don't set anything;
* only warn if the driver implements set_suspend_voltage or
* set_suspend_mode callback.
*/
@@ -1131,7 +1121,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
int current_uV = _regulator_get_voltage(rdev);
if (current_uV == -ENOTRECOVERABLE) {
- /* This regulator can't be read and must be initted */
+ /* This regulator can't be read and must be initialized */
rdev_info(rdev, "Setting %d-%duV\n",
rdev->constraints->min_uV,
rdev->constraints->max_uV);
@@ -1349,7 +1339,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
* We'll only apply the initial system load if an
* initial mode wasn't specified.
*/
+ regulator_lock(rdev);
drms_uA_update(rdev);
+ regulator_unlock(rdev);
}
if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
@@ -1780,7 +1772,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
struct device *dev = rdev->dev.parent;
int ret;
- /* No supply to resovle? */
+ /* No supply to resolve? */
if (!rdev->supply_name)
return 0;
@@ -2058,15 +2050,7 @@ static void _regulator_put(struct regulator *regulator)
debugfs_remove_recursive(regulator->debugfs);
if (regulator->dev) {
- int count = 0;
- struct regulator *r;
-
- list_for_each_entry(r, &rdev->consumer_list, list)
- if (r->dev == regulator->dev)
- count++;
-
- if (count == 1)
- device_link_remove(regulator->dev, &rdev->dev);
+ device_link_remove(regulator->dev, &rdev->dev);
/* remove any sysfs entries */
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
@@ -2237,38 +2221,21 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
{
struct regulator_enable_gpio *pin;
struct gpio_desc *gpiod;
- int ret;
- if (config->ena_gpiod)
- gpiod = config->ena_gpiod;
- else
- gpiod = gpio_to_desc(config->ena_gpio);
+ gpiod = config->ena_gpiod;
list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
if (pin->gpiod == gpiod) {
- rdev_dbg(rdev, "GPIO %d is already used\n",
- config->ena_gpio);
+ rdev_dbg(rdev, "GPIO is already used\n");
goto update_ena_gpio_to_rdev;
}
}
- if (!config->ena_gpiod) {
- ret = gpio_request_one(config->ena_gpio,
- GPIOF_DIR_OUT | config->ena_gpio_flags,
- rdev_get_name(rdev));
- if (ret)
- return ret;
- }
-
pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL);
- if (pin == NULL) {
- if (!config->ena_gpiod)
- gpio_free(config->ena_gpio);
+ if (pin == NULL)
return -ENOMEM;
- }
pin->gpiod = gpiod;
- pin->ena_gpio_invert = config->ena_gpio_invert;
list_add(&pin->list, &regulator_ena_gpio_list);
update_ena_gpio_to_rdev:
@@ -2289,7 +2256,6 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
if (pin->gpiod == rdev->ena_pin->gpiod) {
if (pin->request_count <= 1) {
pin->request_count = 0;
- gpiod_put(pin->gpiod);
list_del(&pin->list);
kfree(pin);
rdev->ena_pin = NULL;
@@ -2319,8 +2285,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
if (enable) {
/* Enable GPIO at initial use */
if (pin->enable_count == 0)
- gpiod_set_value_cansleep(pin->gpiod,
- !pin->ena_gpio_invert);
+ gpiod_set_value_cansleep(pin->gpiod, 1);
pin->enable_count++;
} else {
@@ -2331,8 +2296,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
/* Disable GPIO if not used */
if (pin->enable_count <= 1) {
- gpiod_set_value_cansleep(pin->gpiod,
- pin->ena_gpio_invert);
+ gpiod_set_value_cansleep(pin->gpiod, 0);
pin->enable_count = 0;
}
}
@@ -2409,7 +2373,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* timer wrapping.
* in case of multiple timer wrapping, either it can be
* detected by out-of-range remaining, or it cannot be
- * detected and we gets a panelty of
+ * detected and we get a penalty of
* _regulator_enable_delay().
*/
remaining = intended - start_jiffy;
@@ -2809,7 +2773,7 @@ static void regulator_disable_work(struct work_struct *work)
/**
* regulator_disable_deferred - disable regulator output with delay
* @regulator: regulator source
- * @ms: miliseconds until the regulator is disabled
+ * @ms: milliseconds until the regulator is disabled
*
* Execute regulator_disable() on the regulator after a delay. This
* is intended for use with devices that require some time to quiesce.
@@ -4943,7 +4907,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
* device tree until we have handled it over to the core. If the
* config that was passed in to this function DOES NOT contain
* a descriptor, and the config after this call DOES contain
- * a descriptor, we definately got one from parsing the device
+ * a descriptor, we definitely got one from parsing the device
* tree.
*/
if (!cfg->ena_gpiod && config->ena_gpiod)
@@ -4975,15 +4939,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
goto clean;
}
- if (config->ena_gpiod ||
- ((config->ena_gpio || config->ena_gpio_initialized) &&
- gpio_is_valid(config->ena_gpio))) {
+ if (config->ena_gpiod) {
mutex_lock(&regulator_list_mutex);
ret = regulator_ena_gpio_request(rdev, config);
mutex_unlock(&regulator_list_mutex);
if (ret != 0) {
- rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
- config->ena_gpio, ret);
+ rdev_err(rdev, "Failed to request enable GPIO: %d\n",
+ ret);
goto clean;
}
/* The regulator core took over the GPIO descriptor */
@@ -5251,6 +5213,12 @@ struct device *rdev_get_dev(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(rdev_get_dev);
+struct regmap *rdev_get_regmap(struct regulator_dev *rdev)
+{
+ return rdev->regmap;
+}
+EXPORT_SYMBOL_GPL(rdev_get_regmap);
+
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data)
{
return reg_init_data->driver_data;
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index 2131457937b7..e7dab5c4d1d1 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -100,12 +100,11 @@ struct cpcap_regulator {
struct regulator_desc rdesc;
const u16 assign_reg;
const u16 assign_mask;
- const u16 vsel_shift;
};
#define CPCAP_REG(_ID, reg, assignment_reg, assignment_mask, val_tbl, \
- mode_mask, volt_mask, volt_shft, \
- mode_val, off_val, volt_trans_time) { \
+ mode_mask, volt_mask, mode_val, off_val, \
+ volt_trans_time) { \
.rdesc = { \
.name = #_ID, \
.of_match = of_match_ptr(#_ID), \
@@ -127,7 +126,6 @@ struct cpcap_regulator {
}, \
.assign_reg = (assignment_reg), \
.assign_mask = (assignment_mask), \
- .vsel_shift = (volt_shft), \
}
struct cpcap_ddata {
@@ -336,155 +334,155 @@ static const unsigned int vaudio_val_tbl[] = { 0, 2775000, };
 * SW1 to SW4 and SW6 seem to be unused for mapphone. Note that VSIM and
* VSIMCARD have a shared resource assignment bit.
*/
-static struct cpcap_regulator omap4_regulators[] = {
+static const struct cpcap_regulator omap4_regulators[] = {
CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW1_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW2_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW3_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW4_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW5_SEL, sw5_val_tbl,
- 0x28, 0, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0),
+ 0x28, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0),
CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW6_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VCAM_SEL, vcam_val_tbl,
- 0x87, 0x30, 4, 0x3, 0, 420),
+ 0x87, 0x30, 0x3, 0, 420),
CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VCSI_SEL, vcsi_val_tbl,
- 0x47, 0x10, 4, 0x43, 0x41, 350),
+ 0x47, 0x10, 0x43, 0x41, 350),
CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VDAC_SEL, vdac_val_tbl,
- 0x87, 0x30, 4, 0x3, 0, 420),
+ 0x87, 0x30, 0x3, 0, 420),
CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VDIG_SEL, vdig_val_tbl,
- 0x87, 0x30, 4, 0x82, 0, 420),
+ 0x87, 0x30, 0x82, 0, 420),
CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl,
- 0x80, 0xf, 0, 0x80, 0, 420),
+ 0x80, 0xf, 0x80, 0, 420),
CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl,
- 0x17, 0, 0, 0, 0x12, 0),
+ 0x17, 0, 0, 0x12, 0),
CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl,
- 0x87, 0x38, 3, 0x82, 0, 420),
+ 0x87, 0x38, 0x82, 0, 420),
CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VPLL_SEL, vpll_val_tbl,
- 0x43, 0x18, 3, 0x2, 0, 420),
+ 0x43, 0x18, 0x2, 0, 420),
CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRF1_SEL, vrf1_val_tbl,
- 0xac, 0x2, 1, 0x4, 0, 10),
+ 0xac, 0x2, 0x4, 0, 10),
CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRF2_SEL, vrf2_val_tbl,
- 0x23, 0x8, 3, 0, 0, 10),
+ 0x23, 0x8, 0, 0, 10),
CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl,
- 0x23, 0x8, 3, 0, 0, 420),
+ 0x23, 0x8, 0, 0, 420),
CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl,
- 0x47, 0x10, 4, 0, 0, 420),
+ 0x47, 0x10, 0, 0, 420),
CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl,
- 0x20c, 0xc0, 6, 0x20c, 0, 420),
+ 0x20c, 0xc0, 0x20c, 0, 420),
CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
0xffff, vsim_val_tbl,
- 0x23, 0x8, 3, 0x3, 0, 420),
+ 0x23, 0x8, 0x3, 0, 420),
CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
0xffff, vsimcard_val_tbl,
- 0x1e80, 0x8, 3, 0x1e00, 0, 420),
+ 0x1e80, 0x8, 0x1e00, 0, 420),
CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VVIB_SEL, vvib_val_tbl,
- 0x1, 0xc, 2, 0x1, 0, 500),
+ 0x1, 0xc, 0x1, 0, 500),
CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VUSB_SEL, vusb_val_tbl,
- 0x11c, 0x40, 6, 0xc, 0, 0),
+ 0x11c, 0x40, 0xc, 0, 0),
CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4,
CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl,
- 0x16, 0x1, 0, 0x4, 0, 0),
+ 0x16, 0x1, 0x4, 0, 0),
{ /* sentinel */ },
};
-static struct cpcap_regulator xoom_regulators[] = {
+static const struct cpcap_regulator xoom_regulators[] = {
CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW1_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW2_SEL, sw2_sw4_val_tbl,
- 0xf00, 0x7f, 0, 0x800, 0, 120),
+ 0xf00, 0x7f, 0x800, 0, 120),
CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW3_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW4_SEL, sw2_sw4_val_tbl,
- 0xf00, 0x7f, 0, 0x900, 0, 100),
+ 0xf00, 0x7f, 0x900, 0, 100),
CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW5_SEL, sw5_val_tbl,
- 0x2a, 0, 0, 0x22, 0, 0),
+ 0x2a, 0, 0x22, 0, 0),
CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW6_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VCAM_SEL, vcam_val_tbl,
- 0x87, 0x30, 4, 0x7, 0, 420),
+ 0x87, 0x30, 0x7, 0, 420),
CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VCSI_SEL, vcsi_val_tbl,
- 0x47, 0x10, 4, 0x7, 0, 350),
+ 0x47, 0x10, 0x7, 0, 350),
CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VDAC_SEL, vdac_val_tbl,
- 0x87, 0x30, 4, 0x3, 0, 420),
+ 0x87, 0x30, 0x3, 0, 420),
CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VDIG_SEL, vdig_val_tbl,
- 0x87, 0x30, 4, 0x5, 0, 420),
+ 0x87, 0x30, 0x5, 0, 420),
CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl,
- 0x80, 0xf, 0, 0x80, 0, 420),
+ 0x80, 0xf, 0x80, 0, 420),
CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl,
- 0x17, 0, 0, 0x2, 0, 0),
+ 0x17, 0, 0x2, 0, 0),
CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl,
- 0x87, 0x38, 3, 0x2, 0, 420),
+ 0x87, 0x38, 0x2, 0, 420),
CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VPLL_SEL, vpll_val_tbl,
- 0x43, 0x18, 3, 0x1, 0, 420),
+ 0x43, 0x18, 0x1, 0, 420),
CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRF1_SEL, vrf1_val_tbl,
- 0xac, 0x2, 1, 0xc, 0, 10),
+ 0xac, 0x2, 0xc, 0, 10),
CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRF2_SEL, vrf2_val_tbl,
- 0x23, 0x8, 3, 0x3, 0, 10),
+ 0x23, 0x8, 0x3, 0, 10),
CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl,
- 0x23, 0x8, 3, 0x3, 0, 420),
+ 0x23, 0x8, 0x3, 0, 420),
CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl,
- 0x47, 0x10, 4, 0x5, 0, 420),
+ 0x47, 0x10, 0x5, 0, 420),
CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl,
- 0x20c, 0xc0, 6, 0x8, 0, 420),
+ 0x20c, 0xc0, 0x8, 0, 420),
CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
0xffff, vsim_val_tbl,
- 0x23, 0x8, 3, 0x3, 0, 420),
+ 0x23, 0x8, 0x3, 0, 420),
CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
0xffff, vsimcard_val_tbl,
- 0x1e80, 0x8, 3, 0x1e00, 0, 420),
+ 0x1e80, 0x8, 0x1e00, 0, 420),
CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VVIB_SEL, vvib_val_tbl,
- 0x1, 0xc, 2, 0, 0x1, 500),
+ 0x1, 0xc, 0, 0x1, 500),
CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VUSB_SEL, vusb_val_tbl,
- 0x11c, 0x40, 6, 0xc, 0, 0),
+ 0x11c, 0x40, 0xc, 0, 0),
CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4,
CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl,
- 0x16, 0x1, 0, 0x4, 0, 0),
+ 0x16, 0x1, 0x4, 0, 0),
{ /* sentinel */ },
};
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 207cb3859dcc..cefa3558236d 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -290,10 +290,10 @@ static const struct regulator_ops da9052_ldo_ops = {
.disable = regulator_disable_regmap,
};
-#define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \
+#define DA9052_LDO(_id, _name, step, min, max, sbits, ebits, abits) \
{\
.reg_desc = {\
- .name = #_id,\
+ .name = #_name,\
.ops = &da9052_ldo_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9052_ID_##_id,\
@@ -310,10 +310,10 @@ static const struct regulator_ops da9052_ldo_ops = {
.activate_bit = (abits),\
}
-#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
+#define DA9052_DCDC(_id, _name, step, min, max, sbits, ebits, abits) \
{\
.reg_desc = {\
- .name = #_id,\
+ .name = #_name,\
.ops = &da9052_dcdc_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9052_ID_##_id,\
@@ -331,37 +331,37 @@ static const struct regulator_ops da9052_ldo_ops = {
}
static struct da9052_regulator_info da9052_regulator_info[] = {
- DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
- DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
- DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
- DA9052_DCDC(BUCK4, 50, 1800, 3600, 5, 6, 0),
- DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0),
- DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
- DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
- DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0),
- DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0),
- DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_DCDC(BUCK4, buck4, 50, 1800, 3600, 5, 6, 0),
+ DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO(LDO2, ldo2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO(LDO3, ldo3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(LDO4, ldo4, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(LDO5, ldo5, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO6, ldo6, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO7, ldo7, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO8, ldo8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO9, ldo9, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0),
};
static struct da9052_regulator_info da9053_regulator_info[] = {
- DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
- DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
- DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
- DA9052_DCDC(BUCK4, 25, 950, 2525, 6, 6, 0),
- DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0),
- DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
- DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
- DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0),
- DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0),
- DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_DCDC(BUCK4, buck4, 25, 950, 2525, 6, 6, 0),
+ DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO(LDO2, ldo2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO(LDO3, ldo3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(LDO4, ldo4, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(LDO5, ldo5, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO6, ldo6, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO7, ldo7, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO8, ldo8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO9, ldo9, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0),
};
static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 588c3d2445cf..3c6fac793658 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -48,7 +48,9 @@
#define DA9055_ID_LDO6 7
/* DA9055 BUCK current limit */
-static const int da9055_current_limits[] = { 500000, 600000, 700000, 800000 };
+static const unsigned int da9055_current_limits[] = {
+ 500000, 600000, 700000, 800000
+};
struct da9055_conf_reg {
int reg;
@@ -169,39 +171,6 @@ static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
val << volt.sl_shift);
}
-static int da9055_buck_get_current_limit(struct regulator_dev *rdev)
-{
- struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
- int ret;
-
- ret = da9055_reg_read(regulator->da9055, DA9055_REG_BUCK_LIM);
- if (ret < 0)
- return ret;
-
- ret &= info->mode.mask;
- return da9055_current_limits[ret >> info->mode.shift];
-}
-
-static int da9055_buck_set_current_limit(struct regulator_dev *rdev, int min_uA,
- int max_uA)
-{
- struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
- int i;
-
- for (i = ARRAY_SIZE(da9055_current_limits) - 1; i >= 0; i--) {
- if ((min_uA <= da9055_current_limits[i]) &&
- (da9055_current_limits[i] <= max_uA))
- return da9055_reg_update(regulator->da9055,
- DA9055_REG_BUCK_LIM,
- info->mode.mask,
- i << info->mode.shift);
- }
-
- return -EINVAL;
-}
-
static int da9055_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
@@ -329,8 +298,8 @@ static const struct regulator_ops da9055_buck_ops = {
.get_mode = da9055_buck_get_mode,
.set_mode = da9055_buck_set_mode,
- .get_current_limit = da9055_buck_get_current_limit,
- .set_current_limit = da9055_buck_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_voltage_sel = da9055_regulator_get_voltage_sel,
.set_voltage_sel = da9055_regulator_set_voltage_sel,
@@ -407,6 +376,10 @@ static const struct regulator_ops da9055_ldo_ops = {
.uV_step = (step) * 1000,\
.linear_min_sel = (voffset),\
.owner = THIS_MODULE,\
+ .curr_table = da9055_current_limits,\
+ .n_current_limits = ARRAY_SIZE(da9055_current_limits),\
+ .csel_reg = DA9055_REG_BUCK_LIM,\
+ .csel_mask = (mbits),\
},\
.conf = {\
.reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \
@@ -457,7 +430,6 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
int gpio_mux = pdata->gpio_ren[id];
config->ena_gpiod = pdata->ena_gpiods[id];
- config->ena_gpio_invert = 1;
/*
* GPI pin is muxed with regulator to control the
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index 34a70d9dc450..b064d8a19d4c 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -126,7 +126,7 @@ static int da9062_set_current_limit(struct regulator_dev *rdev,
const struct da9062_regulator_info *rinfo = regl->info;
int n, tval;
- for (n = 0; n < rinfo->n_current_limits; n++) {
+ for (n = rinfo->n_current_limits - 1; n >= 0; n--) {
tval = rinfo->current_limits[n];
if (tval >= min_ua && tval <= max_ua)
return regmap_field_write(regl->ilimit, n);
@@ -992,7 +992,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
struct regulator_config config = { };
const struct da9062_regulator_info *rinfo;
int irq, n, ret;
- size_t size;
int max_regulators;
switch (chip->chip_type) {
@@ -1010,9 +1009,8 @@ static int da9062_regulator_probe(struct platform_device *pdev)
}
/* Allocate memory required by usable regulators */
- size = sizeof(struct da9062_regulators) +
- max_regulators * sizeof(struct da9062_regulator);
- regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ regulators = devm_kzalloc(&pdev->dev, struct_size(regulators, regulator,
+ max_regulators), GFP_KERNEL);
if (!regulators)
return -ENOMEM;
@@ -1029,31 +1027,50 @@ static int da9062_regulator_probe(struct platform_device *pdev)
regl->desc.type = REGULATOR_VOLTAGE;
regl->desc.owner = THIS_MODULE;
- if (regl->info->mode.reg)
+ if (regl->info->mode.reg) {
regl->mode = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->mode);
- if (regl->info->suspend.reg)
+ if (IS_ERR(regl->mode))
+ return PTR_ERR(regl->mode);
+ }
+
+ if (regl->info->suspend.reg) {
regl->suspend = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->suspend);
- if (regl->info->sleep.reg)
+ if (IS_ERR(regl->suspend))
+ return PTR_ERR(regl->suspend);
+ }
+
+ if (regl->info->sleep.reg) {
regl->sleep = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->sleep);
- if (regl->info->suspend_sleep.reg)
+ if (IS_ERR(regl->sleep))
+ return PTR_ERR(regl->sleep);
+ }
+
+ if (regl->info->suspend_sleep.reg) {
regl->suspend_sleep = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->suspend_sleep);
- if (regl->info->ilimit.reg)
+ if (IS_ERR(regl->suspend_sleep))
+ return PTR_ERR(regl->suspend_sleep);
+ }
+
+ if (regl->info->ilimit.reg) {
regl->ilimit = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->ilimit);
+ if (IS_ERR(regl->ilimit))
+ return PTR_ERR(regl->ilimit);
+ }
/* Register regulator */
memset(&config, 0, sizeof(config));
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 8cbcd2a3eb20..2b0c7a85306a 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -167,7 +167,7 @@ static int da9063_set_current_limit(struct regulator_dev *rdev,
const struct da9063_regulator_info *rinfo = regl->info;
int n, tval;
- for (n = 0; n < rinfo->n_current_limits; n++) {
+ for (n = rinfo->n_current_limits - 1; n >= 0; n--) {
tval = rinfo->current_limits[n];
if (tval >= min_uA && tval <= max_uA)
return regmap_field_write(regl->ilimit, n);
@@ -739,7 +739,6 @@ static int da9063_regulator_probe(struct platform_device *pdev)
struct regulator_config config;
bool bcores_merged, bmem_bio_merged;
int id, irq, n, n_regulators, ret, val;
- size_t size;
regl_pdata = da9063_pdata ? da9063_pdata->regulators_pdata : NULL;
@@ -784,9 +783,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
n_regulators--; /* remove BMEM_BIO_MERGED */
/* Allocate memory required by usable regulators */
- size = sizeof(struct da9063_regulators) +
- n_regulators * sizeof(struct da9063_regulator);
- regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ regulators = devm_kzalloc(&pdev->dev, struct_size(regulators,
+ regulator, n_regulators), GFP_KERNEL);
if (!regulators)
return -ENOMEM;
@@ -835,21 +833,40 @@ static int da9063_regulator_probe(struct platform_device *pdev)
regl->desc.type = REGULATOR_VOLTAGE;
regl->desc.owner = THIS_MODULE;
- if (regl->info->mode.reg)
+ if (regl->info->mode.reg) {
regl->mode = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->mode);
- if (regl->info->suspend.reg)
+ if (IS_ERR(regl->mode))
+ return PTR_ERR(regl->mode);
+ }
+
+ if (regl->info->suspend.reg) {
regl->suspend = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->suspend);
- if (regl->info->sleep.reg)
+ if (IS_ERR(regl->suspend))
+ return PTR_ERR(regl->suspend);
+ }
+
+ if (regl->info->sleep.reg) {
regl->sleep = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->sleep);
- if (regl->info->suspend_sleep.reg)
+ if (IS_ERR(regl->sleep))
+ return PTR_ERR(regl->sleep);
+ }
+
+ if (regl->info->suspend_sleep.reg) {
regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->suspend_sleep);
- if (regl->info->ilimit.reg)
+ if (IS_ERR(regl->suspend_sleep))
+ return PTR_ERR(regl->suspend_sleep);
+ }
+
+ if (regl->info->ilimit.reg) {
regl->ilimit = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->ilimit);
+ if (IS_ERR(regl->ilimit))
+ return PTR_ERR(regl->ilimit);
+ }
/* Register regulator */
memset(&config, 0, sizeof(config));
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 84dba64ed11e..528303771723 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -41,10 +41,6 @@ static const struct regmap_config da9210_regmap_config = {
.val_bits = 8,
};
-static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA,
- int max_uA);
-static int da9210_get_current_limit(struct regulator_dev *rdev);
-
static const struct regulator_ops da9210_buck_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -52,8 +48,8 @@ static const struct regulator_ops da9210_buck_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = da9210_set_current_limit,
- .get_current_limit = da9210_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
/* Default limits measured in millivolts and milliamps */
@@ -62,7 +58,7 @@ static const struct regulator_ops da9210_buck_ops = {
#define DA9210_STEP_MV 10
 /* Current limits for buck (uA), indices correspond with register values */
-static const int da9210_buck_limits[] = {
+static const unsigned int da9210_buck_limits[] = {
1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000,
3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000
};
@@ -80,47 +76,12 @@ static const struct regulator_desc da9210_reg = {
.enable_reg = DA9210_REG_BUCK_CONT,
.enable_mask = DA9210_BUCK_EN,
.owner = THIS_MODULE,
+ .curr_table = da9210_buck_limits,
+ .n_current_limits = ARRAY_SIZE(da9210_buck_limits),
+ .csel_reg = DA9210_REG_BUCK_ILIM,
+ .csel_mask = DA9210_BUCK_ILIM_MASK,
};
-static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA,
- int max_uA)
-{
- struct da9210 *chip = rdev_get_drvdata(rdev);
- unsigned int sel;
- int i;
-
- /* search for closest to maximum */
- for (i = ARRAY_SIZE(da9210_buck_limits)-1; i >= 0; i--) {
- if (min_uA <= da9210_buck_limits[i] &&
- max_uA >= da9210_buck_limits[i]) {
- sel = i;
- sel = sel << DA9210_BUCK_ILIM_SHIFT;
- return regmap_update_bits(chip->regmap,
- DA9210_REG_BUCK_ILIM,
- DA9210_BUCK_ILIM_MASK, sel);
- }
- }
-
- return -EINVAL;
-}
-
-static int da9210_get_current_limit(struct regulator_dev *rdev)
-{
- struct da9210 *chip = rdev_get_drvdata(rdev);
- unsigned int data;
- unsigned int sel;
- int ret;
-
- ret = regmap_read(chip->regmap, DA9210_REG_BUCK_ILIM, &data);
- if (ret < 0)
- return ret;
-
- /* select one of 16 values: 0000 (1600mA) to 1111 (4600mA) */
- sel = (data & DA9210_BUCK_ILIM_MASK) >> DA9210_BUCK_ILIM_SHIFT;
-
- return da9210_buck_limits[sel];
-}
-
static irqreturn_t da9210_irq_handler(int irq, void *data)
{
struct da9210 *chip = data;
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index a3bc8037153e..771a06d1900d 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -40,7 +40,6 @@
/* VSEL bit definitions */
#define VSEL_BUCK_EN (1 << 7)
#define VSEL_MODE (1 << 6)
-#define VSEL_NSEL_MASK 0x3F
 /* Chip ID and Version */
#define DIE_ID 0x0F /* ID1 */
#define DIE_REV 0x0F /* ID2 */
@@ -49,14 +48,26 @@
#define CTL_SLEW_MASK (0x7 << 4)
#define CTL_SLEW_SHIFT 4
#define CTL_RESET (1 << 2)
+#define CTL_MODE_VSEL0_MODE BIT(0)
+#define CTL_MODE_VSEL1_MODE BIT(1)
#define FAN53555_NVOLTAGES 64 /* Numbers of voltages */
+#define FAN53526_NVOLTAGES 128
enum fan53555_vendor {
- FAN53555_VENDOR_FAIRCHILD = 0,
+ FAN53526_VENDOR_FAIRCHILD = 0,
+ FAN53555_VENDOR_FAIRCHILD,
FAN53555_VENDOR_SILERGY,
};
+enum {
+ FAN53526_CHIP_ID_01 = 1,
+};
+
+enum {
+ FAN53526_CHIP_REV_08 = 8,
+};
+
/* IC Type */
enum {
FAN53555_CHIP_ID_00 = 0,
@@ -94,8 +105,12 @@ struct fan53555_device_info {
/* Voltage range and step(linear) */
unsigned int vsel_min;
unsigned int vsel_step;
+ unsigned int vsel_count;
/* Voltage slew rate limiting */
unsigned int slew_rate;
+ /* Mode */
+ unsigned int mode_reg;
+ unsigned int mode_mask;
/* Sleep voltage cache */
unsigned int sleep_vol_cache;
};
@@ -111,7 +126,7 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
if (ret < 0)
return ret;
ret = regmap_update_bits(di->regmap, di->sleep_reg,
- VSEL_NSEL_MASK, ret);
+ di->desc.vsel_mask, ret);
if (ret < 0)
return ret;
/* Cache the sleep voltage setting.
@@ -143,11 +158,11 @@ static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- regmap_update_bits(di->regmap, di->vol_reg,
- VSEL_MODE, VSEL_MODE);
+ regmap_update_bits(di->regmap, di->mode_reg,
+ di->mode_mask, di->mode_mask);
break;
case REGULATOR_MODE_NORMAL:
- regmap_update_bits(di->regmap, di->vol_reg, VSEL_MODE, 0);
+ regmap_update_bits(di->regmap, di->vol_reg, di->mode_mask, 0);
break;
default:
return -EINVAL;
@@ -161,10 +176,10 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
unsigned int val;
int ret = 0;
- ret = regmap_read(di->regmap, di->vol_reg, &val);
+ ret = regmap_read(di->regmap, di->mode_reg, &val);
if (ret < 0)
return ret;
- if (val & VSEL_MODE)
+ if (val & di->mode_mask)
return REGULATOR_MODE_FAST;
else
return REGULATOR_MODE_NORMAL;
@@ -219,6 +234,34 @@ static const struct regulator_ops fan53555_regulator_ops = {
.set_suspend_disable = fan53555_set_suspend_disable,
};
+static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
+{
+ /* Init voltage range and step */
+ switch (di->chip_id) {
+ case FAN53526_CHIP_ID_01:
+ switch (di->chip_rev) {
+ case FAN53526_CHIP_REV_08:
+ di->vsel_min = 600000;
+ di->vsel_step = 6250;
+ break;
+ default:
+ dev_err(di->dev,
+ "Chip ID %d with rev %d not supported!\n",
+ di->chip_id, di->chip_rev);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(di->dev,
+ "Chip ID %d not supported!\n", di->chip_id);
+ return -EINVAL;
+ }
+
+ di->vsel_count = FAN53526_NVOLTAGES;
+
+ return 0;
+}
+
static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
{
/* Init voltage range and step */
@@ -257,6 +300,8 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
return -EINVAL;
}
+ di->vsel_count = FAN53555_NVOLTAGES;
+
return 0;
}
@@ -274,6 +319,8 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
return -EINVAL;
}
+ di->vsel_count = FAN53555_NVOLTAGES;
+
return 0;
}
@@ -302,7 +349,35 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
return -EINVAL;
}
+ /* Setup mode control register */
+ switch (di->vendor) {
+ case FAN53526_VENDOR_FAIRCHILD:
+ di->mode_reg = FAN53555_CONTROL;
+
+ switch (pdata->sleep_vsel_id) {
+ case FAN53555_VSEL_ID_0:
+ di->mode_mask = CTL_MODE_VSEL1_MODE;
+ break;
+ case FAN53555_VSEL_ID_1:
+ di->mode_mask = CTL_MODE_VSEL0_MODE;
+ break;
+ }
+ break;
+ case FAN53555_VENDOR_FAIRCHILD:
+ case FAN53555_VENDOR_SILERGY:
+ di->mode_reg = di->vol_reg;
+ di->mode_mask = VSEL_MODE;
+ break;
+ default:
+ dev_err(di->dev, "vendor %d not supported!\n", di->vendor);
+ return -EINVAL;
+ }
+
+ /* Setup voltage range */
switch (di->vendor) {
+ case FAN53526_VENDOR_FAIRCHILD:
+ ret = fan53526_voltages_setup_fairchild(di);
+ break;
case FAN53555_VENDOR_FAIRCHILD:
ret = fan53555_voltages_setup_fairchild(di);
break;
@@ -326,13 +401,13 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->supply_name = "vin";
rdesc->ops = &fan53555_regulator_ops;
rdesc->type = REGULATOR_VOLTAGE;
- rdesc->n_voltages = FAN53555_NVOLTAGES;
+ rdesc->n_voltages = di->vsel_count;
rdesc->enable_reg = di->vol_reg;
rdesc->enable_mask = VSEL_BUCK_EN;
rdesc->min_uV = di->vsel_min;
rdesc->uV_step = di->vsel_step;
rdesc->vsel_reg = di->vol_reg;
- rdesc->vsel_mask = VSEL_NSEL_MASK;
+ rdesc->vsel_mask = di->vsel_count - 1;
rdesc->owner = THIS_MODULE;
di->rdev = devm_regulator_register(di->dev, &di->desc, config);
@@ -368,6 +443,9 @@ static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev,
static const struct of_device_id fan53555_dt_ids[] = {
{
+ .compatible = "fcs,fan53526",
+ .data = (void *)FAN53526_VENDOR_FAIRCHILD,
+ }, {
.compatible = "fcs,fan53555",
.data = (void *)FAN53555_VENDOR_FAIRCHILD
}, {
@@ -412,11 +490,13 @@ static int fan53555_regulator_probe(struct i2c_client *client,
} else {
/* if no ramp constraint set, get the pdata ramp_delay */
if (!di->regulator->constraints.ramp_delay) {
- int slew_idx = (pdata->slew_rate & 0x7)
- ? pdata->slew_rate : 0;
+ if (pdata->slew_rate >= ARRAY_SIZE(slew_rates)) {
+ dev_err(&client->dev, "Invalid slew_rate\n");
+ return -EINVAL;
+ }
di->regulator->constraints.ramp_delay
- = slew_rates[slew_idx];
+ = slew_rates[pdata->slew_rate];
}
di->vendor = id->driver_data;
@@ -467,6 +547,9 @@ static int fan53555_regulator_probe(struct i2c_client *client,
static const struct i2c_device_id fan53555_id[] = {
{
+ .name = "fan53526",
+ .driver_data = FAN53526_VENDOR_FAIRCHILD
+ }, {
.name = "fan53555",
.driver_data = FAN53555_VENDOR_FAIRCHILD
}, {
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 9abdb9130766..b5afc9db2c61 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -79,15 +79,6 @@ of_get_fixed_voltage_config(struct device *dev,
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
- /*
- * FIXME: we pulled active low/high and open drain handling into
- * gpiolib so it will be handled there. Delete this in the second
- * step when we also remove the custom inversion handling for all
- * legacy boardfiles.
- */
- config->enable_high = 1;
- config->gpio_is_open_drain = 0;
-
if (of_find_property(np, "vin-supply", NULL))
config->input_supply = "vin";
@@ -151,24 +142,14 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.fixed_uV = config->microvolts;
- cfg.ena_gpio_invert = !config->enable_high;
- if (config->enabled_at_boot) {
- if (config->enable_high)
- gflags = GPIOD_OUT_HIGH;
- else
- gflags = GPIOD_OUT_LOW;
- } else {
- if (config->enable_high)
- gflags = GPIOD_OUT_LOW;
- else
- gflags = GPIOD_OUT_HIGH;
- }
- if (config->gpio_is_open_drain) {
- if (gflags == GPIOD_OUT_HIGH)
- gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
- else
- gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
- }
+ /*
+ * The signal will be inverted by the GPIO core if flagged so in the
+ * descriptor.
+ */
+ if (config->enabled_at_boot)
+ gflags = GPIOD_OUT_HIGH;
+ else
+ gflags = GPIOD_OUT_LOW;
/*
* Some fixed regulators share the enable line between two
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index b2f5ec4f658a..6157001df0a4 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -30,16 +30,15 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/gpio-regulator.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
struct gpio_regulator_data {
struct regulator_desc desc;
struct regulator_dev *dev;
- struct gpio *gpios;
+ struct gpio_desc **gpiods;
int nr_gpios;
struct gpio_regulator_state *states;
@@ -82,7 +81,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
for (ptr = 0; ptr < data->nr_gpios; ptr++) {
state = (target & (1 << ptr)) >> ptr;
- gpio_set_value_cansleep(data->gpios[ptr].gpio, state);
+ gpiod_set_value_cansleep(data->gpiods[ptr], state);
}
data->state = target;
@@ -119,7 +118,7 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
for (ptr = 0; ptr < data->nr_gpios; ptr++) {
state = (target & (1 << ptr)) >> ptr;
- gpio_set_value_cansleep(data->gpios[ptr].gpio, state);
+ gpiod_set_value_cansleep(data->gpiods[ptr], state);
}
data->state = target;
@@ -138,7 +137,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
{
struct gpio_regulator_config *config;
const char *regtype;
- int proplen, gpio, i;
+ int proplen, i;
+ int ngpios;
int ret;
config = devm_kzalloc(dev,
@@ -153,59 +153,36 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
config->supply_name = config->init_data->constraints.name;
- if (of_property_read_bool(np, "enable-active-high"))
- config->enable_high = true;
-
if (of_property_read_bool(np, "enable-at-boot"))
config->enabled_at_boot = true;
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
- config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
- if (config->enable_gpio < 0 && config->enable_gpio != -ENOENT)
- return ERR_PTR(config->enable_gpio);
-
- /* Fetch GPIOs. - optional property*/
- ret = of_gpio_count(np);
- if ((ret < 0) && (ret != -ENOENT))
- return ERR_PTR(ret);
-
- if (ret > 0) {
- config->nr_gpios = ret;
- config->gpios = devm_kcalloc(dev,
- config->nr_gpios, sizeof(struct gpio),
- GFP_KERNEL);
- if (!config->gpios)
+ /* Fetch GPIO init levels */
+ ngpios = gpiod_count(dev, NULL);
+ if (ngpios > 0) {
+ config->gflags = devm_kzalloc(dev,
+ sizeof(enum gpiod_flags)
+ * ngpios,
+ GFP_KERNEL);
+ if (!config->gflags)
return ERR_PTR(-ENOMEM);
- proplen = of_property_count_u32_elems(np, "gpios-states");
- /* optional property */
- if (proplen < 0)
- proplen = 0;
+ for (i = 0; i < ngpios; i++) {
+ u32 val;
- if (proplen > 0 && proplen != config->nr_gpios) {
- dev_warn(dev, "gpios <-> gpios-states mismatch\n");
- proplen = 0;
- }
+ ret = of_property_read_u32_index(np, "gpios-states", i,
+ &val);
- for (i = 0; i < config->nr_gpios; i++) {
- gpio = of_get_named_gpio(np, "gpios", i);
- if (gpio < 0) {
- if (gpio != -ENOENT)
- return ERR_PTR(gpio);
- break;
- }
- config->gpios[i].gpio = gpio;
- config->gpios[i].label = config->supply_name;
- if (proplen > 0) {
- of_property_read_u32_index(np, "gpios-states",
- i, &ret);
- if (ret)
- config->gpios[i].flags =
- GPIOF_OUT_INIT_HIGH;
- }
+ /* Default to high per specification */
+ if (ret)
+ config->gflags[i] = GPIOD_OUT_HIGH;
+ else
+ config->gflags[i] =
+ val ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
}
}
+ config->ngpios = ngpios;
/* Fetch states. */
proplen = of_property_count_u32_elems(np, "states");
@@ -251,59 +228,56 @@ static struct regulator_ops gpio_regulator_current_ops = {
static int gpio_regulator_probe(struct platform_device *pdev)
{
- struct gpio_regulator_config *config = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct gpio_regulator_config *config = dev_get_platdata(dev);
+ struct device_node *np = dev->of_node;
struct gpio_regulator_data *drvdata;
struct regulator_config cfg = { };
- int ptr, ret, state;
+ enum gpiod_flags gflags;
+ int ptr, ret, state, i;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
+ drvdata = devm_kzalloc(dev, sizeof(struct gpio_regulator_data),
GFP_KERNEL);
if (drvdata == NULL)
return -ENOMEM;
if (np) {
- config = of_get_gpio_regulator_config(&pdev->dev, np,
+ config = of_get_gpio_regulator_config(dev, np,
&drvdata->desc);
if (IS_ERR(config))
return PTR_ERR(config);
}
- drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
+ drvdata->desc.name = devm_kstrdup(dev, config->supply_name, GFP_KERNEL);
if (drvdata->desc.name == NULL) {
- dev_err(&pdev->dev, "Failed to allocate supply name\n");
+ dev_err(dev, "Failed to allocate supply name\n");
return -ENOMEM;
}
- if (config->nr_gpios != 0) {
- drvdata->gpios = kmemdup(config->gpios,
- config->nr_gpios * sizeof(struct gpio),
- GFP_KERNEL);
- if (drvdata->gpios == NULL) {
- dev_err(&pdev->dev, "Failed to allocate gpio data\n");
- ret = -ENOMEM;
- goto err_name;
- }
-
- drvdata->nr_gpios = config->nr_gpios;
- ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Could not obtain regulator setting GPIOs: %d\n",
- ret);
- goto err_memgpio;
- }
+ drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!drvdata->gpiods)
+ return -ENOMEM;
+ for (i = 0; i < config->ngpios; i++) {
+ drvdata->gpiods[i] = devm_gpiod_get_index(dev,
+ NULL,
+ i,
+ config->gflags[i]);
+ if (IS_ERR(drvdata->gpiods[i]))
+ return PTR_ERR(drvdata->gpiods[i]);
+ /* Give the GPIO a descriptive consumer name for debugging */
+ gpiod_set_consumer_name(drvdata->gpiods[i], drvdata->desc.name);
}
+ drvdata->nr_gpios = config->ngpios;
- drvdata->states = kmemdup(config->states,
- config->nr_states *
- sizeof(struct gpio_regulator_state),
- GFP_KERNEL);
+ drvdata->states = devm_kmemdup(dev,
+ config->states,
+ config->nr_states *
+ sizeof(struct gpio_regulator_state),
+ GFP_KERNEL);
if (drvdata->states == NULL) {
- dev_err(&pdev->dev, "Failed to allocate state data\n");
- ret = -ENOMEM;
- goto err_stategpio;
+ dev_err(dev, "Failed to allocate state data\n");
+ return -ENOMEM;
}
drvdata->nr_states = config->nr_states;
@@ -322,61 +296,46 @@ static int gpio_regulator_probe(struct platform_device *pdev)
drvdata->desc.ops = &gpio_regulator_current_ops;
break;
default:
- dev_err(&pdev->dev, "No regulator type set\n");
- ret = -EINVAL;
- goto err_memstate;
+ dev_err(dev, "No regulator type set\n");
+ return -EINVAL;
}
/* build initial state from gpio init data. */
state = 0;
for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) {
- if (config->gpios[ptr].flags & GPIOF_OUT_INIT_HIGH)
+ if (config->gflags[ptr] == GPIOD_OUT_HIGH)
state |= (1 << ptr);
}
drvdata->state = state;
- cfg.dev = &pdev->dev;
+ cfg.dev = dev;
cfg.init_data = config->init_data;
cfg.driver_data = drvdata;
cfg.of_node = np;
- if (gpio_is_valid(config->enable_gpio)) {
- cfg.ena_gpio = config->enable_gpio;
- cfg.ena_gpio_initialized = true;
- }
- cfg.ena_gpio_invert = !config->enable_high;
- if (config->enabled_at_boot) {
- if (config->enable_high)
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- else
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
- } else {
- if (config->enable_high)
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
- else
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- }
+ /*
+ * The signal will be inverted by the GPIO core if flagged so in the
+ * descriptor.
+ */
+ if (config->enabled_at_boot)
+ gflags = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE;
+ else
+ gflags = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE;
+
+ cfg.ena_gpiod = gpiod_get_optional(dev, "enable", gflags);
+ if (IS_ERR(cfg.ena_gpiod))
+ return PTR_ERR(cfg.ena_gpiod);
drvdata->dev = regulator_register(&drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
- dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- goto err_memstate;
+ dev_err(dev, "Failed to register regulator: %d\n", ret);
+ return ret;
}
platform_set_drvdata(pdev, drvdata);
return 0;
-
-err_memstate:
- kfree(drvdata->states);
-err_stategpio:
- gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
-err_memgpio:
- kfree(drvdata->gpios);
-err_name:
- kfree(drvdata->desc.name);
- return ret;
}
static int gpio_regulator_remove(struct platform_device *pdev)
@@ -385,13 +344,6 @@ static int gpio_regulator_remove(struct platform_device *pdev)
regulator_unregister(drvdata->dev);
- gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
-
- kfree(drvdata->states);
- kfree(drvdata->gpios);
-
- kfree(drvdata->desc.name);
-
return 0;
}
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 5686a1335bd3..32d3f0499e2d 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -594,28 +594,30 @@ int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
EXPORT_SYMBOL_GPL(regulator_list_voltage_pickable_linear_range);
/**
- * regulator_list_voltage_linear_range - List voltages for linear ranges
+ * regulator_desc_list_voltage_linear_range - List voltages for linear ranges
*
- * @rdev: Regulator device
+ * @desc: Regulator desc of the regulator whose voltages are to be listed
* @selector: Selector to convert into a voltage
*
* Regulators with a series of simple linear mappings between voltages
- * and selectors can set linear_ranges in the regulator descriptor and
- * then use this function as their list_voltage() operation,
+ * and selectors that have set linear_ranges in the regulator descriptor
+ * can use this function prior to regulator registration to list voltages.
+ * This is useful when voltages need to be listed during device-tree
+ * parsing.
*/
-int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
- unsigned int selector)
+int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
+ unsigned int selector)
{
const struct regulator_linear_range *range;
int i;
- if (!rdev->desc->n_linear_ranges) {
- BUG_ON(!rdev->desc->n_linear_ranges);
+ if (!desc->n_linear_ranges) {
+ BUG_ON(!desc->n_linear_ranges);
return -EINVAL;
}
- for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
- range = &rdev->desc->linear_ranges[i];
+ for (i = 0; i < desc->n_linear_ranges; i++) {
+ range = &desc->linear_ranges[i];
if (!(selector >= range->min_sel &&
selector <= range->max_sel))
@@ -628,6 +630,23 @@ int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(regulator_desc_list_voltage_linear_range);
+
+/**
+ * regulator_list_voltage_linear_range - List voltages for linear ranges
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a series of simple linear mappings between voltages
+ * and selectors can set linear_ranges in the regulator descriptor and
+ * then use this function as their list_voltage() operation.
+ */
+int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ return regulator_desc_list_voltage_linear_range(rdev->desc, selector);
+}
EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range);
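A minimal sketch of how a driver could use the new desc-based helper before registration, for instance while validating device-tree data; the example_ranges table, the selector count and the "example-ldo" name are invented for illustration.

#include <linux/printk.h>
#include <linux/regulator/driver.h>

/* Hypothetical descriptor; only the linear-range fields matter here */
static const struct regulator_linear_range example_ranges[] = {
        REGULATOR_LINEAR_RANGE(800000, 0x00, 0x0f, 50000),
        REGULATOR_LINEAR_RANGE(1600000, 0x10, 0x1f, 100000),
};

static const struct regulator_desc example_desc = {
        .name = "example-ldo",
        .linear_ranges = example_ranges,
        .n_linear_ranges = ARRAY_SIZE(example_ranges),
        .n_voltages = 32,
};

/* List every selectable voltage without having registered the regulator */
static void example_dump_voltages(void)
{
        unsigned int sel;

        for (sel = 0; sel < example_desc.n_voltages; sel++)
                pr_info("sel %u -> %d uV\n", sel,
                        regulator_desc_list_voltage_linear_range(&example_desc, sel));
}

Because the helper only needs the descriptor, it can run from an of_parse_cb() callback or any other pre-registration hook, which is the use case the comment above mentions.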
/**
@@ -761,3 +780,89 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
rdev->desc->active_discharge_mask, val);
}
EXPORT_SYMBOL_GPL(regulator_set_active_discharge_regmap);
+
+/**
+ * regulator_set_current_limit_regmap - set_current_limit for regmap users
+ *
+ * @rdev: regulator to operate on
+ * @min_uA: Lower bound for current limit
+ * @max_uA: Upper bound for current limit
+ *
+ * Regulators that use regmap for their register I/O can set curr_table,
+ * csel_reg and csel_mask fields in their descriptor and then use this
+ * as their set_current_limit operation, saving some code.
+ */
+int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ unsigned int n_currents = rdev->desc->n_current_limits;
+ int i, sel = -1;
+
+ if (n_currents == 0)
+ return -EINVAL;
+
+ if (rdev->desc->curr_table) {
+ const unsigned int *curr_table = rdev->desc->curr_table;
+ bool ascend = curr_table[n_currents - 1] > curr_table[0];
+
+ /* search for closest to maximum */
+ if (ascend) {
+ for (i = n_currents - 1; i >= 0; i--) {
+ if (min_uA <= curr_table[i] &&
+ curr_table[i] <= max_uA) {
+ sel = i;
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < n_currents; i++) {
+ if (min_uA <= curr_table[i] &&
+ curr_table[i] <= max_uA) {
+ sel = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (sel < 0)
+ return -EINVAL;
+
+ sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
+ rdev->desc->csel_mask, sel);
+}
+EXPORT_SYMBOL_GPL(regulator_set_current_limit_regmap);
+
+/**
+ * regulator_get_current_limit_regmap - get_current_limit for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * csel_reg and csel_mask fields in their descriptor and then use this
+ * as their get_current_limit operation, saving some code.
+ */
+int regulator_get_current_limit_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->csel_mask;
+ val >>= ffs(rdev->desc->csel_mask) - 1;
+
+ if (rdev->desc->curr_table) {
+ if (val >= rdev->desc->n_current_limits)
+ return -EINVAL;
+
+ return rdev->desc->curr_table[val];
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_get_current_limit_regmap);
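To show how a driver is expected to wire these two helpers up, here is a minimal sketch following the kernel-doc above; the register address, mask and current table are invented, and config.regmap is assumed to be supplied at registration time as usual.

#include <linux/regulator/driver.h>

/* Hypothetical current-limit table; index N maps to register field value N */
static const unsigned int example_buck_limits[] = {
        500000, 1000000, 1500000, 2000000,
};

#define EXAMPLE_REG_ILIM        0x12    /* invented register address */
#define EXAMPLE_ILIM_MASK       0x03    /* invented 2-bit field */

static const struct regulator_ops example_buck_ops = {
        .set_current_limit = regulator_set_current_limit_regmap,
        .get_current_limit = regulator_get_current_limit_regmap,
};

static const struct regulator_desc example_buck_desc = {
        .name = "example-buck",
        .ops = &example_buck_ops,
        .type = REGULATOR_VOLTAGE,
        .owner = THIS_MODULE,
        .curr_table = example_buck_limits,
        .n_current_limits = ARRAY_SIZE(example_buck_limits),
        .csel_reg = EXAMPLE_REG_ILIM,
        .csel_mask = EXAMPLE_ILIM_MASK,
};

This is the same pattern the da9055, da9210, lp872x and lp873x conversions in this series follow.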
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
index 36ae54b53814..bba24a6fdb1e 100644
--- a/drivers/regulator/hi655x-regulator.c
+++ b/drivers/regulator/hi655x-regulator.c
@@ -28,7 +28,6 @@
struct hi655x_regulator {
unsigned int disable_reg;
unsigned int status_reg;
- unsigned int ctrl_regs;
unsigned int ctrl_mask;
struct regulator_desc rdesc;
};
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 4abd8e9c81e5..6f28bba81d13 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -31,7 +31,6 @@
/* PMIC details */
struct isl_pmic {
struct i2c_client *client;
- struct regulator_dev *rdev[3];
struct mutex mtx;
};
@@ -66,14 +65,14 @@ static int isl6271a_set_voltage_sel(struct regulator_dev *dev,
return err;
}
-static struct regulator_ops isl_core_ops = {
+static const struct regulator_ops isl_core_ops = {
.get_voltage_sel = isl6271a_get_voltage_sel,
.set_voltage_sel = isl6271a_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
};
-static struct regulator_ops isl_fixed_ops = {
+static const struct regulator_ops isl_fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
};
@@ -109,6 +108,7 @@ static const struct regulator_desc isl_rd[] = {
static int isl6271a_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ struct regulator_dev *rdev;
struct regulator_config config = { };
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct isl_pmic *pmic;
@@ -133,11 +133,10 @@ static int isl6271a_probe(struct i2c_client *i2c,
config.init_data = NULL;
config.driver_data = pmic;
- pmic->rdev[i] = devm_regulator_register(&i2c->dev, &isl_rd[i],
- &config);
- if (IS_ERR(pmic->rdev[i])) {
+ rdev = devm_regulator_register(&i2c->dev, &isl_rd[i], &config);
+ if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
- return PTR_ERR(pmic->rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index 8c0e8419c43f..c876e161052a 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -258,6 +258,9 @@ static int lm363x_regulator_probe(struct platform_device *pdev)
* Register update is required if the pin is used.
*/
gpiod = lm363x_regulator_of_get_enable_gpio(dev, id);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
if (gpiod) {
cfg.ena_gpiod = gpiod;
@@ -265,8 +268,7 @@ static int lm363x_regulator_probe(struct platform_device *pdev)
LM3632_EXT_EN_MASK,
LM3632_EXT_EN_MASK);
if (ret) {
- if (gpiod)
- gpiod_put(gpiod);
+ gpiod_put(gpiod);
dev_err(dev, "External pin err: %d\n", ret);
return ret;
}
diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c
index 5a89e6d4b9a6..ff97cc50f2eb 100644
--- a/drivers/regulator/lochnagar-regulator.c
+++ b/drivers/regulator/lochnagar-regulator.c
@@ -194,7 +194,7 @@ static const struct regulator_desc lochnagar_regulators[] = {
.name = "VDDCORE",
.supply_name = "SYSVDD",
.type = REGULATOR_VOLTAGE,
- .n_voltages = 57,
+ .n_voltages = 66,
.ops = &lochnagar_vddcore_ops,
.id = LOCHNAGAR_VDDCORE,
@@ -226,14 +226,15 @@ static const struct of_device_id lochnagar_of_match[] = {
},
{
.compatible = "cirrus,lochnagar2-mic2vdd",
- .data = &lochnagar_regulators[LOCHNAGAR_MIC1VDD],
+ .data = &lochnagar_regulators[LOCHNAGAR_MIC2VDD],
},
{
.compatible = "cirrus,lochnagar2-vddcore",
.data = &lochnagar_regulators[LOCHNAGAR_VDDCORE],
},
- {},
+ {}
};
+MODULE_DEVICE_TABLE(of, lochnagar_of_match);
static int lochnagar_regulator_probe(struct platform_device *pdev)
{
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 204b5c5270e0..9e45112658ba 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -159,7 +159,7 @@ static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
selector << LDO_VOL_CONTR_SHIFT(ldo));
}
-static struct regulator_ops lp3971_ldo_ops = {
+static const struct regulator_ops lp3971_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3971_ldo_is_enabled,
@@ -233,7 +233,7 @@ static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
0 << BUCK_VOL_CHANGE_SHIFT(buck));
}
-static struct regulator_ops lp3971_dcdc_ops = {
+static const struct regulator_ops lp3971_dcdc_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3971_dcdc_is_enabled,
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index ff0c275f902e..fb098198b688 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -305,7 +305,7 @@ static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
return ret;
}
-static struct regulator_ops lp3972_ldo_ops = {
+static const struct regulator_ops lp3972_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3972_ldo_is_enabled,
@@ -386,7 +386,7 @@ static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
LP3972_VOL_CHANGE_FLAG_MASK, 0);
}
-static struct regulator_ops lp3972_dcdc_ops = {
+static const struct regulator_ops lp3972_dcdc_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3972_dcdc_is_enabled,
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 38992112fd6e..ca95257ce252 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -353,64 +353,6 @@ static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev)
return val & LP872X_VOUT_M;
}
-static int lp8725_buck_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct lp872x *lp = rdev_get_drvdata(rdev);
- enum lp872x_regulator_id buck = rdev_get_id(rdev);
- int i;
- u8 addr;
-
- switch (buck) {
- case LP8725_ID_BUCK1:
- addr = LP8725_BUCK1_VOUT2;
- break;
- case LP8725_ID_BUCK2:
- addr = LP8725_BUCK2_VOUT2;
- break;
- default:
- return -EINVAL;
- }
-
- for (i = ARRAY_SIZE(lp8725_buck_uA) - 1; i >= 0; i--) {
- if (lp8725_buck_uA[i] >= min_uA &&
- lp8725_buck_uA[i] <= max_uA)
- return lp872x_update_bits(lp, addr,
- LP8725_BUCK_CL_M,
- i << LP8725_BUCK_CL_S);
- }
-
- return -EINVAL;
-}
-
-static int lp8725_buck_get_current_limit(struct regulator_dev *rdev)
-{
- struct lp872x *lp = rdev_get_drvdata(rdev);
- enum lp872x_regulator_id buck = rdev_get_id(rdev);
- u8 addr, val;
- int ret;
-
- switch (buck) {
- case LP8725_ID_BUCK1:
- addr = LP8725_BUCK1_VOUT2;
- break;
- case LP8725_ID_BUCK2:
- addr = LP8725_BUCK2_VOUT2;
- break;
- default:
- return -EINVAL;
- }
-
- ret = lp872x_read_byte(lp, addr, &val);
- if (ret)
- return ret;
-
- val = (val & LP8725_BUCK_CL_M) >> LP8725_BUCK_CL_S;
-
- return (val < ARRAY_SIZE(lp8725_buck_uA)) ?
- lp8725_buck_uA[val] : -EINVAL;
-}
-
static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
@@ -478,7 +420,7 @@ static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev)
return val & mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
-static struct regulator_ops lp872x_ldo_ops = {
+static const struct regulator_ops lp872x_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -489,7 +431,7 @@ static struct regulator_ops lp872x_ldo_ops = {
.enable_time = lp872x_regulator_enable_time,
};
-static struct regulator_ops lp8720_buck_ops = {
+static const struct regulator_ops lp8720_buck_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp872x_buck_set_voltage_sel,
@@ -502,7 +444,7 @@ static struct regulator_ops lp8720_buck_ops = {
.get_mode = lp872x_buck_get_mode,
};
-static struct regulator_ops lp8725_buck_ops = {
+static const struct regulator_ops lp8725_buck_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp872x_buck_set_voltage_sel,
@@ -513,11 +455,11 @@ static struct regulator_ops lp8725_buck_ops = {
.enable_time = lp872x_regulator_enable_time,
.set_mode = lp872x_buck_set_mode,
.get_mode = lp872x_buck_get_mode,
- .set_current_limit = lp8725_buck_set_current_limit,
- .get_current_limit = lp8725_buck_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
-static struct regulator_desc lp8720_regulator_desc[] = {
+static const struct regulator_desc lp8720_regulator_desc[] = {
{
.name = "ldo1",
.of_match = of_match_ptr("ldo1"),
@@ -602,7 +544,7 @@ static struct regulator_desc lp8720_regulator_desc[] = {
},
};
-static struct regulator_desc lp8725_regulator_desc[] = {
+static const struct regulator_desc lp8725_regulator_desc[] = {
{
.name = "ldo1",
.of_match = of_match_ptr("ldo1"),
@@ -712,6 +654,10 @@ static struct regulator_desc lp8725_regulator_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP872X_GENERAL_CFG,
.enable_mask = LP8725_BUCK1_EN_M,
+ .curr_table = lp8725_buck_uA,
+ .n_current_limits = ARRAY_SIZE(lp8725_buck_uA),
+ .csel_reg = LP8725_BUCK1_VOUT2,
+ .csel_mask = LP8725_BUCK_CL_M,
},
{
.name = "buck2",
@@ -724,6 +670,10 @@ static struct regulator_desc lp8725_regulator_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP872X_GENERAL_CFG,
.enable_mask = LP8725_BUCK2_EN_M,
+ .curr_table = lp8725_buck_uA,
+ .n_current_limits = ARRAY_SIZE(lp8725_buck_uA),
+ .csel_reg = LP8725_BUCK2_VOUT2,
+ .csel_mask = LP8725_BUCK_CL_M,
},
};
@@ -820,7 +770,7 @@ static struct regulator_init_data
static int lp872x_regulator_register(struct lp872x *lp)
{
- struct regulator_desc *desc;
+ const struct regulator_desc *desc;
struct regulator_config cfg = { };
struct regulator_dev *rdev;
int i;
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index 70e3df653381..b55de293ca7a 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -39,6 +39,10 @@
.ramp_delay = _delay, \
.linear_ranges = _lr, \
.n_linear_ranges = ARRAY_SIZE(_lr), \
+ .curr_table = lp873x_buck_uA, \
+ .n_current_limits = ARRAY_SIZE(lp873x_buck_uA), \
+ .csel_reg = (_cr), \
+ .csel_mask = LP873X_BUCK0_CTRL_2_BUCK0_ILIM,\
}, \
.ctrl2_reg = _cr, \
}
@@ -61,7 +65,7 @@ static const struct regulator_linear_range ldo0_ldo1_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000),
};
-static unsigned int lp873x_buck_ramp_delay[] = {
+static const unsigned int lp873x_buck_ramp_delay[] = {
30000, 15000, 10000, 7500, 3800, 1900, 940, 470
};
@@ -108,45 +112,8 @@ static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev,
return 0;
}
-static int lp873x_buck_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- int id = rdev_get_id(rdev);
- struct lp873x *lp873 = rdev_get_drvdata(rdev);
- int i;
-
- for (i = ARRAY_SIZE(lp873x_buck_uA) - 1; i >= 0; i--) {
- if (lp873x_buck_uA[i] >= min_uA &&
- lp873x_buck_uA[i] <= max_uA)
- return regmap_update_bits(lp873->regmap,
- regulators[id].ctrl2_reg,
- LP873X_BUCK0_CTRL_2_BUCK0_ILIM,
- i << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM));
- }
-
- return -EINVAL;
-}
-
-static int lp873x_buck_get_current_limit(struct regulator_dev *rdev)
-{
- int id = rdev_get_id(rdev);
- struct lp873x *lp873 = rdev_get_drvdata(rdev);
- int ret;
- unsigned int val;
-
- ret = regmap_read(lp873->regmap, regulators[id].ctrl2_reg, &val);
- if (ret)
- return ret;
-
- val = (val & LP873X_BUCK0_CTRL_2_BUCK0_ILIM) >>
- __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM);
-
- return (val < ARRAY_SIZE(lp873x_buck_uA)) ?
- lp873x_buck_uA[val] : -EINVAL;
-}
-
/* Operations permitted on BUCK0, BUCK1 */
-static struct regulator_ops lp873x_buck01_ops = {
+static const struct regulator_ops lp873x_buck01_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -156,12 +123,12 @@ static struct regulator_ops lp873x_buck01_ops = {
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = lp873x_buck_set_ramp_delay,
- .set_current_limit = lp873x_buck_set_current_limit,
- .get_current_limit = lp873x_buck_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
/* Operations permitted on LDO0 and LDO1 */
-static struct regulator_ops lp873x_ldo01_ops = {
+static const struct regulator_ops lp873x_ldo01_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 244822bb63cd..14fd38807134 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -315,7 +315,7 @@ out_i2c_error:
.vsel_mask = LP8755_BUCK_VOUT_M,\
}
-static struct regulator_desc lp8755_regulators[] = {
+static const struct regulator_desc lp8755_regulators[] = {
lp8755_buck_desc(0),
lp8755_buck_desc(1),
lp8755_buck_desc(2),
@@ -386,7 +386,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
if (ret < 0)
goto err_i2c;
- /* send OCP event to all regualtor devices */
+ /* send OCP event to all regulator devices */
if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
if (pchip->rdev[icnt] != NULL)
@@ -394,7 +394,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
LP8755_EVENT_OCP,
NULL);
- /* send OVP event to all regualtor devices */
+ /* send OVP event to all regulator devices */
if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
if (pchip->rdev[icnt] != NULL)
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index c192357d1dea..4ed41731a5b1 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -51,7 +51,7 @@ static const struct regulator_linear_range buck0_1_2_3_ranges[] = {
REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
};
-static unsigned int lp87565_buck_ramp_delay[] = {
+static const unsigned int lp87565_buck_ramp_delay[] = {
30000, 15000, 10000, 7500, 3800, 1900, 940, 470
};
@@ -140,7 +140,7 @@ static int lp87565_buck_get_current_limit(struct regulator_dev *rdev)
}
/* Operations permitted on BUCK0, BUCK1 */
-static struct regulator_ops lp87565_buck_ops = {
+static const struct regulator_ops lp87565_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index ec46290b647e..a7d30550bb5f 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -95,12 +95,10 @@ struct lp8788_buck {
void *dvs;
};
-/* BUCK 1 ~ 4 voltage table */
-static const int lp8788_buck_vtbl[] = {
- 500000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
- 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
- 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000,
- 1950000, 2000000,
+/* BUCK 1 ~ 4 voltage ranges */
+static const struct regulator_linear_range buck_volt_ranges[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 0, 0),
+ REGULATOR_LINEAR_RANGE(800000, 1, 25, 50000),
};
static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
@@ -345,8 +343,8 @@ static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev)
}
static const struct regulator_ops lp8788_buck12_ops = {
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_ascend,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = lp8788_buck12_set_voltage_sel,
.get_voltage_sel = lp8788_buck12_get_voltage_sel,
.enable = regulator_enable_regmap,
@@ -358,8 +356,8 @@ static const struct regulator_ops lp8788_buck12_ops = {
};
static const struct regulator_ops lp8788_buck34_ops = {
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_ascend,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
@@ -370,13 +368,14 @@ static const struct regulator_ops lp8788_buck34_ops = {
.get_mode = lp8788_buck_get_mode,
};
-static struct regulator_desc lp8788_buck_desc[] = {
+static const struct regulator_desc lp8788_buck_desc[] = {
{
.name = "buck1",
.id = BUCK1,
.ops = &lp8788_buck12_ops,
- .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
- .volt_table = lp8788_buck_vtbl,
+ .n_voltages = 26,
+ .linear_ranges = buck_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_BUCK,
@@ -386,8 +385,9 @@ static struct regulator_desc lp8788_buck_desc[] = {
.name = "buck2",
.id = BUCK2,
.ops = &lp8788_buck12_ops,
- .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
- .volt_table = lp8788_buck_vtbl,
+ .n_voltages = 26,
+ .linear_ranges = buck_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_BUCK,
@@ -397,8 +397,9 @@ static struct regulator_desc lp8788_buck_desc[] = {
.name = "buck3",
.id = BUCK3,
.ops = &lp8788_buck34_ops,
- .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
- .volt_table = lp8788_buck_vtbl,
+ .n_voltages = 26,
+ .linear_ranges = buck_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP8788_BUCK3_VOUT,
@@ -410,8 +411,9 @@ static struct regulator_desc lp8788_buck_desc[] = {
.name = "buck4",
.id = BUCK4,
.ops = &lp8788_buck34_ops,
- .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
- .volt_table = lp8788_buck_vtbl,
+ .n_voltages = 26,
+ .linear_ranges = buck_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP8788_BUCK4_VOUT,
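
The lp8788-buck change swaps the 26-entry voltage table for two linear ranges; checking the removed table against the new REGULATOR_LINEAR_RANGE entries shows the selector-to-voltage mapping is unchanged:

	/* Derived from the removed lp8788_buck_vtbl and the new ranges:
	 *   sel 0       -> 500000 uV                      REGULATOR_LINEAR_RANGE(500000, 0, 0, 0)
	 *   sel 1 .. 25 -> 800000 + (sel - 1) * 50000 uV   REGULATOR_LINEAR_RANGE(800000, 1, 25, 50000)
	 * i.e. the same 26 voltages (0.5 V, then 0.8 V .. 2.0 V in 50 mV steps),
	 * which is why n_voltages stays at 26 in every buck descriptor. */
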
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 2ee22e7ea675..a2ef146e6b3a 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -186,7 +186,7 @@ static const struct regulator_ops lp8788_ldo_voltage_fixed_ops = {
.enable_time = lp8788_ldo_enable_time,
};
-static struct regulator_desc lp8788_dldo_desc[] = {
+static const struct regulator_desc lp8788_dldo_desc[] = {
{
.name = "dldo1",
.id = DLDO1,
@@ -343,7 +343,7 @@ static struct regulator_desc lp8788_dldo_desc[] = {
},
};
-static struct regulator_desc lp8788_aldo_desc[] = {
+static const struct regulator_desc lp8788_aldo_desc[] = {
{
.name = "aldo1",
.id = ALDO1,
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index 71fd0f2a4b76..e6d66e492b85 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -241,61 +241,10 @@ static struct regulator_desc ltc3676_regulators[LTC3676_NUM_REGULATORS] = {
LTC3676_FIXED_REG(LDO4, ldo4, LDOB, 2),
};
-static bool ltc3676_writeable_reg(struct device *dev, unsigned int reg)
+static bool ltc3676_readable_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case LTC3676_IRQSTAT:
- case LTC3676_BUCK1:
- case LTC3676_BUCK2:
- case LTC3676_BUCK3:
- case LTC3676_BUCK4:
- case LTC3676_LDOA:
- case LTC3676_LDOB:
- case LTC3676_SQD1:
- case LTC3676_SQD2:
- case LTC3676_CNTRL:
- case LTC3676_DVB1A:
- case LTC3676_DVB1B:
- case LTC3676_DVB2A:
- case LTC3676_DVB2B:
- case LTC3676_DVB3A:
- case LTC3676_DVB3B:
- case LTC3676_DVB4A:
- case LTC3676_DVB4B:
- case LTC3676_MSKIRQ:
- case LTC3676_MSKPG:
- case LTC3676_USER:
- case LTC3676_HRST:
- case LTC3676_CLIRQ:
- return true;
- }
- return false;
-}
-
-static bool ltc3676_readable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case LTC3676_IRQSTAT:
- case LTC3676_BUCK1:
- case LTC3676_BUCK2:
- case LTC3676_BUCK3:
- case LTC3676_BUCK4:
- case LTC3676_LDOA:
- case LTC3676_LDOB:
- case LTC3676_SQD1:
- case LTC3676_SQD2:
- case LTC3676_CNTRL:
- case LTC3676_DVB1A:
- case LTC3676_DVB1B:
- case LTC3676_DVB2A:
- case LTC3676_DVB2B:
- case LTC3676_DVB3A:
- case LTC3676_DVB3B:
- case LTC3676_DVB4A:
- case LTC3676_DVB4B:
- case LTC3676_MSKIRQ:
- case LTC3676_MSKPG:
- case LTC3676_USER:
+ case LTC3676_BUCK1 ... LTC3676_IRQSTAT:
case LTC3676_HRST:
case LTC3676_CLIRQ:
return true;
@@ -306,9 +255,7 @@ static bool ltc3676_readable_reg(struct device *dev, unsigned int reg)
static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case LTC3676_IRQSTAT:
- case LTC3676_PGSTATL:
- case LTC3676_PGSTATRT:
+ case LTC3676_IRQSTAT ... LTC3676_PGSTATRT:
return true;
}
return false;
@@ -317,8 +264,8 @@ static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg)
static const struct regmap_config ltc3676_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .writeable_reg = ltc3676_writeable_reg,
- .readable_reg = ltc3676_readable_reg,
+ .writeable_reg = ltc3676_readable_writeable_reg,
+ .readable_reg = ltc3676_readable_writeable_reg,
.volatile_reg = ltc3676_volatile_reg,
.max_register = LTC3676_CLIRQ,
.use_single_read = true,
@@ -442,5 +389,5 @@ static struct i2c_driver ltc3676_driver = {
module_i2c_driver(ltc3676_driver);
MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>");
-MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC1376");
+MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3676");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index bc7f4751bf9c..85a88a9e4d42 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -324,4 +324,3 @@ module_exit(max14577_regulator_exit);
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index b94e3a721721..1607ac673e44 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -1,7 +1,7 @@
/*
* Maxim MAX77620 Regulator driver
*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
* Laxman Dewangan <ldewangan@nvidia.com>
@@ -690,6 +690,7 @@ static const struct regulator_ops max77620_regulator_ops = {
.active_discharge_mask = MAX77620_SD_CFG1_ADE_MASK, \
.active_discharge_reg = MAX77620_REG_##_id##_CFG, \
.type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
}, \
}
@@ -721,6 +722,7 @@ static const struct regulator_ops max77620_regulator_ops = {
.active_discharge_mask = MAX77620_LDO_CFG2_ADE_MASK, \
.active_discharge_reg = MAX77620_REG_##_id##_CFG2, \
.type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
}, \
}
@@ -803,6 +805,14 @@ static int max77620_regulator_probe(struct platform_device *pdev)
rdesc = &rinfo[id].desc;
pmic->rinfo[id] = &max77620_regs_info[id];
pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
+ pmic->reg_pdata[id].active_fps_src = -1;
+ pmic->reg_pdata[id].active_fps_pd_slot = -1;
+ pmic->reg_pdata[id].active_fps_pu_slot = -1;
+ pmic->reg_pdata[id].suspend_fps_src = -1;
+ pmic->reg_pdata[id].suspend_fps_pd_slot = -1;
+ pmic->reg_pdata[id].suspend_fps_pu_slot = -1;
+ pmic->reg_pdata[id].power_ok = -1;
+ pmic->reg_pdata[id].ramp_rate_setting = -1;
ret = max77620_read_slew_rate(pmic, id);
if (ret < 0)
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
new file mode 100644
index 000000000000..31ebf34b01ec
--- /dev/null
+++ b/drivers/regulator/max77650-regulator.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// Regulator driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/of.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define MAX77650_REGULATOR_EN_CTRL_MASK GENMASK(3, 0)
+#define MAX77650_REGULATOR_EN_CTRL_BITS(_reg) \
+ ((_reg) & MAX77650_REGULATOR_EN_CTRL_MASK)
+#define MAX77650_REGULATOR_ENABLED GENMASK(2, 1)
+#define MAX77650_REGULATOR_DISABLED BIT(2)
+
+#define MAX77650_REGULATOR_V_LDO_MASK GENMASK(6, 0)
+#define MAX77650_REGULATOR_V_SBB_MASK GENMASK(5, 0)
+
+#define MAX77650_REGULATOR_AD_MASK BIT(3)
+#define MAX77650_REGULATOR_AD_DISABLED 0x00
+#define MAX77650_REGULATOR_AD_ENABLED BIT(3)
+
+#define MAX77650_REGULATOR_CURR_LIM_MASK GENMASK(7, 6)
+
+enum {
+ MAX77650_REGULATOR_ID_LDO = 0,
+ MAX77650_REGULATOR_ID_SBB0,
+ MAX77650_REGULATOR_ID_SBB1,
+ MAX77650_REGULATOR_ID_SBB2,
+ MAX77650_REGULATOR_NUM_REGULATORS,
+};
+
+struct max77650_regulator_desc {
+ struct regulator_desc desc;
+ unsigned int regA;
+ unsigned int regB;
+};
+
+static const u32 max77651_sbb1_regulator_volt_table[] = {
+ 2400000, 3200000, 4000000, 4800000,
+ 2450000, 3250000, 4050000, 4850000,
+ 2500000, 3300000, 4100000, 4900000,
+ 2550000, 3350000, 4150000, 4950000,
+ 2600000, 3400000, 4200000, 5000000,
+ 2650000, 3450000, 4250000, 5050000,
+ 2700000, 3500000, 4300000, 5100000,
+ 2750000, 3550000, 4350000, 5150000,
+ 2800000, 3600000, 4400000, 5200000,
+ 2850000, 3650000, 4450000, 5250000,
+ 2900000, 3700000, 4500000, 0,
+ 2950000, 3750000, 4550000, 0,
+ 3000000, 3800000, 4600000, 0,
+ 3050000, 3850000, 4650000, 0,
+ 3100000, 3900000, 4700000, 0,
+ 3150000, 3950000, 4750000, 0,
+};
+
+#define MAX77651_REGULATOR_SBB1_SEL_DEC(_val) \
+ (((_val & 0x3c) >> 2) | ((_val & 0x03) << 4))
+#define MAX77651_REGULATOR_SBB1_SEL_ENC(_val) \
+ (((_val & 0x30) >> 4) | ((_val & 0x0f) << 2))
+
+#define MAX77650_REGULATOR_SBB1_SEL_DECR(_val) \
+ do { \
+ _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \
+ _val--; \
+ _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \
+ } while (0)
+
+#define MAX77650_REGULATOR_SBB1_SEL_INCR(_val) \
+ do { \
+ _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \
+ _val++; \
+ _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \
+ } while (0)
+
+static const unsigned int max77650_current_limit_table[] = {
+ 1000000, 866000, 707000, 500000,
+};
+
+static int max77650_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct max77650_regulator_desc *rdesc;
+ struct regmap *map;
+ int val, rv, en;
+
+ rdesc = rdev_get_drvdata(rdev);
+ map = rdev_get_regmap(rdev);
+
+ rv = regmap_read(map, rdesc->regB, &val);
+ if (rv)
+ return rv;
+
+ en = MAX77650_REGULATOR_EN_CTRL_BITS(val);
+
+ return en != MAX77650_REGULATOR_DISABLED;
+}
+
+static int max77650_regulator_enable(struct regulator_dev *rdev)
+{
+ struct max77650_regulator_desc *rdesc;
+ struct regmap *map;
+
+ rdesc = rdev_get_drvdata(rdev);
+ map = rdev_get_regmap(rdev);
+
+ return regmap_update_bits(map, rdesc->regB,
+ MAX77650_REGULATOR_EN_CTRL_MASK,
+ MAX77650_REGULATOR_ENABLED);
+}
+
+static int max77650_regulator_disable(struct regulator_dev *rdev)
+{
+ struct max77650_regulator_desc *rdesc;
+ struct regmap *map;
+
+ rdesc = rdev_get_drvdata(rdev);
+ map = rdev_get_regmap(rdev);
+
+ return regmap_update_bits(map, rdesc->regB,
+ MAX77650_REGULATOR_EN_CTRL_MASK,
+ MAX77650_REGULATOR_DISABLED);
+}
+
+static int max77650_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ int rv = 0, curr, diff;
+ bool ascending;
+
+ /*
+ * If the regulator is disabled, we can program the desired
+ * voltage right away.
+ */
+ if (!max77650_regulator_is_enabled(rdev))
+ return regulator_set_voltage_sel_regmap(rdev, sel);
+
+ /*
+ * Otherwise we need to manually ramp the output voltage up/down
+ * one step at a time.
+ */
+
+ curr = regulator_get_voltage_sel_regmap(rdev);
+ if (curr < 0)
+ return curr;
+
+ diff = curr - sel;
+ if (diff == 0)
+ return 0; /* Already there. */
+ else if (diff > 0)
+ ascending = false;
+ else
+ ascending = true;
+
+ /*
+ * Make sure we'll get to the right voltage and break the loop even if
+ * the selector equals 0.
+ */
+ for (ascending ? curr++ : curr--;; ascending ? curr++ : curr--) {
+ rv = regulator_set_voltage_sel_regmap(rdev, curr);
+ if (rv)
+ return rv;
+
+ if (curr == sel)
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Special case: non-linear voltage table for max77651 SBB1 - software
+ * must ensure the voltage is ramped in 50mV increments.
+ */
+static int max77651_regulator_sbb1_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ int rv = 0, curr, vcurr, vdest, vdiff;
+
+ /*
+ * If the regulator is disabled, we can program the desired
+ * voltage right away.
+ */
+ if (!max77650_regulator_is_enabled(rdev))
+ return regulator_set_voltage_sel_regmap(rdev, sel);
+
+ curr = regulator_get_voltage_sel_regmap(rdev);
+ if (curr < 0)
+ return curr;
+
+ if (curr == sel)
+ return 0; /* Already there. */
+
+ vcurr = max77651_sbb1_regulator_volt_table[curr];
+ vdest = max77651_sbb1_regulator_volt_table[sel];
+ vdiff = vcurr - vdest;
+
+ for (;;) {
+ if (vdiff > 0)
+ MAX77650_REGULATOR_SBB1_SEL_DECR(curr);
+ else
+ MAX77650_REGULATOR_SBB1_SEL_INCR(curr);
+
+ rv = regulator_set_voltage_sel_regmap(rdev, curr);
+ if (rv)
+ return rv;
+
+ if (curr == sel)
+ break;
+ }
+
+ return 0;
+}
+
+static const struct regulator_ops max77650_regulator_LDO_ops = {
+ .is_enabled = max77650_regulator_is_enabled,
+ .enable = max77650_regulator_enable,
+ .disable = max77650_regulator_disable,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = max77650_regulator_set_voltage_sel,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops max77650_regulator_SBB_ops = {
+ .is_enabled = max77650_regulator_is_enabled,
+ .enable = max77650_regulator_enable,
+ .disable = max77650_regulator_disable,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = max77650_regulator_set_voltage_sel,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+/* Special case for max77651 SBB1 - non-linear voltage mapping. */
+static const struct regulator_ops max77651_SBB1_regulator_ops = {
+ .is_enabled = max77650_regulator_is_enabled,
+ .enable = max77650_regulator_enable,
+ .disable = max77650_regulator_disable,
+ .list_voltage = regulator_list_voltage_table,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = max77651_regulator_sbb1_set_voltage_sel,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static struct max77650_regulator_desc max77650_LDO_desc = {
+ .desc = {
+ .name = "ldo",
+ .of_match = of_match_ptr("ldo"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-ldo",
+ .id = MAX77650_REGULATOR_ID_LDO,
+ .ops = &max77650_regulator_LDO_ops,
+ .min_uV = 1350000,
+ .uV_step = 12500,
+ .n_voltages = 128,
+ .vsel_mask = MAX77650_REGULATOR_V_LDO_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_LDO_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_LDO_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ .regA = MAX77650_REG_CNFG_LDO_A,
+ .regB = MAX77650_REG_CNFG_LDO_B,
+};
+
+static struct max77650_regulator_desc max77650_SBB0_desc = {
+ .desc = {
+ .name = "sbb0",
+ .of_match = of_match_ptr("sbb0"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb0",
+ .id = MAX77650_REGULATOR_ID_SBB0,
+ .ops = &max77650_regulator_SBB_ops,
+ .min_uV = 800000,
+ .uV_step = 25000,
+ .n_voltages = 64,
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB0_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB0_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB0_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB0_A,
+ .regB = MAX77650_REG_CNFG_SBB0_B,
+};
+
+static struct max77650_regulator_desc max77650_SBB1_desc = {
+ .desc = {
+ .name = "sbb1",
+ .of_match = of_match_ptr("sbb1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb1",
+ .id = MAX77650_REGULATOR_ID_SBB1,
+ .ops = &max77650_regulator_SBB_ops,
+ .min_uV = 800000,
+ .uV_step = 12500,
+ .n_voltages = 64,
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB1_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB1_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB1_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB1_A,
+ .regB = MAX77650_REG_CNFG_SBB1_B,
+};
+
+static struct max77650_regulator_desc max77651_SBB1_desc = {
+ .desc = {
+ .name = "sbb1",
+ .of_match = of_match_ptr("sbb1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb1",
+ .id = MAX77650_REGULATOR_ID_SBB1,
+ .ops = &max77651_SBB1_regulator_ops,
+ .volt_table = max77651_sbb1_regulator_volt_table,
+ .n_voltages = ARRAY_SIZE(max77651_sbb1_regulator_volt_table),
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB1_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB1_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB1_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB1_A,
+ .regB = MAX77650_REG_CNFG_SBB1_B,
+};
+
+static struct max77650_regulator_desc max77650_SBB2_desc = {
+ .desc = {
+ .name = "sbb2",
+ .of_match = of_match_ptr("sbb2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb0",
+ .id = MAX77650_REGULATOR_ID_SBB2,
+ .ops = &max77650_regulator_SBB_ops,
+ .min_uV = 800000,
+ .uV_step = 50000,
+ .n_voltages = 64,
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB2_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB2_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB2_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB2_A,
+ .regB = MAX77650_REG_CNFG_SBB2_B,
+};
+
+static struct max77650_regulator_desc max77651_SBB2_desc = {
+ .desc = {
+ .name = "sbb2",
+ .of_match = of_match_ptr("sbb2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb0",
+ .id = MAX77650_REGULATOR_ID_SBB2,
+ .ops = &max77650_regulator_SBB_ops,
+ .min_uV = 2400000,
+ .uV_step = 50000,
+ .n_voltages = 64,
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB2_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB2_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB2_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB2_A,
+ .regB = MAX77650_REG_CNFG_SBB2_B,
+};
+
+static int max77650_regulator_probe(struct platform_device *pdev)
+{
+ struct max77650_regulator_desc **rdescs;
+ struct max77650_regulator_desc *rdesc;
+ struct regulator_config config = { };
+ struct device *dev, *parent;
+ struct regulator_dev *rdev;
+ struct regmap *map;
+ unsigned int val;
+ int i, rv;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+
+ if (!dev->of_node)
+ dev->of_node = parent->of_node;
+
+ rdescs = devm_kcalloc(dev, MAX77650_REGULATOR_NUM_REGULATORS,
+ sizeof(*rdescs), GFP_KERNEL);
+ if (!rdescs)
+ return -ENOMEM;
+
+ map = dev_get_regmap(parent, NULL);
+ if (!map)
+ return -ENODEV;
+
+ rv = regmap_read(map, MAX77650_REG_CID, &val);
+ if (rv)
+ return rv;
+
+ rdescs[MAX77650_REGULATOR_ID_LDO] = &max77650_LDO_desc;
+ rdescs[MAX77650_REGULATOR_ID_SBB0] = &max77650_SBB0_desc;
+
+ switch (MAX77650_CID_BITS(val)) {
+ case MAX77650_CID_77650A:
+ case MAX77650_CID_77650C:
+ rdescs[MAX77650_REGULATOR_ID_SBB1] = &max77650_SBB1_desc;
+ rdescs[MAX77650_REGULATOR_ID_SBB2] = &max77650_SBB2_desc;
+ break;
+ case MAX77650_CID_77651A:
+ case MAX77650_CID_77651B:
+ rdescs[MAX77650_REGULATOR_ID_SBB1] = &max77651_SBB1_desc;
+ rdescs[MAX77650_REGULATOR_ID_SBB2] = &max77651_SBB2_desc;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ config.dev = parent;
+
+ for (i = 0; i < MAX77650_REGULATOR_NUM_REGULATORS; i++) {
+ rdesc = rdescs[i];
+ config.driver_data = rdesc;
+
+ rdev = devm_regulator_register(dev, &rdesc->desc, &config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver max77650_regulator_driver = {
+ .driver = {
+ .name = "max77650-regulator",
+ },
+ .probe = max77650_regulator_probe,
+};
+module_platform_driver(max77650_regulator_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 regulator driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index c30cf5c9f2de..ea7b50397300 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -248,9 +248,9 @@ static int max77802_set_ramp_delay_2bit(struct regulator_dev *rdev,
unsigned int ramp_value;
if (id > MAX77802_BUCK4) {
- dev_warn(&rdev->dev,
- "%s: regulator: ramp delay not supported\n",
- rdev->desc->name);
+ dev_warn(&rdev->dev,
+ "%s: regulator: ramp delay not supported\n",
+ rdev->desc->name);
return -EINVAL;
}
ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_2bit,
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 8fd1adc9c9a9..ab558b26cd7c 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -226,69 +226,69 @@ static const unsigned int mc13783_pwgtdrv_val[] = {
5500000,
};
-static struct regulator_ops mc13783_gpo_regulator_ops;
+static const struct regulator_ops mc13783_gpo_regulator_ops;
-#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages) \
- MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \
+#define MC13783_DEFINE(prefix, name, node, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13783_REG_, name, node, reg, vsel_reg, voltages, \
mc13xxx_regulator_ops)
-#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages) \
- MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \
+#define MC13783_FIXED_DEFINE(prefix, name, node, reg, voltages) \
+ MC13xxx_FIXED_DEFINE(MC13783_REG_, name, node, reg, voltages, \
mc13xxx_fixed_regulator_ops)
-#define MC13783_GPO_DEFINE(prefix, name, reg, voltages) \
- MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \
+#define MC13783_GPO_DEFINE(prefix, name, node, reg, voltages) \
+ MC13xxx_GPO_DEFINE(MC13783_REG_, name, node, reg, voltages, \
mc13783_gpo_regulator_ops)
-#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \
- MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
-#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \
- MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
+#define MC13783_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages)
+#define MC13783_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages)
static struct mc13xxx_regulator mc13783_regulators[] = {
- MC13783_DEFINE_SW(SW1A, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val),
- MC13783_DEFINE_SW(SW1B, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val),
- MC13783_DEFINE_SW(SW2A, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val),
- MC13783_DEFINE_SW(SW2B, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val),
- MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
-
- MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
- MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val),
- MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_SW(SW1A, sw1a, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val),
+ MC13783_DEFINE_SW(SW1B, sw1b, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val),
+ MC13783_DEFINE_SW(SW2A, sw2a, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val),
+ MC13783_DEFINE_SW(SW2B, sw2b, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val),
+ MC13783_DEFINE_SW(SW3, sw3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
+
+ MC13783_FIXED_DEFINE(REG, VAUDIO, vaudio, REGULATORMODE0, mc13783_vaudio_val),
+ MC13783_FIXED_DEFINE(REG, VIOHI, viohi, REGULATORMODE0, mc13783_viohi_val),
+ MC13783_DEFINE_REGU(VIOLO, violo, REGULATORMODE0, REGULATORSETTING0,
mc13783_violo_val),
- MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0,
mc13783_vdig_val),
- MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VGEN, vgen, REGULATORMODE0, REGULATORSETTING0,
mc13783_vgen_val),
- MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VRFDIG, vrfdig, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfdig_val),
- MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VRFREF, vrfref, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfref_val),
- MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VRFCP, vrfcp, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfcp_val),
- MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VSIM, vsim, REGULATORMODE1, REGULATORSETTING0,
mc13783_vsim_val),
- MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VESIM, vesim, REGULATORMODE1, REGULATORSETTING0,
mc13783_vesim_val),
- MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0,
mc13783_vcam_val),
- MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
- MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_FIXED_DEFINE(REG, VRFBG, vrfbg, REGULATORMODE1, mc13783_vrfbg_val),
+ MC13783_DEFINE_REGU(VVIB, vvib, REGULATORMODE1, REGULATORSETTING1,
mc13783_vvib_val),
- MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_DEFINE_REGU(VRF1, vrf1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
- MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_DEFINE_REGU(VRF2, vrf2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
- MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_DEFINE_REGU(VMMC1, vmmc1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
- MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_DEFINE_REGU(VMMC2, vmmc2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
- MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
- MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
+ MC13783_GPO_DEFINE(REG, GPO1, gpo1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO2, gpo1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO3, gpo1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO4, gpo1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, PWGT1SPI, pwgt1spi, POWERMISC, mc13783_pwgtdrv_val),
+ MC13783_GPO_DEFINE(REG, PWGT2SPI, pwgt2spi, POWERMISC, mc13783_pwgtdrv_val),
};
static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
@@ -380,7 +380,7 @@ static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
return (val & mc13xxx_regulators[id].enable_bit) != 0;
}
-static struct regulator_ops mc13783_gpo_regulator_ops = {
+static const struct regulator_ops mc13783_gpo_regulator_ops = {
.enable = mc13783_gpo_regulator_enable,
.disable = mc13783_gpo_regulator_disable,
.is_enabled = mc13783_gpo_regulator_is_enabled,
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index f3fba1cc1379..a731e826a037 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -242,61 +242,61 @@ static const unsigned int mc13892_pwgtdrv[] = {
5000000,
};
-static struct regulator_ops mc13892_gpo_regulator_ops;
-static struct regulator_ops mc13892_sw_regulator_ops;
+static const struct regulator_ops mc13892_gpo_regulator_ops;
+static const struct regulator_ops mc13892_sw_regulator_ops;
-#define MC13892_FIXED_DEFINE(name, reg, voltages) \
- MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages, \
+#define MC13892_FIXED_DEFINE(name, node, reg, voltages) \
+ MC13xxx_FIXED_DEFINE(MC13892_, name, node, reg, voltages, \
mc13xxx_fixed_regulator_ops)
-#define MC13892_GPO_DEFINE(name, reg, voltages) \
- MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages, \
+#define MC13892_GPO_DEFINE(name, node, reg, voltages) \
+ MC13xxx_GPO_DEFINE(MC13892_, name, node, reg, voltages, \
mc13892_gpo_regulator_ops)
-#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages) \
- MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+#define MC13892_SW_DEFINE(name, node, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \
mc13892_sw_regulator_ops)
-#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages) \
- MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+#define MC13892_DEFINE_REGU(name, node, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \
mc13xxx_regulator_ops)
static struct mc13xxx_regulator mc13892_regulators[] = {
- MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell),
- MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
- MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw),
- MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw),
- MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
- MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
- MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
- MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VCOINCELL, vcoincell, POWERCTL0, POWERCTL0, mc13892_vcoincell),
+ MC13892_SW_DEFINE(SW1, sw1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
+ MC13892_SW_DEFINE(SW2, sw2, SWITCHERS1, SWITCHERS1, mc13892_sw),
+ MC13892_SW_DEFINE(SW3, sw3, SWITCHERS2, SWITCHERS2, mc13892_sw),
+ MC13892_SW_DEFINE(SW4, sw4, SWITCHERS3, SWITCHERS3, mc13892_sw),
+ MC13892_FIXED_DEFINE(SWBST, swbst, SWITCHERS5, mc13892_swbst),
+ MC13892_FIXED_DEFINE(VIOHI, viohi, REGULATORMODE0, mc13892_viohi),
+ MC13892_DEFINE_REGU(VPLL, vpll, REGULATORMODE0, REGULATORSETTING0,
mc13892_vpll),
- MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0,
mc13892_vdig),
- MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1,
+ MC13892_DEFINE_REGU(VSD, vsd, REGULATORMODE1, REGULATORSETTING1,
mc13892_vsd),
- MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VUSB2, vusb2, REGULATORMODE0, REGULATORSETTING0,
mc13892_vusb2),
- MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1,
+ MC13892_DEFINE_REGU(VVIDEO, vvideo, REGULATORMODE1, REGULATORSETTING1,
mc13892_vvideo),
- MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1,
+ MC13892_DEFINE_REGU(VAUDIO, vaudio, REGULATORMODE1, REGULATORSETTING1,
mc13892_vaudio),
- MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0,
mc13892_vcam),
- MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VGEN1, vgen1, REGULATORMODE0, REGULATORSETTING0,
mc13892_vgen1),
- MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VGEN2, vgen2, REGULATORMODE0, REGULATORSETTING0,
mc13892_vgen2),
- MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VGEN3, vgen3, REGULATORMODE1, REGULATORSETTING0,
mc13892_vgen3),
- MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
- MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
- MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo),
- MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo),
- MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo),
- MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv),
- MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv),
+ MC13892_FIXED_DEFINE(VUSB, vusb, USB1, mc13892_vusb),
+ MC13892_GPO_DEFINE(GPO1, gpo1, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO2, gpo2, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO3, gpo3, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO4, gpo4, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(PWGT1SPI, pwgt1spi, POWERMISC, mc13892_pwgtdrv),
+ MC13892_GPO_DEFINE(PWGT2SPI, pwgt2spi, POWERMISC, mc13892_pwgtdrv),
};
static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
@@ -387,7 +387,7 @@ static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev)
}
-static struct regulator_ops mc13892_gpo_regulator_ops = {
+static const struct regulator_ops mc13892_gpo_regulator_ops = {
.enable = mc13892_gpo_regulator_enable,
.disable = mc13892_gpo_regulator_disable,
.is_enabled = mc13892_gpo_regulator_is_enabled,
@@ -479,7 +479,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
-static struct regulator_ops mc13892_sw_regulator_ops = {
+static const struct regulator_ops mc13892_sw_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = mc13892_sw_regulator_set_voltage_sel,
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 2243138d8a58..8ff19150ca92 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -99,7 +99,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
return rdev->desc->volt_table[val];
}
-struct regulator_ops mc13xxx_regulator_ops = {
+const struct regulator_ops mc13xxx_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
@@ -127,7 +127,7 @@ int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
}
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
-struct regulator_ops mc13xxx_fixed_regulator_ops = {
+const struct regulator_ops mc13xxx_fixed_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 2ab9bfd93b4e..ba7eff1070bd 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -53,13 +53,13 @@ static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
}
#endif
-extern struct regulator_ops mc13xxx_regulator_ops;
-extern struct regulator_ops mc13xxx_fixed_regulator_ops;
+extern const struct regulator_ops mc13xxx_regulator_ops;
+extern const struct regulator_ops mc13xxx_fixed_regulator_ops;
-#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \
+#define MC13xxx_DEFINE(prefix, _name, _node, _reg, _vsel_reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #_name, \
+ .name = #_node, \
.n_voltages = ARRAY_SIZE(_voltages), \
.volt_table = _voltages, \
.ops = &_ops, \
@@ -74,10 +74,10 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\
}
-#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \
+#define MC13xxx_FIXED_DEFINE(prefix, _name, _node, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #_name, \
+ .name = #_node, \
.n_voltages = ARRAY_SIZE(_voltages), \
.volt_table = _voltages, \
.ops = &_ops, \
@@ -89,10 +89,10 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.enable_bit = prefix ## _reg ## _ ## _name ## EN, \
}
-#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \
+#define MC13xxx_GPO_DEFINE(prefix, _name, _node, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #_name, \
+ .name = #_node, \
.n_voltages = ARRAY_SIZE(_voltages), \
.volt_table = _voltages, \
.ops = &_ops, \
@@ -104,9 +104,9 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.enable_bit = prefix ## _reg ## _ ## _name ## EN, \
}
-#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \
- MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops)
-#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops) \
- MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops)
+#define MC13xxx_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages, ops) \
+ MC13xxx_DEFINE(SW, _name, _node, _reg, _vsel_reg, _voltages, ops)
+#define MC13xxx_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages, ops) \
+ MC13xxx_DEFINE(REGU, _name, _node, _reg, _vsel_reg, _voltages, ops)
#endif
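
The extra _node argument threaded through the mc13783/mc13892 tables and the mc13xxx.h macros only changes the exposed regulator name: it is stringified into .name so the regulator is named after the lower-case devicetree node, while the register and mask tokens are still pasted together from the upper-case _name. A rough partial expansion (only fields visible in the hunks above are shown):

	/* MC13892_SW_DEFINE(SW1, sw1, SWITCHERS0, SWITCHERS0, mc13892_sw1)
	 * now expands, roughly, to: */
	[MC13892_SW1] = {
		.desc = {
			.name		= "sw1",	/* from #_node */
			.n_voltages	= ARRAY_SIZE(mc13892_sw1),
			.volt_table	= mc13892_sw1,
			.ops		= &mc13892_sw_regulator_ops,
			.vsel_mask	= MC13892_SWITCHERS0_SW1VSEL_M,
			/* ... remaining fields unchanged ... */
		},
	},
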
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 3479ae009b0b..3a8004abe044 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -17,6 +17,7 @@
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/suspend.h>
+#include <linux/gpio/consumer.h>
#define VDD_LOW_SEL 0x0D
#define VDD_HIGH_SEL 0x3F
@@ -546,7 +547,6 @@ static struct i2c_driver mcp16502_drv = {
module_i2c_driver(mcp16502_drv);
-MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MCP16502 PMIC driver");
MODULE_AUTHOR("Andrei Stefanescu andrei.stefanescu@microchip.com");
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index 0495716fd35f..01d69f43d2b0 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -38,13 +38,9 @@ static const struct regmap_config mt6311_regmap_config = {
#define MT6311_MAX_UV 1393750
#define MT6311_STEP_UV 6250
-static const struct regulator_linear_range buck_volt_range[] = {
- REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV),
-};
-
static const struct regulator_ops mt6311_buck_ops = {
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
@@ -71,8 +67,6 @@ static const struct regulator_ops mt6311_ldo_ops = {
.min_uV = MT6311_MIN_UV,\
.uV_step = MT6311_STEP_UV,\
.owner = THIS_MODULE,\
- .linear_ranges = buck_volt_range, \
- .n_linear_ranges = ARRAY_SIZE(buck_volt_range), \
.enable_reg = MT6311_VDVFS11_CON9,\
.enable_mask = MT6311_PMIC_VDVFS11_EN_MASK,\
.vsel_reg = MT6311_VDVFS11_CON12,\
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index ffa5fc3724e4..7b6bf3536271 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -255,7 +255,7 @@ static void of_get_regulation_constraints(struct device_node *np,
* @desc: regulator description
*
* Populates regulator_init_data structure by extracting data from device
- * tree node, returns a pointer to the populated struture or NULL if memory
+ * tree node, returns a pointer to the populated structure or NULL if memory
* alloc fails.
*/
struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
@@ -547,7 +547,7 @@ bool of_check_coupling_data(struct regulator_dev *rdev)
NULL);
if (c_n_phandles != n_phandles) {
- dev_err(&rdev->dev, "number of couped reg phandles mismatch\n");
+ dev_err(&rdev->dev, "number of coupled reg phandles mismatch\n");
ret = false;
goto clean;
}
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c2cc392a27d4..7fb9e8dd834e 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -382,7 +382,7 @@ static struct palmas_sleep_requestor_info tps65917_sleep_req_info[] = {
EXTERNAL_REQUESTOR_TPS65917(LDO5, 2, 4),
};
-static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
+static const unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
#define SMPS_CTRL_MODE_OFF 0x00
#define SMPS_CTRL_MODE_ON 0x01
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index a9446056435f..1600f9821891 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -53,10 +53,6 @@ enum {
struct pv88060_regulator {
struct regulator_desc desc;
- /* Current limiting */
- unsigned n_current_limits;
- const int *current_limits;
- unsigned int limit_mask;
unsigned int conf; /* buck configuration register */
};
@@ -75,7 +71,7 @@ static const struct regmap_config pv88060_regmap_config = {
* Entry indexes corresponds to register values.
*/
-static const int pv88060_buck1_limits[] = {
+static const unsigned int pv88060_buck1_limits[] = {
1496000, 2393000, 3291000, 4189000
};
@@ -128,40 +124,6 @@ static int pv88060_buck_set_mode(struct regulator_dev *rdev,
PV88060_BUCK_MODE_MASK, val);
}
-static int pv88060_set_current_limit(struct regulator_dev *rdev, int min,
- int max)
-{
- struct pv88060_regulator *info = rdev_get_drvdata(rdev);
- int i;
-
- /* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
- if (min <= info->current_limits[i]
- && max >= info->current_limits[i]) {
- return regmap_update_bits(rdev->regmap,
- info->conf,
- info->limit_mask,
- i << PV88060_BUCK_ILIM_SHIFT);
- }
- }
-
- return -EINVAL;
-}
-
-static int pv88060_get_current_limit(struct regulator_dev *rdev)
-{
- struct pv88060_regulator *info = rdev_get_drvdata(rdev);
- unsigned int data;
- int ret;
-
- ret = regmap_read(rdev->regmap, info->conf, &data);
- if (ret < 0)
- return ret;
-
- data = (data & info->limit_mask) >> PV88060_BUCK_ILIM_SHIFT;
- return info->current_limits[data];
-}
-
static const struct regulator_ops pv88060_buck_ops = {
.get_mode = pv88060_buck_get_mode,
.set_mode = pv88060_buck_set_mode,
@@ -171,8 +133,8 @@ static const struct regulator_ops pv88060_buck_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = pv88060_set_current_limit,
- .get_current_limit = pv88060_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static const struct regulator_ops pv88060_ldo_ops = {
@@ -184,6 +146,12 @@ static const struct regulator_ops pv88060_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
};
+static const struct regulator_ops pv88060_sw_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
#define PV88060_BUCK(chip, regl_name, min, step, max, limits_array) \
{\
.desc = {\
@@ -201,10 +169,11 @@ static const struct regulator_ops pv88060_ldo_ops = {
.enable_mask = PV88060_BUCK_EN, \
.vsel_reg = PV88060_REG_##regl_name##_CONF0,\
.vsel_mask = PV88060_VBUCK_MASK,\
+ .curr_table = limits_array,\
+ .n_current_limits = ARRAY_SIZE(limits_array),\
+ .csel_reg = PV88060_REG_##regl_name##_CONF1,\
+ .csel_mask = PV88060_BUCK_ILIM_MASK,\
},\
- .current_limits = limits_array,\
- .n_current_limits = ARRAY_SIZE(limits_array),\
- .limit_mask = PV88060_BUCK_ILIM_MASK, \
.conf = PV88060_REG_##regl_name##_CONF1,\
}
@@ -237,9 +206,8 @@ static const struct regulator_ops pv88060_ldo_ops = {
.regulators_node = of_match_ptr("regulators"),\
.type = REGULATOR_VOLTAGE,\
.owner = THIS_MODULE,\
- .ops = &pv88060_ldo_ops,\
- .min_uV = max,\
- .uV_step = 0,\
+ .ops = &pv88060_sw_ops,\
+ .fixed_uV = max,\
.n_voltages = 1,\
.enable_reg = PV88060_REG_##regl_name##_CONF,\
.enable_mask = PV88060_SW_EN,\
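
Besides the current-limit conversion, the pv88060 switch outputs get a minimal pv88060_sw_ops (enable/disable/is_enabled only) and are modelled as fixed-voltage rails through .fixed_uV with .n_voltages = 1, instead of pretending to be a one-step linear regulator with .uV_step = 0. An illustrative descriptor in that style (the register names and the 5 V value are placeholders, not taken from the driver):

	static const struct regulator_desc example_sw_desc = {
		.name		= "sw",
		.type		= REGULATOR_VOLTAGE,
		.owner		= THIS_MODULE,
		.ops		= &pv88060_sw_ops,
		.fixed_uV	= 5000000,		/* placeholder value */
		.n_voltages	= 1,
		.enable_reg	= EXAMPLE_SW_CONF,	/* hypothetical register */
		.enable_mask	= EXAMPLE_SW_EN,	/* hypothetical mask */
	};
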
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index 9a08cb2de501..bdddacdbeb99 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -45,12 +45,7 @@ enum pv88080_types {
struct pv88080_regulator {
struct regulator_desc desc;
- /* Current limiting */
- unsigned int n_current_limits;
- const int *current_limits;
- unsigned int limit_mask;
unsigned int mode_reg;
- unsigned int limit_reg;
unsigned int conf2;
unsigned int conf5;
};
@@ -102,11 +97,11 @@ static const struct regmap_config pv88080_regmap_config = {
* Entry indexes corresponds to register values.
*/
-static const int pv88080_buck1_limits[] = {
+static const unsigned int pv88080_buck1_limits[] = {
3230000, 5130000, 6960000, 8790000
};
-static const int pv88080_buck23_limits[] = {
+static const unsigned int pv88080_buck23_limits[] = {
1496000, 2393000, 3291000, 4189000
};
@@ -272,40 +267,6 @@ static int pv88080_buck_set_mode(struct regulator_dev *rdev,
PV88080_BUCK1_MODE_MASK, val);
}
-static int pv88080_set_current_limit(struct regulator_dev *rdev, int min,
- int max)
-{
- struct pv88080_regulator *info = rdev_get_drvdata(rdev);
- int i;
-
- /* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
- if (min <= info->current_limits[i]
- && max >= info->current_limits[i]) {
- return regmap_update_bits(rdev->regmap,
- info->limit_reg,
- info->limit_mask,
- i << PV88080_BUCK1_ILIM_SHIFT);
- }
- }
-
- return -EINVAL;
-}
-
-static int pv88080_get_current_limit(struct regulator_dev *rdev)
-{
- struct pv88080_regulator *info = rdev_get_drvdata(rdev);
- unsigned int data;
- int ret;
-
- ret = regmap_read(rdev->regmap, info->limit_reg, &data);
- if (ret < 0)
- return ret;
-
- data = (data & info->limit_mask) >> PV88080_BUCK1_ILIM_SHIFT;
- return info->current_limits[data];
-}
-
static const struct regulator_ops pv88080_buck_ops = {
.get_mode = pv88080_buck_get_mode,
.set_mode = pv88080_buck_set_mode,
@@ -315,8 +276,8 @@ static const struct regulator_ops pv88080_buck_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = pv88080_set_current_limit,
- .get_current_limit = pv88080_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static const struct regulator_ops pv88080_hvbuck_ops = {
@@ -341,9 +302,9 @@ static const struct regulator_ops pv88080_hvbuck_ops = {
.min_uV = min, \
.uV_step = step, \
.n_voltages = ((max) - (min))/(step) + 1, \
+ .curr_table = limits_array, \
+ .n_current_limits = ARRAY_SIZE(limits_array), \
},\
- .current_limits = limits_array, \
- .n_current_limits = ARRAY_SIZE(limits_array), \
}
#define PV88080_HVBUCK(chip, regl_name, min, step, max) \
@@ -521,9 +482,9 @@ static int pv88080_i2c_probe(struct i2c_client *i2c,
if (init_data)
config.init_data = &init_data[i];
- pv88080_regulator_info[i].limit_reg
+ pv88080_regulator_info[i].desc.csel_reg
= regmap_config->buck_regmap[i].buck_limit_reg;
- pv88080_regulator_info[i].limit_mask
+ pv88080_regulator_info[i].desc.csel_mask
= regmap_config->buck_regmap[i].buck_limit_mask;
pv88080_regulator_info[i].mode_reg
= regmap_config->buck_regmap[i].buck_mode_reg;
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 7a0c15957bd0..6e97cc6df2ee 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -42,10 +42,6 @@ enum {
struct pv88090_regulator {
struct regulator_desc desc;
- /* Current limiting */
- unsigned int n_current_limits;
- const int *current_limits;
- unsigned int limit_mask;
unsigned int conf;
unsigned int conf2;
};
@@ -71,14 +67,14 @@ static const struct regmap_config pv88090_regmap_config = {
* Entry indexes corresponds to register values.
*/
-static const int pv88090_buck1_limits[] = {
+static const unsigned int pv88090_buck1_limits[] = {
220000, 440000, 660000, 880000, 1100000, 1320000, 1540000, 1760000,
1980000, 2200000, 2420000, 2640000, 2860000, 3080000, 3300000, 3520000,
3740000, 3960000, 4180000, 4400000, 4620000, 4840000, 5060000, 5280000,
5500000, 5720000, 5940000, 6160000, 6380000, 6600000, 6820000, 7040000
};
-static const int pv88090_buck23_limits[] = {
+static const unsigned int pv88090_buck23_limits[] = {
1496000, 2393000, 3291000, 4189000
};
@@ -150,40 +146,6 @@ static int pv88090_buck_set_mode(struct regulator_dev *rdev,
PV88090_BUCK1_MODE_MASK, val);
}
-static int pv88090_set_current_limit(struct regulator_dev *rdev, int min,
- int max)
-{
- struct pv88090_regulator *info = rdev_get_drvdata(rdev);
- int i;
-
- /* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
- if (min <= info->current_limits[i]
- && max >= info->current_limits[i]) {
- return regmap_update_bits(rdev->regmap,
- info->conf,
- info->limit_mask,
- i << PV88090_BUCK1_ILIM_SHIFT);
- }
- }
-
- return -EINVAL;
-}
-
-static int pv88090_get_current_limit(struct regulator_dev *rdev)
-{
- struct pv88090_regulator *info = rdev_get_drvdata(rdev);
- unsigned int data;
- int ret;
-
- ret = regmap_read(rdev->regmap, info->conf, &data);
- if (ret < 0)
- return ret;
-
- data = (data & info->limit_mask) >> PV88090_BUCK1_ILIM_SHIFT;
- return info->current_limits[data];
-}
-
static const struct regulator_ops pv88090_buck_ops = {
.get_mode = pv88090_buck_get_mode,
.set_mode = pv88090_buck_set_mode,
@@ -193,8 +155,8 @@ static const struct regulator_ops pv88090_buck_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = pv88090_set_current_limit,
- .get_current_limit = pv88090_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static const struct regulator_ops pv88090_ldo_ops = {
@@ -223,10 +185,11 @@ static const struct regulator_ops pv88090_ldo_ops = {
.enable_mask = PV88090_##regl_name##_EN, \
.vsel_reg = PV88090_REG_##regl_name##_CONF0, \
.vsel_mask = PV88090_V##regl_name##_MASK, \
+ .curr_table = limits_array, \
+ .n_current_limits = ARRAY_SIZE(limits_array), \
+ .csel_reg = PV88090_REG_##regl_name##_CONF1, \
+ .csel_mask = PV88090_##regl_name##_ILIM_MASK, \
},\
- .current_limits = limits_array, \
- .n_current_limits = ARRAY_SIZE(limits_array), \
- .limit_mask = PV88090_##regl_name##_ILIM_MASK, \
.conf = PV88090_REG_##regl_name##_CONF1, \
.conf2 = PV88090_REG_##regl_name##_CONF2, \
}
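
The pv88080/pv88090 hunks above move the current-limit table out of the driver's private struct and into struct regulator_desc, so the generic regmap helpers can do the selector-to-microamp translation each driver used to open-code. A minimal sketch of the resulting descriptor, with hypothetical foo_* names and register values (not taken from either driver):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

/* Ascending limits in microamps; the index written to csel_reg selects one. */
static const unsigned int foo_buck_limits[] = {
	1500000, 2000000, 2500000, 3000000,
};

static const struct regulator_ops foo_buck_ops = {
	.set_current_limit	= regulator_set_current_limit_regmap,
	.get_current_limit	= regulator_get_current_limit_regmap,
};

static const struct regulator_desc foo_buck_desc = {
	.name			= "foo-buck",
	.type			= REGULATOR_VOLTAGE,
	.owner			= THIS_MODULE,
	.ops			= &foo_buck_ops,
	.curr_table		= foo_buck_limits,
	.n_current_limits	= ARRAY_SIZE(foo_buck_limits),
	.csel_reg		= 0x12,	/* hypothetical current-limit register */
	.csel_mask		= 0x03,	/* hypothetical current-limit field */
};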
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index a2fd140eff81..3f53f9134b32 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -40,9 +40,6 @@ struct pwm_regulator_data {
/* regulator descriptor */
struct regulator_desc desc;
- /* Regulator ops */
- struct regulator_ops ops;
-
int state;
/* Enable GPIO */
@@ -231,7 +228,7 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
return 0;
}
-static struct regulator_ops pwm_regulator_voltage_table_ops = {
+static const struct regulator_ops pwm_regulator_voltage_table_ops = {
.set_voltage_sel = pwm_regulator_set_voltage_sel,
.get_voltage_sel = pwm_regulator_get_voltage_sel,
.list_voltage = pwm_regulator_list_voltage,
@@ -241,7 +238,7 @@ static struct regulator_ops pwm_regulator_voltage_table_ops = {
.is_enabled = pwm_regulator_is_enabled,
};
-static struct regulator_ops pwm_regulator_voltage_continuous_ops = {
+static const struct regulator_ops pwm_regulator_voltage_continuous_ops = {
.get_voltage = pwm_regulator_get_voltage,
.set_voltage = pwm_regulator_set_voltage,
.enable = pwm_regulator_enable,
@@ -249,7 +246,7 @@ static struct regulator_ops pwm_regulator_voltage_continuous_ops = {
.is_enabled = pwm_regulator_is_enabled,
};
-static struct regulator_desc pwm_regulator_desc = {
+static const struct regulator_desc pwm_regulator_desc = {
.name = "pwm-regulator",
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
@@ -287,9 +284,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
drvdata->state = -EINVAL;
drvdata->duty_cycle_table = duty_cycle_table;
- memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops,
- sizeof(drvdata->ops));
- drvdata->desc.ops = &drvdata->ops;
+ drvdata->desc.ops = &pwm_regulator_voltage_table_ops;
drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table);
return 0;
@@ -301,9 +296,7 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev,
u32 dutycycle_range[2] = { 0, 100 };
u32 dutycycle_unit = 100;
- memcpy(&drvdata->ops, &pwm_regulator_voltage_continuous_ops,
- sizeof(drvdata->ops));
- drvdata->desc.ops = &drvdata->ops;
+ drvdata->desc.ops = &pwm_regulator_voltage_continuous_ops;
drvdata->desc.continuous_voltage_range = true;
of_property_read_u32_array(pdev->dev.of_node,
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index f5bca77d67c1..68bc23df4213 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -31,6 +31,11 @@ struct qcom_rpm_reg {
int is_enabled;
int uV;
+ u32 load;
+
+ unsigned int enabled_updated:1;
+ unsigned int uv_updated:1;
+ unsigned int load_updated:1;
};
struct rpm_regulator_req {
@@ -43,30 +48,59 @@ struct rpm_regulator_req {
#define RPM_KEY_UV 0x00007675 /* "uv" */
#define RPM_KEY_MA 0x0000616d /* "ma" */
-static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
- struct rpm_regulator_req *req,
- size_t size)
+static int rpm_reg_write_active(struct qcom_rpm_reg *vreg)
{
- return qcom_rpm_smd_write(vreg->rpm,
- QCOM_SMD_RPM_ACTIVE_STATE,
- vreg->type,
- vreg->id,
- req, size);
+ struct rpm_regulator_req req[3];
+ int reqlen = 0;
+ int ret;
+
+ if (vreg->enabled_updated) {
+ req[reqlen].key = cpu_to_le32(RPM_KEY_SWEN);
+ req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
+ req[reqlen].value = cpu_to_le32(vreg->is_enabled);
+ reqlen++;
+ }
+
+ if (vreg->uv_updated && vreg->is_enabled) {
+ req[reqlen].key = cpu_to_le32(RPM_KEY_UV);
+ req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
+ req[reqlen].value = cpu_to_le32(vreg->uV);
+ reqlen++;
+ }
+
+ if (vreg->load_updated && vreg->is_enabled) {
+ req[reqlen].key = cpu_to_le32(RPM_KEY_MA);
+ req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
+ req[reqlen].value = cpu_to_le32(vreg->load / 1000);
+ reqlen++;
+ }
+
+ if (!reqlen)
+ return 0;
+
+ ret = qcom_rpm_smd_write(vreg->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ vreg->type, vreg->id,
+ req, sizeof(req[0]) * reqlen);
+ if (!ret) {
+ vreg->enabled_updated = 0;
+ vreg->uv_updated = 0;
+ vreg->load_updated = 0;
+ }
+
+ return ret;
}
static int rpm_reg_enable(struct regulator_dev *rdev)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
- struct rpm_regulator_req req;
int ret;
- req.key = cpu_to_le32(RPM_KEY_SWEN);
- req.nbytes = cpu_to_le32(sizeof(u32));
- req.value = cpu_to_le32(1);
+ vreg->is_enabled = 1;
+ vreg->enabled_updated = 1;
- ret = rpm_reg_write_active(vreg, &req, sizeof(req));
- if (!ret)
- vreg->is_enabled = 1;
+ ret = rpm_reg_write_active(vreg);
+ if (ret)
+ vreg->is_enabled = 0;
return ret;
}
@@ -81,16 +115,14 @@ static int rpm_reg_is_enabled(struct regulator_dev *rdev)
static int rpm_reg_disable(struct regulator_dev *rdev)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
- struct rpm_regulator_req req;
int ret;
- req.key = cpu_to_le32(RPM_KEY_SWEN);
- req.nbytes = cpu_to_le32(sizeof(u32));
- req.value = 0;
+ vreg->is_enabled = 0;
+ vreg->enabled_updated = 1;
- ret = rpm_reg_write_active(vreg, &req, sizeof(req));
- if (!ret)
- vreg->is_enabled = 0;
+ ret = rpm_reg_write_active(vreg);
+ if (ret)
+ vreg->is_enabled = 1;
return ret;
}
@@ -108,16 +140,15 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev,
unsigned *selector)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
- struct rpm_regulator_req req;
- int ret = 0;
+ int ret;
+ int old_uV = vreg->uV;
- req.key = cpu_to_le32(RPM_KEY_UV);
- req.nbytes = cpu_to_le32(sizeof(u32));
- req.value = cpu_to_le32(min_uV);
+ vreg->uV = min_uV;
+ vreg->uv_updated = 1;
- ret = rpm_reg_write_active(vreg, &req, sizeof(req));
- if (!ret)
- vreg->uV = min_uV;
+ ret = rpm_reg_write_active(vreg);
+ if (ret)
+ vreg->uV = old_uV;
return ret;
}
@@ -125,13 +156,16 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev,
static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
- struct rpm_regulator_req req;
+ u32 old_load = vreg->load;
+ int ret;
- req.key = cpu_to_le32(RPM_KEY_MA);
- req.nbytes = cpu_to_le32(sizeof(u32));
- req.value = cpu_to_le32(load_uA / 1000);
+ vreg->load = load_uA;
+ vreg->load_updated = 1;
+ ret = rpm_reg_write_active(vreg);
+ if (ret)
+ vreg->load = old_load;
- return rpm_reg_write_active(vreg, &req, sizeof(req));
+ return ret;
}
static const struct regulator_ops rpm_smps_ldo_ops = {
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 213b68743cc8..23713e16c286 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1,5 +1,5 @@
/*
- * Regulator driver for Rockchip RK808/RK818
+ * Regulator driver for Rockchip RK805/RK808/RK818
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*
@@ -363,28 +363,28 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev)
rdev->desc->enable_mask);
}
-static struct regulator_ops rk805_reg_ops = {
- .list_voltage = regulator_list_voltage_linear,
- .map_voltage = regulator_map_voltage_linear,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .set_suspend_voltage = rk808_set_suspend_voltage,
- .set_suspend_enable = rk805_set_suspend_enable,
- .set_suspend_disable = rk805_set_suspend_disable,
+static const struct regulator_ops rk805_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_voltage = rk808_set_suspend_voltage,
+ .set_suspend_enable = rk805_set_suspend_enable,
+ .set_suspend_disable = rk805_set_suspend_disable,
};
-static struct regulator_ops rk805_switch_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .set_suspend_enable = rk805_set_suspend_enable,
- .set_suspend_disable = rk805_set_suspend_disable,
+static const struct regulator_ops rk805_switch_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_enable = rk805_set_suspend_enable,
+ .set_suspend_disable = rk805_set_suspend_disable,
};
-static struct regulator_ops rk808_buck1_2_ops = {
+static const struct regulator_ops rk808_buck1_2_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = rk808_buck1_2_get_voltage_sel_regmap,
@@ -399,7 +399,7 @@ static struct regulator_ops rk808_buck1_2_ops = {
.set_suspend_disable = rk808_set_suspend_disable,
};
-static struct regulator_ops rk808_reg_ops = {
+static const struct regulator_ops rk808_reg_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -412,7 +412,7 @@ static struct regulator_ops rk808_reg_ops = {
.set_suspend_disable = rk808_set_suspend_disable,
};
-static struct regulator_ops rk808_reg_ops_ranges = {
+static const struct regulator_ops rk808_reg_ops_ranges = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -425,7 +425,7 @@ static struct regulator_ops rk808_reg_ops_ranges = {
.set_suspend_disable = rk808_set_suspend_disable,
};
-static struct regulator_ops rk808_switch_ops = {
+static const struct regulator_ops rk808_switch_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -433,6 +433,12 @@ static struct regulator_ops rk808_switch_ops = {
.set_suspend_disable = rk808_set_suspend_disable,
};
+static const struct regulator_linear_range rk805_buck_1_2_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(712500, 0, 59, 12500),
+ REGULATOR_LINEAR_RANGE(1800000, 60, 62, 200000),
+ REGULATOR_LINEAR_RANGE(2300000, 63, 63, 0),
+};
+
static const struct regulator_desc rk805_reg[] = {
{
.name = "DCDC_REG1",
@@ -440,11 +446,11 @@ static const struct regulator_desc rk805_reg[] = {
.of_match = of_match_ptr("DCDC_REG1"),
.regulators_node = of_match_ptr("regulators"),
.id = RK805_ID_DCDC1,
- .ops = &rk805_reg_ops,
+ .ops = &rk808_reg_ops_ranges,
.type = REGULATOR_VOLTAGE,
- .min_uV = 712500,
- .uV_step = 12500,
.n_voltages = 64,
+ .linear_ranges = rk805_buck_1_2_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk805_buck_1_2_voltage_ranges),
.vsel_reg = RK805_BUCK1_ON_VSEL_REG,
.vsel_mask = RK818_BUCK_VSEL_MASK,
.enable_reg = RK805_DCDC_EN_REG,
@@ -456,11 +462,11 @@ static const struct regulator_desc rk805_reg[] = {
.of_match = of_match_ptr("DCDC_REG2"),
.regulators_node = of_match_ptr("regulators"),
.id = RK805_ID_DCDC2,
- .ops = &rk805_reg_ops,
+ .ops = &rk808_reg_ops_ranges,
.type = REGULATOR_VOLTAGE,
- .min_uV = 712500,
- .uV_step = 12500,
.n_voltages = 64,
+ .linear_ranges = rk805_buck_1_2_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk805_buck_1_2_voltage_ranges),
.vsel_reg = RK805_BUCK2_ON_VSEL_REG,
.vsel_mask = RK818_BUCK_VSEL_MASK,
.enable_reg = RK805_DCDC_EN_REG,
@@ -796,7 +802,7 @@ static struct platform_driver rk808_regulator_driver = {
module_platform_driver(rk808_regulator_driver);
-MODULE_DESCRIPTION("regulator driver for the RK808/RK818 series PMICs");
+MODULE_DESCRIPTION("regulator driver for the RK805/RK808/RK818 series PMICs");
MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
MODULE_AUTHOR("Zhang Qing <zhangqing@rock-chips.com>");
MODULE_AUTHOR("Wadim Egorov <w.egorov@phytec.de>");
diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c
index 96d2c18e051a..639cbadc044a 100644
--- a/drivers/regulator/rt5033-regulator.c
+++ b/drivers/regulator/rt5033-regulator.c
@@ -16,14 +16,14 @@
#include <linux/mfd/rt5033-private.h>
#include <linux/regulator/of_regulator.h>
-static struct regulator_ops rt5033_safe_ldo_ops = {
+static const struct regulator_ops rt5033_safe_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops rt5033_buck_ops = {
+static const struct regulator_ops rt5033_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 095d25f3d2ea..58a1fe583a6c 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = {
regulator_desc_ldo(2, STEP_50_MV),
regulator_desc_ldo(3, STEP_50_MV),
regulator_desc_ldo(4, STEP_50_MV),
- regulator_desc_ldo(5, STEP_50_MV),
+ regulator_desc_ldo(5, STEP_25_MV),
regulator_desc_ldo(6, STEP_25_MV),
regulator_desc_ldo(7, STEP_50_MV),
regulator_desc_ldo(8, STEP_50_MV),
regulator_desc_ldo(9, STEP_50_MV),
regulator_desc_ldo(10, STEP_50_MV),
- regulator_desc_ldo(11, STEP_25_MV),
+ regulator_desc_ldo(11, STEP_50_MV),
regulator_desc_ldo(12, STEP_50_MV),
regulator_desc_ldo(13, STEP_50_MV),
regulator_desc_ldo(14, STEP_50_MV),
@@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = {
regulator_desc_ldo(19, STEP_50_MV),
regulator_desc_ldo(20, STEP_50_MV),
regulator_desc_ldo(21, STEP_50_MV),
- regulator_desc_ldo(22, STEP_25_MV),
- regulator_desc_ldo(23, STEP_25_MV),
+ regulator_desc_ldo(22, STEP_50_MV),
+ regulator_desc_ldo(23, STEP_50_MV),
regulator_desc_ldo(24, STEP_50_MV),
regulator_desc_ldo(25, STEP_50_MV),
- regulator_desc_ldo(26, STEP_50_MV),
+ regulator_desc_ldo(26, STEP_25_MV),
regulator_desc_buck1_4(1),
regulator_desc_buck1_4(2),
regulator_desc_buck1_4(3),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index ee4a23ab0663..134c62db36c5 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -362,7 +362,7 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_ldo(32, STEP_50_MV),
regulator_desc_s2mps11_ldo(33, STEP_50_MV),
regulator_desc_s2mps11_ldo(34, STEP_50_MV),
- regulator_desc_s2mps11_ldo(35, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(35, STEP_25_MV),
regulator_desc_s2mps11_ldo(36, STEP_50_MV),
regulator_desc_s2mps11_ldo(37, STEP_50_MV),
regulator_desc_s2mps11_ldo(38, STEP_50_MV),
@@ -372,8 +372,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
regulator_desc_s2mps11_buck9,
regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
};
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index b581f01f3395..bb9d1a083299 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -115,7 +115,7 @@ static const struct sec_voltage_desc *reg_voltage_map[] = {
[S5M8767_BUCK9] = &buck_voltage_val3,
};
-static unsigned int s5m8767_opmode_reg[][4] = {
+static const unsigned int s5m8767_opmode_reg[][4] = {
/* {OFF, ON, LOWPOWER, SUSPEND} */
/* LDO1 ... LDO28 */
{0x0, 0x3, 0x2, 0x1}, /* LDO1 */
@@ -339,13 +339,9 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_sel)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- const struct sec_voltage_desc *desc;
- int reg_id = rdev_get_id(rdev);
-
- desc = reg_voltage_map[reg_id];
if ((old_sel < new_sel) && s5m8767->ramp_delay)
- return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
+ return DIV_ROUND_UP(rdev->desc->uV_step * (new_sel - old_sel),
s5m8767->ramp_delay * 1000);
return 0;
}
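
The s5m8767 hunk above computes the DVS settle time from the descriptor's uV_step instead of a private voltage map, keeping the formula DIV_ROUND_UP(uV_step * (new_sel - old_sel), ramp_delay * 1000) with ramp_delay in mV/us. As a worked example with hypothetical numbers (a 12500 uV step, an 8-selector jump, ramp_delay = 10 mV/us): DIV_ROUND_UP(12500 * 8, 10 * 1000) = DIV_ROUND_UP(100000, 10000) = 10 us.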
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index e0a9c445ed67..ba2f24949dc9 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/pm_runtime.h>
/* STM32 VREFBUF registers */
#define STM32_VREFBUF_CSR 0x00
@@ -25,9 +26,12 @@
#define STM32_HIZ BIT(1)
#define STM32_ENVR BIT(0)
+#define STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS 10
+
struct stm32_vrefbuf {
void __iomem *base;
struct clk *clk;
+ struct device *dev;
};
static const unsigned int stm32_vrefbuf_voltages[] = {
@@ -38,9 +42,16 @@ static const unsigned int stm32_vrefbuf_voltages[] = {
static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
- u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ u32 val;
int ret;
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
val = (val & ~STM32_HIZ) | STM32_ENVR;
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
@@ -59,45 +70,95 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
}
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
return ret;
}
static int stm32_vrefbuf_disable(struct regulator_dev *rdev)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
- u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ u32 val;
+ int ret;
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
val = (val & ~STM32_ENVR) | STM32_HIZ;
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
return 0;
}
static int stm32_vrefbuf_is_enabled(struct regulator_dev *rdev)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ret = readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR;
- return readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR;
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
+ return ret;
}
static int stm32_vrefbuf_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
- u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ u32 val;
+ int ret;
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
val = (val & ~STM32_VRS) | FIELD_PREP(STM32_VRS, sel);
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
return 0;
}
static int stm32_vrefbuf_get_voltage_sel(struct regulator_dev *rdev)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
- u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ u32 val;
+ int ret;
- return FIELD_GET(STM32_VRS, val);
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ ret = FIELD_GET(STM32_VRS, val);
+
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
+ return ret;
}
static const struct regulator_ops stm32_vrefbuf_volt_ops = {
@@ -130,6 +191,7 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
@@ -140,10 +202,17 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&pdev->dev, "clk prepare failed with error %d\n", ret);
- return ret;
+ goto err_pm_stop;
}
config.dev = &pdev->dev;
@@ -161,10 +230,17 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rdev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
err_clk_dis:
clk_disable_unprepare(priv->clk);
+err_pm_stop:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
return ret;
}
@@ -174,12 +250,42 @@ static int stm32_vrefbuf_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
+ pm_runtime_get_sync(&pdev->dev);
regulator_unregister(rdev);
clk_disable_unprepare(priv->clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
return 0;
};
+static int __maybe_unused stm32_vrefbuf_runtime_suspend(struct device *dev)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_vrefbuf_runtime_resume(struct device *dev)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
+
+ return clk_prepare_enable(priv->clk);
+}
+
+static const struct dev_pm_ops stm32_vrefbuf_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_vrefbuf_runtime_suspend,
+ stm32_vrefbuf_runtime_resume,
+ NULL)
+};
+
static const struct of_device_id stm32_vrefbuf_of_match[] = {
{ .compatible = "st,stm32-vrefbuf", },
{},
@@ -192,6 +298,7 @@ static struct platform_driver stm32_vrefbuf_driver = {
.driver = {
.name = "stm32-vrefbuf",
.of_match_table = of_match_ptr(stm32_vrefbuf_of_match),
+ .pm = &stm32_vrefbuf_pm_ops,
},
};
module_platform_driver(stm32_vrefbuf_driver);
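
The stm32-vrefbuf hunks above bracket every CSR access with a runtime-PM get/put pair so the clock only runs while a register is actually being touched, with autosuspend keeping back-to-back accesses cheap. A sketch of that bracket, factored into a hypothetical helper (the driver above open-codes it in each op):

static int stm32_vrefbuf_read_csr(struct stm32_vrefbuf *priv, u32 *val)
{
	int ret;

	/* Resumes the device (and its clock) via the runtime_resume callback. */
	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->dev);
		return ret;
	}

	*val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);

	/* Drop the reference; the device suspends after the autosuspend delay. */
	pm_runtime_mark_last_busy(priv->dev);
	pm_runtime_put_autosuspend(priv->dev);

	return 0;
}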
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index 16ba0297f709..f09061473613 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -12,8 +12,10 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+
/**
- * stpmic1 regulator description
+ * stpmic1 regulator description: this structure is used as driver data
* @desc: regulator framework description
* @mask_reset_reg: mask reset register address
* @mask_reset_mask: mask rank and mask reset register mask
@@ -28,28 +30,9 @@ struct stpmic1_regulator_cfg {
u8 icc_mask;
};
-/**
- * stpmic1 regulator data: this structure is used as driver data
- * @regul_id: regulator id
- * @reg_node: DT node of regulator (unused on non-DT platforms)
- * @cfg: stpmic specific regulator description
- * @mask_reset: mask_reset bit value
- * @irq_curlim: current limit interrupt number
- * @regmap: point to parent regmap structure
- */
-struct stpmic1_regulator {
- unsigned int regul_id;
- struct device_node *reg_node;
- struct stpmic1_regulator_cfg *cfg;
- u8 mask_reset;
- int irq_curlim;
- struct regmap *regmap;
-};
-
static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode);
static unsigned int stpmic1_get_mode(struct regulator_dev *rdev);
static int stpmic1_set_icc(struct regulator_dev *rdev);
-static int stpmic1_regulator_parse_dt(void *driver_data);
static unsigned int stpmic1_map_mode(unsigned int mode);
enum {
@@ -72,15 +55,13 @@ enum {
/* Enable time worst case is 5000mV/(2250uV/uS) */
#define PMIC_ENABLE_TIME_US 2200
-#define STPMIC1_BUCK_MODE_NORMAL 0
-#define STPMIC1_BUCK_MODE_LP BUCK_HPLP_ENABLE_MASK
-
-struct regulator_linear_range buck1_ranges[] = {
- REGULATOR_LINEAR_RANGE(600000, 0, 30, 25000),
- REGULATOR_LINEAR_RANGE(1350000, 31, 63, 0),
+static const struct regulator_linear_range buck1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(725000, 0, 4, 0),
+ REGULATOR_LINEAR_RANGE(725000, 5, 36, 25000),
+ REGULATOR_LINEAR_RANGE(1500000, 37, 63, 0),
};
-struct regulator_linear_range buck2_ranges[] = {
+static const struct regulator_linear_range buck2_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 17, 0),
REGULATOR_LINEAR_RANGE(1050000, 18, 19, 0),
REGULATOR_LINEAR_RANGE(1100000, 20, 21, 0),
@@ -94,7 +75,7 @@ struct regulator_linear_range buck2_ranges[] = {
REGULATOR_LINEAR_RANGE(1500000, 36, 63, 0),
};
-struct regulator_linear_range buck3_ranges[] = {
+static const struct regulator_linear_range buck3_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 19, 0),
REGULATOR_LINEAR_RANGE(1100000, 20, 23, 0),
REGULATOR_LINEAR_RANGE(1200000, 24, 27, 0),
@@ -102,10 +83,9 @@ struct regulator_linear_range buck3_ranges[] = {
REGULATOR_LINEAR_RANGE(1400000, 32, 35, 0),
REGULATOR_LINEAR_RANGE(1500000, 36, 55, 100000),
REGULATOR_LINEAR_RANGE(3400000, 56, 63, 0),
-
};
-struct regulator_linear_range buck4_ranges[] = {
+static const struct regulator_linear_range buck4_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 27, 25000),
REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
@@ -113,24 +93,21 @@ struct regulator_linear_range buck4_ranges[] = {
REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0),
REGULATOR_LINEAR_RANGE(1500000, 36, 60, 100000),
REGULATOR_LINEAR_RANGE(3900000, 61, 63, 0),
-
};
-struct regulator_linear_range ldo1_ranges[] = {
+static const struct regulator_linear_range ldo1_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
-
};
-struct regulator_linear_range ldo2_ranges[] = {
+static const struct regulator_linear_range ldo2_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
-
};
-struct regulator_linear_range ldo3_ranges[] = {
+static const struct regulator_linear_range ldo3_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
@@ -138,18 +115,18 @@ struct regulator_linear_range ldo3_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 31, 31, 0),
};
-struct regulator_linear_range ldo5_ranges[] = {
+static const struct regulator_linear_range ldo5_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 30, 100000),
REGULATOR_LINEAR_RANGE(3900000, 31, 31, 0),
};
-struct regulator_linear_range ldo6_ranges[] = {
+static const struct regulator_linear_range ldo6_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
};
-static struct regulator_ops stpmic1_ldo_ops = {
+static const struct regulator_ops stpmic1_ldo_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.is_enabled = regulator_is_enabled_regmap,
@@ -157,11 +134,10 @@ static struct regulator_ops stpmic1_ldo_ops = {
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .set_pull_down = regulator_set_pull_down_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
-static struct regulator_ops stpmic1_ldo3_ops = {
+static const struct regulator_ops stpmic1_ldo3_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_iterate,
.is_enabled = regulator_is_enabled_regmap,
@@ -169,21 +145,19 @@ static struct regulator_ops stpmic1_ldo3_ops = {
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .set_pull_down = regulator_set_pull_down_regmap,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
-static struct regulator_ops stpmic1_ldo4_fixed_regul_ops = {
+static const struct regulator_ops stpmic1_ldo4_fixed_regul_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .set_pull_down = regulator_set_pull_down_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
-static struct regulator_ops stpmic1_buck_ops = {
+static const struct regulator_ops stpmic1_buck_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.is_enabled = regulator_is_enabled_regmap,
@@ -197,20 +171,27 @@ static struct regulator_ops stpmic1_buck_ops = {
.set_over_current_protection = stpmic1_set_icc,
};
-static struct regulator_ops stpmic1_vref_ddr_ops = {
+static const struct regulator_ops stpmic1_vref_ddr_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .set_pull_down = regulator_set_pull_down_regmap,
};
-static struct regulator_ops stpmic1_switch_regul_ops = {
+static const struct regulator_ops stpmic1_boost_regul_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
+static const struct regulator_ops stpmic1_switch_regul_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_over_current_protection = stpmic1_set_icc,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
#define REG_LDO(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
@@ -227,8 +208,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
- .pull_down_reg = ids##_PULL_DOWN_REG, \
- .pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
@@ -252,8 +231,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.bypass_mask = LDO_BYPASS_MASK, \
.bypass_val_on = LDO_BYPASS_MASK, \
.bypass_val_off = 0, \
- .pull_down_reg = ids##_PULL_DOWN_REG, \
- .pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
@@ -271,8 +248,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
- .pull_down_reg = ids##_PULL_DOWN_REG, \
- .pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
@@ -312,12 +287,47 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
- .pull_down_reg = ids##_PULL_DOWN_REG, \
- .pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
-#define REG_SWITCH(ids, base, reg, mask, val) { \
+#define REG_BOOST(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 1, \
+ .ops = &stpmic1_boost_regul_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 0, \
+ .fixed_uV = 5000000, \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = BOOST_ENABLED, \
+ .enable_val = BOOST_ENABLED, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .supply_name = #base, \
+}
+
+#define REG_VBUS_OTG(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 1, \
+ .ops = &stpmic1_switch_regul_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 0, \
+ .fixed_uV = 5000000, \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = USBSW_OTG_SWITCH_ENABLED, \
+ .enable_val = USBSW_OTG_SWITCH_ENABLED, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .supply_name = #base, \
+ .active_discharge_reg = BST_SW_CR, \
+ .active_discharge_mask = VBUS_OTG_DISCHARGE, \
+ .active_discharge_on = VBUS_OTG_DISCHARGE, \
+}
+
+#define REG_SW_OUT(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 1, \
@@ -326,15 +336,18 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.owner = THIS_MODULE, \
.min_uV = 0, \
.fixed_uV = 5000000, \
- .enable_reg = (reg), \
- .enable_mask = (mask), \
- .enable_val = (val), \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = SWIN_SWOUT_ENABLED, \
+ .enable_val = SWIN_SWOUT_ENABLED, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.supply_name = #base, \
+ .active_discharge_reg = BST_SW_CR, \
+ .active_discharge_mask = SW_OUT_DISCHARGE, \
+ .active_discharge_on = SW_OUT_DISCHARGE, \
}
-struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
+static const struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
[STPMIC1_BUCK1] = {
.desc = REG_BUCK(BUCK1, buck1),
.icc_reg = BUCKS_ICCTO_CR,
@@ -411,23 +424,17 @@ struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
.mask_reset_mask = BIT(6),
},
[STPMIC1_BOOST] = {
- .desc = REG_SWITCH(BOOST, boost, BST_SW_CR,
- BOOST_ENABLED,
- BOOST_ENABLED),
+ .desc = REG_BOOST(BOOST, boost),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(6),
},
[STPMIC1_VBUS_OTG] = {
- .desc = REG_SWITCH(VBUS_OTG, pwr_sw1, BST_SW_CR,
- USBSW_OTG_SWITCH_ENABLED,
- USBSW_OTG_SWITCH_ENABLED),
+ .desc = REG_VBUS_OTG(VBUS_OTG, pwr_sw1),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(4),
},
[STPMIC1_SW_OUT] = {
- .desc = REG_SWITCH(SW_OUT, pwr_sw2, BST_SW_CR,
- SWIN_SWOUT_ENABLED,
- SWIN_SWOUT_ENABLED),
+ .desc = REG_SW_OUT(SW_OUT, pwr_sw2),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(5),
},
@@ -448,8 +455,9 @@ static unsigned int stpmic1_map_mode(unsigned int mode)
static unsigned int stpmic1_get_mode(struct regulator_dev *rdev)
{
int value;
+ struct regmap *regmap = rdev_get_regmap(rdev);
- regmap_read(rdev->regmap, rdev->desc->enable_reg, &value);
+ regmap_read(regmap, rdev->desc->enable_reg, &value);
if (value & STPMIC1_BUCK_MODE_LP)
return REGULATOR_MODE_STANDBY;
@@ -460,6 +468,7 @@ static unsigned int stpmic1_get_mode(struct regulator_dev *rdev)
static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
int value;
+ struct regmap *regmap = rdev_get_regmap(rdev);
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -472,17 +481,18 @@ static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode)
return -EINVAL;
}
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ return regmap_update_bits(regmap, rdev->desc->enable_reg,
STPMIC1_BUCK_MODE_LP, value);
}
static int stpmic1_set_icc(struct regulator_dev *rdev)
{
- struct stpmic1_regulator *regul = rdev_get_drvdata(rdev);
+ struct stpmic1_regulator_cfg *cfg = rdev_get_drvdata(rdev);
+ struct regmap *regmap = rdev_get_regmap(rdev);
/* enable switch off in case of over current */
- return regmap_update_bits(regul->regmap, regul->cfg->icc_reg,
- regul->cfg->icc_mask, regul->cfg->icc_mask);
+ return regmap_update_bits(regmap, cfg->icc_reg, cfg->icc_mask,
+ cfg->icc_mask);
}
static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data)
@@ -501,46 +511,13 @@ static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int stpmic1_regulator_init(struct platform_device *pdev,
- struct regulator_dev *rdev)
-{
- struct stpmic1_regulator *regul = rdev_get_drvdata(rdev);
- int ret = 0;
-
- /* set mask reset */
- if (regul->mask_reset && regul->cfg->mask_reset_reg != 0) {
- ret = regmap_update_bits(regul->regmap,
- regul->cfg->mask_reset_reg,
- regul->cfg->mask_reset_mask,
- regul->cfg->mask_reset_mask);
- if (ret) {
- dev_err(&pdev->dev, "set mask reset failed\n");
- return ret;
- }
- }
-
- /* setup an irq handler for over-current detection */
- if (regul->irq_curlim > 0) {
- ret = devm_request_threaded_irq(&pdev->dev,
- regul->irq_curlim, NULL,
- stpmic1_curlim_irq_handler,
- IRQF_ONESHOT | IRQF_SHARED,
- pdev->name, rdev);
- if (ret) {
- dev_err(&pdev->dev, "Request IRQ failed\n");
- return ret;
- }
- }
- return 0;
-}
-
#define MATCH(_name, _id) \
[STPMIC1_##_id] = { \
.name = #_name, \
.desc = &stpmic1_regulator_cfgs[STPMIC1_##_id].desc, \
}
-static struct of_regulator_match stpmic1_regulators_matches[] = {
+static struct of_regulator_match stpmic1_matches[] = {
MATCH(buck1, BUCK1),
MATCH(buck2, BUCK2),
MATCH(buck3, BUCK3),
@@ -557,94 +534,75 @@ static struct of_regulator_match stpmic1_regulators_matches[] = {
MATCH(pwr_sw2, SW_OUT),
};
-static int stpmic1_regulator_parse_dt(void *driver_data)
-{
- struct stpmic1_regulator *regul =
- (struct stpmic1_regulator *)driver_data;
-
- if (!regul)
- return -EINVAL;
-
- if (of_get_property(regul->reg_node, "st,mask-reset", NULL))
- regul->mask_reset = 1;
-
- regul->irq_curlim = of_irq_get(regul->reg_node, 0);
-
- return 0;
-}
-
-static struct
-regulator_dev *stpmic1_regulator_register(struct platform_device *pdev, int id,
- struct regulator_init_data *init_data,
- struct stpmic1_regulator *regul)
+static int stpmic1_regulator_register(struct platform_device *pdev, int id,
+ struct of_regulator_match *match,
+ const struct stpmic1_regulator_cfg *cfg)
{
struct stpmic1 *pmic_dev = dev_get_drvdata(pdev->dev.parent);
struct regulator_dev *rdev;
struct regulator_config config = {};
+ int ret = 0;
+ int irq;
config.dev = &pdev->dev;
- config.init_data = init_data;
- config.of_node = stpmic1_regulators_matches[id].of_node;
+ config.init_data = match->init_data;
+ config.of_node = match->of_node;
config.regmap = pmic_dev->regmap;
- config.driver_data = regul;
-
- regul->regul_id = id;
- regul->reg_node = config.of_node;
- regul->cfg = &stpmic1_regulator_cfgs[id];
- regul->regmap = pmic_dev->regmap;
+ config.driver_data = (void *)cfg;
- rdev = devm_regulator_register(&pdev->dev, &regul->cfg->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &cfg->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s regulator\n",
- regul->cfg->desc.name);
+ cfg->desc.name);
+ return PTR_ERR(rdev);
+ }
+
+ /* set mask reset */
+ if (of_get_property(config.of_node, "st,mask-reset", NULL) &&
+ cfg->mask_reset_reg != 0) {
+ ret = regmap_update_bits(pmic_dev->regmap,
+ cfg->mask_reset_reg,
+ cfg->mask_reset_mask,
+ cfg->mask_reset_mask);
+ if (ret) {
+ dev_err(&pdev->dev, "set mask reset failed\n");
+ return ret;
+ }
}
- return rdev;
+ /* setup an irq handler for over-current detection */
+ irq = of_irq_get(config.of_node, 0);
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev,
+ irq, NULL,
+ stpmic1_curlim_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ pdev->name, rdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ failed\n");
+ return ret;
+ }
+ }
+ return 0;
}
static int stpmic1_regulator_probe(struct platform_device *pdev)
{
- struct regulator_dev *rdev;
- struct stpmic1_regulator *regul;
- struct regulator_init_data *init_data;
- struct device_node *np;
int i, ret;
- np = pdev->dev.of_node;
-
- ret = of_regulator_match(&pdev->dev, np,
- stpmic1_regulators_matches,
- ARRAY_SIZE(stpmic1_regulators_matches));
+ ret = of_regulator_match(&pdev->dev, pdev->dev.of_node, stpmic1_matches,
+ ARRAY_SIZE(stpmic1_matches));
if (ret < 0) {
dev_err(&pdev->dev,
"Error in PMIC regulator device tree node");
return ret;
}
- regul = devm_kzalloc(&pdev->dev, ARRAY_SIZE(stpmic1_regulator_cfgs) *
- sizeof(struct stpmic1_regulator),
- GFP_KERNEL);
- if (!regul)
- return -ENOMEM;
-
for (i = 0; i < ARRAY_SIZE(stpmic1_regulator_cfgs); i++) {
- /* Parse DT & find regulators to register */
- init_data = stpmic1_regulators_matches[i].init_data;
- if (init_data)
- init_data->regulator_init = &stpmic1_regulator_parse_dt;
-
- rdev = stpmic1_regulator_register(pdev, i, init_data, regul);
- if (IS_ERR(rdev))
- return PTR_ERR(rdev);
-
- ret = stpmic1_regulator_init(pdev, rdev);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to initialize regulator %d\n", ret);
+ ret = stpmic1_regulator_register(pdev, i, &stpmic1_matches[i],
+ &stpmic1_regulator_cfgs[i]);
+ if (ret < 0)
return ret;
- }
-
- regul++;
}
dev_dbg(&pdev->dev, "stpmic1_regulator driver probed\n");
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 6209beee1018..95708d34876b 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -188,7 +188,8 @@ static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
.set_suspend_disable = tps65218_pmic_set_suspend_disable,
};
-static const int ls3_currents[] = { 100, 200, 500, 1000 };
+static const int ls3_currents[] = { 100000, 200000, 500000, 1000000 };
+
static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev,
int lim_uA)
@@ -204,7 +205,8 @@ static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev,
return -EINVAL;
return tps65218_set_bits(tps, dev->desc->csel_reg, dev->desc->csel_mask,
- index << 2, TPS65218_PROTECT_L1);
+ index << __builtin_ctz(dev->desc->csel_mask),
+ TPS65218_PROTECT_L1);
}
static int tps65218_pmic_set_current_limit(struct regulator_dev *dev,
@@ -214,7 +216,7 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev,
unsigned int num_currents = ARRAY_SIZE(ls3_currents);
struct tps65218 *tps = rdev_get_drvdata(dev);
- while (index < num_currents && ls3_currents[index] < max_uA)
+ while (index < num_currents && ls3_currents[index] <= max_uA)
index++;
index--;
@@ -223,7 +225,8 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev,
return -EINVAL;
return tps65218_set_bits(tps, dev->desc->csel_reg, dev->desc->csel_mask,
- index << 2, TPS65218_PROTECT_L1);
+ index << __builtin_ctz(dev->desc->csel_mask),
+ TPS65218_PROTECT_L1);
}
static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
@@ -236,12 +239,13 @@ static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
if (retval < 0)
return retval;
- index = (index & dev->desc->csel_mask) >> 2;
+ index = (index & dev->desc->csel_mask) >>
+ __builtin_ctz(dev->desc->csel_mask);
return ls3_currents[index];
}
-static struct regulator_ops tps65218_ls3_ops = {
+static struct regulator_ops tps65218_ls23_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
@@ -303,8 +307,13 @@ static const struct regulator_desc regulators[] = {
TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges,
2, 0, 0, TPS65218_REG_SEQ6,
TPS65218_SEQ6_LDO1_SEQ_MASK),
+ TPS65218_REGULATOR("LS2", "regulator-ls2", TPS65218_LS_2,
+ REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0,
+ TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS2_EN,
+ TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS2ILIM_MASK,
+ NULL, 0, 0, 0, 0, 0),
TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3,
- REGULATOR_CURRENT, tps65218_ls3_ops, 0, 0, 0,
+ REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0,
TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN,
TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK,
NULL, 0, 0, 0, 0, 0),
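
The tps65218 hunks above derive the bit shift from the selector mask with __builtin_ctz() instead of hard-coding << 2, which is what lets LS2 and LS3 share tps65218_ls23_ops. For illustration, assuming a csel_mask of 0x03 for LS2 and 0x0c for LS3 (hypothetical values): __builtin_ctz(0x03) = 0 and __builtin_ctz(0x0c) = 2, so a limit index of 2 is written as 0x02 into the LS2 field but as 0x08 into the LS3 field; the old fixed shift only handled the LS3 layout. The ls3_currents table is also rescaled to microamps so it matches the *_uA arguments used by the ops.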
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 884c7505ed91..402ea43c77d1 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -576,14 +576,9 @@ static int twlreg_probe(struct platform_device *pdev)
struct regulator_init_data *initdata;
struct regulation_constraints *c;
struct regulator_dev *rdev;
- const struct of_device_id *match;
struct regulator_config config = { };
- match = of_match_device(twl_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- template = match->data;
+ template = of_device_get_match_data(&pdev->dev);
if (!template)
return -ENODEV;
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 219cbd910dbf..15f19df6bc5d 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -31,9 +31,6 @@ struct twlreg_info {
/* twl resource ID, for resource control state machine */
u8 id;
- /* chip constraints on regulator behavior */
- u16 min_mV;
-
u8 flags;
/* used by regulator core */
@@ -247,32 +244,11 @@ static int twl6030coresmps_get_voltage(struct regulator_dev *rdev)
return -ENODEV;
}
-static struct regulator_ops twl6030coresmps_ops = {
+static const struct regulator_ops twl6030coresmps_ops = {
.set_voltage = twl6030coresmps_set_voltage,
.get_voltage = twl6030coresmps_get_voltage,
};
-static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- switch (sel) {
- case 0:
- return 0;
- case 1 ... 24:
- /* Linear mapping from 00000001 to 00011000:
- * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001)
- */
- return (info->min_mV + 100 * (sel - 1)) * 1000;
- case 25 ... 30:
- return -EINVAL;
- case 31:
- return 2750000;
- default:
- return -EINVAL;
- }
-}
-
static int
twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
@@ -290,8 +266,8 @@ static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
return vsel;
}
-static struct regulator_ops twl6030ldo_ops = {
- .list_voltage = twl6030ldo_list_voltage,
+static const struct regulator_ops twl6030ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
.set_voltage_sel = twl6030ldo_set_voltage_sel,
.get_voltage_sel = twl6030ldo_get_voltage_sel,
@@ -305,7 +281,7 @@ static struct regulator_ops twl6030ldo_ops = {
.get_status = twl6030reg_get_status,
};
-static struct regulator_ops twl6030fixed_ops = {
+static const struct regulator_ops twl6030fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = twl6030reg_enable,
@@ -496,7 +472,7 @@ static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
}
-static struct regulator_ops twlsmps_ops = {
+static const struct regulator_ops twlsmps_ops = {
.list_voltage = twl6030smps_list_voltage,
.map_voltage = twl6030smps_map_voltage,
@@ -513,6 +489,11 @@ static struct regulator_ops twlsmps_ops = {
};
/*----------------------------------------------------------------------*/
+static const struct regulator_linear_range twl6030ldo_linear_range[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 0, 0),
+ REGULATOR_LINEAR_RANGE(1000000, 1, 24, 100000),
+ REGULATOR_LINEAR_RANGE(2750000, 31, 31, 0),
+};
#define TWL6030_ADJUSTABLE_SMPS(label) \
static const struct twlreg_info TWL6030_INFO_##label = { \
@@ -525,28 +506,30 @@ static const struct twlreg_info TWL6030_INFO_##label = { \
}, \
}
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts) \
+#define TWL6030_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6030_INFO_##label = { \
.base = offset, \
- .min_mV = min_mVolts, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
.n_voltages = 32, \
+ .linear_ranges = twl6030ldo_linear_range, \
+ .n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts) \
+#define TWL6032_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6032_INFO_##label = { \
.base = offset, \
- .min_mV = min_mVolts, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
.n_voltages = 32, \
+ .linear_ranges = twl6030ldo_linear_range, \
+ .n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -557,7 +540,6 @@ static const struct twlreg_info TWL6032_INFO_##label = { \
static const struct twlreg_info TWLFIXED_INFO_##label = { \
.base = offset, \
.id = 0, \
- .min_mV = mVolts, \
.desc = { \
.name = #label, \
.id = TWL6030##_REG_##label, \
@@ -574,7 +556,6 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
static const struct twlreg_info TWLSMPS_INFO_##label = { \
.base = offset, \
- .min_mV = 600, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
@@ -592,22 +573,22 @@ static const struct twlreg_info TWLSMPS_INFO_##label = { \
TWL6030_ADJUSTABLE_SMPS(VDD1);
TWL6030_ADJUSTABLE_SMPS(VDD2);
TWL6030_ADJUSTABLE_SMPS(VDD3);
-TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000);
-TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000);
-TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000);
-TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000);
-TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000);
-TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000);
+TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54);
+TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58);
+TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c);
+TWL6030_ADJUSTABLE_LDO(VMMC, 0x68);
+TWL6030_ADJUSTABLE_LDO(VPP, 0x6c);
+TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74);
/* 6025 are renamed compared to 6030 versions */
-TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000);
-TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000);
-TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000);
+TWL6032_ADJUSTABLE_LDO(LDO2, 0x54);
+TWL6032_ADJUSTABLE_LDO(LDO4, 0x58);
+TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c);
+TWL6032_ADJUSTABLE_LDO(LDO5, 0x68);
+TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c);
+TWL6032_ADJUSTABLE_LDO(LDO7, 0x74);
+TWL6032_ADJUSTABLE_LDO(LDO6, 0x60);
+TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64);
+TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70);
TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0);
TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0);
TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);
@@ -687,14 +668,9 @@ static int twlreg_probe(struct platform_device *pdev)
struct regulator_init_data *initdata;
struct regulation_constraints *c;
struct regulator_dev *rdev;
- const struct of_device_id *match;
struct regulator_config config = { };
- match = of_match_device(twl_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- template = match->data;
+ template = of_device_get_match_data(&pdev->dev);
if (!template)
return -ENODEV;
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index abf22acbd13e..9026d5a3e964 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -32,7 +32,7 @@ struct uniphier_regulator_priv {
const struct uniphier_regulator_soc_data *data;
};
-static struct regulator_ops uniphier_regulator_ops = {
+static const struct regulator_ops uniphier_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -87,8 +87,10 @@ static int uniphier_regulator_probe(struct platform_device *pdev)
}
regmap = devm_regmap_init_mmio(dev, base, priv->data->regconf);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto out_rst_assert;
+ }
config.dev = dev;
config.driver_data = priv;
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 5a5bc4bb08d2..12b422373580 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -205,33 +205,10 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data)
* BUCKV specifics
*/
-static int wm831x_buckv_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector <= 0x8)
- return 600000;
- if (selector <= WM831X_BUCKV_MAX_SELECTOR)
- return 600000 + ((selector - 0x8) * 12500);
- return -EINVAL;
-}
-
-static int wm831x_buckv_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- u16 vsel;
-
- if (min_uV < 600000)
- vsel = 0;
- else if (min_uV <= 1800000)
- vsel = DIV_ROUND_UP(min_uV - 600000, 12500) + 8;
- else
- return -EINVAL;
-
- if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV)
- return -EINVAL;
-
- return vsel;
-}
+static const struct regulator_linear_range wm831x_buckv_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 0x7, 0),
+ REGULATOR_LINEAR_RANGE(600000, 0x8, 0x68, 12500),
+};
static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
{
@@ -309,7 +286,7 @@ static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
int vsel;
- vsel = wm831x_buckv_map_voltage(rdev, uV, uV);
+ vsel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (vsel < 0)
return vsel;
@@ -327,52 +304,18 @@ static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
}
/* Current limit options */
-static u16 wm831x_dcdc_ilim[] = {
- 125, 250, 375, 500, 625, 750, 875, 1000
+static const unsigned int wm831x_dcdc_ilim[] = {
+ 125000, 250000, 375000, 500000, 625000, 750000, 875000, 1000000
};
-static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = dcdc->wm831x;
- u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2;
- int i;
-
- for (i = ARRAY_SIZE(wm831x_dcdc_ilim) - 1; i >= 0; i--) {
- if ((min_uA <= wm831x_dcdc_ilim[i]) &&
- (wm831x_dcdc_ilim[i] <= max_uA))
- return wm831x_set_bits(wm831x, reg,
- WM831X_DC1_HC_THR_MASK,
- i << WM831X_DC1_HC_THR_SHIFT);
- }
-
- return -EINVAL;
-}
-
-static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev)
-{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = dcdc->wm831x;
- u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2;
- int val;
-
- val = wm831x_reg_read(wm831x, reg);
- if (val < 0)
- return val;
-
- val = (val & WM831X_DC1_HC_THR_MASK) >> WM831X_DC1_HC_THR_SHIFT;
- return wm831x_dcdc_ilim[val];
-}
-
static const struct regulator_ops wm831x_buckv_ops = {
.set_voltage_sel = wm831x_buckv_set_voltage_sel,
.get_voltage_sel = wm831x_buckv_get_voltage_sel,
- .list_voltage = wm831x_buckv_list_voltage,
- .map_voltage = wm831x_buckv_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.set_suspend_voltage = wm831x_buckv_set_suspend_voltage,
- .set_current_limit = wm831x_buckv_set_current_limit,
- .get_current_limit = wm831x_buckv_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -492,10 +435,16 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
dcdc->desc.id = id;
dcdc->desc.type = REGULATOR_VOLTAGE;
dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1;
+ dcdc->desc.linear_ranges = wm831x_buckv_ranges;
+ dcdc->desc.n_linear_ranges = ARRAY_SIZE(wm831x_buckv_ranges);
dcdc->desc.ops = &wm831x_buckv_ops;
dcdc->desc.owner = THIS_MODULE;
dcdc->desc.enable_reg = WM831X_DCDC_ENABLE;
dcdc->desc.enable_mask = 1 << id;
+ dcdc->desc.csel_reg = dcdc->base + WM831X_DCDC_CONTROL_2;
+ dcdc->desc.csel_mask = WM831X_DC1_HC_THR_MASK;
+ dcdc->desc.n_current_limits = ARRAY_SIZE(wm831x_dcdc_ilim);
+ dcdc->desc.curr_table = wm831x_dcdc_ilim;
ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
if (ret < 0) {
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 183fc42a510a..2d7cd344f3bf 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
const bool * ctx,
struct irq_affinity *desc)
{
- int i, ret;
+ int i, ret, queue_idx = 0;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index c21da9fe51ec..2c8c23db92fb 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -40,6 +40,14 @@ config RESET_BERLIN
help
This enables the reset controller driver for Marvell Berlin SoCs.
+config RESET_BRCMSTB
+ tristate "Broadcom STB reset controller"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ This enables the reset controller driver for Broadcom STB SoCs using
+ a SUN_TOP_CTRL_SW_INIT style controller.
+
config RESET_HSDK
bool "Synopsys HSDK Reset Driver"
depends on HAS_IOMEM
@@ -48,9 +56,9 @@ config RESET_HSDK
This enables the reset controller driver for HSDK board.
config RESET_IMX7
- bool "i.MX7 Reset Driver" if COMPILE_TEST
+ bool "i.MX7/8 Reset Driver" if COMPILE_TEST
depends on HAS_IOMEM
- default SOC_IMX7D
+ default SOC_IMX7D || (ARM64 && ARCH_MXC)
select MFD_SYSCON
help
This enables the reset controller driver for i.MX7 SoCs.
@@ -109,7 +117,7 @@ config RESET_QCOM_PDC
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
- default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
+ default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
help
This enables a simple reset controller driver for reset lines that
can be asserted and deasserted by toggling bits in a contiguous,
@@ -128,6 +136,14 @@ config RESET_STM32MP157
help
This enables the RCC reset controller driver for STM32 MPUs.
+config RESET_SOCFPGA
+ bool "SoCFPGA Reset Driver" if COMPILE_TEST && !ARCH_SOCFPGA
+ default ARCH_SOCFPGA
+ select RESET_SIMPLE
+ help
+ This enables the reset driver for the SoCFPGA ARMv7 platforms. This
+ driver gets initialized early during platform init calls.
+
config RESET_SUNXI
bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
default ARCH_SUNXI
@@ -163,15 +179,15 @@ config RESET_UNIPHIER
Say Y if you want to control reset signals provided by System Control
block, Media I/O block, Peripheral Block.
-config RESET_UNIPHIER_USB3
- tristate "USB3 reset driver for UniPhier SoCs"
+config RESET_UNIPHIER_GLUE
+ tristate "Reset driver in glue layer for UniPhier SoCs"
depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
default ARCH_UNIPHIER
select RESET_SIMPLE
help
- Support for the USB3 core reset on UniPhier SoCs.
- Say Y if you want to control reset signals provided by
- USB3 glue layer.
+ Support for peripheral core reset included in its own glue layer
+ on UniPhier SoCs. Say Y if you want to control reset signals
+ provided by the glue layer.
config RESET_ZYNQ
bool "ZYNQ Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d08e8b90046a..61456b8f659c 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
@@ -19,10 +20,12 @@ obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
+obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
-obj-$(CONFIG_RESET_UNIPHIER_USB3) += reset-uniphier-usb3.o
+obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
+obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index d1887c0ed5d3..9582efb70025 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -795,3 +795,45 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
+
+static int reset_control_get_count_from_lookup(struct device *dev)
+{
+ const struct reset_control_lookup *lookup;
+ const char *dev_id;
+ int count = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ dev_id = dev_name(dev);
+ mutex_lock(&reset_lookup_mutex);
+
+ list_for_each_entry(lookup, &reset_lookup_list, list) {
+ if (!strcmp(lookup->dev_id, dev_id))
+ count++;
+ }
+
+ mutex_unlock(&reset_lookup_mutex);
+
+ if (count == 0)
+ count = -ENOENT;
+
+ return count;
+}
+
+/**
+ * reset_control_get_count - Count number of resets available with a device
+ *
+ * @dev: device for which to return the number of resets
+ *
+ * Returns a positive reset count on success, or a negative error number on
+ * failure or when the count is zero.
+ */
+int reset_control_get_count(struct device *dev)
+{
+ if (dev->of_node)
+ return of_reset_control_get_count(dev->of_node);
+
+ return reset_control_get_count_from_lookup(dev);
+}
+EXPORT_SYMBOL_GPL(reset_control_get_count);
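A minimal consumer-side sketch for the new reset_control_get_count() helper, assuming its declaration lands in <linux/reset.h> next to the rest of the consumer API; the example_probe name, the exclusive/mandatory flags and the error handling are illustrative assumptions, not part of this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rstcs;
	int count;

	/* New helper: counts resets from DT or from the lookup table. */
	count = reset_control_get_count(&pdev->dev);
	if (count < 0)
		return count;	/* e.g. -ENOENT when no resets are listed */

	/* shared = false, optional = false: exclusive, mandatory controls */
	rstcs = devm_reset_control_array_get(&pdev->dev, false, false);
	if (IS_ERR(rstcs))
		return PTR_ERR(rstcs);

	dev_info(&pdev->dev, "device has %d reset line(s)\n", count);
	return 0;
}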
diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
new file mode 100644
index 000000000000..a608f445dad6
--- /dev/null
+++ b/drivers/reset/reset-brcmstb.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom STB generic reset controller for SW_INIT style reset controller
+ *
+ * Author: Florian Fainelli <f.fainelli@gmail.com>
+ * Copyright (C) 2018 Broadcom
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+struct brcmstb_reset {
+ void __iomem *base;
+ struct reset_controller_dev rcdev;
+};
+
+#define SW_INIT_SET 0x00
+#define SW_INIT_CLEAR 0x04
+#define SW_INIT_STATUS 0x08
+
+#define SW_INIT_BIT(id) BIT((id) & 0x1f)
+#define SW_INIT_BANK(id) ((id) >> 5)
+
+/* A full bank contains extra registers that we are not utilizing, but the
+ * span still qualifies as a single bank.
+ */
+#define SW_INIT_BANK_SIZE 0x18
+
+static inline
+struct brcmstb_reset *to_brcmstb(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct brcmstb_reset, rcdev);
+}
+
+static int brcmstb_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_SET);
+
+ return 0;
+}
+
+static int brcmstb_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_CLEAR);
+ /* The maximum delay between de-asserting a line and seeing the block
+ * operate is typically 14us in the worst case; build in some slack
+ * here.
+ */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static int brcmstb_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ return readl_relaxed(priv->base + off + SW_INIT_STATUS) &
+ SW_INIT_BIT(id);
+}
+
+static const struct reset_control_ops brcmstb_reset_ops = {
+ .assert = brcmstb_reset_assert,
+ .deassert = brcmstb_reset_deassert,
+ .status = brcmstb_reset_status,
+};
+
+static int brcmstb_reset_probe(struct platform_device *pdev)
+{
+ struct device *kdev = &pdev->dev;
+ struct brcmstb_reset *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) ||
+ !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) {
+ dev_err(kdev, "incorrect register range\n");
+ return -EINVAL;
+ }
+
+ priv->base = devm_ioremap_resource(kdev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ dev_set_drvdata(kdev, priv);
+
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.nr_resets = DIV_ROUND_DOWN_ULL(resource_size(res),
+ SW_INIT_BANK_SIZE) * 32;
+ priv->rcdev.ops = &brcmstb_reset_ops;
+ priv->rcdev.of_node = kdev->of_node;
+ /* Use defaults: 1 cell and simple xlate function */
+
+ return devm_reset_controller_register(kdev, &priv->rcdev);
+}
+
+static const struct of_device_id brcmstb_reset_of_match[] = {
+ { .compatible = "brcm,brcmstb-reset" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver brcmstb_reset_driver = {
+ .probe = brcmstb_reset_probe,
+ .driver = {
+ .name = "brcmstb-reset",
+ .of_match_table = brcmstb_reset_of_match,
+ },
+};
+module_platform_driver(brcmstb_reset_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB reset controller");
+MODULE_LICENSE("GPL");
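The SW_INIT_BANK()/SW_INIT_BIT() macros above pack 32 reset lines into each 0x18-byte register bank: asserting line id writes BIT(id & 0x1f) into the SET register of bank id >> 5. A small standalone sketch of that arithmetic, for illustration only:

#include <stdint.h>
#include <stdio.h>

#define SW_INIT_SET		0x00
#define SW_INIT_BANK_SIZE	0x18			/* full bank, incl. unused regs */
#define SW_INIT_BIT(id)		(1u << ((id) & 0x1f))	/* 32 lines per bank */
#define SW_INIT_BANK(id)	((id) >> 5)

int main(void)
{
	unsigned long id = 37;	/* example reset line: bank 1, bit 5 */
	unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;

	printf("id %lu -> write 0x%08x to offset 0x%02x (SET register)\n",
	       id, SW_INIT_BIT(id), off + SW_INIT_SET);
	return 0;
}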
diff --git a/drivers/reset/reset-hsdk.c b/drivers/reset/reset-hsdk.c
index 8bce391c6943..4c7b8647b49c 100644
--- a/drivers/reset/reset-hsdk.c
+++ b/drivers/reset/reset-hsdk.c
@@ -86,6 +86,7 @@ static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
static const struct reset_control_ops hsdk_reset_ops = {
.reset = hsdk_reset_reset,
+ .deassert = hsdk_reset_reset,
};
static int hsdk_reset_probe(struct platform_device *pdev)
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 77911fa8f31d..aed76e33a0a9 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -17,14 +17,27 @@
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/regmap.h>
#include <dt-bindings/reset/imx7-reset.h>
+#include <dt-bindings/reset/imx8mq-reset.h>
+
+struct imx7_src_signal {
+ unsigned int offset, bit;
+};
+
+struct imx7_src_variant {
+ const struct imx7_src_signal *signals;
+ unsigned int signals_num;
+ struct reset_control_ops ops;
+};
struct imx7_src {
struct reset_controller_dev rcdev;
struct regmap *regmap;
+ const struct imx7_src_signal *signals;
};
enum imx7_src_registers {
@@ -39,9 +52,14 @@ enum imx7_src_registers {
SRC_DDRC_RCR = 0x1000,
};
-struct imx7_src_signal {
- unsigned int offset, bit;
-};
+static int imx7_reset_update(struct imx7_src *imx7src,
+ unsigned long id, unsigned int value)
+{
+ const struct imx7_src_signal *signal = &imx7src->signals[id];
+
+ return regmap_update_bits(imx7src->regmap,
+ signal->offset, signal->bit, value);
+}
static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
[IMX7_RESET_A7_CORE_POR_RESET0] = { SRC_A7RCR0, BIT(0) },
@@ -81,8 +99,8 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct imx7_src *imx7src = to_imx7_src(rcdev);
- const struct imx7_src_signal *signal = &imx7_src_signals[id];
- unsigned int value = assert ? signal->bit : 0;
+ const unsigned int bit = imx7src->signals[id].bit;
+ unsigned int value = assert ? bit : 0;
switch (id) {
case IMX7_RESET_PCIEPHY:
@@ -95,12 +113,11 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
break;
case IMX7_RESET_PCIE_CTRL_APPS_EN:
- value = (assert) ? 0 : signal->bit;
+ value = assert ? 0 : bit;
break;
}
- return regmap_update_bits(imx7src->regmap,
- signal->offset, signal->bit, value);
+ return imx7_reset_update(imx7src, id, value);
}
static int imx7_reset_assert(struct reset_controller_dev *rcdev,
@@ -115,9 +132,133 @@ static int imx7_reset_deassert(struct reset_controller_dev *rcdev,
return imx7_reset_set(rcdev, id, false);
}
-static const struct reset_control_ops imx7_reset_ops = {
- .assert = imx7_reset_assert,
- .deassert = imx7_reset_deassert,
+static const struct imx7_src_variant variant_imx7 = {
+ .signals = imx7_src_signals,
+ .signals_num = ARRAY_SIZE(imx7_src_signals),
+ .ops = {
+ .assert = imx7_reset_assert,
+ .deassert = imx7_reset_deassert,
+ },
+};
+
+enum imx8mq_src_registers {
+ SRC_A53RCR0 = 0x0004,
+ SRC_HDMI_RCR = 0x0030,
+ SRC_DISP_RCR = 0x0034,
+ SRC_GPU_RCR = 0x0040,
+ SRC_VPU_RCR = 0x0044,
+ SRC_PCIE2_RCR = 0x0048,
+ SRC_MIPIPHY1_RCR = 0x004c,
+ SRC_MIPIPHY2_RCR = 0x0050,
+ SRC_DDRC2_RCR = 0x1004,
+};
+
+static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
+ [IMX8MQ_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET2] = { SRC_A53RCR0, BIT(2) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET3] = { SRC_A53RCR0, BIT(3) },
+ [IMX8MQ_RESET_A53_CORE_RESET0] = { SRC_A53RCR0, BIT(4) },
+ [IMX8MQ_RESET_A53_CORE_RESET1] = { SRC_A53RCR0, BIT(5) },
+ [IMX8MQ_RESET_A53_CORE_RESET2] = { SRC_A53RCR0, BIT(6) },
+ [IMX8MQ_RESET_A53_CORE_RESET3] = { SRC_A53RCR0, BIT(7) },
+ [IMX8MQ_RESET_A53_DBG_RESET0] = { SRC_A53RCR0, BIT(8) },
+ [IMX8MQ_RESET_A53_DBG_RESET1] = { SRC_A53RCR0, BIT(9) },
+ [IMX8MQ_RESET_A53_DBG_RESET2] = { SRC_A53RCR0, BIT(10) },
+ [IMX8MQ_RESET_A53_DBG_RESET3] = { SRC_A53RCR0, BIT(11) },
+ [IMX8MQ_RESET_A53_ETM_RESET0] = { SRC_A53RCR0, BIT(12) },
+ [IMX8MQ_RESET_A53_ETM_RESET1] = { SRC_A53RCR0, BIT(13) },
+ [IMX8MQ_RESET_A53_ETM_RESET2] = { SRC_A53RCR0, BIT(14) },
+ [IMX8MQ_RESET_A53_ETM_RESET3] = { SRC_A53RCR0, BIT(15) },
+ [IMX8MQ_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) },
+ [IMX8MQ_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) },
+ [IMX8MQ_RESET_SW_NON_SCLR_M4C_RST] = { SRC_M4RCR, BIT(0) },
+ [IMX8MQ_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) },
+ [IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) },
+ [IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) },
+ [IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) },
+ [IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) },
+ [IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR,
+ BIT(2) | BIT(1) },
+ [IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
+ [IMX8MQ_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
+ [IMX8MQ_RESET_HDMI_PHY_APB_RESET] = { SRC_HDMI_RCR, BIT(0) },
+ [IMX8MQ_RESET_DISP_RESET] = { SRC_DISP_RCR, BIT(0) },
+ [IMX8MQ_RESET_GPU_RESET] = { SRC_GPU_RCR, BIT(0) },
+ [IMX8MQ_RESET_VPU_RESET] = { SRC_VPU_RCR, BIT(0) },
+ [IMX8MQ_RESET_PCIEPHY2] = { SRC_PCIE2_RCR,
+ BIT(2) | BIT(1) },
+ [IMX8MQ_RESET_PCIEPHY2_PERST] = { SRC_PCIE2_RCR, BIT(3) },
+ [IMX8MQ_RESET_PCIE2_CTRL_APPS_EN] = { SRC_PCIE2_RCR, BIT(6) },
+ [IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF] = { SRC_PCIE2_RCR, BIT(11) },
+ [IMX8MQ_RESET_MIPI_CSI1_CORE_RESET] = { SRC_MIPIPHY1_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET] = { SRC_MIPIPHY1_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_CSI1_ESC_RESET] = { SRC_MIPIPHY1_RCR, BIT(2) },
+ [IMX8MQ_RESET_MIPI_CSI2_CORE_RESET] = { SRC_MIPIPHY2_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET] = { SRC_MIPIPHY2_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_CSI2_ESC_RESET] = { SRC_MIPIPHY2_RCR, BIT(2) },
+ [IMX8MQ_RESET_DDRC1_PRST] = { SRC_DDRC_RCR, BIT(0) },
+ [IMX8MQ_RESET_DDRC1_CORE_RESET] = { SRC_DDRC_RCR, BIT(1) },
+ [IMX8MQ_RESET_DDRC1_PHY_RESET] = { SRC_DDRC_RCR, BIT(2) },
+ [IMX8MQ_RESET_DDRC2_PHY_RESET] = { SRC_DDRC2_RCR, BIT(0) },
+ [IMX8MQ_RESET_DDRC2_CORE_RESET] = { SRC_DDRC2_RCR, BIT(1) },
+ [IMX8MQ_RESET_DDRC2_PRST] = { SRC_DDRC2_RCR, BIT(2) },
+};
+
+static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct imx7_src *imx7src = to_imx7_src(rcdev);
+ const unsigned int bit = imx7src->signals[id].bit;
+ unsigned int value = assert ? bit : 0;
+
+ switch (id) {
+ case IMX8MQ_RESET_PCIEPHY:
+ case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */
+ /*
+ * wait for more than 10us to release phy g_rst and
+ * btnrst
+ */
+ if (!assert)
+ udelay(10);
+ break;
+
+ case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
+ case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */
+ value = assert ? 0 : bit;
+ break;
+ }
+
+ return imx7_reset_update(imx7src, id, value);
+}
+
+static int imx8mq_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mq_reset_set(rcdev, id, true);
+}
+
+static int imx8mq_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mq_reset_set(rcdev, id, false);
+}
+
+static const struct imx7_src_variant variant_imx8mq = {
+ .signals = imx8mq_src_signals,
+ .signals_num = ARRAY_SIZE(imx8mq_src_signals),
+ .ops = {
+ .assert = imx8mq_reset_assert,
+ .deassert = imx8mq_reset_deassert,
+ },
};
static int imx7_reset_probe(struct platform_device *pdev)
@@ -125,11 +266,13 @@ static int imx7_reset_probe(struct platform_device *pdev)
struct imx7_src *imx7src;
struct device *dev = &pdev->dev;
struct regmap_config config = { .name = "src" };
+ const struct imx7_src_variant *variant = of_device_get_match_data(dev);
imx7src = devm_kzalloc(dev, sizeof(*imx7src), GFP_KERNEL);
if (!imx7src)
return -ENOMEM;
+ imx7src->signals = variant->signals;
imx7src->regmap = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(imx7src->regmap)) {
dev_err(dev, "Unable to get imx7-src regmap");
@@ -138,15 +281,16 @@ static int imx7_reset_probe(struct platform_device *pdev)
regmap_attach_dev(dev, imx7src->regmap, &config);
imx7src->rcdev.owner = THIS_MODULE;
- imx7src->rcdev.nr_resets = IMX7_RESET_NUM;
- imx7src->rcdev.ops = &imx7_reset_ops;
+ imx7src->rcdev.nr_resets = variant->signals_num;
+ imx7src->rcdev.ops = &variant->ops;
imx7src->rcdev.of_node = dev->of_node;
return devm_reset_controller_register(dev, &imx7src->rcdev);
}
static const struct of_device_id imx7_reset_dt_ids[] = {
- { .compatible = "fsl,imx7d-src", },
+ { .compatible = "fsl,imx7d-src", .data = &variant_imx7 },
+ { .compatible = "fsl,imx8mq-src", .data = &variant_imx8mq },
{ /* sentinel */ },
};
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index a91107fc9e27..77fbba3100c8 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -109,7 +109,7 @@ struct reset_simple_devdata {
#define SOCFPGA_NR_BANKS 8
static const struct reset_simple_devdata reset_simple_socfpga = {
- .reg_offset = 0x10,
+ .reg_offset = 0x20,
.nr_resets = SOCFPGA_NR_BANKS * 32,
.status_active_low = true,
};
@@ -120,7 +120,8 @@ static const struct reset_simple_devdata reset_simple_active_low = {
};
static const struct of_device_id reset_simple_dt_ids[] = {
- { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga },
+ { .compatible = "altr,stratix10-rst-mgr",
+ .data = &reset_simple_socfpga },
{ .compatible = "st,stm32-rcc", },
{ .compatible = "allwinner,sun6i-a31-clock-reset",
.data = &reset_simple_active_low },
@@ -166,14 +167,6 @@ static int reset_simple_probe(struct platform_device *pdev)
data->status_active_low = devdata->status_active_low;
}
- if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") &&
- of_property_read_u32(dev->of_node, "altr,modrst-offset",
- &reg_offset)) {
- dev_warn(dev,
- "missing altr,modrst-offset property, assuming 0x%x!\n",
- reg_offset);
- }
-
data->membase += reg_offset;
return devm_reset_controller_register(dev, &data->rcdev);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
new file mode 100644
index 000000000000..96953992c2bb
--- /dev/null
+++ b/drivers/reset/reset-socfpga.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Intel Corporation
+ * Copied from reset-sunxi.c
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/reset/socfpga.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "reset-simple.h"
+
+#define SOCFPGA_NR_BANKS 8
+
+static int a10_reset_init(struct device_node *np)
+{
+ struct reset_simple_data *data;
+ struct resource res;
+ resource_size_t size;
+ int ret;
+ u32 reg_offset = 0x10;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ goto err_alloc;
+
+ size = resource_size(&res);
+ if (!request_mem_region(res.start, size, np->name)) {
+ ret = -EBUSY;
+ goto err_alloc;
+ }
+
+ data->membase = ioremap(res.start, size);
+ if (!data->membase) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset))
+ pr_warn("missing altr,modrst-offset property, assuming 0x10\n");
+ data->membase += reg_offset;
+
+ spin_lock_init(&data->lock);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = SOCFPGA_NR_BANKS * 32;
+ data->rcdev.ops = &reset_simple_ops;
+ data->rcdev.of_node = np;
+ data->status_active_low = true;
+
+ return reset_controller_register(&data->rcdev);
+
+err_alloc:
+ kfree(data);
+ return ret;
+};
+
+/*
+ * These are the reset controllers we need to initialize early on in
+ * our system, before we can even think of using a regular device
+ * driver for them.
+ * The controllers that we can register through the regular device
+ * model are handled by the simple reset driver directly.
+ */
+static const struct of_device_id socfpga_early_reset_dt_ids[] __initconst = {
+ { .compatible = "altr,rst-mgr", },
+ { /* sentinel */ },
+};
+
+void __init socfpga_reset_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, socfpga_early_reset_dt_ids)
+ a10_reset_init(np);
+}
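The comment above is the reason this code is split out of reset-simple: the "altr,rst-mgr" controllers must be registered before ordinary driver probing starts. A hedged sketch of an assumed early call site (not part of this patch; the hook name is invented) would simply be:

#include <linux/init.h>
#include <linux/reset/socfpga.h>

static void __init example_socfpga_early_init(void)
{
	/* Registers every "altr,rst-mgr" node before regular probing starts. */
	socfpga_reset_init();
}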
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index db9a1a75523f..b06d724d8f21 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -18,6 +18,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/sunxi.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/reset/reset-uniphier-usb3.c b/drivers/reset/reset-uniphier-glue.c
index ffa1b19b594d..a45923f4df6d 100644
--- a/drivers/reset/reset-uniphier-usb3.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
//
-// reset-uniphier-usb3.c - USB3 reset driver for UniPhier
+// reset-uniphier-glue.c - Glue layer reset driver for UniPhier
// Copyright 2018 Socionext Inc.
// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
@@ -15,24 +15,24 @@
#define MAX_CLKS 2
#define MAX_RSTS 2
-struct uniphier_usb3_reset_soc_data {
+struct uniphier_glue_reset_soc_data {
int nclks;
const char * const *clock_names;
int nrsts;
const char * const *reset_names;
};
-struct uniphier_usb3_reset_priv {
+struct uniphier_glue_reset_priv {
struct clk_bulk_data clk[MAX_CLKS];
struct reset_control *rst[MAX_RSTS];
struct reset_simple_data rdata;
- const struct uniphier_usb3_reset_soc_data *data;
+ const struct uniphier_glue_reset_soc_data *data;
};
-static int uniphier_usb3_reset_probe(struct platform_device *pdev)
+static int uniphier_glue_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct uniphier_usb3_reset_priv *priv;
+ struct uniphier_glue_reset_priv *priv;
struct resource *res;
resource_size_t size;
const char *name;
@@ -100,9 +100,9 @@ out_rst_assert:
return ret;
}
-static int uniphier_usb3_reset_remove(struct platform_device *pdev)
+static int uniphier_glue_reset_remove(struct platform_device *pdev)
{
- struct uniphier_usb3_reset_priv *priv = platform_get_drvdata(pdev);
+ struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev);
int i;
for (i = 0; i < priv->data->nrsts; i++)
@@ -117,7 +117,7 @@ static const char * const uniphier_pro4_clock_reset_names[] = {
"gio", "link",
};
-static const struct uniphier_usb3_reset_soc_data uniphier_pro4_data = {
+static const struct uniphier_glue_reset_soc_data uniphier_pro4_data = {
.nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
.clock_names = uniphier_pro4_clock_reset_names,
.nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
@@ -128,14 +128,14 @@ static const char * const uniphier_pxs2_clock_reset_names[] = {
"link",
};
-static const struct uniphier_usb3_reset_soc_data uniphier_pxs2_data = {
+static const struct uniphier_glue_reset_soc_data uniphier_pxs2_data = {
.nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
.clock_names = uniphier_pxs2_clock_reset_names,
.nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
.reset_names = uniphier_pxs2_clock_reset_names,
};
-static const struct of_device_id uniphier_usb3_reset_match[] = {
+static const struct of_device_id uniphier_glue_reset_match[] = {
{
.compatible = "socionext,uniphier-pro4-usb3-reset",
.data = &uniphier_pro4_data,
@@ -152,20 +152,32 @@ static const struct of_device_id uniphier_usb3_reset_match[] = {
.compatible = "socionext,uniphier-pxs3-usb3-reset",
.data = &uniphier_pxs2_data,
},
+ {
+ .compatible = "socionext,uniphier-pro4-ahci-reset",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-ahci-reset",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-ahci-reset",
+ .data = &uniphier_pxs2_data,
+ },
{ /* Sentinel */ }
};
-MODULE_DEVICE_TABLE(of, uniphier_usb3_reset_match);
+MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match);
-static struct platform_driver uniphier_usb3_reset_driver = {
- .probe = uniphier_usb3_reset_probe,
- .remove = uniphier_usb3_reset_remove,
+static struct platform_driver uniphier_glue_reset_driver = {
+ .probe = uniphier_glue_reset_probe,
+ .remove = uniphier_glue_reset_remove,
.driver = {
- .name = "uniphier-usb3-reset",
- .of_match_table = uniphier_usb3_reset_match,
+ .name = "uniphier-glue-reset",
+ .of_match_table = uniphier_glue_reset_match,
},
};
-module_platform_driver(uniphier_usb3_reset_driver);
+module_platform_driver(uniphier_glue_reset_driver);
MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
-MODULE_DESCRIPTION("UniPhier USB3 Reset Driver");
+MODULE_DESCRIPTION("UniPhier Glue layer reset driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
new file mode 100644
index 000000000000..2ef1f13aa47b
--- /dev/null
+++ b/drivers/reset/reset-zynqmp.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_NR_RESETS (ZYNQMP_PM_RESET_END - ZYNQMP_PM_RESET_START)
+#define ZYNQMP_RESET_ID ZYNQMP_PM_RESET_START
+
+struct zynqmp_reset_data {
+ struct reset_controller_dev rcdev;
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+static inline struct zynqmp_reset_data *
+to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct zynqmp_reset_data, rcdev);
+}
+
+static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_ASSERT);
+}
+
+static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_RELEASE);
+}
+
+static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+ int val, err;
+
+ err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_PULSE);
+}
+
+static struct reset_control_ops zynqmp_reset_ops = {
+ .reset = zynqmp_reset_reset,
+ .assert = zynqmp_reset_assert,
+ .deassert = zynqmp_reset_deassert,
+ .status = zynqmp_reset_status,
+};
+
+static int zynqmp_reset_probe(struct platform_device *pdev)
+{
+ struct zynqmp_reset_data *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (!priv->eemi_ops)
+ return -ENXIO;
+
+ priv->rcdev.ops = &zynqmp_reset_ops;
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.of_node = pdev->dev.of_node;
+ priv->rcdev.nr_resets = ZYNQMP_NR_RESETS;
+
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
+}
+
+static const struct of_device_id zynqmp_reset_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-reset", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver zynqmp_reset_driver = {
+ .probe = zynqmp_reset_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = zynqmp_reset_dt_ids,
+ },
+};
+
+static int __init zynqmp_reset_init(void)
+{
+ return platform_driver_register(&zynqmp_reset_driver);
+}
+
+arch_initcall(zynqmp_reset_init);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 397af07e4d88..e03304fe25bb 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3965,13 +3965,11 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
- void *rdc_buffer,
int rdc_buffer_size,
int magic)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- unsigned long *idaw;
cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
NULL);
@@ -3986,16 +3984,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = CCW_CMD_RDC;
- if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
- idaw = (unsigned long *) (cqr->data);
- ccw->cda = (__u32)(addr_t) idaw;
- ccw->flags = CCW_FLAG_IDA;
- idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
- } else {
- ccw->cda = (__u32)(addr_t) rdc_buffer;
- ccw->flags = 0;
- }
-
+ ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->flags = 0;
ccw->count = rdc_buffer_size;
cqr->startdev = device;
cqr->memdev = device;
@@ -4013,12 +4003,13 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
int ret;
struct dasd_ccw_req *cqr;
- cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
- magic);
+ cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
ret = dasd_sleep_on(cqr);
+ if (ret == 0)
+ memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
dasd_sfree_request(cqr, cqr->memdev);
return ret;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 4e7b55a14b1a..6e294b4d3635 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
usrparm.psf_data &= 0x7fffffffULL;
usrparm.rssd_result &= 0x7fffffffULL;
}
+ /* at least 2 bytes are accessed and should be allocated */
+ if (usrparm.psf_data_len < 2) {
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "Symmetrix ioctl invalid data length %d",
+ usrparm.psf_data_len);
+ rc = -EINVAL;
+ goto out;
+ }
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index b3fcc24b1182..367e9d384d85 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -195,7 +195,9 @@ struct read_info_sccb {
u16 hcpua; /* 120-121 */
u8 _pad_122[124 - 122]; /* 122-123 */
u32 hmfai; /* 124-127 */
- u8 _pad_128[4096 - 128]; /* 128-4095 */
+ u8 _pad_128[134 - 128]; /* 128-133 */
+ u8 byte_134; /* 134 */
+ u8 _pad_135[4096 - 135]; /* 135-4095 */
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 194ffd5c8580..039b2074db7e 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
+ lock_device_hotplug();
smp_rescan_cpus();
+ unlock_device_hotplug();
}
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index e792cee3b51c..8332788681c4 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -44,6 +44,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
+ if (sccb->cpuoff > 134)
+ sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index e324d890a4f6..a59887fad13e 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -181,7 +181,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
- void **sbals_array, int i)
+ struct qdio_buffer **sbals_array, int i)
{
struct qdio_q *prev;
int j;
@@ -212,8 +212,8 @@ static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
struct qdio_q *q;
- void **input_sbal_array = qdio_init->input_sbal_addr_array;
- void **output_sbal_array = qdio_init->output_sbal_addr_array;
+ struct qdio_buffer **input_sbal_array = qdio_init->input_sbal_addr_array;
+ struct qdio_buffer **output_sbal_array = qdio_init->output_sbal_addr_array;
struct qdio_outbuf_state *output_sbal_state_array =
qdio_init->output_sbal_state_array;
int i;
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 70a006ba4d05..384b3987eeb4 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -283,6 +283,33 @@ static long copy_ccw_from_iova(struct channel_program *cp,
#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
+/*
+ * is_cpa_within_range()
+ *
+ * @cpa: channel program address being questioned
+ * @head: address of the beginning of a CCW chain
+ * @len: number of CCWs within the chain
+ *
+ * Determine whether the address of a CCW (whether a new chain,
+ * or the target of a TIC) falls within a range (including the end points).
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
+{
+ u32 tail = head + (len - 1) * sizeof(struct ccw1);
+
+ return (head <= cpa && cpa <= tail);
+}
+
+static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
+{
+ if (!ccw_is_tic(ccw))
+ return 0;
+
+ return is_cpa_within_range(ccw->cda, head, len);
+}
+
static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
struct ccwchain *chain;
@@ -392,7 +419,15 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
return -EOPNOTSUPP;
}
- if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
+ /*
+ * We want to keep counting if the current CCW has the
+ * command-chaining flag enabled, or if it is a TIC CCW
+ * that loops back into the current chain. The latter
+ * is used for device orientation, where the CCW PRIOR to
+ * the TIC can either jump to the TIC or a CCW immediately
+ * after the TIC, depending on the results of its operation.
+ */
+ if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt))
break;
ccw++;
@@ -408,13 +443,11 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
{
struct ccwchain *chain;
- u32 ccw_head, ccw_tail;
+ u32 ccw_head;
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = chain->ch_iova;
- ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1);
-
- if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail))
+ if (is_cpa_within_range(tic->cda, ccw_head, chain->ch_len))
return 1;
}
@@ -481,13 +514,11 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
{
struct ccw1 *ccw = chain->ch_ccw + idx;
struct ccwchain *iter;
- u32 ccw_head, ccw_tail;
+ u32 ccw_head;
list_for_each_entry(iter, &cp->ccwchain_list, next) {
ccw_head = iter->ch_iova;
- ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);
-
- if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
+ if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
(ccw->cda - ccw_head));
return 0;
@@ -829,7 +860,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
struct ccwchain *chain;
u32 cpa = scsw->cmd.cpa;
- u32 ccw_head, ccw_tail;
+ u32 ccw_head;
/*
* LATER:
@@ -839,9 +870,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
*/
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = (u32)(u64)chain->ch_ccw;
- ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1);
-
- if ((ccw_head <= cpa) && (cpa <= ccw_tail)) {
+ if (is_cpa_within_range(cpa, ccw_head, chain->ch_len)) {
/*
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48ea0004a56d..e15816ff1265 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
static inline int ap_test_config_card_id(unsigned int id)
{
if (!ap_configuration) /* QCI not supported */
- return 1;
+ /* only ids 0...3F may be probed */
+ return id < 0x40 ? 1 : 0;
return ap_test_config(ap_configuration->apm, id);
}
@@ -1335,6 +1336,16 @@ static int __match_queue_device_with_qid(struct device *dev, void *data)
}
/*
+ * Helper function to be used with bus_find_device();
+ * matches any queue device with the given queue id.
+ */
+static int __match_queue_device_with_queue_id(struct device *dev, void *data)
+{
+ return is_queue_dev(dev)
+ && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data;
+}
+
+/*
* Helper function for ap_scan_bus().
* Does the scan bus job for the given adapter id.
*/
@@ -1434,8 +1445,13 @@ static void _ap_scan_bus_adapter(int id)
borked = aq->state == AP_STATE_BORKED;
spin_unlock_bh(&aq->lock);
}
- if (borked) /* Remove broken device */
+ if (borked) {
+ /* Remove broken device */
+ AP_DBF(DBF_DEBUG,
+ "removing broken queue=%02x.%04x\n",
+ id, dom);
device_unregister(dev);
+ }
put_device(dev);
continue;
}
@@ -1505,7 +1521,7 @@ static void ap_scan_bus(struct work_struct *unused)
struct device *dev =
bus_find_device(&ap_bus_type, NULL,
(void *)(long) ap_domain_index,
- __match_queue_device_with_qid);
+ __match_queue_device_with_queue_id);
if (dev)
put_device(dev);
else
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index bfc66e4a9de1..d0059eae5d94 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,7 +91,8 @@ enum ap_state {
AP_STATE_WORKING,
AP_STATE_QUEUE_FULL,
AP_STATE_SUSPEND_WAIT,
- AP_STATE_BORKED,
+ AP_STATE_UNBOUND, /* momentarily not bound to a driver */
+ AP_STATE_BORKED, /* broken */
NR_AP_STATES
};
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 576ac08777c5..ba261210c6da 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
+ [AP_STATE_UNBOUND] = {
+ [AP_EVENT_POLL] = ap_sm_nop,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
[AP_STATE_BORKED] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -725,6 +729,7 @@ static void __ap_flush_queue(struct ap_queue *aq)
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
+ aq->queue_count = 0;
}
void ap_flush_queue(struct ap_queue *aq)
@@ -743,7 +748,7 @@ void ap_queue_remove(struct ap_queue *aq)
/* reset with zero, also clears irq registration */
spin_lock_bh(&aq->lock);
ap_zapq(aq->qid);
- aq->state = AP_STATE_BORKED;
+ aq->state = AP_STATE_UNBOUND;
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_remove);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 2f92bbed4bf6..3e85d665c572 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -1079,7 +1079,7 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
rc = mkvp_cache_fetch(cardnr, domain, mkvp);
if (rc)
goto out;
- if (t->mkvp == mkvp[1]) {
+ if (t->mkvp == mkvp[1] && t->mkvp != mkvp[0]) {
DEBUG_DBG("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 31c6c847eaca..e9824c35c34f 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -15,7 +15,6 @@
#include "vfio_ap_private.h"
#define VFIO_AP_ROOT_NAME "vfio_ap"
-#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
#define VFIO_AP_DEV_NAME "matrix"
MODULE_AUTHOR("IBM Corporation");
@@ -24,10 +23,6 @@ MODULE_LICENSE("GPL v2");
static struct ap_driver vfio_ap_drv;
-static struct device_type vfio_ap_dev_type = {
- .name = VFIO_AP_DEV_TYPE_NAME,
-};
-
struct ap_matrix_dev *matrix_dev;
/* Only type 10 adapters (CEX4 and later) are supported
@@ -62,6 +57,22 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
kfree(matrix_dev);
}
+static int matrix_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return 1;
+}
+
+static struct bus_type matrix_bus = {
+ .name = "matrix",
+ .match = &matrix_bus_match,
+};
+
+static struct device_driver matrix_driver = {
+ .name = "vfio_ap",
+ .bus = &matrix_bus,
+ .suppress_bind_attrs = true,
+};
+
static int vfio_ap_matrix_dev_create(void)
{
int ret;
@@ -71,6 +82,10 @@ static int vfio_ap_matrix_dev_create(void)
if (IS_ERR(root_device))
return PTR_ERR(root_device);
+ ret = bus_register(&matrix_bus);
+ if (ret)
+ goto bus_register_err;
+
matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
if (!matrix_dev) {
ret = -ENOMEM;
@@ -87,30 +102,41 @@ static int vfio_ap_matrix_dev_create(void)
mutex_init(&matrix_dev->lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
- matrix_dev->device.type = &vfio_ap_dev_type;
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
+ matrix_dev->device.bus = &matrix_bus;
matrix_dev->device.release = vfio_ap_matrix_dev_release;
- matrix_dev->device.driver = &vfio_ap_drv.driver;
+ matrix_dev->vfio_ap_drv = &vfio_ap_drv;
ret = device_register(&matrix_dev->device);
if (ret)
goto matrix_reg_err;
+ ret = driver_register(&matrix_driver);
+ if (ret)
+ goto matrix_drv_err;
+
return 0;
+matrix_drv_err:
+ device_unregister(&matrix_dev->device);
matrix_reg_err:
put_device(&matrix_dev->device);
matrix_alloc_err:
+ bus_unregister(&matrix_bus);
+bus_register_err:
root_device_unregister(root_device);
-
return ret;
}
static void vfio_ap_matrix_dev_destroy(void)
{
+ struct device *root_device = matrix_dev->device.parent;
+
+ driver_unregister(&matrix_driver);
device_unregister(&matrix_dev->device);
- root_device_unregister(matrix_dev->device.parent);
+ bus_unregister(&matrix_bus);
+ root_device_unregister(root_device);
}
static int __init vfio_ap_init(void)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 272ef427dcc0..900b9cf20ca5 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -198,8 +198,8 @@ static int vfio_ap_verify_queue_reserved(unsigned long *apid,
qres.apqi = apqi;
qres.reserved = false;
- ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
- vfio_ap_has_queue);
+ ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &qres, vfio_ap_has_queue);
if (ret)
return ret;
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 5675492233c7..76b7f98e47e9 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -40,6 +40,7 @@ struct ap_matrix_dev {
struct ap_config_info info;
struct list_head mdev_list;
struct mutex lock;
+ struct ap_driver *vfio_ap_drv;
};
extern struct ap_matrix_dev *matrix_dev;
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index f2d6bbe57a6f..bc55ec316adb 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
obj-$(CONFIG_LCS) += lcs.o
-qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_ethtool.o
obj-$(CONFIG_QETH) += qeth.o
qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
obj-$(CONFIG_QETH_L2) += qeth_l2.o
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index dcbf5c857743..3e132592c1fe 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -89,8 +89,8 @@ static int register_sba(struct ism_dev *ism)
dma_addr_t dma_handle;
struct ism_sba *sba;
- sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
- &dma_handle, GFP_KERNEL);
+ sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+ GFP_KERNEL);
if (!sba)
return -ENOMEM;
@@ -116,8 +116,8 @@ static int register_ieq(struct ism_dev *ism)
dma_addr_t dma_handle;
struct ism_eq *ieq;
- ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
- &dma_handle, GFP_KERNEL);
+ ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+ GFP_KERNEL);
if (!ieq)
return -ENOMEM;
@@ -141,10 +141,13 @@ static int register_ieq(struct ism_dev *ism)
static int unregister_sba(struct ism_dev *ism)
{
+ int ret;
+
if (!ism->sba)
return 0;
- if (ism_cmd_simple(ism, ISM_UNREG_SBA))
+ ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
+ if (ret && ret != ISM_ERROR)
return -EIO;
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
@@ -158,10 +161,13 @@ static int unregister_sba(struct ism_dev *ism)
static int unregister_ieq(struct ism_dev *ism)
{
+ int ret;
+
if (!ism->ieq)
return 0;
- if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
+ ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
+ if (ret && ret != ISM_ERROR)
return -EIO;
dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
@@ -234,10 +240,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
- &dmb->dma_addr, GFP_KERNEL |
- __GFP_NOWARN | __GFP_NOMEMALLOC |
- __GFP_COMP | __GFP_NORETRY);
+ dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+ &dmb->dma_addr,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
if (!dmb->cpu_addr)
clear_bit(dmb->sba_idx, ism->sba_bitmap);
@@ -288,7 +293,7 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
cmd.request.dmb_tok = dmb->dmb_tok;
ret = ism_cmd(ism, &cmd);
- if (ret)
+ if (ret && ret != ISM_ERROR)
goto out;
ism_free_dmb(ism, dmb);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 0ee026947f20..c851cf6e01c4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -18,10 +18,10 @@
#include <linux/in6.h>
#include <linux/bitops.h>
#include <linux/seq_file.h>
-#include <linux/ethtool.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
@@ -34,6 +34,8 @@
#include <asm/ccwgroup.h>
#include <asm/sysinfo.h>
+#include <uapi/linux/if_link.h>
+
#include "qeth_core_mpc.h"
/**
@@ -112,59 +114,6 @@ static inline u32 qeth_get_device_id(struct ccw_device *cdev)
#define CCW_DEVID(cdev) (qeth_get_device_id(cdev))
#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card)))
-/**
- * card stuff
- */
-struct qeth_perf_stats {
- unsigned int bufs_rec;
- unsigned int bufs_sent;
- unsigned int buf_elements_sent;
-
- unsigned int skbs_sent_pack;
- unsigned int bufs_sent_pack;
-
- unsigned int sc_dp_p;
- unsigned int sc_p_dp;
- /* qdio_cq_handler: number of times called, time spent in */
- __u64 cq_start_time;
- unsigned int cq_cnt;
- unsigned int cq_time;
- /* qdio_input_handler: number of times called, time spent in */
- __u64 inbound_start_time;
- unsigned int inbound_cnt;
- unsigned int inbound_time;
- /* qeth_send_packet: number of times called, time spent in */
- __u64 outbound_start_time;
- unsigned int outbound_cnt;
- unsigned int outbound_time;
- /* qdio_output_handler: number of times called, time spent in */
- __u64 outbound_handler_start_time;
- unsigned int outbound_handler_cnt;
- unsigned int outbound_handler_time;
- /* number of calls to and time spent in do_QDIO for inbound queue */
- __u64 inbound_do_qdio_start_time;
- unsigned int inbound_do_qdio_cnt;
- unsigned int inbound_do_qdio_time;
- /* number of calls to and time spent in do_QDIO for outbound queues */
- __u64 outbound_do_qdio_start_time;
- unsigned int outbound_do_qdio_cnt;
- unsigned int outbound_do_qdio_time;
- unsigned int large_send_bytes;
- unsigned int large_send_cnt;
- unsigned int sg_skbs_sent;
- /* initial values when measuring starts */
- unsigned long initial_rx_packets;
- unsigned long initial_tx_packets;
- /* inbound scatter gather data */
- unsigned int sg_skbs_rx;
- unsigned int sg_frags_rx;
- unsigned int sg_alloc_page_rx;
- unsigned int tx_csum;
- unsigned int tx_lin;
- unsigned int tx_linfail;
- unsigned int rx_csum;
-};
-
/* Routing stuff */
struct qeth_routing_info {
enum qeth_routing_types type;
@@ -494,10 +443,53 @@ enum qeth_out_q_states {
QETH_OUT_Q_LOCKED_FLUSH,
};
+#define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val))
+#define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1)
+
+#define QETH_TXQ_STAT_ADD(_q, _stat, _val) ((_q)->stats._stat += (_val))
+#define QETH_TXQ_STAT_INC(_q, _stat) QETH_TXQ_STAT_ADD(_q, _stat, 1)
+
+struct qeth_card_stats {
+ u64 rx_bufs;
+ u64 rx_skb_csum;
+ u64 rx_sg_skbs;
+ u64 rx_sg_frags;
+ u64 rx_sg_alloc_page;
+
+ /* rtnl_link_stats64 */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 tx_errors;
+};
+
+struct qeth_out_q_stats {
+ u64 bufs;
+ u64 bufs_pack;
+ u64 buf_elements;
+ u64 skbs_pack;
+ u64 skbs_sg;
+ u64 skbs_csum;
+ u64 skbs_tso;
+ u64 skbs_linearized;
+ u64 skbs_linearized_fail;
+ u64 tso_bytes;
+ u64 packing_mode_switch;
+
+ /* rtnl_link_stats64 */
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+ u64 tx_dropped;
+};
+
struct qeth_qdio_out_q {
struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
+ struct qeth_out_q_stats stats;
int queue_no;
struct qeth_card *card;
atomic_t state;
@@ -527,7 +519,7 @@ struct qeth_qdio_info {
/* output */
int no_out_queues;
- struct qeth_qdio_out_q **out_qs;
+ struct qeth_qdio_out_q *out_qs[QETH_MAX_QUEUES];
struct qdio_outbuf_state *out_bufstates;
/* priority queueing */
@@ -559,8 +551,6 @@ enum qeth_card_states {
CARD_STATE_DOWN,
CARD_STATE_HARDSETUP,
CARD_STATE_SOFTSETUP,
- CARD_STATE_UP,
- CARD_STATE_RECOVER,
};
/**
@@ -594,8 +584,8 @@ struct qeth_channel;
struct qeth_cmd_buffer {
enum qeth_cmd_buffer_state state;
struct qeth_channel *channel;
+ struct qeth_reply *reply;
unsigned char *data;
- int rc;
void (*callback)(struct qeth_card *card, struct qeth_channel *channel,
struct qeth_cmd_buffer *iob);
};
@@ -672,6 +662,7 @@ struct qeth_card_info {
unsigned short chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
+ u8 open_when_online:1;
int guestlan;
int mac_bits;
enum qeth_card_types type;
@@ -701,7 +692,6 @@ struct qeth_card_options {
struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
enum qeth_discipline_id layer;
- int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
enum qeth_ipa_isolation_modes prev_isolation;
@@ -741,11 +731,6 @@ struct qeth_discipline {
struct qeth_ipa_cmd *cmd);
};
-struct qeth_vlan_vid {
- struct list_head list;
- unsigned short vid;
-};
-
enum qeth_addr_disposition {
QETH_DISP_ADDR_DELETE = 0,
QETH_DISP_ADDR_DO_NOTHING = 1,
@@ -782,18 +767,16 @@ struct qeth_card {
struct qeth_channel data;
struct net_device *dev;
- struct net_device_stats stats;
-
+ struct qeth_card_stats stats;
struct qeth_card_info info;
struct qeth_token token;
struct qeth_seqno seqno;
struct qeth_card_options options;
+ struct workqueue_struct *event_wq;
wait_queue_head_t wait_q;
spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct mutex vid_list_mutex; /* vid_list */
- struct list_head vid_list;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
DECLARE_HASHTABLE(ip_mc_htable, 4);
@@ -802,13 +785,11 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- struct task_struct *recovery_task;
spinlock_t ip_lock;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
- struct qeth_perf_stats perf_stats;
int read_or_write_problem;
struct qeth_osn_info osn_info;
struct qeth_discipline *discipline;
@@ -825,6 +806,11 @@ struct qeth_card {
struct work_struct close_dev_work;
};
+static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
+{
+ return card->state == CARD_STATE_SOFTSETUP;
+}
+
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
@@ -865,11 +851,6 @@ static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
return PFN_UP(end) - PFN_DOWN(start);
}
-static inline int qeth_get_micros(void)
-{
- return (int) (get_tod_clock() >> 12);
-}
-
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -894,8 +875,7 @@ static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
if ((card->dev->features & NETIF_F_RXCSUM) &&
(flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (card->options.performance_stats)
- card->perf_stats.rx_csum++;
+ QETH_CARD_STAT_INC(card, rx_skb_csum);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
@@ -957,14 +937,14 @@ static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
+extern const struct ethtool_ops qeth_ethtool_ops;
+extern const struct ethtool_ops qeth_osn_ethtool_ops;
extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
extern const struct attribute_group qeth_device_attr_group;
extern const struct attribute_group qeth_device_blkt_group;
extern const struct device_type qeth_generic_devtype;
-extern struct workqueue_struct *qeth_wq;
-int qeth_card_hw_is_reachable(struct qeth_card *);
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
@@ -976,11 +956,8 @@ extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
struct qeth_card *qeth_get_card_by_busid(char *bus_id);
-void qeth_set_recovery_task(struct qeth_card *);
-void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_wait_for_threads(struct qeth_card *, unsigned long);
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
@@ -1004,38 +981,29 @@ void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_clear_qdio_buffers(struct qeth_card *);
void qeth_setadp_promisc_mode(struct qeth_card *);
-struct net_device_stats *qeth_get_stats(struct net_device *);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
void qeth_prepare_control_data(struct qeth_card *, int,
struct qeth_cmd_buffer *);
void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ u16 cmd_length);
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
-int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
- void *reply_param);
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info);
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int offset, unsigned int hd_len,
int elements_needed);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-int qeth_core_get_sset_count(struct net_device *, int);
-void qeth_core_get_ethtool_stats(struct net_device *,
- struct ethtool_stats *, u64 *);
-void qeth_core_get_strings(struct net_device *, u32, u8 *);
-void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
-int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd);
int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
-void qeth_close_dev(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
enum qeth_ipa_funcs,
@@ -1047,11 +1015,16 @@ netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
+void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+int qeth_open(struct net_device *dev);
+int qeth_stop(struct net_device *dev);
+
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
- void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type,
+ void (*fill_header)(struct qeth_qdio_out_q *queue,
+ struct qeth_hdr *hdr, struct sk_buff *skb,
+ int ipv, int cast_type,
unsigned int data_len));
/* exports for OSN */
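
Since fill_header() callbacks now receive the TX queue instead of the card, a discipline's implementation can reach per-queue state directly and still get at the card via queue->card. A minimal, hypothetical sketch of the new callback shape (not taken from qeth_l2 or qeth_l3; the skbs_csum counter is assumed):

static void example_fill_header(struct qeth_qdio_out_q *queue,
                                struct qeth_hdr *hdr, struct sk_buff *skb,
                                int ipv, int cast_type, unsigned int data_len)
{
        hdr->hdr.l2.pkt_length = data_len;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                QETH_TXQ_STAT_INC(queue, skbs_csum);    /* hypothetical counter */
        /* protocol-specific fields as before; card-level info remains
         * reachable via queue->card where needed.
         */
}
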
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e63e03143ca7..197b0f5b63e7 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -74,35 +74,15 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
-struct workqueue_struct *qeth_wq;
-EXPORT_SYMBOL_GPL(qeth_wq);
-
-int qeth_card_hw_is_reachable(struct qeth_card *card)
-{
- return (card->state == CARD_STATE_SOFTSETUP) ||
- (card->state == CARD_STATE_UP);
-}
-EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);
-
static void qeth_close_dev_handler(struct work_struct *work)
{
struct qeth_card *card;
card = container_of(work, struct qeth_card, close_dev_work);
QETH_CARD_TEXT(card, 2, "cldevhdl");
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
ccwgroup_set_offline(card->gdev);
}
-void qeth_close_dev(struct qeth_card *card)
-{
- QETH_CARD_TEXT(card, 2, "cldevsubm");
- queue_work(qeth_wq, &card->close_dev_work);
-}
-EXPORT_SYMBOL_GPL(qeth_close_dev);
-
static const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
@@ -193,23 +173,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "n/a";
}
-void qeth_set_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = current;
-}
-EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
-
-void qeth_clear_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = NULL;
-}
-EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
-
-static bool qeth_is_recovery_task(const struct qeth_card *card)
-{
- return card->recovery_task == current;
-}
-
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
{
@@ -236,15 +199,6 @@ int qeth_threads_running(struct qeth_card *card, unsigned long threads)
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
-int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
-{
- if (qeth_is_recovery_task(card))
- return 0;
- return wait_event_interruptible(card->wait_q,
- qeth_threads_running(card, threads) == 0);
-}
-EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
-
void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
@@ -292,8 +246,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
QETH_CARD_TEXT(card, 2, "realcbp");
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
+ if (card->state != CARD_STATE_DOWN)
return -EPERM;
/* TODO: steel/add buffers from/to a running card's buffer pool (?) */
@@ -566,6 +519,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -592,6 +546,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
if (reply) {
refcount_set(&reply->refcnt, 1);
atomic_set(&reply->received, 0);
+ init_waitqueue_head(&reply->wait_q);
}
return reply;
}
@@ -607,6 +562,26 @@ static void qeth_put_reply(struct qeth_reply *reply)
kfree(reply);
}
+static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply)
+{
+ spin_lock_irq(&card->lock);
+ list_add_tail(&reply->list, &card->cmd_waiter_list);
+ spin_unlock_irq(&card->lock);
+}
+
+static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
+{
+ spin_lock_irq(&card->lock);
+ list_del(&reply->list);
+ spin_unlock_irq(&card->lock);
+}
+
+static void qeth_notify_reply(struct qeth_reply *reply)
+{
+ atomic_inc(&reply->received);
+ wake_up(&reply->wait_q);
+}
+
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
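
These three helpers split out the reply bookkeeping that used to be open-coded: the sender makes its reply findable, the completion or error path calls qeth_notify_reply(), and the sender unlinks the reply once it is done waiting. An illustrative fragment of the intended pairing (assuming the wait_q/received members initialized in qeth_alloc_reply() above; not a verbatim copy of qeth_send_control_data()):

static int example_wait_for_reply(struct qeth_card *card,
                                  struct qeth_reply *reply,
                                  unsigned long timeout)
{
        qeth_enqueue_reply(card, reply);        /* findable by seqno from now on */

        /* ... start the channel I/O for the matching command buffer ... */

        /* qeth_send_control_data_cb() or qeth_cancel_cmd() sets reply->rc
         * and calls qeth_notify_reply() once the answer (or an error) is in:
         */
        wait_event_interruptible_timeout(reply->wait_q,
                                         atomic_read(&reply->received),
                                         timeout);

        qeth_dequeue_reply(card, reply);        /* unlink even on timeout */
        return reply->rc;
}
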
@@ -644,7 +619,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
dev_err(&card->gdev->dev,
"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
QETH_CARD_IFNAME(card));
- qeth_close_dev(card);
+ schedule_work(&card->close_dev_work);
} else {
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
@@ -683,19 +658,15 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
void qeth_clear_ipacmd_list(struct qeth_card *card)
{
- struct qeth_reply *reply, *r;
+ struct qeth_reply *reply;
unsigned long flags;
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
- qeth_get_reply(reply);
+ list_for_each_entry(reply, &card->cmd_waiter_list, list) {
reply->rc = -EIO;
- atomic_inc(&reply->received);
- list_del_init(&reply->list);
- wake_up(&reply->wait_q);
- qeth_put_reply(reply);
+ qeth_notify_reply(reply);
}
spin_unlock_irqrestore(&card->lock, flags);
}
@@ -752,7 +723,10 @@ void qeth_release_buffer(struct qeth_channel *channel,
spin_lock_irqsave(&channel->iob_lock, flags);
iob->state = BUF_STATE_FREE;
iob->callback = qeth_send_control_data_cb;
- iob->rc = 0;
+ if (iob->reply) {
+ qeth_put_reply(iob->reply);
+ iob->reply = NULL;
+ }
spin_unlock_irqrestore(&channel->iob_lock, flags);
wake_up(&channel->wait_q);
}
@@ -765,6 +739,17 @@ static void qeth_release_buffer_cb(struct qeth_card *card,
qeth_release_buffer(channel, iob);
}
+static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
+{
+ struct qeth_reply *reply = iob->reply;
+
+ if (reply) {
+ reply->rc = rc;
+ qeth_notify_reply(reply);
+ }
+ qeth_release_buffer(iob->channel, iob);
+}
+
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
struct qeth_cmd_buffer *buffer = NULL;
@@ -800,9 +785,9 @@ static void qeth_send_control_data_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
{
struct qeth_ipa_cmd *cmd = NULL;
- struct qeth_reply *reply, *r;
+ struct qeth_reply *reply = NULL;
+ struct qeth_reply *r;
unsigned long flags;
- int keep_reply;
int rc = 0;
QETH_CARD_TEXT(card, 4, "sndctlcb");
@@ -834,44 +819,40 @@ static void qeth_send_control_data_cb(struct qeth_card *card,
goto out;
}
+ /* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
- if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
- ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
+ list_for_each_entry(r, &card->cmd_waiter_list, list) {
+ if ((r->seqno == QETH_IDX_COMMAND_SEQNO) ||
+ (cmd && (r->seqno == cmd->hdr.seqno))) {
+ reply = r;
+ /* take the object outside the lock */
qeth_get_reply(reply);
- list_del_init(&reply->list);
- spin_unlock_irqrestore(&card->lock, flags);
- keep_reply = 0;
- if (reply->callback != NULL) {
- if (cmd) {
- reply->offset = (__u16)((char *)cmd -
- (char *)iob->data);
- keep_reply = reply->callback(card,
- reply,
- (unsigned long)cmd);
- } else
- keep_reply = reply->callback(card,
- reply,
- (unsigned long)iob);
- }
- if (cmd)
- reply->rc = (u16) cmd->hdr.return_code;
- else if (iob->rc)
- reply->rc = iob->rc;
- if (keep_reply) {
- spin_lock_irqsave(&card->lock, flags);
- list_add_tail(&reply->list,
- &card->cmd_waiter_list);
- spin_unlock_irqrestore(&card->lock, flags);
- } else {
- atomic_inc(&reply->received);
- wake_up(&reply->wait_q);
- }
- qeth_put_reply(reply);
- goto out;
+ break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
+
+ if (!reply)
+ goto out;
+
+ if (!reply->callback) {
+ rc = 0;
+ } else {
+ if (cmd) {
+ reply->offset = (u16)((char *)cmd - (char *)iob->data);
+ rc = reply->callback(card, reply, (unsigned long)cmd);
+ } else {
+ rc = reply->callback(card, reply, (unsigned long)iob);
+ }
+ }
+
+ if (rc <= 0) {
+ reply->rc = rc;
+ qeth_notify_reply(reply);
+ }
+
+ qeth_put_reply(reply);
+
out:
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
@@ -1002,9 +983,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
return 0;
}
-static long qeth_check_irb_error(struct qeth_card *card,
- struct ccw_device *cdev, unsigned long intparm,
- struct irb *irb)
+static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
+ unsigned long intparm, struct irb *irb)
{
if (!IS_ERR(irb))
return 0;
@@ -1015,7 +995,7 @@ static long qeth_check_irb_error(struct qeth_card *card,
CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
- break;
+ return -EIO;
case -ETIMEDOUT:
dev_warn(&cdev->dev, "A hardware operation timed out"
" on the device\n");
@@ -1027,14 +1007,14 @@ static long qeth_check_irb_error(struct qeth_card *card,
wake_up(&card->wait_q);
}
}
- break;
+ return -ETIMEDOUT;
default:
QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
+ return PTR_ERR(irb);
}
- return PTR_ERR(irb);
}
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
@@ -1069,10 +1049,11 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (qeth_intparm_is_iob(intparm))
iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
- if (qeth_check_irb_error(card, cdev, intparm, irb)) {
+ rc = qeth_check_irb_error(card, cdev, intparm, irb);
+ if (rc) {
/* IO was terminated, free its resources. */
if (iob)
- qeth_release_buffer(iob->channel, iob);
+ qeth_cancel_cmd(iob, rc);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
return;
@@ -1127,6 +1108,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
+ if (iob)
+ qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
@@ -1288,7 +1271,6 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
channel->iob[cnt].state = BUF_STATE_FREE;
channel->iob[cnt].channel = channel;
channel->iob[cnt].callback = qeth_send_control_data_cb;
- channel->iob[cnt].rc = 0;
}
if (cnt < QETH_CMD_BUFFER_NO) {
qeth_clean_channel(channel);
@@ -1430,7 +1412,6 @@ static void qeth_setup_card(struct qeth_card *card)
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
- mutex_init(&card->vid_list_mutex);
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
@@ -1466,6 +1447,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
CARD_RDEV(card) = gdev->cdev[0];
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];
+
+ card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
+ if (!card->event_wq)
+ goto out_wq;
if (qeth_setup_channel(&card->read, true))
goto out_ip;
if (qeth_setup_channel(&card->write, true))
@@ -1481,6 +1466,8 @@ out_data:
out_channel:
qeth_clean_channel(&card->read);
out_ip:
+ destroy_workqueue(card->event_wq);
+out_wq:
dev_set_drvdata(&gdev->dev, NULL);
kfree(card);
out:
@@ -1809,6 +1796,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}
@@ -1878,6 +1866,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}
@@ -2008,7 +1997,7 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
card->seqno.pdu_hdr++;
memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
- QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
@@ -2025,24 +2014,22 @@ EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
* for the IPA commands.
* @reply_param: private pointer passed to the callback
*
- * Returns the value of the `return_code' field of the response
- * block returned from the hardware, or other error indication.
- * Value of zero indicates successful execution of the command.
- *
* Callback function gets called one or more times, with cb_cmd
* pointing to the response returned by the hardware. Callback
- * function must return non-zero if more reply blocks are expected,
- * and zero if the last or only reply block is received. Callback
- * function can get the value of the reply_param pointer from the
+ * function must return
+ * > 0 if more reply blocks are expected,
+ * 0 if the last or only reply block is received, and
+ * < 0 on error.
+ * Callback function can get the value of the reply_param pointer from the
* field 'param' of the structure qeth_reply.
*/
-int qeth_send_control_data(struct qeth_card *card, int len,
- struct qeth_cmd_buffer *iob,
- int (*reply_cb)(struct qeth_card *cb_card,
- struct qeth_reply *cb_reply,
- unsigned long cb_cmd),
- void *reply_param)
+static int qeth_send_control_data(struct qeth_card *card, int len,
+ struct qeth_cmd_buffer *iob,
+ int (*reply_cb)(struct qeth_card *cb_card,
+ struct qeth_reply *cb_reply,
+ unsigned long cb_cmd),
+ void *reply_param)
{
struct qeth_channel *channel = iob->channel;
int rc;
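
A reply callback that follows the return-value contract documented above might look like this (illustrative; struct example_query_state and its fields are hypothetical and not part of the driver):

static int example_query_cb(struct qeth_card *card, struct qeth_reply *reply,
                            unsigned long data)
{
        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
        struct example_query_state *state = reply->param;

        if (cmd->hdr.return_code)
                return -EIO;    /* < 0: error, completes the command */

        state->parts_received++;
        if (state->parts_received < state->parts_expected)
                return 1;       /* > 0: expect more reply blocks */

        return 0;               /* 0: last (or only) block received */
}
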
@@ -2058,12 +2045,15 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply = qeth_alloc_reply(card);
if (!reply) {
+ qeth_release_buffer(channel, iob);
return -ENOMEM;
}
reply->callback = reply_cb;
reply->param = reply_param;
- init_waitqueue_head(&reply->wait_q);
+ /* pairs with qeth_release_buffer(): */
+ qeth_get_reply(reply);
+ iob->reply = reply;
while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
@@ -2078,9 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
qeth_prepare_control_data(card, len, iob);
- spin_lock_irq(&card->lock);
- list_add_tail(&reply->list, &card->cmd_waiter_list);
- spin_unlock_irq(&card->lock);
+ qeth_enqueue_reply(card, reply);
timeout = jiffies + event_timeout;
@@ -2093,10 +2081,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
- spin_lock_irq(&card->lock);
- list_del_init(&reply->list);
+ qeth_dequeue_reply(card, reply);
qeth_put_reply(reply);
- spin_unlock_irq(&card->lock);
qeth_release_buffer(channel, iob);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
@@ -2118,21 +2104,16 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
}
+ qeth_dequeue_reply(card, reply);
rc = reply->rc;
qeth_put_reply(reply);
return rc;
time_err:
- reply->rc = -ETIME;
- spin_lock_irq(&card->lock);
- list_del_init(&reply->list);
- spin_unlock_irq(&card->lock);
- atomic_inc(&reply->received);
- rc = reply->rc;
+ qeth_dequeue_reply(card, reply);
qeth_put_reply(reply);
- return rc;
+ return -ETIME;
}
-EXPORT_SYMBOL_GPL(qeth_send_control_data);
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
@@ -2337,9 +2318,8 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
QETH_DBF_TEXT(SETUP, 2, "olmlimit");
dev_err(&card->gdev->dev, "A connection could not be "
"established because of an OLM limit\n");
- iob->rc = -EMLINK;
+ return -EMLINK;
}
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -2389,11 +2369,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
return 0;
}
-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
if (!q)
return;
+ qeth_clear_outq_buffers(q, 1);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
@@ -2432,12 +2413,6 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
goto out_freeinq;
/* outbound */
- card->qdio.out_qs =
- kcalloc(card->qdio.no_out_queues,
- sizeof(struct qeth_qdio_out_q *),
- GFP_KERNEL);
- if (!card->qdio.out_qs)
- goto out_freepool;
for (i = 0; i < card->qdio.no_out_queues; ++i) {
card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
if (!card->qdio.out_qs[i])
@@ -2468,12 +2443,9 @@ out_freeoutqbufs:
}
out_freeoutq:
while (i > 0) {
- qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+ qeth_free_output_queue(card->qdio.out_qs[--i]);
+ card->qdio.out_qs[i] = NULL;
}
- kfree(card->qdio.out_qs);
- card->qdio.out_qs = NULL;
-out_freepool:
qeth_free_buffer_pool(card);
out_freeinq:
qeth_free_qdio_queue(card->qdio.in_q);
@@ -2502,13 +2474,9 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
/* inbound buffer pool */
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
- if (card->qdio.out_qs) {
- for (i = 0; i < card->qdio.no_out_queues; ++i) {
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
- qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
- }
- kfree(card->qdio.out_qs);
- card->qdio.out_qs = NULL;
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
+ qeth_free_output_queue(card->qdio.out_qs[i]);
+ card->qdio.out_qs[i] = NULL;
}
}
@@ -2715,8 +2683,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
} else {
free_page((unsigned long)entry->elements[i]);
entry->elements[i] = page_address(page);
- if (card->options.performance_stats)
- card->perf_stats.sg_alloc_page_rx++;
+ QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
}
}
}
@@ -2835,14 +2802,20 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
cmd->hdr.prot_version = prot;
}
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ u16 cmd_length)
{
+ u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length;
u8 prot_type = qeth_mpc_select_prot_type(card);
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+ memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
+ memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
+ memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+ memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
@@ -2853,7 +2826,7 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
iob = qeth_get_buffer(&card->write);
if (iob) {
- qeth_prepare_ipa_cmd(card, iob);
+ qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd));
qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
} else {
dev_warn(&card->gdev->dev,
@@ -2866,6 +2839,14 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
+static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ return (cmd->hdr.return_code) ? -EIO : 0;
+}
+
/**
* qeth_send_ipa_cmd() - send an IPA command
*
@@ -2877,11 +2858,15 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
unsigned long),
void *reply_param)
{
+ u16 length;
int rc;
QETH_CARD_TEXT(card, 4, "sendipa");
- rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
- iob, reply_cb, reply_param);
+
+ if (reply_cb == NULL)
+ reply_cb = qeth_send_ipa_cmd_cb;
+ memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
+ rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param);
if (rc == -ETIME) {
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
@@ -2890,9 +2875,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
+static int qeth_send_startlan_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
+ return -ENETDOWN;
+
+ return (cmd->hdr.return_code) ? -EIO : 0;
+}
+
static int qeth_send_startlan(struct qeth_card *card)
{
- int rc;
struct qeth_cmd_buffer *iob;
QETH_DBF_TEXT(SETUP, 2, "strtlan");
@@ -2900,8 +2895,7 @@ static int qeth_send_startlan(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
if (!iob)
return -ENOMEM;
- rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
- return rc;
+ return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
@@ -2919,7 +2913,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 3, "quyadpcb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
@@ -2974,19 +2968,18 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) data;
switch (cmd->hdr.return_code) {
+ case IPA_RC_SUCCESS:
+ break;
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
- return 0;
+ return -EOPNOTSUPP;
default:
- if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
- CARD_DEVID(card),
- cmd->hdr.return_code);
- return 0;
- }
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
+ CARD_DEVID(card), cmd->hdr.return_code);
+ return -EIO;
}
if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
@@ -3024,7 +3017,7 @@ static int qeth_query_switch_attributes_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "qswiatcb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
sw_info = (struct qeth_switch_info *)reply->param;
attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
@@ -3056,15 +3049,15 @@ int qeth_query_switch_attributes(struct qeth_card *card,
static int qeth_query_setdiagass_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
- __u16 rc;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ u16 rc = cmd->hdr.return_code;
- cmd = (struct qeth_ipa_cmd *)data;
- rc = cmd->hdr.return_code;
- if (rc)
+ if (rc) {
QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
- else
- card->info.diagass_support = cmd->data.diagass.ext;
+ return -EIO;
+ }
+
+ card->info.diagass_support = cmd->data.diagass.ext;
return 0;
}
@@ -3111,13 +3104,13 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
static int qeth_hw_trap_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
- __u16 rc;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ u16 rc = cmd->hdr.return_code;
- cmd = (struct qeth_ipa_cmd *)data;
- rc = cmd->hdr.return_code;
- if (rc)
+ if (rc) {
QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
+ return -EIO;
+ }
return 0;
}
@@ -3166,7 +3159,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card,
buf->element[14].sflags);
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].sflags) == 0x12) {
- card->stats.rx_dropped++;
+ QETH_CARD_STAT_INC(card, rx_dropped);
return 0;
} else
return 1;
@@ -3230,17 +3223,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
* 'index') un-requeued -> this buffer is the first buffer that
* will be requeued the next time
*/
- if (card->options.performance_stats) {
- card->perf_stats.inbound_do_qdio_cnt++;
- card->perf_stats.inbound_do_qdio_start_time =
- qeth_get_micros();
- }
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
queue->next_buf_to_init, count);
- if (card->options.performance_stats)
- card->perf_stats.inbound_do_qdio_time +=
- qeth_get_micros() -
- card->perf_stats.inbound_do_qdio_start_time;
if (rc) {
QETH_CARD_TEXT(card, 2, "qinberr");
}
@@ -3317,8 +3301,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
QETH_CARD_TEXT(queue->card, 6, "np->pack");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.sc_dp_p++;
+ QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue->do_pack = 1;
}
}
@@ -3337,8 +3320,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
QETH_CARD_TEXT(queue->card, 6, "pack->np");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.sc_p_dp++;
+ QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue->do_pack = 0;
return qeth_prep_flush_pack_buffer(queue);
}
@@ -3392,25 +3374,16 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
+ QETH_TXQ_STAT_ADD(queue, bufs, count);
netif_trans_update(queue->card->dev);
- if (queue->card->options.performance_stats) {
- queue->card->perf_stats.outbound_do_qdio_cnt++;
- queue->card->perf_stats.outbound_do_qdio_start_time =
- qeth_get_micros();
- }
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
atomic_add(count, &queue->used_buffers);
-
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.outbound_do_qdio_time +=
- qeth_get_micros() -
- queue->card->perf_stats.outbound_do_qdio_start_time;
if (rc) {
- queue->card->stats.tx_errors += count;
+ QETH_TXQ_STAT_ADD(queue, tx_errors, count);
/* ignore temporary SIGA errors without busy condition */
if (rc == -ENOBUFS)
return;
@@ -3425,8 +3398,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
qeth_schedule_recovery(queue->card);
return;
}
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.bufs_sent += count;
}
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
@@ -3457,10 +3428,8 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
if (!flush_cnt &&
!atomic_read(&queue->set_pci_flags_count))
flush_cnt += qeth_prep_flush_pack_buffer(queue);
- if (queue->card->options.performance_stats &&
- q_was_packing)
- queue->card->perf_stats.bufs_sent_pack +=
- flush_cnt;
+ if (q_was_packing)
+ QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
if (flush_cnt)
qeth_flush_buffers(queue, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3490,8 +3459,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
goto out;
}
- if (card->state != CARD_STATE_DOWN &&
- card->state != CARD_STATE_RECOVER) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -1;
goto out;
}
@@ -3515,7 +3483,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
int rc;
if (!qeth_is_cq(card, queue))
- goto out;
+ return;
QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
@@ -3524,12 +3492,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
if (qdio_err) {
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
- goto out;
- }
-
- if (card->options.performance_stats) {
- card->perf_stats.cq_cnt++;
- card->perf_stats.cq_start_time = qeth_get_micros();
+ return;
}
for (i = first_element; i < first_element + count; ++i) {
@@ -3557,16 +3520,6 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
}
card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
+ count) % QDIO_MAX_BUFFERS_PER_Q;
-
- netif_wake_queue(card->dev);
-
- if (card->options.performance_stats) {
- int delta_t = qeth_get_micros();
- delta_t -= card->perf_stats.cq_start_time;
- card->perf_stats.cq_time += delta_t;
- }
-out:
- return;
}
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
@@ -3602,11 +3555,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_schedule_recovery(card);
return;
}
- if (card->options.performance_stats) {
- card->perf_stats.outbound_handler_cnt++;
- card->perf_stats.outbound_handler_start_time =
- qeth_get_micros();
- }
+
for (i = first_element; i < (first_element + count); ++i) {
int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = queue->bufs[bidx];
@@ -3652,9 +3601,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_check_outbound_queue(queue);
netif_wake_queue(queue->card->dev);
- if (card->options.performance_stats)
- card->perf_stats.outbound_handler_time += qeth_get_micros() -
- card->perf_stats.outbound_handler_start_time;
}
/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
@@ -3775,11 +3721,12 @@ EXPORT_SYMBOL_GPL(qeth_count_elements);
* The number of needed buffer elements is returned in @elements.
* Error to create the hdr is indicated by returning with < 0.
*/
-static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr **hdr, unsigned int hdr_len,
- unsigned int proto_len, unsigned int *elements)
+static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr **hdr,
+ unsigned int hdr_len, unsigned int proto_len,
+ unsigned int *elements)
{
- const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card);
const unsigned int contiguous = proto_len ? proto_len : 1;
unsigned int __elements;
addr_t start, end;
@@ -3818,15 +3765,12 @@ check_layout:
}
rc = skb_linearize(skb);
- if (card->options.performance_stats) {
- if (rc)
- card->perf_stats.tx_linfail++;
- else
- card->perf_stats.tx_lin++;
- }
- if (rc)
+ if (rc) {
+ QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
return rc;
+ }
+ QETH_TXQ_STAT_INC(queue, skbs_linearized);
/* Linearization changed the layout, re-evaluate: */
goto check_layout;
}
@@ -3928,7 +3872,6 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
{
struct qdio_buffer *buffer = buf->buffer;
bool is_first_elem = true;
- int flush_cnt = 0;
__skb_queue_tail(&buf->skb_list, skb);
@@ -3949,24 +3892,21 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
if (!queue->do_pack) {
QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
- /* set state to PRIMED -> will be flushed */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- flush_cnt = 1;
} else {
QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
- if (queue->card->options.performance_stats)
- queue->card->perf_stats.skbs_sent_pack++;
- if (buf->next_element_to_fill >=
- QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
- /*
- * packed buffer if full -> set state PRIMED
- * -> will be flushed
- */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- flush_cnt = 1;
- }
+
+ QETH_TXQ_STAT_INC(queue, skbs_pack);
+ /* If the buffer still has free elements, keep using it. */
+ if (buf->next_element_to_fill <
+ QETH_MAX_BUFFER_ELEMENTS(queue->card))
+ return 0;
}
- return flush_cnt;
+
+ /* flush out the buffer */
+ atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+ queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ return 1;
}
static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
@@ -3982,7 +3922,6 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
qeth_flush_buffers(queue, index, 1);
return 0;
@@ -4040,10 +3979,9 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
}
- tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
- queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
- QDIO_MAX_BUFFERS_PER_Q;
- flush_count += tmp;
+
+ flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
+ hd_len);
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
@@ -4071,8 +4009,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
out:
/* at this point the queue is UNLOCKED again */
- if (queue->card->options.performance_stats && do_pack)
- queue->card->perf_stats.bufs_sent_pack += flush_count;
+ if (do_pack)
+ QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
return rc;
}
@@ -4096,8 +4034,9 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
- void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type,
+ void (*fill_header)(struct qeth_qdio_out_q *queue,
+ struct qeth_hdr *hdr, struct sk_buff *skb,
+ int ipv, int cast_type,
unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
@@ -4122,7 +4061,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
if (rc)
return rc;
- push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
+ push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
&elements);
if (push_len < 0)
return push_len;
@@ -4132,7 +4071,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
- fill_header(card, hdr, skb, ipv, cast_type, frame_len);
+ fill_header(queue, hdr, skb, ipv, cast_type, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
@@ -4149,14 +4088,12 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
}
if (!rc) {
- if (card->options.performance_stats) {
- card->perf_stats.buf_elements_sent += elements;
- if (is_sg)
- card->perf_stats.sg_skbs_sent++;
- if (is_tso) {
- card->perf_stats.large_send_bytes += frame_len;
- card->perf_stats.large_send_cnt++;
- }
+ QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
+ if (is_sg)
+ QETH_TXQ_STAT_INC(queue, skbs_sg);
+ if (is_tso) {
+ QETH_TXQ_STAT_INC(queue, skbs_tso);
+ QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
}
} else {
if (!push_len)
@@ -4183,7 +4120,7 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
card->info.promisc_mode = setparms->data.mode;
- return 0;
+ return (cmd->hdr.return_code) ? -EIO : 0;
}
void qeth_setadp_promisc_mode(struct qeth_card *card)
@@ -4215,18 +4152,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
-struct net_device_stats *qeth_get_stats(struct net_device *dev)
-{
- struct qeth_card *card;
-
- card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 5, "getstat");
-
- return &card->stats;
-}
-EXPORT_SYMBOL_GPL(qeth_get_stats);
-
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -4235,12 +4160,15 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "chgmaccb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
adp_cmd = &cmd->data.setadapterparms;
+ if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
+ return -EADDRNOTAVAIL;
+
if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
!(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
- return 0;
+ return -EADDRNOTAVAIL;
ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
return 0;
@@ -4279,7 +4207,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setaccb");
if (cmd->hdr.return_code)
- return 0;
+ return -EIO;
qeth_setadpparms_inspect_rc(cmd);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
@@ -4353,7 +4281,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
card->options.isolation = card->options.prev_isolation;
break;
}
- return 0;
+ return (cmd->hdr.return_code) ? -EIO : 0;
}
static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
@@ -4417,7 +4345,7 @@ void qeth_tx_timeout(struct net_device *dev)
card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "txtimeo");
- card->stats.tx_errors++;
+ QETH_CARD_STAT_INC(card, tx_errors);
qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
@@ -4487,27 +4415,6 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
return rc;
}
-static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, int len,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
- unsigned long),
- void *reply_param)
-{
- u16 s1, s2;
-
- QETH_CARD_TEXT(card, 4, "sendsnmp");
-
- /* adjust PDU length fields in IPA_PDU_HEADER */
- s1 = (u32) IPA_PDU_HEADER_SIZE + len;
- s2 = (u32) len;
- memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
- return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
- reply_cb, reply_param);
-}
-
static int qeth_snmp_command_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long sdata)
{
@@ -4525,13 +4432,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
if (cmd->hdr.return_code) {
QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
- return 0;
+ return -EIO;
}
if (cmd->data.setadapterparms.hdr.return_code) {
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
- return 0;
+ return -EIO;
}
data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
if (cmd->data.setadapterparms.hdr.seq_no == 1) {
@@ -4546,9 +4453,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
- QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
- cmd->hdr.return_code = IPA_RC_ENOMEM;
- return 0;
+ QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
+ return -ENOSPC;
}
QETH_CARD_TEXT_(card, 4, "snore%i",
cmd->data.setadapterparms.hdr.used_total);
@@ -4613,10 +4519,13 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
rc = -ENOMEM;
goto out;
}
+
+ /* for large requests, fix-up the length fields: */
+ qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len);
+
cmd = __ipa_cmd(iob);
memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
- rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
- qeth_snmp_command_cb, (void *)&qinfo);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
CARD_DEVID(card), rc);
@@ -4640,16 +4549,14 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 3, "qoatcb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
priv = (struct qeth_qoat_priv *)reply->param;
resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
resdata = (char *)data + 28;
- if (resdatalen > (priv->buffer_len - priv->response_len)) {
- cmd->hdr.return_code = IPA_RC_FFFF;
- return 0;
- }
+ if (resdatalen > (priv->buffer_len - priv->response_len))
+ return -ENOSPC;
memcpy((priv->buffer + priv->response_len), resdata,
resdatalen);
@@ -4722,9 +4629,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, &oat_data,
sizeof(struct qeth_query_oat_data)))
rc = -EFAULT;
- } else
- if (rc == IPA_RC_FFFF)
- rc = -EFAULT;
+ }
out_free:
vfree(priv.buffer);
@@ -4741,7 +4646,7 @@ static int qeth_query_card_info_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "qcrdincb");
if (qeth_setadpparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
card_info = &cmd->data.setadapterparms.data.card_info;
carrier_info->card_type = card_info->card_type;
@@ -4750,8 +4655,8 @@ static int qeth_query_card_info_cb(struct qeth_card *card,
return 0;
}
-static int qeth_query_card_info(struct qeth_card *card,
- struct carrier_info *carrier_info)
+int qeth_query_card_info(struct qeth_card *card,
+ struct carrier_info *carrier_info)
{
struct qeth_cmd_buffer *iob;
@@ -4979,8 +4884,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.output_handler = qeth_qdio_output_handler;
init_data.queue_start_poll_array = queue_start_poll;
init_data.int_parm = (unsigned long) card;
- init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
- init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+ init_data.input_sbal_addr_array = in_sbal_ptrs;
+ init_data.output_sbal_addr_array = out_sbal_ptrs;
init_data.output_sbal_state_array = card->qdio.out_bufstates;
init_data.scan_threshold =
(card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
@@ -5028,6 +4933,7 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
qeth_clean_channel(&card->data);
+ destroy_workqueue(card->event_wq);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
@@ -5142,25 +5048,16 @@ retriable:
rc = qeth_send_startlan(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
- if (rc == IPA_RC_LAN_OFFLINE) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
+ if (rc == -ENETDOWN) {
+ dev_warn(&card->gdev->dev, "The LAN is offline\n");
*carrier_ok = false;
} else {
- rc = -ENODEV;
goto out;
}
} else {
*carrier_ok = true;
}
- if (qeth_netdev_is_registered(card->dev)) {
- if (*carrier_ok)
- netif_carrier_on(card->dev);
- else
- netif_carrier_off(card->dev);
- }
-
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5306,7 +5203,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
dev_kfree_skb_any(skb);
- card->stats.rx_errors++;
+ QETH_CARD_STAT_INC(card, rx_errors);
return NULL;
}
element++;
@@ -5318,16 +5215,17 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
}
*__element = element;
*__offset = offset;
- if (use_rx_sg && card->options.performance_stats) {
- card->perf_stats.sg_skbs_rx++;
- card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
+ if (use_rx_sg) {
+ QETH_CARD_STAT_INC(card, rx_sg_skbs);
+ QETH_CARD_STAT_ADD(card, rx_sg_frags,
+ skb_shinfo(skb)->nr_frags);
}
return skb;
no_mem:
if (net_ratelimit()) {
QETH_CARD_TEXT(card, 2, "noskbmem");
}
- card->stats.rx_dropped++;
+ QETH_CARD_STAT_INC(card, rx_dropped);
return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
@@ -5340,11 +5238,6 @@ int qeth_poll(struct napi_struct *napi, int budget)
int done;
int new_budget = budget;
- if (card->options.performance_stats) {
- card->perf_stats.inbound_cnt++;
- card->perf_stats.inbound_start_time = qeth_get_micros();
- }
-
while (1) {
if (!card->rx.b_count) {
card->rx.qdio_err = 0;
@@ -5373,8 +5266,7 @@ int qeth_poll(struct napi_struct *napi, int budget)
done = 1;
if (done) {
- if (card->options.performance_stats)
- card->perf_stats.bufs_rec++;
+ QETH_CARD_STAT_INC(card, rx_bufs);
qeth_put_buffer_pool_entry(card,
buffer->pool_entry);
qeth_queue_input_buffer(card, card->rx.b_index);
@@ -5402,9 +5294,6 @@ int qeth_poll(struct napi_struct *napi, int budget)
if (qdio_start_irq(card->data.ccwdev, 0))
napi_schedule(&card->napi);
out:
- if (card->options.performance_stats)
- card->perf_stats.inbound_time += qeth_get_micros() -
- card->perf_stats.inbound_start_time;
return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
@@ -5424,7 +5313,7 @@ static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
struct qeth_ipa_caps *caps = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
caps->supported = cmd->data.setassparms.data.caps.supported;
caps->enabled = cmd->data.setassparms.data.caps.enabled;
@@ -5434,18 +5323,18 @@ static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 4, "defadpcb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0) {
- cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
- if (cmd->hdr.prot_version == QETH_PROT_IPV4)
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- if (cmd->hdr.prot_version == QETH_PROT_IPV6)
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- }
+ if (cmd->hdr.return_code)
+ return -EIO;
+
+ cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+ if (cmd->hdr.prot_version == QETH_PROT_IPV4)
+ card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+ if (cmd->hdr.prot_version == QETH_PROT_IPV6)
+ card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -5691,7 +5580,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- if (!IS_OSN(card)) {
+ if (IS_OSN(card)) {
+ dev->ethtool_ops = &qeth_osn_ethtool_ops;
+ } else {
+ dev->ethtool_ops = &qeth_ethtool_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
@@ -5937,12 +5829,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!card)
return -ENODEV;
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
-
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return -EPERM;
-
switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
@@ -5982,454 +5868,91 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
-static struct {
- const char str[ETH_GSTRING_LEN];
-} qeth_ethtool_stats_keys[] = {
-/* 0 */{"rx skbs"},
- {"rx buffers"},
- {"tx skbs"},
- {"tx buffers"},
- {"tx skbs no packing"},
- {"tx buffers no packing"},
- {"tx skbs packing"},
- {"tx buffers packing"},
- {"tx sg skbs"},
- {"tx buffer elements"},
-/* 10 */{"rx sg skbs"},
- {"rx sg frags"},
- {"rx sg page allocs"},
- {"tx large kbytes"},
- {"tx large count"},
- {"tx pk state ch n->p"},
- {"tx pk state ch p->n"},
- {"tx pk watermark low"},
- {"tx pk watermark high"},
- {"queue 0 buffer usage"},
-/* 20 */{"queue 1 buffer usage"},
- {"queue 2 buffer usage"},
- {"queue 3 buffer usage"},
- {"rx poll time"},
- {"rx poll count"},
- {"rx do_QDIO time"},
- {"rx do_QDIO count"},
- {"tx handler time"},
- {"tx handler count"},
- {"tx time"},
-/* 30 */{"tx count"},
- {"tx do_QDIO time"},
- {"tx do_QDIO count"},
- {"tx csum"},
- {"tx lin"},
- {"tx linfail"},
- {"cq handler count"},
- {"cq handler time"},
- {"rx csum"}
-};
-
-int qeth_core_get_sset_count(struct net_device *dev, int stringset)
-{
- switch (stringset) {
- case ETH_SS_STATS:
- return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
-
-void qeth_core_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct qeth_card *card = dev->ml_priv;
- data[0] = card->stats.rx_packets -
- card->perf_stats.initial_rx_packets;
- data[1] = card->perf_stats.bufs_rec;
- data[2] = card->stats.tx_packets -
- card->perf_stats.initial_tx_packets;
- data[3] = card->perf_stats.bufs_sent;
- data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
- - card->perf_stats.skbs_sent_pack;
- data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
- data[6] = card->perf_stats.skbs_sent_pack;
- data[7] = card->perf_stats.bufs_sent_pack;
- data[8] = card->perf_stats.sg_skbs_sent;
- data[9] = card->perf_stats.buf_elements_sent;
- data[10] = card->perf_stats.sg_skbs_rx;
- data[11] = card->perf_stats.sg_frags_rx;
- data[12] = card->perf_stats.sg_alloc_page_rx;
- data[13] = (card->perf_stats.large_send_bytes >> 10);
- data[14] = card->perf_stats.large_send_cnt;
- data[15] = card->perf_stats.sc_dp_p;
- data[16] = card->perf_stats.sc_p_dp;
- data[17] = QETH_LOW_WATERMARK_PACK;
- data[18] = QETH_HIGH_WATERMARK_PACK;
- data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
- data[20] = (card->qdio.no_out_queues > 1) ?
- atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
- data[21] = (card->qdio.no_out_queues > 2) ?
- atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
- data[22] = (card->qdio.no_out_queues > 3) ?
- atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
- data[23] = card->perf_stats.inbound_time;
- data[24] = card->perf_stats.inbound_cnt;
- data[25] = card->perf_stats.inbound_do_qdio_time;
- data[26] = card->perf_stats.inbound_do_qdio_cnt;
- data[27] = card->perf_stats.outbound_handler_time;
- data[28] = card->perf_stats.outbound_handler_cnt;
- data[29] = card->perf_stats.outbound_time;
- data[30] = card->perf_stats.outbound_cnt;
- data[31] = card->perf_stats.outbound_do_qdio_time;
- data[32] = card->perf_stats.outbound_do_qdio_cnt;
- data[33] = card->perf_stats.tx_csum;
- data[34] = card->perf_stats.tx_lin;
- data[35] = card->perf_stats.tx_linfail;
- data[36] = card->perf_stats.cq_cnt;
- data[37] = card->perf_stats.cq_time;
- data[38] = card->perf_stats.rx_csum;
-}
-EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
-
-void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- switch (stringset) {
- case ETH_SS_STATS:
- memcpy(data, &qeth_ethtool_stats_keys,
- sizeof(qeth_ethtool_stats_keys));
- break;
- default:
- WARN_ON(1);
- break;
- }
-}
-EXPORT_SYMBOL_GPL(qeth_core_get_strings);
-
-void qeth_core_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct qeth_card *card = dev->ml_priv;
-
- strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
- sizeof(info->driver));
- strlcpy(info->version, "1.0", sizeof(info->version));
- strlcpy(info->fw_version, card->info.mcl_level,
- sizeof(info->fw_version));
- snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
- CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
-}
-EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
-
-/* Helper function to fill 'advertising' and 'supported' which are the same. */
-/* Autoneg and full-duplex are supported and advertised unconditionally. */
-/* Always advertise and support all speeds up to specified, and only one */
-/* specified port type. */
-static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
- int maxspeed, int porttype)
-{
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
-
- ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
-
- switch (porttype) {
- case PORT_TP:
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
- break;
- case PORT_FIBRE:
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
- break;
- default:
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
- WARN_ON_ONCE(1);
- }
-
- /* partially does fall through, to also select lower speeds */
- switch (maxspeed) {
- case SPEED_25000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 25000baseSR_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 25000baseSR_Full);
- break;
- case SPEED_10000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10000baseT_Full);
- case SPEED_1000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 1000baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 1000baseT_Half);
- case SPEED_100:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 100baseT_Half);
- case SPEED_10:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Half);
- /* end fallthrough */
- break;
- default:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Half);
- WARN_ON_ONCE(1);
- }
-}
-
-int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct qeth_card *card = netdev->ml_priv;
- enum qeth_link_types link_type;
- struct carrier_info carrier_info;
- int rc;
-
- if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
- link_type = QETH_LINK_TYPE_10GBIT_ETH;
- else
- link_type = card->info.link_type;
-
- cmd->base.duplex = DUPLEX_FULL;
- cmd->base.autoneg = AUTONEG_ENABLE;
- cmd->base.phy_address = 0;
- cmd->base.mdio_support = 0;
- cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
- cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
-
- switch (link_type) {
- case QETH_LINK_TYPE_FAST_ETH:
- case QETH_LINK_TYPE_LANE_ETH100:
- cmd->base.speed = SPEED_100;
- cmd->base.port = PORT_TP;
- break;
- case QETH_LINK_TYPE_GBIT_ETH:
- case QETH_LINK_TYPE_LANE_ETH1000:
- cmd->base.speed = SPEED_1000;
- cmd->base.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_10GBIT_ETH:
- cmd->base.speed = SPEED_10000;
- cmd->base.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_25GBIT_ETH:
- cmd->base.speed = SPEED_25000;
- cmd->base.port = PORT_FIBRE;
- break;
- default:
- cmd->base.speed = SPEED_10;
- cmd->base.port = PORT_TP;
- }
- qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
-
- /* Check if we can obtain more accurate information. */
- /* If QUERY_CARD_INFO command is not supported or fails, */
- /* just return the heuristics that was filled above. */
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
- rc = qeth_query_card_info(card, &carrier_info);
- if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
- return 0;
- if (rc) /* report error from the hardware operation */
- return rc;
- /* on success, fill in the information got from the hardware */
-
- netdev_dbg(netdev,
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
- carrier_info.card_type,
- carrier_info.port_mode,
- carrier_info.port_speed);
-
- /* Update attributes for which we've obtained more authoritative */
- /* information, leave the rest the way they where filled above. */
- switch (carrier_info.card_type) {
- case CARD_INFO_TYPE_1G_COPPER_A:
- case CARD_INFO_TYPE_1G_COPPER_B:
- cmd->base.port = PORT_TP;
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
- break;
- case CARD_INFO_TYPE_1G_FIBRE_A:
- case CARD_INFO_TYPE_1G_FIBRE_B:
- cmd->base.port = PORT_FIBRE;
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
- break;
- case CARD_INFO_TYPE_10G_FIBRE_A:
- case CARD_INFO_TYPE_10G_FIBRE_B:
- cmd->base.port = PORT_FIBRE;
- qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
- break;
- }
-
- switch (carrier_info.port_mode) {
- case CARD_INFO_PORTM_FULLDUPLEX:
- cmd->base.duplex = DUPLEX_FULL;
- break;
- case CARD_INFO_PORTM_HALFDUPLEX:
- cmd->base.duplex = DUPLEX_HALF;
- break;
- }
-
- switch (carrier_info.port_speed) {
- case CARD_INFO_PORTS_10M:
- cmd->base.speed = SPEED_10;
- break;
- case CARD_INFO_PORTS_100M:
- cmd->base.speed = SPEED_100;
- break;
- case CARD_INFO_PORTS_1G:
- cmd->base.speed = SPEED_1000;
- break;
- case CARD_INFO_PORTS_10G:
- cmd->base.speed = SPEED_10000;
- break;
- case CARD_INFO_PORTS_25G:
- cmd->base.speed = SPEED_25000;
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings);
-
-/* Callback to handle checksum offload command reply from OSA card.
- * Verify that required features have been enabled on the card.
- * Return error in hdr->return_code as this value is checked by caller.
- *
- * Always returns zero to indicate no further messages from the OSA card.
- */
-static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- struct qeth_checksum_cmd *chksum_cb =
- (struct qeth_checksum_cmd *)reply->param;
+ u32 *features = reply->param;
- QETH_CARD_TEXT(card, 4, "chkdoccb");
if (qeth_setassparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
- memset(chksum_cb, 0, sizeof(*chksum_cb));
- if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- chksum_cb->supported =
- cmd->data.setassparms.data.chksum.supported;
- QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
- }
- if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
- chksum_cb->supported =
- cmd->data.setassparms.data.chksum.supported;
- chksum_cb->enabled =
- cmd->data.setassparms.data.chksum.enabled;
- QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
- QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
- }
+ *features = cmd->data.setassparms.data.flags_32bit;
return 0;
}
-/* Send command to OSA card and check results. */
-static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
- enum qeth_ipa_funcs ipa_func,
- __u16 cmd_code, long data,
- struct qeth_checksum_cmd *chksum_cb,
- enum qeth_prot_versions prot)
+static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
+ enum qeth_prot_versions prot)
{
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 4, "chkdocmd");
- iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
- sizeof(__u32), prot);
- if (!iob)
- return -ENOMEM;
-
- __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
- return qeth_send_ipa_cmd(card, iob, qeth_ipa_checksum_run_cmd_cb,
- chksum_cb);
+ return qeth_send_simple_setassparms_prot(card, cstype,
+ IPA_CMD_ASS_STOP, 0, prot);
}
-static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
- enum qeth_prot_versions prot)
+static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
+ enum qeth_prot_versions prot)
{
u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
- struct qeth_checksum_cmd chksum_cb;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_caps caps;
+ u32 features;
int rc;
- if (prot == QETH_PROT_IPV4)
+ /* some L3 HW requires combined L3+L4 csum offload: */
+ if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
+ cstype == IPA_OUTBOUND_CHECKSUM)
required_features |= QETH_IPA_CHECKSUM_IP_HDR;
- rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
- &chksum_cb, prot);
- if (!rc) {
- if ((required_features & chksum_cb.supported) !=
- required_features)
- rc = -EIO;
- else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
- cstype == IPA_INBOUND_CHECKSUM)
- dev_warn(&card->gdev->dev,
- "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
- QETH_CARD_IFNAME(card));
- }
- if (rc) {
- qeth_send_simple_setassparms_prot(card, cstype,
- IPA_CMD_ASS_STOP, 0, prot);
- dev_warn(&card->gdev->dev,
- "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
- prot, QETH_CARD_IFNAME(card));
+
+ iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
+ prot);
+ if (!iob)
+ return -ENOMEM;
+
+ rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
+ if (rc)
return rc;
+
+ if ((required_features & features) != required_features) {
+ qeth_set_csum_off(card, cstype, prot);
+ return -EOPNOTSUPP;
}
- rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
- chksum_cb.supported, &chksum_cb,
+
+ iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4,
prot);
- if (!rc) {
- if ((required_features & chksum_cb.enabled) !=
- required_features)
- rc = -EIO;
+ if (!iob) {
+ qeth_set_csum_off(card, cstype, prot);
+ return -ENOMEM;
}
+
+ if (features & QETH_IPA_CHECKSUM_LP2LP)
+ required_features |= QETH_IPA_CHECKSUM_LP2LP;
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
if (rc) {
- qeth_send_simple_setassparms_prot(card, cstype,
- IPA_CMD_ASS_STOP, 0, prot);
- dev_warn(&card->gdev->dev,
- "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
- prot, QETH_CARD_IFNAME(card));
+ qeth_set_csum_off(card, cstype, prot);
return rc;
}
+ if (!qeth_ipa_caps_supported(&caps, required_features) ||
+ !qeth_ipa_caps_enabled(&caps, required_features)) {
+ qeth_set_csum_off(card, cstype, prot);
+ return -EOPNOTSUPP;
+ }
+
dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
+ if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
+ cstype == IPA_OUTBOUND_CHECKSUM)
+ dev_warn(&card->gdev->dev,
+ "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
+ QETH_CARD_IFNAME(card));
return 0;
}
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
enum qeth_prot_versions prot)
{
- int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
- : qeth_send_simple_setassparms_prot(card, cstype,
- IPA_CMD_ASS_STOP, 0,
- prot);
- return rc ? -EIO : 0;
+ return on ? qeth_set_csum_on(card, cstype, prot) :
+ qeth_set_csum_off(card, cstype, prot);
}
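
The qeth_set_csum_on()/qeth_set_csum_off() pair above follows a common enable/verify/rollback sequence: start the assist, compare the capability mask the adapter reports against what is required, enable the required bits, verify what was actually enabled, and stop the assist again on any failure. A minimal sketch of that control flow in plain userspace C; the hw_start()/hw_enable()/hw_stop() helpers and the CSUM_* bits are invented stand-ins, not the qeth IPA interface.

#include <errno.h>
#include <stdio.h>

#define CSUM_TCP 0x1u
#define CSUM_UDP 0x2u

/* hypothetical adapter backend: pretend the HW offers TCP+UDP checksumming */
static int hw_start(unsigned int *supported)
{
	*supported = CSUM_TCP | CSUM_UDP;
	return 0;
}

static int hw_enable(unsigned int want, unsigned int *enabled)
{
	*enabled = want;
	return 0;
}

static void hw_stop(void)
{
}

static int csum_on(unsigned int required)
{
	unsigned int supported, enabled;
	int rc;

	rc = hw_start(&supported);		/* like IPA_CMD_ASS_START */
	if (rc)
		return rc;
	if ((required & supported) != required) {
		hw_stop();			/* roll back, like IPA_CMD_ASS_STOP */
		return -EOPNOTSUPP;
	}
	rc = hw_enable(required, &enabled);	/* like IPA_CMD_ASS_ENABLE */
	if (rc || (required & enabled) != required) {
		hw_stop();
		return rc ? rc : -EOPNOTSUPP;
	}
	return 0;
}

int main(void)
{
	printf("csum_on -> %d\n", csum_on(CSUM_TCP | CSUM_UDP));
	return 0;
}
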
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -6439,7 +5962,7 @@ static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
struct qeth_tso_start_data *tso_data = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
- return 0;
+ return -EIO;
tso_data->mss = cmd->data.setassparms.data.tso.mss;
tso_data->supported = cmd->data.setassparms.data.tso.supported;
@@ -6505,10 +6028,7 @@ static int qeth_set_tso_on(struct qeth_card *card,
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
enum qeth_prot_versions prot)
{
- int rc = on ? qeth_set_tso_on(card, prot) :
- qeth_set_tso_off(card, prot);
-
- return rc ? -EIO : 0;
+ return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
@@ -6534,8 +6054,6 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
-#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
- NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
/**
* qeth_enable_hw_features() - (Re-)Enable HW functions for device features
* @dev: a net_device
@@ -6545,17 +6063,20 @@ void qeth_enable_hw_features(struct net_device *dev)
struct qeth_card *card = dev->ml_priv;
netdev_features_t features;
- rtnl_lock();
features = dev->features;
- /* force-off any feature that needs an IPA sequence.
+ /* force-off any feature that might need an IPA sequence.
* netdev_update_features() will restart them.
*/
- dev->features &= ~QETH_HW_FEATURES;
+ dev->features &= ~dev->hw_features;
+ /* toggle VLAN filter, so that VIDs are re-programmed: */
+ if (IS_LAYER2(card) && IS_VM_NIC(card)) {
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
netdev_update_features(dev);
if (features != dev->features)
dev_warn(&card->gdev->dev,
"Device recovery failed to restore all offload features\n");
- rtnl_unlock();
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
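
qeth_enable_hw_features() above snapshots the current feature set, force-clears every bit that might need an IPA sequence, lets netdev_update_features() re-enable what the device still supports, and warns when the result no longer matches the snapshot. A small sketch of that snapshot/clear/renegotiate/compare pattern; renegotiate() is a made-up stand-in for the netdev_update_features() pass.

#include <stdio.h>

#define F_RXCSUM 0x1u
#define F_TSO    0x2u
#define F_VLAN   0x4u

/* pretend re-enable path: in this example the device lost TSO support */
static unsigned int renegotiate(unsigned int wanted)
{
	return wanted & ~F_TSO;
}

int main(void)
{
	unsigned int features = F_RXCSUM | F_TSO | F_VLAN;
	unsigned int saved = features;

	features = 0;				/* force-off, like clearing dev->features */
	features = renegotiate(saved);		/* restart the offloads */
	if (features != saved)
		printf("recovery failed to restore all offload features\n");
	return 0;
}
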
@@ -6624,10 +6145,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
features &= ~NETIF_F_TSO;
if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO6;
- /* if the card isn't up, remove features that require hw changes */
- if (card->state == CARD_STATE_DOWN ||
- card->state == CARD_STATE_RECOVER)
- features &= ~QETH_HW_FEATURES;
+
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
return features;
}
@@ -6659,18 +6177,70 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(qeth_features_check);
+void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
+ QETH_CARD_TEXT(card, 5, "getstat");
+
+ stats->rx_packets = card->stats.rx_packets;
+ stats->rx_bytes = card->stats.rx_bytes;
+ stats->rx_errors = card->stats.rx_errors;
+ stats->rx_dropped = card->stats.rx_dropped;
+ stats->multicast = card->stats.rx_multicast;
+ stats->tx_errors = card->stats.tx_errors;
+
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
+ queue = card->qdio.out_qs[i];
+
+ stats->tx_packets += queue->stats.tx_packets;
+ stats->tx_bytes += queue->stats.tx_bytes;
+ stats->tx_errors += queue->stats.tx_errors;
+ stats->tx_dropped += queue->stats.tx_dropped;
+ }
+}
+EXPORT_SYMBOL_GPL(qeth_get_stats64);
+
+int qeth_open(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 4, "qethopen");
+
+ if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
+ return -EIO;
+
+ card->data.state = CH_STATE_UP;
+ netif_start_queue(dev);
+
+ napi_enable(&card->napi);
+ local_bh_disable();
+ napi_schedule(&card->napi);
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_open);
+
+int qeth_stop(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 4, "qethstop");
+ netif_tx_disable(dev);
+ napi_disable(&card->napi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_stop);
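
qeth_get_stats64() above folds the per-TX-queue counters into the single per-device view that the stack expects, while the RX counters still live in one card-wide structure. A minimal sketch of that aggregation step, with hypothetical structure names.

#include <stdint.h>
#include <stdio.h>

struct txq_stats { uint64_t tx_packets, tx_bytes, tx_errors, tx_dropped; };
struct dev_stats { uint64_t tx_packets, tx_bytes, tx_errors, tx_dropped; };

/* sum the per-queue counters into one device-wide total */
static void fill_tx_stats(struct dev_stats *out, const struct txq_stats *q,
			  unsigned int nr_queues)
{
	unsigned int i;

	for (i = 0; i < nr_queues; i++) {
		out->tx_packets += q[i].tx_packets;
		out->tx_bytes   += q[i].tx_bytes;
		out->tx_errors  += q[i].tx_errors;
		out->tx_dropped += q[i].tx_dropped;
	}
}

int main(void)
{
	struct txq_stats q[2] = { { 10, 1500, 0, 0 }, { 5, 700, 1, 0 } };
	struct dev_stats total = { 0, 0, 0, 0 };

	fill_tx_stats(&total, q, 2);
	printf("tx_packets=%llu tx_bytes=%llu\n",
	       (unsigned long long)total.tx_packets,
	       (unsigned long long)total.tx_bytes);
	return 0;
}
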
+
static int __init qeth_core_init(void)
{
int rc;
pr_info("loading core functions\n");
- qeth_wq = create_singlethread_workqueue("qeth_wq");
- if (!qeth_wq) {
- rc = -ENOMEM;
- goto out_err;
- }
-
rc = qeth_register_dbf_views();
if (rc)
goto dbf_err;
@@ -6712,8 +6282,6 @@ slab_err:
register_err:
qeth_unregister_dbf_views();
dbf_err:
- destroy_workqueue(qeth_wq);
-out_err:
pr_err("Initializing the qeth device driver failed\n");
return rc;
}
@@ -6721,7 +6289,6 @@ out_err:
static void __exit qeth_core_exit(void)
{
qeth_clear_dbf_list();
- destroy_workqueue(qeth_wq);
ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccw_driver_unregister(&qeth_ccw_driver);
kmem_cache_destroy(qeth_qdio_outbuf_cache);
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 16fc51ad0514..e3f4866c158e 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -125,24 +125,13 @@ unsigned char DM_ACT[] = {
unsigned char IPA_PDU_HEADER[] = {
0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77,
- 0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
- (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256,
- (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256,
+ 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x24,
- sizeof(struct qeth_ipa_cmd) / 256,
- sizeof(struct qeth_ipa_cmd) % 256,
- 0x00,
- sizeof(struct qeth_ipa_cmd) / 256,
- sizeof(struct qeth_ipa_cmd) % 256,
- 0x05,
- 0x77, 0x77, 0x77, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x05, 0x77, 0x77, 0x77, 0x77,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x00,
- sizeof(struct qeth_ipa_cmd) / 256,
- sizeof(struct qeth_ipa_cmd) % 256,
- 0x00, 0x00, 0x00, 0x40,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
};
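
With the length bytes in IPA_PDU_HEADER above now hard-coded to zero, the per-command lengths are presumably filled in at send time instead of at array-initialization time; the qeth_osn_assist() hunk further down shows qeth_prepare_ipa_cmd() gaining a length argument for exactly that. A sketch of patching a 16-bit big-endian length into fixed offsets of a header template; the offsets and the 20-byte constant below are invented, the real locations sit behind the QETH_IPA_PDU_LEN_* macros.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LEN_TOTAL_OFFSET 12	/* hypothetical offsets into the template */
#define LEN_PDU1_OFFSET  28

static void put_be16(unsigned char *buf, unsigned int off, uint16_t val)
{
	buf[off]     = val >> 8;
	buf[off + 1] = val & 0xff;
}

int main(void)
{
	unsigned char header[64];
	uint16_t data_len = 0x0123;

	memset(header, 0, sizeof(header));	/* template with zeroed length fields */
	put_be16(header, LEN_TOTAL_OFFSET, 20 + data_len);
	put_be16(header, LEN_PDU1_OFFSET, data_len);
	printf("total=%02x%02x pdu1=%02x%02x\n",
	       header[LEN_TOTAL_OFFSET], header[LEN_TOTAL_OFFSET + 1],
	       header[LEN_PDU1_OFFSET], header[LEN_PDU1_OFFSET + 1]);
	return 0;
}
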
struct ipa_rc_msg {
@@ -212,12 +201,10 @@ static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
{IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"},
{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
- {IPA_RC_ENOMEM, "Memory problem"},
+ /* default for qeth_get_ipa_msg(): */
{IPA_RC_FFFF, "Unknown Error"}
};
-
-
const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
{
int x;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1ab321926f64..f8c5d4a9be13 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -21,8 +21,6 @@
extern unsigned char IPA_PDU_HEADER[];
#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
-#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
-
#define QETH_SEQ_NO_LENGTH 4
#define QETH_MPC_TOKEN_LENGTH 4
#define QETH_MCL_LENGTH 4
@@ -81,7 +79,9 @@ enum qeth_card_types {
#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
+#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
+#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
#define IS_VM_NIC(card) ((card)->info.guestlan)
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -230,7 +230,6 @@ enum qeth_ipa_return_codes {
IPA_RC_LAN_OFFLINE = 0xe080,
IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090,
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
- IPA_RC_ENOMEM = 0xfffe,
IPA_RC_FFFF = 0xffff
};
/* for VNIC Characteristics */
@@ -418,12 +417,6 @@ enum qeth_ipa_checksum_bits {
QETH_IPA_CHECKSUM_LP2LP = 0x0020
};
-/* IPA Assist checksum offload reply layout. */
-struct qeth_checksum_cmd {
- __u32 supported;
- __u32 enabled;
-} __packed;
-
enum qeth_ipa_large_send_caps {
QETH_IPA_LARGE_SEND_TCP = 0x00000001,
};
@@ -439,7 +432,6 @@ struct qeth_ipacmd_setassparms {
union {
__u32 flags_32bit;
struct qeth_ipa_caps caps;
- struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
@@ -833,15 +825,10 @@ enum qeth_ipa_arp_return_codes {
extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
-#define QETH_SETASS_BASE_LEN (IPA_PDU_HEADER_SIZE + \
- sizeof(struct qeth_ipacmd_hdr) + \
- sizeof(struct qeth_ipacmd_setassparms_hdr))
#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setadpparms_hdr))
#define QETH_SNMP_SETADP_CMDLENGTH 16
-#define QETH_ARP_DATA_SIZE 3968
-#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
/* Helper functions */
#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
(cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 30f61608fa22..56deeb6f7bc0 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -29,13 +29,11 @@ static ssize_t qeth_dev_state_show(struct device *dev,
case CARD_STATE_HARDSETUP:
return sprintf(buf, "HARDSETUP\n");
case CARD_STATE_SOFTSETUP:
+ if (card->dev->flags & IFF_UP)
+ return sprintf(buf, "UP (LAN %s)\n",
+ netif_carrier_ok(card->dev) ? "ONLINE" :
+ "OFFLINE");
return sprintf(buf, "SOFTSETUP\n");
- case CARD_STATE_UP:
- return sprintf(buf, "UP (LAN %s)\n",
- netif_carrier_ok(card->dev) ? "ONLINE" :
- "OFFLINE");
- case CARD_STATE_RECOVER:
- return sprintf(buf, "RECOVER\n");
default:
return sprintf(buf, "UNKNOWN\n");
}
@@ -126,8 +124,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -202,8 +199,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -285,8 +281,7 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -316,7 +311,7 @@ static ssize_t qeth_dev_recover_store(struct device *dev,
if (!card)
return -EINVAL;
- if (card->state != CARD_STATE_UP)
+ if (!qeth_card_hw_is_reachable(card))
return -EPERM;
i = simple_strtoul(buf, &tmp, 16);
@@ -336,35 +331,36 @@ static ssize_t qeth_dev_performance_stats_show(struct device *dev,
if (!card)
return -EINVAL;
- return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
+ return sprintf(buf, "1\n");
}
static ssize_t qeth_dev_performance_stats_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
- int i, rc = 0;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+ bool reset;
+ int rc;
if (!card)
return -EINVAL;
- mutex_lock(&card->conf_mutex);
- i = simple_strtoul(buf, &tmp, 16);
- if ((i == 0) || (i == 1)) {
- if (i == card->options.performance_stats)
- goto out;
- card->options.performance_stats = i;
- if (i == 0)
- memset(&card->perf_stats, 0,
- sizeof(struct qeth_perf_stats));
- card->perf_stats.initial_rx_packets = card->stats.rx_packets;
- card->perf_stats.initial_tx_packets = card->stats.tx_packets;
- } else
- rc = -EINVAL;
-out:
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
+ rc = kstrtobool(buf, &reset);
+ if (rc)
+ return rc;
+
+ if (reset) {
+ memset(&card->stats, 0, sizeof(card->stats));
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
+ queue = card->qdio.out_qs[i];
+ if (!queue)
+ break;
+ memset(&queue->stats, 0, sizeof(queue->stats));
+ }
+ }
+
+ return count;
}
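
The reworked store handler above parses its input with kstrtobool() and, on a truthy value, simply zeroes the counters; there is no collection flag to toggle any more, which is why the show side now always prints 1. A simplified userspace analogue of the boolean parsing; parse_bool() is a stand-in that accepts fewer spellings than the real kstrtobool().

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* map a handful of spellings to a bool, reject everything else */
static int parse_bool(const char *s, bool *res)
{
	if (!strcmp(s, "1") || !strcmp(s, "y") || !strcmp(s, "on")) {
		*res = true;
		return 0;
	}
	if (!strcmp(s, "0") || !strcmp(s, "n") || !strcmp(s, "off")) {
		*res = false;
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	bool reset;

	if (parse_bool("1", &reset) == 0 && reset)
		printf("reset requested: clear the counters\n");
	return 0;
}
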
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
@@ -420,7 +416,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
}
- card->info.mac_bits = 0;
if (card->discipline) {
/* start with a new, pristine netdevice: */
ndev = qeth_clone_netdev(card->dev);
@@ -633,8 +628,7 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
new file mode 100644
index 000000000000..93a53fed4cf8
--- /dev/null
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2018
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/ethtool.h>
+#include "qeth_core.h"
+
+
+#define QETH_TXQ_STAT(_name, _stat) { \
+ .name = _name, \
+ .offset = offsetof(struct qeth_out_q_stats, _stat) \
+}
+
+#define QETH_CARD_STAT(_name, _stat) { \
+ .name = _name, \
+ .offset = offsetof(struct qeth_card_stats, _stat) \
+}
+
+struct qeth_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned int offset;
+};
+
+static const struct qeth_stats txq_stats[] = {
+ QETH_TXQ_STAT("IO buffers", bufs),
+ QETH_TXQ_STAT("IO buffer elements", buf_elements),
+ QETH_TXQ_STAT("packed IO buffers", bufs_pack),
+ QETH_TXQ_STAT("skbs", tx_packets),
+ QETH_TXQ_STAT("packed skbs", skbs_pack),
+ QETH_TXQ_STAT("SG skbs", skbs_sg),
+ QETH_TXQ_STAT("HW csum skbs", skbs_csum),
+ QETH_TXQ_STAT("TSO skbs", skbs_tso),
+ QETH_TXQ_STAT("linearized skbs", skbs_linearized),
+ QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
+ QETH_TXQ_STAT("TSO bytes", tso_bytes),
+ QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+};
+
+static const struct qeth_stats card_stats[] = {
+ QETH_CARD_STAT("rx0 IO buffers", rx_bufs),
+ QETH_CARD_STAT("rx0 HW csum skbs", rx_skb_csum),
+ QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs),
+ QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags),
+ QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
+};
+
+#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
+#define CARD_STATS_LEN ARRAY_SIZE(card_stats)
+
+static void qeth_add_stat_data(u64 **dst, void *src,
+ const struct qeth_stats stats[],
+ unsigned int size)
+{
+ unsigned int i;
+ char *stat;
+
+ for (i = 0; i < size; i++) {
+ stat = (char *)src + stats[i].offset;
+ **dst = *(u64 *)stat;
+ (*dst)++;
+ }
+}
+
+static void qeth_add_stat_strings(u8 **data, const char *prefix,
+ const struct qeth_stats stats[],
+ unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name);
+ *data += ETH_GSTRING_LEN;
+ }
+}
+
+static int qeth_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return CARD_STATS_LEN +
+ card->qdio.no_out_queues * TXQ_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void qeth_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct qeth_card *card = dev->ml_priv;
+ unsigned int i;
+
+ qeth_add_stat_data(&data, &card->stats, card_stats, CARD_STATS_LEN);
+ for (i = 0; i < card->qdio.no_out_queues; i++)
+ qeth_add_stat_data(&data, &card->qdio.out_qs[i]->stats,
+ txq_stats, TXQ_STATS_LEN);
+}
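
The stat tables above pair a display string with an offsetof() into the corresponding stats structure, so qeth_get_ethtool_stats() and qeth_get_strings() can walk one generic loop instead of copying every counter by hand. A self-contained sketch of that descriptor-table technique; the structure and counter names are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct card_stats {
	uint64_t rx_bufs;
	uint64_t rx_sg_skbs;
};

struct stat_desc {
	const char *name;
	size_t offset;
};

#define STAT(n, field) { .name = n, .offset = offsetof(struct card_stats, field) }

static const struct stat_desc descs[] = {
	STAT("rx0 IO buffers", rx_bufs),
	STAT("rx0 SG skbs", rx_sg_skbs),
};

int main(void)
{
	struct card_stats stats = { .rx_bufs = 42, .rx_sg_skbs = 7 };
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
		const char *base = (const char *)&stats;
		uint64_t val;

		/* fetch the counter through its recorded offset */
		memcpy(&val, base + descs[i].offset, sizeof(val));
		printf("%-20s %llu\n", descs[i].name, (unsigned long long)val);
	}
	return 0;
}
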
+
+static void qeth_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ param->rx_max_pending = QDIO_MAX_BUFFERS_PER_Q;
+ param->rx_mini_max_pending = 0;
+ param->rx_jumbo_max_pending = 0;
+ param->tx_max_pending = QDIO_MAX_BUFFERS_PER_Q;
+
+ param->rx_pending = card->qdio.in_buf_pool.buf_count;
+ param->rx_mini_pending = 0;
+ param->rx_jumbo_pending = 0;
+ param->tx_pending = QDIO_MAX_BUFFERS_PER_Q;
+}
+
+static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct qeth_card *card = dev->ml_priv;
+ char prefix[ETH_GSTRING_LEN] = "";
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ qeth_add_stat_strings(&data, prefix, card_stats,
+ CARD_STATS_LEN);
+ for (i = 0; i < card->qdio.no_out_queues; i++) {
+ snprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i);
+ qeth_add_stat_strings(&data, prefix, txq_stats,
+ TXQ_STATS_LEN);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void qeth_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
+ sizeof(info->driver));
+ strlcpy(info->version, "1.0", sizeof(info->version));
+ strlcpy(info->fw_version, card->info.mcl_level,
+ sizeof(info->fw_version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
+ CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
+}
+
+/* Helper function to fill 'advertising' and 'supported', which are the same. */
+/* Autoneg and full-duplex are supported and advertised unconditionally. */
+/* Always advertise and support all speeds up to the specified maximum, and */
+/* only the one specified port type. */
+static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
+ int maxspeed, int porttype)
+{
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+
+ switch (porttype) {
+ case PORT_TP:
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ break;
+ case PORT_FIBRE:
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ break;
+ default:
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ WARN_ON_ONCE(1);
+ }
+
+ /* partially falls through, to also select the lower speeds */
+ switch (maxspeed) {
+ case SPEED_25000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 25000baseSR_Full);
+ break;
+ case SPEED_10000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseT_Full);
+ /* fall through */
+ case SPEED_1000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Half);
+ /* fall through */
+ case SPEED_100:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Half);
+ /* fall through */
+ case SPEED_10:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+ break;
+ default:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+ WARN_ON_ONCE(1);
+ }
+}
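
qeth_set_cmd_adv_sup() above leans on deliberate switch fall-through so that a port with a given maximum speed also advertises every lower speed; only the 25G case breaks out early, since the driver advertises 25000baseSR_Full alone there. A compact sketch of the cumulative fall-through idea, using invented MODE_* bits rather than the real ethtool link-mode bits.

#include <stdio.h>

enum { SPEED_10 = 10, SPEED_100 = 100, SPEED_1000 = 1000, SPEED_10000 = 10000 };

#define MODE_10		0x1u
#define MODE_100	0x2u
#define MODE_1000	0x4u
#define MODE_10000	0x8u

static unsigned int modes_up_to(int maxspeed)
{
	unsigned int modes = 0;

	switch (maxspeed) {
	case SPEED_10000:
		modes |= MODE_10000;
		/* fall through: a 10G port also advertises 1G, 100M and 10M */
	case SPEED_1000:
		modes |= MODE_1000;
		/* fall through */
	case SPEED_100:
		modes |= MODE_100;
		/* fall through */
	case SPEED_10:
	default:
		modes |= MODE_10;
		break;
	}
	return modes;
}

int main(void)
{
	printf("10G port advertises 0x%x\n", modes_up_to(SPEED_10000));
	return 0;
}
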
+
+
+static int qeth_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct qeth_card *card = netdev->ml_priv;
+ enum qeth_link_types link_type;
+ struct carrier_info carrier_info;
+ int rc;
+
+ if (IS_IQD(card) || IS_VM_NIC(card))
+ link_type = QETH_LINK_TYPE_10GBIT_ETH;
+ else
+ link_type = card->info.link_type;
+
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.phy_address = 0;
+ cmd->base.mdio_support = 0;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
+ switch (link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ case QETH_LINK_TYPE_LANE_ETH100:
+ cmd->base.speed = SPEED_100;
+ cmd->base.port = PORT_TP;
+ break;
+ case QETH_LINK_TYPE_GBIT_ETH:
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ cmd->base.speed = SPEED_1000;
+ cmd->base.port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ cmd->base.speed = SPEED_10000;
+ cmd->base.port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ cmd->base.speed = SPEED_25000;
+ cmd->base.port = PORT_FIBRE;
+ break;
+ default:
+ cmd->base.speed = SPEED_10;
+ cmd->base.port = PORT_TP;
+ }
+ qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
+
+ /* Check if we can obtain more accurate information. */
+ /* If QUERY_CARD_INFO command is not supported or fails, */
+ /* just return the heuristics that were filled in above. */
+ rc = qeth_query_card_info(card, &carrier_info);
+ if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
+ return 0;
+ if (rc) /* report error from the hardware operation */
+ return rc;
+ /* on success, fill in the information obtained from the hardware */
+
+ netdev_dbg(netdev,
+ "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
+ carrier_info.card_type,
+ carrier_info.port_mode,
+ carrier_info.port_speed);
+
+ /* Update attributes for which we've obtained more authoritative */
+ /* information, leave the rest the way they were filled above. */
+ switch (carrier_info.card_type) {
+ case CARD_INFO_TYPE_1G_COPPER_A:
+ case CARD_INFO_TYPE_1G_COPPER_B:
+ cmd->base.port = PORT_TP;
+ qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
+ break;
+ case CARD_INFO_TYPE_1G_FIBRE_A:
+ case CARD_INFO_TYPE_1G_FIBRE_B:
+ cmd->base.port = PORT_FIBRE;
+ qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
+ break;
+ case CARD_INFO_TYPE_10G_FIBRE_A:
+ case CARD_INFO_TYPE_10G_FIBRE_B:
+ cmd->base.port = PORT_FIBRE;
+ qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
+ break;
+ }
+
+ switch (carrier_info.port_mode) {
+ case CARD_INFO_PORTM_FULLDUPLEX:
+ cmd->base.duplex = DUPLEX_FULL;
+ break;
+ case CARD_INFO_PORTM_HALFDUPLEX:
+ cmd->base.duplex = DUPLEX_HALF;
+ break;
+ }
+
+ switch (carrier_info.port_speed) {
+ case CARD_INFO_PORTS_10M:
+ cmd->base.speed = SPEED_10;
+ break;
+ case CARD_INFO_PORTS_100M:
+ cmd->base.speed = SPEED_100;
+ break;
+ case CARD_INFO_PORTS_1G:
+ cmd->base.speed = SPEED_1000;
+ break;
+ case CARD_INFO_PORTS_10G:
+ cmd->base.speed = SPEED_10000;
+ break;
+ case CARD_INFO_PORTS_25G:
+ cmd->base.speed = SPEED_25000;
+ break;
+ }
+
+ return 0;
+}
+
+const struct ethtool_ops qeth_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = qeth_get_ringparam,
+ .get_strings = qeth_get_strings,
+ .get_ethtool_stats = qeth_get_ethtool_stats,
+ .get_sset_count = qeth_get_sset_count,
+ .get_drvinfo = qeth_get_drvinfo,
+ .get_link_ksettings = qeth_get_link_ksettings,
+};
+
+const struct ethtool_ops qeth_osn_ethtool_ops = {
+ .get_strings = qeth_get_strings,
+ .get_ethtool_stats = qeth_get_ethtool_stats,
+ .get_sset_count = qeth_get_sset_count,
+ .get_drvinfo = qeth_get_drvinfo,
+};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f108d4b44605..8efb2e8ff8f4 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -25,7 +25,6 @@
#include "qeth_l2.h"
static int qeth_l2_set_offline(struct ccwgroup_device *);
-static int qeth_l2_stop(struct net_device *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -36,7 +35,7 @@ static void qeth_l2_vnicc_init(struct qeth_card *card);
static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
u32 *timeout);
-static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
+static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
{
int rc;
@@ -63,9 +62,6 @@ static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
case IPA_RC_L2_MAC_NOT_FOUND:
rc = -ENOENT;
break;
- case -ENOMEM:
- rc = -ENOMEM;
- break;
default:
rc = -EIO;
break;
@@ -73,6 +69,15 @@ static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
return rc;
}
+static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
+}
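
qeth_l2_send_setdelmac_cb() above moves the translation of adapter return codes into errno values into the command callback, so callers of qeth_l2_send_setdelmac() only ever see ordinary kernel error codes. A sketch of such a translation table; the HW_RC_* numbers are invented and do not match the real IPA return codes.

#include <errno.h>
#include <stdio.h>

/* hypothetical hardware return codes, loosely modelled on the codes
 * handled in qeth_l2_setdelmac_makerc() */
enum hw_rc {
	HW_RC_SUCCESS   = 0x0000,
	HW_RC_DUP_MAC   = 0x2005,
	HW_RC_NOT_FOUND = 0x2010,
};

static int hw_rc_to_errno(unsigned int rc)
{
	switch (rc) {
	case HW_RC_SUCCESS:
		return 0;
	case HW_RC_DUP_MAC:
		return -EEXIST;
	case HW_RC_NOT_FOUND:
		return -ENOENT;
	default:
		return -EIO;
	}
}

int main(void)
{
	printf("0x2005 -> %d\n", hw_rc_to_errno(HW_RC_DUP_MAC));
	return 0;
}
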
+
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
enum qeth_ipa_cmds ipacmd)
{
@@ -86,8 +91,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
cmd = __ipa_cmd(iob);
cmd->data.setdelmac.mac_length = ETH_ALEN;
ether_addr_copy(cmd->data.setdelmac.mac, mac);
- return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
- NULL, NULL));
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
}
static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
@@ -98,8 +102,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- mac, card->dev->name);
+ "MAC address %pM successfully registered\n", mac);
} else {
switch (rc) {
case -EEXIST:
@@ -171,9 +174,9 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
return RTN_UNICAST;
}
-static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type,
- unsigned int data_len)
+static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
+ struct qeth_hdr *hdr, struct sk_buff *skb,
+ int ipv, int cast_type, unsigned int data_len)
{
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
@@ -185,8 +188,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
+ QETH_TXQ_STAT_INC(queue, skbs_csum);
}
}
@@ -207,7 +209,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
}
-static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode)
+static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode)
{
if (retcode)
QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
@@ -223,8 +225,6 @@ static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode)
return -ENOENT;
case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
return -EPERM;
- case -ENOMEM:
- return -ENOMEM;
default:
return -EIO;
}
@@ -242,9 +242,8 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
cmd->data.setdelvlan.vlan_id,
CARD_DEVID(card), cmd->hdr.return_code);
QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
- QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
- return 0;
+ return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code);
}
static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
@@ -259,101 +258,40 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setdelvlan.vlan_id = i;
- return qeth_setdelvlan_makerc(card, qeth_send_ipa_cmd(card, iob,
- qeth_l2_send_setdelvlan_cb, NULL));
-}
-
-static void qeth_l2_process_vlans(struct qeth_card *card)
-{
- struct qeth_vlan_vid *id;
-
- QETH_CARD_TEXT(card, 3, "L2prcvln");
- mutex_lock(&card->vid_list_mutex);
- list_for_each_entry(id, &card->vid_list, list) {
- qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
- }
- mutex_unlock(&card->vid_list_mutex);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL);
}
static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct qeth_card *card = dev->ml_priv;
- struct qeth_vlan_vid *id;
- int rc;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
return 0;
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "aidREC");
- return 0;
- }
- id = kmalloc(sizeof(*id), GFP_KERNEL);
- if (id) {
- id->vid = vid;
- rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
- if (rc) {
- kfree(id);
- return rc;
- }
- mutex_lock(&card->vid_list_mutex);
- list_add_tail(&id->list, &card->vid_list);
- mutex_unlock(&card->vid_list_mutex);
- } else {
- return -ENOMEM;
- }
- return 0;
+
+ return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
}
static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
- struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
- int rc = 0;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "kidREC");
+ if (!vid)
return 0;
- }
- mutex_lock(&card->vid_list_mutex);
- list_for_each_entry(id, &card->vid_list, list) {
- if (id->vid == vid) {
- list_del(&id->list);
- tmpid = id;
- break;
- }
- }
- mutex_unlock(&card->vid_list_mutex);
- if (tmpid) {
- rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
- kfree(tmpid);
- }
- return rc;
+
+ return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
}
-static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
+static void qeth_l2_stop_card(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP , 2, "stopcard");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_set_allowed_threads(card, 0, 1);
- if (card->read.state == CH_STATE_UP &&
- card->write.state == CH_STATE_UP &&
- (card->state == CARD_STATE_UP)) {
- if (recovery_mode &&
- card->info.type != QETH_CARD_TYPE_OSN) {
- qeth_l2_stop(card->dev);
- } else {
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
- }
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- card->state = CARD_STATE_SOFTSETUP;
- }
+
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l2_del_all_macs(card);
qeth_clear_ipacmd_list(card);
@@ -369,6 +307,9 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}
+
+ flush_workqueue(card->event_wq);
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
}
static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
@@ -416,8 +357,8 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
}
work_done++;
budget--;
- card->stats.rx_packets++;
- card->stats.rx_bytes += len;
+ QETH_CARD_STAT_INC(card, rx_packets);
+ QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
@@ -441,7 +382,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
if (!IS_OSN(card)) {
rc = qeth_setadpparms_change_macaddr(card);
- if (!rc && is_valid_ether_addr(card->dev->dev_addr))
+ if (!rc)
goto out;
QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
CARD_DEVID(card), rc);
@@ -460,6 +401,26 @@ out:
return 0;
}
+static void qeth_l2_register_dev_addr(struct qeth_card *card)
+{
+ if (!is_valid_ether_addr(card->dev->dev_addr))
+ qeth_l2_request_initial_mac(card);
+
+ if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+}
+
+static int qeth_l2_validate_addr(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ return eth_validate_addr(dev);
+
+ QETH_CARD_TEXT(card, 4, "nomacadr");
+ return -EPERM;
+}
+
static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
@@ -469,9 +430,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
QETH_CARD_TEXT(card, 3, "setmac");
- if (card->info.type == QETH_CARD_TYPE_OSN ||
- card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX) {
+ if (IS_OSM(card) || IS_OSX(card)) {
QETH_CARD_TEXT(card, 3, "setmcTYP");
return -EOPNOTSUPP;
}
@@ -479,39 +438,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "setmcREC");
- return -ERESTARTSYS;
- }
-
- /* avoid racing against concurrent state change: */
- if (!mutex_trylock(&card->conf_mutex))
- return -EAGAIN;
-
- if (!qeth_card_hw_is_reachable(card)) {
- ether_addr_copy(dev->dev_addr, addr->sa_data);
- goto out_unlock;
- }
-
/* don't register the same address twice */
if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- goto out_unlock;
+ return 0;
/* add the new address, switch over, drop the old */
rc = qeth_l2_send_setmac(card, addr->sa_data);
if (rc)
- goto out_unlock;
+ return rc;
ether_addr_copy(old_addr, dev->dev_addr);
ether_addr_copy(dev->dev_addr, addr->sa_data);
if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
qeth_l2_remove_mac(card, old_addr);
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-
-out_unlock:
- mutex_unlock(&card->conf_mutex);
- return rc;
+ return 0;
}
static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -582,13 +524,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
int i;
int rc;
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return;
-
QETH_CARD_TEXT(card, 3, "setmulti");
- if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
- (card->state != CARD_STATE_UP))
- return;
spin_lock_bh(&card->mclock);
@@ -673,17 +609,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
- if (card->state != CARD_STATE_UP) {
- card->stats.tx_carrier_errors++;
- goto tx_drop;
- }
-
queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
- if (card->options.performance_stats) {
- card->perf_stats.outbound_cnt++;
- card->perf_stats.outbound_start_time = qeth_get_micros();
- }
netif_stop_queue(dev);
if (IS_OSN(card))
@@ -693,81 +620,21 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
qeth_l2_fill_header);
if (!rc) {
- card->stats.tx_packets++;
- card->stats.tx_bytes += tx_bytes;
- if (card->options.performance_stats)
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
+ QETH_TXQ_STAT_INC(queue, tx_packets);
+ QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
netif_wake_queue(dev);
return NETDEV_TX_OK;
} else if (rc == -EBUSY) {
return NETDEV_TX_BUSY;
} /* else fall through */
-tx_drop:
- card->stats.tx_dropped++;
- card->stats.tx_errors++;
+ QETH_TXQ_STAT_INC(queue, tx_dropped);
+ QETH_TXQ_STAT_INC(queue, tx_errors);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
-static int __qeth_l2_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
- int rc = 0;
-
- QETH_CARD_TEXT(card, 4, "qethopen");
- if (card->state == CARD_STATE_UP)
- return rc;
- if (card->state != CARD_STATE_SOFTSETUP)
- return -ENODEV;
-
- if ((card->info.type != QETH_CARD_TYPE_OSN) &&
- (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
- QETH_CARD_TEXT(card, 4, "nomacadr");
- return -EPERM;
- }
- card->data.state = CH_STATE_UP;
- card->state = CARD_STATE_UP;
- netif_start_queue(dev);
-
- if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
- napi_enable(&card->napi);
- local_bh_disable();
- napi_schedule(&card->napi);
- /* kick-start the NAPI softirq: */
- local_bh_enable();
- } else
- rc = -EIO;
- return rc;
-}
-
-static int qeth_l2_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 5, "qethope_");
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "openREC");
- return -ERESTARTSYS;
- }
- return __qeth_l2_open(dev);
-}
-
-static int qeth_l2_stop(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 4, "qethstop");
- netif_tx_disable(dev);
- if (card->state == CARD_STATE_UP) {
- card->state = CARD_STATE_SOFTSETUP;
- napi_disable(&card->napi);
- }
- return 0;
-}
-
static const struct device_type qeth_l2_devtype = {
.name = "qeth_layer2",
.groups = qeth_l2_attr_groups,
@@ -783,7 +650,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
if (rc)
return rc;
}
- INIT_LIST_HEAD(&card->vid_list);
+
hash_init(card->mac_htable);
card->info.hwtrap = 0;
qeth_l2_vnicc_set_defaults(card);
@@ -801,33 +668,19 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
+
+ cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
}
-static const struct ethtool_ops qeth_l2_ethtool_ops = {
- .get_link = ethtool_op_get_link,
- .get_strings = qeth_core_get_strings,
- .get_ethtool_stats = qeth_core_get_ethtool_stats,
- .get_sset_count = qeth_core_get_sset_count,
- .get_drvinfo = qeth_core_get_drvinfo,
- .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
-};
-
-static const struct ethtool_ops qeth_l2_osn_ops = {
- .get_strings = qeth_core_get_strings,
- .get_ethtool_stats = qeth_core_get_ethtool_stats,
- .get_sset_count = qeth_core_get_sset_count,
- .get_drvinfo = qeth_core_get_drvinfo,
-};
-
static const struct net_device_ops qeth_l2_netdev_ops = {
- .ndo_open = qeth_l2_open,
- .ndo_stop = qeth_l2_stop,
- .ndo_get_stats = qeth_get_stats,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_set_mac_address = qeth_l2_set_mac_address,
@@ -838,27 +691,36 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_features = qeth_set_features
};
+static const struct net_device_ops qeth_osn_netdev_ops = {
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
+ .ndo_start_xmit = qeth_l2_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = qeth_tx_timeout,
+};
+
static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
int rc;
- if (qeth_netdev_is_registered(card->dev))
- return 0;
-
- card->dev->priv_flags |= IFF_UNICAST_FLT;
- card->dev->netdev_ops = &qeth_l2_netdev_ops;
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- card->dev->ethtool_ops = &qeth_l2_osn_ops;
+ if (IS_OSN(card)) {
+ card->dev->netdev_ops = &qeth_osn_netdev_ops;
card->dev->flags |= IFF_NOARP;
- } else {
- card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
- card->dev->needed_headroom = sizeof(struct qeth_hdr);
+ goto add_napi;
}
- if (card->info.type == QETH_CARD_TYPE_OSM)
+ card->dev->needed_headroom = sizeof(struct qeth_hdr);
+ card->dev->netdev_ops = &qeth_l2_netdev_ops;
+ card->dev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (IS_OSM(card)) {
card->dev->features |= NETIF_F_VLAN_CHALLENGED;
- else
+ } else {
+ if (!IS_VM_NIC(card))
+ card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->features |= NETIF_F_SG;
@@ -892,8 +754,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
- if (!is_valid_ether_addr(card->dev->dev_addr))
- qeth_l2_request_initial_mac(card);
+add_napi:
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
if (!rc && carrier_ok)
@@ -924,11 +785,11 @@ static void qeth_l2_trace_features(struct qeth_card *card)
sizeof(card->options.vnicc.sup_chars));
}
-static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+static int qeth_l2_set_online(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct net_device *dev = card->dev;
int rc = 0;
- enum qeth_card_states recover_flag;
bool carrier_ok;
mutex_lock(&card->discipline_mutex);
@@ -936,25 +797,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- recover_flag = card->state;
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
- qeth_bridgeport_query_support(card);
- if (card->options.sbp.supported_funcs)
- dev_info(&card->gdev->dev,
- "The device represents a Bridge Capable Port\n");
-
- rc = qeth_l2_setup_netdev(card, carrier_ok);
- if (rc)
- goto out_remove;
-
- if (card->info.type != QETH_CARD_TYPE_OSN &&
- !qeth_l2_send_setmac(card, card->dev->dev_addr))
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
@@ -963,6 +811,13 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
} else
card->info.hwtrap = 0;
+ qeth_bridgeport_query_support(card);
+ if (card->options.sbp.supported_funcs)
+ dev_info(&card->gdev->dev,
+ "The device represents a Bridge Capable Port\n");
+
+ qeth_l2_register_dev_addr(card);
+
/* for the rx_bcast characteristic, init VNICC after setmac */
qeth_l2_vnicc_init(card);
@@ -984,11 +839,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (card->info.type != QETH_CARD_TYPE_OSN)
- qeth_l2_process_vlans(card);
-
- netif_tx_disable(card->dev);
-
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
@@ -999,17 +849,25 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_set_allowed_threads(card, 0xffffffff, 0);
- qeth_enable_hw_features(card->dev);
- if (recover_flag == CARD_STATE_RECOVER) {
- if (recovery_mode &&
- card->info.type != QETH_CARD_TYPE_OSN) {
- __qeth_l2_open(card->dev);
- qeth_l2_set_rx_mode(card->dev);
- } else {
- rtnl_lock();
- dev_open(card->dev, NULL);
- rtnl_unlock();
+ if (!qeth_netdev_is_registered(dev)) {
+ rc = qeth_l2_setup_netdev(card, carrier_ok);
+ if (rc)
+ goto out_remove;
+ } else {
+ rtnl_lock();
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+
+ if (card->info.open_when_online) {
+ card->info.open_when_online = 0;
+ dev_open(dev, NULL);
}
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -1018,44 +876,42 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
return 0;
out_remove:
- qeth_l2_stop_card(card, 0);
+ qeth_l2_stop_card(card);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));
- if (recover_flag == CARD_STATE_RECOVER)
- card->state = CARD_STATE_RECOVER;
- else
- card->state = CARD_STATE_DOWN;
+ card->state = CARD_STATE_DOWN;
+
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
return rc;
}
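
The reworked set_online path above re-opens the interface only when card->info.open_when_online was set, and the set_offline hunk further down is where that flag captures IFF_UP before the interface is closed. A tiny sketch of remembering the admin state across an offline/online cycle, with invented field names.

#include <stdbool.h>
#include <stdio.h>

struct nic {
	bool up;			/* IFF_UP equivalent */
	bool open_when_online;		/* remembered across set_offline/set_online */
};

static void set_offline(struct nic *n)
{
	n->open_when_online = n->up;	/* remember the admin state ... */
	n->up = false;			/* ... then close the interface */
}

static void set_online(struct nic *n)
{
	if (n->open_when_online) {	/* re-open only if it was up before */
		n->open_when_online = false;
		n->up = true;
	}
}

int main(void)
{
	struct nic n = { .up = true, .open_when_online = false };

	set_offline(&n);
	set_online(&n);
	printf("up after recovery: %d\n", n.up);
	return 0;
}
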
-static int qeth_l2_set_online(struct ccwgroup_device *gdev)
-{
- return __qeth_l2_set_online(gdev, 0);
-}
-
static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
int recovery_mode)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
int rc = 0, rc2 = 0, rc3 = 0;
- enum qeth_card_states recover_flag;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
- netif_carrier_off(card->dev);
- recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
card->info.hwtrap = 1;
}
- qeth_l2_stop_card(card, recovery_mode);
+
+ rtnl_lock();
+ card->info.open_when_online = card->dev->flags & IFF_UP;
+ dev_close(card->dev);
+ netif_device_detach(card->dev);
+ netif_carrier_off(card->dev);
+ rtnl_unlock();
+
+ qeth_l2_stop_card(card);
rc = ccw_device_set_offline(CARD_DDEV(card));
rc2 = ccw_device_set_offline(CARD_WDEV(card));
rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -1064,8 +920,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
- if (recover_flag == CARD_STATE_UP)
- card->state = CARD_STATE_RECOVER;
+
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
@@ -1090,18 +945,16 @@ static int qeth_l2_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l2_set_offline(card->gdev, 1);
- rc = __qeth_l2_set_online(card->gdev, 1);
+ rc = qeth_l2_set_online(card->gdev);
if (!rc)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
else {
- qeth_close_dev(card);
+ ccwgroup_set_offline(card->gdev);
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
@@ -1122,37 +975,23 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- if (card->state == CARD_STATE_UP) {
- if (card->info.hwtrap)
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- __qeth_l2_set_offline(card->gdev, 1);
- } else
- __qeth_l2_set_offline(card->gdev, 0);
+
+ qeth_l2_set_offline(gdev);
return 0;
}
static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc = 0;
+ int rc;
- if (card->state == CARD_STATE_RECOVER) {
- rc = __qeth_l2_set_online(card->gdev, 1);
- if (rc) {
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
- }
- } else
- rc = __qeth_l2_set_online(card->gdev, 0);
+ rc = qeth_l2_set_online(gdev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
- netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
@@ -1224,20 +1063,14 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
}
static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, int data_len)
+ struct qeth_cmd_buffer *iob)
{
- u16 s1, s2;
+ u16 length;
QETH_CARD_TEXT(card, 4, "osndipa");
- qeth_prepare_ipa_cmd(card, iob);
- s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
- s2 = (u16)data_len;
- memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
- memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
- return qeth_osn_send_control_data(card, s1, iob);
+ memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
+ return qeth_osn_send_control_data(card, length, iob);
}
int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
@@ -1254,8 +1087,9 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
if (!qeth_card_hw_is_reachable(card))
return -ENODEV;
iob = qeth_wait_for_buffer(&card->write);
+ qeth_prepare_ipa_cmd(card, iob, (u16) data_len);
memcpy(__ipa_cmd(iob), data, data_len);
- return qeth_osn_send_ipa_cmd(card, iob, data_len);
+ return qeth_osn_send_ipa_cmd(card, iob);
}
EXPORT_SYMBOL(qeth_osn_assist);
@@ -1434,7 +1268,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
data->card = card;
memcpy(&data->qports, qports,
sizeof(struct qeth_sbp_state_change) + extrasize);
- queue_work(qeth_wq, &data->worker);
+ queue_work(card->event_wq, &data->worker);
}
struct qeth_bridge_host_data {
@@ -1506,14 +1340,12 @@ static void qeth_bridge_host_event(struct qeth_card *card,
data->card = card;
memcpy(&data->hostevs, hostevs,
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
- queue_work(qeth_wq, &data->worker);
+ queue_work(card->event_wq, &data->worker);
}
/* SETBRIDGEPORT support; sending commands */
struct _qeth_sbp_cbctl {
- u16 ipa_rc;
- u16 cmd_rc;
union {
u32 supported;
struct {
@@ -1523,23 +1355,21 @@ struct _qeth_sbp_cbctl {
} data;
};
-/**
- * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes.
- * @card: qeth_card structure pointer, for debug messages.
- * @cbctl: state structure with hardware return codes.
- * @setcmd: IPA command code
- *
- * Returns negative errno-compatible error indication or 0 on success.
- */
static int qeth_bridgeport_makerc(struct qeth_card *card,
- struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd)
+ struct qeth_ipa_cmd *cmd)
{
+ struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp;
+ enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code;
+ u16 ipa_rc = cmd->hdr.return_code;
+ u16 sbp_rc = sbp->hdr.return_code;
int rc;
- int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD);
- if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) ||
- (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc)))
- switch (cbctl->cmd_rc) {
+ if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS)
+ return 0;
+
+ if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) ||
+ (!IS_IQD(card) && ipa_rc == sbp_rc)) {
+ switch (sbp_rc) {
case IPA_RC_SUCCESS:
rc = 0;
break;
@@ -1603,8 +1433,8 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
default:
rc = -EIO;
}
- else
- switch (cbctl->ipa_rc) {
+ } else {
+ switch (ipa_rc) {
case IPA_RC_NOTSUPP:
rc = -EOPNOTSUPP;
break;
@@ -1614,10 +1444,11 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
default:
rc = -EIO;
}
+ }
if (rc) {
- QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc);
- QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc);
+ QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc);
+ QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc);
}
return rc;
}
@@ -1649,15 +1480,15 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ int rc;
+
QETH_CARD_TEXT(card, 2, "brqsupcb");
- cbctl->ipa_rc = cmd->hdr.return_code;
- cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
- if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) {
- cbctl->data.supported =
- cmd->data.sbp.data.query_cmds_supp.supported_cmds;
- } else {
- cbctl->data.supported = 0;
- }
+ rc = qeth_bridgeport_makerc(card, cmd);
+ if (rc)
+ return rc;
+
+ cbctl->data.supported =
+ cmd->data.sbp.data.query_cmds_supp.supported_cmds;
return 0;
}
@@ -1678,12 +1509,11 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
sizeof(struct qeth_sbp_query_cmds_supp));
if (!iob)
return;
+
if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
- (void *)&cbctl) ||
- qeth_bridgeport_makerc(card, &cbctl,
- IPA_SBP_QUERY_COMMANDS_SUPPORTED)) {
- /* non-zero makerc signifies failure, and produce messages */
+ &cbctl)) {
card->options.sbp.role = QETH_SBP_ROLE_NONE;
+ card->options.sbp.supported_funcs = 0;
return;
}
card->options.sbp.supported_funcs = cbctl.data.supported;
@@ -1695,16 +1525,16 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ int rc;
QETH_CARD_TEXT(card, 2, "brqprtcb");
- cbctl->ipa_rc = cmd->hdr.return_code;
- cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
- if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0))
- return 0;
+ rc = qeth_bridgeport_makerc(card, cmd);
+ if (rc)
+ return rc;
+
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
- cbctl->cmd_rc = 0xffff;
QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
- return 0;
+ return -EINVAL;
}
/* first entry contains the state of the local port */
if (qports->num_entries > 0) {
@@ -1729,7 +1559,6 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
int qeth_bridgeport_query_ports(struct qeth_card *card,
enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
{
- int rc = 0;
struct qeth_cmd_buffer *iob;
struct _qeth_sbp_cbctl cbctl = {
.data = {
@@ -1746,22 +1575,18 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
if (!iob)
return -ENOMEM;
- rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
- (void *)&cbctl);
- if (rc < 0)
- return rc;
- return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
+
+ return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
+ &cbctl);
}
static int qeth_bridgeport_set_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
- struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+
QETH_CARD_TEXT(card, 2, "brsetrcb");
- cbctl->ipa_rc = cmd->hdr.return_code;
- cbctl->cmd_rc = cmd->data.sbp.hdr.return_code;
- return 0;
+ return qeth_bridgeport_makerc(card, cmd);
}
/**
@@ -1773,10 +1598,8 @@ static int qeth_bridgeport_set_cb(struct qeth_card *card,
*/
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
{
- int rc = 0;
int cmdlength;
struct qeth_cmd_buffer *iob;
- struct _qeth_sbp_cbctl cbctl;
enum qeth_ipa_sbp_cmd setcmd;
QETH_CARD_TEXT(card, 2, "brsetrol");
@@ -1801,11 +1624,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
if (!iob)
return -ENOMEM;
- rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
- (void *)&cbctl);
- if (rc < 0)
- return rc;
- return qeth_bridgeport_makerc(card, &cbctl, setcmd);
+
+ return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
}
/**
@@ -1909,7 +1729,7 @@ static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
/* VNIC Characteristics support */
/* handle VNICC IPA command return codes; convert to error codes */
-static int qeth_l2_vnicc_makerc(struct qeth_card *card, int ipa_rc)
+static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
{
int rc;
@@ -1967,7 +1787,7 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "vniccrcb");
if (cmd->hdr.return_code)
- return 0;
+ return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
/* return results to caller */
card->options.vnicc.sup_chars = rep->hdr.sup;
card->options.vnicc.cur_chars = rep->hdr.cur;
@@ -1988,7 +1808,6 @@ static int qeth_l2_vnicc_request(struct qeth_card *card,
struct qeth_ipacmd_vnicc *req;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- int rc;
QETH_CARD_TEXT(card, 2, "vniccreq");
@@ -2031,10 +1850,7 @@ static int qeth_l2_vnicc_request(struct qeth_card *card,
}
/* send request */
- rc = qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb,
- (void *) cbctl);
-
- return qeth_l2_vnicc_makerc(card, rc);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, cbctl);
}
/* VNICC query VNIC characteristics request */
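The qeth_l2 hunks above move hardware return-code translation into the IPA command callbacks, so qeth_send_ipa_cmd() hands a ready-made errno back to its caller and the ipa_rc/cmd_rc plumbing in struct _qeth_sbp_cbctl disappears. A minimal sketch of the resulting split, assuming qeth_send_ipa_cmd() propagates whatever the callback returns (the example_* names are hypothetical):

static int example_sbp_cb(struct qeth_card *card, struct qeth_reply *reply,
			  unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;

	/* translate the IPA/SBP return codes into a negative errno (or 0) */
	return qeth_bridgeport_makerc(card, cmd);
}

static int example_sbp_request(struct qeth_card *card,
			       struct qeth_cmd_buffer *iob)
{
	/* no private cbctl needed; the callback's errno is passed through */
	return qeth_send_ipa_cmd(card, iob, example_sbp_cb, NULL);
}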
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 42a7cdc59b76..7e68d9d16859 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -40,7 +40,6 @@
static int qeth_l3_set_offline(struct ccwgroup_device *);
-static int qeth_l3_stop(struct net_device *);
static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
@@ -254,8 +253,7 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
} else
rc = qeth_l3_register_addr_entry(card, addr);
- if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
- (rc == IPA_RC_LAN_OFFLINE)) {
+ if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
if (addr->ref_counter < 1) {
qeth_l3_deregister_addr_entry(card, addr);
@@ -339,10 +337,28 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
}
+static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ switch (cmd->hdr.return_code) {
+ case IPA_RC_SUCCESS:
+ return 0;
+ case IPA_RC_DUPLICATE_IP_ADDRESS:
+ return -EADDRINUSE;
+ case IPA_RC_MC_ADDR_NOT_FOUND:
+ return -ENOENT;
+ case IPA_RC_LAN_OFFLINE:
+ return -ENETDOWN;
+ default:
+ return -EIO;
+ }
+}
+
static int qeth_l3_send_setdelmc(struct qeth_card *card,
struct qeth_ipaddr *addr, int ipacmd)
{
- int rc;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
@@ -359,9 +375,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
else
memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
- rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
-
- return rc;
+ return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
@@ -423,7 +437,7 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
cmd->data.setdelip4.flags = flags;
}
- return qeth_send_ipa_cmd(card, iob, NULL, NULL);
+ return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
static int qeth_l3_send_setrouting(struct qeth_card *card,
@@ -943,12 +957,13 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0)
- ether_addr_copy(card->dev->dev_addr,
- cmd->data.create_destroy_addr.unique_id);
+ if (cmd->hdr.return_code)
+ return -EIO;
+
+ ether_addr_copy(card->dev->dev_addr,
+ cmd->data.create_destroy_addr.unique_id);
return 0;
}
@@ -976,19 +991,18 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0)
+ if (cmd->hdr.return_code == 0) {
card->info.unique_id = *((__u16 *)
&cmd->data.create_destroy_addr.unique_id[6]);
- else {
- card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
- UNIQUE_ID_NOT_BY_CARD;
- dev_warn(&card->gdev->dev, "The network adapter failed to "
- "generate a unique ID\n");
+ return 0;
}
- return 0;
+
+ card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
+ UNIQUE_ID_NOT_BY_CARD;
+ dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
+ return -EIO;
}
static int qeth_l3_get_unique_id(struct qeth_card *card)
@@ -1071,7 +1085,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
cmd->data.diagass.action, CARD_DEVID(card));
}
- return 0;
+ return rc ? -EIO : 0;
}
static int
@@ -1281,10 +1295,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "kidREC");
- return 0;
- }
clear_bit(vid, card->active_vlans);
qeth_l3_set_rx_mode(dev);
return 0;
@@ -1305,12 +1315,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
else
ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
-
- card->stats.multicast++;
+ QETH_CARD_STAT_INC(card, rx_multicast);
break;
case QETH_CAST_BROADCAST:
ether_addr_copy(tg_addr, card->dev->broadcast);
- card->stats.multicast++;
+ QETH_CARD_STAT_INC(card, rx_multicast);
break;
default:
if (card->options.sniffer)
@@ -1391,33 +1400,23 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
}
work_done++;
budget--;
- card->stats.rx_packets++;
- card->stats.rx_bytes += len;
+ QETH_CARD_STAT_INC(card, rx_packets);
+ QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
-static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
+static void qeth_l3_stop_card(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP, 2, "stopcard");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_set_allowed_threads(card, 0, 1);
+
if (card->options.sniffer &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON))
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
- if (card->read.state == CH_STATE_UP &&
- card->write.state == CH_STATE_UP &&
- (card->state == CARD_STATE_UP)) {
- if (recovery_mode)
- qeth_l3_stop(card->dev);
- else {
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
- }
- card->state = CARD_STATE_SOFTSETUP;
- }
+
if (card->state == CARD_STATE_SOFTSETUP) {
qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
@@ -1433,6 +1432,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}
+
+ flush_workqueue(card->event_wq);
}
/*
@@ -1473,9 +1474,7 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
int i, rc;
QETH_CARD_TEXT(card, 3, "setmulti");
- if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
- (card->state != CARD_STATE_UP))
- return;
+
if (!card->options.sniffer) {
spin_lock_bh(&card->mclock);
@@ -1486,14 +1485,14 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
switch (addr->disp_flag) {
case QETH_DISP_ADDR_DELETE:
rc = qeth_l3_deregister_addr_entry(card, addr);
- if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+ if (!rc || rc == -ENOENT) {
hash_del(&addr->hnode);
kfree(addr);
}
break;
case QETH_DISP_ADDR_ADD:
rc = qeth_l3_register_addr_entry(card, addr);
- if (rc && rc != IPA_RC_LAN_OFFLINE) {
+ if (rc && rc != -ENETDOWN) {
hash_del(&addr->hnode);
kfree(addr);
break;
@@ -1514,7 +1513,7 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
qeth_l3_handle_promisc_mode(card);
}
-static int qeth_l3_arp_makerc(int rc)
+static int qeth_l3_arp_makerc(u16 rc)
{
switch (rc) {
case IPA_RC_SUCCESS:
@@ -1531,8 +1530,18 @@ static int qeth_l3_arp_makerc(int rc)
}
}
+static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+
+ qeth_setassparms_cb(card, reply, data);
+ return qeth_l3_arp_makerc(cmd->hdr.return_code);
+}
+
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
+ struct qeth_cmd_buffer *iob;
int rc;
QETH_CARD_TEXT(card, 3, "arpstnoe");
@@ -1547,13 +1556,19 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
- rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
- no_entries);
+
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 4,
+ QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
+
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
CARD_DEVID(card), rc);
- return qeth_l3_arp_makerc(rc);
+ return rc;
}
static __u32 get_arp_entry_size(struct qeth_card *card,
@@ -1604,7 +1619,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_arp_query_data *qdata;
struct qeth_arp_query_info *qinfo;
- int i;
int e;
int entrybytes_done;
int stripped_bytes;
@@ -1618,13 +1632,13 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
if (cmd->hdr.return_code) {
QETH_CARD_TEXT(card, 4, "arpcberr");
QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
- return 0;
+ return qeth_l3_arp_makerc(cmd->hdr.return_code);
}
if (cmd->data.setassparms.hdr.return_code) {
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
QETH_CARD_TEXT(card, 4, "setaperr");
QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
- return 0;
+ return qeth_l3_arp_makerc(cmd->hdr.return_code);
}
qdata = &cmd->data.setassparms.data.query_arp;
QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
@@ -1651,9 +1665,9 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
break;
if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
- QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
- cmd->hdr.return_code = IPA_RC_ENOMEM;
- goto out_error;
+ QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC);
+ memset(qinfo->udata, 0, 4);
+ return -ENOSPC;
}
memcpy(qinfo->udata + qinfo->udata_offset,
@@ -1676,10 +1690,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
QETH_CARD_TEXT_(card, 4, "rc%i", 0);
return 0;
-out_error:
- i = 0;
- memcpy(qinfo->udata, &i, 4);
- return 0;
}
static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
@@ -1701,13 +1711,11 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
- rc = qeth_send_control_data(card,
- QETH_SETASS_BASE_LEN + QETH_ARP_CMD_LEN,
- iob, qeth_l3_arp_query_cb, qinfo);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
- return qeth_l3_arp_makerc(rc);
+ return rc;
}
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
@@ -1789,16 +1797,16 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card,
cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
- rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
arp_cmd, CARD_DEVID(card), rc);
-
- return qeth_l3_arp_makerc(rc);
+ return rc;
}
static int qeth_l3_arp_flush_cache(struct qeth_card *card)
{
+ struct qeth_cmd_buffer *iob;
int rc;
QETH_CARD_TEXT(card, 3, "arpflush");
@@ -1813,12 +1821,18 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
- rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
+
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_FLUSH_CACHE, 0,
+ QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
+
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
- return qeth_l3_arp_makerc(rc);
+ return rc;
}
static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1920,12 +1934,13 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type)
return QETH_CAST_UNICAST;
}
-static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type,
- unsigned int data_len)
+static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
+ struct qeth_hdr *hdr, struct sk_buff *skb,
+ int ipv, int cast_type, unsigned int data_len)
{
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+ struct qeth_card *card = queue->card;
hdr->hdr.l3.length = data_len;
@@ -1947,8 +1962,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
/* some HW requires combined L3+L4 csum offload: */
if (ipv == 4)
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
+ QETH_TXQ_STAT_INC(queue, skbs_csum);
}
}
@@ -2049,6 +2063,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
+ queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
if (IS_IQD(card)) {
if (card->options.sniffer)
goto tx_drop;
@@ -2058,20 +2074,9 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
goto tx_drop;
}
- if (card->state != CARD_STATE_UP) {
- card->stats.tx_carrier_errors++;
- goto tx_drop;
- }
-
if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
goto tx_drop;
- queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
- if (card->options.performance_stats) {
- card->perf_stats.outbound_cnt++;
- card->perf_stats.outbound_start_time = qeth_get_micros();
- }
netif_stop_queue(dev);
if (ipv == 4 || IS_IQD(card))
@@ -2081,11 +2086,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
qeth_l3_fill_header);
if (!rc) {
- card->stats.tx_packets++;
- card->stats.tx_bytes += tx_bytes;
- if (card->options.performance_stats)
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
+ QETH_TXQ_STAT_INC(queue, tx_packets);
+ QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
netif_wake_queue(dev);
return NETDEV_TX_OK;
} else if (rc == -EBUSY) {
@@ -2093,72 +2095,13 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
} /* else fall through */
tx_drop:
- card->stats.tx_dropped++;
- card->stats.tx_errors++;
+ QETH_TXQ_STAT_INC(queue, tx_dropped);
+ QETH_TXQ_STAT_INC(queue, tx_errors);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
-static int __qeth_l3_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
- int rc = 0;
-
- QETH_CARD_TEXT(card, 4, "qethopen");
- if (card->state == CARD_STATE_UP)
- return rc;
- if (card->state != CARD_STATE_SOFTSETUP)
- return -ENODEV;
- card->data.state = CH_STATE_UP;
- card->state = CARD_STATE_UP;
- netif_start_queue(dev);
-
- if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
- napi_enable(&card->napi);
- local_bh_disable();
- napi_schedule(&card->napi);
- /* kick-start the NAPI softirq: */
- local_bh_enable();
- } else
- rc = -EIO;
- return rc;
-}
-
-static int qeth_l3_open(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 5, "qethope_");
- if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_CARD_TEXT(card, 3, "openREC");
- return -ERESTARTSYS;
- }
- return __qeth_l3_open(dev);
-}
-
-static int qeth_l3_stop(struct net_device *dev)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_CARD_TEXT(card, 4, "qethstop");
- netif_tx_disable(dev);
- if (card->state == CARD_STATE_UP) {
- card->state = CARD_STATE_SOFTSETUP;
- napi_disable(&card->napi);
- }
- return 0;
-}
-
-static const struct ethtool_ops qeth_l3_ethtool_ops = {
- .get_link = ethtool_op_get_link,
- .get_strings = qeth_core_get_strings,
- .get_ethtool_stats = qeth_core_get_ethtool_stats,
- .get_sset_count = qeth_core_get_sset_count,
- .get_drvinfo = qeth_core_get_drvinfo,
- .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
-};
-
/*
* we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
* NOARP on the netdevice is no option because it also turns off neighbor
@@ -2193,9 +2136,9 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
}
static const struct net_device_ops qeth_l3_netdev_ops = {
- .ndo_open = qeth_l3_open,
- .ndo_stop = qeth_l3_stop,
- .ndo_get_stats = qeth_get_stats,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
@@ -2208,9 +2151,9 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
};
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
- .ndo_open = qeth_l3_open,
- .ndo_stop = qeth_l3_stop,
- .ndo_get_stats = qeth_get_stats,
+ .ndo_open = qeth_open,
+ .ndo_stop = qeth_stop,
+ .ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
.ndo_validate_addr = eth_validate_addr,
@@ -2229,9 +2172,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
unsigned int headroom;
int rc;
- if (qeth_netdev_is_registered(card->dev))
- return 0;
-
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -2283,7 +2223,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
return -ENODEV;
card->dev->needed_headroom = headroom;
- card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -2338,17 +2277,18 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
+ cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
-static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+static int qeth_l3_set_online(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct net_device *dev = card->dev;
int rc = 0;
- enum qeth_card_states recover_flag;
bool carrier_ok;
mutex_lock(&card->discipline_mutex);
@@ -2356,7 +2296,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- recover_flag = card->state;
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
@@ -2364,10 +2303,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- rc = qeth_l3_setup_netdev(card, carrier_ok);
- if (rc)
- goto out_remove;
-
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
@@ -2397,7 +2332,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
}
- netif_tx_disable(card->dev);
rc = qeth_init_qdio_queues(card);
if (rc) {
@@ -2410,14 +2344,23 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_recover_ip(card);
- qeth_enable_hw_features(card->dev);
- if (recover_flag == CARD_STATE_RECOVER) {
+ if (!qeth_netdev_is_registered(dev)) {
+ rc = qeth_l3_setup_netdev(card, carrier_ok);
+ if (rc)
+ goto out_remove;
+ } else {
rtnl_lock();
- if (recovery_mode) {
- __qeth_l3_open(card->dev);
- qeth_l3_set_rx_mode(card->dev);
- } else {
- dev_open(card->dev, NULL);
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+
+ if (card->info.open_when_online) {
+ card->info.open_when_online = 0;
+ dev_open(dev, NULL);
}
rtnl_unlock();
}
@@ -2428,45 +2371,43 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
mutex_unlock(&card->discipline_mutex);
return 0;
out_remove:
- qeth_l3_stop_card(card, 0);
+ qeth_l3_stop_card(card);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));
- if (recover_flag == CARD_STATE_RECOVER)
- card->state = CARD_STATE_RECOVER;
- else
- card->state = CARD_STATE_DOWN;
+ card->state = CARD_STATE_DOWN;
+
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
return rc;
}
-static int qeth_l3_set_online(struct ccwgroup_device *gdev)
-{
- return __qeth_l3_set_online(gdev, 0);
-}
-
static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
int recovery_mode)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
int rc = 0, rc2 = 0, rc3 = 0;
- enum qeth_card_states recover_flag;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
- netif_carrier_off(card->dev);
- recover_flag = card->state;
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
card->info.hwtrap = 1;
}
- qeth_l3_stop_card(card, recovery_mode);
- if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+
+ rtnl_lock();
+ card->info.open_when_online = card->dev->flags & IFF_UP;
+ dev_close(card->dev);
+ netif_device_detach(card->dev);
+ netif_carrier_off(card->dev);
+ rtnl_unlock();
+
+ qeth_l3_stop_card(card);
+ if (card->options.cq == QETH_CQ_ENABLED) {
rtnl_lock();
call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
rtnl_unlock();
@@ -2479,8 +2420,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
- if (recover_flag == CARD_STATE_UP)
- card->state = CARD_STATE_RECOVER;
+
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
@@ -2506,18 +2446,16 @@ static int qeth_l3_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l3_set_offline(card->gdev, 1);
- rc = __qeth_l3_set_online(card->gdev, 1);
+ rc = qeth_l3_set_online(card->gdev);
if (!rc)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
else {
- qeth_close_dev(card);
+ ccwgroup_set_offline(card->gdev);
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
@@ -2527,37 +2465,23 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- netif_device_detach(card->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_OFFLINE)
return 0;
- if (card->state == CARD_STATE_UP) {
- if (card->info.hwtrap)
- qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
- __qeth_l3_set_offline(card->gdev, 1);
- } else
- __qeth_l3_set_offline(card->gdev, 0);
+
+ qeth_l3_set_offline(gdev);
return 0;
}
static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc = 0;
+ int rc;
- if (card->state == CARD_STATE_RECOVER) {
- rc = __qeth_l3_set_online(card->gdev, 1);
- if (rc) {
- rtnl_lock();
- dev_close(card->dev);
- rtnl_unlock();
- }
- } else
- rc = __qeth_l3_set_online(card->gdev, 0);
+ rc = qeth_l3_set_online(gdev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
- netif_device_attach(card->dev);
if (rc)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 45ac6d8705c6..cff518b0f904 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -167,8 +167,7 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -213,8 +212,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
return -EPERM;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
@@ -280,8 +278,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
- if (card->state != CARD_STATE_DOWN &&
- card->state != CARD_STATE_RECOVER)
+ if (card->state != CARD_STATE_DOWN)
return -EPERM;
if (card->options.sniffer)
return -EPERM;
@@ -356,8 +353,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)) {
+ if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cf30d124b9e..e390f8c6d5f3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
goto failed;
/* report size limit per scatter-gather segment */
- adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 10c4e8e3fd59..661436a92f8e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -294,8 +294,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
id->input_handler = zfcp_qdio_int_resp;
id->output_handler = zfcp_qdio_int_req;
id->int_parm = (unsigned long) qdio;
- id->input_sbal_addr_array = (void **) (qdio->res_q);
- id->output_sbal_addr_array = (void **) (qdio->req_q);
+ id->input_sbal_addr_array = qdio->res_q;
+ id->output_sbal_addr_array = qdio->req_q;
id->scan_threshold =
QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
}
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 00acc7144bbc..f4f6a07c5222 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
* ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
/* GCD, adjusted later */
+ /* report size limit per scatter-gather segment */
+ .max_segment_size = ZFCP_QDIO_SBALE_LEN,
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
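Together with the zfcp_aux.c hunk that drops the manual dma_parms assignment, the line added here moves the per-segment limit into the scsi_host_template, where the SCSI midlayer applies it when the host is added. A sketch of the declaration style, with a purely illustrative 4 KiB limit:

#include <scsi/scsi_host.h>

/* sketch only: the midlayer picks up max_segment_size from the template,
 * so the driver no longer has to touch dev->dma_parms itself */
static struct scsi_host_template example_sht = {
	.name			= "example",
	.max_segment_size	= 4096,
	.dma_boundary		= 4096 - 1,
};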
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index fc9dbad476c0..ae1d56da671d 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
unsigned long *indicatorp = NULL;
- int ret, i;
+ int ret, i, queue_idx = 0;
struct ccw1 *ccw;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return -ENOMEM;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
- ctx ? ctx[i] : false, ccw);
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
+ names[i], ctx ? ctx[i] : false,
+ ccw);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
vqs[i] = NULL;
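The loop change above lets find_vqs callers leave holes in the queue array: a NULL name means no virtqueue is wanted in that slot, while queue_idx keeps the device-visible queue numbers contiguous for the slots that are actually set up. The pattern, sketched with a hypothetical setup_vq() helper standing in for virtio_ccw_setup_vq():

	int i, queue_idx = 0;

	for (i = 0; i < nvqs; i++) {
		if (!names[i]) {
			vqs[i] = NULL;		/* caller asked for a hole */
			continue;
		}
		vqs[i] = setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				  ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i]))
			goto error;		/* tear down what was created */
	}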
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a3c20e3a8b7c..3337b1e80412 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2009,7 +2009,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
unsigned long mem_addr, mem_len;
- int retval = -ENODEV;
+ int retval;
retval = pci_enable_device(pdev);
if (retval) {
@@ -2020,8 +2020,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
@@ -2240,8 +2242,10 @@ static int twa_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
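Both the probe and resume hunks above switch to the same negotiation: try a 64-bit DMA mask first, fall back to 32-bit, and only then fail. A small helper sketch of that pattern (name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)	/* 64-bit addressing not usable, retry with 32-bit */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return ret;	/* 0 on success, negative errno otherwise */
}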
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index e8f5f7c63190..dda6fa857709 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -646,8 +646,9 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
- size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle,
+ GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
@@ -1572,8 +1573,10 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
@@ -1804,8 +1807,10 @@ static int twl_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 128d658d472a..16957d7ac414 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
if(tpnt->sdev_attrs == NULL)
tpnt->sdev_attrs = NCR_700_dev_attrs;
- memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+ memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if(memory == NULL) {
printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index f38882f6f37d..8f9d9e9fa695 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1369,14 +1369,14 @@ config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI
select SCSI_SPI_ATTRS
- select NVRAM
---help---
If you have an Atari with built-in NCR5380 SCSI controller (TT,
Falcon, ...) say Y to get it supported. Of course also, if you have
a compatible SCSI controller (e.g. for Medusa).
- To compile this driver as a module, choose M here: the
- module will be called atari_scsi.
+ To compile this driver as a module, choose M here: the module will
+ be called atari_scsi. If you also enable NVRAM support, the SCSI
+ host's ID is taken from the setting in TT RTC NVRAM.
This driver supports both styles of NCR integration into the
system: the TT style (separate DMA), and the Falcon style (via
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index ff53fd0d12f2..66c514310f3c 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1123,8 +1123,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
- host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
- GFP_KERNEL);
+ host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys,
+ GFP_KERNEL);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
@@ -1132,8 +1132,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
- host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
- GFP_KERNEL);
+ host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys,
+ GFP_KERNEL);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
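This and the remaining dma_zalloc_coherent() conversions in this section are mechanical: dma_alloc_coherent() already returns zeroed memory in this kernel, so dropping the _zalloc wrapper changes no behaviour. The resulting allocation pattern, sketched with a hypothetical helper:

#include <linux/dma-mapping.h>

static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	/* the buffer comes back zeroed; no extra memset() is needed where
	 * dma_zalloc_coherent() used to be called */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}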
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 634ddb90e7aa..7e56a11836c1 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1747,11 +1747,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
- error = dma_set_max_seg_size(&pdev->dev,
- (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
- (shost->max_sectors << 9) : 65536);
- if (error)
- goto out_deinit;
+ if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
+ shost->max_segment_size = shost->max_sectors << 9;
+ else
+ shost->max_segment_size = 65536;
/*
* Firmware printf works only with older firmware.
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f83f79b07b50..bbdae67774f0 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n",
asd_dev_rev[asd_ha->revision_id]);
}
-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
+static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
static ssize_t asd_show_dev_bios_build(struct device *dev,
struct device_attribute *attr,char *buf)
@@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
{
int err;
- err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
if (err)
return err;
@@ -499,13 +499,13 @@ err_update_bios:
err_biosb:
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
err_rev:
- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
return err;
}
static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
{
- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
@@ -769,9 +769,11 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto Err_remove;
- err = -ENODEV;
- if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
+ err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = -ENODEV;
asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
goto Err_remove;
}
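The rename above is not cosmetic: the PCI core already exposes a "revision" sysfs attribute for every PCI device, so registering a driver attribute with the same name on the same device collides with it; a driver-specific name avoids the clash. The attribute definition and its three call sites change together because DEVICE_ATTR(aic_revision, ...) generates the dev_attr_aic_revision symbol they reference. A sketch of that naming link (example_* helpers hypothetical):

#include <linux/device.h>

static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);

static int example_create_attrs(struct device *dev)
{
	return device_create_file(dev, &dev_attr_aic_revision);
}

static void example_remove_attrs(struct device *dev)
{
	device_remove_file(dev, &dev_attr_aic_revision);
}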
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 0f6751b0a633..57c6fa388bf6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -587,8 +587,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg;
acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent) {
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
@@ -617,8 +619,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
struct MessageUnit_D *reg;
acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent) {
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
@@ -659,8 +663,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
uint32_t completeQ_size;
completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
acb->roundup_ccbsize = roundup(completeQ_size, 32);
- dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
- &dma_coherent_handle, GFP_KERNEL);
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ acb->roundup_ccbsize,
+ &dma_coherent_handle,
+ GFP_KERNEL);
if (!dma_coherent){
pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
return false;
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a503dc50c4f8..e809493d0d06 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -757,15 +757,17 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_hostid >= 0) {
atari_scsi_template.this_id = setup_hostid & 7;
- } else {
+ } else if (IS_REACHABLE(CONFIG_NVRAM)) {
/* Test if a host id is set in the NVRam */
- if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
- unsigned char b = nvram_read_byte(16);
+ if (ATARIHW_PRESENT(TT_CLK)) {
+ unsigned char b;
+ loff_t offset = 16;
+ ssize_t count = nvram_read(&b, 1, &offset);
/* Arbitration enabled? (for TOS)
* If yes, use configured host ID
*/
- if (b & 0x80)
+ if ((count == 1) && (b & 0x80))
atari_scsi_template.this_id = b & 7;
}
}
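The atari_scsi hunk reads the host ID through the generic nvram_read() interface, and only when IS_REACHABLE(CONFIG_NVRAM) is true, which matches the Kconfig change earlier in this section that stops selecting NVRAM outright. The access pattern, sketched with a hypothetical default_id fallback:

	int host_id = default_id;

	if (IS_REACHABLE(CONFIG_NVRAM)) {
		unsigned char b;
		loff_t offset = 16;	/* TT RTC NVRAM: SCSI host ID byte */
		ssize_t count = nvram_read(&b, 1, &offset);

		/* arbitration enabled? then use the configured ID */
		if (count == 1 && (b & 0x80))
			host_id = b & 7;
	}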
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 39f3820572b4..76e49d902609 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3321,8 +3321,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
@@ -3566,7 +3566,7 @@ static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
/* if eqid_count == 1 fall back to INTX */
if (enable_msix && nvec > 1) {
- const struct irq_affinity desc = { .post_vectors = 1 };
+ struct irq_affinity desc = { .post_vectors = 1 };
if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index ca7b7bbc8371..d4febaadfaa3 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -293,8 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *cmd,
u8 subsystem, u8 opcode, u32 size)
{
- cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
- GFP_KERNEL);
+ cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+ GFP_KERNEL);
if (!cmd->va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n");
@@ -1510,10 +1510,9 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
return -EINVAL;
nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
- nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
- nonemb_cmd.size,
- &nonemb_cmd.dma,
- GFP_KERNEL);
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
+ nonemb_cmd.size, &nonemb_cmd.dma,
+ GFP_KERNEL);
if (!nonemb_cmd.va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
"BM_%d : invldt_cmds_params alloc failed\n");
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 42a0caf6740d..88880a66a189 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -727,7 +727,7 @@ bfad_init_timer(struct bfad_s *bfad)
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
- int rc = -ENODEV;
+ int rc = -ENODEV;
if (pci_enable_device(pdev)) {
printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
@@ -739,8 +739,12 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ if (rc) {
+ rc = -ENODEV;
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
goto out_release_region;
}
@@ -1534,6 +1538,7 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
{
struct bfad_s *bfad = pci_get_drvdata(pdev);
u8 byte;
+ int rc;
dev_printk(KERN_ERR, &pdev->dev,
"bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
@@ -1561,8 +1566,11 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32)))
+ rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
+ DMA_BIT_MASK(32));
+ if (rc)
goto out_disable_device;
if (restart_bfa(bfad) == -1)
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 5d163ca1b366..d8e6d7480f35 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3264,9 +3264,9 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
/* Allocate dma coherent memory */
buf_info = buf_base;
buf_info->size = payload_len;
- buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
- buf_info->size, &buf_info->phys,
- GFP_KERNEL);
+ buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
+ buf_info->size, &buf_info->phys,
+ GFP_KERNEL);
if (!buf_info->virt)
goto out_free_mem;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index e8ae4d671d23..039328d9ef13 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,10 +1857,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
* entries. Hence the limit with one page is 8192 task context
* entries.
*/
- hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_bd_dma,
- GFP_KERNEL);
+ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
@@ -1894,10 +1894,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {
- hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_dma[i],
- GFP_KERNEL);
+ hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
@@ -2031,19 +2031,19 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
}
- hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
- &hba->hash_tbl_pbl_dma,
- GFP_KERNEL);
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
@@ -2104,10 +2104,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
- hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
- mem_size,
- &hba->t2_hash_tbl_ptr_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
@@ -2116,9 +2115,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
- hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
@@ -2140,9 +2139,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}
- hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
- &hba->stats_buf_dma,
- GFP_KERNEL);
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c13a5b..bc9f2a2365f4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
return NULL;
}
+ cmgr->hba = hba;
cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
GFP_KERNEL);
if (!cmgr->free_list) {
@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
goto mem_err;
}
- cmgr->hba = hba;
cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
for (i = 0; i < arr_sz; i++) {
@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
mem_size = num_ios * sizeof(struct io_bdt *);
- cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+ cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
if (!cmgr->io_bdt_pool) {
printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
goto mem_err;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index e3d1c7c440c8..d735e87e416a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,8 +672,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
- &tgt->sq_dma, GFP_KERNEL);
+ tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
@@ -685,8 +685,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
- &tgt->cq_dma, GFP_KERNEL);
+ tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
@@ -698,8 +698,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
- &tgt->rq_dma, GFP_KERNEL);
+ tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
@@ -710,8 +710,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
- &tgt->rq_pbl_dma, GFP_KERNEL);
+ tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
@@ -735,9 +735,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->xferq_mem_size, &tgt->xferq_dma,
- GFP_KERNEL);
+ tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->xferq_mem_size, &tgt->xferq_dma,
+ GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
@@ -749,9 +749,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->confq_mem_size, &tgt->confq_dma,
- GFP_KERNEL);
+ tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_mem_size, &tgt->confq_dma,
+ GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
@@ -763,9 +763,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
- tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->confq_pbl_size,
- &tgt->confq_pbl_dma, GFP_KERNEL);
+ tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
@@ -787,9 +787,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
- tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
- tgt->conn_db_mem_size,
- &tgt->conn_db_dma, GFP_KERNEL);
+ tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
@@ -802,8 +802,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
- &tgt->lcq_dma, GFP_KERNEL);
+ tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91f5316aa3ab..fae6f71e677d 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1070,8 +1070,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
- dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
- &ep->qp.sq_phys, GFP_KERNEL);
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
@@ -1106,8 +1106,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
- dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
- &ep->qp.cq_phys, GFP_KERNEL);
+ dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
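
The bnx2fc and bnx2i hunks above (and many of the hunks that follow) are mechanical conversions away from dma_zalloc_coherent(): dma_alloc_coherent() already returns zeroed memory, so the zalloc wrapper was dropped treewide. A minimal sketch of the allocate/free pair, with "ring", "ring_size" and "ring_dma" as illustrative names rather than driver fields:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Sketch only: dma_alloc_coherent() hands back zeroed memory, so no
     * explicit memset() is needed after the allocation. */
    static void *alloc_ring(struct device *dev, size_t ring_size,
                            dma_addr_t *ring_dma)
    {
            return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
    }

    static void free_ring(struct device *dev, size_t ring_size,
                          void *ring, dma_addr_t ring_dma)
    {
            dma_free_coherent(dev, ring_size, ring, ring_dma);
    }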
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 8a004036e3d7..9bd2bd8dc2be 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
}
fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+ ln->fc_vport = fc_vport;
if (csio_fcoe_alloc_vnp(hw, ln))
goto error;
*(struct csio_lnode **)fc_vport->dd_data = ln;
- ln->fc_vport = fc_vport;
if (!fc_vport->node_name)
fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
if (!fc_vport->port_name)
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cf629380a981..616b25bf7941 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -210,8 +210,11 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv)
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rv) {
+ rv = -ENODEV;
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_release_regions;
}
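
The csio_pci_init() hunk above (and the same idiom in the hisi_sas, hptiop, qla1280 and lpfc hunks further down) replaces the one-line "64-bit || 32-bit" test with an explicit try-64-then-32 sequence, so the probe path can report a meaningful error instead of a catch-all. A sketch of that fallback under the assumption of a generic PCI probe helper (the helper name is hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Illustrative helper, not part of any of the patched drivers. */
    static int set_dma_masks(struct pci_dev *pdev)
    {
            int rc;

            /* Prefer a 64-bit mask; fall back to 32-bit if the platform
             * rejects it. */
            rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
            if (rc)
                    rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            if (rc)
                    dev_err(&pdev->dev, "No suitable DMA mask available\n");

            return rc;      /* 0 on success, negative errno otherwise */
    }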
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index dc12933533d5..66bbd21819ae 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -233,8 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
q = wrm->q_arr[free_idx];
- q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
- GFP_KERNEL);
+ q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+ GFP_KERNEL);
if (!q->vstart) {
csio_err(hw,
"Failed to allocate DMA memory for "
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 8a20411699d9..75e1273a44b3 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
- unsigned int tid, int pg_idx, bool reply)
+ unsigned int tid, int pg_idx)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
* @tid: connection id
* @hcrc: header digest enabled
* @dcrc: data digest enabled
- * @reply: request reply from h/w
* set up the iscsi digest settings for a connection identified by tid
*/
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0x0F000000);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 49f8028ac524..d26f50af00ea 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cxgbi_sock *csk;
csk = lookup_tid(t, tid);
- if (!csk)
+ if (!csk) {
pr_err("can't find conn. for tid %u.\n", tid);
+ return;
+ }
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
csk, csk->state, csk->flags, csk->tid, rpl->status);
- if (rpl->status != CPL_ERR_NONE)
+ if (rpl->status != CPL_ERR_NONE) {
pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
csk, tid, rpl->status);
+ csk->err = -EINVAL;
+ }
+
+ complete(&csk->cmpl);
__kfree_skb(skb);
}
@@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
- int pg_idx, bool reply)
+ int pg_idx)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
@@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 75f876409fb9..245742557c03 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
skb_queue_head_init(&csk->receive_queue);
skb_queue_head_init(&csk->write_queue);
timer_setup(&csk->retry_timer, NULL, 0);
+ init_completion(&csk->cmpl);
rwlock_init(&csk->callback_lock);
csk->cdev = cdev;
csk->flags = 0;
@@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
if (!err && conn->hdrdgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ conn->datadgst_en);
break;
case ISCSI_PARAM_DATADGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && conn->datadgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ conn->datadgst_en);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
ppm = csk->cdev->cdev2ppm(csk->cdev);
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
- ppm->tformat.pgsz_idx_dflt, 0);
+ ppm->tformat.pgsz_idx_dflt);
if (err < 0)
return err;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 5d5d8b50d842..1917ff57651d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -149,6 +149,7 @@ struct cxgbi_sock {
struct sk_buff_head receive_queue;
struct sk_buff_head write_queue;
struct timer_list retry_timer;
+ struct completion cmpl;
int err;
rwlock_t callback_lock;
void *user_data;
@@ -490,9 +491,9 @@ struct cxgbi_device {
struct cxgbi_ppm *,
struct cxgbi_task_tag_info *);
int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
- unsigned int, int, int, int);
+ unsigned int, int, int);
int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
- unsigned int, int, bool);
+ unsigned int, int);
void (*csk_release_offload_resources)(struct cxgbi_sock *);
int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
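
The cxgb3i/cxgb4i/libcxgbi hunks above drop the unused "reply" parameter from the SET_TCB_FIELD setters and make the cxgb4i variants synchronous: the work request now always asks the hardware for a reply, do_set_tcb_rpl() records any failure in csk->err and signals the new csk->cmpl completion, and the issuer blocks on it before returning. The shape of that handshake, reduced to a sketch (the struct and function names here are illustrative, not libcxgbi's):

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct conn {                           /* stand-in for struct cxgbi_sock */
            struct completion cmpl;         /* init_completion() at create time */
            int err;
    };

    /* Issuer: post the request, then block until the reply handler wakes us. */
    static int set_field_sync(struct conn *c)
    {
            c->err = 0;
            reinit_completion(&c->cmpl);
            /* ... build and send the CPL_SET_TCB_FIELD work request here ... */
            wait_for_completion(&c->cmpl);
            return c->err;
    }

    /* Reply handler: translate the hardware status and wake the waiter. */
    static void handle_reply(struct conn *c, int hw_status)
    {
            if (hw_status)
                    c->err = -EINVAL;
            complete(&c->cmpl);
    }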
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index bfa13e3b191c..c8bad2c093b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
cfg = shost_priv(host);
+ cfg->state = STATE_PROBING;
cfg->host = host;
rc = alloc_mem(cfg);
if (rc) {
@@ -3775,6 +3776,7 @@ out:
return rc;
out_remove:
+ cfg->state = STATE_PROBED;
cxlflash_remove(pdev);
goto out;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index eed7fc5b3389..bc17fa0d8375 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2323,6 +2323,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct Scsi_Host *shost;
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
+ int error;
shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
if (!shost) {
@@ -2343,8 +2344,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ if (error) {
dev_err(dev, "No usable DMA addressing method\n");
goto err_out;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index e2420a810e99..e0570fd8466e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2447,10 +2447,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
goto err_out_disable_device;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
- rc = -EIO;
+ rc = -ENODEV;
goto err_out_regions;
}
@@ -2507,6 +2509,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ if (hisi_hba->prot_mask) {
+ dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
+ prot_mask);
+ scsi_host_set_prot(hisi_hba->shost, prot_mask);
+ }
+
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_ha;
@@ -2519,12 +2527,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
goto err_out_register_ha;
- if (hisi_hba->prot_mask) {
- dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
- prot_mask);
- scsi_host_set_prot(hisi_hba->shost, prot_mask);
- }
-
scsi_scan_host(shost);
return 0;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 3eedfd4f8f57..251c084a6ff0 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1292,6 +1292,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
dma_addr_t start_phy;
void *start_virt;
u32 offset, i, req_size;
+ int rc;
dprintk("hptiop_probe(%p)\n", pcidev);
@@ -1308,9 +1309,12 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
/* Enable 64bit DMA if possible */
iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
- if (dma_set_mask(&pcidev->dev,
- DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) ||
- dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask(&pcidev->dev,
+ DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
+ if (rc)
+ rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
+
+ if (rc) {
printk(KERN_ERR "hptiop: fail to set dma_mask\n");
goto disable_pci_device;
}
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 68b90c4f79a3..1727d0c71b12 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
shost->max_lun = ~0;
shost->max_cmd_len = MAX_COMMAND_SIZE;
+ /* turn on DIF support */
+ scsi_host_set_prot(shost,
+ SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
err = scsi_add_host(shost, &pdev->dev);
if (err)
goto err_shost;
@@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_host_alloc;
}
pci_info->hosts[i] = h;
-
- /* turn on DIF support */
- scsi_host_set_prot(to_shost(h),
- SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIF_TYPE2_PROTECTION |
- SHOST_DIF_TYPE3_PROTECTION);
- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
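
Both the hisi_sas v3 hunk earlier and the isci hunk above move scsi_host_set_prot()/scsi_host_set_guard() so they run before scsi_add_host(); the point, presumably, is that the midlayer consults the host's protection capabilities when the host is added, so declaring them afterwards has no effect. A sketch of the intended ordering (the wrapper function is hypothetical):

    #include <scsi/scsi_host.h>

    static int my_probe_add_host(struct Scsi_Host *shost, struct device *dev)
    {
            /* Declare T10 PI support before the host is registered. */
            scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
                                      SHOST_DIF_TYPE2_PROTECTION |
                                      SHOST_DIF_TYPE3_PROTECTION);
            scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

            return scsi_add_host(shost, dev);
    }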
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590ed955..ff943f477d6f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_frame_payload_op(fp) != ELS_LS_ACC) {
FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
flp = fc_frame_payload_get(fp, sizeof(*flp));
if (!flp) {
FC_LPORT_DBG(lport, "FLOGI bad response\n");
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
"lport->mfs:%hu\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
if (mfs <= lport->mfs) {
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 9192a1d9dec6..dfba4921b265 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
- WARN_ON(!list_empty(&rdata->peers));
kfree_rcu(rdata, rcu);
}
EXPORT_SYMBOL(fc_rport_destroy);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b8d325ce8754..120fc520f27a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
return -ENODATA;
+ spin_lock_bh(&conn->session->back_lock);
+ if (conn->task == NULL) {
+ spin_unlock_bh(&conn->session->back_lock);
+ return -ENODATA;
+ }
__iscsi_get_task(task);
+ spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->frwd_lock);
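
The libiscsi hunk above closes a race in iscsi_xmit_task(): the task reference is now taken under session->back_lock, and only after re-checking that conn->task is still set, so a concurrent completion cannot free the task underneath the xmit path. The general shape of that "check and get under the lock" pattern, with illustrative structure names in place of iscsi_conn/iscsi_task:

    #include <linux/spinlock.h>
    #include <linux/kref.h>

    struct work_item {
            struct kref ref;                /* dropped with kref_put() elsewhere */
    };

    struct xmit_ctx {
            spinlock_t back_lock;
            struct work_item *current_item; /* cleared by the completion path */
    };

    /* Take a reference only while the pointer is known to still be valid. */
    static struct work_item *get_current_item(struct xmit_ctx *ctx)
    {
            struct work_item *item;

            spin_lock_bh(&ctx->back_lock);
            item = ctx->current_item;
            if (item)
                    kref_get(&item->ref);   /* cannot be freed while we hold the lock */
            spin_unlock_bh(&ctx->back_lock);

            return item;                    /* NULL: the work already completed */
    }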
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 17eb4185f29d..f21c93bbb35c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev(
rphy = sas_end_device_alloc(phy->port);
if (!rphy)
goto out_free;
+ rphy->identify.phy_identifier = phy_id;
child->rphy = rphy;
get_device(&rphy->dev);
@@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
get_device(&rphy->dev);
+ rphy->identify.phy_identifier = phy_id;
sas_fill_in_rphy(child, rphy);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 8698af86485d..2dc564e59430 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2730,8 +2730,8 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
INIT_LIST_HEAD(&dmabuf->list);
/* now, allocate dma buffer */
- dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
- &(dmabuf->phys), GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+ &(dmabuf->phys), GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c1c36812c3d2..e1129260ed18 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6973,9 +6973,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
if (!dmabuf)
return NULL;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
- LPFC_HDR_TEMPLATE_SIZE,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ LPFC_HDR_TEMPLATE_SIZE,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
rpi_hdr = NULL;
goto err_free_dmabuf;
@@ -7361,15 +7361,18 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
unsigned long bar0map_len, bar2map_len;
int i, hbq_count;
void *ptr;
- int error = -ENODEV;
+ int error;
if (!pdev)
- return error;
+ return -ENODEV;
/* Set the device DMA mask size */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (error)
return error;
+ error = -ENODEV;
/* Get the bus address of Bar0 and Bar2 and the number of bytes
* required by each mapping.
@@ -7397,8 +7400,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
}
/* Allocate memory for SLI-2 structures */
- phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
- &phba->slim2p.phys, GFP_KERNEL);
+ phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ &phba->slim2p.phys, GFP_KERNEL);
if (!phba->slim2p.virt)
goto out_iounmap;
@@ -7816,8 +7819,8 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
* plus an alignment restriction of 16 bytes.
*/
bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
@@ -9742,11 +9745,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
uint32_t if_type;
if (!pdev)
- return error;
+ return -ENODEV;
/* Set the device DMA mask size */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (error)
return error;
/*
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f6a5083a621e..4d3b94317515 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1827,9 +1827,9 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
* page, this is used as a priori size of SLI4_PAGE_SIZE for
* the later DMA memory free.
*/
- viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
- SLI4_PAGE_SIZE, &phyaddr,
- GFP_KERNEL);
+ viraddr = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI4_PAGE_SIZE, &phyaddr,
+ GFP_KERNEL);
/* In case of malloc fails, proceed with whatever we have */
if (!viraddr)
break;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 4c66b19e6199..8c9f79042228 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
lport);
/* release any threads waiting for the unreg to complete */
- complete(&lport->lport_unreg_done);
+ if (lport->vport->localport)
+ complete(lport->lport_unreg_cmp);
}
/* lpfc_nvme_remoteport_delete
@@ -2545,7 +2546,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
*/
void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
- struct lpfc_nvme_lport *lport)
+ struct lpfc_nvme_lport *lport,
+ struct completion *lport_unreg_cmp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
u32 wait_tmo;
@@ -2557,8 +2559,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
*/
wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
while (true) {
- ret = wait_for_completion_timeout(&lport->lport_unreg_done,
- wait_tmo);
+ ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6176 Lport %p Localport %p wait "
@@ -2592,12 +2593,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
int ret;
+ DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
if (vport->nvmei_support == 0)
return;
localport = vport->localport;
- vport->localport = NULL;
lport = (struct lpfc_nvme_lport *)localport->private;
cstat = lport->cstat;
@@ -2608,13 +2609,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
/* lport's rport list is clear. Unregister
* lport and release resources.
*/
- init_completion(&lport->lport_unreg_done);
+ lport->lport_unreg_cmp = &lport_unreg_cmp;
ret = nvme_fc_unregister_localport(localport);
/* Wait for completion. This either blocks
* indefinitely or succeeds
*/
- lpfc_nvme_lport_unreg_wait(vport, lport);
+ lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+ vport->localport = NULL;
kfree(cstat);
/* Regardless of the unregister upcall response, clear
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cfd4719be25c..b234d0298994 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat {
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
- struct completion lport_unreg_done;
+ struct completion *lport_unreg_cmp;
/* Add stats counters here */
struct lpfc_nvme_ctrl_stat *cstat;
atomic_t fc4NvmeLsRequests;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 6245f442d784..95fee83090eb 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
struct lpfc_nvmet_tgtport *tport = targetport->private;
/* release any threads waiting for the unreg to complete */
- complete(&tport->tport_unreg_done);
+ if (tport->phba->targetport)
+ complete(tport->tport_unreg_cmp);
}
static void
@@ -1692,6 +1693,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_queue *wq;
uint32_t qidx;
+ DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
if (phba->nvmet_support == 0)
return;
@@ -1701,9 +1703,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
wq = phba->sli4_hba.nvme_wq[qidx];
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
- init_completion(&tgtp->tport_unreg_done);
+ tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
- wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+ wait_for_completion_timeout(&tport_unreg_cmp, 5);
lpfc_nvmet_cleanup_io_context(phba);
}
phba->targetport = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 1aaff63f1f41..0ec1082ce7ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -34,7 +34,7 @@
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
- struct completion tport_unreg_done;
+ struct completion *tport_unreg_cmp;
/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
atomic_t rcv_ls_req_in;
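
The lpfc NVMe initiator and target hunks above replace the completions embedded in the lport/tgtport private structures with a pointer to a completion declared on the caller's stack, and the delete callbacks only complete it while the port is still registered. The waiting side of that pattern, as a sketch (lpfc structure names replaced with placeholders):

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    struct port_priv {                      /* stand-in for lpfc_nvme_lport */
            struct completion *unreg_cmp;   /* points at the waiter's stack */
    };

    /* Teardown path: publish a pointer to the on-stack completion, start the
     * unregister, then wait for the transport's delete callback to signal it. */
    static void destroy_port(struct port_priv *priv)
    {
            DECLARE_COMPLETION_ONSTACK(unreg_cmp);

            priv->unreg_cmp = &unreg_cmp;
            /* ... nvme_fc_unregister_localport() / nvmet_fc_unregister_targetport() ... */
            wait_for_completion_timeout(&unreg_cmp, msecs_to_jiffies(5000));
    }

    /* Delete callback invoked once the transport finishes the unregister. */
    static void port_delete_cb(struct port_priv *priv)
    {
            complete(priv->unreg_cmp);
    }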
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 30734caf77e1..2242e9b3ca12 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5362,8 +5362,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
* mailbox command.
*/
dma_size = *vpd_size;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
- &dmabuf->phys, GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
@@ -6300,10 +6300,9 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
goto free_mem;
}
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
LPFC_RAS_MAX_ENTRY_SIZE,
- &dmabuf->phys,
- GFP_KERNEL);
+ &dmabuf->phys, GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
rc = -ENOMEM;
@@ -9408,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
cmnd = CMD_XMIT_SEQUENCE64_CR;
if (phba->link_flag & LS_LOOPBACK_MODE)
bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+ /* fall through */
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=reserved */
wqe->xmit_sequence.rsvd3 = 0;
@@ -13529,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
+ /* fall through */
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
@@ -13938,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6126 Receive Frame Truncated!!\n");
- /* Drop thru */
+ /* fall through */
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
@@ -14613,9 +14614,9 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
goto out_fail;
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
- hw_page_size, &dmabuf->phys,
- GFP_KERNEL);
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ hw_page_size, &dmabuf->phys,
+ GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
@@ -14850,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
eq->entry_count);
if (eq->entry_count < 256)
return -EINVAL;
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 256:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_256);
@@ -14981,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
LPFC_CQ_CNT_WORD7);
break;
}
- /* Fall Thru */
+ /* fall through */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
@@ -14992,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 256:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_256);
@@ -15152,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
LPFC_CQ_CNT_WORD7);
break;
}
- /* Fall Thru */
+ /* fall through */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
@@ -15161,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest (drop thru) */
+ /* fall through - otherwise default to smallest */
case 256:
bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
&cq_set->u.request, LPFC_CQ_CNT_256);
@@ -15433,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 16:
bf_set(lpfc_mq_context_ring_size,
&mq_create_ext->u.request.context,
@@ -15852,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
@@ -15989,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -EINVAL;
goto out;
}
- /* otherwise default to smallest count (drop through) */
+ /* fall through - otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
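
The remaining lpfc_sli.c hunks only reword ad-hoc "Drop thru" remarks into the "/* fall through */" form that GCC's -Wimplicit-fallthrough recognizes; the comment must sit immediately before the next case label to suppress the warning. A minimal, compilable illustration of the placement (not lpfc code):

    static int ring_size_bits(int requested, int large_ok)
    {
            switch (requested) {
            case 4096:
                    if (large_ok)
                            return 12;
                    /* fall through - otherwise default to the smallest count */
            case 256:
                    return 8;
            default:
                    return -1;
            }
    }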
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index e836392b75e8..f112458023ff 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -967,9 +967,10 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
* Allocate the common 16-byte aligned memory for the handshake
* mailbox.
*/
- raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
- sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
- GFP_KERNEL);
+ raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(mbox64_t),
+ &raid_dev->una_mbox64_dma,
+ GFP_KERNEL);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
@@ -995,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
align;
// Allocate memory for commands issued internally
- adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
- &adapter->ibuf_dma_h, GFP_KERNEL);
+ adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h, GFP_KERNEL);
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
@@ -2897,8 +2898,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
* Issue an ENQUIRY3 command to find out certain adapter parameters,
* e.g., max channels, max commands etc.
*/
- pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
- &pinfo_dma_h, GFP_KERNEL);
+ pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h, GFP_KERNEL);
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __func__,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f7bdd783360a..fcbff83c0097 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2273,9 +2273,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
- dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h, GFP_KERNEL);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2380,10 +2380,9 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
- dma_zalloc_coherent(&instance->pdev->dev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h, GFP_KERNEL);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2546,9 +2545,10 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
if (initial) {
instance->hb_host_mem =
- dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct MR_CTRL_HB_HOST_MEM),
- &instance->hb_host_mem_h, GFP_KERNEL);
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h,
+ GFP_KERNEL);
if (!instance->hb_host_mem) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
" memory for heartbeat host memory for scsi%d\n",
@@ -5816,9 +5816,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
}
dcmd = &cmd->frame->dcmd;
- el_info = dma_zalloc_coherent(&instance->pdev->dev,
- sizeof(struct megasas_evt_log_info), &el_info_h,
- GFP_KERNEL);
+ el_info = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h, GFP_KERNEL);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
@@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
instance->consistent_mask_64bit = true;
dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
- ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"),
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
(instance->consistent_mask_64bit ? "63" : "32"));
return 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 211c17c33aa0..647f48a28f85 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance)
/*
* Check if it is our interrupt
*/
- status = readl(&regs->outbound_intr_status);
+ status = megasas_readl(instance,
+ &regs->outbound_intr_status);
if (status & 1) {
writel(status, &regs->outbound_intr_status);
@@ -689,8 +690,9 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
- array_size, &fusion->rdpq_phys, GFP_KERNEL);
+ fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
+ array_size, &fusion->rdpq_phys,
+ GFP_KERNEL);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index f3e182eb0970..c9dc7740e9e7 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,9 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
- ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
+ dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
+ ms->dma_cmd_size, &dma_cmd_bus,
+ GFP_KERNEL);
if (dma_cmd_space == NULL) {
printk(KERN_ERR "mesh: can't allocate DMA table\n");
goto out_unmap;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index dbe753fba486..36f64205ecfa 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,9 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
case RESOURCE_UNCACHED_MEMORY:
size = round_up(size, 8);
- res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
- &res->bus_addr, GFP_KERNEL);
+ res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
+ &res->bus_addr,
+ GFP_KERNEL);
if (!res->virt_addr) {
dev_err(&mhba->pdev->dev,
"unable to allocate consistent mem,"
@@ -246,8 +247,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (size == 0)
return 0;
- virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
- GFP_KERNEL);
+ virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+ GFP_KERNEL);
if (!virt_addr)
return -1;
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b3be49d41375..084f2fcced0a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
u64 align_offset = 0;
if (align)
align_offset = (dma_addr_t)align - 1;
- mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
- &mem_dma_handle, GFP_KERNEL);
+ mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
+ &mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
pm8001_printk("memory allocation error\n");
return -1;
@@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
if (dev->dev_type == SAS_SATA_DEV) {
pm8001_device->attached_phy =
dev->rphy->identify.phy_identifier;
- flag = 1; /* directly sata*/
+ flag = 1; /* directly sata */
}
} /*register this device to HBA*/
PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index edcaf4b0cb0b..9bbc19fc190b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1050,16 +1050,17 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
sizeof(void *);
fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
- fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
- fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
+ fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+ &fcport->sq_dma, GFP_KERNEL);
if (!fcport->sq) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
rval = 1;
goto out;
}
- fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
- fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
+ fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+ fcport->sq_pbl_size,
+ &fcport->sq_pbl_dma, GFP_KERNEL);
if (!fcport->sq_pbl) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
rval = 1;
@@ -2680,8 +2681,10 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
}
/* Allocate list of PBL pages */
- qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
- QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
+ qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+ QEDF_PAGE_SIZE,
+ &qedf->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedf->bdq_pbl_list) {
QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
return -ENOMEM;
@@ -2770,9 +2773,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
qedf->global_queues[i]->cq =
- dma_zalloc_coherent(&qedf->pdev->dev,
- qedf->global_queues[i]->cq_mem_size,
- &qedf->global_queues[i]->cq_dma, GFP_KERNEL);
+ dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->global_queues[i]->cq_mem_size,
+ &qedf->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedf->global_queues[i]->cq) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
@@ -2781,9 +2785,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
}
qedf->global_queues[i]->cq_pbl =
- dma_zalloc_coherent(&qedf->pdev->dev,
- qedf->global_queues[i]->cq_pbl_size,
- &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
+ dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->global_queues[i]->cq_pbl_size,
+ &qedf->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedf->global_queues[i]->cq_pbl) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 4da660c1c431..6d6d6013e35b 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qedi_ep = ep->dd_data;
if (qedi_ep->state == EP_STATE_IDLE ||
+ qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
return -1;
@@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
switch (qedi_ep->state) {
case EP_STATE_OFLDCONN_START:
+ case EP_STATE_OFLDCONN_NONE:
goto ep_release_conn;
case EP_STATE_OFLDCONN_FAILED:
break;
@@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+ qedi_ep->state = EP_STATE_OFLDCONN_NONE;
ret = -EIO;
goto set_path_exit;
}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 11260776212f..892d70d54553 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -59,6 +59,7 @@ enum {
EP_STATE_OFLDCONN_FAILED = 0x2000,
EP_STATE_CONNECT_FAILED = 0x4000,
EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+ EP_STATE_OFLDCONN_NONE = 0x10000,
};
struct qedi_conn;
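
The qedi hunks above add an EP_STATE_OFLDCONN_NONE endpoint state: qedi_set_path() moves the endpoint into it when the destination MAC address is invalid, and both the poll and disconnect paths then treat it like the other "never offloaded" cases, so teardown does not wait for an offload that was never started. A reduced sketch of that state handling (names and values are illustrative, not the driver's):

    enum ep_state {
            EP_IDLE,
            EP_OFLD_START,
            EP_OFLD_NONE,           /* offload never attempted, e.g. bad dst MAC */
            EP_OFLD_FAILED,
            EP_CONNECTED,
    };

    /* Poll result: -1 tells the caller this endpoint will never come up. */
    static int ep_poll(enum ep_state state)
    {
            if (state == EP_IDLE || state == EP_OFLD_NONE ||
                state == EP_OFLD_FAILED)
                    return -1;
            return state == EP_CONNECTED ? 1 : 0;
    }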
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5c53409a8cea..e74a62448ba4 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1394,10 +1394,9 @@ static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
struct qedi_nvm_iscsi_image nvm_image;
- qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
- sizeof(nvm_image),
- &qedi->nvm_buf_dma,
- GFP_KERNEL);
+ qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
+ sizeof(nvm_image),
+ &qedi->nvm_buf_dma, GFP_KERNEL);
if (!qedi->iscsi_image) {
QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
return -ENOMEM;
@@ -1510,10 +1509,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev,
- QEDI_PAGE_SIZE,
- &qedi->bdq_pbl_list_dma,
- GFP_KERNEL);
+ qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+ QEDI_PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate list of PBL pages.\n");
@@ -1609,10 +1608,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
(qedi->global_queues[i]->cq_pbl_size +
(QEDI_PAGE_SIZE - 1));
- qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_mem_size,
- &qedi->global_queues[i]->cq_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1620,10 +1619,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_pbl_size,
- &qedi->global_queues[i]->cq_pbl_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1691,16 +1690,16 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
- ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
- &ep->sq_dma, GFP_KERNEL);
+ ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
if (!ep->sq) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue.\n");
rval = -ENOMEM;
goto out;
}
- ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
- &ep->sq_pbl_dma, GFP_KERNEL);
+ ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
if (!ep->sq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue PBL.\n");
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index a414f51302b7..6856dfdfa473 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->devnum = devnum; /* specifies microcode load address */
#ifdef QLA_64BIT_PTR
- if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 00444dc79756..ac504a1ff0ff 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2415,8 +2415,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_chip_is_down(vha))
goto done;
- stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
- &stats_dma, GFP_KERNEL);
+ stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+ GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4a9fd8d944d6..17d42658ad9a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2312,8 +2312,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
if (!IS_FWI2_CAPABLE(ha))
return -EPERM;
- stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
- &stats_dma, GFP_KERNEL);
+ stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+ GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
"Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 26b93c563f92..d1fc4958222a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host {
uint16_t n2n_id;
struct list_head gpnid_list;
struct fab_scan scan;
+
+ unsigned int irq_offset;
} scsi_qla_host_t;
struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 90cfa394f942..cbc3bc49d4d1 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -4147,9 +4147,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
return rval;
}
- sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
- &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
- &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xffff,
@@ -4165,9 +4166,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
((vha->hw->max_fibre_devices - 1) *
sizeof(struct ct_sns_gpn_ft_data));
- sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
- &vha->hw->pdev->dev, rspsz,
- &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ rspsz,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xffff,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 364bb52ed2a6..8d1acc802a67 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
/* Issue Marker IOCB */
qla2x00_marker(vha, vha->hw->req_q_map[0],
- vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+ vha->hw->rsp_q_map[0], fcport->loop_id, lun,
flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
}
done_free_sp:
sp->free(sp);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -3099,8 +3099,8 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
FCE_SIZE, ha->fce, ha->fce_dma);
/* Allocate memory for Fibre Channel Event Buffer. */
- tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
- GFP_KERNEL);
+ tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ GFP_KERNEL);
if (!tc) {
ql_log(ql_log_warn, vha, 0x00be,
"Unable to allocate (%d KB) for FCE.\n",
@@ -3131,8 +3131,8 @@ try_eft:
EFT_SIZE, ha->eft, ha->eft_dma);
/* Allocate memory for Extended Trace Buffer. */
- tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
if (!tc) {
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 30d3090842f8..8507c43b918c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
"Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
}
}
+ vha->irq_offset = desc.pre_vectors;
ha->msix_entries = kcalloc(ha->msix_count,
sizeof(struct qla_msix_entry),
GFP_KERNEL);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ea69dafc9774..c6ef83d0d99b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
if (USER_CTRL_IRQ(vha->hw))
rc = blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
+ rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
return rc;
}
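
The qla2xxx hunks above record how many pre-vectors (MSI-X entries reserved for control interrupts) were requested and pass that count as the offset to blk_mq_pci_map_queues(), so blk-mq maps its hardware queues onto the vectors that actually service I/O. A sketch of a map_queues hook under that assumption (the private struct, its fields, and the use of the default map are illustrative):

    #include <linux/blk-mq-pci.h>
    #include <scsi/scsi_host.h>

    struct my_hw {
            struct pci_dev *pdev;
            unsigned int irq_offset;        /* vectors not used for I/O queues */
    };

    static int my_map_queues(struct Scsi_Host *shost)
    {
            struct my_hw *hw = shost_priv(shost);
            struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];

            /* Skip the reserved vectors: queue N maps to vector N + irq_offset. */
            return blk_mq_pci_map_queues(qmap, hw->pdev, hw->irq_offset);
    }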
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1ef74aa2d00a..2bf5e3e639e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,8 +153,8 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 5d56904687b9..dac9a7013208 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,9 +625,9 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
@@ -709,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
@@ -1340,9 +1340,9 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- about_fw = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct about_fw_info),
- &about_fw_dma, GFP_KERNEL);
+ about_fw = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d2b333d629be..5a31877c9d04 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4052,8 +4052,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 949e186cc5d7..a77bfb224248 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2704,9 +2704,9 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
uint32_t rem = len;
struct nlattr *attr;
- init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
@@ -4206,8 +4206,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
- &ha->queues_dma, GFP_KERNEL);
+ ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
@@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
fw_ddb_entry);
+ if (rc)
+ goto free_sess;
ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
__func__, fnode_sess->dev.kobj.name);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 661512bec3ac..e27f4df24021 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -62,7 +62,7 @@
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20180128";
+static const char *sdebug_version_date = "20190125";
#define MY_NAME "scsi_debug"
@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
-static void *fake_store(unsigned long long lba)
+static void *lba2fake_store(unsigned long long lba)
{
lba = do_div(lba, sdebug_store_sectors);
@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
return ret;
}
-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into fake_store(lba,num) and return true. If comparison fails then
+/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
+ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
* return false. */
static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
{
@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
- ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+ ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
if (ret) {
dif_errors++;
return ret;
@@ -3261,10 +3261,12 @@ err_out:
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
+ int ret;
unsigned long iflags;
unsigned long long i;
- int ret;
- u64 lba_off;
+ u32 lb_size = sdebug_sector_size;
+ u64 block, lbaa;
+ u8 *fs1p;
ret = check_device_access_params(scp, lba, num);
if (ret)
@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
unmap_region(lba, num);
goto out;
}
-
- lba_off = lba * sdebug_sector_size;
+ lbaa = lba;
+ block = do_div(lbaa, sdebug_store_sectors);
/* if ndob then zero 1 logical block, else fetch 1 logical block */
+ fs1p = fake_storep + (block * lb_size);
if (ndob) {
- memset(fake_storep + lba_off, 0, sdebug_sector_size);
+ memset(fs1p, 0, lb_size);
ret = 0;
} else
- ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
- sdebug_sector_size);
+ ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
write_unlock_irqrestore(&atomic_rw, iflags);
return DID_ERROR << 16;
- } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
+ } else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
"%s: %s: lb size=%u, IO sent=%d bytes\n",
- my_name, "write same",
- sdebug_sector_size, ret);
+ my_name, "write same", lb_size, ret);
/* Copy first sector to remaining blocks */
- for (i = 1 ; i < num ; i++)
- memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
- fake_storep + lba_off,
- sdebug_sector_size);
-
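+ /* use memmove(): when the store wraps, the destination may coincide with fs1p */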
+ for (i = 1 ; i < num ; i++) {
+ lbaa = lba + i;
+ block = do_div(lbaa, sdebug_store_sectors);
+ memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+ }
if (scsi_debug_lbp())
map_region(lba, num);
out:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b13cc9288ba0..a6828391d6b3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
set_host_byte(cmd, DID_OK);
return BLK_STS_TARGET;
case DID_NEXUS_FAILURE:
+ set_host_byte(cmd, DID_OK);
return BLK_STS_NEXUS;
case DID_ALLOC_FAILURE:
set_host_byte(cmd, DID_OK);
@@ -1842,8 +1843,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);
- blk_queue_max_segment_size(q,
- min(shost->max_segment_size, dma_get_max_seg_size(dev)));
+ blk_queue_max_segment_size(q, shost->max_segment_size);
+ dma_set_max_seg_size(dev, shost->max_segment_size);
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
@@ -2597,7 +2598,6 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- WARN_ON_ONCE(!sdev->quiesced_by);
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
if (sdev->sdev_state == SDEV_QUIESCE)
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index a2b4179bfdf7..7639df91b110 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
if (err == 0) {
pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
+ err = pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ /*
+ * Forcibly set runtime PM status of request queue to "active"
+ * to make sure we can again get requests from the queue
+ * (see also blk_pm_peek_request()).
+ *
+ * The resume hook will correct runtime PM status of the disk.
+ */
+ if (!err && scsi_is_sdev_device(dev)) {
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (sdev->request_queue->dev)
+ blk_set_runtime_active(sdev->request_queue);
+ }
}
return err;
@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev,
else
fn = NULL;
- /*
- * Forcibly set runtime PM status of request queue to "active" to
- * make sure we can again get requests from the queue (see also
- * blk_pm_peek_request()).
- *
- * The resume hook will correct runtime PM status of the disk.
- */
- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
- blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
if (fn) {
async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a1a44f52e0e8..5464d467e23e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
sp = buffer_data[0] & 0x80 ? 1 : 0;
buffer_data[0] &= ~0x80;
+ /*
+ * Ensure WP, DPOFUA, and RESERVED fields are cleared in the
+ * received mode parameter buffer before doing MODE SELECT.
+ */
+ data.device_specific = 0;
+
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
SD_MAX_RETRIES, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
@@ -2945,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
- } else {
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
}
if (sdkp->device->type == TYPE_ZBC) {
@@ -3084,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
+ /*
+ * Set the default to rotational. All non-rotational devices
+ * support the block characteristics VPD page, which will
+ * cause this to be updated correctly; any device which
+ * doesn't support it should be treated as rotational.
+ */
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 83365b29a4d8..a340af797a85 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
return -EOPNOTSUPP;
/*
- * Get a reply buffer for the number of requested zones plus a header.
- * For ATA, buffers must be aligned to 512B.
+ * Get a reply buffer for the number of requested zones plus a header,
+ * without exceeding the device maximum command size. For ATA disks,
+ * buffers must be aligned to 512B.
*/
- buflen = roundup((nrz + 1) * 64, 512);
+ buflen = min(queue_max_hw_sectors(disk->queue) << 9,
+ roundup((nrz + 1) * 64, 512));
buf = kmalloc(buflen, gfp_mask);
if (!buf)
return -ENOMEM;
@@ -462,12 +464,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
sdkp->device->use_10_for_rw = 0;
/*
- * If something changed, revalidate the disk zone bitmaps once we have
- * the capacity, that is on the second revalidate execution during disk
- * scan and always during normal revalidate.
+ * Revalidate the disk zone bitmaps once the block device capacity is
+ * set on the second revalidate execution during disk scan and if
+ * something changed when executing a normal revalidate.
*/
- if (sdkp->first_scan)
+ if (sdkp->first_scan) {
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->nr_zones = nr_zones;
return 0;
+ }
+
if (sdkp->zone_blocks != zone_blocks ||
sdkp->nr_zones != nr_zones ||
disk->queue->nr_zones != nr_zones) {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index e2fa3f476227..f564af8949e8 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
- return device->in_remove & !ctrl_info->in_shutdown;
+ return device->in_remove && !ctrl_info->in_shutdown;
}
static inline void pqi_schedule_rescan_worker_with_delay(
@@ -3576,9 +3576,9 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
alloc_length += PQI_EXTRA_SGL_MEMORY;
ctrl_info->queue_memory_base =
- dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- alloc_length,
- &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+ dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+ &ctrl_info->queue_memory_base_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->queue_memory_base)
return -ENOMEM;
@@ -3715,10 +3715,9 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
ctrl_info->admin_queue_memory_base =
- dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- alloc_length,
- &ctrl_info->admin_queue_memory_base_dma_handle,
- GFP_KERNEL);
+ dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+ &ctrl_info->admin_queue_memory_base_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->admin_queue_memory_base)
return -ENOMEM;
@@ -4602,9 +4601,10 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
- ctrl_info->error_buffer_length,
- &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
+ ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->error_buffer)
return -ENOMEM;
@@ -7487,8 +7487,8 @@ static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
dma_addr_t dma_handle;
ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_zalloc_coherent(dev, chunk_size, &dma_handle,
- GFP_KERNEL);
+ dma_alloc_coherent(dev, chunk_size, &dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
break;
@@ -7545,10 +7545,10 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
struct device *dev;
dev = &ctrl_info->pci_dev->dev;
- pqi_ofa_memory = dma_zalloc_coherent(dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
- &ctrl_info->pqi_ofa_mem_dma_handle,
- GFP_KERNEL);
+ pqi_ofa_memory = dma_alloc_coherent(dev,
+ PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
+ &ctrl_info->pqi_ofa_mem_dma_handle,
+ GFP_KERNEL);
if (!pqi_ofa_memory)
return;
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index dd65fea07687..6d176815e6ce 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
QUERY_DESC_UNIT_DEF_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9ba7671b84f8..2ddf24466a62 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -108,13 +108,19 @@
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix)
{
- u8 *regs;
+ u32 *regs;
+ size_t pos;
+
+ if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
+ return -EINVAL;
regs = kzalloc(len, GFP_KERNEL);
if (!regs)
return -ENOMEM;
- memcpy_fromio(regs, hba->mmio_base + offset, len);
+ for (pos = 0; pos < len; pos += 4)
+ regs[pos / 4] = ufshcd_readl(hba, offset + pos);
+
ufshcd_hex_dump(prefix, regs, len);
kfree(regs);
@@ -8001,6 +8007,8 @@ out:
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 55eda5863a6b..b2f07d2043eb 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -21,7 +21,9 @@ static const struct slim_device_id *slim_match(const struct slim_device_id *id,
{
while (id->manf_id != 0 || id->prod_code != 0) {
if (id->manf_id == sbdev->e_addr.manf_id &&
- id->prod_code == sbdev->e_addr.prod_code)
+ id->prod_code == sbdev->e_addr.prod_code &&
+ id->dev_index == sbdev->e_addr.dev_index &&
+ id->instance == sbdev->e_addr.instance)
return id;
id++;
}
@@ -40,6 +42,23 @@ static int slim_device_match(struct device *dev, struct device_driver *drv)
return !!slim_match(sbdrv->id_table, sbdev);
}
+static void slim_device_update_status(struct slim_device *sbdev,
+ enum slim_device_status status)
+{
+ struct slim_driver *sbdrv;
+
+ if (sbdev->status == status)
+ return;
+
+ sbdev->status = status;
+ if (!sbdev->dev.driver)
+ return;
+
+ sbdrv = to_slim_driver(sbdev->dev.driver);
+ if (sbdrv->device_status)
+ sbdrv->device_status(sbdev, sbdev->status);
+}
+
static int slim_device_probe(struct device *dev)
{
struct slim_device *sbdev = to_slim_device(dev);
@@ -53,8 +72,7 @@ static int slim_device_probe(struct device *dev)
/* try getting the logical address after probe */
ret = slim_get_logical_addr(sbdev);
if (!ret) {
- if (sbdrv->device_status)
- sbdrv->device_status(sbdev, sbdev->status);
+ slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
} else {
dev_err(&sbdev->dev, "Failed to get logical address\n");
ret = -EPROBE_DEFER;
@@ -256,6 +274,7 @@ int slim_register_controller(struct slim_controller *ctrl)
mutex_init(&ctrl->lock);
mutex_init(&ctrl->sched.m_reconf);
init_completion(&ctrl->sched.pause_comp);
+ spin_lock_init(&ctrl->txn_lock);
dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n",
ctrl->name, ctrl->dev);
@@ -295,23 +314,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
}
EXPORT_SYMBOL_GPL(slim_unregister_controller);
-static void slim_device_update_status(struct slim_device *sbdev,
- enum slim_device_status status)
-{
- struct slim_driver *sbdrv;
-
- if (sbdev->status == status)
- return;
-
- sbdev->status = status;
- if (!sbdev->dev.driver)
- return;
-
- sbdrv = to_slim_driver(sbdev->dev.driver);
- if (sbdrv->device_status)
- sbdrv->device_status(sbdev, sbdev->status);
-}
-
/**
* slim_report_absent() - Controller calls this function when a device
* reports absent, OR when the device cannot be communicated with
@@ -464,6 +466,7 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
sbdev->laddr = laddr;
sbdev->is_laddr_valid = true;
+ mutex_unlock(&ctrl->lock);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
@@ -471,6 +474,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code,
sbdev->e_addr.dev_index, sbdev->e_addr.instance);
+ return 0;
+
err:
mutex_unlock(&ctrl->lock);
return ret;
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index fce33ca76bb6..be95a37c3fec 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -51,16 +51,30 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
{
struct device_node *canvas_node;
struct platform_device *canvas_pdev;
+ struct meson_canvas *canvas;
canvas_node = of_parse_phandle(dev->of_node, "amlogic,canvas", 0);
if (!canvas_node)
return ERR_PTR(-ENODEV);
canvas_pdev = of_find_device_by_node(canvas_node);
- if (!canvas_pdev)
+ if (!canvas_pdev) {
+ of_node_put(canvas_node);
return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(canvas_node);
+
+ /*
+ * If priv is NULL, it's probably because the canvas hasn't been
+ * properly initialized. Bail out with -EINVAL because, in the
+ * current state, this driver's probe cannot return -EPROBE_DEFER.
+ */
+ canvas = dev_get_drvdata(&canvas_pdev->dev);
+ if (!canvas)
+ return ERR_PTR(-EINVAL);
- return dev_get_drvdata(&canvas_pdev->dev);
+ return canvas;
}
EXPORT_SYMBOL_GPL(meson_canvas_get);
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index daea191a66fa..19d4cbc93a17 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -165,6 +165,194 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
CLK_MSR_ID(82, "ge2d"),
};
+static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "a53_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(5, "gp1_pll"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(23, "mmc_clk"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(40, "mod_eth_tx_clk"),
+ CLK_MSR_ID(41, "mod_eth_rx_clk_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmm_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(66, "audio_slv_lrclk_c"),
+ CLK_MSR_ID(67, "audio_slv_lrclk_b"),
+ CLK_MSR_ID(68, "audio_slv_lrclk_a"),
+ CLK_MSR_ID(69, "audio_slv_sclk_c"),
+ CLK_MSR_ID(70, "audio_slv_sclk_b"),
+ CLK_MSR_ID(71, "audio_slv_sclk_a"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(74, "wifi_beacon"),
+ CLK_MSR_ID(75, "tdmin_lb_lrcl"),
+ CLK_MSR_ID(76, "tdmin_lb_sclk"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(84, "audio_resample"),
+ CLK_MSR_ID(85, "audio_pdm_sys"),
+ CLK_MSR_ID(86, "audio_spdifout"),
+ CLK_MSR_ID(87, "audio_spdifin"),
+ CLK_MSR_ID(88, "audio_lrclk_f"),
+ CLK_MSR_ID(89, "audio_lrclk_e"),
+ CLK_MSR_ID(90, "audio_lrclk_d"),
+ CLK_MSR_ID(91, "audio_lrclk_c"),
+ CLK_MSR_ID(92, "audio_lrclk_b"),
+ CLK_MSR_ID(93, "audio_lrclk_a"),
+ CLK_MSR_ID(94, "audio_sclk_f"),
+ CLK_MSR_ID(95, "audio_sclk_e"),
+ CLK_MSR_ID(96, "audio_sclk_d"),
+ CLK_MSR_ID(97, "audio_sclk_c"),
+ CLK_MSR_ID(98, "audio_sclk_b"),
+ CLK_MSR_ID(99, "audio_sclk_a"),
+ CLK_MSR_ID(100, "audio_mclk_f"),
+ CLK_MSR_ID(101, "audio_mclk_e"),
+ CLK_MSR_ID(102, "audio_mclk_d"),
+ CLK_MSR_ID(103, "audio_mclk_c"),
+ CLK_MSR_ID(104, "audio_mclk_b"),
+ CLK_MSR_ID(105, "audio_mclk_a"),
+ CLK_MSR_ID(106, "pcie_refclk_n"),
+ CLK_MSR_ID(107, "pcie_refclk_p"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+};
+
+static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "sys_cpu_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(6, "enci"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(8, "encp"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(10, "vdac"),
+ CLK_MSR_ID(11, "eth_tx"),
+ CLK_MSR_ID(12, "hifi_pll"),
+ CLK_MSR_ID(13, "mod_tcon"),
+ CLK_MSR_ID(14, "fec_0"),
+ CLK_MSR_ID(15, "fec_1"),
+ CLK_MSR_ID(16, "fec_2"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(19, "lcd_an_ph2"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(21, "lcd_an_ph3"),
+ CLK_MSR_ID(22, "eth_phy_ref"),
+ CLK_MSR_ID(23, "mpll_50m"),
+ CLK_MSR_ID(24, "eth_125m"),
+ CLK_MSR_ID(25, "eth_rmii"),
+ CLK_MSR_ID(26, "sc_int"),
+ CLK_MSR_ID(27, "in_mac"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(29, "pcie_inp"),
+ CLK_MSR_ID(30, "pcie_inn"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(32, "vdec"),
+ CLK_MSR_ID(33, "sys_cpu_ring_osc_1"),
+ CLK_MSR_ID(34, "eth_mpll_50m"),
+ CLK_MSR_ID(35, "mali"),
+ CLK_MSR_ID(36, "hdmi_tx_pixel"),
+ CLK_MSR_ID(37, "cdac"),
+ CLK_MSR_ID(38, "vdin_meas"),
+ CLK_MSR_ID(39, "bt656"),
+ CLK_MSR_ID(41, "eth_rx_or_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmc_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(53, "sd_emmc_a"),
+ CLK_MSR_ID(54, "vpu_clkc"),
+ CLK_MSR_ID(55, "vid_pll_div_out"),
+ CLK_MSR_ID(56, "wave420l_a"),
+ CLK_MSR_ID(57, "wave420l_c"),
+ CLK_MSR_ID(58, "wave420l_b"),
+ CLK_MSR_ID(59, "hcodec"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(62, "hevcb"),
+ CLK_MSR_ID(63, "dsi_meas"),
+ CLK_MSR_ID(64, "spicc_1"),
+ CLK_MSR_ID(65, "spicc_0"),
+ CLK_MSR_ID(66, "vid_lock"),
+ CLK_MSR_ID(67, "dsi_phy"),
+ CLK_MSR_ID(68, "hdcp22_esm"),
+ CLK_MSR_ID(69, "hdcp22_skp"),
+ CLK_MSR_ID(70, "pwm_f"),
+ CLK_MSR_ID(71, "pwm_e"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(75, "hevcf"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(83, "co_rx"),
+ CLK_MSR_ID(84, "co_tx"),
+ CLK_MSR_ID(89, "hdmi_todig"),
+ CLK_MSR_ID(90, "hdmitx_sys"),
+ CLK_MSR_ID(94, "eth_phy_rx"),
+ CLK_MSR_ID(95, "eth_phy_pll"),
+ CLK_MSR_ID(96, "vpu_b"),
+ CLK_MSR_ID(97, "cpu_b_tmp"),
+ CLK_MSR_ID(98, "ts"),
+ CLK_MSR_ID(99, "ring_osc_out_ee_3"),
+ CLK_MSR_ID(100, "ring_osc_out_ee_4"),
+ CLK_MSR_ID(101, "ring_osc_out_ee_5"),
+ CLK_MSR_ID(102, "ring_osc_out_ee_6"),
+ CLK_MSR_ID(103, "ring_osc_out_ee_7"),
+ CLK_MSR_ID(104, "ring_osc_out_ee_8"),
+ CLK_MSR_ID(105, "ring_osc_out_ee_9"),
+ CLK_MSR_ID(106, "ephy_test"),
+ CLK_MSR_ID(107, "au_dac_g128x"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+ CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(117, "audio_resample"),
+ CLK_MSR_ID(118, "audio_pdm_sys"),
+ CLK_MSR_ID(119, "audio_spdifout_b"),
+ CLK_MSR_ID(120, "audio_spdifout"),
+ CLK_MSR_ID(121, "audio_spdifin"),
+ CLK_MSR_ID(122, "audio_pdm_dclk"),
+};
+
static int meson_measure_id(struct meson_msr_id *clk_msr_id,
unsigned int duration)
{
@@ -337,6 +525,14 @@ static const struct of_device_id meson_msr_match_table[] = {
.compatible = "amlogic,meson8b-clk-measure",
.data = (void *)clk_msr_m8,
},
+ {
+ .compatible = "amlogic,meson-axg-clk-measure",
+ .data = (void *)clk_msr_axg,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-clk-measure",
+ .data = (void *)clk_msr_g12a,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 055a845ed979..03fa91fbe2da 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -1,5 +1,17 @@
menu "Broadcom SoC drivers"
+config BCM2835_POWER
+ bool "BCM2835 power domain driver"
+ depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
+ default y if ARCH_BCM2835
+ select PM_GENERIC_DOMAINS if PM
+ select RESET_CONTROLLER
+ help
+ This enables support for the BCM2835 power domains and reset
+ controller. Any power domain that is also used by the Raspberry Pi
+ firmware must instead be accessed by Linux through the
+ RASPBERRYPI_POWER driver.
+
config RASPBERRYPI_POWER
bool "Raspberry Pi power domain driver"
depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
index dc4fced72d21..c81df4b2403c 100644
--- a/drivers/soc/bcm/Makefile
+++ b/drivers/soc/bcm/Makefile
@@ -1,2 +1,3 @@
+obj-$(CONFIG_BCM2835_POWER) += bcm2835-power.o
obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
new file mode 100644
index 000000000000..9351349cf0a9
--- /dev/null
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Power domain driver for Broadcom BCM2835
+ *
+ * Copyright (C) 2018 Broadcom
+ */
+
+#include <dt-bindings/soc/bcm2835-pm.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/bcm2835-pm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+#define PM_GNRIC 0x00
+#define PM_AUDIO 0x04
+#define PM_STATUS 0x18
+#define PM_RSTC 0x1c
+#define PM_RSTS 0x20
+#define PM_WDOG 0x24
+#define PM_PADS0 0x28
+#define PM_PADS2 0x2c
+#define PM_PADS3 0x30
+#define PM_PADS4 0x34
+#define PM_PADS5 0x38
+#define PM_PADS6 0x3c
+#define PM_CAM0 0x44
+#define PM_CAM0_LDOHPEN BIT(2)
+#define PM_CAM0_LDOLPEN BIT(1)
+#define PM_CAM0_CTRLEN BIT(0)
+
+#define PM_CAM1 0x48
+#define PM_CAM1_LDOHPEN BIT(2)
+#define PM_CAM1_LDOLPEN BIT(1)
+#define PM_CAM1_CTRLEN BIT(0)
+
+#define PM_CCP2TX 0x4c
+#define PM_CCP2TX_LDOEN BIT(1)
+#define PM_CCP2TX_CTRLEN BIT(0)
+
+#define PM_DSI0 0x50
+#define PM_DSI0_LDOHPEN BIT(2)
+#define PM_DSI0_LDOLPEN BIT(1)
+#define PM_DSI0_CTRLEN BIT(0)
+
+#define PM_DSI1 0x54
+#define PM_DSI1_LDOHPEN BIT(2)
+#define PM_DSI1_LDOLPEN BIT(1)
+#define PM_DSI1_CTRLEN BIT(0)
+
+#define PM_HDMI 0x58
+#define PM_HDMI_RSTDR BIT(19)
+#define PM_HDMI_LDOPD BIT(1)
+#define PM_HDMI_CTRLEN BIT(0)
+
+#define PM_USB 0x5c
+/* The power gates must be enabled with this bit before enabling the LDO in the
+ * USB block.
+ */
+#define PM_USB_CTRLEN BIT(0)
+
+#define PM_PXLDO 0x60
+#define PM_PXBG 0x64
+#define PM_DFT 0x68
+#define PM_SMPS 0x6c
+#define PM_XOSC 0x70
+#define PM_SPAREW 0x74
+#define PM_SPARER 0x78
+#define PM_AVS_RSTDR 0x7c
+#define PM_AVS_STAT 0x80
+#define PM_AVS_EVENT 0x84
+#define PM_AVS_INTEN 0x88
+#define PM_DUMMY 0xfc
+
+#define PM_IMAGE 0x108
+#define PM_GRAFX 0x10c
+#define PM_PROC 0x110
+#define PM_ENAB BIT(12)
+#define PM_ISPRSTN BIT(8)
+#define PM_H264RSTN BIT(7)
+#define PM_PERIRSTN BIT(6)
+#define PM_V3DRSTN BIT(6)
+#define PM_ISFUNC BIT(5)
+#define PM_MRDONE BIT(4)
+#define PM_MEMREP BIT(3)
+#define PM_ISPOW BIT(2)
+#define PM_POWOK BIT(1)
+#define PM_POWUP BIT(0)
+#define PM_INRUSH_SHIFT 13
+#define PM_INRUSH_3_5_MA 0
+#define PM_INRUSH_5_MA 1
+#define PM_INRUSH_10_MA 2
+#define PM_INRUSH_20_MA 3
+#define PM_INRUSH_MASK (3 << PM_INRUSH_SHIFT)
+
+#define PM_PASSWORD 0x5a000000
+
+#define PM_WDOG_TIME_SET 0x000fffff
+#define PM_RSTC_WRCFG_CLR 0xffffffcf
+#define PM_RSTS_HADWRH_SET 0x00000040
+#define PM_RSTC_WRCFG_SET 0x00000030
+#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
+#define PM_RSTC_RESET 0x00000102
+
+#define PM_READ(reg) readl(power->base + (reg))
+#define PM_WRITE(reg, val) writel(PM_PASSWORD | (val), power->base + (reg))
+
+#define ASB_BRDG_VERSION 0x00
+#define ASB_CPR_CTRL 0x04
+
+#define ASB_V3D_S_CTRL 0x08
+#define ASB_V3D_M_CTRL 0x0c
+#define ASB_ISP_S_CTRL 0x10
+#define ASB_ISP_M_CTRL 0x14
+#define ASB_H264_S_CTRL 0x18
+#define ASB_H264_M_CTRL 0x1c
+
+#define ASB_REQ_STOP BIT(0)
+#define ASB_ACK BIT(1)
+#define ASB_EMPTY BIT(2)
+#define ASB_FULL BIT(3)
+
+#define ASB_AXI_BRDG_ID 0x20
+
+#define ASB_READ(reg) readl(power->asb + (reg))
+#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg))
+
+struct bcm2835_power_domain {
+ struct generic_pm_domain base;
+ struct bcm2835_power *power;
+ u32 domain;
+ struct clk *clk;
+};
+
+struct bcm2835_power {
+ struct device *dev;
+ /* PM registers. */
+ void __iomem *base;
+ /* AXI Async bridge registers. */
+ void __iomem *asb;
+
+ struct genpd_onecell_data pd_xlate;
+ struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT];
+ struct reset_controller_dev reset;
+};
+
+static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
+{
+ u64 start = ktime_get_ns();
+
+ /* Enable the module's async AXI bridges. */
+ ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
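+ /* Wait for the bridge's stop acknowledgement to clear. */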
+ while (ASB_READ(reg) & ASB_ACK) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
+{
+ u64 start = ktime_get_ns();
+
+ /* Disable the module's async AXI bridges. */
+ ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
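+ /* Wait for the bridge to acknowledge the stop request. */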
+ while (!(ASB_READ(reg) & ASB_ACK)) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg)
+{
+ struct bcm2835_power *power = pd->power;
+
+ /* Enable functional isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC);
+
+ /* Enable electrical isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
+
+ /* Open the power switches. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_POWUP);
+
+ return 0;
+}
+
+static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg)
+{
+ struct bcm2835_power *power = pd->power;
+ struct device *dev = power->dev;
+ u64 start;
+ int ret;
+ int inrush;
+ bool powok;
+
+ /* If it was already powered on by the fw, leave it that way. */
+ if (PM_READ(pm_reg) & PM_POWUP)
+ return 0;
+
+ /* Enable power. Allowing too much current at once may result
+ * in POWOK never getting set, so start low and ramp it up as
+ * necessary to succeed.
+ */
+ powok = false;
+ for (inrush = PM_INRUSH_3_5_MA; inrush <= PM_INRUSH_20_MA; inrush++) {
+ PM_WRITE(pm_reg,
+ (PM_READ(pm_reg) & ~PM_INRUSH_MASK) |
+ (inrush << PM_INRUSH_SHIFT) |
+ PM_POWUP);
+
+ start = ktime_get_ns();
+ while (!(powok = !!(PM_READ(pm_reg) & PM_POWOK))) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 3000)
+ break;
+ }
+ }
+ if (!powok) {
+ dev_err(dev, "Timeout waiting for %s power OK\n",
+ pd->base.name);
+ ret = -ETIMEDOUT;
+ goto err_disable_powup;
+ }
+
+ /* Disable electrical isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISPOW);
+
+ /* Repair memory */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_MEMREP);
+ start = ktime_get_ns();
+ while (!(PM_READ(pm_reg) & PM_MRDONE)) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000) {
+ dev_err(dev, "Timeout waiting for %s memory repair\n",
+ pd->base.name);
+ ret = -ETIMEDOUT;
+ goto err_disable_ispow;
+ }
+ }
+
+ /* Disable functional isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISFUNC);
+
+ return 0;
+
+err_disable_ispow:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
+err_disable_powup:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~(PM_POWUP | PM_INRUSH_MASK));
+ return ret;
+}
+
+static int bcm2835_asb_power_on(struct bcm2835_power_domain *pd,
+ u32 pm_reg,
+ u32 asb_m_reg,
+ u32 asb_s_reg,
+ u32 reset_flags)
+{
+ struct bcm2835_power *power = pd->power;
+ int ret;
+
+ ret = clk_prepare_enable(pd->clk);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable clock for %s\n",
+ pd->base.name);
+ return ret;
+ }
+
+ /* Wait 32 clocks for the reset to propagate; 1 us will be enough */
+ udelay(1);
+
+ clk_disable_unprepare(pd->clk);
+
+ /* Deassert the resets. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | reset_flags);
+
+ ret = clk_prepare_enable(pd->clk);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable clock for %s\n",
+ pd->base.name);
+ goto err_enable_resets;
+ }
+
+ ret = bcm2835_asb_enable(power, asb_m_reg);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable ASB master for %s\n",
+ pd->base.name);
+ goto err_disable_clk;
+ }
+ ret = bcm2835_asb_enable(power, asb_s_reg);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable ASB slave for %s\n",
+ pd->base.name);
+ goto err_disable_asb_master;
+ }
+
+ return 0;
+
+err_disable_asb_master:
+ bcm2835_asb_disable(power, asb_m_reg);
+err_disable_clk:
+ clk_disable_unprepare(pd->clk);
+err_enable_resets:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
+ return ret;
+}
+
+static int bcm2835_asb_power_off(struct bcm2835_power_domain *pd,
+ u32 pm_reg,
+ u32 asb_m_reg,
+ u32 asb_s_reg,
+ u32 reset_flags)
+{
+ struct bcm2835_power *power = pd->power;
+ int ret;
+
+ ret = bcm2835_asb_disable(power, asb_s_reg);
+ if (ret) {
+ dev_warn(power->dev, "Failed to disable ASB slave for %s\n",
+ pd->base.name);
+ return ret;
+ }
+ ret = bcm2835_asb_disable(power, asb_m_reg);
+ if (ret) {
+ dev_warn(power->dev, "Failed to disable ASB master for %s\n",
+ pd->base.name);
+ bcm2835_asb_enable(power, asb_s_reg);
+ return ret;
+ }
+
+ clk_disable_unprepare(pd->clk);
+
+ /* Assert the resets. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
+
+ return 0;
+}
+
+static int bcm2835_power_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct bcm2835_power_domain *pd =
+ container_of(domain, struct bcm2835_power_domain, base);
+ struct bcm2835_power *power = pd->power;
+
+ switch (pd->domain) {
+ case BCM2835_POWER_DOMAIN_GRAFX:
+ return bcm2835_power_power_on(pd, PM_GRAFX);
+
+ case BCM2835_POWER_DOMAIN_GRAFX_V3D:
+ return bcm2835_asb_power_on(pd, PM_GRAFX,
+ ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
+ PM_V3DRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE:
+ return bcm2835_power_power_on(pd, PM_IMAGE);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_PERI:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ 0, 0,
+ PM_PERIRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_ISP:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
+ PM_ISPRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_H264:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ ASB_H264_M_CTRL, ASB_H264_S_CTRL,
+ PM_H264RSTN);
+
+ case BCM2835_POWER_DOMAIN_USB:
+ PM_WRITE(PM_USB, PM_USB_CTRLEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI0:
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN | PM_DSI0_LDOHPEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI1:
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN | PM_DSI1_LDOHPEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_CCP2TX:
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN | PM_CCP2TX_LDOEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_HDMI:
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_RSTDR);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_CTRLEN);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_LDOPD);
+ usleep_range(100, 200);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_RSTDR);
+ return 0;
+
+ default:
+ dev_err(power->dev, "Invalid domain %d\n", pd->domain);
+ return -EINVAL;
+ }
+}
+
+static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct bcm2835_power_domain *pd =
+ container_of(domain, struct bcm2835_power_domain, base);
+ struct bcm2835_power *power = pd->power;
+
+ switch (pd->domain) {
+ case BCM2835_POWER_DOMAIN_GRAFX:
+ return bcm2835_power_power_off(pd, PM_GRAFX);
+
+ case BCM2835_POWER_DOMAIN_GRAFX_V3D:
+ return bcm2835_asb_power_off(pd, PM_GRAFX,
+ ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
+ PM_V3DRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE:
+ return bcm2835_power_power_off(pd, PM_IMAGE);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_PERI:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ 0, 0,
+ PM_PERIRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_ISP:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
+ PM_ISPRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_H264:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ ASB_H264_M_CTRL, ASB_H264_S_CTRL,
+ PM_H264RSTN);
+
+ case BCM2835_POWER_DOMAIN_USB:
+ PM_WRITE(PM_USB, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI0:
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
+ PM_WRITE(PM_DSI0, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI1:
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
+ PM_WRITE(PM_DSI1, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_CCP2TX:
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
+ PM_WRITE(PM_CCP2TX, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_HDMI:
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_LDOPD);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_CTRLEN);
+ return 0;
+
+ default:
+ dev_err(power->dev, "Invalid domain %d\n", pd->domain);
+ return -EINVAL;
+ }
+}
+
+static void
+bcm2835_init_power_domain(struct bcm2835_power *power,
+ int pd_xlate_index, const char *name)
+{
+ struct device *dev = power->dev;
+ struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
+
+ dom->clk = devm_clk_get(dev->parent, name);
+
+ dom->base.name = name;
+ dom->base.power_on = bcm2835_power_pd_power_on;
+ dom->base.power_off = bcm2835_power_pd_power_off;
+
+ dom->domain = pd_xlate_index;
+ dom->power = power;
+
+ /* XXX: on/off at boot? */
+ pm_genpd_init(&dom->base, NULL, true);
+
+ power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+}
+
+/**
+ * bcm2835_reset_reset - Resets a block that has a reset line in the
+ * PM block.
+ *
+ * The consumer of the reset controller must have the power domain up
+ * -- there's no reset ability with the power domain down. To reset
+ * the sub-block, we just disable its access to memory through the
+ * ASB, reset, and re-enable.
+ */
+static int bcm2835_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
+ reset);
+ struct bcm2835_power_domain *pd;
+ int ret;
+
+ switch (id) {
+ case BCM2835_RESET_V3D:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_GRAFX_V3D];
+ break;
+ case BCM2835_RESET_H264:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_H264];
+ break;
+ case BCM2835_RESET_ISP:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_ISP];
+ break;
+ default:
+ dev_err(power->dev, "Bad reset id %ld\n", id);
+ return -EINVAL;
+ }
+
+ ret = bcm2835_power_pd_power_off(&pd->base);
+ if (ret)
+ return ret;
+
+ return bcm2835_power_pd_power_on(&pd->base);
+}
+
+static int bcm2835_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
+ reset);
+
+ switch (id) {
+ case BCM2835_RESET_V3D:
+ return !(PM_READ(PM_GRAFX) & PM_V3DRSTN);
+ case BCM2835_RESET_H264:
+ return !(PM_READ(PM_IMAGE) & PM_H264RSTN);
+ case BCM2835_RESET_ISP:
+ return !(PM_READ(PM_IMAGE) & PM_ISPRSTN);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct reset_control_ops bcm2835_reset_ops = {
+ .reset = bcm2835_reset_reset,
+ .status = bcm2835_reset_status,
+};
+
+static const char *const power_domain_names[] = {
+ [BCM2835_POWER_DOMAIN_GRAFX] = "grafx",
+ [BCM2835_POWER_DOMAIN_GRAFX_V3D] = "v3d",
+
+ [BCM2835_POWER_DOMAIN_IMAGE] = "image",
+ [BCM2835_POWER_DOMAIN_IMAGE_PERI] = "peri_image",
+ [BCM2835_POWER_DOMAIN_IMAGE_H264] = "h264",
+ [BCM2835_POWER_DOMAIN_IMAGE_ISP] = "isp",
+
+ [BCM2835_POWER_DOMAIN_USB] = "usb",
+ [BCM2835_POWER_DOMAIN_DSI0] = "dsi0",
+ [BCM2835_POWER_DOMAIN_DSI1] = "dsi1",
+ [BCM2835_POWER_DOMAIN_CAM0] = "cam0",
+ [BCM2835_POWER_DOMAIN_CAM1] = "cam1",
+ [BCM2835_POWER_DOMAIN_CCP2TX] = "ccp2tx",
+ [BCM2835_POWER_DOMAIN_HDMI] = "hdmi",
+};
+
+static int bcm2835_power_probe(struct platform_device *pdev)
+{
+ struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct bcm2835_power *power;
+ static const struct {
+ int parent, child;
+ } domain_deps[] = {
+ { BCM2835_POWER_DOMAIN_GRAFX, BCM2835_POWER_DOMAIN_GRAFX_V3D },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_PERI },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_H264 },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_ISP },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_USB },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
+ };
+ int ret, i;
+ u32 id;
+
+ power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, power);
+
+ power->dev = dev;
+ power->base = pm->base;
+ power->asb = pm->asb;
+
+ id = ASB_READ(ASB_AXI_BRDG_ID);
+ if (id != 0x62726467 /* "BRDG" */) {
+ dev_err(dev, "ASB register ID returned 0x%08x\n", id);
+ return -ENODEV;
+ }
+
+ power->pd_xlate.domains = devm_kcalloc(dev,
+ ARRAY_SIZE(power_domain_names),
+ sizeof(*power->pd_xlate.domains),
+ GFP_KERNEL);
+ if (!power->pd_xlate.domains)
+ return -ENOMEM;
+
+ power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
+
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
+ bcm2835_init_power_domain(power, i, power_domain_names[i]);
+
+ for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
+ pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
+ &power->domains[domain_deps[i].child].base);
+ }
+
+ power->reset.owner = THIS_MODULE;
+ power->reset.nr_resets = BCM2835_RESET_COUNT;
+ power->reset.ops = &bcm2835_reset_ops;
+ power->reset.of_node = dev->parent->of_node;
+
+ ret = devm_reset_controller_register(dev, &power->reset);
+ if (ret)
+ return ret;
+
+ of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
+
+ dev_info(dev, "Broadcom BCM2835 power domains driver");
+ return 0;
+}
+
+static int bcm2835_power_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver bcm2835_power_driver = {
+ .probe = bcm2835_power_probe,
+ .remove = bcm2835_power_remove,
+ .driver = {
+ .name = "bcm2835-power",
+ },
+};
+module_platform_driver(bcm2835_power_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM power domains and reset");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 8f80e8bbf29e..61f8e1433d0a 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -22,6 +22,7 @@ config FSL_GUTS
config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
+ select SOC_BUS
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
index ab8f82ee7ee5..e13fd3ac1939 100644
--- a/drivers/soc/fsl/dpio/dpio-cmd.h
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -25,6 +25,8 @@
#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
+#define DPIO_CMDID_RESET DPIO_CMD(0x005)
+#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120)
struct dpio_cmd_open {
__le32 dpio_id;
@@ -46,4 +48,8 @@ struct dpio_rsp_get_attr {
__le32 qbman_version;
};
+struct dpio_stashing_dest {
+ u8 sdest;
+};
+
#endif /* _FSL_DPIO_CMD_H */
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index e58fcc9096e8..c0cdc8946031 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
@@ -30,6 +31,48 @@ struct dpio_priv {
struct dpaa2_io *io;
};
+static cpumask_var_t cpus_unused_mask;
+
+static const struct soc_device_attribute ls1088a_soc[] = {
+ {.family = "QorIQ LS1088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2080a_soc[] = {
+ {.family = "QorIQ LS2080A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2088a_soc[] = {
+ {.family = "QorIQ LS2088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute lx2160a_soc[] = {
+ {.family = "QorIQ LX2160A"},
+ { /* sentinel */ }
+};
+
+static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu)
+{
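+ /* Map a CPU to the stashing destination of its core cluster (cache index) */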
+ int cluster_base, cluster_size;
+
+ if (soc_device_match(ls1088a_soc)) {
+ cluster_base = 2;
+ cluster_size = 4;
+ } else if (soc_device_match(ls2080a_soc) ||
+ soc_device_match(ls2088a_soc) ||
+ soc_device_match(lx2160a_soc)) {
+ cluster_base = 0;
+ cluster_size = 2;
+ } else {
+ dev_err(&dpio_dev->dev, "unknown SoC version\n");
+ return -1;
+ }
+
+ return cluster_base + cpu / cluster_size;
+}
+
static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
{
struct device *dev = (struct device *)arg;
@@ -86,7 +129,8 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
struct dpio_priv *priv;
int err = -ENOMEM;
struct device *dev = &dpio_dev->dev;
- static int next_cpu = -1;
+ int possible_next_cpu;
+ int sdest;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -108,6 +152,12 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
goto err_open;
}
+ err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_reset() failed\n");
+ goto err_reset;
+ }
+
err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
&dpio_attrs);
if (err) {
@@ -128,17 +178,24 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
desc.dpio_id = dpio_dev->obj_desc.id;
/* get the cpu to use for the affinity hint */
- if (next_cpu == -1)
- next_cpu = cpumask_first(cpu_online_mask);
- else
- next_cpu = cpumask_next(next_cpu, cpu_online_mask);
-
- if (!cpu_possible(next_cpu)) {
+ possible_next_cpu = cpumask_first(cpus_unused_mask);
+ if (possible_next_cpu >= nr_cpu_ids) {
dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
err = -ERANGE;
goto err_allocate_irqs;
}
- desc.cpu = next_cpu;
+ desc.cpu = possible_next_cpu;
+ cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
+
+ sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
+ if (sdest >= 0) {
+ err = dpio_set_stashing_destination(dpio_dev->mc_io, 0,
+ dpio_dev->mc_handle,
+ sdest);
+ if (err)
+ dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n",
+ desc.cpu);
+ }
/*
* Set the CENA regs to be the cache inhibited area of the portal to
@@ -171,7 +228,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
if (err)
goto err_register_dpio_irq;
- priv->io = dpaa2_io_create(&desc);
+ priv->io = dpaa2_io_create(&desc, dev);
if (!priv->io) {
dev_err(dev, "dpaa2_io_create failed\n");
err = -ENOMEM;
@@ -182,7 +239,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
dev_dbg(dev, " receives_notifications = %d\n",
desc.receives_notifications);
dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
- fsl_mc_portal_free(dpio_dev->mc_io);
return 0;
@@ -193,6 +249,7 @@ err_register_dpio_irq:
err_allocate_irqs:
dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
err_get_attr:
+err_reset:
dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
err_open:
fsl_mc_portal_free(dpio_dev->mc_io);
@@ -211,20 +268,17 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
{
struct device *dev;
struct dpio_priv *priv;
- int err;
+ int err = 0, cpu;
dev = &dpio_dev->dev;
priv = dev_get_drvdata(dev);
+ cpu = dpaa2_io_get_cpu(priv->io);
dpaa2_io_down(priv->io);
dpio_teardown_irqs(dpio_dev);
- err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
- if (err) {
- dev_err(dev, "MC portal allocation failed\n");
- goto err_mcportal;
- }
+ cpumask_set_cpu(cpu, cpus_unused_mask);
err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
&dpio_dev->mc_handle);
@@ -243,7 +297,7 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
err_open:
fsl_mc_portal_free(dpio_dev->mc_io);
-err_mcportal:
+
return err;
}
@@ -267,11 +321,16 @@ static struct fsl_mc_driver dpaa2_dpio_driver = {
static int dpio_driver_init(void)
{
+ if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(cpus_unused_mask, cpu_online_mask);
+
return fsl_mc_driver_register(&dpaa2_dpio_driver);
}
static void dpio_driver_exit(void)
{
+ free_cpumask_var(cpus_unused_mask);
fsl_mc_driver_unregister(&dpaa2_dpio_driver);
}
module_init(dpio_driver_init);
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index ec0837ff039a..b9539ef2c3cd 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -27,6 +27,7 @@ struct dpaa2_io {
/* protect notifications list */
spinlock_t lock_notifications;
struct list_head notifications;
+ struct device *dev;
};
struct dpaa2_io_store {
@@ -98,13 +99,15 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
+ * @dev: the actual DPIO device
*
* Activates a "struct dpaa2_io" corresponding to the given config of an actual
* DPIO object.
*
* Return a valid dpaa2_io object for success, or NULL for failure.
*/
-struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
+ struct device *dev)
{
struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
@@ -146,6 +149,8 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
dpio_by_cpu[desc->cpu] = obj;
spin_unlock(&dpio_list_lock);
+ obj->dev = dev;
+
return obj;
}
@@ -160,6 +165,11 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
*/
void dpaa2_io_down(struct dpaa2_io *d)
{
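+ /* Remove the object from the per-cpu table and the service list before freeing it */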
+ spin_lock(&dpio_list_lock);
+ dpio_by_cpu[d->dpio_desc.cpu] = NULL;
+ list_del(&d->node);
+ spin_unlock(&dpio_list_lock);
+
kfree(d);
}
@@ -210,10 +220,24 @@ done:
}
/**
+ * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
+ *
+ * @d: the given DPIO object.
+ *
+ * Return the cpu associated with the DPIO object
+ */
+int dpaa2_io_get_cpu(struct dpaa2_io *d)
+{
+ return d->dpio_desc.cpu;
+}
+EXPORT_SYMBOL(dpaa2_io_get_cpu);
+
+/**
* dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
* notifications on the given DPIO service.
* @d: the given DPIO service.
* @ctx: the notification context.
+ * @dev: the device that requests the register
*
* The caller should make the MC command to attach a DPAA2 object to
* a DPIO after this function completes successfully. In that way:
@@ -228,14 +252,20 @@ done:
* Return 0 for success, or -ENODEV for failure.
*/
int dpaa2_io_service_register(struct dpaa2_io *d,
- struct dpaa2_io_notification_ctx *ctx)
+ struct dpaa2_io_notification_ctx *ctx,
+ struct device *dev)
{
+ struct device_link *link;
unsigned long irqflags;
d = service_select_by_cpu(d, ctx->desired_cpu);
if (!d)
return -ENODEV;
+ link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!link)
+ return -EINVAL;
+
ctx->dpio_id = d->dpio_desc.dpio_id;
ctx->qman64 = (u64)(uintptr_t)ctx;
ctx->dpio_private = d;
@@ -256,12 +286,14 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
* dpaa2_io_service_deregister - The opposite of 'register'.
* @service: the given DPIO service.
* @ctx: the notification context.
+ * @dev: the device that requests to be deregistered
*
* This function should be called only after sending the MC command to
* detach the notification-producing device from the DPIO.
*/
void dpaa2_io_service_deregister(struct dpaa2_io *service,
- struct dpaa2_io_notification_ctx *ctx)
+ struct dpaa2_io_notification_ctx *ctx,
+ struct device *dev)
{
struct dpaa2_io *d = ctx->dpio_private;
unsigned long irqflags;
@@ -272,6 +304,9 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
spin_lock_irqsave(&d->lock_notifications, irqflags);
list_del(&ctx->node);
spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+ if (dev)
+ device_link_remove(dev, d->dev);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
@@ -438,7 +473,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
* Return 0 for success, and negative error code for failure.
*/
int dpaa2_io_service_release(struct dpaa2_io *d,
- u32 bpid,
+ u16 bpid,
const u64 *buffers,
unsigned int num_buffers)
{
@@ -467,7 +502,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
* Eg. if the buffer pool is empty, this will return zero.
*/
int dpaa2_io_service_acquire(struct dpaa2_io *d,
- u32 bpid,
+ u16 bpid,
u64 *buffers,
unsigned int num_buffers)
{
@@ -595,6 +630,7 @@ struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
ret = NULL;
} else {
+ prefetch(&s->vaddr[s->idx]);
*is_last = 0;
}
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
index ff37c80e11a0..af74c597a675 100644
--- a/drivers/soc/fsl/dpio/dpio.c
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -166,6 +166,22 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 sdest)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_stashing_dest *dpio_cmd;
+
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
+ cmd_flags, token);
+ dpio_cmd = (struct dpio_stashing_dest *)cmd.params;
+ dpio_cmd->sdest = sdest;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpio_get_api_version - Get Data Path I/O API version
* @mc_io: Pointer to MC portal's DPIO object
@@ -196,3 +212,26 @@ int dpio_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
index 49194c8e45f1..da06f7258098 100644
--- a/drivers/soc/fsl/dpio/dpio.h
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -75,9 +75,18 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpio_attr *attr);
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 dest);
+
int dpio_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
u16 *minor_ver);
+int dpio_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
#endif /* __FSL_DPIO_H */
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 0bddb85c0ae5..d02013556a1b 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -169,9 +169,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
3, /* RPM: Valid bit mode, RCR in array mode */
2, /* DCM: Discrete consumption ack mode */
3, /* EPM: Valid bit mode, EQCR in array mode */
- 0, /* mem stashing drop enable == FALSE */
+ 1, /* mem stashing drop enable == TRUE */
1, /* mem stashing priority == TRUE */
- 0, /* mem stashing enable == FALSE */
+ 1, /* mem stashing enable == TRUE */
1, /* dequeue stashing priority == TRUE */
0, /* dequeue stashing enable == FALSE */
0); /* EQCR_CI stashing priority == FALSE */
@@ -180,6 +180,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
if (!reg) {
pr_err("qbman: the portal is not enabled!\n");
+ kfree(p);
return NULL;
}
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 302e0c8d69d9..63f6df86f9e5 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -32,6 +32,7 @@ struct fsl_soc_die_attr {
static struct guts *guts;
static struct soc_device_attribute soc_dev_attr;
static struct soc_device *soc_dev;
+static struct device_node *root;
/* SoC die attribute definition for QorIQ platform */
@@ -114,7 +115,7 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match(
return NULL;
}
-u32 fsl_guts_get_svr(void)
+static u32 fsl_guts_get_svr(void)
{
u32 svr = 0;
@@ -128,11 +129,10 @@ u32 fsl_guts_get_svr(void)
return svr;
}
-EXPORT_SYMBOL(fsl_guts_get_svr);
static int fsl_guts_probe(struct platform_device *pdev)
{
- struct device_node *root, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct resource *res;
const struct fsl_soc_die_attr *soc_die;
@@ -155,9 +155,8 @@ static int fsl_guts_probe(struct platform_device *pdev)
root = of_find_node_by_path("/");
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
- of_node_put(root);
if (machine)
- soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ soc_dev_attr.machine = machine;
svr = fsl_guts_get_svr();
soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -192,6 +191,7 @@ static int fsl_guts_probe(struct platform_device *pdev)
static int fsl_guts_remove(struct platform_device *dev)
{
soc_device_unregister(soc_dev);
+ of_node_put(root);
return 0;
}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 9436aa83ff1b..e6d48dccb8d5 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -62,7 +62,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
return -ENODEV;
}
- if (!dma_zalloc_coherent(dev, *size, addr, 0)) {
+ if (!dma_alloc_coherent(dev, *size, addr, 0)) {
dev_err(dev, "DMA Alloc memory failed\n");
return -ENODEV;
}
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 52c153cd795a..636f83f781f5 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work);
static irqreturn_t portal_isr(int irq, void *ptr)
{
struct qman_portal *p = ptr;
-
- u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+ u32 clear = 0;
if (unlikely(!is))
return IRQ_NONE;
/* DQRR-handling if it's interrupt-driven */
- if (is & QM_PIRQ_DQRI)
+ if (is & QM_PIRQ_DQRI) {
__poll_portal_fast(p, QMAN_POLL_LIMIT);
+ clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
+ }
/* Handling of anything else that's interrupt-driven */
- clear |= __poll_portal_slow(p, is);
+ clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
qm_out(&p->p, QM_REG_ISR, clear);
return IRQ_HANDLED;
}
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index f78c34647ca2..76480df195a8 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
const char *sprop;
int ret = 0;
u32 val;
- struct resource *res;
- struct device_node *np2;
- static int siram_init_flag;
- struct platform_device *pdev;
sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
if (sprop) {
@@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
utdm->siram_entry_id = val;
set_si_param(utdm, ut_info);
-
- np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
- if (!np2)
- return -EINVAL;
-
- pdev = of_find_device_by_node(np2);
- if (!pdev) {
- pr_err("%pOFn: failed to lookup pdev\n", np2);
- of_node_put(np2);
- return -EINVAL;
- }
-
- of_node_put(np2);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(utdm->si_regs)) {
- ret = PTR_ERR(utdm->si_regs);
- goto err_miss_siram_property;
- }
-
- np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
- if (!np2) {
- ret = -EINVAL;
- goto err_miss_siram_property;
- }
-
- pdev = of_find_device_by_node(np2);
- if (!pdev) {
- ret = -EINVAL;
- pr_err("%pOFn: failed to lookup pdev\n", np2);
- of_node_put(np2);
- goto err_miss_siram_property;
- }
-
- of_node_put(np2);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- utdm->siram = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(utdm->siram)) {
- ret = PTR_ERR(utdm->siram);
- goto err_miss_siram_property;
- }
-
- if (siram_init_flag == 0) {
- memset_io(utdm->siram, 0, resource_size(res));
- siram_init_flag = 1;
- }
-
- return ret;
-
-err_miss_siram_property:
- devm_iounmap(&pdev->dev, utdm->si_regs);
return ret;
}
EXPORT_SYMBOL(ucc_of_parse_tdm);
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 2112d18dbb7b..d80f899d22f9 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -2,7 +2,7 @@ menu "i.MX SoC drivers"
config IMX_GPCV2_PM_DOMAINS
bool "i.MX GPCv2 PM domains"
- depends on SOC_IMX7D || SOC_IMX8MQ || (COMPILE_TEST && OF)
+ depends on ARCH_MXC || (COMPILE_TEST && OF)
depends on PM
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 8b4f48a2ca57..176f473127b6 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -8,6 +8,7 @@
* Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
*/
+#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -65,6 +66,12 @@
#define GPC_M4_PU_PDN_FLG 0x1bc
+#define GPC_PU_PWRHSK 0x1fc
+
+#define IMX8M_GPU_HSK_PWRDNREQN BIT(6)
+#define IMX8M_VPU_HSK_PWRDNREQN BIT(5)
+#define IMX8M_DISP_HSK_PWRDNREQN BIT(4)
+
/*
* The PGC offset values in Reference Manual
* (Rev. 1, 01/2018 and the older ones) GPC chapter's
@@ -92,16 +99,21 @@
#define GPC_PGC_CTRL_PCR BIT(0)
+#define GPC_CLK_MAX 6
+
struct imx_pgc_domain {
struct generic_pm_domain genpd;
struct regmap *regmap;
struct regulator *regulator;
+ struct clk *clk[GPC_CLK_MAX];
+ int num_clks;
unsigned int pgc;
const struct {
u32 pxx;
u32 map;
+ u32 hsk;
} bits;
const int voltage;
@@ -125,7 +137,7 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
const bool enable_power_control = !on;
const bool has_regulator = !IS_ERR(domain->regulator);
unsigned long deadline;
- int ret = 0;
+ int i, ret = 0;
regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
domain->bits.map, domain->bits.map);
@@ -138,10 +150,18 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
}
}
+ /* Enable reset clocks for all devices in the domain */
+ for (i = 0; i < domain->num_clks; i++)
+ clk_prepare_enable(domain->clk[i]);
+
if (enable_power_control)
regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+ if (domain->bits.hsk)
+ regmap_update_bits(domain->regmap, GPC_PU_PWRHSK,
+ domain->bits.hsk, on ? domain->bits.hsk : 0);
+
regmap_update_bits(domain->regmap, offset,
domain->bits.pxx, domain->bits.pxx);
@@ -179,6 +199,10 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
GPC_PGC_CTRL_PCR, 0);
+ /* Disable reset clocks for all devices in the domain */
+ for (i = 0; i < domain->num_clks; i++)
+ clk_disable_unprepare(domain->clk[i]);
+
if (has_regulator && !on) {
int err;
@@ -328,6 +352,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_GPU_SW_Pxx_REQ,
.map = IMX8M_GPU_A53_DOMAIN,
+ .hsk = IMX8M_GPU_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_GPU,
},
@@ -339,6 +364,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_VPU_SW_Pxx_REQ,
.map = IMX8M_VPU_A53_DOMAIN,
+ .hsk = IMX8M_VPU_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_VPU,
},
@@ -350,6 +376,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_DISP_SW_Pxx_REQ,
.map = IMX8M_DISP_A53_DOMAIN,
+ .hsk = IMX8M_DISP_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_DISP,
},
@@ -390,7 +417,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
static const struct regmap_range imx8m_yes_ranges[] = {
regmap_reg_range(GPC_LPCR_A_CORE_BSC,
- GPC_M4_PU_PDN_FLG),
+ GPC_PU_PWRHSK),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI),
GPC_PGC_SR(IMX8M_PGC_MIPI)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE1),
@@ -426,6 +453,41 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
.reg_access_table = &imx8m_access_table,
};
+static int imx_pgc_get_clocks(struct imx_pgc_domain *domain)
+{
+ int i, ret;
+
+ for (i = 0; ; i++) {
+ struct clk *clk = of_clk_get(domain->dev->of_node, i);
+ if (IS_ERR(clk))
+ break;
+ if (i >= GPC_CLK_MAX) {
+ dev_err(domain->dev, "more than %d clocks\n",
+ GPC_CLK_MAX);
+ ret = -EINVAL;
+ goto clk_err;
+ }
+ domain->clk[i] = clk;
+ }
+ domain->num_clks = i;
+
+ return 0;
+
+clk_err:
+ while (i--)
+ clk_put(domain->clk[i]);
+
+ return ret;
+}
+
+static void imx_pgc_put_clocks(struct imx_pgc_domain *domain)
+{
+ int i;
+
+ for (i = domain->num_clks - 1; i >= 0; i--)
+ clk_put(domain->clk[i]);
+}
+
static int imx_pgc_domain_probe(struct platform_device *pdev)
{
struct imx_pgc_domain *domain = pdev->dev.platform_data;
@@ -445,9 +507,17 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
domain->voltage, domain->voltage);
}
+ ret = imx_pgc_get_clocks(domain);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(domain->dev, "Failed to get domain's clocks\n");
+ return ret;
+ }
+
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err(domain->dev, "Failed to init power domain\n");
+ imx_pgc_put_clocks(domain);
return ret;
}
@@ -456,6 +526,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
if (ret) {
dev_err(domain->dev, "Failed to add genpd provider\n");
pm_genpd_remove(&domain->genpd);
+ imx_pgc_put_clocks(domain);
}
return ret;
@@ -467,6 +538,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
of_genpd_del_provider(domain->dev->of_node);
pm_genpd_remove(&domain->genpd);
+ imx_pgc_put_clocks(domain);
return 0;
}
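The GPCv2 change above brackets each power transition with clk_prepare_enable()/clk_disable_unprepare() on a per-domain clock list, and imx_pgc_get_clocks() builds that list by calling of_clk_get() until it fails, releasing everything obtained so far if the GPC_CLK_MAX limit is exceeded. A minimal user-space sketch of that collect-then-roll-back pattern follows; get_res()/put_res() are hypothetical stand-ins for the kernel clk API, not part of the patch.

	#include <stdio.h>
	#include <stdlib.h>

	#define RES_MAX 6

	/* Hypothetical stand-ins for of_clk_get()/clk_put(). */
	static void *get_res(int i)
	{
		return (i < 8) ? malloc(1) : NULL;	/* pretend 8 resources exist */
	}

	static void put_res(void *r)
	{
		free(r);
	}

	static int collect(void *res[RES_MAX], int *count)
	{
		int i;

		for (i = 0; ; i++) {
			void *r = get_res(i);

			if (!r)
				break;			/* no more resources: done */
			if (i >= RES_MAX) {
				put_res(r);		/* too many: roll back */
				while (i--)
					put_res(res[i]);
				return -1;
			}
			res[i] = r;
		}
		*count = i;
		return 0;
	}

	int main(void)
	{
		void *res[RES_MAX];
		int n;

		if (collect(res, &n))
			printf("more than %d resources\n", RES_MAX);
		else
			printf("collected %d resources\n", n);
		return 0;
	}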
diff --git a/drivers/soc/lantiq/Makefile b/drivers/soc/lantiq/Makefile
index be9e866d53e5..35aa86bd1023 100644
--- a/drivers/soc/lantiq/Makefile
+++ b/drivers/soc/lantiq/Makefile
@@ -1,2 +1 @@
obj-y += fpi-bus.o
-obj-$(CONFIG_XRX200_PHY_FW) += gphy.o
diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c
deleted file mode 100644
index feeb17cebc25..000000000000
--- a/drivers/soc/lantiq/gphy.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2012 John Crispin <blogic@phrozen.org>
- * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
- * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de>
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/firmware.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/reboot.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/property.h>
-#include <dt-bindings/mips/lantiq_rcu_gphy.h>
-
-#include <lantiq_soc.h>
-
-#define XRX200_GPHY_FW_ALIGN (16 * 1024)
-
-struct xway_gphy_priv {
- struct clk *gphy_clk_gate;
- struct reset_control *gphy_reset;
- struct reset_control *gphy_reset2;
- void __iomem *membase;
- char *fw_name;
-};
-
-struct xway_gphy_match_data {
- char *fe_firmware_name;
- char *ge_firmware_name;
-};
-
-static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
- .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
- .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
-};
-
-static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
- .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
- .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
-};
-
-static const struct xway_gphy_match_data xrx300_gphy_data = {
- .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
- .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
-};
-
-static const struct of_device_id xway_gphy_match[] = {
- { .compatible = "lantiq,xrx200a1x-gphy", .data = &xrx200a1x_gphy_data },
- { .compatible = "lantiq,xrx200a2x-gphy", .data = &xrx200a2x_gphy_data },
- { .compatible = "lantiq,xrx300-gphy", .data = &xrx300_gphy_data },
- { .compatible = "lantiq,xrx330-gphy", .data = &xrx300_gphy_data },
- {},
-};
-MODULE_DEVICE_TABLE(of, xway_gphy_match);
-
-static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
- dma_addr_t *dev_addr)
-{
- const struct firmware *fw;
- void *fw_addr;
- dma_addr_t dma_addr;
- size_t size;
- int ret;
-
- ret = request_firmware(&fw, priv->fw_name, dev);
- if (ret) {
- dev_err(dev, "failed to load firmware: %s, error: %i\n",
- priv->fw_name, ret);
- return ret;
- }
-
- /*
- * GPHY cores need the firmware code in a persistent and contiguous
- * memory area with a 16 kB boundary aligned start address.
- */
- size = fw->size + XRX200_GPHY_FW_ALIGN;
-
- fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
- if (fw_addr) {
- fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
- *dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
- memcpy(fw_addr, fw->data, fw->size);
- } else {
- dev_err(dev, "failed to alloc firmware memory\n");
- ret = -ENOMEM;
- }
-
- release_firmware(fw);
-
- return ret;
-}
-
-static int xway_gphy_of_probe(struct platform_device *pdev,
- struct xway_gphy_priv *priv)
-{
- struct device *dev = &pdev->dev;
- const struct xway_gphy_match_data *gphy_fw_name_cfg;
- u32 gphy_mode;
- int ret;
- struct resource *res_gphy;
-
- gphy_fw_name_cfg = of_device_get_match_data(dev);
-
- priv->gphy_clk_gate = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->gphy_clk_gate)) {
- dev_err(dev, "Failed to lookup gate clock\n");
- return PTR_ERR(priv->gphy_clk_gate);
- }
-
- res_gphy = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->membase = devm_ioremap_resource(dev, res_gphy);
- if (IS_ERR(priv->membase))
- return PTR_ERR(priv->membase);
-
- priv->gphy_reset = devm_reset_control_get(dev, "gphy");
- if (IS_ERR(priv->gphy_reset)) {
- if (PTR_ERR(priv->gphy_reset) != -EPROBE_DEFER)
- dev_err(dev, "Failed to lookup gphy reset\n");
- return PTR_ERR(priv->gphy_reset);
- }
-
- priv->gphy_reset2 = devm_reset_control_get_optional(dev, "gphy2");
- if (IS_ERR(priv->gphy_reset2))
- return PTR_ERR(priv->gphy_reset2);
-
- ret = device_property_read_u32(dev, "lantiq,gphy-mode", &gphy_mode);
- /* Default to GE mode */
- if (ret)
- gphy_mode = GPHY_MODE_GE;
-
- switch (gphy_mode) {
- case GPHY_MODE_FE:
- priv->fw_name = gphy_fw_name_cfg->fe_firmware_name;
- break;
- case GPHY_MODE_GE:
- priv->fw_name = gphy_fw_name_cfg->ge_firmware_name;
- break;
- default:
- dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int xway_gphy_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct xway_gphy_priv *priv;
- dma_addr_t fw_addr = 0;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- ret = xway_gphy_of_probe(pdev, priv);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(priv->gphy_clk_gate);
- if (ret)
- return ret;
-
- ret = xway_gphy_load(dev, priv, &fw_addr);
- if (ret) {
- clk_disable_unprepare(priv->gphy_clk_gate);
- return ret;
- }
-
- reset_control_assert(priv->gphy_reset);
- reset_control_assert(priv->gphy_reset2);
-
- iowrite32be(fw_addr, priv->membase);
-
- reset_control_deassert(priv->gphy_reset);
- reset_control_deassert(priv->gphy_reset2);
-
- platform_set_drvdata(pdev, priv);
-
- return ret;
-}
-
-static int xway_gphy_remove(struct platform_device *pdev)
-{
- struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
-
- iowrite32be(0, priv->membase);
-
- clk_disable_unprepare(priv->gphy_clk_gate);
-
- return 0;
-}
-
-static struct platform_driver xway_gphy_driver = {
- .probe = xway_gphy_probe,
- .remove = xway_gphy_remove,
- .driver = {
- .name = "xway-rcu-gphy",
- .of_match_table = xway_gphy_match,
- },
-};
-
-module_platform_driver(xway_gphy_driver);
-
-MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
-MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
-MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
-MODULE_DESCRIPTION("Lantiq XWAY GPHY Firmware Loader");
-MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index fcbf8a2e4080..1ee298f6bf17 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -98,6 +98,24 @@ config QCOM_RPMH
of hardware components aggregate requests for these resources and
help apply the aggregated state on the resource.
+config QCOM_RPMHPD
+ bool "Qualcomm RPMh Power domain driver"
+ depends on QCOM_RPMH && QCOM_COMMAND_DB
+ help
+ QCOM RPMh Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPMh, which then translates it into the corresponding
+ voltage for the voltage rail.
+
+config QCOM_RPMPD
+ bool "Qualcomm RPM Power domain driver"
+ depends on QCOM_SMD_RPM=y
+ help
+ QCOM RPM Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPM, which then translates it into the corresponding
+ voltage for the voltage rail.
+
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f25b54cd6cf8..ffe519b0cb66 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -21,3 +21,5 @@ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
+obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
index 2e1e4f0a5db8..86600d97c36d 100644
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -71,6 +71,11 @@ static struct llcc_slice_config sdm845_data[] = {
SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
};
+static int sdm845_qcom_llcc_remove(struct platform_device *pdev)
+{
+ return qcom_llcc_remove(pdev);
+}
+
static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
{
return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
@@ -87,6 +92,7 @@ static struct platform_driver sdm845_qcom_llcc_driver = {
.of_match_table = sdm845_qcom_llcc_of_match,
},
.probe = sdm845_qcom_llcc_probe,
+ .remove = sdm845_qcom_llcc_remove,
};
module_platform_driver(sdm845_qcom_llcc_driver);
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 80667f7be52c..9090ea12eaf3 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -46,7 +46,7 @@
#define BANK_OFFSET_STRIDE 0x80000
-static struct llcc_drv_data *drv_data;
+static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
static const struct regmap_config llcc_regmap_config = {
.reg_bits = 32,
@@ -68,6 +68,9 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid)
struct llcc_slice_desc *desc;
u32 sz, count;
+ if (IS_ERR(drv_data))
+ return ERR_CAST(drv_data);
+
cfg = drv_data->cfg;
sz = drv_data->cfg_size;
@@ -108,6 +111,9 @@ static int llcc_update_act_ctrl(u32 sid,
u32 slice_status;
int ret;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
status_reg = LLCC_TRP_STATUSn(sid);
@@ -143,6 +149,9 @@ int llcc_slice_activate(struct llcc_slice_desc *desc)
int ret;
u32 act_ctrl_val;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
if (IS_ERR_OR_NULL(desc))
return -EINVAL;
@@ -180,6 +189,9 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc)
u32 act_ctrl_val;
int ret;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
if (IS_ERR_OR_NULL(desc))
return -EINVAL;
@@ -289,46 +301,62 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev)
return ret;
}
+int qcom_llcc_remove(struct platform_device *pdev)
+{
+ /* Set the global pointer to an error code to avoid referencing it */
+ drv_data = ERR_PTR(-ENODEV);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_llcc_remove);
+
+static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res)
+ return ERR_PTR(-ENODEV);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
+}
+
int qcom_llcc_probe(struct platform_device *pdev,
const struct llcc_slice_config *llcc_cfg, u32 sz)
{
u32 num_banks;
struct device *dev = &pdev->dev;
- struct resource *llcc_banks_res, *llcc_bcast_res;
- void __iomem *llcc_banks_base, *llcc_bcast_base;
int ret, i;
struct platform_device *llcc_edac;
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
- if (!drv_data)
- return -ENOMEM;
-
- llcc_banks_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "llcc_base");
- llcc_banks_base = devm_ioremap_resource(&pdev->dev, llcc_banks_res);
- if (IS_ERR(llcc_banks_base))
- return PTR_ERR(llcc_banks_base);
-
- drv_data->regmap = devm_regmap_init_mmio(dev, llcc_banks_base,
- &llcc_regmap_config);
- if (IS_ERR(drv_data->regmap))
- return PTR_ERR(drv_data->regmap);
-
- llcc_bcast_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "llcc_broadcast_base");
- llcc_bcast_base = devm_ioremap_resource(&pdev->dev, llcc_bcast_res);
- if (IS_ERR(llcc_bcast_base))
- return PTR_ERR(llcc_bcast_base);
-
- drv_data->bcast_regmap = devm_regmap_init_mmio(dev, llcc_bcast_base,
- &llcc_regmap_config);
- if (IS_ERR(drv_data->bcast_regmap))
- return PTR_ERR(drv_data->bcast_regmap);
+ if (!drv_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
+ if (IS_ERR(drv_data->regmap)) {
+ ret = PTR_ERR(drv_data->regmap);
+ goto err;
+ }
+
+ drv_data->bcast_regmap =
+ qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
+ if (IS_ERR(drv_data->bcast_regmap)) {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
&num_banks);
if (ret)
- return ret;
+ goto err;
num_banks &= LLCC_LB_CNT_MASK;
num_banks >>= LLCC_LB_CNT_SHIFT;
@@ -340,8 +368,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
GFP_KERNEL);
- if (!drv_data->offsets)
- return -ENOMEM;
+ if (!drv_data->offsets) {
+ ret = -ENOMEM;
+ goto err;
+ }
for (i = 0; i < num_banks; i++)
drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
@@ -349,8 +379,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
drv_data->bitmap = devm_kcalloc(dev,
BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
GFP_KERNEL);
- if (!drv_data->bitmap)
- return -ENOMEM;
+ if (!drv_data->bitmap) {
+ ret = -ENOMEM;
+ goto err;
+ }
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
@@ -359,7 +391,7 @@ int qcom_llcc_probe(struct platform_device *pdev,
ret = qcom_llcc_cfg_program(pdev);
if (ret)
- return ret;
+ goto err;
drv_data->ecc_irq = platform_get_irq(pdev, 0);
if (drv_data->ecc_irq >= 0) {
@@ -370,6 +402,9 @@ int qcom_llcc_probe(struct platform_device *pdev,
dev_err(dev, "Failed to register llcc edac driver\n");
}
+ return 0;
+err:
+ drv_data = ERR_PTR(-ENODEV);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_llcc_probe);
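The llcc-slice rework above leans on the kernel's ERR_PTR convention: the global drv_data pointer starts out as the encoded error -EPROBE_DEFER, becomes a real allocation on successful probe, and is reset to an encoded -ENODEV on probe failure or removal, so every exported helper can simply test IS_ERR(drv_data). A self-contained sketch of that pointer-encoding idiom, re-implementing ERR_PTR()/PTR_ERR()/IS_ERR() locally rather than using the kernel headers:

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_ERRNO	4095
	#define EPROBE_DEFER	517	/* kernel errno values */
	#define ENODEV_ERR	19

	/* Local re-implementation of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	/* Global state pointer, as in llcc-slice.c: starts out "deferred". */
	static void *drv_data = (void *)(long)-EPROBE_DEFER;

	static long api_call(void)
	{
		if (IS_ERR(drv_data))
			return PTR_ERR(drv_data);	/* not probed (yet) */
		return 0;				/* normal operation */
	}

	int main(void)
	{
		printf("before probe: %ld\n", api_call());	/* -517 */

		drv_data = malloc(16);				/* "probe" succeeds */
		printf("after probe:  %ld\n", api_call());	/* 0 */

		free(drv_data);
		drv_data = ERR_PTR(-ENODEV_ERR);		/* "remove" */
		printf("after remove: %ld\n", api_call());	/* -19 */
		return 0;
	}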
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 09c669e70d63..038abc377fdb 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
struct gsbi_info *gsbi;
- int i;
+ int i, ret;
u32 mask, gsbi_num;
const struct crci_config *config = NULL;
@@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gsbi);
- return of_platform_populate(node, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ clk_disable_unprepare(gsbi->hclk);
+ return ret;
}
static int gsbi_remove(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 97bb5989aa21..7200d762a951 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -45,9 +45,9 @@ static ssize_t qcom_rmtfs_mem_show(struct device *dev,
struct device_attribute *attr,
char *buf);
-static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);
static ssize_t qcom_rmtfs_mem_show(struct device *dev,
struct device_attribute *attr,
@@ -132,6 +132,11 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
return 0;
}
+static struct class rmtfs_class = {
+ .owner = THIS_MODULE,
+ .name = "rmtfs",
+};
+
static const struct file_operations qcom_rmtfs_mem_fops = {
.owner = THIS_MODULE,
.open = qcom_rmtfs_mem_open,
@@ -199,6 +204,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.class = &rmtfs_class;
rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
@@ -277,32 +283,42 @@ static struct platform_driver qcom_rmtfs_mem_driver = {
},
};
-static int qcom_rmtfs_mem_init(void)
+static int __init qcom_rmtfs_mem_init(void)
{
int ret;
+ ret = class_register(&rmtfs_class);
+ if (ret)
+ return ret;
+
ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
if (ret < 0) {
pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
- return ret;
+ goto unregister_class;
}
ret = platform_driver_register(&qcom_rmtfs_mem_driver);
if (ret < 0) {
pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
- unregister_chrdev_region(qcom_rmtfs_mem_major,
- QCOM_RMTFS_MEM_DEV_MAX);
+ goto unregister_chrdev;
}
+ return 0;
+
+unregister_chrdev:
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+unregister_class:
+ class_unregister(&rmtfs_class);
return ret;
}
module_init(qcom_rmtfs_mem_init);
-static void qcom_rmtfs_mem_exit(void)
+static void __exit qcom_rmtfs_mem_exit(void)
{
platform_driver_unregister(&qcom_rmtfs_mem_driver);
unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+ class_unregister(&rmtfs_class);
}
module_exit(qcom_rmtfs_mem_exit);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index c7beb6841289..035091fd44b8 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
msg);
struct completion *compl = rpm_msg->completion;
+ bool free = rpm_msg->needs_free;
rpm_msg->err = r;
@@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
complete(compl);
exit:
- if (rpm_msg->needs_free)
+ if (free)
kfree(rpm_msg);
}
@@ -192,9 +193,8 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else {
- ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
- &rpm_msg->msg);
/* Clean up our call by spoofing tx_done */
+ ret = 0;
rpmh_tx_done(&rpm_msg->msg, ret);
}
@@ -348,11 +348,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
{
struct batch_cache_req *req;
struct rpmh_request *rpm_msgs;
- DECLARE_COMPLETION_ONSTACK(compl);
+ struct completion *compls;
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
unsigned long time_left;
int count = 0;
- int ret, i, j;
+ int ret, i;
+ void *ptr;
if (!cmd || !n)
return -EINVAL;
@@ -362,10 +363,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
if (!count)
return -EINVAL;
- req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+ ptr = kzalloc(sizeof(*req) +
+ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
GFP_ATOMIC);
- if (!req)
+ if (!ptr)
return -ENOMEM;
+
+ req = ptr;
+ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
req->count = count;
rpm_msgs = req->rpm_msgs;
@@ -380,25 +386,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
}
for (i = 0; i < count; i++) {
- rpm_msgs[i].completion = &compl;
+ struct completion *compl = &compls[i];
+
+ init_completion(compl);
+ rpm_msgs[i].completion = compl;
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
if (ret) {
pr_err("Error(%d) sending RPMH message addr=%#x\n",
ret, rpm_msgs[i].msg.cmds[0].addr);
- for (j = i; j < count; j++)
- rpmh_tx_done(&rpm_msgs[j].msg, ret);
break;
}
}
time_left = RPMH_TIMEOUT_MS;
- for (i = 0; i < count; i++) {
- time_left = wait_for_completion_timeout(&compl, time_left);
+ while (i--) {
+ time_left = wait_for_completion_timeout(&compls[i], time_left);
if (!time_left) {
/*
* Better hope they never finish because they'll signal
- * the completion on our stack and that's bad once
- * we've returned from the function.
+ * the completion that we're going to free once
+ * we've returned from this function.
*/
WARN_ON(1);
ret = -ETIMEDOUT;
@@ -407,7 +414,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
}
exit:
- kfree(req);
+ kfree(ptr);
return ret;
}
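The rpmh_write_batch() rework above replaces the single on-stack completion with one completion per message and carves both the request array and the completion array out of one allocation (compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs)), so a late tx_done can never signal a completion that lived on a stack frame that has already been unwound. A small user-space sketch of that one-allocation layout, with dummy struct types standing in for the rpmh structures:

	#include <stdio.h>
	#include <stdlib.h>

	/* Dummy stand-ins for struct rpmh_request / struct completion. */
	struct msg  { int id; };
	struct done { int signalled; };

	struct batch {
		int count;
		struct msg msgs[];	/* flexible array, like req->rpm_msgs */
	};

	int main(void)
	{
		int i, count = 3;
		void *ptr;
		struct batch *req;
		struct done *compls;

		/* One allocation for the header, the messages and the completions. */
		ptr = calloc(1, sizeof(*req) +
				count * (sizeof(req->msgs[0]) + sizeof(*compls)));
		if (!ptr)
			return 1;

		req = ptr;
		compls = (struct done *)((char *)ptr + sizeof(*req) +
					 count * sizeof(req->msgs[0]));
		req->count = count;

		for (i = 0; i < count; i++) {
			req->msgs[i].id = i;
			compls[i].signalled = 1;	/* pretend each message completed */
		}

		for (i = 0; i < count; i++)
			printf("msg %d done=%d\n", req->msgs[i].id, compls[i].signalled);

		free(ptr);	/* one free releases everything, as kfree(ptr) does above */
		return 0;
	}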
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
new file mode 100644
index 000000000000..5741ec3fa814
--- /dev/null
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)
+
+#define RPMH_ARC_MAX_LEVELS 16
+
+/**
+ * struct rpmhpd - top level RPMh power domain resource data structure
+ * @dev: rpmh power domain controller device
+ * @pd: generic_pm_domain corresponding to the power domain
+ * @parent: generic_pm_domain of the parent power domain, if any
+ * @peer: A peer power domain in case active-only voting is
+ * supported
+ * @active_only: True if it represents an active-only peer
+ * @corner: current corner (index into @level)
+ * @active_corner: current active corner sent to RPMh
+ * @level: An array of level (vlvl) to corner (hlvl) mappings
+ * derived from cmd-db
+ * @level_count: Number of levels supported by the power domain, the
+ * maximum being 16 (0 - 15)
+ * @enabled: true if the power domain is enabled
+ * @res_name: Resource name used for cmd-db lookup
+ * @addr: Resource address as looked up using resource name from
+ * cmd-db
+ */
+struct rpmhpd {
+ struct device *dev;
+ struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
+ struct rpmhpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ unsigned int active_corner;
+ u32 level[RPMH_ARC_MAX_LEVELS];
+ size_t level_count;
+ bool enabled;
+ const char *res_name;
+ u32 addr;
+};
+
+struct rpmhpd_desc {
+ struct rpmhpd **rpmhpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmhpd_lock);
+
+/* SDM845 RPMH powerdomains */
+
+static struct rpmhpd sdm845_ebi = {
+ .pd = { .name = "ebi", },
+ .res_name = "ebi.lvl",
+};
+
+static struct rpmhpd sdm845_lmx = {
+ .pd = { .name = "lmx", },
+ .res_name = "lmx.lvl",
+};
+
+static struct rpmhpd sdm845_lcx = {
+ .pd = { .name = "lcx", },
+ .res_name = "lcx.lvl",
+};
+
+static struct rpmhpd sdm845_gfx = {
+ .pd = { .name = "gfx", },
+ .res_name = "gfx.lvl",
+};
+
+static struct rpmhpd sdm845_mss = {
+ .pd = { .name = "mss", },
+ .res_name = "mss.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao;
+static struct rpmhpd sdm845_mx = {
+ .pd = { .name = "mx", },
+ .peer = &sdm845_mx_ao,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &sdm845_mx,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao;
+static struct rpmhpd sdm845_cx = {
+ .pd = { .name = "cx", },
+ .peer = &sdm845_cx_ao,
+ .parent = &sdm845_mx.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &sdm845_cx,
+ .parent = &sdm845_mx_ao.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd *sdm845_rpmhpds[] = {
+ [SDM845_EBI] = &sdm845_ebi,
+ [SDM845_MX] = &sdm845_mx,
+ [SDM845_MX_AO] = &sdm845_mx_ao,
+ [SDM845_CX] = &sdm845_cx,
+ [SDM845_CX_AO] = &sdm845_cx_ao,
+ [SDM845_LMX] = &sdm845_lmx,
+ [SDM845_LCX] = &sdm845_lcx,
+ [SDM845_GFX] = &sdm845_gfx,
+ [SDM845_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sdm845_desc = {
+ .rpmhpds = sdm845_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdm845_rpmhpds),
+};
+
+static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { }
+};
+
+static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
+ unsigned int corner, bool sync)
+{
+ struct tcs_cmd cmd = {
+ .addr = pd->addr,
+ .data = corner,
+ };
+
+ /*
+ * Wait for an ack only when we are increasing the
+ * perf state of the power domain
+ */
+ if (sync)
+ return rpmh_write(pd->dev, state, &cmd, 1);
+ else
+ return rpmh_write_async(pd->dev, state, &cmd, 1);
+}
+
+static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+/*
+ * This function is used to aggregate the votes across the active-only
+ * resources and their peers. The aggregated votes are sent to RPMh as
+ * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
+ * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
+ * on system sleep).
+ * We send ACTIVE_ONLY votes for resources without any peers. For others,
+ * which have an active-only peer, all three votes are sent.
+ */
+static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+{
+ int ret;
+ struct rpmhpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
+ active_corner > pd->active_corner);
+ if (ret)
+ return ret;
+
+ pd->active_corner = active_corner;
+
+ if (peer) {
+ peer->active_corner = active_corner;
+
+ ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
+ active_corner, false);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
+ false);
+ }
+
+ return ret;
+}
+
+static int rpmhpd_power_on(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0;
+
+ mutex_lock(&rpmhpd_lock);
+
+ if (pd->corner)
+ ret = rpmhpd_aggregate_corner(pd, pd->corner);
+
+ if (!ret)
+ pd->enabled = true;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_power_off(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0;
+
+ mutex_lock(&rpmhpd_lock);
+
+ ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
+
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int level)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0, i;
+
+ mutex_lock(&rpmhpd_lock);
+
+ for (i = 0; i < pd->level_count; i++)
+ if (level <= pd->level[i])
+ break;
+
+ /*
+ * If the level requested is more than that supported by the
+ * max corner, just set it to max anyway.
+ */
+ if (i == pd->level_count)
+ i--;
+
+ if (pd->enabled) {
+ ret = rpmhpd_aggregate_corner(pd, i);
+ if (ret)
+ goto out;
+ }
+
+ pd->corner = i;
+out:
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+{
+ int i;
+ const u16 *buf;
+
+ buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* 2 bytes used for each command DB aux data entry */
+ rpmhpd->level_count >>= 1;
+
+ if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < rpmhpd->level_count; i++) {
+ rpmhpd->level[i] = buf[i];
+
+ /*
+ * The AUX data may be zero-padded. These zero-valued entries at
+ * the end of the map must be ignored.
+ */
+ if (i > 0 && rpmhpd->level[i] == 0) {
+ rpmhpd->level_count = i;
+ break;
+ }
+ pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
+ rpmhpd->level[i]);
+ }
+
+ return 0;
+}
+
+static int rpmhpd_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ size_t num_pds;
+ struct device *dev = &pdev->dev;
+ struct genpd_onecell_data *data;
+ struct rpmhpd **rpmhpds;
+ const struct rpmhpd_desc *desc;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmhpds = desc->rpmhpds;
+ num_pds = desc->num_pds;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num_pds;
+
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i]) {
+ dev_warn(dev, "rpmhpds[%d] is empty\n", i);
+ continue;
+ }
+
+ rpmhpds[i]->dev = dev;
+ rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
+ if (!rpmhpds[i]->addr) {
+ dev_err(dev, "Could not find RPMh address for resource %s\n",
+ rpmhpds[i]->res_name);
+ return -ENODEV;
+ }
+
+ ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
+ if (ret != CMD_DB_HW_ARC) {
+ dev_err(dev, "RPMh slave ID mismatch\n");
+ return -EINVAL;
+ }
+
+ ret = rpmhpd_update_level_mapping(rpmhpds[i]);
+ if (ret)
+ return ret;
+
+ rpmhpds[i]->pd.power_off = rpmhpd_power_off;
+ rpmhpds[i]->pd.power_on = rpmhpd_power_on;
+ rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
+ rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
+ pm_genpd_init(&rpmhpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmhpds[i]->pd;
+ }
+
+ /* Add subdomains */
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i])
+ continue;
+ if (rpmhpds[i]->parent)
+ pm_genpd_add_subdomain(rpmhpds[i]->parent,
+ &rpmhpds[i]->pd);
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmhpd_driver = {
+ .driver = {
+ .name = "qcom-rpmhpd",
+ .of_match_table = rpmhpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmhpd_probe,
+};
+
+static int __init rpmhpd_init(void)
+{
+ return platform_driver_register(&rpmhpd_driver);
+}
+core_initcall(rpmhpd_init);
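rpmhpd_aggregate_corner() above reduces to a pair of max() operations: the ACTIVE_ONLY vote is the maximum of this domain's and its enabled peer's active corners, while the WAKE_ONLY and SLEEP votes (sent only when a peer exists) reuse that aggregated active corner and the maximum of the sleep corners, with an active-only domain contributing 0 to sleep. A compact user-space sketch of that aggregation, with plain ints standing in for the rpmhpd structures:

	#include <stdio.h>

	struct pd {
		int corner;		/* requested corner (performance level)  */
		int active_only;	/* contributes nothing to the sleep vote */
		int enabled;
		struct pd *peer;
	};

	static void to_active_sleep(const struct pd *pd, int corner,
				    int *active, int *sleep)
	{
		*active = corner;
		*sleep = pd->active_only ? 0 : corner;
	}

	static int max(int a, int b) { return a > b ? a : b; }

	/* Compute the aggregated active and sleep corners for @pd and its peer. */
	static void aggregate(const struct pd *pd, int *active, int *sleep)
	{
		int ta, ts, pa = 0, ps = 0;

		to_active_sleep(pd, pd->corner, &ta, &ts);
		if (pd->peer && pd->peer->enabled)
			to_active_sleep(pd->peer, pd->peer->corner, &pa, &ps);

		*active = max(ta, pa);	/* basis of the ACTIVE_ONLY / WAKE_ONLY votes */
		*sleep = max(ts, ps);	/* basis of the SLEEP vote */
	}

	int main(void)
	{
		struct pd cx_ao = { .corner = 5, .active_only = 1, .enabled = 1 };
		struct pd cx    = { .corner = 3, .peer = &cx_ao, .enabled = 1 };
		int active, sleep;

		aggregate(&cx, &active, &sleep);
		printf("active=%d sleep=%d\n", active, sleep);	/* active=5 sleep=3 */
		return 0;
	}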
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
new file mode 100644
index 000000000000..005326050c23
--- /dev/null
+++ b/drivers/soc/qcom/rpmpd.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
+
+/* Resource types */
+#define RPMPD_SMPA 0x61706d73
+#define RPMPD_LDOA 0x616f646c
+
+/* Operation Keys */
+#define KEY_CORNER 0x6e726f63 /* corn */
+#define KEY_ENABLE 0x6e657773 /* swen */
+#define KEY_FLOOR_CORNER 0x636676 /* vfc */
+
+#define MAX_RPMPD_STATE 6
+
+#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \
+ static struct rpmpd _platform##_##_active; \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .peer = &_platform##_##_active, \
+ .res_type = RPMPD_SMPA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }; \
+ static struct rpmpd _platform##_##_active = { \
+ .pd = { .name = #_active, }, \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .res_type = RPMPD_SMPA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_LDOA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \
+ DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA)
+
+#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \
+ DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA)
+
+struct rpmpd_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpmpd {
+ struct generic_pm_domain pd;
+ struct rpmpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ bool enabled;
+ const char *res_name;
+ const int res_type;
+ const int res_id;
+ struct qcom_smd_rpm *rpm;
+ __le32 key;
+};
+
+struct rpmpd_desc {
+ struct rpmpd **rpmpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmpd_lock);
+
+/* msm8996 RPM Power domains */
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1);
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2);
+DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26);
+
+DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1);
+DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26);
+
+static struct rpmpd *msm8996_rpmpds[] = {
+ [MSM8996_VDDCX] = &msm8996_vddcx,
+ [MSM8996_VDDCX_AO] = &msm8996_vddcx_ao,
+ [MSM8996_VDDCX_VFC] = &msm8996_vddcx_vfc,
+ [MSM8996_VDDMX] = &msm8996_vddmx,
+ [MSM8996_VDDMX_AO] = &msm8996_vddmx_ao,
+ [MSM8996_VDDSSCX] = &msm8996_vddsscx,
+ [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc,
+};
+
+static const struct rpmpd_desc msm8996_desc = {
+ .rpmpds = msm8996_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8996_rpmpds),
+};
+
+static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
+ { }
+};
+
+static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
+{
+ struct rpmpd_req req = {
+ .key = KEY_ENABLE,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(enable),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ pd->res_type, pd->res_id, &req, sizeof(req));
+}
+
+static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner)
+{
+ struct rpmpd_req req = {
+ .key = pd->key,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(corner),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id,
+ &req, sizeof(req));
+};
+
+static void to_active_sleep(struct rpmpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int rpmpd_aggregate_corner(struct rpmpd *pd)
+{
+ int ret;
+ struct rpmpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner);
+}
+
+static int rpmpd_power_on(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, true);
+ if (ret)
+ goto out;
+
+ pd->enabled = true;
+
+ if (pd->corner)
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_power_off(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, false);
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_set_performance(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ int ret = 0;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ if (state > MAX_RPMPD_STATE)
+ goto out;
+
+ mutex_lock(&rpmpd_lock);
+
+ pd->corner = state;
+
+ if (!pd->enabled && pd->key != KEY_FLOOR_CORNER)
+ goto out;
+
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmpd_probe(struct platform_device *pdev)
+{
+ int i;
+ size_t num;
+ struct genpd_onecell_data *data;
+ struct qcom_smd_rpm *rpm;
+ struct rpmpd **rpmpds;
+ const struct rpmpd_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmpds = desc->rpmpds;
+ num = desc->num_pds;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ data->num_domains = num;
+
+ for (i = 0; i < num; i++) {
+ if (!rpmpds[i]) {
+ dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n",
+ i);
+ continue;
+ }
+
+ rpmpds[i]->rpm = rpm;
+ rpmpds[i]->pd.power_off = rpmpd_power_off;
+ rpmpds[i]->pd.power_on = rpmpd_power_on;
+ rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
+ rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance;
+ pm_genpd_init(&rpmpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmpds[i]->pd;
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmpd_driver = {
+ .driver = {
+ .name = "qcom-rpmpd",
+ .of_match_table = rpmpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmpd_probe,
+};
+
+static int __init rpmpd_init(void)
+{
+ return platform_driver_register(&rpmpd_driver);
+}
+core_initcall(rpmpd_init);
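The resource-type and key constants in rpmpd.c above (RPMPD_SMPA, KEY_CORNER, and so on) are ASCII strings packed little-endian into a u32, which is why each #define carries a /* corn */-style comment. A quick user-space check of that packing; the pack() helper is purely illustrative and not part of the RPM interface:

	#include <stdio.h>
	#include <stdint.h>

	/* Pack up to four ASCII characters little-endian into a u32. */
	static uint32_t pack(const char *s)
	{
		uint32_t v = 0;
		int i;

		for (i = 0; s[i] && i < 4; i++)
			v |= (uint32_t)(unsigned char)s[i] << (8 * i);
		return v;
	}

	int main(void)
	{
		/* Matches the constants in the patch above. */
		printf("smpa = 0x%08x\n", pack("smpa"));	/* 0x61706d73 (RPMPD_SMPA)       */
		printf("ldoa = 0x%08x\n", pack("ldoa"));	/* 0x616f646c (RPMPD_LDOA)       */
		printf("corn = 0x%08x\n", pack("corn"));	/* 0x6e726f63 (KEY_CORNER)       */
		printf("swen = 0x%08x\n", pack("swen"));	/* 0x6e657773 (KEY_ENABLE)       */
		printf("vfc  = 0x%08x\n", pack("vfc"));		/* 0x00636676 (KEY_FLOOR_CORNER) */
		return 0;
	}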
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index b8e63724a49d..9956bb2c63f2 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -227,6 +227,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8974" },
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
+ { .compatible = "qcom,rpm-sdm660" },
{ .compatible = "qcom,rpm-qcs404" },
{}
};
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 4d8012e1205c..68bfca6f20dd 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -44,7 +44,7 @@ config ARCH_RZN1
bool
select ARM_AMBA
-if ARM
+if ARM && ARCH_RENESAS
#comment "Renesas ARM SoCs System Type"
diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c
index e1ac4c0f6640..11050e17ea81 100644
--- a/drivers/soc/renesas/r8a774c0-sysc.c
+++ b/drivers/soc/renesas/r8a774c0-sysc.c
@@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a774c0_areas[] __initdata = {
{ "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A },
};
-static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas,
- unsigned int num_areas, u8 id,
- int new_parent)
-{
- unsigned int i;
-
- for (i = 0; i < num_areas; i++)
- if (areas[i].isr_bit == id) {
- areas[i].parent = new_parent;
- return;
- }
-}
-
/* Fixups for RZ/G2E ES1.0 revision */
static const struct soc_device_attribute r8a774c0[] __initconst = {
{ .soc_id = "r8a774c0", .revision = "ES1.0" },
@@ -50,12 +37,10 @@ static const struct soc_device_attribute r8a774c0[] __initconst = {
static int __init r8a774c0_sysc_init(void)
{
if (soc_device_match(r8a774c0)) {
- rcar_sysc_fix_parent(r8a774c0_areas,
- ARRAY_SIZE(r8a774c0_areas),
- R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B);
- rcar_sysc_fix_parent(r8a774c0_areas,
- ARRAY_SIZE(r8a774c0_areas),
- R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON);
+ /* Fix incorrect 3DG hierarchy */
+ swap(r8a774c0_areas[6], r8a774c0_areas[7]);
+ r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON;
+ r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B;
}
return 0;
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index fe4481676da6..a0b03443d8c1 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -76,6 +76,7 @@ config ARCH_TEGRA_210_SOC
select PINCTRL_TEGRA210
select SOC_TEGRA_FLOWCTRL
select SOC_TEGRA_PMC
+ select TEGRA_TIMER
help
Enable support for the NVIDIA Tegra210 SoC. Also known as Tegra X1,
the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index a33ee8ef8b6b..51625703399e 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fuse->phys = res->start;
fuse->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fuse->base))
- return PTR_ERR(fuse->base);
+ if (IS_ERR(fuse->base)) {
+ err = PTR_ERR(fuse->base);
+ fuse->base = base;
+ return err;
+ }
fuse->clk = devm_clk_get(&pdev->dev, "fuse");
if (IS_ERR(fuse->clk)) {
dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
PTR_ERR(fuse->clk));
+ fuse->base = base;
return PTR_ERR(fuse->clk);
}
@@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
if (fuse->soc->probe) {
err = fuse->soc->probe(fuse);
- if (err < 0)
+ if (err < 0) {
+ fuse->base = base;
return err;
+ }
}
if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 5373f4c16b54..8ed35d9851f8 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -131,7 +131,7 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
- soc_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+ soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 7ea3280279ff..0df258518693 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -20,7 +20,7 @@
#define pr_fmt(fmt) "tegra-pmc: " fmt
-#include <linux/kernel.h>
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
@@ -30,16 +30,17 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/irq.h>
#include <linux/irqdomain.h>
-#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_clk.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reboot.h>
@@ -145,6 +146,11 @@
#define WAKE_AOWAKE_CTRL 0x4f4
#define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0)
+/* for secure PMC */
+#define TEGRA_SMC_PMC 0xc2fffe00
+#define TEGRA_SMC_PMC_READ 0xaa
+#define TEGRA_SMC_PMC_WRITE 0xbb
+
struct tegra_powergate {
struct generic_pm_domain genpd;
struct tegra_pmc *pmc;
@@ -216,6 +222,7 @@ struct tegra_pmc_soc {
bool has_gpu_clamps;
bool needs_mbist_war;
bool has_impl_33v_pwr;
+ bool maybe_tz_only;
const struct tegra_io_pad_soc *io_pads;
unsigned int num_io_pads;
@@ -273,8 +280,12 @@ static const char * const tegra30_reset_sources[] = {
* struct tegra_pmc - NVIDIA Tegra PMC
* @dev: pointer to PMC device structure
* @base: pointer to I/O remapped register region
+ * @wake: pointer to I/O remapped region for WAKE registers
+ * @aotag: pointer to I/O remapped region for AOTAG registers
+ * @scratch: pointer to I/O remapped region for scratch registers
* @clk: pointer to pclk clock
* @soc: pointer to SoC data structure
+ * @tz_only: flag specifying if the PMC can only be accessed via TrustZone
* @debugfs: pointer to debugfs entry
* @rate: currently configured rate of pclk
* @suspend_mode: lowest suspend mode available
@@ -291,6 +302,9 @@ static const char * const tegra30_reset_sources[] = {
* @lp0_vec_size: size of the LP0 warm boot code
* @powergates_available: Bitmap of available power gates
* @powergates_lock: mutex for power gate register access
+ * @pctl_dev: pin controller exposed by the PMC
+ * @domain: IRQ domain provided by the PMC
+ * @irq: chip implementation for the IRQ domain
*/
struct tegra_pmc {
struct device *dev;
@@ -302,6 +316,7 @@ struct tegra_pmc {
struct dentry *debugfs;
const struct tegra_pmc_soc *soc;
+ bool tz_only;
unsigned long rate;
@@ -338,30 +353,85 @@ to_powergate(struct generic_pm_domain *domain)
return container_of(domain, struct tegra_powergate, genpd);
}
-static u32 tegra_pmc_readl(unsigned long offset)
+static u32 tegra_pmc_readl(struct tegra_pmc *pmc, unsigned long offset)
{
+ struct arm_smccc_res res;
+
+ if (pmc->tz_only) {
+ arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_READ, offset, 0, 0,
+ 0, 0, 0, &res);
+ if (res.a0) {
+ if (pmc->dev)
+ dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+ __func__, res.a0);
+ else
+ pr_warn("%s(): SMC failed: %lu\n", __func__,
+ res.a0);
+ }
+
+ return res.a1;
+ }
+
return readl(pmc->base + offset);
}
-static void tegra_pmc_writel(u32 value, unsigned long offset)
+static void tegra_pmc_writel(struct tegra_pmc *pmc, u32 value,
+ unsigned long offset)
{
- writel(value, pmc->base + offset);
+ struct arm_smccc_res res;
+
+ if (pmc->tz_only) {
+ arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_WRITE, offset,
+ value, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ if (pmc->dev)
+ dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+ __func__, res.a0);
+ else
+ pr_warn("%s(): SMC failed: %lu\n", __func__,
+ res.a0);
+ }
+ } else {
+ writel(value, pmc->base + offset);
+ }
}
+static u32 tegra_pmc_scratch_readl(struct tegra_pmc *pmc, unsigned long offset)
+{
+ if (pmc->tz_only)
+ return tegra_pmc_readl(pmc, offset);
+
+ return readl(pmc->scratch + offset);
+}
+
+static void tegra_pmc_scratch_writel(struct tegra_pmc *pmc, u32 value,
+ unsigned long offset)
+{
+ if (pmc->tz_only)
+ tegra_pmc_writel(pmc, value, offset);
+ else
+ writel(value, pmc->scratch + offset);
+}
+
+/*
+ * TODO Figure out a way to call this with the struct tegra_pmc * passed in.
+ * This currently doesn't work because readx_poll_timeout() can only operate
+ * on functions that take a single argument.
+ */
static inline bool tegra_powergate_state(int id)
{
if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
- return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0;
+ return (tegra_pmc_readl(pmc, GPU_RG_CNTRL) & 0x1) == 0;
else
- return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0;
+ return (tegra_pmc_readl(pmc, PWRGATE_STATUS) & BIT(id)) != 0;
}
-static inline bool tegra_powergate_is_valid(int id)
+static inline bool tegra_powergate_is_valid(struct tegra_pmc *pmc, int id)
{
return (pmc->soc && pmc->soc->powergates[id]);
}
-static inline bool tegra_powergate_is_available(int id)
+static inline bool tegra_powergate_is_available(struct tegra_pmc *pmc, int id)
{
return test_bit(id, pmc->powergates_available);
}
@@ -374,7 +444,7 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
return -EINVAL;
for (i = 0; i < pmc->soc->num_powergates; i++) {
- if (!tegra_powergate_is_valid(i))
+ if (!tegra_powergate_is_valid(pmc, i))
continue;
if (!strcmp(name, pmc->soc->powergates[i]))
@@ -386,10 +456,12 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
/**
* tegra_powergate_set() - set the state of a partition
+ * @pmc: power management controller
* @id: partition ID
* @new_state: new state of the partition
*/
-static int tegra_powergate_set(unsigned int id, bool new_state)
+static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
{
bool status;
int err;
@@ -404,7 +476,7 @@ static int tegra_powergate_set(unsigned int id, bool new_state)
return 0;
}
- tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
err = readx_poll_timeout(tegra_powergate_state, id, status,
status == new_state, 10, 100000);
@@ -414,7 +486,8 @@ static int tegra_powergate_set(unsigned int id, bool new_state)
return err;
}
-static int __tegra_powergate_remove_clamping(unsigned int id)
+static int __tegra_powergate_remove_clamping(struct tegra_pmc *pmc,
+ unsigned int id)
{
u32 mask;
@@ -426,7 +499,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id)
*/
if (id == TEGRA_POWERGATE_3D) {
if (pmc->soc->has_gpu_clamps) {
- tegra_pmc_writel(0, GPU_RG_CNTRL);
+ tegra_pmc_writel(pmc, 0, GPU_RG_CNTRL);
goto out;
}
}
@@ -442,7 +515,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id)
else
mask = (1 << id);
- tegra_pmc_writel(mask, REMOVE_CLAMPING);
+ tegra_pmc_writel(pmc, mask, REMOVE_CLAMPING);
out:
mutex_unlock(&pmc->powergates_lock);
@@ -494,7 +567,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
usleep_range(10, 20);
- err = tegra_powergate_set(pg->id, true);
+ err = tegra_powergate_set(pg->pmc, pg->id, true);
if (err < 0)
return err;
@@ -506,7 +579,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
usleep_range(10, 20);
- err = __tegra_powergate_remove_clamping(pg->id);
+ err = __tegra_powergate_remove_clamping(pg->pmc, pg->id);
if (err)
goto disable_clks;
@@ -533,7 +606,7 @@ disable_clks:
usleep_range(10, 20);
powergate_off:
- tegra_powergate_set(pg->id, false);
+ tegra_powergate_set(pg->pmc, pg->id, false);
return err;
}
@@ -558,7 +631,7 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg)
usleep_range(10, 20);
- err = tegra_powergate_set(pg->id, false);
+ err = tegra_powergate_set(pg->pmc, pg->id, false);
if (err)
goto assert_resets;
@@ -579,12 +652,13 @@ disable_clks:
static int tegra_genpd_power_on(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
+ struct device *dev = pg->pmc->dev;
int err;
err = tegra_powergate_power_up(pg, true);
if (err)
- pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
- err);
+ dev_err(dev, "failed to turn on PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -592,12 +666,13 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
static int tegra_genpd_power_off(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
+ struct device *dev = pg->pmc->dev;
int err;
err = tegra_powergate_power_down(pg);
if (err)
- pr_err("failed to turn off PM domain %s: %d\n",
- pg->genpd.name, err);
+ dev_err(dev, "failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -608,10 +683,10 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain)
*/
int tegra_powergate_power_on(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return tegra_powergate_set(id, true);
+ return tegra_powergate_set(pmc, id, true);
}
/**
@@ -620,20 +695,21 @@ int tegra_powergate_power_on(unsigned int id)
*/
int tegra_powergate_power_off(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return tegra_powergate_set(id, false);
+ return tegra_powergate_set(pmc, id, false);
}
EXPORT_SYMBOL(tegra_powergate_power_off);
/**
* tegra_powergate_is_powered() - check if partition is powered
+ * @pmc: power management controller
* @id: partition ID
*/
-int tegra_powergate_is_powered(unsigned int id)
+static int tegra_powergate_is_powered(struct tegra_pmc *pmc, unsigned int id)
{
- if (!tegra_powergate_is_valid(id))
+ if (!tegra_powergate_is_valid(pmc, id))
return -EINVAL;
return tegra_powergate_state(id);
@@ -645,10 +721,10 @@ int tegra_powergate_is_powered(unsigned int id)
*/
int tegra_powergate_remove_clamping(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return __tegra_powergate_remove_clamping(id);
+ return __tegra_powergate_remove_clamping(pmc, id);
}
EXPORT_SYMBOL(tegra_powergate_remove_clamping);
@@ -666,7 +742,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct tegra_powergate *pg;
int err;
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
@@ -681,7 +757,8 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
err = tegra_powergate_power_up(pg, false);
if (err)
- pr_err("failed to turn on partition %d: %d\n", id, err);
+ dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id,
+ err);
kfree(pg);
@@ -691,12 +768,14 @@ EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
/**
* tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID
+ * @pmc: power management controller
* @cpuid: CPU partition ID
*
* Returns the partition ID corresponding to the CPU partition ID or a
* negative error code on failure.
*/
-static int tegra_get_cpu_powergate_id(unsigned int cpuid)
+static int tegra_get_cpu_powergate_id(struct tegra_pmc *pmc,
+ unsigned int cpuid)
{
if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
return pmc->soc->cpu_powergates[cpuid];
@@ -712,11 +791,11 @@ bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return false;
- return tegra_powergate_is_powered(id);
+ return tegra_powergate_is_powered(pmc, id);
}
/**
@@ -727,11 +806,11 @@ int tegra_pmc_cpu_power_on(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return id;
- return tegra_powergate_set(id, true);
+ return tegra_powergate_set(pmc, id, true);
}
/**
@@ -742,7 +821,7 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return id;
@@ -755,7 +834,7 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
const char *cmd = data;
u32 value;
- value = readl(pmc->scratch + pmc->soc->regs->scratch0);
+ value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
value &= ~PMC_SCRATCH0_MODE_MASK;
if (cmd) {
@@ -769,12 +848,12 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
value |= PMC_SCRATCH0_MODE_RCM;
}
- writel(value, pmc->scratch + pmc->soc->regs->scratch0);
+ tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
/* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_MAIN_RST;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
return NOTIFY_DONE;
}
@@ -793,7 +872,7 @@ static int powergate_show(struct seq_file *s, void *data)
seq_printf(s, "------------------\n");
for (i = 0; i < pmc->soc->num_powergates; i++) {
- status = tegra_powergate_is_powered(i);
+ status = tegra_powergate_is_powered(pmc, i);
if (status < 0)
continue;
@@ -855,12 +934,13 @@ err:
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
struct device_node *np, bool off)
{
+ struct device *dev = pg->pmc->dev;
int err;
pg->reset = of_reset_control_array_get_exclusive(np);
if (IS_ERR(pg->reset)) {
err = PTR_ERR(pg->reset);
- pr_err("failed to get device resets: %d\n", err);
+ dev_err(dev, "failed to get device resets: %d\n", err);
return err;
}
@@ -877,6 +957,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
{
+ struct device *dev = pmc->dev;
struct tegra_powergate *pg;
int id, err;
bool off;
@@ -887,7 +968,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- pr_err("powergate lookup failed for %pOFn: %d\n", np, id);
+ dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id);
goto free_mem;
}
@@ -903,17 +984,17 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
pg->genpd.power_on = tegra_genpd_power_on;
pg->pmc = pmc;
- off = !tegra_powergate_is_powered(pg->id);
+ off = !tegra_powergate_is_powered(pmc, pg->id);
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- pr_err("failed to get clocks for %pOFn: %d\n", np, err);
+ dev_err(dev, "failed to get clocks for %pOFn: %d\n", np, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- pr_err("failed to get resets for %pOFn: %d\n", np, err);
+ dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
@@ -926,19 +1007,19 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = pm_genpd_init(&pg->genpd, NULL, off);
if (err < 0) {
- pr_err("failed to initialise PM domain %pOFn: %d\n", np,
+ dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
err);
goto remove_resets;
}
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- pr_err("failed to add PM domain provider for %pOFn: %d\n",
- np, err);
+ dev_err(dev, "failed to add PM domain provider for %pOFn: %d\n",
+ np, err);
goto remove_genpd;
}
- pr_debug("added PM domain %s\n", pg->genpd.name);
+ dev_dbg(dev, "added PM domain %s\n", pg->genpd.name);
return;
@@ -994,7 +1075,8 @@ tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
return NULL;
}
-static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
+static int tegra_io_pad_get_dpd_register_bit(struct tegra_pmc *pmc,
+ enum tegra_io_pad id,
unsigned long *request,
unsigned long *status,
u32 *mask)
@@ -1003,7 +1085,7 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
pad = tegra_io_pad_find(pmc, id);
if (!pad) {
- pr_err("invalid I/O pad ID %u\n", id);
+ dev_err(pmc->dev, "invalid I/O pad ID %u\n", id);
return -ENOENT;
}
@@ -1023,43 +1105,44 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
return 0;
}
-static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
- unsigned long *status, u32 *mask)
+static int tegra_io_pad_prepare(struct tegra_pmc *pmc, enum tegra_io_pad id,
+ unsigned long *request, unsigned long *status,
+ u32 *mask)
{
unsigned long rate, value;
int err;
- err = tegra_io_pad_get_dpd_register_bit(id, request, status, mask);
+ err = tegra_io_pad_get_dpd_register_bit(pmc, id, request, status, mask);
if (err)
return err;
if (pmc->clk) {
rate = clk_get_rate(pmc->clk);
if (!rate) {
- pr_err("failed to get clock rate\n");
+ dev_err(pmc->dev, "failed to get clock rate\n");
return -ENODEV;
}
- tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
+ tegra_pmc_writel(pmc, DPD_SAMPLE_ENABLE, DPD_SAMPLE);
/* must be at least 200 ns, in APB (PCLK) clock cycles */
value = DIV_ROUND_UP(1000000000, rate);
value = DIV_ROUND_UP(200, value);
- tegra_pmc_writel(value, SEL_DPD_TIM);
+ tegra_pmc_writel(pmc, value, SEL_DPD_TIM);
}
return 0;
}
-static int tegra_io_pad_poll(unsigned long offset, u32 mask,
- u32 val, unsigned long timeout)
+static int tegra_io_pad_poll(struct tegra_pmc *pmc, unsigned long offset,
+ u32 mask, u32 val, unsigned long timeout)
{
u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_after(timeout, jiffies)) {
- value = tegra_pmc_readl(offset);
+ value = tegra_pmc_readl(pmc, offset);
if ((value & mask) == val)
return 0;
@@ -1069,10 +1152,10 @@ static int tegra_io_pad_poll(unsigned long offset, u32 mask,
return -ETIMEDOUT;
}
-static void tegra_io_pad_unprepare(void)
+static void tegra_io_pad_unprepare(struct tegra_pmc *pmc)
{
if (pmc->clk)
- tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
+ tegra_pmc_writel(pmc, DPD_SAMPLE_DISABLE, DPD_SAMPLE);
}
/**
@@ -1089,21 +1172,21 @@ int tegra_io_pad_power_enable(enum tegra_io_pad id)
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
if (err < 0) {
- pr_err("failed to prepare I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
+ tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_OFF | mask, request);
- err = tegra_io_pad_poll(status, mask, 0, 250);
+ err = tegra_io_pad_poll(pmc, status, mask, 0, 250);
if (err < 0) {
- pr_err("failed to enable I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to enable I/O pad: %d\n", err);
goto unlock;
}
- tegra_io_pad_unprepare();
+ tegra_io_pad_unprepare(pmc);
unlock:
mutex_unlock(&pmc->powergates_lock);
@@ -1125,21 +1208,21 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id)
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
if (err < 0) {
- pr_err("failed to prepare I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
+ tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_ON | mask, request);
- err = tegra_io_pad_poll(status, mask, mask, 250);
+ err = tegra_io_pad_poll(pmc, status, mask, mask, 250);
if (err < 0) {
- pr_err("failed to disable I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to disable I/O pad: %d\n", err);
goto unlock;
}
- tegra_io_pad_unprepare();
+ tegra_io_pad_unprepare(pmc);
unlock:
mutex_unlock(&pmc->powergates_lock);
@@ -1147,22 +1230,24 @@ unlock:
}
EXPORT_SYMBOL(tegra_io_pad_power_disable);
-static int tegra_io_pad_is_powered(enum tegra_io_pad id)
+static int tegra_io_pad_is_powered(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
unsigned long request, status;
u32 mask, value;
int err;
- err = tegra_io_pad_get_dpd_register_bit(id, &request, &status, &mask);
+ err = tegra_io_pad_get_dpd_register_bit(pmc, id, &request, &status,
+ &mask);
if (err)
return err;
- value = tegra_pmc_readl(status);
+ value = tegra_pmc_readl(pmc, status);
return !(value & mask);
}
-static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
+static int tegra_io_pad_set_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id,
+ int voltage)
{
const struct tegra_io_pad_soc *pad;
u32 value;
@@ -1177,29 +1262,29 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
mutex_lock(&pmc->powergates_lock);
if (pmc->soc->has_impl_33v_pwr) {
- value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+ value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
value &= ~BIT(pad->voltage);
else
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_IMPL_E_33V_PWR);
+ tegra_pmc_writel(pmc, value, PMC_IMPL_E_33V_PWR);
} else {
/* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
- value = tegra_pmc_readl(PMC_PWR_DET);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET);
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_PWR_DET);
+ tegra_pmc_writel(pmc, value, PMC_PWR_DET);
/* update I/O voltage */
- value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
value &= ~BIT(pad->voltage);
else
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+ tegra_pmc_writel(pmc, value, PMC_PWR_DET_VALUE);
}
mutex_unlock(&pmc->powergates_lock);
@@ -1209,7 +1294,7 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
return 0;
}
-static int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
const struct tegra_io_pad_soc *pad;
u32 value;
@@ -1222,9 +1307,9 @@ static int tegra_io_pad_get_voltage(enum tegra_io_pad id)
return -ENOTSUPP;
if (pmc->soc->has_impl_33v_pwr)
- value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+ value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
else
- value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
if ((value & BIT(pad->voltage)) == 0)
return TEGRA_IO_PAD_VOLTAGE_1V8;
@@ -1296,21 +1381,21 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
wmb();
pmc->rate = rate;
}
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
value |= PMC_CNTRL_CPU_PWRREQ_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
#endif
@@ -1432,13 +1517,13 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
if (of_property_read_u32(np, "nvidia,pinmux-id", &pinmux))
pinmux = 0;
- value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+ value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
value |= PMC_SENSOR_CTRL_SCRATCH_WRITE;
- tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+ tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
value = (reg_data << PMC_SCRATCH54_DATA_SHIFT) |
(reg_addr << PMC_SCRATCH54_ADDR_SHIFT);
- tegra_pmc_writel(value, PMC_SCRATCH54);
+ tegra_pmc_writel(pmc, value, PMC_SCRATCH54);
value = PMC_SCRATCH55_RESET_TEGRA;
value |= ctrl_id << PMC_SCRATCH55_CNTRL_ID_SHIFT;
@@ -1456,11 +1541,11 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
value |= checksum << PMC_SCRATCH55_CHECKSUM_SHIFT;
- tegra_pmc_writel(value, PMC_SCRATCH55);
+ tegra_pmc_writel(pmc, value, PMC_SCRATCH55);
- value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+ value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
value |= PMC_SENSOR_CTRL_ENABLE_RST;
- tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+ tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
dev_info(pmc->dev, "emergency thermal reset enabled\n");
@@ -1470,12 +1555,16 @@ out:
static int tegra_io_pad_pinctrl_get_groups_count(struct pinctrl_dev *pctl_dev)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
return pmc->soc->num_io_pads;
}
-static const char *tegra_io_pad_pinctrl_get_group_name(
- struct pinctrl_dev *pctl, unsigned int group)
+static const char *tegra_io_pad_pinctrl_get_group_name(struct pinctrl_dev *pctl,
+ unsigned int group)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl);
+
return pmc->soc->io_pads[group].name;
}
@@ -1484,8 +1573,11 @@ static int tegra_io_pad_pinctrl_get_group_pins(struct pinctrl_dev *pctl_dev,
const unsigned int **pins,
unsigned int *num_pins)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
*pins = &pmc->soc->io_pads[group].id;
*num_pins = 1;
+
return 0;
}
@@ -1500,27 +1592,33 @@ static const struct pinctrl_ops tegra_io_pad_pinctrl_ops = {
static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev,
unsigned int pin, unsigned long *config)
{
- const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin);
enum pin_config_param param = pinconf_to_config_param(*config);
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+ const struct tegra_io_pad_soc *pad;
int ret;
u32 arg;
+ pad = tegra_io_pad_find(pmc, pin);
if (!pad)
return -EINVAL;
switch (param) {
case PIN_CONFIG_POWER_SOURCE:
- ret = tegra_io_pad_get_voltage(pad->id);
+ ret = tegra_io_pad_get_voltage(pmc, pad->id);
if (ret < 0)
return ret;
+
arg = ret;
break;
+
case PIN_CONFIG_LOW_POWER_MODE:
- ret = tegra_io_pad_is_powered(pad->id);
+ ret = tegra_io_pad_is_powered(pmc, pad->id);
if (ret < 0)
return ret;
+
arg = !ret;
break;
+
default:
return -EINVAL;
}
@@ -1534,12 +1632,14 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
unsigned int pin, unsigned long *configs,
unsigned int num_configs)
{
- const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin);
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+ const struct tegra_io_pad_soc *pad;
enum pin_config_param param;
unsigned int i;
int err;
u32 arg;
+ pad = tegra_io_pad_find(pmc, pin);
if (!pad)
return -EINVAL;
@@ -1560,7 +1660,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
if (arg != TEGRA_IO_PAD_VOLTAGE_1V8 &&
arg != TEGRA_IO_PAD_VOLTAGE_3V3)
return -EINVAL;
- err = tegra_io_pad_set_voltage(pad->id, arg);
+ err = tegra_io_pad_set_voltage(pmc, pad->id, arg);
if (err)
return err;
break;
@@ -1585,7 +1685,7 @@ static struct pinctrl_desc tegra_pmc_pctl_desc = {
static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
{
- int err = 0;
+ int err;
if (!pmc->soc->num_pin_descs)
return 0;
@@ -1598,18 +1698,20 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
pmc);
if (IS_ERR(pmc->pctl_dev)) {
err = PTR_ERR(pmc->pctl_dev);
- dev_err(pmc->dev, "unable to register pinctrl, %d\n", err);
+ dev_err(pmc->dev, "failed to register pin controller: %d\n",
+ err);
+ return err;
}
- return err;
+ return 0;
}
static ssize_t reset_reason_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u32 value, rst_src;
- value = tegra_pmc_readl(pmc->soc->regs->rst_status);
+ value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
rst_src = (value & pmc->soc->regs->rst_source_mask) >>
pmc->soc->regs->rst_source_shift;
@@ -1619,11 +1721,11 @@ static ssize_t reset_reason_show(struct device *dev,
static DEVICE_ATTR_RO(reset_reason);
static ssize_t reset_level_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u32 value, rst_lvl;
- value = tegra_pmc_readl(pmc->soc->regs->rst_status);
+ value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
rst_lvl = (value & pmc->soc->regs->rst_level_mask) >>
pmc->soc->regs->rst_level_shift;
@@ -1641,16 +1743,16 @@ static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc)
err = device_create_file(dev, &dev_attr_reset_reason);
if (err < 0)
dev_warn(dev,
- "failed to create attr \"reset_reason\": %d\n",
- err);
+ "failed to create attr \"reset_reason\": %d\n",
+ err);
}
if (pmc->soc->reset_levels) {
err = device_create_file(dev, &dev_attr_reset_level);
if (err < 0)
dev_warn(dev,
- "failed to create attr \"reset_level\": %d\n",
- err);
+ "failed to create attr \"reset_level\": %d\n",
+ err);
}
}
@@ -1920,6 +2022,8 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->base = base;
mutex_unlock(&pmc->powergates_lock);
+ platform_set_drvdata(pdev, pmc);
+
return 0;
cleanup_restart_handler:
@@ -1932,14 +2036,18 @@ cleanup_debugfs:
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
static int tegra_pmc_suspend(struct device *dev)
{
- tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41);
+ struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+ tegra_pmc_writel(pmc, virt_to_phys(tegra_resume), PMC_SCRATCH41);
return 0;
}
static int tegra_pmc_resume(struct device *dev)
{
- tegra_pmc_writel(0x0, PMC_SCRATCH41);
+ struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+ tegra_pmc_writel(pmc, 0x0, PMC_SCRATCH41);
return 0;
}
@@ -1976,11 +2084,11 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
u32 value;
/* Always enable CPU power request */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_CPU_PWRREQ_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
if (pmc->sysclkreq_high)
value &= ~PMC_CNTRL_SYSCLK_POLARITY;
@@ -1988,12 +2096,12 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
value |= PMC_CNTRL_SYSCLK_POLARITY;
/* configure the output polarity while the request is tristated */
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
/* now enable the request */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_SYSCLK_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
@@ -2002,14 +2110,14 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
{
u32 value;
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
if (invert)
value |= PMC_CNTRL_INTR_POLARITY;
else
value &= ~PMC_CNTRL_INTR_POLARITY;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
static const struct tegra_pmc_soc tegra20_pmc_soc = {
@@ -2019,6 +2127,9 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2063,7 +2174,9 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.cpu_powergates = tegra30_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2112,7 +2225,9 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.cpu_powergates = tegra114_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2221,7 +2336,9 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.cpu_powergates = tegra124_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra124_io_pads),
.io_pads = tegra124_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra124_pin_descs),
@@ -2325,8 +2442,9 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.cpu_powergates = tegra210_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
- .has_impl_33v_pwr = false,
.needs_mbist_war = true,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = true,
.num_io_pads = ARRAY_SIZE(tegra210_io_pads),
.io_pads = tegra210_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra210_pin_descs),
@@ -2413,7 +2531,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
index = of_property_match_string(np, "reg-names", "wake");
if (index < 0) {
- pr_err("failed to find PMC wake registers\n");
+ dev_err(pmc->dev, "failed to find PMC wake registers\n");
return;
}
@@ -2421,7 +2539,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
wake = ioremap_nocache(regs.start, resource_size(&regs));
if (!wake) {
- pr_err("failed to map PMC wake registers\n");
+ dev_err(pmc->dev, "failed to map PMC wake registers\n");
return;
}
@@ -2438,7 +2556,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
}
static const struct tegra_wake_event tegra186_wake_events[] = {
- TEGRA_WAKE_GPIO("power", 29, 1, TEGRA_AON_GPIO(FF, 0)),
+ TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
@@ -2449,7 +2567,9 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = true,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra186_io_pads),
.io_pads = tegra186_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra186_pin_descs),
@@ -2527,6 +2647,9 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra194_io_pads),
.io_pads = tegra194_io_pads,
.regs = &tegra186_pmc_regs,
@@ -2561,6 +2684,32 @@ static struct platform_driver tegra_pmc_driver = {
};
builtin_platform_driver(tegra_pmc_driver);
+static bool __init tegra_pmc_detect_tz_only(struct tegra_pmc *pmc)
+{
+ u32 value, saved;
+
+ saved = readl(pmc->base + pmc->soc->regs->scratch0);
+ value = saved ^ 0xffffffff;
+
+ if (value == 0xffffffff)
+ value = 0xdeadbeef;
+
+ /* write pattern and read it back */
+ writel(value, pmc->base + pmc->soc->regs->scratch0);
+ value = readl(pmc->base + pmc->soc->regs->scratch0);
+
+ /* if we read all-zeroes, access is restricted to TZ only */
+ if (value == 0) {
+ pr_info("access to PMC is restricted to TZ\n");
+ return true;
+ }
+
+ /* restore original value */
+ writel(saved, pmc->base + pmc->soc->regs->scratch0);
+
+ return false;
+}
+
/*
* Early initialization to allow access to registers in the very early boot
* process.
@@ -2623,6 +2772,9 @@ static int __init tegra_pmc_early_init(void)
if (np) {
pmc->soc = match->data;
+ if (pmc->soc->maybe_tz_only)
+ pmc->tz_only = tegra_pmc_detect_tz_only(pmc);
+
tegra_powergate_init(pmc, np);
/*
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index e05ab16d9a9e..6285cd8efb21 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -598,7 +598,7 @@ static int pktdma_init_chan(struct knav_dma_device *dma,
INIT_LIST_HEAD(&chan->list);
chan->dma = dma;
- chan->direction = DMA_NONE;
+ chan->direction = DMA_TRANS_NONE;
atomic_set(&chan->ref_count, 0);
spin_lock_init(&chan->lock);
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 687c8f3cd955..01e76b58dd78 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -17,4 +17,24 @@ config XILINX_VCU
To compile this driver as a module, choose M here: the
module will be called xlnx_vcu.
+config ZYNQMP_POWER
+ bool "Enable Xilinx Zynq MPSoC Power Management driver"
+ depends on PM && ARCH_ZYNQMP
+ default y
+ help
+ Say yes to enable power management support for the ZynqMP SoC.
+ This driver uses the firmware driver as an interface for power
+ management requests to the firmware. It registers an ISR to
+ handle power management callbacks from the firmware.
+ If in doubt, say N.
+
+config ZYNQMP_PM_DOMAINS
+ bool "Enable Zynq MPSoC generic PM domains"
+ default y
+ depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE
+ select PM_GENERIC_DOMAINS
+ help
+ Say yes to enable device power management through PM domains.
+ If in doubt, say N.
+
endmenu
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index dee8fd51e303..f66bfea5de17 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
+obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
+obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
new file mode 100644
index 000000000000..354d256e6e00
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP Generic PM domain support
+ *
+ * Copyright (C) 2015-2018 Xilinx, Inc.
+ *
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_NUM_DOMAINS (100)
+/* Flag stating if PM nodes mapped to the PM domain have been requested */
+#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
+
+/**
+ * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
+ * @gpd: Generic power domain
+ * @node_id: PM node ID corresponding to device inside PM domain
+ * @flags: ZynqMP PM domain flags
+ */
+struct zynqmp_pm_domain {
+ struct generic_pm_domain gpd;
+ u32 node_id;
+ u8 flags;
+};
+
+/**
+ * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
+ * path
+ * @dev: Device to check for wakeup source path
+ * @not_used: Data member (not required)
+ *
+ * This function checks the device's child hierarchy to determine whether
+ * any device is set as a wakeup source.
+ *
+ * Return: 1 if device is in wakeup source path else 0
+ */
+static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
+{
+ int may_wakeup;
+
+ may_wakeup = device_may_wakeup(dev);
+ if (may_wakeup)
+ return may_wakeup;
+
+ return device_for_each_child(dev, NULL,
+ zynqmp_gpd_is_active_wakeup_path);
+}
+
+/**
+ * zynqmp_gpd_power_on() - Power on PM domain
+ * @domain: Generic PM domain
+ *
+ * This function is called before devices inside a PM domain are resumed, to
+ * power on PM domain.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_requirement)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+ ret = eemi_ops->set_requirement(pd->node_id,
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ ZYNQMP_PM_MAX_QOS,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret) {
+ pr_err("%s() %s set requirement for node %d failed: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pr_debug("%s() Powered on %s domain\n", __func__, domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_power_off() - Power off PM domain
+ * @domain: Generic PM domain
+ *
+ * This function is called after devices inside a PM domain are suspended, to
+ * power off PM domain.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct pm_domain_data *pdd, *tmp;
+ struct zynqmp_pm_domain *pd;
+ u32 capabilities = 0;
+ bool may_wakeup;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_requirement)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If domain is already released there is nothing to be done */
+ if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) {
+ pr_debug("%s() %s domain is already released\n",
+ __func__, domain->name);
+ return 0;
+ }
+
+ list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) {
+ /* If device is in wakeup path, set capability to WAKEUP */
+ may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL);
+ if (may_wakeup) {
+ dev_dbg(pdd->dev, "device is in wakeup path in %s\n",
+ domain->name);
+ capabilities = ZYNQMP_PM_CAPABILITY_WAKEUP;
+ break;
+ }
+ }
+
+ ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0,
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ /*
+ * If powering down any node inside this domain fails,
+ * report and return the error.
+ */
+ if (ret) {
+ pr_err("%s() %s set requirement for node %d failed: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pr_debug("%s() Powered off %s domain\n", __func__, domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_attach_dev() - Attach device to the PM domain
+ * @domain: Generic PM domain
+ * @dev: Device to attach
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->request_node)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If this is not the first device to attach there is nothing to do */
+ if (domain->device_count)
+ return 0;
+
+ ret = eemi_ops->request_node(pd->node_id, 0, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ /* If requesting a node fails print and return the error */
+ if (ret) {
+ pr_err("%s() %s request failed for node %d: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED;
+
+ pr_debug("%s() %s attached to %s domain\n", __func__,
+ dev_name(dev), domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_detach_dev() - Detach device from the PM domain
+ * @domain: Generic PM domain
+ * @dev: Device to detach
+ */
+static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->release_node)
+ return;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If this is not the last device to detach there is nothing to do */
+ if (domain->device_count)
+ return;
+
+ ret = eemi_ops->release_node(pd->node_id);
+ /* If releasing a node fails print the error and return */
+ if (ret) {
+ pr_err("%s() %s release failed for node %d: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return;
+ }
+
+ pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED;
+
+ pr_debug("%s() %s detached from %s domain\n", __func__,
+ dev_name(dev), domain->name);
+}
+
+static struct generic_pm_domain *zynqmp_gpd_xlate
+ (struct of_phandle_args *genpdspec, void *data)
+{
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int i, idx = genpdspec->args[0];
+ struct zynqmp_pm_domain *pd;
+
+ pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd);
+
+ if (genpdspec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ /* Check for existing pm domains */
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
+ if (pd[i].node_id == idx)
+ goto done;
+ }
+
+ /*
+ * No existing power domain was found for this index, so claim an
+ * empty node_id slot in the power-domain list.
+ */
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
+ if (pd[i].node_id == 0) {
+ pd[i].node_id = idx;
+ break;
+ }
+ }
+
+done:
+ if (i == ZYNQMP_NUM_DOMAINS || !genpd_data->domains[i])
+ return ERR_PTR(-ENOENT);
+
+ return genpd_data->domains[i];
+}
+
+static int zynqmp_gpd_probe(struct platform_device *pdev)
+{
+ int i;
+ struct genpd_onecell_data *zynqmp_pd_data;
+ struct generic_pm_domain **domains;
+ struct zynqmp_pm_domain *pd;
+ struct device *dev = &pdev->dev;
+
+ pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ zynqmp_pd_data = devm_kzalloc(dev, sizeof(*zynqmp_pd_data), GFP_KERNEL);
+ if (!zynqmp_pd_data)
+ return -ENOMEM;
+
+ zynqmp_pd_data->xlate = zynqmp_gpd_xlate;
+
+ domains = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*domains),
+ GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) {
+ pd->node_id = 0;
+ pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i);
+ pd->gpd.power_off = zynqmp_gpd_power_off;
+ pd->gpd.power_on = zynqmp_gpd_power_on;
+ pd->gpd.attach_dev = zynqmp_gpd_attach_dev;
+ pd->gpd.detach_dev = zynqmp_gpd_detach_dev;
+
+ domains[i] = &pd->gpd;
+
+ /* Mark all PM domains as initially powered off */
+ pm_genpd_init(&pd->gpd, NULL, true);
+ }
+
+ zynqmp_pd_data->domains = domains;
+ zynqmp_pd_data->num_domains = ZYNQMP_NUM_DOMAINS;
+ of_genpd_add_provider_onecell(dev->parent->of_node, zynqmp_pd_data);
+
+ return 0;
+}
+
+static int zynqmp_gpd_remove(struct platform_device *pdev)
+{
+ of_genpd_del_provider(pdev->dev.parent->of_node);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_power_domain_driver = {
+ .driver = {
+ .name = "zynqmp_power_controller",
+ },
+ .probe = zynqmp_gpd_probe,
+ .remove = zynqmp_gpd_remove,
+};
+module_platform_driver(zynqmp_power_domain_driver);
+
+MODULE_ALIAS("platform:zynqmp_power_controller");
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
new file mode 100644
index 000000000000..771cb59b9d22
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Power Management
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+enum pm_suspend_mode {
+ PM_SUSPEND_MODE_FIRST = 0,
+ PM_SUSPEND_MODE_STD = PM_SUSPEND_MODE_FIRST,
+ PM_SUSPEND_MODE_POWER_OFF,
+};
+
+#define PM_SUSPEND_MODE_FIRST PM_SUSPEND_MODE_STD
+
+static const char *const suspend_modes[] = {
+ [PM_SUSPEND_MODE_STD] = "standard",
+ [PM_SUSPEND_MODE_POWER_OFF] = "power-off",
+};
+
+static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB,
+ PM_NOTIFY_CB,
+};
+
+static void zynqmp_pm_get_callback_data(u32 *buf)
+{
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+}
+
+static irqreturn_t zynqmp_pm_isr(int irq, void *data)
+{
+ u32 payload[CB_PAYLOAD_SIZE];
+
+ zynqmp_pm_get_callback_data(payload);
+
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ switch (payload[1]) {
+ case SUSPEND_SYSTEM_SHUTDOWN:
+ orderly_poweroff(true);
+ break;
+ case SUSPEND_POWER_REQUEST:
+ pm_suspend(PM_SUSPEND_MEM);
+ break;
+ default:
+ pr_err("%s Unsupported InitSuspendCb reason "
+ "code %d\n", __func__, payload[1]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t suspend_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int md;
+
+ for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+ if (suspend_modes[md]) {
+ if (md == suspend_mode)
+ s += sprintf(s, "[%s] ", suspend_modes[md]);
+ else
+ s += sprintf(s, "%s ", suspend_modes[md]);
+ }
+
+ /* Convert last space to newline */
+ if (s != buf)
+ *(s - 1) = '\n';
+ return (s - buf);
+}
+
+static ssize_t suspend_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int md, ret = -EINVAL;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_suspend_mode)
+ return ret;
+
+ for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+ if (suspend_modes[md] &&
+ sysfs_streq(suspend_modes[md], buf)) {
+ ret = 0;
+ break;
+ }
+
+ if (!ret && md != suspend_mode) {
+ ret = eemi_ops->set_suspend_mode(md);
+ if (likely(!ret))
+ suspend_mode = md;
+ }
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(suspend_mode);
+
+static int zynqmp_pm_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ u32 pm_api_version;
+
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize)
+ return -ENXIO;
+
+ eemi_ops->init_finalize();
+ eemi_ops->get_api_version(&pm_api_version);
+
+ /* Check PM API version number */
+ if (pm_api_version < ZYNQMP_PM_VERSION)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev), &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed "
+ "with %d\n", irq, ret);
+ return ret;
+ }
+
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to create sysfs interface\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_pm_remove(struct platform_device *pdev)
+{
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+
+ return 0;
+}
+
+static const struct of_device_id pm_of_match[] = {
+ { .compatible = "xlnx,zynqmp-power", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, pm_of_match);
+
+static struct platform_driver zynqmp_pm_platform_driver = {
+ .probe = zynqmp_pm_probe,
+ .remove = zynqmp_pm_remove,
+ .driver = {
+ .name = "zynqmp_power",
+ .of_match_table = pm_of_match,
+ },
+};
+module_platform_driver(zynqmp_pm_platform_driver);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 9f89cb134549..f761655e2a36 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -63,7 +63,7 @@ config SPI_ALTERA
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
- depends on ATH79 && GPIOLIB
+ depends on ATH79 || COMPILE_TEST
select SPI_BITBANG
help
This enables support for the SPI controller present on the
@@ -268,6 +268,27 @@ config SPI_FSL_LPSPI
help
This enables Freescale i.MX LPSPI controllers in master mode.
+config SPI_FSL_QUADSPI
+ tristate "Freescale QSPI controller"
+ depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ Up to four flash chips can be connected on two buses with two
+ chipselects each.
+ This controller does not support generic SPI messages. It only
+ supports the high-level SPI memory interface.
+
+config SPI_NXP_FLEXSPI
+ tristate "NXP Flex SPI controller"
+ depends on ARCH_LAYERSCAPE || HAS_IOMEM
+ help
+ This enables support for the Flex SPI controller in master mode.
+ Up to four slave devices can be connected on two buses with two
+ chipselects each.
+ This controller does not support generic SPI messages and only
+ supports the high-level SPI memory interface.
+
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GPIOLIB || COMPILE_TEST
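Both new entries above describe controllers that speak only the spi-mem interface rather than arbitrary SPI messages. As a rough, hedged illustration of what that means for a client (the opcode, address length and bus widths below are just example values, not taken from either driver), a flash read is expressed as a single spi_mem_op:

#include <linux/errno.h>
#include <linux/spi/spi-mem.h>

/* Illustrative only: a quad-output fast read described as a spi_mem_op. */
static int example_quad_read(struct spi_mem *mem, u32 addr, void *buf,
			     unsigned int len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, addr, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));

	/* let the controller driver veto operations it cannot encode */
	if (!spi_mem_supports_op(mem, &op))
		return -EOPNOTSUPP;

	return spi_mem_exec_op(mem, &op);
}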
@@ -296,8 +317,7 @@ config SPI_IMX
depends on ARCH_MXC || COMPILE_TEST
select SPI_BITBANG
help
- This enables using the Freescale i.MX SPI controllers in master
- mode.
+ This enables support for the Freescale i.MX SPI controllers.
config SPI_JCORE
tristate "J-Core SPI Master"
@@ -372,7 +392,7 @@ config SPI_FSL_DSPI
depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
- mode. VF610 platform uses the controller.
+ mode. VF610, LS1021A and ColdFire platforms use the controller.
config SPI_FSL_ESPI
tristate "Freescale eSPI controller"
@@ -631,6 +651,12 @@ config SPI_SH_HSPI
help
SPI driver for SuperH HSPI blocks.
+config SPI_SIFIVE
+ tristate "SiFive SPI controller"
+ depends on HAS_IOMEM
+ help
+ This exposes the SPI controller IP from SiFive.
+
config SPI_SIRF
tristate "CSR SiRFprimaII SPI controller"
depends on SIRF_DMA
@@ -665,7 +691,7 @@ config SPI_STM32
tristate "STMicroelectronics STM32 SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
help
- SPI driver for STMicroelectonics STM32 SoCs.
+ SPI driver for STMicroelectronics STM32 SoCs.
STM32 SPI controller supports DMA and PIO modes. When DMA
is not available, the driver automatically falls back to
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f29627040dfb..d8fc03c9faa2 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
+obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
+obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
@@ -93,6 +95,7 @@ obj-$(CONFIG_SPI_SH) += spi-sh.o
obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
+obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index ddc712410812..fffc21cd5f79 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Atmel QSPI Controller
*
@@ -7,31 +8,19 @@
* Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
* Author: Piotr Bugalski <bugalski.piotr@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
-#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
-#include <linux/of.h>
-
#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/spi/spi-mem.h>
/* QSPI register offsets */
@@ -47,7 +36,9 @@
#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
+#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
#define QSPI_IFR 0x0038 /* Instruction Frame Register */
+#define QSPI_RICR 0x003C /* Read Instruction Code Register */
#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */
@@ -100,7 +91,7 @@
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
-/* Bitfields in QSPI_ICR (Instruction Code Register) */
+/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
@@ -125,14 +116,12 @@
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
-#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12)
-#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12)
-#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12)
-#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12)
-#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 13)
+#define QSPI_IFR_TFRTYP_MEM BIT(12)
+#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
@@ -148,24 +137,31 @@
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
+struct atmel_qspi_caps {
+ bool has_qspick;
+ bool has_ricr;
+};
struct atmel_qspi {
void __iomem *regs;
void __iomem *mem;
- struct clk *clk;
+ struct clk *pclk;
+ struct clk *qspick;
struct platform_device *pdev;
+ const struct atmel_qspi_caps *caps;
u32 pending;
+ u32 mr;
struct completion cmd_completion;
};
-struct qspi_mode {
+struct atmel_qspi_mode {
u8 cmd_buswidth;
u8 addr_buswidth;
u8 data_buswidth;
u32 config;
};
-static const struct qspi_mode sama5d2_qspi_modes[] = {
+static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
@@ -175,19 +171,8 @@ static const struct qspi_mode sama5d2_qspi_modes[] = {
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
-/* Register access functions */
-static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg)
-{
- return readl_relaxed(aq->regs + reg);
-}
-
-static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
-{
- writel_relaxed(value, aq->regs + reg);
-}
-
-static inline bool is_compatible(const struct spi_mem_op *op,
- const struct qspi_mode *mode)
+static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
+ const struct atmel_qspi_mode *mode)
{
if (op->cmd.buswidth != mode->cmd_buswidth)
return false;
@@ -201,21 +186,21 @@ static inline bool is_compatible(const struct spi_mem_op *op,
return true;
}
-static int find_mode(const struct spi_mem_op *op)
+static int atmel_qspi_find_mode(const struct spi_mem_op *op)
{
u32 i;
- for (i = 0; i < ARRAY_SIZE(sama5d2_qspi_modes); i++)
- if (is_compatible(op, &sama5d2_qspi_modes[i]))
+ for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
+ if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
return i;
- return -1;
+ return -ENOTSUPP;
}
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- if (find_mode(op) < 0)
+ if (atmel_qspi_find_mode(op) < 0)
return false;
/* special case not supported by hardware */
@@ -226,29 +211,37 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
return true;
}
-static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
+ const struct spi_mem_op *op, u32 *offset)
{
- struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
- int mode;
+ u32 iar, icr, ifr;
u32 dummy_cycles = 0;
- u32 iar, icr, ifr, sr;
- int err = 0;
+ int mode;
iar = 0;
icr = QSPI_ICR_INST(op->cmd.opcode);
ifr = QSPI_IFR_INSTEN;
- qspi_writel(aq, QSPI_MR, QSPI_MR_SMM);
-
- mode = find_mode(op);
+ mode = atmel_qspi_find_mode(op);
if (mode < 0)
- return -ENOTSUPP;
-
- ifr |= sama5d2_qspi_modes[mode].config;
+ return mode;
+ ifr |= atmel_qspi_modes[mode].config;
if (op->dummy.buswidth && op->dummy.nbytes)
dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
+ /*
+ * The controller allows 24- and 32-bit addressing, while NAND flash
+ * requires 16-bit addresses. 8-bit addresses are handled using the
+ * option field. For 16-bit addresses, the workaround depends on the
+ * number of requested dummy cycles. If there are 8 or more dummy
+ * cycles, the address is shifted and sent with the first dummy byte.
+ * Otherwise the opcode is disabled and the first byte of the address
+ * carries the command opcode (this works only if the opcode and
+ * address use the same buswidth). The remaining limitation is a
+ * 16-bit address used without enough dummy cycles while the opcode
+ * uses a different buswidth than the address.
+ */
if (op->addr.buswidth) {
switch (op->addr.nbytes) {
case 0:
@@ -282,6 +275,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
}
}
+ /* offset of the data access in the QSPI memory space */
+ *offset = iar;
+
/* Set number of dummy cycles */
if (dummy_cycles)
ifr |= QSPI_IFR_NBDUM(dummy_cycles);
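The comment above describes how a 16-bit address is packed depending on the available dummy cycles (one address byte costs 8/buswidth cycles). A self-contained sketch of that decision follows; it is illustrative only, with hypothetical names, and is not the driver's code.

#include <stdbool.h>
#include <stdint.h>

struct frame {
	uint32_t addr_reg;	/* value destined for the address register */
	bool send_opcode;	/* whether the instruction field stays enabled */
	unsigned int dummy_cycles;
};

static struct frame pack_16bit_address(uint8_t opcode, uint16_t addr,
				       unsigned int dummy_cycles,
				       unsigned int addr_buswidth)
{
	struct frame f = { .send_opcode = true, .dummy_cycles = dummy_cycles };
	unsigned int cycles_per_byte = 8 / addr_buswidth;

	if (dummy_cycles >= cycles_per_byte) {
		/* enough dummy cycles: shift the address into the first dummy byte */
		f.addr_reg = (uint32_t)addr << 8;
		f.dummy_cycles -= cycles_per_byte;
	} else {
		/* no room: drop the opcode field, carry it in the top address byte */
		f.send_opcode = false;
		f.addr_reg = ((uint32_t)opcode << 16) | addr;
	}

	return f;
}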
@@ -290,49 +286,82 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->data.nbytes)
ifr |= QSPI_IFR_DATAEN;
- if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
- ifr |= QSPI_IFR_TFRTYP_TRSFR_READ;
- else
- ifr |= QSPI_IFR_TFRTYP_TRSFR_WRITE;
+ /*
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+ if (aq->mr != QSPI_MR_SMM) {
+ writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
+ aq->mr = QSPI_MR_SMM;
+ }
/* Clear pending interrupts */
- (void)qspi_readl(aq, QSPI_SR);
+ (void)readl_relaxed(aq->regs + QSPI_SR);
+
+ if (aq->caps->has_ricr) {
+ if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ ifr |= QSPI_IFR_APBTFRTYP_READ;
- /* Set QSPI Instruction Frame registers */
- qspi_writel(aq, QSPI_IAR, iar);
- qspi_writel(aq, QSPI_ICR, icr);
- qspi_writel(aq, QSPI_IFR, ifr);
+ /* Set QSPI Instruction Frame registers */
+ writel_relaxed(iar, aq->regs + QSPI_IAR);
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ writel_relaxed(icr, aq->regs + QSPI_RICR);
+ else
+ writel_relaxed(icr, aq->regs + QSPI_WICR);
+ writel_relaxed(ifr, aq->regs + QSPI_IFR);
+ } else {
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
+
+ /* Set QSPI Instruction Frame registers */
+ writel_relaxed(iar, aq->regs + QSPI_IAR);
+ writel_relaxed(icr, aq->regs + QSPI_ICR);
+ writel_relaxed(ifr, aq->regs + QSPI_IFR);
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
+ u32 sr, offset;
+ int err;
+
+ err = atmel_qspi_set_cfg(aq, op, &offset);
+ if (err)
+ return err;
/* Skip to the final steps if there is no data */
if (op->data.nbytes) {
/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)qspi_readl(aq, QSPI_IFR);
+ (void)readl_relaxed(aq->regs + QSPI_IFR);
/* Send/Receive data */
if (op->data.dir == SPI_MEM_DATA_IN)
- _memcpy_fromio(op->data.buf.in,
- aq->mem + iar, op->data.nbytes);
+ _memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
else
- _memcpy_toio(aq->mem + iar,
- op->data.buf.out, op->data.nbytes);
+ _memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
/* Release the chip-select */
- qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
+ writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR);
}
/* Poll INSTRuction End status */
- sr = qspi_readl(aq, QSPI_SR);
+ sr = readl_relaxed(aq->regs + QSPI_SR);
if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
return err;
/* Wait for INSTRuction End interrupt */
reinit_completion(&aq->cmd_completion);
aq->pending = sr & QSPI_SR_CMD_COMPLETED;
- qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED);
+ writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER);
if (!wait_for_completion_timeout(&aq->cmd_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
- qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED);
+ writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR);
return err;
}
@@ -361,7 +390,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
- src_rate = clk_get_rate(aq->clk);
+ src_rate = clk_get_rate(aq->pclk);
if (!src_rate)
return -EINVAL;
@@ -371,7 +400,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
scbr--;
scr = QSPI_SCR_SCBR(scbr);
- qspi_writel(aq, QSPI_SCR, scr);
+ writel_relaxed(scr, aq->regs + QSPI_SCR);
return 0;
}
@@ -379,21 +408,25 @@ static int atmel_qspi_setup(struct spi_device *spi)
static int atmel_qspi_init(struct atmel_qspi *aq)
{
/* Reset the QSPI controller */
- qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
+ writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR);
+
+ /* Set the QSPI controller by default in Serial Memory Mode */
+ writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
+ aq->mr = QSPI_MR_SMM;
/* Enable the QSPI controller */
- qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN);
+ writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR);
return 0;
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
{
- struct atmel_qspi *aq = (struct atmel_qspi *)dev_id;
+ struct atmel_qspi *aq = dev_id;
u32 status, mask, pending;
- status = qspi_readl(aq, QSPI_SR);
- mask = qspi_readl(aq, QSPI_IMR);
+ status = readl_relaxed(aq->regs + QSPI_SR);
+ mask = readl_relaxed(aq->regs + QSPI_IMR);
pending = status & mask;
if (!pending)
@@ -449,44 +482,74 @@ static int atmel_qspi_probe(struct platform_device *pdev)
}
/* Get the peripheral clock */
- aq->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(aq->clk)) {
+ aq->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(aq->pclk))
+ aq->pclk = devm_clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(aq->pclk)) {
dev_err(&pdev->dev, "missing peripheral clock\n");
- err = PTR_ERR(aq->clk);
+ err = PTR_ERR(aq->pclk);
goto exit;
}
/* Enable the peripheral clock */
- err = clk_prepare_enable(aq->clk);
+ err = clk_prepare_enable(aq->pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
goto exit;
}
+ aq->caps = of_device_get_match_data(&pdev->dev);
+ if (!aq->caps) {
+ dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (aq->caps->has_qspick) {
+ /* Get the QSPI system clock */
+ aq->qspick = devm_clk_get(&pdev->dev, "qspick");
+ if (IS_ERR(aq->qspick)) {
+ dev_err(&pdev->dev, "missing system clock\n");
+ err = PTR_ERR(aq->qspick);
+ goto disable_pclk;
+ }
+
+ /* Enable the QSPI system clock */
+ err = clk_prepare_enable(aq->qspick);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to enable the QSPI system clock\n");
+ goto disable_pclk;
+ }
+ }
+
/* Request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "missing IRQ\n");
err = irq;
- goto disable_clk;
+ goto disable_qspick;
}
err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
0, dev_name(&pdev->dev), aq);
if (err)
- goto disable_clk;
+ goto disable_qspick;
err = atmel_qspi_init(aq);
if (err)
- goto disable_clk;
+ goto disable_qspick;
err = spi_register_controller(ctrl);
if (err)
- goto disable_clk;
+ goto disable_qspick;
return 0;
-disable_clk:
- clk_disable_unprepare(aq->clk);
+disable_qspick:
+ clk_disable_unprepare(aq->qspick);
+disable_pclk:
+ clk_disable_unprepare(aq->pclk);
exit:
spi_controller_put(ctrl);
@@ -499,8 +562,9 @@ static int atmel_qspi_remove(struct platform_device *pdev)
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
spi_unregister_controller(ctrl);
- qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS);
- clk_disable_unprepare(aq->clk);
+ writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR);
+ clk_disable_unprepare(aq->qspick);
+ clk_disable_unprepare(aq->pclk);
return 0;
}
@@ -508,7 +572,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
struct atmel_qspi *aq = dev_get_drvdata(dev);
- clk_disable_unprepare(aq->clk);
+ clk_disable_unprepare(aq->qspick);
+ clk_disable_unprepare(aq->pclk);
return 0;
}
@@ -517,7 +582,8 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
struct atmel_qspi *aq = dev_get_drvdata(dev);
- clk_prepare_enable(aq->clk);
+ clk_prepare_enable(aq->pclk);
+ clk_prepare_enable(aq->qspick);
return atmel_qspi_init(aq);
}
@@ -525,8 +591,22 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
atmel_qspi_resume);
+static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
+
+static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
+ .has_qspick = true,
+ .has_ricr = true,
+};
+
static const struct of_device_id atmel_qspi_dt_ids[] = {
- { .compatible = "atmel,sama5d2-qspi" },
+ {
+ .compatible = "atmel,sama5d2-qspi",
+ .data = &atmel_sama5d2_qspi_caps,
+ },
+ {
+ .compatible = "microchip,sam9x60-qspi",
+ .data = &atmel_sam9x60_qspi_caps,
+ },
{ /* sentinel */ }
};
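
Editor's note on the atmel-quadspi hunks above: the sam9x60 support is keyed off a caps structure, and the main functional fork is which instruction-code register receives the opcode. A minimal sketch condensing that branch (the helper name is the editor's; the registers and flags are the driver's own):

static void atmel_qspi_write_icr_sketch(struct atmel_qspi *aq,
					const struct spi_mem_op *op,
					u32 icr, u32 *ifr)
{
	if (aq->caps->has_ricr) {
		/* sam9x60: separate read/write instruction code registers */
		if (op->data.dir == SPI_MEM_DATA_IN)
			writel_relaxed(icr, aq->regs + QSPI_RICR);
		else
			writel_relaxed(icr, aq->regs + QSPI_WICR);
	} else {
		/* sama5d2: single ICR, write direction flagged in IFR */
		if (op->data.dir == SPI_MEM_DATA_OUT)
			*ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
		writel_relaxed(icr, aq->regs + QSPI_ICR);
	}
}
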
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 3f6b657394de..847f354ebef1 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -21,18 +21,26 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
-#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
-
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ath79_spi_platform.h>
+#include <linux/platform_data/spi-ath79.h>
#define DRV_NAME "ath79-spi"
#define ATH79_SPI_RRW_DELAY_FACTOR 12000
#define MHZ (1000 * 1000)
+#define AR71XX_SPI_REG_FS 0x00 /* Function Select */
+#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */
+#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */
+#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */
+
+#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */
+
+#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */
+#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */
+#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n))
+
struct ath79_spi {
struct spi_bitbang bitbang;
u32 ioc_base;
@@ -67,31 +75,14 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
+ u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
- if (is_active) {
- /* set initial clock polarity */
- if (spi->mode & SPI_CPOL)
- sp->ioc_base |= AR71XX_SPI_IOC_CLK;
- else
- sp->ioc_base &= ~AR71XX_SPI_IOC_CLK;
-
- ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
- }
-
- if (gpio_is_valid(spi->cs_gpio)) {
- /* SPI is normally active-low */
- gpio_set_value_cansleep(spi->cs_gpio, cs_high);
- } else {
- u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
-
- if (cs_high)
- sp->ioc_base |= cs_bit;
- else
- sp->ioc_base &= ~cs_bit;
-
- ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
- }
+ if (cs_high)
+ sp->ioc_base |= cs_bit;
+ else
+ sp->ioc_base &= ~cs_bit;
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
static void ath79_spi_enable(struct ath79_spi *sp)
@@ -103,6 +94,9 @@ static void ath79_spi_enable(struct ath79_spi *sp)
sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
+ /* clear clk and mosi in the base state */
+ sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK);
+
/* TODO: setup speed? */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
}
@@ -115,66 +109,6 @@ static void ath79_spi_disable(struct ath79_spi *sp)
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
}
-static int ath79_spi_setup_cs(struct spi_device *spi)
-{
- struct ath79_spi *sp = ath79_spidev_to_sp(spi);
- int status;
-
- status = 0;
- if (gpio_is_valid(spi->cs_gpio)) {
- unsigned long flags;
-
- flags = GPIOF_DIR_OUT;
- if (spi->mode & SPI_CS_HIGH)
- flags |= GPIOF_INIT_LOW;
- else
- flags |= GPIOF_INIT_HIGH;
-
- status = gpio_request_one(spi->cs_gpio, flags,
- dev_name(&spi->dev));
- } else {
- u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
-
- if (spi->mode & SPI_CS_HIGH)
- sp->ioc_base &= ~cs_bit;
- else
- sp->ioc_base |= cs_bit;
-
- ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
- }
-
- return status;
-}
-
-static void ath79_spi_cleanup_cs(struct spi_device *spi)
-{
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
-}
-
-static int ath79_spi_setup(struct spi_device *spi)
-{
- int status = 0;
-
- if (!spi->controller_state) {
- status = ath79_spi_setup_cs(spi);
- if (status)
- return status;
- }
-
- status = spi_bitbang_setup(spi);
- if (status && !spi->controller_state)
- ath79_spi_cleanup_cs(spi);
-
- return status;
-}
-
-static void ath79_spi_cleanup(struct spi_device *spi)
-{
- ath79_spi_cleanup_cs(spi);
- spi_bitbang_cleanup(spi);
-}
-
static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
u32 word, u8 bits, unsigned flags)
{
@@ -225,9 +159,10 @@ static int ath79_spi_probe(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
+ master->use_gpio_descriptors = true;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->setup = ath79_spi_setup;
- master->cleanup = ath79_spi_cleanup;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
if (pdata) {
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
@@ -236,7 +171,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
- sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
sp->bitbang.flags = SPI_CS_HIGH;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
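
Editor's note on the spi-ath79 conversion above: the hand-rolled chip-select GPIO handling (setup/cleanup callbacks and gpio_request_one()) goes away because the SPI core can claim "cs-gpios" itself once use_gpio_descriptors is set, leaving the driver to toggle only its native CS bits. A minimal probe-side sketch of that pattern, assuming standard SPI core behaviour rather than quoting this driver:

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/* Let the SPI core claim "cs-gpios" and drive spi->cs_gpiod */
	master->use_gpio_descriptors = true;

	/* No per-device CS bookkeeping left, use the generic callbacks */
	master->setup = spi_bitbang_setup;
	master->cleanup = spi_bitbang_cleanup;

	return devm_spi_register_master(&pdev->dev, master);
}
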
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 74fddcd3282b..4954f0ab1606 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -23,8 +23,7 @@
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
@@ -312,7 +311,7 @@ struct atmel_spi {
/* Controller-specific per-slave state */
struct atmel_spi_device {
- unsigned int npcs_pin;
+ struct gpio_desc *npcs_pin;
u32 csr;
};
@@ -355,7 +354,6 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
- unsigned active = spi->mode & SPI_CS_HIGH;
u32 mr;
if (atmel_spi_is_v2(as)) {
@@ -379,7 +377,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
mr = spi_readl(as, MR);
if (as->use_cs_gpios)
- gpio_set_value(asd->npcs_pin, active);
+ gpiod_set_value(asd->npcs_pin, 1);
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
@@ -396,19 +394,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
mr = spi_readl(as, MR);
mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
if (as->use_cs_gpios && spi->chip_select != 0)
- gpio_set_value(asd->npcs_pin, active);
+ gpiod_set_value(asd->npcs_pin, 1);
spi_writel(as, MR, mr);
}
- dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
- asd->npcs_pin, active ? " (high)" : "",
- mr);
+ dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
}
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
- unsigned active = spi->mode & SPI_CS_HIGH;
u32 mr;
/* only deactivate *this* device; sometimes transfers to
@@ -420,14 +415,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
spi_writel(as, MR, mr);
}
- dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
- asd->npcs_pin, active ? " (low)" : "",
- mr);
+ dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
if (!as->use_cs_gpios)
spi_writel(as, CR, SPI_BIT(LASTXFER));
else if (atmel_spi_is_v2(as) || spi->chip_select != 0)
- gpio_set_value(asd->npcs_pin, !active);
+ gpiod_set_value(asd->npcs_pin, 0);
}
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -1188,7 +1181,6 @@ static int atmel_spi_setup(struct spi_device *spi)
struct atmel_spi_device *asd;
u32 csr;
unsigned int bits = spi->bits_per_word;
- unsigned int npcs_pin;
as = spi_master_get_devdata(spi->master);
@@ -1209,21 +1201,14 @@ static int atmel_spi_setup(struct spi_device *spi)
csr |= SPI_BIT(CSAAT);
/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
- *
- * DLYBCT would add delays between words, slowing down transfers.
- * It could potentially be useful to cope with DMA bottlenecks, but
- * in those cases it's probably best to just use a lower bitrate.
*/
csr |= SPI_BF(DLYBS, 0);
- csr |= SPI_BF(DLYBCT, 0);
-
- /* chipselect must have been muxed as GPIO (e.g. in board setup) */
- npcs_pin = (unsigned long)spi->controller_data;
- if (!as->use_cs_gpios)
- npcs_pin = spi->chip_select;
- else if (gpio_is_valid(spi->cs_gpio))
- npcs_pin = spi->cs_gpio;
+ /* DLYBCT adds delays between words. This is useful for slow devices
+ * that need a bit of time to set up the next transfer.
+ */
+ csr |= SPI_BF(DLYBCT,
+ (as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5);
asd = spi->controller_state;
if (!asd) {
@@ -1231,11 +1216,21 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!asd)
return -ENOMEM;
- if (as->use_cs_gpios)
- gpio_direction_output(npcs_pin,
- !(spi->mode & SPI_CS_HIGH));
+ /*
+ * If use_cs_gpios is true, this means that we have "cs-gpios"
+ * defined in the device tree node so we should have
+ * gotten the GPIO lines from the device tree inside the
+ * SPI core. Warn if this is not the case but continue since
+ * CS GPIOs are after all optional.
+ */
+ if (as->use_cs_gpios) {
+ if (!spi->cs_gpiod) {
+ dev_err(&spi->dev,
+ "host claims to use CS GPIOs but no CS found in DT by the SPI core\n");
+ }
+ asd->npcs_pin = spi->cs_gpiod;
+ }
- asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
}
@@ -1473,41 +1468,6 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.has_pdc_support = version < 0x212;
}
-/*-------------------------------------------------------------------------*/
-static int atmel_spi_gpio_cs(struct platform_device *pdev)
-{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct device_node *np = master->dev.of_node;
- int i;
- int ret = 0;
- int nb = 0;
-
- if (!as->use_cs_gpios)
- return 0;
-
- if (!np)
- return 0;
-
- nb = of_gpio_named_count(np, "cs-gpios");
- for (i = 0; i < nb; i++) {
- int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
- "cs-gpios", i);
-
- if (cs_gpio == -EPROBE_DEFER)
- return cs_gpio;
-
- if (gpio_is_valid(cs_gpio)) {
- ret = devm_gpio_request(&pdev->dev, cs_gpio,
- dev_name(&pdev->dev));
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
static void atmel_spi_init(struct atmel_spi *as)
{
spi_writel(as, CR, SPI_BIT(SWRST));
@@ -1560,6 +1520,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
goto out_free;
/* the spi->mode bits understood by this driver: */
+ master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->dev.of_node = pdev->dev.of_node;
@@ -1592,6 +1553,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
atmel_get_caps(as);
+ /*
+ * If there are chip selects in the device tree, those will be
+ * discovered by the SPI core when registering the SPI master
+ * and assigned to each SPI device.
+ */
as->use_cs_gpios = true;
if (atmel_spi_is_v2(as) &&
pdev->dev.of_node &&
@@ -1600,10 +1566,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
}
- ret = atmel_spi_gpio_cs(pdev);
- if (ret)
- goto out_unmap_regs;
-
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
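
Editor's note on the DLYBCT change in spi-atmel.c above: the ">> 5" matches a delay of 32 DLYBCT peripheral-clock periods between words (the factor of 32 is inferred from the shift, not spelled out in the hunk), so the field is simply clk[MHz] * word_delay[us] / 32. A worked example with assumed numbers:

/* Worked example of the DLYBCT value computed above (editor's numbers). */
static u32 dlybct_example(void)
{
	u32 spi_clk = 84000000;		/* 84 MHz peripheral clock */
	u32 word_delay_usecs = 4;	/* requested inter-word delay */

	/* (84 * 4) >> 5 = 336 / 32 = 10; 32 * 10 / 84 MHz ~= 3.8 us */
	return (spi_clk / 1000000 * word_delay_usecs) >> 5;
}
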
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 671e374e1b01..f7e054848ca5 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -456,7 +456,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
}
bs->clk = devm_clk_get(&pdev->dev, NULL);
- if ((!bs->clk) || (IS_ERR(bs->clk))) {
+ if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_master_put;
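
Editor's note on the spi-bcm2835aux fix above: devm_clk_get() reports failure only through ERR_PTR() values, and a NULL clock, where it can occur, is a valid dummy clock; worse, PTR_ERR(NULL) is 0, so the old "!bs->clk" branch would have jumped to the error path with err == 0. A minimal sketch of the resulting idiom (helper name is the editor's):

/* Editor's sketch: only IS_ERR() indicates a real failure. */
static int get_optional_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);	/* real error, including -EPROBE_DEFER */

	return 0;			/* NULL is a valid dummy clock, not an error */
}
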
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index f29176000b8d..dd9a8c54a693 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -213,19 +213,6 @@ int spi_bitbang_setup(struct spi_device *spi)
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
- /* NOTE we _need_ to call chipselect() early, ideally with adapter
- * setup, unless the hardware defaults cooperate to avoid confusion
- * between normal (active low) and inverted chipselects.
- */
-
- /* deselect chip (low or high) */
- mutex_lock(&bitbang->lock);
- if (!bitbang->busy) {
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(cs->nsecs);
- }
- mutex_unlock(&bitbang->lock);
-
return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 7c88f74f7f47..43d0e79842ac 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -13,7 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -128,10 +128,6 @@ struct cdns_spi {
u32 is_decoded_cs;
};
-struct cdns_spi_device_data {
- bool gpio_requested;
-};
-
/* Macros for the SPI controller read/write */
static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
{
@@ -176,16 +172,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
/**
* cdns_spi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
- * @is_high: Select(0) or deselect (1) the chip select line
+ * @enable: Select (1) or deselect (0) the chip select line
*/
-static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
+static void cdns_spi_chipselect(struct spi_device *spi, bool enable)
{
struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
u32 ctrl_reg;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
- if (is_high) {
+ if (!enable) {
/* Deselect the slave */
ctrl_reg |= CDNS_SPI_CR_SSCTRL;
} else {
@@ -469,64 +465,6 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master)
return 0;
}
-static int cdns_spi_setup(struct spi_device *spi)
-{
-
- int ret = -EINVAL;
- struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi);
-
- /* this is a pin managed by the controller, leave it alone */
- if (spi->cs_gpio == -ENOENT)
- return 0;
-
- /* this seems to be the first time we're here */
- if (!cdns_spi_data) {
- cdns_spi_data = kzalloc(sizeof(*cdns_spi_data), GFP_KERNEL);
- if (!cdns_spi_data)
- return -ENOMEM;
- cdns_spi_data->gpio_requested = false;
- spi_set_ctldata(spi, cdns_spi_data);
- }
-
- /* if we haven't done so, grab the gpio */
- if (!cdns_spi_data->gpio_requested && gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request_one(spi->cs_gpio,
- (spi->mode & SPI_CS_HIGH) ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
- dev_name(&spi->dev));
- if (ret)
- dev_err(&spi->dev, "can't request chipselect gpio %d\n",
- spi->cs_gpio);
- else
- cdns_spi_data->gpio_requested = true;
- } else {
- if (gpio_is_valid(spi->cs_gpio)) {
- int mode = ((spi->mode & SPI_CS_HIGH) ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
-
- ret = gpio_direction_output(spi->cs_gpio, mode);
- if (ret)
- dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
- spi->cs_gpio, ret);
- }
- }
-
- return ret;
-}
-
-static void cdns_spi_cleanup(struct spi_device *spi)
-{
- struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi);
-
- if (cdns_spi_data) {
- if (cdns_spi_data->gpio_requested)
- gpio_free(spi->cs_gpio);
- kfree(cdns_spi_data);
- spi_set_ctldata(spi, NULL);
- }
-
-}
-
/**
* cdns_spi_probe - Probe method for the SPI driver
* @pdev: Pointer to the platform_device structure
@@ -584,11 +522,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_apb;
}
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -603,8 +536,10 @@ static int cdns_spi_probe(struct platform_device *pdev)
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
@@ -621,13 +556,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
+ master->use_gpio_descriptors = true;
master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
master->prepare_message = cdns_prepare_message;
master->transfer_one = cdns_transfer_one;
master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
master->set_cs = cdns_spi_chipselect;
- master->setup = cdns_spi_setup;
- master->cleanup = cdns_spi_cleanup;
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 18193df2eba8..8c03c409fc07 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -11,7 +11,7 @@
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -36,25 +36,6 @@ struct spi_clps711x_data {
int len;
};
-static int spi_clps711x_setup(struct spi_device *spi)
-{
- if (!spi->controller_state) {
- int ret;
-
- ret = devm_gpio_request(&spi->master->dev, spi->cs_gpio,
- dev_name(&spi->master->dev));
- if (ret)
- return ret;
-
- spi->controller_state = spi;
- }
-
- /* We are expect that SPI-device is not selected */
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
-
- return 0;
-}
-
static int spi_clps711x_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -125,11 +106,11 @@ static int spi_clps711x_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ master->use_gpio_descriptors = true;
master->bus_num = -1;
master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
master->dev.of_node = pdev->dev.of_node;
- master->setup = spi_clps711x_setup;
master->prepare_message = spi_clps711x_prepare_message;
master->transfer_one = spi_clps711x_transfer_one;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 56adec83f8fc..eb246ebcfa3a 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -15,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
@@ -222,12 +221,17 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
* Board specific chip select logic decides the polarity and cs
* line for the controller
*/
- if (spi->cs_gpio >= 0) {
+ if (spi->cs_gpiod) {
+ /*
+ * FIXME: is this code ever executed? This host does not
+ * set SPI_MASTER_GPIO_SS so this chipselect callback should
+ * not get called from the SPI core when we are using
+ * GPIOs for chip select.
+ */
if (value == BITBANG_CS_ACTIVE)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH);
+ gpiod_set_value(spi->cs_gpiod, 1);
else
- gpio_set_value(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
+ gpiod_set_value(spi->cs_gpiod, 0);
} else {
if (value == BITBANG_CS_ACTIVE) {
if (!(spi->mode & SPI_CS_WORD))
@@ -418,30 +422,18 @@ static int davinci_spi_of_setup(struct spi_device *spi)
*/
static int davinci_spi_setup(struct spi_device *spi)
{
- int retval = 0;
struct davinci_spi *dspi;
- struct spi_master *master = spi->master;
struct device_node *np = spi->dev.of_node;
bool internal_cs = true;
dspi = spi_master_get_devdata(spi->master);
if (!(spi->mode & SPI_NO_CS)) {
- if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
- retval = gpio_direction_output(
- spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (np && spi->cs_gpiod)
internal_cs = false;
- }
-
- if (retval) {
- dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
- spi->cs_gpio, retval);
- return retval;
- }
- if (internal_cs) {
+ if (internal_cs)
set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
- }
}
if (spi->mode & SPI_READY)
@@ -962,6 +954,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (ret)
goto free_master;
+ master->use_gpio_descriptors = true;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
master->num_chipselect = pdata->num_chipselect;
@@ -980,27 +973,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (dspi->version == SPI_VERSION_2)
dspi->bitbang.flags |= SPI_READY;
- if (pdev->dev.of_node) {
- int i;
-
- for (i = 0; i < pdata->num_chipselect; i++) {
- int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
- "cs-gpios", i);
-
- if (cs_gpio == -EPROBE_DEFER) {
- ret = cs_gpio;
- goto free_clk;
- }
-
- if (gpio_is_valid(cs_gpio)) {
- ret = devm_gpio_request(&pdev->dev, cs_gpio,
- dev_name(&pdev->dev));
- if (ret)
- goto free_clk;
- }
- }
- }
-
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
ret = davinci_spi_request_dma(dspi);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index d0dd7814e997..4bd59a93d988 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -18,7 +18,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/acpi.h>
#include <linux/property.h>
@@ -185,27 +184,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws->num_cs = num_cs;
- if (pdev->dev.of_node) {
- int i;
-
- for (i = 0; i < dws->num_cs; i++) {
- int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
- "cs-gpios", i);
-
- if (cs_gpio == -EPROBE_DEFER) {
- ret = cs_gpio;
- goto out;
- }
-
- if (gpio_is_valid(cs_gpio)) {
- ret = devm_gpio_request(&pdev->dev, cs_gpio,
- dev_name(&pdev->dev));
- if (ret)
- goto out;
- }
- }
- }
-
init_func = device_get_match_data(&pdev->dev);
if (init_func) {
ret = init_func(pdev, dwsmmio);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 2e822a56576a..ac81025f86ab 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
#include "spi-dw.h"
@@ -54,41 +53,41 @@ static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
if (!buf)
return 0;
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"%s registers:\n", dev_name(&dws->master->dev));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -138,11 +137,10 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct chip_data *chip = spi_get_ctldata(spi);
- /* Chip select logic is inverted from spi_set_cs() */
if (chip && chip->cs_control)
- chip->cs_control(!enable);
+ chip->cs_control(enable);
- if (!enable)
+ if (enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
else if (dws->cs_override)
dw_writel(dws, DW_SPI_SER, 0);
@@ -317,7 +315,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
- | (spi->mode << SPI_MODE_OFFSET)
+ | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
+ (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
| (chip->tmode << SPI_TMOD_OFFSET);
/*
@@ -397,7 +396,6 @@ static int dw_spi_setup(struct spi_device *spi)
{
struct dw_spi_chip *chip_info = NULL;
struct chip_data *chip;
- int ret;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
@@ -425,13 +423,6 @@ static int dw_spi_setup(struct spi_device *spi)
chip->tmode = SPI_TMOD_TR;
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_direction_output(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -496,6 +487,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
goto err_free_master;
}
+ master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->bus_num = dws->bus_num;
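
Editor's note on the spi-dw.c hunks above: scnprintf() returns the number of bytes actually written (snprintf() returns the would-be length, which can walk len past SPI_REGS_BUFSIZE), and CR0 now takes only CPOL and CPHA at their own offsets instead of shifting the whole spi->mode word in, which keeps unrelated mode flags out of neighbouring CR0 fields. A condensed sketch of the corrected mode handling (the helper name is the editor's; the offsets are the driver's macros):

static u32 dw_spi_cr0_mode_sketch(u32 mode)
{
	u32 cr0 = 0;

	if (mode & SPI_CPOL)
		cr0 |= 1 << SPI_SCOL_OFFSET;
	if (mode & SPI_CPHA)
		cr0 |= 1 << SPI_SCPH_OFFSET;

	/* other spi->mode flags are no longer shifted in wholesale */
	return cr0;
}
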
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 5e10dc5c93a5..53335ccc98f6 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -67,7 +67,7 @@
#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
-#define SPI_SR_CLEAR 0xdaad0000
+#define SPI_SR_CLEAR 0x9aaf0000
#define SPI_RSER_TFFFE BIT(25)
#define SPI_RSER_TFFFD BIT(24)
@@ -233,6 +233,9 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
+ if (spi_controller_is_slave(dspi->master))
+ return data;
+
if (dspi->len > 0)
cmd |= SPI_PUSHR_CMD_CONT;
return cmd << 16 | data;
@@ -329,6 +332,11 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma_async_issue_pending(dma->chan_rx);
dma_async_issue_pending(dma->chan_tx);
+ if (spi_controller_is_slave(dspi->master)) {
+ wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
+ return 0;
+ }
+
time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
DMA_COMPLETION_TIMEOUT);
if (time_left == 0) {
@@ -798,14 +806,18 @@ static int dspi_setup(struct spi_device *spi)
ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
- | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
- | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
- | SPI_CTAR_PCSSCK(pcssck)
- | SPI_CTAR_CSSCK(cssck)
- | SPI_CTAR_PASC(pasc)
- | SPI_CTAR_ASC(asc)
- | SPI_CTAR_PBR(pbr)
- | SPI_CTAR_BR(br);
+ | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0);
+
+ if (!spi_controller_is_slave(dspi->master)) {
+ chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode &
+ SPI_LSB_FIRST ? 1 : 0)
+ | SPI_CTAR_PCSSCK(pcssck)
+ | SPI_CTAR_CSSCK(cssck)
+ | SPI_CTAR_PASC(pasc)
+ | SPI_CTAR_ASC(asc)
+ | SPI_CTAR_PBR(pbr)
+ | SPI_CTAR_BR(br);
+ }
spi_set_ctldata(spi, chip);
@@ -970,8 +982,13 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
static void dspi_init(struct fsl_dspi *dspi)
{
- regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
- (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
+ unsigned int mcr = SPI_MCR_PCSIS |
+ (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0);
+
+ if (!spi_controller_is_slave(dspi->master))
+ mcr |= SPI_MCR_MASTER;
+
+ regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
if (dspi->devtype_data->xspi_mode)
regmap_write(dspi->regmap, SPI_CTARE(0),
@@ -1027,6 +1044,9 @@ static int dspi_probe(struct platform_device *pdev)
}
master->bus_num = bus_num;
+ if (of_property_read_bool(np, "spi-slave"))
+ master->slave = true;
+
dspi->devtype_data = of_device_get_match_data(&pdev->dev);
if (!dspi->devtype_data) {
dev_err(&pdev->dev, "can't get devtype_data\n");
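
Editor's note on the spi-fsl-dspi hunks above: with the "spi-slave" property the controller runs in slave mode, so MCR drops SPI_MCR_MASTER, the CTAR clock/delay fields are left unprogrammed (the external master drives the clock), and PUSHR carries only data. A condensed sketch restating that gating (the helper name is the editor's):

static u32 dspi_pushr_sketch(struct fsl_dspi *dspi, u16 cmd, u16 data)
{
	/* In slave mode the external master owns CS and the clock. */
	if (spi_controller_is_slave(dspi->master))
		return data;

	return (u32)cmd << 16 | data;
}
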
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 08dcc3c22e88..391863914043 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -48,10 +48,13 @@
#define CR_RTF BIT(8)
#define CR_RST BIT(1)
#define CR_MEN BIT(0)
+#define SR_MBF BIT(24)
#define SR_TCF BIT(10)
+#define SR_FCF BIT(9)
#define SR_RDF BIT(1)
#define SR_TDF BIT(0)
#define IER_TCIE BIT(10)
+#define IER_FCIE BIT(9)
#define IER_RDIE BIT(1)
#define IER_TDIE BIT(0)
#define CFGR1_PCSCFG BIT(27)
@@ -59,6 +62,7 @@
#define CFGR1_PCSPOL BIT(8)
#define CFGR1_NOSTALL BIT(3)
#define CFGR1_MASTER BIT(0)
+#define FSR_RXCOUNT (BIT(16)|BIT(17)|BIT(18))
#define RSR_RXEMPTY BIT(1)
#define TCR_CPOL BIT(31)
#define TCR_CPHA BIT(30)
@@ -161,28 +165,10 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
return 0;
}
-static int fsl_lpspi_txfifo_empty(struct fsl_lpspi_data *fsl_lpspi)
-{
- u32 txcnt;
- unsigned long orig_jiffies = jiffies;
-
- do {
- txcnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
-
- if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
- dev_dbg(fsl_lpspi->dev, "txfifo empty timeout\n");
- return -ETIMEDOUT;
- }
- cond_resched();
-
- } while (txcnt);
-
- return 0;
-}
-
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
u8 txfifo_cnt;
+ u32 temp;
txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
@@ -193,9 +179,15 @@ static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
txfifo_cnt++;
}
- if (!fsl_lpspi->remain && (txfifo_cnt < fsl_lpspi->txfifosize))
- writel(0, fsl_lpspi->base + IMX7ULP_TDR);
- else
+ if (txfifo_cnt < fsl_lpspi->txfifosize) {
+ if (!fsl_lpspi->is_slave) {
+ temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
+ temp &= ~TCR_CONTC;
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+ }
+
+ fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
+ } else
fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}
@@ -276,10 +268,6 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
u32 temp;
int ret;
- temp = CR_RST;
- writel(temp, fsl_lpspi->base + IMX7ULP_CR);
- writel(0, fsl_lpspi->base + IMX7ULP_CR);
-
if (!fsl_lpspi->is_slave) {
ret = fsl_lpspi_set_bitrate(fsl_lpspi);
if (ret)
@@ -370,6 +358,24 @@ static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
return 0;
}
+static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+
+ /* Disable all interrupts */
+ fsl_lpspi_intctrl(fsl_lpspi, 0);
+
+ /* W1C for all flags in SR */
+ temp = 0x3F << 8;
+ writel(temp, fsl_lpspi->base + IMX7ULP_SR);
+
+ /* Clear FIFO and disable module */
+ temp = CR_RRF | CR_RTF;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+
+ return 0;
+}
+
static int fsl_lpspi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *t)
@@ -391,11 +397,7 @@ static int fsl_lpspi_transfer_one(struct spi_controller *controller,
if (ret)
return ret;
- ret = fsl_lpspi_txfifo_empty(fsl_lpspi);
- if (ret)
- return ret;
-
- fsl_lpspi_read_rx_fifo(fsl_lpspi);
+ fsl_lpspi_reset(fsl_lpspi);
return 0;
}
@@ -408,7 +410,6 @@ static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller,
struct spi_device *spi = msg->spi;
struct spi_transfer *xfer;
bool is_first_xfer = true;
- u32 temp;
int ret = 0;
msg->status = 0;
@@ -428,13 +429,6 @@ static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller,
}
complete:
- if (!fsl_lpspi->is_slave) {
- /* de-assert SS, then finalize current message */
- temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
- temp &= ~TCR_CONTC;
- writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
- }
-
msg->status = ret;
spi_finalize_current_message(controller);
@@ -443,20 +437,30 @@ complete:
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
+ u32 temp_SR, temp_IER;
struct fsl_lpspi_data *fsl_lpspi = dev_id;
- u32 temp;
+ temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
fsl_lpspi_intctrl(fsl_lpspi, 0);
- temp = readl(fsl_lpspi->base + IMX7ULP_SR);
+ temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);
fsl_lpspi_read_rx_fifo(fsl_lpspi);
- if (temp & SR_TDF) {
+ if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
fsl_lpspi_write_tx_fifo(fsl_lpspi);
+ return IRQ_HANDLED;
+ }
- if (!fsl_lpspi->remain)
- complete(&fsl_lpspi->xfer_done);
+ if (temp_SR & SR_MBF ||
+ readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_RXCOUNT) {
+ writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
+ fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
+ return IRQ_HANDLED;
+ }
+ if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
+ writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
+ complete(&fsl_lpspi->xfer_done);
return IRQ_HANDLED;
}
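
Editor's note on the spi-fsl-lpspi rework above: instead of polling for an empty TX FIFO, a transfer now completes on the frame-complete flag; the ISR keeps feeding the FIFO on SR_TDF/IER_TDIE, re-arms IER_FCIE while the module is still busy (SR_MBF) or the RX FIFO still holds data (FSR_RXCOUNT), and only signals xfer_done once SR_FCF is seen with both clear. A condensed predicate sketching that exit condition (the helper name is the editor's):

/* Editor's condensation of the new ISR exit condition (a sketch). */
static bool lpspi_frame_done_sketch(u32 sr, u32 fsr)
{
	if (!(sr & SR_FCF))
		return false;			/* frame not complete yet */

	if ((sr & SR_MBF) || (fsr & FSR_RXCOUNT))
		return false;			/* still busy or RX data pending */

	return true;
}
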
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
new file mode 100644
index 000000000000..6a713f78a62e
--- /dev/null
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -0,0 +1,966 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Freescale QuadSPI driver.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2018 Bootlin
+ * Copyright (C) 2018 exceet electronics GmbH
+ * Copyright (C) 2018 Kontron Electronics GmbH
+ *
+ * Transition to SPI MEM interface:
+ * Authors:
+ * Boris Brezillon <bbrezillon@kernel.org>
+ * Frieder Schrempf <frieder.schrempf@kontron.de>
+ * Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
+ * Suresh Gupta <suresh.gupta@nxp.com>
+ *
+ * Based on the original fsl-quadspi.c spi-nor driver:
+ * Author: Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/sizes.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * The driver only uses a single LUT entry, which is updated on
+ * each call of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry (15).
+ */
+#define SEQID_LUT 15
+
+/* Registers used by the driver */
+#define QUADSPI_MCR 0x00
+#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16)
+#define QUADSPI_MCR_MDIS_MASK BIT(14)
+#define QUADSPI_MCR_CLR_TXF_MASK BIT(11)
+#define QUADSPI_MCR_CLR_RXF_MASK BIT(10)
+#define QUADSPI_MCR_DDR_EN_MASK BIT(7)
+#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2)
+#define QUADSPI_MCR_SWRSTHD_MASK BIT(1)
+#define QUADSPI_MCR_SWRSTSD_MASK BIT(0)
+
+#define QUADSPI_IPCR 0x08
+#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
+
+#define QUADSPI_BUF3CR 0x1c
+#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
+#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
+#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8)
+
+#define QUADSPI_BFGENCR 0x20
+#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12)
+
+#define QUADSPI_BUF0IND 0x30
+#define QUADSPI_BUF1IND 0x34
+#define QUADSPI_BUF2IND 0x38
+#define QUADSPI_SFAR 0x100
+
+#define QUADSPI_SMPR 0x108
+#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16)
+#define QUADSPI_SMPR_FSDLY_MASK BIT(6)
+#define QUADSPI_SMPR_FSPHS_MASK BIT(5)
+#define QUADSPI_SMPR_HSENA_MASK BIT(0)
+
+#define QUADSPI_RBCT 0x110
+#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0)
+#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8)
+
+#define QUADSPI_TBDR 0x154
+
+#define QUADSPI_SR 0x15c
+#define QUADSPI_SR_IP_ACC_MASK BIT(1)
+#define QUADSPI_SR_AHB_ACC_MASK BIT(2)
+
+#define QUADSPI_FR 0x160
+#define QUADSPI_FR_TFF_MASK BIT(0)
+
+#define QUADSPI_SPTRCLR 0x16c
+#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
+#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
+
+#define QUADSPI_SFA1AD 0x180
+#define QUADSPI_SFA2AD 0x184
+#define QUADSPI_SFB1AD 0x188
+#define QUADSPI_SFB2AD 0x18c
+#define QUADSPI_RBDR(x) (0x200 + ((x) * 4))
+
+#define QUADSPI_LUTKEY 0x300
+#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define QUADSPI_LCKCR 0x304
+#define QUADSPI_LCKER_LOCK BIT(0)
+#define QUADSPI_LCKER_UNLOCK BIT(1)
+
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE BIT(0)
+
+#define QUADSPI_LUT_BASE 0x310
+#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+#define QUADSPI_LUT_REG(idx) \
+ (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)
+
+/* Instruction set for the LUT register */
+#define LUT_STOP 0
+#define LUT_CMD 1
+#define LUT_ADDR 2
+#define LUT_DUMMY 3
+#define LUT_MODE 4
+#define LUT_MODE2 5
+#define LUT_MODE4 6
+#define LUT_FSL_READ 7
+#define LUT_FSL_WRITE 8
+#define LUT_JMP_ON_CS 9
+#define LUT_ADDR_DDR 10
+#define LUT_MODE_DDR 11
+#define LUT_MODE2_DDR 12
+#define LUT_MODE4_DDR 13
+#define LUT_FSL_READ_DDR 14
+#define LUT_FSL_WRITE_DDR 15
+#define LUT_DATA_LEARN 16
+
+/*
+ * The PAD definitions for LUT register.
+ *
+ * The pad stands for the number of IO lines [0:3].
+ * For example, the quad read needs four IO lines,
+ * so you should use LUT_PAD(4).
+ */
+#define LUT_PAD(x) (fls(x) - 1)
+
+/*
+ * Macro for constructing the LUT entries with the following
+ * register layout:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define LUT_DEF(idx, ins, pad, opr) \
+ ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))
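+/*
+ * Editor's worked example (illustrative values, not part of this patch):
+ * a command byte 0x6b followed by one address byte 0xaa written via
+ * LUT_MODE (as fsl_qspi_prepare_lut() below does) packs as
+ *
+ *   LUT_DEF(0, LUT_CMD,  LUT_PAD(1), 0x6b) = 0x0000046b
+ *   LUT_DEF(1, LUT_MODE, LUT_PAD(1), 0xaa) = 0x10aa0000
+ *
+ * i.e. lutval[0] = 0x10aa046b: each 32-bit LUT register holds two 16-bit
+ * slots, even indexes in the low half, odd indexes in the high half.
+ */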
+
+/* Controller needs driver to swap endianness */
+#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0)
+
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK BIT(1)
+
+/*
+ * TKT253890, the controller needs the driver to fill the txfifo with
+ * at least 16 bytes to trigger a data transfer, even though the extra
+ * data won't be transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890 BIT(2)
+
+/* TKT245618, the controller cannot wake up from wait mode */
+#define QUADSPI_QUIRK_TKT245618 BIT(3)
+
+/*
+ * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
+ * internally. No need to add it when setting SFXXAD and SFAR registers
+ */
+#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
+
+struct fsl_qspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
+ bool little_endian;
+};
+
+static const struct fsl_qspi_devtype_data vybrid_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx6sx_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx7d_data = {
+ .rxfifo = SZ_512,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx6ul_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data ls1021a_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = 0,
+ .little_endian = false,
+};
+
+static const struct fsl_qspi_devtype_data ls2080a_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
+ .little_endian = true,
+};
+
+struct fsl_qspi {
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ const struct fsl_qspi_devtype_data *devtype_data;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
+ int selected;
+};
+
+static inline int needs_swap_endian(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
+}
+
+static inline int needs_4x_clock(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
+}
+
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
+}
+
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
+}
+
+static inline int needs_amba_base_offset(struct fsl_qspi *q)
+{
+ return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
+}
+
+/*
+ * An IC bug makes it necessary to rearrange the 32-bit data.
+ * Later chips, such as IMX6SLX, have fixed this bug.
+ */
+static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
+{
+ return needs_swap_endian(q) ? __swab32(a) : a;
+}
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The QSPI controller's endianness is independent of
+ * the CPU core's endianness: even when the CPU core is
+ * little-endian, the controller may be configured as either
+ * big-endian or little-endian.
+ */
+static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
+{
+ if (q->devtype_data->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
+{
+ if (q->devtype_data->little_endian)
+ return ioread32(addr);
+
+ return ioread32be(addr);
+}
+
+static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
+{
+ struct fsl_qspi *q = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = qspi_readl(q, q->iobase + QUADSPI_FR);
+ qspi_writel(q, reg, q->iobase + QUADSPI_FR);
+
+ if (reg & QUADSPI_FR_TFF_MASK)
+ complete(&q->c);
+
+ dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
+ return IRQ_HANDLED;
+}
+
+static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
+{
+ switch (width) {
+ case 1:
+ case 2:
+ case 4:
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool fsl_qspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
+
+ if (op->addr.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
+
+ if (op->dummy.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
+
+ if (ret)
+ return false;
+
+ /*
+ * The number of instructions needed for the op needs
+ * to fit into a single LUT entry.
+ */
+ if (op->addr.nbytes +
+ (op->dummy.nbytes ? 1:0) +
+ (op->data.nbytes ? 1:0) > 6)
+ return false;
+
+ /* Max 64 dummy clock cycles supported */
+ if (op->dummy.nbytes &&
+ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
+ return false;
+
+ /* Max data length, check controller limits and alignment */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ (op->data.nbytes > q->devtype_data->ahb_buf_size ||
+ (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
+ !IS_ALIGNED(op->data.nbytes, 8))))
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ op->data.nbytes > q->devtype_data->txfifo)
+ return false;
+
+ return true;
+}
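+/*
+ * Editor's note on the "> 6" limit above: one LUT sequence is four
+ * 32-bit registers, i.e. eight 16-bit instruction slots. One slot is
+ * always the command and one the final LUT_STOP, which leaves six for
+ * the address bytes (one slot each, see fsl_qspi_prepare_lut() below),
+ * the dummy cycles and the data phase.
+ */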
+
+static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
+
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+
+ /*
+ * For some unknown reason, using LUT_ADDR doesn't work in some
+ * cases (at least with only one byte long addresses), so
+ * let's use LUT_MODE to write the address bytes one by one
+ */
+ for (i = 0; i < op->addr.nbytes; i++) {
+ u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
+ LUT_PAD(op->addr.buswidth),
+ addrbyte);
+ lutidx++;
+ }
+
+ if (op->dummy.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ LUT_PAD(op->dummy.buswidth),
+ op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ lutidx++;
+ }
+
+ if (op->data.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ LUT_FSL_READ : LUT_FSL_WRITE,
+ LUT_PAD(op->data.buswidth),
+ 0);
+ lutidx++;
+ }
+
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
+
+ /* unlock LUT */
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+
+ /* fill LUT */
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
+
+ /* lock LUT */
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
+{
+ int ret;
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ return ret;
+ }
+
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ return 0;
+}
+
+static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
+{
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_remove_request(&q->pm_qos_req);
+
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+}
+
+/*
+ * If we have changed the content of the flash by writing or erasing, or if we
+ * read from flash with a different offset into the page buffer, we need to
+ * invalidate the AHB buffer. If we do not do so, we may read out the wrong
+ * data. The spec tells us to reset the AHB domain and Serial Flash domain at
+ * the same time.
+ */
+static void fsl_qspi_invalidate(struct fsl_qspi *q)
+{
+ u32 reg;
+
+ reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
+ reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+
+ /*
+ * The minimum delay is 1 AHB + 2 SFCK clocks.
+ * Delaying for 1 us is enough.
+ */
+ udelay(1);
+
+ reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+}
+
+static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
+{
+ unsigned long rate = spi->max_speed_hz;
+ int ret;
+
+ if (q->selected == spi->chip_select)
+ return;
+
+ if (needs_4x_clock(q))
+ rate *= 4;
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ ret = clk_set_rate(q->clk, rate);
+ if (ret)
+ return;
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return;
+
+ q->selected = spi->chip_select;
+
+ fsl_qspi_invalidate(q);
+}
+
+static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in,
+ q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
+ op->data.nbytes);
+}
+
+static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int i;
+ u32 val;
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
+ memcpy(&val, op->data.buf.out + i, 4);
+ val = fsl_qspi_endian_xchg(q, val);
+ qspi_writel(q, val, base + QUADSPI_TBDR);
+ }
+
+ if (i < op->data.nbytes) {
+ memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
+ val = fsl_qspi_endian_xchg(q, val);
+ qspi_writel(q, val, base + QUADSPI_TBDR);
+ }
+
+ if (needs_fill_txfifo(q)) {
+ for (i = op->data.nbytes; i < 16; i += 4)
+ qspi_writel(q, 0, base + QUADSPI_TBDR);
+ }
+}
+
+static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int i;
+ u8 *buf = op->data.buf.in;
+ u32 val;
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
+ val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
+ val = fsl_qspi_endian_xchg(q, val);
+ memcpy(buf + i, &val, 4);
+ }
+
+ if (i < op->data.nbytes) {
+ val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
+ val = fsl_qspi_endian_xchg(q, val);
+ memcpy(buf + i, &val, op->data.nbytes - i);
+ }
+}
+
+static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int err = 0;
+
+ init_completion(&q->c);
+
+ /*
+ * Always start the sequence at the same index since we update
+ * the LUT at each exec_op() call. Also specify the DATA
+ * length, since it has not been specified in the LUT.
+ */
+ qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
+ base + QUADSPI_IPCR);
+
+ /* Wait for the interrupt. */
+ if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+ if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ fsl_qspi_read_rxfifo(q, op);
+
+ return err;
+}
+
+static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
+ u32 mask, u32 delay_us, u32 timeout_us)
+{
+ u32 reg;
+
+ if (!q->devtype_data->little_endian)
+ mask = (u32)cpu_to_be32(mask);
+
+ return readl_poll_timeout(base, reg, !(reg & mask), delay_us,
+ timeout_us);
+}
+
+static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ void __iomem *base = q->iobase;
+ u32 addr_offset = 0;
+ int err = 0;
+
+ mutex_lock(&q->lock);
+
+ /* Wait for the controller to be ready */
+ fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
+ QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
+
+ fsl_qspi_select_mem(q, mem->spi);
+
+ if (needs_amba_base_offset(q))
+ addr_offset = q->memmap_phy;
+
+ qspi_writel(q,
+ q->selected * q->devtype_data->ahb_buf_size + addr_offset,
+ base + QUADSPI_SFAR);
+
+ qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
+ QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
+ base + QUADSPI_MCR);
+
+ qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
+ base + QUADSPI_SPTRCLR);
+
+ fsl_qspi_prepare_lut(q, op);
+
+ /*
+ * If we have large chunks of data, we read them through the AHB bus
+ * by accessing the mapped memory. In all other cases we use
+ * IP commands to access the flash.
+ */
+ if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
+ op->data.dir == SPI_MEM_DATA_IN) {
+ fsl_qspi_read_ahb(q, op);
+ } else {
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
+ QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);
+
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ fsl_qspi_fill_txfifo(q, op);
+
+ err = fsl_qspi_do_op(q, op);
+ }
+
+ /* Invalidate the data in the AHB buffer. */
+ fsl_qspi_invalidate(q);
+
+ mutex_unlock(&q->lock);
+
+ return err;
+}
+
+static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes > q->devtype_data->txfifo)
+ op->data.nbytes = q->devtype_data->txfifo;
+ } else {
+ if (op->data.nbytes > q->devtype_data->ahb_buf_size)
+ op->data.nbytes = q->devtype_data->ahb_buf_size;
+ else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
+ op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
+ }
+
+ return 0;
+}
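+/*
+ * Editor's worked example for the clamping above (numbers assume the
+ * imx7d table: rxfifo = 512, ahb_buf_size = 1K, txfifo = 512): a 2 KiB
+ * read is clamped to 1024 bytes; a 605-byte read exceeds rxfifo - 4 and
+ * is rounded down to 600, keeping the AHB read path 8-byte aligned as
+ * required by fsl_qspi_supports_op(); writes are simply capped at the
+ * 512-byte TX FIFO.
+ */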
+
+static int fsl_qspi_default_setup(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ u32 reg, addr_offset = 0;
+ int ret;
+
+ /* Disable and unprepare the clocks to avoid glitches reaching the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
+ /* Set the default frequency; we will change it later if necessary. */
+ ret = clk_set_rate(q->clk, 66000000);
+ if (ret)
+ return ret;
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+ qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ base + QUADSPI_MCR);
+ udelay(1);
+
+ /* Disable the module */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
+ reg = qspi_readl(q, base + QUADSPI_SMPR);
+ qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+ | QUADSPI_SMPR_HSENA_MASK
+ | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
+
+ /* We only use buffer3 for AHB reads. */
+ qspi_writel(q, 0, base + QUADSPI_BUF0IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF1IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF2IND);
+
+ qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
+ q->iobase + QUADSPI_BFGENCR);
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
+ QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
+ base + QUADSPI_BUF3CR);
+
+ if (needs_amba_base_offset(q))
+ addr_offset = q->memmap_phy;
+
+ /*
+ * In HW there can be a maximum of four chips on two buses with
+ * two chip selects on each bus. We use four chip selects in SW
+ * to differentiate between the four chips.
+ * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
+ * SFB2AD accordingly.
+ */
+ qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset,
+ base + QUADSPI_SFA1AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset,
+ base + QUADSPI_SFA2AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset,
+ base + QUADSPI_SFB1AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset,
+ base + QUADSPI_SFB2AD);
+
+ q->selected = -1;
+
+ /* Enable the module */
+ qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ base + QUADSPI_MCR);
+
+ /* clear all interrupt status */
+ qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
+
+ /* enable the interrupt */
+ qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
+
+ return 0;
+}
+
+static const char *fsl_qspi_get_name(struct spi_mem *mem)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ struct device *dev = &mem->spi->dev;
+ const char *name;
+
+ /*
+ * In order to keep mtdparts compatible with the old MTD driver at
+ * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the
+ * platform_device of the controller.
+ */
+ if (of_get_available_child_count(q->dev->of_node) == 1)
+ return dev_name(q->dev);
+
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", dev_name(q->dev),
+ mem->spi->chip_select);
+
+ if (!name) {
+ dev_err(dev, "failed to get memory for custom flash name\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return name;
+}
+
+static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
+ .adjust_op_size = fsl_qspi_adjust_op_size,
+ .supports_op = fsl_qspi_supports_op,
+ .exec_op = fsl_qspi_exec_op,
+ .get_name = fsl_qspi_get_name,
+};
+
+static int fsl_qspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct fsl_qspi *q;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*q));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ q = spi_controller_get_devdata(ctlr);
+ q->dev = dev;
+ q->devtype_data = of_device_get_match_data(dev);
+ if (!q->devtype_data) {
+ ret = -ENODEV;
+ goto err_put_ctrl;
+ }
+
+ platform_set_drvdata(pdev, q);
+
+ /* find the resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
+ q->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->iobase)) {
+ ret = PTR_ERR(q->iobase);
+ goto err_put_ctrl;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "QuadSPI-memory");
+ q->ahb_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->ahb_addr)) {
+ ret = PTR_ERR(q->ahb_addr);
+ goto err_put_ctrl;
+ }
+
+ q->memmap_phy = res->start;
+
+ /* find the clocks */
+ q->clk_en = devm_clk_get(dev, "qspi_en");
+ if (IS_ERR(q->clk_en)) {
+ ret = PTR_ERR(q->clk_en);
+ goto err_put_ctrl;
+ }
+
+ q->clk = devm_clk_get(dev, "qspi");
+ if (IS_ERR(q->clk)) {
+ ret = PTR_ERR(q->clk);
+ goto err_put_ctrl;
+ }
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ goto err_put_ctrl;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get the irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ ret = devm_request_irq(dev, ret,
+ fsl_qspi_irq_handler, 0, pdev->name, q);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ mutex_init(&q->lock);
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 4;
+ ctlr->mem_ops = &fsl_qspi_mem_ops;
+
+ fsl_qspi_default_setup(q);
+
+ ctlr->dev.of_node = np;
+
+ ret = spi_register_controller(ctlr);
+ if (ret)
+ goto err_destroy_mutex;
+
+ return 0;
+
+err_destroy_mutex:
+ mutex_destroy(&q->lock);
+
+err_disable_clk:
+ fsl_qspi_clk_disable_unprep(q);
+
+err_put_ctrl:
+ spi_controller_put(ctlr);
+
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
+ return ret;
+}
+
+static int fsl_qspi_remove(struct platform_device *pdev)
+{
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+
+ /* disable the hardware */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ mutex_destroy(&q->lock);
+
+ return 0;
+}
+
+static int fsl_qspi_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int fsl_qspi_resume(struct device *dev)
+{
+ struct fsl_qspi *q = dev_get_drvdata(dev);
+
+ fsl_qspi_default_setup(q);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
+ { .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, },
+ { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
+
+static const struct dev_pm_ops fsl_qspi_pm_ops = {
+ .suspend = fsl_qspi_suspend,
+ .resume = fsl_qspi_resume,
+};
+
+static struct platform_driver fsl_qspi_driver = {
+ .driver = {
+ .name = "fsl-quadspi",
+ .of_match_table = fsl_qspi_dt_ids,
+ .pm = &fsl_qspi_pm_ops,
+ },
+ .probe = fsl_qspi_probe,
+ .remove = fsl_qspi_remove,
+};
+module_platform_driver(fsl_qspi_driver);
+
+MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
+MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
+MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>");
+MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index fdb7cb88fb56..5f0b0d5bfef4 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -89,9 +89,6 @@ struct spi_geni_master {
int irq;
};
-static void handle_fifo_timeout(struct spi_master *spi,
- struct spi_message *msg);
-
static int get_spi_clk_cfg(unsigned int speed_hz,
struct spi_geni_master *mas,
unsigned int *clk_idx,
@@ -122,6 +119,32 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
return ret;
}
+static void handle_fifo_timeout(struct spi_master *spi,
+ struct spi_message *msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ unsigned long time_left, flags;
+ struct geni_se *se = &mas->se;
+
+ spin_lock_irqsave(&mas->lock, flags);
+ reinit_completion(&mas->xfer_done);
+ mas->cur_mcmd = CMD_CANCEL;
+ geni_se_cancel_m_cmd(se);
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ spin_unlock_irqrestore(&mas->lock, flags);
+ time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (time_left)
+ return;
+
+ spin_lock_irqsave(&mas->lock, flags);
+ reinit_completion(&mas->xfer_done);
+ geni_se_abort_m_cmd(se);
+ spin_unlock_irqrestore(&mas->lock, flags);
+ time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (!time_left)
+ dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+}
+
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
@@ -233,7 +256,6 @@ static int spi_geni_prepare_message(struct spi_master *spi,
struct geni_se *se = &mas->se;
geni_se_select_mode(se, GENI_SE_FIFO);
- reinit_completion(&mas->xfer_done);
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -357,32 +379,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}
-static void handle_fifo_timeout(struct spi_master *spi,
- struct spi_message *msg)
-{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
- unsigned long time_left, flags;
- struct geni_se *se = &mas->se;
-
- spin_lock_irqsave(&mas->lock, flags);
- reinit_completion(&mas->xfer_done);
- mas->cur_mcmd = CMD_CANCEL;
- geni_se_cancel_m_cmd(se);
- writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
- spin_unlock_irqrestore(&mas->lock, flags);
- time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
- if (time_left)
- return;
-
- spin_lock_irqsave(&mas->lock, flags);
- reinit_completion(&mas->xfer_done);
- geni_se_abort_m_cmd(se);
- spin_unlock_irqrestore(&mas->lock, flags);
- time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
- if (!time_left)
- dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
-}
-
static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_device *slv,
struct spi_transfer *xfer)
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index a4aee26028cd..53b35c56a557 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -428,7 +428,8 @@ static int spi_gpio_probe(struct platform_device *pdev)
return status;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL;
+ master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
+ SPI_CS_HIGH;
master->flags = master_flags;
master->bus_num = pdev->id;
/* The master needs to think there is a chipselect even if not connected */
@@ -455,7 +456,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
}
spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
- spi_gpio->bitbang.flags = SPI_CS_HIGH;
status = spi_bitbang_start(&spi_gpio->bitbang);
if (status)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 5217a5628be2..a4d8d19ecff9 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -537,7 +537,6 @@ EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
/**
* spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
* @desc: the direct mapping descriptor to destroy
- * @info: direct mapping information
*
* This function destroys a direct mapping descriptor previously created by
* spi_mem_dirmap_create().
@@ -548,9 +547,80 @@ void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
ctlr->mem_ops->dirmap_destroy(desc);
+
+ kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
+static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
+{
+ struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;
+
+ spi_mem_dirmap_destroy(desc);
+}
+
+/**
+ * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
+ * it to a device
+ * @dev: device the dirmap desc will be attached to
+ * @mem: SPI mem device this direct mapping should be created for
+ * @info: direct mapping information
+ *
+ * devm_ variant of the spi_mem_dirmap_create() function. See
+ * spi_mem_dirmap_create() for more details.
+ *
+ * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
+ */
+struct spi_mem_dirmap_desc *
+devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info)
+{
+ struct spi_mem_dirmap_desc **ptr, *desc;
+
+ ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ desc = spi_mem_dirmap_create(mem, info);
+ if (IS_ERR(desc)) {
+ devres_free(ptr);
+ } else {
+ *ptr = desc;
+ devres_add(dev, ptr);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
+
+static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
+{
+ struct spi_mem_dirmap_desc **ptr = res;
+
+ if (WARN_ON(!ptr || !*ptr))
+ return 0;
+
+ return *ptr == data;
+}
+
+/**
+ * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
+ * to a device
+ * @dev: device the dirmap desc is attached to
+ * @desc: the direct mapping descriptor to destroy
+ *
+ * devm_ variant of the spi_mem_dirmap_destroy() function. See
+ * spi_mem_dirmap_destroy() for more details.
+ */
+void devm_spi_mem_dirmap_destroy(struct device *dev,
+ struct spi_mem_dirmap_desc *desc)
+{
+ devres_release(dev, devm_spi_mem_dirmap_release,
+ devm_spi_mem_dirmap_match, desc);
+}
+EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
+
/**
* spi_mem_dirmap_dirmap_read() - Read data through a direct mapping
* @desc: direct mapping descriptor
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 6ac95a2a21ce..7bf53cfc25d6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -39,6 +39,7 @@
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
+#include <trace/events/spi.h>
#define DRIVER_NAME "mxs-spi"
@@ -374,6 +375,8 @@ static int mxs_spi_transfer_one(struct spi_master *master,
list_for_each_entry(t, &m->transfers, transfer_list) {
+ trace_spi_transfer_start(m, t);
+
status = mxs_spi_setup_transfer(m->spi, t);
if (status)
break;
@@ -419,6 +422,8 @@ static int mxs_spi_transfer_one(struct spi_master *master,
flag);
}
+ trace_spi_transfer_stop(m, t);
+
if (status) {
stmp_reset_block(ssp->base);
break;
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index e1dca79b9090..734a2b956959 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -465,7 +465,8 @@ out_master_put:
static int npcm_pspi_remove(struct platform_device *pdev)
{
- struct npcm_pspi *priv = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
npcm_pspi_reset_hw(priv);
clk_disable_unprepare(priv->clk);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
new file mode 100644
index 000000000000..8894f98cc99c
--- /dev/null
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * NXP FlexSPI (FSPI) controller driver.
+ *
+ * Copyright 2019 NXP.
+ *
+ * FlexSPI is a flexible SPI host controller which supports two SPI
+ * channels and up to 4 external devices. Each channel supports
+ * Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional
+ * data lines).
+ *
+ * The FlexSPI controller is driven by the LUT (Look-up Table) registers.
+ * The LUT registers hold look-up tables of instruction sequences.
+ * A valid sequence consists of four LUT registers.
+ * A maximum of 32 LUT sequences can be programmed simultaneously.
+ *
+ * LUTs are created at run time based on the commands passed from the
+ * spi-mem framework, so only a single LUT index is used.
+ *
+ * Software-triggered flash read/write access goes through the IP bus.
+ *
+ * Memory-mapped read access goes through the AHB bus.
+ *
+ * Based on SPI MEM interface and spi-fsl-qspi.c driver.
+ *
+ * Author:
+ * Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>
+ * Boris Brezillon <bbrezillon@kernel.org>
+ * Frieder Schrempf <frieder.schrempf@kontron.de>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/sizes.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * The driver only uses a single LUT entry, which is updated on
+ * each call of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry (31).
+ */
+#define SEQID_LUT 31
+
+/* Registers used by the driver */
+#define FSPI_MCR0 0x00
+#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
+#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16)
+#define FSPI_MCR0_LEARN_EN BIT(15)
+#define FSPI_MCR0_SCRFRUN_EN BIT(14)
+#define FSPI_MCR0_OCTCOMB_EN BIT(13)
+#define FSPI_MCR0_DOZE_EN BIT(12)
+#define FSPI_MCR0_HSEN BIT(11)
+#define FSPI_MCR0_SERCLKDIV BIT(8)
+#define FSPI_MCR0_ATDF_EN BIT(7)
+#define FSPI_MCR0_ARDF_EN BIT(6)
+#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4)
+#define FSPI_MCR0_END_CFG(x) ((x) << 2)
+#define FSPI_MCR0_MDIS BIT(1)
+#define FSPI_MCR0_SWRST BIT(0)
+
+#define FSPI_MCR1 0x04
+#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16)
+#define FSPI_MCR1_AHB_TIMEOUT(x) (x)
+
+#define FSPI_MCR2 0x08
+#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24)
+#define FSPI_MCR2_SAMEDEVICEEN BIT(15)
+#define FSPI_MCR2_CLRLRPHS BIT(14)
+#define FSPI_MCR2_ABRDATSZ BIT(8)
+#define FSPI_MCR2_ABRLEARN BIT(7)
+#define FSPI_MCR2_ABR_READ BIT(6)
+#define FSPI_MCR2_ABRWRITE BIT(5)
+#define FSPI_MCR2_ABRDUMMY BIT(4)
+#define FSPI_MCR2_ABR_MODE BIT(3)
+#define FSPI_MCR2_ABRCADDR BIT(2)
+#define FSPI_MCR2_ABRRADDR BIT(1)
+#define FSPI_MCR2_ABR_CMD BIT(0)
+
+#define FSPI_AHBCR 0x0c
+#define FSPI_AHBCR_RDADDROPT BIT(6)
+#define FSPI_AHBCR_PREF_EN BIT(5)
+#define FSPI_AHBCR_BUFF_EN BIT(4)
+#define FSPI_AHBCR_CACH_EN BIT(3)
+#define FSPI_AHBCR_CLRTXBUF BIT(2)
+#define FSPI_AHBCR_CLRRXBUF BIT(1)
+#define FSPI_AHBCR_PAR_EN BIT(0)
+
+#define FSPI_INTEN 0x10
+#define FSPI_INTEN_SCLKSBWR BIT(9)
+#define FSPI_INTEN_SCLKSBRD BIT(8)
+#define FSPI_INTEN_DATALRNFL BIT(7)
+#define FSPI_INTEN_IPTXWE BIT(6)
+#define FSPI_INTEN_IPRXWA BIT(5)
+#define FSPI_INTEN_AHBCMDERR BIT(4)
+#define FSPI_INTEN_IPCMDERR BIT(3)
+#define FSPI_INTEN_AHBCMDGE BIT(2)
+#define FSPI_INTEN_IPCMDGE BIT(1)
+#define FSPI_INTEN_IPCMDDONE BIT(0)
+
+#define FSPI_INTR 0x14
+#define FSPI_INTR_SCLKSBWR BIT(9)
+#define FSPI_INTR_SCLKSBRD BIT(8)
+#define FSPI_INTR_DATALRNFL BIT(7)
+#define FSPI_INTR_IPTXWE BIT(6)
+#define FSPI_INTR_IPRXWA BIT(5)
+#define FSPI_INTR_AHBCMDERR BIT(4)
+#define FSPI_INTR_IPCMDERR BIT(3)
+#define FSPI_INTR_AHBCMDGE BIT(2)
+#define FSPI_INTR_IPCMDGE BIT(1)
+#define FSPI_INTR_IPCMDDONE BIT(0)
+
+#define FSPI_LUTKEY 0x18
+#define FSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define FSPI_LCKCR 0x1C
+
+#define FSPI_LCKER_LOCK 0x1
+#define FSPI_LCKER_UNLOCK 0x2
+
+#define FSPI_BUFXCR_INVALID_MSTRID 0xE
+#define FSPI_AHBRX_BUF0CR0 0x20
+#define FSPI_AHBRX_BUF1CR0 0x24
+#define FSPI_AHBRX_BUF2CR0 0x28
+#define FSPI_AHBRX_BUF3CR0 0x2C
+#define FSPI_AHBRX_BUF4CR0 0x30
+#define FSPI_AHBRX_BUF5CR0 0x34
+#define FSPI_AHBRX_BUF6CR0 0x38
+#define FSPI_AHBRX_BUF7CR0 0x3C
+#define FSPI_AHBRXBUF0CR7_PREF BIT(31)
+
+#define FSPI_AHBRX_BUF0CR1 0x40
+#define FSPI_AHBRX_BUF1CR1 0x44
+#define FSPI_AHBRX_BUF2CR1 0x48
+#define FSPI_AHBRX_BUF3CR1 0x4C
+#define FSPI_AHBRX_BUF4CR1 0x50
+#define FSPI_AHBRX_BUF5CR1 0x54
+#define FSPI_AHBRX_BUF6CR1 0x58
+#define FSPI_AHBRX_BUF7CR1 0x5C
+
+#define FSPI_FLSHA1CR0 0x60
+#define FSPI_FLSHA2CR0 0x64
+#define FSPI_FLSHB1CR0 0x68
+#define FSPI_FLSHB2CR0 0x6C
+#define FSPI_FLSHXCR0_SZ_KB 10
+#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB)
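+/*
+ * For example (illustrative only): FSPI_FLSHXCR0_SZ() converts a size in
+ * bytes to KiB by shifting right by 10 bits, so a 256 MiB memory-mapped
+ * window (0x10000000 bytes) would be programmed as 0x40000 (256 * 1024 KiB).
+ */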
+
+#define FSPI_FLSHA1CR1 0x70
+#define FSPI_FLSHA2CR1 0x74
+#define FSPI_FLSHB1CR1 0x78
+#define FSPI_FLSHB2CR1 0x7C
+#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16)
+#define FSPI_FLSHXCR1_CAS(x) ((x) << 11)
+#define FSPI_FLSHXCR1_WA BIT(10)
+#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5)
+#define FSPI_FLSHXCR1_TCSS(x) (x)
+
+#define FSPI_FLSHA1CR2 0x80
+#define FSPI_FLSHA2CR2 0x84
+#define FSPI_FLSHB1CR2 0x88
+#define FSPI_FLSHB2CR2 0x8C
+#define FSPI_FLSHXCR2_CLRINSP BIT(24)
+#define FSPI_FLSHXCR2_AWRWAIT BIT(16)
+#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13
+#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8
+#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5
+#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0
+
+#define FSPI_IPCR0 0xA0
+
+#define FSPI_IPCR1 0xA4
+#define FSPI_IPCR1_IPAREN BIT(31)
+#define FSPI_IPCR1_SEQNUM_SHIFT 24
+#define FSPI_IPCR1_SEQID_SHIFT 16
+#define FSPI_IPCR1_IDATSZ(x) (x)
+
+#define FSPI_IPCMD 0xB0
+#define FSPI_IPCMD_TRG BIT(0)
+
+#define FSPI_DLPR 0xB4
+
+#define FSPI_IPRXFCR 0xB8
+#define FSPI_IPRXFCR_CLR BIT(0)
+#define FSPI_IPRXFCR_DMA_EN BIT(1)
+#define FSPI_IPRXFCR_WMRK(x) ((x) << 2)
+
+#define FSPI_IPTXFCR 0xBC
+#define FSPI_IPTXFCR_CLR BIT(0)
+#define FSPI_IPTXFCR_DMA_EN BIT(1)
+#define FSPI_IPTXFCR_WMRK(x) ((x) << 2)
+
+#define FSPI_DLLACR 0xC0
+#define FSPI_DLLACR_OVRDEN BIT(8)
+
+#define FSPI_DLLBCR 0xC4
+#define FSPI_DLLBCR_OVRDEN BIT(8)
+
+#define FSPI_STS0 0xE0
+#define FSPI_STS0_DLPHB(x) ((x) << 8)
+#define FSPI_STS0_DLPHA(x) ((x) << 4)
+#define FSPI_STS0_CMD_SRC(x) ((x) << 2)
+#define FSPI_STS0_ARB_IDLE BIT(1)
+#define FSPI_STS0_SEQ_IDLE BIT(0)
+
+#define FSPI_STS1 0xE4
+#define FSPI_STS1_IP_ERRCD(x) ((x) << 24)
+#define FSPI_STS1_IP_ERRID(x) ((x) << 16)
+#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8)
+#define FSPI_STS1_AHB_ERRID(x) (x)
+
+#define FSPI_AHBSPNST 0xEC
+#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16)
+#define FSPI_AHBSPNST_BUFID(x) ((x) << 1)
+#define FSPI_AHBSPNST_ACTIVE BIT(0)
+
+#define FSPI_IPRXFSTS 0xF0
+#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16)
+#define FSPI_IPRXFSTS_FILL(x) (x)
+
+#define FSPI_IPTXFSTS 0xF4
+#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16)
+#define FSPI_IPTXFSTS_FILL(x) (x)
+
+#define FSPI_RFDR 0x100
+#define FSPI_TFDR 0x180
+
+#define FSPI_LUT_BASE 0x200
+#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+#define FSPI_LUT_REG(idx) \
+ (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
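+/*
+ * Worked example: each LUT sequence occupies four 32-bit registers, so with
+ * SEQID_LUT = 31 the offset is 31 * 4 * 4 = 0x1F0, and FSPI_LUT_REG(0) to
+ * FSPI_LUT_REG(3) map to offsets 0x3F0, 0x3F4, 0x3F8 and 0x3FC.
+ */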
+
+/* register map end */
+
+/* Instruction set for the LUT register. */
+#define LUT_STOP 0x00
+#define LUT_CMD 0x01
+#define LUT_ADDR 0x02
+#define LUT_CADDR_SDR 0x03
+#define LUT_MODE 0x04
+#define LUT_MODE2 0x05
+#define LUT_MODE4 0x06
+#define LUT_MODE8 0x07
+#define LUT_NXP_WRITE 0x08
+#define LUT_NXP_READ 0x09
+#define LUT_LEARN_SDR 0x0A
+#define LUT_DATSZ_SDR 0x0B
+#define LUT_DUMMY 0x0C
+#define LUT_DUMMY_RWDS_SDR 0x0D
+#define LUT_JMP_ON_CS 0x1F
+#define LUT_CMD_DDR 0x21
+#define LUT_ADDR_DDR 0x22
+#define LUT_CADDR_DDR 0x23
+#define LUT_MODE_DDR 0x24
+#define LUT_MODE2_DDR 0x25
+#define LUT_MODE4_DDR 0x26
+#define LUT_MODE8_DDR 0x27
+#define LUT_WRITE_DDR 0x28
+#define LUT_READ_DDR 0x29
+#define LUT_LEARN_DDR 0x2A
+#define LUT_DATSZ_DDR 0x2B
+#define LUT_DUMMY_DDR 0x2C
+#define LUT_DUMMY_RWDS_DDR 0x2D
+
+/*
+ * Calculate the number of PAD bits required for the LUT register.
+ *
+ * The pad stands for the number of IO lines [0:7].
+ * For example, an octal read needs eight IO lines,
+ * so you should use LUT_PAD(8). This macro
+ * returns 3, i.e. use eight (2^3) IO lines for the read.
+ */
+#define LUT_PAD(x) (fls(x) - 1)
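+/*
+ * For reference: LUT_PAD(1) = 0, LUT_PAD(2) = 1, LUT_PAD(4) = 2 and
+ * LUT_PAD(8) = 3, i.e. the PAD field holds log2 of the bus width.
+ */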
+
+/*
+ * Macro for constructing the LUT entries with the following
+ * register layout:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define PAD_SHIFT 8
+#define INSTR_SHIFT 10
+#define OPRND_SHIFT 16
+
+/* Macros for constructing the LUT register. */
+#define LUT_DEF(idx, ins, pad, opr) \
+ ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
+ (opr)) << (((idx) % 2) * OPRND_SHIFT))
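+/*
+ * Illustrative example (opcode chosen arbitrarily): LUT_DEF(0, LUT_CMD,
+ * LUT_PAD(1), 0x6b) encodes a single-line command with opcode 0x6b in the
+ * lower half-word as (0x01 << 10) | (0 << 8) | 0x6b = 0x46b, while an odd
+ * idx would shift the same encoding into the upper 16 bits of the register.
+ */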
+
+#define POLL_TOUT 5000
+#define NXP_FSPI_MAX_CHIPSELECT 4
+
+struct nxp_fspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
+ bool little_endian;
+};
+
+static const struct nxp_fspi_devtype_data lx2160a_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+struct nxp_fspi {
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ u32 memmap_phy_size;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ const struct nxp_fspi_devtype_data *devtype_data;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
+ int selected;
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The FSPI controller's endianness is independent of
+ * the CPU core's endianness. Even if the CPU core is
+ * little-endian, the FSPI controller can be configured as
+ * either big-endian or little-endian.
+ */
+static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr)
+{
+ if (f->devtype_data->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr)
+{
+ if (f->devtype_data->little_endian)
+ return ioread32(addr);
+ else
+ return ioread32be(addr);
+}
+
+static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id)
+{
+ struct nxp_fspi *f = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ fspi_writel(f, FSPI_INTR_IPCMDDONE, f->iobase + FSPI_INTR);
+
+ if (reg & FSPI_INTR_IPCMDDONE)
+ complete(&f->c);
+
+ return IRQ_HANDLED;
+}
+
+static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width)
+{
+ switch (width) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool nxp_fspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth);
+
+ if (op->addr.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth);
+
+ if (op->dummy.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->data.buswidth);
+
+ if (ret)
+ return false;
+
+ /*
+ * The number of address bytes should be equal to or less than 4 bytes.
+ */
+ if (op->addr.nbytes > 4)
+ return false;
+
+ /*
+ * If the requested address value is greater than the controller's
+ * assigned memory-mapped space, reject the operation as it does
+ * not fit in the assigned address range.
+ */
+ if (op->addr.val >= f->memmap_phy_size)
+ return false;
+
+ /* Max 64 dummy clock cycles supported */
+ if (op->dummy.buswidth &&
+ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
+ return false;
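+ /*
+ * (For example, 8 dummy bytes on a single I/O line amount to
+ * 8 * 8 / 1 = 64 cycles, which is exactly this limit.)
+ */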
+
+ /* Max data length, check controller limits and alignment */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ (op->data.nbytes > f->devtype_data->ahb_buf_size ||
+ (op->data.nbytes > f->devtype_data->rxfifo - 4 &&
+ !IS_ALIGNED(op->data.nbytes, 8))))
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ op->data.nbytes > f->devtype_data->txfifo)
+ return false;
+
+ return true;
+}
+
+/* Instead of busy looping invoke readl_poll_timeout functionality. */
+static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base,
+ u32 mask, u32 delay_us,
+ u32 timeout_us, bool c)
+{
+ u32 reg;
+
+ if (!f->devtype_data->little_endian)
+ mask = (u32)cpu_to_be32(mask);
+
+ if (c)
+ return readl_poll_timeout(base, reg, (reg & mask),
+ delay_us, timeout_us);
+ else
+ return readl_poll_timeout(base, reg, !(reg & mask),
+ delay_us, timeout_us);
+}
+
+/*
+ * If the slave device content is changed by a Write/Erase, the AHB
+ * buffer needs to be invalidated. This is achieved by resetting the
+ * controller after setting the MCR0[SWRESET] bit.
+ */
+static inline void nxp_fspi_invalid(struct nxp_fspi *f)
+{
+ u32 reg;
+ int ret;
+
+ reg = fspi_readl(f, f->iobase + FSPI_MCR0);
+ fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0);
+
+ /* W1C register, wait until it clears. */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
+ FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
+ WARN_ON(ret);
+}
+
+static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
+
+ /* cmd */
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+
+ /* addr bytes */
+ if (op->addr.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
+ LUT_PAD(op->addr.buswidth),
+ op->addr.nbytes * 8);
+ lutidx++;
+ }
+
+ /* dummy bytes, if needed */
+ if (op->dummy.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ /*
+ * Due to a FlexSPI controller limitation, the number of PADs
+ * for the dummy buswidth needs to be programmed equal to the
+ * data buswidth.
+ */
+ LUT_PAD(op->data.buswidth),
+ op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ lutidx++;
+ }
+
+ /* read/write data bytes */
+ if (op->data.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ LUT_NXP_READ : LUT_NXP_WRITE,
+ LUT_PAD(op->data.buswidth),
+ 0);
+ lutidx++;
+ }
+
+ /* stop condition. */
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
+
+ /* unlock LUT */
+ fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
+ fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
+
+ /* fill LUT */
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
+
+ dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n",
+ op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]);
+
+ /* lock LUT */
+ fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
+ fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR);
+}
+
+static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
+{
+ int ret;
+
+ ret = clk_prepare_enable(f->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(f->clk);
+ if (ret) {
+ clk_disable_unprepare(f->clk_en);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
+{
+ clk_disable_unprepare(f->clk);
+ clk_disable_unprepare(f->clk_en);
+}
+
+/*
+ * In the FlexSPI controller, flash access is based on the value of the
+ * FSPI_FLSHXXCR0 register and the start base address of the slave device.
+ *
+ * (Higher address)
+ * -------- <-- FLSHB2CR0
+ * | B2 |
+ * | |
+ * B2 start address --> -------- <-- FLSHB1CR0
+ * | B1 |
+ * | |
+ * B1 start address --> -------- <-- FLSHA2CR0
+ * | A2 |
+ * | |
+ * A2 start address --> -------- <-- FLSHA1CR0
+ * | A1 |
+ * | |
+ * A1 start address --> -------- (Lower address)
+ *
+ *
+ * The start base address defines the starting address range for a given
+ * CS, and FSPI_FLSHXXCR0 defines the size of the slave device connected
+ * at that CS.
+ *
+ * However, different targets have different combinations of CS; some
+ * targets only have a single CS or two CS covering the controller's
+ * full memory-mapped space.
+ * The implementation is therefore kept independent of the size and
+ * number of the connected slave devices:
+ * the controller's memory-mapped space size is assigned as the size of
+ * the connected slave device.
+ * FLSHxxCR0 is initially cleared and a value is then assigned only to
+ * the Flash configuration register of the selected chip select.
+ *
+ * For example, to access CS2 (B1), the FLSHB1CR0 register is set to the
+ * memory-mapped size of the controller, while the FLSHxxCR0 registers
+ * of the remaining CS stay zero.
+ *
+ */
+static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
+{
+ unsigned long rate = spi->max_speed_hz;
+ int ret;
+ uint64_t size_kb;
+
+ /*
+ * Return if the previously selected slave device is the same as the
+ * currently requested one.
+ */
+ if (f->selected == spi->chip_select)
+ return;
+
+ /* Reset FLSHxxCR0 registers */
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0);
+
+ /* Assign the controller's memory-mapped space as the flash size, in KiB. */
+ size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size);
+
+ fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 +
+ 4 * spi->chip_select);
+
+ dev_dbg(f->dev, "Slave device [CS:%x] selected\n", spi->chip_select);
+
+ nxp_fspi_clk_disable_unprep(f);
+
+ ret = clk_set_rate(f->clk, rate);
+ if (ret)
+ return;
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return;
+
+ f->selected = spi->chip_select;
+}
+
+static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+{
+ u32 len = op->data.nbytes;
+
+ /* Read out the data directly from the AHB buffer. */
+ memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len);
+}
+
+static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int i, ret;
+ u8 *buf = (u8 *) op->data.buf.out;
+
+ /* clear the TX FIFO. */
+ fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR);
+
+ /*
+ * The default watermark level is 8 bytes, so in a single write
+ * request the controller can write at most 8 bytes of data.
+ */
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) {
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ fspi_writel(f, *(u32 *) (buf + i), base + FSPI_TFDR);
+ fspi_writel(f, *(u32 *) (buf + i + 4), base + FSPI_TFDR + 4);
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+ }
+
+ if (i < op->data.nbytes) {
+ u32 data = 0;
+ int j;
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
+ memcpy(&data, buf + i + j, 4);
+ fspi_writel(f, data, base + FSPI_TFDR + j);
+ }
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+ }
+}
+
+static void nxp_fspi_read_rxfifo(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int i, ret;
+ int len = op->data.nbytes;
+ u8 *buf = (u8 *) op->data.buf.in;
+
+ /*
+ * The default watermark level is 8 bytes, so in a single read
+ * request the controller can read at most 8 bytes of data.
+ */
+ for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) {
+ /* Wait for RXFIFO available */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPRXWA, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ *(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR);
+ *(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4);
+ /* move the FIFO pointer */
+ fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
+ }
+
+ if (i < len) {
+ u32 tmp;
+ int size, j;
+
+ buf = op->data.buf.in + i;
+ /* Wait for RXFIFO available */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPRXWA, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ len = op->data.nbytes - i;
+ for (j = 0; j < op->data.nbytes - i; j += 4) {
+ tmp = fspi_readl(f, base + FSPI_RFDR + j);
+ size = min(len, 4);
+ memcpy(buf + j, &tmp, size);
+ len -= size;
+ }
+ }
+
+ /* invalidate the RXFIFO */
+ fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR);
+ /* move the FIFO pointer */
+ fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
+}
+
+static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int seqnum = 0;
+ int err = 0;
+ u32 reg;
+
+ reg = fspi_readl(f, base + FSPI_IPRXFCR);
+ /* invalidate the RXFIFO first */
+ reg &= ~FSPI_IPRXFCR_DMA_EN;
+ reg = reg | FSPI_IPRXFCR_CLR;
+ fspi_writel(f, reg, base + FSPI_IPRXFCR);
+
+ init_completion(&f->c);
+
+ fspi_writel(f, op->addr.val, base + FSPI_IPCR0);
+ /*
+ * Always start the sequence at the same index since we update
+ * the LUT at each exec_op() call. Also specify the DATA
+ * length, since it has not been specified in the LUT.
+ */
+ fspi_writel(f, op->data.nbytes |
+ (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
+ (seqnum << FSPI_IPCR1_SEQNUM_SHIFT),
+ base + FSPI_IPCR1);
+
+ /* Trigger the LUT now. */
+ fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD);
+
+ /* Wait for the interrupt. */
+ if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+ /* Invoke an IP data read if the request is a data read. */
+ if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ nxp_fspi_read_rxfifo(f, op);
+
+ return err;
+}
+
+static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ int err = 0;
+
+ mutex_lock(&f->lock);
+
+ /* Wait for the controller to become ready. */
+ err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
+ FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
+ WARN_ON(err);
+
+ nxp_fspi_select_mem(f, mem->spi);
+
+ nxp_fspi_prepare_lut(f, op);
+ /*
+ * If we have large chunks of data, we read them through the AHB bus
+ * by accessing the mapped memory. In all other cases we use
+ * IP commands to access the flash.
+ */
+ if (op->data.nbytes > (f->devtype_data->rxfifo - 4) &&
+ op->data.dir == SPI_MEM_DATA_IN) {
+ nxp_fspi_read_ahb(f, op);
+ } else {
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ nxp_fspi_fill_txfifo(f, op);
+
+ err = nxp_fspi_do_op(f, op);
+ }
+
+ /* Invalidate the data in the AHB buffer. */
+ nxp_fspi_invalid(f);
+
+ mutex_unlock(&f->lock);
+
+ return err;
+}
+
+static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes > f->devtype_data->txfifo)
+ op->data.nbytes = f->devtype_data->txfifo;
+ } else {
+ if (op->data.nbytes > f->devtype_data->ahb_buf_size)
+ op->data.nbytes = f->devtype_data->ahb_buf_size;
+ else if (op->data.nbytes > (f->devtype_data->rxfifo - 4))
+ op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
+ }
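+
+ /*
+ * Illustrative effect with the lx2160a parameters above: a 3000-byte
+ * read would be clamped to the 2 KiB AHB buffer size, while a 601-byte
+ * read (above rxfifo - 4 = 508) would be rounded down to 600 bytes to
+ * satisfy the 8-byte alignment required by nxp_fspi_supports_op().
+ */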
+
+ return 0;
+}
+
+static int nxp_fspi_default_setup(struct nxp_fspi *f)
+{
+ void __iomem *base = f->iobase;
+ int ret, i;
+ u32 reg;
+
+ /* Disable and unprepare the clock to avoid glitches reaching the controller. */
+ nxp_fspi_clk_disable_unprep(f);
+
+ /* Set the default frequency; we will change it later if necessary. */
+ ret = clk_set_rate(f->clk, 20000000);
+ if (ret)
+ return ret;
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+ /* W1C register, wait until it clears. */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
+ FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
+ WARN_ON(ret);
+
+ /* Disable the module */
+ fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0);
+
+ /* Reset the DLL register to default value */
+ fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR);
+ fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);
+
+ /* enable module */
+ fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF),
+ base + FSPI_MCR0);
+
+ /*
+ * Disable same device enable bit and configure all slave devices
+ * independently.
+ */
+ reg = fspi_readl(f, f->iobase + FSPI_MCR2);
+ reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN);
+ fspi_writel(f, reg, base + FSPI_MCR2);
+
+ /* AHB configuration for access buffers 0~6; buffer 7 is set up below. */
+ for (i = 0; i < 7; i++)
+ fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i);
+
+ /*
+ * Set ADATSZ with the maximum AHB buffer size to improve the read
+ * performance.
+ */
+ fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 |
+ FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0);
+
+ /* prefetch and no start address alignment limitation */
+ fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
+ base + FSPI_AHBCR);
+
+ /* AHB Read - Set lut sequence ID for all CS. */
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2);
+
+ f->selected = -1;
+
+ /* enable the interrupt */
+ fspi_writel(f, FSPI_INTEN_IPCMDDONE, base + FSPI_INTEN);
+
+ return 0;
+}
+
+static const char *nxp_fspi_get_name(struct spi_mem *mem)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ struct device *dev = &mem->spi->dev;
+ const char *name;
+
+ // Set custom name derived from the platform_device of the controller.
+ if (of_get_available_child_count(f->dev->of_node) == 1)
+ return dev_name(f->dev);
+
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", dev_name(f->dev),
+ mem->spi->chip_select);
+
+ if (!name) {
+ dev_err(dev, "failed to get memory for custom flash name\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return name;
+}
+
+static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
+ .adjust_op_size = nxp_fspi_adjust_op_size,
+ .supports_op = nxp_fspi_supports_op,
+ .exec_op = nxp_fspi_exec_op,
+ .get_name = nxp_fspi_get_name,
+};
+
+static int nxp_fspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct nxp_fspi *f;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*f));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL |
+ SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL;
+
+ f = spi_controller_get_devdata(ctlr);
+ f->dev = dev;
+ f->devtype_data = of_device_get_match_data(dev);
+ if (!f->devtype_data) {
+ ret = -ENODEV;
+ goto err_put_ctrl;
+ }
+
+ platform_set_drvdata(pdev, f);
+
+ /* find the resources - configuration register address space */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_base");
+ f->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(f->iobase)) {
+ ret = PTR_ERR(f->iobase);
+ goto err_put_ctrl;
+ }
+
+ /* find the resources - controller memory mapped space */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
+ f->ahb_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(f->ahb_addr)) {
+ ret = PTR_ERR(f->ahb_addr);
+ goto err_put_ctrl;
+ }
+
+ /* assign memory mapped starting address and mapped size. */
+ f->memmap_phy = res->start;
+ f->memmap_phy_size = resource_size(res);
+
+ /* find the clocks */
+ f->clk_en = devm_clk_get(dev, "fspi_en");
+ if (IS_ERR(f->clk_en)) {
+ ret = PTR_ERR(f->clk_en);
+ goto err_put_ctrl;
+ }
+
+ f->clk = devm_clk_get(dev, "fspi");
+ if (IS_ERR(f->clk)) {
+ ret = PTR_ERR(f->clk);
+ goto err_put_ctrl;
+ }
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ goto err_put_ctrl;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get the irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ ret = devm_request_irq(dev, ret,
+ nxp_fspi_irq_handler, 0, pdev->name, f);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ mutex_init(&f->lock);
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
+ ctlr->mem_ops = &nxp_fspi_mem_ops;
+
+ nxp_fspi_default_setup(f);
+
+ ctlr->dev.of_node = np;
+
+ ret = spi_register_controller(ctlr);
+ if (ret)
+ goto err_destroy_mutex;
+
+ return 0;
+
+err_destroy_mutex:
+ mutex_destroy(&f->lock);
+
+err_disable_clk:
+ nxp_fspi_clk_disable_unprep(f);
+
+err_put_ctrl:
+ spi_controller_put(ctlr);
+
+ dev_err(dev, "NXP FSPI probe failed\n");
+ return ret;
+}
+
+static int nxp_fspi_remove(struct platform_device *pdev)
+{
+ struct nxp_fspi *f = platform_get_drvdata(pdev);
+
+ /* disable the hardware */
+ fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);
+
+ nxp_fspi_clk_disable_unprep(f);
+
+ mutex_destroy(&f->lock);
+
+ return 0;
+}
+
+static int nxp_fspi_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int nxp_fspi_resume(struct device *dev)
+{
+ struct nxp_fspi *f = dev_get_drvdata(dev);
+
+ nxp_fspi_default_setup(f);
+
+ return 0;
+}
+
+static const struct of_device_id nxp_fspi_dt_ids[] = {
+ { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
+
+static const struct dev_pm_ops nxp_fspi_pm_ops = {
+ .suspend = nxp_fspi_suspend,
+ .resume = nxp_fspi_resume,
+};
+
+static struct platform_driver nxp_fspi_driver = {
+ .driver = {
+ .name = "nxp-fspi",
+ .of_match_table = nxp_fspi_dt_ids,
+ .pm = &nxp_fspi_pm_ops,
+ },
+ .probe = nxp_fspi_probe,
+ .remove = nxp_fspi_remove,
+};
+module_platform_driver(nxp_fspi_driver);
+
+MODULE_DESCRIPTION("NXP FSPI Controller Driver");
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_AUTHOR("Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>");
+MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
+MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 2fd8881fcd65..8be304379628 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -623,8 +623,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
cfg.src_addr_width = width;
cfg.dst_addr_width = width;
- cfg.src_maxburst = es;
- cfg.dst_maxburst = es;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index d7e4e18ec3df..1ae9af5f17ec 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -466,9 +466,9 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
int i;
/* allocate coherent DMAable memory for hardware buffer descriptors. */
- sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
- sizeof(*bd) * PESQI_BD_COUNT,
- &sqi->bd_dma, GFP_KERNEL);
+ sqi->bd = dma_alloc_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ &sqi->bd_dma, GFP_KERNEL);
if (!sqi->bd) {
dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
return -ENOMEM;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 0c793e31d60f..26684178786f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -253,6 +253,7 @@
#define STATE_RUNNING ((void *) 1)
#define STATE_DONE ((void *) 2)
#define STATE_ERROR ((void *) -1)
+#define STATE_TIMEOUT ((void *) -2)
/*
* SSP State - Whether Enabled or Disabled
@@ -1484,6 +1485,30 @@ err_config_dma:
writew(irqflags, SSP_IMSC(pl022->virtbase));
}
+static void print_current_status(struct pl022 *pl022)
+{
+ u32 read_cr0;
+ u16 read_cr1, read_dmacr, read_sr;
+
+ if (pl022->vendor->extended_cr)
+ read_cr0 = readl(SSP_CR0(pl022->virtbase));
+ else
+ read_cr0 = readw(SSP_CR0(pl022->virtbase));
+ read_cr1 = readw(SSP_CR1(pl022->virtbase));
+ read_dmacr = readw(SSP_DMACR(pl022->virtbase));
+ read_sr = readw(SSP_SR(pl022->virtbase));
+
+ dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
+ dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
+ dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
+ dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
+ dev_warn(&pl022->adev->dev,
+ "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
+ pl022->exp_fifo_level,
+ pl022->vendor->fifodepth);
+
+}
+
static void do_polling_transfer(struct pl022 *pl022)
{
struct spi_message *message = NULL;
@@ -1535,7 +1560,8 @@ static void do_polling_transfer(struct pl022 *pl022)
if (time_after(time, timeout)) {
dev_warn(&pl022->adev->dev,
"%s: timeout!\n", __func__);
- message->state = STATE_ERROR;
+ message->state = STATE_TIMEOUT;
+ print_current_status(pl022);
goto out;
}
cpu_relax();
@@ -1553,6 +1579,8 @@ out:
/* Handle end of message */
if (message->state == STATE_DONE)
message->status = 0;
+ else if (message->state == STATE_TIMEOUT)
+ message->status = -EAGAIN;
else
message->status = -EIO;
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 2fa7f4b43492..15592598273e 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -23,7 +23,7 @@
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
bool error)
{
- struct spi_message *msg = drv_data->master->cur_msg;
+ struct spi_message *msg = drv_data->controller->cur_msg;
/*
* It is possible that one CPU is handling ROR interrupt and other
@@ -59,7 +59,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
msg->status = -EIO;
}
- spi_finalize_current_transfer(drv_data->master);
+ spi_finalize_current_transfer(drv_data->controller);
}
}
@@ -74,7 +74,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
struct spi_transfer *xfer)
{
struct chip_data *chip =
- spi_get_ctldata(drv_data->master->cur_msg->spi);
+ spi_get_ctldata(drv_data->controller->cur_msg->spi);
enum dma_slave_buswidth width;
struct dma_slave_config cfg;
struct dma_chan *chan;
@@ -102,14 +102,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
cfg.dst_maxburst = chip->dma_burst_size;
sgt = &xfer->tx_sg;
- chan = drv_data->master->dma_tx;
+ chan = drv_data->controller->dma_tx;
} else {
cfg.src_addr = drv_data->ssdr_physical;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
sgt = &xfer->rx_sg;
- chan = drv_data->master->dma_rx;
+ chan = drv_data->controller->dma_rx;
}
ret = dmaengine_slave_config(chan, &cfg);
@@ -130,8 +130,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
if (status & SSSR_ROR) {
dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
- dmaengine_terminate_async(drv_data->master->dma_rx);
- dmaengine_terminate_async(drv_data->master->dma_tx);
+ dmaengine_terminate_async(drv_data->controller->dma_rx);
+ dmaengine_terminate_async(drv_data->controller->dma_tx);
pxa2xx_spi_dma_transfer_complete(drv_data, true);
return IRQ_HANDLED;
@@ -171,15 +171,15 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
return 0;
err_rx:
- dmaengine_terminate_async(drv_data->master->dma_tx);
+ dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
return err;
}
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
- dma_async_issue_pending(drv_data->master->dma_rx);
- dma_async_issue_pending(drv_data->master->dma_tx);
+ dma_async_issue_pending(drv_data->controller->dma_rx);
+ dma_async_issue_pending(drv_data->controller->dma_tx);
atomic_set(&drv_data->dma_running, 1);
}
@@ -187,30 +187,30 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data)
void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
atomic_set(&drv_data->dma_running, 0);
- dmaengine_terminate_sync(drv_data->master->dma_rx);
- dmaengine_terminate_sync(drv_data->master->dma_tx);
+ dmaengine_terminate_sync(drv_data->controller->dma_rx);
+ dmaengine_terminate_sync(drv_data->controller->dma_tx);
}
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
- struct pxa2xx_spi_master *pdata = drv_data->master_info;
+ struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
struct device *dev = &drv_data->pdev->dev;
- struct spi_controller *master = drv_data->master;
+ struct spi_controller *controller = drv_data->controller;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- master->dma_tx = dma_request_slave_channel_compat(mask,
+ controller->dma_tx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->tx_param, dev, "tx");
- if (!master->dma_tx)
+ if (!controller->dma_tx)
return -ENODEV;
- master->dma_rx = dma_request_slave_channel_compat(mask,
+ controller->dma_rx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->rx_param, dev, "rx");
- if (!master->dma_rx) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (!controller->dma_rx) {
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
return -ENODEV;
}
@@ -219,17 +219,17 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
- struct spi_controller *master = drv_data->master;
+ struct spi_controller *controller = drv_data->controller;
- if (master->dma_rx) {
- dmaengine_terminate_sync(master->dma_rx);
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (controller->dma_rx) {
+ dmaengine_terminate_sync(controller->dma_rx);
+ dma_release_channel(controller->dma_rx);
+ controller->dma_rx = NULL;
}
- if (master->dma_tx) {
- dmaengine_terminate_sync(master->dma_tx);
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (controller->dma_tx) {
+ dmaengine_terminate_sync(controller->dma_tx);
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
}
}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 869f188b02eb..1727fdfbac28 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -197,7 +197,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
struct platform_device_info pi;
int ret;
struct platform_device *pdev;
- struct pxa2xx_spi_master spi_pdata;
+ struct pxa2xx_spi_controller spi_pdata;
struct ssp_device *ssp;
struct pxa_spi_info *c;
char buf[40];
@@ -265,7 +265,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
struct platform_device *pdev = pci_get_drvdata(dev);
- struct pxa2xx_spi_master *spi_pdata;
+ struct pxa2xx_spi_controller *spi_pdata;
spi_pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index d84b893a64d7..b6ddba833d02 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -328,7 +328,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
/* Enable multiblock DMA transfers */
- if (drv_data->master_info->enable_dma) {
+ if (drv_data->controller_info->enable_dma) {
__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
if (config->reg_general >= 0) {
@@ -368,7 +368,7 @@ static void lpss_ssp_select_cs(struct spi_device *spi,
__lpss_ssp_write_priv(drv_data,
config->reg_cs_ctrl, value);
ndelay(1000000000 /
- (drv_data->master->max_speed_hz / 2));
+ (drv_data->controller->max_speed_hz / 2));
}
}
@@ -567,7 +567,7 @@ static int u32_reader(struct driver_data *drv_data)
static void reset_sccr1(struct driver_data *drv_data)
{
struct chip_data *chip =
- spi_get_ctldata(drv_data->master->cur_msg->spi);
+ spi_get_ctldata(drv_data->controller->cur_msg->spi);
u32 sccr1_reg;
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
@@ -599,8 +599,8 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
dev_err(&drv_data->pdev->dev, "%s\n", msg);
- drv_data->master->cur_msg->status = -EIO;
- spi_finalize_current_transfer(drv_data->master);
+ drv_data->controller->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(drv_data->controller);
}
static void int_transfer_complete(struct driver_data *drv_data)
@@ -611,7 +611,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
- spi_finalize_current_transfer(drv_data->master);
+ spi_finalize_current_transfer(drv_data->controller);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
@@ -747,7 +747,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
- if (!drv_data->master->cur_msg) {
+ if (!drv_data->controller->cur_msg) {
handle_bad_msg(drv_data);
/* Never fail */
return IRQ_HANDLED;
@@ -879,7 +879,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
- unsigned long ssp_clk = drv_data->master->max_speed_hz;
+ unsigned long ssp_clk = drv_data->controller->max_speed_hz;
const struct ssp_device *ssp = drv_data->ssp;
rate = min_t(int, ssp_clk, rate);
@@ -894,7 +894,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
int rate)
{
struct chip_data *chip =
- spi_get_ctldata(drv_data->master->cur_msg->spi);
+ spi_get_ctldata(drv_data->controller->cur_msg->spi);
unsigned int clk_div;
switch (drv_data->ssp_type) {
@@ -908,7 +908,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
return clk_div << 8;
}
-static bool pxa2xx_spi_can_dma(struct spi_controller *master,
+static bool pxa2xx_spi_can_dma(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *xfer)
{
@@ -919,12 +919,12 @@ static bool pxa2xx_spi_can_dma(struct spi_controller *master,
xfer->len >= chip->dma_burst_size;
}
-static int pxa2xx_spi_transfer_one(struct spi_controller *master,
+static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
- struct spi_message *message = master->cur_msg;
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
+ struct spi_message *message = controller->cur_msg;
struct chip_data *chip = spi_get_ctldata(message->spi);
u32 dma_thresh = chip->dma_threshold;
u32 dma_burst = chip->dma_burst_size;
@@ -1006,9 +1006,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
"DMA burst size reduced to match bits_per_word\n");
}
- dma_mapped = master->can_dma &&
- master->can_dma(master, message->spi, transfer) &&
- master->cur_msg_mapped;
+ dma_mapped = controller->can_dma &&
+ controller->can_dma(controller, message->spi, transfer) &&
+ controller->cur_msg_mapped;
if (dma_mapped) {
/* Ensure we have the correct interrupt handler */
@@ -1036,12 +1036,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
- master->max_speed_hz
+ controller->max_speed_hz
/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
else
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
- master->max_speed_hz / 2
+ controller->max_speed_hz / 2
/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
@@ -1092,7 +1092,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
}
}
- if (spi_controller_is_slave(master)) {
+ if (spi_controller_is_slave(controller)) {
while (drv_data->write(drv_data))
;
if (drv_data->gpiod_ready) {
@@ -1111,9 +1111,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
return 1;
}
-static int pxa2xx_spi_slave_abort(struct spi_master *master)
+static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Stop and reset SSP */
write_SSSR_CS(drv_data, drv_data->clear_sr);
@@ -1126,16 +1126,16 @@ static int pxa2xx_spi_slave_abort(struct spi_master *master)
dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
- drv_data->master->cur_msg->status = -EINTR;
- spi_finalize_current_transfer(drv_data->master);
+ drv_data->controller->cur_msg->status = -EINTR;
+ spi_finalize_current_transfer(drv_data->controller);
return 0;
}
-static void pxa2xx_spi_handle_err(struct spi_controller *master,
+static void pxa2xx_spi_handle_err(struct spi_controller *controller,
struct spi_message *msg)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP */
pxa2xx_spi_write(drv_data, SSCR0,
@@ -1159,9 +1159,9 @@ static void pxa2xx_spi_handle_err(struct spi_controller *master,
pxa2xx_spi_dma_stop(drv_data);
}
-static int pxa2xx_spi_unprepare_transfer(struct spi_controller *master)
+static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP now */
pxa2xx_spi_write(drv_data, SSCR0,
@@ -1260,7 +1260,7 @@ static int setup(struct spi_device *spi)
break;
default:
tx_hi_thres = 0;
- if (spi_controller_is_slave(drv_data->master)) {
+ if (spi_controller_is_slave(drv_data->controller)) {
tx_thres = 1;
rx_thres = 2;
} else {
@@ -1287,7 +1287,7 @@ static int setup(struct spi_device *spi)
chip->frm = spi->chip_select;
}
- chip->enable_dma = drv_data->master_info->enable_dma;
+ chip->enable_dma = drv_data->controller_info->enable_dma;
chip->timeout = TIMOUT_DFLT;
}
@@ -1310,7 +1310,7 @@ static int setup(struct spi_device *spi)
if (chip_info->enable_loopback)
chip->cr1 = SSCR1_LBM;
}
- if (spi_controller_is_slave(drv_data->master)) {
+ if (spi_controller_is_slave(drv_data->controller)) {
chip->cr1 |= SSCR1_SCFR;
chip->cr1 |= SSCR1_SCLKDIR;
chip->cr1 |= SSCR1_SFRMDIR;
@@ -1497,10 +1497,10 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
#endif /* CONFIG_PCI */
-static struct pxa2xx_spi_master *
+static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
- struct pxa2xx_spi_master *pdata;
+ struct pxa2xx_spi_controller *pdata;
struct acpi_device *adev;
struct ssp_device *ssp;
struct resource *res;
@@ -1568,10 +1568,10 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
return pdata;
}
-static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
+static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
unsigned int cs)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
if (has_acpi_companion(&drv_data->pdev->dev)) {
switch (drv_data->ssp_type) {
@@ -1595,8 +1595,8 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct pxa2xx_spi_master *platform_info;
- struct spi_controller *master;
+ struct pxa2xx_spi_controller *platform_info;
+ struct spi_controller *controller;
struct driver_data *drv_data;
struct ssp_device *ssp;
const struct lpss_config *config;
@@ -1622,37 +1622,37 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
if (platform_info->is_slave)
- master = spi_alloc_slave(dev, sizeof(struct driver_data));
+ controller = spi_alloc_slave(dev, sizeof(struct driver_data));
else
- master = spi_alloc_master(dev, sizeof(struct driver_data));
+ controller = spi_alloc_master(dev, sizeof(struct driver_data));
- if (!master) {
- dev_err(&pdev->dev, "cannot alloc spi_master\n");
+ if (!controller) {
+ dev_err(&pdev->dev, "cannot alloc spi_controller\n");
pxa_ssp_free(ssp);
return -ENOMEM;
}
- drv_data = spi_controller_get_devdata(master);
- drv_data->master = master;
- drv_data->master_info = platform_info;
+ drv_data = spi_controller_get_devdata(controller);
+ drv_data->controller = controller;
+ drv_data->controller_info = platform_info;
drv_data->pdev = pdev;
drv_data->ssp = ssp;
- master->dev.of_node = pdev->dev.of_node;
+ controller->dev.of_node = pdev->dev.of_node;
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
-
- master->bus_num = ssp->port_id;
- master->dma_alignment = DMA_ALIGNMENT;
- master->cleanup = cleanup;
- master->setup = setup;
- master->set_cs = pxa2xx_spi_set_cs;
- master->transfer_one = pxa2xx_spi_transfer_one;
- master->slave_abort = pxa2xx_spi_slave_abort;
- master->handle_err = pxa2xx_spi_handle_err;
- master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
- master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
- master->auto_runtime_pm = true;
- master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+
+ controller->bus_num = ssp->port_id;
+ controller->dma_alignment = DMA_ALIGNMENT;
+ controller->cleanup = cleanup;
+ controller->setup = setup;
+ controller->set_cs = pxa2xx_spi_set_cs;
+ controller->transfer_one = pxa2xx_spi_transfer_one;
+ controller->slave_abort = pxa2xx_spi_slave_abort;
+ controller->handle_err = pxa2xx_spi_handle_err;
+ controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+ controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
+ controller->auto_runtime_pm = true;
+ controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
drv_data->ssp_type = ssp->type;
@@ -1661,10 +1661,10 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (pxa25x_ssp_comp(drv_data)) {
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
break;
default:
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
break;
}
@@ -1673,7 +1673,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->clear_sr = SSSR_ROR;
drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
} else {
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
drv_data->dma_cr1 = DEFAULT_DMA_CR1;
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
@@ -1685,7 +1685,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data);
if (status < 0) {
dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
- goto out_error_master_alloc;
+ goto out_error_controller_alloc;
}
/* Setup DMA if requested */
@@ -1695,7 +1695,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
dev_dbg(dev, "no DMA channels available, using PIO\n");
platform_info->enable_dma = false;
} else {
- master->can_dma = pxa2xx_spi_can_dma;
+ controller->can_dma = pxa2xx_spi_can_dma;
+ controller->max_dma_len = MAX_DMA_LEN;
}
}
@@ -1704,7 +1705,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (status)
goto out_error_dma_irq_alloc;
- master->max_speed_hz = clk_get_rate(ssp->clk);
+ controller->max_speed_hz = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
pxa2xx_spi_write(drv_data, SSCR0, 0);
@@ -1727,7 +1728,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
break;
default:
- if (spi_controller_is_slave(master)) {
+ if (spi_controller_is_slave(controller)) {
tmp = SSCR1_SCFR |
SSCR1_SCLKDIR |
SSCR1_SFRMDIR |
@@ -1740,7 +1741,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_Motorola | SSCR0_DataSize(8);
- if (!spi_controller_is_slave(master))
+ if (!spi_controller_is_slave(controller))
tmp |= SSCR0_SCR(2);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
@@ -1765,24 +1766,24 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
platform_info->num_chipselect = config->cs_num;
}
}
- master->num_chipselect = platform_info->num_chipselect;
+ controller->num_chipselect = platform_info->num_chipselect;
count = gpiod_count(&pdev->dev, "cs");
if (count > 0) {
int i;
- master->num_chipselect = max_t(int, count,
- master->num_chipselect);
+ controller->num_chipselect = max_t(int, count,
+ controller->num_chipselect);
drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
- master->num_chipselect, sizeof(struct gpio_desc *),
+ controller->num_chipselect, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!drv_data->cs_gpiods) {
status = -ENOMEM;
goto out_error_clock_enabled;
}
- for (i = 0; i < master->num_chipselect; i++) {
+ for (i = 0; i < controller->num_chipselect; i++) {
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
@@ -1815,9 +1816,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
- status = devm_spi_register_controller(&pdev->dev, master);
+ status = devm_spi_register_controller(&pdev->dev, controller);
if (status != 0) {
- dev_err(&pdev->dev, "problem registering spi master\n");
+ dev_err(&pdev->dev, "problem registering spi controller\n");
goto out_error_clock_enabled;
}
@@ -1832,8 +1833,8 @@ out_error_dma_irq_alloc:
pxa2xx_spi_dma_release(drv_data);
free_irq(ssp->irq, drv_data);
-out_error_master_alloc:
- spi_controller_put(master);
+out_error_controller_alloc:
+ spi_controller_put(controller);
pxa_ssp_free(ssp);
return status;
}
@@ -1854,7 +1855,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(ssp->clk);
/* Release DMA */
- if (drv_data->master_info->enable_dma)
+ if (drv_data->controller_info->enable_dma)
pxa2xx_spi_dma_release(drv_data);
pm_runtime_put_noidle(&pdev->dev);
@@ -1876,7 +1877,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status;
- status = spi_controller_suspend(drv_data->master);
+ status = spi_controller_suspend(drv_data->controller);
if (status != 0)
return status;
pxa2xx_spi_write(drv_data, SSCR0, 0);
@@ -1901,7 +1902,7 @@ static int pxa2xx_spi_resume(struct device *dev)
}
/* Start the queue running */
- return spi_controller_resume(drv_data->master);
+ return spi_controller_resume(drv_data->controller);
}
#endif
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 4e324da66ef7..aba777b4502d 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -31,10 +31,10 @@ struct driver_data {
/* SPI framework hookup */
enum pxa_ssp_type ssp_type;
- struct spi_controller *master;
+ struct spi_controller *controller;
/* PXA hookup */
- struct pxa2xx_spi_master *master_info;
+ struct pxa2xx_spi_controller *controller_info;
/* SSP register addresses */
void __iomem *ioaddr;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a4ef641b5227..556870dcdf79 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -180,7 +180,7 @@
struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
- struct spi_master *master;
+ struct spi_controller *ctlr;
wait_queue_head_t wait;
struct clk *clk;
u16 spcmd;
@@ -237,8 +237,8 @@ static u16 rspi_read_data(const struct rspi_data *rspi)
/* optional functions */
struct spi_ops {
int (*set_config_register)(struct rspi_data *rspi, int access_size);
- int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
- struct spi_transfer *xfer);
+ int (*transfer_one)(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer);
u16 mode_bits;
u16 flags;
u16 fifo_size;
@@ -466,7 +466,7 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
int error = rspi_wait_for_tx_empty(rspi);
if (error < 0) {
- dev_err(&rspi->master->dev, "transmit timeout\n");
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return error;
}
rspi_write_data(rspi, data);
@@ -480,7 +480,7 @@ static int rspi_data_in(struct rspi_data *rspi)
error = rspi_wait_for_rx_full(rspi);
if (error < 0) {
- dev_err(&rspi->master->dev, "receive timeout\n");
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
return error;
}
data = rspi_read_data(rspi);
@@ -526,8 +526,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
- desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
- rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
+ rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EAGAIN;
@@ -546,8 +546,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
}
if (tx) {
- desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
- tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
+ tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EAGAIN;
@@ -584,9 +584,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
/* Now start DMA */
if (rx)
- dma_async_issue_pending(rspi->master->dma_rx);
+ dma_async_issue_pending(rspi->ctlr->dma_rx);
if (tx)
- dma_async_issue_pending(rspi->master->dma_tx);
+ dma_async_issue_pending(rspi->ctlr->dma_tx);
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
@@ -594,13 +594,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = 0;
} else {
if (!ret) {
- dev_err(&rspi->master->dev, "DMA timeout\n");
+ dev_err(&rspi->ctlr->dev, "DMA timeout\n");
ret = -ETIMEDOUT;
}
if (tx)
- dmaengine_terminate_all(rspi->master->dma_tx);
+ dmaengine_terminate_all(rspi->ctlr->dma_tx);
if (rx)
- dmaengine_terminate_all(rspi->master->dma_rx);
+ dmaengine_terminate_all(rspi->ctlr->dma_rx);
}
rspi_disable_irq(rspi, irq_mask);
@@ -614,12 +614,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
no_dma_tx:
if (rx)
- dmaengine_terminate_all(rspi->master->dma_rx);
+ dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&rspi->master->dev),
- dev_name(&rspi->master->dev));
+ dev_driver_string(&rspi->ctlr->dev),
+ dev_name(&rspi->ctlr->dev));
}
return ret;
}
@@ -660,10 +660,10 @@ static bool __rspi_can_dma(const struct rspi_data *rspi,
return xfer->len > rspi->ops->fifo_size;
}
-static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
+static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
return __rspi_can_dma(rspi, xfer);
}
@@ -671,7 +671,7 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
- if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
+ if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
return -EAGAIN;
/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
@@ -698,10 +698,10 @@ static int rspi_common_transfer(struct rspi_data *rspi,
return 0;
}
-static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
- struct spi_transfer *xfer)
+static int rspi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
u8 spcr;
spcr = rspi_read8(rspi, RSPI_SPCR);
@@ -716,11 +716,11 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
return rspi_common_transfer(rspi, xfer);
}
-static int rspi_rz_transfer_one(struct spi_master *master,
+static int rspi_rz_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
rspi_rz_receive_init(rspi);
@@ -739,7 +739,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
if (n == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
- dev_err(&rspi->master->dev, "transmit timeout\n");
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < n; i++)
@@ -747,7 +747,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
- dev_err(&rspi->master->dev, "receive timeout\n");
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < n; i++)
@@ -785,7 +785,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
unsigned int i, len;
int ret;
- if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
if (ret != -EAGAIN)
return ret;
@@ -796,7 +796,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
if (len == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
- dev_err(&rspi->master->dev, "transmit timeout\n");
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < len; i++)
@@ -822,7 +822,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
unsigned int i, len;
int ret;
- if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
if (ret != -EAGAIN)
return ret;
@@ -833,7 +833,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
if (len == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
- dev_err(&rspi->master->dev, "receive timeout\n");
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < len; i++)
@@ -849,10 +849,10 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
return 0;
}
-static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
- struct spi_transfer *xfer)
+static int qspi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
if (spi->mode & SPI_LOOP) {
return qspi_transfer_out_in(rspi, xfer);
@@ -870,7 +870,7 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
static int rspi_setup(struct spi_device *spi)
{
- struct rspi_data *rspi = spi_master_get_devdata(spi->master);
+ struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
rspi->max_speed_hz = spi->max_speed_hz;
@@ -955,10 +955,10 @@ static int qspi_setup_sequencer(struct rspi_data *rspi,
return 0;
}
-static int rspi_prepare_message(struct spi_master *master,
+static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
int ret;
if (msg->spi->mode &
@@ -974,10 +974,10 @@ static int rspi_prepare_message(struct spi_master *master,
return 0;
}
-static int rspi_unprepare_message(struct spi_master *master,
+static int rspi_unprepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
/* Disable SPI function */
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
@@ -1081,7 +1081,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
return chan;
}
-static int rspi_request_dma(struct device *dev, struct spi_master *master,
+static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
const struct resource *res)
{
const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
@@ -1099,37 +1099,37 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master,
return 0;
}
- master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
- res->start + RSPI_SPDR);
- if (!master->dma_tx)
+ ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
+ res->start + RSPI_SPDR);
+ if (!ctlr->dma_tx)
return -ENODEV;
- master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
- res->start + RSPI_SPDR);
- if (!master->dma_rx) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
+ res->start + RSPI_SPDR);
+ if (!ctlr->dma_rx) {
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
return -ENODEV;
}
- master->can_dma = rspi_can_dma;
+ ctlr->can_dma = rspi_can_dma;
dev_info(dev, "DMA available");
return 0;
}
-static void rspi_release_dma(struct spi_master *master)
+static void rspi_release_dma(struct spi_controller *ctlr)
{
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
}
static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = platform_get_drvdata(pdev);
- rspi_release_dma(rspi->master);
+ rspi_release_dma(rspi->ctlr);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -1139,7 +1139,7 @@ static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one,
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
- .flags = SPI_MASTER_MUST_TX,
+ .flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
};
@@ -1147,7 +1147,7 @@ static const struct spi_ops rspi_rz_ops = {
.set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one,
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
- .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+ .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
};
@@ -1157,7 +1157,7 @@ static const struct spi_ops qspi_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD,
- .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+ .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
};
@@ -1174,7 +1174,7 @@ static const struct of_device_id rspi_of_match[] = {
MODULE_DEVICE_TABLE(of, rspi_of_match);
-static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
u32 num_cs;
int error;
@@ -1186,12 +1186,12 @@ static int rspi_parse_dt(struct device *dev, struct spi_master *master)
return error;
}
- master->num_chipselect = num_cs;
+ ctlr->num_chipselect = num_cs;
return 0;
}
#else
#define rspi_of_match NULL
-static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
return -EINVAL;
}
@@ -1212,28 +1212,28 @@ static int rspi_request_irq(struct device *dev, unsigned int irq,
static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct rspi_data *rspi;
int ret;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
- master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
- if (master == NULL)
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
+ if (ctlr == NULL)
return -ENOMEM;
ops = of_device_get_match_data(&pdev->dev);
if (ops) {
- ret = rspi_parse_dt(&pdev->dev, master);
+ ret = rspi_parse_dt(&pdev->dev, ctlr);
if (ret)
goto error1;
} else {
ops = (struct spi_ops *)pdev->id_entry->driver_data;
rspi_pd = dev_get_platdata(&pdev->dev);
if (rspi_pd && rspi_pd->num_chipselect)
- master->num_chipselect = rspi_pd->num_chipselect;
+ ctlr->num_chipselect = rspi_pd->num_chipselect;
else
- master->num_chipselect = 2; /* default */
+ ctlr->num_chipselect = 2; /* default */
}
/* ops parameter check */
@@ -1243,10 +1243,10 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
- rspi = spi_master_get_devdata(master);
+ rspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
- rspi->master = master;
+ rspi->ctlr = ctlr;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rspi->addr = devm_ioremap_resource(&pdev->dev, res);
@@ -1266,15 +1266,15 @@ static int rspi_probe(struct platform_device *pdev)
init_waitqueue_head(&rspi->wait);
- master->bus_num = pdev->id;
- master->setup = rspi_setup;
- master->auto_runtime_pm = true;
- master->transfer_one = ops->transfer_one;
- master->prepare_message = rspi_prepare_message;
- master->unprepare_message = rspi_unprepare_message;
- master->mode_bits = ops->mode_bits;
- master->flags = ops->flags;
- master->dev.of_node = pdev->dev.of_node;
+ ctlr->bus_num = pdev->id;
+ ctlr->setup = rspi_setup;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one = ops->transfer_one;
+ ctlr->prepare_message = rspi_prepare_message;
+ ctlr->unprepare_message = rspi_unprepare_message;
+ ctlr->mode_bits = ops->mode_bits;
+ ctlr->flags = ops->flags;
+ ctlr->dev.of_node = pdev->dev.of_node;
ret = platform_get_irq_byname(pdev, "rx");
if (ret < 0) {
@@ -1311,13 +1311,13 @@ static int rspi_probe(struct platform_device *pdev)
goto error2;
}
- ret = rspi_request_dma(&pdev->dev, master, res);
+ ret = rspi_request_dma(&pdev->dev, ctlr, res);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_master error.\n");
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error3;
}
@@ -1326,11 +1326,11 @@ static int rspi_probe(struct platform_device *pdev)
return 0;
error3:
- rspi_release_dma(master);
+ rspi_release_dma(ctlr);
error2:
pm_runtime_disable(&pdev->dev);
error1:
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
}
@@ -1349,14 +1349,14 @@ static int rspi_suspend(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
- return spi_master_suspend(rspi->master);
+ return spi_controller_suspend(rspi->ctlr);
}
static int rspi_resume(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
- return spi_master_resume(rspi->master);
+ return spi_controller_resume(rspi->ctlr);
}
static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index dc0926e43665..7f73f91d412a 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -35,7 +35,7 @@
struct hspi_priv {
void __iomem *addr;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct device *dev;
struct clk *clk;
};
@@ -140,10 +140,10 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
}
-static int hspi_transfer_one_message(struct spi_master *master,
+static int hspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct hspi_priv *hspi = spi_master_get_devdata(master);
+ struct hspi_priv *hspi = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
u32 tx;
u32 rx;
@@ -205,7 +205,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
ndelay(nsecs);
hspi_hw_cs_disable(hspi);
}
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return ret;
}
@@ -213,7 +213,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct hspi_priv *hspi;
struct clk *clk;
int ret;
@@ -225,11 +225,9 @@ static int hspi_probe(struct platform_device *pdev)
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(*hspi));
- if (!master) {
- dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi));
+ if (!ctlr)
return -ENOMEM;
- }
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@@ -238,33 +236,32 @@ static int hspi_probe(struct platform_device *pdev)
goto error0;
}
- hspi = spi_master_get_devdata(master);
+ hspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, hspi);
/* init hspi */
- hspi->master = master;
+ hspi->ctlr = ctlr;
hspi->dev = &pdev->dev;
hspi->clk = clk;
hspi->addr = devm_ioremap(hspi->dev,
res->start, resource_size(res));
if (!hspi->addr) {
- dev_err(&pdev->dev, "ioremap error.\n");
ret = -ENOMEM;
goto error1;
}
pm_runtime_enable(&pdev->dev);
- master->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->dev.of_node = pdev->dev.of_node;
- master->auto_runtime_pm = true;
- master->transfer_one_message = hspi_transfer_one_message;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->bus_num = pdev->id;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one_message = hspi_transfer_one_message;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_master error.\n");
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error2;
}
@@ -275,7 +272,7 @@ static int hspi_probe(struct platform_device *pdev)
error1:
clk_put(clk);
error0:
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
}
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index d14b407cc800..e2eb466db10a 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * SuperH MSIOF SPI Master Interface
+ * SuperH MSIOF SPI Controller Interface
*
* Copyright (c) 2009 Magnus Damm
* Copyright (C) 2014 Renesas Electronics Corporation
@@ -32,14 +32,15 @@
#include <asm/unaligned.h>
struct sh_msiof_chipdata {
+ u32 bits_per_word_mask;
u16 tx_fifo_size;
u16 rx_fifo_size;
- u16 master_flags;
+ u16 ctlr_flags;
u16 min_div_pow;
};
struct sh_msiof_spi_priv {
- struct spi_master *master;
+ struct spi_controller *ctlr;
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
@@ -287,7 +288,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
sh_msiof_write(p, TSCR, scr);
- if (!(p->master->flags & SPI_MASTER_MUST_TX))
+ if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, RSCR, scr);
}
@@ -351,14 +352,14 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- if (spi_controller_is_slave(p->master)) {
+ if (spi_controller_is_slave(p->ctlr)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
} else {
sh_msiof_write(p, TMDR1,
tmp | MDR1_TRMD | TMDR1_PCON |
(ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
}
- if (p->master->flags & SPI_MASTER_MUST_TX) {
+ if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
@@ -382,7 +383,7 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
{
u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
- if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX))
+ if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
else
sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
@@ -539,8 +540,9 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
static int sh_msiof_spi_setup(struct spi_device *spi)
{
- struct device_node *np = spi->master->dev.of_node;
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ struct device_node *np = spi->controller->dev.of_node;
+ struct sh_msiof_spi_priv *p =
+ spi_controller_get_devdata(spi->controller);
u32 clr, set, tmp;
if (!np) {
@@ -556,7 +558,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
}
- if (spi_controller_is_slave(p->master))
+ if (spi_controller_is_slave(p->ctlr))
return 0;
if (p->native_cs_inited &&
@@ -581,10 +583,10 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
}
-static int sh_msiof_prepare_message(struct spi_master *master,
+static int sh_msiof_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
const struct spi_device *spi = msg->spi;
u32 ss, cs_high;
@@ -605,7 +607,7 @@ static int sh_msiof_prepare_message(struct spi_master *master,
static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
- bool slave = spi_controller_is_slave(p->master);
+ bool slave = spi_controller_is_slave(p->ctlr);
int ret = 0;
/* setup clock and rx/tx signals */
@@ -625,7 +627,7 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
- bool slave = spi_controller_is_slave(p->master);
+ bool slave = spi_controller_is_slave(p->ctlr);
int ret = 0;
/* shut down frame, rx/tx and clock signals */
@@ -641,9 +643,9 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
return ret;
}
-static int sh_msiof_slave_abort(struct spi_master *master)
+static int sh_msiof_slave_abort(struct spi_controller *ctlr)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
p->slave_aborted = true;
complete(&p->done);
@@ -654,7 +656,7 @@ static int sh_msiof_slave_abort(struct spi_master *master)
static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
struct completion *x)
{
- if (spi_controller_is_slave(p->master)) {
+ if (spi_controller_is_slave(p->ctlr)) {
if (wait_for_completion_interruptible(x) ||
p->slave_aborted) {
dev_dbg(&p->pdev->dev, "interrupted\n");
@@ -754,7 +756,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
ier_bits |= IER_RDREQE | IER_RDMAE;
- desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
+ desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
@@ -769,9 +771,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
if (tx) {
ier_bits |= IER_TDREQE | IER_TDMAE;
- dma_sync_single_for_device(p->master->dma_tx->device->dev,
+ dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
- desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
+ desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
p->tx_dma_addr, len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
@@ -803,9 +805,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* Now start DMA */
if (rx)
- dma_async_issue_pending(p->master->dma_rx);
+ dma_async_issue_pending(p->ctlr->dma_rx);
if (tx)
- dma_async_issue_pending(p->master->dma_tx);
+ dma_async_issue_pending(p->ctlr->dma_tx);
ret = sh_msiof_spi_start(p, rx);
if (ret) {
@@ -845,9 +847,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
if (rx)
- dma_sync_single_for_cpu(p->master->dma_rx->device->dev,
- p->rx_dma_addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
+ p->rx_dma_addr, len, DMA_FROM_DEVICE);
return 0;
@@ -856,10 +857,10 @@ stop_reset:
sh_msiof_spi_stop(p, rx);
stop_dma:
if (tx)
- dmaengine_terminate_all(p->master->dma_tx);
+ dmaengine_terminate_all(p->ctlr->dma_tx);
no_dma_tx:
if (rx)
- dmaengine_terminate_all(p->master->dma_rx);
+ dmaengine_terminate_all(p->ctlr->dma_rx);
sh_msiof_write(p, IER, 0);
return ret;
}
@@ -907,11 +908,11 @@ static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
memcpy(dst, src, words * 4);
}
-static int sh_msiof_transfer_one(struct spi_master *master,
+static int sh_msiof_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
void (*copy32)(u32 *, const u32 *, unsigned int);
void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
@@ -926,10 +927,10 @@ static int sh_msiof_transfer_one(struct spi_master *master,
int ret;
/* setup clocks (clock already enabled in chipselect()) */
- if (!spi_controller_is_slave(p->master))
+ if (!spi_controller_is_slave(p->ctlr))
sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
- while (master->dma_tx && len > 15) {
+ while (ctlr->dma_tx && len > 15) {
/*
* DMA supports 32-bit words only, hence pack 8-bit and 16-bit
* words, with byte resp. word swapping.
@@ -937,17 +938,13 @@ static int sh_msiof_transfer_one(struct spi_master *master,
unsigned int l = 0;
if (tx_buf)
- l = min(len, p->tx_fifo_size * 4);
+ l = min(round_down(len, 4), p->tx_fifo_size * 4);
if (rx_buf)
- l = min(len, p->rx_fifo_size * 4);
+ l = min(round_down(len, 4), p->rx_fifo_size * 4);
if (bits <= 8) {
- if (l & 3)
- break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
- if (l & 3)
- break;
copy32 = copy_wswap32;
} else {
copy32 = copy_plain32;
@@ -1052,23 +1049,28 @@ static int sh_msiof_transfer_one(struct spi_master *master,
}
static const struct sh_msiof_chipdata sh_data = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
- .master_flags = 0,
+ .ctlr_flags = 0,
.min_div_pow = 0,
};
static const struct sh_msiof_chipdata rcar_gen2_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
- .master_flags = SPI_MASTER_MUST_TX,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 0,
};
static const struct sh_msiof_chipdata rcar_gen3_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
- .master_flags = SPI_MASTER_MUST_TX,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 1,
};
@@ -1136,7 +1138,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
if (ret <= 0)
return 0;
- num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
+ num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
for (i = 0; i < num_cs; i++) {
struct gpio_desc *gpiod;
@@ -1206,10 +1208,10 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
{
struct platform_device *pdev = p->pdev;
struct device *dev = &pdev->dev;
- const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
+ const struct sh_msiof_spi_info *info = p->info;
unsigned int dma_tx_id, dma_rx_id;
const struct resource *res;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct device *tx_dev, *rx_dev;
if (dev->of_node) {
@@ -1229,17 +1231,15 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master = p->master;
- master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
- dma_tx_id,
- res->start + TFDR);
- if (!master->dma_tx)
+ ctlr = p->ctlr;
+ ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
+ dma_tx_id, res->start + TFDR);
+ if (!ctlr->dma_tx)
return -ENODEV;
- master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
- dma_rx_id,
- res->start + RFDR);
- if (!master->dma_rx)
+ ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
+ dma_rx_id, res->start + RFDR);
+ if (!ctlr->dma_rx)
goto free_tx_chan;
p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
@@ -1250,13 +1250,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
if (!p->rx_dma_page)
goto free_tx_page;
- tx_dev = master->dma_tx->device->dev;
+ tx_dev = ctlr->dma_tx->device->dev;
p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(tx_dev, p->tx_dma_addr))
goto free_rx_page;
- rx_dev = master->dma_rx->device->dev;
+ rx_dev = ctlr->dma_rx->device->dev;
p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_dev, p->rx_dma_addr))
@@ -1272,34 +1272,34 @@ free_rx_page:
free_tx_page:
free_page((unsigned long)p->tx_dma_page);
free_rx_chan:
- dma_release_channel(master->dma_rx);
+ dma_release_channel(ctlr->dma_rx);
free_tx_chan:
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
return -ENODEV;
}
static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
- struct spi_master *master = p->master;
+ struct spi_controller *ctlr = p->ctlr;
- if (!master->dma_tx)
+ if (!ctlr->dma_tx)
return;
- dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
- PAGE_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
+ DMA_TO_DEVICE);
free_page((unsigned long)p->rx_dma_page);
free_page((unsigned long)p->tx_dma_page);
- dma_release_channel(master->dma_rx);
- dma_release_channel(master->dma_tx);
+ dma_release_channel(ctlr->dma_rx);
+ dma_release_channel(ctlr->dma_tx);
}
static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
- struct spi_master *master;
+ struct spi_controller *ctlr;
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
@@ -1320,18 +1320,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
}
if (info->mode == MSIOF_SPI_SLAVE)
- master = spi_alloc_slave(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
+ ctlr = spi_alloc_slave(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
else
- master = spi_alloc_master(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
- if (master == NULL)
+ ctlr = spi_alloc_master(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
+ if (ctlr == NULL)
return -ENOMEM;
- p = spi_master_get_devdata(master);
+ p = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, p);
- p->master = master;
+ p->ctlr = ctlr;
p->info = info;
p->min_div_pow = chipdata->min_div_pow;
@@ -1378,31 +1378,31 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p->rx_fifo_size = p->info->rx_fifo_override;
/* Setup GPIO chip selects */
- master->num_chipselect = p->info->num_chipselect;
+ ctlr->num_chipselect = p->info->num_chipselect;
ret = sh_msiof_get_cs_gpios(p);
if (ret)
goto err1;
- /* init master code */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
- master->flags = chipdata->master_flags;
- master->bus_num = pdev->id;
- master->dev.of_node = pdev->dev.of_node;
- master->setup = sh_msiof_spi_setup;
- master->prepare_message = sh_msiof_prepare_message;
- master->slave_abort = sh_msiof_slave_abort;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- master->auto_runtime_pm = true;
- master->transfer_one = sh_msiof_transfer_one;
+ /* init controller code */
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
+ ctlr->flags = chipdata->ctlr_flags;
+ ctlr->bus_num = pdev->id;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->setup = sh_msiof_spi_setup;
+ ctlr->prepare_message = sh_msiof_prepare_message;
+ ctlr->slave_abort = sh_msiof_slave_abort;
+ ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one = sh_msiof_transfer_one;
ret = sh_msiof_request_dma(p);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_master error.\n");
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto err2;
}
@@ -1412,7 +1412,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
sh_msiof_release_dma(p);
pm_runtime_disable(&pdev->dev);
err1:
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
}
@@ -1436,14 +1436,14 @@ static int sh_msiof_spi_suspend(struct device *dev)
{
struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
- return spi_master_suspend(p->master);
+ return spi_controller_suspend(p->ctlr);
}
static int sh_msiof_spi_resume(struct device *dev)
{
struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
- return spi_master_resume(p->master);
+ return spi_controller_resume(p->ctlr);
}
static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
@@ -1465,7 +1465,7 @@ static struct platform_driver sh_msiof_spi_drv = {
};
module_platform_driver(sh_msiof_spi_drv);
-MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
+MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_sh_msiof");
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
new file mode 100644
index 000000000000..93ec2c6cdbfd
--- /dev/null
+++ b/drivers/spi/spi-sifive.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2018 SiFive, Inc.
+//
+// SiFive SPI controller driver (master mode only)
+//
+// Author: SiFive, Inc.
+// sifive@sifive.com
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+
+#define SIFIVE_SPI_DRIVER_NAME "sifive_spi"
+
+#define SIFIVE_SPI_MAX_CS 32
+#define SIFIVE_SPI_DEFAULT_DEPTH 8
+#define SIFIVE_SPI_DEFAULT_MAX_BITS 8
+
+/* register offsets */
+#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */
+#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */
+#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */
+#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */
+#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */
+#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */
+#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */
+#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */
+#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */
+#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */
+#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */
+#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */
+#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */
+#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */
+#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */
+#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */
+
+/* sckdiv bits */
+#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU
+
+/* sckmode bits */
+#define SIFIVE_SPI_SCKMODE_PHA BIT(0)
+#define SIFIVE_SPI_SCKMODE_POL BIT(1)
+#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \
+ SIFIVE_SPI_SCKMODE_POL)
+
+/* csmode bits */
+#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U
+#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U
+#define SIFIVE_SPI_CSMODE_MODE_OFF 3U
+
+/* delay0 bits */
+#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x))
+#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU
+#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16)
+
+/* delay1 bits */
+#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x))
+#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU
+#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16)
+
+/* fmt bits */
+#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U
+#define SIFIVE_SPI_FMT_PROTO_DUAL 1U
+#define SIFIVE_SPI_FMT_PROTO_QUAD 2U
+#define SIFIVE_SPI_FMT_PROTO_MASK 3U
+#define SIFIVE_SPI_FMT_ENDIAN BIT(2)
+#define SIFIVE_SPI_FMT_DIR BIT(3)
+#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16)
+
+/* txdata bits */
+#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU
+#define SIFIVE_SPI_TXDATA_FULL BIT(31)
+
+/* rxdata bits */
+#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU
+#define SIFIVE_SPI_RXDATA_EMPTY BIT(31)
+
+/* ie and ip bits */
+#define SIFIVE_SPI_IP_TXWM BIT(0)
+#define SIFIVE_SPI_IP_RXWM BIT(1)
+
+struct sifive_spi {
+ void __iomem *regs; /* virt. address of control registers */
+ struct clk *clk; /* bus clock */
+ unsigned int fifo_depth; /* fifo depth in words */
+ u32 cs_inactive; /* level of the CS pins when inactive */
+ struct completion done; /* wake-up from interrupt */
+};
+
+static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value)
+{
+ iowrite32(value, spi->regs + offset);
+}
+
+static u32 sifive_spi_read(struct sifive_spi *spi, int offset)
+{
+ return ioread32(spi->regs + offset);
+}
+
+static void sifive_spi_init(struct sifive_spi *spi)
+{
+ /* Watermark interrupts are disabled by default */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+
+ /* Default watermark FIFO threshold values */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0);
+
+ /* Set CS/SCK Delays and Inactive Time to defaults */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0,
+ SIFIVE_SPI_DELAY0_CSSCK(1) |
+ SIFIVE_SPI_DELAY0_SCKCS(1));
+ sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1,
+ SIFIVE_SPI_DELAY1_INTERCS(1) |
+ SIFIVE_SPI_DELAY1_INTERXFR(0));
+
+ /* Exit specialized memory-mapped SPI flash mode */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0);
+}
+
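+/* Per-message setup: chip select polarity (CSDEF), the active chip
+ * select line (CSID) and the clock phase/polarity (SCKMODE).
+ */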
+static int
+sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ struct spi_device *device = msg->spi;
+
+ /* Update the chip select polarity */
+ if (device->mode & SPI_CS_HIGH)
+ spi->cs_inactive &= ~BIT(device->chip_select);
+ else
+ spi->cs_inactive |= BIT(device->chip_select);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
+
+ /* Select the correct device */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, device->chip_select);
+
+ /* Set clock mode */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE,
+ device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK);
+
+ return 0;
+}
+
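+/* Drive the native chip select: HOLD keeps CS asserted for the whole
+ * transfer, AUTO releases it again.
+ */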
+static void sifive_spi_set_cs(struct spi_device *device, bool is_high)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(device->master);
+
+ /* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */
+ if (device->mode & SPI_CS_HIGH)
+ is_high = !is_high;
+
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ?
+ SIFIVE_SPI_CSMODE_MODE_AUTO :
+ SIFIVE_SPI_CSMODE_MODE_HOLD);
+}
+
+static int
+sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device,
+ struct spi_transfer *t)
+{
+ u32 cr;
+ unsigned int mode;
+
+ /* Calculate and program the clock rate */
+ cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1;
+ cr &= SIFIVE_SPI_SCKDIV_DIV_MASK;
+ sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr);
+
+ mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits);
+
+ /* Set frame format */
+ cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word);
+ switch (mode) {
+ case SPI_NBITS_QUAD:
+ cr |= SIFIVE_SPI_FMT_PROTO_QUAD;
+ break;
+ case SPI_NBITS_DUAL:
+ cr |= SIFIVE_SPI_FMT_PROTO_DUAL;
+ break;
+ default:
+ cr |= SIFIVE_SPI_FMT_PROTO_SINGLE;
+ break;
+ }
+ if (device->mode & SPI_LSB_FIRST)
+ cr |= SIFIVE_SPI_FMT_ENDIAN;
+ if (!t->rx_buf)
+ cr |= SIFIVE_SPI_FMT_DIR;
+ sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr);
+
+ /* We will want to poll if the time we need to wait is
+ * less than the context switching time.
+ * Let's call that threshold 5us. The operation will take:
+ * (8/mode) * fifo_depth / hz <= 5 * 10^-6
+ * 1600000 * fifo_depth <= hz * mode
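+ * e.g. with the default 8-word FIFO, single-bit transfers at
+ * 12.8 MHz or more are polled.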
+ */
+ return 1600000 * spi->fifo_depth <= t->speed_hz * mode;
+}
+
+static irqreturn_t sifive_spi_irq(int irq, void *dev_id)
+{
+ struct sifive_spi *spi = dev_id;
+ u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
+
+ if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) {
+ /* Disable interrupts until next transfer */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+ complete(&spi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
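+/* Wait for the requested watermark condition, either by busy-polling
+ * the IP register (short transfers) or by enabling the watermark
+ * interrupt and sleeping on the completion.
+ */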
+static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll)
+{
+ if (poll) {
+ u32 cr;
+
+ do {
+ cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
+ } while (!(cr & bit));
+ } else {
+ reinit_completion(&spi->done);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit);
+ wait_for_completion(&spi->done);
+ }
+}
+
+static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr)
+{
+ WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA)
+ & SIFIVE_SPI_TXDATA_FULL) != 0);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA,
+ *tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK);
+}
+
+static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr)
+{
+ u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA);
+
+ WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0);
+ *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK;
+}
+
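+/* Run the transfer in chunks of at most fifo_depth words: fill the TX
+ * FIFO, then either drain the RX FIFO once RXWM fires or, with no
+ * receive buffer, just wait for TXWM.
+ */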
+static int
+sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
+ struct spi_transfer *t)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ int poll = sifive_spi_prep_transfer(spi, device, t);
+ const u8 *tx_ptr = t->tx_buf;
+ u8 *rx_ptr = t->rx_buf;
+ unsigned int remaining_words = t->len;
+
+ while (remaining_words) {
+ unsigned int n_words = min(remaining_words, spi->fifo_depth);
+ unsigned int i;
+
+ /* Enqueue n_words for transmission */
+ for (i = 0; i < n_words; i++)
+ sifive_spi_tx(spi, tx_ptr++);
+
+ if (rx_ptr) {
+ /* Wait for transmission + reception to complete */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK,
+ n_words - 1);
+ sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll);
+
+ /* Read out all the data from the RX FIFO */
+ for (i = 0; i < n_words; i++)
+ sifive_spi_rx(spi, rx_ptr++);
+ } else {
+ /* Wait for transmission to complete */
+ sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll);
+ }
+
+ remaining_words -= n_words;
+ }
+
+ return 0;
+}
+
+static int sifive_spi_probe(struct platform_device *pdev)
+{
+ struct sifive_spi *spi;
+ struct resource *res;
+ int ret, irq, num_cs;
+ u32 cs_bits, max_bits_per_word;
+ struct spi_master *master;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ spi = spi_master_get_devdata(master);
+ init_completion(&spi->done);
+ platform_set_drvdata(pdev, master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi->regs)) {
+ ret = PTR_ERR(spi->regs);
+ goto put_master;
+ }
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ dev_err(&pdev->dev, "Unable to find bus clock\n");
+ ret = PTR_ERR(spi->clk);
+ goto put_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Unable to find interrupt\n");
+ ret = irq;
+ goto put_master;
+ }
+
+ /* Optional parameters */
+ ret =
+ of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth",
+ &spi->fifo_depth);
+ if (ret < 0)
+ spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "sifive,max-bits-per-word",
+ &max_bits_per_word);
+
+ if (!ret && max_bits_per_word < 8) {
+ dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n");
+ ret = -EINVAL;
+ goto put_master;
+ }
+
+ /* Spin up the bus clock before hitting registers */
+ ret = clk_prepare_enable(spi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable bus clock\n");
+ goto put_master;
+ }
+
+ /* probe the number of CS lines */
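+ /*
+ * Write all-ones to CSDEF and read it back: only implemented CS
+ * bits stick. The original CSDEF value is restored afterwards.
+ */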
+ spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU);
+ cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
+ if (!cs_bits) {
+ dev_err(&pdev->dev, "Could not auto probe CS lines\n");
+ ret = -EINVAL;
+ goto put_master;
+ }
+
+ num_cs = ilog2(cs_bits) + 1;
+ if (num_cs > SIFIVE_SPI_MAX_CS) {
+ dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ ret = -EINVAL;
+ goto put_master;
+ }
+
+ /* Define our master */
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+ master->mode_bits = SPI_CPHA | SPI_CPOL
+ | SPI_CS_HIGH | SPI_LSB_FIRST
+ | SPI_TX_DUAL | SPI_TX_QUAD
+ | SPI_RX_DUAL | SPI_RX_QUAD;
+ /*
+ * TODO: add driver support for bits_per_word < 8:
+ * we need to "left-align" the bits (unless SPI_LSB_FIRST).
+ */
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS;
+ master->prepare_message = sifive_spi_prepare_message;
+ master->set_cs = sifive_spi_set_cs;
+ master->transfer_one = sifive_spi_transfer_one;
+
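+ /* The controller has no DMA support, so do not advertise a DMA mask */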
+ pdev->dev.dma_mask = NULL;
+ /* Configure the SPI master hardware */
+ sifive_spi_init(spi);
+
+ /* Register for SPI Interrupt */
+ ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0,
+ dev_name(&pdev->dev), spi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to bind to interrupt\n");
+ goto put_master;
+ }
+
+ dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
+ irq, master->num_chipselect);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto put_master;
+ }
+
+ return 0;
+
+put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int sifive_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+
+ /* Disable all the interrupts just in case */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+
+ return 0;
+}
+
+static const struct of_device_id sifive_spi_of_match[] = {
+ { .compatible = "sifive,spi0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sifive_spi_of_match);
+
+static struct platform_driver sifive_spi_driver = {
+ .probe = sifive_spi_probe,
+ .remove = sifive_spi_remove,
+ .driver = {
+ .name = SIFIVE_SPI_DRIVER_NAME,
+ .of_match_table = sifive_spi_of_match,
+ },
+};
+module_platform_driver(sifive_spi_driver);
+
+MODULE_AUTHOR("SiFive, Inc. <sifive@sifive.com>");
+MODULE_DESCRIPTION("SiFive SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 8daa24eec624..1b7eebb72c07 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -2,6 +2,9 @@
// Copyright (C) 2018 Spreadtrum Communications Inc.
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/sprd-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -9,6 +12,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -128,11 +132,28 @@
#define SPRD_SPI_DEFAULT_SOURCE 26000000
#define SPRD_SPI_MAX_SPEED_HZ 48000000
#define SPRD_SPI_AUTOSUSPEND_DELAY 100
+#define SPRD_SPI_DMA_STEP 8
+
+enum sprd_spi_dma_channel {
+ SPRD_SPI_RX,
+ SPRD_SPI_TX,
+ SPRD_SPI_MAX,
+};
+
+struct sprd_spi_dma {
+ bool enable;
+ struct dma_chan *dma_chan[SPRD_SPI_MAX];
+ enum dma_slave_buswidth width;
+ u32 fragmens_len;
+ u32 rx_len;
+};
struct sprd_spi {
void __iomem *base;
+ phys_addr_t phy_base;
struct device *dev;
struct clk *clk;
+ int irq;
u32 src_clk;
u32 hw_mode;
u32 trans_len;
@@ -141,6 +162,8 @@ struct sprd_spi {
u32 hw_speed_hz;
u32 len;
int status;
+ struct sprd_spi_dma dma;
+ struct completion xfer_completion;
const void *tx_buf;
void *rx_buf;
int (*read_bufs)(struct sprd_spi *ss, u32 len);
@@ -380,7 +403,7 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u32 trans_len = ss->trans_len, len;
- int ret, write_size = 0;
+ int ret, write_size = 0, read_size = 0;
while (trans_len) {
len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE :
@@ -416,19 +439,223 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
goto complete;
if (ss->trans_mode & SPRD_SPI_RX_MODE)
- ss->read_bufs(ss, len);
+ read_size += ss->read_bufs(ss, len);
trans_len -= len;
}
- ret = write_size;
-
+ if (ss->trans_mode & SPRD_SPI_TX_MODE)
+ ret = write_size;
+ else
+ ret = read_size;
complete:
sprd_spi_enter_idle(ss);
return ret;
}
+static void sprd_spi_irq_enable(struct sprd_spi *ss)
+{
+ u32 val;
+
+ /* Clear interrupt status before enabling interrupt. */
+ writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR,
+ ss->base + SPRD_SPI_INT_CLR);
+ /* Enable SPI interrupt only in DMA mode. */
+ val = readl_relaxed(ss->base + SPRD_SPI_INT_EN);
+ writel_relaxed(val | SPRD_SPI_TX_END_INT_EN |
+ SPRD_SPI_RX_END_INT_EN,
+ ss->base + SPRD_SPI_INT_EN);
+}
+
+static void sprd_spi_irq_disable(struct sprd_spi *ss)
+{
+ writel_relaxed(0, ss->base + SPRD_SPI_INT_EN);
+}
+
+static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2);
+
+ if (enable)
+ val |= SPRD_SPI_DMA_EN;
+ else
+ val &= ~SPRD_SPI_DMA_EN;
+
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL2);
+}
+
+static int sprd_spi_dma_submit(struct dma_chan *dma_chan,
+ struct dma_slave_config *c,
+ struct sg_table *sg,
+ enum dma_transfer_direction dir)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+ int ret;
+
+ ret = dmaengine_slave_config(dma_chan, c);
+ if (ret < 0)
+ return ret;
+
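+ /*
+ * Use fragment requests with a transfer-done interrupt; no hardware
+ * trigger or channel linking is needed for this transfer.
+ */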
+ flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
+ desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags);
+ if (!desc)
+ return -ENODEV;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie))
+ return dma_submit_error(cookie);
+
+ dma_async_issue_pending(dma_chan);
+
+ return 0;
+}
+
+static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX];
+ struct dma_slave_config config = {
+ .src_addr = ss->phy_base,
+ .src_addr_width = ss->dma.width,
+ .dst_addr_width = ss->dma.width,
+ .dst_maxburst = ss->dma.fragmens_len,
+ };
+ int ret;
+
+ ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
+ if (ret)
+ return ret;
+
+ return ss->dma.rx_len;
+}
+
+static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX];
+ struct dma_slave_config config = {
+ .dst_addr = ss->phy_base,
+ .src_addr_width = ss->dma.width,
+ .dst_addr_width = ss->dma.width,
+ .src_maxburst = ss->dma.fragmens_len,
+ };
+ int ret;
+
+ ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
+ if (ret)
+ return ret;
+
+ return t->len;
+}
+
+static int sprd_spi_dma_request(struct sprd_spi *ss)
+{
+ ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
+ if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) {
+ if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER)
+ return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
+
+ dev_err(ss->dev, "request RX DMA channel failed!\n");
+ return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
+ }
+
+ ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
+ if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
+ if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER)
+ return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
+
+ dev_err(ss->dev, "request TX DMA channel failed!\n");
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
+ return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
+ }
+
+ return 0;
+}
+
+static void sprd_spi_dma_release(struct sprd_spi *ss)
+{
+ if (ss->dma.dma_chan[SPRD_SPI_RX])
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
+
+ if (ss->dma.dma_chan[SPRD_SPI_TX])
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]);
+}
+
+static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
+ struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
+ u32 trans_len = ss->trans_len;
+ int ret, write_size = 0;
+
+ reinit_completion(&ss->xfer_completion);
+ sprd_spi_irq_enable(ss);
+ if (ss->trans_mode & SPRD_SPI_TX_MODE) {
+ write_size = sprd_spi_dma_tx_config(ss, t);
+ sprd_spi_set_tx_length(ss, trans_len);
+
+ /*
+ * In 3-wire mode or dual TX line mode, we need to
+ * request the controller to start the transfer.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_tx_req(ss);
+ } else {
+ sprd_spi_set_rx_length(ss, trans_len);
+
+ /*
+ * In 3-wire mode or dual TX line mode, we need to
+ * request the controller to start reading.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_rx_req(ss);
+ else
+ write_size = ss->write_bufs(ss, trans_len);
+ }
+
+ if (write_size < 0) {
+ ret = write_size;
+ dev_err(ss->dev, "failed to write, ret = %d\n", ret);
+ goto trans_complete;
+ }
+
+ if (ss->trans_mode & SPRD_SPI_RX_MODE) {
+ /*
+ * Set up the DMA receive data length, which must be an
+ * integral multiple of the fragment length. When the length
+ * of the received data is less than the fragment length, the
+ * DMA is configured to receive data according to the actual
+ * length of the received data.
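+ * For example, with the 8-byte fragment length used for 8-bit
+ * words, a 30-byte transfer is received as 24 bytes via DMA and
+ * the remaining 6 bytes are read out in the interrupt handler.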
+ */
+ ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
+ (t->len - t->len % ss->dma.fragmens_len) :
+ t->len;
+ ret = sprd_spi_dma_rx_config(ss, t);
+ if (ret < 0) {
+ dev_err(&sdev->dev,
+ "failed to configure rx DMA, ret = %d\n", ret);
+ goto trans_complete;
+ }
+ }
+
+ sprd_spi_dma_enable(ss, true);
+ wait_for_completion(&ss->xfer_completion);
+
+ if (ss->trans_mode & SPRD_SPI_TX_MODE)
+ ret = write_size;
+ else
+ ret = ss->dma.rx_len;
+
+trans_complete:
+ sprd_spi_dma_enable(ss, false);
+ sprd_spi_enter_idle(ss);
+ sprd_spi_irq_disable(ss);
+
+ return ret;
+}
+
static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
{
/*
@@ -514,16 +741,22 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev,
ss->trans_len = t->len;
ss->read_bufs = sprd_spi_read_bufs_u8;
ss->write_bufs = sprd_spi_write_bufs_u8;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP;
break;
case 16:
ss->trans_len = t->len >> 1;
ss->read_bufs = sprd_spi_read_bufs_u16;
ss->write_bufs = sprd_spi_write_bufs_u16;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1;
break;
case 32:
ss->trans_len = t->len >> 2;
ss->read_bufs = sprd_spi_read_bufs_u32;
ss->write_bufs = sprd_spi_write_bufs_u32;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2;
break;
default:
return -EINVAL;
@@ -561,7 +794,11 @@ static int sprd_spi_transfer_one(struct spi_controller *sctlr,
if (ret)
goto setup_err;
- ret = sprd_spi_txrx_bufs(sdev, t);
+ if (sctlr->can_dma(sctlr, sdev, t))
+ ret = sprd_spi_dma_txrx_bufs(sdev, t);
+ else
+ ret = sprd_spi_txrx_bufs(sdev, t);
+
if (ret == t->len)
ret = 0;
else if (ret >= 0)
@@ -573,6 +810,53 @@ setup_err:
return ret;
}
+static irqreturn_t sprd_spi_handle_irq(int irq, void *data)
+{
+ struct sprd_spi *ss = (struct sprd_spi *)data;
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS);
+
+ if (val & SPRD_SPI_MASK_TX_END) {
+ writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
+ if (!(ss->trans_mode & SPRD_SPI_RX_MODE))
+ complete(&ss->xfer_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ if (val & SPRD_SPI_MASK_RX_END) {
+ writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
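+ /* Read out any tail bytes not covered by the fragment-aligned DMA length */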
+ if (ss->dma.rx_len < ss->len) {
+ ss->rx_buf += ss->dma.rx_len;
+ ss->dma.rx_len +=
+ ss->read_bufs(ss, ss->len - ss->dma.rx_len);
+ }
+ complete(&ss->xfer_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
+{
+ int ret;
+
+ ss->irq = platform_get_irq(pdev, 0);
+ if (ss->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return ss->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
+ 0, pdev->name, ss);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n",
+ ss->irq, ret);
+
+ return ret;
+}
+
static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
{
struct clk *clk_spi, *clk_parent;
@@ -603,6 +887,35 @@ static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
return 0;
}
+static bool sprd_spi_can_dma(struct spi_controller *sctlr,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+
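+ /*
+ * Use DMA only if the channels were requested successfully and the
+ * transfer does not fit in the FIFO.
+ */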
+ return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
+}
+
+static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss)
+{
+ int ret;
+
+ ret = sprd_spi_dma_request(ss);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ dev_warn(&pdev->dev,
+ "failed to request dma, enter no dma mode, ret = %d\n",
+ ret);
+
+ return 0;
+ }
+
+ ss->dma.enable = true;
+
+ return 0;
+}
+
static int sprd_spi_probe(struct platform_device *pdev)
{
struct spi_controller *sctlr;
@@ -623,25 +936,36 @@ static int sprd_spi_probe(struct platform_device *pdev)
goto free_controller;
}
+ ss->phy_base = res->start;
ss->dev = &pdev->dev;
sctlr->dev.of_node = pdev->dev.of_node;
sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
sctlr->bus_num = pdev->id;
sctlr->set_cs = sprd_spi_chipselect;
sctlr->transfer_one = sprd_spi_transfer_one;
+ sctlr->can_dma = sprd_spi_can_dma;
sctlr->auto_runtime_pm = true;
sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
SPRD_SPI_MAX_SPEED_HZ);
+ init_completion(&ss->xfer_completion);
platform_set_drvdata(pdev, sctlr);
ret = sprd_spi_clk_init(pdev, ss);
if (ret)
goto free_controller;
- ret = clk_prepare_enable(ss->clk);
+ ret = sprd_spi_irq_init(pdev, ss);
if (ret)
goto free_controller;
+ ret = sprd_spi_dma_init(pdev, ss);
+ if (ret)
+ goto free_controller;
+
+ ret = clk_prepare_enable(ss->clk);
+ if (ret)
+ goto release_dma;
+
ret = pm_runtime_set_active(&pdev->dev);
if (ret < 0)
goto disable_clk;
@@ -670,6 +994,8 @@ err_rpm_put:
pm_runtime_disable(&pdev->dev);
disable_clk:
clk_disable_unprepare(ss->clk);
+release_dma:
+ sprd_spi_dma_release(ss);
free_controller:
spi_controller_put(sctlr);
@@ -688,6 +1014,10 @@ static int sprd_spi_remove(struct platform_device *pdev)
return ret;
}
+ spi_controller_suspend(sctlr);
+
+ if (ss->dma.enable)
+ sprd_spi_dma_release(ss);
clk_disable_unprepare(ss->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -700,6 +1030,9 @@ static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+ if (ss->dma.enable)
+ sprd_spi_dma_release(ss);
+
clk_disable_unprepare(ss->clk);
return 0;
@@ -715,7 +1048,14 @@ static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
if (ret)
return ret;
- return 0;
+ if (!ss->dma.enable)
+ return 0;
+
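+ /* The DMA channels are released in runtime suspend, so re-request them */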
+ ret = sprd_spi_dma_request(ss);
+ if (ret)
+ clk_disable_unprepare(ss->clk);
+
+ return ret;
}
static const struct dev_pm_ops sprd_spi_pm_ops = {
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index ad1e55d3d5d5..4186ed20d796 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1,23 +1,10 @@
-/*
- * STMicroelectronics STM32 SPI Controller driver (master mode only)
- *
- * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
- * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
- *
- * License terms: GPL V2.0.
- *
- * spi_stm32 driver is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * spi_stm32 driver is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License along with
- * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// STMicroelectronics STM32 SPI Controller driver (master mode only)
+//
+// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
+
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -33,99 +20,251 @@
#define DRIVER_NAME "spi_stm32"
-/* STM32 SPI registers */
-#define STM32_SPI_CR1 0x00
-#define STM32_SPI_CR2 0x04
-#define STM32_SPI_CFG1 0x08
-#define STM32_SPI_CFG2 0x0C
-#define STM32_SPI_IER 0x10
-#define STM32_SPI_SR 0x14
-#define STM32_SPI_IFCR 0x18
-#define STM32_SPI_TXDR 0x20
-#define STM32_SPI_RXDR 0x30
-#define STM32_SPI_I2SCFGR 0x50
-
-/* STM32_SPI_CR1 bit fields */
-#define SPI_CR1_SPE BIT(0)
-#define SPI_CR1_MASRX BIT(8)
-#define SPI_CR1_CSTART BIT(9)
-#define SPI_CR1_CSUSP BIT(10)
-#define SPI_CR1_HDDIR BIT(11)
-#define SPI_CR1_SSI BIT(12)
-
-/* STM32_SPI_CR2 bit fields */
-#define SPI_CR2_TSIZE_SHIFT 0
-#define SPI_CR2_TSIZE GENMASK(15, 0)
-
-/* STM32_SPI_CFG1 bit fields */
-#define SPI_CFG1_DSIZE_SHIFT 0
-#define SPI_CFG1_DSIZE GENMASK(4, 0)
-#define SPI_CFG1_FTHLV_SHIFT 5
-#define SPI_CFG1_FTHLV GENMASK(8, 5)
-#define SPI_CFG1_RXDMAEN BIT(14)
-#define SPI_CFG1_TXDMAEN BIT(15)
-#define SPI_CFG1_MBR_SHIFT 28
-#define SPI_CFG1_MBR GENMASK(30, 28)
-#define SPI_CFG1_MBR_MIN 0
-#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
-
-/* STM32_SPI_CFG2 bit fields */
-#define SPI_CFG2_MIDI_SHIFT 4
-#define SPI_CFG2_MIDI GENMASK(7, 4)
-#define SPI_CFG2_COMM_SHIFT 17
-#define SPI_CFG2_COMM GENMASK(18, 17)
-#define SPI_CFG2_SP_SHIFT 19
-#define SPI_CFG2_SP GENMASK(21, 19)
-#define SPI_CFG2_MASTER BIT(22)
-#define SPI_CFG2_LSBFRST BIT(23)
-#define SPI_CFG2_CPHA BIT(24)
-#define SPI_CFG2_CPOL BIT(25)
-#define SPI_CFG2_SSM BIT(26)
-#define SPI_CFG2_AFCNTR BIT(31)
-
-/* STM32_SPI_IER bit fields */
-#define SPI_IER_RXPIE BIT(0)
-#define SPI_IER_TXPIE BIT(1)
-#define SPI_IER_DXPIE BIT(2)
-#define SPI_IER_EOTIE BIT(3)
-#define SPI_IER_TXTFIE BIT(4)
-#define SPI_IER_OVRIE BIT(6)
-#define SPI_IER_MODFIE BIT(9)
-#define SPI_IER_ALL GENMASK(10, 0)
-
-/* STM32_SPI_SR bit fields */
-#define SPI_SR_RXP BIT(0)
-#define SPI_SR_TXP BIT(1)
-#define SPI_SR_EOT BIT(3)
-#define SPI_SR_OVR BIT(6)
-#define SPI_SR_MODF BIT(9)
-#define SPI_SR_SUSP BIT(11)
-#define SPI_SR_RXPLVL_SHIFT 13
-#define SPI_SR_RXPLVL GENMASK(14, 13)
-#define SPI_SR_RXWNE BIT(15)
-
-/* STM32_SPI_IFCR bit fields */
-#define SPI_IFCR_ALL GENMASK(11, 3)
-
-/* STM32_SPI_I2SCFGR bit fields */
-#define SPI_I2SCFGR_I2SMOD BIT(0)
-
-/* SPI Master Baud Rate min/max divisor */
-#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN)
-#define SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX)
-
-/* SPI Communication mode */
+/* STM32F4 SPI registers */
+#define STM32F4_SPI_CR1 0x00
+#define STM32F4_SPI_CR2 0x04
+#define STM32F4_SPI_SR 0x08
+#define STM32F4_SPI_DR 0x0C
+#define STM32F4_SPI_I2SCFGR 0x1C
+
+/* STM32F4_SPI_CR1 bit fields */
+#define STM32F4_SPI_CR1_CPHA BIT(0)
+#define STM32F4_SPI_CR1_CPOL BIT(1)
+#define STM32F4_SPI_CR1_MSTR BIT(2)
+#define STM32F4_SPI_CR1_BR_SHIFT 3
+#define STM32F4_SPI_CR1_BR GENMASK(5, 3)
+#define STM32F4_SPI_CR1_SPE BIT(6)
+#define STM32F4_SPI_CR1_LSBFRST BIT(7)
+#define STM32F4_SPI_CR1_SSI BIT(8)
+#define STM32F4_SPI_CR1_SSM BIT(9)
+#define STM32F4_SPI_CR1_RXONLY BIT(10)
+#define STM32F4_SPI_CR1_DFF BIT(11)
+#define STM32F4_SPI_CR1_CRCNEXT BIT(12)
+#define STM32F4_SPI_CR1_CRCEN BIT(13)
+#define STM32F4_SPI_CR1_BIDIOE BIT(14)
+#define STM32F4_SPI_CR1_BIDIMODE BIT(15)
+#define STM32F4_SPI_CR1_BR_MIN 0
+#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
+
+/* STM32F4_SPI_CR2 bit fields */
+#define STM32F4_SPI_CR2_RXDMAEN BIT(0)
+#define STM32F4_SPI_CR2_TXDMAEN BIT(1)
+#define STM32F4_SPI_CR2_SSOE BIT(2)
+#define STM32F4_SPI_CR2_FRF BIT(4)
+#define STM32F4_SPI_CR2_ERRIE BIT(5)
+#define STM32F4_SPI_CR2_RXNEIE BIT(6)
+#define STM32F4_SPI_CR2_TXEIE BIT(7)
+
+/* STM32F4_SPI_SR bit fields */
+#define STM32F4_SPI_SR_RXNE BIT(0)
+#define STM32F4_SPI_SR_TXE BIT(1)
+#define STM32F4_SPI_SR_CHSIDE BIT(2)
+#define STM32F4_SPI_SR_UDR BIT(3)
+#define STM32F4_SPI_SR_CRCERR BIT(4)
+#define STM32F4_SPI_SR_MODF BIT(5)
+#define STM32F4_SPI_SR_OVR BIT(6)
+#define STM32F4_SPI_SR_BSY BIT(7)
+#define STM32F4_SPI_SR_FRE BIT(8)
+
+/* STM32F4_SPI_I2SCFGR bit fields */
+#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11)
+
+/* STM32F4 SPI Baud Rate min/max divisor */
+#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN)
+#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX)
+
+/* STM32H7 SPI registers */
+#define STM32H7_SPI_CR1 0x00
+#define STM32H7_SPI_CR2 0x04
+#define STM32H7_SPI_CFG1 0x08
+#define STM32H7_SPI_CFG2 0x0C
+#define STM32H7_SPI_IER 0x10
+#define STM32H7_SPI_SR 0x14
+#define STM32H7_SPI_IFCR 0x18
+#define STM32H7_SPI_TXDR 0x20
+#define STM32H7_SPI_RXDR 0x30
+#define STM32H7_SPI_I2SCFGR 0x50
+
+/* STM32H7_SPI_CR1 bit fields */
+#define STM32H7_SPI_CR1_SPE BIT(0)
+#define STM32H7_SPI_CR1_MASRX BIT(8)
+#define STM32H7_SPI_CR1_CSTART BIT(9)
+#define STM32H7_SPI_CR1_CSUSP BIT(10)
+#define STM32H7_SPI_CR1_HDDIR BIT(11)
+#define STM32H7_SPI_CR1_SSI BIT(12)
+
+/* STM32H7_SPI_CR2 bit fields */
+#define STM32H7_SPI_CR2_TSIZE_SHIFT 0
+#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0)
+
+/* STM32H7_SPI_CFG1 bit fields */
+#define STM32H7_SPI_CFG1_DSIZE_SHIFT 0
+#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0)
+#define STM32H7_SPI_CFG1_FTHLV_SHIFT 5
+#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5)
+#define STM32H7_SPI_CFG1_RXDMAEN BIT(14)
+#define STM32H7_SPI_CFG1_TXDMAEN BIT(15)
+#define STM32H7_SPI_CFG1_MBR_SHIFT 28
+#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28)
+#define STM32H7_SPI_CFG1_MBR_MIN 0
+#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
+
+/* STM32H7_SPI_CFG2 bit fields */
+#define STM32H7_SPI_CFG2_MIDI_SHIFT 4
+#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4)
+#define STM32H7_SPI_CFG2_COMM_SHIFT 17
+#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17)
+#define STM32H7_SPI_CFG2_SP_SHIFT 19
+#define STM32H7_SPI_CFG2_SP GENMASK(21, 19)
+#define STM32H7_SPI_CFG2_MASTER BIT(22)
+#define STM32H7_SPI_CFG2_LSBFRST BIT(23)
+#define STM32H7_SPI_CFG2_CPHA BIT(24)
+#define STM32H7_SPI_CFG2_CPOL BIT(25)
+#define STM32H7_SPI_CFG2_SSM BIT(26)
+#define STM32H7_SPI_CFG2_AFCNTR BIT(31)
+
+/* STM32H7_SPI_IER bit fields */
+#define STM32H7_SPI_IER_RXPIE BIT(0)
+#define STM32H7_SPI_IER_TXPIE BIT(1)
+#define STM32H7_SPI_IER_DXPIE BIT(2)
+#define STM32H7_SPI_IER_EOTIE BIT(3)
+#define STM32H7_SPI_IER_TXTFIE BIT(4)
+#define STM32H7_SPI_IER_OVRIE BIT(6)
+#define STM32H7_SPI_IER_MODFIE BIT(9)
+#define STM32H7_SPI_IER_ALL GENMASK(10, 0)
+
+/* STM32H7_SPI_SR bit fields */
+#define STM32H7_SPI_SR_RXP BIT(0)
+#define STM32H7_SPI_SR_TXP BIT(1)
+#define STM32H7_SPI_SR_EOT BIT(3)
+#define STM32H7_SPI_SR_OVR BIT(6)
+#define STM32H7_SPI_SR_MODF BIT(9)
+#define STM32H7_SPI_SR_SUSP BIT(11)
+#define STM32H7_SPI_SR_RXPLVL_SHIFT 13
+#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13)
+#define STM32H7_SPI_SR_RXWNE BIT(15)
+
+/* STM32H7_SPI_IFCR bit fields */
+#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3)
+
+/* STM32H7_SPI_I2SCFGR bit fields */
+#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)
+
+/* STM32H7 SPI Master Baud Rate min/max divisor */
+#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
+#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)
+
+/* STM32H7 SPI Communication mode */
+#define STM32H7_SPI_FULL_DUPLEX 0
+#define STM32H7_SPI_SIMPLEX_TX 1
+#define STM32H7_SPI_SIMPLEX_RX 2
+#define STM32H7_SPI_HALF_DUPLEX 3
+
+/* SPI Communication type */
#define SPI_FULL_DUPLEX 0
#define SPI_SIMPLEX_TX 1
#define SPI_SIMPLEX_RX 2
-#define SPI_HALF_DUPLEX 3
+#define SPI_3WIRE_TX 3
+#define SPI_3WIRE_RX 4
#define SPI_1HZ_NS 1000000000
+/*
+ * Use PIO for small transfers, avoiding DMA setup/teardown overhead on
+ * controllers without a FIFO.
+ */
+#define SPI_DMA_MIN_BYTES 16
+
+/**
+ * stm32_spi_reg - stm32 SPI register & bitfield desc
+ * @reg: register offset
+ * @mask: bitfield mask
+ * @shift: left shift
+ */
+struct stm32_spi_reg {
+ int reg;
+ int mask;
+ int shift;
+};
+
+/**
+ * stm32_spi_regspec - stm32 registers definition, compatible dependent data
+ * @en: enable register and SPI enable bit
+ * @dma_rx_en: SPI DMA RX enable register and SPI DMA RX enable bit
+ * @dma_tx_en: SPI DMA TX enable register and SPI DMA TX enable bit
+ * @cpol: clock polarity register and polarity bit
+ * @cpha: clock phase register and phase bit
+ * @lsb_first: LSB transmitted first register and bit
+ * @br: baud rate register and bitfields
+ * @rx: SPI RX data register
+ * @tx: SPI TX data register
+ */
+struct stm32_spi_regspec {
+ const struct stm32_spi_reg en;
+ const struct stm32_spi_reg dma_rx_en;
+ const struct stm32_spi_reg dma_tx_en;
+ const struct stm32_spi_reg cpol;
+ const struct stm32_spi_reg cpha;
+ const struct stm32_spi_reg lsb_first;
+ const struct stm32_spi_reg br;
+ const struct stm32_spi_reg rx;
+ const struct stm32_spi_reg tx;
+};
+
+struct stm32_spi;
+
+/**
+ * stm32_spi_cfg - stm32 compatible configuration data
+ * @regs: registers descriptions
+ * @get_fifo_size: routine to get fifo size
+ * @get_bpw_mask: routine to get bits per word mask
+ * @disable: routine to disable controller
+ * @config: routine to configure controller as SPI Master
+ * @set_bpw: routine to configure registers for the desired bits per word
+ * @set_mode: routine to configure registers to desired mode
+ * @set_data_idleness: optional routine to configure registers to desired idle
+ * time between frames (if driver has this functionality)
+ * @set_number_of_data: optional routine to configure registers to desired
+ * number of data (if driver has this functionality)
+ * @can_dma: routine to determine if the transfer is eligible for DMA use
+ * @transfer_one_dma_start: routine to start transfer of a single spi_transfer
+ * using DMA
+ * @dma_rx_cb: routine to call after DMA RX channel operation is complete
+ * @dma_tx_cb: routine to call after DMA TX channel operation is complete
+ * @transfer_one_irq: routine to configure interrupts for driver
+ * @irq_handler_event: Interrupt handler for SPI controller events
+ * @irq_handler_thread: thread of interrupt handler for SPI controller
+ * @baud_rate_div_min: minimum baud rate divisor
+ * @baud_rate_div_max: maximum baud rate divisor
+ * @has_fifo: boolean to know if fifo is used for driver
+ */
+struct stm32_spi_cfg {
+ const struct stm32_spi_regspec *regs;
+ int (*get_fifo_size)(struct stm32_spi *spi);
+ int (*get_bpw_mask)(struct stm32_spi *spi);
+ void (*disable)(struct stm32_spi *spi);
+ int (*config)(struct stm32_spi *spi);
+ void (*set_bpw)(struct stm32_spi *spi);
+ int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type);
+ void (*set_data_idleness)(struct stm32_spi *spi, u32 length);
+ int (*set_number_of_data)(struct stm32_spi *spi, u32 length);
+ void (*transfer_one_dma_start)(struct stm32_spi *spi);
+ void (*dma_rx_cb)(void *data);
+ void (*dma_tx_cb)(void *data);
+ int (*transfer_one_irq)(struct stm32_spi *spi);
+ irqreturn_t (*irq_handler_event)(int irq, void *dev_id);
+ irqreturn_t (*irq_handler_thread)(int irq, void *dev_id);
+ unsigned int baud_rate_div_min;
+ unsigned int baud_rate_div_max;
+ bool has_fifo;
+};
+
/**
* struct stm32_spi - private data of the SPI controller
* @dev: driver model representation of the controller
* @master: controller master interface
+ * @cfg: compatible configuration data
* @base: virtual memory area
* @clk: hw kernel clock feeding the SPI clock generator
* @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
@@ -151,6 +290,7 @@
struct stm32_spi {
struct device *dev;
struct spi_master *master;
+ const struct stm32_spi_cfg *cfg;
void __iomem *base;
struct clk *clk;
u32 clk_rate;
@@ -176,6 +316,40 @@ struct stm32_spi {
dma_addr_t phys_addr;
};
+static const struct stm32_spi_regspec stm32f4_spi_regspec = {
+ .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE },
+
+ .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN },
+ .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN },
+
+ .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
+ .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
+ .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
+ .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },
+
+ .rx = { STM32F4_SPI_DR },
+ .tx = { STM32F4_SPI_DR },
+};
+
+static const struct stm32_spi_regspec stm32h7_spi_regspec = {
+ /*
+ * SPI data transfer is enabled but spi_ker_ck is idle.
+ * CFG1 and CFG2 registers are write protected when SPE is enabled.
+ */
+ .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE },
+
+ .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN },
+ .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN },
+
+ .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
+ .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
+ .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
+ .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
+ STM32H7_SPI_CFG1_MBR_SHIFT },
+
+ .rx = { STM32H7_SPI_RXDR },
+ .tx = { STM32H7_SPI_TXDR },
+};
+
static inline void stm32_spi_set_bits(struct stm32_spi *spi,
u32 offset, u32 bits)
{
@@ -191,22 +365,22 @@ static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
}
/**
- * stm32_spi_get_fifo_size - Return fifo size
+ * stm32h7_spi_get_fifo_size - Return fifo size
* @spi: pointer to the spi controller data structure
*/
-static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
+static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi)
{
unsigned long flags;
u32 count = 0;
spin_lock_irqsave(&spi->lock, flags);
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
- while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)
- writeb_relaxed(++count, spi->base + STM32_SPI_TXDR);
+ while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP)
+ writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR);
- stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -216,10 +390,20 @@ static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
}
/**
- * stm32_spi_get_bpw_mask - Return bits per word mask
+ * stm32f4_spi_get_bpw_mask - Return bits per word mask
* @spi: pointer to the spi controller data structure
*/
-static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
+static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n");
+ return SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+}
+
+/**
+ * stm32h7_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
{
unsigned long flags;
u32 cfg1, max_bpw;
@@ -230,10 +414,11 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
* The most significant bit at DSIZE bit field is reserved when the
* maximum data size of periperal instances is limited to 16-bit
*/
- stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE);
- cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1);
- max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT;
+ cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1);
+ max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >>
+ STM32H7_SPI_CFG1_DSIZE_SHIFT;
max_bpw += 1;
spin_unlock_irqrestore(&spi->lock, flags);
@@ -244,13 +429,16 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
}
/**
- * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value
+ * stm32_spi_prepare_mbr - Determine baud rate divisor value
* @spi: pointer to the spi controller data structure
* @speed_hz: requested speed
+ * @min_div: minimum baud rate divisor
+ * @max_div: maximum baud rate divisor
*
- * Return SPI_CFG1.MBR value in case of success or -EINVAL
+ * Return baud rate divisor value in case of success or -EINVAL
*/
-static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
+static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
+ u32 min_div, u32 max_div)
{
u32 div, mbrdiv;
@@ -263,8 +451,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
* no need to check it there.
* However, we need to ensure the following calculations.
*/
- if (div < SPI_MBR_DIV_MIN ||
- div > SPI_MBR_DIV_MAX)
+ if ((div < min_div) || (div > max_div))
return -EINVAL;
/* Determine the first power of 2 greater than or equal to div */
@@ -279,10 +466,10 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
}
/**
- * stm32_spi_prepare_fthlv - Determine FIFO threshold level
+ * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
* @spi: pointer to the spi controller data structure
*/
-static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
+static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
{
u32 fthlv, half_fifo;
@@ -306,32 +493,62 @@ static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
}
/**
- * stm32_spi_write_txfifo - Write bytes in Transmit Data Register
+ * stm32f4_spi_write_tx - Write bytes to Transmit Data Register
* @spi: pointer to the spi controller data structure
*
* Read from tx_buf depends on remaining bytes to avoid to read beyond
* tx_buf end.
*/
-static void stm32_spi_write_txfifo(struct stm32_spi *spi)
+static void stm32f4_spi_write_tx(struct stm32_spi *spi)
+{
+ if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
+ STM32F4_SPI_SR_TXE)) {
+ u32 offs = spi->cur_xferlen - spi->tx_len;
+
+ if (spi->cur_bpw == 16) {
+ const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
+
+ writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR);
+ spi->tx_len -= sizeof(u16);
+ } else {
+ const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
+
+ writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR);
+ spi->tx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
+}
+
+/**
+ * stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * The read from tx_buf depends on the remaining bytes, to avoid
+ * reading beyond the end of tx_buf.
+ */
+static void stm32h7_spi_write_txfifo(struct stm32_spi *spi)
{
while ((spi->tx_len > 0) &&
- (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) {
+ (readl_relaxed(spi->base + STM32H7_SPI_SR) &
+ STM32H7_SPI_SR_TXP)) {
u32 offs = spi->cur_xferlen - spi->tx_len;
if (spi->tx_len >= sizeof(u32)) {
const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
- writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR);
+ writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u32);
} else if (spi->tx_len >= sizeof(u16)) {
const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
- writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR);
+ writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u16);
} else {
const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
- writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR);
+ writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u8);
}
}
@@ -340,43 +557,74 @@ static void stm32_spi_write_txfifo(struct stm32_spi *spi)
}
/**
- * stm32_spi_read_rxfifo - Read bytes in Receive Data Register
+ * stm32f4_spi_read_rx - Read bytes from Receive Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * The write to rx_buf depends on the remaining bytes, to avoid
+ * writing beyond the end of rx_buf.
+ */
+static void stm32f4_spi_read_rx(struct stm32_spi *spi)
+{
+ if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
+ STM32F4_SPI_SR_RXNE)) {
+ u32 offs = spi->cur_xferlen - spi->rx_len;
+
+ if (spi->cur_bpw == 16) {
+ u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
+
+ *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR);
+ spi->rx_len -= sizeof(u16);
+ } else {
+ u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
+
+ *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR);
+ spi->rx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len);
+}
+
+/**
+ * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
* @spi: pointer to the spi controller data structure
*
* Write in rx_buf depends on remaining bytes to avoid to write beyond
* rx_buf end.
*/
-static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
+static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
{
- u32 sr = readl_relaxed(spi->base + STM32_SPI_SR);
- u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+ u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
+ STM32H7_SPI_SR_RXPLVL_SHIFT;
while ((spi->rx_len > 0) &&
- ((sr & SPI_SR_RXP) ||
- (flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) {
+ ((sr & STM32H7_SPI_SR_RXP) ||
+ (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
u32 offs = spi->cur_xferlen - spi->rx_len;
if ((spi->rx_len >= sizeof(u32)) ||
- (flush && (sr & SPI_SR_RXWNE))) {
+ (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
- *rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR);
+ *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u32);
} else if ((spi->rx_len >= sizeof(u16)) ||
(flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
- *rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR);
+ *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u16);
} else {
u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
- *rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR);
+ *rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u8);
}
- sr = readl_relaxed(spi->base + STM32_SPI_SR);
- rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+ sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
+ STM32H7_SPI_SR_RXPLVL_SHIFT;
}
dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
@@ -386,26 +634,76 @@ static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
/**
* stm32_spi_enable - Enable SPI controller
* @spi: pointer to the spi controller data structure
- *
- * SPI data transfer is enabled but spi_ker_ck is idle.
- * SPI_CFG1 and SPI_CFG2 are now write protected.
*/
static void stm32_spi_enable(struct stm32_spi *spi)
{
dev_dbg(spi->dev, "enable controller\n");
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+ stm32_spi_set_bits(spi, spi->cfg->regs->en.reg,
+ spi->cfg->regs->en.mask);
}
/**
- * stm32_spi_disable - Disable SPI controller
+ * stm32f4_spi_disable - Disable SPI controller
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f4_spi_disable(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 sr;
+
+ dev_dbg(spi->dev, "disable controller\n");
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) &
+ STM32F4_SPI_CR1_SPE)) {
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return;
+ }
+
+ /* Disable interrupts */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE |
+ STM32F4_SPI_CR2_RXNEIE |
+ STM32F4_SPI_CR2_ERRIE);
+
+ /* Wait until BSY = 0 */
+ if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR,
+ sr, !(sr & STM32F4_SPI_SR_BSY),
+ 10, 100000) < 0) {
+ dev_warn(spi->dev, "disabling condition timeout\n");
+ }
+
+ if (spi->cur_usedma && spi->dma_tx)
+ dmaengine_terminate_all(spi->dma_tx);
+ if (spi->cur_usedma && spi->dma_rx)
+ dmaengine_terminate_all(spi->dma_rx);
+
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);
+
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN |
+ STM32F4_SPI_CR2_RXDMAEN);
+
+ /* Sequence to clear OVR flag */
+ readl_relaxed(spi->base + STM32F4_SPI_DR);
+ readl_relaxed(spi->base + STM32F4_SPI_SR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+}
+
+/**
+ * stm32h7_spi_disable - Disable SPI controller
* @spi: pointer to the spi controller data structure
*
* RX-Fifo is flushed when SPI controller is disabled. To prevent any data
- * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in
+ * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
* RX-Fifo.
+ * Normally, if TSIZE has been configured, we should relax the hardware at the
+ * reception of the EOT interrupt. But in case of error, EOT will not be
+ * raised. So the subsystem unprepare_message call allows us to properly
+ * complete the transfer from a hardware point of view.
*/
-static void stm32_spi_disable(struct stm32_spi *spi)
+static void stm32h7_spi_disable(struct stm32_spi *spi)
{
unsigned long flags;
u32 cr1, sr;
@@ -414,23 +712,23 @@ static void stm32_spi_disable(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
- cr1 = readl_relaxed(spi->base + STM32_SPI_CR1);
+ cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1);
- if (!(cr1 & SPI_CR1_SPE)) {
+ if (!(cr1 & STM32H7_SPI_CR1_SPE)) {
spin_unlock_irqrestore(&spi->lock, flags);
return;
}
/* Wait on EOT or suspend the flow */
- if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR,
- sr, !(sr & SPI_SR_EOT),
+ if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
+ sr, !(sr & STM32H7_SPI_SR_EOT),
10, 100000) < 0) {
- if (cr1 & SPI_CR1_CSTART) {
- writel_relaxed(cr1 | SPI_CR1_CSUSP,
- spi->base + STM32_SPI_CR1);
+ if (cr1 & STM32H7_SPI_CR1_CSTART) {
+ writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
+ spi->base + STM32H7_SPI_CR1);
if (readl_relaxed_poll_timeout_atomic(
- spi->base + STM32_SPI_SR,
- sr, !(sr & SPI_SR_SUSP),
+ spi->base + STM32H7_SPI_SR,
+ sr, !(sr & STM32H7_SPI_SR_SUSP),
10, 100000) < 0)
dev_warn(spi->dev,
"Suspend request timeout\n");
@@ -438,21 +736,21 @@ static void stm32_spi_disable(struct stm32_spi *spi)
}
if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
- stm32_spi_read_rxfifo(spi, true);
+ stm32h7_spi_read_rxfifo(spi, true);
- if (spi->cur_usedma && spi->tx_buf)
+ if (spi->cur_usedma && spi->dma_tx)
dmaengine_terminate_all(spi->dma_tx);
- if (spi->cur_usedma && spi->rx_buf)
+ if (spi->cur_usedma && spi->dma_rx)
dmaengine_terminate_all(spi->dma_rx);
- stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
- stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN |
- SPI_CFG1_RXDMAEN);
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN |
+ STM32H7_SPI_CFG1_RXDMAEN);
/* Disable interrupts and clear status flags */
- writel_relaxed(0, spi->base + STM32_SPI_IER);
- writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR);
+ writel_relaxed(0, spi->base + STM32H7_SPI_IER);
+ writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR);
spin_unlock_irqrestore(&spi->lock, flags);
}
@@ -460,26 +758,136 @@ static void stm32_spi_disable(struct stm32_spi *spi)
/**
* stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
*
- * If the current transfer size is greater than fifo size, use DMA.
+ * If the controller has a FIFO and the current transfer size is greater than
+ * the FIFO size, use DMA. Otherwise use DMA for transfers longer than the
+ * defined DMA minimum number of bytes.
*/
static bool stm32_spi_can_dma(struct spi_master *master,
struct spi_device *spi_dev,
struct spi_transfer *transfer)
{
+ unsigned int dma_size;
struct stm32_spi *spi = spi_master_get_devdata(master);
+ if (spi->cfg->has_fifo)
+ dma_size = spi->fifo_size;
+ else
+ dma_size = SPI_DMA_MIN_BYTES;
+
dev_dbg(spi->dev, "%s: %s\n", __func__,
- (transfer->len > spi->fifo_size) ? "true" : "false");
+ (transfer->len > dma_size) ? "true" : "false");
+
+ return (transfer->len > dma_size);
+}
+
+/**
+ * stm32f4_spi_irq_event - Interrupt handler for SPI controller events
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ u32 sr, mask = 0;
+ unsigned long flags;
+ bool end = false;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
+ /*
+ * The BSY flag is not handled in the interrupt handler; it is normal
+ * for this flag to be set, so it is simply masked out here.
+ */
+ sr &= ~STM32F4_SPI_SR_BSY;
+
+ if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
+ spi->cur_comm == SPI_3WIRE_TX)) {
+ /* OVR and RXNE flags shouldn't be handled in TX-only mode */
+ sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE);
+ mask |= STM32F4_SPI_SR_TXE;
+ }
+
+ if (!spi->cur_usedma && spi->cur_comm == SPI_FULL_DUPLEX) {
+ /* TXE flag is set and is handled when RXNE flag occurs */
+ sr &= ~STM32F4_SPI_SR_TXE;
+ mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
+ }
+
+ if (!(sr & mask)) {
+ dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (sr & STM32F4_SPI_SR_OVR) {
+ dev_warn(spi->dev, "Overrun: received value discarded\n");
+
+ /* Sequence to clear OVR flag */
+ readl_relaxed(spi->base + STM32F4_SPI_DR);
+ readl_relaxed(spi->base + STM32F4_SPI_SR);
+
+ /*
+ * If overrun is detected, it means that something went wrong,
+ * so stop the current transfer. Otherwise the transfer would wait
+ * for the next RXNE event, but DR has already been read and the end
+ * condition would never be reached.
+ */
+ end = true;
+ goto end_irq;
+ }
+
+ if (sr & STM32F4_SPI_SR_TXE) {
+ if (spi->tx_buf)
+ stm32f4_spi_write_tx(spi);
+ if (spi->tx_len == 0)
+ end = true;
+ }
+
+ if (sr & STM32F4_SPI_SR_RXNE) {
+ stm32f4_spi_read_rx(spi);
+ if (spi->rx_len == 0)
+ end = true;
+ else /* Load data for discontinuous mode */
+ stm32f4_spi_write_tx(spi);
+ }
+
+end_irq:
+ if (end) {
+ /* Immediately disable interrupts so that no new ones are generated */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2,
+ STM32F4_SPI_CR2_TXEIE |
+ STM32F4_SPI_CR2_RXNEIE |
+ STM32F4_SPI_CR2_ERRIE);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_WAKE_THREAD;
+ }
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ spi_finalize_current_transfer(master);
+ stm32f4_spi_disable(spi);
- return (transfer->len > spi->fifo_size);
+ return IRQ_HANDLED;
}
/**
- * stm32_spi_irq - Interrupt handler for SPI controller events
+ * stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller
* @irq: interrupt line
* @dev_id: SPI controller master interface
*/
-static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
+static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct stm32_spi *spi = spi_master_get_devdata(master);
@@ -489,19 +897,19 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
spin_lock_irqsave(&spi->lock, flags);
- sr = readl_relaxed(spi->base + STM32_SPI_SR);
- ier = readl_relaxed(spi->base + STM32_SPI_IER);
+ sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
mask = ier;
/* EOTIE is triggered on EOT, SUSP and TXC events. */
- mask |= SPI_SR_SUSP;
+ mask |= STM32H7_SPI_SR_SUSP;
/*
* When TXTF is set, DXPIE and TXPIE are cleared. So in case of
* Full-Duplex, need to poll RXP event to know if there are remaining
* data, before disabling SPI.
*/
if (spi->rx_buf && !spi->cur_usedma)
- mask |= SPI_SR_RXP;
+ mask |= STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
@@ -510,10 +918,10 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
return IRQ_NONE;
}
- if (sr & SPI_SR_SUSP) {
+ if (sr & STM32H7_SPI_SR_SUSP) {
dev_warn(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32_spi_read_rxfifo(spi, false);
+ stm32h7_spi_read_rxfifo(spi, false);
/*
* If communication is suspended while using DMA, it means
* that something went wrong, so stop the current transfer
@@ -522,15 +930,15 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
end = true;
}
- if (sr & SPI_SR_MODF) {
+ if (sr & STM32H7_SPI_SR_MODF) {
dev_warn(spi->dev, "Mode fault: transfer aborted\n");
end = true;
}
- if (sr & SPI_SR_OVR) {
+ if (sr & STM32H7_SPI_SR_OVR) {
dev_warn(spi->dev, "Overrun: received value discarded\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32_spi_read_rxfifo(spi, false);
+ stm32h7_spi_read_rxfifo(spi, false);
/*
* If overrun is detected while using DMA, it means that
* something went wrong, so stop the current transfer
@@ -539,27 +947,27 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
end = true;
}
- if (sr & SPI_SR_EOT) {
+ if (sr & STM32H7_SPI_SR_EOT) {
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32_spi_read_rxfifo(spi, true);
+ stm32h7_spi_read_rxfifo(spi, true);
end = true;
}
- if (sr & SPI_SR_TXP)
+ if (sr & STM32H7_SPI_SR_TXP)
if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
- stm32_spi_write_txfifo(spi);
+ stm32h7_spi_write_txfifo(spi);
- if (sr & SPI_SR_RXP)
+ if (sr & STM32H7_SPI_SR_RXP)
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32_spi_read_rxfifo(spi, false);
+ stm32h7_spi_read_rxfifo(spi, false);
- writel_relaxed(mask, spi->base + STM32_SPI_IFCR);
+ writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
spin_unlock_irqrestore(&spi->lock, flags);
if (end) {
spi_finalize_current_transfer(master);
- stm32_spi_disable(spi);
+ stm32h7_spi_disable(spi);
}
return IRQ_HANDLED;
@@ -598,7 +1006,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
struct spi_device *spi_dev = msg->spi;
struct device_node *np = spi_dev->dev.of_node;
unsigned long flags;
- u32 cfg2_clrb = 0, cfg2_setb = 0;
+ u32 clrb = 0, setb = 0;
/* SPI slave device may need time between data frames */
spi->cur_midi = 0;
@@ -606,19 +1014,19 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
if (spi_dev->mode & SPI_CPOL)
- cfg2_setb |= SPI_CFG2_CPOL;
+ setb |= spi->cfg->regs->cpol.mask;
else
- cfg2_clrb |= SPI_CFG2_CPOL;
+ clrb |= spi->cfg->regs->cpol.mask;
if (spi_dev->mode & SPI_CPHA)
- cfg2_setb |= SPI_CFG2_CPHA;
+ setb |= spi->cfg->regs->cpha.mask;
else
- cfg2_clrb |= SPI_CFG2_CPHA;
+ clrb |= spi->cfg->regs->cpha.mask;
if (spi_dev->mode & SPI_LSB_FIRST)
- cfg2_setb |= SPI_CFG2_LSBFRST;
+ setb |= spi->cfg->regs->lsb_first.mask;
else
- cfg2_clrb |= SPI_CFG2_LSBFRST;
+ clrb |= spi->cfg->regs->lsb_first.mask;
dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
spi_dev->mode & SPI_CPOL,
@@ -628,11 +1036,12 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
spin_lock_irqsave(&spi->lock, flags);
- if (cfg2_clrb || cfg2_setb)
+ /* CPOL, CPHA and LSB FIRST bits have common register */
+ if (clrb || setb)
writel_relaxed(
- (readl_relaxed(spi->base + STM32_SPI_CFG2) &
- ~cfg2_clrb) | cfg2_setb,
- spi->base + STM32_SPI_CFG2);
+ (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) &
+ ~clrb) | setb,
+ spi->base + spi->cfg->regs->cpol.reg);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -640,12 +1049,40 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
}
/**
- * stm32_spi_dma_cb - dma callback
+ * stm32f4_spi_dma_tx_cb - dma callback
+ *
+ * DMA callback is called when the transfer is complete for DMA TX channel.
+ */
+static void stm32f4_spi_dma_tx_cb(void *data)
+{
+ struct stm32_spi *spi = data;
+
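+ /* In full-duplex mode the RX DMA callback finalizes the transfer instead */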
+ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
+ spi_finalize_current_transfer(spi->master);
+ stm32f4_spi_disable(spi);
+ }
+}
+
+/**
+ * stm32f4_spi_dma_rx_cb - dma callback
+ *
+ * DMA callback is called when the transfer is complete for DMA RX channel.
+ */
+static void stm32f4_spi_dma_rx_cb(void *data)
+{
+ struct stm32_spi *spi = data;
+
+ spi_finalize_current_transfer(spi->master);
+ stm32f4_spi_disable(spi);
+}
+
+/**
+ * stm32h7_spi_dma_cb - dma callback
*
* DMA callback is called when the transfer is complete or when an error
* occurs. If the transfer is complete, EOT flag is raised.
*/
-static void stm32_spi_dma_cb(void *data)
+static void stm32h7_spi_dma_cb(void *data)
{
struct stm32_spi *spi = data;
unsigned long flags;
@@ -653,11 +1090,11 @@ static void stm32_spi_dma_cb(void *data)
spin_lock_irqsave(&spi->lock, flags);
- sr = readl_relaxed(spi->base + STM32_SPI_SR);
+ sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
spin_unlock_irqrestore(&spi->lock, flags);
- if (!(sr & SPI_SR_EOT))
+ if (!(sr & STM32H7_SPI_SR_EOT))
dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
/* Now wait for EOT, or SUSP or OVR in case of error */
@@ -681,23 +1118,27 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
else
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
- /* Valid for DMA Half or Full Fifo threshold */
- if (spi->cur_fthlv == 2)
+ if (spi->cfg->has_fifo) {
+ /* Valid for DMA Half or Full Fifo threshold */
+ if (spi->cur_fthlv == 2)
+ maxburst = 1;
+ else
+ maxburst = spi->cur_fthlv;
+ } else {
maxburst = 1;
- else
- maxburst = spi->cur_fthlv;
+ }
memset(dma_conf, 0, sizeof(struct dma_slave_config));
dma_conf->direction = dir;
if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
- dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR;
+ dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg;
dma_conf->src_addr_width = buswidth;
dma_conf->src_maxburst = maxburst;
dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
buswidth, maxburst);
} else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
- dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR;
+ dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg;
dma_conf->dst_addr_width = buswidth;
dma_conf->dst_maxburst = maxburst;
@@ -707,27 +1148,68 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
}
/**
- * stm32_spi_transfer_one_irq - transfer a single spi_transfer using
- * interrupts
+ * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
+ * interrupts
*
* It must returns 0 if the transfer is finished or 1 if the transfer is still
* in progress.
*/
-static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
+static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 cr2 = 0;
+
+ /* Enable the interrupts relative to the current communication mode */
+ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
+ cr2 |= STM32F4_SPI_CR2_TXEIE;
+ } else if (spi->cur_comm == SPI_FULL_DUPLEX) {
+ /*
+ * In transmit-only mode, the OVR flag is set in the SR register
+ * since the received data are never read. Therefore set the OVR
+ * interrupt only when an rx buffer is available.
+ */
+ cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE;
+ } else {
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2);
+
+ stm32_spi_enable(spi);
+
+ /* starting data transfer when buffer is loaded */
+ if (spi->tx_buf)
+ stm32f4_spi_write_tx(spi);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 1;
+}
+
+/**
+ * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using
+ * interrupts
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
{
unsigned long flags;
u32 ier = 0;
/* Enable the interrupts relative to the current communication mode */
if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
- ier |= SPI_IER_DXPIE;
+ ier |= STM32H7_SPI_IER_DXPIE;
else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
- ier |= SPI_IER_TXPIE;
+ ier |= STM32H7_SPI_IER_TXPIE;
else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
- ier |= SPI_IER_RXPIE;
+ ier |= STM32H7_SPI_IER_RXPIE;
/* Enable the interrupts relative to the end of transfer */
- ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
+ ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE |
+ STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
spin_lock_irqsave(&spi->lock, flags);
@@ -735,11 +1217,11 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
/* Be sure to have data in fifo before starting data transfer */
if (spi->tx_buf)
- stm32_spi_write_txfifo(spi);
+ stm32h7_spi_write_txfifo(spi);
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
- writel_relaxed(ier, spi->base + STM32_SPI_IER);
+ writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -747,6 +1229,43 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
}
/**
+ * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * transfer using DMA
+ */
+static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
+{
+ /* In DMA mode end of transfer is handled by DMA TX or RX callback. */
+ if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX ||
+ spi->cur_comm == SPI_FULL_DUPLEX) {
+ /*
+ * In transmit-only mode, the OVR flag is set in the SR register
+ * since the received data are never read. Therefore set OVR
+ * interrupt only when rx buffer is available.
+ */
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE);
+ }
+
+ stm32_spi_enable(spi);
+}
+
+/**
+ * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * transfer using DMA
+ */
+static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
+{
+ /* Enable the interrupts relative to the end of transfer */
+ stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
+ STM32H7_SPI_IER_TXTFIE |
+ STM32H7_SPI_IER_OVRIE |
+ STM32H7_SPI_IER_MODFIE);
+
+ stm32_spi_enable(spi);
+
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
+}
+
+/**
* stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
@@ -758,17 +1277,17 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
struct dma_slave_config tx_dma_conf, rx_dma_conf;
struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
unsigned long flags;
- u32 ier = 0;
spin_lock_irqsave(&spi->lock, flags);
rx_dma_desc = NULL;
- if (spi->rx_buf) {
+ if (spi->rx_buf && spi->dma_rx) {
stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
/* Enable Rx DMA request */
- stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
+ stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg,
+ spi->cfg->regs->dma_rx_en.mask);
rx_dma_desc = dmaengine_prep_slave_sg(
spi->dma_rx, xfer->rx_sg.sgl,
@@ -778,7 +1297,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
}
tx_dma_desc = NULL;
- if (spi->tx_buf) {
+ if (spi->tx_buf && spi->dma_tx) {
stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
@@ -789,12 +1308,15 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
DMA_PREP_INTERRUPT);
}
- if ((spi->tx_buf && !tx_dma_desc) ||
- (spi->rx_buf && !rx_dma_desc))
+ if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) ||
+ (spi->rx_buf && spi->dma_rx && !rx_dma_desc))
+ goto dma_desc_error;
+
+ if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc))
goto dma_desc_error;
if (rx_dma_desc) {
- rx_dma_desc->callback = stm32_spi_dma_cb;
+ rx_dma_desc->callback = spi->cfg->dma_rx_cb;
rx_dma_desc->callback_param = spi;
if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
@@ -806,8 +1328,9 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
}
if (tx_dma_desc) {
- if (spi->cur_comm == SPI_SIMPLEX_TX) {
- tx_dma_desc->callback = stm32_spi_dma_cb;
+ if (spi->cur_comm == SPI_SIMPLEX_TX ||
+ spi->cur_comm == SPI_3WIRE_TX) {
+ tx_dma_desc->callback = spi->cfg->dma_tx_cb;
tx_dma_desc->callback_param = spi;
}
@@ -819,130 +1342,278 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
dma_async_issue_pending(spi->dma_tx);
/* Enable Tx DMA request */
- stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN);
+ stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg,
+ spi->cfg->regs->dma_tx_en.mask);
}
- /* Enable the interrupts relative to the end of transfer */
- ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
- writel_relaxed(ier, spi->base + STM32_SPI_IER);
-
- stm32_spi_enable(spi);
-
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
+ spi->cfg->transfer_one_dma_start(spi);
spin_unlock_irqrestore(&spi->lock, flags);
return 1;
dma_submit_error:
- if (spi->rx_buf)
+ if (spi->dma_rx)
dmaengine_terminate_all(spi->dma_rx);
dma_desc_error:
- stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
+ stm32_spi_clr_bits(spi, spi->cfg->regs->dma_rx_en.reg,
+ spi->cfg->regs->dma_rx_en.mask);
spin_unlock_irqrestore(&spi->lock, flags);
dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");
- return stm32_spi_transfer_one_irq(spi);
+ spi->cur_usedma = false;
+ return spi->cfg->transfer_one_irq(spi);
}
/**
- * stm32_spi_transfer_one_setup - common setup to transfer a single
- * spi_transfer either using DMA or
- * interrupts.
+ * stm32f4_spi_set_bpw - Configure bits per word
+ * @spi: pointer to the spi controller data structure
*/
-static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
- struct spi_device *spi_dev,
- struct spi_transfer *transfer)
+static void stm32f4_spi_set_bpw(struct stm32_spi *spi)
{
- unsigned long flags;
- u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0;
- u32 mode, nb_words;
- int ret = 0;
-
- spin_lock_irqsave(&spi->lock, flags);
+ if (spi->cur_bpw == 16)
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ else
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+}
- if (spi->cur_bpw != transfer->bits_per_word) {
- u32 bpw, fthlv;
+/**
+ * stm32h7_spi_set_bpw - configure bits per word
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
+{
+ u32 bpw, fthlv;
+ u32 cfg1_clrb = 0, cfg1_setb = 0;
- spi->cur_bpw = transfer->bits_per_word;
- bpw = spi->cur_bpw - 1;
+ bpw = spi->cur_bpw - 1;
- cfg1_clrb |= SPI_CFG1_DSIZE;
- cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE;
+ cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE;
+ cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
+ STM32H7_SPI_CFG1_DSIZE;
- spi->cur_fthlv = stm32_spi_prepare_fthlv(spi);
- fthlv = spi->cur_fthlv - 1;
+ spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
+ fthlv = spi->cur_fthlv - 1;
- cfg1_clrb |= SPI_CFG1_FTHLV;
- cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV;
- }
+ cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
+ cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) &
+ STM32H7_SPI_CFG1_FTHLV;
- if (spi->cur_speed != transfer->speed_hz) {
- int mbr;
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
+ ~cfg1_clrb) | cfg1_setb,
+ spi->base + STM32H7_SPI_CFG1);
+}
- /* Update spi->cur_speed with real clock speed */
- mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz);
- if (mbr < 0) {
- ret = mbr;
- goto out;
- }
+/**
+ * stm32_spi_set_mbr - Configure baud rate divisor in master mode
+ * @spi: pointer to the spi controller data structure
+ * @mbrdiv: baud rate divisor value
+ */
+static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
+{
+ u32 clrb = 0, setb = 0;
- transfer->speed_hz = spi->cur_speed;
+ clrb |= spi->cfg->regs->br.mask;
+ setb |= ((u32)mbrdiv << spi->cfg->regs->br.shift) &
+ spi->cfg->regs->br.mask;
- cfg1_clrb |= SPI_CFG1_MBR;
- cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR;
- }
+ writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) &
+ ~clrb) | setb,
+ spi->base + spi->cfg->regs->br.reg);
+}
- if (cfg1_clrb || cfg1_setb)
- writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) &
- ~cfg1_clrb) | cfg1_setb,
- spi->base + STM32_SPI_CFG1);
+/**
+ * stm32_spi_communication_type - return transfer communication type
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to the spi transfer
+ */
+static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ unsigned int type = SPI_FULL_DUPLEX;
- mode = SPI_FULL_DUPLEX;
if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
/*
* SPI_3WIRE and xfer->tx_buf != NULL and xfer->rx_buf != NULL
- * is forbidden und unvalidated by SPI subsystem so depending
+ * is forbidden and unvalidated by SPI subsystem so depending
* on the valid buffer, we can determine the direction of the
* transfer.
*/
- mode = SPI_HALF_DUPLEX;
if (!transfer->tx_buf)
- stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
- else if (!transfer->rx_buf)
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
+ type = SPI_3WIRE_RX;
+ else
+ type = SPI_3WIRE_TX;
} else {
if (!transfer->tx_buf)
- mode = SPI_SIMPLEX_RX;
+ type = SPI_SIMPLEX_RX;
else if (!transfer->rx_buf)
- mode = SPI_SIMPLEX_TX;
+ type = SPI_SIMPLEX_TX;
+ }
+
+ return type;
+}
+
+/**
+ * stm32f4_spi_set_mode - configure communication mode
+ * @spi: pointer to the spi controller data structure
+ * @comm_type: type of communication to configure
+ */
+static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
+{
+ if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) {
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIMODE |
+ STM32F4_SPI_CR1_BIDIOE);
+ } else if (comm_type == SPI_FULL_DUPLEX) {
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIMODE |
+ STM32F4_SPI_CR1_BIDIOE);
+ } else {
+ return -EINVAL;
}
- if (spi->cur_comm != mode) {
- spi->cur_comm = mode;
- cfg2_clrb |= SPI_CFG2_COMM;
- cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM;
+ return 0;
+}
+
+/**
+ * stm32h7_spi_set_mode - configure communication mode
+ * @spi: pointer to the spi controller data structure
+ * @comm_type: type of communication to configure
+ */
+static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
+{
+ u32 mode;
+ u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+ if (comm_type == SPI_3WIRE_RX) {
+ mode = STM32H7_SPI_HALF_DUPLEX;
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
+ } else if (comm_type == SPI_3WIRE_TX) {
+ mode = STM32H7_SPI_HALF_DUPLEX;
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
+ } else if (comm_type == SPI_SIMPLEX_RX) {
+ mode = STM32H7_SPI_SIMPLEX_RX;
+ } else if (comm_type == SPI_SIMPLEX_TX) {
+ mode = STM32H7_SPI_SIMPLEX_TX;
+ } else {
+ mode = STM32H7_SPI_FULL_DUPLEX;
}
- cfg2_clrb |= SPI_CFG2_MIDI;
- if ((transfer->len > 1) && (spi->cur_midi > 0)) {
+ cfg2_clrb |= STM32H7_SPI_CFG2_COMM;
+ cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) &
+ STM32H7_SPI_CFG2_COMM;
+
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
+ ~cfg2_clrb) | cfg2_setb,
+ spi->base + STM32H7_SPI_CFG2);
+
+ return 0;
+}
+
+/**
+ * stm32h7_spi_data_idleness - configure minimum time delay inserted between two
+ * consecutive data frames in master mode
+ * @spi: pointer to the spi controller data structure
+ * @len: transfer length
+ */
+static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
+{
+ u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+ cfg2_clrb |= STM32H7_SPI_CFG2_MIDI;
+ if ((len > 1) && (spi->cur_midi > 0)) {
u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
- (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT);
+ (u32)STM32H7_SPI_CFG2_MIDI >>
+ STM32H7_SPI_CFG2_MIDI_SHIFT);
dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
sck_period_ns, midi, midi * sck_period_ns);
+ cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) &
+ STM32H7_SPI_CFG2_MIDI;
+ }
+
+ writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
+ ~cfg2_clrb) | cfg2_setb,
+ spi->base + STM32H7_SPI_CFG2);
+}
+
+/**
+ * stm32h7_spi_number_of_data - configure the number of data frames to transfer
+ * @spi: pointer to the spi controller data structure
+ * @nb_words: number of data frames to transfer
+ */
+static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
+{
+ u32 cr2_clrb = 0, cr2_setb = 0;
+
+ if (nb_words <= (STM32H7_SPI_CR2_TSIZE >>
+ STM32H7_SPI_CR2_TSIZE_SHIFT)) {
+ cr2_clrb |= STM32H7_SPI_CR2_TSIZE;
+ cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT;
+ writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) &
+ ~cr2_clrb) | cr2_setb,
+ spi->base + STM32H7_SPI_CR2);
+ } else {
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+/**
+ * stm32_spi_transfer_one_setup - common setup to transfer a single
+ * spi_transfer either using DMA or
+ * interrupts.
+ */
+static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
+ struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ unsigned long flags;
+ unsigned int comm_type;
+ int nb_words, ret = 0;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ if (spi->cur_bpw != transfer->bits_per_word) {
+ spi->cur_bpw = transfer->bits_per_word;
+ spi->cfg->set_bpw(spi);
+ }
- cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI;
+ if (spi->cur_speed != transfer->speed_hz) {
+ int mbr;
+
+ /* Update spi->cur_speed with real clock speed */
+ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+ spi->cfg->baud_rate_div_min,
+ spi->cfg->baud_rate_div_max);
+ if (mbr < 0) {
+ ret = mbr;
+ goto out;
+ }
+
+ transfer->speed_hz = spi->cur_speed;
+ stm32_spi_set_mbr(spi, mbr);
}
- if (cfg2_clrb || cfg2_setb)
- writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) &
- ~cfg2_clrb) | cfg2_setb,
- spi->base + STM32_SPI_CFG2);
+ comm_type = stm32_spi_communication_type(spi_dev, transfer);
+ if (spi->cur_comm != comm_type) {
+ ret = spi->cfg->set_mode(spi, comm_type);
+
+ if (ret < 0)
+ goto out;
+
+ spi->cur_comm = comm_type;
+ }
+
+ if (spi->cfg->set_data_idleness)
+ spi->cfg->set_data_idleness(spi, transfer->len);
if (spi->cur_bpw <= 8)
nb_words = transfer->len;
@@ -950,13 +1621,11 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
else
nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
- nb_words <<= SPI_CR2_TSIZE_SHIFT;
- if (nb_words <= SPI_CR2_TSIZE) {
- writel_relaxed(nb_words, spi->base + STM32_SPI_CR2);
- } else {
- ret = -EMSGSIZE;
- goto out;
+ if (spi->cfg->set_number_of_data) {
+ ret = spi->cfg->set_number_of_data(spi, nb_words);
+ if (ret < 0)
+ goto out;
}
spi->cur_xferlen = transfer->len;
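The setup path above derives the number of data frames from the byte length and the current bits-per-word before programming the transfer size. A minimal stand-alone sketch of that conversion (the helper name is illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* For bpw <= 8 every byte is one frame; otherwise frames are packed
 * into 16-bit or 32-bit units, rounding up. */
static unsigned int spi_nb_words(unsigned int len_bytes, unsigned int bpw)
{
	if (bpw <= 8)
		return len_bytes;
	else if (bpw <= 16)
		return DIV_ROUND_UP(len_bytes * 8, 16);
	else
		return DIV_ROUND_UP(len_bytes * 8, 32);
}

int main(void)
{
	printf("%u\n", spi_nb_words(10, 8));	/* 10 frames */
	printf("%u\n", spi_nb_words(10, 16));	/* 5 frames  */
	printf("%u\n", spi_nb_words(10, 32));	/* 3 frames  */
	return 0;
}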
@@ -997,7 +1666,7 @@ static int stm32_spi_transfer_one(struct spi_master *master,
spi->rx_len = spi->rx_buf ? transfer->len : 0;
spi->cur_usedma = (master->can_dma &&
- stm32_spi_can_dma(master, spi_dev, transfer));
+ master->can_dma(master, spi_dev, transfer));
ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
if (ret) {
@@ -1008,47 +1677,73 @@ static int stm32_spi_transfer_one(struct spi_master *master,
if (spi->cur_usedma)
return stm32_spi_transfer_one_dma(spi, transfer);
else
- return stm32_spi_transfer_one_irq(spi);
+ return spi->cfg->transfer_one_irq(spi);
}
/**
* stm32_spi_unprepare_msg - relax the hardware
- *
- * Normally, if TSIZE has been configured, we should relax the hardware at the
- * reception of the EOT interrupt. But in case of error, EOT will not be
- * raised. So the subsystem unprepare_message call allows us to properly
- * complete the transfer from an hardware point of view.
*/
static int stm32_spi_unprepare_msg(struct spi_master *master,
struct spi_message *msg)
{
struct stm32_spi *spi = spi_master_get_devdata(master);
- stm32_spi_disable(spi);
+ spi->cfg->disable(spi);
+
+ return 0;
+}
+
+/**
+ * stm32f4_spi_config - Configure SPI controller as SPI master
+ */
+static int stm32f4_spi_config(struct stm32_spi *spi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ /* Ensure I2SMOD bit is kept cleared */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR,
+ STM32F4_SPI_I2SCFGR_I2SMOD);
+
+ /*
+ * - SS input value high
+ * - transmitter half duplex direction
+ * - Set the master mode (default Motorola mode)
+ * - Consider 1 master/n slaves configuration and
+ * SS input value is determined by the SSI bit
+ */
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI |
+ STM32F4_SPI_CR1_BIDIOE |
+ STM32F4_SPI_CR1_MSTR |
+ STM32F4_SPI_CR1_SSM);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
return 0;
}
/**
- * stm32_spi_config - Configure SPI controller as SPI master
+ * stm32h7_spi_config - Configure SPI controller as SPI master
*/
-static int stm32_spi_config(struct stm32_spi *spi)
+static int stm32h7_spi_config(struct stm32_spi *spi)
{
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags);
/* Ensure I2SMOD bit is kept cleared */
- stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD);
+ stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR,
+ STM32H7_SPI_I2SCFGR_I2SMOD);
/*
* - SS input value high
* - transmitter half duplex direction
* - automatic communication suspend when RX-Fifo is full
*/
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI |
- SPI_CR1_HDDIR |
- SPI_CR1_MASRX);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI |
+ STM32H7_SPI_CR1_HDDIR |
+ STM32H7_SPI_CR1_MASRX);
/*
* - Set the master mode (default Motorola mode)
@@ -1056,17 +1751,56 @@ static int stm32_spi_config(struct stm32_spi *spi)
* SS input value is determined by the SSI bit
* - keep control of all associated GPIOs
*/
- stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER |
- SPI_CFG2_SSM |
- SPI_CFG2_AFCNTR);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER |
+ STM32H7_SPI_CFG2_SSM |
+ STM32H7_SPI_CFG2_AFCNTR);
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
}
+static const struct stm32_spi_cfg stm32f4_spi_cfg = {
+ .regs = &stm32f4_spi_regspec,
+ .get_bpw_mask = stm32f4_spi_get_bpw_mask,
+ .disable = stm32f4_spi_disable,
+ .config = stm32f4_spi_config,
+ .set_bpw = stm32f4_spi_set_bpw,
+ .set_mode = stm32f4_spi_set_mode,
+ .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
+ .dma_tx_cb = stm32f4_spi_dma_tx_cb,
+ .dma_rx_cb = stm32f4_spi_dma_rx_cb,
+ .transfer_one_irq = stm32f4_spi_transfer_one_irq,
+ .irq_handler_event = stm32f4_spi_irq_event,
+ .irq_handler_thread = stm32f4_spi_irq_thread,
+ .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
+ .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
+ .has_fifo = false,
+};
+
+static const struct stm32_spi_cfg stm32h7_spi_cfg = {
+ .regs = &stm32h7_spi_regspec,
+ .get_fifo_size = stm32h7_spi_get_fifo_size,
+ .get_bpw_mask = stm32h7_spi_get_bpw_mask,
+ .disable = stm32h7_spi_disable,
+ .config = stm32h7_spi_config,
+ .set_bpw = stm32h7_spi_set_bpw,
+ .set_mode = stm32h7_spi_set_mode,
+ .set_data_idleness = stm32h7_spi_data_idleness,
+ .set_number_of_data = stm32h7_spi_number_of_data,
+ .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
+ .dma_rx_cb = stm32h7_spi_dma_cb,
+ .dma_tx_cb = stm32h7_spi_dma_cb,
+ .transfer_one_irq = stm32h7_spi_transfer_one_irq,
+ .irq_handler_thread = stm32h7_spi_irq_thread,
+ .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
+ .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
+ .has_fifo = true,
+};
+
static const struct of_device_id stm32_spi_of_match[] = {
- { .compatible = "st,stm32h7-spi", },
+ { .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg },
+ { .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg },
{},
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
@@ -1090,12 +1824,17 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->master = master;
spin_lock_init(&spi->lock);
+ spi->cfg = (const struct stm32_spi_cfg *)
+ of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev)->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(spi->base)) {
ret = PTR_ERR(spi->base);
goto err_master_put;
}
+
spi->phys_addr = (dma_addr_t)res->start;
spi->irq = platform_get_irq(pdev, 0);
@@ -1104,16 +1843,17 @@ static int stm32_spi_probe(struct platform_device *pdev)
ret = -ENOENT;
goto err_master_put;
}
- ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL,
- stm32_spi_irq, IRQF_ONESHOT,
- pdev->name, master);
+ ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
+ spi->cfg->irq_handler_event,
+ spi->cfg->irq_handler_thread,
+ IRQF_ONESHOT, pdev->name, master);
if (ret) {
dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
ret);
goto err_master_put;
}
- spi->clk = devm_clk_get(&pdev->dev, 0);
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
ret = PTR_ERR(spi->clk);
dev_err(&pdev->dev, "clk get failed: %d\n", ret);
@@ -1139,9 +1879,10 @@ static int stm32_spi_probe(struct platform_device *pdev)
reset_control_deassert(spi->rst);
}
- spi->fifo_size = stm32_spi_get_fifo_size(spi);
+ if (spi->cfg->has_fifo)
+ spi->fifo_size = spi->cfg->get_fifo_size(spi);
- ret = stm32_spi_config(spi);
+ ret = spi->cfg->config(spi);
if (ret) {
dev_err(&pdev->dev, "controller configuration failed: %d\n",
ret);
@@ -1151,11 +1892,11 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->bus_num = pdev->id;
- master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST |
- SPI_3WIRE | SPI_LOOP;
- master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi);
- master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN;
- master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_3WIRE;
+ master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
+ master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
+ master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
master->setup = stm32_spi_setup;
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
@@ -1233,7 +1974,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct stm32_spi *spi = spi_master_get_devdata(master);
- stm32_spi_disable(spi);
+ spi->cfg->disable(spi);
if (master->dma_tx)
dma_release_channel(master->dma_tx);
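The spi-stm32.c changes above replace hard-coded register offsets with a per-variant table of register/mask/shift descriptors reached through spi->cfg, so the shared code never hard-codes an STM32F4 or STM32H7 layout. A small stand-alone sketch of that descriptor pattern (structure names, offsets and bit positions below are illustrative, not the driver's actual types):

#include <stdint.h>
#include <stdio.h>

/* Each logical field is described by a register offset, bit mask and shift. */
struct regfield {
	uint32_t reg;
	uint32_t mask;
	uint32_t shift;
};

struct variant_cfg {
	struct regfield br;	/* baud rate divisor field */
};

static const struct variant_cfg f4_cfg = { .br = { 0x00, 0x7u << 3, 3 } };
static const struct variant_cfg h7_cfg = { .br = { 0x28, 0x7u << 28, 28 } };

static uint32_t regs[64];	/* fake register file for the demo */

/* Read-modify-write one described field, as the driver does on MMIO. */
static void field_write(const struct regfield *f, uint32_t val)
{
	uint32_t v = regs[f->reg / 4];

	v &= ~f->mask;
	v |= (val << f->shift) & f->mask;
	regs[f->reg / 4] = v;
}

int main(void)
{
	field_write(&f4_cfg.br, 5);
	field_write(&h7_cfg.br, 5);
	printf("F4 reg=0x%08x  H7 reg=0x%08x\n", regs[0], regs[0x28 / 4]);
	return 0;
}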
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 5f19016bbf10..b9fb6493cd6b 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -490,8 +490,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base) {
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
- MEM_CS_EN(spi->chip_select),
- MEM_CS_MASK);
+ MEM_CS_MASK,
+ MEM_CS_EN(spi->chip_select));
}
qspi->mmap_enabled = true;
}
@@ -503,7 +503,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base)
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
- 0, MEM_CS_MASK);
+ MEM_CS_MASK, 0);
qspi->mmap_enabled = false;
}
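The spi-ti-qspi.c hunks above swap two arguments back into the order regmap_update_bits() expects: the mask first, then the new value. A stand-alone illustration of the read-modify-write contract (local helper with the same argument order; the values are chosen only for the demo):

#include <stdint.h>
#include <stdio.h>

/* Same contract as regmap_update_bits(): only bits set in 'mask' change,
 * and they take their new value from 'val'. */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0x000000ff;	/* current register contents */
	uint32_t mask = 0x0000000f;	/* field to update */
	uint32_t val = 0x00000002;	/* new field value */

	printf("correct order : 0x%08x\n", update_bits(reg, mask, val));
	/* with mask and value swapped the field is never cleared */
	printf("swapped order : 0x%08x\n", update_bits(reg, val, mask));
	return 0;
}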
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 97d137591b18..e7e8ea1edcce 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1008,6 +1008,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
/* RX */
dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
+ if (!dma->sg_rx_p)
+ return;
+
sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_rx_p;
@@ -1068,6 +1071,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
+ if (!dma->sg_tx_p)
+ return;
+
sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_tx_p;
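The spi-topcliff-pch.c hunks only add the missing NULL checks: kcalloc() may fail (GFP_ATOMIC allocations especially), and sg_init_table() must never be handed a NULL pointer. A minimal sketch of the same guard, with calloc() standing in for kcalloc():

#include <stdio.h>
#include <stdlib.h>

struct scatter { void *page; unsigned int len; };

int main(void)
{
	size_t num = 16;
	/* calloc(), like kcalloc(), multiplies with overflow checking and
	 * may return NULL; bail out before touching the table. */
	struct scatter *sg = calloc(num, sizeof(*sg));

	if (!sg) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}

	sg[0].len = 4096;	/* safe only after the NULL check */
	free(sg);
	return 0;
}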
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9a7def7c3237..93986f879b09 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -19,6 +19,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
@@ -578,7 +579,10 @@ int spi_add_device(struct spi_device *spi)
goto done;
}
- if (ctlr->cs_gpios)
+ /* Descriptors take precedence */
+ if (ctlr->cs_gpiods)
+ spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
+ else if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/* Drivers may modify this initial i/o setup, but will
@@ -772,10 +776,21 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (gpio_is_valid(spi->cs_gpio)) {
- /* Honour the SPI_NO_CS flag */
- if (!(spi->mode & SPI_NO_CS))
- gpio_set_value(spi->cs_gpio, !enable);
+ if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
+ /*
+ * Honour the SPI_NO_CS flag and invert the enable line, as
+ * active low is default for SPI. Execution paths that handle
+ * polarity inversion in gpiolib (such as device tree) will
+ * enforce active high using the SPI_CS_HIGH flag, resulting in a
+ * double inversion through the code above.
+ */
+ if (!(spi->mode & SPI_NO_CS)) {
+ if (spi->cs_gpiod)
+ gpiod_set_value_cansleep(spi->cs_gpiod,
+ !enable);
+ else
+ gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ }
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
spi->controller->set_cs)
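A small model of the chip-select polarity handling added above, assuming (as the comment states) that gpiolib performs any physical active-low inversion and the SPI core therefore works with purely logical levels; the helper name and cases are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* 'enable' means "assert chip select". SPI_CS_HIGH flips it once and the
 * final !enable flips it back, so a descriptor-based CS (always reported
 * as SPI_CS_HIGH) is driven as a logical 1 when asserted and gpiolib
 * applies any active-low inversion on the physical line. */
static int gpio_level(bool enable, bool cs_high)
{
	if (cs_high)
		enable = !enable;
	return !enable;
}

int main(void)
{
	printf("assert, legacy active-low CS : drive %d\n", gpio_level(true, false));
	printf("assert, SPI_CS_HIGH          : drive %d\n", gpio_level(true, true));
	printf("release, SPI_CS_HIGH         : drive %d\n", gpio_level(false, true));
	return 0;
}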
@@ -1615,13 +1630,21 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi->mode |= SPI_CPHA;
if (of_property_read_bool(nc, "spi-cpol"))
spi->mode |= SPI_CPOL;
- if (of_property_read_bool(nc, "spi-cs-high"))
- spi->mode |= SPI_CS_HIGH;
if (of_property_read_bool(nc, "spi-3wire"))
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
+ /*
+ * For descriptors associated with the device, polarity inversion is
+ * handled in the gpiolib, so all chip selects are "active high" in
+ * the logical sense; the gpiolib will invert the line if need be.
+ */
+ if (ctlr->use_gpio_descriptors)
+ spi->mode |= SPI_CS_HIGH;
+ else if (of_property_read_bool(nc, "spi-cs-high"))
+ spi->mode |= SPI_CS_HIGH;
+
/* Device DUAL/QUAD mode */
if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
switch (value) {
@@ -2137,6 +2160,60 @@ static int of_spi_register_master(struct spi_controller *ctlr)
}
#endif
+/**
+ * spi_get_gpio_descs() - grab chip select GPIOs for the master
+ * @ctlr: The SPI master to grab GPIO descriptors for
+ */
+static int spi_get_gpio_descs(struct spi_controller *ctlr)
+{
+ int nb, i;
+ struct gpio_desc **cs;
+ struct device *dev = &ctlr->dev;
+
+ nb = gpiod_count(dev, "cs");
+ ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
+
+ /* No GPIOs at all is fine, else return the error */
+ if (nb == 0 || nb == -ENOENT)
+ return 0;
+ else if (nb < 0)
+ return nb;
+
+ cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
+ GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ ctlr->cs_gpiods = cs;
+
+ for (i = 0; i < nb; i++) {
+ /*
+ * Most chipselects are active low; the inverted
+ * semantics are handled by special quirks in gpiolib,
+ * so initializing them as GPIOD_OUT_LOW here means
+ * "unasserted"; in most cases this will drive the physical
+ * line high.
+ */
+ cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
+ GPIOD_OUT_LOW);
+
+ if (cs[i]) {
+ /*
+ * If we find a CS GPIO, name it after the device and
+ * chip select line.
+ */
+ char *gpioname;
+
+ gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
+ dev_name(dev), i);
+ if (!gpioname)
+ return -ENOMEM;
+ gpiod_set_consumer_name(cs[i], gpioname);
+ }
+ }
+
+ return 0;
+}
+
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
/*
@@ -2199,9 +2276,21 @@ int spi_register_controller(struct spi_controller *ctlr)
return status;
if (!spi_controller_is_slave(ctlr)) {
- status = of_spi_register_master(ctlr);
- if (status)
- return status;
+ if (ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ return status;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ } else {
+ /* Legacy code path for GPIOs from DT */
+ status = of_spi_register_master(ctlr);
+ if (status)
+ return status;
+ }
}
/* even if it's just one always-selected device, there must
@@ -2915,6 +3004,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
+ spi->cs_gpiod ||
gpio_is_valid(spi->cs_gpio))) {
size_t maxsize;
int ret;
@@ -2961,6 +3051,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* it is not set for this transfer.
* Set transfer tx_nbits and rx_nbits as single transfer default
* (SPI_NBITS_SINGLE) if it is not set for this transfer.
+ * Ensure transfer word_delay is at least as long as that required by
+ * the device itself.
*/
message->frame_length = 0;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
@@ -3031,6 +3123,9 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
!(spi->mode & SPI_RX_QUAD))
return -EINVAL;
}
+
+ if (xfer->word_delay_usecs < spi->word_delay_usecs)
+ xfer->word_delay_usecs = spi->word_delay_usecs;
}
message->status = -EINPROGRESS;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index e4f608815c05..c0901b96cfe4 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -56,8 +56,6 @@ source "drivers/staging/iio/Kconfig"
source "drivers/staging/sm750fb/Kconfig"
-source "drivers/staging/xgifb/Kconfig"
-
source "drivers/staging/emxx_udc/Kconfig"
source "drivers/staging/speakup/Kconfig"
@@ -104,12 +102,16 @@ source "drivers/staging/pi433/Kconfig"
source "drivers/staging/mt7621-pci/Kconfig"
+source "drivers/staging/mt7621-pci-phy/Kconfig"
+
source "drivers/staging/mt7621-pinctrl/Kconfig"
source "drivers/staging/mt7621-spi/Kconfig"
source "drivers/staging/mt7621-dma/Kconfig"
+source "drivers/staging/ralink-gdma/Kconfig"
+
source "drivers/staging/mt7621-mmc/Kconfig"
source "drivers/staging/mt7621-eth/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 5868631e8f1b..57c6bce13ff4 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_SM750) += sm750fb/
-obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_MFD_NVEC) += nvec/
@@ -41,12 +40,14 @@ obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_PI433) += pi433/
-obj-$(CONFIG_SOC_MT7621) += mt7621-pci/
-obj-$(CONFIG_SOC_MT7621) += mt7621-pinctrl/
-obj-$(CONFIG_SOC_MT7621) += mt7621-spi/
+obj-$(CONFIG_PCI_MT7621) += mt7621-pci/
+obj-$(CONFIG_PCI_MT7621_PHY) += mt7621-pci-phy/
+obj-$(CONFIG_PINCTRL_RT2880) += mt7621-pinctrl/
+obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
-obj-$(CONFIG_SOC_MT7621) += mt7621-mmc/
-obj-$(CONFIG_SOC_MT7621) += mt7621-eth/
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
+obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
+obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 90a8a9f1ac7d..74d497d39c5a 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,9 @@ struct ashmem_range {
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
/*
* long lru_count - The count of pages on our LRU list.
*
@@ -126,7 +129,8 @@ static inline bool page_range_in_range(struct ashmem_range *range,
page_range_subsumes_range(range, start, end);
}
-static inline bool range_before_page(struct ashmem_range *range, size_t page)
+static inline bool range_before_page(struct ashmem_range *range,
+ size_t page)
{
return range->pgend < page;
}
@@ -168,19 +172,15 @@ static inline void lru_del(struct ashmem_range *range)
* @end: The ending page (inclusive)
*
* This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
*/
-static int range_alloc(struct ashmem_area *asma,
- struct ashmem_range *prev_range, unsigned int purged,
- size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+ struct ashmem_range *prev_range, unsigned int purged,
+ size_t start, size_t end,
+ struct ashmem_range **new_range)
{
- struct ashmem_range *range;
-
- range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
- if (!range)
- return -ENOMEM;
+ struct ashmem_range *range = *new_range;
+ *new_range = NULL;
range->asma = asma;
range->pgstart = start;
range->pgend = end;
@@ -190,8 +190,6 @@ static int range_alloc(struct ashmem_area *asma,
if (range_on_lru(range))
lru_add(range);
-
- return 0;
}
/**
@@ -438,7 +436,6 @@ out:
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct ashmem_range *range, *next;
unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
@@ -448,21 +445,33 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (!mutex_trylock(&ashmem_mutex))
return -1;
- list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+ while (!list_empty(&ashmem_lru_list)) {
+ struct ashmem_range *range =
+ list_first_entry(&ashmem_lru_list, typeof(*range), lru);
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
+ struct file *f = range->asma->file;
- range->asma->file->f_op->fallocate(range->asma->file,
- FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- start, end - start);
+ get_file(f);
+ atomic_inc(&ashmem_shrink_inflight);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
freed += range_size(range);
+ mutex_unlock(&ashmem_mutex);
+ f->f_op->fallocate(f,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, end - start);
+ fput(f);
+ if (atomic_dec_and_test(&ashmem_shrink_inflight))
+ wake_up_all(&ashmem_shrink_wait);
+ if (!mutex_trylock(&ashmem_mutex))
+ goto out;
if (--sc->nr_to_scan <= 0)
break;
}
mutex_unlock(&ashmem_mutex);
+out:
return freed;
}
@@ -582,7 +591,8 @@ static int get_name(struct ashmem_area *asma, void __user *name)
*
* Caller must hold ashmem_mutex.
*/
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+ struct ashmem_range **new_range)
{
struct ashmem_range *range, *next;
int ret = ASHMEM_NOT_PURGED;
@@ -635,7 +645,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
* second half and adjust the first chunk's endpoint.
*/
range_alloc(asma, range, range->purged,
- pgend + 1, range->pgend);
+ pgend + 1, range->pgend, new_range);
range_shrink(range, range->pgstart, pgstart - 1);
break;
}
@@ -649,7 +659,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
*
* Caller must hold ashmem_mutex.
*/
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+ struct ashmem_range **new_range)
{
struct ashmem_range *range, *next;
unsigned int purged = ASHMEM_NOT_PURGED;
@@ -675,7 +686,8 @@ restart:
}
}
- return range_alloc(asma, range, purged, pgstart, pgend);
+ range_alloc(asma, range, purged, pgstart, pgend, new_range);
+ return 0;
}
/*
@@ -708,11 +720,19 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
struct ashmem_pin pin;
size_t pgstart, pgend;
int ret = -EINVAL;
+ struct ashmem_range *range = NULL;
if (copy_from_user(&pin, p, sizeof(pin)))
return -EFAULT;
+ if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+ }
+
mutex_lock(&ashmem_mutex);
+ wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
if (!asma->file)
goto out_unlock;
@@ -735,10 +755,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
switch (cmd) {
case ASHMEM_PIN:
- ret = ashmem_pin(asma, pgstart, pgend);
+ ret = ashmem_pin(asma, pgstart, pgend, &range);
break;
case ASHMEM_UNPIN:
- ret = ashmem_unpin(asma, pgstart, pgend);
+ ret = ashmem_unpin(asma, pgstart, pgend, &range);
break;
case ASHMEM_GET_PIN_STATUS:
ret = ashmem_get_pin_status(asma, pgstart, pgend);
@@ -747,6 +767,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
out_unlock:
mutex_unlock(&ashmem_mutex);
+ if (range)
+ kmem_cache_free(ashmem_range_cachep, range);
return ret;
}
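The ashmem changes above preallocate the range object before taking ashmem_mutex (freeing the spare afterwards if it was not consumed) and let the shrinker drop the mutex around fallocate() while tracking in-flight work. A minimal pthread sketch of the preallocate-outside-the-lock idea (names and list handling are simplified placeholders):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct range { int start, end; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct range *unpinned;	/* stands in for the unpinned-range list */

/* Allocate before taking the lock so the allocator can never block (or
 * recurse into a shrinker) while the lock is held; consume the spare
 * object under the lock and free it afterwards if it was not needed. */
static int unpin(int start, int end, int need_new_range)
{
	struct range *new_range = malloc(sizeof(*new_range));

	if (!new_range)
		return -1;	/* -ENOMEM */

	pthread_mutex_lock(&lock);
	if (need_new_range) {
		new_range->start = start;
		new_range->end = end;
		unpinned = new_range;	/* "insert" into the list */
		new_range = NULL;
	}
	pthread_mutex_unlock(&lock);

	free(new_range);	/* no-op when the object was consumed */
	return 0;
}

int main(void)
{
	printf("unpin: %d\n", unpin(0, 3, 1));
	free(unpinned);
	return 0;
}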
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index bb30bf8774a0..17f3a7569e3d 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o
+obj-$(CONFIG_ION) += ion.o ion_heap.o
obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
deleted file mode 100644
index a8d3cc412fb9..000000000000
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2011 Google, Inc.
- */
-
-#include <linux/kernel.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "ion.h"
-
-union ion_ioctl_arg {
- struct ion_allocation_data allocation;
- struct ion_heap_query query;
-};
-
-static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
-{
- switch (cmd) {
- case ION_IOC_HEAP_QUERY:
- if (arg->query.reserved0 ||
- arg->query.reserved1 ||
- arg->query.reserved2)
- return -EINVAL;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-/* fix up the cases where the ioctl direction bits are incorrect */
-static unsigned int ion_ioctl_dir(unsigned int cmd)
-{
- switch (cmd) {
- default:
- return _IOC_DIR(cmd);
- }
-}
-
-long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int ret = 0;
- unsigned int dir;
- union ion_ioctl_arg data;
-
- dir = ion_ioctl_dir(cmd);
-
- if (_IOC_SIZE(cmd) > sizeof(data))
- return -EINVAL;
-
- /*
- * The copy_from_user is unconditional here for both read and write
- * to do the validate. If there is no write for the ioctl, the
- * buffer is cleared
- */
- if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
-
- ret = validate_ioctl_arg(cmd, &data);
- if (ret) {
- pr_warn_once("%s: ioctl validate failed\n", __func__);
- return ret;
- }
-
- if (!(dir & _IOC_WRITE))
- memset(&data, 0, sizeof(data));
-
- switch (cmd) {
- case ION_IOC_ALLOC:
- {
- int fd;
-
- fd = ion_alloc(data.allocation.len,
- data.allocation.heap_id_mask,
- data.allocation.flags);
- if (fd < 0)
- return fd;
-
- data.allocation.fd = fd;
-
- break;
- }
- case ION_IOC_HEAP_QUERY:
- ret = ion_query_heaps(&data.query);
- break;
- default:
- return -ENOTTY;
- }
-
- if (dir & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
- return -EFAULT;
- }
- return ret;
-}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a0802de8c3a1..92c2914239e3 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/staging/android/ion/ion.c
+ * ION Memory Allocator
*
* Copyright (C) 2011 Google, Inc.
*/
-#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
@@ -14,10 +13,8 @@
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
-#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
-#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
@@ -248,10 +245,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
struct ion_dma_buf_attachment *a = attachment->priv;
struct ion_buffer *buffer = dmabuf->priv;
- free_duped_table(a->table);
mutex_lock(&buffer->lock);
list_del(&a->list);
mutex_unlock(&buffer->lock);
+ free_duped_table(a->table);
kfree(a);
}
@@ -390,7 +387,7 @@ static const struct dma_buf_ops dma_buf_ops = {
.unmap = ion_dma_buf_kunmap,
};
-int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
+static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
struct ion_device *dev = internal_dev;
struct ion_buffer *buffer = NULL;
@@ -447,7 +444,7 @@ int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
return fd;
}
-int ion_query_heaps(struct ion_heap_query *query)
+static int ion_query_heaps(struct ion_heap_query *query)
{
struct ion_device *dev = internal_dev;
struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
@@ -492,6 +489,81 @@ out:
return ret;
}
+union ion_ioctl_arg {
+ struct ion_allocation_data allocation;
+ struct ion_heap_query query;
+};
+
+static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
+{
+ switch (cmd) {
+ case ION_IOC_HEAP_QUERY:
+ if (arg->query.reserved0 ||
+ arg->query.reserved1 ||
+ arg->query.reserved2)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ union ion_ioctl_arg data;
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ /*
+ * The copy_from_user is unconditional here for both read and write
+ * to do the validation. If there is no write for the ioctl, the
+ * buffer is cleared
+ */
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ ret = validate_ioctl_arg(cmd, &data);
+ if (ret) {
+ pr_warn_once("%s: ioctl validate failed\n", __func__);
+ return ret;
+ }
+
+ if (!(_IOC_DIR(cmd) & _IOC_WRITE))
+ memset(&data, 0, sizeof(data));
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ int fd;
+
+ fd = ion_alloc(data.allocation.len,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (fd < 0)
+ return fd;
+
+ data.allocation.fd = fd;
+
+ break;
+ }
+ case ION_IOC_HEAP_QUERY:
+ ret = ion_query_heaps(&data.query);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
static const struct file_operations ion_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = ion_ioctl,
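The relocated ion_ioctl() keeps the common pattern of decoding the command word itself: _IOC_SIZE() bounds the single copy_from_user()/copy_to_user(), and _IOC_DIR() decides which directions apply. A small user-space illustration of that decoding (Linux-only; the demo ioctl definition below is hypothetical):

#include <stdio.h>
#include <linux/ioctl.h>	/* _IOWR, _IOC_DIR, _IOC_SIZE */

struct demo_arg { unsigned int len; unsigned int fd; };

#define DEMO_IOC_ALLOC	_IOWR('I', 0, struct demo_arg)

int main(void)
{
	unsigned int cmd = DEMO_IOC_ALLOC;

	printf("size  : %u bytes\n", _IOC_SIZE(cmd));
	printf("write : %s\n", (_IOC_DIR(cmd) & _IOC_WRITE) ? "yes" : "no");
	printf("read  : %s\n", (_IOC_DIR(cmd) & _IOC_READ) ? "yes" : "no");
	return 0;
}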
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 47b594cf1ac9..e291299fd35f 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * drivers/staging/android/ion/ion.h
+ * ION Memory Allocator kernel interface header
*
* Copyright (C) 2011 Google, Inc.
*/
@@ -22,32 +22,9 @@
#include "../uapi/ion.h"
/**
- * struct ion_platform_heap - defines a heap in the given platform
- * @type: type of the heap from ion_heap_type enum
- * @id: unique identifier for heap. When allocating higher numb ers
- * will be allocated from first. At allocation these are passed
- * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
- * @name: used for debug purposes
- * @base: base address of heap in physical memory if applicable
- * @size: size of the heap in bytes if applicable
- * @priv: private info passed from the board file
- *
- * Provided by the board file.
- */
-struct ion_platform_heap {
- enum ion_heap_type type;
- unsigned int id;
- const char *name;
- phys_addr_t base;
- size_t size;
- phys_addr_t align;
- void *priv;
-};
-
-/**
* struct ion_buffer - metadata for a particular buffer
- * @ref: reference count
* @node: node in the ion_device buffers tree
+ * @list: element in list of deferred freeable buffers
* @dev: back pointer to the ion_device
* @heap: back pointer to the heap the buffer came from
* @flags: buffer specific flags
@@ -58,7 +35,8 @@ struct ion_platform_heap {
* @lock: protects the buffers cnt fields
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr: the kernel mapping if kmap_cnt is not zero
- * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @sg_table: the sg table for the buffer
+ * @attachments: list of devices attached to this buffer
*/
struct ion_buffer {
union {
@@ -174,12 +152,16 @@ struct ion_heap {
unsigned long flags;
unsigned int id;
const char *name;
+
+ /* deferred free support */
struct shrinker shrinker;
struct list_head free_list;
size_t free_list_size;
spinlock_t free_lock;
wait_queue_head_t waitqueue;
struct task_struct *task;
+
+ /* heap statistics */
u64 num_of_buffers;
u64 num_of_alloc_bytes;
u64 alloc_bytes_wm;
@@ -205,10 +187,6 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
-int ion_alloc(size_t len,
- unsigned int heap_id_mask,
- unsigned int flags);
-
/**
* ion_heap_init_shrinker
* @heap: the heap
@@ -330,8 +308,4 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan);
-long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-int ion_query_heaps(struct ion_heap_query *query);
-
#endif /* _ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index e129237a0417..bb9d614767a2 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/staging/android/ion/ion_carveout_heap.c
+ * ION Memory Allocator carveout heap helper
*
* Copyright (C) 2011 Google, Inc.
*/
-#include <linux/spinlock.h>
+
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
@@ -12,7 +12,7 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
+
#include "ion.h"
#define ION_CARVEOUT_ALLOCATE_FAIL -1
@@ -20,7 +20,6 @@
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
- phys_addr_t base;
};
static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
@@ -44,6 +43,7 @@ static void ion_carveout_free(struct ion_heap *heap, phys_addr_t addr,
if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
return;
+
gen_pool_free(carveout_heap->pool, addr, size);
}
@@ -103,17 +103,14 @@ static struct ion_heap_ops carveout_heap_ops = {
.unmap_kernel = ion_heap_unmap_kernel,
};
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+struct ion_heap *ion_carveout_heap_create(phys_addr_t base, size_t size)
{
struct ion_carveout_heap *carveout_heap;
int ret;
struct page *page;
- size_t size;
-
- page = pfn_to_page(PFN_DOWN(heap_data->base));
- size = heap_data->size;
+ page = pfn_to_page(PFN_DOWN(base));
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
@@ -127,9 +124,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
kfree(carveout_heap);
return ERR_PTR(-ENOMEM);
}
- carveout_heap->base = heap_data->base;
- gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
- -1);
+ gen_pool_add(carveout_heap->pool, base, size, -1);
carveout_heap->heap.ops = &carveout_heap_ops;
carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 159d72f5bc42..3cdde9c1a717 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -1,23 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/staging/android/ion/ion_chunk_heap.c
+ * ION memory allocator chunk heap helper
*
* Copyright (C) 2012 Google, Inc.
*/
+
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
-#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
+
#include "ion.h"
struct ion_chunk_heap {
struct ion_heap heap;
struct gen_pool *pool;
- phys_addr_t base;
unsigned long chunk_size;
unsigned long size;
unsigned long allocated;
@@ -108,16 +107,13 @@ static struct ion_heap_ops chunk_heap_ops = {
.unmap_kernel = ion_heap_unmap_kernel,
};
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+struct ion_heap *ion_chunk_heap_create(phys_addr_t base, size_t size, size_t chunk_size)
{
struct ion_chunk_heap *chunk_heap;
int ret;
struct page *page;
- size_t size;
-
- page = pfn_to_page(PFN_DOWN(heap_data->base));
- size = heap_data->size;
+ page = pfn_to_page(PFN_DOWN(base));
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
@@ -126,23 +122,21 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
if (!chunk_heap)
return ERR_PTR(-ENOMEM);
- chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+ chunk_heap->chunk_size = chunk_size;
chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
PAGE_SHIFT, -1);
if (!chunk_heap->pool) {
ret = -ENOMEM;
goto error_gen_pool_create;
}
- chunk_heap->base = heap_data->base;
- chunk_heap->size = heap_data->size;
+ chunk_heap->size = size;
chunk_heap->allocated = 0;
- gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+ gen_pool_add(chunk_heap->pool, base, size, -1);
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_debug("%s: base %pa size %zu\n", __func__,
- &chunk_heap->base, heap_data->size);
+ pr_debug("%s: base %pa size %zu\n", __func__, &base, size);
return &chunk_heap->heap;
@@ -150,4 +144,3 @@ error_gen_pool_create:
kfree(chunk_heap);
return ERR_PTR(ret);
}
-
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 3fafd013d80a..bf65e67ef9d8 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/staging/android/ion/ion_cma_heap.c
+ * ION Memory Allocator CMA heap exporter
*
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
@@ -111,10 +111,6 @@ static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
return ERR_PTR(-ENOMEM);
cma_heap->heap.ops = &ion_cma_ops;
- /*
- * get device from private heaps data, later it will be
- * used to make the link with reserved CMA memory
- */
cma_heap->cma = cma;
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
return &cma_heap->heap;
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 31db510018a9..473b465724f1 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/staging/android/ion/ion_heap.c
+ * ION Memory Allocator generic heap helpers
*
* Copyright (C) 2011 Google, Inc.
*/
@@ -14,6 +14,7 @@
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+
#include "ion.h"
void *ion_heap_map_kernel(struct ion_heap *heap,
@@ -92,6 +93,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
if (addr >= vma->vm_end)
return 0;
}
+
return 0;
}
@@ -254,6 +256,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
return PTR_ERR_OR_ZERO(heap->task);
}
sched_setscheduler(heap->task, SCHED_IDLE, &param);
+
return 0;
}
@@ -265,8 +268,10 @@ static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
int total = 0;
total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+
if (heap->ops->shrink)
total += heap->ops->shrink(heap, sc->gfp_mask, 0);
+
return total;
}
@@ -295,6 +300,7 @@ static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
if (heap->ops->shrink)
freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+
return freed;
}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 0d2a95957ee8..fd4995fb676e 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/staging/android/ion/ion_mem_pool.c
+ * ION Memory Allocator page pool helpers
*
* Copyright (C) 2011 Google, Inc.
*/
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 0383f7548d48..aa8d8425be25 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/staging/android/ion/ion_system_heap.c
+ * ION Memory Allocator system heap exporter
*
* Copyright (C) 2011 Google, Inc.
*/
@@ -13,6 +13,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+
#include "ion.h"
#define NUM_ORDERS ARRAY_SIZE(orders)
@@ -223,10 +224,10 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
static int ion_system_heap_create_pools(struct ion_page_pool **pools)
{
int i;
- gfp_t gfp_flags = low_order_gfp_flags;
for (i = 0; i < NUM_ORDERS; i++) {
struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
if (orders[i] > 4)
gfp_flags = high_order_gfp_flags;
@@ -236,6 +237,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools)
goto err_create_pool;
pools[i] = pool;
}
+
return 0;
err_create_pool:
@@ -274,6 +276,7 @@ static int ion_system_heap_create(void)
heap->name = "ion_system_heap";
ion_device_add_heap(heap);
+
return 0;
}
device_initcall(ion_system_heap_create);
@@ -355,6 +358,7 @@ static struct ion_heap *__ion_system_contig_heap_create(void)
heap->ops = &kmalloc_ops;
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
heap->name = "ion_system_contig_heap";
+
return heap;
}
@@ -367,7 +371,7 @@ static int ion_system_contig_heap_create(void)
return PTR_ERR(heap);
ion_device_add_heap(heap);
+
return 0;
}
device_initcall(ion_system_contig_heap_create);
-
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 5d7009884c13..46c93fcb46d6 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* drivers/staging/android/uapi/ion.h
*
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 22571abcaa4e..8a75bd27c413 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -29,7 +29,6 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
-#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include "uapi/vsoc_shm.h"
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 5d2fcbfe02af..0caae4a5c471 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1605,9 +1605,8 @@ static int do_insn_ioctl(struct comedi_device *dev,
unsigned int n_data = MIN_SAMPLES;
int ret = 0;
- if (copy_from_user(&insn, arg, sizeof(insn))) {
+ if (copy_from_user(&insn, arg, sizeof(insn)))
return -EFAULT;
- }
n_data = max(n_data, insn.n);
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 4e72a0778086..a9d052bfda38 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -252,9 +252,9 @@ static int cb_pcimdas_di_insn_bits(struct comedi_device *dev,
}
static int cb_pcimdas_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct cb_pcimdas_private *devpriv = dev->private;
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index e70a461e723f..405573e927cf 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -656,6 +656,7 @@ static int ni_660x_set_pfi_routing(struct comedi_device *dev,
case NI_660X_PFI_OUTPUT_DIO:
if (chan > 31)
return -EINVAL;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index b9a0dc6eac44..4bdef87d5dd7 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -49,116 +49,117 @@
/* defines for the PCI-DIO-32HS */
-#define Window_Address 4 /* W */
-#define Interrupt_And_Window_Status 4 /* R */
-#define IntStatus1 BIT(0)
-#define IntStatus2 BIT(1)
-#define WindowAddressStatus_mask 0x7c
-
-#define Master_DMA_And_Interrupt_Control 5 /* W */
-#define InterruptLine(x) ((x) & 3)
-#define OpenInt BIT(2)
-#define Group_Status 5 /* R */
-#define DataLeft BIT(0)
-#define Req BIT(2)
-#define StopTrig BIT(3)
-
-#define Group_1_Flags 6 /* R */
-#define Group_2_Flags 7 /* R */
-#define TransferReady BIT(0)
-#define CountExpired BIT(1)
-#define Waited BIT(5)
-#define PrimaryTC BIT(6)
-#define SecondaryTC BIT(7)
+#define WINDOW_ADDRESS 4 /* W */
+#define INTERRUPT_AND_WINDOW_STATUS 4 /* R */
+#define INT_STATUS_1 BIT(0)
+#define INT_STATUS_2 BIT(1)
+#define WINDOW_ADDRESS_STATUS_MASK 0x7c
+
+#define MASTER_DMA_AND_INTERRUPT_CONTROL 5 /* W */
+#define INTERRUPT_LINE(x) ((x) & 3)
+#define OPEN_INT BIT(2)
+#define GROUP_STATUS 5 /* R */
+#define DATA_LEFT BIT(0)
+#define REQ BIT(2)
+#define STOP_TRIG BIT(3)
+
+#define GROUP_1_FLAGS 6 /* R */
+#define GROUP_2_FLAGS 7 /* R */
+#define TRANSFER_READY BIT(0)
+#define COUNT_EXPIRED BIT(1)
+#define WAITED BIT(5)
+#define PRIMARY_TC BIT(6)
+#define SECONDARY_TC BIT(7)
/* #define SerialRose */
/* #define ReqRose */
/* #define Paused */
-#define Group_1_First_Clear 6 /* W */
-#define Group_2_First_Clear 7 /* W */
-#define ClearWaited BIT(3)
-#define ClearPrimaryTC BIT(4)
-#define ClearSecondaryTC BIT(5)
-#define DMAReset BIT(6)
-#define FIFOReset BIT(7)
-#define ClearAll 0xf8
-
-#define Group_1_FIFO 8 /* W */
-#define Group_2_FIFO 12 /* W */
-
-#define Transfer_Count 20
-#define Chip_ID_D 24
-#define Chip_ID_I 25
-#define Chip_ID_O 26
-#define Chip_Version 27
-#define Port_IO(x) (28 + (x))
-#define Port_Pin_Directions(x) (32 + (x))
-#define Port_Pin_Mask(x) (36 + (x))
-#define Port_Pin_Polarities(x) (40 + (x))
-
-#define Master_Clock_Routing 45
-#define RTSIClocking(x) (((x) & 3) << 4)
-
-#define Group_1_Second_Clear 46 /* W */
-#define Group_2_Second_Clear 47 /* W */
-#define ClearExpired BIT(0)
-
-#define Port_Pattern(x) (48 + (x))
-
-#define Data_Path 64
-#define FIFOEnableA BIT(0)
-#define FIFOEnableB BIT(1)
-#define FIFOEnableC BIT(2)
-#define FIFOEnableD BIT(3)
-#define Funneling(x) (((x) & 3) << 4)
-#define GroupDirection BIT(7)
-
-#define Protocol_Register_1 65
-#define OpMode Protocol_Register_1
-#define RunMode(x) ((x) & 7)
-#define Numbered BIT(3)
-
-#define Protocol_Register_2 66
-#define ClockReg Protocol_Register_2
-#define ClockLine(x) (((x) & 3) << 5)
-#define InvertStopTrig BIT(7)
-#define DataLatching(x) (((x) & 3) << 5)
-
-#define Protocol_Register_3 67
-#define Sequence Protocol_Register_3
-
-#define Protocol_Register_14 68 /* 16 bit */
-#define ClockSpeed Protocol_Register_14
-
-#define Protocol_Register_4 70
-#define ReqReg Protocol_Register_4
-#define ReqConditioning(x) (((x) & 7) << 3)
-
-#define Protocol_Register_5 71
-#define BlockMode Protocol_Register_5
+#define GROUP_1_FIRST_CLEAR 6 /* W */
+#define GROUP_2_FIRST_CLEAR 7 /* W */
+#define CLEAR_WAITED BIT(3)
+#define CLEAR_PRIMARY_TC BIT(4)
+#define CLEAR_SECONDARY_TC BIT(5)
+#define DMA_RESET BIT(6)
+#define FIFO_RESET BIT(7)
+#define CLEAR_ALL 0xf8
+
+#define GROUP_1_FIFO 8 /* W */
+#define GROUP_2_FIFO 12 /* W */
+
+#define TRANSFER_COUNT 20
+#define CHIP_ID_D 24
+#define CHIP_ID_I 25
+#define CHIP_ID_O 26
+#define CHIP_VERSION 27
+#define PORT_IO(x) (28 + (x))
+#define PORT_PIN_DIRECTIONS(x) (32 + (x))
+#define PORT_PIN_MASK(x) (36 + (x))
+#define PORT_PIN_POLARITIES(x) (40 + (x))
+
+#define MASTER_CLOCK_ROUTING 45
+#define RTSI_CLOCKING(x) (((x) & 3) << 4)
+
+#define GROUP_1_SECOND_CLEAR 46 /* W */
+#define GROUP_2_SECOND_CLEAR 47 /* W */
+#define CLEAR_EXPIRED BIT(0)
+
+#define PORT_PATTERN(x) (48 + (x))
+
+#define DATA_PATH 64
+#define FIFO_ENABLE_A BIT(0)
+#define FIFO_ENABLE_B BIT(1)
+#define FIFO_ENABLE_C BIT(2)
+#define FIFO_ENABLE_D BIT(3)
+#define FUNNELING(x) (((x) & 3) << 4)
+#define GROUP_DIRECTION BIT(7)
+
+#define PROTOCOL_REGISTER_1 65
+#define OP_MODE PROTOCOL_REGISTER_1
+#define RUN_MODE(x) ((x) & 7)
+#define NUMBERED BIT(3)
+
+#define PROTOCOL_REGISTER_2 66
+#define CLOCK_REG PROTOCOL_REGISTER_2
+#define CLOCK_LINE(x) (((x) & 3) << 5)
+#define INVERT_STOP_TRIG BIT(7)
+#define DATA_LATCHING(x) (((x) & 3) << 5)
+
+#define PROTOCOL_REGISTER_3 67
+#define SEQUENCE PROTOCOL_REGISTER_3
+
+#define PROTOCOL_REGISTER_14 68 /* 16 bit */
+#define CLOCK_SPEED PROTOCOL_REGISTER_14
+
+#define PROTOCOL_REGISTER_4 70
+#define REQ_REG PROTOCOL_REGISTER_4
+#define REQ_CONDITIONING(x) (((x) & 7) << 3)
+
+#define PROTOCOL_REGISTER_5 71
+#define BLOCK_MODE PROTOCOL_REGISTER_5
#define FIFO_Control 72
-#define ReadyLevel(x) ((x) & 7)
-
-#define Protocol_Register_6 73
-#define LinePolarities Protocol_Register_6
-#define InvertAck BIT(0)
-#define InvertReq BIT(1)
-#define InvertClock BIT(2)
-#define InvertSerial BIT(3)
-#define OpenAck BIT(4)
-#define OpenClock BIT(5)
-
-#define Protocol_Register_7 74
-#define AckSer Protocol_Register_7
-#define AckLine(x) (((x) & 3) << 2)
-#define ExchangePins BIT(7)
-
-#define Interrupt_Control 75
- /* bits same as flags */
-
-#define DMA_Line_Control_Group1 76
-#define DMA_Line_Control_Group2 108
+#define READY_LEVEL(x) ((x) & 7)
+
+#define PROTOCOL_REGISTER_6 73
+#define LINE_POLARITIES PROTOCOL_REGISTER_6
+#define INVERT_ACK BIT(0)
+#define INVERT_REQ BIT(1)
+#define INVERT_CLOCK BIT(2)
+#define INVERT_SERIAL BIT(3)
+#define OPEN_ACK BIT(4)
+#define OPEN_CLOCK BIT(5)
+
+#define PROTOCOL_REGISTER_7 74
+#define ACK_SER PROTOCOL_REGISTER_7
+#define ACK_LINE(x) (((x) & 3) << 2)
+#define EXCHANGE_PINS BIT(7)
+
+#define INTERRUPT_CONTROL 75
+/* bits same as flags */
+
+#define DMA_LINE_CONTROL_GROUP1 76
+#define DMA_LINE_CONTROL_GROUP2 108
+
/* channel zero is none */
static inline unsigned int primary_DMAChannel_bits(unsigned int channel)
{
@@ -170,41 +171,41 @@ static inline unsigned int secondary_DMAChannel_bits(unsigned int channel)
return (channel << 2) & 0xc;
}
-#define Transfer_Size_Control 77
-#define TransferWidth(x) ((x) & 3)
-#define TransferLength(x) (((x) & 3) << 3)
-#define RequireRLevel BIT(5)
+#define TRANSFER_SIZE_CONTROL 77
+#define TRANSFER_WIDTH(x) ((x) & 3)
+#define TRANSFER_LENGTH(x) (((x) & 3) << 3)
+#define REQUIRE_R_LEVEL BIT(5)
-#define Protocol_Register_15 79
-#define DAQOptions Protocol_Register_15
-#define StartSource(x) ((x) & 0x3)
-#define InvertStart BIT(2)
-#define StopSource(x) (((x) & 0x3) << 3)
-#define ReqStart BIT(6)
-#define PreStart BIT(7)
+#define PROTOCOL_REGISTER_15 79
+#define DAQ_OPTIONS PROTOCOL_REGISTER_15
+#define START_SOURCE(x) ((x) & 0x3)
+#define INVERT_START BIT(2)
+#define STOP_SOURCE(x) (((x) & 0x3) << 3)
+#define REQ_START BIT(6)
+#define PRE_START BIT(7)
-#define Pattern_Detection 81
-#define DetectionMethod BIT(0)
-#define InvertMatch BIT(1)
-#define IE_Pattern_Detection BIT(2)
+#define PATTERN_DETECTION 81
+#define DETECTION_METHOD BIT(0)
+#define INVERT_MATCH BIT(1)
+#define IE_PATTERN_DETECTION BIT(2)
-#define Protocol_Register_9 82
-#define ReqDelay Protocol_Register_9
+#define PROTOCOL_REGISTER_9 82
+#define REQ_DELAY PROTOCOL_REGISTER_9
-#define Protocol_Register_10 83
-#define ReqNotDelay Protocol_Register_10
+#define PROTOCOL_REGISTER_10 83
+#define REQ_NOT_DELAY PROTOCOL_REGISTER_10
-#define Protocol_Register_11 84
-#define AckDelay Protocol_Register_11
+#define PROTOCOL_REGISTER_11 84
+#define ACK_DELAY PROTOCOL_REGISTER_11
-#define Protocol_Register_12 85
-#define AckNotDelay Protocol_Register_12
+#define PROTOCOL_REGISTER_12 85
+#define ACK_NOT_DELAY PROTOCOL_REGISTER_12
-#define Protocol_Register_13 86
-#define Data1Delay Protocol_Register_13
+#define PROTOCOL_REGISTER_13 86
+#define DATA_1_DELAY PROTOCOL_REGISTER_13
-#define Protocol_Register_8 88 /* 32 bit */
-#define StartDelay Protocol_Register_8
+#define PROTOCOL_REGISTER_8 88 /* 32 bit */
+#define START_DELAY PROTOCOL_REGISTER_8
/* Firmware files for PCI-6524 */
#define FW_PCI_6534_MAIN "ni6534a.bin"
@@ -246,9 +247,10 @@ enum FPGA_Control_Bits {
#define TIMER_BASE 50 /* nanoseconds */
#ifdef USE_DMA
-#define IntEn (CountExpired | Waited | PrimaryTC | SecondaryTC)
+#define INT_EN (COUNT_EXPIRED | WAITED | PRIMARY_TC | SECONDARY_TC)
#else
-#define IntEn (TransferReady | CountExpired | Waited | PrimaryTC | SecondaryTC)
+#define INT_EN (TRANSFER_READY | COUNT_EXPIRED | WAITED \
+ | PRIMARY_TC | SECONDARY_TC)
#endif
enum nidio_boardid {
@@ -283,7 +285,7 @@ struct nidio96_private {
struct mite *mite;
int boardtype;
int dio;
- unsigned short OpModeBits;
+ unsigned short OP_MODEBits;
struct mite_channel *di_mite_chan;
struct mite_ring *di_mite_ring;
spinlock_t mite_channel_lock;
@@ -307,7 +309,7 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev)
devpriv->di_mite_chan->dir = COMEDI_INPUT;
writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) |
secondary_DMAChannel_bits(devpriv->di_mite_chan->channel),
- dev->mmio + DMA_Line_Control_Group1);
+ dev->mmio + DMA_LINE_CONTROL_GROUP1);
mmiowb();
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
@@ -324,7 +326,7 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
devpriv->di_mite_chan = NULL;
writeb(primary_DMAChannel_bits(0) |
secondary_DMAChannel_bits(0),
- dev->mmio + DMA_Line_Control_Group1);
+ dev->mmio + DMA_LINE_CONTROL_GROUP1);
mmiowb();
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
@@ -391,8 +393,8 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
/* Lock to avoid race with comedi_poll */
spin_lock(&dev->spinlock);
- status = readb(dev->mmio + Interrupt_And_Window_Status);
- flags = readb(dev->mmio + Group_1_Flags);
+ status = readb(dev->mmio + INTERRUPT_AND_WINDOW_STATUS);
+ flags = readb(dev->mmio + GROUP_1_FLAGS);
spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan) {
@@ -401,63 +403,63 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
}
spin_unlock(&devpriv->mite_channel_lock);
- while (status & DataLeft) {
+ while (status & DATA_LEFT) {
work++;
if (work > 20) {
dev_dbg(dev->class_dev, "too much work in interrupt\n");
writeb(0x00,
- dev->mmio + Master_DMA_And_Interrupt_Control);
+ dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL);
break;
}
- flags &= IntEn;
+ flags &= INT_EN;
- if (flags & TransferReady) {
- while (flags & TransferReady) {
+ if (flags & TRANSFER_READY) {
+ while (flags & TRANSFER_READY) {
work++;
if (work > 100) {
dev_dbg(dev->class_dev,
"too much work in interrupt\n");
writeb(0x00, dev->mmio +
- Master_DMA_And_Interrupt_Control
+ MASTER_DMA_AND_INTERRUPT_CONTROL
);
goto out;
}
- auxdata = readl(dev->mmio + Group_1_FIFO);
+ auxdata = readl(dev->mmio + GROUP_1_FIFO);
comedi_buf_write_samples(s, &auxdata, 1);
- flags = readb(dev->mmio + Group_1_Flags);
+ flags = readb(dev->mmio + GROUP_1_FLAGS);
}
}
- if (flags & CountExpired) {
- writeb(ClearExpired, dev->mmio + Group_1_Second_Clear);
+ if (flags & COUNT_EXPIRED) {
+ writeb(CLEAR_EXPIRED, dev->mmio + GROUP_1_SECOND_CLEAR);
async->events |= COMEDI_CB_EOA;
- writeb(0x00, dev->mmio + OpMode);
+ writeb(0x00, dev->mmio + OP_MODE);
break;
- } else if (flags & Waited) {
- writeb(ClearWaited, dev->mmio + Group_1_First_Clear);
+ } else if (flags & WAITED) {
+ writeb(CLEAR_WAITED, dev->mmio + GROUP_1_FIRST_CLEAR);
async->events |= COMEDI_CB_ERROR;
break;
- } else if (flags & PrimaryTC) {
- writeb(ClearPrimaryTC,
- dev->mmio + Group_1_First_Clear);
+ } else if (flags & PRIMARY_TC) {
+ writeb(CLEAR_PRIMARY_TC,
+ dev->mmio + GROUP_1_FIRST_CLEAR);
async->events |= COMEDI_CB_EOA;
- } else if (flags & SecondaryTC) {
- writeb(ClearSecondaryTC,
- dev->mmio + Group_1_First_Clear);
+ } else if (flags & SECONDARY_TC) {
+ writeb(CLEAR_SECONDARY_TC,
+ dev->mmio + GROUP_1_FIRST_CLEAR);
async->events |= COMEDI_CB_EOA;
}
- flags = readb(dev->mmio + Group_1_Flags);
- status = readb(dev->mmio + Interrupt_And_Window_Status);
+ flags = readb(dev->mmio + GROUP_1_FLAGS);
+ status = readb(dev->mmio + INTERRUPT_AND_WINDOW_STATUS);
}
out:
comedi_handle_events(dev, s);
#if 0
if (!tag)
- writeb(0x03, dev->mmio + Master_DMA_And_Interrupt_Control);
+ writeb(0x03, dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL);
#endif
spin_unlock(&dev->spinlock);
@@ -484,7 +486,7 @@ static int ni_pcidio_insn_config(struct comedi_device *dev,
if (ret)
return ret;
- writel(s->io_bits, dev->mmio + Port_Pin_Directions(0));
+ writel(s->io_bits, dev->mmio + PORT_PIN_DIRECTIONS(0));
return insn->n;
}
@@ -495,9 +497,9 @@ static int ni_pcidio_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
- writel(s->state, dev->mmio + Port_IO(0));
+ writel(s->state, dev->mmio + PORT_IO(0));
- data[1] = readl(dev->mmio + Port_IO(0));
+ data[1] = readl(dev->mmio + PORT_IO(0));
return insn->n;
}
@@ -609,7 +611,7 @@ static int ni_pcidio_inttrig(struct comedi_device *dev,
if (trig_num != cmd->start_arg)
return -EINVAL;
- writeb(devpriv->OpModeBits, dev->mmio + OpMode);
+ writeb(devpriv->OP_MODEBits, dev->mmio + OP_MODE);
s->async->inttrig = NULL;
return 1;
@@ -621,78 +623,78 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct comedi_cmd *cmd = &s->async->cmd;
/* XXX configure ports for input */
- writel(0x0000, dev->mmio + Port_Pin_Directions(0));
+ writel(0x0000, dev->mmio + PORT_PIN_DIRECTIONS(0));
if (1) {
/* enable fifos A B C D */
- writeb(0x0f, dev->mmio + Data_Path);
+ writeb(0x0f, dev->mmio + DATA_PATH);
/* set transfer width a 32 bits */
- writeb(TransferWidth(0) | TransferLength(0),
- dev->mmio + Transfer_Size_Control);
+ writeb(TRANSFER_WIDTH(0) | TRANSFER_LENGTH(0),
+ dev->mmio + TRANSFER_SIZE_CONTROL);
} else {
- writeb(0x03, dev->mmio + Data_Path);
- writeb(TransferWidth(3) | TransferLength(0),
- dev->mmio + Transfer_Size_Control);
+ writeb(0x03, dev->mmio + DATA_PATH);
+ writeb(TRANSFER_WIDTH(3) | TRANSFER_LENGTH(0),
+ dev->mmio + TRANSFER_SIZE_CONTROL);
}
/* protocol configuration */
if (cmd->scan_begin_src == TRIG_TIMER) {
/* page 4-5, "input with internal REQs" */
- writeb(0, dev->mmio + OpMode);
- writeb(0x00, dev->mmio + ClockReg);
- writeb(1, dev->mmio + Sequence);
- writeb(0x04, dev->mmio + ReqReg);
- writeb(4, dev->mmio + BlockMode);
- writeb(3, dev->mmio + LinePolarities);
- writeb(0xc0, dev->mmio + AckSer);
+ writeb(0, dev->mmio + OP_MODE);
+ writeb(0x00, dev->mmio + CLOCK_REG);
+ writeb(1, dev->mmio + SEQUENCE);
+ writeb(0x04, dev->mmio + REQ_REG);
+ writeb(4, dev->mmio + BLOCK_MODE);
+ writeb(3, dev->mmio + LINE_POLARITIES);
+ writeb(0xc0, dev->mmio + ACK_SER);
writel(ni_pcidio_ns_to_timer(&cmd->scan_begin_arg,
CMDF_ROUND_NEAREST),
- dev->mmio + StartDelay);
- writeb(1, dev->mmio + ReqDelay);
- writeb(1, dev->mmio + ReqNotDelay);
- writeb(1, dev->mmio + AckDelay);
- writeb(0x0b, dev->mmio + AckNotDelay);
- writeb(0x01, dev->mmio + Data1Delay);
+ dev->mmio + START_DELAY);
+ writeb(1, dev->mmio + REQ_DELAY);
+ writeb(1, dev->mmio + REQ_NOT_DELAY);
+ writeb(1, dev->mmio + ACK_DELAY);
+ writeb(0x0b, dev->mmio + ACK_NOT_DELAY);
+ writeb(0x01, dev->mmio + DATA_1_DELAY);
/*
* manual, page 4-5:
- * ClockSpeed comment is incorrectly listed on DAQOptions
+ * CLOCK_SPEED comment is incorrectly listed on DAQ_OPTIONS
*/
- writew(0, dev->mmio + ClockSpeed);
- writeb(0, dev->mmio + DAQOptions);
+ writew(0, dev->mmio + CLOCK_SPEED);
+ writeb(0, dev->mmio + DAQ_OPTIONS);
} else {
/* TRIG_EXT */
/* page 4-5, "input with external REQs" */
- writeb(0, dev->mmio + OpMode);
- writeb(0x00, dev->mmio + ClockReg);
- writeb(0, dev->mmio + Sequence);
- writeb(0x00, dev->mmio + ReqReg);
- writeb(4, dev->mmio + BlockMode);
+ writeb(0, dev->mmio + OP_MODE);
+ writeb(0x00, dev->mmio + CLOCK_REG);
+ writeb(0, dev->mmio + SEQUENCE);
+ writeb(0x00, dev->mmio + REQ_REG);
+ writeb(4, dev->mmio + BLOCK_MODE);
if (!(cmd->scan_begin_arg & CR_INVERT)) /* Leading Edge */
- writeb(0, dev->mmio + LinePolarities);
+ writeb(0, dev->mmio + LINE_POLARITIES);
else /* Trailing Edge */
- writeb(2, dev->mmio + LinePolarities);
- writeb(0x00, dev->mmio + AckSer);
- writel(1, dev->mmio + StartDelay);
- writeb(1, dev->mmio + ReqDelay);
- writeb(1, dev->mmio + ReqNotDelay);
- writeb(1, dev->mmio + AckDelay);
- writeb(0x0C, dev->mmio + AckNotDelay);
- writeb(0x10, dev->mmio + Data1Delay);
- writew(0, dev->mmio + ClockSpeed);
- writeb(0x60, dev->mmio + DAQOptions);
+ writeb(2, dev->mmio + LINE_POLARITIES);
+ writeb(0x00, dev->mmio + ACK_SER);
+ writel(1, dev->mmio + START_DELAY);
+ writeb(1, dev->mmio + REQ_DELAY);
+ writeb(1, dev->mmio + REQ_NOT_DELAY);
+ writeb(1, dev->mmio + ACK_DELAY);
+ writeb(0x0C, dev->mmio + ACK_NOT_DELAY);
+ writeb(0x10, dev->mmio + DATA_1_DELAY);
+ writew(0, dev->mmio + CLOCK_SPEED);
+ writeb(0x60, dev->mmio + DAQ_OPTIONS);
}
if (cmd->stop_src == TRIG_COUNT) {
writel(cmd->stop_arg,
- dev->mmio + Transfer_Count);
+ dev->mmio + TRANSFER_COUNT);
} else {
/* XXX */
}
#ifdef USE_DMA
- writeb(ClearPrimaryTC | ClearSecondaryTC,
- dev->mmio + Group_1_First_Clear);
+ writeb(CLEAR_PRIMARY_TC | CLEAR_SECONDARY_TC,
+ dev->mmio + GROUP_1_FIRST_CLEAR);
{
int retval = setup_mite_dma(dev, s);
@@ -701,25 +703,25 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return retval;
}
#else
- writeb(0x00, dev->mmio + DMA_Line_Control_Group1);
+ writeb(0x00, dev->mmio + DMA_LINE_CONTROL_GROUP1);
#endif
- writeb(0x00, dev->mmio + DMA_Line_Control_Group2);
+ writeb(0x00, dev->mmio + DMA_LINE_CONTROL_GROUP2);
/* clear and enable interrupts */
- writeb(0xff, dev->mmio + Group_1_First_Clear);
- /* writeb(ClearExpired, dev->mmio+Group_1_Second_Clear); */
+ writeb(0xff, dev->mmio + GROUP_1_FIRST_CLEAR);
+ /* writeb(CLEAR_EXPIRED, dev->mmio+GROUP_1_SECOND_CLEAR); */
- writeb(IntEn, dev->mmio + Interrupt_Control);
- writeb(0x03, dev->mmio + Master_DMA_And_Interrupt_Control);
+ writeb(INT_EN, dev->mmio + INTERRUPT_CONTROL);
+ writeb(0x03, dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL);
if (cmd->stop_src == TRIG_NONE) {
- devpriv->OpModeBits = DataLatching(0) | RunMode(7);
+ devpriv->OP_MODEBits = DATA_LATCHING(0) | RUN_MODE(7);
} else { /* TRIG_TIMER */
- devpriv->OpModeBits = Numbered | RunMode(7);
+ devpriv->OP_MODEBits = NUMBERED | RUN_MODE(7);
}
if (cmd->start_src == TRIG_NOW) {
/* start */
- writeb(devpriv->OpModeBits, dev->mmio + OpMode);
+ writeb(devpriv->OP_MODEBits, dev->mmio + OP_MODE);
s->async->inttrig = NULL;
} else {
/* TRIG_INT */
@@ -732,7 +734,7 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int ni_pcidio_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- writeb(0x00, dev->mmio + Master_DMA_And_Interrupt_Control);
+ writeb(0x00, dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL);
ni_pcidio_release_di_mite_channel(dev);
return 0;
@@ -869,12 +871,12 @@ static int pci_6534_upload_firmware(struct comedi_device *dev)
static void nidio_reset_board(struct comedi_device *dev)
{
- writel(0, dev->mmio + Port_IO(0));
- writel(0, dev->mmio + Port_Pin_Directions(0));
- writel(0, dev->mmio + Port_Pin_Mask(0));
+ writel(0, dev->mmio + PORT_IO(0));
+ writel(0, dev->mmio + PORT_PIN_DIRECTIONS(0));
+ writel(0, dev->mmio + PORT_PIN_MASK(0));
/* disable interrupts on board */
- writeb(0, dev->mmio + Master_DMA_And_Interrupt_Control);
+ writeb(0, dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL);
}
static int nidio_auto_attach(struct comedi_device *dev,
@@ -925,7 +927,7 @@ static int nidio_auto_attach(struct comedi_device *dev,
return ret;
dev_info(dev->class_dev, "%s rev=%d\n", dev->board_name,
- readb(dev->mmio + Chip_Version));
+ readb(dev->mmio + CHIP_VERSION));
s = &dev->subdevices[0];
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 0eb388c0e1f0..048cb35723ad 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -224,13 +224,16 @@ static void ni_tio_set_bits_transient(struct ni_gpct *counter,
unsigned int transient)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned int chip = counter->chip_index;
unsigned long flags;
- if (reg < NITIO_NUM_REGS) {
+ if (reg < NITIO_NUM_REGS && chip < counter_dev->num_chips) {
+ unsigned int *regs = counter_dev->regs[chip];
+
spin_lock_irqsave(&counter_dev->regs_lock, flags);
- counter_dev->regs[reg] &= ~mask;
- counter_dev->regs[reg] |= (value & mask);
- ni_tio_write(counter, counter_dev->regs[reg] | transient, reg);
+ regs[reg] &= ~mask;
+ regs[reg] |= (value & mask);
+ ni_tio_write(counter, regs[reg] | transient, reg);
mmiowb();
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
}
@@ -267,12 +270,13 @@ unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned int chip = counter->chip_index;
unsigned int value = 0;
unsigned long flags;
- if (reg < NITIO_NUM_REGS) {
+ if (reg < NITIO_NUM_REGS && chip < counter_dev->num_chips) {
spin_lock_irqsave(&counter_dev->regs_lock, flags);
- value = counter_dev->regs[reg];
+ value = counter_dev->regs[chip][reg];
spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
}
return value;
@@ -302,6 +306,7 @@ static int ni_m_series_clock_src_select(const struct ni_gpct *counter,
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
+ unsigned int chip = counter->chip_index;
unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
unsigned int clock_source = 0;
unsigned int src;
@@ -318,7 +323,7 @@ static int ni_m_series_clock_src_select(const struct ni_gpct *counter,
clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
break;
case NI_M_TIMEBASE_3_CLK:
- if (counter_dev->regs[second_gate_reg] & GI_SRC_SUBSEL)
+ if (counter_dev->regs[chip][second_gate_reg] & GI_SRC_SUBSEL)
clock_source =
NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS;
else
@@ -328,7 +333,7 @@ static int ni_m_series_clock_src_select(const struct ni_gpct *counter,
clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
break;
case NI_M_NEXT_GATE_CLK:
- if (counter_dev->regs[second_gate_reg] & GI_SRC_SUBSEL)
+ if (counter_dev->regs[chip][second_gate_reg] & GI_SRC_SUBSEL)
clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS;
else
clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
@@ -721,6 +726,7 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter,
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
+ unsigned int chip = counter->chip_index;
unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
if (counter_dev->variant != ni_gpct_variant_m_series)
@@ -729,18 +735,18 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter,
/* Gi_Source_Subselect is zero */
case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
- counter_dev->regs[second_gate_reg] &= ~GI_SRC_SUBSEL;
+ counter_dev->regs[chip][second_gate_reg] &= ~GI_SRC_SUBSEL;
break;
/* Gi_Source_Subselect is one */
case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
- counter_dev->regs[second_gate_reg] |= GI_SRC_SUBSEL;
+ counter_dev->regs[chip][second_gate_reg] |= GI_SRC_SUBSEL;
break;
/* Gi_Source_Subselect doesn't matter */
default:
return;
}
- ni_tio_write(counter, counter_dev->regs[second_gate_reg],
+ ni_tio_write(counter, counter_dev->regs[chip][second_gate_reg],
second_gate_reg);
}
@@ -1116,6 +1122,7 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index,
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
+ unsigned int chip = counter->chip_index;
unsigned int abz_reg, shift, mask;
if (counter_dev->variant != ni_gpct_variant_m_series)
@@ -1141,9 +1148,9 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index,
if (source > 0x1f)
source = 0x1f; /* Disable gate */
- counter_dev->regs[abz_reg] &= ~mask;
- counter_dev->regs[abz_reg] |= (source << shift) & mask;
- ni_tio_write(counter, counter_dev->regs[abz_reg], abz_reg);
+ counter_dev->regs[chip][abz_reg] &= ~mask;
+ counter_dev->regs[chip][abz_reg] |= (source << shift) & mask;
+ ni_tio_write(counter, counter_dev->regs[chip][abz_reg], abz_reg);
return 0;
}
@@ -1632,6 +1639,7 @@ int ni_tio_insn_read(struct comedi_device *dev,
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int channel = CR_CHAN(insn->chanspec);
unsigned int cidx = counter->counter_index;
+ unsigned int chip = counter->chip_index;
int i;
for (i = 0; i < insn->n; i++) {
@@ -1640,10 +1648,12 @@ int ni_tio_insn_read(struct comedi_device *dev,
data[i] = ni_tio_read_sw_save_reg(dev, s);
break;
case 1:
- data[i] = counter_dev->regs[NITIO_LOADA_REG(cidx)];
+ data[i] =
+ counter_dev->regs[chip][NITIO_LOADA_REG(cidx)];
break;
case 2:
- data[i] = counter_dev->regs[NITIO_LOADB_REG(cidx)];
+ data[i] =
+ counter_dev->regs[chip][NITIO_LOADB_REG(cidx)];
break;
}
}
@@ -1670,6 +1680,7 @@ int ni_tio_insn_write(struct comedi_device *dev,
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int channel = CR_CHAN(insn->chanspec);
unsigned int cidx = counter->counter_index;
+ unsigned int chip = counter->chip_index;
unsigned int load_reg;
if (insn->n < 1)
@@ -1690,14 +1701,15 @@ int ni_tio_insn_write(struct comedi_device *dev,
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, GI_LOAD);
/* restore load reg */
- ni_tio_write(counter, counter_dev->regs[load_reg], load_reg);
+ ni_tio_write(counter, counter_dev->regs[chip][load_reg],
+ load_reg);
break;
case 1:
- counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0];
+ counter_dev->regs[chip][NITIO_LOADA_REG(cidx)] = data[0];
ni_tio_write(counter, data[0], NITIO_LOADA_REG(cidx));
break;
case 2:
- counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0];
+ counter_dev->regs[chip][NITIO_LOADB_REG(cidx)] = data[0];
ni_tio_write(counter, data[0], NITIO_LOADB_REG(cidx));
break;
default:
@@ -1711,11 +1723,12 @@ void ni_tio_init_counter(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int cidx = counter->counter_index;
+ unsigned int chip = counter->chip_index;
ni_tio_reset_count_and_disarm(counter);
/* initialize counter registers */
- counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0;
+ counter_dev->regs[chip][NITIO_AUTO_INC_REG(cidx)] = 0x0;
ni_tio_write(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
@@ -1723,10 +1736,10 @@ void ni_tio_init_counter(struct ni_gpct *counter)
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
- counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0;
+ counter_dev->regs[chip][NITIO_LOADA_REG(cidx)] = 0x0;
ni_tio_write(counter, 0x0, NITIO_LOADA_REG(cidx));
- counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0;
+ counter_dev->regs[chip][NITIO_LOADB_REG(cidx)] = 0x0;
ni_tio_write(counter, 0x0, NITIO_LOADB_REG(cidx));
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
@@ -1735,7 +1748,7 @@ void ni_tio_init_counter(struct ni_gpct *counter)
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), ~0, 0);
if (ni_tio_has_gate2_registers(counter_dev)) {
- counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0;
+ counter_dev->regs[chip][NITIO_GATE2_REG(cidx)] = 0x0;
ni_tio_write(counter, 0x0, NITIO_GATE2_REG(cidx));
}
@@ -1776,9 +1789,16 @@ ni_gpct_device_construct(struct comedi_device *dev,
spin_lock_init(&counter_dev->regs_lock);
+ counter_dev->num_counters = num_counters;
+ counter_dev->num_chips = DIV_ROUND_UP(num_counters, counters_per_chip);
+
counter_dev->counters = kcalloc(num_counters, sizeof(*counter),
GFP_KERNEL);
- if (!counter_dev->counters) {
+ counter_dev->regs = kcalloc(counter_dev->num_chips,
+ sizeof(*counter_dev->regs), GFP_KERNEL);
+ if (!counter_dev->regs || !counter_dev->counters) {
+ kfree(counter_dev->regs);
+ kfree(counter_dev->counters);
kfree(counter_dev);
return NULL;
}
@@ -1790,8 +1810,6 @@ ni_gpct_device_construct(struct comedi_device *dev,
counter->counter_index = i % counters_per_chip;
spin_lock_init(&counter->lock);
}
- counter_dev->num_counters = num_counters;
- counter_dev->counters_per_chip = counters_per_chip;
return counter_dev;
}
@@ -1801,6 +1819,7 @@ void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
{
if (!counter_dev)
return;
+ kfree(counter_dev->regs);
kfree(counter_dev->counters);
kfree(counter_dev);
}
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 1e85d0a53715..e7b05718df9b 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -107,8 +107,8 @@ struct ni_gpct_device {
enum ni_gpct_variant variant;
struct ni_gpct *counters;
unsigned int num_counters;
- unsigned int counters_per_chip;
- unsigned int regs[NITIO_NUM_REGS];
+ unsigned int num_chips;
+ unsigned int (*regs)[NITIO_NUM_REGS]; /* [num_chips][NITIO_NUM_REGS] */
spinlock_t regs_lock; /* protects 'regs' */
const struct ni_route_tables *routing_tables; /* link to routes */
};
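The ni_tio.h change above replaces the single shared soft-copy array with one register block per TIO chip, declared as a pointer to a fixed-size array. As a rough stand-alone sketch of that pointer-to-array allocation and indexing idiom (plain user-space C, with calloc standing in for kcalloc and NUM_REGS an arbitrary stand-in for NITIO_NUM_REGS):

	#include <stdio.h>
	#include <stdlib.h>

	#define NUM_REGS 8			/* stand-in for NITIO_NUM_REGS */

	struct gpct_dev {
		unsigned int num_chips;
		unsigned int (*regs)[NUM_REGS];	/* regs[chip][reg] soft copies */
	};

	static struct gpct_dev *gpct_dev_alloc(unsigned int num_chips)
	{
		struct gpct_dev *dev = calloc(1, sizeof(*dev));

		if (!dev)
			return NULL;
		dev->num_chips = num_chips;
		/* one NUM_REGS-sized row per chip */
		dev->regs = calloc(num_chips, sizeof(*dev->regs));
		if (!dev->regs) {
			free(dev);
			return NULL;
		}
		return dev;
	}

	int main(void)
	{
		struct gpct_dev *dev = gpct_dev_alloc(2);

		if (!dev)
			return 1;
		dev->regs[1][3] = 0xabcd;	/* soft copy for chip 1, register 3 */
		printf("%#x\n", dev->regs[1][3]);
		free(dev->regs);
		free(dev);
		return 0;
	}

Because sizeof(*dev->regs) is the size of a whole NUM_REGS row, calloc(num_chips, ...) reserves exactly one row per chip, mirroring what the driver now does with kcalloc.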
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index e18c0723b760..0d54f394dbd2 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -61,7 +61,7 @@
#define USBDUXFASTSUB_CPUCS 0xE600
/*
- * max lenghth of the transfer-buffer for software upload
+ * max length of the transfer-buffer for software upload
*/
#define TB_LEN 0x2000
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 8e8f57c4f029..a913d40f0801 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -27,7 +27,7 @@
#include <linux/usb/gadget.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include "emxx_udc.h"
@@ -2220,7 +2220,7 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
mdelay(VBUS_CHATTERING_MDELAY); /* wait (ms) */
/* VBUS ON Check*/
- reg_dt = gpio_get_value(VBUS_VALUE);
+ reg_dt = gpiod_get_value(vbus_gpio);
if (reg_dt == 0) {
udc->linux_suspended = 0;
@@ -2247,7 +2247,7 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
}
} else {
mdelay(5); /* wait (5ms) */
- reg_dt = gpio_get_value(VBUS_VALUE);
+ reg_dt = gpiod_get_value(vbus_gpio);
if (reg_dt == 0)
return;
@@ -2311,7 +2311,7 @@ static inline void _nbu2ss_int_usb_suspend(struct nbu2ss_udc *udc)
u32 reg_dt;
if (udc->usb_suspended == 0) {
- reg_dt = gpio_get_value(VBUS_VALUE);
+ reg_dt = gpiod_get_value(vbus_gpio);
if (reg_dt == 0)
return;
@@ -2351,7 +2351,7 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;
struct fc_regs __iomem *preg = udc->p_regs;
- if (gpio_get_value(VBUS_VALUE) == 0) {
+ if (gpiod_get_value(vbus_gpio) == 0) {
_nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
_nbu2ss_writel(&preg->USB_INT_ENA, 0);
return IRQ_HANDLED;
@@ -2360,7 +2360,7 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
spin_lock(&udc->lock);
for (;;) {
- if (gpio_get_value(VBUS_VALUE) == 0) {
+ if (gpiod_get_value(vbus_gpio) == 0) {
_nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
_nbu2ss_writel(&preg->USB_INT_ENA, 0);
status = 0;
@@ -2750,7 +2750,7 @@ static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
preg = udc->p_regs;
- data = gpio_get_value(VBUS_VALUE);
+ data = gpiod_get_value(vbus_gpio);
if (data == 0)
return -EINVAL;
@@ -2790,7 +2790,7 @@ static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
return;
}
- data = gpio_get_value(VBUS_VALUE);
+ data = gpiod_get_value(vbus_gpio);
if (data == 0)
return;
@@ -2832,7 +2832,7 @@ static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
}
udc = container_of(pgadget, struct nbu2ss_udc, gadget);
- data = gpio_get_value(VBUS_VALUE);
+ data = gpiod_get_value(vbus_gpio);
if (data == 0)
return -EINVAL;
@@ -2854,7 +2854,7 @@ static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget)
udc = container_of(pgadget, struct nbu2ss_udc, gadget);
- data = gpio_get_value(VBUS_VALUE);
+ data = gpiod_get_value(vbus_gpio);
if (data == 0) {
dev_warn(&pgadget->dev, "VBUS LEVEL = %d\n", data);
return -EINVAL;
@@ -3119,12 +3119,13 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
}
/* VBUS Interrupt */
- irq_set_irq_type(INT_VBUS, IRQ_TYPE_EDGE_BOTH);
- status = request_irq(INT_VBUS,
+ vbus_irq = gpiod_to_irq(vbus_gpio);
+ irq_set_irq_type(vbus_irq, IRQ_TYPE_EDGE_BOTH);
+ status = request_irq(vbus_irq,
_nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc);
if (status != 0) {
- dev_err(udc->dev, "request_irq(INT_VBUS) failed\n");
+ dev_err(udc->dev, "request_irq(vbus_irq) failed\n");
return status;
}
@@ -3160,7 +3161,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
}
/* Interrupt Handler - Release */
- free_irq(INT_VBUS, udc);
+ free_irq(vbus_irq, udc);
return 0;
}
@@ -3201,7 +3202,7 @@ static int nbu2ss_drv_resume(struct platform_device *pdev)
if (!udc)
return 0;
- data = gpio_get_value(VBUS_VALUE);
+ data = gpiod_get_value(vbus_gpio);
if (data) {
udc->vbus_active = 1;
udc->devstate = USB_STATE_POWERED;
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index e28a74da9633..b8c3dee5626c 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -30,6 +30,8 @@
/* below hacked up for staging integration */
#define GPIO_VBUS 0 /* GPIO_P153 on KZM9D */
#define INT_VBUS 0 /* IRQ for GPIO_P153 */
+struct gpio_desc *vbus_gpio;
+int vbus_irq;
/*------------ Board dependence(Wait) */
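The emxx_udc changes above swap the legacy integer GPIO calls (gpio_get_value(VBUS_VALUE), the hard-coded INT_VBUS interrupt) for the descriptor-based gpiod consumer API. A hedged sketch of that pattern in probe code follows; the "vbus" connection ID, the handler name and the device name are illustrative assumptions, not taken from the driver:

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/gpio/consumer.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	/* illustrative handler name, not the driver's */
	static irqreturn_t example_vbus_irq(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_probe(struct platform_device *pdev)
	{
		struct gpio_desc *vbus_gpio;
		int irq, ret;

		/* look up the VBUS sense line by connection ID, not a number */
		vbus_gpio = devm_gpiod_get(&pdev->dev, "vbus", GPIOD_IN);
		if (IS_ERR(vbus_gpio))
			return PTR_ERR(vbus_gpio);

		/* derive the IRQ from the descriptor, replacing fixed INT_VBUS */
		irq = gpiod_to_irq(vbus_gpio);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, example_vbus_irq,
				       IRQF_SHARED | IRQF_TRIGGER_RISING |
				       IRQF_TRIGGER_FALLING,
				       "example-vbus", pdev);
		if (ret)
			return ret;

		/* level read replaces gpio_get_value(VBUS_VALUE) */
		dev_info(&pdev->dev, "VBUS is %s\n",
			 gpiod_get_value(vbus_gpio) ? "present" : "absent");
		return 0;
	}

The driver itself keeps a file-scope vbus_gpio/vbus_irq pair instead of probe-local variables, but the calls it ends up making (gpiod_get_value(), gpiod_to_irq(), request_irq()) follow the same shape.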
diff --git a/drivers/staging/erofs/Documentation/filesystems/erofs.txt b/drivers/staging/erofs/Documentation/filesystems/erofs.txt
new file mode 100644
index 000000000000..961ec4da7705
--- /dev/null
+++ b/drivers/staging/erofs/Documentation/filesystems/erofs.txt
@@ -0,0 +1,208 @@
+Overview
+========
+
+EROFS file-system stands for Enhanced Read-Only File System. Different
+from other read-only file systems, it aims to be flexible and scalable
+while remaining simple and high-performance.
+
+It is designed as a better filesystem solution for the following scenarios:
+ - read-only storage media or
+
+ - part of a fully trusted read-only solution, which means it needs to be
+ immutable and bit-for-bit identical to the official golden image for
+ their releases due to security and other considerations and
+
+ - a wish to save extra storage space with guaranteed end-to-end performance
+   by using reduced metadata and transparent file compression, especially
+   for embedded devices with limited memory (e.g., smartphones);
+
+Here are the main features of EROFS:
+ - Little endian on-disk design;
+
+ - Currently 4KB block size (nobh) and therefore maximum 16TB address space;
+
+ - Metadata & data could be mixed by design;
+
+ - 2 inode versions for different requirements:
+                            v1             v2
+   Inode metadata size:     32 bytes       64 bytes
+   Max file size:           4 GB           16 EB (also limited by max. vol size)
+   Max uids/gids:           65536          4294967296
+   File creation time:      no             yes (64 + 32-bit timestamp)
+   Max hardlinks:           65536          4294967296
+   Metadata reserved:       4 bytes        14 bytes
+
+ - Support extended attributes (xattrs) as an option;
+
+ - Support xattr inline and tail-end data inline for all files;
+
+ - Support POSIX.1e ACLs by using xattrs;
+
+ - Support transparent file compression as an option:
+ LZ4 algorithm with 4 KB fixed-output compression for high performance;
+
+The following git tree provides the file system user-space tools under
+development (e.g., the formatting tool mkfs.erofs):
+>> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
+
+Bug reports and patches are welcome; please send them to the following
+linux-erofs mailing list:
+>> linux-erofs mailing list <linux-erofs@lists.ozlabs.org>
+
+Note that EROFS is still a work in progress as a Linux staging driver, so
+Cc'ing the staging mailing list as well is highly recommended:
+>> Linux Driver Project Developer List <devel@driverdev.osuosl.org>
+
+Mount options
+=============
+
+fault_injection=%d      Enable fault injection in all supported types with
+                        specified injection rate. Supported injection type:
+                        Type_Name               Type_Value
+                        FAULT_KMALLOC           0x000000001
+(no)user_xattr          Setup Extended User Attributes. Note: xattr is enabled
+                        by default if CONFIG_EROFS_FS_XATTR is selected.
+(no)acl                 Setup POSIX Access Control List. Note: acl is enabled
+                        by default if CONFIG_EROFS_FS_POSIX_ACL is selected.
+
+On-disk details
+===============
+
+Summary
+-------
+Different from other read-only file systems, an EROFS volume is designed
+to be as simple as possible:
+
+ |-> aligned with the block size
+ ____________________________________________________________
+ | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data |
+ |_|__|_|_____|__________|_____|______|__________|_____|______|
+ 0 +1K
+
+All data areas should be aligned with the block size, but metadata areas
+may not. All metadata can now be observed in two different spaces (views):
+ 1. Inode metadata space
+ Each valid inode should be aligned with an inode slot, which is a fixed
+ value (32 bytes) and designed to be kept in line with v1 inode size.
+
+ Each inode can be directly found with the following formula:
+ inode offset = meta_blkaddr * block_size + 32 * nid
+
+ |-> aligned with 8B
+ |-> followed closely
+ + meta_blkaddr blocks |-> another slot
+ _____________________________________________________________________
+ | ... | inode | xattrs | extents | data inline | ... | inode ...
+ |________|_______|(optional)|(optional)|__(optional)_|_____|__________
+ |-> aligned with the inode slot size
+ . .
+ . .
+ . .
+ . .
+ . .
+ . .
+ .____________________________________________________|-> aligned with 4B
+ | xattr_ibody_header | shared xattrs | inline xattrs |
+ |____________________|_______________|_______________|
+ |-> 12 bytes <-|->x * 4 bytes<-| .
+ . . .
+ . . .
+ . . .
+ ._______________________________.______________________.
+ | id | id | id | id | ... | id | ent | ... | ent| ... |
+ |____|____|____|____|______|____|_____|_____|____|_____|
+ |-> aligned with 4B
+ |-> aligned with 4B
+
+ An inode can be 32 or 64 bytes; the size can be distinguished by a common
+ field which all inode versions have -- i_advise:
+
+ __________________ __________________
+ | i_advise | | i_advise |
+ |__________________| |__________________|
+ | ... | | ... |
+ | | | |
+ |__________________| 32 bytes | |
+ | |
+ |__________________| 64 bytes
+
+ Xattrs, extents and inline data follow the corresponding inode with
+ proper alignment, and they can be optional for different data mappings;
+ _currently_ there are 3 valid data mappings supported in total:
+
+ 1) flat file data without data inline (no extent);
+ 2) fixed-output size data compression (must have extents);
+ 3) flat file data with tail-end data inline (no extent);
+
+ The size of the optional xattrs is indicated by i_xattr_count in inode
+ header. Large xattrs or xattrs shared by many different files can be
+ stored in shared xattrs metadata rather than inlined right after inode.
+
+ 2. Shared xattrs metadata space
+ The shared xattrs space is similar to the inode space above: it starts at
+ a specific block indicated by xattr_blkaddr and is organized one by one
+ with proper alignment.
+
+ Each shared xattr can also be directly found with the following formula:
+ xattr offset = xattr_blkaddr * block_size + 4 * xattr_id
+
+ |-> aligned by 4 bytes
+ + xattr_blkaddr blocks |-> aligned with 4 bytes
+ _________________________________________________________________________
+ | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ...
+ |________|_____________|_____________|_____|______________|_______________
+
+Directories
+-----------
+All directories are now organized in a compact on-disk format. Note that
+each directory block is divided into index and name areas in order to support
+random file lookup, and all directory entries are _strictly_ recorded in
+alphabetical order in order to support an improved prefix binary search
+algorithm (see the related source code).
+
+ ___________________________
+ / |
+ / ______________|________________
+ / / | nameoff1 | nameoffN-1
+ ____________.______________._______________v________________v__________
+| dirent | dirent | ... | dirent | filename | filename | ... | filename |
+|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
+ \ ^
+ \ | * could have
+ \ | trailing '\0'
+ \________________________| nameoff0
+
+ Directory block
+
+Note that apart from the offset of the first filename, nameoff0 also indicates
+the total number of directory entries in this block, since there is no need
+to introduce another on-disk field at all.
+
+Compression
+-----------
+Currently, EROFS supports 4KB fixed-output clustersize transparent file
+compression, as illustrated below:
+
+ |---- Variant-Length Extent ----|-------- VLE --------|----- VLE -----
+ clusterofs clusterofs clusterofs
+ | | | logical data
+_________v_______________________________v_____________________v_______________
+... | . | | . | | . | ...
+____|____.________|_____________|________.____|_____________|__.__________|____
+ |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|
+ size size size size size
+ . . . .
+ . . . .
+ . . . .
+ _______._____________._____________._____________._____________________
+ ... | | | | ... physical data
+ _______|_____________|_____________|_____________|_____________________
+ |-> cluster <-|-> cluster <-|-> cluster <-|
+ size size size
+
+Currently each on-disk physical cluster can contain at most 4KB of
+(un)compressed data. For each logical cluster, there is a corresponding
+on-disk index to describe its cluster type, physical cluster address, etc.
+
+See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
+
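The two address formulas in the new document above (inode offset and shared xattr offset) are plain block arithmetic. A minimal user-space sketch under the document's stated assumptions (4KB block size, 32-byte inode slots, 4-byte xattr alignment); the function names are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define EROFS_BLKSIZ	4096	/* current fixed block size per the text above */

	/* inode offset = meta_blkaddr * block_size + 32 * nid */
	static uint64_t erofs_inode_offset(uint32_t meta_blkaddr, uint64_t nid)
	{
		return (uint64_t)meta_blkaddr * EROFS_BLKSIZ + 32 * nid;
	}

	/* xattr offset = xattr_blkaddr * block_size + 4 * xattr_id */
	static uint64_t erofs_xattr_offset(uint32_t xattr_blkaddr, uint32_t xattr_id)
	{
		return (uint64_t)xattr_blkaddr * EROFS_BLKSIZ + 4ULL * xattr_id;
	}

	int main(void)
	{
		/* e.g. metadata area at block 2, inode slot #5 -> byte 8352 */
		printf("%llu\n", (unsigned long long)erofs_inode_offset(2, 5));
		printf("%llu\n", (unsigned long long)erofs_xattr_offset(3, 7));
		return 0;
	}

For example, with the metadata area starting at block 2, inode slot 5 lands at byte 2 * 4096 + 32 * 5 = 8352.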
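A second sketch, for the directory section above: since the index area of a directory block holds fixed-size dirents, the offset of the first filename (nameoff0) doubles as the byte size of the index area, and dividing by the dirent size yields the entry count. The 12-byte layout below is an assumption matching the description (nid, nameoff, file type, reserved byte); deriving ndirents this way is also what the staging driver's find_target_block_classic() does in the namei.c hunk further down:

	#include <stdint.h>
	#include <stdio.h>

	/* assumed on-disk dirent layout: nid + nameoff + file type + padding */
	struct erofs_dirent_disk {
		uint64_t nid;
		uint16_t nameoff;
		uint8_t  file_type;
		uint8_t  reserved;
	} __attribute__((packed));

	/*
	 * nameoff0 is the offset of the first name, so it is also the size of
	 * the index area; index area size / dirent size gives the entry count.
	 */
	static unsigned int erofs_dirblock_entries(uint16_t nameoff0)
	{
		return nameoff0 / sizeof(struct erofs_dirent_disk);
	}

	int main(void)
	{
		/* e.g. first filename starting at byte 36 -> 3 directory entries */
		printf("%u\n", erofs_dirblock_entries(36));
		return 0;
	}

So a block whose first filename starts at byte 36 holds 36 / 12 = 3 entries.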
diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile
index c91b65223f99..38ab344a285e 100644
--- a/drivers/staging/erofs/Makefile
+++ b/drivers/staging/erofs/Makefile
@@ -6,7 +6,7 @@ ccflags-y += -Wall -DEROFS_VERSION=\"$(EROFS_VERSION)\"
obj-$(CONFIG_EROFS_FS) += erofs.o
# staging requirement: to be self-contained in its own directory
-ccflags-y += -I$(src)/include
+ccflags-y += -I $(srctree)/$(src)/include
erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
erofs-$(CONFIG_EROFS_FS_ZIP) += unzip_vle.o unzip_vle_lz4.o
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index 5a55f0bfdfbb..9c471f08ffd4 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -165,43 +165,16 @@ err_out:
return err;
}
-#ifdef CONFIG_EROFS_FS_ZIP
-extern int z_erofs_map_blocks_iter(struct inode *,
- struct erofs_map_blocks *,
- struct page **, int);
-#endif
-
-int erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- struct page **mpage_ret, int flags)
-{
- /* by default, reading raw data never use erofs_map_blocks_iter */
- if (unlikely(!is_inode_layout_compression(inode))) {
- if (*mpage_ret)
- put_page(*mpage_ret);
- *mpage_ret = NULL;
-
- return erofs_map_blocks(inode, map, flags);
- }
-
-#ifdef CONFIG_EROFS_FS_ZIP
- return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
-#else
- /* data compression is not available */
- return -ENOTSUPP;
-#endif
-}
-
int erofs_map_blocks(struct inode *inode,
struct erofs_map_blocks *map, int flags)
{
if (unlikely(is_inode_layout_compression(inode))) {
- struct page *mpage = NULL;
- int err;
+ int err = z_erofs_map_blocks_iter(inode, map, flags);
- err = erofs_map_blocks_iter(inode, map, &mpage, flags);
- if (mpage)
- put_page(mpage);
+ if (map->mpage) {
+ put_page(map->mpage);
+ map->mpage = NULL;
+ }
return err;
}
return erofs_map_blocks_flatmode(inode, map, flags);
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 833f052f79d0..829f7b12e0dc 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -24,8 +24,8 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
};
static int erofs_fill_dentries(struct dir_context *ctx,
- void *dentry_blk, unsigned int *ofs,
- unsigned int nameoff, unsigned int maxsize)
+ void *dentry_blk, unsigned int *ofs,
+ unsigned int nameoff, unsigned int maxsize)
{
struct erofs_dirent *de = dentry_blk;
const struct erofs_dirent *end = dentry_blk + nameoff;
@@ -98,15 +98,14 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
if (IS_ERR(dentry_page))
continue;
- lock_page(dentry_page);
de = (struct erofs_dirent *)kmap(dentry_page);
nameoff = le16_to_cpu(de->nameoff);
if (unlikely(nameoff < sizeof(struct erofs_dirent) ||
- nameoff >= PAGE_SIZE)) {
+ nameoff >= PAGE_SIZE)) {
errln("%s, invalid de[0].nameoff %u",
- __func__, nameoff);
+ __func__, nameoff);
err = -EIO;
goto skip_this;
@@ -128,7 +127,6 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
skip_this:
kunmap(dentry_page);
- unlock_page(dentry_page);
put_page(dentry_page);
ctx->pos = blknr_to_addr(i) + ofs;
@@ -144,6 +142,6 @@ skip_this:
const struct file_operations erofs_dir_fops = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = erofs_readdir,
+ .iterate_shared = erofs_readdir,
};
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index d7fbf5f4600f..924b8dfc7a8f 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -184,32 +184,18 @@ static int fill_inode(struct inode *inode, int isdir)
if (!err) {
/* setup the new inode */
if (S_ISREG(inode->i_mode)) {
-#ifdef CONFIG_EROFS_FS_XATTR
- if (vi->xattr_isize)
- inode->i_op = &erofs_generic_xattr_iops;
-#endif
+ inode->i_op = &erofs_generic_iops;
inode->i_fop = &generic_ro_fops;
} else if (S_ISDIR(inode->i_mode)) {
- inode->i_op =
-#ifdef CONFIG_EROFS_FS_XATTR
- vi->xattr_isize ? &erofs_dir_xattr_iops :
-#endif
- &erofs_dir_iops;
+ inode->i_op = &erofs_dir_iops;
inode->i_fop = &erofs_dir_fops;
} else if (S_ISLNK(inode->i_mode)) {
/* by default, page_get_link is used for symlink */
- inode->i_op =
-#ifdef CONFIG_EROFS_FS_XATTR
- &erofs_symlink_xattr_iops,
-#else
- &page_symlink_inode_operations;
-#endif
+ inode->i_op = &erofs_symlink_iops;
inode_nohighmem(inode);
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
-#ifdef CONFIG_EROFS_FS_XATTR
- inode->i_op = &erofs_special_inode_operations;
-#endif
+ inode->i_op = &erofs_generic_iops;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
} else {
err = -EIO;
@@ -297,23 +283,26 @@ struct inode *erofs_iget(struct super_block *sb,
return inode;
}
+const struct inode_operations erofs_generic_iops = {
#ifdef CONFIG_EROFS_FS_XATTR
-const struct inode_operations erofs_generic_xattr_iops = {
.listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
};
-const struct inode_operations erofs_symlink_xattr_iops = {
+const struct inode_operations erofs_symlink_iops = {
.get_link = page_get_link,
+#ifdef CONFIG_EROFS_FS_XATTR
.listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
};
-const struct inode_operations erofs_special_inode_operations = {
- .listxattr = erofs_listxattr,
-};
-
-const struct inode_operations erofs_fast_symlink_xattr_iops = {
+const struct inode_operations erofs_fast_symlink_iops = {
.get_link = simple_get_link,
+#ifdef CONFIG_EROFS_FS_XATTR
.listxattr = erofs_listxattr,
-};
#endif
+ .get_acl = erofs_get_acl,
+};
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index e049d00c087a..e3bfde00c7d2 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -252,47 +252,20 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
}
#endif
-static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
-{
- int o;
-
-repeat:
- o = erofs_wait_on_workgroup_freezed(grp);
-
- if (unlikely(o <= 0))
- return -1;
-
- if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
- goto repeat;
-
- *ocnt = o;
- return 0;
-}
-
-#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
-#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
-
-extern int erofs_workgroup_put(struct erofs_workgroup *grp);
-
-extern struct erofs_workgroup *erofs_find_workgroup(
- struct super_block *sb, pgoff_t index, bool *tag);
-
-extern int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp, bool tag);
-
-extern unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
- unsigned long nr_shrink, bool cleanup);
-
-static inline void erofs_workstation_cleanup_all(struct super_block *sb)
-{
- erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
-}
+int erofs_workgroup_put(struct erofs_workgroup *grp);
+struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+ pgoff_t index, bool *tag);
+int erofs_register_workgroup(struct super_block *sb,
+ struct erofs_workgroup *grp, bool tag);
+unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
+ unsigned long nr_shrink, bool cleanup);
+void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
#ifdef EROFS_FS_HAS_MANAGED_CACHE
-extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *egrp);
-extern int erofs_try_to_free_cached_page(struct address_space *mapping,
- struct page *page);
+int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *egrp);
+int erofs_try_to_free_cached_page(struct address_space *mapping,
+ struct page *page);
#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
#else
@@ -354,12 +327,17 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
-#define inode_set_inited_xattr(inode) (EROFS_V(inode)->flags |= 1)
-#define inode_has_inited_xattr(inode) (EROFS_V(inode)->flags & 1)
+/* atomic flag definitions */
+#define EROFS_V_EA_INITED_BIT 0
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1)
struct erofs_vnode {
erofs_nid_t nid;
- unsigned int flags;
+
+ /* atomic flags (including bitlocks) */
+ unsigned long flags;
unsigned char data_mapping_mode;
/* inline size in bytes */
@@ -412,8 +390,6 @@ static inline bool is_inode_layout_inline(struct inode *inode)
}
extern const struct super_operations erofs_sops;
-extern const struct inode_operations erofs_dir_iops;
-extern const struct file_operations erofs_dir_fops;
extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
@@ -461,11 +437,26 @@ struct erofs_map_blocks {
u64 m_plen, m_llen;
unsigned int m_flags;
+
+ struct page *mpage;
};
/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW 0x0001
+#ifdef CONFIG_EROFS_FS_ZIP
+int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags);
+#else
+static inline int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
+ return -ENOTSUPP;
+}
+#endif
+
/* data.c */
static inline struct bio *
erofs_grab_bio(struct super_block *sb,
@@ -506,8 +497,8 @@ static inline void __submit_bio(struct bio *bio, unsigned op, unsigned op_flags)
#define EROFS_IO_MAX_RETRIES_NOFAIL CONFIG_EROFS_FS_IO_MAX_RETRIES
#endif
-extern struct page *__erofs_get_meta_page(struct super_block *sb,
- erofs_blk_t blkaddr, bool prio, bool nofail);
+struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr,
+ bool prio, bool nofail);
static inline struct page *erofs_get_meta_page(struct super_block *sb,
erofs_blk_t blkaddr, bool prio)
@@ -521,15 +512,7 @@ static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
return __erofs_get_meta_page(sb, blkaddr, prio, true);
}
-extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
-extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
- struct page **, int);
-
-struct erofs_map_blocks_iter {
- struct erofs_map_blocks map;
- struct page *mpage;
-};
-
+int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
static inline struct page *
erofs_get_inline_page(struct inode *inode,
@@ -549,41 +532,31 @@ static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
#endif
}
-extern struct inode *erofs_iget(struct super_block *sb,
- erofs_nid_t nid, bool dir);
-
-/* dir.c */
-int erofs_namei(struct inode *dir, struct qstr *name,
- erofs_nid_t *nid, unsigned *d_type);
-
-#ifdef CONFIG_EROFS_FS_XATTR
-/* xattr.c */
-extern const struct xattr_handler *erofs_xattr_handlers[];
-
-/* symlink and special inode */
-extern const struct inode_operations erofs_symlink_xattr_iops;
-extern const struct inode_operations erofs_fast_symlink_xattr_iops;
-extern const struct inode_operations erofs_special_inode_operations;
-#endif
+extern const struct inode_operations erofs_generic_iops;
+extern const struct inode_operations erofs_symlink_iops;
+extern const struct inode_operations erofs_fast_symlink_iops;
static inline void set_inode_fast_symlink(struct inode *inode)
{
-#ifdef CONFIG_EROFS_FS_XATTR
- inode->i_op = &erofs_fast_symlink_xattr_iops;
-#else
- inode->i_op = &simple_symlink_inode_operations;
-#endif
+ inode->i_op = &erofs_fast_symlink_iops;
}
static inline bool is_inode_fast_symlink(struct inode *inode)
{
-#ifdef CONFIG_EROFS_FS_XATTR
- return inode->i_op == &erofs_fast_symlink_xattr_iops;
-#else
- return inode->i_op == &simple_symlink_inode_operations;
-#endif
+ return inode->i_op == &erofs_fast_symlink_iops;
}
+struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
+
+/* namei.c */
+extern const struct inode_operations erofs_dir_iops;
+
+int erofs_namei(struct inode *dir, struct qstr *name,
+ erofs_nid_t *nid, unsigned int *d_type);
+
+/* dir.c */
+extern const struct file_operations erofs_dir_fops;
+
static inline void *erofs_vmap(struct page **pages, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
@@ -612,15 +585,11 @@ static inline void erofs_vunmap(const void *mem, unsigned int count)
}
/* utils.c */
-extern struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
-
-extern void erofs_register_super(struct super_block *sb);
-extern void erofs_unregister_super(struct super_block *sb);
+extern struct shrinker erofs_shrinker_info;
-extern unsigned long erofs_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc);
-extern unsigned long erofs_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc);
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
+void erofs_register_super(struct super_block *sb);
+void erofs_unregister_super(struct super_block *sb);
#ifndef lru_to_page
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 5596c52e246d..3f4fa52c10fa 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -15,74 +15,77 @@
#include <trace/events/erofs.h>
-/* based on the value of qn->len is accurate */
-static inline int dirnamecmp(struct qstr *qn,
- struct qstr *qd, unsigned int *matched)
+struct erofs_qstr {
+ const unsigned char *name;
+ const unsigned char *end;
+};
+
+/* relies on qn->end being accurate; qn must have the trailing '\0' */
+static inline int dirnamecmp(const struct erofs_qstr *qn,
+ const struct erofs_qstr *qd,
+ unsigned int *matched)
{
- unsigned int i = *matched, len = min(qn->len, qd->len);
-loop:
- if (unlikely(i >= len)) {
- *matched = i;
- if (qn->len < qd->len) {
- /*
- * actually (qn->len == qd->len)
- * when qd->name[i] == '\0'
- */
- return qd->name[i] == '\0' ? 0 : -1;
+ unsigned int i = *matched;
+
+ /*
+ * on-disk error, let's only BUG_ON in the debugging mode.
+ * otherwise, it will return 1 to just skip the invalid name
+ * and go on (in consideration of the lookup performance).
+ */
+ DBG_BUGON(qd->name > qd->end);
+
+ /* qd may not have a trailing '\0' */
+ /* however accessing it is absolutely safe as long as the index is < qd->end */
+ while (qd->name + i < qd->end && qd->name[i] != '\0') {
+ if (qn->name[i] != qd->name[i]) {
+ *matched = i;
+ return qn->name[i] > qd->name[i] ? 1 : -1;
}
- return (qn->len > qd->len);
- }
-
- if (qn->name[i] != qd->name[i]) {
- *matched = i;
- return qn->name[i] > qd->name[i] ? 1 : -1;
+ ++i;
}
-
- ++i;
- goto loop;
+ *matched = i;
+ /* See comments in __d_alloc on the terminating NUL character */
+ return qn->name[i] == '\0' ? 0 : 1;
}
-static struct erofs_dirent *find_target_dirent(
- struct qstr *name,
- u8 *data, int maxsize)
+#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
+
+static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
+ u8 *data,
+ unsigned int dirblksize,
+ const int ndirents)
{
- unsigned int ndirents, head, back;
+ int head, back;
unsigned int startprfx, endprfx;
struct erofs_dirent *const de = (struct erofs_dirent *)data;
- /* make sure that maxsize is valid */
- BUG_ON(maxsize < sizeof(struct erofs_dirent));
-
- ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
-
- /* corrupted dir (may be unnecessary...) */
- BUG_ON(!ndirents);
-
- head = 0;
+ /* since the 1st dirent has been evaluated previously */
+ head = 1;
back = ndirents - 1;
startprfx = endprfx = 0;
while (head <= back) {
- unsigned int mid = head + (back - head) / 2;
- unsigned int nameoff = le16_to_cpu(de[mid].nameoff);
+ const int mid = head + (back - head) / 2;
+ const int nameoff = nameoff_from_disk(de[mid].nameoff,
+ dirblksize);
unsigned int matched = min(startprfx, endprfx);
-
- struct qstr dname = QSTR_INIT(data + nameoff,
- unlikely(mid >= ndirents - 1) ?
- maxsize - nameoff :
- le16_to_cpu(de[mid + 1].nameoff) - nameoff);
+ struct erofs_qstr dname = {
+ .name = data + nameoff,
+ .end = unlikely(mid >= ndirents - 1) ?
+ data + dirblksize :
+ data + nameoff_from_disk(de[mid + 1].nameoff,
+ dirblksize)
+ };
/* string comparison without already matched prefix */
int ret = dirnamecmp(name, &dname, &matched);
- if (unlikely(!ret))
+ if (unlikely(!ret)) {
return de + mid;
- else if (ret > 0) {
+ } else if (ret > 0) {
head = mid + 1;
startprfx = matched;
- } else if (unlikely(mid < 1)) /* fix "mid" overflow */
- break;
- else {
+ } else {
back = mid - 1;
endprfx = matched;
}
@@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent(
return ERR_PTR(-ENOENT);
}
-static struct page *find_target_block_classic(
- struct inode *dir,
- struct qstr *name, int *_diff)
+static struct page *find_target_block_classic(struct inode *dir,
+ struct erofs_qstr *name,
+ int *_ndirents)
{
unsigned int startprfx, endprfx;
- unsigned int head, back;
+ int head, back;
struct address_space *const mapping = dir->i_mapping;
struct page *candidate = ERR_PTR(-ENOENT);
@@ -105,89 +108,97 @@ static struct page *find_target_block_classic(
back = inode_datablocks(dir) - 1;
while (head <= back) {
- unsigned int mid = head + (back - head) / 2;
+ const int mid = head + (back - head) / 2;
struct page *page = read_mapping_page(mapping, mid, NULL);
- if (IS_ERR(page)) {
-exact_out:
- if (!IS_ERR(candidate)) /* valid candidate */
- put_page(candidate);
- return page;
- } else {
- int diff;
- unsigned int ndirents, matched;
- struct qstr dname;
+ if (!IS_ERR(page)) {
struct erofs_dirent *de = kmap_atomic(page);
- unsigned int nameoff = le16_to_cpu(de->nameoff);
-
- ndirents = nameoff / sizeof(*de);
+ const int nameoff = nameoff_from_disk(de->nameoff,
+ EROFS_BLKSIZ);
+ const int ndirents = nameoff / sizeof(*de);
+ int diff;
+ unsigned int matched;
+ struct erofs_qstr dname;
- /* corrupted dir (should have one entry at least) */
- BUG_ON(!ndirents || nameoff > PAGE_SIZE);
+ if (unlikely(!ndirents)) {
+ DBG_BUGON(1);
+ kunmap_atomic(de);
+ put_page(page);
+ page = ERR_PTR(-EIO);
+ goto out;
+ }
matched = min(startprfx, endprfx);
dname.name = (u8 *)de + nameoff;
- dname.len = ndirents == 1 ?
- /* since the rest of the last page is 0 */
- EROFS_BLKSIZ - nameoff
- : le16_to_cpu(de[1].nameoff) - nameoff;
+ if (ndirents == 1)
+ dname.end = (u8 *)de + EROFS_BLKSIZ;
+ else
+ dname.end = (u8 *)de +
+ nameoff_from_disk(de[1].nameoff,
+ EROFS_BLKSIZ);
/* string comparison without already matched prefix */
diff = dirnamecmp(name, &dname, &matched);
kunmap_atomic(de);
if (unlikely(!diff)) {
- *_diff = 0;
- goto exact_out;
+ *_ndirents = 0;
+ goto out;
} else if (diff > 0) {
head = mid + 1;
startprfx = matched;
- if (likely(!IS_ERR(candidate)))
+ if (!IS_ERR(candidate))
put_page(candidate);
candidate = page;
+ *_ndirents = ndirents;
} else {
put_page(page);
- if (unlikely(mid < 1)) /* fix "mid" overflow */
- break;
-
back = mid - 1;
endprfx = matched;
}
+ continue;
}
+out: /* put the candidate page if it is valid */
+ if (!IS_ERR(candidate))
+ put_page(candidate);
+ return page;
}
- *_diff = 1;
return candidate;
}
int erofs_namei(struct inode *dir,
- struct qstr *name,
- erofs_nid_t *nid, unsigned int *d_type)
+ struct qstr *name,
+ erofs_nid_t *nid, unsigned int *d_type)
{
- int diff;
+ int ndirents;
struct page *page;
- u8 *data;
+ void *data;
struct erofs_dirent *de;
+ struct erofs_qstr qn;
if (unlikely(!dir->i_size))
return -ENOENT;
- diff = 1;
- page = find_target_block_classic(dir, name, &diff);
+ qn.name = name->name;
+ qn.end = name->name + name->len;
+
+ ndirents = 0;
+ page = find_target_block_classic(dir, &qn, &ndirents);
- if (unlikely(IS_ERR(page)))
+ if (IS_ERR(page))
return PTR_ERR(page);
data = kmap_atomic(page);
/* the target page has been mapped */
- de = likely(diff) ?
- /* since the rest of the last page is 0 */
- find_target_dirent(name, data, EROFS_BLKSIZ) :
- (struct erofs_dirent *)data;
+ if (ndirents)
+ de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
+ else
+ de = (struct erofs_dirent *)data;
- if (likely(!IS_ERR(de))) {
+ if (!IS_ERR(de)) {
*nid = le64_to_cpu(de->nid);
*d_type = de->file_type;
}
@@ -235,12 +246,9 @@ static struct dentry *erofs_lookup(struct inode *dir,
const struct inode_operations erofs_dir_iops = {
.lookup = erofs_lookup,
-};
-
-const struct inode_operations erofs_dir_xattr_iops = {
- .lookup = erofs_lookup,
#ifdef CONFIG_EROFS_FS_XATTR
.listxattr = erofs_listxattr,
#endif
+ .get_acl = erofs_get_acl,
};
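
The rewritten directory lookup above keeps the classic erofs trick: both find_target_block_classic() and find_target_dirent() binary-search the sorted on-disk names while remembering how many leading bytes already matched the current lower and upper bounds (startprfx/endprfx), so those bytes are never compared again. A minimal userspace sketch of that prefix-skipping binary search (illustration only, not the erofs code; all names and the sample data are made up):

/* prefix-skipping binary search: entries between the current bounds are
 * guaranteed to share at least min(startprfx, endprfx) bytes with the target,
 * so each comparison may start from that offset.
 */
#include <stdio.h>

static int prefixed_cmp(const char *qn, const char *qd, unsigned int *matched)
{
	unsigned int i = *matched;

	while (qd[i] != '\0') {
		if (qn[i] != qd[i]) {
			*matched = i;
			return (unsigned char)qn[i] > (unsigned char)qd[i] ? 1 : -1;
		}
		++i;
	}
	*matched = i;
	return qn[i] == '\0' ? 0 : 1;
}

static int lookup(const char **names, int n, const char *target)
{
	int head = 0, back = n - 1;
	unsigned int startprfx = 0, endprfx = 0;

	while (head <= back) {
		int mid = head + (back - head) / 2;
		unsigned int matched = startprfx < endprfx ? startprfx : endprfx;
		int ret = prefixed_cmp(target, names[mid], &matched);

		if (!ret)
			return mid;
		if (ret > 0) {
			head = mid + 1;
			startprfx = matched;	/* prefix shared with the new lower bound */
		} else {
			back = mid - 1;
			endprfx = matched;	/* prefix shared with the new upper bound */
		}
	}
	return -1;
}

int main(void)
{
	const char *names[] = { "bar", "baz", "foo", "foobar", "qux" };

	printf("%d\n", lookup(names, 5, "foobar"));	/* prints 3 */
	return 0;
}
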
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 1c2eb69682ef..15c784fba879 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -16,6 +16,7 @@
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "internal.h"
+#include "xattr.h"
#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>
@@ -397,6 +398,11 @@ static int erofs_read_super(struct super_block *sb,
if (!silent)
infoln("root inode @ nid %llu", ROOT_NID(sbi));
+ if (test_opt(sbi, POSIX_ACL))
+ sb->s_flags |= SB_POSIXACL;
+ else
+ sb->s_flags &= ~SB_POSIXACL;
+
#ifdef CONFIG_EROFS_FS_ZIP
INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
#endif
@@ -420,13 +426,14 @@ static int erofs_read_super(struct super_block *sb,
errln("rootino(nid %llu) is not a directory(i_mode %o)",
ROOT_NID(sbi), inode->i_mode);
err = -EINVAL;
- goto err_isdir;
+ iput(inode);
+ goto err_iget;
}
sb->s_root = d_make_root(inode);
if (sb->s_root == NULL) {
err = -ENOMEM;
- goto err_makeroot;
+ goto err_iget;
}
/* save the device name to sbi */
@@ -452,10 +459,6 @@ static int erofs_read_super(struct super_block *sb,
*/
err_devname:
dput(sb->s_root);
-err_makeroot:
-err_isdir:
- if (sb->s_root == NULL)
- iput(inode);
err_iget:
#ifdef EROFS_FS_HAS_MANAGED_CACHE
iput(sbi->managed_cache);
@@ -493,7 +496,8 @@ static void erofs_put_super(struct super_block *sb)
mutex_lock(&sbi->umount_mutex);
#ifdef CONFIG_EROFS_FS_ZIP
- erofs_workstation_cleanup_all(sb);
+ /* clean up the compression space of this sb */
+ erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
#endif
erofs_unregister_super(sb);
@@ -537,12 +541,6 @@ static void erofs_kill_sb(struct super_block *sb)
kill_block_super(sb);
}
-static struct shrinker erofs_shrinker_info = {
- .scan_objects = erofs_shrink_scan,
- .count_objects = erofs_shrink_count,
- .seeks = DEFAULT_SEEKS,
-};
-
static struct file_system_type erofs_fs_type = {
.owner = THIS_MODULE,
.name = "erofs",
@@ -653,6 +651,11 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
if (err)
goto out;
+ if (test_opt(sbi, POSIX_ACL))
+ sb->s_flags |= SB_POSIXACL;
+ else
+ sb->s_flags &= ~SB_POSIXACL;
+
*flags |= SB_RDONLY;
return 0;
out:
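
The super.c error path above gets shorter by dropping the err_makeroot/err_isdir labels and releasing the inode right at the failure site (d_make_root() already drops it when it fails). For readers less familiar with the idiom, here is a generic userspace sketch of the goto-based unwinding ladder that kernel code converges on (purely illustrative, not erofs code):

/* "unwind in reverse order": each failure jumps to the label that releases
 * exactly what has been acquired so far.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;
	FILE *f;
	int err;

	a = malloc(64);
	if (!a)
		return -1;

	b = malloc(64);
	if (!b) {
		err = -1;
		goto err_free_a;
	}

	f = fopen("/tmp/example", "w");
	if (!f) {
		err = -1;
		goto err_free_b;
	}

	fclose(f);
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}
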
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 4ac1099a39c6..02f34a83147d 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -107,15 +107,30 @@ enum z_erofs_vle_work_role {
Z_EROFS_VLE_WORK_SECONDARY,
Z_EROFS_VLE_WORK_PRIMARY,
/*
- * The current work has at least been linked with the following
- * processed chained works, which means if the processing page
- * is the tail partial page of the work, the current work can
- * safely use the whole page, as illustrated below:
- * +--------------+-------------------------------------------+
- * | tail page | head page (of the previous work) |
- * +--------------+-------------------------------------------+
- * /\ which belongs to the current work
- * [ (*) this page can be used for the current work itself. ]
+ * The current work was the tail of an existing chain, and the previously
+ * processed chained works have all been decided to hook up to it.
+ * A new chain should be created for the remaining unprocessed works;
+ * therefore, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+ * the next work cannot reuse the whole page in the following scenario:
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (belongs to the next work) | (belongs to the current work) |
+ * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
+ */
+ Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
+ /*
+ * The current work has been linked with the processed chained works,
+ * and could also be linked with the potentially remaining works, which
+ * means if the processing page is the tail partial page of the work,
+ * the current work can safely use the whole page (since the next work
+ * is under control) for in-place decompression, as illustrated below:
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (of the current work) | (of the previous work) |
+ * | PRIMARY_FOLLOWED or | |
+ * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
+ *
+ * [ (*) the above page can be used for the current work itself. ]
*/
Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
Z_EROFS_VLE_WORK_MAX
@@ -238,14 +253,9 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
{
struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
const unsigned int clusterpages = erofs_clusterpages(sbi);
-
- struct z_erofs_vle_workgroup *grp;
+ struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
int ret = 0; /* 0 - busy */
- /* prevent the workgroup from being freed */
- rcu_read_lock();
- grp = (void *)page_private(page);
-
if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
unsigned int i;
@@ -257,12 +267,11 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
}
}
erofs_workgroup_unfreeze(&grp->obj, 1);
- }
- rcu_read_unlock();
- if (ret) {
- ClearPagePrivate(page);
- put_page(page);
+ if (ret) {
+ ClearPagePrivate(page);
+ put_page(page);
+ }
}
return ret;
}
@@ -315,10 +324,10 @@ static int z_erofs_vle_work_add_page(
return ret ? 0 : -EAGAIN;
}
-static inline bool try_to_claim_workgroup(
- struct z_erofs_vle_workgroup *grp,
- z_erofs_vle_owned_workgrp_t *owned_head,
- bool *hosted)
+static enum z_erofs_vle_work_role
+try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
+ z_erofs_vle_owned_workgrp_t *owned_head,
+ bool *hosted)
{
DBG_BUGON(*hosted == true);
@@ -332,6 +341,9 @@ retry:
*owned_head = &grp->next;
*hosted = true;
+ /* lucky, I am the followee :) */
+ return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+
} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
/*
* type 2, link to the end of an existing open chain,
@@ -341,12 +353,11 @@ retry:
if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
*owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
goto retry;
-
*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
- } else
- return false; /* :( better luck next time */
+ return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
+ }
- return true; /* lucky, I am the followee :) */
+ return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
}
struct z_erofs_vle_work_finder {
@@ -424,12 +435,9 @@ z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
*f->hosted = false;
if (!primary)
*f->role = Z_EROFS_VLE_WORK_SECONDARY;
- /* claim the workgroup if possible */
- else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
- *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
- else
- *f->role = Z_EROFS_VLE_WORK_PRIMARY;
-
+ else /* claim the workgroup if possible */
+ *f->role = try_to_claim_workgroup(grp, f->owned_head,
+ f->hosted);
return work;
}
@@ -493,6 +501,9 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
return work;
}
+#define builder_is_hooked(builder) \
+ ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
+
#define builder_is_followed(builder) \
((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
@@ -539,7 +550,7 @@ repeat:
if (unlikely(work == ERR_PTR(-EAGAIN)))
goto repeat;
- if (unlikely(IS_ERR(work)))
+ if (IS_ERR(work))
return PTR_ERR(work);
got_it:
z_erofs_pagevec_ctor_init(&builder->vector,
@@ -589,7 +600,7 @@ static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
erofs_workgroup_put(&grp->obj);
}
-void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
+static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
{
struct z_erofs_vle_workgroup *grp =
z_erofs_vle_work_workgroup(work, true);
@@ -636,7 +647,7 @@ struct z_erofs_vle_frontend {
struct inode *const inode;
struct z_erofs_vle_work_builder builder;
- struct erofs_map_blocks_iter m_iter;
+ struct erofs_map_blocks map;
z_erofs_vle_owned_workgrp_t owned_head;
@@ -647,8 +658,9 @@ struct z_erofs_vle_frontend {
#define VLE_FRONTEND_INIT(__i) { \
.inode = __i, \
- .m_iter = { \
- { .m_llen = 0, .m_plen = 0 }, \
+ .map = { \
+ .m_llen = 0, \
+ .m_plen = 0, \
.mpage = NULL \
}, \
.builder = VLE_WORK_BUILDER_INIT(), \
@@ -681,12 +693,11 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
{
struct super_block *const sb = fe->inode->i_sb;
struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
- struct erofs_map_blocks_iter *const m = &fe->m_iter;
- struct erofs_map_blocks *const map = &m->map;
+ struct erofs_map_blocks *const map = &fe->map;
struct z_erofs_vle_work_builder *const builder = &fe->builder;
const loff_t offset = page_offset(page);
- bool tight = builder_is_followed(builder);
+ bool tight = builder_is_hooked(builder);
struct z_erofs_vle_work *work = builder->work;
enum z_erofs_cache_alloctype cache_strategy;
@@ -704,8 +715,12 @@ repeat:
/* lucky, within the range of the current map_blocks */
if (offset + cur >= map->m_la &&
- offset + cur < map->m_la + map->m_llen)
+ offset + cur < map->m_la + map->m_llen) {
+ /* didn't get a valid unzip work previously (very rare) */
+ if (!builder->work)
+ goto restart_now;
goto hitted;
+ }
/* move on to the next map_blocks */
debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
@@ -715,10 +730,11 @@ repeat:
map->m_la = offset + cur;
map->m_llen = 0;
- err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
+ err = z_erofs_map_blocks_iter(fe->inode, map, 0);
if (unlikely(err))
goto err_out;
+restart_now:
if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
goto hitted;
@@ -740,7 +756,7 @@ repeat:
map->m_plen / PAGE_SIZE,
cache_strategy, page_pool, GFP_KERNEL);
- tight &= builder_is_followed(builder);
+ tight &= builder_is_hooked(builder);
work = builder->work;
hitted:
cur = end - min_t(unsigned int, offset + end - map->m_la, end);
@@ -755,6 +771,9 @@ hitted:
(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
+ if (cur)
+ tight &= builder_is_followed(builder);
+
retry:
err = z_erofs_vle_work_add_page(builder, page, page_type);
/* should allocate an additional staging page for pagevec */
@@ -992,11 +1011,10 @@ repeat:
if (llen > grp->llen)
llen = grp->llen;
- err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
- clusterpages, pages, llen, work->pageofs,
- z_erofs_onlinepage_endio);
+ err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+ pages, llen, work->pageofs);
if (err != -ENOTSUPP)
- goto out_percpu;
+ goto out;
if (sparsemem_pages >= nr_pages)
goto skip_allocpage;
@@ -1017,8 +1035,25 @@ skip_allocpage:
erofs_vunmap(vout, nr_pages);
out:
+ /* must handle all compressed pages before ending pages */
+ for (i = 0; i < clusterpages; ++i) {
+ page = compressed_pages[i];
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+ if (page->mapping == MNGD_MAPPING(sbi))
+ continue;
+#endif
+ /* recycle all individual staging pages */
+ (void)z_erofs_gather_if_stagingpage(page_pool, page);
+
+ WRITE_ONCE(compressed_pages[i], NULL);
+ }
+
for (i = 0; i < nr_pages; ++i) {
page = pages[i];
+ if (!page)
+ continue;
+
DBG_BUGON(!page->mapping);
/* recycle all individual staging pages */
@@ -1031,20 +1066,6 @@ out:
z_erofs_onlinepage_endio(page);
}
-out_percpu:
- for (i = 0; i < clusterpages; ++i) {
- page = compressed_pages[i];
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (page->mapping == MNGD_MAPPING(sbi))
- continue;
-#endif
- /* recycle all individual staging pages */
- (void)z_erofs_gather_if_stagingpage(page_pool, page);
-
- WRITE_ONCE(compressed_pages[i], NULL);
- }
-
if (pages == z_pagemap_global)
mutex_unlock(&z_pagemap_global_lock);
else if (unlikely(pages != pages_onstack))
@@ -1484,8 +1505,8 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
z_erofs_submit_and_unzip(&f, &pagepool, true);
out:
- if (f.m_iter.mpage)
- put_page(f.m_iter.mpage);
+ if (f.map.mpage)
+ put_page(f.map.mpage);
/* clean up the remaining free pages */
put_pages_list(&pagepool);
@@ -1555,8 +1576,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
z_erofs_submit_and_unzip(&f, &pagepool, sync);
- if (f.m_iter.mpage)
- put_page(f.m_iter.mpage);
+ if (f.map.mpage)
+ put_page(f.map.mpage);
/* clean up the remaining free pages */
put_pages_list(&pagepool);
@@ -1701,14 +1722,14 @@ vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
int z_erofs_map_blocks_iter(struct inode *inode,
struct erofs_map_blocks *map,
- struct page **mpage_ret, int flags)
+ int flags)
{
void *kaddr;
const struct vle_map_blocks_iter_ctx ctx = {
.inode = inode,
.sb = inode->i_sb,
.clusterbits = EROFS_I_SB(inode)->clusterbits,
- .mpage_ret = mpage_ret,
+ .mpage_ret = &map->mpage,
.kaddr_ret = &kaddr
};
const unsigned int clustersize = 1 << ctx.clusterbits;
@@ -1722,7 +1743,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* initialize `pblk' to keep gcc from printing foolish warnings */
erofs_blk_t mblk, pblk = 0;
- struct page *mpage = *mpage_ret;
+ struct page *mpage = map->mpage;
struct z_erofs_vle_decompressed_index *di;
unsigned int cluster_type, logical_cluster_ofs;
int err = 0;
@@ -1758,7 +1779,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
err = PTR_ERR(mpage);
goto out;
}
- *mpage_ret = mpage;
+ map->mpage = mpage;
} else {
lock_page(mpage);
DBG_BUGON(!PageUptodate(mpage));
@@ -1818,7 +1839,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* get the corresponding first chunk */
err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
&pblk, &map->m_flags);
- mpage = *mpage_ret;
+ mpage = map->mpage;
if (unlikely(err)) {
if (mpage)
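
try_to_claim_workgroup() above now returns the work role directly, and the chain-claiming itself is a single compare-and-swap on grp->next. A userspace model of that decision, using C11 atomics in place of the kernel's cmpxchg() (all names here are illustrative, not the erofs ones):

#include <stdatomic.h>
#include <stdio.h>

/* one CAS on ->next decides whether the caller becomes the followee
 * (FOLLOWED), hooks onto the open end of an existing chain (HOOKED),
 * or gets no claim at all (PRIMARY).
 */
enum role { ROLE_PRIMARY, ROLE_HOOKED, ROLE_FOLLOWED };

#define NEXT_NIL  ((void *)0)
#define NEXT_TAIL ((void *)1)

struct group {
	_Atomic(void *) next;
};

static enum role try_to_claim(struct group *grp, void **owned_head)
{
	for (;;) {
		void *expected = atomic_load(&grp->next);

		if (expected == NEXT_NIL) {
			/* type 1: claim a free group as the new chain head */
			if (atomic_compare_exchange_weak(&grp->next,
							 &expected, *owned_head)) {
				*owned_head = &grp->next;
				return ROLE_FOLLOWED;
			}
		} else if (expected == NEXT_TAIL) {
			/* type 2: hook onto the open end of an existing chain */
			if (atomic_compare_exchange_weak(&grp->next,
							 &expected, *owned_head)) {
				*owned_head = NEXT_TAIL;
				return ROLE_HOOKED;
			}
		} else {
			return ROLE_PRIMARY;	/* already claimed by another chain */
		}
		/* lost the race; re-read and retry */
	}
}

int main(void)
{
	struct group g = { .next = NEXT_NIL };
	void *owned_head = NEXT_TAIL;

	printf("%d\n", try_to_claim(&g, &owned_head));	/* 2 == ROLE_FOLLOWED */
	return 0;
}
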
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
index 5a4e1b62c0d1..517e5ce8c5e9 100644
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -212,18 +212,17 @@ static inline void z_erofs_onlinepage_endio(struct page *page)
#define Z_EROFS_VLE_VMAP_GLOBAL_PAGES 2048
/* unzip_vle_lz4.c */
-extern int z_erofs_vle_plain_copy(struct page **compressed_pages,
- unsigned clusterpages, struct page **pages,
- unsigned nr_pages, unsigned short pageofs);
-
-extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
- unsigned clusterpages, struct page **pages,
- unsigned outlen, unsigned short pageofs,
- void (*endio)(struct page *));
-
-extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
- unsigned clusterpages, void *vaddr, unsigned llen,
- unsigned short pageofs, bool overlapped);
+int z_erofs_vle_plain_copy(struct page **compressed_pages,
+ unsigned int clusterpages, struct page **pages,
+ unsigned int nr_pages, unsigned short pageofs);
+int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+ unsigned int clusterpages,
+ struct page **pages, unsigned int outlen,
+ unsigned short pageofs);
+int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
+ unsigned int clusterpages,
+ void *vaddr, unsigned int llen,
+ unsigned short pageofs, bool overlapped);
#endif
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 52797bd89da1..48b263a2731a 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -13,7 +13,7 @@
#include "unzip_vle.h"
#include <linux/lz4.h>
-int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen)
+static int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen)
{
int ret = LZ4_decompress_safe_partial(in, out, inlen, outlen, outlen);
@@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
unsigned int clusterpages,
struct page **pages,
unsigned int outlen,
- unsigned short pageofs,
- void (*endio)(struct page *))
+ unsigned short pageofs)
{
void *vin, *vout;
unsigned int nr_pages, i, j;
@@ -148,19 +147,16 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
ret = z_erofs_unzip_lz4(vin, vout + pageofs,
clusterpages * PAGE_SIZE, outlen);
- if (ret >= 0) {
- outlen = ret;
- ret = 0;
- }
+ if (ret < 0)
+ goto out;
+ ret = 0;
for (i = 0; i < nr_pages; ++i) {
j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
if (pages[i]) {
- if (ret < 0) {
- SetPageError(pages[i]);
- } else if (clusterpages == 1 &&
- pages[i] == compressed_pages[0]) {
+ if (clusterpages == 1 &&
+ pages[i] == compressed_pages[0]) {
memcpy(vin + pageofs, vout + pageofs, j);
} else {
void *dst = kmap_atomic(pages[i]);
@@ -168,12 +164,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
memcpy(dst + pageofs, vout + pageofs, j);
kunmap_atomic(dst);
}
- endio(pages[i]);
}
vout += PAGE_SIZE;
outlen -= j;
pageofs = 0;
}
+
+out:
preempt_enable();
if (clusterpages == 1)
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index b535898ca753..5f61f99f4c10 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -31,13 +31,32 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
static atomic_long_t erofs_global_shrink_cnt;
#ifdef CONFIG_EROFS_FS_ZIP
+#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
+#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
-struct erofs_workgroup *erofs_find_workgroup(
- struct super_block *sb, pgoff_t index, bool *tag)
+static int erofs_workgroup_get(struct erofs_workgroup *grp)
+{
+ int o;
+
+repeat:
+ o = erofs_wait_on_workgroup_freezed(grp);
+ if (unlikely(o <= 0))
+ return -1;
+
+ if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
+ goto repeat;
+
+ /* decrease the refcount paired with erofs_workgroup_put */
+ if (unlikely(o == 1))
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ return 0;
+}
+
+struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+ pgoff_t index, bool *tag)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_workgroup *grp;
- int oldcount;
repeat:
rcu_read_lock();
@@ -46,15 +65,12 @@ repeat:
*tag = xa_pointer_tag(grp);
grp = xa_untag_pointer(grp);
- if (erofs_workgroup_get(grp, &oldcount)) {
+ if (erofs_workgroup_get(grp)) {
/* prefer to relax rcu read side */
rcu_read_unlock();
goto repeat;
}
- /* decrease refcount added by erofs_workgroup_put */
- if (unlikely(oldcount == 1))
- atomic_long_dec(&erofs_global_shrink_cnt);
DBG_BUGON(index != grp->index);
}
rcu_read_unlock();
@@ -104,8 +120,6 @@ int erofs_register_workgroup(struct super_block *sb,
return err;
}
-extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
-
static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
atomic_long_dec(&erofs_global_shrink_cnt);
@@ -131,9 +145,9 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
__erofs_workgroup_free(grp);
}
-bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
+static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp,
+ bool cleanup)
{
/*
* for managed cache enabled, the refcount of workgroups
@@ -172,9 +186,9 @@ bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
#else
/* for nocache case, no customized reclaim path at all */
-bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
+static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp,
+ bool cleanup)
{
int cnt = atomic_read(&grp->refcount);
@@ -256,14 +270,14 @@ void erofs_unregister_super(struct super_block *sb)
spin_unlock(&erofs_sb_list_lock);
}
-unsigned long erofs_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long erofs_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc)
{
return atomic_long_read(&erofs_global_shrink_cnt);
}
-unsigned long erofs_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long erofs_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct erofs_sb_info *sbi;
struct list_head *p;
@@ -319,3 +333,9 @@ unsigned long erofs_shrink_scan(struct shrinker *shrink,
return freed;
}
+struct shrinker erofs_shrinker_info = {
+ .scan_objects = erofs_shrink_scan,
+ .count_objects = erofs_shrink_count,
+ .seeks = DEFAULT_SEEKS,
+};
+
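
With the shrinker callbacks now static, utils.c exports only the erofs_shrinker_info object, and the module init code is expected to register it. A minimal sketch of that registration pattern as it looks in this kernel generation (an illustrative demo module, not the erofs glue):

#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/shrinker.h>

static atomic_long_t demo_nr_cached = ATOMIC_LONG_INIT(0);

/* report how many objects could be reclaimed */
static unsigned long demo_shrink_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	return atomic_long_read(&demo_nr_cached);
}

/* reclaim up to sc->nr_to_scan objects and return how many were freed */
static unsigned long demo_shrink_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	unsigned long avail = atomic_long_read(&demo_nr_cached);
	unsigned long freed = sc->nr_to_scan < avail ? sc->nr_to_scan : avail;

	atomic_long_sub(freed, &demo_nr_cached);
	return freed ? freed : SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_shrink_count,
	.scan_objects = demo_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
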
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 80dca6a4adbe..f716ab0446e5 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -44,19 +44,48 @@ static inline void xattr_iter_end_final(struct xattr_iter *it)
static int init_inode_xattrs(struct inode *inode)
{
+ struct erofs_vnode *const vi = EROFS_V(inode);
struct xattr_iter it;
unsigned int i;
struct erofs_xattr_ibody_header *ih;
struct super_block *sb;
struct erofs_sb_info *sbi;
- struct erofs_vnode *vi;
bool atomic_map;
+ int ret = 0;
- if (likely(inode_has_inited_xattr(inode)))
+ /* the most common case: xattrs of this inode are already initialized */
+ if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
return 0;
- vi = EROFS_V(inode);
- BUG_ON(!vi->xattr_isize);
+ if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+
+ /* someone has initialized xattrs for us? */
+ if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+ goto out_unlock;
+
+ /*
+ * bypass all xattr operations if ->xattr_isize is not greater than
+ * sizeof(struct erofs_xattr_ibody_header); in detail:
+ * 1) if it is too small to contain erofs_xattr_ibody_header,
+ * ->xattr_isize should be 0 (which means no xattr at all);
+ * 2) if it is exactly sizeof(erofs_xattr_ibody_header), the on-disk
+ * layout is currently undefined (may be used later by a new sb feature).
+ */
+ if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
+ errln("xattr_isize %d of nid %llu is not supported yet",
+ vi->xattr_isize, vi->nid);
+ ret = -ENOTSUPP;
+ goto out_unlock;
+ } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
+ if (unlikely(vi->xattr_isize)) {
+ DBG_BUGON(1);
+ ret = -EIO;
+ goto out_unlock; /* xattr ondisk layout error */
+ }
+ ret = -ENOATTR;
+ goto out_unlock;
+ }
sb = inode->i_sb;
sbi = EROFS_SB(sb);
@@ -64,8 +93,10 @@ static int init_inode_xattrs(struct inode *inode)
it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
it.page = erofs_get_inline_page(inode, it.blkaddr);
- if (IS_ERR(it.page))
- return PTR_ERR(it.page);
+ if (IS_ERR(it.page)) {
+ ret = PTR_ERR(it.page);
+ goto out_unlock;
+ }
/* read in shared xattr array (non-atomic, see kmalloc below) */
it.kaddr = kmap(it.page);
@@ -78,7 +109,8 @@ static int init_inode_xattrs(struct inode *inode)
sizeof(uint), GFP_KERNEL);
if (vi->xattr_shared_xattrs == NULL) {
xattr_iter_end(&it, atomic_map);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_unlock;
}
/* let's skip ibody header */
@@ -92,8 +124,12 @@ static int init_inode_xattrs(struct inode *inode)
it.page = erofs_get_meta_page(sb,
++it.blkaddr, S_ISDIR(inode->i_mode));
- if (IS_ERR(it.page))
- return PTR_ERR(it.page);
+ if (IS_ERR(it.page)) {
+ kfree(vi->xattr_shared_xattrs);
+ vi->xattr_shared_xattrs = NULL;
+ ret = PTR_ERR(it.page);
+ goto out_unlock;
+ }
it.kaddr = kmap_atomic(it.page);
atomic_map = true;
@@ -105,8 +141,11 @@ static int init_inode_xattrs(struct inode *inode)
}
xattr_iter_end(&it, atomic_map);
- inode_set_inited_xattr(inode);
- return 0;
+ set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
+
+out_unlock:
+ clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
+ return ret;
}
/*
@@ -117,10 +156,12 @@ static int init_inode_xattrs(struct inode *inode)
* and need to be handled
*/
struct xattr_iter_handlers {
- int (*entry)(struct xattr_iter *, struct erofs_xattr_entry *);
- int (*name)(struct xattr_iter *, unsigned int, char *, unsigned int);
- int (*alloc_buffer)(struct xattr_iter *, unsigned int);
- void (*value)(struct xattr_iter *, unsigned int, char *, unsigned int);
+ int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
+ int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
+ unsigned int len);
+ int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
+ void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
+ unsigned int len);
};
static inline int xattr_iter_fixup(struct xattr_iter *it)
@@ -422,7 +463,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
- struct erofs_vnode *const vi = EROFS_V(inode);
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
switch (handler->flags) {
@@ -440,9 +480,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
return -EINVAL;
}
- if (!vi->xattr_isize)
- return -ENOATTR;
-
return erofs_getxattr(inode, handler->flags, name, buffer, size);
}
@@ -503,8 +540,7 @@ static int xattr_entrylist(struct xattr_iter *_it,
if (h == NULL || (h->list != NULL && !h->list(it->dentry)))
return 1;
- /* Note that at least one of 'prefix' and 'name' should be non-NULL */
- prefix = h->prefix != NULL ? h->prefix : h->name;
+ prefix = xattr_prefix(h);
prefix_len = strlen(prefix);
if (it->buffer == NULL) {
@@ -627,3 +663,40 @@ ssize_t erofs_listxattr(struct dentry *dentry,
return shared_listxattr(&it);
}
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+struct posix_acl *erofs_get_acl(struct inode *inode, int type)
+{
+ struct posix_acl *acl;
+ int prefix, rc;
+ char *value = NULL;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ rc = erofs_getxattr(inode, prefix, "", NULL, 0);
+ if (rc > 0) {
+ value = kmalloc(rc, GFP_KERNEL);
+ if (!value)
+ return ERR_PTR(-ENOMEM);
+ rc = erofs_getxattr(inode, prefix, "", value, rc);
+ }
+
+ if (rc == -ENOATTR)
+ acl = NULL;
+ else if (rc < 0)
+ acl = ERR_PTR(rc);
+ else
+ acl = posix_acl_from_xattr(&init_user_ns, value, rc);
+ kfree(value);
+ return acl;
+}
+#endif
+
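
erofs_get_acl() above uses the common two-step xattr read: call erofs_getxattr() with a NULL buffer to learn the value size, allocate, then read the value for real before handing it to posix_acl_from_xattr(). The same pattern is visible from userspace with getxattr(2), shown here purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

/* probe the size first, then fetch; a second call may still fail if the
 * value changed in between, which callers must tolerate.
 */
static void *read_xattr(const char *path, const char *name, ssize_t *lenp)
{
	ssize_t len = getxattr(path, name, NULL, 0);	/* size probe */
	void *buf;

	if (len < 0)
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	len = getxattr(path, name, buf, len);		/* actual fetch */
	if (len < 0) {
		free(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}

int main(int argc, char **argv)
{
	ssize_t len;
	void *val;

	if (argc < 3)
		return 1;

	val = read_xattr(argv[1], argv[2], &len);
	if (val) {
		printf("%s: %zd bytes\n", argv[2], len);
		free(val);
	}
	return 0;
}
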
diff --git a/drivers/staging/erofs/xattr.h b/drivers/staging/erofs/xattr.h
index 0c7379282fc5..35ba5ac2139a 100644
--- a/drivers/staging/erofs/xattr.h
+++ b/drivers/staging/erofs/xattr.h
@@ -68,9 +68,7 @@ static const struct xattr_handler *xattr_handler_map[] = {
}
#ifdef CONFIG_EROFS_FS_XATTR
-
-extern const struct inode_operations erofs_generic_xattr_iops;
-extern const struct inode_operations erofs_dir_xattr_iops;
+extern const struct xattr_handler *erofs_xattr_handlers[];
int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
ssize_t erofs_listxattr(struct dentry *, char *, size_t);
@@ -89,5 +87,11 @@ static ssize_t __maybe_unused erofs_listxattr(struct dentry *dentry,
}
#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+struct posix_acl *erofs_get_acl(struct inode *inode, int type);
+#else
+#define erofs_get_acl (NULL)
+#endif
+
#endif
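
Related to the xattr changes, init_inode_xattrs() (in the xattr.c hunks above) now serializes first-time initialization with a per-inode bit lock instead of a single racy flag. A condensed sketch of that "initialize once under a bit lock" pattern, with illustrative names rather than the erofs ones:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define DEMO_INITED_BIT	0
#define DEMO_BL_BIT	1

static int demo_init_once(unsigned long *flags, int (*do_init)(void))
{
	int ret = 0;

	if (test_bit(DEMO_INITED_BIT, flags))	/* fast path: already done */
		return 0;

	if (wait_on_bit_lock(flags, DEMO_BL_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	if (test_bit(DEMO_INITED_BIT, flags))	/* raced with another initializer */
		goto out_unlock;

	ret = do_init();
	if (!ret)
		set_bit(DEMO_INITED_BIT, flags);

out_unlock:
	clear_and_wake_up_bit(DEMO_BL_BIT, flags);
	return ret;
}
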
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index f6f30f5bf15a..8f27bd8da17d 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -79,14 +79,14 @@ static int init_display(struct fbtft_par *par)
static void reset(struct fbtft_par *par)
{
- if (par->gpio.reset == -1)
+ if (!par->gpio.reset)
return;
dev_dbg(par->info->device, "%s()\n", __func__);
- gpio_set_value(par->gpio.reset, 0);
+ gpiod_set_value(par->gpio.reset, 0);
udelay(20);
- gpio_set_value(par->gpio.reset, 1);
+ gpiod_set_value(par->gpio.reset, 1);
mdelay(120);
}
@@ -98,30 +98,30 @@ static int verify_gpios(struct fbtft_par *par)
dev_dbg(par->info->device,
"%s()\n", __func__);
- if (par->EPIN < 0) {
+ if (!par->EPIN) {
dev_err(par->info->device,
"Missing info about 'wr' (aka E) gpio. Aborting.\n");
return -EINVAL;
}
for (i = 0; i < 8; ++i) {
- if (par->gpio.db[i] < 0) {
+ if (!par->gpio.db[i]) {
dev_err(par->info->device,
"Missing info about 'db[%i]' gpio. Aborting.\n",
i);
return -EINVAL;
}
}
- if (par->CS0 < 0) {
+ if (!par->CS0) {
dev_err(par->info->device,
"Missing info about 'cs0' gpio. Aborting.\n");
return -EINVAL;
}
- if (par->CS1 < 0) {
+ if (!par->CS1) {
dev_err(par->info->device,
"Missing info about 'cs1' gpio. Aborting.\n");
return -EINVAL;
}
- if (par->RW < 0) {
+ if (!par->RW) {
dev_err(par->info->device,
"Missing info about 'rw' gpio. Aborting.\n");
return -EINVAL;
@@ -139,22 +139,22 @@ request_gpios_match(struct fbtft_par *par, const struct fbtft_gpio *gpio)
if (strcasecmp(gpio->name, "wr") == 0) {
/* left ks0108 E pin */
par->EPIN = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
+ return GPIOD_OUT_LOW;
} else if (strcasecmp(gpio->name, "cs0") == 0) {
/* left ks0108 controller pin */
par->CS0 = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
+ return GPIOD_OUT_HIGH;
} else if (strcasecmp(gpio->name, "cs1") == 0) {
/* right ks0108 controller pin */
par->CS1 = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
+ return GPIOD_OUT_HIGH;
}
/* if write (rw = 0) e(1->0) perform write */
/* if read (rw = 1) e(0->1) set data on D0-7*/
else if (strcasecmp(gpio->name, "rw") == 0) {
par->RW = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
+ return GPIOD_OUT_LOW;
}
return FBTFT_GPIO_NO_MATCH;
@@ -194,15 +194,15 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
/* select chip */
if (*buf) {
/* cs1 */
- gpio_set_value(par->CS0, 1);
- gpio_set_value(par->CS1, 0);
+ gpiod_set_value(par->CS0, 1);
+ gpiod_set_value(par->CS1, 0);
} else {
/* cs0 */
- gpio_set_value(par->CS0, 0);
- gpio_set_value(par->CS1, 1);
+ gpiod_set_value(par->CS0, 0);
+ gpiod_set_value(par->CS1, 1);
}
- gpio_set_value(par->RS, 0); /* RS->0 (command mode) */
+ gpiod_set_value(par->RS, 0); /* RS->0 (command mode) */
len--;
if (len) {
@@ -364,7 +364,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
write_reg(par, 0x00, (0x17 << 3) | (u8)y);
/* write bitmap */
- gpio_set_value(par->RS, 1); /* RS->1 (data mode) */
+ gpiod_set_value(par->RS, 1); /* RS->1 (data mode) */
ret = par->fbtftops.write(par, buf, len);
if (ret < 0)
dev_err(par->info->device,
@@ -387,7 +387,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
write_reg(par, 0x01, (0x17 << 3) | (u8)y);
/* write bitmap */
- gpio_set_value(par->RS, 1); /* RS->1 (data mode) */
+ gpiod_set_value(par->RS, 1); /* RS->1 (data mode) */
par->fbtftops.write(par, buf, len);
if (ret < 0)
dev_err(par->info->device,
@@ -397,8 +397,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
kfree(convert_buf);
- gpio_set_value(par->CS0, 1);
- gpio_set_value(par->CS1, 1);
+ gpiod_set_value(par->CS0, 1);
+ gpiod_set_value(par->CS1, 1);
return ret;
}
@@ -408,7 +408,7 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
"%s(len=%d): ", __func__, len);
- gpio_set_value(par->RW, 0); /* set write mode */
+ gpiod_set_value(par->RW, 0); /* set write mode */
while (len--) {
u8 i, data;
@@ -417,12 +417,12 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
/* set data bus */
for (i = 0; i < 8; ++i)
- gpio_set_value(par->gpio.db[i], data & (1 << i));
+ gpiod_set_value(par->gpio.db[i], data & (1 << i));
/* set E */
- gpio_set_value(par->EPIN, 1);
+ gpiod_set_value(par->EPIN, 1);
udelay(5);
/* unset E - write */
- gpio_set_value(par->EPIN, 0);
+ gpiod_set_value(par->EPIN, 0);
udelay(1);
}
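
The fb_agm1264k-fl.c hunk above, and the fbtft hunks that follow, all apply the same mechanical conversion from legacy integer GPIOs to GPIO descriptors: lines are held as struct gpio_desc pointers, requested from firmware by function name, and absence is expressed as NULL instead of -1. A condensed sketch of the target idiom (driver and field names here are made up, not the fbtft ones):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_par {
	struct gpio_desc *reset;	/* NULL when the line is not wired up */
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_par *par = devm_kzalloc(&pdev->dev, sizeof(*par), GFP_KERNEL);

	if (!par)
		return -ENOMEM;

	/* looks up "reset-gpios" in DT/ACPI; NULL if the property is absent */
	par->reset = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(par->reset))
		return PTR_ERR(par->reset);

	if (par->reset) {	/* replaces the old "!= -1" check */
		/* logical levels: polarity comes from the firmware description */
		gpiod_set_value_cansleep(par->reset, 1);	/* assert reset */
		usleep_range(20, 40);
		gpiod_set_value_cansleep(par->reset, 0);	/* release reset */
	}

	platform_set_drvdata(pdev, par);
	return 0;
}
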
diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
index a58c514f4721..b6c6d66e4eb1 100644
--- a/drivers/staging/fbtft/fb_bd663474.c
+++ b/drivers/staging/fbtft/fb_bd663474.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -24,8 +24,8 @@
static int init_display(struct fbtft_par *par)
{
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+ if (!par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
par->fbtftops.reset(par);
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index 86e140244aab..d609a2b67db9 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <video/mipi_display.h>
@@ -77,8 +77,8 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+ if (!par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
mdelay(500);
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 740c0acbecd8..ea6e001288ce 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index 2cf75f2e03e2..b090e7ab6fdd 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -85,8 +85,8 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+ if (!par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
bt &= 0x07;
vc &= 0x07;
diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
index 430f21e50f4d..415183c7054a 100644
--- a/drivers/staging/fbtft/fb_ili9340.c
+++ b/drivers/staging/fbtft/fb_ili9340.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <video/mipi_display.h>
diff --git a/drivers/staging/fbtft/fb_pcd8544.c b/drivers/staging/fbtft/fb_pcd8544.c
index 32172f8f79f0..ad49973ad594 100644
--- a/drivers/staging/fbtft/fb_pcd8544.c
+++ b/drivers/staging/fbtft/fb_pcd8544.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -119,7 +119,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
/* Write data */
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf, 6 * 84);
if (ret < 0)
dev_err(par->info->device, "write failed and returned: %d\n",
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index 5d3b76ca74d8..70b37fc7fb66 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include "fbtft.h"
#define DRVNAME "fb_ra8875"
@@ -39,7 +39,7 @@ static int write_spi(struct fbtft_par *par, void *buf, size_t len)
static int init_display(struct fbtft_par *par)
{
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
"%s()\n", __func__);
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
index aa716f33420a..b3d0701880fe 100644
--- a/drivers/staging/fbtft/fb_s6d1121.c
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -29,8 +29,8 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+ if (!par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
/* Initialization sequence from Lib_UTFT */
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
index 00096f8d249a..6f7249493ea3 100644
--- a/drivers/staging/fbtft/fb_sh1106.c
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index c9b18b3ba4ab..bbf75f795234 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include "fbtft.h"
@@ -28,8 +28,8 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+ if (!par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
write_reg(par, 0x00, 0x0001);
write_reg(par, 0x03, 0xA8A4);
diff --git a/drivers/staging/fbtft/fb_ssd1305.c b/drivers/staging/fbtft/fb_ssd1305.c
index 3515888d94c9..020fe48fed0b 100644
--- a/drivers/staging/fbtft/fb_ssd1305.c
+++ b/drivers/staging/fbtft/fb_ssd1305.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -168,7 +168,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
/* Write data */
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf,
par->info->var.xres * par->info->var.yres /
8);
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
index 50172ddd94ae..d7c5e2e0eee9 100644
--- a/drivers/staging/fbtft/fb_ssd1306.c
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -190,7 +190,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
/* Write data */
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf, xres * yres / 8);
if (ret < 0)
dev_err(par->info->device, "write failed and returned: %d\n",
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
index f974f7fc4d79..8a3140d41d8b 100644
--- a/drivers/staging/fbtft/fb_ssd1325.c
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -35,7 +35,7 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- gpio_set_value(par->gpio.cs, 0);
+ gpiod_set_value(par->gpio.cs, 0);
write_reg(par, 0xb3);
write_reg(par, 0xf0);
@@ -155,7 +155,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
}
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
/* Write data */
ret = par->fbtftops.write(par, par->txbuf.buf,
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 0b614c84822e..9f54fe28d511 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -2,7 +2,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -80,8 +80,8 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
va_start(args, len);
*buf = (u8)va_arg(args, unsigned int);
- if (par->gpio.dc != -1)
- gpio_set_value(par->gpio.dc, 0);
+ if (!par->gpio.dc)
+ gpiod_set_value(par->gpio.dc, 0);
ret = par->fbtftops.write(par, par->buf, sizeof(u8));
if (ret < 0) {
va_end(args);
@@ -103,8 +103,8 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
return;
}
}
- if (par->gpio.dc != -1)
- gpio_set_value(par->gpio.dc, 1);
+ if (!par->gpio.dc)
+ gpiod_set_value(par->gpio.dc, 1);
va_end(args);
}
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index 3da091b4d297..9ac78ce30619 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -2,7 +2,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -164,7 +164,7 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
static int blank(struct fbtft_par *par, bool on)
{
fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
+ __func__, on ? "true" : "false");
if (on)
write_reg(par, 0xAE);
else
diff --git a/drivers/staging/fbtft/fb_tinylcd.c b/drivers/staging/fbtft/fb_tinylcd.c
index e463b0ddf16d..9469248f2c50 100644
--- a/drivers/staging/fbtft/fb_tinylcd.c
+++ b/drivers/staging/fbtft/fb_tinylcd.c
@@ -38,7 +38,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xE5, 0x00);
write_reg(par, 0xF0, 0x36, 0xA5, 0x53);
write_reg(par, 0xE0, 0x00, 0x35, 0x33, 0x00, 0x00, 0x00,
- 0x00, 0x35, 0x33, 0x00, 0x00, 0x00);
+ 0x00, 0x35, 0x33, 0x00, 0x00, 0x00);
write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
udelay(250);
diff --git a/drivers/staging/fbtft/fb_tls8204.c b/drivers/staging/fbtft/fb_tls8204.c
index 277b6ed9c725..bec6dd0ffb01 100644
--- a/drivers/staging/fbtft/fb_tls8204.c
+++ b/drivers/staging/fbtft/fb_tls8204.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -94,7 +94,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
/* The display is 102x68 but the LCD is 84x48.
* Set the write pointer at the start of each row.
*/
- gpio_set_value(par->gpio.dc, 0);
+ gpiod_set_value(par->gpio.dc, 0);
write_reg(par, 0x80 | 0);
write_reg(par, 0x40 | y);
@@ -109,7 +109,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
*buf++ = ch;
}
/* Write the row */
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
if (ret < 0) {
dev_err(par->info->device,
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index dfaf8bc70f73..65681d0fe200 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -251,7 +251,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
}
break;
}
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
/* Write data */
ret = par->fbtftops.write(par, par->txbuf.buf, len / 2);
diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c
index 0a3531d6eb39..e4ccc73868a7 100644
--- a/drivers/staging/fbtft/fb_uc1701.c
+++ b/drivers/staging/fbtft/fb_uc1701.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -136,9 +136,9 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
write_reg(par, LCD_PAGE_ADDRESS | (u8)y);
write_reg(par, 0x00);
write_reg(par, LCD_COL_ADDRESS);
- gpio_set_value(par->gpio.dc, 1);
+ gpiod_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
- gpio_set_value(par->gpio.dc, 0);
+ gpiod_set_value(par->gpio.dc, 0);
}
if (ret < 0)
diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
index acc425fdf34e..564a38e34440 100644
--- a/drivers/staging/fbtft/fb_upd161704.c
+++ b/drivers/staging/fbtft/fb_upd161704.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -26,8 +26,8 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+ if (!par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
/* Initialization sequence from Lib_UTFT */
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index e77178157f1b..0a5206d28da4 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
@@ -213,7 +213,7 @@ static int set_var(struct fbtft_par *par)
static int verify_gpios(struct fbtft_par *par)
{
- if (par->gpio.reset < 0) {
+ if (!par->gpio.reset) {
dev_err(par->info->device, "Missing 'reset' gpio. Aborting.\n");
return -EINVAL;
}
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 8ce1ff9b6c2a..2ea814d0dca5 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include "fbtft.h"
@@ -135,8 +135,8 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
- if (par->gpio.dc != -1)
- gpio_set_value(par->gpio.dc, 1);
+ if (!par->gpio.dc)
+ gpiod_set_value(par->gpio.dc, 1);
/* non buffered write */
if (!par->txbuf.buf)
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index a2df02d97a8e..9b07badf4c6c 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -16,7 +16,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
@@ -24,7 +24,6 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <video/mipi_display.h>
#include "fbtft.h"
@@ -38,8 +37,8 @@ int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc)
{
int ret;
- if (gpio_is_valid(par->gpio.dc))
- gpio_set_value(par->gpio.dc, dc);
+ if (par->gpio.dc)
+ gpiod_set_value(par->gpio.dc, dc);
ret = par->fbtftops.write(par, buf, len);
if (ret < 0)
@@ -71,127 +70,26 @@ void fbtft_dbg_hex(const struct device *dev, int groupsize,
}
EXPORT_SYMBOL(fbtft_dbg_hex);
-static unsigned long fbtft_request_gpios_match(struct fbtft_par *par,
- const struct fbtft_gpio *gpio)
-{
- int ret;
- unsigned int val;
-
- fbtft_par_dbg(DEBUG_REQUEST_GPIOS_MATCH, par, "%s('%s')\n",
- __func__, gpio->name);
-
- if (strcasecmp(gpio->name, "reset") == 0) {
- par->gpio.reset = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- } else if (strcasecmp(gpio->name, "dc") == 0) {
- par->gpio.dc = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
- } else if (strcasecmp(gpio->name, "cs") == 0) {
- par->gpio.cs = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- } else if (strcasecmp(gpio->name, "wr") == 0) {
- par->gpio.wr = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- } else if (strcasecmp(gpio->name, "rd") == 0) {
- par->gpio.rd = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- } else if (strcasecmp(gpio->name, "latch") == 0) {
- par->gpio.latch = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
- } else if (gpio->name[0] == 'd' && gpio->name[1] == 'b') {
- ret = kstrtouint(&gpio->name[2], 10, &val);
- if (ret == 0 && val < 16) {
- par->gpio.db[val] = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
- }
- } else if (strcasecmp(gpio->name, "led") == 0) {
- par->gpio.led[0] = gpio->gpio;
- return GPIOF_OUT_INIT_LOW;
- } else if (strcasecmp(gpio->name, "led_") == 0) {
- par->gpio.led[0] = gpio->gpio;
- return GPIOF_OUT_INIT_HIGH;
- }
-
- return FBTFT_GPIO_NO_MATCH;
-}
-
-static int fbtft_request_gpios(struct fbtft_par *par)
-{
- struct fbtft_platform_data *pdata = par->pdata;
- const struct fbtft_gpio *gpio;
- unsigned long flags;
- int ret;
-
- if (!(pdata && pdata->gpios))
- return 0;
-
- gpio = pdata->gpios;
- while (gpio->name[0]) {
- flags = FBTFT_GPIO_NO_MATCH;
- /* if driver provides match function, try it first,
- * if no match use our own
- */
- if (par->fbtftops.request_gpios_match)
- flags = par->fbtftops.request_gpios_match(par, gpio);
- if (flags == FBTFT_GPIO_NO_MATCH)
- flags = fbtft_request_gpios_match(par, gpio);
- if (flags != FBTFT_GPIO_NO_MATCH) {
- ret = devm_gpio_request_one(par->info->device,
- gpio->gpio, flags,
- par->info->device->driver->name);
- if (ret < 0) {
- dev_err(par->info->device,
- "%s: gpio_request_one('%s'=%d) failed with %d\n",
- __func__, gpio->name,
- gpio->gpio, ret);
- return ret;
- }
- fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par,
- "%s: '%s' = GPIO%d\n",
- __func__, gpio->name, gpio->gpio);
- }
- gpio++;
- }
-
- return 0;
-}
-
#ifdef CONFIG_OF
static int fbtft_request_one_gpio(struct fbtft_par *par,
- const char *name, int index, int *gpiop)
+ const char *name, int index,
+ struct gpio_desc **gpiop)
{
struct device *dev = par->info->device;
struct device_node *node = dev->of_node;
- int gpio, flags, ret = 0;
- enum of_gpio_flags of_flags;
+ int ret = 0;
if (of_find_property(node, name, NULL)) {
- gpio = of_get_named_gpio_flags(node, name, index, &of_flags);
- if (gpio == -ENOENT)
- return 0;
- if (gpio == -EPROBE_DEFER)
- return gpio;
- if (gpio < 0) {
- dev_err(dev,
- "failed to get '%s' from DT\n", name);
- return gpio;
- }
-
- /* active low translates to initially low */
- flags = (of_flags & OF_GPIO_ACTIVE_LOW) ? GPIOF_OUT_INIT_LOW :
- GPIOF_OUT_INIT_HIGH;
- ret = devm_gpio_request_one(dev, gpio, flags,
- dev->driver->name);
- if (ret) {
+ *gpiop = devm_gpiod_get_index(dev, dev->driver->name, index,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(*gpiop)) {
+ ret = PTR_ERR(*gpiop);
dev_err(dev,
- "gpio_request_one('%s'=%d) failed with %d\n",
- name, gpio, ret);
+ "Failed to request %s GPIO:%d\n", name, ret);
return ret;
}
- if (gpiop)
- *gpiop = gpio;
- fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' = GPIO%d\n",
- __func__, name, gpio);
+ fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
+ __func__, name);
}
return ret;
@@ -254,9 +152,9 @@ static int fbtft_backlight_update_status(struct backlight_device *bd)
if ((bd->props.power == FB_BLANK_UNBLANK) &&
(bd->props.fb_blank == FB_BLANK_UNBLANK))
- gpio_set_value(par->gpio.led[0], polarity);
+ gpiod_set_value(par->gpio.led[0], polarity);
else
- gpio_set_value(par->gpio.led[0], !polarity);
+ gpiod_set_value(par->gpio.led[0], !polarity);
return 0;
}
@@ -286,7 +184,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
struct backlight_device *bd;
struct backlight_properties bl_props = { 0, };
- if (par->gpio.led[0] == -1) {
+ if (!par->gpio.led[0]) {
fbtft_par_dbg(DEBUG_BACKLIGHT, par,
"%s(): led pin not set, exiting.\n", __func__);
return;
@@ -295,7 +193,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
bl_props.type = BACKLIGHT_RAW;
/* Assume backlight is off, get polarity from current state of pin */
bl_props.power = FB_BLANK_POWERDOWN;
- if (!gpio_get_value(par->gpio.led[0]))
+ if (!gpiod_get_value(par->gpio.led[0]))
par->polarity = true;
bd = backlight_device_register(dev_driver_string(par->info->device),
@@ -333,12 +231,12 @@ static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
static void fbtft_reset(struct fbtft_par *par)
{
- if (par->gpio.reset == -1)
+ if (!par->gpio.reset)
return;
fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
- gpio_set_value_cansleep(par->gpio.reset, 0);
+ gpiod_set_value_cansleep(par->gpio.reset, 0);
usleep_range(20, 40);
- gpio_set_value_cansleep(par->gpio.reset, 1);
+ gpiod_set_value_cansleep(par->gpio.reset, 1);
msleep(120);
}
@@ -538,9 +436,9 @@ static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
return chan << bf->offset;
}
-static int fbtft_fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
- unsigned int blue, unsigned int transp,
- struct fb_info *info)
+static int fbtft_fb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int transp, struct fb_info *info)
{
unsigned int val;
int ret = 1;
@@ -663,7 +561,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
int txbuflen = display->txbuflen;
unsigned int bpp = display->bpp;
unsigned int fps = display->fps;
- int vmem_size, i;
+ int vmem_size;
const s16 *init_sequence = display->init_sequence;
char *gamma = display->gamma;
u32 *gamma_curves = NULL;
@@ -841,19 +739,6 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
par->txbuf.len = txbuflen;
}
- /* Initialize gpios to disabled */
- par->gpio.reset = -1;
- par->gpio.dc = -1;
- par->gpio.rd = -1;
- par->gpio.wr = -1;
- par->gpio.cs = -1;
- par->gpio.latch = -1;
- for (i = 0; i < 16; i++) {
- par->gpio.db[i] = -1;
- par->gpio.led[i] = -1;
- par->gpio.aux[i] = -1;
- }
-
/* default fbtft operations */
par->fbtftops.write = fbtft_write_spi;
par->fbtftops.read = fbtft_read_spi;
@@ -863,7 +748,6 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
par->fbtftops.reset = fbtft_reset;
par->fbtftops.mkdirty = fbtft_mkdirty;
par->fbtftops.update_display = fbtft_update_display;
- par->fbtftops.request_gpios = fbtft_request_gpios;
if (display->backlight)
par->fbtftops.register_backlight = fbtft_register_backlight;
@@ -1035,8 +919,8 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
return -EINVAL;
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
while (p) {
if (val & FBTFT_OF_INIT_CMD) {
@@ -1126,8 +1010,8 @@ int fbtft_init_display(struct fbtft_par *par)
}
par->fbtftops.reset(par);
- if (par->gpio.cs != -1)
- gpio_set_value(par->gpio.cs, 0); /* Activate chip */
+ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
i = 0;
while (i < FBTFT_MAX_INIT_SEQUENCE) {
@@ -1227,7 +1111,7 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
- par->gpio.dc < 0) {
+ !par->gpio.dc) {
dev_err(par->info->device,
"Missing info about 'dc' gpio. Aborting.\n");
return -EINVAL;
@@ -1236,12 +1120,12 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
if (!par->pdev)
return 0;
- if (par->gpio.wr < 0) {
+ if (!par->gpio.wr) {
dev_err(par->info->device, "Missing 'wr' gpio. Aborting.\n");
return -EINVAL;
}
for (i = 0; i < pdata->display.buswidth; i++) {
- if (par->gpio.db[i] < 0) {
+ if (!par->gpio.db[i]) {
dev_err(par->info->device,
"Missing 'db%02d' gpio. Aborting.\n", i);
return -EINVAL;
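For reference, a minimal sketch of the consumer pattern the fbtft-core conversion above moves to: request a descriptor, check only for ERR_PTR(), and drive it by logical value. The "reset" property name, the delays, and the probe context are illustrative assumptions, not code from this patch.

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_reset_pulse(struct device *dev)
{
	struct gpio_desc *reset;

	/* devm_gpiod_get() returns an ERR_PTR() on failure, never NULL */
	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 0);	/* pull the logical level low */
	usleep_range(20, 40);
	gpiod_set_value_cansleep(reset, 1);	/* and release it again */

	return 0;
}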
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index b5051d3d46a6..38cdad6203ea 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include "fbtft.h"
@@ -142,30 +142,30 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
data = *(u8 *)buf;
/* Start writing by pulling down /WR */
- gpio_set_value(par->gpio.wr, 0);
+ gpiod_set_value(par->gpio.wr, 0);
/* Set data */
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
if (data == prev_data) {
- gpio_set_value(par->gpio.wr, 0); /* used as delay */
+ gpiod_set_value(par->gpio.wr, 0); /* used as delay */
} else {
for (i = 0; i < 8; i++) {
if ((data & 1) != (prev_data & 1))
- gpio_set_value(par->gpio.db[i],
- data & 1);
+ gpiod_set_value(par->gpio.db[i],
+ data & 1);
data >>= 1;
prev_data >>= 1;
}
}
#else
for (i = 0; i < 8; i++) {
- gpio_set_value(par->gpio.db[i], data & 1);
+ gpiod_set_value(par->gpio.db[i], data & 1);
data >>= 1;
}
#endif
/* Pullup /WR */
- gpio_set_value(par->gpio.wr, 1);
+ gpiod_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
prev_data = *(u8 *)buf;
@@ -192,30 +192,30 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
data = *(u16 *)buf;
/* Start writing by pulling down /WR */
- gpio_set_value(par->gpio.wr, 0);
+ gpiod_set_value(par->gpio.wr, 0);
/* Set data */
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
if (data == prev_data) {
- gpio_set_value(par->gpio.wr, 0); /* used as delay */
+ gpiod_set_value(par->gpio.wr, 0); /* used as delay */
} else {
for (i = 0; i < 16; i++) {
if ((data & 1) != (prev_data & 1))
- gpio_set_value(par->gpio.db[i],
- data & 1);
+ gpiod_set_value(par->gpio.db[i],
+ data & 1);
data >>= 1;
prev_data >>= 1;
}
}
#else
for (i = 0; i < 16; i++) {
- gpio_set_value(par->gpio.db[i], data & 1);
+ gpiod_set_value(par->gpio.db[i], data & 1);
data >>= 1;
}
#endif
/* Pullup /WR */
- gpio_set_value(par->gpio.wr, 1);
+ gpiod_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
prev_data = *(u16 *)buf;
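The I/O helpers above only swap gpio_set_value() for gpiod_set_value(), which takes the logical level; any active-low flag in the firmware description is applied by gpiolib. A hedged sketch of the difference, with an assumed "led" property and no claim about this driver's wiring:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_led_on(struct device *dev)
{
	struct gpio_desc *led;

	/* GPIOD_OUT_LOW: start at logical 0 ("off"), whatever the wiring */
	led = devm_gpiod_get(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(led))
		return PTR_ERR(led);

	gpiod_set_value(led, 1);	/* logical "on", polarity handled for us */
	gpiod_set_raw_value(led, 1);	/* physical high, polarity ignored */

	return 0;
}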
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index ac427baa464a..7fdd3b0851ef 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -27,7 +27,7 @@
*/
struct fbtft_gpio {
char name[FBTFT_GPIO_NAME_SIZE];
- unsigned int gpio;
+ struct gpio_desc *gpio;
};
struct fbtft_par;
@@ -134,7 +134,6 @@ struct fbtft_display {
*/
struct fbtft_platform_data {
struct fbtft_display display;
- const struct fbtft_gpio *gpios;
unsigned int rotate;
bool bgr;
unsigned int fps;
@@ -207,15 +206,15 @@ struct fbtft_par {
unsigned int dirty_lines_start;
unsigned int dirty_lines_end;
struct {
- int reset;
- int dc;
- int rd;
- int wr;
- int latch;
- int cs;
- int db[16];
- int led[16];
- int aux[16];
+ struct gpio_desc *reset;
+ struct gpio_desc *dc;
+ struct gpio_desc *rd;
+ struct gpio_desc *wr;
+ struct gpio_desc *latch;
+ struct gpio_desc *cs;
+ struct gpio_desc *db[16];
+ struct gpio_desc *led[16];
+ struct gpio_desc *aux[16];
} gpio;
const s16 *init_sequence;
struct {
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 046f9d355ecb..5f6cd0816d58 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
@@ -45,11 +45,6 @@ static int mode = -1;
module_param(mode, int, 0000);
MODULE_PARM_DESC(mode, "SPI mode (override device default)");
-static char *gpios;
-module_param(gpios, charp, 0000);
-MODULE_PARM_DESC(gpios,
- "List of gpios. Comma separated with the form: reset:23,dc:24 (when overriding the default, all gpios must be specified)");
-
static unsigned int fps;
module_param(fps, uint, 0000);
MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
@@ -101,7 +96,7 @@ MODULE_PARM_DESC(debug,
static unsigned int verbose = 3;
module_param(verbose, uint, 0000);
MODULE_PARM_DESC(verbose,
- "0 silent, >0 show gpios, >1 show devices, >2 show devices before (default=3)");
+ "0 silent, >1 show devices, >2 show devices before (default=3)");
struct fbtft_device_display {
char *name;
@@ -279,12 +274,6 @@ static struct fbtft_device_display displays[] = {
.buswidth = 8,
.backlight = 1,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
.gamma = ADAFRUIT18_GAMMA,
}
}
@@ -302,12 +291,6 @@ static struct fbtft_device_display displays[] = {
adafruit18_green_tab_set_addr_win,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
.gamma = ADAFRUIT18_GAMMA,
}
}
@@ -323,11 +306,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "led", 23 },
- {},
- },
}
}
}, {
@@ -342,12 +320,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
}
}
}, {
@@ -362,12 +334,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
}
}
}, {
@@ -380,11 +346,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 8,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
}
}
}, {
@@ -399,12 +360,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
.init_sequence = cberry28_init_sequence,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 22 },
- { "led", 18 },
- {},
- },
.gamma = CBERRY28_GAMMA,
}
}
@@ -420,9 +375,6 @@ static struct fbtft_device_display displays[] = {
.buswidth = 8,
.backlight = FBTFT_ONBOARD_BACKLIGHT,
},
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
},
}
}
@@ -437,11 +389,6 @@ static struct fbtft_device_display displays[] = {
.buswidth = 8,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 13 },
- { "dc", 6 },
- {},
- },
}
}
}, {
@@ -458,11 +405,6 @@ static struct fbtft_device_display displays[] = {
.height = 272,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
}
}
}, {
@@ -479,11 +421,6 @@ static struct fbtft_device_display displays[] = {
.height = 480,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
}
}
}, {
@@ -496,10 +433,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 8,
},
- .gpios = (const struct fbtft_gpio []) {
- { "dc", 24 },
- {},
- },
}
}
}, {
@@ -512,9 +445,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 9,
},
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
}
}
}, {
@@ -523,13 +453,6 @@ static struct fbtft_device_display displays[] = {
.modalias = "flexfb",
.max_speed_hz = 32000000,
.mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
- }
}
}, {
.name = "flexpfb",
@@ -538,24 +461,6 @@ static struct fbtft_device_display displays[] = {
.id = 0,
.dev = {
.release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 17 },
- { "dc", 1 },
- { "wr", 0 },
- { "cs", 21 },
- { "db00", 9 },
- { "db01", 11 },
- { "db02", 18 },
- { "db03", 23 },
- { "db04", 24 },
- { "db05", 25 },
- { "db06", 8 },
- { "db07", 7 },
- { "led", 4 },
- {},
- },
- },
}
}
}, {
@@ -570,11 +475,6 @@ static struct fbtft_device_display displays[] = {
.backlight = FBTFT_ONBOARD_BACKLIGHT,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
}
}
}, {
@@ -588,12 +488,6 @@ static struct fbtft_device_display displays[] = {
.buswidth = 8,
.backlight = 1,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 23 },
- {},
- },
}
}
}, {
@@ -609,11 +503,6 @@ static struct fbtft_device_display displays[] = {
},
.startbyte = 0x70,
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "led", 18 },
- {},
- },
}
}
}, {
@@ -631,11 +520,6 @@ static struct fbtft_device_display displays[] = {
.startbyte = 0x70,
.bgr = true,
.fps = 50,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "led", 18 },
- {},
- },
.gamma = HY28B_GAMMA,
}
}
@@ -652,12 +536,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 22 },
- {},
- },
}
}
}, {
@@ -673,22 +551,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = false,
- .gpios = (const struct fbtft_gpio []) {
- /* Wiring for LCD adapter kit */
- { "reset", 7 },
- { "dc", 0 }, /* rev 2: 2 */
- { "wr", 1 }, /* rev 2: 3 */
- { "cs", 8 },
- { "db00", 17 },
- { "db01", 18 },
- { "db02", 21 }, /* rev 2: 27 */
- { "db03", 22 },
- { "db04", 23 },
- { "db05", 24 },
- { "db06", 25 },
- { "db07", 4 },
- {}
- },
},
}
}
@@ -705,9 +567,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
},
}
}
@@ -723,11 +582,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
}
}
}, {
@@ -743,12 +597,6 @@ static struct fbtft_device_display displays[] = {
},
.startbyte = 0x70,
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
}
}
}, {
@@ -763,11 +611,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "led", 18 },
- {},
- },
}
}
}, {
@@ -777,10 +620,6 @@ static struct fbtft_device_display displays[] = {
.max_speed_hz = 4000000,
.mode = SPI_MODE_3,
.platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- {},
- },
}
}
}, {
@@ -793,12 +632,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 8,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 23 },
- {},
- },
}
}
}, {
@@ -811,12 +644,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 8,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 23 },
- {},
- },
}
}
}, {
@@ -831,9 +658,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
}
}
}, {
@@ -849,12 +673,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 22 },
- {},
- },
}
}
}, {
@@ -871,10 +689,6 @@ static struct fbtft_device_display displays[] = {
.init_sequence = pitft_init_sequence,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "dc", 25 },
- {},
- },
}
}
}, {
@@ -888,11 +702,6 @@ static struct fbtft_device_display displays[] = {
.buswidth = 8,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
.gamma = PIOLED_GAMMA
}
}
@@ -908,12 +717,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 23 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
}
}
}, {
@@ -928,12 +731,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 23 },
- {},
- },
}
}
}, {
@@ -946,11 +743,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 8,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
}
}
}, {
@@ -968,9 +760,6 @@ static struct fbtft_device_display displays[] = {
.fbtftops.write = write_gpio16_wr_slow,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
},
},
}
@@ -988,9 +777,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
},
},
}
@@ -1010,9 +796,6 @@ static struct fbtft_device_display displays[] = {
fbtft_write_gpio16_wr_latched,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
},
},
}
@@ -1028,11 +811,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- {},
- },
}
}
}, {
@@ -1044,9 +822,6 @@ static struct fbtft_device_display displays[] = {
.chip_select = 0,
.mode = SPI_MODE_0,
.platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
}
}
}, {
@@ -1059,11 +834,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 8,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
}
}
}, {
@@ -1078,12 +848,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
}
}
}, {
@@ -1098,12 +862,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 25 },
- { "dc", 24 },
- { "led", 18 },
- {},
- },
}
}
}, {
@@ -1118,12 +876,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 15 },
- { "dc", 25 },
- { "led_", 18 },
- {},
- },
}
}
}, {
@@ -1138,12 +890,6 @@ static struct fbtft_device_display displays[] = {
.backlight = 1,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 15 },
- { "dc", 25 },
- { "led_", 18 },
- {},
- },
}
}
}, {
@@ -1156,11 +902,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 8,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
}
}
}, {
@@ -1177,11 +918,6 @@ static struct fbtft_device_display displays[] = {
waveshare32b_init_sequence,
},
.bgr = true,
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 27 },
- { "dc", 22 },
- {},
- },
}
}
}, {
@@ -1194,11 +930,6 @@ static struct fbtft_device_display displays[] = {
.display = {
.buswidth = 8,
},
- .gpios = (const struct fbtft_gpio []) {
- { "reset", 24 },
- { "dc", 25 },
- {},
- },
}
}
}, {
@@ -1211,9 +942,6 @@ static struct fbtft_device_display displays[] = {
.max_speed_hz = 0,
.mode = SPI_MODE_0,
.platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
}
},
.pdev = &(struct platform_device) {
@@ -1222,9 +950,6 @@ static struct fbtft_device_display displays[] = {
.dev = {
.release = fbtft_device_pdev_release,
.platform_data = &(struct fbtft_platform_data) {
- .gpios = (const struct fbtft_gpio []) {
- {},
- },
},
},
},
@@ -1246,30 +971,30 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
data = *(u16 *)buf;
/* Start writing by pulling down /WR */
- gpio_set_value(par->gpio.wr, 0);
+ gpiod_set_value(par->gpio.wr, 0);
/* Set data */
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
if (data == prev_data) {
- gpio_set_value(par->gpio.wr, 0); /* used as delay */
+ gpiod_set_value(par->gpio.wr, 0); /* used as delay */
} else {
for (i = 0; i < 16; i++) {
if ((data & 1) != (prev_data & 1))
- gpio_set_value(par->gpio.db[i],
- data & 1);
+ gpiod_set_value(par->gpio.db[i],
+ data & 1);
data >>= 1;
prev_data >>= 1;
}
}
#else
for (i = 0; i < 16; i++) {
- gpio_set_value(par->gpio.db[i], data & 1);
+ gpiod_set_value(par->gpio.db[i], data & 1);
data >>= 1;
}
#endif
/* Pullup /WR */
- gpio_set_value(par->gpio.wr, 1);
+ gpiod_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
prev_data = *(u16 *)buf;
@@ -1289,9 +1014,6 @@ static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
write_reg(par, 0x2C);
}
-/* used if gpios parameter is present */
-static struct fbtft_gpio fbtft_device_param_gpios[MAX_GPIOS + 1] = { };
-
static void fbtft_device_pdev_release(struct device *dev)
{
/* Needed to silence this message:
@@ -1382,11 +1104,8 @@ static int __init fbtft_device_init(void)
{
struct spi_board_info *spi = NULL;
struct fbtft_platform_data *pdata;
- const struct fbtft_gpio *gpio = NULL;
- char *p_gpio, *p_name, *p_num;
bool found = false;
int i = 0;
- long val;
int ret = 0;
if (!name) {
@@ -1404,38 +1123,6 @@ static int __init fbtft_device_init(void)
return -EINVAL;
}
- /* parse module parameter: gpios */
- while ((p_gpio = strsep(&gpios, ","))) {
- if (!strchr(p_gpio, ':')) {
- pr_err("error: missing ':' in gpios parameter: %s\n",
- p_gpio);
- return -EINVAL;
- }
- p_num = p_gpio;
- p_name = strsep(&p_num, ":");
- if (!p_name || !p_num) {
- pr_err("something bad happened parsing gpios parameter: %s\n",
- p_gpio);
- return -EINVAL;
- }
- ret = kstrtol(p_num, 10, &val);
- if (ret) {
- pr_err("could not parse number in gpios parameter: %s:%s\n",
- p_name, p_num);
- return -EINVAL;
- }
- strncpy(fbtft_device_param_gpios[i].name, p_name,
- FBTFT_GPIO_NAME_SIZE - 1);
- fbtft_device_param_gpios[i++].gpio = (int)val;
- if (i == MAX_GPIOS) {
- pr_err("gpios parameter: exceeded max array size: %d\n",
- MAX_GPIOS);
- return -EINVAL;
- }
- }
- if (fbtft_device_param_gpios[0].name[0])
- gpio = fbtft_device_param_gpios;
-
if (verbose > 2) {
pr_spi_devices(); /* print list of registered SPI devices */
pr_p_devices(); /* print list of 'fb' platform devices */
@@ -1516,8 +1203,6 @@ static int __init fbtft_device_init(void)
pdata->txbuflen = txbuflen;
if (init_num)
pdata->display.init_sequence = init;
- if (gpio)
- pdata->gpios = gpio;
if (custom) {
pdata->display.width = width;
pdata->display.height = height;
@@ -1549,19 +1234,6 @@ static int __init fbtft_device_init(void)
return -EINVAL;
}
- if (verbose && pdata && pdata->gpios) {
- gpio = pdata->gpios;
- pr_info("GPIOS used by '%s':\n", name);
- found = false;
- while (verbose && gpio->name[0]) {
- pr_info("'%s' = GPIO%d\n", gpio->name, gpio->gpio);
- gpio++;
- found = true;
- }
- if (!found)
- pr_info("(none)\n");
- }
-
if (spi_device && (verbose > 1))
pr_spi_devices();
if (p_device && (verbose > 1))
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index 2af474469e7d..c5fa59105a43 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
@@ -521,7 +521,7 @@ static int flexfb_verify_gpios_dc(struct fbtft_par *par)
{
fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
- if (par->gpio.dc < 0) {
+ if (!par->gpio.dc) {
dev_err(par->info->device,
"Missing info about 'dc' gpio. Aborting.\n");
return -EINVAL;
@@ -537,22 +537,22 @@ static int flexfb_verify_gpios_db(struct fbtft_par *par)
fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
- if (par->gpio.dc < 0) {
+ if (!par->gpio.dc) {
dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n");
return -EINVAL;
}
- if (par->gpio.wr < 0) {
+ if (!par->gpio.wr) {
dev_err(par->info->device, "Missing info about 'wr' gpio. Aborting.\n");
return -EINVAL;
}
- if (latched && (par->gpio.latch < 0)) {
+ if (latched && !par->gpio.latch) {
dev_err(par->info->device, "Missing info about 'latch' gpio. Aborting.\n");
return -EINVAL;
}
if (latched)
num_db = buswidth / 2;
for (i = 0; i < num_db; i++) {
- if (par->gpio.db[i] < 0) {
+ if (!par->gpio.db[i]) {
dev_err(par->info->device,
"Missing info about 'db%02d' gpio. Aborting.\n",
i);
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
index da744f2b0ee6..14b974defa3a 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2017-2018 NXP
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
index db43fa3782b8..25635259ce44 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2017-2018 NXP
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index daabaceeea52..ad577beeb052 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -505,6 +505,17 @@ static netdev_tx_t port_dropframe(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int swdev_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+
+ ppid->id_len = 1;
+ ppid->id[0] = port_priv->ethsw_data->dev_id;
+
+ return 0;
+}
+
static const struct net_device_ops ethsw_port_ops = {
.ndo_open = port_open,
.ndo_stop = port_stop,
@@ -515,6 +526,7 @@ static const struct net_device_ops ethsw_port_ops = {
.ndo_get_offload_stats = port_get_offload_stats,
.ndo_start_xmit = port_dropframe,
+ .ndo_get_port_parent_id = swdev_get_port_parent_id,
};
static void ethsw_links_state_update(struct ethsw_core *ethsw)
@@ -628,31 +640,6 @@ static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
fsl_mc_free_irqs(sw_dev);
}
-static int swdev_port_attr_get(struct net_device *netdev,
- struct switchdev_attr *attr)
-{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = 1;
- attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- attr->u.brport_flags =
- (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
- (port_priv->flood ? BR_FLOOD : 0);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
- attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static int port_attr_stp_state_set(struct net_device *netdev,
struct switchdev_trans *trans,
u8 state)
@@ -665,6 +652,16 @@ static int port_attr_stp_state_set(struct net_device *netdev,
return ethsw_port_set_stp_state(port_priv, state);
}
+static int port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
+{
+ if (flags & ~(BR_LEARNING | BR_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
static int port_attr_br_flags_set(struct net_device *netdev,
struct switchdev_trans *trans,
unsigned long flags)
@@ -697,6 +694,10 @@ static int swdev_port_attr_set(struct net_device *netdev,
err = port_attr_stp_state_set(netdev, trans,
attr->u.stp_state);
break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = port_attr_br_flags_pre_set(netdev, trans,
+ attr->u.brport_flags);
+ break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = port_attr_br_flags_set(netdev, trans,
attr->u.brport_flags);
@@ -924,10 +925,18 @@ static int swdev_port_obj_del(struct net_device *netdev,
return err;
}
-static const struct switchdev_ops ethsw_port_switchdev_ops = {
- .switchdev_port_attr_get = swdev_port_attr_get,
- .switchdev_port_attr_set = swdev_port_attr_set,
-};
+static int
+ethsw_switchdev_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info *port_attr_info)
+{
+ int err;
+
+ err = swdev_port_attr_set(netdev, port_attr_info->attr,
+ port_attr_info->trans);
+
+ port_attr_info->handled = true;
+ return notifier_from_errno(err);
+}
/* For the moment, only flood setting needs to be updated */
static int port_bridge_join(struct net_device *netdev,
@@ -1047,6 +1056,12 @@ static int port_switchdev_event(struct notifier_block *unused,
struct ethsw_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ if (!ethsw_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET)
+ return ethsw_switchdev_port_attr_set_event(dev, ptr);
+
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return NOTIFY_BAD;
@@ -1115,6 +1130,8 @@ static int port_switchdev_blocking_event(struct notifier_block *unused,
case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
case SWITCHDEV_PORT_OBJ_DEL:
return ethsw_switchdev_port_obj_event(event, dev, ptr);
+ case SWITCHDEV_PORT_ATTR_SET:
+ return ethsw_switchdev_port_attr_set_event(dev, ptr);
}
return NOTIFY_DONE;
@@ -1434,7 +1451,6 @@ static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
SET_NETDEV_DEV(port_netdev, dev);
port_netdev->netdev_ops = &ethsw_port_ops;
port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
- port_netdev->switchdev_ops = &ethsw_port_switchdev_ops;
/* Set MTU limits */
port_netdev->min_mtu = ETH_MIN_MTU;
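The switchdev changes above drop the attr_get/attr_set ops in favour of handling SWITCHDEV_PORT_ATTR_SET from the notifier chain. A sketch of that forwarding pattern under the same notifier API; my_attr_set() is a placeholder, not a helper from this driver:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

static int my_attr_set(struct net_device *dev,
		       const struct switchdev_attr *attr,
		       struct switchdev_trans *trans)
{
	return -EOPNOTSUPP;	/* placeholder: apply the attribute here */
}

static int example_blocking_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_attr_info *info = ptr;
	int err;

	if (event != SWITCHDEV_PORT_ATTR_SET)
		return NOTIFY_DONE;

	err = my_attr_set(dev, info->attr, info->trans);
	info->handled = true;		/* tell switchdev it was consumed */

	return notifier_from_errno(err);
}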
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
index 069c99bfba74..c48783680a05 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* DPAA2 Ethernet Switch declarations
*
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 3e416f5bbcba..a1b90ea7fcb8 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -1213,6 +1213,7 @@ static int get_serial_info(struct tty_struct *tty,
struct serial_struct *ss)
{
struct fwtty_port *port = tty->driver_data;
+
mutex_lock(&port->port.mutex);
ss->type = PORT_UNKNOWN;
ss->line = port->port.tty->index;
diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c
index ad5657d213f0..ff61b782df30 100644
--- a/drivers/staging/gasket/gasket_interrupt.c
+++ b/drivers/staging/gasket/gasket_interrupt.c
@@ -9,7 +9,6 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/printk.h>
-#include <linux/version.h>
#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_interrupt.h>
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index d4520490cf6d..24a738238f9f 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -4,16 +4,6 @@
*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
index ab096bcef98c..b571e4e8060b 100644
--- a/drivers/staging/greybus/Kconfig
+++ b/drivers/staging/greybus/Kconfig
@@ -148,6 +148,7 @@ if GREYBUS_BRIDGED_PHY
config GREYBUS_GPIO
tristate "Greybus GPIO Bridged PHY driver"
depends on GPIOLIB
+ select GPIOLIB_IRQCHIP
---help---
Select this option if you have a device that follows the
Greybus GPIO Bridged PHY Class specification.
diff --git a/drivers/staging/greybus/TODO b/drivers/staging/greybus/TODO
index 3b90a5711998..31f1f2cb401c 100644
--- a/drivers/staging/greybus/TODO
+++ b/drivers/staging/greybus/TODO
@@ -1,5 +1,3 @@
* Convert all uses of the old GPIO API from <linux/gpio.h> to the
GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
lines from device tree or ACPI.
-* Convert the GPIO driver to use the GPIO irqchip library
- GPIOLIB_IRQCHIP instead of reimplementing the same.
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index be5ffed90bcf..bbf3ba744fc4 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -8,9 +8,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
@@ -24,12 +23,12 @@ static void apb_bootret_deassert(struct device *dev);
struct arche_apb_ctrl_drvdata {
/* Control GPIO signals to and from AP <=> AP Bridges */
- int resetn_gpio;
- int boot_ret_gpio;
- int pwroff_gpio;
- int wake_in_gpio;
- int wake_out_gpio;
- int pwrdn_gpio;
+ struct gpio_desc *resetn;
+ struct gpio_desc *boot_ret;
+ struct gpio_desc *pwroff;
+ struct gpio_desc *wake_in;
+ struct gpio_desc *wake_out;
+ struct gpio_desc *pwrdn;
enum arche_platform_state state;
bool init_disabled;
@@ -37,28 +36,28 @@ struct arche_apb_ctrl_drvdata {
struct regulator *vcore;
struct regulator *vio;
- int clk_en_gpio;
+ struct gpio_desc *clk_en;
struct clk *clk;
struct pinctrl *pinctrl;
struct pinctrl_state *pin_default;
/* V2: SPI Bus control */
- int spi_en_gpio;
+ struct gpio_desc *spi_en;
bool spi_en_polarity_high;
};
/*
* Note that these low level api's are active high
*/
-static inline void deassert_reset(unsigned int gpio)
+static inline void deassert_reset(struct gpio_desc *gpio)
{
- gpio_set_value(gpio, 1);
+ gpiod_set_raw_value(gpio, 1);
}
-static inline void assert_reset(unsigned int gpio)
+static inline void assert_reset(struct gpio_desc *gpio)
{
- gpio_set_value(gpio, 0);
+ gpiod_set_raw_value(gpio, 0);
}
/*
@@ -75,11 +74,10 @@ static int coldboot_seq(struct platform_device *pdev)
return 0;
/* Hold APB in reset state */
- assert_reset(apb->resetn_gpio);
+ assert_reset(apb->resetn);
- if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
- gpio_is_valid(apb->spi_en_gpio))
- devm_gpio_free(dev, apb->spi_en_gpio);
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING && apb->spi_en)
+ devm_gpiod_put(dev, apb->spi_en);
/* Enable power to APB */
if (!IS_ERR(apb->vcore)) {
@@ -101,13 +99,13 @@ static int coldboot_seq(struct platform_device *pdev)
apb_bootret_deassert(dev);
/* On DB3 clock was not mandatory */
- if (gpio_is_valid(apb->clk_en_gpio))
- gpio_set_value(apb->clk_en_gpio, 1);
+ if (apb->clk_en)
+ gpiod_set_value(apb->clk_en, 1);
usleep_range(100, 200);
/* deassert reset to APB : Active-low signal */
- deassert_reset(apb->resetn_gpio);
+ deassert_reset(apb->resetn);
apb->state = ARCHE_PLATFORM_STATE_ACTIVE;
@@ -136,25 +134,25 @@ static int fw_flashing_seq(struct platform_device *pdev)
return ret;
}
- if (gpio_is_valid(apb->spi_en_gpio)) {
+ if (apb->spi_en) {
unsigned long flags;
if (apb->spi_en_polarity_high)
- flags = GPIOF_OUT_INIT_HIGH;
+ flags = GPIOD_OUT_HIGH;
else
- flags = GPIOF_OUT_INIT_LOW;
+ flags = GPIOD_OUT_LOW;
- ret = devm_gpio_request_one(dev, apb->spi_en_gpio,
- flags, "apb_spi_en");
- if (ret) {
- dev_err(dev, "Failed requesting SPI bus en gpio %d\n",
- apb->spi_en_gpio);
+ apb->spi_en = devm_gpiod_get(dev, "spi-en", flags);
+ if (IS_ERR(apb->spi_en)) {
+ ret = PTR_ERR(apb->spi_en);
+ dev_err(dev, "Failed requesting SPI bus en GPIO: %d\n",
+ ret);
return ret;
}
}
/* for flashing device should be in reset state */
- assert_reset(apb->resetn_gpio);
+ assert_reset(apb->resetn);
apb->state = ARCHE_PLATFORM_STATE_FW_FLASHING;
return 0;
@@ -176,9 +174,8 @@ static int standby_boot_seq(struct platform_device *pdev)
apb->state == ARCHE_PLATFORM_STATE_OFF)
return 0;
- if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
- gpio_is_valid(apb->spi_en_gpio))
- devm_gpio_free(dev, apb->spi_en_gpio);
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING && apb->spi_en)
+ devm_gpiod_put(dev, apb->spi_en);
/*
* As per WDM spec, do nothing
@@ -201,13 +198,12 @@ static void poweroff_seq(struct platform_device *pdev)
if (apb->init_disabled || apb->state == ARCHE_PLATFORM_STATE_OFF)
return;
- if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
- gpio_is_valid(apb->spi_en_gpio))
- devm_gpio_free(dev, apb->spi_en_gpio);
+ if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING && apb->spi_en)
+ devm_gpiod_put(dev, apb->spi_en);
/* disable the clock */
- if (gpio_is_valid(apb->clk_en_gpio))
- gpio_set_value(apb->clk_en_gpio, 0);
+ if (apb->clk_en)
+ gpiod_set_value(apb->clk_en, 0);
if (!IS_ERR(apb->vcore) && regulator_is_enabled(apb->vcore) > 0)
regulator_disable(apb->vcore);
@@ -216,7 +212,7 @@ static void poweroff_seq(struct platform_device *pdev)
regulator_disable(apb->vio);
/* As part of exit, put APB back in reset state */
- assert_reset(apb->resetn_gpio);
+ assert_reset(apb->resetn);
apb->state = ARCHE_PLATFORM_STATE_OFF;
/* TODO: May have to send an event to SVC about this exit */
@@ -226,7 +222,7 @@ static void apb_bootret_deassert(struct device *dev)
{
struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
- gpio_set_value(apb->boot_ret_gpio, 0);
+ gpiod_set_value(apb->boot_ret, 0);
}
int apb_ctrl_coldboot(struct device *dev)
@@ -322,66 +318,44 @@ static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
struct arche_apb_ctrl_drvdata *apb)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
int ret;
- apb->resetn_gpio = of_get_named_gpio(np, "reset-gpios", 0);
- if (apb->resetn_gpio < 0) {
- dev_err(dev, "failed to get reset gpio\n");
- return apb->resetn_gpio;
- }
- ret = devm_gpio_request_one(dev, apb->resetn_gpio,
- GPIOF_OUT_INIT_LOW, "apb-reset");
- if (ret) {
- dev_err(dev, "Failed requesting reset gpio %d\n",
- apb->resetn_gpio);
+ apb->resetn = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(apb->resetn)) {
+ ret = PTR_ERR(apb->resetn);
+ dev_err(dev, "Failed requesting reset GPIO: %d\n", ret);
return ret;
}
- apb->boot_ret_gpio = of_get_named_gpio(np, "boot-ret-gpios", 0);
- if (apb->boot_ret_gpio < 0) {
- dev_err(dev, "failed to get boot retention gpio\n");
- return apb->boot_ret_gpio;
- }
- ret = devm_gpio_request_one(dev, apb->boot_ret_gpio,
- GPIOF_OUT_INIT_LOW, "boot retention");
- if (ret) {
- dev_err(dev, "Failed requesting bootret gpio %d\n",
- apb->boot_ret_gpio);
+ apb->boot_ret = devm_gpiod_get(dev, "boot-ret", GPIOD_OUT_LOW);
+ if (IS_ERR(apb->boot_ret)) {
+ ret = PTR_ERR(apb->boot_ret);
+ dev_err(dev, "Failed requesting bootret GPIO: %d\n", ret);
return ret;
}
/* It's not mandatory to support power management interface */
- apb->pwroff_gpio = of_get_named_gpio(np, "pwr-off-gpios", 0);
- if (apb->pwroff_gpio < 0) {
- dev_err(dev, "failed to get power off gpio\n");
- return apb->pwroff_gpio;
- }
- ret = devm_gpio_request_one(dev, apb->pwroff_gpio,
- GPIOF_IN, "pwroff_n");
- if (ret) {
- dev_err(dev, "Failed requesting pwroff_n gpio %d\n",
- apb->pwroff_gpio);
+ apb->pwroff = devm_gpiod_get_optional(dev, "pwr-off", GPIOD_IN);
+ if (IS_ERR(apb->pwroff)) {
+ ret = PTR_ERR(apb->pwroff);
+ dev_err(dev, "Failed requesting pwroff_n GPIO: %d\n", ret);
return ret;
}
/* Do not make clock mandatory as of now (for DB3) */
- apb->clk_en_gpio = of_get_named_gpio(np, "clock-en-gpio", 0);
- if (apb->clk_en_gpio < 0) {
- dev_warn(dev, "failed to get clock en gpio\n");
- } else if (gpio_is_valid(apb->clk_en_gpio)) {
- ret = devm_gpio_request_one(dev, apb->clk_en_gpio,
- GPIOF_OUT_INIT_LOW, "apb_clk_en");
- if (ret) {
- dev_warn(dev, "Failed requesting APB clock en gpio %d\n",
- apb->clk_en_gpio);
- return ret;
- }
+ apb->clk_en = devm_gpiod_get_optional(dev, "clock-en", GPIOD_OUT_LOW);
+ if (IS_ERR(apb->clk_en)) {
+ ret = PTR_ERR(apb->clk_en);
+ dev_err(dev, "Failed requesting APB clock en GPIO: %d\n", ret);
+ return ret;
}
- apb->pwrdn_gpio = of_get_named_gpio(np, "pwr-down-gpios", 0);
- if (apb->pwrdn_gpio < 0)
- dev_warn(dev, "failed to get power down gpio\n");
+ apb->pwrdn = devm_gpiod_get(dev, "pwr-down", GPIOD_OUT_LOW);
+ if (IS_ERR(apb->pwrdn)) {
+ ret = PTR_ERR(apb->pwrdn);
+ dev_warn(dev, "Failed requesting power down GPIO: %d\n", ret);
+ return ret;
+ }
/* Regulators are optional, as we may have fixed supply coming in */
apb->vcore = devm_regulator_get(dev, "vcore");
@@ -404,12 +378,8 @@ static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
}
/* Only applicable for platform >= V2 */
- apb->spi_en_gpio = of_get_named_gpio(np, "spi-en-gpio", 0);
- if (apb->spi_en_gpio >= 0) {
- if (of_property_read_bool(pdev->dev.of_node,
- "spi-en-active-high"))
- apb->spi_en_polarity_high = true;
- }
+ if (of_property_read_bool(pdev->dev.of_node, "gb,spi-en-active-high"))
+ apb->spi_en_polarity_high = true;
return 0;
}
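The clock-enable and power-off lines above become optional descriptors. A short sketch of those semantics, assuming the same "clock-en" naming: devm_gpiod_get_optional() returns NULL when the line is simply absent, and setting a NULL descriptor is a no-op, so no validity check is needed at each use.

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_optional_clk_en(struct device *dev)
{
	struct gpio_desc *clk_en;

	clk_en = devm_gpiod_get_optional(dev, "clock-en", GPIOD_OUT_LOW);
	if (IS_ERR(clk_en))
		return PTR_ERR(clk_en);	/* real errors, e.g. -EPROBE_DEFER */

	/* NULL just means "no such line"; this call is then a no-op */
	gpiod_set_value(clk_en, 1);

	return 0;
}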
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 4c36e88766e7..6eb842040c22 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -8,10 +8,9 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -45,14 +44,14 @@ enum svc_wakedetect_state {
struct arche_platform_drvdata {
/* Control GPIO signals to and from AP <=> SVC */
- int svc_reset_gpio;
+ struct gpio_desc *svc_reset;
bool is_reset_act_hi;
- int svc_sysboot_gpio;
- int wake_detect_gpio; /* bi-dir,maps to WAKE_MOD & WAKE_FRAME signals */
+ struct gpio_desc *svc_sysboot;
+ struct gpio_desc *wake_detect; /* bi-dir, maps to WAKE_MOD & WAKE_FRAME signals */
enum arche_platform_state state;
- int svc_refclk_req;
+ struct gpio_desc *svc_refclk_req;
struct clk *svc_ref_clk;
struct pinctrl *pinctrl;
@@ -85,9 +84,9 @@ static void arche_platform_set_wake_detect_state(
arche_pdata->wake_detect_state = state;
}
-static inline void svc_reset_onoff(unsigned int gpio, bool onoff)
+static inline void svc_reset_onoff(struct gpio_desc *gpio, bool onoff)
{
- gpio_set_value(gpio, onoff);
+ gpiod_set_raw_value(gpio, onoff);
}
static int apb_cold_boot(struct device *dev, void *data)
@@ -116,7 +115,6 @@ static int apb_poweroff(struct device *dev, void *data)
static void arche_platform_wd_irq_en(struct arche_platform_drvdata *arche_pdata)
{
/* Enable interrupt here, to read event back from SVC */
- gpio_direction_input(arche_pdata->wake_detect_gpio);
enable_irq(arche_pdata->wake_detect_irq);
}
@@ -160,7 +158,7 @@ static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
spin_lock_irqsave(&arche_pdata->wake_lock, flags);
- if (gpio_get_value(arche_pdata->wake_detect_gpio)) {
+ if (gpiod_get_value(arche_pdata->wake_detect)) {
/* wake/detect rising */
/*
@@ -224,10 +222,9 @@ arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
dev_info(arche_pdata->dev, "Booting from cold boot state\n");
- svc_reset_onoff(arche_pdata->svc_reset_gpio,
- arche_pdata->is_reset_act_hi);
+ svc_reset_onoff(arche_pdata->svc_reset, arche_pdata->is_reset_act_hi);
- gpio_set_value(arche_pdata->svc_sysboot_gpio, 0);
+ gpiod_set_value(arche_pdata->svc_sysboot, 0);
usleep_range(100, 200);
ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
@@ -238,8 +235,7 @@ arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
}
/* bring SVC out of reset */
- svc_reset_onoff(arche_pdata->svc_reset_gpio,
- !arche_pdata->is_reset_act_hi);
+ svc_reset_onoff(arche_pdata->svc_reset, !arche_pdata->is_reset_act_hi);
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_ACTIVE);
@@ -259,10 +255,9 @@ arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
dev_info(arche_pdata->dev, "Switching to FW flashing state\n");
- svc_reset_onoff(arche_pdata->svc_reset_gpio,
- arche_pdata->is_reset_act_hi);
+ svc_reset_onoff(arche_pdata->svc_reset, arche_pdata->is_reset_act_hi);
- gpio_set_value(arche_pdata->svc_sysboot_gpio, 1);
+ gpiod_set_value(arche_pdata->svc_sysboot, 1);
usleep_range(100, 200);
@@ -273,8 +268,7 @@ arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
return ret;
}
- svc_reset_onoff(arche_pdata->svc_reset_gpio,
- !arche_pdata->is_reset_act_hi);
+ svc_reset_onoff(arche_pdata->svc_reset, !arche_pdata->is_reset_act_hi);
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_FW_FLASHING);
@@ -305,8 +299,7 @@ arche_platform_poweroff_seq(struct arche_platform_drvdata *arche_pdata)
clk_disable_unprepare(arche_pdata->svc_ref_clk);
/* As part of exit, put APB back in reset state */
- svc_reset_onoff(arche_pdata->svc_reset_gpio,
- arche_pdata->is_reset_act_hi);
+ svc_reset_onoff(arche_pdata->svc_reset, arche_pdata->is_reset_act_hi);
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
}
@@ -435,6 +428,7 @@ static int arche_platform_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret;
+ unsigned int flags;
arche_pdata = devm_kzalloc(&pdev->dev, sizeof(*arche_pdata),
GFP_KERNEL);
@@ -444,61 +438,33 @@ static int arche_platform_probe(struct platform_device *pdev)
/* setup svc reset gpio */
arche_pdata->is_reset_act_hi = of_property_read_bool(np,
"svc,reset-active-high");
- arche_pdata->svc_reset_gpio = of_get_named_gpio(np,
- "svc,reset-gpio",
- 0);
- if (!gpio_is_valid(arche_pdata->svc_reset_gpio)) {
- dev_err(dev, "failed to get reset-gpio\n");
- return arche_pdata->svc_reset_gpio;
- }
- ret = devm_gpio_request(dev, arche_pdata->svc_reset_gpio, "svc-reset");
- if (ret) {
- dev_err(dev, "failed to request svc-reset gpio:%d\n", ret);
- return ret;
- }
- ret = gpio_direction_output(arche_pdata->svc_reset_gpio,
- arche_pdata->is_reset_act_hi);
- if (ret) {
- dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
+ if (arche_pdata->is_reset_act_hi)
+ flags = GPIOD_OUT_HIGH;
+ else
+ flags = GPIOD_OUT_LOW;
+
+ arche_pdata->svc_reset = devm_gpiod_get(dev, "svc,reset", flags);
+ if (IS_ERR(arche_pdata->svc_reset)) {
+ ret = PTR_ERR(arche_pdata->svc_reset);
+ dev_err(dev, "failed to request svc-reset GPIO: %d\n", ret);
return ret;
}
arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
- arche_pdata->svc_sysboot_gpio = of_get_named_gpio(np,
- "svc,sysboot-gpio",
- 0);
- if (!gpio_is_valid(arche_pdata->svc_sysboot_gpio)) {
- dev_err(dev, "failed to get sysboot gpio\n");
- return arche_pdata->svc_sysboot_gpio;
- }
- ret = devm_gpio_request(dev, arche_pdata->svc_sysboot_gpio, "sysboot0");
- if (ret) {
- dev_err(dev, "failed to request sysboot0 gpio:%d\n", ret);
- return ret;
- }
- ret = gpio_direction_output(arche_pdata->svc_sysboot_gpio, 0);
- if (ret) {
- dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
+ arche_pdata->svc_sysboot = devm_gpiod_get(dev, "svc,sysboot",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(arche_pdata->svc_sysboot)) {
+ ret = PTR_ERR(arche_pdata->svc_sysboot);
+ dev_err(dev, "failed to request sysboot0 GPIO: %d\n", ret);
return ret;
}
/* setup the clock request gpio first */
- arche_pdata->svc_refclk_req = of_get_named_gpio(np,
- "svc,refclk-req-gpio",
- 0);
- if (!gpio_is_valid(arche_pdata->svc_refclk_req)) {
- dev_err(dev, "failed to get svc clock-req gpio\n");
- return arche_pdata->svc_refclk_req;
- }
- ret = devm_gpio_request(dev, arche_pdata->svc_refclk_req,
- "svc-clk-req");
- if (ret) {
- dev_err(dev, "failed to request svc-clk-req gpio: %d\n", ret);
- return ret;
- }
- ret = gpio_direction_input(arche_pdata->svc_refclk_req);
- if (ret) {
- dev_err(dev, "failed to set svc-clk-req gpio dir :%d\n", ret);
+ arche_pdata->svc_refclk_req = devm_gpiod_get(dev, "svc,refclk-req",
+ GPIOD_IN);
+ if (IS_ERR(arche_pdata->svc_refclk_req)) {
+ ret = PTR_ERR(arche_pdata->svc_refclk_req);
+ dev_err(dev, "failed to request svc-clk-req GPIO: %d\n", ret);
return ret;
}
@@ -515,19 +481,11 @@ static int arche_platform_probe(struct platform_device *pdev)
arche_pdata->num_apbs = of_get_child_count(np);
dev_dbg(dev, "Number of APB's available - %d\n", arche_pdata->num_apbs);
- arche_pdata->wake_detect_gpio = of_get_named_gpio(np,
- "svc,wake-detect-gpio",
- 0);
- if (arche_pdata->wake_detect_gpio < 0) {
- dev_err(dev, "failed to get wake detect gpio\n");
- return arche_pdata->wake_detect_gpio;
- }
-
- ret = devm_gpio_request(dev, arche_pdata->wake_detect_gpio,
- "wake detect");
- if (ret) {
- dev_err(dev, "Failed requesting wake_detect gpio %d\n",
- arche_pdata->wake_detect_gpio);
+ arche_pdata->wake_detect = devm_gpiod_get(dev, "svc,wake-detect",
+ GPIOD_IN);
+ if (IS_ERR(arche_pdata->wake_detect)) {
+ ret = PTR_ERR(arche_pdata->wake_detect);
+ dev_err(dev, "Failed requesting wake_detect GPIO: %d\n", ret);
return ret;
}
@@ -538,7 +496,7 @@ static int arche_platform_probe(struct platform_device *pdev)
spin_lock_init(&arche_pdata->wake_lock);
mutex_init(&arche_pdata->platform_state_mutex);
arche_pdata->wake_detect_irq =
- gpio_to_irq(arche_pdata->wake_detect_gpio);
+ gpiod_to_irq(arche_pdata->wake_detect);
ret = devm_request_threaded_irq(dev, arche_pdata->wake_detect_irq,
arche_platform_wd_irq,
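The wake-detect conversion maps the descriptor straight to an interrupt with gpiod_to_irq(). A sketch of that pairing; the trigger flags and handler arguments are assumptions, not taken from this driver:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static int example_wake_irq(struct device *dev, struct gpio_desc *wake,
			    irq_handler_t thread_fn, void *data)
{
	int irq;

	irq = gpiod_to_irq(wake);	/* line was requested with GPIOD_IN */
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					 "wake-detect", data);
}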
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 8bcbc3c4588c..4ac30accf226 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -923,7 +923,6 @@ static int gbaudio_tplg_create_wcontrol(struct gbaudio_module_info *gb,
break;
default:
return -EINVAL;
-
}
dev_dbg(gb->dev, "%s:%d DAPM control created, ret:%d\n", ctl->name,
diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c
index 81c018da1248..e97b2b87ba47 100644
--- a/drivers/staging/greybus/bundle.c
+++ b/drivers/staging/greybus/bundle.c
@@ -65,7 +65,7 @@ static struct attribute *bundle_attrs[] = {
ATTRIBUTE_GROUPS(bundle);
static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
- u8 bundle_id)
+ u8 bundle_id)
{
struct gb_bundle *bundle;
diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h
index 8deeb1d5f008..5ca3befc0636 100644
--- a/drivers/staging/greybus/connection.h
+++ b/drivers/staging/greybus/connection.h
@@ -88,7 +88,7 @@ void gb_connection_mode_switch_prepare(struct gb_connection *connection);
void gb_connection_mode_switch_complete(struct gb_connection *connection);
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
- u8 *data, size_t length);
+ u8 *data, size_t length);
void gb_connection_latency_tag_enable(struct gb_connection *connection);
void gb_connection_latency_tag_disable(struct gb_connection *connection);
diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c
index ffa41d31896d..a9e8b6036cac 100644
--- a/drivers/staging/greybus/control.c
+++ b/drivers/staging/greybus/control.c
@@ -15,7 +15,6 @@
#define GB_CONTROL_VERSION_MAJOR 0
#define GB_CONTROL_VERSION_MINOR 1
-
static int gb_control_get_version(struct gb_control *control)
{
struct gb_interface *intf = control->connection->intf;
diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c
index 412337daf45c..d6b0d49130c0 100644
--- a/drivers/staging/greybus/core.c
+++ b/drivers/staging/greybus/core.c
@@ -266,7 +266,7 @@ static int greybus_remove(struct device *dev)
}
int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
- const char *mod_name)
+ const char *mod_name)
{
int retval;
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index e110681e6f86..3151004d26fb 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -9,9 +9,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#include "greybus.h"
@@ -39,11 +39,6 @@ struct gb_gpio_controller {
struct gpio_chip chip;
struct irq_chip irqc;
- struct irq_chip *irqchip;
- struct irq_domain *irqdomain;
- unsigned int irq_base;
- irq_flow_handler_t irq_handler;
- unsigned int irq_default_type;
struct mutex irq_lock;
};
#define gpio_chip_to_gb_gpio_controller(chip) \
@@ -391,7 +386,7 @@ static int gb_gpio_request_handler(struct gb_operation *op)
return -EINVAL;
}
- irq = irq_find_mapping(ggc->irqdomain, event->which);
+ irq = irq_find_mapping(ggc->chip.irq.domain, event->which);
if (!irq) {
dev_err(dev, "failed to find IRQ\n");
return -EINVAL;
@@ -506,135 +501,6 @@ static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
return ret;
}
-/**
- * gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
- * @d: the irqdomain used by this irqchip
- * @irq: the global irq number used by this GB gpio irqchip irq
- * @hwirq: the local IRQ/GPIO line offset on this GB gpio
- *
- * This function will set up the mapping for a certain IRQ line on a
- * GB gpio by assigning the GB gpio as chip data, and using the irqchip
- * stored inside the GB gpio.
- */
-static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct gpio_chip *chip = domain->host_data;
- struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
-
- irq_set_chip_data(irq, ggc);
- irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
- irq_set_noprobe(irq);
- /*
- * No set-up of the hardware will happen if IRQ_TYPE_NONE
- * is passed as default type.
- */
- if (ggc->irq_default_type != IRQ_TYPE_NONE)
- irq_set_irq_type(irq, ggc->irq_default_type);
-
- return 0;
-}
-
-static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
-{
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
-}
-
-static const struct irq_domain_ops gb_gpio_domain_ops = {
- .map = gb_gpio_irq_map,
- .unmap = gb_gpio_irq_unmap,
-};
-
-/**
- * gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
- * @ggc: the gb_gpio_controller to remove the irqchip from
- *
- * This is called only from gb_gpio_remove()
- */
-static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
-{
- unsigned int offset;
-
- /* Remove all IRQ mappings and delete the domain */
- if (ggc->irqdomain) {
- for (offset = 0; offset < (ggc->line_max + 1); offset++)
- irq_dispose_mapping(irq_find_mapping(ggc->irqdomain,
- offset));
- irq_domain_remove(ggc->irqdomain);
- }
-
- if (ggc->irqchip)
- ggc->irqchip = NULL;
-}
-
-/**
- * gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
- * @chip: the gpio chip to add the irqchip to
- * @irqchip: the irqchip to add to the adapter
- * @first_irq: if not dynamically assigned, the base (first) IRQ to
- * allocate gpio irqs from
- * @handler: the irq handler to use (often a predefined irq core function)
- * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
- * to have the core avoid setting up any default type in the hardware.
- *
- * This function closely associates a certain irqchip with a certain
- * gpio chip, providing an irq domain to translate the local IRQs to
- * global irqs, and making sure that the gpio chip
- * is passed as chip data to all related functions. Driver callbacks
- * need to use container_of() to get their local state containers back
- * from the gpio chip passed as chip data. An irqdomain will be stored
- * in the gpio chip that shall be used by the driver to handle IRQ number
- * translation. The gpio chip will need to be initialized and registered
- * before calling this function.
- */
-static int gb_gpio_irqchip_add(struct gpio_chip *chip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
- struct gb_gpio_controller *ggc;
- unsigned int offset;
- unsigned int irq_base;
-
- if (!chip || !irqchip)
- return -EINVAL;
-
- ggc = gpio_chip_to_gb_gpio_controller(chip);
-
- ggc->irqchip = irqchip;
- ggc->irq_handler = handler;
- ggc->irq_default_type = type;
- ggc->irqdomain = irq_domain_add_simple(NULL,
- ggc->line_max + 1, first_irq,
- &gb_gpio_domain_ops, chip);
- if (!ggc->irqdomain) {
- ggc->irqchip = NULL;
- return -EINVAL;
- }
-
- /*
- * Prepare the mapping since the irqchip shall be orthogonal to
- * any gpio calls. If the first_irq was zero, this is
- * necessary to allocate descriptors for all IRQs.
- */
- for (offset = 0; offset < (ggc->line_max + 1); offset++) {
- irq_base = irq_create_mapping(ggc->irqdomain, offset);
- if (offset == 0)
- ggc->irq_base = irq_base;
- }
-
- return 0;
-}
-
-static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
- struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
-
- return irq_find_mapping(ggc->irqdomain, offset);
-}
-
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
@@ -694,7 +560,6 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
gpio->get = gb_gpio_get;
gpio->set = gb_gpio_set;
gpio->set_config = gb_gpio_set_config;
- gpio->to_irq = gb_gpio_to_irq;
gpio->base = -1; /* Allocate base dynamically */
gpio->ngpio = ggc->line_max + 1;
gpio->can_sleep = true;
@@ -703,24 +568,24 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
if (ret)
goto exit_line_free;
- ret = gb_gpio_irqchip_add(gpio, irqc, 0,
- handle_level_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_add(gpio);
if (ret) {
- dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
goto exit_line_free;
}
- ret = gpiochip_add(gpio);
+ ret = gpiochip_irqchip_add(gpio, irqc, 0, handle_level_irq,
+ IRQ_TYPE_NONE);
if (ret) {
- dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
- goto exit_gpio_irqchip_remove;
+ dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
+ goto exit_gpiochip_remove;
}
gbphy_runtime_put_autosuspend(gbphy_dev);
return 0;
-exit_gpio_irqchip_remove:
- gb_gpio_irqchip_remove(ggc);
+exit_gpiochip_remove:
+ gpiochip_remove(gpio);
exit_line_free:
kfree(ggc->lines);
exit_connection_disable:
@@ -744,7 +609,6 @@ static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
gb_connection_disable_rx(connection);
gpiochip_remove(&ggc->chip);
- gb_gpio_irqchip_remove(ggc);
gb_connection_disable(connection);
gb_connection_destroy(connection);
kfree(ggc->lines);
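For reference, a minimal sketch (not part of the patch) of the gpiolib irqchip helper flow that the gb-gpio conversion above adopts; the names my_probe_irq_setup, my_chip and my_irqchip are illustrative only:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int my_probe_irq_setup(struct gpio_chip *my_chip,
			      struct irq_chip *my_irqchip)
{
	int ret;

	/* Register the gpio_chip first ... */
	ret = gpiochip_add(my_chip);
	if (ret)
		return ret;

	/*
	 * ... then let gpiolib create the irq_domain, the per-line mappings
	 * and the gpio-to-irq translation that the removed hand-rolled
	 * gb_gpio_irqchip_add()/gb_gpio_to_irq() code used to provide.
	 */
	ret = gpiochip_irqchip_add(my_chip, my_irqchip, 0, handle_level_irq,
				   IRQ_TYPE_NONE);
	if (ret)
		gpiochip_remove(my_chip);

	return ret;
}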
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index fa8b27e091a2..3e154562c64d 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -1,14 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
index 986e841f6b5e..5cf12c14cca4 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
@@ -1,14 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
#include <linux/firmware.h>
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
index 83a13ca7259a..80903ec36b76 100644
--- a/drivers/staging/gs_fpgaboot/io.c
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -1,15 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/init.h>
@@ -50,8 +39,7 @@ int xl_supported_prog_bus_width(enum wbus bus_bytes)
case bus_2byte:
break;
default:
- pr_err("unsupported program bus width %d\n",
- bus_bytes);
+ pr_err("unsupported program bus width %d\n", bus_bytes);
return 0;
}
diff --git a/drivers/staging/gs_fpgaboot/io.h b/drivers/staging/gs_fpgaboot/io.h
index bc5d99cbda8f..9bd86a92e90f 100644
--- a/drivers/staging/gs_fpgaboot/io.h
+++ b/drivers/staging/gs_fpgaboot/io.h
@@ -1,14 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
#define GPDIR 0
#define GPCFG 4 /* open drain or not */
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index fc23059f1673..7a93d3a5c113 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -3,40 +3,6 @@
#
menu "Analog to digital converters"
-config AD7606
- tristate "Analog Devices AD7606 ADC driver"
- depends on GPIOLIB || COMPILE_TEST
- depends on HAS_IOMEM
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- Say yes here to build support for Analog Devices:
- ad7605-4, ad7606, ad7606-6, ad7606-4 analog to digital converters (ADC).
-
- To compile this driver as a module, choose M here: the
- module will be called ad7606.
-
-config AD7606_IFACE_PARALLEL
- tristate "parallel interface support"
- depends on AD7606
- help
- Say yes here to include parallel interface support on the AD7606
- ADC driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7606_parallel.
-
-config AD7606_IFACE_SPI
- tristate "spi interface support"
- depends on AD7606
- depends on SPI
- help
- Say yes here to include parallel interface support on the AD7606
- ADC driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7606_spi.
-
config AD7780
tristate "Analog Devices AD7780 and similar ADCs driver"
depends on SPI
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index ebe83c1ad362..7a421088ff82 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -3,10 +3,6 @@
# Makefile for industrial I/O ADC drivers
#
-obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
-obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
-obj-$(CONFIG_AD7606) += ad7606.o
-
obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7816) += ad7816.o
obj-$(CONFIG_AD7192) += ad7192.o
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 14f6a3ced060..d9df12665176 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -97,6 +97,10 @@
#define AD7280A_NUM_CH (AD7280A_AUX_ADC_6 - \
AD7280A_CELL_VOLTAGE_1 + 1)
+#define AD7280A_CALC_VOLTAGE_CHAN_NUM(d, c) ((d * AD7280A_CELLS_PER_DEV) + c)
+#define AD7280A_CALC_TEMP_CHAN_NUM(d, c) ((d * AD7280A_CELLS_PER_DEV) + \
+ c - AD7280A_CELLS_PER_DEV)
+
#define AD7280A_DEVADDR_MASTER 0
#define AD7280A_DEVADDR_ALL 0x1F
/* 5-bit device address is sent LSB first */
@@ -496,72 +500,171 @@ static const struct attribute_group ad7280_attrs_group = {
.attrs = ad7280_attributes,
};
+static void ad7280_voltage_channel_init(struct iio_chan_spec *chan, int i)
+{
+ chan->type = IIO_VOLTAGE;
+ chan->differential = 1;
+ chan->channel = i;
+ chan->channel2 = chan->channel + 1;
+}
+
+static void ad7280_temp_channel_init(struct iio_chan_spec *chan, int i)
+{
+ chan->type = IIO_TEMP;
+ chan->channel = i;
+}
+
+static void ad7280_common_fields_init(struct iio_chan_spec *chan, int addr,
+ int cnt)
+{
+ chan->indexed = 1;
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->address = addr;
+ chan->scan_index = cnt;
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 12;
+ chan->scan_type.storagebits = 32;
+}
+
+static void ad7280_total_voltage_channel_init(struct iio_chan_spec *chan,
+ int cnt, int dev)
+{
+ chan->type = IIO_VOLTAGE;
+ chan->differential = 1;
+ chan->channel = 0;
+ chan->channel2 = dev * AD7280A_CELLS_PER_DEV;
+ chan->address = AD7280A_ALL_CELLS;
+ chan->indexed = 1;
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->scan_index = cnt;
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 32;
+ chan->scan_type.storagebits = 32;
+}
+
+static void ad7280_timestamp_channel_init(struct iio_chan_spec *chan, int cnt)
+{
+ chan->type = IIO_TIMESTAMP;
+ chan->channel = -1;
+ chan->scan_index = cnt;
+ chan->scan_type.sign = 's';
+ chan->scan_type.realbits = 64;
+ chan->scan_type.storagebits = 64;
+}
+
+static void ad7280_init_dev_channels(struct ad7280_state *st, int dev, int *cnt)
+{
+ int addr, ch, i;
+ struct iio_chan_spec *chan;
+
+ for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_AUX_ADC_6; ch++) {
+ chan = &st->channels[*cnt];
+
+ if (ch < AD7280A_AUX_ADC_1) {
+ i = AD7280A_CALC_VOLTAGE_CHAN_NUM(dev, ch);
+ ad7280_voltage_channel_init(chan, i);
+ } else {
+ i = AD7280A_CALC_TEMP_CHAN_NUM(dev, ch);
+ ad7280_temp_channel_init(chan, i);
+ }
+
+ addr = ad7280a_devaddr(dev) << 8 | ch;
+ ad7280_common_fields_init(chan, addr, *cnt);
+
+ (*cnt)++;
+ }
+}
+
static int ad7280_channel_init(struct ad7280_state *st)
{
- int dev, ch, cnt;
+ int dev, cnt = 0;
st->channels = devm_kcalloc(&st->spi->dev, (st->slave_num + 1) * 12 + 2,
sizeof(*st->channels), GFP_KERNEL);
if (!st->channels)
return -ENOMEM;
- for (dev = 0, cnt = 0; dev <= st->slave_num; dev++)
- for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_AUX_ADC_6;
- ch++, cnt++) {
- if (ch < AD7280A_AUX_ADC_1) {
- st->channels[cnt].type = IIO_VOLTAGE;
- st->channels[cnt].differential = 1;
- st->channels[cnt].channel = (dev * 6) + ch;
- st->channels[cnt].channel2 =
- st->channels[cnt].channel + 1;
- } else {
- st->channels[cnt].type = IIO_TEMP;
- st->channels[cnt].channel = (dev * 6) + ch - 6;
- }
- st->channels[cnt].indexed = 1;
- st->channels[cnt].info_mask_separate =
- BIT(IIO_CHAN_INFO_RAW);
- st->channels[cnt].info_mask_shared_by_type =
- BIT(IIO_CHAN_INFO_SCALE);
- st->channels[cnt].address =
- ad7280a_devaddr(dev) << 8 | ch;
- st->channels[cnt].scan_index = cnt;
- st->channels[cnt].scan_type.sign = 'u';
- st->channels[cnt].scan_type.realbits = 12;
- st->channels[cnt].scan_type.storagebits = 32;
- st->channels[cnt].scan_type.shift = 0;
- }
+ for (dev = 0; dev <= st->slave_num; dev++)
+ ad7280_init_dev_channels(st, dev, &cnt);
- st->channels[cnt].type = IIO_VOLTAGE;
- st->channels[cnt].differential = 1;
- st->channels[cnt].channel = 0;
- st->channels[cnt].channel2 = dev * 6;
- st->channels[cnt].address = AD7280A_ALL_CELLS;
- st->channels[cnt].indexed = 1;
- st->channels[cnt].info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
- st->channels[cnt].info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
- st->channels[cnt].scan_index = cnt;
- st->channels[cnt].scan_type.sign = 'u';
- st->channels[cnt].scan_type.realbits = 32;
- st->channels[cnt].scan_type.storagebits = 32;
- st->channels[cnt].scan_type.shift = 0;
+ ad7280_total_voltage_channel_init(&st->channels[cnt], cnt, dev);
cnt++;
- st->channels[cnt].type = IIO_TIMESTAMP;
- st->channels[cnt].channel = -1;
- st->channels[cnt].scan_index = cnt;
- st->channels[cnt].scan_type.sign = 's';
- st->channels[cnt].scan_type.realbits = 64;
- st->channels[cnt].scan_type.storagebits = 64;
- st->channels[cnt].scan_type.shift = 0;
+ ad7280_timestamp_channel_init(&st->channels[cnt], cnt);
return cnt + 1;
}
-static int ad7280_attr_init(struct ad7280_state *st)
+static int ad7280_balance_switch_attr_init(struct iio_dev_attr *attr,
+ struct device *dev, int addr, int i)
{
- int dev, ch, cnt;
- unsigned int index;
+ attr->address = addr;
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = ad7280_show_balance_sw;
+ attr->dev_attr.store = ad7280_store_balance_sw;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "in%d-in%d_balance_switch_en",
+ i, i + 1);
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ad7280_balance_timer_attr_init(struct iio_dev_attr *attr,
+ struct device *dev, int addr, int i)
+{
+ attr->address = addr;
+ attr->dev_attr.attr.mode = 0644;
+ attr->dev_attr.show = ad7280_show_balance_timer;
+ attr->dev_attr.store = ad7280_store_balance_timer;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "in%d-in%d_balance_timer",
+ i, i + 1);
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ad7280_init_dev_attrs(struct ad7280_state *st, int dev, int *cnt)
+{
+ int addr, ch, i, ret;
struct iio_dev_attr *iio_attr;
+ struct device *sdev = &st->spi->dev;
+
+ for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_CELL_VOLTAGE_6; ch++) {
+ iio_attr = &st->iio_attr[*cnt];
+ addr = ad7280a_devaddr(dev) << 8 | ch;
+ i = dev * AD7280A_CELLS_PER_DEV + ch;
+
+ ret = ad7280_balance_switch_attr_init(iio_attr, sdev, addr, i);
+ if (ret < 0)
+ return ret;
+
+ ad7280_attributes[*cnt] = &iio_attr->dev_attr.attr;
+
+ (*cnt)++;
+ iio_attr = &st->iio_attr[*cnt];
+ addr = ad7280a_devaddr(dev) << 8 | (AD7280A_CB1_TIMER + ch);
+
+ ret = ad7280_balance_timer_attr_init(iio_attr, sdev, addr, i);
+ if (ret < 0)
+ return ret;
+
+ ad7280_attributes[*cnt] = &iio_attr->dev_attr.attr;
+ (*cnt)++;
+ }
+
+ ad7280_attributes[*cnt] = NULL;
+
+ return 0;
+}
+
+static int ad7280_attr_init(struct ad7280_state *st)
+{
+ int dev, cnt = 0, ret;
st->iio_attr = devm_kcalloc(&st->spi->dev, 2, sizeof(*st->iio_attr) *
(st->slave_num + 1) * AD7280A_CELLS_PER_DEV,
@@ -569,41 +672,11 @@ static int ad7280_attr_init(struct ad7280_state *st)
if (!st->iio_attr)
return -ENOMEM;
- for (dev = 0, cnt = 0; dev <= st->slave_num; dev++)
- for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_CELL_VOLTAGE_6;
- ch++, cnt++) {
- iio_attr = &st->iio_attr[cnt];
- index = dev * AD7280A_CELLS_PER_DEV + ch;
- iio_attr->address = ad7280a_devaddr(dev) << 8 | ch;
- iio_attr->dev_attr.attr.mode = 0644;
- iio_attr->dev_attr.show = ad7280_show_balance_sw;
- iio_attr->dev_attr.store = ad7280_store_balance_sw;
- iio_attr->dev_attr.attr.name =
- devm_kasprintf(&st->spi->dev, GFP_KERNEL,
- "in%d-in%d_balance_switch_en",
- index, index + 1);
- if (!iio_attr->dev_attr.attr.name)
- return -ENOMEM;
-
- ad7280_attributes[cnt] = &iio_attr->dev_attr.attr;
- cnt++;
- iio_attr = &st->iio_attr[cnt];
- iio_attr->address = ad7280a_devaddr(dev) << 8 |
- (AD7280A_CB1_TIMER + ch);
- iio_attr->dev_attr.attr.mode = 0644;
- iio_attr->dev_attr.show = ad7280_show_balance_timer;
- iio_attr->dev_attr.store = ad7280_store_balance_timer;
- iio_attr->dev_attr.attr.name =
- devm_kasprintf(&st->spi->dev, GFP_KERNEL,
- "in%d-in%d_balance_timer",
- index, index + 1);
- if (!iio_attr->dev_attr.attr.name)
- return -ENOMEM;
-
- ad7280_attributes[cnt] = &iio_attr->dev_attr.attr;
- }
-
- ad7280_attributes[cnt] = NULL;
+ for (dev = 0; dev <= st->slave_num; dev++) {
+ ret = ad7280_init_dev_attrs(st, dev, &cnt);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 5209651a1b25..ee50e7296795 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -65,7 +65,7 @@ enum ad7816_type {
static int ad7816_spi_read(struct ad7816_chip_info *chip, u16 *data)
{
struct spi_device *spi_dev = chip->spi_dev;
- int ret = 0;
+ int ret;
__be16 buf;
gpiod_set_value(chip->rdwr_pin, 1);
@@ -106,7 +106,7 @@ static int ad7816_spi_read(struct ad7816_chip_info *chip, u16 *data)
static int ad7816_spi_write(struct ad7816_chip_info *chip, u8 data)
{
struct spi_device *spi_dev = chip->spi_dev;
- int ret = 0;
+ int ret;
gpiod_set_value(chip->rdwr_pin, 1);
gpiod_set_value(chip->rdwr_pin, 0);
@@ -354,8 +354,7 @@ static int ad7816_probe(struct spi_device *spi_dev)
{
struct ad7816_chip_info *chip;
struct iio_dev *indio_dev;
- int ret = 0;
- int i;
+ int i, ret;
indio_dev = devm_iio_device_alloc(&spi_dev->dev, sizeof(*chip));
if (!indio_dev)
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 2d51bd425662..0f26bc38edc6 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -43,7 +43,7 @@ static int adt7316_i2c_read(void *client, u8 reg, u8 *data)
static int adt7316_i2c_write(void *client, u8 reg, u8 data)
{
struct i2c_client *cl = client;
- int ret = 0;
+ int ret;
ret = i2c_smbus_write_byte_data(cl, reg, data);
if (ret < 0)
@@ -55,7 +55,7 @@ static int adt7316_i2c_write(void *client, u8 reg, u8 data)
static int adt7316_i2c_multi_read(void *client, u8 reg, u8 count, u8 *data)
{
struct i2c_client *cl = client;
- int i, ret = 0;
+ int i, ret;
if (count > ADT7316_REG_MAX_ADDR)
count = ADT7316_REG_MAX_ADDR;
@@ -74,7 +74,7 @@ static int adt7316_i2c_multi_read(void *client, u8 reg, u8 count, u8 *data)
static int adt7316_i2c_multi_write(void *client, u8 reg, u8 count, u8 *data)
{
struct i2c_client *cl = client;
- int i, ret = 0;
+ int i, ret;
if (count > ADT7316_REG_MAX_ADDR)
count = ADT7316_REG_MAX_ADDR;
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index e75827e326a6..8294b9c1e3c2 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -27,7 +27,7 @@ static int adt7316_spi_multi_read(void *client, u8 reg, u8 count, u8 *data)
{
struct spi_device *spi_dev = client;
u8 cmd[2];
- int ret = 0;
+ int ret;
if (count > ADT7316_REG_MAX_ADDR)
count = ADT7316_REG_MAX_ADDR;
@@ -56,7 +56,7 @@ static int adt7316_spi_multi_write(void *client, u8 reg, u8 count, u8 *data)
{
struct spi_device *spi_dev = client;
u8 buf[ADT7316_REG_MAX_ADDR + 2];
- int i, ret = 0;
+ int i, ret;
if (count > ADT7316_REG_MAX_ADDR)
count = ADT7316_REG_MAX_ADDR;
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index dc93e85808e0..6f7891b567b9 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -47,6 +47,8 @@
#define ADT7516_MSB_AIN3 0xA
#define ADT7516_MSB_AIN4 0xB
#define ADT7316_DA_DATA_BASE 0x10
+#define ADT7316_DA_10_BIT_LSB_SHIFT 6
+#define ADT7316_DA_12_BIT_LSB_SHIFT 4
#define ADT7316_DA_MSB_DATA_REGS 4
#define ADT7316_LSB_DAC_A 0x10
#define ADT7316_MSB_DAC_A 0x11
@@ -59,8 +61,8 @@
#define ADT7316_CONFIG1 0x18
#define ADT7316_CONFIG2 0x19
#define ADT7316_CONFIG3 0x1A
-#define ADT7316_LDAC_CONFIG 0x1B
-#define ADT7316_DAC_CONFIG 0x1C
+#define ADT7316_DAC_CONFIG 0x1B
+#define ADT7316_LDAC_CONFIG 0x1C
#define ADT7316_INT_MASK1 0x1D
#define ADT7316_INT_MASK2 0x1E
#define ADT7316_IN_TEMP_OFFSET 0x1F
@@ -117,7 +119,7 @@
*/
#define ADT7316_ADCLK_22_5 0x1
#define ADT7316_DA_HIGH_RESOLUTION 0x2
-#define ADT7316_DA_EN_VIA_DAC_LDCA 0x4
+#define ADT7316_DA_EN_VIA_DAC_LDAC 0x8
#define ADT7516_AIN_IN_VREF 0x10
#define ADT7316_EN_IN_TEMP_PROP_DACA 0x20
#define ADT7316_EN_EX_TEMP_PROP_DACB 0x40
@@ -127,6 +129,7 @@
*/
#define ADT7316_DA_2VREF_CH_MASK 0xF
#define ADT7316_DA_EN_MODE_MASK 0x30
+#define ADT7316_DA_EN_MODE_SHIFT 4
#define ADT7316_DA_EN_MODE_SINGLE 0x00
#define ADT7316_DA_EN_MODE_AB_CD 0x10
#define ADT7316_DA_EN_MODE_ABCD 0x20
@@ -632,9 +635,7 @@ static ssize_t adt7316_show_da_high_resolution(struct device *dev,
struct adt7316_chip_info *chip = iio_priv(dev_info);
if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) {
- if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
- return sprintf(buf, "1 (12 bits)\n");
- if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
+ if (chip->id != ID_ADT7318 && chip->id != ID_ADT7519)
return sprintf(buf, "1 (10 bits)\n");
}
@@ -651,17 +652,12 @@ static ssize_t adt7316_store_da_high_resolution(struct device *dev,
u8 config3;
int ret;
- chip->dac_bits = 8;
+ if (chip->id == ID_ADT7318 || chip->id == ID_ADT7519)
+ return -EPERM;
- if (buf[0] == '1') {
- config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION;
- if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
- chip->dac_bits = 12;
- else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
- chip->dac_bits = 10;
- } else {
- config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
- }
+ config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
+ if (buf[0] == '1')
+ config3 |= ADT7316_DA_HIGH_RESOLUTION;
ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
if (ret)
@@ -851,7 +847,7 @@ static ssize_t adt7316_show_DAC_update_mode(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
+ if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDAC))
return sprintf(buf, "manual\n");
switch (chip->dac_config & ADT7316_DA_EN_MODE_MASK) {
@@ -880,15 +876,15 @@ static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
u8 data;
int ret;
- if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
+ if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDAC))
return -EPERM;
ret = kstrtou8(buf, 10, &data);
- if (ret || data > ADT7316_DA_EN_MODE_MASK)
+ if (ret || data > (ADT7316_DA_EN_MODE_MASK >> ADT7316_DA_EN_MODE_SHIFT))
return -EINVAL;
dac_config = chip->dac_config & (~ADT7316_DA_EN_MODE_MASK);
- dac_config |= data;
+ dac_config |= data << ADT7316_DA_EN_MODE_SHIFT;
ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
if (ret)
@@ -911,7 +907,7 @@ static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)
+ if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDAC)
return sprintf(buf, "0 - auto at any MSB DAC writing\n"
"1 - auto at MSB DAC AB and CD writing\n"
"2 - auto at MSB DAC ABCD writing\n"
@@ -933,7 +929,7 @@ static ssize_t adt7316_store_update_DAC(struct device *dev,
u8 data;
int ret;
- if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) {
+ if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDAC) {
if ((chip->dac_config & ADT7316_DA_EN_MODE_MASK) !=
ADT7316_DA_EN_MODE_LDAC)
return -EPERM;
@@ -969,9 +965,6 @@ static ssize_t adt7316_show_DA_AB_Vref_bypass(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return -EPERM;
-
return sprintf(buf, "%d\n",
!!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_AB));
}
@@ -986,9 +979,6 @@ static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev,
u8 dac_config;
int ret;
- if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return -EPERM;
-
dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_AB);
if (buf[0] == '1')
dac_config |= ADT7316_VREF_BYPASS_DAC_AB;
@@ -1014,9 +1004,6 @@ static ssize_t adt7316_show_DA_CD_Vref_bypass(struct device *dev,
struct iio_dev *dev_info = dev_to_iio_dev(dev);
struct adt7316_chip_info *chip = iio_priv(dev_info);
- if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return -EPERM;
-
return sprintf(buf, "%d\n",
!!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_CD));
}
@@ -1031,9 +1018,6 @@ static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev,
u8 dac_config;
int ret;
- if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- return -EPERM;
-
dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_CD);
if (buf[0] == '1')
dac_config |= ADT7316_VREF_BYPASS_DAC_CD;
@@ -1061,10 +1045,10 @@ static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev,
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
return sprintf(buf, "0x%x\n",
- (chip->dac_config & ADT7516_DAC_IN_VREF_MASK) >>
+ (chip->ldac_config & ADT7516_DAC_IN_VREF_MASK) >>
ADT7516_DAC_IN_VREF_OFFSET);
return sprintf(buf, "%d\n",
- !!(chip->dac_config & ADT7316_DAC_IN_VREF));
+ !!(chip->ldac_config & ADT7316_DAC_IN_VREF));
}
static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
@@ -1086,7 +1070,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
if (data & 0x1)
ldac_config |= ADT7516_DAC_AB_IN_VREF;
- else if (data & 0x2)
+ if (data & 0x2)
ldac_config |= ADT7516_DAC_CD_IN_VREF;
} else {
ret = kstrtou8(buf, 16, &data);
@@ -1410,7 +1394,7 @@ static IIO_DEVICE_ATTR(ex_analog_temp_offset, 0644,
static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
int channel, char *buf)
{
- u16 data;
+ u16 data = 0;
u8 msb, lsb, offset;
int ret;
@@ -1435,7 +1419,11 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
if (ret)
return -EIO;
- data = (msb << offset) + (lsb & ((1 << offset) - 1));
+ if (chip->dac_bits == 12)
+ data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT;
+ else if (chip->dac_bits == 10)
+ data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT;
+ data |= msb << offset;
return sprintf(buf, "%d\n", data);
}
@@ -1443,7 +1431,7 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
int channel, const char *buf, size_t len)
{
- u8 msb, lsb, offset;
+ u8 msb, lsb, lsb_reg, offset;
u16 data;
int ret;
@@ -1461,9 +1449,13 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
return -EINVAL;
if (chip->dac_bits > 8) {
- lsb = data & (1 << offset);
+ lsb = data & ((1 << offset) - 1);
+ if (chip->dac_bits == 12)
+ lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT;
+ else
+ lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT;
ret = chip->bus.write(chip->bus.client,
- ADT7316_DA_DATA_BASE + channel * 2, lsb);
+ ADT7316_DA_DATA_BASE + channel * 2, lsb_reg);
if (ret)
return -EIO;
}
@@ -1710,8 +1702,6 @@ static struct attribute *adt7516_attributes[] = {
&iio_dev_attr_DAC_update_mode.dev_attr.attr,
&iio_dev_attr_all_DAC_update_modes.dev_attr.attr,
&iio_dev_attr_update_DAC.dev_attr.attr,
- &iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr,
- &iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr,
&iio_dev_attr_DAC_internal_Vref.dev_attr.attr,
&iio_dev_attr_VDD.dev_attr.attr,
&iio_dev_attr_in_temp.dev_attr.attr,
@@ -1809,6 +1799,43 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static int adt7316_setup_irq(struct iio_dev *indio_dev)
+{
+ struct adt7316_chip_info *chip = iio_priv(indio_dev);
+ int irq_type, ret;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(chip->bus.irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ break;
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_FALLING:
+ break;
+ default:
+ dev_info(&indio_dev->dev, "mode %d unsupported, using IRQF_TRIGGER_LOW\n",
+ irq_type);
+ irq_type = IRQF_TRIGGER_LOW;
+ break;
+ }
+
+ ret = devm_request_threaded_irq(&indio_dev->dev, chip->bus.irq,
+ NULL, adt7316_event_handler,
+ irq_type | IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+ if (ret) {
+ dev_err(&indio_dev->dev, "failed to request irq %d\n",
+ chip->bus.irq);
+ return ret;
+ }
+
+ if (irq_type & IRQF_TRIGGER_HIGH)
+ chip->config1 |= ADT7316_INT_POLARITY;
+
+ return 0;
+}
+
/*
* Show mask of enabled interrupts in Hex.
*/
@@ -2103,9 +2130,7 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
{
struct adt7316_chip_info *chip;
struct iio_dev *indio_dev;
- unsigned short *adt7316_platform_data = dev->platform_data;
- int irq_type = IRQF_TRIGGER_LOW;
- int ret = 0;
+ int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
if (!indio_dev)
@@ -2123,6 +2148,13 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
else
return -ENODEV;
+ if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
+ chip->dac_bits = 12;
+ else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
+ chip->dac_bits = 10;
+ else
+ chip->dac_bits = 8;
+
chip->ldac_pin = devm_gpiod_get_optional(dev, "adi,ldac", GPIOD_OUT_LOW);
if (IS_ERR(chip->ldac_pin)) {
ret = PTR_ERR(chip->ldac_pin);
@@ -2130,8 +2162,8 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
return ret;
}
- if (chip->ldac_pin) {
- chip->config3 |= ADT7316_DA_EN_VIA_DAC_LDCA;
+ if (!chip->ldac_pin) {
+ chip->config3 |= ADT7316_DA_EN_VIA_DAC_LDAC;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
chip->config1 |= ADT7516_SEL_AIN3;
}
@@ -2148,20 +2180,9 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
indio_dev->modes = INDIO_DIRECT_MODE;
if (chip->bus.irq > 0) {
- if (adt7316_platform_data[0])
- irq_type = adt7316_platform_data[0];
-
- ret = devm_request_threaded_irq(dev, chip->bus.irq,
- NULL,
- adt7316_event_handler,
- irq_type | IRQF_ONESHOT,
- indio_dev->name,
- indio_dev);
+ ret = adt7316_setup_irq(indio_dev);
if (ret)
return ret;
-
- if (irq_type & IRQF_TRIGGER_HIGH)
- chip->config1 |= ADT7316_INT_POLARITY;
}
ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, chip->config1);
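A worked illustration (example values, not from the patch) of the DAC data alignment that the new ADT7316_DA_*_BIT_LSB_SHIFT constants and the reworked adt7316_show_DAC()/adt7316_store_DAC() above encode:

/*
 * 12-bit part (offset = dac_bits - 8 = 4, LSB shift = 4):
 *   code 0xABC  ->  MSB register = 0xABC >> 4          = 0xAB
 *                   LSB register = (0xABC & 0xF) << 4  = 0xC0
 *   readback     : (0xC0 >> 4) | (0xAB << 4)           = 0xABC
 * 10-bit parts use offset = 2 and an LSB shift of 6 instead.
 */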
diff --git a/drivers/staging/iio/cdc/Kconfig b/drivers/staging/iio/cdc/Kconfig
index 80211df8c577..b97478e7cbd0 100644
--- a/drivers/staging/iio/cdc/Kconfig
+++ b/drivers/staging/iio/cdc/Kconfig
@@ -13,16 +13,6 @@ config AD7150
To compile this driver as a module, choose M here: the
module will be called ad7150.
-config AD7152
- tristate "Analog Devices ad7152/3 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (ad7152, ad7153) Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7152.
-
config AD7746
tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
depends on I2C
diff --git a/drivers/staging/iio/cdc/Makefile b/drivers/staging/iio/cdc/Makefile
index a5fbabf5c8bf..1466bc31f244 100644
--- a/drivers/staging/iio/cdc/Makefile
+++ b/drivers/staging/iio/cdc/Makefile
@@ -3,5 +3,4 @@
#
obj-$(CONFIG_AD7150) += ad7150.o
-obj-$(CONFIG_AD7152) += ad7152.o
obj-$(CONFIG_AD7746) += ad7746.o
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
deleted file mode 100644
index 25f51db05d2d..000000000000
--- a/drivers/staging/iio/cdc/ad7152.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- * AD7152 capacitive sensor driver supporting AD7152/3
- *
- * Copyright 2010-2011a Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-/*
- * TODO: Check compliance of calibbias with abi (units)
- */
-/*
- * AD7152 registers definition
- */
-
-#define AD7152_REG_STATUS 0
-#define AD7152_REG_CH1_DATA_HIGH 1
-#define AD7152_REG_CH2_DATA_HIGH 3
-#define AD7152_REG_CH1_OFFS_HIGH 5
-#define AD7152_REG_CH2_OFFS_HIGH 7
-#define AD7152_REG_CH1_GAIN_HIGH 9
-#define AD7152_REG_CH1_SETUP 11
-#define AD7152_REG_CH2_GAIN_HIGH 12
-#define AD7152_REG_CH2_SETUP 14
-#define AD7152_REG_CFG 15
-#define AD7152_REG_RESEVERD 16
-#define AD7152_REG_CAPDAC_POS 17
-#define AD7152_REG_CAPDAC_NEG 18
-#define AD7152_REG_CFG2 26
-
-/* Status Register Bit Designations (AD7152_REG_STATUS) */
-#define AD7152_STATUS_RDY1 BIT(0)
-#define AD7152_STATUS_RDY2 BIT(1)
-#define AD7152_STATUS_C1C2 BIT(2)
-#define AD7152_STATUS_PWDN BIT(7)
-
-/* Setup Register Bit Designations (AD7152_REG_CHx_SETUP) */
-#define AD7152_SETUP_CAPDIFF BIT(5)
-#define AD7152_SETUP_RANGE_2pF (0 << 6)
-#define AD7152_SETUP_RANGE_0_5pF (1 << 6)
-#define AD7152_SETUP_RANGE_1pF (2 << 6)
-#define AD7152_SETUP_RANGE_4pF (3 << 6)
-#define AD7152_SETUP_RANGE(x) ((x) << 6)
-
-/* Config Register Bit Designations (AD7152_REG_CFG) */
-#define AD7152_CONF_CH2EN BIT(3)
-#define AD7152_CONF_CH1EN BIT(4)
-#define AD7152_CONF_MODE_IDLE (0 << 0)
-#define AD7152_CONF_MODE_CONT_CONV (1 << 0)
-#define AD7152_CONF_MODE_SINGLE_CONV (2 << 0)
-#define AD7152_CONF_MODE_OFFS_CAL (5 << 0)
-#define AD7152_CONF_MODE_GAIN_CAL (6 << 0)
-
-/* Capdac Register Bit Designations (AD7152_REG_CAPDAC_XXX) */
-#define AD7152_CAPDAC_DACEN BIT(7)
-#define AD7152_CAPDAC_DACP(x) ((x) & 0x1F)
-
-/* CFG2 Register Bit Designations (AD7152_REG_CFG2) */
-#define AD7152_CFG2_OSR(x) (((x) & 0x3) << 4)
-
-enum {
- AD7152_DATA,
- AD7152_OFFS,
- AD7152_GAIN,
- AD7152_SETUP
-};
-
-/*
- * struct ad7152_chip_info - chip specific information
- */
-
-struct ad7152_chip_info {
- struct i2c_client *client;
- /*
- * Capacitive channel digital filter setup;
- * conversion time/update rate setup per channel
- */
- u8 filter_rate_setup;
- u8 setup[2];
- struct mutex state_lock; /* protect hardware state */
-};
-
-static inline ssize_t ad7152_start_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len,
- u8 regval)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- bool doit;
- int ret, timeout = 10;
-
- ret = strtobool(buf, &doit);
- if (ret < 0)
- return ret;
-
- if (!doit)
- return 0;
-
- if (this_attr->address == 0)
- regval |= AD7152_CONF_CH1EN;
- else
- regval |= AD7152_CONF_CH2EN;
-
- mutex_lock(&chip->state_lock);
- ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval);
- if (ret < 0)
- goto unlock;
-
- do {
- mdelay(20);
- ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG);
- if (ret < 0)
- goto unlock;
-
- } while ((ret == regval) && timeout--);
-
- mutex_unlock(&chip->state_lock);
- return len;
-
-unlock:
- mutex_unlock(&chip->state_lock);
- return ret;
-}
-
-static ssize_t ad7152_start_offset_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- return ad7152_start_calib(dev, attr, buf, len,
- AD7152_CONF_MODE_OFFS_CAL);
-}
-
-static ssize_t ad7152_start_gain_calib(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- return ad7152_start_calib(dev, attr, buf, len,
- AD7152_CONF_MODE_GAIN_CAL);
-}
-
-static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
- 0200, NULL, ad7152_start_offset_calib, 0);
-static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
- 0200, NULL, ad7152_start_offset_calib, 1);
-static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
- 0200, NULL, ad7152_start_gain_calib, 0);
-static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
- 0200, NULL, ad7152_start_gain_calib, 1);
-
-/* Values are Update Rate (Hz), Conversion Time (ms) + 1*/
-static const unsigned char ad7152_filter_rate_table[][2] = {
- {200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
-};
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17");
-
-static IIO_CONST_ATTR(in_capacitance_scale_available,
- "0.000061050 0.000030525 0.000015263 0.000007631");
-
-static struct attribute *ad7152_attributes[] = {
- &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
- &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
- &iio_const_attr_in_capacitance_scale_available.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7152_attribute_group = {
- .attrs = ad7152_attributes,
-};
-
-static const u8 ad7152_addresses[][4] = {
- { AD7152_REG_CH1_DATA_HIGH, AD7152_REG_CH1_OFFS_HIGH,
- AD7152_REG_CH1_GAIN_HIGH, AD7152_REG_CH1_SETUP },
- { AD7152_REG_CH2_DATA_HIGH, AD7152_REG_CH2_OFFS_HIGH,
- AD7152_REG_CH2_GAIN_HIGH, AD7152_REG_CH2_SETUP },
-};
-
-/* Values are nano relative to pf base. */
-static const int ad7152_scale_table[] = {
- 30525, 7631, 15263, 61050
-};
-
-/**
- * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
- *
- * lock must be held
- **/
-static int ad7152_read_raw_samp_freq(struct device *dev, int *val)
-{
- struct ad7152_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
-
- *val = ad7152_filter_rate_table[chip->filter_rate_setup][0];
-
- return 0;
-}
-
-/**
- * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
- *
- * lock must be held
- **/
-static int ad7152_write_raw_samp_freq(struct device *dev, int val)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- int ret, i;
-
- for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
- if (val >= ad7152_filter_rate_table[i][0])
- break;
-
- if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
- i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
-
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
- if (ret < 0)
- return ret;
-
- chip->filter_rate_setup = i;
-
- return ret;
-}
-
-static int ad7152_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- int ret, i;
-
- mutex_lock(&chip->state_lock);
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- if (val != 1) {
- ret = -EINVAL;
- goto out;
- }
-
- val = (val2 * 1024) / 15625;
-
- ret = i2c_smbus_write_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_GAIN],
- swab16(val));
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
-
- case IIO_CHAN_INFO_CALIBBIAS:
- if ((val < 0) | (val > 0xFFFF)) {
- ret = -EINVAL;
- goto out;
- }
- ret = i2c_smbus_write_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_OFFS],
- swab16(val));
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
- case IIO_CHAN_INFO_SCALE:
- if (val) {
- ret = -EINVAL;
- goto out;
- }
- for (i = 0; i < ARRAY_SIZE(ad7152_scale_table); i++)
- if (val2 == ad7152_scale_table[i])
- break;
-
- chip->setup[chan->channel] &= ~AD7152_SETUP_RANGE_4pF;
- chip->setup[chan->channel] |= AD7152_SETUP_RANGE(i);
-
- ret = i2c_smbus_write_byte_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_SETUP],
- chip->setup[chan->channel]);
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2) {
- ret = -EINVAL;
- goto out;
- }
- ret = ad7152_write_raw_samp_freq(&indio_dev->dev, val);
- if (ret < 0)
- goto out;
-
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
-
-out:
- mutex_unlock(&chip->state_lock);
- return ret;
-}
-
-static int ad7152_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct ad7152_chip_info *chip = iio_priv(indio_dev);
- int ret;
- u8 regval = 0;
-
- mutex_lock(&chip->state_lock);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- /* First set whether in differential mode */
-
- regval = chip->setup[chan->channel];
-
- if (chan->differential)
- chip->setup[chan->channel] |= AD7152_SETUP_CAPDIFF;
- else
- chip->setup[chan->channel] &= ~AD7152_SETUP_CAPDIFF;
-
- if (regval != chip->setup[chan->channel]) {
- ret = i2c_smbus_write_byte_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_SETUP],
- chip->setup[chan->channel]);
- if (ret < 0)
- goto out;
- }
- /* Make sure the channel is enabled */
- if (chan->channel == 0)
- regval = AD7152_CONF_CH1EN;
- else
- regval = AD7152_CONF_CH2EN;
-
- /* Trigger a single read */
- regval |= AD7152_CONF_MODE_SINGLE_CONV;
- ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG,
- regval);
- if (ret < 0)
- goto out;
-
- msleep(ad7152_filter_rate_table[chip->filter_rate_setup][1]);
- /* Now read the actual register */
- ret = i2c_smbus_read_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_DATA]);
- if (ret < 0)
- goto out;
- *val = swab16(ret);
-
- if (chan->differential)
- *val -= 0x8000;
-
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
-
- ret = i2c_smbus_read_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_GAIN]);
- if (ret < 0)
- goto out;
- /* 1 + gain_val / 2^16 */
- *val = 1;
- *val2 = (15625 * swab16(ret)) / 1024;
-
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
- ret = i2c_smbus_read_word_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_OFFS]);
- if (ret < 0)
- goto out;
- *val = swab16(ret);
-
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_SCALE:
- ret = i2c_smbus_read_byte_data(chip->client,
- ad7152_addresses[chan->channel][AD7152_SETUP]);
- if (ret < 0)
- goto out;
- *val = 0;
- *val2 = ad7152_scale_table[ret >> 6];
-
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- ret = ad7152_read_raw_samp_freq(&indio_dev->dev, val);
- if (ret < 0)
- goto out;
-
- ret = IIO_VAL_INT;
- break;
- default:
- ret = -EINVAL;
- }
-out:
- mutex_unlock(&chip->state_lock);
- return ret;
-}
-
-static int ad7152_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- return IIO_VAL_INT_PLUS_NANO;
- default:
- return IIO_VAL_INT_PLUS_MICRO;
- }
-}
-
-static const struct iio_info ad7152_info = {
- .attrs = &ad7152_attribute_group,
- .read_raw = ad7152_read_raw,
- .write_raw = ad7152_write_raw,
- .write_raw_get_fmt = ad7152_write_raw_get_fmt,
-};
-
-static const struct iio_chan_spec ad7152_channels[] = {
- {
- .type = IIO_CAPACITANCE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- }, {
- .type = IIO_CAPACITANCE,
- .differential = 1,
- .indexed = 1,
- .channel = 0,
- .channel2 = 2,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- }, {
- .type = IIO_CAPACITANCE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- }, {
- .type = IIO_CAPACITANCE,
- .differential = 1,
- .indexed = 1,
- .channel = 1,
- .channel2 = 3,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) |
- BIT(IIO_CHAN_INFO_CALIBBIAS) |
- BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
- }
-};
-
-/*
- * device probe and remove
- */
-
-static int ad7152_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret = 0;
- struct ad7152_chip_info *chip;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
- if (!indio_dev)
- return -ENOMEM;
- chip = iio_priv(indio_dev);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
-
- chip->client = client;
- mutex_init(&chip->state_lock);
-
- /* Establish that the iio_dev is a child of the i2c device */
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ad7152_info;
- indio_dev->channels = ad7152_channels;
- if (id->driver_data == 0)
- indio_dev->num_channels = ARRAY_SIZE(ad7152_channels);
- else
- indio_dev->num_channels = 2;
- indio_dev->num_channels = ARRAY_SIZE(ad7152_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
- if (ret)
- return ret;
-
- dev_err(&client->dev, "%s capacitive sensor registered\n", id->name);
-
- return 0;
-}
-
-static const struct i2c_device_id ad7152_id[] = {
- { "ad7152", 0 },
- { "ad7153", 1 },
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad7152_id);
-
-static struct i2c_driver ad7152_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- },
- .probe = ad7152_probe,
- .id_table = ad7152_id,
-};
-module_i2c_driver(ad7152_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices AD7152/3 capacitive sensor driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 1e977014fe5f..0b0287503fb4 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -6,6 +6,7 @@
* Licensed under the GPL-2.
*/
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
@@ -71,7 +72,7 @@
struct ad9834_state {
struct spi_device *spi;
struct regulator *reg;
- unsigned int mclk;
+ struct clk *mclk;
unsigned short control;
unsigned short devid;
struct spi_transfer xfer;
@@ -110,12 +111,15 @@ static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
static int ad9834_write_frequency(struct ad9834_state *st,
unsigned long addr, unsigned long fout)
{
+ unsigned long clk_freq;
unsigned long regval;
- if (fout > (st->mclk / 2))
+ clk_freq = clk_get_rate(st->mclk);
+
+ if (fout > (clk_freq / 2))
return -EINVAL;
- regval = ad9834_calc_freqreg(st->mclk, fout);
+ regval = ad9834_calc_freqreg(clk_freq, fout);
st->freq_data[0] = cpu_to_be16(addr | (regval &
RES_MASK(AD9834_FREQ_BITS / 2)));
@@ -389,16 +393,11 @@ static const struct iio_info ad9833_info = {
static int ad9834_probe(struct spi_device *spi)
{
- struct ad9834_platform_data *pdata = dev_get_platdata(&spi->dev);
struct ad9834_state *st;
struct iio_dev *indio_dev;
struct regulator *reg;
int ret;
- if (!pdata) {
- dev_dbg(&spi->dev, "no platform data?\n");
- return -ENODEV;
- }
reg = devm_regulator_get(&spi->dev, "avdd");
if (IS_ERR(reg))
@@ -418,7 +417,14 @@ static int ad9834_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
mutex_init(&st->lock);
- st->mclk = pdata->mclk;
+ st->mclk = devm_clk_get(&spi->dev, NULL);
+
+ ret = clk_prepare_enable(st->mclk);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable master clock\n");
+ goto error_disable_reg;
+ }
+
st->spi = spi;
st->devid = spi_get_device_id(spi)->driver_data;
st->reg = reg;
@@ -454,42 +460,41 @@ static int ad9834_probe(struct spi_device *spi)
spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);
st->control = AD9834_B28 | AD9834_RESET;
+ st->control |= AD9834_DIV2;
- if (!pdata->en_div2)
- st->control |= AD9834_DIV2;
-
- if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834))
+ if (st->devid == ID_AD9834)
st->control |= AD9834_SIGN_PIB;
st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
- goto error_disable_reg;
+ goto error_clock_unprepare;
}
- ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
+ ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, 1000000);
if (ret)
- goto error_disable_reg;
+ goto error_clock_unprepare;
- ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
+ ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, 5000000);
if (ret)
- goto error_disable_reg;
+ goto error_clock_unprepare;
- ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
+ ret = ad9834_write_phase(st, AD9834_REG_PHASE0, 512);
if (ret)
- goto error_disable_reg;
+ goto error_clock_unprepare;
- ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
+ ret = ad9834_write_phase(st, AD9834_REG_PHASE1, 1024);
if (ret)
- goto error_disable_reg;
+ goto error_clock_unprepare;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_disable_reg;
+ goto error_clock_unprepare;
return 0;
-
+error_clock_unprepare:
+ clk_disable_unprepare(st->mclk);
error_disable_reg:
regulator_disable(reg);
@@ -502,6 +507,7 @@ static int ad9834_remove(struct spi_device *spi)
struct ad9834_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
+ clk_disable_unprepare(st->mclk);
regulator_disable(st->reg);
return 0;
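A minimal sketch (illustration only, not part of the patch) of the common clock framework consumer calls that replace the platform-data mclk above; example_get_mclk_rate() and its error handling are assumptions, only the clk_*() calls mirror the driver:

#include <linux/clk.h>
#include <linux/err.h>

static int example_get_mclk_rate(struct device *dev, unsigned long *rate)
{
	struct clk *mclk;
	int ret;

	mclk = devm_clk_get(dev, NULL);		/* unnamed clock of this device */
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	ret = clk_prepare_enable(mclk);		/* prepare and enable before use */
	if (ret)
		return ret;

	*rate = clk_get_rate(mclk);		/* frequency formerly in pdata->mclk */

	/* pair with clk_disable_unprepare(mclk) on the remove/error paths */
	return 0;
}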
diff --git a/drivers/staging/iio/frequency/ad9834.h b/drivers/staging/iio/frequency/ad9834.h
index ae620f38eb49..da7e83ceedad 100644
--- a/drivers/staging/iio/frequency/ad9834.h
+++ b/drivers/staging/iio/frequency/ad9834.h
@@ -8,32 +8,4 @@
#ifndef IIO_DDS_AD9834_H_
#define IIO_DDS_AD9834_H_
-/*
- * TODO: struct ad7887_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad9834_platform_data - platform specific information
- * @mclk: master clock in Hz
- * @freq0: power up freq0 tuning word in Hz
- * @freq1: power up freq1 tuning word in Hz
- * @phase0: power up phase0 value [0..4095] correlates with 0..2PI
- * @phase1: power up phase1 value [0..4095] correlates with 0..2PI
- * @en_div2: digital output/2 is passed to the SIGN BIT OUT pin
- * @en_signbit_msb_out: the MSB (or MSB/2) of the DAC data is connected to the
- * SIGN BIT OUT pin. en_div2 controls whether it is the MSB
- * or MSB/2 that is output. if en_signbit_msb_out=false,
- * the on-board comparator is connected to SIGN BIT OUT
- */
-
-struct ad9834_platform_data {
- unsigned int mclk;
- unsigned int freq0;
- unsigned int freq1;
- unsigned short phase0;
- unsigned short phase1;
- bool en_div2;
- bool en_signbit_msb_out;
-};
-
#endif /* IIO_DDS_AD9834_H_ */
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 9e52384f5370..3134295f014f 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -82,21 +83,10 @@
#define AD5933_POLL_TIME_ms 10
#define AD5933_INIT_EXCITATION_TIME_ms 100
-/**
- * struct ad5933_platform_data - platform specific data
- * @ext_clk_hz: the external clock frequency in Hz, if not set
- * the driver uses the internal clock (16.776 MHz)
- * @vref_mv: the external reference voltage in millivolt
- */
-
-struct ad5933_platform_data {
- unsigned long ext_clk_hz;
- unsigned short vref_mv;
-};
-
struct ad5933_state {
struct i2c_client *client;
struct regulator *reg;
+ struct clk *mclk;
struct delayed_work work;
struct mutex lock; /* Protect sensor state */
unsigned long mclk_hz;
@@ -112,10 +102,6 @@ struct ad5933_state {
unsigned int poll_time_jiffies;
};
-static struct ad5933_platform_data ad5933_default_pdata = {
- .vref_mv = 3300,
-};
-
#define AD5933_CHANNEL(_type, _extend_name, _info_mask_separate, _address, \
_scan_index, _realbits) { \
.type = (_type), \
@@ -691,10 +677,10 @@ static void ad5933_work(struct work_struct *work)
static int ad5933_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int ret, voltage_uv = 0;
- struct ad5933_platform_data *pdata = dev_get_platdata(&client->dev);
+ int ret;
struct ad5933_state *st;
struct iio_dev *indio_dev;
+ unsigned long ext_clk_hz = 0;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
if (!indio_dev)
@@ -706,9 +692,6 @@ static int ad5933_probe(struct i2c_client *client,
mutex_init(&st->lock);
- if (!pdata)
- pdata = &ad5933_default_pdata;
-
st->reg = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(st->reg))
return PTR_ERR(st->reg);
@@ -718,15 +701,28 @@ static int ad5933_probe(struct i2c_client *client,
dev_err(&client->dev, "Failed to enable specified VDD supply\n");
return ret;
}
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
- if (voltage_uv)
- st->vref_mv = voltage_uv / 1000;
- else
- st->vref_mv = pdata->vref_mv;
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref_mv = ret / 1000;
+
+ st->mclk = devm_clk_get(&client->dev, "mclk");
+ if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT) {
+ ret = PTR_ERR(st->mclk);
+ goto error_disable_reg;
+ }
- if (pdata->ext_clk_hz) {
- st->mclk_hz = pdata->ext_clk_hz;
+ if (!IS_ERR(st->mclk)) {
+ ret = clk_prepare_enable(st->mclk);
+ if (ret < 0)
+ goto error_disable_reg;
+ ext_clk_hz = clk_get_rate(st->mclk);
+ }
+
+ if (ext_clk_hz) {
+ st->mclk_hz = ext_clk_hz;
st->ctrl_lb = AD5933_CTRL_EXT_SYSCLK;
} else {
st->mclk_hz = AD5933_INT_OSC_FREQ_Hz;
@@ -746,7 +742,7 @@ static int ad5933_probe(struct i2c_client *client,
ret = ad5933_register_ring_funcs_and_init(indio_dev);
if (ret)
- goto error_disable_reg;
+ goto error_disable_mclk;
ret = ad5933_setup(st);
if (ret)
@@ -760,6 +756,8 @@ static int ad5933_probe(struct i2c_client *client,
error_unreg_ring:
iio_kfifo_free(indio_dev->buffer);
+error_disable_mclk:
+ clk_disable_unprepare(st->mclk);
error_disable_reg:
regulator_disable(st->reg);
@@ -774,6 +772,7 @@ static int ad5933_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
iio_kfifo_free(indio_dev->buffer);
regulator_disable(st->reg);
+ clk_disable_unprepare(st->mclk);
return 0;
}
diff --git a/drivers/staging/ks7010/Makefile b/drivers/staging/ks7010/Makefile
index 07dc16cc86f5..412e2105a3a5 100644
--- a/drivers/staging/ks7010/Makefile
+++ b/drivers/staging/ks7010/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_KS7010) += ks7010.o
-ks7010-y := michael_mic.o ks_hostif.o ks_wlan_net.o ks7010_sdio.o
+ks7010-y := ks_hostif.o ks_wlan_net.o ks7010_sdio.o
diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO
index d393ca58e231..87a6dac4890d 100644
--- a/drivers/staging/ks7010/TODO
+++ b/drivers/staging/ks7010/TODO
@@ -27,8 +27,6 @@ Now the TODOs:
- fix the 'card removal' event when card is inserted when booting
- check what other upstream wireless mechanisms can be used instead of the
custom ones here
-- replace custom Michael MIC implementation with the kernel
- implementation. This task is only required for a *clean* WEXT interface.
Please send any patches to:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 065bce193fac..06ebea0be118 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -6,15 +6,18 @@
* Copyright (C) 2009 Renesas Technology Corp.
*/
+#include <crypto/hash.h>
#include <linux/circ_buf.h>
#include <linux/if_arp.h>
#include <net/iw_handler.h>
#include <uapi/linux/llc.h>
#include "eap_packet.h"
#include "ks_wlan.h"
-#include "michael_mic.h"
#include "ks_hostif.h"
+#define MICHAEL_MIC_KEY_LEN 8
+#define MICHAEL_MIC_LEN 8
+
static inline void inc_smeqhead(struct ks_wlan_private *priv)
{
priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE;
@@ -35,7 +38,7 @@ static inline u8 get_byte(struct ks_wlan_private *priv)
{
u8 data;
- data = *(priv->rxp)++;
+ data = *priv->rxp++;
/* length check in advance ! */
--(priv->rx_size);
return data;
@@ -171,7 +174,7 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info *ap_info)
"- rate_set_size=%d\n",
ap->bssid[0], ap->bssid[1], ap->bssid[2],
ap->bssid[3], ap->bssid[4], ap->bssid[5],
- &(ap->ssid.body[0]),
+ &ap->ssid.body[0],
ap->rate_set.body[0], ap->rate_set.body[1],
ap->rate_set.body[2], ap->rate_set.body[3],
ap->rate_set.body[4], ap->rate_set.body[5],
@@ -191,6 +194,68 @@ static u8 read_ie(unsigned char *bp, u8 max, u8 *body)
return size;
}
+static int
+michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result)
+{
+ u8 pad_data[4] = { priority, 0, 0, 0 };
+ struct crypto_shash *tfm = NULL;
+ struct shash_desc *desc = NULL;
+ int ret;
+
+ tfm = crypto_alloc_shash("michael_mic", 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto err;
+ }
+
+ ret = crypto_shash_setkey(tfm, key, MICHAEL_MIC_KEY_LEN);
+ if (ret < 0)
+ goto err_free_tfm;
+
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto err_free_tfm;
+ }
+
+ desc->tfm = tfm;
+ desc->flags = 0;
+
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto err_free_desc;
+
+ // Compute the MIC value
+ /*
+ * IEEE802.11i page 47
+ * Figure 43g TKIP MIC processing format
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ * |6 |6 |1 |3 |M |1 |1 |1 |1 |1 |1 |1 |1 | Octet
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7|
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ */
+
+ ret = crypto_shash_update(desc, data, 12);
+ if (ret < 0)
+ goto err_free_desc;
+
+ ret = crypto_shash_update(desc, pad_data, 4);
+ if (ret < 0)
+ goto err_free_desc;
+
+ ret = crypto_shash_finup(desc, data + 12, len - 12, result);
+
+err_free_desc:
+ kzfree(desc);
+
+err_free_tfm:
+ crypto_free_shash(tfm);
+
+err:
+ return ret;
+}
+
static
int get_ap_information(struct ks_wlan_private *priv, struct ap_info *ap_info,
struct local_ap *ap)
@@ -273,11 +338,11 @@ int hostif_data_indication_wpa(struct ks_wlan_private *priv,
{
struct ether_hdr *eth_hdr;
unsigned short eth_proto;
- unsigned char recv_mic[8];
+ unsigned char recv_mic[MICHAEL_MIC_LEN];
char buf[128];
unsigned long now;
struct mic_failure *mic_failure;
- struct michael_mic michael_mic;
+ u8 mic[MICHAEL_MIC_LEN];
union iwreq_data wrqu;
unsigned int key_index = auth_type - 1;
struct wpa_key *key = &priv->wpa.key[key_index];
@@ -300,14 +365,20 @@ int hostif_data_indication_wpa(struct ks_wlan_private *priv,
netdev_dbg(priv->net_dev, "TKIP: protocol=%04X: size=%u\n",
eth_proto, priv->rx_size);
/* MIC save */
- memcpy(&recv_mic[0], (priv->rxp) + ((priv->rx_size) - 8), 8);
- priv->rx_size = priv->rx_size - 8;
+ memcpy(&recv_mic[0],
+ (priv->rxp) + ((priv->rx_size) - sizeof(recv_mic)),
+ sizeof(recv_mic));
+ priv->rx_size = priv->rx_size - sizeof(recv_mic);
if (auth_type > 0 && auth_type < 4) { /* auth_type check */
- michael_mic_function(&michael_mic, key->rx_mic_key,
- priv->rxp, priv->rx_size,
- 0, michael_mic.result);
+ int ret;
+
+ ret = michael_mic(key->rx_mic_key,
+ priv->rxp, priv->rx_size,
+ 0, mic);
+ if (ret < 0)
+ return ret;
}
- if (memcmp(michael_mic.result, recv_mic, 8) != 0) {
+ if (memcmp(mic, recv_mic, sizeof(mic)) != 0) {
now = jiffies;
mic_failure = &priv->wpa.mic_failure;
/* MIC FAILURE */
@@ -730,9 +801,9 @@ void hostif_scan_indication(struct ks_wlan_private *priv)
priv->scan_ind_count++;
if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) {
netdev_dbg(priv->net_dev, " scan_ind_count=%d :: aplist.size=%d\n",
- priv->scan_ind_count, priv->aplist.size);
+ priv->scan_ind_count, priv->aplist.size);
get_ap_information(priv, (struct ap_info *)(priv->rxp),
- &(priv->aplist.ap[priv->scan_ind_count - 1]));
+ &priv->aplist.ap[priv->scan_ind_count - 1]);
priv->aplist.size = priv->scan_ind_count;
} else {
netdev_dbg(priv->net_dev, " count over :: scan_ind_count=%d\n",
@@ -1002,7 +1073,6 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
int result = 0;
unsigned short eth_proto;
struct ether_hdr *eth_hdr;
- struct michael_mic michael_mic;
unsigned short keyinfo = 0;
struct ieee802_1x_hdr *aa1x_hdr;
struct wpa_eapol_key *eap_key;
@@ -1109,17 +1179,20 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
pp->auth_type = cpu_to_le16(TYPE_AUTH);
} else {
if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
- michael_mic_function(&michael_mic,
- priv->wpa.key[0].tx_mic_key,
- &pp->data[0], skb_len,
- 0, michael_mic.result);
- memcpy(p, michael_mic.result, 8);
- length += 8;
- skb_len += 8;
- p += 8;
+ u8 mic[MICHAEL_MIC_LEN];
+
+ ret = michael_mic(priv->wpa.key[0].tx_mic_key,
+ &pp->data[0], skb_len,
+ 0, mic);
+ if (ret < 0)
+ goto err_kfree;
+
+ memcpy(p, mic, sizeof(mic));
+ length += sizeof(mic);
+ skb_len += sizeof(mic);
+ p += sizeof(mic);
pp->auth_type =
cpu_to_le16(TYPE_DATA);
-
} else if (priv->wpa.pairwise_suite ==
IW_AUTH_CIPHER_CCMP) {
pp->auth_type =
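
Note: the ks_hostif.c hunks above drop the open-coded Michael MIC in favour of the kernel crypto API. A hedged sketch of the general shash usage pattern the new michael_mic() helper follows (allocate transform, set key, hash through a shash_desc); this is illustrative, not part of the patch, and omits the Michael-specific DA/SA/priority framing done with update/finup:

#include <crypto/hash.h>
#include <linux/slab.h>

/* Illustrative one-shot keyed hash over a flat buffer using the shash
 * API; the real michael_mic() helper above additionally splits the
 * input into header, priority padding and payload.
 */
static int shash_mac(const char *alg, const u8 *key, unsigned int keylen,
		     const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, keylen);
	if (ret)
		goto free_tfm;

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto free_tfm;
	}
	desc->tfm = tfm;

	ret = crypto_shash_digest(desc, data, len, out);
	kzfree(desc);
free_tfm:
	crypto_free_shash(tfm);
	return ret;
}
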
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index dc5459ae0b51..3cffc8be6656 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -182,7 +182,7 @@ static int ks_wlan_set_freq(struct net_device *dev,
/* for SLEEP MODE */
/* If setting by frequency, convert to a channel */
if ((fwrq->freq.e == 1) &&
- (fwrq->freq.m >= (int)2.412e8) && (fwrq->freq.m <= (int)2.487e8)) {
+ (fwrq->freq.m >= 241200000) && (fwrq->freq.m <= 248700000)) {
int f = fwrq->freq.m / 100000;
int c = 0;
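
Note: the literal change above only swaps float arithmetic for plain integer bounds. For reference, a hedged sketch of the usual 2.4 GHz frequency-to-channel mapping such a conversion feeds into (not necessarily the exact table this driver uses further down):

#include <linux/errno.h>

/* Channels 1-13 sit 5 MHz apart starting at 2412 MHz; channel 14 is the
 * Japan-only slot at 2484 MHz.  Illustrative helper only.
 */
static int mhz_to_channel(int mhz)
{
	if (mhz == 2484)
		return 14;
	if (mhz >= 2412 && mhz <= 2472 && (mhz - 2412) % 5 == 0)
		return (mhz - 2412) / 5 + 1;
	return -EINVAL;
}
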
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
deleted file mode 100644
index 3acd79615f98..000000000000
--- a/drivers/staging/ks7010/michael_mic.c
+++ /dev/null
@@ -1,127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for KeyStream wireless LAN
- *
- * Copyright (C) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#include <asm/unaligned.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include "michael_mic.h"
-
-// Reset the state to the empty message.
-static inline void michael_clear(struct michael_mic *mic)
-{
- mic->l = mic->k0;
- mic->r = mic->k1;
- mic->m_bytes = 0;
-}
-
-static void michael_init(struct michael_mic *mic, u8 *key)
-{
- // Set the key
- mic->k0 = get_unaligned_le32(key);
- mic->k1 = get_unaligned_le32(key + 4);
-
- //clear();
- michael_clear(mic);
-}
-
-static inline void michael_block(struct michael_mic *mic)
-{
- mic->r ^= rol32(mic->l, 17);
- mic->l += mic->r;
- mic->r ^= ((mic->l & 0xff00ff00) >> 8) |
- ((mic->l & 0x00ff00ff) << 8);
- mic->l += mic->r;
- mic->r ^= rol32(mic->l, 3);
- mic->l += mic->r;
- mic->r ^= ror32(mic->l, 2);
- mic->l += mic->r;
-}
-
-static void michael_append(struct michael_mic *mic, u8 *src, int bytes)
-{
- int addlen;
-
- if (mic->m_bytes) {
- addlen = 4 - mic->m_bytes;
- if (addlen > bytes)
- addlen = bytes;
- memcpy(&mic->m[mic->m_bytes], src, addlen);
- mic->m_bytes += addlen;
- src += addlen;
- bytes -= addlen;
-
- if (mic->m_bytes < 4)
- return;
-
- mic->l ^= get_unaligned_le32(mic->m);
- michael_block(mic);
- mic->m_bytes = 0;
- }
-
- while (bytes >= 4) {
- mic->l ^= get_unaligned_le32(src);
- michael_block(mic);
- src += 4;
- bytes -= 4;
- }
-
- if (bytes > 0) {
- mic->m_bytes = bytes;
- memcpy(mic->m, src, bytes);
- }
-}
-
-static void michael_get_mic(struct michael_mic *mic, u8 *dst)
-{
- u8 *data = mic->m;
-
- switch (mic->m_bytes) {
- case 0:
- mic->l ^= 0x5a;
- break;
- case 1:
- mic->l ^= data[0] | 0x5a00;
- break;
- case 2:
- mic->l ^= data[0] | (data[1] << 8) | 0x5a0000;
- break;
- case 3:
- mic->l ^= data[0] | (data[1] << 8) | (data[2] << 16) |
- 0x5a000000;
- break;
- }
- michael_block(mic);
- michael_block(mic);
- // The appendByte function has already computed the result.
- put_unaligned_le32(mic->l, dst);
- put_unaligned_le32(mic->r, dst + 4);
-
- // Reset to the empty message.
- michael_clear(mic);
-}
-
-void michael_mic_function(struct michael_mic *mic, u8 *key,
- u8 *data, unsigned int len, u8 priority, u8 *result)
-{
- u8 pad_data[4] = { priority, 0, 0, 0 };
- // Compute the MIC value
- /*
- * IEEE802.11i page 47
- * Figure 43g TKIP MIC processing format
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- * |6 |6 |1 |3 |M |1 |1 |1 |1 |1 |1 |1 |1 | Octet
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7|
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- */
- michael_init(mic, key);
- michael_append(mic, data, 12); /* |DA|SA| */
- michael_append(mic, pad_data, 4); /* |Priority|0|0|0| */
- michael_append(mic, data + 12, len - 12); /* |Data| */
- michael_get_mic(mic, result);
-}
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
deleted file mode 100644
index f0ac164b999b..000000000000
--- a/drivers/staging/ks7010/michael_mic.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for KeyStream wireless LAN
- *
- * Copyright (C) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-/* MichaelMIC routine define */
-struct michael_mic {
- u32 k0; // Key
- u32 k1; // Key
- u32 l; // Current state
- u32 r; // Current state
- u8 m[4]; // Message accumulator (single word)
- int m_bytes; // # bytes in M
- u8 result[8];
-};
-
-void michael_mic_function(struct michael_mic *mic, u8 *key,
- u8 *data, unsigned int len, u8 priority, u8 *result);
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
index 9c57042c877d..9268e507f791 100644
--- a/drivers/staging/media/davinci_vpfe/Makefile
+++ b/drivers/staging/media/davinci_vpfe/Makefile
@@ -6,5 +6,5 @@ davinci-vfpe-objs := \
# Allow building it with COMPILE_TEST on other archs
ifndef CONFIG_ARCH_DAVINCI
-ccflags-y += -Iarch/arm/mach-davinci/include/
+ccflags-y += -I $(srctree)/arch/arm/mach-davinci/include/
endif
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index f8bcf488ecf2..c7662f65f6db 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST) += most_core.o
most_core-y := core.o
-ccflags-y += -Idrivers/staging/
+ccflags-y += -I $(srctree)/drivers/staging/
obj-$(CONFIG_MOST_CDEV) += cdev/
obj-$(CONFIG_MOST_NET) += net/
diff --git a/drivers/staging/most/cdev/Makefile b/drivers/staging/most/cdev/Makefile
index afb9870eb50f..21b0bd72c01d 100644
--- a/drivers/staging/most/cdev/Makefile
+++ b/drivers/staging/most/cdev/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MOST_CDEV) += most_cdev.o
most_cdev-objs := cdev.o
-ccflags-y += -Idrivers/staging/
+ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index ea64aabda94e..f2b347cda8b7 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -453,7 +453,9 @@ static int comp_probe(struct most_interface *iface, int channel_id,
c->devno = MKDEV(comp.major, current_minor);
cdev_init(&c->cdev, &channel_fops);
c->cdev.owner = THIS_MODULE;
- cdev_add(&c->cdev, c->devno, 1);
+ retval = cdev_add(&c->cdev, c->devno, 1);
+ if (retval < 0)
+ goto err_free_c;
c->iface = iface;
c->cfg = cfg;
c->channel_id = channel_id;
@@ -485,6 +487,7 @@ err_free_kfifo_and_del_list:
list_del(&c->list);
err_del_cdev_and_free_channel:
cdev_del(&c->cdev);
+err_free_c:
kfree(c);
err_remove_ida:
ida_simple_remove(&comp.minor_id, current_minor);
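
Note: the cdev.c change above exists because cdev_add() can fail and the partially set-up channel must then be unwound. A hedged, generic sketch of the registration step (illustrative names, not the MOST code):

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

/* Register one character-device minor; if cdev_add() fails the caller
 * releases whatever it allocated beforehand, exactly as the new
 * err_free_c label above does.
 */
static int register_channel(struct cdev *cdev, dev_t devno,
			    const struct file_operations *fops)
{
	int ret;

	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;

	ret = cdev_add(cdev, devno, 1);
	if (ret < 0)
		return ret;

	return 0;
}
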
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
index 66676f5907ee..6d15f045a767 100644
--- a/drivers/staging/most/dim2/Makefile
+++ b/drivers/staging/most/dim2/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MOST_DIM2) += most_dim2.o
most_dim2-objs := dim2.o hal.o sysfs.o
-ccflags-y += -Idrivers/staging/
+ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/i2c/Makefile b/drivers/staging/most/i2c/Makefile
index a7d094c1e1c2..c032fea979b3 100644
--- a/drivers/staging/most/i2c/Makefile
+++ b/drivers/staging/most/i2c/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MOST_I2C) += most_i2c.o
most_i2c-objs := i2c.o
-ccflags-y += -Idrivers/staging/
+ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/net/Makefile b/drivers/staging/most/net/Makefile
index 54500aa77be8..820faec6b296 100644
--- a/drivers/staging/most/net/Makefile
+++ b/drivers/staging/most/net/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MOST_NET) += most_net.o
most_net-objs := net.o
-ccflags-y += -Idrivers/staging/
+ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/sound/Makefile b/drivers/staging/most/sound/Makefile
index eee8774e38cb..5bb55bb108fb 100644
--- a/drivers/staging/most/sound/Makefile
+++ b/drivers/staging/most/sound/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MOST_SOUND) += most_sound.o
most_sound-objs := sound.o
-ccflags-y += -Idrivers/staging/
+ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/usb/Makefile b/drivers/staging/most/usb/Makefile
index 18d28cba4fbf..910cd08bad7c 100644
--- a/drivers/staging/most/usb/Makefile
+++ b/drivers/staging/most/usb/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MOST_USB) += most_usb.o
most_usb-objs := usb.o
-ccflags-y += -Idrivers/staging/
+ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/most/video/Makefile b/drivers/staging/most/video/Makefile
index 1c8e520e02a2..c6e01b6ecfe6 100644
--- a/drivers/staging/most/video/Makefile
+++ b/drivers/staging/most/video/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MOST_VIDEO) += most_video.o
most_video-objs := video.o
-ccflags-y += -Idrivers/staging/
+ccflags-y += -I $(srctree)/drivers/staging/
diff --git a/drivers/staging/mt7621-dma/Kconfig b/drivers/staging/mt7621-dma/Kconfig
index 2423c40099d1..b6e48a682c44 100644
--- a/drivers/staging/mt7621-dma/Kconfig
+++ b/drivers/staging/mt7621-dma/Kconfig
@@ -1,9 +1,3 @@
-config DMA_RALINK
- tristate "RALINK DMA support"
- depends on RALINK && !SOC_RT288X
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
-
config MTK_HSDMA
tristate "MTK HSDMA support"
depends on RALINK && SOC_MT7621
diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
index d3152d45cf45..c9e3e1619ab0 100644
--- a/drivers/staging/mt7621-dma/Makefile
+++ b/drivers/staging/mt7621-dma/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d67a2504adb1..97571f1d697b 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
* MTK HSDMA support
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/dmaengine.h>
@@ -191,7 +186,7 @@ static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
}
static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
- unsigned reg, u32 val)
+ unsigned int reg, u32 val)
{
writel(val, hsdma->base + reg);
}
@@ -242,7 +237,7 @@ static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
int i;
dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
- chan->tx_idx, chan->rx_idx);
+ chan->tx_idx, chan->rx_idx);
for (i = 0; i < HSDMA_DESCS_NUM; i++) {
tx_desc = &chan->tx_ring[i];
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index 6a1699ce9455..b73385540216 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -136,8 +136,8 @@
&pinctrl {
state_default: pinctrl0 {
gpio {
- ralink,group = "wdt", "rgmii2", "uart3";
- ralink,function = "gpio";
+ groups = "wdt", "rgmii2", "uart3";
+ function = "gpio";
};
};
};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 71f069d59ad8..6aff3680ce4b 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -204,82 +204,82 @@
i2c_pins: i2c0 {
i2c0 {
- group = "i2c";
+ groups = "i2c";
function = "i2c";
};
};
spi_pins: spi0 {
spi0 {
- group = "spi";
+ groups = "spi";
function = "spi";
};
};
uart1_pins: uart1 {
uart1 {
- group = "uart1";
+ groups = "uart1";
function = "uart1";
};
};
uart2_pins: uart2 {
uart2 {
- group = "uart2";
+ groups = "uart2";
function = "uart2";
};
};
uart3_pins: uart3 {
uart3 {
- group = "uart3";
+ groups = "uart3";
function = "uart3";
};
};
rgmii1_pins: rgmii1 {
rgmii1 {
- group = "rgmii1";
+ groups = "rgmii1";
function = "rgmii1";
};
};
rgmii2_pins: rgmii2 {
rgmii2 {
- group = "rgmii2";
+ groups = "rgmii2";
function = "rgmii2";
};
};
mdio_pins: mdio0 {
mdio0 {
- group = "mdio";
+ groups = "mdio";
function = "mdio";
};
};
pcie_pins: pcie0 {
pcie0 {
- group = "pcie";
+ groups = "pcie";
function = "pcie rst";
};
};
nand_pins: nand0 {
spi-nand {
- group = "spi";
+ groups = "spi";
function = "nand1";
};
sdhci-nand {
- group = "sdhci";
+ groups = "sdhci";
function = "nand2";
};
};
sdhci_pins: sdhci0 {
sdhci0 {
- group = "sdhci";
+ groups = "sdhci";
function = "sdhci";
};
};
@@ -420,10 +420,12 @@
status = "disabled";
- resets = <&rstctrl 24 &rstctrl 25 &rstctrl 26>;
- reset-names = "pcie0", "pcie1", "pcie2";
+ resets = <&rstctrl 23 &rstctrl 24 &rstctrl 25 &rstctrl 26>;
+ reset-names = "pcie", "pcie0", "pcie1", "pcie2";
clocks = <&clkctrl 24 &clkctrl 25 &clkctrl 26>;
clock-names = "pcie0", "pcie1", "pcie2";
+ phys = <&pcie0_port>, <&pcie1_port>, <&pcie2_port>;
+ phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
pcie@0,0 {
reg = <0x0000 0 0 0 0>;
@@ -449,4 +451,33 @@
bus-range = <0x00 0xff>;
};
};
+
+ pcie0_phy: pcie-phy@1e149000 {
+ compatible = "mediatek,mt7621-pci-phy";
+ reg = <0x1e149000 0x0700>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcie0_port: pcie-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ pcie1_port: pcie-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
+
+ pcie1_phy: pcie-phy@1e14a000 {
+ compatible = "mediatek,mt7621-pci-phy";
+ reg = <0x1e14a000 0x0700>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcie2_port: pcie-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+ };
};
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
index 40a7d47be913..8c4228e2c987 100644
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ b/drivers/staging/mt7621-eth/ethtool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
index 40b4cf011660..0071469aea6c 100644
--- a/drivers/staging/mt7621-eth/ethtool.h
+++ b/drivers/staging/mt7621-eth/ethtool.h
@@ -1,12 +1,5 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
* Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index 21a76a8ccc26..6027b19f7bc2 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -1396,8 +1396,7 @@ static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
if (!ring->tx_buf)
goto no_tx_mem;
- ring->tx_dma = dma_zalloc_coherent(eth->dev,
- ring->tx_ring_size * sz,
+ ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
&ring->tx_phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->tx_dma)
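
Note: dma_zalloc_coherent() was removed from the kernel around this time; as the hunk shows, dma_alloc_coherent() is the drop-in replacement and already returns zeroed memory, so the remaining __GFP_ZERO is redundant but harmless. A hedged sketch of the bare call, with 'dev', 'size' and 'phys' assumed to come from the caller:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative wrapper only: the returned buffer is zero-filled, no
 * __GFP_ZERO needed.
 */
static void *alloc_ring(struct device *dev, size_t size, dma_addr_t *phys)
{
	return dma_alloc_coherent(dev, size, phys, GFP_ATOMIC);
}
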
diff --git a/drivers/staging/mt7621-mmc/Kconfig b/drivers/staging/mt7621-mmc/Kconfig
index c6dfe8c637dc..1eb79cd6e22f 100644
--- a/drivers/staging/mt7621-mmc/Kconfig
+++ b/drivers/staging/mt7621-mmc/Kconfig
@@ -1,6 +1,6 @@
config MTK_MMC
tristate "MTK SD/MMC"
- depends on !MTD_NAND_RALINK && MMC
+ depends on RALINK && MMC
config MTK_AEE_KDUMP
bool "MTK AEE KDUMP"
diff --git a/drivers/staging/mt7621-mmc/dbg.c b/drivers/staging/mt7621-mmc/dbg.c
index eabe0595978b..c7c091fa1da0 100644
--- a/drivers/staging/mt7621-mmc/dbg.c
+++ b/drivers/staging/mt7621-mmc/dbg.c
@@ -36,7 +36,6 @@
* Inc.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kthread.h>
diff --git a/drivers/staging/mt7621-mmc/mt6575_sd.h b/drivers/staging/mt7621-mmc/mt6575_sd.h
index 4e287c140acb..038a484a9476 100644
--- a/drivers/staging/mt7621-mmc/mt6575_sd.h
+++ b/drivers/staging/mt7621-mmc/mt6575_sd.h
@@ -363,7 +363,7 @@ enum {
#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F << 10)
#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7)
#define MSDC_CKGEN_MSDC_CK_SEL (0x1 << 6)
-#define CARD_READY_FOR_DATA (1 << 8)
+#define CARD_READY_FOR_DATA BIT(8)
#define CARD_CURRENT_STATE(x) ((x & 0x00001E00) >> 9)
/*--------------------------------------------------------------------------*/
diff --git a/drivers/staging/mt7621-pci-phy/Kconfig b/drivers/staging/mt7621-pci-phy/Kconfig
new file mode 100644
index 000000000000..b9f6ab784ee8
--- /dev/null
+++ b/drivers/staging/mt7621-pci-phy/Kconfig
@@ -0,0 +1,7 @@
+config PCI_MT7621_PHY
+ tristate "MediaTek MT7621 PCI PHY Driver"
+ depends on RALINK && OF
+ select GENERIC_PHY
+ help
+	  Say 'Y' here to add support for the MediaTek MT7621 PCI PHY driver.
+
diff --git a/drivers/staging/mt7621-pci-phy/Makefile b/drivers/staging/mt7621-pci-phy/Makefile
new file mode 100644
index 000000000000..a970056f05c1
--- /dev/null
+++ b/drivers/staging/mt7621-pci-phy/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PCI_MT7621_PHY) += pci-mt7621-phy.o
diff --git a/drivers/staging/mt7621-pci-phy/TODO b/drivers/staging/mt7621-pci-phy/TODO
new file mode 100644
index 000000000000..a255e8f753eb
--- /dev/null
+++ b/drivers/staging/mt7621-pci-phy/TODO
@@ -0,0 +1,4 @@
+
+- general code review and cleanup
+
+Cc: NeilBrown <neil@brown.name> and Sergio Paracuellos <sergio.paracuellos@gmail.com>
diff --git a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
new file mode 100644
index 000000000000..33a8a698bdd0
--- /dev/null
+++ b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
@@ -0,0 +1,54 @@
+Mediatek MT7621 PCIe PHY
+
+Required properties:
+- compatible: must be "mediatek,mt7621-pci-phy"
+- reg: base address and length of the PCIe PHY block
+- #address-cells: must be 1
+- #size-cells: must be 0
+
+Each PCIe PHY should be represented by a child node.
+
+Required properties for the child node:
+- reg: the PHY ID
+  0 - PCIe RC 0
+  1 - PCIe RC 1
+- #phy-cells: must be 0
+
+Example:
+ pcie0_phy: pcie-phy@1a149000 {
+ compatible = "mediatek,mt7621-pci-phy";
+ reg = <0x1a149000 0x0700>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcie0_port: pcie-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ pcie1_port: pcie-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
+
+ pcie1_phy: pcie-phy@1a14a000 {
+ compatible = "mediatek,mt7621-pci-phy";
+ reg = <0x1a14a000 0x0700>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcie2_port: pcie-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+ };
+
+ /* users of the PCIe phy */
+
+ pcie: pcie@1e140000 {
+ ...
+ ...
+ phys = <&pcie0_port>, <&pcie1_port>, <&pcie2_port>;
+ phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
+	};
\ No newline at end of file
diff --git a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
new file mode 100644
index 000000000000..d3ca2f019112
--- /dev/null
+++ b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mediatek MT7621 PCI PHY Driver
+ * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <mt7621.h>
+#include <ralink_regs.h>
+
+#define RALINK_CLKCFG1 0x30
+#define CHIP_REV_MT7621_E2 0x0101
+
+#define PCIE_PORT_CLK_EN(x) BIT(24 + (x))
+
+#define RG_PE1_PIPE_REG 0x02c
+#define RG_PE1_PIPE_RST BIT(12)
+#define RG_PE1_PIPE_CMD_FRC BIT(4)
+
+#define RG_P0_TO_P1_WIDTH 0x100
+#define RG_PE1_H_LCDDS_REG 0x49c
+#define RG_PE1_H_LCDDS_PCW GENMASK(30, 0)
+#define RG_PE1_H_LCDDS_PCW_VAL(x) ((0x7fffffff & (x)) << 0)
+
+#define RG_PE1_FRC_H_XTAL_REG 0x400
+#define RG_PE1_FRC_H_XTAL_TYPE BIT(8)
+#define RG_PE1_H_XTAL_TYPE GENMASK(10, 9)
+#define RG_PE1_H_XTAL_TYPE_VAL(x) ((0x3 & (x)) << 9)
+
+#define RG_PE1_FRC_PHY_REG 0x000
+#define RG_PE1_FRC_PHY_EN BIT(4)
+#define RG_PE1_PHY_EN BIT(5)
+
+#define RG_PE1_H_PLL_REG 0x490
+#define RG_PE1_H_PLL_BC GENMASK(23, 22)
+#define RG_PE1_H_PLL_BC_VAL(x) ((0x3 & (x)) << 22)
+#define RG_PE1_H_PLL_BP GENMASK(21, 18)
+#define RG_PE1_H_PLL_BP_VAL(x) ((0xf & (x)) << 18)
+#define RG_PE1_H_PLL_IR GENMASK(15, 12)
+#define RG_PE1_H_PLL_IR_VAL(x) ((0xf & (x)) << 12)
+#define RG_PE1_H_PLL_IC GENMASK(11, 8)
+#define RG_PE1_H_PLL_IC_VAL(x) ((0xf & (x)) << 8)
+#define RG_PE1_H_PLL_PREDIV GENMASK(7, 6)
+#define RG_PE1_H_PLL_PREDIV_VAL(x) ((0x3 & (x)) << 6)
+#define RG_PE1_PLL_DIVEN GENMASK(3, 1)
+#define RG_PE1_PLL_DIVEN_VAL(x) ((0x7 & (x)) << 1)
+
+#define RG_PE1_H_PLL_FBKSEL_REG 0x4bc
+#define RG_PE1_H_PLL_FBKSEL GENMASK(5, 4)
+#define RG_PE1_H_PLL_FBKSEL_VAL(x) ((0x3 & (x)) << 4)
+
+#define RG_PE1_H_LCDDS_SSC_PRD_REG 0x4a4
+#define RG_PE1_H_LCDDS_SSC_PRD GENMASK(15, 0)
+#define RG_PE1_H_LCDDS_SSC_PRD_VAL(x) ((0xffff & (x)) << 0)
+
+#define RG_PE1_H_LCDDS_SSC_DELTA_REG 0x4a8
+#define RG_PE1_H_LCDDS_SSC_DELTA GENMASK(11, 0)
+#define RG_PE1_H_LCDDS_SSC_DELTA_VAL(x) ((0xfff & (x)) << 0)
+#define RG_PE1_H_LCDDS_SSC_DELTA1 GENMASK(27, 16)
+#define RG_PE1_H_LCDDS_SSC_DELTA1_VAL(x) ((0xff & (x)) << 16)
+
+#define RG_PE1_LCDDS_CLK_PH_INV_REG 0x4a0
+#define RG_PE1_LCDDS_CLK_PH_INV BIT(5)
+
+#define RG_PE1_H_PLL_BR_REG 0x4ac
+#define RG_PE1_H_PLL_BR GENMASK(18, 16)
+#define RG_PE1_H_PLL_BR_VAL(x) ((0x7 & (x)) << 16)
+
+#define RG_PE1_MSTCKDIV_REG 0x414
+#define RG_PE1_MSTCKDIV GENMASK(7, 6)
+#define RG_PE1_MSTCKDIV_VAL(x) ((0x3 & (x)) << 6)
+
+#define RG_PE1_FRC_MSTCKDIV BIT(5)
+
+/**
+ * struct mt7621_pci_phy_instance - MT7621 PCIe PHY device
+ * @phy: pointer to the kernel PHY device
+ * @port_base: base register
+ * @index: internal ID to identify the MT7621 PCIe PHY
+ */
+struct mt7621_pci_phy_instance {
+ struct phy *phy;
+ void __iomem *port_base;
+ u32 index;
+};
+
+/**
+ * struct mt7621_pci_phy - MT7621 PCIe PHY core
+ * @dev: pointer to device
+ * @phys: pointer to MT7621 PHY devices
+ * @nphys: number of PHY devices for this core
+ */
+struct mt7621_pci_phy {
+ struct device *dev;
+ struct mt7621_pci_phy_instance **phys;
+ int nphys;
+};
+
+static inline u32 phy_read(struct mt7621_pci_phy_instance *instance, u32 reg)
+{
+ return readl(instance->port_base + reg);
+}
+
+static inline void phy_write(struct mt7621_pci_phy_instance *instance,
+ u32 val, u32 reg)
+{
+ writel(val, instance->port_base + reg);
+}
+
+static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy,
+ struct mt7621_pci_phy_instance *instance)
+{
+ u32 offset = (instance->index != 1) ?
+ RG_PE1_PIPE_REG : RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH;
+ u32 reg;
+
+ reg = phy_read(instance, offset);
+ reg &= ~(RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC);
+ reg |= (RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC);
+ phy_write(instance, reg, offset);
+}
+
+static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy,
+ struct mt7621_pci_phy_instance *instance)
+{
+ struct device *dev = phy->dev;
+ u32 reg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0);
+ u32 offset;
+ u32 val;
+
+ reg = (reg >> 6) & 0x7;
+ /* Set PCIe Port PHY to disable SSC */
+ /* Debug Xtal Type */
+ val = phy_read(instance, RG_PE1_FRC_H_XTAL_REG);
+ val &= ~(RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE);
+ val |= RG_PE1_FRC_H_XTAL_TYPE;
+ val |= RG_PE1_H_XTAL_TYPE_VAL(0x00);
+ phy_write(instance, val, RG_PE1_FRC_H_XTAL_REG);
+
+ /* disable port */
+ offset = (instance->index != 1) ?
+ RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
+ val = phy_read(instance, offset);
+ val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
+ val |= RG_PE1_FRC_PHY_EN;
+ phy_write(instance, val, offset);
+
+ /* Set Pre-divider ratio (for host mode) */
+ val = phy_read(instance, RG_PE1_H_PLL_REG);
+ val &= ~(RG_PE1_H_PLL_PREDIV);
+
+ if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */
+ val |= RG_PE1_H_PLL_PREDIV_VAL(0x01);
+ phy_write(instance, val, RG_PE1_H_PLL_REG);
+ dev_info(dev, "Xtal is 40MHz\n");
+ } else { /* 25MHz | 20MHz Xtal */
+ val |= RG_PE1_H_PLL_PREDIV_VAL(0x00);
+ phy_write(instance, val, RG_PE1_H_PLL_REG);
+ if (reg >= 6) {
+ dev_info(dev, "Xtal is 25MHz\n");
+
+ /* Select feedback clock */
+ val = phy_read(instance, RG_PE1_H_PLL_FBKSEL_REG);
+ val &= ~(RG_PE1_H_PLL_FBKSEL);
+ val |= RG_PE1_H_PLL_FBKSEL_VAL(0x01);
+ phy_write(instance, val, RG_PE1_H_PLL_FBKSEL_REG);
+
+ /* DDS NCPO PCW (for host mode) */
+ val = phy_read(instance, RG_PE1_H_LCDDS_SSC_PRD_REG);
+ val &= ~(RG_PE1_H_LCDDS_SSC_PRD);
+ val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000);
+ phy_write(instance, val, RG_PE1_H_LCDDS_SSC_PRD_REG);
+
+ /* DDS SSC dither period control */
+ val = phy_read(instance, RG_PE1_H_LCDDS_SSC_PRD_REG);
+ val &= ~(RG_PE1_H_LCDDS_SSC_PRD);
+ val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d);
+ phy_write(instance, val, RG_PE1_H_LCDDS_SSC_PRD_REG);
+
+ /* DDS SSC dither amplitude control */
+ val = phy_read(instance, RG_PE1_H_LCDDS_SSC_DELTA_REG);
+ val &= ~(RG_PE1_H_LCDDS_SSC_DELTA |
+ RG_PE1_H_LCDDS_SSC_DELTA1);
+ val |= RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a);
+ val |= RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a);
+ phy_write(instance, val, RG_PE1_H_LCDDS_SSC_DELTA_REG);
+ } else {
+ dev_info(dev, "Xtal is 20MHz\n");
+ }
+ }
+
+ /* DDS clock inversion */
+ val = phy_read(instance, RG_PE1_LCDDS_CLK_PH_INV_REG);
+ val &= ~(RG_PE1_LCDDS_CLK_PH_INV);
+ val |= RG_PE1_LCDDS_CLK_PH_INV;
+ phy_write(instance, val, RG_PE1_LCDDS_CLK_PH_INV_REG);
+
+ /* Set PLL bits */
+ val = phy_read(instance, RG_PE1_H_PLL_REG);
+ val &= ~(RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR |
+ RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN);
+ val |= RG_PE1_H_PLL_BC_VAL(0x02);
+ val |= RG_PE1_H_PLL_BP_VAL(0x06);
+ val |= RG_PE1_H_PLL_IR_VAL(0x02);
+ val |= RG_PE1_H_PLL_IC_VAL(0x01);
+ val |= RG_PE1_PLL_DIVEN_VAL(0x02);
+ phy_write(instance, val, RG_PE1_H_PLL_REG);
+
+ val = phy_read(instance, RG_PE1_H_PLL_BR_REG);
+ val &= ~(RG_PE1_H_PLL_BR);
+ val |= RG_PE1_H_PLL_BR_VAL(0x00);
+ phy_write(instance, val, RG_PE1_H_PLL_BR_REG);
+
+ if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */
+ /* set force mode enable of da_pe1_mstckdiv */
+ val = phy_read(instance, RG_PE1_MSTCKDIV_REG);
+ val &= ~(RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV);
+ val |= (RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV);
+ phy_write(instance, val, RG_PE1_MSTCKDIV_REG);
+ }
+}
+
+static int mt7621_pci_phy_init(struct phy *phy)
+{
+ struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
+ struct mt7621_pci_phy *mphy = dev_get_drvdata(phy->dev.parent);
+ u32 chip_rev_id = rt_sysc_r32(SYSC_REG_CHIP_REV);
+
+ if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
+ mt7621_bypass_pipe_rst(mphy, instance);
+
+ mt7621_set_phy_for_ssc(mphy, instance);
+
+ return 0;
+}
+
+static int mt7621_pci_phy_power_on(struct phy *phy)
+{
+ struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
+ u32 offset = (instance->index != 1) ?
+ RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
+ u32 val;
+
+ /* Enable PHY and disable force mode */
+ val = phy_read(instance, offset);
+ val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
+ val |= (RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
+ phy_write(instance, val, offset);
+
+ return 0;
+}
+
+static int mt7621_pci_phy_power_off(struct phy *phy)
+{
+ struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
+ u32 offset = (instance->index != 1) ?
+ RG_PE1_FRC_PHY_REG : RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
+ u32 val;
+
+ /* Disable PHY */
+ val = phy_read(instance, offset);
+ val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
+ val |= RG_PE1_FRC_PHY_EN;
+ phy_write(instance, val, offset);
+
+ return 0;
+}
+
+static int mt7621_pci_phy_exit(struct phy *phy)
+{
+ struct mt7621_pci_phy_instance *instance = phy_get_drvdata(phy);
+
+ rt_sysc_m32(PCIE_PORT_CLK_EN(instance->index), 0, RALINK_CLKCFG1);
+
+ return 0;
+}
+
+static const struct phy_ops mt7621_pci_phy_ops = {
+ .init = mt7621_pci_phy_init,
+ .exit = mt7621_pci_phy_exit,
+ .power_on = mt7621_pci_phy_power_on,
+ .power_off = mt7621_pci_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int mt7621_pci_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child_np;
+ struct phy_provider *provider;
+ struct mt7621_pci_phy *phy;
+ struct resource res;
+ int port, ret;
+ void __iomem *port_base;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->nphys = of_get_child_count(np);
+ phy->phys = devm_kcalloc(dev, phy->nphys,
+ sizeof(*phy->phys), GFP_KERNEL);
+ if (!phy->phys)
+ return -ENOMEM;
+
+ phy->dev = dev;
+ platform_set_drvdata(pdev, phy);
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to get address resource(id-%d)\n", port);
+ return ret;
+ }
+
+ port_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(port_base)) {
+ dev_err(dev, "failed to remap phy regs\n");
+ return PTR_ERR(port_base);
+ }
+
+ port = 0;
+ for_each_child_of_node(np, child_np) {
+ struct mt7621_pci_phy_instance *instance;
+ struct phy *pphy;
+
+ instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
+ if (!instance) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
+
+ phy->phys[port] = instance;
+
+ pphy = devm_phy_create(dev, child_np, &mt7621_pci_phy_ops);
+		if (IS_ERR(pphy)) {
+			dev_err(dev, "failed to create phy\n");
+			ret = PTR_ERR(pphy);
+ goto put_child;
+ }
+
+ instance->port_base = port_base;
+ instance->phy = pphy;
+ instance->index = port;
+ phy_set_drvdata(pphy, instance);
+ port++;
+ }
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+
+put_child:
+ of_node_put(child_np);
+ return ret;
+}
+
+static const struct of_device_id mt7621_pci_phy_ids[] = {
+ { .compatible = "mediatek,mt7621-pci-phy" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt7621_pci_phy_ids);
+
+static struct platform_driver mt7621_pci_phy_driver = {
+ .probe = mt7621_pci_phy_probe,
+ .driver = {
+ .name = "mt7621-pci-phy",
+ .of_match_table = of_match_ptr(mt7621_pci_phy_ids),
+ },
+};
+
+static int __init mt7621_pci_phy_drv_init(void)
+{
+ return platform_driver_register(&mt7621_pci_phy_driver);
+}
+
+module_init(mt7621_pci_phy_drv_init);
+
+MODULE_AUTHOR("Sergio Paracuellos <sergio.paracuellos@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT7621 PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/mt7621-pci/Makefile b/drivers/staging/mt7621-pci/Makefile
index 607b84bedcc3..d4655a726b61 100644
--- a/drivers/staging/mt7621-pci/Makefile
+++ b/drivers/staging/mt7621-pci/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_SOC_MT7621) += pci-mt7621.o
+obj-$(CONFIG_PCI_MT7621) += pci-mt7621.o
diff --git a/drivers/staging/mt7621-pci/TODO b/drivers/staging/mt7621-pci/TODO
index cf30f629b9fd..ccfd266db4ca 100644
--- a/drivers/staging/mt7621-pci/TODO
+++ b/drivers/staging/mt7621-pci/TODO
@@ -1,12 +1,4 @@
- general code review and cleanup
-- can this be converted to not require PCI_DRIVERS_LEGACY ??
- The irq returned by pcibios_map_irq is a "hwirq" (hardware irq number)
- and pci_assign_irq() assigns this directly to dev->irq, which
- expects a "virq" (virtual irq number). These numbers are different
- on MIPS. There is a gross hack to make it work on one
- specific platform, so it can be tested.
-- Should this be merged with arch/mips/pci/pci-mt7620.c ??
-- ensure device-tree requirements are documented
Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 31310b6fb7db..379ae780c691 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -25,6 +25,7 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <mt7621.h>
@@ -34,13 +35,8 @@
/* sysctl */
#define MT7621_CHIP_REV_ID 0x0c
-#define RALINK_CLKCFG1 0x30
-#define RALINK_RSTCTRL 0x34
#define CHIP_REV_MT7621_E2 0x0101
-/* RALINK_RSTCTRL bits */
-#define RALINK_PCIE_RST BIT(23)
-
/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM 0x70c
#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
@@ -90,81 +86,16 @@
#define PCIE_CLK_GEN_EN BIT(31)
#define PCIE_CLK_GEN_DIS 0
-#define PCIE_CLK_GEN1_DIS GENMASK(30,24)
+#define PCIE_CLK_GEN1_DIS GENMASK(30, 24)
#define PCIE_CLK_GEN1_EN (BIT(27) | BIT(25))
-#define RALINK_PCI_IO_MAP_BASE 0x1e160000
#define MEMORY_BASE 0x0
-/* pcie phy related macros */
-#define RALINK_PCIEPHY_P0P1_CTL_OFFSET 0x9000
-#define RALINK_PCIEPHY_P2_CTL_OFFSET 0xA000
-
-#define RG_P0_TO_P1_WIDTH 0x100
-
-#define RG_PE1_PIPE_REG 0x02c
-#define RG_PE1_PIPE_RST BIT(12)
-#define RG_PE1_PIPE_CMD_FRC BIT(4)
-
-#define RG_PE1_H_LCDDS_REG 0x49c
-#define RG_PE1_H_LCDDS_PCW GENMASK(30, 0)
-#define RG_PE1_H_LCDDS_PCW_VAL(x) ((0x7fffffff & (x)) << 0)
-
-#define RG_PE1_FRC_H_XTAL_REG 0x400
-#define RG_PE1_FRC_H_XTAL_TYPE BIT(8)
-#define RG_PE1_H_XTAL_TYPE GENMASK(10, 9)
-#define RG_PE1_H_XTAL_TYPE_VAL(x) ((0x3 & (x)) << 9)
-
-#define RG_PE1_FRC_PHY_REG 0x000
-#define RG_PE1_FRC_PHY_EN BIT(4)
-#define RG_PE1_PHY_EN BIT(5)
-
-#define RG_PE1_H_PLL_REG 0x490
-#define RG_PE1_H_PLL_BC GENMASK(23, 22)
-#define RG_PE1_H_PLL_BC_VAL(x) ((0x3 & (x)) << 22)
-#define RG_PE1_H_PLL_BP GENMASK(21, 18)
-#define RG_PE1_H_PLL_BP_VAL(x) ((0xf & (x)) << 18)
-#define RG_PE1_H_PLL_IR GENMASK(15, 12)
-#define RG_PE1_H_PLL_IR_VAL(x) ((0xf & (x)) << 12)
-#define RG_PE1_H_PLL_IC GENMASK(11, 8)
-#define RG_PE1_H_PLL_IC_VAL(x) ((0xf & (x)) << 8)
-#define RG_PE1_H_PLL_PREDIV GENMASK(7, 6)
-#define RG_PE1_H_PLL_PREDIV_VAL(x) ((0x3 & (x)) << 6)
-#define RG_PE1_PLL_DIVEN GENMASK(3, 1)
-#define RG_PE1_PLL_DIVEN_VAL(x) ((0x7 & (x)) << 1)
-
-#define RG_PE1_H_PLL_FBKSEL_REG 0x4bc
-#define RG_PE1_H_PLL_FBKSEL GENMASK(5, 4)
-#define RG_PE1_H_PLL_FBKSEL_VAL(x) ((0x3 & (x)) << 4)
-
-#define RG_PE1_H_LCDDS_SSC_PRD_REG 0x4a4
-#define RG_PE1_H_LCDDS_SSC_PRD GENMASK(15, 0)
-#define RG_PE1_H_LCDDS_SSC_PRD_VAL(x) ((0xffff & (x)) << 0)
-
-#define RG_PE1_H_LCDDS_SSC_DELTA_REG 0x4a8
-#define RG_PE1_H_LCDDS_SSC_DELTA GENMASK(11, 0)
-#define RG_PE1_H_LCDDS_SSC_DELTA_VAL(x) ((0xfff & (x)) << 0)
-#define RG_PE1_H_LCDDS_SSC_DELTA1 GENMASK(27, 16)
-#define RG_PE1_H_LCDDS_SSC_DELTA1_VAL(x) ((0xff & (x)) << 16)
-
-#define RG_PE1_LCDDS_CLK_PH_INV_REG 0x4a0
-#define RG_PE1_LCDDS_CLK_PH_INV BIT(5)
-
-#define RG_PE1_H_PLL_BR_REG 0x4ac
-#define RG_PE1_H_PLL_BR GENMASK(18, 16)
-#define RG_PE1_H_PLL_BR_VAL(x) ((0x7 & (x)) << 16)
-
-#define RG_PE1_MSTCKDIV_REG 0x414
-#define RG_PE1_MSTCKDIV GENMASK(7, 6)
-#define RG_PE1_MSTCKDIV_VAL(x) ((0x3 & (x)) << 6)
-
-#define RG_PE1_FRC_MSTCKDIV BIT(5)
-
/**
* struct mt7621_pcie_port - PCIe port information
* @base: I/O mapped register base
* @list: port list
* @pcie: pointer to PCIe host info
- * @phy_reg_offset: offset to related phy registers
+ * @phy: pointer to PHY control block
* @pcie_rst: pointer to port reset control
* @slot: port slot
* @enabled: indicates if port is enabled
@@ -173,7 +104,7 @@ struct mt7621_pcie_port {
void __iomem *base;
struct list_head list;
struct mt7621_pcie *pcie;
- u32 phy_reg_offset;
+ struct phy *phy;
struct reset_control *pcie_rst;
u32 slot;
bool enabled;
@@ -188,6 +119,7 @@ struct mt7621_pcie_port {
* @offset: IO / Memory offset
* @dev: Pointer to PCIe device
* @ports: pointer to PCIe port information
+ * @rst: pointer to pcie reset
*/
struct mt7621_pcie {
void __iomem *base;
@@ -200,6 +132,7 @@ struct mt7621_pcie {
resource_size_t io;
} offset;
struct list_head ports;
+ struct reset_control *rst;
};
static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
@@ -265,150 +198,6 @@ static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
}
-static void bypass_pipe_rst(struct mt7621_pcie_port *port)
-{
- struct mt7621_pcie *pcie = port->pcie;
- u32 phy_offset = port->phy_reg_offset;
- u32 offset = (port->slot != 1) ?
- phy_offset + RG_PE1_PIPE_REG :
- phy_offset + RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH;
- u32 reg = pcie_read(pcie, offset);
-
- reg &= ~(RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC);
- reg |= (RG_PE1_PIPE_RST | RG_PE1_PIPE_CMD_FRC);
- pcie_write(pcie, reg, offset);
-}
-
-static void set_phy_for_ssc(struct mt7621_pcie_port *port)
-{
- struct mt7621_pcie *pcie = port->pcie;
- struct device *dev = pcie->dev;
- u32 phy_offset = port->phy_reg_offset;
- u32 reg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0);
- u32 offset;
- u32 val;
-
- reg = (reg >> 6) & 0x7;
- /* Set PCIe Port PHY to disable SSC */
- /* Debug Xtal Type */
- offset = phy_offset + RG_PE1_FRC_H_XTAL_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE);
- val |= RG_PE1_FRC_H_XTAL_TYPE;
- val |= RG_PE1_H_XTAL_TYPE_VAL(0x00);
- pcie_write(pcie, val, offset);
-
- /* disable port */
- offset = (port->slot != 1) ?
- phy_offset + RG_PE1_FRC_PHY_REG :
- phy_offset + RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
- val |= RG_PE1_FRC_PHY_EN;
- pcie_write(pcie, val, offset);
-
- /* Set Pre-divider ratio (for host mode) */
- offset = phy_offset + RG_PE1_H_PLL_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_H_PLL_PREDIV);
-
- if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */
- val |= RG_PE1_H_PLL_PREDIV_VAL(0x01);
- pcie_write(pcie, val, offset);
- dev_info(dev, "Xtal is 40MHz\n");
- } else { /* 25MHz | 20MHz Xtal */
- val |= RG_PE1_H_PLL_PREDIV_VAL(0x00);
- pcie_write(pcie, val, offset);
- if (reg >= 6) {
- dev_info(dev, "Xtal is 25MHz\n");
-
- /* Select feedback clock */
- offset = phy_offset + RG_PE1_H_PLL_FBKSEL_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_H_PLL_FBKSEL);
- val |= RG_PE1_H_PLL_FBKSEL_VAL(0x01);
- pcie_write(pcie, val, offset);
-
- /* DDS NCPO PCW (for host mode) */
- offset = phy_offset + RG_PE1_H_LCDDS_SSC_PRD_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_H_LCDDS_SSC_PRD);
- val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000);
- pcie_write(pcie, val, offset);
-
- /* DDS SSC dither period control */
- offset = phy_offset + RG_PE1_H_LCDDS_SSC_PRD_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_H_LCDDS_SSC_PRD);
- val |= RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d);
- pcie_write(pcie, val, offset);
-
- /* DDS SSC dither amplitude control */
- offset = phy_offset + RG_PE1_H_LCDDS_SSC_DELTA_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_H_LCDDS_SSC_DELTA |
- RG_PE1_H_LCDDS_SSC_DELTA1);
- val |= RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a);
- val |= RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a);
- pcie_write(pcie, val, offset);
- } else {
- dev_info(dev, "Xtal is 20MHz\n");
- }
- }
-
- /* DDS clock inversion */
- offset = phy_offset + RG_PE1_LCDDS_CLK_PH_INV_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_LCDDS_CLK_PH_INV);
- val |= RG_PE1_LCDDS_CLK_PH_INV;
- pcie_write(pcie, val, offset);
-
- /* Set PLL bits */
- offset = phy_offset + RG_PE1_H_PLL_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR |
- RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN);
- val |= RG_PE1_H_PLL_BC_VAL(0x02);
- val |= RG_PE1_H_PLL_BP_VAL(0x06);
- val |= RG_PE1_H_PLL_IR_VAL(0x02);
- val |= RG_PE1_H_PLL_IC_VAL(0x01);
- val |= RG_PE1_PLL_DIVEN_VAL(0x02);
- pcie_write(pcie, val, offset);
-
- offset = phy_offset + RG_PE1_H_PLL_BR_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_H_PLL_BR);
- val |= RG_PE1_H_PLL_BR_VAL(0x00);
- pcie_write(pcie, val, offset);
-
- if (reg <= 5 && reg >= 3) { /* 40MHz Xtal */
- /* set force mode enable of da_pe1_mstckdiv */
- offset = phy_offset + RG_PE1_MSTCKDIV_REG;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV);
- val |= (RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV);
- pcie_write(pcie, val, offset);
- }
-
- /* Enable PHY and disable force mode */
- offset = (port->slot != 1) ?
- phy_offset + RG_PE1_FRC_PHY_REG :
- phy_offset + RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH;
- val = pcie_read(pcie, offset);
- val &= ~(RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
- val |= (RG_PE1_FRC_PHY_EN | RG_PE1_PHY_EN);
- pcie_write(pcie, val, offset);
-}
-
-static void mt7621_enable_phy(struct mt7621_pcie_port *port)
-{
- u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);
-
- if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
- bypass_pipe_rst(port);
- set_phy_for_ssc(port);
-}
-
static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
{
u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);
@@ -510,7 +299,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct device *dev = pcie->dev;
struct device_node *pnode = dev->of_node;
struct resource regs;
- char name[6];
+ char name[10];
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
@@ -534,11 +323,13 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
return PTR_ERR(port->pcie_rst);
}
+ snprintf(name, sizeof(name), "pcie-phy%d", slot);
+ port->phy = devm_phy_get(dev, name);
+ if (IS_ERR(port->phy))
+ return PTR_ERR(port->phy);
+
port->slot = slot;
port->pcie = pcie;
- port->phy_reg_offset = (slot != 2) ?
- RALINK_PCIEPHY_P0P1_CTL_OFFSET :
- RALINK_PCIEPHY_P2_CTL_OFFSET;
INIT_LIST_HEAD(&port->list);
list_add_tail(&port->list, &pcie->ports);
@@ -563,6 +354,12 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
+ pcie->rst = devm_reset_control_get_exclusive(dev, "pcie");
+ if (PTR_ERR(pcie->rst) == -EPROBE_DEFER) {
+ dev_err(dev, "failed to get pcie reset control\n");
+ return PTR_ERR(pcie->rst);
+ }
+
for_each_available_child_of_node(node, child) {
int slot;
@@ -588,6 +385,7 @@ static int mt7621_pcie_init_port(struct mt7621_pcie_port *port)
struct device *dev = pcie->dev;
u32 slot = port->slot;
u32 val = 0;
+ int err;
/*
* Any MT7621 Ralink pcie controller that doesn't have 0x0101 at
@@ -598,18 +396,36 @@ static int mt7621_pcie_init_port(struct mt7621_pcie_port *port)
val = read_config(pcie, slot, PCIE_FTS_NUM);
dev_info(dev, "Port %d N_FTS = %x\n", (unsigned int)val, slot);
+ err = phy_init(port->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize port%d phy\n", slot);
+ goto err_phy_init;
+ }
+
+ err = phy_power_on(port->phy);
+ if (err) {
+ dev_err(dev, "failed to power on port%d phy\n", slot);
+ goto err_phy_on;
+ }
+
if ((pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) == 0) {
dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n", slot);
mt7621_control_assert(port);
- rt_sysc_m32(PCIE_PORT_CLK_EN(slot), 0, RALINK_CLKCFG1);
port->enabled = false;
- } else {
- port->enabled = true;
+ err = -ENODEV;
+ goto err_no_link_up;
}
- mt7621_enable_phy(port);
+ port->enabled = true;
return 0;
+
+err_no_link_up:
+ phy_power_off(port->phy);
+err_phy_on:
+ phy_exit(port->phy);
+err_phy_init:
+ return err;
}
static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
@@ -628,13 +444,13 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
}
}
- rt_sysc_m32(0, RALINK_PCIE_RST, RALINK_RSTCTRL);
+ reset_control_assert(pcie->rst);
rt_sysc_m32(0x30, 2 << 4, SYSC_REG_SYSTEM_CONFIG1);
rt_sysc_m32(PCIE_CLK_GEN_EN, PCIE_CLK_GEN_DIS, RALINK_PCIE_CLK_GEN);
rt_sysc_m32(PCIE_CLK_GEN1_DIS, PCIE_CLK_GEN1_EN, RALINK_PCIE_CLK_GEN1);
rt_sysc_m32(PCIE_CLK_GEN_DIS, PCIE_CLK_GEN_EN, RALINK_PCIE_CLK_GEN);
msleep(50);
- rt_sysc_m32(RALINK_PCIE_RST, 0, RALINK_RSTCTRL);
+ reset_control_deassert(pcie->rst);
}
static int mt7621_pcie_enable_port(struct mt7621_pcie_port *port)
@@ -690,7 +506,7 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
if (port->enabled) {
- if (!mt7621_pcie_enable_port(port)) {
+ if (mt7621_pcie_enable_port(port)) {
dev_err(dev, "de-assert port %d PERST_N\n",
port->slot);
continue;
@@ -701,8 +517,9 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
}
for (slot = 0; slot < num_slots_enabled; slot++) {
- val = read_config(pcie, slot, 0x4);
- write_config(pcie, slot, 0x4, val | 0x4);
+ val = read_config(pcie, slot, PCI_COMMAND);
+ val |= PCI_COMMAND_MASTER;
+ write_config(pcie, slot, PCI_COMMAND, val);
/* configure RC FTS number to 250 when it leaves L0s */
val = read_config(pcie, slot, PCIE_FTS_NUM);
val &= ~PCIE_FTS_NUM_MASK;
@@ -714,7 +531,7 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
{
u32 pcie_link_status = 0;
- u32 val= 0;
+ u32 val = 0;
struct mt7621_pcie_port *port;
list_for_each_entry(port, &pcie->ports, list) {
@@ -728,15 +545,15 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
return -1;
/*
- * pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
- * 3'b000 x x x
- * 3'b001 x x 0
- * 3'b010 x 0 x
- * 3'b011 x 1 0
- * 3'b100 0 x x
- * 3'b101 1 x 0
- * 3'b110 1 0 x
- * 3'b111 2 1 0
+ * pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
+ * 3'b000 x x x
+ * 3'b001 x x 0
+ * 3'b010 x 0 x
+ * 3'b011 x 1 0
+ * 3'b100 0 x x
+ * 3'b101 1 x 0
+ * 3'b110 1 0 x
+ * 3'b111 2 1 0
*/
switch (pcie_link_status) {
case 2:
@@ -848,9 +665,6 @@ static int mt7621_pci_probe(struct platform_device *pdev)
return 0;
}
- pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE);
- pcie_write(pcie, RALINK_PCI_IO_MAP_BASE, RALINK_PCI_IOBASE);
-
mt7621_pcie_enable_ports(pcie);
err = mt7621_pci_parse_request_of_pci_ranges(pcie);
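
Note: the host-driver hunks above replace direct RALINK_RSTCTRL pokes with the reset-controller API. A hedged standalone sketch of that get/assert/deassert flow, keeping the same 50 ms settle delay the driver uses (illustrative only):

#include <linux/delay.h>
#include <linux/reset.h>

static int toggle_pcie_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_exclusive(dev, "pcie");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	msleep(50);

	return reset_control_deassert(rst);
}
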
diff --git a/drivers/staging/mt7621-pinctrl/Kconfig b/drivers/staging/mt7621-pinctrl/Kconfig
index 37cf9c3273be..fc3612711307 100644
--- a/drivers/staging/mt7621-pinctrl/Kconfig
+++ b/drivers/staging/mt7621-pinctrl/Kconfig
@@ -2,3 +2,4 @@ config PINCTRL_RT2880
bool "RT2800 pinctrl driver for RALINK/Mediatek SOCs"
depends on RALINK
select PINMUX
+ select GENERIC_PINCONF
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index aa98fbb17013..9b52d44abef1 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
@@ -73,48 +74,12 @@ static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev,
return 0;
}
-static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev,
- struct device_node *np_config,
- struct pinctrl_map **map,
- unsigned int *num_maps)
-{
- struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
- struct property *prop;
- const char *function_name, *group_name;
- int ret;
- int ngroups = 0;
- unsigned int reserved_maps = 0;
-
- for_each_node_with_property(np_config, "group")
- ngroups++;
-
- *map = NULL;
- ret = pinctrl_utils_reserve_map(pctrldev, map, &reserved_maps,
- num_maps, ngroups);
- if (ret) {
- dev_err(p->dev, "can't reserve map: %d\n", ret);
- return ret;
- }
-
- of_property_for_each_string(np_config, "group", prop, group_name) {
- ret = pinctrl_utils_add_map_mux(pctrldev, map, &reserved_maps,
- num_maps, group_name,
- function_name);
- if (ret) {
- dev_err(p->dev, "can't add map: %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
-
static const struct pinctrl_ops rt2880_pctrl_ops = {
.get_groups_count = rt2880_get_group_count,
.get_group_name = rt2880_get_group_name,
.get_group_pins = rt2880_get_group_pins,
- .dt_node_to_map = rt2880_pinctrl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
};
static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev)
@@ -385,7 +350,6 @@ static int rt2880_pinmux_probe(struct platform_device *pdev)
for_each_compatible_node(np, NULL, "ralink,rt2880-gpio") {
const __be32 *ngpio, *gpiobase;
struct pinctrl_gpio_range *range;
- char *name;
if (!of_device_is_available(np))
continue;
@@ -397,9 +361,10 @@ static int rt2880_pinmux_probe(struct platform_device *pdev)
return -EINVAL;
}
- range = devm_kzalloc(p->dev, sizeof(*range) + 4, GFP_KERNEL);
- range->name = name = (char *) &range[1];
- sprintf(name, "pio");
+ range = devm_kzalloc(p->dev, sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+ range->name = "pio";
range->npins = __be32_to_cpu(*ngpio);
range->base = __be32_to_cpu(*gpiobase);
range->pin_base = range->base;
diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c
index 513b6e79b985..b509f9fe3346 100644
--- a/drivers/staging/mt7621-spi/spi-mt7621.c
+++ b/drivers/staging/mt7621-spi/spi-mt7621.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* spi-mt7621.c -- MediaTek MT7621 SPI controller driver
*
@@ -8,34 +9,23 @@
* Some parts are based on spi-orion.c:
* Author: Shadi Ammouri <shadi@marvell.com>
* Copyright (C) 2007-2008 Marvell Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/clk.h>
-#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/swab.h>
-
-#include <ralink_regs.h>
-#define SPI_BPW_MASK(bits) BIT((bits) - 1)
+#define DRIVER_NAME "spi-mt7621"
-#define DRIVER_NAME "spi-mt7621"
/* in usec */
-#define RALINK_SPI_WAIT_MAX_LOOP 2000
+#define RALINK_SPI_WAIT_MAX_LOOP 2000
/* SPISTAT register bit field */
-#define SPISTAT_BUSY BIT(0)
+#define SPISTAT_BUSY BIT(0)
#define MT7621_SPI_TRANS 0x00
#define SPITRANS_BUSY BIT(16)
@@ -46,17 +36,21 @@
#define SPI_CTL_TX_RX_CNT_MASK 0xff
#define SPI_CTL_START BIT(8)
-#define MT7621_SPI_POLAR 0x38
#define MT7621_SPI_MASTER 0x28
+#define MASTER_MORE_BUFMODE BIT(2)
+#define MASTER_FULL_DUPLEX BIT(10)
+#define MASTER_RS_CLK_SEL GENMASK(27, 16)
+#define MASTER_RS_CLK_SEL_SHIFT 16
+#define MASTER_RS_SLAVE_SEL GENMASK(31, 29)
+
#define MT7621_SPI_MOREBUF 0x2c
+#define MT7621_SPI_POLAR 0x38
#define MT7621_SPI_SPACE 0x3c
#define MT7621_CPHA BIT(5)
#define MT7621_CPOL BIT(4)
#define MT7621_LSB_FIRST BIT(3)
-struct mt7621_spi;
-
struct mt7621_spi {
struct spi_master *master;
void __iomem *base;
@@ -87,9 +81,13 @@ static void mt7621_spi_reset(struct mt7621_spi *rs)
{
u32 master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
- master |= 7 << 29;
- master |= 1 << 2;
- master &= ~(1 << 10);
+ /*
+ * Select SPI device 7, enable "more buffer mode" and disable
+ * full-duplex (only half-duplex really works on this chip
+ * reliably)
+ */
+ master |= MASTER_RS_SLAVE_SEL | MASTER_MORE_BUFMODE;
+ master &= ~MASTER_FULL_DUPLEX;
mt7621_spi_write(rs, MT7621_SPI_MASTER, master);
rs->pending_write = 0;
@@ -125,18 +123,18 @@ static int mt7621_spi_prepare(struct spi_device *spi, unsigned int speed)
rate = 2;
reg = mt7621_spi_read(rs, MT7621_SPI_MASTER);
- reg &= ~(0xfff << 16);
- reg |= (rate - 2) << 16;
+ reg &= ~MASTER_RS_CLK_SEL;
+ reg |= (rate - 2) << MASTER_RS_CLK_SEL_SHIFT;
rs->speed = speed;
reg &= ~MT7621_LSB_FIRST;
if (spi->mode & SPI_LSB_FIRST)
reg |= MT7621_LSB_FIRST;
- /* This SPI controller seems to be tested on SPI flash only
- * and some bits are swizzled under other SPI modes probably
- * due to incorrect wiring inside the silicon. Only mode 0
- * works correctly.
+ /*
+ * This SPI controller seems to be tested on SPI flash only and some
+ * bits are swizzled under other SPI modes probably due to incorrect
+ * wiring inside the silicon. Only mode 0 works correctly.
*/
reg &= ~(MT7621_CPHA | MT7621_CPOL);
@@ -165,9 +163,10 @@ static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
int rx_len, u8 *buf)
{
- /* Combine with any pending write, and perform one or
- * more half-duplex transactions reading 'len' bytes.
- * Data to be written is already in MT7621_SPI_DATA*
+ /*
+ * Combine with any pending write, and perform one or more half-duplex
+ * transactions reading 'len' bytes. Data to be written is already in
+ * MT7621_SPI_DATA.
*/
int tx_len = rs->pending_write;
@@ -197,6 +196,7 @@ static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
*buf++ = val & 0xff;
val >>= 8;
}
+
rx_len -= i;
}
}
@@ -290,6 +290,7 @@ static int mt7621_spi_transfer_one_message(struct spi_master *master,
mt7621_spi_flush(rs);
mt7621_spi_set_cs(spi, 0);
+
msg_done:
m->status = status;
spi_finalize_current_message(master);
@@ -330,6 +331,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
int status = 0;
struct clk *clk;
struct mt7621_spi_ops *ops;
+ int ret;
match = of_match_device(mt7621_spi_match, &pdev->dev);
if (!match)
@@ -353,7 +355,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
return status;
master = spi_alloc_master(&pdev->dev, sizeof(*rs));
- if (master == NULL) {
+ if (!master) {
dev_info(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
@@ -377,7 +379,11 @@ static int mt7621_spi_probe(struct platform_device *pdev)
rs->pending_write = 0;
dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
- device_reset(&pdev->dev);
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "SPI reset failed!\n");
+ return ret;
+ }
mt7621_spi_reset(rs);
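
The clock-selection hunk above replaces the open-coded ~(0xfff << 16) mask with a
GENMASK()-based define plus a separate shift macro. As a rough alternative sketch
(not what the patch itself does), FIELD_PREP() from <linux/bitfield.h> derives the
shift from the mask and would let MASTER_RS_CLK_SEL_SHIFT be dropped; the helper
name below is hypothetical:

    #include <linux/bitfield.h>

    /* Sketch only: program the 12-bit divider field (bits 27:16) of
     * MT7621_SPI_MASTER.  FIELD_PREP() shifts (rate - 2) into place as
     * described by MASTER_RS_CLK_SEL, so no explicit shift constant is needed.
     */
    static void mt7621_spi_set_clk_div(struct mt7621_spi *rs, u32 rate)
    {
            u32 reg = mt7621_spi_read(rs, MT7621_SPI_MASTER);

            reg &= ~MASTER_RS_CLK_SEL;
            reg |= FIELD_PREP(MASTER_RS_CLK_SEL, rate - 2);
            mt7621_spi_write(rs, MT7621_SPI_MASTER, reg);
    }
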
diff --git a/drivers/staging/netlogic/Kconfig b/drivers/staging/netlogic/Kconfig
index d660de51b541..c25a00dd2d5f 100644
--- a/drivers/staging/netlogic/Kconfig
+++ b/drivers/staging/netlogic/Kconfig
@@ -2,6 +2,6 @@ config NETLOGIC_XLR_NET
tristate "Netlogic XLR/XLS network device"
depends on CPU_XLR
select PHYLIB
- ---help---
+ help
This driver support Netlogic XLR/XLS on chip gigabit
Ethernet.
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
index abf4c71ee66b..8be9d0b0c22c 100644
--- a/drivers/staging/netlogic/platform_net.c
+++ b/drivers/staging/netlogic/platform_net.c
@@ -1,36 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/device.h>
@@ -107,8 +78,9 @@ static struct platform_device *gmac_controller2_init(void *gmac0_addr)
.dev.platform_data = &ndata1,
};
- gmac4_addr = ioremap(CPHYSADDR(
- nlm_mmio_base(NETLOGIC_IO_GMAC_4_OFFSET)), 0xfff);
+ gmac4_addr =
+ ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_4_OFFSET)),
+ 0xfff);
ndata1.serdes_addr = gmac4_addr;
ndata1.pcs_addr = gmac4_addr;
ndata1.mii_addr = gmac0_addr;
@@ -134,8 +106,9 @@ static void xls_gmac_init(void)
{
int mac;
struct platform_device *xlr_net_dev1;
- void __iomem *gmac0_addr = ioremap(CPHYSADDR(
- nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)), 0xfff);
+ void __iomem *gmac0_addr =
+ ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)),
+ 0xfff);
static struct xlr_net_data ndata0 = {
.rfr_station = FMN_STNID_GMACRFR_0,
@@ -153,8 +126,9 @@ static void xls_gmac_init(void)
ndata0.mii_addr = gmac0_addr;
/* Passing GPIO base for serdes init. Only needed on sgmii ports */
- gpio_addr = ioremap(CPHYSADDR(
- nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET)), 0xfff);
+ gpio_addr =
+ ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET)),
+ 0xfff);
ndata0.gpio_addr = gpio_addr;
ndata0.cpu_mask = nlm_current_node()->coremask;
@@ -214,8 +188,9 @@ static void xlr_gmac_init(void)
.id = 0,
.dev.platform_data = &ndata0,
};
- ndata0.mii_addr = ioremap(CPHYSADDR(
- nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)), 0xfff);
+ ndata0.mii_addr =
+ ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)),
+ 0xfff);
ndata0.cpu_mask = nlm_current_node()->coremask;
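
The SPDX conversions in the hunks above and below follow the convention from
Documentation/process/license-rules.rst: a .c file carries the tag as a C++-style
comment on its first line, while a header uses a C-style comment, i.e.

    // SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)

at the top of a .c file, and

    /* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */

at the top of a .h file.
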
diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h
index e1b27f649590..f152d84099a2 100644
--- a/drivers/staging/netlogic/platform_net.h
+++ b/drivers/staging/netlogic/platform_net.h
@@ -1,35 +1,7 @@
-/*
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ *
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define PORTS_PER_CONTROLLER 4
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 4e6611e4c59b..8554fcf4321b 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -1,36 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#include <linux/phy.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
index f76e16cfd15d..518ea809b8fa 100644
--- a/drivers/staging/netlogic/xlr_net.h
+++ b/drivers/staging/netlogic/xlr_net.h
@@ -1,36 +1,9 @@
-/*
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ *
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
/* #define MAC_SPLIT_MODE */
#define MAC_SPACING 0x400
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
index 769c36cf6614..ae7ae50071ae 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ b/drivers/staging/octeon-usb/octeon-hcd.h
@@ -1797,7 +1797,7 @@ union cvmx_usbnx_usbp_ctl_status {
* This is a test signal. When the USB Core is
* powered up (not in Susned Mode), an automatic
* tester can use this to disable phy_clock and
- * free_clk, then re-eanable them with an aligned
+ * free_clk, then re-enable them with an aligned
* phase.
* '1': The phy_clk and free_clk outputs are
* disabled. "0": The phy_clock and free_clk outputs
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 2848fa71a33d..d6248eecf123 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
return -ENODEV;
priv->last_link = 0;
- phy_start_aneg(phydev);
+ phy_start(phydev);
return 0;
no_phy:
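
The one-line change above relies on phy_start() starting the phylib state machine,
from which autonegotiation proceeds when it is enabled, so the explicit
phy_start_aneg() call becomes redundant. A hedged sketch of the usual bring-up
order; everything except the phylib calls is illustrative:

    phydev = phy_connect(netdev, phy_bus_id, link_change_handler,
                         PHY_INTERFACE_MODE_SGMII);
    if (IS_ERR(phydev))
            return PTR_ERR(phydev);

    /* Starts the PHY state machine; aneg is triggered from there. */
    phy_start(phydev);
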
diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig
new file mode 100644
index 000000000000..a12b2c672d48
--- /dev/null
+++ b/drivers/staging/ralink-gdma/Kconfig
@@ -0,0 +1,6 @@
+config DMA_RALINK
+ tristate "RALINK DMA support"
+ depends on RALINK && !SOC_RT288X
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
diff --git a/drivers/staging/ralink-gdma/Makefile b/drivers/staging/ralink-gdma/Makefile
new file mode 100644
index 000000000000..5d917e0729bb
--- /dev/null
+++ b/drivers/staging/ralink-gdma/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
+
+ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/mt7621-dma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index 792a63bd55d4..d78042eba6dd 100644
--- a/drivers/staging/mt7621-dma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -821,9 +821,9 @@ static int gdma_dma_probe(struct platform_device *pdev)
return -EINVAL;
data = (struct gdma_data *) match->data;
- dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) +
- (sizeof(struct gdma_dmaengine_chan) * data->chancnt),
- GFP_KERNEL);
+ dma_dev = devm_kzalloc(&pdev->dev,
+ struct_size(dma_dev, chan, data->chancnt),
+ GFP_KERNEL);
if (!dma_dev) {
dev_err(&pdev->dev, "alloc dma device failed\n");
return -EINVAL;
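
struct_size() (from <linux/overflow.h>) computes sizeof(*ptr) plus n trailing
array elements with overflow checking, which is why the open-coded multiplication
above goes away. A generic sketch of the pattern; all names are hypothetical:

    /* Needs <linux/device.h>, <linux/overflow.h> and <linux/slab.h>. */
    struct example_chan {
            int id;
    };

    struct example_dev {
            unsigned int nchan;
            struct example_chan chan[];     /* flexible array member */
    };

    static struct example_dev *example_alloc(struct device *dev,
                                             unsigned int nchan)
    {
            struct example_dev *ed;

            /* sizeof(*ed) + nchan * sizeof(ed->chan[0]), overflow-checked */
            ed = devm_kzalloc(dev, struct_size(ed, chan, nchan), GFP_KERNEL);
            if (!ed)
                    return NULL;

            ed->nchan = nchan;
            return ed;
    }
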
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 1f232ba6651c..94c9d9f8ee5c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -82,7 +82,7 @@ static void update_BCNTIM(struct adapter *padapter)
/* calculate head_len */
offset = _FIXED_IE_LENGTH_;
- offset += pnetwork_mlmeext->Ssid.SsidLength + 2;
+ offset += pnetwork_mlmeext->ssid.ssid_length + 2;
/* get supported rates len */
p = rtw_get_ie(pie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_,
@@ -785,9 +785,9 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* SSID */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0) {
- memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
- memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len);
- pbss_network->Ssid.SsidLength = ie_len;
+ memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(pbss_network->ssid.ssid, (p + 2), ie_len);
+ pbss_network->ssid.ssid_length = ie_len;
}
/* channel */
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 407f65cf7150..83a2e58aef53 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -263,7 +263,7 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
int i;
for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (ssid[i].SsidLength) {
+ if (ssid[i].ssid_length) {
memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
psurveyPara->ssid_num++;
}
@@ -316,10 +316,10 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
led_control_8188eu(padapter, LED_CTL_START_TO_LINK);
- if (pmlmepriv->assoc_ssid.SsidLength == 0)
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+ if (pmlmepriv->assoc_ssid.ssid_length == 0)
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
else
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
@@ -358,10 +358,10 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
led_control_8188eu(padapter, LED_CTL_START_TO_LINK);
- if (pmlmepriv->assoc_ssid.SsidLength == 0)
+ if (pmlmepriv->assoc_ssid.ssid_length == 0)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
else
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.ssid));
pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index b7be71f904ed..51c3dd6d7ffb 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -88,7 +88,9 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
if (!efuseTbl)
return;
- tmp = kzalloc(EFUSE_MAX_SECTION_88E * (sizeof(void *) + EFUSE_MAX_WORD_UNIT * sizeof(u16)), GFP_KERNEL);
+ tmp = kcalloc(EFUSE_MAX_SECTION_88E,
+ sizeof(void *) + EFUSE_MAX_WORD_UNIT * sizeof(u16),
+ GFP_KERNEL);
if (!tmp) {
DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
goto eFuseWord_failed;
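
The kzalloc() to kcalloc() change above is the usual fix for an open-coded
count * size allocation: kcalloc() zeroes the buffer like kzalloc() but returns
NULL if the multiplication would overflow, rather than allocating a short buffer.
A generic sketch with hypothetical names:

    records = kcalloc(num_sections, bytes_per_section, GFP_KERNEL);
    if (!records)
            return -ENOMEM;
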
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 5c4ff81987bd..094e8e78f0e8 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -234,7 +234,7 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
ie += 2;
/* SSID */
- ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength, pdev_network->Ssid.Ssid, &sz);
+ ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->ssid.ssid_length, pdev_network->ssid.ssid, &sz);
/* supported rates */
if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) {
@@ -927,7 +927,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
if (pbuf && (wpa_ielen > 0)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: wpa_ielen: %d", __func__, wpa_ielen));
- if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ if (rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
@@ -940,7 +940,7 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
if (pbuf && (wpa_ielen > 0)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n"));
- if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ if (rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x) == _SUCCESS) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE OK!!!\n"));
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
@@ -975,9 +975,9 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
}
rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, NULL, &rsn_len, NULL, &wpa_len);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: ssid =%s\n", __func__, pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: ssid =%s\n", __func__, pnetwork->network.ssid.ssid));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: wpa_len =%d rsn_len =%d\n", __func__, wpa_len, rsn_len));
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: ssid =%s\n", __func__, pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: ssid =%s\n", __func__, pnetwork->network.ssid.ssid));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: wpa_len =%d rsn_len =%d\n", __func__, wpa_len, rsn_len));
if (rsn_len > 0) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 0b3eb0b40975..7d56767cdff6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -77,7 +77,7 @@ u8 rtw_do_join(struct adapter *padapter)
pibss = padapter->registrypriv.dev_network.MacAddress;
- memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(padapter);
@@ -208,7 +208,7 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
- ssid->Ssid, get_fwstate(pmlmepriv));
+ ssid->ssid, get_fwstate(pmlmepriv));
if (!padapter->hw_init_completed) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
@@ -229,8 +229,8 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
- if (pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength &&
- !memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength)) {
+ if (pmlmepriv->assoc_ssid.ssid_length == ssid->ssid_length &&
+ !memcmp(&pmlmepriv->assoc_ssid.ssid, ssid->ssid, ssid->ssid_length)) {
if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
("Set SSID is the same ssid, fw_state = 0x%08x\n",
@@ -257,8 +257,8 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
}
} else {
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set SSID not the same ssid\n"));
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid =[%s] len = 0x%x\n", ssid->Ssid, (unsigned int)ssid->SsidLength));
- RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("assoc_ssid =[%s] len = 0x%x\n", pmlmepriv->assoc_ssid.Ssid, (unsigned int)pmlmepriv->assoc_ssid.SsidLength));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid =[%s] len = 0x%x\n", ssid->ssid, (unsigned int)ssid->ssid_length));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("assoc_ssid =[%s] len = 0x%x\n", pmlmepriv->assoc_ssid.ssid, (unsigned int)pmlmepriv->assoc_ssid.ssid_length));
rtw_disassoc_cmd(padapter, 0, true);
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 714f7a70ed64..ca0cf8a86671 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -300,8 +300,8 @@ int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork)
static int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
{
- return (a->Ssid.SsidLength == b->Ssid.SsidLength) &&
- !memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
+ return (a->ssid.ssid_length == b->ssid.ssid_length) &&
+ !memcmp(a->ssid.ssid, b->ssid.ssid, a->ssid.ssid_length);
}
int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
@@ -315,9 +315,9 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
s_cap = le16_to_cpu(le_scap);
d_cap = le16_to_cpu(le_dcap);
- return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
+ return ((src->ssid.ssid_length == dst->ssid.ssid_length) &&
(!memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN)) &&
- (!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) &&
+ (!memcmp(src->ssid.ssid, dst->ssid.ssid, src->ssid.ssid_length)) &&
((s_cap & WLAN_CAPABILITY_IBSS) ==
(d_cap & WLAN_CAPABILITY_IBSS)) &&
((s_cap & WLAN_CAPABILITY_ESS) ==
@@ -558,7 +558,7 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
pnetwork = (struct wlan_bssid_ex *)pbuf;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("%s, ssid=%s\n", __func__, pnetwork->Ssid.Ssid));
+ ("%s, ssid=%s\n", __func__, pnetwork->ssid.ssid));
len = get_wlan_bssid_ex_sz(pnetwork);
if (len > (sizeof(struct wlan_bssid_ex))) {
@@ -587,8 +587,8 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
/* lock pmlmepriv->lock when you accessing network_q */
if (!check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
- if (pnetwork->Ssid.Ssid[0] == 0)
- pnetwork->Ssid.SsidLength = 0;
+ if (pnetwork->ssid.ssid[0] == 0)
+ pnetwork->ssid.ssid_length = 0;
rtw_add_network(adapter, pnetwork);
}
@@ -636,7 +636,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n"));
- memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
rtw_generate_random_ibss(pibss);
@@ -741,7 +741,7 @@ void rtw_free_assoc_resources_locked(struct adapter *adapter)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources\n"));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("tgt_network->network.MacAddress=%pM ssid=%s\n",
- tgt_network->network.MacAddress, tgt_network->network.Ssid.Ssid));
+ tgt_network->network.MacAddress, tgt_network->network.ssid.ssid));
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_AP_STATE)) {
struct sta_info *psta;
@@ -975,10 +975,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
rtw_get_encrypt_decrypt_from_registrypriv(adapter);
- if (pmlmepriv->assoc_ssid.SsidLength == 0)
+ if (pmlmepriv->assoc_ssid.ssid_length == 0)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
else
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
@@ -1279,7 +1279,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
- memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
rtw_update_registrypriv_dev_network(adapter);
@@ -1422,9 +1422,9 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
}
/* check ssid, if needed */
- if (pmlmepriv->assoc_ssid.SsidLength) {
- if (competitor->network.Ssid.SsidLength != pmlmepriv->assoc_ssid.SsidLength ||
- memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength))
+ if (pmlmepriv->assoc_ssid.ssid_length) {
+ if (competitor->network.ssid.ssid_length != pmlmepriv->assoc_ssid.ssid_length ||
+ memcmp(competitor->network.ssid.ssid, pmlmepriv->assoc_ssid.ssid, pmlmepriv->assoc_ssid.ssid_length))
goto exit;
}
@@ -1445,8 +1445,8 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
if (updated) {
DBG_88E("[by_bssid:%u][assoc_ssid:%s]new candidate: %s(%pM rssi:%d\n",
pmlmepriv->assoc_by_bssid,
- pmlmepriv->assoc_ssid.Ssid,
- (*candidate)->network.Ssid.Ssid,
+ pmlmepriv->assoc_ssid.ssid,
+ (*candidate)->network.ssid.ssid,
(*candidate)->network.MacAddress,
(int)(*candidate)->network.Rssi);
DBG_88E("[to_roaming:%u]\n", pmlmepriv->to_roaming);
@@ -1493,7 +1493,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
goto exit;
} else {
DBG_88E("%s: candidate: %s(%pM ch:%u)\n", __func__,
- candidate->network.Ssid.Ssid, candidate->network.MacAddress,
+ candidate->network.ssid.ssid, candidate->network.MacAddress,
candidate->network.Configuration.DSConfig);
}
@@ -1772,7 +1772,7 @@ void rtw_init_registrypriv_dev_network(struct adapter *adapter)
memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN);
- memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid));
pdev_network->Configuration.Length = sizeof(struct ndis_802_11_config);
pdev_network->Configuration.BeaconPeriod = 100;
@@ -2049,9 +2049,9 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
if (pmlmepriv->to_roaming > 0) {
DBG_88E("roaming from %s(%pM length:%d\n",
- pnetwork->network.Ssid.Ssid, pnetwork->network.MacAddress,
- pnetwork->network.Ssid.SsidLength);
- memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
+ pnetwork->network.ssid.ssid, pnetwork->network.MacAddress,
+ pnetwork->network.ssid.ssid_length);
+ memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.ssid, sizeof(struct ndis_802_11_ssid));
pmlmepriv->assoc_by_bssid = false;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 7a36661ebbed..8f28aefbe6f9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -402,7 +402,7 @@ static void issue_beacon(struct adapter *padapter, int timeout_ms)
pattrib->pktlen += 2;
/* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->ssid.ssid_length, cur_network->ssid.ssid, &pattrib->pktlen);
/* supported rates... */
rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
@@ -562,7 +562,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
/* below for ad-hoc mode */
/* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->ssid.ssid_length, cur_network->ssid.ssid, &pattrib->pktlen);
/* supported rates... */
rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
@@ -652,7 +652,7 @@ static int issue_probereq(struct adapter *padapter,
pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
if (pssid)
- pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &pattrib->pktlen);
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->ssid_length, pssid->ssid, &pattrib->pktlen);
else
pframe = rtw_set_ie(pframe, _SSID_IE_, 0, NULL, &pattrib->pktlen);
@@ -1059,7 +1059,7 @@ static void issue_assocreq(struct adapter *padapter)
pattrib->pktlen += 2;
/* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, pmlmeinfo->network.Ssid.SsidLength, pmlmeinfo->network.Ssid.Ssid, &pattrib->pktlen);
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pmlmeinfo->network.ssid.ssid_length, pmlmeinfo->network.ssid.ssid, &pattrib->pktlen);
/* supported rate & extended supported rate */
@@ -1929,7 +1929,7 @@ static void site_survey(struct adapter *padapter)
int i;
for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
+ if (pmlmeext->sitesurvey_res.ssid[i].ssid_length) {
/* todo: to issue two probe req??? */
issue_probereq(padapter,
&(pmlmeext->sitesurvey_res.ssid[i]),
@@ -2071,10 +2071,10 @@ static u8 collect_bss_info(struct adapter *padapter,
DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
return _FAIL;
}
- memcpy(bssid->Ssid.Ssid, (p + 2), len);
- bssid->Ssid.SsidLength = len;
+ memcpy(bssid->ssid.ssid, (p + 2), len);
+ bssid->ssid.ssid_length = len;
} else {
- bssid->Ssid.SsidLength = 0;
+ bssid->ssid.ssid_length = 0;
}
memset(bssid->SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
@@ -2526,7 +2526,7 @@ static unsigned int OnProbeReq(struct adapter *padapter,
/* check (wildcard) SSID */
if (p) {
- if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) ||
+ if ((ielen != 0 && memcmp((void *)(p+2), (void *)cur->ssid.ssid, cur->ssid.ssid_length)) ||
(ielen == 0 && pmlmeinfo->hidden_ssid_mode))
return _SUCCESS;
@@ -2975,10 +2975,10 @@ static unsigned int OnAssocReq(struct adapter *padapter,
goto OnAssocReqFail;
} else {
/* check if ssid match */
- if (memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
+ if (memcmp((void *)(p+2), cur->ssid.ssid, cur->ssid.ssid_length))
status = _STATS_FAILURE_;
- if (ie_len != cur->Ssid.SsidLength)
+ if (ie_len != cur->ssid.ssid_length)
status = _STATS_FAILURE_;
}
@@ -4658,7 +4658,7 @@ void linked_status_chk(struct adapter *padapter)
}
if (rx_chk != _SUCCESS)
- issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, psta->hwaddr, 3, 1);
+ issue_probereq_ex(padapter, &pmlmeinfo->network.ssid, psta->hwaddr, 3, 1);
if ((tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) || rx_chk != _SUCCESS) {
tx_chk = issue_nulldata(padapter, psta->hwaddr, 0, 3, 1);
@@ -4674,15 +4674,15 @@ void linked_status_chk(struct adapter *padapter)
if (rx_chk != _SUCCESS) {
if (pmlmeext->retry == 0) {
issue_probereq(padapter,
- &pmlmeinfo->network.Ssid,
+ &pmlmeinfo->network.ssid,
pmlmeinfo->network.MacAddress,
false);
issue_probereq(padapter,
- &pmlmeinfo->network.Ssid,
+ &pmlmeinfo->network.ssid,
pmlmeinfo->network.MacAddress,
false);
issue_probereq(padapter,
- &pmlmeinfo->network.Ssid,
+ &pmlmeinfo->network.ssid,
pmlmeinfo->network.MacAddress,
false);
}
@@ -5136,11 +5136,11 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
pmlmeext->sitesurvey_res.channel_idx = 0;
for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
- if (pparm->ssid[i].SsidLength) {
- memcpy(pmlmeext->sitesurvey_res.ssid[i].Ssid, pparm->ssid[i].Ssid, IW_ESSID_MAX_SIZE);
- pmlmeext->sitesurvey_res.ssid[i].SsidLength = pparm->ssid[i].SsidLength;
+ if (pparm->ssid[i].ssid_length) {
+ memcpy(pmlmeext->sitesurvey_res.ssid[i].ssid, pparm->ssid[i].ssid, IW_ESSID_MAX_SIZE);
+ pmlmeext->sitesurvey_res.ssid[i].ssid_length = pparm->ssid[i].ssid_length;
} else {
- pmlmeext->sitesurvey_res.ssid[i].SsidLength = 0;
+ pmlmeext->sitesurvey_res.ssid[i].ssid_length = 0;
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 364d6ea14bf8..2f90f60f1681 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
- crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
+ crypto_ops = lib80211_get_crypto_ops("WEP");
if (!crypto_ops)
return;
@@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
void *crypto_private = NULL;
int status = _SUCCESS;
const int keyindex = prxattrib->key_index;
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
+ struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
char iv[4], icv[4];
if (!crypto_ops) {
@@ -1291,7 +1291,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
void *crypto_private = NULL;
u8 *key, *pframe = skb->data;
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp");
+ struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
struct security_priv *psecuritypriv = &padapter->securitypriv;
char iv[8], icv[8];
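
With the try_then_request_module() wrapper gone, lib80211_get_crypto_ops() has to
find the WEP/CCMP ops already registered (for example because the corresponding
lib80211 crypt modules are built in or otherwise loaded); on these paths the
lookup then reduces to a plain NULL check, roughly:

    struct lib80211_crypto_ops *ops = lib80211_get_crypto_ops("WEP");

    if (!ops)       /* cipher support not present */
            return _FAIL;
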
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 5f9c9de1f1da..4480deef95a1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -935,17 +935,17 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
if (ssid_len > NDIS_802_11_LENGTH_SSID)
ssid_len = 0;
}
- memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
- bssid->Ssid.SsidLength = ssid_len;
+ memcpy(bssid->ssid.ssid, (p + 2), ssid_len);
+ bssid->ssid.ssid_length = ssid_len;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
- "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
- bssid->Ssid.SsidLength, cur_network->network.Ssid.Ssid,
- cur_network->network.Ssid.SsidLength));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.ssid.ssid:%s bssid.ssid.ssid_length:%d "
+ "cur_network->network.ssid.ssid:%s len:%d\n", __func__, bssid->ssid.ssid,
+ bssid->ssid.ssid_length, cur_network->network.ssid.ssid,
+ cur_network->network.ssid.ssid_length));
- if (memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
- bssid->Ssid.SsidLength != cur_network->network.Ssid.SsidLength) {
- if (bssid->Ssid.Ssid[0] != '\0' && bssid->Ssid.SsidLength != 0) { /* not hidden ssid */
+ if (memcmp(bssid->ssid.ssid, cur_network->network.ssid.ssid, 32) ||
+ bssid->ssid.ssid_length != cur_network->network.ssid.ssid_length) {
+ if (bssid->ssid.ssid[0] != '\0' && bssid->ssid.ssid_length != 0) { /* not hidden ssid */
DBG_88E("%s(), SSID is not match return FAIL\n", __func__);
goto _mismatch;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 3b1ccd138c3f..1723a47a96b4 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -626,7 +626,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
if (pframe[1] & 2) /* From Ds == 1 */
rtw_secmicappend(&micdata, &pframe[24], 6);
else
- rtw_secmicappend(&micdata, &pframe[10], 6);
+ rtw_secmicappend(&micdata, &pframe[10], 6);
} else { /* ToDS == 0 */
rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */
if (pframe[1] & 2) /* From Ds == 1 */
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 1165ee278536..ba3c3e5a8216 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -209,17 +209,8 @@ void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u16 Index, void *pValue)
{
- /* Hook call by reference pointer. */
- switch (CmnInfo) {
- /* Dynamic call by reference pointer. */
- case ODM_CMNINFO_STA_STATUS:
+ if (CmnInfo == ODM_CMNINFO_STA_STATUS)
pDM_Odm->pODM_StaInfo[Index] = (struct sta_info *)pValue;
- break;
- /* To remove the compiler warning, must add an empty default statement to handle the other values. */
- default:
- /* do nothing */
- break;
- }
}
void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 7022221136f6..47352f210c0b 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -22,7 +22,7 @@
static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num)
{
u8 read_down = false;
- int retry_cnts = 100;
+ int retry_cnts = 100;
u8 valid;
@@ -118,7 +118,6 @@ exit:
void rtw_hal_add_ra_tid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
{
struct odm_dm_struct *odmpriv = &pAdapter->HalData->odmpriv;
-
u8 macid, init_rate, raid, shortGIrate = false;
macid = arg&0x1f;
@@ -211,9 +210,9 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 rate_len, pktlen;
- struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
pwlanhdr = (struct ieee80211_hdr *)pframe;
@@ -222,7 +221,7 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
*(fctrl) = 0;
ether_addr_copy(pwlanhdr->addr1, bc_addr);
- ether_addr_copy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr2, myid(&adapt->eeprompriv));
ether_addr_copy(pwlanhdr->addr3, cur_network->MacAddress);
SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
@@ -257,7 +256,7 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
/* below for ad-hoc mode */
/* SSID */
- pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pktlen);
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->ssid.ssid_length, cur_network->ssid.ssid, &pktlen);
/* supported rates... */
rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
@@ -294,10 +293,10 @@ _ConstructBeacon:
static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
{
struct ieee80211_hdr *pwlanhdr;
- struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
__le16 *fctrl;
- struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
pwlanhdr = (struct ieee80211_hdr *)pframe;
@@ -314,7 +313,7 @@ static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
ether_addr_copy(pwlanhdr->addr1, pnetwork->MacAddress);
/* TA. */
- ether_addr_copy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr2, myid(&adapt->eeprompriv));
*pLength = 16;
}
@@ -331,10 +330,10 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
__le16 *fctrl;
u32 pktlen;
struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
pwlanhdr = (struct ieee80211_hdr *)pframe;
@@ -347,19 +346,19 @@ static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
case Ndis802_11Infrastructure:
SetToDs(fctrl);
ether_addr_copy(pwlanhdr->addr1, pnetwork->MacAddress);
- ether_addr_copy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr2, myid(&adapt->eeprompriv));
ether_addr_copy(pwlanhdr->addr3, StaAddr);
break;
case Ndis802_11APMode:
SetFrDs(fctrl);
ether_addr_copy(pwlanhdr->addr1, StaAddr);
ether_addr_copy(pwlanhdr->addr2, pnetwork->MacAddress);
- ether_addr_copy(pwlanhdr->addr3, myid(&(adapt->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr3, myid(&adapt->eeprompriv));
break;
case Ndis802_11IBSS:
default:
ether_addr_copy(pwlanhdr->addr1, StaAddr);
- ether_addr_copy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)));
+ ether_addr_copy(pwlanhdr->addr2, myid(&adapt->eeprompriv));
ether_addr_copy(pwlanhdr->addr3, pnetwork->MacAddress);
break;
}
@@ -391,13 +390,13 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
__le16 *fctrl;
u8 *mac, *bssid;
u32 pktlen;
- struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
pwlanhdr = (struct ieee80211_hdr *)pframe;
- mac = myid(&(adapt->eeprompriv));
+ mac = myid(&adapt->eeprompriv);
bssid = cur_network->MacAddress;
fctrl = &pwlanhdr->frame_control;
@@ -434,11 +433,11 @@ static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u
/* 2009.10.15 by tynli. */
static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
struct xmit_priv *pxmitpriv;
struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
+ struct mlme_ext_info *pmlmeinfo;
u32 BeaconLength = 0, ProbeRspLength = 0, PSPollLength;
u32 NullDataLength, QosNullLength;
u8 *ReservedPagePacket;
@@ -458,7 +457,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
pxmitpriv = &adapt->xmitpriv;
pmlmeext = &adapt->mlmeextpriv;
pmlmeinfo = &pmlmeext->mlmext_info;
- pnetwork = &(pmlmeinfo->network);
+ pnetwork = &pmlmeinfo->network;
TxDescLen = TXDESC_SIZE;
PageNum = 0;
@@ -476,7 +475,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
PageNum += PageNeed;
adapt->HalData->FwRsvdPageStartOffset = PageNum;
- BufIndex += PageNeed*128;
+ BufIndex += PageNeed * 128;
/* 3 (2) ps-poll *1 page */
RsvdPageLoc.LocPsPoll = PageNum;
@@ -486,7 +485,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
PageNeed = (u8)PageNum_128(TxDescLen + PSPollLength);
PageNum += PageNeed;
- BufIndex += PageNeed*128;
+ BufIndex += PageNeed * 128;
/* 3 (3) null data * 1 page */
RsvdPageLoc.LocNullData = PageNum;
@@ -496,7 +495,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
PageNum += PageNeed;
- BufIndex += PageNeed*128;
+ BufIndex += PageNeed * 128;
/* 3 (4) probe response * 1page */
RsvdPageLoc.LocProbeRsp = PageNum;
@@ -506,7 +505,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength);
PageNum += PageNeed;
- BufIndex += PageNeed*128;
+ BufIndex += PageNeed * 128;
/* 3 (5) Qos null data */
RsvdPageLoc.LocQosNull = PageNum;
@@ -542,10 +541,10 @@ exit:
void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
{
struct hal_data_8188e *haldata = adapt->HalData;
- struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- bool bSendBeacon = false;
- bool bcn_valid = false;
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ bool bSendBeacon = false;
+ bool bcn_valid = false;
u8 DLBcnCount = 0;
u32 poll = 0;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index a72e069269b8..9e5f23392d58 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -440,14 +440,14 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));
pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
- if (pxmitbuf == NULL)
+ if (!pxmitbuf)
return false;
/* 3 1. pick up first frame */
rtw_free_xmitframe(pxmitpriv, pxmitframe);
pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- if (pxmitframe == NULL) {
+ if (!pxmitframe) {
/* no more xmit frame, release xmit buffer */
rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
return false;
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 947481de9cb1..8245cea2feef 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -1045,7 +1045,6 @@ extern u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8];
void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
-void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm);
void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI,
bool bForceUpdate, u8 *pRATRState);
diff --git a/drivers/staging/rtl8188eu/include/odm_hwconfig.h b/drivers/staging/rtl8188eu/include/odm_hwconfig.h
index 8cef32dc6350..2cd8a47a3673 100644
--- a/drivers/staging/rtl8188eu/include/odm_hwconfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_hwconfig.h
@@ -93,18 +93,9 @@ struct phy_status_rpt {
#endif
};
-void odm_Init_RSSIForDM(struct odm_dm_struct *pDM_Odm);
-
void ODM_PhyStatusQuery(struct odm_dm_struct *pDM_Odm,
struct odm_phy_status_info *pPhyInfo,
u8 *pPhyStatus,
struct odm_per_pkt_info *pPktinfo);
-void ODM_MacStatusQuery(struct odm_dm_struct *pDM_Odm,
- u8 *pMacStatus,
- u8 MacID,
- bool bPacketMatchBSSID,
- bool bPacketToSelf,
- bool bPacketBeacon);
-
#endif
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 0664d5f30a96..5e91b9428c16 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -791,18 +791,6 @@ enum ht_cap_ampdu_factor {
#define WPS_CM_SW_DISPLAY_P 0x2008
#define WPS_CM_LCD_DISPLAY_P 0x4008
-#define P2P_PRIVATE_IOCTL_SET_LEN 64
-
-enum P2P_PROTO_WK_ID {
- P2P_FIND_PHASE_WK = 0,
- P2P_RESTORE_STATE_WK = 1,
- P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
- P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
- P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
- P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
- P2P_RO_CH_WK = 6,
-};
-
/* =====================WFD Section===================== */
/* For Wi-Fi Display */
#define WFD_ATTR_DEVICE_INFO 0x00
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index 5e13a6ddf083..8462c9c2fd39 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -17,8 +17,8 @@
#define NDIS_802_11_RSSI long /* in dBm */
struct ndis_802_11_ssid {
- u32 SsidLength;
- u8 Ssid[32];
+ u32 ssid_length;
+ u8 ssid[32];
};
enum NDIS_802_11_NETWORK_TYPE {
@@ -180,7 +180,7 @@ struct wlan_bssid_ex {
u32 Length;
unsigned char MacAddress[ETH_ALEN];
u8 Reserved[2];/* 0]: IS beacon frame */
- struct ndis_802_11_ssid Ssid;
+ struct ndis_802_11_ssid ssid;
u32 Privacy;
NDIS_802_11_RSSI Rssi;/* in dBM,raw data ,get from PHY) */
enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse;
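
This struct rename is what drives the Ssid/SsidLength to ssid/ssid_length churn in
the rtl8188eu hunks above and below. A small usage sketch against the renamed
fields (fragment only; the literal SSID is purely illustrative and min_t() comes
from <linux/kernel.h>):

    struct ndis_802_11_ssid s = {};

    s.ssid_length = min_t(u32, strlen("example-ap"), sizeof(s.ssid));
    memcpy(s.ssid, "example-ap", s.ssid_length);
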
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index efaa1c779f4b..eaa4adb32a0d 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -116,8 +116,8 @@ static char *translate_scan(struct adapter *padapter,
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
- iwe.u.data.length = min_t(u16, pnetwork->network.Ssid.SsidLength, 32);
- start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
+ iwe.u.data.length = min_t(u16, pnetwork->network.ssid.ssid_length, 32);
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.ssid.ssid);
/* parsing HT_CAP_IE */
p = rtw_get_ie(&pnetwork->network.ies[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.ie_length-12);
@@ -195,7 +195,7 @@ static char *translate_scan(struct adapter *padapter,
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
- start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.ssid.ssid);
/*Add basic and extended rates */
max_rate = 0;
@@ -235,7 +235,7 @@ static char *translate_scan(struct adapter *padapter,
u8 *p;
rtw_get_sec_ie(pnetwork->network.ies, pnetwork->network.ie_length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.ssid.ssid));
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
if (wpa_len > 0) {
@@ -1124,8 +1124,8 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
int len = min_t(int, req->essid_len,
IW_ESSID_MAX_SIZE);
- memcpy(ssid[0].Ssid, req->essid, len);
- ssid[0].SsidLength = len;
+ memcpy(ssid[0].ssid, req->essid, len);
+ ssid[0].ssid_length = len;
DBG_88E("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
@@ -1158,8 +1158,8 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
}
sec_len = *(pos++); len -= 1;
if (sec_len > 0 && sec_len <= len) {
- ssid[ssid_index].SsidLength = sec_len;
- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ ssid[ssid_index].ssid_length = sec_len;
+ memcpy(ssid[ssid_index].ssid, pos, ssid[ssid_index].ssid_length);
ssid_index++;
}
pos += sec_len;
@@ -1310,9 +1310,9 @@ static int rtw_wx_set_essid(struct net_device *dev,
DBG_88E("ssid =%s, len =%d\n", extra, wrqu->essid.length);
memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
- ndis_ssid.SsidLength = len;
- memcpy(ndis_ssid.Ssid, extra, len);
- src_ssid = ndis_ssid.Ssid;
+ ndis_ssid.ssid_length = len;
+ memcpy(ndis_ssid.ssid, extra, len);
+ src_ssid = ndis_ssid.ssid;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
spin_lock_bh(&queue->lock);
@@ -1324,14 +1324,14 @@ static int rtw_wx_set_essid(struct net_device *dev,
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
- dst_ssid = pnetwork->network.Ssid.Ssid;
+ dst_ssid = pnetwork->network.ssid.ssid;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
("rtw_wx_set_essid: dst_ssid =%s\n",
- pnetwork->network.Ssid.Ssid));
+ pnetwork->network.ssid.ssid));
- if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength)) &&
- (pnetwork->network.Ssid.SsidLength == ndis_ssid.SsidLength)) {
+ if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.ssid_length)) &&
+ (pnetwork->network.ssid.ssid_length == ndis_ssid.ssid_length)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
("rtw_wx_set_essid: find match, set infra mode\n"));
@@ -1378,8 +1378,8 @@ static int rtw_wx_get_essid(struct net_device *dev,
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
- len = pcur_bss->Ssid.SsidLength;
- memcpy(extra, pcur_bss->Ssid.Ssid, len);
+ len = pcur_bss->ssid.ssid_length;
+ memcpy(extra, pcur_bss->ssid.ssid, len);
} else {
len = 0;
*extra = 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index d5ceb3beabbc..9db11b16cb93 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -13,7 +13,7 @@
void rtw_init_mlme_timer(struct adapter *padapter)
{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
timer_setup(&pmlmepriv->assoc_timer, _rtw_join_timeout_handler, 0);
timer_setup(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler, 0);
@@ -36,34 +36,38 @@ static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
void rtw_reset_securitypriv(struct adapter *adapter)
{
- u8 backup_index = 0;
- u8 backup_counter = 0x00;
- u32 backup_time = 0;
-
- if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
- /* 802.1x */
- /* We have to backup the PMK information for WiFi PMK Caching test item. */
- /* Backup the btkip_countermeasure information. */
- /* When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */
- memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- backup_index = adapter->securitypriv.PMKIDIndex;
- backup_counter = adapter->securitypriv.btkip_countermeasure;
- backup_time = adapter->securitypriv.btkip_countermeasure_time;
- memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
-
- /* Restore the PMK information to securitypriv structure for the following connection. */
- memcpy(&adapter->securitypriv.PMKIDList[0],
- &backup_pmkid[0],
- sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- adapter->securitypriv.PMKIDIndex = backup_index;
- adapter->securitypriv.btkip_countermeasure = backup_counter;
- adapter->securitypriv.btkip_countermeasure_time = backup_time;
- adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+ u8 backup_index;
+ u8 backup_counter;
+ u32 backup_time;
+ struct security_priv *psec_priv = &adapter->securitypriv;
+
+ if (psec_priv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ /* 802.1x
+ * We have to back up the PMK information for the WiFi PMK
+ * caching test item, and the btkip_countermeasure information.
+ * When the countermeasure is triggered, the driver has to
+ * disconnect from the AP for 60 seconds.
+ */
+ memcpy(backup_pmkid, psec_priv->PMKIDList,
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ backup_index = psec_priv->PMKIDIndex;
+ backup_counter = psec_priv->btkip_countermeasure;
+ backup_time = psec_priv->btkip_countermeasure_time;
+
+ memset(psec_priv, 0, sizeof(*psec_priv));
+
+ /* Restore the PMK information to securitypriv structure
+ * for the following connection.
+ */
+ memcpy(psec_priv->PMKIDList, backup_pmkid,
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ psec_priv->PMKIDIndex = backup_index;
+ psec_priv->btkip_countermeasure = backup_counter;
+ psec_priv->btkip_countermeasure_time = backup_time;
+ psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
} else {
/* reset values in securitypriv */
- struct security_priv *psec_priv = &adapter->securitypriv;
-
psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
psec_priv->dot11PrivacyKeyIndex = 0;
@@ -76,15 +80,16 @@ void rtw_reset_securitypriv(struct adapter *adapter)
void rtw_os_indicate_disconnect(struct adapter *adapter)
{
- netif_carrier_off(adapter->pnetdev); /* Do it first for tx broadcast pkt after disconnection issue! */
+ /* Do it first for tx broadcast pkt after disconnection issue! */
+ netif_carrier_off(adapter->pnetdev);
rtw_indicate_wx_disassoc_event(adapter);
rtw_reset_securitypriv(adapter);
}
void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
{
- uint len;
- u8 *buff, *p, i;
+ uint len;
+ u8 *buff, *p, i;
union iwreq_data wrqu;
RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
@@ -99,14 +104,13 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
memset(buff, 0, IW_CUSTOM_MAX);
p = buff;
p += sprintf(p, "ASSOCINFO(ReqIEs =");
- len = sec_ie[1]+2;
+ len = sec_ie[1] + 2;
len = min_t(uint, len, IW_CUSTOM_MAX);
for (i = 0; i < len; i++)
p += sprintf(p, "%02x", sec_ie[i]);
p += sprintf(p, ")");
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = p-buff;
- wrqu.data.length = min_t(__u16, wrqu.data.length, IW_CUSTOM_MAX);
+ wrqu.data.length = min_t(__u16, p - buff, IW_CUSTOM_MAX);
wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
kfree(buff);
}
@@ -119,7 +123,7 @@ void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
void init_mlme_ext_timer(struct adapter *padapter)
{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
timer_setup(&pmlmeext->survey_timer, survey_timer_hdl, 0);
timer_setup(&pmlmeext->link_timer, link_timer_hdl, 0);
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index abcce2240f15..8dde5a40e253 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -137,12 +137,12 @@ static int netdev_close(struct net_device *pnetdev);
static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
{
- struct registry_priv *registry_par = &padapter->registrypriv;
+ struct registry_priv *registry_par = &padapter->registrypriv;
GlobalDebugLevel = rtw_debug;
- memcpy(registry_par->ssid.Ssid, "ANY", 3);
- registry_par->ssid.SsidLength = 3;
+ memcpy(registry_par->ssid.ssid, "ANY", 3);
+ registry_par->ssid.ssid_length = 3;
registry_par->channel = (u8)rtw_channel;
registry_par->wireless_mode = (u8)rtw_wireless_mode;
@@ -198,8 +198,8 @@ static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
- struct recv_priv *precvpriv = &(padapter->recvpriv);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
padapter->stats.tx_packets = pxmitpriv->tx_pkts;
padapter->stats.rx_packets = precvpriv->rx_pkts;
@@ -248,7 +248,7 @@ static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
- struct adapter *padapter = rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
skb->priority = rtw_classify8021d(skb);
@@ -263,15 +263,15 @@ u16 rtw_recv_select_queue(struct sk_buff *skb)
{
struct iphdr *piphdr;
unsigned int dscp;
- __be16 eth_type;
+ __be16 eth_type;
u32 priority;
u8 *pdata = skb->data;
- memcpy(&eth_type, pdata+(ETH_ALEN<<1), 2);
+ memcpy(&eth_type, pdata + (ETH_ALEN << 1), 2);
switch (eth_type) {
case htons(ETH_P_IP):
- piphdr = (struct iphdr *)(pdata+ETH_HLEN);
+ piphdr = (struct iphdr *)(pdata + ETH_HLEN);
dscp = piphdr->tos & 0xfc;
priority = dscp >> 5;
break;
@@ -286,7 +286,7 @@ static const struct net_device_ops rtw_netdev_ops = {
.ndo_open = netdev_open,
.ndo_stop = netdev_close,
.ndo_start_xmit = rtw_xmit_entry,
- .ndo_select_queue = rtw_select_queue,
+ .ndo_select_queue = rtw_select_queue,
.ndo_set_mac_address = rtw_net_set_mac_address,
.ndo_get_stats = rtw_net_get_stats,
.ndo_do_ioctl = rtw_ioctl,
@@ -323,7 +323,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
padapter->pnetdev = pnetdev;
DBG_88E("register rtw_netdev_ops to netdev_ops\n");
pnetdev->netdev_ops = &rtw_netdev_ops;
- pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */
+ pnetdev->watchdog_timeo = HZ * 3; /* 3 second timeout */
pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
loadparam(padapter, pnetdev);
@@ -361,7 +361,7 @@ void rtw_stop_drv_threads(struct adapter *padapter)
static u8 rtw_init_default_value(struct adapter *padapter)
{
struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -431,7 +431,7 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
u8 rtw_init_drv_sw(struct adapter *padapter)
{
- u8 ret8 = _SUCCESS;
+ u8 ret8 = _SUCCESS;
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw\n"));
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 2ea2af3286bc..7a090615dcbc 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -84,7 +84,7 @@ static int rtw_android_get_rssi(struct net_device *net, char *command,
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
bytes_written += snprintf(&command[bytes_written], total_len,
"%s rssi %d",
- pcur_network->network.Ssid.Ssid,
+ pcur_network->network.ssid.ssid,
padapter->recvpriv.rssi);
}
return bytes_written;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 28cbd6b3d26c..664d93a7f90d 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
@@ -227,10 +228,10 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
check_fwstate(pmlmepriv, _FW_LINKED)) {
pr_debug("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
__func__, __LINE__,
- pmlmepriv->cur_network.network.Ssid.Ssid,
+ pmlmepriv->cur_network.network.ssid.ssid,
pmlmepriv->cur_network.network.MacAddress,
- pmlmepriv->cur_network.network.Ssid.SsidLength,
- pmlmepriv->assoc_ssid.SsidLength);
+ pmlmepriv->cur_network.network.ssid.ssid_length,
+ pmlmepriv->assoc_ssid.ssid_length);
pmlmepriv->to_roaming = 1;
}
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index a1c096124683..68f53013cb95 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -15,11 +15,11 @@
#include "dot11d.h"
struct channel_list {
- u8 Channel[32];
- u8 Len;
+ u8 channel[32];
+ u8 len;
};
-static struct channel_list ChannelPlan[] = {
+static struct channel_list channel_array[] = {
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64,
149, 153, 157, 161, 165}, 24},
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
@@ -44,130 +44,130 @@ static struct channel_list ChannelPlan[] = {
void dot11d_init(struct rtllib_device *ieee)
{
- struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
+ struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
- pDot11dInfo->bEnabled = false;
+ dot11d_info->enabled = false;
- pDot11dInfo->State = DOT11D_STATE_NONE;
- pDot11dInfo->CountryIeLen = 0;
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
+ dot11d_info->state = DOT11D_STATE_NONE;
+ dot11d_info->country_len = 0;
+ memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
RESET_CIE_WATCHDOG(ieee);
}
EXPORT_SYMBOL(dot11d_init);
-void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
+void dot11d_channel_map(u8 channel_plan, struct rtllib_device *ieee)
{
int i, max_chan = 14, min_chan = 1;
- ieee->bGlobalDomain = false;
+ ieee->global_domain = false;
- if (ChannelPlan[channel_plan].Len != 0) {
+ if (channel_array[channel_plan].len != 0) {
memset(GET_DOT11D_INFO(ieee)->channel_map, 0,
sizeof(GET_DOT11D_INFO(ieee)->channel_map));
- for (i = 0; i < ChannelPlan[channel_plan].Len; i++) {
- if (ChannelPlan[channel_plan].Channel[i] < min_chan ||
- ChannelPlan[channel_plan].Channel[i] > max_chan)
+ for (i = 0; i < channel_array[channel_plan].len; i++) {
+ if (channel_array[channel_plan].channel[i] < min_chan ||
+ channel_array[channel_plan].channel[i] > max_chan)
break;
- GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan
- [channel_plan].Channel[i]] = 1;
+ GET_DOT11D_INFO(ieee)->channel_map[channel_array
+ [channel_plan].channel[i]] = 1;
}
}
switch (channel_plan) {
case COUNTRY_CODE_GLOBAL_DOMAIN:
- ieee->bGlobalDomain = true;
+ ieee->global_domain = true;
for (i = 12; i <= 14; i++)
GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
- ieee->IbssStartChnl = 10;
+ ieee->bss_start_channel = 10;
ieee->ibss_maxjoin_chal = 11;
break;
case COUNTRY_CODE_WORLD_WIDE_13:
for (i = 12; i <= 13; i++)
GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
- ieee->IbssStartChnl = 10;
+ ieee->bss_start_channel = 10;
ieee->ibss_maxjoin_chal = 11;
break;
default:
- ieee->IbssStartChnl = 1;
+ ieee->bss_start_channel = 1;
ieee->ibss_maxjoin_chal = 14;
break;
}
}
-EXPORT_SYMBOL(Dot11d_Channelmap);
+EXPORT_SYMBOL(dot11d_channel_map);
-void Dot11d_Reset(struct rtllib_device *ieee)
+void dot11d_reset(struct rtllib_device *ieee)
{
- struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
+ struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
u32 i;
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
+ memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
for (i = 1; i <= 11; i++)
- (pDot11dInfo->channel_map)[i] = 1;
+ (dot11d_info->channel_map)[i] = 1;
for (i = 12; i <= 14; i++)
- (pDot11dInfo->channel_map)[i] = 2;
- pDot11dInfo->State = DOT11D_STATE_NONE;
- pDot11dInfo->CountryIeLen = 0;
+ (dot11d_info->channel_map)[i] = 2;
+ dot11d_info->state = DOT11D_STATE_NONE;
+ dot11d_info->country_len = 0;
RESET_CIE_WATCHDOG(ieee);
}
-void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
- u16 CoutryIeLen, u8 *pCoutryIe)
+void dot11d_update_country(struct rtllib_device *dev, u8 *address,
+ u16 country_len, u8 *country)
{
- struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
- u8 i, j, NumTriples, MaxChnlNum;
- struct chnl_txpow_triple *pTriple;
-
- memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER + 1);
- MaxChnlNum = 0;
- NumTriples = (CoutryIeLen - 3) / 3;
- pTriple = (struct chnl_txpow_triple *)(pCoutryIe + 3);
- for (i = 0; i < NumTriples; i++) {
- if (MaxChnlNum >= pTriple->FirstChnl) {
+ struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
+ u8 i, j, number_of_triples, max_channel_number;
+ struct chnl_txpow_triple *triple;
+
+ memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
+ max_channel_number = 0;
+ number_of_triples = (country_len - 3) / 3;
+ triple = (struct chnl_txpow_triple *)(country + 3);
+ for (i = 0; i < number_of_triples; i++) {
+ if (max_channel_number >= triple->first_channel) {
netdev_info(dev->dev,
"%s: Invalid country IE, skip it......1\n",
__func__);
return;
}
- if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl +
- pTriple->NumChnls)) {
+ if (MAX_CHANNEL_NUMBER < (triple->first_channel +
+ triple->num_channels)) {
netdev_info(dev->dev,
"%s: Invalid country IE, skip it......2\n",
__func__);
return;
}
- for (j = 0; j < pTriple->NumChnls; j++) {
- pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
- pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] =
- pTriple->MaxTxPowerInDbm;
- MaxChnlNum = pTriple->FirstChnl + j;
+ for (j = 0; j < triple->num_channels; j++) {
+ dot11d_info->channel_map[triple->first_channel + j] = 1;
+ dot11d_info->max_tx_power_list[triple->first_channel + j] =
+ triple->max_tx_power;
+ max_channel_number = triple->first_channel + j;
}
- pTriple = (struct chnl_txpow_triple *)((u8 *)pTriple + 3);
+ triple = (struct chnl_txpow_triple *)((u8 *)triple + 3);
}
- UPDATE_CIE_SRC(dev, pTaddr);
+ UPDATE_CIE_SRC(dev, address);
- pDot11dInfo->CountryIeLen = CoutryIeLen;
- memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
- pDot11dInfo->State = DOT11D_STATE_LEARNED;
+ dot11d_info->country_len = country_len;
+ memcpy(dot11d_info->country_buffer, country, country_len);
+ dot11d_info->state = DOT11D_STATE_LEARNED;
}
-void DOT11D_ScanComplete(struct rtllib_device *dev)
+void dot11d_scan_complete(struct rtllib_device *dev)
{
- struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
+ struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
- switch (pDot11dInfo->State) {
+ switch (dot11d_info->state) {
case DOT11D_STATE_LEARNED:
- pDot11dInfo->State = DOT11D_STATE_DONE;
+ dot11d_info->state = DOT11D_STATE_DONE;
break;
case DOT11D_STATE_DONE:
- Dot11d_Reset(dev);
+ dot11d_reset(dev);
break;
case DOT11D_STATE_NONE:
break;
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 7fa3c4d963c4..6d2b93acfa43 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
@@ -18,9 +11,9 @@
#include "rtllib.h"
struct chnl_txpow_triple {
- u8 FirstChnl;
- u8 NumChnls;
- u8 MaxTxPowerInDbm;
+ u8 first_channel;
+ u8 num_channels;
+ u8 max_tx_power;
};
enum dot11d_state {
@@ -30,62 +23,62 @@ enum dot11d_state {
};
/**
- * struct rt_dot11d_info * @CountryIeLen: value greater than 0 if
- * @CountryIeBuf contains valid country information element.
+ * struct rt_dot11d_info * @country_len: value greater than 0 if
+ * @country_buffer contains valid country information element.
* @channel_map: holds channel values
* 0 - invalid,
* 1 - valid (active scan),
* 2 - valid (passive scan)
- * @CountryIeSrcAddr - Source AP of the country IE
+ * @country_src_addr - Source AP of the country IE
*/
struct rt_dot11d_info {
- bool bEnabled;
+ bool enabled;
- u16 CountryIeLen;
- u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6];
- u8 CountryIeWatchdog;
+ u16 country_len;
+ u8 country_buffer[MAX_IE_LEN];
+ u8 country_src_addr[6];
+ u8 country_watchdog;
u8 channel_map[MAX_CHANNEL_NUMBER + 1];
- u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER + 1];
+ u8 max_tx_power_list[MAX_CHANNEL_NUMBER + 1];
- enum dot11d_state State;
+ enum dot11d_state state;
};
-static inline void cpMacAddr(unsigned char *des, unsigned char *src)
+static inline void copy_mac_addr(unsigned char *des, unsigned char *src)
{
memcpy(des, src, 6);
}
-#define GET_DOT11D_INFO(__pIeeeDev) \
- ((struct rt_dot11d_info *)((__pIeeeDev)->pDot11dInfo))
+#define GET_DOT11D_INFO(__ieee_dev) \
+ ((struct rt_dot11d_info *)((__ieee_dev)->dot11d_info))
-#define IS_DOT11D_ENABLE(__pIeeeDev) \
- (GET_DOT11D_INFO(__pIeeeDev)->bEnabled)
-#define IS_COUNTRY_IE_VALID(__pIeeeDev) \
- (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
+#define IS_DOT11D_ENABLE(__ieee_dev) \
+ (GET_DOT11D_INFO(__ieee_dev)->enabled)
+#define IS_COUNTRY_IE_VALID(__ieee_dev) \
+ (GET_DOT11D_INFO(__ieee_dev)->country_len > 0)
-#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) \
+#define IS_EQUAL_CIE_SRC(__ieee_dev, __address) \
ether_addr_equal_unaligned( \
- GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
-#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) \
- cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
+ GET_DOT11D_INFO(__ieee_dev)->country_src_addr, __address)
+#define UPDATE_CIE_SRC(__ieee_dev, __address) \
+ copy_mac_addr(GET_DOT11D_INFO(__ieee_dev)->country_src_addr, __address)
-#define GET_CIE_WATCHDOG(__pIeeeDev) \
- (GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
-static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__pIeeeDev)
+#define GET_CIE_WATCHDOG(__ieee_dev) \
+ (GET_DOT11D_INFO(__ieee_dev)->country_watchdog)
+static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__ieee_dev)
{
- GET_CIE_WATCHDOG(__pIeeeDev) = 0;
+ GET_CIE_WATCHDOG(__ieee_dev) = 0;
}
-#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
+#define UPDATE_CIE_WATCHDOG(__ieee_dev) (++GET_CIE_WATCHDOG(__ieee_dev))
void dot11d_init(struct rtllib_device *dev);
-void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee);
-void Dot11d_Reset(struct rtllib_device *dev);
-void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
- u16 CoutryIeLen, u8 *pCoutryIe);
-void DOT11D_ScanComplete(struct rtllib_device *dev);
+void dot11d_channel_map(u8 channel_plan, struct rtllib_device *ieee);
+void dot11d_reset(struct rtllib_device *dev);
+void dot11d_update_country(struct rtllib_device *dev, u8 *address,
+ u16 country_len, u8 *country);
+void dot11d_scan_complete(struct rtllib_device *dev);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 1c6ed5b2a6f9..19bb04b3f097 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -660,9 +660,9 @@ static void _rtl92e_hwconfig(struct net_device *dev)
case WIRELESS_MODE_AUTO:
case WIRELESS_MODE_N_24G:
regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+ RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_N_5G:
regBwOpMode = BW_OPMODE_5G;
@@ -961,7 +961,7 @@ static void _rtl92e_net_update(struct net_device *dev)
net = &priv->rtllib->current_network;
rtl92e_config_rate(dev, &rate_config);
priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
- priv->basic_rate = rate_config &= 0x15f;
+ priv->basic_rate = rate_config &= 0x15f;
rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 96f265eee007..253f1911a3f4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -436,7 +436,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
network->qos_data.param_count)) {
network->qos_data.old_param_count =
network->qos_data.param_count;
- priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
+ priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
schedule_work(&priv->qos_activate);
RT_TRACE(COMP_QOS,
"QoS parameters change call qos_activate\n");
@@ -1049,7 +1049,7 @@ static short _rtl92e_get_channel_map(struct net_device *dev)
}
RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
dot11d_init(priv->rtllib);
- Dot11d_Channelmap(priv->ChannelPlan, priv->rtllib);
+ dot11d_channel_map(priv->ChannelPlan, priv->rtllib);
for (i = 1; i <= 11; i++)
(priv->rtllib->active_channel_map)[i] = 1;
(priv->rtllib->active_channel_map)[12] = 2;
@@ -1573,7 +1573,7 @@ static void _rtl92e_free_rx_ring(struct net_device *dev)
pci_unmap_single(priv->pdev,
*((dma_addr_t *)skb->cb),
priv->rxbuffersize, PCI_DMA_FROMDEVICE);
- kfree_skb(skb);
+ kfree_skb(skb);
}
pci_free_consistent(priv->pdev,
@@ -1728,8 +1728,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
MAX_DEV_ADDR_SIZE);
struct tx_desc *pdesc = NULL;
struct rtllib_hdr_1addr *header = NULL;
- u16 fc = 0, type = 0, stype = 0;
- bool multi_addr = false, broad_addr = false, uni_addr = false;
+ u16 fc = 0, type = 0;
u8 *pda_addr = NULL;
int idx;
u32 fwinfo_size = 0;
@@ -1747,22 +1746,14 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
header = (struct rtllib_hdr_1addr *)(((u8 *)skb->data) + fwinfo_size);
fc = le16_to_cpu(header->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
- stype = WLAN_FC_GET_STYPE(fc);
pda_addr = header->addr1;
if (is_broadcast_ether_addr(pda_addr))
- broad_addr = true;
+ priv->stats.txbytesbroadcast += skb->len - fwinfo_size;
else if (is_multicast_ether_addr(pda_addr))
- multi_addr = true;
- else
- uni_addr = true;
-
- if (uni_addr)
- priv->stats.txbytesunicast += skb->len - fwinfo_size;
- else if (multi_addr)
priv->stats.txbytesmulticast += skb->len - fwinfo_size;
else
- priv->stats.txbytesbroadcast += skb->len - fwinfo_size;
+ priv->stats.txbytesunicast += skb->len - fwinfo_size;
spin_lock_irqsave(&priv->irq_th_lock, flags);
ring = &priv->tx_ring[tcb_desc->queue_index];
@@ -2515,7 +2506,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
if (dev_alloc_name(dev, ifname) < 0) {
RT_TRACE(COMP_INIT,
"Oops: devname already taken! Trying wlan%%d...\n");
- dev_alloc_name(dev, ifname);
+ dev_alloc_name(dev, ifname);
}
RT_TRACE(COMP_INIT, "Driver probe completed1\n");
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 9bf95bd0ad13..157bcee34067 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -996,7 +996,7 @@ static void _rtl92e_dm_check_tx_power_tracking_tssi(struct net_device *dev)
tx_power_track_counter++;
- if (tx_power_track_counter >= 180) {
+ if (tx_power_track_counter >= 180) {
schedule_delayed_work(&priv->txpower_tracking_wq, 0);
tx_power_track_counter = 0;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 843e874b8a06..44e06cba7b7b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -738,7 +738,7 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
else if (wrqu->encoding.length == 0xd) {
ieee->pairwise_key_type = KEY_TYPE_WEP104;
- rtl92e_enable_hw_security_config(dev);
+ rtl92e_enable_hw_security_config(dev);
rtl92e_set_key(dev, key_idx, key_idx, KEY_TYPE_WEP104,
zero_addr[key_idx], 0, hwkey);
rtl92e_set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
@@ -1049,9 +1049,9 @@ static int _rtl92e_wx_set_promisc_mode(struct net_device *dev,
(bPromiscuousOn) ? (true) : (false);
ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame =
(bFilterSourceStationFrame) ? (true) : (false);
- (bPromiscuousOn) ?
- (rtllib_EnableIntelPromiscuousMode(dev, false)) :
- (rtllib_DisableIntelPromiscuousMode(dev, false));
+ (bPromiscuousOn) ?
+ (rtllib_EnableIntelPromiscuousMode(dev, false)) :
+ (rtllib_DisableIntelPromiscuousMode(dev, false));
netdev_info(dev,
"=======>%s(), on = %d, filter src sta = %d\n",
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 687dbb04ed2e..2d330d2bbf6d 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -139,7 +139,7 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
{
union delba_param_set DelbaParamSet;
struct sk_buff *skb = NULL;
- struct rtllib_hdr_3addr *Delba = NULL;
+ struct rtllib_hdr_3addr *Delba = NULL;
u8 *tag = NULL;
u16 len = 6 + ieee->tx_headroom;
@@ -316,7 +316,7 @@ OnADDBAReq_Fail:
int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *rsp = NULL;
+ struct rtllib_hdr_3addr *rsp = NULL;
struct ba_record *pPendingBA, *pAdmittedBA;
struct tx_ts_record *pTS = NULL;
u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
@@ -420,7 +420,7 @@ OnADDBARsp_Reject:
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
{
- struct rtllib_hdr_3addr *delba = NULL;
+ struct rtllib_hdr_3addr *delba = NULL;
union delba_param_set *pDelBaParamSet = NULL;
u8 *dst = NULL;
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index c01474a6db1e..61ebd12831c3 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1560,11 +1560,11 @@ struct rtllib_device {
u16 scan_watch_dog;
/* map of allowed channels. 0 is dummy */
- void *pDot11dInfo;
- bool bGlobalDomain;
+ void *dot11d_info;
+ bool global_domain;
u8 active_channel_map[MAX_CHANNEL_NUMBER+1];
- u8 IbssStartChnl;
+ u8 bss_start_channel;
u8 ibss_maxjoin_chal;
int rate; /* current rate */
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index f38f1f74fcd6..55da8c9dfe50 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -285,7 +285,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct rtllib_tkip_data *tkey = priv;
- int len;
+ int len;
u8 *pos;
struct rtllib_hdr_4addr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index fa580ce1cf43..debc2e40af00 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -913,7 +913,7 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
rx_stats->bContainHTC = true;
}
- if (RTLLIB_QOS_HAS_SEQ(fc))
+ if (RTLLIB_QOS_HAS_SEQ(fc))
rx_stats->bIsQosData = true;
return hdrlen;
@@ -1812,7 +1812,7 @@ static inline void rtllib_extract_country_ie(
netdev_info(ieee->dev,
"Received beacon ContryIE, SSID: <%s>\n",
network->ssid);
- Dot11d_UpdateCountryIe(ieee, addr2,
+ dot11d_update_country(ieee, addr2,
info_element->len,
info_element->data);
}
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 287d0c11fa38..ee275857868f 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -564,7 +564,7 @@ out:
if (ieee->state >= RTLLIB_LINKED) {
if (IS_DOT11D_ENABLE(ieee))
- DOT11D_ScanComplete(ieee);
+ dot11d_scan_complete(ieee);
}
mutex_unlock(&ieee->scan_mutex);
@@ -623,7 +623,7 @@ static void rtllib_softmac_scan_wq(void *data)
out:
if (IS_DOT11D_ENABLE(ieee))
- DOT11D_ScanComplete(ieee);
+ dot11d_scan_complete(ieee);
ieee->current_network.channel = last_channel;
out1:
@@ -1688,8 +1688,8 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
ieee->current_network.ssid_len);
tmp_ssid_len = ieee->current_network.ssid_len;
}
- memcpy(&ieee->current_network, net,
- sizeof(ieee->current_network));
+ memcpy(&ieee->current_network, net,
+ sizeof(ieee->current_network));
if (!ssidbroad) {
memcpy(ieee->current_network.ssid, tmp_ssid,
tmp_ssid_len);
@@ -2627,7 +2627,7 @@ static void rtllib_start_ibss_wq(void *data)
/* the network definitively is not here.. create a new cell */
if (ieee->state == RTLLIB_NOLINK) {
netdev_info(ieee->dev, "creating new IBSS cell\n");
- ieee->current_network.channel = ieee->IbssStartChnl;
+ ieee->current_network.channel = ieee->bss_start_channel;
if (!ieee->wap_set)
eth_random_addr(ieee->current_network.bssid);
@@ -2719,7 +2719,7 @@ static void rtllib_start_bss(struct rtllib_device *ieee)
unsigned long flags;
if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
- if (!ieee->bGlobalDomain)
+ if (!ieee->global_domain)
return;
}
/* check if we have already found the net we
@@ -2759,7 +2759,7 @@ void rtllib_disassociate(struct rtllib_device *ieee)
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
if (IS_DOT11D_ENABLE(ieee))
- Dot11d_Reset(ieee);
+ dot11d_reset(ieee);
ieee->state = RTLLIB_NOLINK;
ieee->is_set_key = false;
ieee->wap_set = 0;
@@ -2974,8 +2974,8 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->state = RTLLIB_NOLINK;
for (i = 0; i < 5; i++)
ieee->seq_ctrl[i] = 0;
- ieee->pDot11dInfo = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
- if (!ieee->pDot11dInfo)
+ ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
+ if (!ieee->dot11d_info)
netdev_err(ieee->dev, "Can't alloc memory for DOT11D\n");
ieee->LinkDetectInfo.SlotIndex = 0;
ieee->LinkDetectInfo.SlotNum = 2;
@@ -3049,8 +3049,8 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
void rtllib_softmac_free(struct rtllib_device *ieee)
{
mutex_lock(&ieee->wx_mutex);
- kfree(ieee->pDot11dInfo);
- ieee->pDot11dInfo = NULL;
+ kfree(ieee->dot11d_info);
+ ieee->dot11d_info = NULL;
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work_sync(&ieee->associate_retry_wq);
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 74d4d2df3eb3..4f4904a300e0 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -541,8 +541,8 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
if (idx < 1 || idx > NUM_WEP_KEYS)
return -EINVAL;
idx--;
- } else{
- idx = ieee->crypt_info.tx_keyidx;
+ } else {
+ idx = ieee->crypt_info.tx_keyidx;
}
if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
crypt = &ieee->crypt_info.crypt[idx];
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index 3022728a364c..dcd51bf4aed3 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -7,7 +7,7 @@ ccflags-y += -O2
ccflags-y += -DCONFIG_FORCE_HARD_FLOAT=y
ccflags-y += -DJACKSON_NEW_8187 -DJACKSON_NEW_RX
ccflags-y += -DTHOMAS_BEACON -DTHOMAS_TASKLET -DTHOMAS_SKB -DTHOMAS_TURBO
-ccflags-y += -Idrivers/staging/rtl8192u/ieee80211
+ccflags-y += -I $(srctree)/$(src)/ieee80211
r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o \
r8190_rtl8256.o r819xU_phy.o r819xU_firmware.o \
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 0ac0bbf7d923..f1eaab337dca 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -4957,20 +4957,18 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf)
struct net_device *dev = usb_get_intfdata(intf);
struct r8192_priv *priv = ieee80211_priv(dev);
- if (dev) {
- unregister_netdev(dev);
-
- RT_TRACE(COMP_DOWN,
- "=============>wlan driver to be removed\n");
- rtl8192_proc_remove_one(dev);
-
- rtl8192_down(dev);
- kfree(priv->pFirmware);
- priv->pFirmware = NULL;
- rtl8192_usb_deleteendpoints(dev);
- usleep_range(10000, 11000);
- }
+ unregister_netdev(dev);
+
+ RT_TRACE(COMP_DOWN, "=============>wlan driver to be removed\n");
+ rtl8192_proc_remove_one(dev);
+
+ rtl8192_down(dev);
+ kfree(priv->pFirmware);
+ priv->pFirmware = NULL;
+ rtl8192_usb_deleteendpoints(dev);
+ usleep_range(10000, 11000);
free_ieee80211(dev);
+
RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index bb4f56a5fb01..2eae11dd6b42 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -408,7 +408,7 @@ int r8712_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
match = true;
break;
}
- cnt += in_ie[cnt + 1] + 2; /* goto next */
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
return match;
}
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 8bc45ffd3029..39eb74374d0b 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -358,7 +358,7 @@ u8 r8712_efuse_pg_packet_write(struct _adapter *padapter, const u8 offset,
u8 pg_header = 0;
u16 efuse_addr = 0, curr_size = 0;
u8 efuse_data, target_word_cnts = 0;
- static int repeat_times;
+ int repeat_times;
int sub_repeat;
u8 bResult = true;
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index 5b1004b2df47..07fcf9b9b811 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -939,7 +939,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
}
if (pLed->bLedLinkBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
@@ -991,7 +991,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
}
if (pLed->bLedLinkBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
@@ -1018,7 +1018,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
}
if (pLed->bLedLinkBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
del_timer(&pLed->BlinkTimer);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 986a1d526918..3f17ef6f7e39 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -259,7 +259,7 @@ int r8712_is_same_ibss(struct _adapter *adapter, struct wlan_network *pnetwork)
static int is_same_network(struct wlan_bssid_ex *src,
struct wlan_bssid_ex *dst)
{
- u16 s_cap, d_cap;
+ u16 s_cap, d_cap;
memcpy((u8 *)&s_cap, r8712_get_capability_from_ie(src->IEs), 2);
memcpy((u8 *)&d_cap, r8712_get_capability_from_ie(dst->IEs), 2);
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index f10896df094b..28f736913292 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -54,7 +54,7 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
sint i;
union recv_frame *precvframe;
- memset((unsigned char *)precvpriv, 0, sizeof(struct recv_priv));
+ memset((unsigned char *)precvpriv, 0, sizeof(struct recv_priv));
spin_lock_init(&precvpriv->lock);
_init_queue(&precvpriv->free_recv_queue);
_init_queue(&precvpriv->recv_pending_queue);
@@ -325,7 +325,7 @@ static sint sta2sta_data_frame(struct _adapter *adapter,
*/
if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN))
return _FAIL;
- sta_addr = pattrib->bssid;
+ sta_addr = pattrib->bssid;
} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
if (bmcast) {
/* For AP mode, if DA == MCAST, then BSSID should
@@ -404,7 +404,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
if (bmcast)
*psta = r8712_get_bcmc_stainfo(adapter);
else
- *psta = r8712_get_stainfo(pstapriv, pattrib->bssid);
+ *psta = r8712_get_stainfo(pstapriv, pattrib->bssid);
if (*psta == NULL)
return _FAIL;
} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) &&
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 9648ee15b40e..7c30b9e68e70 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -25,7 +25,7 @@
static void _init_stainfo(struct sta_info *psta)
{
memset((u8 *)psta, 0, sizeof(struct sta_info));
- spin_lock_init(&psta->lock);
+ spin_lock_init(&psta->lock);
INIT_LIST_HEAD(&psta->list);
INIT_LIST_HEAD(&psta->hash_list);
_r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 5c7dc9c6f76b..7c8857409916 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -193,8 +193,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
pattrib->ether_type = ntohs(etherhdr.h_proto);
-{
- /*If driver xmit ARP packet, driver can set ps mode to initial
+ /*
+ * If driver xmit ARP packet, driver can set ps mode to initial
* setting. It stands for getting DHCP or fix IP.
*/
if (pattrib->ether_type == 0x0806) {
@@ -206,7 +206,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
padapter->registrypriv.smart_ps);
}
}
-}
+
memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
pattrib->pctrl = 0;
@@ -949,7 +949,7 @@ static void alloc_hwxmits(struct _adapter *padapter)
pxmitpriv->vo_txqueue.head = 0;
hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue;
hwxmits[1] .sta_queue = &pxmitpriv->vo_pending;
- pxmitpriv->vi_txqueue.head = 0;
+ pxmitpriv->vi_txqueue.head = 0;
hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue;
hwxmits[2] .sta_queue = &pxmitpriv->vi_pending;
pxmitpriv->bk_txqueue.head = 0;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 92d75d7c51ae..005010de9997 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -196,10 +196,6 @@ static int r871x_suspend(struct usb_interface *pusb_intf, pm_message_t state)
struct _adapter *padapter = netdev_priv(pnetdev);
netdev_info(pnetdev, "Suspending...\n");
- if (!pnetdev || !netif_running(pnetdev)) {
- netdev_info(pnetdev, "Unable to suspend\n");
- return 0;
- }
padapter->bSuspended = true;
rtl871x_intf_stop(padapter);
if (pnetdev->netdev_ops->ndo_stop)
@@ -221,10 +217,6 @@ static int r871x_resume(struct usb_interface *pusb_intf)
struct _adapter *padapter = netdev_priv(pnetdev);
netdev_info(pnetdev, "Resuming...\n");
- if (!pnetdev || !netif_running(pnetdev)) {
- netdev_info(pnetdev, "Unable to resume\n");
- return 0;
- }
netif_device_attach(pnetdev);
if (pnetdev->netdev_ops->ndo_open)
pnetdev->netdev_ops->ndo_open(pnetdev);
@@ -232,13 +224,6 @@ static int r871x_resume(struct usb_interface *pusb_intf)
rtl871x_intf_resume(padapter);
return 0;
}
-
-static int r871x_reset_resume(struct usb_interface *pusb_intf)
-{
- /* dummy routine */
- return 0;
-}
-
#endif
static struct drv_priv drvpriv = {
@@ -249,7 +234,6 @@ static struct drv_priv drvpriv = {
#ifdef CONFIG_PM
.r871xu_drv.suspend = r871x_suspend,
.r871xu_drv.resume = r871x_resume,
- .r871xu_drv.reset_resume = r871x_reset_resume,
#endif
};
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index ea2c187e56bd..91520ca3bbad 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -835,14 +835,6 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
}
psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
- if (psecnetwork == NULL) {
- kfree(pcmd);
- res = _FAIL;
-
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd :psecnetwork == NULL!!!\n"));
-
- goto exit;
- }
memset(psecnetwork, 0, t_len);
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 625e67f39889..094d61bcb469 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -2389,7 +2389,7 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == false) {
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_fwstate);
- return ret;
+ return ret;
}
/*
if (pattrib->psta)
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 062fda9962be..bafb2c30e7fb 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -65,7 +65,6 @@ enum _NIC_VERSION {
#include <rtw_event.h>
#include <rtw_mlme_ext.h>
#include <rtw_ap.h>
-#include <rtw_efuse.h>
#include <rtw_version.h>
#include <rtw_odm.h>
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index bcc8dfa8e672..9efb4dcb9d3a 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -850,18 +850,18 @@ enum ieee80211_state {
#define IP_FMT "%pI4"
#define IP_ARG(x) (x)
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static inline int is_multicast_mac_addr(const u8 *addr)
{
return ((addr[0] != 0xff) && (0x01 & addr[0]));
}
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static inline int is_broadcast_mac_addr(const u8 *addr)
{
return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
}
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static inline int is_zero_mac_addr(const u8 *addr)
{
return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index e14d7cc411c9..73b87da15eb2 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -137,7 +137,7 @@ static int isFileReadable(char *path)
ret = PTR_ERR(fp);
}
else {
- oldfs = get_fs(); set_fs(get_ds());
+ oldfs = get_fs(); set_fs(KERNEL_DS);
if (1!=readFile(fp, &buf, 1))
ret = -EINVAL;
@@ -165,7 +165,7 @@ static int retriveFromFile(char *path, u8 *buf, u32 sz)
if (0 == (ret =openFile(&fp, path, O_RDONLY, 0))) {
DBG_871X("%s openFile path:%s fp =%p\n", __func__, path , fp);
- oldfs = get_fs(); set_fs(get_ds());
+ oldfs = get_fs(); set_fs(KERNEL_DS);
ret =readFile(fp, buf, sz);
set_fs(oldfs);
closeFile(fp);
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 7c03b69b8ed3..6d02904de63f 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -22,7 +22,7 @@ static const struct sdio_device_id sdio_ids[] =
{ SDIO_DEVICE(0x024c, 0xb723), },
{ /* end: all zeroes */ },
};
-static const struct acpi_device_id acpi_ids[] __used = {
+static const struct acpi_device_id acpi_ids[] = {
{"OBDA8723", 0x0000},
{}
};
diff --git a/drivers/staging/rtlwifi/Kconfig b/drivers/staging/rtlwifi/Kconfig
index 7b4276f5c41f..28286a87a601 100644
--- a/drivers/staging/rtlwifi/Kconfig
+++ b/drivers/staging/rtlwifi/Kconfig
@@ -2,7 +2,7 @@ config R8822BE
tristate "Realtek RTL8822BE Wireless Network Adapter"
depends on PCI && MAC80211 && m
select FW_LOADER
- ---help---
+ help
This is the staging driver for Realtek RTL8822BE 802.11ac PCIe
wireless network adapters.
diff --git a/drivers/staging/rtlwifi/efuse.c b/drivers/staging/rtlwifi/efuse.c
index abb0f720cf21..a7c9e186f2b2 100644
--- a/drivers/staging/rtlwifi/efuse.c
+++ b/drivers/staging/rtlwifi/efuse.c
@@ -919,7 +919,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct pgpkt_struct target_pkt;
u8 write_state = PG_STATE_HEADER;
- int continual = true, dataempty = true, result = true;
+ int continual = true, result = true;
u16 efuse_addr = 0;
u8 efuse_data;
u8 target_word_cnts = 0;
@@ -946,7 +946,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
while (continual && (efuse_addr < (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
if (write_state == PG_STATE_HEADER) {
- dataempty = true;
badworden = 0x0F;
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
"efuse PG_STATE_HEADER\n");
@@ -1182,13 +1181,12 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
int continual = true;
u16 efuse_addr = 0;
- u8 hoffset, hworden;
+ u8 hworden;
u8 efuse_data, word_cnts;
while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
(efuse_addr < EFUSE_MAX_SIZE)) {
if (efuse_data != 0xFF) {
- hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
word_cnts = efuse_calculate_word_cnts(hworden);
efuse_addr = efuse_addr + (word_cnts * 2) + 1;
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
index 53f55f129a76..ddbeff8224ab 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
@@ -2466,8 +2466,11 @@ halmac_parse_psd_data_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
segment_size = (u8)PSD_DATA_GET_SEGMENT_SIZE(c2h_buf);
psd_set->data_size = total_size;
- if (!psd_set->data)
+ if (!psd_set->data) {
psd_set->data = kzalloc(psd_set->data_size, GFP_KERNEL);
+ if (!psd_set->data)
+ return HALMAC_RET_MALLOC_FAIL;
+ }
if (segment_id == 0)
psd_set->segment_size = segment_size;
diff --git a/drivers/staging/rtlwifi/pci.h b/drivers/staging/rtlwifi/pci.h
index 7535ac24bfbb..0e55baec95a8 100644
--- a/drivers/staging/rtlwifi/pci.h
+++ b/drivers/staging/rtlwifi/pci.h
@@ -205,7 +205,8 @@ struct rtl_pci {
/*Bcn control register setting */
u32 reg_bcn_ctrl_val;
- /*ASPM*/ u8 const_pci_aspm;
+ /*ASPM*/
+ u8 const_pci_aspm;
u8 const_amdpci_aspm;
u8 const_hwsw_rfoff_d3;
u8 const_support_pciaspm;
diff --git a/drivers/staging/rtlwifi/phydm/phydm_rainfo.c b/drivers/staging/rtlwifi/phydm/phydm_rainfo.c
index b46791a727c7..ed740a93c8b6 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_rainfo.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_rainfo.c
@@ -124,7 +124,7 @@ void odm_c2h_ra_para_report_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len)
ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
"VHT_EN =", cmd_buf[4]);
ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s %d\n",
- "Hightest rate =", cmd_buf[5]);
+ "Highest rate =", cmd_buf[5]);
ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
"Lowest rate =", cmd_buf[6]);
ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE, "%5s 0x%x\n",
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index a40396614814..f061dd1382aa 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -486,6 +486,8 @@ bool rtl8822b_halmac_cb_write_data_h2c(struct rtl_priv *rtlpriv, u8 *buf,
/* without GFP_DMA, pci_map_single() may not work */
skb = __netdev_alloc_skb(NULL, size, GFP_ATOMIC | GFP_DMA);
+ if (!skb)
+ return false;
memcpy((u8 *)skb_put(skb, size), buf, size);
return _rtl8822be_send_bcn_or_cmd_packet(rtlpriv->hw, skb, H2C_QUEUE);
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index e43f92080c20..1128eec3bd08 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -1665,7 +1665,10 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
return STATUS_FAIL;
}
- ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);
+ retval = ms_read_extra_data(chip, old_blk, i, extra,
+ MS_EXTRA_SIZE);
+ if (retval != STATUS_SUCCESS)
+ return STATUS_FAIL;
retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
MS_EXTRA_SIZE, SystemParm, 6);
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 2c47ae613ea1..c256a2398651 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4437,7 +4437,12 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_init_cmd(chip);
rtsx_add_cmd(chip, CHECK_REG_CMD, 0xFD30, 0x02, 0x02);
- rtsx_send_cmd(chip, SD_CARD, 250);
+ retval = rtsx_send_cmd(chip, SD_CARD, 250);
+ if (retval < 0) {
+ write_err = true;
+ rtsx_clear_sd_error(chip);
+ goto sd_execute_write_cmd_failed;
+ }
retval = sd_update_lock_status(chip);
if (retval != STATUS_SUCCESS) {
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 1273e7d18925..f38051eedb6c 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -5,7 +5,7 @@
#include "ddk750_power.h"
#include "ddk750_dvi.h"
-static void setDisplayControl(int ctrl, int disp_state)
+static void set_display_control(int ctrl, int disp_state)
{
/* state != 0 means turn on both timing & plane en_bit */
unsigned long reg, val, reserved;
@@ -137,12 +137,12 @@ void ddk750_setLogicalDispOut(enum disp_output output)
if (output & PRI_TP_USAGE) {
/* set primary timing and plane en_bit */
- setDisplayControl(0, (output & PRI_TP_MASK) >> PRI_TP_OFFSET);
+ set_display_control(0, (output & PRI_TP_MASK) >> PRI_TP_OFFSET);
}
if (output & SEC_TP_USAGE) {
/* set secondary timing and plane en_bit*/
- setDisplayControl(1, (output & SEC_TP_MASK) >> SEC_TP_OFFSET);
+ set_display_control(1, (output & SEC_TP_MASK) >> SEC_TP_OFFSET);
}
if (output & PNL_SEQ_USAGE) {
diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig
index efd6f4560d3e..d8ec780f7741 100644
--- a/drivers/staging/speakup/Kconfig
+++ b/drivers/staging/speakup/Kconfig
@@ -3,7 +3,7 @@ menu "Speakup console speech"
config SPEAKUP
depends on VT
tristate "Speakup core"
- ---help---
+ help
This is the Speakup screen reader. Think of it as a
video console for blind people. If built in to the
kernel, it can speak everything on the text console from
@@ -43,7 +43,7 @@ config SPEAKUP
if SPEAKUP
config SPEAKUP_SYNTH_ACNTSA
tristate "Accent SA synthesizer support"
- ---help---
+ help
This is the Speakup driver for the Accent SA
synthesizer. You can say y to build it into the kernel,
or m to build it as a module. See the configuration
@@ -52,7 +52,7 @@ config SPEAKUP_SYNTH_ACNTSA
config SPEAKUP_SYNTH_ACNTPC
tristate "Accent PC synthesizer support"
depends on ISA || COMPILE_TEST
- ---help---
+ help
This is the Speakup driver for the accent pc
synthesizer. You can say y to build it into the kernel,
or m to build it as a module. See the configuration
@@ -60,7 +60,7 @@ config SPEAKUP_SYNTH_ACNTPC
config SPEAKUP_SYNTH_APOLLO
tristate "Apollo II synthesizer support"
- ---help---
+ help
This is the Speakup driver for the Apollo II
synthesizer. You can say y to build it into the kernel,
or m to build it as a module. See the configuration
@@ -68,7 +68,7 @@ config SPEAKUP_SYNTH_APOLLO
config SPEAKUP_SYNTH_AUDPTR
tristate "Audapter synthesizer support"
- ---help---
+ help
This is the Speakup driver for the Audapter synthesizer.
You can say y to build it into the kernel, or m to
build it as a module. See the configuration help on the
@@ -76,7 +76,7 @@ config SPEAKUP_SYNTH_AUDPTR
config SPEAKUP_SYNTH_BNS
tristate "Braille 'n' Speak synthesizer support"
- ---help---
+ help
This is the Speakup driver for the Braille 'n' Speak
synthesizer. You can say y to build it into the kernel,
or m to build it as a module. See the configuration
@@ -84,7 +84,7 @@ config SPEAKUP_SYNTH_BNS
config SPEAKUP_SYNTH_DECTLK
tristate "DECtalk Express synthesizer support"
- ---help---
+ help
This is the Speakup driver for the DecTalk Express
synthesizer. You can say y to build it into the kernel,
@@ -93,7 +93,7 @@ config SPEAKUP_SYNTH_DECTLK
config SPEAKUP_SYNTH_DECEXT
tristate "DECtalk External (old) synthesizer support"
- ---help---
+ help
This is the Speakup driver for the DecTalk External
(old) synthesizer. You can say y to build it into the
@@ -105,7 +105,7 @@ config SPEAKUP_SYNTH_DECPC
depends on m
depends on ISA || COMPILE_TEST
tristate "DECtalk PC (big ISA card) synthesizer support"
- ---help---
+ help
This is the Speakup driver for the DecTalk PC (full
length ISA) synthesizer. You can say m to build it as
@@ -127,7 +127,7 @@ config SPEAKUP_SYNTH_DECPC
config SPEAKUP_SYNTH_DTLK
tristate "DoubleTalk PC synthesizer support"
depends on ISA || COMPILE_TEST
- ---help---
+ help
This is the Speakup driver for the internal DoubleTalk
PC synthesizer. You can say y to build it into the
@@ -138,7 +138,7 @@ config SPEAKUP_SYNTH_DTLK
config SPEAKUP_SYNTH_KEYPC
tristate "Keynote Gold PC synthesizer support"
depends on ISA || COMPILE_TEST
- ---help---
+ help
This is the Speakup driver for the Keynote Gold
PC synthesizer. You can say y to build it into the
@@ -148,7 +148,7 @@ config SPEAKUP_SYNTH_KEYPC
config SPEAKUP_SYNTH_LTLK
tristate "DoubleTalk LT/LiteTalk synthesizer support"
----help---
+help
This is the Speakup driver for the LiteTalk/DoubleTalk
LT synthesizer. You can say y to build it into the
@@ -158,7 +158,7 @@ config SPEAKUP_SYNTH_LTLK
config SPEAKUP_SYNTH_SOFT
tristate "Userspace software synthesizer support"
- ---help---
+ help
This is the software synthesizer device node. It will
register a device /dev/softsynth which midware programs
@@ -169,7 +169,7 @@ config SPEAKUP_SYNTH_SOFT
config SPEAKUP_SYNTH_SPKOUT
tristate "Speak Out synthesizer support"
- ---help---
+ help
This is the Speakup driver for the Speakout synthesizer.
You can say y to build it into the kernel, or m to
@@ -178,7 +178,7 @@ config SPEAKUP_SYNTH_SPKOUT
config SPEAKUP_SYNTH_TXPRT
tristate "Transport synthesizer support"
- ---help---
+ help
This is the Speakup driver for the Transport
synthesizer. You can say y to build it into the kernel,
@@ -187,7 +187,7 @@ config SPEAKUP_SYNTH_TXPRT
config SPEAKUP_SYNTH_DUMMY
tristate "Dummy synthesizer driver (for testing)"
- ---help---
+ help
This is a dummy Speakup driver for plugging a mere serial
terminal. This is handy if you want to test speakup but
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 2e36d872662c..11c704b27c3c 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -154,6 +154,7 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
continue;
}
+ /* Do not replace with kstrtoul: here we need temp to be updated */
index = simple_strtoul(cp, &temp, 10);
if (index > 255) {
rejected++;
@@ -787,6 +788,7 @@ static ssize_t message_store_helper(const char *buf, size_t count,
continue;
}
+ /* Do not replace with kstrtoul: here we need temp to be updated */
index = simple_strtoul(cp, &temp, 10);
while ((temp < linefeed) && (*temp == ' ' || *temp == '\t'))
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 869f40ebf1a7..b6a65b0c1896 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -901,7 +901,8 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
while (start < end) {
sentbuf[bn][i] = get_char(vc, (u_short *)start, &tmp);
if (i > 0) {
- if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.' &&
+ if (sentbuf[bn][i] == SPACE &&
+ sentbuf[bn][i - 1] == '.' &&
numsentences[bn] < 9) {
/* Sentence Marker */
numsentences[bn]++;
@@ -1235,7 +1236,8 @@ int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
key_data_len = (states + 1) * (num_keys + 1);
if (key_data_len + SHIFT_TBL_SIZE + 4 >= sizeof(spk_key_buf)) {
pr_debug("too many key_infos (%d over %u)\n",
- key_data_len + SHIFT_TBL_SIZE + 4, (unsigned int)(sizeof(spk_key_buf)));
+ key_data_len + SHIFT_TBL_SIZE + 4,
+ (unsigned int)(sizeof(spk_key_buf)));
return -EINVAL;
}
memset(k_buffer, 0, SHIFT_TBL_SIZE);
@@ -1249,8 +1251,8 @@ int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
for (i = 1; i <= states; i++) {
ch = *cp1++;
if (ch >= SHIFT_TBL_SIZE) {
- pr_debug("(%d) not valid shift state (max_allowed = %d)\n", ch,
- SHIFT_TBL_SIZE);
+ pr_debug("(%d) not valid shift state (max_allowed = %d)\n",
+ ch, SHIFT_TBL_SIZE);
return -EINVAL;
}
spk_shift_table[ch] = i;
@@ -1258,7 +1260,8 @@ int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
keymap_flags = *cp1++;
while ((ch = *cp1)) {
if (ch >= MAX_KEY) {
- pr_debug("(%d), not valid key, (max_allowed = %d)\n", ch, MAX_KEY);
+ pr_debug("(%d), not valid key, (max_allowed = %d)\n",
+ ch, MAX_KEY);
return -EINVAL;
}
spk_our_keys[ch] = cp1;
@@ -1979,6 +1982,7 @@ oops:
return 1;
}
+ /* Do not replace with kstrtoul: here we need cp to be updated */
goto_pos = simple_strtoul(goto_buf, &cp, 10);
if (*cp == 'x') {
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index 3741c0fcf5bb..ddbb7e97d118 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -192,7 +192,8 @@ static void do_catch_up(struct spk_synth *synth)
synth->io_ops->synth_out(synth, PROCSPEECH);
if (time_after_eq(jiffies, jiff_max)) {
if (!in_escape)
- synth->io_ops->synth_out(synth, PROCSPEECH);
+ synth->io_ops->synth_out(synth,
+ PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock,
flags);
jiffy_delta_val = jiffy_delta->u.n.value;
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index a144f28ee1a8..dccb4ea29d37 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -260,7 +260,8 @@ static void do_catch_up(struct spk_synth *synth)
synth->io_ops->synth_out(synth, PROCSPEECH);
if (time_after_eq(jiffies, jiff_max)) {
if (!in_escape)
- synth->io_ops->synth_out(synth, PROCSPEECH);
+ synth->io_ops->synth_out(synth,
+ PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock,
flags);
jiffy_delta_val = jiffy_delta->u.n.value;
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 947c79532e10..edff6ce85655 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -12,7 +12,9 @@
#include <linux/unistd.h>
#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
#include <linux/poll.h> /* for poll_wait() */
-#include <linux/sched/signal.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
+
+/* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
+#include <linux/sched/signal.h>
#include "spk_priv.h"
#include "speakup.h"
diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h
index cc99fcd1bc6e..1f789bd1c678 100644
--- a/drivers/staging/speakup/spk_priv_keyinfo.h
+++ b/drivers/staging/speakup/spk_priv_keyinfo.h
@@ -61,11 +61,16 @@
#define SPEAKUP_HELP 0x2d
#define TOGGLE_CURSORING 0x2e
#define READ_ALL_DOC 0x2f
-#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */
+
+/* one greater than the last func handler */
+#define SPKUP_MAX_FUNC 0x30
+
#define SPK_KEY 0x80
#define FIRST_EDIT_BITS 0x22
#define FIRST_SET_VAR SPELL_DELAY
-#define VAR_START 0x40 /* increase if adding more than 0x3f functions */
+
+/* increase if adding more than 0x3f functions */
+#define VAR_START 0x40
/* keys for setting variables, must be ordered same as the enum for var_ids */
/* with dec being even and inc being 1 greater */
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index c92bbd05516e..0057eb980bec 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -128,7 +128,8 @@ struct spk_io_ops spk_ttyio_ops = {
};
EXPORT_SYMBOL_GPL(spk_ttyio_ops);
-static inline void get_termios(struct tty_struct *tty, struct ktermios *out_termios)
+static inline void get_termios(struct tty_struct *tty,
+ struct ktermios *out_termios)
{
down_read(&tty->termios_rwsem);
*out_termios = tty->termios;
@@ -167,8 +168,9 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
tmp_termios.c_cflag |= CRTSCTS;
tty_set_termios(tty, &tmp_termios);
/*
- * check c_cflag to see if it's updated as tty_set_termios may not return
- * error even when no tty bits are changed by the request.
+ * check c_cflag to see if it's updated as tty_set_termios
+ * may not return error even when no tty bits are
+ * changed by the request.
*/
get_termios(tty, &tmp_termios);
if (!(tmp_termios.c_cflag & CRTSCTS))
@@ -207,10 +209,11 @@ static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
/* No room */
return 0;
if (ret < 0) {
- pr_warn("%s: I/O error, deactivating speakup\n", in_synth->long_name);
- /* No synth any more, so nobody will restart TTYs, and we thus
- * need to do it ourselves. Now that there is no synth we can
- * let application flood anyway
+ pr_warn("%s: I/O error, deactivating speakup\n",
+ in_synth->long_name);
+ /* No synth any more, so nobody will restart TTYs,
+ * and we thus need to do it ourselves. Now that there
+ * is no synth we can let application flood anyway
*/
in_synth->alive = 0;
speakup_start_ttys();
@@ -265,7 +268,8 @@ static void spk_ttyio_send_xchar(char ch)
return;
}
- speakup_tty->ops->send_xchar(speakup_tty, ch);
+ if (speakup_tty->ops->send_xchar)
+ speakup_tty->ops->send_xchar(speakup_tty, ch);
mutex_unlock(&speakup_tty_mutex);
}
@@ -277,7 +281,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
return;
}
- speakup_tty->ops->tiocmset(speakup_tty, set, clear);
+ if (speakup_tty->ops->tiocmset)
+ speakup_tty->ops->tiocmset(speakup_tty, set, clear);
mutex_unlock(&speakup_tty_mutex);
}
@@ -369,7 +374,8 @@ const char *spk_ttyio_synth_immediate(struct spk_synth *synth, const char *buff)
while ((ch = *buff)) {
if (ch == '\n')
ch = synth->procspeech;
- if (tty_write_room(speakup_tty) < 1 || !synth->io_ops->synth_out(synth, ch))
+ if (tty_write_room(speakup_tty) < 1 ||
+ !synth->io_ops->synth_out(synth, ch))
return buff;
buff++;
}
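The send_xchar()/tiocmset() hunks add NULL checks because those struct tty_operations members are optional: a tty driver that does not implement them leaves the pointers NULL, and calling through them unconditionally would dereference NULL. A sketch of the pattern, with a hypothetical wrapper name:

#include <linux/tty.h>

/*
 * Sketch only: call an optional tty operation defensively. Not every
 * tty driver provides send_xchar(), so test the pointer first.
 */
static void send_xchar_if_supported(struct tty_struct *tty, char ch)
{
	if (tty->ops->send_xchar)
		tty->ops->send_xchar(tty, ch);
}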
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 1b545152cc49..5741d1cb6227 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -238,7 +238,8 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
if (!var_data->u.n.out_str)
sprintf(cp, var_data->u.n.synth_fmt, (int)val);
else
- sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
+ sprintf(cp, var_data->u.n.synth_fmt,
+ var_data->u.n.out_str[val]);
synth_printf("%s", cp);
return 0;
}
@@ -328,6 +329,7 @@ char *spk_s2uchar(char *start, char *dest)
{
int val;
+ /* Do not replace with kstrtoul: here we need start to be updated */
val = simple_strtoul(skip_spaces(start), &start, 10);
if (*start == ',')
start++;
diff --git a/drivers/staging/unisys/visorhba/Makefile b/drivers/staging/unisys/visorhba/Makefile
index a8a8e0e0fb09..97e48757944a 100644
--- a/drivers/staging/unisys/visorhba/Makefile
+++ b/drivers/staging/unisys/visorhba/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_UNISYS_VISORHBA) += visorhba.o
visorhba-y := visorhba_main.o
-ccflags-y += -Idrivers/staging/unisys/include
-
+ccflags-y += -I $(srctree)/$(src)/../include
diff --git a/drivers/staging/unisys/visornic/Makefile b/drivers/staging/unisys/visornic/Makefile
index 439e95e03300..336a746f793b 100644
--- a/drivers/staging/unisys/visornic/Makefile
+++ b/drivers/staging/unisys/visornic/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_UNISYS_VISORNIC) += visornic.o
visornic-y := visornic_main.o
-ccflags-y += -Idrivers/staging/unisys/include
-
+ccflags-y += -I $(srctree)/$(src)/../include
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 5eeb4b93b45b..1c1a470d2e50 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -896,9 +896,7 @@ static netdev_tx_t visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
/* pad the packet out to minimum size */
padlen = ETH_MIN_PACKET_SIZE - len;
- memset(&skb->data[len], 0, padlen);
- skb->tail += padlen;
- skb->len += padlen;
+ skb_put_zero(skb, padlen);
len += padlen;
firstfraglen += padlen;
}
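The padding hunk replaces an open-coded memset() plus manual skb->tail/skb->len bookkeeping with skb_put_zero(), which extends the data area and zeroes the new bytes in one call. A minimal sketch, assuming the caller has already reserved enough tailroom; pad_to_min_len() is a hypothetical name:

#include <linux/skbuff.h>

/* Sketch: zero-pad an sk_buff up to min_len if tailroom allows. */
static void pad_to_min_len(struct sk_buff *skb, unsigned int min_len)
{
	if (skb->len < min_len && skb_tailroom(skb) >= min_len - skb->len)
		skb_put_zero(skb, min_len - skb->len);
}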
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Makefile b/drivers/staging/vc04_services/bcm2835-audio/Makefile
index d7b88d164d15..536bd0c11ddb 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Makefile
+++ b/drivers/staging/vc04_services/bcm2835-audio/Makefile
@@ -1,5 +1,4 @@
obj-$(CONFIG_SND_BCM2835) += snd-bcm2835.o
snd-bcm2835-objs := bcm2835.o bcm2835-ctl.o bcm2835-pcm.o bcm2835-vchiq.o
-ccflags-y += -Idrivers/staging/vc04_services -D__VCCOREVER__=0x04000000
-
+ccflags-y += -I $(srctree)/$(src)/.. -D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/bcm2835-camera/Makefile b/drivers/staging/vc04_services/bcm2835-camera/Makefile
index 2a4565e682d8..472f21e1f2a1 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/Makefile
+++ b/drivers/staging/vc04_services/bcm2835-camera/Makefile
@@ -7,5 +7,5 @@ bcm2835-v4l2-$(CONFIG_VIDEO_BCM2835) := \
obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-v4l2.o
ccflags-y += \
- -Idrivers/staging/vc04_services \
+ -I $(srctree)/$(src)/.. \
-D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 338b6e952515..dd4898861b83 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -407,10 +407,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
/* Allocate enough storage to hold the page pointers and the page
* list
*/
- pagelist = dma_zalloc_coherent(g_dev,
- pagelist_size,
- &dma_addr,
- GFP_KERNEL);
+ pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
+ GFP_KERNEL);
vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
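This is part of the treewide removal of dma_zalloc_coherent(): dma_alloc_coherent() already returns zeroed memory, so the zalloc wrapper was redundant. A minimal sketch of the plain call; alloc_desc_ring() is a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: dma_alloc_coherent() hands back zero-initialised memory,
 * so no separate zalloc variant or memset() is needed.
 */
static void *alloc_desc_ring(struct device *dev, size_t size,
			     dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}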
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 9e17ec651bde..53f5a1cb4636 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -446,6 +446,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
static inline void
remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
{
+ event->fired = 1;
event->armed = 0;
wake_up_all(wq);
}
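The one-line addition stores event->fired before waking the wait queue, presumably so a waiter that re-checks the flag after wake_up_all() sees the event as delivered rather than going back to sleep; that matches the usual "publish the condition, then wake" ordering for wait queues. A generic sketch of that ordering (demo_event and its fields are illustrative, not the vchiq structures):

#include <linux/wait.h>

struct demo_event {
	wait_queue_head_t wq;
	int fired;
};

static void demo_init(struct demo_event *ev)
{
	init_waitqueue_head(&ev->wq);
	ev->fired = 0;
}

/* Producer: publish the condition first, then wake the waiters. */
static void demo_signal(struct demo_event *ev)
{
	ev->fired = 1;
	wake_up_all(&ev->wq);
}

/* Consumer: wait_event() re-checks the condition, so a wakeup that
 * races with the store above is not lost.
 */
static void demo_wait(struct demo_event *ev)
{
	wait_event(ev->wq, ev->fired);
}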
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index b5ba0c76fb43..b4cdc0b7fee7 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1704,13 +1704,9 @@ static const unsigned short awcFrameTime[MAX_RATE] = {
* Return Value: FrameTime
*
*/
-unsigned int
-BBuGetFrameTime(
- unsigned char byPreambleType,
- unsigned char byPktType,
- unsigned int cbFrameLength,
- unsigned short wRate
-)
+unsigned int BBuGetFrameTime(unsigned char byPreambleType,
+ unsigned char byPktType,
+ unsigned int cbFrameLength, unsigned short wRate)
{
unsigned int uFrameTime;
unsigned int uPreamble;
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 30d9e9d20a39..0cc2e07829c5 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 52e9e6b90b56..6ecbe925026d 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -60,14 +60,9 @@ static const unsigned short cwRXBCNTSFOff[MAX_RATE] = {
/*--------------------- Static Functions --------------------------*/
-static
-void
-s_vCalculateOFDMRParameter(
- unsigned char byRate,
- u8 bb_type,
- unsigned char *pbyTxRate,
- unsigned char *pbyRsvTime
-);
+static void s_vCalculateOFDMRParameter(unsigned char byRate, u8 bb_type,
+ unsigned char *pbyTxRate,
+ unsigned char *pbyRsvTime);
/*--------------------- Export Functions --------------------------*/
@@ -506,10 +501,7 @@ bool CARDbRadioPowerOn(struct vnt_private *priv)
return bResult;
}
-void
-CARDvSafeResetTx(
- struct vnt_private *priv
-)
+void CARDvSafeResetTx(struct vnt_private *priv)
{
unsigned int uu;
struct vnt_tx_desc *pCurrTD;
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 5884fd56153e..d71022aa3f86 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1ab0e8562d40..b370985b58a1 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -440,12 +440,9 @@ static bool device_init_rings(struct vnt_private *priv)
void *vir_pool;
/*allocate all RD/TD rings a single pool*/
- vir_pool = dma_zalloc_coherent(&priv->pcid->dev,
- priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
- priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
- priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
- priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
- &priv->pool_dma, GFP_ATOMIC);
+ vir_pool = dma_alloc_coherent(&priv->pcid->dev,
+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
+ &priv->pool_dma, GFP_ATOMIC);
if (!vir_pool) {
dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
return false;
@@ -459,13 +456,9 @@ static bool device_init_rings(struct vnt_private *priv)
priv->rd1_pool_dma = priv->rd0_pool_dma +
priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
- priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev,
- priv->opts.tx_descs[0] * PKT_BUF_SZ +
- priv->opts.tx_descs[1] * PKT_BUF_SZ +
- CB_BEACON_BUF_SIZE +
- CB_MAX_BUF_SIZE,
- &priv->tx_bufs_dma0,
- GFP_ATOMIC);
+ priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
+ priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE,
+ &priv->tx_bufs_dma0, GFP_ATOMIC);
if (!priv->tx0_bufs) {
dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
@@ -812,12 +805,12 @@ static bool device_alloc_rx_buf(struct vnt_private *priv,
}
static void device_free_rx_buf(struct vnt_private *priv,
- struct vnt_rx_desc *rd)
+ struct vnt_rx_desc *rd)
{
struct vnt_rd_info *rd_info = rd->rd_info;
dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
- priv->rx_buf_sz, DMA_FROM_DEVICE);
+ priv->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(rd_info->skb);
}
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index 91dede54cc1f..dcd933a6b66e 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -63,17 +63,19 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
}
switch (key_type) {
- /* fallthrough */
case VNT_KEY_DEFAULTKEY:
/* default key last entry */
entry = MAX_KEY_TABLE - 1;
key->hw_key_idx = entry;
+ /* fall through */
case VNT_KEY_ALLGROUP:
key_mode |= VNT_KEY_ALLGROUP;
if (onfly_latch)
key_mode |= VNT_KEY_ONFLY_ALL;
+ /* fall through */
case VNT_KEY_GROUP_ADDRESS:
key_mode |= mode;
+ /* fall through */
case VNT_KEY_GROUP:
key_mode |= (mode << 4);
key_mode |= VNT_KEY_GROUP;
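The key.c hunk moves the fall-through annotations to the point where each case actually drops into the next one; a single comment above the first case label annotates nothing and still trips -Wimplicit-fallthrough. Schematically, with hypothetical cases:

/* Sketch: each deliberate fall through is annotated immediately before
 * the next case label, which is where the compiler looks for it.
 */
static unsigned int build_mode(int type, unsigned int base)
{
	unsigned int mode = 0;

	switch (type) {
	case 3:
		mode |= 0x8;
		/* fall through */
	case 2:
		mode |= 0x4;
		/* fall through */
	case 1:
		mode |= base;
		break;
	default:
		break;
	}
	return mode;
}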
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 94e700fcd0b6..3fd87f95c524 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index 72a4daa05fdb..2ad3feed9725 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_WILC1000) += wilc1000.o
ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
-DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
-wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
+wilc1000-objs := wilc_wfi_cfgoperations.o wilc_netdev.o wilc_mon.o \
host_interface.o wilc_wlan_cfg.o wilc_wlan.o
obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 70c854d939ce..4dd9a20f6a0b 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -6,25 +6,23 @@
#include "wilc_wfi_netdevice.h"
-#define HOST_IF_SCAN_TIMEOUT 4000
-#define HOST_IF_CONNECT_TIMEOUT 9500
+#define WILC_HIF_SCAN_TIMEOUT_MS 4000
+#define WILC_HIF_CONNECT_TIMEOUT_MS 9500
-#define FALSE_FRMWR_CHANNEL 100
+#define WILC_FALSE_FRMWR_CHANNEL 100
+#define WILC_MAX_RATES_SUPPORTED 12
-#define REAL_JOIN_REQ 0
-
-struct rcvd_async_info {
- u8 *buffer;
- u32 len;
+struct wilc_rcvd_mac_info {
+ u8 status;
};
-struct set_multicast {
- bool enabled;
+struct wilc_set_multicast {
+ u32 enabled;
u32 cnt;
u8 *mc_list;
};
-struct del_all_sta {
+struct wilc_del_all_sta {
u8 assoc_sta;
u8 mac[WILC_MAX_NUM_STA][ETH_ALEN];
};
@@ -36,7 +34,7 @@ struct wilc_op_mode {
struct wilc_reg_frame {
bool reg;
u8 reg_id;
- __le32 frame_type;
+ __le16 frame_type;
} __packed;
struct wilc_drv_handler {
@@ -71,16 +69,16 @@ struct wilc_gtk_key {
u8 key[0];
} __packed;
-union message_body {
- struct rcvd_net_info net_info;
- struct rcvd_async_info async_info;
- struct set_multicast multicast_info;
- struct remain_ch remain_on_ch;
+union wilc_message_body {
+ struct wilc_rcvd_net_info net_info;
+ struct wilc_rcvd_mac_info mac_info;
+ struct wilc_set_multicast mc_info;
+ struct wilc_remain_ch remain_on_ch;
char *data;
};
struct host_if_msg {
- union message_body body;
+ union wilc_message_body body;
struct wilc_vif *vif;
struct work_struct work;
void (*fn)(struct work_struct *ws);
@@ -88,37 +86,50 @@ struct host_if_msg {
bool is_sync;
};
-struct join_bss_param {
- enum bss_types bss_type;
+struct wilc_noa_opp_enable {
+ u8 ct_window;
+ u8 cnt;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct wilc_noa_opp_disable {
+ u8 cnt;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct wilc_join_bss_param {
+ char ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_terminator;
+ u8 bss_type;
+ u8 ch;
+ __le16 cap_info;
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 beacon_period;
u8 dtim_period;
- u16 beacon_period;
- u16 cap_info;
- u8 bssid[6];
- char ssid[MAX_SSID_LEN];
- u8 ssid_len;
- u8 supp_rates[MAX_RATES_SUPPORTED + 1];
- u8 ht_capable;
+ u8 supp_rates[WILC_MAX_RATES_SUPPORTED + 1];
u8 wmm_cap;
u8 uapsd_cap;
- bool rsn_found;
+ u8 ht_capable;
+ u8 rsn_found;
u8 rsn_grp_policy;
u8 mode_802_11i;
- u8 rsn_pcip_policy[3];
- u8 rsn_auth_policy[3];
+ u8 p_suites[3];
+ u8 akm_suites[3];
u8 rsn_cap[2];
- u32 tsf;
u8 noa_enabled;
- u8 opp_enabled;
- u8 ct_window;
- u8 cnt;
+ __le32 tsf_lo;
u8 idx;
- u8 duration[4];
- u8 interval[4];
- u8 start_time[4];
-};
-
-static struct host_if_drv *terminated_handle;
-static struct mutex hif_deinit_lock;
+ u8 opp_enabled;
+ union {
+ struct wilc_noa_opp_disable opp_dis;
+ struct wilc_noa_opp_enable opp_en;
+ };
+} __packed;
/* 'msg' should be free by the caller for syc */
static struct host_if_msg*
@@ -185,7 +196,7 @@ static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
u8 abort_running_scan;
struct wid wid;
struct host_if_drv *hif_drv = vif->hif_drv;
- struct user_scan_req *scan_req;
+ struct wilc_user_scan_req *scan_req;
if (evt == SCAN_EVENT_ABORTED) {
abort_running_scan = 1;
@@ -210,7 +221,7 @@ static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
scan_req = &hif_drv->usr_scan_req;
if (scan_req->scan_result) {
- scan_req->scan_result(evt, NULL, scan_req->arg, NULL);
+ scan_req->scan_result(evt, NULL, scan_req->arg);
scan_req->scan_result = NULL;
}
@@ -218,9 +229,10 @@ static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
}
int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
- u8 *ch_freq_list, u8 ch_list_len, const u8 *ies,
- size_t ies_len, wilc_scan_result scan_result, void *user_arg,
- struct hidden_network *hidden_net)
+ u8 *ch_freq_list, u8 ch_list_len, const u8 *ies, size_t ies_len,
+ void (*scan_result_fn)(enum scan_event,
+ struct wilc_rcvd_net_info *, void *),
+ void *user_arg, struct wilc_probe_ssid *search)
{
int result = 0;
struct wid wid_list[5];
@@ -228,7 +240,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
u32 i;
u8 *buffer;
u8 valuesize = 0;
- u8 *hdn_ntwk_wid_val = NULL;
+ u8 *search_ssid_vals = NULL;
struct host_if_drv *hif_drv = vif->hif_drv;
if (hif_drv->hif_state >= HOST_IF_SCANNING &&
@@ -246,26 +258,24 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
hif_drv->usr_scan_req.ch_cnt = 0;
- if (hidden_net) {
- wid_list[index].id = WID_SSID_PROBE_REQ;
- wid_list[index].type = WID_STR;
-
- for (i = 0; i < hidden_net->n_ssids; i++)
- valuesize += ((hidden_net->net_info[i].ssid_len) + 1);
- hdn_ntwk_wid_val = kmalloc(valuesize + 1, GFP_KERNEL);
- wid_list[index].val = hdn_ntwk_wid_val;
- if (wid_list[index].val) {
+ if (search) {
+ for (i = 0; i < search->n_ssids; i++)
+ valuesize += ((search->ssid_info[i].ssid_len) + 1);
+ search_ssid_vals = kmalloc(valuesize + 1, GFP_KERNEL);
+ if (search_ssid_vals) {
+ wid_list[index].id = WID_SSID_PROBE_REQ;
+ wid_list[index].type = WID_STR;
+ wid_list[index].val = search_ssid_vals;
buffer = wid_list[index].val;
- *buffer++ = hidden_net->n_ssids;
+ *buffer++ = search->n_ssids;
- for (i = 0; i < hidden_net->n_ssids; i++) {
- *buffer++ = hidden_net->net_info[i].ssid_len;
- memcpy(buffer, hidden_net->net_info[i].ssid,
- hidden_net->net_info[i].ssid_len);
- buffer += hidden_net->net_info[i].ssid_len;
+ for (i = 0; i < search->n_ssids; i++) {
+ *buffer++ = search->ssid_info[i].ssid_len;
+ memcpy(buffer, search->ssid_info[i].ssid,
+ search->ssid_info[i].ssid_len);
+ buffer += search->ssid_info[i].ssid_len;
}
-
wid_list[index].size = (s32)(valuesize + 1);
index++;
}
@@ -311,16 +321,16 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
goto error;
}
- hif_drv->usr_scan_req.scan_result = scan_result;
+ hif_drv->usr_scan_req.scan_result = scan_result_fn;
hif_drv->usr_scan_req.arg = user_arg;
hif_drv->scan_timer_vif = vif;
mod_timer(&hif_drv->scan_timer,
- jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT));
+ jiffies + msecs_to_jiffies(WILC_HIF_SCAN_TIMEOUT_MS));
error:
- if (hidden_net) {
- kfree(hidden_net->net_info);
- kfree(hdn_ntwk_wid_val);
+ if (search) {
+ kfree(search->ssid_info);
+ kfree(search_ssid_vals);
}
return result;
@@ -329,35 +339,16 @@ error:
static int wilc_send_connect_wid(struct wilc_vif *vif)
{
int result = 0;
- struct wid wid_list[8];
- u32 wid_cnt = 0, dummyval = 0;
- u8 *cur_byte = NULL;
+ struct wid wid_list[4];
+ u32 wid_cnt = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
- struct user_conn_req *conn_attr = &hif_drv->usr_conn_req;
- struct join_bss_param *bss_param = hif_drv->usr_conn_req.param;
-
- wid_list[wid_cnt].id = WID_SUCCESS_FRAME_COUNT;
- wid_list[wid_cnt].type = WID_INT;
- wid_list[wid_cnt].size = sizeof(u32);
- wid_list[wid_cnt].val = (s8 *)(&(dummyval));
- wid_cnt++;
-
- wid_list[wid_cnt].id = WID_RECEIVED_FRAGMENT_COUNT;
- wid_list[wid_cnt].type = WID_INT;
- wid_list[wid_cnt].size = sizeof(u32);
- wid_list[wid_cnt].val = (s8 *)(&(dummyval));
- wid_cnt++;
-
- wid_list[wid_cnt].id = WID_FAILED_COUNT;
- wid_list[wid_cnt].type = WID_INT;
- wid_list[wid_cnt].size = sizeof(u32);
- wid_list[wid_cnt].val = (s8 *)(&(dummyval));
- wid_cnt++;
+ struct wilc_conn_info *conn_attr = &hif_drv->conn_info;
+ struct wilc_join_bss_param *bss_param = conn_attr->param;
wid_list[wid_cnt].id = WID_INFO_ELEMENT_ASSOCIATE;
wid_list[wid_cnt].type = WID_BIN_DATA;
- wid_list[wid_cnt].val = conn_attr->ies;
- wid_list[wid_cnt].size = conn_attr->ies_len;
+ wid_list[wid_cnt].val = conn_attr->req_ies;
+ wid_list[wid_cnt].size = conn_attr->req_ies_len;
wid_cnt++;
wid_list[wid_cnt].id = WID_11I_MODE;
@@ -374,97 +365,8 @@ static int wilc_send_connect_wid(struct wilc_vif *vif)
wid_list[wid_cnt].id = WID_JOIN_REQ_EXTENDED;
wid_list[wid_cnt].type = WID_STR;
- wid_list[wid_cnt].size = 112;
- wid_list[wid_cnt].val = kmalloc(wid_list[wid_cnt].size, GFP_KERNEL);
-
- if (!wid_list[wid_cnt].val) {
- result = -EFAULT;
- goto error;
- }
-
- cur_byte = wid_list[wid_cnt].val;
-
- if (conn_attr->ssid) {
- memcpy(cur_byte, conn_attr->ssid, conn_attr->ssid_len);
- cur_byte[conn_attr->ssid_len] = '\0';
- }
- cur_byte += MAX_SSID_LEN;
- *(cur_byte++) = WILC_FW_BSS_TYPE_INFRA;
-
- if (conn_attr->ch >= 1 && conn_attr->ch <= 14) {
- *(cur_byte++) = conn_attr->ch;
- } else {
- netdev_err(vif->ndev, "Channel out of range\n");
- *(cur_byte++) = 0xFF;
- }
- put_unaligned_le16(bss_param->cap_info, cur_byte);
- cur_byte += 2;
-
- if (conn_attr->bssid)
- memcpy(cur_byte, conn_attr->bssid, 6);
- cur_byte += 6;
-
- if (conn_attr->bssid)
- memcpy(cur_byte, conn_attr->bssid, 6);
- cur_byte += 6;
-
- put_unaligned_le16(bss_param->beacon_period, cur_byte);
- cur_byte += 2;
- *(cur_byte++) = bss_param->dtim_period;
-
- memcpy(cur_byte, bss_param->supp_rates, MAX_RATES_SUPPORTED + 1);
- cur_byte += (MAX_RATES_SUPPORTED + 1);
-
- *(cur_byte++) = bss_param->wmm_cap;
- *(cur_byte++) = bss_param->uapsd_cap;
-
- *(cur_byte++) = bss_param->ht_capable;
- conn_attr->ht_capable = bss_param->ht_capable;
-
- *(cur_byte++) = bss_param->rsn_found;
- *(cur_byte++) = bss_param->rsn_grp_policy;
- *(cur_byte++) = bss_param->mode_802_11i;
-
- memcpy(cur_byte, bss_param->rsn_pcip_policy,
- sizeof(bss_param->rsn_pcip_policy));
- cur_byte += sizeof(bss_param->rsn_pcip_policy);
-
- memcpy(cur_byte, bss_param->rsn_auth_policy,
- sizeof(bss_param->rsn_auth_policy));
- cur_byte += sizeof(bss_param->rsn_auth_policy);
-
- memcpy(cur_byte, bss_param->rsn_cap, sizeof(bss_param->rsn_cap));
- cur_byte += sizeof(bss_param->rsn_cap);
-
- *(cur_byte++) = REAL_JOIN_REQ;
- *(cur_byte++) = bss_param->noa_enabled;
-
- if (bss_param->noa_enabled) {
- put_unaligned_le32(bss_param->tsf, cur_byte);
- cur_byte += 4;
-
- *(cur_byte++) = bss_param->opp_enabled;
- *(cur_byte++) = bss_param->idx;
-
- if (bss_param->opp_enabled)
- *(cur_byte++) = bss_param->ct_window;
-
- *(cur_byte++) = bss_param->cnt;
-
- memcpy(cur_byte, bss_param->duration,
- sizeof(bss_param->duration));
- cur_byte += sizeof(bss_param->duration);
-
- memcpy(cur_byte, bss_param->interval,
- sizeof(bss_param->interval));
- cur_byte += sizeof(bss_param->interval);
-
- memcpy(cur_byte, bss_param->start_time,
- sizeof(bss_param->start_time));
- cur_byte += sizeof(bss_param->start_time);
- }
-
- cur_byte = wid_list[wid_cnt].val;
+ wid_list[wid_cnt].size = sizeof(*bss_param);
+ wid_list[wid_cnt].val = (u8 *)bss_param;
wid_cnt++;
result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
@@ -472,25 +374,17 @@ static int wilc_send_connect_wid(struct wilc_vif *vif)
wilc_get_vif_idx(vif));
if (result) {
netdev_err(vif->ndev, "failed to send config packet\n");
- kfree(cur_byte);
goto error;
} else {
hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
}
- kfree(cur_byte);
return 0;
error:
- kfree(conn_attr->bssid);
- conn_attr->bssid = NULL;
-
- kfree(conn_attr->ssid);
- conn_attr->ssid = NULL;
-
- kfree(conn_attr->ies);
- conn_attr->ies = NULL;
+ kfree(conn_attr->req_ies);
+ conn_attr->req_ies = NULL;
return result;
}
@@ -500,7 +394,6 @@ static void handle_connect_timeout(struct work_struct *work)
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
struct wilc_vif *vif = msg->vif;
int result;
- struct connect_info info;
struct wid wid;
u16 dummy_reason_code = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
@@ -512,31 +405,11 @@ static void handle_connect_timeout(struct work_struct *work)
hif_drv->hif_state = HOST_IF_IDLE;
- memset(&info, 0, sizeof(struct connect_info));
+ if (hif_drv->conn_info.conn_result) {
+ hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_CONN_RESP,
+ WILC_MAC_STATUS_DISCONNECTED,
+ hif_drv->conn_info.arg);
- if (hif_drv->usr_conn_req.conn_result) {
- if (hif_drv->usr_conn_req.bssid) {
- memcpy(info.bssid,
- hif_drv->usr_conn_req.bssid, 6);
- }
-
- if (hif_drv->usr_conn_req.ies) {
- info.req_ies_len = hif_drv->usr_conn_req.ies_len;
- info.req_ies = kmemdup(hif_drv->usr_conn_req.ies,
- hif_drv->usr_conn_req.ies_len,
- GFP_KERNEL);
- if (!info.req_ies)
- goto out;
- }
-
- hif_drv->usr_conn_req.conn_result(CONN_DISCONN_EVENT_CONN_RESP,
- &info,
- WILC_MAC_STATUS_DISCONNECTED,
- NULL,
- hif_drv->usr_conn_req.arg);
-
- kfree(info.req_ies);
- info.req_ies = NULL;
} else {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
}
@@ -551,346 +424,174 @@ static void handle_connect_timeout(struct work_struct *work)
if (result)
netdev_err(vif->ndev, "Failed to send disconnect\n");
- hif_drv->usr_conn_req.ssid_len = 0;
- kfree(hif_drv->usr_conn_req.ssid);
- hif_drv->usr_conn_req.ssid = NULL;
- kfree(hif_drv->usr_conn_req.bssid);
- hif_drv->usr_conn_req.bssid = NULL;
- hif_drv->usr_conn_req.ies_len = 0;
- kfree(hif_drv->usr_conn_req.ies);
- hif_drv->usr_conn_req.ies = NULL;
+ hif_drv->conn_info.req_ies_len = 0;
+ kfree(hif_drv->conn_info.req_ies);
+ hif_drv->conn_info.req_ies = NULL;
out:
kfree(msg);
}
-static void host_int_fill_join_bss_param(struct join_bss_param *param, u8 *ies,
- u16 *out_index, u8 *pcipher_tc,
- u8 *auth_total_cnt, u32 tsf_lo,
- u8 *rates_no)
+void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ struct cfg80211_crypto_settings *crypto)
{
- u8 ext_rates_no;
- u16 offset;
- u8 pcipher_cnt;
- u8 auth_cnt;
- u8 i, j;
- u16 index = *out_index;
-
- if (ies[index] == WLAN_EID_SUPP_RATES) {
- *rates_no = ies[index + 1];
- param->supp_rates[0] = *rates_no;
- index += 2;
-
- for (i = 0; i < *rates_no; i++)
- param->supp_rates[i + 1] = ies[index + i];
-
- index += *rates_no;
- } else if (ies[index] == WLAN_EID_EXT_SUPP_RATES) {
- ext_rates_no = ies[index + 1];
- if (ext_rates_no > (MAX_RATES_SUPPORTED - *rates_no))
- param->supp_rates[0] = MAX_RATES_SUPPORTED;
- else
- param->supp_rates[0] += ext_rates_no;
- index += 2;
- for (i = 0; i < (param->supp_rates[0] - *rates_no); i++)
- param->supp_rates[*rates_no + i + 1] = ies[index + i];
-
- index += ext_rates_no;
- } else if (ies[index] == WLAN_EID_HT_CAPABILITY) {
- param->ht_capable = true;
- index += ies[index + 1] + 2;
- } else if ((ies[index] == WLAN_EID_VENDOR_SPECIFIC) &&
- (ies[index + 2] == 0x00) && (ies[index + 3] == 0x50) &&
- (ies[index + 4] == 0xF2) && (ies[index + 5] == 0x02) &&
- ((ies[index + 6] == 0x00) || (ies[index + 6] == 0x01)) &&
- (ies[index + 7] == 0x01)) {
- param->wmm_cap = true;
-
- if (ies[index + 8] & BIT(7))
- param->uapsd_cap = true;
- index += ies[index + 1] + 2;
- } else if ((ies[index] == WLAN_EID_VENDOR_SPECIFIC) &&
- (ies[index + 2] == 0x50) && (ies[index + 3] == 0x6f) &&
- (ies[index + 4] == 0x9a) &&
- (ies[index + 5] == 0x09) && (ies[index + 6] == 0x0c)) {
- u16 p2p_cnt;
-
- param->tsf = tsf_lo;
- param->noa_enabled = 1;
- param->idx = ies[index + 9];
-
- if (ies[index + 10] & BIT(7)) {
- param->opp_enabled = 1;
- param->ct_window = ies[index + 10];
- } else {
- param->opp_enabled = 0;
- }
-
- param->cnt = ies[index + 11];
- p2p_cnt = index + 12;
-
- memcpy(param->duration, ies + p2p_cnt, 4);
- p2p_cnt += 4;
-
- memcpy(param->interval, ies + p2p_cnt, 4);
- p2p_cnt += 4;
-
- memcpy(param->start_time, ies + p2p_cnt, 4);
-
- index += ies[index + 1] + 2;
- } else if ((ies[index] == WLAN_EID_RSN) ||
- ((ies[index] == WLAN_EID_VENDOR_SPECIFIC) &&
- (ies[index + 2] == 0x00) &&
- (ies[index + 3] == 0x50) && (ies[index + 4] == 0xF2) &&
- (ies[index + 5] == 0x01))) {
- u16 rsn_idx = index;
-
- if (ies[rsn_idx] == WLAN_EID_RSN) {
- param->mode_802_11i = 2;
- } else {
- if (param->mode_802_11i == 0)
- param->mode_802_11i = 1;
- rsn_idx += 4;
- }
-
- rsn_idx += 7;
- param->rsn_grp_policy = ies[rsn_idx];
- rsn_idx++;
- offset = ies[rsn_idx] * 4;
- pcipher_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
- rsn_idx += 2;
-
- i = *pcipher_tc;
- j = 0;
- for (; i < (pcipher_cnt + *pcipher_tc) && i < 3; i++, j++) {
- u8 *policy = &param->rsn_pcip_policy[i];
-
- *policy = ies[rsn_idx + ((j + 1) * 4) - 1];
- }
-
- *pcipher_tc += pcipher_cnt;
- rsn_idx += offset;
-
- offset = ies[rsn_idx] * 4;
-
- auth_cnt = (ies[rsn_idx] > 3) ? 3 : ies[rsn_idx];
- rsn_idx += 2;
- i = *auth_total_cnt;
- j = 0;
- for (; i < (*auth_total_cnt + auth_cnt); i++, j++) {
- u8 *policy = &param->rsn_auth_policy[i];
-
- *policy = ies[rsn_idx + ((j + 1) * 4) - 1];
- }
-
- *auth_total_cnt += auth_cnt;
- rsn_idx += offset;
-
- if (ies[index] == WLAN_EID_RSN) {
- param->rsn_cap[0] = ies[rsn_idx];
- param->rsn_cap[1] = ies[rsn_idx + 1];
- rsn_idx += 2;
- }
- param->rsn_found = true;
- index += ies[index + 1] + 2;
- } else {
- index += ies[index + 1] + 2;
- }
-
- *out_index = index;
-}
-
-static void *host_int_parse_join_bss_param(struct network_info *info)
-{
- struct join_bss_param *param;
- u16 index = 0;
- u8 rates_no = 0;
- u8 pcipher_total_cnt = 0;
- u8 auth_total_cnt = 0;
+ struct wilc_join_bss_param *param;
+ struct ieee80211_p2p_noa_attr noa_attr;
+ u8 rates_len = 0;
+ const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ int ret;
+ const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return NULL;
- param->dtim_period = info->dtim_period;
- param->beacon_period = info->beacon_period;
- param->cap_info = info->cap_info;
- memcpy(param->bssid, info->bssid, 6);
- memcpy((u8 *)param->ssid, info->ssid, info->ssid_len + 1);
- param->ssid_len = info->ssid_len;
- memset(param->rsn_pcip_policy, 0xFF, 3);
- memset(param->rsn_auth_policy, 0xFF, 3);
-
- while (index < info->ies_len)
- host_int_fill_join_bss_param(param, info->ies, &index,
- &pcipher_total_cnt,
- &auth_total_cnt, info->tsf_lo,
- &rates_no);
-
- return (void *)param;
-}
-
-static inline u8 *get_bssid(struct ieee80211_mgmt *mgmt)
-{
- if (ieee80211_has_fromds(mgmt->frame_control))
- return mgmt->sa;
- else if (ieee80211_has_tods(mgmt->frame_control))
- return mgmt->da;
- else
- return mgmt->bssid;
-}
+ param->beacon_period = cpu_to_le16(bss->beacon_interval);
+ param->cap_info = cpu_to_le16(bss->capability);
+ param->bss_type = WILC_FW_BSS_TYPE_INFRA;
+ param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
+ ether_addr_copy(param->bssid, bss->bssid);
-static s32 wilc_parse_network_info(u8 *msg_buffer,
- struct network_info **ret_network_info)
-{
- struct network_info *info;
- struct ieee80211_mgmt *mgt;
- u8 *wid_val, *msa, *ies;
- u16 wid_len, rx_len, ies_len;
- u8 msg_type;
- size_t offset;
- const u8 *ch_elm, *tim_elm, *ssid_elm;
-
- msg_type = msg_buffer[0];
- if ('N' != msg_type)
- return -EFAULT;
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ if (ssid_elm) {
+ if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
+ memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
+ }
- wid_len = get_unaligned_le16(&msg_buffer[6]);
- wid_val = &msg_buffer[8];
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ if (tim_elm && tim_elm[1] >= 2)
+ param->dtim_period = tim_elm[3];
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
+ memset(param->p_suites, 0xFF, 3);
+ memset(param->akm_suites, 0xFF, 3);
- info->rssi = wid_val[0];
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ if (rates_ie) {
+ rates_len = rates_ie[1];
+ param->supp_rates[0] = rates_len;
+ memcpy(&param->supp_rates[1], rates_ie + 2, rates_len);
+ }
- msa = &wid_val[1];
- mgt = (struct ieee80211_mgmt *)&wid_val[1];
- rx_len = wid_len - 1;
+ supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies->data,
+ ies->len);
+ if (supp_rates_ie) {
+ if (supp_rates_ie[1] > (WILC_MAX_RATES_SUPPORTED - rates_len))
+ param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
+ else
+ param->supp_rates[0] += supp_rates_ie[1];
- if (ieee80211_is_probe_resp(mgt->frame_control)) {
- info->cap_info = le16_to_cpu(mgt->u.probe_resp.capab_info);
- info->beacon_period = le16_to_cpu(mgt->u.probe_resp.beacon_int);
- info->tsf = le64_to_cpu(mgt->u.probe_resp.timestamp);
- info->tsf_lo = (u32)info->tsf;
- offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- } else if (ieee80211_is_beacon(mgt->frame_control)) {
- info->cap_info = le16_to_cpu(mgt->u.beacon.capab_info);
- info->beacon_period = le16_to_cpu(mgt->u.beacon.beacon_int);
- info->tsf = le64_to_cpu(mgt->u.beacon.timestamp);
- info->tsf_lo = (u32)info->tsf;
- offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
- } else {
- /* only process probe response and beacon frame */
- kfree(info);
- return -EIO;
+ memcpy(&param->supp_rates[rates_len + 1], supp_rates_ie + 2,
+ (param->supp_rates[0] - rates_len));
}
- ether_addr_copy(info->bssid, get_bssid(mgt));
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
+ if (ht_ie)
+ param->ht_capable = true;
- ies = mgt->u.beacon.variable;
- ies_len = rx_len - offset;
- if (ies_len <= 0) {
- kfree(info);
- return -EIO;
+ ret = cfg80211_get_p2p_attr(ies->data, ies->len,
+ IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+ (u8 *)&noa_attr, sizeof(noa_attr));
+ if (ret > 0) {
+ param->tsf_lo = cpu_to_le32(ies->tsf);
+ param->noa_enabled = 1;
+ param->idx = noa_attr.index;
+ if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
+ param->opp_enabled = 1;
+ param->opp_en.ct_window = noa_attr.oppps_ctwindow;
+ param->opp_en.cnt = noa_attr.desc[0].count;
+ param->opp_en.duration = noa_attr.desc[0].duration;
+ param->opp_en.interval = noa_attr.desc[0].interval;
+ param->opp_en.start_time = noa_attr.desc[0].start_time;
+ } else {
+ param->opp_enabled = 0;
+ param->opp_dis.cnt = noa_attr.desc[0].count;
+ param->opp_dis.duration = noa_attr.desc[0].duration;
+ param->opp_dis.interval = noa_attr.desc[0].interval;
+ param->opp_dis.start_time = noa_attr.desc[0].start_time;
+ }
+ }
+ wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ ies->data, ies->len);
+ if (wmm_ie) {
+ struct ieee80211_wmm_param_ie *ie;
+
+ ie = (struct ieee80211_wmm_param_ie *)wmm_ie;
+ if ((ie->oui_subtype == 0 || ie->oui_subtype == 1) &&
+ ie->version == 1) {
+ param->wmm_cap = true;
+ if (ie->qos_info & BIT(7))
+ param->uapsd_cap = true;
+ }
}
- info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
- if (!info->ies) {
- kfree(info);
- return -ENOMEM;
+ wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data, ies->len);
+ if (wpa_ie) {
+ param->mode_802_11i = 1;
+ param->rsn_found = true;
}
- info->ies_len = ies_len;
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
+ if (rsn_ie) {
+ int offset = 8;
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);
- if (ssid_elm) {
- info->ssid_len = ssid_elm[1];
- if (info->ssid_len <= IEEE80211_MAX_SSID_LEN)
- memcpy(info->ssid, ssid_elm + 2, info->ssid_len);
- else
- info->ssid_len = 0;
+ param->mode_802_11i = 2;
+ param->rsn_found = true;
+ //extract RSN capabilities
+ offset += (rsn_ie[offset] * 4) + 2;
+ offset += (rsn_ie[offset] * 4) + 2;
+ memcpy(param->rsn_cap, &rsn_ie[offset], 2);
}
- ch_elm = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ies, ies_len);
- if (ch_elm && ch_elm[1] > 0)
- info->ch = ch_elm[2];
+ if (param->rsn_found) {
+ int i;
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
- if (tim_elm && tim_elm[1] >= 2)
- info->dtim_period = tim_elm[3];
+ param->rsn_grp_policy = crypto->cipher_group & 0xFF;
+ for (i = 0; i < crypto->n_ciphers_pairwise && i < 3; i++)
+ param->p_suites[i] = crypto->ciphers_pairwise[i] & 0xFF;
- *ret_network_info = info;
+ for (i = 0; i < crypto->n_akm_suites && i < 3; i++)
+ param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
+ }
- return 0;
+ return (void *)param;
}
static void handle_rcvd_ntwrk_info(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
- struct wilc_vif *vif = msg->vif;
- struct rcvd_net_info *rcvd_info = &msg->body.net_info;
- u32 i;
- bool found;
- struct network_info *info = NULL;
- void *params;
- struct host_if_drv *hif_drv = vif->hif_drv;
- struct user_scan_req *scan_req = &hif_drv->usr_scan_req;
-
- found = true;
+ struct wilc_rcvd_net_info *rcvd_info = &msg->body.net_info;
+ struct wilc_user_scan_req *scan_req = &msg->vif->hif_drv->usr_scan_req;
+ const u8 *ch_elm;
+ u8 *ies;
+ int ies_len;
+ size_t offset;
- if (!scan_req->scan_result)
+ if (ieee80211_is_probe_resp(rcvd_info->mgmt->frame_control))
+ offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ else if (ieee80211_is_beacon(rcvd_info->mgmt->frame_control))
+ offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ else
goto done;
- wilc_parse_network_info(rcvd_info->buffer, &info);
- if (!info || !scan_req->scan_result) {
- netdev_err(vif->ndev, "%s: info or scan result NULL\n",
- __func__);
+ ies = rcvd_info->mgmt->u.beacon.variable;
+ ies_len = rcvd_info->frame_len - offset;
+ if (ies_len <= 0)
goto done;
- }
-
- for (i = 0; i < scan_req->ch_cnt; i++) {
- if (memcmp(scan_req->net_info[i].bssid, info->bssid, 6) == 0) {
- if (info->rssi <= scan_req->net_info[i].rssi) {
- goto done;
- } else {
- scan_req->net_info[i].rssi = info->rssi;
- found = false;
- break;
- }
- }
- }
-
- if (found) {
- if (scan_req->ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
- scan_req->net_info[scan_req->ch_cnt].rssi = info->rssi;
-
- memcpy(scan_req->net_info[scan_req->ch_cnt].bssid,
- info->bssid, 6);
-
- scan_req->ch_cnt++;
- info->new_network = true;
- params = host_int_parse_join_bss_param(info);
+ ch_elm = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ies, ies_len);
+ if (ch_elm && ch_elm[1] > 0)
+ rcvd_info->ch = ch_elm[2];
- scan_req->scan_result(SCAN_EVENT_NETWORK_FOUND, info,
- scan_req->arg, params);
- }
- } else {
- info->new_network = false;
- scan_req->scan_result(SCAN_EVENT_NETWORK_FOUND, info,
- scan_req->arg, NULL);
- }
+ if (scan_req->scan_result)
+ scan_req->scan_result(SCAN_EVENT_NETWORK_FOUND, rcvd_info,
+ scan_req->arg);
done:
- kfree(rcvd_info->buffer);
- rcvd_info->buffer = NULL;
-
- if (info) {
- kfree(info->ies);
- kfree(info);
- }
-
+ kfree(rcvd_info->mgmt);
kfree(msg);
}
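The rewritten join path builds wilc_join_bss_param straight from the struct cfg80211_bss that the stack already caches, using cfg80211_find_ie()/cfg80211_find_vendor_ie() instead of hand-walking raw beacon bytes. A hedged sketch of that IE-lookup style; demo_parse_bss() and its outputs are illustrative only:

#include <net/cfg80211.h>
#include <linux/ieee80211.h>
#include <linux/string.h>

/* Sketch: pull the SSID and DTIM period out of a BSS entry's IEs.
 * Real code must hold rcu_read_lock() around rcu_dereference(bss->ies).
 */
static void demo_parse_bss(struct cfg80211_bss *bss, u8 *ssid, u8 *dtim)
{
	const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
	const u8 *ssid_elm, *tim_elm;

	ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
	if (ssid_elm && ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
		memcpy(ssid, ssid_elm + 2, ssid_elm[1]);

	tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
	if (tim_elm && tim_elm[1] >= 2)
		*dtim = tim_elm[3];
}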
@@ -918,20 +619,8 @@ static void host_int_get_assoc_res_info(struct wilc_vif *vif,
*rcvd_assoc_resp_info_len = wid.size;
}
-static inline void host_int_free_user_conn_req(struct host_if_drv *hif_drv)
-{
- hif_drv->usr_conn_req.ssid_len = 0;
- kfree(hif_drv->usr_conn_req.ssid);
- hif_drv->usr_conn_req.ssid = NULL;
- kfree(hif_drv->usr_conn_req.bssid);
- hif_drv->usr_conn_req.bssid = NULL;
- hif_drv->usr_conn_req.ies_len = 0;
- kfree(hif_drv->usr_conn_req.ies);
- hif_drv->usr_conn_req.ies = NULL;
-}
-
static s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
- struct connect_info *ret_conn_info)
+ struct wilc_conn_info *ret_conn_info)
{
u8 *ies;
u16 ies_len;
@@ -955,10 +644,8 @@ static s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
u8 mac_status)
{
- struct connect_info conn_info;
struct host_if_drv *hif_drv = vif->hif_drv;
-
- memset(&conn_info, 0, sizeof(struct connect_info));
+ struct wilc_conn_info *conn_info = &hif_drv->conn_info;
if (mac_status == WILC_MAC_STATUS_CONNECTED) {
u32 assoc_resp_info_len;
@@ -974,7 +661,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
err = wilc_parse_assoc_resp_info(hif_drv->assoc_resp,
assoc_resp_info_len,
- &conn_info);
+ conn_info);
if (err)
netdev_err(vif->ndev,
"wilc_parse_assoc_resp_info() returned error %d\n",
@@ -982,31 +669,13 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
}
}
- if (hif_drv->usr_conn_req.bssid) {
- memcpy(conn_info.bssid, hif_drv->usr_conn_req.bssid, 6);
-
- if (mac_status == WILC_MAC_STATUS_CONNECTED &&
- conn_info.status == WLAN_STATUS_SUCCESS) {
- memcpy(hif_drv->assoc_bssid,
- hif_drv->usr_conn_req.bssid, ETH_ALEN);
- }
- }
-
- if (hif_drv->usr_conn_req.ies) {
- conn_info.req_ies = kmemdup(hif_drv->usr_conn_req.ies,
- hif_drv->usr_conn_req.ies_len,
- GFP_KERNEL);
- if (conn_info.req_ies)
- conn_info.req_ies_len = hif_drv->usr_conn_req.ies_len;
- }
-
del_timer(&hif_drv->connect_timer);
- hif_drv->usr_conn_req.conn_result(CONN_DISCONN_EVENT_CONN_RESP,
- &conn_info, mac_status, NULL,
- hif_drv->usr_conn_req.arg);
+ conn_info->conn_result(CONN_DISCONN_EVENT_CONN_RESP, mac_status,
+ hif_drv->conn_info.arg);
if (mac_status == WILC_MAC_STATUS_CONNECTED &&
- conn_info.status == WLAN_STATUS_SUCCESS) {
+ conn_info->status == WLAN_STATUS_SUCCESS) {
+ ether_addr_copy(hif_drv->assoc_bssid, conn_info->bssid);
wilc_set_power_mgmt(vif, 0, 0);
hif_drv->hif_state = HOST_IF_CONNECTED;
@@ -1018,44 +687,39 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
hif_drv->hif_state = HOST_IF_IDLE;
}
- kfree(conn_info.resp_ies);
- conn_info.resp_ies = NULL;
+ kfree(conn_info->resp_ies);
+ conn_info->resp_ies = NULL;
+ conn_info->resp_ies_len = 0;
- kfree(conn_info.req_ies);
- conn_info.req_ies = NULL;
- host_int_free_user_conn_req(hif_drv);
+ kfree(conn_info->req_ies);
+ conn_info->req_ies = NULL;
+ conn_info->req_ies_len = 0;
}
static inline void host_int_handle_disconnect(struct wilc_vif *vif)
{
- struct disconnect_info disconn_info;
struct host_if_drv *hif_drv = vif->hif_drv;
- wilc_connect_result conn_result = hif_drv->usr_conn_req.conn_result;
-
- memset(&disconn_info, 0, sizeof(struct disconnect_info));
if (hif_drv->usr_scan_req.scan_result) {
del_timer(&hif_drv->scan_timer);
handle_scan_done(vif, SCAN_EVENT_ABORTED);
}
- disconn_info.reason = 0;
- disconn_info.ie = NULL;
- disconn_info.ie_len = 0;
-
- if (conn_result) {
+ if (hif_drv->conn_info.conn_result) {
vif->obtaining_ip = false;
wilc_set_power_mgmt(vif, 0, 0);
- conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, NULL, 0,
- &disconn_info, hif_drv->usr_conn_req.arg);
+ hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
+ 0, hif_drv->conn_info.arg);
} else {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
}
eth_zero_addr(hif_drv->assoc_bssid);
- host_int_free_user_conn_req(hif_drv);
+ hif_drv->conn_info.req_ies_len = 0;
+ kfree(hif_drv->conn_info.req_ies);
+ hif_drv->conn_info.req_ies = NULL;
hif_drv->hif_state = HOST_IF_IDLE;
}
@@ -1063,55 +727,30 @@ static void handle_rcvd_gnrl_async_info(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
struct wilc_vif *vif = msg->vif;
- struct rcvd_async_info *rcvd_info = &msg->body.async_info;
- u8 msg_type;
- u8 mac_status;
+ struct wilc_rcvd_mac_info *mac_info = &msg->body.mac_info;
struct host_if_drv *hif_drv = vif->hif_drv;
- if (!rcvd_info->buffer) {
- netdev_err(vif->ndev, "%s: buffer is NULL\n", __func__);
- goto free_msg;
- }
-
if (!hif_drv) {
netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__);
- goto free_rcvd_info;
+ goto free_msg;
}
- if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP ||
- hif_drv->hif_state == HOST_IF_CONNECTED ||
- hif_drv->usr_scan_req.scan_result) {
- if (!hif_drv->usr_conn_req.conn_result) {
- netdev_err(vif->ndev, "%s: conn_result is NULL\n",
- __func__);
- goto free_rcvd_info;
- }
-
- msg_type = rcvd_info->buffer[0];
-
- if ('I' != msg_type) {
- netdev_err(vif->ndev, "Received Message incorrect.\n");
- goto free_rcvd_info;
- }
+ if (!hif_drv->conn_info.conn_result) {
+ netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
+ goto free_msg;
+ }
- mac_status = rcvd_info->buffer[7];
- if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
- host_int_parse_assoc_resp_info(vif, mac_status);
- } else if ((mac_status == WILC_MAC_STATUS_DISCONNECTED) &&
- (hif_drv->hif_state == HOST_IF_CONNECTED)) {
+ if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
+ host_int_parse_assoc_resp_info(vif, mac_info->status);
+ } else if (mac_info->status == WILC_MAC_STATUS_DISCONNECTED) {
+ if (hif_drv->hif_state == HOST_IF_CONNECTED) {
host_int_handle_disconnect(vif);
- } else if ((mac_status == WILC_MAC_STATUS_DISCONNECTED) &&
- (hif_drv->usr_scan_req.scan_result)) {
+ } else if (hif_drv->usr_scan_req.scan_result) {
del_timer(&hif_drv->scan_timer);
- if (hif_drv->usr_scan_req.scan_result)
- handle_scan_done(vif, SCAN_EVENT_ABORTED);
+ handle_scan_done(vif, SCAN_EVENT_ABORTED);
}
}
-free_rcvd_info:
- kfree(rcvd_info->buffer);
- rcvd_info->buffer = NULL;
-
free_msg:
kfree(msg);
}
@@ -1120,9 +759,8 @@ int wilc_disconnect(struct wilc_vif *vif)
{
struct wid wid;
struct host_if_drv *hif_drv = vif->hif_drv;
- struct disconnect_info disconn_info;
- struct user_scan_req *scan_req;
- struct user_conn_req *conn_req;
+ struct wilc_user_scan_req *scan_req;
+ struct wilc_conn_info *conn_info;
int result;
u16 dummy_reason_code = 0;
@@ -1141,27 +779,21 @@ int wilc_disconnect(struct wilc_vif *vif)
return result;
}
- memset(&disconn_info, 0, sizeof(struct disconnect_info));
-
- disconn_info.reason = 0;
- disconn_info.ie = NULL;
- disconn_info.ie_len = 0;
scan_req = &hif_drv->usr_scan_req;
- conn_req = &hif_drv->usr_conn_req;
+ conn_info = &hif_drv->conn_info;
if (scan_req->scan_result) {
del_timer(&hif_drv->scan_timer);
- scan_req->scan_result(SCAN_EVENT_ABORTED, NULL, scan_req->arg,
- NULL);
+ scan_req->scan_result(SCAN_EVENT_ABORTED, NULL, scan_req->arg);
scan_req->scan_result = NULL;
}
- if (conn_req->conn_result) {
+ if (conn_info->conn_result) {
if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP)
del_timer(&hif_drv->connect_timer);
- conn_req->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, NULL,
- 0, &disconn_info, conn_req->arg);
+ conn_info->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, 0,
+ conn_info->arg);
} else {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
}
@@ -1170,14 +802,9 @@ int wilc_disconnect(struct wilc_vif *vif)
eth_zero_addr(hif_drv->assoc_bssid);
- conn_req->ssid_len = 0;
- kfree(conn_req->ssid);
- conn_req->ssid = NULL;
- kfree(conn_req->bssid);
- conn_req->bssid = NULL;
- conn_req->ies_len = 0;
- kfree(conn_req->ies);
- conn_req->ies = NULL;
+ conn_info->req_ies_len = 0;
+ kfree(conn_info->req_ies);
+ conn_info->req_ies = NULL;
return 0;
}
@@ -1285,47 +912,29 @@ static void wilc_hif_pack_sta_param(u8 *cur_byte, const u8 *mac,
}
static int handle_remain_on_chan(struct wilc_vif *vif,
- struct remain_ch *hif_remain_ch)
+ struct wilc_remain_ch *hif_remain_ch)
{
int result;
u8 remain_on_chan_flag;
struct wid wid;
struct host_if_drv *hif_drv = vif->hif_drv;
- if (!hif_drv->remain_on_ch_pending) {
- hif_drv->remain_on_ch.arg = hif_remain_ch->arg;
- hif_drv->remain_on_ch.expired = hif_remain_ch->expired;
- hif_drv->remain_on_ch.ready = hif_remain_ch->ready;
- hif_drv->remain_on_ch.ch = hif_remain_ch->ch;
- hif_drv->remain_on_ch.id = hif_remain_ch->id;
- } else {
- hif_remain_ch->ch = hif_drv->remain_on_ch.ch;
- }
+ if (hif_drv->usr_scan_req.scan_result)
+ return -EBUSY;
- if (hif_drv->usr_scan_req.scan_result) {
- hif_drv->remain_on_ch_pending = 1;
- result = -EBUSY;
- goto error;
- }
- if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
- result = -EBUSY;
- goto error;
- }
+ if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP)
+ return -EBUSY;
- if (vif->obtaining_ip || vif->connecting) {
- result = -EBUSY;
- goto error;
- }
+ if (vif->obtaining_ip || vif->connecting)
+ return -EBUSY;
remain_on_chan_flag = true;
wid.id = WID_REMAIN_ON_CHAN;
wid.type = WID_STR;
wid.size = 2;
wid.val = kmalloc(wid.size, GFP_KERNEL);
- if (!wid.val) {
- result = -ENOMEM;
- goto error;
- }
+ if (!wid.val)
+ return -ENOMEM;
wid.val[0] = remain_on_chan_flag;
wid.val[1] = (s8)hif_remain_ch->ch;
@@ -1333,28 +942,23 @@ static int handle_remain_on_chan(struct wilc_vif *vif,
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
kfree(wid.val);
- if (result != 0)
- netdev_err(vif->ndev, "Failed to set remain on channel\n");
+ if (result)
+ return -EBUSY;
-error:
+ hif_drv->remain_on_ch.arg = hif_remain_ch->arg;
+ hif_drv->remain_on_ch.expired = hif_remain_ch->expired;
+ hif_drv->remain_on_ch.ch = hif_remain_ch->ch;
+ hif_drv->remain_on_ch.cookie = hif_remain_ch->cookie;
hif_drv->remain_on_ch_timer_vif = vif;
- mod_timer(&hif_drv->remain_on_ch_timer,
- jiffies + msecs_to_jiffies(hif_remain_ch->duration));
-
- if (hif_drv->remain_on_ch.ready)
- hif_drv->remain_on_ch.ready(hif_drv->remain_on_ch.arg);
-
- if (hif_drv->remain_on_ch_pending)
- hif_drv->remain_on_ch_pending = 0;
- return result;
+ return 0;
}
static void handle_listen_state_expired(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
struct wilc_vif *vif = msg->vif;
- struct remain_ch *hif_remain_ch = &msg->body.remain_on_ch;
+ struct wilc_remain_ch *hif_remain_ch = &msg->body.remain_on_ch;
u8 remain_on_chan_flag;
struct wid wid;
int result;
@@ -1372,7 +976,7 @@ static void handle_listen_state_expired(struct work_struct *work)
goto free_msg;
wid.val[0] = remain_on_chan_flag;
- wid.val[1] = FALSE_FRMWR_CHANNEL;
+ wid.val[1] = WILC_FALSE_FRMWR_CHANNEL;
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
@@ -1384,7 +988,7 @@ static void handle_listen_state_expired(struct work_struct *work)
if (hif_drv->remain_on_ch.expired) {
hif_drv->remain_on_ch.expired(hif_drv->remain_on_ch.arg,
- hif_remain_ch->id);
+ hif_remain_ch->cookie);
}
} else {
netdev_dbg(vif->ndev, "Not in listen state\n");
@@ -1408,7 +1012,7 @@ static void listen_timer_cb(struct timer_list *t)
if (IS_ERR(msg))
return;
- msg->body.remain_on_ch.id = vif->hif_drv->remain_on_ch.id;
+ msg->body.remain_on_ch.cookie = vif->hif_drv->remain_on_ch.cookie;
result = wilc_enqueue_work(msg);
if (result) {
@@ -1421,32 +1025,27 @@ static void handle_set_mcast_filter(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
struct wilc_vif *vif = msg->vif;
- struct set_multicast *hif_set_mc = &msg->body.multicast_info;
+ struct wilc_set_multicast *set_mc = &msg->body.mc_info;
int result;
struct wid wid;
u8 *cur_byte;
wid.id = WID_SETUP_MULTICAST_FILTER;
wid.type = WID_BIN;
- wid.size = sizeof(struct set_multicast) + (hif_set_mc->cnt * ETH_ALEN);
+ wid.size = sizeof(struct wilc_set_multicast) + (set_mc->cnt * ETH_ALEN);
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
goto error;
cur_byte = wid.val;
- *cur_byte++ = (hif_set_mc->enabled & 0xFF);
- *cur_byte++ = 0;
- *cur_byte++ = 0;
- *cur_byte++ = 0;
+ put_unaligned_le32(set_mc->enabled, cur_byte);
+ cur_byte += 4;
- *cur_byte++ = (hif_set_mc->cnt & 0xFF);
- *cur_byte++ = ((hif_set_mc->cnt >> 8) & 0xFF);
- *cur_byte++ = ((hif_set_mc->cnt >> 16) & 0xFF);
- *cur_byte++ = ((hif_set_mc->cnt >> 24) & 0xFF);
+ put_unaligned_le32(set_mc->cnt, cur_byte);
+ cur_byte += 4;
- if (hif_set_mc->cnt > 0 && hif_set_mc->mc_list)
- memcpy(cur_byte, hif_set_mc->mc_list,
- ((hif_set_mc->cnt) * ETH_ALEN));
+ if (set_mc->cnt > 0 && set_mc->mc_list)
+ memcpy(cur_byte, set_mc->mc_list, set_mc->cnt * ETH_ALEN);
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
@@ -1454,7 +1053,7 @@ static void handle_set_mcast_filter(struct work_struct *work)
netdev_err(vif->ndev, "Failed to send setup multicast\n");
error:
- kfree(hif_set_mc->mc_list);
+ kfree(set_mc->mc_list);
kfree(wid.val);
kfree(msg);
}
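The multicast-filter hunk replaces four hand-rolled byte stores with put_unaligned_le32(), which writes the value in little-endian order regardless of host endianness or buffer alignment. A small sketch; pack_mc_header() is a hypothetical name:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Sketch: serialise two 32-bit fields into a little-endian wire buffer. */
static void pack_mc_header(u8 *buf, u32 enabled, u32 count)
{
	put_unaligned_le32(enabled, buf);
	put_unaligned_le32(count, buf + 4);
}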
@@ -1479,9 +1078,6 @@ static void handle_scan_complete(struct work_struct *work)
handle_scan_done(msg->vif, SCAN_EVENT_DONE);
- if (msg->vif->hif_drv->remain_on_ch_pending)
- handle_remain_on_chan(msg->vif,
- &msg->vif->hif_drv->remain_on_ch);
kfree(msg);
}
@@ -1629,7 +1225,7 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
u8 mode, u8 cipher_mode, u8 index)
{
int result = 0;
- u8 t_key_len = ptk_key_len + RX_MIC_KEY_LEN + TX_MIC_KEY_LEN;
+ u8 t_key_len = ptk_key_len + WILC_RX_MIC_KEY_LEN + WILC_TX_MIC_KEY_LEN;
if (mode == WILC_AP_MODE) {
struct wid wid_list[2];
@@ -1651,11 +1247,11 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
if (rx_mic)
memcpy(&key_buf->key[ptk_key_len], rx_mic,
- RX_MIC_KEY_LEN);
+ WILC_RX_MIC_KEY_LEN);
if (tx_mic)
- memcpy(&key_buf->key[ptk_key_len + RX_MIC_KEY_LEN],
- tx_mic, TX_MIC_KEY_LEN);
+ memcpy(&key_buf->key[ptk_key_len + WILC_RX_MIC_KEY_LEN],
+ tx_mic, WILC_TX_MIC_KEY_LEN);
wid_list[1].id = WID_ADD_PTK;
wid_list[1].type = WID_STR;
@@ -1679,11 +1275,11 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
if (rx_mic)
memcpy(&key_buf->key[ptk_key_len], rx_mic,
- RX_MIC_KEY_LEN);
+ WILC_RX_MIC_KEY_LEN);
if (tx_mic)
- memcpy(&key_buf->key[ptk_key_len + RX_MIC_KEY_LEN],
- tx_mic, TX_MIC_KEY_LEN);
+ memcpy(&key_buf->key[ptk_key_len + WILC_RX_MIC_KEY_LEN],
+ tx_mic, WILC_TX_MIC_KEY_LEN);
wid.id = WID_ADD_PTK;
wid.type = WID_STR;
@@ -1704,7 +1300,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
{
int result = 0;
struct wilc_gtk_key *gtk_key;
- int t_key_len = gtk_key_len + RX_MIC_KEY_LEN + TX_MIC_KEY_LEN;
+ int t_key_len = gtk_key_len + WILC_RX_MIC_KEY_LEN + WILC_TX_MIC_KEY_LEN;
gtk_key = kzalloc(sizeof(*gtk_key) + t_key_len, GFP_KERNEL);
if (!gtk_key)
@@ -1722,11 +1318,11 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
memcpy(&gtk_key->key[0], rx_gtk, gtk_key_len);
if (rx_mic)
- memcpy(&gtk_key->key[gtk_key_len], rx_mic, RX_MIC_KEY_LEN);
+ memcpy(&gtk_key->key[gtk_key_len], rx_mic, WILC_RX_MIC_KEY_LEN);
if (tx_mic)
- memcpy(&gtk_key->key[gtk_key_len + RX_MIC_KEY_LEN],
- tx_mic, TX_MIC_KEY_LEN);
+ memcpy(&gtk_key->key[gtk_key_len + WILC_RX_MIC_KEY_LEN],
+ tx_mic, WILC_TX_MIC_KEY_LEN);
if (mode == WILC_AP_MODE) {
struct wid wid_list[2];
@@ -1744,7 +1340,6 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
ARRAY_SIZE(wid_list),
wilc_get_vif_idx(vif));
- kfree(gtk_key);
} else if (mode == WILC_STATION_MODE) {
struct wid wid;
@@ -1754,9 +1349,9 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
wid.val = (u8 *)gtk_key;
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
wilc_get_vif_idx(vif));
- kfree(gtk_key);
}
+ kfree(gtk_key);
return result;
}
@@ -1794,61 +1389,22 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
return result;
}
-int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
- size_t ssid_len, const u8 *ies, size_t ies_len,
- wilc_connect_result connect_result, void *user_arg,
- u8 security, enum authtype auth_type,
- u8 channel, void *join_params)
+int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies,
+ size_t ies_len)
{
int result;
struct host_if_drv *hif_drv = vif->hif_drv;
- struct user_conn_req *con_info = &hif_drv->usr_conn_req;
+ struct wilc_conn_info *conn_info = &hif_drv->conn_info;
- if (!hif_drv || !connect_result) {
- netdev_err(vif->ndev,
- "%s: hif driver or connect result is NULL",
- __func__);
- return -EFAULT;
- }
-
- if (!join_params) {
- netdev_err(vif->ndev, "%s: joinparams is NULL\n", __func__);
- return -EFAULT;
- }
-
- if (hif_drv->usr_scan_req.scan_result) {
- netdev_err(vif->ndev, "%s: Scan in progress\n", __func__);
- return -EBUSY;
- }
-
- con_info->security = security;
- con_info->auth_type = auth_type;
- con_info->ch = channel;
- con_info->conn_result = connect_result;
- con_info->arg = user_arg;
- con_info->param = join_params;
-
- if (bssid) {
- con_info->bssid = kmemdup(bssid, 6, GFP_KERNEL);
- if (!con_info->bssid)
- return -ENOMEM;
- }
-
- if (ssid) {
- con_info->ssid_len = ssid_len;
- con_info->ssid = kmemdup(ssid, ssid_len, GFP_KERNEL);
- if (!con_info->ssid) {
- result = -ENOMEM;
- goto free_bssid;
- }
- }
+ if (bssid)
+ ether_addr_copy(conn_info->bssid, bssid);
if (ies) {
- con_info->ies_len = ies_len;
- con_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
- if (!con_info->ies) {
+ conn_info->req_ies_len = ies_len;
+ conn_info->req_ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!conn_info->req_ies) {
result = -ENOMEM;
- goto free_ssid;
+ return result;
}
}
@@ -1858,18 +1414,12 @@ int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
hif_drv->connect_timer_vif = vif;
mod_timer(&hif_drv->connect_timer,
- jiffies + msecs_to_jiffies(HOST_IF_CONNECT_TIMEOUT));
+ jiffies + msecs_to_jiffies(WILC_HIF_CONNECT_TIMEOUT_MS));
return 0;
free_ies:
- kfree(con_info->ies);
-
-free_ssid:
- kfree(con_info->ssid);
-
-free_bssid:
- kfree(con_info->bssid);
+ kfree(conn_info->req_ies);
return result;
}
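
Reviewer note: wilc_set_join_req() above now takes only the BSSID and the request IEs; the BSSID lands in a fixed u8 bssid[ETH_ALEN] via ether_addr_copy() and only the variable-length IEs are still duplicated, so the old free_ssid/free_bssid unwind labels disappear. A hedged sketch of the same copy pattern, with assumed struct and field names:

/* Sketch, not driver code: fixed-size MAC copy plus kmemdup() for IEs. */
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/slab.h>

struct conn_req_sketch {
	u8 bssid[ETH_ALEN];
	u8 *ies;
	size_t ies_len;
};

static int fill_conn_req(struct conn_req_sketch *req, const u8 *bssid,
			 const u8 *ies, size_t ies_len)
{
	if (bssid)
		ether_addr_copy(req->bssid, bssid);

	if (ies) {
		req->ies = kmemdup(ies, ies_len, GFP_KERNEL);
		if (!req->ies)
			return -ENOMEM;
		req->ies_len = ies_len;
	}
	return 0;
}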
@@ -1900,6 +1450,9 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
int result;
struct wilc_drv_handler drv;
+ if (!hif_drv)
+ return -EFAULT;
+
wid.id = WID_SET_DRV_HANDLER;
wid.type = WID_STR;
wid.size = sizeof(drv);
@@ -1992,7 +1545,7 @@ int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
return result;
}
-int wilc_get_stats_async(struct wilc_vif *vif, struct rf_info *stats)
+static int wilc_get_stats_async(struct wilc_vif *vif, struct rf_info *stats)
{
int result;
struct host_if_msg *msg;
@@ -2091,7 +1644,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
vif->obtaining_ip = false;
if (wilc->clients_count == 0)
- mutex_init(&hif_deinit_lock);
+ mutex_init(&wilc->deinit_lock);
timer_setup(&vif->periodic_rssi, get_periodic_rssi, 0);
mod_timer(&vif->periodic_rssi, jiffies + msecs_to_jiffies(5000));
@@ -2119,9 +1672,7 @@ int wilc_deinit(struct wilc_vif *vif)
return -EFAULT;
}
- mutex_lock(&hif_deinit_lock);
-
- terminated_handle = hif_drv;
+ mutex_lock(&vif->wilc->deinit_lock);
del_timer_sync(&hif_drv->scan_timer);
del_timer_sync(&hif_drv->connect_timer);
@@ -2132,18 +1683,16 @@ int wilc_deinit(struct wilc_vif *vif)
if (hif_drv->usr_scan_req.scan_result) {
hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL,
- hif_drv->usr_scan_req.arg,
- NULL);
+ hif_drv->usr_scan_req.arg);
hif_drv->usr_scan_req.scan_result = NULL;
}
hif_drv->hif_state = HOST_IF_IDLE;
kfree(hif_drv);
-
+ vif->hif_drv = NULL;
vif->wilc->clients_count--;
- terminated_handle = NULL;
- mutex_unlock(&hif_deinit_lock);
+ mutex_unlock(&vif->wilc->deinit_lock);
return result;
}
@@ -2155,16 +1704,13 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
- id = buffer[length - 4];
- id |= (buffer[length - 3] << 8);
- id |= (buffer[length - 2] << 16);
- id |= (buffer[length - 1] << 24);
+ id = get_unaligned_le32(&buffer[length - 4]);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
return;
hif_drv = vif->hif_drv;
- if (!hif_drv || hif_drv == terminated_handle) {
+ if (!hif_drv) {
netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv);
return;
}
@@ -2173,9 +1719,12 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
if (IS_ERR(msg))
return;
- msg->body.net_info.len = length;
- msg->body.net_info.buffer = kmemdup(buffer, length, GFP_KERNEL);
- if (!msg->body.net_info.buffer) {
+ msg->body.net_info.frame_len = get_unaligned_le16(&buffer[6]) - 1;
+ msg->body.net_info.rssi = buffer[8];
+ msg->body.net_info.mgmt = kmemdup(&buffer[9],
+ msg->body.net_info.frame_len,
+ GFP_KERNEL);
+ if (!msg->body.net_info.mgmt) {
kfree(msg);
return;
}
@@ -2183,7 +1732,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
result = wilc_enqueue_work(msg);
if (result) {
netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
- kfree(msg->body.net_info.buffer);
+ kfree(msg->body.net_info.mgmt);
kfree(msg);
}
}
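
Reviewer note: wilc_network_info_received() now derives the vif id with get_unaligned_le32() instead of assembling it byte by byte (the handlers below get the same treatment), and it copies only the management frame plus length and RSSI rather than kmemdup()ing the whole buffer. Sketch of the helper usage, assuming <asm/unaligned.h> as in kernels of this vintage and a hypothetical wrapper name:

/* Sketch: unaligned little-endian field extraction. */
#include <linux/types.h>
#include <asm/unaligned.h>

static u32 vif_id_from_tail(const u8 *buf, u32 len)
{
	/* replaces buf[len - 4] | buf[len - 3] << 8 |
	 * buf[len - 2] << 16 | buf[len - 1] << 24 */
	return get_unaligned_le32(&buf[len - 4]);
}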
@@ -2196,53 +1745,42 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
- mutex_lock(&hif_deinit_lock);
+ mutex_lock(&wilc->deinit_lock);
- id = buffer[length - 4];
- id |= (buffer[length - 3] << 8);
- id |= (buffer[length - 2] << 16);
- id |= (buffer[length - 1] << 24);
+ id = get_unaligned_le32(&buffer[length - 4]);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif) {
- mutex_unlock(&hif_deinit_lock);
+ mutex_unlock(&wilc->deinit_lock);
return;
}
hif_drv = vif->hif_drv;
- if (!hif_drv || hif_drv == terminated_handle) {
- mutex_unlock(&hif_deinit_lock);
+ if (!hif_drv) {
+ mutex_unlock(&wilc->deinit_lock);
return;
}
- if (!hif_drv->usr_conn_req.conn_result) {
+ if (!hif_drv->conn_info.conn_result) {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
- mutex_unlock(&hif_deinit_lock);
+ mutex_unlock(&wilc->deinit_lock);
return;
}
msg = wilc_alloc_work(vif, handle_rcvd_gnrl_async_info, false);
if (IS_ERR(msg)) {
- mutex_unlock(&hif_deinit_lock);
- return;
- }
-
- msg->body.async_info.len = length;
- msg->body.async_info.buffer = kmemdup(buffer, length, GFP_KERNEL);
- if (!msg->body.async_info.buffer) {
- kfree(msg);
- mutex_unlock(&hif_deinit_lock);
+ mutex_unlock(&wilc->deinit_lock);
return;
}
+ msg->body.mac_info.status = buffer[7];
result = wilc_enqueue_work(msg);
if (result) {
netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
- kfree(msg->body.async_info.buffer);
kfree(msg);
}
- mutex_unlock(&hif_deinit_lock);
+ mutex_unlock(&wilc->deinit_lock);
}
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
@@ -2252,16 +1790,13 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
- id = buffer[length - 4];
- id |= buffer[length - 3] << 8;
- id |= buffer[length - 2] << 16;
- id |= buffer[length - 1] << 24;
+ id = get_unaligned_le32(&buffer[length - 4]);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
return;
hif_drv = vif->hif_drv;
- if (!hif_drv || hif_drv == terminated_handle)
+ if (!hif_drv)
return;
if (hif_drv->usr_scan_req.scan_result) {
@@ -2280,21 +1815,19 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
}
}
-int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
+int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie,
u32 duration, u16 chan,
- wilc_remain_on_chan_expired expired,
- wilc_remain_on_chan_ready ready,
+ void (*expired)(void *, u64),
void *user_arg)
{
- struct remain_ch roc;
+ struct wilc_remain_ch roc;
int result;
roc.ch = chan;
roc.expired = expired;
- roc.ready = ready;
roc.arg = user_arg;
roc.duration = duration;
- roc.id = session_id;
+ roc.cookie = cookie;
result = handle_remain_on_chan(vif, &roc);
if (result)
netdev_err(vif->ndev, "%s: failed to set remain on channel\n",
@@ -2303,7 +1836,7 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
return result;
}
-int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id)
+int wilc_listen_state_expired(struct wilc_vif *vif, u64 cookie)
{
int result;
struct host_if_msg *msg;
@@ -2320,7 +1853,7 @@ int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id)
if (IS_ERR(msg))
return PTR_ERR(msg);
- msg->body.remain_on_ch.id = session_id;
+ msg->body.remain_on_ch.cookie = cookie;
result = wilc_enqueue_work(msg);
if (result) {
@@ -2485,7 +2018,7 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
int result;
int i;
u8 assoc_sta = 0;
- struct del_all_sta del_sta;
+ struct wilc_del_all_sta del_sta;
memset(&del_sta, 0x0, sizeof(del_sta));
for (i = 0; i < WILC_MAX_NUM_STA; i++) {
@@ -2564,7 +2097,7 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
return result;
}
-int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled, u32 count,
+int wilc_setup_multicast_filter(struct wilc_vif *vif, u32 enabled, u32 count,
u8 *mc_list)
{
int result;
@@ -2574,9 +2107,9 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled, u32 count,
if (IS_ERR(msg))
return PTR_ERR(msg);
- msg->body.multicast_info.enabled = enabled;
- msg->body.multicast_info.cnt = count;
- msg->body.multicast_info.mc_list = mc_list;
+ msg->body.mc_info.enabled = enabled;
+ msg->body.mc_info.cnt = count;
+ msg->body.mc_info.mc_list = mc_list;
result = wilc_enqueue_work(msg);
if (result) {
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 9b396a79b144..678e62312215 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -18,19 +18,16 @@ enum {
};
#define WILC_MAX_NUM_STA 9
-#define MAX_NUM_SCANNED_NETWORKS 100
-#define MAX_NUM_SCANNED_NETWORKS_SHADOW 130
+#define WILC_MAX_NUM_SCANNED_CH 14
#define WILC_MAX_NUM_PROBED_SSID 10
-#define TX_MIC_KEY_LEN 8
-#define RX_MIC_KEY_LEN 8
+#define WILC_TX_MIC_KEY_LEN 8
+#define WILC_RX_MIC_KEY_LEN 8
#define WILC_MAX_NUM_PMKIDS 16
#define WILC_ADD_STA_LENGTH 40
#define WILC_NUM_CONCURRENT_IFC 2
-#define NUM_RSSI 5
-
enum {
WILC_SET_CFG = 0,
WILC_GET_CFG
@@ -38,48 +35,6 @@ enum {
#define WILC_MAX_ASSOC_RESP_FRAME_SIZE 256
-struct rssi_history_buffer {
- bool full;
- u8 index;
- s8 samples[NUM_RSSI];
-};
-
-struct network_info {
- s8 rssi;
- u16 cap_info;
- u8 ssid[MAX_SSID_LEN];
- u8 ssid_len;
- u8 bssid[6];
- u16 beacon_period;
- u8 dtim_period;
- u8 ch;
- unsigned long time_scan_cached;
- unsigned long time_scan;
- bool new_network;
- u8 found;
- u32 tsf_lo;
- u8 *ies;
- u16 ies_len;
- void *join_params;
- struct rssi_history_buffer rssi_history;
- u64 tsf;
-};
-
-struct connect_info {
- u8 bssid[6];
- u8 *req_ies;
- size_t req_ies_len;
- u8 *resp_ies;
- u16 resp_ies_len;
- u16 status;
-};
-
-struct disconnect_info {
- u16 reason;
- u8 *ie;
- size_t ie_len;
-};
-
struct assoc_resp {
__le16 capab_info;
__le16 status_code;
@@ -129,11 +84,6 @@ enum cfg_param {
WILC_CFG_PARAM_RTS_THRESHOLD = BIT(3)
};
-struct found_net_info {
- u8 bssid[6];
- s8 rssi;
-};
-
enum scan_event {
SCAN_EVENT_NETWORK_FOUND = 0,
SCAN_EVENT_DONE = 1,
@@ -147,72 +97,71 @@ enum conn_event {
CONN_DISCONN_EVENT_FORCE_32BIT = 0xFFFFFFFF
};
-typedef void (*wilc_scan_result)(enum scan_event, struct network_info *,
- void *, void *);
-
-typedef void (*wilc_connect_result)(enum conn_event,
- struct connect_info *,
- u8,
- struct disconnect_info *,
- void *);
+enum {
+ WILC_HIF_SDIO = 0,
+ WILC_HIF_SPI = BIT(0)
+};
-typedef void (*wilc_remain_on_chan_expired)(void *, u32);
-typedef void (*wilc_remain_on_chan_ready)(void *);
+enum {
+ WILC_MAC_STATUS_INIT = -1,
+ WILC_MAC_STATUS_DISCONNECTED = 0,
+ WILC_MAC_STATUS_CONNECTED = 1
+};
-struct rcvd_net_info {
- u8 *buffer;
- u32 len;
+struct wilc_rcvd_net_info {
+ s8 rssi;
+ u8 ch;
+ u16 frame_len;
+ struct ieee80211_mgmt *mgmt;
};
-struct hidden_net_info {
- u8 *ssid;
+struct wilc_probe_ssid_info {
u8 ssid_len;
+ u8 *ssid;
};
-struct hidden_network {
- struct hidden_net_info *net_info;
+struct wilc_probe_ssid {
+ struct wilc_probe_ssid_info *ssid_info;
u8 n_ssids;
+ u32 size;
};
-struct user_scan_req {
- wilc_scan_result scan_result;
+struct wilc_user_scan_req {
+ void (*scan_result)(enum scan_event evt,
+ struct wilc_rcvd_net_info *info, void *priv);
void *arg;
u32 ch_cnt;
- struct found_net_info net_info[MAX_NUM_SCANNED_NETWORKS];
};
-struct user_conn_req {
- u8 *bssid;
- u8 *ssid;
+struct wilc_conn_info {
+ u8 bssid[ETH_ALEN];
u8 security;
enum authtype auth_type;
- size_t ssid_len;
- u8 *ies;
- size_t ies_len;
- wilc_connect_result conn_result;
- bool ht_capable;
u8 ch;
+ u8 *req_ies;
+ size_t req_ies_len;
+ u8 *resp_ies;
+ u16 resp_ies_len;
+ u16 status;
+ void (*conn_result)(enum conn_event evt, u8 status, void *priv_data);
void *arg;
void *param;
};
-struct remain_ch {
+struct wilc_remain_ch {
u16 ch;
u32 duration;
- wilc_remain_on_chan_expired expired;
- wilc_remain_on_chan_ready ready;
+ void (*expired)(void *priv, u64 cookie);
void *arg;
- u32 id;
+ u32 cookie;
};
struct wilc;
struct host_if_drv {
- struct user_scan_req usr_scan_req;
- struct user_conn_req usr_conn_req;
- struct remain_ch remain_on_ch;
- u8 remain_on_ch_pending;
+ struct wilc_user_scan_req usr_scan_req;
+ struct wilc_conn_info conn_info;
+ struct wilc_remain_ch remain_on_ch;
u64 p2p_timeout;
- u8 p2p_connect;
enum host_if_state hif_state;
@@ -232,17 +181,6 @@ struct host_if_drv {
u8 assoc_resp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
};
-struct add_sta_param {
- u8 bssid[ETH_ALEN];
- u16 aid;
- u8 rates_len;
- const u8 *rates;
- bool ht_supported;
- struct ieee80211_ht_cap ht_capa;
- u16 flags_mask;
- u16 flags_set;
-};
-
struct wilc_vif;
int wilc_remove_wep_key(struct wilc_vif *vif, u8 index);
int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index);
@@ -261,18 +199,16 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 cipher_mode);
int wilc_set_pmkid_info(struct wilc_vif *vif, struct wilc_pmkid_attr *pmkid);
int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr);
-int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
- size_t ssid_len, const u8 *ies, size_t ies_len,
- wilc_connect_result connect_result, void *user_arg,
- u8 security, enum authtype auth_type,
- u8 channel, void *join_params);
+int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies,
+ size_t ies_len);
int wilc_disconnect(struct wilc_vif *vif);
int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel);
int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level);
int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
- u8 *ch_freq_list, u8 ch_list_len, const u8 *ies,
- size_t ies_len, wilc_scan_result scan_result, void *user_arg,
- struct hidden_network *hidden_network);
+ u8 *ch_freq_list, u8 ch_list_len, const u8 *ies, size_t ies_len,
+ void (*scan_result_fn)(enum scan_event,
+ struct wilc_rcvd_net_info *, void *),
+ void *user_arg, struct wilc_probe_ssid *search);
int wilc_hif_set_cfg(struct wilc_vif *vif,
struct cfg_param_attr *cfg_param);
int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler);
@@ -287,14 +223,13 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr);
int wilc_edit_station(struct wilc_vif *vif, const u8 *mac,
struct station_parameters *params);
int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout);
-int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled, u32 count,
+int wilc_setup_multicast_filter(struct wilc_vif *vif, u32 enabled, u32 count,
u8 *mc_list);
-int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
+int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie,
u32 duration, u16 chan,
- wilc_remain_on_chan_expired expired,
- wilc_remain_on_chan_ready ready,
+ void (*expired)(void *, u64),
void *user_arg);
-int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id);
+int wilc_listen_state_expired(struct wilc_vif *vif, u64 cookie);
void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg);
int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
u8 ifc_id);
@@ -307,4 +242,6 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
+void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ struct cfg80211_crypto_settings *crypto);
#endif
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/wilc_mon.c
index a63446818eac..9fe19a3e1dd4 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/wilc_mon.c
@@ -18,28 +18,20 @@ struct wilc_wfi_radiotap_cb_hdr {
u16 tx_flags;
} __packed;
-static struct net_device *wilc_wfi_mon; /* global monitor netdev */
-
-static u8 srcadd[6];
-static u8 bssid[6];
-
-#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */
-#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive*/
-
#define TX_RADIOTAP_PRESENT ((1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_TX_FLAGS))
-void wilc_wfi_monitor_rx(u8 *buff, u32 size)
+void wilc_wfi_monitor_rx(struct net_device *mon_dev, u8 *buff, u32 size)
{
u32 header, pkt_offset;
struct sk_buff *skb = NULL;
struct wilc_wfi_radiotap_hdr *hdr;
struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
- if (!wilc_wfi_mon)
+ if (!mon_dev)
return;
- if (!netif_running(wilc_wfi_mon))
+ if (!netif_running(mon_dev))
return;
/* Get WILC header */
@@ -94,7 +86,7 @@ void wilc_wfi_monitor_rx(u8 *buff, u32 size)
hdr->rate = 5;
}
- skb->dev = wilc_wfi_mon;
+ skb->dev = mon_dev;
skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
@@ -155,13 +147,13 @@ static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
struct wilc_wfi_mon_priv *mon_priv;
struct sk_buff *skb2;
struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
+ u8 srcadd[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
- if (!wilc_wfi_mon)
- return -EFAULT;
-
- mon_priv = netdev_priv(wilc_wfi_mon);
+ mon_priv = netdev_priv(dev);
if (!mon_priv)
return -EFAULT;
+
rtap_len = ieee80211_get_radiotap_len(skb->data);
if (skb->len < rtap_len)
return -1;
@@ -187,7 +179,7 @@ static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
cb_hdr->rate = 5;
cb_hdr->tx_flags = 0x0004;
- skb2->dev = wilc_wfi_mon;
+ skb2->dev = dev;
skb_reset_mac_header(skb2);
skb2->ip_summed = CHECKSUM_UNNECESSARY;
skb2->pkt_type = PACKET_OTHERHOST;
@@ -200,8 +192,8 @@ static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb,
}
skb->dev = mon_priv->real_ndev;
- memcpy(srcadd, &skb->data[10], 6);
- memcpy(bssid, &skb->data[16], 6);
+ ether_addr_copy(srcadd, &skb->data[10]);
+ ether_addr_copy(bssid, &skb->data[16]);
/*
* Identify if data or mgmt packet, if source address and bssid
* fields are equal send it to mgmt frames handler
@@ -223,51 +215,44 @@ static const struct net_device_ops wilc_wfi_netdev_ops = {
};
-struct net_device *wilc_wfi_init_mon_interface(const char *name,
+struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
+ const char *name,
struct net_device *real_dev)
{
struct wilc_wfi_mon_priv *priv;
/*If monitor interface is already initialized, return it*/
- if (wilc_wfi_mon)
- return wilc_wfi_mon;
+ if (wl->monitor_dev)
+ return wl->monitor_dev;
- wilc_wfi_mon = alloc_etherdev(sizeof(struct wilc_wfi_mon_priv));
- if (!wilc_wfi_mon)
+ wl->monitor_dev = alloc_etherdev(sizeof(struct wilc_wfi_mon_priv));
+ if (!wl->monitor_dev)
return NULL;
- wilc_wfi_mon->type = ARPHRD_IEEE80211_RADIOTAP;
- strncpy(wilc_wfi_mon->name, name, IFNAMSIZ);
- wilc_wfi_mon->name[IFNAMSIZ - 1] = 0;
- wilc_wfi_mon->netdev_ops = &wilc_wfi_netdev_ops;
- if (register_netdevice(wilc_wfi_mon)) {
+ wl->monitor_dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ strncpy(wl->monitor_dev->name, name, IFNAMSIZ);
+ wl->monitor_dev->name[IFNAMSIZ - 1] = 0;
+ wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
+
+ if (register_netdevice(wl->monitor_dev)) {
netdev_err(real_dev, "register_netdevice failed\n");
return NULL;
}
- priv = netdev_priv(wilc_wfi_mon);
+ priv = netdev_priv(wl->monitor_dev);
if (!priv)
return NULL;
priv->real_ndev = real_dev;
- return wilc_wfi_mon;
+ return wl->monitor_dev;
}
-void wilc_wfi_deinit_mon_interface(void)
+void wilc_wfi_deinit_mon_interface(struct wilc *wl)
{
- bool rollback_lock = false;
-
- if (wilc_wfi_mon) {
- if (rtnl_is_locked()) {
- rtnl_unlock();
- rollback_lock = true;
- }
- unregister_netdev(wilc_wfi_mon);
+ if (!wl->monitor_dev)
+ return;
- if (rollback_lock) {
- rtnl_lock();
- rollback_lock = false;
- }
- wilc_wfi_mon = NULL;
- }
+ unregister_netdev(wl->monitor_dev);
+ free_netdev(wl->monitor_dev);
+ wl->monitor_dev = NULL;
}
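
Reviewer note: wilc_mon.c drops the file-scope wilc_wfi_mon, srcadd and bssid globals in favour of the monitor netdev stored in struct wilc and on-stack MAC buffers, and the deinit path now also calls free_netdev() (previously only unregister_netdev() ran, behind rtnl lock juggling). Tear-down sketch under those assumptions, with a stand-in owner struct:

/* Sketch only: per-adapter monitor netdev tear-down (names assumed). */
#include <linux/netdevice.h>

struct mon_owner_sketch {
	struct net_device *monitor_dev;
};

static void mon_owner_deinit(struct mon_owner_sketch *wl)
{
	if (!wl->monitor_dev)
		return;

	unregister_netdev(wl->monitor_dev);
	free_netdev(wl->monitor_dev);	/* was missing before this change */
	wl->monitor_dev = NULL;
}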
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/wilc_netdev.c
index 721689048648..1787154ee088 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/wilc_netdev.c
@@ -12,86 +12,7 @@
#include "wilc_wfi_cfgoperations.h"
-static int dev_state_ev_handler(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct in_ifaddr *dev_iface = ptr;
- struct wilc_priv *priv;
- struct host_if_drv *hif_drv;
- struct net_device *dev;
- u8 *ip_addr_buf;
- struct wilc_vif *vif;
- u8 null_ip[4] = {0};
- char wlan_dev_name[5] = "wlan0";
-
- if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev)
- return NOTIFY_DONE;
-
- if (memcmp(dev_iface->ifa_label, "wlan0", 5) &&
- memcmp(dev_iface->ifa_label, "p2p0", 4))
- return NOTIFY_DONE;
-
- dev = (struct net_device *)dev_iface->ifa_dev->dev;
- if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
- return NOTIFY_DONE;
-
- priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
- if (!priv)
- return NOTIFY_DONE;
-
- hif_drv = (struct host_if_drv *)priv->hif_drv;
- vif = netdev_priv(dev);
- if (!vif || !hif_drv)
- return NOTIFY_DONE;
-
- switch (event) {
- case NETDEV_UP:
- if (vif->iftype == WILC_STATION_MODE ||
- vif->iftype == WILC_CLIENT_MODE) {
- hif_drv->ifc_up = 1;
- vif->obtaining_ip = false;
- del_timer(&vif->during_ip_timer);
- }
-
- if (vif->wilc->enable_ps)
- wilc_set_power_mgmt(vif, 1, 0);
-
- netdev_dbg(dev, "[%s] Up IP\n", dev_iface->ifa_label);
-
- ip_addr_buf = (char *)&dev_iface->ifa_address;
- netdev_dbg(dev, "IP add=%d:%d:%d:%d\n",
- ip_addr_buf[0], ip_addr_buf[1],
- ip_addr_buf[2], ip_addr_buf[3]);
-
- break;
-
- case NETDEV_DOWN:
- if (vif->iftype == WILC_STATION_MODE ||
- vif->iftype == WILC_CLIENT_MODE) {
- hif_drv->ifc_up = 0;
- vif->obtaining_ip = false;
- }
-
- if (memcmp(dev_iface->ifa_label, wlan_dev_name, 5) == 0)
- wilc_set_power_mgmt(vif, 0, 0);
-
- wilc_resolve_disconnect_aberration(vif);
-
- netdev_dbg(dev, "[%s] Down IP\n", dev_iface->ifa_label);
-
- ip_addr_buf = null_ip;
- netdev_dbg(dev, "IP add=%d:%d:%d:%d\n",
- ip_addr_buf[0], ip_addr_buf[1],
- ip_addr_buf[2], ip_addr_buf[3]);
-
- break;
-
- default:
- break;
- }
-
- return NOTIFY_DONE;
-}
+#define WILC_MULTICAST_TABLE_SIZE 8
static irqreturn_t isr_uh_routine(int irq, void *user_data)
{
@@ -198,7 +119,11 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
{
struct wilc_vif *vif = netdev_priv(wilc_netdev);
- memcpy(vif->bssid, bssid, 6);
+ if (bssid)
+ ether_addr_copy(vif->bssid, bssid);
+ else
+ eth_zero_addr(vif->bssid);
+
vif->mode = mode;
}
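
Reviewer note: wilc_wlan_set_bssid() previously dereferenced bssid unconditionally for a 6-byte memcpy(); it now clears the stored address when the caller passes NULL. The copy-or-clear idiom from <linux/etherdevice.h>, as a standalone sketch with a hypothetical wrapper:

/* Sketch of the copy-or-clear idiom. */
#include <linux/etherdevice.h>

static void set_bssid_sketch(u8 dst[ETH_ALEN], const u8 *bssid)
{
	if (bssid)
		ether_addr_copy(dst, bssid);
	else
		eth_zero_addr(dst);	/* 00:00:00:00:00:00 */
}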
@@ -214,7 +139,7 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
return ret_val;
}
-static int linux_wlan_txq_task(void *vp)
+static int wilc_txq_task(void *vp)
{
int ret;
u32 txq_count;
@@ -236,9 +161,11 @@ static int linux_wlan_txq_task(void *vp)
do {
ret = wilc_wlan_handle_txq(dev, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- if (netif_queue_stopped(wl->vif[0]->ndev))
+ if (wl->vif[0]->mac_opened &&
+ netif_queue_stopped(wl->vif[0]->ndev))
netif_wake_queue(wl->vif[0]->ndev);
- if (netif_queue_stopped(wl->vif[1]->ndev))
+ if (wl->vif[1]->mac_opened &&
+ netif_queue_stopped(wl->vif[1]->ndev))
netif_wake_queue(wl->vif[1]->ndev);
}
} while (ret == -ENOBUFS && !wl->close);
@@ -275,7 +202,7 @@ fail:
return ret;
}
-static int linux_wlan_start_firmware(struct net_device *dev)
+static int wilc_start_firmware(struct net_device *dev)
{
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
@@ -316,204 +243,169 @@ static int wilc1000_firmware_download(struct net_device *dev)
return 0;
}
-static int linux_wlan_init_test_config(struct net_device *dev,
- struct wilc_vif *vif)
+static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
{
- unsigned char c_val[64];
- struct wilc *wilc = vif->wilc;
struct wilc_priv *priv;
struct host_if_drv *hif_drv;
+ u8 b;
+ u16 hw;
+ u32 w;
netdev_dbg(dev, "Start configuring Firmware\n");
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
hif_drv = (struct host_if_drv *)priv->hif_drv;
netdev_dbg(dev, "Host = %p\n", hif_drv);
- wilc_get_chipid(wilc, false);
-
- *(int *)c_val = 1;
-
- if (!wilc_wlan_cfg_set(vif, 1, WID_SET_DRV_HANDLER, c_val, 4, 0, 0))
- goto fail;
-
- c_val[0] = 0;
- if (!wilc_wlan_cfg_set(vif, 0, WID_PC_TEST_MODE, c_val, 1, 0, 0))
- goto fail;
- c_val[0] = WILC_FW_BSS_TYPE_INFRA;
- if (!wilc_wlan_cfg_set(vif, 0, WID_BSS_TYPE, c_val, 1, 0, 0))
- goto fail;
-
- c_val[0] = WILC_FW_TX_RATE_AUTO;
- if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
- goto fail;
-
- c_val[0] = WILC_FW_OPER_MODE_G_MIXED_11B_2;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, c_val, 1, 0,
- 0))
- goto fail;
-
- c_val[0] = 1;
- if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_CHANNEL, c_val, 1, 0, 0))
- goto fail;
-
- c_val[0] = WILC_FW_PREAMBLE_SHORT;
- if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, c_val, 1, 0, 0))
+ w = vif->iftype;
+ cpu_to_le32s(&w);
+ if (!wilc_wlan_cfg_set(vif, 1, WID_SET_OPERATION_MODE, (u8 *)&w, 4,
+ 0, 0))
goto fail;
- c_val[0] = WILC_FW_11N_PROT_AUTO;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_PROT_MECH, c_val, 1, 0, 0))
+ b = WILC_FW_BSS_TYPE_INFRA;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_BSS_TYPE, &b, 1, 0, 0))
goto fail;
- c_val[0] = WILC_FW_ACTIVE_SCAN;
- if (!wilc_wlan_cfg_set(vif, 0, WID_SCAN_TYPE, c_val, 1, 0, 0))
+ b = WILC_FW_TX_RATE_AUTO;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_TX_RATE, &b, 1, 0, 0))
goto fail;
- c_val[0] = WILC_FW_SITE_SURVEY_OFF;
- if (!wilc_wlan_cfg_set(vif, 0, WID_SITE_SURVEY, c_val, 1, 0, 0))
+ b = WILC_FW_OPER_MODE_G_MIXED_11B_2;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, &b, 1, 0, 0))
goto fail;
- *((int *)c_val) = 0xffff;
- if (!wilc_wlan_cfg_set(vif, 0, WID_RTS_THRESHOLD, c_val, 2, 0, 0))
+ b = WILC_FW_PREAMBLE_SHORT;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, &b, 1, 0, 0))
goto fail;
- *((int *)c_val) = 2346;
- if (!wilc_wlan_cfg_set(vif, 0, WID_FRAG_THRESHOLD, c_val, 2, 0, 0))
+ b = WILC_FW_11N_PROT_AUTO;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_PROT_MECH, &b, 1, 0, 0))
goto fail;
- c_val[0] = 0;
- if (!wilc_wlan_cfg_set(vif, 0, WID_BCAST_SSID, c_val, 1, 0, 0))
+ b = WILC_FW_ACTIVE_SCAN;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_SCAN_TYPE, &b, 1, 0, 0))
goto fail;
- c_val[0] = 1;
- if (!wilc_wlan_cfg_set(vif, 0, WID_QOS_ENABLE, c_val, 1, 0, 0))
+ b = WILC_FW_SITE_SURVEY_OFF;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_SITE_SURVEY, &b, 1, 0, 0))
goto fail;
- c_val[0] = WILC_FW_NO_POWERSAVE;
- if (!wilc_wlan_cfg_set(vif, 0, WID_POWER_MANAGEMENT, c_val, 1, 0, 0))
+ hw = 0xffff;
+ cpu_to_le16s(&hw);
+ if (!wilc_wlan_cfg_set(vif, 0, WID_RTS_THRESHOLD, (u8 *)&hw, 2, 0, 0))
goto fail;
- c_val[0] = WILC_FW_SEC_NO;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11I_MODE, c_val, 1, 0, 0))
+ hw = 2346;
+ cpu_to_le16s(&hw);
+ if (!wilc_wlan_cfg_set(vif, 0, WID_FRAG_THRESHOLD, (u8 *)&hw, 2, 0, 0))
goto fail;
- c_val[0] = WILC_FW_AUTH_OPEN_SYSTEM;
- if (!wilc_wlan_cfg_set(vif, 0, WID_AUTH_TYPE, c_val, 1, 0, 0))
+ b = 0;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_BCAST_SSID, &b, 1, 0, 0))
goto fail;
- strcpy(c_val, "123456790abcdef1234567890");
- if (!wilc_wlan_cfg_set(vif, 0, WID_WEP_KEY_VALUE, c_val,
- (strlen(c_val) + 1), 0, 0))
+ b = 1;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_QOS_ENABLE, &b, 1, 0, 0))
goto fail;
- strcpy(c_val, "12345678");
- if (!wilc_wlan_cfg_set(vif, 0, WID_11I_PSK, c_val, (strlen(c_val)), 0,
- 0))
+ b = WILC_FW_NO_POWERSAVE;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_POWER_MANAGEMENT, &b, 1, 0, 0))
goto fail;
- strcpy(c_val, "password");
- if (!wilc_wlan_cfg_set(vif, 0, WID_1X_KEY, c_val, (strlen(c_val) + 1),
- 0, 0))
+ b = WILC_FW_SEC_NO;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11I_MODE, &b, 1, 0, 0))
goto fail;
- c_val[0] = 192;
- c_val[1] = 168;
- c_val[2] = 1;
- c_val[3] = 112;
- if (!wilc_wlan_cfg_set(vif, 0, WID_1X_SERV_ADDR, c_val, 4, 0, 0))
+ b = WILC_FW_AUTH_OPEN_SYSTEM;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_AUTH_TYPE, &b, 1, 0, 0))
goto fail;
- c_val[0] = 3;
- if (!wilc_wlan_cfg_set(vif, 0, WID_LISTEN_INTERVAL, c_val, 1, 0, 0))
+ b = 3;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_LISTEN_INTERVAL, &b, 1, 0, 0))
goto fail;
- c_val[0] = 3;
- if (!wilc_wlan_cfg_set(vif, 0, WID_DTIM_PERIOD, c_val, 1, 0, 0))
+ b = 3;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_DTIM_PERIOD, &b, 1, 0, 0))
goto fail;
- c_val[0] = WILC_FW_ACK_POLICY_NORMAL;
- if (!wilc_wlan_cfg_set(vif, 0, WID_ACK_POLICY, c_val, 1, 0, 0))
+ b = WILC_FW_ACK_POLICY_NORMAL;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_ACK_POLICY, &b, 1, 0, 0))
goto fail;
- c_val[0] = 0;
- if (!wilc_wlan_cfg_set(vif, 0, WID_USER_CONTROL_ON_TX_POWER, c_val, 1,
+ b = 0;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_USER_CONTROL_ON_TX_POWER, &b, 1,
0, 0))
goto fail;
- c_val[0] = 48;
- if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11A, c_val, 1, 0,
- 0))
+ b = 48;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11A, &b, 1, 0, 0))
goto fail;
- c_val[0] = 28;
- if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11B, c_val, 1, 0,
- 0))
+ b = 28;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11B, &b, 1, 0, 0))
goto fail;
- *((int *)c_val) = 100;
- if (!wilc_wlan_cfg_set(vif, 0, WID_BEACON_INTERVAL, c_val, 2, 0, 0))
+ hw = 100;
+ cpu_to_le16s(&hw);
+ if (!wilc_wlan_cfg_set(vif, 0, WID_BEACON_INTERVAL, (u8 *)&hw, 2, 0, 0))
goto fail;
- c_val[0] = WILC_FW_REKEY_POLICY_DISABLE;
- if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_POLICY, c_val, 1, 0, 0))
+ b = WILC_FW_REKEY_POLICY_DISABLE;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_POLICY, &b, 1, 0, 0))
goto fail;
- *((int *)c_val) = 84600;
- if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PERIOD, c_val, 4, 0, 0))
+ w = 84600;
+ cpu_to_le32s(&w);
+ if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PERIOD, (u8 *)&w, 4, 0, 0))
goto fail;
- *((int *)c_val) = 500;
- if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PACKET_COUNT, c_val, 4, 0,
+ w = 500;
+ cpu_to_le32s(&w);
+ if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PACKET_COUNT, (u8 *)&w, 4, 0,
0))
goto fail;
- c_val[0] = 1;
- if (!wilc_wlan_cfg_set(vif, 0, WID_SHORT_SLOT_ALLOWED, c_val, 1, 0,
+ b = 1;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_SHORT_SLOT_ALLOWED, &b, 1, 0,
0))
goto fail;
- c_val[0] = WILC_FW_ERP_PROT_SELF_CTS;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ERP_PROT_TYPE, c_val, 1, 0, 0))
+ b = WILC_FW_ERP_PROT_SELF_CTS;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ERP_PROT_TYPE, &b, 1, 0, 0))
goto fail;
- c_val[0] = 1;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ENABLE, c_val, 1, 0, 0))
+ b = 1;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ENABLE, &b, 1, 0, 0))
goto fail;
- c_val[0] = WILC_FW_11N_OP_MODE_HT_MIXED;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OPERATING_MODE, c_val, 1, 0,
- 0))
+ b = WILC_FW_11N_OP_MODE_HT_MIXED;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OPERATING_MODE, &b, 1, 0, 0))
goto fail;
- c_val[0] = 1;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_TXOP_PROT_DISABLE, c_val, 1, 0,
- 0))
+ b = 1;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_TXOP_PROT_DISABLE, &b, 1, 0, 0))
goto fail;
- c_val[0] = WILC_FW_OBBS_NONHT_DETECT_PROTECT_REPORT;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
+ b = WILC_FW_OBBS_NONHT_DETECT_PROTECT_REPORT;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, &b, 1,
0, 0))
goto fail;
- c_val[0] = WILC_FW_HT_PROT_RTS_CTS_NONHT;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_HT_PROT_TYPE, c_val, 1, 0, 0))
+ b = WILC_FW_HT_PROT_RTS_CTS_NONHT;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_HT_PROT_TYPE, &b, 1, 0, 0))
goto fail;
- c_val[0] = 0;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_RIFS_PROT_ENABLE, c_val, 1, 0,
+ b = 0;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_RIFS_PROT_ENABLE, &b, 1, 0,
0))
goto fail;
- c_val[0] = WILC_FW_SMPS_MODE_MIMO;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_SMPS_MODE, c_val, 1, 0, 0))
- goto fail;
-
- c_val[0] = 7;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_CURRENT_TX_MCS, c_val, 1, 0,
- 0))
+ b = 7;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_CURRENT_TX_MCS, &b, 1, 0, 0))
goto fail;
- c_val[0] = 1;
- if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, c_val, 1,
+ b = 1;
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, &b, 1,
1, 1))
goto fail;
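
Reviewer note: wilc_init_fw_config() above replaces the unsigned char c_val[64] scratch buffer and its *(int *)c_val stores (which wrote native-endian ints into a char buffer and, for the 16-bit WIDs, sent only the leading bytes) with sized u8/u16/u32 locals converted in place by cpu_to_le16s()/cpu_to_le32s() before being passed as byte buffers. Minimal sketch of that conversion, with a hypothetical sink:

/* Sketch: sized locals plus in-place endian conversion (names assumed). */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void pack_cfg_words(u8 out16[2], u8 out32[4])
{
	u16 hw = 2346;		/* e.g. fragmentation threshold */
	u32 w = 84600;		/* e.g. rekey period */

	cpu_to_le16s(&hw);	/* convert in place: CPU -> little endian */
	cpu_to_le32s(&w);

	memcpy(out16, &hw, sizeof(hw));
	memcpy(out32, &w, sizeof(w));
}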
@@ -609,7 +501,7 @@ static int wlan_initialize_threads(struct net_device *dev)
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
- wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev,
+ wilc->txq_thread = kthread_run(wilc_txq_task, (void *)dev,
"K_TXQ_TASK");
if (IS_ERR(wilc->txq_thread)) {
netdev_err(dev, "couldn't create TXQ thread\n");
@@ -667,7 +559,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
goto fail_irq_enable;
}
- ret = linux_wlan_start_firmware(dev);
+ ret = wilc_start_firmware(dev);
if (ret < 0) {
ret = -EIO;
goto fail_irq_enable;
@@ -683,7 +575,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
firmware_ver[size] = '\0';
netdev_dbg(dev, "Firmware Ver = %s\n", firmware_ver);
}
- ret = linux_wlan_init_test_config(dev, vif);
+ ret = wilc_init_fw_config(dev, vif);
if (ret < 0) {
netdev_err(dev, "Failed to configure firmware\n");
@@ -807,12 +699,12 @@ static void wilc_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI ||
dev->mc.count > WILC_MULTICAST_TABLE_SIZE) {
- wilc_setup_multicast_filter(vif, false, 0, NULL);
+ wilc_setup_multicast_filter(vif, 0, 0, NULL);
return;
}
if (dev->mc.count == 0) {
- wilc_setup_multicast_filter(vif, true, 0, NULL);
+ wilc_setup_multicast_filter(vif, 1, 0, NULL);
return;
}
@@ -829,11 +721,11 @@ static void wilc_set_multicast_list(struct net_device *dev)
cur_mc += ETH_ALEN;
}
- if (wilc_setup_multicast_filter(vif, true, dev->mc.count, mc_list))
+ if (wilc_setup_multicast_filter(vif, 1, dev->mc.count, mc_list))
kfree(mc_list);
}
-static void linux_wlan_tx_complete(void *priv, int status)
+static void wilc_tx_complete(void *priv, int status)
{
struct tx_complete_data *pv_data = priv;
@@ -847,9 +739,6 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wilc *wilc = vif->wilc;
struct tx_complete_data *tx_data = NULL;
int queue_count;
- char *udp_buf;
- struct iphdr *ih;
- struct ethhdr *eth_h;
if (skb->dev != ndev) {
netdev_err(ndev, "Packet not destined to this device\n");
@@ -867,28 +756,18 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_data->size = skb->len;
tx_data->skb = skb;
- eth_h = (struct ethhdr *)(skb->data);
- if (eth_h->h_proto == cpu_to_be16(0x8e88))
- netdev_dbg(ndev, "EAPOL transmitted\n");
-
- ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
-
- udp_buf = (char *)ih + sizeof(struct iphdr);
- if ((udp_buf[1] == 68 && udp_buf[3] == 67) ||
- (udp_buf[1] == 67 && udp_buf[3] == 68))
- netdev_dbg(ndev, "DHCP Message transmitted, type:%x %x %x\n",
- udp_buf[248], udp_buf[249], udp_buf[250]);
-
vif->netstats.tx_packets++;
vif->netstats.tx_bytes += tx_data->size;
tx_data->bssid = wilc->vif[vif->idx]->bssid;
queue_count = wilc_wlan_txq_add_net_pkt(ndev, (void *)tx_data,
tx_data->buff, tx_data->size,
- linux_wlan_tx_complete);
+ wilc_tx_complete);
if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
- netif_stop_queue(wilc->vif[0]->ndev);
- netif_stop_queue(wilc->vif[1]->ndev);
+ if (wilc->vif[0]->mac_opened)
+ netif_stop_queue(wilc->vif[0]->ndev);
+ if (wilc->vif[1]->mac_opened)
+ netif_stop_queue(wilc->vif[1]->ndev);
}
return 0;
@@ -916,7 +795,6 @@ static int wilc_mac_close(struct net_device *ndev)
netdev_dbg(ndev, "Deinitializing wilc1000\n");
wl->close = 1;
wilc_wlan_deinitialize(ndev);
- wilc_wfi_deinit_mon_interface();
}
vif->mac_opened = 0;
@@ -924,7 +802,8 @@ static int wilc_mac_close(struct net_device *ndev)
return 0;
}
-void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset)
+void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
+ u32 pkt_offset)
{
unsigned int frame_len = 0;
int stats;
@@ -972,7 +851,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
for (i = 0; i < wilc->vif_num; i++) {
vif = netdev_priv(wilc->vif[i]->ndev);
if (vif->monitor_flag) {
- wilc_wfi_monitor_rx(buff, size);
+ wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
return;
}
}
@@ -983,6 +862,76 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
wilc_wfi_p2p_rx(wilc->vif[1]->ndev, buff, size);
}
+static const struct net_device_ops wilc_netdev_ops = {
+ .ndo_init = mac_init_fn,
+ .ndo_open = wilc_mac_open,
+ .ndo_stop = wilc_mac_close,
+ .ndo_start_xmit = wilc_mac_xmit,
+ .ndo_get_stats = mac_stats,
+ .ndo_set_rx_mode = wilc_set_multicast_list,
+};
+
+static int dev_state_ev_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *dev_iface = ptr;
+ struct wilc_priv *priv;
+ struct host_if_drv *hif_drv;
+ struct net_device *dev;
+ struct wilc_vif *vif;
+
+ if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev)
+ return NOTIFY_DONE;
+
+ dev = (struct net_device *)dev_iface->ifa_dev->dev;
+ if (dev->netdev_ops != &wilc_netdev_ops)
+ return NOTIFY_DONE;
+
+ if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
+ return NOTIFY_DONE;
+
+ priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
+ if (!priv)
+ return NOTIFY_DONE;
+
+ hif_drv = (struct host_if_drv *)priv->hif_drv;
+ vif = netdev_priv(dev);
+ if (!vif || !hif_drv)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (vif->iftype == WILC_STATION_MODE ||
+ vif->iftype == WILC_CLIENT_MODE) {
+ hif_drv->ifc_up = 1;
+ vif->obtaining_ip = false;
+ del_timer(&vif->during_ip_timer);
+ }
+
+ if (vif->wilc->enable_ps)
+ wilc_set_power_mgmt(vif, 1, 0);
+
+ break;
+
+ case NETDEV_DOWN:
+ if (vif->iftype == WILC_STATION_MODE ||
+ vif->iftype == WILC_CLIENT_MODE) {
+ hif_drv->ifc_up = 0;
+ vif->obtaining_ip = false;
+ wilc_set_power_mgmt(vif, 0, 0);
+ }
+
+ wilc_resolve_disconnect_aberration(vif);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static struct notifier_block g_dev_notifier = {
.notifier_call = dev_state_ev_handler
};
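
Reviewer note: the inetaddr notifier has moved below wilc_netdev_ops so it can recognise the driver's own interfaces by comparing dev->netdev_ops against that table instead of string-matching the "wlan0"/"p2p0" labels, and the debug IP-address dumps are gone. A sketch of the ops-pointer check inside a notifier, with assumed names:

/* Sketch: recognising our own netdevs in an inetaddr notifier by ops
 * pointer rather than by interface name (my_netdev_ops is assumed to
 * be defined elsewhere in the same module). */
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>

extern const struct net_device_ops my_netdev_ops;

static int my_inetaddr_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev;

	if (!ifa || !ifa->ifa_dev || !ifa->ifa_dev->dev)
		return NOTIFY_DONE;

	dev = ifa->ifa_dev->dev;
	if (dev->netdev_ops != &my_netdev_ops)
		return NOTIFY_DONE;	/* not one of ours */

	/* handle NETDEV_UP / NETDEV_DOWN here */
	return NOTIFY_DONE;
}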
@@ -1002,19 +951,15 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- if (wilc->vif[0]->ndev || wilc->vif[1]->ndev) {
- for (i = 0; i < WILC_NUM_CONCURRENT_IFC; i++)
- if (wilc->vif[i]->ndev)
- if (wilc->vif[i]->mac_opened)
- wilc_mac_close(wilc->vif[i]->ndev);
-
- for (i = 0; i < WILC_NUM_CONCURRENT_IFC; i++) {
+ for (i = 0; i < WILC_NUM_CONCURRENT_IFC; i++) {
+ if (wilc->vif[i] && wilc->vif[i]->ndev) {
unregister_netdev(wilc->vif[i]->ndev);
wilc_free_wiphy(wilc->vif[i]->ndev);
free_netdev(wilc->vif[i]->ndev);
}
}
+ wilc_wfi_deinit_mon_interface(wilc);
flush_workqueue(wilc->hif_workqueue);
destroy_workqueue(wilc->hif_workqueue);
wilc_wlan_cfg_deinit(wilc);
@@ -1023,15 +968,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
}
EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
-static const struct net_device_ops wilc_netdev_ops = {
- .ndo_init = mac_init_fn,
- .ndo_open = wilc_mac_open,
- .ndo_stop = wilc_mac_close,
- .ndo_start_xmit = wilc_mac_xmit,
- .ndo_get_stats = mac_stats,
- .ndo_set_rx_mode = wilc_set_multicast_list,
-};
-
int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
const struct wilc_hif_func *ops)
{
@@ -1086,8 +1022,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
vif->wilc = *wilc;
vif->ndev = ndev;
wl->vif[i] = vif;
- wl->vif_num = i;
- vif->idx = wl->vif_num;
+ wl->vif_num = i + 1;
+ vif->idx = i;
ndev->netdev_ops = &wilc_netdev_ops;
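
Reviewer note: the interface setup loop now keeps wl->vif_num as a running count (i + 1) and vif->idx as the array slot (i); previously both came from the same assignment, leaving vif_num one short of the number of initialised vifs. A tiny sketch of the corrected bookkeeping, using placeholder types only:

/* Sketch with placeholder types: count and index kept distinct. */
#define SKETCH_MAX_IFC 2

struct vif_sketch { int idx; };
struct wl_sketch {
	struct vif_sketch *vif[SKETCH_MAX_IFC];
	int vif_num;
};

static void register_vifs(struct wl_sketch *wl, struct vif_sketch *pool)
{
	int i;

	for (i = 0; i < SKETCH_MAX_IFC; i++) {
		pool[i].idx = i;		/* slot in wl->vif[] */
		wl->vif[i] = &pool[i];
		wl->vif_num = i + 1;		/* interfaces set up so far */
	}
}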
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index e2f739fef21c..b789c57d7e80 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -51,10 +51,6 @@ struct sdio_cmd53 {
static const struct wilc_hif_func wilc_hif_sdio;
-static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data);
-static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data);
-static int sdio_init(struct wilc *wilc, bool resume);
-
static void wilc_sdio_interrupt(struct sdio_func *func)
{
sdio_release_host(func);
@@ -121,8 +117,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
return ret;
}
-static int linux_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
+static int wilc_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
struct wilc *wilc;
int ret;
@@ -160,7 +156,7 @@ static int linux_sdio_probe(struct sdio_func *func,
return 0;
}
-static void linux_sdio_remove(struct sdio_func *func)
+static void wilc_sdio_remove(struct sdio_func *func)
{
struct wilc *wilc = sdio_get_drvdata(func);
@@ -170,7 +166,7 @@ static void linux_sdio_remove(struct sdio_func *func)
wilc_netdev_cleanup(wilc);
}
-static int sdio_reset(struct wilc *wilc)
+static int wilc_sdio_reset(struct wilc *wilc)
{
struct sdio_cmd52 cmd;
int ret;
@@ -205,7 +201,7 @@ static int wilc_sdio_suspend(struct device *dev)
chip_allow_sleep(wilc);
}
- ret = sdio_reset(wilc);
+ ret = wilc_sdio_reset(wilc);
if (ret) {
dev_err(&func->dev, "Fail reset sdio\n");
return ret;
@@ -215,50 +211,6 @@ static int wilc_sdio_suspend(struct device *dev)
return 0;
}
-static int wilc_sdio_resume(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
- struct wilc *wilc = sdio_get_drvdata(func);
-
- dev_info(dev, "sdio resume\n");
- sdio_release_host(func);
- chip_wakeup(wilc);
- sdio_init(wilc, true);
-
- if (wilc->suspend_event)
- host_wakeup_notify(wilc);
-
- chip_allow_sleep(wilc);
-
- return 0;
-}
-
-static const struct of_device_id wilc_of_match[] = {
- { .compatible = "microchip,wilc1000-sdio", },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, wilc_of_match);
-
-static const struct dev_pm_ops wilc_sdio_pm_ops = {
- .suspend = wilc_sdio_suspend,
- .resume = wilc_sdio_resume,
-};
-
-static struct sdio_driver wilc_sdio_driver = {
- .name = SDIO_MODALIAS,
- .id_table = wilc_sdio_ids,
- .probe = linux_sdio_probe,
- .remove = linux_sdio_remove,
- .drv = {
- .pm = &wilc_sdio_pm_ops,
- .of_match_table = wilc_of_match,
- }
-};
-module_driver(wilc_sdio_driver,
- sdio_register_driver,
- sdio_unregister_driver);
-MODULE_LICENSE("GPL");
-
static int wilc_sdio_enable_interrupt(struct wilc *dev)
{
struct sdio_func *func = container_of(dev->dev, struct sdio_func, dev);
@@ -293,7 +245,7 @@ static void wilc_sdio_disable_interrupt(struct wilc *dev)
*
********************************************/
-static int sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
+static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct sdio_cmd52 cmd;
@@ -334,7 +286,7 @@ fail:
return 0;
}
-static int sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
+static int wilc_sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct sdio_cmd52 cmd;
@@ -370,7 +322,7 @@ fail:
*
********************************************/
-static int sdio_set_func1_block_size(struct wilc *wilc, u32 block_size)
+static int wilc_sdio_set_func1_block_size(struct wilc *wilc, u32 block_size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct sdio_cmd52 cmd;
@@ -404,7 +356,7 @@ fail:
* Sdio interfaces
*
********************************************/
-static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
+static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
@@ -432,7 +384,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
/**
* set the AHB address
**/
- if (!sdio_set_func0_csa_address(wilc, addr))
+ if (!wilc_sdio_set_func0_csa_address(wilc, addr))
goto fail;
cmd.read_write = 1;
@@ -458,7 +410,7 @@ fail:
return 0;
}
-static int sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
+static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
@@ -507,7 +459,7 @@ static int sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.buffer = buf;
cmd.block_size = block_size;
if (addr > 0) {
- if (!sdio_set_func0_csa_address(wilc, addr))
+ if (!wilc_sdio_set_func0_csa_address(wilc, addr))
goto fail;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -530,7 +482,7 @@ static int sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.block_size = block_size;
if (addr > 0) {
- if (!sdio_set_func0_csa_address(wilc, addr))
+ if (!wilc_sdio_set_func0_csa_address(wilc, addr))
goto fail;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -548,7 +500,7 @@ fail:
return 0;
}
-static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
+static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
@@ -571,7 +523,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
} else {
struct sdio_cmd53 cmd;
- if (!sdio_set_func0_csa_address(wilc, addr))
+ if (!wilc_sdio_set_func0_csa_address(wilc, addr))
goto fail;
cmd.read_write = 0;
@@ -600,7 +552,7 @@ fail:
return 0;
}
-static int sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
+static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
@@ -649,7 +601,7 @@ static int sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.buffer = buf;
cmd.block_size = block_size;
if (addr > 0) {
- if (!sdio_set_func0_csa_address(wilc, addr))
+ if (!wilc_sdio_set_func0_csa_address(wilc, addr))
goto fail;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -672,7 +624,7 @@ static int sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
cmd.block_size = block_size;
if (addr > 0) {
- if (!sdio_set_func0_csa_address(wilc, addr))
+ if (!wilc_sdio_set_func0_csa_address(wilc, addr))
goto fail;
}
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -696,12 +648,12 @@ fail:
*
********************************************/
-static int sdio_deinit(struct wilc *wilc)
+static int wilc_sdio_deinit(struct wilc *wilc)
{
return 1;
}
-static int sdio_init(struct wilc *wilc, bool resume)
+static int wilc_sdio_init(struct wilc *wilc, bool resume)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
@@ -729,7 +681,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
/**
* function 0 block size
**/
- if (!sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
+ if (!wilc_sdio_set_func0_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
dev_err(&func->dev, "Fail cmd 52, set func 0 block size...\n");
goto fail;
}
@@ -778,7 +730,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
/**
* func 1 is ready, set func 1 block size
**/
- if (!sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
+ if (!wilc_sdio_set_func1_block_size(wilc, WILC_SDIO_BLOCK_SIZE)) {
dev_err(&func->dev, "Fail set func 1 block size...\n");
goto fail;
}
@@ -801,7 +753,7 @@ static int sdio_init(struct wilc *wilc, bool resume)
* make sure can read back chip id correctly
**/
if (!resume) {
- if (!sdio_read_reg(wilc, 0x1000, &chipid)) {
+ if (!wilc_sdio_read_reg(wilc, 0x1000, &chipid)) {
dev_err(&func->dev, "Fail cmd read chip id...\n");
goto fail;
}
@@ -821,7 +773,7 @@ fail:
return 0;
}
-static int sdio_read_size(struct wilc *wilc, u32 *size)
+static int wilc_sdio_read_size(struct wilc *wilc, u32 *size)
{
u32 tmp;
struct sdio_cmd52 cmd;
@@ -846,14 +798,14 @@ static int sdio_read_size(struct wilc *wilc, u32 *size)
return 1;
}
-static int sdio_read_int(struct wilc *wilc, u32 *int_status)
+static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
u32 tmp;
struct sdio_cmd52 cmd;
- sdio_read_size(wilc, &tmp);
+ wilc_sdio_read_size(wilc, &tmp);
/**
* Read IRQ flags
@@ -905,7 +857,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status)
return 1;
}
-static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
+static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
@@ -1030,7 +982,7 @@ fail:
return 0;
}
-static int sdio_sync_ext(struct wilc *wilc, int nint)
+static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
@@ -1051,13 +1003,13 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
/**
* Disable power sequencer
**/
- if (!sdio_read_reg(wilc, WILC_MISC, &reg)) {
+ if (!wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
dev_err(&func->dev, "Failed read misc reg...\n");
return 0;
}
reg &= ~BIT(8);
- if (!sdio_write_reg(wilc, WILC_MISC, reg)) {
+ if (!wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
dev_err(&func->dev, "Failed write misc reg...\n");
return 0;
}
@@ -1069,14 +1021,14 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
/**
* interrupt pin mux select
**/
- ret = sdio_read_reg(wilc, WILC_PIN_MUX_0, &reg);
+ ret = wilc_sdio_read_reg(wilc, WILC_PIN_MUX_0, &reg);
if (!ret) {
dev_err(&func->dev, "Failed read reg (%08x)...\n",
WILC_PIN_MUX_0);
return 0;
}
reg |= BIT(8);
- ret = sdio_write_reg(wilc, WILC_PIN_MUX_0, reg);
+ ret = wilc_sdio_write_reg(wilc, WILC_PIN_MUX_0, reg);
if (!ret) {
dev_err(&func->dev, "Failed write reg (%08x)...\n",
WILC_PIN_MUX_0);
@@ -1086,7 +1038,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
/**
* interrupt enable
**/
- ret = sdio_read_reg(wilc, WILC_INTR_ENABLE, &reg);
+ ret = wilc_sdio_read_reg(wilc, WILC_INTR_ENABLE, &reg);
if (!ret) {
dev_err(&func->dev, "Failed read reg (%08x)...\n",
WILC_INTR_ENABLE);
@@ -1095,14 +1047,14 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
for (i = 0; (i < 5) && (nint > 0); i++, nint--)
reg |= BIT((27 + i));
- ret = sdio_write_reg(wilc, WILC_INTR_ENABLE, reg);
+ ret = wilc_sdio_write_reg(wilc, WILC_INTR_ENABLE, reg);
if (!ret) {
dev_err(&func->dev, "Failed write reg (%08x)...\n",
WILC_INTR_ENABLE);
return 0;
}
if (nint) {
- ret = sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
+ ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
if (!ret) {
dev_err(&func->dev,
"Failed read reg (%08x)...\n",
@@ -1113,7 +1065,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
- ret = sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
+ ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
if (!ret) {
dev_err(&func->dev,
"Failed write reg (%08x)...\n",
@@ -1127,19 +1079,62 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
/* Global sdio HIF function table */
static const struct wilc_hif_func wilc_hif_sdio = {
- .hif_init = sdio_init,
- .hif_deinit = sdio_deinit,
- .hif_read_reg = sdio_read_reg,
- .hif_write_reg = sdio_write_reg,
- .hif_block_rx = sdio_read,
- .hif_block_tx = sdio_write,
- .hif_read_int = sdio_read_int,
- .hif_clear_int_ext = sdio_clear_int_ext,
- .hif_read_size = sdio_read_size,
- .hif_block_tx_ext = sdio_write,
- .hif_block_rx_ext = sdio_read,
- .hif_sync_ext = sdio_sync_ext,
+ .hif_init = wilc_sdio_init,
+ .hif_deinit = wilc_sdio_deinit,
+ .hif_read_reg = wilc_sdio_read_reg,
+ .hif_write_reg = wilc_sdio_write_reg,
+ .hif_block_rx = wilc_sdio_read,
+ .hif_block_tx = wilc_sdio_write,
+ .hif_read_int = wilc_sdio_read_int,
+ .hif_clear_int_ext = wilc_sdio_clear_int_ext,
+ .hif_read_size = wilc_sdio_read_size,
+ .hif_block_tx_ext = wilc_sdio_write,
+ .hif_block_rx_ext = wilc_sdio_read,
+ .hif_sync_ext = wilc_sdio_sync_ext,
.enable_interrupt = wilc_sdio_enable_interrupt,
.disable_interrupt = wilc_sdio_disable_interrupt,
};
+static int wilc_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wilc *wilc = sdio_get_drvdata(func);
+
+ dev_info(dev, "sdio resume\n");
+ sdio_release_host(func);
+ chip_wakeup(wilc);
+ wilc_sdio_init(wilc, true);
+
+ if (wilc->suspend_event)
+ host_wakeup_notify(wilc);
+
+ chip_allow_sleep(wilc);
+
+ return 0;
+}
+
+static const struct of_device_id wilc_of_match[] = {
+ { .compatible = "microchip,wilc1000-sdio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, wilc_of_match);
+
+static const struct dev_pm_ops wilc_sdio_pm_ops = {
+ .suspend = wilc_sdio_suspend,
+ .resume = wilc_sdio_resume,
+};
+
+static struct sdio_driver wilc_sdio_driver = {
+ .name = SDIO_MODALIAS,
+ .id_table = wilc_sdio_ids,
+ .probe = wilc_sdio_probe,
+ .remove = wilc_sdio_remove,
+ .drv = {
+ .pm = &wilc_sdio_pm_ops,
+ .of_match_table = wilc_of_match,
+ }
+};
+module_driver(wilc_sdio_driver,
+ sdio_register_driver,
+ sdio_unregister_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 153e120eff00..4a1be9e60d74 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -814,7 +814,7 @@ static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
*
********************************************/
-static int _wilc_spi_deinit(struct wilc *wilc)
+static int wilc_spi_deinit(struct wilc *wilc)
{
/*
* TODO:
@@ -1122,7 +1122,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
/* Global spi HIF function table */
static const struct wilc_hif_func wilc_hif_spi = {
.hif_init = wilc_spi_init,
- .hif_deinit = _wilc_spi_deinit,
+ .hif_deinit = wilc_spi_deinit,
.hif_read_reg = wilc_spi_read_reg,
.hif_write_reg = wilc_spi_write_reg,
.hif_block_rx = wilc_spi_read,
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index ac47dda510e0..5e7a4676324e 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -27,10 +27,7 @@
#define GAS_INITIAL_REQ 0x0a
#define GAS_INITIAL_RSP 0x0b
-#define INVALID_CHANNEL 0
-
-#define nl80211_SCAN_RESULT_EXPIRE (3 * HZ)
-#define SCAN_RESULT_EXPIRE (40 * HZ)
+#define WILC_INVALID_CHANNEL 0
static const struct ieee80211_txrx_stypes
wilc_wfi_cfg80211_mgmt_types[NUM_NL80211_IFTYPES] = {
@@ -65,124 +62,15 @@ static const struct wiphy_wowlan_support wowlan_support = {
.flags = WIPHY_WOWLAN_ANY
};
-struct p2p_mgmt_data {
+struct wilc_p2p_mgmt_data {
int size;
u8 *buff;
};
-static u8 wlan_channel = INVALID_CHANNEL;
-static u8 curr_channel;
-static u8 p2p_oui[] = {0x50, 0x6f, 0x9A, 0x09};
-static u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
-
-#define AGING_TIME (9 * 1000)
-#define DURING_IP_TIME_OUT 15000
-
-static void clear_shadow_scan(struct wilc_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->scanned_cnt; i++) {
- kfree(priv->scanned_shadow[i].ies);
- priv->scanned_shadow[i].ies = NULL;
-
- kfree(priv->scanned_shadow[i].join_params);
- priv->scanned_shadow[i].join_params = NULL;
- }
- priv->scanned_cnt = 0;
-}
-
-static u32 get_rssi_avg(struct network_info *network_info)
-{
- u8 i;
- int rssi_v = 0;
- u8 num_rssi = (network_info->rssi_history.full) ?
- NUM_RSSI : (network_info->rssi_history.index);
-
- for (i = 0; i < num_rssi; i++)
- rssi_v += network_info->rssi_history.samples[i];
-
- rssi_v /= num_rssi;
- return rssi_v;
-}
-
-static void refresh_scan(struct wilc_priv *priv, bool direct_scan)
-{
- struct wiphy *wiphy = priv->dev->ieee80211_ptr->wiphy;
- int i;
-
- for (i = 0; i < priv->scanned_cnt; i++) {
- struct network_info *network_info;
- s32 freq;
- struct ieee80211_channel *channel;
- int rssi;
- struct cfg80211_bss *bss;
-
- network_info = &priv->scanned_shadow[i];
-
- if (!memcmp("DIRECT-", network_info->ssid, 7) && !direct_scan)
- continue;
-
- freq = ieee80211_channel_to_frequency((s32)network_info->ch,
- NL80211_BAND_2GHZ);
- channel = ieee80211_get_channel(wiphy, freq);
- rssi = get_rssi_avg(network_info);
- bss = cfg80211_inform_bss(wiphy,
- channel,
- CFG80211_BSS_FTYPE_UNKNOWN,
- network_info->bssid,
- network_info->tsf,
- network_info->cap_info,
- network_info->beacon_period,
- (const u8 *)network_info->ies,
- (size_t)network_info->ies_len,
- (s32)rssi * 100,
- GFP_KERNEL);
- cfg80211_put_bss(wiphy, bss);
- }
-}
-
-static void reset_shadow_found(struct wilc_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->scanned_cnt; i++)
- priv->scanned_shadow[i].found = 0;
-}
-
-static void update_scan_time(struct wilc_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->scanned_cnt; i++)
- priv->scanned_shadow[i].time_scan = jiffies;
-}
-
-static void remove_network_from_shadow(struct timer_list *t)
-{
- struct wilc_priv *priv = from_timer(priv, t, aging_timer);
- unsigned long now = jiffies;
- int i, j;
-
- for (i = 0; i < priv->scanned_cnt; i++) {
- if (!time_after(now, priv->scanned_shadow[i].time_scan +
- (unsigned long)(SCAN_RESULT_EXPIRE)))
- continue;
- kfree(priv->scanned_shadow[i].ies);
- priv->scanned_shadow[i].ies = NULL;
-
- kfree(priv->scanned_shadow[i].join_params);
+static const u8 p2p_oui[] = {0x50, 0x6f, 0x9A, 0x09};
+static const u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
- for (j = i; (j < priv->scanned_cnt - 1); j++)
- priv->scanned_shadow[j] = priv->scanned_shadow[j + 1];
-
- priv->scanned_cnt--;
- }
-
- if (priv->scanned_cnt != 0)
- mod_timer(&priv->aging_timer,
- jiffies + msecs_to_jiffies(AGING_TIME));
-}
+#define WILC_IP_TIMEOUT_MS 15000
static void clear_during_ip(struct timer_list *t)
{
@@ -191,151 +79,36 @@ static void clear_during_ip(struct timer_list *t)
vif->obtaining_ip = false;
}
-static int is_network_in_shadow(struct network_info *nw_info,
- struct wilc_priv *priv)
-{
- int state = -1;
- int i;
-
- if (priv->scanned_cnt == 0) {
- mod_timer(&priv->aging_timer,
- jiffies + msecs_to_jiffies(AGING_TIME));
- state = -1;
- } else {
- for (i = 0; i < priv->scanned_cnt; i++) {
- if (memcmp(priv->scanned_shadow[i].bssid,
- nw_info->bssid, 6) == 0) {
- state = i;
- break;
- }
- }
- }
- return state;
-}
-
-static void add_network_to_shadow(struct network_info *nw_info,
- struct wilc_priv *priv, void *join_params)
-{
- int ap_found = is_network_in_shadow(nw_info, priv);
- u32 ap_index = 0;
- u8 rssi_index = 0;
- struct network_info *shadow_nw_info;
-
- if (priv->scanned_cnt >= MAX_NUM_SCANNED_NETWORKS_SHADOW)
- return;
-
- if (ap_found == -1) {
- ap_index = priv->scanned_cnt;
- priv->scanned_cnt++;
- } else {
- ap_index = ap_found;
- }
- shadow_nw_info = &priv->scanned_shadow[ap_index];
- rssi_index = shadow_nw_info->rssi_history.index;
- shadow_nw_info->rssi_history.samples[rssi_index++] = nw_info->rssi;
- if (rssi_index == NUM_RSSI) {
- rssi_index = 0;
- shadow_nw_info->rssi_history.full = true;
- }
- shadow_nw_info->rssi_history.index = rssi_index;
- shadow_nw_info->rssi = nw_info->rssi;
- shadow_nw_info->cap_info = nw_info->cap_info;
- shadow_nw_info->ssid_len = nw_info->ssid_len;
- memcpy(shadow_nw_info->ssid, nw_info->ssid, nw_info->ssid_len);
- memcpy(shadow_nw_info->bssid, nw_info->bssid, ETH_ALEN);
- shadow_nw_info->beacon_period = nw_info->beacon_period;
- shadow_nw_info->dtim_period = nw_info->dtim_period;
- shadow_nw_info->ch = nw_info->ch;
- shadow_nw_info->tsf = nw_info->tsf;
- if (ap_found != -1)
- kfree(shadow_nw_info->ies);
- shadow_nw_info->ies = kmemdup(nw_info->ies, nw_info->ies_len,
- GFP_KERNEL);
- if (shadow_nw_info->ies)
- shadow_nw_info->ies_len = nw_info->ies_len;
- else
- shadow_nw_info->ies_len = 0;
- shadow_nw_info->time_scan = jiffies;
- shadow_nw_info->time_scan_cached = jiffies;
- shadow_nw_info->found = 1;
- if (ap_found != -1)
- kfree(shadow_nw_info->join_params);
- shadow_nw_info->join_params = join_params;
-}
-
static void cfg_scan_result(enum scan_event scan_event,
- struct network_info *network_info,
- void *user_void, void *join_params)
+ struct wilc_rcvd_net_info *info, void *user_void)
{
- struct wilc_priv *priv;
- struct wiphy *wiphy;
- s32 freq;
- struct ieee80211_channel *channel;
- struct cfg80211_bss *bss = NULL;
+ struct wilc_priv *priv = user_void;
- priv = user_void;
if (!priv->cfg_scanning)
return;
if (scan_event == SCAN_EVENT_NETWORK_FOUND) {
- wiphy = priv->dev->ieee80211_ptr->wiphy;
-
- if (!wiphy || !network_info)
- return;
+ s32 freq;
+ struct ieee80211_channel *channel;
+ struct cfg80211_bss *bss;
+ struct wiphy *wiphy = priv->dev->ieee80211_ptr->wiphy;
- if (wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (((s32)network_info->rssi * 100) < 0 ||
- ((s32)network_info->rssi * 100) > 100))
+ if (!wiphy || !info)
return;
- freq = ieee80211_channel_to_frequency((s32)network_info->ch,
+ freq = ieee80211_channel_to_frequency((s32)info->ch,
NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
-
if (!channel)
return;
- if (network_info->new_network) {
- if (priv->rcvd_ch_cnt >= MAX_NUM_SCANNED_NETWORKS)
- return;
-
- priv->rcvd_ch_cnt++;
-
- add_network_to_shadow(network_info, priv, join_params);
-
- if (memcmp("DIRECT-", network_info->ssid, 7))
- return;
-
- bss = cfg80211_inform_bss(wiphy,
- channel,
- CFG80211_BSS_FTYPE_UNKNOWN,
- network_info->bssid,
- network_info->tsf,
- network_info->cap_info,
- network_info->beacon_period,
- (const u8 *)network_info->ies,
- (size_t)network_info->ies_len,
- (s32)network_info->rssi * 100,
- GFP_KERNEL);
+ bss = cfg80211_inform_bss_frame(wiphy, channel, info->mgmt,
+ info->frame_len,
+ (s32)info->rssi * 100,
+ GFP_KERNEL);
+ if (!bss)
cfg80211_put_bss(wiphy, bss);
- } else {
- u32 i;
-
- for (i = 0; i < priv->rcvd_ch_cnt; i++) {
- if (memcmp(priv->scanned_shadow[i].bssid,
- network_info->bssid, 6) == 0)
- break;
- }
-
- if (i >= priv->rcvd_ch_cnt)
- return;
-
- priv->scanned_shadow[i].rssi = network_info->rssi;
- priv->scanned_shadow[i].time_scan = jiffies;
- }
} else if (scan_event == SCAN_EVENT_DONE) {
- refresh_scan(priv, false);
-
mutex_lock(&priv->scan_req_lock);
if (priv->scan_req) {
@@ -344,7 +117,6 @@ static void cfg_scan_result(enum scan_event scan_event,
};
cfg80211_scan_done(priv->scan_req, &info);
- priv->rcvd_ch_cnt = 0;
priv->cfg_scanning = false;
priv->scan_req = NULL;
}
@@ -357,9 +129,6 @@ static void cfg_scan_result(enum scan_event scan_event,
.aborted = false,
};
- update_scan_time(priv);
- refresh_scan(priv, false);
-
cfg80211_scan_done(priv->scan_req, &info);
priv->cfg_scanning = false;
priv->scan_req = NULL;
@@ -368,21 +137,7 @@ static void cfg_scan_result(enum scan_event scan_event,
}
}
-static inline bool wilc_cfg_scan_time_expired(struct wilc_priv *priv, int i)
-{
- unsigned long now = jiffies;
-
- if (time_after(now, priv->scanned_shadow[i].time_scan_cached +
- (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ))))
- return true;
- else
- return false;
-}
-
-static void cfg_connect_result(enum conn_event conn_disconn_evt,
- struct connect_info *conn_info,
- u8 mac_status,
- struct disconnect_info *disconn_info,
+static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
void *priv_data)
{
struct wilc_priv *priv = priv_data;
@@ -390,49 +145,28 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wl = vif->wilc;
struct host_if_drv *wfi_drv = priv->hif_drv;
- u8 null_bssid[ETH_ALEN] = {0};
+ struct wilc_conn_info *conn_info = &wfi_drv->conn_info;
vif->connecting = false;
if (conn_disconn_evt == CONN_DISCONN_EVENT_CONN_RESP) {
- u16 connect_status;
-
- connect_status = conn_info->status;
+ u16 connect_status = conn_info->status;
if (mac_status == WILC_MAC_STATUS_DISCONNECTED &&
- conn_info->status == WLAN_STATUS_SUCCESS) {
+ connect_status == WLAN_STATUS_SUCCESS) {
connect_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- wilc_wlan_set_bssid(priv->dev, null_bssid,
- WILC_STATION_MODE);
+ wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE);
- if (!wfi_drv->p2p_connect)
- wlan_channel = INVALID_CHANNEL;
+ if (vif->iftype != WILC_CLIENT_MODE)
+ wl->sta_ch = WILC_INVALID_CHANNEL;
netdev_err(dev, "Unspecified failure\n");
}
- if (connect_status == WLAN_STATUS_SUCCESS) {
- bool scan_refresh = false;
- u32 i;
-
+ if (connect_status == WLAN_STATUS_SUCCESS)
memcpy(priv->associated_bss, conn_info->bssid,
ETH_ALEN);
- for (i = 0; i < priv->scanned_cnt; i++) {
- if (memcmp(priv->scanned_shadow[i].bssid,
- conn_info->bssid,
- ETH_ALEN) == 0) {
- if (wilc_cfg_scan_time_expired(priv, i))
- scan_refresh = true;
-
- break;
- }
- }
-
- if (scan_refresh)
- refresh_scan(priv, true);
- }
-
cfg80211_connect_result(dev, conn_info->bssid,
conn_info->req_ies,
conn_info->req_ies_len,
@@ -440,23 +174,24 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt,
conn_info->resp_ies_len, connect_status,
GFP_KERNEL);
} else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
+ u16 reason = 0;
+
vif->obtaining_ip = false;
priv->p2p.local_random = 0x01;
priv->p2p.recv_random = 0x00;
priv->p2p.is_wilc_ie = false;
eth_zero_addr(priv->associated_bss);
- wilc_wlan_set_bssid(priv->dev, null_bssid, WILC_STATION_MODE);
+ wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE);
+
+ if (vif->iftype != WILC_CLIENT_MODE)
+ wl->sta_ch = WILC_INVALID_CHANNEL;
- if (!wfi_drv->p2p_connect)
- wlan_channel = INVALID_CHANNEL;
if (wfi_drv->ifc_up && dev == wl->vif[1]->ndev)
- disconn_info->reason = 3;
+ reason = 3;
else if (!wfi_drv->ifc_up && dev == wl->vif[1]->ndev)
- disconn_info->reason = 1;
+ reason = 1;
- cfg80211_disconnected(dev, disconn_info->reason,
- disconn_info->ie, disconn_info->ie_len,
- false, GFP_KERNEL);
+ cfg80211_disconnected(dev, reason, NULL, 0, false, GFP_KERNEL);
}
}
@@ -470,7 +205,7 @@ static int set_channel(struct wiphy *wiphy,
channelnum = ieee80211_frequency_to_channel(chandef->chan->center_freq);
- curr_channel = channelnum;
+ vif->wilc->op_ch = channelnum;
result = wilc_set_mac_chnl_num(vif, channelnum);
if (result != 0)
@@ -481,22 +216,23 @@ static int set_channel(struct wiphy *wiphy,
static inline int
wilc_wfi_cfg_alloc_fill_ssid(struct cfg80211_scan_request *request,
- struct hidden_network *ntwk)
+ struct wilc_probe_ssid *search)
{
int i;
int slot_id = 0;
- ntwk->net_info = kcalloc(request->n_ssids, sizeof(*ntwk->net_info),
- GFP_KERNEL);
- if (!ntwk->net_info)
+ search->ssid_info = kcalloc(request->n_ssids,
+ sizeof(*search->ssid_info), GFP_KERNEL);
+ if (!search->ssid_info)
goto out;
- ntwk->n_ssids = request->n_ssids;
+ search->n_ssids = request->n_ssids;
for (i = 0; i < request->n_ssids; i++) {
if (request->ssids[i].ssid_len > 0) {
- struct hidden_net_info *info = &ntwk->net_info[slot_id];
+ struct wilc_probe_ssid_info *info;
+ info = &search->ssid_info[slot_id];
info->ssid = kmemdup(request->ssids[i].ssid,
request->ssids[i].ssid_len,
GFP_KERNEL);
@@ -506,7 +242,7 @@ wilc_wfi_cfg_alloc_fill_ssid(struct cfg80211_scan_request *request,
info->ssid_len = request->ssids[i].ssid_len;
slot_id++;
} else {
- ntwk->n_ssids -= 1;
+ search->n_ssids -= 1;
}
}
return 0;
@@ -514,9 +250,9 @@ wilc_wfi_cfg_alloc_fill_ssid(struct cfg80211_scan_request *request,
out_free:
for (i = 0; i < slot_id; i++)
- kfree(ntwk->net_info[i].ssid);
+ kfree(search->ssid_info[i].ssid);
- kfree(ntwk->net_info);
+ kfree(search->ssid_info);
out:
return -ENOMEM;
@@ -528,46 +264,41 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
struct wilc_vif *vif = netdev_priv(priv->dev);
u32 i;
int ret = 0;
- u8 scan_ch_list[MAX_NUM_SCANNED_NETWORKS];
- struct hidden_network hidden_ntwk;
-
- priv->scan_req = request;
+ u8 scan_ch_list[WILC_MAX_NUM_SCANNED_CH];
+ struct wilc_probe_ssid probe_ssid;
- priv->rcvd_ch_cnt = 0;
-
- reset_shadow_found(priv);
+ if (request->n_channels > WILC_MAX_NUM_SCANNED_CH) {
+ netdev_err(priv->dev, "Requested scanned channels over\n");
+ return -EINVAL;
+ }
+ priv->scan_req = request;
priv->cfg_scanning = true;
- if (request->n_channels <= MAX_NUM_SCANNED_NETWORKS) {
- for (i = 0; i < request->n_channels; i++) {
- u16 freq = request->channels[i]->center_freq;
+ for (i = 0; i < request->n_channels; i++) {
+ u16 freq = request->channels[i]->center_freq;
- scan_ch_list[i] = ieee80211_frequency_to_channel(freq);
- }
-
- if (request->n_ssids >= 1) {
- if (wilc_wfi_cfg_alloc_fill_ssid(request,
- &hidden_ntwk)) {
- ret = -ENOMEM;
- goto out;
- }
+ scan_ch_list[i] = ieee80211_frequency_to_channel(freq);
+ }
- ret = wilc_scan(vif, WILC_FW_USER_SCAN,
- WILC_FW_ACTIVE_SCAN, scan_ch_list,
- request->n_channels,
- (const u8 *)request->ie,
- request->ie_len, cfg_scan_result,
- (void *)priv, &hidden_ntwk);
- } else {
- ret = wilc_scan(vif, WILC_FW_USER_SCAN,
- WILC_FW_ACTIVE_SCAN, scan_ch_list,
- request->n_channels,
- (const u8 *)request->ie,
- request->ie_len, cfg_scan_result,
- (void *)priv, NULL);
+ if (request->n_ssids >= 1) {
+ if (wilc_wfi_cfg_alloc_fill_ssid(request, &probe_ssid)) {
+ ret = -ENOMEM;
+ goto out;
}
+
+ ret = wilc_scan(vif, WILC_FW_USER_SCAN,
+ WILC_FW_ACTIVE_SCAN, scan_ch_list,
+ request->n_channels,
+ (const u8 *)request->ie,
+ request->ie_len, cfg_scan_result,
+ (void *)priv, &probe_ssid);
} else {
- netdev_err(priv->dev, "Requested scanned channels over\n");
+ ret = wilc_scan(vif, WILC_FW_USER_SCAN,
+ WILC_FW_ACTIVE_SCAN, scan_ch_list,
+ request->n_channels,
+ (const u8 *)request->ie,
+ request->ie_len, cfg_scan_result,
+ (void *)priv, NULL);
}
out:
@@ -585,54 +316,17 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
struct wilc_priv *priv = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(priv->dev);
struct host_if_drv *wfi_drv = priv->hif_drv;
- struct network_info *nw_info;
int ret;
u32 i;
- u32 sel_bssi_idx = UINT_MAX;
u8 security = WILC_FW_SEC_NO;
enum authtype auth_type = WILC_FW_AUTH_ANY;
u32 cipher_group;
+ struct cfg80211_bss *bss;
+ void *join_params;
+ u8 ch;
vif->connecting = true;
- if (!(strncmp(sme->ssid, "DIRECT-", 7)))
- wfi_drv->p2p_connect = 1;
- else
- wfi_drv->p2p_connect = 0;
-
- for (i = 0; i < priv->scanned_cnt; i++) {
- if (sme->ssid_len == priv->scanned_shadow[i].ssid_len &&
- memcmp(priv->scanned_shadow[i].ssid,
- sme->ssid,
- sme->ssid_len) == 0) {
- if (!sme->bssid) {
- if (sel_bssi_idx == UINT_MAX ||
- priv->scanned_shadow[i].rssi >
- priv->scanned_shadow[sel_bssi_idx].rssi)
- sel_bssi_idx = i;
- } else {
- if (memcmp(priv->scanned_shadow[i].bssid,
- sme->bssid,
- ETH_ALEN) == 0) {
- sel_bssi_idx = i;
- break;
- }
- }
- }
- }
-
- if (sel_bssi_idx < priv->scanned_cnt) {
- nw_info = &priv->scanned_shadow[sel_bssi_idx];
- } else {
- ret = -ENOENT;
- goto out_error;
- }
-
- if (ether_addr_equal_unaligned(vif->bssid, nw_info->bssid)) {
- ret = -EALREADY;
- goto out_error;
- }
-
memset(priv->wep_key, 0, sizeof(priv->wep_key));
memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
@@ -706,31 +400,65 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
auth_type = WILC_FW_AUTH_IEEE8021;
}
- curr_channel = nw_info->ch;
+ if (wfi_drv->usr_scan_req.scan_result) {
+ netdev_err(vif->ndev, "%s: Scan in progress\n", __func__);
+ ret = -EBUSY;
+ goto out_error;
+ }
- if (!wfi_drv->p2p_connect)
- wlan_channel = nw_info->ch;
+ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid,
+ sme->ssid_len, IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY(sme->privacy));
+ if (!bss) {
+ ret = -EINVAL;
+ goto out_error;
+ }
- wilc_wlan_set_bssid(dev, nw_info->bssid, WILC_STATION_MODE);
+ if (ether_addr_equal_unaligned(vif->bssid, bss->bssid)) {
+ ret = -EALREADY;
+ goto out_put_bss;
+ }
- ret = wilc_set_join_req(vif, nw_info->bssid, sme->ssid,
- sme->ssid_len, sme->ie, sme->ie_len,
- cfg_connect_result, (void *)priv,
- security, auth_type,
- nw_info->ch,
- nw_info->join_params);
- if (ret) {
- u8 null_bssid[ETH_ALEN] = {0};
+ join_params = wilc_parse_join_bss_param(bss, &sme->crypto);
+ if (!join_params) {
+ netdev_err(dev, "%s: failed to construct join param\n",
+ __func__);
+ ret = -EINVAL;
+ goto out_put_bss;
+ }
+
+ ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
+ vif->wilc->op_ch = ch;
+ if (vif->iftype != WILC_CLIENT_MODE)
+ vif->wilc->sta_ch = ch;
+
+ wilc_wlan_set_bssid(dev, bss->bssid, WILC_STATION_MODE);
+ wfi_drv->conn_info.security = security;
+ wfi_drv->conn_info.auth_type = auth_type;
+ wfi_drv->conn_info.ch = ch;
+ wfi_drv->conn_info.conn_result = cfg_connect_result;
+ wfi_drv->conn_info.arg = priv;
+ wfi_drv->conn_info.param = join_params;
+
+ ret = wilc_set_join_req(vif, bss->bssid, sme->ie, sme->ie_len);
+ if (ret) {
netdev_err(dev, "wilc_set_join_req(): Error\n");
ret = -ENOENT;
- if (!wfi_drv->p2p_connect)
- wlan_channel = INVALID_CHANNEL;
- wilc_wlan_set_bssid(dev, null_bssid, WILC_STATION_MODE);
- goto out_error;
- }
+ if (vif->iftype != WILC_CLIENT_MODE)
+ vif->wilc->sta_ch = WILC_INVALID_CHANNEL;
+ wilc_wlan_set_bssid(dev, NULL, WILC_STATION_MODE);
+ wfi_drv->conn_info.conn_result = NULL;
+ kfree(join_params);
+ goto out_put_bss;
+ }
+ kfree(join_params);
+ cfg80211_put_bss(wiphy, bss);
return 0;
+out_put_bss:
+ cfg80211_put_bss(wiphy, bss);
+
out_error:
vif->connecting = false;
return ret;
@@ -742,9 +470,7 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev,
struct wilc_priv *priv = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(priv->dev);
struct wilc *wilc = vif->wilc;
- struct host_if_drv *wfi_drv;
int ret;
- u8 null_bssid[ETH_ALEN] = {0};
vif->connecting = false;
@@ -757,15 +483,14 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
- wfi_drv = (struct host_if_drv *)priv->hif_drv;
- if (!wfi_drv->p2p_connect)
- wlan_channel = INVALID_CHANNEL;
- wilc_wlan_set_bssid(priv->dev, null_bssid, WILC_STATION_MODE);
+ if (vif->iftype != WILC_CLIENT_MODE)
+ wilc->sta_ch = WILC_INVALID_CHANNEL;
+ wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE);
priv->p2p.local_random = 0x01;
priv->p2p.recv_random = 0x00;
priv->p2p.is_wilc_ie = false;
- wfi_drv->p2p_timeout = 0;
+ priv->hif_drv->p2p_timeout = 0;
ret = wilc_disconnect(vif);
if (ret != 0) {
@@ -1210,7 +935,7 @@ static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
}
static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u8 ch_list_attr_idx,
- u8 op_ch_attr_idx)
+ u8 op_ch_attr_idx, u8 sta_ch)
{
int i = 0;
int j = 0;
@@ -1221,7 +946,7 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u8 ch_list_attr_idx,
for (i = ch_list_attr_idx + 3; i < limit; i++) {
if (buf[i] == 0x51) {
for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
- buf[j] = wlan_channel;
+ buf[j] = sta_ch;
break;
}
}
@@ -1229,11 +954,11 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u8 ch_list_attr_idx,
if (op_ch_attr_idx) {
buf[op_ch_attr_idx + 6] = 0x51;
- buf[op_ch_attr_idx + 7] = wlan_channel;
+ buf[op_ch_attr_idx + 7] = sta_ch;
}
}
-static void wilc_wfi_cfg_parse_rx_action(u8 *buf, u32 len)
+static void wilc_wfi_cfg_parse_rx_action(u8 *buf, u32 len, u8 sta_ch)
{
u32 index = 0;
u8 op_channel_attr_index = 0;
@@ -1249,13 +974,13 @@ static void wilc_wfi_cfg_parse_rx_action(u8 *buf, u32 len)
op_channel_attr_index = index;
index += buf[index + 1] + 3;
}
- if (wlan_channel != INVALID_CHANNEL)
+ if (sta_ch != WILC_INVALID_CHANNEL)
wilc_wfi_cfg_parse_ch_attr(buf, channel_list_attr_index,
- op_channel_attr_index);
+ op_channel_attr_index, sta_ch);
}
static void wilc_wfi_cfg_parse_tx_action(u8 *buf, u32 len, bool oper_ch,
- u8 iftype)
+ u8 iftype, u8 sta_ch)
{
u32 index = 0;
u8 op_channel_attr_index = 0;
@@ -1274,9 +999,9 @@ static void wilc_wfi_cfg_parse_tx_action(u8 *buf, u32 len, bool oper_ch,
op_channel_attr_index = index;
index += buf[index + 1] + 3;
}
- if (wlan_channel != INVALID_CHANNEL && oper_ch)
+ if (sta_ch != WILC_INVALID_CHANNEL && oper_ch)
wilc_wfi_cfg_parse_ch_attr(buf, channel_list_attr_index,
- op_channel_attr_index);
+ op_channel_attr_index, sta_ch);
}
static void wilc_wfi_cfg_parse_rx_vendor_spec(struct wilc_priv *priv, u8 *buff,
@@ -1311,7 +1036,8 @@ static void wilc_wfi_cfg_parse_rx_vendor_spec(struct wilc_priv *priv, u8 *buff,
if (buff[i] == P2PELEM_ATTR_ID &&
!(memcmp(p2p_oui, &buff[i + 2], 4))) {
wilc_wfi_cfg_parse_rx_action(&buff[i + 6],
- size - (i + 6));
+ size - (i + 6),
+ vif->wilc->sta_ch);
break;
}
}
@@ -1322,6 +1048,8 @@ void wilc_wfi_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
{
struct wilc_priv *priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
struct host_if_drv *wfi_drv = priv->hif_drv;
+ struct wilc_vif *vif = netdev_priv(dev);
+ struct wilc *wl = vif->wilc;
u32 header, pkt_offset;
s32 freq;
__le16 fc;
@@ -1342,7 +1070,7 @@ void wilc_wfi_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
return;
}
- freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
+ freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ);
fc = ((struct ieee80211_hdr *)buff)->frame_control;
if (!ieee80211_is_action(fc)) {
@@ -1387,33 +1115,18 @@ void wilc_wfi_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
static void wilc_wfi_mgmt_tx_complete(void *priv, int status)
{
- struct p2p_mgmt_data *pv_data = priv;
+ struct wilc_p2p_mgmt_data *pv_data = priv;
kfree(pv_data->buff);
kfree(pv_data);
}
-static void wilc_wfi_remain_on_channel_ready(void *priv_data)
-{
- struct wilc_priv *priv;
-
- priv = priv_data;
-
- priv->p2p_listen_state = true;
-
- cfg80211_ready_on_channel(priv->wdev,
- priv->remain_on_ch_params.listen_cookie,
- priv->remain_on_ch_params.listen_ch,
- priv->remain_on_ch_params.listen_duration,
- GFP_KERNEL);
-}
-
-static void wilc_wfi_remain_on_channel_expired(void *data, u32 session_id)
+static void wilc_wfi_remain_on_channel_expired(void *data, u64 cookie)
{
struct wilc_priv *priv = data;
struct wilc_wfi_p2p_listen_params *params = &priv->remain_on_ch_params;
- if (session_id != params->listen_session_id)
+ if (cookie != params->listen_cookie)
return;
priv->p2p_listen_state = false;
@@ -1430,24 +1143,36 @@ static int remain_on_channel(struct wiphy *wiphy,
int ret = 0;
struct wilc_priv *priv = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(priv->dev);
+ u64 id;
if (wdev->iftype == NL80211_IFTYPE_AP) {
netdev_dbg(vif->ndev, "Required while in AP mode\n");
return ret;
}
- curr_channel = chan->hw_value;
+ id = ++priv->inc_roc_cookie;
+ if (id == 0)
+ id = ++priv->inc_roc_cookie;
+
+ ret = wilc_remain_on_channel(vif, id, duration, chan->hw_value,
+ wilc_wfi_remain_on_channel_expired,
+ (void *)priv);
+ if (ret)
+ return ret;
+
+ vif->wilc->op_ch = chan->hw_value;
priv->remain_on_ch_params.listen_ch = chan;
- priv->remain_on_ch_params.listen_cookie = *cookie;
+ priv->remain_on_ch_params.listen_cookie = id;
+ *cookie = id;
+ priv->p2p_listen_state = true;
priv->remain_on_ch_params.listen_duration = duration;
- priv->remain_on_ch_params.listen_session_id++;
- return wilc_remain_on_channel(vif,
- priv->remain_on_ch_params.listen_session_id,
- duration, chan->hw_value,
- wilc_wfi_remain_on_channel_expired,
- wilc_wfi_remain_on_channel_ready, (void *)priv);
+ cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL);
+ mod_timer(&vif->hif_drv->remain_on_ch_timer,
+ jiffies + msecs_to_jiffies(duration));
+
+ return ret;
}
static int cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1457,12 +1182,14 @@ static int cancel_remain_on_channel(struct wiphy *wiphy,
struct wilc_priv *priv = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(priv->dev);
- return wilc_listen_state_expired(vif,
- priv->remain_on_ch_params.listen_session_id);
+ if (cookie != priv->remain_on_ch_params.listen_cookie)
+ return -ENOENT;
+
+ return wilc_listen_state_expired(vif, cookie);
}
static void wilc_wfi_cfg_tx_vendor_spec(struct wilc_priv *priv,
- struct p2p_mgmt_data *mgmt_tx,
+ struct wilc_p2p_mgmt_data *mgmt_tx,
struct cfg80211_mgmt_tx_params *params,
u8 iftype, u32 buf_len)
{
@@ -1470,6 +1197,7 @@ static void wilc_wfi_cfg_tx_vendor_spec(struct wilc_priv *priv,
size_t len = params->len;
u32 i;
u8 subtype = buf[P2P_PUB_ACTION_SUBTYPE];
+ struct wilc_vif *vif = netdev_priv(priv->dev);
if (subtype == GO_NEG_REQ || subtype == GO_NEG_RSP) {
if (priv->p2p.local_random == 1 &&
@@ -1494,7 +1222,8 @@ static void wilc_wfi_cfg_tx_vendor_spec(struct wilc_priv *priv,
oper_ch = true;
wilc_wfi_cfg_parse_tx_action(tx_buff, len - (i + 6),
- oper_ch, iftype);
+ oper_ch, iftype,
+ vif->wilc->sta_ch);
break;
}
@@ -1520,14 +1249,14 @@ static int mgmt_tx(struct wiphy *wiphy,
const u8 *buf = params->buf;
size_t len = params->len;
const struct ieee80211_mgmt *mgmt;
- struct p2p_mgmt_data *mgmt_tx;
+ struct wilc_p2p_mgmt_data *mgmt_tx;
struct wilc_priv *priv = wiphy_priv(wiphy);
struct host_if_drv *wfi_drv = priv->hif_drv;
struct wilc_vif *vif = netdev_priv(wdev->netdev);
u32 buf_len = len + sizeof(p2p_vendor_spec) + sizeof(priv->p2p.local_random);
int ret = 0;
- *cookie = (unsigned long)buf;
+ *cookie = prandom_u32();
priv->tx_cookie = *cookie;
mgmt = (const struct ieee80211_mgmt *)buf;
@@ -1552,7 +1281,7 @@ static int mgmt_tx(struct wiphy *wiphy,
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
wilc_set_mac_chnl_num(vif, chan->hw_value);
- curr_channel = chan->hw_value;
+ vif->wilc->op_ch = chan->hw_value;
goto out_txq_add_pkt;
}
@@ -1563,7 +1292,7 @@ static int mgmt_tx(struct wiphy *wiphy,
if (buf[ACTION_SUBTYPE_ID] != PUBLIC_ACT_VENDORSPEC ||
buf[P2P_PUB_ACTION_SUBTYPE] != GO_NEG_CONF) {
wilc_set_mac_chnl_num(vif, chan->hw_value);
- curr_channel = chan->hw_value;
+ vif->wilc->op_ch = chan->hw_value;
}
switch (buf[ACTION_SUBTYPE_ID]) {
case GAS_INITIAL_REQ:
@@ -1755,7 +1484,7 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
case NL80211_IFTYPE_P2P_GO:
vif->obtaining_ip = true;
mod_timer(&vif->during_ip_timer,
- jiffies + msecs_to_jiffies(DURING_IP_TIME_OUT));
+ jiffies + msecs_to_jiffies(WILC_IP_TIMEOUT_MS));
wilc_set_operation_mode(vif, WILC_AP_MODE);
dev->ieee80211_ptr->iftype = type;
priv->wdev->iftype = type;
@@ -1805,9 +1534,8 @@ static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
int ret;
struct wilc_priv *priv = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(priv->dev);
- u8 null_bssid[ETH_ALEN] = {0};
- wilc_wlan_set_bssid(dev, null_bssid, WILC_AP_MODE);
+ wilc_wlan_set_bssid(dev, NULL, WILC_AP_MODE);
ret = wilc_del_beacon(vif);
@@ -1884,7 +1612,8 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
struct net_device *new_ifc;
if (type == NL80211_IFTYPE_MONITOR) {
- new_ifc = wilc_wfi_init_mon_interface(name, vif->ndev);
+ new_ifc = wilc_wfi_init_mon_interface(vif->wilc, name,
+ vif->ndev);
if (new_ifc) {
vif = netdev_priv(priv->wdev->netdev);
vif->monitor_flag = 1;
@@ -2102,7 +1831,6 @@ int wilc_init_host_int(struct net_device *net)
struct wilc_priv *priv = wdev_priv(net->ieee80211_ptr);
struct wilc_vif *vif = netdev_priv(priv->dev);
- timer_setup(&priv->aging_timer, remove_network_from_shadow, 0);
timer_setup(&vif->during_ip_timer, clear_during_ip, 0);
priv->p2p_listen_state = false;
@@ -2126,8 +1854,6 @@ void wilc_deinit_host_int(struct net_device *net)
mutex_destroy(&priv->scan_req_lock);
ret = wilc_deinit(vif);
- del_timer_sync(&priv->aging_timer);
- clear_shadow_scan(priv);
del_timer_sync(&vif->during_ip_timer);
if (ret)
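
The wilc_wfi_cfgoperations.c changes above drop the driver's private "shadow" scan cache and hand received frames straight to cfg80211. A minimal sketch of that reporting pattern, for reference only (the helper name and parameters are illustrative, not the driver's exact code):

#include <net/cfg80211.h>

/* Hand one received beacon/probe response frame to cfg80211. cfg80211 keeps
 * its own copy of the frame, so the reference returned by
 * cfg80211_inform_bss_frame() is dropped again right away.
 */
static void report_rx_frame(struct wiphy *wiphy, struct ieee80211_channel *chan,
			    struct ieee80211_mgmt *mgmt, size_t len, s32 rssi_dbm)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame(wiphy, chan, mgmt, len,
					rssi_dbm * 100, /* signal in mBm */
					GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(wiphy, bss);
}

connect() follows the mirror-image pattern: cfg80211_get_bss() takes a reference on the cached entry, and every exit path has to balance it with cfg80211_put_bss(), which is what the out_put_bss label in the hunk above does.
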
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
index 4812c8e2c79b..31dfa1f141f1 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
@@ -13,9 +13,10 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net,
void wilc_free_wiphy(struct net_device *net);
void wilc_deinit_host_int(struct net_device *net);
int wilc_init_host_int(struct net_device *net);
-void wilc_wfi_monitor_rx(u8 *buff, u32 size);
-void wilc_wfi_deinit_mon_interface(void);
-struct net_device *wilc_wfi_init_mon_interface(const char *name,
+void wilc_wfi_monitor_rx(struct net_device *mon_dev, u8 *buff, u32 size);
+void wilc_wfi_deinit_mon_interface(struct wilc *wl);
+struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
+ const char *name,
struct net_device *real_dev);
void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
u16 frame_type, bool reg);
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index c6685c0c238b..df00762487c0 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -65,7 +65,6 @@ struct wilc_wfi_p2p_listen_params {
struct ieee80211_channel *listen_ch;
u32 listen_duration;
u64 listen_cookie;
- u32 listen_session_id;
};
struct wilc_p2p_var {
@@ -137,7 +136,6 @@ struct wilc_priv {
u64 tx_cookie;
bool cfg_scanning;
- u32 rcvd_ch_cnt;
u8 associated_bss[ETH_ALEN];
struct sta_info assoc_stainfo;
@@ -155,8 +153,6 @@ struct wilc_priv {
/* mutexes */
struct mutex scan_req_lock;
bool p2p_listen_state;
- struct timer_list aging_timer;
- struct network_info scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
int scanned_cnt;
struct wilc_p2p_var p2p;
@@ -164,6 +160,7 @@ struct wilc_priv {
struct ieee80211_rate bitrates[ARRAY_SIZE(wilc_bitrates)];
struct ieee80211_supported_band band;
u32 cipher_suites[ARRAY_SIZE(wilc_cipher_suites)];
+ u64 inc_roc_cookie;
};
struct frame_reg {
@@ -251,7 +248,7 @@ struct wilc {
struct mutex cfg_cmd_lock;
struct wilc_cfg_frame cfg_frame;
u32 cfg_frame_offset;
- int cfg_seq_no;
+ u8 cfg_seq_no;
u8 *rx_buffer;
u32 rx_buffer_offset;
@@ -273,13 +270,18 @@ struct wilc {
enum chip_ps_states chip_ps_state;
struct wilc_cfg cfg;
void *bus_data;
+ struct net_device *monitor_dev;
+ /* deinit lock */
+ struct mutex deinit_lock;
+ u8 sta_ch;
+ u8 op_ch;
};
struct wilc_wfi_mon_priv {
struct net_device *real_ndev;
};
-void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
+void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc);
void wilc_netdev_cleanup(struct wilc *wilc);
int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
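
struct wilc and struct wilc_priv now carry per-device state (sta_ch, op_ch, inc_roc_cookie) in place of the old file-scope statics. The remain_on_channel() hunk earlier allocates listen cookies from that counter while skipping zero; the same logic pulled out as a helper, purely for illustration:

/* Assumed context: priv->inc_roc_cookie is the u64 counter added above.
 * Cookie 0 is reserved as "no pending request", so it is skipped on wrap.
 */
static u64 wilc_next_roc_cookie(struct wilc_priv *priv)
{
	u64 id = ++priv->inc_roc_cookie;

	if (id == 0)
		id = ++priv->inc_roc_cookie;
	return id;
}
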
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 3c5e9e030cad..c2389695fe20 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -274,7 +274,8 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
}
int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
- u32 buffer_size, wilc_tx_complete_func_t func)
+ u32 buffer_size,
+ void (*tx_complete_fn)(void *, int))
{
struct txq_entry_t *tqe;
struct wilc_vif *vif = netdev_priv(dev);
@@ -292,7 +293,7 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
tqe->type = WILC_NET_PKT;
tqe->buffer = buffer;
tqe->buffer_size = buffer_size;
- tqe->tx_complete_func = func;
+ tqe->tx_complete_func = tx_complete_fn;
tqe->priv = priv;
tqe->ack_idx = NOT_TCP_ACK;
@@ -303,7 +304,8 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
}
int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
- u32 buffer_size, wilc_tx_complete_func_t func)
+ u32 buffer_size,
+ void (*tx_complete_fn)(void *, int))
{
struct txq_entry_t *tqe;
struct wilc_vif *vif = netdev_priv(dev);
@@ -321,7 +323,7 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
tqe->type = WILC_MGMT_PKT;
tqe->buffer = buffer;
tqe->buffer_size = buffer_size;
- tqe->tx_complete_func = func;
+ tqe->tx_complete_func = tx_complete_fn;
tqe->priv = priv;
tqe->ack_idx = NOT_TCP_ACK;
wilc_wlan_txq_add_to_tail(dev, tqe);
@@ -521,7 +523,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (vmm_sz & 0x3)
vmm_sz = (vmm_sz + 4) & ~0x3;
- if ((sum + vmm_sz) > LINUX_TX_SIZE)
+ if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE)
break;
vmm_table[i] = vmm_sz / 4;
@@ -715,9 +717,8 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
} else {
if (!is_cfg_packet) {
if (pkt_len > 0) {
- wilc_frmw_to_linux(wilc, buff_ptr,
- pkt_len,
- pkt_offset);
+ wilc_frmw_to_host(wilc, buff_ptr,
+ pkt_len, pkt_offset);
}
} else {
struct wilc_cfg_rsp rsp;
@@ -809,7 +810,7 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
if (size <= 0)
return;
- if (LINUX_RX_SIZE - offset < size)
+ if (WILC_RX_BUFF_SIZE - offset < size)
offset = 0;
buffer = &wilc->rx_buffer[offset];
@@ -1092,24 +1093,19 @@ static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type,
{
struct wilc *wilc = vif->wilc;
struct wilc_cfg_frame *cfg = &wilc->cfg_frame;
- int total_len = wilc->cfg_frame_offset + 4 + DRIVER_HANDLER_SIZE;
- int seq_no = wilc->cfg_seq_no % 256;
- int driver_handler = (u32)drv_handler;
+ int t_len = wilc->cfg_frame_offset + sizeof(struct wilc_cfg_cmd_hdr);
if (type == WILC_CFG_SET)
- cfg->wid_header[0] = 'W';
+ cfg->hdr.cmd_type = 'W';
else
- cfg->wid_header[0] = 'Q';
- cfg->wid_header[1] = seq_no;
- cfg->wid_header[2] = (u8)total_len;
- cfg->wid_header[3] = (u8)(total_len >> 8);
- cfg->wid_header[4] = (u8)driver_handler;
- cfg->wid_header[5] = (u8)(driver_handler >> 8);
- cfg->wid_header[6] = (u8)(driver_handler >> 16);
- cfg->wid_header[7] = (u8)(driver_handler >> 24);
- wilc->cfg_seq_no = seq_no;
-
- if (!wilc_wlan_txq_add_cfg_pkt(vif, &cfg->wid_header[0], total_len))
+ cfg->hdr.cmd_type = 'Q';
+
+ cfg->hdr.seq_no = wilc->cfg_seq_no % 256;
+ cfg->hdr.total_len = cpu_to_le16(t_len);
+ cfg->hdr.driver_handler = cpu_to_le32(drv_handler);
+ wilc->cfg_seq_no = cfg->hdr.seq_no;
+
+ if (!wilc_wlan_txq_add_cfg_pkt(vif, (u8 *)&cfg->hdr, t_len))
return -1;
return 0;
@@ -1144,7 +1140,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
ret_size = 0;
if (!wait_for_completion_timeout(&wilc->cfg_event,
- msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
+ WILC_CFG_PKTS_TIMEOUT)) {
netdev_dbg(vif->ndev, "%s: Timed Out\n", __func__);
ret_size = 0;
}
@@ -1182,7 +1178,7 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
ret_size = 0;
if (!wait_for_completion_timeout(&wilc->cfg_event,
- msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
+ WILC_CFG_PKTS_TIMEOUT)) {
netdev_dbg(vif->ndev, "%s: Timed Out\n", __func__);
ret_size = 0;
}
@@ -1252,21 +1248,22 @@ static u32 init_chip(struct net_device *dev)
ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
if (!ret) {
netdev_err(dev, "fail read reg 0x1118\n");
- return ret;
+ goto release;
}
reg |= BIT(0);
ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
if (!ret) {
netdev_err(dev, "fail write reg 0x1118\n");
- return ret;
+ goto release;
}
ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
if (!ret) {
netdev_err(dev, "fail write reg 0xc0000\n");
- return ret;
+ goto release;
}
}
+release:
release_bus(wilc, WILC_BUS_RELEASE_ONLY);
return ret;
@@ -1316,7 +1313,7 @@ int wilc_wlan_init(struct net_device *dev)
}
if (!wilc->tx_buffer)
- wilc->tx_buffer = kmalloc(LINUX_TX_SIZE, GFP_KERNEL);
+ wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL);
if (!wilc->tx_buffer) {
ret = -ENOBUFS;
@@ -1324,7 +1321,7 @@ int wilc_wlan_init(struct net_device *dev)
}
if (!wilc->rx_buffer)
- wilc->rx_buffer = kmalloc(LINUX_RX_SIZE, GFP_KERNEL);
+ wilc->rx_buffer = kmalloc(WILC_RX_BUFF_SIZE, GFP_KERNEL);
if (!wilc->rx_buffer) {
ret = -ENOBUFS;
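
The init_chip() hunk above fixes an unwind bug: the early returns on a failed register access used to skip release_bus(). A condensed sketch of the resulting acquire/label/release shape (acquire_bus(), WILC_BUS_ACQUIRE_ONLY and register 0x1118 are taken from the surrounding driver code and assumed here; the hif_* callbacks return 0 on failure, as in the hunk):

static int example_set_bit0(struct wilc *wilc)
{
	u32 reg;
	int ret;

	acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);

	ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
	if (!ret)
		goto release;	/* error: still unwind through release_bus() */

	reg |= BIT(0);
	ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);

release:
	release_bus(wilc, WILC_BUS_RELEASE_ONLY);
	return ret;
}
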
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index 27667131de1a..1a27f62589a2 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -14,7 +14,6 @@
* Mac eth header length
*
********************************************/
-#define DRIVER_HANDLER_SIZE 4
#define MAX_MAC_HDR_LEN 26 /* QOS_MAC_HDR_LEN */
#define SUB_MSDU_HEADER_LENGTH 14
#define SNAP_HDR_LEN 8
@@ -132,8 +131,8 @@
#define WILC_PLL_TO_SPI 2
#define ABORT_INT BIT(31)
-#define LINUX_RX_SIZE (96 * 1024)
-#define LINUX_TX_SIZE (64 * 1024)
+#define WILC_RX_BUFF_SIZE (96 * 1024)
+#define WILC_TX_BUFF_SIZE (64 * 1024)
#define MODALIAS "WILC_SPI"
#define GPIO_NUM 0x44
@@ -197,7 +196,7 @@
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
/*time for expiring the completion of cfg packets*/
-#define CFG_PKTS_TIMEOUT 2000
+#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(2000)
#define IS_MANAGMEMENT 0x100
#define IS_MANAGMEMENT_CALLBACK 0x080
@@ -249,16 +248,30 @@ struct wilc_hif_func {
void (*disable_interrupt)(struct wilc *nic);
};
-#define MAX_CFG_FRAME_SIZE 1468
+#define WILC_MAX_CFG_FRAME_SIZE 1468
+
+struct tx_complete_data {
+ int size;
+ void *buff;
+ u8 *bssid;
+ struct sk_buff *skb;
+};
+
+struct wilc_cfg_cmd_hdr {
+ u8 cmd_type;
+ u8 seq_no;
+ __le16 total_len;
+ __le32 driver_handler;
+};
struct wilc_cfg_frame {
- u8 wid_header[8];
- u8 frame[MAX_CFG_FRAME_SIZE];
+ struct wilc_cfg_cmd_hdr hdr;
+ u8 frame[WILC_MAX_CFG_FRAME_SIZE];
};
struct wilc_cfg_rsp {
- int type;
- u32 seq_no;
+ u8 type;
+ u8 seq_no;
};
struct wilc;
@@ -269,7 +282,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
int wilc_wlan_start(struct wilc *wilc);
int wilc_wlan_stop(struct wilc *wilc);
int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
- u32 buffer_size, wilc_tx_complete_func_t func);
+ u32 buffer_size,
+ void (*tx_complete_fn)(void *, int));
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count);
void wilc_handle_isr(struct wilc *wilc);
void wilc_wlan_cleanup(struct net_device *dev);
@@ -280,7 +294,7 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer,
u32 buffer_size);
int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
- u32 buffer_size, wilc_tx_complete_func_t func);
+ u32 buffer_size, void (*func)(void *, int));
void wilc_chip_sleep_manually(struct wilc *wilc);
void wilc_enable_tcp_ack_filter(struct wilc_vif *vif, bool value);
@@ -294,4 +308,6 @@ void chip_allow_sleep(struct wilc *wilc);
void chip_wakeup(struct wilc *wilc);
int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
u32 count, u32 drv);
+int wilc_wlan_init(struct net_device *dev);
+u32 wilc_get_chipid(struct wilc *wilc, bool update);
#endif
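
The new wilc_cfg_cmd_hdr replaces the open-coded u8 wid_header[8], so the commit path can fill fixed-endian fields with the standard helpers instead of shifting bytes by hand. A standalone sketch of filling such a header (the parameter values are placeholders; the struct matches the definition above):

#include <linux/types.h>
#include <asm/byteorder.h>

struct wilc_cfg_cmd_hdr {
	u8 cmd_type;		/* 'W' = set, 'Q' = query */
	u8 seq_no;
	__le16 total_len;	/* header + payload, little endian on the wire */
	__le32 driver_handler;
};

static void fill_cfg_hdr(struct wilc_cfg_cmd_hdr *hdr, bool set, u8 seq,
			 u16 payload_len, u32 drv_handler)
{
	hdr->cmd_type = set ? 'W' : 'Q';
	hdr->seq_no = seq;
	hdr->total_len = cpu_to_le16(sizeof(*hdr) + payload_len);
	hdr->driver_handler = cpu_to_le32(drv_handler);
}
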
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index 8390766358da..9dc5de4eb08d 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -54,7 +54,7 @@ static int wilc_wlan_cfg_set_byte(u8 *frame, u32 offset, u16 id, u8 val8)
{
u8 *buf;
- if ((offset + 4) >= MAX_CFG_FRAME_SIZE)
+ if ((offset + 4) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
buf = &frame[offset];
@@ -71,7 +71,7 @@ static int wilc_wlan_cfg_set_hword(u8 *frame, u32 offset, u16 id, u16 val16)
{
u8 *buf;
- if ((offset + 5) >= MAX_CFG_FRAME_SIZE)
+ if ((offset + 5) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
buf = &frame[offset];
@@ -90,7 +90,7 @@ static int wilc_wlan_cfg_set_word(u8 *frame, u32 offset, u16 id, u32 val32)
{
u8 *buf;
- if ((offset + 7) >= MAX_CFG_FRAME_SIZE)
+ if ((offset + 7) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
buf = &frame[offset];
@@ -112,7 +112,7 @@ static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str,
{
u8 *buf;
- if ((offset + size + 4) >= MAX_CFG_FRAME_SIZE)
+ if ((offset + size + 4) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
buf = &frame[offset];
@@ -134,7 +134,7 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
u32 i;
u8 checksum = 0;
- if ((offset + size + 5) >= MAX_CFG_FRAME_SIZE)
+ if ((offset + size + 5) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
buf = &frame[offset];
@@ -168,7 +168,7 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
while (size > 0) {
i = 0;
- wid = info[0] | (info[1] << 8);
+ wid = get_unaligned_le16(info);
switch (GET_WID_TYPE(wid)) {
case WID_CHAR:
@@ -187,12 +187,13 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
case WID_SHORT:
do {
- if (wl->cfg.hw[i].id == WID_NIL)
+ struct wilc_cfg_hword *hw = &wl->cfg.hw[i];
+
+ if (hw->id == WID_NIL)
break;
- if (wl->cfg.hw[i].id == wid) {
- wl->cfg.hw[i].val = (info[4] |
- (info[5] << 8));
+ if (hw->id == wid) {
+ hw->val = get_unaligned_le16(&info[4]);
break;
}
i++;
@@ -202,14 +203,13 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
case WID_INT:
do {
- if (wl->cfg.w[i].id == WID_NIL)
+ struct wilc_cfg_word *w = &wl->cfg.w[i];
+
+ if (w->id == WID_NIL)
break;
- if (wl->cfg.w[i].id == wid) {
- wl->cfg.w[i].val = (info[4] |
- (info[5] << 8) |
- (info[6] << 16) |
- (info[7] << 24));
+ if (w->id == wid) {
+ w->val = get_unaligned_le32(&info[4]);
break;
}
i++;
@@ -244,7 +244,7 @@ static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info)
{
u32 wid, len;
- wid = info[0] | (info[1] << 8);
+ wid = get_unaligned_le16(info);
len = info[2];
@@ -309,7 +309,7 @@ int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id)
{
u8 *buf;
- if ((offset + 2) >= MAX_CFG_FRAME_SIZE)
+ if ((offset + 2) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
buf = &frame[offset];
@@ -371,8 +371,7 @@ int wilc_wlan_cfg_get_wid_value(struct wilc *wl, u16 wid, u8 *buffer,
break;
if (id == wid) {
- u32 size = (wl->cfg.s[i].str[0] |
- (wl->cfg.s[i].str[1] << 8));
+ u16 size = get_unaligned_le16(wl->cfg.s[i].str);
if (buffer_size >= size) {
memcpy(buffer, &wl->cfg.s[i].str[2],
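
wilc_wlan_cfg.c now parses response frames with the generic unaligned little-endian accessors rather than assembling the bytes manually. A small sketch of the same access pattern (the 2-byte id / value-at-offset-4 layout is taken from the parser above):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Pull the 16-bit WID id and a 32-bit value out of a response buffer.
 * The helpers handle both byte order and the lack of alignment guarantees
 * on the raw buffer, which is what the removed open-coded shifts did.
 */
static void parse_wid_word(const u8 *info, u16 *wid, u32 *val)
{
	*wid = get_unaligned_le16(info);
	*val = get_unaligned_le32(&info[4]);
}
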
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index e2310d860291..b15de36e32e0 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -11,44 +11,9 @@
/********************************************
*
- * Host Interface Defines
- *
- ********************************************/
-
-enum {
- WILC_HIF_SDIO = 0,
- WILC_HIF_SPI = BIT(0)
-};
-
-/********************************************
- *
- * Wlan Interface Defines
- *
- ********************************************/
-
-enum {
- WILC_MAC_STATUS_INIT = -1,
- WILC_MAC_STATUS_DISCONNECTED = 0,
- WILC_MAC_STATUS_CONNECTED = 1
-};
-
-struct tx_complete_data {
- int size;
- void *buff;
- u8 *bssid;
- struct sk_buff *skb;
-};
-
-typedef void (*wilc_tx_complete_func_t)(void *, int);
-
-/********************************************
- *
* Wlan Configuration ID
*
********************************************/
-#define WILC_MULTICAST_TABLE_SIZE 8
-#define MAX_SSID_LEN 33
-#define MAX_RATES_SUPPORTED 12
enum bss_types {
WILC_FW_BSS_TYPE_INFRA = 0,
@@ -689,7 +654,6 @@ enum {
WID_TX_POWER_LEVEL_11N = 0x00B1,
/* Custom Character WID list */
- WID_PC_TEST_MODE = 0x00C8,
/* SCAN Complete notification WID*/
WID_SCAN_COMPLETE = 0x00C9,
@@ -836,8 +800,4 @@ enum {
WID_MAX = 0xFFFF
};
-struct wilc;
-int wilc_wlan_init(struct net_device *dev);
-u32 wilc_get_chipid(struct wilc *wilc, bool update);
-
#endif
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index 426d4efbabc3..97238018b315 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -4,7 +4,7 @@ config PRISM2_USB
select WIRELESS_EXT
select WEXT_PRIV
default n
- ---help---
+ help
This is the wlan-ng prism 2.5/3 USB driver for a wide range of
old USB wireless devices.
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index e5d7def1f366..8a862f718d5c 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -70,7 +70,8 @@ static int prism2_result2err(int prism2_result)
return err;
}
-static int prism2_domibset_uint32(struct wlandevice *wlandev, u32 did, u32 data)
+static int prism2_domibset_uint32(struct wlandevice *wlandev,
+ u32 did, u32 data)
{
struct p80211msg_dot11req_mibset msg;
struct p80211item_uint32 *mibitem =
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index bb572b7fdfee..94800c007162 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -556,10 +556,9 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
/* Allocate buffer space for chunks */
for (i = 0; i < *ccnt; i++) {
clist[i].data = kzalloc(clist[i].len, GFP_KERNEL);
- if (!clist[i].data) {
- pr_err("failed to allocate image space, exiting.\n");
+ if (!clist[i].data)
return 1;
- }
+
pr_debug("chunk[%d]: addr=0x%06x len=%d\n",
i, clist[i].addr, clist[i].len);
}
diff --git a/drivers/staging/xgifb/Kconfig b/drivers/staging/xgifb/Kconfig
deleted file mode 100644
index bb0ca5974ea0..000000000000
--- a/drivers/staging/xgifb/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config FB_XGI
- tristate "XGI display support"
- depends on FB && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This driver supports notebooks with XGI Z7,Z9,Z11 PCI chips.
- Say Y if you have such a graphics card.
- To compile this driver as a module, choose M here: the
- module will be called xgifb.ko
diff --git a/drivers/staging/xgifb/Makefile b/drivers/staging/xgifb/Makefile
deleted file mode 100644
index 964a843c4521..000000000000
--- a/drivers/staging/xgifb/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_FB_XGI) += xgifb.o
-
-xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o
-
diff --git a/drivers/staging/xgifb/TODO b/drivers/staging/xgifb/TODO
deleted file mode 100644
index 7eb99140a399..000000000000
--- a/drivers/staging/xgifb/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-This drivers still needs a lot of work. I can list all cleanups to do but it's
-going to be long. So, I'm writing "cleanups" and not the list.
-
-Arnaud
-
-TODO:
-- clean ups
-- sort out dup ids with SiS driver
-- remove useless/wrong/unused code...
-- get rid of non-linux related stuff
-
-Please send patches to:
-Arnaud Patard <arnaud.patard@rtp-net.org>
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
deleted file mode 100644
index e19a8291cb2a..000000000000
--- a/drivers/staging/xgifb/XGI_main.h
+++ /dev/null
@@ -1,365 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XGIFB_MAIN
-#define _XGIFB_MAIN
-/* ------------------- Constant Definitions ------------------------- */
-#include "XGIfb.h"
-#include "vb_def.h"
-
-#define PCI_DEVICE_ID_XGI_42 0x042
-#define PCI_DEVICE_ID_XGI_27 0x027
-
-static const struct pci_device_id xgifb_pci_table[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_20)},
- {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_27)},
- {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_40)},
- {PCI_DEVICE(PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XGI_42)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
-
-#define IND_XGI_SCRATCH_REG_CR30 0x30 /* CRs */
-#define IND_XGI_SCRATCH_REG_CR31 0x31
-#define IND_XGI_SCRATCH_REG_CR32 0x32
-#define IND_XGI_SCRATCH_REG_CR33 0x33
-#define IND_XGI_LCD_PANEL 0x36
-#define IND_XGI_SCRATCH_REG_CR37 0x37
-
-#define XGI_DRAM_SIZE_MASK 0xF0 /*SR14 */
-#define XGI_DRAM_SIZE_1MB 0x00
-#define XGI_DRAM_SIZE_2MB 0x01
-#define XGI_DRAM_SIZE_4MB 0x02
-#define XGI_DRAM_SIZE_8MB 0x03
-#define XGI_DRAM_SIZE_16MB 0x04
-#define XGI_DRAM_SIZE_32MB 0x05
-#define XGI_DRAM_SIZE_64MB 0x06
-#define XGI_DRAM_SIZE_128MB 0x07
-#define XGI_DRAM_SIZE_256MB 0x08
-
-/* ------------------- Global Variables ----------------------------- */
-
-/* display status */
-static int XGIfb_crt1off;
-static int XGIfb_forcecrt1 = -1;
-
-/* global flags */
-static int XGIfb_tvmode;
-static int enable_dstn;
-static int XGIfb_ypan = -1;
-
-/* TW: CRT2 type (for overriding autodetection) */
-static int XGIfb_crt2type = -1;
-/* PR: Tv plug type (for overriding autodetection) */
-static int XGIfb_tvplug = -1;
-
-#define MD_XGI315 1
-
-/* mode table */
-static const struct _XGIbios_mode {
- u8 mode_no;
- u16 vesa_mode_no_1; /* "XGI defined" VESA mode number */
- u16 vesa_mode_no_2; /* Real VESA mode numbers */
- u16 xres;
- u16 yres;
- u16 bpp;
- u8 chipset;
-} XGIbios_mode[] = {
- { 0x56, 0x0000, 0x0000, 320, 240, 16, MD_XGI315 },
- { 0x5A, 0x0000, 0x0000, 320, 480, 8, MD_XGI315 },
- { 0x5B, 0x0000, 0x0000, 320, 480, 16, MD_XGI315 },
- { 0x2E, 0x0101, 0x0101, 640, 480, 8, MD_XGI315 },
- { 0x44, 0x0111, 0x0111, 640, 480, 16, MD_XGI315 },
- { 0x62, 0x013a, 0x0112, 640, 480, 32, MD_XGI315 },
- { 0x31, 0x0000, 0x0000, 720, 480, 8, MD_XGI315 },
- { 0x33, 0x0000, 0x0000, 720, 480, 16, MD_XGI315 },
- { 0x35, 0x0000, 0x0000, 720, 480, 32, MD_XGI315 },
- { 0x32, 0x0000, 0x0000, 720, 576, 8, MD_XGI315 },
- { 0x34, 0x0000, 0x0000, 720, 576, 16, MD_XGI315 },
- { 0x36, 0x0000, 0x0000, 720, 576, 32, MD_XGI315 },
- { 0x36, 0x0000, 0x0000, 720, 576, 32, MD_XGI315 },
- { 0x70, 0x0000, 0x0000, 800, 480, 8, MD_XGI315 },
- { 0x7a, 0x0000, 0x0000, 800, 480, 16, MD_XGI315 },
- { 0x76, 0x0000, 0x0000, 800, 480, 32, MD_XGI315 },
- { 0x30, 0x0103, 0x0103, 800, 600, 8, MD_XGI315 },
-#define DEFAULT_MODE 17 /* index for 800x600x16 */
- { 0x47, 0x0114, 0x0114, 800, 600, 16, MD_XGI315 },
- { 0x63, 0x013b, 0x0115, 800, 600, 32, MD_XGI315 },
- { 0x71, 0x0000, 0x0000, 1024, 576, 8, MD_XGI315 },
- { 0x74, 0x0000, 0x0000, 1024, 576, 16, MD_XGI315 },
- { 0x77, 0x0000, 0x0000, 1024, 576, 32, MD_XGI315 },
- { 0x77, 0x0000, 0x0000, 1024, 576, 32, MD_XGI315 },
- { 0x20, 0x0000, 0x0000, 1024, 600, 8, },
- { 0x21, 0x0000, 0x0000, 1024, 600, 16, },
- { 0x22, 0x0000, 0x0000, 1024, 600, 32, },
- { 0x38, 0x0105, 0x0105, 1024, 768, 8, MD_XGI315 },
- { 0x4A, 0x0117, 0x0117, 1024, 768, 16, MD_XGI315 },
- { 0x64, 0x013c, 0x0118, 1024, 768, 32, MD_XGI315 },
- { 0x64, 0x013c, 0x0118, 1024, 768, 32, MD_XGI315 },
- { 0x23, 0x0000, 0x0000, 1152, 768, 8, },
- { 0x24, 0x0000, 0x0000, 1152, 768, 16, },
- { 0x25, 0x0000, 0x0000, 1152, 768, 32, },
- { 0x79, 0x0000, 0x0000, 1280, 720, 8, MD_XGI315 },
- { 0x75, 0x0000, 0x0000, 1280, 720, 16, MD_XGI315 },
- { 0x78, 0x0000, 0x0000, 1280, 720, 32, MD_XGI315 },
- { 0x23, 0x0000, 0x0000, 1280, 768, 8, MD_XGI315 },
- { 0x24, 0x0000, 0x0000, 1280, 768, 16, MD_XGI315 },
- { 0x25, 0x0000, 0x0000, 1280, 768, 32, MD_XGI315 },
- { 0x7C, 0x0000, 0x0000, 1280, 960, 8, MD_XGI315 },
- { 0x7D, 0x0000, 0x0000, 1280, 960, 16, MD_XGI315 },
- { 0x7E, 0x0000, 0x0000, 1280, 960, 32, MD_XGI315 },
- { 0x3A, 0x0107, 0x0107, 1280, 1024, 8, MD_XGI315 },
- { 0x4D, 0x011a, 0x011a, 1280, 1024, 16, MD_XGI315 },
- { 0x65, 0x013d, 0x011b, 1280, 1024, 32, MD_XGI315 },
- { 0x26, 0x0000, 0x0000, 1400, 1050, 8, MD_XGI315 },
- { 0x27, 0x0000, 0x0000, 1400, 1050, 16, MD_XGI315 },
- { 0x28, 0x0000, 0x0000, 1400, 1050, 32, MD_XGI315 },
- { 0x3C, 0x0130, 0x011c, 1600, 1200, 8, MD_XGI315 },
- { 0x3D, 0x0131, 0x011e, 1600, 1200, 16, MD_XGI315 },
- { 0x66, 0x013e, 0x011f, 1600, 1200, 32, MD_XGI315 },
- { 0x68, 0x013f, 0x0000, 1920, 1440, 8, MD_XGI315 },
- { 0x69, 0x0140, 0x0000, 1920, 1440, 16, MD_XGI315 },
- { 0x6B, 0x0141, 0x0000, 1920, 1440, 32, MD_XGI315 },
- { 0x6c, 0x0000, 0x0000, 2048, 1536, 8, MD_XGI315 },
- { 0x6d, 0x0000, 0x0000, 2048, 1536, 16, MD_XGI315 },
- { 0x6e, 0x0000, 0x0000, 2048, 1536, 32, MD_XGI315 },
- { 0 },
-};
-
-static const unsigned short XGI310paneltype[] = {
- LCD_UNKNOWN, LCD_800x600, LCD_1024x768, LCD_1280x1024,
- LCD_640x480, LCD_1024x600, LCD_1152x864, LCD_1280x960,
- LCD_1152x768, LCD_1400x1050, LCD_1280x768, LCD_1600x1200,
- LCD_1024x768, LCD_1024x768, LCD_1024x768};
-
-static const struct _XGI_crt2type {
- char name[10];
- int type_no;
- int tvplug_no;
-} XGI_crt2type[] = {
- {"NONE", 0, -1},
- {"LCD", XGIFB_DISP_LCD, -1},
- {"TV", XGIFB_DISP_TV, -1},
- {"VGA", XGIFB_DISP_CRT, -1},
- {"SVIDEO", XGIFB_DISP_TV, TVPLUG_SVIDEO},
- {"COMPOSITE", XGIFB_DISP_TV, TVPLUG_COMPOSITE},
- {"SCART", XGIFB_DISP_TV, TVPLUG_SCART},
- {"none", 0, -1},
- {"lcd", XGIFB_DISP_LCD, -1},
- {"tv", XGIFB_DISP_TV, -1},
- {"vga", XGIFB_DISP_CRT, -1},
- {"svideo", XGIFB_DISP_TV, TVPLUG_SVIDEO},
- {"composite", XGIFB_DISP_TV, TVPLUG_COMPOSITE},
- {"scart", XGIFB_DISP_TV, TVPLUG_SCART},
- {"\0", -1, -1}
-};
-
-/* TV standard */
-static const struct _XGI_tvtype {
- char name[6];
- int type_no;
-} XGI_tvtype[] = {
- {"PAL", 1},
- {"NTSC", 2},
- {"pal", 1},
- {"ntsc", 2},
- {"\0", -1}
-};
-
-static const struct _XGI_vrate {
- u16 idx;
- u16 xres;
- u16 yres;
- u16 refresh;
-} XGIfb_vrate[] = {
- {1, 640, 480, 60}, {2, 640, 480, 72},
- {3, 640, 480, 75}, {4, 640, 480, 85},
-
- {5, 640, 480, 100}, {6, 640, 480, 120},
- {7, 640, 480, 160}, {8, 640, 480, 200},
-
- {1, 720, 480, 60},
- {1, 720, 576, 58},
- {1, 800, 480, 60}, {2, 800, 480, 75}, {3, 800, 480, 85},
- {1, 800, 600, 60}, {2, 800, 600, 72}, {3, 800, 600, 75},
- {4, 800, 600, 85}, {5, 800, 600, 100},
- {6, 800, 600, 120}, {7, 800, 600, 160},
-
- {1, 1024, 768, 60}, {2, 1024, 768, 70}, {3, 1024, 768, 75},
- {4, 1024, 768, 85}, {5, 1024, 768, 100}, {6, 1024, 768, 120},
- {1, 1024, 576, 60}, {2, 1024, 576, 75}, {3, 1024, 576, 85},
- {1, 1024, 600, 60},
- {1, 1152, 768, 60},
- {1, 1280, 720, 60}, {2, 1280, 720, 75}, {3, 1280, 720, 85},
- {1, 1280, 768, 60},
- {1, 1280, 1024, 60}, {2, 1280, 1024, 75}, {3, 1280, 1024, 85},
- {1, 1280, 960, 70},
- {1, 1400, 1050, 60},
- {1, 1600, 1200, 60}, {2, 1600, 1200, 65},
- {3, 1600, 1200, 70}, {4, 1600, 1200, 75},
-
- {5, 1600, 1200, 85}, {6, 1600, 1200, 100},
- {7, 1600, 1200, 120},
-
- {1, 1920, 1440, 60}, {2, 1920, 1440, 65},
- {3, 1920, 1440, 70}, {4, 1920, 1440, 75},
-
- {5, 1920, 1440, 85}, {6, 1920, 1440, 100},
- {1, 2048, 1536, 60}, {2, 2048, 1536, 65},
- {3, 2048, 1536, 70}, {4, 2048, 1536, 75},
-
- {5, 2048, 1536, 85},
- {0, 0, 0, 0}
-};
-
-static const struct _XGI_TV_filter {
- u8 filter[9][4];
-} XGI_TV_filter[] = {
- { { {0x00, 0x00, 0x00, 0x40}, /* NTSCFilter_0 */
- {0x00, 0xE0, 0x10, 0x60},
- {0x00, 0xEE, 0x10, 0x44},
- {0x00, 0xF4, 0x10, 0x38},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xFC, 0xFB, 0x14, 0x2A},
- {0x00, 0x00, 0x10, 0x20},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* NTSCFilter_1 */
- {0x00, 0xE0, 0x10, 0x60},
- {0x00, 0xEE, 0x10, 0x44},
- {0x00, 0xF4, 0x10, 0x38},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xFC, 0xFB, 0x14, 0x2A},
- {0x00, 0x00, 0x10, 0x20},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* NTSCFilter_2 */
- {0xF5, 0xEE, 0x1B, 0x44},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xEB, 0x04, 0x25, 0x18},
- {0xF1, 0x05, 0x1F, 0x16},
- {0xF6, 0x06, 0x1A, 0x14},
- {0xFA, 0x06, 0x16, 0x14},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* NTSCFilter_3 */
- {0xF1, 0x04, 0x1F, 0x18},
- {0xEE, 0x0D, 0x22, 0x06},
- {0xF7, 0x06, 0x19, 0x14},
- {0xF4, 0x0B, 0x1C, 0x0A},
- {0xFA, 0x07, 0x16, 0x12},
- {0xF9, 0x0A, 0x17, 0x0C},
- {0x00, 0x07, 0x10, 0x12},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* NTSCFilter_4 */
- {0x00, 0xE0, 0x10, 0x60},
- {0x00, 0xEE, 0x10, 0x44},
- {0x00, 0xF4, 0x10, 0x38},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xFC, 0xFB, 0x14, 0x2A},
- {0x00, 0x00, 0x10, 0x20},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* NTSCFilter_5 */
- {0xF5, 0xEE, 0x1B, 0x44},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xEB, 0x04, 0x25, 0x18},
- {0xF1, 0x05, 0x1F, 0x16},
- {0xF6, 0x06, 0x1A, 0x14},
- {0xFA, 0x06, 0x16, 0x14},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* NTSCFilter_6 */
- {0xEB, 0x04, 0x25, 0x18},
- {0xE7, 0x0E, 0x29, 0x04},
- {0xEE, 0x0C, 0x22, 0x08},
- {0xF6, 0x0B, 0x1A, 0x0A},
- {0xF9, 0x0A, 0x17, 0x0C},
- {0xFC, 0x0A, 0x14, 0x0C},
- {0x00, 0x08, 0x10, 0x10},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* NTSCFilter_7 */
- {0xEC, 0x02, 0x24, 0x1C},
- {0xF2, 0x04, 0x1E, 0x18},
- {0xEB, 0x15, 0x25, 0xF6},
- {0xF4, 0x10, 0x1C, 0x00},
- {0xF8, 0x0F, 0x18, 0x02},
- {0x00, 0x04, 0x10, 0x18},
- {0x01, 0x06, 0x0F, 0x14},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* PALFilter_0 */
- {0x00, 0xE0, 0x10, 0x60},
- {0x00, 0xEE, 0x10, 0x44},
- {0x00, 0xF4, 0x10, 0x38},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xFC, 0xFB, 0x14, 0x2A},
- {0x00, 0x00, 0x10, 0x20},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* PALFilter_1 */
- {0x00, 0xE0, 0x10, 0x60},
- {0x00, 0xEE, 0x10, 0x44},
- {0x00, 0xF4, 0x10, 0x38},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xFC, 0xFB, 0x14, 0x2A},
- {0x00, 0x00, 0x10, 0x20},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* PALFilter_2 */
- {0xF5, 0xEE, 0x1B, 0x44},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xF1, 0xF7, 0x01, 0x32},
- {0xF5, 0xFB, 0x1B, 0x2A},
- {0xF9, 0xFF, 0x17, 0x22},
- {0xFB, 0x01, 0x15, 0x1E},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* PALFilter_3 */
- {0xF5, 0xFB, 0x1B, 0x2A},
- {0xEE, 0xFE, 0x22, 0x24},
- {0xF3, 0x00, 0x1D, 0x20},
- {0xF9, 0x03, 0x17, 0x1A},
- {0xFB, 0x02, 0x14, 0x1E},
- {0xFB, 0x04, 0x15, 0x18},
- {0x00, 0x06, 0x10, 0x14},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* PALFilter_4 */
- {0x00, 0xE0, 0x10, 0x60},
- {0x00, 0xEE, 0x10, 0x44},
- {0x00, 0xF4, 0x10, 0x38},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xFC, 0xFB, 0x14, 0x2A},
- {0x00, 0x00, 0x10, 0x20},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* PALFilter_5 */
- {0xF5, 0xEE, 0x1B, 0x44},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xF1, 0xF7, 0x1F, 0x32},
- {0xF5, 0xFB, 0x1B, 0x2A},
- {0xF9, 0xFF, 0x17, 0x22},
- {0xFB, 0x01, 0x15, 0x1E},
- {0x00, 0x04, 0x10, 0x18},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* PALFilter_6 */
- {0xF5, 0xEE, 0x1B, 0x2A},
- {0xEE, 0xFE, 0x22, 0x24},
- {0xF3, 0x00, 0x1D, 0x20},
- {0xF9, 0x03, 0x17, 0x1A},
- {0xFB, 0x02, 0x14, 0x1E},
- {0xFB, 0x04, 0x15, 0x18},
- {0x00, 0x06, 0x10, 0x14},
- {0xFF, 0xFF, 0xFF, 0xFF} } },
- { { {0x00, 0x00, 0x00, 0x40}, /* PALFilter_7 */
- {0xF5, 0xEE, 0x1B, 0x44},
- {0xF8, 0xF4, 0x18, 0x38},
- {0xFC, 0xFB, 0x14, 0x2A},
- {0xEB, 0x05, 0x25, 0x16},
- {0xF1, 0x05, 0x1F, 0x16},
- {0xFA, 0x07, 0x16, 0x12},
- {0x00, 0x07, 0x10, 0x12},
- {0xFF, 0xFF, 0xFF, 0xFF} } }
-};
-
-static int filter = -1;
-
-#endif
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
deleted file mode 100644
index 217c6bb82c0d..000000000000
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ /dev/null
@@ -1,2084 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * XG20, XG21, XG40, XG42 frame buffer device
- * for Linux kernels 2.5.x, 2.6.x
- * Based on TW's sis fbdev code.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/sizes.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include "XGI_main.h"
-#include "vb_init.h"
-#include "vb_util.h"
-#include "vb_setmode.h"
-
-#define Index_CR_GPIO_Reg1 0x48
-#define Index_CR_GPIO_Reg3 0x4a
-
-#define GPIOG_EN BIT(6)
-#define GPIOG_READ BIT(1)
-
-static char *forcecrt2type;
-static char *mode;
-static int vesa = -1;
-static unsigned int refresh_rate;
-
-/* -------------------- Macro definitions ---------------------------- */
-
-#ifdef DEBUG
-static void dumpVGAReg(struct xgifb_video_info *xgifb_info)
-{
- struct vb_device_info *vb = &xgifb_info->dev_info;
- u8 i, reg;
-
- xgifb_reg_set(vb->P3c4, 0x05, 0x86);
-
- for (i = 0; i < 0x4f; i++) {
- reg = xgifb_reg_get(vb->P3c4, i);
- pr_debug("o 3c4 %x\n", i);
- pr_debug("i 3c5 => %x\n", reg);
- }
-
- for (i = 0; i < 0xF0; i++) {
- reg = xgifb_reg_get(vb->P3d4, i);
- pr_debug("o 3d4 %x\n", i);
- pr_debug("i 3d5 => %x\n", reg);
- }
-}
-#else
-static inline void dumpVGAReg(struct xgifb_video_info *xgifb_info)
-{
-}
-#endif
-
-/* --------------- Hardware Access Routines -------------------------- */
-
-static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno)
-{
- unsigned short ModeNo = modeno;
- unsigned short ModeIdIndex = 0, ClockIndex = 0;
- unsigned short RefreshRateTableIndex = 0;
-
- InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
-
- XGI_SearchModeID(ModeNo, &ModeIdIndex);
-
- RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, XGI_Pr);
-
- ClockIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
-
- return XGI_VCLKData[ClockIndex].CLOCK * 1000;
-}
-
-static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno, u32 *left_margin,
- u32 *right_margin, u32 *upper_margin,
- u32 *lower_margin, u32 *hsync_len,
- u32 *vsync_len, u32 *sync, u32 *vmode)
-{
- unsigned short ModeNo = modeno;
- unsigned short ModeIdIndex, index = 0;
- unsigned short RefreshRateTableIndex = 0;
-
- unsigned short VRE, VBE, VRS, VDE;
- unsigned short HRE, HBE, HRS, HDE;
- unsigned char sr_data, cr_data, cr_data2;
- int B, C, D, F, temp, j;
-
- InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
- if (!XGI_SearchModeID(ModeNo, &ModeIdIndex))
- return 0;
- RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, XGI_Pr);
- index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
-
- sr_data = XGI_CRT1Table[index].CR[5];
-
- HDE = XGI330_RefIndex[RefreshRateTableIndex].XRes >> 3;
-
- cr_data = XGI_CRT1Table[index].CR[3];
-
- /* Horizontal retrace (=sync) start */
- HRS = (cr_data & 0xff) | ((unsigned short)(sr_data & 0xC0) << 2);
- F = HRS - HDE - 3;
-
- sr_data = XGI_CRT1Table[index].CR[6];
-
- cr_data = XGI_CRT1Table[index].CR[2];
-
- cr_data2 = XGI_CRT1Table[index].CR[4];
-
- /* Horizontal blank end */
- HBE = (cr_data & 0x1f) | ((unsigned short)(cr_data2 & 0x80) >> 2)
- | ((unsigned short)(sr_data & 0x03) << 6);
-
- /* Horizontal retrace (=sync) end */
- HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3);
-
- temp = HBE - ((HDE - 1) & 255);
- B = (temp > 0) ? temp : (temp + 256);
-
- temp = HRE - ((HDE + F + 3) & 63);
- C = (temp > 0) ? temp : (temp + 64);
-
- D = B - F - C;
-
- *left_margin = D * 8;
- *right_margin = F * 8;
- *hsync_len = C * 8;
-
- sr_data = XGI_CRT1Table[index].CR[14];
-
- cr_data2 = XGI_CRT1Table[index].CR[9];
-
- VDE = XGI330_RefIndex[RefreshRateTableIndex].YRes;
-
- cr_data = XGI_CRT1Table[index].CR[10];
-
- /* Vertical retrace (=sync) start */
- VRS = (cr_data & 0xff) | ((unsigned short)(cr_data2 & 0x04) << 6)
- | ((unsigned short)(cr_data2 & 0x80) << 2)
- | ((unsigned short)(sr_data & 0x08) << 7);
- F = VRS + 1 - VDE;
-
- cr_data = XGI_CRT1Table[index].CR[13];
-
- /* Vertical blank end */
- VBE = (cr_data & 0xff) | ((unsigned short)(sr_data & 0x10) << 4);
- temp = VBE - ((VDE - 1) & 511);
- B = (temp > 0) ? temp : (temp + 512);
-
- cr_data = XGI_CRT1Table[index].CR[11];
-
- /* Vertical retrace (=sync) end */
- VRE = (cr_data & 0x0f) | ((sr_data & 0x20) >> 1);
- temp = VRE - ((VDE + F - 1) & 31);
- C = (temp > 0) ? temp : (temp + 32);
-
- D = B - F - C;
-
- *upper_margin = D;
- *lower_margin = F;
- *vsync_len = C;
-
- if (XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x8000)
- *sync &= ~FB_SYNC_VERT_HIGH_ACT;
- else
- *sync |= FB_SYNC_VERT_HIGH_ACT;
-
- if (XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x4000)
- *sync &= ~FB_SYNC_HOR_HIGH_ACT;
- else
- *sync |= FB_SYNC_HOR_HIGH_ACT;
-
- *vmode = FB_VMODE_NONINTERLACED;
- if (XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x0080) {
- *vmode = FB_VMODE_INTERLACED;
- } else {
- j = 0;
- while (XGI330_EModeIDTable[j].Ext_ModeID != 0xff) {
- if (XGI330_EModeIDTable[j].Ext_ModeID ==
- XGI330_RefIndex[RefreshRateTableIndex].ModeID) {
- if (XGI330_EModeIDTable[j].Ext_ModeFlag &
- DoubleScanMode) {
- *vmode = FB_VMODE_DOUBLE;
- }
- break;
- }
- j++;
- }
- }
-
- return 1;
-}
-
-void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
-{
- XGI_Pr->P3c4 = BaseAddr + 0x14;
- XGI_Pr->P3d4 = BaseAddr + 0x24;
- XGI_Pr->P3c0 = BaseAddr + 0x10;
- XGI_Pr->P3ce = BaseAddr + 0x1e;
- XGI_Pr->P3c2 = BaseAddr + 0x12;
- XGI_Pr->P3cc = BaseAddr + 0x1c;
- XGI_Pr->P3ca = BaseAddr + 0x1a;
- XGI_Pr->P3c6 = BaseAddr + 0x16;
- XGI_Pr->P3c7 = BaseAddr + 0x17;
- XGI_Pr->P3c8 = BaseAddr + 0x18;
- XGI_Pr->P3c9 = BaseAddr + 0x19;
- XGI_Pr->P3da = BaseAddr + 0x2A;
- XGI_Pr->Part0Port = BaseAddr + XGI_CRT2_PORT_00;
- /* Digital video interface registers (LCD) */
- XGI_Pr->Part1Port = BaseAddr + SIS_CRT2_PORT_04;
- /* 301 TV Encoder registers */
- XGI_Pr->Part2Port = BaseAddr + SIS_CRT2_PORT_10;
- /* 301 Macrovision registers */
- XGI_Pr->Part3Port = BaseAddr + SIS_CRT2_PORT_12;
- /* 301 VGA2 (and LCD) registers */
- XGI_Pr->Part4Port = BaseAddr + SIS_CRT2_PORT_14;
- /* 301 palette address port registers */
- XGI_Pr->Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2;
-}
-
-/* ------------------ Internal helper routines ----------------- */
-
-static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info)
-{
- int i = 0;
-
- while ((XGIbios_mode[i].mode_no != 0) &&
- (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) {
- if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) &&
- (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) &&
- (XGIbios_mode[i].bpp == 8)) {
- return i;
- }
- i++;
- }
-
- return -1;
-}
-
-static void XGIfb_search_mode(struct xgifb_video_info *xgifb_info,
- const char *name)
-{
- unsigned int xres;
- unsigned int yres;
- unsigned int bpp;
- int i;
-
- if (sscanf(name, "%ux%ux%u", &xres, &yres, &bpp) != 3)
- goto invalid_mode;
-
- if (bpp == 24)
- bpp = 32; /* That's for people who mix up color and fb depth. */
-
- for (i = 0; XGIbios_mode[i].mode_no != 0; i++)
- if (XGIbios_mode[i].xres == xres &&
- XGIbios_mode[i].yres == yres &&
- XGIbios_mode[i].bpp == bpp) {
- xgifb_info->mode_idx = i;
- return;
- }
-invalid_mode:
- pr_info("Invalid mode '%s'\n", name);
-}
-
-static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info,
- unsigned int vesamode)
-{
- int i = 0;
-
- if (vesamode == 0)
- goto invalid;
-
-	vesamode &= 0x1dff; /* Strip flag bits from the VESA mode number */
-
- while (XGIbios_mode[i].mode_no != 0) {
- if ((XGIbios_mode[i].vesa_mode_no_1 == vesamode) ||
- (XGIbios_mode[i].vesa_mode_no_2 == vesamode)) {
- xgifb_info->mode_idx = i;
- return;
- }
- i++;
- }
-
-invalid:
-	pr_info("Invalid VESA mode 0x%x\n", vesamode);
-}
-
-static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
-{
- u16 xres, yres;
- struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
- unsigned long required_mem;
-
- if (xgifb_info->chip == XG21) {
- if (xgifb_info->display2 == XGIFB_DISP_LCD) {
- xres = xgifb_info->lvds_data.LVDSHDE;
- yres = xgifb_info->lvds_data.LVDSVDE;
- if (XGIbios_mode[myindex].xres > xres)
- return -1;
- if (XGIbios_mode[myindex].yres > yres)
- return -1;
- if ((XGIbios_mode[myindex].xres < xres) &&
- (XGIbios_mode[myindex].yres < yres)) {
- if (XGIbios_mode[myindex].bpp > 8)
- return -1;
- }
- }
- goto check_memory;
- }
-
- /* FIXME: for now, all is valid on XG27 */
- if (xgifb_info->chip == XG27)
- goto check_memory;
-
- if (!(XGIbios_mode[myindex].chipset & MD_XGI315))
- return -1;
-
- switch (xgifb_info->display2) {
- case XGIFB_DISP_LCD:
- switch (hw_info->ulCRT2LCDType) {
- case LCD_640x480:
- xres = 640;
- yres = 480;
- break;
- case LCD_800x600:
- xres = 800;
- yres = 600;
- break;
- case LCD_1024x600:
- xres = 1024;
- yres = 600;
- break;
- case LCD_1024x768:
- xres = 1024;
- yres = 768;
- break;
- case LCD_1152x768:
- xres = 1152;
- yres = 768;
- break;
- case LCD_1280x960:
- xres = 1280;
- yres = 960;
- break;
- case LCD_1280x768:
- xres = 1280;
- yres = 768;
- break;
- case LCD_1280x1024:
- xres = 1280;
- yres = 1024;
- break;
- case LCD_1400x1050:
- xres = 1400;
- yres = 1050;
- break;
- case LCD_1600x1200:
- xres = 1600;
- yres = 1200;
- break;
- default:
- xres = 0;
- yres = 0;
- break;
- }
- if (XGIbios_mode[myindex].xres > xres)
- return -1;
- if (XGIbios_mode[myindex].yres > yres)
- return -1;
- if ((hw_info->ulExternalChip == 0x01) || /* LVDS */
- (hw_info->ulExternalChip == 0x05)) { /* LVDS+Chrontel */
- switch (XGIbios_mode[myindex].xres) {
- case 512:
- if (XGIbios_mode[myindex].yres != 512)
- return -1;
- if (hw_info->ulCRT2LCDType == LCD_1024x600)
- return -1;
- break;
- case 640:
- if ((XGIbios_mode[myindex].yres != 400) &&
- (XGIbios_mode[myindex].yres != 480))
- return -1;
- break;
- case 800:
- if (XGIbios_mode[myindex].yres != 600)
- return -1;
- break;
- case 1024:
- if ((XGIbios_mode[myindex].yres != 600) &&
- (XGIbios_mode[myindex].yres != 768))
- return -1;
- if ((XGIbios_mode[myindex].yres == 600) &&
- (hw_info->ulCRT2LCDType != LCD_1024x600))
- return -1;
- break;
- case 1152:
- if ((XGIbios_mode[myindex].yres) != 768)
- return -1;
- if (hw_info->ulCRT2LCDType != LCD_1152x768)
- return -1;
- break;
- case 1280:
- if ((XGIbios_mode[myindex].yres != 768) &&
- (XGIbios_mode[myindex].yres != 1024))
- return -1;
- if ((XGIbios_mode[myindex].yres == 768) &&
- (hw_info->ulCRT2LCDType != LCD_1280x768))
- return -1;
- break;
- case 1400:
- if (XGIbios_mode[myindex].yres != 1050)
- return -1;
- break;
- case 1600:
- if (XGIbios_mode[myindex].yres != 1200)
- return -1;
- break;
- default:
- return -1;
- }
- } else {
- switch (XGIbios_mode[myindex].xres) {
- case 512:
- if (XGIbios_mode[myindex].yres != 512)
- return -1;
- break;
- case 640:
- if ((XGIbios_mode[myindex].yres != 400) &&
- (XGIbios_mode[myindex].yres != 480))
- return -1;
- break;
- case 800:
- if (XGIbios_mode[myindex].yres != 600)
- return -1;
- break;
- case 1024:
- if (XGIbios_mode[myindex].yres != 768)
- return -1;
- break;
- case 1280:
- if ((XGIbios_mode[myindex].yres != 960) &&
- (XGIbios_mode[myindex].yres != 1024))
- return -1;
- if (XGIbios_mode[myindex].yres == 960) {
- if (hw_info->ulCRT2LCDType ==
- LCD_1400x1050)
- return -1;
- }
- break;
- case 1400:
- if (XGIbios_mode[myindex].yres != 1050)
- return -1;
- break;
- case 1600:
- if (XGIbios_mode[myindex].yres != 1200)
- return -1;
- break;
- default:
- return -1;
- }
- }
- break;
- case XGIFB_DISP_TV:
- switch (XGIbios_mode[myindex].xres) {
- case 512:
- case 640:
- case 800:
- break;
- case 720:
- if (xgifb_info->TV_type == TVMODE_NTSC) {
- if (XGIbios_mode[myindex].yres != 480)
- return -1;
- } else if (xgifb_info->TV_type == TVMODE_PAL) {
- if (XGIbios_mode[myindex].yres != 576)
- return -1;
- }
- /* LVDS/CHRONTEL does not support 720 */
- if (xgifb_info->hasVB == HASVB_LVDS_CHRONTEL ||
- xgifb_info->hasVB == HASVB_CHRONTEL) {
- return -1;
- }
- break;
- case 1024:
- if (xgifb_info->TV_type == TVMODE_NTSC) {
- if (XGIbios_mode[myindex].bpp == 32)
- return -1;
- }
- break;
- default:
- return -1;
- }
- break;
- case XGIFB_DISP_CRT:
- if (XGIbios_mode[myindex].xres > 1280)
- return -1;
- break;
- case XGIFB_DISP_NONE:
- break;
- }
-
-check_memory:
- required_mem = XGIbios_mode[myindex].xres * XGIbios_mode[myindex].yres *
- XGIbios_mode[myindex].bpp / 8;
- if (required_mem > xgifb_info->video_size)
- return -1;
- return myindex;
-}
-
-static void XGIfb_search_crt2type(const char *name)
-{
- int i = 0;
-
- if (!name)
- return;
-
- while (XGI_crt2type[i].type_no != -1) {
- if (!strcmp(name, XGI_crt2type[i].name)) {
- XGIfb_crt2type = XGI_crt2type[i].type_no;
- XGIfb_tvplug = XGI_crt2type[i].tvplug_no;
- break;
- }
- i++;
- }
- if (XGIfb_crt2type < 0)
- pr_info("Invalid CRT2 type: %s\n", name);
-}
-
-static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
- unsigned int rate)
-{
- u16 xres, yres;
- int i = 0;
-
- xres = XGIbios_mode[xgifb_info->mode_idx].xres;
- yres = XGIbios_mode[xgifb_info->mode_idx].yres;
-
- xgifb_info->rate_idx = 0;
-
- while (XGIfb_vrate[i].idx != 0 && XGIfb_vrate[i].xres <= xres) {
-		/* Skip entries that do not match the requested resolution */
- if ((XGIfb_vrate[i].yres != yres) ||
- (XGIfb_vrate[i].xres != xres)) {
- i++;
- continue;
- }
- if (XGIfb_vrate[i].refresh == rate) {
- xgifb_info->rate_idx = XGIfb_vrate[i].idx;
- break;
- } else if (XGIfb_vrate[i].refresh > rate) {
- if (XGIfb_vrate[i].refresh - rate <= 3) {
- pr_debug("Adjusting rate from %d up to %d\n",
- rate, XGIfb_vrate[i].refresh);
- xgifb_info->rate_idx = XGIfb_vrate[i].idx;
- xgifb_info->refresh_rate =
- XGIfb_vrate[i].refresh;
- } else if ((XGIfb_vrate[i].idx != 1) &&
- (rate - XGIfb_vrate[i - 1].refresh <= 2)) {
- pr_debug("Adjusting rate from %d down to %d\n",
- rate, XGIfb_vrate[i - 1].refresh);
- xgifb_info->rate_idx = XGIfb_vrate[i - 1].idx;
- xgifb_info->refresh_rate =
- XGIfb_vrate[i - 1].refresh;
- }
- break;
- } else if (rate - XGIfb_vrate[i].refresh <= 2) {
- pr_debug("Adjusting rate from %d down to %d\n",
- rate, XGIfb_vrate[i].refresh);
- xgifb_info->rate_idx = XGIfb_vrate[i].idx;
- break;
- }
- i++;
- }
-
- if (xgifb_info->rate_idx > 0)
- return xgifb_info->rate_idx;
- pr_info("Unsupported rate %d for %dx%d\n",
- rate, xres, yres);
- return 0;
-}
-
-static void XGIfb_search_tvstd(const char *name)
-{
- int i = 0;
-
- if (!name)
- return;
-
- while (XGI_tvtype[i].type_no != -1) {
- if (!strcmp(name, XGI_tvtype[i].name)) {
- XGIfb_tvmode = XGI_tvtype[i].type_no;
- break;
- }
- i++;
- }
-}
-
-/* ----------- FBDev related routines for all series ----------- */
-
-static void XGIfb_bpp_to_var(struct xgifb_video_info *xgifb_info,
- struct fb_var_screeninfo *var)
-{
- switch (var->bits_per_pixel) {
- case 8:
- var->red.offset = 0;
- var->green.offset = 0;
- var->blue.offset = 0;
- var->red.length = 6;
- var->green.length = 6;
- var->blue.length = 6;
- xgifb_info->video_cmap_len = 256;
- break;
- case 16:
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- xgifb_info->video_cmap_len = 16;
- break;
- case 32:
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- xgifb_info->video_cmap_len = 16;
- break;
- }
-}
-
-/* --------------------- SetMode routines ------------------------- */
-
-static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
-{
- struct vb_device_info *vb = &xgifb_info->dev_info;
- u8 cr30 = 0, cr31 = 0;
-
- cr31 = xgifb_reg_get(vb->P3d4, 0x31);
- cr31 &= ~0x60;
-
- switch (xgifb_info->display2) {
- case XGIFB_DISP_CRT:
- cr30 = SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE;
- cr31 |= SIS_DRIVER_MODE;
- break;
- case XGIFB_DISP_LCD:
- cr30 = SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE;
- cr31 |= SIS_DRIVER_MODE;
- break;
- case XGIFB_DISP_TV:
- if (xgifb_info->TV_type == TVMODE_HIVISION)
- cr30 = SIS_VB_OUTPUT_HIVISION
- | SIS_SIMULTANEOUS_VIEW_ENABLE;
- else if (xgifb_info->TV_plug == TVPLUG_SVIDEO)
- cr30 = SIS_VB_OUTPUT_SVIDEO
- | SIS_SIMULTANEOUS_VIEW_ENABLE;
- else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE)
- cr30 = SIS_VB_OUTPUT_COMPOSITE
- | SIS_SIMULTANEOUS_VIEW_ENABLE;
- else if (xgifb_info->TV_plug == TVPLUG_SCART)
- cr30 = SIS_VB_OUTPUT_SCART
- | SIS_SIMULTANEOUS_VIEW_ENABLE;
- cr31 |= SIS_DRIVER_MODE;
-
- if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL)
- cr31 |= 0x01;
- else
- cr31 &= ~0x01;
- break;
- default: /* disable CRT2 */
- cr30 = 0x00;
- cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE);
- }
-
- xgifb_reg_set(vb->P3d4, IND_XGI_SCRATCH_REG_CR30, cr30);
- xgifb_reg_set(vb->P3d4, IND_XGI_SCRATCH_REG_CR31, cr31);
- xgifb_reg_set(vb->P3d4, IND_XGI_SCRATCH_REG_CR33,
- (xgifb_info->rate_idx & 0x0F));
-}
-
-static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
-{
- struct vb_device_info *vb = &xgifb_info->dev_info;
- u8 reg;
- unsigned char doit = 1;
-
- if (xgifb_info->video_bpp == 8) {
- /*
- * We can't switch off CRT1 on LVDS/Chrontel
- * in 8bpp Modes
- */
- if ((xgifb_info->hasVB == HASVB_LVDS) ||
- (xgifb_info->hasVB == HASVB_LVDS_CHRONTEL)) {
- doit = 0;
- }
- /*
- * We can't switch off CRT1 on 301B-DH
- * in 8bpp Modes if using LCD
- */
- if (xgifb_info->display2 == XGIFB_DISP_LCD)
- doit = 0;
- }
-
- /* We can't switch off CRT1 if bridge is in slave mode */
- if (xgifb_info->hasVB != HASVB_NONE) {
- reg = xgifb_reg_get(vb->Part1Port, 0x00);
-
- if ((reg & 0x50) == 0x10)
- doit = 0;
-
- } else {
- XGIfb_crt1off = 0;
- }
-
- reg = xgifb_reg_get(vb->P3d4, 0x17);
- if ((XGIfb_crt1off) && (doit))
- reg &= ~0x80;
- else
- reg |= 0x80;
- xgifb_reg_set(vb->P3d4, 0x17, reg);
-
- xgifb_reg_and(vb->P3c4, IND_SIS_RAMDAC_CONTROL, ~0x04);
-
- if (xgifb_info->display2 == XGIFB_DISP_TV &&
- xgifb_info->hasVB == HASVB_301) {
- reg = xgifb_reg_get(vb->Part4Port, 0x01);
-
- if (reg < 0xB0) { /* Set filter for XGI301 */
- int filter_tb;
-
- switch (xgifb_info->video_width) {
- case 320:
- filter_tb = (xgifb_info->TV_type ==
- TVMODE_NTSC) ? 4 : 12;
- break;
- case 640:
- filter_tb = (xgifb_info->TV_type ==
- TVMODE_NTSC) ? 5 : 13;
- break;
- case 720:
- filter_tb = (xgifb_info->TV_type ==
- TVMODE_NTSC) ? 6 : 14;
- break;
- case 800:
- filter_tb = (xgifb_info->TV_type ==
- TVMODE_NTSC) ? 7 : 15;
- break;
- default:
- filter_tb = 0;
- filter = -1;
- break;
- }
- xgifb_reg_or(vb->Part1Port, SIS_CRT2_WENABLE_315, 0x01);
-
- if (xgifb_info->TV_type == TVMODE_NTSC) {
- xgifb_reg_and(vb->Part2Port, 0x3a, 0x1f);
-
- if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
- xgifb_reg_and(vb->Part2Port, 0x30, 0xdf);
-
- } else if (xgifb_info->TV_plug
- == TVPLUG_COMPOSITE) {
- xgifb_reg_or(vb->Part2Port, 0x30, 0x20);
-
- switch (xgifb_info->video_width) {
- case 640:
- xgifb_reg_set(vb->Part2Port,
- 0x35,
- 0xEB);
- xgifb_reg_set(vb->Part2Port,
- 0x36,
- 0x04);
- xgifb_reg_set(vb->Part2Port,
- 0x37,
- 0x25);
- xgifb_reg_set(vb->Part2Port,
- 0x38,
- 0x18);
- break;
- case 720:
- xgifb_reg_set(vb->Part2Port,
- 0x35,
- 0xEE);
- xgifb_reg_set(vb->Part2Port,
- 0x36,
- 0x0C);
- xgifb_reg_set(vb->Part2Port,
- 0x37,
- 0x22);
- xgifb_reg_set(vb->Part2Port,
- 0x38,
- 0x08);
- break;
- case 800:
- xgifb_reg_set(vb->Part2Port,
- 0x35,
- 0xEB);
- xgifb_reg_set(vb->Part2Port,
- 0x36,
- 0x15);
- xgifb_reg_set(vb->Part2Port,
- 0x37,
- 0x25);
- xgifb_reg_set(vb->Part2Port,
- 0x38,
- 0xF6);
- break;
- }
- }
-
- } else if (xgifb_info->TV_type == TVMODE_PAL) {
- xgifb_reg_and(vb->Part2Port, 0x3A, 0x1F);
-
- if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
- xgifb_reg_and(vb->Part2Port, 0x30, 0xDF);
-
- } else if (xgifb_info->TV_plug
- == TVPLUG_COMPOSITE) {
- xgifb_reg_or(vb->Part2Port, 0x30, 0x20);
-
- switch (xgifb_info->video_width) {
- case 640:
- xgifb_reg_set(vb->Part2Port,
- 0x35,
- 0xF1);
- xgifb_reg_set(vb->Part2Port,
- 0x36,
- 0xF7);
- xgifb_reg_set(vb->Part2Port,
- 0x37,
- 0x1F);
- xgifb_reg_set(vb->Part2Port,
- 0x38,
- 0x32);
- break;
- case 720:
- xgifb_reg_set(vb->Part2Port,
- 0x35,
- 0xF3);
- xgifb_reg_set(vb->Part2Port,
- 0x36,
- 0x00);
- xgifb_reg_set(vb->Part2Port,
- 0x37,
- 0x1D);
- xgifb_reg_set(vb->Part2Port,
- 0x38,
- 0x20);
- break;
- case 800:
- xgifb_reg_set(vb->Part2Port,
- 0x35,
- 0xFC);
- xgifb_reg_set(vb->Part2Port,
- 0x36,
- 0xFB);
- xgifb_reg_set(vb->Part2Port,
- 0x37,
- 0x14);
- xgifb_reg_set(vb->Part2Port,
- 0x38,
- 0x2A);
- break;
- }
- }
- }
-
- if ((filter >= 0) && (filter <= 7)) {
- const u8 *f = XGI_TV_filter[filter_tb].filter[filter];
-
- pr_debug("FilterTable[%d]-%d: %*ph\n",
- filter_tb, filter, 4, f);
- xgifb_reg_set(vb->Part2Port, 0x35, f[0]);
- xgifb_reg_set(vb->Part2Port, 0x36, f[1]);
- xgifb_reg_set(vb->Part2Port, 0x37, f[2]);
- xgifb_reg_set(vb->Part2Port, 0x38, f[3]);
- }
- }
- }
-}
-
-static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
- struct fb_info *info)
-{
- struct xgifb_video_info *xgifb_info = info->par;
- struct vb_device_info *vb = &xgifb_info->dev_info;
- struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
- unsigned int htotal = var->left_margin + var->xres + var->right_margin
- + var->hsync_len;
- unsigned int vtotal = var->upper_margin + var->yres + var->lower_margin
- + var->vsync_len;
-#if defined(__BIG_ENDIAN)
- u8 cr_data;
-#endif
- unsigned int drate = 0, hrate = 0;
- int found_mode = 0;
- int old_mode;
-
- info->var.xres_virtual = var->xres_virtual;
- info->var.yres_virtual = var->yres_virtual;
- info->var.bits_per_pixel = var->bits_per_pixel;
-
- if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED)
- vtotal <<= 1;
- else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE)
- vtotal <<= 2;
-
- if (!htotal || !vtotal) {
- pr_debug("Invalid 'var' information\n");
- return -EINVAL;
- }
- pr_debug("var->pixclock=%d, htotal=%d, vtotal=%d\n",
- var->pixclock, htotal, vtotal);
-
- if (var->pixclock) {
- drate = 1000000000 / var->pixclock;
- hrate = (drate * 1000) / htotal;
- xgifb_info->refresh_rate = (unsigned int)(hrate * 2
- / vtotal);
- } else {
- xgifb_info->refresh_rate = 60;
- }
-
- pr_debug("Change mode to %dx%dx%d-%dHz\n",
- var->xres, var->yres, var->bits_per_pixel,
- xgifb_info->refresh_rate);
-
- old_mode = xgifb_info->mode_idx;
- xgifb_info->mode_idx = 0;
-
- while ((XGIbios_mode[xgifb_info->mode_idx].mode_no != 0) &&
- (XGIbios_mode[xgifb_info->mode_idx].xres <= var->xres)) {
- if ((XGIbios_mode[xgifb_info->mode_idx].xres == var->xres) &&
- (XGIbios_mode[xgifb_info->mode_idx].yres == var->yres) &&
- (XGIbios_mode[xgifb_info->mode_idx].bpp
- == var->bits_per_pixel)) {
- found_mode = 1;
- break;
- }
- xgifb_info->mode_idx++;
- }
-
- if (found_mode)
- xgifb_info->mode_idx = XGIfb_validate_mode(xgifb_info,
- xgifb_info->mode_idx);
- else
- xgifb_info->mode_idx = -1;
-
- if (xgifb_info->mode_idx < 0) {
- pr_err("Mode %dx%dx%d not supported\n",
- var->xres, var->yres, var->bits_per_pixel);
- xgifb_info->mode_idx = old_mode;
- return -EINVAL;
- }
-
- if (XGIfb_search_refresh_rate(xgifb_info,
- xgifb_info->refresh_rate) == 0) {
- xgifb_info->rate_idx = 1;
- xgifb_info->refresh_rate = 60;
- }
-
- if (isactive) {
- XGIfb_pre_setmode(xgifb_info);
- if (XGISetModeNew(xgifb_info, hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no)
- == 0) {
- pr_err("Setting mode[0x%x] failed\n",
- XGIbios_mode[xgifb_info->mode_idx].mode_no);
- return -EINVAL;
- }
- info->fix.line_length = (info->var.xres_virtual
- * info->var.bits_per_pixel) >> 6;
-
- xgifb_reg_set(vb->P3c4, IND_SIS_PASSWORD, SIS_PASSWORD);
-
- xgifb_reg_set(vb->P3d4, 0x13, (info->fix.line_length & 0x00ff));
- xgifb_reg_set(vb->P3c4, 0x0E,
- (info->fix.line_length & 0xff00) >> 8);
-
- XGIfb_post_setmode(xgifb_info);
-
- pr_debug("Set new mode: %dx%dx%d-%d\n",
- XGIbios_mode[xgifb_info->mode_idx].xres,
- XGIbios_mode[xgifb_info->mode_idx].yres,
- XGIbios_mode[xgifb_info->mode_idx].bpp,
- xgifb_info->refresh_rate);
-
- xgifb_info->video_bpp = XGIbios_mode[xgifb_info->mode_idx].bpp;
- xgifb_info->video_vwidth = info->var.xres_virtual;
- xgifb_info->video_width =
- XGIbios_mode[xgifb_info->mode_idx].xres;
- xgifb_info->video_vheight = info->var.yres_virtual;
- xgifb_info->video_height =
- XGIbios_mode[xgifb_info->mode_idx].yres;
- xgifb_info->org_x = 0;
- xgifb_info->org_y = 0;
- xgifb_info->video_linelength = info->var.xres_virtual
- * (xgifb_info->video_bpp >> 3);
- switch (xgifb_info->video_bpp) {
- case 8:
- xgifb_info->DstColor = 0x0000;
- xgifb_info->XGI310_AccelDepth = 0x00000000;
- xgifb_info->video_cmap_len = 256;
-#if defined(__BIG_ENDIAN)
- cr_data = xgifb_reg_get(vb->P3d4, 0x4D);
- xgifb_reg_set(vb->P3d4, 0x4D, (cr_data & 0xE0));
-#endif
- break;
- case 16:
- xgifb_info->DstColor = 0x8000;
- xgifb_info->XGI310_AccelDepth = 0x00010000;
-#if defined(__BIG_ENDIAN)
- cr_data = xgifb_reg_get(vb->P3d4, 0x4D);
- xgifb_reg_set(vb->P3d4, 0x4D, ((cr_data & 0xE0) | 0x0B));
-#endif
- xgifb_info->video_cmap_len = 16;
- break;
- case 32:
- xgifb_info->DstColor = 0xC000;
- xgifb_info->XGI310_AccelDepth = 0x00020000;
- xgifb_info->video_cmap_len = 16;
-#if defined(__BIG_ENDIAN)
- cr_data = xgifb_reg_get(vb->P3d4, 0x4D);
- xgifb_reg_set(vb->P3d4, 0x4D, ((cr_data & 0xE0) | 0x15));
-#endif
- break;
- default:
- xgifb_info->video_cmap_len = 16;
- pr_err("Unsupported depth %d\n",
- xgifb_info->video_bpp);
- break;
- }
- }
- XGIfb_bpp_to_var(xgifb_info, var); /* update ARGB info */
-
- dumpVGAReg(xgifb_info);
- return 0;
-}
-
-static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct xgifb_video_info *xgifb_info = info->par;
- struct vb_device_info *vb = &xgifb_info->dev_info;
- unsigned int base;
-
- base = var->yoffset * info->var.xres_virtual + var->xoffset;
-
-	/* Scale the base address according to the depth (bpp) */
- switch (info->var.bits_per_pixel) {
- case 16:
- base >>= 1;
- break;
- case 32:
- break;
- case 8:
- default:
- base >>= 2;
- break;
- }
-
- xgifb_reg_set(vb->P3c4, IND_SIS_PASSWORD, SIS_PASSWORD);
-
- xgifb_reg_set(vb->P3d4, 0x0D, base & 0xFF);
- xgifb_reg_set(vb->P3d4, 0x0C, (base >> 8) & 0xFF);
- xgifb_reg_set(vb->P3c4, 0x0D, (base >> 16) & 0xFF);
- xgifb_reg_set(vb->P3c4, 0x37, (base >> 24) & 0x03);
- xgifb_reg_and_or(vb->P3c4, 0x37, 0xDF, (base >> 21) & 0x04);
-
- if (xgifb_info->display2 != XGIFB_DISP_NONE) {
- xgifb_reg_or(vb->Part1Port, SIS_CRT2_WENABLE_315, 0x01);
- xgifb_reg_set(vb->Part1Port, 0x06, (base & 0xFF));
- xgifb_reg_set(vb->Part1Port, 0x05, ((base >> 8) & 0xFF));
- xgifb_reg_set(vb->Part1Port, 0x04, ((base >> 16) & 0xFF));
- xgifb_reg_and_or(vb->Part1Port, 0x02, 0x7F,
- ((base >> 24) & 0x01) << 7);
- }
- return 0;
-}
-
-static int XGIfb_open(struct fb_info *info, int user)
-{
- return 0;
-}
-
-static int XGIfb_release(struct fb_info *info, int user)
-{
- return 0;
-}
-
-/* similar to sisfb_get_cmap_len */
-static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var)
-{
- return (var->bits_per_pixel == 8) ? 256 : 16;
-}
-
-static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
- unsigned int green, unsigned int blue,
- unsigned int transp, struct fb_info *info)
-{
- struct xgifb_video_info *xgifb_info = info->par;
- struct vb_device_info *vb = &xgifb_info->dev_info;
-
- if (regno >= XGIfb_get_cmap_len(&info->var))
- return 1;
-
- switch (info->var.bits_per_pixel) {
- case 8:
- outb(regno, vb->P3c8);
- outb((red >> 10), vb->P3c9);
- outb((green >> 10), vb->P3c9);
- outb((blue >> 10), vb->P3c9);
- if (xgifb_info->display2 != XGIFB_DISP_NONE) {
- outb(regno, vb->Part5Port);
- outb((red >> 8), (vb->Part5Port + 1));
- outb((green >> 8), (vb->Part5Port + 1));
- outb((blue >> 8), (vb->Part5Port + 1));
- }
- break;
- case 16:
- ((u32 *)(info->pseudo_palette))[regno] = ((red & 0xf800))
- | ((green & 0xfc00) >> 5) | ((blue & 0xf800)
- >> 11);
- break;
- case 32:
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- ((u32 *)(info->pseudo_palette))[regno] = (red << 16) | (green
- << 8) | (blue);
- break;
- }
- return 0;
-}
-
-/* ----------- FBDev related routines for all series ---------- */
-
-static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
- struct fb_info *info)
-{
- struct xgifb_video_info *xgifb_info = info->par;
-
- memset(fix, 0, sizeof(struct fb_fix_screeninfo));
-
- strncpy(fix->id, "XGI", sizeof(fix->id) - 1);
-
- /* if register_framebuffer has been called, we must lock */
- if (atomic_read(&info->count))
- mutex_lock(&info->mm_lock);
-
- fix->smem_start = xgifb_info->video_base;
- fix->smem_len = xgifb_info->video_size;
-
- /* if register_framebuffer has been called, we can unlock */
- if (atomic_read(&info->count))
- mutex_unlock(&info->mm_lock);
-
- fix->type = FB_TYPE_PACKED_PIXELS;
- fix->type_aux = 0;
- if (xgifb_info->video_bpp == 8)
- fix->visual = FB_VISUAL_PSEUDOCOLOR;
- else
- fix->visual = FB_VISUAL_DIRECTCOLOR;
- fix->xpanstep = 0;
- if (XGIfb_ypan)
- fix->ypanstep = 1;
- fix->ywrapstep = 0;
- fix->line_length = xgifb_info->video_linelength;
- fix->mmio_start = xgifb_info->mmio_base;
- fix->mmio_len = xgifb_info->mmio_size;
- fix->accel = FB_ACCEL_SIS_XABRE;
-
- return 0;
-}
-
-static int XGIfb_set_par(struct fb_info *info)
-{
- int err;
-
- err = XGIfb_do_set_var(&info->var, 1, info);
- if (err)
- return err;
- XGIfb_get_fix(&info->fix, -1, info);
- return 0;
-}
-
-static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct xgifb_video_info *xgifb_info = info->par;
- unsigned int htotal = var->left_margin + var->xres + var->right_margin
- + var->hsync_len;
- unsigned int vtotal = 0;
- unsigned int drate = 0, hrate = 0;
- int found_mode = 0;
- int search_idx;
-
- if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
- vtotal = var->upper_margin + var->yres + var->lower_margin
- + var->vsync_len;
- vtotal <<= 1;
- } else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
- vtotal = var->upper_margin + var->yres + var->lower_margin
- + var->vsync_len;
- vtotal <<= 2;
- } else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
- vtotal = var->upper_margin + (var->yres / 2)
- + var->lower_margin + var->vsync_len;
- } else
- vtotal = var->upper_margin + var->yres + var->lower_margin
- + var->vsync_len;
-
- if (!(htotal) || !(vtotal)) {
- pr_debug("No valid timing data\n");
- return -EINVAL;
- }
-
- if (var->pixclock && htotal && vtotal) {
- drate = 1000000000 / var->pixclock;
- hrate = (drate * 1000) / htotal;
- xgifb_info->refresh_rate =
- (unsigned int)(hrate * 2 / vtotal);
- pr_debug(
- "%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
- "%s: drate=%d, hrate=%d, refresh_rate=%d\n",
- __func__, var->pixclock, htotal, vtotal,
- __func__, drate, hrate, xgifb_info->refresh_rate);
- } else {
- xgifb_info->refresh_rate = 60;
- }
-
- search_idx = 0;
- while ((XGIbios_mode[search_idx].mode_no != 0) &&
- (XGIbios_mode[search_idx].xres <= var->xres)) {
- if ((XGIbios_mode[search_idx].xres == var->xres) &&
- (XGIbios_mode[search_idx].yres == var->yres) &&
- (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
- if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) {
- found_mode = 1;
- break;
- }
- }
- search_idx++;
- }
-
- if (!found_mode) {
-		pr_err("%dx%dx%d is not a valid mode\n",
- var->xres, var->yres, var->bits_per_pixel);
- search_idx = 0;
- while (XGIbios_mode[search_idx].mode_no != 0) {
- if ((var->xres <= XGIbios_mode[search_idx].xres) &&
- (var->yres <= XGIbios_mode[search_idx].yres) &&
- (var->bits_per_pixel ==
- XGIbios_mode[search_idx].bpp)) {
- if (XGIfb_validate_mode(xgifb_info,
- search_idx) > 0) {
- found_mode = 1;
- break;
- }
- }
- search_idx++;
- }
- if (found_mode) {
- var->xres = XGIbios_mode[search_idx].xres;
- var->yres = XGIbios_mode[search_idx].yres;
- pr_debug("Adapted to mode %dx%dx%d\n",
- var->xres, var->yres, var->bits_per_pixel);
-
- } else {
- pr_err("Failed to find similar mode to %dx%dx%d\n",
- var->xres, var->yres, var->bits_per_pixel);
- return -EINVAL;
- }
- }
-
- /* Adapt RGB settings */
- XGIfb_bpp_to_var(xgifb_info, var);
-
- if (!XGIfb_ypan) {
- if (var->xres != var->xres_virtual)
- var->xres_virtual = var->xres;
- if (var->yres != var->yres_virtual)
- var->yres_virtual = var->yres;
- }
-
- /* Truncate offsets to maximum if too high */
- if (var->xoffset > var->xres_virtual - var->xres)
- var->xoffset = var->xres_virtual - var->xres - 1;
-
- if (var->yoffset > var->yres_virtual - var->yres)
- var->yoffset = var->yres_virtual - var->yres - 1;
-
- /* Set everything else to 0 */
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->transp.msb_right = 0;
-
- return 0;
-}
-
-static int XGIfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- int err;
-
- if (var->xoffset > (info->var.xres_virtual - info->var.xres))
- return -EINVAL;
- if (var->yoffset > (info->var.yres_virtual - info->var.yres))
- return -EINVAL;
-
- if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset >= info->var.yres_virtual || var->xoffset)
- return -EINVAL;
- } else if (var->xoffset + info->var.xres > info->var.xres_virtual ||
- var->yoffset + info->var.yres > info->var.yres_virtual) {
- return -EINVAL;
- }
- err = XGIfb_pan_var(var, info);
- if (err < 0)
- return err;
-
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
-
- return 0;
-}
-
-static int XGIfb_blank(int blank, struct fb_info *info)
-{
- struct xgifb_video_info *xgifb_info = info->par;
- struct vb_device_info *vb = &xgifb_info->dev_info;
- u8 reg;
-
- reg = xgifb_reg_get(vb->P3d4, 0x17);
-
- if (blank > 0)
- reg &= 0x7f;
- else
- reg |= 0x80;
-
- xgifb_reg_set(vb->P3d4, 0x17, reg);
- xgifb_reg_set(vb->P3c4, 0x00, 0x01); /* Synchronous Reset */
- xgifb_reg_set(vb->P3c4, 0x00, 0x03); /* End Reset */
- return 0;
-}
-
-static struct fb_ops XGIfb_ops = {
- .owner = THIS_MODULE,
- .fb_open = XGIfb_open,
- .fb_release = XGIfb_release,
- .fb_check_var = XGIfb_check_var,
- .fb_set_par = XGIfb_set_par,
- .fb_setcolreg = XGIfb_setcolreg,
- .fb_pan_display = XGIfb_pan_display,
- .fb_blank = XGIfb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-};
-
-/* ---------------- Chip generation dependent routines ---------------- */
-
-/* for XGI 315/550/650/740/330 */
-
-static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
-{
- struct vb_device_info *vb = &xgifb_info->dev_info;
- u8 ChannelNum, tmp;
- u8 reg = 0;
-
- /* xorg driver sets 32MB * 1 channel */
- if (xgifb_info->chip == XG27)
- xgifb_reg_set(vb->P3c4, IND_SIS_DRAM_SIZE, 0x51);
-
- reg = xgifb_reg_get(vb->P3c4, IND_SIS_DRAM_SIZE);
- if (!reg)
- return -1;
-
- switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) {
- case XGI_DRAM_SIZE_1MB:
- xgifb_info->video_size = 0x100000;
- break;
- case XGI_DRAM_SIZE_2MB:
- xgifb_info->video_size = 0x200000;
- break;
- case XGI_DRAM_SIZE_4MB:
- xgifb_info->video_size = 0x400000;
- break;
- case XGI_DRAM_SIZE_8MB:
- xgifb_info->video_size = 0x800000;
- break;
- case XGI_DRAM_SIZE_16MB:
- xgifb_info->video_size = 0x1000000;
- break;
- case XGI_DRAM_SIZE_32MB:
- xgifb_info->video_size = 0x2000000;
- break;
- case XGI_DRAM_SIZE_64MB:
- xgifb_info->video_size = 0x4000000;
- break;
- case XGI_DRAM_SIZE_128MB:
- xgifb_info->video_size = 0x8000000;
- break;
- case XGI_DRAM_SIZE_256MB:
- xgifb_info->video_size = 0x10000000;
- break;
- default:
- return -1;
- }
-
- tmp = (reg & 0x0c) >> 2;
- switch (xgifb_info->chip) {
- case XG20:
- case XG21:
- case XG27:
- ChannelNum = 1;
- break;
-
- case XG42:
- if (reg & 0x04)
- ChannelNum = 2;
- else
- ChannelNum = 1;
- break;
-
- case XG40:
- default:
- if (tmp == 2)
- ChannelNum = 2;
- else if (tmp == 3)
- ChannelNum = 3;
- else
- ChannelNum = 1;
- break;
- }
-
- xgifb_info->video_size = xgifb_info->video_size * ChannelNum;
-
-	pr_info("SR14=%x DramSize %x ChannelNum %x\n",
- reg, xgifb_info->video_size, ChannelNum);
- return 0;
-}
-
-static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
-{
- struct vb_device_info *vb = &xgifb_info->dev_info;
- u8 cr32, temp = 0;
-
- xgifb_info->TV_plug = 0;
- xgifb_info->TV_type = 0;
-
- cr32 = xgifb_reg_get(vb->P3d4, IND_XGI_SCRATCH_REG_CR32);
-
- if ((cr32 & SIS_CRT1) && !XGIfb_crt1off) {
- XGIfb_crt1off = 0;
- } else {
- if (cr32 & 0x5F)
- XGIfb_crt1off = 1;
- else
- XGIfb_crt1off = 0;
- }
-
- if (!xgifb_info->display2_force) {
- if (cr32 & SIS_VB_TV)
- xgifb_info->display2 = XGIFB_DISP_TV;
- else if (cr32 & SIS_VB_LCD)
- xgifb_info->display2 = XGIFB_DISP_LCD;
- else if (cr32 & SIS_VB_CRT2)
- xgifb_info->display2 = XGIFB_DISP_CRT;
- else
- xgifb_info->display2 = XGIFB_DISP_NONE;
- }
-
- if (XGIfb_tvplug != -1) {
- /* Override with option */
- xgifb_info->TV_plug = XGIfb_tvplug;
- } else if (cr32 & SIS_VB_HIVISION) {
- xgifb_info->TV_type = TVMODE_HIVISION;
- xgifb_info->TV_plug = TVPLUG_SVIDEO;
- } else if (cr32 & SIS_VB_SVIDEO) {
- xgifb_info->TV_plug = TVPLUG_SVIDEO;
- } else if (cr32 & SIS_VB_COMPOSITE) {
- xgifb_info->TV_plug = TVPLUG_COMPOSITE;
- } else if (cr32 & SIS_VB_SCART) {
- xgifb_info->TV_plug = TVPLUG_SCART;
- }
-
- if (xgifb_info->TV_type == 0) {
- temp = xgifb_reg_get(vb->P3d4, 0x38);
- if (temp & 0x10)
- xgifb_info->TV_type = TVMODE_PAL;
- else
- xgifb_info->TV_type = TVMODE_NTSC;
- }
-
- /* Copy forceCRT1 option to CRT1off if option is given */
- if (XGIfb_forcecrt1 != -1) {
- if (XGIfb_forcecrt1)
- XGIfb_crt1off = 0;
- else
- XGIfb_crt1off = 1;
- }
-}
-
-static bool XGIfb_has_VB(struct xgifb_video_info *xgifb_info)
-{
- u8 vb_chipid;
-
- vb_chipid = xgifb_reg_get(xgifb_info->dev_info.Part4Port, 0x00);
- switch (vb_chipid) {
- case 0x01:
- xgifb_info->hasVB = HASVB_301;
- break;
- case 0x02:
- xgifb_info->hasVB = HASVB_302;
- break;
- default:
- xgifb_info->hasVB = HASVB_NONE;
- return false;
- }
- return true;
-}
-
-static void XGIfb_get_VB_type(struct xgifb_video_info *xgifb_info)
-{
- u8 reg;
-
- if (!XGIfb_has_VB(xgifb_info)) {
- reg = xgifb_reg_get(xgifb_info->dev_info.P3d4,
- IND_XGI_SCRATCH_REG_CR37);
- switch ((reg & SIS_EXTERNAL_CHIP_MASK) >> 1) {
- case SIS_EXTERNAL_CHIP_LVDS:
- xgifb_info->hasVB = HASVB_LVDS;
- break;
- case SIS_EXTERNAL_CHIP_LVDS_CHRONTEL:
- xgifb_info->hasVB = HASVB_LVDS_CHRONTEL;
- break;
- default:
- break;
- }
- }
-}
-
-static int __init xgifb_optval(char *fullopt, int validx)
-{
- unsigned long lres;
-
- if (kstrtoul(fullopt + validx, 0, &lres) < 0 || lres > INT_MAX) {
- pr_err("Invalid value for option: %s\n", fullopt);
- return 0;
- }
- return lres;
-}
-
-static int __init XGIfb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- pr_info("Options: %s\n", options);
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt)
- continue;
-
- if (!strncmp(this_opt, "mode:", 5)) {
- mode = this_opt + 5;
- } else if (!strncmp(this_opt, "vesa:", 5)) {
- vesa = xgifb_optval(this_opt, 5);
- } else if (!strncmp(this_opt, "vrate:", 6)) {
- refresh_rate = xgifb_optval(this_opt, 6);
- } else if (!strncmp(this_opt, "rate:", 5)) {
- refresh_rate = xgifb_optval(this_opt, 5);
- } else if (!strncmp(this_opt, "crt1off", 7)) {
- XGIfb_crt1off = 1;
- } else if (!strncmp(this_opt, "filter:", 7)) {
- filter = xgifb_optval(this_opt, 7);
- } else if (!strncmp(this_opt, "forcecrt2type:", 14)) {
- XGIfb_search_crt2type(this_opt + 14);
- } else if (!strncmp(this_opt, "forcecrt1:", 10)) {
- XGIfb_forcecrt1 = xgifb_optval(this_opt, 10);
- } else if (!strncmp(this_opt, "tvmode:", 7)) {
- XGIfb_search_tvstd(this_opt + 7);
- } else if (!strncmp(this_opt, "tvstandard:", 11)) {
-			XGIfb_search_tvstd(this_opt + 11);
- } else if (!strncmp(this_opt, "dstn", 4)) {
- enable_dstn = 1;
- /* DSTN overrules forcecrt2type */
- XGIfb_crt2type = XGIFB_DISP_LCD;
- } else if (!strncmp(this_opt, "noypan", 6)) {
- XGIfb_ypan = 0;
- } else {
- mode = this_opt;
- }
- }
- return 0;
-}
-
-static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- u8 reg, reg1;
- u8 CR48, CR38;
- int ret;
- struct fb_info *fb_info;
- struct xgifb_video_info *xgifb_info;
- struct vb_device_info *vb;
- struct xgi_hw_device_info *hw_info;
- unsigned long video_size_max;
-
- fb_info = framebuffer_alloc(sizeof(*xgifb_info), &pdev->dev);
- if (!fb_info)
- return -ENOMEM;
-
- xgifb_info = fb_info->par;
- vb = &xgifb_info->dev_info;
- hw_info = &xgifb_info->hw_info;
- xgifb_info->fb_info = fb_info;
- xgifb_info->chip_id = pdev->device;
- pci_read_config_byte(pdev,
- PCI_REVISION_ID,
- &xgifb_info->revision_id);
- hw_info->jChipRevision = xgifb_info->revision_id;
-
- xgifb_info->pcibus = pdev->bus->number;
- xgifb_info->pcislot = PCI_SLOT(pdev->devfn);
- xgifb_info->pcifunc = PCI_FUNC(pdev->devfn);
- xgifb_info->subsysvendor = pdev->subsystem_vendor;
- xgifb_info->subsysdevice = pdev->subsystem_device;
-
- video_size_max = pci_resource_len(pdev, 0);
- xgifb_info->video_base = pci_resource_start(pdev, 0);
- xgifb_info->mmio_base = pci_resource_start(pdev, 1);
- xgifb_info->mmio_size = pci_resource_len(pdev, 1);
- xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
- dev_info(&pdev->dev, "Relocate IO address: %llx [%08lx]\n",
- (u64)pci_resource_start(pdev, 2),
- xgifb_info->vga_base);
-
- if (pci_enable_device(pdev)) {
- ret = -EIO;
- goto error;
- }
-
- if (XGIfb_crt2type != -1) {
- xgifb_info->display2 = XGIfb_crt2type;
- xgifb_info->display2_force = true;
- }
-
- XGIRegInit(vb, xgifb_info->vga_base);
-
- xgifb_reg_set(vb->P3c4,
- IND_SIS_PASSWORD, SIS_PASSWORD);
- reg1 = xgifb_reg_get(vb->P3c4, IND_SIS_PASSWORD);
-
- if (reg1 != 0xa1) { /* I/O error */
- dev_err(&pdev->dev, "I/O error\n");
- ret = -EIO;
- goto error_disable;
- }
-
- switch (xgifb_info->chip_id) {
- case PCI_DEVICE_ID_XGI_20:
- xgifb_reg_or(vb->P3d4,
- Index_CR_GPIO_Reg3, GPIOG_EN);
- CR48 = xgifb_reg_get(vb->P3d4,
- Index_CR_GPIO_Reg1);
- if (CR48 & GPIOG_READ)
- xgifb_info->chip = XG21;
- else
- xgifb_info->chip = XG20;
- break;
- case PCI_DEVICE_ID_XGI_40:
- xgifb_info->chip = XG40;
- break;
- case PCI_DEVICE_ID_XGI_42:
- xgifb_info->chip = XG42;
- break;
- case PCI_DEVICE_ID_XGI_27:
- xgifb_info->chip = XG27;
- break;
- default:
- ret = -ENODEV;
- goto error_disable;
- }
-
- dev_info(&pdev->dev, "chipid = %x\n", xgifb_info->chip);
- hw_info->jChipType = xgifb_info->chip;
-
- if (XGIfb_get_dram_size(xgifb_info)) {
- xgifb_info->video_size = min_t(unsigned long, video_size_max,
- SZ_16M);
- } else if (xgifb_info->video_size > video_size_max) {
- xgifb_info->video_size = video_size_max;
- }
-
- /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
- xgifb_reg_or(vb->P3c4,
- IND_SIS_PCI_ADDRESS_SET,
- (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
- /* Enable 2D accelerator engine */
- xgifb_reg_or(vb->P3c4,
- IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D);
-
- hw_info->ulVideoMemorySize = xgifb_info->video_size;
-
- if (!request_mem_region(xgifb_info->video_base,
- xgifb_info->video_size,
- "XGIfb FB")) {
-		dev_err(&pdev->dev, "Unable to request memory size %x\n",
- xgifb_info->video_size);
- dev_err(&pdev->dev,
- "Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n");
- ret = -ENODEV;
- goto error_disable;
- }
-
- if (!request_mem_region(xgifb_info->mmio_base,
- xgifb_info->mmio_size,
- "XGIfb MMIO")) {
- dev_err(&pdev->dev,
- "Fatal error: Unable to reserve MMIO region\n");
- ret = -ENODEV;
- goto error_0;
- }
-
- xgifb_info->video_vbase =
- ioremap_wc(xgifb_info->video_base, xgifb_info->video_size);
- hw_info->pjVideoMemoryAddress =
- ioremap_wc(xgifb_info->video_base, xgifb_info->video_size);
- xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base,
- xgifb_info->mmio_size);
-
- dev_info(&pdev->dev,
- "Framebuffer at 0x%llx, mapped to 0x%p, size %dk\n",
- (u64)xgifb_info->video_base,
- xgifb_info->video_vbase,
- xgifb_info->video_size / 1024);
-
- dev_info(&pdev->dev,
- "MMIO at 0x%llx, mapped to 0x%p, size %ldk\n",
- (u64)xgifb_info->mmio_base, xgifb_info->mmio_vbase,
- xgifb_info->mmio_size / 1024);
-
- pci_set_drvdata(pdev, xgifb_info);
- if (!XGIInitNew(pdev))
- dev_err(&pdev->dev, "XGIInitNew() failed!\n");
-
- xgifb_info->mtrr = -1;
-
- xgifb_info->hasVB = HASVB_NONE;
- if ((xgifb_info->chip == XG20) ||
- (xgifb_info->chip == XG27)) {
- xgifb_info->hasVB = HASVB_NONE;
- } else if (xgifb_info->chip == XG21) {
- CR38 = xgifb_reg_get(vb->P3d4, 0x38);
- if ((CR38 & 0xE0) == 0xC0)
- xgifb_info->display2 = XGIFB_DISP_LCD;
- else if ((CR38 & 0xE0) == 0x60)
- xgifb_info->hasVB = HASVB_CHRONTEL;
- else
- xgifb_info->hasVB = HASVB_NONE;
- } else {
- XGIfb_get_VB_type(xgifb_info);
- }
-
- hw_info->ujVBChipID = VB_CHIP_UNKNOWN;
-
- hw_info->ulExternalChip = 0;
-
- switch (xgifb_info->hasVB) {
- case HASVB_301:
- reg = xgifb_reg_get(vb->Part4Port, 0x01);
- if (reg >= 0xE0) {
- hw_info->ujVBChipID = VB_CHIP_302LV;
- dev_info(&pdev->dev,
- "XGI302LV bridge detected (revision 0x%02x)\n",
- reg);
- } else if (reg >= 0xD0) {
- hw_info->ujVBChipID = VB_CHIP_301LV;
- dev_info(&pdev->dev,
- "XGI301LV bridge detected (revision 0x%02x)\n",
- reg);
- } else {
- hw_info->ujVBChipID = VB_CHIP_301;
- dev_info(&pdev->dev, "XGI301 bridge detected\n");
- }
- break;
- case HASVB_302:
- reg = xgifb_reg_get(vb->Part4Port, 0x01);
- if (reg >= 0xE0) {
- hw_info->ujVBChipID = VB_CHIP_302LV;
- dev_info(&pdev->dev,
- "XGI302LV bridge detected (revision 0x%02x)\n",
- reg);
- } else if (reg >= 0xD0) {
-		} else if (reg >= 0xD0) {
-			hw_info->ujVBChipID = VB_CHIP_301LV;
-			dev_info(&pdev->dev,
-				 "XGI301LV bridge detected (revision 0x%02x)\n",
- } else if (reg >= 0xB0) {
- reg1 = xgifb_reg_get(vb->Part4Port,
- 0x23);
-
- hw_info->ujVBChipID = VB_CHIP_302B;
-
- } else {
- hw_info->ujVBChipID = VB_CHIP_302;
- dev_info(&pdev->dev, "XGI302 bridge detected\n");
- }
- break;
- case HASVB_LVDS:
- hw_info->ulExternalChip = 0x1;
- dev_info(&pdev->dev, "LVDS transmitter detected\n");
- break;
- case HASVB_TRUMPION:
- hw_info->ulExternalChip = 0x2;
- dev_info(&pdev->dev, "Trumpion Zurac LVDS scaler detected\n");
- break;
- case HASVB_CHRONTEL:
- hw_info->ulExternalChip = 0x4;
- dev_info(&pdev->dev, "Chrontel TV encoder detected\n");
- break;
- case HASVB_LVDS_CHRONTEL:
- hw_info->ulExternalChip = 0x5;
- dev_info(&pdev->dev,
- "LVDS transmitter and Chrontel TV encoder detected\n");
- break;
- default:
- dev_info(&pdev->dev, "No or unknown bridge type detected\n");
- break;
- }
-
- if (xgifb_info->hasVB != HASVB_NONE)
- XGIfb_detect_VB(xgifb_info);
- else if (xgifb_info->chip != XG21)
- xgifb_info->display2 = XGIFB_DISP_NONE;
-
- if (xgifb_info->display2 == XGIFB_DISP_LCD) {
- if (!enable_dstn) {
- reg = xgifb_reg_get(vb->P3d4,
- IND_XGI_LCD_PANEL);
- reg &= 0x0f;
- hw_info->ulCRT2LCDType = XGI310paneltype[reg];
- }
- }
-
- xgifb_info->mode_idx = -1;
-
- if (mode)
- XGIfb_search_mode(xgifb_info, mode);
- else if (vesa != -1)
- XGIfb_search_vesamode(xgifb_info, vesa);
-
- if (xgifb_info->mode_idx >= 0)
- xgifb_info->mode_idx =
- XGIfb_validate_mode(xgifb_info, xgifb_info->mode_idx);
-
- if (xgifb_info->mode_idx < 0) {
- if (xgifb_info->display2 == XGIFB_DISP_LCD &&
- xgifb_info->chip == XG21)
- xgifb_info->mode_idx =
- XGIfb_GetXG21DefaultLVDSModeIdx(xgifb_info);
- else
- xgifb_info->mode_idx = DEFAULT_MODE;
- }
-
- if (xgifb_info->mode_idx < 0) {
- dev_err(&pdev->dev, "No supported video mode found\n");
- ret = -EINVAL;
- goto error_1;
- }
-
- /* set default refresh rate */
- xgifb_info->refresh_rate = refresh_rate;
- if (xgifb_info->refresh_rate == 0)
- xgifb_info->refresh_rate = 60;
- if (XGIfb_search_refresh_rate(xgifb_info, xgifb_info->refresh_rate) == 0) {
- xgifb_info->rate_idx = 1;
- xgifb_info->refresh_rate = 60;
- }
-
- xgifb_info->video_bpp = XGIbios_mode[xgifb_info->mode_idx].bpp;
- xgifb_info->video_vwidth =
- xgifb_info->video_width =
- XGIbios_mode[xgifb_info->mode_idx].xres;
- xgifb_info->video_vheight =
- xgifb_info->video_height =
- XGIbios_mode[xgifb_info->mode_idx].yres;
- xgifb_info->org_x = 0;
- xgifb_info->org_y = 0;
- xgifb_info->video_linelength =
- xgifb_info->video_width *
- (xgifb_info->video_bpp >> 3);
- switch (xgifb_info->video_bpp) {
- case 8:
- xgifb_info->DstColor = 0x0000;
- xgifb_info->XGI310_AccelDepth = 0x00000000;
- xgifb_info->video_cmap_len = 256;
- break;
- case 16:
- xgifb_info->DstColor = 0x8000;
- xgifb_info->XGI310_AccelDepth = 0x00010000;
- xgifb_info->video_cmap_len = 16;
- break;
- case 32:
- xgifb_info->DstColor = 0xC000;
- xgifb_info->XGI310_AccelDepth = 0x00020000;
- xgifb_info->video_cmap_len = 16;
- break;
- default:
- xgifb_info->video_cmap_len = 16;
- pr_info("Unsupported depth %d\n",
- xgifb_info->video_bpp);
- break;
- }
-
- pr_info("Default mode is %dx%dx%d (%dHz)\n",
- xgifb_info->video_width, xgifb_info->video_height,
- xgifb_info->video_bpp, xgifb_info->refresh_rate);
-
- fb_info->var.red.length = 8;
- fb_info->var.green.length = 8;
- fb_info->var.blue.length = 8;
- fb_info->var.activate = FB_ACTIVATE_NOW;
- fb_info->var.height = -1;
- fb_info->var.width = -1;
- fb_info->var.vmode = FB_VMODE_NONINTERLACED;
- fb_info->var.xres = xgifb_info->video_width;
- fb_info->var.xres_virtual = xgifb_info->video_width;
- fb_info->var.yres = xgifb_info->video_height;
- fb_info->var.yres_virtual = xgifb_info->video_height;
- fb_info->var.bits_per_pixel = xgifb_info->video_bpp;
-
- XGIfb_bpp_to_var(xgifb_info, &fb_info->var);
-
- fb_info->var.pixclock = (u32)(1000000000 / XGIfb_mode_rate_to_dclock
- (vb, hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no));
-
- if (XGIfb_mode_rate_to_ddata(vb, hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no,
- &fb_info->var.left_margin,
- &fb_info->var.right_margin,
- &fb_info->var.upper_margin,
- &fb_info->var.lower_margin,
- &fb_info->var.hsync_len,
- &fb_info->var.vsync_len,
- &fb_info->var.sync,
- &fb_info->var.vmode)) {
- if ((fb_info->var.vmode & FB_VMODE_MASK) ==
- FB_VMODE_INTERLACED) {
- fb_info->var.yres <<= 1;
- fb_info->var.yres_virtual <<= 1;
- } else if ((fb_info->var.vmode & FB_VMODE_MASK) ==
- FB_VMODE_DOUBLE) {
- fb_info->var.pixclock >>= 1;
- fb_info->var.yres >>= 1;
- fb_info->var.yres_virtual >>= 1;
- }
- }
-
- fb_info->flags = FBINFO_FLAG_DEFAULT;
- fb_info->screen_base = xgifb_info->video_vbase;
- fb_info->fbops = &XGIfb_ops;
- XGIfb_get_fix(&fb_info->fix, -1, fb_info);
- fb_info->pseudo_palette = xgifb_info->pseudo_palette;
-
- fb_alloc_cmap(&fb_info->cmap, 256, 0);
-
- xgifb_info->mtrr = arch_phys_wc_add(xgifb_info->video_base,
- xgifb_info->video_size);
-
- if (register_framebuffer(fb_info) < 0) {
- ret = -EINVAL;
- goto error_mtrr;
- }
-
- dumpVGAReg(xgifb_info);
-
- return 0;
-
-error_mtrr:
- arch_phys_wc_del(xgifb_info->mtrr);
-error_1:
- iounmap(xgifb_info->mmio_vbase);
- iounmap(xgifb_info->video_vbase);
- release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size);
-error_0:
- release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
-error_disable:
- pci_disable_device(pdev);
-error:
- framebuffer_release(fb_info);
- return ret;
-}
-
-/* -------------------- PCI DEVICE HANDLING -------------------- */
-
-static void xgifb_remove(struct pci_dev *pdev)
-{
- struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
- struct fb_info *fb_info = xgifb_info->fb_info;
-
- unregister_framebuffer(fb_info);
- arch_phys_wc_del(xgifb_info->mtrr);
- iounmap(xgifb_info->mmio_vbase);
- iounmap(xgifb_info->video_vbase);
- release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size);
- release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
- pci_disable_device(pdev);
- framebuffer_release(fb_info);
-}
-
-static struct pci_driver xgifb_driver = {
- .name = "xgifb",
- .id_table = xgifb_pci_table,
- .probe = xgifb_probe,
- .remove = xgifb_remove
-};
-
-/* -------------------- MODULE -------------------- */
-
-module_param(mode, charp, 0000);
-MODULE_PARM_DESC(mode,
-		 "Selects the desired default display mode in the format XxYxDepth (e.g. 1024x768x16).");
-
-module_param(forcecrt2type, charp, 0000);
-MODULE_PARM_DESC(forcecrt2type,
- "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
-
-module_param(vesa, int, 0000);
-MODULE_PARM_DESC(vesa,
-		 "Selects the desired default display mode by VESA mode number (e.g. 0x117).");
-
-module_param(filter, int, 0000);
-MODULE_PARM_DESC(filter,
-		 "Selects the TV flicker filter type (only for systems with a SiS301 video bridge). Possible values: 0-7. Default: [no filter].");
-
-static int __init xgifb_init(void)
-{
- char *option = NULL;
-
- if (forcecrt2type)
- XGIfb_search_crt2type(forcecrt2type);
- if (fb_get_options("xgifb", &option))
- return -ENODEV;
- XGIfb_setup(option);
-
- return pci_register_driver(&xgifb_driver);
-}
-
-static void __exit xgifb_remove_module(void)
-{
- pci_unregister_driver(&xgifb_driver);
- pr_debug("Module unloaded\n");
-}
-
-MODULE_DESCRIPTION("Z7 Z9 Z9S Z11 framebuffer device driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("XGITECH , Others");
-module_init(xgifb_init);
-module_exit(xgifb_remove_module);
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
deleted file mode 100644
index 982c676c16c6..000000000000
--- a/drivers/staging/xgifb/XGIfb.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_XGIFB
-#define _LINUX_XGIFB
-#include "vgatypes.h"
-#include "vb_struct.h"
-
-enum xgifb_display_type {
- XGIFB_DISP_NONE = 0,
- XGIFB_DISP_CRT,
- XGIFB_DISP_LCD,
- XGIFB_DISP_TV,
-};
-
-#define HASVB_NONE 0x00
-#define HASVB_301 0x01
-#define HASVB_LVDS 0x02
-#define HASVB_TRUMPION 0x04
-#define HASVB_LVDS_CHRONTEL 0x10
-#define HASVB_302 0x20
-#define HASVB_CHRONTEL 0x80
-
-enum XGI_CHIP_TYPE {
- XG40 = 32,
- XG42,
- XG20 = 48,
- XG21,
- XG27,
-};
-
-enum xgi_tvtype {
- TVMODE_NTSC = 0,
- TVMODE_PAL,
- TVMODE_HIVISION,
- TVTYPE_PALM,
- TVTYPE_PALN,
- TVTYPE_NTSCJ,
- TVMODE_TOTAL
-};
-
-enum xgi_tv_plug {
- TVPLUG_UNKNOWN = 0,
- TVPLUG_COMPOSITE = 1,
- TVPLUG_SVIDEO = 2,
- TVPLUG_COMPOSITE_AND_SVIDEO = 3,
- TVPLUG_SCART = 4,
- TVPLUG_YPBPR_525i = 5,
- TVPLUG_YPBPR_525P = 6,
- TVPLUG_YPBPR_750P = 7,
- TVPLUG_YPBPR_1080i = 8,
- TVPLUG_TOTAL
-};
-
-struct xgifb_video_info {
- struct fb_info *fb_info;
- struct xgi_hw_device_info hw_info;
- struct vb_device_info dev_info;
-
- int mode_idx;
- int rate_idx;
-
- u32 pseudo_palette[17];
-
- int chip_id;
- unsigned int video_size;
- phys_addr_t video_base;
- void __iomem *video_vbase;
- phys_addr_t mmio_base;
- unsigned long mmio_size;
- void __iomem *mmio_vbase;
- unsigned long vga_base;
- int mtrr;
-
- int video_bpp;
- int video_cmap_len;
- int video_width;
- int video_height;
- int video_vwidth;
- int video_vheight;
- int org_x;
- int org_y;
- int video_linelength;
- unsigned int refresh_rate;
-
- enum xgifb_display_type display2; /* the second display output type */
- bool display2_force;
- unsigned char hasVB;
- unsigned char TV_type;
- unsigned char TV_plug;
-
- struct XGI21_LVDSCapStruct lvds_data;
-
- enum XGI_CHIP_TYPE chip;
- unsigned char revision_id;
-
- unsigned short DstColor;
- unsigned long XGI310_AccelDepth;
- unsigned long CommandReg;
-
- unsigned int pcibus;
- unsigned int pcislot;
- unsigned int pcifunc;
-
- unsigned short subsysvendor;
- unsigned short subsysdevice;
-
- char reserved[236];
-};
-
-#endif
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
deleted file mode 100644
index 0311e2682d27..000000000000
--- a/drivers/staging/xgifb/vb_def.h
+++ /dev/null
@@ -1,257 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VB_DEF_
-#define _VB_DEF_
-#include "../../video/fbdev/sis/initdef.h"
-
-#define VB_XGI301C 0x0020 /* for 301C */
-
-#define SupportCRT2in301C 0x0100 /* for 301C */
-#define SetCHTVOverScan 0x8000
-
-#define PanelResInfo 0x1F /* CR36 Panel Type/LCDResInfo */
-#define Panel_1024x768x75 0x22
-#define Panel_1280x1024x75 0x23
-
-#define PanelRef60Hz 0x00
-#define PanelRef75Hz 0x20
-
-#define YPbPr525iVCLK 0x03B
-#define YPbPr525iVCLK_2 0x03A
-
-#define XGI_CRT2_PORT_00 (0x00 - 0x030)
-
-#define SupportAllCRT2 0x0078
-#define NoSupportTV 0x0070
-#define NoSupportHiVisionTV 0x0060
-#define NoSupportLCD 0x0058
-
-/* -------------- SetMode Stack/Scratch */
-#define XGI_SetCRT2ToLCDA 0x0100
-#define SetCRT2ToDualEdge 0x8000
-
-#define ReserveTVOption 0x0008
-
-#define SetTVLowResolution 0x0400
-#define TVSimuMode 0x0800
-#define RPLLDIV2XO 0x1000
-#define NTSC1024x768 0x2000
-#define SetTVLockMode 0x4000
-
-#define XGI_LCDVESATiming 0x0001 /* LCD Info/CR37 */
-#define XGI_EnableLVDSDDA 0x0002
-#define EnableScalingLCD 0x0008
-#define SetPWDEnable 0x0004
-#define SetLCDtoNonExpanding 0x0010
-#define SetLCDDualLink 0x0100
-#define SetLCDLowResolution 0x0200
-
-/* LCD Capability shampoo */
-#define DefaultLCDCap 0x80ea
-#define EnableLCD24bpp 0x0004 /* default */
-#define LCDPolarity 0x00c0 /* default: SyncNN */
-#define XGI_LCDDualLink 0x0100
-#define EnableSpectrum 0x0200
-#define PWDEnable 0x0400
-#define EnableVBCLKDRVLOW 0x4000
-#define EnablePLLSPLOW 0x8000
-
-#define AVIDEOSense 0x01 /* CR32 */
-#define SVIDEOSense 0x02
-#define SCARTSense 0x04
-#define LCDSense 0x08
-#define Monitor2Sense 0x10
-#define Monitor1Sense 0x20
-#define HiTVSense 0x40
-
-#define YPbPrSense 0x80 /* NEW SCRATCH */
-
-#define TVSense 0xc7
-
-#define YPbPrMode 0xe0
-#define YPbPrMode525i 0x00
-#define YPbPrMode525p 0x20
-#define YPbPrMode750p 0x40
-#define YPbPrMode1080i 0x60
-
-#define ScalingLCD 0x08
-
-#define SetYPbPr 0x04
-
-/* ---------------------- VUMA Information */
-#define DisplayDeviceFromCMOS 0x10
-
-/* ---------------------- HK Evnet Definition */
-#define XGI_ModeSwitchStatus 0xf0
-#define ActiveCRT1 0x10
-#define ActiveLCD 0x0020
-#define ActiveTV 0x40
-#define ActiveCRT2 0x80
-
-#define ActiveAVideo 0x01
-#define ActiveSVideo 0x02
-#define ActiveSCART 0x04
-#define ActiveHiTV 0x08
-#define ActiveYPbPr 0x10
-
-#define NTSC1024x768HT 1908
-
-#define YPbPrTV525iHT 1716 /* YPbPr */
-#define YPbPrTV525iVT 525
-#define YPbPrTV525pHT 1716
-#define YPbPrTV525pVT 525
-#define YPbPrTV750pHT 1650
-#define YPbPrTV750pVT 750
-
-#define VCLK25_175 0x00
-#define VCLK28_322 0x01
-#define VCLK31_5 0x02
-#define VCLK36 0x03
-#define VCLK43_163 0x05
-#define VCLK44_9 0x06
-#define VCLK49_5 0x07
-#define VCLK50 0x08
-#define VCLK52_406 0x09
-#define VCLK56_25 0x0A
-#define VCLK68_179 0x0D
-#define VCLK72_852 0x0E
-#define VCLK75 0x0F
-#define VCLK78_75 0x11
-#define VCLK79_411 0x12
-#define VCLK83_95 0x13
-#define VCLK86_6 0x15
-#define VCLK94_5 0x16
-#define VCLK113_309 0x1B
-#define VCLK116_406 0x1C
-#define VCLK135_5 0x1E
-#define VCLK139_054 0x1F
-#define VCLK157_5 0x20
-#define VCLK162 0x21
-#define VCLK175 0x22
-#define VCLK189 0x23
-#define VCLK202_5 0x25
-#define VCLK229_5 0x26
-#define VCLK234 0x27
-#define VCLK254_817 0x29
-#define VCLK266_952 0x2B
-#define VCLK269_655 0x2C
-#define VCLK277_015 0x2E
-#define VCLK291_132 0x30
-#define VCLK291_766 0x31
-#define VCLK315_195 0x33
-#define VCLK323_586 0x34
-#define VCLK330_615 0x35
-#define VCLK340_477 0x37
-#define VCLK375_847 0x38
-#define VCLK388_631 0x39
-#define VCLK125_999 0x51
-#define VCLK148_5 0x52
-#define VCLK217_325 0x55
-#define XGI_YPbPr750pVCLK 0x57
-
-#define VCLK39_77 0x40
-#define YPbPr525pVCLK 0x3A
-#define NTSC1024VCLK 0x41
-#define VCLK35_2 0x49 /* ; 800x480 */
-#define VCLK122_61 0x4A
-#define VCLK80_350 0x4B
-#define VCLK107_385 0x4C
-
-#define RES320x200 0x00
-#define RES320x240 0x01
-#define RES400x300 0x02
-#define RES512x384 0x03
-#define RES640x400 0x04
-#define RES640x480x60 0x05
-#define RES640x480x72 0x06
-#define RES640x480x75 0x07
-#define RES640x480x85 0x08
-#define RES640x480x100 0x09
-#define RES640x480x120 0x0A
-#define RES640x480x160 0x0B
-#define RES640x480x200 0x0C
-#define RES800x600x56 0x0D
-#define RES800x600x60 0x0E
-#define RES800x600x72 0x0F
-#define RES800x600x75 0x10
-#define RES800x600x85 0x11
-#define RES800x600x100 0x12
-#define RES800x600x120 0x13
-#define RES800x600x160 0x14
-#define RES1024x768x43 0x15
-#define RES1024x768x60 0x16
-#define RES1024x768x70 0x17
-#define RES1024x768x75 0x18
-#define RES1024x768x85 0x19
-#define RES1024x768x100 0x1A
-#define RES1024x768x120 0x1B
-#define RES1280x1024x43 0x1C
-#define RES1280x1024x60 0x1D
-#define RES1280x1024x75 0x1E
-#define RES1280x1024x85 0x1F
-#define RES1600x1200x60 0x20
-#define RES1600x1200x65 0x21
-#define RES1600x1200x70 0x22
-#define RES1600x1200x75 0x23
-#define RES1600x1200x85 0x24
-#define RES1600x1200x100 0x25
-#define RES1600x1200x120 0x26
-#define RES1920x1440x60 0x27
-#define RES1920x1440x65 0x28
-#define RES1920x1440x70 0x29
-#define RES1920x1440x75 0x2A
-#define RES1920x1440x85 0x2B
-#define RES1920x1440x100 0x2C
-#define RES2048x1536x60 0x2D
-#define RES2048x1536x65 0x2E
-#define RES2048x1536x70 0x2F
-#define RES2048x1536x75 0x30
-#define RES2048x1536x85 0x31
-#define RES800x480x60 0x32
-#define RES800x480x75 0x33
-#define RES800x480x85 0x34
-#define RES1024x576x60 0x35
-#define RES1024x576x75 0x36
-#define RES1024x576x85 0x37
-#define RES1280x720x60 0x38
-#define RES1280x720x75 0x39
-#define RES1280x720x85 0x3A
-#define RES1280x960x60 0x3B
-#define RES720x480x60 0x3C
-#define RES720x576x56 0x3D
-#define RES856x480x79I 0x3E
-#define RES856x480x60 0x3F
-#define RES1280x768x60 0x40
-#define RES1400x1050x60 0x41
-#define RES1152x864x60 0x42
-#define RES1152x864x75 0x43
-#define RES1024x768x160 0x44
-#define RES1280x960x75 0x45
-#define RES1280x960x85 0x46
-#define RES1280x960x120 0x47
-
-#define XG27_CR8F 0x0C
-#define XG27_SR36 0x30
-#define XG27_SR40 0x04
-#define XG27_SR41 0x00
-#define XG40_CRCF 0x13
-#define XGI330_CRT2Data_1_2 0
-#define XGI330_CRT2Data_4_D 0
-#define XGI330_CRT2Data_4_E 0
-#define XGI330_CRT2Data_4_10 0x80
-#define XGI330_SR07 0x18
-#define XGI330_SR1F 0
-#define XGI330_SR23 0xf6
-#define XGI330_SR24 0x0d
-#define XGI330_SR31 0xc0
-#define XGI330_SR32 0x11
-#define XGI330_SR33 0
-
-extern const struct XGI_ExtStruct XGI330_EModeIDTable[];
-extern const struct XGI_Ext2Struct XGI330_RefIndex[];
-extern const struct XGI_CRT1TableStruct XGI_CRT1Table[];
-extern const struct XGI_ECLKDataStruct XGI340_ECLKData[];
-extern const struct SiS_VCLKData XGI_VCLKData[];
-extern const unsigned char XGI340_CR6B[][4];
-extern const unsigned char XGI340_AGPReg[];
-
-#endif
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
deleted file mode 100644
index ac1c815a3c5e..000000000000
--- a/drivers/staging/xgifb/vb_init.c
+++ /dev/null
@@ -1,1367 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-
-#include "XGIfb.h"
-#include "vb_def.h"
-#include "vb_util.h"
-#include "vb_setmode.h"
-#include "vb_init.h"
-static const unsigned short XGINew_DDRDRAM_TYPE340[4][2] = {
- { 16, 0x45},
- { 8, 0x35},
- { 4, 0x31},
- { 2, 0x21} };
-
-static const unsigned short XGINew_DDRDRAM_TYPE20[12][2] = {
- { 128, 0x5D},
- { 64, 0x59},
- { 64, 0x4D},
- { 32, 0x55},
- { 32, 0x49},
- { 32, 0x3D},
- { 16, 0x51},
- { 16, 0x45},
- { 16, 0x39},
- { 8, 0x41},
- { 8, 0x35},
- { 4, 0x31} };
-
-#define XGIFB_ROM_SIZE 65536
-
-static unsigned char
-XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned char data, temp;
-
- if (HwDeviceExtension->jChipType < XG20) {
- data = xgifb_reg_get(pVBInfo->P3c4, 0x39) & 0x02;
- if (data == 0)
- data = (xgifb_reg_get(pVBInfo->P3c4, 0x3A) &
- 0x02) >> 1;
- return data;
- } else if (HwDeviceExtension->jChipType == XG27) {
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
- /* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
- if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
- data = 0; /* DDR */
- else
- data = 1; /* DDRII */
- return data;
- } else if (HwDeviceExtension->jChipType == XG21) {
- /* Independent GPIO control */
- xgifb_reg_and(pVBInfo->P3d4, 0xB4, ~0x02);
- usleep_range(800, 1800);
- xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */
- /* GPIOF 0:DVI 1:DVO */
- data = xgifb_reg_get(pVBInfo->P3d4, 0x48);
- /*
- * HOTPLUG_SUPPORT
- * for current XG20 & XG21, GPIOH is floating, driver will
- * fix DDR temporarily
- */
- /* DVI read GPIOH */
- data &= 0x01; /* 1=DDRII, 0=DDR */
- /* ~HOTPLUG_SUPPORT */
- xgifb_reg_or(pVBInfo->P3d4, 0xB4, 0x02);
- return data;
- }
- data = xgifb_reg_get(pVBInfo->P3d4, 0x97) & 0x01;
-
- if (data == 1)
- data++;
-
- return data;
-}
-
-static void XGINew_DDR1x_MRS_340(unsigned long P3c4,
- struct vb_device_info *pVBInfo)
-{
- xgifb_reg_set(P3c4, 0x18, 0x01);
- xgifb_reg_set(P3c4, 0x19, 0x20);
- xgifb_reg_set(P3c4, 0x16, 0x00);
- xgifb_reg_set(P3c4, 0x16, 0x80);
-
- usleep_range(3, 1003);
- xgifb_reg_set(P3c4, 0x18, 0x00);
- xgifb_reg_set(P3c4, 0x19, 0x20);
- xgifb_reg_set(P3c4, 0x16, 0x00);
- xgifb_reg_set(P3c4, 0x16, 0x80);
-
- usleep_range(60, 1060);
- xgifb_reg_set(P3c4, 0x18, pVBInfo->SR18[pVBInfo->ram_type]); /* SR18 */
- xgifb_reg_set(P3c4, 0x19, 0x01);
- xgifb_reg_set(P3c4, 0x16, 0x03);
- xgifb_reg_set(P3c4, 0x16, 0x83);
- usleep_range(1, 1001);
- xgifb_reg_set(P3c4, 0x1B, 0x03);
- usleep_range(500, 1500);
- xgifb_reg_set(P3c4, 0x18, pVBInfo->SR18[pVBInfo->ram_type]); /* SR18 */
- xgifb_reg_set(P3c4, 0x19, 0x00);
- xgifb_reg_set(P3c4, 0x16, 0x03);
- xgifb_reg_set(P3c4, 0x16, 0x83);
- xgifb_reg_set(P3c4, 0x1B, 0x00);
-}
-
-static void XGINew_SetMemoryClock(struct vb_device_info *pVBInfo)
-{
- xgifb_reg_set(pVBInfo->P3c4,
- 0x28,
- pVBInfo->MCLKData[pVBInfo->ram_type].SR28);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x29,
- pVBInfo->MCLKData[pVBInfo->ram_type].SR29);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x2A,
- pVBInfo->MCLKData[pVBInfo->ram_type].SR2A);
-
- xgifb_reg_set(pVBInfo->P3c4,
- 0x2E,
- XGI340_ECLKData[pVBInfo->ram_type].SR2E);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x2F,
- XGI340_ECLKData[pVBInfo->ram_type].SR2F);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x30,
- XGI340_ECLKData[pVBInfo->ram_type].SR30);
-}
-
-static void XGINew_DDRII_Bootup_XG27(
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned long P3c4, struct vb_device_info *pVBInfo)
-{
- unsigned long P3d4 = P3c4 + 0x10;
-
- pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
- XGINew_SetMemoryClock(pVBInfo);
-
- /* Set Double Frequency */
- xgifb_reg_set(P3d4, 0x97, pVBInfo->XGINew_CR97); /* CR97 */
-
- usleep_range(200, 1200);
-
- xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS2 */
- xgifb_reg_set(P3c4, 0x19, 0x80); /* Set SR19 */
- xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
- usleep_range(15, 1015);
- xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
- usleep_range(15, 1015);
-
- xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS3 */
- xgifb_reg_set(P3c4, 0x19, 0xC0); /* Set SR19 */
- xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
- usleep_range(15, 1015);
- xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
- usleep_range(15, 1015);
-
- xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS1 */
- xgifb_reg_set(P3c4, 0x19, 0x40); /* Set SR19 */
- xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
- usleep_range(30, 1030);
- xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
- usleep_range(15, 1015);
-
- xgifb_reg_set(P3c4, 0x18, 0x42); /* Set SR18 */ /* MRS, DLL Enable */
- xgifb_reg_set(P3c4, 0x19, 0x0A); /* Set SR19 */
- xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */
- usleep_range(30, 1030);
- xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */
- xgifb_reg_set(P3c4, 0x16, 0x80); /* Set SR16 */
-
- xgifb_reg_set(P3c4, 0x1B, 0x04); /* Set SR1B */
- usleep_range(60, 1060);
- xgifb_reg_set(P3c4, 0x1B, 0x00); /* Set SR1B */
-
- xgifb_reg_set(P3c4, 0x18, 0x42); /* Set SR18 */ /* MRS, DLL Reset */
- xgifb_reg_set(P3c4, 0x19, 0x08); /* Set SR19 */
- xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */
-
- usleep_range(30, 1030);
- xgifb_reg_set(P3c4, 0x16, 0x83); /* Set SR16 */
- usleep_range(15, 1015);
-
- xgifb_reg_set(P3c4, 0x18, 0x80); /* Set SR18 */ /* MRS, ODT */
- xgifb_reg_set(P3c4, 0x19, 0x46); /* Set SR19 */
- xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
- usleep_range(30, 1030);
- xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
- usleep_range(15, 1015);
-
- xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS */
- xgifb_reg_set(P3c4, 0x19, 0x40); /* Set SR19 */
- xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */
- usleep_range(30, 1030);
- xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */
- usleep_range(15, 1015);
-
- /* Set SR1B refresh control 000:close; 010:open */
- xgifb_reg_set(P3c4, 0x1B, 0x04);
- usleep_range(200, 1200);
-}
-
-static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned long P3c4,
- struct vb_device_info *pVBInfo)
-{
- unsigned long P3d4 = P3c4 + 0x10;
-
- pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
- XGINew_SetMemoryClock(pVBInfo);
-
- xgifb_reg_set(P3d4, 0x97, 0x11); /* CR97 */
-
- usleep_range(200, 1200);
- xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS2 */
- xgifb_reg_set(P3c4, 0x19, 0x80);
- xgifb_reg_set(P3c4, 0x16, 0x05);
- xgifb_reg_set(P3c4, 0x16, 0x85);
-
- xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS3 */
- xgifb_reg_set(P3c4, 0x19, 0xC0);
- xgifb_reg_set(P3c4, 0x16, 0x05);
- xgifb_reg_set(P3c4, 0x16, 0x85);
-
- xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS1 */
- xgifb_reg_set(P3c4, 0x19, 0x40);
- xgifb_reg_set(P3c4, 0x16, 0x05);
- xgifb_reg_set(P3c4, 0x16, 0x85);
-
- xgifb_reg_set(P3c4, 0x18, 0x42); /* MRS1 */
- xgifb_reg_set(P3c4, 0x19, 0x02);
- xgifb_reg_set(P3c4, 0x16, 0x05);
- xgifb_reg_set(P3c4, 0x16, 0x85);
-
- usleep_range(15, 1015);
- xgifb_reg_set(P3c4, 0x1B, 0x04); /* SR1B */
- usleep_range(30, 1030);
- xgifb_reg_set(P3c4, 0x1B, 0x00); /* SR1B */
- usleep_range(100, 1100);
-
- xgifb_reg_set(P3c4, 0x18, 0x42); /* MRS1 */
- xgifb_reg_set(P3c4, 0x19, 0x00);
- xgifb_reg_set(P3c4, 0x16, 0x05);
- xgifb_reg_set(P3c4, 0x16, 0x85);
-
- usleep_range(200, 1200);
-}
-
-static void XGINew_DDR1x_MRS_XG20(unsigned long P3c4,
- struct vb_device_info *pVBInfo)
-{
- xgifb_reg_set(P3c4, 0x18, 0x01);
- xgifb_reg_set(P3c4, 0x19, 0x40);
- xgifb_reg_set(P3c4, 0x16, 0x00);
- xgifb_reg_set(P3c4, 0x16, 0x80);
- usleep_range(60, 1060);
-
- xgifb_reg_set(P3c4, 0x18, 0x00);
- xgifb_reg_set(P3c4, 0x19, 0x40);
- xgifb_reg_set(P3c4, 0x16, 0x00);
- xgifb_reg_set(P3c4, 0x16, 0x80);
- usleep_range(60, 1060);
- xgifb_reg_set(P3c4, 0x18, pVBInfo->SR18[pVBInfo->ram_type]); /* SR18 */
- xgifb_reg_set(P3c4, 0x19, 0x01);
- xgifb_reg_set(P3c4, 0x16, 0x03);
- xgifb_reg_set(P3c4, 0x16, 0x83);
- usleep_range(1, 1001);
- xgifb_reg_set(P3c4, 0x1B, 0x03);
- usleep_range(500, 1500);
- xgifb_reg_set(P3c4, 0x18, pVBInfo->SR18[pVBInfo->ram_type]); /* SR18 */
- xgifb_reg_set(P3c4, 0x19, 0x00);
- xgifb_reg_set(P3c4, 0x16, 0x03);
- xgifb_reg_set(P3c4, 0x16, 0x83);
- xgifb_reg_set(P3c4, 0x1B, 0x00);
-}
-
-static void XGINew_DDR1x_DefaultRegister(
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned long Port, struct vb_device_info *pVBInfo)
-{
- unsigned long P3d4 = Port, P3c4 = Port - 0x10;
-
- if (HwDeviceExtension->jChipType >= XG20) {
- XGINew_SetMemoryClock(pVBInfo);
- xgifb_reg_set(P3d4,
- 0x82,
- pVBInfo->CR40[11][pVBInfo->ram_type]); /* CR82 */
- xgifb_reg_set(P3d4,
- 0x85,
- pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR85 */
- xgifb_reg_set(P3d4,
- 0x86,
- pVBInfo->CR40[13][pVBInfo->ram_type]); /* CR86 */
-
- xgifb_reg_set(P3d4, 0x98, 0x01);
- xgifb_reg_set(P3d4, 0x9A, 0x02);
-
- XGINew_DDR1x_MRS_XG20(P3c4, pVBInfo);
- } else {
- XGINew_SetMemoryClock(pVBInfo);
-
- switch (HwDeviceExtension->jChipType) {
- case XG42:
- /* CR82 */
- xgifb_reg_set(P3d4,
- 0x82,
- pVBInfo->CR40[11][pVBInfo->ram_type]);
- /* CR85 */
- xgifb_reg_set(P3d4,
- 0x85,
- pVBInfo->CR40[12][pVBInfo->ram_type]);
- /* CR86 */
- xgifb_reg_set(P3d4,
- 0x86,
- pVBInfo->CR40[13][pVBInfo->ram_type]);
- break;
- default:
- xgifb_reg_set(P3d4, 0x82, 0x88);
- xgifb_reg_set(P3d4, 0x86, 0x00);
- /* Insert read command for delay */
- xgifb_reg_get(P3d4, 0x86);
- xgifb_reg_set(P3d4, 0x86, 0x88);
- xgifb_reg_get(P3d4, 0x86);
- xgifb_reg_set(P3d4,
- 0x86,
- pVBInfo->CR40[13][pVBInfo->ram_type]);
- xgifb_reg_set(P3d4, 0x82, 0x77);
- xgifb_reg_set(P3d4, 0x85, 0x00);
-
- /* Insert read command for delay */
- xgifb_reg_get(P3d4, 0x85);
- xgifb_reg_set(P3d4, 0x85, 0x88);
-
- /* Insert read command for delay */
- xgifb_reg_get(P3d4, 0x85);
- /* CR85 */
- xgifb_reg_set(P3d4,
- 0x85,
- pVBInfo->CR40[12][pVBInfo->ram_type]);
- /* CR82 */
- xgifb_reg_set(P3d4,
- 0x82,
- pVBInfo->CR40[11][pVBInfo->ram_type]);
- break;
- }
-
- xgifb_reg_set(P3d4, 0x97, 0x00);
- xgifb_reg_set(P3d4, 0x98, 0x01);
- xgifb_reg_set(P3d4, 0x9A, 0x02);
- XGINew_DDR1x_MRS_340(P3c4, pVBInfo);
- }
-}
-
-static void XGINew_DDR2_DefaultRegister(
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned long Port, struct vb_device_info *pVBInfo)
-{
- unsigned long P3d4 = Port, P3c4 = Port - 0x10;
- /*
- * keep following setting sequence, each setting in
- * the same reg insert idle
- */
- xgifb_reg_set(P3d4, 0x82, 0x77);
- xgifb_reg_set(P3d4, 0x86, 0x00);
- xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
- xgifb_reg_set(P3d4, 0x86, 0x88);
- xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
- /* CR86 */
- xgifb_reg_set(P3d4, 0x86, pVBInfo->CR40[13][pVBInfo->ram_type]);
- xgifb_reg_set(P3d4, 0x82, 0x77);
- xgifb_reg_set(P3d4, 0x85, 0x00);
- xgifb_reg_get(P3d4, 0x85); /* Insert read command for delay */
- xgifb_reg_set(P3d4, 0x85, 0x88);
- xgifb_reg_get(P3d4, 0x85); /* Insert read command for delay */
- xgifb_reg_set(P3d4,
- 0x85,
- pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR85 */
- if (HwDeviceExtension->jChipType == XG27)
- /* CR82 */
- xgifb_reg_set(P3d4, 0x82, pVBInfo->CR40[11][pVBInfo->ram_type]);
- else
- xgifb_reg_set(P3d4, 0x82, 0xA8); /* CR82 */
-
- xgifb_reg_set(P3d4, 0x98, 0x01);
- xgifb_reg_set(P3d4, 0x9A, 0x02);
- if (HwDeviceExtension->jChipType == XG27)
- XGINew_DDRII_Bootup_XG27(HwDeviceExtension, P3c4, pVBInfo);
- else
- XGINew_DDR2_MRS_XG20(HwDeviceExtension, P3c4, pVBInfo);
-}
-
-static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg,
- u8 shift_factor, u8 mask1, u8 mask2)
-{
- u8 j;
-
- for (j = 0; j < 4; j++) {
- temp2 |= (((seed >> (2 * j)) & 0x03) << shift_factor);
- xgifb_reg_set(P3d4, reg, temp2);
- xgifb_reg_get(P3d4, reg);
- temp2 &= mask1;
- temp2 += mask2;
- }
-}
-
-static void XGINew_SetDRAMDefaultRegister340(
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned long Port, struct vb_device_info *pVBInfo)
-{
- unsigned char temp, temp1, temp2, temp3, j, k;
-
- unsigned long P3d4 = Port, P3c4 = Port - 0x10;
-
- xgifb_reg_set(P3d4, 0x6D, pVBInfo->CR40[8][pVBInfo->ram_type]);
- xgifb_reg_set(P3d4, 0x68, pVBInfo->CR40[5][pVBInfo->ram_type]);
- xgifb_reg_set(P3d4, 0x69, pVBInfo->CR40[6][pVBInfo->ram_type]);
- xgifb_reg_set(P3d4, 0x6A, pVBInfo->CR40[7][pVBInfo->ram_type]);
-
- /* CR6B DQS fine tune delay */
- temp = 0xaa;
- XGI_SetDRAM_Helper(P3d4, temp, 0, 0x6B, 2, 0xF0, 0x10);
-
- /* CR6E DQM fine tune delay */
- XGI_SetDRAM_Helper(P3d4, 0, 0, 0x6E, 2, 0xF0, 0x10);
-
- temp3 = 0;
- for (k = 0; k < 4; k++) {
- /* CR6E_D[1:0] select channel */
- xgifb_reg_and_or(P3d4, 0x6E, 0xFC, temp3);
- XGI_SetDRAM_Helper(P3d4, 0, 0, 0x6F, 0, 0xF8, 0x08);
- temp3 += 0x01;
- }
-
- xgifb_reg_set(P3d4,
- 0x80,
- pVBInfo->CR40[9][pVBInfo->ram_type]); /* CR80 */
- xgifb_reg_set(P3d4,
- 0x81,
- pVBInfo->CR40[10][pVBInfo->ram_type]); /* CR81 */
-
- temp2 = 0x80;
- /* CR89 terminator type select */
- XGI_SetDRAM_Helper(P3d4, 0, temp2, 0x89, 0, 0xF0, 0x10);
-
- temp = 0;
- temp1 = temp & 0x03;
- temp2 |= temp1;
- xgifb_reg_set(P3d4, 0x89, temp2);
-
- temp = pVBInfo->CR40[3][pVBInfo->ram_type];
- temp1 = temp & 0x0F;
- temp2 = (temp >> 4) & 0x07;
- temp3 = temp & 0x80;
- xgifb_reg_set(P3d4, 0x45, temp1); /* CR45 */
- xgifb_reg_set(P3d4, 0x99, temp2); /* CR99 */
- xgifb_reg_or(P3d4, 0x40, temp3); /* CR40_D[7] */
- xgifb_reg_set(P3d4,
- 0x41,
- pVBInfo->CR40[0][pVBInfo->ram_type]); /* CR41 */
-
- if (HwDeviceExtension->jChipType == XG27)
- xgifb_reg_set(P3d4, 0x8F, XG27_CR8F); /* CR8F */
-
- for (j = 0; j <= 6; j++) /* CR90 - CR96 */
- xgifb_reg_set(P3d4, (0x90 + j),
- pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
-
- for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */
- xgifb_reg_set(P3d4, (0xC3 + j),
- pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
-
- for (j = 0; j < 2; j++) /* CR8A - CR8B */
- xgifb_reg_set(P3d4, (0x8A + j),
- pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
-
- if (HwDeviceExtension->jChipType == XG42)
- xgifb_reg_set(P3d4, 0x8C, 0x87);
-
- xgifb_reg_set(P3d4,
- 0x59,
- pVBInfo->CR40[4][pVBInfo->ram_type]); /* CR59 */
-
- xgifb_reg_set(P3d4, 0x83, 0x09); /* CR83 */
- xgifb_reg_set(P3d4, 0x87, 0x00); /* CR87 */
- xgifb_reg_set(P3d4, 0xCF, XG40_CRCF); /* CRCF */
- if (pVBInfo->ram_type) {
- xgifb_reg_set(P3c4, 0x17, 0x80); /* SR17 DDRII */
- if (HwDeviceExtension->jChipType == XG27)
- xgifb_reg_set(P3c4, 0x17, 0x02); /* SR17 DDRII */
-
- } else {
- xgifb_reg_set(P3c4, 0x17, 0x00); /* SR17 DDR */
- }
- xgifb_reg_set(P3c4, 0x1A, 0x87); /* SR1A */
-
- temp = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
- if (temp == 0) {
- XGINew_DDR1x_DefaultRegister(HwDeviceExtension, P3d4, pVBInfo);
- } else {
- xgifb_reg_set(P3d4, 0xB0, 0x80); /* DDRII Dual frequency mode */
- XGINew_DDR2_DefaultRegister(HwDeviceExtension, P3d4, pVBInfo);
- }
- xgifb_reg_set(P3c4, 0x1B, 0x03); /* SR1B */
-}
-
-static unsigned short XGINew_SetDRAMSize20Reg(
- unsigned short dram_size,
- struct vb_device_info *pVBInfo)
-{
- unsigned short data = 0, memsize = 0;
- int RankSize;
- unsigned char ChannelNo;
-
- RankSize = dram_size * pVBInfo->ram_bus / 8;
- data = xgifb_reg_get(pVBInfo->P3c4, 0x13);
- data &= 0x80;
-
- if (data == 0x80)
- RankSize *= 2;
-
- data = 0;
-
- if (pVBInfo->ram_channel == 3)
- ChannelNo = 4;
- else
- ChannelNo = pVBInfo->ram_channel;
-
- if (ChannelNo * RankSize <= 256) {
- while ((RankSize >>= 1) > 0)
- data += 0x10;
-
- memsize = data >> 4;
-
- /* Fix DRAM Sizing Error */
- xgifb_reg_set(pVBInfo->P3c4,
- 0x14,
- (xgifb_reg_get(pVBInfo->P3c4, 0x14) & 0x0F) |
- (data & 0xF0));
- usleep_range(15, 1015);
- }
- return memsize;
-}
-
-static int XGINew_ReadWriteRest(unsigned short StopAddr,
- unsigned short StartAddr,
- struct vb_device_info *pVBInfo)
-{
- int i;
- unsigned long Position = 0;
- void __iomem *fbaddr = pVBInfo->FBAddr;
-
- writel(Position, fbaddr + Position);
-
- for (i = StartAddr; i <= StopAddr; i++) {
- Position = 1 << i;
- writel(Position, fbaddr + Position);
- }
-
- /* Fix #1759 Memory Size error in Multi-Adapter. */
- usleep_range(500, 1500);
-
- Position = 0;
-
- if (readl(fbaddr + Position) != Position)
- return 0;
-
- for (i = StartAddr; i <= StopAddr; i++) {
- Position = 1 << i;
- if (readl(fbaddr + Position) != Position)
- return 0;
- }
- return 1;
-}
-
-static unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo)
-{
- unsigned char data;
-
- data = xgifb_reg_get(pVBInfo->P3d4, 0x97);
-
- if ((data & 0x10) == 0) {
- data = xgifb_reg_get(pVBInfo->P3c4, 0x39);
- return (data & 0x02) >> 1;
- }
- return data & 0x01;
-}
-
-static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned char data;
-
- switch (HwDeviceExtension->jChipType) {
- case XG20:
- case XG21:
- data = xgifb_reg_get(pVBInfo->P3d4, 0x97);
- data = data & 0x01;
- pVBInfo->ram_channel = 1; /* XG20 "JUST" one channel */
-
- if (data == 0) { /* Single_32_16 */
-
- if ((HwDeviceExtension->ulVideoMemorySize - 1)
- > 0x1000000) {
- pVBInfo->ram_bus = 32; /* 32 bits */
- /* 22bit + 2 rank + 32bit */
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52);
- usleep_range(15, 1015);
-
- if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
- return;
-
- if ((HwDeviceExtension->ulVideoMemorySize - 1) >
- 0x800000) {
- /* 22bit + 1 rank + 32bit */
- xgifb_reg_set(pVBInfo->P3c4,
- 0x13,
- 0x31);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x14,
- 0x42);
- usleep_range(15, 1015);
-
- if (XGINew_ReadWriteRest(23,
- 23,
- pVBInfo) == 1)
- return;
- }
- }
-
- if ((HwDeviceExtension->ulVideoMemorySize - 1) >
- 0x800000) {
- pVBInfo->ram_bus = 16; /* 16 bits */
- /* 22bit + 2 rank + 16bit */
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
- usleep_range(15, 1015);
-
- if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
- return;
- xgifb_reg_set(pVBInfo->P3c4,
- 0x13,
- 0x31);
- usleep_range(15, 1015);
- }
-
- } else { /* Dual_16_8 */
- if ((HwDeviceExtension->ulVideoMemorySize - 1) >
- 0x800000) {
- pVBInfo->ram_bus = 16; /* 16 bits */
- /* (0x31:12x8x2) 22bit + 2 rank */
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
- /* 0x41:16Mx16 bit */
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
- usleep_range(15, 1015);
-
- if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
- return;
-
- if ((HwDeviceExtension->ulVideoMemorySize - 1) >
- 0x400000) {
- /* (0x31:12x8x2) 22bit + 1 rank */
- xgifb_reg_set(pVBInfo->P3c4,
- 0x13,
- 0x31);
- /* 0x31:8Mx16 bit */
- xgifb_reg_set(pVBInfo->P3c4,
- 0x14,
- 0x31);
- usleep_range(15, 1015);
-
- if (XGINew_ReadWriteRest(22,
- 22,
- pVBInfo) == 1)
- return;
- }
- }
-
- if ((HwDeviceExtension->ulVideoMemorySize - 1) >
- 0x400000) {
- pVBInfo->ram_bus = 8; /* 8 bits */
- /* (0x31:12x8x2) 22bit + 2 rank */
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
- /* 0x30:8Mx8 bit */
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
- usleep_range(15, 1015);
-
- if (XGINew_ReadWriteRest(22, 21, pVBInfo) == 1)
- return;
-
- /* (0x31:12x8x2) 22bit + 1 rank */
- xgifb_reg_set(pVBInfo->P3c4,
- 0x13,
- 0x31);
- usleep_range(15, 1015);
- }
- }
- break;
-
- case XG27:
- pVBInfo->ram_bus = 16; /* 16 bits */
- pVBInfo->ram_channel = 1; /* Single channel */
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit */
- break;
- case XG42:
- /*
- * XG42 SR14 D[3] Reserve
- * D[2] = 1, Dual Channel
- * = 0, Single Channel
- *
- * It's Different from Other XG40 Series.
- */
- if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */
- pVBInfo->ram_bus = 32; /* 32 bits */
- pVBInfo->ram_channel = 2; /* 2 Channel */
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x44);
-
- if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
- return;
-
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x34);
- if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
- return;
-
- pVBInfo->ram_channel = 1; /* Single Channel */
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x40);
-
- if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1)
- return;
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
- } else { /* DDR */
- pVBInfo->ram_bus = 64; /* 64 bits */
- pVBInfo->ram_channel = 1; /* 1 channels */
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52);
-
- if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
- return;
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x42);
- }
-
- break;
-
- default: /* XG40 */
-
- if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII */
- pVBInfo->ram_bus = 32; /* 32 bits */
- pVBInfo->ram_channel = 3;
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4C);
-
- if (XGINew_ReadWriteRest(25, 23, pVBInfo) == 1)
- return;
-
- pVBInfo->ram_channel = 2; /* 2 channels */
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x48);
-
- if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1)
- return;
-
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x3C);
-
- if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1) {
- pVBInfo->ram_channel = 3; /* 4 channels */
- } else {
- pVBInfo->ram_channel = 2; /* 2 channels */
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x38);
- }
- } else { /* DDR */
- pVBInfo->ram_bus = 64; /* 64 bits */
- pVBInfo->ram_channel = 2; /* 2 channels */
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x5A);
-
- if (XGINew_ReadWriteRest(25, 24, pVBInfo) == 1)
- return;
- xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21);
- xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4A);
- }
- break;
- }
-}
-
-static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- u8 i, size;
- unsigned short memsize, start_addr;
- const unsigned short (*dram_table)[2];
-
- xgifb_reg_set(pVBInfo->P3c4, 0x15, 0x00); /* noninterleaving */
- xgifb_reg_set(pVBInfo->P3c4, 0x1C, 0x00); /* nontiling */
- XGINew_CheckChannel(HwDeviceExtension, pVBInfo);
-
- if (HwDeviceExtension->jChipType >= XG20) {
- dram_table = XGINew_DDRDRAM_TYPE20;
- size = ARRAY_SIZE(XGINew_DDRDRAM_TYPE20);
- start_addr = 5;
- } else {
- dram_table = XGINew_DDRDRAM_TYPE340;
- size = ARRAY_SIZE(XGINew_DDRDRAM_TYPE340);
- start_addr = 9;
- }
-
- for (i = 0; i < size; i++) {
- /* SetDRAMSizingType */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x13, 0x80, dram_table[i][1]);
- usleep_range(50, 1050); /* should delay 50 ns */
-
- memsize = XGINew_SetDRAMSize20Reg(dram_table[i][0], pVBInfo);
-
- if (memsize == 0)
- continue;
-
- memsize += (pVBInfo->ram_channel - 2) + 20;
- if ((HwDeviceExtension->ulVideoMemorySize - 1) <
- (unsigned long)(1 << memsize))
- continue;
-
- if (XGINew_ReadWriteRest(memsize, start_addr, pVBInfo) == 1)
- return 1;
- }
- return 0;
-}
-
-static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short data;
-
- pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
-
- XGISetModeNew(xgifb_info, HwDeviceExtension, 0x2e);
-
- data = xgifb_reg_get(pVBInfo->P3c4, 0x21);
- /* disable read cache */
- xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short)(data & 0xDF));
- XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
-
- XGINew_DDRSizing340(HwDeviceExtension, pVBInfo);
- data = xgifb_reg_get(pVBInfo->P3c4, 0x21);
- /* enable read cache */
- xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short)(data | 0x20));
-}
-
-static u8 *xgifb_copy_rom(struct pci_dev *dev, size_t *rom_size)
-{
- void __iomem *rom_address;
- u8 *rom_copy;
-
- rom_address = pci_map_rom(dev, rom_size);
- if (!rom_address)
- return NULL;
-
- rom_copy = vzalloc(XGIFB_ROM_SIZE);
- if (!rom_copy)
- goto done;
-
- *rom_size = min_t(size_t, *rom_size, XGIFB_ROM_SIZE);
- memcpy_fromio(rom_copy, rom_address, *rom_size);
-
-done:
- pci_unmap_rom(dev, rom_address);
- return rom_copy;
-}
-
-static bool xgifb_read_vbios(struct pci_dev *pdev)
-{
- struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
- u8 *vbios;
- unsigned long i;
- unsigned char j;
- struct XGI21_LVDSCapStruct *lvds;
- size_t vbios_size;
- int entry;
-
- vbios = xgifb_copy_rom(pdev, &vbios_size);
- if (!vbios) {
- dev_err(&pdev->dev, "Video BIOS not available\n");
- return false;
- }
- if (vbios_size <= 0x65)
- goto error;
- /*
- * The user can ignore the LVDS bit in the BIOS and force the display
- * type.
- */
- if (!(vbios[0x65] & 0x1) &&
- (!xgifb_info->display2_force ||
- xgifb_info->display2 != XGIFB_DISP_LCD)) {
- vfree(vbios);
- return false;
- }
- if (vbios_size <= 0x317)
- goto error;
- i = vbios[0x316] | (vbios[0x317] << 8);
- if (vbios_size <= i - 1)
- goto error;
- j = vbios[i - 1];
- if (j == 0)
- goto error;
- if (j == 0xff)
- j = 1;
-
- /* Read the LVDS table index scratch register set by the BIOS. */
-
- entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
- if (entry >= j)
- entry = 0;
- i += entry * 25;
- lvds = &xgifb_info->lvds_data;
- if (vbios_size <= i + 24)
- goto error;
- lvds->LVDS_Capability = vbios[i] | (vbios[i + 1] << 8);
- lvds->LVDSHT = vbios[i + 2] | (vbios[i + 3] << 8);
- lvds->LVDSVT = vbios[i + 4] | (vbios[i + 5] << 8);
- lvds->LVDSHDE = vbios[i + 6] | (vbios[i + 7] << 8);
- lvds->LVDSVDE = vbios[i + 8] | (vbios[i + 9] << 8);
- lvds->LVDSHFP = vbios[i + 10] | (vbios[i + 11] << 8);
- lvds->LVDSVFP = vbios[i + 12] | (vbios[i + 13] << 8);
- lvds->LVDSHSYNC = vbios[i + 14] | (vbios[i + 15] << 8);
- lvds->LVDSVSYNC = vbios[i + 16] | (vbios[i + 17] << 8);
- lvds->VCLKData1 = vbios[i + 18];
- lvds->VCLKData2 = vbios[i + 19];
- lvds->PSC_S1 = vbios[i + 20];
- lvds->PSC_S2 = vbios[i + 21];
- lvds->PSC_S3 = vbios[i + 22];
- lvds->PSC_S4 = vbios[i + 23];
- lvds->PSC_S5 = vbios[i + 24];
- vfree(vbios);
- return true;
-error:
- dev_err(&pdev->dev, "Video BIOS corrupted\n");
- vfree(vbios);
- return false;
-}
-
-static void XGINew_ChkSenseStatus(struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx = 0, temp, tempcx, CR3CData;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x32);
-
- if (temp & Monitor1Sense)
- tempbx |= ActiveCRT1;
- if (temp & LCDSense)
- tempbx |= ActiveLCD;
- if (temp & Monitor2Sense)
- tempbx |= ActiveCRT2;
- if (temp & TVSense) {
- tempbx |= ActiveTV;
- if (temp & AVIDEOSense)
- tempbx |= (ActiveAVideo << 8);
- if (temp & SVIDEOSense)
- tempbx |= (ActiveSVideo << 8);
- if (temp & SCARTSense)
- tempbx |= (ActiveSCART << 8);
- if (temp & HiTVSense)
- tempbx |= (ActiveHiTV << 8);
- if (temp & YPbPrSense)
- tempbx |= (ActiveYPbPr << 8);
- }
-
- tempcx = xgifb_reg_get(pVBInfo->P3d4, 0x3d);
- tempcx |= (xgifb_reg_get(pVBInfo->P3d4, 0x3e) << 8);
-
- if (tempbx & tempcx) {
- CR3CData = xgifb_reg_get(pVBInfo->P3d4, 0x3c);
- if (!(CR3CData & DisplayDeviceFromCMOS))
- tempcx = 0x1FF0;
- } else {
- tempcx = 0x1FF0;
- }
-
- tempbx &= tempcx;
- xgifb_reg_set(pVBInfo->P3d4, 0x3d, (tempbx & 0x00FF));
- xgifb_reg_set(pVBInfo->P3d4, 0x3e, ((tempbx & 0xFF00) >> 8));
-}
-
-static void XGINew_SetModeScratch(struct vb_device_info *pVBInfo)
-{
- unsigned short temp, tempcl = 0, tempch = 0, CR31Data, CR38Data;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x3d);
- temp |= xgifb_reg_get(pVBInfo->P3d4, 0x3e) << 8;
- temp |= (xgifb_reg_get(pVBInfo->P3d4, 0x31) & (DriverMode >> 8)) << 8;
-
- if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
- if (temp & ActiveCRT2)
- tempcl = SetCRT2ToRAMDAC;
- }
-
- if (temp & ActiveLCD) {
- tempcl |= SetCRT2ToLCD;
- if (temp & DriverMode) {
- if (temp & ActiveTV) {
- tempch = SetToLCDA | EnableDualEdge;
- temp ^= SetCRT2ToLCD;
-
- if ((temp >> 8) & ActiveAVideo)
- tempcl |= SetCRT2ToAVIDEO;
- if ((temp >> 8) & ActiveSVideo)
- tempcl |= SetCRT2ToSVIDEO;
- if ((temp >> 8) & ActiveSCART)
- tempcl |= SetCRT2ToSCART;
-
- if (pVBInfo->IF_DEF_HiVision == 1) {
- if ((temp >> 8) & ActiveHiTV)
- tempcl |= SetCRT2ToHiVision;
- }
-
- if (pVBInfo->IF_DEF_YPbPr == 1) {
- if ((temp >> 8) & ActiveYPbPr)
- tempch |= SetYPbPr;
- }
- }
- }
- } else {
- if ((temp >> 8) & ActiveAVideo)
- tempcl |= SetCRT2ToAVIDEO;
- if ((temp >> 8) & ActiveSVideo)
- tempcl |= SetCRT2ToSVIDEO;
- if ((temp >> 8) & ActiveSCART)
- tempcl |= SetCRT2ToSCART;
-
- if (pVBInfo->IF_DEF_HiVision == 1) {
- if ((temp >> 8) & ActiveHiTV)
- tempcl |= SetCRT2ToHiVision;
- }
-
- if (pVBInfo->IF_DEF_YPbPr == 1) {
- if ((temp >> 8) & ActiveYPbPr)
- tempch |= SetYPbPr;
- }
- }
-
- tempcl |= SetSimuScanMode;
- if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) ||
- (temp & ActiveTV) ||
- (temp & ActiveCRT2)))
- tempcl ^= (SetSimuScanMode | SwitchCRT2);
- if ((temp & ActiveLCD) && (temp & ActiveTV))
- tempcl ^= (SetSimuScanMode | SwitchCRT2);
- xgifb_reg_set(pVBInfo->P3d4, 0x30, tempcl);
-
- CR31Data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
- CR31Data &= ~(SetNotSimuMode >> 8);
- if (!(temp & ActiveCRT1))
- CR31Data |= (SetNotSimuMode >> 8);
- CR31Data &= ~(DisableCRT2Display >> 8);
- if (!((temp & ActiveLCD) || (temp & ActiveTV) || (temp & ActiveCRT2)))
- CR31Data |= (DisableCRT2Display >> 8);
- xgifb_reg_set(pVBInfo->P3d4, 0x31, CR31Data);
-
- CR38Data = xgifb_reg_get(pVBInfo->P3d4, 0x38);
- CR38Data &= ~SetYPbPr;
- CR38Data |= tempch;
- xgifb_reg_set(pVBInfo->P3d4, 0x38, CR38Data);
-}
-
-static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
- *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp = HwDeviceExtension->ulCRT2LCDType;
-
- switch (HwDeviceExtension->ulCRT2LCDType) {
- case LCD_640x480:
- case LCD_1024x600:
- case LCD_1152x864:
- case LCD_1280x960:
- case LCD_1152x768:
- case LCD_1920x1440:
- case LCD_2048x1536:
- temp = 0; /* overwrite used ulCRT2LCDType */
- break;
- case LCD_UNKNOWN: /* unknown lcd, do nothing */
- return 0;
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
- return 1;
-}
-
-static void XGINew_GetXG21Sense(struct pci_dev *pdev,
- struct vb_device_info *pVBInfo)
-{
- struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
- unsigned char Temp;
-
- if (xgifb_read_vbios(pdev)) { /* For XG21 LVDS */
- xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
- /* LVDS on chip */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
- } else {
- /* Enable GPIOA/B read */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
- Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0;
- if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */
- XGINew_SenseLCD(&xgifb_info->hw_info, pVBInfo);
- xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
- /* Enable read GPIOF */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x20, 0x20);
- if (xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x04)
- Temp = 0xA0; /* Only DVO on chip */
- else
- Temp = 0x80; /* TMDS on chip */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, Temp);
- /* Disable read GPIOF */
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x20);
- }
- }
-}
-
-static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo)
-{
- unsigned char Temp, bCR4A;
-
- bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- /* Enable GPIOA/B/C read */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07);
- Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07;
- xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
-
- if (Temp <= 0x02) {
- /* LVDS setting */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
- xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x21);
- } else {
- /* TMDS/DVO setting */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xA0);
- }
- xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
-}
-
-static unsigned char GetXG21FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char CR38, CR4A, temp;
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- /* enable GPIOE read */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x10, 0x10);
- CR38 = xgifb_reg_get(pVBInfo->P3d4, 0x38);
- temp = 0;
- if ((CR38 & 0xE0) > 0x80) {
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
- temp &= 0x08;
- temp >>= 3;
- }
-
- xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A);
-
- return temp;
-}
-
-static unsigned char GetXG27FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- /* enable GPIOA/B/C read */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
- if (temp > 2)
- temp = ((temp & 0x04) >> 1) | ((~temp) & 0x01);
-
- xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A);
-
- return temp;
-}
-
-static bool xgifb_bridge_is_on(struct vb_device_info *vb_info)
-{
- u8 flag;
-
- flag = xgifb_reg_get(vb_info->Part4Port, 0x00);
- return flag == 1 || flag == 2;
-}
-
-unsigned char XGIInitNew(struct pci_dev *pdev)
-{
- struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
- struct xgi_hw_device_info *HwDeviceExtension = &xgifb_info->hw_info;
- struct vb_device_info VBINF;
- struct vb_device_info *pVBInfo = &VBINF;
- unsigned char i, temp = 0, temp1;
-
- pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
-
- if (!pVBInfo->FBAddr) {
- dev_dbg(&pdev->dev, "pVBInfo->FBAddr == 0\n");
- return 0;
- }
-
- XGIRegInit(pVBInfo, xgifb_info->vga_base);
-
- outb(0x67, pVBInfo->P3c2);
-
- InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
-
- /* Openkey */
- xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
-
- /* GetXG21Sense (GPIO) */
- if (HwDeviceExtension->jChipType == XG21)
- XGINew_GetXG21Sense(pdev, pVBInfo);
-
- if (HwDeviceExtension->jChipType == XG27)
- XGINew_GetXG27Sense(pVBInfo);
-
- /* Reset Extended register */
-
- for (i = 0x06; i < 0x20; i++)
- xgifb_reg_set(pVBInfo->P3c4, i, 0);
-
- for (i = 0x21; i <= 0x27; i++)
- xgifb_reg_set(pVBInfo->P3c4, i, 0);
-
- for (i = 0x31; i <= 0x3B; i++)
- xgifb_reg_set(pVBInfo->P3c4, i, 0);
-
- /* Auto over driver for XG42 */
- if (HwDeviceExtension->jChipType == XG42)
- xgifb_reg_set(pVBInfo->P3c4, 0x3B, 0xC0);
-
- for (i = 0x79; i <= 0x7C; i++)
- xgifb_reg_set(pVBInfo->P3d4, i, 0);
-
- if (HwDeviceExtension->jChipType >= XG20)
- xgifb_reg_set(pVBInfo->P3d4, 0x97, pVBInfo->XGINew_CR97);
-
- /* SetDefExt1Regs begin */
- xgifb_reg_set(pVBInfo->P3c4, 0x07, XGI330_SR07);
- if (HwDeviceExtension->jChipType == XG27) {
- xgifb_reg_set(pVBInfo->P3c4, 0x40, XG27_SR40);
- xgifb_reg_set(pVBInfo->P3c4, 0x41, XG27_SR41);
- }
- xgifb_reg_set(pVBInfo->P3c4, 0x11, 0x0F);
- xgifb_reg_set(pVBInfo->P3c4, 0x1F, XGI330_SR1F);
- /* Frame buffer can read/write SR20 */
- xgifb_reg_set(pVBInfo->P3c4, 0x20, 0xA0);
- /* H/W request for slow corner chip */
- xgifb_reg_set(pVBInfo->P3c4, 0x36, 0x70);
- if (HwDeviceExtension->jChipType == XG27)
- xgifb_reg_set(pVBInfo->P3c4, 0x36, XG27_SR36);
-
- if (HwDeviceExtension->jChipType < XG20) {
- u32 Temp;
-
- /* Set AGP customize registers (in SetDefAGPRegs) Start */
- for (i = 0x47; i <= 0x4C; i++)
- xgifb_reg_set(pVBInfo->P3d4,
- i,
- XGI340_AGPReg[i - 0x47]);
-
- for (i = 0x70; i <= 0x71; i++)
- xgifb_reg_set(pVBInfo->P3d4,
- i,
- XGI340_AGPReg[6 + i - 0x70]);
-
- for (i = 0x74; i <= 0x77; i++)
- xgifb_reg_set(pVBInfo->P3d4,
- i,
- XGI340_AGPReg[8 + i - 0x74]);
-
- pci_read_config_dword(pdev, 0x50, &Temp);
- Temp >>= 20;
- Temp &= 0xF;
-
- if (Temp == 1)
- xgifb_reg_set(pVBInfo->P3d4, 0x48, 0x20); /* CR48 */
- } /* != XG20 */
-
- /* Set PCI */
- xgifb_reg_set(pVBInfo->P3c4, 0x23, XGI330_SR23);
- xgifb_reg_set(pVBInfo->P3c4, 0x24, XGI330_SR24);
- xgifb_reg_set(pVBInfo->P3c4, 0x25, 0);
-
- if (HwDeviceExtension->jChipType < XG20) {
- /* Set VB */
- XGI_UnLockCRT2(pVBInfo);
- /* disable VideoCapture */
- xgifb_reg_and_or(pVBInfo->Part0Port, 0x3F, 0xEF, 0x00);
- xgifb_reg_set(pVBInfo->Part1Port, 0x00, 0x00);
- /* chk if BCLK>=100MHz */
- temp1 = xgifb_reg_get(pVBInfo->P3d4, 0x7B);
-
- xgifb_reg_set(pVBInfo->Part1Port,
- 0x02, XGI330_CRT2Data_1_2);
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x2E, 0x08); /* use VB */
- } /* != XG20 */
-
- xgifb_reg_set(pVBInfo->P3c4, 0x27, 0x1F);
-
- if ((HwDeviceExtension->jChipType == XG42) &&
- XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo) != 0) {
- /* Not DDR */
- xgifb_reg_set(pVBInfo->P3c4,
- 0x31,
- (XGI330_SR31 & 0x3F) | 0x40);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x32,
- (XGI330_SR32 & 0xFC) | 0x01);
- } else {
- xgifb_reg_set(pVBInfo->P3c4, 0x31, XGI330_SR31);
- xgifb_reg_set(pVBInfo->P3c4, 0x32, XGI330_SR32);
- }
- xgifb_reg_set(pVBInfo->P3c4, 0x33, XGI330_SR33);
-
- if (HwDeviceExtension->jChipType < XG20) {
- if (xgifb_bridge_is_on(pVBInfo)) {
- xgifb_reg_set(pVBInfo->Part2Port, 0x00, 0x1C);
- xgifb_reg_set(pVBInfo->Part4Port,
- 0x0D, XGI330_CRT2Data_4_D);
- xgifb_reg_set(pVBInfo->Part4Port,
- 0x0E, XGI330_CRT2Data_4_E);
- xgifb_reg_set(pVBInfo->Part4Port,
- 0x10, XGI330_CRT2Data_4_10);
- xgifb_reg_set(pVBInfo->Part4Port, 0x0F, 0x3F);
- XGI_LockCRT2(pVBInfo);
- }
- } /* != XG20 */
-
- XGI_SenseCRT1(pVBInfo);
-
- if (HwDeviceExtension->jChipType == XG21) {
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x32,
- ~Monitor1Sense,
- Monitor1Sense); /* Z9 default has CRT */
- temp = GetXG21FPBits(pVBInfo);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~0x01, temp);
- }
- if (HwDeviceExtension->jChipType == XG27) {
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x32,
- ~Monitor1Sense,
- Monitor1Sense); /* Z9 default has CRT */
- temp = GetXG27FPBits(pVBInfo);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~0x03, temp);
- }
-
- pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
-
- XGINew_SetDRAMDefaultRegister340(HwDeviceExtension,
- pVBInfo->P3d4,
- pVBInfo);
-
- XGINew_SetDRAMSize_340(xgifb_info, HwDeviceExtension, pVBInfo);
-
- xgifb_reg_set(pVBInfo->P3c4, 0x22, 0xfa);
- xgifb_reg_set(pVBInfo->P3c4, 0x21, 0xa3);
-
- XGINew_ChkSenseStatus(pVBInfo);
- XGINew_SetModeScratch(pVBInfo);
-
- xgifb_reg_set(pVBInfo->P3d4, 0x8c, 0x87);
-
- return 1;
-} /* end of init */
diff --git a/drivers/staging/xgifb/vb_init.h b/drivers/staging/xgifb/vb_init.h
deleted file mode 100644
index 2f8a70133ebd..000000000000
--- a/drivers/staging/xgifb/vb_init.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VBINIT_
-#define _VBINIT_
-unsigned char XGIInitNew(struct pci_dev *pdev);
-void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr);
-#endif
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
deleted file mode 100644
index 3782f8641bf2..000000000000
--- a/drivers/staging/xgifb/vb_setmode.c
+++ /dev/null
@@ -1,5528 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/delay.h>
-#include "XGIfb.h"
-
-#include "vb_def.h"
-#include "vb_init.h"
-#include "vb_util.h"
-#include "vb_table.h"
-#include "vb_setmode.h"
-
-#define IndexMask 0xff
-#define TVCLKBASE_315_25 (TVCLKBASE_315 + 25)
-
-static const unsigned short XGINew_VGA_DAC[] = {
- 0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
- 0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
- 0x00, 0x05, 0x08, 0x0B, 0x0E, 0x11, 0x14, 0x18,
- 0x1C, 0x20, 0x24, 0x28, 0x2D, 0x32, 0x38, 0x3F,
- 0x00, 0x10, 0x1F, 0x2F, 0x3F, 0x1F, 0x27, 0x2F,
- 0x37, 0x3F, 0x2D, 0x31, 0x36, 0x3A, 0x3F, 0x00,
- 0x07, 0x0E, 0x15, 0x1C, 0x0E, 0x11, 0x15, 0x18,
- 0x1C, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x00, 0x04,
- 0x08, 0x0C, 0x10, 0x08, 0x0A, 0x0C, 0x0E, 0x10,
- 0x0B, 0x0C, 0x0D, 0x0F, 0x10};
-
-void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
-{
- pVBInfo->MCLKData = XGI340New_MCLKData;
-
- pVBInfo->LCDResInfo = 0;
- pVBInfo->LCDTypeInfo = 0;
- pVBInfo->LCDInfo = 0;
- pVBInfo->VBInfo = 0;
- pVBInfo->TVInfo = 0;
-
- pVBInfo->SR18 = XGI340_SR18;
- pVBInfo->CR40 = XGI340_cr41;
-
- if (ChipType < XG20)
- XGI_GetVBType(pVBInfo);
-
- /* 310 customization related */
- if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
- pVBInfo->LCDCapList = XGI_LCDDLCapList;
- else
- pVBInfo->LCDCapList = XGI_LCDCapList;
-
- if (ChipType >= XG20)
- pVBInfo->XGINew_CR97 = 0x10;
-
- if (ChipType == XG27) {
- unsigned char temp;
-
- pVBInfo->MCLKData = XGI27New_MCLKData;
- pVBInfo->CR40 = XGI27_cr41;
- pVBInfo->XGINew_CR97 = 0xc1;
- pVBInfo->SR18 = XG27_SR18;
-
- /* Z11m DDR */
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
- /* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
- if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
- pVBInfo->XGINew_CR97 = 0x80;
- }
-}
-
-static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
-{
- unsigned char SRdata, i;
-
- xgifb_reg_set(pVBInfo->P3c4, 0x00, 0x03); /* Set SR0 */
-
- for (i = 0; i < 4; i++) {
- /* Get SR1,2,3,4 from file */
- /* SR1 is with screen off 0x20 */
- SRdata = XGI330_StandTable.SR[i];
- /* Set SR 1 2 3 4 */
- xgifb_reg_set(pVBInfo->P3c4, i + 1, SRdata);
- }
-}
-
-static void XGI_SetCRTCRegs(struct vb_device_info *pVBInfo)
-{
- unsigned char CRTCdata;
- unsigned short i;
-
- CRTCdata = xgifb_reg_get(pVBInfo->P3d4, 0x11);
- CRTCdata &= 0x7f;
- xgifb_reg_set(pVBInfo->P3d4, 0x11, CRTCdata); /* Unlock CRTC */
-
- for (i = 0; i <= 0x18; i++) {
- /* Get CRTC from file */
- CRTCdata = XGI330_StandTable.CRTC[i];
- xgifb_reg_set(pVBInfo->P3d4, i, CRTCdata); /* Set CRTC(3d4) */
- }
-}
-
-static void XGI_SetATTRegs(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char ARdata;
- unsigned short i, modeflag;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- for (i = 0; i <= 0x13; i++) {
- ARdata = XGI330_StandTable.ATTR[i];
-
- if ((modeflag & Charx8Dot) && i == 0x13) { /* ifndef Dot9 */
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- ARdata = 0;
- } else if ((pVBInfo->VBInfo &
- (SetCRT2ToTV | SetCRT2ToLCD)) &&
- (pVBInfo->VBInfo & SetInSlaveMode)) {
- ARdata = 0;
- }
- }
-
- inb(pVBInfo->P3da); /* reset 3da */
- outb(i, pVBInfo->P3c0); /* set index */
- outb(ARdata, pVBInfo->P3c0); /* set data */
- }
-
- inb(pVBInfo->P3da); /* reset 3da */
- outb(0x14, pVBInfo->P3c0); /* set index */
- outb(0x00, pVBInfo->P3c0); /* set data */
- inb(pVBInfo->P3da); /* Enable Attribute */
- outb(0x20, pVBInfo->P3c0);
-}
-
-static void XGI_SetGRCRegs(struct vb_device_info *pVBInfo)
-{
- unsigned char GRdata;
- unsigned short i;
-
- for (i = 0; i <= 0x08; i++) {
- /* Get GR from file */
- GRdata = XGI330_StandTable.GRC[i];
- xgifb_reg_set(pVBInfo->P3ce, i, GRdata); /* Set GR(3ce) */
- }
-
- if (pVBInfo->ModeType > ModeVGA) {
- GRdata = xgifb_reg_get(pVBInfo->P3ce, 0x05);
- GRdata &= 0xBF; /* 256 color disable */
- xgifb_reg_set(pVBInfo->P3ce, 0x05, GRdata);
- }
-}
-
-static void XGI_ClearExt1Regs(struct vb_device_info *pVBInfo)
-{
- unsigned short i;
-
- for (i = 0x0A; i <= 0x0E; i++)
- xgifb_reg_set(pVBInfo->P3c4, i, 0x00); /* Clear SR0A-SR0E */
-}
-
-static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
-{
- xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, 0x20);
- xgifb_reg_set(pVBInfo->P3c4, 0x2B, XGI_VCLKData[0].SR2B);
- xgifb_reg_set(pVBInfo->P3c4, 0x2C, XGI_VCLKData[0].SR2C);
-
- xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, 0x10);
- xgifb_reg_set(pVBInfo->P3c4, 0x2B, XGI_VCLKData[1].SR2B);
- xgifb_reg_set(pVBInfo->P3c4, 0x2C, XGI_VCLKData[1].SR2C);
-
- xgifb_reg_and(pVBInfo->P3c4, 0x31, ~0x30);
- return 0;
-}
-
-static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- unsigned short *i,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempax, tempbx, resinfo, modeflag, infoflag;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- tempbx = XGI330_RefIndex[RefreshRateTableIndex + (*i)].ModeID;
- tempax = 0;
-
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
- tempax |= SupportRAMDAC2;
-
- if (pVBInfo->VBType & VB_XGI301C)
- tempax |= SupportCRT2in301C;
- }
-
- /* 301b */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- tempax |= SupportLCD;
-
- if (pVBInfo->LCDResInfo != Panel_1280x1024 &&
- pVBInfo->LCDResInfo != Panel_1280x960 &&
- (pVBInfo->LCDInfo & LCDNonExpanding) &&
- resinfo >= 9)
- return 0;
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) { /* for HiTV */
- tempax |= SupportHiVision;
- if ((pVBInfo->VBInfo & SetInSlaveMode) &&
- ((resinfo == 4) ||
- (resinfo == 3 && (pVBInfo->SetFlag & TVSimuMode)) ||
- (resinfo > 7)))
- return 0;
- } else if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO | SetCRT2ToSVIDEO |
- SetCRT2ToSCART | SetCRT2ToYPbPr525750 |
- SetCRT2ToHiVision)) {
- tempax |= SupportTV;
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV |
- VB_SIS302LV | VB_XGI301C))
- tempax |= SupportTV1024;
-
- if (!(pVBInfo->VBInfo & TVSetPAL) &&
- (modeflag & NoSupportSimuTV) &&
- (pVBInfo->VBInfo & SetInSlaveMode) &&
- !(pVBInfo->VBInfo & SetNotSimuMode))
- return 0;
- }
-
- for (; XGI330_RefIndex[RefreshRateTableIndex + (*i)].ModeID ==
- tempbx; (*i)--) {
- infoflag = XGI330_RefIndex[RefreshRateTableIndex + (*i)].Ext_InfoFlag;
- if (infoflag & tempax)
- return 1;
-
- if ((*i) == 0)
- break;
- }
-
- for ((*i) = 0;; (*i)++) {
- infoflag = XGI330_RefIndex[RefreshRateTableIndex + (*i)].Ext_InfoFlag;
- if (XGI330_RefIndex[RefreshRateTableIndex + (*i)].ModeID
- != tempbx) {
- return 0;
- }
-
- if (infoflag & tempax)
- return 1;
- }
- return 1;
-}
-
-static void XGI_SetSync(unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short sync, temp;
-
- /* di+0x00 */
- sync = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag >> 8;
- sync &= 0xC0;
- temp = 0x2F;
- temp |= sync;
- outb(temp, pVBInfo->P3c2); /* Set Misc(3c2) */
-}
-
-static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
- struct xgi_hw_device_info *HwDeviceExtension)
-{
- unsigned char data, data1, pushax;
- unsigned short i, j;
-
- /* unlock cr0-7 */
- data = xgifb_reg_get(pVBInfo->P3d4, 0x11);
- data &= 0x7F;
- xgifb_reg_set(pVBInfo->P3d4, 0x11, data);
-
- data = pVBInfo->TimingH.data[0];
- xgifb_reg_set(pVBInfo->P3d4, 0, data);
-
- for (i = 0x01; i <= 0x04; i++) {
- data = pVBInfo->TimingH.data[i];
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 1), data);
- }
-
- for (i = 0x05; i <= 0x06; i++) {
- data = pVBInfo->TimingH.data[i];
- xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i + 6), data);
- }
-
- j = xgifb_reg_get(pVBInfo->P3c4, 0x0e);
- j &= 0x1F;
- data = pVBInfo->TimingH.data[7];
- data &= 0xE0;
- data |= j;
- xgifb_reg_set(pVBInfo->P3c4, 0x0e, data);
-
- if (HwDeviceExtension->jChipType >= XG20) {
- data = xgifb_reg_get(pVBInfo->P3d4, 0x04);
- data = data - 1;
- xgifb_reg_set(pVBInfo->P3d4, 0x04, data);
- data = xgifb_reg_get(pVBInfo->P3d4, 0x05);
- data1 = data;
- data1 &= 0xE0;
- data &= 0x1F;
- if (data == 0) {
- pushax = data;
- data = xgifb_reg_get(pVBInfo->P3c4, 0x0c);
- data &= 0xFB;
- xgifb_reg_set(pVBInfo->P3c4, 0x0c, data);
- data = pushax;
- }
- data = data - 1;
- data |= data1;
- xgifb_reg_set(pVBInfo->P3d4, 0x05, data);
- data = xgifb_reg_get(pVBInfo->P3c4, 0x0e);
- data >>= 5;
- data = data + 3;
- if (data > 7)
- data = data - 7;
- data <<= 5;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0e, ~0xE0, data);
- }
-}
-
-static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char data;
- unsigned short i, j;
-
- for (i = 0x00; i <= 0x01; i++) {
- data = pVBInfo->TimingV.data[i];
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 6), data);
- }
-
- for (i = 0x02; i <= 0x03; i++) {
- data = pVBInfo->TimingV.data[i];
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 0x0e), data);
- }
-
- for (i = 0x04; i <= 0x05; i++) {
- data = pVBInfo->TimingV.data[i];
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 0x11), data);
- }
-
- j = xgifb_reg_get(pVBInfo->P3c4, 0x0a);
- j &= 0xC0;
- data = pVBInfo->TimingV.data[6];
- data &= 0x3F;
- data |= j;
- xgifb_reg_set(pVBInfo->P3c4, 0x0a, data);
-
- data = pVBInfo->TimingV.data[6];
- data &= 0x80;
- data >>= 2;
-
- i = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- i &= DoubleScanMode;
- if (i)
- data |= 0x80;
-
- j = xgifb_reg_get(pVBInfo->P3d4, 0x09);
- j &= 0x5F;
- data |= j;
- xgifb_reg_set(pVBInfo->P3d4, 0x09, data);
-}
-
-static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo,
- struct xgi_hw_device_info *HwDeviceExtension)
-{
- unsigned char index, data;
- unsigned short i;
-
- /* Get index */
- index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
- index = index & IndexMask;
-
- data = xgifb_reg_get(pVBInfo->P3d4, 0x11);
- data &= 0x7F;
- xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
-
- for (i = 0; i < 8; i++)
- pVBInfo->TimingH.data[i]
- = XGI_CRT1Table[index].CR[i];
-
- for (i = 0; i < 7; i++)
- pVBInfo->TimingV.data[i]
- = XGI_CRT1Table[index].CR[i + 8];
-
- XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
-
- XGI_SetCRT1Timing_V(ModeIdIndex, pVBInfo);
-
- if (pVBInfo->ModeType > 0x03)
- xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F);
-}
-
-/*
- * Function : XGI_SetXG21CRTC
- * Input : Stand or enhance CRTC table
- * Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F
- * Description : Set LCD timing
- */
-static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char index, Tempax, Tempbx, Tempcx, Tempdx;
- unsigned short Temp1, Temp2, Temp3;
-
- index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
- /* Tempax: CR4 HRS */
- Tempax = XGI_CRT1Table[index].CR[3];
- Tempcx = Tempax; /* Tempcx: HRS */
- /* SR2E[7:0]->HRS */
- xgifb_reg_set(pVBInfo->P3c4, 0x2E, Tempax);
-
- Tempdx = XGI_CRT1Table[index].CR[5]; /* SRB */
- Tempdx &= 0xC0; /* Tempdx[7:6]: SRB[7:6] */
- Temp1 = Tempdx; /* Temp1[7:6]: HRS[9:8] */
- Temp1 <<= 2; /* Temp1[9:8]: HRS[9:8] */
- Temp1 |= Tempax; /* Temp1[9:0]: HRS[9:0] */
-
- Tempax = XGI_CRT1Table[index].CR[4]; /* CR5 HRE */
- Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */
-
- Tempbx = XGI_CRT1Table[index].CR[6]; /* SRC */
- Tempbx &= 0x04; /* Tempbx[2]: HRE[5] */
- Tempbx <<= 3; /* Tempbx[5]: HRE[5] */
- Tempax |= Tempbx; /* Tempax[5:0]: HRE[5:0] */
-
- Temp2 = Temp1 & 0x3C0; /* Temp2[9:6]: HRS[9:6] */
- Temp2 |= Tempax; /* Temp2[9:0]: HRE[9:0] */
-
- Tempcx &= 0x3F; /* Tempcx[5:0]: HRS[5:0] */
- if (Tempax < Tempcx) /* HRE < HRS */
- Temp2 |= 0x40; /* Temp2 + 0x40 */
-
- Temp2 &= 0xFF;
- Tempax = (unsigned char)Temp2; /* Tempax: HRE[7:0] */
- Tempax <<= 2; /* Tempax[7:2]: HRE[5:0] */
- Tempdx >>= 6; /* Tempdx[7:6]->[1:0] HRS[9:8] */
- Tempax |= Tempdx; /* HRE[5:0]HRS[9:8] */
- /* SR2F D[7:2]->HRE, D[1:0]->HRS */
- xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax);
- xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
-
- /* CR10 VRS */
- Tempax = XGI_CRT1Table[index].CR[10];
- Tempbx = Tempax; /* Tempbx: VRS */
- Tempax &= 0x01; /* Tempax[0]: VRS[0] */
- xgifb_reg_or(pVBInfo->P3c4, 0x33, Tempax); /* SR33[0]->VRS[0] */
- /* CR7[2][7] VRE */
- Tempax = XGI_CRT1Table[index].CR[9];
- Tempcx = Tempbx >> 1; /* Tempcx[6:0]: VRS[7:1] */
- Tempdx = Tempax & 0x04; /* Tempdx[2]: CR7[2] */
- Tempdx <<= 5; /* Tempdx[7]: VRS[8] */
- Tempcx |= Tempdx; /* Tempcx[7:0]: VRS[8:1] */
- xgifb_reg_set(pVBInfo->P3c4, 0x34, Tempcx); /* SR34[8:1]->VRS */
-
- Temp1 = Tempdx; /* Temp1[7]: Tempdx[7] */
- Temp1 <<= 1; /* Temp1[8]: VRS[8] */
- Temp1 |= Tempbx; /* Temp1[8:0]: VRS[8:0] */
- Tempax &= 0x80;
- Temp2 = Tempax << 2; /* Temp2[9]: VRS[9] */
- Temp1 |= Temp2; /* Temp1[9:0]: VRS[9:0] */
- /* Tempax: SRA */
- Tempax = XGI_CRT1Table[index].CR[14];
- Tempax &= 0x08; /* Tempax[3]: VRS[3] */
- Temp2 = Tempax;
- Temp2 <<= 7; /* Temp2[10]: VRS[10] */
- Temp1 |= Temp2; /* Temp1[10:0]: VRS[10:0] */
-
- /* Tempax: CR11 VRE */
- Tempax = XGI_CRT1Table[index].CR[11];
- Tempax &= 0x0F; /* Tempax[3:0]: VRE[3:0] */
- /* Tempbx: SRA */
- Tempbx = XGI_CRT1Table[index].CR[14];
- Tempbx &= 0x20; /* Tempbx[5]: VRE[5] */
- Tempbx >>= 1; /* Tempbx[4]: VRE[4] */
- Tempax |= Tempbx; /* Tempax[4:0]: VRE[4:0] */
- Temp2 = Temp1 & 0x7E0; /* Temp2[10:5]: VRS[10:5] */
- Temp2 |= Tempax; /* Temp2[10:5]: VRE[10:5] */
-
- Temp3 = Temp1 & 0x1F; /* Temp3[4:0]: VRS[4:0] */
- if (Tempax < Temp3) /* VRE < VRS */
- Temp2 |= 0x20; /* VRE + 0x20 */
-
- Temp2 &= 0xFF;
- Tempax = (unsigned char)Temp2; /* Tempax: VRE[7:0] */
-	Tempax <<= 2; /* Tempax[7:0]: VRE[5:0]00 */
- Temp1 &= 0x600; /* Temp1[10:9]: VRS[10:9] */
- Temp1 >>= 9; /* Temp1[1:0]: VRS[10:9] */
- Tempbx = (unsigned char)Temp1;
- Tempax |= Tempbx; /* Tempax[7:0]: VRE[5:0]VRS[10:9] */
- Tempax &= 0x7F;
- /* SR3F D[7:2]->VRE D[1:0]->VRS */
- xgifb_reg_set(pVBInfo->P3c4, 0x3F, Tempax);
-}
-
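/*
 * Illustrative sketch, not part of the removed driver: how
 * XGI_SetXG21CRTC() above packs the 10-bit horizontal retrace start
 * (HRS) and the 6-bit retrace end (HRE) into SR2E/SR2F.  The helper
 * names are hypothetical; only the bit layout (SR2E = HRS[7:0],
 * SR2F[7:2] = HRE[5:0], SR2F[1:0] = HRS[9:8]) is taken from the code.
 */
#include <stdio.h>

static unsigned char pack_sr2e(unsigned short hrs)
{
	return (unsigned char)(hrs & 0xFF);
}

static unsigned char pack_sr2f(unsigned short hrs, unsigned char hre)
{
	/* SR2F[7:2] <- HRE[5:0], SR2F[1:0] <- HRS[9:8] */
	return (unsigned char)(((hre & 0x3F) << 2) | ((hrs >> 8) & 0x03));
}

int main(void)
{
	unsigned short hrs = 0x2A8;	/* hypothetical 10-bit HRS value */
	unsigned char hre = 0x15;	/* hypothetical 6-bit HRE value */

	printf("SR2E=0x%02X SR2F=0x%02X\n",
	       pack_sr2e(hrs), pack_sr2f(hrs, hre));
	return 0;
}
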
-static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short index, Tempax, Tempbx, Tempcx;
-
- index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
- /* Tempax: CR4 HRS */
- Tempax = XGI_CRT1Table[index].CR[3];
- Tempbx = Tempax; /* Tempbx: HRS[7:0] */
- /* SR2E[7:0]->HRS */
- xgifb_reg_set(pVBInfo->P3c4, 0x2E, Tempax);
-
- /* SR0B */
- Tempax = XGI_CRT1Table[index].CR[5];
- Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8] */
- Tempbx |= Tempax << 2; /* Tempbx: HRS[9:0] */
-
- Tempax = XGI_CRT1Table[index].CR[4]; /* CR5 HRE */
- Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */
- Tempcx = Tempax; /* Tempcx: HRE[4:0] */
-
- Tempax = XGI_CRT1Table[index].CR[6]; /* SRC */
- Tempax &= 0x04; /* Tempax[2]: HRE[5] */
- Tempax <<= 3; /* Tempax[5]: HRE[5] */
- Tempcx |= Tempax; /* Tempcx[5:0]: HRE[5:0] */
-
- Tempbx = Tempbx & 0x3C0; /* Tempbx[9:6]: HRS[9:6] */
- Tempbx |= Tempcx; /* Tempbx: HRS[9:6]HRE[5:0] */
-
- /* Tempax: CR4 HRS */
- Tempax = XGI_CRT1Table[index].CR[3];
- Tempax &= 0x3F; /* Tempax: HRS[5:0] */
-	if (Tempcx <= Tempax) /* HRE[5:0] <= HRS[5:0] */
- Tempbx += 0x40; /* Tempbx= Tempbx + 0x40 : HRE[9:0]*/
-
- Tempax = XGI_CRT1Table[index].CR[5]; /* SR0B */
- Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
- Tempax >>= 6; /* Tempax[1:0]: HRS[9:8]*/
- Tempax |= (Tempbx << 2) & 0xFF; /* Tempax[7:2]: HRE[5:0] */
- /* SR2F [7:2][1:0]: HRE[5:0]HRS[9:8] */
- xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax);
- xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
-
- /* CR10 VRS */
- Tempax = XGI_CRT1Table[index].CR[10];
- /* SR34[7:0]->VRS[7:0] */
- xgifb_reg_set(pVBInfo->P3c4, 0x34, Tempax);
-
- Tempcx = Tempax; /* Tempcx <= VRS[7:0] */
- /* CR7[7][2] VRS[9][8] */
- Tempax = XGI_CRT1Table[index].CR[9];
- Tempbx = Tempax; /* Tempbx <= CR07[7:0] */
- Tempax = Tempax & 0x04; /* Tempax[2]: CR7[2]: VRS[8] */
- Tempax >>= 2; /* Tempax[0]: VRS[8] */
- /* SR35[0]: VRS[8] */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax);
- Tempcx |= Tempax << 8; /* Tempcx <= VRS[8:0] */
- Tempcx |= (Tempbx & 0x80) << 2; /* Tempcx <= VRS[9:0] */
- /* Tempax: SR0A */
- Tempax = XGI_CRT1Table[index].CR[14];
- Tempax &= 0x08; /* SR0A[3] VRS[10] */
- Tempcx |= Tempax << 7; /* Tempcx <= VRS[10:0] */
-
- /* Tempax: CR11 VRE */
- Tempax = XGI_CRT1Table[index].CR[11];
- Tempax &= 0x0F; /* Tempax[3:0]: VRE[3:0] */
- /* Tempbx: SR0A */
- Tempbx = XGI_CRT1Table[index].CR[14];
- Tempbx &= 0x20; /* Tempbx[5]: SR0A[5]: VRE[4] */
- Tempbx >>= 1; /* Tempbx[4]: VRE[4] */
- Tempax |= Tempbx; /* Tempax[4:0]: VRE[4:0] */
- Tempbx = Tempcx; /* Tempbx: VRS[10:0] */
- Tempbx &= 0x7E0; /* Tempbx[10:5]: VRS[10:5] */
- Tempbx |= Tempax; /* Tempbx: VRS[10:5]VRE[4:0] */
-
- if (Tempbx <= Tempcx) /* VRE <= VRS */
- Tempbx |= 0x20; /* VRE + 0x20 */
-
- /* Tempax: Tempax[7:0]; VRE[5:0]00 */
- Tempax = (Tempbx << 2) & 0xFF;
- /* SR3F[7:2]:VRE[5:0] */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, Tempax);
- Tempax = Tempcx >> 8;
- /* SR35[2:0]:VRS[10:8] */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07, Tempax);
-}
-
-static void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char temp;
-
- /* D[1:0] 01: 18bit, 00: dual 12, 10: single 24 */
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- temp = (temp & 3) << 6;
-	/* SR06[7] 0: dual 12-bit, 1: single 24-bit */
-	/* SR06[6] 18-bit dither = 0 (h/w recommended) */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
-	/* SR09[7] enable FP output, SR09[6] 1: single 18bits, 0: 24bits */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-}
-
-static void xgifb_set_lcd(int chip_id,
- struct vb_device_info *pVBInfo,
- unsigned short RefreshRateTableIndex)
-{
- unsigned short temp;
-
- xgifb_reg_set(pVBInfo->P3d4, 0x2E, 0x00);
- xgifb_reg_set(pVBInfo->P3d4, 0x2F, 0x00);
- xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x00);
- xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x00);
-
- if (chip_id == XG27) {
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- if ((temp & 0x03) == 0) { /* dual 12 */
- xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x13);
- xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x13);
- }
- }
-
- if (chip_id == XG27) {
- XGI_SetXG27FPBits(pVBInfo);
- } else {
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- if (temp & 0x01) {
- /* 18 bits FP */
- xgifb_reg_or(pVBInfo->P3c4, 0x06, 0x40);
- xgifb_reg_or(pVBInfo->P3c4, 0x09, 0x40);
- }
- }
-
- xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x01); /* Negative blank polarity */
-
- xgifb_reg_and(pVBInfo->P3c4, 0x30, ~0x20); /* Hsync polarity */
- xgifb_reg_and(pVBInfo->P3c4, 0x35, ~0x80); /* Vsync polarity */
-
- temp = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
- if (temp & 0x4000) {
- /* Hsync polarity */
- xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
- }
- if (temp & 0x8000) {
- /* Vsync polarity */
- xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
- }
-}
-
-/*
- * Function : XGI_UpdateXG21CRTC
- * Input : ModeNo, RefreshRateTableIndex
- * Output : CRT1 CRTC
- * Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing
- */
-static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
- struct vb_device_info *pVBInfo,
- unsigned short RefreshRateTableIndex)
-{
- int index = -1;
-
- xgifb_reg_and(pVBInfo->P3d4, 0x11, 0x7F); /* Unlock CR0~7 */
- if (ModeNo == 0x2E &&
- (XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC ==
- RES640x480x60))
- index = 12;
- else if (ModeNo == 0x2E &&
- (XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC ==
- RES640x480x72))
- index = 13;
- else if (ModeNo == 0x2F)
- index = 14;
- else if (ModeNo == 0x50)
- index = 15;
- else if (ModeNo == 0x59)
- index = 16;
-
- if (index != -1) {
- xgifb_reg_set(pVBInfo->P3d4, 0x02,
- XGI_UpdateCRT1Table[index].CR02);
- xgifb_reg_set(pVBInfo->P3d4, 0x03,
- XGI_UpdateCRT1Table[index].CR03);
- xgifb_reg_set(pVBInfo->P3d4, 0x15,
- XGI_UpdateCRT1Table[index].CR15);
- xgifb_reg_set(pVBInfo->P3d4, 0x16,
- XGI_UpdateCRT1Table[index].CR16);
- }
-}
-
-static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag;
-
- unsigned char data;
-
- resindex = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- tempax = XGI330_ModeResInfo[resindex].HTotal;
- tempbx = XGI330_ModeResInfo[resindex].VTotal;
-
- if (modeflag & HalfDCLK)
- tempax >>= 1;
-
- if (modeflag & HalfDCLK)
- tempax <<= 1;
-
- temp = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
-
- if (temp & InterlaceMode)
- tempbx >>= 1;
-
- if (modeflag & DoubleScanMode)
- tempbx <<= 1;
-
- tempcx = 8;
-
- tempax /= tempcx;
- tempax -= 1;
- tempbx -= 1;
- tempcx = tempax;
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x11);
- data = xgifb_reg_get(pVBInfo->P3d4, 0x11);
- data &= 0x7F;
- xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
- xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff));
- xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c,
- (unsigned short)((tempcx & 0x0ff00) >> 10));
- xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff));
- tempax = 0;
- tempbx >>= 8;
-
- if (tempbx & 0x01)
- tempax |= 0x02;
-
- if (tempbx & 0x02)
- tempax |= 0x40;
-
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x42, tempax);
- data = xgifb_reg_get(pVBInfo->P3d4, 0x07);
- tempax = 0;
-
- if (tempbx & 0x04)
- tempax |= 0x02;
-
- xgifb_reg_and_or(pVBInfo->P3d4, 0x0a, ~0x02, tempax);
- xgifb_reg_set(pVBInfo->P3d4, 0x11, temp);
-}
-
-static void XGI_SetCRT1Offset(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp, ah, al, temp2, i, DisplayUnit;
-
- /* GetOffset */
- temp = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeInfo;
- temp >>= 8;
- temp = XGI330_ScreenOffset[temp];
-
- temp2 = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
- temp2 &= InterlaceMode;
-
- if (temp2)
- temp <<= 1;
-
- temp2 = pVBInfo->ModeType - ModeEGA;
-
- switch (temp2) {
- case 0:
- temp2 = 1;
- break;
- case 1:
- temp2 = 2;
- break;
- case 2:
- temp2 = 4;
- break;
- case 3:
- temp2 = 4;
- break;
- case 4:
- temp2 = 6;
- break;
- case 5:
- temp2 = 8;
- break;
- default:
- break;
- }
-
- if ((ModeNo >= 0x26) && (ModeNo <= 0x28))
- temp = temp * temp2 + temp2 / 2;
- else
- temp *= temp2;
-
- /* SetOffset */
- DisplayUnit = temp;
- temp2 = temp;
- temp >>= 8; /* ah */
- temp &= 0x0F;
- i = xgifb_reg_get(pVBInfo->P3c4, 0x0E);
- i &= 0xF0;
- i |= temp;
- xgifb_reg_set(pVBInfo->P3c4, 0x0E, i);
-
- temp = (unsigned char)temp2;
- temp &= 0xFF; /* al */
- xgifb_reg_set(pVBInfo->P3d4, 0x13, temp);
-
- /* SetDisplayUnit */
- temp2 = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
- temp2 &= InterlaceMode;
- if (temp2)
- DisplayUnit >>= 1;
-
- DisplayUnit <<= 5;
- ah = (DisplayUnit & 0xff00) >> 8;
- al = DisplayUnit & 0x00ff;
- if (al == 0)
- ah += 1;
- else
- ah += 2;
-
- if (HwDeviceExtension->jChipType >= XG20)
- if ((ModeNo == 0x4A) | (ModeNo == 0x49))
- ah -= 1;
-
- xgifb_reg_set(pVBInfo->P3c4, 0x10, ah);
-}
-
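/*
 * Illustrative sketch, not part of the removed driver:
 * XGI_SetCRT1Offset() above splits the per-scanline offset across two
 * registers, the low byte into CR13 and bits [11:8] into the low
 * nibble of SR0E (whose upper nibble is preserved).  The struct and
 * example values are hypothetical.
 */
#include <stdio.h>

struct offset_regs {
	unsigned char cr13;	/* offset[7:0] */
	unsigned char sr0e;	/* offset[11:8] in the low nibble */
};

static void split_offset(unsigned short offset, unsigned char old_sr0e,
			 struct offset_regs *out)
{
	out->cr13 = offset & 0xFF;
	out->sr0e = (old_sr0e & 0xF0) | ((offset >> 8) & 0x0F);
}

int main(void)
{
	struct offset_regs r;

	split_offset(0x280, 0xA0, &r);	/* e.g. 0x280 units per line */
	printf("CR13=0x%02X SR0E=0x%02X\n", r.cr13, r.sr0e);
	return 0;
}
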
-static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short VCLKIndex, modeflag;
-
- /* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /* 301b */
- if (pVBInfo->LCDResInfo != Panel_1024x768)
- /* LCDXlat2VCLK */
- VCLKIndex = VCLK108_2_315 + 5;
- else
- VCLKIndex = VCLK65_315 + 2; /* LCDXlat1VCLK */
- } else if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if (pVBInfo->SetFlag & RPLLDIV2XO)
- VCLKIndex = TVCLKBASE_315_25 + HiTVVCLKDIV2;
- else
- VCLKIndex = TVCLKBASE_315_25 + HiTVVCLK;
-
- if (pVBInfo->SetFlag & TVSimuMode) {
- if (modeflag & Charx8Dot)
- VCLKIndex = TVCLKBASE_315_25 + HiTVSimuVCLK;
- else
- VCLKIndex = TVCLKBASE_315_25 + HiTVTextVCLK;
- }
-
- /* 301lv */
- if (pVBInfo->VBType & VB_SIS301LV) {
- if (pVBInfo->SetFlag & RPLLDIV2XO)
- VCLKIndex = YPbPr525iVCLK_2;
- else
- VCLKIndex = YPbPr525iVCLK;
- }
- } else if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->SetFlag & RPLLDIV2XO)
- VCLKIndex = TVCLKBASE_315_25 + TVVCLKDIV2;
- else
- VCLKIndex = TVCLKBASE_315_25 + TVVCLK;
- } else { /* for CRT2 */
- /* di+Ext_CRTVCLK */
- VCLKIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
- VCLKIndex &= IndexMask;
- }
-
- return VCLKIndex;
-}
-
-static void XGI_SetCRT1VCLK(unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char index, data;
- unsigned short vclkindex;
-
- if ((pVBInfo->IF_DEF_LVDS == 0) &&
- (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV |
- VB_SIS302LV | VB_XGI301C)) &&
- (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) {
- vclkindex = XGI_GetVCLK2Ptr(ModeIdIndex, RefreshRateTableIndex,
- pVBInfo);
- data = xgifb_reg_get(pVBInfo->P3c4, 0x31) & 0xCF;
- xgifb_reg_set(pVBInfo->P3c4, 0x31, data);
- data = XGI_VBVCLKData[vclkindex].Part4_A;
- xgifb_reg_set(pVBInfo->P3c4, 0x2B, data);
- data = XGI_VBVCLKData[vclkindex].Part4_B;
- xgifb_reg_set(pVBInfo->P3c4, 0x2C, data);
- xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
- } else {
- index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
- data = xgifb_reg_get(pVBInfo->P3c4, 0x31) & 0xCF;
- xgifb_reg_set(pVBInfo->P3c4, 0x31, data);
- xgifb_reg_set(pVBInfo->P3c4, 0x2B, XGI_VCLKData[index].SR2B);
- xgifb_reg_set(pVBInfo->P3c4, 0x2C, XGI_VCLKData[index].SR2C);
- xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
- }
-
- if (HwDeviceExtension->jChipType >= XG20) {
- if (XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag &
- HalfDCLK) {
- data = xgifb_reg_get(pVBInfo->P3c4, 0x2B);
- xgifb_reg_set(pVBInfo->P3c4, 0x2B, data);
- data = xgifb_reg_get(pVBInfo->P3c4, 0x2C);
- index = data;
- index &= 0xE0;
- data &= 0x1F;
- data <<= 1;
- data += 1;
- data |= index;
- xgifb_reg_set(pVBInfo->P3c4, 0x2C, data);
- }
- }
-}
-
-static void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char temp;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37); /* D[0] 1: 18bit */
- temp = (temp & 1) << 6;
- /* SR06[6] 18bit Dither */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
-	/* SR09[7] enable FP output, SR09[6] 1: single 18bits, 0: dual 12bits */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-}
-
-static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short data;
-
- data = xgifb_reg_get(pVBInfo->P3c4, 0x3D);
- data &= 0xfe;
- xgifb_reg_set(pVBInfo->P3c4, 0x3D, data); /* disable auto-threshold */
-
- xgifb_reg_set(pVBInfo->P3c4, 0x08, 0x34);
- data = xgifb_reg_get(pVBInfo->P3c4, 0x09);
- data &= 0xC0;
- xgifb_reg_set(pVBInfo->P3c4, 0x09, data | 0x30);
- data = xgifb_reg_get(pVBInfo->P3c4, 0x3D);
- data |= 0x01;
- xgifb_reg_set(pVBInfo->P3c4, 0x3D, data);
-
- if (HwDeviceExtension->jChipType == XG21)
-		XGI_SetXG21FPBits(pVBInfo); /* fix: SR9[7:6] can't be read back */
-}
-
-static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short data, data2 = 0;
- short VCLK;
-
- unsigned char index;
-
- index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
- index &= IndexMask;
- VCLK = XGI_VCLKData[index].CLOCK;
-
- data = xgifb_reg_get(pVBInfo->P3c4, 0x32);
- data &= 0xf3;
- if (VCLK >= 200)
-		data |= 0x0c; /* VCLK >= 200 */
-
- if (HwDeviceExtension->jChipType >= XG20)
- data &= ~0x04; /* 2 pixel mode */
-
- xgifb_reg_set(pVBInfo->P3c4, 0x32, data);
-
- if (HwDeviceExtension->jChipType < XG20) {
- data = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
- data &= 0xE7;
- if (VCLK < 200)
- data |= 0x10;
- xgifb_reg_set(pVBInfo->P3c4, 0x1F, data);
- }
-
- data2 = 0x00;
-
- xgifb_reg_and_or(pVBInfo->P3c4, 0x07, 0xFC, data2);
- if (HwDeviceExtension->jChipType >= XG27)
- xgifb_reg_and_or(pVBInfo->P3c4, 0x40, 0xFC, data2 & 0x03);
-}
-
-static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short data, data2, data3, infoflag = 0, modeflag, resindex,
- xres;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- infoflag = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
-
- if (xgifb_reg_get(pVBInfo->P3d4, 0x31) & 0x01)
- xgifb_reg_and_or(pVBInfo->P3c4, 0x1F, 0x3F, 0x00);
-
- data = infoflag;
- data2 = 0;
- data2 |= 0x02;
- data3 = pVBInfo->ModeType - ModeVGA;
- data3 <<= 2;
- data2 |= data3;
- data &= InterlaceMode;
-
- if (data)
- data2 |= 0x20;
-
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x3F, data2);
- resindex = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- xres = XGI330_ModeResInfo[resindex].HTotal; /* xres->ax */
-
- data = 0x0000;
- if (infoflag & InterlaceMode) {
- if (xres == 1024)
- data = 0x0035;
- else if (xres == 1280)
- data = 0x0048;
- }
-
- xgifb_reg_and_or(pVBInfo->P3d4, 0x19, 0xFF, data);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x19, 0xFC, 0);
-
- if (modeflag & HalfDCLK)
- xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xF7, 0x08);
-
- data2 = 0;
-
- if (modeflag & LineCompareOff)
- data2 |= 0x08;
-
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0F, ~0x48, data2);
- data = 0x60;
- data = data ^ 0x60;
- data = data ^ 0xA0;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x21, 0x1F, data);
-
- XGI_SetVCLKState(HwDeviceExtension, RefreshRateTableIndex, pVBInfo);
-
- data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
-
- if (HwDeviceExtension->jChipType == XG27) {
- if (data & 0x40)
- data = 0x2c;
- else
- data = 0x6c;
- xgifb_reg_set(pVBInfo->P3d4, 0x52, data);
- xgifb_reg_or(pVBInfo->P3d4, 0x51, 0x10);
- } else if (HwDeviceExtension->jChipType >= XG20) {
- if (data & 0x40)
- data = 0x33;
- else
- data = 0x73;
- xgifb_reg_set(pVBInfo->P3d4, 0x52, data);
- xgifb_reg_set(pVBInfo->P3d4, 0x51, 0x02);
- } else {
- if (data & 0x40)
- data = 0x2c;
- else
- data = 0x6c;
- xgifb_reg_set(pVBInfo->P3d4, 0x52, data);
- }
-}
-
-static void XGI_WriteDAC(unsigned short dl,
- unsigned short ah,
- unsigned short al,
- unsigned short dh,
- struct vb_device_info *pVBInfo)
-{
- unsigned short bh, bl;
-
- bh = ah;
- bl = al;
-
- if (dl != 0) {
- swap(bh, dh);
- if (dl == 1)
- swap(bl, dh);
- else
- swap(bl, bh);
- }
- outb((unsigned short)dh, pVBInfo->P3c9);
- outb((unsigned short)bh, pVBInfo->P3c9);
- outb((unsigned short)bl, pVBInfo->P3c9);
-}
-
-static void XGI_LoadDAC(struct vb_device_info *pVBInfo)
-{
- unsigned short data, data2, i, k, m, n, o, si, di, bx, dl, al, ah, dh;
- const unsigned short *table = XGINew_VGA_DAC;
-
- outb(0xFF, pVBInfo->P3c6);
- outb(0x00, pVBInfo->P3c8);
-
- for (i = 0; i < 16; i++) {
- data = table[i];
-
- for (k = 0; k < 3; k++) {
- data2 = 0;
-
- if (data & 0x01)
- data2 = 0x2A;
-
- if (data & 0x02)
- data2 += 0x15;
-
- outb(data2, pVBInfo->P3c9);
- data >>= 2;
- }
- }
-
- for (i = 16; i < 32; i++) {
- data = table[i];
-
- for (k = 0; k < 3; k++)
- outb(data, pVBInfo->P3c9);
- }
-
- si = 32;
-
- for (m = 0; m < 9; m++) {
- di = si;
- bx = si + 0x04;
- dl = 0;
-
- for (n = 0; n < 3; n++) {
- for (o = 0; o < 5; o++) {
- dh = table[si];
- ah = table[di];
- al = table[bx];
- si++;
- XGI_WriteDAC(dl, ah, al, dh, pVBInfo);
- }
-
- si -= 2;
-
- for (o = 0; o < 3; o++) {
- dh = table[bx];
- ah = table[di];
- al = table[si];
- si--;
- XGI_WriteDAC(dl, ah, al, dh, pVBInfo);
- }
-
- dl++;
- }
-
- si += 5;
- }
-}
-
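/*
 * Illustrative sketch, not part of the removed driver: the VGA DAC
 * write sequence used by XGI_LoadDAC() above.  The index written to
 * 3C8h selects a palette entry; every three 6-bit values written to
 * 3C9h (red, green, blue) fill one entry, after which the index
 * auto-increments.  The fake_dac model below is hypothetical and only
 * mimics that auto-increment behaviour.
 */
#include <stdio.h>

struct fake_dac {
	unsigned char pal[256][3];
	unsigned int index;	/* currently selected entry (port 3C8h) */
	unsigned int channel;	/* 0 = red, 1 = green, 2 = blue */
};

static void dac_set_index(struct fake_dac *dac, unsigned char idx)
{
	dac->index = idx;
	dac->channel = 0;
}

static void dac_write(struct fake_dac *dac, unsigned char val)
{
	dac->pal[dac->index][dac->channel] = val & 0x3F;	/* 6-bit DAC */
	if (++dac->channel == 3) {
		dac->channel = 0;
		dac->index = (dac->index + 1) & 0xFF;	/* auto-increment */
	}
}

int main(void)
{
	struct fake_dac dac = { .index = 0 };

	dac_set_index(&dac, 0);
	dac_write(&dac, 0x2A);	/* red */
	dac_write(&dac, 0x15);	/* green */
	dac_write(&dac, 0x00);	/* blue */
	printf("entry 0 = %02X %02X %02X\n",
	       dac.pal[0][0], dac.pal[0][1], dac.pal[0][2]);
	return 0;
}
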
-static void XGI_GetLVDSResInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short resindex, xres, yres, modeflag;
-
- /* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
-
- /* si+Ext_ResInfo */
- resindex = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
-
- xres = XGI330_ModeResInfo[resindex].HTotal;
- yres = XGI330_ModeResInfo[resindex].VTotal;
-
- if (modeflag & HalfDCLK)
- xres <<= 1;
-
- if (modeflag & DoubleScanMode)
- yres <<= 1;
-
- if (xres == 720)
- xres = 640;
-
- pVBInfo->VGAHDE = xres;
- pVBInfo->HDE = xres;
- pVBInfo->VGAVDE = yres;
- pVBInfo->VDE = yres;
-}
-
-static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short i, tempdx, tempbx, modeflag;
-
- tempbx = 0;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- i = 0;
-
- while (table[i].PANELID != 0xff) {
- tempdx = pVBInfo->LCDResInfo;
- if (tempbx & 0x0080) { /* OEMUtil */
- tempbx &= ~0x0080;
- tempdx = pVBInfo->LCDTypeInfo;
- }
-
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempdx &= ~PanelResInfo;
-
- if (table[i].PANELID == tempdx) {
- tempbx = table[i].MASK;
- tempdx = pVBInfo->LCDInfo;
-
- if (modeflag & HalfDCLK)
- tempdx |= SetLCDLowResolution;
-
- tempbx &= tempdx;
- if (tempbx == table[i].CAP)
- break;
- }
- i++;
- }
-
- return table[i].DATAPTR;
-}
-
-static struct SiS_TVData const *XGI_GetTVPtr(
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short i, tempdx, tempal, modeflag;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- tempal = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
- tempal = tempal & 0x3f;
- tempdx = pVBInfo->TVInfo;
-
- if (pVBInfo->VBInfo & SetInSlaveMode)
- tempdx = tempdx | SetTVLockMode;
-
- if (modeflag & HalfDCLK)
- tempdx = tempdx | SetTVLowResolution;
-
- i = 0;
-
- while (XGI_TVDataTable[i].MASK != 0xffff) {
- if ((tempdx & XGI_TVDataTable[i].MASK) ==
- XGI_TVDataTable[i].CAP)
- break;
- i++;
- }
-
- return &XGI_TVDataTable[i].DATAPTR[tempal];
-}
-
-static void XGI_GetLVDSData(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- struct SiS_LVDSData const *LCDPtr;
-
- if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
- return;
-
- LCDPtr = XGI_GetLcdPtr(XGI_EPLLCDDataPtr, ModeIdIndex, pVBInfo);
- pVBInfo->VGAHT = LCDPtr->VGAHT;
- pVBInfo->VGAVT = LCDPtr->VGAVT;
- pVBInfo->HT = LCDPtr->LCDHT;
- pVBInfo->VT = LCDPtr->LCDVT;
-
- if (pVBInfo->LCDInfo & (SetLCDtoNonExpanding | EnableScalingLCD))
- return;
-
- if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
- (pVBInfo->LCDResInfo == Panel_1024x768x75)) {
- pVBInfo->HDE = 1024;
- pVBInfo->VDE = 768;
- } else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
- (pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
- pVBInfo->HDE = 1280;
- pVBInfo->VDE = 1024;
- } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
- pVBInfo->HDE = 1400;
- pVBInfo->VDE = 1050;
- } else {
- pVBInfo->HDE = 1600;
- pVBInfo->VDE = 1200;
- }
-}
-
-static void XGI_ModCRT1Regs(unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short i;
- struct XGI_LVDSCRT1HDataStruct const *LCDPtr = NULL;
- struct XGI_LVDSCRT1VDataStruct const *LCDPtr1 = NULL;
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr = XGI_GetLcdPtr(xgifb_epllcd_crt1_h, ModeIdIndex,
- pVBInfo);
-
- for (i = 0; i < 8; i++)
- pVBInfo->TimingH.data[i] = LCDPtr[0].Reg[i];
- }
-
- XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr1 = XGI_GetLcdPtr(xgifb_epllcd_crt1_v, ModeIdIndex,
- pVBInfo);
- for (i = 0; i < 7; i++)
- pVBInfo->TimingV.data[i] = LCDPtr1[0].Reg[i];
- }
-
- XGI_SetCRT1Timing_V(ModeIdIndex, pVBInfo);
-}
-
-static unsigned short XGI_GetLCDCapPtr(struct vb_device_info *pVBInfo)
-{
- unsigned char tempal, tempah, tempbl, i;
-
- tempah = xgifb_reg_get(pVBInfo->P3d4, 0x36);
- tempal = tempah & 0x0F;
- tempah = tempah & 0xF0;
- i = 0;
- tempbl = pVBInfo->LCDCapList[i].LCD_ID;
-
- while (tempbl != 0xFF) {
- if (tempbl & 0x80) { /* OEMUtil */
- tempal = tempah;
- tempbl = tempbl & ~(0x80);
- }
-
- if (tempal == tempbl)
- break;
-
- i++;
-
- tempbl = pVBInfo->LCDCapList[i].LCD_ID;
- }
-
- return i;
-}
-
-static unsigned short XGI_GetLCDCapPtr1(struct vb_device_info *pVBInfo)
-{
- unsigned short tempah, tempal, tempbl, i;
-
- tempal = pVBInfo->LCDResInfo;
- tempah = pVBInfo->LCDTypeInfo;
-
- i = 0;
- tempbl = pVBInfo->LCDCapList[i].LCD_ID;
-
- while (tempbl != 0xFF) {
- if ((tempbl & 0x80) && (tempbl != 0x80)) {
- tempal = tempah;
- tempbl &= ~0x80;
- }
-
- if (tempal == tempbl)
- break;
-
- i++;
- tempbl = pVBInfo->LCDCapList[i].LCD_ID;
- }
-
- if (tempbl == 0xFF) {
- pVBInfo->LCDResInfo = Panel_1024x768;
- pVBInfo->LCDTypeInfo = 0;
- i = 0;
- }
-
- return i;
-}
-
-static void XGI_GetLCDSync(unsigned short *HSyncWidth,
- unsigned short *VSyncWidth,
- struct vb_device_info *pVBInfo)
-{
- unsigned short Index;
-
- Index = XGI_GetLCDCapPtr(pVBInfo);
- *HSyncWidth = pVBInfo->LCDCapList[Index].LCD_HSyncWidth;
- *VSyncWidth = pVBInfo->LCDCapList[Index].LCD_VSyncWidth;
-}
-
-static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx, tempax, tempcx, tempdx, push1, push2, modeflag;
- unsigned long temp, temp1, temp2, temp3, push3;
- struct XGI330_LCDDataDesStruct2 const *LCDPtr1 = NULL;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- LCDPtr1 = XGI_GetLcdPtr(XGI_EPLLCDDesDataPtr, ModeIdIndex, pVBInfo);
-
- XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
- push1 = tempbx;
- push2 = tempax;
-
- /* GetLCDResInfo */
- if ((pVBInfo->LCDResInfo == Panel_1024x768) ||
- (pVBInfo->LCDResInfo == Panel_1024x768x75)) {
- tempax = 1024;
- tempbx = 768;
- } else if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
- (pVBInfo->LCDResInfo == Panel_1280x1024x75)) {
- tempax = 1280;
- tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
- tempax = 1400;
- tempbx = 1050;
- } else {
- tempax = 1600;
- tempbx = 1200;
- }
-
- if (pVBInfo->LCDInfo & SetLCDtoNonExpanding) {
- pVBInfo->HDE = tempax;
- pVBInfo->VDE = tempbx;
- pVBInfo->VGAHDE = tempax;
- pVBInfo->VGAVDE = tempbx;
- }
-
- tempax = pVBInfo->HT;
-
- tempbx = LCDPtr1->LCDHDES;
-
- tempcx = pVBInfo->HDE;
- tempbx = tempbx & 0x0fff;
- tempcx += tempbx;
-
- if (tempcx >= tempax)
- tempcx -= tempax;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x1A, tempbx & 0x07);
-
- tempcx >>= 3;
- tempbx >>= 3;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x16,
- (unsigned short)(tempbx & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x17,
- (unsigned short)(tempcx & 0xff));
-
- tempax = pVBInfo->HT;
-
- tempbx = LCDPtr1->LCDHRS;
-
- tempcx = push2;
-
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempcx = LCDPtr1->LCDHSync;
-
- tempcx += tempbx;
-
- if (tempcx >= tempax)
- tempcx -= tempax;
-
- tempax = tempbx & 0x07;
- tempax >>= 5;
- tempcx >>= 3;
- tempbx >>= 3;
-
- tempcx &= 0x1f;
- tempax |= tempcx;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax);
- xgifb_reg_set(pVBInfo->Part1Port, 0x14,
- (unsigned short)(tempbx & 0xff));
-
- tempax = pVBInfo->VT;
- tempbx = LCDPtr1->LCDVDES;
- tempcx = pVBInfo->VDE;
-
- tempbx = tempbx & 0x0fff;
- tempcx += tempbx;
- if (tempcx >= tempax)
- tempcx -= tempax;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x1b,
- (unsigned short)(tempbx & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
- (unsigned short)(tempcx & 0xff));
-
- tempbx = (tempbx >> 8) & 0x07;
- tempcx = (tempcx >> 8) & 0x07;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x1d,
- (unsigned short)((tempcx << 3) | tempbx));
-
- tempax = pVBInfo->VT;
- tempbx = LCDPtr1->LCDVRS;
-
- tempcx = push1;
-
- if (pVBInfo->LCDInfo & EnableScalingLCD)
- tempcx = LCDPtr1->LCDVSync;
-
- tempcx += tempbx;
- if (tempcx >= tempax)
- tempcx -= tempax;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x18,
- (unsigned short)(tempbx & 0xff));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
- (unsigned short)(tempcx & 0x0f));
-
- tempax = ((tempbx >> 8) & 0x07) << 3;
-
- tempbx = pVBInfo->VGAVDE;
- if (tempbx != pVBInfo->VDE)
- tempax |= 0x40;
-
- if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
- tempax |= 0x40;
-
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07, tempax);
-
- tempbx = pVBInfo->VDE;
- tempax = pVBInfo->VGAVDE;
-
- temp = tempax; /* 0430 ylshieh */
- temp1 = (temp << 18) / tempbx;
-
- tempdx = (unsigned short)((temp << 18) % tempbx);
-
- if (tempdx != 0)
- temp1 += 1;
-
- temp2 = temp1;
- push3 = temp2;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x37, (unsigned short)(temp2 & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x36, (unsigned short)((temp2 >> 8) & 0xff));
-
- tempbx = (unsigned short)(temp2 >> 16);
- tempax = tempbx & 0x03;
-
- tempbx = pVBInfo->VGAVDE;
- if (tempbx == pVBInfo->VDE)
- tempax |= 0x04;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x35, tempax);
-
- if (pVBInfo->VBType & VB_XGI301C) {
- temp2 = push3;
- xgifb_reg_set(pVBInfo->Part4Port,
- 0x3c,
- (unsigned short)(temp2 & 0xff));
- xgifb_reg_set(pVBInfo->Part4Port,
- 0x3b,
- (unsigned short)((temp2 >> 8) &
- 0xff));
- tempbx = (unsigned short)(temp2 >> 16);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a, ~0xc0,
- (unsigned short)((tempbx & 0xff) << 6));
-
- tempcx = pVBInfo->VGAVDE;
- if (tempcx == pVBInfo->VDE)
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x00);
- else
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x08);
- }
-
- tempcx = pVBInfo->VGAHDE;
- tempbx = pVBInfo->HDE;
-
- temp1 = tempcx << 16;
-
- tempax = (unsigned short)(temp1 / tempbx);
-
- if ((tempbx & 0xffff) == (tempcx & 0xffff))
- tempax = 65535;
-
- temp3 = tempax;
- temp1 = pVBInfo->VGAHDE << 16;
-
- temp1 /= temp3;
- temp3 <<= 16;
- temp1 -= 1;
-
- temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff);
-
- tempax = (unsigned short)(temp3 & 0xff);
- xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax);
-
- temp1 = pVBInfo->VGAVDE << 18;
- temp1 = temp1 / push3;
- tempbx = (unsigned short)(temp1 & 0xffff);
-
- if (pVBInfo->LCDResInfo == Panel_1024x768)
- tempbx -= 1;
-
- tempax = ((tempbx >> 8) & 0xff) << 3;
- tempax |= (unsigned short)((temp3 >> 8) & 0x07);
- xgifb_reg_set(pVBInfo->Part1Port, 0x20,
- (unsigned short)(tempax & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x21,
- (unsigned short)(tempbx & 0xff));
-
- temp3 >>= 16;
-
- if (modeflag & HalfDCLK)
- temp3 >>= 1;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x22,
- (unsigned short)((temp3 >> 8) & 0xff));
- xgifb_reg_set(pVBInfo->Part1Port, 0x23,
- (unsigned short)(temp3 & 0xff));
-}
-
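/*
 * Illustrative sketch, not part of the removed driver: the vertical
 * scaling factor programmed by XGI_SetLVDSRegs() above is an 18-bit
 * fixed-point ratio, rounded up: factor = ceil((VGAVDE << 18) / VDE).
 * The low byte goes to Part1 0x37, the next byte to 0x36 and bits
 * [17:16] to 0x35.  The helper name and the example are hypothetical.
 */
#include <stdio.h>

static unsigned long lvds_v_scale(unsigned long vgavde, unsigned long vde)
{
	unsigned long factor = (vgavde << 18) / vde;

	if (((vgavde << 18) % vde) != 0)
		factor += 1;	/* round up, as the code above does */

	return factor;
}

int main(void)
{
	/* hypothetical case: 600 source lines scaled onto a 768-line panel */
	printf("scale = 0x%05lX\n", lvds_v_scale(600, 768));
	return 0;
}
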
-/*
- * Function : XGI_GetLCDVCLKPtr
- * Input : pVBInfo
- * Output : di_0 / di_1 -> LCD VCLK data
- * Description : Get the LCD VCLK data for the LCDA/LCDB path
- */
-static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
- struct vb_device_info *pVBInfo)
-{
- unsigned short index;
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- index = XGI_GetLCDCapPtr1(pVBInfo);
-
- if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
- *di_0 = pVBInfo->LCDCapList[index].LCUCHAR_VCLKData1;
- *di_1 = pVBInfo->LCDCapList[index].LCUCHAR_VCLKData2;
- } else { /* LCDA */
- *di_0 = pVBInfo->LCDCapList[index].LCDA_VCLKData1;
- *di_1 = pVBInfo->LCDCapList[index].LCDA_VCLKData2;
- }
- }
-}
-
-static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short index, modeflag;
- unsigned char tempal;
-
- /* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- if ((pVBInfo->SetFlag & ProgrammingCRT2) &&
- !(pVBInfo->LCDInfo & EnableScalingLCD)) { /* {LCDA/LCDB} */
- index = XGI_GetLCDCapPtr(pVBInfo);
- tempal = pVBInfo->LCDCapList[index].LCD_VCLK;
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
- return tempal;
-
- /* {TV} */
- if (pVBInfo->VBType &
- (VB_SIS301B |
- VB_SIS302B |
- VB_SIS301LV |
- VB_SIS302LV |
- VB_XGI301C)) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- tempal = TVCLKBASE_315 + HiTVVCLKDIV2;
- if (!(pVBInfo->TVInfo & RPLLDIV2XO))
- tempal = TVCLKBASE_315 + HiTVVCLK;
- if (pVBInfo->TVInfo & TVSimuMode) {
- tempal = TVCLKBASE_315 + HiTVSimuVCLK;
- if (!(modeflag & Charx8Dot))
- tempal = TVCLKBASE_315 +
- HiTVTextVCLK;
- }
- return tempal;
- }
-
- if (pVBInfo->TVInfo & TVSetYPbPr750p)
- return XGI_YPbPr750pVCLK;
-
- if (pVBInfo->TVInfo & TVSetYPbPr525p)
- return YPbPr525pVCLK;
-
- tempal = NTSC1024VCLK;
-
- if (!(pVBInfo->TVInfo & NTSC1024x768)) {
- tempal = TVCLKBASE_315 + TVVCLKDIV2;
- if (!(pVBInfo->TVInfo & RPLLDIV2XO))
- tempal = TVCLKBASE_315 + TVVCLK;
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToTV)
- return tempal;
- }
- } /* {End of VB} */
-
- inb((pVBInfo->P3ca + 0x02));
- return XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
-}
-
-static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
- unsigned char *di_1, struct vb_device_info *pVBInfo)
-{
- if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
- | VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
- if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
- (pVBInfo->SetFlag & ProgrammingCRT2)) {
- *di_0 = XGI_VBVCLKData[tempal].Part4_A;
- *di_1 = XGI_VBVCLKData[tempal].Part4_B;
- }
- } else {
- *di_0 = XGI_VCLKData[tempal].SR2B;
- *di_1 = XGI_VCLKData[tempal].SR2C;
- }
-}
-
-static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char di_0, di_1, tempal;
- int i;
-
- tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeIdIndex, pVBInfo);
- XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
- XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
-
- for (i = 0; i < 4; i++) {
- xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
- (unsigned short)(0x10 * i));
- if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
- !(pVBInfo->VBInfo & SetInSlaveMode)) {
- xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
- xgifb_reg_set(pVBInfo->P3c4, 0x2f, di_1);
- } else {
- xgifb_reg_set(pVBInfo->P3c4, 0x2b, di_0);
- xgifb_reg_set(pVBInfo->P3c4, 0x2c, di_1);
- }
- }
-}
-
-static void XGI_UpdateModeInfo(struct vb_device_info *pVBInfo)
-{
- unsigned short tempcl, tempch, temp, tempbl, tempax;
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- tempcl = 0;
- tempch = 0;
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x01);
-
- if (!(temp & 0x20)) {
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x17);
- if (temp & 0x80) {
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x53);
- if (!(temp & 0x40))
- tempcl |= ActiveCRT1;
- }
- }
-
- temp = xgifb_reg_get(pVBInfo->Part1Port, 0x2e);
- temp &= 0x0f;
-
- if (!(temp == 0x08)) {
- /* Check ChannelA */
- tempax = xgifb_reg_get(pVBInfo->Part1Port, 0x13);
- if (tempax & 0x04)
- tempcl = tempcl | ActiveLCD;
-
- temp &= 0x05;
-
- if (!(tempcl & ActiveLCD)) {
- if (temp == 0x01)
- tempcl |= ActiveCRT2;
- }
-
- if (temp == 0x04)
- tempcl |= ActiveLCD;
-
- if (temp == 0x05) {
- temp = xgifb_reg_get(pVBInfo->Part2Port, 0x00);
-
- if (!(temp & 0x08))
- tempch |= ActiveAVideo;
-
- if (!(temp & 0x04))
- tempch |= ActiveSVideo;
-
- if (temp & 0x02)
- tempch |= ActiveSCART;
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if (temp & 0x01)
- tempch |= ActiveHiTV;
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
- temp = xgifb_reg_get(
- pVBInfo->Part2Port,
- 0x4d);
-
- if (temp & 0x10)
- tempch |= ActiveYPbPr;
- }
-
- if (tempch != 0)
- tempcl |= ActiveTV;
- }
- }
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x3d);
- if (tempcl & ActiveLCD) {
- if ((pVBInfo->SetFlag & ReserveTVOption)) {
- if (temp & ActiveTV)
- tempcl |= ActiveTV;
- }
- }
- temp = tempcl;
- tempbl = ~XGI_ModeSwitchStatus;
- xgifb_reg_and_or(pVBInfo->P3d4, 0x3d, tempbl, temp);
-
- if (!(pVBInfo->SetFlag & ReserveTVOption))
- xgifb_reg_set(pVBInfo->P3d4, 0x3e, tempch);
- }
-}
-
-void XGI_GetVBType(struct vb_device_info *pVBInfo)
-{
- unsigned short flag, tempbx, tempah;
-
- tempbx = VB_SIS302B;
- flag = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
- if (flag == 0x02)
- goto finish;
-
- tempbx = VB_SIS301;
- flag = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
- if (flag < 0xB0)
- goto finish;
-
- tempbx = VB_SIS301B;
- if (flag < 0xC0)
- goto bigger_than_0xB0;
-
- tempbx = VB_XGI301C;
- if (flag < 0xD0)
- goto bigger_than_0xB0;
-
- tempbx = VB_SIS301LV;
- if (flag < 0xE0)
- goto bigger_than_0xB0;
-
- tempbx = VB_SIS302LV;
- tempah = xgifb_reg_get(pVBInfo->Part4Port, 0x39);
- if (tempah != 0xFF)
- tempbx = VB_XGI301C;
-
-bigger_than_0xB0:
- if (tempbx & (VB_SIS301B | VB_SIS302B)) {
- flag = xgifb_reg_get(pVBInfo->Part4Port, 0x23);
- if (!(flag & 0x02))
- tempbx = tempbx | VB_NoLCD;
- }
-
-finish:
- pVBInfo->VBType = tempbx;
-}
-
-static void XGI_GetVBInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempax, push, tempbx, temp, modeflag;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- pVBInfo->SetFlag = 0;
- pVBInfo->ModeType = modeflag & ModeTypeMask;
- tempbx = 0;
-
- if (!(pVBInfo->VBType & 0xFFFF))
- return;
-
- /* Check Display Device */
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x30);
- tempbx = tempbx | temp;
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x31);
- push = temp;
- push <<= 8;
- tempax = temp << 8;
- tempbx = tempbx | tempax;
- temp = SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
- | SetInSlaveMode | DisableCRT2Display;
- temp = 0xFFFF ^ temp;
- tempbx &= temp;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);
-
- if (pVBInfo->VBType & (VB_SIS302B | VB_SIS301LV | VB_SIS302LV |
- VB_XGI301C)) {
- if (temp & EnableDualEdge) {
- tempbx |= SetCRT2ToDualEdge;
- if (temp & SetToLCDA)
- tempbx |= XGI_SetCRT2ToLCDA;
- }
- }
-
- if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
- if (temp & SetYPbPr) {
- /* shampoo add for new scratch */
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
- temp &= YPbPrMode;
- tempbx |= SetCRT2ToHiVision;
-
- if (temp != YPbPrMode1080i) {
- tempbx &= ~SetCRT2ToHiVision;
- tempbx |= SetCRT2ToYPbPr525750;
- }
- }
- }
-
- tempax = push; /* restore CR31 */
-
- temp = 0x09FC;
-
- if (!(tempbx & temp)) {
- tempax |= DisableCRT2Display;
- tempbx = 0;
- }
-
- if (!(pVBInfo->VBType & VB_NoLCD)) {
- if (tempbx & XGI_SetCRT2ToLCDA) {
- if (tempbx & SetSimuScanMode)
- tempbx &= (~(SetCRT2ToLCD | SetCRT2ToRAMDAC |
- SwitchCRT2));
- else
- tempbx &= (~(SetCRT2ToLCD | SetCRT2ToRAMDAC |
- SetCRT2ToTV | SwitchCRT2));
- }
- }
-
- /* shampoo add */
-	/* for an abnormal driver setting */
- if (!(tempbx & (SwitchCRT2 | SetSimuScanMode))) {
- if (tempbx & SetCRT2ToRAMDAC) {
- tempbx &= (0xFF00 | SetCRT2ToRAMDAC |
- SwitchCRT2 | SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
- }
- }
-
- if (!(pVBInfo->VBType & VB_NoLCD)) {
- if (tempbx & SetCRT2ToLCD) {
- tempbx &= (0xFF00 | SetCRT2ToLCD | SwitchCRT2 |
- SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
- }
- }
-
- if (tempbx & SetCRT2ToSCART) {
- tempbx &= (0xFF00 | SetCRT2ToSCART | SwitchCRT2 |
- SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
- }
-
- if (tempbx & SetCRT2ToYPbPr525750)
- tempbx &= (0xFF00 | SwitchCRT2 | SetSimuScanMode);
-
- if (tempbx & SetCRT2ToHiVision)
- tempbx &= (0xFF00 | SetCRT2ToHiVision | SwitchCRT2 |
- SetSimuScanMode);
-
- if (tempax & DisableCRT2Display) { /* Set Display Device Info */
- if (!(tempbx & (SwitchCRT2 | SetSimuScanMode)))
- tempbx = DisableCRT2Display;
- }
-
- if (!(tempbx & DisableCRT2Display)) {
- if (!(tempbx & DriverMode) || !(modeflag & CRT2Mode)) {
- if (!(tempbx & XGI_SetCRT2ToLCDA))
- tempbx |= (SetInSlaveMode | SetSimuScanMode);
- }
-
-		/* LCD+TV cannot be supported in slave mode
-		 * (force LCDA+TV -> LCDB)
-		 */
- if ((tempbx & SetInSlaveMode) && (tempbx & XGI_SetCRT2ToLCDA)) {
- tempbx ^= (SetCRT2ToLCD | XGI_SetCRT2ToLCDA |
- SetCRT2ToDualEdge);
- pVBInfo->SetFlag |= ReserveTVOption;
- }
- }
-
- pVBInfo->VBInfo = tempbx;
-}
-
-static void XGI_GetTVInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
-
- tempbx = xgifb_reg_get(pVBInfo->P3d4, 0x35);
- if (tempbx & TVSetPAL) {
- tempbx &= (SetCHTVOverScan |
- TVSetPALM |
- TVSetPALN |
- TVSetPAL);
- if (tempbx & TVSetPALM)
- /* set to NTSC if PAL-M */
- tempbx &= ~TVSetPAL;
- } else {
- tempbx &= (SetCHTVOverScan |
- TVSetNTSCJ |
- TVSetPAL);
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToSCART)
- tempbx |= TVSetPAL;
-
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
- index1 = xgifb_reg_get(pVBInfo->P3d4, 0x35);
- index1 &= YPbPrMode;
-
- if (index1 == YPbPrMode525i)
- tempbx |= TVSetYPbPr525i;
-
- if (index1 == YPbPrMode525p)
- tempbx = tempbx | TVSetYPbPr525p;
- if (index1 == YPbPrMode750p)
- tempbx = tempbx | TVSetYPbPr750p;
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision)
- tempbx = tempbx | TVSetHiVision | TVSetPAL;
-
- if ((pVBInfo->VBInfo & SetInSlaveMode) &&
- (!(pVBInfo->VBInfo & SetNotSimuMode)))
- tempbx |= TVSimuMode;
-
- if (!(tempbx & TVSetPAL) && (modeflag > 13) && (resinfo == 8)) {
-			/* NTSC 1024x768 */
- tempbx |= NTSC1024x768;
- }
-
- tempbx |= RPLLDIV2XO;
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if (pVBInfo->VBInfo & SetInSlaveMode)
- tempbx &= (~RPLLDIV2XO);
- } else if (tempbx & (TVSetYPbPr525p | TVSetYPbPr750p)) {
- tempbx &= (~RPLLDIV2XO);
- } else if (!(pVBInfo->VBType & (VB_SIS301B | VB_SIS302B |
- VB_SIS301LV | VB_SIS302LV |
- VB_XGI301C))) {
- if (tempbx & TVSimuMode)
- tempbx &= (~RPLLDIV2XO);
- }
- }
- pVBInfo->TVInfo = tempbx;
-}
-
-static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp, tempax, tempbx, resinfo = 0, LCDIdIndex;
-
- pVBInfo->LCDResInfo = 0;
- pVBInfo->LCDTypeInfo = 0;
- pVBInfo->LCDInfo = 0;
-
- /* si+Ext_ResInfo */
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */
- tempbx = temp & 0x0F;
-
- if (tempbx == 0)
- tempbx = Panel_1024x768; /* default */
-
- /* LCD75 */
- if ((tempbx == Panel_1024x768) || (tempbx == Panel_1280x1024)) {
- if (pVBInfo->VBInfo & DriverMode) {
- tempax = xgifb_reg_get(pVBInfo->P3d4, 0x33);
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
- tempax &= 0x0F;
- else
- tempax >>= 4;
-
- if ((resinfo == 6) || (resinfo == 9)) {
- if (tempax >= 3)
- tempbx |= PanelRef75Hz;
- } else if ((resinfo == 7) || (resinfo == 8)) {
- if (tempax >= 4)
- tempbx |= PanelRef75Hz;
- }
- }
- }
-
- pVBInfo->LCDResInfo = tempbx;
-
- /* End of LCD75 */
-
- if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
- return 0;
-
- tempbx = 0;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
-
- temp &= (ScalingLCD | LCDNonExpanding | LCDSyncBit | SetPWDEnable);
-
- tempbx |= temp;
-
- LCDIdIndex = XGI_GetLCDCapPtr1(pVBInfo);
-
- tempax = pVBInfo->LCDCapList[LCDIdIndex].LCD_Capability;
-
- if (((pVBInfo->VBType & VB_SIS302LV) ||
- (pVBInfo->VBType & VB_XGI301C)) && (tempax & XGI_LCDDualLink))
- tempbx |= SetLCDDualLink;
-
- if ((pVBInfo->LCDResInfo == Panel_1400x1050) &&
- (pVBInfo->VBInfo & SetCRT2ToLCD) && (resinfo == 9) &&
- !(tempbx & EnableScalingLCD))
- /*
- * set to center in 1280x1024 LCDB
- * for Panel_1400x1050
- */
- tempbx |= SetLCDtoNonExpanding;
-
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (pVBInfo->VBInfo & SetNotSimuMode)
- tempbx |= XGI_LCDVESATiming;
- } else {
- tempbx |= XGI_LCDVESATiming;
- }
-
- pVBInfo->LCDInfo = tempbx;
-
- return 1;
-}
-
-unsigned char XGI_SearchModeID(unsigned short ModeNo,
- unsigned short *ModeIdIndex)
-{
- for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
- if (XGI330_EModeIDTable[*ModeIdIndex].Ext_ModeID == ModeNo)
- break;
- if (XGI330_EModeIDTable[*ModeIdIndex].Ext_ModeID == 0xFF)
- return 0;
- }
-
- return 1;
-}
-
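/*
 * Illustrative sketch, not part of the removed driver:
 * XGI_SearchModeID() above scans a table terminated by a 0xFF
 * sentinel entry.  The table contents and mode numbers below are
 * hypothetical; only the search pattern is taken from the code.
 */
#include <stdio.h>

struct mode_entry {
	unsigned char mode_id;	/* 0xFF terminates the table */
};

static const struct mode_entry mode_table[] = {
	{ 0x2E }, { 0x2F }, { 0x50 }, { 0xFF },
};

static int search_mode_id(unsigned char mode_no, unsigned short *index)
{
	for (*index = 0; mode_table[*index].mode_id != 0xFF; (*index)++)
		if (mode_table[*index].mode_id == mode_no)
			return 1;

	return 0;	/* not found */
}

int main(void)
{
	unsigned short idx;
	int found = search_mode_id(0x2F, &idx);

	printf("found=%d idx=%u\n", found, idx);
	return 0;
}
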
-static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
-{
- unsigned char ujRet = 0;
- unsigned char i = 0;
-
- for (i = 0; i < 8; i++) {
- ujRet <<= 1;
- ujRet |= (ujDate >> i) & 1;
- }
-
- return ujRet;
-}
-
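/*
 * Illustrative sketch, not part of the removed driver:
 * XG21GPIODataTransfer() above reverses the bit order of a byte
 * (bit 0 <-> bit 7, bit 1 <-> bit 6, ...), presumably because the
 * XG21 GPIO pins are wired in reverse order relative to CR48.  The
 * test value is hypothetical.
 */
#include <stdio.h>

static unsigned char reverse_bits(unsigned char v)
{
	unsigned char out = 0;
	int i;

	for (i = 0; i < 8; i++) {
		out <<= 1;
		out |= (v >> i) & 1;
	}
	return out;
}

int main(void)
{
	printf("0x23 -> 0x%02X\n", reverse_bits(0x23));	/* prints 0xC4 */
	return 0;
}
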
-/*
- * output
- * bl[5] : LVDS signal
- * bl[1] : LVDS backlight
- * bl[0] : LVDS VDD
- */
-static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x23); /* enable GPIO write */
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
-
- temp = XG21GPIODataTransfer(temp);
- temp &= 0x23;
- xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A);
- return temp;
-}
-
-/*
- * output
- * bl[5] : LVDS signal
- * bl[1] : LVDS backlight
- * bl[0] : LVDS VDD
- */
-static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, CRB4, temp;
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x0C); /* enable GPIO write */
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
-
- temp &= 0x0C;
- temp >>= 2;
- xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A);
- CRB4 = xgifb_reg_get(pVBInfo->P3d4, 0xB4);
- temp |= ((CRB4 & 0x04) << 3);
- return temp;
-}
-
-/*
- * input
- * bl[5] : 1: LVDS signal on
- * bl[1] : 1: LVDS backlight on
- * bl[0] : 1: LVDS VDD on
- * bh: mask of bits to update
- *     100000b : update bit 5
- *     000010b : update bit 1
- *     000001b : update bit 0
- */
-static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- tempbh &= 0x23;
- tempbl &= 0x23;
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
-
- if (tempbh & 0x20) {
- temp = (tempbl >> 4) & 0x02;
-
- /* CR B4[1] */
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
- }
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
-
- temp = XG21GPIODataTransfer(temp);
- temp &= ~tempbh;
- temp |= tempbl;
- xgifb_reg_set(pVBInfo->P3d4, 0x48, temp);
-}
-
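/*
 * Illustrative sketch, not part of the removed driver:
 * XGI_XG21BLSignalVDD() above performs a masked read-modify-write on
 * the GPIO register: "bh" selects which bits to update and "bl"
 * supplies the new values for exactly those bits.  The helper and the
 * example are hypothetical and only mirror that masking pattern.
 */
#include <stdio.h>

static unsigned char update_masked(unsigned char cur, unsigned char mask,
				   unsigned char val)
{
	cur &= (unsigned char)~mask;	/* clear the selected bits */
	cur |= val & mask;		/* copy in the new values */
	return cur;
}

int main(void)
{
	/* turn the backlight bit (bit 1) on without touching other bits */
	printf("0x%02X\n", update_masked(0x21, 0x02, 0x02));	/* prints 0x23 */
	return 0;
}
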
-static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
- unsigned short tempbh0, tempbl0;
-
- tempbh0 = tempbh;
- tempbl0 = tempbl;
- tempbh0 &= 0x20;
- tempbl0 &= 0x20;
- tempbh0 >>= 3;
- tempbl0 >>= 3;
-
- if (tempbh & 0x20) {
- temp = (tempbl >> 4) & 0x02;
-
- /* CR B4[1] */
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- tempbh &= 0x03;
- tempbl &= 0x03;
- tempbh <<= 2;
- tempbl <<= 2; /* GPIOC,GPIOD */
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x48, ~tempbh, tempbl);
-}
-
-static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *pXGIHWDE,
- struct vb_device_info *pVBInfo)
-{
- xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00);
- if (pXGIHWDE->jChipType == XG21) {
- if (pVBInfo->IF_DEF_LVDS == 1) {
- if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x1)) {
- /* LVDS VDD on */
- XGI_XG21BLSignalVDD(0x01, 0x01, pVBInfo);
- mdelay(xgifb_info->lvds_data.PSC_S2);
- }
- if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x20))
- /* LVDS signal on */
- XGI_XG21BLSignalVDD(0x20, 0x20, pVBInfo);
- mdelay(xgifb_info->lvds_data.PSC_S3);
- /* LVDS backlight on */
- XGI_XG21BLSignalVDD(0x02, 0x02, pVBInfo);
- } else {
- /* DVO/DVI signal on */
- XGI_XG21BLSignalVDD(0x20, 0x20, pVBInfo);
- }
- }
-
- if (pXGIHWDE->jChipType == XG27) {
- if (pVBInfo->IF_DEF_LVDS == 1) {
- if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x1)) {
- /* LVDS VDD on */
- XGI_XG27BLSignalVDD(0x01, 0x01, pVBInfo);
- mdelay(xgifb_info->lvds_data.PSC_S2);
- }
- if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x20))
- /* LVDS signal on */
- XGI_XG27BLSignalVDD(0x20, 0x20, pVBInfo);
- mdelay(xgifb_info->lvds_data.PSC_S3);
- /* LVDS backlight on */
- XGI_XG27BLSignalVDD(0x02, 0x02, pVBInfo);
- } else {
- /* DVO/DVI signal on */
- XGI_XG27BLSignalVDD(0x20, 0x20, pVBInfo);
- }
- }
-}
-
-void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *pXGIHWDE,
- struct vb_device_info *pVBInfo)
-{
- if (pXGIHWDE->jChipType == XG21) {
- if (pVBInfo->IF_DEF_LVDS == 1) {
- /* LVDS backlight off */
- XGI_XG21BLSignalVDD(0x02, 0x00, pVBInfo);
- mdelay(xgifb_info->lvds_data.PSC_S3);
- } else {
- /* DVO/DVI signal off */
- XGI_XG21BLSignalVDD(0x20, 0x00, pVBInfo);
- }
- }
-
- if (pXGIHWDE->jChipType == XG27) {
- if ((XGI_XG27GetPSCValue(pVBInfo) & 0x2)) {
- /* LVDS backlight off */
- XGI_XG27BLSignalVDD(0x02, 0x00, pVBInfo);
- mdelay(xgifb_info->lvds_data.PSC_S3);
- }
-
- if (pVBInfo->IF_DEF_LVDS == 0) {
- /* DVO/DVI signal off */
- XGI_XG27BLSignalVDD(0x20, 0x00, pVBInfo);
- }
- }
-
- xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x20);
-}
-
-static void XGI_WaitDisply(struct vb_device_info *pVBInfo)
-{
- while ((inb(pVBInfo->P3da) & 0x01))
- break;
-
- while (!(inb(pVBInfo->P3da) & 0x01))
- break;
-}
-
-static void XGI_AutoThreshold(struct vb_device_info *pVBInfo)
-{
- xgifb_reg_or(pVBInfo->Part1Port, 0x01, 0x40);
-}
-
-static void XGI_SaveCRT2Info(unsigned short ModeNo,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp1, temp2;
-
- /* reserve CR34 for CRT1 Mode No */
- xgifb_reg_set(pVBInfo->P3d4, 0x34, ModeNo);
- temp1 = (pVBInfo->VBInfo & SetInSlaveMode) >> 8;
- temp2 = ~(SetInSlaveMode >> 8);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x31, temp2, temp1);
-}
-
-static void XGI_GetCRT2ResInfo(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short xres, yres, modeflag, resindex;
-
- resindex = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- xres = XGI330_ModeResInfo[resindex].HTotal; /* xres->ax */
- yres = XGI330_ModeResInfo[resindex].VTotal; /* yres->bx */
- /* si+St_ModeFlag */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- if (modeflag & HalfDCLK)
- xres *= 2;
-
- if (modeflag & DoubleScanMode)
- yres *= 2;
-
- if (!(pVBInfo->VBInfo & SetCRT2ToLCD))
- goto exit;
-
- if (pVBInfo->LCDResInfo == Panel_1600x1200) {
- if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
- if (yres == 1024)
- yres = 1056;
- }
- }
-
- if (pVBInfo->LCDResInfo == Panel_1280x1024) {
- if (yres == 400)
- yres = 405;
- else if (yres == 350)
- yres = 360;
-
- if (pVBInfo->LCDInfo & XGI_LCDVESATiming) {
- if (yres == 360)
- yres = 375;
- }
- }
-
- if (pVBInfo->LCDResInfo == Panel_1024x768) {
- if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
- if (!(pVBInfo->LCDInfo & LCDNonExpanding)) {
- if (yres == 350)
- yres = 357;
- else if (yres == 400)
- yres = 420;
- else if (yres == 480)
- yres = 525;
- }
- }
- }
-
- if (xres == 720)
- xres = 640;
-
-exit:
- pVBInfo->VGAHDE = xres;
- pVBInfo->HDE = xres;
- pVBInfo->VGAVDE = yres;
- pVBInfo->VDE = yres;
-}
-
-static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
-{
- if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
- (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
- return 1;
-
- return 0;
-}
-
-static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempax, tempbx, temp1, temp2, modeflag = 0, tempcx,
- CRT1Index;
-
- pVBInfo->RVBHCMAX = 1;
- pVBInfo->RVBHCFACT = 1;
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
- CRT1Index &= IndexMask;
- temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[0];
- temp2 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[5];
- tempax = (temp1 & 0xFF) | ((temp2 & 0x03) << 8);
- tempbx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[8];
- tempcx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[14] << 8;
- tempcx &= 0x0100;
- tempcx <<= 2;
- tempbx |= tempcx;
- temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[9];
-
- if (temp1 & 0x01)
- tempbx |= 0x0100;
-
- if (temp1 & 0x20)
- tempbx |= 0x0200;
- tempax += 5;
-
- if (modeflag & Charx8Dot)
- tempax *= 8;
- else
- tempax *= 9;
-
- pVBInfo->VGAHT = tempax;
- pVBInfo->HT = tempax;
- tempbx++;
- pVBInfo->VGAVT = tempbx;
- pVBInfo->VT = tempbx;
-}
-
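/*
 * Illustrative sketch, not part of the removed driver:
 * XGI_GetRAMDAC2DATA() above rebuilds the horizontal total from the
 * CRT1 table the way the VGA CRTC defines it: a character count plus
 * 5, times 8 or 9 pixels per character cell.  The helper name and the
 * example register value are hypothetical.
 */
#include <stdio.h>

static unsigned int crtc_htotal_pixels(unsigned int ht_chars, int char8dot)
{
	return (ht_chars + 5) * (char8dot ? 8 : 9);
}

int main(void)
{
	/* a CR0 value of 0x5F gives 800 pixels in 8-dot mode (640x480) */
	printf("%u\n", crtc_htotal_pixels(0x5F, 1));
	return 0;
}
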
-static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempax = 0, tempbx = 0, modeflag, resinfo;
-
- struct SiS_LCDData const *LCDPtr = NULL;
-
- /* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- pVBInfo->NewFlickerMode = 0;
- pVBInfo->RVBHRS = 50;
-
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
- XGI_GetRAMDAC2DATA(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- return;
- }
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr = XGI_GetLcdPtr(XGI_LCDDataTable, ModeIdIndex,
- pVBInfo);
-
- pVBInfo->RVBHCMAX = LCDPtr->RVBHCMAX;
- pVBInfo->RVBHCFACT = LCDPtr->RVBHCFACT;
- pVBInfo->VGAHT = LCDPtr->VGAHT;
- pVBInfo->VGAVT = LCDPtr->VGAVT;
- pVBInfo->HT = LCDPtr->LCDHT;
- pVBInfo->VT = LCDPtr->LCDVT;
-
- if (pVBInfo->LCDResInfo == Panel_1024x768) {
- tempax = 1024;
- tempbx = 768;
-
- if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
- if (pVBInfo->VGAVDE == 357)
- tempbx = 527;
- else if (pVBInfo->VGAVDE == 420)
- tempbx = 620;
- else if (pVBInfo->VGAVDE == 525)
- tempbx = 775;
- else if (pVBInfo->VGAVDE == 600)
- tempbx = 775;
- }
- } else if (pVBInfo->LCDResInfo == Panel_1024x768x75) {
- tempax = 1024;
- tempbx = 768;
- } else if (pVBInfo->LCDResInfo == Panel_1280x1024) {
- tempax = 1280;
- if (pVBInfo->VGAVDE == 360)
- tempbx = 768;
- else if (pVBInfo->VGAVDE == 375)
- tempbx = 800;
- else if (pVBInfo->VGAVDE == 405)
- tempbx = 864;
- else
- tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel_1280x1024x75) {
- tempax = 1280;
- tempbx = 1024;
- } else if (pVBInfo->LCDResInfo == Panel_1280x960) {
- tempax = 1280;
- if (pVBInfo->VGAVDE == 350)
- tempbx = 700;
- else if (pVBInfo->VGAVDE == 400)
- tempbx = 800;
- else if (pVBInfo->VGAVDE == 1024)
- tempbx = 960;
- else
- tempbx = 960;
- } else if (pVBInfo->LCDResInfo == Panel_1400x1050) {
- tempax = 1400;
- tempbx = 1050;
-
- if (pVBInfo->VGAVDE == 1024) {
- tempax = 1280;
- tempbx = 1024;
- }
- } else if (pVBInfo->LCDResInfo == Panel_1600x1200) {
- tempax = 1600;
- tempbx = 1200; /* alan 10/14/2003 */
- if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
- if (pVBInfo->VGAVDE == 350)
- tempbx = 875;
- else if (pVBInfo->VGAVDE == 400)
- tempbx = 1000;
- }
- }
-
- if (pVBInfo->LCDInfo & LCDNonExpanding) {
- tempax = pVBInfo->VGAHDE;
- tempbx = pVBInfo->VGAVDE;
- }
-
- pVBInfo->HDE = tempax;
- pVBInfo->VDE = tempbx;
- return;
- }
-
- if (pVBInfo->VBInfo & (SetCRT2ToTV)) {
- struct SiS_TVData const *TVPtr;
-
- TVPtr = XGI_GetTVPtr(ModeIdIndex, RefreshRateTableIndex,
- pVBInfo);
-
- pVBInfo->RVBHCMAX = TVPtr->RVBHCMAX;
- pVBInfo->RVBHCFACT = TVPtr->RVBHCFACT;
- pVBInfo->VGAHT = TVPtr->VGAHT;
- pVBInfo->VGAVT = TVPtr->VGAVT;
- pVBInfo->HDE = TVPtr->TVHDE;
- pVBInfo->VDE = TVPtr->TVVDE;
- pVBInfo->RVBHRS = TVPtr->RVBHRS;
- pVBInfo->NewFlickerMode = TVPtr->FlickerMode;
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if (resinfo == 0x08)
- pVBInfo->NewFlickerMode = 0x40;
- else if (resinfo == 0x09)
- pVBInfo->NewFlickerMode = 0x40;
- else if (resinfo == 0x12)
- pVBInfo->NewFlickerMode = 0x40;
-
- if (pVBInfo->VGAVDE == 350)
- pVBInfo->TVInfo |= TVSimuMode;
-
- tempax = ExtHiTVHT;
- tempbx = ExtHiTVVT;
-
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (pVBInfo->TVInfo & TVSimuMode) {
- tempax = StHiTVHT;
- tempbx = StHiTVVT;
-
- if (!(modeflag & Charx8Dot)) {
- tempax = StHiTextTVHT;
- tempbx = StHiTextTVVT;
- }
- }
- }
- } else if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
- if (pVBInfo->TVInfo & TVSetYPbPr750p) {
- tempax = YPbPrTV750pHT; /* Ext750pTVHT */
- tempbx = YPbPrTV750pVT; /* Ext750pTVVT */
- }
-
- if (pVBInfo->TVInfo & TVSetYPbPr525p) {
- tempax = YPbPrTV525pHT; /* Ext525pTVHT */
- tempbx = YPbPrTV525pVT; /* Ext525pTVVT */
- } else if (pVBInfo->TVInfo & TVSetYPbPr525i) {
- tempax = YPbPrTV525iHT; /* Ext525iTVHT */
- tempbx = YPbPrTV525iVT; /* Ext525iTVVT */
- if (pVBInfo->TVInfo & NTSC1024x768)
- tempax = NTSC1024x768HT;
- }
- } else {
- tempax = PALHT;
- tempbx = PALVT;
- if (!(pVBInfo->TVInfo & TVSetPAL)) {
- tempax = NTSCHT;
- tempbx = NTSCVT;
- if (pVBInfo->TVInfo & NTSC1024x768)
- tempax = NTSC1024x768HT;
- }
- }
-
- pVBInfo->HT = tempax;
- pVBInfo->VT = tempbx;
- }
-}
-
-static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char di_0, di_1, tempal;
-
- tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeIdIndex, pVBInfo);
- XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
- XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
-
- if (pVBInfo->VBType & VB_SIS301) { /* shampoo 0129 */
- /* 301 */
- xgifb_reg_set(pVBInfo->Part4Port, 0x0A, 0x10);
- xgifb_reg_set(pVBInfo->Part4Port, 0x0B, di_1);
- xgifb_reg_set(pVBInfo->Part4Port, 0x0A, di_0);
- } else { /* 301b/302b/301lv/302lv */
- xgifb_reg_set(pVBInfo->Part4Port, 0x0A, di_0);
- xgifb_reg_set(pVBInfo->Part4Port, 0x0B, di_1);
- }
-
- xgifb_reg_set(pVBInfo->Part4Port, 0x00, 0x12);
-
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC)
- xgifb_reg_or(pVBInfo->Part4Port, 0x12, 0x28);
- else
- xgifb_reg_or(pVBInfo->Part4Port, 0x12, 0x08);
-}
-
-static unsigned short XGI_GetColorDepth(unsigned short ModeIdIndex)
-{
- unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
- short index;
- unsigned short modeflag;
-
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- index = (modeflag & ModeTypeMask) - ModeEGA;
-
- if (index < 0)
- index = 0;
-
- return ColorDepth[index];
-}
-
-static unsigned short XGI_GetOffset(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex)
-{
- unsigned short temp, colordepth, modeinfo, index, infoflag,
- ColorDepth[] = { 0x01, 0x02, 0x04 };
-
- modeinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeInfo;
- infoflag = XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
-
- index = (modeinfo >> 8) & 0xFF;
-
- temp = XGI330_ScreenOffset[index];
-
- if (infoflag & InterlaceMode)
- temp <<= 1;
-
- colordepth = XGI_GetColorDepth(ModeIdIndex);
-
- if ((ModeNo >= 0x7C) && (ModeNo <= 0x7E)) {
- temp = ModeNo - 0x7C;
- colordepth = ColorDepth[temp];
- temp = 0x6B;
- if (infoflag & InterlaceMode)
- temp <<= 1;
- }
- return temp * colordepth;
-}
-
-static void XGI_SetCRT2Offset(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short offset;
- unsigned char temp;
-
- if (pVBInfo->VBInfo & SetInSlaveMode)
- return;
-
- offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex);
- temp = (unsigned char)(offset & 0xFF);
- xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
- temp = (unsigned char)((offset & 0xFF00) >> 8);
- xgifb_reg_set(pVBInfo->Part1Port, 0x09, temp);
- temp = (unsigned char)(((offset >> 3) & 0xFF) + 1);
- xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
-}
-
-static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
-{
-	/* threshold high, disable auto threshold */
- xgifb_reg_set(pVBInfo->Part1Port, 0x01, 0x3B);
- /* threshold low default 04h */
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x02, ~(0x3F), 0x04);
-}
-
-static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- u8 tempcx;
-
- XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetCRT2FIFO(pVBInfo);
-
- for (tempcx = 4; tempcx < 7; tempcx++)
- xgifb_reg_set(pVBInfo->Part1Port, tempcx, 0x0);
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x50, 0x00);
- xgifb_reg_set(pVBInfo->Part1Port, 0x02, 0x44); /* temp 0206 */
-}
-
-static void XGI_SetGroup1(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0,
- pushbx = 0, CRT1Index, modeflag;
-
- CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
- CRT1Index &= IndexMask;
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- /* bainy change table name */
- if (modeflag & HalfDCLK) {
- /* BTVGA2HT 0x08,0x09 */
- temp = (pVBInfo->VGAHT / 2 - 1) & 0x0FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x08, temp);
- temp = (((pVBInfo->VGAHT / 2 - 1) & 0xFF00) >> 8) << 4;
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x09, ~0x0F0, temp);
- /* BTVGA2HDEE 0x0A,0x0C */
- temp = (pVBInfo->VGAHDE / 2 + 16) & 0x0FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0A, temp);
- tempcx = ((pVBInfo->VGAHT - pVBInfo->VGAHDE) / 2) >> 2;
- pushbx = pVBInfo->VGAHDE / 2 + 16;
- tempcx >>= 1;
-		tempbx = pushbx + tempcx; /* bx BTVGA2HRS 0x0B,0x0C */
- tempcx += tempbx;
-
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
- tempbx = XGI_CRT1Table[CRT1Index].CR[4];
- tempbx |= ((XGI_CRT1Table[CRT1Index].CR[14] &
- 0xC0) << 2);
- tempbx = (tempbx - 3) << 3; /* (VGAHRS-3)*8 */
- tempcx = XGI_CRT1Table[CRT1Index].CR[5];
- tempcx &= 0x1F;
- temp = XGI_CRT1Table[CRT1Index].CR[15];
- temp = (temp & 0x04) << (5 - 2); /* VGAHRE D[5] */
- tempcx = ((tempcx | temp) - 3) << 3; /* (VGAHRE-3)*8 */
- }
-
- tempbx += 4;
- tempcx += 4;
-
- if (tempcx > (pVBInfo->VGAHT / 2))
- tempcx = pVBInfo->VGAHT / 2;
-
- temp = tempbx & 0x00FF;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x0B, temp);
- } else {
- temp = (pVBInfo->VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */
- xgifb_reg_set(pVBInfo->Part1Port, 0x08, temp);
- temp = (((pVBInfo->VGAHT - 1) & 0xFF00) >> 8) << 4;
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x09, ~0x0F0, temp);
- /* BTVGA2HDEE 0x0A,0x0C */
- temp = (pVBInfo->VGAHDE + 16) & 0x0FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0A, temp);
- tempcx = (pVBInfo->VGAHT - pVBInfo->VGAHDE) >> 2; /* cx */
- pushbx = pVBInfo->VGAHDE + 16;
- tempcx >>= 1;
-		tempbx = pushbx + tempcx; /* bx BTVGA2HRS 0x0B,0x0C */
- tempcx += tempbx;
-
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
- tempbx = XGI_CRT1Table[CRT1Index].CR[3];
- tempbx |= ((XGI_CRT1Table[CRT1Index].CR[5] &
- 0xC0) << 2);
- tempbx = (tempbx - 3) << 3; /* (VGAHRS-3)*8 */
- tempcx = XGI_CRT1Table[CRT1Index].CR[4];
- tempcx &= 0x1F;
- temp = XGI_CRT1Table[CRT1Index].CR[6];
- temp = (temp & 0x04) << (5 - 2); /* VGAHRE D[5] */
- tempcx = ((tempcx | temp) - 3) << 3; /* (VGAHRE-3)*8 */
- tempbx += 16;
- tempcx += 16;
- }
-
- if (tempcx > pVBInfo->VGAHT)
- tempcx = pVBInfo->VGAHT;
-
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0B, temp);
- }
-
- tempax = (tempax & 0x00FF) | (tempbx & 0xFF00);
- tempbx = pushbx;
- tempbx = (tempbx & 0x00FF) | ((tempbx & 0xFF00) << 4);
- tempax |= (tempbx & 0xFF00);
- temp = (tempax & 0xFF00) >> 8;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0C, temp);
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0D, temp);
- tempcx = pVBInfo->VGAVT - 1;
- temp = tempcx & 0x00FF;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x0E, temp);
- tempbx = pVBInfo->VGAVDE - 1;
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0F, temp);
- temp = ((tempbx & 0xFF00) << 3) >> 8;
- temp |= ((tempcx & 0xFF00) >> 8);
- xgifb_reg_set(pVBInfo->Part1Port, 0x12, temp);
-
- /* BTVGA2VRS 0x10,0x11 */
- tempbx = (pVBInfo->VGAVT + pVBInfo->VGAVDE) >> 1;
- /* BTVGA2VRE 0x11 */
- tempcx = ((pVBInfo->VGAVT - pVBInfo->VGAVDE) >> 4) + tempbx + 1;
-
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
- tempbx = XGI_CRT1Table[CRT1Index].CR[10];
- temp = XGI_CRT1Table[CRT1Index].CR[9];
-
- if (temp & 0x04)
- tempbx |= 0x0100;
-
- if (temp & 0x080)
- tempbx |= 0x0200;
-
- temp = XGI_CRT1Table[CRT1Index].CR[14];
-
- if (temp & 0x08)
- tempbx |= 0x0400;
-
- temp = XGI_CRT1Table[CRT1Index].CR[11];
- tempcx = (tempcx & 0xFF00) | (temp & 0x00FF);
- }
-
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp);
- temp = ((tempbx & 0xFF00) >> 8) << 4;
- temp = (tempcx & 0x000F) | (temp);
- xgifb_reg_set(pVBInfo->Part1Port, 0x11, temp);
- tempax = 0;
-
- if (modeflag & DoubleScanMode)
- tempax |= 0x80;
-
- if (modeflag & HalfDCLK)
- tempax |= 0x40;
-
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x2C, ~0x0C0, tempax);
-}
-
-static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
-{
- unsigned long tempax, tempbx;
-
- tempbx = ((pVBInfo->VGAVT - pVBInfo->VGAVDE) * pVBInfo->RVBHCMAX)
- & 0xFFFF;
- tempax = (pVBInfo->VT - pVBInfo->VDE) * pVBInfo->RVBHCFACT;
- tempax = (tempax * pVBInfo->HT) / tempbx;
-
- return (unsigned short)tempax;
-}
-
-static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
- modeflag;
-
- /* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
-
- if (!(pVBInfo->VBInfo & SetInSlaveMode))
- return;
-
- temp = 0xFF; /* set MAX HT */
- xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
- tempcx = 0x08;
-
- if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
- modeflag |= Charx8Dot;
-
- tempax = pVBInfo->VGAHDE; /* 0x04 Horizontal Display End */
-
- if (modeflag & HalfDCLK)
- tempax >>= 1;
-
- tempax = (tempax / tempcx) - 1;
- tempbx |= ((tempax & 0x00FF) << 8);
- temp = tempax & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x04, temp);
-
- temp = (tempbx & 0xFF00) >> 8;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)))
- temp += 2;
-
- if ((pVBInfo->VBInfo & SetCRT2ToHiVision) &&
- !(pVBInfo->VBType & VB_SIS301LV) && (resinfo == 7))
- temp -= 2;
- }
-
- /* 0x05 Horizontal Display Start */
- xgifb_reg_set(pVBInfo->Part1Port, 0x05, temp);
- /* 0x06 Horizontal Blank end */
- xgifb_reg_set(pVBInfo->Part1Port, 0x06, 0x03);
-
- if (!(pVBInfo->VBInfo & DisableCRT2Display)) { /* 030226 bainy */
- if (pVBInfo->VBInfo & SetCRT2ToTV)
- tempax = pVBInfo->VGAHT;
- else
- tempax = XGI_GetVGAHT2(pVBInfo);
- }
-
- if (tempax >= pVBInfo->VGAHT)
- tempax = pVBInfo->VGAHT;
-
- if (modeflag & HalfDCLK)
- tempax >>= 1;
-
- tempax = (tempax / tempcx) - 5;
- tempcx = tempax; /* 20030401 0x07 horizontal Retrace Start */
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- temp = (tempbx & 0x00FF) - 1;
- if (!(modeflag & HalfDCLK)) {
- temp -= 6;
- if (pVBInfo->TVInfo & TVSimuMode) {
- temp -= 4;
- temp -= 10;
- }
- }
- } else {
- tempbx = (tempbx & 0xFF00) >> 8;
- tempcx = (tempcx + tempbx) >> 1;
- temp = (tempcx & 0x00FF) + 2;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- temp -= 1;
- if (!(modeflag & HalfDCLK)) {
- if ((modeflag & Charx8Dot)) {
- temp += 4;
- if (pVBInfo->VGAHDE >= 800)
- temp -= 6;
- }
- }
- } else if (!(modeflag & HalfDCLK)) {
- temp -= 4;
- if (pVBInfo->LCDResInfo != Panel_1280x960 &&
- pVBInfo->VGAHDE >= 800) {
- temp -= 7;
- if (pVBInfo->VGAHDE >= 1280 &&
- pVBInfo->LCDResInfo != Panel_1280x960 &&
- (pVBInfo->LCDInfo & LCDNonExpanding))
- temp += 28;
- }
- }
- }
-
- /* 0x07 Horizontal Retrace Start */
- xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
- /* 0x08 Horizontal Retrace End */
- xgifb_reg_set(pVBInfo->Part1Port, 0x08, 0);
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->TVInfo & TVSimuMode) {
- if (ModeNo == 0x50) {
- if (pVBInfo->TVInfo == SetNTSCTV) {
- xgifb_reg_set(pVBInfo->Part1Port,
- 0x07, 0x30);
- xgifb_reg_set(pVBInfo->Part1Port,
- 0x08, 0x03);
- } else {
- xgifb_reg_set(pVBInfo->Part1Port,
- 0x07, 0x2f);
- xgifb_reg_set(pVBInfo->Part1Port,
- 0x08, 0x02);
- }
- }
- }
- }
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x18, 0x03); /* 0x18 SR0B */
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0xF0, 0x00);
- xgifb_reg_set(pVBInfo->Part1Port, 0x09, 0xFF); /* 0x09 Set Max VT */
-
- tempbx = pVBInfo->VGAVT;
- push1 = tempbx;
- tempcx = 0x121;
- tempbx = pVBInfo->VGAVDE; /* 0x0E Vertical Display End */
-
- if (tempbx == 357)
- tempbx = 350;
- if (tempbx == 360)
- tempbx = 350;
- if (tempbx == 375)
- tempbx = 350;
- if (tempbx == 405)
- tempbx = 400;
- if (tempbx == 525)
- tempbx = 480;
-
- push2 = tempbx;
-
- if (pVBInfo->VBInfo & SetCRT2ToLCD) {
- if (pVBInfo->LCDResInfo == Panel_1024x768) {
- if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
- if (tempbx == 350)
- tempbx += 5;
- if (tempbx == 480)
- tempbx += 5;
- }
- }
- }
- tempbx--;
- tempbx--;
- temp = tempbx & 0x00FF;
- /* 0x10 vertical Blank Start */
- xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp);
- tempbx = push2;
- tempbx--;
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0E, temp);
-
- if (tempbx & 0x0100)
- tempcx |= 0x0002;
-
- tempax = 0x000B;
-
- if (modeflag & DoubleScanMode)
- tempax |= 0x08000;
-
- if (tempbx & 0x0200)
- tempcx |= 0x0040;
-
- temp = (tempax & 0xFF00) >> 8;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0B, temp);
-
- if (tempbx & 0x0400)
- tempcx |= 0x0600;
-
- /* 0x11 Vertical Blank End */
- xgifb_reg_set(pVBInfo->Part1Port, 0x11, 0x00);
-
- tempax = push1;
- tempax -= tempbx; /* 0x0C Vertical Retrace Start */
- tempax >>= 2;
- push1 = tempax; /* push ax */
-
- if (resinfo != 0x09) {
- tempax <<= 1;
- tempbx += tempax;
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if ((pVBInfo->VBType & VB_SIS301LV) &&
- !(pVBInfo->TVInfo & TVSetHiVision)) {
- if ((pVBInfo->TVInfo & TVSimuMode) &&
- (pVBInfo->TVInfo & TVSetPAL)) {
- if (!(pVBInfo->VBType & VB_SIS301LV) ||
- !(pVBInfo->TVInfo &
- (TVSetYPbPr525p |
- TVSetYPbPr750p |
- TVSetHiVision)))
- tempbx += 40;
- }
- } else {
- tempbx -= 10;
- }
- } else if (pVBInfo->TVInfo & TVSimuMode) {
- if (pVBInfo->TVInfo & TVSetPAL) {
- if (pVBInfo->VBType & VB_SIS301LV) {
- if (!(pVBInfo->TVInfo &
- (TVSetYPbPr525p |
- TVSetYPbPr750p |
- TVSetHiVision)))
- tempbx += 40;
- } else {
- tempbx += 40;
- }
- }
- }
- tempax = push1;
- tempax >>= 2;
- tempax++;
- tempax += tempbx;
- push1 = tempax; /* push ax */
-
- if ((pVBInfo->TVInfo & TVSetPAL)) {
- if (tempbx <= 513) {
- if (tempax >= 513)
- tempbx = 513;
- }
- }
-
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0C, temp);
- tempbx--;
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp);
-
- if (tempbx & 0x0100)
- tempcx |= 0x0008;
-
- if (tempbx & 0x0200)
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x0B, 0x0FF, 0x20);
-
- tempbx++;
-
- if (tempbx & 0x0100)
- tempcx |= 0x0004;
-
- if (tempbx & 0x0200)
- tempcx |= 0x0080;
-
- if (tempbx & 0x0400)
- tempcx |= 0x0C00;
-
- tempbx = push1; /* pop ax */
- temp = tempbx & 0x00FF;
- temp &= 0x0F;
- /* 0x0D vertical Retrace End */
- xgifb_reg_set(pVBInfo->Part1Port, 0x0D, temp);
-
- if (tempbx & 0x0010)
- tempcx |= 0x2000;
-
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part1Port, 0x0A, temp); /* 0x0A CR07 */
- temp = (tempcx & 0x0FF00) >> 8;
- xgifb_reg_set(pVBInfo->Part1Port, 0x17, temp); /* 0x17 SR0A */
- tempax = modeflag;
- temp = (tempax & 0xFF00) >> 8;
-
- temp = (temp >> 1) & 0x09;
-
- if (pVBInfo->VBType & (VB_SIS301LV | VB_SIS302LV | VB_XGI301C))
- temp |= 0x01;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x16, temp); /* 0x16 SR01 */
- xgifb_reg_set(pVBInfo->Part1Port, 0x0F, 0); /* 0x0F CR14 */
- xgifb_reg_set(pVBInfo->Part1Port, 0x12, 0); /* 0x12 CR17 */
-
- if (pVBInfo->LCDInfo & LCDRGB18Bit)
- temp = 0x80;
- else
- temp = 0x00;
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x1A, temp); /* 0x1A SR0E */
-}
-
-static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
- modeflag;
- unsigned char const *TimingPoint;
-
- unsigned long longtemp, tempeax, tempebx, temp2, tempecx;
-
- /* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- tempax = 0;
-
- if (!(pVBInfo->VBInfo & SetCRT2ToAVIDEO))
- tempax |= 0x0800;
-
- if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
- tempax |= 0x0400;
-
- if (pVBInfo->VBInfo & SetCRT2ToSCART)
- tempax |= 0x0200;
-
- if (!(pVBInfo->TVInfo & TVSetPAL))
- tempax |= 0x1000;
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision)
- tempax |= 0x0100;
-
- if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
- tempax &= 0xfe00;
-
- tempax = (tempax & 0xff00) >> 8;
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x0, tempax);
- TimingPoint = XGI330_NTSCTiming;
-
- if (pVBInfo->TVInfo & TVSetPAL)
- TimingPoint = XGI330_PALTiming;
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- TimingPoint = XGI330_HiTVExtTiming;
-
- if (pVBInfo->VBInfo & SetInSlaveMode)
- TimingPoint = XGI330_HiTVSt2Timing;
-
- if (pVBInfo->SetFlag & TVSimuMode)
- TimingPoint = XGI330_HiTVSt1Timing;
-
- if (!(modeflag & Charx8Dot))
- TimingPoint = XGI330_HiTVTextTiming;
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
- if (pVBInfo->TVInfo & TVSetYPbPr525i)
- TimingPoint = XGI330_YPbPr525iTiming;
-
- if (pVBInfo->TVInfo & TVSetYPbPr525p)
- TimingPoint = XGI330_YPbPr525pTiming;
-
- if (pVBInfo->TVInfo & TVSetYPbPr750p)
- TimingPoint = XGI330_YPbPr750pTiming;
- }
-
- for (i = 0x01, j = 0; i <= 0x2D; i++, j++)
- xgifb_reg_set(pVBInfo->Part2Port, i, TimingPoint[j]);
-
- for (i = 0x39; i <= 0x45; i++, j++)
- /* di->temp2[j] */
- xgifb_reg_set(pVBInfo->Part2Port, i, TimingPoint[j]);
-
- if (pVBInfo->VBInfo & SetCRT2ToTV)
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x3A, 0x1F, 0x00);
-
- temp = pVBInfo->NewFlickerMode;
- temp &= 0x80;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xFF, temp);
-
- if (pVBInfo->TVInfo & TVSetPAL)
- tempax = 520;
- else
- tempax = 440;
-
- if (pVBInfo->VDE <= tempax) {
- tempax -= pVBInfo->VDE;
- tempax >>= 2;
- tempax = (tempax & 0x00FF) | ((tempax & 0x00FF) << 8);
- push1 = tempax;
- temp = (tempax & 0xFF00) >> 8;
- temp += (unsigned short)TimingPoint[0];
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO
- | SetCRT2ToSVIDEO | SetCRT2ToSCART
- | SetCRT2ToYPbPr525750)) {
- tempcx = pVBInfo->VGAHDE;
- if (tempcx >= 1024) {
- temp = 0x17; /* NTSC */
- if (pVBInfo->TVInfo & TVSetPAL)
- temp = 0x19; /* PAL */
- }
- }
- }
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x01, temp);
- tempax = push1;
- temp = (tempax & 0xFF00) >> 8;
- temp += TimingPoint[1];
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- if ((pVBInfo->VBInfo & (SetCRT2ToAVIDEO
- | SetCRT2ToSVIDEO | SetCRT2ToSCART
- | SetCRT2ToYPbPr525750))) {
- tempcx = pVBInfo->VGAHDE;
- if (tempcx >= 1024) {
- temp = 0x1D; /* NTSC */
- if (pVBInfo->TVInfo & TVSetPAL)
- temp = 0x52; /* PAL */
- }
- }
- }
- xgifb_reg_set(pVBInfo->Part2Port, 0x02, temp);
- }
-
- /* 301b */
- tempcx = pVBInfo->HT;
-
- if (XGI_IsLCDDualLink(pVBInfo))
- tempcx >>= 1;
-
- tempcx -= 2;
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x1B, temp);
-
- temp = (tempcx & 0xFF00) >> 8;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x1D, ~0x0F, temp);
-
- tempcx = pVBInfo->HT >> 1;
- push1 = tempcx; /* push cx */
- tempcx += 7;
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision)
- tempcx -= 4;
-
- temp = tempcx & 0x00FF;
- temp <<= 4;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x22, 0x0F, temp);
-
- tempbx = TimingPoint[j] | ((TimingPoint[j + 1]) << 8);
- tempbx += tempcx;
- push2 = tempbx;
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x24, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp <<= 4;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x25, 0x0F, temp);
-
- tempbx = push2;
- tempbx = tempbx + 8;
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- tempbx = tempbx - 4;
- tempcx = tempbx;
- }
-
- temp = (tempbx & 0x00FF) << 4;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x29, 0x0F, temp);
-
- j += 2;
- tempcx += (TimingPoint[j] | ((TimingPoint[j + 1]) << 8));
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x27, temp);
- temp = ((tempcx & 0xFF00) >> 8) << 4;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x28, 0x0F, temp);
-
- tempcx += 8;
- if (pVBInfo->VBInfo & SetCRT2ToHiVision)
- tempcx -= 4;
-
- temp = tempcx & 0xFF;
- temp <<= 4;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x2A, 0x0F, temp);
-
- tempcx = push1; /* pop cx */
- j += 2;
- temp = TimingPoint[j] | ((TimingPoint[j + 1]) << 8);
- tempcx -= temp;
- temp = tempcx & 0x00FF;
- temp <<= 4;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x2D, 0x0F, temp);
-
- tempcx -= 11;
-
- if (!(pVBInfo->VBInfo & SetCRT2ToTV)) {
- tempax = XGI_GetVGAHT2(pVBInfo);
- tempcx = tempax - 1;
- }
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x2E, temp);
-
- tempbx = pVBInfo->VDE;
-
- if (pVBInfo->VGAVDE == 360)
- tempbx = 746;
- if (pVBInfo->VGAVDE == 375)
- tempbx = 746;
- if (pVBInfo->VGAVDE == 405)
- tempbx = 853;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->VBType &
- (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
- if (!(pVBInfo->TVInfo &
- (TVSetYPbPr525p | TVSetYPbPr750p)))
- tempbx >>= 1;
- } else {
- tempbx >>= 1;
- }
- }
-
- tempbx -= 2;
- temp = tempbx & 0x00FF;
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if (pVBInfo->VBType & VB_SIS301LV) {
- if (pVBInfo->TVInfo & TVSetHiVision) {
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (ModeNo == 0x2f)
- temp += 1;
- }
- }
- } else if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (ModeNo == 0x2f)
- temp += 1;
- }
- }
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x2F, temp);
-
- temp = (tempcx & 0xFF00) >> 8;
- temp |= ((tempbx & 0xFF00) >> 8) << 6;
-
- if (!(pVBInfo->VBInfo & SetCRT2ToHiVision)) {
- if (pVBInfo->VBType & VB_SIS301LV) {
- if (pVBInfo->TVInfo & TVSetHiVision) {
- temp |= 0x10;
-
- if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
- temp |= 0x20;
- }
- } else {
- temp |= 0x10;
- if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
- temp |= 0x20;
- }
- }
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x30, temp);
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) { /* TV gatingno */
- tempbx = pVBInfo->VDE;
- tempcx = tempbx - 2;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->TVInfo & (TVSetYPbPr525p
- | TVSetYPbPr750p)))
- tempbx >>= 1;
- }
-
- if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
- temp = 0;
- if (tempcx & 0x0400)
- temp |= 0x20;
-
- if (tempbx & 0x0400)
- temp |= 0x40;
-
- xgifb_reg_set(pVBInfo->Part4Port, 0x10, temp);
- }
-
- temp = (((tempbx - 3) & 0x0300) >> 8) << 5;
- xgifb_reg_set(pVBInfo->Part2Port, 0x46, temp);
- temp = (tempbx - 3) & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x47, temp);
- }
-
- tempbx = tempbx & 0x00FF;
-
- if (!(modeflag & HalfDCLK)) {
- tempcx = pVBInfo->VGAHDE;
- if (tempcx >= pVBInfo->HDE) {
- tempbx |= 0x2000;
- tempax &= 0x00FF;
- }
- }
-
- tempcx = 0x0101;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) { /* 301b */
- if (pVBInfo->VGAHDE >= 1024) {
- tempcx = 0x1920;
- if (pVBInfo->VGAHDE >= 1280) {
- tempcx = 0x1420;
- tempbx = tempbx & 0xDFFF;
- }
- }
- }
-
- if (!(tempbx & 0x2000)) {
- if (modeflag & HalfDCLK)
- tempcx = (tempcx & 0xFF00) | ((tempcx & 0x00FF) << 1);
-
- push1 = tempbx;
- tempeax = pVBInfo->VGAHDE;
- tempebx = (tempcx & 0xFF00) >> 8;
- longtemp = tempeax * tempebx;
- tempecx = tempcx & 0x00FF;
- longtemp = longtemp / tempecx;
-
- /* 301b */
- tempecx = 8 * 1024;
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- tempecx = tempecx * 8;
- }
-
- longtemp = longtemp * tempecx;
- tempecx = pVBInfo->HDE;
- temp2 = longtemp % tempecx;
- tempeax = longtemp / tempecx;
- if (temp2 != 0)
- tempeax += 1;
-
- tempax = (unsigned short)tempeax;
-
- /* 301b */
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- tempcx = ((tempax & 0xFF00) >> 5) >> 8;
- }
- /* end 301b */
-
- tempbx = push1;
- tempbx = (unsigned short)(((tempeax & 0x0000FF00) & 0x1F00)
- | (tempbx & 0x00FF));
- tempax = (unsigned short)(((tempeax & 0x000000FF) << 8)
- | (tempax & 0x00FF));
- temp = (tempax & 0xFF00) >> 8;
- } else {
- temp = (tempax & 0x00FF) >> 8;
- }
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x44, temp);
- temp = (tempbx & 0xFF00) >> 8;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x45, ~0x03F, temp);
- temp = tempcx & 0x00FF;
-
- if (tempbx & 0x2000)
- temp = 0;
-
- if (!(pVBInfo->VBInfo & SetCRT2ToLCD))
- temp |= 0x18;
-
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x46, ~0x1F, temp);
- if (pVBInfo->TVInfo & TVSetPAL) {
- tempbx = 0x0382;
- tempcx = 0x007e;
- } else {
- tempbx = 0x0369;
- tempcx = 0x0061;
- }
-
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x4b, temp);
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x4c, temp);
-
- temp = ((tempcx & 0xFF00) >> 8) & 0x03;
- temp <<= 2;
- temp |= ((tempbx & 0xFF00) >> 8) & 0x03;
-
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
- temp |= 0x10;
-
- if (pVBInfo->TVInfo & TVSetYPbPr525p)
- temp |= 0x20;
-
- if (pVBInfo->TVInfo & TVSetYPbPr750p)
- temp |= 0x60;
- }
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x4d, temp);
- temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
- xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short)(temp - 3));
-
- if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
- if (pVBInfo->TVInfo & NTSC1024x768) {
- TimingPoint = XGI_NTSC1024AdjTime;
- for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
- xgifb_reg_set(pVBInfo->Part2Port, i,
- TimingPoint[j]);
- }
- xgifb_reg_set(pVBInfo->Part2Port, 0x43, 0x72);
- }
- }
-
- /* Modify for 301C PALM Support */
- if (pVBInfo->VBType & VB_XGI301C) {
- if (pVBInfo->TVInfo & TVSetPALM)
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
- 0x08); /* PALM Mode */
- }
-
- if (pVBInfo->TVInfo & TVSetPALM) {
- tempax = xgifb_reg_get(pVBInfo->Part2Port, 0x01);
- tempax--;
- xgifb_reg_and(pVBInfo->Part2Port, 0x01, tempax);
-
- xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) {
- if (!(pVBInfo->VBInfo & SetInSlaveMode))
- xgifb_reg_set(pVBInfo->Part2Port, 0x0B, 0x00);
- }
-}
-
-static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
- tempbh, tempch;
-
- struct XGI_LCDDesStruct const *LCDBDesPtr = NULL;
-
- /* si+Ext_ResInfo */
- if (!(pVBInfo->VBInfo & SetCRT2ToLCD))
- return;
-
- tempbx = pVBInfo->HDE; /* RHACTE=HDE-1 */
-
- if (XGI_IsLCDDualLink(pVBInfo))
- tempbx >>= 1;
-
- tempbx -= 1;
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x2C, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp <<= 4;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x2B, 0x0F, temp);
- temp = 0x01;
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x0B, temp);
- tempbx = pVBInfo->VDE; /* RTVACTEO=(VDE-1)&0xFF */
- tempbx--;
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x03, temp);
- temp = ((tempbx & 0xFF00) >> 8) & 0x07;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x0C, ~0x07, temp);
-
- tempcx = pVBInfo->VT - 1;
- temp = tempcx & 0x00FF; /* RVTVT=VT-1 */
- xgifb_reg_set(pVBInfo->Part2Port, 0x19, temp);
- temp = (tempcx & 0xFF00) >> 8;
- temp <<= 5;
- xgifb_reg_set(pVBInfo->Part2Port, 0x1A, temp);
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x09, 0xF0, 0x00);
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xF0, 0x00);
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x17, 0xFB, 0x00);
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x18, 0xDF, 0x00);
-
- /* Customized LCDB Does not add */
- if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
- LCDBDesPtr = XGI_GetLcdPtr(xgifb_lcddldes, ModeIdIndex,
- pVBInfo);
- else
- LCDBDesPtr = XGI_GetLcdPtr(XGI_LCDDesDataTable, ModeIdIndex,
- pVBInfo);
-
- tempah = pVBInfo->LCDResInfo;
- tempah &= PanelResInfo;
-
- if ((tempah == Panel_1024x768) || (tempah == Panel_1024x768x75)) {
- tempbx = 1024;
- tempcx = 768;
- } else if ((tempah == Panel_1280x1024) ||
- (tempah == Panel_1280x1024x75)) {
- tempbx = 1280;
- tempcx = 1024;
- } else if (tempah == Panel_1400x1050) {
- tempbx = 1400;
- tempcx = 1050;
- } else {
- tempbx = 1600;
- tempcx = 1200;
- }
-
- if (pVBInfo->LCDInfo & EnableScalingLCD) {
- tempbx = pVBInfo->HDE;
- tempcx = pVBInfo->VDE;
- }
-
- pushbx = tempbx;
- tempax = pVBInfo->VT;
- pVBInfo->LCDHDES = LCDBDesPtr->LCDHDES;
- pVBInfo->LCDHRS = LCDBDesPtr->LCDHRS;
- pVBInfo->LCDVDES = LCDBDesPtr->LCDVDES;
- pVBInfo->LCDVRS = LCDBDesPtr->LCDVRS;
- tempbx = pVBInfo->LCDVDES;
- tempcx += tempbx;
-
- if (tempcx >= tempax)
- tempcx -= tempax; /* lcdvdes */
-
- temp = tempbx & 0x00FF; /* RVEQ1EQ=lcdvdes */
- xgifb_reg_set(pVBInfo->Part2Port, 0x05, temp);
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x06, temp);
- tempch = ((tempcx & 0xFF00) >> 8) & 0x07;
- tempbh = ((tempbx & 0xFF00) >> 8) & 0x07;
- tempah = tempch;
- tempah <<= 3;
- tempah |= tempbh;
- xgifb_reg_set(pVBInfo->Part2Port, 0x02, tempah);
-
- /* getlcdsync() */
- XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
- tempcx = tempbx;
- tempax = pVBInfo->VT;
- tempbx = pVBInfo->LCDVRS;
-
- tempcx += tempbx;
- if (tempcx >= tempax)
- tempcx -= tempax;
-
- temp = tempbx & 0x00FF; /* RTVACTEE=lcdvrs */
- xgifb_reg_set(pVBInfo->Part2Port, 0x04, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp <<= 4;
- temp |= (tempcx & 0x000F);
- xgifb_reg_set(pVBInfo->Part2Port, 0x01, temp);
- tempcx = pushbx;
- tempax = pVBInfo->HT;
- tempbx = pVBInfo->LCDHDES;
- tempbx &= 0x0FFF;
-
- if (XGI_IsLCDDualLink(pVBInfo)) {
- tempax >>= 1;
- tempbx >>= 1;
- tempcx >>= 1;
- }
-
- if (pVBInfo->VBType & VB_SIS302LV)
- tempbx += 1;
-
- if (pVBInfo->VBType & VB_XGI301C) /* tap4 */
- tempbx += 1;
-
- tempcx += tempbx;
-
- if (tempcx >= tempax)
- tempcx -= tempax;
-
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x1F, temp); /* RHBLKE=lcdhdes */
- temp = ((tempbx & 0xFF00) >> 8) << 4;
- xgifb_reg_set(pVBInfo->Part2Port, 0x20, temp);
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part2Port, 0x23, temp); /* RHEQPLE=lcdhdee */
- temp = (tempcx & 0xFF00) >> 8;
- xgifb_reg_set(pVBInfo->Part2Port, 0x25, temp);
-
- XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
- tempcx = tempax;
- tempax = pVBInfo->HT;
- tempbx = pVBInfo->LCDHRS;
- if (XGI_IsLCDDualLink(pVBInfo)) {
- tempax >>= 1;
- tempbx >>= 1;
- tempcx >>= 1;
- }
-
- if (pVBInfo->VBType & VB_SIS302LV)
- tempbx += 1;
-
- tempcx += tempbx;
-
- if (tempcx >= tempax)
- tempcx -= tempax;
-
- temp = tempbx & 0x00FF; /* RHBURSTS=lcdhrs */
- xgifb_reg_set(pVBInfo->Part2Port, 0x1C, temp);
-
- temp = (tempbx & 0xFF00) >> 8;
- temp <<= 4;
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x1D, ~0x0F0, temp);
- temp = tempcx & 0x00FF; /* RHSYEXP2S=lcdhre */
- xgifb_reg_set(pVBInfo->Part2Port, 0x21, temp);
-
- if (!(pVBInfo->LCDInfo & XGI_LCDVESATiming)) {
- if (pVBInfo->VGAVDE == 525) {
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
- | VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C))
- temp = 0xC6;
- else
- temp = 0xC4;
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
- xgifb_reg_set(pVBInfo->Part2Port, 0x30, 0xB3);
- }
-
- if (pVBInfo->VGAVDE == 420) {
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
- | VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C))
- temp = 0x4F;
- else
- temp = 0x4E;
- xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
- }
- }
-}
-
-/*
- * Function : XGI_GetTap4Ptr
- * Input :
- * Output : di -> Tap4 Reg. Setting Pointer
- * Description :
- */
-static struct XGI301C_Tap4TimingStruct const
-*XGI_GetTap4Ptr(unsigned short tempcx, struct vb_device_info *pVBInfo)
-{
- unsigned short tempax, tempbx, i;
- struct XGI301C_Tap4TimingStruct const *Tap4TimingPtr;
-
- if (tempcx == 0) {
- tempax = pVBInfo->VGAHDE;
- tempbx = pVBInfo->HDE;
- } else {
- tempax = pVBInfo->VGAVDE;
- tempbx = pVBInfo->VDE;
- }
-
- if (tempax <= tempbx)
- return &xgifb_tap4_timing[0];
- Tap4TimingPtr = xgifb_ntsc_525_tap4_timing; /* NTSC */
-
- if (pVBInfo->TVInfo & TVSetPAL)
- Tap4TimingPtr = PALTap4Timing;
-
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
- if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
- (pVBInfo->TVInfo & TVSetYPbPr525p))
- Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
- if (pVBInfo->TVInfo & TVSetYPbPr750p)
- Tap4TimingPtr = YPbPr750pTap4Timing;
- }
-
- if (pVBInfo->VBInfo & SetCRT2ToHiVision)
- Tap4TimingPtr = xgifb_tap4_timing;
-
- i = 0;
- while (Tap4TimingPtr[i].DE != 0xFFFF) {
- if (Tap4TimingPtr[i].DE == tempax)
- break;
- i++;
- }
- return &Tap4TimingPtr[i];
-}
-
-static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
-{
- unsigned short i, j;
- struct XGI301C_Tap4TimingStruct const *Tap4TimingPtr;
-
- if (!(pVBInfo->VBType & VB_XGI301C))
- return;
-
- Tap4TimingPtr = XGI_GetTap4Ptr(0, pVBInfo); /* Set Horizontal Scaling */
- for (i = 0x80, j = 0; i <= 0xBF; i++, j++)
- xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]);
-
- if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- !(pVBInfo->VBInfo & SetCRT2ToHiVision)) {
- /* Set Vertical Scaling */
- Tap4TimingPtr = XGI_GetTap4Ptr(1, pVBInfo);
- for (i = 0xC0, j = 0; i < 0xFF; i++, j++)
- xgifb_reg_set(pVBInfo->Part2Port,
- i,
- Tap4TimingPtr->Reg[j]);
- }
-
- if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
- !(pVBInfo->VBInfo & SetCRT2ToHiVision))
- /* Enable V.Scaling */
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x04);
- else
- /* Enable H.Scaling */
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x10);
-}
-
-static void XGI_SetGroup3(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short i;
- unsigned char const *tempdi;
- unsigned short modeflag;
-
- /* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- xgifb_reg_set(pVBInfo->Part3Port, 0x00, 0x00);
- if (pVBInfo->TVInfo & TVSetPAL) {
- xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
- xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
- } else {
- xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xF5);
- xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xB7);
- }
-
- if (!(pVBInfo->VBInfo & SetCRT2ToTV))
- return;
-
- if (pVBInfo->TVInfo & TVSetPALM) {
- xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
- xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
- xgifb_reg_set(pVBInfo->Part3Port, 0x3D, 0xA8);
- }
-
- if ((pVBInfo->VBInfo & SetCRT2ToHiVision) || (pVBInfo->VBInfo
- & SetCRT2ToYPbPr525750)) {
- if (pVBInfo->TVInfo & TVSetYPbPr525i)
- return;
-
- tempdi = XGI330_HiTVGroup3Data;
- if (pVBInfo->SetFlag & TVSimuMode) {
- tempdi = XGI330_HiTVGroup3Simu;
- if (!(modeflag & Charx8Dot))
- tempdi = XGI330_HiTVGroup3Text;
- }
-
- if (pVBInfo->TVInfo & TVSetYPbPr525p)
- tempdi = XGI330_Ren525pGroup3;
-
- if (pVBInfo->TVInfo & TVSetYPbPr750p)
- tempdi = XGI330_Ren750pGroup3;
-
- for (i = 0; i <= 0x3E; i++)
- xgifb_reg_set(pVBInfo->Part3Port, i, tempdi[i]);
-
-		if (pVBInfo->VBType & VB_XGI301C) { /* Macrovision */
- if (pVBInfo->TVInfo & TVSetYPbPr525p)
- xgifb_reg_set(pVBInfo->Part3Port, 0x28, 0x3f);
- }
- }
-}
-
-static void XGI_SetGroup4(unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2;
-
- unsigned long tempebx, tempeax, templong;
-
- /* si+Ext_ResInfo */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- temp = pVBInfo->RVBHCFACT;
- xgifb_reg_set(pVBInfo->Part4Port, 0x13, temp);
-
- tempbx = pVBInfo->RVBHCMAX;
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x14, temp);
- temp2 = ((tempbx & 0xFF00) >> 8) << 7;
- tempcx = pVBInfo->VGAHT - 1;
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x16, temp);
-
- temp = ((tempcx & 0xFF00) >> 8) << 3;
- temp2 |= temp;
-
- tempcx = pVBInfo->VGAVT - 1;
- if (!(pVBInfo->VBInfo & SetCRT2ToTV))
- tempcx -= 5;
-
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x17, temp);
- temp = temp2 | ((tempcx & 0xFF00) >> 8);
- xgifb_reg_set(pVBInfo->Part4Port, 0x15, temp);
- xgifb_reg_or(pVBInfo->Part4Port, 0x0D, 0x08);
- tempcx = pVBInfo->VBInfo;
- tempbx = pVBInfo->VGAHDE;
-
- if (modeflag & HalfDCLK)
- tempbx >>= 1;
-
- if (XGI_IsLCDDualLink(pVBInfo))
- tempbx >>= 1;
-
- if (tempcx & SetCRT2ToHiVision) {
- temp = 0;
- if (tempbx <= 1024)
- temp = 0xA0;
- if (tempbx == 1280)
- temp = 0xC0;
- } else if (tempcx & SetCRT2ToTV) {
- temp = 0xA0;
- if (tempbx <= 800)
- temp = 0x80;
- } else {
- temp = 0x80;
- if (pVBInfo->VBInfo & SetCRT2ToLCD) {
- temp = 0;
- if (tempbx > 800)
- temp = 0x60;
- }
- }
-
- if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p)) {
- temp = 0x00;
- if (pVBInfo->VGAHDE == 1280)
- temp = 0x40;
- if (pVBInfo->VGAHDE == 1024)
- temp = 0x20;
- }
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x0E, ~0xEF, temp);
-
- tempebx = pVBInfo->VDE;
-
- tempcx = pVBInfo->RVBHRS;
- temp = tempcx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x18, temp);
-
- tempeax = pVBInfo->VGAVDE;
- tempcx |= 0x04000;
-
- if (tempeax <= tempebx) {
- tempcx = tempcx & (~0x4000);
- tempeax = pVBInfo->VGAVDE;
- } else {
- tempeax -= tempebx;
- }
-
- templong = (tempeax * 256 * 1024) % tempebx;
- tempeax = (tempeax * 256 * 1024) / tempebx;
- tempebx = tempeax;
-
- if (templong != 0)
- tempebx++;
-
- temp = (unsigned short)(tempebx & 0x000000FF);
- xgifb_reg_set(pVBInfo->Part4Port, 0x1B, temp);
-
- temp = (unsigned short)((tempebx & 0x0000FF00) >> 8);
- xgifb_reg_set(pVBInfo->Part4Port, 0x1A, temp);
- tempbx = (unsigned short)(tempebx >> 16);
- temp = tempbx & 0x00FF;
- temp <<= 4;
- temp |= ((tempcx & 0xFF00) >> 8);
- xgifb_reg_set(pVBInfo->Part4Port, 0x19, temp);
-
- /* 301b */
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- temp = 0x0028;
- xgifb_reg_set(pVBInfo->Part4Port, 0x1C, temp);
- tempax = pVBInfo->VGAHDE;
- if (modeflag & HalfDCLK)
- tempax >>= 1;
-
- if (XGI_IsLCDDualLink(pVBInfo))
- tempax >>= 1;
-
- if (pVBInfo->VBInfo & SetCRT2ToLCD) {
- if (tempax > 800)
- tempax -= 800;
- } else if (pVBInfo->VGAHDE > 800) {
- if (pVBInfo->VGAHDE == 1024)
- tempax = (tempax * 25 / 32) - 1;
- else
- tempax = (tempax * 20 / 32) - 1;
- }
- tempax -= 1;
-
- temp = (tempax & 0xFF00) >> 8;
- temp = (temp & 0x0003) << 4;
- xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
- temp = tempax & 0x00FF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
-
- if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) {
- if (pVBInfo->VGAHDE > 800)
- xgifb_reg_or(pVBInfo->Part4Port, 0x1E, 0x08);
- }
- temp = 0x0036;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (!(pVBInfo->TVInfo & (NTSC1024x768
- | TVSetYPbPr525p | TVSetYPbPr750p
- | TVSetHiVision))) {
- temp |= 0x0001;
- if ((pVBInfo->VBInfo & SetInSlaveMode) &&
- !(pVBInfo->TVInfo & TVSimuMode))
- temp &= (~0x0001);
- }
- }
-
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x1F, 0x00C0, temp);
- tempbx = pVBInfo->HT;
- if (XGI_IsLCDDualLink(pVBInfo))
- tempbx >>= 1;
- tempbx = (tempbx >> 1) - 2;
- temp = ((tempbx & 0x0700) >> 8) << 3;
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x21, 0x00C0, temp);
- temp = tempbx & 0x00FF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x22, temp);
- }
- /* end 301b */
-
- XGI_SetCRT2VCLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
-}
-
-static void XGINew_EnableCRT2(struct vb_device_info *pVBInfo)
-{
- xgifb_reg_and_or(pVBInfo->P3c4, 0x1E, 0xFF, 0x20);
-}
-
-static void XGI_SetGroup5(struct vb_device_info *pVBInfo)
-{
- if (pVBInfo->ModeType == ModeVGA) {
- if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
- | DisableCRT2Display))) {
- XGINew_EnableCRT2(pVBInfo);
- }
- }
-}
-
-static void XGI_DisableGatingCRT(struct vb_device_info *pVBInfo)
-{
- xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x00);
-}
-
-static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
- unsigned short ModeNo,
- unsigned short ModeIdIndex)
-{
- unsigned short xres, yres, colordepth, modeflag, resindex;
-
- resindex = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- xres = XGI330_ModeResInfo[resindex].HTotal; /* xres->ax */
- yres = XGI330_ModeResInfo[resindex].VTotal; /* yres->bx */
- /* si+St_ModeFlag */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- if (!(modeflag & Charx8Dot)) {
- xres /= 9;
- xres *= 8;
- }
-
- if ((ModeNo > 0x13) && (modeflag & HalfDCLK))
- xres *= 2;
-
- if ((ModeNo > 0x13) && (modeflag & DoubleScanMode))
- yres *= 2;
-
- if (xres > xgifb_info->lvds_data.LVDSHDE)
- return 0;
-
- if (yres > xgifb_info->lvds_data.LVDSVDE)
- return 0;
-
- if (xres != xgifb_info->lvds_data.LVDSHDE ||
- yres != xgifb_info->lvds_data.LVDSVDE) {
- colordepth = XGI_GetColorDepth(ModeIdIndex);
- if (colordepth > 2)
- return 0;
- }
- return 1;
-}
-
-static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
- int chip_id,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char temp, Miscdata;
- unsigned short xres, yres, modeflag, resindex;
- unsigned short LVDSHT, LVDSHBS, LVDSHRS, LVDSHRE, LVDSHBE;
- unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
- unsigned short value;
-
- temp = (unsigned char)((xgifb_info->lvds_data.LVDS_Capability &
- (LCDPolarity << 8)) >> 8);
- temp &= LCDPolarity;
- Miscdata = inb(pVBInfo->P3cc);
-
- outb((Miscdata & 0x3F) | temp, pVBInfo->P3c2);
-
- temp = xgifb_info->lvds_data.LVDS_Capability & LCDPolarity;
- /* SR35[7] FP VSync polarity */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x80, temp & 0x80);
- /* SR30[5] FP HSync polarity */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x30, ~0x20, (temp & 0x40) >> 1);
-
- if (chip_id == XG27)
- XGI_SetXG27FPBits(pVBInfo);
- else
- XGI_SetXG21FPBits(pVBInfo);
-
- resindex = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- xres = XGI330_ModeResInfo[resindex].HTotal; /* xres->ax */
- yres = XGI330_ModeResInfo[resindex].VTotal; /* yres->bx */
- /* si+St_ModeFlag */
- modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
-
- if (!(modeflag & Charx8Dot))
- xres = xres * 8 / 9;
-
- LVDSHT = xgifb_info->lvds_data.LVDSHT;
-
- LVDSHBS = xres + (xgifb_info->lvds_data.LVDSHDE - xres) / 2;
-
- if (LVDSHBS > LVDSHT)
- LVDSHBS -= LVDSHT;
-
- LVDSHRS = LVDSHBS + xgifb_info->lvds_data.LVDSHFP;
- if (LVDSHRS > LVDSHT)
- LVDSHRS -= LVDSHT;
-
- LVDSHRE = LVDSHRS + xgifb_info->lvds_data.LVDSHSYNC;
- if (LVDSHRE > LVDSHT)
- LVDSHRE -= LVDSHT;
-
- LVDSHBE = LVDSHBS + LVDSHT - xgifb_info->lvds_data.LVDSHDE;
-
- LVDSVT = xgifb_info->lvds_data.LVDSVT;
-
- LVDSVBS = yres + (xgifb_info->lvds_data.LVDSVDE - yres) / 2;
- if (modeflag & DoubleScanMode)
- LVDSVBS += yres / 2;
-
- if (LVDSVBS > LVDSVT)
- LVDSVBS -= LVDSVT;
-
- LVDSVRS = LVDSVBS + xgifb_info->lvds_data.LVDSVFP;
- if (LVDSVRS > LVDSVT)
- LVDSVRS -= LVDSVT;
-
- LVDSVRE = LVDSVRS + xgifb_info->lvds_data.LVDSVSYNC;
- if (LVDSVRE > LVDSVT)
- LVDSVRE -= LVDSVT;
-
- LVDSVBE = LVDSVBS + LVDSVT - xgifb_info->lvds_data.LVDSVDE;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x11);
- xgifb_reg_set(pVBInfo->P3d4, 0x11, temp & 0x7f); /* Unlock CRTC */
-
- if (!(modeflag & Charx8Dot))
- xgifb_reg_or(pVBInfo->P3c4, 0x1, 0x1);
-
- /* HT SR0B[1:0] CR00 */
- value = (LVDSHT >> 3) - 5;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0x03, (value & 0x300) >> 8);
- xgifb_reg_set(pVBInfo->P3d4, 0x0, (value & 0xFF));
-
- /* HBS SR0B[5:4] CR02 */
- value = (LVDSHBS >> 3) - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0x30, (value & 0x300) >> 4);
- xgifb_reg_set(pVBInfo->P3d4, 0x2, (value & 0xFF));
-
- /* HBE SR0C[1:0] CR05[7] CR03[4:0] */
- value = (LVDSHBE >> 3) - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0C, ~0x03, (value & 0xC0) >> 6);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x05, ~0x80, (value & 0x20) << 2);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x03, ~0x1F, value & 0x1F);
-
- /* HRS SR0B[7:6] CR04 */
- value = (LVDSHRS >> 3) + 2;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0xC0, (value & 0x300) >> 2);
- xgifb_reg_set(pVBInfo->P3d4, 0x4, (value & 0xFF));
-
- /* Panel HRS SR2F[1:0] SR2E[7:0] */
- value--;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x2F, ~0x03, (value & 0x300) >> 8);
- xgifb_reg_set(pVBInfo->P3c4, 0x2E, (value & 0xFF));
-
- /* HRE SR0C[2] CR05[4:0] */
- value = (LVDSHRE >> 3) + 2;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0C, ~0x04, (value & 0x20) >> 3);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x05, ~0x1F, value & 0x1F);
-
- /* Panel HRE SR2F[7:2] */
- value--;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x2F, ~0xFC, value << 2);
-
- /* VT SR0A[0] CR07[5][0] CR06 */
- value = LVDSVT - 2;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x01, (value & 0x400) >> 10);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x20, (value & 0x200) >> 4);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x01, (value & 0x100) >> 8);
- xgifb_reg_set(pVBInfo->P3d4, 0x06, (value & 0xFF));
-
- /* VBS SR0A[2] CR09[5] CR07[3] CR15 */
- value = LVDSVBS - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x04, (value & 0x400) >> 8);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x09, ~0x20, (value & 0x200) >> 4);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x08, (value & 0x100) >> 5);
- xgifb_reg_set(pVBInfo->P3d4, 0x15, (value & 0xFF));
-
- /* VBE SR0A[4] CR16 */
- value = LVDSVBE - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x10, (value & 0x100) >> 4);
- xgifb_reg_set(pVBInfo->P3d4, 0x16, (value & 0xFF));
-
- /* VRS SR0A[3] CR7[7][2] CR10 */
- value = LVDSVRS - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x08, (value & 0x400) >> 7);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x80, (value & 0x200) >> 2);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x04, (value & 0x100) >> 6);
- xgifb_reg_set(pVBInfo->P3d4, 0x10, (value & 0xFF));
-
- if (chip_id == XG27) {
- /* Panel VRS SR35[2:0] SR34[7:0] */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07,
- (value & 0x700) >> 8);
- xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF);
- } else {
- /* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03,
- (value & 0x600) >> 9);
- xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01);
- }
-
- /* VRE SR0A[5] CR11[3:0] */
- value = LVDSVRE - 1;
- xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x20, (value & 0x10) << 1);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x11, ~0x0F, value & 0x0F);
-
- /* Panel VRE SR3F[7:2] */
- if (chip_id == XG27)
- xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
- (value << 2) & 0xFC);
- else
- /* SR3F[7] has to be 0, h/w bug */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
- (value << 2) & 0x7C);
-
- for (temp = 0, value = 0; temp < 3; temp++) {
- xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x2B, xgifb_info->lvds_data.VCLKData1);
- xgifb_reg_set(pVBInfo->P3c4,
- 0x2C, xgifb_info->lvds_data.VCLKData2);
- value += 0x10;
- }
-
- if (!(modeflag & Charx8Dot)) {
- inb(pVBInfo->P3da); /* reset 3da */
- outb(0x13, pVBInfo->P3c0); /* set index */
-		/* set data, panning = 0, shift left 1 dot */
- outb(0x00, pVBInfo->P3c0);
-
- inb(pVBInfo->P3da); /* Enable Attribute */
- outb(0x20, pVBInfo->P3c0);
-
- inb(pVBInfo->P3da); /* reset 3da */
- }
-}
-
-/*
- * Function : XGI_IsLCDON
- * Input :
- * Output : 0 : Skip PSC Control
- *          1 : Disable PSC
- * Description :
- */
-static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
-{
- unsigned short tempax;
-
- tempax = pVBInfo->VBInfo;
- if (tempax & SetCRT2ToDualEdge)
- return 0;
- else if (tempax & (DisableCRT2Display | SwitchCRT2 | SetSimuScanMode))
- return 1;
-
- return 0;
-}
-
-static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempah = 0;
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- tempah = 0x3F;
- if (!(pVBInfo->VBInfo &
- (DisableCRT2Display | SetSimuScanMode))) {
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
- tempah = 0x7F; /* Disable Channel A */
- }
- }
-
- /* disable part4_1f */
- xgifb_reg_and(pVBInfo->Part4Port, 0x1F, tempah);
-
- if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
- if (((pVBInfo->VBInfo &
- (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))) ||
- (XGI_IsLCDON(pVBInfo)))
- /* LVDS Driver power down */
- xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x80);
- }
-
- if (pVBInfo->VBInfo & (DisableCRT2Display | XGI_SetCRT2ToLCDA |
- SetSimuScanMode))
- XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
-
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
- /* Power down */
- xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf);
-
- /* disable TV as primary VGA swap */
- xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xdf);
-
- if ((pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToDualEdge)))
- xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xdf);
-
- if ((pVBInfo->VBInfo &
- (DisableCRT2Display | SetSimuScanMode)) ||
- (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
- (pVBInfo->VBInfo &
- (SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
- xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
-
- if ((pVBInfo->VBInfo &
- (DisableCRT2Display | SetSimuScanMode)) ||
- (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) ||
- (pVBInfo->VBInfo &
- (SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))) {
- /* save Part1 index 0 */
- tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x00);
- /* BTDAC = 1, avoid VB reset */
- xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x10);
- /* disable CRT2 */
- xgifb_reg_and(pVBInfo->Part1Port, 0x1E, 0xDF);
- /* restore Part1 index 0 */
- xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
- }
- } else { /* {301} */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
- xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
- /* Disable CRT2 */
- xgifb_reg_and(pVBInfo->Part1Port, 0x1E, 0xDF);
-			/* Disable TV as Primary VGA swap */
- xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xDF);
- }
-
- if (pVBInfo->VBInfo & (DisableCRT2Display | XGI_SetCRT2ToLCDA
- | SetSimuScanMode))
- XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
- }
-}
-
-/*
- * Function : XGI_GetTVPtrIndex
- * Input :
- * Output :
- * Description : bx 0 : ExtNTSC
- * 1 : StNTSC
- * 2 : ExtPAL
- * 3 : StPAL
- * 4 : ExtHiTV
- * 5 : StHiTV
- * 6 : Ext525i
- * 7 : St525i
- * 8 : Ext525p
- * 9 : St525p
- * A : Ext750p
- * B : St750p
- */
-static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx = 0;
-
- if (pVBInfo->TVInfo & TVSetPAL)
- tempbx = 2;
- if (pVBInfo->TVInfo & TVSetHiVision)
- tempbx = 4;
- if (pVBInfo->TVInfo & TVSetYPbPr525i)
- tempbx = 6;
- if (pVBInfo->TVInfo & TVSetYPbPr525p)
- tempbx = 8;
- if (pVBInfo->TVInfo & TVSetYPbPr750p)
- tempbx = 10;
- if (pVBInfo->TVInfo & TVSimuMode)
- tempbx++;
-
- return tempbx;
-}
-
-/*
- * Function : XGI_GetTVPtrIndex2
- * Input :
- * Output : bx 0 : NTSC
- * 1 : PAL
- * 2 : PALM
- * 3 : PALN
- * 4 : NTSC1024x768
- * 5 : PAL-M 1024x768
- * 6-7: reserved
- * cl 0 : YFilter1
- * 1 : YFilter2
- * ch 0 : 301A
- * 1 : 301B/302B/301LV/302LV
- * Description :
- */
-static void XGI_GetTVPtrIndex2(unsigned short *tempbx,
- unsigned char *tempcl,
- unsigned char *tempch,
- struct vb_device_info *pVBInfo)
-{
- *tempbx = 0;
- *tempcl = 0;
- *tempch = 0;
-
- if (pVBInfo->TVInfo & TVSetPAL)
- *tempbx = 1;
-
- if (pVBInfo->TVInfo & TVSetPALM)
- *tempbx = 2;
-
- if (pVBInfo->TVInfo & TVSetPALN)
- *tempbx = 3;
-
- if (pVBInfo->TVInfo & NTSC1024x768) {
- *tempbx = 4;
- if (pVBInfo->TVInfo & TVSetPALM)
- *tempbx = 5;
- }
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- if (!(pVBInfo->VBInfo & SetInSlaveMode) || (pVBInfo->TVInfo
- & TVSimuMode)) {
- *tempbx += 8;
- *tempcl += 1;
- }
- }
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C))
- (*tempch)++;
-}
-
-static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
-{
- unsigned char tempah, tempbl, tempbh;
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA
- | SetCRT2ToTV | SetCRT2ToRAMDAC)) {
- tempbh = 0;
- tempbl = XGI301TVDelay;
-
- if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
- tempbl >>= 4;
- if (pVBInfo->VBInfo &
- (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- tempbh = XGI301LCDDelay;
-
- if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA))
- tempbl = tempbh;
- }
-
- tempbl &= 0x0F;
- tempbh &= 0xF0;
- tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x2D);
-
- if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToLCD
- | SetCRT2ToTV)) { /* Channel B */
- tempah &= 0xF0;
- tempah |= tempbl;
- }
-
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- /* Channel A */
- tempah &= 0x0F;
- tempah |= tempbh;
- }
- xgifb_reg_set(pVBInfo->Part1Port, 0x2D, tempah);
- }
- }
-}
-
-static void XGI_SetLCDCap_A(unsigned short tempcx,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
-
- if (temp & LCDRGB18Bit) {
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
- /* Enable Dither */
- (unsigned short)(0x20 | (tempcx & 0x00C0)));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80);
- } else {
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
- (unsigned short)(0x30 | (tempcx & 0x00C0)));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
- }
-}
-
-/*
- * Function : XGI_SetLCDCap_B
- * Input : cx -> LCD Capability
- * Output :
- * Description :
- */
-static void XGI_SetLCDCap_B(unsigned short tempcx,
- struct vb_device_info *pVBInfo)
-{
- if (tempcx & EnableLCD24bpp) { /* 24bits */
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
- (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x0c));
- } else {
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
- (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x18));
- /* Enable Dither */
- }
-}
-
-static void XGI_LongWait(struct vb_device_info *pVBInfo)
-{
- unsigned short i;
-
- i = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
-
- if (!(i & 0xC0)) {
- for (i = 0; i < 0xFFFF; i++) {
- if (!(inb(pVBInfo->P3da) & 0x08))
- break;
- }
-
- for (i = 0; i < 0xFFFF; i++) {
- if ((inb(pVBInfo->P3da) & 0x08))
- break;
- }
- }
-}
-
-static void SetSpectrum(struct vb_device_info *pVBInfo)
-{
- unsigned short index;
-
- index = XGI_GetLCDCapPtr(pVBInfo);
-
- /* disable down spectrum D[4] */
- xgifb_reg_and(pVBInfo->Part4Port, 0x30, 0x8F);
- XGI_LongWait(pVBInfo);
- xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x20); /* reset spectrum */
- XGI_LongWait(pVBInfo);
-
- xgifb_reg_set(pVBInfo->Part4Port, 0x31,
- pVBInfo->LCDCapList[index].Spectrum_31);
- xgifb_reg_set(pVBInfo->Part4Port, 0x32,
- pVBInfo->LCDCapList[index].Spectrum_32);
- xgifb_reg_set(pVBInfo->Part4Port, 0x33,
- pVBInfo->LCDCapList[index].Spectrum_33);
- xgifb_reg_set(pVBInfo->Part4Port, 0x34,
- pVBInfo->LCDCapList[index].Spectrum_34);
- XGI_LongWait(pVBInfo);
- xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x40); /* enable spectrum */
-}
-
-static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
-{
- unsigned short tempcx;
-
- tempcx = pVBInfo->LCDCapList[XGI_GetLCDCapPtr(pVBInfo)].LCD_Capability;
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV |
- VB_SIS302LV | VB_XGI301C)) {
- if (pVBInfo->VBType &
- (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
- /* Set 301LV Capability */
- xgifb_reg_set(pVBInfo->Part4Port, 0x24,
- (unsigned char)(tempcx & 0x1F));
- }
- /* VB Driving */
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D,
- ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
- (unsigned short)((tempcx & (EnableVBCLKDRVLOW |
- EnablePLLSPLOW)) >> 8));
-
- if (pVBInfo->VBInfo & SetCRT2ToLCD)
- XGI_SetLCDCap_B(tempcx, pVBInfo);
- else if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
- XGI_SetLCDCap_A(tempcx, pVBInfo);
-
- if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
- if (tempcx & EnableSpectrum)
- SetSpectrum(pVBInfo);
- }
- } else {
- /* LVDS,CH7017 */
- XGI_SetLCDCap_A(tempcx, pVBInfo);
- }
-}
-
-/*
- * Function : XGI_SetAntiFlicker
- * Input :
- * Output :
- * Description : Set TV Customized Param.
- */
-static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx;
-
- unsigned char tempah;
-
- if (pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))
- return;
-
- tempbx = XGI_GetTVPtrIndex(pVBInfo);
- tempbx &= 0xFE;
- tempah = TVAntiFlickList[tempbx];
- tempah <<= 4;
-
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0x8F, tempah);
-}
-
-static void XGI_SetEdgeEnhance(struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx;
-
- unsigned char tempah;
-
- tempbx = XGI_GetTVPtrIndex(pVBInfo);
- tempbx &= 0xFE;
- tempah = TVEdgeList[tempbx];
- tempah <<= 5;
-
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x3A, 0x1F, tempah);
-}
-
-static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx;
-
- unsigned char tempcl, tempch;
-
- unsigned long tempData;
-
- XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */
- tempData = TVPhaseList[tempbx];
-
- xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short)(tempData
- & 0x000000FF));
- xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short)((tempData
- & 0x0000FF00) >> 8));
- xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short)((tempData
- & 0x00FF0000) >> 16));
- xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short)((tempData
- & 0xFF000000) >> 24));
-}
-
-static void XGI_SetYFilter(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx, index;
- unsigned char const *filterPtr;
- unsigned char tempcl, tempch, tempal;
-
- XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */
-
- switch (tempbx) {
- case 0x00:
- case 0x04:
- filterPtr = NTSCYFilter1;
- break;
-
- case 0x01:
- filterPtr = PALYFilter1;
- break;
-
- case 0x02:
- case 0x05:
- case 0x0D:
- case 0x03:
- filterPtr = xgifb_palmn_yfilter1;
- break;
-
- case 0x08:
- case 0x0C:
- case 0x0A:
- case 0x0B:
- case 0x09:
- filterPtr = xgifb_yfilter2;
- break;
-
- default:
- return;
- }
-
- tempal = XGI330_EModeIDTable[ModeIdIndex].VB_ExtTVYFilterIndex;
- if (tempcl == 0)
- index = tempal * 4;
- else
- index = tempal * 7;
-
- if ((tempcl == 0) && (tempch == 1)) {
- xgifb_reg_set(pVBInfo->Part2Port, 0x35, 0);
- xgifb_reg_set(pVBInfo->Part2Port, 0x36, 0);
- xgifb_reg_set(pVBInfo->Part2Port, 0x37, 0);
- xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]);
- } else {
- xgifb_reg_set(pVBInfo->Part2Port, 0x35, filterPtr[index++]);
- xgifb_reg_set(pVBInfo->Part2Port, 0x36, filterPtr[index++]);
- xgifb_reg_set(pVBInfo->Part2Port, 0x37, filterPtr[index++]);
- xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]);
- }
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- xgifb_reg_set(pVBInfo->Part2Port, 0x48, filterPtr[index++]);
- xgifb_reg_set(pVBInfo->Part2Port, 0x49, filterPtr[index++]);
- xgifb_reg_set(pVBInfo->Part2Port, 0x4A, filterPtr[index++]);
- }
-}
-
-/*
- * Function : XGI_OEM310Setting
- * Input :
- * Output :
- * Description : Customized Param. for 301
- */
-static void XGI_OEM310Setting(unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- XGI_SetDelayComp(pVBInfo);
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
- XGI_SetLCDCap(pVBInfo);
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- XGI_SetPhaseIncr(pVBInfo);
- XGI_SetYFilter(ModeIdIndex, pVBInfo);
- XGI_SetAntiFlicker(pVBInfo);
-
- if (pVBInfo->VBType & VB_SIS301)
- XGI_SetEdgeEnhance(pVBInfo);
- }
-}
-
-/*
- * Function : XGI_SetCRT2ModeRegs
- * Input :
- * Output :
- * Description : Origin code for crt2group
- */
-static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
-{
- unsigned short tempbl;
- short tempcl;
-
- unsigned char tempah;
-
- tempah = 0;
- if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
- tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x00);
- tempah &= ~0x10; /* BTRAMDAC */
- tempah |= 0x40; /* BTRAM */
-
- if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV
- | SetCRT2ToLCD)) {
- tempah = 0x40; /* BTDRAM */
- tempcl = pVBInfo->ModeType;
- tempcl -= ModeVGA;
- if (tempcl >= 0) {
- /* BT Color */
- tempah = 0x008 >> tempcl;
- if (tempah == 0)
- tempah = 1;
- tempah |= 0x040;
- }
- if (pVBInfo->VBInfo & SetInSlaveMode)
- tempah ^= 0x50; /* BTDAC */
- }
- }
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
- tempah = 0x08;
- tempbl = 0xf0;
-
- if (pVBInfo->VBInfo & DisableCRT2Display)
- goto reg_and_or;
-
- tempah = 0x00;
- tempbl = 0xff;
-
- if (!(pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV |
- SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
- goto reg_and_or;
-
- if ((pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
- (!(pVBInfo->VBInfo & SetSimuScanMode))) {
- tempbl &= 0xf7;
- tempah |= 0x01;
- goto reg_and_or;
- }
-
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- tempbl &= 0xf7;
- tempah |= 0x01;
- }
-
- if (!(pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD)))
- goto reg_and_or;
-
- tempbl &= 0xf8;
- tempah = 0x01;
-
- if (!(pVBInfo->VBInfo & SetInSlaveMode))
- tempah |= 0x02;
-
- if (!(pVBInfo->VBInfo & SetCRT2ToRAMDAC)) {
- tempah = tempah ^ 0x05;
- if (!(pVBInfo->VBInfo & SetCRT2ToLCD))
- tempah = tempah ^ 0x01;
- }
-
- if (!(pVBInfo->VBInfo & SetCRT2ToDualEdge))
- tempah |= 0x08;
-
-reg_and_or:
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e, tempbl, tempah);
-
- if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD
- | XGI_SetCRT2ToLCDA)) {
- tempah &= (~0x08);
- if ((pVBInfo->ModeType == ModeVGA) && !(pVBInfo->VBInfo
- & SetInSlaveMode)) {
- tempah |= 0x010;
- }
- tempah |= 0x080;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- tempah |= 0x020;
- if (pVBInfo->VBInfo & DriverMode)
- tempah = tempah ^ 0x20;
- }
-
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D, ~0x0BF, tempah);
- tempah = 0;
-
- if (pVBInfo->LCDInfo & SetLCDDualLink)
- tempah |= 0x40;
-
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->TVInfo & RPLLDIV2XO)
- tempah |= 0x40;
- }
-
- if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
- (pVBInfo->LCDResInfo == Panel_1280x1024x75))
- tempah |= 0x80;
-
- if (pVBInfo->LCDResInfo == Panel_1280x960)
- tempah |= 0x80;
-
- xgifb_reg_set(pVBInfo->Part4Port, 0x0C, tempah);
- }
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- tempah = 0;
- tempbl = 0xfb;
-
- if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
- tempbl = 0xff;
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
- tempah |= 0x04; /* shampoo 0129 */
- }
-
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x13, tempbl, tempah);
- tempah = 0x00;
- tempbl = 0xcf;
- if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
- if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
- tempah |= 0x30;
- }
-
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x2c, tempbl, tempah);
- tempah = 0;
- tempbl = 0x3f;
-
- if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
- if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
- tempah |= 0xc0;
- }
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x21, tempbl, tempah);
- }
-
- tempah = 0;
- tempbl = 0x7f;
- if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) {
- tempbl = 0xff;
- if (!(pVBInfo->VBInfo & SetCRT2ToDualEdge))
- tempah |= 0x80;
- }
-
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x23, tempbl, tempah);
-
- if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
- if (pVBInfo->LCDInfo & SetLCDDualLink) {
- xgifb_reg_or(pVBInfo->Part4Port, 0x27, 0x20);
- xgifb_reg_or(pVBInfo->Part4Port, 0x34, 0x10);
- }
- }
-}
-
-void XGI_UnLockCRT2(struct vb_device_info *pVBInfo)
-{
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x2f, 0xFF, 0x01);
-}
-
-void XGI_LockCRT2(struct vb_device_info *pVBInfo)
-{
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x2F, 0xFE, 0x00);
-}
-
-unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
- unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- static const u8 LCDARefreshIndex[] = {
- 0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };
-
- unsigned short RefreshRateTableIndex, i, index, temp;
-
- index = xgifb_reg_get(pVBInfo->P3d4, 0x33);
- index >>= pVBInfo->SelectCRT2Rate;
- index &= 0x0F;
-
- if (pVBInfo->LCDInfo & LCDNonExpanding)
- index = 0;
-
- if (index > 0)
- index--;
-
- if (pVBInfo->SetFlag & ProgrammingCRT2) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- temp = LCDARefreshIndex[pVBInfo->LCDResInfo & 0x07];
-
- if (index > temp)
- index = temp;
- }
- }
-
- RefreshRateTableIndex = XGI330_EModeIDTable[ModeIdIndex].REFindex;
- ModeNo = XGI330_RefIndex[RefreshRateTableIndex].ModeID;
- if (pXGIHWDE->jChipType >= XG20) { /* for XG20, XG21, XG27 */
- if ((XGI330_RefIndex[RefreshRateTableIndex].XRes == 800) &&
- (XGI330_RefIndex[RefreshRateTableIndex].YRes == 600)) {
- index++;
- }
- /* do the similar adjustment like XGISearchCRT1Rate() */
- if ((XGI330_RefIndex[RefreshRateTableIndex].XRes == 1024) &&
- (XGI330_RefIndex[RefreshRateTableIndex].YRes == 768)) {
- index++;
- }
- if ((XGI330_RefIndex[RefreshRateTableIndex].XRes == 1280) &&
- (XGI330_RefIndex[RefreshRateTableIndex].YRes == 1024)) {
- index++;
- }
- }
-
- i = 0;
- do {
- if (XGI330_RefIndex[RefreshRateTableIndex + i].ModeID != ModeNo)
- break;
- temp = XGI330_RefIndex[RefreshRateTableIndex + i].Ext_InfoFlag;
- temp &= ModeTypeMask;
- if (temp < pVBInfo->ModeType)
- break;
- i++;
- index--;
-
- } while (index != 0xFFFF);
- if (!(pVBInfo->VBInfo & SetCRT2ToRAMDAC)) {
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- temp = XGI330_RefIndex[RefreshRateTableIndex + i - 1].Ext_InfoFlag;
- if (temp & InterlaceMode)
- i++;
- }
- }
- i--;
- if ((pVBInfo->SetFlag & ProgrammingCRT2)) {
- temp = XGI_AjustCRT2Rate(ModeIdIndex, RefreshRateTableIndex,
- &i, pVBInfo);
- }
- return RefreshRateTableIndex + i;
-}
-
-static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short RefreshRateTableIndex;
-
- pVBInfo->SetFlag |= ProgrammingCRT2;
- RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
- XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo);
- XGI_GetLVDSData(ModeIdIndex, pVBInfo);
- XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo);
- XGI_SetLVDSRegs(ModeIdIndex, pVBInfo);
- XGI_SetCRT2ECLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
-}
-
-static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short ModeIdIndex, RefreshRateTableIndex;
-
- pVBInfo->SetFlag |= ProgrammingCRT2;
- XGI_SearchModeID(ModeNo, &ModeIdIndex);
- pVBInfo->SelectCRT2Rate = 4;
- RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
- XGI_SaveCRT2Info(ModeNo, pVBInfo);
- XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo);
- XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_PreSetGroup1(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetGroup1(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetLockRegs(ModeNo, ModeIdIndex, pVBInfo);
- XGI_SetGroup2(ModeNo, ModeIdIndex, pVBInfo);
- XGI_SetLCDRegs(ModeIdIndex, pVBInfo);
- XGI_SetTap4Regs(pVBInfo);
- XGI_SetGroup3(ModeIdIndex, pVBInfo);
- XGI_SetGroup4(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetCRT2VCLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetGroup5(pVBInfo);
- XGI_AutoThreshold(pVBInfo);
- return 1;
-}
-
-void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
-{
- unsigned char CRTCData[17] = { 0x5F, 0x4F, 0x50, 0x82, 0x55, 0x81,
- 0x0B, 0x3E, 0xE9, 0x0B, 0xDF, 0xE7, 0x04, 0x00, 0x00,
- 0x05, 0x00 };
-
- unsigned char SR01 = 0, SR1F = 0, SR07 = 0, SR06 = 0;
-
- unsigned char CR17, CR63, SR31;
- unsigned short temp;
-
- int i;
-
- xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
-
- /* to fix XG42 single LCD sense to CRT+LCD */
- xgifb_reg_set(pVBInfo->P3d4, 0x57, 0x4A);
- xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get(
- pVBInfo->P3d4, 0x53) | 0x02));
-
- SR31 = xgifb_reg_get(pVBInfo->P3c4, 0x31);
- CR63 = xgifb_reg_get(pVBInfo->P3d4, 0x63);
- SR01 = xgifb_reg_get(pVBInfo->P3c4, 0x01);
-
- xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char)(SR01 & 0xDF));
- xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char)(CR63 & 0xBF));
-
- CR17 = xgifb_reg_get(pVBInfo->P3d4, 0x17);
- xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char)(CR17 | 0x80));
-
- SR1F = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
- xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)(SR1F | 0x04));
-
- SR07 = xgifb_reg_get(pVBInfo->P3c4, 0x07);
- xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char)(SR07 & 0xFB));
- SR06 = xgifb_reg_get(pVBInfo->P3c4, 0x06);
- xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char)(SR06 & 0xC3));
-
- xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00);
-
- for (i = 0; i < 8; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short)i, CRTCData[i]);
-
- for (i = 8; i < 11; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 8),
- CRTCData[i]);
-
- for (i = 11; i < 13; i++)
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 4),
- CRTCData[i]);
-
- for (i = 13; i < 16; i++)
- xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i - 3),
- CRTCData[i]);
-
- xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char)(CRTCData[16]
- & 0xE0));
-
- xgifb_reg_set(pVBInfo->P3c4, 0x31, 0x00);
- xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B);
- xgifb_reg_set(pVBInfo->P3c4, 0x2C, 0xE1);
-
- outb(0x00, pVBInfo->P3c8);
-
- for (i = 0; i < 256 * 3; i++)
- outb(0x0F, (pVBInfo->P3c8 + 1)); /* DAC_TEST_PARMS */
-
- mdelay(1);
-
- XGI_WaitDisply(pVBInfo);
- temp = inb(pVBInfo->P3c2);
-
- if (temp & 0x10)
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, 0xDF, 0x20);
- else
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, 0xDF, 0x00);
-
- /* avoid display something, set BLACK DAC if not restore DAC */
- outb(0x00, pVBInfo->P3c8);
-
- for (i = 0; i < 256 * 3; i++)
- outb(0, (pVBInfo->P3c8 + 1));
-
- xgifb_reg_set(pVBInfo->P3c4, 0x01, SR01);
- xgifb_reg_set(pVBInfo->P3d4, 0x63, CR63);
- xgifb_reg_set(pVBInfo->P3c4, 0x31, SR31);
-
- xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get(
- pVBInfo->P3d4, 0x53) & 0xFD));
- xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)SR1F);
-}
-
-static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempah;
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
- /* Power on */
- xgifb_reg_set(pVBInfo->Part1Port, 0x1E, 0x20);
-
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV |
- SetCRT2ToRAMDAC)) {
- tempah = xgifb_reg_get(pVBInfo->P3c4, 0x32);
- tempah &= 0xDF;
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (!(pVBInfo->VBInfo & SetCRT2ToRAMDAC))
- tempah |= 0x20;
- }
- xgifb_reg_set(pVBInfo->P3c4, 0x32, tempah);
- xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x20);
-
- tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x2E);
-
- if (!(tempah & 0x80))
- xgifb_reg_or(pVBInfo->Part1Port, 0x2E, 0x80);
- xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
- }
-
- if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
- xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
- 0x20); /* shampoo 0129 */
- if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo &
- (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
- /* LVDS PLL power on */
- xgifb_reg_and(pVBInfo->Part4Port, 0x2A,
- 0x7F);
- /* LVDS Driver power on */
- xgifb_reg_and(pVBInfo->Part4Port, 0x30, 0x7F);
- }
- }
-
- tempah = 0x00;
-
- if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
- tempah = 0xc0;
-
- if (!(pVBInfo->VBInfo & SetSimuScanMode) &&
- (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
- (pVBInfo->VBInfo & SetCRT2ToDualEdge)) {
- tempah = tempah & 0x40;
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
- tempah = tempah ^ 0xC0;
- }
- }
-
- /* EnablePart4_1F */
- xgifb_reg_or(pVBInfo->Part4Port, 0x1F, tempah);
-
- XGI_DisableGatingCRT(pVBInfo);
- XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
- } /* 301 */
- else { /* LVDS */
- if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD
- | XGI_SetCRT2ToLCDA))
- /* enable CRT2 */
- xgifb_reg_or(pVBInfo->Part1Port, 0x1E, 0x20);
-
- tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x2E);
- if (!(tempah & 0x80))
- xgifb_reg_or(pVBInfo->Part1Port, 0x2E, 0x80);
-
- xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
- XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
- } /* End of VB */
-}
-
-static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned short RefreshRateTableIndex, temp;
-
- XGI_SetSeqRegs(pVBInfo);
- outb(XGI330_StandTable.MISC, pVBInfo->P3c2);
- XGI_SetCRTCRegs(pVBInfo);
- XGI_SetATTRegs(ModeIdIndex, pVBInfo);
- XGI_SetGRCRegs(pVBInfo);
- XGI_ClearExt1Regs(pVBInfo);
-
- if (HwDeviceExtension->jChipType == XG27) {
- if (pVBInfo->IF_DEF_LVDS == 0)
- XGI_SetDefaultVCLK(pVBInfo);
- }
-
- temp = ~ProgrammingCRT2;
- pVBInfo->SetFlag &= temp;
- pVBInfo->SelectCRT2Rate = 0;
-
- if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) {
- if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA
- | SetInSlaveMode)) {
- pVBInfo->SetFlag |= ProgrammingCRT2;
- }
- }
-
- RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
- if (RefreshRateTableIndex != 0xFFFF) {
- XGI_SetSync(RefreshRateTableIndex, pVBInfo);
- XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex,
- pVBInfo, HwDeviceExtension);
- XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
- XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
- }
-
- if (HwDeviceExtension->jChipType >= XG21) {
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);
- if (temp & 0xA0) {
- if (HwDeviceExtension->jChipType == XG27)
- XGI_SetXG27CRTC(RefreshRateTableIndex, pVBInfo);
- else
- XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo);
-
- XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
- RefreshRateTableIndex);
-
- xgifb_set_lcd(HwDeviceExtension->jChipType,
- pVBInfo, RefreshRateTableIndex);
-
- if (pVBInfo->IF_DEF_LVDS == 1)
- xgifb_set_lvds(xgifb_info,
- HwDeviceExtension->jChipType,
- ModeIdIndex, pVBInfo);
- }
- }
-
- pVBInfo->SetFlag &= (~ProgrammingCRT2);
- XGI_SetCRT1FIFO(HwDeviceExtension, pVBInfo);
- XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
- XGI_LoadDAC(pVBInfo);
-}
-
-unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo)
-{
- unsigned short ModeIdIndex;
- struct vb_device_info VBINF;
- struct vb_device_info *pVBInfo = &VBINF;
-
- pVBInfo->IF_DEF_LVDS = 0;
-
- if (HwDeviceExtension->jChipType >= XG20)
- pVBInfo->VBType = 0; /* set VBType default 0 */
-
- XGIRegInit(pVBInfo, xgifb_info->vga_base);
-
- /* for x86 Linux, XG21 LVDS */
- if (HwDeviceExtension->jChipType == XG21) {
- if ((xgifb_reg_get(pVBInfo->P3d4, 0x38) & 0xE0) == 0xC0)
- pVBInfo->IF_DEF_LVDS = 1;
- }
- if (HwDeviceExtension->jChipType == XG27) {
- if ((xgifb_reg_get(pVBInfo->P3d4, 0x38) & 0xE0) == 0xC0) {
- if (xgifb_reg_get(pVBInfo->P3d4, 0x30) & 0x20)
- pVBInfo->IF_DEF_LVDS = 1;
- }
- }
-
- InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
- if (ModeNo & 0x80)
- ModeNo = ModeNo & 0x7F;
- xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
-
- if (HwDeviceExtension->jChipType < XG20)
- XGI_UnLockCRT2(pVBInfo);
-
- XGI_SearchModeID(ModeNo, &ModeIdIndex);
-
- if (HwDeviceExtension->jChipType < XG20) {
- XGI_GetVBInfo(ModeIdIndex, pVBInfo);
- XGI_GetTVInfo(ModeIdIndex, pVBInfo);
- XGI_GetLCDInfo(ModeIdIndex, pVBInfo);
- XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
-
- if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
- !(pVBInfo->VBInfo & SwitchCRT2)) {
- XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
-
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
- HwDeviceExtension, pVBInfo);
- }
- }
-
- if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchCRT2)) {
- switch (HwDeviceExtension->ujVBChipID) {
- case VB_CHIP_301: /* fall through */
- case VB_CHIP_302:
- /* add for CRT2 */
- XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
- pVBInfo);
- break;
-
- default:
- break;
- }
- }
-
- XGI_SetCRT2ModeRegs(pVBInfo);
- XGI_OEM310Setting(ModeIdIndex, pVBInfo); /* 0212 */
- XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
- } /* !XG20 */
- else {
- if (pVBInfo->IF_DEF_LVDS == 1)
- if (!XGI_XG21CheckLVDSMode(xgifb_info, ModeNo,
- ModeIdIndex))
- return 0;
-
- pVBInfo->ModeType =
- XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag
- & ModeTypeMask;
-
- pVBInfo->SetFlag = 0;
- pVBInfo->VBInfo = DisableCRT2Display;
-
- XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
-
- XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
- ModeIdIndex, pVBInfo);
-
- XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
- }
-
- XGI_UpdateModeInfo(pVBInfo);
-
- if (HwDeviceExtension->jChipType < XG20)
- XGI_LockCRT2(pVBInfo);
-
- return 1;
-}
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
deleted file mode 100644
index 5904ed1f2686..000000000000
--- a/drivers/staging/xgifb/vb_setmode.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VBSETMODE_
-#define _VBSETMODE_
-
-void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo);
-void XGI_UnLockCRT2(struct vb_device_info *pVBInfo);
-void XGI_LockCRT2(struct vb_device_info *pVBInfo);
-void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *pXGIHWDE,
- struct vb_device_info *pVBInfo);
-void XGI_GetVBType(struct vb_device_info *pVBInfo);
-void XGI_SenseCRT1(struct vb_device_info *pVBInfo);
-unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo);
-
-unsigned char XGI_SearchModeID(unsigned short ModeNo,
- unsigned short *ModeIdIndex);
-unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
- unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo);
-
-#endif
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
deleted file mode 100644
index e256f72f6d8a..000000000000
--- a/drivers/staging/xgifb/vb_struct.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VB_STRUCT_
-#define _VB_STRUCT_
-#include "../../video/fbdev/sis/vstruct.h"
-
-struct XGI_LVDSCRT1HDataStruct {
- unsigned char Reg[8];
-};
-
-struct XGI_LVDSCRT1VDataStruct {
- unsigned char Reg[7];
-};
-
-struct XGI_ExtStruct {
- unsigned char Ext_ModeID;
- unsigned short Ext_ModeFlag;
- unsigned short Ext_ModeInfo;
- unsigned char Ext_RESINFO;
- unsigned char VB_ExtTVYFilterIndex;
- unsigned char REFindex;
-};
-
-struct XGI_Ext2Struct {
- unsigned short Ext_InfoFlag;
- unsigned char Ext_CRT1CRTC;
- unsigned char Ext_CRTVCLK;
- unsigned char Ext_CRT2CRTC;
- unsigned char Ext_CRT2CRTC2;
- unsigned char ModeID;
- unsigned short XRes;
- unsigned short YRes;
-};
-
-struct XGI_ECLKDataStruct {
- unsigned char SR2E, SR2F, SR30;
- unsigned short CLOCK;
-};
-
-/*add for new UNIVGABIOS*/
-struct XGI_LCDDesStruct {
- unsigned short LCDHDES;
- unsigned short LCDHRS;
- unsigned short LCDVDES;
- unsigned short LCDVRS;
-};
-
-struct XGI330_LCDDataDesStruct2 {
- unsigned short LCDHDES;
- unsigned short LCDHRS;
- unsigned short LCDVDES;
- unsigned short LCDVRS;
- unsigned short LCDHSync;
- unsigned short LCDVSync;
-};
-
-struct XGI330_LCDDataTablStruct {
- unsigned char PANELID;
- unsigned short MASK;
- unsigned short CAP;
- void const *DATAPTR;
-};
-
-struct XGI330_TVDataTablStruct {
- unsigned short MASK;
- unsigned short CAP;
- struct SiS_TVData const *DATAPTR;
-};
-
-struct XGI_TimingHStruct {
- unsigned char data[8];
-};
-
-struct XGI_TimingVStruct {
- unsigned char data[7];
-};
-
-struct XGI_XG21CRT1Struct {
- unsigned char ModeID, CR02, CR03, CR15, CR16;
-};
-
-struct XGI330_LCDCapStruct {
- unsigned char LCD_ID;
- unsigned short LCD_Capability;
- unsigned char LCD_HSyncWidth;
- unsigned char LCD_VSyncWidth;
- unsigned char LCD_VCLK;
- unsigned char LCDA_VCLKData1;
- unsigned char LCDA_VCLKData2;
- unsigned char LCUCHAR_VCLKData1;
- unsigned char LCUCHAR_VCLKData2;
- unsigned char Spectrum_31;
- unsigned char Spectrum_32;
- unsigned char Spectrum_33;
- unsigned char Spectrum_34;
-};
-
-struct XGI21_LVDSCapStruct {
- unsigned short LVDS_Capability;
- unsigned short LVDSHT;
- unsigned short LVDSVT;
- unsigned short LVDSHDE;
- unsigned short LVDSVDE;
- unsigned short LVDSHFP;
- unsigned short LVDSVFP;
- unsigned short LVDSHSYNC;
- unsigned short LVDSVSYNC;
- unsigned char VCLKData1;
- unsigned char VCLKData2;
- unsigned char PSC_S1; /* Duration between CPL on and signal on */
- unsigned char PSC_S2; /* Duration signal on and Vdd on */
- unsigned char PSC_S3; /* Duration between CPL off and signal off */
- unsigned char PSC_S4; /* Duration signal off and Vdd off */
- unsigned char PSC_S5;
-};
-
-struct XGI_CRT1TableStruct {
- unsigned char CR[16];
-};
-
-struct XGI301C_Tap4TimingStruct {
- unsigned short DE;
- unsigned char Reg[64]; /* C0-FF */
-};
-
-struct vb_device_info {
- unsigned long P3c4, P3d4, P3c0, P3ce, P3c2, P3cc;
- unsigned long P3ca, P3c6, P3c7, P3c8, P3c9, P3da;
- unsigned long Part0Port, Part1Port, Part2Port;
- unsigned long Part3Port, Part4Port, Part5Port;
- unsigned short RVBHCFACT, RVBHCMAX, RVBHRS;
- unsigned short VGAVT, VGAHT, VGAVDE, VGAHDE;
- unsigned short VT, HT, VDE, HDE;
- unsigned short LCDHRS, LCDVRS, LCDHDES, LCDVDES;
-
- unsigned short ModeType;
- unsigned short IF_DEF_LVDS;
- unsigned short IF_DEF_CRT2Monitor;
- unsigned short IF_DEF_YPbPr;
- unsigned short IF_DEF_HiVision;
- unsigned short LCDResInfo, LCDTypeInfo, VBType;/*301b*/
- unsigned short VBInfo, TVInfo, LCDInfo;
- unsigned short SetFlag;
- unsigned short NewFlickerMode;
- unsigned short SelectCRT2Rate;
-
- void __iomem *FBAddr;
-
- unsigned char const *SR18;
- unsigned char const (*CR40)[3];
-
- struct SiS_MCLKData const *MCLKData;
-
- unsigned char XGINew_CR97;
-
- struct XGI330_LCDCapStruct const *LCDCapList;
-
- struct XGI_TimingHStruct TimingH;
- struct XGI_TimingVStruct TimingV;
-
- int ram_type;
- int ram_channel;
- int ram_bus;
-}; /* _struct vb_device_info */
-
-#endif /* _VB_STRUCT_ */
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
deleted file mode 100644
index 42ecf7fe6766..000000000000
--- a/drivers/staging/xgifb/vb_table.h
+++ /dev/null
@@ -1,2513 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VB_TABLE_
-#define _VB_TABLE_
-static const struct SiS_MCLKData XGI340New_MCLKData[] = {
- {0x16, 0x01, 0x01, 166},
- {0x19, 0x02, 0x01, 124},
- {0x7C, 0x08, 0x01, 200},
-};
-
-static const struct SiS_MCLKData XGI27New_MCLKData[] = {
- {0x5c, 0x23, 0x01, 166},
- {0x19, 0x02, 0x01, 124},
- {0x7C, 0x08, 0x80, 200},
-};
-
-const struct XGI_ECLKDataStruct XGI340_ECLKData[] = {
- {0x5c, 0x23, 0x01, 166},
- {0x55, 0x84, 0x01, 123},
- {0x7C, 0x08, 0x01, 200},
-};
-
-static const unsigned char XG27_SR18[3] = {
- 0x32, 0x32, 0x42 /* SR18 */
-};
-
-static const unsigned char XGI340_SR18[3] = {
- 0x31, 0x42, 0x42 /* SR18 */
-};
-
-static const unsigned char XGI340_cr41[24][3] = {
- {0x20, 0x50, 0x60}, /* 0 CR41 */
- {0xc4, 0x40, 0x84}, /* 1 CR8A */
- {0xc4, 0x40, 0x84}, /* 2 CR8B */
- {0xb5, 0xa4, 0xa4},
- {0xf0, 0xf0, 0xf0},
- {0x90, 0x90, 0x24}, /* 5 CR68 */
- {0x77, 0x77, 0x44}, /* 6 CR69 */
- {0x77, 0x77, 0x44}, /* 7 CR6A */
- {0xff, 0xff, 0xff}, /* 8 CR6D */
- {0x55, 0x55, 0x55}, /* 9 CR80 */
- {0x00, 0x00, 0x00}, /* 10 CR81 */
- {0x88, 0xa8, 0x48}, /* 11 CR82 */
- {0x44, 0x44, 0x77}, /* 12 CR85 */
- {0x48, 0x48, 0x88}, /* 13 CR86 */
- {0x54, 0x54, 0x44}, /* 14 CR90 */
- {0x54, 0x54, 0x44}, /* 15 CR91 */
- {0x0a, 0x0a, 0x07}, /* 16 CR92 */
- {0x44, 0x44, 0x44}, /* 17 CR93 */
- {0x10, 0x10, 0x0A}, /* 18 CR94 */
- {0x11, 0x11, 0x0a}, /* 19 CR95 */
- {0x05, 0x05, 0x05}, /* 20 CR96 */
- {0xf0, 0xf0, 0xf0}, /* 21 CRC3 */
- {0x05, 0x00, 0x02}, /* 22 CRC4 */
- {0x00, 0x00, 0x00} /* 23 CRC5 */
-};
-
-static const unsigned char XGI27_cr41[24][3] = {
- {0x20, 0x40, 0x60}, /* 0 CR41 */
- {0xC4, 0x40, 0x84}, /* 1 CR8A */
- {0xC4, 0x40, 0x84}, /* 2 CR8B */
- {0xB3, 0x13, 0xa4}, /* 3 CR40[7],
- * CR99[2:0],
- * CR45[3:0]
- */
- {0xf0, 0xf5, 0xf0}, /* 4 CR59 */
- {0x90, 0x90, 0x24}, /* 5 CR68 */
- {0x77, 0x67, 0x44}, /* 6 CR69 */
- {0x77, 0x77, 0x44}, /* 7 CR6A */
- {0xff, 0xff, 0xff}, /* 8 CR6D */
- {0x55, 0x55, 0x55}, /* 9 CR80 */
- {0x00, 0x00, 0x00}, /* 10 CR81 */
- {0x88, 0xcc, 0x48}, /* 11 CR82 */
- {0x44, 0x88, 0x77}, /* 12 CR85 */
- {0x48, 0x88, 0x88}, /* 13 CR86 */
- {0x54, 0x32, 0x44}, /* 14 CR90 */
- {0x54, 0x33, 0x44}, /* 15 CR91 */
- {0x0a, 0x07, 0x07}, /* 16 CR92 */
- {0x44, 0x63, 0x44}, /* 17 CR93 */
- {0x10, 0x14, 0x0A}, /* 18 CR94 */
- {0x11, 0x0B, 0x0C}, /* 19 CR95 */
- {0x05, 0x22, 0x05}, /* 20 CR96 */
- {0xf0, 0xf0, 0x00}, /* 21 CRC3 */
- {0x05, 0x00, 0x02}, /* 22 CRC4 */
- {0x00, 0x00, 0x00} /* 23 CRC5 */
-};
-
-/* CR47,CR48,CR49,CR4A,CR4B,CR4C,CR70,CR71,CR74,CR75,CR76,CR77 */
-const unsigned char XGI340_AGPReg[12] = {
- 0x28, 0x23, 0x00, 0x20, 0x00, 0x20,
- 0x00, 0x05, 0xd0, 0x10, 0x10, 0x00
-};
-
-const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
- {0x2e, 0x0a1b, 0x0306, 0x06, 0x05, 0x06},
- {0x2f, 0x0a1b, 0x0305, 0x05, 0x05, 0x05},
- {0x30, 0x2a1b, 0x0407, 0x07, 0x07, 0x0e},
- {0x31, 0x0a1b, 0x030d, 0x0d, 0x06, 0x3d},
- {0x32, 0x0a1b, 0x0a0e, 0x0e, 0x06, 0x3e},
- {0x33, 0x0a1d, 0x0a0d, 0x0d, 0x06, 0x3d},
- {0x34, 0x2a1d, 0x0a0e, 0x0e, 0x06, 0x3e},
- {0x35, 0x0a1f, 0x0a0d, 0x0d, 0x06, 0x3d},
- {0x36, 0x2a1f, 0x0a0e, 0x0e, 0x06, 0x3e},
- {0x38, 0x0a1b, 0x0508, 0x08, 0x00, 0x16},
- {0x3a, 0x0e3b, 0x0609, 0x09, 0x00, 0x1e},
- {0x3c, 0x0e3b, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- * add CRT2MODE [2003/10/07]
- */
- {0x3d, 0x0e7d, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- * add CRT2MODE
- */
- {0x40, 0x9a1c, 0x0000, 0x00, 0x04, 0x00},
- {0x41, 0x9a1d, 0x0000, 0x00, 0x04, 0x00},
- {0x43, 0x0a1c, 0x0306, 0x06, 0x05, 0x06},
- {0x44, 0x0a1d, 0x0306, 0x06, 0x05, 0x06},
- {0x46, 0x2a1c, 0x0407, 0x07, 0x07, 0x0e},
- {0x47, 0x2a1d, 0x0407, 0x07, 0x07, 0x0e},
- {0x49, 0x0a3c, 0x0508, 0x08, 0x00, 0x16},
- {0x4a, 0x0a3d, 0x0508, 0x08, 0x00, 0x16},
- {0x4c, 0x0e7c, 0x0609, 0x09, 0x00, 0x1e},
- {0x4d, 0x0e7d, 0x0609, 0x09, 0x00, 0x1e},
- {0x50, 0x9a1b, 0x0001, 0x01, 0x04, 0x02},
- {0x51, 0xba1b, 0x0103, 0x03, 0x07, 0x03},
- {0x52, 0x9a1b, 0x0204, 0x04, 0x00, 0x04},
- {0x56, 0x9a1d, 0x0001, 0x01, 0x04, 0x02},
- {0x57, 0xba1d, 0x0103, 0x03, 0x07, 0x03},
- {0x58, 0x9a1d, 0x0204, 0x04, 0x00, 0x04},
- {0x59, 0x9a1b, 0x0000, 0x00, 0x04, 0x00},
- {0x5A, 0x021b, 0x0014, 0x01, 0x04, 0x3f},
- {0x5B, 0x0a1d, 0x0014, 0x01, 0x04, 0x3f},
- {0x5d, 0x0a1d, 0x0305, 0x05, 0x07, 0x05},
- {0x62, 0x0a3f, 0x0306, 0x06, 0x05, 0x06},
- {0x63, 0x2a3f, 0x0407, 0x07, 0x07, 0x0e},
- {0x64, 0x0a7f, 0x0508, 0x08, 0x00, 0x16},
- {0x65, 0x0eff, 0x0609, 0x09, 0x00, 0x1e},
- {0x66, 0x0eff, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- * add CRT2MODE
- */
- {0x68, 0x067b, 0x080b, 0x0b, 0x00, 0x29},
- {0x69, 0x06fd, 0x080b, 0x0b, 0x00, 0x29},
- {0x6b, 0x07ff, 0x080b, 0x0b, 0x00, 0x29},
- {0x6c, 0x067b, 0x090c, 0x0c, 0x00, 0x2f},
- {0x6d, 0x06fd, 0x090c, 0x0c, 0x00, 0x2f},
- {0x6e, 0x07ff, 0x090c, 0x0c, 0x00, 0x2f},
- {0x70, 0x2a1b, 0x0410, 0x10, 0x07, 0x34},
- {0x71, 0x0a1b, 0x0511, 0x11, 0x00, 0x37},
- {0x74, 0x0a1d, 0x0511, 0x11, 0x00, 0x37},
- {0x75, 0x0a3d, 0x0612, 0x12, 0x00, 0x3a},
- {0x76, 0x2a1f, 0x0410, 0x10, 0x07, 0x34},
- {0x77, 0x0a1f, 0x0511, 0x11, 0x00, 0x37},
- {0x78, 0x0a3f, 0x0612, 0x12, 0x00, 0x3a},
- {0x79, 0x0a3b, 0x0612, 0x12, 0x00, 0x3a},
- {0x7a, 0x2a1d, 0x0410, 0x10, 0x07, 0x34},
- {0x7b, 0x0e3b, 0x060f, 0x0f, 0x00, 0x1d},
- {0x7c, 0x0e7d, 0x060f, 0x0f, 0x00, 0x1d},
- {0x7d, 0x0eff, 0x060f, 0x0f, 0x00, 0x1d},
- {0x20, 0x0e3b, 0x0D16, 0x16, 0x00, 0x43},
- {0x21, 0x0e7d, 0x0D16, 0x16, 0x00, 0x43},
- {0x22, 0x0eff, 0x0D16, 0x16, 0x00, 0x43},
- {0x23, 0x0e3b, 0x0614, 0x14, 0x00, 0x41},
- {0x24, 0x0e7d, 0x0614, 0x14, 0x00, 0x41},
- {0x25, 0x0eff, 0x0614, 0x14, 0x00, 0x41},
- {0x26, 0x063b, 0x0c15, 0x15, 0x00, 0x42},
- {0x27, 0x067d, 0x0c15, 0x15, 0x00, 0x42},
- {0x28, 0x06ff, 0x0c15, 0x15, 0x00, 0x42},
- {0xff, 0x0000, 0x0000, 0x00, 0x00, 0x00}
-};
-
-static const struct SiS_StandTable_S XGI330_StandTable = {
-/* ExtVGATable */
- 0x00, 0x00, 0x00, 0x0000,
- {0x21, 0x0f, 0x00, 0x0e}, /* 0x21 = 0x01 | (0x20 = screen off) */
- 0x23,
- {0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xea, 0x8c, 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3,
- 0xff},
- {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x01, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0f,
- 0xff}
-};
-
-static const struct XGI_XG21CRT1Struct XGI_UpdateCRT1Table[] = {
- {0x01, 0x27, 0x91, 0x8f, 0xc0}, /* 00 */
- {0x03, 0x4f, 0x83, 0x8f, 0xc0}, /* 01 */
- {0x05, 0x27, 0x91, 0x8f, 0xc0}, /* 02 */
- {0x06, 0x4f, 0x83, 0x8f, 0xc0}, /* 03 */
- {0x07, 0x4f, 0x83, 0x8f, 0xc0}, /* 04 */
- {0x0d, 0x27, 0x91, 0x8f, 0xc0}, /* 05 */
- {0x0e, 0x4f, 0x83, 0x8f, 0xc0}, /* 06 */
- {0x0f, 0x4f, 0x83, 0x5d, 0xc0}, /* 07 */
- {0x10, 0x4f, 0x83, 0x5d, 0xc0}, /* 08 */
- {0x11, 0x4f, 0x83, 0xdf, 0x0c}, /* 09 */
- {0x12, 0x4f, 0x83, 0xdf, 0x0c}, /* 10 */
- {0x13, 0x4f, 0x83, 0x8f, 0xc0}, /* 11 */
- {0x2e, 0x4f, 0x83, 0xdf, 0x0c}, /* 12 */
- {0x2e, 0x4f, 0x87, 0xdf, 0xc0}, /* 13 */
- {0x2f, 0x4f, 0x83, 0x8f, 0xc0}, /* 14 */
- {0x50, 0x27, 0x91, 0xdf, 0x0c}, /* 15 */
- {0x59, 0x27, 0x91, 0x8f, 0xc0} /* 16 */
-};
-
-const struct XGI_CRT1TableStruct XGI_CRT1Table[] = {
- { {0x2d, 0x28, 0x90, 0x2c, 0x90, 0x00, 0x04, 0x00,
- 0xbf, 0x1f, 0x9c, 0x8e, 0x96, 0xb9, 0x30} }, /* 0x0 */
- { {0x2d, 0x28, 0x90, 0x2c, 0x90, 0x00, 0x04, 0x00,
- 0x0b, 0x3e, 0xe9, 0x8b, 0xe7, 0x04, 0x00} }, /* 0x1 */
- { {0x3D, 0x31, 0x81, 0x37, 0x1F, 0x00, 0x05, 0x00,
- 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} }, /* 0x2 */
- { {0x4F, 0x3F, 0x93, 0x45, 0x0D, 0x00, 0x01, 0x00,
- 0x24, 0xF5, 0x02, 0x88, 0xFF, 0x25, 0x90} }, /* 0x3 */
- { {0x5F, 0x50, 0x82, 0x55, 0x81, 0x00, 0x05, 0x00,
- 0xBF, 0x1F, 0x9C, 0x8E, 0x96, 0xB9, 0x30} }, /* 0x4 */
- { {0x5F, 0x50, 0x82, 0x55, 0x81, 0x00, 0x05, 0x00,
- 0x0B, 0x3E, 0xE9, 0x8B, 0xE7, 0x04, 0x00} }, /* 0x5 */
- { {0x63, 0x50, 0x86, 0x56, 0x9B, 0x00, 0x01, 0x00,
- 0x06, 0x3E, 0xE8, 0x8B, 0xE7, 0xFF, 0x10} }, /* 0x6 */
- { {0x64, 0x4F, 0x88, 0x55, 0x9D, 0x00, 0x01, 0x00,
- 0xF2, 0x1F, 0xE0, 0x83, 0xDF, 0xF3, 0x10} }, /* 0x7 */
- { {0x63, 0x4F, 0x87, 0x5A, 0x81, 0x00, 0x05, 0x00,
- 0xFB, 0x1F, 0xE0, 0x83, 0xDF, 0xFC, 0x10} }, /* 0x8 */
- { {0x65, 0x4F, 0x89, 0x58, 0x80, 0x00, 0x05, 0x60,
- 0xFB, 0x1F, 0xE0, 0x83, 0xDF, 0xFC, 0x80} }, /* 0x9 */
- { {0x65, 0x4F, 0x89, 0x58, 0x80, 0x00, 0x05, 0x60,
- 0x01, 0x3E, 0xE0, 0x83, 0xDF, 0x02, 0x80} }, /* 0xa */
- { {0x67, 0x4F, 0x8B, 0x58, 0x81, 0x00, 0x05, 0x60,
- 0x0D, 0x3E, 0xE0, 0x83, 0xDF, 0x0E, 0x90} }, /* 0xb */
- { {0x65, 0x4F, 0x89, 0x57, 0x9F, 0x00, 0x01, 0x00,
- 0xFB, 0x1F, 0xE6, 0x8A, 0xDF, 0xFC, 0x10} }, /* 0xc */
- /* 0D (800x600,56Hz) */
- { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00,
- /* (VCLK 36.0MHz) */
- 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} },
- /* 0E (800x600,60Hz) */
- { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00,
- /* (VCLK 40.0MHz) */
- 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} },
- /* 0F (800x600,72Hz) */
- { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00,
- /* (VCLK 50.0MHz) */
- 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} },
- /* 10 (800x600,75Hz) */
- { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00,
- /* (VCLK 49.5MHz) */
- 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} },
- /* 11 (800x600,85Hz) */
- { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00,
- /* (VCLK 56.25MHz) */
- 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} },
- /* 12 (800x600,100Hz) */
- { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60,
- /* (VCLK 75.8MHz) */
- 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} },
- /* 13 (800x600,120Hz) */
- { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60,
- /* (VCLK 79.411MHz) */
- 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} },
- /* 14 (800x600,160Hz) */
- { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60,
- /* (VCLK 105.822MHz) */
- 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} },
- { {0x99, 0x7F, 0x9D, 0x84, 0x1A, 0x00, 0x02, 0x00,
- 0x96, 0x1F, 0x7F, 0x83, 0x7F, 0x97, 0x10} }, /* 0x15 */
- { {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00,
- 0x24, 0xF5, 0x02, 0x88, 0xFF, 0x25, 0x90} }, /* 0x16 */
- { {0xA1, 0x7F, 0x85, 0x86, 0x97, 0x00, 0x02, 0x00,
- 0x24, 0xF5, 0x02, 0x88, 0xFF, 0x25, 0x90} }, /* 0x17 */
- { {0x9F, 0x7F, 0x83, 0x85, 0x91, 0x00, 0x02, 0x00,
- 0x1E, 0xF5, 0x00, 0x83, 0xFF, 0x1F, 0x90} }, /* 0x18 */
- { {0xA7, 0x7F, 0x8B, 0x89, 0x95, 0x00, 0x02, 0x00,
- 0x26, 0xF5, 0x00, 0x83, 0xFF, 0x27, 0x90} }, /* 0x19 */
- { {0xA9, 0x7F, 0x8D, 0x8C, 0x9A, 0x00, 0x02, 0x62,
- 0x2C, 0xF5, 0x00, 0x83, 0xFF, 0x2D, 0x14} }, /* 0x1a */
- { {0xAB, 0x7F, 0x8F, 0x8D, 0x9B, 0x00, 0x02, 0x62,
- 0x35, 0xF5, 0x00, 0x83, 0xFF, 0x36, 0x14} }, /* 0x1b */
- { {0xCF, 0x9F, 0x93, 0xB2, 0x01, 0x00, 0x03, 0x00,
- 0x14, 0xBA, 0x00, 0x83, 0xFF, 0x15, 0x00} }, /* 0x1c */
- { {0xCE, 0x9F, 0x92, 0xA9, 0x17, 0x00, 0x07, 0x00,
- 0x28, 0x5A, 0x00, 0x83, 0xFF, 0x29, 0x89} }, /* 0x1d */
- { {0xCE, 0x9F, 0x92, 0xA5, 0x17, 0x00, 0x07, 0x00,
- 0x28, 0x5A, 0x00, 0x83, 0xFF, 0x29, 0x89} }, /* 0x1e */
- { {0xD3, 0x9F, 0x97, 0xAB, 0x1F, 0x00, 0x07, 0x00,
- 0x2E, 0x5A, 0x00, 0x83, 0xFF, 0x2F, 0x89} }, /* 0x1f */
- { {0x09, 0xC7, 0x8D, 0xD3, 0x0B, 0x01, 0x04, 0x00,
- 0xE0, 0x10, 0xB0, 0x83, 0xAF, 0xE1, 0x2F} }, /* 0x20 */
- { {0x09, 0xC7, 0x8D, 0xD3, 0x0B, 0x01, 0x04, 0x00,
- 0xE0, 0x10, 0xB0, 0x83, 0xAF, 0xE1, 0x2F} }, /* 0x21 */
- { {0x09, 0xC7, 0x8D, 0xD3, 0x0B, 0x01, 0x04, 0x00,
- 0xE0, 0x10, 0xB0, 0x83, 0xAF, 0xE1, 0x2F} }, /* 0x22 */
- { {0x09, 0xC7, 0x8D, 0xD3, 0x0B, 0x01, 0x04, 0x00,
- 0xE0, 0x10, 0xB0, 0x83, 0xAF, 0xE1, 0x2F} }, /* 0x23 */
- { {0x09, 0xC7, 0x8D, 0xD3, 0x0B, 0x01, 0x04, 0x00,
- 0xE0, 0x10, 0xB0, 0x83, 0xAF, 0xE1, 0x2F} }, /* 0x24 */
- { {0x09, 0xC7, 0x8D, 0xD3, 0x0B, 0x01, 0x04, 0x00,
- 0xE0, 0x10, 0xB0, 0x83, 0xAF, 0xE1, 0x2F} }, /* 0x25 */
- { {0x09, 0xC7, 0x8D, 0xD3, 0x0B, 0x01, 0x04, 0x00,
- 0xE0, 0x10, 0xB0, 0x83, 0xAF, 0xE1, 0x2F} }, /* 0x26 */
- { {0x40, 0xEF, 0x84, 0x03, 0x1D, 0x41, 0x01, 0x00,
- 0xDA, 0x1F, 0xA0, 0x83, 0x9F, 0xDB, 0x1F} }, /* 0x27 */
- { {0x43, 0xEF, 0x87, 0x06, 0x00, 0x41, 0x05, 0x62,
- 0xD4, 0x1F, 0xA0, 0x83, 0x9F, 0xD5, 0x9F} }, /* 0x28 */
- { {0x45, 0xEF, 0x89, 0x07, 0x01, 0x41, 0x05, 0x62,
- 0xD9, 0x1F, 0xA0, 0x83, 0x9F, 0xDA, 0x9F} }, /* 0x29 */
- { {0x40, 0xEF, 0x84, 0x03, 0x1D, 0x41, 0x01, 0x00,
- 0xDA, 0x1F, 0xA0, 0x83, 0x9F, 0xDB, 0x1F} }, /* 0x2a */
- { {0x40, 0xEF, 0x84, 0x03, 0x1D, 0x41, 0x01, 0x00,
- 0xDA, 0x1F, 0xA0, 0x83, 0x9F, 0xDB, 0x1F} }, /* 0x2b */
- { {0x40, 0xEF, 0x84, 0x03, 0x1D, 0x41, 0x01, 0x00,
- 0xDA, 0x1F, 0xA0, 0x83, 0x9F, 0xDB, 0x1F} }, /* 0x2c */
- { {0x59, 0xFF, 0x9D, 0x17, 0x13, 0x41, 0x05, 0x44,
- 0x33, 0xBA, 0x00, 0x83, 0xFF, 0x34, 0x0F} }, /* 0x2d */
- { {0x5B, 0xFF, 0x9F, 0x18, 0x14, 0x41, 0x05, 0x44,
- 0x38, 0xBA, 0x00, 0x83, 0xFF, 0x39, 0x0F} }, /* 0x2e */
- { {0x5B, 0xFF, 0x9F, 0x18, 0x14, 0x41, 0x05, 0x44,
- 0x3D, 0xBA, 0x00, 0x83, 0xFF, 0x3E, 0x0F} }, /* 0x2f */
- { {0x5D, 0xFF, 0x81, 0x19, 0x95, 0x41, 0x05, 0x44,
- 0x41, 0xBA, 0x00, 0x84, 0xFF, 0x42, 0x0F} }, /* 0x30 */
- { {0x55, 0xFF, 0x99, 0x0D, 0x0C, 0x41, 0x05, 0x00,
- 0x3E, 0xBA, 0x00, 0x84, 0xFF, 0x3F, 0x0F} }, /* 0x31 */
- { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00,
- 0x72, 0xBA, 0x27, 0x8B, 0xDF, 0x73, 0x80} }, /* 0x32 */
- { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00,
- 0x6F, 0xBA, 0x26, 0x89, 0xDF, 0x6F, 0x80} }, /* 0x33 */
- { {0x7F, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00,
- 0x75, 0xBA, 0x29, 0x8C, 0xDF, 0x75, 0x80} }, /* 0x34 */
- { {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00,
- 0x24, 0xF1, 0xAF, 0x85, 0x3F, 0x25, 0xB0} }, /* 0x35 */
- { {0x9F, 0x7F, 0x83, 0x85, 0x91, 0x00, 0x02, 0x00,
- 0x1E, 0xF1, 0xAD, 0x81, 0x3F, 0x1F, 0xB0} }, /* 0x36 */
- { {0xA7, 0x7F, 0x88, 0x89, 0x15, 0x00, 0x02, 0x00,
- 0x26, 0xF1, 0xB1, 0x85, 0x3F, 0x27, 0xB0} }, /* 0x37 */
- { {0xCE, 0x9F, 0x92, 0xA9, 0x17, 0x00, 0x07, 0x00,
- 0x28, 0xC4, 0x7A, 0x8E, 0xCF, 0x29, 0xA1} }, /* 0x38 */
- { {0xCE, 0x9F, 0x92, 0xA5, 0x17, 0x00, 0x07, 0x00,
- 0x28, 0xD4, 0x7A, 0x8E, 0xCF, 0x29, 0xA1} }, /* 0x39 */
- { {0xD3, 0x9F, 0x97, 0xAB, 0x1F, 0x00, 0x07, 0x00,
- 0x2E, 0xD4, 0x7D, 0x81, 0xCF, 0x2F, 0xA1} }, /* 0x3a */
- { {0xDC, 0x9F, 0x00, 0xAB, 0x19, 0x00, 0x07, 0x00,
- 0xE6, 0xEF, 0xC0, 0xC3, 0xBF, 0xE7, 0x90} }, /* 0x3b */
- { {0x6B, 0x59, 0x8F, 0x5E, 0x8C, 0x00, 0x05, 0x00,
- 0x0B, 0x3E, 0xE9, 0x8B, 0xE7, 0x04, 0x00} }, /* 0x3c */
- { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00,
- 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} }, /* 0x3d */
- { {0x86, 0x6A, 0x8a, 0x74, 0x06, 0x00, 0x02, 0x00,
- 0x8c, 0x15, 0x4f, 0x83, 0xef, 0x8d, 0x30} }, /* 0x3e */
- { {0x81, 0x6A, 0x85, 0x70, 0x00, 0x00, 0x02, 0x00,
- 0x0f, 0x3e, 0xeb, 0x8e, 0xdf, 0x10, 0x00} }, /* 0x3f */
- { {0xCE, 0x9F, 0x92, 0xA9, 0x17, 0x00, 0x07, 0x00,
- 0x20, 0xF5, 0x03, 0x88, 0xFF, 0x21, 0x90} }, /* 0x40 */
- { {0xE6, 0xAE, 0x8A, 0xBD, 0x90, 0x00, 0x03, 0x00,
- 0x3D, 0x10, 0x1A, 0x8D, 0x19, 0x3E, 0x2F} }, /* 0x41 */
- { {0xB9, 0x8F, 0x9D, 0x9B, 0x8A, 0x00, 0x06, 0x00,
- 0x7D, 0xFF, 0x60, 0x83, 0x5F, 0x7E, 0x90} }, /* 0x42 */
- { {0xC3, 0x8F, 0x87, 0x9B, 0x0B, 0x00, 0x07, 0x00,
- 0x82, 0xFF, 0x60, 0x83, 0x5F, 0x83, 0x90} }, /* 0x43 */
- { {0xAD, 0x7F, 0x91, 0x8E, 0x9C, 0x00, 0x02, 0x82,
- 0x49, 0xF5, 0x00, 0x83, 0xFF, 0x4A, 0x90} }, /* 0x44 */
- { {0xCD, 0x9F, 0x91, 0xA7, 0x19, 0x00, 0x07, 0x60,
- 0xE6, 0xFF, 0xC0, 0x83, 0xBF, 0xE7, 0x90} }, /* 0x45 */
- { {0xD3, 0x9F, 0x97, 0xAB, 0x1F, 0x00, 0x07, 0x60,
- 0xF1, 0xFF, 0xC0, 0x83, 0xBF, 0xF2, 0x90} }, /* 0x46 */
- { {0xD7, 0x9F, 0x9B, 0xAC, 0x1E, 0x00, 0x07, 0x00,
- 0x03, 0xDE, 0xC0, 0x84, 0xBF, 0x04, 0x90} } /* 0x47 */
-};
-
-/*add for new UNIVGABIOS*/
-static const struct SiS_LCDData XGI_StLCD1024x768Data[] = {
- {62, 25, 800, 546, 1344, 806},
- {32, 15, 930, 546, 1344, 806},
- {62, 25, 800, 546, 1344, 806}, /*chiawenfordot9->dot8*/
- {104, 45, 945, 496, 1344, 806},
- {62, 25, 800, 546, 1344, 806},
- {31, 18, 1008, 624, 1344, 806},
- {1, 1, 1344, 806, 1344, 806}
-};
-
-static const struct SiS_LCDData XGI_ExtLCD1024x768Data[] = {
- {42, 25, 1536, 419, 1344, 806},
- {48, 25, 1536, 369, 1344, 806},
- {42, 25, 1536, 419, 1344, 806},
- {48, 25, 1536, 369, 1344, 806},
- {12, 5, 896, 500, 1344, 806},
- {42, 25, 1024, 625, 1344, 806},
- {1, 1, 1344, 806, 1344, 806},
- {12, 5, 896, 500, 1344, 806},
- {42, 25, 1024, 625, 1344, 806},
- {1, 1, 1344, 806, 1344, 806},
- {12, 5, 896, 500, 1344, 806},
- {42, 25, 1024, 625, 1344, 806},
- {1, 1, 1344, 806, 1344, 806}
-};
-
-static const struct SiS_LCDData XGI_CetLCD1024x768Data[] = {
- {1, 1, 1344, 806, 1344, 806}, /* ; 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {1, 1, 1344, 806, 1344, 806}, /* 01 (320x350,640x350) */
- {1, 1, 1344, 806, 1344, 806}, /* 02 (360x400,720x400) */
- {1, 1, 1344, 806, 1344, 806}, /* 03 (720x350) */
- {1, 1, 1344, 806, 1344, 806}, /* 04 (640x480x60Hz) */
- {1, 1, 1344, 806, 1344, 806}, /* 05 (800x600x60Hz) */
- {1, 1, 1344, 806, 1344, 806} /* 06 (1024x768x60Hz) */
-};
-
-static const struct SiS_LCDData XGI_StLCD1280x1024Data[] = {
- {22, 5, 800, 510, 1650, 1088},
- {22, 5, 800, 510, 1650, 1088},
- {176, 45, 900, 510, 1650, 1088},
- {176, 45, 900, 510, 1650, 1088},
- {22, 5, 800, 510, 1650, 1088},
- {13, 5, 1024, 675, 1560, 1152},
- {16, 9, 1266, 804, 1688, 1072},
- {1, 1, 1688, 1066, 1688, 1066}
-};
-
-static const struct SiS_LCDData XGI_ExtLCD1280x1024Data[] = {
- {211, 60, 1024, 501, 1688, 1066},
- {211, 60, 1024, 508, 1688, 1066},
- {211, 60, 1024, 501, 1688, 1066},
- {211, 60, 1024, 508, 1688, 1066},
- {211, 60, 1024, 500, 1688, 1066},
- {211, 75, 1024, 625, 1688, 1066},
- {211, 120, 1280, 798, 1688, 1066},
- {1, 1, 1688, 1066, 1688, 1066}
-};
-
-static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
- {1, 1, 1688, 1066, 1688, 1066}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {1, 1, 1688, 1066, 1688, 1066}, /* 01 (320x350,640x350) */
- {1, 1, 1688, 1066, 1688, 1066}, /* 02 (360x400,720x400) */
- {1, 1, 1688, 1066, 1688, 1066}, /* 03 (720x350) */
- {1, 1, 1688, 1066, 1688, 1066}, /* 04 (640x480x60Hz) */
- {1, 1, 1688, 1066, 1688, 1066}, /* 05 (800x600x60Hz) */
- {1, 1, 1688, 1066, 1688, 1066}, /* 06 (1024x768x60Hz) */
- {1, 1, 1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz) */
- {1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */
-};
-
-static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
- {211, 100, 2100, 408, 1688, 1066}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {211, 64, 1536, 358, 1688, 1066}, /* 01 (320x350,640x350) */
- {211, 100, 2100, 408, 1688, 1066}, /* 02 (360x400,720x400) */
- {211, 64, 1536, 358, 1688, 1066}, /* 03 (720x350) */
- {211, 48, 840, 488, 1688, 1066}, /* 04 (640x480x60Hz) */
- {211, 72, 1008, 609, 1688, 1066}, /* 05 (800x600x60Hz) */
- {211, 128, 1400, 776, 1688, 1066}, /* 06 (1024x768x60Hz) */
- {1, 1, 1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz
- * w/o Scaling)
- */
- {1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */
-};
-
-static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
- {4, 1, 1620, 420, 2160, 1250}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {27, 7, 1920, 375, 2160, 1250}, /* 01 (320x350,640x350) */
- {4, 1, 1620, 420, 2160, 1250}, /* 02 (360x400,720x400)*/
- {27, 7, 1920, 375, 2160, 1250}, /* 03 (720x350) */
- {27, 4, 800, 500, 2160, 1250}, /* 04 (640x480x60Hz) */
- {4, 1, 1080, 625, 2160, 1250}, /* 05 (800x600x60Hz) */
- {5, 2, 1350, 800, 2160, 1250}, /* 06 (1024x768x60Hz) */
- {27, 16, 1500, 1064, 2160, 1250}, /* 07 (1280x1024x60Hz) */
- {9, 7, 1920, 1106, 2160, 1250}, /* 08 (1400x1050x60Hz) */
- {1, 1, 2160, 1250, 2160, 1250} /* 09 (1600x1200x60Hz) ;302lv */
-};
-
-static const struct SiS_LCDData XGI_StLCD1600x1200Data[] = {
- {27, 4, 800, 500, 2160, 1250}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {27, 4, 800, 500, 2160, 1250}, /* 01 (320x350,640x350) */
- {27, 4, 800, 500, 2160, 1250}, /* 02 (360x400,720x400) */
- {27, 4, 800, 500, 2160, 1250}, /* 03 (720x350) */
- {27, 4, 800, 500, 2160, 1250}, /* 04 (320x240,640x480) */
- {4, 1, 1080, 625, 2160, 1250}, /* 05 (400x300,800x600) */
- {5, 2, 1350, 800, 2160, 1250}, /* 06 (512x384,1024x768) */
- {135, 88, 1600, 1100, 2160, 1250}, /* 07 (1280x1024) */
- {1, 1, 1800, 1500, 2160, 1250}, /* 08 (1400x1050) */
- {1, 1, 2160, 1250, 2160, 1250} /* 09 (1600x1200) */
-};
-
-#define XGI_CetLCD1400x1050Data XGI_CetLCD1280x1024Data
-
-static const struct SiS_LCDData XGI_NoScalingData[] = {
- {1, 1, 800, 449, 800, 449},
- {1, 1, 800, 449, 800, 449},
- {1, 1, 900, 449, 900, 449},
- {1, 1, 900, 449, 900, 449},
- {1, 1, 800, 525, 800, 525},
- {1, 1, 1056, 628, 1056, 628},
- {1, 1, 1344, 806, 1344, 806},
- {1, 1, 1688, 1066, 1688, 1066}
-};
-
-static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
- {42, 25, 1536, 419, 1344, 806}, /* ; 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {48, 25, 1536, 369, 1344, 806}, /* ; 01 (320x350,640x350) */
- {42, 25, 1536, 419, 1344, 806}, /* ; 02 (360x400,720x400) */
- {48, 25, 1536, 369, 1344, 806}, /* ; 03 (720x350) */
- {8, 5, 1312, 500, 1312, 800}, /* ; 04 (640x480x75Hz) */
- {41, 25, 1024, 625, 1312, 800}, /* ; 05 (800x600x75Hz) */
- {1, 1, 1312, 800, 1312, 800} /* ; 06 (1024x768x75Hz) */
-};
-
-static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
- {1, 1, 1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {1, 1, 1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */
- {1, 1, 1312, 800, 1312, 800}, /* ; 02 (360x400,720x400) */
- {1, 1, 1312, 800, 1312, 800}, /* ; 03 (720x350) */
- {1, 1, 1312, 800, 1312, 800}, /* ; 04 (640x480x75Hz) */
- {1, 1, 1312, 800, 1312, 800}, /* ; 05 (800x600x75Hz) */
- {1, 1, 1312, 800, 1312, 800} /* ; 06 (1024x768x75Hz) */
-};
-
-static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
- {211, 60, 1024, 501, 1688, 1066}, /* ; 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {211, 60, 1024, 508, 1688, 1066}, /* ; 01 (320x350,640x350) */
- {211, 60, 1024, 501, 1688, 1066}, /* ; 02 (360x400,720x400) */
- {211, 60, 1024, 508, 1688, 1066}, /* ; 03 (720x350) */
- {211, 45, 768, 498, 1688, 1066}, /* ; 04 (640x480x75Hz) */
- {211, 75, 1024, 625, 1688, 1066}, /* ; 05 (800x600x75Hz) */
- {211, 120, 1280, 798, 1688, 1066}, /* ; 06 (1024x768x75Hz) */
- {1, 1, 1688, 1066, 1688, 1066} /* ; 07 (1280x1024x75Hz) */
-};
-
-#define XGI_CetLCD1280x1024x75Data XGI_CetLCD1280x1024Data
-
-static const struct SiS_LCDData XGI_NoScalingDatax75[] = {
- {1, 1, 800, 449, 800, 449}, /* ; 00 (320x200, 320x400,
- * 640x200, 640x400)
- */
- {1, 1, 800, 449, 800, 449}, /* ; 01 (320x350, 640x350) */
- {1, 1, 900, 449, 900, 449}, /* ; 02 (360x400, 720x400) */
- {1, 1, 900, 449, 900, 449}, /* ; 03 (720x350) */
- {1, 1, 840, 500, 840, 500}, /* ; 04 (640x480x75Hz) */
- {1, 1, 1056, 625, 1056, 625}, /* ; 05 (800x600x75Hz) */
- {1, 1, 1312, 800, 1312, 800}, /* ; 06 (1024x768x75Hz) */
- {1, 1, 1688, 1066, 1688, 1066}, /* ; 07 (1280x1024x75Hz) */
- {1, 1, 1688, 1066, 1688, 1066}, /* ; 08 (1400x1050x75Hz)*/
- {1, 1, 2160, 1250, 2160, 1250}, /* ; 09 (1600x1200x75Hz) */
- {1, 1, 1688, 806, 1688, 806} /* ; 0A (1280x768x75Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_ExtLCDDes1024x768Data[] = {
- {9, 1057, 0, 771}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {9, 1057, 0, 771}, /* ; 01 (320x350,640x350) */
- {9, 1057, 0, 771}, /* ; 02 (360x400,720x400) */
- {9, 1057, 0, 771}, /* ; 03 (720x350) */
- {9, 1057, 0, 771}, /* ; 04 (640x480x60Hz) */
- {9, 1057, 0, 771}, /* ; 05 (800x600x60Hz) */
- {9, 1057, 805, 770} /* ; 06 (1024x768x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_StLCDDes1024x768Data[] = {
- {9, 1057, 737, 703}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {9, 1057, 686, 651}, /* ; 01 (320x350,640x350) */
- {9, 1057, 737, 703}, /* ; 02 (360x400,720x400) */
- {9, 1057, 686, 651}, /* ; 03 (720x350) */
- {9, 1057, 776, 741}, /* ; 04 (640x480x60Hz) */
- {9, 1057, 0, 771}, /* ; 05 (800x600x60Hz) */
- {9, 1057, 805, 770} /* ; 06 (1024x768x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_CetLCDDes1024x768Data[] = {
- {1152, 856, 622, 587}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {1152, 856, 597, 562}, /* ; 01 (320x350,640x350) */
- {1152, 856, 622, 587}, /* ; 02 (360x400,720x400) */
- {1152, 856, 597, 562}, /* ; 03 (720x350) */
- {1152, 856, 662, 627}, /* ; 04 (640x480x60Hz) */
- {1232, 936, 722, 687}, /* ; 05 (800x600x60Hz) */
- {0, 1048, 805, 770} /* ; 06 (1024x768x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_ExtLCDDLDes1280x1024Data[] = {
- {18, 1346, 981, 940}, /* 00 (320x200,320x400,640x200,640x400) */
- {18, 1346, 926, 865}, /* 01 (320x350,640x350) */
- {18, 1346, 981, 940}, /* 02 (360x400,720x400) */
- {18, 1346, 926, 865}, /* 03 (720x350) */
- {18, 1346, 0, 1025}, /* 04 (640x480x60Hz) */
- {18, 1346, 0, 1025}, /* 05 (800x600x60Hz) */
- {18, 1346, 1065, 1024}, /* 06 (1024x768x60Hz) */
- {18, 1346, 1065, 1024} /* 07 (1280x1024x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_StLCDDLDes1280x1024Data[] = {
- {18, 1346, 970, 907}, /* 00 (320x200,320x400,640x200,640x400) */
- {18, 1346, 917, 854}, /* 01 (320x350,640x350) */
- {18, 1346, 970, 907}, /* 02 (360x400,720x400) */
- {18, 1346, 917, 854}, /* 03 (720x350) */
- {18, 1346, 0, 1025}, /* 04 (640x480x60Hz) */
- {18, 1346, 0, 1025}, /* 05 (800x600x60Hz) */
- {18, 1346, 1065, 1024}, /* 06 (1024x768x60Hz) */
- {18, 1346, 1065, 1024} /* 07 (1280x1024x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_CetLCDDLDes1280x1024Data[] = {
- {1368, 1008, 752, 711}, /* 00 (320x200,320x400,640x200,640x400) */
- {1368, 1008, 729, 688}, /* 01 (320x350,640x350) */
- {1368, 1008, 752, 711}, /* 02 (360x400,720x400) */
- {1368, 1008, 729, 688}, /* 03 (720x350) */
- {1368, 1008, 794, 753}, /* 04 (640x480x60Hz) */
- {1448, 1068, 854, 813}, /* 05 (800x600x60Hz) */
- {1560, 1200, 938, 897}, /* 06 (1024x768x60Hz) */
- {18, 1346, 1065, 1024} /* 07 (1280x1024x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_ExtLCDDes1280x1024Data[] = {
- {9, 1337, 981, 940}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {9, 1337, 926, 884}, /* ; 01 (320x350,640x350) alan, 2003/09/30 */
- {9, 1337, 981, 940}, /* ; 02 (360x400,720x400) */
- {9, 1337, 926, 884}, /* ; 03 (720x350) alan, 2003/09/30 */
- {9, 1337, 0, 1025}, /* ; 04 (640x480x60Hz) */
- {9, 1337, 0, 1025}, /* ; 05 (800x600x60Hz) */
- {9, 1337, 1065, 1024}, /* ; 06 (1024x768x60Hz) */
- {9, 1337, 1065, 1024} /* ; 07 (1280x1024x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_StLCDDes1280x1024Data[] = {
- {9, 1337, 970, 907}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {9, 1337, 917, 854}, /* ; 01 (320x350,640x350) */
- {9, 1337, 970, 907}, /* ; 02 (360x400,720x400) */
- {9, 1337, 917, 854}, /* ; 03 (720x350) */
- {9, 1337, 0, 1025}, /* ; 04 (640x480x60Hz) */
- {9, 1337, 0, 1025}, /* ; 05 (800x600x60Hz) */
- {9, 1337, 1065, 1024}, /* ; 06 (1024x768x60Hz) */
- {9, 1337, 1065, 1024} /* ; 07 (1280x1024x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_CetLCDDes1280x1024Data[] = {
- {1368, 1008, 752, 711}, /* 00 (320x200,320x400,640x200,640x400) */
- {1368, 1008, 729, 688}, /* 01 (320x350,640x350) */
- {1368, 1008, 752, 711}, /* 02 (360x400,720x400) */
- {1368, 1008, 729, 688}, /* 03 (720x350) */
- {1368, 1008, 794, 753}, /* 04 (640x480x60Hz) */
- {1448, 1068, 854, 813}, /* 05 (800x600x60Hz) */
- {1560, 1200, 938, 897}, /* 06 (1024x768x60Hz) */
- {9, 1337, 1065, 1024} /* 07 (1280x1024x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct xgifb_lcddldes_1400x1050[] = {
- {18, 1464, 0, 1051}, /* 00 (320x200,320x400,640x200,640x400) */
- {18, 1464, 0, 1051}, /* 01 (320x350,640x350) */
- {18, 1464, 0, 1051}, /* 02 (360x400,720x400) */
- {18, 1464, 0, 1051}, /* 03 (720x350) */
- {18, 1464, 0, 1051}, /* 04 (640x480x60Hz) */
- {18, 1464, 0, 1051}, /* 05 (800x600x60Hz) */
- {18, 1464, 0, 1051}, /* 06 (1024x768x60Hz) */
- {1646, 1406, 1053, 1038}, /* 07 (1280x1024x60Hz) */
- {18, 1464, 0, 1051} /* 08 (1400x1050x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct xgifb_lcddes_1400x1050[] = {
- {9, 1455, 0, 1051}, /* 00 (320x200,320x400,640x200,640x400) */
- {9, 1455, 0, 1051}, /* 01 (320x350,640x350) */
- {9, 1455, 0, 1051}, /* 02 (360x400,720x400) */
- {9, 1455, 0, 1051}, /* 03 (720x350) */
- {9, 1455, 0, 1051}, /* 04 (640x480x60Hz) */
- {9, 1455, 0, 1051}, /* 05 (800x600x60Hz) */
- {9, 1455, 0, 1051}, /* 06 (1024x768x60Hz) */
- {1637, 1397, 1053, 1038}, /* 07 (1280x1024x60Hz) */
- {9, 1455, 0, 1051} /* 08 (1400x1050x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_CetLCDDes1400x1050Data[] = {
- {1308, 1068, 781, 766}, /* 00 (320x200,320x400,640x200,640x400) */
- {1308, 1068, 781, 766}, /* 01 (320x350,640x350) */
- {1308, 1068, 781, 766}, /* 02 (360x400,720x400) */
- {1308, 1068, 781, 766}, /* 03 (720x350) */
- {1308, 1068, 781, 766}, /* 04 (640x480x60Hz) */
- {1388, 1148, 841, 826}, /* 05 (800x600x60Hz) */
- {1490, 1250, 925, 910}, /* 06 (1024x768x60Hz) */
- {1646, 1406, 1053, 1038}, /* 07 (1280x1024x60Hz) */
- {18, 1464, 0, 1051} /* 08 (1400x1050x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_CetLCDDes1400x1050Data2[] = {
- {0, 1448, 0, 1051}, /* 00 (320x200,320x400,640x200,640x400) */
- {0, 1448, 0, 1051}, /* 01 (320x350,640x350) */
- {0, 1448, 0, 1051}, /* 02 (360x400,720x400) */
- {0, 1448, 0, 1051}, /* 03 (720x350) */
- {0, 1448, 0, 1051} /* 04 (640x480x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_ExtLCDDLDes1600x1200Data[] = {
- {18, 1682, 0, 1201}, /* 00 (320x200,320x400,640x200,640x400) */
- {18, 1682, 0, 1201}, /* 01 (320x350,640x350) */
- {18, 1682, 0, 1201}, /* 02 (360x400,720x400) */
- {18, 1682, 0, 1201}, /* 03 (720x350) */
- {18, 1682, 0, 1201}, /* 04 (640x480x60Hz) */
- {18, 1682, 0, 1201}, /* 05 (800x600x60Hz) */
- {18, 1682, 0, 1201}, /* 06 (1024x768x60Hz) */
- {18, 1682, 0, 1201}, /* 07 (1280x1024x60Hz) */
- {18, 1682, 0, 1201}, /* 08 (1400x1050x60Hz) */
- {18, 1682, 0, 1201} /* 09 (1600x1200x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_StLCDDLDes1600x1200Data[] = {
- {18, 1682, 1150, 1101}, /* 00 (320x200,320x400,640x200,640x400) */
- {18, 1682, 1083, 1034}, /* 01 (320x350,640x350) */
- {18, 1682, 1150, 1101}, /* 02 (360x400,720x400) */
- {18, 1682, 1083, 1034}, /* 03 (720x350) */
- {18, 1682, 0, 1201}, /* 04 (640x480x60Hz) */
- {18, 1682, 0, 1201}, /* 05 (800x600x60Hz) */
- {18, 1682, 0, 1201}, /* 06 (1024x768x60Hz) */
- {18, 1682, 1232, 1183}, /* 07 (1280x1024x60Hz) */
- {18, 1682, 0, 1201}, /* 08 (1400x1050x60Hz) */
- {18, 1682, 0, 1201} /* 09 (1600x1200x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_ExtLCDDes1600x1200Data[] = {
- {9, 1673, 0, 1201}, /* 00 (320x200,320x400,640x200,640x400) */
- {9, 1673, 0, 1201}, /* 01 (320x350,640x350) */
- {9, 1673, 0, 1201}, /* 02 (360x400,720x400) */
- {9, 1673, 0, 1201}, /* 03 (720x350) */
- {9, 1673, 0, 1201}, /* 04 (640x480x60Hz) */
- {9, 1673, 0, 1201}, /* 05 (800x600x60Hz) */
- {9, 1673, 0, 1201}, /* 06 (1024x768x60Hz) */
- {9, 1673, 0, 1201}, /* 07 (1280x1024x60Hz) */
- {9, 1673, 0, 1201}, /* 08 (1400x1050x60Hz) */
- {9, 1673, 0, 1201} /* 09 (1600x1200x60Hz) */
-};
-
-static const struct XGI_LCDDesStruct XGI_StLCDDes1600x1200Data[] = {
- {9, 1673, 1150, 1101}, /* 00 (320x200,320x400,640x200,640x400) */
- {9, 1673, 1083, 1034}, /* 01 (320x350,640x350) */
- {9, 1673, 1150, 1101}, /* 02 (360x400,720x400) */
- {9, 1673, 1083, 1034}, /* 03 (720x350) */
- {9, 1673, 0, 1201}, /* 04 (640x480x60Hz) */
- {9, 1673, 0, 1201}, /* 05 (800x600x60Hz) */
- {9, 1673, 0, 1201}, /* 06 (1024x768x60Hz) */
- {9, 1673, 1232, 1183}, /* 07 (1280x1024x60Hz) */
- {9, 1673, 0, 1201}, /* 08 (1400x1050x60Hz) */
- {9, 1673, 0, 1201} /* 09 (1600x1200x60Hz) */
-};
-
-static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[] = {
- {9, 657, 448, 405, 96, 2}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {9, 657, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
- {9, 657, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
- {9, 657, 448, 355, 96, 2}, /* 03 (720x350) */
- {9, 657, 1, 483, 96, 2}, /* 04 (640x480x60Hz) */
- {9, 849, 627, 600, 128, 4}, /* 05 (800x600x60Hz) */
- {9, 1057, 805, 770, 0136, 6}, /* 06 (1024x768x60Hz) */
- {9, 1337, 0, 1025, 112, 3}, /* 07 (1280x1024x60Hz) */
- {9, 1457, 0, 1051, 112, 3}, /* 08 (1400x1050x60Hz)*/
- {9, 1673, 0, 1201, 192, 3}, /* 09 (1600x1200x60Hz) */
- {9, 1337, 0, 771, 112, 6} /* 0A (1280x768x60Hz) */
-};
-
-/* ;;1024x768x75Hz */
-static const struct XGI_LCDDesStruct xgifb_lcddes_1024x768x75[] = {
- {9, 1049, 0, 769}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {9, 1049, 0, 769}, /* ; 01 (320x350,640x350) */
- {9, 1049, 0, 769}, /* ; 02 (360x400,720x400) */
- {9, 1049, 0, 769}, /* ; 03 (720x350) */
- {9, 1049, 0, 769}, /* ; 04 (640x480x75Hz) */
- {9, 1049, 0, 769}, /* ; 05 (800x600x75Hz) */
- {9, 1049, 0, 769} /* ; 06 (1024x768x75Hz) */
-};
-
-/* ;;1024x768x75Hz */
-static const struct XGI_LCDDesStruct XGI_CetLCDDes1024x768x75Data[] = {
- {1152, 856, 622, 587}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {1152, 856, 597, 562}, /* ; 01 (320x350,640x350) */
- {1192, 896, 622, 587}, /* ; 02 (360x400,720x400) */
- {1192, 896, 597, 562}, /* ; 03 (720x350) */
- {1129, 857, 656, 625}, /* ; 04 (640x480x75Hz) */
- {1209, 937, 716, 685}, /* ; 05 (800x600x75Hz) */
- {9, 1049, 0, 769} /* ; 06 (1024x768x75Hz) */
-};
-
-/* ;;1280x1024x75Hz */
-static const struct XGI_LCDDesStruct xgifb_lcddldes_1280x1024x75[] = {
- {18, 1314, 0, 1025}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {18, 1314, 0, 1025}, /* ; 01 (320x350,640x350) */
- {18, 1314, 0, 1025}, /* ; 02 (360x400,720x400) */
- {18, 1314, 0, 1025}, /* ; 03 (720x350) */
- {18, 1314, 0, 1025}, /* ; 04 (640x480x60Hz) */
- {18, 1314, 0, 1025}, /* ; 05 (800x600x60Hz) */
- {18, 1314, 0, 1025}, /* ; 06 (1024x768x60Hz) */
- {18, 1314, 0, 1025} /* ; 07 (1280x1024x60Hz) */
-};
-
-/* 1280x1024x75Hz */
-static const struct XGI_LCDDesStruct XGI_CetLCDDLDes1280x1024x75Data[] = {
- {1368, 1008, 752, 711}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {1368, 1008, 729, 688}, /* ; 01 (320x350,640x350) */
- {1408, 1048, 752, 711}, /* ; 02 (360x400,720x400) */
- {1408, 1048, 729, 688}, /* ; 03 (720x350) */
- {1377, 985, 794, 753}, /* ; 04 (640x480x75Hz) */
- {1457, 1065, 854, 813}, /* ; 05 (800x600x75Hz) */
- {1569, 1177, 938, 897}, /* ; 06 (1024x768x75Hz) */
- {18, 1314, 0, 1025} /* ; 07 (1280x1024x75Hz) */
-};
-
-/* ;;1280x1024x75Hz */
-static const struct XGI_LCDDesStruct xgifb_lcddes_1280x1024x75[] = {
- {9, 1305, 0, 1025}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {9, 1305, 0, 1025}, /* ; 01 (320x350,640x350) */
- {9, 1305, 0, 1025}, /* ; 02 (360x400,720x400) */
- {9, 1305, 0, 1025}, /* ; 03 (720x350) */
- {9, 1305, 0, 1025}, /* ; 04 (640x480x60Hz) */
- {9, 1305, 0, 1025}, /* ; 05 (800x600x60Hz) */
- {9, 1305, 0, 1025}, /* ; 06 (1024x768x60Hz) */
- {9, 1305, 0, 1025} /* ; 07 (1280x1024x60Hz) */
-};
-
-/* 1280x1024x75Hz */
-static const struct XGI_LCDDesStruct XGI_CetLCDDes1280x1024x75Data[] = {
- {1368, 1008, 752, 711}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {1368, 1008, 729, 688}, /* ; 01 (320x350,640x350) */
- {1408, 1048, 752, 711}, /* ; 02 (360x400,720x400) */
- {1408, 1048, 729, 688}, /* ; 03 (720x350) */
- {1377, 985, 794, 753}, /* ; 04 (640x480x75Hz) */
- {1457, 1065, 854, 813}, /* ; 05 (800x600x75Hz) */
- {1569, 1177, 938, 897}, /* ; 06 (1024x768x75Hz) */
- {9, 1305, 0, 1025} /* ; 07 (1280x1024x75Hz) */
-};
-
-/* Scaling LCD 75Hz */
-static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[] = {
- {9, 657, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {9, 657, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
- {9, 738, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
- {9, 738, 448, 355, 108, 2}, /* ; 03 (720x350) */
- {9, 665, 0, 481, 64, 3}, /* ; 04 (640x480x75Hz) */
- {9, 825, 0, 601, 80, 3}, /* ; 05 (800x600x75Hz) */
- {9, 1049, 0, 769, 96, 3}, /* ; 06 (1024x768x75Hz) */
- {9, 1305, 0, 1025, 144, 3}, /* ; 07 (1280x1024x75Hz) */
- {9, 1457, 0, 1051, 112, 3}, /* ; 08 (1400x1050x60Hz)*/
- {9, 1673, 0, 1201, 192, 3}, /* ; 09 (1600x1200x75Hz) */
- {9, 1337, 0, 771, 112, 6} /* ; 0A (1280x768x60Hz) */
-};
-
-static const struct SiS_TVData XGI_StPALData[] = {
- {1, 1, 864, 525, 1270, 400, 100, 0, 760},
- {1, 1, 864, 525, 1270, 350, 100, 0, 760},
- {1, 1, 864, 525, 1270, 400, 0, 0, 720},
- {1, 1, 864, 525, 1270, 350, 0, 0, 720},
- {1, 1, 864, 525, 1270, 480, 50, 0, 760},
- {1, 1, 864, 525, 1270, 600, 50, 0, 0}
-};
-
-static const struct SiS_TVData XGI_ExtPALData[] = {
- {2, 1, 1080, 463, 1270, 500, 50, 0, 50},
- {15, 7, 1152, 413, 1270, 500, 50, 0, 50},
- {2, 1, 1080, 463, 1270, 500, 50, 0, 50},
- {15, 7, 1152, 413, 1270, 500, 50, 0, 50},
- {2, 1, 900, 543, 1270, 500, 0, 0, 50},
- {4, 3, 1080, 663, 1270, 500, 438, 0, 438},
- {1, 1, 1125, 831, 1270, 500, 686, 0, 686}, /*301b*/
- {3, 2, 1080, 619, 1270, 540, 438, 0, 438}
-};
-
-static const struct SiS_TVData XGI_StNTSCData[] = {
- {1, 1, 858, 525, 1270, 400, 50, 0, 760},
- {1, 1, 858, 525, 1270, 350, 50, 0, 640},
- {1, 1, 858, 525, 1270, 400, 0, 0, 720},
- {1, 1, 858, 525, 1270, 350, 0, 0, 720},
- {1, 1, 858, 525, 1270, 480, 0, 0, 760}
-};
-
-static const struct SiS_TVData XGI_ExtNTSCData[] = {
- {9, 5, 1001, 453, 1270, 420, 171, 0, 171},
- {12, 5, 858, 403, 1270, 420, 171, 0, 171},
- {9, 5, 1001, 453, 1270, 420, 171, 0, 171},
- {12, 5, 858, 403, 1270, 420, 171, 0, 171},
- {143, 80, 836, 523, 1270, 420, 224, 0, 0},
- {143, 120, 1008, 643, 1270, 420, 0, 1, 0},
- {1, 1, 1120, 821, 1516, 420, 0, 1, 0}, /*301b*/
- {2, 1, 858, 503, 1584, 480, 0, 1, 0},
- {3, 2, 1001, 533, 1270, 420, 0, 0, 0}
-};
-
-static const struct SiS_TVData XGI_St1HiTVData[] = {
- {1, 1, 892, 563, 690, 800, 0, 0, 0}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
- {1, 1, 1000, 563, 785, 800, 0, 0, 0}, /* 02 (360x400,720x400) */
- {1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
- {1, 1, 892, 563, 690, 960, 0, 0, 0}, /* 04 (320x240,640x480) */
- {8, 5, 1050, 683, 1648, 960, 0x150, 1, 0} /* 05 (400x300,800x600) */
-};
-
-static const struct SiS_TVData XGI_St2HiTVData[] = {
- {3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
- {3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 02 (360x400,720x400) */
- {1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
- {5, 2, 840, 563, 1648, 960, 0x08D, 1, 0}, /* 04 (320x240,640x480) */
- {8, 5, 1050, 683, 1648, 960, 0x17C, 1, 0} /* 05 (400x300,800x600) */
-};
-
-static const struct SiS_TVData XGI_ExtHiTVData[] = {
- {6, 1, 840, 563, 1632, 960, 0, 0, 0}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 01 (320x350,640x350) */
- {3, 1, 840, 483, 1632, 960, 0, 0, 0}, /* 02 (360x400,720x400) */
- {3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 03 (720x350) */
- {5, 1, 840, 563, 1648, 960, 0x166, 1, 0}, /* 04 (320x240,640x480) */
- {16, 5, 1050, 683, 1648, 960, 0x143, 1, 0}, /* 05 (400x300,800x600) */
- {25, 12, 1260, 851, 1648, 960, 0x032, 0, 0}, /* 06 (512x384,1024x768)*/
- {5, 4, 1575, 1124, 1648, 960, 0x128, 0, 0}, /* 07 (1280x1024) */
- {4, 1, 1050, 563, 1548, 960, 0x143, 1, 0}, /* 08 (800x480) */
- {5, 2, 1400, 659, 1648, 960, 0x032, 0, 0}, /* 09 (1024x576) */
- {8, 5, 1750, 803, 1648, 960, 0x128, 0, 0} /* 0A (1280x720) */
-};
-
-static const struct SiS_TVData XGI_ExtYPbPr525iData[] = {
- { 9, 5, 1001, 453, 1270, 420, 171, 0, 171},
- { 12, 5, 858, 403, 1270, 420, 171, 0, 171},
- { 9, 5, 1001, 453, 1270, 420, 171, 0, 171},
- { 12, 5, 858, 403, 1270, 420, 171, 0, 171},
- {143, 80, 836, 523, 1250, 420, 224, 0, 0},
- {143, 120, 1008, 643, 1250, 420, 0, 1, 0},
- { 1, 1, 1120, 821, 1516, 420, 0, 1, 0}, /*301b*/
- { 2, 1, 858, 503, 1584, 480, 0, 1, 0},
- { 3, 2, 1001, 533, 1250, 420, 0, 0, 0}
-};
-
-static const struct SiS_TVData XGI_StYPbPr525iData[] = {
- {1, 1, 858, 525, 1270, 400, 50, 0, 760},
- {1, 1, 858, 525, 1270, 350, 50, 0, 640},
- {1, 1, 858, 525, 1270, 400, 0, 0, 720},
- {1, 1, 858, 525, 1270, 350, 0, 0, 720},
- {1, 1, 858, 525, 1270, 480, 0, 0, 760},
-};
-
-static const struct SiS_TVData XGI_ExtYPbPr525pData[] = {
- { 9, 5, 1001, 453, 1270, 420, 171, 0, 171},
- { 12, 5, 858, 403, 1270, 420, 171, 0, 171},
- { 9, 5, 1001, 453, 1270, 420, 171, 0, 171},
- { 12, 5, 858, 403, 1270, 420, 171, 0, 171},
- {143, 80, 836, 523, 1270, 420, 224, 0, 0},
- {143, 120, 1008, 643, 1270, 420, 0, 1, 0},
- { 1, 1, 1120, 821, 1516, 420, 0, 1, 0}, /*301b*/
- { 2, 1, 858, 503, 1584, 480, 0, 1, 0},
- { 3, 2, 1001, 533, 1270, 420, 0, 0, 0}
-};
-
-static const struct SiS_TVData XGI_StYPbPr525pData[] = {
- {1, 1, 1716, 525, 1270, 400, 50, 0, 760},
- {1, 1, 1716, 525, 1270, 350, 50, 0, 640},
- {1, 1, 1716, 525, 1270, 400, 0, 0, 720},
- {1, 1, 1716, 525, 1270, 350, 0, 0, 720},
- {1, 1, 1716, 525, 1270, 480, 0, 0, 760},
-};
-
-static const struct SiS_TVData XGI_ExtYPbPr750pData[] = {
- { 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 01 (320x350,640x350) */
- { 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 02 (360x400,720x400) */
- {24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 03 (720x350) */
- { 2, 1, 1100, 590, 1130, 640, 50, 0, 0}, /* 04 (320x240,640x480) */
- { 3, 2, 1210, 690, 1130, 660, 50, 0, 0}, /* 05 (400x300,800x600) */
- { 1, 1, 1375, 878, 1130, 640, 638, 0, 0}, /* 06 (1024x768) */
- { 2, 1, 858, 503, 1130, 480, 0, 1, 0}, /* 07 (720x480) */
- { 5, 4, 1815, 570, 1130, 660, 50, 0, 0},
- { 5, 3, 1100, 686, 1130, 640, 50, 1, 0},
- {10, 9, 1320, 830, 1130, 640, 50, 0, 0}
-};
-
-static const struct SiS_TVData XGI_StYPbPr750pData[] = {
- {1, 1, 1650, 750, 1280, 400, 50, 0, 760},
- {1, 1, 1650, 750, 1280, 350, 50, 0, 640},
- {1, 1, 1650, 750, 1280, 400, 0, 0, 720},
- {1, 1, 1650, 750, 1280, 350, 0, 0, 720},
- {1, 1, 1650, 750, 1280, 480, 0, 0, 760},
-};
-
-static const unsigned char XGI330_NTSCTiming[] = {
- 0x17, 0x1d, 0x03, 0x09, 0x05, 0x06, 0x0c, 0x0c,
- 0x94, 0x49, 0x01, 0x0a, 0x06, 0x0d, 0x04, 0x0a,
- 0x06, 0x14, 0x0d, 0x04, 0x0a, 0x00, 0x85, 0x1b,
- 0x0c, 0x50, 0x00, 0x97, 0x00, 0xda, 0x4a, 0x17,
- 0x7d, 0x05, 0x4b, 0x00, 0x00, 0xe2, 0x00, 0x02,
- 0x03, 0x0a, 0x65, 0x9d, 0x08, 0x92, 0x8f, 0x40,
- 0x60, 0x80, 0x14, 0x90, 0x8c, 0x60, 0x14, 0x50,
- 0x00, 0x40, 0x44, 0x00, 0xdb, 0x02, 0x3b, 0x00
-};
-
-static const unsigned char XGI330_PALTiming[] = {
- 0x21, 0x5A, 0x35, 0x6e, 0x04, 0x38, 0x3d, 0x70,
- 0x94, 0x49, 0x01, 0x12, 0x06, 0x3e, 0x35, 0x6d,
- 0x06, 0x14, 0x3e, 0x35, 0x6d, 0x00, 0x45, 0x2b,
- 0x70, 0x50, 0x00, 0x9b, 0x00, 0xd9, 0x5d, 0x17,
- 0x7d, 0x05, 0x45, 0x00, 0x00, 0xe8, 0x00, 0x02,
- 0x0d, 0x00, 0x68, 0xb0, 0x0b, 0x92, 0x8f, 0x40,
- 0x60, 0x80, 0x14, 0x90, 0x8c, 0x60, 0x14, 0x63,
- 0x00, 0x40, 0x3e, 0x00, 0xe1, 0x02, 0x28, 0x00
-};
-
-static const unsigned char XGI330_HiTVExtTiming[] = {
- 0x2D, 0x60, 0x2C, 0x5F, 0x08, 0x31, 0x3A, 0x64,
- 0x28, 0x02, 0x01, 0x3D, 0x06, 0x3E, 0x35, 0x6D,
- 0x06, 0x14, 0x3E, 0x35, 0x6D, 0x00, 0xC5, 0x3F,
- 0x64, 0x90, 0x33, 0x8C, 0x18, 0x36, 0x3E, 0x13,
- 0x2A, 0xDE, 0x2A, 0x44, 0x40, 0x2A, 0x44, 0x40,
- 0x8E, 0x8E, 0x82, 0x07, 0x0B,
- 0x92, 0x0F, 0x40, 0x60, 0x80, 0x14, 0x90, 0x8C,
- 0x60, 0x14, 0x3D, 0x63, 0x4F,
- 0x27, 0x00, 0xfc, 0xff, 0x6a, 0x00
-};
-
-static const unsigned char XGI330_HiTVSt1Timing[] = {
- 0x32, 0x65, 0x2C, 0x5F, 0x08, 0x31, 0x3A, 0x65,
- 0x28, 0x02, 0x01, 0x3D, 0x06, 0x3E, 0x35, 0x6D,
- 0x06, 0x14, 0x3E, 0x35, 0x6D, 0x00, 0xC5, 0x3F,
- 0x65, 0x90, 0x7B, 0xA8, 0x03, 0xF0, 0x87, 0x03,
- 0x11, 0x15, 0x11, 0xCF, 0x10, 0x11, 0xCF, 0x10,
- 0x35, 0x35, 0x3B, 0x69, 0x1D,
- 0x92, 0x0F, 0x40, 0x60, 0x80, 0x14, 0x90, 0x8C,
- 0x60, 0x04, 0x86, 0xAF, 0x5D,
- 0x0E, 0x00, 0xfc, 0xff, 0x2d, 0x00
-};
-
-static const unsigned char XGI330_HiTVSt2Timing[] = {
- 0x32, 0x65, 0x2C, 0x5F, 0x08, 0x31, 0x3A, 0x64,
- 0x28, 0x02, 0x01, 0x3D, 0x06, 0x3E, 0x35, 0x6D,
- 0x06, 0x14, 0x3E, 0x35, 0x6D, 0x00, 0xC5, 0x3F,
- 0x64, 0x90, 0x33, 0x8C, 0x18, 0x36, 0x3E, 0x13,
- 0x2A, 0xDE, 0x2A, 0x44, 0x40, 0x2A, 0x44, 0x40,
- 0x8E, 0x8E, 0x82, 0x07, 0x0B,
- 0x92, 0x0F, 0x40, 0x60, 0x80, 0x14, 0x90, 0x8C,
- 0x60, 0x14, 0x3D, 0x63, 0x4F,
- 0x27, 0x00, 0xFC, 0xff, 0x6a, 0x00
-};
-
-static const unsigned char XGI330_HiTVTextTiming[] = {
- 0x32, 0x65, 0x2C, 0x5F, 0x08, 0x31, 0x3A, 0x65,
- 0x28, 0x02, 0x01, 0x3D, 0x06, 0x3E, 0x35, 0x6D,
- 0x06, 0x14, 0x3E, 0x35, 0x6D, 0x00, 0xC5, 0x3F,
- 0x65, 0x90, 0xE7, 0xBC, 0x03, 0x0C, 0x97, 0x03,
- 0x14, 0x78, 0x14, 0x08, 0x20, 0x14, 0x08, 0x20,
- 0xC8, 0xC8, 0x3B, 0xD2, 0x26,
- 0x92, 0x0F, 0x40, 0x60, 0x80, 0x14, 0x90, 0x8C,
- 0x60, 0x04, 0x96, 0x72, 0x5C,
- 0x11, 0x00, 0xFC, 0xFF, 0x32, 0x00
-};
-
-static const unsigned char XGI330_YPbPr750pTiming[] = {
- 0x30, 0x1d, 0xe8, 0x09, 0x09, 0xed, 0x0c, 0x0c,
- 0x98, 0x0a, 0x01, 0x0c, 0x06, 0x0d, 0x04, 0x0a,
- 0x06, 0x14, 0x0d, 0x04, 0x0a, 0x00, 0x85, 0x3f,
- 0xed, 0x50, 0x70, 0x9f, 0x16, 0x59, 0x60, 0x13,
- 0x27, 0x0b, 0x27, 0xfc, 0x30, 0x27, 0x1c, 0xb0,
- 0x4b, 0x4b, 0x6f, 0x2f, 0x63,
- 0x92, 0x0F, 0x40, 0x60, 0x80, 0x14, 0x90, 0x8C,
- 0x60, 0x14, 0x73, 0x00, 0x40,
- 0x11, 0x00, 0xfc, 0xff, 0x32, 0x00
-};
-
-static const unsigned char XGI330_YPbPr525pTiming[] = {
- 0x3E, 0x11, 0x06, 0x09, 0x0b, 0x0c, 0x0c, 0x0c,
- 0x98, 0x0a, 0x01, 0x0d, 0x06, 0x0d, 0x04, 0x0a,
- 0x06, 0x14, 0x0d, 0x04, 0x0a, 0x00, 0x85, 0x3f,
- 0x0c, 0x50, 0xb2, 0x9f, 0x16, 0x59, 0x4f, 0x13,
- 0xad, 0x11, 0xad, 0x1d, 0x40, 0x8a, 0x3d, 0xb8,
- 0x51, 0x5e, 0x60, 0x49, 0x7d,
- 0x92, 0x0F, 0x40, 0x60, 0x80, 0x14, 0x90, 0x8C,
- 0x60, 0x14, 0x4B, 0x43, 0x41,
- 0x11, 0x00, 0xFC, 0xFF, 0x32, 0x00
-};
-
-static const unsigned char XGI330_YPbPr525iTiming[] = {
- 0x1B, 0x21, 0x03, 0x09, 0x05, 0x06, 0x0C, 0x0C,
- 0x94, 0x49, 0x01, 0x0A, 0x06, 0x0D, 0x04, 0x0A,
- 0x06, 0x14, 0x0D, 0x04, 0x0A, 0x00, 0x85, 0x1B,
- 0x0C, 0x50, 0x00, 0x97, 0x00, 0xDA, 0x4A, 0x17,
- 0x7D, 0x05, 0x4B, 0x00, 0x00, 0xE2, 0x00, 0x02,
- 0x03, 0x0A, 0x65, 0x9D, 0x08,
- 0x92, 0x8F, 0x40, 0x60, 0x80, 0x14, 0x90, 0x8C,
- 0x60, 0x14, 0x4B, 0x00, 0x40,
- 0x44, 0x00, 0xDB, 0x02, 0x3B, 0x00
-};
-
-static const unsigned char XGI330_HiTVGroup3Data[] = {
- 0x00, 0x1A, 0x22, 0x63, 0x62, 0x22, 0x08, 0x5F,
- 0x05, 0x21, 0xB2, 0xB2, 0x55, 0x77, 0x2A, 0xA6,
- 0x25, 0x2F, 0x47, 0xFA, 0xC8, 0xFF, 0x8E, 0x20,
- 0x8C, 0x6E, 0x60, 0x2E, 0x58, 0x48, 0x72, 0x44,
- 0x56, 0x36, 0x4F, 0x6E, 0x3F, 0x80, 0x00, 0x80,
- 0x4F, 0x7F, 0x03, 0xA8, 0x7D, 0x20, 0x1A, 0xA9,
- 0x14, 0x05, 0x03, 0x7E, 0x64, 0x31, 0x14, 0x75,
- 0x18, 0x05, 0x18, 0x05, 0x4C, 0xA8, 0x01
-};
-
-static const unsigned char XGI330_HiTVGroup3Simu[] = {
- 0x00, 0x1A, 0x22, 0x63, 0x62, 0x22, 0x08, 0x95,
- 0xDB, 0x20, 0xB8, 0xB8, 0x55, 0x47, 0x2A, 0xA6,
- 0x25, 0x2F, 0x47, 0xFA, 0xC8, 0xFF, 0x8E, 0x20,
- 0x8C, 0x6E, 0x60, 0x15, 0x26, 0xD3, 0xE4, 0x11,
- 0x56, 0x36, 0x4F, 0x6E, 0x3F, 0x80, 0x00, 0x80,
- 0x67, 0x36, 0x01, 0x47, 0x0E, 0x10, 0xBE, 0xB4,
- 0x01, 0x05, 0x03, 0x7E, 0x65, 0x31, 0x14, 0x75,
- 0x18, 0x05, 0x18, 0x05, 0x4C, 0xA8, 0x01
-};
-
-static const unsigned char XGI330_HiTVGroup3Text[] = {
- 0x00, 0x1A, 0x22, 0x63, 0x62, 0x22, 0x08, 0xA7,
- 0xF5, 0x20, 0xCE, 0xCE, 0x55, 0x47, 0x2A, 0xA6,
- 0x25, 0x2F, 0x47, 0xFA, 0xC8, 0xFF, 0x8E, 0x20,
- 0x8C, 0x6E, 0x60, 0x18, 0x2C, 0x0C, 0x20, 0x22,
- 0x56, 0x36, 0x4F, 0x6E, 0x3F, 0x80, 0x00, 0x80,
- 0x93, 0x3C, 0x01, 0x50, 0x2F, 0x10, 0xF4, 0xCA,
- 0x01, 0x05, 0x03, 0x7E, 0x65, 0x31, 0x14, 0x75,
- 0x18, 0x05, 0x18, 0x05, 0x4C, 0xA8, 0x01
-};
-
-static const unsigned char XGI330_Ren525pGroup3[] = {
- 0x00, 0x14, 0x15, 0x25, 0x55, 0x15, 0x0b, 0x13,
- 0xB1, 0x41, 0x62, 0x62, 0xFF, 0xF4, 0x45, 0xa6,
- 0x25, 0x2F, 0x67, 0xF6, 0xbf, 0xFF, 0x8E, 0x20,
- 0xAC, 0xDA, 0x60, 0xFe, 0x6A, 0x9A, 0x06, 0x10,
- 0xd1, 0x04, 0x18, 0x0a, 0xFF, 0x80, 0x00, 0x80,
- 0x3c, 0x77, 0x00, 0xEF, 0xE0, 0x10, 0xB0, 0xE0,
- 0x10, 0x4F, 0x0F, 0x0F, 0x05, 0x0F, 0x08, 0x6E,
- 0x1a, 0x1F, 0x25, 0x2a, 0x4C, 0xAA, 0x01
-};
-
-static const unsigned char XGI330_Ren750pGroup3[] = {
- 0x00, 0x14, 0x15, 0x25, 0x55, 0x15, 0x0b, 0x7a,
- 0x54, 0x41, 0xE7, 0xE7, 0xFF, 0xF4, 0x45, 0xa6,
- 0x25, 0x2F, 0x67, 0xF6, 0xbf, 0xFF, 0x8E, 0x20,
- 0xAC, 0x6A, 0x60, 0x2b, 0x52, 0xCD, 0x61, 0x10,
- 0x51, 0x04, 0x18, 0x0a, 0x1F, 0x80, 0x00, 0x80,
- 0xFF, 0xA4, 0x04, 0x2B, 0x94, 0x21, 0x72, 0x94,
- 0x26, 0x05, 0x01, 0x0F, 0xed, 0x0F, 0x0A, 0x64,
- 0x18, 0x1D, 0x23, 0x28, 0x4C, 0xAA, 0x01
-};
-
-static const struct SiS_LVDSData XGI_LVDS1024x768Data_1[] = {
- { 960, 438, 1344, 806}, /* 00 (320x200,320x400,640x200,640x400) */
- { 960, 388, 1344, 806}, /* 01 (320x350,640x350) */
- {1040, 438, 1344, 806}, /* 02 (360x400,720x400) */
- {1040, 388, 1344, 806}, /* 03 (720x350) */
- { 960, 518, 1344, 806}, /* 04 (320x240,640x480) */
- {1120, 638, 1344, 806}, /* 05 (400x300,800x600) */
- {1344, 806, 1344, 806} /* 06 (512x384,1024x768) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1024x768Data_2[] = {
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {1344, 806, 1344, 806},
- {800, 449, 1280, 801},
- {800, 525, 1280, 813}
-};
-
-static const struct SiS_LVDSData XGI_LVDS1280x1024Data_1[] = {
- {1048, 442, 1688, 1066},
- {1048, 392, 1688, 1066},
- {1048, 442, 1688, 1066},
- {1048, 392, 1688, 1066},
- {1048, 522, 1688, 1066},
- {1208, 642, 1688, 1066},
- {1432, 810, 1688, 1066},
- {1688, 1066, 1688, 1066}
-};
-
-#define XGI_LVDS1280x1024Data_2 XGI_LVDS1024x768Data_2
-
-static const struct SiS_LVDSData XGI_LVDS1400x1050Data_1[] = {
- {928, 416, 1688, 1066},
- {928, 366, 1688, 1066},
- {928, 416, 1688, 1066},
- {928, 366, 1688, 1066},
- {928, 496, 1688, 1066},
- {1088, 616, 1688, 1066},
- {1312, 784, 1688, 1066},
- {1568, 1040, 1688, 1066},
- {1688, 1066, 1688, 1066}
-};
-
-static const struct SiS_LVDSData XGI_LVDS1400x1050Data_2[] = {
- {1688, 1066, 1688, 1066},
- {1688, 1066, 1688, 1066},
- {1688, 1066, 1688, 1066},
- {1688, 1066, 1688, 1066},
- {1688, 1066, 1688, 1066},
- {1688, 1066, 1688, 1066},
- {1688, 1066, 1688, 1066},
- {1688, 1066, 1688, 1066},
- {1688, 1066, 1688, 1066}
-};
-
-/* ;;[ycchen] 12/05/02 LCDHTxLCDVT=2048x1320 */
-static const struct SiS_LVDSData XGI_LVDS1600x1200Data_1[] = {
- {1088, 520, 2048, 1320}, /* 00 (320x200,320x400,640x200,640x400) */
- {1088, 470, 2048, 1320}, /* 01 (320x350,640x350) */
- {1088, 520, 2048, 1320}, /* 02 (360x400,720x400) */
- {1088, 470, 2048, 1320}, /* 03 (720x350) */
- {1088, 600, 2048, 1320}, /* 04 (320x240,640x480) */
- {1248, 720, 2048, 1320}, /* 05 (400x300,800x600) */
- {1472, 888, 2048, 1320}, /* 06 (512x384,1024x768) */
- {1728, 1144, 2048, 1320}, /* 07 (640x512,1280x1024) */
- {1848, 1170, 2048, 1320}, /* 08 (1400x1050) */
- {2048, 1320, 2048, 1320} /* 09 (1600x1200) */
-};
-
-static const struct SiS_LVDSData XGI_LVDSNoScalingData[] = {
- { 800, 449, 800, 449}, /* 00 (320x200,320x400,640x200,640x400) */
- { 800, 449, 800, 449}, /* 01 (320x350,640x350) */
- { 800, 449, 800, 449}, /* 02 (360x400,720x400) */
- { 800, 449, 800, 449}, /* 03 (720x350) */
- { 800, 525, 800, 525}, /* 04 (640x480x60Hz) */
- {1056, 628, 1056, 628}, /* 05 (800x600x60Hz) */
- {1344, 806, 1344, 806}, /* 06 (1024x768x60Hz) */
- {1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz) */
- {1688, 1066, 1688, 1066}, /* 08 (1400x1050x60Hz) */
- {2160, 1250, 2160, 1250}, /* 09 (1600x1200x60Hz) */
- {1688, 806, 1688, 806} /* 0A (1280x768x60Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1024x768Data_1x75[] = {
- { 960, 438, 1312, 800}, /* 00 (320x200,320x400,640x200,640x400) */
- { 960, 388, 1312, 800}, /* 01 (320x350,640x350) */
- {1040, 438, 1312, 800}, /* 02 (360x400,720x400) */
- {1040, 388, 1312, 800}, /* 03 (720x350) */
- { 928, 512, 1312, 800}, /* 04 (320x240,640x480) */
- {1088, 632, 1312, 800}, /* 05 (400x300,800x600) */
- {1312, 800, 1312, 800}, /* 06 (512x384,1024x768) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1024x768Data_2x75[] = {
- {1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */
- {1312, 800, 1312, 800}, /* ; 02 (360x400,720x400) */
- {1312, 800, 1312, 800}, /* ; 03 (720x350) */
- {1312, 800, 1312, 800}, /* ; 04 (320x240,640x480) */
- {1312, 800, 1312, 800}, /* ; 05 (400x300,800x600) */
- {1312, 800, 1312, 800}, /* ; 06 (512x384,1024x768) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1280x1024Data_1x75[] = {
- {1048, 442, 1688, 1066 }, /* ; 00 (320x200,320x400,640x200,640x400) */
- {1048, 392, 1688, 1066 }, /* ; 01 (320x350,640x350) */
- {1128, 442, 1688, 1066 }, /* ; 02 (360x400,720x400) */
- {1128, 392, 1688, 1066 }, /* ; 03 (720x350) */
- {1048, 522, 1688, 1066 }, /* ; 04 (320x240,640x480) */
- {1208, 642, 1688, 1066 }, /* ; 05 (400x300,800x600) */
- {1432, 810, 1688, 1066 }, /* ; 06 (512x384,1024x768) */
- {1688, 1066, 1688, 1066 }, /* ; 06; 07 (640x512,1280x1024) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1280x1024Data_2x75[] = {
- {1688, 1066, 1688, 1066 }, /* ; 00 (320x200,320x400,640x200,640x400) */
- {1688, 1066, 1688, 1066 }, /* ; 01 (320x350,640x350) */
- {1688, 1066, 1688, 1066 }, /* ; 02 (360x400,720x400) */
- {1688, 1066, 1688, 1066 }, /* ; 03 (720x350) */
- {1688, 1066, 1688, 1066 }, /* ; 04 (320x240,640x480) */
- {1688, 1066, 1688, 1066 }, /* ; 05 (400x300,800x600) */
- {1688, 1066, 1688, 1066 }, /* ; 06 (512x384,1024x768) */
- {1688, 1066, 1688, 1066 }, /* ; 06; 07 (640x512,1280x1024) */
-};
-
-static const struct SiS_LVDSData XGI_LVDSNoScalingDatax75[] = {
- { 800, 449, 800, 449}, /* ; 00 (320x200,320x400,640x200,640x400) */
- { 800, 449, 800, 449}, /* ; 01 (320x350,640x350) */
- { 900, 449, 900, 449}, /* ; 02 (360x400,720x400) */
- { 900, 449, 900, 449}, /* ; 03 (720x350) */
- { 800, 500, 800, 500}, /* ; 04 (640x480x75Hz) */
- {1056, 625, 1056, 625}, /* ; 05 (800x600x75Hz) */
- {1312, 800, 1312, 800}, /* ; 06 (1024x768x75Hz) */
- {1688, 1066, 1688, 1066}, /* ; 07 (1280x1024x75Hz) */
- {1688, 1066, 1688, 1066}, /* ; 08 (1400x1050x75Hz)
- * ;;[ycchen] 12/19/02
- */
- {2160, 1250, 2160, 1250}, /* ; 09 (1600x1200x75Hz) */
- {1688, 806, 1688, 806}, /* ; 0A (1280x768x75Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1024x768Des_1[] = {
- {0, 1048, 0, 771}, /* 00 (320x200,320x400,640x200,640x400) */
- {0, 1048, 0, 771}, /* 01 (320x350,640x350) */
- {0, 1048, 0, 771}, /* 02 (360x400,720x400) */
- {0, 1048, 0, 771}, /* 03 (720x350) */
- {0, 1048, 0, 771}, /* 04 (640x480x60Hz) */
- {0, 1048, 0, 771}, /* 05 (800x600x60Hz) */
- {0, 1048, 805, 770} /* 06 (1024x768x60Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1024x768Des_2[] = {
- {1142, 856, 622, 587}, /* 00 (320x200,320x400,640x200,640x400) */
- {1142, 856, 597, 562}, /* 01 (320x350,640x350) */
- {1142, 856, 622, 587}, /* 02 (360x400,720x400) */
- {1142, 856, 597, 562}, /* 03 (720x350) */
- {1142, 1048, 722, 687}, /* 04 (640x480x60Hz) */
- {1232, 936, 722, 687}, /* 05 (800x600x60Hz) */
- { 0, 1048, 805, 771} /* 06 (1024x768x60Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1024x768Des_3[] = {
- {320, 24, 622, 587}, /* 00 (320x200,320x400,640x200,640x400) */
- {320, 24, 597, 562}, /* 01 (320x350,640x350) */
- {320, 24, 622, 587}, /* 02 (360x400,720x400) */
- {320, 24, 597, 562}, /* 03 (720x350) */
- {320, 24, 722, 687} /* 04 (640x480x60Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1280x1024Des_1[] = {
- {0, 1328, 0, 1025}, /* 00 (320x200,320x400,640x200,640x400) */
- {0, 1328, 0, 1025}, /* 01 (320x350,640x350) */
- {0, 1328, 0, 1025}, /* 02 (360x400,720x400) */
- {0, 1328, 0, 1025}, /* 03 (720x350) */
- {0, 1328, 0, 1025}, /* 04 (640x480x60Hz) */
- {0, 1328, 0, 1025}, /* 05 (800x600x60Hz) */
- {0, 1328, 0, 1025}, /* 06 (1024x768x60Hz) */
- {0, 1328, 1065, 1024} /* 07 (1280x1024x60Hz) */
-};
-
- /* The Display setting for DE Mode Panel */
-static const struct SiS_LVDSData XGI_LVDS1280x1024Des_2[] = {
- {1368, 1008, 752, 711}, /* 00 (320x200,320x400,640x200,640x400) */
- {1368, 1008, 729, 688}, /* 01 (320x350,640x350) */
- {1408, 1048, 752, 711}, /* 02 (360x400,720x400) */
- {1408, 1048, 729, 688}, /* 03 (720x350) */
- {1368, 1008, 794, 753}, /* 04 (640x480x60Hz) */
- {1448, 1068, 854, 813}, /* 05 (800x600x60Hz) */
- {1560, 1200, 938, 897}, /* 06 (1024x768x60Hz) */
- {0000, 1328, 0, 1025} /* 07 (1280x1024x60Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1400x1050Des_1[] = {
- {0, 1448, 0, 1051}, /* 00 (320x200,320x400,640x200,640x400) */
- {0, 1448, 0, 1051}, /* 01 (320x350,640x350) */
- {0, 1448, 0, 1051}, /* 02 (360x400,720x400) */
- {0, 1448, 0, 1051}, /* 03 (720x350) */
- {0, 1448, 0, 1051}, /* 04 (640x480x60Hz) */
- {0, 1448, 0, 1051}, /* 05 (800x600x60Hz) */
- {0, 1448, 0, 1051}, /* 06 (1024x768x60Hz) */
- {0, 1448, 0, 1051}, /* 07 (1280x1024x60Hz) */
- {0, 1448, 0, 1051} /* 08 (1400x1050x60Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1400x1050Des_2[] = {
- {1308, 1068, 781, 766}, /* 00 (320x200,320x400,640x200,640x400) */
- {1308, 1068, 781, 766}, /* 01 (320x350,640x350) */
- {1308, 1068, 781, 766}, /* 02 (360x400,720x400) */
- {1308, 1068, 781, 766}, /* 03 (720x350) */
- {1308, 1068, 781, 766}, /* 04 (640x480x60Hz) */
- {1388, 1148, 841, 826}, /* 05 (800x600x60Hz) */
- {1490, 1250, 925, 910}, /* 06 (1024x768x60Hz) */
- {1608, 1368, 1053, 1038}, /* 07 (1280x1024x60Hz) */
- { 0, 1448, 0, 1051} /* 08 (1400x1050x60Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1600x1200Des_1[] = {
- {0, 1664, 0, 1201}, /* 00 (320x200,320x400,640x200,640x400) */
- {0, 1664, 0, 1201}, /* 01 (320x350,640x350) */
- {0, 1664, 0, 1201}, /* 02 (360x400,720x400) */
- {0, 1664, 0, 1201}, /* 03 (720x350) */
- {0, 1664, 0, 1201}, /* 04 (640x480x60Hz) */
- {0, 1664, 0, 1201}, /* 05 (800x600x60Hz) */
- {0, 1664, 0, 1201}, /* 06 (1024x768x60Hz) */
- {0, 1664, 0, 1201}, /* 07 (1280x1024x60Hz) */
- {0, 1664, 0, 1201}, /* 08 (1400x1050x60Hz) */
- {0, 1664, 0, 1201} /* 09 (1600x1200x60Hz) */
-};
-
-static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[] = {
- {0, 648, 448, 405, 96, 2}, /* 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {0, 648, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
- {0, 648, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
- {0, 648, 448, 355, 96, 2}, /* 03 (720x350) */
- {0, 648, 1, 483, 96, 2}, /* 04 (640x480x60Hz) */
- {0, 840, 627, 600, 128, 4}, /* 05 (800x600x60Hz) */
- {0, 1048, 805, 770, 136, 6}, /* 06 (1024x768x60Hz) */
- {0, 1328, 0, 1025, 112, 3}, /* 07 (1280x1024x60Hz) */
- {0, 1438, 0, 1051, 112, 3}, /* 08 (1400x1050x60Hz)*/
- {0, 1664, 0, 1201, 192, 3}, /* 09 (1600x1200x60Hz) */
- {0, 1328, 0, 0771, 112, 6} /* 0A (1280x768x60Hz) */
-};
-
-/* ; 1024x768 Full-screen */
-static const struct SiS_LVDSData XGI_LVDS1024x768Des_1x75[] = {
- {0, 1040, 0, 769}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {0, 1040, 0, 769}, /* ; 01 (320x350,640x350) */
- {0, 1040, 0, 769}, /* ; 02 (360x400,720x400) */
- {0, 1040, 0, 769}, /* ; 03 (720x350) */
- {0, 1040, 0, 769}, /* ; 04 (640x480x75Hz) */
- {0, 1040, 0, 769}, /* ; 05 (800x600x75Hz) */
- {0, 1040, 0, 769} /* ; 06 (1024x768x75Hz) */
-};
-
-/* ; 1024x768 center-screen (Enh. Mode) */
-static const struct SiS_LVDSData XGI_LVDS1024x768Des_2x75[] = {
- {1142, 856, 622, 587}, /* 00 (320x200,320x400,640x200,640x400) */
- {1142, 856, 597, 562}, /* 01 (320x350,640x350) */
- {1142, 856, 622, 587}, /* 02 (360x400,720x400) */
- {1142, 856, 597, 562}, /* 03 (720x350) */
- {1142, 1048, 722, 687}, /* 04 (640x480x60Hz) */
- {1232, 936, 722, 687}, /* 05 (800x600x60Hz) */
- { 0, 1048, 805, 771} /* 06 (1024x768x60Hz) */
-};
-
-/* ; 1024x768 center-screen (St.Mode) */
-static const struct SiS_LVDSData XGI_LVDS1024x768Des_3x75[] = {
- {320, 24, 622, 587}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {320, 24, 597, 562}, /* ; 01 (320x350,640x350) */
- {320, 24, 622, 587}, /* ; 02 (360x400,720x400) */
- {320, 24, 597, 562}, /* ; 03 (720x350) */
- {320, 24, 722, 687} /* ; 04 (640x480x60Hz) */
-};
-
-static const struct SiS_LVDSData XGI_LVDS1280x1024Des_1x75[] = {
- {0, 1296, 0, 1025}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {0, 1296, 0, 1025}, /* ; 01 (320x350,640x350) */
- {0, 1296, 0, 1025}, /* ; 02 (360x400,720x400) */
- {0, 1296, 0, 1025}, /* ; 03 (720x350) */
- {0, 1296, 0, 1025}, /* ; 04 (640x480x75Hz) */
- {0, 1296, 0, 1025}, /* ; 05 (800x600x75Hz) */
- {0, 1296, 0, 1025}, /* ; 06 (1024x768x75Hz) */
- {0, 1296, 0, 1025} /* ; 07 (1280x1024x75Hz) */
-};
-
-/* The Display setting for DE Mode Panel */
-/* Set DE as default */
-static const struct SiS_LVDSData XGI_LVDS1280x1024Des_2x75[] = {
- {1368, 976, 752, 711}, /* ; 00 (320x200,320x400,640x200,640x400) */
- {1368, 976, 729, 688}, /* ; 01 (320x350,640x350) */
- {1408, 976, 752, 711}, /* ; 02 (360x400,720x400) */
- {1408, 976, 729, 688}, /* ; 03 (720x350) */
- {1368, 976, 794, 753}, /* ; 04 (640x480x75Hz) */
- {1448, 1036, 854, 813}, /* ; 05 (800x600x75Hz) */
- {1560, 1168, 938, 897}, /* ; 06 (1024x768x75Hz) */
- { 0, 1296, 0, 1025} /* ; 07 (1280x1024x75Hz) */
-};
-
-/* Scaling LCD 75Hz */
-static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[] = {
- {0, 648, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
- * 640x200,640x400)
- */
- {0, 648, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
- {0, 729, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
- {0, 729, 448, 355, 108, 2}, /* ; 03 (720x350) */
- {0, 656, 0, 481, 64, 3}, /* ; 04 (640x480x75Hz) */
- {0, 816, 0, 601, 80, 3}, /* ; 05 (800x600x75Hz) */
- {0, 1040, 0, 769, 96, 3}, /* ; 06 (1024x768x75Hz) */
- {0, 1296, 0, 1025, 144, 3}, /* ; 07 (1280x1024x75Hz) */
- {0, 1448, 0, 1051, 112, 3}, /* ; 08 (1400x1050x75Hz) */
- {0, 1664, 0, 1201, 192, 3}, /* ; 09 (1600x1200x75Hz) */
- {0, 1328, 0, 771, 112, 6} /* ; 0A (1280x768x75Hz) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_1_H[] = {
- { {0x4B, 0x27, 0x8F, 0x32, 0x1B, 0x00, 0x45, 0x00} }, /* 00 (320x) */
- { {0x4B, 0x27, 0x8F, 0x2B, 0x03, 0x00, 0x44, 0x00} }, /* 01 (360x) */
- { {0x55, 0x31, 0x99, 0x46, 0x1D, 0x00, 0x55, 0x00} }, /* 02 (400x) */
- { {0x63, 0x3F, 0x87, 0x4A, 0x93, 0x00, 0x01, 0x00} }, /* 03 (512x) */
- { {0x73, 0x4F, 0x97, 0x55, 0x86, 0x00, 0x05, 0x00} }, /* 04 (640x) */
- { {0x73, 0x4F, 0x97, 0x55, 0x86, 0x00, 0x05, 0x00} }, /* 05 (720x) */
- { {0x87, 0x63, 0x8B, 0x69, 0x1A, 0x00, 0x26, 0x00} }, /* 06 (800x) */
- { {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00} } /* 07 (1024x) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_1_H[] = {
- { {0x56, 0x27, 0x9A, 0x30, 0x1E, 0x00, 0x05, 0x00 } }, /* 00 (320x) */
- { {0x56, 0x27, 0x9A, 0x30, 0x1E, 0x00, 0x05, 0x00 } }, /* 01 (360x) */
- { {0x60, 0x31, 0x84, 0x3A, 0x88, 0x00, 0x01, 0x00 } }, /* 02 (400x) */
- { {0x6E, 0x3F, 0x92, 0x48, 0x96, 0x00, 0x01, 0x00 } }, /* 03 (512x) */
- { {0x7E, 0x4F, 0x82, 0x58, 0x06, 0x00, 0x06, 0x00 } }, /* 04 (640x) */
- { {0x7E, 0x4F, 0x82, 0x58, 0x06, 0x00, 0x06, 0x00 } }, /* 05 (720x) */
- { {0x92, 0x63, 0x96, 0x6C, 0x1A, 0x00, 0x06, 0x00 } }, /* 06 (800x) */
- { {0xAE, 0x7F, 0x92, 0x88, 0x96, 0x00, 0x02, 0x00 } }, /* 07 (1024x) */
- { {0xCE, 0x9F, 0x92, 0xA8, 0x16, 0x00, 0x07, 0x00 } } /* 08 (1280x) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_2_H[] = {
- { {0x63, 0x27, 0x87, 0x3B, 0x8C, 0x00, 0x01, 0x00} }, /* 00 (320x) */
- { {0x63, 0x27, 0x87, 0x3B, 0x8C, 0x00, 0x01, 0x00} }, /* 01 (360x) */
- { {0x63, 0x31, 0x87, 0x3D, 0x8E, 0x00, 0x01, 0x00} }, /* 02 (400x) */
- { {0x63, 0x3F, 0x87, 0x45, 0x96, 0x00, 0x01, 0x00} }, /* 03 (512x) */
- { {0xA3, 0x4F, 0x87, 0x6E, 0x9F, 0x00, 0x06, 0x00} }, /* 04 (640x) */
- { {0xA3, 0x4F, 0x87, 0x6E, 0x9F, 0x00, 0x06, 0x00} }, /* 05 (720x) */
- { {0xA3, 0x63, 0x87, 0x78, 0x89, 0x00, 0x02, 0x00} }, /* 06 (800x) */
- { {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00} } /* 07 (1024x) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_H[] = {
- { {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} }, /* 00 (320x) */
- { {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} }, /* 01 (360x) */
- { {0x7E, 0x40, 0x84, 0x49, 0x91, 0x00, 0x01, 0x00} }, /* 02 (400x) */
- { {0x7E, 0x47, 0x93, 0x50, 0x9E, 0x00, 0x01, 0x00} }, /* 03 (512x) */
- { {0xCE, 0x77, 0x8A, 0x80, 0x8E, 0x00, 0x02, 0x00} }, /* 04 (640x) */
- { {0xCE, 0x77, 0x8A, 0x80, 0x8E, 0x00, 0x02, 0x00} }, /* 05 (720x) */
- { {0xCE, 0x81, 0x94, 0x8A, 0x98, 0x00, 0x02, 0x00} }, /* 06 (800x) */
- { {0xCE, 0x8F, 0x82, 0x98, 0x06, 0x00, 0x07, 0x00} }, /* 07 (1024x) */
- { {0xCE, 0x9F, 0x92, 0xA8, 0x16, 0x00, 0x07, 0x00} } /* 08 (1280x) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11400x1050_1_H[] = {
- { {0x47, 0x27, 0x8B, 0x2C, 0x1A, 0x00, 0x05, 0x00} }, /* 00 (320x) */
- { {0x47, 0x27, 0x8B, 0x30, 0x1E, 0x00, 0x05, 0x00} }, /* 01 (360x) */
- { {0x51, 0x31, 0x95, 0x36, 0x04, 0x00, 0x01, 0x00} }, /* 02 (400x) */
- { {0x5F, 0x3F, 0x83, 0x44, 0x92, 0x00, 0x01, 0x00} }, /* 03 (512x) */
- { {0x6F, 0x4F, 0x93, 0x54, 0x82, 0x00, 0x05, 0x00} }, /* 04 (640x) */
- { {0x6F, 0x4F, 0x93, 0x54, 0x82, 0x00, 0x05, 0x00} }, /* 05 (720x) */
- { {0x83, 0x63, 0x87, 0x68, 0x16, 0x00, 0x06, 0x00} }, /* 06 (800x) */
- { {0x9F, 0x7F, 0x83, 0x84, 0x92, 0x00, 0x02, 0x00} }, /* 07 (1024x) */
- { {0xBF, 0x9F, 0x83, 0xA4, 0x12, 0x00, 0x07, 0x00} }, /* 08 (1280x) */
- { {0xCE, 0xAE, 0x92, 0xB3, 0x01, 0x00, 0x03, 0x00} } /* 09 (1400x) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11400x1050_2_H[] = {
- { {0x76, 0x3F, 0x83, 0x45, 0x8C, 0x00, 0x41, 0x00} }, /* 00 (320x) */
- { {0x76, 0x3F, 0x83, 0x45, 0x8C, 0x00, 0x41, 0x00} }, /* 01 (360x) */
- { {0x76, 0x31, 0x9A, 0x48, 0x9F, 0x00, 0x41, 0x00} }, /* 02 (400x) */
- { {0x76, 0x3F, 0x9A, 0x4F, 0x96, 0x00, 0x41, 0x00} }, /* 03 (512x) */
- { {0xCE, 0x7E, 0x82, 0x87, 0x9E, 0x00, 0x02, 0x00} }, /* 04 (640x) */
- { {0xCE, 0x7E, 0x82, 0x87, 0x9E, 0x00, 0x02, 0x00} }, /* 05 (720x) */
- { {0xCE, 0x63, 0x92, 0x96, 0x04, 0x00, 0x07, 0x00} }, /* 06 (800x) */
- { {0xCE, 0x7F, 0x92, 0xA4, 0x12, 0x00, 0x07, 0x00} }, /* 07 (1024x) */
- { {0xCE, 0x9F, 0x92, 0xB4, 0x02, 0x00, 0x03, 0x00} }, /* 08 (1280x) */
- { {0xCE, 0xAE, 0x92, 0xBC, 0x0A, 0x00, 0x03, 0x00} } /* 09 (1400x) */
-};
-
-/* ;302lv channelA [ycchen] 12/05/02 LCDHT=2048 */
-/* ; CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11600x1200_1_H[] = {
- { {0x5B, 0x27, 0x9F, 0x32, 0x0A, 0x00, 0x01, 0x00} }, /* 00 (320x) */
- { {0x5B, 0x27, 0x9F, 0x32, 0x0A, 0x00, 0x01, 0x00} }, /* 01 (360x) */
- { {0x65, 0x31, 0x89, 0x3C, 0x94, 0x00, 0x01, 0x00} }, /* 02 (400x) */
- { {0x73, 0x3F, 0x97, 0x4A, 0x82, 0x00, 0x05, 0x00} }, /* 03 (512x) */
- { {0x83, 0x4F, 0x87, 0x51, 0x09, 0x00, 0x06, 0x00} }, /* 04 (640x) */
- { {0x83, 0x4F, 0x87, 0x51, 0x09, 0x00, 0x06, 0x00} }, /* 05 (720x) */
- { {0x97, 0x63, 0x9B, 0x65, 0x1D, 0x00, 0x06, 0xF0} }, /* 06 (800x) */
- { {0xB3, 0x7F, 0x97, 0x81, 0x99, 0x00, 0x02, 0x00} }, /* 07 (1024x) */
- { {0xD3, 0x9F, 0x97, 0xA1, 0x19, 0x00, 0x07, 0x00} }, /* 08 (1280x) */
- { {0xE2, 0xAE, 0x86, 0xB9, 0x91, 0x00, 0x03, 0x00} }, /* 09 (1400x) */
- { {0xFB, 0xC7, 0x9F, 0xC9, 0x81, 0x00, 0x07, 0x00} } /* 0A (1600x) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A+CR09(5->7) */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_1_V[] = {
- { {0x97, 0x1F, 0x60, 0x87, 0x5D, 0x83, 0x10} }, /* 00 (x350) */
- { {0xB4, 0x1F, 0x92, 0x89, 0x8F, 0xB5, 0x30} }, /* 01 (x400) */
- { {0x04, 0x3E, 0xE2, 0x89, 0xDF, 0x05, 0x00} }, /* 02 (x480) */
- { {0x7C, 0xF0, 0x5A, 0x8F, 0x57, 0x7D, 0xA0} }, /* 03 (x600) */
- { {0x24, 0xF5, 0x02, 0x88, 0xFF, 0x25, 0x90} } /* 04 (x768) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_2_V[] = {
- { {0x24, 0xBB, 0x31, 0x87, 0x5D, 0x25, 0x30} }, /* 00 (x350) */
- { {0x24, 0xBB, 0x4A, 0x80, 0x8F, 0x25, 0x30} }, /* 01 (x400) */
- { {0x24, 0xBB, 0x72, 0x88, 0xDF, 0x25, 0x30} }, /* 02 (x480) */
- { {0x24, 0xF1, 0xAE, 0x84, 0x57, 0x25, 0xB0} }, /* 03 (x600) */
- { {0x24, 0xF5, 0x02, 0x88, 0xFF, 0x25, 0x90} } /* 04 (x768) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_V[] = {
- { {0x86, 0x1F, 0x5E, 0x82, 0x5D, 0x87, 0x00} }, /* 00 (x350) */
- { {0xB8, 0x1F, 0x90, 0x84, 0x8F, 0xB9, 0x30} }, /* 01 (x400) */
- { {0x08, 0x3E, 0xE0, 0x84, 0xDF, 0x09, 0x00} }, /* 02 (x480) */
- { {0x80, 0xF0, 0x58, 0x8C, 0x57, 0x81, 0xA0} }, /* 03 (x600) */
- { {0x28, 0xF5, 0x00, 0x84, 0xFF, 0x29, 0x90} }, /* 04 (x768) */
- { {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* 05 (x1024) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_V[] = {
- { {0x28, 0xD2, 0xAF, 0x83, 0xAE, 0xD8, 0xA1} }, /* 00 (x350) */
- { {0x28, 0xD2, 0xC8, 0x8C, 0xC7, 0xF2, 0x81} }, /* 01 (x400) */
- { {0x28, 0xD2, 0xF0, 0x84, 0xEF, 0x1A, 0xB1} }, /* 02 (x480) */
- { {0x28, 0xDE, 0x2C, 0x8F, 0x2B, 0x56, 0x91} }, /* 03 (x600) */
- { {0x28, 0xDE, 0x80, 0x83, 0x7F, 0xAA, 0x91} }, /* 04 (x768) */
- { {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* 05 (x1024) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11400x1050_1_V[] = {
- { {0x6C, 0x1F, 0x60, 0x84, 0x5D, 0x6D, 0x10} }, /* 00 (x350) */
- { {0x9E, 0x1F, 0x93, 0x86, 0x8F, 0x9F, 0x30} }, /* 01 (x400) */
- { {0xEE, 0x1F, 0xE2, 0x86, 0xDF, 0xEF, 0x10} }, /* 02 (x480) */
- { {0x66, 0xF0, 0x5A, 0x8e, 0x57, 0x67, 0xA0} }, /* 03 (x600) */
- { {0x0E, 0xF5, 0x02, 0x86, 0xFF, 0x0F, 0x90} }, /* 04 (x768) */
- { {0x0E, 0x5A, 0x02, 0x86, 0xFF, 0x0F, 0x89} }, /* 05 (x1024) */
- { {0x28, 0x10, 0x1A, 0x80, 0x19, 0x29, 0x0F} } /* 06 (x1050) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11400x1050_2_V[] = {
- { {0x28, 0x92, 0xB6, 0x83, 0xB5, 0xCF, 0x81} }, /* 00 (x350) */
- { {0x28, 0x92, 0xD5, 0x82, 0xD4, 0xEE, 0x81} }, /* 01 (x400) */
- { {0x28, 0x92, 0xFD, 0x8A, 0xFC, 0x16, 0xB1} }, /* 02 (x480) */
- { {0x28, 0xD4, 0x39, 0x86, 0x57, 0x29, 0x81} }, /* 03 (x600) */
- { {0x28, 0xD4, 0x8D, 0x9A, 0xFF, 0x29, 0xA1} }, /* 04 (x768) */
- { {0x28, 0x5A, 0x0D, 0x9A, 0xFF, 0x29, 0xA9} }, /* 05 (x1024) */
- { {0x28, 0x10, 0x1A, 0x87, 0x19, 0x29, 0x8F} } /* 06 (x1050) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A+CR09(5->7) */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11600x1200_1_V[] = {
- { {0xd4, 0x1F, 0x81, 0x84, 0x5D, 0xd5, 0x10} }, /* 00 (x350) */
- { {0x06, 0x3e, 0xb3, 0x86, 0x8F, 0x07, 0x20} }, /* 01 (x400) */
- { {0x56, 0xba, 0x03, 0x86, 0xDF, 0x57, 0x00} }, /* 02 (x480) */
- { {0xce, 0xF0, 0x7b, 0x8e, 0x57, 0xcf, 0xa0} }, /* 03 (x600) */
- { {0x76, 0xF5, 0x23, 0x86, 0xFF, 0x77, 0x90} }, /* 04 (x768) */
- { {0x76, 0x5A, 0x23, 0x86, 0xFF, 0x77, 0x89} }, /* 05 (x1024) */
- { {0x90, 0x10, 0x1A, 0x8E, 0x19, 0x91, 0x2F} }, /* 06 (x1050) */
- { {0x26, 0x11, 0xd3, 0x86, 0xaF, 0x27, 0x3f} } /* 07 (x1200) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_1_Hx75[] = {
- { {0x4B, 0x27, 0x8F, 0x32, 0x1B, 0x00, 0x45, 0x00} },/* ; 00 (320x) */
- { {0x4B, 0x27, 0x8F, 0x2B, 0x03, 0x00, 0x44, 0x00} },/* ; 01 (360x) */
- { {0x55, 0x31, 0x99, 0x46, 0x1D, 0x00, 0x55, 0x00} },/* ; 02 (400x) */
- { {0x63, 0x3F, 0x87, 0x4A, 0x93, 0x00, 0x01, 0x00} },/* ; 03 (512x) */
- { {0x6F, 0x4F, 0x93, 0x54, 0x80, 0x00, 0x05, 0x00} },/* ; 04 (640x) */
- { {0x6F, 0x4F, 0x93, 0x54, 0x80, 0x00, 0x05, 0x00} },/* ; 05 (720x) */
- { {0x83, 0x63, 0x87, 0x68, 0x14, 0x00, 0x26, 0x00} },/* ; 06 (800x) */
- { {0x9F, 0x7F, 0x83, 0x85, 0x91, 0x00, 0x02, 0x00} } /* ; 07 (1024x) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A+CR09(5->7) */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_1_Vx75[] = {
- { {0x97, 0x1F, 0x60, 0x87, 0x5D, 0x83, 0x10} },/* ; 00 (x350) */
- { {0xB4, 0x1F, 0x92, 0x89, 0x8F, 0xB5, 0x30} },/* ; 01 (x400) */
- { {0xFE, 0x1F, 0xE0, 0x84, 0xDF, 0xFF, 0x10} },/* ; 02 (x480) */
- { {0x76, 0xF0, 0x58, 0x8C, 0x57, 0x77, 0xA0} },/* ; 03 (x600) */
- { {0x1E, 0xF5, 0x00, 0x83, 0xFF, 0x1F, 0x90} } /* ; 04 (x768) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11024x768_2_Hx75[] = {
- { {0x63, 0x27, 0x87, 0x3B, 0x8C, 0x00, 0x01, 0x00} },/* ; 00 (320x) */
- { {0x63, 0x27, 0x87, 0x3B, 0x8C, 0x00, 0x01, 0x00} },/* ; 01 (360x) */
- { {0x63, 0x31, 0x87, 0x3D, 0x8E, 0x00, 0x01, 0x00} },/* ; 02 (400x) */
- { {0x63, 0x3F, 0x87, 0x45, 0x96, 0x00, 0x01, 0x00} },/* ; 03 (512x) */
- { {0xA3, 0x4F, 0x87, 0x6E, 0x9F, 0x00, 0x06, 0x00} },/* ; 04 (640x) */
- { {0xA3, 0x4F, 0x87, 0x6E, 0x9F, 0x00, 0x06, 0x00} },/* ; 05 (720x) */
- { {0xA3, 0x63, 0x87, 0x78, 0x89, 0x00, 0x02, 0x00} },/* ; 06 (800x) */
- { {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00} } /* ; 07 (1024x) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11024x768_2_Vx75[] = {
- { {0x24, 0xBB, 0x31, 0x87, 0x5D, 0x25, 0x30} },/* ; 00 (x350) */
- { {0x24, 0xBB, 0x4A, 0x80, 0x8F, 0x25, 0x30} },/* ; 01 (x400) */
- { {0x24, 0xBB, 0x72, 0x88, 0xDF, 0x25, 0x30} },/* ; 02 (x480) */
- { {0x24, 0xF1, 0xAE, 0x84, 0x57, 0x25, 0xB0} },/* ; 03 (x600) */
- { {0x24, 0xF5, 0x02, 0x88, 0xFF, 0x25, 0x90} } /* ; 04 (x768) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_1_Hx75[] = {
- { {0x56, 0x27, 0x9A, 0x30, 0x1E, 0x00, 0x05, 0x00} },/* ; 00 (320x) */
- { {0x56, 0x27, 0x9A, 0x30, 0x1E, 0x00, 0x05, 0x00} },/* ; 01 (360x) */
- { {0x60, 0x31, 0x84, 0x3A, 0x88, 0x00, 0x01, 0x00} },/* ; 02 (400x) */
- { {0x6E, 0x3F, 0x92, 0x48, 0x96, 0x00, 0x01, 0x00} },/* ; 03 (512x) */
- { {0x7E, 0x4F, 0x82, 0x54, 0x06, 0x00, 0x06, 0x00} },/* ; 04 (640x) */
- { {0x7E, 0x4F, 0x82, 0x54, 0x06, 0x00, 0x06, 0x00} },/* ; 05 (720x) */
- { {0x92, 0x63, 0x96, 0x68, 0x1A, 0x00, 0x06, 0x00} },/* ; 06 (800x) */
- { {0xAE, 0x7F, 0x92, 0x84, 0x96, 0x00, 0x02, 0x00} },/* ; 07 (1024x) */
- { {0xCE, 0x9F, 0x92, 0xA5, 0x17, 0x00, 0x07, 0x00} } /* ; 08 (1280x) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[] = {
- { {0x86, 0xD1, 0xBC, 0x80, 0xBB, 0xE5, 0x00} },/* ; 00 (x350) */
- { {0xB8, 0x1F, 0x90, 0x84, 0x8F, 0xB9, 0x30} },/* ; 01 (x400) */
- { {0x08, 0x3E, 0xE0, 0x84, 0xDF, 0x09, 0x00} },/* ; 02 (x480) */
- { {0x80, 0xF0, 0x58, 0x8C, 0x57, 0x81, 0xA0} },/* ; 03 (x600) */
- { {0x28, 0xF5, 0x00, 0x84, 0xFF, 0x29, 0x90} },/* ; 04 (x768) */
- { {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */
-};
-
-/* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
-static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[] = {
- { {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} },/* ; 00 (320x) */
- { {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} },/* ; 01 (360x) */
- { {0x7E, 0x40, 0x84, 0x49, 0x91, 0x00, 0x01, 0x00} },/* ; 02 (400x) */
- { {0x7E, 0x47, 0x93, 0x50, 0x9E, 0x00, 0x01, 0x00} },/* ; 03 (512x) */
- { {0xCE, 0x77, 0x8A, 0x80, 0x8E, 0x00, 0x02, 0x00} },/* ; 04 (640x) */
- { {0xCE, 0x77, 0x8A, 0x80, 0x8E, 0x00, 0x02, 0x00} },/* ; 05 (720x) */
- { {0xCE, 0x81, 0x94, 0x8A, 0x98, 0x00, 0x02, 0x00} },/* ; 06 (800x) */
- { {0xCE, 0x8F, 0x82, 0x98, 0x06, 0x00, 0x07, 0x00} },/* ; 07 (1024x) */
- { {0xCE, 0x9F, 0x92, 0xA8, 0x16, 0x00, 0x07, 0x00} } /* ; 08 (1280x) */
-};
-
-/* CR06,CR07,CR10,CR11,CR15,CR16,SR0A */
-static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_2_Vx75[] = {
- { {0x28, 0xD2, 0xAF, 0x83, 0xAE, 0xD8, 0xA1} },/* ; 00 (x350) */
- { {0x28, 0xD2, 0xC8, 0x8C, 0xC7, 0xF2, 0x81} },/* ; 01 (x400) */
- { {0x28, 0xD2, 0xF0, 0x84, 0xEF, 0x1A, 0xB1} },/* ; 02 (x480) */
- { {0x28, 0xDE, 0x2C, 0x8F, 0x2B, 0x56, 0x91} },/* ; 03 (x600) */
- { {0x28, 0xDE, 0x80, 0x83, 0x7F, 0xAA, 0x91} },/* ; 04 (x768) */
- { {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */
-};
-
-/*add for new UNIVGABIOS*/
-static const struct XGI330_LCDDataTablStruct XGI_LCDDataTable[] = {
- {Panel_1024x768, 0x0019, 0x0001, XGI_ExtLCD1024x768Data },
- {Panel_1024x768, 0x0019, 0x0000, XGI_StLCD1024x768Data },
- {Panel_1024x768, 0x0018, 0x0010, XGI_CetLCD1024x768Data },
- {Panel_1280x1024, 0x0019, 0x0001, XGI_ExtLCD1280x1024Data },
- {Panel_1280x1024, 0x0019, 0x0000, XGI_StLCD1280x1024Data },
- {Panel_1280x1024, 0x0018, 0x0010, XGI_CetLCD1280x1024Data },
- {Panel_1400x1050, 0x0019, 0x0001, xgifb_lcd_1400x1050 },
- {Panel_1400x1050, 0x0019, 0x0000, xgifb_lcd_1400x1050 },
- {Panel_1400x1050, 0x0018, 0x0010, XGI_CetLCD1400x1050Data },
- {Panel_1600x1200, 0x0019, 0x0001, XGI_ExtLCD1600x1200Data },
- {Panel_1600x1200, 0x0019, 0x0000, XGI_StLCD1600x1200Data },
- {PanelRef60Hz, 0x0008, 0x0008, XGI_NoScalingData },
- {Panel_1024x768x75, 0x0019, 0x0001, XGI_ExtLCD1024x768x75Data },
- {Panel_1024x768x75, 0x0019, 0x0000, XGI_ExtLCD1024x768x75Data },
- {Panel_1024x768x75, 0x0018, 0x0010, XGI_CetLCD1024x768x75Data },
- {Panel_1280x1024x75, 0x0019, 0x0001, xgifb_lcd_1280x1024x75 },
- {Panel_1280x1024x75, 0x0019, 0x0000, xgifb_lcd_1280x1024x75 },
- {Panel_1280x1024x75, 0x0018, 0x0010, XGI_CetLCD1280x1024x75Data },
- {PanelRef75Hz, 0x0008, 0x0008, XGI_NoScalingDatax75 },
- {0xFF, 0x0000, 0x0000, NULL } /* End of table */
-};
-
-static const struct XGI330_LCDDataTablStruct XGI_LCDDesDataTable[] = {
- {Panel_1024x768, 0x0019, 0x0001, XGI_ExtLCDDes1024x768Data },
- {Panel_1024x768, 0x0019, 0x0000, XGI_StLCDDes1024x768Data },
- {Panel_1024x768, 0x0018, 0x0010, XGI_CetLCDDes1024x768Data },
- {Panel_1280x1024, 0x0019, 0x0001, XGI_ExtLCDDes1280x1024Data },
- {Panel_1280x1024, 0x0019, 0x0000, XGI_StLCDDes1280x1024Data },
- {Panel_1280x1024, 0x0018, 0x0010, XGI_CetLCDDes1280x1024Data },
- {Panel_1400x1050, 0x0019, 0x0001, xgifb_lcddes_1400x1050 },
- {Panel_1400x1050, 0x0019, 0x0000, xgifb_lcddes_1400x1050 },
- {Panel_1400x1050, 0x0418, 0x0010, XGI_CetLCDDes1400x1050Data },
- {Panel_1400x1050, 0x0418, 0x0410, XGI_CetLCDDes1400x1050Data2 },
- {Panel_1600x1200, 0x0019, 0x0001, XGI_ExtLCDDes1600x1200Data },
- {Panel_1600x1200, 0x0019, 0x0000, XGI_StLCDDes1600x1200Data },
- {PanelRef60Hz, 0x0008, 0x0008, XGI_NoScalingDesData },
- {Panel_1024x768x75, 0x0019, 0x0001, xgifb_lcddes_1024x768x75 },
- {Panel_1024x768x75, 0x0019, 0x0000, xgifb_lcddes_1024x768x75 },
- {Panel_1024x768x75, 0x0018, 0x0010, XGI_CetLCDDes1024x768x75Data },
- {Panel_1280x1024x75, 0x0019, 0x0001, xgifb_lcddes_1280x1024x75 },
- {Panel_1280x1024x75, 0x0019, 0x0000, xgifb_lcddes_1280x1024x75 },
- {Panel_1280x1024x75, 0x0018, 0x0010, XGI_CetLCDDes1280x1024x75Data },
- {PanelRef75Hz, 0x0008, 0x0008, XGI_NoScalingDesDatax75 },
- {0xFF, 0x0000, 0x0000, NULL }
-};
-
-static const struct XGI330_LCDDataTablStruct xgifb_lcddldes[] = {
- {Panel_1024x768, 0x0019, 0x0001, XGI_ExtLCDDes1024x768Data },
- {Panel_1024x768, 0x0019, 0x0000, XGI_StLCDDes1024x768Data },
- {Panel_1024x768, 0x0018, 0x0010, XGI_CetLCDDes1024x768Data },
- {Panel_1280x1024, 0x0019, 0x0001, XGI_ExtLCDDLDes1280x1024Data },
- {Panel_1280x1024, 0x0019, 0x0000, XGI_StLCDDLDes1280x1024Data },
- {Panel_1280x1024, 0x0018, 0x0010, XGI_CetLCDDLDes1280x1024Data },
- {Panel_1400x1050, 0x0019, 0x0001, xgifb_lcddldes_1400x1050 },
- {Panel_1400x1050, 0x0019, 0x0000, xgifb_lcddldes_1400x1050 },
- {Panel_1400x1050, 0x0418, 0x0010, XGI_CetLCDDes1400x1050Data },
- {Panel_1400x1050, 0x0418, 0x0410, XGI_CetLCDDes1400x1050Data2 },
- {Panel_1600x1200, 0x0019, 0x0001, XGI_ExtLCDDLDes1600x1200Data },
- {Panel_1600x1200, 0x0019, 0x0000, XGI_StLCDDLDes1600x1200Data },
- {PanelRef60Hz, 0x0008, 0x0008, XGI_NoScalingDesData },
- {Panel_1024x768x75, 0x0019, 0x0001, xgifb_lcddes_1024x768x75 },
- {Panel_1024x768x75, 0x0019, 0x0000, xgifb_lcddes_1024x768x75 },
- {Panel_1024x768x75, 0x0018, 0x0010, XGI_CetLCDDes1024x768x75Data },
- {Panel_1280x1024x75, 0x0019, 0x0001, xgifb_lcddldes_1280x1024x75 },
- {Panel_1280x1024x75, 0x0019, 0x0000, xgifb_lcddldes_1280x1024x75 },
- {Panel_1280x1024x75, 0x0018, 0x0010, XGI_CetLCDDLDes1280x1024x75Data },
- {PanelRef75Hz, 0x0008, 0x0008, XGI_NoScalingDesDatax75 },
- {0xFF, 0x0000, 0x0000, NULL }
-};
-
-static const struct XGI330_LCDDataTablStruct xgifb_epllcd_crt1_h[] = {
- {Panel_1024x768, 0x0018, 0x0000, XGI_LVDSCRT11024x768_1_H },
- {Panel_1024x768, 0x0018, 0x0010, XGI_LVDSCRT11024x768_2_H },
- {Panel_1280x1024, 0x0018, 0x0000, XGI_LVDSCRT11280x1024_1_H },
- {Panel_1280x1024, 0x0018, 0x0010, XGI_LVDSCRT11280x1024_2_H },
- {Panel_1400x1050, 0x0018, 0x0000, XGI_LVDSCRT11400x1050_1_H },
- {Panel_1400x1050, 0x0018, 0x0010, XGI_LVDSCRT11400x1050_2_H },
- {Panel_1600x1200, 0x0018, 0x0000, XGI_LVDSCRT11600x1200_1_H },
- {Panel_1024x768x75, 0x0018, 0x0000, XGI_LVDSCRT11024x768_1_Hx75 },
- {Panel_1024x768x75, 0x0018, 0x0010, XGI_LVDSCRT11024x768_2_Hx75 },
- {Panel_1280x1024x75, 0x0018, 0x0000, XGI_LVDSCRT11280x1024_1_Hx75 },
- {Panel_1280x1024x75, 0x0018, 0x0010, XGI_LVDSCRT11280x1024_2_Hx75 },
- {0xFF, 0x0000, 0x0000, NULL }
-};
-
-static const struct XGI330_LCDDataTablStruct xgifb_epllcd_crt1_v[] = {
- {Panel_1024x768, 0x0018, 0x0000, XGI_LVDSCRT11024x768_1_V },
- {Panel_1024x768, 0x0018, 0x0010, XGI_LVDSCRT11024x768_2_V },
- {Panel_1280x1024, 0x0018, 0x0000, XGI_LVDSCRT11280x1024_1_V },
- {Panel_1280x1024, 0x0018, 0x0010, XGI_LVDSCRT11280x1024_2_V },
- {Panel_1400x1050, 0x0018, 0x0000, XGI_LVDSCRT11400x1050_1_V },
- {Panel_1400x1050, 0x0018, 0x0010, XGI_LVDSCRT11400x1050_2_V },
- {Panel_1600x1200, 0x0018, 0x0000, XGI_LVDSCRT11600x1200_1_V },
- {Panel_1024x768x75, 0x0018, 0x0000, XGI_LVDSCRT11024x768_1_Vx75 },
- {Panel_1024x768x75, 0x0018, 0x0010, XGI_LVDSCRT11024x768_2_Vx75 },
- {Panel_1280x1024x75, 0x0018, 0x0000, XGI_LVDSCRT11280x1024_1_Vx75 },
- {Panel_1280x1024x75, 0x0018, 0x0010, XGI_LVDSCRT11280x1024_2_Vx75 },
- {0xFF, 0x0000, 0x0000, NULL }
-};
-
-static const struct XGI330_LCDDataTablStruct XGI_EPLLCDDataPtr[] = {
- {Panel_1024x768, 0x0018, 0x0000, XGI_LVDS1024x768Data_1 },
- {Panel_1024x768, 0x0018, 0x0010, XGI_LVDS1024x768Data_2 },
- {Panel_1280x1024, 0x0018, 0x0000, XGI_LVDS1280x1024Data_1 },
- {Panel_1280x1024, 0x0018, 0x0010, XGI_LVDS1280x1024Data_2 },
- {Panel_1400x1050, 0x0018, 0x0000, XGI_LVDS1400x1050Data_1 },
- {Panel_1400x1050, 0x0018, 0x0010, XGI_LVDS1400x1050Data_2 },
- {Panel_1600x1200, 0x0018, 0x0000, XGI_LVDS1600x1200Data_1 },
- {PanelRef60Hz, 0x0008, 0x0008, XGI_LVDSNoScalingData },
- {Panel_1024x768x75, 0x0018, 0x0000, XGI_LVDS1024x768Data_1x75 },
- {Panel_1024x768x75, 0x0018, 0x0010, XGI_LVDS1024x768Data_2x75 },
- {Panel_1280x1024x75, 0x0018, 0x0000, XGI_LVDS1280x1024Data_1x75 },
- {Panel_1280x1024x75, 0x0018, 0x0010, XGI_LVDS1280x1024Data_2x75 },
- {PanelRef75Hz, 0x0008, 0x0008, XGI_LVDSNoScalingDatax75 },
- {0xFF, 0x0000, 0x0000, NULL }
-};
-
-static const struct XGI330_LCDDataTablStruct XGI_EPLLCDDesDataPtr[] = {
- {Panel_1024x768, 0x0018, 0x0000, XGI_LVDS1024x768Des_1 },
- {Panel_1024x768, 0x0618, 0x0410, XGI_LVDS1024x768Des_3 },
- {Panel_1024x768, 0x0018, 0x0010, XGI_LVDS1024x768Des_2 },
- {Panel_1280x1024, 0x0018, 0x0000, XGI_LVDS1280x1024Des_1 },
- {Panel_1280x1024, 0x0018, 0x0010, XGI_LVDS1280x1024Des_2 },
- {Panel_1400x1050, 0x0018, 0x0000, XGI_LVDS1400x1050Des_1 },
- {Panel_1400x1050, 0x0018, 0x0010, XGI_LVDS1400x1050Des_2 },
- {Panel_1600x1200, 0x0018, 0x0000, XGI_LVDS1600x1200Des_1 },
- {PanelRef60Hz, 0x0008, 0x0008, XGI_LVDSNoScalingDesData },
- {Panel_1024x768x75, 0x0018, 0x0000, XGI_LVDS1024x768Des_1x75 },
- {Panel_1024x768x75, 0x0618, 0x0410, XGI_LVDS1024x768Des_3x75 },
- {Panel_1024x768x75, 0x0018, 0x0010, XGI_LVDS1024x768Des_2x75 },
- {Panel_1280x1024x75, 0x0018, 0x0000, XGI_LVDS1280x1024Des_1x75 },
- {Panel_1280x1024x75, 0x0018, 0x0010, XGI_LVDS1280x1024Des_2x75 },
- {PanelRef75Hz, 0x0008, 0x0008, XGI_LVDSNoScalingDesDatax75 },
- {0xFF, 0x0000, 0x0000, NULL }
-};
-
-static const struct XGI330_TVDataTablStruct XGI_TVDataTable[] = {
- {0x09E1, 0x0001, XGI_ExtPALData},
- {0x09E1, 0x0000, XGI_ExtNTSCData},
- {0x09E1, 0x0801, XGI_StPALData},
- {0x09E1, 0x0800, XGI_StNTSCData},
- {0x49E0, 0x0100, XGI_ExtHiTVData},
- {0x49E0, 0x4100, XGI_St2HiTVData},
- {0x49E0, 0x4900, XGI_St1HiTVData},
- {0x09E0, 0x0020, XGI_ExtYPbPr525iData},
- {0x09E0, 0x0040, XGI_ExtYPbPr525pData},
- {0x09E0, 0x0080, XGI_ExtYPbPr750pData},
- {0x09E0, 0x0820, XGI_StYPbPr525iData},
- {0x09E0, 0x0840, XGI_StYPbPr525pData},
- {0x09E0, 0x0880, XGI_StYPbPr750pData},
- {0xffff, 0x0000, XGI_ExtNTSCData},
-};
-
-/* Dual link only */
-static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
-/* LCDCap1024x768 */
- {Panel_1024x768, DefaultLCDCap, 0x88, 0x06, VCLK65_315,
- 0x6C, 0xC3, 0x35, 0x62,
- 0x0A, 0xC0, 0x28, 0x10},
-/* LCDCap1280x1024 */
- {Panel_1280x1024, XGI_LCDDualLink + DefaultLCDCap,
- 0x70, 0x03, VCLK108_2_315,
- 0x70, 0x44, 0xF8, 0x2F,
- 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1400x1050 */
- {Panel_1400x1050, XGI_LCDDualLink + DefaultLCDCap,
- 0x70, 0x03, VCLK108_2_315,
- 0x70, 0x44, 0xF8, 0x2F,
- 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1600x1200 */
- {Panel_1600x1200, XGI_LCDDualLink + DefaultLCDCap,
- 0xC0, 0x03, VCLK162,
- 0x43, 0x22, 0x70, 0x24,
- 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1024x768x75 */
- {Panel_1024x768x75, DefaultLCDCap, 0x60, 0, VCLK78_75,
- 0x2B, 0x61, 0x2B, 0x61,
- 0x0A, 0xC0, 0x28, 0x10},
-/* LCDCap1280x1024x75 */
- {Panel_1280x1024x75, XGI_LCDDualLink + DefaultLCDCap,
- 0x90, 0x03, VCLK135_5,
- 0x54, 0x42, 0x4A, 0x61,
- 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0x88, 0x06, VCLK65_315,
- 0x6C, 0xC3, 0x35, 0x62,
- 0x0A, 0xC0, 0x28, 0x10}
-};
-
-static const struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
-/* LCDCap1024x768 */
- {Panel_1024x768, DefaultLCDCap, 0x88, 0x06, VCLK65_315,
- 0x6C, 0xC3, 0x35, 0x62,
- 0x0A, 0xC0, 0x28, 0x10},
-/* LCDCap1280x1024 */
- {Panel_1280x1024, DefaultLCDCap,
- 0x70, 0x03, VCLK108_2_315,
- 0x70, 0x44, 0xF8, 0x2F,
- 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1400x1050 */
- {Panel_1400x1050, DefaultLCDCap,
- 0x70, 0x03, VCLK108_2_315,
- 0x70, 0x44, 0xF8, 0x2F,
- 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1600x1200 */
- {Panel_1600x1200, DefaultLCDCap,
- 0xC0, 0x03, VCLK162,
- 0x5A, 0x23, 0x5A, 0x23,
- 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCap1024x768x75 */
- {Panel_1024x768x75, DefaultLCDCap, 0x60, 0, VCLK78_75,
- 0x2B, 0x61, 0x2B, 0x61,
- 0x0A, 0xC0, 0x28, 0x10},
-/* LCDCap1280x1024x75 */
- {Panel_1280x1024x75, DefaultLCDCap,
- 0x90, 0x03, VCLK135_5,
- 0x54, 0x42, 0x4A, 0x61,
- 0x0A, 0xC0, 0x30, 0x10},
-/* LCDCapDefault */
- {0xFF, DefaultLCDCap, 0x88, 0x06, VCLK65_315,
- 0x6C, 0xC3, 0x35, 0x62,
- 0x0A, 0xC0, 0x28, 0x10}
-};
-
-const struct XGI_Ext2Struct XGI330_RefIndex[] = {
- {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
- 0x00, 0x10, 0x59, 320, 200},/* 00 */
- {Mode32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
- 0x00, 0x10, 0x00, 320, 400},/* 01 */
- {Mode32Bpp + SupportAllCRT2 + SyncNN, RES320x240, VCLK25_175,
- 0x04, 0x20, 0x50, 320, 240},/* 02 */
- {Mode32Bpp + SupportAllCRT2 + SyncPP, RES400x300, VCLK40,
- 0x05, 0x32, 0x51, 400, 300},/* 03 */
- {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES512x384,
- VCLK65_315, 0x06, 0x43, 0x52, 512, 384},/* 04 */
- {Mode32Bpp + SupportAllCRT2 + SyncPN, RES640x400, VCLK25_175,
- 0x00, 0x14, 0x2f, 640, 400},/* 05 */
- {Mode32Bpp + SupportAllCRT2 + SyncNN, RES640x480x60, VCLK25_175,
- 0x04, 0x24, 0x2e, 640, 480},/* 06 640x480x60Hz (LCD 640x480x60z) */
- {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x72, VCLK31_5,
- 0x04, 0x24, 0x2e, 640, 480},/* 07 640x480x72Hz (LCD 640x480x70Hz) */
- {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES640x480x75, VCLK31_5,
- 0x47, 0x24, 0x2e, 640, 480},/* 08 640x480x75Hz (LCD 640x480x75Hz) */
- {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x85, VCLK36,
- 0x8A, 0x24, 0x2e, 640, 480},/* 09 640x480x85Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x100, VCLK43_163,
- 0x00, 0x24, 0x2e, 640, 480},/* 0a 640x480x100Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x120, VCLK52_406,
- 0x00, 0x24, 0x2e, 640, 480},/* 0b 640x480x120Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES640x480x160, VCLK72_852,
- 0x00, 0x24, 0x2e, 640, 480},/* 0c 640x480x160Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncNN, RES640x480x200, VCLK86_6,
- 0x00, 0x24, 0x2e, 640, 480},/* 0d 640x480x200Hz */
- {Mode32Bpp + NoSupportLCD + SyncPP, RES800x600x56, VCLK36,
- 0x05, 0x36, 0x6a, 800, 600},/* 0e 800x600x56Hz */
- {Mode32Bpp + NoSupportTV + SyncPP, RES800x600x60, VCLK40,
- 0x05, 0x36, 0x6a, 800, 600},/* 0f 800x600x60Hz (LCD 800x600x60Hz) */
- {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x72, VCLK50,
- 0x48, 0x36, 0x6a, 800, 600},/* 10 800x600x72Hz (LCD 800x600x70Hz) */
- {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES800x600x75, VCLK49_5,
- 0x8B, 0x36, 0x6a, 800, 600},/* 11 800x600x75Hz (LCD 800x600x75Hz) */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x600x85, VCLK56_25,
- 0x00, 0x36, 0x6a, 800, 600},/* 12 800x600x85Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x100, VCLK68_179,
- 0x00, 0x36, 0x6a, 800, 600},/* 13 800x600x100Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x120, VCLK83_95,
- 0x00, 0x36, 0x6a, 800, 600},/* 14 800x600x120Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES800x600x160, VCLK116_406,
- 0x00, 0x36, 0x6a, 800, 600},/* 15 800x600x160Hz */
- {Mode32Bpp + InterlaceMode + SyncPP, RES1024x768x43, VCLK44_9,
- 0x00, 0x47, 0x37, 1024, 768},/* 16 1024x768x43Hz */
- /* 17 1024x768x60Hz (LCD 1024x768x60Hz) */
- {Mode32Bpp + NoSupportTV + SyncNN + SupportTV1024, RES1024x768x60,
- VCLK65_315, 0x06, 0x47, 0x37, 1024, 768},
- {Mode32Bpp + NoSupportHiVisionTV + SyncNN, RES1024x768x70, VCLK75,
- 0x49, 0x47, 0x37, 1024, 768},/* 18 1024x768x70Hz (LCD 1024x768x70Hz) */
- {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1024x768x75, VCLK78_75,
- 0x00, 0x47, 0x37, 1024, 768},/* 19 1024x768x75Hz (LCD 1024x768x75Hz) */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x768x85, VCLK94_5,
- 0x8C, 0x47, 0x37, 1024, 768},/* 1a 1024x768x85Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x100, VCLK113_309,
- 0x00, 0x47, 0x37, 1024, 768},/* 1b 1024x768x100Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x120, VCLK139_054,
- 0x00, 0x47, 0x37, 1024, 768},/* 1c 1024x768x120Hz */
- {Mode32Bpp + SupportLCD + SyncPP, RES1280x960x60, VCLK108_2_315,
- 0x08, 0x58, 0x7b, 1280, 960},/* 1d 1280x960x60Hz */
- {Mode32Bpp + InterlaceMode + SyncPP, RES1280x1024x43, VCLK78_75,
- 0x00, 0x58, 0x3a, 1280, 1024},/* 1e 1280x1024x43Hz */
- {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x60, VCLK108_2_315,
- 0x07, 0x58, 0x3a, 1280, 1024},/*1f 1280x1024x60Hz (LCD 1280x1024x60Hz)*/
- {Mode32Bpp + NoSupportTV + SyncPP, RES1280x1024x75, VCLK135_5,
- 0x00, 0x58, 0x3a, 1280, 1024},/*20 1280x1024x75Hz (LCD 1280x1024x75Hz)*/
- {Mode32Bpp + SyncPP, RES1280x1024x85, VCLK157_5,
- 0x00, 0x58, 0x3a, 1280, 1024},/* 21 1280x1024x85Hz */
- /* 22 1600x1200x60Hz */
- {Mode32Bpp + SupportLCD + SyncPP + SupportCRT2in301C,
- RES1600x1200x60, VCLK162, 0x09, 0x7A, 0x3c, 1600, 1200},
- {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x65, VCLK175,
- 0x00, 0x69, 0x3c, 1600, 1200},/* 23 1600x1200x65Hz */
- {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x70, VCLK189,
- 0x00, 0x69, 0x3c, 1600, 1200},/* 24 1600x1200x70Hz */
- {Mode32Bpp + SyncPP + SupportCRT2in301C, RES1600x1200x75, VCLK202_5,
- 0x00, 0x69, 0x3c, 1600, 1200},/* 25 1600x1200x75Hz */
- {Mode32Bpp + SyncPP, RES1600x1200x85, VCLK229_5,
- 0x00, 0x69, 0x3c, 1600, 1200},/* 26 1600x1200x85Hz */
- {Mode32Bpp + SyncPP, RES1600x1200x100, VCLK269_655,
- 0x00, 0x69, 0x3c, 1600, 1200},/* 27 1600x1200x100Hz */
- {Mode32Bpp + SyncPP, RES1600x1200x120, VCLK323_586,
- 0x00, 0x69, 0x3c, 1600, 1200},/* 28 1600x1200x120Hz */
- {Mode32Bpp + SupportLCD + SyncNP, RES1920x1440x60, VCLK234,
- 0x00, 0x00, 0x68, 1920, 1440},/* 29 1920x1440x60Hz */
- {Mode32Bpp + SyncPN, RES1920x1440x65, VCLK254_817,
- 0x00, 0x00, 0x68, 1920, 1440},/* 2a 1920x1440x65Hz */
- {Mode32Bpp + SyncPN, RES1920x1440x70, VCLK277_015,
- 0x00, 0x00, 0x68, 1920, 1440},/* 2b 1920x1440x70Hz */
- {Mode32Bpp + SyncPN, RES1920x1440x75, VCLK291_132,
- 0x00, 0x00, 0x68, 1920, 1440},/* 2c 1920x1440x75Hz */
- {Mode32Bpp + SyncPN, RES1920x1440x85, VCLK330_615,
- 0x00, 0x00, 0x68, 1920, 1440},/* 2d 1920x1440x85Hz */
- {Mode16Bpp + SyncPN, RES1920x1440x100, VCLK388_631,
- 0x00, 0x00, 0x68, 1920, 1440},/* 2e 1920x1440x100Hz */
- {Mode32Bpp + SupportLCD + SyncPN, RES2048x1536x60, VCLK266_952,
- 0x00, 0x00, 0x6c, 2048, 1536},/* 2f 2048x1536x60Hz */
- {Mode32Bpp + SyncPN, RES2048x1536x65, VCLK291_766,
- 0x00, 0x00, 0x6c, 2048, 1536},/* 30 2048x1536x65Hz */
- {Mode32Bpp + SyncPN, RES2048x1536x70, VCLK315_195,
- 0x00, 0x00, 0x6c, 2048, 1536},/* 31 2048x1536x70Hz */
- {Mode32Bpp + SyncPN, RES2048x1536x75, VCLK340_477,
- 0x00, 0x00, 0x6c, 2048, 1536},/* 32 2048x1536x75Hz */
- {Mode16Bpp + SyncPN, RES2048x1536x85, VCLK375_847,
- 0x00, 0x00, 0x6c, 2048, 1536},/* 33 2048x1536x85Hz */
- {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
- SyncPP + SupportYPbPr750p, RES800x480x60, VCLK39_77,
- 0x08, 0x00, 0x70, 800, 480},/* 34 800x480x60Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x75, VCLK49_5,
- 0x08, 0x00, 0x70, 800, 480},/* 35 800x480x75Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES800x480x85, VCLK56_25,
- 0x08, 0x00, 0x70, 800, 480},/* 36 800x480x85Hz */
- {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
- SyncPP + SupportYPbPr750p, RES1024x576x60, VCLK65_315,
- 0x09, 0x00, 0x71, 1024, 576},/* 37 1024x576x60Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x75, VCLK78_75,
- 0x09, 0x00, 0x71, 1024, 576},/* 38 1024x576x75Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1024x576x85, VCLK94_5,
- 0x09, 0x00, 0x71, 1024, 576},/* 39 1024x576x85Hz */
- {Mode32Bpp + SupportHiVision + SupportRAMDAC2 +
- SyncPP + SupportYPbPr750p, RES1280x720x60, VCLK108_2_315,
- 0x0A, 0x00, 0x75, 1280, 720},/* 3a 1280x720x60Hz*/
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x75, VCLK135_5,
- 0x0A, 0x00, 0x75, 1280, 720},/* 3b 1280x720x75Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1280x720x85, VCLK157_5,
- 0x0A, 0x00, 0x75, 1280, 720},/* 3c 1280x720x85Hz */
- {Mode32Bpp + SupportTV + SyncNN, RES720x480x60, VCLK28_322,
- 0x06, 0x00, 0x31, 720, 480},/* 3d 720x480x60Hz */
- {Mode32Bpp + SupportTV + SyncPP, RES720x576x56, VCLK36,
- 0x06, 0x00, 0x32, 720, 576},/* 3e 720x576x56Hz */
- {Mode32Bpp + InterlaceMode + NoSupportLCD + SyncPP, RES856x480x79I,
- VCLK35_2, 0x00, 0x00, 0x00, 856, 480},/* 3f 856x480x79I */
- {Mode32Bpp + NoSupportLCD + SyncNN, RES856x480x60, VCLK35_2,
- 0x00, 0x00, 0x00, 856, 480},/* 40 856x480x60Hz */
- {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1280x768x60,
- VCLK79_411, 0x08, 0x48, 0x23, 1280, 768},/* 41 1280x768x60Hz */
- {Mode32Bpp + NoSupportHiVisionTV + SyncPP, RES1400x1050x60,
- VCLK122_61, 0x08, 0x69, 0x26, 1400, 1050},/* 42 1400x1050x60Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x60, VCLK80_350,
- 0x37, 0x00, 0x20, 1152, 864},/* 43 1152x864x60Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPP, RES1152x864x75, VCLK107_385,
- 0x37, 0x00, 0x20, 1152, 864},/* 44 1152x864x75Hz */
- {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x75,
- VCLK125_999, 0x3A, 0x88, 0x7b, 1280, 960},/* 45 1280x960x75Hz */
- {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x85,
- VCLK148_5, 0x0A, 0x88, 0x7b, 1280, 960},/* 46 1280x960x85Hz */
- {Mode32Bpp + SupportLCD + SupportRAMDAC2 + SyncPP, RES1280x960x120,
- VCLK217_325, 0x3A, 0x88, 0x7b, 1280, 960},/* 47 1280x960x120Hz */
- {Mode32Bpp + SupportRAMDAC2 + SyncPN, RES1024x768x160, VCLK139_054,
- 0x30, 0x47, 0x37, 1024, 768},/* 48 1024x768x160Hz */
-};
-
-static const unsigned char XGI330_ScreenOffset[] = {
- 0x14, 0x19, 0x20, 0x28, 0x32, 0x40,
- 0x50, 0x64, 0x78, 0x80, 0x2d, 0x35,
- 0x57, 0x48
-};
-
-static const struct SiS_ModeResInfo_S XGI330_ModeResInfo[] = {
- { 320, 200, 8, 8},
- { 320, 240, 8, 8},
- { 320, 400, 8, 8},
- { 400, 300, 8, 8},
- { 512, 384, 8, 8},
- { 640, 400, 8, 16},
- { 640, 480, 8, 16},
- { 800, 600, 8, 16},
- {1024, 768, 8, 16},
- {1280, 1024, 8, 16},
- {1600, 1200, 8, 16},
- {1920, 1440, 8, 16},
- {2048, 1536, 8, 16},
- { 720, 480, 8, 16},
- { 720, 576, 8, 16},
- {1280, 960, 8, 16},
- { 800, 480, 8, 16},
- {1024, 576, 8, 16},
- {1280, 720, 8, 16},
- { 856, 480, 8, 16},
- {1280, 768, 8, 16},
- {1400, 1050, 8, 16},
- {1152, 864, 8, 16}
-};
-
-const struct SiS_VCLKData XGI_VCLKData[] = {
- /* SR2B,SR2C,SR2D */
- {0x1B, 0xE1, 25}, /* 00 (25.175MHz) */
- {0x4E, 0xE4, 28}, /* 01 (28.322MHz) */
- {0x57, 0xE4, 31}, /* 02 (31.500MHz) */
- {0xC3, 0xC8, 36}, /* 03 (36.000MHz) */
- {0x42, 0xE2, 40}, /* 04 (40.000MHz) */
- {0xFE, 0xCD, 43}, /* 05 (43.163MHz) */
- {0x5D, 0xC4, 44}, /* 06 (44.900MHz) */
- {0x52, 0xE2, 49}, /* 07 (49.500MHz) */
- {0x53, 0xE2, 50}, /* 08 (50.000MHz) */
- {0x74, 0x67, 52}, /* 09 (52.406MHz) */
- {0x6D, 0x66, 56}, /* 0A (56.250MHz) */
- {0x6C, 0xC3, 65}, /* 0B (65.000MHz) */
- {0x46, 0x44, 67}, /* 0C (67.765MHz) */
- {0xB1, 0x46, 68}, /* 0D (68.179MHz) */
- {0xD3, 0x4A, 72}, /* 0E (72.852MHz) */
- {0x29, 0x61, 75}, /* 0F (75.000MHz) */
- {0x6E, 0x46, 76}, /* 10 (75.800MHz) */
- {0x2B, 0x61, 78}, /* 11 (78.750MHz) */
- {0x31, 0x42, 79}, /* 12 (79.411MHz) */
- {0xAB, 0x44, 83}, /* 13 (83.950MHz) */
- {0x46, 0x25, 84}, /* 14 (84.800MHz) */
- {0x78, 0x29, 86}, /* 15 (86.600MHz) */
- {0x62, 0x44, 94}, /* 16 (94.500MHz) */
- {0x2B, 0x41, 104}, /* 17 (104.998MHz) */
- {0x3A, 0x23, 105}, /* 18 (105.882MHz) */
- {0x70, 0x44, 108}, /* 19 (107.862MHz) */
- {0x3C, 0x23, 109}, /* 1A (109.175MHz) */
- {0x5E, 0x43, 113}, /* 1B (113.309MHz) */
- {0xBC, 0x44, 116}, /* 1C (116.406MHz) */
- {0xE0, 0x46, 132}, /* 1D (132.258MHz) */
- {0x54, 0x42, 135}, /* 1E (135.500MHz) */
- {0x9C, 0x22, 139}, /* 1F (139.275MHz) */
- {0x41, 0x22, 157}, /* 20 (157.500MHz) */
- {0x70, 0x24, 162}, /* 21 (161.793MHz) */
- {0x30, 0x21, 175}, /* 22 (175.000MHz) */
- {0x4E, 0x22, 189}, /* 23 (188.520MHz) */
- {0xDE, 0x26, 194}, /* 24 (194.400MHz) */
- {0x62, 0x06, 202}, /* 25 (202.500MHz) */
- {0x3F, 0x03, 229}, /* 26 (229.500MHz) */
- {0xB8, 0x06, 234}, /* 27 (233.178MHz) */
- {0x34, 0x02, 253}, /* 28 (252.699MHz) */
- {0x58, 0x04, 255}, /* 29 (254.817MHz) */
- {0x24, 0x01, 265}, /* 2A (265.728MHz) */
- {0x9B, 0x02, 267}, /* 2B (266.952MHz) */
- {0x70, 0x05, 270}, /* 2C (269.65567MHz) */
- {0x25, 0x01, 272}, /* 2D (272.04199MHz) */
- {0x9C, 0x02, 277}, /* 2E (277.015MHz) */
- {0x27, 0x01, 286}, /* 2F (286.359985MHz) */
- {0xB3, 0x04, 291}, /* 30 (291.13266MHz) */
- {0xBC, 0x05, 292}, /* 31 (291.766MHz) */
- {0xF6, 0x0A, 310}, /* 32 (309.789459MHz) */
- {0x95, 0x01, 315}, /* 33 (315.195MHz) */
- {0xF0, 0x09, 324}, /* 34 (323.586792MHz) */
- {0xFE, 0x0A, 331}, /* 35 (330.615631MHz) */
- {0xF3, 0x09, 332}, /* 36 (332.177612MHz) */
- {0x5E, 0x03, 340}, /* 37 (340.477MHz) */
- {0xE8, 0x07, 376}, /* 38 (375.847504MHz) */
- {0xDE, 0x06, 389}, /* 39 (388.631439MHz) */
- {0x52, 0x2A, 54}, /* 3A (54.000MHz) */
- {0x52, 0x6A, 27}, /* 3B (27.000MHz) */
- {0x62, 0x24, 70}, /* 3C (70.874991MHz) */
- {0x62, 0x64, 70}, /* 3D (70.1048912MHz) */
- {0xA8, 0x4C, 30}, /* 3E (30.1048912MHz) */
- {0x20, 0x26, 33}, /* 3F (33.7499957MHz) */
- {0x31, 0xc2, 39}, /* 40 (39.77MHz) */
- {0x11, 0x21, 30}, /* 41 (30MHz) }// NTSC 1024X768 */
- {0x2E, 0x48, 25}, /* 42 (25.175MHz) }// ScaleLCD */
- {0x24, 0x46, 25}, /* 43 (25.175MHz) */
- {0x26, 0x64, 28}, /* 44 (28.322MHz) */
- {0x37, 0x64, 40}, /* 45 (40.000MHz) */
- {0xA1, 0x42, 108}, /* 46 (95.000MHz) }// QVGA */
- {0x37, 0x61, 100}, /* 47 (100.00MHz) */
- {0x78, 0x27, 108}, /* 48 (108.200MHz) */
- {0xBF, 0xC8, 35}, /* 49 (35.2MHz) */
- {0x66, 0x43, 123}, /* 4A (122.61Mhz) */
- {0x2C, 0x61, 80}, /* 4B (80.350Mhz) */
- {0x3B, 0x61, 108}, /* 4C (107.385Mhz) */
- {0x69, 0x61, 191}, /* 4D (190.96MHz ) */
- {0x4F, 0x22, 192}, /* 4E (192.069MHz) */
- {0x28, 0x26, 322}, /* 4F (322.273MHz) */
- {0x5C, 0x6B, 27}, /* 50 (27.74HMz) */
- {0x57, 0x24, 126}, /* 51 (125.999MHz) */
- {0x5C, 0x42, 148}, /* 52 (148.5MHz) */
- {0x42, 0x61, 120}, /* 53 (120.839MHz) */
- {0x62, 0x61, 178}, /* 54 (178.992MHz) */
- {0x59, 0x22, 217}, /* 55 (217.325MHz) */
- {0x29, 0x01, 300}, /* 56 (299.505Mhz) */
- {0x52, 0x63, 74}, /* 57 (74.25MHz) */
- {0xFF, 0x00, 0} /* End mark */
-};
-
-static const struct SiS_VBVCLKData XGI_VBVCLKData[] = {
- {0x1B, 0xE1, 25}, /* 00 (25.175MHz) */
- {0x4E, 0xE4, 28}, /* 01 (28.322MHz) */
- {0x57, 0xE4, 31}, /* 02 (31.500MHz) */
- {0xC3, 0xC8, 36}, /* 03 (36.000MHz) */
- {0x42, 0x47, 40}, /* 04 (40.000MHz) */
- {0xFE, 0xCD, 43}, /* 05 (43.163MHz) */
- {0x5D, 0xC4, 44}, /* 06 (44.900MHz) */
- {0x52, 0x47, 49}, /* 07 (49.500MHz) */
- {0x53, 0x47, 50}, /* 08 (50.000MHz) */
- {0x74, 0x67, 52}, /* 09 (52.406MHz) */
- {0x6D, 0x66, 56}, /* 0A (56.250MHz) */
- {0x35, 0x62, 65}, /* 0B (65.000MHz) */
- {0x46, 0x44, 67}, /* 0C (67.765MHz) */
- {0xB1, 0x46, 68}, /* 0D (68.179MHz) */
- {0xD3, 0x4A, 72}, /* 0E (72.852MHz) */
- {0x29, 0x61, 75}, /* 0F (75.000MHz) */
- {0x6D, 0x46, 75}, /* 10 (75.800MHz) */
- {0x41, 0x43, 78}, /* 11 (78.750MHz) */
- {0x31, 0x42, 79}, /* 12 (79.411MHz) */
- {0xAB, 0x44, 83}, /* 13 (83.950MHz) */
- {0x46, 0x25, 84}, /* 14 (84.800MHz) */
- {0x78, 0x29, 86}, /* 15 (86.600MHz) */
- {0x62, 0x44, 94}, /* 16 (94.500MHz) */
- {0x2B, 0x22, 104}, /* 17 (104.998MHz) */
- {0x49, 0x24, 105}, /* 18 (105.882MHz) */
- {0xF8, 0x2F, 108}, /* 19 (108.279MHz) */
- {0x3C, 0x23, 109}, /* 1A (109.175MHz) */
- {0x5E, 0x43, 113}, /* 1B (113.309MHz) */
- {0xBC, 0x44, 116}, /* 1C (116.406MHz) */
- {0xE0, 0x46, 132}, /* 1D (132.258MHz) */
- {0xD4, 0x28, 135}, /* 1E (135.220MHz) */
- {0xEA, 0x2A, 139}, /* 1F (139.275MHz) */
- {0x41, 0x22, 157}, /* 20 (157.500MHz) */
- {0x70, 0x24, 162}, /* 21 (161.793MHz) */
- {0x30, 0x21, 175}, /* 22 (175.000MHz) */
- {0x4E, 0x22, 189}, /* 23 (188.520MHz) */
- {0xDE, 0x26, 194}, /* 24 (194.400MHz) */
- {0x70, 0x07, 202}, /* 25 (202.500MHz) */
- {0x3F, 0x03, 229}, /* 26 (229.500MHz) */
- {0xB8, 0x06, 234}, /* 27 (233.178MHz) */
- {0x34, 0x02, 253}, /* 28 (252.699997 MHz) */
- {0x58, 0x04, 255}, /* 29 (254.817MHz) */
- {0x24, 0x01, 265}, /* 2A (265.728MHz) */
- {0x9B, 0x02, 267}, /* 2B (266.952MHz) */
- {0x70, 0x05, 270}, /* 2C (269.65567 MHz) */
- {0x25, 0x01, 272}, /* 2D (272.041992 MHz) */
- {0x9C, 0x02, 277}, /* 2E (277.015MHz) */
- {0x27, 0x01, 286}, /* 2F (286.359985 MHz) */
- {0x3C, 0x02, 291}, /* 30 (291.132660 MHz) */
- {0xEF, 0x0A, 292}, /* 31 (291.766MHz) */
- {0xF6, 0x0A, 310}, /* 32 (309.789459 MHz) */
- {0x95, 0x01, 315}, /* 33 (315.195MHz) */
- {0xF0, 0x09, 324}, /* 34 (323.586792 MHz) */
- {0xFE, 0x0A, 331}, /* 35 (330.615631 MHz) */
- {0xF3, 0x09, 332}, /* 36 (332.177612 MHz) */
- {0xEA, 0x08, 340}, /* 37 (340.477MHz) */
- {0xE8, 0x07, 376}, /* 38 (375.847504 MHz) */
- {0xDE, 0x06, 389}, /* 39 (388.631439 MHz) */
- {0x52, 0x2A, 54}, /* 3A (54.000MHz) */
- {0x52, 0x6A, 27}, /* 3B (27.000MHz) */
- {0x62, 0x24, 70}, /* 3C (70.874991MHz) */
- {0x62, 0x64, 70}, /* 3D (70.1048912MHz) */
- {0xA8, 0x4C, 30}, /* 3E (30.1048912MHz) */
- {0x20, 0x26, 33}, /* 3F (33.7499957MHz) */
- {0x31, 0xc2, 39}, /* 40 (39.77MHz) */
- {0x11, 0x21, 30}, /* 41 (30MHz) }// NTSC 1024X768 */
- {0x2E, 0x48, 25}, /* 42 (25.175MHz) }// ScaleLCD */
- {0x24, 0x46, 25}, /* 43 (25.175MHz) */
- {0x26, 0x64, 28}, /* 44 (28.322MHz) */
- {0x37, 0x64, 40}, /* 45 (40.000MHz) */
- {0xA1, 0x42, 108}, /* 46 (95.000MHz) }// QVGA */
- {0x37, 0x61, 100}, /* 47 (100.00MHz) */
- {0x78, 0x27, 108}, /* 48 (108.200MHz) */
- {0xBF, 0xC8, 35 }, /* 49 (35.2MHz) */
- {0x66, 0x43, 123}, /* 4A (122.61Mhz) */
- {0x2C, 0x61, 80 }, /* 4B (80.350Mhz) */
- {0x3B, 0x61, 108}, /* 4C (107.385Mhz) */
- {0x69, 0x61, 191}, /* 4D (190.96MHz ) */
- {0x4F, 0x22, 192}, /* 4E (192.069MHz) */
- {0x28, 0x26, 322}, /* 4F (322.273MHz) */
- {0x5C, 0x6B, 27}, /* 50 (27.74HMz) */
- {0x57, 0x24, 126}, /* 51 (125.999MHz) */
- {0x5C, 0x42, 148}, /* 52 (148.5MHz) */
- {0x42, 0x61, 120}, /* 53 (120.839MHz) */
- {0x62, 0x61, 178}, /* 54 (178.992MHz) */
- {0x59, 0x22, 217}, /* 55 (217.325MHz) */
- {0x29, 0x01, 300}, /* 56 (299.505Mhz) */
- {0x52, 0x63, 74}, /* 57 (74.25MHz) */
- {0xFF, 0x00, 0} /* End mark */
-};
-
-#define XGI301TVDelay 0x22
-#define XGI301LCDDelay 0x12
-
-static const unsigned char TVAntiFlickList[] = {/* NTSCAntiFlicker */
- 0x04, /* ; 0 Adaptive */
- 0x00, /* ; 1 new anti-flicker ? */
-
- 0x04, /* ; 0 Adaptive */
- 0x08, /* ; 1 new anti-flicker ? */
-
- 0x04, /* ; 0 ? */
- 0x00 /* ; 1 new anti-flicker ? */
-};
-
-static const unsigned char TVEdgeList[] = {
- 0x00, /* ; 0 NTSC No Edge enhance */
- 0x04, /* ; 1 NTSC Adaptive Edge enhance */
- 0x00, /* ; 0 PAL No Edge enhance */
- 0x04, /* ; 1 PAL Adaptive Edge enhance */
- 0x00, /* ; 0 HiTV */
- 0x00 /* ; 1 HiTV */
-};
-
-static const unsigned long TVPhaseList[] = {
- 0x08BAED21, /* ; 0 NTSC phase */
- 0x00E3052A, /* ; 1 PAL phase */
- 0x9B2EE421, /* ; 2 PAL-M phase */
- 0xBA3EF421, /* ; 3 PAL-N phase */
- 0xA7A28B1E, /* ; 4 NTSC 1024x768 */
- 0xE00A831E, /* ; 5 PAL-M 1024x768 */
- 0x00000000, /* ; 6 reserved */
- 0x00000000, /* ; 7 reserved */
- 0xD67BF021, /* ; 8 NTSC phase */
- 0xE986092A, /* ; 9 PAL phase */
- 0xA4EFE621, /* ; A PAL-M phase */
- 0x4694F621, /* ; B PAL-N phase */
- 0x8BDE711C, /* ; C NTSC 1024x768 */
- 0xE00A831E /* ; D PAL-M 1024x768 */
-};
-
-static const unsigned char NTSCYFilter1[] = {
- 0x00, 0xF4, 0x10, 0x38, /* 0 : 320x text mode */
- 0x00, 0xF4, 0x10, 0x38, /* 1 : 360x text mode */
- 0xEB, 0x04, 0x25, 0x18, /* 2 : 640x text mode */
- 0xF1, 0x04, 0x1F, 0x18, /* 3 : 720x text mode */
- 0x00, 0xF4, 0x10, 0x38, /* 4 : 320x gra. mode */
- 0xEB, 0x04, 0x25, 0x18, /* 5 : 640x gra. mode */
- 0xEB, 0x15, 0x25, 0xF6 /* 6 : 800x gra. mode */
-};
-
-static const unsigned char PALYFilter1[] = {
- 0x00, 0xF4, 0x10, 0x38, /* 0 : 320x text mode */
- 0x00, 0xF4, 0x10, 0x38, /* 1 : 360x text mode */
- 0xF1, 0xF7, 0x1F, 0x32, /* 2 : 640x text mode */
- 0xF3, 0x00, 0x1D, 0x20, /* 3 : 720x text mode */
- 0x00, 0xF4, 0x10, 0x38, /* 4 : 320x gra. mode */
- 0xF1, 0xF7, 0x1F, 0x32, /* 5 : 640x gra. mode */
- 0xFC, 0xFB, 0x14, 0x2A /* 6 : 800x gra. mode */
-};
-
-static const unsigned char xgifb_palmn_yfilter1[] = {
- 0x00, 0xF4, 0x10, 0x38, /* 0 : 320x text mode */
- 0x00, 0xF4, 0x10, 0x38, /* 1 : 360x text mode */
- 0xEB, 0x04, 0x10, 0x18, /* 2 : 640x text mode */
- 0xF7, 0x06, 0x19, 0x14, /* 3 : 720x text mode */
- 0x00, 0xF4, 0x10, 0x38, /* 4 : 320x gra. mode */
- 0xEB, 0x04, 0x25, 0x18, /* 5 : 640x gra. mode */
- 0xEB, 0x15, 0x25, 0xF6, /* 6 : 800x gra. mode */
- 0xFF, 0xFF, 0xFF, 0xFF /* End of Table */
-};
-
-static const unsigned char xgifb_yfilter2[] = {
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 0 : 320x text mode */
- 0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 1 : 360x text mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 2 : 640x text mode */
- 0x01, 0x02, 0xFE, 0xF7, 0x03, 0x27, 0x3C, /* 3 : 720x text mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 4 : 320x gra. mode */
- 0xFF, 0x03, 0x02, 0xF6, 0xFC, 0x27, 0x46, /* 5 : 640x gra. mode */
- 0x01, 0x01, 0xFC, 0xF8, 0x08, 0x26, 0x38, /* 6 : 800x gra. mode */
- 0xFF, 0xFF, 0xFC, 0x00, 0x0F, 0x22, 0x28 /* 7 : 1024xgra. mode */
-};
-
-static const unsigned char XGI_NTSC1024AdjTime[] = {
- 0xa7, 0x07, 0xf2, 0x6e, 0x17, 0x8b, 0x73, 0x53,
- 0x13, 0x40, 0x34, 0xF4, 0x63, 0xBB, 0xCC, 0x7A,
- 0x58, 0xe4, 0x73, 0xd0, 0x13
-};
-
-static const struct XGI301C_Tap4TimingStruct xgifb_tap4_timing[] = {
- {0, {
- 0x00, 0x20, 0x00, 0x00, 0x7F, 0x20, 0x02, 0x7F, /* ; C0-C7 */
- 0x7D, 0x20, 0x04, 0x7F, 0x7D, 0x1F, 0x06, 0x7E, /* ; C8-CF */
- 0x7C, 0x1D, 0x09, 0x7E, 0x7C, 0x1B, 0x0B, 0x7E, /* ; D0-D7 */
- 0x7C, 0x19, 0x0E, 0x7D, 0x7C, 0x17, 0x11, 0x7C, /* ; D8-DF */
- 0x7C, 0x14, 0x14, 0x7C, 0x7C, 0x11, 0x17, 0x7C, /* ; E0-E7 */
- 0x7D, 0x0E, 0x19, 0x7C, 0x7E, 0x0B, 0x1B, 0x7C, /* ; EA-EF */
- 0x7E, 0x09, 0x1D, 0x7C, 0x7F, 0x06, 0x1F, 0x7C, /* ; F0-F7 */
- 0x7F, 0x04, 0x20, 0x7D, 0x00, 0x02, 0x20, 0x7E /* ; F8-FF */
- }
- }
-};
-
-static const struct XGI301C_Tap4TimingStruct PALTap4Timing[] = {
- {600, {
- 0x05, 0x19, 0x05, 0x7D, 0x03, 0x19, 0x06, 0x7E, /* ; C0-C7 */
- 0x02, 0x19, 0x08, 0x7D, 0x01, 0x18, 0x0A, 0x7D, /* ; C8-CF */
- 0x00, 0x18, 0x0C, 0x7C, 0x7F, 0x17, 0x0E, 0x7C, /* ; D0-D7 */
- 0x7E, 0x16, 0x0F, 0x7D, 0x7E, 0x14, 0x11, 0x7D, /* ; D8-DF */
- 0x7D, 0x13, 0x13, 0x7D, 0x7D, 0x11, 0x14, 0x7E, /* ; E0-E7 */
- 0x7D, 0x0F, 0x16, 0x7E, 0x7D, 0x0E, 0x17, 0x7E, /* ; EA-EF */
- 0x7D, 0x0C, 0x18, 0x7F, 0x7D, 0x0A, 0x18, 0x01, /* ; F0-F7 */
- 0x7D, 0x08, 0x19, 0x02, 0x7D, 0x06, 0x19, 0x04 /* ; F8-FF */
- }
- },
- {768, {
- 0x08, 0x12, 0x08, 0x7E, 0x07, 0x12, 0x09, 0x7E, /* ; C0-C7 */
- 0x06, 0x12, 0x0A, 0x7E, 0x05, 0x11, 0x0B, 0x7F, /* ; C8-CF */
- 0x04, 0x11, 0x0C, 0x7F, 0x03, 0x11, 0x0C, 0x00, /* ; D0-D7 */
- 0x03, 0x10, 0x0D, 0x00, 0x02, 0x0F, 0x0E, 0x01, /* ; D8-DF */
- 0x01, 0x0F, 0x0F, 0x01, 0x01, 0x0E, 0x0F, 0x02, /* ; E0-E7 */
- 0x00, 0x0D, 0x10, 0x03, 0x7F, 0x0C, 0x11, 0x04, /* ; EA-EF */
- 0x7F, 0x0C, 0x11, 0x04, 0x7F, 0x0B, 0x11, 0x05, /* ; F0-F7 */
- 0x7E, 0x0A, 0x12, 0x06, 0x7E, 0x09, 0x12, 0x07 /* ; F8-FF */
- }
- },
- {0xFFFF, {
- 0x04, 0x1A, 0x04, 0x7E, 0x02, 0x1B, 0x05, 0x7E, /* ; C0-C7 */
- 0x01, 0x1A, 0x07, 0x7E, 0x00, 0x1A, 0x09, 0x7D, /* ; C8-CF */
- 0x7F, 0x19, 0x0B, 0x7D, 0x7E, 0x18, 0x0D, 0x7D, /* ; D0-D7 */
- 0x7D, 0x17, 0x10, 0x7C, 0x7D, 0x15, 0x12, 0x7C, /* ; D8-DF */
- 0x7C, 0x14, 0x14, 0x7C, 0x7C, 0x12, 0x15, 0x7D, /* ; E0-E7 */
- 0x7C, 0x10, 0x17, 0x7D, 0x7C, 0x0D, 0x18, 0x7F, /* ; EA-EF */
- 0x7D, 0x0B, 0x19, 0x7F, 0x7D, 0x09, 0x1A, 0x00, /* ; F0-F7 */
- 0x7D, 0x07, 0x1A, 0x02, 0x7E, 0x05, 0x1B, 0x02 /* ; F8-FF */
- }
- }
-};
-
-static const struct XGI301C_Tap4TimingStruct xgifb_ntsc_525_tap4_timing[] = {
- {480, {
- 0x04, 0x1A, 0x04, 0x7E, 0x03, 0x1A, 0x06, 0x7D, /* ; C0-C7 */
- 0x01, 0x1A, 0x08, 0x7D, 0x00, 0x19, 0x0A, 0x7D, /* ; C8-CF */
- 0x7F, 0x19, 0x0C, 0x7C, 0x7E, 0x18, 0x0E, 0x7C, /* ; D0-D7 */
- 0x7E, 0x17, 0x10, 0x7B, 0x7D, 0x15, 0x12, 0x7C, /* ; D8-DF */
- 0x7D, 0x13, 0x13, 0x7D, 0x7C, 0x12, 0x15, 0x7D, /* ; E0-E7 */
- 0x7C, 0x10, 0x17, 0x7D, 0x7C, 0x0E, 0x18, 0x7E, /* ; EA-EF */
- 0x7D, 0x0C, 0x19, 0x7E, 0x7D, 0x0A, 0x19, 0x00, /* ; F0-F7 */
- 0x7D, 0x08, 0x1A, 0x01, 0x7E, 0x06, 0x1A, 0x02 /* ; F8-FF */
- }
- },
- {600, {
- 0x07, 0x14, 0x07, 0x7E, 0x06, 0x14, 0x09, 0x7D, /* ; C0-C7 */
- 0x05, 0x14, 0x0A, 0x7D, 0x04, 0x13, 0x0B, 0x7E, /* ; C8-CF */
- 0x03, 0x13, 0x0C, 0x7E, 0x02, 0x12, 0x0D, 0x7F, /* ; D0-D7 */
- 0x01, 0x12, 0x0E, 0x7F, 0x01, 0x11, 0x0F, 0x7F, /* ; D8-DF */
- 0x01, 0x10, 0x10, 0x00, 0x7F, 0x0F, 0x11, 0x01, /* ; E0-E7 */
- 0x7F, 0x0E, 0x12, 0x01, 0x7E, 0x0D, 0x12, 0x03, /* ; EA-EF */
- 0x7E, 0x0C, 0x13, 0x03, 0x7E, 0x0B, 0x13, 0x04, /* ; F0-F7 */
- 0x7E, 0x0A, 0x14, 0x04, 0x7D, 0x09, 0x14, 0x06 /* ; F8-FF */
- }
- },
- {0xFFFF, {
- 0x09, 0x0F, 0x09, 0x7F, 0x08, 0x0F, 0x09, 0x00, /* ; C0-C7 */
- 0x07, 0x0F, 0x0A, 0x00, 0x06, 0x0F, 0x0A, 0x01, /* ; C8-CF */
- 0x06, 0x0E, 0x0B, 0x01, 0x05, 0x0E, 0x0B, 0x02, /* ; D0-D7 */
- 0x04, 0x0E, 0x0C, 0x02, 0x04, 0x0D, 0x0C, 0x03, /* ; D8-DF */
- 0x03, 0x0D, 0x0D, 0x03, 0x02, 0x0C, 0x0D, 0x05, /* ; E0-E7 */
- 0x02, 0x0C, 0x0E, 0x04, 0x01, 0x0B, 0x0E, 0x06, /* ; EA-EF */
- 0x01, 0x0B, 0x0E, 0x06, 0x00, 0x0A, 0x0F, 0x07, /* ; F0-F7 */
- 0x00, 0x0A, 0x0F, 0x07, 0x00, 0x09, 0x0F, 0x08 /* ; F8-FF */
- }
- }
-};
-
-static const struct XGI301C_Tap4TimingStruct YPbPr750pTap4Timing[] = {
- {0xFFFF, {
- 0x05, 0x19, 0x05, 0x7D, 0x03, 0x19, 0x06, 0x7E, /* ; C0-C7 */
- 0x02, 0x19, 0x08, 0x7D, 0x01, 0x18, 0x0A, 0x7D, /* ; C8-CF */
- 0x00, 0x18, 0x0C, 0x7C, 0x7F, 0x17, 0x0E, 0x7C, /* ; D0-D7 */
- 0x7E, 0x16, 0x0F, 0x7D, 0x7E, 0x14, 0x11, 0x7D, /* ; D8-DF */
- 0x7D, 0x13, 0x13, 0x7D, 0x7D, 0x11, 0x14, 0x7E, /* ; E0-E7 */
- 0x7D, 0x0F, 0x16, 0x7E, 0x7D, 0x0E, 0x17, 0x7E, /* ; EA-EF */
- 0x7D, 0x0C, 0x18, 0x7F, 0x7D, 0x0A, 0x18, 0x01, /* ; F0-F7 */
- 0x7D, 0x08, 0x19, 0x02, 0x7D, 0x06, 0x19, 0x04 /* F8-FF */
- }
- }
-};
-#endif
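
The deleted tables above pair two PLL register bytes (SR2B/SR2C) with a rounded pixel clock in MHz, and the VCLK arrays are terminated by a {0xFF, 0x00, 0} end mark rather than a stored length. A minimal, self-contained model of how such a table is walked is sketched below; the struct and field names are illustrative stand-ins, not the layout from the SiS/XGI headers.

/*
 * Illustrative model only: the real struct SiS_VCLKData lives in the SiS
 * fbdev headers; the field names below are assumed to mirror its layout.
 */
#include <stdio.h>

struct vclk_entry {
	unsigned char sr2b, sr2c;	/* PLL register values (SR2B/SR2C) */
	unsigned short mhz;		/* rounded pixel clock in MHz */
};

static const struct vclk_entry vclk_table[] = {
	{0x1B, 0xE1, 25},	/* 25.175 MHz */
	{0x4E, 0xE4, 28},	/* 28.322 MHz */
	{0x6C, 0xC3, 65},	/* 65.000 MHz */
	{0xFF, 0x00, 0}		/* end mark */
};

/* Walk the table until the {0xFF, 0x00, 0} sentinel, as the driver did. */
static int find_vclk_index(unsigned short want_mhz)
{
	int i;

	for (i = 0; vclk_table[i].sr2b != 0xFF; i++)
		if (vclk_table[i].mhz == want_mhz)
			return i;
	return -1;
}

int main(void)
{
	int idx = find_vclk_index(65);

	if (idx >= 0)
		printf("65 MHz -> index %d: SR2B=0x%02X SR2C=0x%02X\n",
		       idx, vclk_table[idx].sr2b, vclk_table[idx].sr2c);
	return 0;
}
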
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
deleted file mode 100644
index 0f6d5aac04f6..000000000000
--- a/drivers/staging/xgifb/vb_util.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VBUTIL_
-#define _VBUTIL_
-static inline void xgifb_reg_set(unsigned long port, u8 index, u8 data)
-{
- outb(index, port);
- outb(data, port + 1);
-}
-
-static inline u8 xgifb_reg_get(unsigned long port, u8 index)
-{
- outb(index, port);
- return inb(port + 1);
-}
-
-static inline void xgifb_reg_and_or(unsigned long port, u8 index,
- unsigned int data_and, unsigned int data_or)
-{
- u8 temp;
-
- temp = xgifb_reg_get(port, index);
- temp = (u8)((temp & data_and) | data_or);
- xgifb_reg_set(port, index, temp);
-}
-
-static inline void xgifb_reg_and(unsigned long port, u8 index,
- unsigned int data_and)
-{
- u8 temp;
-
- temp = xgifb_reg_get(port, index);
- temp = (u8)(temp & data_and);
- xgifb_reg_set(port, index, temp);
-}
-
-static inline void xgifb_reg_or(unsigned long port, u8 index,
- unsigned int data_or)
-{
- u8 temp;
-
- temp = xgifb_reg_get(port, index);
- temp |= data_or;
- xgifb_reg_set(port, index, temp);
-}
-#endif
-
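
The removed vb_util.h helpers wrap the indexed-register protocol used throughout the driver: the index byte goes to the base I/O port, and the data latch at port + 1 is then read or written, with xgifb_reg_and_or()/_and()/_or() layering read-modify-write on top. A small standalone model of that pattern follows, with the two-port latch simulated by an array so it runs without hardware; the names are illustrative.

#include <stdio.h>
#include <stdint.h>

static uint8_t regs[256];	/* simulated register file behind the latch */
static uint8_t cur_index;	/* last index written to the "base port" */

static void reg_set(uint8_t index, uint8_t data)
{
	cur_index = index;		/* outb(index, port)    */
	regs[cur_index] = data;		/* outb(data, port + 1) */
}

static uint8_t reg_get(uint8_t index)
{
	cur_index = index;		/* outb(index, port)    */
	return regs[cur_index];		/* inb(port + 1)        */
}

/* Read-modify-write, the same shape as xgifb_reg_and_or() above. */
static void reg_and_or(uint8_t index, uint8_t data_and, uint8_t data_or)
{
	uint8_t temp = reg_get(index);

	reg_set(index, (uint8_t)((temp & data_and) | data_or));
}

int main(void)
{
	reg_set(0x2B, 0xF0);
	reg_and_or(0x2B, 0x0F, 0x80);	/* keep low nibble (zero here), set bit 7 */
	printf("reg 0x2B = 0x%02X\n", reg_get(0x2B));	/* prints 0x80 */
	return 0;
}
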
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
deleted file mode 100644
index 22919f2368d5..000000000000
--- a/drivers/staging/xgifb/vgatypes.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VGATYPES_
-#define _VGATYPES_
-
-#include <linux/fb.h> /* for struct fb_var_screeninfo for sis.h */
-#include "../../video/fbdev/sis/vgatypes.h"
-#include "../../video/fbdev/sis/sis.h" /* for LCD_TYPE */
-
-enum XGI_VB_CHIP_TYPE {
- VB_CHIP_Legacy = 0,
- VB_CHIP_301,
- VB_CHIP_301B,
- VB_CHIP_301LV,
- VB_CHIP_302,
- VB_CHIP_302B,
- VB_CHIP_302LV,
- VB_CHIP_301C,
- VB_CHIP_302ELV,
- VB_CHIP_UNKNOWN, /* other video bridge or no video bridge */
- MAX_VB_CHIP
-};
-
-struct xgi_hw_device_info {
- unsigned long ulExternalChip; /* NO VB or other video bridge*/
- /* if ujVBChipID = VB_CHIP_UNKNOWN, */
-
- void __iomem *pjVideoMemoryAddress;/* base virtual memory address */
- /* of Linear VGA memory */
-
- unsigned long ulVideoMemorySize; /* size, in bytes, of the
- * memory on the board
- */
-
- unsigned char jChipType; /* Used to Identify Graphics Chip */
- /* defined in the data structure type */
- /* "XGI_CHIP_TYPE" */
-
- unsigned char jChipRevision; /* Used to Identify Graphics
- * Chip Revision
- */
-
- unsigned char ujVBChipID; /* the ID of video bridge */
- /* defined in the data structure type */
- /* "XGI_VB_CHIP_TYPE" */
-
- unsigned long ulCRT2LCDType; /* defined in the data structure type */
-};
-
-/* Additional IOCTL for communication xgifb <> X driver */
-/* If changing this, xgifb.h must also be changed (for xgifb) */
-#endif
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 984941e036c8..bd15a564fe24 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void)
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
- pr_err("nable to kmem_cache_create() for"
+ pr_err("Unable to kmem_cache_create() for"
" lio_qr_cache\n");
goto bitmap_out;
}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 72016d0dfca5..8e7fffbb8802 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -852,6 +852,12 @@ static ssize_t pi_prot_type_store(struct config_item *item,
return count;
}
+/* always zero, but attr needs to remain RW to avoid userspace breakage */
+static ssize_t pi_prot_format_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "0\n");
+}
+
static ssize_t pi_prot_format_store(struct config_item *item,
const char *page, size_t count)
{
@@ -1132,7 +1138,7 @@ CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
-CONFIGFS_ATTR_WO(, pi_prot_format);
+CONFIGFS_ATTR(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 1e6d24943565..5831e0eecea1 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -148,7 +148,7 @@ struct tcmu_dev {
size_t ring_size;
struct mutex cmdr_lock;
- struct list_head cmdr_queue;
+ struct list_head qfull_queue;
uint32_t dbi_max;
uint32_t dbi_thresh;
@@ -159,6 +159,7 @@ struct tcmu_dev {
struct timer_list cmd_timer;
unsigned int cmd_time_out;
+ struct list_head inflight_queue;
struct timer_list qfull_timer;
int qfull_time_out;
@@ -179,7 +180,7 @@ struct tcmu_dev {
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
- struct list_head cmdr_queue_entry;
+ struct list_head queue_entry;
uint16_t cmd_id;
@@ -192,6 +193,7 @@ struct tcmu_cmd {
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_INFLIGHT 1
unsigned long flags;
};
/*
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
if (!tcmu_cmd)
return NULL;
- INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
+ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
@@ -915,11 +917,13 @@ setup_timer:
return 0;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
- mod_timer(timer, tcmu_cmd->deadline);
+ if (!timer_pending(timer))
+ mod_timer(timer, tcmu_cmd->deadline);
+
return 0;
}
-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
if (ret)
return ret;
- list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
tcmu_cmd->cmd_id, udev->name);
return 0;
@@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- if (!list_empty(&udev->cmdr_queue))
+ if (!list_empty(&udev->qfull_queue))
goto queue;
mb = udev->mb_addr;
@@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
+ set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
+
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
return 0;
queue:
- if (add_to_cmdr_queue(tcmu_cmd)) {
+ if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
goto out;
+ list_del_init(&cmd->queue_entry);
+
tcmu_cmd_reset_dbi_cur(cmd);
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
@@ -1194,9 +1203,29 @@ out:
tcmu_free_cmd(cmd);
}
+static void tcmu_set_next_deadline(struct list_head *queue,
+ struct timer_list *timer)
+{
+ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
+ unsigned long deadline = 0;
+
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
+ if (!time_after(jiffies, tcmu_cmd->deadline)) {
+ deadline = tcmu_cmd->deadline;
+ break;
+ }
+ }
+
+ if (deadline)
+ mod_timer(timer, deadline);
+ else
+ del_timer(timer);
+}
+
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
+ struct tcmu_cmd *cmd;
int handled = 0;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
- struct tcmu_cmd *cmd;
tcmu_flush_dcache_range(entry, sizeof(*entry));
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
/* no more pending commands */
del_timer(&udev->cmd_timer);
- if (list_empty(&udev->cmdr_queue)) {
+ if (list_empty(&udev->qfull_queue)) {
/*
* no more pending or waiting commands so try to
* reclaim blocks if needed.
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_global_max_blocks)
schedule_delayed_work(&tcmu_unmap_work, 0);
}
+ } else if (udev->cmd_time_out) {
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
}
return handled;
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
if (!time_after(jiffies, cmd->deadline))
return 0;
- is_running = list_empty(&cmd->cmdr_queue_entry);
+ is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
se_cmd = cmd->se_cmd;
if (is_running) {
@@ -1287,9 +1317,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
* target_complete_cmd will translate this to LUN COMM FAILURE
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
+ list_del_init(&cmd->queue_entry);
} else {
- list_del_init(&cmd->cmdr_queue_entry);
-
+ list_del_init(&cmd->queue_entry);
idr_remove(&udev->commands, id);
tcmu_free_cmd(cmd);
scsi_status = SAM_STAT_TASK_SET_FULL;
@@ -1372,7 +1402,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
- INIT_LIST_HEAD(&udev->cmdr_queue);
+ INIT_LIST_HEAD(&udev->qfull_queue);
+ INIT_LIST_HEAD(&udev->inflight_queue);
idr_init(&udev->commands);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1414,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
+static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
@@ -1391,15 +1422,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
sense_reason_t scsi_ret;
int ret;
- if (list_empty(&udev->cmdr_queue))
+ if (list_empty(&udev->qfull_queue))
return true;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
- list_splice_init(&udev->cmdr_queue, &cmds);
+ list_splice_init(&udev->qfull_queue, &cmds);
- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
- list_del_init(&tcmu_cmd->cmdr_queue_entry);
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
+ list_del_init(&tcmu_cmd->queue_entry);
pr_debug("removing cmd %u on dev %s from queue\n",
tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1468,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
* cmd was requeued, so just put all cmds back in
* the queue
*/
- list_splice_tail(&cmds, &udev->cmdr_queue);
+ list_splice_tail(&cmds, &udev->qfull_queue);
drained = false;
- goto done;
+ break;
}
}
- if (list_empty(&udev->cmdr_queue))
- del_timer(&udev->qfull_timer);
-done:
+
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
return drained;
}
@@ -1454,7 +1484,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
mutex_lock(&udev->cmdr_lock);
tcmu_handle_completions(udev);
- run_cmdr_queue(udev, false);
+ run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
return 0;
@@ -1982,7 +2012,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
/* complete IO that has executed successfully */
tcmu_handle_completions(udev);
/* fail IO waiting to be queued */
- run_cmdr_queue(udev, true);
+ run_qfull_queue(udev, true);
unlock:
mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2027,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!list_empty(&cmd->cmdr_queue_entry))
+ if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
continue;
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2036,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
* Userspace was not able to start the
@@ -2666,6 +2697,10 @@ static void check_timedout_devices(void)
mutex_lock(&udev->cmdr_lock);
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
mutex_unlock(&udev->cmdr_lock);
spin_lock_bh(&timed_out_udevs_lock);
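
The tcmu rework above splits the old cmdr_queue into a qfull_queue (commands waiting for ring space) and an inflight_queue (commands already placed on the ring), and both the command and qfull timers are now re-armed from the earliest still-pending deadline by tcmu_set_next_deadline(). A minimal standalone sketch of that re-arm logic follows, with the kernel list and timer machinery replaced by plain C; note the kernel uses time_after(), so jiffies wraparound is handled there, which the plain comparison below ignores.

#include <stdio.h>

struct cmd {
	unsigned long deadline;		/* absolute time, like jiffies */
	struct cmd *next;
};

static unsigned long now;		/* stand-in for jiffies */
static unsigned long timer_expires;	/* 0 means "timer not armed" */

/*
 * Walk the queue in submission order, take the first deadline that has
 * not yet expired, and (re)arm the timer with it; if every entry has
 * expired, stop the timer -- the same shape as tcmu_set_next_deadline().
 */
static void set_next_deadline(struct cmd *queue)
{
	unsigned long deadline = 0;
	struct cmd *c;

	for (c = queue; c; c = c->next) {
		if (now <= c->deadline) {	/* first not-yet-expired entry */
			deadline = c->deadline;
			break;
		}
	}

	timer_expires = deadline;	/* mod_timer() or del_timer() */
}

int main(void)
{
	struct cmd c2 = { .deadline = 300, .next = NULL };
	struct cmd c1 = { .deadline = 100, .next = &c2 };

	now = 200;			/* c1 already expired, c2 still pending */
	set_next_deadline(&c1);
	printf("timer %s at %lu\n",
	       timer_expires ? "armed" : "stopped", timer_expires);
	return 0;
}
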
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 48d262ae2f04..56263ae3b1d7 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -5,3 +5,4 @@ optee-objs += call.o
optee-objs += rpc.o
optee-objs += supp.o
optee-objs += shm_pool.o
+optee-objs += device.o
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index e5efce3c08e2..0842b6e6af82 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -634,6 +634,10 @@ static struct optee *optee_probe(struct device_node *np)
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
+ rc = optee_enumerate_devices();
+ if (rc)
+ goto err;
+
pr_info("initialized driver\n");
return optee;
err:
@@ -699,8 +703,10 @@ static int __init optee_driver_init(void)
return -ENODEV;
np = of_find_matching_node(fw_np, optee_match);
- if (!np || !of_device_is_available(np))
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
optee = optee_probe(np);
of_node_put(np);
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
new file mode 100644
index 000000000000..e3a148521ec1
--- /dev/null
+++ b/drivers/tee/optee/device.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include "optee_private.h"
+
+/*
+ * Get device UUIDs
+ *
+ * [out] memref[0] Array of device UUIDs
+ *
+ * Return codes:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_SHORT_BUFFER - Output buffer size less than required
+ */
+#define PTA_CMD_GET_DEVICES 0x0
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int get_devices(struct tee_context *ctx, u32 session,
+ struct tee_shm *device_shm, u32 *shm_size)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke PTA_CMD_GET_DEVICES function */
+ inv_arg.func = PTA_CMD_GET_DEVICES;
+ inv_arg.session = session;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = device_shm;
+ param[0].u.memref.size = *shm_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(ctx, &inv_arg, param);
+ if ((ret < 0) || ((inv_arg.ret != TEEC_SUCCESS) &&
+ (inv_arg.ret != TEEC_ERROR_SHORT_BUFFER))) {
+ pr_err("PTA_CMD_GET_DEVICES invoke function err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ *shm_size = param[0].u.memref.size;
+
+ return 0;
+}
+
+static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
+{
+ struct tee_client_device *optee_device = NULL;
+ int rc;
+
+ optee_device = kzalloc(sizeof(*optee_device), GFP_KERNEL);
+ if (!optee_device)
+ return -ENOMEM;
+
+ optee_device->dev.bus = &tee_bus_type;
+ dev_set_name(&optee_device->dev, "optee-clnt%u", device_id);
+ uuid_copy(&optee_device->id.uuid, device_uuid);
+
+ rc = device_register(&optee_device->dev);
+ if (rc) {
+ pr_err("device registration failed, err: %d\n", rc);
+ kfree(optee_device);
+ }
+
+ return rc;
+}
+
+int optee_enumerate_devices(void)
+{
+ const uuid_t pta_uuid =
+ UUID_INIT(0x7011a688, 0xddde, 0x4053,
+ 0xa5, 0xa9, 0x7b, 0x3c, 0x4d, 0xdf, 0x13, 0xb8);
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *device_shm = NULL;
+ const uuid_t *device_uuid = NULL;
+ struct tee_context *ctx = NULL;
+ u32 shm_size = 0, idx, num_devices = 0;
+ int rc;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with OP-TEE driver */
+ ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+ if (IS_ERR(ctx))
+ return -ENODEV;
+
+ /* Open session with device enumeration pseudo TA */
+ memcpy(sess_arg.uuid, pta_uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != TEEC_SUCCESS)) {
+ /* Device enumeration pseudo TA not found */
+ rc = 0;
+ goto out_ctx;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, NULL, &shm_size);
+ if (rc < 0 || !shm_size)
+ goto out_sess;
+
+ device_shm = tee_shm_alloc(ctx, shm_size,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(device_shm)) {
+ pr_err("tee_shm_alloc failed\n");
+ rc = PTR_ERR(device_shm);
+ goto out_sess;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size);
+ if (rc < 0)
+ goto out_shm;
+
+ device_uuid = tee_shm_get_va(device_shm, 0);
+ if (IS_ERR(device_uuid)) {
+ pr_err("tee_shm_get_va failed\n");
+ rc = PTR_ERR(device_uuid);
+ goto out_shm;
+ }
+
+ num_devices = shm_size / sizeof(uuid_t);
+
+ for (idx = 0; idx < num_devices; idx++) {
+ rc = optee_register_device(&device_uuid[idx], idx);
+ if (rc)
+ goto out_shm;
+ }
+
+out_shm:
+ tee_shm_free(device_shm);
+out_sess:
+ tee_client_close_session(ctx, sess_arg.session);
+out_ctx:
+ tee_client_close_context(ctx);
+
+ return rc;
+}
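
optee_enumerate_devices() above registers one "optee-clnt%u" device per UUID returned by the pseudo TA, and tee_client_device_match() (added to tee_core.c in this same series) binds drivers to them by UUID. A hedged sketch of such a client driver is below; the tee_client_driver/tee_client_device_id layouts are assumed to come from <linux/tee_drv.h>, and the UUID is a placeholder rather than a real trusted application.

// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/module.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>

static int example_tee_probe(struct device *dev)
{
	dev_info(dev, "bound to enumerated OP-TEE device\n");
	return 0;
}

static int example_tee_remove(struct device *dev)
{
	return 0;
}

/* Matched against the device UUID by tee_client_device_match(). */
static const struct tee_client_device_id example_tee_id_table[] = {
	{ UUID_INIT(0x00000000, 0x0000, 0x0000,	/* placeholder UUID */
		    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01) },
	{ }
};
MODULE_DEVICE_TABLE(tee, example_tee_id_table);

static struct tee_client_driver example_tee_driver = {
	.id_table = example_tee_id_table,
	.driver = {
		.name = "example-tee-client",
		.bus = &tee_bus_type,
		.probe = example_tee_probe,
		.remove = example_tee_remove,
	},
};

static int __init example_tee_init(void)
{
	return driver_register(&example_tee_driver.driver);
}
module_init(example_tee_init);

static void __exit example_tee_exit(void)
{
	driver_unregister(&example_tee_driver.driver);
}
module_exit(example_tee_exit);

MODULE_LICENSE("GPL v2");
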
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 30504901be80..795bc19ae17a 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -1,28 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
- * Copyright (c) 2015-2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2015-2019, Linaro Limited
*/
#ifndef _OPTEE_MSG_H
#define _OPTEE_MSG_H
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 35e79386c556..a5e84afd5013 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -28,6 +28,7 @@
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -181,6 +182,8 @@ void optee_free_pages_list(void *array, size_t num_entries);
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
size_t page_offset);
+int optee_enumerate_devices(void);
+
/*
* Small helpers
*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index bbf0cf028c16..c72122d9c997 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -1,28 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
- * Copyright (c) 2015-2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2015-2019, Linaro Limited
*/
#ifndef OPTEE_SMC_H
#define OPTEE_SMC_H
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index 43626e15703a..92f56b8645e3 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -88,10 +88,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
- struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
+ struct optee_supp_req *req;
bool interruptable;
u32 ret;
+ /*
+ * Return in case there is no supplicant available and
+ * non-blocking request.
+ */
+ if (!supp->ctx && ctx->supp_nowait)
+ return TEEC_ERROR_COMMUNICATION;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return TEEC_ERROR_OUT_OF_MEMORY;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 7b2bb4c50058..17c64fccbb10 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/cdev.h>
-#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
@@ -106,6 +105,11 @@ static int tee_open(struct inode *inode, struct file *filp)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ /*
+ * Default user-space behaviour is to wait for tee-supplicant
+ * if it is not present for any requests in this context.
+ */
+ ctx->supp_nowait = false;
filp->private_data = ctx;
return 0;
}
@@ -982,6 +986,16 @@ tee_client_open_context(struct tee_context *start,
} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);
put_device(put_dev);
+ /*
+ * Default behaviour for in-kernel clients is not to wait for
+ * tee-supplicant if it is not present for any requests in this context.
+ * This flag can also be configured again before the call to
+ * tee_client_open_session() if an in-kernel client requires
+ * different behaviour.
+ */
+ if (!IS_ERR(ctx))
+ ctx->supp_nowait = true;
+
return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);
@@ -1027,6 +1041,48 @@ int tee_client_invoke_func(struct tee_context *ctx,
}
EXPORT_SYMBOL_GPL(tee_client_invoke_func);
+int tee_client_cancel_req(struct tee_context *ctx,
+ struct tee_ioctl_cancel_arg *arg)
+{
+ if (!ctx->teedev->desc->ops->cancel_req)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
+ arg->session);
+}
+
+static int tee_client_device_match(struct device *dev,
+ struct device_driver *drv)
+{
+ const struct tee_client_device_id *id_table;
+ struct tee_client_device *tee_device;
+
+ id_table = to_tee_client_driver(drv)->id_table;
+ tee_device = to_tee_client_device(dev);
+
+ while (!uuid_is_null(&id_table->uuid)) {
+ if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
+ return 1;
+ id_table++;
+ }
+
+ return 0;
+}
+
+static int tee_client_device_uevent(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
+
+ return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
+}
+
+struct bus_type tee_bus_type = {
+ .name = "tee",
+ .match = tee_client_device_match,
+ .uevent = tee_client_device_uevent,
+};
+EXPORT_SYMBOL_GPL(tee_bus_type);
+
static int __init tee_init(void)
{
int rc;
@@ -1040,18 +1096,32 @@ static int __init tee_init(void)
rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
if (rc) {
pr_err("failed to allocate char dev region\n");
- class_destroy(tee_class);
- tee_class = NULL;
+ goto out_unreg_class;
+ }
+
+ rc = bus_register(&tee_bus_type);
+ if (rc) {
+ pr_err("failed to register tee bus\n");
+ goto out_unreg_chrdev;
}
+ return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
+out_unreg_class:
+ class_destroy(tee_class);
+ tee_class = NULL;
+
return rc;
}
static void __exit tee_exit(void)
{
+ bus_unregister(&tee_bus_type);
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
class_destroy(tee_class);
tee_class = NULL;
- unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
}
subsys_initcall(tee_init);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 30323426902e..58bb7d72dc2b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -152,6 +152,7 @@ config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
depends on THERMAL_OF
+ depends on THERMAL=y
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dfd23245f778..6fff16113628 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
cdev = __cpufreq_cooling_register(np, policy, capacitance);
if (IS_ERR(cdev)) {
- pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
+ pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
policy->cpu, PTR_ERR(cdev));
cdev = NULL;
}
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 0582bd12a239..0ca908d12750 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -4,7 +4,7 @@
config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
- depends on X86 && ACPI
+ depends on X86 && ACPI && PCI
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 284cf2c5a8fd..8e1cf4d789be 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
struct pci_dev *pci_dev; \
struct platform_device *pdev; \
struct proc_thermal_device *proc_dev; \
-\
+ \
+ if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
+ dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
+ return 0; \
+ } \
+ \
if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
pdev = to_platform_device(dev); \
proc_dev = platform_get_drvdata(pdev); \
@@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev,
*priv = proc_priv;
ret = proc_thermal_read_ppcc(proc_priv);
- if (!ret) {
- ret = sysfs_create_group(&dev->kobj,
- &power_limit_attribute_group);
-
- }
if (ret)
return ret;
@@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev,
proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
if (IS_ERR(proc_priv->int340x_zone)) {
- ret = PTR_ERR(proc_priv->int340x_zone);
- goto remove_group;
+ return PTR_ERR(proc_priv->int340x_zone);
} else
ret = 0;
@@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev,
remove_zone:
int340x_thermal_zone_remove(proc_priv->int340x_zone);
-remove_group:
- sysfs_remove_group(&proc_priv->dev->kobj,
- &power_limit_attribute_group);
return ret;
}
@@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev)
platform_set_drvdata(pdev, proc_priv);
proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
- return 0;
+ dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
+
+ return sysfs_create_group(&pdev->dev.kobj,
+ &power_limit_attribute_group);
}
static int int3401_remove(struct platform_device *pdev)
@@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
proc_priv->soc_dts = intel_soc_dts_iosf_init(
INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
- if (proc_priv->soc_dts && pdev->irq) {
+ if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
ret = pci_enable_msi(pdev);
if (!ret) {
ret = request_threaded_irq(pdev->irq, NULL,
@@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
}
- return 0;
+ dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
+
+ return sysfs_create_group(&pdev->dev.kobj,
+ &power_limit_attribute_group);
}
static void proc_thermal_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4bfdb4a1e47d..2df059cc07e2 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np)
ret = of_property_read_u32(np, "polling-delay-passive", &prop);
if (ret < 0) {
- pr_err("missing polling-delay-passive property\n");
+ pr_err("%pOFn: missing polling-delay-passive property\n", np);
goto free_tz;
}
tz->passive_delay = prop;
ret = of_property_read_u32(np, "polling-delay", &prop);
if (ret < 0) {
- pr_err("missing polling-delay property\n");
+ pr_err("%pOFn: missing polling-delay property\n", np);
goto free_tz;
}
tz->polling_delay = prop;
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 0840d27381ea..e0a04bfc873e 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -441,4 +441,28 @@ config VCC
depends on SUN_LDOMS
help
Support for Sun logical domain consoles.
+
+config LDISC_AUTOLOAD
+ bool "Automatically load TTY Line Disciplines"
+ default y
+ help
+ Historically the kernel has always automatically loaded any
+ line discipline that is in a kernel module when a user asks
+ for it to be loaded with the TIOCSETD ioctl, or through other
+ means. This is not always the best thing to do on systems
+ where you know you will not be using some of the more
+ "ancient" line disciplines, so prevent the kernel from doing
+ this unless the request is coming from a process with the
+ CAP_SYS_MODULE permissions.
+
+ Say 'Y' here if you trust your userspace users to do the right
+ thing, or if you have only provided the line disciplines that
+ you know you will be using, or if you wish to continue to use
+ the traditional method of on-demand loading of these modules
+ by any user.
+
+ This functionality can be changed at runtime with the
+ dev.tty.ldisc_autoload sysctl; this configuration option only
+ sets the default value of this functionality.
+
endif # TTY
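
The new LDISC_AUTOLOAD option only chooses the boot-time default; as the help text notes, the behaviour can be flipped at runtime through the dev.tty.ldisc_autoload sysctl (added elsewhere in this series). A minimal sketch of toggling it from C, assuming the usual /proc/sys/dev/tty/ mapping for the sysctl and root privileges:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/dev/tty/ldisc_autoload", "w");

	if (!f) {
		perror("ldisc_autoload");
		return 1;
	}
	fputs("0\n", f);	/* 0: only CAP_SYS_MODULE may trigger autoload */
	fclose(f);
	return 0;
}
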
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index dc43fa96c3de..5ef08905fe05 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -492,7 +492,7 @@ static void xencons_backend_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index b0baa4ce10f9..6bbf35682d53 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1516,6 +1516,8 @@ static void ipw_send_setup_packet(struct ipw_hardware *hw)
sizeof(struct ipw_setup_get_version_query_packet),
ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_GET_VERSION_QRY);
+ if (!ver_packet)
+ return;
ver_packet->header.length = sizeof(struct tl_setup_get_version_qry);
/*
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 6f7da9a9d76f..c4e16b31f9ab 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -143,8 +143,8 @@ struct gsm_dlci {
struct sk_buff *skb; /* Frame being sent */
struct sk_buff_head skb_list; /* Queued frames */
/* Data handling callback */
- void (*data)(struct gsm_dlci *dlci, u8 *data, int len);
- void (*prev_data)(struct gsm_dlci *dlci, u8 *data, int len);
+ void (*data)(struct gsm_dlci *dlci, const u8 *data, int len);
+ void (*prev_data)(struct gsm_dlci *dlci, const u8 *data, int len);
struct net_device *net; /* network interface, if created */
};
@@ -988,7 +988,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
* Encode up and queue a UI/UIH frame containing our response.
*/
-static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
+static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
int dlen)
{
struct gsm_msg *msg;
@@ -1073,14 +1073,14 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
* and if need be stuff a break message down the tty.
*/
-static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
- u8 *dp = data;
+ const u8 *dp = data;
struct tty_struct *tty;
while (gsm_read_ea(&addr, *dp++) == 0) {
@@ -1134,13 +1134,13 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
* this into the uplink tty if present
*/
-static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
+static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen)
{
struct tty_port *port;
unsigned int addr = 0;
u8 bits;
int len = clen;
- u8 *dp = data;
+ const u8 *dp = data;
while (gsm_read_ea(&addr, *dp++) == 0) {
len--;
@@ -1189,7 +1189,7 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
*/
static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
- u8 *data, int clen)
+ const u8 *data, int clen)
{
u8 buf[1];
unsigned long flags;
@@ -1261,7 +1261,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
*/
static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
- u8 *data, int clen)
+ const u8 *data, int clen)
{
struct gsm_control *ctrl;
unsigned long flags;
@@ -1553,7 +1553,7 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
* open we shovel the bits down it, if not we drop them.
*/
-static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
+static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
{
/* krefs .. */
struct tty_port *port = &dlci->port;
@@ -1565,14 +1565,11 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
pr_debug("%d bytes for tty\n", len);
switch (dlci->adaption) {
/* Unsupported types */
- /* Packetised interruptible data */
- case 4:
+ case 4: /* Packetised interruptible data */
break;
- /* Packetised uininterruptible voice/data */
- case 3:
+ case 3: /* Packetised uninterruptible voice/data */
break;
- /* Asynchronous serial with line state in each frame */
- case 2:
+ case 2: /* Asynchronous serial with line state in each frame */
while (gsm_read_ea(&modem, *data++) == 0) {
len--;
if (len == 0)
@@ -1583,8 +1580,8 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
gsm_process_modem(tty, dlci, modem, clen);
tty_kref_put(tty);
}
- /* Line state will go via DLCI 0 controls only */
- case 1:
+ /* Fall through */
+ case 1: /* Line state will go via DLCI 0 controls only */
default:
tty_insert_flip_string(port, data, len);
tty_flip_buffer_push(port);
@@ -1603,7 +1600,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
* and we divide up the work accordingly.
*/
-static void gsm_dlci_command(struct gsm_dlci *dlci, u8 *data, int len)
+static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
{
/* See what command is involved */
unsigned int command = 0;
@@ -1979,7 +1976,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
gsm->address = 0;
gsm->state = GSM_ADDRESS;
gsm->fcs = INIT_FCS;
- /* Drop through */
+ /* Fall through */
case GSM_ADDRESS: /* Address continuation */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->address, c))
@@ -2214,6 +2211,111 @@ static struct gsm_mux *gsm_alloc_mux(void)
return gsm;
}
+static void gsm_copy_config_values(struct gsm_mux *gsm,
+ struct gsm_config *c)
+{
+ memset(c, 0, sizeof(*c));
+ c->adaption = gsm->adaption;
+ c->encapsulation = gsm->encoding;
+ c->initiator = gsm->initiator;
+ c->t1 = gsm->t1;
+ c->t2 = gsm->t2;
+ c->t3 = 0; /* Not supported */
+ c->n2 = gsm->n2;
+ if (gsm->ftype == UIH)
+ c->i = 1;
+ else
+ c->i = 2;
+ pr_debug("Ftype %d i %d\n", gsm->ftype, c->i);
+ c->mru = gsm->mru;
+ c->mtu = gsm->mtu;
+ c->k = 0;
+}
+
+static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
+{
+ int need_close = 0;
+ int need_restart = 0;
+
+ /* Stuff we don't support yet - UI or I frame transport, windowing */
+ if ((c->adaption != 1 && c->adaption != 2) || c->k)
+ return -EOPNOTSUPP;
+ /* Check the MRU/MTU range looks sane */
+ if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
+ return -EINVAL;
+ if (c->n2 < 3)
+ return -EINVAL;
+ if (c->encapsulation > 1) /* Basic, advanced, no I */
+ return -EINVAL;
+ if (c->initiator > 1)
+ return -EINVAL;
+ if (c->i == 0 || c->i > 2) /* UIH and UI only */
+ return -EINVAL;
+ /*
+ * See what is needed for reconfiguration
+ */
+
+ /* Timing fields */
+ if (c->t1 != 0 && c->t1 != gsm->t1)
+ need_restart = 1;
+ if (c->t2 != 0 && c->t2 != gsm->t2)
+ need_restart = 1;
+ if (c->encapsulation != gsm->encoding)
+ need_restart = 1;
+ if (c->adaption != gsm->adaption)
+ need_restart = 1;
+ /* Requires care */
+ if (c->initiator != gsm->initiator)
+ need_close = 1;
+ if (c->mru != gsm->mru)
+ need_restart = 1;
+ if (c->mtu != gsm->mtu)
+ need_restart = 1;
+
+ /*
+ * Close down what is needed, restart and initiate the new
+ * configuration
+ */
+
+ if (need_close || need_restart) {
+ int ret;
+
+ ret = gsm_disconnect(gsm);
+
+ if (ret)
+ return ret;
+ }
+ if (need_restart)
+ gsm_cleanup_mux(gsm);
+
+ gsm->initiator = c->initiator;
+ gsm->mru = c->mru;
+ gsm->mtu = c->mtu;
+ gsm->encoding = c->encapsulation;
+ gsm->adaption = c->adaption;
+ gsm->n2 = c->n2;
+
+ if (c->i == 1)
+ gsm->ftype = UIH;
+ else if (c->i == 2)
+ gsm->ftype = UI;
+
+ if (c->t1)
+ gsm->t1 = c->t1;
+ if (c->t2)
+ gsm->t2 = c->t2;
+
+ /*
+ * FIXME: We need to separate activation/deactivation from adding
+ * and removing from the mux array
+ */
+ if (need_restart)
+ gsm_activate_mux(gsm);
+ if (gsm->initiator && need_close)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ return 0;
+}
+
/**
* gsmld_output - write to link
* @gsm: our mux
@@ -2495,89 +2597,6 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
return mask;
}
-static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
- struct gsm_config *c)
-{
- int need_close = 0;
- int need_restart = 0;
-
- /* Stuff we don't support yet - UI or I frame transport, windowing */
- if ((c->adaption != 1 && c->adaption != 2) || c->k)
- return -EOPNOTSUPP;
- /* Check the MRU/MTU range looks sane */
- if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
- return -EINVAL;
- if (c->n2 < 3)
- return -EINVAL;
- if (c->encapsulation > 1) /* Basic, advanced, no I */
- return -EINVAL;
- if (c->initiator > 1)
- return -EINVAL;
- if (c->i == 0 || c->i > 2) /* UIH and UI only */
- return -EINVAL;
- /*
- * See what is needed for reconfiguration
- */
-
- /* Timing fields */
- if (c->t1 != 0 && c->t1 != gsm->t1)
- need_restart = 1;
- if (c->t2 != 0 && c->t2 != gsm->t2)
- need_restart = 1;
- if (c->encapsulation != gsm->encoding)
- need_restart = 1;
- if (c->adaption != gsm->adaption)
- need_restart = 1;
- /* Requires care */
- if (c->initiator != gsm->initiator)
- need_close = 1;
- if (c->mru != gsm->mru)
- need_restart = 1;
- if (c->mtu != gsm->mtu)
- need_restart = 1;
-
- /*
- * Close down what is needed, restart and initiate the new
- * configuration
- */
-
- if (need_close || need_restart) {
- int ret;
-
- ret = gsm_disconnect(gsm);
-
- if (ret)
- return ret;
- }
- if (need_restart)
- gsm_cleanup_mux(gsm);
-
- gsm->initiator = c->initiator;
- gsm->mru = c->mru;
- gsm->mtu = c->mtu;
- gsm->encoding = c->encapsulation;
- gsm->adaption = c->adaption;
- gsm->n2 = c->n2;
-
- if (c->i == 1)
- gsm->ftype = UIH;
- else if (c->i == 2)
- gsm->ftype = UI;
-
- if (c->t1)
- gsm->t1 = c->t1;
- if (c->t2)
- gsm->t2 = c->t2;
-
- /* FIXME: We need to separate activation/deactivation from adding
- and removing from the mux array */
- if (need_restart)
- gsm_activate_mux(gsm);
- if (gsm->initiator && need_close)
- gsm_dlci_begin_open(gsm->dlci[0]);
- return 0;
-}
-
static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -2586,29 +2605,14 @@ static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
switch (cmd) {
case GSMIOC_GETCONF:
- memset(&c, 0, sizeof(c));
- c.adaption = gsm->adaption;
- c.encapsulation = gsm->encoding;
- c.initiator = gsm->initiator;
- c.t1 = gsm->t1;
- c.t2 = gsm->t2;
- c.t3 = 0; /* Not supported */
- c.n2 = gsm->n2;
- if (gsm->ftype == UIH)
- c.i = 1;
- else
- c.i = 2;
- pr_debug("Ftype %d i %d\n", gsm->ftype, c.i);
- c.mru = gsm->mru;
- c.mtu = gsm->mtu;
- c.k = 0;
+ gsm_copy_config_values(gsm, &c);
if (copy_to_user((void *)arg, &c, sizeof(c)))
return -EFAULT;
return 0;
case GSMIOC_SETCONF:
if (copy_from_user(&c, (void *)arg, sizeof(c)))
return -EFAULT;
- return gsmld_config(tty, gsm, &c);
+ return gsm_config(gsm, &c);
default:
return n_tty_ioctl_helper(tty, file, cmd, arg);
}
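
The two hunks above move the GETCONF/SETCONF bookkeeping out of the line discipline into gsm_copy_config_values() and gsm_config(), but the user-space interface stays the same. A minimal sketch of exercising it through the n_gsm ioctls (the device path and chosen settings are assumptions, error handling trimmed):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/tty.h>      /* N_GSM0710 */
    #include <linux/gsmmux.h>   /* struct gsm_config, GSMIOC_GETCONF/SETCONF */

    int fd = open("/dev/ttyUSB0", O_RDWR);   /* assumed modem tty */
    int ldisc = N_GSM0710;
    struct gsm_config c;

    ioctl(fd, TIOCSETD, &ldisc);             /* attach the n_gsm line discipline */
    ioctl(fd, GSMIOC_GETCONF, &c);           /* now served by gsm_copy_config_values() */
    c.initiator = 1;                         /* we start the mux */
    ioctl(fd, GSMIOC_SETCONF, &c);           /* validated and applied by gsm_config() */
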
@@ -2695,7 +2699,7 @@ static void gsm_mux_net_tx_timeout(struct net_device *net)
}
static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
- unsigned char *in_buf, int size)
+ const unsigned char *in_buf, int size)
{
struct net_device *net = dlci->net;
struct sk_buff *skb;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 4164414d4c64..e55c79eb6430 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
/* too large for caller's buffer */
ret = -EOVERFLOW;
} else {
+ __set_current_state(TASK_RUNNING);
if (copy_to_user(buf, rbuf->buf, rbuf->count))
ret = -EFAULT;
else
@@ -776,7 +777,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
case TCOFLUSH:
flush_tx_queue(tty);
}
- /* fall through to default */
+ /* fall through - to default */
default:
error = n_tty_ioctl_helper(tty, file, cmd, arg);
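
The one-line n_hdlc change above restores TASK_RUNNING before the received frame is copied out: the read loop marks the task TASK_INTERRUPTIBLE while polling for data, and copy_to_user() may fault and sleep, which trips the "do not call blocking ops when !TASK_RUNNING" debug warning. A minimal sketch of the pattern, with a hypothetical helper name:

    for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            rbuf = dequeue_rx_frame(n_hdlc);        /* hypothetical helper */
            if (rbuf)
                    break;
            if (signal_pending(current))
                    return -EINTR;
            schedule();                             /* actually sleep */
    }
    __set_current_state(TASK_RUNNING);              /* restore the state first... */
    if (copy_to_user(buf, rbuf->buf, rbuf->count))  /* ...then calls that may sleep */
            return -EFAULT;
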
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5dc9686697cf..9cdb0fa3c4bf 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -50,8 +50,10 @@
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
-
-/* number of characters left in xmit buffer before select has we have room */
+/*
+ * Until this number of characters is queued in the xmit buffer, select will
+ * return "we have room for writes".
+ */
#define WAKEUP_CHARS 256
/*
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index fed820e9ab9d..3214e22e79f3 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -1317,7 +1317,6 @@ static void remove_sysfs_files(struct nozomi *dc)
static int nozomi_card_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- resource_size_t start;
int ret;
struct nozomi *dc = NULL;
int ndev_idx;
@@ -1357,17 +1356,10 @@ static int nozomi_card_init(struct pci_dev *pdev,
goto err_disable_device;
}
- start = pci_resource_start(dc->pdev, 0);
- if (start == 0) {
- dev_err(&pdev->dev, "No I/O address for card detected\n");
- ret = -ENODEV;
- goto err_rel_regs;
- }
-
/* Find out what card type it is */
nozomi_get_card_type(dc);
- dc->base_addr = ioremap_nocache(start, dc->card_type);
+ dc->base_addr = pci_iomap(dc->pdev, 0, dc->card_type);
if (!dc->base_addr) {
dev_err(&pdev->dev, "Unable to map card MMIO\n");
ret = -ENODEV;
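
Switching nozomi to pci_iomap() folds the two removed steps into one call: it looks up the BAR itself and ioremaps it, returning NULL when the BAR is unassigned or the mapping fails, so the explicit pci_resource_start() == 0 check becomes redundant. Generic sketch (the length variable is illustrative):

    void __iomem *base;

    base = pci_iomap(pdev, 0, region_len);   /* map BAR 0, at most region_len bytes */
    if (!base)
            return -ENODEV;                  /* no usable BAR */

    /* ... ioread32(base + reg) / iowrite32(val, base + reg) ... */

    pci_iounmap(pdev, base);                 /* on teardown */
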
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index fa1672993b4c..d1cdd2ab8b4c 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -233,7 +233,7 @@ static int ttyport_get_tiocm(struct serdev_controller *ctrl)
if (!tty->ops->tiocmget)
return -ENOTSUPP;
- return tty->driver->ops->tiocmget(tty);
+ return tty->ops->tiocmget(tty);
}
static int ttyport_set_tiocm(struct serdev_controller *ctrl, unsigned int set, unsigned int clear)
@@ -244,7 +244,7 @@ static int ttyport_set_tiocm(struct serdev_controller *ctrl, unsigned int set, u
if (!tty->ops->tiocmset)
return -ENOTSUPP;
- return tty->driver->ops->tiocmset(tty, set, clear);
+ return tty->ops->tiocmset(tty, set, clear);
}
static const struct serdev_controller_ops ctrl_ops = {
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 189ab1212d9a..e441221e04b9 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1070,15 +1070,16 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
ret = 0;
}
- }
- /* Initialise interrupt backoff work if required */
- if (up->overrun_backoff_time_ms > 0) {
- uart->overrun_backoff_time_ms = up->overrun_backoff_time_ms;
- INIT_DELAYED_WORK(&uart->overrun_backoff,
- serial_8250_overrun_backoff_work);
- } else {
- uart->overrun_backoff_time_ms = 0;
+ /* Initialise interrupt backoff work if required */
+ if (up->overrun_backoff_time_ms > 0) {
+ uart->overrun_backoff_time_ms =
+ up->overrun_backoff_time_ms;
+ INIT_DELAYED_WORK(&uart->overrun_backoff,
+ serial_8250_overrun_backoff_work);
+ } else {
+ uart->overrun_backoff_time_ms = 0;
+ }
}
mutex_unlock(&serial_mutex);
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index 15a8c8dfa92b..424c07c5f629 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -129,22 +129,21 @@ static int __init ingenic_early_console_setup(struct earlycon_device *dev,
return 0;
}
-EARLYCON_DECLARE(jz4740_uart, ingenic_early_console_setup);
OF_EARLYCON_DECLARE(jz4740_uart, "ingenic,jz4740-uart",
ingenic_early_console_setup);
-EARLYCON_DECLARE(jz4770_uart, ingenic_early_console_setup);
OF_EARLYCON_DECLARE(jz4770_uart, "ingenic,jz4770-uart",
ingenic_early_console_setup);
-EARLYCON_DECLARE(jz4775_uart, ingenic_early_console_setup);
OF_EARLYCON_DECLARE(jz4775_uart, "ingenic,jz4775-uart",
ingenic_early_console_setup);
-EARLYCON_DECLARE(jz4780_uart, ingenic_early_console_setup);
OF_EARLYCON_DECLARE(jz4780_uart, "ingenic,jz4780-uart",
ingenic_early_console_setup);
+OF_EARLYCON_DECLARE(x1000_uart, "ingenic,x1000-uart",
+ ingenic_early_console_setup);
+
static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
{
int ier;
@@ -328,12 +327,18 @@ static const struct ingenic_uart_config jz4780_uart_config = {
.fifosize = 64,
};
+static const struct ingenic_uart_config x1000_uart_config = {
+ .tx_loadsz = 32,
+ .fifosize = 64,
+};
+
static const struct of_device_id of_match[] = {
{ .compatible = "ingenic,jz4740-uart", .data = &jz4740_uart_config },
{ .compatible = "ingenic,jz4760-uart", .data = &jz4760_uart_config },
{ .compatible = "ingenic,jz4770-uart", .data = &jz4760_uart_config },
{ .compatible = "ingenic,jz4775-uart", .data = &jz4760_uart_config },
{ .compatible = "ingenic,jz4780-uart", .data = &jz4780_uart_config },
+ { .compatible = "ingenic,x1000-uart", .data = &x1000_uart_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_match);
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index e2c407656fa6..c1fdbc0b6840 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
if (dmacnt == 2) {
data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma),
GFP_KERNEL);
+ if (!data->dma)
+ return -ENOMEM;
+
data->dma->fn = mtk8250_dma_filter;
data->dma->rx_size = MTK_UART_RX_SIZE;
data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index a1a85805d010..0277479c87e9 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -130,6 +130,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->flags |= UPF_IOREMAP;
}
+ /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
+ if (of_device_is_compatible(np, "mrvl,mmp-uart"))
+ port->regshift = 2;
+
/* Check for registers offset within the devices address range */
if (of_property_read_u32(np, "reg-shift", &prop) == 0)
port->regshift = prop;
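
The forced port->regshift = 2 for "mrvl,mmp-uart" keeps device trees written for the old pxa driver working: the 8250 MMIO accessors compute a register's offset as its 8250 register number shifted left by regshift, so a shift of 2 places the byte-wide registers on 32-bit boundaries. Illustrative arithmetic:

    /* roughly what the 8250 memory accessors do */
    offset = reg << port->regshift;

    /* regshift = 2:  UART_RX/UART_TX (reg 0) -> byte offset 0,
     *                UART_LCR        (reg 3) -> byte offset 12 */
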
@@ -327,6 +331,7 @@ static const struct of_device_id of_platform_serial_table[] = {
{ .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
{ .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, },
{ .compatible = "ralink,rt2880-uart", .data = (void *)PORT_RT2880, },
+ { .compatible = "intel,xscale-uart", .data = (void *)PORT_XSCALE, },
{ .compatible = "altr,16550-FIFO32",
.data = (void *)PORT_ALTR_16550_F32, },
{ .compatible = "altr,16550-FIFO64",
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index ad7ba7d0f28d..0a8316632d75 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -12,6 +12,7 @@
#define SUPPORT_SYSRQ
#endif
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -1134,10 +1135,12 @@ static int omap8250_probe(struct platform_device *pdev)
{
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ struct device_node *np = pdev->dev.of_node;
struct omap8250_priv *priv;
struct uart_8250_port up;
int ret;
void __iomem *membase;
+ const struct of_device_id *id;
if (!regs || !irq) {
dev_err(&pdev->dev, "missing registers or irq\n");
@@ -1194,27 +1197,31 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.unthrottle = omap_8250_unthrottle;
up.port.rs485_config = omap_8250_rs485_config;
- if (pdev->dev.of_node) {
- const struct of_device_id *id;
-
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
-
- of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &up.port.uartclk);
- priv->wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
-
- id = of_match_device(of_match_ptr(omap8250_dt_ids), &pdev->dev);
- if (id && id->data)
- priv->habit |= *(u8 *)id->data;
- } else {
- ret = pdev->id;
- }
+ ret = of_alias_get_id(np, "serial");
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias/pdev id\n");
+ dev_err(&pdev->dev, "failed to get alias\n");
return ret;
}
up.port.line = ret;
+ if (of_property_read_u32(np, "clock-frequency", &up.port.uartclk)) {
+ struct clk *clk;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ } else {
+ up.port.uartclk = clk_get_rate(clk);
+ }
+ }
+
+ priv->wakeirq = irq_of_parse_and_map(np, 1);
+
+ id = of_match_device(of_match_ptr(omap8250_dt_ids), &pdev->dev);
+ if (id && id->data)
+ priv->habit |= *(u8 *)id->data;
+
if (!up.port.uartclk) {
up.port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
@@ -1242,25 +1249,23 @@ static int omap8250_probe(struct platform_device *pdev)
omap_serial_fill_features_erratas(&up, priv);
up.port.handle_irq = omap8250_no_handle_irq;
#ifdef CONFIG_SERIAL_8250_DMA
- if (pdev->dev.of_node) {
- /*
- * Oh DMA support. If there are no DMA properties in the DT then
- * we will fall back to a generic DMA channel which does not
- * really work here. To ensure that we do not get a generic DMA
- * channel assigned, we have the the_no_dma_filter_fn() here.
- * To avoid "failed to request DMA" messages we check for DMA
- * properties in DT.
- */
- ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
- if (ret == 2) {
- up.dma = &priv->omap8250_dma;
- priv->omap8250_dma.fn = the_no_dma_filter_fn;
- priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
- priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
- priv->omap8250_dma.rx_size = RX_TRIGGER;
- priv->omap8250_dma.rxconf.src_maxburst = RX_TRIGGER;
- priv->omap8250_dma.txconf.dst_maxburst = TX_TRIGGER;
- }
+ /*
+ * Oh DMA support. If there are no DMA properties in the DT then
+ * we will fall back to a generic DMA channel which does not
+ * really work here. To ensure that we do not get a generic DMA
+ * channel assigned, we have the the_no_dma_filter_fn() here.
+ * To avoid "failed to request DMA" messages we check for DMA
+ * properties in DT.
+ */
+ ret = of_property_count_strings(np, "dma-names");
+ if (ret == 2) {
+ up.dma = &priv->omap8250_dma;
+ priv->omap8250_dma.fn = the_no_dma_filter_fn;
+ priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
+ priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
+ priv->omap8250_dma.rx_size = RX_TRIGGER;
+ priv->omap8250_dma.rxconf.src_maxburst = RX_TRIGGER;
+ priv->omap8250_dma.txconf.dst_maxburst = TX_TRIGGER;
}
#endif
ret = serial8250_register_8250_port(&up);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index f80a300b5d68..df41397de478 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2027,6 +2027,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_default_setup,
.exit = pci_plx9050_exit,
},
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
/*
* SBS Technologies, Inc., PMC-OCTALPRO 232
*/
@@ -3375,6 +3480,9 @@ static const struct pci_device_id blacklist[] = {
/* Exar devices */
{ PCI_VDEVICE(EXAR, PCI_ANY_ID), },
{ PCI_VDEVICE(COMMTECH, PCI_ANY_ID), },
+
+ /* End of the black list */
+ { }
};
static int serial_pci_is_class_communication(struct pci_dev *dev)
@@ -3392,25 +3500,6 @@ static int serial_pci_is_class_communication(struct pci_dev *dev)
return 0;
}
-static int serial_pci_is_blacklisted(struct pci_dev *dev)
-{
- const struct pci_device_id *bldev;
-
- /*
- * Do not access blacklisted devices that are known not to
- * feature serial ports or are handled by other modules.
- */
- for (bldev = blacklist;
- bldev < blacklist + ARRAY_SIZE(blacklist);
- bldev++) {
- if (dev->vendor == bldev->vendor &&
- dev->device == bldev->device)
- return -ENODEV;
- }
-
- return 0;
-}
-
/*
* Given a complete unknown PCI device, try to use some heuristics to
* guess what the configuration might be, based on the pitiful PCI
@@ -3420,6 +3509,11 @@ static int
serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
{
int num_iomem, num_port, first_port = -1, i;
+ int rc;
+
+ rc = serial_pci_is_class_communication(dev);
+ if (rc)
+ return rc;
/*
* Should we try to make guesses for multiport serial devices later?
@@ -3629,6 +3723,7 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
struct pci_serial_quirk *quirk;
struct serial_private *priv;
const struct pciserial_board *board;
+ const struct pci_device_id *exclude;
struct pciserial_board tmp;
int rc;
@@ -3647,13 +3742,9 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
board = &pci_boards[ent->driver_data];
- rc = serial_pci_is_class_communication(dev);
- if (rc)
- return rc;
-
- rc = serial_pci_is_blacklisted(dev);
- if (rc)
- return rc;
+ exclude = pci_match_id(blacklist, dev);
+ if (exclude)
+ return -ENODEV;
rc = pcim_enable_device(dev);
pci_save_state(dev);
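
The "{ }" terminator added to blacklist[] further up and the switch to pci_match_id() here belong together: the old serial_pci_is_blacklisted() loop bounded itself with ARRAY_SIZE(), while pci_match_id() walks the table until it hits an all-zero entry. Sketch of the resulting check:

    const struct pci_device_id *exclude;

    exclude = pci_match_id(blacklist, dev);  /* relies on the { } sentinel */
    if (exclude)
            return -ENODEV;                  /* handled elsewhere or has no UARTs */
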
@@ -4574,10 +4665,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
*/
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -4586,10 +4677,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -4598,10 +4689,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -4610,13 +4701,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7951 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -4625,16 +4716,16 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -4643,13 +4734,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
@@ -4658,19 +4749,19 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
/*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index b9bcbe20a2be..c47188860e32 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -113,6 +113,10 @@ static int serial_pxa_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (ret >= 0)
+ uart.port.line = ret;
+
uart.port.type = PORT_XSCALE;
uart.port.iotype = UPIO_MEM32;
uart.port.mapbase = mmres->start;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 67b9bf3b500e..72966bc0ac76 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -85,6 +85,18 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
with "earlycon=smh" on the kernel command line. The console is
enabled when early_param is processed.
+config SERIAL_EARLYCON_RISCV_SBI
+ bool "Early console using RISC-V SBI"
+ depends on RISCV
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ help
+ Support for early debug console using RISC-V SBI. This enables
+ the console before the standard serial driver is probed. This is enabled
+ with "earlycon=sbi" on the kernel command line. The console is
+ enabled when early_param is processed.
+
config SERIAL_SB1250_DUART
tristate "BCM1xxx on-chip DUART serial support"
depends on SIBYTE_SB1xxx_SOC=y
@@ -323,6 +335,28 @@ config SERIAL_TEGRA
are enabled). This driver uses the APB DMA to achieve higher baudrate
and better performance.
+config SERIAL_TEGRA_TCU
+ tristate "NVIDIA Tegra Combined UART"
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX
+ select SERIAL_CORE
+ help
+ Support for the mailbox-based TCU (Tegra Combined UART) serial port.
+ TCU is a virtual serial port that allows multiplexing multiple data
+ streams into a single hardware serial port.
+
+config SERIAL_TEGRA_TCU_CONSOLE
+ bool "Support for console on a Tegra TCU serial port"
+ depends on SERIAL_TEGRA_TCU=y
+ select SERIAL_CORE_CONSOLE
+ default y
+ ---help---
+ If you say Y here, it will be possible to use the Tegra TCU as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode).
+
+ If unsure, say Y.
+
config SERIAL_MAX3100
tristate "MAX3100 support"
depends on SPI
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 8c303736b7e8..40b702aaa85e 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SERIAL_CORE) += serial_core.o
obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o
+obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o
# These Sparc drivers have to appear before others such as 8250
# which share ttySx minor node space. Otherwise console device
@@ -76,6 +77,7 @@ obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
obj-$(CONFIG_SERIAL_SIRFSOC) += sirfsoc_uart.o
obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o
+obj-$(CONFIG_SERIAL_TEGRA_TCU) += tegra-tcu.o
obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o
obj-$(CONFIG_SERIAL_EFM32_UART) += efm32-uart.o
obj-$(CONFIG_SERIAL_ARC) += arc_uart.o
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 98f193a83392..061590795680 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -442,14 +442,10 @@ static struct console clps711x_console = {
static int uart_clps711x_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- int ret, index = np ? of_alias_get_id(np, "serial") : pdev->id;
struct clps711x_port *s;
struct resource *res;
struct clk *uart_clk;
- int irq;
-
- if (index < 0 || index >= UART_CLPS711X_NR)
- return -EINVAL;
+ int irq, ret;
s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
if (!s)
@@ -473,20 +469,11 @@ static int uart_clps711x_probe(struct platform_device *pdev)
if (s->rx_irq < 0)
return s->rx_irq;
- if (!np) {
- char syscon_name[9];
-
- sprintf(syscon_name, "syscon.%i", index + 1);
- s->syscon = syscon_regmap_lookup_by_pdevname(syscon_name);
- if (IS_ERR(s->syscon))
- return PTR_ERR(s->syscon);
- } else {
- s->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
- if (IS_ERR(s->syscon))
- return PTR_ERR(s->syscon);
- }
+ s->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(s->syscon))
+ return PTR_ERR(s->syscon);
- s->port.line = index;
+ s->port.line = of_alias_get_id(np, "serial");
s->port.dev = &pdev->dev;
s->port.iotype = UPIO_MEM32;
s->port.mapbase = res->start;
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c
new file mode 100644
index 000000000000..ce81523c3113
--- /dev/null
+++ b/drivers/tty/serial/earlycon-riscv-sbi.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V SBI based earlycon
+ *
+ * Copyright (C) 2018 Anup Patel <anup@brainfault.org>
+ */
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <asm/sbi.h>
+
+static void sbi_putc(struct uart_port *port, int c)
+{
+ sbi_console_putchar(c);
+}
+
+static void sbi_console_write(struct console *con,
+ const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+ uart_console_write(&dev->port, s, n, sbi_putc);
+}
+
+static int __init early_sbi_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->con->write = sbi_console_write;
+ return 0;
+}
+EARLYCON_DECLARE(sbi, early_sbi_setup);
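
As the new Kconfig help above states, this console is enabled from the kernel command line rather than probed; until a regular serial console registers, output is funnelled through sbi_console_putchar(). An illustrative boot argument (any additional console= setting depends on the target board):

    earlycon=sbi
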
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 241a48e5052c..ea1c85e3b432 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -426,6 +426,17 @@ static void lpuart_dma_tx_complete(void *arg)
spin_unlock_irqrestore(&sport->port.lock, flags);
}
+static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
+{
+ switch (sport->port.iotype) {
+ case UPIO_MEM32:
+ return sport->port.mapbase + UARTDATA;
+ case UPIO_MEM32BE:
+ return sport->port.mapbase + UARTDATA + sizeof(u32) - 1;
+ }
+ return sport->port.mapbase + UARTDR;
+}
+
static int lpuart_dma_tx_request(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
@@ -433,7 +444,7 @@ static int lpuart_dma_tx_request(struct uart_port *port)
struct dma_slave_config dma_tx_sconfig = {};
int ret;
- dma_tx_sconfig.dst_addr = sport->port.mapbase + UARTDR;
+ dma_tx_sconfig.dst_addr = lpuart_dma_datareg_addr(sport);
dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_tx_sconfig.dst_maxburst = 1;
dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
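
lpuart_dma_datareg_addr() exists because the DMA transfers configured here are one byte wide (DMA_SLAVE_BUSWIDTH_1_BYTE) and must point at the actual data byte of the register; on a big-endian mapped 32-bit LPUART that byte is the last one in the word. Worked out:

    /* UPIO_MEM32   : addr = mapbase + UARTDATA                    (LSB comes first)
     * UPIO_MEM32BE : addr = mapbase + UARTDATA + sizeof(u32) - 1  (LSB comes last)
     * otherwise    : addr = mapbase + UARTDR                      (8-bit register layout)
     */
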
@@ -636,13 +647,19 @@ static void lpuart_start_tx(struct uart_port *port)
static void lpuart32_start_tx(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long temp;
- temp = lpuart32_read(port, UARTCTRL);
- lpuart32_write(port, temp | UARTCTRL_TIE, UARTCTRL);
+ if (sport->lpuart_dma_tx_use) {
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(port))
+ lpuart_dma_tx(sport);
+ } else {
+ temp = lpuart32_read(port, UARTCTRL);
+ lpuart32_write(port, temp | UARTCTRL_TIE, UARTCTRL);
- if (lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE)
- lpuart32_transmit_buffer(sport);
+ if (lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE)
+ lpuart32_transmit_buffer(sport);
+ }
}
/* return TIOCSER_TEMT when transmitter is not busy */
@@ -664,8 +681,18 @@ static unsigned int lpuart_tx_empty(struct uart_port *port)
static unsigned int lpuart32_tx_empty(struct uart_port *port)
{
- return (lpuart32_read(port, UARTSTAT) & UARTSTAT_TC) ?
- TIOCSER_TEMT : 0;
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+ unsigned long stat = lpuart32_read(port, UARTSTAT);
+ unsigned long sfifo = lpuart32_read(port, UARTFIFO);
+
+ if (sport->dma_tx_in_progress)
+ return 0;
+
+ if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
+ return TIOCSER_TEMT;
+
+ return 0;
}
static bool lpuart_is_32(struct lpuart_port *sport)
@@ -862,11 +889,10 @@ static irqreturn_t lpuart32_int(int irq, void *dev_id)
rxcount = lpuart32_read(&sport->port, UARTWATER);
rxcount = rxcount >> UARTWATER_RXCNT_OFF;
- if (sts & UARTSTAT_RDRF || rxcount > 0)
+ if ((sts & UARTSTAT_RDRF || rxcount > 0) && !sport->lpuart_dma_rx_use)
lpuart32_rxint(irq, dev_id);
- if ((sts & UARTSTAT_TDRE) &&
- !(lpuart32_read(&sport->port, UARTBAUD) & UARTBAUD_TDMAE))
+ if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use)
lpuart_txint(irq, dev_id);
lpuart32_write(&sport->port, sts, UARTSTAT);
@@ -881,18 +907,31 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
struct circ_buf *ring = &sport->rx_ring;
unsigned long flags;
int count = 0;
- unsigned char sr;
- sr = readb(sport->port.membase + UARTSR1);
+ if (lpuart_is_32(sport)) {
+ unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
- if (sr & (UARTSR1_PE | UARTSR1_FE)) {
- /* Read DR to clear the error flags */
- readb(sport->port.membase + UARTDR);
+ if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
+ /* Read DR to clear the error flags */
+ lpuart32_read(&sport->port, UARTDATA);
+
+ if (sr & UARTSTAT_PE)
+ sport->port.icount.parity++;
+ else if (sr & UARTSTAT_FE)
+ sport->port.icount.frame++;
+ }
+ } else {
+ unsigned char sr = readb(sport->port.membase + UARTSR1);
- if (sr & UARTSR1_PE)
- sport->port.icount.parity++;
- else if (sr & UARTSR1_FE)
- sport->port.icount.frame++;
+ if (sr & (UARTSR1_PE | UARTSR1_FE)) {
+ /* Read DR to clear the error flags */
+ readb(sport->port.membase + UARTDR);
+
+ if (sr & UARTSR1_PE)
+ sport->port.icount.parity++;
+ else if (sr & UARTSR1_FE)
+ sport->port.icount.frame++;
+ }
}
async_tx_ack(sport->dma_rx_desc);
@@ -1015,7 +1054,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
return -EINVAL;
}
- dma_rx_sconfig.src_addr = sport->port.mapbase + UARTDR;
+ dma_rx_sconfig.src_addr = lpuart_dma_datareg_addr(sport);
dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_rx_sconfig.src_maxburst = 1;
dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
@@ -1043,8 +1082,14 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
dma_async_issue_pending(sport->dma_rx_chan);
- writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
- sport->port.membase + UARTCR5);
+ if (lpuart_is_32(sport)) {
+ unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
+
+ lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
+ } else {
+ writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
+ sport->port.membase + UARTCR5);
+ }
return 0;
}
@@ -1334,6 +1379,8 @@ static int lpuart32_startup(struct uart_port *port)
sport->txfifo_size = 0x1 << (((temp >> UARTFIFO_TXSIZE_OFF) &
UARTFIFO_FIFOSIZE_MASK) - 1);
+ sport->port.fifosize = sport->txfifo_size;
+
sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) &
UARTFIFO_FIFOSIZE_MASK) - 1);
@@ -1342,8 +1389,41 @@ static int lpuart32_startup(struct uart_port *port)
lpuart32_setup_watermark(sport);
temp = lpuart32_read(&sport->port, UARTCTRL);
- temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE | UARTCTRL_TE);
- temp |= UARTCTRL_ILIE;
+ temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
+ lpuart32_write(&sport->port, temp, UARTCTRL);
+
+ if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
+ /* set Rx DMA timeout */
+ sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
+ if (!sport->dma_rx_timeout)
+ sport->dma_rx_timeout = 1;
+
+ sport->lpuart_dma_rx_use = true;
+ rx_dma_timer_init(sport);
+ } else {
+ sport->lpuart_dma_rx_use = false;
+ }
+
+ if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
+ init_waitqueue_head(&sport->dma_wait);
+ sport->lpuart_dma_tx_use = true;
+ temp = lpuart32_read(&sport->port, UARTBAUD);
+ lpuart32_write(&sport->port, temp | UARTBAUD_TDMAE, UARTBAUD);
+ } else {
+ sport->lpuart_dma_tx_use = false;
+ }
+
+ if (sport->lpuart_dma_rx_use) {
+ /* RXWATER must be 0 */
+ temp = lpuart32_read(&sport->port, UARTWATER);
+ temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF);
+ lpuart32_write(&sport->port, temp, UARTWATER);
+ }
+ temp = lpuart32_read(&sport->port, UARTCTRL);
+ if (!sport->lpuart_dma_rx_use)
+ temp |= UARTCTRL_RIE;
+ if (!sport->lpuart_dma_tx_use)
+ temp |= UARTCTRL_TIE;
lpuart32_write(&sport->port, temp, UARTCTRL);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1384,6 +1464,8 @@ static void lpuart_shutdown(struct uart_port *port)
static void lpuart32_shutdown(struct uart_port *port)
{
+ struct lpuart_port *sport =
+ container_of(port, struct lpuart_port, port);
unsigned long temp;
unsigned long flags;
@@ -1396,6 +1478,21 @@ static void lpuart32_shutdown(struct uart_port *port)
lpuart32_write(port, temp, UARTCTRL);
spin_unlock_irqrestore(&port->lock, flags);
+
+ if (sport->lpuart_dma_rx_use) {
+ del_timer_sync(&sport->lpuart_timer);
+ lpuart_dma_rx_free(&sport->port);
+ }
+
+ if (sport->lpuart_dma_tx_use) {
+ if (wait_event_interruptible(sport->dma_wait,
+ !sport->dma_tx_in_progress)) {
+ sport->dma_tx_in_progress = false;
+ dmaengine_terminate_all(sport->dma_tx_chan);
+ }
+
+ lpuart32_stop_tx(port);
+ }
}
static void
@@ -1621,7 +1718,10 @@ lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate)
tmp &= ~UARTBAUD_SBR_MASK;
tmp |= sbr & UARTBAUD_SBR_MASK;
- tmp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE);
+ if (!sport->lpuart_dma_rx_use)
+ tmp &= ~UARTBAUD_RDMAE;
+ if (!sport->lpuart_dma_tx_use)
+ tmp &= ~UARTBAUD_TDMAE;
lpuart32_write(&sport->port, tmp, UARTBAUD);
}
@@ -1697,7 +1797,19 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
}
/* ask the core to calculate the divisor */
- baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
+
+ /*
+ * Need to update the Ring buffer length according to the selected
+ * baud rate and restart Rx DMA path.
+ *
+ * Since the timer function acquires sport->port.lock, we need to stop it
+ * before acquiring the same lock, otherwise del_timer_sync() can deadlock.
+ */
+ if (old && sport->lpuart_dma_rx_use) {
+ del_timer_sync(&sport->lpuart_timer);
+ lpuart_dma_rx_free(&sport->port);
+ }
spin_lock_irqsave(&sport->port.lock, flags);
@@ -1737,6 +1849,13 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(&sport->port, ctrl, UARTCTRL);
/* restore control register */
+ if (old && sport->lpuart_dma_rx_use) {
+ if (!lpuart_start_rx_dma(sport))
+ rx_dma_timer_init(sport);
+ else
+ sport->lpuart_dma_rx_use = false;
+ }
+
spin_unlock_irqrestore(&sport->port.lock, flags);
}
@@ -2306,8 +2425,14 @@ static int lpuart_suspend(struct device *dev)
}
/* Disable Rx DMA to use UART port as wakeup source */
- writeb(readb(sport->port.membase + UARTCR5) & ~UARTCR5_RDMAS,
- sport->port.membase + UARTCR5);
+ if (lpuart_is_32(sport)) {
+ temp = lpuart32_read(&sport->port, UARTBAUD);
+ lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
+ UARTBAUD);
+ } else {
+ writeb(readb(sport->port.membase + UARTCR5) &
+ ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
+ }
}
if (sport->lpuart_dma_tx_use) {
@@ -2333,8 +2458,7 @@ static int lpuart_resume(struct device *dev)
if (lpuart_is_32(sport)) {
lpuart32_setup_watermark(sport);
temp = lpuart32_read(&sport->port, UARTCTRL);
- temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
- UARTCTRL_TE | UARTCTRL_ILIE);
+ temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
lpuart32_write(&sport->port, temp, UARTCTRL);
} else {
lpuart_setup_watermark(sport);
@@ -2353,14 +2477,36 @@ static int lpuart_resume(struct device *dev)
}
if (sport->dma_tx_chan && !lpuart_dma_tx_request(&sport->port)) {
- init_waitqueue_head(&sport->dma_wait);
- sport->lpuart_dma_tx_use = true;
+ init_waitqueue_head(&sport->dma_wait);
+ sport->lpuart_dma_tx_use = true;
+ if (lpuart_is_32(sport)) {
+ temp = lpuart32_read(&sport->port, UARTBAUD);
+ lpuart32_write(&sport->port,
+ temp | UARTBAUD_TDMAE, UARTBAUD);
+ } else {
writeb(readb(sport->port.membase + UARTCR5) |
UARTCR5_TDMAS, sport->port.membase + UARTCR5);
+ }
} else {
sport->lpuart_dma_tx_use = false;
}
+ if (lpuart_is_32(sport)) {
+ if (sport->lpuart_dma_rx_use) {
+ /* RXWATER must be 0 */
+ temp = lpuart32_read(&sport->port, UARTWATER);
+ temp &= ~(UARTWATER_WATER_MASK <<
+ UARTWATER_RXWATER_OFF);
+ lpuart32_write(&sport->port, temp, UARTWATER);
+ }
+ temp = lpuart32_read(&sport->port, UARTCTRL);
+ if (!sport->lpuart_dma_rx_use)
+ temp |= UARTCTRL_RIE;
+ if (!sport->lpuart_dma_tx_use)
+ temp |= UARTCTRL_TIE;
+ lpuart32_write(&sport->port, temp, UARTCTRL);
+ }
+
uart_resume_port(&lpuart_reg, &sport->port);
return 0;
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index e052b69ceb98..9de9f0f239a1 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -114,9 +114,9 @@ struct ltq_uart_port {
static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg)
{
- u32 tmp = readl(reg);
+ u32 tmp = __raw_readl(reg);
- writel((tmp & ~clear) | set, reg);
+ __raw_writel((tmp & ~clear) | set, reg);
}
static inline struct
@@ -144,7 +144,7 @@ lqasc_start_tx(struct uart_port *port)
static void
lqasc_stop_rx(struct uart_port *port)
{
- writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
+ __raw_writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
}
static int
@@ -153,11 +153,12 @@ lqasc_rx_chars(struct uart_port *port)
struct tty_port *tport = &port->state->port;
unsigned int ch = 0, rsr = 0, fifocnt;
- fifocnt = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
+ fifocnt = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
+ ASCFSTAT_RXFFLMASK;
while (fifocnt--) {
u8 flag = TTY_NORMAL;
ch = readb(port->membase + LTQ_ASC_RBUF);
- rsr = (readl(port->membase + LTQ_ASC_STATE)
+ rsr = (__raw_readl(port->membase + LTQ_ASC_STATE)
& ASCSTATE_ANY) | UART_DUMMY_UER_RX;
tty_flip_buffer_push(tport);
port->icount.rx++;
@@ -217,7 +218,7 @@ lqasc_tx_chars(struct uart_port *port)
return;
}
- while (((readl(port->membase + LTQ_ASC_FSTAT) &
+ while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) &
ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
if (port->x_char) {
writeb(port->x_char, port->membase + LTQ_ASC_TBUF);
@@ -245,7 +246,7 @@ lqasc_tx_int(int irq, void *_port)
unsigned long flags;
struct uart_port *port = (struct uart_port *)_port;
spin_lock_irqsave(&ltq_asc_lock, flags);
- writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
+ __raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
spin_unlock_irqrestore(&ltq_asc_lock, flags);
lqasc_start_tx(port);
return IRQ_HANDLED;
@@ -270,7 +271,7 @@ lqasc_rx_int(int irq, void *_port)
unsigned long flags;
struct uart_port *port = (struct uart_port *)_port;
spin_lock_irqsave(&ltq_asc_lock, flags);
- writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
+ __raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
lqasc_rx_chars(port);
spin_unlock_irqrestore(&ltq_asc_lock, flags);
return IRQ_HANDLED;
@@ -280,7 +281,8 @@ static unsigned int
lqasc_tx_empty(struct uart_port *port)
{
int status;
- status = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK;
+ status = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
+ ASCFSTAT_TXFFLMASK;
return status ? 0 : TIOCSER_TEMT;
}
@@ -313,12 +315,12 @@ lqasc_startup(struct uart_port *port)
asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
port->membase + LTQ_ASC_CLC);
- writel(0, port->membase + LTQ_ASC_PISEL);
- writel(
+ __raw_writel(0, port->membase + LTQ_ASC_PISEL);
+ __raw_writel(
((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) |
ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU,
port->membase + LTQ_ASC_TXFCON);
- writel(
+ __raw_writel(
((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK)
| ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU,
port->membase + LTQ_ASC_RXFCON);
@@ -350,7 +352,7 @@ lqasc_startup(struct uart_port *port)
goto err2;
}
- writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
+ __raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
port->membase + LTQ_ASC_IRNREN);
return 0;
@@ -369,7 +371,7 @@ lqasc_shutdown(struct uart_port *port)
free_irq(ltq_port->rx_irq, port);
free_irq(ltq_port->err_irq, port);
- writel(0, port->membase + LTQ_ASC_CON);
+ __raw_writel(0, port->membase + LTQ_ASC_CON);
asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
port->membase + LTQ_ASC_RXFCON);
asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
@@ -461,13 +463,13 @@ lqasc_set_termios(struct uart_port *port,
asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON);
/* now we can write the new baudrate into the register */
- writel(divisor, port->membase + LTQ_ASC_BG);
+ __raw_writel(divisor, port->membase + LTQ_ASC_BG);
/* turn the baudrate generator back on */
asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON);
/* enable rx */
- writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
+ __raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
spin_unlock_irqrestore(&ltq_asc_lock, flags);
@@ -578,7 +580,7 @@ lqasc_console_putchar(struct uart_port *port, int ch)
return;
do {
- fifofree = (readl(port->membase + LTQ_ASC_FSTAT)
+ fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT)
& ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
} while (fifofree == 0);
writeb(ch, port->membase + LTQ_ASC_TBUF);
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index d1d73261575b..f4e27d0ad947 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -151,6 +151,8 @@ static void lpc32xx_hsuart_console_write(struct console *co, const char *s,
local_irq_restore(flags);
}
+static void lpc32xx_loopback_set(resource_size_t mapbase, int state);
+
static int __init lpc32xx_hsuart_console_setup(struct console *co,
char *options)
{
@@ -170,6 +172,8 @@ static int __init lpc32xx_hsuart_console_setup(struct console *co,
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
+ lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */
+
return uart_set_options(port, co, baud, parity, bits, flow);
}
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 4f479841769a..f5bdde405627 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -248,6 +248,7 @@
struct max310x_devtype {
char name[9];
int nr;
+ u8 mode1;
int (*detect)(struct device *);
void (*power)(struct uart_port *, int);
};
@@ -410,6 +411,7 @@ static void max14830_power(struct uart_port *port, int on)
static const struct max310x_devtype max3107_devtype = {
.name = "MAX3107",
.nr = 1,
+ .mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
.detect = max3107_detect,
.power = max310x_power,
};
@@ -417,6 +419,7 @@ static const struct max310x_devtype max3107_devtype = {
static const struct max310x_devtype max3108_devtype = {
.name = "MAX3108",
.nr = 1,
+ .mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3108_detect,
.power = max310x_power,
};
@@ -424,6 +427,7 @@ static const struct max310x_devtype max3108_devtype = {
static const struct max310x_devtype max3109_devtype = {
.name = "MAX3109",
.nr = 2,
+ .mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3109_detect,
.power = max310x_power,
};
@@ -431,6 +435,7 @@ static const struct max310x_devtype max3109_devtype = {
static const struct max310x_devtype max14830_devtype = {
.name = "MAX14830",
.nr = 4,
+ .mode1 = MAX310X_MODE1_IRQSEL_BIT,
.detect = max14830_detect,
.power = max14830_power,
};
@@ -1197,8 +1202,7 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
return PTR_ERR(regmap);
/* Alloc port structure */
- s = devm_kzalloc(dev, sizeof(*s) +
- sizeof(struct max310x_one) * devtype->nr, GFP_KERNEL);
+ s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
if (!s) {
dev_err(dev, "Error allocating port structure\n");
return -ENOMEM;
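
struct_size() (from <linux/overflow.h>) replaces the open-coded sizeof(*s) + n * sizeof(elem) with an overflow-checked form that saturates to SIZE_MAX, so a bogus count makes the allocation fail rather than come back undersized. Minimal sketch with an assumed layout (the real struct is defined elsewhere in this driver):

    #include <linux/overflow.h>

    struct sketch {
            struct uart_driver *drv;
            struct max310x_one p[];   /* flexible array member */
    } *s;

    /* struct_size(s, p, n) == sizeof(*s) + n * sizeof(s->p[0]), overflow-checked */
    s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
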
@@ -1258,9 +1262,8 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
MAX310X_BRGDIVLSB_REG + offs, &ret);
} while (ret != 0x01);
- regmap_update_bits(s->regmap, MAX310X_MODE1_REG + offs,
- MAX310X_MODE1_AUTOSLEEP_BIT,
- MAX310X_MODE1_AUTOSLEEP_BIT);
+ regmap_write(s->regmap, MAX310X_MODE1_REG + offs,
+ devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
@@ -1294,10 +1297,6 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
/* Clear IRQ status register */
max310x_port_read(&s->p[i].port, MAX310X_IRQSTS_REG);
- /* Enable IRQ pin */
- max310x_port_update(&s->p[i].port, MAX310X_MODE1_REG,
- MAX310X_MODE1_IRQSEL_BIT,
- MAX310X_MODE1_IRQSEL_BIT);
/* Initialize queue for start TX */
INIT_WORK(&s->p[i].tx_work, max310x_wq_proc);
/* Initialize queue for changing LOOPBACK mode */
@@ -1467,10 +1466,10 @@ static int __init max310x_uart_init(void)
return ret;
#ifdef CONFIG_SPI_MASTER
- spi_register_driver(&max310x_spi_driver);
+ ret = spi_register_driver(&max310x_spi_driver);
#endif
- return 0;
+ return ret;
}
module_init(max310x_uart_init);
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 8a842591b37c..fbc5bc022a39 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -72,7 +72,8 @@
#define AML_UART_BAUD_USE BIT(23)
#define AML_UART_BAUD_XTAL BIT(24)
-#define AML_UART_PORT_NUM 6
+#define AML_UART_PORT_NUM 12
+#define AML_UART_PORT_OFFSET 6
#define AML_UART_DEV_NAME "ttyAML"
@@ -654,10 +655,20 @@ static int meson_uart_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq;
struct uart_port *port;
int ret = 0;
+ int id = -1;
if (pdev->dev.of_node)
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (pdev->id < 0) {
+ for (id = AML_UART_PORT_OFFSET; id < AML_UART_PORT_NUM; id++) {
+ if (!meson_ports[id]) {
+ pdev->id = id;
+ break;
+ }
+ }
+ }
+
if (pdev->id < 0 || pdev->id >= AML_UART_PORT_NUM)
return -EINVAL;
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index 9f8f63719126..587b42f754cb 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -22,6 +22,7 @@
#include <linux/serial_core.h>
#include <linux/tty_flip.h>
#include <linux/types.h>
+#include <linux/idr.h>
#define SERIAL_NAME "ttyMPS"
#define DRIVER_NAME "mps2-uart"
@@ -65,11 +66,14 @@
#define MPS2_MAX_PORTS 3
+#define UART_PORT_COMBINED_IRQ BIT(0)
+
struct mps2_uart_port {
struct uart_port port;
struct clk *clk;
unsigned int tx_irq;
unsigned int rx_irq;
+ unsigned int flags;
};
static inline struct mps2_uart_port *to_mps2_port(struct uart_port *port)
@@ -264,6 +268,20 @@ static irqreturn_t mps2_uart_oerrirq(int irq, void *data)
return handled;
}
+static irqreturn_t mps2_uart_combinedirq(int irq, void *data)
+{
+ if (mps2_uart_rxirq(irq, data) == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ if (mps2_uart_txirq(irq, data) == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ if (mps2_uart_oerrirq(irq, data) == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
static int mps2_uart_startup(struct uart_port *port)
{
struct mps2_uart_port *mps_port = to_mps2_port(port);
@@ -274,26 +292,37 @@ static int mps2_uart_startup(struct uart_port *port)
mps2_uart_write8(port, control, UARTn_CTRL);
- ret = request_irq(mps_port->rx_irq, mps2_uart_rxirq, 0,
- MAKE_NAME(-rx), mps_port);
- if (ret) {
- dev_err(port->dev, "failed to register rxirq (%d)\n", ret);
- return ret;
- }
+ if (mps_port->flags & UART_PORT_COMBINED_IRQ) {
+ ret = request_irq(port->irq, mps2_uart_combinedirq, 0,
+ MAKE_NAME(-combined), mps_port);
- ret = request_irq(mps_port->tx_irq, mps2_uart_txirq, 0,
- MAKE_NAME(-tx), mps_port);
- if (ret) {
- dev_err(port->dev, "failed to register txirq (%d)\n", ret);
- goto err_free_rxirq;
- }
+ if (ret) {
+ dev_err(port->dev, "failed to register combinedirq (%d)\n", ret);
+ return ret;
+ }
+ } else {
+ ret = request_irq(port->irq, mps2_uart_oerrirq, IRQF_SHARED,
+ MAKE_NAME(-overrun), mps_port);
+
+ if (ret) {
+ dev_err(port->dev, "failed to register oerrirq (%d)\n", ret);
+ return ret;
+ }
+
+ ret = request_irq(mps_port->rx_irq, mps2_uart_rxirq, 0,
+ MAKE_NAME(-rx), mps_port);
+ if (ret) {
+ dev_err(port->dev, "failed to register rxirq (%d)\n", ret);
+ goto err_free_oerrirq;
+ }
- ret = request_irq(port->irq, mps2_uart_oerrirq, IRQF_SHARED,
- MAKE_NAME(-overrun), mps_port);
+ ret = request_irq(mps_port->tx_irq, mps2_uart_txirq, 0,
+ MAKE_NAME(-tx), mps_port);
+ if (ret) {
+ dev_err(port->dev, "failed to register txirq (%d)\n", ret);
+ goto err_free_rxirq;
+ }
- if (ret) {
- dev_err(port->dev, "failed to register oerrirq (%d)\n", ret);
- goto err_free_txirq;
}
control |= UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP;
@@ -302,10 +331,10 @@ static int mps2_uart_startup(struct uart_port *port)
return 0;
-err_free_txirq:
- free_irq(mps_port->tx_irq, mps_port);
err_free_rxirq:
free_irq(mps_port->rx_irq, mps_port);
+err_free_oerrirq:
+ free_irq(port->irq, mps_port);
return ret;
}
@@ -319,8 +348,11 @@ static void mps2_uart_shutdown(struct uart_port *port)
mps2_uart_write8(port, control, UARTn_CTRL);
- free_irq(mps_port->rx_irq, mps_port);
- free_irq(mps_port->tx_irq, mps_port);
+ if (!(mps_port->flags & UART_PORT_COMBINED_IRQ)) {
+ free_irq(mps_port->rx_irq, mps_port);
+ free_irq(mps_port->tx_irq, mps_port);
+ }
+
free_irq(port->irq, mps_port);
}
@@ -397,7 +429,7 @@ static const struct uart_ops mps2_uart_pops = {
.verify_port = mps2_uart_verify_port,
};
-static struct mps2_uart_port mps2_uart_ports[MPS2_MAX_PORTS];
+static DEFINE_IDR(ports_idr);
#ifdef CONFIG_SERIAL_MPS2_UART_CONSOLE
static void mps2_uart_console_putchar(struct uart_port *port, int ch)
@@ -410,7 +442,8 @@ static void mps2_uart_console_putchar(struct uart_port *port, int ch)
static void mps2_uart_console_write(struct console *co, const char *s, unsigned int cnt)
{
- struct uart_port *port = &mps2_uart_ports[co->index].port;
+ struct mps2_uart_port *mps_port = idr_find(&ports_idr, co->index);
+ struct uart_port *port = &mps_port->port;
uart_console_write(port, s, cnt, mps2_uart_console_putchar);
}
@@ -426,7 +459,10 @@ static int mps2_uart_console_setup(struct console *co, char *options)
if (co->index < 0 || co->index >= MPS2_MAX_PORTS)
return -ENODEV;
- mps_port = &mps2_uart_ports[co->index];
+ mps_port = idr_find(&ports_idr, co->index);
+
+ if (!mps_port)
+ return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -487,27 +523,36 @@ static struct uart_driver mps2_uart_driver = {
.cons = MPS2_SERIAL_CONSOLE,
};
-static struct mps2_uart_port *mps2_of_get_port(struct platform_device *pdev)
+static int mps2_of_get_port(struct platform_device *pdev,
+ struct mps2_uart_port *mps_port)
{
struct device_node *np = pdev->dev.of_node;
int id;
if (!np)
- return NULL;
+ return -ENODEV;
id = of_alias_get_id(np, "serial");
+
if (id < 0)
- id = 0;
+ id = idr_alloc_cyclic(&ports_idr, (void *)mps_port, 0, MPS2_MAX_PORTS, GFP_KERNEL);
+ else
+ id = idr_alloc(&ports_idr, (void *)mps_port, id, MPS2_MAX_PORTS, GFP_KERNEL);
- if (WARN_ON(id >= MPS2_MAX_PORTS))
- return NULL;
+ if (id < 0)
+ return id;
+
+ /* Only combined irq is present */
+ if (platform_irq_count(pdev) == 1)
+ mps_port->flags |= UART_PORT_COMBINED_IRQ;
+
+ mps_port->port.line = id;
- mps2_uart_ports[id].port.line = id;
- return &mps2_uart_ports[id];
+ return 0;
}
-static int mps2_init_port(struct mps2_uart_port *mps_port,
- struct platform_device *pdev)
+static int mps2_init_port(struct platform_device *pdev,
+ struct mps2_uart_port *mps_port)
{
struct resource *res;
int ret;
@@ -519,11 +564,6 @@ static int mps2_init_port(struct mps2_uart_port *mps_port,
mps_port->port.mapbase = res->start;
mps_port->port.mapsize = resource_size(res);
-
- mps_port->rx_irq = platform_get_irq(pdev, 0);
- mps_port->tx_irq = platform_get_irq(pdev, 1);
- mps_port->port.irq = platform_get_irq(pdev, 2);
-
mps_port->port.iotype = UPIO_MEM;
mps_port->port.flags = UPF_BOOT_AUTOCONF;
mps_port->port.fifosize = 1;
@@ -542,6 +582,15 @@ static int mps2_init_port(struct mps2_uart_port *mps_port,
clk_disable_unprepare(mps_port->clk);
+
+ if (mps_port->flags & UART_PORT_COMBINED_IRQ) {
+ mps_port->port.irq = platform_get_irq(pdev, 0);
+ } else {
+ mps_port->rx_irq = platform_get_irq(pdev, 0);
+ mps_port->tx_irq = platform_get_irq(pdev, 1);
+ mps_port->port.irq = platform_get_irq(pdev, 2);
+ }
+
return ret;
}
@@ -550,11 +599,16 @@ static int mps2_serial_probe(struct platform_device *pdev)
struct mps2_uart_port *mps_port;
int ret;
- mps_port = mps2_of_get_port(pdev);
- if (!mps_port)
- return -ENODEV;
+ mps_port = devm_kzalloc(&pdev->dev, sizeof(struct mps2_uart_port), GFP_KERNEL);
+
+ if (!mps_port)
+ return -ENOMEM;
+
+ ret = mps2_of_get_port(pdev, mps_port);
+ if (ret)
+ return ret;
- ret = mps2_init_port(mps_port, pdev);
+ ret = mps2_init_port(pdev, mps_port);
if (ret)
return ret;
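
A minimal standalone sketch of the IDR allocation pattern the mps2-uart change above switches to (the fixed port array replaced by an IDR keyed on the "serial" alias). The example_* names are hypothetical; only DEFINE_IDR(), idr_alloc() and idr_alloc_cyclic() are real kernel APIs, both allocating from the [start, end) range and returning a negative errno on failure.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_ports);

static int example_register_port(void *port, int alias_id, int max_ports)
{
	/* No alias in the devicetree: take the lowest free slot, cycling. */
	if (alias_id < 0)
		return idr_alloc_cyclic(&example_ports, port, 0, max_ports,
					GFP_KERNEL);

	/* Alias present: allocate at (or after) the requested line. */
	return idr_alloc(&example_ports, port, alias_id, max_ports,
			 GFP_KERNEL);
}
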
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 736b74fd6623..109096033bb1 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1634,7 +1634,7 @@ static void msm_console_write(struct console *co, const char *s,
__msm_console_write(port, s, count, msm_port->is_uartdm);
}
-static int __init msm_console_setup(struct console *co, char *options)
+static int msm_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 115200;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index a72d6d9fb983..3bcec1c20219 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -89,7 +89,7 @@
#define DEF_FIFO_DEPTH_WORDS 16
#define DEF_TX_WM 2
#define DEF_FIFO_WIDTH_BITS 32
-#define UART_CONSOLE_RX_WM 2
+#define UART_RX_WM 2
#define MAX_LOOPBACK_CFG 3
#ifdef CONFIG_CONSOLE_POLL
@@ -105,10 +105,6 @@ struct qcom_geni_serial_port {
u32 tx_fifo_depth;
u32 tx_fifo_width;
u32 rx_fifo_depth;
- u32 tx_wm;
- u32 rx_wm;
- u32 rx_rfr;
- enum geni_se_xfer_mode xfer_mode;
bool setup;
int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
unsigned int baud;
@@ -225,10 +221,10 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
u32 geni_ios;
- if (uart_console(uport) || !uart_cts_enabled(uport)) {
+ if (uart_console(uport)) {
mctrl |= TIOCM_CTS;
} else {
- geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
+ geni_ios = readl(uport->membase + SE_GENI_IOS);
if (!(geni_ios & IO2_DATA_IN))
mctrl |= TIOCM_CTS;
}
@@ -241,12 +237,12 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
{
u32 uart_manual_rfr = 0;
- if (uart_console(uport) || !uart_cts_enabled(uport))
+ if (uart_console(uport))
return;
if (!(mctrl & TIOCM_RTS))
uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
- writel_relaxed(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
+ writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
}
static const char *qcom_geni_serial_get_type(struct uart_port *uport)
@@ -275,9 +271,6 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
unsigned int fifo_bits;
unsigned long timeout_us = 20000;
- /* Ensure polling is not re-ordered before the prior writes/reads */
- mb();
-
if (uport->private_data) {
port = to_dev_port(uport, uport);
baud = port->baud;
@@ -297,7 +290,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
*/
timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
while (timeout_us) {
- reg = readl_relaxed(uport->membase + offset);
+ reg = readl(uport->membase + offset);
if ((bool)(reg & field) == set)
return true;
udelay(10);
@@ -310,7 +303,7 @@ static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
{
u32 m_cmd;
- writel_relaxed(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
+ writel(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
m_cmd = UART_START_TX << M_OPCODE_SHFT;
writel(m_cmd, uport->membase + SE_GENI_M_CMD0);
}
@@ -323,13 +316,13 @@ static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_DONE_EN, true);
if (!done) {
- writel_relaxed(M_GENI_CMD_ABORT, uport->membase +
+ writel(M_GENI_CMD_ABORT, uport->membase +
SE_GENI_M_CMD_CTRL_REG);
irq_clear |= M_CMD_ABORT_EN;
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
}
- writel_relaxed(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_abort_rx(struct uart_port *uport)
@@ -339,8 +332,8 @@ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
S_GENI_CMD_ABORT, false);
- writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
- writel_relaxed(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
+ writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ writel(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -349,19 +342,13 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
u32 rx_fifo;
u32 status;
- status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
- writel_relaxed(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
+ writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
- status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
- writel_relaxed(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
+ writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
- /*
- * Ensure the writes to clear interrupts is not re-ordered after
- * reading the data.
- */
- mb();
-
- status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
if (!(status & RX_FIFO_WC_MSK))
return NO_POLL_CHAR;
@@ -372,15 +359,12 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
unsigned char c)
{
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
-
- writel_relaxed(port->tx_wm, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
qcom_geni_serial_setup_tx(uport, 1);
WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_TX_FIFO_WATERMARK_EN, true));
- writel_relaxed(c, uport->membase + SE_GENI_TX_FIFOn);
- writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
- SE_GENI_M_IRQ_CLEAR);
+ writel(c, uport->membase + SE_GENI_TX_FIFOn);
+ writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_poll_tx_done(uport);
}
#endif
@@ -388,7 +372,7 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
{
- writel_relaxed(ch, uport->membase + SE_GENI_TX_FIFOn);
+ writel(ch, uport->membase + SE_GENI_TX_FIFOn);
}
static void
@@ -407,7 +391,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
bytes_to_send++;
}
- writel_relaxed(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
qcom_geni_serial_setup_tx(uport, bytes_to_send);
for (i = 0; i < count; ) {
size_t chars_to_write = 0;
@@ -425,7 +409,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
chars_to_write = min_t(size_t, count - i, avail / 2);
uart_console_write(uport, s + i, chars_to_write,
qcom_geni_serial_wr_char);
- writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ writel(M_TX_FIFO_WATERMARK_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
i += chars_to_write;
}
@@ -454,7 +438,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
else
spin_lock_irqsave(&uport->lock, flags);
- geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ geni_status = readl(uport->membase + SE_GENI_STATUS);
/* Cancel the current write to log the fault */
if (!locked) {
@@ -464,11 +448,10 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
geni_se_abort_m_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
- writel_relaxed(M_CMD_ABORT_EN, uport->membase +
+ writel(M_CMD_ABORT_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
}
- writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
- SE_GENI_M_IRQ_CLEAR);
+ writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
} else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
/*
* It seems we can't interrupt existing transfers if all data
@@ -477,9 +460,8 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
qcom_geni_serial_poll_tx_done(uport);
if (uart_circ_chars_pending(&uport->state->xmit)) {
- irq_en = readl_relaxed(uport->membase +
- SE_GENI_M_IRQ_EN);
- writel_relaxed(irq_en | M_TX_FIFO_WATERMARK_EN,
+ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ writel(irq_en | M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
}
@@ -567,29 +549,20 @@ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
static void qcom_geni_serial_start_tx(struct uart_port *uport)
{
u32 irq_en;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
u32 status;
- if (port->xfer_mode == GENI_SE_FIFO) {
- /*
- * readl ensures reading & writing of IRQ_EN register
- * is not re-ordered before checking the status of the
- * Serial Engine.
- */
- status = readl(uport->membase + SE_GENI_STATUS);
- if (status & M_GENI_CMD_ACTIVE)
- return;
+ status = readl(uport->membase + SE_GENI_STATUS);
+ if (status & M_GENI_CMD_ACTIVE)
+ return;
- if (!qcom_geni_serial_tx_empty(uport))
- return;
+ if (!qcom_geni_serial_tx_empty(uport))
+ return;
- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
- irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
+ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
- writel_relaxed(port->tx_wm, uport->membase +
- SE_GENI_TX_WATERMARK_REG);
- writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
- }
+ writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
static void qcom_geni_serial_stop_tx(struct uart_port *uport)
@@ -598,35 +571,24 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
u32 status;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
- irq_en &= ~M_CMD_DONE_EN;
- if (port->xfer_mode == GENI_SE_FIFO) {
- irq_en &= ~M_TX_FIFO_WATERMARK_EN;
- writel_relaxed(0, uport->membase +
- SE_GENI_TX_WATERMARK_REG);
- }
- writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
- status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
+ writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ status = readl(uport->membase + SE_GENI_STATUS);
/* Possible stop tx is called multiple times. */
if (!(status & M_GENI_CMD_ACTIVE))
return;
- /*
- * Ensure cancel command write is not re-ordered before checking
- * the status of the Primary Sequencer.
- */
- mb();
-
geni_se_cancel_m_cmd(&port->se);
if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_CANCEL_EN, true)) {
geni_se_abort_m_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
- writel_relaxed(M_CMD_ABORT_EN, uport->membase +
- SE_GENI_M_IRQ_CLEAR);
+ writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
- writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_start_rx(struct uart_port *uport)
@@ -635,27 +597,19 @@ static void qcom_geni_serial_start_rx(struct uart_port *uport)
u32 status;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ status = readl(uport->membase + SE_GENI_STATUS);
if (status & S_GENI_CMD_ACTIVE)
qcom_geni_serial_stop_rx(uport);
- /*
- * Ensure setup command write is not re-ordered before checking
- * the status of the Secondary Sequencer.
- */
- mb();
-
geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
- if (port->xfer_mode == GENI_SE_FIFO) {
- irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
- irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
- writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+ writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
- irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
- writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
- }
+ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
static void qcom_geni_serial_stop_rx(struct uart_port *uport)
@@ -665,32 +619,24 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
u32 irq_clear = S_CMD_DONE_EN;
- if (port->xfer_mode == GENI_SE_FIFO) {
- irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
- irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
- writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+ writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
- irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
- writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
- }
+ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
- status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+ status = readl(uport->membase + SE_GENI_STATUS);
/* Possible stop rx is called multiple times. */
if (!(status & S_GENI_CMD_ACTIVE))
return;
- /*
- * Ensure cancel command write is not re-ordered before checking
- * the status of the Secondary Sequencer.
- */
- mb();
-
geni_se_cancel_s_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
S_GENI_CMD_CANCEL, false);
- status = readl_relaxed(uport->membase + SE_GENI_STATUS);
- writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ status = readl(uport->membase + SE_GENI_STATUS);
+ writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
if (status & S_GENI_CMD_ACTIVE)
qcom_geni_serial_abort_rx(uport);
}
@@ -704,7 +650,7 @@ static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
u32 total_bytes;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
word_cnt = status & RX_FIFO_WC_MSK;
last_word_partial = status & RX_LAST;
last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
@@ -734,7 +680,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
unsigned int chunk;
int tail;
- status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS);
+ status = readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
/* Complete the current tx command before taking newly added data */
if (active)
@@ -760,9 +706,9 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
qcom_geni_serial_setup_tx(uport, pending);
port->tx_remaining = pending;
- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
- writel_relaxed(irq_en | M_TX_FIFO_WATERMARK_EN,
+ writel(irq_en | M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
@@ -795,14 +741,14 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
* cleared it in qcom_geni_serial_isr it will have already reasserted
* so we must clear it again here after our writes.
*/
- writel_relaxed(M_TX_FIFO_WATERMARK_EN,
+ writel(M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_CLEAR);
out_write_wakeup:
if (!port->tx_remaining) {
- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
if (irq_en & M_TX_FIFO_WATERMARK_EN)
- writel_relaxed(irq_en & ~M_TX_FIFO_WATERMARK_EN,
+ writel(irq_en & ~M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
@@ -812,12 +758,12 @@ out_write_wakeup:
static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
{
- unsigned int m_irq_status;
- unsigned int s_irq_status;
- unsigned int geni_status;
+ u32 m_irq_en;
+ u32 m_irq_status;
+ u32 s_irq_status;
+ u32 geni_status;
struct uart_port *uport = dev;
unsigned long flags;
- unsigned int m_irq_en;
bool drop_rx = false;
struct tty_port *tport = &uport->state->port;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
@@ -826,12 +772,12 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
return IRQ_NONE;
spin_lock_irqsave(&uport->lock, flags);
- m_irq_status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
- s_irq_status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
- geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
- m_irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
- writel_relaxed(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
- writel_relaxed(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
+ s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
+ geni_status = readl(uport->membase + SE_GENI_STATUS);
+ m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ writel(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
goto out_unlock;
@@ -877,17 +823,6 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
}
-static void set_rfr_wm(struct qcom_geni_serial_port *port)
-{
- /*
- * Set RFR (Flow off) to FIFO_DEPTH - 2.
- * RX WM level at 10% RX_FIFO_DEPTH.
- * TX WM level at 10% TX_FIFO_DEPTH.
- */
- port->rx_rfr = port->rx_fifo_depth - 2;
- port->rx_wm = UART_CONSOLE_RX_WM;
- port->tx_wm = DEF_TX_WM;
-}
static void qcom_geni_serial_shutdown(struct uart_port *uport)
{
@@ -907,7 +842,7 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport)
static int qcom_geni_serial_port_setup(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- unsigned int rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
+ u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
u32 proto;
if (uart_console(uport)) {
@@ -928,21 +863,19 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
get_tx_fifo_size(port);
- set_rfr_wm(port);
- writel_relaxed(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
+ writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
/*
* Make an unconditional cancel on the main sequencer to reset
* it else we could end up in data loss scenarios.
*/
- port->xfer_mode = GENI_SE_FIFO;
if (uart_console(uport))
qcom_geni_serial_poll_tx_done(uport);
geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
false, true, false);
geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
false, false, true);
- geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
- geni_se_select_mode(&port->se, port->xfer_mode);
+ geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
+ geni_se_select_mode(&port->se, GENI_SE_FIFO);
if (!uart_console(uport)) {
port->rx_fifo = devm_kcalloc(uport->dev,
port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
@@ -1008,14 +941,14 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
struct ktermios *termios, struct ktermios *old)
{
unsigned int baud;
- unsigned int bits_per_char;
- unsigned int tx_trans_cfg;
- unsigned int tx_parity_cfg;
- unsigned int rx_trans_cfg;
- unsigned int rx_parity_cfg;
- unsigned int stop_bit_len;
+ u32 bits_per_char;
+ u32 tx_trans_cfg;
+ u32 tx_parity_cfg;
+ u32 rx_trans_cfg;
+ u32 rx_parity_cfg;
+ u32 stop_bit_len;
unsigned int clk_div;
- unsigned long ser_clk_cfg;
+ u32 ser_clk_cfg;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
unsigned long clk_rate;
@@ -1033,10 +966,10 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
/* parity */
- tx_trans_cfg = readl_relaxed(uport->membase + SE_UART_TX_TRANS_CFG);
- tx_parity_cfg = readl_relaxed(uport->membase + SE_UART_TX_PARITY_CFG);
- rx_trans_cfg = readl_relaxed(uport->membase + SE_UART_RX_TRANS_CFG);
- rx_parity_cfg = readl_relaxed(uport->membase + SE_UART_RX_PARITY_CFG);
+ tx_trans_cfg = readl(uport->membase + SE_UART_TX_TRANS_CFG);
+ tx_parity_cfg = readl(uport->membase + SE_UART_TX_PARITY_CFG);
+ rx_trans_cfg = readl(uport->membase + SE_UART_RX_TRANS_CFG);
+ rx_parity_cfg = readl(uport->membase + SE_UART_RX_PARITY_CFG);
if (termios->c_cflag & PARENB) {
tx_trans_cfg |= UART_TX_PAR_EN;
rx_trans_cfg |= UART_RX_PAR_EN;
@@ -1092,17 +1025,17 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
uart_update_timeout(uport, termios->c_cflag, baud);
if (!uart_console(uport))
- writel_relaxed(port->loopback,
+ writel(port->loopback,
uport->membase + SE_UART_LOOPBACK_CFG);
- writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
- writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
- writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
- writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
- writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
- writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
- writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
- writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
- writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
+ writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+ writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+ writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+ writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+ writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+ writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+ writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+ writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
+ writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
out_restart_rx:
qcom_geni_serial_start_rx(uport);
}
@@ -1193,13 +1126,13 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
geni_se_select_mode(&se, GENI_SE_FIFO);
- writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
- writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
- writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
- writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
- writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
- writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
- writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+ writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+ writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+ writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+ writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+ writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+ writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+ writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
dev->con->write = qcom_geni_serial_earlycon_write;
dev->con->setup = NULL;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 9fc3559f80d9..83fd51607741 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1694,6 +1694,42 @@ s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
}
#endif
+static int s3c24xx_serial_enable_baudclk(struct s3c24xx_uart_port *ourport)
+{
+ struct device *dev = ourport->port.dev;
+ struct s3c24xx_uart_info *info = ourport->info;
+ char clk_name[MAX_CLK_NAME_LENGTH];
+ unsigned int clk_sel;
+ struct clk *clk;
+ int clk_num;
+ int ret;
+
+ clk_sel = ourport->cfg->clk_sel ? : info->def_clk_sel;
+ for (clk_num = 0; clk_num < info->num_clks; clk_num++) {
+ if (!(clk_sel & (1 << clk_num)))
+ continue;
+
+ sprintf(clk_name, "clk_uart_baud%d", clk_num);
+ clk = clk_get(dev, clk_name);
+ if (IS_ERR(clk))
+ continue;
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ continue;
+ }
+
+ ourport->baudclk = clk;
+ ourport->baudclk_rate = clk_get_rate(clk);
+ s3c24xx_serial_setsource(&ourport->port, clk_num);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
/* s3c24xx_serial_init_port
*
* initialise a single serial port from the platform device given
@@ -1788,6 +1824,10 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
goto err;
}
+ ret = s3c24xx_serial_enable_baudclk(ourport);
+ if (ret)
+ pr_warn("uart: failed to enable baudclk\n");
+
/* Keep all interrupts masked and cleared */
if (s3c24xx_serial_has_interrupt_mask(port)) {
wr_regl(port, S3C64XX_UINTM, 0xf);
@@ -1901,6 +1941,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
* and keeps the clock enabled in this case.
*/
clk_disable_unprepare(ourport->clk);
+ if (!IS_ERR(ourport->baudclk))
+ clk_disable_unprepare(ourport->baudclk);
ret = s3c24xx_serial_cpufreq_register(ourport);
if (ret < 0)
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 268098681856..635178cf3eed 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1187,9 +1187,7 @@ static int sc16is7xx_probe(struct device *dev,
return PTR_ERR(regmap);
/* Alloc port structure */
- s = devm_kzalloc(dev, sizeof(*s) +
- sizeof(struct sc16is7xx_one) * devtype->nr_uart,
- GFP_KERNEL);
+ s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
if (!s) {
dev_err(dev, "Error allocating port structure\n");
return -ENOMEM;
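
For reference, a hedged sketch of the struct_size() idiom the sc16is7xx hunk above adopts: the helper from <linux/overflow.h> computes sizeof(*s) plus n trailing array elements and saturates on overflow, so an oversized count cannot wrap around to an undersized allocation. The example_* types are made up for illustration.

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_one {
	int line;
};

struct example_ctx {
	int nr;
	struct example_one p[];	/* flexible trailing array */
};

static struct example_ctx *example_alloc(int nr_uart)
{
	struct example_ctx *s;

	/* Same as kzalloc(sizeof(*s) + nr_uart * sizeof(s->p[0]), ...)
	 * but with the multiplication and addition checked for overflow. */
	s = kzalloc(struct_size(s, p, nr_uart), GFP_KERNEL);
	if (s)
		s->nr = nr_uart;

	return s;
}
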
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d4cca5bdaf1c..351843f847c0 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty)
struct uart_port *port;
unsigned long flags;
+ if (!state)
+ return;
+
port = uart_port_lock(state, flags);
__uart_start(tty);
uart_port_unlock(port, flags);
@@ -550,10 +553,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
int ret = 0;
circ = &state->xmit;
- if (!circ->buf)
+ port = uart_port_lock(state, flags);
+ if (!circ->buf) {
+ uart_port_unlock(port, flags);
return 0;
+ }
- port = uart_port_lock(state, flags);
if (port && uart_circ_chars_free(circ) != 0) {
circ->buf[circ->head] = c;
circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
@@ -586,11 +591,13 @@ static int uart_write(struct tty_struct *tty,
return -EL3HLT;
}
+ port = uart_port_lock(state, flags);
circ = &state->xmit;
- if (!circ->buf)
+ if (!circ->buf) {
+ uart_port_unlock(port, flags);
return 0;
+ }
- port = uart_port_lock(state, flags);
while (port) {
c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
if (count < c)
@@ -723,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty)
upstat_t mask = UPSTAT_SYNC_FIFO;
struct uart_port *port;
+ if (!state)
+ return;
+
port = uart_port_ref(state);
if (!port)
return;
@@ -2834,7 +2844,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
*/
tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver,
uport->line, uport->dev, port, uport->tty_groups);
- if (likely(!IS_ERR(tty_dev))) {
+ if (!IS_ERR(tty_dev)) {
device_set_wakeup_capable(tty_dev, 1);
} else {
dev_err(uport->dev, "Cannot register tty device on line %d\n",
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 8df0fd824520..060fcd42b6d5 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1243,12 +1243,22 @@ static int sci_dma_rx_find_active(struct sci_port *s)
return -1;
}
-static void sci_rx_dma_release(struct sci_port *s)
+static void sci_dma_rx_chan_invalidate(struct sci_port *s)
+{
+ unsigned int i;
+
+ s->chan_rx = NULL;
+ for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++)
+ s->cookie_rx[i] = -EINVAL;
+ s->active_rx = 0;
+}
+
+static void sci_dma_rx_release(struct sci_port *s)
{
struct dma_chan *chan = s->chan_rx_saved;
- s->chan_rx_saved = s->chan_rx = NULL;
- s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
+ s->chan_rx_saved = NULL;
+ sci_dma_rx_chan_invalidate(s);
dmaengine_terminate_sync(chan);
dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
sg_dma_address(&s->sg_rx[0]));
@@ -1264,6 +1274,20 @@ static void start_hrtimer_us(struct hrtimer *hrt, unsigned long usec)
hrtimer_start(hrt, t, HRTIMER_MODE_REL);
}
+static void sci_dma_rx_reenable_irq(struct sci_port *s)
+{
+ struct uart_port *port = &s->port;
+ u16 scr;
+
+ /* Direct new serial port interrupts back to CPU */
+ scr = serial_port_in(port, SCSCR);
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ scr &= ~SCSCR_RDRQE;
+ enable_irq(s->irqs[SCIx_RXI_IRQ]);
+ }
+ serial_port_out(port, SCSCR, scr | SCSCR_RIE);
+}
+
static void sci_dma_rx_complete(void *arg)
{
struct sci_port *s = arg;
@@ -1313,12 +1337,13 @@ fail:
dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
/* Switch to PIO */
spin_lock_irqsave(&port->lock, flags);
- s->chan_rx = NULL;
- sci_start_rx(port);
+ dmaengine_terminate_async(chan);
+ sci_dma_rx_chan_invalidate(s);
+ sci_dma_rx_reenable_irq(s);
spin_unlock_irqrestore(&port->lock, flags);
}
-static void sci_tx_dma_release(struct sci_port *s)
+static void sci_dma_tx_release(struct sci_port *s)
{
struct dma_chan *chan = s->chan_tx_saved;
@@ -1331,7 +1356,7 @@ static void sci_tx_dma_release(struct sci_port *s)
dma_release_channel(chan);
}
-static int sci_submit_rx(struct sci_port *s, bool port_lock_held)
+static int sci_dma_rx_submit(struct sci_port *s, bool port_lock_held)
{
struct dma_chan *chan = s->chan_rx;
struct uart_port *port = &s->port;
@@ -1367,17 +1392,14 @@ fail:
spin_lock_irqsave(&port->lock, flags);
if (i)
dmaengine_terminate_async(chan);
- for (i = 0; i < 2; i++)
- s->cookie_rx[i] = -EINVAL;
- s->active_rx = 0;
- s->chan_rx = NULL;
+ sci_dma_rx_chan_invalidate(s);
sci_start_rx(port);
if (!port_lock_held)
spin_unlock_irqrestore(&port->lock, flags);
return -EAGAIN;
}
-static void work_fn_tx(struct work_struct *work)
+static void sci_dma_tx_work_fn(struct work_struct *work)
{
struct sci_port *s = container_of(work, struct sci_port, work_tx);
struct dma_async_tx_descriptor *desc;
@@ -1436,7 +1458,7 @@ switch_to_pio:
return;
}
-static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
+static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
{
struct sci_port *s = container_of(t, struct sci_port, rx_timer);
struct dma_chan *chan = s->chan_rx;
@@ -1446,7 +1468,6 @@ static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
unsigned long flags;
unsigned int read;
int active, count;
- u16 scr;
dev_dbg(port->dev, "DMA Rx timed out\n");
@@ -1494,15 +1515,9 @@ static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
}
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- sci_submit_rx(s, true);
+ sci_dma_rx_submit(s, true);
- /* Direct new serial port interrupts back to CPU */
- scr = serial_port_in(port, SCSCR);
- if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- scr &= ~SCSCR_RDRQE;
- enable_irq(s->irqs[SCIx_RXI_IRQ]);
- }
- serial_port_out(port, SCSCR, scr | SCSCR_RIE);
+ sci_dma_rx_reenable_irq(s);
spin_unlock_irqrestore(&port->lock, flags);
@@ -1580,7 +1595,7 @@ static void sci_request_dma(struct uart_port *port)
__func__, UART_XMIT_SIZE,
port->state->xmit.buf, &s->tx_dma_addr);
- INIT_WORK(&s->work_tx, work_fn_tx);
+ INIT_WORK(&s->work_tx, sci_dma_tx_work_fn);
s->chan_tx_saved = s->chan_tx = chan;
}
}
@@ -1615,12 +1630,12 @@ static void sci_request_dma(struct uart_port *port)
}
hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s->rx_timer.function = rx_timer_fn;
+ s->rx_timer.function = sci_dma_rx_timer_fn;
s->chan_rx_saved = s->chan_rx = chan;
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- sci_submit_rx(s, false);
+ sci_dma_rx_submit(s, false);
}
}
@@ -1629,9 +1644,9 @@ static void sci_free_dma(struct uart_port *port)
struct sci_port *s = to_sci_port(port);
if (s->chan_tx_saved)
- sci_tx_dma_release(s);
+ sci_dma_tx_release(s);
if (s->chan_rx_saved)
- sci_rx_dma_release(s);
+ sci_dma_rx_release(s);
}
static void sci_flush_buffer(struct uart_port *port)
@@ -1669,7 +1684,7 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
disable_irq_nosync(irq);
scr |= SCSCR_RDRQE;
} else {
- if (sci_submit_rx(s, false) < 0)
+ if (sci_dma_rx_submit(s, false) < 0)
goto handle_pio;
scr &= ~SCSCR_RIE;
@@ -1921,7 +1936,7 @@ out_nomem:
static void sci_free_irq(struct sci_port *port)
{
- int i;
+ int i, j;
/*
* Intentionally in reverse order so we iterate over the muxed
@@ -1937,6 +1952,13 @@ static void sci_free_irq(struct sci_port *port)
if (unlikely(irq < 0))
continue;
+ /* Check if already freed (irq was muxed) */
+ for (j = 0; j < i; j++)
+ if (port->irqs[j] == irq)
+ j = i + 1;
+ if (j > i)
+ continue;
+
free_irq(port->irqs[i], port);
kfree(port->irqstr[i]);
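
The duplicate check added to sci_free_irq() above guards against calling free_irq() twice when several entries of the irqs[] array are muxed onto one interrupt line; a standalone restatement of that check (hypothetical helper, not the driver function itself) reads:

static bool example_irq_already_freed(const int *irqs, int i)
{
	int j;

	/* An IRQ number that already appeared earlier in the array was
	 * freed on that earlier iteration and must be skipped now. */
	for (j = 0; j < i; j++)
		if (irqs[j] == irqs[i])
			return true;

	return false;
}
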
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 4287ca305b6b..1891a45ac05d 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -371,7 +371,7 @@ static void sprd_set_termios(struct uart_port *port,
/* ask the core to calculate the divisor for us */
baud = uart_get_baud_rate(port, termios, old, 0, SPRD_BAUD_IO_LIMIT);
- quot = (unsigned int)((port->uartclk + baud / 2) / baud);
+ quot = port->uartclk / baud;
/* set data length */
switch (termios->c_cflag & CSIZE) {
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
new file mode 100644
index 000000000000..aaf8748a6147
--- /dev/null
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/console.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#define TCU_MBOX_BYTE(i, x) ((x) << (i * 8))
+#define TCU_MBOX_BYTE_V(x, i) (((x) >> (i * 8)) & 0xff)
+#define TCU_MBOX_NUM_BYTES(x) ((x) << 24)
+#define TCU_MBOX_NUM_BYTES_V(x) (((x) >> 24) & 0x3)
+
+struct tegra_tcu {
+ struct uart_driver driver;
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
+ struct console console;
+#endif
+ struct uart_port port;
+
+ struct mbox_client tx_client, rx_client;
+ struct mbox_chan *tx, *rx;
+};
+
+static unsigned int tegra_tcu_uart_tx_empty(struct uart_port *port)
+{
+ return TIOCSER_TEMT;
+}
+
+static void tegra_tcu_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static unsigned int tegra_tcu_uart_get_mctrl(struct uart_port *port)
+{
+ return 0;
+}
+
+static void tegra_tcu_uart_stop_tx(struct uart_port *port)
+{
+}
+
+static void tegra_tcu_write_one(struct tegra_tcu *tcu, u32 value,
+ unsigned int count)
+{
+ void *msg;
+
+ value |= TCU_MBOX_NUM_BYTES(count);
+ msg = (void *)(unsigned long)value;
+ mbox_send_message(tcu->tx, msg);
+ mbox_flush(tcu->tx, 1000);
+}
+
+static void tegra_tcu_write(struct tegra_tcu *tcu, const char *s,
+ unsigned int count)
+{
+ unsigned int written = 0, i = 0;
+ bool insert_nl = false;
+ u32 value = 0;
+
+ while (i < count) {
+ if (insert_nl) {
+ value |= TCU_MBOX_BYTE(written++, '\n');
+ insert_nl = false;
+ i++;
+ } else if (s[i] == '\n') {
+ value |= TCU_MBOX_BYTE(written++, '\r');
+ insert_nl = true;
+ } else {
+ value |= TCU_MBOX_BYTE(written++, s[i++]);
+ }
+
+ if (written == 3) {
+ tegra_tcu_write_one(tcu, value, 3);
+ value = written = 0;
+ }
+ }
+
+ if (written)
+ tegra_tcu_write_one(tcu, value, written);
+}
+
+static void tegra_tcu_uart_start_tx(struct uart_port *port)
+{
+ struct tegra_tcu *tcu = port->private_data;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long count;
+
+ for (;;) {
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (!count)
+ break;
+
+ tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ }
+
+ uart_write_wakeup(port);
+}
+
+static void tegra_tcu_uart_stop_rx(struct uart_port *port)
+{
+}
+
+static void tegra_tcu_uart_break_ctl(struct uart_port *port, int ctl)
+{
+}
+
+static int tegra_tcu_uart_startup(struct uart_port *port)
+{
+ return 0;
+}
+
+static void tegra_tcu_uart_shutdown(struct uart_port *port)
+{
+}
+
+static void tegra_tcu_uart_set_termios(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+}
+
+static const struct uart_ops tegra_tcu_uart_ops = {
+ .tx_empty = tegra_tcu_uart_tx_empty,
+ .set_mctrl = tegra_tcu_uart_set_mctrl,
+ .get_mctrl = tegra_tcu_uart_get_mctrl,
+ .stop_tx = tegra_tcu_uart_stop_tx,
+ .start_tx = tegra_tcu_uart_start_tx,
+ .stop_rx = tegra_tcu_uart_stop_rx,
+ .break_ctl = tegra_tcu_uart_break_ctl,
+ .startup = tegra_tcu_uart_startup,
+ .shutdown = tegra_tcu_uart_shutdown,
+ .set_termios = tegra_tcu_uart_set_termios,
+};
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
+static void tegra_tcu_console_write(struct console *cons, const char *s,
+ unsigned int count)
+{
+ struct tegra_tcu *tcu = container_of(cons, struct tegra_tcu, console);
+
+ tegra_tcu_write(tcu, s, count);
+}
+
+static int tegra_tcu_console_setup(struct console *cons, char *options)
+{
+ return 0;
+}
+#endif
+
+static void tegra_tcu_receive(struct mbox_client *cl, void *msg)
+{
+ struct tegra_tcu *tcu = container_of(cl, struct tegra_tcu, rx_client);
+ struct tty_port *port = &tcu->port.state->port;
+ u32 value = (u32)(unsigned long)msg;
+ unsigned int num_bytes, i;
+
+ num_bytes = TCU_MBOX_NUM_BYTES_V(value);
+
+ for (i = 0; i < num_bytes; i++)
+ tty_insert_flip_char(port, TCU_MBOX_BYTE_V(value, i),
+ TTY_NORMAL);
+
+ tty_flip_buffer_push(port);
+}
+
+static int tegra_tcu_probe(struct platform_device *pdev)
+{
+ struct uart_port *port;
+ struct tegra_tcu *tcu;
+ int err;
+
+ tcu = devm_kzalloc(&pdev->dev, sizeof(*tcu), GFP_KERNEL);
+ if (!tcu)
+ return -ENOMEM;
+
+ tcu->tx_client.dev = &pdev->dev;
+ tcu->rx_client.dev = &pdev->dev;
+ tcu->rx_client.rx_callback = tegra_tcu_receive;
+
+ tcu->tx = mbox_request_channel_byname(&tcu->tx_client, "tx");
+ if (IS_ERR(tcu->tx)) {
+ err = PTR_ERR(tcu->tx);
+ dev_err(&pdev->dev, "failed to get tx mailbox: %d\n", err);
+ return err;
+ }
+
+ tcu->rx = mbox_request_channel_byname(&tcu->rx_client, "rx");
+ if (IS_ERR(tcu->rx)) {
+ err = PTR_ERR(tcu->rx);
+ dev_err(&pdev->dev, "failed to get rx mailbox: %d\n", err);
+ goto free_tx;
+ }
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
+ /* setup the console */
+ strcpy(tcu->console.name, "ttyTCU");
+ tcu->console.device = uart_console_device;
+ tcu->console.flags = CON_PRINTBUFFER | CON_ANYTIME;
+ tcu->console.index = -1;
+ tcu->console.write = tegra_tcu_console_write;
+ tcu->console.setup = tegra_tcu_console_setup;
+ tcu->console.data = &tcu->driver;
+#endif
+
+ /* setup the driver */
+ tcu->driver.owner = THIS_MODULE;
+ tcu->driver.driver_name = "tegra-tcu";
+ tcu->driver.dev_name = "ttyTCU";
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
+ tcu->driver.cons = &tcu->console;
+#endif
+ tcu->driver.nr = 1;
+
+ err = uart_register_driver(&tcu->driver);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register UART driver: %d\n",
+ err);
+ goto free_rx;
+ }
+
+ /* setup the port */
+ port = &tcu->port;
+ spin_lock_init(&port->lock);
+ port->dev = &pdev->dev;
+ port->type = PORT_TEGRA_TCU;
+ port->ops = &tegra_tcu_uart_ops;
+ port->fifosize = 1;
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->private_data = tcu;
+
+ err = uart_add_one_port(&tcu->driver, port);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add UART port: %d\n", err);
+ goto unregister_uart;
+ }
+
+ platform_set_drvdata(pdev, tcu);
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
+ register_console(&tcu->console);
+#endif
+
+ return 0;
+
+unregister_uart:
+ uart_unregister_driver(&tcu->driver);
+free_rx:
+ mbox_free_channel(tcu->rx);
+free_tx:
+ mbox_free_channel(tcu->tx);
+
+ return err;
+}
+
+static int tegra_tcu_remove(struct platform_device *pdev)
+{
+ struct tegra_tcu *tcu = platform_get_drvdata(pdev);
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_TCU_CONSOLE)
+ unregister_console(&tcu->console);
+#endif
+ uart_remove_one_port(&tcu->driver, &tcu->port);
+ uart_unregister_driver(&tcu->driver);
+ mbox_free_channel(tcu->rx);
+ mbox_free_channel(tcu->tx);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_tcu_match[] = {
+ { .compatible = "nvidia,tegra194-tcu" },
+ { }
+};
+
+static struct platform_driver tegra_tcu_driver = {
+ .driver = {
+ .name = "tegra-tcu",
+ .of_match_table = tegra_tcu_match,
+ },
+ .probe = tegra_tcu_probe,
+ .remove = tegra_tcu_remove,
+};
+module_platform_driver(tegra_tcu_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NVIDIA Tegra Combined UART driver");
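
A standalone sketch (plain C, not the driver itself) of how the new tegra-tcu driver packs console bytes into its mailbox words: up to three characters sit in bits 0-23 and the byte count in bits 24-25, mirroring the TCU_MBOX_BYTE()/TCU_MBOX_NUM_BYTES() macros above.

#include <stdint.h>
#include <stdio.h>

#define MBOX_BYTE(i, x)		((uint32_t)(unsigned char)(x) << ((i) * 8))
#define MBOX_NUM_BYTES(n)	((uint32_t)(n) << 24)

int main(void)
{
	const char *s = "ok\n";
	uint32_t word = 0;
	unsigned int i;

	for (i = 0; i < 3; i++)
		word |= MBOX_BYTE(i, s[i]);
	word |= MBOX_NUM_BYTES(3);

	/* Prints 0x030a6b6f: 'o', 'k', '\n' in the low bytes, count 3 on top. */
	printf("0x%08x\n", (unsigned int)word);
	return 0;
}
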
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 094f2958cb2b..74089f5e5b53 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -364,7 +364,13 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
cdns_uart_handle_tx(dev_id);
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
- if (isrstatus & CDNS_UART_IXR_RXMASK)
+
+ /*
+ * Skip RX processing if RX is disabled as RXEMPTY will never be set
+ * as read bytes will not be removed from the FIFO.
+ */
+ if (isrstatus & CDNS_UART_IXR_RXMASK &&
+ !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
cdns_uart_handle_rx(dev_id, isrstatus);
spin_unlock(&port->lock);
@@ -1547,27 +1553,33 @@ static int cdns_uart_probe(struct platform_device *pdev)
}
cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (PTR_ERR(cdns_uart_data->pclk) == -EPROBE_DEFER) {
+ rc = PTR_ERR(cdns_uart_data->pclk);
+ goto err_out_unregister_driver;
+ }
+
if (IS_ERR(cdns_uart_data->pclk)) {
cdns_uart_data->pclk = devm_clk_get(&pdev->dev, "aper_clk");
- if (!IS_ERR(cdns_uart_data->pclk))
- dev_err(&pdev->dev, "clock name 'aper_clk' is deprecated.\n");
+ if (IS_ERR(cdns_uart_data->pclk)) {
+ rc = PTR_ERR(cdns_uart_data->pclk);
+ goto err_out_unregister_driver;
+ }
+ dev_err(&pdev->dev, "clock name 'aper_clk' is deprecated.\n");
}
- if (IS_ERR(cdns_uart_data->pclk)) {
- dev_err(&pdev->dev, "pclk clock not found.\n");
- rc = PTR_ERR(cdns_uart_data->pclk);
+
+ cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "uart_clk");
+ if (PTR_ERR(cdns_uart_data->uartclk) == -EPROBE_DEFER) {
+ rc = PTR_ERR(cdns_uart_data->uartclk);
goto err_out_unregister_driver;
}
- cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "uart_clk");
if (IS_ERR(cdns_uart_data->uartclk)) {
cdns_uart_data->uartclk = devm_clk_get(&pdev->dev, "ref_clk");
- if (!IS_ERR(cdns_uart_data->uartclk))
- dev_err(&pdev->dev, "clock name 'ref_clk' is deprecated.\n");
- }
- if (IS_ERR(cdns_uart_data->uartclk)) {
- dev_err(&pdev->dev, "uart_clk clock not found.\n");
- rc = PTR_ERR(cdns_uart_data->uartclk);
- goto err_out_unregister_driver;
+ if (IS_ERR(cdns_uart_data->uartclk)) {
+ rc = PTR_ERR(cdns_uart_data->uartclk);
+ goto err_out_unregister_driver;
+ }
+ dev_err(&pdev->dev, "clock name 'ref_clk' is deprecated.\n");
}
rc = clk_prepare_enable(cdns_uart_data->pclk);
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index d55c858d6058..84f26e43b229 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -4325,41 +4325,6 @@ static int mgsl_init_tty(void)
return 0;
}
-/* enumerate user specified ISA adapters
- */
-static void mgsl_enum_isa_devices(void)
-{
- struct mgsl_struct *info;
- int i;
-
- /* Check for user specified ISA devices */
-
- for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
- if ( debug_level >= DEBUG_LEVEL_INFO )
- printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
- io[i], irq[i], dma[i] );
-
- info = mgsl_allocate_device();
- if ( !info ) {
- /* error allocating device instance data */
- if ( debug_level >= DEBUG_LEVEL_ERROR )
- printk( "can't allocate device instance data.\n");
- continue;
- }
-
- /* Copy user configuration info to device instance data */
- info->io_base = (unsigned int)io[i];
- info->irq_level = (unsigned int)irq[i];
- info->irq_level = irq_canonicalize(info->irq_level);
- info->dma_level = (unsigned int)dma[i];
- info->bus_type = MGSL_BUS_TYPE_ISA;
- info->io_addr_size = 16;
- info->irq_flags = 0;
-
- mgsl_add_device( info );
- }
-}
-
static void synclink_cleanup(void)
{
int rc;
@@ -4403,7 +4368,6 @@ static int __init synclink_init(void)
printk("%s %s\n", driver_name, driver_version);
- mgsl_enum_isa_devices();
if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
else
@@ -5025,12 +4989,6 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
info->mbre_bit = BIT8;
outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
- if (info->bus_type == MGSL_BUS_TYPE_ISA) {
- /* Enable DMAEN (Port 7, Bit 14) */
- /* This connects the DMA request signal to the ISA bus */
- usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
- }
-
/* DMA Control Register (DCR)
*
* <15..14> 10 Priority mode = Alternating Tx/Rx
@@ -6007,12 +5965,6 @@ static void usc_set_async_mode( struct mgsl_struct *info )
usc_EnableMasterIrqBit( info );
- if (info->bus_type == MGSL_BUS_TYPE_ISA) {
- /* Enable INTEN (Port 6, Bit12) */
- /* This connects the IRQ request signal to the ISA bus */
- usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
- }
-
if (info->params.loopback) {
info->loopback_bits = 0x300;
outw(0x0300, info->io_base + CCAR);
@@ -6107,12 +6059,6 @@ static void usc_set_sync_mode( struct mgsl_struct *info )
usc_loopback_frame( info );
usc_set_sdlc_mode( info );
- if (info->bus_type == MGSL_BUS_TYPE_ISA) {
- /* Enable INTEN (Port 6, Bit12) */
- /* This connects the IRQ request signal to the ISA bus */
- usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
- }
-
usc_enable_aux_clock(info, info->params.clock_speed);
if (info->params.loopback)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 1f03078ec352..fa0ce7dd9e24 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -539,7 +539,6 @@ void __handle_sysrq(int key, bool check_mask)
*/
orig_log_level = console_loglevel;
console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
- pr_info("SysRq : ");
op_p = __sysrq_get_key_op(key);
if (op_p) {
@@ -548,14 +547,15 @@ void __handle_sysrq(int key, bool check_mask)
* should not) and is the invoked operation enabled?
*/
if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
- pr_cont("%s\n", op_p->action_msg);
+ pr_info("%s\n", op_p->action_msg);
console_loglevel = orig_log_level;
op_p->handler(key);
} else {
- pr_cont("This sysrq operation is disabled.\n");
+ pr_info("This sysrq operation is disabled.\n");
+ console_loglevel = orig_log_level;
}
} else {
- pr_cont("HELP : ");
+ pr_info("HELP : ");
/* Only print the help msg once per handler */
for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
if (sysrq_key_table[i]) {
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 77070c2d1240..ec145a59f199 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -26,7 +26,7 @@
* Byte threshold to limit memory consumption for flip buffers.
* The actual memory limit is > 2x this amount.
*/
-#define TTYB_DEFAULT_MEM_LIMIT 65536
+#define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL)
/*
* We default to dicing tty buffer allocations to this many characters
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index bfe9ad85b362..5fa250157025 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -513,6 +513,8 @@ static const struct file_operations hung_up_tty_fops = {
static DEFINE_SPINLOCK(redirect_lock);
static struct file *redirect;
+extern void tty_sysctl_init(void);
+
/**
* tty_wakeup - request more data
* @tty: terminal
@@ -1256,7 +1258,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
- int retval;
+ struct tty_ldisc *ld;
+ int retval = 0;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
@@ -1268,13 +1271,18 @@ static int tty_reopen(struct tty_struct *tty)
if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
return -EBUSY;
- retval = tty_ldisc_lock(tty, 5 * HZ);
- if (retval)
- return retval;
+ ld = tty_ldisc_ref_wait(tty);
+ if (ld) {
+ tty_ldisc_deref(ld);
+ } else {
+ retval = tty_ldisc_lock(tty, 5 * HZ);
+ if (retval)
+ return retval;
- if (!tty->ldisc)
- retval = tty_ldisc_reinit(tty, tty->termios.c_line);
- tty_ldisc_unlock(tty);
+ if (!tty->ldisc)
+ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+ tty_ldisc_unlock(tty);
+ }
if (retval == 0)
tty->count++;
@@ -2183,7 +2191,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return -EIO;
- ld->ops->receive_buf(tty, &ch, &mbz, 1);
+ if (ld->ops->receive_buf)
+ ld->ops->receive_buf(tty, &ch, &mbz, 1);
tty_ldisc_deref(ld);
return 0;
}
@@ -3476,6 +3485,7 @@ void console_sysfs_notify(void)
*/
int __init tty_init(void)
{
+ tty_sysctl_init();
cdev_init(&tty_cdev, &tty_fops);
if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 45eda69b150c..e38f104db174 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
* takes tty_ldiscs_lock to guard against ldisc races
*/
+#if defined(CONFIG_LDISC_AUTOLOAD)
+ #define INITIAL_AUTOLOAD_STATE 1
+#else
+ #define INITIAL_AUTOLOAD_STATE 0
+#endif
+static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
+
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
@@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
*/
ldops = get_ldops(disc);
if (IS_ERR(ldops)) {
+ if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
+ return ERR_PTR(-EPERM);
request_module("tty-ldisc-%d", disc);
ldops = get_ldops(disc);
if (IS_ERR(ldops))
@@ -845,3 +854,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
}
+
+static int zero;
+static int one = 1;
+static struct ctl_table tty_table[] = {
+ {
+ .procname = "ldisc_autoload",
+ .data = &tty_ldisc_autoload,
+ .maxlen = sizeof(tty_ldisc_autoload),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ { }
+};
+
+static struct ctl_table tty_dir_table[] = {
+ {
+ .procname = "tty",
+ .mode = 0555,
+ .child = tty_table,
+ },
+ { }
+};
+
+static struct ctl_table tty_root_table[] = {
+ {
+ .procname = "dev",
+ .mode = 0555,
+ .child = tty_dir_table,
+ },
+ { }
+};
+
+void tty_sysctl_init(void)
+{
+ register_sysctl_table(tty_root_table);
+}
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 2384ea85ffaf..160f46115aaa 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -80,7 +80,7 @@
struct vcs_poll_data {
struct notifier_block notifier;
unsigned int cons_num;
- bool seen_last_update;
+ int event;
wait_queue_head_t waitq;
struct fasync_struct *fasync;
};
@@ -93,9 +93,18 @@ vcs_notifier(struct notifier_block *nb, unsigned long code, void *_param)
struct vcs_poll_data *poll =
container_of(nb, struct vcs_poll_data, notifier);
int currcons = poll->cons_num;
-
- if (code != VT_UPDATE)
+ int fa_band;
+
+ switch (code) {
+ case VT_UPDATE:
+ fa_band = POLL_PRI;
+ break;
+ case VT_DEALLOCATE:
+ fa_band = POLL_HUP;
+ break;
+ default:
return NOTIFY_DONE;
+ }
if (currcons == 0)
currcons = fg_console;
@@ -104,9 +113,9 @@ vcs_notifier(struct notifier_block *nb, unsigned long code, void *_param)
if (currcons != vc->vc_num)
return NOTIFY_DONE;
- poll->seen_last_update = false;
+ poll->event = code;
wake_up_interruptible(&poll->waitq);
- kill_fasync(&poll->fasync, SIGIO, POLL_IN);
+ kill_fasync(&poll->fasync, SIGIO, fa_band);
return NOTIFY_OK;
}
@@ -131,6 +140,15 @@ vcs_poll_data_get(struct file *file)
poll->cons_num = console(file_inode(file));
init_waitqueue_head(&poll->waitq);
poll->notifier.notifier_call = vcs_notifier;
+ /*
+ * In order not to lose any update event, we must pretend one might
+ * have occurred before we have a chance to register our notifier.
+ * This is also how user space has come to detect which kernels
+ * support POLLPRI on /dev/vcs* devices i.e. using poll() with
+ * POLLPRI and a zero timeout.
+ */
+ poll->event = VT_UPDATE;
+
if (register_vt_notifier(&poll->notifier) != 0) {
kfree(poll);
return NULL;
@@ -261,7 +279,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
poll = file->private_data;
if (count && poll)
- poll->seen_last_update = true;
+ poll->event = 0;
read = 0;
ret = 0;
while (count) {
@@ -335,8 +353,9 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (p < HEADER_SIZE) {
size_t tmp_count;
- con_buf0[0] = (char)vc->vc_rows;
- con_buf0[1] = (char)vc->vc_cols;
+ /* clamp header values if they don't fit */
+ con_buf0[0] = min(vc->vc_rows, 0xFFu);
+ con_buf0[1] = min(vc->vc_cols, 0xFFu);
getconsxy(vc, con_buf0 + 2);
con_buf_start += p;
@@ -615,12 +634,21 @@ static __poll_t
vcs_poll(struct file *file, poll_table *wait)
{
struct vcs_poll_data *poll = vcs_poll_data_get(file);
- __poll_t ret = DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+ __poll_t ret = DEFAULT_POLLMASK|EPOLLERR;
if (poll) {
poll_wait(file, &poll->waitq, wait);
- if (poll->seen_last_update)
+ switch (poll->event) {
+ case VT_UPDATE:
+ ret = DEFAULT_POLLMASK|EPOLLPRI;
+ break;
+ case VT_DEALLOCATE:
+ ret = DEFAULT_POLLMASK|EPOLLHUP|EPOLLERR;
+ break;
+ case 0:
ret = DEFAULT_POLLMASK;
+ break;
+ }
}
return ret;
}
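
As the comment added to vcs_poll_data_get() describes, userspace probes for this support with a zero-timeout poll() asking for POLLPRI; a small illustrative sketch (the /dev/vcsa1 node is just an example, any /dev/vcs* or /dev/vcsa* device works):

/* Sketch: detect VT_UPDATE/VT_DEALLOCATE notification support on a
 * vcs device using poll() with POLLPRI and a zero timeout.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	int ret;

	pfd.fd = open("/dev/vcsa1", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLPRI;
	ret = poll(&pfd, 1, 0);		/* zero timeout: never blocks */
	if (ret > 0 && (pfd.revents & POLLPRI))
		printf("kernel reports POLLPRI on vcs devices\n");
	else
		printf("no POLLPRI support detected\n");
	close(pfd.fd);
	return 0;
}
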
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 41ec8e5010f3..d34984aa646d 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -935,8 +935,11 @@ static void flush_scrollback(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
+ set_origin(vc);
if (vc->vc_sw->con_flush_scrollback)
vc->vc_sw->con_flush_scrollback(vc);
+ else
+ vc->vc_sw->con_switch(vc);
}
/*
@@ -1272,6 +1275,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (con_is_visible(vc))
update_screen(vc);
vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
+ notify_update(vc);
return err;
}
@@ -1341,6 +1345,8 @@ struct vc_data *vc_deallocate(unsigned int currcons)
* VT102 emulator
*/
+enum { EPecma = 0, EPdec, EPeq, EPgt, EPlt};
+
#define set_kbd(vc, x) vt_set_kbd_mode_bit((vc)->vc_num, (x))
#define clr_kbd(vc, x) vt_clr_kbd_mode_bit((vc)->vc_num, (x))
#define is_kbd(vc, x) vt_get_kbd_mode_bit((vc)->vc_num, (x))
@@ -1502,8 +1508,10 @@ static void csi_J(struct vc_data *vc, int vpar)
count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
start = (unsigned short *)vc->vc_origin;
break;
+ case 3: /* include scrollback */
+ flush_scrollback(vc);
+ /* fallthrough */
case 2: /* erase whole display */
- case 3: /* (and scrollback buffer later) */
vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
count = vc->vc_cols * vc->vc_rows;
start = (unsigned short *)vc->vc_origin;
@@ -1512,13 +1520,7 @@ static void csi_J(struct vc_data *vc, int vpar)
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
- if (vpar == 3) {
- set_origin(vc);
- flush_scrollback(vc);
- if (con_is_visible(vc))
- update_screen(vc);
- } else if (con_should_update(vc))
- do_update_region(vc, (unsigned long) start, count);
+ update_region(vc, (unsigned long) start, count);
vc->vc_need_wrap = 0;
}
@@ -1627,9 +1629,9 @@ static void rgb_background(struct vc_data *vc, const struct rgb *c)
/*
* ITU T.416 Higher colour modes. They break the usual properties of SGR codes
- * and thus need to be detected and ignored by hand. Strictly speaking, that
- * standard also wants : rather than ; as separators, contrary to ECMA-48, but
- * no one produces such codes and almost no one accepts them.
+ * and thus need to be detected and ignored by hand. That standard also
+ * wants : rather than ; as separators but sequences containing : are currently
+ * completely ignored by the parser.
*
* Subcommands 3 (CMY) and 4 (CMYK) are so insane there's no point in
* supporting them.
@@ -1814,7 +1816,7 @@ static void set_mode(struct vc_data *vc, int on_off)
int i;
for (i = 0; i <= vc->vc_npar; i++)
- if (vc->vc_ques) {
+ if (vc->vc_priv == EPdec) {
switch(vc->vc_par[i]) { /* DEC private modes set/reset */
case 1: /* Cursor keys send ^[Ox/^[[x */
if (on_off)
@@ -2021,7 +2023,7 @@ static void restore_cur(struct vc_data *vc)
}
enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
- EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
+ EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
ESpalette, ESosc };
/* console_lock is held (except via vc_init()) */
@@ -2030,7 +2032,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
vc->vc_top = 0;
vc->vc_bottom = vc->vc_rows;
vc->vc_state = ESnormal;
- vc->vc_ques = 0;
+ vc->vc_priv = EPecma;
vc->vc_translate = set_translate(LAT1_MAP, vc);
vc->vc_G0_charset = LAT1_MAP;
vc->vc_G1_charset = GRAF_MAP;
@@ -2111,6 +2113,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
lf(vc);
if (!is_kbd(vc, lnm))
return;
+ /* fall through */
case 13:
cr(vc);
return;
@@ -2233,9 +2236,22 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_state=ESfunckey;
return;
}
- vc->vc_ques = (c == '?');
- if (vc->vc_ques)
+ switch (c) {
+ case '?':
+ vc->vc_priv = EPdec;
return;
+ case '>':
+ vc->vc_priv = EPgt;
+ return;
+ case '=':
+ vc->vc_priv = EPeq;
+ return;
+ case '<':
+ vc->vc_priv = EPlt;
+ return;
+ }
+ vc->vc_priv = EPecma;
+ /* fall through */
case ESgetpars:
if (c == ';' && vc->vc_npar < NPAR - 1) {
vc->vc_npar++;
@@ -2245,16 +2261,22 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_par[vc->vc_npar] += c - '0';
return;
}
+ if (c >= 0x20 && c <= 0x3f) { /* 0x2x, 0x3a and 0x3c - 0x3f */
+ vc->vc_state = EScsiignore;
+ return;
+ }
vc->vc_state = ESnormal;
switch(c) {
case 'h':
- set_mode(vc, 1);
+ if (vc->vc_priv <= EPdec)
+ set_mode(vc, 1);
return;
case 'l':
- set_mode(vc, 0);
+ if (vc->vc_priv <= EPdec)
+ set_mode(vc, 0);
return;
case 'c':
- if (vc->vc_ques) {
+ if (vc->vc_priv == EPdec) {
if (vc->vc_par[0])
vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
else
@@ -2263,7 +2285,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
}
break;
case 'm':
- if (vc->vc_ques) {
+ if (vc->vc_priv == EPdec) {
clear_selection();
if (vc->vc_par[0])
vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1];
@@ -2273,7 +2295,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
}
break;
case 'n':
- if (!vc->vc_ques) {
+ if (vc->vc_priv == EPecma) {
if (vc->vc_par[0] == 5)
status_report(tty);
else if (vc->vc_par[0] == 6)
@@ -2281,8 +2303,8 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
}
return;
}
- if (vc->vc_ques) {
- vc->vc_ques = 0;
+ if (vc->vc_priv != EPecma) {
+ vc->vc_priv = EPecma;
return;
}
switch(c) {
@@ -2405,6 +2427,11 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
return;
}
return;
+ case EScsiignore:
+ if (c >= 0x20 && c <= 0x3f)
+ return;
+ vc->vc_state = ESnormal;
+ return;
case ESpercent:
vc->vc_state = ESnormal;
switch (c) {
@@ -2764,8 +2791,8 @@ rescan_last_byte:
con_flush(vc, draw_from, draw_to, &draw_x);
vc_uniscr_debug_check(vc);
console_conditional_schedule();
- console_unlock();
notify_update(vc);
+ console_unlock();
return n;
}
@@ -2884,8 +2911,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
unsigned char c;
static DEFINE_SPINLOCK(printing_lock);
const ushort *start;
- ushort cnt = 0;
- ushort myx;
+ ushort start_x, cnt;
int kmsg_console;
/* console busy or not yet initialized */
@@ -2898,10 +2924,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
vc = vc_cons[kmsg_console - 1].d;
- /* read `x' only after setting currcons properly (otherwise
- the `x' macro will read the x of the foreground console). */
- myx = vc->vc_x;
-
if (!vc_cons_allocated(fg_console)) {
/* impossible */
/* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
@@ -2916,53 +2938,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
hide_cursor(vc);
start = (ushort *)vc->vc_pos;
-
- /* Contrived structure to try to emulate original need_wrap behaviour
- * Problems caused when we have need_wrap set on '\n' character */
+ start_x = vc->vc_x;
+ cnt = 0;
while (count--) {
c = *b++;
if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
- if (cnt > 0) {
- if (con_is_visible(vc))
- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
- vc->vc_x += cnt;
- if (vc->vc_need_wrap)
- vc->vc_x--;
- cnt = 0;
- }
+ if (cnt && con_is_visible(vc))
+ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
+ cnt = 0;
if (c == 8) { /* backspace */
bs(vc);
start = (ushort *)vc->vc_pos;
- myx = vc->vc_x;
+ start_x = vc->vc_x;
continue;
}
if (c != 13)
lf(vc);
cr(vc);
start = (ushort *)vc->vc_pos;
- myx = vc->vc_x;
+ start_x = vc->vc_x;
if (c == 10 || c == 13)
continue;
}
+ vc_uniscr_putc(vc, c);
scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
notify_write(vc, c);
cnt++;
- if (myx == vc->vc_cols - 1) {
- vc->vc_need_wrap = 1;
- continue;
- }
- vc->vc_pos += 2;
- myx++;
- }
- if (cnt > 0) {
- if (con_is_visible(vc))
- vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
- vc->vc_x += cnt;
- if (vc->vc_x == vc->vc_cols) {
- vc->vc_x--;
+ if (vc->vc_x == vc->vc_cols - 1) {
vc->vc_need_wrap = 1;
+ } else {
+ vc->vc_pos += 2;
+ vc->vc_x++;
}
}
+ if (cnt && con_is_visible(vc))
+ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
set_cursor(vc);
notify_update(vc);
@@ -4607,8 +4617,9 @@ EXPORT_SYMBOL_GPL(screen_pos);
void getconsxy(struct vc_data *vc, unsigned char *p)
{
- p[0] = vc->vc_x;
- p[1] = vc->vc_y;
+ /* clamp values if they don't fit */
+ p[0] = min(vc->vc_x, 0xFFu);
+ p[1] = min(vc->vc_y, 0xFFu);
}
void putconsxy(struct vc_data *vc, unsigned char *p)
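
The EPdec/EPecma split and the reworked csi_J() case 3 can be exercised from a Linux VT with ordinary control sequences; a small illustrative program (the sequences only have the described effect on a VT console such as /dev/tty1, not under a graphical terminal emulator):

/* Sketch: "\e[?25l"/"\e[?25h" are DEC private (EPdec) sequences that
 * hide/show the cursor via set_mode(); "\e[3J" is the ECMA-48 erase
 * that now flushes the scrollback through csi_J(vc, 3) above.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	printf("\033[?25l");		/* DEC private: hide cursor */
	fflush(stdout);
	sleep(1);
	printf("\033[?25h");		/* DEC private: show cursor */
	printf("\033[2J\033[3J");	/* clear screen, then scrollback */
	fflush(stdout);
	return 0;
}
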
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 131342280b46..a57698985f9c 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -491,10 +491,10 @@ static int uio_open(struct inode *inode, struct file *filep)
if (!idev->info) {
mutex_unlock(&idev->info_lock);
ret = -EINVAL;
- goto err_alloc_listener;
+ goto err_infoopen;
}
- if (idev->info && idev->info->open)
+ if (idev->info->open)
ret = idev->info->open(idev->info, inode);
mutex_unlock(&idev->info_lock);
if (ret)
@@ -635,7 +635,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
goto out;
}
- if (!idev->info || !idev->info->irq) {
+ if (!idev->info->irq) {
retval = -EIO;
goto out;
}
@@ -940,9 +940,12 @@ int __uio_register_device(struct module *owner,
atomic_set(&idev->event, 0);
ret = uio_get_minor(idev);
- if (ret)
+ if (ret) {
+ kfree(idev);
return ret;
+ }
+ device_initialize(&idev->dev);
idev->dev.devt = MKDEV(uio_major, idev->minor);
idev->dev.class = &uio_class;
idev->dev.parent = parent;
@@ -953,7 +956,7 @@ int __uio_register_device(struct module *owner,
if (ret)
goto err_device_create;
- ret = device_register(&idev->dev);
+ ret = device_add(&idev->dev);
if (ret)
goto err_device_create;
@@ -985,9 +988,10 @@ int __uio_register_device(struct module *owner,
err_request_irq:
uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
- device_unregister(&idev->dev);
+ device_del(&idev->dev);
err_device_create:
uio_free_minor(idev);
+ put_device(&idev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
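
The __uio_register_device() changes above switch to the usual two-step registration so that errors after device_initialize() are unwound with put_device() instead of kfree(); a minimal sketch of that idiom, with illustrative names, assuming nothing beyond the driver core API:

/* Sketch of the device_initialize()/device_add()/put_device() pattern.
 * After device_initialize(), the embedded kobject owns the allocation,
 * so failures must be released via put_device() -> release().
 */
#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {
	struct device dev;
};

static void my_release(struct device *dev)
{
	kfree(container_of(dev, struct my_dev, dev));
}

static int my_register(struct device *parent)
{
	struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);
	int ret;

	if (!md)
		return -ENOMEM;

	device_initialize(&md->dev);		/* refcount is now live */
	md->dev.parent = parent;
	md->dev.release = my_release;
	dev_set_name(&md->dev, "my_dev");

	ret = device_add(&md->dev);
	if (ret) {
		put_device(&md->dev);		/* frees via my_release() */
		return ret;
	}
	return 0;
}
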
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 8773e373ffe5..dde5cbb27178 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -39,6 +39,22 @@ to_uio_pci_generic_dev(struct uio_info *info)
return container_of(info, struct uio_pci_generic_dev, info);
}
+static int release(struct uio_info *info, struct inode *inode)
+{
+ struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
+
+ /*
+ * This driver is insecure when used with devices doing DMA, but some
+ * people (mis)use it with such devices.
+ * Let's at least make sure DMA isn't left enabled after the userspace
+ * driver closes the fd.
+ * Note that there's a non-zero chance doing this will wedge the device
+ * at least until reset.

+ */
+ pci_clear_master(gdev->pdev);
+ return 0;
+}
+
/* Interrupt handler. Read/modify/write the command register to disable
* the interrupt. */
static irqreturn_t irqhandler(int irq, struct uio_info *info)
@@ -78,6 +94,7 @@ static int probe(struct pci_dev *pdev,
gdev->info.name = "uio_pci_generic";
gdev->info.version = DRIVER_VERSION;
+ gdev->info.release = release;
gdev->pdev = pdev;
if (pdev->irq) {
gdev->info.irq = pdev->irq;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 70e6c956c23c..e4b27413f528 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB device configuration
#
diff --git a/drivers/usb/README b/drivers/usb/README
deleted file mode 100644
index 2144e7dbfa41..000000000000
--- a/drivers/usb/README
+++ /dev/null
@@ -1,54 +0,0 @@
-To understand all the Linux-USB framework, you'll use these resources:
-
- * This source code. This is necessarily an evolving work, and
- includes kerneldoc that should help you get a current overview.
- ("make pdfdocs", and then look at "usb.pdf" for host side and
- "gadget.pdf" for peripheral side.) Also, Documentation/usb has
- more information.
-
- * The USB 2.0 specification (from www.usb.org), with supplements
- such as those for USB OTG and the various device classes.
- The USB specification has a good overview chapter, and USB
- peripherals conform to the widely known "Chapter 9".
-
- * Chip specifications for USB controllers. Examples include
- host controllers (on PCs, servers, and more); peripheral
- controllers (in devices with Linux firmware, like printers or
- cell phones); and hard-wired peripherals like Ethernet adapters.
-
- * Specifications for other protocols implemented by USB peripheral
- functions. Some are vendor-specific; others are vendor-neutral
- but just standardized outside of the www.usb.org team.
-
-Here is a list of what each subdirectory here is, and what is contained in
-them.
-
-core/ - This is for the core USB host code, including the
- usbfs files and the hub class driver ("hub_wq").
-
-host/ - This is for USB host controller drivers. This
- includes UHCI, OHCI, EHCI, and others that might
- be used with more specialized "embedded" systems.
-
-gadget/ - This is for USB peripheral controller drivers and
- the various gadget drivers which talk to them.
-
-
-Individual USB driver directories. A new driver should be added to the
-first subdirectory in the list below that it fits into.
-
-image/ - This is for still image drivers, like scanners or
- digital cameras.
-../input/ - This is for any driver that uses the input subsystem,
- like keyboard, mice, touchscreens, tablets, etc.
-../media/ - This is for multimedia drivers, like video cameras,
- radios, and any other drivers that talk to the v4l
- subsystem.
-../net/ - This is for network drivers.
-serial/ - This is for USB to serial drivers.
-storage/ - This is for USB mass-storage drivers.
-class/ - This is for all USB device drivers that do not fit
- into any of the above categories, and work for a range
- of USB Class specified devices.
-misc/ - This is for all USB device drivers that do not fit
- into any of the above categories.
diff --git a/drivers/usb/atm/Kconfig b/drivers/usb/atm/Kconfig
index 0f922942a07a..989aaa3b080d 100644
--- a/drivers/usb/atm/Kconfig
+++ b/drivers/usb/atm/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB/ATM DSL configuration
#
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index ee34e9046f7e..eb37ebfcb123 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config USB_CHIPIDEA
tristate "ChipIdea Highspeed Dual Role Controller"
depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index e81de9ca8729..ceec8d5985d4 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -7,10 +7,8 @@
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/dma-mapping.h>
#include <linux/usb/chipidea.h>
#include <linux/usb/of.h>
#include <linux/clk.h>
@@ -152,8 +150,8 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
dev_warn(dev, "No over current polarity defined\n");
}
- if (of_find_property(np, "external-vbus-divider", NULL))
- data->evdo = 1;
+ data->pwr_pol = of_property_read_bool(np, "power-active-high");
+ data->evdo = of_property_read_bool(np, "external-vbus-divider");
if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
data->ulpi = 1;
@@ -316,7 +314,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
if (IS_ERR(data->usbmisc_data))
return PTR_ERR(data->usbmisc_data);
- if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) {
+ if ((of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC)
+ && data->usbmisc_data) {
pdata.flags |= CI_HDRC_IMX_IS_HSIC;
data->usbmisc_data->hsic = 1;
data->pinctrl = devm_pinctrl_get(dev);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 7cc53e2ce564..c842e03f8767 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -18,6 +18,7 @@ struct imx_usbmisc_data {
/* true if dt specifies polarity */
unsigned int oc_pol_configured:1;
+ unsigned int pwr_pol:1; /* power polarity */
unsigned int evdo:1; /* set external vbus divider option */
unsigned int ulpi:1; /* connected to an ULPI phy */
unsigned int hsic:1; /* HSIC controller */
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 772851bee99b..12025358bb3c 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -130,6 +130,7 @@ static int tegra_udc_remove(struct platform_device *pdev)
{
struct tegra_udc *udc = platform_get_drvdata(pdev);
+ ci_hdrc_remove_device(udc->dev);
usb_phy_set_suspend(udc->phy, 1);
clk_disable_unprepare(udc->clk);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 7bfcbb23c2a4..27749ace2d93 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -954,25 +954,47 @@ static int ci_hdrc_probe(struct platform_device *pdev)
} else if (ci->platdata->usb_phy) {
ci->usb_phy = ci->platdata->usb_phy;
} else {
+ /* Look for a generic PHY first */
ci->phy = devm_phy_get(dev->parent, "usb-phy");
- ci->usb_phy = devm_usb_get_phy(dev->parent, USB_PHY_TYPE_USB2);
- /* if both generic PHY and USB PHY layers aren't enabled */
- if (PTR_ERR(ci->phy) == -ENOSYS &&
- PTR_ERR(ci->usb_phy) == -ENXIO) {
- ret = -ENXIO;
+ if (PTR_ERR(ci->phy) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
goto ulpi_exit;
+ } else if (IS_ERR(ci->phy)) {
+ ci->phy = NULL;
}
- if (IS_ERR(ci->phy) && IS_ERR(ci->usb_phy)) {
- ret = -EPROBE_DEFER;
- goto ulpi_exit;
+ /* Look for a legacy USB PHY from device-tree next */
+ if (!ci->phy) {
+ ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent,
+ "phys", 0);
+
+ if (PTR_ERR(ci->usb_phy) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto ulpi_exit;
+ } else if (IS_ERR(ci->usb_phy)) {
+ ci->usb_phy = NULL;
+ }
}
- if (IS_ERR(ci->phy))
- ci->phy = NULL;
- else if (IS_ERR(ci->usb_phy))
- ci->usb_phy = NULL;
+ /* Look for any registered legacy USB PHY as last resort */
+ if (!ci->phy && !ci->usb_phy) {
+ ci->usb_phy = devm_usb_get_phy(dev->parent,
+ USB_PHY_TYPE_USB2);
+
+ if (PTR_ERR(ci->usb_phy) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto ulpi_exit;
+ } else if (IS_ERR(ci->usb_phy)) {
+ ci->usb_phy = NULL;
+ }
+ }
+
+ /* No USB PHY was found in the end */
+ if (!ci->phy && !ci->usb_phy) {
+ ret = -ENXIO;
+ goto ulpi_exit;
+ }
}
ret = ci_usb_phy_init(ci);
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 097ffbca0bd9..d8b67e150b12 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -63,6 +63,7 @@
#define MX6_BM_NON_BURST_SETTING BIT(1)
#define MX6_BM_OVER_CUR_DIS BIT(7)
#define MX6_BM_OVER_CUR_POLARITY BIT(8)
+#define MX6_BM_PWR_POLARITY BIT(9)
#define MX6_BM_WAKEUP_ENABLE BIT(10)
#define MX6_BM_UTMI_ON_CLOCK BIT(13)
#define MX6_BM_ID_WAKEUP BIT(16)
@@ -383,6 +384,9 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
else if (data->oc_pol_configured)
reg &= ~MX6_BM_OVER_CUR_POLARITY;
}
+ /* If the polarity is not set, keep it as set up by the bootloader */
+ if (data->pwr_pol == 1)
+ reg |= MX6_BM_PWR_POLARITY;
writel(reg, usbmisc->base + data->index * 4);
/* SoC non-burst setting */
@@ -585,6 +589,9 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
else if (data->oc_pol_configured)
reg &= ~MX6_BM_OVER_CUR_POLARITY;
}
+ /* If the polarity is not set, keep it as set up by the bootloader */
+ if (data->pwr_pol == 1)
+ reg |= MX6_BM_PWR_POLARITY;
writel(reg, usbmisc->base);
reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
diff --git a/drivers/usb/class/Kconfig b/drivers/usb/class/Kconfig
index 971385fe9abc..52f3a531a82f 100644
--- a/drivers/usb/class/Kconfig
+++ b/drivers/usb/class/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Class driver configuration
#
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ed8c62b2d9d1..739f8960811a 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1865,6 +1865,13 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = IGNORE_DEVICE,
},
+ { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+ { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index bec581fb7c63..9e9caff905d5 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -1099,7 +1099,7 @@ static int wdm_post_reset(struct usb_interface *intf)
rv = recover_from_urb_loss(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
- return 0;
+ return rv;
}
static struct usb_driver wdm_driver = {
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 4d75d9a80001..bdb6bd0b63a6 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Core configuration
#
@@ -90,3 +91,15 @@ config USB_LEDS_TRIGGER_USBPORT
This driver allows LEDs to be controlled by USB events. Enabling this
trigger allows specifying list of USB ports that should turn on LED
when some USB device gets connected.
+
+config USB_AUTOSUSPEND_DELAY
+ int "Default autosuspend delay"
+ depends on USB
+ default 2
+ help
+ The default autosuspend delay in seconds. Can be overridden
+ with the usbcore.autosuspend command line or module parameter.
+
+ The default value Linux has always had is 2 seconds. Change
+ this value if you want a different delay and cannot modify
+ the command line or module parameter.
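
This option simply seeds the usbcore.autosuspend module parameter (see the drivers/usb/core/usb.c hunk further below); a minimal sketch of the same Kconfig-backed parameter pattern, with illustrative names and assuming the symbol is configured:

/* Sketch: a module parameter whose default comes from Kconfig,
 * overridable at load time as demo.autosuspend=N.
 */
#include <linux/init.h>
#include <linux/module.h>

static int demo_delay = CONFIG_USB_AUTOSUSPEND_DELAY;	/* seconds */
module_param_named(autosuspend, demo_delay, int, 0644);
MODULE_PARM_DESC(autosuspend, "default autosuspend delay");

static int __init demo_init(void)
{
	pr_info("demo: autosuspend delay %d s\n", demo_delay);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
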
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 7b5cb28ffb35..20ff036b4c22 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -552,7 +552,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
unsigned char *buffer2;
int size2;
struct usb_descriptor_header *header;
- int len, retval;
+ int retval;
u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES];
unsigned iad_num = 0;
@@ -707,8 +707,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
nalts[i] = j = USB_MAXALTSETTING;
}
- len = sizeof(*intfc) + sizeof(struct usb_host_interface) * j;
- config->intf_cache[i] = intfc = kzalloc(len, GFP_KERNEL);
+ intfc = kzalloc(struct_size(intfc, altsetting, j), GFP_KERNEL);
+ config->intf_cache[i] = intfc;
if (!intfc)
return -ENOMEM;
kref_init(&intfc->ref);
@@ -800,13 +800,11 @@ int usb_get_configuration(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
int ncfg = dev->descriptor.bNumConfigurations;
- int result = 0;
+ int result = -ENOMEM;
unsigned int cfgno, length;
unsigned char *bigbuffer;
struct usb_config_descriptor *desc;
- cfgno = 0;
- result = -ENOMEM;
if (ncfg > USB_MAXCONFIG) {
dev_warn(ddev, "too many configurations: %d, "
"using maximum allowed: %d\n", ncfg, USB_MAXCONFIG);
@@ -832,8 +830,7 @@ int usb_get_configuration(struct usb_device *dev)
if (!desc)
goto err2;
- result = 0;
- for (; cfgno < ncfg; cfgno++) {
+ for (cfgno = 0; cfgno < ncfg; cfgno++) {
/* We grab just the first descriptor so we know how long
* the whole configuration is */
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
@@ -889,7 +886,6 @@ int usb_get_configuration(struct usb_device *dev)
goto err;
}
}
- result = 0;
err:
kfree(desc);
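
The kzalloc() call above now sizes the flexible-array allocation with struct_size() from <linux/overflow.h>, which saturates instead of wrapping on overflow; a minimal sketch of the pattern with an illustrative structure (the kernel equivalents are usb_interface_cache's altsetting[] here and urb's iso_frame_desc[] later in this series):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo {
	unsigned int count;
	u32 vals[];		/* flexible array member */
};

static struct demo *demo_alloc(unsigned int n, gfp_t gfp)
{
	/* struct_size(d, vals, n) == sizeof(*d) + n * sizeof(d->vals[0]),
	 * saturating to SIZE_MAX on overflow so kzalloc() fails cleanly. */
	struct demo *d = kzalloc(struct_size(d, vals, n), gfp);

	if (d)
		d->count = n;
	return d;
}
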
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index d65566341dd1..fa783531ee88 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -604,7 +604,7 @@ static void async_completed(struct urb *urb)
snoop(&urb->dev->dev, "urb complete\n");
snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
as->status, COMPLETE, NULL, 0);
- if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)
+ if (usb_urb_dir_in(urb))
snoop_urb_data(urb, urb->actual_length);
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
@@ -1564,12 +1564,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
}
for (totlen = u = 0; u < number_of_packets; u++) {
/*
- * arbitrary limit need for USB 3.0
- * bMaxBurst (0~15 allowed, 1~16 packets)
- * bmAttributes (bit 1:0, mult 0~2, 1~3 packets)
- * sizemax: 1024 * 16 * 3 = 49152
+ * arbitrary limit needed for USB 3.1 Gen2
+ * sizemax: 96 DPs at SSP, 96 * 1024 = 98304
*/
- if (isopkt[u].length > 49152) {
+ if (isopkt[u].length > 98304) {
ret = -EINVAL;
goto error;
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 53564386ed57..8987cec9549d 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1896,14 +1896,11 @@ int usb_runtime_idle(struct device *dev)
return -EBUSY;
}
-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
int ret = -EPERM;
- if (enable && !udev->usb2_hw_lpm_allowed)
- return 0;
-
if (hcd->driver->set_usb2_hw_lpm) {
ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
if (!ret)
@@ -1913,6 +1910,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
return ret;
}
+int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+ if (!udev->usb2_hw_lpm_capable ||
+ !udev->usb2_hw_lpm_allowed ||
+ udev->usb2_hw_lpm_enabled)
+ return 0;
+
+ return usb_set_usb2_hardware_lpm(udev, 1);
+}
+
+int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+{
+ if (!udev->usb2_hw_lpm_enabled)
+ return 0;
+
+ return usb_set_usb2_hardware_lpm(udev, 0);
+}
+
#endif /* CONFIG_PM */
struct bus_type usb_bus_type = {
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 356b05c82dbc..1ac9c1e5f773 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -118,6 +118,31 @@ int usb_choose_configuration(struct usb_device *udev)
continue;
}
+ /*
+ * Select first configuration as default for audio so that
+ * devices that don't comply with UAC3 protocol are supported.
+ * But, still iterate through other configurations and
+ * select UAC3 compliant config if present.
+ */
+ if (desc && is_audio(desc)) {
+ /* Always prefer the first found UAC3 config */
+ if (is_uac3_config(desc)) {
+ best = c;
+ break;
+ }
+
+ /* If there is no UAC3 config, prefer the first config */
+ else if (i == 0)
+ best = c;
+
+ /* Unconditional continue, because the rest of the code
+ * in the loop is irrelevant for audio devices, and
+ * because it can reassign best, which for audio devices
+ * we don't want.
+ */
+ continue;
+ }
+
/* When the first config's first interface is one of Microsoft's
* pet nonstandard Ethernet-over-USB protocols, ignore it unless
* this kernel has enabled the necessary host side driver.
@@ -132,22 +157,6 @@ int usb_choose_configuration(struct usb_device *udev)
#endif
}
- /*
- * Select first configuration as default for audio so that
- * devices that don't comply with UAC3 protocol are supported.
- * But, still iterate through other configurations and
- * select UAC3 compliant config if present.
- */
- if (i == 0 && num_configs > 1 && desc && is_audio(desc)) {
- best = c;
- continue;
- }
-
- if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) {
- best = c;
- break;
- }
-
/* From the remaining configs, choose the first one whose
* first interface is for a non-vendor-specific class.
* Reason: Linux is more likely to have a class driver
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 015b126ce455..3189181bb628 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -373,13 +373,19 @@ static const u8 ss_rh_config_descriptor[] = {
* -1 is authorized for all devices except wireless (old behaviour)
* 0 is unauthorized for all devices
* 1 is authorized for all devices
+ * 2 is authorized for internal devices
*/
-static int authorized_default = -1;
+#define USB_AUTHORIZE_WIRED -1
+#define USB_AUTHORIZE_NONE 0
+#define USB_AUTHORIZE_ALL 1
+#define USB_AUTHORIZE_INTERNAL 2
+
+static int authorized_default = USB_AUTHORIZE_WIRED;
module_param(authorized_default, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(authorized_default,
"Default USB device authorization: 0 is not authorized, 1 is "
- "authorized, -1 is authorized except for wireless USB (default, "
- "old behaviour");
+ "authorized, 2 is authorized for internal devices, -1 is "
+ "authorized except for wireless USB (default, old behaviour)");
/*-------------------------------------------------------------------------*/
/**
@@ -884,7 +890,7 @@ static ssize_t authorized_default_show(struct device *dev,
struct usb_hcd *hcd;
hcd = bus_to_hcd(usb_bus);
- return snprintf(buf, PAGE_SIZE, "%u\n", !!HCD_DEV_AUTHORIZED(hcd));
+ return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
}
static ssize_t authorized_default_store(struct device *dev,
@@ -900,11 +906,8 @@ static ssize_t authorized_default_store(struct device *dev,
hcd = bus_to_hcd(usb_bus);
result = sscanf(buf, "%u\n", &val);
if (result == 1) {
- if (val)
- set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
- else
- clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
-
+ hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
+ val : USB_DEVICE_AUTHORIZE_ALL;
result = size;
} else {
result = -EINVAL;
@@ -2736,6 +2739,11 @@ int usb_add_hcd(struct usb_hcd *hcd,
if (retval)
return retval;
+ retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
+ PHY_MODE_USB_HOST_SS);
+ if (retval)
+ goto err_usb_phy_roothub_power_on;
+
retval = usb_phy_roothub_power_on(hcd->phy_roothub);
if (retval)
goto err_usb_phy_roothub_power_on;
@@ -2743,18 +2751,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
- /* Keep old behaviour if authorized_default is not in [0, 1]. */
- if (authorized_default < 0 || authorized_default > 1) {
- if (hcd->wireless)
- clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
- else
- set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
- } else {
- if (authorized_default)
- set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
- else
- clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
+ switch (authorized_default) {
+ case USB_AUTHORIZE_NONE:
+ hcd->dev_policy = USB_DEVICE_AUTHORIZE_NONE;
+ break;
+
+ case USB_AUTHORIZE_ALL:
+ hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL;
+ break;
+
+ case USB_AUTHORIZE_INTERNAL:
+ hcd->dev_policy = USB_DEVICE_AUTHORIZE_INTERNAL;
+ break;
+
+ case USB_AUTHORIZE_WIRED:
+ default:
+ hcd->dev_policy = hcd->wireless ?
+ USB_DEVICE_AUTHORIZE_NONE : USB_DEVICE_AUTHORIZE_ALL;
+ break;
}
+
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
/* per default all interfaces are authorized */
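
The policy can also be changed at runtime through the authorized_default attribute handled above; a small userspace sketch (the /sys/bus/usb/devices/usb1 path is an example root hub, and writing requires root):

/* Sketch: switch a host controller to "internal devices only"
 * authorization (policy 2) via authorized_default_store() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/bus/usb/devices/usb1/authorized_default",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "2\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
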
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1d1e61e980f3..8d4631c81b9f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -108,6 +108,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
+static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
+ u16 portstatus);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@@ -607,6 +609,36 @@ static int hub_port_status(struct usb_hub *hub, int port1,
status, change, NULL);
}
+static void hub_resubmit_irq_urb(struct usb_hub *hub)
+{
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&hub->irq_urb_lock, flags);
+
+ if (hub->quiescing) {
+ spin_unlock_irqrestore(&hub->irq_urb_lock, flags);
+ return;
+ }
+
+ status = usb_submit_urb(hub->urb, GFP_ATOMIC);
+ if (status && status != -ENODEV && status != -EPERM &&
+ status != -ESHUTDOWN) {
+ dev_err(hub->intfdev, "resubmit --> %d\n", status);
+ mod_timer(&hub->irq_urb_retry, jiffies + HZ);
+ }
+
+ spin_unlock_irqrestore(&hub->irq_urb_lock, flags);
+}
+
+static void hub_retry_irq_urb(struct timer_list *t)
+{
+ struct usb_hub *hub = from_timer(hub, t, irq_urb_retry);
+
+ hub_resubmit_irq_urb(hub);
+}
+
+
static void kick_hub_wq(struct usb_hub *hub)
{
struct usb_interface *intf;
@@ -709,12 +741,7 @@ static void hub_irq(struct urb *urb)
kick_hub_wq(hub);
resubmit:
- if (hub->quiescing)
- return;
-
- status = usb_submit_urb(hub->urb, GFP_ATOMIC);
- if (status != 0 && status != -ENODEV && status != -EPERM)
- dev_err(hub->intfdev, "resubmit --> %d\n", status);
+ hub_resubmit_irq_urb(hub);
}
/* USB 2.0 spec Section 11.24.2.3 */
@@ -1112,6 +1139,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_ENABLE);
}
+ /* Make sure a warm-reset request is handled by port_event */
+ if (type == HUB_RESUME &&
+ hub_port_warm_reset_required(hub, port1, portstatus))
+ set_bit(port1, hub->event_bits);
+
/*
* Add debounce if USB3 link is in polling/link training state.
* Link will automatically transition to Enabled state after
@@ -1264,10 +1296,13 @@ enum hub_quiescing_type {
static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
{
struct usb_device *hdev = hub->hdev;
+ unsigned long flags;
int i;
/* hub_wq and related activity won't re-trigger */
+ spin_lock_irqsave(&hub->irq_urb_lock, flags);
hub->quiescing = 1;
+ spin_unlock_irqrestore(&hub->irq_urb_lock, flags);
if (type != HUB_SUSPEND) {
/* Disconnect all the children */
@@ -1278,6 +1313,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
}
/* Stop hub_wq and related activity */
+ del_timer_sync(&hub->irq_urb_retry);
usb_kill_urb(hub->urb);
if (hub->has_indicators)
cancel_delayed_work_sync(&hub->leds);
@@ -1810,6 +1846,8 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
INIT_DELAYED_WORK(&hub->leds, led_work);
INIT_DELAYED_WORK(&hub->init_work, NULL);
INIT_WORK(&hub->events, hub_event);
+ spin_lock_init(&hub->irq_urb_lock);
+ timer_setup(&hub->irq_urb_retry, hub_retry_irq_urb, 0);
usb_get_intf(intf);
usb_get_dev(hdev);
@@ -3220,8 +3258,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
}
/* disable USB2 hardware LPM */
- if (udev->usb2_hw_lpm_enabled == 1)
- usb_set_usb2_hardware_lpm(udev, 0);
+ usb_disable_usb2_hardware_lpm(udev);
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
@@ -3259,8 +3296,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
usb_enable_ltm(udev);
err_ltm:
/* Try to enable USB2 hardware LPM again */
- if (udev->usb2_hw_lpm_capable == 1)
- usb_set_usb2_hardware_lpm(udev, 1);
+ usb_enable_usb2_hardware_lpm(udev);
if (udev->do_remote_wakeup)
(void) usb_disable_remote_wakeup(udev);
@@ -3543,8 +3579,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
hub_port_logical_disconnect(hub, port1);
} else {
/* Try to enable USB2 hardware LPM */
- if (udev->usb2_hw_lpm_capable == 1)
- usb_set_usb2_hardware_lpm(udev, 1);
+ usb_enable_usb2_hardware_lpm(udev);
/* Try to enable USB3 LTM */
usb_enable_ltm(udev);
@@ -4435,7 +4470,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
udev->usb2_hw_lpm_allowed = 1;
- usb_set_usb2_hardware_lpm(udev, 1);
+ usb_enable_usb2_hardware_lpm(udev);
}
}
@@ -5649,8 +5684,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
/* Disable USB2 hardware LPM.
* It will be re-enabled by the enumeration process.
*/
- if (udev->usb2_hw_lpm_enabled == 1)
- usb_set_usb2_hardware_lpm(udev, 0);
+ usb_disable_usb2_hardware_lpm(udev);
/* Disable LPM while we reset the device and reinstall the alt settings.
* Device-initiated LPM, and system exit latency settings are cleared
@@ -5753,7 +5787,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
done:
/* Now that the alt settings are re-installed, enable LTM and LPM. */
- usb_set_usb2_hardware_lpm(udev, 1);
+ usb_enable_usb2_hardware_lpm(udev);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
usb_release_bos_descriptor(udev);
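
The interrupt-URB retry added above relies on the classic quiesce-flag-plus-timer teardown ordering: set the flag under the lock, then del_timer_sync(). A condensed sketch of that idiom with illustrative names:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct demo {
	spinlock_t lock;
	bool quiescing;
	struct timer_list retry;
};

static void demo_try_work(struct demo *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (!d->quiescing) {
		/* ... attempt the work; on failure, retry in one second ... */
		mod_timer(&d->retry, jiffies + HZ);
	}
	spin_unlock_irqrestore(&d->lock, flags);
}

static void demo_retry(struct timer_list *t)
{
	struct demo *d = from_timer(d, t, retry);

	demo_try_work(d);
}

static void demo_quiesce(struct demo *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	d->quiescing = true;
	spin_unlock_irqrestore(&d->lock, flags);
	del_timer_sync(&d->retry);	/* no retry can still be running */
}

static void demo_init(struct demo *d)
{
	spin_lock_init(&d->lock);
	d->quiescing = false;
	timer_setup(&d->retry, demo_retry, 0);
}
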
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 4accfb63f7dc..a9e24e4b8df1 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -69,6 +69,8 @@ struct usb_hub {
struct delayed_work leds;
struct delayed_work init_work;
struct work_struct events;
+ spinlock_t irq_urb_lock;
+ struct timer_list irq_urb_retry;
struct usb_port **ports;
};
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index dc7f7fd71684..c12ac56606c3 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
.attrs = ports_attrs,
};
-static const struct attribute_group *ports_groups[] = {
- &ports_group,
- NULL
-};
-
/***************************************
* Adding & removing ports
***************************************/
@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
static int usbport_trig_activate(struct led_classdev *led_cdev)
{
struct usbport_trig_data *usbport_data;
+ int err;
usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
if (!usbport_data)
@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
/* List of ports */
INIT_LIST_HEAD(&usbport_data->ports);
+ err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
+ if (err)
+ goto err_free;
usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
usbport_trig_update_count(usbport_data);
@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
usbport_data->nb.notifier_call = usbport_trig_notify;
led_set_trigger_data(led_cdev, usbport_data);
usb_register_notify(&usbport_data->nb);
-
return 0;
+
+err_free:
+ kfree(usbport_data);
+ return err;
}
static void usbport_trig_deactivate(struct led_classdev *led_cdev)
@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
usbport_trig_remove_port(usbport_data, port);
}
+ sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
+
usb_unregister_notify(&usbport_data->nb);
kfree(usbport_data);
@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
.name = "usbport",
.activate = usbport_trig_activate,
.deactivate = usbport_trig_deactivate,
- .groups = ports_groups,
};
static int __init usbport_trig_init(void)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index bfa5eda0cc26..82239f27c4cc 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1243,8 +1243,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev->actconfig->interface[i] = NULL;
}
- if (dev->usb2_hw_lpm_enabled == 1)
- usb_set_usb2_hardware_lpm(dev, 0);
+ usb_disable_usb2_hardware_lpm(dev);
usb_unlocked_disable_lpm(dev);
usb_disable_ltm(dev);
@@ -2007,6 +2006,13 @@ free_interfaces:
for (i = 0; i < nintf; ++i) {
struct usb_interface *intf = cp->interface[i];
+ if (intf->dev.of_node &&
+ !of_device_is_available(intf->dev.of_node)) {
+ dev_info(&dev->dev, "skipping disabled interface %d\n",
+ intf->cur_altsetting->desc.bInterfaceNumber);
+ continue;
+ }
+
dev_dbg(&dev->dev,
"adding %s (config #%d, interface %d)\n",
dev_name(&intf->dev), configuration,
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index 38b2c776c4b4..7580493b867a 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -123,6 +123,34 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub)
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_exit);
+int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
+ enum phy_mode mode)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
+ head = &phy_roothub->list;
+
+ list_for_each_entry(roothub_entry, head, list) {
+ err = phy_set_mode(roothub_entry->phy, mode);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ list_for_each_entry_continue_reverse(roothub_entry, head, list)
+ phy_power_off(roothub_entry->phy);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode);
+
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h
index 88a3c037e9df..dad564e2d2d4 100644
--- a/drivers/usb/core/phy.h
+++ b/drivers/usb/core/phy.h
@@ -16,6 +16,8 @@ struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev);
int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
+int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
+ enum phy_mode mode);
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 514c5214ddb2..8bc35d53408b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -394,7 +394,8 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
/* Corsair K70 RGB */
- { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair Strafe */
{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index ea18284dfa9a..7e88fdfe3cf5 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
if (!ret) {
udev->usb2_hw_lpm_allowed = value;
- ret = usb_set_usb2_hardware_lpm(udev, value);
+ if (value)
+ ret = usb_enable_usb2_hardware_lpm(udev);
+ else
+ ret = usb_disable_usb2_hardware_lpm(udev);
}
usb_unlock_device(udev);
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index f51750bcd152..0eab79f82ce4 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -70,9 +70,8 @@ struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
struct urb *urb;
- urb = kmalloc(sizeof(struct urb) +
- iso_packets * sizeof(struct usb_iso_packet_descriptor),
- mem_flags);
+ urb = kmalloc(struct_size(urb, iso_frame_desc, iso_packets),
+ mem_flags);
if (!urb)
return NULL;
usb_init_urb(urb);
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index e221861b3187..9043d7242d67 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -139,86 +139,123 @@ static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
return acpi_find_child_device(parent, raw, false);
}
-static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+static struct acpi_device *
+usb_acpi_get_companion_for_port(struct usb_port *port_dev)
{
struct usb_device *udev;
struct acpi_device *adev;
acpi_handle *parent_handle;
+ int port1;
+
+ /* Get the struct usb_device pointer of the port's hub */
+ udev = to_usb_device(port_dev->dev.parent->parent);
/*
- * In the ACPI DSDT table, only usb root hub and usb ports are
- * acpi device nodes. The hierarchy like following.
- * Device (EHC1)
- * Device (HUBN)
- * Device (PR01)
- * Device (PR11)
- * Device (PR12)
- * Device (PR13)
- * ...
- * So all binding process is divided into two parts. binding
- * root hub and usb ports.
+ * The root hub ports' parent is the root hub. The non-root-hub
+ * ports' parent is the parent hub port which the hub is
+ * connected to.
*/
- if (is_usb_device(dev)) {
- udev = to_usb_device(dev);
- if (udev->parent)
+ if (!udev->parent) {
+ adev = ACPI_COMPANION(&udev->dev);
+ port1 = usb_hcd_find_raw_port_number(bus_to_hcd(udev->bus),
+ port_dev->portnum);
+ } else {
+ parent_handle = usb_get_hub_port_acpi_handle(udev->parent,
+ udev->portnum);
+ if (!parent_handle)
return NULL;
- /* root hub is only child (_ADR=0) under its parent, the HC */
- adev = ACPI_COMPANION(dev->parent);
- return acpi_find_child_device(adev, 0, false);
- } else if (is_usb_port(dev)) {
- struct usb_port *port_dev = to_usb_port(dev);
- int port1 = port_dev->portnum;
- struct acpi_pld_info *pld;
- acpi_handle *handle;
- acpi_status status;
-
- /* Get the struct usb_device point of port's hub */
- udev = to_usb_device(dev->parent->parent);
-
- /*
- * The root hub ports' parent is the root hub. The non-root-hub
- * ports' parent is the parent hub port which the hub is
- * connected to.
- */
- if (!udev->parent) {
- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- int raw;
-
- raw = usb_hcd_find_raw_port_number(hcd, port1);
-
- adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
- raw);
-
- if (!adev)
- return NULL;
- } else {
- parent_handle =
- usb_get_hub_port_acpi_handle(udev->parent,
- udev->portnum);
- if (!parent_handle)
- return NULL;
-
- acpi_bus_get_device(parent_handle, &adev);
-
- adev = usb_acpi_find_port(adev, port1);
-
- if (!adev)
- return NULL;
- }
- handle = adev->handle;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_FAILURE(status) || !pld)
- return adev;
+ acpi_bus_get_device(parent_handle, &adev);
+ port1 = port_dev->portnum;
+ }
+ return usb_acpi_find_port(adev, port1);
+}
+
+static struct acpi_device *
+usb_acpi_find_companion_for_port(struct usb_port *port_dev)
+{
+ struct acpi_device *adev;
+ struct acpi_pld_info *pld;
+ acpi_handle *handle;
+ acpi_status status;
+
+ adev = usb_acpi_get_companion_for_port(port_dev);
+ if (!adev)
+ return NULL;
+
+ handle = adev->handle;
+ status = acpi_get_physical_device_location(handle, &pld);
+ if (!ACPI_FAILURE(status) && pld) {
port_dev->location = USB_ACPI_LOCATION_VALID
| pld->group_token << 8 | pld->group_position;
port_dev->connect_type = usb_acpi_get_connect_type(handle, pld);
ACPI_FREE(pld);
+ }
- return adev;
+ return adev;
+}
+
+static struct acpi_device *
+usb_acpi_find_companion_for_device(struct usb_device *udev)
+{
+ struct acpi_device *adev;
+ struct usb_port *port_dev;
+ struct usb_hub *hub;
+
+ if (!udev->parent) {
+ /* root hub is only child (_ADR=0) under its parent, the HC */
+ adev = ACPI_COMPANION(udev->dev.parent);
+ return acpi_find_child_device(adev, 0, false);
}
+ hub = usb_hub_to_struct_hub(udev->parent);
+ if (!hub)
+ return NULL;
+
+ /*
+ * This is an embedded USB device connected to a port; such
+ * devices share the port's ACPI companion.
+ */
+ port_dev = hub->ports[udev->portnum - 1];
+ return usb_acpi_get_companion_for_port(port_dev);
+}
+
+static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+{
+ /*
+ * The USB hierarchy looks like the following:
+ *
+ * Device (EHC1)
+ * Device (HUBN)
+ * Device (PR01)
+ * Device (PR11)
+ * Device (PR12)
+ * Device (FN12)
+ * Device (FN13)
+ * Device (PR13)
+ * ...
+ * where HUBN is the root hub, and PRNN are USB ports and devices
+ * connected to them, and FNNN are individual functions of
+ * connected composite USB devices. PRNN and FNNN may contain
+ * _CRS and other methods describing sideband resources for
+ * the connected device.
+ *
+ * On the kernel side both root hub and embedded USB devices are
+ * represented as instances of usb_device structure, and ports
+ * are represented as usb_port structures, so the whole process
+ * is split into 2 parts: finding companions for devices and
+ * finding companions for ports.
+ *
+ * Note that we do not handle individual functions of composite
+ * devices yet, for that we would need to assign companions to
+ * devices corresponding to USB interfaces.
+ */
+ if (is_usb_device(dev))
+ return usb_acpi_find_companion_for_device(to_usb_device(dev));
+ else if (is_usb_port(dev))
+ return usb_acpi_find_companion_for_port(to_usb_port(dev));
+
return NULL;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 4ebfbd737905..7fcb9f782931 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -46,8 +46,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
-#include "usb.h"
-
+#include "hub.h"
const char *usbcore_name = "usbcore";
@@ -65,8 +64,8 @@ int usb_disabled(void)
EXPORT_SYMBOL_GPL(usb_disabled);
#ifdef CONFIG_PM
-static int usb_autosuspend_delay = 2; /* Default delay value,
- * in seconds */
+/* Default delay value, in seconds */
+static int usb_autosuspend_delay = CONFIG_USB_AUTOSUSPEND_DELAY;
module_param_named(autosuspend, usb_autosuspend_delay, int, 0644);
MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
@@ -536,6 +535,27 @@ static unsigned usb_bus_is_wusb(struct usb_bus *bus)
return hcd->wireless;
}
+static bool usb_dev_authorized(struct usb_device *dev, struct usb_hcd *hcd)
+{
+ struct usb_hub *hub;
+
+ if (!dev->parent)
+ return true; /* Root hub always ok [and always wired] */
+
+ switch (hcd->dev_policy) {
+ case USB_DEVICE_AUTHORIZE_NONE:
+ default:
+ return false;
+
+ case USB_DEVICE_AUTHORIZE_ALL:
+ return true;
+
+ case USB_DEVICE_AUTHORIZE_INTERNAL:
+ hub = usb_hub_to_struct_hub(dev->parent);
+ return hub->ports[dev->portnum - 1]->connect_type ==
+ USB_PORT_CONNECT_TYPE_HARD_WIRED;
+ }
+}
/**
* usb_alloc_dev - usb device constructor (usbcore-internal)
@@ -663,12 +683,11 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->connect_time = jiffies;
dev->active_duration = -jiffies;
#endif
- if (root_hub) /* Root hub always ok [and always wired] */
- dev->authorized = 1;
- else {
- dev->authorized = !!HCD_DEV_AUTHORIZED(usb_hcd);
+
+ dev->authorized = usb_dev_authorized(dev, usb_hcd);
+ if (!root_hub)
dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
- }
+
return dev;
}
EXPORT_SYMBOL_GPL(usb_alloc_dev);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 546a2219454b..d95a5358f73d 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
extern int usb_runtime_suspend(struct device *dev);
extern int usb_runtime_resume(struct device *dev);
extern int usb_runtime_idle(struct device *dev);
-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
+extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
+extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
#else
@@ -112,7 +113,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
return 0;
}
-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+ return 0;
+}
+
+static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
{
return 0;
}
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index b6a495e98fd8..68d095ae2865 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config USB_DWC2
tristate "DesignWare USB2 DRD Core Support"
depends on HAS_DMA
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 68ad75a7460d..6812a8a3a98b 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -261,7 +261,7 @@ static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
- dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
+ dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
}
}
@@ -768,22 +768,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
return desc_size;
}
-/*
- * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
- * @hs_ep: The endpoint
- * @dma_buff: DMA address to use
- * @len: Length of the transfer
- *
- * This function will iterate over descriptor chain and fill its entries
- * with corresponding information based on transfer data.
- */
-static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
+static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_dma_desc **desc,
dma_addr_t dma_buff,
- unsigned int len)
+ unsigned int len,
+ bool true_last)
{
- struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
- struct dwc2_dma_desc *desc = hs_ep->desc_list;
u32 mps = hs_ep->ep.maxpacket;
u32 maxsize = 0;
u32 offset = 0;
@@ -798,39 +789,77 @@ static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
hs_ep->desc_count = 1;
for (i = 0; i < hs_ep->desc_count; ++i) {
- desc->status = 0;
- desc->status |= (DEV_DMA_BUFF_STS_HBUSY
+ (*desc)->status = 0;
+ (*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
<< DEV_DMA_BUFF_STS_SHIFT);
if (len > maxsize) {
if (!hs_ep->index && !dir_in)
- desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+ (*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
- desc->status |= (maxsize <<
- DEV_DMA_NBYTES_SHIFT & mask);
- desc->buf = dma_buff + offset;
+ (*desc)->status |=
+ maxsize << DEV_DMA_NBYTES_SHIFT & mask;
+ (*desc)->buf = dma_buff + offset;
len -= maxsize;
offset += maxsize;
} else {
- desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+ if (true_last)
+ (*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
if (dir_in)
- desc->status |= (len % mps) ? DEV_DMA_SHORT :
- ((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0);
- if (len > maxsize)
- dev_err(hsotg->dev, "wrong len %d\n", len);
+ (*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
+ ((hs_ep->send_zlp && true_last) ?
+ DEV_DMA_SHORT : 0);
- desc->status |=
+ (*desc)->status |=
len << DEV_DMA_NBYTES_SHIFT & mask;
- desc->buf = dma_buff + offset;
+ (*desc)->buf = dma_buff + offset;
}
- desc->status &= ~DEV_DMA_BUFF_STS_MASK;
- desc->status |= (DEV_DMA_BUFF_STS_HREADY
+ (*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
+ (*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
<< DEV_DMA_BUFF_STS_SHIFT);
- desc++;
+ (*desc)++;
+ }
+}
+
+/*
+ * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
+ * @hs_ep: The endpoint
+ * @ureq: Request to transfer
+ * @offset: offset in bytes
+ * @len: Length of the transfer
+ *
+ * This function will iterate over descriptor chain and fill its entries
+ * with corresponding information based on transfer data.
+ */
+static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
+ struct usb_request *ureq,
+ unsigned int offset,
+ unsigned int len)
+{
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ struct scatterlist *sg;
+ int i;
+ u8 desc_count = 0;
+
+ /* non-DMA sg buffer */
+ if (!ureq->num_sgs) {
+ dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
+ ureq->dma + offset, len, true);
+ return;
}
+
+ /* DMA sg buffer */
+ for_each_sg(ureq->sg, sg, ureq->num_sgs, i) {
+ dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
+ sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
+ sg_is_last(sg));
+ desc_count += hs_ep->desc_count;
+ }
+
+ hs_ep->desc_count = desc_count;
}
/*
@@ -944,7 +973,13 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
hs_ep->next_desc = 0;
list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
- ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+ dma_addr_t dma_addr = hs_req->req.dma;
+
+ if (hs_req->req.num_sgs) {
+ WARN_ON(hs_req->req.num_sgs > 1);
+ dma_addr = sg_dma_address(hs_req->req.sg);
+ }
+ ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
hs_req->req.length);
if (ret)
break;
@@ -1100,7 +1135,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
offset = ureq->actual;
/* Fill DDMA chain entries */
- dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
+ dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq, offset,
length);
/* write descriptor chain address to control register */
@@ -1399,7 +1434,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
*/
if (using_desc_dma(hs) && hs_ep->isochronous) {
if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
- dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+ dma_addr_t dma_addr = hs_req->req.dma;
+
+ if (hs_req->req.num_sgs) {
+ WARN_ON(hs_req->req.num_sgs > 1);
+ dma_addr = sg_dma_address(hs_req->req.sg);
+ }
+ dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
hs_req->req.length);
}
return 0;
@@ -1987,13 +2028,12 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
index);
if (using_desc_dma(hsotg)) {
- /* Not specific buffer needed for ep0 ZLP */
- dma_addr_t dma = hs_ep->desc_list_dma;
-
if (!index)
dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
- dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
+ /* No specific buffer needed for ep0 ZLP */
+ dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &hs_ep->desc_list,
+ hs_ep->desc_list_dma, 0, true);
} else {
dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
DXEPTSIZ_XFERSIZE(0),
@@ -4005,6 +4045,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
ret = -ENOMEM;
goto error1;
}
+ epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
hsotg->fifo_map |= 1 << fifo_index;
epctrl |= DXEPCTL_TXFNUM(fifo_index);
hs_ep->fifo_index = fifo_index;
@@ -4385,6 +4426,7 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
hsotg->enabled = 0;
spin_unlock_irqrestore(&hsotg->lock, flags);
+ gadget->sg_supported = using_desc_dma(hsotg);
dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
return 0;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index dd82fa516f3f..3f087962f498 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3981,10 +3981,8 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
gfp_t mem_flags)
{
struct dwc2_hcd_urb *urb;
- u32 size = sizeof(*urb) + iso_desc_count *
- sizeof(struct dwc2_hcd_iso_packet_desc);
- urb = kzalloc(size, mem_flags);
+ urb = kzalloc(struct_size(urb, iso_descs, iso_desc_count), mem_flags);
if (urb)
urb->packet_count = iso_desc_count;
return urb;
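The hunk above swaps an open-coded size calculation for struct_size() from <linux/overflow.h>, which accounts for the trailing flexible array and saturates on overflow instead of silently wrapping. A small sketch of the idiom, using a hypothetical structure shaped like dwc2_hcd_urb with its iso_descs[] tail:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical container with a trailing flexible array. */
struct pkt_set {
        unsigned int count;
        struct pkt { u32 len; u32 status; } descs[];
};

static struct pkt_set *pkt_set_alloc(unsigned int n, gfp_t gfp)
{
        struct pkt_set *p;

        /* struct_size(p, descs, n) == sizeof(*p) + n * sizeof(p->descs[0]),
         * saturating at SIZE_MAX if the multiplication would overflow, so
         * kzalloc() fails cleanly instead of returning a too-short buffer. */
        p = kzalloc(struct_size(p, descs, n), gfp);
        if (p)
                p->count = n;
        return p;
}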
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 1a0404fda596..2b1494460d0c 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
depends on (USB || USB_GADGET) && HAS_DMA
@@ -86,11 +88,11 @@ config USB_DWC3_HAPS
platform, please say 'Y' or 'M' here.
config USB_DWC3_KEYSTONE
- tristate "Texas Instruments Keystone2 Platforms"
- depends on ARCH_KEYSTONE || COMPILE_TEST
+ tristate "Texas Instruments Keystone2/AM654 Platforms"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
default USB_DWC3
help
- Support of USB2/3 functionality in TI Keystone2 platforms.
+ Support of USB2/3 functionality in TI Keystone2 and AM654 platforms.
Say 'Y' or 'M' here if you have one such device
config USB_DWC3_OF_SIMPLE
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index df876418cb78..1528d395b156 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -692,7 +692,6 @@ struct dwc3_ep {
#define DWC3_EP_WEDGE BIT(2)
#define DWC3_EP_TRANSFER_STARTED BIT(3)
#define DWC3_EP_PENDING_REQUEST BIT(5)
-#define DWC3_EP_END_TRANSFER_PENDING BIT(7)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
@@ -863,6 +862,7 @@ struct dwc3_hwparams {
* @num_pending_sgs: counter to pending sgs
* @num_queued_sgs: counter to the number of sgs which already got queued
* @remaining: amount of data remaining
+ * @status: internal dwc3 request status tracking
* @epnum: endpoint number to which this request refers
* @trb: pointer to struct dwc3_trb
* @trb_dma: DMA address of @trb
@@ -871,7 +871,6 @@ struct dwc3_hwparams {
* or unaligned OUT)
* @direction: IN or OUT direction flag
* @mapped: true when request has been dma-mapped
- * @started: request is started
*/
struct dwc3_request {
struct usb_request request;
@@ -883,6 +882,14 @@ struct dwc3_request {
unsigned num_pending_sgs;
unsigned int num_queued_sgs;
unsigned remaining;
+
+ unsigned int status;
+#define DWC3_REQUEST_STATUS_QUEUED 0
+#define DWC3_REQUEST_STATUS_STARTED 1
+#define DWC3_REQUEST_STATUS_CANCELLED 2
+#define DWC3_REQUEST_STATUS_COMPLETED 3
+#define DWC3_REQUEST_STATUS_UNKNOWN -1
+
u8 epnum;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
@@ -892,7 +899,6 @@ struct dwc3_request {
unsigned needs_extra_trb:1;
unsigned direction:1;
unsigned mapped:1;
- unsigned started:1;
};
/*
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 4f75ab3505b7..6759a7efd8d5 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -193,65 +193,69 @@ static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
* dwc3_gadget_event_string - returns event name
* @event: the event code
*/
-static inline const char *
-dwc3_gadget_event_string(char *str, const struct dwc3_event_devt *event)
+static inline const char *dwc3_gadget_event_string(char *str, size_t size,
+ const struct dwc3_event_devt *event)
{
enum dwc3_link_state state = event->event_info & DWC3_LINK_STATE_MASK;
switch (event->type) {
case DWC3_DEVICE_EVENT_DISCONNECT:
- sprintf(str, "Disconnect: [%s]",
+ snprintf(str, size, "Disconnect: [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_RESET:
- sprintf(str, "Reset [%s]", dwc3_gadget_link_string(state));
+ snprintf(str, size, "Reset [%s]",
+ dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_CONNECT_DONE:
- sprintf(str, "Connection Done [%s]",
+ snprintf(str, size, "Connection Done [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
- sprintf(str, "Link Change [%s]",
+ snprintf(str, size, "Link Change [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_WAKEUP:
- sprintf(str, "WakeUp [%s]", dwc3_gadget_link_string(state));
+ snprintf(str, size, "WakeUp [%s]",
+ dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_EOPF:
- sprintf(str, "End-Of-Frame [%s]",
+ snprintf(str, size, "End-Of-Frame [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_SOF:
- sprintf(str, "Start-Of-Frame [%s]",
+ snprintf(str, size, "Start-Of-Frame [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
- sprintf(str, "Erratic Error [%s]",
+ snprintf(str, size, "Erratic Error [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
- sprintf(str, "Command Complete [%s]",
+ snprintf(str, size, "Command Complete [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_OVERFLOW:
- sprintf(str, "Overflow [%s]", dwc3_gadget_link_string(state));
+ snprintf(str, size, "Overflow [%s]",
+ dwc3_gadget_link_string(state));
break;
default:
- sprintf(str, "UNKNOWN");
+ snprintf(str, size, "UNKNOWN");
}
return str;
}
-static inline void dwc3_decode_get_status(__u8 t, __u16 i, __u16 l, char *str)
+static inline void dwc3_decode_get_status(__u8 t, __u16 i, __u16 l, char *str,
+ size_t size)
{
switch (t & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
- sprintf(str, "Get Interface Status(Intf = %d, Length = %d)",
- i, l);
+ snprintf(str, size, "Get Interface Status(Intf = %d, Length = %d)",
+ i, l);
break;
case USB_RECIP_ENDPOINT:
- sprintf(str, "Get Endpoint Status(ep%d%s)",
+ snprintf(str, size, "Get Endpoint Status(ep%d%s)",
i & ~USB_DIR_IN,
i & USB_DIR_IN ? "in" : "out");
break;
@@ -259,11 +263,11 @@ static inline void dwc3_decode_get_status(__u8 t, __u16 i, __u16 l, char *str)
}
static inline void dwc3_decode_set_clear_feature(__u8 t, __u8 b, __u16 v,
- __u16 i, char *str)
+ __u16 i, char *str, size_t size)
{
switch (t & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
- sprintf(str, "%s Device Feature(%s%s)",
+ snprintf(str, size, "%s Device Feature(%s%s)",
b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
({char *s;
switch (v) {
@@ -311,13 +315,13 @@ static inline void dwc3_decode_set_clear_feature(__u8 t, __u8 b, __u16 v,
} s; }) : "");
break;
case USB_RECIP_INTERFACE:
- sprintf(str, "%s Interface Feature(%s)",
+ snprintf(str, size, "%s Interface Feature(%s)",
b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
v == USB_INTRF_FUNC_SUSPEND ?
"Function Suspend" : "UNKNOWN");
break;
case USB_RECIP_ENDPOINT:
- sprintf(str, "%s Endpoint Feature(%s ep%d%s)",
+ snprintf(str, size, "%s Endpoint Feature(%s ep%d%s)",
b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
v == USB_ENDPOINT_HALT ? "Halt" : "UNKNOWN",
i & ~USB_DIR_IN,
@@ -326,15 +330,15 @@ static inline void dwc3_decode_set_clear_feature(__u8 t, __u8 b, __u16 v,
}
}
-static inline void dwc3_decode_set_address(__u16 v, char *str)
+static inline void dwc3_decode_set_address(__u16 v, char *str, size_t size)
{
- sprintf(str, "Set Address(Addr = %02x)", v);
+ snprintf(str, size, "Set Address(Addr = %02x)", v);
}
static inline void dwc3_decode_get_set_descriptor(__u8 t, __u8 b, __u16 v,
- __u16 i, __u16 l, char *str)
+ __u16 i, __u16 l, char *str, size_t size)
{
- sprintf(str, "%s %s Descriptor(Index = %d, Length = %d)",
+ snprintf(str, size, "%s %s Descriptor(Index = %d, Length = %d)",
b == USB_REQ_GET_DESCRIPTOR ? "Get" : "Set",
({ char *s;
switch (v >> 8) {
@@ -393,87 +397,92 @@ static inline void dwc3_decode_get_set_descriptor(__u8 t, __u8 b, __u16 v,
}
-static inline void dwc3_decode_get_configuration(__u16 l, char *str)
+static inline void dwc3_decode_get_configuration(__u16 l, char *str,
+ size_t size)
{
- sprintf(str, "Get Configuration(Length = %d)", l);
+ snprintf(str, size, "Get Configuration(Length = %d)", l);
}
-static inline void dwc3_decode_set_configuration(__u8 v, char *str)
+static inline void dwc3_decode_set_configuration(__u8 v, char *str, size_t size)
{
- sprintf(str, "Set Configuration(Config = %d)", v);
+ snprintf(str, size, "Set Configuration(Config = %d)", v);
}
-static inline void dwc3_decode_get_intf(__u16 i, __u16 l, char *str)
+static inline void dwc3_decode_get_intf(__u16 i, __u16 l, char *str,
+ size_t size)
{
- sprintf(str, "Get Interface(Intf = %d, Length = %d)", i, l);
+ snprintf(str, size, "Get Interface(Intf = %d, Length = %d)", i, l);
}
-static inline void dwc3_decode_set_intf(__u8 v, __u16 i, char *str)
+static inline void dwc3_decode_set_intf(__u8 v, __u16 i, char *str, size_t size)
{
- sprintf(str, "Set Interface(Intf = %d, Alt.Setting = %d)", i, v);
+ snprintf(str, size, "Set Interface(Intf = %d, Alt.Setting = %d)", i, v);
}
-static inline void dwc3_decode_synch_frame(__u16 i, __u16 l, char *str)
+static inline void dwc3_decode_synch_frame(__u16 i, __u16 l, char *str,
+ size_t size)
{
- sprintf(str, "Synch Frame(Endpoint = %d, Length = %d)", i, l);
+ snprintf(str, size, "Synch Frame(Endpoint = %d, Length = %d)", i, l);
}
-static inline void dwc3_decode_set_sel(__u16 l, char *str)
+static inline void dwc3_decode_set_sel(__u16 l, char *str, size_t size)
{
- sprintf(str, "Set SEL(Length = %d)", l);
+ snprintf(str, size, "Set SEL(Length = %d)", l);
}
-static inline void dwc3_decode_set_isoch_delay(__u8 v, char *str)
+static inline void dwc3_decode_set_isoch_delay(__u8 v, char *str, size_t size)
{
- sprintf(str, "Set Isochronous Delay(Delay = %d ns)", v);
+ snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", v);
}
/**
* dwc3_decode_ctrl - returns a string representation of ctrl request
*/
-static inline const char *dwc3_decode_ctrl(char *str, __u8 bRequestType,
- __u8 bRequest, __u16 wValue, __u16 wIndex, __u16 wLength)
+static inline const char *dwc3_decode_ctrl(char *str, size_t size,
+ __u8 bRequestType, __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
{
switch (bRequest) {
case USB_REQ_GET_STATUS:
- dwc3_decode_get_status(bRequestType, wIndex, wLength, str);
+ dwc3_decode_get_status(bRequestType, wIndex, wLength, str,
+ size);
break;
case USB_REQ_CLEAR_FEATURE:
case USB_REQ_SET_FEATURE:
dwc3_decode_set_clear_feature(bRequestType, bRequest, wValue,
- wIndex, str);
+ wIndex, str, size);
break;
case USB_REQ_SET_ADDRESS:
- dwc3_decode_set_address(wValue, str);
+ dwc3_decode_set_address(wValue, str, size);
break;
case USB_REQ_GET_DESCRIPTOR:
case USB_REQ_SET_DESCRIPTOR:
dwc3_decode_get_set_descriptor(bRequestType, bRequest, wValue,
- wIndex, wLength, str);
+ wIndex, wLength, str, size);
break;
case USB_REQ_GET_CONFIGURATION:
- dwc3_decode_get_configuration(wLength, str);
+ dwc3_decode_get_configuration(wLength, str, size);
break;
case USB_REQ_SET_CONFIGURATION:
- dwc3_decode_set_configuration(wValue, str);
+ dwc3_decode_set_configuration(wValue, str, size);
break;
case USB_REQ_GET_INTERFACE:
- dwc3_decode_get_intf(wIndex, wLength, str);
+ dwc3_decode_get_intf(wIndex, wLength, str, size);
break;
case USB_REQ_SET_INTERFACE:
- dwc3_decode_set_intf(wValue, wIndex, str);
+ dwc3_decode_set_intf(wValue, wIndex, str, size);
break;
case USB_REQ_SYNCH_FRAME:
- dwc3_decode_synch_frame(wIndex, wLength, str);
+ dwc3_decode_synch_frame(wIndex, wLength, str, size);
break;
case USB_REQ_SET_SEL:
- dwc3_decode_set_sel(wLength, str);
+ dwc3_decode_set_sel(wLength, str, size);
break;
case USB_REQ_SET_ISOCH_DELAY:
- dwc3_decode_set_isoch_delay(wValue, str);
+ dwc3_decode_set_isoch_delay(wValue, str, size);
break;
default:
- sprintf(str, "%02x %02x %02x %02x %02x %02x %02x %02x",
+ snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x",
bRequestType, bRequest,
cpu_to_le16(wValue) & 0xff,
cpu_to_le16(wValue) >> 8,
@@ -490,16 +499,15 @@ static inline const char *dwc3_decode_ctrl(char *str, __u8 bRequestType,
* dwc3_ep_event_string - returns event name
* @event: the event code
*/
-static inline const char *
-dwc3_ep_event_string(char *str, const struct dwc3_event_depevt *event,
- u32 ep0state)
+static inline const char *dwc3_ep_event_string(char *str, size_t size,
+ const struct dwc3_event_depevt *event, u32 ep0state)
{
u8 epnum = event->endpoint_number;
size_t len;
int status;
int ret;
- ret = sprintf(str, "ep%d%s: ", epnum >> 1,
+ ret = snprintf(str, size, "ep%d%s: ", epnum >> 1,
(epnum & 1) ? "in" : "out");
if (ret < 0)
return "UNKNOWN";
@@ -509,7 +517,7 @@ dwc3_ep_event_string(char *str, const struct dwc3_event_depevt *event,
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
len = strlen(str);
- sprintf(str + len, "Transfer Complete (%c%c%c)",
+ snprintf(str + len, size - len, "Transfer Complete (%c%c%c)",
status & DEPEVT_STATUS_SHORT ? 'S' : 's',
status & DEPEVT_STATUS_IOC ? 'I' : 'i',
status & DEPEVT_STATUS_LST ? 'L' : 'l');
@@ -517,12 +525,13 @@ dwc3_ep_event_string(char *str, const struct dwc3_event_depevt *event,
len = strlen(str);
if (epnum <= 1)
- sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state));
+ snprintf(str + len, size - len, " [%s]",
+ dwc3_ep0_state_string(ep0state));
break;
case DWC3_DEPEVT_XFERINPROGRESS:
len = strlen(str);
- sprintf(str + len, "Transfer In Progress [%d] (%c%c%c)",
+ snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)",
event->parameters,
status & DEPEVT_STATUS_SHORT ? 'S' : 's',
status & DEPEVT_STATUS_IOC ? 'I' : 'i',
@@ -531,47 +540,51 @@ dwc3_ep_event_string(char *str, const struct dwc3_event_depevt *event,
case DWC3_DEPEVT_XFERNOTREADY:
len = strlen(str);
- sprintf(str + len, "Transfer Not Ready [%d]%s",
+ snprintf(str + len, size - len, "Transfer Not Ready [%d]%s",
event->parameters,
status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
" (Active)" : " (Not Active)");
+ len = strlen(str);
+
/* Control Endpoints */
if (epnum <= 1) {
int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
switch (phase) {
case DEPEVT_STATUS_CONTROL_DATA:
- strcat(str, " [Data Phase]");
+ snprintf(str + ret, size - ret,
+ " [Data Phase]");
break;
case DEPEVT_STATUS_CONTROL_STATUS:
- strcat(str, " [Status Phase]");
+ snprintf(str + ret, size - ret,
+ " [Status Phase]");
}
}
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- strcat(str, "FIFO");
+ snprintf(str + ret, size - ret, "FIFO");
break;
case DWC3_DEPEVT_STREAMEVT:
status = event->status;
switch (status) {
case DEPEVT_STREAMEVT_FOUND:
- sprintf(str + ret, " Stream %d Found",
+ snprintf(str + ret, size - ret, " Stream %d Found",
event->parameters);
break;
case DEPEVT_STREAMEVT_NOTFOUND:
default:
- strcat(str, " Stream Not Found");
+ snprintf(str + ret, size - ret, " Stream Not Found");
break;
}
break;
case DWC3_DEPEVT_EPCMDCMPLT:
- strcat(str, "Endpoint Command Complete");
+ snprintf(str + ret, size - ret, "Endpoint Command Complete");
break;
default:
- sprintf(str, "UNKNOWN");
+ snprintf(str, size, "UNKNOWN");
}
return str;
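The debug.h conversions above replace unbounded sprintf()/strcat() calls with snprintf() plus an explicit size, bounding each append by the space remaining in the buffer. A hedged sketch of the incremental pattern, assuming a caller-supplied buffer whose length is passed in (DWC3_MSG_MAX in the tracepoints that call these decoders):

#include <linux/kernel.h>
#include <linux/string.h>

static const char *decode_example(char *str, size_t size, int code, bool active)
{
        size_t len;

        /* first write is bounded by the whole buffer */
        snprintf(str, size, "event %d", code);

        /* each append is bounded by whatever space remains */
        len = strlen(str);
        snprintf(str + len, size - len, active ? " (active)" : " (idle)");

        return str;     /* always NUL-terminated, never writes past 'size' */
}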
@@ -611,14 +624,15 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
}
}
-static inline const char *dwc3_decode_event(char *str, u32 event, u32 ep0state)
+static inline const char *dwc3_decode_event(char *str, size_t size, u32 event,
+ u32 ep0state)
{
const union dwc3_event evt = (union dwc3_event) event;
if (evt.type.is_devspec)
- return dwc3_gadget_event_string(str, &evt.devt);
+ return dwc3_gadget_event_string(str, size, &evt.devt);
else
- return dwc3_ep_event_string(str, &evt.depevt, ep0state);
+ return dwc3_ep_event_string(str, size, &evt.depevt, ep0state);
}
static inline const char *dwc3_ep_cmd_status_string(int status)
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 869725d15c74..726100d1ac0d 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -457,8 +457,13 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
* This device property is for kernel internal use only and
* is expected to be set by the glue code.
*/
- if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
- return extcon_get_extcon_dev(name);
+ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
+ edev = extcon_get_extcon_dev(name);
+ if (!edev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return edev;
+ }
np_phy = of_parse_phandle(dev->of_node, "phys", 0);
np_conn = of_graph_get_remote_node(np_phy, -1, -1);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index cb7fcd7c0ad8..c1e9ea621f41 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
for (i = 0; i < exynos->num_clks; i++) {
ret = clk_prepare_enable(exynos->clks[i]);
if (ret) {
- while (--i > 0)
+ while (i-- > 0)
clk_disable_unprepare(exynos->clks[i]);
return ret;
}
@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev)
for (i = 0; i < exynos->num_clks; i++) {
ret = clk_prepare_enable(exynos->clks[i]);
if (ret) {
- while (--i > 0)
+ while (i-- > 0)
clk_disable_unprepare(exynos->clks[i]);
return ret;
}
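The two dwc3-exynos hunks fix an off-by-one in the clock error unwind: with `while (--i > 0)` the loop stops before clks[0], leaving the first clock enabled after a later enable fails, whereas `while (i-- > 0)` tests the old value and therefore walks back over every clock that was successfully enabled, index 0 included. A standalone sketch of the corrected pattern, with hypothetical clks[]/num_clks arguments:

#include <linux/clk.h>

static int enable_all_clks(struct clk **clks, int num_clks)
{
        int i, ret;

        for (i = 0; i < num_clks; i++) {
                ret = clk_prepare_enable(clks[i]);
                if (ret) {
                        /* Unwind clks[i-1] ... clks[0]; 'i-- > 0' tests the
                         * old value, so index 0 is disabled on the last pass. */
                        while (i-- > 0)
                                clk_disable_unprepare(clks[i]);
                        return ret;
                }
        }
        return 0;
}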
diff --git a/drivers/usb/dwc3/dwc3-haps.c b/drivers/usb/dwc3/dwc3-haps.c
index 02d57d98ef9b..3cecbf169452 100644
--- a/drivers/usb/dwc3/dwc3-haps.c
+++ b/drivers/usb/dwc3/dwc3-haps.c
@@ -106,6 +106,15 @@ static const struct pci_device_id dwc3_haps_id_table[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ /*
+ * i.MX6QP and i.MX7D platforms use a PCIe controller with the
+ * same VID and PID as this USB controller. The system may
+ * incorrectly match this driver to that PCIe controller. To
+ * work around this, specifically use class type USB to prevent
+ * incorrect driver matching.
+ */
+ .class = (PCI_CLASS_SERIAL_USB << 8),
+ .class_mask = 0xffff00,
},
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 193a9a88222a..cbee5fb9b9fb 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -106,6 +106,10 @@ static int kdwc3_probe(struct platform_device *pdev)
goto err_irq;
}
+ /* IRQ processing not required currently for AM65 */
+ if (of_device_is_compatible(node, "ti,am654-dwc3"))
+ goto skip_irq;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "missing irq\n");
@@ -123,6 +127,7 @@ static int kdwc3_probe(struct platform_device *pdev)
kdwc3_enable_irqs(kdwc);
+skip_irq:
error = of_platform_populate(node, NULL, NULL, dev);
if (error) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
@@ -152,8 +157,11 @@ static int kdwc3_remove_core(struct device *dev, void *c)
static int kdwc3_remove(struct platform_device *pdev)
{
struct dwc3_keystone *kdwc = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ if (!of_device_is_compatible(node, "ti,am654-dwc3"))
+ kdwc3_disable_irqs(kdwc);
- kdwc3_disable_irqs(kdwc);
device_for_each_child(&pdev->dev, NULL, kdwc3_remove_core);
pm_runtime_put_sync(kdwc->dev);
pm_runtime_disable(kdwc->dev);
@@ -165,6 +173,7 @@ static int kdwc3_remove(struct platform_device *pdev)
static const struct of_device_id kdwc3_of_match[] = {
{ .compatible = "ti,keystone-dwc3", },
+ { .compatible = "ti,am654-dwc3" },
{},
};
MODULE_DEVICE_TABLE(of, kdwc3_of_match);
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index a6d0203e40b6..184df4daa590 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -595,6 +595,7 @@ static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
static const struct of_device_id dwc3_qcom_of_match[] = {
{ .compatible = "qcom,dwc3" },
{ .compatible = "qcom,msm8996-dwc3" },
+ { .compatible = "qcom,msm8998-dwc3" },
{ .compatible = "qcom,sdm845-dwc3" },
{ }
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07bd31bb2f8a..e293400cc6e9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -174,9 +174,9 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
{
struct dwc3 *dwc = dep->dwc;
- req->started = false;
list_del(&req->list);
req->remaining = 0;
+ req->needs_extra_trb = false;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
@@ -208,6 +208,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
struct dwc3 *dwc = dep->dwc;
dwc3_gadget_del_and_unmap_request(dep, req, status);
+ req->status = DWC3_REQUEST_STATUS_COMPLETED;
spin_unlock(&dwc->lock);
usb_gadget_giveback_request(&dep->endpoint, &req->request);
@@ -383,19 +384,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
- if (ret == 0) {
- switch (DWC3_DEPCMD_CMD(cmd)) {
- case DWC3_DEPCMD_STARTTRANSFER:
- dep->flags |= DWC3_EP_TRANSFER_STARTED;
- dwc3_gadget_ep_get_transfer_index(dep);
- break;
- case DWC3_DEPCMD_ENDTRANSFER:
- dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
- break;
- default:
- /* nothing */
- break;
- }
+ if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+ dep->flags |= DWC3_EP_TRANSFER_STARTED;
+ dwc3_gadget_ep_get_transfer_index(dep);
}
if (saved_config) {
@@ -641,7 +632,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
- dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
reg |= DWC3_DALEPENA_EP(dep->number);
@@ -697,12 +687,13 @@ out:
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force);
+static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
- dwc3_stop_active_transfer(dep, true);
+ dwc3_stop_active_transfer(dep, true, false);
/* - giveback all requests to gadget driver */
while (!list_empty(&dep->started_list)) {
@@ -747,7 +738,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->type = 0;
- dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
+ dep->flags = 0;
/* Clear out the ep descriptors for non-ep0 */
if (dep->number > 1) {
@@ -846,6 +837,7 @@ static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
req->direction = dep->direction;
req->epnum = dep->number;
req->dep = dep;
+ req->status = DWC3_REQUEST_STATUS_UNKNOWN;
trace_dwc3_alloc_request(req);
@@ -1118,7 +1110,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
unsigned int rem = length % maxp;
- if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
+ if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
@@ -1359,7 +1351,7 @@ static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
* to wait for the next XferNotReady to test the command again
*/
if (cmd_status == 0) {
- dwc3_stop_active_transfer(dep, true);
+ dwc3_stop_active_transfer(dep, true, true);
return 0;
}
}
@@ -1434,6 +1426,11 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
&req->request, req->dep->name))
return -EINVAL;
+ if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
+ "%s: request %pK already in flight\n",
+ dep->name, &req->request))
+ return -EINVAL;
+
pm_runtime_get(dwc->dev);
req->request.actual = 0;
@@ -1442,6 +1439,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
trace_dwc3_ep_queue(req);
list_add_tail(&req->list, &dep->pending_list);
+ req->status = DWC3_REQUEST_STATUS_QUEUED;
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
@@ -1505,6 +1503,8 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
}
+
+ req->num_trbs = 0;
}
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
@@ -1546,13 +1546,16 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
if (r == req) {
/* wait until it is processed */
- dwc3_stop_active_transfer(dep, true);
+ dwc3_stop_active_transfer(dep, true, true);
if (!r->trb)
goto out0;
dwc3_gadget_move_cancelled_request(req);
- goto out0;
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED)
+ goto out0;
+ else
+ goto out1;
}
dev_err(dwc->dev, "request %pK was not queued to %s\n",
request, ep->name);
@@ -1560,6 +1563,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
goto out0;
}
+out1:
dwc3_gadget_giveback(dep, req, -ECONNRESET);
out0:
@@ -1984,6 +1988,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
+ dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc3_ep0_out_start(dwc);
dwc3_gadget_enable_irq(dwc);
@@ -2498,7 +2503,7 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (stop) {
- dwc3_stop_active_transfer(dep, true);
+ dwc3_stop_active_transfer(dep, true, true);
dep->flags = DWC3_EP_ENABLED;
}
@@ -2545,7 +2550,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep = dwc->eps[epnum];
if (!(dep->flags & DWC3_EP_ENABLED)) {
- if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
return;
/* Handle only EPCMDCMPLT when EP disabled */
@@ -2569,7 +2574,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
- dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_cleanup_cancelled_requests(dep);
}
break;
@@ -2619,15 +2624,15 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
}
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force)
+static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt)
{
struct dwc3 *dwc = dep->dwc;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
- if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
- !dep->resource_index)
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
return;
/*
@@ -2663,17 +2668,15 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force)
cmd = DWC3_DEPCMD_ENDTRANSFER;
cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
- cmd |= DWC3_DEPCMD_CMDIOC;
+ cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
WARN_ON_ONCE(ret);
dep->resource_index = 0;
- if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
- dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+ if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
udelay(100);
- }
}
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
@@ -3337,6 +3340,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err4;
}
+ dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
+
return 0;
err4:
@@ -3379,6 +3384,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
+ synchronize_irq(dwc->irq_gadget);
+
return 0;
}
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 023a473648eb..3ed738e86ea7 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -75,7 +75,7 @@ static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
{
struct dwc3_ep *dep = req->dep;
- req->started = true;
+ req->status = DWC3_REQUEST_STATUS_STARTED;
list_move_tail(&req->list, &dep->started_list);
}
@@ -90,7 +90,7 @@ static inline void dwc3_gadget_move_cancelled_request(struct dwc3_request *req)
{
struct dwc3_ep *dep = req->dep;
- req->started = false;
+ req->status = DWC3_REQUEST_STATUS_CANCELLED;
list_move_tail(&req->list, &dep->cancelled_list);
}
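Taken together, the core.h and gadget.c/gadget.h hunks replace the single 'started' bit with a small request state machine: a request is UNKNOWN when allocated, QUEUED by __dwc3_gadget_ep_queue(), STARTED or CANCELLED as it moves between the endpoint lists, and COMPLETED once it is given back, which is what the new WARN in __dwc3_gadget_ep_queue() checks before allowing a re-submission. A rough restatement of that check, using a local enum rather than the driver's defines:

/* Mirrors the DWC3_REQUEST_STATUS_* values added in core.h (assumption:
 * the enum names here are illustrative, not part of the driver). */
enum req_status {
        REQ_QUEUED = 0,
        REQ_STARTED,
        REQ_CANCELLED,
        REQ_COMPLETED,
        REQ_UNKNOWN = -1,
};

/* A request may only be (re)queued after it has completed or has never
 * been used; any state below COMPLETED means it is still in flight. */
static bool req_may_queue(enum req_status s)
{
        return s == REQ_COMPLETED || s == REQ_UNKNOWN;
}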
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index e97a00593dda..818a63da1a44 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -59,8 +59,8 @@ DECLARE_EVENT_CLASS(dwc3_log_event,
__entry->ep0state = dwc->ep0state;
),
TP_printk("event (%08x): %s", __entry->event,
- dwc3_decode_event(__get_str(str), __entry->event,
- __entry->ep0state))
+ dwc3_decode_event(__get_str(str), DWC3_MSG_MAX,
+ __entry->event, __entry->ep0state))
);
DEFINE_EVENT(dwc3_log_event, dwc3_event,
@@ -86,7 +86,8 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl,
__entry->wIndex = le16_to_cpu(ctrl->wIndex);
__entry->wLength = le16_to_cpu(ctrl->wLength);
),
- TP_printk("%s", dwc3_decode_ctrl(__get_str(str), __entry->bRequestType,
+ TP_printk("%s", dwc3_decode_ctrl(__get_str(str), DWC3_MSG_MAX,
+ __entry->bRequestType,
__entry->bRequest, __entry->wValue,
__entry->wIndex, __entry->wLength)
)
@@ -305,7 +306,7 @@ DECLARE_EVENT_CLASS(dwc3_log_ep,
__entry->trb_enqueue = dep->trb_enqueue;
__entry->trb_dequeue = dep->trb_dequeue;
),
- TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c:%c:%c",
+ TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c:%c",
__get_str(name), __entry->maxpacket,
__entry->maxpacket_limit, __entry->max_streams,
__entry->maxburst, __entry->trb_enqueue,
@@ -315,7 +316,6 @@ DECLARE_EVENT_CLASS(dwc3_log_ep,
__entry->flags & DWC3_EP_WEDGE ? 'W' : 'w',
__entry->flags & DWC3_EP_TRANSFER_STARTED ? 'B' : 'b',
__entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p',
- __entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e',
__entry->direction ? '<' : '>'
)
);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 31cce7805eb2..ec189d7855a0 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Gadget support on a system involves
# (a) a peripheral controller, and
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 71b15c65b90f..1eb4fa2e623f 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -67,9 +67,6 @@ struct usb_ep *usb_ep_autoconfig_ss(
)
{
struct usb_ep *ep;
- u8 type;
-
- type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
if (gadget->ops->match_ep) {
ep = gadget->ops->match_ep(gadget, desc, ep_comp);
@@ -109,16 +106,6 @@ found_ep:
desc->bEndpointAddress |= gadget->out_epnum;
}
- /* report (variable) full speed bulk maxpacket */
- if ((type == USB_ENDPOINT_XFER_BULK) && !ep_comp) {
- int size = ep->maxpacket_limit;
-
- /* min() doesn't work on bitfields with gcc-3.5 */
- if (size > 64)
- size = 64;
- desc->wMaxPacketSize = cpu_to_le16(size);
- }
-
ep->address = desc->bEndpointAddress;
ep->desc = NULL;
ep->comp_desc = NULL;
@@ -152,9 +139,10 @@ EXPORT_SYMBOL_GPL(usb_ep_autoconfig_ss);
*
* On success, this returns a claimed usb_ep, and modifies the endpoint
* descriptor bEndpointAddress. For bulk endpoints, the wMaxPacket value
- * is initialized as if the endpoint were used at full speed. To prevent
- * the endpoint from being returned by a later autoconfig call, claims it
- * by assigning ep->claimed to true.
+ * is initialized as if the endpoint were used at full speed. Because of
+ * that, users must consider adjusting the autoconfigured descriptor.
+ * To prevent the endpoint from being returned by a later autoconfig call,
+ * this routine claims it by assigning ep->claimed to true.
*
* On failure, this returns a null endpoint descriptor.
*/
@@ -163,7 +151,26 @@ struct usb_ep *usb_ep_autoconfig(
struct usb_endpoint_descriptor *desc
)
{
- return usb_ep_autoconfig_ss(gadget, desc, NULL);
+ struct usb_ep *ep;
+ u8 type;
+
+ ep = usb_ep_autoconfig_ss(gadget, desc, NULL);
+ if (!ep)
+ return NULL;
+
+ type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* report (variable) full speed bulk maxpacket */
+ if (type == USB_ENDPOINT_XFER_BULK) {
+ int size = ep->maxpacket_limit;
+
+ /* min() doesn't work on bitfields with gcc-3.5 */
+ if (size > 64)
+ size = 64;
+ desc->wMaxPacketSize = cpu_to_le16(size);
+ }
+
+ return ep;
}
EXPORT_SYMBOL_GPL(usb_ep_autoconfig);
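Per the updated comment, usb_ep_autoconfig() still caps a bulk endpoint's wMaxPacketSize at the full-speed value (64), so function drivers that reuse the descriptor at high or super speed must adjust their own copies. A hedged sketch of typical bind-time usage, with a hypothetical descriptor name rather than any particular function driver:

#include <linux/usb/gadget.h>

/* Hypothetical full-speed bulk IN descriptor for one gadget function. */
static struct usb_endpoint_descriptor fs_in_desc = {
        .bLength                = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType        = USB_DT_ENDPOINT,
        .bEndpointAddress       = USB_DIR_IN,
        .bmAttributes           = USB_ENDPOINT_XFER_BULK,
        /* wMaxPacketSize left 0: autoconfig fills in a full-speed value */
};

static int example_bind_ep(struct usb_gadget *gadget, struct usb_ep **out_ep)
{
        struct usb_ep *ep;

        ep = usb_ep_autoconfig(gadget, &fs_in_desc);
        if (!ep)
                return -ENODEV;

        /* fs_in_desc.bEndpointAddress and wMaxPacketSize are now filled in;
         * high-/super-speed copies of the descriptor must be fixed up by the
         * caller (address copied over, larger wMaxPacketSize set). */
        *out_ep = ep;
        return 0;
}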
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 1e5430438703..20413c276c61 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1082,6 +1082,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* condition with req->complete callback.
*/
usb_ep_dequeue(ep->ep, req);
+ wait_for_completion(&done);
interrupted = ep->status < 0;
}
@@ -2843,12 +2844,18 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
struct usb_request *req;
struct usb_ep *ep;
u8 bEndpointAddress;
+ u16 wMaxPacketSize;
/*
* We back up bEndpointAddress because autoconfig overwrites
* it with physical endpoint address.
*/
bEndpointAddress = ds->bEndpointAddress;
+ /*
+ * We back up wMaxPacketSize because autoconfig treats
+ * endpoint descriptors as if they were full speed.
+ */
+ wMaxPacketSize = ds->wMaxPacketSize;
pr_vdebug("autoconfig\n");
ep = usb_ep_autoconfig(func->gadget, ds);
if (unlikely(!ep))
@@ -2869,6 +2876,11 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
*/
if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
ds->bEndpointAddress = bEndpointAddress;
+ /*
+ * Restore wMaxPacketSize which was potentially
+ * overwritten by autoconfig.
+ */
+ ds->wMaxPacketSize = wMaxPacketSize;
}
ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9cdef108fb1b..ed68a4860b7d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func(
ss = kzalloc(sizeof(*ss), GFP_KERNEL);
if (!ss)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ss_opts = container_of(fi, struct f_ss_opts, func_inst);
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 2746a926a8d9..00d346965f7a 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -459,10 +459,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
} else if (intf == uac1->as_in_intf) {
uac1->as_in_alt = alt;
- if (alt)
- ret = u_audio_start_playback(&uac1->g_audio);
- else
- u_audio_stop_playback(&uac1->g_audio);
+ if (alt)
+ ret = u_audio_start_playback(&uac1->g_audio);
+ else
+ u_audio_stop_playback(&uac1->g_audio);
} else {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return -EINVAL;
@@ -568,6 +568,7 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
as_out_interface_alt_0_desc.bInterfaceNumber = status;
as_out_interface_alt_1_desc.bInterfaceNumber = status;
+ ac_header_desc.baInterfaceNr[0] = status;
uac1->as_out_intf = status;
uac1->as_out_alt = 0;
@@ -576,6 +577,7 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
as_in_interface_alt_0_desc.bInterfaceNumber = status;
as_in_interface_alt_1_desc.bInterfaceNumber = status;
+ ac_header_desc.baInterfaceNr[1] = status;
uac1->as_in_intf = status;
uac1->as_in_alt = 0;
diff --git a/drivers/usb/gadget/function/u_ecm.h b/drivers/usb/gadget/function/u_ecm.h
index 050aa672ee7f..098ece573a5e 100644
--- a/drivers/usb/gadget/function/u_ecm.h
+++ b/drivers/usb/gadget/function/u_ecm.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_ECM_H
diff --git a/drivers/usb/gadget/function/u_eem.h b/drivers/usb/gadget/function/u_eem.h
index de3828d3e8f0..921386a375cf 100644
--- a/drivers/usb/gadget/function/u_eem.h
+++ b/drivers/usb/gadget/function/u_eem.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_EEM_H
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index cd33cee4d78b..d8b92485b727 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef __U_ETHER_CONFIGFS_H
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index c3aba4dfa958..f9b0cf67360d 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_FFS_H
diff --git a/drivers/usb/gadget/function/u_gether.h b/drivers/usb/gadget/function/u_gether.h
index 5b7e2eb90336..ce4f07626f96 100644
--- a/drivers/usb/gadget/function/u_gether.h
+++ b/drivers/usb/gadget/function/u_gether.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_GETHER_H
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
index 2f5ca4bfa7ff..1594bfa312eb 100644
--- a/drivers/usb/gadget/function/u_hid.h
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -7,7 +7,7 @@
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_HID_H
diff --git a/drivers/usb/gadget/function/u_midi.h b/drivers/usb/gadget/function/u_midi.h
index 5599aa5fc977..29bf006c0a13 100644
--- a/drivers/usb/gadget/function/u_midi.h
+++ b/drivers/usb/gadget/function/u_midi.h
@@ -7,7 +7,7 @@
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_MIDI_H
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index 67324f983343..d483e45c0f77 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_NCM_H
diff --git a/drivers/usb/gadget/function/u_printer.h b/drivers/usb/gadget/function/u_printer.h
index 6088ff744194..78797764f478 100644
--- a/drivers/usb/gadget/function/u_printer.h
+++ b/drivers/usb/gadget/function/u_printer.h
@@ -7,7 +7,7 @@
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_PRINTER_H
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index d65fb4ebac3c..1e148b76f339 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_RNDIS_H
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 29436f75bbe0..65f634ec7fc2 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/tty.h>
@@ -26,6 +25,7 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/kthread.h>
+#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include "u_serial.h"
@@ -110,7 +110,7 @@ struct gs_port {
int read_allocated;
struct list_head read_queue;
unsigned n_read;
- struct tasklet_struct push;
+ struct delayed_work push;
struct list_head write_pool;
int write_started;
@@ -352,9 +352,10 @@ __acquires(&port->port_lock)
* So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
* can be buffered before the TTY layer's buffers (currently 64 KB).
*/
-static void gs_rx_push(unsigned long _port)
+static void gs_rx_push(struct work_struct *work)
{
- struct gs_port *port = (void *)_port;
+ struct delayed_work *w = to_delayed_work(work);
+ struct gs_port *port = container_of(w, struct gs_port, push);
struct tty_struct *tty;
struct list_head *queue = &port->read_queue;
bool disconnect = false;
@@ -429,21 +430,13 @@ static void gs_rx_push(unsigned long _port)
/* We want our data queue to become empty ASAP, keeping data
* in the tty and ldisc (not here). If we couldn't push any
- * this time around, there may be trouble unless there's an
- * implicit tty_unthrottle() call on its way...
+ * this time around, RX may be starved, so wait until the next jiffy.
*
- * REVISIT we should probably add a timer to keep the tasklet
- * from starving ... but it's not clear that case ever happens.
+ * We may leave a non-empty queue only when there is a tty, and
+ * either it is throttled or there is no more room in the flip buffer.
*/
- if (!list_empty(queue) && tty) {
- if (!tty_throttled(tty)) {
- if (do_push)
- tasklet_schedule(&port->push);
- else
- pr_warn("ttyGS%d: RX not scheduled?\n",
- port->port_num);
- }
- }
+ if (!list_empty(queue) && !tty_throttled(tty))
+ schedule_delayed_work(&port->push, 1);
/* If we're still connected, refill the USB RX queue. */
if (!disconnect && port->port_usb)
@@ -459,7 +452,7 @@ static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
/* Queue all received data until the tty layer is ready for it. */
spin_lock(&port->port_lock);
list_add_tail(&req->list, &port->read_queue);
- tasklet_schedule(&port->push);
+ schedule_delayed_work(&port->push, 0);
spin_unlock(&port->port_lock);
}
@@ -854,8 +847,8 @@ static void gs_unthrottle(struct tty_struct *tty)
* rts/cts, or other handshaking with the host, but if the
* read queue backs up enough we'll be NAKing OUT packets.
*/
- tasklet_schedule(&port->push);
pr_vdebug("ttyGS%d: unthrottle\n", port->port_num);
+ schedule_delayed_work(&port->push, 0);
}
spin_unlock_irqrestore(&port->port_lock, flags);
}
@@ -1159,7 +1152,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
init_waitqueue_head(&port->drain_wait);
init_waitqueue_head(&port->close_wait);
- tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
+ INIT_DELAYED_WORK(&port->push, gs_rx_push);
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
@@ -1186,7 +1179,7 @@ static int gs_closed(struct gs_port *port)
static void gserial_free_port(struct gs_port *port)
{
- tasklet_kill(&port->push);
+ cancel_delayed_work_sync(&port->push);
/* wait for old opens to finish */
wait_event(port->close_wait, gs_closed(port));
WARN_ON(port->port_usb != NULL);
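The u_serial.c hunks move RX pushing from a tasklet to a delayed work item so the driver can retry one jiffy later when the tty cannot take more data, something a tasklet cannot express directly. The lifecycle of that mechanism, reduced to a hedged standalone sketch (rx_owner/rx_push are stand-ins for any owner/handler pair, not driver symbols):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct rx_owner {
        struct delayed_work push;
        bool consumer_full;     /* placeholder for "tty can't take more" */
};

static void rx_push(struct work_struct *work)
{
        struct rx_owner *o = container_of(to_delayed_work(work),
                                          struct rx_owner, push);

        /* push pending data here; if the consumer is still full, retry later */
        if (o->consumer_full)
                schedule_delayed_work(&o->push, 1);     /* one jiffy from now */
}

static void rx_owner_init(struct rx_owner *o)
{
        INIT_DELAYED_WORK(&o->push, rx_push);
}

static void rx_owner_destroy(struct rx_owner *o)
{
        /* cancels a pending (even still timer-delayed) item and waits for a
         * running one, the counterpart of the old tasklet_kill() */
        cancel_delayed_work_sync(&o->push);
}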
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 8362ee572e1e..82048791eb6e 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -7,7 +7,7 @@
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_UAC2_H
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 5242d489e20a..16da49a2fcf2 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef U_UVC_H
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 099d650082e5..1473d25ff17a 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -56,6 +56,8 @@ extern unsigned int uvc_gadget_trace_param;
dev_dbg(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
#define uvcg_info(f, fmt, args...) \
dev_info(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
+#define uvcg_warn(f, fmt, args...) \
+ dev_warn(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
#define uvcg_err(f, fmt, args...) \
dev_err(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index bc1e2af566c3..00fb58e50a15 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -7,7 +7,7 @@
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#include <linux/sort.h>
@@ -1570,10 +1570,6 @@ uvcg_uncompressed_##cname##_store(struct config_item *item, \
if (ret) \
goto end; \
\
- if (num > 255) { \
- ret = -EINVAL; \
- goto end; \
- } \
u->desc.aname = num; \
ret = len; \
end: \
@@ -1767,10 +1763,6 @@ uvcg_mjpeg_##cname##_store(struct config_item *item, \
if (ret) \
goto end; \
\
- if (num > 255) { \
- ret = -EINVAL; \
- goto end; \
- } \
u->desc.aname = num; \
ret = len; \
end: \
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index 8549c0b27b9d..341391dbc81f 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -7,7 +7,7 @@
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef UVC_CONFIGFS_H
#define UVC_CONFIGFS_H
diff --git a/drivers/usb/gadget/function/uvc_v4l2.h b/drivers/usb/gadget/function/uvc_v4l2.h
index a75e9c397446..452d71059b3f 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.h
+++ b/drivers/usb/gadget/function/uvc_v4l2.h
@@ -7,7 +7,7 @@
*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef __UVC_V4L2_H__
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index 278dc52c7604..dff12103f696 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -7,7 +7,7 @@
*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef __UVC_VIDEO_H__
#define __UVC_VIDEO_H__
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 784bf86dad4f..d7c9e4fca895 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Gadget support on a system involves
# (a) a peripheral controller, and
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 37ca0e669bd8..249277d0e53f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1218,27 +1218,27 @@ ep0_poll (struct file *fd, poll_table *wait)
if (dev->state <= STATE_DEV_OPENED)
return DEFAULT_POLLMASK;
- poll_wait(fd, &dev->wait, wait);
-
- spin_lock_irq (&dev->lock);
-
- /* report fd mode change before acting on it */
- if (dev->setup_abort) {
- dev->setup_abort = 0;
- mask = EPOLLHUP;
- goto out;
- }
-
- if (dev->state == STATE_DEV_SETUP) {
- if (dev->setup_in || dev->setup_can_stall)
- mask = EPOLLOUT;
- } else {
- if (dev->ev_next != 0)
- mask = EPOLLIN;
- }
+ poll_wait(fd, &dev->wait, wait);
+
+ spin_lock_irq(&dev->lock);
+
+ /* report fd mode change before acting on it */
+ if (dev->setup_abort) {
+ dev->setup_abort = 0;
+ mask = EPOLLHUP;
+ goto out;
+ }
+
+ if (dev->state == STATE_DEV_SETUP) {
+ if (dev->setup_in || dev->setup_can_stall)
+ mask = EPOLLOUT;
+ } else {
+ if (dev->ev_next != 0)
+ mask = EPOLLIN;
+ }
out:
- spin_unlock_irq(&dev->lock);
- return mask;
+ spin_unlock_irq(&dev->lock);
+ return mask;
}
static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index dbaa46eee853..6aea1ecb3999 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -5,7 +5,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#include "u_f.h"
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 09f90447fed5..eaa13fd3dc7f 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -7,7 +7,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef __U_F_H__
diff --git a/drivers/usb/gadget/u_os_desc.h b/drivers/usb/gadget/u_os_desc.h
index 8acd21779ac8..5d7d35c8cc31 100644
--- a/drivers/usb/gadget/u_os_desc.h
+++ b/drivers/usb/gadget/u_os_desc.h
@@ -7,7 +7,7 @@
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
#ifndef __U_OS_DESC_H__
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 0a16cbd4e528..ef0259a950ba 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Gadget support on a system involves
# (a) a peripheral controller, and
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 4a28e3fbeb0b..83340f4fdc6e 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -120,7 +120,7 @@ static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
/* No current DMA ongoing */
req->active = false;
- /* Grab lenght out of HW */
+ /* Grab length out of HW */
len = VHUB_EP_DMA_TX_SIZE(stat);
/* If not using DMA, copy data out if needed */
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 35ba0e55a2e9..7c040f56100e 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -295,7 +295,7 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
dsize = AST_VHUB_HUB_DESC_SIZE;
memcpy(ep->buf, &ast_vhub_hub_desc, dsize);
BUILD_BUG_ON(dsize > sizeof(ast_vhub_hub_desc));
- BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
+ BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
default:
return std_req_stall;
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
index c74ac25dddcd..3e88c7670b2e 100644
--- a/drivers/usb/gadget/udc/bdc/Kconfig
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config USB_BDC_UDC
tristate "Broadcom USB3.0 device controller IP driver(BDC)"
depends on USB_GADGET && HAS_DMA
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 6305bf2c8b59..44c2a5eef785 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -311,8 +311,8 @@ int bdc_ep_clear_stall(struct bdc *bdc, int epnum)
/* if the endpoint is not stalled */
if (!(ep->flags & BDC_EP_STALL)) {
ret = bdc_ep_set_stall(bdc, epnum);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
}
}
/* Preserve the seq number for ep0 only */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 01b44e159623..ccbd1d34eb2a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -172,8 +172,9 @@ static int scratchpad_setup(struct bdc *bdc)
/* Refer to BDC spec, Table 4 for description of SPB */
sp_buff_size = 1 << (sp_buff_size + 5);
dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
- bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size,
- &bdc->scratchpad.sp_dma, GFP_KERNEL);
+ bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
+ &bdc->scratchpad.sp_dma,
+ GFP_KERNEL);
if (!bdc->scratchpad.buff)
goto fail;
@@ -202,11 +203,9 @@ static int setup_srr(struct bdc *bdc, int interrupter)
bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
bdc->srr.dqp_index = 0;
/* allocate the status report descriptors */
- bdc->srr.sr_bds = dma_zalloc_coherent(
- bdc->dev,
- NUM_SR_ENTRIES * sizeof(struct bdc_bd),
- &bdc->srr.dma_addr,
- GFP_KERNEL);
+ bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ &bdc->srr.dma_addr, GFP_KERNEL);
if (!bdc->srr.sr_bds)
return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 87d6b12779f2..7cf34beb50df 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -281,10 +281,10 @@ EXPORT_SYMBOL_GPL(usb_ep_queue);
* @ep:the endpoint associated with the request
* @req:the request being canceled
*
- * If the request is still active on the endpoint, it is dequeued and its
- * completion routine is called (with status -ECONNRESET); else a negative
- * error code is returned. This is guaranteed to happen before the call to
- * usb_ep_dequeue() returns.
+ * If the request is still active on the endpoint, it is dequeued and
+ * eventually its completion routine is called (with status -ECONNRESET);
+ * else a negative error code is returned. This routine is asynchronous,
+ * that is, it may return before the completion routine runs.
*
* Note that some hardware can't clear out write fifos (to unlink the request
* at the head of the queue) except as part of disconnecting from usb. Such
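The reworded kerneldoc makes explicit that usb_ep_dequeue() may return before the request's completion handler has run; the f_fs.c hunk earlier in this series copes with exactly that by waiting on a completion after dequeuing. A minimal sketch of the same wait pattern, assuming example_complete() was installed as req->complete and 'done' stored in req->context when the request was queued:

#include <linux/completion.h>
#include <linux/usb/gadget.h>

/* Assumed to have been set as req->complete at queue time. */
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
        complete(req->context); /* may run after usb_ep_dequeue() returns */
}

static void example_cancel_and_wait(struct usb_ep *ep, struct usb_request *req,
                                    struct completion *done)
{
        /* Ask the UDC to unlink the request; the -ECONNRESET status is
         * delivered through the completion callback, not synchronously. */
        usb_ep_dequeue(ep, req);

        /* usb_ep_dequeue() may return first, so wait for complete() before
         * reusing or freeing the request. */
        wait_for_completion(done);
}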
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index bc6abaea907d..cec49294bac6 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -326,6 +326,7 @@ dma_reset:
static void fotg210_start_dma(struct fotg210_ep *ep,
struct fotg210_request *req)
{
+ struct device *dev = &ep->fotg210->gadget.dev;
dma_addr_t d;
u8 *buffer;
u32 length;
@@ -348,18 +349,14 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
length = req->req.length;
}
- d = dma_map_single(NULL, buffer, length,
+ d = dma_map_single(dev, buffer, length,
ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (dma_mapping_error(NULL, d)) {
+ if (dma_mapping_error(dev, d)) {
pr_err("dma_mapping_error\n");
return;
}
- dma_sync_single_for_device(NULL, d, length,
- ep->dir_in ? DMA_TO_DEVICE :
- DMA_FROM_DEVICE);
-
fotg210_enable_dma(ep, d, length);
/* check if dma is done */
@@ -370,7 +367,7 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
/* update actual transfer length */
req->req.actual += length;
- dma_unmap_single(NULL, d, length, DMA_TO_DEVICE);
+ dma_unmap_single(dev, d, length, DMA_TO_DEVICE);
}
static void fotg210_ep0_queue(struct fotg210_ep *ep,
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 660878a19505..b77f3126580e 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
#if defined(PLX_PCI_RDK2)
/* see if PCI int for us by checking irqstat */
intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
- if (!intcsr & (1 << NET2272_PCI_IRQ)) {
+ if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
spin_unlock(&dev->lock);
return IRQ_NONE;
}
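
Note: the net2272 fix is an operator-precedence bug: logical NOT binds tighter than bitwise AND, so the old test negated intcsr before masking and could never reject a foreign interrupt. A compact illustration with a hypothetical bit position:

        static bool not_our_irq(u32 intcsr)
        {
                /* buggy form: (!intcsr) & (1 << 2) -- with intcsr == 0 this is
                 * 1 & 4 == 0, so a spurious interrupt would never be rejected
                 */
                return !(intcsr & (1 << 2));    /* correct: true when bit 2 is clear */
        }
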
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index e7dae5379e04..f63f82450bf4 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -516,8 +516,8 @@ static int net2280_disable(struct usb_ep *_ep)
unsigned long flags;
ep = container_of(_ep, struct net2280_ep, ep);
- if (!_ep || !ep->desc || _ep->name == ep0name) {
- pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
+ if (!_ep || _ep->name == ep0name) {
+ pr_err("%s: Invalid ep=%p\n", __func__, _ep);
return -EINVAL;
}
spin_lock_irqsave(&ep->dev->lock, flags);
@@ -2279,7 +2279,7 @@ static void usb_reinit_338x(struct net2280 *dev)
* - It is safe to set for all connection speeds; all chip revisions.
* - R-M-W to leave other bits undisturbed.
* - Reference PLX TT-7372
- */
+ */
val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 6e34f9594159..7dc248546fd4 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2630,6 +2630,10 @@ MODULE_DEVICE_TABLE(of, usb3_of_match);
static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
{
+ .soc_id = "r8a774c0",
+ .data = &renesas_usb3_priv_r8a77990,
+ },
+ {
.soc_id = "r8a7795", .revision = "ES1.*",
.data = &renesas_usb3_priv_r8a7795_es1,
},
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index d4da47f4f6f4..3fcded31405a 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -947,15 +947,14 @@ static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
UDC_DMA_STP_STS_BS_HOST_READY,
UDC_DMA_STP_STS_BS);
-
- /* clear NAK by writing CNAK */
- if (ep->naking) {
- tmp = readl(&ep->regs->ctl);
- tmp |= AMD_BIT(UDC_EPCTL_CNAK);
- writel(tmp, &ep->regs->ctl);
- ep->naking = 0;
- UDC_QUEUE_CNAK(ep, ep->num);
- }
+ /* clear NAK by writing CNAK */
+ if (ep->naking) {
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 16758b12a5e9..d809671c5fea 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Host Controller Drivers
#
@@ -69,13 +70,13 @@ config USB_XHCI_MTK
If unsure, say N.
config USB_XHCI_MVEBU
- tristate "xHCI support for Marvell Armada 375/38x"
+ tristate "xHCI support for Marvell Armada 375/38x/37xx"
select USB_XHCI_PLATFORM
depends on HAS_IOMEM
depends on ARCH_MVEBU || COMPILE_TEST
---help---
Say 'Y' to enable the support for the xHCI host controller
- found in Marvell Armada 375/38x ARM SOCs.
+ found in Marvell Armada 375/38x/37xx ARM SOCs.
config USB_XHCI_RCAR
tristate "xHCI support for Renesas R-Car SoCs"
@@ -179,8 +180,7 @@ config XPS_USB_HCD_XILINX
devices only.
config USB_EHCI_FSL
- tristate "Support for Freescale PPC on-chip EHCI USB controller"
- depends on FSL_SOC
+ tristate "Support for Freescale on-chip EHCI USB controller"
select USB_EHCI_ROOT_HUB_TT
---help---
Variation of ARC USB block used in some Freescale chips.
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 0a9fd2022acf..e3d0c1c25160 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
+#include <linux/io.h>
#include "ehci.h"
#include "ehci-fsl.h"
@@ -50,6 +51,7 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
struct resource *res;
int irq;
int retval;
+ u32 tmp;
pr_debug("initializing FSL-SOC USB Controller\n");
@@ -114,17 +116,22 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
}
/* Enable USB controller, 83xx or 8536 */
- if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
- clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
- CONTROL_REGISTER_W1C_MASK, 0x4);
-
+ if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6) {
+ tmp = ioread32be(hcd->regs + FSL_SOC_USB_CTRL);
+ tmp &= ~CONTROL_REGISTER_W1C_MASK;
+ tmp |= 0x4;
+ iowrite32be(tmp, hcd->regs + FSL_SOC_USB_CTRL);
+ }
/*
* Enable UTMI phy and program PTS field in UTMI mode before asserting
* controller reset for USB Controller version 2.5
*/
if (pdata->has_fsl_erratum_a007792) {
- clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
- CONTROL_REGISTER_W1C_MASK, CTRL_UTMI_PHY_EN);
+ tmp = ioread32be(hcd->regs + FSL_SOC_USB_CTRL);
+ tmp &= ~CONTROL_REGISTER_W1C_MASK;
+ tmp |= CTRL_UTMI_PHY_EN;
+ iowrite32be(tmp, hcd->regs + FSL_SOC_USB_CTRL);
+
writel(PORT_PTS_UTMI, hcd->regs + FSL_SOC_USB_PORTSC1);
}
@@ -174,7 +181,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
enum fsl_usb2_phy_modes phy_mode,
unsigned int port_offset)
{
- u32 portsc;
+ u32 portsc, tmp;
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
struct device *dev = hcd->self.controller;
@@ -192,11 +199,16 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
case FSL_USB2_PHY_ULPI:
if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
- clrbits32(non_ehci + FSL_SOC_USB_CTRL,
- CONTROL_REGISTER_W1C_MASK | UTMI_PHY_EN);
- clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
- CONTROL_REGISTER_W1C_MASK,
- ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
+ /* turn off UTMI PHY first */
+ tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
+ tmp &= ~(CONTROL_REGISTER_W1C_MASK | UTMI_PHY_EN);
+ iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
+
+ /* then turn on ULPI and enable USB controller */
+ tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
+ tmp &= ~CONTROL_REGISTER_W1C_MASK;
+ tmp |= ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN;
+ iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
}
portsc |= PORT_PTS_ULPI;
break;
@@ -210,16 +222,21 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
case FSL_USB2_PHY_UTMI_DUAL:
if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
- clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
- CONTROL_REGISTER_W1C_MASK, UTMI_PHY_EN);
+ tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
+ tmp &= ~CONTROL_REGISTER_W1C_MASK;
+ tmp |= UTMI_PHY_EN;
+ iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
+
mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI PHY CLK to
become stable - 10ms*/
}
/* enable UTMI PHY */
- if (pdata->have_sysif_regs)
- clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
- CONTROL_REGISTER_W1C_MASK,
- CTRL_UTMI_PHY_EN);
+ if (pdata->have_sysif_regs) {
+ tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
+ tmp &= ~CONTROL_REGISTER_W1C_MASK;
+ tmp |= CTRL_UTMI_PHY_EN;
+ iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
+ }
portsc |= PORT_PTS_UTMI;
break;
case FSL_USB2_PHY_NONE:
@@ -241,9 +258,12 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
- if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
- clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
- CONTROL_REGISTER_W1C_MASK, USB_CTRL_USB_EN);
+ if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs) {
+ tmp = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
+ tmp &= ~CONTROL_REGISTER_W1C_MASK;
+ tmp |= USB_CTRL_USB_EN;
+ iowrite32be(tmp, non_ehci + FSL_SOC_USB_CTRL);
+ }
return 0;
}
@@ -284,14 +304,9 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
return -EINVAL;
if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
- unsigned int chip, rev, svr;
-
- svr = mfspr(SPRN_SVR);
- chip = svr >> 16;
- rev = (svr >> 4) & 0xf;
/* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */
- if ((rev == 1) && (chip >= 0x8050) && (chip <= 0x8055))
+ if (pdata->has_fsl_erratum_14 == 1)
ehci->has_fsl_port_bug = 1;
if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
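
Note: the ehci-fsl conversion replaces the arch-specific clrsetbits_be32()/clrbits32() helpers with portable big-endian MMIO accessors, matching the Kconfig change above that drops the FSL_SOC dependency. Every call site is the same read-modify-write with the write-1-to-clear bits masked out; a condensed helper, illustrative only:

        #include <linux/io.h>

        /* clear 'clr' and set 'set' in a big-endian MMIO register */
        static void fsl_usb_clrsetbits_be32(void __iomem *reg, u32 clr, u32 set)
        {
                u32 tmp = ioread32be(reg);

                tmp &= ~clr;
                tmp |= set;
                iowrite32be(tmp, reg);
        }

        /* e.g.: fsl_usb_clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
         *                               CONTROL_REGISTER_W1C_MASK, CTRL_UTMI_PHY_EN);
         */
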
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index f26109eafdbf..66ec1fdf9fe7 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
MODULE_ALIAS("mv-ehci");
MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1ad72647a069..790acf3633e8 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -182,6 +182,23 @@ static int ehci_orion_drv_reset(struct usb_hcd *hcd)
return ret;
}
+static int __maybe_unused ehci_orion_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ return ehci_suspend(hcd, device_may_wakeup(dev));
+}
+
+static int __maybe_unused ehci_orion_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ return ehci_resume(hcd, false);
+}
+
+static SIMPLE_DEV_PM_OPS(ehci_orion_pm_ops, ehci_orion_drv_suspend,
+ ehci_orion_drv_resume);
+
static const struct ehci_driver_overrides orion_overrides __initconst = {
.extra_priv_size = sizeof(struct orion_ehci_hcd),
.reset = ehci_orion_drv_reset,
@@ -257,15 +274,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
if (IS_ERR(priv->phy)) {
err = PTR_ERR(priv->phy);
if (err != -ENOSYS)
- goto err_phy_get;
- } else {
- err = phy_init(priv->phy);
- if (err)
- goto err_phy_init;
-
- err = phy_power_on(priv->phy);
- if (err)
- goto err_phy_power_on;
+ goto err_dis_clk;
}
/*
@@ -297,19 +306,12 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
- goto err_add_hcd;
+ goto err_dis_clk;
device_wakeup_enable(hcd->self.controller);
return 0;
-err_add_hcd:
- if (!IS_ERR(priv->phy))
- phy_power_off(priv->phy);
-err_phy_power_on:
- if (!IS_ERR(priv->phy))
- phy_exit(priv->phy);
-err_phy_init:
-err_phy_get:
+err_dis_clk:
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
usb_put_hcd(hcd);
@@ -327,11 +329,6 @@ static int ehci_orion_drv_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
- if (!IS_ERR(priv->phy)) {
- phy_power_off(priv->phy);
- phy_exit(priv->phy);
- }
-
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
@@ -354,6 +351,7 @@ static struct platform_driver ehci_orion_driver = {
.driver = {
.name = "orion-ehci",
.of_match_table = ehci_orion_dt_ids,
+ .pm = &ehci_orion_pm_ops,
},
};
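
Note: the new ehci-orion hooks are standard system-sleep wiring: SIMPLE_DEV_PM_OPS() fills in only the suspend/resume callbacks and leaves them out entirely when CONFIG_PM_SLEEP is disabled, which is why the handlers carry __maybe_unused. The same skeleton for a generic platform driver (names hypothetical):

        #include <linux/platform_device.h>
        #include <linux/pm.h>

        static int __maybe_unused foo_suspend(struct device *dev)
        {
                /* quiesce the controller */
                return 0;
        }

        static int __maybe_unused foo_resume(struct device *dev)
        {
                /* bring the controller back up */
                return 0;
        }

        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

        static struct platform_driver foo_driver = {
                .driver = {
                        .name   = "foo",
                        .pm     = &foo_pm_ops,
                },
        };
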
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 677f9d592109..4f8b8a08c914 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -225,6 +225,12 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
pdata->has_fsl_erratum_a005697 =
of_property_read_bool(np, "fsl,usb_erratum-a005697");
+ if (of_get_property(np, "fsl,usb_erratum_14", NULL))
+ pdata->has_fsl_erratum_14 = 1;
+ else
+ pdata->has_fsl_erratum_14 = 0;
+
+
/*
* Determine whether phy_clk_valid needs to be checked
* by reading property in device tree
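
Note: the new erratum flag is read with of_get_property(); for a valueless boolean property the neighbouring errata already use the terser of_property_read_bool(), so an equivalent form (a sketch, not what this patch does) would be:

        pdata->has_fsl_erratum_14 =
                of_property_read_bool(np, "fsl,usb_erratum_14");
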
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index ec6739ef3129..fc35a7993b7b 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -141,8 +141,11 @@ static struct regmap *at91_dt_syscon_sfr(void)
struct regmap *regmap;
regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
- if (IS_ERR(regmap))
- regmap = NULL;
+ if (IS_ERR(regmap)) {
+ regmap = syscon_regmap_lookup_by_compatible("microchip,sam9x60-sfr");
+ if (IS_ERR(regmap))
+ regmap = NULL;
+ }
return regmap;
}
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index a55cbba40a5a..ca8a94f15ac0 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -40,6 +41,8 @@ struct da8xx_ohci_hcd {
struct regulator *vbus_reg;
struct notifier_block nb;
unsigned int reg_enabled;
+ struct gpio_desc *vbus_gpio;
+ struct gpio_desc *oc_gpio;
};
#define to_da8xx_ohci(hcd) (struct da8xx_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
@@ -86,12 +89,13 @@ static void ohci_da8xx_disable(struct usb_hcd *hcd)
static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ struct device *dev = hcd->self.controller;
int ret;
- if (hub && hub->set_power)
- return hub->set_power(1, on);
+ if (da8xx_ohci->vbus_gpio) {
+ gpiod_set_value_cansleep(da8xx_ohci->vbus_gpio, on);
+ return 0;
+ }
if (!da8xx_ohci->vbus_reg)
return 0;
@@ -119,11 +123,9 @@ static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
static int ohci_da8xx_get_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
- if (hub && hub->get_power)
- return hub->get_power(1);
+ if (da8xx_ohci->vbus_gpio)
+ return gpiod_get_value_cansleep(da8xx_ohci->vbus_gpio);
if (da8xx_ohci->vbus_reg)
return regulator_is_enabled(da8xx_ohci->vbus_reg);
@@ -134,13 +136,11 @@ static int ohci_da8xx_get_power(struct usb_hcd *hcd)
static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
unsigned int flags;
int ret;
- if (hub && hub->get_oci)
- return hub->get_oci(1);
+ if (da8xx_ohci->oc_gpio)
+ return gpiod_get_value_cansleep(da8xx_ohci->oc_gpio);
if (!da8xx_ohci->vbus_reg)
return 0;
@@ -158,10 +158,8 @@ static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
- if (hub && hub->set_power)
+ if (da8xx_ohci->vbus_gpio)
return 1;
if (da8xx_ohci->vbus_reg)
@@ -173,10 +171,8 @@ static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
static int ohci_da8xx_has_oci(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
- if (hub && hub->get_oci)
+ if (da8xx_ohci->oc_gpio)
return 1;
if (da8xx_ohci->vbus_reg)
@@ -196,19 +192,6 @@ static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd)
return 0;
}
-/*
- * Handle the port over-current indicator change.
- */
-static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
- unsigned port)
-{
- ocic_mask |= 1 << port;
-
- /* Once over-current is detected, the port needs to be powered down */
- if (hub->get_oci(port) > 0)
- hub->set_power(port, 0);
-}
-
static int ohci_da8xx_regulator_event(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -223,16 +206,23 @@ static int ohci_da8xx_regulator_event(struct notifier_block *nb,
return 0;
}
+static irqreturn_t ohci_da8xx_oc_handler(int irq, void *data)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = data;
+
+ if (gpiod_get_value(da8xx_ohci->oc_gpio))
+ gpiod_set_value(da8xx_ohci->vbus_gpio, 0);
+
+ return IRQ_HANDLED;
+}
+
static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
int ret = 0;
- if (hub && hub->ocic_notify) {
- ret = hub->ocic_notify(ohci_da8xx_ocic_handler);
- } else if (da8xx_ohci->vbus_reg) {
+ if (!da8xx_ohci->oc_gpio && da8xx_ohci->vbus_reg) {
da8xx_ohci->nb.notifier_call = ohci_da8xx_regulator_event;
ret = devm_regulator_register_notifier(da8xx_ohci->vbus_reg,
&da8xx_ohci->nb);
@@ -244,15 +234,6 @@ static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
return ret;
}
-static void ohci_da8xx_unregister_notify(struct usb_hcd *hcd)
-{
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
-
- if (hub && hub->ocic_notify)
- hub->ocic_notify(NULL);
-}
-
static int ohci_da8xx_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
@@ -402,34 +383,35 @@ MODULE_DEVICE_TABLE(of, da8xx_ohci_ids);
static int ohci_da8xx_probe(struct platform_device *pdev)
{
struct da8xx_ohci_hcd *da8xx_ohci;
+ struct device *dev = &pdev->dev;
+ int error, hcd_irq, oc_irq;
struct usb_hcd *hcd;
struct resource *mem;
- int error, irq;
- hcd = usb_create_hcd(&ohci_da8xx_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
+
+ hcd = usb_create_hcd(&ohci_da8xx_hc_driver, dev, dev_name(dev));
if (!hcd)
return -ENOMEM;
da8xx_ohci = to_da8xx_ohci(hcd);
da8xx_ohci->hcd = hcd;
- da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, NULL);
+ da8xx_ohci->usb11_clk = devm_clk_get(dev, NULL);
if (IS_ERR(da8xx_ohci->usb11_clk)) {
error = PTR_ERR(da8xx_ohci->usb11_clk);
if (error != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get clock.\n");
+ dev_err(dev, "Failed to get clock.\n");
goto err;
}
- da8xx_ohci->usb11_phy = devm_phy_get(&pdev->dev, "usb-phy");
+ da8xx_ohci->usb11_phy = devm_phy_get(dev, "usb-phy");
if (IS_ERR(da8xx_ohci->usb11_phy)) {
error = PTR_ERR(da8xx_ohci->usb11_phy);
if (error != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get phy.\n");
+ dev_err(dev, "Failed to get phy.\n");
goto err;
}
- da8xx_ohci->vbus_reg = devm_regulator_get_optional(&pdev->dev, "vbus");
+ da8xx_ohci->vbus_reg = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(da8xx_ohci->vbus_reg)) {
error = PTR_ERR(da8xx_ohci->vbus_reg);
if (error == -ENODEV) {
@@ -437,13 +419,34 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
} else if (error == -EPROBE_DEFER) {
goto err;
} else {
- dev_err(&pdev->dev, "Failed to get regulator\n");
+ dev_err(dev, "Failed to get regulator\n");
goto err;
}
}
+ da8xx_ohci->vbus_gpio = devm_gpiod_get_optional(dev, "vbus",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(da8xx_ohci->vbus_gpio))
+ goto err;
+
+ da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
+ if (IS_ERR(da8xx_ohci->oc_gpio))
+ goto err;
+
+ if (da8xx_ohci->oc_gpio) {
+ oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio);
+ if (oc_irq < 0)
+ goto err;
+
+ error = devm_request_irq(dev, oc_irq, ohci_da8xx_oc_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "OHCI over-current indicator", da8xx_ohci);
+ if (error)
+ goto err;
+ }
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
+ hcd->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(hcd->regs)) {
error = PTR_ERR(hcd->regs);
goto err;
@@ -451,13 +454,13 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
hcd->rsrc_start = mem->start;
hcd->rsrc_len = resource_size(mem);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
+ hcd_irq = platform_get_irq(pdev, 0);
+ if (hcd_irq < 0) {
error = -ENODEV;
goto err;
}
- error = usb_add_hcd(hcd, irq, 0);
+ error = usb_add_hcd(hcd, hcd_irq, 0);
if (error)
goto err;
@@ -480,7 +483,6 @@ static int ohci_da8xx_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- ohci_da8xx_unregister_notify(hcd);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
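
Note: the ohci-da8xx rework drops the board-file root-hub callbacks in favour of generic GPIO descriptors: an optional "vbus" output gates port power, an optional "oc" input reports over-current, and the over-current line is turned into an interrupt so VBUS can be cut as soon as the condition fires. A reduced sketch of that wiring, with error handling trimmed and the PTR_ERR() propagation replaced by a placeholder:

        #include <linux/err.h>
        #include <linux/gpio/consumer.h>
        #include <linux/interrupt.h>

        static irqreturn_t oc_handler(int irq, void *data)
        {
                struct da8xx_ohci_hcd *p = data;

                if (gpiod_get_value(p->oc_gpio))                /* over-current asserted */
                        gpiod_set_value(p->vbus_gpio, 0);       /* cut port power */
                return IRQ_HANDLED;
        }

        static int wire_gpios(struct device *dev, struct da8xx_ohci_hcd *p)
        {
                int irq;

                p->vbus_gpio = devm_gpiod_get_optional(dev, "vbus", GPIOD_OUT_HIGH);
                p->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
                if (IS_ERR(p->vbus_gpio) || IS_ERR(p->oc_gpio))
                        return -EINVAL;         /* sketch: real code propagates PTR_ERR() */

                if (!p->oc_gpio)
                        return 0;

                irq = gpiod_to_irq(p->oc_gpio);
                if (irq < 0)
                        return irq;

                return devm_request_irq(dev, irq, oc_handler,
                                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                                        "OHCI over-current indicator", p);
        }
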
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index c5e6e8d0b5ef..47c5515a9ce4 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -1323,7 +1323,7 @@ static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
}
/* by default, enable interrupt on urb completion */
- qtd->hw_token |= cpu_to_le32(QTD_IOC);
+ qtd->hw_token |= cpu_to_le32(QTD_IOC);
return head;
cleanup:
@@ -2253,16 +2253,12 @@ static void scan_periodic(struct oxu_hcd *oxu)
for (;;) {
union ehci_shadow q, *q_p;
__le32 type, *hw_p;
- unsigned uframes;
/* don't scan past the live uframe */
frame = now_uframe >> 3;
- if (frame == (clock >> 3))
- uframes = now_uframe & 0x07;
- else {
+ if (frame != (clock >> 3)) {
/* safe to scan the whole frame at once */
now_uframe |= 0x07;
- uframes = 8;
}
restart:
@@ -2832,7 +2828,6 @@ static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int num, rem;
- int transfer_buffer_length;
void *transfer_buffer;
struct urb *murb;
int i, ret;
@@ -2843,7 +2838,6 @@ static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
/* Otherwise we should verify the USB transfer buffer size! */
transfer_buffer = urb->transfer_buffer;
- transfer_buffer_length = urb->transfer_buffer_length;
num = urb->transfer_buffer_length / 4096;
rem = urb->transfer_buffer_length % 4096;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 5b8a3d9530c4..934584f0a20a 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2477,7 +2477,8 @@ static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
spin_unlock_irqrestore(&endp->queue_lock.slock,
irqs);
kfree(urbq);
- } urb->error_count = 0;
+ }
+ urb->error_count = 0;
usb_hcd_giveback_urb(hcd, urb, status);
return 0;
} else if (list_empty(&endp->urb_more)) {
@@ -2982,7 +2983,8 @@ static int u132_remove(struct platform_device *pdev)
while (rings-- > 0) {
struct u132_ring *ring = &u132->ring[rings];
u132_ring_cancel_work(u132, ring);
- } while (endps-- > 0) {
+ }
+ while (endps-- > 0) {
struct u132_endp *endp = u132->endp[endps];
if (endp)
u132_endp_cancel_work(u132, endp);
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 6218bfe54f52..98deb5f64268 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -596,9 +596,9 @@ static int uhci_start(struct usb_hcd *hcd)
&uhci_debug_operations);
#endif
- uhci->frame = dma_zalloc_coherent(uhci_dev(uhci),
- UHCI_NUMFRAMES * sizeof(*uhci->frame),
- &uhci->frame_dma_handle, GFP_KERNEL);
+ uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
+ UHCI_NUMFRAMES * sizeof(*uhci->frame),
+ &uhci->frame_dma_handle, GFP_KERNEL);
if (!uhci->frame) {
dev_err(uhci_dev(uhci),
"unable to allocate consistent memory for frame list\n");
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Makefile
index 26df0138079e..859d20079df6 100644
--- a/drivers/usb/host/whci/Kbuild
+++ b/drivers/usb/host/whci/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
whci-hcd-y := \
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 86cff5c28eff..c78be578abb0 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -181,7 +181,7 @@ static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
xhci_dbc_flush_single_request(req);
}
-static void xhci_dbc_flush_reqests(struct xhci_dbc *dbc)
+static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
@@ -687,7 +687,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
!(portsc & DBC_PORTSC_CONN_STATUS)) {
xhci_info(xhci, "DbC cable unplugged\n");
dbc->state = DS_ENABLED;
- xhci_dbc_flush_reqests(dbc);
+ xhci_dbc_flush_requests(dbc);
return EVT_DISC;
}
@@ -697,7 +697,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
xhci_info(xhci, "DbC port reset\n");
writel(portsc, &dbc->regs->portsc);
dbc->state = DS_ENABLED;
- xhci_dbc_flush_reqests(dbc);
+ xhci_dbc_flush_requests(dbc);
return EVT_DISC;
}
diff --git a/drivers/usb/host/xhci-debugfs.h b/drivers/usb/host/xhci-debugfs.h
index ac5bc40f5c3a..f7a4e2492b00 100644
--- a/drivers/usb/host/xhci-debugfs.h
+++ b/drivers/usb/host/xhci-debugfs.h
@@ -80,7 +80,6 @@ struct xhci_regset {
char name[DEBUGFS_NAMELEN];
struct debugfs_regset32 regset;
size_t nregs;
- struct dentry *parent;
struct list_head list;
};
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 36a3eb8849f1..cf5e17962179 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -933,7 +933,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
* that tt_info, then free the child first. Recursive.
* We can't rely on udev at this point to find child-parent relationships.
*/
-void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *vdev;
struct list_head *tt_list_head;
@@ -1672,8 +1672,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
- void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
- flags);
+ void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+ flags);
if (!buf)
goto fail_sp4;
@@ -1799,8 +1799,8 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
struct xhci_erst_entry *entry;
size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
- erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
- size, &erst->erst_dma_addr, flags);
+ erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+ size, &erst->erst_dma_addr, flags);
if (!erst->entries)
return -ENOMEM;
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 32e158568788..60651a50770f 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -13,6 +13,7 @@
#include <linux/usb/hcd.h>
#include "xhci-mvebu.h"
+#include "xhci.h"
#define USB3_MAX_WINDOWS 4
#define USB3_WIN_CTRL(w) (0x0 + ((w) * 8))
@@ -72,3 +73,13 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+
+int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ /* Without reset on resume, the HC won't work at all */
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+
+ return 0;
+}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 09791df2cec0..ca0a3a5721dd 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -12,10 +12,16 @@ struct usb_hcd;
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
{
return 0;
}
+
+static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
+{
+ return 0;
+}
#endif
#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index a9ec7051f286..c2fe218e051f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -194,6 +194,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ef09cb06212f..0ac4ec975547 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -98,6 +98,10 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
.init_quirk = xhci_mvebu_mbus_init_quirk,
};
+static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
+ .init_quirk = xhci_mvebu_a3700_init_quirk,
+};
+
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V1,
.init_quirk = xhci_rcar_init_quirk,
@@ -124,6 +128,9 @@ static const struct of_device_id usb_xhci_of_match[] = {
.compatible = "marvell,armada-380-xhci",
.data = &xhci_plat_marvell_armada,
}, {
+ .compatible = "marvell,armada3700-xhci",
+ .data = &xhci_plat_marvell_armada3700,
+ }, {
.compatible = "renesas,xhci-r8a7790",
.data = &xhci_plat_renesas_rcar_gen2,
}, {
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 938ff06c0349..efb0cad8710e 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -941,9 +941,9 @@ static void tegra_xusb_powerdomain_remove(struct device *dev,
device_link_del(tegra->genpd_dl_ss);
if (tegra->genpd_dl_host)
device_link_del(tegra->genpd_dl_host);
- if (tegra->genpd_dev_ss)
+ if (!IS_ERR_OR_NULL(tegra->genpd_dev_ss))
dev_pm_domain_detach(tegra->genpd_dev_ss, true);
- if (tegra->genpd_dev_host)
+ if (!IS_ERR_OR_NULL(tegra->genpd_dev_host))
dev_pm_domain_detach(tegra->genpd_dev_host, true);
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 005e65922608..7fa58c99f126 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1455,8 +1455,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
else
num_tds = 1;
- urb_priv = kzalloc(sizeof(struct urb_priv) +
- num_tds * sizeof(struct xhci_td), mem_flags);
+ urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
if (!urb_priv)
return -ENOMEM;
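
Note: struct_size(p, member, n) computes the size of a structure that ends in a flexible array, i.e. sizeof(*p) plus n elements of the member, and saturates instead of wrapping if the multiplication overflows; that is why it replaces the open-coded sizeof arithmetic above. A small stand-alone illustration (the struct is a stand-in, not xhci's urb_priv):

        #include <linux/overflow.h>
        #include <linux/slab.h>
        #include <linux/types.h>

        struct pkt_log {
                unsigned int count;
                u32 ts[];               /* flexible array member */
        };

        static struct pkt_log *pkt_log_alloc(unsigned int n, gfp_t gfp)
        {
                struct pkt_log *p;

                /* struct_size(p, ts, n) == sizeof(*p) + n * sizeof(p->ts[0]) */
                p = kzalloc(struct_size(p, ts, n), gfp);
                if (p)
                        p->count = n;
                return p;
        }
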
diff --git a/drivers/usb/image/Kconfig b/drivers/usb/image/Kconfig
index 320d368c8dac..26c75f309da9 100644
--- a/drivers/usb/image/Kconfig
+++ b/drivers/usb/image/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Imaging devices configuration
#
diff --git a/drivers/usb/isp1760/Kconfig b/drivers/usb/isp1760/Kconfig
index c94b7d953399..b1022cc490a2 100644
--- a/drivers/usb/isp1760/Kconfig
+++ b/drivers/usb/isp1760/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config USB_ISP1760
tristate "NXP ISP 1760/1761 support"
depends on USB || USB_GADGET
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 68d2f2cd17dd..be04c117fe80 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Miscellaneous driver configuration
#
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 76c718ac8c78..257efacf3551 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -915,7 +915,6 @@ static int ftdi_elan_respond_engine(struct usb_ftdi *ftdi)
int bytes_read = 0;
int retry_on_empty = 1;
int retry_on_timeout = 3;
- int empty_packets = 0;
read:{
int packet_bytes = 0;
int retval = usb_bulk_msg(ftdi->udev,
@@ -960,31 +959,6 @@ read:{
dev_err(&ftdi->udev->dev, "error = %d with packet_bytes = %d with total %d bytes%s\n",
retval, packet_bytes, bytes_read, diag);
return retval;
- } else if (packet_bytes == 2) {
- unsigned char s0 = ftdi->bulk_in_buffer[0];
- unsigned char s1 = ftdi->bulk_in_buffer[1];
- empty_packets += 1;
- if (s0 == 0x31 && s1 == 0x60) {
- if (retry_on_empty-- > 0) {
- goto more;
- } else
- return 0;
- } else if (s0 == 0x31 && s1 == 0x00) {
- if (retry_on_empty-- > 0) {
- goto more;
- } else
- return 0;
- } else {
- if (retry_on_empty-- > 0) {
- goto more;
- } else
- return 0;
- }
- } else if (packet_bytes == 1) {
- if (retry_on_empty-- > 0) {
- goto more;
- } else
- return 0;
} else {
if (retry_on_empty-- > 0) {
goto more;
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 36bc28c884ad..9b632ab24f03 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config USB_SISUSBVGA
tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
diff --git a/drivers/usb/misc/sisusbvga/Makefile b/drivers/usb/misc/sisusbvga/Makefile
index 6ed3a638261a..6551bce68ac5 100644
--- a/drivers/usb/misc/sisusbvga/Makefile
+++ b/drivers/usb/misc/sisusbvga/Makefile
@@ -5,4 +5,5 @@
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga.o
-sisusbvga-y := sisusb.o sisusb_init.o sisusb_con.o
+sisusbvga-y := sisusb.o
+sisusbvga-$(CONFIG_USB_SISUSBVGA_CON) += sisusb_con.o sisusb_init.o
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 3198d0477cf8..9560fde621ee 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -53,7 +53,7 @@
#include "sisusb.h"
#include "sisusb_init.h"
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
#include <linux/font.h>
#endif
@@ -61,7 +61,7 @@
/* Forward declarations / clean-up routines */
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
static int sisusb_first_vc;
static int sisusb_last_vc;
module_param_named(first, sisusb_first_vc, int, 0);
@@ -1198,7 +1198,7 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
/* High level: Gfx (indexed) register access */
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
{
return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
@@ -1272,7 +1272,7 @@ int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
/* Write/read video ram */
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data)
{
return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data);
@@ -2255,7 +2255,7 @@ static int sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
}
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
/* Set up default text mode:
* - Set text mode (0x03)
@@ -2448,7 +2448,7 @@ void sisusb_delete(struct kref *kref)
sisusb->sisusb_dev = NULL;
sisusb_free_buffers(sisusb);
sisusb_free_urbs(sisusb);
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
kfree(sisusb->SiS_Pr);
#endif
kfree(sisusb);
@@ -2844,7 +2844,7 @@ static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
case SUCMD_HANDLETEXTMODE:
retval = 0;
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
/* Gfx core must be initialized, SiS_Pr must exist */
if (!sisusb->gfxinit || !sisusb->SiS_Pr)
return -ENODEV;
@@ -2860,7 +2860,7 @@ static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
#endif
break;
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
case SUCMD_SETMODE:
/* Gfx core must be initialized, SiS_Pr must exist */
if (!sisusb->gfxinit || !sisusb->SiS_Pr)
@@ -2944,7 +2944,7 @@ static long sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
x.sisusb_vramsize = sisusb->vramsize;
x.sisusb_minor = sisusb->minor;
x.sisusb_fbdevactive = 0;
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
x.sisusb_conactive = sisusb->haveconsole ? 1 : 0;
#else
x.sisusb_conactive = 0;
@@ -2975,7 +2975,7 @@ err_out:
return retval;
}
-#ifdef SISUSB_NEW_CONFIG_COMPAT
+#ifdef CONFIG_COMPAT
static long sisusb_compat_ioctl(struct file *f, unsigned int cmd,
unsigned long arg)
{
@@ -2998,7 +2998,7 @@ static const struct file_operations usb_sisusb_fops = {
.read = sisusb_read,
.write = sisusb_write,
.llseek = sisusb_lseek,
-#ifdef SISUSB_NEW_CONFIG_COMPAT
+#ifdef CONFIG_COMPAT
.compat_ioctl = sisusb_compat_ioctl,
#endif
.unlocked_ioctl = sisusb_ioctl
@@ -3091,7 +3091,7 @@ static int sisusb_probe(struct usb_interface *intf,
dev_info(&sisusb->sisusb_dev->dev, "Allocated %d output buffers\n",
sisusb->numobufs);
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
/* Allocate our SiS_Pr */
sisusb->SiS_Pr = kmalloc(sizeof(struct SiS_Private), GFP_KERNEL);
if (!sisusb->SiS_Pr) {
@@ -3112,7 +3112,7 @@ static int sisusb_probe(struct usb_interface *intf,
if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) {
int initscreen = 1;
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
if (sisusb_first_vc > 0 && sisusb_last_vc > 0 &&
sisusb_first_vc <= sisusb_last_vc &&
sisusb_last_vc <= MAX_NR_CONSOLES)
@@ -3134,7 +3134,7 @@ static int sisusb_probe(struct usb_interface *intf,
dev_dbg(&sisusb->sisusb_dev->dev, "*** RWTEST END ***\n");
#endif
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
sisusb_console_init(sisusb, sisusb_first_vc, sisusb_last_vc);
#endif
@@ -3160,7 +3160,7 @@ static void sisusb_disconnect(struct usb_interface *intf)
if (!sisusb)
return;
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
sisusb_console_exit(sisusb);
#endif
@@ -3210,7 +3210,7 @@ static struct usb_driver sisusb_driver = {
static int __init usb_sisusb_init(void)
{
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
sisusb_init_concode();
#endif
diff --git a/drivers/usb/misc/sisusbvga/sisusb.h b/drivers/usb/misc/sisusbvga/sisusb.h
index 20f03ad0ea16..8a5e6bb07d05 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.h
+++ b/drivers/usb/misc/sisusbvga/sisusb.h
@@ -38,17 +38,8 @@
#ifndef _SISUSB_H_
#define _SISUSB_H_
-#ifdef CONFIG_COMPAT
-#define SISUSB_NEW_CONFIG_COMPAT
-#endif
-
#include <linux/mutex.h>
-/* For older kernels, support for text consoles is by default
- * off. To enable text console support, change the following:
- */
-/* #define CONFIG_USB_SISUSBVGA_CON */
-
/* Version Information */
#define SISUSB_VERSION 0
@@ -57,10 +48,6 @@
/* Include console and mode switching code? */
-#ifdef CONFIG_USB_SISUSBVGA_CON
-#define INCL_SISUSB_CON 1
-#endif
-
#include <linux/console.h>
#include <linux/vt_kern.h>
#include "sisusb_struct.h"
@@ -139,7 +126,7 @@ struct sisusb_usb_data {
unsigned char gfxinit; /* graphics core initialized? */
unsigned short chipid, chipvendor;
unsigned short chiprevision;
-#ifdef INCL_SISUSB_CON
+#ifdef CONFIG_USB_SISUSBVGA_CON
struct SiS_Private *SiS_Pr;
unsigned long scrbuf;
unsigned int scrbuf_size;
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index c4f017e1d17a..cd0155310fea 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -70,13 +70,6 @@
#include "sisusb.h"
#include "sisusb_init.h"
-#ifdef INCL_SISUSB_CON
-
-#define sisusbcon_writew(val, addr) (*(addr) = (val))
-#define sisusbcon_readw(addr) (*(addr))
-#define sisusbcon_memmovew(d, s, c) memmove(d, s, c)
-#define sisusbcon_memcpyw(d, s, c) memcpy(d, s, c)
-
/* vc_data -> sisusb conversion table */
static struct sisusb_usb_data *mysisusbs[MAX_NR_CONSOLES];
@@ -86,9 +79,7 @@ static const struct consw sisusb_con;
static inline void
sisusbcon_memsetw(u16 *s, u16 c, unsigned int count)
{
- count /= 2;
- while (count--)
- sisusbcon_writew(c, s++);
+ memset16(s, c, count / 2);
}
static inline void
@@ -346,25 +337,30 @@ sisusbcon_invert_region(struct vc_data *vc, u16 *p, int count)
*/
while (count--) {
- u16 a = sisusbcon_readw(p);
-
- a = ((a) & 0x88ff) |
- (((a) & 0x7000) >> 4) |
- (((a) & 0x0700) << 4);
+ u16 a = *p;
- sisusbcon_writew(a, p++);
+ *p++ = ((a) & 0x88ff) |
+ (((a) & 0x7000) >> 4) |
+ (((a) & 0x0700) << 4);
}
}
-#define SISUSB_VADDR(x,y) \
- ((u16 *)c->vc_origin + \
- (y) * sisusb->sisusb_num_columns + \
- (x))
+static inline void *sisusb_vaddr(const struct sisusb_usb_data *sisusb,
+ const struct vc_data *c, unsigned int x, unsigned int y)
+{
+ return (u16 *)c->vc_origin + y * sisusb->sisusb_num_columns + x;
+}
+
+static inline unsigned long sisusb_haddr(const struct sisusb_usb_data *sisusb,
+ const struct vc_data *c, unsigned int x, unsigned int y)
+{
+ unsigned long offset = c->vc_origin - sisusb->scrbuf;
-#define SISUSB_HADDR(x,y) \
- ((u16 *)(sisusb->vrambase + (c->vc_origin - sisusb->scrbuf)) + \
- (y) * sisusb->sisusb_num_columns + \
- (x))
+ /* 2 bytes per each character */
+ offset += 2 * (y * sisusb->sisusb_num_columns + x);
+
+ return sisusb->vrambase + offset;
+}
/* Interface routine */
static void
@@ -382,9 +378,8 @@ sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
return;
}
-
- sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), 2);
+ sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y),
+ sisusb_haddr(sisusb, c, x, y), 2);
mutex_unlock(&sisusb->lock);
}
@@ -395,8 +390,6 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
int count, int y, int x)
{
struct sisusb_usb_data *sisusb;
- u16 *dest;
- int i;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
@@ -408,18 +401,15 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
* because the vt does this AFTER calling us.
*/
- dest = SISUSB_VADDR(x, y);
-
- for (i = count; i > 0; i--)
- sisusbcon_writew(sisusbcon_readw(s++), dest++);
+ memcpy(sisusb_vaddr(sisusb, c, x, y), s, count * 2);
if (sisusb_is_inactive(c, sisusb)) {
mutex_unlock(&sisusb->lock);
return;
}
- sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), count * 2);
+ sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y),
+ sisusb_haddr(sisusb, c, x, y), count * 2);
mutex_unlock(&sisusb->lock);
}
@@ -446,7 +436,7 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
* this AFTER calling us.
*/
- dest = SISUSB_VADDR(x, y);
+ dest = sisusb_vaddr(sisusb, c, x, y);
cols = sisusb->sisusb_num_columns;
@@ -472,8 +462,8 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
length = ((height * cols) - x - (cols - width - x)) * 2;
- sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), length);
+ sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y),
+ sisusb_haddr(sisusb, c, x, y), length);
mutex_unlock(&sisusb->lock);
}
@@ -517,12 +507,10 @@ sisusbcon_switch(struct vc_data *c)
(int)(sisusb->scrbuf + sisusb->scrbuf_size - c->vc_origin));
/* Restore the screen contents */
- sisusbcon_memcpyw((u16 *)c->vc_origin, (u16 *)c->vc_screenbuf,
- length);
+ memcpy((u16 *)c->vc_origin, (u16 *)c->vc_screenbuf, length);
- sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin,
- (long)SISUSB_HADDR(0, 0),
- length);
+ sisusb_copy_memory(sisusb, (char *)c->vc_origin,
+ sisusb_haddr(sisusb, c, 0, 0), length);
mutex_unlock(&sisusb->lock);
@@ -556,8 +544,7 @@ sisusbcon_save_screen(struct vc_data *c)
(int)(sisusb->scrbuf + sisusb->scrbuf_size - c->vc_origin));
/* Save the screen contents to vc's private buffer */
- sisusbcon_memcpyw((u16 *)c->vc_screenbuf, (u16 *)c->vc_origin,
- length);
+ memcpy((u16 *)c->vc_screenbuf, (u16 *)c->vc_origin, length);
mutex_unlock(&sisusb->lock);
}
@@ -628,10 +615,8 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
sisusbcon_memsetw((u16 *)c->vc_origin,
c->vc_video_erase_char,
c->vc_screenbuf_size);
- sisusb_copy_memory(sisusb,
- (unsigned char *)c->vc_origin,
- (u32)(sisusb->vrambase +
- (c->vc_origin - sisusb->scrbuf)),
+ sisusb_copy_memory(sisusb, (char *)c->vc_origin,
+ sisusb_haddr(sisusb, c, 0, 0),
c->vc_screenbuf_size);
sisusb->con_blanked = 1;
ret = 1;
@@ -796,24 +781,24 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
switch (dir) {
case SM_UP:
- sisusbcon_memmovew(SISUSB_VADDR(0, t),
- SISUSB_VADDR(0, t + lines),
+ memmove(sisusb_vaddr(sisusb, c, 0, t),
+ sisusb_vaddr(sisusb, c, 0, t + lines),
(b - t - lines) * cols * 2);
- sisusbcon_memsetw(SISUSB_VADDR(0, b - lines), eattr,
- lines * cols * 2);
+ sisusbcon_memsetw(sisusb_vaddr(sisusb, c, 0, b - lines),
+ eattr, lines * cols * 2);
break;
case SM_DOWN:
- sisusbcon_memmovew(SISUSB_VADDR(0, t + lines),
- SISUSB_VADDR(0, t),
+ memmove(sisusb_vaddr(sisusb, c, 0, t + lines),
+ sisusb_vaddr(sisusb, c, 0, t),
(b - t - lines) * cols * 2);
- sisusbcon_memsetw(SISUSB_VADDR(0, t), eattr,
+ sisusbcon_memsetw(sisusb_vaddr(sisusb, c, 0, t), eattr,
lines * cols * 2);
break;
}
- sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t),
- (long)SISUSB_HADDR(0, t), length);
+ sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, 0, t),
+ sisusb_haddr(sisusb, c, 0, t), length);
mutex_unlock(&sisusb->lock);
@@ -830,7 +815,6 @@ sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
int copyall = 0;
unsigned long oldorigin;
unsigned int delta = lines * c->vc_size_row;
- u32 originoffset;
/* Returning != 0 means we have done the scrolling successfully.
* Returning 0 makes vt do the scrolling on its own.
@@ -874,7 +858,7 @@ sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
if (c->vc_scr_end + delta >=
sisusb->scrbuf + sisusb->scrbuf_size) {
- sisusbcon_memcpyw((u16 *)sisusb->scrbuf,
+ memcpy((u16 *)sisusb->scrbuf,
(u16 *)(oldorigin + delta),
c->vc_screenbuf_size - delta);
c->vc_origin = sisusb->scrbuf;
@@ -892,12 +876,10 @@ sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
case SM_DOWN:
if (oldorigin - delta < sisusb->scrbuf) {
- sisusbcon_memmovew((u16 *)(sisusb->scrbuf +
- sisusb->scrbuf_size -
- c->vc_screenbuf_size +
- delta),
- (u16 *)oldorigin,
- c->vc_screenbuf_size - delta);
+ memmove((void *)sisusb->scrbuf + sisusb->scrbuf_size -
+ c->vc_screenbuf_size + delta,
+ (u16 *)oldorigin,
+ c->vc_screenbuf_size - delta);
c->vc_origin = sisusb->scrbuf +
sisusb->scrbuf_size -
c->vc_screenbuf_size;
@@ -913,23 +895,21 @@ sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
break;
}
- originoffset = (u32)(c->vc_origin - sisusb->scrbuf);
-
if (copyall)
sisusb_copy_memory(sisusb,
(char *)c->vc_origin,
- (u32)(sisusb->vrambase + originoffset),
+ sisusb_haddr(sisusb, c, 0, 0),
c->vc_screenbuf_size);
else if (dir == SM_UP)
sisusb_copy_memory(sisusb,
(char *)c->vc_origin + c->vc_screenbuf_size - delta,
- (u32)sisusb->vrambase + originoffset +
+ sisusb_haddr(sisusb, c, 0, 0) +
c->vc_screenbuf_size - delta,
delta);
else
sisusb_copy_memory(sisusb,
(char *)c->vc_origin,
- (u32)(sisusb->vrambase + originoffset),
+ sisusb_haddr(sisusb, c, 0, 0),
delta);
c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
@@ -1534,8 +1514,3 @@ void __init sisusb_init_concode(void)
for (i = 0; i < MAX_NR_CONSOLES; i++)
mysisusbs[i] = NULL;
}
-
-#endif /* INCL_CON */
-
-
-
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index 6a30e8bd9221..66f6ab5acd97 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -44,9 +44,6 @@
#include <linux/spinlock.h>
#include "sisusb.h"
-
-#ifdef INCL_SISUSB_CON
-
#include "sisusb_init.h"
/*********************************************/
@@ -955,5 +952,3 @@ int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo)
return SiSUSBSetMode(SiS_Pr, ModeNo);
}
-
-#endif /* INCL_SISUSB_CON */
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index a6efb9a72939..4d72b7d1d383 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -337,10 +337,12 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
int len, err, i;
- u32 property_u32 = 0;
+ u32 port, property_u32 = 0;
const u32 *cproperty_u32;
const char *cproperty_char;
char str[USB251XB_STRING_BUFSIZE / 2];
+ struct property *prop;
+ const __be32 *p;
if (!np) {
dev_err(dev, "failed to get ofdata\n");
@@ -539,6 +541,16 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
(wchar_t *)hub->serial,
USB251XB_STRING_BUFSIZE);
+ /*
+ * The datasheet documents the register as 'Port Swap', but in reality the
+ * register controls the USB DP/DM signal swapping for each port.
+ */
+ hub->port_swap = USB251XB_DEF_PORT_SWAP;
+ of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
+ if ((port >= 0) && (port <= data->port_cnt))
+ hub->port_swap |= BIT(port);
+ }
+
/* The following parameters are currently not exposed to devicetree, but
* may be as soon as needed.
*/
@@ -546,7 +558,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
hub->boost_up = USB251XB_DEF_BOOST_UP;
hub->boost_57 = USB251XB_DEF_BOOST_57;
hub->boost_14 = USB251XB_DEF_BOOST_14;
- hub->port_swap = USB251XB_DEF_PORT_SWAP;
hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
hub->port_map34 = USB251XB_DEF_PORT_MAP_34;
hub->port_map56 = USB251XB_DEF_PORT_MAP_56;
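
Note: of_property_for_each_u32() walks every u32 cell of a device-tree property, which is how the "swap-dx-lanes" port list above becomes the port_swap bitmap (and since port is a u32 here, the port >= 0 half of the range check is always true). A minimal sketch of the iterator against a hypothetical property:

        /* DT fragment (hypothetical):  swap-dx-lanes = <0 2 3>; */
        #include <linux/bitops.h>
        #include <linux/of.h>

        static u32 parse_swap_mask(struct device_node *np, u32 max_port)
        {
                struct property *prop;
                const __be32 *p;
                u32 port, mask = 0;

                of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port)
                        if (port <= max_port)
                                mask |= BIT(port);

                return mask;
        }
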
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index f723f7b8c9ac..d5141aa79dd4 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -355,11 +355,8 @@ static int usb3503_platform_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int usb3503_i2c_suspend(struct device *dev)
+static int usb3503_suspend(struct usb3503 *hub)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct usb3503 *hub = i2c_get_clientdata(client);
-
usb3503_switch_mode(hub, USB3503_MODE_STANDBY);
if (hub->clk)
@@ -368,11 +365,8 @@ static int usb3503_i2c_suspend(struct device *dev)
return 0;
}
-static int usb3503_i2c_resume(struct device *dev)
+static int usb3503_resume(struct usb3503 *hub)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct usb3503 *hub = i2c_get_clientdata(client);
-
if (hub->clk)
clk_prepare_enable(hub->clk);
@@ -380,11 +374,38 @@ static int usb3503_i2c_resume(struct device *dev)
return 0;
}
+
+static int usb3503_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return usb3503_suspend(i2c_get_clientdata(client));
+}
+
+static int usb3503_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return usb3503_resume(i2c_get_clientdata(client));
+}
+
+static int usb3503_platform_suspend(struct device *dev)
+{
+ return usb3503_suspend(dev_get_drvdata(dev));
+}
+
+static int usb3503_platform_resume(struct device *dev)
+{
+ return usb3503_resume(dev_get_drvdata(dev));
+}
#endif
static SIMPLE_DEV_PM_OPS(usb3503_i2c_pm_ops, usb3503_i2c_suspend,
usb3503_i2c_resume);
+static SIMPLE_DEV_PM_OPS(usb3503_platform_pm_ops, usb3503_platform_suspend,
+ usb3503_platform_resume);
+
static const struct i2c_device_id usb3503_id[] = {
{ USB3503_I2C_NAME, 0 },
{ }
@@ -415,6 +436,7 @@ static struct platform_driver usb3503_platform_driver = {
.driver = {
.name = USB3503_I2C_NAME,
.of_match_table = of_match_ptr(usb3503_of_match),
+ .pm = &usb3503_platform_pm_ops,
},
.probe = usb3503_platform_probe,
.remove = usb3503_platform_remove,
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index c7f82310e73e..98ada1a3425c 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -347,6 +347,14 @@ static unsigned get_maxpacket(struct usb_device *udev, int pipe)
return le16_to_cpup(&ep->desc.wMaxPacketSize);
}
+static int ss_isoc_get_packet_num(struct usb_device *udev, int pipe)
+{
+ struct usb_host_endpoint *ep = usb_pipe_endpoint(udev, pipe);
+
+ return USB_SS_MULT(ep->ss_ep_comp.bmAttributes)
+ * (1 + ep->ss_ep_comp.bMaxBurst);
+}
+
static void simple_fill_buf(struct urb *urb)
{
unsigned i;
@@ -1976,8 +1984,13 @@ static struct urb *iso_alloc_urb(
if (bytes < 0 || !desc)
return NULL;
+
maxp = usb_endpoint_maxp(desc);
- maxp *= usb_endpoint_maxp_mult(desc);
+ if (udev->speed >= USB_SPEED_SUPER)
+ maxp *= ss_isoc_get_packet_num(udev, pipe);
+ else
+ maxp *= usb_endpoint_maxp_mult(desc);
+
packets = DIV_ROUND_UP(bytes, maxp);
urb = usb_alloc_urb(packets, GFP_KERNEL);
@@ -2065,17 +2078,24 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
packets *= param->iterations;
if (context.is_iso) {
+ int transaction_num;
+
+ if (udev->speed >= USB_SPEED_SUPER)
+ transaction_num = ss_isoc_get_packet_num(udev, pipe);
+ else
+ transaction_num = usb_endpoint_maxp_mult(desc);
+
dev_info(&dev->intf->dev,
"iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
1 << (desc->bInterval - 1),
- (udev->speed == USB_SPEED_HIGH) ? "micro" : "",
+ (udev->speed >= USB_SPEED_HIGH) ? "micro" : "",
usb_endpoint_maxp(desc),
- usb_endpoint_maxp_mult(desc));
+ transaction_num);
dev_info(&dev->intf->dev,
"total %lu msec (%lu packets)\n",
(packets * (1 << (desc->bInterval - 1)))
- / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
+ / ((udev->speed >= USB_SPEED_HIGH) ? 8 : 1),
packets);
}
diff --git a/drivers/usb/mon/Kconfig b/drivers/usb/mon/Kconfig
index 5c6ffa2a612e..48f1b2dadb24 100644
--- a/drivers/usb/mon/Kconfig
+++ b/drivers/usb/mon/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Monitor configuration
#
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index 40bbf1f53337..bcc23486c4ed 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+#
# For MTK USB3.0 IP
config USB_MTU3
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index ad08895e78f9..f742fddc5e2c 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Dual Role (OTG-ready) Controller Drivers
# for silicon based on Mentor Graphics INVENTRA designs
@@ -111,9 +112,9 @@ config USB_MUSB_UX500
config USB_MUSB_JZ4740
tristate "JZ4740"
depends on NOP_USB_XCEIV
- depends on MACH_JZ4740 || COMPILE_TEST
+ depends on MIPS || COMPILE_TEST
depends on USB_MUSB_GADGET
- depends on USB_OTG_BLACKLIST_HUB
+ depends on USB=n || USB_OTG_BLACKLIST_HUB
config USB_MUSB_AM335X_CHILD
tristate
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 04d8b2bc205a..a60627bf7be3 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>
@@ -188,11 +189,20 @@ static int jz4740_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id jz4740_musb_of_match[] = {
+ { .compatible = "ingenic,jz4740-musb" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_musb_of_match);
+#endif
+
static struct platform_driver jz4740_driver = {
.probe = jz4740_probe,
.remove = jz4740_remove,
.driver = {
.name = "musb-jz4740",
+ .of_match_table = of_match_ptr(jz4740_musb_of_match),
},
};
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index eae8b1b1b45b..ffe462a657b1 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum)
}
if (request) {
- u8 is_dma = 0;
- bool short_packet = false;
trace_musb_req_tx(req);
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
- is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
*/
if ((request->zero && request->length)
&& (request->length % musb_ep->packet_sz == 0)
- && (request->actual == request->length))
- short_packet = true;
+ && (request->actual == request->length)) {
- if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
- (is_dma && (!dma->desired_mode ||
- (request->actual &
- (musb_ep->packet_sz - 1)))))
- short_packet = true;
-
- if (short_packet) {
/*
* On DMA completion, FIFO may not be
* available yet...
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index b59ce9ad14ce..eb308ec35c66 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -378,7 +378,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
qh = first_qh(head);
break;
}
- /* else: fall through */
+ /* fall through */
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
@@ -1283,7 +1283,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
MUSB_TXCSR_H_WZC_BITS
| MUSB_TXCSR_TXPKTRDY);
}
- return;
+ return;
}
done:
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a688f7f87829..5fc6825745f2 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
channel->status = MUSB_DMA_STATUS_FREE;
/* completed */
- if ((devctl & MUSB_DEVCTL_HM)
- && (musb_channel->transmit)
- && ((channel->desired_mode == 0)
- || (channel->actual_len &
- (musb_channel->max_packet_sz - 1)))
- ) {
+ if (musb_channel->transmit &&
+ (!channel->desired_mode ||
+ (channel->actual_len %
+ musb_channel->max_packet_sz))) {
u8 epnum = musb_channel->epnum;
int offset = musb->io.ep_offset(epnum,
MUSB_TXCSR);
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
*/
musb_ep_select(mbase, epnum);
txcsr = musb_readw(mbase, offset);
- txcsr &= ~(MUSB_TXCSR_DMAENAB
+ if (channel->desired_mode == 1) {
+ txcsr &= ~(MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_AUTOSET);
- musb_writew(mbase, offset, txcsr);
- /* Send out the packet */
- txcsr &= ~MUSB_TXCSR_DMAMODE;
+ musb_writew(mbase, offset, txcsr);
+ /* Send out the packet */
+ txcsr &= ~MUSB_TXCSR_DMAMODE;
+ txcsr |= MUSB_TXCSR_DMAENAB;
+ }
txcsr |= MUSB_TXCSR_TXPKTRDY;
musb_writew(mbase, offset, txcsr);
}
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index d7312eed6088..8c509b060c09 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Physical Layer USB driver configuration
#
@@ -21,7 +22,7 @@ config AB8500_USB
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
- depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
+ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 27bdb7222527..f5f0568d8533 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
- if (ret)
- return ret;
am_phy->usb_phy_gen.phy.init = am335x_init;
am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
device_set_wakeup_enable(dev, false);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
- return 0;
+ return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
}
static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 183550b63faa..dade34d70419 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -400,7 +400,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
{
struct twl6030_usb *twl = platform_get_drvdata(pdev);
- cancel_delayed_work(&twl->get_status_work);
+ cancel_delayed_work_sync(&twl->get_status_work);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
REG_INT_MSK_LINE_C);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 4e59c649db81..ddd3be48f948 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -340,7 +340,7 @@ static void usbhsh_pipe_detach(struct usbhsh_hpriv *hpriv,
pipe = usbhsh_uep_to_pipe(uep);
if (unlikely(!pipe)) {
- dev_err(dev, "uep doens't have pipe\n");
+ dev_err(dev, "uep doesn't have pipe\n");
} else if (1 == uep->counter--) { /* last user */
struct usb_host_endpoint *ep = usbhsh_uep_to_ep(uep);
struct usbhsh_device *udev = usbhsh_uep_to_udev(uep);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index aa3820448286..5e730e9b40ef 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -59,7 +59,7 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
/* The controller on R-Car Gen3 needs to wait up to 45 usec */
- udelay(45);
+ usleep_range(45, 90);
} else {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
}
diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c
index 5b287257ec11..8c739bd24acd 100644
--- a/drivers/usb/renesas_usbhs/rza.c
+++ b/drivers/usb/renesas_usbhs/rza.c
@@ -35,7 +35,7 @@ static int usbhs_rza1_hardware_init(struct platform_device *pdev)
/* Enable USB PLL (NOTE: ch0 controls both ch0 and ch1) */
usbhs_bset(priv, SYSCFG, UPLLE, UPLLE);
- udelay(1000);
+ usleep_range(1000, 2000);
usbhs_bset(priv, SUSPMODE, SUSPM, SUSPM);
return 0;
diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig
index e4194ac94510..f8b31aa67526 100644
--- a/drivers/usb/roles/Kconfig
+++ b/drivers/usb/roles/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config USB_ROLE_SWITCH
tristate "USB Role Switch Support"
help
diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile
index c02873206fc1..757a7d2797eb 100644
--- a/drivers/usb/roles/Makefile
+++ b/drivers/usb/roles/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
roles-y := class.o
obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 99116af07f1d..f45d8df5cfb8 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -8,6 +8,7 @@
*/
#include <linux/usb/role.h>
+#include <linux/property.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -84,7 +85,12 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
}
EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
-static int __switch_match(struct device *dev, const void *name)
+static int switch_fwnode_match(struct device *dev, const void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode;
+}
+
+static int switch_name_match(struct device *dev, const void *name)
{
return !strcmp((const char *)name, dev_name(dev));
}
@@ -94,8 +100,16 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
{
struct device *dev;
- dev = class_find_device(role_class, NULL, con->endpoint[ep],
- __switch_match);
+ if (con->fwnode) {
+ if (!fwnode_property_present(con->fwnode, con->id))
+ return NULL;
+
+ dev = class_find_device(role_class, NULL, con->fwnode,
+ switch_fwnode_match);
+ } else {
+ dev = class_find_device(role_class, NULL, con->endpoint[ep],
+ switch_name_match);
+ }
return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
@@ -266,6 +280,7 @@ usb_role_switch_register(struct device *parent,
sw->get = desc->get;
sw->dev.parent = parent;
+ sw->dev.fwnode = desc->fwnode;
sw->dev.class = role_class;
sw->dev.type = &usb_role_dev_type;
dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
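
The new lookup above matches a class device either by its firmware node (when con->fwnode is set) or, as before, by name. A condensed sketch of the two class_find_device() match callbacks, assuming a hypothetical class and helper name; not a drop-in from this patch:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/string.h>

/* Match on the firmware node attached to the device */
static int fwnode_match(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}

/* Legacy fallback: match on the device name */
static int name_match(struct device *dev, const void *name)
{
	return !strcmp((const char *)name, dev_name(dev));
}

static struct device *find_endpoint(struct class *my_class,
				    struct fwnode_handle *fwnode,
				    const char *name)
{
	if (fwnode)
		return class_find_device(my_class, NULL, fwnode, fwnode_match);

	return class_find_device(my_class, NULL, name, name_match);
}
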
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 533f127c30ad..7d031911d04e 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Serial device configuration
#
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c0777a374a88..fffe23ab0189 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
@@ -245,6 +246,7 @@ struct cp210x_serial_private {
u8 gpio_input;
#endif
u8 partnum;
+ speed_t min_speed;
speed_t max_speed;
bool use_actual_rate;
};
@@ -443,10 +445,10 @@ struct cp210x_pin_mode {
#define CP210X_PIN_MODE_GPIO BIT(0)
/*
- * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xf bytes.
- * Structure needs padding due to unused/unspecified bytes.
+ * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xf bytes
+ * on a CP2105 chip. Structure needs padding due to unused/unspecified bytes.
*/
-struct cp210x_config {
+struct cp210x_dual_port_config {
__le16 gpio_mode;
u8 __pad0[2];
__le16 reset_state;
@@ -457,6 +459,19 @@ struct cp210x_config {
u8 device_cfg;
} __packed;
+/*
+ * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xd bytes
+ * on a CP2104 chip. Structure needs padding due to unused/unspecified bytes.
+ */
+struct cp210x_single_port_config {
+ __le16 gpio_mode;
+ u8 __pad0[2];
+ __le16 reset_state;
+ u8 __pad1[4];
+ __le16 suspend_state;
+ u8 device_cfg;
+} __packed;
+
/* GPIO modes */
#define CP210X_SCI_GPIO_MODE_OFFSET 9
#define CP210X_SCI_GPIO_MODE_MASK GENMASK(11, 9)
@@ -464,11 +479,19 @@ struct cp210x_config {
#define CP210X_ECI_GPIO_MODE_OFFSET 2
#define CP210X_ECI_GPIO_MODE_MASK GENMASK(3, 2)
+#define CP210X_GPIO_MODE_OFFSET 8
+#define CP210X_GPIO_MODE_MASK GENMASK(11, 8)
+
/* CP2105 port configuration values */
#define CP2105_GPIO0_TXLED_MODE BIT(0)
#define CP2105_GPIO1_RXLED_MODE BIT(1)
#define CP2105_GPIO1_RS485_MODE BIT(2)
+/* CP2104 port configuration values */
+#define CP2104_GPIO0_TXLED_MODE BIT(0)
+#define CP2104_GPIO1_RXLED_MODE BIT(1)
+#define CP2104_GPIO2_RS485_MODE BIT(2)
+
/* CP2102N configuration array indices */
#define CP210X_2NCONFIG_CONFIG_VERSION_IDX 2
#define CP210X_2NCONFIG_GPIO_MODE_IDX 581
@@ -1051,14 +1074,11 @@ static speed_t cp210x_get_an205_rate(speed_t baud)
return cp210x_an205_table1[i].rate;
}
-static speed_t cp210x_get_actual_rate(struct usb_serial *serial, speed_t baud)
+static speed_t cp210x_get_actual_rate(speed_t baud)
{
- struct cp210x_serial_private *priv = usb_get_serial_data(serial);
unsigned int prescale = 1;
unsigned int div;
- baud = clamp(baud, 300u, priv->max_speed);
-
if (baud <= 365)
prescale = 4;
@@ -1101,20 +1121,18 @@ static void cp210x_change_speed(struct tty_struct *tty,
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
u32 baud;
- baud = tty->termios.c_ospeed;
-
/*
* This maps the requested rate to the actual rate, a valid rate on
* cp2102 or cp2103, or to an arbitrary rate in [1M, max_speed].
*
* NOTE: B0 is not implemented.
*/
+ baud = clamp(tty->termios.c_ospeed, priv->min_speed, priv->max_speed);
+
if (priv->use_actual_rate)
- baud = cp210x_get_actual_rate(serial, baud);
+ baud = cp210x_get_actual_rate(baud);
else if (baud < 1000000)
baud = cp210x_get_an205_rate(baud);
- else if (baud > priv->max_speed)
- baud = priv->max_speed;
dev_dbg(&port->dev, "%s - setting baud rate to %u\n", __func__, baud);
if (cp210x_write_u32_reg(port, CP210X_SET_BAUDRATE, baud)) {
@@ -1353,8 +1371,13 @@ static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
if (priv->partnum == CP210X_PARTNUM_CP2105)
req_type = REQTYPE_INTERFACE_TO_HOST;
+ result = usb_autopm_get_interface(serial->interface);
+ if (result)
+ return result;
+
result = cp210x_read_vendor_block(serial, req_type,
CP210X_READ_LATCH, &buf, sizeof(buf));
+ usb_autopm_put_interface(serial->interface);
if (result < 0)
return result;
@@ -1375,6 +1398,10 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
buf.mask = BIT(gpio);
+ result = usb_autopm_get_interface(serial->interface);
+ if (result)
+ goto out;
+
if (priv->partnum == CP210X_PARTNUM_CP2105) {
result = cp210x_write_vendor_block(serial,
REQTYPE_HOST_TO_INTERFACE,
@@ -1392,6 +1419,8 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
+ usb_autopm_put_interface(serial->interface);
+out:
if (result < 0) {
dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
result);
@@ -1470,7 +1499,7 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
struct cp210x_pin_mode mode;
- struct cp210x_config config;
+ struct cp210x_dual_port_config config;
u8 intf_num = cp210x_interface_num(serial);
u8 iface_config;
int result;
@@ -1529,6 +1558,56 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
return 0;
}
+static int cp2104_gpioconf_init(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ struct cp210x_single_port_config config;
+ u8 iface_config;
+ u8 gpio_latch;
+ int result;
+ u8 i;
+
+ result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PORTCONFIG, &config,
+ sizeof(config));
+ if (result < 0)
+ return result;
+
+ priv->gc.ngpio = 4;
+
+ iface_config = config.device_cfg;
+ priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
+ CP210X_GPIO_MODE_MASK) >>
+ CP210X_GPIO_MODE_OFFSET);
+ gpio_latch = (u8)((le16_to_cpu(config.reset_state) &
+ CP210X_GPIO_MODE_MASK) >>
+ CP210X_GPIO_MODE_OFFSET);
+
+ /* mark all pins which are not in GPIO mode */
+ if (iface_config & CP2104_GPIO0_TXLED_MODE) /* GPIO 0 */
+ priv->gpio_altfunc |= BIT(0);
+ if (iface_config & CP2104_GPIO1_RXLED_MODE) /* GPIO 1 */
+ priv->gpio_altfunc |= BIT(1);
+ if (iface_config & CP2104_GPIO2_RS485_MODE) /* GPIO 2 */
+ priv->gpio_altfunc |= BIT(2);
+
+ /*
+ * Like the CP2102N, the CP2104 has no strict input and output pin
+ * modes, so do the same input-mode emulation as for the CP2102N.
+ */
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ /*
+ * Set direction to "input" iff pin is open-drain and reset
+ * value is 1.
+ */
+ if (!(priv->gpio_pushpull & BIT(i)) && (gpio_latch & BIT(i)))
+ priv->gpio_input |= BIT(i);
+ }
+
+ return 0;
+}
+
static int cp2102n_gpioconf_init(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
@@ -1574,12 +1653,6 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
if (config_version != 0x01)
return -ENOTSUPP;
- /*
- * We only support 4 GPIOs even on the QFN28 package, because
- * config locations of GPIOs 4-6 determined using reverse
- * engineering revealed conflicting offsets with other
- * documented functions. So we'll just play it safe for now.
- */
priv->gc.ngpio = 4;
/*
@@ -1594,6 +1667,19 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
/* 0 indicates GPIO mode, 1 is alternate function */
priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+ if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
+ /*
+ * For the QFN28 package, GPIO4-6 are controlled by
+ * the low three bits of the mode/latch fields.
+ * Contrary to the document linked above, the bits for
+ * the SUSPEND pins are elsewhere. No alternate
+ * function is available for these pins.
+ */
+ priv->gc.ngpio = 7;
+ gpio_latch |= (gpio_rst_latch & 7) << 4;
+ priv->gpio_pushpull |= (gpio_pushpull & 7) << 4;
+ }
+
/*
* The CP2102N does not strictly has input and output pin modes,
* it only knows open-drain and push-pull modes which is set at
@@ -1620,6 +1706,9 @@ static int cp210x_gpio_init(struct usb_serial *serial)
int result;
switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2104:
+ result = cp2104_gpioconf_init(serial);
+ break;
case CP210X_PARTNUM_CP2105:
result = cp2105_gpioconf_init(serial);
break;
@@ -1716,6 +1805,7 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
bool use_actual_rate = false;
+ speed_t min = 300;
speed_t max;
switch (priv->partnum) {
@@ -1738,6 +1828,7 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
use_actual_rate = true;
max = 2000000; /* ECI */
} else {
+ min = 2400;
max = 921600; /* SCI */
}
break;
@@ -1752,6 +1843,7 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
break;
}
+ priv->min_speed = min;
priv->max_speed = max;
priv->use_actual_rate = use_actual_rate;
}
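
The reworked cp210x_change_speed() above clamps the requested rate into [min_speed, max_speed] before any mapping, so out-of-range requests degrade gracefully instead of picking an arbitrary rate. A tiny stand-alone illustration of that clamping, mirroring what the kernel's clamp() macro does (plain userspace C, the limits are only examples):

#include <stdio.h>

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

int main(void)
{
	unsigned int min_speed = 2400, max_speed = 921600;	/* e.g. CP2105 SCI */
	unsigned int requests[] = { 300, 9600, 2000000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%u -> %u\n", requests[i],
		       clamp_uint(requests[i], min_speed, max_speed));
	return 0;
}
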
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1ab2a6191013..8f5b17471759 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1025,6 +1025,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+ /* EZPrototypes devices */
+ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
{ } /* Terminating entry */
};
@@ -1783,6 +1785,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
int result;
u16 val;
+ result = usb_autopm_get_interface(serial->interface);
+ if (result)
+ return result;
+
val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
@@ -1795,6 +1801,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
val, result);
}
+ usb_autopm_put_interface(serial->interface);
+
return result;
}
@@ -1846,9 +1854,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
unsigned char *buf;
int result;
+ result = usb_autopm_get_interface(serial->interface);
+ if (result)
+ return result;
+
buf = kmalloc(1, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ usb_autopm_put_interface(serial->interface);
return -ENOMEM;
+ }
result = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
@@ -1863,6 +1877,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
}
kfree(buf);
+ usb_autopm_put_interface(serial->interface);
return result;
}
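
The two ftdi_sio hunks above bracket the vendor control transfers with usb_autopm_get_interface()/usb_autopm_put_interface(), so the interface is resumed before the request and the PM reference is dropped on every exit path, including the allocation failure. A kernel-style sketch of the pattern with a hypothetical helper (request codes illustrative):

#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Resume the interface, issue one vendor request, then drop the PM ref.
 * Every return path after the get must go through the put. */
static int do_vendor_request(struct usb_serial *serial, u8 request, u16 value)
{
	int ret;

	ret = usb_autopm_get_interface(serial->interface);
	if (ret)
		return ret;

	ret = usb_control_msg(serial->dev,
			      usb_sndctrlpipe(serial->dev, 0),
			      request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
			      value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);

	usb_autopm_put_interface(serial->interface);

	return ret;
}
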
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 975d02666c5a..b863bedb55a1 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1309,6 +1309,12 @@
#define IONICS_PLUGCOMPUTER_PID 0x0102
/*
+ * EZPrototypes (PID reseller)
+ */
+#define EZPROTOTYPES_VID 0x1c40
+#define HJELMSLUND_USB485_ISO_PID 0x0477
+
+/*
* Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/keyspan_usa26msg.h b/drivers/usb/serial/keyspan_usa26msg.h
index 09e21e84fc4e..a68f1fb25b8a 100644
--- a/drivers/usb/serial/keyspan_usa26msg.h
+++ b/drivers/usb/serial/keyspan_usa26msg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
usa26msg.h
diff --git a/drivers/usb/serial/keyspan_usa28msg.h b/drivers/usb/serial/keyspan_usa28msg.h
index dee454c4609a..a19f3fe5d98d 100644
--- a/drivers/usb/serial/keyspan_usa28msg.h
+++ b/drivers/usb/serial/keyspan_usa28msg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
usa28msg.h
diff --git a/drivers/usb/serial/keyspan_usa49msg.h b/drivers/usb/serial/keyspan_usa49msg.h
index 163b2dea2ec5..8c3970fdd868 100644
--- a/drivers/usb/serial/keyspan_usa49msg.h
+++ b/drivers/usb/serial/keyspan_usa49msg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
usa49msg.h
diff --git a/drivers/usb/serial/keyspan_usa67msg.h b/drivers/usb/serial/keyspan_usa67msg.h
index 20fa3e2f7187..dcf502fdbb44 100644
--- a/drivers/usb/serial/keyspan_usa67msg.h
+++ b/drivers/usb/serial/keyspan_usa67msg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
usa67msg.h
diff --git a/drivers/usb/serial/keyspan_usa90msg.h b/drivers/usb/serial/keyspan_usa90msg.h
index 86708ecd8735..c4ca0f631d20 100644
--- a/drivers/usb/serial/keyspan_usa90msg.h
+++ b/drivers/usb/serial/keyspan_usa90msg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
usa90msg.h
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index aef15497ff31..11b21d9410f3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1148,6 +1148,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
.driver_info = NCTRL(0) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 98e7a5df0f6d..bb3f9aa4a909 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 4e2554d55362..559941ca884d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -8,6 +8,7 @@
#define PL2303_VENDOR_ID 0x067b
#define PL2303_PRODUCT_ID 0x2303
+#define PL2303_PRODUCT_ID_TB 0x2304
#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
@@ -20,6 +21,7 @@
#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
+
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4d0273508043..edbbb13d6de6 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
- { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
+ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 6fd427284b12..59aad38b490a 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB Storage driver configuration
#
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index edcf2be0e0eb..395cf8fb5870 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -167,6 +167,7 @@ static int rio_karma_transport(struct scsi_cmnd *srb, struct us_data *us)
static void rio_karma_destructor(void *extra)
{
struct karma_data *data = (struct karma_data *) extra;
+
kfree(data->recv);
}
@@ -174,6 +175,7 @@ static int rio_karma_init(struct us_data *us)
{
int ret = 0;
struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO);
+
if (!data)
goto out;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index fde2e71a6ade..a73ea495d5a7 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -235,8 +235,12 @@ static int slave_configure(struct scsi_device *sdev)
if (!(us->fflags & US_FL_NEEDS_CAP16))
sdev->try_rc_10_first = 1;
- /* assume SPC3 or latter devices support sense size > 18 */
- if (sdev->scsi_level > SCSI_SPC_2)
+ /*
+ * Assume SPC-3 or later devices support sense sizes > 18 bytes
+ * unless the US_FL_BAD_SENSE quirk is specified.
+ */
+ if (sdev->scsi_level > SCSI_SPC_2 &&
+ !(us->fflags & US_FL_BAD_SENSE))
us->fflags |= US_FL_SANE_SENSE;
/*
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f7f83b21dc74..ea0d27a94afe 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1266,6 +1266,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
US_FL_FIX_CAPACITY ),
/*
+ * Reported by Icenowy Zheng <icenowy@aosc.io>
+ * The SMI SM3350 USB-UFS bridge controller enters a bad state in which
+ * it no longer processes read/write commands if a long sense is
+ * requested, so force the use of 18-byte sense data.
+ */
+UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
+ "SMI",
+ "SM3350 UFS-to-USB-Mass-Storage bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
+/*
* Reported by Paul Hartman <paul.hartman+linux@gmail.com>
* This card reader returns "Illegal Request, Logical Block Address
* Out of Range" for the first READ(10) after a new card is inserted.
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 30a847c2089d..89d9193bd1cf 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig TYPEC
tristate "USB Type-C Support"
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index efef2a64bc51..ef2226eb7a33 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "USB Type-C Alternate Mode drivers"
diff --git a/drivers/usb/typec/altmodes/Makefile b/drivers/usb/typec/altmodes/Makefile
index 5caf094ef71a..eda8456f1c92 100644
--- a/drivers/usb/typec/altmodes/Makefile
+++ b/drivers/usb/typec/altmodes/Makefile
@@ -1,2 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
obj-$(CONFIG_TYPEC_DP_ALTMODE) += typec_displayport.o
typec_displayport-y := displayport.o
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 3f06e94771a7..610d790bc9be 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -24,10 +24,6 @@ enum {
DP_CONF_DUAL_D,
};
-/* Helper for setting/getting the pin assignement value to the configuration */
-#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
-#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
-
/* Pin assignments that use USB3.1 Gen2 signaling to carry DP protocol */
#define DP_PIN_ASSIGN_GEN2_BR_MASK (BIT(DP_PIN_ASSIGN_A) | \
BIT(DP_PIN_ASSIGN_B))
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 5db0593ca0bd..2eb623841847 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include "bus.h"
@@ -204,15 +205,32 @@ static void typec_altmode_put_partner(struct altmode *altmode)
put_device(&adev->dev);
}
-static int __typec_port_match(struct device *dev, const void *name)
+static int typec_port_fwnode_match(struct device *dev, const void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode;
+}
+
+static int typec_port_name_match(struct device *dev, const void *name)
{
return !strcmp((const char *)name, dev_name(dev));
}
static void *typec_port_match(struct device_connection *con, int ep, void *data)
{
- return class_find_device(typec_class, NULL, con->endpoint[ep],
- __typec_port_match);
+ struct device *dev;
+
+ /*
+ * FIXME: Check whether the fwnode supports the requested SVID. If it
+ * does, we need to return ERR_PTR(-EPROBE_DEFER) when there is no device.
+ */
+ if (con->fwnode)
+ return class_find_device(typec_class, NULL, con->fwnode,
+ typec_port_fwnode_match);
+
+ dev = class_find_device(typec_class, NULL, con->endpoint[ep],
+ typec_port_name_match);
+
+ return dev ? dev : ERR_PTR(-EPROBE_DEFER);
}
struct typec_altmode *
@@ -277,7 +295,7 @@ void typec_altmode_update_active(struct typec_altmode *adev, bool active)
if (adev->active == active)
return;
- if (!is_typec_port(adev->dev.parent)) {
+ if (!is_typec_port(adev->dev.parent) && adev->dev.driver) {
if (!active)
module_put(adev->dev.driver->owner);
else
@@ -1496,11 +1514,8 @@ typec_port_register_altmode(struct typec_port *port,
{
struct typec_altmode *adev;
struct typec_mux *mux;
- char id[10];
-
- sprintf(id, "id%04xm%02x", desc->svid, desc->mode);
- mux = typec_mux_get(&port->dev, id);
+ mux = typec_mux_get(&port->dev, desc);
if (IS_ERR(mux))
return ERR_CAST(mux);
@@ -1593,7 +1608,7 @@ struct typec_port *typec_register_port(struct device *parent,
return ERR_CAST(port->sw);
}
- port->mux = typec_mux_get(&port->dev, "typec-mux");
+ port->mux = typec_mux_get(&port->dev, NULL);
if (IS_ERR(port->mux)) {
put_device(&port->dev);
return ERR_CAST(port->mux);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index d990aa510fab..2ce54f3fc79c 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -11,6 +11,8 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
#include <linux/usb/typec_mux.h>
static DEFINE_MUTEX(switch_lock);
@@ -23,15 +25,25 @@ static void *typec_switch_match(struct device_connection *con, int ep,
{
struct typec_switch *sw;
- list_for_each_entry(sw, &switch_list, entry)
- if (!strcmp(con->endpoint[ep], dev_name(sw->dev)))
- return sw;
+ if (!con->fwnode) {
+ list_for_each_entry(sw, &switch_list, entry)
+ if (!strcmp(con->endpoint[ep], dev_name(sw->dev)))
+ return sw;
+ return ERR_PTR(-EPROBE_DEFER);
+ }
/*
- * We only get called if a connection was found, tell the caller to
- * wait for the switch to show up.
+ * With OF graph the mux node must have a boolean device property named
+ * "orientation-switch".
*/
- return ERR_PTR(-EPROBE_DEFER);
+ if (con->id && !fwnode_property_present(con->fwnode, con->id))
+ return NULL;
+
+ list_for_each_entry(sw, &switch_list, entry)
+ if (dev_fwnode(sw->dev) == con->fwnode)
+ return sw;
+
+ return con->id ? ERR_PTR(-EPROBE_DEFER) : NULL;
}
/**
@@ -48,7 +60,7 @@ struct typec_switch *typec_switch_get(struct device *dev)
struct typec_switch *sw;
mutex_lock(&switch_lock);
- sw = device_connection_find_match(dev, "typec-switch", NULL,
+ sw = device_connection_find_match(dev, "orientation-switch", NULL,
typec_switch_match);
if (!IS_ERR_OR_NULL(sw)) {
WARN_ON(!try_module_get(sw->dev->driver->owner));
@@ -112,35 +124,87 @@ EXPORT_SYMBOL_GPL(typec_switch_unregister);
static void *typec_mux_match(struct device_connection *con, int ep, void *data)
{
+ const struct typec_altmode_desc *desc = data;
struct typec_mux *mux;
+ int nval;
+ bool match;
+ u16 *val;
+ int i;
- list_for_each_entry(mux, &mux_list, entry)
- if (!strcmp(con->endpoint[ep], dev_name(mux->dev)))
- return mux;
+ if (!con->fwnode) {
+ list_for_each_entry(mux, &mux_list, entry)
+ if (!strcmp(con->endpoint[ep], dev_name(mux->dev)))
+ return mux;
+ return ERR_PTR(-EPROBE_DEFER);
+ }
/*
- * We only get called if a connection was found, tell the caller to
- * wait for the switch to show up.
+ * Check whether the identifier has already been "consumed". If it
+ * has, no extra connection identification is needed.
*/
+ match = !con->id;
+ if (match)
+ goto find_mux;
+
+ /* Accessory Mode muxes */
+ if (!desc) {
+ match = fwnode_property_present(con->fwnode, "accessory");
+ if (match)
+ goto find_mux;
+ return NULL;
+ }
+
+ /* Alternate Mode muxes */
+ nval = fwnode_property_read_u16_array(con->fwnode, "svid", NULL, 0);
+ if (nval <= 0)
+ return NULL;
+
+ val = kcalloc(nval, sizeof(*val), GFP_KERNEL);
+ if (!val)
+ return ERR_PTR(-ENOMEM);
+
+ nval = fwnode_property_read_u16_array(con->fwnode, "svid", val, nval);
+ if (nval < 0) {
+ kfree(val);
+ return ERR_PTR(nval);
+ }
+
+ for (i = 0; i < nval; i++) {
+ match = val[i] == desc->svid;
+ if (match) {
+ kfree(val);
+ goto find_mux;
+ }
+ }
+ kfree(val);
+ return NULL;
+
+find_mux:
+ list_for_each_entry(mux, &mux_list, entry)
+ if (dev_fwnode(mux->dev) == con->fwnode)
+ return mux;
+
return ERR_PTR(-EPROBE_DEFER);
}
/**
* typec_mux_get - Find USB Type-C Multiplexer
* @dev: The caller device
- * @name: Mux identifier
+ * @desc: Alt Mode description
*
* Finds a mux linked to the caller. This function is primarily meant for the
* Type-C drivers. Returns a reference to the mux on success, NULL if no
* matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a connection
* was found but the mux has not been enumerated yet.
*/
-struct typec_mux *typec_mux_get(struct device *dev, const char *name)
+struct typec_mux *typec_mux_get(struct device *dev,
+ const struct typec_altmode_desc *desc)
{
struct typec_mux *mux;
mutex_lock(&mux_lock);
- mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
+ mux = device_connection_find_match(dev, "mode-switch", (void *)desc,
+ typec_mux_match);
if (!IS_ERR_OR_NULL(mux)) {
WARN_ON(!try_module_get(mux->dev->driver->owner));
get_device(mux->dev);
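
The new typec_mux_match() above first queries the length of the "svid" u16 array property with a NULL buffer, then reads the array and scans it for the requested SVID. A condensed sketch of that two-pass fwnode property read; the helper name is hypothetical, the fwnode_property_read_u16_array() calls mirror the hunk:

#include <linux/property.h>
#include <linux/slab.h>

/* Return true if @fwnode lists @svid in its "svid" property. */
static bool fwnode_has_svid(struct fwnode_handle *fwnode, u16 svid)
{
	bool match = false;
	u16 *val;
	int nval;
	int i;

	/* First call with a NULL buffer returns the element count. */
	nval = fwnode_property_read_u16_array(fwnode, "svid", NULL, 0);
	if (nval <= 0)
		return false;

	val = kcalloc(nval, sizeof(*val), GFP_KERNEL);
	if (!val)
		return false;

	/* Second call fills the buffer; 0 means success. */
	if (fwnode_property_read_u16_array(fwnode, "svid", val, nval) == 0) {
		for (i = 0; i < nval; i++) {
			if (val[i] == svid) {
				match = true;
				break;
			}
		}
	}

	kfree(val);
	return match;
}
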
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 9a954d2b8d8f..01ed0d5e10e8 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
menu "USB Type-C Multiplexer/DeMultiplexer Switch support"
config TYPEC_MUX_PI3USB30532
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index f03ea8a61768..72481bbb2af3 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config TYPEC_TCPM
tristate "USB Type-C Port Controller Manager"
depends on USB
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4bc29b586698..0f62db091d8d 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
pdo_pps_apdo_max_voltage(snk));
port->pps_data.max_curr = min_pps_apdo_current(src, snk);
port->pps_data.out_volt = min(port->pps_data.max_volt,
- port->pps_data.out_volt);
+ max(port->pps_data.min_volt,
+ port->pps_data.out_volt));
port->pps_data.op_curr = min(port->pps_data.max_curr,
port->pps_data.op_curr);
}
@@ -4434,66 +4435,6 @@ sink:
return 0;
}
-int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo)
-{
- if (tcpm_validate_caps(port, pdo, nr_pdo))
- return -EINVAL;
-
- mutex_lock(&port->lock);
- port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
- switch (port->state) {
- case SRC_UNATTACHED:
- case SRC_ATTACH_WAIT:
- case SRC_TRYWAIT:
- tcpm_set_cc(port, tcpm_rp_cc(port));
- break;
- case SRC_SEND_CAPABILITIES:
- case SRC_NEGOTIATE_CAPABILITIES:
- case SRC_READY:
- case SRC_WAIT_NEW_CAPABILITIES:
- tcpm_set_cc(port, tcpm_rp_cc(port));
- tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
- break;
- default:
- break;
- }
- mutex_unlock(&port->lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
-
-int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo,
- unsigned int operating_snk_mw)
-{
- if (tcpm_validate_caps(port, pdo, nr_pdo))
- return -EINVAL;
-
- mutex_lock(&port->lock);
- port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
- port->operating_snk_mw = operating_snk_mw;
- port->update_sink_caps = true;
-
- switch (port->state) {
- case SNK_NEGOTIATE_CAPABILITIES:
- case SNK_NEGOTIATE_PPS_CAPABILITIES:
- case SNK_READY:
- case SNK_TRANSITION_SINK:
- case SNK_TRANSITION_SINK_VBUS:
- if (port->pps_data.active)
- tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
- else
- tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
- break;
- default:
- break;
- }
- mutex_unlock(&port->lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
-
/* Power Supply access to expose source power information */
enum tcpm_psy_online_states {
TCPM_PSY_OFFLINE = 0,
@@ -4810,12 +4751,12 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
err = devm_tcpm_psy_register(port);
if (err)
- goto out_destroy_wq;
+ goto out_role_sw_put;
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
if (IS_ERR(port->typec_port)) {
err = PTR_ERR(port->typec_port);
- goto out_destroy_wq;
+ goto out_role_sw_put;
}
if (tcpc->config && tcpc->config->alt_modes) {
@@ -4848,8 +4789,10 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
tcpm_log(port, "%s: registered", dev_name(dev));
return port;
-out_destroy_wq:
+out_role_sw_put:
usb_role_switch_put(port->role_sw);
+out_destroy_wq:
+ tcpm_debugfs_exit(port);
destroy_workqueue(port->wq);
return ERR_PTR(err);
}
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index 1c0033ad8738..c674abe3cf99 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -14,6 +14,8 @@
#include <linux/usb/typec.h>
/* Register offsets */
+#define TPS_REG_VID 0x00
+#define TPS_REG_MODE 0x03
#define TPS_REG_CMD1 0x08
#define TPS_REG_DATA1 0x09
#define TPS_REG_INT_EVENT1 0x14
@@ -66,6 +68,20 @@ struct tps6598x_rx_identity_reg {
#define TPS_TASK_TIMEOUT 1
#define TPS_TASK_REJECTED 3
+enum {
+ TPS_MODE_APP,
+ TPS_MODE_BOOT,
+ TPS_MODE_BIST,
+ TPS_MODE_DISC,
+};
+
+static const char *const modes[] = {
+ [TPS_MODE_APP] = "APP ",
+ [TPS_MODE_BOOT] = "BOOT",
+ [TPS_MODE_BIST] = "BIST",
+ [TPS_MODE_DISC] = "DISC",
+};
+
/* Unrecognized commands will be replaced with "!CMD" */
#define INVALID_CMD(_cmd_) (_cmd_ == 0x444d4321)
@@ -110,6 +126,20 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
return 0;
}
+static int tps6598x_block_write(struct tps6598x *tps, u8 reg,
+ void *val, size_t len)
+{
+ u8 data[TPS_MAX_LEN + 1];
+
+ if (!tps->i2c_protocol)
+ return regmap_raw_write(tps->regmap, reg, val, len);
+
+ data[0] = len;
+ memcpy(&data[1], val, len);
+
+ return regmap_raw_write(tps->regmap, reg, data, sizeof(data));
+}
+
static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
{
return tps6598x_block_read(tps, reg, val, sizeof(u16));
@@ -127,23 +157,23 @@ static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
{
- return regmap_raw_write(tps->regmap, reg, &val, sizeof(u16));
+ return tps6598x_block_write(tps, reg, &val, sizeof(u16));
}
static inline int tps6598x_write32(struct tps6598x *tps, u8 reg, u32 val)
{
- return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
+ return tps6598x_block_write(tps, reg, &val, sizeof(u32));
}
static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
{
- return regmap_raw_write(tps->regmap, reg, &val, sizeof(u64));
+ return tps6598x_block_write(tps, reg, &val, sizeof(u64));
}
static inline int
tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
{
- return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
+ return tps6598x_block_write(tps, reg, &val, sizeof(u32));
}
static int tps6598x_read_partner_identity(struct tps6598x *tps)
@@ -229,8 +259,8 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
return -EBUSY;
if (in_len) {
- ret = regmap_raw_write(tps->regmap, TPS_REG_DATA1,
- in_data, in_len);
+ ret = tps6598x_block_write(tps, TPS_REG_DATA1,
+ in_data, in_len);
if (ret)
return ret;
}
@@ -384,6 +414,32 @@ err_unlock:
return IRQ_HANDLED;
}
+static int tps6598x_check_mode(struct tps6598x *tps)
+{
+ char mode[5] = { };
+ int ret;
+
+ ret = tps6598x_read32(tps, TPS_REG_MODE, (void *)mode);
+ if (ret)
+ return ret;
+
+ switch (match_string(modes, ARRAY_SIZE(modes), mode)) {
+ case TPS_MODE_APP:
+ return 0;
+ case TPS_MODE_BOOT:
+ dev_warn(tps->dev, "dead-battery condition\n");
+ return 0;
+ case TPS_MODE_BIST:
+ case TPS_MODE_DISC:
+ default:
+ dev_err(tps->dev, "controller in unsupported mode \"%s\"\n",
+ mode);
+ break;
+ }
+
+ return -ENODEV;
+}
+
static const struct regmap_config tps6598x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -409,10 +465,8 @@ static int tps6598x_probe(struct i2c_client *client)
if (IS_ERR(tps->regmap))
return PTR_ERR(tps->regmap);
- ret = tps6598x_read32(tps, 0, &vid);
- if (ret < 0)
- return ret;
- if (!vid)
+ ret = tps6598x_read32(tps, TPS_REG_VID, &vid);
+ if (ret < 0 || !vid)
return -ENODEV;
/*
@@ -425,6 +479,11 @@ static int tps6598x_probe(struct i2c_client *client)
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
tps->i2c_protocol = true;
+ /* Make sure the controller has application firmware running */
+ ret = tps6598x_check_mode(tps);
+ if (ret)
+ return ret;
+
ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
if (ret < 0)
return ret;
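
tps6598x_check_mode() above reads the 4-character mode register as a string and uses match_string() to map it onto the modes[] table. A small sketch of match_string() against such a fixed table (kernel-style; table contents copied from the hunk, helper name hypothetical):

#include <linux/kernel.h>
#include <linux/string.h>

/* Note the trailing space in "APP ": entries must match the register
 * contents exactly, including padding. */
static const char * const modes[] = {
	"APP ", "BOOT", "BIST", "DISC",
};

/* Returns the index of @mode in modes[], or a negative errno. */
static int mode_index(const char *mode)
{
	return match_string(modes, ARRAY_SIZE(modes), mode);
}
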
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index 78118883f96c..15c2ac7db02d 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config TYPEC_UCSI
tristate "USB Type-C Connector System Software Interface driver"
depends on !CPU_BIG_ENDIAN
diff --git a/drivers/usb/typec/ucsi/debug.h b/drivers/usb/typec/ucsi/debug.h
deleted file mode 100644
index fdeff39df120..000000000000
--- a/drivers/usb/typec/ucsi/debug.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UCSI_DEBUG_H
-#define __UCSI_DEBUG_H
-
-#include "ucsi.h"
-
-static const char * const ucsi_cmd_strs[] = {
- [0] = "Unknown command",
- [UCSI_PPM_RESET] = "PPM_RESET",
- [UCSI_CANCEL] = "CANCEL",
- [UCSI_CONNECTOR_RESET] = "CONNECTOR_RESET",
- [UCSI_ACK_CC_CI] = "ACK_CC_CI",
- [UCSI_SET_NOTIFICATION_ENABLE] = "SET_NOTIFICATION_ENABLE",
- [UCSI_GET_CAPABILITY] = "GET_CAPABILITY",
- [UCSI_GET_CONNECTOR_CAPABILITY] = "GET_CONNECTOR_CAPABILITY",
- [UCSI_SET_UOM] = "SET_UOM",
- [UCSI_SET_UOR] = "SET_UOR",
- [UCSI_SET_PDM] = "SET_PDM",
- [UCSI_SET_PDR] = "SET_PDR",
- [UCSI_GET_ALTERNATE_MODES] = "GET_ALTERNATE_MODES",
- [UCSI_GET_CAM_SUPPORTED] = "GET_CAM_SUPPORTED",
- [UCSI_GET_CURRENT_CAM] = "GET_CURRENT_CAM",
- [UCSI_SET_NEW_CAM] = "SET_NEW_CAM",
- [UCSI_GET_PDOS] = "GET_PDOS",
- [UCSI_GET_CABLE_PROPERTY] = "GET_CABLE_PROPERTY",
- [UCSI_GET_CONNECTOR_STATUS] = "GET_CONNECTOR_STATUS",
- [UCSI_GET_ERROR_STATUS] = "GET_ERROR_STATUS",
-};
-
-static inline const char *ucsi_cmd_str(u64 raw_cmd)
-{
- u8 cmd = raw_cmd & GENMASK(7, 0);
-
- return ucsi_cmd_strs[(cmd >= ARRAY_SIZE(ucsi_cmd_strs)) ? 0 : cmd];
-}
-
-static const char * const ucsi_ack_strs[] = {
- [0] = "",
- [UCSI_ACK_EVENT] = "event",
- [UCSI_ACK_CMD] = "command",
-};
-
-static inline const char *ucsi_ack_str(u8 ack)
-{
- return ucsi_ack_strs[(ack >= ARRAY_SIZE(ucsi_ack_strs)) ? 0 : ack];
-}
-
-static inline const char *ucsi_cci_str(u32 cci)
-{
- if (cci & GENMASK(7, 0)) {
- if (cci & BIT(29))
- return "Event pending (ACK completed)";
- if (cci & BIT(31))
- return "Event pending (command completed)";
- return "Connector Change";
- }
- if (cci & BIT(29))
- return "ACK completed";
- if (cci & BIT(31))
- return "Command completed";
-
- return "";
-}
-
-#endif /* __UCSI_DEBUG_H */
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index d9a6ff6e673c..ffa3b4c3f338 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -1,3 +1,62 @@
// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
+#include "ucsi.h"
#include "trace.h"
+
+static const char * const ucsi_cmd_strs[] = {
+ [0] = "Unknown command",
+ [UCSI_PPM_RESET] = "PPM_RESET",
+ [UCSI_CANCEL] = "CANCEL",
+ [UCSI_CONNECTOR_RESET] = "CONNECTOR_RESET",
+ [UCSI_ACK_CC_CI] = "ACK_CC_CI",
+ [UCSI_SET_NOTIFICATION_ENABLE] = "SET_NOTIFICATION_ENABLE",
+ [UCSI_GET_CAPABILITY] = "GET_CAPABILITY",
+ [UCSI_GET_CONNECTOR_CAPABILITY] = "GET_CONNECTOR_CAPABILITY",
+ [UCSI_SET_UOM] = "SET_UOM",
+ [UCSI_SET_UOR] = "SET_UOR",
+ [UCSI_SET_PDM] = "SET_PDM",
+ [UCSI_SET_PDR] = "SET_PDR",
+ [UCSI_GET_ALTERNATE_MODES] = "GET_ALTERNATE_MODES",
+ [UCSI_GET_CAM_SUPPORTED] = "GET_CAM_SUPPORTED",
+ [UCSI_GET_CURRENT_CAM] = "GET_CURRENT_CAM",
+ [UCSI_SET_NEW_CAM] = "SET_NEW_CAM",
+ [UCSI_GET_PDOS] = "GET_PDOS",
+ [UCSI_GET_CABLE_PROPERTY] = "GET_CABLE_PROPERTY",
+ [UCSI_GET_CONNECTOR_STATUS] = "GET_CONNECTOR_STATUS",
+ [UCSI_GET_ERROR_STATUS] = "GET_ERROR_STATUS",
+};
+
+const char *ucsi_cmd_str(u64 raw_cmd)
+{
+ u8 cmd = raw_cmd & GENMASK(7, 0);
+
+ return ucsi_cmd_strs[(cmd >= ARRAY_SIZE(ucsi_cmd_strs)) ? 0 : cmd];
+}
+
+static const char * const ucsi_ack_strs[] = {
+ [0] = "",
+ [UCSI_ACK_EVENT] = "event",
+ [UCSI_ACK_CMD] = "command",
+};
+
+const char *ucsi_ack_str(u8 ack)
+{
+ return ucsi_ack_strs[(ack >= ARRAY_SIZE(ucsi_ack_strs)) ? 0 : ack];
+}
+
+const char *ucsi_cci_str(u32 cci)
+{
+ if (cci & GENMASK(7, 0)) {
+ if (cci & BIT(29))
+ return "Event pending (ACK completed)";
+ if (cci & BIT(31))
+ return "Event pending (command completed)";
+ return "Connector Change";
+ }
+ if (cci & BIT(29))
+ return "ACK completed";
+ if (cci & BIT(31))
+ return "Command completed";
+
+ return "";
+}
diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h
index d5092446ecc6..5e2906df2db7 100644
--- a/drivers/usb/typec/ucsi/trace.h
+++ b/drivers/usb/typec/ucsi/trace.h
@@ -7,8 +7,11 @@
#define __UCSI_TRACE_H
#include <linux/tracepoint.h>
-#include "ucsi.h"
-#include "debug.h"
+
+const char *ucsi_cmd_str(u64 raw_cmd);
+const char *ucsi_ack_str(u8 ack);
+const char *ucsi_cci_str(u32 cci);
+const char *ucsi_recipient_str(u8 recipient);
DECLARE_EVENT_CLASS(ucsi_log_ack,
TP_PROTO(u8 ack),
diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
index a20b65cb6678..2f86b28fa3da 100644
--- a/drivers/usb/usbip/Kconfig
+++ b/drivers/usb/usbip/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config USBIP_CORE
tristate "USB/IP support"
depends on NET
diff --git a/drivers/usb/usbip/README b/drivers/usb/usbip/README
deleted file mode 100644
index 41a2cf2e77a6..000000000000
--- a/drivers/usb/usbip/README
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
- - more discussion about the protocol
- - testing
- - review of the userspace interface
- - document the protocol
-
-Please send patches for this code to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 1e592ec94ba4..f46ee1fefe02 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -702,8 +702,10 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
}
vdev = &vhci_hcd->vdev[portnum-1];
- /* patch to usb_sg_init() is in 2.5.60 */
- BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
+ if (!urb->transfer_buffer && urb->transfer_buffer_length) {
+ dev_dbg(dev, "Null URB transfer buffer\n");
+ return -EINVAL;
+ }
spin_lock_irqsave(&vhci->lock, flags);
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 1634d8698e15..a72c17ff1c6a 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -297,7 +297,8 @@ static void vep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
struct vrequest *req;
- if (WARN_ON(!_ep || !_req))
+ /* ep is always valid here - see usb_ep_free_request() */
+ if (!_req)
return;
req = to_vrequest(_req);
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
index 348de1d6726e..12e89189ca7d 100644
--- a/drivers/usb/wusbcore/Kconfig
+++ b/drivers/usb/wusbcore/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Wireless USB Core configuration
#
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 222228c5c1e1..af77064c7456 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -302,10 +302,8 @@ static ssize_t cbaf_wusb_chid_show(struct device *dev,
{
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
- char pr_chid[WUSB_CKHDID_STRSIZE];
- ckhdid_printf(pr_chid, sizeof(pr_chid), &cbaf->chid);
- return scnprintf(buf, PAGE_SIZE, "%s\n", pr_chid);
+ return sprintf(buf, "%16ph\n", cbaf->chid.data);
}
static ssize_t cbaf_wusb_chid_store(struct device *dev,
@@ -415,10 +413,8 @@ static ssize_t cbaf_wusb_cdid_show(struct device *dev,
{
struct usb_interface *iface = to_usb_interface(dev);
struct cbaf *cbaf = usb_get_intfdata(iface);
- char pr_cdid[WUSB_CKHDID_STRSIZE];
- ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid);
- return scnprintf(buf, PAGE_SIZE, "%s\n", pr_cdid);
+ return sprintf(buf, "%16ph\n", cbaf->cdid.data);
}
static ssize_t cbaf_wusb_cdid_store(struct device *dev,
@@ -503,7 +499,6 @@ static int cbaf_cc_upload(struct cbaf *cbaf)
int result;
struct device *dev = &cbaf->usb_iface->dev;
struct wusb_cbaf_cc_data *ccd;
- char pr_cdid[WUSB_CKHDID_STRSIZE];
ccd = cbaf->buffer;
*ccd = cbaf_cc_data_defaults;
@@ -513,10 +508,8 @@ static int cbaf_cc_upload(struct cbaf *cbaf)
ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups);
dev_dbg(dev, "Trying to upload CC:\n");
- ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID);
- dev_dbg(dev, " CHID %s\n", pr_cdid);
- ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID);
- dev_dbg(dev, " CDID %s\n", pr_cdid);
+ dev_dbg(dev, " CHID %16ph\n", ccd->CHID.data);
+ dev_dbg(dev, " CDID %16ph\n", ccd->CDID.data);
dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups);
result = usb_control_msg(
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
index 85a1acf3a729..67b0a4c412b2 100644
--- a/drivers/usb/wusbcore/dev-sysfs.c
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -50,10 +50,9 @@ static ssize_t wusb_cdid_show(struct device *dev,
wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev));
if (wusb_dev == NULL)
return -ENODEV;
- result = ckhdid_printf(buf, PAGE_SIZE, &wusb_dev->cdid);
- strcat(buf, "\n");
+ result = sprintf(buf, "%16ph\n", wusb_dev->cdid.data);
wusb_dev_put(wusb_dev);
- return result + 1;
+ return result;
}
static DEVICE_ATTR_RO(wusb_cdid);
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index fcb06aef2675..a93837d57d53 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -532,7 +532,7 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
}
dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
- ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID);
+ sprintf(pr_cdid, "%16ph", dnc->CDID.data);
dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n",
pr_cdid,
wusb_dn_connect_prev_dev_addr(dnc),
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 01f2f21830c0..abf88cea37bb 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -662,9 +662,9 @@ static void __wa_setup_isoc_packet_descr(
/* populate isoc packet descriptor. */
packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
- packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
- (sizeof(packet_desc->PacketLength[0]) *
- seg->isoc_frame_count));
+ packet_desc->wLength = cpu_to_le16(struct_size(packet_desc,
+ PacketLength,
+ seg->isoc_frame_count));
for (frame_index = 0; frame_index < seg->isoc_frame_count;
++frame_index) {
int offset_index = frame_index + seg->isoc_frame_offset;
@@ -2438,7 +2438,7 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
struct wa_rpipe *rpipe;
unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
unsigned first_frame_index = 0, rpipe_ready = 0;
- int expected_size;
+ size_t expected_size;
/* We have a xfer result buffer; check it */
dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
@@ -2460,11 +2460,10 @@ static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
goto error_bad_seg;
seg = xfer->seg[wa->dti_isoc_xfer_seg];
rpipe = xfer->ep->hcpriv;
- expected_size = sizeof(*packet_status) +
- (sizeof(packet_status->PacketStatus[0]) *
- seg->isoc_frame_count);
+ expected_size = struct_size(packet_status, PacketStatus,
+ seg->isoc_frame_count);
if (urb->actual_length != expected_size) {
- dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
+ dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %zu needed)\n",
urb->actual_length, expected_size);
goto error_bad_seg;
}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index e5ba6140c1ba..d0b404d258e8 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -80,17 +80,13 @@ static ssize_t wusb_chid_show(struct device *dev,
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
const struct wusb_ckhdid *chid;
- ssize_t result = 0;
if (wusbhc->wuie_host_info != NULL)
chid = &wusbhc->wuie_host_info->CHID;
else
chid = &wusb_ckhdid_zero;
- result += ckhdid_printf(buf, PAGE_SIZE, chid);
- result += sprintf(buf + result, "\n");
-
- return result;
+ return sprintf(buf, "%16ph\n", chid->data);
}
/*
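
Several wusbcore hunks above drop the ckhdid_printf() helper in favour of the kernel's %*ph printk extension, which hex-dumps a small buffer straight from its pointer. A minimal sketch of a sysfs show routine using it (hypothetical 16-byte id buffer and attribute, not from this patch):

#include <linux/device.h>
#include <linux/sysfs.h>

/* %16ph prints 16 bytes as "de ad be ef ..." directly from the buffer,
 * so no intermediate formatting buffer is needed. */
static ssize_t id_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	static const u8 id[16] = { 0xde, 0xad, 0xbe, 0xef };

	return sprintf(buf, "%16ph\n", id);
}
/* Registered elsewhere, e.g. with device_create_file(dev, &dev_attr_id) */
static DEVICE_ATTR_RO(id);
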
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c
index 1d2a939cfcf8..ed993e363472 100644
--- a/drivers/uwb/drp-ie.c
+++ b/drivers/uwb/drp-ie.c
@@ -125,9 +125,8 @@ static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
{
struct uwb_ie_drp *drp_ie;
- drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
- UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
- GFP_KERNEL);
+ drp_ie = kzalloc(struct_size(drp_ie, allocs, UWB_NUM_ZONES),
+ GFP_KERNEL);
if (drp_ie)
drp_ie->hdr.element_id = UWB_IE_DRP;
return drp_ie;
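
The uwb and wusbcore hunks above replace open-coded "sizeof(*p) + n * sizeof(elem)" with struct_size(), which computes the size of a structure plus its trailing array and saturates on overflow. A small sketch with a hypothetical structure (not the uwb_ie_drp layout):

#include <linux/overflow.h>
#include <linux/slab.h>

struct pkt_info {
	u8 type;
	u8 count;
	__le16 len[];		/* one entry per packet */
};

static struct pkt_info *pkt_info_alloc(u8 nr_packets)
{
	struct pkt_info *info;

	/* struct_size(info, len, nr_packets) ==
	 * sizeof(*info) + nr_packets * sizeof(info->len[0]),
	 * with overflow checking. */
	info = kzalloc(struct_size(info, len, nr_packets), GFP_KERNEL);
	if (info)
		info->count = nr_packets;
	return info;
}
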
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index 0212f0ee8aea..b96fedc77ee5 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -60,9 +60,9 @@ struct mdev_device *mdev_from_dev(struct device *dev)
}
EXPORT_SYMBOL(mdev_from_dev);
-uuid_le mdev_uuid(struct mdev_device *mdev)
+const guid_t *mdev_uuid(struct mdev_device *mdev)
{
- return mdev->uuid;
+ return &mdev->uuid;
}
EXPORT_SYMBOL(mdev_uuid);
@@ -88,8 +88,7 @@ static void mdev_release_parent(struct kref *kref)
put_device(dev);
}
-static
-inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
+static inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
{
if (parent)
kref_get(&parent->ref);
@@ -276,7 +275,8 @@ static void mdev_device_release(struct device *dev)
kfree(mdev);
}
-int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
+int mdev_device_create(struct kobject *kobj,
+ struct device *dev, const guid_t *uuid)
{
int ret;
struct mdev_device *mdev, *tmp;
@@ -291,7 +291,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
/* Check for duplicate */
list_for_each_entry(tmp, &mdev_list, next) {
- if (!uuid_le_cmp(tmp->uuid, uuid)) {
+ if (guid_equal(&tmp->uuid, uuid)) {
mutex_unlock(&mdev_list_lock);
ret = -EEXIST;
goto mdev_fail;
@@ -305,7 +305,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
goto mdev_fail;
}
- memcpy(&mdev->uuid, &uuid, sizeof(uuid_le));
+ guid_copy(&mdev->uuid, uuid);
list_add(&mdev->next, &mdev_list);
mutex_unlock(&mdev_list_lock);
@@ -315,7 +315,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
mdev->dev.parent = dev;
mdev->dev.bus = &mdev_bus_type;
mdev->dev.release = mdev_device_release;
- dev_set_name(&mdev->dev, "%pUl", uuid.b);
+ dev_set_name(&mdev->dev, "%pUl", uuid);
ret = device_register(&mdev->dev);
if (ret) {
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index b5819b7d7ef7..379758c52b1b 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -28,7 +28,7 @@ struct mdev_parent {
struct mdev_device {
struct device dev;
struct mdev_parent *parent;
- uuid_le uuid;
+ guid_t uuid;
void *driver_data;
struct kref ref;
struct list_head next;
@@ -58,7 +58,8 @@ void parent_remove_sysfs_files(struct mdev_parent *parent);
int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type);
void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
-int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid);
+int mdev_device_create(struct kobject *kobj,
+ struct device *dev, const guid_t *uuid);
int mdev_device_remove(struct device *dev, bool force_remove);
#endif /* MDEV_PRIVATE_H */
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index ce5dd219f2c8..5193a0e0ce5a 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -55,7 +55,7 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev,
const char *buf, size_t count)
{
char *str;
- uuid_le uuid;
+ guid_t uuid;
int ret;
if ((count < UUID_STRING_LEN) || (count > UUID_STRING_LEN + 1))
@@ -65,12 +65,12 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev,
if (!str)
return -ENOMEM;
- ret = uuid_le_to_bin(str, &uuid);
+ ret = guid_parse(str, &uuid);
kfree(str);
if (ret)
return ret;
- ret = mdev_device_create(kobj, dev, uuid);
+ ret = mdev_device_create(kobj, dev, &uuid);
if (ret)
return ret;
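
The mdev hunks above move from the old uuid_le type to guid_t, using guid_parse(), guid_equal() and guid_copy() and passing the GUID by const pointer. A condensed sketch of the parse-and-compare flow (hypothetical function, error values mirror mdev_device_create()):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/uuid.h>

static int check_guid(const char *str, const guid_t *existing)
{
	guid_t uuid;
	int ret;

	ret = guid_parse(str, &uuid);	/* "xxxxxxxx-xxxx-..." -> binary */
	if (ret)
		return ret;

	if (guid_equal(&uuid, existing))
		return -EEXIST;		/* duplicate, as in mdev_device_create() */

	pr_info("new device %pUl\n", &uuid);
	return 0;
}
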
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h
index 228ccdb8d1c8..b2aa986ab9ed 100644
--- a/drivers/vfio/pci/trace.h
+++ b/drivers/vfio/pci/trace.h
@@ -1,13 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* VFIO PCI mmap/mmap_fault tracepoints
*
* Copyright (C) 2018 IBM Corp. All rights reserved.
* Author: Alexey Kardashevskiy <aik@ozlabs.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#undef TRACE_SYSTEM
@@ -94,7 +90,7 @@ TRACE_EVENT(vfio_pci_npu2_mmap,
#endif /* _TRACE_VFIO_PCI_H */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index ff60bd1ea587..a25659b5a5d1 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -209,6 +209,57 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
return false;
}
+static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 pmcsr;
+
+ if (!pdev->pm_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
+}
+
+/*
+ * pci_set_power_state() wrapper handling devices which perform a soft reset on
+ * D3->D0 transition. Save state prior to a D0/1/2->D3 transition, stash it on
+ * the vdev, and restore it when the device returns to D0. The state is kept
+ * separately from pci_saved_state for use by PM capability emulation, and
+ * separately from the pci_dev internal saved state so it is not overwritten
+ * and consumed around other resets.
+ */
+int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ bool needs_restore = false, needs_save = false;
+ int ret;
+
+ if (vdev->needs_pm_restore) {
+ if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
+ pci_save_state(pdev);
+ needs_save = true;
+ }
+
+ if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
+ needs_restore = true;
+ }
+
+ ret = pci_set_power_state(pdev, state);
+
+ if (!ret) {
+ /* D3 might be unsupported via quirk, skip unless in D3 */
+ if (needs_save && pdev->current_state >= PCI_D3hot) {
+ vdev->pm_save = pci_store_saved_state(pdev);
+ } else if (needs_restore) {
+ pci_load_and_free_saved_state(pdev, &vdev->pm_save);
+ pci_restore_state(pdev);
+ }
+ }
+
+ return ret;
+}
+
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
@@ -216,7 +267,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
u16 cmd;
u8 msix_pos;
- pci_set_power_state(pdev, PCI_D0);
+ vfio_pci_set_power_state(vdev, PCI_D0);
/* Don't allow our initial saved state to include busmaster */
pci_clear_master(pdev);
@@ -407,7 +458,7 @@ out:
vfio_pci_try_bus_reset(vdev);
if (!disable_idle_d3)
- pci_set_power_state(pdev, PCI_D3hot);
+ vfio_pci_set_power_state(vdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
@@ -708,6 +759,7 @@ static long vfio_pci_ioctl(void *device_data,
{
void __iomem *io;
size_t size;
+ u16 orig_cmd;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -723,15 +775,23 @@ static long vfio_pci_ioctl(void *device_data,
break;
}
- /* Is it really there? */
+ /*
+ * Is it really there? Enable memory decode for
+ * implicit access in pci_map_rom().
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ orig_cmd | PCI_COMMAND_MEMORY);
+
io = pci_map_rom(pdev, &size);
- if (!io || !size) {
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
info.size = 0;
- break;
}
- pci_unmap_rom(pdev, io);
- info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
@@ -1286,6 +1346,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vfio_pci_set_vga_decode(vdev, false));
}
+ vfio_pci_probe_power_state(vdev);
+
if (!disable_idle_d3) {
/*
* pci-core sets the device power state to an unknown value at
@@ -1296,8 +1358,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* be able to get to D3. Therefore first do a D0 transition
* before going to D3.
*/
- pci_set_power_state(pdev, PCI_D0);
- pci_set_power_state(pdev, PCI_D3hot);
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ vfio_pci_set_power_state(vdev, PCI_D3hot);
}
return ret;
@@ -1316,6 +1378,11 @@ static void vfio_pci_remove(struct pci_dev *pdev)
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
kfree(vdev->region);
mutex_destroy(&vdev->ioeventfds_lock);
+
+ if (!disable_idle_d3)
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ kfree(vdev->pm_save);
kfree(vdev);
if (vfio_pci_is_vga(pdev)) {
@@ -1324,9 +1391,6 @@ static void vfio_pci_remove(struct pci_dev *pdev)
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
}
-
- if (!disable_idle_d3)
- pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
@@ -1551,7 +1615,7 @@ put_devs:
tmp->needs_reset = false;
if (tmp != vdev && !disable_idle_d3)
- pci_set_power_state(tmp->pdev, PCI_D3hot);
+ vfio_pci_set_power_state(tmp, PCI_D3hot);
}
vfio_device_put(devs.devices[i]);
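
The ROM hunk above now sets VFIO_REGION_INFO_FLAG_READ only when pci_map_rom() actually
finds a ROM, with memory decode temporarily enabled around the probe. Userspace sees the
result through VFIO_DEVICE_GET_REGION_INFO; a minimal sketch of that query (assuming a
device fd already obtained via VFIO_GROUP_GET_DEVICE_FD, error handling trimmed):

	/* Sketch: ask VFIO whether the PCI option ROM region is readable. */
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	int probe_rom(int device_fd)
	{
		struct vfio_region_info info;

		memset(&info, 0, sizeof(info));
		info.argsz = sizeof(info);
		info.index = VFIO_PCI_ROM_REGION_INDEX;

		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
			return -1;

		if (!(info.flags & VFIO_REGION_INFO_FLAG_READ) || !info.size) {
			printf("no readable ROM exposed\n");
			return 0;
		}
		printf("ROM: %llu bytes at region offset 0x%llx\n",
		       (unsigned long long)info.size,
		       (unsigned long long)info.offset);
		return 0;
	}
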
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 423ea1f98441..e82b51114687 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -691,7 +691,7 @@ static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
break;
}
- pci_set_power_state(vdev->pdev, state);
+ vfio_pci_set_power_state(vdev, state);
}
return count;
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index 054a2cf9dd8e..32f695ffe128 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -1,14 +1,10 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO PCI NVIDIA Witherspoon GPU support a.k.a. NVLink2.
*
* Copyright (C) 2018 IBM Corp. All rights reserved.
* Author: Alexey Kardashevskiy <aik@ozlabs.ru>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Register an on-GPU RAM region for cacheable access.
*
* Derived from original vfio_pci_igd.c:
@@ -178,11 +174,11 @@ static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
struct vfio_pci_nvgpu_data *data = region->data;
- struct vfio_region_info_cap_nvlink2_ssatgt cap = { 0 };
-
- cap.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
- cap.header.version = 1;
- cap.tgt = data->gpu_tgt;
+ struct vfio_region_info_cap_nvlink2_ssatgt cap = {
+ .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
+ .header.version = 1,
+ .tgt = data->gpu_tgt
+ };
return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
}
@@ -365,18 +361,18 @@ static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
struct vfio_pci_region *region, struct vfio_info_cap *caps)
{
struct vfio_pci_npu2_data *data = region->data;
- struct vfio_region_info_cap_nvlink2_ssatgt captgt = { 0 };
- struct vfio_region_info_cap_nvlink2_lnkspd capspd = { 0 };
+ struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
+ .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
+ .header.version = 1,
+ .tgt = data->gpu_tgt
+ };
+ struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
+ .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
+ .header.version = 1,
+ .link_speed = data->link_speed
+ };
int ret;
- captgt.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
- captgt.header.version = 1;
- captgt.tgt = data->gpu_tgt;
-
- capspd.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD;
- capspd.header.version = 1;
- capspd.link_speed = data->link_speed;
-
ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
if (ret)
return ret;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 8c0009f00818..1812cf22fc4f 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -114,7 +114,9 @@ struct vfio_pci_device {
bool has_vga;
bool needs_reset;
bool nointx;
+ bool needs_pm_restore;
struct pci_saved_state *pci_saved_state;
+ struct pci_saved_state *pm_save;
struct vfio_pci_reflck *reflck;
int refcnt;
int ioeventfds_nr;
@@ -161,6 +163,10 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
unsigned int type, unsigned int subtype,
const struct vfio_pci_regops *ops,
size_t size, u32 flags, void *data);
+
+extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
+ pci_power_t state);
+
#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
#else
diff --git a/drivers/vfio/platform/reset/Makefile b/drivers/vfio/platform/reset/Makefile
index 57abd4f0ac5b..7294c5ea122e 100644
--- a/drivers/vfio/platform/reset/Makefile
+++ b/drivers/vfio/platform/reset/Makefile
@@ -2,8 +2,6 @@
vfio-platform-calxedaxgmac-y := vfio_platform_calxedaxgmac.o
vfio-platform-amdxgbe-y := vfio_platform_amdxgbe.o
-ccflags-y += -Idrivers/vfio/platform
-
obj-$(CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET) += vfio-platform-calxedaxgmac.o
obj-$(CONFIG_VFIO_PLATFORM_AMDXGBE_RESET) += vfio-platform-amdxgbe.o
obj-$(CONFIG_VFIO_PLATFORM_BCMFLEXRM_RESET) += vfio_platform_bcmflexrm.o
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index bcd419cfd79c..3ddb2704221d 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -25,7 +25,7 @@
#include <uapi/linux/mdio.h>
#include <linux/delay.h>
-#include "vfio_platform_private.h"
+#include "../vfio_platform_private.h"
#define DMA_MR 0x3000
#define MAC_VR 0x0110
diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
index d45c3be71198..16165a62b86d 100644
--- a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
+++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include "vfio_platform_private.h"
+#include "../vfio_platform_private.h"
/* FlexRM configuration */
#define RING_REGS_SIZE 0x10000
diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
index 49e5df6e8f29..e0356de5df54 100644
--- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
+++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/io.h>
-#include "vfio_platform_private.h"
+#include "../vfio_platform_private.h"
#define DRIVER_VERSION "0.1"
#define DRIVER_AUTHOR "Eric Auger <eric.auger@linaro.org>"
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 64833879f75d..a3030cdf3c18 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -2219,12 +2219,12 @@ static int __init vfio_init(void)
vfio.class->devnode = vfio_devnode;
- ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
+ ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
if (ret)
goto err_alloc_chrdev;
cdev_init(&vfio.group_cdev, &vfio_group_fops);
- ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
+ ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
if (ret)
goto err_cdev_add;
@@ -2236,7 +2236,7 @@ static int __init vfio_init(void)
return 0;
err_cdev_add:
- unregister_chrdev_region(vfio.group_devt, MINORMASK);
+ unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
@@ -2254,7 +2254,7 @@ static void __exit vfio_cleanup(void)
#endif
idr_destroy(&vfio.group_idr);
cdev_del(&vfio.group_cdev);
- unregister_chrdev_region(vfio.group_devt, MINORMASK);
+ unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
class_destroy(vfio.class);
vfio.class = NULL;
misc_deregister(&vfio_dev);
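
The MINORMASK changes above are an off-by-one fix: alloc_chrdev_region() and cdev_add()
take a count of minor numbers, while MINORMASK is the largest minor value, so covering
the whole range needs MINORMASK + 1. A throwaway illustration of the arithmetic,
mirroring the MINORBITS/MINORMASK definitions from <linux/kdev_t.h>:

	#include <stdio.h>

	#define MINORBITS	20
	#define MINORMASK	((1U << MINORBITS) - 1)

	int main(void)
	{
		printf("largest minor number: %u\n", MINORMASK);      /* 1048575 */
		printf("count of minor numbers: %u\n", MINORMASK + 1); /* 1048576 */
		return 0;
	}
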
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c424913324e3..8dbb270998f4 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1235,7 +1235,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
}
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
- table_group->ops->unset_window(table_group, i);
+ if (container->tables[i])
+ table_group->ops->unset_window(table_group, i);
table_group->ops->release_ownership(table_group);
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 7651cfb14836..73652e21efec 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -878,7 +878,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
return -EINVAL;
if (!unmap->size || unmap->size & mask)
return -EINVAL;
- if (unmap->iova + unmap->size < unmap->iova ||
+ if (unmap->iova + unmap->size - 1 < unmap->iova ||
unmap->size > SIZE_MAX)
return -EINVAL;
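
The reworked check above deals with wrap-around at the very top of the IOVA space: with
the old form, a valid unmap whose end lands exactly on the address-space boundary makes
iova + size wrap to 0 and get rejected, while iova + size - 1 only trips on a genuine
overflow past the end. A quick demonstration of the arithmetic, shrunk to 32-bit values
to keep the numbers readable (the kernel uses the full dma_addr_t width):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t iova = 0xfffff000u;
		uint32_t size = 0x1000u;	/* range ends exactly at 2^32 */

		/* old check: iova + size < iova -> wraps to 0, range rejected */
		printf("iova + size     = 0x%x\n", (uint32_t)(iova + size));
		/* new check: iova + size - 1 < iova -> 0xffffffff, range accepted */
		printf("iova + size - 1 = 0x%x\n", (uint32_t)(iova + size - 1));
		return 0;
	}
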
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 36f3d0f49e60..df51a35cf537 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1236,7 +1236,8 @@ static void handle_rx(struct vhost_net *net)
if (nvq->done_idx > VHOST_NET_BATCH)
vhost_net_signal_used(nvq);
if (unlikely(vq_log))
- vhost_log_write(vq, vq_log, log, vhost_len);
+ vhost_log_write(vq, vq_log, log, vhost_len,
+ vq->iov, in);
total_len += vhost_len;
if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
vhost_poll_queue(&vq->poll);
@@ -1336,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].rx_ring = NULL;
vhost_net_buf_init(&n->vqs[i].rxq);
}
- vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+ UIO_MAXIOV + VHOST_NET_BATCH);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8e10ab436d1f..23593cb23dd0 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
- struct virtio_scsi_ctrl_tmf_resp __user *resp;
struct virtio_scsi_ctrl_tmf_resp rsp;
+ struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp));
rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
- resp = vq->iov[vc->out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
+
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
@@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
{
- struct virtio_scsi_ctrl_an_resp __user *resp;
struct virtio_scsi_ctrl_an_resp rsp;
+ struct iov_iter iov_iter;
int ret;
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
rsp.response = VIRTIO_SCSI_S_OK;
- resp = vq->iov[vc->out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
+
+ iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ if (likely(ret == sizeof(rsp)))
vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
@@ -1623,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
vhost_scsi_init_inflight(vs, NULL);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9f7942cbcbb2..a2e5dc7716e2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
vq->indirect = kmalloc_array(UIO_MAXIOV,
sizeof(*vq->indirect),
GFP_KERNEL);
- vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
+ vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
GFP_KERNEL);
- vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
+ vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
GFP_KERNEL);
if (!vq->indirect || !vq->log || !vq->heads)
goto err_nomem;
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
}
void vhost_dev_init(struct vhost_dev *dev,
- struct vhost_virtqueue **vqs, int nvqs)
+ struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
{
struct vhost_virtqueue *vq;
int i;
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->iotlb = NULL;
dev->mm = NULL;
dev->worker = NULL;
+ dev->iov_limit = iov_limit;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
@@ -1034,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
int type, ret;
ret = copy_from_iter(&type, sizeof(type), from);
- if (ret != sizeof(type))
+ if (ret != sizeof(type)) {
+ ret = -EINVAL;
goto done;
+ }
switch (type) {
case VHOST_IOTLB_MSG:
@@ -1054,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
iov_iter_advance(from, offset);
ret = copy_from_iter(&msg, sizeof(msg), from);
- if (ret != sizeof(msg))
+ if (ret != sizeof(msg)) {
+ ret = -EINVAL;
goto done;
+ }
if (vhost_process_iotlb_msg(dev, &msg)) {
ret = -EFAULT;
goto done;
@@ -1733,13 +1738,87 @@ static int log_write(void __user *log_base,
return r;
}
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
+{
+ struct vhost_umem *umem = vq->umem;
+ struct vhost_umem_node *u;
+ u64 start, end, l, min;
+ int r;
+ bool hit = false;
+
+ while (len) {
+ min = len;
+ /* More than one GPA can be mapped into a single HVA, so
+ * iterate all possible umems here to be safe.
+ */
+ list_for_each_entry(u, &umem->umem_list, link) {
+ if (u->userspace_addr > hva - 1 + len ||
+ u->userspace_addr - 1 + u->size < hva)
+ continue;
+ start = max(u->userspace_addr, hva);
+ end = min(u->userspace_addr - 1 + u->size,
+ hva - 1 + len);
+ l = end - start + 1;
+ r = log_write(vq->log_base,
+ u->start + start - u->userspace_addr,
+ l);
+ if (r < 0)
+ return r;
+ hit = true;
+ min = min(l, min);
+ }
+
+ if (!hit)
+ return -EFAULT;
+
+ len -= min;
+ hva += min;
+ }
+
+ return 0;
+}
+
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+{
+ struct iovec iov[64];
+ int i, ret;
+
+ if (!vq->iotlb)
+ return log_write(vq->log_base, vq->log_addr + used_offset, len);
+
+ ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+ len, iov, 64, VHOST_ACCESS_WO);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ret; i++) {
+ ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
- unsigned int log_num, u64 len)
+ unsigned int log_num, u64 len, struct iovec *iov, int count)
{
int i, r;
/* Make sure data written is seen before log. */
smp_wmb();
+
+ if (vq->iotlb) {
+ for (i = 0; i < count; i++) {
+ r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+ iov[i].iov_len);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+ }
+
for (i = 0; i < log_num; ++i) {
u64 l = min(log[i].len, len);
r = log_write(vq->log_base, log[i].addr, l);
@@ -1769,9 +1848,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
smp_wmb();
/* Log used flag write. */
used = &vq->used->flags;
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof vq->used->flags);
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof vq->used->flags);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
@@ -1789,9 +1867,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
smp_wmb();
/* Log avail event write */
used = vhost_avail_event(vq);
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof *vhost_avail_event(vq));
+ log_used(vq, (used - (void __user *)vq->used),
+ sizeof *vhost_avail_event(vq));
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
@@ -2191,10 +2268,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
/* Make sure data is seen before log. */
smp_wmb();
/* Log used ring entry write. */
- log_write(vq->log_base,
- vq->log_addr +
- ((void __user *)used - (void __user *)vq->used),
- count * sizeof *used);
+ log_used(vq, ((void __user *)used - (void __user *)vq->used),
+ count * sizeof *used);
}
old = vq->last_used_idx;
new = (vq->last_used_idx += count);
@@ -2236,9 +2311,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
/* Make sure used idx is seen before log. */
smp_wmb();
/* Log used index update. */
- log_write(vq->log_base,
- vq->log_addr + offsetof(struct vring_used, idx),
- sizeof vq->used->idx);
+ log_used(vq, offsetof(struct vring_used, idx),
+ sizeof vq->used->idx);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
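
log_write_hva() in the hunk above walks every umem node because a single HVA range can
be backed by more than one GPA mapping, and it does the range math with inclusive end
points (addr - 1 + size) so mappings touching the top of the address space do not
overflow. The overlap computation in isolation, as a small hedged sketch (names are
illustrative, not kernel API):

	#include <inttypes.h>
	#include <stdio.h>

	/* Bytes shared by [a, a + a_len) and [b, b + b_len); 0 if disjoint.
	 * Inclusive "last byte" addresses avoid overflow for ranges that end
	 * exactly at the top of the address space. */
	static uint64_t overlap_len(uint64_t a, uint64_t a_len,
				    uint64_t b, uint64_t b_len)
	{
		uint64_t a_last = a - 1 + a_len;
		uint64_t b_last = b - 1 + b_len;
		uint64_t start = a > b ? a : b;
		uint64_t end = a_last < b_last ? a_last : b_last;

		if (b > a_last || a > b_last)
			return 0;
		return end - start + 1;
	}

	int main(void)
	{
		/* umem node 0x1000..0x2fff vs. a logged write 0x2800..0x37ff */
		printf("%" PRIu64 " bytes overlap\n",
		       overlap_len(0x1000, 0x2000, 0x2800, 0x1000));	/* 2048 */
		return 0;
	}
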
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 466ef7542291..9490e7ddb340 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -170,9 +170,11 @@ struct vhost_dev {
struct list_head read_list;
struct list_head pending_list;
wait_queue_head_t wait;
+ int iov_limit;
};
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+ int nvqs, int iov_limit);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
@@ -205,7 +207,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
- unsigned int log_num, u64 len);
+ unsigned int log_num, u64 len,
+ struct iovec *iov, int count);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bc42d38ae031..bb5fc0e9fbc2 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
- vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
@@ -642,7 +642,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
hash_del_rcu(&vsock->hash);
vsock->guest_cid = guest_cid;
- hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+ hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
mutex_unlock(&vhost_vsock_mutex);
return 0;
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 6d8dc2c77520..51e0c4be08df 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -174,7 +174,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
return -ENODEV;
}
for_each_child_of_node(nproot, np) {
- if (!of_node_cmp(np->name, name)) {
+ if (of_node_name_eq(np, name)) {
of_property_read_u32(np, "marvell,88pm860x-iset",
&iset);
data->iset = PM8606_WLED_CURRENT(iset);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index f9ef0673a083..feb90764a811 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -30,6 +30,7 @@ struct pwm_bl_data {
struct device *dev;
unsigned int lth_brightness;
unsigned int *levels;
+ bool enabled;
struct regulator *power_supply;
struct gpio_desc *enable_gpio;
unsigned int scale;
@@ -50,7 +51,7 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
int err;
pwm_get_state(pb->pwm, &state);
- if (state.enabled)
+ if (pb->enabled)
return;
err = regulator_enable(pb->power_supply);
@@ -65,6 +66,8 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
if (pb->enable_gpio)
gpiod_set_value_cansleep(pb->enable_gpio, 1);
+
+ pb->enabled = true;
}
static void pwm_backlight_power_off(struct pwm_bl_data *pb)
@@ -72,7 +75,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
struct pwm_state state;
pwm_get_state(pb->pwm, &state);
- if (!state.enabled)
+ if (!pb->enabled)
return;
if (pb->enable_gpio)
@@ -86,6 +89,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
pwm_apply_state(pb->pwm, &state);
regulator_disable(pb->power_supply);
+ pb->enabled = false;
}
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
@@ -269,6 +273,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
memset(data, 0, sizeof(*data));
/*
+ * These values are optional and set to 0 by default; the out values
+ * are modified only if a valid u32 value can be decoded.
+ */
+ of_property_read_u32(node, "post-pwm-on-delay-ms",
+ &data->post_pwm_on_delay);
+ of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
+
+ data->enable_gpio = -EINVAL;
+
+ /*
* Determine the number of brightness levels, if this property is not
* set a default table of brightness levels will be used.
*/
@@ -380,15 +394,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
data->max_brightness--;
}
- /*
- * These values are optional and set as 0 by default, the out values
- * are modified only if a valid u32 value can be decoded.
- */
- of_property_read_u32(node, "post-pwm-on-delay-ms",
- &data->post_pwm_on_delay);
- of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
-
- data->enable_gpio = -EINVAL;
return 0;
}
@@ -483,6 +488,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->check_fb = data->check_fb;
pb->exit = data->exit;
pb->dev = &pdev->dev;
+ pb->enabled = false;
pb->post_pwm_on_delay = data->post_pwm_on_delay;
pb->pwm_off_delay = data->pwm_off_delay;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 09731b2f6815..c6b3bdbbdbc9 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
static void vgacon_restore_screen(struct vc_data *c)
{
+ c->vc_origin = c->vc_visible_origin;
vgacon_scrollback_cur->save = 0;
if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
int start, end, count, soff;
if (!lines) {
- c->vc_visible_origin = c->vc_origin;
- vga_set_mem_top(c);
+ vgacon_restore_screen(c);
return;
}
@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
if (!vgacon_scrollback_cur->save) {
vgacon_cursor(c, CM_ERASE);
vgacon_save_screen(c);
+ c->vc_origin = (unsigned long)c->vc_screenbuf;
vgacon_scrollback_cur->save = 1;
}
@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
int copysize;
int diff = c->vc_rows - count;
- void *d = (void *) c->vc_origin;
+ void *d = (void *) c->vc_visible_origin;
void *s = (void *) c->vc_screenbuf;
count *= c->vc_size_row;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index ae7712c9687a..58a9590c9db6 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -536,7 +536,7 @@ config FB_IMSTT
bool "IMS Twin Turbo display support"
depends on (FB = y) && PCI
select FB_CFB_IMAGEBLIT
- select FB_MACMODES if PPC
+ select FB_MACMODES if PPC_PMAC
help
The IMS Twin Turbo is a PCI-based frame buffer card bundled with
many Macintosh and compatible computers.
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 9cb0ef7ac29e..7af8db28bb80 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -411,35 +411,23 @@ static int __init init_control(struct fb_info_control *p)
full = p->total_vram == 0x400000;
/* Try to pick a video mode out of NVRAM if we have one. */
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM) {
+ cmode = default_cmode;
+ if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
cmode = nvram_read_byte(NV_CMODE);
- if(cmode < CMODE_8 || cmode > CMODE_32)
- cmode = CMODE_8;
- } else
-#endif
- cmode=default_cmode;
-#ifdef CONFIG_NVRAM
- if (default_vmode == VMODE_NVRAM) {
+ if (cmode < CMODE_8 || cmode > CMODE_32)
+ cmode = CMODE_8;
+
+ vmode = default_vmode;
+ if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
vmode = nvram_read_byte(NV_VMODE);
- if (vmode < 1 || vmode > VMODE_MAX ||
- control_mac_modes[vmode - 1].m[full] < cmode) {
- sense = read_control_sense(p);
- printk("Monitor sense value = 0x%x, ", sense);
- vmode = mac_map_monitor_sense(sense);
- if (control_mac_modes[vmode - 1].m[full] < cmode)
- vmode = VMODE_640_480_60;
- }
- } else
-#endif
- {
- vmode=default_vmode;
- if (control_mac_modes[vmode - 1].m[full] < cmode) {
- if (cmode > CMODE_8)
- cmode--;
- else
- vmode = VMODE_640_480_60;
- }
+ if (vmode < 1 || vmode > VMODE_MAX ||
+ control_mac_modes[vmode - 1].m[full] < cmode) {
+ sense = read_control_sense(p);
+ printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
+ vmode = mac_map_monitor_sense(sense);
+ if (control_mac_modes[vmode - 1].m[full] < 0)
+ vmode = VMODE_640_480_60;
+ cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
}
/* Initialize info structure */
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 8976190b6c1f..bfa1360ec750 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt)
continue;
}
#endif
+
+ if (!strncmp(options, "logo-pos:", 9)) {
+ options += 9;
+ if (!strcmp(options, "center"))
+ fb_center_logo = true;
+ continue;
+ }
}
return 1;
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 558ed2ed3124..cb43a2258c51 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb);
int num_registered_fb __read_mostly;
EXPORT_SYMBOL(num_registered_fb);
+bool fb_center_logo __read_mostly;
+EXPORT_SYMBOL(fb_center_logo);
+
static struct fb_info *get_fb_info(unsigned int idx)
{
struct fb_info *fb_info;
@@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
fb_set_logo(info, logo, logo_new, fb_logo.depth);
}
-#ifdef CONFIG_FB_LOGO_CENTER
- {
+ if (fb_center_logo) {
int xres = info->var.xres;
int yres = info->var.yres;
@@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
--n;
image.dx = (xres - n * (logo->width + 8) - 8) / 2;
image.dy = y ?: (yres - logo->height) / 2;
+ } else {
+ image.dx = 0;
+ image.dy = y;
}
-#else
- image.dx = 0;
- image.dy = y;
-#endif
+
image.width = logo->width;
image.height = logo->height;
@@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
}
height = fb_logo.logo->height;
-#ifdef CONFIG_FB_LOGO_CENTER
- height += (yres - fb_logo.logo->height) / 2;
-#endif
+ if (fb_center_logo)
+ height += (yres - fb_logo.logo->height) / 2;
return fb_prepare_extra_logos(info, height, yres);
}
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index a74096c53cb5..43f2a4816860 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1446,9 +1446,9 @@ static int fb_probe(struct platform_device *device)
da8xx_fb_fix.line_length - 1;
/* allocate palette buffer */
- par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
- &par->p_palette_base,
- GFP_KERNEL | GFP_DMA);
+ par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
+ &par->p_palette_base,
+ GFP_KERNEL | GFP_DMA);
if (!par->v_palette_base) {
dev_err(&device->dev,
"GLCD: kmalloc for palette buffer failed\n");
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 901ca4ed10e9..5d9670daf60e 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -30,9 +30,8 @@
#include <asm/io.h>
#include <linux/uaccess.h>
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
#include <linux/nvram.h>
-#include <asm/prom.h>
#include "macmodes.h"
#endif
@@ -327,14 +326,13 @@ enum {
TVP = 1
};
-#define USE_NV_MODES 1
#define INIT_BPP 8
#define INIT_XRES 640
#define INIT_YRES 480
static int inverse = 0;
static char fontname[40] __initdata = { 0 };
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
static signed char init_vmode = -1, init_cmode = -1;
#endif
@@ -1390,8 +1388,8 @@ static void init_imstt(struct fb_info *info)
}
}
-#if USE_NV_MODES && defined(CONFIG_PPC32)
- {
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+ if (IS_REACHABLE(CONFIG_NVRAM) && machine_is(powermac)) {
int vmode = init_vmode, cmode = init_cmode;
if (vmode == -1) {
@@ -1409,12 +1407,13 @@ static void init_imstt(struct fb_info *info)
info->var.yres = info->var.yres_virtual = INIT_YRES;
info->var.bits_per_pixel = INIT_BPP;
}
- }
-#else
- info->var.xres = info->var.xres_virtual = INIT_XRES;
- info->var.yres = info->var.yres_virtual = INIT_YRES;
- info->var.bits_per_pixel = INIT_BPP;
+ } else
#endif
+ {
+ info->var.xres = info->var.xres_virtual = INIT_XRES;
+ info->var.yres = info->var.yres_virtual = INIT_YRES;
+ info->var.bits_per_pixel = INIT_BPP;
+ }
if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
@@ -1565,7 +1564,7 @@ imsttfb_setup(char *options)
inverse = 1;
fb_invert_cmaps();
}
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
else if (!strncmp(this_opt, "vmode:", 6)) {
int vmode = simple_strtoul(this_opt+6, NULL, 0);
if (vmode > 0 && vmode <= VMODE_MAX)
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 838869c6490c..d11b5e6210ed 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -111,12 +111,12 @@
#include "matroxfb_g450.h"
#include <linux/matroxfb.h>
#include <linux/interrupt.h>
+#include <linux/nvram.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
-unsigned char nvram_read_byte(int);
static int default_vmode = VMODE_NVRAM;
static int default_cmode = CMODE_NVRAM;
#endif
@@ -1872,10 +1872,11 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
#ifndef MODULE
if (machine_is(powermac)) {
struct fb_var_screeninfo var;
+
if (default_vmode <= 0 || default_vmode > VMODE_MAX)
default_vmode = VMODE_640_480_60;
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM)
+#if defined(CONFIG_PPC32)
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
#endif
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 31f769d67195..057d3cdef92e 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
}
static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp,
- const char *name, unsigned long address)
+ unsigned long address)
{
struct offb_par *par = (struct offb_par *) info->par;
- if (dp && !strncmp(name, "ATY,Rage128", 11)) {
+ if (of_node_name_prefix(dp, "ATY,Rage128")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_r128;
- } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12)
- || !strncmp(name, "ATY,RageM3p12A", 14))) {
+ } else if (of_node_name_prefix(dp, "ATY,RageM3pA") ||
+ of_node_name_prefix(dp, "ATY,RageM3p12A")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_M3A;
- } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) {
+ } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_M3B;
- } else if (dp && !strncmp(name, "ATY,Rage6", 9)) {
+ } else if (of_node_name_prefix(dp, "ATY,Rage6")) {
par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff);
if (par->cmap_adr)
par->cmap_type = cmap_radeon;
- } else if (!strncmp(name, "ATY,", 4)) {
+ } else if (of_node_name_prefix(dp, "ATY,")) {
unsigned long base = address & 0xff000000UL;
par->cmap_adr =
ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
@@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
if (par->cmap_adr)
par->cmap_type = cmap_gxt2000;
- } else if (dp && !strncmp(name, "vga,Display-", 12)) {
+ } else if (of_node_name_prefix(dp, "vga,Display-")) {
/* Look for AVIVO initialized by SLOF */
struct device_node *pciparent = of_get_parent(dp);
const u32 *vid, *did;
@@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name,
par->cmap_type = cmap_unknown;
if (depth == 8)
- offb_init_palette_hacks(info, dp, name, address);
+ offb_init_palette_hacks(info, dp, address);
else
fix->visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 53f93616c671..8e23160ec59f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
int r = 0;
+ memset(&p, 0, sizeof(p));
+
switch (cmd) {
case OMAPFB_SYNC_GFX:
DBG("ioctl SYNC_GFX\n");
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index bf6b7fb83cf4..76f299375a00 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -345,23 +345,18 @@ static int platinum_init_fb(struct fb_info *info)
sense = read_platinum_sense(pinfo);
printk(KERN_INFO "platinumfb: Monitor sense value = 0x%x, ", sense);
- if (default_vmode == VMODE_NVRAM) {
-#ifdef CONFIG_NVRAM
+
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
default_vmode = nvram_read_byte(NV_VMODE);
- if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
- !platinum_reg_init[default_vmode-1])
-#endif
- default_vmode = VMODE_CHOOSE;
- }
- if (default_vmode == VMODE_CHOOSE) {
+ if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
+ !platinum_reg_init[default_vmode - 1]) {
default_vmode = mac_map_monitor_sense(sense);
+ if (!platinum_reg_init[default_vmode - 1])
+ default_vmode = VMODE_640_480_60;
}
- if (default_vmode <= 0 || default_vmode > VMODE_MAX)
- default_vmode = VMODE_640_480_60;
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM)
+
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
-#endif
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
default_cmode = CMODE_8;
/*
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index d51c3a8009cb..e04fde9c1fcd 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -63,15 +63,8 @@
#include "macmodes.h"
#include "valkyriefb.h"
-#ifdef CONFIG_MAC
-/* We don't yet have functions to read the PRAM... perhaps we can
- adapt them from the PPC code? */
-static int default_vmode = VMODE_CHOOSE;
-static int default_cmode = CMODE_8;
-#else
static int default_vmode = VMODE_NVRAM;
static int default_cmode = CMODE_NVRAM;
-#endif
struct fb_par_valkyrie {
int vmode, cmode;
@@ -283,24 +276,21 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
printk(KERN_INFO "Monitor sense value = 0x%x\n", p->sense);
/* Try to pick a video mode out of NVRAM if we have one. */
-#if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM)
- if (default_vmode == VMODE_NVRAM) {
+#ifdef CONFIG_PPC_PMAC
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
default_vmode = nvram_read_byte(NV_VMODE);
- if (default_vmode <= 0
- || default_vmode > VMODE_MAX
- || !valkyrie_reg_init[default_vmode - 1])
- default_vmode = VMODE_CHOOSE;
- }
#endif
- if (default_vmode == VMODE_CHOOSE)
+ if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
+ !valkyrie_reg_init[default_vmode - 1]) {
default_vmode = mac_map_monitor_sense(p->sense);
- if (!valkyrie_reg_init[default_vmode - 1])
- default_vmode = VMODE_640_480_67;
-#if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM)
- if (default_cmode == CMODE_NVRAM)
+ if (!valkyrie_reg_init[default_vmode - 1])
+ default_vmode = VMODE_640_480_67;
+ }
+
+#ifdef CONFIG_PPC_PMAC
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
#endif
-
/*
* Reduce the pixel size if we don't have enough VRAM or bandwidth.
*/
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 1e972c4e88b1..d1f6196c8b9a 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,15 +10,6 @@ menuconfig LOGO
if LOGO
-config FB_LOGO_CENTER
- bool "Center the logo"
- depends on FB=y
- help
- When this option is selected, the bootup logo is centered both
- horizontally and vertically. If more than one logo is displayed
- due to multiple CPUs, the collected line of logos is centered
- as a whole.
-
config FB_LOGO_EXTRA
bool
depends on FB=y
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 1475ed5ffcde..df7d09409efe 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1484,8 +1484,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
#ifdef CONFIG_COMPAT
case VBG_IOCTL_HGCM_CALL_32(0):
f32bit = true;
- /* Fall through */
#endif
+ /* Fall through */
case VBG_IOCTL_HGCM_CALL(0):
return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
case VBG_IOCTL_LOG(0):
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 728ecd1eea30..fb12fe205f86 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -61,6 +61,10 @@ enum virtio_balloon_vq {
VIRTIO_BALLOON_VQ_MAX
};
+enum virtio_balloon_config_read {
+ VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
+};
+
struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
@@ -77,14 +81,20 @@ struct virtio_balloon {
/* Prevent updating balloon when it is being canceled. */
spinlock_t stop_update_lock;
bool stop_update;
+ /* Bitmap to indicate whether the related config fields need to be read */
+ unsigned long config_read_bitmap;
/* The list of allocated free pages, waiting to be given back to mm */
struct list_head free_page_list;
spinlock_t free_page_list_lock;
/* The number of free page blocks on the above list */
unsigned long num_free_page_blocks;
- /* The cmd id received from host */
- u32 cmd_id_received;
+ /*
+ * The cmd id received from the host.
+ * Read it via virtio_balloon_cmd_id_received() to get the latest value
+ * sent from the host.
+ */
+ u32 cmd_id_received_cache;
/* The cmd id that is actively in use */
__virtio32 cmd_id_active;
/* Buffer to store the stop sign */
@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
return num_returned;
}
+static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
+{
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ return;
+
+ /* No need to queue the work if the bit was already set. */
+ if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+ &vb->config_read_bitmap))
+ return;
+
+ queue_work(vb->balloon_wq, &vb->report_free_page_work);
+}
+
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
unsigned long flags;
- s64 diff = towards_target(vb);
-
- if (diff) {
- spin_lock_irqsave(&vb->stop_update_lock, flags);
- if (!vb->stop_update)
- queue_work(system_freezable_wq,
- &vb->update_balloon_size_work);
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
- }
- if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
- virtio_cread(vdev, struct virtio_balloon_config,
- free_page_report_cmd_id, &vb->cmd_id_received);
- if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
- /* Pass ULONG_MAX to give back all the free pages */
- return_free_pages_to_mm(vb, ULONG_MAX);
- } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
- vb->cmd_id_received !=
- virtio32_to_cpu(vdev, vb->cmd_id_active)) {
- spin_lock_irqsave(&vb->stop_update_lock, flags);
- if (!vb->stop_update) {
- queue_work(vb->balloon_wq,
- &vb->report_free_page_work);
- }
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
- }
+ spin_lock_irqsave(&vb->stop_update_lock, flags);
+ if (!vb->stop_update) {
+ queue_work(system_freezable_wq,
+ &vb->update_balloon_size_work);
+ virtio_balloon_queue_free_page_work(vb);
}
+ spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}
static void update_balloon_size(struct virtio_balloon *vb)
@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb)
return 0;
}
+static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
+{
+ if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+ &vb->config_read_bitmap))
+ virtio_cread(vb->vdev, struct virtio_balloon_config,
+ free_page_report_cmd_id,
+ &vb->cmd_id_received_cache);
+
+ return vb->cmd_id_received_cache;
+}
+
static int send_cmd_id_start(struct virtio_balloon *vb)
{
struct scatterlist sg;
@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
while (virtqueue_get_buf(vq, &unused))
;
- vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
+ vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
+ virtio_balloon_cmd_id_received(vb));
sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
if (!err)
@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb)
* stop the reporting.
*/
cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
- if (cmd_id_active != vb->cmd_id_received)
+ if (unlikely(cmd_id_active !=
+ virtio_balloon_cmd_id_received(vb)))
break;
/*
@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb)
return 0;
}
-static void report_free_page_func(struct work_struct *work)
+static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
int err;
- struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
- report_free_page_work);
struct device *dev = &vb->vdev->dev;
/* Start by sending the received cmd id to host with an outbuf. */
@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work)
dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}
+static void report_free_page_func(struct work_struct *work)
+{
+ struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
+ report_free_page_work);
+ u32 cmd_id_received;
+
+ cmd_id_received = virtio_balloon_cmd_id_received(vb);
+ if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
+ /* Pass ULONG_MAX to give back all the free pages */
+ return_free_pages_to_mm(vb, ULONG_MAX);
+ } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
+ cmd_id_received !=
+ virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
+ virtio_balloon_report_free_page(vb);
+ }
+}
+
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_del_vqs;
}
INIT_WORK(&vb->report_free_page_work, report_free_page_func);
- vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
+ vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
VIRTIO_BALLOON_CMD_ID_STOP);
vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 4cd9ea5c75be..d9dd0f789279 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
- int i, err;
+ int i, err, queue_idx = 0;
err = request_irq(irq, vm_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vm_dev);
@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return err;
for (i = 0; i < nvqs; ++i) {
- vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i],
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false);
if (IS_ERR(vqs[i])) {
vm_del_vqs(vdev);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 465a6f5142cc..d0584c040c60 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u16 msix_vec;
- int i, err, nvectors, allocated_vectors;
+ int i, err, nvectors, allocated_vectors, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
@@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
msix_vec = allocated_vectors++;
else
msix_vec = VP_MSIX_VQ_VECTOR;
- vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false,
msix_vec);
if (IS_ERR(vqs[i])) {
@@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
const char * const names[], const bool *ctx)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- int i, err;
+ int i, err, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
@@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
vqs[i] = NULL;
continue;
}
- vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+ vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
ctx ? ctx[i] : false,
VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cd7e755484e3..a0b07c331255 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -152,7 +152,12 @@ struct vring_virtqueue {
/* Available for packed ring */
struct {
/* Actual memory layout for this queue. */
- struct vring_packed vring;
+ struct {
+ unsigned int num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
/* Driver ring wrap counter. */
bool avail_wrap_counter;
@@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
!context;
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+ if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+ vq->weak_barriers = false;
+
vq->packed.ring_dma_addr = ring_dma_addr;
vq->packed.driver_event_dma_addr = driver_event_dma_addr;
vq->packed.device_event_dma_addr = device_event_dma_addr;
@@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
!context;
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+ if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+ vq->weak_barriers = false;
+
vq->split.queue_dma_addr = 0;
vq->split.queue_size_in_bytes = 0;
@@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_RING_PACKED:
break;
+ case VIRTIO_F_ORDER_PLATFORM:
+ break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index ed05514cc2dc..1834524ae373 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/types.h>
+#include <linux/mfd/bcm2835-pm.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/watchdog.h>
@@ -47,6 +48,8 @@ struct bcm2835_wdt {
spinlock_t lock;
};
+static struct bcm2835_wdt *bcm2835_power_off_wdt;
+
static unsigned int heartbeat;
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -148,10 +151,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
*/
static void bcm2835_power_off(void)
{
- struct device_node *np =
- of_find_compatible_node(NULL, NULL, "brcm,bcm2835-pm-wdt");
- struct platform_device *pdev = of_find_device_by_node(np);
- struct bcm2835_wdt *wdt = platform_get_drvdata(pdev);
+ struct bcm2835_wdt *wdt = bcm2835_power_off_wdt;
u32 val;
/*
@@ -169,7 +169,7 @@ static void bcm2835_power_off(void)
static int bcm2835_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct bcm2835_wdt *wdt;
int err;
@@ -181,10 +181,7 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(wdt->base))
- return PTR_ERR(wdt->base);
+ wdt->base = pm->base;
watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt);
watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev);
@@ -211,8 +208,10 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
return err;
}
- if (pm_power_off == NULL)
+ if (pm_power_off == NULL) {
pm_power_off = bcm2835_power_off;
+ bcm2835_power_off_wdt = wdt;
+ }
dev_info(dev, "Broadcom BCM2835 watchdog timer");
return 0;
@@ -226,18 +225,11 @@ static int bcm2835_wdt_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id bcm2835_wdt_of_match[] = {
- { .compatible = "brcm,bcm2835-pm-wdt", },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm2835_wdt_of_match);
-
static struct platform_driver bcm2835_wdt_driver = {
.probe = bcm2835_wdt_probe,
.remove = bcm2835_wdt_remove,
.driver = {
.name = "bcm2835-wdt",
- .of_match_table = bcm2835_wdt_of_match,
},
};
module_platform_driver(bcm2835_wdt_driver);
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 5c4a764717c4..81208cd3f4ec 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -17,6 +17,7 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 98967f0a7d10..db7c57d82cfd 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -18,6 +18,7 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c
index 0d3a0fbbd7a5..52941207a12a 100644
--- a/drivers/watchdog/tqmx86_wdt.c
+++ b/drivers/watchdog/tqmx86_wdt.c
@@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (!res)
+ return -ENODEV;
priv->io_base = devm_ioport_map(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(priv->io_base))
- return PTR_ERR(priv->io_base);
+ if (!priv->io_base)
+ return -ENOMEM;
watchdog_set_drvdata(&priv->wdd, priv);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ceb5048de9a7..39b229f9e256 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -369,14 +369,20 @@ static enum bp_state reserve_additional_memory(void)
return BP_ECANCELED;
}
-static void xen_online_page(struct page *page)
+static void xen_online_page(struct page *page, unsigned int order)
{
- __online_page_set_limits(page);
+ unsigned long i, size = (1 << order);
+ unsigned long start_pfn = page_to_pfn(page);
+ struct page *p;
+ pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
mutex_lock(&balloon_mutex);
-
- __balloon_append(page);
-
+ for (i = 0; i < size; i++) {
+ p = pfn_to_page(start_pfn + i);
+ __online_page_set_limits(p);
+ __SetPageOffline(p);
+ __balloon_append(p);
+ }
mutex_unlock(&balloon_mutex);
}
@@ -441,6 +447,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
/* Relinquish the page back to the allocator. */
+ __ClearPageOffline(page);
free_reserved_page(page);
}
@@ -467,6 +474,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
state = BP_EAGAIN;
break;
}
+ __SetPageOffline(page);
adjust_managed_page_count(page, -1);
xenmem_reservation_scrub_page(page);
list_add(&page->lru, &pages);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 93194f3e7540..117e76b2f939 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
xen_have_vector_callback = 0;
return;
}
- pr_info("Xen HVM callback vector for event delivery is enabled\n");
+ pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
xen_hvm_callback_vector);
}
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 2e5d845b5091..7aa64d1b119c 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
/* write the data, then modify the indexes */
virt_wmb();
- if (ret < 0)
+ if (ret < 0) {
+ atomic_set(&map->read, 0);
intf->in_error = ret;
- else
+ } else
intf->in_prod = prod + ret;
/* update the indexes, then notify the other end */
virt_wmb();
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
static void pvcalls_sk_state_change(struct sock *sock)
{
struct sock_mapping *map = sock->sk_user_data;
- struct pvcalls_data_intf *intf;
if (map == NULL)
return;
- intf = map->ring;
- intf->in_error = -ENOTCONN;
+ atomic_inc(&map->read);
notify_remote_via_irq(map->irq);
}
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8f3e6f..8a249c95c193 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -31,6 +31,12 @@
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
#define PVCALLS_FRONT_MAX_SPIN 5000
+static struct proto pvcalls_proto = {
+ .name = "PVCalls",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct sock),
+};
+
struct pvcalls_bedata {
struct xen_pvcalls_front_ring ring;
grant_ref_t ref;
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
return ret;
}
+static void free_active_ring(struct sock_mapping *map)
+{
+ if (!map->active.ring)
+ return;
+
+ free_pages((unsigned long)map->active.data.in,
+ map->active.ring->ring_order);
+ free_page((unsigned long)map->active.ring);
+}
+
+static int alloc_active_ring(struct sock_mapping *map)
+{
+ void *bytes;
+
+ map->active.ring = (struct pvcalls_data_intf *)
+ get_zeroed_page(GFP_KERNEL);
+ if (!map->active.ring)
+ goto out;
+
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PVCALLS_RING_ORDER);
+ if (!bytes)
+ goto out;
+
+ map->active.data.in = bytes;
+ map->active.data.out = bytes +
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ return 0;
+
+out:
+ free_active_ring(map);
+ return -ENOMEM;
+}
+
static int create_active(struct sock_mapping *map, int *evtchn)
{
void *bytes;
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
*evtchn = -1;
init_waitqueue_head(&map->active.inflight_conn_req);
- map->active.ring = (struct pvcalls_data_intf *)
- __get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (map->active.ring == NULL)
- goto out_error;
- map->active.ring->ring_order = PVCALLS_RING_ORDER;
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- PVCALLS_RING_ORDER);
- if (bytes == NULL)
- goto out_error;
+ bytes = map->active.data.in;
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
map->active.ring->ref[i] = gnttab_grant_foreign_access(
pvcalls_front_dev->otherend_id,
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
pvcalls_front_dev->otherend_id,
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
- map->active.data.in = bytes;
- map->active.data.out = bytes +
- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
if (ret)
goto out_error;
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
out_error:
if (*evtchn >= 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
- free_page((unsigned long)map->active.ring);
return ret;
}
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+ ret = alloc_active_ring(map);
+ if (ret < 0) {
+ pvcalls_exit_sock(sock);
+ return ret;
+ }
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
ret = create_active(map, &evtchn);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
@@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf,
virt_mb();
size = pvcalls_queued(prod, cons, array_size);
- if (size >= array_size)
+ if (size > array_size)
return -EINVAL;
+ if (size == array_size)
+ return 0;
if (len > array_size - size)
len = array_size - size;
@@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
error = intf->in_error;
/* get pointers before reading from the ring */
virt_rmb();
- if (error < 0)
- return error;
size = pvcalls_queued(prod, cons, array_size);
masked_prod = pvcalls_mask(prod, array_size);
masked_cons = pvcalls_mask(cons, array_size);
if (size == 0)
- return 0;
+ return error ?: size;
if (len > size)
len = size;
@@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
}
}
- spin_lock(&bedata->socket_lock);
- ret = get_request(bedata, &req_id);
- if (ret < 0) {
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
- spin_unlock(&bedata->socket_lock);
+ pvcalls_exit_sock(sock);
+ return -ENOMEM;
+ }
+ ret = alloc_active_ring(map2);
+ if (ret < 0) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ kfree(map2);
pvcalls_exit_sock(sock);
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
- if (map2 == NULL) {
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map2);
+ kfree(map2);
pvcalls_exit_sock(sock);
- return -ENOMEM;
+ return ret;
}
+
ret = create_active(map2, &evtchn);
if (ret < 0) {
+ free_active_ring(map2);
kfree(map2);
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
@@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
received:
map2->sock = newsock;
- newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+ newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
if (!newsock->sk) {
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
@@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock)
spin_lock(&bedata->socket_lock);
list_del(&map->list);
spin_unlock(&bedata->socket_lock);
- if (READ_ONCE(map->passive.inflight_req_id) !=
- PVCALLS_INVALID_ID) {
+ if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
+ READ_ONCE(map->passive.inflight_req_id) != 0) {
pvcalls_front_free_map(bedata,
map->passive.accept_map);
}
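
The __write_ring() change above separates "ring full" (queued bytes equal the ring size, so nothing can be written right now) from "indexes corrupted" (queued bytes exceed the ring size). Below is a small standalone sketch of the same free-running-index arithmetic, assuming a power-of-two ring and with names chosen only for illustration.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u			/* must be a power of two */

/* Free-running producer/consumer indexes: the amount queued is simply
 * their difference, well-defined modulo 2^32. */
static uint32_t queued(uint32_t prod, uint32_t cons)
{
	return prod - cons;
}

static int write_bytes(uint32_t prod, uint32_t cons, uint32_t len)
{
	uint32_t used = queued(prod, cons);

	if (used > RING_SIZE)		/* indexes are corrupted */
		return -1;
	if (used == RING_SIZE)		/* ring is full: write nothing now */
		return 0;
	if (len > RING_SIZE - used)	/* clamp to the free space */
		len = RING_SIZE - used;
	return (int)len;
}

int main(void)
{
	printf("%d\n", write_bytes(10, 10, 5));	/* empty ring: 5 */
	printf("%d\n", write_bytes(18, 10, 5));	/* full ring: 0 */
	printf("%d\n", write_bytes(25, 10, 5));	/* corrupted: -1 */
	return 0;
}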
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 989cf872b98c..bb7888429be6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -645,7 +645,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#ifdef CONFIG_ARM
if (xen_get_dma_ops(dev)->mmap)
return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
dma_addr, size, attrs);
@@ -662,7 +662,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#ifdef CONFIG_ARM
if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
/*
diff --git a/fs/Makefile b/fs/Makefile
index 293733f61594..23fcd8c164a3 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -12,7 +12,8 @@ obj-y := open.o read_write.o file_table.o super.o \
attr.o bad_inode.o file.o filesystems.o namespace.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o splice.o sync.o utimes.o d_path.o \
- stack.o fs_struct.o statfs.o fs_pin.o nsfs.o
+ stack.o fs_struct.o statfs.o fs_pin.o nsfs.o \
+ fs_types.o
ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o block_dev.o direct-io.o mpage.o
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index cf445dbd5f2e..9de46116c749 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -173,6 +173,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
rcu_assign_pointer(cell->vl_servers, vllist);
cell->dns_expiry = TIME64_MAX;
+ __clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags);
} else {
cell->dns_expiry = ktime_get_real_seconds();
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 0568fd986821..e432bd27a2e7 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -208,7 +208,7 @@ again:
/* The new front of the queue now owns the state variables. */
next = list_entry(vnode->pending_locks.next,
struct file_lock, fl_u.afs.link);
- vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
goto again;
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
/* The new front of the queue now owns the state variables. */
next = list_entry(vnode->pending_locks.next,
struct file_lock, fl_u.afs.link);
- vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
afs_lock_may_be_available(vnode);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 6b17d3620414..1a4ce07fb406 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
valid = true;
} else {
- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
vnode->cb_v_break = vnode->volume->cb_v_break;
valid = false;
}
@@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode)
#endif
afs_put_permits(rcu_access_pointer(vnode->permit_cache));
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
_leave("");
}
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index 07bc10f076aa..d443e2bfa094 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus {
struct yfs_xdr_u64 max_quota;
struct yfs_xdr_u64 file_quota;
} __packed;
+
+enum yfs_lock_type {
+ yfs_LockNone = -1,
+ yfs_LockRead = 0,
+ yfs_LockWrite = 1,
+ yfs_LockExtend = 2,
+ yfs_LockRelease = 3,
+ yfs_LockMandatoryRead = 0x100,
+ yfs_LockMandatoryWrite = 0x101,
+ yfs_LockMandatoryExtend = 0x102,
+};
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a7b44863d502..2c588f9bbbda 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls;
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
+static void afs_delete_async_call(struct work_struct *);
static void afs_process_async_call(struct work_struct *);
static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
@@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call)
}
}
+static struct afs_call *afs_get_call(struct afs_call *call,
+ enum afs_call_trace why)
+{
+ int u = atomic_inc_return(&call->usage);
+
+ trace_afs_call(call, why, u,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+ return call;
+}
+
/*
* Queue the call for actual work.
*/
static void afs_queue_call_work(struct afs_call *call)
{
if (call->type->work) {
- int u = atomic_inc_return(&call->usage);
-
- trace_afs_call(call, afs_call_trace_work, u,
- atomic_read(&call->net->nr_outstanding_calls),
- __builtin_return_address(0));
-
INIT_WORK(&call->work, call->type->work);
+ afs_get_call(call, afs_call_trace_work);
if (!queue_work(afs_wq, &call->work))
afs_put_call(call);
}
@@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
}
}
+ /* If the call is going to be asynchronous, we need an extra ref for
+ * the call to hold itself so the caller need not hang on to its ref.
+ */
+ if (call->async)
+ afs_get_call(call, afs_call_trace_get);
+
/* create a call */
rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
(unsigned long)call,
@@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
goto error_do_abort;
}
- /* at this point, an async call may no longer exist as it may have
- * already completed */
- if (call->async)
+ /* Note that at this point, we may have received the reply or an abort
+ * - and an asynchronous call may already have completed.
+ */
+ if (call->async) {
+ afs_put_call(call);
return -EINPROGRESS;
+ }
return afs_wait_for_call_to_complete(call, ac);
error_do_abort:
- call->state = AFS_CALL_COMPLETE;
if (ret != -ECONNABORTED) {
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret, "KSD");
@@ -463,8 +478,24 @@ error_do_abort:
error_kill_call:
if (call->type->done)
call->type->done(call);
- afs_put_call(call);
+
+ /* We need to dispose of the extra ref we grabbed for an async call.
+ * The call, however, might be queued on afs_async_calls and we need to
+ * make sure we don't get any more notifications that might requeue it.
+ */
+ if (call->rxcall) {
+ rxrpc_kernel_end_call(call->net->socket, call->rxcall);
+ call->rxcall = NULL;
+ }
+ if (call->async) {
+ if (cancel_work_sync(&call->async_work))
+ afs_put_call(call);
+ afs_put_call(call);
+ }
+
ac->error = ret;
+ call->state = AFS_CALL_COMPLETE;
+ afs_put_call(call);
_leave(" = %d", ret);
return ret;
}
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 95d0761cdb34..155dc14caef9 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
if (vldb->fs_mask[i] & type_mask)
nr_servers++;
- slist = kzalloc(sizeof(struct afs_server_list) +
- sizeof(struct afs_server_entry) * nr_servers,
- GFP_KERNEL);
+ slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
if (!slist)
goto error;
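
The afs_alloc_server_list() change above swaps open-coded "header + n * element" arithmetic for struct_size(), which is overflow-aware. A standalone sketch of the same idea follows, using a hypothetical stand-in macro that saturates to SIZE_MAX so an overflowed request fails cleanly at allocation time; the struct names are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct server_entry { uint64_t id; };

struct server_list {			/* stand-in for afs_server_list */
	size_t nr_servers;
	struct server_entry servers[];	/* flexible array member */
};

/* Minimal stand-in for struct_size(): header plus n elements, saturating
 * to SIZE_MAX on overflow so the subsequent allocation fails. */
static size_t struct_size_demo(size_t header, size_t elem, size_t n)
{
	if (n && elem > (SIZE_MAX - header) / n)
		return SIZE_MAX;
	return header + n * elem;
}

int main(void)
{
	size_t n = 4;
	struct server_list *slist =
		calloc(1, struct_size_demo(sizeof(*slist),
					   sizeof(slist->servers[0]), n));
	if (!slist)
		return 1;
	slist->nr_servers = n;
	printf("allocated %zu servers\n", slist->nr_servers);
	free(slist);
	return 0;
}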
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 12658c1363ae..5aa57929e8c2 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
bp = xdr_encode_YFSFid(bp, &vnode->fid);
bp = xdr_encode_string(bp, name, namesz);
bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
- bp = xdr_encode_u32(bp, 0); /* ViceLockType */
+ bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
yfs_check_req(call, bp);
afs_use_fs_server(call, fc->cbi);
diff --git a/fs/aio.c b/fs/aio.c
index b906ff70c90f..38b741aef0bf 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -167,9 +167,13 @@ struct kioctx {
unsigned id;
};
+/*
+ * First field must be the file pointer in all the
+ * iocb unions! See also 'struct kiocb' in <linux/fs.h>
+ */
struct fsync_iocb {
- struct work_struct work;
struct file *file;
+ struct work_struct work;
bool datasync;
};
@@ -183,8 +187,15 @@ struct poll_iocb {
struct work_struct work;
};
+/*
+ * NOTE! Each of the iocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'ki_filp' in this struct.
+ */
struct aio_kiocb {
union {
+ struct file *ki_filp;
struct kiocb rw;
struct fsync_iocb fsync;
struct poll_iocb poll;
@@ -1060,6 +1071,8 @@ static inline void iocb_put(struct aio_kiocb *iocb)
{
if (refcount_read(&iocb->ki_refcnt) == 0 ||
refcount_dec_and_test(&iocb->ki_refcnt)) {
+ if (iocb->ki_filp)
+ fput(iocb->ki_filp);
percpu_ref_put(&iocb->ki_ctx->reqs);
kmem_cache_free(kiocb_cachep, iocb);
}
@@ -1424,7 +1437,6 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
file_end_write(kiocb->ki_filp);
}
- fput(kiocb->ki_filp);
aio_complete(iocb, res, res2);
}
@@ -1432,10 +1444,8 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
{
int ret;
- req->ki_filp = fget(iocb->aio_fildes);
- if (unlikely(!req->ki_filp))
- return -EBADF;
req->ki_complete = aio_complete_rw;
+ req->private = NULL;
req->ki_pos = iocb->aio_offset;
req->ki_flags = iocb_flags(req->ki_filp);
if (iocb->aio_flags & IOCB_FLAG_RESFD)
@@ -1450,7 +1460,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
ret = ioprio_check_cap(iocb->aio_reqprio);
if (ret) {
pr_debug("aio ioprio check cap error: %d\n", ret);
- goto out_fput;
+ return ret;
}
req->ki_ioprio = iocb->aio_reqprio;
@@ -1459,14 +1469,10 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
if (unlikely(ret))
- goto out_fput;
+ return ret;
req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
return 0;
-
-out_fput:
- fput(req->ki_filp);
- return ret;
}
static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
@@ -1520,24 +1526,19 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
if (ret)
return ret;
file = req->ki_filp;
-
- ret = -EBADF;
if (unlikely(!(file->f_mode & FMODE_READ)))
- goto out_fput;
+ return -EBADF;
ret = -EINVAL;
if (unlikely(!file->f_op->read_iter))
- goto out_fput;
+ return -EINVAL;
ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
if (ret)
- goto out_fput;
+ return ret;
ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
if (!ret)
aio_rw_done(req, call_read_iter(file, req, &iter));
kfree(iovec);
-out_fput:
- if (unlikely(ret))
- fput(file);
return ret;
}
@@ -1554,16 +1555,14 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
return ret;
file = req->ki_filp;
- ret = -EBADF;
if (unlikely(!(file->f_mode & FMODE_WRITE)))
- goto out_fput;
- ret = -EINVAL;
+ return -EBADF;
if (unlikely(!file->f_op->write_iter))
- goto out_fput;
+ return -EINVAL;
ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
if (ret)
- goto out_fput;
+ return ret;
ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
if (!ret) {
/*
@@ -1581,9 +1580,6 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
aio_rw_done(req, call_write_iter(file, req, &iter));
}
kfree(iovec);
-out_fput:
- if (unlikely(ret))
- fput(file);
return ret;
}
@@ -1593,7 +1589,6 @@ static void aio_fsync_work(struct work_struct *work)
int ret;
ret = vfs_fsync(req->file, req->datasync);
- fput(req->file);
aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
}
@@ -1604,13 +1599,8 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
iocb->aio_rw_flags))
return -EINVAL;
- req->file = fget(iocb->aio_fildes);
- if (unlikely(!req->file))
- return -EBADF;
- if (unlikely(!req->file->f_op->fsync)) {
- fput(req->file);
+ if (unlikely(!req->file->f_op->fsync))
return -EINVAL;
- }
req->datasync = datasync;
INIT_WORK(&req->work, aio_fsync_work);
@@ -1620,10 +1610,7 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
{
- struct file *file = iocb->poll.file;
-
aio_complete(iocb, mangle_poll(mask), 0);
- fput(file);
}
static void aio_poll_complete_work(struct work_struct *work)
@@ -1679,6 +1666,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
__poll_t mask = key_to_poll(key);
+ unsigned long flags;
req->woken = true;
@@ -1687,10 +1675,15 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (!(mask & req->events))
return 0;
- /* try to complete the iocb inline if we can: */
- if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+ /*
+ * Try to complete the iocb inline if we can. Use
+ * irqsave/irqrestore because not all filesystems (e.g. fuse)
+ * call this function with IRQs disabled and because IRQs
+ * have to be disabled before ctx_lock is obtained.
+ */
+ if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
list_del(&iocb->ki_list);
- spin_unlock(&iocb->ki_ctx->ctx_lock);
+ spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
list_del_init(&req->wait.entry);
aio_poll_complete(iocb, mask);
@@ -1742,9 +1735,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
INIT_WORK(&req->work, aio_poll_complete_work);
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
- req->file = fget(iocb->aio_fildes);
- if (unlikely(!req->file))
- return -EBADF;
req->head = NULL;
req->woken = false;
@@ -1787,10 +1777,8 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
spin_unlock_irq(&ctx->ctx_lock);
out:
- if (unlikely(apt.error)) {
- fput(req->file);
+ if (unlikely(apt.error))
return apt.error;
- }
if (mask)
aio_poll_complete(aiocb, mask);
@@ -1828,6 +1816,11 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
if (unlikely(!req))
goto out_put_reqs_available;
+ req->ki_filp = fget(iocb->aio_fildes);
+ ret = -EBADF;
+ if (unlikely(!req->ki_filp))
+ goto out_put_req;
+
if (iocb->aio_flags & IOCB_FLAG_RESFD) {
/*
* If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
@@ -2198,11 +2191,11 @@ SYSCALL_DEFINE6(io_pgetevents_time32,
#if defined(CONFIG_COMPAT_32BIT_TIME)
-COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
- compat_long_t, min_nr,
- compat_long_t, nr,
- struct io_event __user *, events,
- struct old_timespec32 __user *, timeout)
+SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
+ __s32, min_nr,
+ __s32, nr,
+ struct io_event __user *, events,
+ struct old_timespec32 __user *, timeout)
{
struct timespec64 t;
int ret;
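
The aio rework above relies on every member of the iocb union starting with the struct file pointer, so ki_filp can be read through the union regardless of which sub-struct is active and a single fput() in iocb_put() covers all request types. Here is a compact sketch of that layout rule with illustrative struct names (not the kernel's); it assumes the usual C behaviour that all union members start at offset 0.

#include <stdio.h>

struct file { int fd; };

struct rw_req    { struct file *filp; long pos; };	/* like struct kiocb */
struct fsync_req { struct file *filp; int datasync; };	/* like fsync_iocb   */

struct request {
	union {
		struct file *filp;	/* first field of every member below */
		struct rw_req rw;
		struct fsync_req fsync;
	};
};

int main(void)
{
	struct file f = { .fd = 3 };
	struct request req = { .rw = { .filp = &f, .pos = 0 } };

	/* Every union member places the file pointer at offset 0, so the
	 * shared field sees whatever the active sub-struct stored there. */
	printf("fd via shared field: %d\n", req.filp->fd);
	return 0;
}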
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index d441244b79df..28d9c2b1b3bb 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
pkt.len = dentry->d_name.len;
memcpy(pkt.name, dentry->d_name.name, pkt.len);
pkt.name[pkt.len] = '\0';
- dput(dentry);
if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
ret = -EFAULT;
@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
+ dput(dentry);
+
return ret;
}
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 0e8ea2d9a2bb..078992eee299 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -266,8 +266,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
}
root_inode = autofs_get_inode(s, S_IFDIR | 0755);
root = d_make_root(root_inode);
- if (!root)
+ if (!root) {
+ ret = -ENOMEM;
goto fail_ino;
+ }
pipe = NULL;
root->d_fsdata = ino;
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index ca9725f18e00..1fefd87eb4b4 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -29,97 +29,14 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-#include <asm/a.out-core.h>
static int load_aout_binary(struct linux_binprm *);
static int load_aout_library(struct file*);
-#ifdef CONFIG_COREDUMP
-/*
- * Routine writes a core dump image in the current directory.
- * Currently only a stub-function.
- *
- * Note that setuid/setgid files won't make a core-dump if the uid/gid
- * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
- * field, which also makes sure the core-dumps won't be recursive if the
- * dumping of the process results in another error..
- */
-static int aout_core_dump(struct coredump_params *cprm)
-{
- mm_segment_t fs;
- int has_dumped = 0;
- void __user *dump_start;
- int dump_size;
- struct user dump;
-#ifdef __alpha__
-# define START_DATA(u) ((void __user *)u.start_data)
-#else
-# define START_DATA(u) ((void __user *)((u.u_tsize << PAGE_SHIFT) + \
- u.start_code))
-#endif
-# define START_STACK(u) ((void __user *)u.start_stack)
-
- fs = get_fs();
- set_fs(KERNEL_DS);
- has_dumped = 1;
- strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
- dump.u_ar0 = offsetof(struct user, regs);
- dump.signal = cprm->siginfo->si_signo;
- aout_dump_thread(cprm->regs, &dump);
-
-/* If the size of the dump file exceeds the rlimit, then see what would happen
- if we wrote the stack, but not the data area. */
- if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
- dump.u_dsize = 0;
-
-/* Make sure we have enough room to write the stack and data areas. */
- if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
- dump.u_ssize = 0;
-
-/* make sure we actually have a data and stack area to dump */
- set_fs(USER_DS);
- if (!access_ok(START_DATA(dump), dump.u_dsize << PAGE_SHIFT))
- dump.u_dsize = 0;
- if (!access_ok(START_STACK(dump), dump.u_ssize << PAGE_SHIFT))
- dump.u_ssize = 0;
-
- set_fs(KERNEL_DS);
-/* struct user */
- if (!dump_emit(cprm, &dump, sizeof(dump)))
- goto end_coredump;
-/* Now dump all of the user data. Include malloced stuff as well */
- if (!dump_skip(cprm, PAGE_SIZE - sizeof(dump)))
- goto end_coredump;
-/* now we start writing out the user space info */
- set_fs(USER_DS);
-/* Dump the data area */
- if (dump.u_dsize != 0) {
- dump_start = START_DATA(dump);
- dump_size = dump.u_dsize << PAGE_SHIFT;
- if (!dump_emit(cprm, dump_start, dump_size))
- goto end_coredump;
- }
-/* Now prepare to dump the stack area */
- if (dump.u_ssize != 0) {
- dump_start = START_STACK(dump);
- dump_size = dump.u_ssize << PAGE_SHIFT;
- if (!dump_emit(cprm, dump_start, dump_size))
- goto end_coredump;
- }
-end_coredump:
- set_fs(fs);
- return has_dumped;
-}
-#else
-#define aout_core_dump NULL
-#endif
-
static struct linux_binfmt aout_format = {
.module = THIS_MODULE,
.load_binary = load_aout_binary,
.load_shlib = load_aout_library,
- .core_dump = aout_core_dump,
- .min_coredump = PAGE_SIZE
};
#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index d0078cbb718b..e996174cbfc0 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -14,13 +14,30 @@
#include <linux/err.h>
#include <linux/fs.h>
+static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
+static inline char *next_non_spacetab(char *first, const char *last)
+{
+ for (; first <= last; first++)
+ if (!spacetab(*first))
+ return first;
+ return NULL;
+}
+static inline char *next_terminator(char *first, const char *last)
+{
+ for (; first <= last; first++)
+ if (spacetab(*first) || !*first)
+ return first;
+ return NULL;
+}
+
static int load_script(struct linux_binprm *bprm)
{
const char *i_arg, *i_name;
- char *cp;
+ char *cp, *buf_end;
struct file *file;
int retval;
+ /* Not ours to exec if we don't start with "#!". */
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
@@ -33,23 +50,41 @@ static int load_script(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
return -ENOENT;
- /*
- * This section does the #! interpretation.
- * Sorta complicated, but hopefully it will work. -TYT
- */
-
+ /* Release since we are not mapping a binary into memory. */
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
- for (cp = bprm->buf+2;; cp++) {
- if (cp >= bprm->buf + BINPRM_BUF_SIZE)
+ /*
+ * This section handles parsing the #! line into separate
+ * interpreter path and argument strings. We must be careful
+ * because bprm->buf is not yet guaranteed to be NUL-terminated
+ * (though the buffer will have trailing NUL padding when the
+ * file size was smaller than the buffer size).
+ *
+ * We do not want to exec a truncated interpreter path, so either
+ * we find a newline (which indicates nothing is truncated), or
+ * we find a space/tab/NUL after the interpreter path (which
+ * itself may be preceded by spaces/tabs). Truncating the
+ * arguments is fine: the interpreter can re-read the script to
+ * parse them on its own.
+ */
+ buf_end = bprm->buf + sizeof(bprm->buf) - 1;
+ cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
+ if (!cp) {
+ cp = next_non_spacetab(bprm->buf + 2, buf_end);
+ if (!cp)
+ return -ENOEXEC; /* Entire buf is spaces/tabs */
+ /*
+ * If there is no later space/tab/NUL we must assume the
+ * interpreter path is truncated.
+ */
+ if (!next_terminator(cp, buf_end))
return -ENOEXEC;
- if (!*cp || (*cp == '\n'))
- break;
+ cp = buf_end;
}
+ /* NUL-terminate the buffer and any trailing spaces/tabs. */
*cp = '\0';
-
while (cp > bprm->buf) {
cp--;
if ((*cp == ' ') || (*cp == '\t'))
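
The load_script() rewrite above refuses to run an interpreter whose path might have been cut off by the fixed-size bprm->buf: either a newline proves the whole line is present, or a space/tab/NUL after the path proves the path itself is complete. The following userspace sketch makes the same decision, assuming a hypothetical 16-byte buffer in place of BINPRM_BUF_SIZE.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 16		/* hypothetical stand-in for BINPRM_BUF_SIZE */

static bool spacetab(char c) { return c == ' ' || c == '\t'; }

static const char *next_non_spacetab(const char *first, const char *last)
{
	for (; first <= last; first++)
		if (!spacetab(*first))
			return first;
	return NULL;
}

static const char *next_terminator(const char *first, const char *last)
{
	for (; first <= last; first++)
		if (spacetab(*first) || !*first)
			return first;
	return NULL;
}

/* Returns true if buf holds a "#!" line whose interpreter path is provably
 * not truncated by the BUF_SIZE window. */
static bool shebang_ok(const char *buf)
{
	const char *end = buf + BUF_SIZE - 1;
	const char *cp;

	if (buf[0] != '#' || buf[1] != '!')
		return false;
	if (memchr(buf, '\n', BUF_SIZE))	/* complete line seen */
		return true;
	cp = next_non_spacetab(buf + 2, end);
	if (!cp)				/* nothing but spaces/tabs */
		return false;
	return next_terminator(cp, end) != NULL;
}

int main(void)
{
	char a[BUF_SIZE] = "#!/bin/sh\n";
	char b[BUF_SIZE];

	memset(b, 'x', sizeof(b));	/* path longer than the window */
	memcpy(b, "#!", 2);

	printf("%d %d\n", shebang_ok(a), shebang_ok(b));	/* 1 0 */
	return 0;
}

Truncated arguments after the path remain acceptable, as the comment in the patch notes, because the interpreter re-reads the script itself.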
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c546cdce77e6..58a4c1217fa8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
}
EXPORT_SYMBOL(invalidate_bdev);
+static void set_init_blocksize(struct block_device *bdev)
+{
+ unsigned bsize = bdev_logical_block_size(bdev);
+ loff_t size = i_size_read(bdev->bd_inode);
+
+ while (bsize < PAGE_SIZE) {
+ if (size & bsize)
+ break;
+ bsize <<= 1;
+ }
+ bdev->bd_block_size = bsize;
+ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+}
+
int set_blocksize(struct block_device *bdev, int size)
{
/* Size must be a power of two, and between 512 and PAGE_SIZE */
@@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change);
void bd_set_size(struct block_device *bdev, loff_t size)
{
- unsigned bsize = bdev_logical_block_size(bdev);
-
inode_lock(bdev->bd_inode);
i_size_write(bdev->bd_inode, size);
inode_unlock(bdev->bd_inode);
- while (bsize < PAGE_SIZE) {
- if (size & bsize)
- break;
- bsize <<= 1;
- }
- bdev->bd_block_size = bsize;
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
@@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
}
}
- if (!ret)
+ if (!ret) {
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+ set_init_blocksize(bdev);
+ }
/*
* If the device is invalidated, rescan partition
@@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
goto out_clear;
}
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
+ set_init_blocksize(bdev);
}
if (bdev->bd_bdi == &noop_backing_dev_info)
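
The new set_init_blocksize() above picks the largest power-of-two block size, up to the page size, that evenly divides the device capacity, starting from the logical block size; moving it out of bd_set_size() keeps later capacity updates from overriding a block size a filesystem has already set. A standalone sketch of the size computation follows, with PAGE_SIZE assumed to be 4096 for the example.

#include <stdio.h>

#define PAGE_SIZE 4096u			/* assumption for the sketch */

/* Double the block size while it still divides the device size evenly,
 * capping at PAGE_SIZE: the same loop as set_init_blocksize(). */
static unsigned int init_blocksize(unsigned long long size,
				   unsigned int logical_block_size)
{
	unsigned int bsize = logical_block_size;

	while (bsize < PAGE_SIZE) {
		if (size & bsize)	/* size is not a multiple of 2*bsize */
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	/* 1 MiB device, 512-byte sectors: block size grows to the page size. */
	printf("%u\n", init_blocksize(1024 * 1024, 512));	 /* 4096 */
	/* Capacity not 4K-aligned: stops at the largest divisor. */
	printf("%u\n", init_blocksize(1024 * 1024 + 1024, 512)); /* 1024 */
	return 0;
}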
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 3b66c957ea6f..5810463dc6d2 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -9,6 +9,7 @@
#include <linux/posix_acl_xattr.h>
#include <linux/posix_acl.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/slab.h>
#include "ctree.h"
@@ -72,8 +73,16 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
}
if (acl) {
+ unsigned int nofs_flag;
+
size = posix_acl_xattr_size(acl->a_count);
+ /*
+ * We're holding a transaction handle, so use a NOFS memory
+ * allocation context to avoid deadlock if reclaim happens.
+ */
+ nofs_flag = memalloc_nofs_save();
value = kmalloc(size, GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
if (!value) {
ret = -ENOMEM;
goto out;
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index d522494698fa..122cb97c7909 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -139,13 +139,11 @@ __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
}
if (flags & WQ_HIGHPRI)
- ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
- ret->current_active, "btrfs",
- name);
+ ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
+ ret->current_active, name);
else
- ret->normal_wq = alloc_workqueue("%s-%s", flags,
- ret->current_active, "btrfs",
- name);
+ ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
+ ret->current_active, name);
if (!ret->normal_wq) {
kfree(ret);
return NULL;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 78556447e1d5..11459fe84a29 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -712,7 +712,7 @@ out:
* read tree blocks and add keys where required.
*/
static int add_missing_keys(struct btrfs_fs_info *fs_info,
- struct preftrees *preftrees)
+ struct preftrees *preftrees, bool lock)
{
struct prelim_ref *ref;
struct extent_buffer *eb;
@@ -737,12 +737,14 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
free_extent_buffer(eb);
return -EIO;
}
- btrfs_tree_read_lock(eb);
+ if (lock)
+ btrfs_tree_read_lock(eb);
if (btrfs_header_level(eb) == 0)
btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
else
btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
- btrfs_tree_read_unlock(eb);
+ if (lock)
+ btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
cond_resched();
@@ -1227,7 +1229,7 @@ again:
btrfs_release_path(path);
- ret = add_missing_keys(fs_info, &preftrees);
+ ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
if (ret)
goto out;
@@ -1288,11 +1290,15 @@ again:
ret = -EIO;
goto out;
}
- btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+
+ if (!path->skip_locking) {
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_read(eb);
+ }
ret = find_extent_in_eb(eb, bytenr,
*extent_item_pos, &eie, ignore_offset);
- btrfs_tree_read_unlock_blocking(eb);
+ if (!path->skip_locking)
+ btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
if (ret < 0)
goto out;
@@ -1650,7 +1656,7 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
/* make sure we can use eb after releasing the path */
if (eb != eb_in) {
if (!path->skip_locking)
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb);
path->nodes[0] = NULL;
path->locks[0] = 0;
}
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 548057630b69..eb8e20b740d6 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -730,6 +730,28 @@ struct heuristic_ws {
struct list_head list;
};
+static struct workspace_manager heuristic_wsm;
+
+static void heuristic_init_workspace_manager(void)
+{
+ btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
+}
+
+static void heuristic_cleanup_workspace_manager(void)
+{
+ btrfs_cleanup_workspace_manager(&heuristic_wsm);
+}
+
+static struct list_head *heuristic_get_workspace(unsigned int level)
+{
+ return btrfs_get_workspace(&heuristic_wsm, level);
+}
+
+static void heuristic_put_workspace(struct list_head *ws)
+{
+ btrfs_put_workspace(&heuristic_wsm, ws);
+}
+
static void free_heuristic_ws(struct list_head *ws)
{
struct heuristic_ws *workspace;
@@ -742,7 +764,7 @@ static void free_heuristic_ws(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *alloc_heuristic_ws(void)
+static struct list_head *alloc_heuristic_ws(unsigned int level)
{
struct heuristic_ws *ws;
@@ -769,65 +791,59 @@ fail:
return ERR_PTR(-ENOMEM);
}
-struct workspaces_list {
- struct list_head idle_ws;
- spinlock_t ws_lock;
- /* Number of free workspaces */
- int free_ws;
- /* Total number of allocated workspaces */
- atomic_t total_ws;
- /* Waiters for a free workspace */
- wait_queue_head_t ws_wait;
+const struct btrfs_compress_op btrfs_heuristic_compress = {
+ .init_workspace_manager = heuristic_init_workspace_manager,
+ .cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
+ .get_workspace = heuristic_get_workspace,
+ .put_workspace = heuristic_put_workspace,
+ .alloc_workspace = alloc_heuristic_ws,
+ .free_workspace = free_heuristic_ws,
};
-static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
-
-static struct workspaces_list btrfs_heuristic_ws;
-
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
+ /* The heuristic is represented as compression type 0 */
+ &btrfs_heuristic_compress,
&btrfs_zlib_compress,
&btrfs_lzo_compress,
&btrfs_zstd_compress,
};
-void __init btrfs_init_compress(void)
+void btrfs_init_workspace_manager(struct workspace_manager *wsm,
+ const struct btrfs_compress_op *ops)
{
struct list_head *workspace;
- int i;
- INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
- spin_lock_init(&btrfs_heuristic_ws.ws_lock);
- atomic_set(&btrfs_heuristic_ws.total_ws, 0);
- init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
+ wsm->ops = ops;
- workspace = alloc_heuristic_ws();
+ INIT_LIST_HEAD(&wsm->idle_ws);
+ spin_lock_init(&wsm->ws_lock);
+ atomic_set(&wsm->total_ws, 0);
+ init_waitqueue_head(&wsm->ws_wait);
+
+ /*
+ * Preallocate one workspace for each compression type so we can
+ * guarantee forward progress in the worst case
+ */
+ workspace = wsm->ops->alloc_workspace(0);
if (IS_ERR(workspace)) {
pr_warn(
- "BTRFS: cannot preallocate heuristic workspace, will try later\n");
+ "BTRFS: cannot preallocate compression workspace, will try later\n");
} else {
- atomic_set(&btrfs_heuristic_ws.total_ws, 1);
- btrfs_heuristic_ws.free_ws = 1;
- list_add(workspace, &btrfs_heuristic_ws.idle_ws);
+ atomic_set(&wsm->total_ws, 1);
+ wsm->free_ws = 1;
+ list_add(workspace, &wsm->idle_ws);
}
+}
- for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
- INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
- spin_lock_init(&btrfs_comp_ws[i].ws_lock);
- atomic_set(&btrfs_comp_ws[i].total_ws, 0);
- init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
+void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
+{
+ struct list_head *ws;
- /*
- * Preallocate one workspace for each compression type so
- * we can guarantee forward progress in the worst case
- */
- workspace = btrfs_compress_op[i]->alloc_workspace();
- if (IS_ERR(workspace)) {
- pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
- } else {
- atomic_set(&btrfs_comp_ws[i].total_ws, 1);
- btrfs_comp_ws[i].free_ws = 1;
- list_add(workspace, &btrfs_comp_ws[i].idle_ws);
- }
+ while (!list_empty(&wsman->idle_ws)) {
+ ws = wsman->idle_ws.next;
+ list_del(ws);
+ wsman->ops->free_workspace(ws);
+ atomic_dec(&wsman->total_ws);
}
}
@@ -837,11 +853,11 @@ void __init btrfs_init_compress(void)
 * Preallocation makes a forward progress guarantee and we do not return
* errors.
*/
-static struct list_head *__find_workspace(int type, bool heuristic)
+struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
+ unsigned int level)
{
struct list_head *workspace;
int cpus = num_online_cpus();
- int idx = type - 1;
unsigned nofs_flag;
struct list_head *idle_ws;
spinlock_t *ws_lock;
@@ -849,19 +865,11 @@ static struct list_head *__find_workspace(int type, bool heuristic)
wait_queue_head_t *ws_wait;
int *free_ws;
- if (heuristic) {
- idle_ws = &btrfs_heuristic_ws.idle_ws;
- ws_lock = &btrfs_heuristic_ws.ws_lock;
- total_ws = &btrfs_heuristic_ws.total_ws;
- ws_wait = &btrfs_heuristic_ws.ws_wait;
- free_ws = &btrfs_heuristic_ws.free_ws;
- } else {
- idle_ws = &btrfs_comp_ws[idx].idle_ws;
- ws_lock = &btrfs_comp_ws[idx].ws_lock;
- total_ws = &btrfs_comp_ws[idx].total_ws;
- ws_wait = &btrfs_comp_ws[idx].ws_wait;
- free_ws = &btrfs_comp_ws[idx].free_ws;
- }
+ idle_ws = &wsm->idle_ws;
+ ws_lock = &wsm->ws_lock;
+ total_ws = &wsm->total_ws;
+ ws_wait = &wsm->ws_wait;
+ free_ws = &wsm->free_ws;
again:
spin_lock(ws_lock);
@@ -892,10 +900,7 @@ again:
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- if (heuristic)
- workspace = alloc_heuristic_ws();
- else
- workspace = btrfs_compress_op[idx]->alloc_workspace();
+ workspace = wsm->ops->alloc_workspace(level);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
@@ -926,85 +931,47 @@ again:
return workspace;
}
-static struct list_head *find_workspace(int type)
+static struct list_head *get_workspace(int type, int level)
{
- return __find_workspace(type, false);
+ return btrfs_compress_op[type]->get_workspace(level);
}
/*
* put a workspace struct back on the list or free it if we have enough
* idle ones sitting around
*/
-static void __free_workspace(int type, struct list_head *workspace,
- bool heuristic)
+void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
{
- int idx = type - 1;
struct list_head *idle_ws;
spinlock_t *ws_lock;
atomic_t *total_ws;
wait_queue_head_t *ws_wait;
int *free_ws;
- if (heuristic) {
- idle_ws = &btrfs_heuristic_ws.idle_ws;
- ws_lock = &btrfs_heuristic_ws.ws_lock;
- total_ws = &btrfs_heuristic_ws.total_ws;
- ws_wait = &btrfs_heuristic_ws.ws_wait;
- free_ws = &btrfs_heuristic_ws.free_ws;
- } else {
- idle_ws = &btrfs_comp_ws[idx].idle_ws;
- ws_lock = &btrfs_comp_ws[idx].ws_lock;
- total_ws = &btrfs_comp_ws[idx].total_ws;
- ws_wait = &btrfs_comp_ws[idx].ws_wait;
- free_ws = &btrfs_comp_ws[idx].free_ws;
- }
+ idle_ws = &wsm->idle_ws;
+ ws_lock = &wsm->ws_lock;
+ total_ws = &wsm->total_ws;
+ ws_wait = &wsm->ws_wait;
+ free_ws = &wsm->free_ws;
spin_lock(ws_lock);
if (*free_ws <= num_online_cpus()) {
- list_add(workspace, idle_ws);
+ list_add(ws, idle_ws);
(*free_ws)++;
spin_unlock(ws_lock);
goto wake;
}
spin_unlock(ws_lock);
- if (heuristic)
- free_heuristic_ws(workspace);
- else
- btrfs_compress_op[idx]->free_workspace(workspace);
+ wsm->ops->free_workspace(ws);
atomic_dec(total_ws);
wake:
cond_wake_up(ws_wait);
}
-static void free_workspace(int type, struct list_head *ws)
+static void put_workspace(int type, struct list_head *ws)
{
- return __free_workspace(type, ws, false);
-}
-
-/*
- * cleanup function for module exit
- */
-static void free_workspaces(void)
-{
- struct list_head *workspace;
- int i;
-
- while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
- workspace = btrfs_heuristic_ws.idle_ws.next;
- list_del(workspace);
- free_heuristic_ws(workspace);
- atomic_dec(&btrfs_heuristic_ws.total_ws);
- }
-
- for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
- while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
- workspace = btrfs_comp_ws[i].idle_ws.next;
- list_del(workspace);
- btrfs_compress_op[i]->free_workspace(workspace);
- atomic_dec(&btrfs_comp_ws[i].total_ws);
- }
- }
+ return btrfs_compress_op[type]->put_workspace(ws);
}
/*
@@ -1036,18 +1003,17 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
unsigned long *total_in,
unsigned long *total_out)
{
+ int type = btrfs_compress_type(type_level);
+ int level = btrfs_compress_level(type_level);
struct list_head *workspace;
int ret;
- int type = type_level & 0xF;
-
- workspace = find_workspace(type);
- btrfs_compress_op[type - 1]->set_level(workspace, type_level);
- ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
+ workspace = get_workspace(type, level);
+ ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
start, pages,
out_pages,
total_in, total_out);
- free_workspace(type, workspace);
+ put_workspace(type, workspace);
return ret;
}
@@ -1071,9 +1037,9 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
int ret;
int type = cb->compress_type;
- workspace = find_workspace(type);
- ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
- free_workspace(type, workspace);
+ workspace = get_workspace(type, 0);
+ ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
+ put_workspace(type, workspace);
return ret;
}
@@ -1089,19 +1055,29 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
struct list_head *workspace;
int ret;
- workspace = find_workspace(type);
-
- ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
+ workspace = get_workspace(type, 0);
+ ret = btrfs_compress_op[type]->decompress(workspace, data_in,
dest_page, start_byte,
srclen, destlen);
+ put_workspace(type, workspace);
- free_workspace(type, workspace);
return ret;
}
+void __init btrfs_init_compress(void)
+{
+ int i;
+
+ for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
+ btrfs_compress_op[i]->init_workspace_manager();
+}
+
void __cold btrfs_exit_compress(void)
{
- free_workspaces();
+ int i;
+
+ for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
+ btrfs_compress_op[i]->cleanup_workspace_manager();
}
/*
@@ -1512,7 +1488,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
*/
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
- struct list_head *ws_list = __find_workspace(0, true);
+ struct list_head *ws_list = get_workspace(0, 0);
struct heuristic_ws *ws;
u32 i;
u8 byte;
@@ -1581,18 +1557,29 @@ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
}
out:
- __free_workspace(0, ws_list, true);
+ put_workspace(0, ws_list);
return ret;
}
-unsigned int btrfs_compress_str2level(const char *str)
+/*
+ * Convert the compression suffix (e.g. after "zlib", starting with ":") to a
+ * level; an unrecognized string sets the default level
+ */
+unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
- if (strncmp(str, "zlib", 4) != 0)
+ unsigned int level = 0;
+ int ret;
+
+ if (!type)
return 0;
- /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
- if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
- return str[5] - '0';
+ if (str[0] == ':') {
+ ret = kstrtouint(str + 1, 10, &level);
+ if (ret)
+ level = 0;
+ }
+
+ level = btrfs_compress_op[type]->set_level(level);
- return BTRFS_ZLIB_DEFAULT_LEVEL;
+ return level;
}
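
The reworked btrfs_compress_str2level() above accepts any ":<number>" suffix and leaves range clamping to the per-type set_level() callback instead of hard-coding the zlib 1-9 check. A userspace sketch of that split follows; the clamp function is only an illustrative stand-in for a ->set_level() callback, and the default level of 3 is an assumption for the example.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a per-type ->set_level() callback: bound the requested
 * level and map 0 (or unparseable input) to a default. */
static unsigned int zlib_set_level(unsigned int level)
{
	if (level == 0)
		return 3;		/* assumed default for the sketch */
	return level > 9 ? 9 : level;
}

/* Parse the ":<level>" suffix after a compression type name, e.g. the
 * "zlib:7" form: everything after ':' is the requested level. */
static unsigned int str2level(const char *suffix)
{
	unsigned int level = 0;

	if (suffix[0] == ':') {
		char *end;
		unsigned long val = strtoul(suffix + 1, &end, 10);

		level = (*end == '\0') ? (unsigned int)val : 0;
	}
	return zlib_set_level(level);
}

int main(void)
{
	printf("%u %u %u\n", str2level(":7"), str2level(":42"), str2level(""));
	/* prints: 7 9 3 */
	return 0;
}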
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index ddda9b80bf20..9976fe0f7526 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -64,6 +64,16 @@ struct compressed_bio {
u32 sums;
};
+static inline unsigned int btrfs_compress_type(unsigned int type_level)
+{
+ return (type_level & 0xF);
+}
+
+static inline unsigned int btrfs_compress_level(unsigned int type_level)
+{
+ return ((type_level & 0xF0) >> 4);
+}
+
void __init btrfs_init_compress(void);
void __cold btrfs_exit_compress(void);
@@ -87,7 +97,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
-unsigned btrfs_compress_str2level(const char *str);
+unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
@@ -97,8 +107,35 @@ enum btrfs_compression_type {
BTRFS_COMPRESS_TYPES = 3,
};
+struct workspace_manager {
+ const struct btrfs_compress_op *ops;
+ struct list_head idle_ws;
+ spinlock_t ws_lock;
+ /* Number of free workspaces */
+ int free_ws;
+ /* Total number of allocated workspaces */
+ atomic_t total_ws;
+ /* Waiters for a free workspace */
+ wait_queue_head_t ws_wait;
+};
+
+void btrfs_init_workspace_manager(struct workspace_manager *wsm,
+ const struct btrfs_compress_op *ops);
+struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
+ unsigned int level);
+void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
+void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm);
+
struct btrfs_compress_op {
- struct list_head *(*alloc_workspace)(void);
+ void (*init_workspace_manager)(void);
+
+ void (*cleanup_workspace_manager)(void);
+
+ struct list_head *(*get_workspace)(unsigned int level);
+
+ void (*put_workspace)(struct list_head *ws);
+
+ struct list_head *(*alloc_workspace)(unsigned int level);
void (*free_workspace)(struct list_head *workspace);
@@ -119,9 +156,18 @@ struct btrfs_compress_op {
unsigned long start_byte,
size_t srclen, size_t destlen);
- void (*set_level)(struct list_head *ws, unsigned int type);
+ /*
+ * This bounds the level set by the user to be within range of a
+ * particular compression type. It returns the level that will be used
+ * if the level is out of bounds or the default if 0 is passed in.
+ */
+ unsigned int (*set_level)(unsigned int level);
};
+/* The heuristic workspaces are managed via the 0th workspace manager */
+#define BTRFS_NR_WORKSPACE_MANAGERS (BTRFS_COMPRESS_TYPES + 1)
+
+extern const struct btrfs_compress_op btrfs_heuristic_compress;
extern const struct btrfs_compress_op btrfs_zlib_compress;
extern const struct btrfs_compress_op btrfs_lzo_compress;
extern const struct btrfs_compress_op btrfs_zstd_compress;
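
The btrfs_compress_type()/btrfs_compress_level() helpers added above unpack a combined type_level value in which the compression type occupies the low nibble and the level the next nibble. A tiny sketch of the packing and unpacking, with the inverse pack helper added only for illustration:

#include <stdio.h>

/* Pack a compression type (low 4 bits) and level (bits 4-7) into one value,
 * mirroring what btrfs_compress_type()/btrfs_compress_level() decode. */
static unsigned int pack_type_level(unsigned int type, unsigned int level)
{
	return (type & 0xF) | ((level & 0xF) << 4);
}

static unsigned int unpack_type(unsigned int type_level)
{
	return type_level & 0xF;
}

static unsigned int unpack_level(unsigned int type_level)
{
	return (type_level & 0xF0) >> 4;
}

int main(void)
{
	unsigned int tl = pack_type_level(1 /* e.g. zlib */, 6);

	printf("type=%u level=%u\n", unpack_type(tl), unpack_level(tl));
	return 0;
}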
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d92462fe66c8..324df36d28bf 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -13,6 +13,7 @@
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
+#include "qgroup.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path, int level);
@@ -45,11 +46,18 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
if (!p->nodes[i] || !p->locks[i])
continue;
- btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
- if (p->locks[i] == BTRFS_READ_LOCK)
+ /*
+ * If we currently have a spinning reader or writer lock this
+ * will bump the count of blocking holders and drop the
+ * spinlock.
+ */
+ if (p->locks[i] == BTRFS_READ_LOCK) {
+ btrfs_set_lock_blocking_read(p->nodes[i]);
p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
- else if (p->locks[i] == BTRFS_WRITE_LOCK)
+ } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
+ btrfs_set_lock_blocking_write(p->nodes[i]);
p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
+ }
}
}
@@ -968,6 +976,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
return 0;
}
+static struct extent_buffer *alloc_tree_block_no_bg_flush(
+ struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 parent_start,
+ const struct btrfs_disk_key *disk_key,
+ int level,
+ u64 hint,
+ u64 empty_size)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_buffer *ret;
+
+ /*
+ * If we are COWing a node/leaf from the extent, chunk, device or free
+ * space trees, make sure that we do not finish block group creation of
+ * pending block groups. We do this to avoid a deadlock.
+ * COWing can result in allocation of a new chunk, and flushing pending
+ * block groups (btrfs_create_pending_block_groups()) can be triggered
+ * when finishing allocation of a new chunk. Creation of a pending block
+ * group modifies the extent, chunk, device and free space trees,
+ * therefore we could deadlock with ourselves since we are holding a
+ * lock on an extent buffer that btrfs_create_pending_block_groups() may
+ * try to COW later.
+ * For similar reasons, we also need to delay flushing pending block
+ * groups when splitting a leaf or node, from one of those trees, since
+ * we are holding a write lock on it and its parent or when inserting a
+ * new root node for one of those trees.
+ */
+ if (root == fs_info->extent_root ||
+ root == fs_info->chunk_root ||
+ root == fs_info->dev_root ||
+ root == fs_info->free_space_root)
+ trans->can_flush_pending_bgs = false;
+
+ ret = btrfs_alloc_tree_block(trans, root, parent_start,
+ root->root_key.objectid, disk_key, level,
+ hint, empty_size);
+ trans->can_flush_pending_bgs = true;
+
+ return ret;
+}
+
/*
* does the dirty work in cow of a single block. The parent block (if
* supplied) is updated to point to the new cow copy. The new buffer is marked
@@ -1015,26 +1065,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
parent_start = parent->start;
- /*
- * If we are COWing a node/leaf from the extent, chunk or device trees,
- * make sure that we do not finish block group creation of pending block
- * groups. We do this to avoid a deadlock.
- * COWing can result in allocation of a new chunk, and flushing pending
- * block groups (btrfs_create_pending_block_groups()) can be triggered
- * when finishing allocation of a new chunk. Creation of a pending block
- * group modifies the extent, chunk and device trees, therefore we could
- * deadlock with ourselves since we are holding a lock on an extent
- * buffer that btrfs_create_pending_block_groups() may try to COW later.
- */
- if (root == fs_info->extent_root ||
- root == fs_info->chunk_root ||
- root == fs_info->dev_root)
- trans->can_flush_pending_bgs = false;
-
- cow = btrfs_alloc_tree_block(trans, root, parent_start,
- root->root_key.objectid, &disk_key, level,
- search_start, empty_size);
- trans->can_flush_pending_bgs = true;
+ cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
+ level, search_start, empty_size);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -1264,7 +1296,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
return eb;
btrfs_set_path_blocking(path);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb);
if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
BUG_ON(tm->slot != 0);
@@ -1354,7 +1386,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
free_extent_buffer(eb_root);
eb = alloc_dummy_extent_buffer(fs_info, logical);
} else {
- btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb_root);
eb = btrfs_clone_extent_buffer(eb_root);
btrfs_tree_read_unlock_blocking(eb_root);
free_extent_buffer(eb_root);
@@ -1462,9 +1494,16 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
search_start = buf->start & ~((u64)SZ_1G - 1);
if (parent)
- btrfs_set_lock_blocking(parent);
- btrfs_set_lock_blocking(buf);
+ btrfs_set_lock_blocking_write(parent);
+ btrfs_set_lock_blocking_write(buf);
+ /*
+ * Before CoWing this block for later modification, check if it's
+ * the subtree root and do the delayed subtree trace if needed.
+ *
+ * Also, we don't care about the error, as it's handled internally.
+ */
+ btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
ret = __btrfs_cow_block(trans, root, buf, parent,
parent_slot, cow_ret, search_start, 0);
@@ -1558,7 +1597,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
if (parent_nritems <= 1)
return 0;
- btrfs_set_lock_blocking(parent);
+ btrfs_set_lock_blocking_write(parent);
for (i = start_slot; i <= end_slot; i++) {
struct btrfs_key first_key;
@@ -1617,7 +1656,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
search_start = last_block;
btrfs_tree_lock(cur);
- btrfs_set_lock_blocking(cur);
+ btrfs_set_lock_blocking_write(cur);
err = __btrfs_cow_block(trans, root, cur, parent, i,
&cur, search_start,
min(16 * blocksize,
@@ -1832,7 +1871,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
btrfs_tree_lock(child);
- btrfs_set_lock_blocking(child);
+ btrfs_set_lock_blocking_write(child);
ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
if (ret) {
btrfs_tree_unlock(child);
@@ -1870,7 +1909,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (left) {
btrfs_tree_lock(left);
- btrfs_set_lock_blocking(left);
+ btrfs_set_lock_blocking_write(left);
wret = btrfs_cow_block(trans, root, left,
parent, pslot - 1, &left);
if (wret) {
@@ -1885,7 +1924,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (right) {
btrfs_tree_lock(right);
- btrfs_set_lock_blocking(right);
+ btrfs_set_lock_blocking_write(right);
wret = btrfs_cow_block(trans, root, right,
parent, pslot + 1, &right);
if (wret) {
@@ -2048,7 +2087,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
u32 left_nr;
btrfs_tree_lock(left);
- btrfs_set_lock_blocking(left);
+ btrfs_set_lock_blocking_write(left);
left_nr = btrfs_header_nritems(left);
if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -2103,7 +2142,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
u32 right_nr;
btrfs_tree_lock(right);
- btrfs_set_lock_blocking(right);
+ btrfs_set_lock_blocking_write(right);
right_nr = btrfs_header_nritems(right);
if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -2505,26 +2544,6 @@ done:
return ret;
}
-static void key_search_validate(struct extent_buffer *b,
- const struct btrfs_key *key,
- int level)
-{
-#ifdef CONFIG_BTRFS_ASSERT
- struct btrfs_disk_key disk_key;
-
- btrfs_cpu_key_to_disk(&disk_key, key);
-
- if (level == 0)
- ASSERT(!memcmp_extent_buffer(b, &disk_key,
- offsetof(struct btrfs_leaf, items[0].key),
- sizeof(disk_key)));
- else
- ASSERT(!memcmp_extent_buffer(b, &disk_key,
- offsetof(struct btrfs_node, ptrs[0].key),
- sizeof(disk_key)));
-#endif
-}
-
static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
int level, int *prev_cmp, int *slot)
{
@@ -2533,7 +2552,6 @@ static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
return *prev_cmp;
}
- key_search_validate(b, key, level);
*slot = 0;
return 0;
@@ -2981,6 +2999,8 @@ again:
*/
prev_cmp = -1;
ret = key_search(b, key, level, &prev_cmp, &slot);
+ if (ret < 0)
+ goto done;
if (level != 0) {
int dec = 0;
@@ -3343,8 +3363,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(lower, &lower_key, 0);
- c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
- &lower_key, level, root->node->start, 0);
+ c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
+ root->node->start, 0);
if (IS_ERR(c))
return PTR_ERR(c);
@@ -3473,8 +3493,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
mid = (c_nritems + 1) / 2;
btrfs_node_key(c, &disk_key, mid);
- split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
- &disk_key, level, c->start, 0);
+ split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
+ c->start, 0);
if (IS_ERR(split))
return PTR_ERR(split);
@@ -3747,7 +3767,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
return 1;
btrfs_tree_lock(right);
- btrfs_set_lock_blocking(right);
+ btrfs_set_lock_blocking_write(right);
free_space = btrfs_leaf_free_space(fs_info, right);
if (free_space < data_size)
@@ -3981,7 +4001,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
return 1;
btrfs_tree_lock(left);
- btrfs_set_lock_blocking(left);
+ btrfs_set_lock_blocking_write(left);
free_space = btrfs_leaf_free_space(fs_info, left);
if (free_space < data_size) {
@@ -4258,8 +4278,8 @@ again:
else
btrfs_item_key(l, &disk_key, mid);
- right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
- &disk_key, 0, l->start, 0);
+ right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
+ l->start, 0);
if (IS_ERR(right))
return PTR_ERR(right);
@@ -5132,6 +5152,10 @@ again:
nritems = btrfs_header_nritems(cur);
level = btrfs_header_level(cur);
sret = btrfs_bin_search(cur, min_key, level, &slot);
+ if (sret < 0) {
+ ret = sret;
+ goto out;
+ }
/* at the lowest level, we're done, setup the path and exit */
if (level == path->lowest_level) {
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0a68cf7032f5..85140913c0f5 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
+struct btrfs_delayed_ref_root;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
@@ -786,6 +787,9 @@ enum {
* main phase. The fs_info::balance_ctl is initialized.
*/
BTRFS_FS_BALANCE_RUNNING,
+
+ /* Indicate that the cleaner thread is awake and doing something. */
+ BTRFS_FS_CLEANER_RUNNING,
};
struct btrfs_fs_info {
@@ -930,7 +934,8 @@ struct btrfs_fs_info {
spinlock_t delayed_iput_lock;
struct list_head delayed_iputs;
- struct mutex cleaner_delayed_iput_mutex;
+ atomic_t nr_delayed_iputs;
+ wait_queue_head_t delayed_iputs_wait;
/* this protects tree_mod_seq_list */
spinlock_t tree_mod_seq_lock;
@@ -1070,10 +1075,13 @@ struct btrfs_fs_info {
atomic_t scrubs_paused;
atomic_t scrub_cancel_req;
wait_queue_head_t scrub_pause_wait;
- int scrub_workers_refcnt;
+ /*
+ * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
+ * running.
+ */
+ refcount_t scrub_workers_refcnt;
struct btrfs_workqueue *scrub_workers;
struct btrfs_workqueue *scrub_wr_completion_workers;
- struct btrfs_workqueue *scrub_nocow_workers;
struct btrfs_workqueue *scrub_parity_workers;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
@@ -1195,6 +1203,24 @@ enum {
BTRFS_ROOT_MULTI_LOG_TASKS,
BTRFS_ROOT_DIRTY,
BTRFS_ROOT_DELETING,
+
+ /*
+ * Reloc tree is an orphan, only kept here for qgroup delayed subtree scan
+ *
+ * Set for the subvolume tree owning the reloc tree.
+ */
+ BTRFS_ROOT_DEAD_RELOC_TREE,
+};
+
+/*
+ * Record swapped tree blocks of a subvolume tree for delayed subtree trace
+ * code. For details, check the comment in fs/btrfs/qgroup.c.
+ */
+struct btrfs_qgroup_swapped_blocks {
+ spinlock_t lock;
+ /* RB_EMPTY_ROOT() of above blocks[] */
+ bool swapped;
+ struct rb_root blocks[BTRFS_MAX_LEVEL];
};
/*
@@ -1308,6 +1334,14 @@ struct btrfs_root {
u64 nr_ordered_extents;
/*
+ * Not empty if this subvolume root has gone through tree block swap
+ * (relocation)
+ *
+ * Will be used by reloc_control::dirty_subvol_roots.
+ */
+ struct list_head reloc_dirty_list;
+
+ /*
* Number of currently running SEND ioctls to prevent
* manipulation with the read-only status via SUBVOL_SETFLAGS
*/
@@ -1324,6 +1358,9 @@ struct btrfs_root {
/* Number of active swapfiles */
atomic_t nr_swapfiles;
+ /* Record pairs of swapped blocks for qgroup */
+ struct btrfs_qgroup_swapped_blocks swapped_blocks;
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
u64 alloc_bytenr;
#endif
@@ -2661,6 +2698,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait);
+void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
@@ -2768,7 +2808,8 @@ enum btrfs_flush_state {
FLUSH_DELALLOC = 5,
FLUSH_DELALLOC_WAIT = 6,
ALLOC_CHUNK = 7,
- COMMIT_TRANS = 8,
+ ALLOC_CHUNK_FORCE = 8,
+ COMMIT_TRANS = 9,
};
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
@@ -3174,8 +3215,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
/* inode.c */
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
- struct page *page, size_t pg_offset, u64 start,
- u64 len, int create);
+ u64 start, u64 len);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
u64 *orig_start, u64 *orig_block_len,
u64 *ram_bytes);
@@ -3247,6 +3287,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
+int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
@@ -3254,7 +3295,7 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
-int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
+int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started, unsigned long *nr_written,
struct writeback_control *wbc);
int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end);
@@ -3483,21 +3524,18 @@ do { \
rcu_read_unlock(); \
} while (0)
-#ifdef CONFIG_BTRFS_ASSERT
-
__cold
static inline void assfail(const char *expr, const char *file, int line)
{
- pr_err("assertion failed: %s, file: %s, line: %d\n",
- expr, file, line);
- BUG();
+ if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
+ pr_err("assertion failed: %s, file: %s, line: %d\n",
+ expr, file, line);
+ BUG();
+ }
}
#define ASSERT(expr) \
(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
-#else
-#define ASSERT(expr) ((void)0)
-#endif
/*
* Use that for functions that are conditionally exported for sanity tests but
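The ASSERT() rework above relies on IS_ENABLED(CONFIG_BTRFS_ASSERT) being a compile-time constant, so the assfail() body is dead code in non-debug builds while the asserted expression still gets type-checked in every configuration. A minimal userspace model of the same pattern, assuming an invented ASSERT_ENABLED switch in place of the Kconfig option:

/*
 * Standalone illustration only; ASSERT_ENABLED, nritems and main() are
 * invented and not part of the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define ASSERT_ENABLED 1	/* set to 0: assfail() becomes a no-op, expr still compiles */

static void assfail(const char *expr, const char *file, int line)
{
	if (ASSERT_ENABLED) {
		fprintf(stderr, "assertion failed: %s, file: %s, line: %d\n",
			expr, file, line);
		abort();
	}
}

#define ASSERT(expr) \
	((expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))

int main(void)
{
	int nritems = 0;

	ASSERT(nritems >= 0);	/* holds */
	ASSERT(nritems > 0);	/* trips when ASSERT_ENABLED is 1 */
	return 0;
}

With ASSERT_ENABLED set to 0 the compiler can discard the fprintf/abort path entirely, which is the point of the single-definition form used in the patch.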
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index cad36c99a483..7d2a413df90d 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -602,17 +602,14 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
head_ref->total_ref_mod = count_mod;
- head_ref->qgroup_reserved = 0;
- head_ref->qgroup_ref_root = 0;
spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
if (qrecord) {
if (ref_root && reserved) {
- head_ref->qgroup_ref_root = ref_root;
- head_ref->qgroup_reserved = reserved;
+ qrecord->data_rsv = reserved;
+ qrecord->data_rsv_refroot = ref_root;
}
-
qrecord->bytenr = bytenr;
qrecord->num_bytes = num_bytes;
qrecord->old_roots = NULL;
@@ -651,10 +648,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
- WARN_ON(qrecord && head_ref->qgroup_ref_root
- && head_ref->qgroup_reserved
- && existing->qgroup_ref_root
- && existing->qgroup_reserved);
update_existing_head_ref(trans, existing, head_ref,
old_ref_mod);
/*
@@ -770,7 +763,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
- record = kmalloc(sizeof(*record), GFP_NOFS);
+ record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) {
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
@@ -867,7 +860,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
- record = kmalloc(sizeof(*record), GFP_NOFS);
+ record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
kmem_cache_free(btrfs_delayed_ref_head_cachep,
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index d2af974f68a1..70606da440aa 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -103,17 +103,6 @@ struct btrfs_delayed_ref_head {
int ref_mod;
/*
- * For qgroup reserved space freeing.
- *
- * ref_root and reserved will be recorded after
- * BTRFS_ADD_DELAYED_EXTENT is called.
- * And will be used to free reserved qgroup space at
- * run_delayed_refs() time.
- */
- u64 qgroup_ref_root;
- u64 qgroup_reserved;
-
- /*
* when a new extent is allocated, it is just reserved in memory
* The actual extent isn't inserted into the extent allocation tree
* until the delayed ref is processed. must_insert_reserved is
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 8750c835f535..ee193c5222b2 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -111,11 +111,11 @@ no_valid_dev_replace_entry_found:
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
- dev_replace->srcdev = btrfs_find_device(fs_info, src_devid,
- NULL, NULL);
- dev_replace->tgtdev = btrfs_find_device(fs_info,
+ dev_replace->srcdev = btrfs_find_device(fs_info->fs_devices,
+ src_devid, NULL, NULL, true);
+ dev_replace->tgtdev = btrfs_find_device(fs_info->fs_devices,
BTRFS_DEV_REPLACE_DEVID,
- NULL, NULL);
+ NULL, NULL, true);
/*
* allow 'btrfs dev replace_cancel' if src/tgt device is
* missing
@@ -862,6 +862,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
btrfs_destroy_dev_replace_tgtdev(tgt_device);
break;
default:
+ up_write(&dev_replace->rwsem);
result = -EINVAL;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8da2f380d3c0..5216e7b3f9ad 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -17,6 +17,7 @@
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
+#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
@@ -341,7 +342,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
if (need_lock) {
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb);
}
lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
@@ -1120,7 +1121,7 @@ void clean_tree_block(struct btrfs_fs_info *fs_info,
-buf->len,
fs_info->dirty_metadata_batch);
/* ugh, clear_extent_buffer_dirty needs to lock the page */
- btrfs_set_lock_blocking(buf);
+ btrfs_set_lock_blocking_write(buf);
clear_extent_buffer_dirty(buf);
}
}
@@ -1175,6 +1176,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
INIT_LIST_HEAD(&root->delalloc_root);
INIT_LIST_HEAD(&root->ordered_extents);
INIT_LIST_HEAD(&root->ordered_root);
+ INIT_LIST_HEAD(&root->reloc_dirty_list);
INIT_LIST_HEAD(&root->logged_list[0]);
INIT_LIST_HEAD(&root->logged_list[1]);
spin_lock_init(&root->inode_lock);
@@ -1218,6 +1220,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->anon_dev = 0;
spin_lock_init(&root->root_item_lock);
+ btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
}
static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
@@ -1258,10 +1261,17 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *root;
struct btrfs_key key;
+ unsigned int nofs_flag;
int ret = 0;
uuid_le uuid = NULL_UUID_LE;
+ /*
+ * We're holding a transaction handle, so use a NOFS memory allocation
+ * context to avoid deadlock if reclaim happens.
+ */
+ nofs_flag = memalloc_nofs_save();
root = btrfs_alloc_root(fs_info, GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
if (!root)
return ERR_PTR(-ENOMEM);
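For context, the memalloc_nofs_save()/memalloc_nofs_restore() pair used above scopes GFP_NOFS behaviour to the whole task rather than to one allocation: anything allocated inside the scope is treated as NOFS even when GFP_KERNEL is passed, so reclaim cannot recurse into the filesystem while a transaction handle is held. A hedged sketch of the general shape, with an invented helper around the real scope APIs:

/*
 * Sketch only: alloc_under_transaction() is a made-up helper; the two scope
 * functions and kmalloc() are real kernel APIs.
 */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_under_transaction(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();	/* enter NOFS scope */
	p = kmalloc(size, GFP_KERNEL);		/* behaves like GFP_NOFS here */
	memalloc_nofs_restore(nofs_flag);	/* leave NOFS scope */

	return p;
}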
@@ -1682,6 +1692,8 @@ static int cleaner_kthread(void *arg)
while (1) {
again = 0;
+ set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
+
/* Make the cleaner go to sleep early. */
if (btrfs_need_cleaner_sleep(fs_info))
goto sleep;
@@ -1705,9 +1717,7 @@ static int cleaner_kthread(void *arg)
goto sleep;
}
- mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
btrfs_run_delayed_iputs(fs_info);
- mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
again = btrfs_clean_one_deleted_snapshot(root);
mutex_unlock(&fs_info->cleaner_mutex);
@@ -1728,6 +1738,7 @@ static int cleaner_kthread(void *arg)
*/
btrfs_delete_unused_bgs(fs_info);
sleep:
+ clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
if (kthread_should_park())
kthread_parkme();
if (kthread_should_stop())
@@ -2098,7 +2109,7 @@ static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
atomic_set(&fs_info->scrubs_paused, 0);
atomic_set(&fs_info->scrub_cancel_req, 0);
init_waitqueue_head(&fs_info->scrub_pause_wait);
- fs_info->scrub_workers_refcnt = 0;
+ refcount_set(&fs_info->scrub_workers_refcnt, 0);
}
static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
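The scrub_workers_refcnt change above moves from a plain int to refcount_t, which saturates and warns on underflow and on increments from zero. A rough sketch of the intended get/put shape with invented names; the serialization the real scrub code does under a mutex is omitted:

/*
 * Illustrative only; struct workers_ctx and both helpers are invented.
 * Callers are assumed to serialize workers_get() externally.
 */
#include <linux/refcount.h>
#include <linux/types.h>

struct workers_ctx {
	refcount_t users;	/* 0 <=> the worker pointers are NULL */
	void *workers;
};

static void workers_get(struct workers_ctx *ctx)
{
	if (refcount_read(&ctx->users) == 0)
		refcount_set(&ctx->users, 1);	/* first user brings workers up */
	else
		refcount_inc(&ctx->users);	/* would WARN if it raced from 0 */
}

/* Returns true when the last user dropped its reference and teardown may run. */
static bool workers_put(struct workers_ctx *ctx)
{
	return refcount_dec_and_test(&ctx->users);
}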
@@ -2663,7 +2674,6 @@ int open_ctree(struct super_block *sb,
mutex_init(&fs_info->delete_unused_bgs_mutex);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
- mutex_init(&fs_info->cleaner_delayed_iput_mutex);
seqlock_init(&fs_info->profiles_lock);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
@@ -2685,6 +2695,7 @@ int open_ctree(struct super_block *sb,
atomic_set(&fs_info->defrag_running, 0);
atomic_set(&fs_info->qgroup_op_seq, 0);
atomic_set(&fs_info->reada_works_cnt, 0);
+ atomic_set(&fs_info->nr_delayed_iputs, 0);
atomic64_set(&fs_info->tree_mod_seq, 0);
fs_info->sb = sb;
fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
@@ -2762,6 +2773,7 @@ int open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->transaction_wait);
init_waitqueue_head(&fs_info->transaction_blocked_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
+ init_waitqueue_head(&fs_info->delayed_iputs_wait);
INIT_LIST_HEAD(&fs_info->pinned_chunks);
@@ -4201,6 +4213,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
spin_lock(&fs_info->ordered_root_lock);
}
spin_unlock(&fs_info->ordered_root_lock);
+
+ /*
+ * We need this here because if we've been flipped read-only we won't
+ * get sync() from the umount, so we need to make sure any ordered
+ * extents that haven't had their dirty pages IO start writeout yet
+ * actually get run and error out properly.
+ */
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
@@ -4227,16 +4247,9 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
head = rb_entry(node, struct btrfs_delayed_ref_head,
href_node);
- if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->refs);
- spin_unlock(&delayed_refs->lock);
-
- mutex_lock(&head->mutex);
- mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref_head(head);
- spin_lock(&delayed_refs->lock);
+ if (btrfs_delayed_ref_lock(delayed_refs, head))
continue;
- }
+
spin_lock(&head->lock);
while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
ref = rb_entry(n, struct btrfs_delayed_ref_node,
@@ -4252,12 +4265,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
if (head->must_insert_reserved)
pin_bytes = true;
btrfs_free_delayed_extent_op(head->extent_op);
- delayed_refs->num_heads--;
- if (head->processing == 0)
- delayed_refs->num_heads_ready--;
- atomic_dec(&delayed_refs->num_entries);
- rb_erase_cached(&head->href_node, &delayed_refs->href_root);
- RB_CLEAR_NODE(&head->href_node);
+ btrfs_delete_ref_head(delayed_refs, head);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
mutex_unlock(&head->mutex);
@@ -4265,6 +4273,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
if (pin_bytes)
btrfs_pin_extent(fs_info, head->bytenr,
head->num_bytes, 1);
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
btrfs_put_delayed_ref_head(head);
cond_resched();
spin_lock(&delayed_refs->lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b15afeae16df..994f0cc41799 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
return ret ? ret : 1;
}
-static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head)
+void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_delayed_ref_root *delayed_refs =
- &trans->transaction->delayed_refs;
int nr_items = 1; /* Dropping this ref head update. */
if (head->total_ref_mod < 0) {
@@ -2494,9 +2492,6 @@ static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
}
}
- /* Also free its reserved qgroup space */
- btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
- head->qgroup_reserved);
btrfs_delayed_refs_rsv_release(fs_info, nr_items);
}
@@ -2544,7 +2539,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
}
}
- cleanup_ref_head_accounting(trans, head);
+ btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
trace_run_delayed_ref_head(fs_info, head, 0);
btrfs_delayed_ref_unlock(head);
@@ -3015,8 +3010,7 @@ again:
}
if (run_all) {
- if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans);
+ btrfs_create_pending_block_groups(trans);
spin_lock(&delayed_refs->lock);
node = rb_first_cached(&delayed_refs->href_root);
@@ -4282,10 +4276,14 @@ commit_trans:
/*
* The cleaner kthread might still be doing iput
* operations. Wait for it to finish so that
- * more space is released.
+ * more space is released. We don't need to
+ * explicitly run the delayed iputs here because
+ * the commit_transaction would have woken up
+ * the cleaner.
*/
- mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
- mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
+ ret = btrfs_wait_on_delayed_iputs(fs_info);
+ if (ret)
+ return ret;
goto again;
} else {
btrfs_end_transaction(trans);
@@ -4398,7 +4396,6 @@ static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *sinfo, int force)
{
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
u64 bytes_used = btrfs_space_info_used(sinfo, false);
u64 thresh;
@@ -4406,14 +4403,6 @@ static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
return 1;
/*
- * We need to take into account the global rsv because for all intents
- * and purposes it's used space. Don't worry about locking the
- * global_rsv, it doesn't change except when the transaction commits.
- */
- if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
- bytes_used += calc_global_rsv_need_space(global_rsv);
-
- /*
* in limited mode, we want to have some free space up to
* about 1% of the FS size.
*/
@@ -4743,7 +4732,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
u64 delalloc_bytes;
- u64 max_reclaim;
+ u64 async_pages;
u64 items;
long time_left;
unsigned long nr_pages;
@@ -4768,25 +4757,36 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
loops = 0;
while (delalloc_bytes && loops < 3) {
- max_reclaim = min(delalloc_bytes, to_reclaim);
- nr_pages = max_reclaim >> PAGE_SHIFT;
+ nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
+
+ /*
+ * Triggers inode writeback for up to nr_pages. This will invoke the
+ * ->writepages callback and trigger delalloc filling
+ * (btrfs_run_delalloc_range()).
+ */
btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
+
/*
- * We need to wait for the async pages to actually start before
- * we do anything.
+ * We need to wait for the compressed pages to start before
+ * we continue.
*/
- max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
- if (!max_reclaim)
+ async_pages = atomic_read(&fs_info->async_delalloc_pages);
+ if (!async_pages)
goto skip_async;
- if (max_reclaim <= nr_pages)
- max_reclaim = 0;
+ /*
+ * Calculate how many compressed pages we want to be written
+ * before we continue. I.e. if there are more async pages than we
+ * require, wait_event will wait until nr_pages are written.
+ */
+ if (async_pages <= nr_pages)
+ async_pages = 0;
else
- max_reclaim -= nr_pages;
+ async_pages -= nr_pages;
wait_event(fs_info->async_submit_wait,
atomic_read(&fs_info->async_delalloc_pages) <=
- (int)max_reclaim);
+ (int)async_pages);
skip_async:
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets) &&
@@ -4810,6 +4810,7 @@ skip_async:
}
struct reserve_ticket {
+ u64 orig_bytes;
u64 bytes;
int error;
struct list_head list;
@@ -4853,10 +4854,19 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
if (!bytes_needed)
return 0;
- /* See if there is enough pinned space to make this reservation */
- if (__percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes_needed,
- BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
+ trans = btrfs_join_transaction(fs_info->extent_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ /*
+ * See if there is enough pinned space to make this reservation, or if
+ * we have block groups that are going to be freed, allowing us to
+ * possibly do a chunk allocation the next loop through.
+ */
+ if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
+ __percpu_counter_compare(&space_info->total_bytes_pinned,
+ bytes_needed,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
goto commit;
/*
@@ -4864,7 +4874,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
* this reservation.
*/
if (space_info != delayed_rsv->space_info)
- return -ENOSPC;
+ goto enospc;
spin_lock(&delayed_rsv->lock);
reclaim_bytes += delayed_rsv->reserved;
@@ -4879,16 +4889,14 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
if (__percpu_counter_compare(&space_info->total_bytes_pinned,
bytes_needed,
- BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) {
- return -ENOSPC;
- }
+ BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
+ goto enospc;
commit:
- trans = btrfs_join_transaction(fs_info->extent_root);
- if (IS_ERR(trans))
- return -ENOSPC;
-
return btrfs_commit_transaction(trans);
+enospc:
+ btrfs_end_transaction(trans);
+ return -ENOSPC;
}
/*
@@ -4941,6 +4949,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
btrfs_end_transaction(trans);
break;
case ALLOC_CHUNK:
+ case ALLOC_CHUNK_FORCE:
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -4948,12 +4957,21 @@ static void flush_space(struct btrfs_fs_info *fs_info,
}
ret = do_chunk_alloc(trans,
btrfs_metadata_alloc_profile(fs_info),
- CHUNK_ALLOC_NO_FORCE);
+ (state == ALLOC_CHUNK) ?
+ CHUNK_ALLOC_NO_FORCE : CHUNK_ALLOC_FORCE);
btrfs_end_transaction(trans);
if (ret > 0 || ret == -ENOSPC)
ret = 0;
break;
case COMMIT_TRANS:
+ /*
+ * If we have pending delayed iputs then we could free up a
+ * bunch of pinned space, so make sure we run the iputs before
+ * we do our pinned bytes check below.
+ */
+ btrfs_run_delayed_iputs(fs_info);
+ btrfs_wait_on_delayed_iputs(fs_info);
+
ret = may_commit_transaction(fs_info, space_info);
break;
default:
@@ -5023,7 +5041,7 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
-static void wake_all_tickets(struct list_head *head)
+static bool wake_all_tickets(struct list_head *head)
{
struct reserve_ticket *ticket;
@@ -5032,7 +5050,10 @@ static void wake_all_tickets(struct list_head *head)
list_del_init(&ticket->list);
ticket->error = -ENOSPC;
wake_up(&ticket->wait);
+ if (ticket->bytes != ticket->orig_bytes)
+ return true;
}
+ return false;
}
/*
@@ -5084,11 +5105,28 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
commit_cycles--;
}
+ /*
+ * We don't want to force a chunk allocation until we've tried
+ * pretty hard to reclaim space. Think of the case where we
+ * freed up a bunch of space and so have a lot of pinned space
+ * to reclaim. We would rather use that than possibly create a
+ * underutilized metadata chunk. So if this is our first run
+ * through the flushing state machine skip ALLOC_CHUNK_FORCE and
+ * commit the transaction. If nothing has changed the next go
+ * around then we can force a chunk allocation.
+ */
+ if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
+ flush_state++;
+
if (flush_state > COMMIT_TRANS) {
commit_cycles++;
if (commit_cycles > 2) {
- wake_all_tickets(&space_info->tickets);
- space_info->flush = 0;
+ if (wake_all_tickets(&space_info->tickets)) {
+ flush_state = FLUSH_DELAYED_ITEMS_NR;
+ commit_cycles--;
+ } else {
+ space_info->flush = 0;
+ }
} else {
flush_state = FLUSH_DELAYED_ITEMS_NR;
}
@@ -5102,12 +5140,18 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}
+static const enum btrfs_flush_state priority_flush_states[] = {
+ FLUSH_DELAYED_ITEMS_NR,
+ FLUSH_DELAYED_ITEMS,
+ ALLOC_CHUNK,
+};
+
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
struct reserve_ticket *ticket)
{
u64 to_reclaim;
- int flush_state = FLUSH_DELAYED_ITEMS_NR;
+ int flush_state;
spin_lock(&space_info->lock);
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
@@ -5118,8 +5162,10 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
}
spin_unlock(&space_info->lock);
+ flush_state = 0;
do {
- flush_space(fs_info, space_info, to_reclaim, flush_state);
+ flush_space(fs_info, space_info, to_reclaim,
+ priority_flush_states[flush_state]);
flush_state++;
spin_lock(&space_info->lock);
if (ticket->bytes == 0) {
@@ -5127,23 +5173,16 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
return;
}
spin_unlock(&space_info->lock);
-
- /*
- * Priority flushers can't wait on delalloc without
- * deadlocking.
- */
- if (flush_state == FLUSH_DELALLOC ||
- flush_state == FLUSH_DELALLOC_WAIT)
- flush_state = ALLOC_CHUNK;
- } while (flush_state < COMMIT_TRANS);
+ } while (flush_state < ARRAY_SIZE(priority_flush_states));
}
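The priority_flush_states[] table above trades in-loop special cases for a fixed list of states to walk in order. A standalone model of that control flow; the states, the freed-bytes numbers and the flush_space() stub are all invented for illustration:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum flush_state { FLUSH_ITEMS_NR, FLUSH_ITEMS, ALLOC_CHUNK };

static const enum flush_state priority_states[] = {
	FLUSH_ITEMS_NR, FLUSH_ITEMS, ALLOC_CHUNK,
};

/* Pretend each state can free a bounded amount; return what is still missing. */
static long flush_space(enum flush_state state, long missing)
{
	static const long freed[] = { 64, 128, 4096 };

	return missing > freed[state] ? missing - freed[state] : 0;
}

int main(void)
{
	long missing = 200;
	size_t i = 0;

	do {
		missing = flush_space(priority_states[i], missing);
		i++;
		if (missing == 0)	/* ticket satisfied, stop early */
			break;
	} while (i < ARRAY_SIZE(priority_states));

	printf("still missing after priority flushing: %ld\n", missing);
	return 0;
}

Extending the policy then means editing the table rather than the loop, which is the design choice the patch makes for the priority (non-blocking) reclaim path.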
static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
- struct reserve_ticket *ticket, u64 orig_bytes)
+ struct reserve_ticket *ticket)
{
DEFINE_WAIT(wait);
+ u64 reclaim_bytes = 0;
int ret = 0;
spin_lock(&space_info->lock);
@@ -5164,14 +5203,12 @@ static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
ret = ticket->error;
if (!list_empty(&ticket->list))
list_del_init(&ticket->list);
- if (ticket->bytes && ticket->bytes < orig_bytes) {
- u64 num_bytes = orig_bytes - ticket->bytes;
- update_bytes_may_use(space_info, -num_bytes);
- trace_btrfs_space_reservation(fs_info, "space_info",
- space_info->flags, num_bytes, 0);
- }
+ if (ticket->bytes && ticket->bytes < ticket->orig_bytes)
+ reclaim_bytes = ticket->orig_bytes - ticket->bytes;
spin_unlock(&space_info->lock);
+ if (reclaim_bytes)
+ space_info_add_old_bytes(fs_info, space_info, reclaim_bytes);
return ret;
}
@@ -5197,6 +5234,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
{
struct reserve_ticket ticket;
u64 used;
+ u64 reclaim_bytes = 0;
int ret = 0;
ASSERT(orig_bytes);
@@ -5232,6 +5270,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
* the list and we will do our own flushing further down.
*/
if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
+ ticket.orig_bytes = orig_bytes;
ticket.bytes = orig_bytes;
ticket.error = 0;
init_waitqueue_head(&ticket.wait);
@@ -5272,25 +5311,21 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
return ret;
if (flush == BTRFS_RESERVE_FLUSH_ALL)
- return wait_reserve_ticket(fs_info, space_info, &ticket,
- orig_bytes);
+ return wait_reserve_ticket(fs_info, space_info, &ticket);
ret = 0;
priority_reclaim_metadata_space(fs_info, space_info, &ticket);
spin_lock(&space_info->lock);
if (ticket.bytes) {
- if (ticket.bytes < orig_bytes) {
- u64 num_bytes = orig_bytes - ticket.bytes;
- update_bytes_may_use(space_info, -num_bytes);
- trace_btrfs_space_reservation(fs_info, "space_info",
- space_info->flags,
- num_bytes, 0);
-
- }
+ if (ticket.bytes < orig_bytes)
+ reclaim_bytes = orig_bytes - ticket.bytes;
list_del_init(&ticket.list);
ret = -ENOSPC;
}
spin_unlock(&space_info->lock);
+
+ if (reclaim_bytes)
+ space_info_add_old_bytes(fs_info, space_info, reclaim_bytes);
ASSERT(list_empty(&ticket.list));
return ret;
}
@@ -5768,6 +5803,21 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
+static void calc_refill_bytes(struct btrfs_block_rsv *block_rsv,
+ u64 *metadata_bytes, u64 *qgroup_bytes)
+{
+ *metadata_bytes = 0;
+ *qgroup_bytes = 0;
+
+ spin_lock(&block_rsv->lock);
+ if (block_rsv->reserved < block_rsv->size)
+ *metadata_bytes = block_rsv->size - block_rsv->reserved;
+ if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
+ *qgroup_bytes = block_rsv->qgroup_rsv_size -
+ block_rsv->qgroup_rsv_reserved;
+ spin_unlock(&block_rsv->lock);
+}
+
/**
* btrfs_inode_rsv_refill - refill the inode block rsv.
* @inode - the inode we are refilling.
@@ -5783,25 +5833,42 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
{
struct btrfs_root *root = inode->root;
struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
- u64 num_bytes = 0;
- u64 qgroup_num_bytes = 0;
+ u64 num_bytes, last = 0;
+ u64 qgroup_num_bytes;
int ret = -ENOSPC;
- spin_lock(&block_rsv->lock);
- if (block_rsv->reserved < block_rsv->size)
- num_bytes = block_rsv->size - block_rsv->reserved;
- if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
- qgroup_num_bytes = block_rsv->qgroup_rsv_size -
- block_rsv->qgroup_rsv_reserved;
- spin_unlock(&block_rsv->lock);
-
+ calc_refill_bytes(block_rsv, &num_bytes, &qgroup_num_bytes);
if (num_bytes == 0)
return 0;
- ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true);
- if (ret)
- return ret;
- ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+ do {
+ ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes,
+ true);
+ if (ret)
+ return ret;
+ ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+ if (ret) {
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
+ last = num_bytes;
+ /*
+ * If we are fragmented we can end up with a lot of
+ * outstanding extents which will make our size be much
+ * larger than our reserved amount.
+ *
+ * If the reservation happens here, it might be very
+ * big though not needed in the end, if the delalloc
+ * flushing happens.
+ *
+ * If this is the case try and do the reserve again.
+ */
+ if (flush == BTRFS_RESERVE_FLUSH_ALL)
+ calc_refill_bytes(block_rsv, &num_bytes,
+ &qgroup_num_bytes);
+ if (num_bytes == 0)
+ return 0;
+ }
+ } while (ret && last != num_bytes);
+
if (!ret) {
block_rsv_add_bytes(block_rsv, num_bytes, false);
trace_btrfs_space_reservation(root->fs_info, "delalloc",
@@ -5811,8 +5878,7 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
spin_lock(&block_rsv->lock);
block_rsv->qgroup_rsv_reserved += qgroup_num_bytes;
spin_unlock(&block_rsv->lock);
- } else
- btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
+ }
return ret;
}
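The loop added to btrfs_inode_rsv_refill() above recomputes the shortfall after each failed attempt and gives up only when the requirement stops shrinking (last == num_bytes). A toy standalone model of just that termination rule; every name and number here is made up:

#include <stdio.h>
#include <stdbool.h>

static long rsv_size = 100, rsv_reserved = 0;	/* stand-ins for block_rsv fields */

static void calc_refill(long *need)
{
	*need = rsv_size > rsv_reserved ? rsv_size - rsv_reserved : 0;
}

static bool try_reserve(long need)
{
	if (need > 40) {		/* pretend flushing shrank the target instead */
		rsv_size -= 30;
		return false;
	}
	rsv_reserved += need;
	return true;
}

int main(void)
{
	long need, last = 0;
	bool ok;

	calc_refill(&need);
	do {
		ok = try_reserve(need);
		if (!ok) {
			last = need;
			calc_refill(&need);	/* requirement may have shrunk */
			if (need == 0)
				ok = true;	/* nothing left to reserve */
		}
	} while (!ok && last != need);

	printf("reserved=%ld size=%ld ok=%d\n", rsv_reserved, rsv_size, ok);
	return 0;
}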
@@ -7188,7 +7254,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (head->must_insert_reserved)
ret = 1;
- cleanup_ref_head_accounting(trans, head);
+ btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
return ret;
@@ -8059,6 +8125,15 @@ loop:
return ret;
}
+#define DUMP_BLOCK_RSV(fs_info, rsv_name) \
+do { \
+ struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
+ spin_lock(&__rsv->lock); \
+ btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \
+ __rsv->size, __rsv->reserved); \
+ spin_unlock(&__rsv->lock); \
+} while (0)
+
static void dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups)
@@ -8078,6 +8153,12 @@ static void dump_space_info(struct btrfs_fs_info *fs_info,
info->bytes_readonly);
spin_unlock(&info->lock);
+ DUMP_BLOCK_RSV(fs_info, global_block_rsv);
+ DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
+ DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
+ DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
+ DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
+
if (!dump_block_groups)
return;
@@ -8485,7 +8566,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
clean_tree_block(fs_info, buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
- btrfs_set_lock_blocking(buf);
+ btrfs_set_lock_blocking_write(buf);
set_extent_buffer_uptodate(buf);
memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
@@ -8910,7 +8991,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
reada = 1;
}
btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
+ btrfs_set_lock_blocking_write(next);
ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
&wc->refs[level - 1],
@@ -8970,7 +9051,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
return -EIO;
}
btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
+ btrfs_set_lock_blocking_write(next);
}
level--;
@@ -9082,7 +9163,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (!path->locks[level]) {
BUG_ON(level == 0);
btrfs_tree_lock(eb);
- btrfs_set_lock_blocking(eb);
+ btrfs_set_lock_blocking_write(eb);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
ret = btrfs_lookup_extent_info(trans, fs_info,
@@ -9124,7 +9205,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (!path->locks[level] &&
btrfs_header_generation(eb) == trans->transid) {
btrfs_tree_lock(eb);
- btrfs_set_lock_blocking(eb);
+ btrfs_set_lock_blocking_write(eb);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
}
clean_tree_block(fs_info, eb);
@@ -9291,7 +9372,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_header_level(root->node);
path->nodes[level] = btrfs_lock_root_node(root);
- btrfs_set_lock_blocking(path->nodes[level]);
+ btrfs_set_lock_blocking_write(path->nodes[level]);
path->slots[level] = 0;
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
memset(&wc->update_progress, 0,
@@ -9321,7 +9402,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
level = btrfs_header_level(root->node);
while (1) {
btrfs_tree_lock(path->nodes[level]);
- btrfs_set_lock_blocking(path->nodes[level]);
+ btrfs_set_lock_blocking_write(path->nodes[level]);
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
ret = btrfs_lookup_extent_info(trans, fs_info,
@@ -9588,6 +9669,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
struct btrfs_space_info *sinfo = cache->space_info;
u64 num_bytes;
+ u64 sinfo_used;
u64 min_allocable_bytes;
int ret = -ENOSPC;
@@ -9614,9 +9696,10 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
num_bytes = cache->key.offset - cache->reserved - cache->pinned -
cache->bytes_super - btrfs_block_group_used(&cache->item);
+ sinfo_used = btrfs_space_info_used(sinfo, true);
- if (btrfs_space_info_used(sinfo, true) + num_bytes +
- min_allocable_bytes <= sinfo->total_bytes) {
+ if (sinfo_used + num_bytes + min_allocable_bytes <=
+ sinfo->total_bytes) {
sinfo->bytes_readonly += num_bytes;
cache->ro++;
list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
@@ -9625,6 +9708,15 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
out:
spin_unlock(&cache->lock);
spin_unlock(&sinfo->lock);
+ if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
+ btrfs_info(cache->fs_info,
+ "unable to make block group %llu ro",
+ cache->key.objectid);
+ btrfs_info(cache->fs_info,
+ "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
+ sinfo_used, num_bytes, min_allocable_bytes);
+ dump_space_info(cache->fs_info, cache->space_info, 0, 0);
+ }
return ret;
}
@@ -10774,13 +10866,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
}
spin_lock(&trans->transaction->dirty_bgs_lock);
- if (!list_empty(&block_group->dirty_list)) {
- WARN_ON(1);
- }
- if (!list_empty(&block_group->io_list)) {
- WARN_ON(1);
- }
+ WARN_ON(!list_empty(&block_group->dirty_list));
+ WARN_ON(!list_empty(&block_group->io_list));
spin_unlock(&trans->transaction->dirty_bgs_lock);
+
btrfs_remove_free_space_cache(block_group);
spin_lock(&block_group->space_info->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 52abe4082680..ca259c75bbcd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -147,7 +147,38 @@ static int add_extent_changeset(struct extent_state *state, unsigned bits,
return ret;
}
-static void flush_write_bio(struct extent_page_data *epd);
+static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
+ unsigned long bio_flags)
+{
+ blk_status_t ret = 0;
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
+ struct page *page = bvec->bv_page;
+ struct extent_io_tree *tree = bio->bi_private;
+ u64 start;
+
+ start = page_offset(page) + bvec->bv_offset;
+
+ bio->bi_private = NULL;
+
+ if (tree->ops)
+ ret = tree->ops->submit_bio_hook(tree->private_data, bio,
+ mirror_num, bio_flags, start);
+ else
+ btrfsic_submit_bio(bio);
+
+ return blk_status_to_errno(ret);
+}
+
+static void flush_write_bio(struct extent_page_data *epd)
+{
+ if (epd->bio) {
+ int ret;
+
+ ret = submit_one_bio(epd->bio, 0, 0);
+ BUG_ON(ret < 0); /* -ENOMEM */
+ epd->bio = NULL;
+ }
+}
int __init extent_io_init(void)
{
@@ -281,8 +312,8 @@ do_insert:
}
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
- struct rb_node **prev_ret,
struct rb_node **next_ret,
+ struct rb_node **prev_ret,
struct rb_node ***p_ret,
struct rb_node **parent_ret)
{
@@ -311,23 +342,23 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
if (parent_ret)
*parent_ret = prev;
- if (prev_ret) {
+ if (next_ret) {
orig_prev = prev;
while (prev && offset > prev_entry->end) {
prev = rb_next(prev);
prev_entry = rb_entry(prev, struct tree_entry, rb_node);
}
- *prev_ret = prev;
+ *next_ret = prev;
prev = orig_prev;
}
- if (next_ret) {
+ if (prev_ret) {
prev_entry = rb_entry(prev, struct tree_entry, rb_node);
while (prev && offset < prev_entry->start) {
prev = rb_prev(prev);
prev_entry = rb_entry(prev, struct tree_entry, rb_node);
}
- *next_ret = prev;
+ *prev_ret = prev;
}
return NULL;
}
@@ -338,12 +369,12 @@ tree_search_for_insert(struct extent_io_tree *tree,
struct rb_node ***p_ret,
struct rb_node **parent_ret)
{
- struct rb_node *prev = NULL;
+ struct rb_node *next = NULL;
struct rb_node *ret;
- ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
+ ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
if (!ret)
- return prev;
+ return next;
return ret;
}
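The __etree_search() change above is only about which out-parameter carries the in-tree entry that follows a missed offset. A sketch of the underlying "exact match or next entry" walk over the generic kernel rbtree; struct range_entry and range_search() are invented for illustration and assume non-overlapping, sorted ranges:

#include <linux/rbtree.h>
#include <linux/types.h>

struct range_entry {
	struct rb_node rb_node;
	u64 start;
	u64 end;
};

/* Return the entry containing @offset, or the first entry after it, or NULL. */
static struct range_entry *range_search(struct rb_root *root, u64 offset)
{
	struct rb_node *node = root->rb_node;
	struct range_entry *entry, *next = NULL;

	while (node) {
		entry = rb_entry(node, struct range_entry, rb_node);
		if (offset < entry->start) {
			next = entry;		/* best "next" candidate so far */
			node = node->rb_left;
		} else if (offset > entry->end) {
			node = node->rb_right;
		} else {
			return entry;		/* exact hit */
		}
	}
	return next;
}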
@@ -585,7 +616,6 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (delete)
bits |= ~EXTENT_CTLBITS;
- bits |= EXTENT_FIRST_DELALLOC;
if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
clear = 1;
@@ -850,7 +880,6 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
btrfs_debug_check_extent_io_range(tree, start, end);
- bits |= EXTENT_FIRST_DELALLOC;
again:
if (!prealloc && gfpflags_allow_blocking(mask)) {
/*
@@ -2692,28 +2721,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
return bio;
}
-static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
- unsigned long bio_flags)
-{
- blk_status_t ret = 0;
- struct bio_vec *bvec = bio_last_bvec_all(bio);
- struct page *page = bvec->bv_page;
- struct extent_io_tree *tree = bio->bi_private;
- u64 start;
-
- start = page_offset(page) + bvec->bv_offset;
-
- bio->bi_private = NULL;
-
- if (tree->ops)
- ret = tree->ops->submit_bio_hook(tree->private_data, bio,
- mirror_num, bio_flags, start);
- else
- btrfsic_submit_bio(bio);
-
- return blk_status_to_errno(ret);
-}
-
/*
* @opf: bio REQ_OP_* and REQ_* flags as one value
* @tree: tree so we can call our merge_bio hook
@@ -4007,17 +4014,6 @@ retry:
return ret;
}
-static void flush_write_bio(struct extent_page_data *epd)
-{
- if (epd->bio) {
- int ret;
-
- ret = submit_one_bio(epd->bio, 0, 0);
- BUG_ON(ret < 0); /* -ENOMEM */
- epd->bio = NULL;
- }
-}
-
int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{
int ret;
@@ -4259,8 +4255,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
if (len == 0)
break;
len = ALIGN(len, sectorsize);
- em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0, offset,
- len, 0);
+ em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len);
if (IS_ERR_OR_NULL(em))
return em;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 9673be3f3d1f..08749e0b9c32 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -18,17 +18,16 @@
#define EXTENT_BOUNDARY (1U << 9)
#define EXTENT_NODATASUM (1U << 10)
#define EXTENT_CLEAR_META_RESV (1U << 11)
-#define EXTENT_FIRST_DELALLOC (1U << 12)
-#define EXTENT_NEED_WAIT (1U << 13)
-#define EXTENT_DAMAGED (1U << 14)
-#define EXTENT_NORESERVE (1U << 15)
-#define EXTENT_QGROUP_RESERVED (1U << 16)
-#define EXTENT_CLEAR_DATA_RESV (1U << 17)
-#define EXTENT_DELALLOC_NEW (1U << 18)
+#define EXTENT_NEED_WAIT (1U << 12)
+#define EXTENT_DAMAGED (1U << 13)
+#define EXTENT_NORESERVE (1U << 14)
+#define EXTENT_QGROUP_RESERVED (1U << 15)
+#define EXTENT_CLEAR_DATA_RESV (1U << 16)
+#define EXTENT_DELALLOC_NEW (1U << 17)
#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
EXTENT_CLEAR_DATA_RESV)
-#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
+#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
/*
* flags for bio submission. The high bits indicate the compression
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index a042a193c120..928f729c55ba 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -210,6 +210,9 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
if (!list_empty(&prev->list) || !list_empty(&next->list))
return 0;
+ ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
+ prev->block_start != EXTENT_MAP_DELALLOC);
+
if (extent_map_end(prev) == next->start &&
prev->flags == next->flags &&
prev->bdev == next->bdev &&
@@ -217,8 +220,6 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
prev->block_start == EXTENT_MAP_HOLE) ||
(next->block_start == EXTENT_MAP_INLINE &&
prev->block_start == EXTENT_MAP_INLINE) ||
- (next->block_start == EXTENT_MAP_DELALLOC &&
- prev->block_start == EXTENT_MAP_DELALLOC) ||
(next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
next->block_start == extent_map_block_end(prev)))) {
return 1;
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index ef05a0121652..473f039fcd7c 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -9,6 +9,7 @@
#define EXTENT_MAP_LAST_BYTE ((u64)-4)
#define EXTENT_MAP_HOLE ((u64)-3)
#define EXTENT_MAP_INLINE ((u64)-2)
+/* used only during fiemap calls */
#define EXTENT_MAP_DELALLOC ((u64)-1)
/* bits for the extent_map::flags field */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index d38dc8c31533..34fe8a58b0e9 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -3218,8 +3218,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
&cached_state);
while (start < inode->i_size) {
- em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
- start, len, 0);
+ em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
em = NULL;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 43eb4535319d..3f180b857e20 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -453,7 +453,6 @@ static noinline void compress_file_range(struct inode *inode,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 blocksize = fs_info->sectorsize;
u64 actual_end;
- u64 isize = i_size_read(inode);
int ret = 0;
struct page **pages = NULL;
unsigned long nr_pages;
@@ -467,7 +466,7 @@ static noinline void compress_file_range(struct inode *inode,
inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
SZ_16K);
- actual_end = min_t(u64, isize, end + 1);
+ actual_end = min_t(u64, i_size_read(inode), end + 1);
again:
will_compress = 0;
nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
@@ -714,9 +713,9 @@ static void free_async_extent_pages(struct async_extent *async_extent)
* queued. We walk all the async extents created by compress_file_range
* and send them down to the disk.
*/
-static noinline void submit_compressed_extents(struct inode *inode,
- struct async_cow *async_cow)
+static noinline void submit_compressed_extents(struct async_cow *async_cow)
{
+ struct inode *inode = async_cow->inode;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct async_extent *async_extent;
u64 alloc_hint = 0;
@@ -1166,8 +1165,14 @@ static noinline void async_cow_submit(struct btrfs_work *work)
5 * SZ_1M)
cond_wake_up_nomb(&fs_info->async_submit_wait);
+ /*
+ * ->inode could be NULL if async_cow_start has failed to compress,
+ * in which case we don't have anything to submit, yet we need to
+ * always adjust ->async_delalloc_pages as its paired with the init
+ * happening in cow_file_range_async
+ */
if (async_cow->inode)
- submit_compressed_extents(async_cow->inode, async_cow);
+ submit_compressed_extents(async_cow);
}
static noinline void async_cow_free(struct btrfs_work *work)
@@ -1194,7 +1199,12 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow); /* -ENOMEM */
- async_cow->inode = igrab(inode);
+ /*
+ * igrab is called higher up in the call chain, take only the
+ * lightweight reference for the callback lifetime
+ */
+ ihold(inode);
+ async_cow->inode = inode;
async_cow->fs_info = fs_info;
async_cow->locked_page = locked_page;
async_cow->start = start;
@@ -1586,11 +1596,10 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
* Function to process delayed allocation (create CoW) for ranges which are
* being touched for the first time.
*/
-int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
+int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started, unsigned long *nr_written,
struct writeback_control *wbc)
{
- struct inode *inode = private_data;
int ret;
int force_cow = need_force_cow(inode, start, end);
unsigned int write_flags = wbc_to_write_flags(wbc);
@@ -3129,9 +3138,6 @@ out:
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
- /* Try to release some metadata so we don't get an OOM but don't wait */
- btrfs_btree_balance_dirty_nodelay(fs_info);
-
return ret;
}
@@ -3250,10 +3256,13 @@ void btrfs_add_delayed_iput(struct inode *inode)
if (atomic_add_unless(&inode->i_count, -1, 1))
return;
+ atomic_inc(&fs_info->nr_delayed_iputs);
spin_lock(&fs_info->delayed_iput_lock);
ASSERT(list_empty(&binode->delayed_iput));
list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
spin_unlock(&fs_info->delayed_iput_lock);
+ if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
+ wake_up_process(fs_info->cleaner_kthread);
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
@@ -3268,11 +3277,32 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
list_del_init(&inode->delayed_iput);
spin_unlock(&fs_info->delayed_iput_lock);
iput(&inode->vfs_inode);
+ if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
+ wake_up(&fs_info->delayed_iputs_wait);
spin_lock(&fs_info->delayed_iput_lock);
}
spin_unlock(&fs_info->delayed_iput_lock);
}
+/**
+ * btrfs_wait_on_delayed_iputs - wait on the delayed iputs to be done running
+ * @fs_info - the fs_info for this fs
+ * @return - EINTR if we were killed, 0 if nothing's pending
+ *
+ * This will wait on any delayed iputs that are currently running with KILLABLE
+ * set. Once they are all done running we will return, unless we are killed,
+ * in which case we return EINTR. This helps in user operations like fallocate etc.
+ * that might get blocked on the iputs.
+ */
+int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
+{
+ int ret = wait_event_killable(fs_info->delayed_iputs_wait,
+ atomic_read(&fs_info->nr_delayed_iputs) == 0);
+ if (ret)
+ return -EINTR;
+ return 0;
+}
+
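The delayed-iput accounting added above follows a common counter-plus-waitqueue shape: producers bump a counter, the side that finishes the work wakes waiters when the counter drops to zero, and btrfs_wait_on_delayed_iputs() sleeps killably on that condition. A generic sketch of the pattern with invented names, not the patch's code:

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/errno.h>

static atomic_t nr_pending = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(pending_wait);

static void queue_item(void)
{
	atomic_inc(&nr_pending);	/* paired with the dec in finish_item() */
}

static void finish_item(void)
{
	if (atomic_dec_and_test(&nr_pending))
		wake_up(&pending_wait);	/* last pending item is done */
}

/* 0 when nothing is pending, -EINTR if the waiting task was killed. */
static int wait_for_pending(void)
{
	if (wait_event_killable(pending_wait,
				atomic_read(&nr_pending) == 0))
		return -EINTR;
	return 0;
}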
/*
* This creates an orphan entry for the given inode in case something goes wrong
* in the middle of an unlink.
@@ -5263,13 +5293,15 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+ u64 delayed_refs_extra = btrfs_calc_trans_metadata_size(fs_info, 1);
int failures = 0;
for (;;) {
struct btrfs_trans_handle *trans;
int ret;
- ret = btrfs_block_rsv_refill(root, rsv, rsv->size,
+ ret = btrfs_block_rsv_refill(root, rsv,
+ rsv->size + delayed_refs_extra,
BTRFS_RESERVE_FLUSH_LIMIT);
if (ret && ++failures > 2) {
@@ -5278,9 +5310,28 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
return ERR_PTR(-ENOSPC);
}
+ /*
+ * Evict can generate a large amount of delayed refs without
+ * having a way to add space back since we exhaust our temporary
+ * block rsv. We aren't allowed to do FLUSH_ALL in this case
+ * because we could deadlock with so many things in the flushing
+ * code, so we have to try and hold some extra space to
+ * compensate for our delayed ref generation. If we can't get
+ * that space then we need to see if we can steal our minimum from
+ * the global reserve. We will be ratelimited by the amount of
+ * space we have for the delayed refs rsv, so we'll end up
+ * committing and trying again.
+ */
trans = btrfs_join_transaction(root);
- if (IS_ERR(trans) || !ret)
+ if (IS_ERR(trans) || !ret) {
+ if (!IS_ERR(trans)) {
+ trans->block_rsv = &fs_info->trans_block_rsv;
+ trans->bytes_reserved = delayed_refs_extra;
+ btrfs_block_rsv_migrate(rsv, trans->block_rsv,
+ delayed_refs_extra, 1);
+ }
return trans;
+ }
/*
* Try to steal from the global reserve if there is space for
@@ -6732,7 +6783,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
u64 extent_start = 0;
u64 extent_end = 0;
u64 objectid = btrfs_ino(inode);
- u32 found_type;
+ u8 extent_type;
struct btrfs_path *path = NULL;
struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *item;
@@ -6787,9 +6838,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
if (ret < 0) {
err = ret;
goto out;
- }
-
- if (ret != 0) {
+ } else if (ret > 0) {
if (path->slots[0] == 0)
goto not_found;
path->slots[0]--;
@@ -6798,11 +6847,9 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- /* are we inside the extent that was found? */
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- found_type = found_key.type;
if (found_key.objectid != objectid ||
- found_type != BTRFS_EXTENT_DATA_KEY) {
+ found_key.type != BTRFS_EXTENT_DATA_KEY) {
/*
* If we backup past the first extent we want to move forward
* and see if there is an extent in front of us, otherwise we'll
@@ -6813,16 +6860,16 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
goto next;
}
- found_type = btrfs_file_extent_type(leaf, item);
+ extent_type = btrfs_file_extent_type(leaf, item);
extent_start = found_key.offset;
- if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ if (extent_type == BTRFS_FILE_EXTENT_REG ||
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
extent_end = extent_start +
btrfs_file_extent_num_bytes(leaf, item);
trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
extent_start);
- } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_ram_bytes(leaf, item);
@@ -6841,9 +6888,9 @@ next:
if (ret < 0) {
err = ret;
goto out;
- }
- if (ret > 0)
+ } else if (ret > 0) {
goto not_found;
+ }
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
@@ -6854,19 +6901,22 @@ next:
goto not_found;
if (start > found_key.offset)
goto next;
+
+ /* New extent overlaps with existing one */
em->start = start;
em->orig_start = start;
em->len = found_key.offset - start;
- goto not_found_em;
+ em->block_start = EXTENT_MAP_HOLE;
+ goto insert;
}
btrfs_extent_item_to_extent_map(inode, path, item,
new_inline, em);
- if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ if (extent_type == BTRFS_FILE_EXTENT_REG ||
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
goto insert;
- } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
unsigned long ptr;
char *map;
size_t size;
@@ -6917,7 +6967,6 @@ not_found:
em->start = start;
em->orig_start = start;
em->len = len;
-not_found_em:
em->block_start = EXTENT_MAP_HOLE;
insert:
btrfs_release_path(path);
@@ -6947,19 +6996,17 @@ out:
}
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
- struct page *page,
- size_t pg_offset, u64 start, u64 len,
- int create)
+ u64 start, u64 len)
{
struct extent_map *em;
struct extent_map *hole_em = NULL;
- u64 range_start = start;
+ u64 delalloc_start = start;
u64 end;
- u64 found;
- u64 found_end;
+ u64 delalloc_len;
+ u64 delalloc_end;
int err = 0;
- em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
+ em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
if (IS_ERR(em))
return em;
/*
@@ -6984,80 +7031,84 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
em = NULL;
/* ok, we didn't find anything, lets look for delalloc */
- found = count_range_bits(&inode->io_tree, &range_start,
+ delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
end, len, EXTENT_DELALLOC, 1);
- found_end = range_start + found;
- if (found_end < range_start)
- found_end = (u64)-1;
+ delalloc_end = delalloc_start + delalloc_len;
+ if (delalloc_end < delalloc_start)
+ delalloc_end = (u64)-1;
/*
- * we didn't find anything useful, return
- * the original results from get_extent()
+ * We didn't find anything useful, return the original results from
+ * get_extent()
*/
- if (range_start > end || found_end <= start) {
+ if (delalloc_start > end || delalloc_end <= start) {
em = hole_em;
hole_em = NULL;
goto out;
}
- /* adjust the range_start to make sure it doesn't
- * go backwards from the start they passed in
+ /*
+ * Adjust the delalloc_start to make sure it doesn't go backwards from
+ * the start they passed in
*/
- range_start = max(start, range_start);
- found = found_end - range_start;
+ delalloc_start = max(start, delalloc_start);
+ delalloc_len = delalloc_end - delalloc_start;
- if (found > 0) {
- u64 hole_start = start;
- u64 hole_len = len;
+ if (delalloc_len > 0) {
+ u64 hole_start;
+ u64 hole_len;
+ const u64 hole_end = extent_map_end(hole_em);
em = alloc_extent_map();
if (!em) {
err = -ENOMEM;
goto out;
}
+ em->bdev = NULL;
+
+ ASSERT(hole_em);
/*
- * when btrfs_get_extent can't find anything it
- * returns one huge hole
+ * When btrfs_get_extent can't find anything it returns one
+ * huge hole
*
- * make sure what it found really fits our range, and
- * adjust to make sure it is based on the start from
- * the caller
+ * Make sure what it found really fits our range, and adjust to
+ * make sure it is based on the start from the caller
*/
- if (hole_em) {
- u64 calc_end = extent_map_end(hole_em);
-
- if (calc_end <= start || (hole_em->start > end)) {
- free_extent_map(hole_em);
- hole_em = NULL;
- } else {
- hole_start = max(hole_em->start, start);
- hole_len = calc_end - hole_start;
- }
+ if (hole_end <= start || hole_em->start > end) {
+ free_extent_map(hole_em);
+ hole_em = NULL;
+ } else {
+ hole_start = max(hole_em->start, start);
+ hole_len = hole_end - hole_start;
}
- em->bdev = NULL;
- if (hole_em && range_start > hole_start) {
- /* our hole starts before our delalloc, so we
- * have to return just the parts of the hole
- * that go until the delalloc starts
+
+ if (hole_em && delalloc_start > hole_start) {
+ /*
+ * Our hole starts before our delalloc, so we have to
+ * return just the parts of the hole that go until the
+ * delalloc starts
*/
- em->len = min(hole_len,
- range_start - hole_start);
+ em->len = min(hole_len, delalloc_start - hole_start);
em->start = hole_start;
em->orig_start = hole_start;
/*
- * don't adjust block start at all,
- * it is fixed at EXTENT_MAP_HOLE
+ * Don't adjust block start at all, it is fixed at
+ * EXTENT_MAP_HOLE
*/
em->block_start = hole_em->block_start;
em->block_len = hole_len;
if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
} else {
- em->start = range_start;
- em->len = found;
- em->orig_start = range_start;
+ /*
+ * Hole is out of passed range or it starts after
+ * delalloc range
+ */
+ em->start = delalloc_start;
+ em->len = delalloc_len;
+ em->orig_start = delalloc_start;
em->block_start = EXTENT_MAP_DELALLOC;
- em->block_len = found;
+ em->block_len = delalloc_len;
}
} else {
return hole_em;
@@ -9911,7 +9962,6 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
- WARN_ON_ONCE(!inode);
btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
btrfs_run_delalloc_work, NULL, NULL);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fab9443f6a42..494f0f10d70e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1642,7 +1642,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
btrfs_info(fs_info, "resizing devid %llu", devid);
}
- device = btrfs_find_device(fs_info, devid, NULL, NULL);
+ device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (!device) {
btrfs_info(fs_info, "resizer unable to find device %llu",
devid);
@@ -3178,7 +3178,8 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
s_uuid = di_args->uuid;
rcu_read_lock();
- dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, di_args->devid, s_uuid,
+ NULL, true);
if (!dev) {
ret = -ENODEV;
@@ -3221,32 +3222,38 @@ static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
inode_lock_nested(inode2, I_MUTEX_CHILD);
}
-static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
+static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len)
+{
+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
+ unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
+}
+
+static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len)
+{
+ if (inode1 < inode2) {
+ swap(inode1, inode2);
+ swap(loff1, loff2);
+ } else if (inode1 == inode2 && loff2 < loff1) {
+ swap(loff1, loff2);
+ }
+ lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
+ lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
+}
+
+static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
struct inode *dst, u64 dst_loff)
{
- u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
int ret;
- u64 len = olen;
- if (loff + len == src->i_size)
- len = ALIGN(src->i_size, bs) - loff;
/*
- * For same inode case we don't want our length pushed out past i_size
- * as comparing that data range makes no sense.
- *
- * This effectively means we require aligned extents for the single
- * inode case, whereas the other cases allow an unaligned length so long
- * as it ends at i_size.
- */
- if (dst == src && len != olen)
- return -EINVAL;
-
- /*
- * Lock destination range to serialize with concurrent readpages().
+ * Lock destination range to serialize with concurrent readpages() and
+ * source range to serialize with relocation.
*/
- lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1);
- ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
- unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1);
+ btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
+ ret = btrfs_clone(src, dst, loff, len, len, dst_loff, 1);
+ btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
return ret;
}
@@ -3257,21 +3264,10 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
struct inode *dst, u64 dst_loff)
{
int ret;
- int num_pages = PAGE_ALIGN(BTRFS_MAX_DEDUPE_LEN) >> PAGE_SHIFT;
u64 i, tail_len, chunk_count;
- /* don't make the dst file partly checksummed */
- if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
- (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM))
- return -EINVAL;
-
- if (IS_SWAPFILE(src) || IS_SWAPFILE(dst))
- return -ETXTBSY;
-
tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
- if (chunk_count == 0)
- num_pages = PAGE_ALIGN(tail_len) >> PAGE_SHIFT;
for (i = 0; i < chunk_count; i++) {
ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
@@ -3887,14 +3883,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
* be either compressed or non-compressed.
*/
- /* don't make the dst file partly checksummed */
- if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
- (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
- return -EINVAL;
-
- if (IS_SWAPFILE(src) || IS_SWAPFILE(inode))
- return -ETXTBSY;
-
/*
* VFS's generic_remap_file_range_prep() protects us from cloning the
* eof block into the middle of a file, which would result in corruption
@@ -3905,17 +3893,33 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
len = ALIGN(src->i_size, bs) - off;
if (destoff > inode->i_size) {
+ const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);
+
ret = btrfs_cont_expand(inode, inode->i_size, destoff);
if (ret)
return ret;
+ /*
+ * We may have truncated the last block if the inode's size is
+ * not sector size aligned, so we need to wait for writeback to
+ * complete before proceeding further, otherwise we can race
+ * with cloning and attempt to increment a reference to an
+ * extent that no longer exists (writeback completed right after
+ * we found the previous extent covering eof and before we
+ * attempted to increment its reference count).
+ */
+ ret = btrfs_wait_ordered_range(inode, wb_start,
+ destoff - wb_start);
+ if (ret)
+ return ret;
}
/*
- * Lock destination range to serialize with concurrent readpages().
+ * Lock destination range to serialize with concurrent readpages() and
+ * source range to serialize with relocation.
*/
- lock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1);
+ btrfs_double_extent_lock(src, off, inode, destoff, len);
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
- unlock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1);
+ btrfs_double_extent_unlock(src, off, inode, destoff, len);
/*
* Truncate page cache pages so that future reads will see the cloned
* data immediately and not the previous data.
@@ -3954,6 +3958,13 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
else
btrfs_double_inode_lock(inode_in, inode_out);
+ /* don't make the dst file partly checksummed */
+ if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
+ (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/*
* Now that the inodes are locked, we need to start writeback ourselves
* and can not rely on the writeback from the VFS's generic helper
@@ -4344,7 +4355,7 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
&sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
0);
- if (copy_to_user(arg, sa, sizeof(*sa)))
+ if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
if (!(sa->flags & BTRFS_SCRUB_READONLY))
@@ -4377,7 +4388,7 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
- if (copy_to_user(arg, sa, sizeof(*sa)))
+ if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
kfree(sa);
@@ -4401,7 +4412,7 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
ret = btrfs_get_dev_stats(fs_info, sa);
- if (copy_to_user(arg, sa, sizeof(*sa)))
+ if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
kfree(sa);
@@ -4447,7 +4458,7 @@ static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
break;
}
- if (copy_to_user(arg, p, sizeof(*p)))
+ if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p)))
ret = -EFAULT;
out:
kfree(p);
@@ -4753,7 +4764,7 @@ do_balance:
ret = btrfs_balance(fs_info, bctl, bargs);
bctl = NULL;
- if (arg) {
+ if ((ret == 0 || ret == -ECANCELED) && arg) {
if (copy_to_user(arg, bargs, sizeof(*bargs)))
ret = -EFAULT;
}
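
The new btrfs_double_extent_lock()/btrfs_double_extent_unlock() helpers above avoid ABBA deadlocks the classic way: both ranges are always taken in one fixed global order (the patch orders by inode pointer, and by offset when both ranges live in the same inode), so two tasks locking the same pair with swapped arguments cannot block each other. The fragment below is a minimal userspace sketch of that ordering rule using pthread mutexes; struct resource, double_lock() and double_unlock() are invented for illustration and are not btrfs or kernel APIs.

#include <pthread.h>
#include <stdio.h>

struct resource {
	pthread_mutex_t lock;
	long value;
};

/*
 * Acquire both locks in one global order (by address here) so that two
 * threads locking the same pair in opposite argument order cannot
 * deadlock.  Any fixed total order works; the only rule is that every
 * caller uses the same one.
 */
static void double_lock(struct resource *a, struct resource *b)
{
	if (a > b) {		/* normalize: lower address first */
		struct resource *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	if (a != b)		/* same object passed twice: lock it once */
		pthread_mutex_lock(&b->lock);
}

static void double_unlock(struct resource *a, struct resource *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct resource x = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct resource y = { PTHREAD_MUTEX_INITIALIZER, 2 };

	double_lock(&x, &y);	/* same effect as double_lock(&y, &x) */
	x.value += y.value;
	double_unlock(&x, &y);

	printf("%ld\n", x.value);
	return 0;
}
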
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 1da768e5ef75..82b84e4daad1 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -14,43 +14,58 @@
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
-/*
- * if we currently have a spinning reader or writer lock
- * (indicated by the rw flag) this will bump the count
- * of blocking holders and drop the spinlock.
- */
-void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
+void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
/*
- * no lock is required. The lock owner may change if
- * we have a read lock, but it won't change to or away
- * from us. If we have the write lock, we are the owner
- * and it'll never change.
+ * No lock is required. The lock owner may change if we have a read
+ * lock, but it won't change to or away from us. If we have the write
+ * lock, we are the owner and it'll never change.
*/
if (eb->lock_nested && current->pid == eb->lock_owner)
return;
- if (rw == BTRFS_WRITE_LOCK) {
- if (atomic_read(&eb->blocking_writers) == 0) {
- WARN_ON(atomic_read(&eb->spinning_writers) != 1);
- atomic_dec(&eb->spinning_writers);
- btrfs_assert_tree_locked(eb);
- atomic_inc(&eb->blocking_writers);
- write_unlock(&eb->lock);
- }
- } else if (rw == BTRFS_READ_LOCK) {
- btrfs_assert_tree_read_locked(eb);
- atomic_inc(&eb->blocking_readers);
- WARN_ON(atomic_read(&eb->spinning_readers) == 0);
- atomic_dec(&eb->spinning_readers);
- read_unlock(&eb->lock);
+ btrfs_assert_tree_read_locked(eb);
+ atomic_inc(&eb->blocking_readers);
+ WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+ atomic_dec(&eb->spinning_readers);
+ read_unlock(&eb->lock);
+}
+
+void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
+{
+ /*
+ * No lock is required. The lock owner may change if we have a read
+ * lock, but it won't change to or away from us. If we have the write
+ * lock, we are the owner and it'll never change.
+ */
+ if (eb->lock_nested && current->pid == eb->lock_owner)
+ return;
+ if (atomic_read(&eb->blocking_writers) == 0) {
+ WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+ atomic_dec(&eb->spinning_writers);
+ btrfs_assert_tree_locked(eb);
+ atomic_inc(&eb->blocking_writers);
+ write_unlock(&eb->lock);
}
}
-/*
- * if we currently have a blocking lock, take the spinlock
- * and drop our blocking count
- */
-void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
+void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
+{
+ /*
+ * No lock is required. The lock owner may change if we have a read
+ * lock, but it won't change to or away from us. If we have the write
+ * lock, we are the owner and it'll never change.
+ */
+ if (eb->lock_nested && current->pid == eb->lock_owner)
+ return;
+ BUG_ON(atomic_read(&eb->blocking_readers) == 0);
+ read_lock(&eb->lock);
+ atomic_inc(&eb->spinning_readers);
+ /* atomic_dec_and_test implies a barrier */
+ if (atomic_dec_and_test(&eb->blocking_readers))
+ cond_wake_up_nomb(&eb->read_lock_wq);
+}
+
+void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
/*
* no lock is required. The lock owner may change if
@@ -60,23 +75,13 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
*/
if (eb->lock_nested && current->pid == eb->lock_owner)
return;
-
- if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
- BUG_ON(atomic_read(&eb->blocking_writers) != 1);
- write_lock(&eb->lock);
- WARN_ON(atomic_read(&eb->spinning_writers));
- atomic_inc(&eb->spinning_writers);
- /* atomic_dec_and_test implies a barrier */
- if (atomic_dec_and_test(&eb->blocking_writers))
- cond_wake_up_nomb(&eb->write_lock_wq);
- } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
- BUG_ON(atomic_read(&eb->blocking_readers) == 0);
- read_lock(&eb->lock);
- atomic_inc(&eb->spinning_readers);
- /* atomic_dec_and_test implies a barrier */
- if (atomic_dec_and_test(&eb->blocking_readers))
- cond_wake_up_nomb(&eb->read_lock_wq);
- }
+ BUG_ON(atomic_read(&eb->blocking_writers) != 1);
+ write_lock(&eb->lock);
+ WARN_ON(atomic_read(&eb->spinning_writers));
+ atomic_inc(&eb->spinning_writers);
+ /* atomic_dec_and_test implies a barrier */
+ if (atomic_dec_and_test(&eb->blocking_writers))
+ cond_wake_up_nomb(&eb->write_lock_wq);
}
/*
@@ -232,16 +237,9 @@ again:
wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
write_lock(&eb->lock);
- if (atomic_read(&eb->blocking_readers)) {
+ if (atomic_read(&eb->blocking_readers) ||
+ atomic_read(&eb->blocking_writers)) {
write_unlock(&eb->lock);
- wait_event(eb->read_lock_wq,
- atomic_read(&eb->blocking_readers) == 0);
- goto again;
- }
- if (atomic_read(&eb->blocking_writers)) {
- write_unlock(&eb->lock);
- wait_event(eb->write_lock_wq,
- atomic_read(&eb->blocking_writers) == 0);
goto again;
}
WARN_ON(atomic_read(&eb->spinning_writers));
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 29135def468e..595014f64830 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -17,8 +17,10 @@ void btrfs_tree_unlock(struct extent_buffer *eb);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw);
-void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
+void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
+void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
+void btrfs_clear_lock_blocking_read(struct extent_buffer *eb);
+void btrfs_clear_lock_blocking_write(struct extent_buffer *eb);
void btrfs_assert_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
@@ -37,13 +39,4 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
BUG();
}
-static inline void btrfs_set_lock_blocking(struct extent_buffer *eb)
-{
- btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
-}
-
-static inline void btrfs_clear_lock_blocking(struct extent_buffer *eb)
-{
- btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
-}
#endif
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 90639140439f..579d53ae256f 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -61,6 +61,28 @@ struct workspace {
struct list_head list;
};
+static struct workspace_manager wsm;
+
+static void lzo_init_workspace_manager(void)
+{
+ btrfs_init_workspace_manager(&wsm, &btrfs_lzo_compress);
+}
+
+static void lzo_cleanup_workspace_manager(void)
+{
+ btrfs_cleanup_workspace_manager(&wsm);
+}
+
+static struct list_head *lzo_get_workspace(unsigned int level)
+{
+ return btrfs_get_workspace(&wsm, level);
+}
+
+static void lzo_put_workspace(struct list_head *ws)
+{
+ btrfs_put_workspace(&wsm, ws);
+}
+
static void lzo_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -71,7 +93,7 @@ static void lzo_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *lzo_alloc_workspace(void)
+static struct list_head *lzo_alloc_workspace(unsigned int level)
{
struct workspace *workspace;
@@ -485,11 +507,16 @@ out:
return ret;
}
-static void lzo_set_level(struct list_head *ws, unsigned int type)
+static unsigned int lzo_set_level(unsigned int level)
{
+ return 0;
}
const struct btrfs_compress_op btrfs_lzo_compress = {
+ .init_workspace_manager = lzo_init_workspace_manager,
+ .cleanup_workspace_manager = lzo_cleanup_workspace_manager,
+ .get_workspace = lzo_get_workspace,
+ .put_workspace = lzo_put_workspace,
.alloc_workspace = lzo_alloc_workspace,
.free_workspace = lzo_free_workspace,
.compress_pages = lzo_compress_pages,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 4e473a998219..c1cd5558a646 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1546,12 +1546,18 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
parent_node = *p;
entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
node);
- if (bytenr < entry->bytenr)
+ if (bytenr < entry->bytenr) {
p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
+ } else if (bytenr > entry->bytenr) {
p = &(*p)->rb_right;
- else
+ } else {
+ if (record->data_rsv && !entry->data_rsv) {
+ entry->data_rsv = record->data_rsv;
+ entry->data_rsv_refroot =
+ record->data_rsv_refroot;
+ }
return 1;
+ }
}
rb_link_node(&record->node, parent_node, p);
@@ -1597,7 +1603,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
|| bytenr == 0 || num_bytes == 0)
return 0;
- record = kmalloc(sizeof(*record), gfp_flag);
+ record = kzalloc(sizeof(*record), gfp_flag);
if (!record)
return -ENOMEM;
@@ -1832,7 +1838,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
src_path->nodes[cur_level] = eb;
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb);
src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
}
@@ -1973,7 +1979,7 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
dst_path->slots[cur_level] = 0;
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb);
dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
need_cleanup = true;
}
@@ -2017,86 +2023,30 @@ out:
return ret;
}
-/*
- * Inform qgroup to trace subtree swap used in balance.
- *
- * Unlike btrfs_qgroup_trace_subtree(), this function will only trace
- * new tree blocks whose generation is equal to (or larger than) @last_snapshot.
- *
- * Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and
- * @dst_slot), and find any tree blocks whose generation is at @last_snapshot,
- * and then go down @src_eb (pointed by @src_parent and @src_slot) to find
- * the counterpart of the tree block, then mark both tree blocks as qgroup dirty,
- * and skip all tree blocks whose generation is smaller than last_snapshot.
- *
- * This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(),
- * which could be the cause of very slow balance if the file tree is large.
- *
- * @src_parent, @src_slot: pointer to src (file tree) eb.
- * @dst_parent, @dst_slot: pointer to dst (reloc tree) eb.
- */
-int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *bg_cache,
- struct extent_buffer *src_parent, int src_slot,
- struct extent_buffer *dst_parent, int dst_slot,
- u64 last_snapshot)
+static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
+ struct extent_buffer *src_eb,
+ struct extent_buffer *dst_eb,
+ u64 last_snapshot, bool trace_leaf)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_path *dst_path = NULL;
- struct btrfs_key first_key;
- struct extent_buffer *src_eb = NULL;
- struct extent_buffer *dst_eb = NULL;
- bool trace_leaf = false;
- u64 child_gen;
- u64 child_bytenr;
int level;
int ret;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0;
- /* Check parameter order */
- if (btrfs_node_ptr_generation(src_parent, src_slot) >
- btrfs_node_ptr_generation(dst_parent, dst_slot)) {
+ /* Wrong parameter order */
+ if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
btrfs_err_rl(fs_info,
"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
- btrfs_node_ptr_generation(src_parent, src_slot),
- btrfs_node_ptr_generation(dst_parent, dst_slot));
+ btrfs_header_generation(src_eb),
+ btrfs_header_generation(dst_eb));
return -EUCLEAN;
}
- /*
- * Only trace leaf if we're relocating data block groups, this could
- * reduce tons of data extents tracing for meta/sys bg relocation.
- */
- if (bg_cache->flags & BTRFS_BLOCK_GROUP_DATA)
- trace_leaf = true;
- /* Read out real @src_eb, pointed by @src_parent and @src_slot */
- child_bytenr = btrfs_node_blockptr(src_parent, src_slot);
- child_gen = btrfs_node_ptr_generation(src_parent, src_slot);
- btrfs_node_key_to_cpu(src_parent, &first_key, src_slot);
-
- src_eb = read_tree_block(fs_info, child_bytenr, child_gen,
- btrfs_header_level(src_parent) - 1, &first_key);
- if (IS_ERR(src_eb)) {
- ret = PTR_ERR(src_eb);
- goto out;
- }
-
- /* Read out real @dst_eb, pointed by @src_parent and @src_slot */
- child_bytenr = btrfs_node_blockptr(dst_parent, dst_slot);
- child_gen = btrfs_node_ptr_generation(dst_parent, dst_slot);
- btrfs_node_key_to_cpu(dst_parent, &first_key, dst_slot);
-
- dst_eb = read_tree_block(fs_info, child_bytenr, child_gen,
- btrfs_header_level(dst_parent) - 1, &first_key);
- if (IS_ERR(dst_eb)) {
- ret = PTR_ERR(dst_eb);
- goto out;
- }
-
if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
- ret = -EINVAL;
+ ret = -EIO;
goto out;
}
@@ -2106,14 +2056,13 @@ int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
ret = -ENOMEM;
goto out;
}
-
/* For dst_path */
extent_buffer_get(dst_eb);
dst_path->nodes[level] = dst_eb;
dst_path->slots[level] = 0;
dst_path->locks[level] = 0;
- /* Do the generation-aware breadth-first search */
+ /* Do the generation aware breadth-first search */
ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
level, last_snapshot, trace_leaf);
if (ret < 0)
@@ -2121,8 +2070,6 @@ int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
ret = 0;
out:
- free_extent_buffer(src_eb);
- free_extent_buffer(dst_eb);
btrfs_free_path(dst_path);
if (ret < 0)
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2207,7 +2154,7 @@ walk_down:
path->slots[level] = 0;
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb);
path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
@@ -2576,6 +2523,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
goto cleanup;
}
+ /* Free the reserved data space */
+ btrfs_qgroup_free_refroot(fs_info,
+ record->data_rsv_refroot,
+ record->data_rsv,
+ BTRFS_QGROUP_RSV_DATA);
/*
* Use SEQ_LAST as time_seq to do special search, which
* doesn't lock tree or delayed_refs and search current
@@ -2842,16 +2794,15 @@ out:
/*
* Two limits to commit transaction in advance.
*
- * For RATIO, it will be 1/RATIO of the remaining limit
- * (excluding data and prealloc meta) as threshold.
+ * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
* For SIZE, it will be in byte unit as threshold.
*/
-#define QGROUP_PERTRANS_RATIO 32
-#define QGROUP_PERTRANS_SIZE SZ_32M
+#define QGROUP_FREE_RATIO 32
+#define QGROUP_FREE_SIZE SZ_32M
static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
const struct btrfs_qgroup *qg, u64 num_bytes)
{
- u64 limit;
+ u64 free;
u64 threshold;
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
@@ -2870,20 +2821,21 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
*/
if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
- if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
- limit = qg->max_excl;
- else
- limit = qg->max_rfer;
- threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
- qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
- QGROUP_PERTRANS_RATIO;
- threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);
+ if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
+ free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
+ threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
+ QGROUP_FREE_SIZE);
+ } else {
+ free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
+ threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
+ QGROUP_FREE_SIZE);
+ }
/*
* Use transaction_kthread to commit transaction, so we no
* longer need to bother nested transaction nor lock context.
*/
- if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
+ if (free < threshold)
btrfs_commit_transaction_locksafe(fs_info);
}
@@ -2959,7 +2911,6 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
qg = unode_aux_to_qgroup(unode);
- trace_qgroup_update_reserve(fs_info, qg, num_bytes, type);
qgroup_rsv_add(fs_info, qg, num_bytes, type);
}
@@ -3026,7 +2977,6 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
qg = unode_aux_to_qgroup(unode);
- trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes, type);
qgroup_rsv_release(fs_info, qg, num_bytes, type);
list_for_each_entry(glist, &qg->groups, next_group) {
@@ -3783,3 +3733,241 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
}
extent_changeset_release(&changeset);
}
+
+void btrfs_qgroup_init_swapped_blocks(
+ struct btrfs_qgroup_swapped_blocks *swapped_blocks)
+{
+ int i;
+
+ spin_lock_init(&swapped_blocks->lock);
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++)
+ swapped_blocks->blocks[i] = RB_ROOT;
+ swapped_blocks->swapped = false;
+}
+
+/*
+ * Delete all swapped blocks record of @root.
+ * Every record here means we skipped a full subtree scan for qgroup.
+ *
+ * Gets called when committing one transaction.
+ */
+void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
+{
+ struct btrfs_qgroup_swapped_blocks *swapped_blocks;
+ int i;
+
+ swapped_blocks = &root->swapped_blocks;
+
+ spin_lock(&swapped_blocks->lock);
+ if (!swapped_blocks->swapped)
+ goto out;
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+ struct rb_root *cur_root = &swapped_blocks->blocks[i];
+ struct btrfs_qgroup_swapped_block *entry;
+ struct btrfs_qgroup_swapped_block *next;
+
+ rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
+ node)
+ kfree(entry);
+ swapped_blocks->blocks[i] = RB_ROOT;
+ }
+ swapped_blocks->swapped = false;
+out:
+ spin_unlock(&swapped_blocks->lock);
+}
+
+/*
+ * Add subtree roots record into @subvol_root.
+ *
+ * @subvol_root: tree root of the subvolume tree get swapped
+ * @bg: block group under balance
+ * @subvol_parent/slot: pointer to the subtree root in subvolume tree
+ * @reloc_parent/slot: pointer to the subtree root in reloc tree
+ * BOTH POINTERS ARE BEFORE TREE SWAP
+ * @last_snapshot: last snapshot generation of the subvolume tree
+ */
+int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
+ struct btrfs_root *subvol_root,
+ struct btrfs_block_group_cache *bg,
+ struct extent_buffer *subvol_parent, int subvol_slot,
+ struct extent_buffer *reloc_parent, int reloc_slot,
+ u64 last_snapshot)
+{
+ struct btrfs_fs_info *fs_info = subvol_root->fs_info;
+ struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
+ struct btrfs_qgroup_swapped_block *block;
+ struct rb_node **cur;
+ struct rb_node *parent = NULL;
+ int level = btrfs_header_level(subvol_parent) - 1;
+ int ret = 0;
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return 0;
+
+ if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
+ btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
+ btrfs_err_rl(fs_info,
+ "%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
+ __func__,
+ btrfs_node_ptr_generation(subvol_parent, subvol_slot),
+ btrfs_node_ptr_generation(reloc_parent, reloc_slot));
+ return -EUCLEAN;
+ }
+
+ block = kmalloc(sizeof(*block), GFP_NOFS);
+ if (!block) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * @reloc_parent/slot is still before swap, while @block is going to
+ * record the bytenr after swap, so we do the swap here.
+ */
+ block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
+ block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
+ reloc_slot);
+ block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
+ block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
+ subvol_slot);
+ block->last_snapshot = last_snapshot;
+ block->level = level;
+ if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
+ block->trace_leaf = true;
+ else
+ block->trace_leaf = false;
+ btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
+
+ /* Insert @block into @blocks */
+ spin_lock(&blocks->lock);
+ cur = &blocks->blocks[level].rb_node;
+ while (*cur) {
+ struct btrfs_qgroup_swapped_block *entry;
+
+ parent = *cur;
+ entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
+ node);
+
+ if (entry->subvol_bytenr < block->subvol_bytenr) {
+ cur = &(*cur)->rb_left;
+ } else if (entry->subvol_bytenr > block->subvol_bytenr) {
+ cur = &(*cur)->rb_right;
+ } else {
+ if (entry->subvol_generation !=
+ block->subvol_generation ||
+ entry->reloc_bytenr != block->reloc_bytenr ||
+ entry->reloc_generation !=
+ block->reloc_generation) {
+ /*
+ * Duplicated but mismatch entry found.
+ * Shouldn't happen.
+ *
+ * Marking qgroup inconsistent should be enough
+ * for end users.
+ */
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ ret = -EEXIST;
+ }
+ kfree(block);
+ goto out_unlock;
+ }
+ }
+ rb_link_node(&block->node, parent, cur);
+ rb_insert_color(&block->node, &blocks->blocks[level]);
+ blocks->swapped = true;
+out_unlock:
+ spin_unlock(&blocks->lock);
+out:
+ if (ret < 0)
+ fs_info->qgroup_flags |=
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ return ret;
+}
+
+/*
+ * Check if the tree block is a subtree root, and if so do the needed
+ * delayed subtree trace for qgroup.
+ *
+ * This is called during btrfs_cow_block().
+ */
+int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *subvol_eb)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
+ struct btrfs_qgroup_swapped_block *block;
+ struct extent_buffer *reloc_eb = NULL;
+ struct rb_node *node;
+ bool found = false;
+ bool swapped = false;
+ int level = btrfs_header_level(subvol_eb);
+ int ret = 0;
+ int i;
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return 0;
+ if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
+ return 0;
+
+ spin_lock(&blocks->lock);
+ if (!blocks->swapped) {
+ spin_unlock(&blocks->lock);
+ return 0;
+ }
+ node = blocks->blocks[level].rb_node;
+
+ while (node) {
+ block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
+ if (block->subvol_bytenr < subvol_eb->start) {
+ node = node->rb_left;
+ } else if (block->subvol_bytenr > subvol_eb->start) {
+ node = node->rb_right;
+ } else {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ spin_unlock(&blocks->lock);
+ goto out;
+ }
+ /* Found one, remove it from @blocks first and update blocks->swapped */
+ rb_erase(&block->node, &blocks->blocks[level]);
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+ if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
+ swapped = true;
+ break;
+ }
+ }
+ blocks->swapped = swapped;
+ spin_unlock(&blocks->lock);
+
+ /* Read out reloc subtree root */
+ reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
+ block->reloc_generation, block->level,
+ &block->first_key);
+ if (IS_ERR(reloc_eb)) {
+ ret = PTR_ERR(reloc_eb);
+ reloc_eb = NULL;
+ goto free_out;
+ }
+ if (!extent_buffer_uptodate(reloc_eb)) {
+ ret = -EIO;
+ goto free_out;
+ }
+
+ ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
+ block->last_snapshot, block->trace_leaf);
+free_out:
+ kfree(block);
+ free_extent_buffer(reloc_eb);
+out:
+ if (ret < 0) {
+ btrfs_err_rl(fs_info,
+ "failed to account subtree at bytenr %llu: %d",
+ subvol_eb->start, ret);
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ }
+ return ret;
+}
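
btrfs_qgroup_add_swapped_blocks() above inserts each swapped subtree root into an rb-tree keyed by its subvolume bytenr, and on a duplicate key it only tolerates an entry whose remaining fields match; a mismatching duplicate is treated as a logic error (-EEXIST, qgroups marked inconsistent). The standalone sketch below shows that keyed insert with duplicate checking; it uses an unbalanced binary tree and a single payload field instead of the kernel's rb-tree and full record, and the names are illustrative only.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for btrfs_qgroup_swapped_block: keyed by bytenr. */
struct swapped_block {
	uint64_t bytenr;	/* search key */
	uint64_t generation;	/* payload that must match on duplicates */
	struct swapped_block *left, *right;
};

/*
 * Insert @blk into the tree rooted at *root.  Duplicate keys are only
 * tolerated when the payload matches (the new record is simply dropped);
 * a mismatching duplicate is reported to the caller, mirroring the
 * -EEXIST path in the hunk above.
 */
static int insert_swapped_block(struct swapped_block **root,
				struct swapped_block *blk)
{
	struct swapped_block **cur = root;

	while (*cur) {
		if (blk->bytenr < (*cur)->bytenr) {
			cur = &(*cur)->left;
		} else if (blk->bytenr > (*cur)->bytenr) {
			cur = &(*cur)->right;
		} else {
			int mismatch = (*cur)->generation != blk->generation;

			free(blk);	/* duplicate: drop the new record */
			return mismatch ? -EEXIST : 0;
		}
	}
	blk->left = blk->right = NULL;
	*cur = blk;
	return 0;
}

int main(void)
{
	struct swapped_block *root = NULL;
	uint64_t keys[] = { 4096, 8192, 4096 };

	for (int i = 0; i < 3; i++) {
		struct swapped_block *b = calloc(1, sizeof(*b));

		b->bytenr = keys[i];
		b->generation = 7;
		printf("insert %llu -> %d\n", (unsigned long long)keys[i],
		       insert_swapped_block(&root, b));
	}
	return 0;
}
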
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 20c6bd5fa701..46ba7bd2961c 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -6,6 +6,8 @@
#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
#include "ulist.h"
#include "delayed-ref.h"
@@ -38,6 +40,66 @@
*/
/*
+ * Special performance optimization for balance.
+ *
+ * For balance, we need to swap subtree of subvolume and reloc trees.
+ * In theory, we need to trace all subtree blocks of both subvolume and reloc
+ * trees, since their owner has changed during such swap.
+ *
+ * However since balance has ensured that both subtrees are containing the
+ * same contents and have the same tree structures, such swap won't cause
+ * qgroup number change.
+ *
+ * But there is a race window between subtree swap and transaction commit,
+ * during that window, if we increase/decrease tree level or merge/split tree
+ * blocks, we still need to trace the original subtrees.
+ *
+ * So for balance, we use a delayed subtree tracing, whose workflow is:
+ *
+ * 1) Record the subtree root block get swapped.
+ *
+ * During subtree swap:
+ * O = Old tree blocks
+ * N = New tree blocks
+ *          reloc tree                     subvolume tree X
+ *             Root                               Root
+ *            /    \                             /    \
+ *          NA     OB                          OA      OB
+ *        /  |     |  \                      /  |      |  \
+ *      NC  ND     OE  OF                   OC  OD     OE  OF
+ *
+ * In this case, NA and OA are going to be swapped, record (NA, OA) into
+ * subvolume tree X.
+ *
+ * 2) After subtree swap.
+ *          reloc tree                     subvolume tree X
+ *             Root                               Root
+ *            /    \                             /    \
+ *          OA     OB                          NA      OB
+ *        /  |     |  \                      /  |      |  \
+ *      OC  OD     OE  OF                   NC  ND     OE  OF
+ *
+ * 3a) COW happens for OB
+ * If we are going to COW tree block OB, we check OB's bytenr against
+ * tree X's swapped_blocks structure.
+ * If it doesn't fit any, nothing will happen.
+ *
+ * 3b) COW happens for NA
+ * Check NA's bytenr against tree X's swapped_blocks, and get a hit.
+ * Then we do subtree scan on both subtrees OA and NA.
+ * Resulting 6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
+ *
+ * Then no matter what we do to subvolume tree X, qgroup numbers will
+ * still be correct.
+ * Then NA's record gets removed from X's swapped_blocks.
+ *
+ * 4) Transaction commit
+ * Any record in X's swapped_blocks gets removed, since there is no
+ * modification to the swapped subtrees, no need to trigger heavy qgroup
+ * subtree rescan for them.
+ */
+
+/*
* Record a dirty extent, and info qgroup to update quota on it
* TODO: Use kmem cache to alloc it.
*/
@@ -45,9 +107,38 @@ struct btrfs_qgroup_extent_record {
struct rb_node node;
u64 bytenr;
u64 num_bytes;
+
+ /*
+ * For qgroup reserved data space freeing.
+ *
+ * @data_rsv_refroot and @data_rsv will be recorded after
+ * BTRFS_ADD_DELAYED_EXTENT is called.
+ * And will be used to free reserved qgroup space at
+ * transaction commit time.
+ */
+ u32 data_rsv; /* reserved data space needs to be freed */
+ u64 data_rsv_refroot; /* which root the reserved data belongs to */
struct ulist *old_roots;
};
+struct btrfs_qgroup_swapped_block {
+ struct rb_node node;
+
+ int level;
+ bool trace_leaf;
+
+ /* bytenr/generation of the tree block in subvolume tree after swap */
+ u64 subvol_bytenr;
+ u64 subvol_generation;
+
+ /* bytenr/generation of the tree block in reloc tree after swap */
+ u64 reloc_bytenr;
+ u64 reloc_generation;
+
+ u64 last_snapshot;
+ struct btrfs_key first_key;
+};
+
/*
* Qgroup reservation types:
*
@@ -236,12 +327,6 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
-
-int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
- struct btrfs_block_group_cache *bg_cache,
- struct extent_buffer *src_parent, int src_slot,
- struct extent_buffer *dst_parent, int dst_slot,
- u64 last_snapshot);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
u64 num_bytes, struct ulist *old_roots,
struct ulist *new_roots);
@@ -252,15 +337,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes,
enum btrfs_qgroup_rsv_type type);
-static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
- u64 ref_root, u64 num_bytes)
-{
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
- return;
- trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
- btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes,
- BTRFS_QGROUP_RSV_DATA);
-}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
@@ -325,4 +401,18 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
void btrfs_qgroup_check_reserved_leak(struct inode *inode);
+/* btrfs_qgroup_swapped_blocks related functions */
+void btrfs_qgroup_init_swapped_blocks(
+ struct btrfs_qgroup_swapped_blocks *swapped_blocks);
+
+void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
+int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
+ struct btrfs_root *subvol_root,
+ struct btrfs_block_group_cache *bg,
+ struct extent_buffer *subvol_parent, int subvol_slot,
+ struct extent_buffer *reloc_parent, int reloc_slot,
+ u64 last_snapshot);
+int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct extent_buffer *eb);
+
#endif
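
The workflow documented in the new qgroup.h comment (record the swapped pair at balance time, consume it on the next COW of that block, drop whatever is left at transaction commit) is essentially a small pending-work cache. The toy model below, with invented names and a fixed-size array instead of per-level rb-trees, shows only that lifecycle and nothing of the real tracing work.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PENDING 16

/* One pending "swapped subtree" record: consumed lazily, never eagerly. */
struct pending_swap {
	bool used;
	uint64_t subvol_bytenr;	/* key checked at COW time */
	uint64_t reloc_bytenr;	/* counterpart to trace when consumed */
};

static struct pending_swap pending[MAX_PENDING];

/* 1) Record a swapped pair; nothing is traced yet. */
static void record_swap(uint64_t subvol, uint64_t reloc)
{
	for (int i = 0; i < MAX_PENDING; i++) {
		if (!pending[i].used) {
			pending[i] = (struct pending_swap){ true, subvol, reloc };
			return;
		}
	}
}

/* 3) On COW of @bytenr: if it is a recorded root, trace it now and drop it. */
static void on_cow(uint64_t bytenr)
{
	for (int i = 0; i < MAX_PENDING; i++) {
		if (pending[i].used && pending[i].subvol_bytenr == bytenr) {
			printf("trace subtrees %llu <-> %llu\n",
			       (unsigned long long)bytenr,
			       (unsigned long long)pending[i].reloc_bytenr);
			pending[i].used = false;
			return;
		}
	}
	/* Not a swapped root: COW proceeds without extra qgroup work. */
}

/* 4) Transaction commit: untouched records need no rescan, just drop them. */
static void on_commit(void)
{
	memset(pending, 0, sizeof(pending));
}

int main(void)
{
	record_swap(4096, 16384);
	record_swap(8192, 20480);
	on_cow(4096);	/* hit: traced and removed */
	on_cow(12288);	/* miss: nothing happens */
	on_commit();	/* remaining record (8192) dropped cheaply */
	return 0;
}
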
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index c3557c12656b..d09b6cdb785a 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -583,7 +583,7 @@ static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
return -EIO;
}
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb);
path->nodes[level-1] = eb;
path->slots[level-1] = 0;
path->locks[level-1] = BTRFS_READ_LOCK_BLOCKING;
@@ -987,7 +987,7 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
return -ENOMEM;
eb = btrfs_read_lock_root_node(fs_info->extent_root);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_set_lock_blocking_read(eb);
level = btrfs_header_level(eb);
path->nodes[level] = eb;
path->slots[level] = 0;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 272b287f8cf0..ddf028509931 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -162,6 +162,8 @@ struct reloc_control {
struct mapping_tree reloc_root_tree;
/* list of reloc trees */
struct list_head reloc_roots;
+ /* list of subvolume trees that get relocated */
+ struct list_head dirty_subvol_roots;
/* size of metadata reservation for merging reloc trees */
u64 merging_rsv_size;
/* size of relocated tree nodes */
@@ -1467,15 +1469,17 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root_item *root_item;
int ret;
- if (!root->reloc_root)
+ if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) ||
+ !root->reloc_root)
goto out;
reloc_root = root->reloc_root;
root_item = &reloc_root->root_item;
+ /* root->reloc_root will stay until current relocation finished */
if (fs_info->reloc_ctl->merge_reloc_tree &&
btrfs_root_refs(root_item) == 0) {
- root->reloc_root = NULL;
+ set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
__del_reloc_root(reloc_root);
}
@@ -1773,7 +1777,7 @@ again:
btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
eb = btrfs_lock_root_node(dest);
- btrfs_set_lock_blocking(eb);
+ btrfs_set_lock_blocking_write(eb);
level = btrfs_header_level(eb);
if (level < lowest_level) {
@@ -1786,7 +1790,7 @@ again:
ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
BUG_ON(ret);
}
- btrfs_set_lock_blocking(eb);
+ btrfs_set_lock_blocking_write(eb);
if (next_key) {
next_key->objectid = (u64)-1;
@@ -1802,6 +1806,8 @@ again:
BUG_ON(level < lowest_level);
ret = btrfs_bin_search(parent, &key, level, &slot);
+ if (ret < 0)
+ break;
if (ret && slot > 0)
slot--;
@@ -1852,7 +1858,7 @@ again:
slot, &eb);
BUG_ON(ret);
}
- btrfs_set_lock_blocking(eb);
+ btrfs_set_lock_blocking_write(eb);
btrfs_tree_unlock(parent);
free_extent_buffer(parent);
@@ -1885,15 +1891,18 @@ again:
* If not traced, we will leak data numbers
* 2) Fs subtree
* If not traced, we will double count old data
- * and tree block numbers, if current trans doesn't free
- * data reloc tree inode.
+ *
+ * We don't scan the subtree right now, but only record
+ * the swapped tree blocks.
+ * The real subtree rescan is delayed until we have new
+ * CoW on the subtree root node before transaction commit.
*/
- ret = btrfs_qgroup_trace_subtree_swap(trans, rc->block_group,
- parent, slot, path->nodes[level],
- path->slots[level], last_snapshot);
+ ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
+ rc->block_group, parent, slot,
+ path->nodes[level], path->slots[level],
+ last_snapshot);
if (ret < 0)
break;
-
/*
* swap blocks in fs tree and reloc tree.
*/
@@ -2121,6 +2130,58 @@ static int find_next_key(struct btrfs_path *path, int level,
}
/*
+ * Insert current subvolume into reloc_control::dirty_subvol_roots
+ */
+static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc,
+ struct btrfs_root *root)
+{
+ struct btrfs_root *reloc_root = root->reloc_root;
+ struct btrfs_root_item *reloc_root_item;
+
+ /* @root must be a subvolume tree root with a valid reloc tree */
+ ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(reloc_root);
+
+ reloc_root_item = &reloc_root->root_item;
+ memset(&reloc_root_item->drop_progress, 0,
+ sizeof(reloc_root_item->drop_progress));
+ reloc_root_item->drop_level = 0;
+ btrfs_set_root_refs(reloc_root_item, 0);
+ btrfs_update_reloc_root(trans, root);
+
+ if (list_empty(&root->reloc_dirty_list)) {
+ btrfs_grab_fs_root(root);
+ list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
+ }
+}
+
+static int clean_dirty_subvols(struct reloc_control *rc)
+{
+ struct btrfs_root *root;
+ struct btrfs_root *next;
+ int ret = 0;
+
+ list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
+ reloc_dirty_list) {
+ struct btrfs_root *reloc_root = root->reloc_root;
+
+ clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
+ list_del_init(&root->reloc_dirty_list);
+ root->reloc_root = NULL;
+ if (reloc_root) {
+ int ret2;
+
+ ret2 = btrfs_drop_snapshot(reloc_root, NULL, 0, 1);
+ if (ret2 < 0 && !ret)
+ ret = ret2;
+ }
+ btrfs_put_fs_root(root);
+ }
+ return ret;
+}
+
+/*
* merge the relocated tree blocks in reloc tree with corresponding
* fs tree.
*/
@@ -2128,7 +2189,6 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- LIST_HEAD(inode_list);
struct btrfs_key key;
struct btrfs_key next_key;
struct btrfs_trans_handle *trans = NULL;
@@ -2259,13 +2319,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
out:
btrfs_free_path(path);
- if (err == 0) {
- memset(&root_item->drop_progress, 0,
- sizeof(root_item->drop_progress));
- root_item->drop_level = 0;
- btrfs_set_root_refs(root_item, 0);
- btrfs_update_reloc_root(trans, root);
- }
+ if (err == 0)
+ insert_dirty_subvol(trans, rc, root);
if (trans)
btrfs_end_transaction_throttle(trans);
@@ -2410,14 +2465,6 @@ again:
} else {
list_del_init(&reloc_root->root_list);
}
-
- ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
- if (ret < 0) {
- if (list_empty(&reloc_root->root_list))
- list_add_tail(&reloc_root->root_list,
- &reloc_roots);
- goto out;
- }
}
if (found) {
@@ -2685,6 +2732,10 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (!lowest) {
ret = btrfs_bin_search(upper->eb, key,
upper->level, &slot);
+ if (ret < 0) {
+ err = ret;
+ goto next;
+ }
BUG_ON(ret);
bytenr = btrfs_node_blockptr(upper->eb, slot);
if (node->eb->start == bytenr)
@@ -2720,6 +2771,10 @@ static int do_relocation(struct btrfs_trans_handle *trans,
} else {
ret = btrfs_bin_search(upper->eb, key, upper->level,
&slot);
+ if (ret < 0) {
+ err = ret;
+ goto next;
+ }
BUG_ON(ret);
}
@@ -2752,7 +2807,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
goto next;
}
btrfs_tree_lock(eb);
- btrfs_set_lock_blocking(eb);
+ btrfs_set_lock_blocking_write(eb);
if (!node->eb) {
ret = btrfs_cow_block(trans, root, eb, upper->eb,
@@ -4079,6 +4134,9 @@ restart:
goto out_free;
}
btrfs_commit_transaction(trans);
+ ret = clean_dirty_subvols(rc);
+ if (ret < 0 && !err)
+ err = ret;
out_free:
btrfs_free_block_rsv(fs_info, rc->block_rsv);
btrfs_free_path(path);
@@ -4173,6 +4231,7 @@ static struct reloc_control *alloc_reloc_control(void)
return NULL;
INIT_LIST_HEAD(&rc->reloc_roots);
+ INIT_LIST_HEAD(&rc->dirty_subvol_roots);
backref_cache_init(&rc->backref_cache);
mapping_tree_init(&rc->reloc_root_tree);
extent_io_tree_init(&rc->processed_blocks, NULL);
@@ -4468,6 +4527,10 @@ int btrfs_recover_relocation(struct btrfs_root *root)
goto out_free;
}
err = btrfs_commit_transaction(trans);
+
+ ret = clean_dirty_subvols(rc);
+ if (ret < 0 && !err)
+ err = ret;
out_free:
kfree(rc);
out:
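
clean_dirty_subvols() above drains a list that relocation only appended to, keeps going even when individual entries fail, and reports the first error it saw (the "ret2 < 0 && !ret" aggregation). The standalone sketch below mimics that defer-then-drain shape with an invented task list; it is not the btrfs code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred_task {
	int id;
	struct deferred_task *next;
};

static struct deferred_task *deferred_head;

/* Queue work instead of doing it inline (cf. insert_dirty_subvol()). */
static void defer_task(int id)
{
	struct deferred_task *t = malloc(sizeof(*t));

	t->id = id;
	t->next = deferred_head;
	deferred_head = t;
}

static int run_task(int id)
{
	return (id == 2) ? -EIO : 0;	/* pretend task 2 fails */
}

/*
 * Drain the whole list even if some entries fail, and return the first
 * error seen: the same aggregation rule as clean_dirty_subvols().
 */
static int drain_deferred(void)
{
	int ret = 0;

	while (deferred_head) {
		struct deferred_task *t = deferred_head;
		int ret2 = run_task(t->id);

		if (ret2 < 0 && !ret)
			ret = ret2;
		deferred_head = t->next;
		free(t);
	}
	return ret;
}

int main(void)
{
	for (int i = 1; i <= 3; i++)
		defer_task(i);
	printf("drain returned %d\n", drain_deferred());
	return 0;
}
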
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 65bda0682928..0d2b957ca3a3 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -21,12 +21,12 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
struct btrfs_root_item *item)
{
uuid_le uuid;
- int len;
+ u32 len;
int need_reset = 0;
len = btrfs_item_size_nr(eb, slot);
read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot),
- min_t(int, len, (int)sizeof(*item)));
+ min_t(u32, len, sizeof(*item)));
if (len < sizeof(*item))
need_reset = 1;
if (!need_reset && btrfs_root_generation(item)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6dcd36d7b849..a99588536c79 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -584,6 +584,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
sctx->curr = -1;
sctx->fs_info = fs_info;
+ INIT_LIST_HEAD(&sctx->csum_list);
for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
struct scrub_bio *sbio;
@@ -608,7 +609,6 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
atomic_set(&sctx->workers_pending, 0);
atomic_set(&sctx->cancel_req, 0);
sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
- INIT_LIST_HEAD(&sctx->csum_list);
spin_lock_init(&sctx->list_lock);
spin_lock_init(&sctx->stat_lock);
@@ -3741,25 +3741,33 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
int max_active = fs_info->thread_pool_size;
- if (fs_info->scrub_workers_refcnt == 0) {
+ lockdep_assert_held(&fs_info->scrub_lock);
+
+ if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
+ ASSERT(fs_info->scrub_workers == NULL);
fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
flags, is_dev_replace ? 1 : max_active, 4);
if (!fs_info->scrub_workers)
goto fail_scrub_workers;
+ ASSERT(fs_info->scrub_wr_completion_workers == NULL);
fs_info->scrub_wr_completion_workers =
btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
max_active, 2);
if (!fs_info->scrub_wr_completion_workers)
goto fail_scrub_wr_completion_workers;
+ ASSERT(fs_info->scrub_parity_workers == NULL);
fs_info->scrub_parity_workers =
btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
max_active, 2);
if (!fs_info->scrub_parity_workers)
goto fail_scrub_parity_workers;
+
+ refcount_set(&fs_info->scrub_workers_refcnt, 1);
+ } else {
+ refcount_inc(&fs_info->scrub_workers_refcnt);
}
- ++fs_info->scrub_workers_refcnt;
return 0;
fail_scrub_parity_workers:
@@ -3770,16 +3778,6 @@ fail_scrub_workers:
return -ENOMEM;
}
-static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
-{
- if (--fs_info->scrub_workers_refcnt == 0) {
- btrfs_destroy_workqueue(fs_info->scrub_workers);
- btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
- btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
- }
- WARN_ON(fs_info->scrub_workers_refcnt < 0);
-}
-
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
u64 end, struct btrfs_scrub_progress *progress,
int readonly, int is_dev_replace)
@@ -3788,6 +3786,9 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
int ret;
struct btrfs_device *dev;
unsigned int nofs_flag;
+ struct btrfs_workqueue *scrub_workers = NULL;
+ struct btrfs_workqueue *scrub_wr_comp = NULL;
+ struct btrfs_workqueue *scrub_parity = NULL;
if (btrfs_fs_closing(fs_info))
return -EINVAL;
@@ -3835,7 +3836,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return PTR_ERR(sctx);
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
!is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -3903,6 +3904,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
*/
nofs_flag = memalloc_nofs_save();
if (!is_dev_replace) {
+ btrfs_info(fs_info, "scrub: started on devid %llu", devid);
/*
* by holding device list mutex, we can
* kick off writing super in log tree sync.
@@ -3925,11 +3927,26 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (progress)
memcpy(progress, &sctx->stat, sizeof(*progress));
+ if (!is_dev_replace)
+ btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
+ ret ? "not finished" : "finished", devid, ret);
+
mutex_lock(&fs_info->scrub_lock);
dev->scrub_ctx = NULL;
- scrub_workers_put(fs_info);
+ if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
+ scrub_workers = fs_info->scrub_workers;
+ scrub_wr_comp = fs_info->scrub_wr_completion_workers;
+ scrub_parity = fs_info->scrub_parity_workers;
+
+ fs_info->scrub_workers = NULL;
+ fs_info->scrub_wr_completion_workers = NULL;
+ fs_info->scrub_parity_workers = NULL;
+ }
mutex_unlock(&fs_info->scrub_lock);
+ btrfs_destroy_workqueue(scrub_workers);
+ btrfs_destroy_workqueue(scrub_wr_comp);
+ btrfs_destroy_workqueue(scrub_parity);
scrub_put_ctx(sctx);
return ret;
@@ -4012,7 +4029,7 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct scrub_ctx *sctx = NULL;
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (dev)
sctx = dev->scrub_ctx;
if (sctx)
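
The scrub changes above turn the plain worker counter into a refcount and, on the last put, only detach the workqueue pointers while scrub_lock is held; the actual destruction happens after the lock is dropped, so setup and teardown stay serialized without sleeping work under the mutex. Below is a small pthread sketch of that shape; the resource and function names are made up and stand in for the btrfs workqueues.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct workers {
	int dummy;
};

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int workers_refcnt;	/* protected by state_lock */
static struct workers *shared_workers;	/* protected by state_lock */

static int workers_get(void)
{
	pthread_mutex_lock(&state_lock);
	if (workers_refcnt == 0) {
		shared_workers = calloc(1, sizeof(*shared_workers));
		if (!shared_workers) {
			pthread_mutex_unlock(&state_lock);
			return -1;
		}
		workers_refcnt = 1;
	} else {
		workers_refcnt++;
	}
	pthread_mutex_unlock(&state_lock);
	return 0;
}

static void workers_put(void)
{
	struct workers *to_free = NULL;

	/* Detach under the lock ... */
	pthread_mutex_lock(&state_lock);
	if (--workers_refcnt == 0) {
		to_free = shared_workers;
		shared_workers = NULL;
	}
	pthread_mutex_unlock(&state_lock);

	/* ... but do the (potentially slow) destruction outside it. */
	free(to_free);
}

int main(void)
{
	workers_get();
	workers_get();
	workers_put();
	workers_put();	/* last put actually frees */
	printf("done\n");
	return 0;
}
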
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c5586ffd1426..120e4340792a 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -529,7 +529,9 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
if (token != Opt_compress &&
token != Opt_compress_force)
info->compress_level =
- btrfs_compress_str2level(args[0].from);
+ btrfs_compress_str2level(
+ BTRFS_COMPRESS_ZLIB,
+ args[0].from + 4);
btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -542,9 +544,13 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
btrfs_clear_opt(info->mount_opt, NODATASUM);
btrfs_set_fs_incompat(info, COMPRESS_LZO);
no_compress = 0;
- } else if (strcmp(args[0].from, "zstd") == 0) {
+ } else if (strncmp(args[0].from, "zstd", 4) == 0) {
compress_type = "zstd";
info->compress_type = BTRFS_COMPRESS_ZSTD;
+ info->compress_level =
+ btrfs_compress_str2level(
+ BTRFS_COMPRESS_ZSTD,
+ args[0].from + 4);
btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -1621,6 +1627,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
flags | SB_RDONLY, device_name, data);
if (IS_ERR(mnt_root)) {
root = ERR_CAST(mnt_root);
+ kfree(subvol_name);
goto out;
}
@@ -1630,12 +1637,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
if (error < 0) {
root = ERR_PTR(error);
mntput(mnt_root);
+ kfree(subvol_name);
goto out;
}
}
}
if (IS_ERR(mnt_root)) {
root = ERR_CAST(mnt_root);
+ kfree(subvol_name);
goto out;
}
@@ -2187,6 +2196,9 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
ret = PTR_ERR_OR_ZERO(device);
mutex_unlock(&uuid_mutex);
break;
+ case BTRFS_IOC_FORGET_DEV:
+ ret = btrfs_forget_devices(vol->name);
+ break;
case BTRFS_IOC_DEVICES_READY:
mutex_lock(&uuid_mutex);
device = btrfs_scan_one_device(vol->name, FMODE_READ,
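
The super.c hunk switches the compress option match from strcmp() to strncmp() so that strings like "zstd:3" are accepted, then hands everything after the algorithm name (args[0].from + 4) to btrfs_compress_str2level() together with the algorithm. A minimal userspace parser in the same spirit could look like the sketch below; parse_compress_level() and its default/maximum values are invented for illustration, not btrfs behaviour.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Parse options of the form "zstd", "zstd:N" or "zlib:N".  Returns the
 * requested level, or @def_level when no (or an empty) suffix is given,
 * and -1 when the option is not for this algorithm at all.  Clamping is
 * simplified; real code validates the range per algorithm.
 */
static int parse_compress_level(const char *opt, const char *name,
				int def_level, int max_level)
{
	size_t nlen = strlen(name);
	long level;

	if (strncmp(opt, name, nlen) != 0)
		return -1;
	if (opt[nlen] == '\0' || opt[nlen + 1] == '\0')
		return def_level;	/* "zstd" or "zstd:" */
	if (opt[nlen] != ':')
		return -1;

	level = strtol(opt + nlen + 1, NULL, 10);
	if (level < 1)
		level = def_level;
	if (level > max_level)
		level = max_level;
	return (int)level;
}

int main(void)
{
	printf("%d\n", parse_compress_level("zstd", "zstd", 3, 15));	/* 3 */
	printf("%d\n", parse_compress_level("zstd:7", "zstd", 3, 15));	/* 7 */
	printf("%d\n", parse_compress_level("zstd:99", "zstd", 3, 15));	/* 15 */
	return 0;
}
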
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 127fa1535f58..acdad6d658f5 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -122,6 +122,7 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans)
if (is_fstree(root->root_key.objectid))
btrfs_unpin_free_ino(root);
clear_btree_io_tree(&root->dirty_log_pages);
+ btrfs_qgroup_clean_swapped_blocks(root);
}
/* We can free old roots now. */
@@ -845,19 +846,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_trans_release_metadata(trans);
trans->block_rsv = NULL;
- if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans);
+ btrfs_create_pending_block_groups(trans);
btrfs_trans_release_chunk_metadata(trans);
- if (lock && should_end_transaction(trans) &&
- READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
- spin_lock(&info->trans_lock);
- if (cur_trans->state == TRANS_STATE_RUNNING)
- cur_trans->state = TRANS_STATE_BLOCKED;
- spin_unlock(&info->trans_lock);
- }
-
if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
if (throttle)
return btrfs_commit_transaction(trans);
@@ -1540,7 +1532,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto fail;
}
- btrfs_set_lock_blocking(old);
+ btrfs_set_lock_blocking_write(old);
ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
/* clean up in any case */
@@ -1879,6 +1871,21 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
+/*
+ * Release reserved delayed ref space of all pending block groups of the
+ * transaction and remove them from the list
+ */
+static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_group_cache *block_group, *tmp;
+
+ list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+ list_del_init(&block_group->bg_list);
+ }
+}
+
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
/*
@@ -1936,8 +1943,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
cur_trans->delayed_refs.flushing = 1;
smp_wmb();
- if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans);
+ btrfs_create_pending_block_groups(trans);
ret = btrfs_run_delayed_refs(trans, 0);
if (ret) {
@@ -2270,6 +2276,7 @@ scrub_continue:
btrfs_scrub_continue(fs_info);
cleanup_transaction:
btrfs_trans_release_metadata(trans);
+ btrfs_cleanup_pending_block_groups(trans);
btrfs_trans_release_chunk_metadata(trans);
trans->block_rsv = NULL;
btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 3c0987ab587d..5f9e2dd413af 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -52,7 +52,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
u32 nritems;
root_node = btrfs_lock_root_node(root);
- btrfs_set_lock_blocking(root_node);
+ btrfs_set_lock_blocking_write(root_node);
nritems = btrfs_header_nritems(root_node);
root->defrag_max.objectid = 0;
/* from above we know this is not a leaf */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ac232b3d6d7e..f06454a55e00 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -27,6 +27,7 @@
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2
+#define LOG_OTHER_INODE_ALL 3
/*
* directory trouble cases
@@ -1330,6 +1331,67 @@ out:
return ret;
}
+static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct inode *dir, struct inode *inode, const char *name,
+ int namelen, u64 ref_index)
+{
+ struct btrfs_dir_item *dir_item;
+ struct btrfs_key key;
+ struct btrfs_path *path;
+ struct inode *other_inode = NULL;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ dir_item = btrfs_lookup_dir_item(NULL, root, path,
+ btrfs_ino(BTRFS_I(dir)),
+ name, namelen, 0);
+ if (!dir_item) {
+ btrfs_release_path(path);
+ goto add_link;
+ } else if (IS_ERR(dir_item)) {
+ ret = PTR_ERR(dir_item);
+ goto out;
+ }
+
+ /*
+ * Our inode's dentry collides with the dentry of another inode which is
+ * in the log but not yet processed since it has a higher inode number.
+ * So delete that other dentry.
+ */
+ btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
+ btrfs_release_path(path);
+ other_inode = read_one_inode(root, key.objectid);
+ if (!other_inode) {
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
+ name, namelen);
+ if (ret)
+ goto out;
+ /*
+ * If we dropped the link count to 0, bump it so that later the iput()
+ * on the inode will not free it. We will fixup the link count later.
+ */
+ if (other_inode->i_nlink == 0)
+ inc_nlink(other_inode);
+
+ ret = btrfs_run_delayed_items(trans);
+ if (ret)
+ goto out;
+add_link:
+ ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
+ name, namelen, 0, ref_index);
+out:
+ iput(other_inode);
+ btrfs_free_path(path);
+
+ return ret;
+}
+
/*
* replay one inode back reference item found in the log tree.
* eb, slot and key refer to the buffer and key found in the log tree.
@@ -1466,9 +1528,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
goto out;
/* insert our name */
- ret = btrfs_add_link(trans, BTRFS_I(dir),
- BTRFS_I(inode),
- name, namelen, 0, ref_index);
+ ret = add_link(trans, root, dir, inode, name, namelen,
+ ref_index);
if (ret)
goto out;
@@ -2663,7 +2724,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
+ btrfs_set_lock_blocking_write(next);
clean_tree_block(fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -2747,7 +2808,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
+ btrfs_set_lock_blocking_write(next);
clean_tree_block(fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -2829,7 +2890,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
+ btrfs_set_lock_blocking_write(next);
clean_tree_block(fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -3706,6 +3767,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
found_key.type = 0;
ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
&start_slot);
+ if (ret < 0)
+ break;
ret = btrfs_del_items(trans, log, path, start_slot,
path->slots[0] - start_slot + 1);
@@ -4717,7 +4780,7 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
const int slot,
const struct btrfs_key *key,
struct btrfs_inode *inode,
- u64 *other_ino)
+ u64 *other_ino, u64 *other_parent)
{
int ret;
struct btrfs_path *search_path;
@@ -4780,8 +4843,13 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
btrfs_dir_item_key_to_cpu(search_path->nodes[0],
di, &di_key);
if (di_key.type == BTRFS_INODE_ITEM_KEY) {
- ret = 1;
- *other_ino = di_key.objectid;
+ if (di_key.objectid != key->objectid) {
+ ret = 1;
+ *other_ino = di_key.objectid;
+ *other_parent = parent;
+ } else {
+ ret = 0;
+ }
} else {
ret = -EAGAIN;
}
@@ -4801,6 +4869,144 @@ out:
return ret;
}
+struct btrfs_ino_list {
+ u64 ino;
+ u64 parent;
+ struct list_head list;
+};
+
+static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_log_ctx *ctx,
+ u64 ino, u64 parent)
+{
+ struct btrfs_ino_list *ino_elem;
+ LIST_HEAD(inode_list);
+ int ret = 0;
+
+ ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
+ if (!ino_elem)
+ return -ENOMEM;
+ ino_elem->ino = ino;
+ ino_elem->parent = parent;
+ list_add_tail(&ino_elem->list, &inode_list);
+
+ while (!list_empty(&inode_list)) {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_key key;
+ struct inode *inode;
+
+ ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
+ list);
+ ino = ino_elem->ino;
+ parent = ino_elem->parent;
+ list_del(&ino_elem->list);
+ kfree(ino_elem);
+ if (ret)
+ continue;
+
+ btrfs_release_path(path);
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+ inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ /*
+ * If the other inode that had a conflicting dir entry was
+ * deleted in the current transaction, we need to log its parent
+ * directory.
+ */
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ if (ret == -ENOENT) {
+ key.objectid = parent;
+ inode = btrfs_iget(fs_info->sb, &key, root,
+ NULL);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ } else {
+ ret = btrfs_log_inode(trans, root,
+ BTRFS_I(inode),
+ LOG_OTHER_INODE_ALL,
+ 0, LLONG_MAX, ctx);
+ iput(inode);
+ }
+ }
+ continue;
+ }
+ /*
+ * We are safe logging the other inode without acquiring its
+ * lock as long as we log with the LOG_INODE_EXISTS mode. We
+ * are safe against concurrent renames of the other inode as
+ * well because during a rename we pin the log and update the
+ * log with the new name before we unpin it.
+ */
+ ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
+ LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
+ if (ret) {
+ iput(inode);
+ continue;
+ }
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ iput(inode);
+ continue;
+ }
+
+ while (true) {
+ struct extent_buffer *leaf = path->nodes[0];
+ int slot = path->slots[0];
+ u64 other_ino = 0;
+ u64 other_parent = 0;
+
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0) {
+ break;
+ } else if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.objectid != ino ||
+ (key.type != BTRFS_INODE_REF_KEY &&
+ key.type != BTRFS_INODE_EXTREF_KEY)) {
+ ret = 0;
+ break;
+ }
+
+ ret = btrfs_check_ref_name_override(leaf, slot, &key,
+ BTRFS_I(inode), &other_ino,
+ &other_parent);
+ if (ret < 0)
+ break;
+ if (ret > 0) {
+ ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
+ if (!ino_elem) {
+ ret = -ENOMEM;
+ break;
+ }
+ ino_elem->ino = other_ino;
+ ino_elem->parent = other_parent;
+ list_add_tail(&ino_elem->list, &inode_list);
+ ret = 0;
+ }
+ path->slots[0]++;
+ }
+ iput(inode);
+ }
+
+ return ret;
+}
+
/* log a single inode in the tree log.
* At least one parent directory for this inode must exist in the tree
* or be logged already.
@@ -4840,6 +5046,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
u64 logged_isize = 0;
bool need_log_inode_item = true;
bool xattrs_logged = false;
+ bool recursive_logging = false;
path = btrfs_alloc_path();
if (!path)
@@ -4885,8 +5092,12 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
return ret;
}
- if (inode_only == LOG_OTHER_INODE) {
- inode_only = LOG_INODE_EXISTS;
+ if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
+ recursive_logging = true;
+ if (inode_only == LOG_OTHER_INODE)
+ inode_only = LOG_INODE_EXISTS;
+ else
+ inode_only = LOG_INODE_ALL;
mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&inode->log_mutex);
@@ -4981,20 +5192,19 @@ again:
if ((min_key.type == BTRFS_INODE_REF_KEY ||
min_key.type == BTRFS_INODE_EXTREF_KEY) &&
- inode->generation == trans->transid) {
+ inode->generation == trans->transid &&
+ !recursive_logging) {
u64 other_ino = 0;
+ u64 other_parent = 0;
ret = btrfs_check_ref_name_override(path->nodes[0],
path->slots[0], &min_key, inode,
- &other_ino);
+ &other_ino, &other_parent);
if (ret < 0) {
err = ret;
goto out_unlock;
} else if (ret > 0 && ctx &&
other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
- struct btrfs_key inode_key;
- struct inode *other_inode;
-
if (ins_nr > 0) {
ins_nr++;
} else {
@@ -5010,43 +5220,13 @@ again:
goto out_unlock;
}
ins_nr = 0;
- btrfs_release_path(path);
- inode_key.objectid = other_ino;
- inode_key.type = BTRFS_INODE_ITEM_KEY;
- inode_key.offset = 0;
- other_inode = btrfs_iget(fs_info->sb,
- &inode_key, root,
- NULL);
- /*
- * If the other inode that had a conflicting dir
- * entry was deleted in the current transaction,
- * we don't need to do more work nor fallback to
- * a transaction commit.
- */
- if (other_inode == ERR_PTR(-ENOENT)) {
- goto next_key;
- } else if (IS_ERR(other_inode)) {
- err = PTR_ERR(other_inode);
- goto out_unlock;
- }
- /*
- * We are safe logging the other inode without
- * acquiring its i_mutex as long as we log with
- * the LOG_INODE_EXISTS mode. We're safe against
- * concurrent renames of the other inode as well
- * because during a rename we pin the log and
- * update the log with the new name before we
- * unpin it.
- */
- err = btrfs_log_inode(trans, root,
- BTRFS_I(other_inode),
- LOG_OTHER_INODE, 0, LLONG_MAX,
- ctx);
- iput(other_inode);
+
+ err = log_conflicting_inodes(trans, root, path,
+ ctx, other_ino, other_parent);
if (err)
goto out_unlock;
- else
- goto next_key;
+ btrfs_release_path(path);
+ goto next_key;
}
}
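The tree-log.c rework above replaces ad-hoc handling of a single conflicting inode with a worklist: each (inode, parent) pair is queued and processed iteratively, and any further conflict it exposes is appended rather than recursed into, so an arbitrarily long chain of name collisions still terminates. A standalone sketch of that worklist shape, with a stubbed conflict lookup standing in for btrfs_check_ref_name_override() (purely illustrative):

#include <stdio.h>

/* Placeholder: report a conflicting inode for @ino, or 0 if there is none. */
static unsigned long long find_conflict(unsigned long long ino)
{
    return 0;
}

/* Iterative worklist instead of recursion: conflicts may queue more conflicts. */
static void log_conflicting(unsigned long long first_ino)
{
    unsigned long long queue[64];
    unsigned int head = 0, tail = 0;

    queue[tail++ % 64] = first_ino;
    while (head != tail) {
        unsigned long long ino = queue[head++ % 64];
        unsigned long long other = find_conflict(ino);

        printf("logging inode %llu\n", ino);
        if (other && tail - head < 64)
            queue[tail++ % 64] = other;
    }
}

int main(void)
{
    log_conflicting(257);
    return 0;
}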
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2576b1a379c9..9024eee889b9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -415,27 +415,6 @@ static struct btrfs_device *__alloc_device(void)
return dev;
}
-/*
- * Find a device specified by @devid or @uuid in the list of @fs_devices, or
- * return NULL.
- *
- * If devid and uuid are both specified, the match must be exact, otherwise
- * only devid is used.
- */
-static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices,
- u64 devid, const u8 *uuid)
-{
- struct btrfs_device *dev;
-
- list_for_each_entry(dev, &fs_devices->devices, dev_list) {
- if (dev->devid == devid &&
- (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
- return dev;
- }
- }
- return NULL;
-}
-
static noinline struct btrfs_fs_devices *find_fsid(
const u8 *fsid, const u8 *metadata_fsid)
{
@@ -734,6 +713,17 @@ static void pending_bios_fn(struct btrfs_work *work)
run_scheduled_bios(device);
}
+static bool device_path_matched(const char *path, struct btrfs_device *device)
+{
+ int found;
+
+ rcu_read_lock();
+ found = strcmp(rcu_str_deref(device->name), path);
+ rcu_read_unlock();
+
+ return found == 0;
+}
+
/*
* Search and remove all stale (devices which are not mounted) devices.
* When both inputs are NULL, it will search and release all stale devices.
@@ -741,52 +731,57 @@ static void pending_bios_fn(struct btrfs_work *work)
* matching this path only.
* skip_dev: Optional. Will skip this device when searching for the stale
* devices.
+ * Return: 0 for success or if @path is NULL.
+ * -EBUSY if @path is a mounted device.
+ * -ENOENT if @path does not match any device in the list.
*/
-static void btrfs_free_stale_devices(const char *path,
+static int btrfs_free_stale_devices(const char *path,
struct btrfs_device *skip_device)
{
struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
struct btrfs_device *device, *tmp_device;
+ int ret = 0;
+
+ if (path)
+ ret = -ENOENT;
list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
- mutex_lock(&fs_devices->device_list_mutex);
- if (fs_devices->opened) {
- mutex_unlock(&fs_devices->device_list_mutex);
- continue;
- }
+ mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, tmp_device,
&fs_devices->devices, dev_list) {
- int not_found = 0;
-
if (skip_device && skip_device == device)
continue;
if (path && !device->name)
continue;
-
- rcu_read_lock();
- if (path)
- not_found = strcmp(rcu_str_deref(device->name),
- path);
- rcu_read_unlock();
- if (not_found)
+ if (path && !device_path_matched(path, device))
continue;
+ if (fs_devices->opened) {
+ /* for an already deleted device return 0 */
+ if (path && ret != 0)
+ ret = -EBUSY;
+ break;
+ }
/* delete the stale device */
fs_devices->num_devices--;
list_del(&device->dev_list);
btrfs_free_device(device);
+ ret = 0;
if (fs_devices->num_devices == 0)
break;
}
mutex_unlock(&fs_devices->device_list_mutex);
+
if (fs_devices->num_devices == 0) {
btrfs_sysfs_remove_fsid(fs_devices);
list_del(&fs_devices->fs_list);
free_fs_devices(fs_devices);
}
}
+
+ return ret;
}
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
@@ -957,19 +952,19 @@ static noinline struct btrfs_device *device_list_add(const char *path,
else
fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
- fs_devices->fsid_change = fsid_change_in_progress;
-
if (IS_ERR(fs_devices))
return ERR_CAST(fs_devices);
+ fs_devices->fsid_change = fsid_change_in_progress;
+
mutex_lock(&fs_devices->device_list_mutex);
list_add(&fs_devices->fs_list, &fs_uuids);
device = NULL;
} else {
mutex_lock(&fs_devices->device_list_mutex);
- device = find_device(fs_devices, devid,
- disk_super->dev_item.uuid);
+ device = btrfs_find_device(fs_devices, devid,
+ disk_super->dev_item.uuid, NULL, false);
/*
* If this disk has been pulled into an fs devices created by
@@ -1134,7 +1129,6 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
mutex_lock(&orig->device_list_mutex);
fs_devices->total_devices = orig->total_devices;
- /* We have held the volume lock, it is safe to get the devices. */
list_for_each_entry(orig_dev, &orig->devices, dev_list) {
struct rcu_string *name;
@@ -1451,6 +1445,17 @@ static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
return 0;
}
+int btrfs_forget_devices(const char *path)
+{
+ int ret;
+
+ mutex_lock(&uuid_mutex);
+ ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
+ mutex_unlock(&uuid_mutex);
+
+ return ret;
+}
+
/*
* Look for a btrfs signature on a device. This may be called out of the mount path
* and we are not allowed to call set_blocksize during the scan. The superblock
@@ -2385,11 +2390,11 @@ static struct btrfs_device *btrfs_find_device_by_path(
devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_uuid = disk_super->dev_item.uuid;
if (btrfs_fs_incompat(fs_info, METADATA_UUID))
- device = btrfs_find_device(fs_info, devid, dev_uuid,
- disk_super->metadata_uuid);
+ device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
+ disk_super->metadata_uuid, true);
else
- device = btrfs_find_device(fs_info, devid,
- dev_uuid, disk_super->fsid);
+ device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
+ disk_super->fsid, true);
brelse(bh);
if (!device)
@@ -2398,50 +2403,38 @@ static struct btrfs_device *btrfs_find_device_by_path(
return device;
}
-static struct btrfs_device *btrfs_find_device_missing_or_by_path(
- struct btrfs_fs_info *fs_info, const char *device_path)
-{
- struct btrfs_device *device = NULL;
- if (strcmp(device_path, "missing") == 0) {
- struct list_head *devices;
- struct btrfs_device *tmp;
-
- devices = &fs_info->fs_devices->devices;
- list_for_each_entry(tmp, devices, dev_list) {
- if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
- &tmp->dev_state) && !tmp->bdev) {
- device = tmp;
- break;
- }
- }
-
- if (!device)
- return ERR_PTR(-ENOENT);
- } else {
- device = btrfs_find_device_by_path(fs_info, device_path);
- }
-
- return device;
-}
-
/*
* Lookup a device given by device id, or the path if the id is 0.
*/
struct btrfs_device *btrfs_find_device_by_devspec(
- struct btrfs_fs_info *fs_info, u64 devid, const char *devpath)
+ struct btrfs_fs_info *fs_info, u64 devid,
+ const char *device_path)
{
struct btrfs_device *device;
if (devid) {
- device = btrfs_find_device(fs_info, devid, NULL, NULL);
+ device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
+ NULL, true);
if (!device)
return ERR_PTR(-ENOENT);
- } else {
- if (!devpath || !devpath[0])
- return ERR_PTR(-EINVAL);
- device = btrfs_find_device_missing_or_by_path(fs_info, devpath);
+ return device;
}
- return device;
+
+ if (!device_path || !device_path[0])
+ return ERR_PTR(-EINVAL);
+
+ if (strcmp(device_path, "missing") == 0) {
+ /* Find first missing device */
+ list_for_each_entry(device, &fs_info->fs_devices->devices,
+ dev_list) {
+ if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &device->dev_state) && !device->bdev)
+ return device;
+ }
+ return ERR_PTR(-ENOENT);
+ }
+
+ return btrfs_find_device_by_path(fs_info, device_path);
}
/*
@@ -2563,7 +2556,8 @@ next_slot:
BTRFS_UUID_SIZE);
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
BTRFS_FSID_SIZE);
- device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
+ device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
+ fs_uuid, true);
BUG_ON(!device); /* Logic error */
if (device->fs_devices->seeding) {
@@ -6616,21 +6610,36 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
return BLK_STS_OK;
}
-struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
- u8 *uuid, u8 *fsid)
+/*
+ * Find a device specified by @devid or @uuid in the list of @fs_devices, or
+ * return NULL.
+ *
+ * If devid and uuid are both specified, the match must be exact, otherwise
+ * only devid is used.
+ *
+ * If @seed is true, traverse through the seed devices.
+ */
+struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
+ u64 devid, u8 *uuid, u8 *fsid,
+ bool seed)
{
struct btrfs_device *device;
- struct btrfs_fs_devices *cur_devices;
- cur_devices = fs_info->fs_devices;
- while (cur_devices) {
+ while (fs_devices) {
if (!fsid ||
- !memcmp(cur_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
- device = find_device(cur_devices, devid, uuid);
- if (device)
- return device;
+ !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
+ list_for_each_entry(device, &fs_devices->devices,
+ dev_list) {
+ if (device->devid == devid &&
+ (!uuid || memcmp(device->uuid, uuid,
+ BTRFS_UUID_SIZE) == 0))
+ return device;
+ }
}
- cur_devices = cur_devices->seed;
+ if (seed)
+ fs_devices = fs_devices->seed;
+ else
+ return NULL;
}
return NULL;
}
@@ -6782,10 +6791,10 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
}
if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
- (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
(type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
(type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
- (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
num_stripes != 1)) {
btrfs_err(fs_info,
@@ -6875,8 +6884,8 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
- map->stripes[i].dev = btrfs_find_device(fs_info, devid,
- uuid, NULL);
+ map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
+ devid, uuid, NULL, true);
if (!map->stripes[i].dev &&
!btrfs_test_opt(fs_info, DEGRADED)) {
free_extent_map(em);
@@ -7015,7 +7024,8 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
return PTR_ERR(fs_devices);
}
- device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
+ device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
+ fs_uuid, true);
if (!device) {
if (!btrfs_test_opt(fs_info, DEGRADED)) {
btrfs_report_missing_device(fs_info, devid,
@@ -7605,7 +7615,8 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
int i;
mutex_lock(&fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info, stats->devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
+ true);
mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
@@ -7819,12 +7830,25 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
}
/* Make sure no dev extent is beyond device boundary */
- dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (!dev) {
btrfs_err(fs_info, "failed to find devid %llu", devid);
ret = -EUCLEAN;
goto out;
}
+
+ /* It's possible this device is a dummy for seed device */
+ if (dev->disk_total_bytes == 0) {
+ dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
+ NULL, false);
+ if (!dev) {
+ btrfs_err(fs_info, "failed to find seed devid %llu",
+ devid);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+
if (physical_offset + physical_len > dev->disk_total_bytes) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ed806649a473..3ad9d58d1b66 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -416,6 +416,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
fmode_t flags, void *holder);
+int btrfs_forget_devices(const char *path);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_device *device,
@@ -433,8 +434,8 @@ void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size);
-struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
- u8 *uuid, u8 *fsid);
+struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
+ u64 devid, u8 *uuid, u8 *fsid, bool seed);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 970ff3e35bb3..b86b7ad6b900 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -27,6 +27,33 @@ struct workspace {
int level;
};
+static struct workspace_manager wsm;
+
+static void zlib_init_workspace_manager(void)
+{
+ btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress);
+}
+
+static void zlib_cleanup_workspace_manager(void)
+{
+ btrfs_cleanup_workspace_manager(&wsm);
+}
+
+static struct list_head *zlib_get_workspace(unsigned int level)
+{
+ struct list_head *ws = btrfs_get_workspace(&wsm, level);
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+
+ workspace->level = level;
+
+ return ws;
+}
+
+static void zlib_put_workspace(struct list_head *ws)
+{
+ btrfs_put_workspace(&wsm, ws);
+}
+
static void zlib_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -36,7 +63,7 @@ static void zlib_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *zlib_alloc_workspace(void)
+static struct list_head *zlib_alloc_workspace(unsigned int level)
{
struct workspace *workspace;
int workspacesize;
@@ -48,6 +75,7 @@ static struct list_head *zlib_alloc_workspace(void)
workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
+ workspace->level = level;
workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!workspace->strm.workspace || !workspace->buf)
goto fail;
@@ -390,18 +418,19 @@ next:
return ret;
}
-static void zlib_set_level(struct list_head *ws, unsigned int type)
+static unsigned int zlib_set_level(unsigned int level)
{
- struct workspace *workspace = list_entry(ws, struct workspace, list);
- unsigned level = (type & 0xF0) >> 4;
-
- if (level > 9)
- level = 9;
+ if (!level)
+ return BTRFS_ZLIB_DEFAULT_LEVEL;
- workspace->level = level > 0 ? level : 3;
+ return min_t(unsigned int, level, 9);
}
const struct btrfs_compress_op btrfs_zlib_compress = {
+ .init_workspace_manager = zlib_init_workspace_manager,
+ .cleanup_workspace_manager = zlib_cleanup_workspace_manager,
+ .get_workspace = zlib_get_workspace,
+ .put_workspace = zlib_put_workspace,
.alloc_workspace = zlib_alloc_workspace,
.free_workspace = zlib_free_workspace,
.compress_pages = zlib_compress_pages,
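The new zlib_set_level() is just a clamp: a requested level of 0 selects the default (BTRFS_ZLIB_DEFAULT_LEVEL, which is 3 in current kernels) and anything larger than zlib's maximum is capped at 9. A one-function sketch of that mapping:

/* Clamp a requested zlib level: 0 selects the default, the maximum is 9. */
static unsigned int clamp_zlib_level(unsigned int level)
{
    const unsigned int zlib_default = 3;    /* BTRFS_ZLIB_DEFAULT_LEVEL */

    if (level == 0)
        return zlib_default;
    return level < 9 ? level : 9;           /* e.g. 12 -> 9, 5 -> 5 */
}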
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index af6ec59972f5..3e418a3aeb11 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -6,25 +6,31 @@
*/
#include <linux/bio.h>
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/sched/mm.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include "compression.h"
+#include "ctree.h"
#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
+#define ZSTD_BTRFS_MAX_LEVEL 15
+/* 307s to avoid pathologically clashing with transaction commit */
+#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
-static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len)
+static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
+ size_t src_len)
{
- ZSTD_parameters params = ZSTD_getParams(ZSTD_BTRFS_DEFAULT_LEVEL,
- src_len, 0);
+ ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
@@ -36,11 +42,290 @@ struct workspace {
void *mem;
size_t size;
char *buf;
+ unsigned int level;
+ unsigned int req_level;
+ unsigned long last_used; /* jiffies */
struct list_head list;
+ struct list_head lru_list;
ZSTD_inBuffer in_buf;
ZSTD_outBuffer out_buf;
};
+/*
+ * Zstd Workspace Management
+ *
+ * Zstd workspaces have different memory requirements depending on the level.
+ * The zstd workspaces are managed by having individual lists for each level
+ * and a global lru. Forward progress is maintained by protecting a max level
+ * workspace.
+ *
+ * Getting a workspace is done by using the bitmap to identify the levels that
+ * have available workspaces and scanning upwards. This lets us recycle higher level
+ * workspaces because of the monotonic memory guarantee. A workspace's
+ * last_used is only updated if it is being used by the corresponding memory
+ * level. Putting a workspace involves adding it back to the appropriate places
+ * and adding it back to the lru if necessary.
+ *
+ * A timer is used to reclaim workspaces if they have not been used for
+ * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around.
+ * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
+ */
+
+struct zstd_workspace_manager {
+ const struct btrfs_compress_op *ops;
+ spinlock_t lock;
+ struct list_head lru_list;
+ struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
+ unsigned long active_map;
+ wait_queue_head_t wait;
+ struct timer_list timer;
+};
+
+static struct zstd_workspace_manager wsm;
+
+static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];
+
+static inline struct workspace *list_to_workspace(struct list_head *list)
+{
+ return container_of(list, struct workspace, list);
+}
+
+/*
+ * zstd_reclaim_timer_fn - reclaim timer
+ * @timer: timer
+ *
+ * This scans the lru_list and attempts to reclaim any workspace that hasn't
+ * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
+ */
+static void zstd_reclaim_timer_fn(struct timer_list *timer)
+{
+ unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
+ struct list_head *pos, *next;
+
+ spin_lock(&wsm.lock);
+
+ if (list_empty(&wsm.lru_list)) {
+ spin_unlock(&wsm.lock);
+ return;
+ }
+
+ list_for_each_prev_safe(pos, next, &wsm.lru_list) {
+ struct workspace *victim = container_of(pos, struct workspace,
+ lru_list);
+ unsigned int level;
+
+ if (time_after(victim->last_used, reclaim_threshold))
+ break;
+
+ /* workspace is in use */
+ if (victim->req_level)
+ continue;
+
+ level = victim->level;
+ list_del(&victim->lru_list);
+ list_del(&victim->list);
+ wsm.ops->free_workspace(&victim->list);
+
+ if (list_empty(&wsm.idle_ws[level - 1]))
+ clear_bit(level - 1, &wsm.active_map);
+
+ }
+
+ if (!list_empty(&wsm.lru_list))
+ mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
+
+ spin_unlock(&wsm.lock);
+}
+
+/*
+ * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
+ *
+ * It is possible based on the level configurations that a higher level
+ * workspace uses less memory than a lower level workspace. In order to reuse
+ * workspaces, this must be made a monotonic relationship. This precomputes
+ * the required memory for each level and enforces the monotonicity between
+ * level and memory required.
+ */
+static void zstd_calc_ws_mem_sizes(void)
+{
+ size_t max_size = 0;
+ unsigned int level;
+
+ for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
+ ZSTD_parameters params =
+ zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
+ size_t level_size =
+ max_t(size_t,
+ ZSTD_CStreamWorkspaceBound(params.cParams),
+ ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
+
+ max_size = max_t(size_t, max_size, level_size);
+ zstd_ws_mem_sizes[level - 1] = max_size;
+ }
+}
+
+static void zstd_init_workspace_manager(void)
+{
+ struct list_head *ws;
+ int i;
+
+ zstd_calc_ws_mem_sizes();
+
+ wsm.ops = &btrfs_zstd_compress;
+ spin_lock_init(&wsm.lock);
+ init_waitqueue_head(&wsm.wait);
+ timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);
+
+ INIT_LIST_HEAD(&wsm.lru_list);
+ for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
+ INIT_LIST_HEAD(&wsm.idle_ws[i]);
+
+ ws = wsm.ops->alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
+ if (IS_ERR(ws)) {
+ pr_warn(
+ "BTRFS: cannot preallocate zstd compression workspace\n");
+ } else {
+ set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
+ list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
+ }
+}
+
+static void zstd_cleanup_workspace_manager(void)
+{
+ struct workspace *workspace;
+ int i;
+
+ del_timer(&wsm.timer);
+
+ for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
+ while (!list_empty(&wsm.idle_ws[i])) {
+ workspace = container_of(wsm.idle_ws[i].next,
+ struct workspace, list);
+ list_del(&workspace->list);
+ list_del(&workspace->lru_list);
+ wsm.ops->free_workspace(&workspace->list);
+ }
+ }
+}
+
+/*
+ * zstd_find_workspace - find workspace
+ * @level: compression level
+ *
+ * This iterates over the set bits in the active_map beginning at the requested
+ * compression level. This lets us utilize already allocated workspaces before
+ * allocating a new one. If the workspace is of a larger size, it is used, but
+ * the place in the lru_list and last_used times are not updated. This is to
+ * offer the opportunity to reclaim the workspace in favor of allocating an
+ * appropriately sized one in the future.
+ */
+static struct list_head *zstd_find_workspace(unsigned int level)
+{
+ struct list_head *ws;
+ struct workspace *workspace;
+ int i = level - 1;
+
+ spin_lock(&wsm.lock);
+ for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
+ if (!list_empty(&wsm.idle_ws[i])) {
+ ws = wsm.idle_ws[i].next;
+ workspace = list_to_workspace(ws);
+ list_del_init(ws);
+ /* keep its place if it's a lower level using this */
+ workspace->req_level = level;
+ if (level == workspace->level)
+ list_del(&workspace->lru_list);
+ if (list_empty(&wsm.idle_ws[i]))
+ clear_bit(i, &wsm.active_map);
+ spin_unlock(&wsm.lock);
+ return ws;
+ }
+ }
+ spin_unlock(&wsm.lock);
+
+ return NULL;
+}
+
+/*
+ * zstd_get_workspace - zstd's get_workspace
+ * @level: compression level
+ *
+ * If @level is 0, then any compression level can be used. Therefore, we begin
+ * scanning from 1. We first scan through possible workspaces and only then
+ * attempt to allocate a new workspace. If we fail to allocate one due to
+ * memory pressure, go to sleep waiting for the max level workspace to free up.
+ */
+static struct list_head *zstd_get_workspace(unsigned int level)
+{
+ struct list_head *ws;
+ unsigned int nofs_flag;
+
+ /* level == 0 means we can use any workspace */
+ if (!level)
+ level = 1;
+
+again:
+ ws = zstd_find_workspace(level);
+ if (ws)
+ return ws;
+
+ nofs_flag = memalloc_nofs_save();
+ ws = wsm.ops->alloc_workspace(level);
+ memalloc_nofs_restore(nofs_flag);
+
+ if (IS_ERR(ws)) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
+ schedule();
+ finish_wait(&wsm.wait, &wait);
+
+ goto again;
+ }
+
+ return ws;
+}
+
+/*
+ * zstd_put_workspace - zstd put_workspace
+ * @ws: list_head for the workspace
+ *
+ * When putting back a workspace, we only need to update the LRU if we are of
+ * the requested compression level. Here is where we continue to protect the
+ * max level workspace or update last_used accordingly. If the reclaim timer
+ * isn't set, it is also set here. Only putting back a max level workspace
+ * attempts to wake up waiters.
+ */
+static void zstd_put_workspace(struct list_head *ws)
+{
+ struct workspace *workspace = list_to_workspace(ws);
+
+ spin_lock(&wsm.lock);
+
+ /* A node is only taken off the lru if we are the corresponding level */
+ if (workspace->req_level == workspace->level) {
+ /* Hide a max level workspace from reclaim */
+ if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
+ INIT_LIST_HEAD(&workspace->lru_list);
+ } else {
+ workspace->last_used = jiffies;
+ list_add(&workspace->lru_list, &wsm.lru_list);
+ if (!timer_pending(&wsm.timer))
+ mod_timer(&wsm.timer,
+ jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
+ }
+ }
+
+ set_bit(workspace->level - 1, &wsm.active_map);
+ list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
+ workspace->req_level = 0;
+
+ spin_unlock(&wsm.lock);
+
+ if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
+ cond_wake_up(&wsm.wait);
+}
+
static void zstd_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -50,25 +335,25 @@ static void zstd_free_workspace(struct list_head *ws)
kfree(workspace);
}
-static struct list_head *zstd_alloc_workspace(void)
+static struct list_head *zstd_alloc_workspace(unsigned int level)
{
- ZSTD_parameters params =
- zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT);
struct workspace *workspace;
workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
if (!workspace)
return ERR_PTR(-ENOMEM);
- workspace->size = max_t(size_t,
- ZSTD_CStreamWorkspaceBound(params.cParams),
- ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
+ workspace->size = zstd_ws_mem_sizes[level - 1];
+ workspace->level = level;
+ workspace->req_level = level;
+ workspace->last_used = jiffies;
workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!workspace->mem || !workspace->buf)
goto fail;
INIT_LIST_HEAD(&workspace->list);
+ INIT_LIST_HEAD(&workspace->lru_list);
return &workspace->list;
fail:
@@ -95,7 +380,8 @@ static int zstd_compress_pages(struct list_head *ws,
unsigned long len = *total_out;
const unsigned long nr_dest_pages = *out_pages;
unsigned long max_out = nr_dest_pages * PAGE_SIZE;
- ZSTD_parameters params = zstd_get_btrfs_parameters(len);
+ ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
+ len);
*out_pages = 0;
*total_out = 0;
@@ -419,11 +705,19 @@ finish:
return ret;
}
-static void zstd_set_level(struct list_head *ws, unsigned int type)
+static unsigned int zstd_set_level(unsigned int level)
{
+ if (!level)
+ return ZSTD_BTRFS_DEFAULT_LEVEL;
+
+ return min_t(unsigned int, level, ZSTD_BTRFS_MAX_LEVEL);
}
const struct btrfs_compress_op btrfs_zstd_compress = {
+ .init_workspace_manager = zstd_init_workspace_manager,
+ .cleanup_workspace_manager = zstd_cleanup_workspace_manager,
+ .get_workspace = zstd_get_workspace,
+ .put_workspace = zstd_put_workspace,
.alloc_workspace = zstd_alloc_workspace,
.free_workspace = zstd_free_workspace,
.compress_pages = zstd_compress_pages,
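Two ideas carry the zstd workspace manager added above: the per-level memory bound is made monotonically non-decreasing so a higher-level workspace can always serve a lower-level request, and lookup scans the bitmap of levels that have an idle workspace upwards from the requested level. A compact sketch of both, using illustrative sizes rather than the real ZSTD_CStreamWorkspaceBound() values:

#include <stddef.h>

#define MAX_LEVEL 15

/* Force the per-level size table to be non-decreasing in the level. */
static void calc_monotonic_sizes(const size_t raw[MAX_LEVEL],
                                 size_t out[MAX_LEVEL])
{
    size_t max = 0;
    int i;

    for (i = 0; i < MAX_LEVEL; i++) {
        if (raw[i] > max)
            max = raw[i];
        out[i] = max;
    }
}

/*
 * Scan the bitmap of levels that currently have an idle workspace, starting
 * at the requested level and moving up; thanks to the monotonic sizes, any
 * hit is large enough to serve the request.  Returns 0 if nothing is idle.
 */
static int find_idle_level(unsigned long active_map, int level)
{
    int i;

    for (i = level - 1; i < MAX_LEVEL; i++)
        if (active_map & (1UL << i))
            return i + 1;
    return 0;
}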
diff --git a/fs/buffer.c b/fs/buffer.c
index 52d024bfdbc1..48318fb74938 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
struct buffer_head *head;
struct page *page;
int all_mapped = 1;
+ static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
@@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
* file io on the block device and getblk. It gets dealt with
* elsewhere, don't buffer_error if we had some unmapped buffers
*/
- if (all_mapped) {
- printk("__find_get_block_slow() failed. "
- "block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block,
- (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%zu\n",
- bh->b_state, bh->b_size);
- printk("device %pg blocksize: %d\n", bdev,
- 1 << bd_inode->i_blkbits);
+ ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
+ if (all_mapped && __ratelimit(&last_warned)) {
+ printk("__find_get_block_slow() failed. block=%llu, "
+ "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
+ "device %pg blocksize: %d\n",
+ (unsigned long long)block,
+ (unsigned long long)bh->b_blocknr,
+ bh->b_state, bh->b_size, bdev,
+ 1 << bd_inode->i_blkbits);
}
out_unlock:
spin_unlock(&bd_mapping->private_lock);
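The fs/buffer.c hunk folds three unconditional printk()s into a single message guarded by a static ratelimit state (interval HZ, burst 1), so a misbehaving block device can no longer flood the log. A rough userspace approximation of that throttling; it models the behaviour, not the kernel ratelimit API:

#include <stdio.h>
#include <time.h>

/* Allow roughly one message per second, as an HZ interval with burst 1 does. */
static int ratelimited_warn(const char *msg)
{
    static time_t last_second;
    time_t now = time(NULL);

    if (now == last_second)
        return 0;               /* suppressed */
    last_second = now;
    fprintf(stderr, "%s\n", msg);
    return 1;
}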
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5d0c05e288cc..a47c541f8006 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1494,10 +1494,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
if (err < 0 || off >= i_size_read(inode)) {
unlock_page(page);
put_page(page);
- if (err == -ENOMEM)
- ret = VM_FAULT_OOM;
- else
- ret = VM_FAULT_SIGBUS;
+ ret = vmf_error(err);
goto out_inline;
}
if (err < PAGE_SIZE)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 94c026bba2c2..bba28a5034ba 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1035,6 +1035,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci)
list_del_init(&ci->i_snap_realm_item);
ci->i_snap_realm_counter++;
ci->i_snap_realm = NULL;
+ if (realm->ino == ci->i_vino.ino)
+ realm->inode = NULL;
spin_unlock(&realm->inodes_with_caps_lock);
ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
realm);
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 03f4d24db8fe..9455d3aef0c3 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -3,19 +3,6 @@
* quota.c - CephFS quota
*
* Copyright (C) 2017-2018 SUSE
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/statfs.h>
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 041c27ea8de1..f74193da0e09 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+ if (list_empty(&ci->i_snap_flush_item))
+ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
spin_unlock(&mdsc->snap_flush_lock);
return 1; /* caller may want to ceph_flush_snaps */
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 4e9a7cc488da..da2cd8e89062 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -530,7 +530,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_putc(m, ',');
pos = m->count;
- ret = ceph_print_client_options(m, fsc->client);
+ ret = ceph_print_client_options(m, fsc->client, false);
if (ret)
return ret;
@@ -640,7 +640,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
opt = NULL; /* fsc->client now owns this */
fsc->client->extra_mon_dispatch = extra_mon_dispatch;
- fsc->client->osdc.abort_on_full = true;
+ ceph_set_opt(fsc->client, ABORT_ON_FULL);
if (!fsopt->mds_namespace) {
ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 593fb422d0f3..e92a2fee3c57 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -252,6 +252,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, ",ACL");
#endif
seq_putc(m, '\n');
+ seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
seq_printf(m, "Servers:");
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 26776eddd85d..7652551a1fc4 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.15"
+#define CIFS_VERSION "2.17"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 01ded7038b19..94dbdbe5be34 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1438,6 +1438,7 @@ struct mid_q_entry {
int mid_state; /* wish this were enum but can not pass to wait_event */
unsigned int mid_flags;
__le16 command; /* smb command code */
+ unsigned int optype; /* operation type */
bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
bool multiEnd:1; /* both received */
@@ -1574,6 +1575,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
kfree(param);
}
+static inline bool is_interrupt_error(int error)
+{
+ switch (error) {
+ case -EINTR:
+ case -ERESTARTSYS:
+ case -ERESTARTNOHAND:
+ case -ERESTARTNOINTR:
+ return true;
+ }
+ return false;
+}
+
+static inline bool is_retryable_error(int error)
+{
+ if (is_interrupt_error(error) || error == -EAGAIN)
+ return true;
+ return false;
+}
+
#define MID_FREE 0
#define MID_REQUEST_ALLOCATED 1
#define MID_REQUEST_SUBMITTED 2
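The two helpers added to cifsglob.h split errors into "interrupted by a signal" (-EINTR plus the -ERESTART* family) and the wider "worth retrying" set that also covers -EAGAIN; later hunks use them to decide whether to redirty pages and whether to record the error on the mapping. A standalone sketch of the same classification (the -ERESTART* codes are kernel-internal, so their values are assumed locally here):

#include <errno.h>
#include <stdbool.h>

/* Kernel-internal restart codes, modeled locally for the example. */
enum { ERESTARTSYS = 512, ERESTARTNOINTR = 513, ERESTARTNOHAND = 514 };

/* @error follows the kernel convention of negative errno values. */
static bool is_interrupt_error(int error)
{
    switch (-error) {
    case EINTR:
    case ERESTARTSYS:
    case ERESTARTNOHAND:
    case ERESTARTNOINTR:
        return true;
    }
    return false;
}

static bool is_retryable_error(int error)
{
    return is_interrupt_error(error) || error == -EAGAIN;
}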
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b1f49c1c543a..bb54ccf8481c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -128,24 +128,31 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
int rc;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
- char tree[MAX_TREE_SIZE + 1];
+ char *tree;
const char *tcp_host;
size_t tcp_host_len;
const char *dfs_host;
size_t dfs_host_len;
+ tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
+ if (!tree)
+ return -ENOMEM;
+
if (tcon->ipc) {
- snprintf(tree, sizeof(tree), "\\\\%s\\IPC$",
+ snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
tcon->ses->server->hostname);
- return CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ goto out;
}
- if (!tcon->dfs_path)
- return CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (!tcon->dfs_path) {
+ rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ goto out;
+ }
rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
if (rc)
- return rc;
+ goto out;
extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
&tcp_host_len);
@@ -165,7 +172,7 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
continue;
}
- snprintf(tree, sizeof(tree), "\\%s", tgt);
+ snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
if (!rc)
@@ -182,6 +189,8 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
rc = -ENOENT;
}
dfs_cache_free_tgts(&tl);
+out:
+ kfree(tree);
return rc;
}
#else
@@ -1540,18 +1549,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
}
static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ bool malformed)
{
int length;
- struct cifs_readdata *rdata = mid->callback_data;
length = cifs_discard_remaining_data(server);
- dequeue_mid(mid, rdata->result);
+ dequeue_mid(mid, malformed);
mid->resp_buf = server->smallbuf;
server->smallbuf = NULL;
return length;
}
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+ struct cifs_readdata *rdata = mid->callback_data;
+
+ return __cifs_readv_discard(server, mid, rdata->result);
+}
+
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
@@ -1593,12 +1610,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return -1;
}
+ /* set up first two iov for signature check and to get credits */
+ rdata->iov[0].iov_base = buf;
+ rdata->iov[0].iov_len = 4;
+ rdata->iov[1].iov_base = buf + 4;
+ rdata->iov[1].iov_len = server->total_read - 4;
+ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
/* Was the SMB read successful? */
rdata->result = server->ops->map_error(buf, false);
if (rdata->result != 0) {
cifs_dbg(FYI, "%s: server returned error %d\n",
__func__, rdata->result);
- return cifs_readv_discard(server, mid);
+ /* normal error on read response */
+ return __cifs_readv_discard(server, mid, false);
}
/* Is there enough to get to the rest of the READ_RSP header? */
@@ -1642,14 +1670,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
server->total_read += length;
}
- /* set up first iov for signature check */
- rdata->iov[0].iov_base = buf;
- rdata->iov[0].iov_len = 4;
- rdata->iov[1].iov_base = buf + 4;
- rdata->iov[1].iov_len = server->total_read - 4;
- cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
- rdata->iov[0].iov_base, server->total_read);
-
/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
use_rdma_mr = rdata->mr;
@@ -2114,7 +2134,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
for (j = 0; j < nr_pages; j++) {
unlock_page(wdata2->pages[j]);
- if (rc != 0 && rc != -EAGAIN) {
+ if (rc != 0 && !is_retryable_error(rc)) {
SetPageError(wdata2->pages[j]);
end_page_writeback(wdata2->pages[j]);
put_page(wdata2->pages[j]);
@@ -2123,7 +2143,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
if (rc) {
kref_put(&wdata2->refcount, cifs_writedata_release);
- if (rc == -EAGAIN)
+ if (is_retryable_error(rc))
continue;
break;
}
@@ -2132,7 +2152,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
i += nr_pages;
} while (i < wdata->nr_pages);
- mapping_set_error(inode->i_mapping, rc);
+ if (rc != 0 && !is_retryable_error(rc))
+ mapping_set_error(inode->i_mapping, rc);
kref_put(&wdata->refcount, cifs_writedata_release);
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f66529679ca2..8463c940e0e5 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -433,9 +433,10 @@ static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
kfree(server->hostname);
server->hostname = extract_hostname(name);
- if (!server->hostname) {
- cifs_dbg(FYI, "%s: failed to extract hostname from target: %d\n",
- __func__, -ENOMEM);
+ if (IS_ERR(server->hostname)) {
+ cifs_dbg(FYI,
+ "%s: failed to extract hostname from target: %ld\n",
+ __func__, PTR_ERR(server->hostname));
}
}
@@ -719,6 +720,21 @@ server_unresponsive(struct TCP_Server_Info *server)
return false;
}
+static inline bool
+zero_credits(struct TCP_Server_Info *server)
+{
+ int val;
+
+ spin_lock(&server->req_lock);
+ val = server->credits + server->echo_credits + server->oplock_credits;
+ if (server->in_flight == 0 && val == 0) {
+ spin_unlock(&server->req_lock);
+ return true;
+ }
+ spin_unlock(&server->req_lock);
+ return false;
+}
+
static int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
{
@@ -731,6 +747,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
try_to_freeze();
+ /* reconnect if no credits and no requests in flight */
+ if (zero_credits(server)) {
+ cifs_reconnect(server);
+ return -ECONNABORTED;
+ }
+
if (server_unresponsive(server))
return -ECONNABORTED;
if (cifs_rdma_enabled(server) && server->smbd_conn)
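zero_credits() above is plain arithmetic under the request lock: if nothing is in flight and the regular, echo and oplock credit pools are all empty, no request can ever be sent to replenish the credits, so the demultiplex thread forces a reconnect. The same check without the locking:

#include <stdbool.h>

struct credit_state {
    int credits;            /* regular send credits */
    int echo_credits;       /* reserved for echo requests */
    int oplock_credits;     /* reserved for oplock breaks */
    int in_flight;          /* requests currently outstanding */
};

/* True when reconnecting is the only way to obtain credits again. */
static bool zero_credits(const struct credit_state *s)
{
    int total = s->credits + s->echo_credits + s->oplock_credits;

    return s->in_flight == 0 && total == 0;
}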
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index cd63c4a70875..09b7d0d4f6e4 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -776,6 +776,7 @@ static int get_tgt_list(const struct dfs_cache_entry *ce,
it->it_name = kstrndup(t->t_name, strlen(t->t_name),
GFP_KERNEL);
if (!it->it_name) {
+ kfree(it);
rc = -ENOMEM;
goto err_free_it;
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e3e3a7550205..659ce1b92c44 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -733,7 +733,8 @@ reopen_success:
if (can_flush) {
rc = filemap_write_and_wait(inode->i_mapping);
- mapping_set_error(inode->i_mapping, rc);
+ if (!is_interrupt_error(rc))
+ mapping_set_error(inode->i_mapping, rc);
if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path,
@@ -1132,14 +1133,18 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf) {
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
free_xid(xid);
return -EINVAL;
}
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+ PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+ PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -1472,12 +1477,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf)
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+ PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+ PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -2110,6 +2119,7 @@ static int cifs_writepages(struct address_space *mapping,
pgoff_t end, index;
struct cifs_writedata *wdata;
int rc = 0;
+ int saved_rc = 0;
unsigned int xid;
/*
@@ -2138,8 +2148,10 @@ retry:
rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
&wsize, &credits);
- if (rc)
+ if (rc != 0) {
+ done = true;
break;
+ }
tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
@@ -2147,6 +2159,7 @@ retry:
&found_pages);
if (!wdata) {
rc = -ENOMEM;
+ done = true;
add_credits_and_wake_if(server, credits, 0);
break;
}
@@ -2175,7 +2188,7 @@ retry:
if (rc != 0) {
add_credits_and_wake_if(server, wdata->credits, 0);
for (i = 0; i < nr_pages; ++i) {
- if (rc == -EAGAIN)
+ if (is_retryable_error(rc))
redirty_page_for_writepage(wbc,
wdata->pages[i]);
else
@@ -2183,7 +2196,7 @@ retry:
end_page_writeback(wdata->pages[i]);
put_page(wdata->pages[i]);
}
- if (rc != -EAGAIN)
+ if (!is_retryable_error(rc))
mapping_set_error(mapping, rc);
}
kref_put(&wdata->refcount, cifs_writedata_release);
@@ -2193,6 +2206,15 @@ retry:
continue;
}
+ /* Return immediately if we received a signal during writing */
+ if (is_interrupt_error(rc)) {
+ done = true;
+ break;
+ }
+
+ if (rc != 0 && saved_rc == 0)
+ saved_rc = rc;
+
wbc->nr_to_write -= nr_pages;
if (wbc->nr_to_write <= 0)
done = true;
@@ -2210,6 +2232,9 @@ retry:
goto retry;
}
+ if (saved_rc != 0)
+ rc = saved_rc;
+
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
@@ -2242,8 +2267,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
set_page_writeback(page);
retry_write:
rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
- if (rc == -EAGAIN) {
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (is_retryable_error(rc)) {
+ if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
goto retry_write;
redirty_page_for_writepage(wbc, page);
} else if (rc != 0) {
@@ -2671,6 +2696,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
if (rc) {
+ kvfree(wdata->pages);
kfree(wdata);
add_credits_and_wake_if(server, credits, 0);
break;
@@ -2682,6 +2708,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
if (rc) {
for (i = 0; i < nr_pages; i++)
put_page(wdata->pages[i]);
+ kvfree(wdata->pages);
kfree(wdata);
add_credits_and_wake_if(server, credits, 0);
break;
@@ -3361,8 +3388,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
}
rc = cifs_read_allocate_pages(rdata, npages);
- if (rc)
- goto error;
+ if (rc) {
+ kvfree(rdata->pages);
+ kfree(rdata);
+ add_credits_and_wake_if(server, credits, 0);
+ break;
+ }
rdata->tailsz = PAGE_SIZE;
}
@@ -3382,7 +3413,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
if (!rdata->cfile->invalidHandle ||
!(rc = cifs_reopen_file(rdata->cfile, true)))
rc = server->ops->async_readv(rdata);
-error:
if (rc) {
add_credits_and_wake_if(server, rdata->credits, 0);
kref_put(&rdata->refcount,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 13fb59aadebc..478003644916 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2257,6 +2257,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
* the flush returns error?
*/
rc = filemap_write_and_wait(inode->i_mapping);
+ if (is_interrupt_error(rc)) {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
mapping_set_error(inode->i_mapping, rc);
rc = 0;
@@ -2400,6 +2405,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
* the flush returns error?
*/
rc = filemap_write_and_wait(inode->i_mapping);
+ if (is_interrupt_error(rc)) {
+ rc = -ERESTARTSYS;
+ goto cifs_setattr_exit;
+ }
+
mapping_set_error(inode->i_mapping, rc);
rc = 0;
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 4ed10dd086e6..b204e84b87fb 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -122,12 +122,14 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf)
+ if (max_buf < sizeof(struct smb2_lock_element))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
max_num = max_buf / sizeof(struct smb2_lock_element);
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
if (!buf)
@@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
return -EINVAL;
}
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
max_num = max_buf / sizeof(struct smb2_lock_element);
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
if (!buf) {
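The SMB2 locking paths above now require room for at least one smb2_lock_element instead of only rejecting a zero maxBuf, cap the working buffer at PAGE_SIZE so the kcalloc() never exceeds a page, and derive the element count from the capped size. The arithmetic with illustrative constants (a 4096-byte page and a 24-byte lock element are assumptions for the example):

#include <stdio.h>

#define PAGE_SIZE_EXAMPLE   4096u
#define LOCK_ELEM_SIZE      24u     /* assumed sizeof(struct smb2_lock_element) */

/* Return how many lock elements fit, or 0 when maxBuf is unusably small. */
static unsigned int max_lock_elements(unsigned int max_buf)
{
    if (max_buf < LOCK_ELEM_SIZE)
        return 0;                       /* the caller fails with -EINVAL */
    if (max_buf > PAGE_SIZE_EXAMPLE)
        max_buf = PAGE_SIZE_EXAMPLE;    /* keep the allocation within a page */
    return max_buf / LOCK_ELEM_SIZE;
}

int main(void)
{
    /* a 64KB maxBuf is capped to one page: 4096 / 24 = 170 elements */
    printf("%u\n", max_lock_elements(65536));
    return 0;
}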
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index f14533da3a93..01a76bccdb8d 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -293,6 +293,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct smb2_file_all_info *smb2_data;
__u32 create_options = 0;
+ struct cifs_fid fid;
+ bool no_cached_open = tcon->nohandlecache;
*adjust_tz = false;
*symlink = false;
@@ -301,6 +303,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
GFP_KERNEL);
if (smb2_data == NULL)
return -ENOMEM;
+
+ /* If it is a root and its handle is cached then use it */
+ if (!strlen(full_path) && !no_cached_open) {
+ rc = open_shroot(xid, tcon, &fid);
+ if (rc)
+ goto out;
+ rc = SMB2_query_info(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid, smb2_data);
+ close_shroot(&tcon->crfid);
+ if (rc)
+ goto out;
+ move_smb2_info_to_cifs(data, smb2_data);
+ goto out;
+ }
+
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 6a9c47541c53..7b8b58fb4d3f 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
return false;
+ if (rsp->sync_hdr.CreditRequest) {
+ spin_lock(&server->req_lock);
+ server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
+ }
+
if (rsp->StructureSize !=
smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
if (le16_to_cpu(rsp->StructureSize) == 44)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index cf7eb891804f..6f96e2292856 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -34,6 +34,7 @@
#include "cifs_ioctl.h"
#include "smbdirect.h"
+/* Change credits for different ops and return the total number of credits */
static int
change_conf(struct TCP_Server_Info *server)
{
@@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server)
server->oplock_credits = server->echo_credits = 0;
switch (server->credits) {
case 0:
- return -1;
+ return 0;
case 1:
server->echoes = false;
server->oplocks = false;
- cifs_dbg(VFS, "disabling echoes and oplocks\n");
break;
case 2:
server->echoes = true;
server->oplocks = false;
server->echo_credits = 1;
- cifs_dbg(FYI, "disabling oplocks\n");
break;
default:
server->echoes = true;
@@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server)
server->echo_credits = 1;
}
server->credits -= server->echo_credits + server->oplock_credits;
- return 0;
+ return server->credits + server->echo_credits + server->oplock_credits;
}
static void
smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
const int optype)
{
- int *val, rc = 0;
+ int *val, rc = -1;
+
spin_lock(&server->req_lock);
val = server->ops->get_credits_field(server, optype);
@@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
}
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- if (rc)
- cifs_reconnect(server);
+
+ if (server->tcpStatus == CifsNeedReconnect)
+ return;
+
+ switch (rc) {
+ case -1:
+ /* change_conf hasn't been executed */
+ break;
+ case 0:
+ cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
+ break;
+ case 1:
+ cifs_dbg(VFS, "disabling echoes and oplocks\n");
+ break;
+ case 2:
+ cifs_dbg(FYI, "disabling oplocks\n");
+ break;
+ default:
+ cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
+ }
}
static void
@@ -136,7 +154,11 @@ smb2_get_credits(struct mid_q_entry *mid)
{
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
- return le16_to_cpu(shdr->CreditRequest);
+ if (mid->mid_state == MID_RESPONSE_RECEIVED
+ || mid->mid_state == MID_RESPONSE_MALFORMED)
+ return le16_to_cpu(shdr->CreditRequest);
+
+ return 0;
}
static int
@@ -165,14 +187,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
scredits = server->credits;
/* can deadlock with reopen */
- if (scredits == 1) {
+ if (scredits <= 8) {
*num = SMB2_MAX_BUFFER_SIZE;
*credits = 0;
break;
}
- /* leave one credit for a possible reopen */
- scredits--;
+ /* leave some credits for reopen and other ops */
+ scredits -= 8;
*num = min_t(unsigned int, size,
scredits * SMB2_MAX_BUFFER_SIZE);
@@ -844,7 +866,9 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
FILE_READ_EA,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE,
- SMB2_MAX_EA_BUF,
+ CIFSMaxBufSize -
+ MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE,
&rsp_iov, &buftype, cifs_sb);
if (rc) {
/*
@@ -3189,11 +3213,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
server->ops->is_status_pending(buf, server, 0))
return -1;
- rdata->result = server->ops->map_error(buf, false);
+ /* set up first two iov to get credits */
+ rdata->iov[0].iov_base = buf;
+ rdata->iov[0].iov_len = 4;
+ rdata->iov[1].iov_base = buf + 4;
+ rdata->iov[1].iov_len =
+ min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
+ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
+ rdata->result = server->ops->map_error(buf, true);
if (rdata->result != 0) {
cifs_dbg(FYI, "%s: server returned error %d\n",
__func__, rdata->result);
- dequeue_mid(mid, rdata->result);
+ /* normal error on read response */
+ dequeue_mid(mid, false);
return 0;
}
@@ -3266,14 +3302,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
return 0;
}
- /* set up first iov for signature check */
- rdata->iov[0].iov_base = buf;
- rdata->iov[0].iov_len = 4;
- rdata->iov[1].iov_base = buf + 4;
- rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
- cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
- rdata->iov[0].iov_base, server->vals->read_rsp_size);
-
length = rdata->copy_into_pages(server, rdata, &iter);
kfree(bvec);
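
The smb2_wait_mtu_credits() hunk raises the reserve from 1 credit to 8: if at most 8 credits are available the request proceeds with one default-sized buffer and no MTU credits, otherwise 8 credits are held back and the request size is capped by what remains. A minimal sketch of that sizing policy; the 64 KiB value for SMB2_MAX_BUFFER_SIZE and the credit round-up are assumptions used only for illustration.

/* Sketch of the credit-reservation policy in smb2_wait_mtu_credits():
 * keep 8 credits back for reopen and other small operations and size
 * the large request from whatever is left.  The 64 KiB buffer size is
 * an assumption for illustration.
 */
#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE	(64 * 1024)

static unsigned int mtu_request_size(unsigned int scredits,
				     unsigned int wanted,
				     unsigned int *credits_used)
{
	if (scredits <= 8) {
		/* too few credits: fall back to one default-sized buffer */
		*credits_used = 0;
		return SMB2_MAX_BUFFER_SIZE;
	}
	scredits -= 8;			/* leave credits for reopen etc. */
	if (wanted > scredits * SMB2_MAX_BUFFER_SIZE)
		wanted = scredits * SMB2_MAX_BUFFER_SIZE;
	*credits_used = (wanted + SMB2_MAX_BUFFER_SIZE - 1) / SMB2_MAX_BUFFER_SIZE;
	return wanted;
}

int main(void)
{
	unsigned int used;
	unsigned int sz = mtu_request_size(20, 8 * 1024 * 1024, &used);

	printf("size=%u credits=%u\n", sz, used);	/* capped by 12 credits */
	return 0;
}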
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index e57f6aa1d638..77b3aaa39b35 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -162,24 +162,31 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
int rc;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
- char tree[MAX_TREE_SIZE + 1];
+ char *tree;
const char *tcp_host;
size_t tcp_host_len;
const char *dfs_host;
size_t dfs_host_len;
+ tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
+ if (!tree)
+ return -ENOMEM;
+
if (tcon->ipc) {
- snprintf(tree, sizeof(tree), "\\\\%s\\IPC$",
+ snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
tcon->ses->server->hostname);
- return SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ goto out;
}
- if (!tcon->dfs_path)
- return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (!tcon->dfs_path) {
+ rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ goto out;
+ }
rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
if (rc)
- return rc;
+ goto out;
extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
&tcp_host_len);
@@ -199,7 +206,7 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
continue;
}
- snprintf(tree, sizeof(tree), "\\%s", tgt);
+ snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
if (!rc)
@@ -216,6 +223,8 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
rc = -ENOENT;
}
dfs_cache_free_tgts(&tl);
+out:
+ kfree(tree);
return rc;
}
#else
@@ -2807,6 +2816,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buftype = CIFS_NO_BUFFER;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
+ bool allocated = false;
cifs_dbg(FYI, "Query Info\n");
@@ -2846,14 +2856,21 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
"Error %d allocating memory for acl\n",
rc);
*dlen = 0;
+ rc = -ENOMEM;
goto qinf_exit;
}
+ allocated = true;
}
}
rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength),
&rsp_iov, min_len, *data);
+ if (rc && allocated) {
+ kfree(*data);
+ *data = NULL;
+ *dlen = 0;
+ }
qinf_exit:
SMB2_query_info_free(&rqst);
@@ -2907,9 +2924,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
{
struct TCP_Server_Info *server = mid->callback_data;
struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
- unsigned int credits_received = 1;
+ unsigned int credits_received = 0;
- if (mid->mid_state == MID_RESPONSE_RECEIVED)
+ if (mid->mid_state == MID_RESPONSE_RECEIVED
+ || mid->mid_state == MID_RESPONSE_MALFORMED)
credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
DeleteMidQEntry(mid);
@@ -3166,7 +3184,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
- unsigned int credits_received = 1;
+ unsigned int credits_received = 0;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
.rq_pages = rdata->pages,
@@ -3205,6 +3223,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
task_io_account_read(rdata->got_bytes);
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
+ case MID_RESPONSE_MALFORMED:
+ credits_received = le16_to_cpu(shdr->CreditRequest);
+ /* fall through */
default:
if (rdata->result != -ENODATA)
rdata->result = -EIO;
@@ -3220,8 +3241,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
rdata->mr = NULL;
}
#endif
- if (rdata->result)
+ if (rdata->result && rdata->result != -ENODATA) {
cifs_stats_fail_inc(tcon, SMB2_READ_HE);
+ trace_smb3_read_err(0 /* xid */,
+ rdata->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, rdata->offset,
+ rdata->bytes, rdata->result);
+ } else
+ trace_smb3_read_done(0 /* xid */,
+ rdata->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid,
+ rdata->offset, rdata->got_bytes);
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
@@ -3278,12 +3308,14 @@ smb2_async_readv(struct cifs_readdata *rdata)
if (rdata->credits) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
SMB2_MAX_BUFFER_SIZE));
- shdr->CreditRequest = shdr->CreditCharge;
+ shdr->CreditRequest =
+ cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
spin_lock(&server->req_lock);
server->credits += rdata->credits -
le16_to_cpu(shdr->CreditCharge);
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
+ rdata->credits = le16_to_cpu(shdr->CreditCharge);
flags |= CIFS_HAS_CREDITS;
}
@@ -3294,13 +3326,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
if (rc) {
kref_put(&rdata->refcount, cifs_readdata_release);
cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
- trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
- io_parms.tcon->tid, io_parms.tcon->ses->Suid,
- io_parms.offset, io_parms.length);
- } else
- trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
- io_parms.tcon->tid, io_parms.tcon->ses->Suid,
- io_parms.offset, io_parms.length);
+ trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
+ io_parms.tcon->tid,
+ io_parms.tcon->ses->Suid,
+ io_parms.offset, io_parms.length, rc);
+ }
cifs_small_buf_release(buf);
return rc;
@@ -3344,10 +3374,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
if (rc != -ENODATA) {
cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
cifs_dbg(VFS, "Send error in read = %d\n", rc);
+ trace_smb3_read_err(xid, req->PersistentFileId,
+ io_parms->tcon->tid, ses->Suid,
+ io_parms->offset, io_parms->length,
+ rc);
}
- trace_smb3_read_err(rc, xid, req->PersistentFileId,
- io_parms->tcon->tid, ses->Suid,
- io_parms->offset, io_parms->length);
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
return rc == -ENODATA ? 0 : rc;
} else
@@ -3388,7 +3419,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
- unsigned int credits_received = 1;
+ unsigned int credits_received = 0;
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
@@ -3416,6 +3447,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
case MID_RETRY_NEEDED:
wdata->result = -EAGAIN;
break;
+ case MID_RESPONSE_MALFORMED:
+ credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ /* fall through */
default:
wdata->result = -EIO;
break;
@@ -3433,8 +3467,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->mr = NULL;
}
#endif
- if (wdata->result)
+ if (wdata->result) {
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+ trace_smb3_write_err(0 /* no xid */,
+ wdata->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, wdata->offset,
+ wdata->bytes, wdata->result);
+ } else
+ trace_smb3_write_done(0 /* no xid */,
+ wdata->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid,
+ wdata->offset, wdata->bytes);
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
@@ -3555,12 +3598,14 @@ smb2_async_writev(struct cifs_writedata *wdata,
if (wdata->credits) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
SMB2_MAX_BUFFER_SIZE));
- shdr->CreditRequest = shdr->CreditCharge;
+ shdr->CreditRequest =
+ cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
spin_lock(&server->req_lock);
server->credits += wdata->credits -
le16_to_cpu(shdr->CreditCharge);
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
+ wdata->credits = le16_to_cpu(shdr->CreditCharge);
flags |= CIFS_HAS_CREDITS;
}
@@ -3574,10 +3619,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
wdata->bytes, rc);
kref_put(&wdata->refcount, release);
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
- } else
- trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
- tcon->tid, tcon->ses->Suid, wdata->offset,
- wdata->bytes);
+ }
async_writev_out:
cifs_small_buf_release(req);
@@ -3803,8 +3845,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
srch_inf->endOfSearch = true;
rc = 0;
- }
- cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+ } else
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
goto qdir_exit;
}
@@ -4399,8 +4441,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
- please_key_low = (__u64 *)req->LeaseKey;
- please_key_high = (__u64 *)(req->LeaseKey+8);
+ please_key_low = (__u64 *)lease_key;
+ please_key_high = (__u64 *)(lease_key+8);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
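
Both the async read and write paths above now ask the server for one credit more than they charge (CreditRequest = CreditCharge + 1), where the charge is the number of 64 KiB units the transfer covers, and any excess previously reserved for the request is returned to the client's pool. A small sketch of that charge/request computation; treating SMB2_MAX_BUFFER_SIZE as 64 KiB is an assumption here.

/* Sketch of the credit charge/request math used when building an async
 * SMB2 READ/WRITE: charge one credit per 64 KiB of payload and request
 * one extra credit on top so the client's pool keeps growing.
 */
#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE	(64 * 1024)

struct credit_hdr {
	unsigned short charge;	/* models shdr->CreditCharge  */
	unsigned short request;	/* models shdr->CreditRequest */
};

static struct credit_hdr credit_for_len(unsigned int bytes)
{
	struct credit_hdr h;

	h.charge  = (bytes + SMB2_MAX_BUFFER_SIZE - 1) / SMB2_MAX_BUFFER_SIZE;
	h.request = h.charge + 1;	/* ask for a little headroom back */
	return h;
}

int main(void)
{
	struct credit_hdr h = credit_for_len(1024 * 1024);	/* 1 MiB I/O */

	printf("charge=%u request=%u\n", h.charge, h.request);	/* 16 / 17 */
	return 0;
}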
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 7a2d0a2255e6..538e2299805f 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,9 @@
#define NUMBER_OF_SMB2_COMMANDS 0x0013
-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
-#define MAX_SMB2_HDR_SIZE 0x00b0
+/* 52 transform hdr + 64 hdr + 88 create rsp */
+#define SMB2_TRANSFORM_HEADER_SIZE 52
+#define MAX_SMB2_HDR_SIZE 204
#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
@@ -648,6 +649,13 @@ struct smb2_create_req {
__u8 Buffer[0];
} __packed;
+/*
+ * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
+ * 88 (fixed part of create response) + 520 (path) + 150 (contexts) +
+ * 2 bytes of padding.
+ */
+#define MAX_SMB2_CREATE_RESPONSE_SIZE 824
+
struct smb2_create_rsp {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 89 */
@@ -996,6 +1004,11 @@ struct smb2_close_req {
__u64 VolatileFileId; /* opaque endianness */
} __packed;
+/*
+ * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data)
+ */
+#define MAX_SMB2_CLOSE_RESPONSE_SIZE 124
+
struct smb2_close_rsp {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* 60 */
@@ -1398,8 +1411,6 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
char FileName[0]; /* Name to be assigned to new link */
} __packed; /* level 11 Set */
-#define SMB2_MAX_EA_BUF 65536
-
struct smb2_file_full_ea_info { /* encoding of response for level 15 */
__le32 next_entry_offset;
__u8 flags;
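
The new size constants are straight sums of the fixed protocol pieces: MAX_SMB2_HDR_SIZE = 52 (transform header) + 64 (SMB2 header) + 88 (create response) = 204, MAX_SMB2_CREATE_RESPONSE_SIZE = 64 + 88 + 520 + 150 + 2 = 824, and MAX_SMB2_CLOSE_RESPONSE_SIZE = 64 + 60 = 124. The EA query in smb2ops.c then requests CIFSMaxBufSize minus the create and close response maxima instead of the removed 64 KiB SMB2_MAX_EA_BUF. The compile-time checks below restate that arithmetic; the CIFSMaxBufSize default of 16384 is assumed only for the printed value, since the module parameter can change it.

/* Compile-time restatement of the new response-size constants.  The
 * CIFSMaxBufSize default of 16384 is an assumption used only for the
 * derived EA buffer value.
 */
#include <stdio.h>

enum {
	SMB2_HDR		= 64,
	SMB2_TRANSFORM_HDR	= 52,
	SMB2_CREATE_RSP_FIXED	= 88,
	SMB2_CREATE_RSP_PATH	= 520,
	SMB2_CREATE_RSP_CTXS	= 150,
	SMB2_CLOSE_RSP_FIXED	= 60,
	CIFS_MAX_BUF_DEFAULT	= 16384,
};

_Static_assert(SMB2_TRANSFORM_HDR + SMB2_HDR + SMB2_CREATE_RSP_FIXED == 204,
	       "MAX_SMB2_HDR_SIZE");
_Static_assert(SMB2_HDR + SMB2_CREATE_RSP_FIXED + SMB2_CREATE_RSP_PATH +
	       SMB2_CREATE_RSP_CTXS + 2 == 824,
	       "MAX_SMB2_CREATE_RESPONSE_SIZE");
_Static_assert(SMB2_HDR + SMB2_CLOSE_RSP_FIXED == 124,
	       "MAX_SMB2_CLOSE_RESPONSE_SIZE");

int main(void)
{
	/* room left for EA data in a compounded open/query-EA/close */
	printf("EA buffer = %d bytes\n", CIFS_MAX_BUF_DEFAULT - 824 - 124);
	return 0;
}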
diff --git a/fs/cifs/trace.c b/fs/cifs/trace.c
index bd4a546feec1..465483787193 100644
--- a/fs/cifs/trace.c
+++ b/fs/cifs/trace.c
@@ -3,16 +3,6 @@
* Copyright (C) 2018, Microsoft Corporation.
*
* Author(s): Steve French <stfrench@microsoft.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index fb049809555f..59be48206932 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -3,16 +3,6 @@
* Copyright (C) 2018, Microsoft Corporation.
*
* Author(s): Steve French <stfrench@microsoft.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cifs
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 5be7302853b6..53532bd3f50d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -387,7 +387,7 @@ smbd_done:
if (rc < 0 && rc != -EINTR)
cifs_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
- else
+ else if (rc > 0)
rc = 0;
return rc;
@@ -783,8 +783,25 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
}
static void
-cifs_noop_callback(struct mid_q_entry *mid)
+cifs_compound_callback(struct mid_q_entry *mid)
+{
+ struct TCP_Server_Info *server = mid->server;
+
+ add_credits(server, server->ops->get_credits(mid), mid->optype);
+}
+
+static void
+cifs_compound_last_callback(struct mid_q_entry *mid)
{
+ cifs_compound_callback(mid);
+ cifs_wake_up_task(mid);
+}
+
+static void
+cifs_cancelled_callback(struct mid_q_entry *mid)
+{
+ cifs_compound_callback(mid);
+ DeleteMidQEntry(mid);
}
int
@@ -795,7 +812,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
int i, j, rc = 0;
int timeout, optype;
struct mid_q_entry *midQ[MAX_COMPOUND];
- unsigned int credits = 0;
+ bool cancelled_mid[MAX_COMPOUND] = {false};
+ unsigned int credits[MAX_COMPOUND] = {0};
char *buf;
timeout = flags & CIFS_TIMEOUT_MASK;
@@ -813,13 +831,31 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -ENOENT;
/*
- * Ensure that we do not send more than 50 overlapping requests
- * to the same server. We may make this configurable later or
- * use ses->maxReq.
+ * Ensure we obtain 1 credit per request in the compound chain.
+ * It can be optimized further by waiting for all the credits
+ * at once but this can wait long enough if we don't have enough
+ * credits due to some heavy operations in progress or the server
+ * not granting us much, so a fallback to the current approach is
+ * needed anyway.
*/
- rc = wait_for_free_request(ses->server, timeout, optype);
- if (rc)
- return rc;
+ for (i = 0; i < num_rqst; i++) {
+ rc = wait_for_free_request(ses->server, timeout, optype);
+ if (rc) {
+ /*
+ * We haven't sent an SMB packet to the server yet but
+ * we already obtained credits for i requests in the
+ * compound chain - need to return those credits back
+ * for future use. Note that we need to call add_credits
+ * multiple times to match the way we obtained credits
+ * in the first place and to account for in flight
+ * requests correctly.
+ */
+ for (j = 0; j < i; j++)
+ add_credits(ses->server, 1, optype);
+ return rc;
+ }
+ credits[i] = 1;
+ }
/*
* Make sure that we sign in the same order that we send on this socket
@@ -835,18 +871,24 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
for (j = 0; j < i; j++)
cifs_delete_mid(midQ[j]);
mutex_unlock(&ses->server->srv_mutex);
+
/* Update # of requests on wire to server */
- add_credits(ses->server, 1, optype);
+ for (j = 0; j < num_rqst; j++)
+ add_credits(ses->server, credits[j], optype);
return PTR_ERR(midQ[i]);
}
midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
+ midQ[i]->optype = optype;
/*
- * We don't invoke the callback compounds unless it is the last
- * request.
+ * Invoke callback for every part of the compound chain
+ * to calculate credits properly. Wake up this thread only when
+ * the last element is received.
*/
if (i < num_rqst - 1)
- midQ[i]->callback = cifs_noop_callback;
+ midQ[i]->callback = cifs_compound_callback;
+ else
+ midQ[i]->callback = cifs_compound_last_callback;
}
cifs_in_send_inc(ses->server);
rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
@@ -860,8 +902,20 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
mutex_unlock(&ses->server->srv_mutex);
- if (rc < 0)
+ if (rc < 0) {
+ /* Sending failed for some reason - return credits back */
+ for (i = 0; i < num_rqst; i++)
+ add_credits(ses->server, credits[i], optype);
goto out;
+ }
+
+ /*
+ * At this point the request is passed to the network stack - we assume
+ * that any credits taken from the server structure on the client have
+ * been spent and we can't return them back. Once we receive responses
+ * we will collect credits granted by the server in the mid callbacks
+ * and add those credits to the server structure.
+ */
/*
* Compounding is never used during session establish.
@@ -875,36 +929,34 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(ses->server, midQ[i]);
- if (rc != 0) {
+ if (rc != 0)
+ break;
+ }
+ if (rc != 0) {
+ for (; i < num_rqst; i++) {
cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(ses->server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock);
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
- midQ[i]->callback = DeleteMidQEntry;
- spin_unlock(&GlobalMid_Lock);
- add_credits(ses->server, 1, optype);
- return rc;
+ midQ[i]->callback = cifs_cancelled_callback;
+ cancelled_mid[i] = true;
+ credits[i] = 0;
}
spin_unlock(&GlobalMid_Lock);
}
}
- for (i = 0; i < num_rqst; i++)
- if (midQ[i]->resp_buf)
- credits += ses->server->ops->get_credits(midQ[i]);
- if (!credits)
- credits = 1;
-
for (i = 0; i < num_rqst; i++) {
if (rc < 0)
goto out;
rc = cifs_sync_mid_result(midQ[i], ses->server);
if (rc != 0) {
- add_credits(ses->server, credits, optype);
- return rc;
+ /* mark this mid as cancelled to not free it below */
+ cancelled_mid[i] = true;
+ goto out;
}
if (!midQ[i]->resp_buf ||
@@ -951,9 +1003,10 @@ out:
* This is prevented above by using a noop callback that will not
* wake this thread except for the very last PDU.
*/
- for (i = 0; i < num_rqst; i++)
- cifs_delete_mid(midQ[i]);
- add_credits(ses->server, credits, optype);
+ for (i = 0; i < num_rqst; i++) {
+ if (!cancelled_mid[i])
+ cifs_delete_mid(midQ[i]);
+ }
return rc;
}
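
compound_send_recv() now takes one credit per request in the chain and, if acquisition or sending fails part-way, hands the already-taken credits back one by one so the in-flight accounting stays balanced. A minimal userspace model of that acquire-or-roll-back loop, with a plain counter standing in for the server credit pool.

/* Userspace model of "take one credit per compound request, give them
 * all back if anything fails before the chain hits the wire".
 */
#include <stdio.h>

static int pool = 3;			/* stands in for server->credits */

static int take_credit(void)
{
	if (pool == 0)
		return -1;		/* the kernel would wait here */
	pool--;
	return 0;
}

static void add_credit(void)
{
	pool++;
}

static int send_compound(int num_rqst)
{
	int i, j;

	for (i = 0; i < num_rqst; i++) {
		if (take_credit() != 0) {
			/* roll back the i credits taken so far */
			for (j = 0; j < i; j++)
				add_credit();
			return -1;
		}
	}
	/* ... build, sign and send the chain here ... */
	return 0;
}

int main(void)
{
	printf("chain of 2: %d, pool=%d\n", send_compound(2), pool);
	printf("chain of 5: %d, pool=%d\n", send_compound(5), pool);
	return 0;
}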
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 1e11a683f63d..322ce9686bdb 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -47,7 +47,7 @@ static int derive_key_aes(const u8 *master_key,
tfm = NULL;
goto out;
}
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
req = skcipher_request_alloc(tfm, GFP_NOFS);
if (!req) {
res = -ENOMEM;
@@ -257,7 +257,7 @@ allocate_skcipher_for_mode(struct fscrypt_mode *mode, const u8 *raw_key,
mode->friendly_name,
crypto_skcipher_alg(tfm)->base.cra_driver_name);
}
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
if (err)
goto err_free_tfm;
diff --git a/fs/dcache.c b/fs/dcache.c
index 2593153471cf..aac41adf4743 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -119,6 +119,7 @@ struct dentry_stat_t dentry_stat = {
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
+static DEFINE_PER_CPU(long, nr_dentry_negative);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
@@ -152,11 +153,22 @@ static long get_nr_dentry_unused(void)
return sum < 0 ? 0 : sum;
}
+static long get_nr_dentry_negative(void)
+{
+ int i;
+ long sum = 0;
+
+ for_each_possible_cpu(i)
+ sum += per_cpu(nr_dentry_negative, i);
+ return sum < 0 ? 0 : sum;
+}
+
int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
size_t *lenp, loff_t *ppos)
{
dentry_stat.nr_dentry = get_nr_dentry();
dentry_stat.nr_unused = get_nr_dentry_unused();
+ dentry_stat.nr_negative = get_nr_dentry_negative();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
@@ -317,6 +329,8 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
WRITE_ONCE(dentry->d_flags, flags);
dentry->d_inode = NULL;
+ if (dentry->d_flags & DCACHE_LRU_LIST)
+ this_cpu_inc(nr_dentry_negative);
}
static void dentry_free(struct dentry *dentry)
@@ -371,6 +385,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
* The per-cpu "nr_dentry_unused" counters are updated with
* the DCACHE_LRU_LIST bit.
*
+ * The per-cpu "nr_dentry_negative" counters are only updated
+ * when deleted from or added to the per-superblock LRU list, not
+ * from/to the shrink list. That is to avoid an unneeded dec/inc
+ * pair when moving from LRU to shrink list in select_collect().
+ *
* These helper functions make sure we always follow the
* rules. d_lock must be held by the caller.
*/
@@ -380,6 +399,8 @@ static void d_lru_add(struct dentry *dentry)
D_FLAG_VERIFY(dentry, 0);
dentry->d_flags |= DCACHE_LRU_LIST;
this_cpu_inc(nr_dentry_unused);
+ if (d_is_negative(dentry))
+ this_cpu_inc(nr_dentry_negative);
WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
@@ -388,6 +409,8 @@ static void d_lru_del(struct dentry *dentry)
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
+ if (d_is_negative(dentry))
+ this_cpu_dec(nr_dentry_negative);
WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
@@ -418,6 +441,8 @@ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags &= ~DCACHE_LRU_LIST;
this_cpu_dec(nr_dentry_unused);
+ if (d_is_negative(dentry))
+ this_cpu_dec(nr_dentry_negative);
list_lru_isolate(lru, &dentry->d_lru);
}
@@ -426,6 +451,8 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
{
D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
dentry->d_flags |= DCACHE_SHRINK_LIST;
+ if (d_is_negative(dentry))
+ this_cpu_dec(nr_dentry_negative);
list_lru_isolate_move(lru, &dentry->d_lru, list);
}
@@ -1188,15 +1215,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
*/
void shrink_dcache_sb(struct super_block *sb)
{
- long freed;
-
do {
LIST_HEAD(dispose);
- freed = list_lru_walk(&sb->s_dentry_lru,
+ list_lru_walk(&sb->s_dentry_lru,
dentry_lru_isolate_shrink, &dispose, 1024);
-
- this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
@@ -1820,6 +1843,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
WARN_ON(d_in_lookup(dentry));
spin_lock(&dentry->d_lock);
+ /*
+ * Decrement negative dentry count if it was in the LRU list.
+ */
+ if (dentry->d_flags & DCACHE_LRU_LIST)
+ this_cpu_dec(nr_dentry_negative);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
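
The new nr_dentry_negative counter follows the two rules spelled out in the comment above: it only tracks negative dentries that sit on a per-superblock LRU list, and it is deliberately left alone when a dentry merely moves between the LRU and a shrink list. A small sketch of that bookkeeping, with ordinary globals in place of the per-CPU counters.

/* Sketch of the negative-dentry accounting rules: count a dentry as
 * "negative" only while it is both inode-less and on the LRU list.
 * Plain globals stand in for the per-CPU counters.
 */
#include <stdbool.h>
#include <stdio.h>

struct dentry {
	bool has_inode;
	bool on_lru;
};

static long nr_dentry_unused;
static long nr_dentry_negative;

static void d_lru_add(struct dentry *d)
{
	d->on_lru = true;
	nr_dentry_unused++;
	if (!d->has_inode)		/* d_is_negative() */
		nr_dentry_negative++;
}

static void d_lru_del(struct dentry *d)
{
	d->on_lru = false;
	nr_dentry_unused--;
	if (!d->has_inode)
		nr_dentry_negative--;
}

static void d_instantiate(struct dentry *d)
{
	/* turning a negative dentry positive while it sits on the LRU */
	if (d->on_lru && !d->has_inode)
		nr_dentry_negative--;
	d->has_inode = true;
}

int main(void)
{
	struct dentry d = { .has_inode = false, .on_lru = false };

	d_lru_add(&d);
	printf("negative=%ld\n", nr_dentry_negative);	/* 1 */
	d_instantiate(&d);
	printf("negative=%ld\n", nr_dentry_negative);	/* 0 */
	d_lru_del(&d);
	return 0;
}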
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 13b01351dd1c..95b5e78c22b1 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -324,7 +324,7 @@ static struct dentry *failed_creating(struct dentry *dentry)
inode_unlock(d_inode(dentry->d_parent));
dput(dentry);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
static struct dentry *end_creating(struct dentry *dentry)
@@ -347,7 +347,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
dentry = start_creating(name, parent);
if (IS_ERR(dentry))
- return NULL;
+ return dentry;
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode))
@@ -386,7 +386,8 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %NULL will be returned.
+ * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
@@ -422,8 +423,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
* debugfs core.
*
* It is your responsibility to protect your struct file_operation
- * methods against file removals by means of debugfs_use_file_start()
- * and debugfs_use_file_finish(). ->open() is still protected by
+ * methods against file removals by means of debugfs_file_get()
+ * and debugfs_file_put(). ->open() is still protected by
* debugfs though.
*
* Any struct file_operations defined by means of
@@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %NULL will be returned.
+ * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
@@ -495,7 +497,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %NULL will be returned.
+ * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
@@ -506,7 +509,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
struct inode *inode;
if (IS_ERR(dentry))
- return NULL;
+ return dentry;
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode))
@@ -545,7 +548,7 @@ struct dentry *debugfs_create_automount(const char *name,
struct inode *inode;
if (IS_ERR(dentry))
- return NULL;
+ return dentry;
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode))
@@ -581,8 +584,8 @@ EXPORT_SYMBOL(debugfs_create_automount);
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the symbolic
* link is to be removed (no automatic cleanup happens if your module is
- * unloaded, you are responsible here.) If an error occurs, %NULL will be
- * returned.
+ * unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR)
+ * will be returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
@@ -594,12 +597,12 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
struct inode *inode;
char *link = kstrdup(target, GFP_KERNEL);
if (!link)
- return NULL;
+ return ERR_PTR(-ENOMEM);
dentry = start_creating(name, parent);
if (IS_ERR(dentry)) {
kfree(link);
- return NULL;
+ return dentry;
}
inode = debugfs_get_inode(dentry->d_sb);
@@ -787,6 +790,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *dentry = NULL, *trap;
struct name_snapshot old_name;
+ if (IS_ERR(old_dir))
+ return old_dir;
+ if (IS_ERR(new_dir))
+ return new_dir;
+ if (IS_ERR_OR_NULL(old_dentry))
+ return old_dentry;
+
trap = lock_rename(new_dir, old_dir);
/* Source or destination directories don't exist? */
if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
@@ -820,7 +830,9 @@ exit:
if (dentry && !IS_ERR(dentry))
dput(dentry);
unlock_rename(new_dir, old_dir);
- return NULL;
+ if (IS_ERR(dentry))
+ return dentry;
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(debugfs_rename);
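
With these changes the debugfs creation helpers report failure as ERR_PTR(-errno) rather than NULL, so callers that still test for NULL will silently treat errors as success. The conventional caller pattern is an IS_ERR() check; the sketch below re-implements the ERR_PTR()/IS_ERR() encoding in userspace purely for illustration (the real definitions live in linux/err.h).

/* Userspace re-implementation of the ERR_PTR()/IS_ERR() convention the
 * debugfs API now uses for its return values (see linux/err.h for the
 * real thing).
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_debugfs_create_dir(int fail)
{
	static int dir;			/* stands in for a real dentry */

	return fail ? ERR_PTR(-ENOMEM) : (void *)&dir;
}

int main(void)
{
	void *d = fake_debugfs_create_dir(1);

	if (IS_ERR(d))			/* not "if (!d)" any more */
		printf("creation failed: %ld\n", PTR_ERR(d));
	return 0;
}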
diff --git a/fs/direct-io.c b/fs/direct-io.c
index dbc1a1f080ce..ec2fb6fe6d37 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -679,6 +679,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
unsigned long fs_count; /* Number of filesystem-sized blocks */
int create;
unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
+ loff_t i_size;
/*
* If there was a memory error and we've overwritten all the
@@ -708,8 +709,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
*/
create = dio->op == REQ_OP_WRITE;
if (dio->flags & DIO_SKIP_HOLES) {
- if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
- i_blkbits))
+ i_size = i_size_read(dio->inode);
+ if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
create = 0;
}
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 76976d6e50f9..c98ad9777ad9 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1089,12 +1089,12 @@ static void sctp_connect_to_sock(struct connection *con)
* since O_NONBLOCK argument in connect() function does not work here,
* then, we should restore the default value of this attribute.
*/
- kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&tv,
+ kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_OLD, (char *)&tv,
sizeof(tv));
result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
0);
memset(&tv, 0, sizeof(tv));
- kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&tv,
+ kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_OLD, (char *)&tv,
sizeof(tv));
if (result == -EINPROGRESS)
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 82377017130f..d31b6c72b476 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
+ /*
+ * We must skip inodes in unusual state. We may also skip
+ * inodes without pages but we deliberately won't in case
+ * we need to reschedule to avoid softlockups.
+ */
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
- (inode->i_mapping->nrpages == 0)) {
+ (inode->i_mapping->nrpages == 0 && !need_resched())) {
spin_unlock(&inode->i_lock);
continue;
}
@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
spin_unlock(&inode->i_lock);
spin_unlock(&sb->s_inode_list_lock);
+ cond_resched();
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 4dd842f72846..f664da55234e 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -610,7 +610,8 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
full_alg_name);
goto out_free;
}
- crypto_skcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ crypto_skcipher_set_flags(crypt_stat->tfm,
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
rc = 0;
out_free:
kfree(full_alg_name);
@@ -1590,7 +1591,7 @@ ecryptfs_process_key_cipher(struct crypto_skcipher **key_tfm,
"[%s]; rc = [%d]\n", full_alg_name, rc);
goto out;
}
- crypto_skcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ crypto_skcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
if (*key_size == 0)
*key_size = crypto_skcipher_default_keysize(*key_tfm);
get_random_bytes(dummy_key, *key_size);
diff --git a/fs/exec.c b/fs/exec.c
index fb72d36f7823..74f3672146a7 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -932,7 +932,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
if (bytes < 0) {
ret = bytes;
- goto out;
+ goto out_free;
}
if (bytes == 0)
@@ -1189,7 +1189,7 @@ no_thread_group:
flush_itimer_signals();
#endif
- if (atomic_read(&oldsighand->count) != 1) {
+ if (refcount_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
/*
* This ->sighand is shared with the CLONE_SIGHAND
@@ -1199,7 +1199,7 @@ no_thread_group:
if (!newsighand)
return -ENOMEM;
- atomic_set(&newsighand->count, 1);
+ refcount_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
sizeof(newsighand->action));
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 3b8114def693..13318e255ebf 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -252,33 +252,10 @@ ext2_validate_entry(char *base, unsigned offset, unsigned mask)
return (char *)p - base;
}
-static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
- [EXT2_FT_UNKNOWN] = DT_UNKNOWN,
- [EXT2_FT_REG_FILE] = DT_REG,
- [EXT2_FT_DIR] = DT_DIR,
- [EXT2_FT_CHRDEV] = DT_CHR,
- [EXT2_FT_BLKDEV] = DT_BLK,
- [EXT2_FT_FIFO] = DT_FIFO,
- [EXT2_FT_SOCK] = DT_SOCK,
- [EXT2_FT_SYMLINK] = DT_LNK,
-};
-
-#define S_SHIFT 12
-static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
- [S_IFREG >> S_SHIFT] = EXT2_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] = EXT2_FT_DIR,
- [S_IFCHR >> S_SHIFT] = EXT2_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] = EXT2_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] = EXT2_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] = EXT2_FT_SOCK,
- [S_IFLNK >> S_SHIFT] = EXT2_FT_SYMLINK,
-};
-
static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
- umode_t mode = inode->i_mode;
if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
- de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
else
de->file_type = 0;
}
@@ -293,14 +270,14 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
- unsigned char *types = NULL;
bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
+ bool has_filetype;
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
return 0;
- if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
- types = ext2_filetype_table;
+ has_filetype =
+ EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE);
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
@@ -335,8 +312,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
if (de->inode) {
unsigned char d_type = DT_UNKNOWN;
- if (types && de->file_type < EXT2_FT_MAX)
- d_type = types[de->file_type];
+ if (has_filetype)
+ d_type = fs_ftype_to_dtype(de->file_type);
if (!dir_emit(ctx, de->name, de->name_len,
le32_to_cpu(de->inode),
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index e770cd100a6a..10ab238de9a6 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -604,22 +604,6 @@ struct ext2_dir_entry_2 {
};
/*
- * Ext2 directory file types. Only the low 3 bits are used. The
- * other bits are reserved for now.
- */
-enum {
- EXT2_FT_UNKNOWN = 0,
- EXT2_FT_REG_FILE = 1,
- EXT2_FT_DIR = 2,
- EXT2_FT_CHRDEV = 3,
- EXT2_FT_BLKDEV = 4,
- EXT2_FT_FIFO = 5,
- EXT2_FT_SOCK = 6,
- EXT2_FT_SYMLINK = 7,
- EXT2_FT_MAX
-};
-
-/*
* EXT2_DIR_PAD defines the directory entries boundaries
*
* NOTE: It must be a multiple of 4
@@ -774,6 +758,7 @@ extern int ext2_write_inode (struct inode *, struct writeback_control *);
extern void ext2_evict_inode(struct inode *);
extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern int ext2_setattr (struct dentry *, struct iattr *);
+extern int ext2_getattr (const struct path *, struct kstat *, u32, unsigned int);
extern void ext2_set_inode_flags(struct inode *inode);
extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 28b2609f25c1..39c4772e96c9 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -199,6 +199,7 @@ const struct inode_operations ext2_file_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
#endif
+ .getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
.set_acl = ext2_set_acl,
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 5c3d7b7e4975..a0c5ea91fcd4 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -222,8 +222,6 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
best_desc = desc;
}
}
- if (!best_desc)
- return -1;
return best_group;
}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index e4bb9386c045..c27c27300d95 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -717,7 +717,7 @@ static int ext2_get_blocks(struct inode *inode,
/* the number of blocks need to allocate for [d,t]indirect blocks */
indirect_blks = (chain + depth) - partial - 1;
/*
- * Next look up the indirect map to count the totoal number of
+ * Next look up the indirect map to count the total number of
* direct blocks to allocate for this branch.
*/
count = ext2_blks_to_allocate(partial, indirect_blks,
@@ -1239,6 +1239,7 @@ do_indirects:
mark_inode_dirty(inode);
ext2_free_branches(inode, &nr, &nr+1, 1);
}
+ /* fall through */
case EXT2_IND_BLOCK:
nr = i_data[EXT2_DIND_BLOCK];
if (nr) {
@@ -1246,6 +1247,7 @@ do_indirects:
mark_inode_dirty(inode);
ext2_free_branches(inode, &nr, &nr+1, 2);
}
+ /* fall through */
case EXT2_DIND_BLOCK:
nr = i_data[EXT2_TIND_BLOCK];
if (nr) {
@@ -1635,6 +1637,32 @@ int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
+int ext2_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int query_flags)
+{
+ struct inode *inode = d_inode(path->dentry);
+ struct ext2_inode_info *ei = EXT2_I(inode);
+ unsigned int flags;
+
+ flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
+ if (flags & EXT2_APPEND_FL)
+ stat->attributes |= STATX_ATTR_APPEND;
+ if (flags & EXT2_COMPR_FL)
+ stat->attributes |= STATX_ATTR_COMPRESSED;
+ if (flags & EXT2_IMMUTABLE_FL)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ if (flags & EXT2_NODUMP_FL)
+ stat->attributes |= STATX_ATTR_NODUMP;
+ stat->attributes_mask |= (STATX_ATTR_APPEND |
+ STATX_ATTR_COMPRESSED |
+ STATX_ATTR_ENCRYPTED |
+ STATX_ATTR_IMMUTABLE |
+ STATX_ATTR_NODUMP);
+
+ generic_fillattr(inode, stat);
+ return 0;
+}
+
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 0c26dcc5d850..ccfbbf59e2fc 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -416,6 +416,7 @@ const struct inode_operations ext2_dir_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
#endif
+ .getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
.set_acl = ext2_set_acl,
@@ -426,6 +427,7 @@ const struct inode_operations ext2_special_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
#endif
+ .getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
.set_acl = ext2_set_acl,
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 73b2d528237f..0128010a0874 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -757,7 +757,8 @@ static loff_t ext2_max_size(int bits)
{
loff_t res = EXT2_NDIR_BLOCKS;
int meta_blocks;
- loff_t upper_limit;
+ unsigned int upper_limit;
+ unsigned int ppb = 1 << (bits-2);
/* This is calculated to be the largest file size for a
* dense, file such that the total number of
@@ -771,24 +772,34 @@ static loff_t ext2_max_size(int bits)
/* total blocks in file system block size */
upper_limit >>= (bits - 9);
+ /* Compute how many blocks we can address by block tree */
+ res += 1LL << (bits-2);
+ res += 1LL << (2*(bits-2));
+ res += 1LL << (3*(bits-2));
+ /* Does block tree limit file size? */
+ if (res < upper_limit)
+ goto check_lfs;
+ res = upper_limit;
+ /* How many metadata blocks are needed for addressing upper_limit? */
+ upper_limit -= EXT2_NDIR_BLOCKS;
/* indirect blocks */
meta_blocks = 1;
+ upper_limit -= ppb;
/* double indirect blocks */
- meta_blocks += 1 + (1LL << (bits-2));
- /* tripple indirect blocks */
- meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
-
- upper_limit -= meta_blocks;
- upper_limit <<= bits;
-
- res += 1LL << (bits-2);
- res += 1LL << (2*(bits-2));
- res += 1LL << (3*(bits-2));
+ if (upper_limit < ppb * ppb) {
+ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
+ res -= meta_blocks;
+ goto check_lfs;
+ }
+ meta_blocks += 1 + ppb;
+ upper_limit -= ppb * ppb;
+ /* triple indirect blocks for the rest */
+ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
+ DIV_ROUND_UP(upper_limit, ppb*ppb);
+ res -= meta_blocks;
+check_lfs:
res <<= bits;
- if (res > upper_limit)
- res = upper_limit;
-
if (res > MAX_LFS_FILESIZE)
res = MAX_LFS_FILESIZE;
@@ -1024,8 +1035,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
- if (EXT2_INODE_SIZE(sb) == 0)
- goto cantfind_ext2;
sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
goto cantfind_ext2;
@@ -1087,12 +1096,14 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sizeof(struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
+ ret = -ENOMEM;
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount;
}
bgl_lock_init(sbi->s_blockgroup_lock);
sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
if (!sbi->s_debts) {
+ ret = -ENOMEM;
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount_group_desc;
}
@@ -1148,6 +1159,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_EXT2_FS_XATTR
sbi->s_ea_block_cache = ext2_xattr_create_cache();
if (!sbi->s_ea_block_cache) {
+ ret = -ENOMEM;
ext2_msg(sb, KERN_ERR, "Failed to create ea_block_cache");
goto failed_mount3;
}
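
The rewritten ext2_max_size() weighs two bounds against each other: the capacity of the indirect block tree (12 direct blocks plus one, two and three levels of indirection) and the 32-bit, 512-byte-sector i_blocks limit, then subtracts the metadata blocks needed to address the smaller of the two. A rough numeric check for 4 KiB blocks (bits = 12, so 1024 pointers per block); the (2^32 - 1)-sector i_blocks bound is the usual ext2 assumption and the sketch ignores the metadata correction the kernel code applies on top.

/* Rough numeric check of the two limits ext2_max_size() compares,
 * for 4 KiB blocks, ignoring the metadata-block correction.
 */
#include <stdio.h>

int main(void)
{
	int bits = 12;					/* 4 KiB blocks */
	unsigned long long ppb = 1ULL << (bits - 2);	/* pointers per block */

	/* blocks addressable through the direct/indirect tree */
	unsigned long long tree = 12 + ppb + ppb * ppb + ppb * ppb * ppb;

	/* blocks representable in the 32-bit, 512-byte-sector i_blocks */
	unsigned long long sectors = (1ULL << 32) - 1;
	unsigned long long iblocks = sectors >> (bits - 9);

	unsigned long long limit = (tree < iblocks ? tree : iblocks) << bits;

	printf("tree    = %llu blocks (~%llu GiB)\n", tree, (tree << bits) >> 30);
	printf("iblocks = %llu blocks (~%llu GiB)\n", iblocks, (iblocks << bits) >> 30);
	printf("max file size before metadata correction ~ %llu GiB\n",
	       limit >> 30);
	return 0;
}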
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
index d5589ddcc281..00cdb8679486 100644
--- a/fs/ext2/symlink.c
+++ b/fs/ext2/symlink.c
@@ -23,6 +23,7 @@
const struct inode_operations ext2_symlink_inode_operations = {
.get_link = page_get_link,
+ .getattr = ext2_getattr,
.setattr = ext2_setattr,
#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
@@ -31,6 +32,7 @@ const struct inode_operations ext2_symlink_inode_operations = {
const struct inode_operations ext2_fast_symlink_inode_operations = {
.get_link = simple_get_link,
+ .getattr = ext2_getattr,
.setattr = ext2_setattr,
#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 4f30876ee325..1e33e0ac8cf1 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -342,6 +342,7 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
return;
spin_lock(&EXT2_SB(sb)->s_lock);
+ ext2_update_dynamic_rev(sb);
EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
spin_unlock(&EXT2_SB(sb)->s_lock);
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 712f00995390..5508baa11bb6 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
- ret = file_write_and_wait_range(file, start, end);
- if (ret)
- return ret;
-
if (!journal) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL
- };
-
- ret = ext4_write_inode(inode, &wbc);
+ ret = __generic_file_fsync(file, start, end, datasync);
if (!ret)
ret = ext4_sync_parent(inode);
if (test_opt(inode->i_sb, BARRIER))
@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
+ ret = file_write_and_wait_range(file, start, end);
+ if (ret)
+ return ret;
/*
* data=writeback,ordered:
* The caller's filemap_fdatawrite()/wait will sync the data.
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index ebcc121920ba..fd7f170e2f2d 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -506,30 +506,16 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
kvfree(si);
}
-int __init f2fs_create_root_stats(void)
+void __init f2fs_create_root_stats(void)
{
- struct dentry *file;
-
f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
- if (!f2fs_debugfs_root)
- return -ENOMEM;
- file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root,
- NULL, &stat_fops);
- if (!file) {
- debugfs_remove(f2fs_debugfs_root);
- f2fs_debugfs_root = NULL;
- return -ENOMEM;
- }
-
- return 0;
+ debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root, NULL,
+ &stat_fops);
}
void f2fs_destroy_root_stats(void)
{
- if (!f2fs_debugfs_root)
- return;
-
debugfs_remove_recursive(f2fs_debugfs_root);
f2fs_debugfs_root = NULL;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 12fabd6735dd..8f23ee6e8eb9 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3328,7 +3328,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
-int __init f2fs_create_root_stats(void);
+void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_cp_count(si) do { } while (0)
@@ -3366,7 +3366,7 @@ void f2fs_destroy_root_stats(void);
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
-static inline int __init f2fs_create_root_stats(void) { return 0; }
+static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index c46a1d4318d4..3d3ce9eb6d13 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -3545,9 +3545,7 @@ static int __init init_f2fs_fs(void)
err = register_filesystem(&f2fs_fs_type);
if (err)
goto free_shrinker;
- err = f2fs_create_root_stats();
- if (err)
- goto free_filesystem;
+ f2fs_create_root_stats();
err = f2fs_init_post_read_processing();
if (err)
goto free_root_stats;
@@ -3555,7 +3553,6 @@ static int __init init_f2fs_fs(void)
free_root_stats:
f2fs_destroy_root_stats();
-free_filesystem:
unregister_filesystem(&f2fs_fs_type);
free_shrinker:
unregister_shrinker(&f2fs_shrinker_info);
diff --git a/fs/file.c b/fs/file.c
index 3209ee271c41..a10487aa0a84 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -457,6 +457,7 @@ struct files_struct init_files = {
.full_fds_bits = init_files.full_fds_bits_init,
},
.file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
+ .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b40168fcc94a..36855c1f8daf 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
struct work_struct work;
};
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+ down_write(&bdi->wb_switch_rwsem);
+}
+
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+ up_write(&bdi->wb_switch_rwsem);
+}
+
static void inode_switch_wbs_work_fn(struct work_struct *work)
{
struct inode_switch_wbs_context *isw =
container_of(work, struct inode_switch_wbs_context, work);
struct inode *inode = isw->inode;
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
struct address_space *mapping = inode->i_mapping;
struct bdi_writeback *old_wb = inode->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
@@ -344,6 +355,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
bool switched = false;
/*
+ * If @inode switches cgwb membership while sync_inodes_sb() is
+ * being issued, sync_inodes_sb() might miss it. Synchronize.
+ */
+ down_read(&bdi->wb_switch_rwsem);
+
+ /*
* By the time control reaches here, RCU grace period has passed
* since I_WB_SWITCH assertion and all wb stat update transactions
* between unlocked_inode_to_wb_begin/end() are guaranteed to be
@@ -428,6 +445,8 @@ skip_switch:
spin_unlock(&new_wb->list_lock);
spin_unlock(&old_wb->list_lock);
+ up_read(&bdi->wb_switch_rwsem);
+
if (switched) {
wb_wakeup(new_wb);
wb_put(old_wb);
@@ -468,9 +487,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (inode->i_state & I_WB_SWITCH)
return;
+ /*
+ * Avoid starting new switches while sync_inodes_sb() is in
+ * progress. Otherwise, if the down_write protected issue path
+ * blocks heavily, we might end up starting a large number of
+ * switches which will block on the rwsem.
+ */
+ if (!down_read_trylock(&bdi->wb_switch_rwsem))
+ return;
+
isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
if (!isw)
- return;
+ goto out_unlock;
/* find and pin the new wb */
rcu_read_lock();
@@ -504,12 +532,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
* Let's continue after I_WB_SWITCH is guaranteed to be visible.
*/
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
- return;
+ goto out_unlock;
out_free:
if (isw->new_wb)
wb_put(isw->new_wb);
kfree(isw);
+out_unlock:
+ up_read(&bdi->wb_switch_rwsem);
}
/**
@@ -887,6 +917,9 @@ fs_initcall(cgroup_writeback_init);
#else /* CONFIG_CGROUP_WRITEBACK */
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
__releases(&inode->i_lock)
@@ -2413,8 +2446,11 @@ void sync_inodes_sb(struct super_block *sb)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
+ /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+ bdi_down_write_wb_switch_rwsem(bdi);
bdi_split_work_to_wbs(bdi, &work, false);
wb_wait_for_completion(bdi, &done);
+ bdi_up_write_wb_switch_rwsem(bdi);
wait_sb_inodes(sb);
}
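
The new wb_switch_rwsem pairs a down_write() around sync_inodes_sb()'s work issue with a down_read_trylock() in inode_switch_wbs(), so a heavy sync simply makes new cgwb switches get skipped instead of piling up behind the semaphore. A userspace analogue of that "writer excludes, readers give up rather than queue" pattern using a pthread rwlock (build with -pthread).

/* Userspace analogue of the wb_switch_rwsem usage: the sync path takes
 * the lock for writing, the switch path only trylocks for reading and
 * bails out (skipping the optimisation) if a sync is in progress.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t switch_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static void start_inode_switch(void)
{
	if (pthread_rwlock_tryrdlock(&switch_rwsem) != 0) {
		puts("sync in progress - skip starting a new switch");
		return;
	}
	puts("switch started");
	/* ... queue the switch work ... */
	pthread_rwlock_unlock(&switch_rwsem);
}

static void sync_inodes(void)
{
	pthread_rwlock_wrlock(&switch_rwsem);	/* exclude new switches */
	puts("issuing writeback for all inodes");
	pthread_rwlock_unlock(&switch_rwsem);
}

int main(void)
{
	start_inode_switch();
	sync_inodes();
	start_inode_switch();
	return 0;
}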
diff --git a/fs/fs_types.c b/fs/fs_types.c
new file mode 100644
index 000000000000..78365e5dc08c
--- /dev/null
+++ b/fs/fs_types.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/fs.h>
+#include <linux/export.h>
+
+/*
+ * fs on-disk file type to dirent file type conversion
+ */
+static const unsigned char fs_dtype_by_ftype[FT_MAX] = {
+ [FT_UNKNOWN] = DT_UNKNOWN,
+ [FT_REG_FILE] = DT_REG,
+ [FT_DIR] = DT_DIR,
+ [FT_CHRDEV] = DT_CHR,
+ [FT_BLKDEV] = DT_BLK,
+ [FT_FIFO] = DT_FIFO,
+ [FT_SOCK] = DT_SOCK,
+ [FT_SYMLINK] = DT_LNK
+};
+
+/**
+ * fs_ftype_to_dtype() - fs on-disk file type to dirent type.
+ * @filetype: The on-disk file type to convert.
+ *
+ * This function converts the on-disk file type value (FT_*) to the directory
+ * entry type (DT_*).
+ *
+ * Context: Any context.
+ * Return:
+ * * DT_UNKNOWN - Unknown type
+ * * DT_FIFO - FIFO
+ * * DT_CHR - Character device
+ * * DT_DIR - Directory
+ * * DT_BLK - Block device
+ * * DT_REG - Regular file
+ * * DT_LNK - Symbolic link
+ * * DT_SOCK - Local-domain socket
+ */
+unsigned char fs_ftype_to_dtype(unsigned int filetype)
+{
+ if (filetype >= FT_MAX)
+ return DT_UNKNOWN;
+
+ return fs_dtype_by_ftype[filetype];
+}
+EXPORT_SYMBOL_GPL(fs_ftype_to_dtype);
+
+/*
+ * dirent file type to fs on-disk file type conversion
+ * Values not initialized explicitly are FT_UNKNOWN (0).
+ */
+static const unsigned char fs_ftype_by_dtype[DT_MAX] = {
+ [DT_REG] = FT_REG_FILE,
+ [DT_DIR] = FT_DIR,
+ [DT_LNK] = FT_SYMLINK,
+ [DT_CHR] = FT_CHRDEV,
+ [DT_BLK] = FT_BLKDEV,
+ [DT_FIFO] = FT_FIFO,
+ [DT_SOCK] = FT_SOCK,
+};
+
+/**
+ * fs_umode_to_ftype() - file mode to on-disk file type.
+ * @mode: The file mode to convert.
+ *
+ * This function converts the file mode value to the on-disk file type (FT_*).
+ *
+ * Context: Any context.
+ * Return:
+ * * FT_UNKNOWN - Unknown type
+ * * FT_REG_FILE - Regular file
+ * * FT_DIR - Directory
+ * * FT_CHRDEV - Character device
+ * * FT_BLKDEV - Block device
+ * * FT_FIFO - FIFO
+ * * FT_SOCK - Local-domain socket
+ * * FT_SYMLINK - Symbolic link
+ */
+unsigned char fs_umode_to_ftype(umode_t mode)
+{
+ return fs_ftype_by_dtype[S_DT(mode)];
+}
+EXPORT_SYMBOL_GPL(fs_umode_to_ftype);
+
+/**
+ * fs_umode_to_dtype() - file mode to dirent file type.
+ * @mode: The file mode to convert.
+ *
+ * This function converts the file mode value to the directory
+ * entry type (DT_*).
+ *
+ * Context: Any context.
+ * Return:
+ * * DT_UNKNOWN - Unknown type
+ * * DT_FIFO - FIFO
+ * * DT_CHR - Character device
+ * * DT_DIR - Directory
+ * * DT_BLK - Block device
+ * * DT_REG - Regular file
+ * * DT_LNK - Symbolic link
+ * * DT_SOCK - Local-domain socket
+ */
+unsigned char fs_umode_to_dtype(umode_t mode)
+{
+ return fs_ftype_to_dtype(fs_umode_to_ftype(mode));
+}
+EXPORT_SYMBOL_GPL(fs_umode_to_dtype);
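
The new helpers centralise the on-disk file type tables that ext2 (above) used to carry privately. A short userspace illustration of the same mode to DT_* mapping, using <dirent.h>'s DT_* values; the FT_* constants are redefined locally because they are kernel-internal, and a switch on S_IFMT stands in for the kernel's shifted lookup table.

/* Userspace illustration of fs_umode_to_dtype(): map an inode mode to
 * a dirent type via the on-disk FT_* value.
 */
#include <dirent.h>
#include <stdio.h>
#include <sys/stat.h>

enum { FT_UNKNOWN, FT_REG_FILE, FT_DIR, FT_CHRDEV, FT_BLKDEV,
       FT_FIFO, FT_SOCK, FT_SYMLINK, FT_MAX };

static const unsigned char dtype_by_ftype[FT_MAX] = {
	[FT_REG_FILE] = DT_REG,  [FT_DIR]     = DT_DIR,
	[FT_CHRDEV]   = DT_CHR,  [FT_BLKDEV]  = DT_BLK,
	[FT_FIFO]     = DT_FIFO, [FT_SOCK]    = DT_SOCK,
	[FT_SYMLINK]  = DT_LNK,
};

static unsigned char ftype_by_mode(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:  return FT_REG_FILE;
	case S_IFDIR:  return FT_DIR;
	case S_IFCHR:  return FT_CHRDEV;
	case S_IFBLK:  return FT_BLKDEV;
	case S_IFIFO:  return FT_FIFO;
	case S_IFSOCK: return FT_SOCK;
	case S_IFLNK:  return FT_SYMLINK;
	default:       return FT_UNKNOWN;
	}
}

int main(void)
{
	mode_t mode = S_IFDIR | 0755;

	printf("dtype = %d (DT_DIR = %d)\n",
	       dtype_by_ftype[ftype_by_mode(mode)], DT_DIR);
	return 0;
}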
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a5e516a40e7a..809c0f2f9942 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
req->in.h.nodeid = outarg->nodeid;
req->in.numargs = 2;
req->in.argpages = 1;
- req->page_descs[0].offset = offset;
req->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_SHIFT;
@@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
req->pages[req->num_pages] = page;
+ req->page_descs[req->num_pages].offset = offset;
req->page_descs[req->num_pages].length = this_num;
req->num_pages++;
@@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
ret = fuse_dev_do_write(fud, &cs, len);
+ pipe_lock(pipe);
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
+ pipe_unlock(pipe);
out:
kvfree(bufs);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ffaffe18352a..a59c16bd90ac 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
spin_unlock(&fc->lock);
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(page, NR_WRITEBACK_TEMP);
+ dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
fuse_writepage_free(fc, new_req);
fuse_request_free(new_req);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 76baaa6be393..c2d4099429be 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
fc->user_ns = get_user_ns(user_ns);
+ fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
@@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fc->user_id = d.user_id;
fc->group_id = d.group_id;
fc->max_read = max_t(unsigned, 4096, d.max_read);
- fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
/* Used by get_root_inode() */
sb->s_fs_info = fc;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index f15b4c57c4bd..78510ab91835 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -28,7 +28,6 @@
#include "util.h"
#include "trans.h"
#include "dir.h"
-#include "lops.h"
struct workqueue_struct *gfs2_freeze_wq;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 5bfaf381921a..b8830fda51e8 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -733,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
lh->lh_crc = cpu_to_be32(crc);
gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
- gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
+ gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, op_flags);
log_flush_wait(sdp);
}
@@ -810,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
gfs2_ordered_write(sdp);
lops_before_commit(sdp, tr);
- gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
+ gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, 0);
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
log_flush_wait(sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 94dcab655bc0..2295042bc625 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -17,9 +17,7 @@
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
-#include <linux/blkdev.h>
-#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
@@ -195,6 +193,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
/**
* gfs2_end_log_write - end of i/o to the log
* @bio: The bio
+ * @error: Status of i/o request
*
* Each bio_vec contains either data from the pagecache or data
* relating to the log itself. Here we iterate over the bio_vec
@@ -231,19 +230,20 @@ static void gfs2_end_log_write(struct bio *bio)
/**
* gfs2_log_submit_bio - Submit any pending log bio
* @biop: Address of the bio pointer
- * @opf: REQ_OP | op_flags
+ * @op: REQ_OP
+ * @op_flags: req_flag_bits
*
* Submit any pending part-built or full bio to the block device. If
* there is no pending bio, then this is a no-op.
*/
-void gfs2_log_submit_bio(struct bio **biop, int opf)
+void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags)
{
struct bio *bio = *biop;
if (bio) {
struct gfs2_sbd *sdp = bio->bi_private;
atomic_inc(&sdp->sd_log_in_flight);
- bio->bi_opf = opf;
+ bio_set_op_attrs(bio, op, op_flags);
submit_bio(bio);
*biop = NULL;
}
@@ -304,7 +304,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
nblk >>= sdp->sd_fsb2bb_shift;
if (blkno == nblk && !flush)
return bio;
- gfs2_log_submit_bio(biop, op);
+ gfs2_log_submit_bio(biop, op, 0);
}
*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
@@ -375,184 +375,6 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
gfs2_log_bmap(sdp));
}
-/**
- * gfs2_end_log_read - end I/O callback for reads from the log
- * @bio: The bio
- *
- * Simply unlock the pages in the bio. The main thread will wait on them and
- * process them in order as necessary.
- */
-
-static void gfs2_end_log_read(struct bio *bio)
-{
- struct page *page;
- struct bio_vec *bvec;
- int i;
-
- bio_for_each_segment_all(bvec, bio, i) {
- page = bvec->bv_page;
- if (bio->bi_status) {
- int err = blk_status_to_errno(bio->bi_status);
-
- SetPageError(page);
- mapping_set_error(page->mapping, err);
- }
- unlock_page(page);
- }
-
- bio_put(bio);
-}
-
-/**
- * gfs2_jhead_pg_srch - Look for the journal head in a given page.
- * @jd: The journal descriptor
- * @page: The page to look in
- *
- * Returns: 1 if found, 0 otherwise.
- */
-
-static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
- struct gfs2_log_header_host *head,
- struct page *page)
-{
- struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
- struct gfs2_log_header_host uninitialized_var(lh);
- void *kaddr = kmap_atomic(page);
- unsigned int offset;
- bool ret = false;
-
- for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
- if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
- if (lh.lh_sequence > head->lh_sequence)
- *head = lh;
- else {
- ret = true;
- break;
- }
- }
- }
- kunmap_atomic(kaddr);
- return ret;
-}
-
-/**
- * gfs2_jhead_process_page - Search/cleanup a page
- * @jd: The journal descriptor
- * @index: Index of the page to look into
- * @done: If set, perform only cleanup, else search and set if found.
- *
- * Find the page with 'index' in the journal's mapping. Search the page for
- * the journal head if requested (cleanup == false). Release refs on the
- * page so the page cache can reclaim it (put_page() twice). We grabbed a
- * reference on this page two times, first when we did a find_or_create_page()
- * to obtain the page to add it to the bio and second when we do a
- * find_get_page() here to get the page to wait on while I/O on it is being
- * completed.
- * This function is also used to free up a page we might've grabbed but not
- * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
- * submitted the I/O, but we already found the jhead so we only need to drop
- * our references to the page.
- */
-
-static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
- struct gfs2_log_header_host *head,
- bool *done)
-{
- struct page *page;
-
- page = find_get_page(jd->jd_inode->i_mapping, index);
- wait_on_page_locked(page);
-
- if (PageError(page))
- *done = true;
-
- if (!*done)
- *done = gfs2_jhead_pg_srch(jd, head, page);
-
- put_page(page); /* Once for find_get_page */
- put_page(page); /* Once more for find_or_create_page */
-}
-
-/**
- * gfs2_find_jhead - find the head of a log
- * @jd: The journal descriptor
- * @head: The log descriptor for the head of the log is returned here
- *
- * Do a search of a journal by reading it in large chunks using bios and find
- * the valid log entry with the highest sequence number. (i.e. the log head)
- *
- * Returns: 0 on success, errno otherwise
- */
-
-int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
-{
- struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
- struct address_space *mapping = jd->jd_inode->i_mapping;
- struct gfs2_journal_extent *je;
- u32 block, read_idx = 0, submit_idx = 0, index = 0;
- int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
- int blocks_per_page = 1 << shift, sz, ret = 0;
- struct bio *bio = NULL;
- struct page *page;
- bool done = false;
- errseq_t since;
-
- memset(head, 0, sizeof(*head));
- if (list_empty(&jd->extent_list))
- gfs2_map_journal_extents(sdp, jd);
-
- since = filemap_sample_wb_err(mapping);
- list_for_each_entry(je, &jd->extent_list, list) {
- for (block = 0; block < je->blocks; block += blocks_per_page) {
- index = (je->lblock + block) >> shift;
-
- page = find_or_create_page(mapping, index, GFP_NOFS);
- if (!page) {
- ret = -ENOMEM;
- done = true;
- goto out;
- }
-
- if (bio) {
- sz = bio_add_page(bio, page, PAGE_SIZE, 0);
- if (sz == PAGE_SIZE)
- goto page_added;
- submit_idx = index;
- submit_bio(bio);
- bio = NULL;
- }
-
- bio = gfs2_log_alloc_bio(sdp,
- je->dblock + (index << shift),
- gfs2_end_log_read);
- bio->bi_opf = REQ_OP_READ;
- sz = bio_add_page(bio, page, PAGE_SIZE, 0);
- gfs2_assert_warn(sdp, sz == PAGE_SIZE);
-
-page_added:
- if (submit_idx <= read_idx + BIO_MAX_PAGES) {
- /* Keep at least one bio in flight */
- continue;
- }
-
- gfs2_jhead_process_page(jd, read_idx++, head, &done);
- if (done)
- goto out; /* found */
- }
- }
-
-out:
- if (bio)
- submit_bio(bio);
- while (read_idx <= index)
- gfs2_jhead_process_page(jd, read_idx++, head, &done);
-
- if (!ret)
- ret = filemap_check_wb_err(mapping, since);
-
- return ret;
-}
-
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
u32 ld_length, u32 ld_data1)
{
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 331160fc568b..711c4d89c063 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -30,10 +30,8 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
unsigned size, unsigned offset, u64 blkno);
extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
-extern void gfs2_log_submit_bio(struct bio **biop, int opf);
+extern void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
-extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
- struct gfs2_log_header_host *head);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1179763f6370..b041cb8ae383 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -41,7 +41,6 @@
#include "dir.h"
#include "meta_io.h"
#include "trace_gfs2.h"
-#include "lops.h"
#define DO 0
#define UNDO 1
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 7389e445a7a7..2dac43065382 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -182,6 +182,129 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
}
/**
+ * find_good_lh - find a good log header
+ * @jd: the journal
+ * @blk: the segment to start searching from
+ * @head: the log header to fill in
+ *
+ * Call get_log_header() to get a log header for a segment, but if the
+ * segment is bad, scan forward until we find a good one.
+ *
+ * Returns: errno
+ */
+
+static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
+ struct gfs2_log_header_host *head)
+{
+ unsigned int orig_blk = *blk;
+ int error;
+
+ for (;;) {
+ error = get_log_header(jd, *blk, head);
+ if (error <= 0)
+ return error;
+
+ if (++*blk == jd->jd_blocks)
+ *blk = 0;
+
+ if (*blk == orig_blk) {
+ gfs2_consist_inode(GFS2_I(jd->jd_inode));
+ return -EIO;
+ }
+ }
+}
+
+/**
+ * jhead_scan - make sure we've found the head of the log
+ * @jd: the journal
+ * @head: this is filled in with the log descriptor of the head
+ *
+ * At this point, head should be either at the head of the log or just
+ * before it. Scan forward until we find the head.
+ *
+ * Returns: errno
+ */
+
+static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
+{
+ unsigned int blk = head->lh_blkno;
+ struct gfs2_log_header_host lh;
+ int error;
+
+ for (;;) {
+ if (++blk == jd->jd_blocks)
+ blk = 0;
+
+ error = get_log_header(jd, blk, &lh);
+ if (error < 0)
+ return error;
+ if (error == 1)
+ continue;
+
+ if (lh.lh_sequence == head->lh_sequence) {
+ gfs2_consist_inode(GFS2_I(jd->jd_inode));
+ return -EIO;
+ }
+ if (lh.lh_sequence < head->lh_sequence)
+ break;
+
+ *head = lh;
+ }
+
+ return 0;
+}
+
+/**
+ * gfs2_find_jhead - find the head of a log
+ * @jd: the journal
+ * @head: the log descriptor for the head of the log is returned here
+ *
+ * Do a binary search of a journal and find the valid log entry with the
+ * highest sequence number. (i.e. the log head)
+ *
+ * Returns: errno
+ */
+
+int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
+{
+ struct gfs2_log_header_host lh_1, lh_m;
+ u32 blk_1, blk_2, blk_m;
+ int error;
+
+ blk_1 = 0;
+ blk_2 = jd->jd_blocks - 1;
+
+ for (;;) {
+ blk_m = (blk_1 + blk_2) / 2;
+
+ error = find_good_lh(jd, &blk_1, &lh_1);
+ if (error)
+ return error;
+
+ error = find_good_lh(jd, &blk_m, &lh_m);
+ if (error)
+ return error;
+
+ if (blk_1 == blk_m || blk_m == blk_2)
+ break;
+
+ if (lh_1.lh_sequence <= lh_m.lh_sequence)
+ blk_1 = blk_m;
+ else
+ blk_2 = blk_m;
+ }
+
+ error = jhead_scan(jd, &lh_1);
+ if (error)
+ return error;
+
+ *head = lh_1;
+
+ return error;
+}
+
+/**
* foreach_descriptor - go through the active part of the log
* @jd: the journal
* @start: the first log header in the active region
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 99575ab81202..11d81248be85 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -27,6 +27,8 @@ extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
extern void gfs2_revoke_clean(struct gfs2_jdesc *jd);
+extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head);
extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
extern void gfs2_recover_func(struct work_struct *work);
extern int __get_log_header(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 831d7cb5a49c..17a8d3b43990 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
goto next_iter;
}
if (ret == -E2BIG) {
- n += rbm->bii - initial_bii;
rbm->bii = 0;
rbm->offset = 0;
+ n += (rbm->bii - initial_bii);
goto res_covered_end_of_rgrp;
}
return ret;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index d4b11c903971..ca71163ff7cf 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -45,7 +45,6 @@
#include "util.h"
#include "sys.h"
#include "xattr.h"
-#include "lops.h"
#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a2fcea5f8225..b0eef008de67 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -383,16 +383,17 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
* truncation is indicated by end of range being LLONG_MAX
* In this case, we first scan the range and release found pages.
* After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
- * maps and global counts.
+ * maps and global counts. Page faults cannot race with truncation
+ * in this routine. hugetlb_no_page() prevents page faults in the
+ * truncated range. It checks i_size before allocation, and again
+ * afterwards while holding the page table lock for the page. The
+ * same lock must be acquired to unmap a page.
* hole punch is indicated if end is not LLONG_MAX
* In the hole punch case we scan the range and release found pages.
* Only when releasing a page is the associated region/reserv map
* deleted. The region/reserv map for ranges without associated
- * pages are not modified.
- *
- * Callers of this routine must hold the i_mmap_rwsem in write mode to prevent
- * races with page faults.
- *
+ * pages are not modified. Page faults can race with hole punch.
+ * This is indicated if we find a mapped page.
* Note: If the passed end of range value is beyond the end of file, but
* not LLONG_MAX this routine still performs a hole punch operation.
*/
@@ -422,14 +423,32 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
for (i = 0; i < pagevec_count(&pvec); ++i) {
struct page *page = pvec.pages[i];
+ u32 hash;
index = page->index;
+ hash = hugetlb_fault_mutex_hash(h, current->mm,
+ &pseudo_vma,
+ mapping, index, 0);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
/*
- * A mapped page is impossible as callers should unmap
- * all references before calling. And, i_mmap_rwsem
- * prevents the creation of additional mappings.
+ * If page is mapped, it was faulted in after being
+ * unmapped in caller. Unmap (again) now after taking
+ * the fault mutex. The mutex will prevent faults
+ * until we finish removing the page.
+ *
+ * This race can only happen in the hole punch case.
+ * Getting here in a truncate operation is a bug.
*/
- VM_BUG_ON(page_mapped(page));
+ if (unlikely(page_mapped(page))) {
+ BUG_ON(truncate_op);
+
+ i_mmap_lock_write(mapping);
+ hugetlb_vmdelete_list(&mapping->i_mmap,
+ index * pages_per_huge_page(h),
+ (index + 1) * pages_per_huge_page(h));
+ i_mmap_unlock_write(mapping);
+ }
lock_page(page);
/*
@@ -451,6 +470,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
}
unlock_page(page);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
huge_pagevec_release(&pvec);
cond_resched();
@@ -462,20 +482,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
static void hugetlbfs_evict_inode(struct inode *inode)
{
- struct address_space *mapping = inode->i_mapping;
struct resv_map *resv_map;
- /*
- * The vfs layer guarantees that there are no other users of this
- * inode. Therefore, it would be safe to call remove_inode_hugepages
- * without holding i_mmap_rwsem. We acquire and hold here to be
- * consistent with other callers. Since there will be no contention
- * on the semaphore, overhead is negligible.
- */
- i_mmap_lock_write(mapping);
remove_inode_hugepages(inode, 0, LLONG_MAX);
- i_mmap_unlock_write(mapping);
-
resv_map = (struct resv_map *)inode->i_mapping->private_data;
/* root inode doesn't have the resv_map, so we should check it */
if (resv_map)
@@ -496,8 +505,8 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
- remove_inode_hugepages(inode, offset, LLONG_MAX);
i_mmap_unlock_write(mapping);
+ remove_inode_hugepages(inode, offset, LLONG_MAX);
return 0;
}
@@ -521,7 +530,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
inode_lock(inode);
/* protected by i_mutex */
- if (info->seals & F_SEAL_WRITE) {
+ if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
inode_unlock(inode);
return -EPERM;
}
@@ -531,8 +540,8 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
hugetlb_vmdelete_list(&mapping->i_mmap,
hole_start >> PAGE_SHIFT,
hole_end >> PAGE_SHIFT);
- remove_inode_hugepages(inode, hole_start, hole_end);
i_mmap_unlock_write(mapping);
+ remove_inode_hugepages(inode, hole_start, hole_end);
inode_unlock(inode);
}
@@ -615,11 +624,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
/* addr is the offset within the file (zero based) */
addr = index * hpage_size;
- /*
- * fault mutex taken here, protects against fault path
- * and hole punch. inode_lock previously taken protects
- * against truncation.
- */
+ /* fault mutex taken here protects against fault path and hole punch */
hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
index, addr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -854,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
rc = migrate_huge_page_move_mapping(mapping, newpage, page);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
+
+ /*
+ * page_private is subpool pointer in hugetlb pages. Transfer to
+ * new page. PagePrivate is not associated with page_private for
+ * hugetlb pages and can not be set here as only page_huge_active
+ * pages can be migrated.
+ */
+ if (page_private(page)) {
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ }
+
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
else
diff --git a/fs/inode.c b/fs/inode.c
index 0cd47fe0dbe5..e9d97add2b36 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
return LRU_REMOVED;
}
- /*
- * Recently referenced inodes and inodes with many attached pages
- * get one more pass.
- */
- if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
+ /* recently referenced inodes get one more pass */
+ if (inode->i_state & I_REFERENCED) {
inode->i_state &= ~I_REFERENCED;
spin_unlock(&inode->i_lock);
return LRU_ROTATE;
@@ -2096,14 +2093,8 @@ EXPORT_SYMBOL(inode_dio_wait);
void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask)
{
- unsigned int old_flags, new_flags;
-
WARN_ON_ONCE(flags & ~mask);
- do {
- old_flags = READ_ONCE(inode->i_flags);
- new_flags = (old_flags & ~mask) | flags;
- } while (unlikely(cmpxchg(&inode->i_flags, old_flags,
- new_flags) != old_flags));
+ set_mask_bits(&inode->i_flags, mask, flags);
}
EXPORT_SYMBOL(inode_set_flags);
diff --git a/fs/iomap.c b/fs/iomap.c
index a3088fae567b..897c60215dd1 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
atomic_set(&iop->read_count, 0);
atomic_set(&iop->write_count, 0);
bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+
+ /*
+ * migrate_page_move_mapping() assumes that pages with private data have
+ * their count elevated by 1.
+ */
+ get_page(page);
set_page_private(page, (unsigned long)iop);
SetPagePrivate(page);
return iop;
@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
WARN_ON_ONCE(atomic_read(&iop->write_count));
ClearPagePrivate(page);
set_page_private(page, 0);
+ put_page(page);
kfree(iop);
}
@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
if (page_has_private(page)) {
ClearPagePrivate(page);
+ get_page(newpage);
set_page_private(newpage, page_private(page));
set_page_private(page, 0);
+ put_page(page);
SetPagePrivate(newpage);
}
@@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos = iocb->ki_pos, start = pos;
loff_t end = iocb->ki_pos + count - 1, ret = 0;
unsigned int flags = IOMAP_DIRECT;
+ bool wait_for_completion = is_sync_kiocb(iocb);
struct blk_plug plug;
struct iomap_dio *dio;
@@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->end_io = end_io;
dio->error = 0;
dio->flags = 0;
- dio->wait_for_completion = is_sync_kiocb(iocb);
dio->submit.iter = iter;
dio->submit.waiter = current;
@@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0;
- if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
+ if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
!inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb);
if (ret < 0)
@@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (ret <= 0) {
/* magic error code to fall back to buffered I/O */
if (ret == -ENOTBLK) {
- dio->wait_for_completion = true;
+ wait_for_completion = true;
ret = 0;
}
break;
@@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (dio->flags & IOMAP_DIO_WRITE_FUA)
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
+ /*
+ * We are about to drop our additional submission reference, which
+ * might be the last reference to the dio. There are three
+ * different ways we can progress here:
+ *
+ * (a) If this is the last reference we will always complete and free
+ * the dio ourselves.
+ * (b) If this is not the last reference, and we serve an asynchronous
+ * iocb, we must never touch the dio after the decrement, the
+ * I/O completion handler will complete and free it.
+ * (c) If this is not the last reference, but we serve a synchronous
+ * iocb, the I/O completion handler will wake us up on the drop
+ * of the final reference, and we will complete and free it here
+ * after we got woken by the I/O completion handler.
+ */
+ dio->wait_for_completion = wait_for_completion;
if (!atomic_dec_and_test(&dio->ref)) {
- if (!dio->wait_for_completion)
+ if (!wait_for_completion)
return -EIOCBQUEUED;
for (;;) {
@@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
__set_current_state(TASK_RUNNING);
}
- ret = iomap_dio_complete(dio);
-
- return ret;
+ return iomap_dio_complete(dio);
out_free_dio:
kfree(dio);
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 4ca0b5c18192..b84d635567d3 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -536,8 +536,8 @@ void kernfs_put(struct kernfs_node *kn)
security_release_secctx(kn->iattr->ia_secdata,
kn->iattr->ia_secdata_len);
simple_xattrs_free(&kn->iattr->xattrs);
+ kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
}
- kfree(kn->iattr);
spin_lock(&kernfs_idr_lock);
idr_remove(&root->ino_idr, kn->id.ino);
spin_unlock(&kernfs_idr_lock);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index f8d5021a652e..ae948aaa4c53 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -832,26 +832,35 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
* to see if it supports poll (Neither 'poll' nor 'select' return
* an appropriate error code). When in doubt, set a suitable timeout value.
*/
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
+{
+ struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
+ struct kernfs_open_node *on = kn->attr.open;
+
+ poll_wait(of->file, &on->poll, wait);
+
+ if (of->event != atomic_read(&on->event))
+ return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+
+ return DEFAULT_POLLMASK;
+}
+
static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
{
struct kernfs_open_file *of = kernfs_of(filp);
struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
- struct kernfs_open_node *on = kn->attr.open;
+ __poll_t ret;
if (!kernfs_get_active(kn))
- goto trigger;
+ return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
- poll_wait(filp, &on->poll, wait);
+ if (kn->attr.ops->poll)
+ ret = kn->attr.ops->poll(of, wait);
+ else
+ ret = kernfs_generic_poll(of, wait);
kernfs_put_active(kn);
-
- if (of->event != atomic_read(&on->event))
- goto trigger;
-
- return DEFAULT_POLLMASK;
-
- trigger:
- return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+ return ret;
}
static void kernfs_notify_workfn(struct work_struct *work)
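A kernfs user can now supply its own kernfs_ops->poll and still reuse the generic event check; the sketch below is illustrative only (the example_* names are hypothetical), while kernfs_open_file, kernfs_ops and kernfs_generic_poll() are the interfaces touched above.

#include <linux/kernfs.h>
#include <linux/poll.h>
#include <linux/seq_file.h>

/* Hypothetical readiness test; a real driver would check its own state. */
static bool example_data_ready(struct kernfs_node *kn)
{
	return false;
}

static int example_kf_seq_show(struct seq_file *sf, void *v)
{
	seq_puts(sf, "example\n");
	return 0;
}

/* Custom poll: start from the generic "did kernfs_notify() fire?" answer
 * provided by kernfs_generic_poll() and layer driver readiness on top.
 */
static __poll_t example_kf_poll(struct kernfs_open_file *of,
				struct poll_table_struct *pt)
{
	__poll_t ret = kernfs_generic_poll(of, pt);

	if (example_data_ready(of->kn))
		ret |= EPOLLIN | EPOLLRDNORM;
	return ret;
}

static const struct kernfs_ops example_kf_ops = {
	.seq_show = example_kf_seq_show,
	.poll	  = example_kf_poll,
};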
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 80cebcd94c90..0c1fd945ce42 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -42,7 +42,7 @@ static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
if (kn->iattr)
goto out_unlock;
- kn->iattr = kzalloc(sizeof(struct kernfs_iattrs), GFP_KERNEL);
+ kn->iattr = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL);
if (!kn->iattr)
goto out_unlock;
iattrs = &kn->iattr->ia_iattr;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 3d83b114bb08..dba810cd83b1 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -78,7 +78,7 @@ static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry)
}
extern const struct super_operations kernfs_sops;
-extern struct kmem_cache *kernfs_node_cache;
+extern struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
/*
* inode.c
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index fdf527b6d79c..f3ac352699cf 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -20,7 +20,7 @@
#include "kernfs-internal.h"
-struct kmem_cache *kernfs_node_cache;
+struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
static int kernfs_sop_remount_fs(struct super_block *sb, int *flags, char *data)
{
@@ -196,8 +196,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
return dentry;
knparent = find_next_ancestor(kn, NULL);
- if (WARN_ON(!knparent))
+ if (WARN_ON(!knparent)) {
+ dput(dentry);
return ERR_PTR(-EINVAL);
+ }
do {
struct dentry *dtmp;
@@ -206,8 +208,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
if (kn == knparent)
return dentry;
kntmp = find_next_ancestor(kn, knparent);
- if (WARN_ON(!kntmp))
+ if (WARN_ON(!kntmp)) {
+ dput(dentry);
return ERR_PTR(-EINVAL);
+ }
dtmp = lookup_one_len_unlocked(kntmp->name, dentry,
strlen(kntmp->name));
dput(dentry);
@@ -417,4 +421,9 @@ void __init kernfs_init(void)
0,
SLAB_PANIC | SLAB_TYPESAFE_BY_RCU,
NULL);
+
+ /* Creates slab cache for kernfs inode attributes */
+ kernfs_iattrs_cache = kmem_cache_create("kernfs_iattrs_cache",
+ sizeof(struct kernfs_iattrs),
+ 0, SLAB_PANIC, NULL);
}
diff --git a/fs/locks.c b/fs/locks.c
index ff6af2c32601..eaa1cfaf73b0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1058,7 +1058,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
return -ENOMEM;
}
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
@@ -1100,7 +1100,7 @@ find_conflict:
out:
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
@@ -1138,7 +1138,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
new_fl2 = locks_alloc_lock();
}
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
/*
* New lock request. Walk all POSIX locks and look for conflicts. If
@@ -1312,7 +1312,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
}
out:
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
/*
* Free any unused locks.
*/
@@ -1584,7 +1584,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
return error;
}
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
@@ -1636,13 +1636,13 @@ restart:
locks_insert_block(fl, new_fl, leases_conflict);
trace_break_lease_block(inode, new_fl);
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_blocker, break_time);
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
@@ -1659,7 +1659,7 @@ restart:
}
out:
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
locks_free_lock(new_fl);
return error;
@@ -1729,7 +1729,7 @@ int fcntl_getlease(struct file *filp)
ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1739,7 +1739,7 @@ int fcntl_getlease(struct file *filp)
break;
}
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
}
@@ -1813,7 +1813,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
return -EINVAL;
}
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
error = check_conflicting_open(dentry, arg, lease->fl_flags);
@@ -1884,7 +1884,7 @@ out_setup:
lease->fl_lmops->lm_setup(lease, priv);
out:
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
if (is_deleg)
inode_unlock(inode);
@@ -1907,7 +1907,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
return error;
}
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (fl->fl_file == filp &&
@@ -1920,7 +1920,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
if (victim)
error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
return error;
}
@@ -2643,13 +2643,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
if (list_empty(&ctx->flc_lease))
return;
- percpu_down_read_preempt_disable(&file_rwsem);
+ percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
if (filp == fl->fl_file)
lease_modify(fl, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
- percpu_up_read_preempt_enable(&file_rwsem);
+ percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
}
diff --git a/fs/namespace.c b/fs/namespace.c
index e5de0e372df2..98a8c182af4f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2700,7 +2700,6 @@ static long exact_copy_from_user(void *to, const void __user * from,
if (!access_ok(from, n))
return n;
- current->kernel_uaccess_faults_ok++;
while (n) {
if (__get_user(c, f)) {
memset(t, 0, n);
@@ -2710,7 +2709,6 @@ static long exact_copy_from_user(void *to, const void __user * from,
f++;
n--;
}
- current->kernel_uaccess_faults_ok--;
return n;
}
@@ -2748,7 +2746,7 @@ void *copy_mount_options(const void __user * data)
char *copy_mount_string(const void __user *data)
{
- return data ? strndup_user(data, PAGE_SIZE) : NULL;
+ return data ? strndup_user(data, PATH_MAX) : NULL;
}
/*
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 46d691ba04bc..45b2322e092d 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t count, unsigned int flags)
{
- ssize_t ret;
-
if (file_inode(file_in) == file_inode(file_out))
return -EINVAL;
-retry:
- ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
- if (ret == -EAGAIN)
- goto retry;
- return ret;
+ return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
}
static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 3f23b6840547..bf34ddaa2ad7 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -44,6 +44,7 @@
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
+#include <keys/request_key_auth-type.h>
#include <linux/module.h>
#include "internal.h"
@@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy;
struct idmap_legacy_upcalldata {
struct rpc_pipe_msg pipe_msg;
struct idmap_msg idmap_msg;
- struct key_construction *key_cons;
+ struct key *authkey;
struct idmap *idmap;
};
@@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = {
{ Opt_find_err, NULL }
};
-static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
+static int nfs_idmap_legacy_upcall(struct key *, void *);
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
size_t);
static void idmap_release_pipe(struct inode *);
@@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
static void
nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
{
- struct key_construction *cons = idmap->idmap_upcall_data->key_cons;
+ struct key *authkey = idmap->idmap_upcall_data->authkey;
kfree(idmap->idmap_upcall_data);
idmap->idmap_upcall_data = NULL;
- complete_request_key(cons, ret);
+ complete_request_key(authkey, ret);
+ key_put(authkey);
}
static void
@@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
}
-static int nfs_idmap_legacy_upcall(struct key_construction *cons,
- const char *op,
- void *aux)
+static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
{
struct idmap_legacy_upcalldata *data;
+ struct request_key_auth *rka = get_request_key_auth(authkey);
struct rpc_pipe_msg *msg;
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
- struct key *key = cons->key;
+ struct key *key = rka->target_key;
int ret = -ENOKEY;
if (!aux)
@@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
msg = &data->pipe_msg;
im = &data->idmap_msg;
data->idmap = idmap;
- data->key_cons = cons;
+ data->authkey = key_get(authkey);
ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
if (ret < 0)
@@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
out2:
kfree(data);
out1:
- complete_request_key(cons, ret);
+ complete_request_key(authkey, ret);
return ret;
}
@@ -651,9 +652,10 @@ out:
static ssize_t
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
+ struct request_key_auth *rka;
struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct idmap *idmap = (struct idmap *)rpci->private;
- struct key_construction *cons;
+ struct key *authkey;
struct idmap_msg im;
size_t namelen_in;
int ret = -ENOKEY;
@@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (idmap->idmap_upcall_data == NULL)
goto out_noupcall;
- cons = idmap->idmap_upcall_data->key_cons;
+ authkey = idmap->idmap_upcall_data->authkey;
+ rka = get_request_key_auth(authkey);
if (mlen != sizeof(im)) {
ret = -ENOSPC;
@@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
ret = nfs_idmap_read_and_verify_message(&im,
&idmap->idmap_upcall_data->idmap_msg,
- cons->key, cons->authkey);
+ rka->target_key, authkey);
if (ret >= 0) {
- key_set_timeout(cons->key, nfs_idmap_cache_timeout);
+ key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
ret = mlen;
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 22ce3c8a2f46..0570391eaa16 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1895,6 +1895,11 @@ static int nfs_parse_devname(const char *dev_name,
size_t len;
char *end;
+ if (unlikely(!dev_name || !*dev_name)) {
+ dfprintk(MOUNT, "NFS: device name not specified\n");
+ return -EINVAL;
+ }
+
/* Is the host name protected with square brackets? */
if (*dev_name == '[') {
end = strchr(++dev_name, ']');
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5a0bbf917a32..d09c9f878141 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -238,9 +238,9 @@ out:
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */
-static void nfs_set_pageerror(struct page *page)
+static void nfs_set_pageerror(struct address_space *mapping)
{
- nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
+ nfs_zap_mapping(mapping->host, mapping);
}
/*
@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
nfs_set_page_writeback(page);
WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
- ret = 0;
+ ret = req->wb_context->error;
/* If there is a fatal error that covers this write, just exit */
- if (nfs_error_is_fatal_on_server(req->wb_context->error))
+ if (nfs_error_is_fatal_on_server(ret))
goto out_launder;
+ ret = 0;
if (!nfs_pageio_add_request(pgio, req)) {
ret = pgio->pg_error;
/*
@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
nfs_context_set_write_error(req->wb_context, ret);
if (nfs_error_is_fatal_on_server(ret))
goto out_launder;
- }
+ } else
+ ret = -EAGAIN;
nfs_redirty_request(req);
- ret = -EAGAIN;
} else
nfs_add_stats(page_file_mapping(page)->host,
NFSIOS_WRITEPAGES, 1);
@@ -993,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
(hdr->good_bytes < bytes)) {
- nfs_set_pageerror(req->wb_page);
+ nfs_set_pageerror(page_file_mapping(req->wb_page));
nfs_context_set_write_error(req->wb_context, hdr->error);
goto remove_req;
}
@@ -1347,7 +1348,8 @@ int nfs_updatepage(struct file *file, struct page *page,
unsigned int offset, unsigned int count)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
- struct inode *inode = page_file_mapping(page)->host;
+ struct address_space *mapping = page_file_mapping(page);
+ struct inode *inode = mapping->host;
int status = 0;
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -1365,7 +1367,7 @@ int nfs_updatepage(struct file *file, struct page *page,
status = nfs_writepage_setup(ctx, page, offset, count);
if (status < 0)
- nfs_set_pageerror(page);
+ nfs_set_pageerror(mapping);
else
__set_page_dirty_nobuffers(page);
out:
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index b33f9785b756..72a7681f4046 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
retval = nfsd_idmap_init(net);
if (retval)
goto out_idmap_error;
- nn->nfsd4_lease = 45; /* default lease time */
- nn->nfsd4_grace = 45;
+ nn->nfsd4_lease = 90; /* default lease time */
+ nn->nfsd4_grace = 90;
nn->somebody_reclaimed = false;
nn->clverifier_counter = prandom_u32();
nn->clientid_counter = prandom_u32();
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 9824e32b2f23..7dc98e14655d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
loff_t cloned;
cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
+ if (cloned < 0)
+ return nfserrno(cloned);
if (count && cloned != count)
- cloned = -EINVAL;
- return nfserrno(cloned < 0 ? cloned : 0);
+ return nfserrno(-EINVAL);
+ return 0;
}
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
index 41355ce74ac0..735bfb2e9190 100644
--- a/fs/notify/fanotify/Kconfig
+++ b/fs/notify/fanotify/Kconfig
@@ -2,6 +2,7 @@ config FANOTIFY
bool "Filesystem wide access notification"
select FSNOTIFY
select ANON_INODES
+ select EXPORTFS
default n
---help---
Say Y here to enable fanotify support. fanotify is a file access
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 3723f3d18d20..6b9c27548997 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -13,22 +13,40 @@
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
+#include <linux/statfs.h>
#include "fanotify.h"
static bool should_merge(struct fsnotify_event *old_fsn,
struct fsnotify_event *new_fsn)
{
- struct fanotify_event_info *old, *new;
+ struct fanotify_event *old, *new;
pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
old = FANOTIFY_E(old_fsn);
new = FANOTIFY_E(new_fsn);
- if (old_fsn->inode == new_fsn->inode && old->pid == new->pid &&
- old->path.mnt == new->path.mnt &&
- old->path.dentry == new->path.dentry)
- return true;
+ if (old_fsn->inode != new_fsn->inode || old->pid != new->pid ||
+ old->fh_type != new->fh_type || old->fh_len != new->fh_len)
+ return false;
+
+ if (fanotify_event_has_path(old)) {
+ return old->path.mnt == new->path.mnt &&
+ old->path.dentry == new->path.dentry;
+ } else if (fanotify_event_has_fid(old)) {
+ /*
+ * We want to merge many dirent events in the same dir (i.e.
+ * creates/unlinks/renames), but we do not want to merge dirent
+ * events referring to subdirs with dirent events referring to
+ * non-subdirs; otherwise, the user won't be able to tell from a
+ * mask of FAN_CREATE|FAN_DELETE|FAN_ONDIR whether it describes a
+ * mkdir+unlink pair or an rmdir+create pair of events.
+ */
+ return (old->mask & FS_ISDIR) == (new->mask & FS_ISDIR) &&
+ fanotify_fid_equal(&old->fid, &new->fid, old->fh_len);
+ }
+
+ /* Do not merge events if we failed to encode fid */
return false;
}
@@ -36,20 +54,22 @@ static bool should_merge(struct fsnotify_event *old_fsn,
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
struct fsnotify_event *test_event;
+ struct fanotify_event *new;
pr_debug("%s: list=%p event=%p\n", __func__, list, event);
+ new = FANOTIFY_E(event);
/*
* Don't merge a permission event with any other event so that we know
* the event structure we have created in fanotify_handle_event() is the
* one we should check for permission response.
*/
- if (fanotify_is_perm_event(event->mask))
+ if (fanotify_is_perm_event(new->mask))
return 0;
list_for_each_entry_reverse(test_event, list, list) {
if (should_merge(test_event, event)) {
- test_event->mask |= event->mask;
+ FANOTIFY_E(test_event)->mask |= new->mask;
return 1;
}
}
@@ -57,15 +77,44 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
return 0;
}
+/*
+ * Wait for response to permission event. The function also takes care of
+ * freeing the permission event (or offloads that in case the wait is canceled
+ * by a signal). The function returns 0 in case access got allowed by userspace,
+ * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case
+ * the wait got interrupted by a signal.
+ */
static int fanotify_get_response(struct fsnotify_group *group,
- struct fanotify_perm_event_info *event,
+ struct fanotify_perm_event *event,
struct fsnotify_iter_info *iter_info)
{
int ret;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
- wait_event(group->fanotify_data.access_waitq, event->response);
+ ret = wait_event_killable(group->fanotify_data.access_waitq,
+ event->state == FAN_EVENT_ANSWERED);
+ /* Signal pending? */
+ if (ret < 0) {
+ spin_lock(&group->notification_lock);
+ /* Event reported to userspace and no answer yet? */
+ if (event->state == FAN_EVENT_REPORTED) {
+ /* Event will get freed once userspace answers to it */
+ event->state = FAN_EVENT_CANCELED;
+ spin_unlock(&group->notification_lock);
+ return ret;
+ }
+ /* Event not yet reported? Just remove it. */
+ if (event->state == FAN_EVENT_INIT)
+ fsnotify_remove_queued_event(group, &event->fae.fse);
+ /*
+ * The event may also have been answered if signal delivery raced
+ * with the wakeup. In that case we have nothing to do besides
+ * freeing the event and reporting the error.
+ */
+ spin_unlock(&group->notification_lock);
+ goto out;
+ }
/* userspace responded, convert to something usable */
switch (event->response & ~FAN_AUDIT) {
@@ -81,11 +130,11 @@ static int fanotify_get_response(struct fsnotify_group *group,
if (event->response & FAN_AUDIT)
audit_fanotify(event->response & ~FAN_AUDIT);
- event->response = 0;
-
pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
group, event, ret);
-
+out:
+ fsnotify_destroy_event(group, &event->fae.fse);
+
return ret;
}
@@ -95,11 +144,13 @@ static int fanotify_get_response(struct fsnotify_group *group,
* been included within the event mask, but have not been explicitly
* requested by the user, will not be present in the returned mask.
*/
-static u32 fanotify_group_event_mask(struct fsnotify_iter_info *iter_info,
- u32 event_mask, const void *data,
- int data_type)
+static u32 fanotify_group_event_mask(struct fsnotify_group *group,
+ struct fsnotify_iter_info *iter_info,
+ u32 event_mask, const void *data,
+ int data_type)
{
__u32 marks_mask = 0, marks_ignored_mask = 0;
+ __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS;
const struct path *path = data;
struct fsnotify_mark *mark;
int type;
@@ -107,14 +158,14 @@ static u32 fanotify_group_event_mask(struct fsnotify_iter_info *iter_info,
pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
__func__, iter_info->report_mask, event_mask, data, data_type);
- /* If we don't have enough info to send an event to userspace say no */
- if (data_type != FSNOTIFY_EVENT_PATH)
- return 0;
-
- /* Sorry, fanotify only gives a damn about files and dirs */
- if (!d_is_reg(path->dentry) &&
- !d_can_lookup(path->dentry))
- return 0;
+ if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
+ /* Do we have path to open a file descriptor? */
+ if (data_type != FSNOTIFY_EVENT_PATH)
+ return 0;
+ /* Path type events are only relevant for files and dirs */
+ if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
+ return 0;
+ }
fsnotify_foreach_obj_type(type) {
if (!fsnotify_iter_should_report_type(iter_info, type))
@@ -133,20 +184,106 @@ static u32 fanotify_group_event_mask(struct fsnotify_iter_info *iter_info,
marks_ignored_mask |= mark->ignored_mask;
}
- if (d_is_dir(path->dentry) &&
+ test_mask = event_mask & marks_mask & ~marks_ignored_mask;
+
+ /*
+ * dirent modification events (create/delete/move) do not carry the
+ * child entry name/inode information. Instead, we report FAN_ONDIR
+ * for mkdir/rmdir so user can differentiate them from creat/unlink.
+ *
+ * For backward compatibility and consistency, do not report FAN_ONDIR
+ * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
+ * to user in FAN_REPORT_FID mode for all event types.
+ */
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
+ /* Do not report FAN_ONDIR without any event */
+ if (!(test_mask & ~FAN_ONDIR))
+ return 0;
+ } else {
+ user_mask &= ~FAN_ONDIR;
+ }
+
+ if (event_mask & FS_ISDIR &&
!(marks_mask & FS_ISDIR & ~marks_ignored_mask))
return 0;
- return event_mask & FANOTIFY_OUTGOING_EVENTS & marks_mask &
- ~marks_ignored_mask;
+ return test_mask & user_mask;
+}
+
+static int fanotify_encode_fid(struct fanotify_event *event,
+ struct inode *inode, gfp_t gfp,
+ __kernel_fsid_t *fsid)
+{
+ struct fanotify_fid *fid = &event->fid;
+ int dwords, bytes = 0;
+ int err, type;
+
+ fid->ext_fh = NULL;
+ dwords = 0;
+ err = -ENOENT;
+ type = exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);
+ if (!dwords)
+ goto out_err;
+
+ bytes = dwords << 2;
+ if (bytes > FANOTIFY_INLINE_FH_LEN) {
+ /* Treat failure to allocate fh as failure to allocate event */
+ err = -ENOMEM;
+ fid->ext_fh = kmalloc(bytes, gfp);
+ if (!fid->ext_fh)
+ goto out_err;
+ }
+
+ type = exportfs_encode_inode_fh(inode, fanotify_fid_fh(fid, bytes),
+ &dwords, NULL);
+ err = -EINVAL;
+ if (!type || type == FILEID_INVALID || bytes != dwords << 2)
+ goto out_err;
+
+ fid->fsid = *fsid;
+ event->fh_len = bytes;
+
+ return type;
+
+out_err:
+ pr_warn_ratelimited("fanotify: failed to encode fid (fsid=%x.%x, "
+ "type=%d, bytes=%d, err=%i)\n",
+ fsid->val[0], fsid->val[1], type, bytes, err);
+ kfree(fid->ext_fh);
+ fid->ext_fh = NULL;
+ event->fh_len = 0;
+
+ return FILEID_INVALID;
}
-struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
- struct inode *inode, u32 mask,
- const struct path *path)
+/*
+ * The inode to use as identifier when reporting fid depends on the event.
+ * Report the modified directory inode on dirent modification events.
+ * Report the "victim" inode otherwise.
+ * For example:
+ * FS_ATTRIB reports the child inode even if reported on a watched parent.
+ * FS_CREATE reports the modified dir inode and not the created inode.
+ */
+static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask,
+ const void *data, int data_type)
{
- struct fanotify_event_info *event = NULL;
+ if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
+ return to_tell;
+ else if (data_type == FSNOTIFY_EVENT_INODE)
+ return (struct inode *)data;
+ else if (data_type == FSNOTIFY_EVENT_PATH)
+ return d_inode(((struct path *)data)->dentry);
+ return NULL;
+}
+
+struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
+ struct inode *inode, u32 mask,
+ const void *data, int data_type,
+ __kernel_fsid_t *fsid)
+{
+ struct fanotify_event *event = NULL;
gfp_t gfp = GFP_KERNEL_ACCOUNT;
+ struct inode *id = fanotify_fid_inode(inode, mask, data, data_type);
/*
* For queues with unlimited length lost events are not expected and
@@ -160,28 +297,36 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
memalloc_use_memcg(group->memcg);
if (fanotify_is_perm_event(mask)) {
- struct fanotify_perm_event_info *pevent;
+ struct fanotify_perm_event *pevent;
pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
if (!pevent)
goto out;
event = &pevent->fae;
pevent->response = 0;
+ pevent->state = FAN_EVENT_INIT;
goto init;
}
event = kmem_cache_alloc(fanotify_event_cachep, gfp);
if (!event)
goto out;
init: __maybe_unused
- fsnotify_init_event(&event->fse, inode, mask);
+ fsnotify_init_event(&event->fse, inode);
+ event->mask = mask;
if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
event->pid = get_pid(task_pid(current));
else
event->pid = get_pid(task_tgid(current));
- if (path) {
- event->path = *path;
+ event->fh_len = 0;
+ if (id && FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
+ /* Report the event without a file identifier on encode error */
+ event->fh_type = fanotify_encode_fid(event, id, gfp, fsid);
+ } else if (data_type == FSNOTIFY_EVENT_PATH) {
+ event->fh_type = FILEID_ROOT;
+ event->path = *((struct path *)data);
path_get(&event->path);
} else {
+ event->fh_type = FILEID_INVALID;
event->path.mnt = NULL;
event->path.dentry = NULL;
}
@@ -190,6 +335,29 @@ out:
return event;
}
+/*
+ * Get cached fsid of the filesystem containing the object from any connector.
+ * All connectors are supposed to have the same fsid, but we do not verify that
+ * here.
+ */
+static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
+{
+ int type;
+ __kernel_fsid_t fsid = {};
+
+ fsnotify_foreach_obj_type(type) {
+ if (!fsnotify_iter_should_report_type(iter_info, type))
+ continue;
+
+ fsid = iter_info->marks[type]->connector->fsid;
+ if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
+ continue;
+ return fsid;
+ }
+
+ return fsid;
+}
+
static int fanotify_handle_event(struct fsnotify_group *group,
struct inode *inode,
u32 mask, const void *data, int data_type,
@@ -197,14 +365,22 @@ static int fanotify_handle_event(struct fsnotify_group *group,
struct fsnotify_iter_info *iter_info)
{
int ret = 0;
- struct fanotify_event_info *event;
+ struct fanotify_event *event;
struct fsnotify_event *fsn_event;
+ __kernel_fsid_t fsid = {};
BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
+ BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB);
BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
+ BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO);
+ BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
+ BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
+ BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
+ BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
+ BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
@@ -213,9 +389,10 @@ static int fanotify_handle_event(struct fsnotify_group *group,
BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
- BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 12);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);
- mask = fanotify_group_event_mask(iter_info, mask, data, data_type);
+ mask = fanotify_group_event_mask(group, iter_info, mask, data,
+ data_type);
if (!mask)
return 0;
@@ -231,7 +408,11 @@ static int fanotify_handle_event(struct fsnotify_group *group,
return 0;
}
- event = fanotify_alloc_event(group, inode, mask, data);
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_FID))
+ fsid = fanotify_get_fsid(iter_info);
+
+ event = fanotify_alloc_event(group, inode, mask, data, data_type,
+ &fsid);
ret = -ENOMEM;
if (unlikely(!event)) {
/*
@@ -255,7 +436,6 @@ static int fanotify_handle_event(struct fsnotify_group *group,
} else if (fanotify_is_perm_event(mask)) {
ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
iter_info);
- fsnotify_destroy_event(group, fsn_event);
}
finish:
if (fanotify_is_perm_event(mask))
@@ -275,12 +455,15 @@ static void fanotify_free_group_priv(struct fsnotify_group *group)
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
- struct fanotify_event_info *event;
+ struct fanotify_event *event;
event = FANOTIFY_E(fsn_event);
- path_put(&event->path);
+ if (fanotify_event_has_path(event))
+ path_put(&event->path);
+ else if (fanotify_event_has_ext_fh(event))
+ kfree(event->fid.ext_fh);
put_pid(event->pid);
- if (fanotify_is_perm_event(fsn_event->mask)) {
+ if (fanotify_is_perm_event(event->mask)) {
kmem_cache_free(fanotify_perm_event_cachep,
FANOTIFY_PE(fsn_event));
return;
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index ea05b8a401e7..68b30504284c 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -2,26 +2,112 @@
#include <linux/fsnotify_backend.h>
#include <linux/path.h>
#include <linux/slab.h>
+#include <linux/exportfs.h>
extern struct kmem_cache *fanotify_mark_cache;
extern struct kmem_cache *fanotify_event_cachep;
extern struct kmem_cache *fanotify_perm_event_cachep;
+/* Possible states of the permission event */
+enum {
+ FAN_EVENT_INIT,
+ FAN_EVENT_REPORTED,
+ FAN_EVENT_ANSWERED,
+ FAN_EVENT_CANCELED,
+};
+
+/*
+ * 3 dwords are sufficient for most local fs (64bit ino, 32bit generation).
+ * For 32bit arch, fid increases the size of fanotify_event by 12 bytes and
+ * fh_* fields increase the size of fanotify_event by another 4 bytes.
+ * For 64bit arch, fid increases the size of fanotify_event by 8 bytes and
+ * fh_* fields are packed in a hole after mask.
+ */
+#if BITS_PER_LONG == 32
+#define FANOTIFY_INLINE_FH_LEN (3 << 2)
+#else
+#define FANOTIFY_INLINE_FH_LEN (4 << 2)
+#endif
+
+struct fanotify_fid {
+ __kernel_fsid_t fsid;
+ union {
+ unsigned char fh[FANOTIFY_INLINE_FH_LEN];
+ unsigned char *ext_fh;
+ };
+};
+
+static inline void *fanotify_fid_fh(struct fanotify_fid *fid,
+ unsigned int fh_len)
+{
+ return fh_len <= FANOTIFY_INLINE_FH_LEN ? fid->fh : fid->ext_fh;
+}
+
+static inline bool fanotify_fid_equal(struct fanotify_fid *fid1,
+ struct fanotify_fid *fid2,
+ unsigned int fh_len)
+{
+ return fid1->fsid.val[0] == fid2->fsid.val[0] &&
+ fid1->fsid.val[1] == fid2->fsid.val[1] &&
+ !memcmp(fanotify_fid_fh(fid1, fh_len),
+ fanotify_fid_fh(fid2, fh_len), fh_len);
+}
+
/*
* Structure for normal fanotify events. It gets allocated in
* fanotify_handle_event() and freed when the information is retrieved by
* userspace
*/
-struct fanotify_event_info {
+struct fanotify_event {
struct fsnotify_event fse;
+ u32 mask;
/*
- * We hold ref to this path so it may be dereferenced at any point
- * during this object's lifetime
+ * Those fields are outside fanotify_fid to pack fanotify_event nicely
+ * on 64bit arch and to use fh_type as an indication of whether path
+ * or fid are used in the union:
+ * FILEID_ROOT (0) for path, > 0 for fid, FILEID_INVALID for neither.
*/
- struct path path;
+ u8 fh_type;
+ u8 fh_len;
+ u16 pad;
+ union {
+ /*
+ * We hold ref to this path so it may be dereferenced at any
+ * point during this object's lifetime
+ */
+ struct path path;
+ /*
+ * With FAN_REPORT_FID, we do not hold any reference on the
+ * victim object. Instead we store its NFS file handle and its
+ * filesystem's fsid as a unique identifier.
+ */
+ struct fanotify_fid fid;
+ };
struct pid *pid;
};
+static inline bool fanotify_event_has_path(struct fanotify_event *event)
+{
+ return event->fh_type == FILEID_ROOT;
+}
+
+static inline bool fanotify_event_has_fid(struct fanotify_event *event)
+{
+ return event->fh_type != FILEID_ROOT &&
+ event->fh_type != FILEID_INVALID;
+}
+
+static inline bool fanotify_event_has_ext_fh(struct fanotify_event *event)
+{
+ return fanotify_event_has_fid(event) &&
+ event->fh_len > FANOTIFY_INLINE_FH_LEN;
+}
+
+static inline void *fanotify_event_fh(struct fanotify_event *event)
+{
+ return fanotify_fid_fh(&event->fid, event->fh_len);
+}
+
/*
* Structure for permission fanotify events. It gets allocated and freed in
* fanotify_handle_event() since we wait there for user response. When the
@@ -29,16 +115,17 @@ struct fanotify_event_info {
* group->notification_list to group->fanotify_data.access_list to wait for
* user response.
*/
-struct fanotify_perm_event_info {
- struct fanotify_event_info fae;
- int response; /* userspace answer to question */
+struct fanotify_perm_event {
+ struct fanotify_event fae;
+ unsigned short response; /* userspace answer to the event */
+ unsigned short state; /* state of the event */
int fd; /* fd we passed to userspace for this event */
};
-static inline struct fanotify_perm_event_info *
+static inline struct fanotify_perm_event *
FANOTIFY_PE(struct fsnotify_event *fse)
{
- return container_of(fse, struct fanotify_perm_event_info, fae.fse);
+ return container_of(fse, struct fanotify_perm_event, fae.fse);
}
static inline bool fanotify_is_perm_event(u32 mask)
@@ -47,11 +134,12 @@ static inline bool fanotify_is_perm_event(u32 mask)
mask & FANOTIFY_PERM_EVENTS;
}
-static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse)
+static inline struct fanotify_event *FANOTIFY_E(struct fsnotify_event *fse)
{
- return container_of(fse, struct fanotify_event_info, fse);
+ return container_of(fse, struct fanotify_event, fse);
}
-struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
- struct inode *inode, u32 mask,
- const struct path *path);
+struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
+ struct inode *inode, u32 mask,
+ const void *data, int data_type,
+ __kernel_fsid_t *fsid);
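
The fanotify_fid union above keeps file handles of up to FANOTIFY_INLINE_FH_LEN bytes inside the event itself and spills longer handles into a separately allocated buffer, which fanotify_free_event() later kfree()s. A minimal stand-alone sketch of that inline-versus-external pattern (hypothetical names; the 16-byte inline limit of the 64-bit case is assumed) looks like this:

#include <stdlib.h>
#include <string.h>

#define INLINE_FH_LEN	16	/* assumed 64-bit inline limit, see above */

struct fid_buf {
	unsigned int len;
	union {
		unsigned char fh[INLINE_FH_LEN];
		unsigned char *ext_fh;
	};
};

/* Return a pointer to where the handle bytes actually live. */
static void *fid_buf_fh(struct fid_buf *f)
{
	return f->len <= INLINE_FH_LEN ? (void *)f->fh : f->ext_fh;
}

/* Copy in a handle: stored inline when it fits, heap-allocated otherwise. */
static int fid_buf_set(struct fid_buf *f, const void *fh, unsigned int len)
{
	f->len = len;
	if (len > INLINE_FH_LEN) {
		f->ext_fh = malloc(len);
		if (!f->ext_fh)
			return -1;
	}
	memcpy(fid_buf_fh(f), fh, len);
	return 0;
}

/* Only the external buffer needs freeing; inline storage does not. */
static void fid_buf_clear(struct fid_buf *f)
{
	if (f->len > INLINE_FH_LEN)
		free(f->ext_fh);
	f->len = 0;
}

int main(void)
{
	struct fid_buf f;
	unsigned char small[8] = { 0 }, big[64] = { 0 };

	fid_buf_set(&f, small, sizeof(small));	/* kept inline */
	fid_buf_clear(&f);
	fid_buf_set(&f, big, sizeof(big));	/* stored externally */
	fid_buf_clear(&f);
	return 0;
}
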
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9c870b0d2b56..56992b32c6bb 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -17,6 +17,8 @@
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/memcontrol.h>
+#include <linux/statfs.h>
+#include <linux/exportfs.h>
#include <asm/ioctls.h>
@@ -47,33 +49,55 @@ struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
+#define FANOTIFY_EVENT_ALIGN 4
+
+static int fanotify_event_info_len(struct fanotify_event *event)
+{
+ if (!fanotify_event_has_fid(event))
+ return 0;
+
+ return roundup(sizeof(struct fanotify_event_info_fid) +
+ sizeof(struct file_handle) + event->fh_len,
+ FANOTIFY_EVENT_ALIGN);
+}
+
/*
* Get an fsnotify notification event if one exists and is small
* enough to fit in "count". Return an error pointer if the count
- * is not large enough.
- *
- * Called with the group->notification_lock held.
+ * is not large enough. When a permission event is dequeued, its state is
+ * updated accordingly.
*/
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
{
- assert_spin_locked(&group->notification_lock);
+ size_t event_size = FAN_EVENT_METADATA_LEN;
+ struct fsnotify_event *fsn_event = NULL;
pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
+ spin_lock(&group->notification_lock);
if (fsnotify_notify_queue_is_empty(group))
- return NULL;
+ goto out;
- if (FAN_EVENT_METADATA_LEN > count)
- return ERR_PTR(-EINVAL);
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
+ event_size += fanotify_event_info_len(
+ FANOTIFY_E(fsnotify_peek_first_event(group)));
+ }
- /* held the notification_lock the whole time, so this is the
- * same event we peeked above */
- return fsnotify_remove_first_event(group);
+ if (event_size > count) {
+ fsn_event = ERR_PTR(-EINVAL);
+ goto out;
+ }
+ fsn_event = fsnotify_remove_first_event(group);
+ if (fanotify_is_perm_event(FANOTIFY_E(fsn_event)->mask))
+ FANOTIFY_PE(fsn_event)->state = FAN_EVENT_REPORTED;
+out:
+ spin_unlock(&group->notification_lock);
+ return fsn_event;
}
static int create_fd(struct fsnotify_group *group,
- struct fanotify_event_info *event,
+ struct fanotify_event *event,
struct file **file)
{
int client_fd;
@@ -114,62 +138,32 @@ static int create_fd(struct fsnotify_group *group,
return client_fd;
}
-static int fill_event_metadata(struct fsnotify_group *group,
- struct fanotify_event_metadata *metadata,
- struct fsnotify_event *fsn_event,
- struct file **file)
-{
- int ret = 0;
- struct fanotify_event_info *event;
-
- pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
- group, metadata, fsn_event);
-
- *file = NULL;
- event = container_of(fsn_event, struct fanotify_event_info, fse);
- metadata->event_len = FAN_EVENT_METADATA_LEN;
- metadata->metadata_len = FAN_EVENT_METADATA_LEN;
- metadata->vers = FANOTIFY_METADATA_VERSION;
- metadata->reserved = 0;
- metadata->mask = fsn_event->mask & FANOTIFY_OUTGOING_EVENTS;
- metadata->pid = pid_vnr(event->pid);
- if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
- metadata->fd = FAN_NOFD;
- else {
- metadata->fd = create_fd(group, event, file);
- if (metadata->fd < 0)
- ret = metadata->fd;
- }
-
- return ret;
-}
-
-static struct fanotify_perm_event_info *dequeue_event(
- struct fsnotify_group *group, int fd)
+/*
+ * Finish processing of permission event by setting it to ANSWERED state and
+ * drop group->notification_lock.
+ */
+static void finish_permission_event(struct fsnotify_group *group,
+ struct fanotify_perm_event *event,
+ unsigned int response)
+ __releases(&group->notification_lock)
{
- struct fanotify_perm_event_info *event, *return_e = NULL;
-
- spin_lock(&group->notification_lock);
- list_for_each_entry(event, &group->fanotify_data.access_list,
- fae.fse.list) {
- if (event->fd != fd)
- continue;
+ bool destroy = false;
- list_del_init(&event->fae.fse.list);
- return_e = event;
- break;
- }
+ assert_spin_locked(&group->notification_lock);
+ event->response = response;
+ if (event->state == FAN_EVENT_CANCELED)
+ destroy = true;
+ else
+ event->state = FAN_EVENT_ANSWERED;
spin_unlock(&group->notification_lock);
-
- pr_debug("%s: found return_re=%p\n", __func__, return_e);
-
- return return_e;
+ if (destroy)
+ fsnotify_destroy_event(group, &event->fae.fse);
}
static int process_access_response(struct fsnotify_group *group,
struct fanotify_response *response_struct)
{
- struct fanotify_perm_event_info *event;
+ struct fanotify_perm_event *event;
int fd = response_struct->fd;
int response = response_struct->response;
@@ -194,48 +188,115 @@ static int process_access_response(struct fsnotify_group *group,
if ((response & FAN_AUDIT) && !FAN_GROUP_FLAG(group, FAN_ENABLE_AUDIT))
return -EINVAL;
- event = dequeue_event(group, fd);
- if (!event)
- return -ENOENT;
+ spin_lock(&group->notification_lock);
+ list_for_each_entry(event, &group->fanotify_data.access_list,
+ fae.fse.list) {
+ if (event->fd != fd)
+ continue;
- event->response = response;
- wake_up(&group->fanotify_data.access_waitq);
+ list_del_init(&event->fae.fse.list);
+ finish_permission_event(group, event, response);
+ wake_up(&group->fanotify_data.access_waitq);
+ return 0;
+ }
+ spin_unlock(&group->notification_lock);
+
+ return -ENOENT;
+}
+
+static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
+{
+ struct fanotify_event_info_fid info = { };
+ struct file_handle handle = { };
+ size_t fh_len = event->fh_len;
+ size_t len = fanotify_event_info_len(event);
+
+ if (!len)
+ return 0;
+
+ if (WARN_ON_ONCE(len < sizeof(info) + sizeof(handle) + fh_len))
+ return -EFAULT;
+
+ /* Copy event info fid header followed by variable sized file handle */
+ info.hdr.info_type = FAN_EVENT_INFO_TYPE_FID;
+ info.hdr.len = len;
+ info.fsid = event->fid.fsid;
+ if (copy_to_user(buf, &info, sizeof(info)))
+ return -EFAULT;
+
+ buf += sizeof(info);
+ len -= sizeof(info);
+ handle.handle_type = event->fh_type;
+ handle.handle_bytes = fh_len;
+ if (copy_to_user(buf, &handle, sizeof(handle)))
+ return -EFAULT;
+
+ buf += sizeof(handle);
+ len -= sizeof(handle);
+ if (copy_to_user(buf, fanotify_event_fh(event), fh_len))
+ return -EFAULT;
+
+ /* Pad with 0's */
+ buf += fh_len;
+ len -= fh_len;
+ WARN_ON_ONCE(len < 0 || len >= FANOTIFY_EVENT_ALIGN);
+ if (len > 0 && clear_user(buf, len))
+ return -EFAULT;
return 0;
}
static ssize_t copy_event_to_user(struct fsnotify_group *group,
- struct fsnotify_event *event,
+ struct fsnotify_event *fsn_event,
char __user *buf, size_t count)
{
- struct fanotify_event_metadata fanotify_event_metadata;
- struct file *f;
- int fd, ret;
-
- pr_debug("%s: group=%p event=%p\n", __func__, group, event);
-
- ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
- if (ret < 0)
- return ret;
+ struct fanotify_event_metadata metadata;
+ struct fanotify_event *event;
+ struct file *f = NULL;
+ int ret, fd = FAN_NOFD;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);
+
+ event = container_of(fsn_event, struct fanotify_event, fse);
+ metadata.event_len = FAN_EVENT_METADATA_LEN;
+ metadata.metadata_len = FAN_EVENT_METADATA_LEN;
+ metadata.vers = FANOTIFY_METADATA_VERSION;
+ metadata.reserved = 0;
+ metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS;
+ metadata.pid = pid_vnr(event->pid);
+
+ if (fanotify_event_has_path(event)) {
+ fd = create_fd(group, event, &f);
+ if (fd < 0)
+ return fd;
+ } else if (fanotify_event_has_fid(event)) {
+ metadata.event_len += fanotify_event_info_len(event);
+ }
+ metadata.fd = fd;
- fd = fanotify_event_metadata.fd;
ret = -EFAULT;
/*
* Sanity check copy size in case get_one_event() and
* fill_event_metadata() event_len sizes ever get out of sync.
*/
- if (WARN_ON_ONCE(fanotify_event_metadata.event_len > count))
+ if (WARN_ON_ONCE(metadata.event_len > count))
goto out_close_fd;
- if (copy_to_user(buf, &fanotify_event_metadata,
- fanotify_event_metadata.event_len))
+
+ if (copy_to_user(buf, &metadata, FAN_EVENT_METADATA_LEN))
goto out_close_fd;
if (fanotify_is_perm_event(event->mask))
- FANOTIFY_PE(event)->fd = fd;
+ FANOTIFY_PE(fsn_event)->fd = fd;
- if (fd != FAN_NOFD)
+ if (fanotify_event_has_path(event)) {
fd_install(fd, f);
- return fanotify_event_metadata.event_len;
+ } else if (fanotify_event_has_fid(event)) {
+ ret = copy_fid_to_user(event, buf + FAN_EVENT_METADATA_LEN);
+ if (ret < 0)
+ return ret;
+ }
+
+ return metadata.event_len;
out_close_fd:
if (fd != FAN_NOFD) {
@@ -276,10 +337,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
add_wait_queue(&group->notification_waitq, &wait);
while (1) {
- spin_lock(&group->notification_lock);
kevent = get_one_event(group, count);
- spin_unlock(&group->notification_lock);
-
if (IS_ERR(kevent)) {
ret = PTR_ERR(kevent);
break;
@@ -316,11 +374,13 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
* Permission events get queued to wait for response. Other
* events can be destroyed now.
*/
- if (!fanotify_is_perm_event(kevent->mask)) {
+ if (!fanotify_is_perm_event(FANOTIFY_E(kevent)->mask)) {
fsnotify_destroy_event(group, kevent);
} else {
if (ret <= 0) {
- FANOTIFY_PE(kevent)->response = FAN_DENY;
+ spin_lock(&group->notification_lock);
+ finish_permission_event(group,
+ FANOTIFY_PE(kevent), FAN_DENY);
wake_up(&group->fanotify_data.access_waitq);
} else {
spin_lock(&group->notification_lock);
@@ -370,7 +430,7 @@ static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t
static int fanotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
- struct fanotify_perm_event_info *event, *next;
+ struct fanotify_perm_event *event;
struct fsnotify_event *fsn_event;
/*
@@ -385,13 +445,12 @@ static int fanotify_release(struct inode *ignored, struct file *file)
* and simulate reply from userspace.
*/
spin_lock(&group->notification_lock);
- list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
- fae.fse.list) {
- pr_debug("%s: found group=%p event=%p\n", __func__, group,
- event);
-
+ while (!list_empty(&group->fanotify_data.access_list)) {
+ event = list_first_entry(&group->fanotify_data.access_list,
+ struct fanotify_perm_event, fae.fse.list);
list_del_init(&event->fae.fse.list);
- event->response = FAN_ALLOW;
+ finish_permission_event(group, event, FAN_ALLOW);
+ spin_lock(&group->notification_lock);
}
/*
@@ -401,13 +460,14 @@ static int fanotify_release(struct inode *ignored, struct file *file)
*/
while (!fsnotify_notify_queue_is_empty(group)) {
fsn_event = fsnotify_remove_first_event(group);
- if (!(fsn_event->mask & FANOTIFY_PERM_EVENTS)) {
+ if (!(FANOTIFY_E(fsn_event)->mask & FANOTIFY_PERM_EVENTS)) {
spin_unlock(&group->notification_lock);
fsnotify_destroy_event(group, fsn_event);
- spin_lock(&group->notification_lock);
} else {
- FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+ finish_permission_event(group, FANOTIFY_PE(fsn_event),
+ FAN_ALLOW);
}
+ spin_lock(&group->notification_lock);
}
spin_unlock(&group->notification_lock);
@@ -598,7 +658,8 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
fsnotify_connp_t *connp,
- unsigned int type)
+ unsigned int type,
+ __kernel_fsid_t *fsid)
{
struct fsnotify_mark *mark;
int ret;
@@ -611,7 +672,7 @@ static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
return ERR_PTR(-ENOMEM);
fsnotify_init_mark(mark, group);
- ret = fsnotify_add_mark_locked(mark, connp, type, 0);
+ ret = fsnotify_add_mark_locked(mark, connp, type, 0, fsid);
if (ret) {
fsnotify_put_mark(mark);
return ERR_PTR(ret);
@@ -623,7 +684,8 @@ static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
static int fanotify_add_mark(struct fsnotify_group *group,
fsnotify_connp_t *connp, unsigned int type,
- __u32 mask, unsigned int flags)
+ __u32 mask, unsigned int flags,
+ __kernel_fsid_t *fsid)
{
struct fsnotify_mark *fsn_mark;
__u32 added;
@@ -631,7 +693,7 @@ static int fanotify_add_mark(struct fsnotify_group *group,
mutex_lock(&group->mark_mutex);
fsn_mark = fsnotify_find_mark(connp, group);
if (!fsn_mark) {
- fsn_mark = fanotify_add_new_mark(group, connp, type);
+ fsn_mark = fanotify_add_new_mark(group, connp, type, fsid);
if (IS_ERR(fsn_mark)) {
mutex_unlock(&group->mark_mutex);
return PTR_ERR(fsn_mark);
@@ -648,23 +710,23 @@ static int fanotify_add_mark(struct fsnotify_group *group,
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
struct vfsmount *mnt, __u32 mask,
- unsigned int flags)
+ unsigned int flags, __kernel_fsid_t *fsid)
{
return fanotify_add_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags);
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags, fsid);
}
static int fanotify_add_sb_mark(struct fsnotify_group *group,
- struct super_block *sb, __u32 mask,
- unsigned int flags)
+ struct super_block *sb, __u32 mask,
+ unsigned int flags, __kernel_fsid_t *fsid)
{
return fanotify_add_mark(group, &sb->s_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_SB, mask, flags);
+ FSNOTIFY_OBJ_TYPE_SB, mask, flags, fsid);
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
struct inode *inode, __u32 mask,
- unsigned int flags)
+ unsigned int flags, __kernel_fsid_t *fsid)
{
pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
@@ -679,7 +741,7 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
return 0;
return fanotify_add_mark(group, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, mask, flags);
+ FSNOTIFY_OBJ_TYPE_INODE, mask, flags, fsid);
}
/* fanotify syscalls */
@@ -688,7 +750,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
struct fsnotify_group *group;
int f_flags, fd;
struct user_struct *user;
- struct fanotify_event_info *oevent;
+ struct fanotify_event *oevent;
pr_debug("%s: flags=%x event_f_flags=%x\n",
__func__, flags, event_f_flags);
@@ -715,6 +777,10 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
return -EINVAL;
}
+ if ((flags & FAN_REPORT_FID) &&
+ (flags & FANOTIFY_CLASS_BITS) != FAN_CLASS_NOTIF)
+ return -EINVAL;
+
user = get_current_user();
if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
free_uid(user);
@@ -739,7 +805,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
atomic_inc(&user->fanotify_listeners);
group->memcg = get_mem_cgroup_from_mm(current->mm);
- oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL);
+ oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL,
+ FSNOTIFY_EVENT_NONE, NULL);
if (unlikely(!oevent)) {
fd = -ENOMEM;
goto out_destroy_group;
@@ -801,6 +868,48 @@ out_destroy_group:
return fd;
}
+/* Check if filesystem can encode a unique fid */
+static int fanotify_test_fid(struct path *path, __kernel_fsid_t *fsid)
+{
+ __kernel_fsid_t root_fsid;
+ int err;
+
+ /*
+ * Make sure path is not in filesystem with zero fsid (e.g. tmpfs).
+ */
+ err = vfs_get_fsid(path->dentry, fsid);
+ if (err)
+ return err;
+
+ if (!fsid->val[0] && !fsid->val[1])
+ return -ENODEV;
+
+ /*
+ * Make sure path is not inside a filesystem subvolume (e.g. btrfs)
+ * which uses a different fsid than sb root.
+ */
+ err = vfs_get_fsid(path->dentry->d_sb->s_root, &root_fsid);
+ if (err)
+ return err;
+
+ if (root_fsid.val[0] != fsid->val[0] ||
+ root_fsid.val[1] != fsid->val[1])
+ return -EXDEV;
+
+ /*
+ * We need to make sure that the file system supports at least
+ * encoding a file handle so user can use name_to_handle_at() to
+ * compare fid returned with event to the file handle of watched
+ * objects. However, name_to_handle_at() requires that the
+ * filesystem also supports decoding file handles.
+ */
+ if (!path->dentry->d_sb->s_export_op ||
+ !path->dentry->d_sb->s_export_op->fh_to_dentry)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
int dfd, const char __user *pathname)
{
@@ -809,6 +918,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
struct fsnotify_group *group;
struct fd f;
struct path path;
+ __kernel_fsid_t __fsid, *fsid = NULL;
u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
int ret;
@@ -871,6 +981,18 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
group->priority == FS_PRIO_0)
goto fput_and_out;
+ /*
+ * Events with data type inode do not carry enough information to report
+ * event->fd, so we do not allow setting a mask for inode events unless
+ * group supports reporting fid.
+ * inode events are not supported on a mount mark, because they do not
+ * carry enough information (i.e. path) to be filtered by mount point.
+ */
+ if (mask & FANOTIFY_INODE_EVENTS &&
+ (!FAN_GROUP_FLAG(group, FAN_REPORT_FID) ||
+ mark_type == FAN_MARK_MOUNT))
+ goto fput_and_out;
+
if (flags & FAN_MARK_FLUSH) {
ret = 0;
if (mark_type == FAN_MARK_MOUNT)
@@ -886,6 +1008,14 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
if (ret)
goto fput_and_out;
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
+ ret = fanotify_test_fid(&path, &__fsid);
+ if (ret)
+ goto path_put_and_out;
+
+ fsid = &__fsid;
+ }
+
/* inode held in place by reference to path; group by fget on fd */
if (mark_type == FAN_MARK_INODE)
inode = path.dentry->d_inode;
@@ -896,24 +1026,31 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
case FAN_MARK_ADD:
if (mark_type == FAN_MARK_MOUNT)
- ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
+ ret = fanotify_add_vfsmount_mark(group, mnt, mask,
+ flags, fsid);
else if (mark_type == FAN_MARK_FILESYSTEM)
- ret = fanotify_add_sb_mark(group, mnt->mnt_sb, mask, flags);
+ ret = fanotify_add_sb_mark(group, mnt->mnt_sb, mask,
+ flags, fsid);
else
- ret = fanotify_add_inode_mark(group, inode, mask, flags);
+ ret = fanotify_add_inode_mark(group, inode, mask,
+ flags, fsid);
break;
case FAN_MARK_REMOVE:
if (mark_type == FAN_MARK_MOUNT)
- ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
+ ret = fanotify_remove_vfsmount_mark(group, mnt, mask,
+ flags);
else if (mark_type == FAN_MARK_FILESYSTEM)
- ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask, flags);
+ ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask,
+ flags);
else
- ret = fanotify_remove_inode_mark(group, inode, mask, flags);
+ ret = fanotify_remove_inode_mark(group, inode, mask,
+ flags);
break;
default:
ret = -EINVAL;
}
+path_put_and_out:
path_put(&path);
fput_and_out:
fdput(f);
@@ -950,15 +1087,15 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
*/
static int __init fanotify_user_setup(void)
{
- BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 7);
+ BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 8);
BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
SLAB_PANIC|SLAB_ACCOUNT);
- fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
+ fanotify_event_cachep = KMEM_CACHE(fanotify_event, SLAB_PANIC);
if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
fanotify_perm_event_cachep =
- KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC);
+ KMEM_CACHE(fanotify_perm_event, SLAB_PANIC);
}
return 0;
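
In FAN_REPORT_FID mode the record produced by copy_event_to_user()/copy_fid_to_user() above is the fixed fanotify_event_metadata followed by a fanotify_event_info_fid header, a struct file_handle, the raw handle bytes and zero padding up to 4-byte alignment, all covered by event_len (for example, an 8-byte ext4 handle should give roundup(12 + 8 + 8, 4) = 28 extra bytes). Below is a hedged sketch of a listener consuming that layout; it assumes the uapi definitions added elsewhere in this series (FAN_REPORT_FID, FAN_CREATE, FAN_EVENT_INFO_TYPE_FID, struct fanotify_event_info_fid) are present in the installed headers, and it needs CAP_SYS_ADMIN to run:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	/* Keep the buffer aligned for struct fanotify_event_metadata. */
	char buf[4096] __attribute__((aligned(8)));
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);

	if (fd < 0 ||
	    fanotify_mark(fd, FAN_MARK_ADD, FAN_CREATE | FAN_ONDIR,
			  AT_FDCWD, "/tmp") < 0) {
		perror("fanotify");
		return 1;
	}

	for (;;) {
		ssize_t len = read(fd, buf, sizeof(buf));
		struct fanotify_event_metadata *md = (void *)buf;

		for (; len > 0 && FAN_EVENT_OK(md, len);
		     md = FAN_EVENT_NEXT(md, len)) {
			struct fanotify_event_info_fid *fid;
			struct file_handle *fh;

			/* No extra info record attached to this event. */
			if (md->event_len <= md->metadata_len)
				continue;

			/* The fid info record follows the fixed metadata. */
			fid = (void *)((char *)md + md->metadata_len);
			if (fid->hdr.info_type != FAN_EVENT_INFO_TYPE_FID)
				continue;

			fh = (struct file_handle *)fid->handle;
			printf("mask=0x%llx fsid=%x.%x fh_type=%d fh_len=%u\n",
			       (unsigned long long)md->mask,
			       fid->fsid.val[0], fid->fsid.val[1],
			       fh->handle_type, fh->handle_bytes);
		}
	}
}

The reported handle can then be matched against name_to_handle_at() output for a watched object, which is why fanotify_test_fid() above insists that the filesystem supports exportfs encoding.
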
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index ecf09b6243d9..df06f3da166c 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -328,16 +328,15 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
const unsigned char *file_name, u32 cookie)
{
struct fsnotify_iter_info iter_info = {};
- struct super_block *sb = NULL;
+ struct super_block *sb = to_tell->i_sb;
struct mount *mnt = NULL;
- __u32 mnt_or_sb_mask = 0;
+ __u32 mnt_or_sb_mask = sb->s_fsnotify_mask;
int ret = 0;
__u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
if (data_is == FSNOTIFY_EVENT_PATH) {
mnt = real_mount(((const struct path *)data)->mnt);
- sb = mnt->mnt.mnt_sb;
- mnt_or_sb_mask = mnt->mnt_fsnotify_mask | sb->s_fsnotify_mask;
+ mnt_or_sb_mask |= mnt->mnt_fsnotify_mask;
}
/* An event "on child" is not intended for a mount/sb mark */
if (mask & FS_EVENT_ON_CHILD)
@@ -350,8 +349,8 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
* SRCU because we have no references to any objects and do not
* need SRCU to keep them "alive".
*/
- if (!to_tell->i_fsnotify_marks &&
- (!mnt || (!mnt->mnt_fsnotify_marks && !sb->s_fsnotify_marks)))
+ if (!to_tell->i_fsnotify_marks && !sb->s_fsnotify_marks &&
+ (!mnt || !mnt->mnt_fsnotify_marks))
return 0;
/*
* if this is a modify event we may need to clear the ignored masks
@@ -366,11 +365,11 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+ iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] =
+ fsnotify_first_mark(&sb->s_fsnotify_marks);
if (mnt) {
iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
- iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] =
- fsnotify_first_mark(&sb->s_fsnotify_marks);
}
/*
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
index 7e4578d35b61..74ae60305189 100644
--- a/fs/notify/inotify/inotify.h
+++ b/fs/notify/inotify/inotify.h
@@ -5,6 +5,7 @@
struct inotify_event_info {
struct fsnotify_event fse;
+ u32 mask;
int wd;
u32 sync_cookie;
int name_len;
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index f4184b4f3815..ff30abd6a49b 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -43,11 +43,11 @@ static bool event_compare(struct fsnotify_event *old_fsn,
{
struct inotify_event_info *old, *new;
- if (old_fsn->mask & FS_IN_IGNORED)
- return false;
old = INOTIFY_E(old_fsn);
new = INOTIFY_E(new_fsn);
- if ((old_fsn->mask == new_fsn->mask) &&
+ if (old->mask & FS_IN_IGNORED)
+ return false;
+ if ((old->mask == new->mask) &&
(old_fsn->inode == new_fsn->inode) &&
(old->name_len == new->name_len) &&
(!old->name_len || !strcmp(old->name, new->name)))
@@ -113,8 +113,18 @@ int inotify_handle_event(struct fsnotify_group *group,
return -ENOMEM;
}
+ /*
+ * We now report FS_ISDIR flag with MOVE_SELF and DELETE_SELF events
+ * for fanotify. inotify never reported IN_ISDIR with those events.
+ * It looks like an oversight, but to avoid the risk of breaking
+ * existing inotify programs, mask the flag out from those events.
+ */
+ if (mask & (IN_MOVE_SELF | IN_DELETE_SELF))
+ mask &= ~IN_ISDIR;
+
fsn_event = &event->fse;
- fsnotify_init_event(fsn_event, inode, mask);
+ fsnotify_init_event(fsn_event, inode);
+ event->mask = mask;
event->wd = i_mark->wd;
event->sync_cookie = cookie;
event->name_len = len;
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 105576daca4a..e2901fbb9f76 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -189,7 +189,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
*/
pad_name_len = round_event_name_len(fsn_event);
inotify_event.len = pad_name_len;
- inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
+ inotify_event.mask = inotify_mask_to_arg(event->mask);
inotify_event.wd = event->wd;
inotify_event.cookie = event->sync_cookie;
@@ -634,7 +634,8 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
return ERR_PTR(-ENOMEM);
}
group->overflow_event = &oevent->fse;
- fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
+ fsnotify_init_event(group->overflow_event, NULL);
+ oevent->mask = FS_Q_OVERFLOW;
oevent->wd = -1;
oevent->sync_cookie = 0;
oevent->name_len = 0;
@@ -724,8 +725,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
return -EBADF;
/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
- if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
- return -EINVAL;
+ if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
+ ret = -EINVAL;
+ goto fput_and_out;
+ }
/* verify that this is indeed an inotify instance */
if (unlikely(f.file->f_op != &inotify_fops)) {
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index d2dd16cb5989..d593d4269561 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -82,6 +82,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
+#include <linux/ratelimit.h>
#include <linux/atomic.h>
@@ -481,7 +482,8 @@ int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
}
static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
- unsigned int type)
+ unsigned int type,
+ __kernel_fsid_t *fsid)
{
struct inode *inode = NULL;
struct fsnotify_mark_connector *conn;
@@ -493,6 +495,11 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
INIT_HLIST_HEAD(&conn->list);
conn->type = type;
conn->obj = connp;
+ /* Cache fsid of filesystem containing the object */
+ if (fsid)
+ conn->fsid = *fsid;
+ else
+ conn->fsid.val[0] = conn->fsid.val[1] = 0;
if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
inode = igrab(fsnotify_conn_inode(conn));
/*
@@ -544,7 +551,7 @@ out:
*/
static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
fsnotify_connp_t *connp, unsigned int type,
- int allow_dups)
+ int allow_dups, __kernel_fsid_t *fsid)
{
struct fsnotify_mark *lmark, *last = NULL;
struct fsnotify_mark_connector *conn;
@@ -553,15 +560,36 @@ static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
if (WARN_ON(!fsnotify_valid_obj_type(type)))
return -EINVAL;
+
+ /* Backend is expected to check for zero fsid (e.g. tmpfs) */
+ if (fsid && WARN_ON_ONCE(!fsid->val[0] && !fsid->val[1]))
+ return -ENODEV;
+
restart:
spin_lock(&mark->lock);
conn = fsnotify_grab_connector(connp);
if (!conn) {
spin_unlock(&mark->lock);
- err = fsnotify_attach_connector_to_object(connp, type);
+ err = fsnotify_attach_connector_to_object(connp, type, fsid);
if (err)
return err;
goto restart;
+ } else if (fsid && (conn->fsid.val[0] || conn->fsid.val[1]) &&
+ (fsid->val[0] != conn->fsid.val[0] ||
+ fsid->val[1] != conn->fsid.val[1])) {
+ /*
+ * Backend is expected to check for non uniform fsid
+ * (e.g. btrfs), but maybe we missed something?
+ * Only allow setting conn->fsid once to non zero fsid.
+ * inotify and non-fid fanotify groups do not set nor test
+ * conn->fsid.
+ */
+ pr_warn_ratelimited("%s: fsid mismatch on object of type %u: "
+ "%x.%x != %x.%x\n", __func__, conn->type,
+ fsid->val[0], fsid->val[1],
+ conn->fsid.val[0], conn->fsid.val[1]);
+ err = -EXDEV;
+ goto out_err;
}
/* is mark the first mark? */
@@ -606,7 +634,7 @@ out_err:
*/
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_connp_t *connp, unsigned int type,
- int allow_dups)
+ int allow_dups, __kernel_fsid_t *fsid)
{
struct fsnotify_group *group = mark->group;
int ret = 0;
@@ -627,7 +655,7 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_get_mark(mark); /* for g_list */
spin_unlock(&mark->lock);
- ret = fsnotify_add_mark_list(mark, connp, type, allow_dups);
+ ret = fsnotify_add_mark_list(mark, connp, type, allow_dups, fsid);
if (ret)
goto err;
@@ -648,13 +676,13 @@ err:
}
int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
- unsigned int type, int allow_dups)
+ unsigned int type, int allow_dups, __kernel_fsid_t *fsid)
{
int ret;
struct fsnotify_group *group = mark->group;
mutex_lock(&group->mark_mutex);
- ret = fsnotify_add_mark_locked(mark, connp, type, allow_dups);
+ ret = fsnotify_add_mark_locked(mark, connp, type, allow_dups, fsid);
mutex_unlock(&group->mark_mutex);
return ret;
}
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 3c3e36745f59..5f3a54d444b5 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -71,7 +71,7 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event)
{
/* Overflow events are per-group and we don't want to free them */
- if (!event || event->mask == FS_Q_OVERFLOW)
+ if (!event || event == group->overflow_event)
return;
/*
* If the event is still queued, we have a problem... Do an unreliable
@@ -141,6 +141,18 @@ queue:
return ret;
}
+void fsnotify_remove_queued_event(struct fsnotify_group *group,
+ struct fsnotify_event *event)
+{
+ assert_spin_locked(&group->notification_lock);
+ /*
+ * We need to init list head for the case of overflow event so that
+ * check in fsnotify_add_event() works
+ */
+ list_del_init(&event->list);
+ group->q_len--;
+}
+
/*
* Remove and return the first event from the notification list. It is the
* responsibility of the caller to destroy the obtained event
@@ -155,13 +167,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
event = list_first_entry(&group->notification_list,
struct fsnotify_event, list);
- /*
- * We need to init list head for the case of overflow event so that
- * check in fsnotify_add_event() works
- */
- list_del_init(&event->list);
- group->q_len--;
-
+ fsnotify_remove_queued_event(group, event);
return event;
}
@@ -194,23 +200,3 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
}
spin_unlock(&group->notification_lock);
}
-
-/*
- * fsnotify_create_event - Allocate a new event which will be sent to each
- * group's handle_event function if the group was interested in this
- * particular event.
- *
- * @inode the inode which is supposed to receive the event (sometimes a
- * parent of the inode to which the event happened.
- * @mask what actually happened.
- * @data pointer to the object which was actually affected
- * @data_type flag indication if the data is a file, path, inode, nothing...
- * @name the filename, if available
- */
-void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode,
- u32 mask)
-{
- INIT_LIST_HEAD(&event->list);
- event->inode = inode;
- event->mask = mask;
-}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index d1cbb27808e2..6f0999015a44 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7532,10 +7532,11 @@ static int ocfs2_trim_group(struct super_block *sb,
return count;
}
-int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
+static
+int ocfs2_trim_mainbm(struct super_block *sb, struct fstrim_range *range)
{
struct ocfs2_super *osb = OCFS2_SB(sb);
- u64 start, len, trimmed, first_group, last_group, group;
+ u64 start, len, trimmed = 0, first_group, last_group = 0, group = 0;
int ret, cnt;
u32 first_bit, last_bit, minlen;
struct buffer_head *main_bm_bh = NULL;
@@ -7543,7 +7544,6 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
struct buffer_head *gd_bh = NULL;
struct ocfs2_dinode *main_bm;
struct ocfs2_group_desc *gd = NULL;
- struct ocfs2_trim_fs_info info, *pinfo = NULL;
start = range->start >> osb->s_clustersize_bits;
len = range->len >> osb->s_clustersize_bits;
@@ -7552,6 +7552,9 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (minlen >= osb->bitmap_cpg || range->len < sb->s_blocksize)
return -EINVAL;
+ trace_ocfs2_trim_mainbm(start, len, minlen);
+
+next_group:
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
@@ -7570,64 +7573,34 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
main_bm = (struct ocfs2_dinode *)main_bm_bh->b_data;
- if (start >= le32_to_cpu(main_bm->i_clusters)) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- len = range->len >> osb->s_clustersize_bits;
- if (start + len > le32_to_cpu(main_bm->i_clusters))
- len = le32_to_cpu(main_bm->i_clusters) - start;
-
- trace_ocfs2_trim_fs(start, len, minlen);
-
- ocfs2_trim_fs_lock_res_init(osb);
- ret = ocfs2_trim_fs_lock(osb, NULL, 1);
- if (ret < 0) {
- if (ret != -EAGAIN) {
- mlog_errno(ret);
- ocfs2_trim_fs_lock_res_uninit(osb);
+ /*
+ * Do some checks before trimming the first group.
+ */
+ if (!group) {
+ if (start >= le32_to_cpu(main_bm->i_clusters)) {
+ ret = -EINVAL;
goto out_unlock;
}
- mlog(ML_NOTICE, "Wait for trim on device (%s) to "
- "finish, which is running from another node.\n",
- osb->dev_str);
- ret = ocfs2_trim_fs_lock(osb, &info, 0);
- if (ret < 0) {
- mlog_errno(ret);
- ocfs2_trim_fs_lock_res_uninit(osb);
- goto out_unlock;
- }
+ if (start + len > le32_to_cpu(main_bm->i_clusters))
+ len = le32_to_cpu(main_bm->i_clusters) - start;
- if (info.tf_valid && info.tf_success &&
- info.tf_start == start && info.tf_len == len &&
- info.tf_minlen == minlen) {
- /* Avoid sending duplicated trim to a shared device */
- mlog(ML_NOTICE, "The same trim on device (%s) was "
- "just done from node (%u), return.\n",
- osb->dev_str, info.tf_nodenum);
- range->len = info.tf_trimlen;
- goto out_trimunlock;
- }
+ /*
+ * Determine first and last group to examine based on
+ * start and len
+ */
+ first_group = ocfs2_which_cluster_group(main_bm_inode, start);
+ if (first_group == osb->first_cluster_group_blkno)
+ first_bit = start;
+ else
+ first_bit = start - ocfs2_blocks_to_clusters(sb,
+ first_group);
+ last_group = ocfs2_which_cluster_group(main_bm_inode,
+ start + len - 1);
+ group = first_group;
}
- info.tf_nodenum = osb->node_num;
- info.tf_start = start;
- info.tf_len = len;
- info.tf_minlen = minlen;
-
- /* Determine first and last group to examine based on start and len */
- first_group = ocfs2_which_cluster_group(main_bm_inode, start);
- if (first_group == osb->first_cluster_group_blkno)
- first_bit = start;
- else
- first_bit = start - ocfs2_blocks_to_clusters(sb, first_group);
- last_group = ocfs2_which_cluster_group(main_bm_inode, start + len - 1);
- last_bit = osb->bitmap_cpg;
-
- trimmed = 0;
- for (group = first_group; group <= last_group;) {
+ do {
if (first_bit + len >= osb->bitmap_cpg)
last_bit = osb->bitmap_cpg;
else
@@ -7659,21 +7632,81 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
group = ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
else
group += ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
- }
- range->len = trimmed * sb->s_blocksize;
+ } while (0);
- info.tf_trimlen = range->len;
- info.tf_success = (ret ? 0 : 1);
- pinfo = &info;
-out_trimunlock:
- ocfs2_trim_fs_unlock(osb, pinfo);
- ocfs2_trim_fs_lock_res_uninit(osb);
out_unlock:
ocfs2_inode_unlock(main_bm_inode, 0);
brelse(main_bm_bh);
+ main_bm_bh = NULL;
out_mutex:
inode_unlock(main_bm_inode);
iput(main_bm_inode);
+
+ /*
+ * If there are still groups left to trim and no error occurred, go back
+ * and trim the next group. The main_bm related locks were released above
+ * between groups to avoid starving concurrent IO.
+ */
+ if (ret >= 0 && group <= last_group)
+ goto next_group;
out:
+ range->len = trimmed * sb->s_blocksize;
+ return ret;
+}
+
+int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+ struct ocfs2_trim_fs_info info, *pinfo = NULL;
+
+ ocfs2_trim_fs_lock_res_init(osb);
+
+ trace_ocfs2_trim_fs(range->start, range->len, range->minlen);
+
+ ret = ocfs2_trim_fs_lock(osb, NULL, 1);
+ if (ret < 0) {
+ if (ret != -EAGAIN) {
+ mlog_errno(ret);
+ ocfs2_trim_fs_lock_res_uninit(osb);
+ return ret;
+ }
+
+ mlog(ML_NOTICE, "Wait for trim on device (%s) to "
+ "finish, which is running from another node.\n",
+ osb->dev_str);
+ ret = ocfs2_trim_fs_lock(osb, &info, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ ocfs2_trim_fs_lock_res_uninit(osb);
+ return ret;
+ }
+
+ if (info.tf_valid && info.tf_success &&
+ info.tf_start == range->start &&
+ info.tf_len == range->len &&
+ info.tf_minlen == range->minlen) {
+ /* Avoid sending duplicated trim to a shared device */
+ mlog(ML_NOTICE, "The same trim on device (%s) was "
+ "just done from node (%u), return.\n",
+ osb->dev_str, info.tf_nodenum);
+ range->len = info.tf_trimlen;
+ goto out;
+ }
+ }
+
+ info.tf_nodenum = osb->node_num;
+ info.tf_start = range->start;
+ info.tf_len = range->len;
+ info.tf_minlen = range->minlen;
+
+ ret = ocfs2_trim_mainbm(sb, range);
+
+ info.tf_trimlen = range->len;
+ info.tf_success = (ret < 0 ? 0 : 1);
+ pinfo = &info;
+out:
+ ocfs2_trim_fs_unlock(osb, pinfo);
+ ocfs2_trim_fs_lock_res_uninit(osb);
return ret;
}
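
ocfs2_trim_fs() above is reached through the generic FITRIM ioctl; the split into ocfs2_trim_mainbm() lets the cluster-wide trimfs lock be taken once per call while the main bitmap locks are dropped between groups. For reference, a minimal FITRIM caller (standard uapi, not specific to this patch, and only a sketch) could look like:

#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>	/* struct fstrim_range, FITRIM */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ULLONG_MAX,	/* trim the whole filesystem */
		.minlen = 0,
	};
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	/* On return, range.len holds the number of bytes actually trimmed. */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}
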
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 0e4166cc23a0..4ac775e32240 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -621,13 +621,15 @@ static void o2nm_node_group_drop_item(struct config_group *group,
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
- o2net_disconnect_node(node);
+ if (cluster->cl_nodes[node->nd_num] == node) {
+ o2net_disconnect_node(node);
- if (cluster->cl_has_local &&
- (cluster->cl_local_node == node->nd_num)) {
- cluster->cl_has_local = 0;
- cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
- o2net_stop_listening(node);
+ if (cluster->cl_has_local &&
+ (cluster->cl_local_node == node->nd_num)) {
+ cluster->cl_has_local = 0;
+ cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
+ o2net_stop_listening(node);
+ }
}
/* XXX call into net to stop this node from trading messages */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 7c835824247e..af405586c5b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -686,6 +686,9 @@ void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{
struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+ /* Only one trimfs thread is allowed to work at the same time. */
+ mutex_lock(&osb->obs_trim_fs_mutex);
+
ocfs2_lock_res_init_once(lockres);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_TRIM_FS, 0, 0, lockres->l_name);
ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_TRIM_FS,
@@ -698,6 +701,8 @@ void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
ocfs2_simple_drop_lockres(osb, lockres);
ocfs2_lock_res_free(lockres);
+
+ mutex_unlock(&osb->obs_trim_fs_mutex);
}
static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 4f86ac0027b5..1f029fbe8b8d 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -407,6 +407,7 @@ struct ocfs2_super
struct ocfs2_lock_res osb_rename_lockres;
struct ocfs2_lock_res osb_nfs_sync_lockres;
struct ocfs2_lock_res osb_trim_fs_lockres;
+ struct mutex obs_trim_fs_mutex;
struct ocfs2_dlm_debug *osb_dlm_debug;
struct dentry *osb_debug_root;
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 2ee76a90ba8f..dc4bce1649c1 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -712,6 +712,8 @@ TRACE_EVENT(ocfs2_trim_extent,
DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_trim_group);
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_trim_mainbm);
+
DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_trim_fs);
/* End of trace events for fs/ocfs2/alloc.c. */
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index d7407994f308..ea0756d83250 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -55,7 +55,7 @@ struct ocfs2_slot_info {
unsigned int si_blocks;
struct buffer_head **si_bh;
unsigned int si_num_slots;
- struct ocfs2_slot *si_slots;
+ struct ocfs2_slot si_slots[];
};
@@ -420,9 +420,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
struct inode *inode = NULL;
struct ocfs2_slot_info *si;
- si = kzalloc(sizeof(struct ocfs2_slot_info) +
- (sizeof(struct ocfs2_slot) * osb->max_slots),
- GFP_KERNEL);
+ si = kzalloc(struct_size(si, si_slots, osb->max_slots), GFP_KERNEL);
if (!si) {
status = -ENOMEM;
mlog_errno(status);
@@ -431,8 +429,6 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
si->si_extended = ocfs2_uses_extended_slot_map(osb);
si->si_num_slots = osb->max_slots;
- si->si_slots = (struct ocfs2_slot *)((char *)si +
- sizeof(struct ocfs2_slot_info));
inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
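
The slot map change above converts a manually placed trailing array into a C99 flexible array member allocated with struct_size(), the kernel helper that also guards the size computation against overflow. A plain-C sketch of the same pattern (hypothetical names; the open-coded sizeof arithmetic stands in for struct_size() and is the unchecked equivalent):

#include <stdlib.h>

struct slot {
	int valid;
	unsigned int node;
};

struct slot_info {
	unsigned int num_slots;
	struct slot slots[];	/* flexible array member, must be last */
};

static struct slot_info *slot_info_alloc(unsigned int num_slots)
{
	/* struct_size(si, slots, num_slots) in the kernel checks overflow too */
	struct slot_info *si = calloc(1, sizeof(*si) +
					 num_slots * sizeof(si->slots[0]));

	if (si)
		si->num_slots = num_slots;
	return si;
}

int main(void)
{
	struct slot_info *si = slot_info_alloc(8);

	free(si);
	return 0;
}
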
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 3415e0b09398..96ae7cedd487 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1847,6 +1847,8 @@ static int ocfs2_mount_volume(struct super_block *sb)
if (ocfs2_is_hard_readonly(osb))
goto leave;
+ mutex_init(&osb->obs_trim_fs_mutex);
+
status = ocfs2_dlm_init(osb);
if (status < 0) {
mlog_errno(status);
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index a5a2fe76568f..b094d3d79354 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -398,8 +398,6 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter
loff_t pos = iocb->ki_pos;
ssize_t rc = 0;
- BUG_ON(iocb->private);
-
gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");
orangefs_stats.reads++;
@@ -416,8 +414,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
loff_t pos;
ssize_t rc;
- BUG_ON(iocb->private);
-
gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");
inode_lock(file->f_mapping->host);
diff --git a/fs/pipe.c b/fs/pipe.c
index bdc5d3c0977d..51d5fd8840ab 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -140,8 +140,7 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
struct page *page = buf->page;
if (page_count(page) == 1) {
- if (memcg_kmem_enabled())
- memcg_kmem_uncharge(page, 0);
+ memcg_kmem_uncharge(page, 0);
__SetPageLocked(page);
return 0;
}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9d428d5a0ac8..2edbb657f859 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -343,28 +343,28 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
#ifdef CONFIG_SECCOMP
seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
#endif
- seq_printf(m, "\nSpeculation_Store_Bypass:\t");
+ seq_puts(m, "\nSpeculation_Store_Bypass:\t");
switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
case -EINVAL:
- seq_printf(m, "unknown");
+ seq_puts(m, "unknown");
break;
case PR_SPEC_NOT_AFFECTED:
- seq_printf(m, "not vulnerable");
+ seq_puts(m, "not vulnerable");
break;
case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
- seq_printf(m, "thread force mitigated");
+ seq_puts(m, "thread force mitigated");
break;
case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
- seq_printf(m, "thread mitigated");
+ seq_puts(m, "thread mitigated");
break;
case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
- seq_printf(m, "thread vulnerable");
+ seq_puts(m, "thread vulnerable");
break;
case PR_SPEC_DISABLE:
- seq_printf(m, "globally mitigated");
+ seq_puts(m, "globally mitigated");
break;
default:
- seq_printf(m, "vulnerable");
+ seq_puts(m, "vulnerable");
break;
}
seq_putc(m, '\n');
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a23651ce6960..5ab1849971b4 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -140,9 +140,13 @@ struct pid_entry {
#define REG(NAME, MODE, fops) \
NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
#define ONE(NAME, MODE, show) \
- NOD(NAME, (S_IFREG|(MODE)), \
+ NOD(NAME, (S_IFREG|(MODE)), \
NULL, &proc_single_file_operations, \
{ .proc_show = show } )
+#define ATTR(LSM, NAME, MODE) \
+ NOD(NAME, (S_IFREG|(MODE)), \
+ NULL, &proc_pid_attr_operations, \
+ { .lsm = LSM })
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
@@ -456,7 +460,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
if (unlikely(!sched_info_on()))
- seq_printf(m, "0 0 0\n");
+ seq_puts(m, "0 0 0\n");
else
seq_printf(m, "%llu %llu %lu\n",
(unsigned long long)task->se.sum_exec_runtime,
@@ -1086,10 +1090,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
task_lock(p);
if (!p->vfork_done && process_shares_mm(p, mm)) {
- pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
- task_pid_nr(p), p->comm,
- p->signal->oom_score_adj, oom_adj,
- task_pid_nr(task), task->comm);
p->signal->oom_score_adj = oom_adj;
if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
p->signal->oom_score_adj_min = (short)oom_adj;
@@ -2525,7 +2525,7 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
if (!task)
return -ESRCH;
- length = security_getprocattr(task,
+ length = security_getprocattr(task, PROC_I(inode)->op.lsm,
(char*)file->f_path.dentry->d_name.name,
&p);
put_task_struct(task);
@@ -2574,7 +2574,9 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
if (rv < 0)
goto out_free;
- rv = security_setprocattr(file->f_path.dentry->d_name.name, page, count);
+ rv = security_setprocattr(PROC_I(inode)->op.lsm,
+ file->f_path.dentry->d_name.name, page,
+ count);
mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
kfree(page);
@@ -2588,13 +2590,53 @@ static const struct file_operations proc_pid_attr_operations = {
.llseek = generic_file_llseek,
};
+#define LSM_DIR_OPS(LSM) \
+static int proc_##LSM##_attr_dir_iterate(struct file *filp, \
+ struct dir_context *ctx) \
+{ \
+ return proc_pident_readdir(filp, ctx, \
+ LSM##_attr_dir_stuff, \
+ ARRAY_SIZE(LSM##_attr_dir_stuff)); \
+} \
+\
+static const struct file_operations proc_##LSM##_attr_dir_ops = { \
+ .read = generic_read_dir, \
+ .iterate = proc_##LSM##_attr_dir_iterate, \
+ .llseek = default_llseek, \
+}; \
+\
+static struct dentry *proc_##LSM##_attr_dir_lookup(struct inode *dir, \
+ struct dentry *dentry, unsigned int flags) \
+{ \
+ return proc_pident_lookup(dir, dentry, \
+ LSM##_attr_dir_stuff, \
+ ARRAY_SIZE(LSM##_attr_dir_stuff)); \
+} \
+\
+static const struct inode_operations proc_##LSM##_attr_dir_inode_ops = { \
+ .lookup = proc_##LSM##_attr_dir_lookup, \
+ .getattr = pid_getattr, \
+ .setattr = proc_setattr, \
+}
+
+#ifdef CONFIG_SECURITY_SMACK
+static const struct pid_entry smack_attr_dir_stuff[] = {
+ ATTR("smack", "current", 0666),
+};
+LSM_DIR_OPS(smack);
+#endif
+
static const struct pid_entry attr_dir_stuff[] = {
- REG("current", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
- REG("prev", S_IRUGO, proc_pid_attr_operations),
- REG("exec", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
- REG("fscreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
- REG("keycreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
- REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
+ ATTR(NULL, "current", 0666),
+ ATTR(NULL, "prev", 0444),
+ ATTR(NULL, "exec", 0666),
+ ATTR(NULL, "fscreate", 0666),
+ ATTR(NULL, "keycreate", 0666),
+ ATTR(NULL, "sockcreate", 0666),
+#ifdef CONFIG_SECURITY_SMACK
+ DIR("smack", 0555,
+ proc_smack_attr_dir_inode_ops, proc_smack_attr_dir_ops),
+#endif
};
static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
@@ -3165,7 +3207,7 @@ static struct dentry *proc_pid_instantiate(struct dentry * dentry,
return d_splice_alias(inode, dentry);
}
-struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
+struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags)
{
struct task_struct *task;
unsigned tgid;
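
With the ATTR()/LSM_DIR_OPS() machinery above, Smack gains its own attr subdirectory, so its current label becomes readable at /proc/<pid>/attr/smack/current in addition to the legacy /proc/<pid>/attr/current. A small illustrative reader (assuming a kernel built with CONFIG_SECURITY_SMACK) might look like:

#include <stdio.h>

int main(void)
{
	char label[256];
	/* Per-LSM path added by this series; fall back to the legacy file. */
	FILE *f = fopen("/proc/self/attr/smack/current", "r");

	if (!f)
		f = fopen("/proc/self/attr/current", "r");
	if (!f || !fgets(label, sizeof(label), f)) {
		perror("attr");
		return 1;
	}
	printf("current label: %s\n", label);
	fclose(f);
	return 0;
}
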
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 8ae109429a88..e39bac94dead 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
inode = proc_get_inode(dir->i_sb, de);
if (!inode)
return ERR_PTR(-ENOMEM);
- d_set_d_op(dentry, &proc_misc_dentry_ops);
+ d_set_d_op(dentry, de->proc_dops);
return d_splice_alias(inode, dentry);
}
read_unlock(&proc_subdir_lock);
@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
INIT_LIST_HEAD(&ent->pde_openers);
proc_set_user(ent, (*parent)->uid, (*parent)->gid);
+ ent->proc_dops = &proc_misc_dentry_ops;
+
out:
return ent;
}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5185d7f6a51e..ea575375f210 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -44,6 +44,7 @@ struct proc_dir_entry {
struct completion *pde_unload_completion;
const struct inode_operations *proc_iops;
const struct file_operations *proc_fops;
+ const struct dentry_operations *proc_dops;
union {
const struct seq_operations *seq_ops;
int (*single_show)(struct seq_file *, void *);
@@ -81,6 +82,7 @@ union proc_op {
int (*proc_show)(struct seq_file *m,
struct pid_namespace *ns, struct pid *pid,
struct task_struct *task);
+ const char *lsm;
};
struct proc_inode {
@@ -161,7 +163,7 @@ extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struc
extern void pid_update_inode(struct task_struct *, struct inode *);
extern int pid_delete_dentry(const struct dentry *);
extern int proc_pid_readdir(struct file *, struct dir_context *);
-extern struct dentry *proc_pid_lookup(struct inode *, struct dentry *, unsigned int);
+struct dentry *proc_pid_lookup(struct dentry *, unsigned int);
extern loff_t mem_lseek(struct file *, loff_t, int);
/* Lookups */
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 40b05e0d4274..544d1ee15aee 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -152,8 +152,8 @@ u64 stable_page_flags(struct page *page)
else if (page_count(page) == 0 && is_free_buddy_page(page))
u |= 1 << KPF_BUDDY;
- if (PageBalloon(page))
- u |= 1 << KPF_BALLOON;
+ if (PageOffline(page))
+ u |= 1 << KPF_OFFLINE;
if (PageTable(page))
u |= 1 << KPF_PGTABLE;
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index d5e0fcb3439e..a7b12435519e 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
return maybe_get_net(PDE_NET(PDE(inode)));
}
+static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ return 0;
+}
+
+static const struct dentry_operations proc_net_dentry_ops = {
+ .d_revalidate = proc_net_d_revalidate,
+ .d_delete = always_delete_dentry,
+};
+
+static void pde_force_lookup(struct proc_dir_entry *pde)
+{
+ /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
+ pde->proc_dops = &proc_net_dentry_ops;
+}
+
static int seq_open_net(struct inode *inode, struct file *file)
{
unsigned int state_size = PDE(inode)->state_size;
@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
+ pde_force_lookup(p);
p->proc_fops = &proc_net_seq_fops;
p->seq_ops = ops;
p->state_size = state_size;
@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
+ pde_force_lookup(p);
p->proc_fops = &proc_net_seq_fops;
p->seq_ops = ops;
p->state_size = state_size;
@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
+ pde_force_lookup(p);
p->proc_fops = &proc_net_single_fops;
p->single_show = show;
return proc_register(parent, p);
@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
+ pde_force_lookup(p);
p->proc_fops = &proc_net_single_fops;
p->single_show = show;
p->write = write;
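
pde_force_lookup() above makes every /proc/net dentry fail revalidation, so a task that switches network namespaces via setns() or unshare(CLONE_NEWNET) never resolves entries cached for the old namespace. The effect can be observed from userspace with something like the following sketch (illustrative only; unshare(CLONE_NEWNET) needs CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Print the interface list as seen through /proc/net/dev. */
static void dump_dev(const char *when)
{
	char line[256];
	FILE *f = fopen("/proc/net/dev", "r");

	if (!f)
		return;
	printf("--- %s ---\n", when);
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump_dev("initial namespace");

	/* Enter a fresh, empty network namespace. */
	if (unshare(CLONE_NEWNET) < 0) {
		perror("unshare");
		return 1;
	}

	/*
	 * With the forced lookup, the second dump reflects the new (nearly
	 * empty) namespace instead of reusing dentries cached for the old one.
	 */
	dump_dev("after unshare(CLONE_NEWNET)");
	return 0;
}
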
diff --git a/fs/proc/root.c b/fs/proc/root.c
index f4b1a9d2eca6..621e6ec322ca 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -154,7 +154,7 @@ static int proc_root_getattr(const struct path *path, struct kstat *stat,
static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, unsigned int flags)
{
- if (!proc_pid_lookup(dir, dentry, flags))
+ if (!proc_pid_lookup(dentry, flags))
return NULL;
return proc_lookup(dir, dentry, flags);
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 127265e5c55f..57c0a1047250 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -38,6 +38,7 @@ int proc_setup_self(struct super_block *s)
struct inode *root_inode = d_inode(s->s_root);
struct pid_namespace *ns = proc_pid_ns(root_inode);
struct dentry *self;
+ int ret = -ENOMEM;
inode_lock(root_inode);
self = d_alloc_name(s->s_root, "self");
@@ -51,20 +52,19 @@ int proc_setup_self(struct super_block *s)
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_op = &proc_self_inode_operations;
d_add(self, inode);
+ ret = 0;
} else {
dput(self);
- self = ERR_PTR(-ENOMEM);
}
- } else {
- self = ERR_PTR(-ENOMEM);
}
inode_unlock(root_inode);
- if (IS_ERR(self)) {
+
+ if (ret)
pr_err("proc_fill_super: can't allocate /proc/self\n");
- return PTR_ERR(self);
- }
- ns->proc_self = self;
- return 0;
+ else
+ ns->proc_self = self;
+
+ return ret;
}
void __init proc_self_init(void)
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 535eda7857cf..80c305f206bb 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -23,21 +23,21 @@
#ifdef arch_idle_time
-static u64 get_idle_time(int cpu)
+static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
{
u64 idle;
- idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+ idle = kcs->cpustat[CPUTIME_IDLE];
if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
idle += arch_idle_time(cpu);
return idle;
}
-static u64 get_iowait_time(int cpu)
+static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
{
u64 iowait;
- iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
+ iowait = kcs->cpustat[CPUTIME_IOWAIT];
if (cpu_online(cpu) && nr_iowait_cpu(cpu))
iowait += arch_idle_time(cpu);
return iowait;
@@ -45,7 +45,7 @@ static u64 get_iowait_time(int cpu)
#else
-static u64 get_idle_time(int cpu)
+static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
{
u64 idle, idle_usecs = -1ULL;
@@ -54,14 +54,14 @@ static u64 get_idle_time(int cpu)
if (idle_usecs == -1ULL)
/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
- idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+ idle = kcs->cpustat[CPUTIME_IDLE];
else
idle = idle_usecs * NSEC_PER_USEC;
return idle;
}
-static u64 get_iowait_time(int cpu)
+static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
{
u64 iowait, iowait_usecs = -1ULL;
@@ -70,7 +70,7 @@ static u64 get_iowait_time(int cpu)
if (iowait_usecs == -1ULL)
/* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
- iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
+ iowait = kcs->cpustat[CPUTIME_IOWAIT];
else
iowait = iowait_usecs * NSEC_PER_USEC;
@@ -79,6 +79,31 @@ static u64 get_iowait_time(int cpu)
#endif
+static void show_irq_gap(struct seq_file *p, unsigned int gap)
+{
+ static const char zeros[] = " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0";
+
+ while (gap > 0) {
+ unsigned int inc;
+
+ inc = min_t(unsigned int, gap, ARRAY_SIZE(zeros) / 2);
+ seq_write(p, zeros, 2 * inc);
+ gap -= inc;
+ }
+}
+
+static void show_all_irqs(struct seq_file *p)
+{
+ unsigned int i, next = 0;
+
+ for_each_active_irq(i) {
+ show_irq_gap(p, i - next);
+ seq_put_decimal_ull(p, " ", kstat_irqs_usr(i));
+ next = i + 1;
+ }
+ show_irq_gap(p, nr_irqs - next);
+}
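
show_all_irqs() above visits only the active IRQs and pads the holes between them with a pre-formatted run of " 0" pairs via seq_write(), so /proc/stat no longer calls kstat_irqs_usr() for every possible interrupt number. Worked example: with active IRQs {1, 5} and nr_irqs = 8, the line gets one zero for IRQ 0, the count for IRQ 1, three zeros for IRQs 2-4, the count for IRQ 5, and two trailing zeros for IRQs 6-7. The same gap-filling idea over a plain array, as a stand-alone userspace sketch:

    #include <stdio.h>

    /* Mirror of the show_irq_gap()/show_all_irqs() structure above. */
    static void print_gap(unsigned int gap)
    {
            while (gap--)
                    printf(" 0");
    }

    static void print_sparse(const unsigned int *active, const unsigned long *count,
                             unsigned int n_active, unsigned int total)
    {
            unsigned int i, next = 0;

            for (i = 0; i < n_active; i++) {
                    print_gap(active[i] - next);
                    printf(" %lu", count[i]);
                    next = active[i] + 1;
            }
            print_gap(total - next);
            printf("\n");
    }

    int main(void)
    {
            const unsigned int active[] = { 1, 5 };
            const unsigned long count[] = { 42, 7 };

            print_sparse(active, count, 2, 8);  /* prints " 0 42 0 0 0 7 0 0" */
            return 0;
    }
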
+
static int show_stat(struct seq_file *p, void *v)
{
int i, j;
@@ -95,16 +120,18 @@ static int show_stat(struct seq_file *p, void *v)
getboottime64(&boottime);
for_each_possible_cpu(i) {
- user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
- nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
- system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
- idle += get_idle_time(i);
- iowait += get_iowait_time(i);
- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+ struct kernel_cpustat *kcs = &kcpustat_cpu(i);
+
+ user += kcs->cpustat[CPUTIME_USER];
+ nice += kcs->cpustat[CPUTIME_NICE];
+ system += kcs->cpustat[CPUTIME_SYSTEM];
+ idle += get_idle_time(kcs, i);
+ iowait += get_iowait_time(kcs, i);
+ irq += kcs->cpustat[CPUTIME_IRQ];
+ softirq += kcs->cpustat[CPUTIME_SOFTIRQ];
+ steal += kcs->cpustat[CPUTIME_STEAL];
+ guest += kcs->cpustat[CPUTIME_GUEST];
+ guest_nice += kcs->cpustat[CPUTIME_GUEST_NICE];
sum += kstat_cpu_irqs_sum(i);
sum += arch_irq_stat_cpu(i);
@@ -130,17 +157,19 @@ static int show_stat(struct seq_file *p, void *v)
seq_putc(p, '\n');
for_each_online_cpu(i) {
+ struct kernel_cpustat *kcs = &kcpustat_cpu(i);
+
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
- user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
- nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
- system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
- idle = get_idle_time(i);
- iowait = get_iowait_time(i);
- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+ user = kcs->cpustat[CPUTIME_USER];
+ nice = kcs->cpustat[CPUTIME_NICE];
+ system = kcs->cpustat[CPUTIME_SYSTEM];
+ idle = get_idle_time(kcs, i);
+ iowait = get_iowait_time(kcs, i);
+ irq = kcs->cpustat[CPUTIME_IRQ];
+ softirq = kcs->cpustat[CPUTIME_SOFTIRQ];
+ steal = kcs->cpustat[CPUTIME_STEAL];
+ guest = kcs->cpustat[CPUTIME_GUEST];
+ guest_nice = kcs->cpustat[CPUTIME_GUEST_NICE];
seq_printf(p, "cpu%d", i);
seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
@@ -156,9 +185,7 @@ static int show_stat(struct seq_file *p, void *v)
}
seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
- /* sum again ? it could be updated? */
- for_each_irq_nr(j)
- seq_put_decimal_ull(p, " ", kstat_irqs_usr(j));
+ show_all_irqs(p);
seq_printf(p,
"\nctxt %llu\n"
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0ec9edab2f3..beccb0b1d57c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -423,7 +423,7 @@ struct mem_size_stats {
};
static void smaps_account(struct mem_size_stats *mss, struct page *page,
- bool compound, bool young, bool dirty)
+ bool compound, bool young, bool dirty, bool locked)
{
int i, nr = compound ? 1 << compound_order(page) : 1;
unsigned long size = nr * PAGE_SIZE;
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
else
mss->private_clean += size;
mss->pss += (u64)size << PSS_SHIFT;
+ if (locked)
+ mss->pss_locked += (u64)size << PSS_SHIFT;
return;
}
for (i = 0; i < nr; i++, page++) {
int mapcount = page_mapcount(page);
+ unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
if (mapcount >= 2) {
if (dirty || PageDirty(page))
mss->shared_dirty += PAGE_SIZE;
else
mss->shared_clean += PAGE_SIZE;
- mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+ mss->pss += pss / mapcount;
+ if (locked)
+ mss->pss_locked += pss / mapcount;
} else {
if (dirty || PageDirty(page))
mss->private_dirty += PAGE_SIZE;
else
mss->private_clean += PAGE_SIZE;
- mss->pss += PAGE_SIZE << PSS_SHIFT;
+ mss->pss += pss;
+ if (locked)
+ mss->pss_locked += pss;
}
}
}
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
if (pte_present(*pte)) {
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
if (!page)
return;
- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page;
/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
/* pass */;
else
VM_BUG_ON_PAGE(1, page);
- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
}
}
#endif
-
/* mmap_sem is held in m_start */
walk_page_vma(vma, &smaps_walk);
- if (vma->vm_flags & VM_LOCKED)
- mss->pss_locked += mss->pss;
}
#define SEQ_PUT_DEC(str, val) \
@@ -942,10 +948,12 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
pte_t ptent = *pte;
if (pte_present(ptent)) {
- ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
- ptent = pte_wrprotect(ptent);
+ pte_t old_pte;
+
+ old_pte = ptep_modify_prot_start(vma, addr, pte);
+ ptent = pte_wrprotect(old_pte);
ptent = pte_clear_soft_dirty(ptent);
- ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
+ ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_clear_soft_dirty(ptent);
set_pte_at(vma->vm_mm, addr, pte, ptent);
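
With the smaps_account() change above, the locked share is accumulated page by page using the same proportional split as Pss, instead of adding the walker's running mss->pss once per VM_LOCKED VMA at the end of smap_gather_stats() — a scheme that could overstate Locked for smaps_rollup, where mss accumulates across all VMAs. Numerically: a 4 KiB page mapped by four processes contributes (4096 << PSS_SHIFT) / 4 to pss, and exactly the same amount to pss_locked when the mapping is locked. A compact restatement of that arithmetic (PSS_SHIFT = 12 is the fixed-point shift used by this file; the demo names are made up):

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE 4096ULL
    #define DEMO_PSS_SHIFT 12

    int main(void)
    {
            unsigned int mapcount = 4;      /* page shared by four processes */
            uint64_t pss_share = (DEMO_PAGE_SIZE << DEMO_PSS_SHIFT) / mapcount;

            /* A locked mapping adds the proportional share to both counters. */
            printf("pss        += %llu\n", (unsigned long long)pss_share);
            printf("pss_locked += %llu (only if VM_LOCKED)\n",
                   (unsigned long long)pss_share);
            return 0;
    }
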
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 0b63d68dedb2..36bf0f2e102e 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -64,7 +64,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
else
bytes += kobjsize(current->files);
- if (current->sighand && atomic_read(&current->sighand->count) > 1)
+ if (current->sighand && refcount_read(&current->sighand->count) > 1)
sbytes += kobjsize(current->sighand);
else
bytes += kobjsize(current->sighand);
@@ -178,7 +178,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
seq_file_path(m, file, "");
} else if (mm && is_stack(vma)) {
seq_pad(m, ' ');
- seq_printf(m, "[stack]");
+ seq_puts(m, "[stack]");
}
seq_putc(m, '\n');
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index b905010ca9eb..f61ae53533f5 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -38,6 +38,7 @@ int proc_setup_thread_self(struct super_block *s)
struct inode *root_inode = d_inode(s->s_root);
struct pid_namespace *ns = proc_pid_ns(root_inode);
struct dentry *thread_self;
+ int ret = -ENOMEM;
inode_lock(root_inode);
thread_self = d_alloc_name(s->s_root, "thread-self");
@@ -51,20 +52,19 @@ int proc_setup_thread_self(struct super_block *s)
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_op = &proc_thread_self_inode_operations;
d_add(thread_self, inode);
+ ret = 0;
} else {
dput(thread_self);
- thread_self = ERR_PTR(-ENOMEM);
}
- } else {
- thread_self = ERR_PTR(-ENOMEM);
}
inode_unlock(root_inode);
- if (IS_ERR(thread_self)) {
+
+ if (ret)
pr_err("proc_fill_super: can't allocate /proc/thread_self\n");
- return PTR_ERR(thread_self);
- }
- ns->proc_thread_self = thread_self;
- return 0;
+ else
+ ns->proc_thread_self = thread_self;
+
+ return ret;
}
void __init proc_thread_self_init(void)
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 96f7d32cd184..898c8321b343 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -128,7 +128,6 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
struct pstore_record *record)
{
struct persistent_ram_zone *prz;
- bool update = (record->type == PSTORE_TYPE_DMESG);
/* Give up if we never existed or have hit the end. */
if (!przs)
@@ -139,7 +138,7 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
return NULL;
/* Update old/shadowed buffer. */
- if (update)
+ if (prz->type == PSTORE_TYPE_DMESG)
persistent_ram_save_old(prz);
if (!persistent_ram_old_size(prz))
@@ -711,18 +710,15 @@ static int ramoops_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ramoops_platform_data *pdata = dev->platform_data;
+ struct ramoops_platform_data pdata_local;
struct ramoops_context *cxt = &oops_cxt;
size_t dump_mem_sz;
phys_addr_t paddr;
int err = -EINVAL;
if (dev_of_node(dev) && !pdata) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- pr_err("cannot allocate platform data buffer\n");
- err = -ENOMEM;
- goto fail_out;
- }
+ pdata = &pdata_local;
+ memset(pdata, 0, sizeof(*pdata));
err = ramoops_parse_dt(pdev, pdata);
if (err < 0)
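
The probe path above stops allocating a throw-away platform-data buffer with devm_kzalloc() and instead parses the device tree into a zeroed struct on the stack, since the values are only consumed for the duration of ramoops_probe(). The idiom in isolation, as a hypothetical stand-alone sketch:

    #include <stdio.h>
    #include <string.h>

    struct demo_platform_data {     /* stand-in for ramoops_platform_data */
            unsigned long mem_size;
            unsigned long record_size;
    };

    static int demo_probe(struct demo_platform_data *pdata)
    {
            struct demo_platform_data local;

            if (!pdata) {
                    /* Short-lived configuration: a zeroed stack copy is enough,
                     * with no heap allocation and no extra error path. */
                    memset(&local, 0, sizeof(local));
                    local.mem_size = 1 << 20;
                    pdata = &local;
            }
            printf("probing with mem_size=%lu\n", pdata->mem_size);
            return 0;
    }

    int main(void)
    {
            return demo_probe(NULL);
    }
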
diff --git a/fs/read_write.c b/fs/read_write.c
index ff3c5e6f87cf..30df848b7451 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -426,7 +426,7 @@ ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
ssize_t result;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
/* The cast to a user pointer is valid due to the set_fs() */
result = vfs_read(file, (void __user *)buf, count, pos);
set_fs(old_fs);
@@ -499,7 +499,7 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
return -EINVAL;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
p = (__force const char __user *)buf;
if (count > MAX_RW_COUNT)
count = MAX_RW_COUNT;
@@ -521,7 +521,7 @@ ssize_t kernel_write(struct file *file, const void *buf, size_t count,
ssize_t res;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
/* The cast to a user pointer is valid due to the set_fs() */
res = vfs_write(file, (__force const char __user *)buf, count, pos);
set_fs(old_fs);
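
get_ds() is merely the legacy spelling of KERNEL_DS, so the three helpers above are unchanged in behaviour: they still temporarily widen the address limit so a kernel buffer can travel through the __user-typed VFS entry points. Callers never see that dance; a hypothetical in-kernel caller just uses the wrapper, e.g.:

    /* Hypothetical caller: read the first bytes of an already-opened file
     * into a kernel buffer via kernel_read(), which hides the
     * set_fs(KERNEL_DS)/set_fs(old_fs) pattern shown above. */
    static int demo_read_head(struct file *filp, void *buf, size_t len)
    {
            loff_t pos = 0;
            ssize_t n = kernel_read(filp, buf, len, &pos);

            return n < 0 ? n : 0;
    }
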
diff --git a/fs/select.c b/fs/select.c
index d0f35dbc0e8f..6cbc9ff56ba0 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1379,7 +1379,7 @@ COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
#if defined(CONFIG_COMPAT_32BIT_TIME)
-COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
+COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
struct old_timespec32 __user *, tsp, void __user *, sig)
{
@@ -1402,7 +1402,7 @@ COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
#endif
#if defined(CONFIG_COMPAT_32BIT_TIME)
-COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
+COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
unsigned int, nfds, struct old_timespec32 __user *, tsp,
const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
diff --git a/fs/splice.c b/fs/splice.c
index de2ede048473..6489fb9436e4 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -357,7 +357,7 @@ static ssize_t kernel_readv(struct file *file, const struct kvec *vec,
ssize_t res;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
/* The cast to a user pointer is valid due to the set_fs() */
res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos, 0);
set_fs(old_fs);
@@ -1123,6 +1123,9 @@ static long do_splice(struct file *in, loff_t __user *off_in,
if (ipipe == opipe)
return -EINVAL;
+ if ((in->f_flags | out->f_flags) & O_NONBLOCK)
+ flags |= SPLICE_F_NONBLOCK;
+
return splice_pipe_to_pipe(ipipe, opipe, len, flags);
}
@@ -1148,6 +1151,9 @@ static long do_splice(struct file *in, loff_t __user *off_in,
if (unlikely(ret < 0))
return ret;
+ if (in->f_flags & O_NONBLOCK)
+ flags |= SPLICE_F_NONBLOCK;
+
file_start_write(out);
ret = do_splice_from(ipipe, out, &offset, len, flags);
file_end_write(out);
@@ -1172,6 +1178,9 @@ static long do_splice(struct file *in, loff_t __user *off_in,
offset = in->f_pos;
}
+ if (out->f_flags & O_NONBLOCK)
+ flags |= SPLICE_F_NONBLOCK;
+
pipe_lock(opipe);
ret = wait_for_space(opipe, flags);
if (!ret)
@@ -1717,6 +1726,9 @@ static long do_tee(struct file *in, struct file *out, size_t len,
* copying the data.
*/
if (ipipe && opipe && ipipe != opipe) {
+ if ((in->f_flags | out->f_flags) & O_NONBLOCK)
+ flags |= SPLICE_F_NONBLOCK;
+
/*
* Keep going, unless we encounter an error. The ipipe/opipe
* ordering doesn't really matter.
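
The do_splice()/do_tee() hunks above make an O_NONBLOCK flag on either file descriptor imply SPLICE_F_NONBLOCK for the pipe side, so a non-blocking pipe no longer blocks inside splice(2) just because the caller omitted the flag. A small hypothetical userspace illustration (error handling trimmed):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>
    #include <errno.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
            int in[2], out[2];
            ssize_t n;

            pipe2(in, O_NONBLOCK);
            pipe2(out, O_NONBLOCK);

            /* Nothing is queued in `in`, so with the change above this fails
             * immediately with EAGAIN rather than blocking, even though no
             * SPLICE_F_NONBLOCK flag is passed. */
            n = splice(in[0], NULL, out[1], NULL, 4096, 0);
            printf("splice: %zd (%s)\n", n, n < 0 ? strerror(errno) : "ok");
            return 0;
    }
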
diff --git a/fs/statfs.c b/fs/statfs.c
index f0216629621d..eea7af6f2f22 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -67,6 +67,20 @@ static int statfs_by_dentry(struct dentry *dentry, struct kstatfs *buf)
return retval;
}
+int vfs_get_fsid(struct dentry *dentry, __kernel_fsid_t *fsid)
+{
+ struct kstatfs st;
+ int error;
+
+ error = statfs_by_dentry(dentry, &st);
+ if (error)
+ return error;
+
+ *fsid = st.f_fsid;
+ return 0;
+}
+EXPORT_SYMBOL(vfs_get_fsid);
+
int vfs_statfs(const struct path *path, struct kstatfs *buf)
{
int error;
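
vfs_get_fsid() lets in-kernel users fetch just the f_fsid of the filesystem behind a dentry without assembling a full kstatfs at every call site (the fanotify FID work is the obvious consumer, though that lies outside this hunk). A minimal, hypothetical call site:

    /* Hypothetical helper: do two dentries live on filesystems with the
     * same fsid?  Uses the export added above. */
    static int demo_same_fsid(struct dentry *a, struct dentry *b, bool *same)
    {
            __kernel_fsid_t fa, fb;
            int err;

            err = vfs_get_fsid(a, &fa);
            if (!err)
                    err = vfs_get_fsid(b, &fb);
            if (err)
                    return err;

            *same = fa.val[0] == fb.val[0] && fa.val[1] == fb.val[1];
            return 0;
    }
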
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index feeae8081c22..aa85f2874a9f 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -43,7 +43,8 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
kuid_t uid;
kgid_t gid;
- BUG_ON(!kobj);
+ if (WARN_ON(!kobj))
+ return -EINVAL;
if (kobj->parent)
parent = kobj->parent->sd;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index bb71db63c99c..130fc6fbcc03 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -17,7 +17,6 @@
#include <linux/seq_file.h>
#include "sysfs.h"
-#include "../kernfs/kernfs-internal.h"
/*
* Determine ktype->sysfs_ops for the given kernfs_node. This function
@@ -325,7 +324,8 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
kuid_t uid;
kgid_t gid;
- BUG_ON(!kobj || !kobj->sd || !attr);
+ if (WARN_ON(!kobj || !kobj->sd || !attr))
+ return -EINVAL;
kobject_get_ownership(kobj, &uid, &gid);
return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode,
@@ -496,6 +496,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr)
void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr)
{
int i;
+
for (i = 0; ptr[i]; i++)
sysfs_remove_file(kobj, ptr[i]);
}
@@ -537,7 +538,8 @@ int sysfs_create_bin_file(struct kobject *kobj,
kuid_t uid;
kgid_t gid;
- BUG_ON(!kobj || !kobj->sd || !attr);
+ if (WARN_ON(!kobj || !kobj->sd || !attr))
+ return -EINVAL;
kobject_get_ownership(kobj, &uid, &gid);
return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true,
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 1eb2d6307663..57038604d4a8 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -112,7 +112,8 @@ static int internal_create_group(struct kobject *kobj, int update,
kgid_t gid;
int error;
- BUG_ON(!kobj || (!update && !kobj->sd));
+ if (WARN_ON(!kobj || (!update && !kobj->sd)))
+ return -EINVAL;
/* Updates may happen before the object has been instantiated */
if (unlikely(update && !kobj->sd))
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 215c225b2ca1..c4deecc80f67 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -23,7 +23,8 @@ static int sysfs_do_create_link_sd(struct kernfs_node *parent,
{
struct kernfs_node *kn, *target = NULL;
- BUG_ON(!name || !parent);
+ if (WARN_ON(!name || !parent))
+ return -EINVAL;
/*
* We don't own @target_kobj and it may be removed at any time.
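
The sysfs changes above swap hard BUG_ON() assertions on bad arguments for the `if (WARN_ON(cond)) return -EINVAL;` idiom: a buggy caller now gets a backtrace and an error return instead of a dead machine. The idiom works because WARN_ON() evaluates to the truth value of its condition, so it can sit directly in the test; in the same spirit, a hypothetical helper would look like:

    static int demo_create_widget(struct kobject *kobj, const struct attribute *attr)
    {
            /* Report the caller bug loudly, but keep the system running. */
            if (WARN_ON(!kobj || !kobj->sd || !attr))
                    return -EINVAL;

            /* ... normal creation path ... */
            return 0;
    }
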
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 803ca070d42e..6a6fc8aa1de7 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -560,7 +560,7 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *,
}
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
+SYSCALL_DEFINE4(timerfd_settime32, int, ufd, int, flags,
const struct old_itimerspec32 __user *, utmr,
struct old_itimerspec32 __user *, otmr)
{
@@ -577,7 +577,7 @@ COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
return ret;
}
-COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
+SYSCALL_DEFINE2(timerfd_gettime32, int, ufd,
struct old_itimerspec32 __user *, otmr)
{
struct itimerspec64 kotmr;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index e3d684ea3203..ffd8038ff728 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1474,6 +1474,17 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
if (lvd->integritySeqExt.extLength)
udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
ret = 0;
+
+ if (!sbi->s_lvid_bh) {
+ /* We can't generate unique IDs without a valid LVID */
+ if (sb_rdonly(sb)) {
+ UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
+ } else {
+ udf_warn(sb, "Damaged or missing LVID, forcing "
+ "readonly mount\n");
+ ret = -EACCES;
+ }
+ }
out_bh:
brelse(bh);
return ret;
@@ -1943,13 +1954,24 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
return 0;
}
+static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
+ lvid->descTag.descCRC = cpu_to_le16(
+ crc_itu_t(0, (char *)lvid + sizeof(struct tag),
+ le16_to_cpu(lvid->descTag.descCRCLength)));
+ lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+}
+
static void udf_open_lvid(struct super_block *sb)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct buffer_head *bh = sbi->s_lvid_bh;
struct logicalVolIntegrityDesc *lvid;
struct logicalVolIntegrityDescImpUse *lvidiu;
- struct timespec64 ts;
if (!bh)
return;
@@ -1961,18 +1983,12 @@ static void udf_open_lvid(struct super_block *sb)
mutex_lock(&sbi->s_alloc_mutex);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
- ktime_get_real_ts64(&ts);
- udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
else
UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
- lvid->descTag.descCRC = cpu_to_le16(
- crc_itu_t(0, (char *)lvid + sizeof(struct tag),
- le16_to_cpu(lvid->descTag.descCRCLength)));
-
- lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+ udf_finalize_lvid(lvid);
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
mutex_unlock(&sbi->s_alloc_mutex);
@@ -1986,7 +2002,6 @@ static void udf_close_lvid(struct super_block *sb)
struct buffer_head *bh = sbi->s_lvid_bh;
struct logicalVolIntegrityDesc *lvid;
struct logicalVolIntegrityDescImpUse *lvidiu;
- struct timespec64 ts;
if (!bh)
return;
@@ -1998,8 +2013,6 @@ static void udf_close_lvid(struct super_block *sb)
mutex_lock(&sbi->s_alloc_mutex);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
- ktime_get_real_ts64(&ts);
- udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
@@ -2009,17 +2022,13 @@ static void udf_close_lvid(struct super_block *sb)
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
- lvid->descTag.descCRC = cpu_to_le16(
- crc_itu_t(0, (char *)lvid + sizeof(struct tag),
- le16_to_cpu(lvid->descTag.descCRCLength)));
-
- lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
/*
* We set buffer uptodate unconditionally here to avoid spurious
* warnings from mark_buffer_dirty() when previous EIO has marked
* the buffer as !uptodate
*/
set_buffer_uptodate(bh);
+ udf_finalize_lvid(lvid);
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
mutex_unlock(&sbi->s_alloc_mutex);
@@ -2048,8 +2057,8 @@ u64 lvid_get_unique_id(struct super_block *sb)
if (!(++uniqueID & 0xFFFFFFFF))
uniqueID += 16;
lvhd->uniqueID = cpu_to_le64(uniqueID);
+ udf_updated_lvid(sb);
mutex_unlock(&sbi->s_alloc_mutex);
- mark_buffer_dirty(bh);
return ret;
}
@@ -2320,11 +2329,17 @@ static int udf_sync_fs(struct super_block *sb, int wait)
mutex_lock(&sbi->s_alloc_mutex);
if (sbi->s_lvid_dirty) {
+ struct buffer_head *bh = sbi->s_lvid_bh;
+ struct logicalVolIntegrityDesc *lvid;
+
+ lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+ udf_finalize_lvid(lvid);
+
/*
* Blockdevice will be synced later so we don't have to submit
* the buffer for IO
*/
- mark_buffer_dirty(sbi->s_lvid_bh);
+ mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
}
mutex_unlock(&sbi->s_alloc_mutex);
diff --git a/fs/utimes.c b/fs/utimes.c
index bdcf2daf39c1..350c9c16ace1 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -224,8 +224,8 @@ SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
* of sys_utimes.
*/
#ifdef __ARCH_WANT_SYS_UTIME32
-COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
- struct old_utimbuf32 __user *, t)
+SYSCALL_DEFINE2(utime32, const char __user *, filename,
+ struct old_utimbuf32 __user *, t)
{
struct timespec64 tv[2];
@@ -240,7 +240,7 @@ COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
}
#endif
-COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags)
+SYSCALL_DEFINE4(utimensat_time32, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags)
{
struct timespec64 tv[2];
@@ -276,14 +276,14 @@ static long do_compat_futimesat(unsigned int dfd, const char __user *filename,
return do_utimes(dfd, filename, t ? tv : NULL, 0);
}
-COMPAT_SYSCALL_DEFINE3(futimesat, unsigned int, dfd,
+SYSCALL_DEFINE3(futimesat_time32, unsigned int, dfd,
const char __user *, filename,
struct old_timeval32 __user *, t)
{
return do_compat_futimesat(dfd, filename, t);
}
-COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct old_timeval32 __user *, t)
+SYSCALL_DEFINE2(utimes_time32, const char __user *, filename, struct old_timeval32 __user *, t)
{
return do_compat_futimesat(AT_FDCWD, filename, t);
}
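
The renames in fs/select.c, fs/timerfd.c and fs/utimes.c above follow the y2038 convention: handlers that take 32-bit time types stop being COMPAT_SYSCALL_DEFINEx(name, ...) and become native SYSCALL_DEFINEx(name_time32, ...), so the same code can be wired up both for compat tasks on 64-bit kernels and for 32-bit architectures that must keep the old ABI. Such a handler usually just widens the 32-bit structure and calls the common 64-bit implementation; a hedged sketch modelled on the utime32 hunk above (demo_utime32 and do_demo_utimes are placeholders):

    SYSCALL_DEFINE2(demo_utime32, const char __user *, filename,
                    struct old_utimbuf32 __user *, t)
    {
            struct timespec64 tv[2];

            if (t) {
                    if (get_user(tv[0].tv_sec, &t->actime) ||
                        get_user(tv[1].tv_sec, &t->modtime))
                            return -EFAULT;
                    tv[0].tv_nsec = 0;
                    tv[1].tv_nsec = 0;
            }
            return do_demo_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
    }
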
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 999ad8d00d43..1ef8acf35e7d 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -339,14 +339,14 @@ xfs_ag_init_headers(
{ /* BNO root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
- .ops = &xfs_allocbt_buf_ops,
+ .ops = &xfs_bnobt_buf_ops,
.work = &xfs_bnoroot_init,
.need_init = true
},
{ /* CNT root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
- .ops = &xfs_allocbt_buf_ops,
+ .ops = &xfs_cntbt_buf_ops,
.work = &xfs_cntroot_init,
.need_init = true
},
@@ -361,7 +361,7 @@ xfs_ag_init_headers(
{ /* FINO root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
- .ops = &xfs_inobt_buf_ops,
+ .ops = &xfs_finobt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_FINO,
.need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index e701ebc36c06..e2ba2a3b63b2 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -281,7 +281,7 @@ xfs_ag_resv_init(
*/
ask = used = 0;
- mp->m_inotbt_nores = true;
+ mp->m_finobt_nores = true;
error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
&used);
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index b715668886a4..bc3367b8b7bb 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -568,9 +568,9 @@ xfs_agfl_verify(
if (!xfs_sb_version_hascrc(&mp->m_sb))
return NULL;
- if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
+ if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
return __this_address;
- if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
+ if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
/*
* during growfs operations, the perag is not fully initialised,
@@ -643,6 +643,7 @@ xfs_agfl_write_verify(
const struct xfs_buf_ops xfs_agfl_buf_ops = {
.name = "xfs_agfl",
+ .magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
.verify_read = xfs_agfl_read_verify,
.verify_write = xfs_agfl_write_verify,
.verify_struct = xfs_agfl_verify,
@@ -2587,8 +2588,10 @@ xfs_agf_verify(
return __this_address;
}
- if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
- XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
+ if (!xfs_verify_magic(bp, agf->agf_magicnum))
+ return __this_address;
+
+ if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
@@ -2670,6 +2673,7 @@ xfs_agf_write_verify(
const struct xfs_buf_ops xfs_agf_buf_ops = {
.name = "xfs_agf",
+ .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
.verify_read = xfs_agf_read_verify,
.verify_write = xfs_agf_write_verify,
.verify_struct = xfs_agf_verify,
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 4e59cc8a2802..9fe949f6055e 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -297,48 +297,34 @@ xfs_allocbt_verify(
struct xfs_perag *pag = bp->b_pag;
xfs_failaddr_t fa;
unsigned int level;
+ xfs_btnum_t btnum = XFS_BTNUM_BNOi;
+
+ if (!xfs_verify_magic(bp, block->bb_magic))
+ return __this_address;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
+ }
/*
- * magic number and level verification
- *
- * During growfs operations, we can't verify the exact level or owner as
- * the perag is not fully initialised and hence not attached to the
- * buffer. In this case, check against the maximum tree depth.
+ * The perag may not be attached during grow operations or fully
+ * initialized from the AGF during log recovery. Therefore we can only
+ * check against maximum tree depth from those contexts.
*
- * Similarly, during log recovery we will have a perag structure
- * attached, but the agf information will not yet have been initialised
- * from the on disk AGF. Again, we can only check against maximum limits
- * in this case.
+ * Otherwise check against the per-tree limit. Peek at one of the
+ * verifier magic values to determine the type of tree we're verifying
+ * against.
*/
level = be16_to_cpu(block->bb_level);
- switch (block->bb_magic) {
- case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
- fa = xfs_btree_sblock_v5hdr_verify(bp);
- if (fa)
- return fa;
- /* fall through */
- case cpu_to_be32(XFS_ABTB_MAGIC):
- if (pag && pag->pagf_init) {
- if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
- return __this_address;
- } else if (level >= mp->m_ag_maxlevels)
+ if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
+ btnum = XFS_BTNUM_CNTi;
+ if (pag && pag->pagf_init) {
+ if (level >= pag->pagf_levels[btnum])
return __this_address;
- break;
- case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
- fa = xfs_btree_sblock_v5hdr_verify(bp);
- if (fa)
- return fa;
- /* fall through */
- case cpu_to_be32(XFS_ABTC_MAGIC):
- if (pag && pag->pagf_init) {
- if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
- return __this_address;
- } else if (level >= mp->m_ag_maxlevels)
- return __this_address;
- break;
- default:
+ } else if (level >= mp->m_ag_maxlevels)
return __this_address;
- }
return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}
@@ -377,13 +363,23 @@ xfs_allocbt_write_verify(
}
-const struct xfs_buf_ops xfs_allocbt_buf_ops = {
- .name = "xfs_allocbt",
+const struct xfs_buf_ops xfs_bnobt_buf_ops = {
+ .name = "xfs_bnobt",
+ .magic = { cpu_to_be32(XFS_ABTB_MAGIC),
+ cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
.verify_read = xfs_allocbt_read_verify,
.verify_write = xfs_allocbt_write_verify,
.verify_struct = xfs_allocbt_verify,
};
+const struct xfs_buf_ops xfs_cntbt_buf_ops = {
+ .name = "xfs_cntbt",
+ .magic = { cpu_to_be32(XFS_ABTC_MAGIC),
+ cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
+ .verify_read = xfs_allocbt_read_verify,
+ .verify_write = xfs_allocbt_write_verify,
+ .verify_struct = xfs_allocbt_verify,
+};
STATIC int
xfs_bnobt_keys_inorder(
@@ -448,7 +444,7 @@ static const struct xfs_btree_ops xfs_bnobt_ops = {
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
.key_diff = xfs_bnobt_key_diff,
- .buf_ops = &xfs_allocbt_buf_ops,
+ .buf_ops = &xfs_bnobt_buf_ops,
.diff_two_keys = xfs_bnobt_diff_two_keys,
.keys_inorder = xfs_bnobt_keys_inorder,
.recs_inorder = xfs_bnobt_recs_inorder,
@@ -470,7 +466,7 @@ static const struct xfs_btree_ops xfs_cntbt_ops = {
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
.key_diff = xfs_cntbt_key_diff,
- .buf_ops = &xfs_allocbt_buf_ops,
+ .buf_ops = &xfs_cntbt_buf_ops,
.diff_two_keys = xfs_cntbt_diff_two_keys,
.keys_inorder = xfs_cntbt_keys_inorder,
.recs_inorder = xfs_cntbt_recs_inorder,
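
The verifier rework above moves the expected magic numbers into struct xfs_buf_ops itself — slot 0 holds the pre-CRC (v4) value, slot 1 the CRC-enabled (v5) value — and splits the shared xfs_allocbt_buf_ops into per-tree bnobt/cntbt ops so each buffer type checks exactly one pair. The xfs_verify_magic() helper these verifiers call is defined elsewhere in the series; its shape is roughly the following sketch, which picks the slot from the superblock feature bit and compares:

    /* Sketch only: compare an on-disk magic against the value the buffer's
     * ops table expects for this filesystem's format version. */
    static bool demo_verify_magic(struct xfs_buf *bp, __be32 dmagic)
    {
            struct xfs_mount        *mp = bp->b_target->bt_mount;
            int                     idx;

            idx = xfs_sb_version_hascrc(&mp->m_sb); /* 0 = v4 magic, 1 = v5 magic */
            if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
                    return false;
            return dmagic == bp->b_ops->magic[idx];
    }
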
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 844ed87b1900..2dd9ee2a2e08 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -1336,3 +1336,20 @@ xfs_attr_node_get(xfs_da_args_t *args)
xfs_da_state_free(state);
return retval;
}
+
+/* Returns true if the attribute entry name is valid. */
+bool
+xfs_attr_namecheck(
+ const void *name,
+ size_t length)
+{
+ /*
+ * MAXNAMELEN includes the trailing null, but (name/length) leave it
+ * out, so use >= for the length check.
+ */
+ if (length >= MAXNAMELEN)
+ return false;
+
+ /* There shouldn't be any nulls here */
+ return !memchr(name, 0, length);
+}
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index bdf52a333f3f..2297d8467666 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -145,6 +145,6 @@ int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
int xfs_attr_remove_args(struct xfs_da_args *args);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
int flags, struct attrlist_cursor_kern *cursor);
-
+bool xfs_attr_namecheck(const void *name, size_t length);
#endif /* __XFS_ATTR_H__ */
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 2652d00842d6..1f6e3965ff74 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -245,25 +245,14 @@ xfs_attr3_leaf_verify(
struct xfs_attr_leaf_entry *entries;
uint32_t end; /* must be 32bit - see below */
int i;
+ xfs_failaddr_t fa;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
-
- if (ichdr.magic != XFS_ATTR3_LEAF_MAGIC)
- return __this_address;
+ fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
+ if (fa)
+ return fa;
- if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return __this_address;
- if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
- return __this_address;
- if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
- return __this_address;
- } else {
- if (ichdr.magic != XFS_ATTR_LEAF_MAGIC)
- return __this_address;
- }
/*
* In recovery there is a transient state where count == 0 is valid
* because we may have transitioned an empty shortform attr to a leaf
@@ -369,6 +358,8 @@ xfs_attr3_leaf_read_verify(
const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
.name = "xfs_attr3_leaf",
+ .magic16 = { cpu_to_be16(XFS_ATTR_LEAF_MAGIC),
+ cpu_to_be16(XFS_ATTR3_LEAF_MAGIC) },
.verify_read = xfs_attr3_leaf_read_verify,
.verify_write = xfs_attr3_leaf_write_verify,
.verify_struct = xfs_attr3_leaf_verify,
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index d89363c6b523..65ff600a8067 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -79,6 +79,7 @@ xfs_attr3_rmt_hdr_ok(
static xfs_failaddr_t
xfs_attr3_rmt_verify(
struct xfs_mount *mp,
+ struct xfs_buf *bp,
void *ptr,
int fsbsize,
xfs_daddr_t bno)
@@ -87,7 +88,7 @@ xfs_attr3_rmt_verify(
if (!xfs_sb_version_hascrc(&mp->m_sb))
return __this_address;
- if (rmt->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC))
+ if (!xfs_verify_magic(bp, rmt->rm_magic))
return __this_address;
if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
@@ -131,7 +132,7 @@ __xfs_attr3_rmt_read_verify(
*failaddr = __this_address;
return -EFSBADCRC;
}
- *failaddr = xfs_attr3_rmt_verify(mp, ptr, blksize, bno);
+ *failaddr = xfs_attr3_rmt_verify(mp, bp, ptr, blksize, bno);
if (*failaddr)
return -EFSCORRUPTED;
len -= blksize;
@@ -193,7 +194,7 @@ xfs_attr3_rmt_write_verify(
while (len > 0) {
struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
- fa = xfs_attr3_rmt_verify(mp, ptr, blksize, bno);
+ fa = xfs_attr3_rmt_verify(mp, bp, ptr, blksize, bno);
if (fa) {
xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
@@ -220,6 +221,7 @@ xfs_attr3_rmt_write_verify(
const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
.name = "xfs_attr3_rmt",
+ .magic = { 0, cpu_to_be32(XFS_ATTR3_RMT_MAGIC) },
.verify_read = xfs_attr3_rmt_read_verify,
.verify_write = xfs_attr3_rmt_write_verify,
.verify_struct = xfs_attr3_rmt_verify_struct,
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 332eefa2700b..48502cb9990f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -577,42 +577,44 @@ __xfs_bmap_add_free(
*/
/*
- * Transform a btree format file with only one leaf node, where the
- * extents list will fit in the inode, into an extents format file.
- * Since the file extents are already in-core, all we have to do is
- * give up the space for the btree root and pitch the leaf block.
+ * Convert the inode format to extent format if it currently is in btree format,
+ * but the extent list is small enough that it fits into the extent format.
+ *
+ * Since the extents are already in-core, all we have to do is give up the space
+ * for the btree root and pitch the leaf block.
*/
STATIC int /* error */
xfs_bmap_btree_to_extents(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_btree_cur_t *cur, /* btree cursor */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode pointer */
+ struct xfs_btree_cur *cur, /* btree cursor */
int *logflagsp, /* inode logging flags */
int whichfork) /* data or attr fork */
{
- /* REFERENCED */
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_btree_block *rblock = ifp->if_broot;
struct xfs_btree_block *cblock;/* child btree block */
xfs_fsblock_t cbno; /* child block number */
xfs_buf_t *cbp; /* child block's buffer */
int error; /* error return value */
- struct xfs_ifork *ifp; /* inode fork data */
- xfs_mount_t *mp; /* mount point structure */
__be64 *pp; /* ptr to block address */
- struct xfs_btree_block *rblock;/* root btree block */
struct xfs_owner_info oinfo;
- mp = ip->i_mount;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ /* check if we actually need the extent format first: */
+ if (!xfs_bmap_wants_extents(ip, whichfork))
+ return 0;
+
+ ASSERT(cur);
ASSERT(whichfork != XFS_COW_FORK);
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
- rblock = ifp->if_broot;
ASSERT(be16_to_cpu(rblock->bb_level) == 1);
ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
+
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
cbno = be64_to_cpu(*pp);
- *logflagsp = 0;
#ifdef DEBUG
XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
xfs_btree_check_lptr(cur, cbno, 1));
@@ -635,7 +637,7 @@ xfs_bmap_btree_to_extents(
ASSERT(ifp->if_broot == NULL);
ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
- *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
+ *logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
return 0;
}
@@ -2029,7 +2031,7 @@ done:
/*
* Convert an unwritten allocation to a real allocation or vice versa.
*/
-STATIC int /* error */
+int /* error */
xfs_bmap_add_extent_unwritten_real(
struct xfs_trans *tp,
xfs_inode_t *ip, /* incore inode pointer */
@@ -3685,17 +3687,6 @@ xfs_trim_extent(
}
}
-/* trim extent to within eof */
-void
-xfs_trim_extent_eof(
- struct xfs_bmbt_irec *irec,
- struct xfs_inode *ip)
-
-{
- xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
- i_size_read(VFS_I(ip))));
-}
-
/*
* Trim the returned map to the required bounds
*/
@@ -4203,6 +4194,44 @@ xfs_bmapi_convert_unwritten(
return 0;
}
+static inline xfs_extlen_t
+xfs_bmapi_minleft(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ int fork)
+{
+ if (tp && tp->t_firstblock != NULLFSBLOCK)
+ return 0;
+ if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE)
+ return 1;
+ return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1;
+}
+
+/*
+ * Log whatever the flags say, even if error. Otherwise we might miss detecting
+ * a case where the data is changed, there's an error, and it's not logged so we
+ * don't shutdown when we should. Don't bother logging extents/btree changes if
+ * we converted to the other format.
+ */
+static void
+xfs_bmapi_finish(
+ struct xfs_bmalloca *bma,
+ int whichfork,
+ int error)
+{
+ if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
+ XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+ bma->logflags &= ~xfs_ilog_fext(whichfork);
+ else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
+ XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE)
+ bma->logflags &= ~xfs_ilog_fbroot(whichfork);
+
+ if (bma->logflags)
+ xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
+ if (bma->cur)
+ xfs_btree_del_cursor(bma->cur, error);
+}
+
/*
* Map file blocks to filesystem blocks, and allocate blocks or convert the
* extent state if necessary. Details behaviour is controlled by the flags
@@ -4247,9 +4276,7 @@ xfs_bmapi_write(
ASSERT(*nmap >= 1);
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
- ASSERT(tp != NULL ||
- (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
- (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
+ ASSERT(tp != NULL);
ASSERT(len > 0);
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -4282,25 +4309,12 @@ xfs_bmapi_write(
XFS_STATS_INC(mp, xs_blk_mapw);
- if (!tp || tp->t_firstblock == NULLFSBLOCK) {
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
- bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
- else
- bma.minleft = 1;
- } else {
- bma.minleft = 0;
- }
-
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, whichfork);
if (error)
goto error0;
}
- n = 0;
- end = bno + len;
- obno = bno;
-
if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
eof = true;
if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
@@ -4309,7 +4323,11 @@ xfs_bmapi_write(
bma.ip = ip;
bma.total = total;
bma.datatype = 0;
+ bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
+ n = 0;
+ end = bno + len;
+ obno = bno;
while (bno < end && n < *nmap) {
bool need_alloc = false, wasdelay = false;
@@ -4323,26 +4341,7 @@ xfs_bmapi_write(
ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
(flags & XFS_BMAPI_COWFORK)));
- if (flags & XFS_BMAPI_DELALLOC) {
- /*
- * For the COW fork we can reasonably get a
- * request for converting an extent that races
- * with other threads already having converted
- * part of it, as there converting COW to
- * regular blocks is not protected using the
- * IOLOCK.
- */
- ASSERT(flags & XFS_BMAPI_COWFORK);
- if (!(flags & XFS_BMAPI_COWFORK)) {
- error = -EIO;
- goto error0;
- }
-
- if (eof || bno >= end)
- break;
- } else {
- need_alloc = true;
- }
+ need_alloc = true;
} else if (isnullstartblock(bma.got.br_startblock)) {
wasdelay = true;
}
@@ -4351,8 +4350,7 @@ xfs_bmapi_write(
* First, deal with the hole before the allocated space
* that we found, if any.
*/
- if ((need_alloc || wasdelay) &&
- !(flags & XFS_BMAPI_CONVERT_ONLY)) {
+ if (need_alloc || wasdelay) {
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
@@ -4420,49 +4418,130 @@ xfs_bmapi_write(
}
*nmap = n;
- /*
- * Transform from btree to extents, give it cur.
- */
- if (xfs_bmap_wants_extents(ip, whichfork)) {
- int tmp_logflags = 0;
-
- ASSERT(bma.cur);
- error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
- &tmp_logflags, whichfork);
- bma.logflags |= tmp_logflags;
- if (error)
- goto error0;
- }
+ error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
+ whichfork);
+ if (error)
+ goto error0;
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
XFS_IFORK_NEXTENTS(ip, whichfork) >
XFS_IFORK_MAXEXT(ip, whichfork));
- error = 0;
+ xfs_bmapi_finish(&bma, whichfork, 0);
+ xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
+ orig_nmap, *nmap);
+ return 0;
error0:
+ xfs_bmapi_finish(&bma, whichfork, error);
+ return error;
+}
+
+/*
+ * Convert an existing delalloc extent to real blocks based on file offset. This
+ * attempts to allocate the entire delalloc extent and may require multiple
+ * invocations to allocate the target offset if a large enough physical extent
+ * is not available.
+ */
+int
+xfs_bmapi_convert_delalloc(
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_fileoff_t offset_fsb,
+ struct xfs_bmbt_irec *imap,
+ unsigned int *seq)
+{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmalloca bma = { NULL };
+ struct xfs_trans *tp;
+ int error;
+
/*
- * Log everything. Do this after conversion, there's no point in
- * logging the extent records if we've converted to btree format.
+ * Space for the extent and indirect blocks was reserved when the
+ * delalloc extent was created so there's no need to do so here.
*/
- if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- bma.logflags &= ~xfs_ilog_fext(whichfork);
- else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
- bma.logflags &= ~xfs_ilog_fbroot(whichfork);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
+ XFS_TRANS_RESERVE, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
+ if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
+ bma.got.br_startoff > offset_fsb) {
+ /*
+ * No extent found in the range we are trying to convert. This
+ * should only happen for the COW fork, where another thread
+ * might have moved the extent to the data fork in the meantime.
+ */
+ WARN_ON_ONCE(whichfork != XFS_COW_FORK);
+ error = -EAGAIN;
+ goto out_trans_cancel;
+ }
+
/*
- * Log whatever the flags say, even if error. Otherwise we might miss
- * detecting a case where the data is changed, there's an error,
- * and it's not logged so we don't shutdown when we should.
+ * If we find a real extent here we raced with another thread converting
+ * the extent. Just return the real extent at this offset.
*/
- if (bma.logflags)
- xfs_trans_log_inode(tp, ip, bma.logflags);
+ if (!isnullstartblock(bma.got.br_startblock)) {
+ *imap = bma.got;
+ *seq = READ_ONCE(ifp->if_seq);
+ goto out_trans_cancel;
+ }
+
+ bma.tp = tp;
+ bma.ip = ip;
+ bma.wasdel = true;
+ bma.offset = bma.got.br_startoff;
+ bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
+ bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+ bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
+ if (whichfork == XFS_COW_FORK)
+ bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
- if (bma.cur) {
- xfs_btree_del_cursor(bma.cur, error);
+ if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
+ bma.prev.br_startoff = NULLFILEOFF;
+
+ error = xfs_bmapi_allocate(&bma);
+ if (error)
+ goto out_finish;
+
+ error = -ENOSPC;
+ if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
+ goto out_finish;
+ error = -EFSCORRUPTED;
+ if (WARN_ON_ONCE(!bma.got.br_startblock && !XFS_IS_REALTIME_INODE(ip)))
+ goto out_finish;
+
+ XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
+ XFS_STATS_INC(mp, xs_xstrat_quick);
+
+ ASSERT(!isnullstartblock(bma.got.br_startblock));
+ *imap = bma.got;
+ *seq = READ_ONCE(ifp->if_seq);
+
+ if (whichfork == XFS_COW_FORK) {
+ error = xfs_refcount_alloc_cow_extent(tp, bma.blkno,
+ bma.length);
+ if (error)
+ goto out_finish;
}
- if (!error)
- xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
- orig_nmap, *nmap);
+
+ error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
+ whichfork);
+ if (error)
+ goto out_finish;
+
+ xfs_bmapi_finish(&bma, whichfork, 0);
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+
+out_finish:
+ xfs_bmapi_finish(&bma, whichfork, error);
+out_trans_cancel:
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -4536,13 +4615,7 @@ xfs_bmapi_remap(
if (error)
goto error0;
- if (xfs_bmap_wants_extents(ip, whichfork)) {
- int tmp_logflags = 0;
-
- error = xfs_bmap_btree_to_extents(tp, ip, cur,
- &tmp_logflags, whichfork);
- logflags |= tmp_logflags;
- }
+ error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
error0:
if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
@@ -5406,24 +5479,11 @@ nodelete:
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
&tmp_logflags, whichfork);
logflags |= tmp_logflags;
- if (error)
- goto error0;
- }
- /*
- * transform from btree to extents, give it cur
- */
- else if (xfs_bmap_wants_extents(ip, whichfork)) {
- ASSERT(cur != NULL);
- error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
+ } else {
+ error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
whichfork);
- logflags |= tmp_logflags;
- if (error)
- goto error0;
}
- /*
- * transform from extents to local?
- */
- error = 0;
+
error0:
/*
* Log everything. Do this after conversion, there's no point in
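
A side effect of the xfs_bmap_btree_to_extents() rework above is that the function now performs the xfs_bmap_wants_extents() check itself and simply returns when there is nothing to convert, so call sites collapse from an open-coded conditional with a temporary flags variable to one unconditional call. Both shapes appear in the removed and added lines above; side by side:

    /* Before: conditional conversion with a temporary logflags variable. */
    if (xfs_bmap_wants_extents(ip, whichfork)) {
            int     tmp_logflags = 0;

            error = xfs_bmap_btree_to_extents(tp, ip, cur,
                            &tmp_logflags, whichfork);
            logflags |= tmp_logflags;
    }

    /* After: the helper does the wants-extents check internally. */
    error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
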
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 09d3ea97cc15..8f597f9abdbe 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -95,12 +95,6 @@ struct xfs_extent_free_item
/* Map something in the CoW fork. */
#define XFS_BMAPI_COWFORK 0x200
-/* Only convert delalloc space, don't allocate entirely new extents */
-#define XFS_BMAPI_DELALLOC 0x400
-
-/* Only convert unwritten extents, don't allocate new blocks */
-#define XFS_BMAPI_CONVERT_ONLY 0x800
-
/* Skip online discard of freed extents */
#define XFS_BMAPI_NODISCARD 0x1000
@@ -117,8 +111,6 @@ struct xfs_extent_free_item
{ XFS_BMAPI_ZERO, "ZERO" }, \
{ XFS_BMAPI_REMAP, "REMAP" }, \
{ XFS_BMAPI_COWFORK, "COWFORK" }, \
- { XFS_BMAPI_DELALLOC, "DELALLOC" }, \
- { XFS_BMAPI_CONVERT_ONLY, "CONVERT_ONLY" }, \
{ XFS_BMAPI_NODISCARD, "NODISCARD" }, \
{ XFS_BMAPI_NORMAP, "NORMAP" }
@@ -181,7 +173,6 @@ static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec)
void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
-void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
@@ -228,6 +219,13 @@ int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
int eof);
+int xfs_bmapi_convert_delalloc(struct xfs_inode *ip, int whichfork,
+ xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap,
+ unsigned int *seq);
+int xfs_bmap_add_extent_unwritten_real(struct xfs_trans *tp,
+ struct xfs_inode *ip, int whichfork,
+ struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp,
+ struct xfs_bmbt_irec *new, int *logflagsp);
static inline void
xfs_bmap_add_free(
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index cdb74d2e2a43..aff82ed112c9 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -416,8 +416,10 @@ xfs_bmbt_verify(
xfs_failaddr_t fa;
unsigned int level;
- switch (block->bb_magic) {
- case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
+ if (!xfs_verify_magic(bp, block->bb_magic))
+ return __this_address;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
/*
* XXX: need a better way of verifying the owner here. Right now
* just make sure there has been one set.
@@ -425,11 +427,6 @@ xfs_bmbt_verify(
fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
if (fa)
return fa;
- /* fall through */
- case cpu_to_be32(XFS_BMAP_MAGIC):
- break;
- default:
- return __this_address;
}
/*
@@ -481,6 +478,8 @@ xfs_bmbt_write_verify(
const struct xfs_buf_ops xfs_bmbt_buf_ops = {
.name = "xfs_bmbt",
+ .magic = { cpu_to_be32(XFS_BMAP_MAGIC),
+ cpu_to_be32(XFS_BMAP_CRC_MAGIC) },
.verify_read = xfs_bmbt_read_verify,
.verify_write = xfs_bmbt_write_verify,
.verify_struct = xfs_bmbt_verify,
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 376bee94b5dd..e2737e2ac2ae 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -116,6 +116,34 @@ xfs_da_state_free(xfs_da_state_t *state)
kmem_zone_free(xfs_da_state_zone, state);
}
+/*
+ * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
+ * accessible on v5 filesystems. This header format is common across da node,
+ * attr leaf and dir leaf blocks.
+ */
+xfs_failaddr_t
+xfs_da3_blkinfo_verify(
+ struct xfs_buf *bp,
+ struct xfs_da3_blkinfo *hdr3)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_da_blkinfo *hdr = &hdr3->hdr;
+
+ if (!xfs_verify_magic16(bp, hdr->magic))
+ return __this_address;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
+ return __this_address;
+ if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
+ return __this_address;
+ if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
+ return __this_address;
+ }
+
+ return NULL;
+}
+
static xfs_failaddr_t
xfs_da3_node_verify(
struct xfs_buf *bp)
@@ -124,27 +152,16 @@ xfs_da3_node_verify(
struct xfs_da_intnode *hdr = bp->b_addr;
struct xfs_da3_icnode_hdr ichdr;
const struct xfs_dir_ops *ops;
+ xfs_failaddr_t fa;
ops = xfs_dir_get_ops(mp, NULL);
ops->node_hdr_from_disk(&ichdr, hdr);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
-
- if (ichdr.magic != XFS_DA3_NODE_MAGIC)
- return __this_address;
+ fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
+ if (fa)
+ return fa;
- if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return __this_address;
- if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
- return __this_address;
- if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
- return __this_address;
- } else {
- if (ichdr.magic != XFS_DA_NODE_MAGIC)
- return __this_address;
- }
if (ichdr.level == 0)
return __this_address;
if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
@@ -257,6 +274,8 @@ xfs_da3_node_verify_struct(
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
.name = "xfs_da3_node",
+ .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
+ cpu_to_be16(XFS_DA3_NODE_MAGIC) },
.verify_read = xfs_da3_node_read_verify,
.verify_write = xfs_da3_node_write_verify,
.verify_struct = xfs_da3_node_verify_struct,
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 5d5bf3bffc78..ae654e06b2fb 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -869,4 +869,7 @@ static inline unsigned int xfs_dir2_dirblock_bytes(struct xfs_sb *sbp)
return 1 << (sbp->sb_blocklog + sbp->sb_dirblklog);
}
+xfs_failaddr_t xfs_da3_blkinfo_verify(struct xfs_buf *bp,
+ struct xfs_da3_blkinfo *hdr3);
+
#endif /* __XFS_DA_FORMAT_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 229152cd1a24..156ce95c9c45 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -703,3 +703,20 @@ xfs_dir2_shrink_inode(
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
return 0;
}
+
+/* Returns true if the directory entry name is valid. */
+bool
+xfs_dir2_namecheck(
+ const void *name,
+ size_t length)
+{
+ /*
+ * MAXNAMELEN includes the trailing null, but (name/length) leave it
+ * out, so use >= for the length check.
+ */
+ if (length >= MAXNAMELEN)
+ return false;
+
+ /* There shouldn't be any slashes or nulls here */
+ return !memchr(name, '/', length) && !memchr(name, 0, length);
+}
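
xfs_dir2_namecheck() above (and the matching xfs_attr_namecheck() earlier in this series) centralizes the sanity rules for on-disk names: the length must stay below MAXNAMELEN, and the bytes may contain neither NULs nor, for directory entries, '/'. A hypothetical consumer, such as a readdir or scrub path validating an entry before exposing it:

    /* Hypothetical check before trusting an on-disk directory entry name. */
    static bool demo_dirent_ok(struct xfs_mount *mp,
                               const unsigned char *name, size_t namelen)
    {
            if (!xfs_dir2_namecheck(name, namelen)) {
                    xfs_warn(mp, "corrupt directory entry name");
                    return false;
            }
            return true;
    }
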
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index c3e3f6b813d8..f54244779492 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -326,5 +326,6 @@ xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
unsigned char xfs_dir3_get_dtype(struct xfs_mount *mp, uint8_t filetype);
void *xfs_dir3_data_endp(struct xfs_da_geometry *geo,
struct xfs_dir2_data_hdr *hdr);
+bool xfs_dir2_namecheck(const void *name, size_t length);
#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 30ed5919da72..b7d6d78f4ce2 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -53,18 +53,16 @@ xfs_dir3_block_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+ if (!xfs_verify_magic(bp, hdr3->magic))
+ return __this_address;
+
if (xfs_sb_version_hascrc(&mp->m_sb)) {
- if (hdr3->magic != cpu_to_be32(XFS_DIR3_BLOCK_MAGIC))
- return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
return __this_address;
- } else {
- if (hdr3->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))
- return __this_address;
}
return __xfs_dir3_data_check(NULL, bp);
}
@@ -112,6 +110,8 @@ xfs_dir3_block_write_verify(
const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
.name = "xfs_dir3_block",
+ .magic = { cpu_to_be32(XFS_DIR2_BLOCK_MAGIC),
+ cpu_to_be32(XFS_DIR3_BLOCK_MAGIC) },
.verify_read = xfs_dir3_block_read_verify,
.verify_write = xfs_dir3_block_write_verify,
.verify_struct = xfs_dir3_block_verify,
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 01162c62ec8f..b7b9ce002cb9 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -252,18 +252,16 @@ xfs_dir3_data_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+ if (!xfs_verify_magic(bp, hdr3->magic))
+ return __this_address;
+
if (xfs_sb_version_hascrc(&mp->m_sb)) {
- if (hdr3->magic != cpu_to_be32(XFS_DIR3_DATA_MAGIC))
- return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
return __this_address;
- } else {
- if (hdr3->magic != cpu_to_be32(XFS_DIR2_DATA_MAGIC))
- return __this_address;
}
return __xfs_dir3_data_check(NULL, bp);
}
@@ -339,6 +337,8 @@ xfs_dir3_data_write_verify(
const struct xfs_buf_ops xfs_dir3_data_buf_ops = {
.name = "xfs_dir3_data",
+ .magic = { cpu_to_be32(XFS_DIR2_DATA_MAGIC),
+ cpu_to_be32(XFS_DIR3_DATA_MAGIC) },
.verify_read = xfs_dir3_data_read_verify,
.verify_write = xfs_dir3_data_write_verify,
.verify_struct = xfs_dir3_data_verify,
@@ -346,6 +346,8 @@ const struct xfs_buf_ops xfs_dir3_data_buf_ops = {
static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
.name = "xfs_dir3_data_reada",
+ .magic = { cpu_to_be32(XFS_DIR2_DATA_MAGIC),
+ cpu_to_be32(XFS_DIR3_DATA_MAGIC) },
.verify_read = xfs_dir3_data_reada_verify,
.verify_write = xfs_dir3_data_write_verify,
};
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index 1728a3e6f5cf..9a3767818c50 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -142,41 +142,22 @@ xfs_dir3_leaf_check_int(
*/
static xfs_failaddr_t
xfs_dir3_leaf_verify(
- struct xfs_buf *bp,
- uint16_t magic)
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_dir2_leaf *leaf = bp->b_addr;
+ xfs_failaddr_t fa;
- ASSERT(magic == XFS_DIR2_LEAF1_MAGIC || magic == XFS_DIR2_LEAFN_MAGIC);
-
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
- uint16_t magic3;
-
- magic3 = (magic == XFS_DIR2_LEAF1_MAGIC) ? XFS_DIR3_LEAF1_MAGIC
- : XFS_DIR3_LEAFN_MAGIC;
-
- if (leaf3->info.hdr.magic != cpu_to_be16(magic3))
- return __this_address;
- if (!uuid_equal(&leaf3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return __this_address;
- if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn)
- return __this_address;
- if (!xfs_log_check_lsn(mp, be64_to_cpu(leaf3->info.lsn)))
- return __this_address;
- } else {
- if (leaf->hdr.info.magic != cpu_to_be16(magic))
- return __this_address;
- }
+ fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
+ if (fa)
+ return fa;
return xfs_dir3_leaf_check_int(mp, NULL, NULL, leaf);
}
static void
-__read_verify(
- struct xfs_buf *bp,
- uint16_t magic)
+xfs_dir3_leaf_read_verify(
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
xfs_failaddr_t fa;
@@ -185,23 +166,22 @@ __read_verify(
!xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF))
xfs_verifier_error(bp, -EFSBADCRC, __this_address);
else {
- fa = xfs_dir3_leaf_verify(bp, magic);
+ fa = xfs_dir3_leaf_verify(bp);
if (fa)
xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}
}
static void
-__write_verify(
- struct xfs_buf *bp,
- uint16_t magic)
+xfs_dir3_leaf_write_verify(
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
xfs_failaddr_t fa;
- fa = xfs_dir3_leaf_verify(bp, magic);
+ fa = xfs_dir3_leaf_verify(bp);
if (fa) {
xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
@@ -216,60 +196,22 @@ __write_verify(
xfs_buf_update_cksum(bp, XFS_DIR3_LEAF_CRC_OFF);
}
-static xfs_failaddr_t
-xfs_dir3_leaf1_verify(
- struct xfs_buf *bp)
-{
- return xfs_dir3_leaf_verify(bp, XFS_DIR2_LEAF1_MAGIC);
-}
-
-static void
-xfs_dir3_leaf1_read_verify(
- struct xfs_buf *bp)
-{
- __read_verify(bp, XFS_DIR2_LEAF1_MAGIC);
-}
-
-static void
-xfs_dir3_leaf1_write_verify(
- struct xfs_buf *bp)
-{
- __write_verify(bp, XFS_DIR2_LEAF1_MAGIC);
-}
-
-static xfs_failaddr_t
-xfs_dir3_leafn_verify(
- struct xfs_buf *bp)
-{
- return xfs_dir3_leaf_verify(bp, XFS_DIR2_LEAFN_MAGIC);
-}
-
-static void
-xfs_dir3_leafn_read_verify(
- struct xfs_buf *bp)
-{
- __read_verify(bp, XFS_DIR2_LEAFN_MAGIC);
-}
-
-static void
-xfs_dir3_leafn_write_verify(
- struct xfs_buf *bp)
-{
- __write_verify(bp, XFS_DIR2_LEAFN_MAGIC);
-}
-
const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops = {
.name = "xfs_dir3_leaf1",
- .verify_read = xfs_dir3_leaf1_read_verify,
- .verify_write = xfs_dir3_leaf1_write_verify,
- .verify_struct = xfs_dir3_leaf1_verify,
+ .magic16 = { cpu_to_be16(XFS_DIR2_LEAF1_MAGIC),
+ cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) },
+ .verify_read = xfs_dir3_leaf_read_verify,
+ .verify_write = xfs_dir3_leaf_write_verify,
+ .verify_struct = xfs_dir3_leaf_verify,
};
const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = {
.name = "xfs_dir3_leafn",
- .verify_read = xfs_dir3_leafn_read_verify,
- .verify_write = xfs_dir3_leafn_write_verify,
- .verify_struct = xfs_dir3_leafn_verify,
+ .magic16 = { cpu_to_be16(XFS_DIR2_LEAFN_MAGIC),
+ cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) },
+ .verify_read = xfs_dir3_leaf_read_verify,
+ .verify_write = xfs_dir3_leaf_write_verify,
+ .verify_struct = xfs_dir3_leaf_verify,
};
int
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index f1bb3434f51c..3b03703c5c3d 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -87,20 +87,18 @@ xfs_dir3_free_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_dir2_free_hdr *hdr = bp->b_addr;
+ if (!xfs_verify_magic(bp, hdr->magic))
+ return __this_address;
+
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
- if (hdr3->magic != cpu_to_be32(XFS_DIR3_FREE_MAGIC))
- return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
return __this_address;
- } else {
- if (hdr->magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC))
- return __this_address;
}
/* XXX: should bounds check the xfs_dir3_icfree_hdr here */
@@ -151,6 +149,8 @@ xfs_dir3_free_write_verify(
const struct xfs_buf_ops xfs_dir3_free_buf_ops = {
.name = "xfs_dir3_free",
+ .magic = { cpu_to_be32(XFS_DIR2_FREE_MAGIC),
+ cpu_to_be32(XFS_DIR3_FREE_MAGIC) },
.verify_read = xfs_dir3_free_read_verify,
.verify_write = xfs_dir3_free_write_verify,
.verify_struct = xfs_dir3_free_verify,
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index d293f371dd54..fb5bd9a804f6 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -277,6 +277,8 @@ xfs_dquot_buf_write_verify(
const struct xfs_buf_ops xfs_dquot_buf_ops = {
.name = "xfs_dquot",
+ .magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
+ cpu_to_be16(XFS_DQUOT_MAGIC) },
.verify_read = xfs_dquot_buf_read_verify,
.verify_write = xfs_dquot_buf_write_verify,
.verify_struct = xfs_dquot_buf_verify_struct,
@@ -284,6 +286,8 @@ const struct xfs_buf_ops xfs_dquot_buf_ops = {
const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
.name = "xfs_dquot_ra",
+ .magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
+ cpu_to_be16(XFS_DQUOT_MAGIC) },
.verify_read = xfs_dquot_buf_readahead_verify,
.verify_write = xfs_dquot_buf_write_verify,
};
diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
index 66077a105cbb..79e6c4fb1d8a 100644
--- a/fs/xfs/libxfs/xfs_errortag.h
+++ b/fs/xfs/libxfs/xfs_errortag.h
@@ -54,7 +54,8 @@
#define XFS_ERRTAG_BUF_LRU_REF 31
#define XFS_ERRTAG_FORCE_SCRUB_REPAIR 32
#define XFS_ERRTAG_FORCE_SUMMARY_RECALC 33
-#define XFS_ERRTAG_MAX 34
+#define XFS_ERRTAG_IUNLINK_FALLBACK 34
+#define XFS_ERRTAG_MAX 35
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -93,5 +94,6 @@
#define XFS_RANDOM_BUF_LRU_REF 2
#define XFS_RANDOM_FORCE_SCRUB_REPAIR 1
#define XFS_RANDOM_FORCE_SUMMARY_RECALC 1
+#define XFS_RANDOM_IUNLINK_FALLBACK (XFS_RANDOM_DEFAULT/10)
#endif /* __XFS_ERRORTAG_H_ */
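
Per the "Random factors" comment above, a tag's factor N means the injected error fires roughly once every N passes through the check, so XFS_RANDOM_IUNLINK_FALLBACK being one tenth of the default interval exercises the iunlink fallback path about ten times as often as a tag using XFS_RANDOM_DEFAULT. A rough userspace model of that convention follows; the real check lives in xfs_error.c and the helper name here is made up.

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical model of the random-factor convention: factor 1 always
 * fires, factor 2 fires ~1/2 of the time, factor N ~1/N of the time. */
static bool errortag_should_fire(unsigned int random_factor)
{
	if (random_factor == 0)
		return false;			/* tag not enabled */
	return (rand() % random_factor) == 0;
}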
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index d32152fc8a6c..fe9898875097 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -2508,7 +2508,7 @@ xfs_agi_verify(
/*
* Validate the magic number of the agi block.
*/
- if (agi->agi_magicnum != cpu_to_be32(XFS_AGI_MAGIC))
+ if (!xfs_verify_magic(bp, agi->agi_magicnum))
return __this_address;
if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
return __this_address;
@@ -2582,6 +2582,7 @@ xfs_agi_write_verify(
const struct xfs_buf_ops xfs_agi_buf_ops = {
.name = "xfs_agi",
+ .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
.verify_read = xfs_agi_read_verify,
.verify_write = xfs_agi_write_verify,
.verify_struct = xfs_agi_verify,
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 9b25e7a0df47..1080381ff243 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -124,7 +124,7 @@ xfs_finobt_alloc_block(
union xfs_btree_ptr *new,
int *stat)
{
- if (cur->bc_mp->m_inotbt_nores)
+ if (cur->bc_mp->m_finobt_nores)
return xfs_inobt_alloc_block(cur, start, new, stat);
return __xfs_inobt_alloc_block(cur, start, new, stat,
XFS_AG_RESV_METADATA);
@@ -154,7 +154,7 @@ xfs_finobt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
- if (cur->bc_mp->m_inotbt_nores)
+ if (cur->bc_mp->m_finobt_nores)
return xfs_inobt_free_block(cur, bp);
return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}
@@ -260,6 +260,9 @@ xfs_inobt_verify(
xfs_failaddr_t fa;
unsigned int level;
+ if (!xfs_verify_magic(bp, block->bb_magic))
+ return __this_address;
+
/*
* During growfs operations, we can't verify the exact owner as the
* perag is not fully initialised and hence not attached to the buffer.
@@ -270,18 +273,10 @@ xfs_inobt_verify(
* but beware of the landmine (i.e. need to check pag->pagi_init) if we
* ever do.
*/
- switch (block->bb_magic) {
- case cpu_to_be32(XFS_IBT_CRC_MAGIC):
- case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
fa = xfs_btree_sblock_v5hdr_verify(bp);
if (fa)
return fa;
- /* fall through */
- case cpu_to_be32(XFS_IBT_MAGIC):
- case cpu_to_be32(XFS_FIBT_MAGIC):
- break;
- default:
- return __this_address;
}
/* level verification */
@@ -328,6 +323,16 @@ xfs_inobt_write_verify(
const struct xfs_buf_ops xfs_inobt_buf_ops = {
.name = "xfs_inobt",
+ .magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
+ .verify_read = xfs_inobt_read_verify,
+ .verify_write = xfs_inobt_write_verify,
+ .verify_struct = xfs_inobt_verify,
+};
+
+const struct xfs_buf_ops xfs_finobt_buf_ops = {
+ .name = "xfs_finobt",
+ .magic = { cpu_to_be32(XFS_FIBT_MAGIC),
+ cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
.verify_read = xfs_inobt_read_verify,
.verify_write = xfs_inobt_write_verify,
.verify_struct = xfs_inobt_verify,
@@ -389,7 +394,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_finobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
- .buf_ops = &xfs_inobt_buf_ops,
+ .buf_ops = &xfs_finobt_buf_ops,
.diff_two_keys = xfs_inobt_diff_two_keys,
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
index 771dd072015d..bc690f2409fa 100644
--- a/fs/xfs/libxfs/xfs_iext_tree.c
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -614,16 +614,15 @@ xfs_iext_realloc_root(
}
/*
- * Increment the sequence counter if we are on a COW fork. This allows
- * the writeback code to skip looking for a COW extent if the COW fork
- * hasn't changed. We use WRITE_ONCE here to ensure the update to the
- * sequence counter is seen before the modifications to the extent
- * tree itself take effect.
+ * Increment the sequence counter on extent tree changes. If we are on a COW
+ * fork, this allows the writeback code to skip looking for a COW extent if the
+ * COW fork hasn't changed. We use WRITE_ONCE here to ensure the update to the
+ * sequence counter is seen before the modifications to the extent tree itself
+ * take effect.
*/
static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp, int state)
{
- if (state & BMAP_COWFORK)
- WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
+ WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
}
void
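
With this change if_seq behaves like a generation number for the whole fork: writers bump it before touching the extent tree, and readers that cache a mapping snapshot it and compare later (see xfs_imap_valid() further down in this series). Below is a simplified userspace model of the pattern, using C11 relaxed atomics in place of WRITE_ONCE/READ_ONCE; the kernel relies on the inode locks for the stronger ordering guarantees.

#include <stdatomic.h>
#include <stdbool.h>

struct fork_model {
	atomic_uint seq;	/* bumped on every extent tree change */
	/* ... extent tree payload elided ... */
};

/* Writer side: bump the counter, then modify the tree. */
static void fork_modify(struct fork_model *f)
{
	atomic_fetch_add_explicit(&f->seq, 1, memory_order_relaxed);
	/* ... insert/remove/update extents ... */
}

/* Reader side: remember the counter when caching a mapping ... */
static unsigned int fork_snapshot(struct fork_model *f)
{
	return atomic_load_explicit(&f->seq, memory_order_relaxed);
}

/* ... and later decide whether the cached mapping can still be trusted. */
static bool fork_cache_valid(struct fork_model *f, unsigned int cached_seq)
{
	return fork_snapshot(f) == cached_seq;
}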
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 09d9c8cfa4a0..e021d5133ccb 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -97,10 +97,9 @@ xfs_inode_buf_verify(
dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
- di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
+ di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
xfs_dinode_good_version(mp, dip->di_version) &&
- (unlinked_ino == NULLAGINO ||
- xfs_verify_agino(mp, agno, unlinked_ino));
+ xfs_verify_agino_or_null(mp, agno, unlinked_ino);
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP))) {
if (readahead) {
@@ -147,12 +146,16 @@ xfs_inode_buf_write_verify(
const struct xfs_buf_ops xfs_inode_buf_ops = {
.name = "xfs_inode",
+ .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
+ cpu_to_be16(XFS_DINODE_MAGIC) },
.verify_read = xfs_inode_buf_read_verify,
.verify_write = xfs_inode_buf_write_verify,
};
const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
- .name = "xxfs_inode_ra",
+ .name = "xfs_inode_ra",
+ .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
+ cpu_to_be16(XFS_DINODE_MAGIC) },
.verify_read = xfs_inode_buf_readahead_verify,
.verify_write = xfs_inode_buf_write_verify,
};
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 60361d2d74a1..00c62ce170d0 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -14,7 +14,7 @@ struct xfs_dinode;
*/
struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
- unsigned int if_seq; /* cow fork mod counter */
+ unsigned int if_seq; /* fork mod counter */
struct xfs_btree_block *if_broot; /* file's incore btree root */
short if_broot_bytes; /* bytes allocated for root */
unsigned char if_flags; /* per-fork flags */
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index d9eab657b63e..6f47ab876d90 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -209,7 +209,7 @@ xfs_refcountbt_verify(
xfs_failaddr_t fa;
unsigned int level;
- if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
+ if (!xfs_verify_magic(bp, block->bb_magic))
return __this_address;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
@@ -264,6 +264,7 @@ xfs_refcountbt_write_verify(
const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
.name = "xfs_refcountbt",
+ .magic = { 0, cpu_to_be32(XFS_REFC_CRC_MAGIC) },
.verify_read = xfs_refcountbt_read_verify,
.verify_write = xfs_refcountbt_write_verify,
.verify_struct = xfs_refcountbt_verify,
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index f79cf040d745..5738e11055e6 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -310,7 +310,7 @@ xfs_rmapbt_verify(
* from the on disk AGF. Again, we can only check against maximum limits
* in this case.
*/
- if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
+ if (!xfs_verify_magic(bp, block->bb_magic))
return __this_address;
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
@@ -365,6 +365,7 @@ xfs_rmapbt_write_verify(
const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
.name = "xfs_rmapbt",
+ .magic = { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
.verify_read = xfs_rmapbt_read_verify,
.verify_write = xfs_rmapbt_write_verify,
.verify_struct = xfs_rmapbt_verify,
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index b5a82acd7dfe..77a3a4085de3 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -225,10 +225,11 @@ xfs_validate_sb_common(
struct xfs_buf *bp,
struct xfs_sb *sbp)
{
+ struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
uint32_t agcount = 0;
uint32_t rem;
- if (sbp->sb_magicnum != XFS_SB_MAGIC) {
+ if (!xfs_verify_magic(bp, dsb->sb_magicnum)) {
xfs_warn(mp, "bad magic number");
return -EWRONGFS;
}
@@ -781,12 +782,14 @@ out_error:
const struct xfs_buf_ops xfs_sb_buf_ops = {
.name = "xfs_sb",
+ .magic = { cpu_to_be32(XFS_SB_MAGIC), cpu_to_be32(XFS_SB_MAGIC) },
.verify_read = xfs_sb_read_verify,
.verify_write = xfs_sb_write_verify,
};
const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
.name = "xfs_sb_quiet",
+ .magic = { cpu_to_be32(XFS_SB_MAGIC), cpu_to_be32(XFS_SB_MAGIC) },
.verify_read = xfs_sb_quiet_read_verify,
.verify_write = xfs_sb_write_verify,
};
@@ -874,7 +877,7 @@ xfs_initialize_perag_data(
uint64_t bfreelst = 0;
uint64_t btree = 0;
uint64_t fdblocks;
- int error;
+ int error = 0;
for (index = 0; index < agcount; index++) {
/*
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 1c5debe748f0..4e909791aeac 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -25,7 +25,8 @@ extern const struct xfs_buf_ops xfs_agf_buf_ops;
extern const struct xfs_buf_ops xfs_agi_buf_ops;
extern const struct xfs_buf_ops xfs_agf_buf_ops;
extern const struct xfs_buf_ops xfs_agfl_buf_ops;
-extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
+extern const struct xfs_buf_ops xfs_bnobt_buf_ops;
+extern const struct xfs_buf_ops xfs_cntbt_buf_ops;
extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
extern const struct xfs_buf_ops xfs_refcountbt_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
@@ -36,6 +37,7 @@ extern const struct xfs_buf_ops xfs_dquot_buf_ops;
extern const struct xfs_buf_ops xfs_symlink_buf_ops;
extern const struct xfs_buf_ops xfs_agi_buf_ops;
extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+extern const struct xfs_buf_ops xfs_finobt_buf_ops;
extern const struct xfs_buf_ops xfs_inode_buf_ops;
extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
extern const struct xfs_buf_ops xfs_dquot_buf_ops;
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index 77d80106f989..a0ccc253c43d 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -95,7 +95,7 @@ xfs_symlink_verify(
if (!xfs_sb_version_hascrc(&mp->m_sb))
return __this_address;
- if (dsl->sl_magic != cpu_to_be32(XFS_SYMLINK_MAGIC))
+ if (!xfs_verify_magic(bp, dsl->sl_magic))
return __this_address;
if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
@@ -159,6 +159,7 @@ xfs_symlink_write_verify(
const struct xfs_buf_ops xfs_symlink_buf_ops = {
.name = "xfs_symlink",
+ .magic = { 0, cpu_to_be32(XFS_SYMLINK_MAGIC) },
.verify_read = xfs_symlink_read_verify,
.verify_write = xfs_symlink_write_verify,
.verify_struct = xfs_symlink_verify,
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index 3306fc42cfad..de310712dd6d 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -116,6 +116,19 @@ xfs_verify_agino(
}
/*
+ * Verify that an AG inode number pointer neither points outside the AG
+ * nor points at static metadata, or is NULLAGINO.
+ */
+bool
+xfs_verify_agino_or_null(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agino_t agino)
+{
+ return agino == NULLAGINO || xfs_verify_agino(mp, agno, agino);
+}
+
+/*
* Verify that an FS inode number pointer neither points outside the
* filesystem nor points at static AG metadata.
*/
@@ -204,3 +217,14 @@ xfs_verify_icount(
xfs_icount_range(mp, &min, &max);
return icount >= min && icount <= max;
}
+
+/* Sanity-checking of dir/attr block offsets. */
+bool
+xfs_verify_dablk(
+ struct xfs_mount *mp,
+ xfs_fileoff_t dabno)
+{
+ xfs_dablk_t max_dablk = -1U;
+
+ return dabno <= max_dablk;
+}
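
Two small range helpers are added here: xfs_verify_agino_or_null() treats NULLAGINO as a valid "no inode" sentinel on top of the existing xfs_verify_agino() check, and xfs_verify_dablk() rejects any file offset that cannot be represented as a 32-bit xfs_dablk_t, the type dir/attr block numbers travel in. A minimal sketch of the latter, with the 32-bit assumption spelled out:

#include <stdbool.h>
#include <stdint.h>

/* Model only: xfs_dablk_t is a 32-bit block offset, so -1U is its maximum. */
static bool dablk_in_range(uint64_t fileoff)
{
	return fileoff <= (uint32_t)-1;		/* i.e. <= 0xffffffff */
}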
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 8f02855a019a..c5a25403b4db 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -183,10 +183,13 @@ void xfs_agino_range(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t *first, xfs_agino_t *last);
bool xfs_verify_agino(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t agino);
+bool xfs_verify_agino_or_null(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agino_t agino);
bool xfs_verify_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount);
+bool xfs_verify_dablk(struct xfs_mount *mp, xfs_fileoff_t off);
#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 90955ab1e895..ddf06bfaa29d 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -399,7 +399,7 @@ xchk_agf_xref_cntbt(
if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return;
if (!have) {
- if (agf->agf_freeblks != be32_to_cpu(0))
+ if (agf->agf_freeblks != cpu_to_be32(0))
xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
return;
}
@@ -864,19 +864,17 @@ xchk_agi(
/* Check inode pointers */
agino = be32_to_cpu(agi->agi_newino);
- if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
+ if (!xfs_verify_agino_or_null(mp, agno, agino))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
agino = be32_to_cpu(agi->agi_dirino);
- if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
+ if (!xfs_verify_agino_or_null(mp, agno, agino))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check unlinked inode buckets */
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
agino = be32_to_cpu(agi->agi_unlinked[i]);
- if (agino == NULLAGINO)
- continue;
- if (!xfs_verify_agino(mp, agno, agino))
+ if (!xfs_verify_agino_or_null(mp, agno, agino))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
}
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 03d1e15cceba..64e31f87d490 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -341,23 +341,19 @@ xrep_agf(
struct xrep_find_ag_btree fab[XREP_AGF_MAX] = {
[XREP_AGF_BNOBT] = {
.rmap_owner = XFS_RMAP_OWN_AG,
- .buf_ops = &xfs_allocbt_buf_ops,
- .magic = XFS_ABTB_CRC_MAGIC,
+ .buf_ops = &xfs_bnobt_buf_ops,
},
[XREP_AGF_CNTBT] = {
.rmap_owner = XFS_RMAP_OWN_AG,
- .buf_ops = &xfs_allocbt_buf_ops,
- .magic = XFS_ABTC_CRC_MAGIC,
+ .buf_ops = &xfs_cntbt_buf_ops,
},
[XREP_AGF_RMAPBT] = {
.rmap_owner = XFS_RMAP_OWN_AG,
.buf_ops = &xfs_rmapbt_buf_ops,
- .magic = XFS_RMAP_CRC_MAGIC,
},
[XREP_AGF_REFCOUNTBT] = {
.rmap_owner = XFS_RMAP_OWN_REFC,
.buf_ops = &xfs_refcountbt_buf_ops,
- .magic = XFS_REFC_CRC_MAGIC,
},
[XREP_AGF_END] = {
.buf_ops = NULL,
@@ -875,12 +871,10 @@ xrep_agi(
[XREP_AGI_INOBT] = {
.rmap_owner = XFS_RMAP_OWN_INOBT,
.buf_ops = &xfs_inobt_buf_ops,
- .magic = XFS_IBT_CRC_MAGIC,
},
[XREP_AGI_FINOBT] = {
.rmap_owner = XFS_RMAP_OWN_INOBT,
- .buf_ops = &xfs_inobt_buf_ops,
- .magic = XFS_FIBT_CRC_MAGIC,
+ .buf_ops = &xfs_finobt_buf_ops,
},
[XREP_AGI_END] = {
.buf_ops = NULL
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index 81d5e90547a1..dce74ec57038 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -82,12 +82,23 @@ xchk_xattr_listent(
sx = container_of(context, struct xchk_xattr, context);
+ if (xchk_should_terminate(sx->sc, &error)) {
+ context->seen_enough = 1;
+ return;
+ }
+
if (flags & XFS_ATTR_INCOMPLETE) {
/* Incomplete attr key, just mark the inode for preening. */
xchk_ino_set_preen(sx->sc, context->dp->i_ino);
return;
}
+ /* Does this name make sense? */
+ if (!xfs_attr_namecheck(name, namelen)) {
+ xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
+ return;
+ }
+
args.flags = ATTR_KERNOTIME;
if (flags & XFS_ATTR_ROOT)
args.flags |= ATTR_ROOT;
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index e1d11f3223e3..a703cd58a90e 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -281,6 +281,31 @@ xchk_bmap_extent_xref(
xchk_ag_free(info->sc, &info->sc->sa);
}
+/*
+ * Directories and attr forks should never have blocks that can't be addressed
+ * by a xfs_dablk_t.
+ */
+STATIC void
+xchk_bmap_dirattr_extent(
+ struct xfs_inode *ip,
+ struct xchk_bmap_info *info,
+ struct xfs_bmbt_irec *irec)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t off;
+
+ if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
+ return;
+
+ if (!xfs_verify_dablk(mp, irec->br_startoff))
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ off = irec->br_startoff + irec->br_blockcount - 1;
+ if (!xfs_verify_dablk(mp, off))
+ xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
+}
+
/* Scrub a single extent record. */
STATIC int
xchk_bmap_extent(
@@ -305,6 +330,8 @@ xchk_bmap_extent(
xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
+ xchk_bmap_dirattr_extent(ip, info, irec);
+
/* There should never be a "hole" extent in either extent list. */
if (irec->br_startblock == HOLESTARTBLOCK)
xchk_fblock_set_corrupt(info->sc, info->whichfork,
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index cd3e4d768a18..a38a22785a1a 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -129,6 +129,12 @@ xchk_dir_actor(
goto out;
}
+ /* Does this name make sense? */
+ if (!xfs_dir2_namecheck(name, namelen)) {
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ goto out;
+ }
+
if (!strncmp(".", name, namelen)) {
/* If this is "." then check that the inum matches the dir. */
if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 882dc56c5c21..700114f79a7d 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -47,6 +47,12 @@ xchk_setup_ag_iallocbt(
struct xchk_iallocbt {
/* Number of inodes we see while scanning inobt. */
unsigned long long inodes;
+
+ /* Expected next startino, for big block filesystems. */
+ xfs_agino_t next_startino;
+
+ /* Expected end of the current inode cluster. */
+ xfs_agino_t next_cluster_ino;
};
/*
@@ -128,41 +134,57 @@ xchk_iallocbt_freecount(
return hweight64(freemask);
}
-/* Check a particular inode with ir_free. */
+/*
+ * Check that an inode's allocation status matches ir_free in the inobt
+ * record. First we try querying the in-core inode state, and if the inode
+ * isn't loaded we examine the on-disk inode directly.
+ *
+ * Since there can be 1:M and M:1 mappings between inobt records and inode
+ * clusters, we pass in the inode location information as an inobt record;
+ * the index of an inode cluster within the inobt record (as well as the
+ * cluster buffer itself); and the index of the inode within the cluster.
+ *
+ * @irec is the inobt record.
+ * @irec_ino is the inode offset from the start of the record.
+ * @dip is the on-disk inode.
+ */
STATIC int
-xchk_iallocbt_check_cluster_freemask(
+xchk_iallocbt_check_cluster_ifree(
struct xchk_btree *bs,
- xfs_ino_t fsino,
- xfs_agino_t chunkino,
- xfs_agino_t clusterino,
struct xfs_inobt_rec_incore *irec,
- struct xfs_buf *bp)
+ unsigned int irec_ino,
+ struct xfs_dinode *dip)
{
- struct xfs_dinode *dip;
struct xfs_mount *mp = bs->cur->bc_mp;
- bool inode_is_free = false;
+ xfs_ino_t fsino;
+ xfs_agino_t agino;
+ bool irec_free;
+ bool ino_inuse;
bool freemask_ok;
- bool inuse;
int error = 0;
if (xchk_should_terminate(bs->sc, &error))
return error;
- dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
+ /*
+ * Given an inobt record and the offset of an inode from the start of
+ * the record, compute which fs inode we're talking about.
+ */
+ agino = irec->ir_startino + irec_ino;
+ fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
+ irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));
+
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
- (dip->di_version >= 3 &&
- be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
+ (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out;
}
- if (irec->ir_free & XFS_INOBT_MASK(chunkino + clusterino))
- inode_is_free = true;
- error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp,
- fsino + clusterino, &inuse);
+ error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
+ &ino_inuse);
if (error == -ENODATA) {
/* Not cached, just read the disk buffer */
- freemask_ok = inode_is_free ^ !!(dip->di_mode);
+ freemask_ok = irec_free ^ !!(dip->di_mode);
if (!bs->sc->try_harder && !freemask_ok)
return -EDEADLOCK;
} else if (error < 0) {
@@ -174,7 +196,7 @@ xchk_iallocbt_check_cluster_freemask(
goto out;
} else {
/* Inode is all there. */
- freemask_ok = inode_is_free ^ inuse;
+ freemask_ok = irec_free ^ ino_inuse;
}
if (!freemask_ok)
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
@@ -182,86 +204,221 @@ out:
return 0;
}
-/* Make sure the free mask is consistent with what the inodes think. */
+/*
+ * Check that the holemask and freemask of a hypothetical inode cluster match
+ * what's actually on disk. If sparse inodes are enabled, the cluster does
+ * not actually have to map to inodes if the corresponding holemask bit is set.
+ *
+ * @cluster_base is the first inode in the cluster within the @irec.
+ */
STATIC int
-xchk_iallocbt_check_freemask(
+xchk_iallocbt_check_cluster(
struct xchk_btree *bs,
- struct xfs_inobt_rec_incore *irec)
+ struct xfs_inobt_rec_incore *irec,
+ unsigned int cluster_base)
{
struct xfs_imap imap;
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_dinode *dip;
- struct xfs_buf *bp;
- xfs_ino_t fsino;
- xfs_agino_t nr_inodes;
- xfs_agino_t agino;
- xfs_agino_t chunkino;
- xfs_agino_t clusterino;
+ struct xfs_buf *cluster_bp;
+ unsigned int nr_inodes;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
xfs_agblock_t agbno;
- uint16_t holemask;
+ unsigned int cluster_index;
+ uint16_t cluster_mask = 0;
uint16_t ir_holemask;
int error = 0;
- /* Make sure the freemask matches the inode records. */
- nr_inodes = mp->m_inodes_per_cluster;
-
- for (agino = irec->ir_startino;
- agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
- agino += mp->m_inodes_per_cluster) {
- fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
- chunkino = agino - irec->ir_startino;
- agbno = XFS_AGINO_TO_AGBNO(mp, agino);
-
- /* Compute the holemask mask for this cluster. */
- for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
- clusterino += XFS_INODES_PER_HOLEMASK_BIT)
- holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
- XFS_INODES_PER_HOLEMASK_BIT);
-
- /* The whole cluster must be a hole or not a hole. */
- ir_holemask = (irec->ir_holemask & holemask);
- if (ir_holemask != holemask && ir_holemask != 0) {
+ nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
+ mp->m_inodes_per_cluster);
+
+ /* Map this inode cluster */
+ agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);
+
+ /* Compute a bitmask for this cluster that can be used for holemask. */
+ for (cluster_index = 0;
+ cluster_index < nr_inodes;
+ cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
+ cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
+ XFS_INODES_PER_HOLEMASK_BIT);
+
+ /*
+ * Map the first inode of this cluster to a buffer and offset.
+ * Be careful about inobt records that don't align with the start of
+ * the inode buffer when block sizes are large enough to hold multiple
+ * inode chunks. When this happens, cluster_base will be zero but
+ * ir_startino can be large enough to make im_boffset nonzero.
+ */
+ ir_holemask = (irec->ir_holemask & cluster_mask);
+ imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
+ imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
+ imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino);
+
+ if (imap.im_boffset != 0 && cluster_base != 0) {
+ ASSERT(imap.im_boffset == 0 || cluster_base == 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return 0;
+ }
+
+ trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
+ imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
+ cluster_mask, ir_holemask,
+ XFS_INO_TO_OFFSET(mp, irec->ir_startino +
+ cluster_base));
+
+ /* The whole cluster must be a hole or not a hole. */
+ if (ir_holemask != cluster_mask && ir_holemask != 0) {
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return 0;
+ }
+
+ /* If any part of this is a hole, skip it. */
+ if (ir_holemask) {
+ xchk_xref_is_not_owned_by(bs->sc, agbno,
+ mp->m_blocks_per_cluster,
+ &XFS_RMAP_OINFO_INODES);
+ return 0;
+ }
+
+ xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
+ &XFS_RMAP_OINFO_INODES);
+
+ /* Grab the inode cluster buffer. */
+ error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp,
+ 0, 0);
+ if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
+ return error;
+
+ /* Check free status of each inode within this cluster. */
+ for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
+ struct xfs_dinode *dip;
+
+ if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- continue;
+ break;
}
- /* If any part of this is a hole, skip it. */
- if (ir_holemask) {
- xchk_xref_is_not_owned_by(bs->sc, agbno,
- mp->m_blocks_per_cluster,
- &XFS_RMAP_OINFO_INODES);
- continue;
+ dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
+ error = xchk_iallocbt_check_cluster_ifree(bs, irec,
+ cluster_base + cluster_index, dip);
+ if (error)
+ break;
+ imap.im_boffset += mp->m_sb.sb_inodesize;
+ }
+
+ xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
+ return error;
+}
+
+/*
+ * For all the inode clusters that could map to this inobt record, make sure
+ * that the holemask makes sense and that the allocation status of each inode
+ * matches the freemask.
+ */
+STATIC int
+xchk_iallocbt_check_clusters(
+ struct xchk_btree *bs,
+ struct xfs_inobt_rec_incore *irec)
+{
+ unsigned int cluster_base;
+ int error = 0;
+
+ /*
+ * For the common case where this inobt record maps to multiple inode
+ * clusters this will call _check_cluster for each cluster.
+ *
+ * For the case that multiple inobt records map to a single cluster,
+ * this will call _check_cluster once.
+ */
+ for (cluster_base = 0;
+ cluster_base < XFS_INODES_PER_CHUNK;
+ cluster_base += bs->sc->mp->m_inodes_per_cluster) {
+ error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+
+/*
+ * Make sure this inode btree record is aligned properly. Because a fs block
+ * contains multiple inodes, we check that the inobt record is aligned to the
+ * correct inode, not just the correct block on disk. This results in a finer
+ * grained corruption check.
+ */
+STATIC void
+xchk_iallocbt_rec_alignment(
+ struct xchk_btree *bs,
+ struct xfs_inobt_rec_incore *irec)
+{
+ struct xfs_mount *mp = bs->sc->mp;
+ struct xchk_iallocbt *iabt = bs->private;
+
+ /*
+ * finobt records have different positioning requirements than inobt
+ * records: each finobt record must have a corresponding inobt record.
+ * That is checked in the xref function, so for now we only catch the
+ * obvious case where the record isn't at all aligned properly.
+ *
+ * Note that if a fs block contains more than a single chunk of inodes,
+ * we will have finobt records only for those chunks containing free
+ * inodes, and therefore expect chunk alignment of finobt records.
+ * Otherwise, we expect that the finobt record is aligned to the
+ * cluster alignment as told by the superblock.
+ */
+ if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
+ unsigned int imask;
+
+ imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
+ mp->m_cluster_align_inodes) - 1;
+ if (irec->ir_startino & imask)
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return;
+ }
+
+ if (iabt->next_startino != NULLAGINO) {
+ /*
+ * We're midway through a cluster of inodes that is mapped by
+ * multiple inobt records. Did we get the record for the next
+ * irec in the sequence?
+ */
+ if (irec->ir_startino != iabt->next_startino) {
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return;
}
- xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
- &XFS_RMAP_OINFO_INODES);
+ iabt->next_startino += XFS_INODES_PER_CHUNK;
- /* Grab the inode cluster buffer. */
- imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
- agbno);
- imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
- imap.im_boffset = 0;
-
- error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
- &dip, &bp, 0, 0);
- if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
- &error))
- continue;
-
- /* Which inodes are free? */
- for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
- error = xchk_iallocbt_check_cluster_freemask(bs,
- fsino, chunkino, clusterino, irec, bp);
- if (error) {
- xfs_trans_brelse(bs->cur->bc_tp, bp);
- return error;
- }
+ /* Are we done with the cluster? */
+ if (iabt->next_startino >= iabt->next_cluster_ino) {
+ iabt->next_startino = NULLAGINO;
+ iabt->next_cluster_ino = NULLAGINO;
}
+ return;
+ }
+
+ /* inobt records must be aligned to cluster and inode alignment size. */

+ if (irec->ir_startino & (mp->m_cluster_align_inodes - 1)) {
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return;
+ }
- xfs_trans_brelse(bs->cur->bc_tp, bp);
+ if (irec->ir_startino & (mp->m_inodes_per_cluster - 1)) {
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ return;
}
- return error;
+ if (mp->m_inodes_per_cluster <= XFS_INODES_PER_CHUNK)
+ return;
+
+ /*
+ * If this is the start of an inode cluster that can be mapped by
+ * multiple inobt records, the next inobt record must follow exactly
+ * after this one.
+ */
+ iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
+ iabt->next_cluster_ino = irec->ir_startino + mp->m_inodes_per_cluster;
}
/* Scrub an inobt/finobt record. */
@@ -276,7 +433,6 @@ xchk_iallocbt_rec(
uint64_t holes;
xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
xfs_agino_t agino;
- xfs_agblock_t agbno;
xfs_extlen_t len;
int holecount;
int i;
@@ -303,11 +459,9 @@ xchk_iallocbt_rec(
goto out;
}
- /* Make sure this record is aligned to cluster and inoalignmnt size. */
- agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
- if ((agbno & (mp->m_cluster_align - 1)) ||
- (agbno & (mp->m_blocks_per_cluster - 1)))
- xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_iallocbt_rec_alignment(bs, &irec);
+ if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
iabt->inodes += irec.ir_count;
@@ -320,7 +474,7 @@ xchk_iallocbt_rec(
if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
goto out;
- goto check_freemask;
+ goto check_clusters;
}
/* Check each chunk of a sparse inode cluster. */
@@ -346,8 +500,8 @@ xchk_iallocbt_rec(
holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
-check_freemask:
- error = xchk_iallocbt_check_freemask(bs, &irec);
+check_clusters:
+ error = xchk_iallocbt_check_clusters(bs, &irec);
if (error)
goto out;
@@ -429,6 +583,8 @@ xchk_iallocbt(
struct xfs_btree_cur *cur;
struct xchk_iallocbt iabt = {
.inodes = 0,
+ .next_startino = NULLAGINO,
+ .next_cluster_ino = NULLAGINO,
};
int error;
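
For readers following the holemask arithmetic in xchk_iallocbt_check_cluster() above, here is a worked example under the usual geometry assumptions (64 inodes per inobt record and a 16-bit holemask, so each holemask bit covers 4 inodes); it is a standalone model, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK	64	/* inodes covered by one inobt record */
#define INODES_PER_HOLEMASK_BIT	4	/* 64 inodes / 16 holemask bits */

/* Mirror of the cluster_mask loop in xchk_iallocbt_check_cluster(). */
static uint16_t cluster_mask(unsigned int cluster_base, unsigned int nr_inodes)
{
	uint16_t mask = 0;
	unsigned int i;

	for (i = 0; i < nr_inodes; i += INODES_PER_HOLEMASK_BIT)
		mask |= 1u << ((cluster_base + i) / INODES_PER_HOLEMASK_BIT);
	return mask;
}

int main(void)
{
	/* 32 inodes per cluster: one record maps two clusters (1:M case). */
	printf("0x%04x 0x%04x\n", cluster_mask(0, 32), cluster_mask(32, 32));
	/* prints 0x00ff 0xff00 */

	/* 128 inodes per cluster: one record covers only part of a cluster
	 * (M:1 case), so nr_inodes is capped at INODES_PER_CHUNK. */
	printf("0x%04x\n", cluster_mask(0, INODES_PER_CHUNK));	/* 0xffff */
	return 0;
}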
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 1c8eecfe52b8..f28f4bad317b 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -743,7 +743,8 @@ xrep_findroot_block(
/* Ensure the block magic matches the btree type we're looking for. */
btblock = XFS_BUF_TO_BLOCK(bp);
- if (be32_to_cpu(btblock->bb_magic) != fab->magic)
+ ASSERT(fab->buf_ops->magic[1] != 0);
+ if (btblock->bb_magic != fab->buf_ops->magic[1])
goto out;
/*
@@ -768,18 +769,23 @@ xrep_findroot_block(
if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
&mp->m_sb.sb_meta_uuid))
goto out;
+ /*
+ * Read verifiers can reference b_ops, so we set the pointer
+ * here. If the verifier fails we'll reset the buffer state
+ * to what it was before we touched the buffer.
+ */
+ bp->b_ops = fab->buf_ops;
fab->buf_ops->verify_read(bp);
if (bp->b_error) {
+ bp->b_ops = NULL;
bp->b_error = 0;
goto out;
}
/*
* Some read verifiers will (re)set b_ops, so we must be
- * careful not to blow away any such assignment.
+ * careful not to change b_ops after running the verifier.
*/
- if (!bp->b_ops)
- bp->b_ops = fab->buf_ops;
}
/*
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index f2fc18bb7605..d990314eb08b 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -42,9 +42,6 @@ struct xrep_find_ag_btree {
/* in: buffer ops */
const struct xfs_buf_ops *buf_ops;
- /* in: magic number of the btree */
- uint32_t magic;
-
/* out: the highest btree block found and the tree height */
xfs_agblock_t root;
unsigned int height;
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index 665d4bbb17cc..dbe115b075f7 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -141,9 +141,8 @@ xchk_xref_is_used_rt_space(
startext = fsbno;
endext = fsbno + len - 1;
do_div(startext, sc->mp->m_sb.sb_rextsize);
- if (do_div(endext, sc->mp->m_sb.sb_rextsize))
- endext++;
- extcount = endext - startext;
+ do_div(endext, sc->mp->m_sb.sb_rextsize);
+ extcount = endext - startext + 1;
xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext, extcount,
&is_free);
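
The rtbitmap change above fixes an off-by-one in the extent count: the old code only rounded the end extent up when the last block was not the first block of a realtime extent, so a range ending exactly on an extent boundary came up one extent short. A small worked example (do_div() divides in place and returns the remainder; modelled here with plain / and %):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rextsize = 4;			/* blocks per rt extent (example) */
	uint64_t fsbno = 0, len = 5;		/* range covers blocks 0..4 */

	uint64_t startext = fsbno / rextsize;	/* 0 */
	uint64_t lastblock = fsbno + len - 1;	/* 4, first block of extent 1 */

	/* Old formula: round the end extent up only when there is a remainder. */
	uint64_t old_end = lastblock / rextsize + (lastblock % rextsize ? 1 : 0);
	uint64_t old_count = old_end - startext;		/* 1: misses extent 1 */

	/* New formula: inclusive extent count. */
	uint64_t new_count = lastblock / rextsize - startext + 1;	/* 2: correct */

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_count, (unsigned long long)new_count);
	return 0;
}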
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 8344b14031ef..3c83e8b3b39c 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -545,6 +545,51 @@ TRACE_EVENT(xchk_xref_error,
__entry->ret_ip)
);
+TRACE_EVENT(xchk_iallocbt_check_cluster,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agino_t startino, xfs_daddr_t map_daddr,
+ unsigned short map_len, unsigned int chunk_ino,
+ unsigned int nr_inodes, uint16_t cluster_mask,
+ uint16_t holemask, unsigned int cluster_ino),
+ TP_ARGS(mp, agno, startino, map_daddr, map_len, chunk_ino, nr_inodes,
+ cluster_mask, holemask, cluster_ino),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agino_t, startino)
+ __field(xfs_daddr_t, map_daddr)
+ __field(unsigned short, map_len)
+ __field(unsigned int, chunk_ino)
+ __field(unsigned int, nr_inodes)
+ __field(unsigned int, cluster_ino)
+ __field(uint16_t, cluster_mask)
+ __field(uint16_t, holemask)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->startino = startino;
+ __entry->map_daddr = map_daddr;
+ __entry->map_len = map_len;
+ __entry->chunk_ino = chunk_ino;
+ __entry->nr_inodes = nr_inodes;
+ __entry->cluster_mask = cluster_mask;
+ __entry->holemask = holemask;
+ __entry->cluster_ino = cluster_ino;
+ ),
+ TP_printk("dev %d:%d agno %d startino %u daddr 0x%llx len %d chunkino %u nr_inodes %u cluster_mask 0x%x holemask 0x%x cluster_ino %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->startino,
+ __entry->map_daddr,
+ __entry->map_len,
+ __entry->chunk_ino,
+ __entry->nr_inodes,
+ __entry->cluster_mask,
+ __entry->holemask,
+ __entry->cluster_ino)
+)
+
/* repair tracepoints */
#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 338b9d9984e0..7b8bb6bde981 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -28,7 +28,8 @@
*/
struct xfs_writepage_ctx {
struct xfs_bmbt_irec imap;
- unsigned int io_type;
+ int fork;
+ unsigned int data_seq;
unsigned int cow_seq;
struct xfs_ioend *ioend;
};
@@ -255,30 +256,20 @@ xfs_end_io(
*/
error = blk_status_to_errno(ioend->io_bio->bi_status);
if (unlikely(error)) {
- switch (ioend->io_type) {
- case XFS_IO_COW:
+ if (ioend->io_fork == XFS_COW_FORK)
xfs_reflink_cancel_cow_range(ip, offset, size, true);
- break;
- }
-
goto done;
}
/*
- * Success: commit the COW or unwritten blocks if needed.
+ * Success: commit the COW or unwritten blocks if needed.
*/
- switch (ioend->io_type) {
- case XFS_IO_COW:
+ if (ioend->io_fork == XFS_COW_FORK)
error = xfs_reflink_end_cow(ip, offset, size);
- break;
- case XFS_IO_UNWRITTEN:
- /* writeback should never update isize */
+ else if (ioend->io_state == XFS_EXT_UNWRITTEN)
error = xfs_iomap_write_unwritten(ip, offset, size, false);
- break;
- default:
+ else
ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
- break;
- }
done:
if (ioend->io_append_trans)
@@ -293,7 +284,8 @@ xfs_end_bio(
struct xfs_ioend *ioend = bio->bi_private;
struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
- if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
+ if (ioend->io_fork == XFS_COW_FORK ||
+ ioend->io_state == XFS_EXT_UNWRITTEN)
queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
else if (ioend->io_append_trans)
queue_work(mp->m_data_workqueue, &ioend->io_work);
@@ -301,6 +293,75 @@ xfs_end_bio(
xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}
+/*
+ * Fast revalidation of the cached writeback mapping. Return true if the current
+ * mapping is valid, false otherwise.
+ */
+static bool
+xfs_imap_valid(
+ struct xfs_writepage_ctx *wpc,
+ struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb)
+{
+ if (offset_fsb < wpc->imap.br_startoff ||
+ offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
+ return false;
+ /*
+ * If this is a COW mapping, it is sufficient to check that the mapping
+ * covers the offset. Be careful to check this first because the caller
+ * can revalidate a COW mapping without updating the data seqno.
+ */
+ if (wpc->fork == XFS_COW_FORK)
+ return true;
+
+ /*
+ * This is not a COW mapping. Check the sequence number of the data fork
+ * because concurrent changes could have invalidated the extent. Check
+ * the COW fork because concurrent changes since the last time we
+ * checked (and found nothing at this offset) could have added
+ * overlapping blocks.
+ */
+ if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
+ return false;
+ if (xfs_inode_has_cow_data(ip) &&
+ wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
+ return false;
+ return true;
+}
+
+/*
+ * Pass in a delalloc extent and convert it to real extents, return the real
+ * extent that maps offset_fsb in wpc->imap.
+ *
+ * The current page is held locked so nothing could have removed the block
+ * backing offset_fsb, although it could have moved from the COW to the data
+ * fork by another thread.
+ */
+static int
+xfs_convert_blocks(
+ struct xfs_writepage_ctx *wpc,
+ struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb)
+{
+ int error;
+
+ /*
+ * Attempt to allocate whatever delalloc extent currently backs
+ * offset_fsb and put the result into wpc->imap. Allocate in a loop
+ * because it may take several attempts to allocate real blocks for a
+ * contiguous delalloc extent if free space is sufficiently fragmented.
+ */
+ do {
+ error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
+ &wpc->imap, wpc->fork == XFS_COW_FORK ?
+ &wpc->cow_seq : &wpc->data_seq);
+ if (error)
+ return error;
+ } while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
+
+ return 0;
+}
+
STATIC int
xfs_map_blocks(
struct xfs_writepage_ctx *wpc,
@@ -310,26 +371,16 @@ xfs_map_blocks(
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
ssize_t count = i_blocksize(inode);
- xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
xfs_fileoff_t cow_fsb = NULLFILEOFF;
struct xfs_bmbt_irec imap;
- int whichfork = XFS_DATA_FORK;
struct xfs_iext_cursor icur;
- bool imap_valid;
+ int retries = 0;
int error = 0;
- /*
- * We have to make sure the cached mapping is within EOF to protect
- * against eofblocks trimming on file release leaving us with a stale
- * mapping. Otherwise, a page for a subsequent file extending buffered
- * write could get picked up by this writeback cycle and written to the
- * wrong blocks.
- *
- * Note that what we really want here is a generic mapping invalidation
- * mechanism to protect us from arbitrary extent modifying contexts, not
- * just eofblocks.
- */
- xfs_trim_extent_eof(&wpc->imap, ip);
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
/*
* COW fork blocks can overlap data fork blocks even if the blocks
@@ -346,31 +397,19 @@ xfs_map_blocks(
* against concurrent updates and provides a memory barrier on the way
* out that ensures that we always see the current value.
*/
- imap_valid = offset_fsb >= wpc->imap.br_startoff &&
- offset_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount;
- if (imap_valid &&
- (!xfs_inode_has_cow_data(ip) ||
- wpc->io_type == XFS_IO_COW ||
- wpc->cow_seq == READ_ONCE(ip->i_cowfp->if_seq)))
+ if (xfs_imap_valid(wpc, ip, offset_fsb))
return 0;
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
/*
* If we don't have a valid map, now it's time to get a new one for this
* offset. This will convert delayed allocations (including COW ones)
* into real extents. If we return without a valid map, it means we
* landed in a hole and we skip the block.
*/
+retry:
xfs_ilock(ip, XFS_ILOCK_SHARED);
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
(ip->i_df.if_flags & XFS_IFEXTENTS));
- ASSERT(offset <= mp->m_super->s_maxbytes);
-
- if (offset > mp->m_super->s_maxbytes - count)
- count = mp->m_super->s_maxbytes - offset;
- end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
/*
* Check if this offset is covered by a COW extent, and if yes use
@@ -382,30 +421,16 @@ xfs_map_blocks(
if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- /*
- * Truncate can race with writeback since writeback doesn't
- * take the iolock and truncate decreases the file size before
- * it starts truncating the pages between new_size and old_size.
- * Therefore, we can end up in the situation where writeback
- * gets a CoW fork mapping but the truncate makes the mapping
- * invalid and we end up in here trying to get a new mapping.
- * bail out here so that we simply never get a valid mapping
- * and so we drop the write altogether. The page truncation
- * will kill the contents anyway.
- */
- if (offset > i_size_read(inode)) {
- wpc->io_type = XFS_IO_HOLE;
- return 0;
- }
- whichfork = XFS_COW_FORK;
- wpc->io_type = XFS_IO_COW;
+
+ wpc->fork = XFS_COW_FORK;
goto allocate_blocks;
}
/*
- * Map valid and no COW extent in the way? We're done.
+ * No COW extent overlap. Revalidate now that we may have updated
+ * ->cow_seq. If the data mapping is still valid, we're done.
*/
- if (imap_valid) {
+ if (xfs_imap_valid(wpc, ip, offset_fsb)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return 0;
}
@@ -417,49 +442,65 @@ xfs_map_blocks(
*/
if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
imap.br_startoff = end_fsb; /* fake a hole past EOF */
+ wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ wpc->fork = XFS_DATA_FORK;
+
+ /* landed in a hole or beyond EOF? */
if (imap.br_startoff > offset_fsb) {
- /* landed in a hole or beyond EOF */
imap.br_blockcount = imap.br_startoff - offset_fsb;
imap.br_startoff = offset_fsb;
imap.br_startblock = HOLESTARTBLOCK;
- wpc->io_type = XFS_IO_HOLE;
- } else {
- /*
- * Truncate to the next COW extent if there is one. This is the
- * only opportunity to do this because we can skip COW fork
- * lookups for the subsequent blocks in the mapping; however,
- * the requirement to treat the COW range separately remains.
- */
- if (cow_fsb != NULLFILEOFF &&
- cow_fsb < imap.br_startoff + imap.br_blockcount)
- imap.br_blockcount = cow_fsb - imap.br_startoff;
-
- if (isnullstartblock(imap.br_startblock)) {
- /* got a delalloc extent */
- wpc->io_type = XFS_IO_DELALLOC;
- goto allocate_blocks;
- }
-
- if (imap.br_state == XFS_EXT_UNWRITTEN)
- wpc->io_type = XFS_IO_UNWRITTEN;
- else
- wpc->io_type = XFS_IO_OVERWRITE;
+ imap.br_state = XFS_EXT_NORM;
}
+ /*
+ * Truncate to the next COW extent if there is one. This is the only
+ * opportunity to do this because we can skip COW fork lookups for the
+ * subsequent blocks in the mapping; however, the requirement to treat
+ * the COW range separately remains.
+ */
+ if (cow_fsb != NULLFILEOFF &&
+ cow_fsb < imap.br_startoff + imap.br_blockcount)
+ imap.br_blockcount = cow_fsb - imap.br_startoff;
+
+ /* got a delalloc extent? */
+ if (imap.br_startblock != HOLESTARTBLOCK &&
+ isnullstartblock(imap.br_startblock))
+ goto allocate_blocks;
+
wpc->imap = imap;
- trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
+ trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
return 0;
allocate_blocks:
- error = xfs_iomap_write_allocate(ip, whichfork, offset, &imap,
- &wpc->cow_seq);
- if (error)
+ error = xfs_convert_blocks(wpc, ip, offset_fsb);
+ if (error) {
+ /*
+ * If we failed to find the extent in the COW fork we might have
+ * raced with a COW to data fork conversion or truncate.
+ * Restart the lookup to catch the extent in the data fork for
+ * the former case, but prevent additional retries to avoid
+ * looping forever for the latter case.
+ */
+ if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
+ goto retry;
+ ASSERT(error != -EAGAIN);
return error;
- ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
- imap.br_startoff + imap.br_blockcount <= cow_fsb);
- wpc->imap = imap;
- trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
+ }
+
+ /*
+ * Due to merging, the returned real extent might be larger than the
+ * original delalloc one. Trim the returned extent to the next COW
+ * boundary again to force a re-lookup.
+ */
+ if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
+ cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
+ wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
+
+ ASSERT(wpc->imap.br_startoff <= offset_fsb);
+ ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
+ trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
return 0;
}
@@ -484,7 +525,7 @@ xfs_submit_ioend(
int status)
{
/* Convert CoW extents to regular */
- if (!status && ioend->io_type == XFS_IO_COW) {
+ if (!status && ioend->io_fork == XFS_COW_FORK) {
/*
* Yuk. This can do memory allocation, but is not a
* transactional operation so everything is done in GFP_KERNEL
@@ -502,7 +543,8 @@ xfs_submit_ioend(
/* Reserve log space if we might write beyond the on-disk inode size. */
if (!status &&
- ioend->io_type != XFS_IO_UNWRITTEN &&
+ (ioend->io_fork == XFS_COW_FORK ||
+ ioend->io_state != XFS_EXT_UNWRITTEN) &&
xfs_ioend_is_append(ioend) &&
!ioend->io_append_trans)
status = xfs_setfilesize_trans_alloc(ioend);
@@ -531,7 +573,8 @@ xfs_submit_ioend(
static struct xfs_ioend *
xfs_alloc_ioend(
struct inode *inode,
- unsigned int type,
+ int fork,
+ xfs_exntst_t state,
xfs_off_t offset,
struct block_device *bdev,
sector_t sector)
@@ -545,7 +588,8 @@ xfs_alloc_ioend(
ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_type = type;
+ ioend->io_fork = fork;
+ ioend->io_state = state;
ioend->io_inode = inode;
ioend->io_size = 0;
ioend->io_offset = offset;
@@ -606,13 +650,15 @@ xfs_add_to_ioend(
sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
- if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
+ if (!wpc->ioend ||
+ wpc->fork != wpc->ioend->io_fork ||
+ wpc->imap.br_state != wpc->ioend->io_state ||
sector != bio_end_sector(wpc->ioend->io_bio) ||
offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
- bdev, sector);
+ wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
+ wpc->imap.br_state, offset, bdev, sector);
}
if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
@@ -721,7 +767,7 @@ xfs_writepage_map(
error = xfs_map_blocks(wpc, inode, file_offset);
if (error)
break;
- if (wpc->io_type == XFS_IO_HOLE)
+ if (wpc->imap.br_startblock == HOLESTARTBLOCK)
continue;
xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
&submit_list);
@@ -916,9 +962,7 @@ xfs_vm_writepage(
struct page *page,
struct writeback_control *wbc)
{
- struct xfs_writepage_ctx wpc = {
- .io_type = XFS_IO_HOLE,
- };
+ struct xfs_writepage_ctx wpc = { };
int ret;
ret = xfs_do_writepage(page, wbc, &wpc);
@@ -932,9 +976,7 @@ xfs_vm_writepages(
struct address_space *mapping,
struct writeback_control *wbc)
{
- struct xfs_writepage_ctx wpc = {
- .io_type = XFS_IO_HOLE,
- };
+ struct xfs_writepage_ctx wpc = { };
int ret;
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
@@ -981,7 +1023,7 @@ xfs_vm_bmap(
* Since we don't pass back blockdev info, we can't return bmap
* information for rt files either.
*/
- if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
+ if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
return 0;
return iomap_bmap(mapping, block, &xfs_iomap_ops);
}
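
With io_type gone, whether a new bio chain (ioend) must be started is now decided from the (fork, extent state) pair plus physical and logical contiguity, mirroring the condition in xfs_add_to_ioend() above. A compact model of that predicate follows; the structure and names are illustrative, not the kernel types.

#include <stdbool.h>
#include <stdint.h>

struct ioend_model {
	int	 fork;		/* data or COW fork */
	int	 state;		/* normal or unwritten extent */
	uint64_t next_sector;	/* sector just past the last bio in the chain */
	uint64_t offset;	/* file offset this ioend started at */
	uint64_t size;		/* bytes accumulated so far */
};

/* A block joins the current ioend only if everything about it chains on. */
static bool can_append(const struct ioend_model *cur, int fork, int state,
		       uint64_t sector, uint64_t offset)
{
	return cur &&
	       fork == cur->fork &&
	       state == cur->state &&
	       sector == cur->next_sector &&
	       offset == cur->offset + cur->size;
}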
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index e5c23948a8ab..6c2615b83c5d 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -9,32 +9,12 @@
extern struct bio_set xfs_ioend_bioset;
/*
- * Types of I/O for bmap clustering and I/O completion tracking.
- *
- * This enum is used in string mapping in xfs_trace.h; please keep the
- * TRACE_DEFINE_ENUMs for it up to date.
- */
-enum {
- XFS_IO_HOLE, /* covers region without any block allocation */
- XFS_IO_DELALLOC, /* covers delalloc region */
- XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
- XFS_IO_OVERWRITE, /* covers already allocated extent */
- XFS_IO_COW, /* covers copy-on-write extent */
-};
-
-#define XFS_IO_TYPES \
- { XFS_IO_HOLE, "hole" }, \
- { XFS_IO_DELALLOC, "delalloc" }, \
- { XFS_IO_UNWRITTEN, "unwritten" }, \
- { XFS_IO_OVERWRITE, "overwrite" }, \
- { XFS_IO_COW, "CoW" }
-
-/*
* Structure for buffered I/O completions.
*/
struct xfs_ioend {
struct list_head io_list; /* next ioend in chain */
- unsigned int io_type; /* delalloc / unwritten */
+ int io_fork; /* inode fork written back */
+ xfs_exntst_t io_state; /* extent state */
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index a58034049995..3d213a7394c5 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -555,6 +555,7 @@ xfs_attr_put_listent(
attrlist_ent_t *aep;
int arraytop;
+ ASSERT(!context->seen_enough);
ASSERT(!(context->flags & ATTR_KERNOVAL));
ASSERT(context->count >= 0);
ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 1ee8c5539fa4..2db43ff4f8b5 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1162,16 +1162,13 @@ xfs_zero_file_space(
* by virtue of the hole punch.
*/
error = xfs_free_file_space(ip, offset, len);
- if (error)
- goto out;
+ if (error || xfs_is_always_cow_inode(ip))
+ return error;
- error = xfs_alloc_file_space(ip, round_down(offset, blksize),
+ return xfs_alloc_file_space(ip, round_down(offset, blksize),
round_up(offset + len, blksize) -
round_down(offset, blksize),
XFS_BMAPI_PREALLOC);
-out:
- return error;
-
}
static int
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index eedc5e0156ff..548344e25128 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -776,13 +776,24 @@ _xfs_buf_read(
}
/*
- * If the caller passed in an ops structure and the buffer doesn't have ops
- * assigned, set the ops and use them to verify the contents. If the contents
- * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no
- * recorded errors and is already in XBF_DONE state.
+ * Reverify a buffer found in cache without an attached ->b_ops.
+ *
+ * If the caller passed an ops structure and the buffer doesn't have ops
+ * assigned, set the ops and use it to verify the contents. If verification
+ * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
+ * already in XBF_DONE state on entry.
+ *
+ * Under normal operations, every in-core buffer is verified on read I/O
+ * completion. There are two scenarios that can lead to in-core buffers without
+ * an assigned ->b_ops. The first is during log recovery of buffers on a V4
+ * filesystem, though these buffers are purged at the end of recovery. The
+ * other is online repair, which intentionally reads with a NULL buffer ops to
+ * run several verifiers across an in-core buffer in order to establish buffer
+ * type. If repair can't establish that, the buffer will be left in memory
+ * with NULL buffer ops.
*/
int
-xfs_buf_ensure_ops(
+xfs_buf_reverify(
struct xfs_buf *bp,
const struct xfs_buf_ops *ops)
{
@@ -824,7 +835,7 @@ xfs_buf_read_map(
return bp;
}
- xfs_buf_ensure_ops(bp, ops);
+ xfs_buf_reverify(bp, ops);
if (flags & XBF_ASYNC) {
/*
@@ -1536,8 +1547,7 @@ __xfs_buf_submit(
xfs_buf_ioerror(bp, -EIO);
bp->b_flags &= ~XBF_DONE;
xfs_buf_stale(bp);
- if (bp->b_flags & XBF_ASYNC)
- xfs_buf_ioend(bp);
+ xfs_buf_ioend(bp);
return -EIO;
}
@@ -2194,3 +2204,40 @@ void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
atomic_set(&bp->b_lru_ref, lru_ref);
}
+
+/*
+ * Verify an on-disk magic value against the magic value specified in the
+ * verifier structure. The verifier magic is in disk byte order so the caller is
+ * expected to pass the value directly from disk.
+ */
+bool
+xfs_verify_magic(
+ struct xfs_buf *bp,
+ __be32 dmagic)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ int idx;
+
+ idx = xfs_sb_version_hascrc(&mp->m_sb);
+ if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])))
+ return false;
+ return dmagic == bp->b_ops->magic[idx];
+}
+/*
+ * Verify an on-disk magic value against the magic value specified in the
+ * verifier structure. The verifier magic is in disk byte order so the caller is
+ * expected to pass the value directly from disk.
+ */
+bool
+xfs_verify_magic16(
+ struct xfs_buf *bp,
+ __be16 dmagic)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ int idx;
+
+ idx = xfs_sb_version_hascrc(&mp->m_sb);
+ if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])))
+ return false;
+ return dmagic == bp->b_ops->magic16[idx];
+}
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index b9f5511ea998..d0b96e071cec 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -125,6 +125,10 @@ struct xfs_buf_map {
struct xfs_buf_ops {
char *name;
+ union {
+ __be32 magic[2]; /* v4 and v5 on disk magic values */
+ __be16 magic16[2]; /* v4 and v5 on disk magic values */
+ };
void (*verify_read)(struct xfs_buf *);
void (*verify_write)(struct xfs_buf *);
xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
@@ -385,6 +389,8 @@ extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
-int xfs_buf_ensure_ops(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
+int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
+bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
+bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);
#endif /* __XFS_BUF_H__ */
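
The new magic[]/magic16[] union in xfs_buf_ops lets every verifier carry both its v4 and v5 on-disk magic values, indexed by whether the superblock has CRCs. The following is a minimal userspace sketch of that indexing idea; all demo_ names are illustrative, the two constants mirror the bnobt magics shown elsewhere in this series, and the sketch glosses over the fact that the kernel compares __be32 values in disk byte order.

/* demo_verify_magic: standalone sketch of the magic[]-indexed-by-CRC idea. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_buf_ops {
        const char      *name;
        uint32_t        magic[2];       /* [0] = v4 magic, [1] = v5 (CRC) magic */
};

/* Pick the slot by superblock version, like idx = xfs_sb_version_hascrc(). */
static bool
demo_verify_magic(const struct demo_buf_ops *ops, bool has_crc, uint32_t disk_magic)
{
        int     idx = has_crc;

        if (!ops->magic[idx])
                return false;   /* block type not defined for this version */
        return disk_magic == ops->magic[idx];
}

int
main(void)
{
        /* Values mirror XFS_ABTB_MAGIC ("ABTB") and XFS_ABTB_CRC_MAGIC ("AB3B"). */
        struct demo_buf_ops bnobt_ops = {
                .name  = "demo_bnobt",
                .magic = { 0x41425442, 0x41423342 },
        };

        printf("v4 magic on v4 fs: %d\n", demo_verify_magic(&bnobt_ops, false, 0x41425442));
        printf("v4 magic on v5 fs: %d\n", demo_verify_magic(&bnobt_ops, true,  0x41425442));
        return 0;
}
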
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 9866f542e77b..a1e177f66404 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -51,6 +51,7 @@ static unsigned int xfs_errortag_random_default[] = {
XFS_RANDOM_BUF_LRU_REF,
XFS_RANDOM_FORCE_SCRUB_REPAIR,
XFS_RANDOM_FORCE_SUMMARY_RECALC,
+ XFS_RANDOM_IUNLINK_FALLBACK,
};
struct xfs_errortag_attr {
@@ -159,6 +160,7 @@ XFS_ERRORTAG_ATTR_RW(log_item_pin, XFS_ERRTAG_LOG_ITEM_PIN);
XFS_ERRORTAG_ATTR_RW(buf_lru_ref, XFS_ERRTAG_BUF_LRU_REF);
XFS_ERRORTAG_ATTR_RW(force_repair, XFS_ERRTAG_FORCE_SCRUB_REPAIR);
XFS_ERRORTAG_ATTR_RW(bad_summary, XFS_ERRTAG_FORCE_SUMMARY_RECALC);
+XFS_ERRORTAG_ATTR_RW(iunlink_fallback, XFS_ERRTAG_IUNLINK_FALLBACK);
static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(noerror),
@@ -195,6 +197,7 @@ static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(buf_lru_ref),
XFS_ERRORTAG_ATTR_LIST(force_repair),
XFS_ERRORTAG_ATTR_LIST(bad_summary),
+ XFS_ERRORTAG_ATTR_LIST(iunlink_fallback),
NULL,
};
@@ -357,7 +360,8 @@ xfs_buf_verifier_error(
fa = failaddr ? failaddr : __return_address;
__xfs_buf_ioerror(bp, error, fa);
- xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx %s",
+ xfs_alert_tag(mp, XFS_PTAG_VERIFIER_ERROR,
+ "Metadata %s detected at %pS, %s block 0x%llx %s",
bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
fa, bp->b_ops->name, bp->b_bn, name);
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 246d3e989c6c..602aa7d62b66 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -98,5 +98,6 @@ extern int xfs_errortag_clearall(struct xfs_mount *mp);
#define XFS_PTAG_SHUTDOWN_IOERROR 0x00000020
#define XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040
#define XFS_PTAG_FSBLOCK_ZERO 0x00000080
+#define XFS_PTAG_VERIFIER_ERROR 0x00000100
#endif /* __XFS_ERROR_H__ */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index e47425071e65..770cc2edf777 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -507,7 +507,7 @@ xfs_file_dio_aio_write(
* We can't properly handle unaligned direct I/O to reflink
* files yet, as we can't unshare a partial block.
*/
- if (xfs_is_reflink_inode(ip)) {
+ if (xfs_is_cow_inode(ip)) {
trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
return -EREMCHG;
}
@@ -872,14 +872,27 @@ xfs_file_fallocate(
goto out_unlock;
}
- if (mode & FALLOC_FL_ZERO_RANGE)
+ if (mode & FALLOC_FL_ZERO_RANGE) {
error = xfs_zero_file_space(ip, offset, len);
- else {
- if (mode & FALLOC_FL_UNSHARE_RANGE) {
- error = xfs_reflink_unshare(ip, offset, len);
- if (error)
- goto out_unlock;
+ } else if (mode & FALLOC_FL_UNSHARE_RANGE) {
+ error = xfs_reflink_unshare(ip, offset, len);
+ if (error)
+ goto out_unlock;
+
+ if (!xfs_is_always_cow_inode(ip)) {
+ error = xfs_alloc_file_space(ip, offset, len,
+ XFS_BMAPI_PREALLOC);
}
+ } else {
+ /*
+			 * In always_cow mode we can't use preallocations and
+ * thus should not create them.
+ */
+ if (xfs_is_always_cow_inode(ip)) {
+ error = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
error = xfs_alloc_file_space(ip, offset, len,
XFS_BMAPI_PREALLOC);
}
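
The restructured fallocate path above refuses plain preallocation on always_cow inodes while still allowing zeroing and unsharing. A rough userspace sketch of that decision follows; the flag values mirror fallocate(2), and everything prefixed demo_ is hypothetical rather than kernel code.

/* demo_fallocate_policy: sketch of the mode handling above, not the kernel code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_FL_ZERO_RANGE      0x10    /* mirrors FALLOC_FL_ZERO_RANGE */
#define DEMO_FL_UNSHARE_RANGE   0x40    /* mirrors FALLOC_FL_UNSHARE_RANGE */

static int
demo_fallocate_policy(int mode, bool always_cow)
{
        if (mode & DEMO_FL_ZERO_RANGE)
                return 0;               /* zeroing is fine; the prealloc step is skipped */
        if (mode & DEMO_FL_UNSHARE_RANGE)
                return 0;               /* unshare works; skip the prealloc when always_cow */
        if (always_cow)
                return -EOPNOTSUPP;     /* plain preallocation is meaningless here */
        return 0;                       /* normal preallocation path */
}

int
main(void)
{
        printf("prealloc on always_cow inode:   %d\n", demo_fallocate_policy(0, true));
        printf("zero range on always_cow inode: %d\n",
               demo_fallocate_policy(DEMO_FL_ZERO_RANGE, true));
        return 0;
}
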
@@ -1068,10 +1081,10 @@ xfs_file_llseek(
default:
return generic_file_llseek(file, offset, whence);
case SEEK_HOLE:
- offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
+ offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
break;
case SEEK_DATA:
- offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
+ offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
break;
}
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index f3ef70c542e1..584648582ba7 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -533,6 +533,7 @@ xfs_fs_reserve_ag_blocks(
int error = 0;
int err2;
+ mp->m_finobt_nores = false;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
pag = xfs_perag_get(mp, agno);
err2 = xfs_ag_resv_init(pag, NULL);
diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c
index 5169e84ae382..d0d377384120 100644
--- a/fs/xfs/xfs_globals.c
+++ b/fs/xfs/xfs_globals.c
@@ -16,7 +16,7 @@ xfs_param_t xfs_params = {
/* MIN DFLT MAX */
.sgid_inherit = { 0, 0, 1 },
.symlink_mode = { 0, 0, 1 },
- .panic_mask = { 0, 0, 255 },
+ .panic_mask = { 0, 0, 256 },
.error_level = { 0, 3, 11 },
.syncd_timer = { 1*100, 30*100, 7200*100},
.stats_clear = { 0, 0, 1 },
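
XFS_PTAG_VERIFIER_ERROR is bit 0x100, decimal 256, which is why the fs.xfs.panic_mask maximum above moves from 255 to 256. A small sketch of the bit test implied by xfs_alert_tag(); the demo_ names are illustrative, the two tag constants match the values added to xfs_error.h in this series.

/* demo_should_panic: sketch of testing an alert tag against fs.xfs.panic_mask. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PTAG_FSBLOCK_ZERO          0x00000080
#define DEMO_PTAG_VERIFIER_ERROR        0x00000100      /* new tag == 256 */

static bool
demo_should_panic(unsigned int panic_mask, unsigned int tag)
{
        return (panic_mask & tag) != 0;
}

int
main(void)
{
        unsigned int panic_mask = 256;  /* e.g. sysctl fs.xfs.panic_mask=256 */

        printf("verifier error panics: %d\n",
               demo_should_panic(panic_mask, DEMO_PTAG_VERIFIER_ERROR));
        printf("fsblock zero panics:   %d\n",
               demo_should_panic(panic_mask, DEMO_PTAG_FSBLOCK_ZERO));
        return 0;
}
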
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ae667ba74a1c..f643a9295179 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1332,7 +1332,7 @@ xfs_create_tmpfile(
if (error)
goto out_trans_cancel;
- error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip);
+ error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
if (error)
goto out_trans_cancel;
@@ -1754,7 +1754,7 @@ xfs_inactive_ifree(
* now remains allocated and sits on the unlinked list until the fs is
* repaired.
*/
- if (unlikely(mp->m_inotbt_nores)) {
+ if (unlikely(mp->m_finobt_nores)) {
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
&tp);
@@ -1907,86 +1907,510 @@ xfs_inactive(
}
/*
- * This is called when the inode's link count goes to 0 or we are creating a
- * tmpfile via O_TMPFILE. In the case of a tmpfile, @ignore_linkcount will be
- * set to true as the link count is dropped to zero by the VFS after we've
- * created the file successfully, so we have to add it to the unlinked list
- * while the link count is non-zero.
+ * In-Core Unlinked List Lookups
+ * =============================
+ *
+ * Every inode is supposed to be reachable from some other piece of metadata
+ * with the exception of the root directory. Inodes with a connection to a
+ * file descriptor but not linked from anywhere in the on-disk directory tree
+ * are collectively known as unlinked inodes, though the filesystem itself
+ * maintains links to these inodes so that on-disk metadata are consistent.
+ *
+ * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI
+ * header contains a number of buckets that point to an inode, and each inode
+ * record has a pointer to the next inode in the hash chain. This
+ * singly-linked list causes scaling problems in the iunlink remove function
+ * because we must walk that list to find the inode that points to the inode
+ * being removed from the unlinked hash bucket list.
+ *
+ * What if we modelled the unlinked list as a collection of records capturing
+ * "X.next_unlinked = Y" relations? If we indexed those records on Y, we'd
+ * have a fast way to look up unlinked list predecessors, which avoids the
+ * slow list walk. That's exactly what we do here (in-core) with a per-AG
+ * rhashtable.
+ *
+ * Because this is a backref cache, we ignore operational failures since the
+ * iunlink code can fall back to the slow bucket walk. The only errors that
+ * should bubble out are for obviously incorrect situations.
+ *
+ * All users of the backref cache MUST hold the AGI buffer lock to serialize
+ * access or have otherwise provided for concurrency control.
+ */
+
+/* Capture a "X.next_unlinked = Y" relationship. */
+struct xfs_iunlink {
+ struct rhash_head iu_rhash_head;
+ xfs_agino_t iu_agino; /* X */
+ xfs_agino_t iu_next_unlinked; /* Y */
+};
+
+/* Unlinked list predecessor lookup hashtable construction */
+static int
+xfs_iunlink_obj_cmpfn(
+ struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const xfs_agino_t *key = arg->key;
+ const struct xfs_iunlink *iu = obj;
+
+ if (iu->iu_next_unlinked != *key)
+ return 1;
+ return 0;
+}
+
+static const struct rhashtable_params xfs_iunlink_hash_params = {
+ .min_size = XFS_AGI_UNLINKED_BUCKETS,
+ .key_len = sizeof(xfs_agino_t),
+ .key_offset = offsetof(struct xfs_iunlink,
+ iu_next_unlinked),
+ .head_offset = offsetof(struct xfs_iunlink, iu_rhash_head),
+ .automatic_shrinking = true,
+ .obj_cmpfn = xfs_iunlink_obj_cmpfn,
+};
+
+/*
+ * Return X, where X.next_unlinked == @agino. Returns NULLAGINO if no such
+ * relation is found.
+ */
+static xfs_agino_t
+xfs_iunlink_lookup_backref(
+ struct xfs_perag *pag,
+ xfs_agino_t agino)
+{
+ struct xfs_iunlink *iu;
+
+ iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
+ xfs_iunlink_hash_params);
+ return iu ? iu->iu_agino : NULLAGINO;
+}
+
+/*
+ * Take ownership of an iunlink cache entry and insert it into the hash table.
+ * If successful, the entry will be owned by the cache; if not, it is freed.
+ * Either way, the caller does not own @iu after this call.
+ */
+static int
+xfs_iunlink_insert_backref(
+ struct xfs_perag *pag,
+ struct xfs_iunlink *iu)
+{
+ int error;
+
+ error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
+ &iu->iu_rhash_head, xfs_iunlink_hash_params);
+ /*
+ * Fail loudly if there already was an entry because that's a sign of
+ * corruption of in-memory data. Also fail loudly if we see an error
+ * code we didn't anticipate from the rhashtable code. Currently we
+ * only anticipate ENOMEM.
+ */
+ if (error) {
+ WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
+ kmem_free(iu);
+ }
+ /*
+ * Absorb any runtime errors that aren't a result of corruption because
+ * this is a cache and we can always fall back to bucket list scanning.
+ */
+ if (error != 0 && error != -EEXIST)
+ error = 0;
+ return error;
+}
+
+/* Remember that @prev_agino.next_unlinked = @this_agino. */
+static int
+xfs_iunlink_add_backref(
+ struct xfs_perag *pag,
+ xfs_agino_t prev_agino,
+ xfs_agino_t this_agino)
+{
+ struct xfs_iunlink *iu;
+
+ if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
+ return 0;
+
+ iu = kmem_zalloc(sizeof(*iu), KM_SLEEP | KM_NOFS);
+ iu->iu_agino = prev_agino;
+ iu->iu_next_unlinked = this_agino;
+
+ return xfs_iunlink_insert_backref(pag, iu);
+}
+
+/*
+ * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
+ * If @next_unlinked is NULLAGINO, we drop the backref and exit. If there
+ * wasn't any such entry then we don't bother.
+ */
+static int
+xfs_iunlink_change_backref(
+ struct xfs_perag *pag,
+ xfs_agino_t agino,
+ xfs_agino_t next_unlinked)
+{
+ struct xfs_iunlink *iu;
+ int error;
+
+ /* Look up the old entry; if there wasn't one then exit. */
+ iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
+ xfs_iunlink_hash_params);
+ if (!iu)
+ return 0;
+
+ /*
+ * Remove the entry. This shouldn't ever return an error, but if we
+ * couldn't remove the old entry we don't want to add it again to the
+ * hash table, and if the entry disappeared on us then someone's
+ * violated the locking rules and we need to fail loudly. Either way
+ * we cannot remove the inode because internal state is or would have
+ * been corrupt.
+ */
+ error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
+ &iu->iu_rhash_head, xfs_iunlink_hash_params);
+ if (error)
+ return error;
+
+ /* If there is no new next entry just free our item and return. */
+ if (next_unlinked == NULLAGINO) {
+ kmem_free(iu);
+ return 0;
+ }
+
+ /* Update the entry and re-add it to the hash table. */
+ iu->iu_next_unlinked = next_unlinked;
+ return xfs_iunlink_insert_backref(pag, iu);
+}
+
+/* Set up the in-core predecessor structures. */
+int
+xfs_iunlink_init(
+ struct xfs_perag *pag)
+{
+ return rhashtable_init(&pag->pagi_unlinked_hash,
+ &xfs_iunlink_hash_params);
+}
+
+/* Free the in-core predecessor structures. */
+static void
+xfs_iunlink_free_item(
+ void *ptr,
+ void *arg)
+{
+ struct xfs_iunlink *iu = ptr;
+ bool *freed_anything = arg;
+
+ *freed_anything = true;
+ kmem_free(iu);
+}
+
+void
+xfs_iunlink_destroy(
+ struct xfs_perag *pag)
+{
+ bool freed_anything = false;
+
+ rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
+ xfs_iunlink_free_item, &freed_anything);
+
+ ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
+}
+
+/*
+ * Point the AGI unlinked bucket at an inode and log the results. The caller
+ * is responsible for validating the old value.
+ */
+STATIC int
+xfs_iunlink_update_bucket(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ struct xfs_buf *agibp,
+ unsigned int bucket_index,
+ xfs_agino_t new_agino)
+{
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
+ xfs_agino_t old_value;
+ int offset;
+
+ ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
+
+ old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
+ trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
+ old_value, new_agino);
+
+ /*
+ * We should never find the head of the list already set to the value
+ * passed in because either we're adding or removing ourselves from the
+ * head of the list.
+ */
+ if (old_value == new_agino)
+ return -EFSCORRUPTED;
+
+ agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
+ offset = offsetof(struct xfs_agi, agi_unlinked) +
+ (sizeof(xfs_agino_t) * bucket_index);
+ xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
+ return 0;
+}
+
+/* Set an on-disk inode's next_unlinked pointer. */
+STATIC void
+xfs_iunlink_update_dinode(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ xfs_agino_t agino,
+ struct xfs_buf *ibp,
+ struct xfs_dinode *dip,
+ struct xfs_imap *imap,
+ xfs_agino_t next_agino)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ int offset;
+
+ ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
+
+ trace_xfs_iunlink_update_dinode(mp, agno, agino,
+ be32_to_cpu(dip->di_next_unlinked), next_agino);
+
+ dip->di_next_unlinked = cpu_to_be32(next_agino);
+ offset = imap->im_boffset +
+ offsetof(struct xfs_dinode, di_next_unlinked);
+
+ /* need to recalc the inode CRC if appropriate */
+ xfs_dinode_calc_crc(mp, dip);
+ xfs_trans_inode_buf(tp, ibp);
+ xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
+ xfs_inobp_check(mp, ibp);
+}
+
+/* Set an in-core inode's unlinked pointer and return the old value. */
+STATIC int
+xfs_iunlink_update_inode(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ xfs_agnumber_t agno,
+ xfs_agino_t next_agino,
+ xfs_agino_t *old_next_agino)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_dinode *dip;
+ struct xfs_buf *ibp;
+ xfs_agino_t old_value;
+ int error;
+
+ ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
+
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0);
+ if (error)
+ return error;
+
+ /* Make sure the old pointer isn't garbage. */
+ old_value = be32_to_cpu(dip->di_next_unlinked);
+ if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ /*
+ * Since we're updating a linked list, we should never find that the
+ * current pointer is the same as the new value, unless we're
+ * terminating the list.
+ */
+ *old_next_agino = old_value;
+ if (old_value == next_agino) {
+ if (next_agino != NULLAGINO)
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ /* Ok, update the new pointer. */
+ xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ ibp, dip, &ip->i_imap, next_agino);
+ return 0;
+out:
+ xfs_trans_brelse(tp, ibp);
+ return error;
+}
+
+/*
+ * This is called when the inode's link count has gone to 0 or we are creating
+ * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
*
* We place the on-disk inode on a list in the AGI. It will be pulled from this
* list when the inode is freed.
*/
STATIC int
xfs_iunlink(
- struct xfs_trans *tp,
- struct xfs_inode *ip)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
{
- xfs_mount_t *mp = tp->t_mountp;
- xfs_agi_t *agi;
- xfs_dinode_t *dip;
- xfs_buf_t *agibp;
- xfs_buf_t *ibp;
- xfs_agino_t agino;
- short bucket_index;
- int offset;
- int error;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_agi *agi;
+ struct xfs_buf *agibp;
+ xfs_agino_t next_agino;
+ xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+ short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
+ int error;
+ ASSERT(VFS_I(ip)->i_nlink == 0);
ASSERT(VFS_I(ip)->i_mode != 0);
+ trace_xfs_iunlink(ip);
- /*
- * Get the agi buffer first. It ensures lock ordering
- * on the list.
- */
- error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
+ /* Get the agi buffer first. It ensures lock ordering on the list. */
+ error = xfs_read_agi(mp, tp, agno, &agibp);
if (error)
return error;
agi = XFS_BUF_TO_AGI(agibp);
/*
- * Get the index into the agi hash table for the
- * list this inode will go on.
+ * Get the index into the agi hash table for the list this inode will
+ * go on. Make sure the pointer isn't garbage and that this inode
+ * isn't already on the list.
*/
- agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
- ASSERT(agino != 0);
- bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
- ASSERT(agi->agi_unlinked[bucket_index]);
- ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
+ next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
+ if (next_agino == agino ||
+ !xfs_verify_agino_or_null(mp, agno, next_agino))
+ return -EFSCORRUPTED;
+
+ if (next_agino != NULLAGINO) {
+ struct xfs_perag *pag;
+ xfs_agino_t old_agino;
+
+ /*
+ * There is already another inode in the bucket, so point this
+ * inode to the current head of the list.
+ */
+ error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
+ &old_agino);
+ if (error)
+ return error;
+ ASSERT(old_agino == NULLAGINO);
- if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
/*
- * There is already another inode in the bucket we need
- * to add ourselves to. Add us at the front of the list.
- * Here we put the head pointer into our next pointer,
- * and then we fall through to point the head at us.
+ * agino has been unlinked, add a backref from the next inode
+ * back to agino.
*/
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
- 0, 0);
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_iunlink_add_backref(pag, agino, next_agino);
+ xfs_perag_put(pag);
if (error)
return error;
+ }
+
+ /* Point the head of the list to point to this inode. */
+ return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
+}
- ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
- dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
- offset = ip->i_imap.im_boffset +
- offsetof(xfs_dinode_t, di_next_unlinked);
+/* Return the imap, dinode pointer, and buffer for an inode. */
+STATIC int
+xfs_iunlink_map_ino(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ xfs_agino_t agino,
+ struct xfs_imap *imap,
+ struct xfs_dinode **dipp,
+ struct xfs_buf **bpp)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ int error;
- /* need to recalc the inode CRC if appropriate */
- xfs_dinode_calc_crc(mp, dip);
+ imap->im_blkno = 0;
+ error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
+ if (error) {
+ xfs_warn(mp, "%s: xfs_imap returned error %d.",
+ __func__, error);
+ return error;
+ }
- xfs_trans_inode_buf(tp, ibp);
- xfs_trans_log_buf(tp, ibp, offset,
- (offset + sizeof(xfs_agino_t) - 1));
- xfs_inobp_check(mp, ibp);
+ error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0);
+ if (error) {
+ xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
+ __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Walk the unlinked chain from @head_agino until we find the inode that
+ * points to @target_agino. Return the inode number, map, dinode pointer,
+ * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
+ *
+ * @tp, @pag, @head_agino, and @target_agino are input parameters.
+ * @agino, @imap, @dipp, and @bpp are all output parameters.
+ *
+ * Do not call this function if @target_agino is the head of the list.
+ */
+STATIC int
+xfs_iunlink_map_prev(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ xfs_agino_t head_agino,
+ xfs_agino_t target_agino,
+ xfs_agino_t *agino,
+ struct xfs_imap *imap,
+ struct xfs_dinode **dipp,
+ struct xfs_buf **bpp,
+ struct xfs_perag *pag)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_agino_t next_agino;
+ int error;
+
+ ASSERT(head_agino != target_agino);
+ *bpp = NULL;
+
+ /* See if our backref cache can find it faster. */
+ *agino = xfs_iunlink_lookup_backref(pag, target_agino);
+ if (*agino != NULLAGINO) {
+ error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
+ if (error)
+ return error;
+
+ if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
+ return 0;
+
+ /*
+ * If we get here the cache contents were corrupt, so drop the
+ * buffer and fall back to walking the bucket list.
+ */
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ WARN_ON_ONCE(1);
+ }
+
+ trace_xfs_iunlink_map_prev_fallback(mp, agno);
+
+ /* Otherwise, walk the entire bucket until we find it. */
+ next_agino = head_agino;
+ while (next_agino != target_agino) {
+ xfs_agino_t unlinked_agino;
+
+ if (*bpp)
+ xfs_trans_brelse(tp, *bpp);
+
+ *agino = next_agino;
+ error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
+ bpp);
+ if (error)
+ return error;
+
+ unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
+ /*
+ * Make sure this pointer is valid and isn't an obvious
+ * infinite loop.
+ */
+ if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
+ next_agino == unlinked_agino) {
+ XFS_CORRUPTION_ERROR(__func__,
+ XFS_ERRLEVEL_LOW, mp,
+ *dipp, sizeof(**dipp));
+ error = -EFSCORRUPTED;
+ return error;
+ }
+ next_agino = unlinked_agino;
}
- /*
- * Point the bucket head pointer at the inode being inserted.
- */
- ASSERT(agino != 0);
- agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
- offset = offsetof(xfs_agi_t, agi_unlinked) +
- (sizeof(xfs_agino_t) * bucket_index);
- xfs_trans_log_buf(tp, agibp, offset,
- (offset + sizeof(xfs_agino_t) - 1));
return 0;
}
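
The comment block above describes the in-core cache as a set of "X.next_unlinked = Y" records indexed on Y, so that the predecessor of any unlinked inode can be found without walking the bucket. Below is a compact userspace sketch of that predecessor index; it uses a plain array where the kernel uses a per-AG rhashtable, and every demo_ name is illustrative rather than the kernel API.

/* demo backref cache: "prev.next_unlinked = next" records, keyed on next. */
#include <stdio.h>

#define DEMO_NULLAGINO  (~0u)
#define DEMO_MAX_REFS   16

struct demo_backref {
        unsigned int    agino;          /* X: the predecessor */
        unsigned int    next_unlinked;  /* Y: what X points at */
};

static struct demo_backref      refs[DEMO_MAX_REFS];
static int                      nr_refs;

/* Remember that @prev.next_unlinked = @next (cf. xfs_iunlink_add_backref). */
static void
demo_add_backref(unsigned int prev, unsigned int next)
{
        if (nr_refs < DEMO_MAX_REFS) {
                refs[nr_refs].agino = prev;
                refs[nr_refs].next_unlinked = next;
                nr_refs++;
        }
}

/* Return X where X.next_unlinked == @agino (cf. xfs_iunlink_lookup_backref). */
static unsigned int
demo_lookup_backref(unsigned int agino)
{
        int     i;

        for (i = 0; i < nr_refs; i++)
                if (refs[i].next_unlinked == agino)
                        return refs[i].agino;
        return DEMO_NULLAGINO;
}

int
main(void)
{
        /* Bucket list 7 -> 5 -> 3, built by inserting 3, then 5, then 7 at the head. */
        demo_add_backref(5, 3);
        demo_add_backref(7, 5);

        printf("predecessor of 5: %u\n", demo_lookup_backref(5));      /* 7 */
        printf("predecessor of 3: %u\n", demo_lookup_backref(3));      /* 5 */
        printf("predecessor of 7: %u\n", demo_lookup_backref(7));      /* NULLAGINO */
        return 0;
}
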
@@ -1995,181 +2419,106 @@ xfs_iunlink(
*/
STATIC int
xfs_iunlink_remove(
- xfs_trans_t *tp,
- xfs_inode_t *ip)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
{
- xfs_ino_t next_ino;
- xfs_mount_t *mp;
- xfs_agi_t *agi;
- xfs_dinode_t *dip;
- xfs_buf_t *agibp;
- xfs_buf_t *ibp;
- xfs_agnumber_t agno;
- xfs_agino_t agino;
- xfs_agino_t next_agino;
- xfs_buf_t *last_ibp;
- xfs_dinode_t *last_dip = NULL;
- short bucket_index;
- int offset, last_offset = 0;
- int error;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_agi *agi;
+ struct xfs_buf *agibp;
+ struct xfs_buf *last_ibp;
+ struct xfs_dinode *last_dip = NULL;
+ struct xfs_perag *pag = NULL;
+ xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+ xfs_agino_t next_agino;
+ xfs_agino_t head_agino;
+ short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
+ int error;
- mp = tp->t_mountp;
- agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
+ trace_xfs_iunlink_remove(ip);
- /*
- * Get the agi buffer first. It ensures lock ordering
- * on the list.
- */
+ /* Get the agi buffer first. It ensures lock ordering on the list. */
error = xfs_read_agi(mp, tp, agno, &agibp);
if (error)
return error;
-
agi = XFS_BUF_TO_AGI(agibp);
/*
- * Get the index into the agi hash table for the
- * list this inode will go on.
+ * Get the index into the agi hash table for the list this inode will
+ * go on. Make sure the head pointer isn't garbage.
*/
- agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
- if (!xfs_verify_agino(mp, agno, agino))
- return -EFSCORRUPTED;
- bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
- if (!xfs_verify_agino(mp, agno,
- be32_to_cpu(agi->agi_unlinked[bucket_index]))) {
+ head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
+ if (!xfs_verify_agino(mp, agno, head_agino)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
agi, sizeof(*agi));
return -EFSCORRUPTED;
}
- if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
- /*
- * We're at the head of the list. Get the inode's on-disk
- * buffer to see if there is anyone after us on the list.
- * Only modify our next pointer if it is not already NULLAGINO.
- * This saves us the overhead of dealing with the buffer when
- * there is no need to change it.
- */
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
- 0, 0);
- if (error) {
- xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
- __func__, error);
- return error;
- }
- next_agino = be32_to_cpu(dip->di_next_unlinked);
- ASSERT(next_agino != 0);
- if (next_agino != NULLAGINO) {
- dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
- offset = ip->i_imap.im_boffset +
- offsetof(xfs_dinode_t, di_next_unlinked);
-
- /* need to recalc the inode CRC if appropriate */
- xfs_dinode_calc_crc(mp, dip);
-
- xfs_trans_inode_buf(tp, ibp);
- xfs_trans_log_buf(tp, ibp, offset,
- (offset + sizeof(xfs_agino_t) - 1));
- xfs_inobp_check(mp, ibp);
- } else {
- xfs_trans_brelse(tp, ibp);
- }
- /*
- * Point the bucket head pointer at the next inode.
- */
- ASSERT(next_agino != 0);
- ASSERT(next_agino != agino);
- agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
- offset = offsetof(xfs_agi_t, agi_unlinked) +
- (sizeof(xfs_agino_t) * bucket_index);
- xfs_trans_log_buf(tp, agibp, offset,
- (offset + sizeof(xfs_agino_t) - 1));
- } else {
- /*
- * We need to search the list for the inode being freed.
- */
- next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- last_ibp = NULL;
- while (next_agino != agino) {
- struct xfs_imap imap;
+ /*
+ * Set our inode's next_unlinked pointer to NULL and then return
+ * the old pointer value so that we can update whatever was previous
+ * to us in the list to point to whatever was next in the list.
+ */
+ error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
+ if (error)
+ return error;
- if (last_ibp)
- xfs_trans_brelse(tp, last_ibp);
+ /*
+ * If there was a backref pointing from the next inode back to this
+ * one, remove it because we've removed this inode from the list.
+ *
+ * Later, if this inode was in the middle of the list we'll update
+ * this inode's backref to point from the next inode.
+ */
+ if (next_agino != NULLAGINO) {
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_iunlink_change_backref(pag, next_agino,
+ NULLAGINO);
+ if (error)
+ goto out;
+ }
- imap.im_blkno = 0;
- next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
+ if (head_agino == agino) {
+ /* Point the head of the list to the next unlinked inode. */
+ error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
+ next_agino);
+ if (error)
+ goto out;
+ } else {
+ struct xfs_imap imap;
+ xfs_agino_t prev_agino;
- error = xfs_imap(mp, tp, next_ino, &imap, 0);
- if (error) {
- xfs_warn(mp,
- "%s: xfs_imap returned error %d.",
- __func__, error);
- return error;
- }
+ if (!pag)
+ pag = xfs_perag_get(mp, agno);
- error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
- &last_ibp, 0, 0);
- if (error) {
- xfs_warn(mp,
- "%s: xfs_imap_to_bp returned error %d.",
- __func__, error);
- return error;
- }
+ /* We need to search the list for the inode being freed. */
+ error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
+ &prev_agino, &imap, &last_dip, &last_ibp,
+ pag);
+ if (error)
+ goto out;
- last_offset = imap.im_boffset;
- next_agino = be32_to_cpu(last_dip->di_next_unlinked);
- if (!xfs_verify_agino(mp, agno, next_agino)) {
- XFS_CORRUPTION_ERROR(__func__,
- XFS_ERRLEVEL_LOW, mp,
- last_dip, sizeof(*last_dip));
- return -EFSCORRUPTED;
- }
- }
+ /* Point the previous inode on the list to the next inode. */
+ xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
+ last_dip, &imap, next_agino);
/*
- * Now last_ibp points to the buffer previous to us on the
- * unlinked list. Pull us from the list.
+ * Now we deal with the backref for this inode. If this inode
+ * pointed at a real inode, change the backref that pointed to
+ * us to point to our old next. If this inode was the end of
+ * the list, delete the backref that pointed to us. Note that
+ * change_backref takes care of deleting the backref if
+ * next_agino is NULLAGINO.
*/
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
- 0, 0);
- if (error) {
- xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
- __func__, error);
- return error;
- }
- next_agino = be32_to_cpu(dip->di_next_unlinked);
- ASSERT(next_agino != 0);
- ASSERT(next_agino != agino);
- if (next_agino != NULLAGINO) {
- dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
- offset = ip->i_imap.im_boffset +
- offsetof(xfs_dinode_t, di_next_unlinked);
-
- /* need to recalc the inode CRC if appropriate */
- xfs_dinode_calc_crc(mp, dip);
-
- xfs_trans_inode_buf(tp, ibp);
- xfs_trans_log_buf(tp, ibp, offset,
- (offset + sizeof(xfs_agino_t) - 1));
- xfs_inobp_check(mp, ibp);
- } else {
- xfs_trans_brelse(tp, ibp);
- }
- /*
- * Point the previous inode on the list to the next inode.
- */
- last_dip->di_next_unlinked = cpu_to_be32(next_agino);
- ASSERT(next_agino != 0);
- offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
-
- /* need to recalc the inode CRC if appropriate */
- xfs_dinode_calc_crc(mp, last_dip);
-
- xfs_trans_inode_buf(tp, last_ibp);
- xfs_trans_log_buf(tp, last_ibp, offset,
- (offset + sizeof(xfs_agino_t) - 1));
- xfs_inobp_check(mp, last_ibp);
+ error = xfs_iunlink_change_backref(pag, agino, next_agino);
+ if (error)
+ goto out;
}
- return 0;
+
+out:
+ if (pag)
+ xfs_perag_put(pag);
+ return error;
}
/*
@@ -2833,11 +3182,9 @@ xfs_rename_alloc_whiteout(
/*
* Prepare the tmpfile inode as if it were created through the VFS.
- * Otherwise, the link increment paths will complain about nlink 0->1.
- * Drop the link count as done by d_tmpfile(), complete the inode setup
- * and flag it as linkable.
+ * Complete the inode setup and flag it as linkable. nlink is already
+ * zero, so we can skip the drop_nlink.
*/
- drop_nlink(VFS_I(tmpfile));
xfs_setup_iops(tmpfile);
xfs_finish_inode_setup(tmpfile);
VFS_I(tmpfile)->i_state |= I_LINKABLE;
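
When the backref cache misses, xfs_iunlink_map_prev() above falls back to walking the bucket from the head until it finds the inode whose next pointer is the target, with loop and range checks along the way. A simplified userspace sketch of that walk over an in-memory next[] array follows; demo_ names are illustrative, and the kernel of course reads each inode cluster buffer instead of indexing an array.

/* demo_find_prev: linear fallback walk of one unlinked bucket. */
#include <stdio.h>

#define DEMO_NULLAGINO  (~0u)
#define DEMO_NINODES    8

/* next_unlinked[i] is inode i's next_unlinked pointer. */
static unsigned int next_unlinked[DEMO_NINODES];

static int
demo_find_prev(unsigned int head, unsigned int target, unsigned int *prev)
{
        unsigned int    agino = head;

        while (agino != DEMO_NULLAGINO) {
                unsigned int    nextino;

                if (agino >= DEMO_NINODES)
                        return -1;      /* out of range: corruption */
                nextino = next_unlinked[agino];
                if (nextino == agino)
                        return -1;      /* obvious loop: corruption */
                if (nextino == target) {
                        *prev = agino;
                        return 0;
                }
                agino = nextino;
        }
        return -1;                      /* target not found in this bucket */
}

int
main(void)
{
        unsigned int    prev;

        /* Bucket: 7 -> 5 -> 3 -> end. */
        next_unlinked[7] = 5;
        next_unlinked[5] = 3;
        next_unlinked[3] = DEMO_NULLAGINO;

        if (demo_find_prev(7, 3, &prev) == 0)
                printf("inode %u points at 3\n", prev); /* 5 */
        return 0;
}
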
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index be2014520155..e62074a5257c 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -500,4 +500,7 @@ extern struct kmem_zone *xfs_inode_zone;
bool xfs_inode_verify_forks(struct xfs_inode *ip);
+int xfs_iunlink_init(struct xfs_perag *pag);
+void xfs_iunlink_destroy(struct xfs_perag *pag);
+
#endif /* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 27c93b5f029d..63d323916bba 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -35,18 +35,40 @@
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
-void
+static int
+xfs_alert_fsblock_zero(
+ xfs_inode_t *ip,
+ xfs_bmbt_irec_t *imap)
+{
+ xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
+ "Access to block zero in inode %llu "
+ "start_block: %llx start_off: %llx "
+ "blkcnt: %llx extent-state: %x",
+ (unsigned long long)ip->i_ino,
+ (unsigned long long)imap->br_startblock,
+ (unsigned long long)imap->br_startoff,
+ (unsigned long long)imap->br_blockcount,
+ imap->br_state);
+ return -EFSCORRUPTED;
+}
+
+int
xfs_bmbt_to_iomap(
struct xfs_inode *ip,
struct iomap *iomap,
- struct xfs_bmbt_irec *imap)
+ struct xfs_bmbt_irec *imap,
+ bool shared)
{
struct xfs_mount *mp = ip->i_mount;
+ if (unlikely(!imap->br_startblock && !XFS_IS_REALTIME_INODE(ip)))
+ return xfs_alert_fsblock_zero(ip, imap);
+
if (imap->br_startblock == HOLESTARTBLOCK) {
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
- } else if (imap->br_startblock == DELAYSTARTBLOCK) {
+ } else if (imap->br_startblock == DELAYSTARTBLOCK ||
+ isnullstartblock(imap->br_startblock)) {
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_DELALLOC;
} else {
@@ -60,6 +82,13 @@ xfs_bmbt_to_iomap(
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+
+ if (xfs_ipincount(ip) &&
+ (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
+ iomap->flags |= IOMAP_F_DIRTY;
+ if (shared)
+ iomap->flags |= IOMAP_F_SHARED;
+ return 0;
}
static void
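
xfs_bmbt_to_iomap() now rejects a zero start block, classifies the extent, and sets the dirty and shared iomap flags itself. A rough sketch of that classification, reduced to the start-block checks, is below; the demo_ types and constants stand in for the kernel ones, and the sketch skips the isnullstartblock() delalloc encoding and the realtime exception.

/* demo_bmbt_to_iomap: classify an extent's start block into an iomap type. */
#include <stdio.h>

#define DEMO_HOLESTARTBLOCK     ((long long)-2)
#define DEMO_DELAYSTARTBLOCK    ((long long)-1)

enum demo_iomap_type { DEMO_IOMAP_HOLE, DEMO_IOMAP_DELALLOC, DEMO_IOMAP_MAPPED };

struct demo_iomap {
        enum demo_iomap_type    type;
        int                     shared; /* mirrors IOMAP_F_SHARED */
};

static int
demo_bmbt_to_iomap(long long startblock, int shared, struct demo_iomap *iomap)
{
        if (startblock == 0)
                return -1;                      /* block zero: treat as corruption */
        if (startblock == DEMO_HOLESTARTBLOCK)
                iomap->type = DEMO_IOMAP_HOLE;
        else if (startblock == DEMO_DELAYSTARTBLOCK)
                iomap->type = DEMO_IOMAP_DELALLOC;
        else
                iomap->type = DEMO_IOMAP_MAPPED;
        iomap->shared = shared;
        return 0;
}

int
main(void)
{
        struct demo_iomap       iomap;

        if (demo_bmbt_to_iomap(1234, 1, &iomap) == 0)
                printf("type %d shared %d\n", iomap.type, iomap.shared);
        printf("block zero -> %d\n", demo_bmbt_to_iomap(0, 0, &iomap));
        return 0;
}
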
@@ -138,23 +167,6 @@ xfs_iomap_eof_align_last_fsb(
return 0;
}
-STATIC int
-xfs_alert_fsblock_zero(
- xfs_inode_t *ip,
- xfs_bmbt_irec_t *imap)
-{
- xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
- "Access to block zero in inode %llu "
- "start_block: %llx start_off: %llx "
- "blkcnt: %llx extent-state: %x",
- (unsigned long long)ip->i_ino,
- (unsigned long long)imap->br_startblock,
- (unsigned long long)imap->br_startoff,
- (unsigned long long)imap->br_blockcount,
- imap->br_state);
- return -EFSCORRUPTED;
-}
-
int
xfs_iomap_write_direct(
xfs_inode_t *ip,
@@ -383,12 +395,13 @@ xfs_quota_calc_throttle(
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
struct xfs_inode *ip,
+ int whichfork,
loff_t offset,
loff_t count,
struct xfs_iext_cursor *icur)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
struct xfs_bmbt_irec prev;
int shift = 0;
@@ -522,15 +535,16 @@ xfs_file_iomap_begin_delay(
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t maxbytes_fsb =
XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
xfs_fileoff_t end_fsb;
- int error = 0, eof = 0;
- struct xfs_bmbt_irec got;
- struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec imap, cmap;
+ struct xfs_iext_cursor icur, ccur;
xfs_fsblock_t prealloc_blocks = 0;
+ bool eof = false, cow_eof = false, shared = false;
+ int whichfork = XFS_DATA_FORK;
+ int error = 0;
ASSERT(!XFS_IS_REALTIME_INODE(ip));
ASSERT(!xfs_get_extsz_hint(ip));
@@ -548,7 +562,7 @@ xfs_file_iomap_begin_delay(
XFS_STATS_INC(mp, xs_blk_mapw);
- if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
if (error)
goto out_unlock;
@@ -556,53 +570,101 @@ xfs_file_iomap_begin_delay(
end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
- eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got);
+ /*
+	 * Search the data fork first to look up our source mapping.  We
+ * always need the data fork map, as we have to return it to the
+ * iomap code so that the higher level write code can read data in to
+ * perform read-modify-write cycles for unaligned writes.
+ */
+ eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
if (eof)
- got.br_startoff = end_fsb; /* fake hole until the end */
+ imap.br_startoff = end_fsb; /* fake hole until the end */
+
+ /* We never need to allocate blocks for zeroing a hole. */
+ if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
+ xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
+ goto out_unlock;
+ }
- if (got.br_startoff <= offset_fsb) {
+ /*
+ * Search the COW fork extent list even if we did not find a data fork
+ * extent. This serves two purposes: first this implements the
+ * speculative preallocation using cowextsize, so that we also unshare
+	 * blocks adjacent to shared blocks instead of just the shared blocks
+ * themselves. Second the lookup in the extent list is generally faster
+ * than going out to the shared extent tree.
+ */
+ if (xfs_is_cow_inode(ip)) {
+ if (!ip->i_cowfp) {
+ ASSERT(!xfs_is_reflink_inode(ip));
+ xfs_ifork_init_cow(ip);
+ }
+ cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
+ &ccur, &cmap);
+ if (!cow_eof && cmap.br_startoff <= offset_fsb) {
+ trace_xfs_reflink_cow_found(ip, &cmap);
+ whichfork = XFS_COW_FORK;
+ goto done;
+ }
+ }
+
+ if (imap.br_startoff <= offset_fsb) {
/*
* For reflink files we may need a delalloc reservation when
* overwriting shared extents. This includes zeroing of
* existing extents that contain data.
*/
- if (xfs_is_reflink_inode(ip) &&
- ((flags & IOMAP_WRITE) ||
- got.br_state != XFS_EXT_UNWRITTEN)) {
- xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
- error = xfs_reflink_reserve_cow(ip, &got);
- if (error)
- goto out_unlock;
+ if (!xfs_is_cow_inode(ip) ||
+ ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
+ trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
+ &imap);
+ goto done;
}
- trace_xfs_iomap_found(ip, offset, count, 0, &got);
- goto done;
- }
+ xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
- if (flags & IOMAP_ZERO) {
- xfs_hole_to_iomap(ip, iomap, offset_fsb, got.br_startoff);
- goto out_unlock;
+ /* Trim the mapping to the nearest shared extent boundary. */
+ error = xfs_inode_need_cow(ip, &imap, &shared);
+ if (error)
+ goto out_unlock;
+
+ /* Not shared? Just report the (potentially capped) extent. */
+ if (!shared) {
+ trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
+ &imap);
+ goto done;
+ }
+
+ /*
+ * Fork all the shared blocks from our write offset until the
+ * end of the extent.
+ */
+ whichfork = XFS_COW_FORK;
+ end_fsb = imap.br_startoff + imap.br_blockcount;
+ } else {
+ /*
+ * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
+		 * pages to keep the chunks of work done here somewhat
+ * symmetric with the work writeback does. This is a completely
+ * arbitrary number pulled out of thin air.
+ *
+		 * Note that the value needs to be less than 32 bits wide until
+ * the lower level functions are updated.
+ */
+ count = min_t(loff_t, count, 1024 * PAGE_SIZE);
+ end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
+
+ if (xfs_is_always_cow_inode(ip))
+ whichfork = XFS_COW_FORK;
}
error = xfs_qm_dqattach_locked(ip, false);
if (error)
goto out_unlock;
- /*
- * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
- * to keep the chunks of work done where somewhat symmetric with the
- * work writeback does. This is a completely arbitrary number pulled
- * out of thin air as a best guess for initial testing.
- *
- * Note that the values needs to be less than 32-bits wide until
- * the lower level functions are updated.
- */
- count = min_t(loff_t, count, 1024 * PAGE_SIZE);
- end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
-
if (eof) {
- prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
- &icur);
+ prealloc_blocks = xfs_iomap_prealloc_size(ip, whichfork, offset,
+ count, &icur);
if (prealloc_blocks) {
xfs_extlen_t align;
xfs_off_t end_offset;
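
The rewritten delalloc path above looks up the data fork first, then the COW fork, and only then decides which fork to reserve blocks in. A condensed sketch of that decision follows, ignoring locking, extent trimming and preallocation sizing; all demo_ names are illustrative and the cases are simplified.

/* demo_pick_fork: condensed version of the fork selection in the delalloc path. */
#include <stdbool.h>
#include <stdio.h>

enum demo_fork { DEMO_DATA_FORK, DEMO_COW_FORK };

static enum demo_fork
demo_pick_fork(bool cow_inode, bool cow_extent_covers, bool data_extent_covers,
               bool data_extent_shared, bool always_cow)
{
        /* 1. An existing COW fork extent covering the offset always wins. */
        if (cow_inode && cow_extent_covers)
                return DEMO_COW_FORK;
        /* 2. Covered by a data fork extent: COW only if that extent is shared. */
        if (data_extent_covers)
                return (cow_inode && data_extent_shared) ?
                        DEMO_COW_FORK : DEMO_DATA_FORK;
        /* 3. Hole: new delalloc goes to the COW fork only for always_cow files. */
        return always_cow ? DEMO_COW_FORK : DEMO_DATA_FORK;
}

int
main(void)
{
        printf("shared overwrite -> fork %d\n",
               demo_pick_fork(true, false, true, true, false));         /* COW */
        printf("plain hole write -> fork %d\n",
               demo_pick_fork(false, false, false, false, false));      /* data */
        return 0;
}
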
@@ -623,9 +685,11 @@ xfs_file_iomap_begin_delay(
}
retry:
- error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
- end_fsb - offset_fsb, prealloc_blocks, &got, &icur,
- eof);
+ error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
+ end_fsb - offset_fsb, prealloc_blocks,
+ whichfork == XFS_DATA_FORK ? &imap : &cmap,
+ whichfork == XFS_DATA_FORK ? &icur : &ccur,
+ whichfork == XFS_DATA_FORK ? eof : cow_eof);
switch (error) {
case 0:
break;
@@ -647,186 +711,22 @@ retry:
* them out if the write happens to fail.
*/
iomap->flags |= IOMAP_F_NEW;
- trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
+ trace_xfs_iomap_alloc(ip, offset, count, whichfork,
+ whichfork == XFS_DATA_FORK ? &imap : &cmap);
done:
- if (isnullstartblock(got.br_startblock))
- got.br_startblock = DELAYSTARTBLOCK;
-
- if (!got.br_startblock) {
- error = xfs_alert_fsblock_zero(ip, &got);
- if (error)
+ if (whichfork == XFS_COW_FORK) {
+ if (imap.br_startoff > offset_fsb) {
+ xfs_trim_extent(&cmap, offset_fsb,
+ imap.br_startoff - offset_fsb);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
goto out_unlock;
- }
-
- xfs_bmbt_to_iomap(ip, iomap, &got);
-
-out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return error;
-}
-
-/*
- * Pass in a delayed allocate extent, convert it to real extents;
- * return to the caller the extent we create which maps on top of
- * the originating callers request.
- *
- * Called without a lock on the inode.
- *
- * We no longer bother to look at the incoming map - all we have to
- * guarantee is that whatever we allocate fills the required range.
- */
-int
-xfs_iomap_write_allocate(
- xfs_inode_t *ip,
- int whichfork,
- xfs_off_t offset,
- xfs_bmbt_irec_t *imap,
- unsigned int *cow_seq)
-{
- xfs_mount_t *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_fileoff_t offset_fsb, last_block;
- xfs_fileoff_t end_fsb, map_start_fsb;
- xfs_filblks_t count_fsb;
- xfs_trans_t *tp;
- int nimaps;
- int error = 0;
- int flags = XFS_BMAPI_DELALLOC;
- int nres;
-
- if (whichfork == XFS_COW_FORK)
- flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
-
- /*
- * Make sure that the dquots are there.
- */
- error = xfs_qm_dqattach(ip);
- if (error)
- return error;
-
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- count_fsb = imap->br_blockcount;
- map_start_fsb = imap->br_startoff;
-
- XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
-
- while (count_fsb != 0) {
- /*
- * Set up a transaction with which to allocate the
- * backing store for the file. Do allocations in a
- * loop until we get some space in the range we are
- * interested in. The other space that might be allocated
- * is in the delayed allocation extent on which we sit
- * but before our buffer starts.
- */
- nimaps = 0;
- while (nimaps == 0) {
- nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
- /*
- * We have already reserved space for the extent and any
- * indirect blocks when creating the delalloc extent,
- * there is no need to reserve space in this transaction
- * again.
- */
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
- 0, XFS_TRANS_RESERVE, &tp);
- if (error)
- return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
-
- /*
- * it is possible that the extents have changed since
- * we did the read call as we dropped the ilock for a
- * while. We have to be careful about truncates or hole
- * punchs here - we are not allowed to allocate
- * non-delalloc blocks here.
- *
- * The only protection against truncation is the pages
- * for the range we are being asked to convert are
- * locked and hence a truncate will block on them
- * first.
- *
- * As a result, if we go beyond the range we really
- * need and hit an delalloc extent boundary followed by
- * a hole while we have excess blocks in the map, we
- * will fill the hole incorrectly and overrun the
- * transaction reservation.
- *
- * Using a single map prevents this as we are forced to
- * check each map we look for overlap with the desired
- * range and abort as soon as we find it. Also, given
- * that we only return a single map, having one beyond
- * what we can return is probably a bit silly.
- *
- * We also need to check that we don't go beyond EOF;
- * this is a truncate optimisation as a truncate sets
- * the new file size before block on the pages we
- * currently have locked under writeback. Because they
- * are about to be tossed, we don't need to write them
- * back....
- */
- nimaps = 1;
- end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
- error = xfs_bmap_last_offset(ip, &last_block,
- XFS_DATA_FORK);
- if (error)
- goto trans_cancel;
-
- last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
- if ((map_start_fsb + count_fsb) > last_block) {
- count_fsb = last_block - map_start_fsb;
- if (count_fsb == 0) {
- error = -EAGAIN;
- goto trans_cancel;
- }
- }
-
- /*
- * From this point onwards we overwrite the imap
- * pointer that the caller gave to us.
- */
- error = xfs_bmapi_write(tp, ip, map_start_fsb,
- count_fsb, flags, nres, imap,
- &nimaps);
- if (error)
- goto trans_cancel;
-
- error = xfs_trans_commit(tp);
- if (error)
- goto error0;
-
- if (whichfork == XFS_COW_FORK)
- *cow_seq = READ_ONCE(ifp->if_seq);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
-
- /*
- * See if we were able to allocate an extent that
- * covers at least part of the callers request
- */
- if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
- return xfs_alert_fsblock_zero(ip, imap);
-
- if ((offset_fsb >= imap->br_startoff) &&
- (offset_fsb < (imap->br_startoff +
- imap->br_blockcount))) {
- XFS_STATS_INC(mp, xs_xstrat_quick);
- return 0;
}
-
- /*
- * So far we have not mapped the requested part of the
- * file, just surrounding data, try again.
- */
- count_fsb -= imap->br_blockcount;
- map_start_fsb = imap->br_startoff + imap->br_blockcount;
+ /* ensure we only report blocks we have a reservation for */
+ xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
+ shared = true;
}
-
-trans_cancel:
- xfs_trans_cancel(tp);
-error0:
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
+out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
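
The done: label above trims the COW mapping to the start of the next data extent and then trims the data mapping to the COW reservation, so only blocks with a reservation are reported. The clamping itself is simple interval arithmetic, sketched here with illustrative demo_ names; it mirrors what xfs_trim_extent() does, minus the start-block adjustment.

/* demo_trim_extent: clamp an extent to [start, start + len). */
#include <stdio.h>

struct demo_extent {
        unsigned long long      startoff;       /* file offset, in blocks */
        unsigned long long      blockcount;
};

static void
demo_trim_extent(struct demo_extent *ext, unsigned long long start,
                 unsigned long long len)
{
        unsigned long long      end = start + len;
        unsigned long long      ext_end = ext->startoff + ext->blockcount;

        if (ext->startoff >= end || ext_end <= start) {
                ext->blockcount = 0;            /* no overlap */
                return;
        }
        if (ext->startoff < start) {
                ext->blockcount -= start - ext->startoff;
                ext->startoff = start;
        }
        if (ext->startoff + ext->blockcount > end)
                ext->blockcount = end - ext->startoff;
}

int
main(void)
{
        struct demo_extent      imap = { .startoff = 0,  .blockcount = 100 };
        struct demo_extent      cmap = { .startoff = 40, .blockcount = 20 };

        /* Report only the blocks covered by the COW reservation. */
        demo_trim_extent(&imap, cmap.startoff, cmap.blockcount);
        printf("imap now [%llu, +%llu)\n", imap.startoff, imap.blockcount); /* [40, +20) */
        return 0;
}
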
@@ -975,7 +875,7 @@ xfs_ilock_for_iomap(
* COW writes may allocate delalloc space or convert unwritten COW
* extents, so we need to make sure to take the lock exclusively here.
*/
- if (xfs_is_reflink_inode(ip) && is_write) {
+ if (xfs_is_cow_inode(ip) && is_write) {
/*
* FIXME: It could still overwrite on unshared extents and not
* need allocation.
@@ -1009,7 +909,7 @@ relock:
	 * check, so if we got ILOCK_SHARED for a write but we're now a
* reflink inode we have to switch to ILOCK_EXCL and relock.
*/
- if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
+ if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
xfs_iunlock(ip, mode);
mode = XFS_ILOCK_EXCL;
goto relock;
@@ -1081,23 +981,33 @@ xfs_file_iomap_begin(
* Break shared extents if necessary. Checks for non-blocking IO have
* been done up front, so we don't need to do them here.
*/
- if (xfs_is_reflink_inode(ip)) {
+ if (xfs_is_cow_inode(ip)) {
+ struct xfs_bmbt_irec cmap;
+ bool directio = (flags & IOMAP_DIRECT);
+
/* if zeroing doesn't need COW allocation, then we are done. */
if ((flags & IOMAP_ZERO) &&
!needs_cow_for_zeroing(&imap, nimaps))
goto out_found;
- if (flags & IOMAP_DIRECT) {
- /* may drop and re-acquire the ilock */
- error = xfs_reflink_allocate_cow(ip, &imap, &shared,
- &lockmode);
- if (error)
- goto out_unlock;
- } else {
- error = xfs_reflink_reserve_cow(ip, &imap);
- if (error)
- goto out_unlock;
- }
+ /* may drop and re-acquire the ilock */
+ cmap = imap;
+ error = xfs_reflink_allocate_cow(ip, &cmap, &shared, &lockmode,
+ directio);
+ if (error)
+ goto out_unlock;
+
+ /*
+ * For buffered writes we need to report the address of the
+ * previous block (if there was any) so that the higher level
+ * write code can perform read-modify-write operations; we
+ * won't need the CoW fork mapping until writeback. For direct
+ * I/O, which must be block aligned, we need to report the
+ * newly allocated address. If the data fork has a hole, copy
+ * the COW fork mapping to avoid allocating to the data fork.
+ */
+ if (directio || imap.br_startblock == HOLESTARTBLOCK)
+ imap = cmap;
end_fsb = imap.br_startoff + imap.br_blockcount;
length = XFS_FSB_TO_B(mp, end_fsb) - offset;
@@ -1139,23 +1049,15 @@ xfs_file_iomap_begin(
return error;
iomap->flags |= IOMAP_F_NEW;
- trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
+ trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
out_finish:
- if (xfs_ipincount(ip) && (ip->i_itemp->ili_fsync_fields
- & ~XFS_ILOG_TIMESTAMP))
- iomap->flags |= IOMAP_F_DIRTY;
-
- xfs_bmbt_to_iomap(ip, iomap, &imap);
-
- if (shared)
- iomap->flags |= IOMAP_F_SHARED;
- return 0;
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
out_found:
ASSERT(nimaps);
xfs_iunlock(ip, lockmode);
- trace_xfs_iomap_found(ip, offset, length, 0, &imap);
+ trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
goto out_finish;
out_unlock:
@@ -1241,6 +1143,92 @@ const struct iomap_ops xfs_iomap_ops = {
};
static int
+xfs_seek_iomap_begin(
+ struct inode *inode,
+ loff_t offset,
+ loff_t length,
+ unsigned flags,
+ struct iomap *iomap)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
+ xfs_fileoff_t cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec imap, cmap;
+ int error = 0;
+ unsigned lockmode;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ lockmode = xfs_ilock_data_map_shared(ip);
+ if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+ if (error)
+ goto out_unlock;
+ }
+
+ if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
+ /*
+ * If we found a data extent we are done.
+ */
+ if (imap.br_startoff <= offset_fsb)
+ goto done;
+ data_fsb = imap.br_startoff;
+ } else {
+ /*
+ * Fake a hole until the end of the file.
+ */
+ data_fsb = min(XFS_B_TO_FSB(mp, offset + length),
+ XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
+ }
+
+ /*
+ * If a COW fork extent covers the hole, report it - capped to the next
+ * data fork extent:
+ */
+ if (xfs_inode_has_cow_data(ip) &&
+ xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
+ cow_fsb = cmap.br_startoff;
+ if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
+ if (data_fsb < cow_fsb + cmap.br_blockcount)
+ end_fsb = min(end_fsb, data_fsb);
+ xfs_trim_extent(&cmap, offset_fsb, end_fsb);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
+ /*
+ * This is a COW extent, so we must probe the page cache
+ * because there could be dirty page cache being backed
+ * by this extent.
+ */
+ iomap->type = IOMAP_UNWRITTEN;
+ goto out_unlock;
+ }
+
+ /*
+ * Else report a hole, capped to the next found data or COW extent.
+ */
+ if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
+ imap.br_blockcount = cow_fsb - offset_fsb;
+ else
+ imap.br_blockcount = data_fsb - offset_fsb;
+ imap.br_startoff = offset_fsb;
+ imap.br_startblock = HOLESTARTBLOCK;
+ imap.br_state = XFS_EXT_NORM;
+done:
+ xfs_trim_extent(&imap, offset_fsb, end_fsb);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+out_unlock:
+ xfs_iunlock(ip, lockmode);
+ return error;
+}
+
+const struct iomap_ops xfs_seek_iomap_ops = {
+ .iomap_begin = xfs_seek_iomap_begin,
+};
+
+static int
xfs_xattr_iomap_begin(
struct inode *inode,
loff_t offset,
@@ -1273,12 +1261,10 @@ xfs_xattr_iomap_begin(
out_unlock:
xfs_iunlock(ip, lockmode);
- if (!error) {
- ASSERT(nimaps);
- xfs_bmbt_to_iomap(ip, iomap, &imap);
- }
-
- return error;
+ if (error)
+ return error;
+ ASSERT(nimaps);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
}
const struct iomap_ops xfs_xattr_iomap_ops = {
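
xfs_seek_iomap_begin() above reports COW-fork extents as unwritten so that SEEK_DATA and SEEK_HOLE will probe the page cache, and otherwise reports a hole capped at the next data or COW extent, whichever starts first. A sketch of just that hole-length arithmetic, with illustrative demo_ names:

/* demo_hole_len: hole length at @offset_fsb, capped to the nearer of the
 * next data extent and the next COW extent. */
#include <stdio.h>

#define DEMO_NULLFILEOFF        (~0ull)

static unsigned long long
demo_hole_len(unsigned long long offset_fsb, unsigned long long data_fsb,
              unsigned long long cow_fsb)
{
        if (cow_fsb != DEMO_NULLFILEOFF && cow_fsb < data_fsb)
                return cow_fsb - offset_fsb;
        return data_fsb - offset_fsb;
}

int
main(void)
{
        /* Hole at block 10; next data extent at 50, next COW extent at 30. */
        printf("hole is %llu blocks\n", demo_hole_len(10, 50, 30));     /* 20 */
        /* No COW extent at all: the hole runs up to the data extent. */
        printf("hole is %llu blocks\n", demo_hole_len(10, 50, DEMO_NULLFILEOFF)); /* 40 */
        return 0;
}
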
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index c6170548831b..5c2f6aa6d78f 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -13,12 +13,10 @@ struct xfs_bmbt_irec;
int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *, int);
-int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
- struct xfs_bmbt_irec *, unsigned int *);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
-void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
- struct xfs_bmbt_irec *);
+int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
+ struct xfs_bmbt_irec *, bool shared);
xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);
static inline xfs_filblks_t
@@ -42,6 +40,7 @@ xfs_aligned_fsb_count(
}
extern const struct iomap_ops xfs_iomap_ops;
+extern const struct iomap_ops xfs_seek_iomap_ops;
extern const struct iomap_ops xfs_xattr_iomap_ops;
#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index f48ffd7a8d3e..74047bd0c1ae 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -191,9 +191,18 @@ xfs_generic_create(
xfs_setup_iops(ip);
- if (tmpfile)
+ if (tmpfile) {
+ /*
+ * The VFS requires that any inode fed to d_tmpfile must have
+ * nlink == 1 so that it can decrement the nlink in d_tmpfile.
+ * However, we created the temp file with nlink == 0 because
+ * we're not allowed to put an inode with nlink > 0 on the
+ * unlinked list. Therefore we have to set nlink to 1 so that
+ * d_tmpfile can immediately set it back to zero.
+ */
+ set_nlink(inode, 1);
d_tmpfile(dentry, inode);
- else
+ } else
d_instantiate(dentry, inode);
xfs_finish_inode_setup(ip);
@@ -522,6 +531,10 @@ xfs_vn_getattr(
}
}
+ /*
+ * Note: If you add another clause to set an attribute flag, please
+ * update attributes_mask below.
+ */
if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
@@ -529,6 +542,10 @@ xfs_vn_getattr(
if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
stat->attributes |= STATX_ATTR_NODUMP;
+ stat->attributes_mask |= (STATX_ATTR_IMMUTABLE |
+ STATX_ATTR_APPEND |
+ STATX_ATTR_NODUMP);
+
switch (inode->i_mode & S_IFMT) {
case S_IFBLK:
case S_IFCHR:
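
The comment added to xfs_generic_create() above documents a small invariant dance: the inode is created with nlink == 0 so it can go straight onto the unlinked list, but d_tmpfile() insists on decrementing from 1. A toy sketch of those transitions; the demo_ names are illustrative and this is not VFS code.

/* demo_tmpfile_nlink: the 0 -> 1 -> 0 link-count dance around d_tmpfile(). */
#include <assert.h>
#include <stdio.h>

struct demo_inode { unsigned int nlink; };

static void
demo_d_tmpfile(struct demo_inode *inode)
{
        /* Mirrors d_tmpfile(): it expects nlink == 1 and drops it to zero. */
        assert(inode->nlink == 1);
        inode->nlink--;
}

int
main(void)
{
        struct demo_inode inode = { .nlink = 0 };       /* created unlinked */

        inode.nlink = 1;                /* cf. set_nlink(inode, 1) before d_tmpfile() */
        demo_d_tmpfile(&inode);
        printf("tmpfile nlink = %u\n", inode.nlink);    /* back to 0 */
        return 0;
}
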
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 9fe88d125f0a..3371d1ff27c4 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2439,17 +2439,21 @@ xlog_recover_validate_buf_type(
case XFS_BLFT_BTREE_BUF:
switch (magic32) {
case XFS_ABTB_CRC_MAGIC:
- case XFS_ABTC_CRC_MAGIC:
case XFS_ABTB_MAGIC:
+ bp->b_ops = &xfs_bnobt_buf_ops;
+ break;
+ case XFS_ABTC_CRC_MAGIC:
case XFS_ABTC_MAGIC:
- bp->b_ops = &xfs_allocbt_buf_ops;
+ bp->b_ops = &xfs_cntbt_buf_ops;
break;
case XFS_IBT_CRC_MAGIC:
- case XFS_FIBT_CRC_MAGIC:
case XFS_IBT_MAGIC:
- case XFS_FIBT_MAGIC:
bp->b_ops = &xfs_inobt_buf_ops;
break;
+ case XFS_FIBT_CRC_MAGIC:
+ case XFS_FIBT_MAGIC:
+ bp->b_ops = &xfs_finobt_buf_ops;
+ break;
case XFS_BMAP_CRC_MAGIC:
case XFS_BMAP_MAGIC:
bp->b_ops = &xfs_bmbt_buf_ops;
@@ -3045,7 +3049,7 @@ xlog_recover_inode_pass2(
* Make sure the place we're flushing out to really looks
* like an inode!
*/
- if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
+ if (unlikely(!xfs_verify_magic16(bp, dip->di_magic))) {
xfs_alert(mp,
"%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
__func__, dip, bp, in_f->ilf_ino);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b4d8c318be3c..fd63b0b1307c 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -149,6 +149,7 @@ xfs_free_perag(
spin_unlock(&mp->m_perag_lock);
ASSERT(pag);
ASSERT(atomic_read(&pag->pag_ref) == 0);
+ xfs_iunlink_destroy(pag);
xfs_buf_hash_destroy(pag);
mutex_destroy(&pag->pag_ici_reclaim_lock);
call_rcu(&pag->rcu_head, __xfs_free_perag);
@@ -227,6 +228,9 @@ xfs_initialize_perag(
/* first new pag is fully initialized */
if (first_initialised == NULLAGNUMBER)
first_initialised = index;
+ error = xfs_iunlink_init(pag);
+ if (error)
+ goto out_hash_destroy;
}
index = xfs_set_inode_alloc(mp, agcount);
@@ -249,6 +253,7 @@ out_unwind_new_pags:
if (!pag)
break;
xfs_buf_hash_destroy(pag);
+ xfs_iunlink_destroy(pag);
mutex_destroy(&pag->pag_ici_reclaim_lock);
kmem_free(pag);
}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 7daafe064af8..110f927cf943 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -138,7 +138,7 @@ typedef struct xfs_mount {
struct mutex m_growlock; /* growfs mutex */
int m_fixedfsid[2]; /* unchanged for life of FS */
uint64_t m_flags; /* global mount flags */
- bool m_inotbt_nores; /* no per-AG finobt resv. */
+ bool m_finobt_nores; /* no per-AG finobt resv. */
int m_ialloc_inos; /* inodes in inode allocation */
int m_ialloc_blks; /* blocks in inode allocation */
int m_ialloc_min_blks;/* min blocks in sparse inode
@@ -194,6 +194,7 @@ typedef struct xfs_mount {
*/
uint32_t m_generation;
+ bool m_always_cow;
bool m_fail_unmount;
#ifdef DEBUG
/*
@@ -396,6 +397,13 @@ typedef struct xfs_perag {
/* reference count */
uint8_t pagf_refcount_level;
+
+ /*
+ * Unlinked inode information. This incore information reflects
+ * data stored in the AGI, so callers must hold the AGI buffer lock
+ * or have some other means to control concurrency.
+ */
+ struct rhashtable pagi_unlinked_hash;
} xfs_perag_t;
static inline struct xfs_ag_resv *
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index d3e04d20d8d4..c8ba98fae30a 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -125,6 +125,27 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56);
XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20);
XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16);
+
+ /*
+ * The v5 superblock format extended several v4 header structures with
+ * additional data. While new fields are only accessible on v5
+ * superblocks, it's important that the v5 structures place original v4
+ * fields/headers in the correct location on-disk. For example, we must
+ * be able to find magic values at the same location in certain blocks
+ * regardless of superblock version.
+ *
+ * The following checks ensure that various v5 data structures place the
+ * subset of v4 metadata associated with the same type of block at the
+ * start of the on-disk block. If there is no data structure definition
+ * for certain types of v4 blocks, traverse down to the first field of
+ * common metadata (e.g., magic value) and make sure it is at offset
+ * zero.
+ */
+ XFS_CHECK_OFFSET(struct xfs_dir3_leaf, hdr.info.hdr, 0);
+ XFS_CHECK_OFFSET(struct xfs_da3_intnode, hdr.info.hdr, 0);
+ XFS_CHECK_OFFSET(struct xfs_dir3_data_hdr, hdr.magic, 0);
+ XFS_CHECK_OFFSET(struct xfs_dir3_free, hdr.hdr.magic, 0);
+ XFS_CHECK_OFFSET(struct xfs_attr3_leafblock, hdr.info.hdr, 0);
}
#endif /* __XFS_ONDISK_H */
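
The XFS_CHECK_OFFSET() lines added above are compile-time assertions that the embedded v4 headers stay at offset zero of the v5 structures. As a rough standalone illustration of the same technique (not the kernel's macro; the structure names here are made up), C11's static_assert plus offsetof is enough:

#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */
#include <stdint.h>

/* Illustrative stand-ins: a v4-style header embedded at the front of a v5 block. */
struct v4_hdr {
	uint32_t magic;
};

struct v5_block {
	struct v4_hdr hdr;	/* must remain at offset 0 on disk */
	uint64_t owner;		/* v5-only fields follow */
	uint64_t lsn;
};

/* Fails the build if the embedded header ever drifts away from offset 0. */
#define CHECK_OFFSET(type, member, value) \
	static_assert(offsetof(type, member) == (value), \
		      #type "." #member " not at expected offset")

CHECK_OFFSET(struct v5_block, hdr.magic, 0);
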
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index f44c3599527d..bde2c9f56a46 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -185,7 +185,7 @@ xfs_fs_map_blocks(
}
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- xfs_bmbt_to_iomap(ip, iomap, &imap);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
*device_generation = mp->m_generation;
return error;
out_unlock:
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index c5b4fa004ca4..680ae7662a78 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -192,7 +192,7 @@ xfs_reflink_trim_around_shared(
int error = 0;
/* Holes, unwritten, and delalloc extents cannot be shared */
- if (!xfs_is_reflink_inode(ip) || !xfs_bmap_is_real_extent(irec)) {
+ if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_real_extent(irec)) {
*shared = false;
return 0;
}
@@ -234,93 +234,59 @@ xfs_reflink_trim_around_shared(
}
}
-/*
- * Trim the passed in imap to the next shared/unshared extent boundary, and
- * if imap->br_startoff points to a shared extent reserve space for it in the
- * COW fork.
- *
- * Note that imap will always contain the block numbers for the existing blocks
- * in the data fork, as the upper layers need them for read-modify-write
- * operations.
- */
-int
-xfs_reflink_reserve_cow(
+bool
+xfs_inode_need_cow(
struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap)
+ struct xfs_bmbt_irec *imap,
+ bool *shared)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
- struct xfs_bmbt_irec got;
- int error = 0;
- bool eof = false;
- struct xfs_iext_cursor icur;
- bool shared;
-
- /*
- * Search the COW fork extent list first. This serves two purposes:
- * first this implement the speculative preallocation using cowextisze,
- * so that we also unshared block adjacent to shared blocks instead
- * of just the shared blocks themselves. Second the lookup in the
- * extent list is generally faster than going out to the shared extent
- * tree.
- */
-
- if (!xfs_iext_lookup_extent(ip, ifp, imap->br_startoff, &icur, &got))
- eof = true;
- if (!eof && got.br_startoff <= imap->br_startoff) {
- trace_xfs_reflink_cow_found(ip, imap);
- xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
+ /* We can't update any real extents in always COW mode. */
+ if (xfs_is_always_cow_inode(ip) &&
+ !isnullstartblock(imap->br_startblock)) {
+ *shared = true;
return 0;
}
/* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, imap, &shared);
- if (error)
- return error;
-
- /* Not shared? Just report the (potentially capped) extent. */
- if (!shared)
- return 0;
-
- /*
- * Fork all the shared blocks from our write offset until the end of
- * the extent.
- */
- error = xfs_qm_dqattach_locked(ip, false);
- if (error)
- return error;
-
- error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
- imap->br_blockcount, 0, &got, &icur, eof);
- if (error == -ENOSPC || error == -EDQUOT)
- trace_xfs_reflink_cow_enospc(ip, imap);
- if (error)
- return error;
-
- xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
- trace_xfs_reflink_cow_alloc(ip, &got);
- return 0;
+ return xfs_reflink_trim_around_shared(ip, imap, shared);
}
-/* Convert part of an unwritten CoW extent to a real one. */
-STATIC int
-xfs_reflink_convert_cow_extent(
- struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap,
- xfs_fileoff_t offset_fsb,
- xfs_filblks_t count_fsb)
+static int
+xfs_reflink_convert_cow_locked(
+ struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb,
+ xfs_filblks_t count_fsb)
{
- int nimaps = 1;
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec got;
+ struct xfs_btree_cur *dummy_cur = NULL;
+ int dummy_logflags;
+ int error = 0;
- if (imap->br_state == XFS_EXT_NORM)
+ if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
return 0;
- xfs_trim_extent(imap, offset_fsb, count_fsb);
- trace_xfs_reflink_convert_cow(ip, imap);
- if (imap->br_blockcount == 0)
- return 0;
- return xfs_bmapi_write(NULL, ip, imap->br_startoff, imap->br_blockcount,
- XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, 0, imap,
- &nimaps);
+ do {
+ if (got.br_startoff >= offset_fsb + count_fsb)
+ break;
+ if (got.br_state == XFS_EXT_NORM)
+ continue;
+ if (WARN_ON_ONCE(isnullstartblock(got.br_startblock)))
+ return -EIO;
+
+ xfs_trim_extent(&got, offset_fsb, count_fsb);
+ if (!got.br_blockcount)
+ continue;
+
+ got.br_state = XFS_EXT_NORM;
+ error = xfs_bmap_add_extent_unwritten_real(NULL, ip,
+ XFS_COW_FORK, &icur, &dummy_cur, &got,
+ &dummy_logflags);
+ if (error)
+ return error;
+ } while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got));
+
+ return error;
}
/* Convert all of the unwritten CoW extents in a file's range to real ones. */
@@ -334,15 +300,12 @@ xfs_reflink_convert_cow(
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
xfs_filblks_t count_fsb = end_fsb - offset_fsb;
- struct xfs_bmbt_irec imap;
- int nimaps = 1, error = 0;
+ int error;
ASSERT(count != 0);
xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_bmapi_write(NULL, ip, offset_fsb, count_fsb,
- XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT |
- XFS_BMAPI_CONVERT_ONLY, 0, &imap, &nimaps);
+ error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -375,7 +338,7 @@ xfs_find_trim_cow_extent(
if (got.br_startoff > offset_fsb) {
xfs_trim_extent(imap, imap->br_startoff,
got.br_startoff - imap->br_startoff);
- return xfs_reflink_trim_around_shared(ip, imap, shared);
+ return xfs_inode_need_cow(ip, imap, shared);
}
*shared = true;
@@ -397,7 +360,8 @@ xfs_reflink_allocate_cow(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
bool *shared,
- uint *lockmode)
+ uint *lockmode,
+ bool convert_now)
{
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t offset_fsb = imap->br_startoff;
@@ -409,7 +373,10 @@ xfs_reflink_allocate_cow(
xfs_extlen_t resblks = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(xfs_is_reflink_inode(ip));
+ if (!ip->i_cowfp) {
+ ASSERT(!xfs_is_reflink_inode(ip));
+ xfs_ifork_init_cow(ip);
+ }
error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
if (error || !*shared)
@@ -471,7 +438,16 @@ xfs_reflink_allocate_cow(
if (nimaps == 0)
return -ENOSPC;
convert:
- return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
+ xfs_trim_extent(imap, offset_fsb, count_fsb);
+ /*
+ * COW fork extents are supposed to remain unwritten until we're ready
+ * to initiate a disk write. For direct I/O we are going to write the
+ * data and need the conversion, but for buffered writes we're done.
+ */
+ if (!convert_now || imap->br_state == XFS_EXT_NORM)
+ return 0;
+ trace_xfs_reflink_convert_cow(ip, imap);
+ return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
out_unreserve:
xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
@@ -586,7 +562,7 @@ xfs_reflink_cancel_cow_range(
int error;
trace_xfs_reflink_cancel_cow_range(ip, offset, count);
- ASSERT(xfs_is_reflink_inode(ip));
+ ASSERT(ip->i_cowfp);
offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
if (count == NULLFILEOFF)
@@ -1192,7 +1168,7 @@ xfs_reflink_remap_blocks(
break;
ASSERT(nimaps == 1);
- trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_IO_OVERWRITE,
+ trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_DATA_FORK,
&imap);
/* Translate imap into the destination file. */
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 6d73daef1f13..28a43b7f581d 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -6,16 +6,28 @@
#ifndef __XFS_REFLINK_H
#define __XFS_REFLINK_H 1
+static inline bool xfs_is_always_cow_inode(struct xfs_inode *ip)
+{
+ return ip->i_mount->m_always_cow &&
+ xfs_sb_version_hasreflink(&ip->i_mount->m_sb);
+}
+
+static inline bool xfs_is_cow_inode(struct xfs_inode *ip)
+{
+ return xfs_is_reflink_inode(ip) || xfs_is_always_cow_inode(ip);
+}
+
extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t aglen,
xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
struct xfs_bmbt_irec *irec, bool *shared);
+bool xfs_inode_need_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
+ bool *shared);
-extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap);
extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode);
+ struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode,
+ bool convert_now);
extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index c9097cb0b955..f093ea244849 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1594,6 +1594,13 @@ xfs_mount_alloc(
INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
mp->m_kobj.kobject.kset = xfs_kset;
+ /*
+ * We don't create the finobt per-ag space reservation until after log
+ * recovery, so we must set this to true so that an ifree transaction
+ * started during log recovery will not depend on space reservations
+ * for finobt expansion.
+ */
+ mp->m_finobt_nores = true;
return mp;
}
@@ -1729,11 +1736,18 @@ xfs_fs_fill_super(
}
}
- if (xfs_sb_version_hasreflink(&mp->m_sb) && mp->m_sb.sb_rblocks) {
- xfs_alert(mp,
+ if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (mp->m_sb.sb_rblocks) {
+ xfs_alert(mp,
"reflink not compatible with realtime device!");
- error = -EINVAL;
- goto out_filestream_unmount;
+ error = -EINVAL;
+ goto out_filestream_unmount;
+ }
+
+ if (xfs_globals.always_cow) {
+ xfs_info(mp, "using DEBUG-only always_cow mode.");
+ mp->m_always_cow = true;
+ }
}
if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h
index 168488130a19..ad7f9be13087 100644
--- a/fs/xfs/xfs_sysctl.h
+++ b/fs/xfs/xfs_sysctl.h
@@ -85,6 +85,7 @@ struct xfs_globals {
int log_recovery_delay; /* log recovery delay (secs) */
int mount_delay; /* mount setup delay (secs) */
bool bug_on_assert; /* BUG() the kernel on assert failure */
+ bool always_cow; /* use COW fork for all overwrites */
};
extern struct xfs_globals xfs_globals;
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index cd6a994a7250..cabda13f3c64 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -183,10 +183,34 @@ mount_delay_show(
}
XFS_SYSFS_ATTR_RW(mount_delay);
+static ssize_t
+always_cow_store(
+ struct kobject *kobject,
+ const char *buf,
+ size_t count)
+{
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &xfs_globals.always_cow);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static ssize_t
+always_cow_show(
+ struct kobject *kobject,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.always_cow);
+}
+XFS_SYSFS_ATTR_RW(always_cow);
+
static struct attribute *xfs_dbg_attrs[] = {
ATTR_LIST(bug_on_assert),
ATTR_LIST(log_recovery_delay),
ATTR_LIST(mount_delay),
+ ATTR_LIST(always_cow),
NULL,
};
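
With the attribute wired into xfs_dbg_attrs, the knob is an ordinary sysfs file on debug kernels. A small sketch of flipping it from userspace; the path is an assumption based on where the sibling attributes (bug_on_assert, mount_delay) are exposed, namely /sys/fs/xfs/debug/:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: the xfs_dbg_attrs kset sits under /sys/fs/xfs/debug/. */
	const char *knob = "/sys/fs/xfs/debug/always_cow";
	int fd = open(knob, O_WRONLY);

	if (fd < 0) {
		perror(knob);
		return 1;
	}
	/* kstrtobool() on the kernel side also accepts "y"/"n", "on"/"off". */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

Per the xfs_fs_fill_super() hunk earlier, m_always_cow is sampled at mount time, so the setting only affects filesystems mounted after the write.
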
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 6fcc893dfc91..47fb07d86efd 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1218,23 +1218,17 @@ DEFINE_EVENT(xfs_readpage_class, name, \
DEFINE_READPAGE_EVENT(xfs_vm_readpage);
DEFINE_READPAGE_EVENT(xfs_vm_readpages);
-TRACE_DEFINE_ENUM(XFS_IO_HOLE);
-TRACE_DEFINE_ENUM(XFS_IO_DELALLOC);
-TRACE_DEFINE_ENUM(XFS_IO_UNWRITTEN);
-TRACE_DEFINE_ENUM(XFS_IO_OVERWRITE);
-TRACE_DEFINE_ENUM(XFS_IO_COW);
-
DECLARE_EVENT_CLASS(xfs_imap_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
- int type, struct xfs_bmbt_irec *irec),
- TP_ARGS(ip, offset, count, type, irec),
+ int whichfork, struct xfs_bmbt_irec *irec),
+ TP_ARGS(ip, offset, count, whichfork, irec),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
__field(loff_t, size)
__field(loff_t, offset)
__field(size_t, count)
- __field(int, type)
+ __field(int, whichfork)
__field(xfs_fileoff_t, startoff)
__field(xfs_fsblock_t, startblock)
__field(xfs_filblks_t, blockcount)
@@ -1245,33 +1239,33 @@ DECLARE_EVENT_CLASS(xfs_imap_class,
__entry->size = ip->i_d.di_size;
__entry->offset = offset;
__entry->count = count;
- __entry->type = type;
+ __entry->whichfork = whichfork;
__entry->startoff = irec ? irec->br_startoff : 0;
__entry->startblock = irec ? irec->br_startblock : 0;
__entry->blockcount = irec ? irec->br_blockcount : 0;
),
TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count %zd "
- "type %s startoff 0x%llx startblock %lld blockcount 0x%llx",
+ "fork %s startoff 0x%llx startblock %lld blockcount 0x%llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->size,
__entry->offset,
__entry->count,
- __print_symbolic(__entry->type, XFS_IO_TYPES),
+ __entry->whichfork == XFS_COW_FORK ? "cow" : "data",
__entry->startoff,
(int64_t)__entry->startblock,
__entry->blockcount)
)
-#define DEFINE_IOMAP_EVENT(name) \
+#define DEFINE_IMAP_EVENT(name) \
DEFINE_EVENT(xfs_imap_class, name, \
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
- int type, struct xfs_bmbt_irec *irec), \
- TP_ARGS(ip, offset, count, type, irec))
-DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
-DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
-DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
-DEFINE_IOMAP_EVENT(xfs_iomap_found);
+ int whichfork, struct xfs_bmbt_irec *irec), \
+ TP_ARGS(ip, offset, count, whichfork, irec))
+DEFINE_IMAP_EVENT(xfs_map_blocks_found);
+DEFINE_IMAP_EVENT(xfs_map_blocks_alloc);
+DEFINE_IMAP_EVENT(xfs_iomap_alloc);
+DEFINE_IMAP_EVENT(xfs_iomap_found);
DECLARE_EVENT_CLASS(xfs_simple_io_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -3078,7 +3072,7 @@ DEFINE_EVENT(xfs_inode_irec_class, name, \
DEFINE_INODE_EVENT(xfs_reflink_set_inode_flag);
DEFINE_INODE_EVENT(xfs_reflink_unset_inode_flag);
DEFINE_ITRUNC_EVENT(xfs_reflink_update_inode_size);
-DEFINE_IOMAP_EVENT(xfs_reflink_remap_imap);
+DEFINE_IMAP_EVENT(xfs_reflink_remap_imap);
TRACE_EVENT(xfs_reflink_remap_blocks_loop,
TP_PROTO(struct xfs_inode *src, xfs_fileoff_t soffset,
xfs_filblks_t len, struct xfs_inode *dest,
@@ -3202,13 +3196,10 @@ DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error);
/* copy on write */
DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);
-DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
-
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_bounce_dio_write);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
@@ -3371,6 +3362,84 @@ DEFINE_TRANS_EVENT(xfs_trans_roll);
DEFINE_TRANS_EVENT(xfs_trans_add_item);
DEFINE_TRANS_EVENT(xfs_trans_free_items);
+TRACE_EVENT(xfs_iunlink_update_bucket,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int bucket,
+ xfs_agino_t old_ptr, xfs_agino_t new_ptr),
+ TP_ARGS(mp, agno, bucket, old_ptr, new_ptr),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(unsigned int, bucket)
+ __field(xfs_agino_t, old_ptr)
+ __field(xfs_agino_t, new_ptr)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->bucket = bucket;
+ __entry->old_ptr = old_ptr;
+ __entry->new_ptr = new_ptr;
+ ),
+ TP_printk("dev %d:%d agno %u bucket %u old 0x%x new 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->bucket,
+ __entry->old_ptr,
+ __entry->new_ptr)
+);
+
+TRACE_EVENT(xfs_iunlink_update_dinode,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
+ xfs_agino_t old_ptr, xfs_agino_t new_ptr),
+ TP_ARGS(mp, agno, agino, old_ptr, new_ptr),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agino_t, agino)
+ __field(xfs_agino_t, old_ptr)
+ __field(xfs_agino_t, new_ptr)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agino = agino;
+ __entry->old_ptr = old_ptr;
+ __entry->new_ptr = new_ptr;
+ ),
+ TP_printk("dev %d:%d agno %u agino 0x%x old 0x%x new 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agino,
+ __entry->old_ptr,
+ __entry->new_ptr)
+);
+
+DECLARE_EVENT_CLASS(xfs_ag_inode_class,
+ TP_PROTO(struct xfs_inode *ip),
+ TP_ARGS(ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agino_t, agino)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
+ __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
+ ),
+ TP_printk("dev %d:%d agno %u agino %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno, __entry->agino)
+)
+
+#define DEFINE_AGINODE_EVENT(name) \
+DEFINE_EVENT(xfs_ag_inode_class, name, \
+ TP_PROTO(struct xfs_inode *ip), \
+ TP_ARGS(ip))
+DEFINE_AGINODE_EVENT(xfs_iunlink);
+DEFINE_AGINODE_EVENT(xfs_iunlink_remove);
+DEFINE_AG_EVENT(xfs_iunlink_map_prev_fallback);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
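
The new xfs_iunlink* events are ordinary tracepoints, so they can be enabled at runtime through tracefs. A hedged sketch, assuming tracefs is mounted at /sys/kernel/tracing (older setups use /sys/kernel/debug/tracing); set_event accepts wildcards, which conveniently covers all of the events added above:

#include <stdio.h>

int main(void)
{
	/* Common tracefs mount point; adjust if tracefs lives elsewhere. */
	FILE *f = fopen("/sys/kernel/tracing/set_event", "w");

	if (!f) {
		perror("set_event");
		return 1;
	}
	/* Wildcards are accepted, so one line covers every new iunlink event. */
	fputs("xfs:xfs_iunlink*\n", f);
	fclose(f);
	return 0;
}

The records then appear in the trace file in the same directory, formatted as in the TP_printk() strings above.
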
diff --git a/fs/xfs/xfs_trans_bmap.c b/fs/xfs/xfs_trans_bmap.c
index 11cff449d055..e1c7d55b32c3 100644
--- a/fs/xfs/xfs_trans_bmap.c
+++ b/fs/xfs/xfs_trans_bmap.c
@@ -17,7 +17,6 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_inode.h"
-#include "xfs_defer.h"
/*
* This routine is called to allocate a "bmap update done"
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 629f1479c9d2..7d65ebf1e847 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -277,7 +277,7 @@ xfs_trans_read_buf_map(
* release this buffer when it kills the transaction.
*/
ASSERT(bp->b_ops != NULL);
- error = xfs_buf_ensure_ops(bp, ops);
+ error = xfs_buf_reverify(bp, ops);
if (error) {
xfs_buf_ioerror_alert(bp, __func__);
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 0710434eb240..8ee7a3f8bb20 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -18,7 +18,6 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
-#include "xfs_defer.h"
/*
* This routine is called to allocate an "extent free done"
diff --git a/fs/xfs/xfs_trans_refcount.c b/fs/xfs/xfs_trans_refcount.c
index 6c947ff4faf6..8d734728dd1b 100644
--- a/fs/xfs/xfs_trans_refcount.c
+++ b/fs/xfs/xfs_trans_refcount.c
@@ -16,7 +16,6 @@
#include "xfs_refcount_item.h"
#include "xfs_alloc.h"
#include "xfs_refcount.h"
-#include "xfs_defer.h"
/*
* This routine is called to allocate a "refcount update done"
diff --git a/fs/xfs/xfs_trans_rmap.c b/fs/xfs/xfs_trans_rmap.c
index a42890931ecd..5c7936b1be13 100644
--- a/fs/xfs/xfs_trans_rmap.c
+++ b/fs/xfs/xfs_trans_rmap.c
@@ -16,7 +16,6 @@
#include "xfs_rmap_item.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
-#include "xfs_defer.h"
/* Set the map extent flags for this reverse mapping. */
static void
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 63ee1d5bf1d7..9a63016009a1 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -129,6 +129,9 @@ __xfs_xattr_put_listent(
char *offset;
int arraytop;
+ if (context->count < 0 || context->seen_enough)
+ return;
+
if (!context->alist)
goto compute_size;
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 6488d572739c..c2acd29f973d 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -3,7 +3,7 @@
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 53c088247d36..16a83959e616 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -3,7 +3,7 @@
*
* Name: acconfig.h - Global configuration constants
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -141,7 +141,7 @@
/*
* Maximal number of elements the Result Stack can contain,
- * it may be an arbitray value not exceeding the types of
+ * it may be an arbitrary value not exceeding the types of
* result_size and result_count (now u8).
*/
#define ACPI_RESULTS_OBJ_NUM_MAX 255
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 09f46050961f..233a72f169bb 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -3,7 +3,7 @@
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -311,7 +311,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
"An ACPI name contains invalid character(s)"),
EXCEP_TXT("AE_AML_NAME_NOT_FOUND",
"Could not resolve a named reference"),
- EXCEP_TXT("AE_AML_INTERNAL", "An internal error within the interprete"),
+ EXCEP_TXT("AE_AML_INTERNAL",
+ "An internal error within the interpreter"),
EXCEP_TXT("AE_AML_INVALID_SPACE_ID",
"An Operation Region SpaceID is invalid"),
EXCEP_TXT("AE_AML_STRING_LIMIT",
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 6f69a4f638f8..8b3eae96706a 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -3,7 +3,7 @@
*
* Name: acnames.h - Global names and strings
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 6db9a6d40c85..30b1ae53689f 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -3,7 +3,7 @@
*
* Name: acoutput.h -- debug output
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -201,6 +201,7 @@
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist
+#define ACPI_BIOS_EXCEPTION(plist) acpi_bios_exception plist
#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist
#define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i)
@@ -213,6 +214,7 @@
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
#define ACPI_BIOS_WARNING(plist)
+#define ACPI_BIOS_EXCEPTION(plist)
#define ACPI_BIOS_ERROR(plist)
#define ACPI_DEBUG_OBJECT(obj,l,i)
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index ccdc5981bc91..bc7d39ecf574 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -3,7 +3,7 @@
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index eb1f21af7556..1b59fb61f67d 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -5,7 +5,7 @@
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 7aa38b648564..24dbb4e742a6 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -3,7 +3,7 @@
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20181213
+#define ACPI_CA_VERSION 0x20190215
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -157,14 +157,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
/*
- * Optionally support module level code by parsing an entire table as
- * a method as it is loaded. Default is TRUE.
- * NOTE, this is essentially obsolete and will be removed soon
- * (01/2018).
- */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_execute_tables_as_methods, TRUE);
-
-/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
* (address mismatch) between the 32-bit and 64-bit versions of the
* address. Although ACPICA adheres to the ACPI specification which
@@ -903,6 +895,12 @@ ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
acpi_bios_error(const char *module_name,
u32 line_number,
const char *format, ...))
+ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(4)
+ void ACPI_INTERNAL_VAR_XFACE
+ acpi_bios_exception(const char *module_name,
+ u32 line_number,
+ acpi_status status,
+ const char *format, ...))
ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
acpi_bios_warning(const char *module_name,
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 724ad5f29a16..62930583219f 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -3,7 +3,7 @@
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -139,7 +139,7 @@ struct acpi_resource_irq {
u8 descriptor_length;
u8 triggering;
u8 polarity;
- u8 sharable;
+ u8 shareable;
u8 wake_capable;
u8 interrupt_count;
u8 interrupts[1];
@@ -328,7 +328,7 @@ struct acpi_resource_extended_irq {
u8 producer_consumer;
u8 triggering;
u8 polarity;
- u8 sharable;
+ u8 shareable;
u8 wake_capable;
u8 interrupt_count;
struct acpi_resource_source resource_source;
@@ -348,7 +348,7 @@ struct acpi_resource_gpio {
u8 connection_type;
u8 producer_consumer; /* For values, see Producer/Consumer above */
u8 pin_config;
- u8 sharable; /* For values, see Interrupt Attributes above */
+ u8 shareable; /* For values, see Interrupt Attributes above */
u8 wake_capable; /* For values, see Interrupt Attributes above */
u8 io_restriction;
u8 triggering; /* For values, see Interrupt Attributes above */
@@ -508,7 +508,7 @@ struct acpi_resource_uart_serialbus {
struct acpi_resource_pin_function {
u8 revision_id;
u8 pin_config;
- u8 sharable; /* For values, see Interrupt Attributes above */
+ u8 shareable; /* For values, see Interrupt Attributes above */
u16 function_number;
u16 pin_table_length;
u16 vendor_length;
@@ -520,7 +520,7 @@ struct acpi_resource_pin_function {
struct acpi_resource_pin_config {
u8 revision_id;
u8 producer_consumer; /* For values, see Producer/Consumer above */
- u8 sharable; /* For values, see Interrupt Attributes above */
+ u8 shareable; /* For values, see Interrupt Attributes above */
u8 pin_config_type;
u32 pin_config_value;
u16 pin_table_length;
@@ -560,7 +560,7 @@ struct acpi_resource_pin_group {
struct acpi_resource_pin_group_function {
u8 revision_id;
u8 producer_consumer; /* For values, see Producer/Consumer above */
- u8 sharable; /* For values, see Interrupt Attributes above */
+ u8 shareable; /* For values, see Interrupt Attributes above */
u16 function_number;
u16 vendor_length;
struct acpi_resource_source resource_source;
@@ -571,7 +571,7 @@ struct acpi_resource_pin_group_function {
struct acpi_resource_pin_group_config {
u8 revision_id;
u8 producer_consumer; /* For values, see Producer/Consumer above */
- u8 sharable; /* For values, see Interrupt Attributes above */
+ u8 shareable; /* For values, see Interrupt Attributes above */
u8 pin_config_type; /* For values, see pin_config_type above */
u32 pin_config_value;
u16 vendor_length;
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 0a977eca0a74..65cc9cbf1141 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -3,7 +3,7 @@
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index ab424509cae9..d14037ddf108 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -3,7 +3,7 @@
*
* Name: actbl1.h - Additional ACPI table definitions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -562,7 +562,7 @@ struct acpi_dmar_hardware_unit {
#define ACPI_DMAR_INCLUDE_ALL (1)
-/* 1: Reserved Memory Defininition */
+/* 1: Reserved Memory Definition */
struct acpi_dmar_reserved_memory {
struct acpi_dmar_header header;
@@ -1001,6 +1001,11 @@ struct acpi_table_gtdt {
#define ACPI_GTDT_INTERRUPT_POLARITY (1<<1)
#define ACPI_GTDT_ALWAYS_ON (1<<2)
+struct acpi_gtdt_el2 {
+ u32 virtual_el2_timer_gsiv;
+ u32 virtual_el2_timer_flags;
+};
+
/* Common GTDT subtable header */
struct acpi_gtdt_header {
@@ -1390,7 +1395,7 @@ struct acpi_table_hmat {
/* Values for HMAT structure types */
enum acpi_hmat_type {
- ACPI_HMAT_TYPE_ADDRESS_RANGE = 0, /* Memory subystem address range */
+ ACPI_HMAT_TYPE_PROXIMITY = 0, /* Memory proximity domain attributes */
+ ACPI_HMAT_TYPE_ADDRESS_RANGE = 0, /* Memory subsystem address range */
ACPI_HMAT_TYPE_LOCALITY = 1, /* System locality latency and bandwidth information */
ACPI_HMAT_TYPE_CACHE = 2, /* Memory side cache information */
ACPI_HMAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
@@ -1406,17 +1411,17 @@ struct acpi_hmat_structure {
* HMAT Structures, correspond to Type in struct acpi_hmat_structure
*/
-/* 0: Memory subystem address range */
+/* 0: Memory proximity domain attributes */
-struct acpi_hmat_address_range {
+struct acpi_hmat_proximity_domain {
struct acpi_hmat_structure header;
u16 flags;
u16 reserved1;
u32 processor_PD; /* Processor proximity domain */
u32 memory_PD; /* Memory proximity domain */
u32 reserved2;
- u64 physical_address_base; /* Physical address range base */
- u64 physical_address_length; /* Physical address range length */
+ u64 reserved3;
+ u64 reserved4;
};
/* Masks for Flags field above */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index c50ef7e6b942..e45ced27f4c3 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -3,7 +3,7 @@
*
* Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -143,7 +143,7 @@ struct acpi_iort_memory_access {
*/
struct acpi_iort_its_group {
u32 its_count;
- u32 identifiers[1]; /* GIC ITS identifier arrary */
+ u32 identifiers[1]; /* GIC ITS identifier array */
};
struct acpi_iort_named_component {
@@ -623,7 +623,7 @@ struct acpi_madt_local_x2apic_nmi {
u8 reserved[3]; /* reserved - must be zero */
};
-/* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */
+/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 changes) */
struct acpi_madt_generic_interrupt {
struct acpi_subtable_header header;
@@ -641,7 +641,8 @@ struct acpi_madt_generic_interrupt {
u64 gicr_base_address;
u64 arm_mpidr;
u8 efficiency_class;
- u8 reserved2[3];
+ u8 reserved2[1];
+ u16 spe_interrupt; /* ACPI 6.3 */
};
/* Masks for Flags field above */
@@ -1361,6 +1362,7 @@ struct acpi_pdtt_channel {
#define ACPI_PDTT_RUNTIME_TRIGGER (1)
#define ACPI_PDTT_WAIT_COMPLETION (1<<1)
+#define ACPI_PDTT_TRIGGER_ORDER (1<<2)
/*******************************************************************************
*
@@ -1472,8 +1474,11 @@ struct acpi_pptt_processor {
/* Flags */
-#define ACPI_PPTT_PHYSICAL_PACKAGE (1) /* Physical package */
-#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID (2) /* ACPI Processor ID valid */
+#define ACPI_PPTT_PHYSICAL_PACKAGE (1)
+#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID (1<<1)
+#define ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD (1<<2) /* ACPI 6.3 */
+#define ACPI_PPTT_ACPI_LEAF_NODE (1<<3) /* ACPI 6.3 */
+#define ACPI_PPTT_ACPI_IDENTICAL (1<<4) /* ACPI 6.3 */
/* 1: Cache Type Structure */
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index ea1ca49c9c1b..7a58c10ce421 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -3,7 +3,7 @@
*
* Name: actbl3.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -190,7 +190,8 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */
- ACPI_SRAT_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+ ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */
+ ACPI_SRAT_TYPE_RESERVED = 6 /* 6 and greater are reserved */
};
/*
@@ -271,6 +272,22 @@ struct acpi_srat_gic_its_affinity {
u32 its_id;
};
+/* 5: Generic Initiator Affinity Structure (ACPI 6.3) */
+
+struct acpi_srat_generic_affinity {
+ struct acpi_subtable_header header;
+ u8 reserved;
+ u8 device_handle_type;
+ u32 proximity_domain;
+ u8 device_handle[16];
+ u32 flags;
+ u32 reserved1;
+};
+
+/* Flags for struct acpi_srat_generic_affinity */
+
+#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */
+
/*******************************************************************************
*
* STAO - Status Override Table (_STA override) - ACPI 6.0
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 2590627dbfcc..f73382e82c26 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -3,7 +3,7 @@
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -617,8 +617,9 @@ typedef u64 acpi_integer;
#define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C
#define ACPI_NOTIFY_AFFINITY_UPDATE (u8) 0x0D
#define ACPI_NOTIFY_MEMORY_UPDATE (u8) 0x0E
+#define ACPI_NOTIFY_DISCONNECT_RECOVER (u8) 0x0F
-#define ACPI_GENERIC_NOTIFY_MAX 0x0E
+#define ACPI_GENERIC_NOTIFY_MAX 0x0F
#define ACPI_SPECIFIC_NOTIFY_MAX 0x84
/*
@@ -885,15 +886,6 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ENABLE_EVENT 1
#define ACPI_DISABLE_EVENT 0
-/* Sleep function dispatch */
-
-typedef acpi_status (*acpi_sleep_function) (u8 sleep_state);
-
-struct acpi_sleep_functions {
- acpi_sleep_function legacy_function;
- acpi_sleep_function extended_function;
-};
-
/*
* External ACPI object definition
*/
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index e63f214531ad..23262cab047a 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -3,7 +3,7 @@
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 4f34734e7f36..ba6fd7202775 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -137,6 +137,7 @@ struct cppc_cpudata {
cpumask_var_t shared_cpu_map;
};
+extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 82cb4eb225a4..e3f1cddb4ac8 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -13,7 +13,6 @@
* estatus: memory buffer for error status block, allocated during
* HEST parsing.
*/
-#define GHES_TO_CLEAR 0x0001
#define GHES_EXITING 0x0002
struct ghes {
@@ -22,7 +21,6 @@ struct ghes {
struct acpi_hest_generic_v2 *generic_v2;
};
struct acpi_hest_generic_status *estatus;
- u64 buffer_paddr;
unsigned long flags;
union {
struct list_head list;
@@ -52,6 +50,8 @@ enum {
GHES_SEV_PANIC = 0x3,
};
+int ghes_estatus_pool_init(int num_ghes);
+
/* From drivers/edac/ghes_edac.c */
#ifdef CONFIG_EDAC_GHES
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index f444e5b0fdaa..35ab3f87cc29 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -3,7 +3,7 @@
*
* Name: acenv.h - Host and compiler configuration
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 47d690eafe4c..2e36c8344897 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -3,7 +3,7 @@
*
* Name: acenvex.h - Extra host and compiler configuration
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 085db95a3dae..6a0705b433d2 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -3,7 +3,7 @@
*
* Name: acgcc.h - GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 5d2b667af829..8dda2856aca1 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -3,7 +3,7 @@
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index 626265833a54..d2cc247248cb 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -3,7 +3,7 @@
*
* Name: acintel.h - VC specific defines, etc.
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index e3d21d014fcc..9ff328fd946a 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -3,7 +3,7 @@
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index d754a1b12721..cc4f1eb53cba 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -3,7 +3,7 @@
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 0d4b1d3dbc1e..e8730c6b9fe2 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -1,3 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
/*
* This file provides wrappers with KASAN instrumentation for atomic operations.
* To use this functionality an arch's atomic.h file needs to define all
@@ -9,459 +14,1775 @@
* arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
* double instrumentation.
*/
-
-#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
-#define _LINUX_ATOMIC_INSTRUMENTED_H
+#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
#include <linux/build_bug.h>
#include <linux/kasan-checks.h>
-static __always_inline int atomic_read(const atomic_t *v)
+static inline int
+atomic_read(const atomic_t *v)
{
kasan_check_read(v, sizeof(*v));
return arch_atomic_read(v);
}
+#define atomic_read atomic_read
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+#if defined(arch_atomic_read_acquire)
+static inline int
+atomic_read_acquire(const atomic_t *v)
{
kasan_check_read(v, sizeof(*v));
- return arch_atomic64_read(v);
+ return arch_atomic_read_acquire(v);
}
+#define atomic_read_acquire atomic_read_acquire
+#endif
-static __always_inline void atomic_set(atomic_t *v, int i)
+static inline void
+atomic_set(atomic_t *v, int i)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_set(v, i);
}
+#define atomic_set atomic_set
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+#if defined(arch_atomic_set_release)
+static inline void
+atomic_set_release(atomic_t *v, int i)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic64_set(v, i);
+ arch_atomic_set_release(v, i);
}
+#define atomic_set_release atomic_set_release
+#endif
-static __always_inline int atomic_xchg(atomic_t *v, int i)
+static inline void
+atomic_add(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_xchg(v, i);
+ arch_atomic_add(i, v);
}
+#define atomic_add atomic_add
-static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
+#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
+static inline int
+atomic_add_return(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_xchg(v, i);
+ return arch_atomic_add_return(i, v);
}
+#define atomic_add_return atomic_add_return
+#endif
-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+#if defined(arch_atomic_add_return_acquire)
+static inline int
+atomic_add_return_acquire(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_cmpxchg(v, old, new);
+ return arch_atomic_add_return_acquire(i, v);
}
+#define atomic_add_return_acquire atomic_add_return_acquire
+#endif
-static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+#if defined(arch_atomic_add_return_release)
+static inline int
+atomic_add_return_release(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg(v, old, new);
+ return arch_atomic_add_return_release(i, v);
}
+#define atomic_add_return_release atomic_add_return_release
+#endif
-#ifdef arch_atomic_try_cmpxchg
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+#if defined(arch_atomic_add_return_relaxed)
+static inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- kasan_check_read(old, sizeof(*old));
- return arch_atomic_try_cmpxchg(v, old, new);
+ return arch_atomic_add_return_relaxed(i, v);
}
+#define atomic_add_return_relaxed atomic_add_return_relaxed
#endif
-#ifdef arch_atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
+static inline int
+atomic_fetch_add(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- kasan_check_read(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg(v, old, new);
+ return arch_atomic_fetch_add(i, v);
}
+#define atomic_fetch_add atomic_fetch_add
#endif
-#ifdef arch_atomic_fetch_add_unless
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+#if defined(arch_atomic_fetch_add_acquire)
+static inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add_unless(v, a, u);
+ return arch_atomic_fetch_add_acquire(i, v);
}
+#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#endif
-#ifdef arch_atomic64_fetch_add_unless
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+#if defined(arch_atomic_fetch_add_release)
+static inline int
+atomic_fetch_add_release(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_unless(v, a, u);
+ return arch_atomic_fetch_add_release(i, v);
}
+#define atomic_fetch_add_release atomic_fetch_add_release
#endif
-#ifdef arch_atomic_inc
-#define atomic_inc atomic_inc
-static __always_inline void atomic_inc(atomic_t *v)
+#if defined(arch_atomic_fetch_add_relaxed)
+static inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#endif
+
+static inline void
+atomic_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_sub(i, v);
+}
+#define atomic_sub atomic_sub
+
+#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
+static inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_return(i, v);
+}
+#define atomic_sub_return atomic_sub_return
+#endif
+
+#if defined(arch_atomic_sub_return_acquire)
+static inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_return_acquire(i, v);
+}
+#define atomic_sub_return_acquire atomic_sub_return_acquire
+#endif
+
+#if defined(arch_atomic_sub_return_release)
+static inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_return_release(i, v);
+}
+#define atomic_sub_return_release atomic_sub_return_release
+#endif
+
+#if defined(arch_atomic_sub_return_relaxed)
+static inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#endif
+
+#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
+static inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub(i, v);
+}
+#define atomic_fetch_sub atomic_fetch_sub
+#endif
+
+#if defined(arch_atomic_fetch_sub_acquire)
+static inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_acquire(i, v);
+}
+#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#endif
+
+#if defined(arch_atomic_fetch_sub_release)
+static inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_release(i, v);
+}
+#define atomic_fetch_sub_release atomic_fetch_sub_release
+#endif
+
+#if defined(arch_atomic_fetch_sub_relaxed)
+static inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#endif
+
+#if defined(arch_atomic_inc)
+static inline void
+atomic_inc(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_inc(v);
}
+#define atomic_inc atomic_inc
#endif
-#ifdef arch_atomic64_inc
-#define atomic64_inc atomic64_inc
-static __always_inline void atomic64_inc(atomic64_t *v)
+#if defined(arch_atomic_inc_return)
+static inline int
+atomic_inc_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic64_inc(v);
+ return arch_atomic_inc_return(v);
}
+#define atomic_inc_return atomic_inc_return
#endif
-#ifdef arch_atomic_dec
-#define atomic_dec atomic_dec
-static __always_inline void atomic_dec(atomic_t *v)
+#if defined(arch_atomic_inc_return_acquire)
+static inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_return_acquire(v);
+}
+#define atomic_inc_return_acquire atomic_inc_return_acquire
+#endif
+
+#if defined(arch_atomic_inc_return_release)
+static inline int
+atomic_inc_return_release(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_return_release(v);
+}
+#define atomic_inc_return_release atomic_inc_return_release
+#endif
+
+#if defined(arch_atomic_inc_return_relaxed)
+static inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_return_relaxed(v);
+}
+#define atomic_inc_return_relaxed atomic_inc_return_relaxed
+#endif
+
+#if defined(arch_atomic_fetch_inc)
+static inline int
+atomic_fetch_inc(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc(v);
+}
+#define atomic_fetch_inc atomic_fetch_inc
+#endif
+
+#if defined(arch_atomic_fetch_inc_acquire)
+static inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_acquire(v);
+}
+#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#endif
+
+#if defined(arch_atomic_fetch_inc_release)
+static inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_release(v);
+}
+#define atomic_fetch_inc_release atomic_fetch_inc_release
+#endif
+
+#if defined(arch_atomic_fetch_inc_relaxed)
+static inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
+#endif
+
+#if defined(arch_atomic_dec)
+static inline void
+atomic_dec(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_dec(v);
}
+#define atomic_dec atomic_dec
#endif
-#ifdef atch_atomic64_dec
-#define atomic64_dec
-static __always_inline void atomic64_dec(atomic64_t *v)
+#if defined(arch_atomic_dec_return)
+static inline int
+atomic_dec_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic64_dec(v);
+ return arch_atomic_dec_return(v);
}
+#define atomic_dec_return atomic_dec_return
#endif
-static __always_inline void atomic_add(int i, atomic_t *v)
+#if defined(arch_atomic_dec_return_acquire)
+static inline int
+atomic_dec_return_acquire(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic_add(i, v);
+ return arch_atomic_dec_return_acquire(v);
}
+#define atomic_dec_return_acquire atomic_dec_return_acquire
+#endif
-static __always_inline void atomic64_add(s64 i, atomic64_t *v)
+#if defined(arch_atomic_dec_return_release)
+static inline int
+atomic_dec_return_release(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic64_add(i, v);
+ return arch_atomic_dec_return_release(v);
}
+#define atomic_dec_return_release atomic_dec_return_release
+#endif
-static __always_inline void atomic_sub(int i, atomic_t *v)
+#if defined(arch_atomic_dec_return_relaxed)
+static inline int
+atomic_dec_return_relaxed(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic_sub(i, v);
+ return arch_atomic_dec_return_relaxed(v);
}
+#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#endif
-static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
+#if defined(arch_atomic_fetch_dec)
+static inline int
+atomic_fetch_dec(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic64_sub(i, v);
+ return arch_atomic_fetch_dec(v);
}
+#define atomic_fetch_dec atomic_fetch_dec
+#endif
-static __always_inline void atomic_and(int i, atomic_t *v)
+#if defined(arch_atomic_fetch_dec_acquire)
+static inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_acquire(v);
+}
+#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#endif
+
+#if defined(arch_atomic_fetch_dec_release)
+static inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_release(v);
+}
+#define atomic_fetch_dec_release atomic_fetch_dec_release
+#endif
+
+#if defined(arch_atomic_fetch_dec_relaxed)
+static inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
+#endif
+
+static inline void
+atomic_and(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
+#define atomic_and atomic_and
-static __always_inline void atomic64_and(s64 i, atomic64_t *v)
+#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
+static inline int
+atomic_fetch_and(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic64_and(i, v);
+ return arch_atomic_fetch_and(i, v);
+}
+#define atomic_fetch_and atomic_fetch_and
+#endif
+
+#if defined(arch_atomic_fetch_and_acquire)
+static inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_acquire(i, v);
+}
+#define atomic_fetch_and_acquire atomic_fetch_and_acquire
+#endif
+
+#if defined(arch_atomic_fetch_and_release)
+static inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_release(i, v);
+}
+#define atomic_fetch_and_release atomic_fetch_and_release
+#endif
+
+#if defined(arch_atomic_fetch_and_relaxed)
+static inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_relaxed(i, v);
}
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#endif
+
+#if defined(arch_atomic_andnot)
+static inline void
+atomic_andnot(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_andnot(i, v);
+}
+#define atomic_andnot atomic_andnot
+#endif
-static __always_inline void atomic_or(int i, atomic_t *v)
+#if defined(arch_atomic_fetch_andnot)
+static inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot(i, v);
+}
+#define atomic_fetch_andnot atomic_fetch_andnot
+#endif
+
+#if defined(arch_atomic_fetch_andnot_acquire)
+static inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_acquire(i, v);
+}
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#endif
+
+#if defined(arch_atomic_fetch_andnot_release)
+static inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_release(i, v);
+}
+#define atomic_fetch_andnot_release atomic_fetch_andnot_release
+#endif
+
+#if defined(arch_atomic_fetch_andnot_relaxed)
+static inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#endif
+
+static inline void
+atomic_or(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
+#define atomic_or atomic_or
-static __always_inline void atomic64_or(s64 i, atomic64_t *v)
+#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
+static inline int
+atomic_fetch_or(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic64_or(i, v);
+ return arch_atomic_fetch_or(i, v);
+}
+#define atomic_fetch_or atomic_fetch_or
+#endif
+
+#if defined(arch_atomic_fetch_or_acquire)
+static inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_acquire(i, v);
+}
+#define atomic_fetch_or_acquire atomic_fetch_or_acquire
+#endif
+
+#if defined(arch_atomic_fetch_or_release)
+static inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_release(i, v);
+}
+#define atomic_fetch_or_release atomic_fetch_or_release
+#endif
+
+#if defined(arch_atomic_fetch_or_relaxed)
+static inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_relaxed(i, v);
}
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#endif
-static __always_inline void atomic_xor(int i, atomic_t *v)
+static inline void
+atomic_xor(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
+#define atomic_xor atomic_xor
-static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
+#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
+static inline int
+atomic_fetch_xor(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- arch_atomic64_xor(i, v);
+ return arch_atomic_fetch_xor(i, v);
}
+#define atomic_fetch_xor atomic_fetch_xor
+#endif
-#ifdef arch_atomic_inc_return
-#define atomic_inc_return atomic_inc_return
-static __always_inline int atomic_inc_return(atomic_t *v)
+#if defined(arch_atomic_fetch_xor_acquire)
+static inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_inc_return(v);
+ return arch_atomic_fetch_xor_acquire(i, v);
}
+#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
#endif
-#ifdef arch_atomic64_inc_return
-#define atomic64_inc_return atomic64_inc_return
-static __always_inline s64 atomic64_inc_return(atomic64_t *v)
+#if defined(arch_atomic_fetch_xor_release)
+static inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_return(v);
+ return arch_atomic_fetch_xor_release(i, v);
}
+#define atomic_fetch_xor_release atomic_fetch_xor_release
#endif
-#ifdef arch_atomic_dec_return
-#define atomic_dec_return atomic_dec_return
-static __always_inline int atomic_dec_return(atomic_t *v)
+#if defined(arch_atomic_fetch_xor_relaxed)
+static inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_return(v);
+ return arch_atomic_fetch_xor_relaxed(i, v);
}
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#endif
-#ifdef arch_atomic64_dec_return
-#define atomic64_dec_return atomic64_dec_return
-static __always_inline s64 atomic64_dec_return(atomic64_t *v)
+#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
+static inline int
+atomic_xchg(atomic_t *v, int i)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_return(v);
+ return arch_atomic_xchg(v, i);
}
+#define atomic_xchg atomic_xchg
#endif
-#ifdef arch_atomic64_inc_not_zero
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
+#if defined(arch_atomic_xchg_acquire)
+static inline int
+atomic_xchg_acquire(atomic_t *v, int i)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_not_zero(v);
+ return arch_atomic_xchg_acquire(v, i);
}
+#define atomic_xchg_acquire atomic_xchg_acquire
#endif
-#ifdef arch_atomic64_dec_if_positive
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
+#if defined(arch_atomic_xchg_release)
+static inline int
+atomic_xchg_release(atomic_t *v, int i)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_if_positive(v);
+ return arch_atomic_xchg_release(v, i);
}
+#define atomic_xchg_release atomic_xchg_release
#endif
-#ifdef arch_atomic_dec_and_test
-#define atomic_dec_and_test atomic_dec_and_test
-static __always_inline bool atomic_dec_and_test(atomic_t *v)
+#if defined(arch_atomic_xchg_relaxed)
+static inline int
+atomic_xchg_relaxed(atomic_t *v, int i)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_dec_and_test(v);
+ return arch_atomic_xchg_relaxed(v, i);
}
+#define atomic_xchg_relaxed atomic_xchg_relaxed
#endif
-#ifdef arch_atomic64_dec_and_test
-#define atomic64_dec_and_test atomic64_dec_and_test
-static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
+#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
+static inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_dec_and_test(v);
+ return arch_atomic_cmpxchg(v, old, new);
}
+#define atomic_cmpxchg atomic_cmpxchg
#endif
-#ifdef arch_atomic_inc_and_test
-#define atomic_inc_and_test atomic_inc_and_test
-static __always_inline bool atomic_inc_and_test(atomic_t *v)
+#if defined(arch_atomic_cmpxchg_acquire)
+static inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic_cmpxchg_release)
+static inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_release(v, old, new);
+}
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#endif
+
+#if defined(arch_atomic_cmpxchg_relaxed)
+static inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic_try_cmpxchg)
+static inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg(v, old, new);
+}
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_acquire)
+static inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_acquire(v, old, new);
+}
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_release)
+static inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_release(v, old, new);
+}
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_relaxed)
+static inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic_sub_and_test)
+static inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_and_test(i, v);
+}
+#define atomic_sub_and_test atomic_sub_and_test
+#endif
+
+#if defined(arch_atomic_dec_and_test)
+static inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_and_test(v);
+}
+#define atomic_dec_and_test atomic_dec_and_test
+#endif
+
+#if defined(arch_atomic_inc_and_test)
+static inline bool
+atomic_inc_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
+#define atomic_inc_and_test atomic_inc_and_test
#endif
-#ifdef arch_atomic64_inc_and_test
-#define atomic64_inc_and_test atomic64_inc_and_test
-static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
+#if defined(arch_atomic_add_negative)
+static inline bool
+atomic_add_negative(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_inc_and_test(v);
+ return arch_atomic_add_negative(i, v);
}
+#define atomic_add_negative atomic_add_negative
#endif
-static __always_inline int atomic_add_return(int i, atomic_t *v)
+#if defined(arch_atomic_fetch_add_unless)
+static inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_return(i, v);
+ return arch_atomic_fetch_add_unless(v, a, u);
+}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+#endif
+
+#if defined(arch_atomic_add_unless)
+static inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_unless(v, a, u);
+}
+#define atomic_add_unless atomic_add_unless
+#endif
+
+#if defined(arch_atomic_inc_not_zero)
+static inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_not_zero(v);
+}
+#define atomic_inc_not_zero atomic_inc_not_zero
+#endif
+
+#if defined(arch_atomic_inc_unless_negative)
+static inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_unless_negative(v);
+}
+#define atomic_inc_unless_negative atomic_inc_unless_negative
+#endif
+
+#if defined(arch_atomic_dec_unless_positive)
+static inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_unless_positive(v);
+}
+#define atomic_dec_unless_positive atomic_dec_unless_positive
+#endif
+
+#if defined(arch_atomic_dec_if_positive)
+static inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_if_positive(v);
+}
+#define atomic_dec_if_positive atomic_dec_if_positive
+#endif
+
+static inline s64
+atomic64_read(const atomic64_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic64_read(v);
+}
+#define atomic64_read atomic64_read
+
+#if defined(arch_atomic64_read_acquire)
+static inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic64_read_acquire(v);
+}
+#define atomic64_read_acquire atomic64_read_acquire
+#endif
+
+static inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_set(v, i);
+}
+#define atomic64_set atomic64_set
+
+#if defined(arch_atomic64_set_release)
+static inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_set_release(v, i);
}
+#define atomic64_set_release atomic64_set_release
+#endif
-static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+static inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_add(i, v);
+}
+#define atomic64_add atomic64_add
+
+#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
+static inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
+#define atomic64_add_return atomic64_add_return
+#endif
-static __always_inline int atomic_sub_return(int i, atomic_t *v)
+#if defined(arch_atomic64_add_return_acquire)
+static inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_return(i, v);
+ return arch_atomic64_add_return_acquire(i, v);
}
+#define atomic64_add_return_acquire atomic64_add_return_acquire
+#endif
-static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_add_return_release)
+static inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_sub_return(i, v);
+ return arch_atomic64_add_return_release(i, v);
}
+#define atomic64_add_return_release atomic64_add_return_release
+#endif
-static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+#if defined(arch_atomic64_add_return_relaxed)
+static inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_add(i, v);
+ return arch_atomic64_add_return_relaxed(i, v);
}
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#endif
-static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
+static inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
+#define atomic64_fetch_add atomic64_fetch_add
+#endif
-static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_add_acquire)
+static inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_sub(i, v);
+ return arch_atomic64_fetch_add_acquire(i, v);
}
+#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_add_release)
+static inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_release(i, v);
+}
+#define atomic64_fetch_add_release atomic64_fetch_add_release
+#endif
-static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_fetch_add_relaxed)
+static inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#endif
+
+static inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_sub(i, v);
+}
+#define atomic64_sub atomic64_sub
+
+#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
+static inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_return(i, v);
+}
+#define atomic64_sub_return atomic64_sub_return
+#endif
+
+#if defined(arch_atomic64_sub_return_acquire)
+static inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_acquire(i, v);
+}
+#define atomic64_sub_return_acquire atomic64_sub_return_acquire
+#endif
+
+#if defined(arch_atomic64_sub_return_release)
+static inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_release(i, v);
+}
+#define atomic64_sub_return_release atomic64_sub_return_release
+#endif
+
+#if defined(arch_atomic64_sub_return_relaxed)
+static inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#endif
+
+#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
+static inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
+#define atomic64_fetch_sub atomic64_fetch_sub
+#endif
-static __always_inline int atomic_fetch_and(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_sub_acquire)
+static inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_and(i, v);
+ return arch_atomic64_fetch_sub_acquire(i, v);
+}
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_sub_release)
+static inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub_release(i, v);
}
+#define atomic64_fetch_sub_release atomic64_fetch_sub_release
+#endif
-static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_fetch_sub_relaxed)
+static inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif
+
+#if defined(arch_atomic64_inc)
+static inline void
+atomic64_inc(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_inc(v);
+}
+#define atomic64_inc atomic64_inc
+#endif
+
+#if defined(arch_atomic64_inc_return)
+static inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_return(v);
+}
+#define atomic64_inc_return atomic64_inc_return
+#endif
+
+#if defined(arch_atomic64_inc_return_acquire)
+static inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_acquire(v);
+}
+#define atomic64_inc_return_acquire atomic64_inc_return_acquire
+#endif
+
+#if defined(arch_atomic64_inc_return_release)
+static inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_release(v);
+}
+#define atomic64_inc_return_release atomic64_inc_return_release
+#endif
+
+#if defined(arch_atomic64_inc_return_relaxed)
+static inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_relaxed(v);
+}
+#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+#endif
+
+#if defined(arch_atomic64_fetch_inc)
+static inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc(v);
+}
+#define atomic64_fetch_inc atomic64_fetch_inc
+#endif
+
+#if defined(arch_atomic64_fetch_inc_acquire)
+static inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_acquire(v);
+}
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_inc_release)
+static inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_release(v);
+}
+#define atomic64_fetch_inc_release atomic64_fetch_inc_release
+#endif
+
+#if defined(arch_atomic64_fetch_inc_relaxed)
+static inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
+#endif
+
+#if defined(arch_atomic64_dec)
+static inline void
+atomic64_dec(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_dec(v);
+}
+#define atomic64_dec atomic64_dec
+#endif
+
+#if defined(arch_atomic64_dec_return)
+static inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_return(v);
+}
+#define atomic64_dec_return atomic64_dec_return
+#endif
+
+#if defined(arch_atomic64_dec_return_acquire)
+static inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_acquire(v);
+}
+#define atomic64_dec_return_acquire atomic64_dec_return_acquire
+#endif
+
+#if defined(arch_atomic64_dec_return_release)
+static inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_release(v);
+}
+#define atomic64_dec_return_release atomic64_dec_return_release
+#endif
+
+#if defined(arch_atomic64_dec_return_relaxed)
+static inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_relaxed(v);
+}
+#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#endif
+
+#if defined(arch_atomic64_fetch_dec)
+static inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec(v);
+}
+#define atomic64_fetch_dec atomic64_fetch_dec
+#endif
+
+#if defined(arch_atomic64_fetch_dec_acquire)
+static inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_acquire(v);
+}
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_dec_release)
+static inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_release(v);
+}
+#define atomic64_fetch_dec_release atomic64_fetch_dec_release
+#endif
+
+#if defined(arch_atomic64_fetch_dec_relaxed)
+static inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
+#endif
+
+static inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_and(i, v);
+}
+#define atomic64_and atomic64_and
+
+#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
+static inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
+#define atomic64_fetch_and atomic64_fetch_and
+#endif
-static __always_inline int atomic_fetch_or(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_and_acquire)
+static inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_or(i, v);
+ return arch_atomic64_fetch_and_acquire(i, v);
+}
+#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_and_release)
+static inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and_release(i, v);
+}
+#define atomic64_fetch_and_release atomic64_fetch_and_release
+#endif
+
+#if defined(arch_atomic64_fetch_and_relaxed)
+static inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#endif
+
+#if defined(arch_atomic64_andnot)
+static inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_andnot(i, v);
+}
+#define atomic64_andnot atomic64_andnot
+#endif
+
+#if defined(arch_atomic64_fetch_andnot)
+static inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot(i, v);
+}
+#define atomic64_fetch_andnot atomic64_fetch_andnot
+#endif
+
+#if defined(arch_atomic64_fetch_andnot_acquire)
+static inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_andnot_release)
+static inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_release(i, v);
+}
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#endif
+
+#if defined(arch_atomic64_fetch_andnot_relaxed)
+static inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#endif
+
+static inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_or(i, v);
}
+#define atomic64_or atomic64_or
-static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
+#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
+static inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
+#define atomic64_fetch_or atomic64_fetch_or
+#endif
-static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_or_acquire)
+static inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_fetch_xor(i, v);
+ return arch_atomic64_fetch_or_acquire(i, v);
+}
+#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_or_release)
+static inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or_release(i, v);
+}
+#define atomic64_fetch_or_release atomic64_fetch_or_release
+#endif
+
+#if defined(arch_atomic64_fetch_or_relaxed)
+static inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#endif
+
+static inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_xor(i, v);
}
+#define atomic64_xor atomic64_xor
-static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
+#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
+static inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
+#define atomic64_fetch_xor atomic64_fetch_xor
+#endif
-#ifdef arch_atomic_sub_and_test
-#define atomic_sub_and_test atomic_sub_and_test
-static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_xor_acquire)
+static inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_sub_and_test(i, v);
+ return arch_atomic64_fetch_xor_acquire(i, v);
}
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
#endif
-#ifdef arch_atomic64_sub_and_test
-#define atomic64_sub_and_test atomic64_sub_and_test
-static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_fetch_xor_release)
+static inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor_release(i, v);
+}
+#define atomic64_fetch_xor_release atomic64_fetch_xor_release
+#endif
+
+#if defined(arch_atomic64_fetch_xor_relaxed)
+static inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
+
+#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
+static inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_xchg(v, i);
+}
+#define atomic64_xchg atomic64_xchg
+#endif
+
+#if defined(arch_atomic64_xchg_acquire)
+static inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_xchg_acquire(v, i);
+}
+#define atomic64_xchg_acquire atomic64_xchg_acquire
+#endif
+
+#if defined(arch_atomic64_xchg_release)
+static inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_xchg_release(v, i);
+}
+#define atomic64_xchg_release atomic64_xchg_release
+#endif
+
+#if defined(arch_atomic64_xchg_relaxed)
+static inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#endif
+
+#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
+static inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+#define atomic64_cmpxchg atomic64_cmpxchg
+#endif
+
+#if defined(arch_atomic64_cmpxchg_acquire)
+static inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic64_cmpxchg_release)
+static inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_release(v, old, new);
+}
+#define atomic64_cmpxchg_release atomic64_cmpxchg_release
+#endif
+
+#if defined(arch_atomic64_cmpxchg_relaxed)
+static inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic64_try_cmpxchg)
+static inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg(v, old, new);
+}
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+#endif
+
+#if defined(arch_atomic64_try_cmpxchg_acquire)
+static inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic64_try_cmpxchg_release)
+static inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_release(v, old, new);
+}
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#endif
+
+#if defined(arch_atomic64_try_cmpxchg_relaxed)
+static inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic64_sub_and_test)
+static inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
+#define atomic64_sub_and_test atomic64_sub_and_test
#endif
-#ifdef arch_atomic_add_negative
-#define atomic_add_negative atomic_add_negative
-static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+#if defined(arch_atomic64_dec_and_test)
+static inline bool
+atomic64_dec_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic_add_negative(i, v);
+ return arch_atomic64_dec_and_test(v);
}
+#define atomic64_dec_and_test atomic64_dec_and_test
#endif
-#ifdef arch_atomic64_add_negative
-#define atomic64_add_negative atomic64_add_negative
-static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_inc_and_test)
+static inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_and_test(v);
+}
+#define atomic64_inc_and_test atomic64_inc_and_test
+#endif
+
+#if defined(arch_atomic64_add_negative)
+static inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
+#define atomic64_add_negative atomic64_add_negative
+#endif
+
+#if defined(arch_atomic64_fetch_add_unless)
+static inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_unless(v, a, u);
+}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#endif
+
+#if defined(arch_atomic64_add_unless)
+static inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_unless(v, a, u);
+}
+#define atomic64_add_unless atomic64_add_unless
+#endif
+
+#if defined(arch_atomic64_inc_not_zero)
+static inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_not_zero(v);
+}
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+#endif
+
+#if defined(arch_atomic64_inc_unless_negative)
+static inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_unless_negative(v);
+}
+#define atomic64_inc_unless_negative atomic64_inc_unless_negative
+#endif
+
+#if defined(arch_atomic64_dec_unless_positive)
+static inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_unless_positive(v);
+}
+#define atomic64_dec_unless_positive atomic64_dec_unless_positive
+#endif
+
+#if defined(arch_atomic64_dec_if_positive)
+static inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_if_positive(v);
+}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+#endif
+
+#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
+#define xchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, __VA_ARGS__); \
+})
+#endif
+
+#if defined(arch_xchg_acquire)
+#define xchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+#endif
+
+#if defined(arch_xchg_release)
+#define xchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_release(__ai_ptr, __VA_ARGS__); \
+})
+#endif
+
+#if defined(arch_xchg_relaxed)
+#define xchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+#endif
+
+#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
+#define cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+#endif
+
+#if defined(arch_cmpxchg_acquire)
+#define cmpxchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+#endif
+
+#if defined(arch_cmpxchg_release)
+#define cmpxchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+})
#endif
-#define xchg(ptr, new) \
+#if defined(arch_cmpxchg_relaxed)
+#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg(__ai_ptr, (new)); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
+#endif
-#define cmpxchg(ptr, old, new) \
+#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
+#define cmpxchg64(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg(__ai_ptr, (old), (new)); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
+#endif
-#define sync_cmpxchg(ptr, old, new) \
+#if defined(arch_cmpxchg64_acquire)
+#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_sync_cmpxchg(__ai_ptr, (old), (new)); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
+#endif
-#define cmpxchg_local(ptr, old, new) \
+#if defined(arch_cmpxchg64_release)
+#define cmpxchg64_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_local(__ai_ptr, (old), (new)); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
+#endif
-#define cmpxchg64(ptr, old, new) \
+#if defined(arch_cmpxchg64_relaxed)
+#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64(__ai_ptr, (old), (new)); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
})
+#endif
-#define cmpxchg64_local(ptr, old, new) \
+#define cmpxchg_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_local(__ai_ptr, (old), (new)); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
})
-#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+#define cmpxchg64_local(ptr, ...) \
({ \
- typeof(p1) __ai_p1 = (p1); \
- kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
- arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
})
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
-({ \
- typeof(p1) __ai_p1 = (p1); \
- kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
- arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
+#define sync_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_double(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
+})
+
+
+#define cmpxchg_double_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
})
-#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
+// b29b625d5de9280f680e42c7be859b55b15e5f6a
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 87d14476edc2..881c7e27af28 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -1,269 +1,1013 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
-/*
- * Copyright (C) 2005 Silicon Graphics, Inc.
- * Christoph Lameter
- *
- * Allows to provide arch independent atomic definitions without the need to
- * edit all arch specific atomic.h files.
- */
#include <asm/types.h>
-/*
- * Suppport for atomic_long_t
- *
- * Casts for parameters are avoided for existing atomic functions in order to
- * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
- * macros of a platform may have.
- */
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
+#endif
-#if BITS_PER_LONG == 64
+#ifdef CONFIG_64BIT
-typedef atomic64_t atomic_long_t;
+static inline long
+atomic_long_read(const atomic_long_t *v)
+{
+ return atomic64_read(v);
+}
-#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
-#define ATOMIC_LONG_PFX(x) atomic64 ## x
-#define ATOMIC_LONG_TYPE s64
+static inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+ return atomic64_read_acquire(v);
+}
-#else
+static inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+ atomic64_set(v, i);
+}
-typedef atomic_t atomic_long_t;
+static inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+ atomic64_set_release(v, i);
+}
-#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-#define ATOMIC_LONG_PFX(x) atomic ## x
-#define ATOMIC_LONG_TYPE int
+static inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+ atomic64_add(i, v);
+}
-#endif
+static inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+ return atomic64_add_return(i, v);
+}
+
+static inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ return atomic64_add_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ return atomic64_add_return_release(i, v);
+}
+
+static inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ return atomic64_add_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_add(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_add_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_add_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_add_relaxed(i, v);
+}
+
+static inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+ atomic64_sub(i, v);
+}
+
+static inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ return atomic64_sub_return(i, v);
+}
+
+static inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ return atomic64_sub_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ return atomic64_sub_return_release(i, v);
+}
+
+static inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ return atomic64_sub_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_sub(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_sub_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_sub_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_sub_relaxed(i, v);
+}
+
+static inline void
+atomic_long_inc(atomic_long_t *v)
+{
+ atomic64_inc(v);
+}
+
+static inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+ return atomic64_inc_return(v);
+}
+
+static inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ return atomic64_inc_return_acquire(v);
+}
+
+static inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+ return atomic64_inc_return_release(v);
+}
+
+static inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ return atomic64_inc_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+ return atomic64_fetch_inc(v);
+}
+
+static inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ return atomic64_fetch_inc_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ return atomic64_fetch_inc_release(v);
+}
+
+static inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ return atomic64_fetch_inc_relaxed(v);
+}
+
+static inline void
+atomic_long_dec(atomic_long_t *v)
+{
+ atomic64_dec(v);
+}
+
+static inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+ return atomic64_dec_return(v);
+}
+
+static inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ return atomic64_dec_return_acquire(v);
+}
+
+static inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+ return atomic64_dec_return_release(v);
+}
+
+static inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ return atomic64_dec_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+ return atomic64_fetch_dec(v);
+}
+
+static inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ return atomic64_fetch_dec_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ return atomic64_fetch_dec_release(v);
+}
+
+static inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ return atomic64_fetch_dec_relaxed(v);
+}
+
+static inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+ atomic64_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_and_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_and_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_and_relaxed(i, v);
+}
+
+static inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+ atomic64_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_andnot_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_andnot_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+ atomic64_or(i, v);
+}
+
+static inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_or(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_or_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_or_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_or_relaxed(i, v);
+}
+
+static inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+ atomic64_xor(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_xor(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_xor_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_xor_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ return atomic64_fetch_xor_relaxed(i, v);
+}
+
+static inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+ return atomic64_xchg(v, i);
+}
+
+static inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+ return atomic64_xchg_acquire(v, i);
+}
+
+static inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+ return atomic64_xchg_release(v, i);
+}
+
+static inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+ return atomic64_xchg_relaxed(v, i);
+}
+
+static inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ return atomic64_cmpxchg(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ return atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ return atomic64_cmpxchg_release(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ return atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ return atomic64_try_cmpxchg(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ return atomic64_sub_and_test(i, v);
+}
+
+static inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+ return atomic64_dec_and_test(v);
+}
+
+static inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+ return atomic64_inc_and_test(v);
+}
+
+static inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ return atomic64_add_negative(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ return atomic64_fetch_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ return atomic64_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ return atomic64_inc_not_zero(v);
+}
+
+static inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ return atomic64_inc_unless_negative(v);
+}
+
+static inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ return atomic64_dec_unless_positive(v);
+}
+
+static inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ return atomic64_dec_if_positive(v);
+}
+
+#else /* CONFIG_64BIT */
+
+static inline long
+atomic_long_read(const atomic_long_t *v)
+{
+ return atomic_read(v);
+}
+
+static inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+ return atomic_read_acquire(v);
+}
+
+static inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+ atomic_set(v, i);
+}
+
+static inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+ atomic_set_release(v, i);
+}
+
+static inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+ atomic_add(i, v);
+}
+
+static inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
+static inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ return atomic_add_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ return atomic_add_return_release(i, v);
+}
+
+static inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ return atomic_add_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ return atomic_fetch_add(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ return atomic_fetch_add_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ return atomic_fetch_add_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ return atomic_fetch_add_relaxed(i, v);
+}
+
+static inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+ atomic_sub(i, v);
+}
+
+static inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ return atomic_sub_return(i, v);
+}
+
+static inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ return atomic_sub_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ return atomic_sub_return_release(i, v);
+}
+
+static inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ return atomic_sub_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ return atomic_fetch_sub(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ return atomic_fetch_sub_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ return atomic_fetch_sub_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ return atomic_fetch_sub_relaxed(i, v);
+}
+
+static inline void
+atomic_long_inc(atomic_long_t *v)
+{
+ atomic_inc(v);
+}
+
+static inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+ return atomic_inc_return(v);
+}
+
+static inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ return atomic_inc_return_acquire(v);
+}
+
+static inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+ return atomic_inc_return_release(v);
+}
+
+static inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ return atomic_inc_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+ return atomic_fetch_inc(v);
+}
+
+static inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ return atomic_fetch_inc_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ return atomic_fetch_inc_release(v);
+}
+
+static inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ return atomic_fetch_inc_relaxed(v);
+}
+
+static inline void
+atomic_long_dec(atomic_long_t *v)
+{
+ atomic_dec(v);
+}
+
+static inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+ return atomic_dec_return(v);
+}
+
+static inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ return atomic_dec_return_acquire(v);
+}
+
+static inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+ return atomic_dec_return_release(v);
+}
+
+static inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ return atomic_dec_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+ return atomic_fetch_dec(v);
+}
+
+static inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ return atomic_fetch_dec_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ return atomic_fetch_dec_release(v);
+}
+
+static inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ return atomic_fetch_dec_relaxed(v);
+}
+
+static inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+ atomic_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ return atomic_fetch_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ return atomic_fetch_and_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ return atomic_fetch_and_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ return atomic_fetch_and_relaxed(i, v);
+}
+
+static inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+ atomic_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ return atomic_fetch_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ return atomic_fetch_andnot_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ return atomic_fetch_andnot_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ return atomic_fetch_andnot_relaxed(i, v);
+}
-#define ATOMIC_LONG_READ_OP(mo) \
-static inline long atomic_long_read##mo(const atomic_long_t *l) \
-{ \
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
- \
- return (long)ATOMIC_LONG_PFX(_read##mo)(v); \
-}
-ATOMIC_LONG_READ_OP()
-ATOMIC_LONG_READ_OP(_acquire)
-
-#undef ATOMIC_LONG_READ_OP
-
-#define ATOMIC_LONG_SET_OP(mo) \
-static inline void atomic_long_set##mo(atomic_long_t *l, long i) \
-{ \
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
- \
- ATOMIC_LONG_PFX(_set##mo)(v, i); \
-}
-ATOMIC_LONG_SET_OP()
-ATOMIC_LONG_SET_OP(_release)
-
-#undef ATOMIC_LONG_SET_OP
-
-#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \
-static inline long \
-atomic_long_##op##_return##mo(long i, atomic_long_t *l) \
-{ \
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
- \
- return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \
-}
-ATOMIC_LONG_ADD_SUB_OP(add,)
-ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
-ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
-ATOMIC_LONG_ADD_SUB_OP(add, _release)
-ATOMIC_LONG_ADD_SUB_OP(sub,)
-ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
-ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
-ATOMIC_LONG_ADD_SUB_OP(sub, _release)
-
-#undef ATOMIC_LONG_ADD_SUB_OP
-
-#define atomic_long_cmpxchg_relaxed(l, old, new) \
- (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
- (old), (new)))
-#define atomic_long_cmpxchg_acquire(l, old, new) \
- (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
- (old), (new)))
-#define atomic_long_cmpxchg_release(l, old, new) \
- (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
- (old), (new)))
-#define atomic_long_cmpxchg(l, old, new) \
- (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
-
-
-#define atomic_long_try_cmpxchg_relaxed(l, old, new) \
- (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
- (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg_acquire(l, old, new) \
- (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
- (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg_release(l, old, new) \
- (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
- (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg(l, old, new) \
- (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \
- (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-
-
-#define atomic_long_xchg_relaxed(v, new) \
- (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg_acquire(v, new) \
- (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg_release(v, new) \
- (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg(v, new) \
- (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-
-static __always_inline void atomic_long_inc(atomic_long_t *l)
-{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
- ATOMIC_LONG_PFX(_inc)(v);
-}
-
-static __always_inline void atomic_long_dec(atomic_long_t *l)
+static inline void
+atomic_long_or(long i, atomic_long_t *v)
{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ atomic_or(i, v);
+}
- ATOMIC_LONG_PFX(_dec)(v);
+static inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ return atomic_fetch_or(i, v);
}
-#define ATOMIC_LONG_FETCH_OP(op, mo) \
-static inline long \
-atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \
-{ \
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
- \
- return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \
+static inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ return atomic_fetch_or_acquire(i, v);
}
-ATOMIC_LONG_FETCH_OP(add, )
-ATOMIC_LONG_FETCH_OP(add, _relaxed)
-ATOMIC_LONG_FETCH_OP(add, _acquire)
-ATOMIC_LONG_FETCH_OP(add, _release)
-ATOMIC_LONG_FETCH_OP(sub, )
-ATOMIC_LONG_FETCH_OP(sub, _relaxed)
-ATOMIC_LONG_FETCH_OP(sub, _acquire)
-ATOMIC_LONG_FETCH_OP(sub, _release)
-ATOMIC_LONG_FETCH_OP(and, )
-ATOMIC_LONG_FETCH_OP(and, _relaxed)
-ATOMIC_LONG_FETCH_OP(and, _acquire)
-ATOMIC_LONG_FETCH_OP(and, _release)
-ATOMIC_LONG_FETCH_OP(andnot, )
-ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
-ATOMIC_LONG_FETCH_OP(andnot, _acquire)
-ATOMIC_LONG_FETCH_OP(andnot, _release)
-ATOMIC_LONG_FETCH_OP(or, )
-ATOMIC_LONG_FETCH_OP(or, _relaxed)
-ATOMIC_LONG_FETCH_OP(or, _acquire)
-ATOMIC_LONG_FETCH_OP(or, _release)
-ATOMIC_LONG_FETCH_OP(xor, )
-ATOMIC_LONG_FETCH_OP(xor, _relaxed)
-ATOMIC_LONG_FETCH_OP(xor, _acquire)
-ATOMIC_LONG_FETCH_OP(xor, _release)
+static inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ return atomic_fetch_or_release(i, v);
+}
-#undef ATOMIC_LONG_FETCH_OP
+static inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ return atomic_fetch_or_relaxed(i, v);
+}
-#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \
-static inline long \
-atomic_long_fetch_##op##mo(atomic_long_t *l) \
-{ \
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
- \
- return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \
+static inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+ atomic_xor(i, v);
}
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc,)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec,)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
+static inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ return atomic_fetch_xor(i, v);
+}
-#undef ATOMIC_LONG_FETCH_INC_DEC_OP
+static inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ return atomic_fetch_xor_acquire(i, v);
+}
-#define ATOMIC_LONG_OP(op) \
-static __always_inline void \
-atomic_long_##op(long i, atomic_long_t *l) \
-{ \
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
- \
- ATOMIC_LONG_PFX(_##op)(i, v); \
+static inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ return atomic_fetch_xor_release(i, v);
}
-ATOMIC_LONG_OP(add)
-ATOMIC_LONG_OP(sub)
-ATOMIC_LONG_OP(and)
-ATOMIC_LONG_OP(andnot)
-ATOMIC_LONG_OP(or)
-ATOMIC_LONG_OP(xor)
+static inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ return atomic_fetch_xor_relaxed(i, v);
+}
-#undef ATOMIC_LONG_OP
+static inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+ return atomic_xchg(v, i);
+}
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+static inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ return atomic_xchg_acquire(v, i);
+}
- return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
+static inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+ return atomic_xchg_release(v, i);
}
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
+static inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ return atomic_xchg_relaxed(v, i);
+}
- return ATOMIC_LONG_PFX(_dec_and_test)(v);
+static inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ return atomic_cmpxchg(v, old, new);
}
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
+static inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ return atomic_cmpxchg_acquire(v, old, new);
+}
- return ATOMIC_LONG_PFX(_inc_and_test)(v);
+static inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ return atomic_cmpxchg_release(v, old, new);
}
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+static inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ return atomic_cmpxchg_relaxed(v, old, new);
+}
- return ATOMIC_LONG_PFX(_add_negative)(i, v);
+static inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ return atomic_try_cmpxchg(v, (int *)old, new);
}
-#define ATOMIC_LONG_INC_DEC_OP(op, mo) \
-static inline long \
-atomic_long_##op##_return##mo(atomic_long_t *l) \
-{ \
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
- \
- return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \
+static inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ return atomic_try_cmpxchg_acquire(v, (int *)old, new);
}
-ATOMIC_LONG_INC_DEC_OP(inc,)
-ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
-ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
-ATOMIC_LONG_INC_DEC_OP(inc, _release)
-ATOMIC_LONG_INC_DEC_OP(dec,)
-ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
-ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
-ATOMIC_LONG_INC_DEC_OP(dec, _release)
-#undef ATOMIC_LONG_INC_DEC_OP
+static inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ return atomic_try_cmpxchg_release(v, (int *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+}
+
+static inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ return atomic_sub_and_test(i, v);
+}
+
+static inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+ return atomic_dec_and_test(v);
+}
+
+static inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+ return atomic_inc_and_test(v);
+}
+
+static inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ return atomic_add_negative(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ return atomic_fetch_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ return atomic_add_unless(v, a, u);
+}
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+static inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ return atomic_inc_not_zero(v);
+}
- return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
+static inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ return atomic_inc_unless_negative(v);
}
-#define atomic_long_inc_not_zero(l) \
- ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
+static inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ return atomic_dec_unless_positive(v);
+}
-#define atomic_long_cond_read_relaxed(v, c) \
- ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c))
-#define atomic_long_cond_read_acquire(v, c) \
- ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+static inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ return atomic_dec_if_positive(v);
+}
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+#endif /* CONFIG_64BIT */
+#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+// 77558968132ce4f911ad53f6f52ce423006f6268
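
As a minimal sketch of how the long-typed wrappers above are meant to be used on both 32-bit and 64-bit kernels; the counter and the helper functions are hypothetical names, not part of the header:

#include <linux/atomic.h>

static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);

static void record_event(void)
{
	/* Fully ordered RMW: returns the updated count. */
	long total = atomic_long_inc_return(&nr_events);

	if (total < 0)
		atomic_long_set(&nr_events, 0);
}

static bool drop_event(void)
{
	/* Returns true when the count reaches zero. */
	return atomic_long_dec_and_test(&nr_events);
}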
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 20561a60db9c..0e9bd9c83870 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -211,9 +211,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
/*
* WARN_ON_SMP() is for cases that the warning is either
* meaningless for !SMP or may even cause failures.
- * This is usually used for cases that we have
- * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked()
- * returns 0 for uniprocessor settings.
* It can also be used with values that are only defined
* on SMP:
*
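
A sketch of the use case the comment above still describes, a value that only exists on SMP builds; the structure and field names are made up for illustration:

#include <linux/bug.h>

struct foo {
#ifdef CONFIG_SMP
	int on_cpu;			/* only present on SMP builds */
#endif
	int bar;
};

static void check(struct foo *f)
{
	/* Compiles away (and never references ->on_cpu) on UP kernels. */
	WARN_ON_SMP(!f->on_cpu);
}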
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 5b63b94ef6b5..a008f504a2d0 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -36,6 +36,17 @@ extern u64 ioread64(void __iomem *);
extern u64 ioread64be(void __iomem *);
#endif
+#ifdef readq
+#define ioread64_lo_hi ioread64_lo_hi
+#define ioread64_hi_lo ioread64_hi_lo
+#define ioread64be_lo_hi ioread64be_lo_hi
+#define ioread64be_hi_lo ioread64be_hi_lo
+extern u64 ioread64_lo_hi(void __iomem *addr);
+extern u64 ioread64_hi_lo(void __iomem *addr);
+extern u64 ioread64be_lo_hi(void __iomem *addr);
+extern u64 ioread64be_hi_lo(void __iomem *addr);
+#endif
+
extern void iowrite8(u8, void __iomem *);
extern void iowrite16(u16, void __iomem *);
extern void iowrite16be(u16, void __iomem *);
@@ -46,6 +57,17 @@ extern void iowrite64(u64, void __iomem *);
extern void iowrite64be(u64, void __iomem *);
#endif
+#ifdef writeq
+#define iowrite64_lo_hi iowrite64_lo_hi
+#define iowrite64_hi_lo iowrite64_hi_lo
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+extern void iowrite64_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64_hi_lo(u64 val, void __iomem *addr);
+extern void iowrite64be_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
+#endif
+
/*
* "string" versions of the above. Note that they
* use native byte ordering for the accesses (on
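
The _lo_hi/_hi_lo accessors declared above let a driver read a 64-bit MMIO register as two 32-bit halves in a defined order. A hedged sketch of such a read; the register offset and function name are hypothetical:

#include <linux/io.h>

#define DEV_COUNTER_REG	0x40		/* hypothetical register offset */

static u64 dev_read_counter(void __iomem *base)
{
	/* Reads the low 32 bits first, then the high 32 bits. */
	return ioread64_lo_hi(base + DEV_COUNTER_REG);
}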
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index 27bf3377b0cb..fe801f01625e 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -7,7 +7,7 @@
*/
#ifdef CONFIG_MMU
-#error need to prove a real asm/page.h
+#error need to provide a real asm/page.h
#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 05e61e6c843f..fa782fba51ee 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -606,7 +606,7 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
return 0;
}
-static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep)
{
@@ -615,10 +615,10 @@ static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
* non-present, preventing the hardware from asynchronously
* updating it.
*/
- return ptep_get_and_clear(mm, addr, ptep);
+ return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
-static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -626,7 +626,7 @@ static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
* The pte is non-present, so there's no hardware state to
* preserve.
*/
- set_pte_at(mm, addr, ptep, pte);
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
}
#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -644,22 +644,22 @@ static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
* queue the update to be done at some later time. The update must be
* actually committed before the pte lock is released, however.
*/
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep)
{
- return __ptep_modify_prot_start(mm, addr, ptep);
+ return __ptep_modify_prot_start(vma, addr, ptep);
}
/*
* Commit an update to a pte, leaving any hardware-controlled bits in
* the PTE unmodified.
*/
-static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t old_pte, pte_t pte)
{
- __ptep_modify_prot_commit(mm, addr, ptep, pte);
+ __ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
#endif /* CONFIG_MMU */
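
With the new signatures, the transaction pair takes the VMA and the commit side additionally receives the old PTE value. A sketch of the caller pattern, roughly as used when changing protections, assuming the page-table lock is already held; the wrapper function is illustrative:

#include <linux/mm.h>
#include <asm/pgtable.h>

static void change_one_pte(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep, pgprot_t newprot)
{
	pte_t oldpte, ptent;

	/* Start the transaction: the PTE may be transiently cleared. */
	oldpte = ptep_modify_prot_start(vma, addr, ptep);
	ptent = pte_modify(oldpte, newprot);
	/* Commit before the pte lock is released. */
	ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
}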
diff --git a/include/uapi/asm-generic/shmparam.h b/include/asm-generic/shmparam.h
index 8b78c0ba08b1..8b78c0ba08b1 100644
--- a/include/uapi/asm-generic/shmparam.h
+++ b/include/asm-generic/shmparam.h
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index d82c78a79da5..b3d2241e03f8 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -22,7 +22,6 @@
#endif
#ifndef get_fs
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 4a5ad10e75f0..4be38cd0b8d5 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -143,7 +143,9 @@ extern const struct crypto_type crypto_blkcipher_type;
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
+int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
+void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
@@ -185,10 +187,8 @@ static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
-void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
- unsigned int head);
-struct crypto_instance *crypto_alloc_instance(const char *name,
- struct crypto_alg *alg);
+void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
+ unsigned int head);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
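
crypto_register_templates()/crypto_unregister_templates() let a module register an array of templates in one call. A hedged sketch of module init/exit using them; the template array and its entries are illustrative, with the real ->create callbacks elided:

#include <linux/module.h>
#include <crypto/algapi.h>

static struct crypto_template mydrv_tmpls[] = {
	{ .name = "mymode" },		/* callbacks elided in this sketch */
	{ .name = "mymode2" },
};

static int __init mydrv_mod_init(void)
{
	return crypto_register_templates(mydrv_tmpls,
					 ARRAY_SIZE(mydrv_tmpls));
}

static void __exit mydrv_mod_exit(void)
{
	crypto_unregister_templates(mydrv_tmpls, ARRAY_SIZE(mydrv_tmpls));
}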
diff --git a/include/crypto/arc4.h b/include/crypto/arc4.h
new file mode 100644
index 000000000000..5b2c24ab0139
--- /dev/null
+++ b/include/crypto/arc4.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Common values for ARC4 Cipher Algorithm
+ */
+
+#ifndef _CRYPTO_ARC4_H
+#define _CRYPTO_ARC4_H
+
+#define ARC4_MIN_KEY_SIZE 1
+#define ARC4_MAX_KEY_SIZE 256
+#define ARC4_BLOCK_SIZE 1
+
+#endif /* _CRYPTO_ARC4_H */
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 482461d8931d..0d464db74bf5 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -169,9 +169,6 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
-void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
-
-int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
static inline struct alg_sock *alg_sk(struct sock *sk)
{
@@ -230,15 +227,11 @@ static inline bool af_alg_readable(struct sock *sk)
return PAGE_SIZE <= af_alg_rcvbuf(sk);
}
-int af_alg_alloc_tsgl(struct sock *sk);
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset);
-void af_alg_free_areq_sgls(struct af_alg_async_req *areq);
-int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags);
void af_alg_wmem_wakeup(struct sock *sk);
int af_alg_wait_for_data(struct sock *sk, unsigned flags);
-void af_alg_data_wakeup(struct sock *sk);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
index 40623f4457df..8c602b187c58 100644
--- a/include/crypto/internal/cryptouser.h
+++ b/include/crypto/internal/cryptouser.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <net/netlink.h>
+extern struct sock *crypto_nlsk;
+
struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
#ifdef CONFIG_CRYPTO_STATS
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index a0b0ad9d585e..e355fdb642a9 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -170,7 +170,7 @@ static inline unsigned int ahash_instance_headroom(void)
static inline struct ahash_instance *ahash_alloc_instance(
const char *name, struct crypto_alg *alg)
{
- return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
+ return crypto_alloc_instance(name, alg, ahash_instance_headroom());
}
static inline void ahash_request_complete(struct ahash_request *req, int err)
@@ -233,8 +233,8 @@ static inline void *shash_instance_ctx(struct shash_instance *inst)
static inline struct shash_instance *shash_alloc_instance(
const char *name, struct crypto_alg *alg)
{
- return crypto_alloc_instance2(name, alg,
- sizeof(struct shash_alg) - sizeof(*alg));
+ return crypto_alloc_instance(name, alg,
+ sizeof(struct shash_alg) - sizeof(*alg));
}
static inline struct crypto_shash *crypto_spawn_shash(
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 453e867b4bd9..9de6032209cb 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -205,5 +205,20 @@ static inline unsigned int crypto_skcipher_alg_max_keysize(
return alg->max_keysize;
}
+/* Helpers for simple block cipher modes of operation */
+struct skcipher_ctx_simple {
+ struct crypto_cipher *cipher; /* underlying block cipher */
+};
+static inline struct crypto_cipher *
+skcipher_cipher_simple(struct crypto_skcipher *tfm)
+{
+ struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
+
+ return ctx->cipher;
+}
+struct skcipher_instance *
+skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
+ struct crypto_alg **cipher_alg_ret);
+
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
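
A sketch of how a simple mode-of-operation template might use the new helper from its ->create() callback; the function name is illustrative and the mode-specific setup is elided:

#include <linux/err.h>
#include <crypto/internal/skcipher.h>

static int crypto_mymode_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	/* Allocates the instance and spawns the underlying block cipher. */
	inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* A real template would install its encrypt/decrypt callbacks here. */

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		inst->free(inst);
	crypto_mod_put(alg);
	return err;
}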
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index ba782e10065e..ad2aa743dd99 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -1,15 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* The MORUS-1280 Authenticated-Encryption Algorithm
* Common glue skeleton -- header file
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#ifndef _CRYPTO_MORUS1280_GLUE_H
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index 27fa790a2362..df8e1103ff94 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -1,15 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* The MORUS-640 Authenticated-Encryption Algorithm
* Common glue skeleton -- header file
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#ifndef _CRYPTO_MORUS640_GLUE_H
diff --git a/include/crypto/morus_common.h b/include/crypto/morus_common.h
index 39f28c749951..969510a9a56c 100644
--- a/include/crypto/morus_common.h
+++ b/include/crypto/morus_common.h
@@ -1,15 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* The MORUS Authenticated-Encryption Algorithm
* Common definitions
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#ifndef _CRYPTO_MORUS_COMMON_H
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index 4af119f7e07b..856e32af8657 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -19,7 +19,7 @@
#define STREEBOG_BLOCK_SIZE 64
struct streebog_uint512 {
- u64 qword[8];
+ __le64 qword[8];
};
struct streebog_state {
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 34d94c95445a..75fd96ff976b 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -47,8 +47,8 @@ static inline int xts_verify_key(struct crypto_skcipher *tfm,
}
/* ensure that the AES and tweak key are not identical */
- if ((fips_enabled || crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_WEAK_KEY) &&
+ if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
!crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
return -EINVAL;
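
A sketch of an XTS setkey handler calling the check above before programming the two half-keys; the driver-specific part and the function name are illustrative:

#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

static int mydrv_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	int err;

	/* Rejects keys whose two halves are identical when weak keys are forbidden. */
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	/* ... program the data key and the tweak key into the context ... */
	return 0;
}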
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index bdb0d5548f39..a3184416ddc5 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -57,8 +57,7 @@
#include <linux/workqueue.h>
#include <linux/dma-fence.h>
#include <linux/module.h>
-
-#include <asm/mman.h>
+#include <linux/mman.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 4923b00328c1..93a386be38fa 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -5,6 +5,7 @@
#define _DRM_AUDIO_COMPONENT_H_
struct drm_audio_component;
+struct device;
/**
* struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 5736c942c85b..2d4fc2d33810 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1365,6 +1365,13 @@ enum drm_dp_quirk {
* to 16 bits. So it will give a constant value (0x8000) for compatibility.
*/
DP_DPCD_QUIRK_CONSTANT_N,
+ /**
+ * @DP_DPCD_QUIRK_NO_PSR:
+ *
+ * The device does not support PSR even if it reports that it does, or the
+ * driver still needs to implement proper handling for such a device.
+ */
+ DP_DPCD_QUIRK_NO_PSR,
};
/**
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 59f005b419cf..727af08e5ea6 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -616,7 +616,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+int __must_check
+drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index a6de09c5e47f..c21682f76cd3 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -250,4 +250,22 @@ struct hdcp2_dp_errata_stream_type {
#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2))
#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+/*
+ * Helper functions to convert a 24-bit big-endian HDCP sequence number to
+ * host format and back
+ */
+static inline
+u32 drm_hdcp2_seq_num_to_u32(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN])
+{
+ return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16);
+}
+
+static inline
+void drm_hdcp2_u32_to_seq_num(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
+{
+ seq_num[0] = val >> 16;
+ seq_num[1] = val >> 8;
+ seq_num[2] = val;
+}
+
#endif
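
A small round-trip sketch for the helpers added above; the counter value is arbitrary:

#include <linux/bug.h>
#include <drm/drm_hdcp.h>

static void hdcp_seq_num_example(void)
{
	u8 seq_num[HDCP_2_2_SEQ_NUM_LEN];
	u32 seq_num_v = 0x010203;	/* hypothetical counter value */

	/* Pack the host value into the 24-bit big-endian wire format... */
	drm_hdcp2_u32_to_seq_num(seq_num, seq_num_v);
	/* ...and converting it back yields the original value. */
	WARN_ON(drm_hdcp2_seq_num_to_u32(seq_num) != seq_num_v);
}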
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index fca22d463e1b..dcb95bd9dee6 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -26,6 +26,11 @@
#include "drm_audio_component.h"
+enum i915_component_type {
+ I915_COMPONENT_AUDIO = 1,
+ I915_COMPONENT_HDCP,
+};
+
/* MAX_PORT is the number of ports.
* It must be kept in sync with I915_MAX_PORTS defined in i915_drv.h
*/
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c44703f471b3..7523e9a7b6e2 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -100,4 +100,19 @@ extern struct resource intel_graphics_stolen_res;
#define INTEL_GEN11_BSM_DW1 0xc4
#define INTEL_BSM_MASK (-(1u << 20))
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
new file mode 100644
index 000000000000..8c344255146a
--- /dev/null
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0+) */
+/*
+ * Copyright © 2017-2018 Intel Corporation
+ *
+ * Authors:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#ifndef _I915_MEI_HDCP_INTERFACE_H_
+#define _I915_MEI_HDCP_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <drm/drm_hdcp.h>
+#include <drm/i915_drm.h>
+
+/**
+ * enum hdcp_port_type - HDCP port implementation type defined by ME FW
+ * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
+ * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
+ * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
+ * (HDMI 2.0) solution
+ * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
+ * solution
+ */
+enum hdcp_port_type {
+ HDCP_PORT_TYPE_INVALID,
+ HDCP_PORT_TYPE_INTEGRATED,
+ HDCP_PORT_TYPE_LSPCON,
+ HDCP_PORT_TYPE_CPDP
+};
+
+/**
+ * enum hdcp_wired_protocol - HDCP adaptation used on the port
+ * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
+ * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
+ * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
+ */
+enum hdcp_wired_protocol {
+ HDCP_PROTOCOL_INVALID,
+ HDCP_PROTOCOL_HDMI,
+ HDCP_PROTOCOL_DP
+};
+
+/**
+ * struct hdcp_port_data - intel specific HDCP port data
+ * @port: port index as per I915
+ * @port_type: HDCP port type as per ME FW classification
+ * @protocol: HDCP adaptation as per ME FW
+ * @k: Number of streams transmitted on the port. Only on DP MST is this != 1
+ * @seq_num_m: Count of RepeaterAuth_Stream_Manage messages propagated.
+ * Initialized to 0 on AKE_INIT. Incremented after every successful
+ * transmission of a RepeaterAuth_Stream_Manage message. When it rolls
+ * over, re-Auth has to be triggered.
+ * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
+ * streams
+ */
+struct hdcp_port_data {
+ enum port port;
+ u8 port_type;
+ u8 protocol;
+ u16 k;
+ u32 seq_num_m;
+ struct hdcp2_streamid_type *streams;
+};
+
+/**
+ * struct i915_hdcp_component_ops - ops for HDCP2.2 services.
+ * @owner: Module providing the ops
+ * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
+ * And Prepare AKE_Init.
+ * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
+ * AKE_Send_Cert and prepare
+ * AKE_Stored_Km/AKE_No_Stored_Km
+ * @verify_hprime: Verify AKE_Send_H_prime
+ * @store_pairing_info: Store pairing info received
+ * @initiate_locality_check: Prepare LC_Init
+ * @verify_lprime: Verify lprime
+ * @get_session_key: Prepare SKE_Send_Eks
+ * @repeater_check_flow_prepare_ack: Validate the Downstream topology
+ * and prepare rep_ack
+ * @verify_mprime: Verify mprime
+ * @enable_hdcp_authentication: Mark a port as authenticated.
+ * @close_hdcp_session: Close the Wired HDCP Tx session per port.
+ * This also disables the authenticated state of the port.
+ */
+struct i915_hdcp_component_ops {
+ /**
+ * @owner: mei_hdcp module
+ */
+ struct module *owner;
+
+ int (*initiate_hdcp2_session)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data);
+ int (*verify_receiver_cert_prepare_km)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert
+ *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz);
+ int (*verify_hprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime);
+ int (*store_pairing_info)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info
+ *pairing_info);
+ int (*initiate_locality_check)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data);
+ int (*verify_lprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime);
+ int (*get_session_key)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data);
+ int (*repeater_check_flow_prepare_ack)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack);
+ int (*verify_mprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready);
+ int (*enable_hdcp_authentication)(struct device *dev,
+ struct hdcp_port_data *data);
+ int (*close_hdcp_session)(struct device *dev,
+ struct hdcp_port_data *data);
+};
+
+/**
+ * struct i915_hdcp_comp_master - Used for communication between i915
+ * and mei_hdcp drivers for the HDCP2.2 services
+ * @mei_dev: device that provides the HDCP2.2 service from the MEI bus.
+ * @ops: Ops implemented by the mei_hdcp driver, used by the i915 driver.
+ */
+struct i915_hdcp_comp_master {
+ struct device *mei_dev;
+ const struct i915_hdcp_component_ops *ops;
+
+ /* To protect the above members. */
+ struct mutex mutex;
+};
+
+#endif /* _I915_MEI_HDCP_INTERFACE_H_ */
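
A hedged sketch of how the i915 side might call into the bound mei_hdcp component through this interface, taking the mutex that protects the master structure; the wrapper name is illustrative:

#include <drm/i915_mei_hdcp_interface.h>

static int intel_hdcp2_initiate_session(struct i915_hdcp_comp_master *comp,
					struct hdcp_port_data *data,
					struct hdcp2_ake_init *ake_data)
{
	int ret;

	mutex_lock(&comp->mutex);
	if (!comp->ops || !comp->mei_dev) {
		mutex_unlock(&comp->mutex);
		return -EINVAL;
	}
	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
	mutex_unlock(&comp->mutex);

	return ret;
}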
diff --git a/include/dt-bindings/clock/ath79-clk.h b/include/dt-bindings/clock/ath79-clk.h
index 27359ad83904..dcc679a7ad85 100644
--- a/include/dt-bindings/clock/ath79-clk.h
+++ b/include/dt-bindings/clock/ath79-clk.h
@@ -13,7 +13,9 @@
#define ATH79_CLK_CPU 0
#define ATH79_CLK_DDR 1
#define ATH79_CLK_AHB 2
+#define ATH79_CLK_REF 3
+#define ATH79_CLK_MDIO 4
-#define ATH79_CLK_END 3
+#define ATH79_CLK_END 5
#endif /* __DT_BINDINGS_ATH79_CLK_H */
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index b53be41929be..04f7ac345984 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -350,7 +350,7 @@
#define IMX8MQ_CLK_VPU_G2_ROOT 241
/* SCCG PLL GATE */
-#define IMX8MQ_SYS1_PLL_OUT 232
+#define IMX8MQ_SYS1_PLL_OUT 242
#define IMX8MQ_SYS2_PLL_OUT 243
#define IMX8MQ_SYS3_PLL_OUT 244
#define IMX8MQ_DRAM_PLL_OUT 245
@@ -372,24 +372,24 @@
/* txesc clock */
#define IMX8MQ_CLK_DSI_IPG_DIV 256
-#define IMX8MQ_CLK_TMU_ROOT 265
+#define IMX8MQ_CLK_TMU_ROOT 257
/* Display root clocks */
-#define IMX8MQ_CLK_DISP_AXI_ROOT 266
-#define IMX8MQ_CLK_DISP_APB_ROOT 267
-#define IMX8MQ_CLK_DISP_RTRM_ROOT 268
+#define IMX8MQ_CLK_DISP_AXI_ROOT 258
+#define IMX8MQ_CLK_DISP_APB_ROOT 259
+#define IMX8MQ_CLK_DISP_RTRM_ROOT 260
-#define IMX8MQ_CLK_OCOTP_ROOT 269
+#define IMX8MQ_CLK_OCOTP_ROOT 261
-#define IMX8MQ_CLK_DRAM_ALT_ROOT 270
-#define IMX8MQ_CLK_DRAM_CORE 271
+#define IMX8MQ_CLK_DRAM_ALT_ROOT 262
+#define IMX8MQ_CLK_DRAM_CORE 263
-#define IMX8MQ_CLK_MU_ROOT 272
-#define IMX8MQ_VIDEO2_PLL_OUT 273
+#define IMX8MQ_CLK_MU_ROOT 264
+#define IMX8MQ_VIDEO2_PLL_OUT 265
-#define IMX8MQ_CLK_CLKO2 274
+#define IMX8MQ_CLK_CLKO2 266
-#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275
+#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 267
-#define IMX8MQ_CLK_END 276
+#define IMX8MQ_CLK_END 268
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 7b24fc791146..228a5e234af0 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -71,7 +71,6 @@
#define MMP2_CLK_CCIC1_MIX 117
#define MMP2_CLK_CCIC1_PHY 118
#define MMP2_CLK_CCIC1_SPHY 119
-#define MMP2_CLK_SP 120
#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/r8a7778-clock.h b/include/dt-bindings/clock/r8a7778-clock.h
index f6b07c5399de..d0bff9ec5c66 100644
--- a/include/dt-bindings/clock/r8a7778-clock.h
+++ b/include/dt-bindings/clock/r8a7778-clock.h
@@ -30,6 +30,8 @@
#define R8A7778_CLK_SCIF3 23
#define R8A7778_CLK_SCIF4 22
#define R8A7778_CLK_SCIF5 21
+#define R8A7778_CLK_HSCIF0 19
+#define R8A7778_CLK_HSCIF1 18
#define R8A7778_CLK_TMU0 16
#define R8A7778_CLK_TMU1 15
#define R8A7778_CLK_TMU2 14
diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h
new file mode 100644
index 000000000000..82706b2706ac
--- /dev/null
+++ b/include/dt-bindings/iio/adc/ingenic,adc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_IIO_ADC_INGENIC_ADC_H
+#define _DT_BINDINGS_IIO_ADC_INGENIC_ADC_H
+
+/* ADC channel idx. */
+#define INGENIC_ADC_AUX 0
+#define INGENIC_ADC_BATTERY 1
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h
new file mode 100644
index 000000000000..7b2393be7361
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdm845.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SDM845 interconnect IDs
+ *
+ * Copyright (c) 2018, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_BLSP_1 1
+#define MASTER_TSIF 2
+#define MASTER_SDCC_2 3
+#define MASTER_SDCC_4 4
+#define MASTER_UFS_CARD 5
+#define MASTER_UFS_MEM 6
+#define MASTER_PCIE_0 7
+#define MASTER_A2NOC_CFG 8
+#define MASTER_QDSS_BAM 9
+#define MASTER_BLSP_2 10
+#define MASTER_CNOC_A2NOC 11
+#define MASTER_CRYPTO 12
+#define MASTER_IPA 13
+#define MASTER_PCIE_1 14
+#define MASTER_QDSS_ETR 15
+#define MASTER_USB3_0 16
+#define MASTER_USB3_1 17
+#define MASTER_CAMNOC_HF0_UNCOMP 18
+#define MASTER_CAMNOC_HF1_UNCOMP 19
+#define MASTER_CAMNOC_SF_UNCOMP 20
+#define MASTER_SPDM 21
+#define MASTER_TIC 22
+#define MASTER_SNOC_CNOC 23
+#define MASTER_QDSS_DAP 24
+#define MASTER_CNOC_DC_NOC 25
+#define MASTER_APPSS_PROC 26
+#define MASTER_GNOC_CFG 27
+#define MASTER_LLCC 28
+#define MASTER_TCU_0 29
+#define MASTER_MEM_NOC_CFG 30
+#define MASTER_GNOC_MEM_NOC 31
+#define MASTER_MNOC_HF_MEM_NOC 32
+#define MASTER_MNOC_SF_MEM_NOC 33
+#define MASTER_SNOC_GC_MEM_NOC 34
+#define MASTER_SNOC_SF_MEM_NOC 35
+#define MASTER_GFX3D 36
+#define MASTER_CNOC_MNOC_CFG 37
+#define MASTER_CAMNOC_HF0 38
+#define MASTER_CAMNOC_HF1 39
+#define MASTER_CAMNOC_SF 40
+#define MASTER_MDP0 41
+#define MASTER_MDP1 42
+#define MASTER_ROTATOR 43
+#define MASTER_VIDEO_P0 44
+#define MASTER_VIDEO_P1 45
+#define MASTER_VIDEO_PROC 46
+#define MASTER_SNOC_CFG 47
+#define MASTER_A1NOC_SNOC 48
+#define MASTER_A2NOC_SNOC 49
+#define MASTER_GNOC_SNOC 50
+#define MASTER_MEM_NOC_SNOC 51
+#define MASTER_ANOC_PCIE_SNOC 52
+#define MASTER_PIMEM 53
+#define MASTER_GIC 54
+#define SLAVE_A1NOC_SNOC 55
+#define SLAVE_SERVICE_A1NOC 56
+#define SLAVE_ANOC_PCIE_A1NOC_SNOC 57
+#define SLAVE_A2NOC_SNOC 58
+#define SLAVE_ANOC_PCIE_SNOC 59
+#define SLAVE_SERVICE_A2NOC 60
+#define SLAVE_CAMNOC_UNCOMP 61
+#define SLAVE_A1NOC_CFG 62
+#define SLAVE_A2NOC_CFG 63
+#define SLAVE_AOP 64
+#define SLAVE_AOSS 65
+#define SLAVE_CAMERA_CFG 66
+#define SLAVE_CLK_CTL 67
+#define SLAVE_CDSP_CFG 68
+#define SLAVE_RBCPR_CX_CFG 69
+#define SLAVE_CRYPTO_0_CFG 70
+#define SLAVE_DCC_CFG 71
+#define SLAVE_CNOC_DDRSS 72
+#define SLAVE_DISPLAY_CFG 73
+#define SLAVE_GLM 74
+#define SLAVE_GFX3D_CFG 75
+#define SLAVE_IMEM_CFG 76
+#define SLAVE_IPA_CFG 77
+#define SLAVE_CNOC_MNOC_CFG 78
+#define SLAVE_PCIE_0_CFG 79
+#define SLAVE_PCIE_1_CFG 80
+#define SLAVE_PDM 81
+#define SLAVE_SOUTH_PHY_CFG 82
+#define SLAVE_PIMEM_CFG 83
+#define SLAVE_PRNG 84
+#define SLAVE_QDSS_CFG 85
+#define SLAVE_BLSP_2 86
+#define SLAVE_BLSP_1 87
+#define SLAVE_SDCC_2 88
+#define SLAVE_SDCC_4 89
+#define SLAVE_SNOC_CFG 90
+#define SLAVE_SPDM_WRAPPER 91
+#define SLAVE_SPSS_CFG 92
+#define SLAVE_TCSR 93
+#define SLAVE_TLMM_NORTH 94
+#define SLAVE_TLMM_SOUTH 95
+#define SLAVE_TSIF 96
+#define SLAVE_UFS_CARD_CFG 97
+#define SLAVE_UFS_MEM_CFG 98
+#define SLAVE_USB3_0 99
+#define SLAVE_USB3_1 100
+#define SLAVE_VENUS_CFG 101
+#define SLAVE_VSENSE_CTRL_CFG 102
+#define SLAVE_CNOC_A2NOC 103
+#define SLAVE_SERVICE_CNOC 104
+#define SLAVE_LLCC_CFG 105
+#define SLAVE_MEM_NOC_CFG 106
+#define SLAVE_GNOC_SNOC 107
+#define SLAVE_GNOC_MEM_NOC 108
+#define SLAVE_SERVICE_GNOC 109
+#define SLAVE_EBI1 110
+#define SLAVE_MSS_PROC_MS_MPU_CFG 111
+#define SLAVE_MEM_NOC_GNOC 112
+#define SLAVE_LLCC 113
+#define SLAVE_MEM_NOC_SNOC 114
+#define SLAVE_SERVICE_MEM_NOC 115
+#define SLAVE_MNOC_SF_MEM_NOC 116
+#define SLAVE_MNOC_HF_MEM_NOC 117
+#define SLAVE_SERVICE_MNOC 118
+#define SLAVE_APPSS 119
+#define SLAVE_SNOC_CNOC 120
+#define SLAVE_SNOC_MEM_NOC_GC 121
+#define SLAVE_SNOC_MEM_NOC_SF 122
+#define SLAVE_IMEM 123
+#define SLAVE_PCIE_0 124
+#define SLAVE_PCIE_1 125
+#define SLAVE_PIMEM 126
+#define SLAVE_SERVICE_SNOC 127
+#define SLAVE_QDSS_STM 128
+#define SLAVE_TCU 129
+
+#endif
diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h
index 15d531aa6e78..ef4a7f944848 100644
--- a/include/dt-bindings/power/mt8173-power.h
+++ b/include/dt-bindings/power/mt8173-power.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H
-#define _DT_BINDINGS_POWER_MT8183_POWER_H
+#ifndef _DT_BINDINGS_POWER_MT8173_POWER_H
+#define _DT_BINDINGS_POWER_MT8173_POWER_H
#define MT8173_POWER_DOMAIN_VDEC 0
#define MT8173_POWER_DOMAIN_VENC 1
@@ -13,4 +13,4 @@
#define MT8173_POWER_DOMAIN_MFG_2D 8
#define MT8173_POWER_DOMAIN_MFG 9
-#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */
+#endif /* _DT_BINDINGS_POWER_MT8173_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
new file mode 100644
index 000000000000..87d9c6611682
--- /dev/null
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
+#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
+
+/* SDM845 Power Domain Indexes */
+#define SDM845_EBI 0
+#define SDM845_MX 1
+#define SDM845_MX_AO 2
+#define SDM845_CX 3
+#define SDM845_CX_AO 4
+#define SDM845_LMX 5
+#define SDM845_LCX 6
+#define SDM845_GFX 7
+#define SDM845_MSS 8
+
+/* SDM845 Power Domain performance levels */
+#define RPMH_REGULATOR_LEVEL_RETENTION 16
+#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
+#define RPMH_REGULATOR_LEVEL_SVS 128
+#define RPMH_REGULATOR_LEVEL_SVS_L1 192
+#define RPMH_REGULATOR_LEVEL_NOM 256
+#define RPMH_REGULATOR_LEVEL_NOM_L1 320
+#define RPMH_REGULATOR_LEVEL_NOM_L2 336
+#define RPMH_REGULATOR_LEVEL_TURBO 384
+#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+
+/* MSM8996 Power Domain Indexes */
+#define MSM8996_VDDCX 0
+#define MSM8996_VDDCX_AO 1
+#define MSM8996_VDDCX_VFC 2
+#define MSM8996_VDDMX 3
+#define MSM8996_VDDMX_AO 4
+#define MSM8996_VDDSSCX 5
+#define MSM8996_VDDSSCX_VFC 6
+
+#endif
diff --git a/include/dt-bindings/power/xlnx-zynqmp-power.h b/include/dt-bindings/power/xlnx-zynqmp-power.h
new file mode 100644
index 000000000000..0d9a412fd5e0
--- /dev/null
+++ b/include/dt-bindings/power/xlnx-zynqmp-power.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_ZYNQMP_POWER_H
+#define _DT_BINDINGS_ZYNQMP_POWER_H
+
+#define PD_USB_0 22
+#define PD_USB_1 23
+#define PD_TTC_0 24
+#define PD_TTC_1 25
+#define PD_TTC_2 26
+#define PD_TTC_3 27
+#define PD_SATA 28
+#define PD_ETH_0 29
+#define PD_ETH_1 30
+#define PD_ETH_2 31
+#define PD_ETH_3 32
+#define PD_UART_0 33
+#define PD_UART_1 34
+#define PD_SPI_0 35
+#define PD_SPI_1 36
+#define PD_I2C_0 37
+#define PD_I2C_1 38
+#define PD_SD_0 39
+#define PD_SD_1 40
+#define PD_DP 41
+#define PD_GDMA 42
+#define PD_ADMA 43
+#define PD_NAND 44
+#define PD_QSPI 45
+#define PD_GPIO 46
+#define PD_CAN_0 47
+#define PD_CAN_1 48
+#define PD_GPU 58
+#define PD_PCIE 59
+
+#endif
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
index ad6f55dabd6d..0f2e0fe45ca4 100644
--- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
@@ -1,12 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
* Copyright (c) 2017 Amlogic, inc.
* Author: Yixun Lan <yixun.lan@amlogic.com>
*
- * SPDX-License-Identifier: (GPL-2.0+ OR BSD)
*/
#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
new file mode 100644
index 000000000000..8063e8314eef
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_G12A_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_G12A_RESET_H
+
+/* RESET0 */
+#define RESET_HIU 0
+/* 1 */
+#define RESET_DOS 2
+/* 3-4 */
+#define RESET_VIU 5
+#define RESET_AFIFO 6
+#define RESET_VID_PLL_DIV 7
+/* 8-9 */
+#define RESET_VENC 10
+#define RESET_ASSIST 11
+#define RESET_PCIE_CTRL_A 12
+#define RESET_VCBUS 13
+#define RESET_PCIE_PHY 14
+#define RESET_PCIE_APB 15
+#define RESET_GIC 16
+#define RESET_CAPB3_DECODE 17
+/* 18 */
+#define RESET_HDMITX_CAPB3 19
+#define RESET_DVALIN_CAPB3 20
+#define RESET_DOS_CAPB3 21
+/* 22 */
+#define RESET_CBUS_CAPB3 23
+#define RESET_AHB_CNTL 24
+#define RESET_AHB_DATA 25
+#define RESET_VCBUS_CLK81 26
+/* 27-31 */
+/* RESET1 */
+/* 32 */
+#define RESET_DEMUX 33
+#define RESET_USB 34
+#define RESET_DDR 35
+/* 36 */
+#define RESET_BT656 37
+#define RESET_AHB_SRAM 38
+/* 39 */
+#define RESET_PARSER 40
+/* 41 */
+#define RESET_ISA 42
+#define RESET_ETHERNET 43
+#define RESET_SD_EMMC_A 44
+#define RESET_SD_EMMC_B 45
+#define RESET_SD_EMMC_C 46
+/* 47-60 */
+#define RESET_AUDIO_CODEC 61
+/* 62-63 */
+/* RESET2 */
+/* 64 */
+#define RESET_AUDIO 65
+#define RESET_HDMITX_PHY 66
+/* 67 */
+#define RESET_MIPI_DSI_HOST 68
+#define RESET_ALOCKER 69
+#define RESET_GE2D 70
+#define RESET_PARSER_REG 71
+#define RESET_PARSER_FETCH 72
+#define RESET_CTL 73
+#define RESET_PARSER_TOP 74
+/* 75-77 */
+#define RESET_DVALIN 78
+#define RESET_HDMITX 79
+/* 80-95 */
+/* RESET3 */
+/* 96-104 */
+#define RESET_DEMUX_TOP 105
+#define RESET_DEMUX_DES_PL 106
+#define RESET_DEMUX_S2P_0 107
+#define RESET_DEMUX_S2P_1 108
+#define RESET_DEMUX_0 109
+#define RESET_DEMUX_1 110
+#define RESET_DEMUX_2 111
+/* 112-127 */
+/* RESET4 */
+/* 128-129 */
+#define RESET_MIPI_DSI_PHY 130
+/* 131-132 */
+#define RESET_RDMA 133
+#define RESET_VENCI 134
+#define RESET_VENCP 135
+/* 136 */
+#define RESET_VDAC 137
+/* 138-139 */
+#define RESET_VDI6 140
+#define RESET_VENCL 141
+#define RESET_I2C_M1 142
+#define RESET_I2C_M2 143
+/* 144-159 */
+/* RESET5 */
+/* 160-191 */
+/* RESET6 */
+#define RESET_GEN 192
+#define RESET_SPICC0 193
+#define RESET_SC 194
+#define RESET_SANA_3 195
+#define RESET_I2C_M0 196
+#define RESET_TS_PLL 197
+#define RESET_SPICC1 198
+#define RESET_STREAM 199
+#define RESET_TS_CPU 200
+#define RESET_UART0 201
+#define RESET_UART1_2 202
+#define RESET_ASYNC0 203
+#define RESET_ASYNC1 204
+#define RESET_SPIFC0 205
+#define RESET_I2C_M3 206
+/* 207-223 */
+/* RESET7 */
+#define RESET_USB_DDR_0 224
+#define RESET_USB_DDR_1 225
+#define RESET_USB_DDR_2 226
+#define RESET_USB_DDR_3 227
+#define RESET_TS_GPU 228
+#define RESET_DEVICE_MMC_ARB 229
+#define RESET_DVALIN_DMC_PIPL 230
+#define RESET_VID_LOCK 231
+#define RESET_NIC_DMC_PIPL 232
+#define RESET_DMC_VPU_PIPL 233
+#define RESET_GE2D_DMC_PIPL 234
+#define RESET_HCODEC_DMC_PIPL 235
+#define RESET_WAVE420_DMC_PIPL 236
+#define RESET_HEVCF_DMC_PIPL 237
+/* 238-255 */
+
+#endif
diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h
new file mode 100644
index 000000000000..57c592498aa0
--- /dev/null
+++ b/include/dt-bindings/reset/imx8mq-reset.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Zodiac Inflight Innovations
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ */
+
+#ifndef DT_BINDING_RESET_IMX8MQ_H
+#define DT_BINDING_RESET_IMX8MQ_H
+
+#define IMX8MQ_RESET_A53_CORE_POR_RESET0 0
+#define IMX8MQ_RESET_A53_CORE_POR_RESET1 1
+#define IMX8MQ_RESET_A53_CORE_POR_RESET2 2
+#define IMX8MQ_RESET_A53_CORE_POR_RESET3 3
+#define IMX8MQ_RESET_A53_CORE_RESET0 4
+#define IMX8MQ_RESET_A53_CORE_RESET1 5
+#define IMX8MQ_RESET_A53_CORE_RESET2 6
+#define IMX8MQ_RESET_A53_CORE_RESET3 7
+#define IMX8MQ_RESET_A53_DBG_RESET0 8
+#define IMX8MQ_RESET_A53_DBG_RESET1 9
+#define IMX8MQ_RESET_A53_DBG_RESET2 10
+#define IMX8MQ_RESET_A53_DBG_RESET3 11
+#define IMX8MQ_RESET_A53_ETM_RESET0 12
+#define IMX8MQ_RESET_A53_ETM_RESET1 13
+#define IMX8MQ_RESET_A53_ETM_RESET2 14
+#define IMX8MQ_RESET_A53_ETM_RESET3 15
+#define IMX8MQ_RESET_A53_SOC_DBG_RESET 16
+#define IMX8MQ_RESET_A53_L2RESET 17
+#define IMX8MQ_RESET_SW_NON_SCLR_M4C_RST 18
+#define IMX8MQ_RESET_OTG1_PHY_RESET 19
+#define IMX8MQ_RESET_OTG2_PHY_RESET 20
+#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21
+#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22
+#define IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N 23
+#define IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N 24
+#define IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N 25
+#define IMX8MQ_RESET_PCIEPHY 26
+#define IMX8MQ_RESET_PCIEPHY_PERST 27
+#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28
+#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29
+#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30
+#define IMX8MQ_RESET_DISP_RESET 31
+#define IMX8MQ_RESET_GPU_RESET 32
+#define IMX8MQ_RESET_VPU_RESET 33
+#define IMX8MQ_RESET_PCIEPHY2 34
+#define IMX8MQ_RESET_PCIEPHY2_PERST 35
+#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36
+#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37
+#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38
+#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39
+#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40
+#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41
+#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42
+#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43
+#define IMX8MQ_RESET_DDRC1_PRST 44
+#define IMX8MQ_RESET_DDRC1_CORE_RESET 45
+#define IMX8MQ_RESET_DDRC1_PHY_RESET 46
+#define IMX8MQ_RESET_DDRC2_PRST 47
+#define IMX8MQ_RESET_DDRC2_CORE_RESET 48
+#define IMX8MQ_RESET_DDRC2_PHY_RESET 49
+
+#define IMX8MQ_RESET_NUM 50
+
+#endif
diff --git a/include/dt-bindings/reset/xlnx-zynqmp-resets.h b/include/dt-bindings/reset/xlnx-zynqmp-resets.h
new file mode 100644
index 000000000000..d44525b9f8db
--- /dev/null
+++ b/include/dt-bindings/reset/xlnx-zynqmp-resets.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_ZYNQMP_RESETS_H
+#define _DT_BINDINGS_ZYNQMP_RESETS_H
+
+#define ZYNQMP_RESET_PCIE_CFG 0
+#define ZYNQMP_RESET_PCIE_BRIDGE 1
+#define ZYNQMP_RESET_PCIE_CTRL 2
+#define ZYNQMP_RESET_DP 3
+#define ZYNQMP_RESET_SWDT_CRF 4
+#define ZYNQMP_RESET_AFI_FM5 5
+#define ZYNQMP_RESET_AFI_FM4 6
+#define ZYNQMP_RESET_AFI_FM3 7
+#define ZYNQMP_RESET_AFI_FM2 8
+#define ZYNQMP_RESET_AFI_FM1 9
+#define ZYNQMP_RESET_AFI_FM0 10
+#define ZYNQMP_RESET_GDMA 11
+#define ZYNQMP_RESET_GPU_PP1 12
+#define ZYNQMP_RESET_GPU_PP0 13
+#define ZYNQMP_RESET_GPU 14
+#define ZYNQMP_RESET_GT 15
+#define ZYNQMP_RESET_SATA 16
+#define ZYNQMP_RESET_ACPU3_PWRON 17
+#define ZYNQMP_RESET_ACPU2_PWRON 18
+#define ZYNQMP_RESET_ACPU1_PWRON 19
+#define ZYNQMP_RESET_ACPU0_PWRON 20
+#define ZYNQMP_RESET_APU_L2 21
+#define ZYNQMP_RESET_ACPU3 22
+#define ZYNQMP_RESET_ACPU2 23
+#define ZYNQMP_RESET_ACPU1 24
+#define ZYNQMP_RESET_ACPU0 25
+#define ZYNQMP_RESET_DDR 26
+#define ZYNQMP_RESET_APM_FPD 27
+#define ZYNQMP_RESET_SOFT 28
+#define ZYNQMP_RESET_GEM0 29
+#define ZYNQMP_RESET_GEM1 30
+#define ZYNQMP_RESET_GEM2 31
+#define ZYNQMP_RESET_GEM3 32
+#define ZYNQMP_RESET_QSPI 33
+#define ZYNQMP_RESET_UART0 34
+#define ZYNQMP_RESET_UART1 35
+#define ZYNQMP_RESET_SPI0 36
+#define ZYNQMP_RESET_SPI1 37
+#define ZYNQMP_RESET_SDIO0 38
+#define ZYNQMP_RESET_SDIO1 39
+#define ZYNQMP_RESET_CAN0 40
+#define ZYNQMP_RESET_CAN1 41
+#define ZYNQMP_RESET_I2C0 42
+#define ZYNQMP_RESET_I2C1 43
+#define ZYNQMP_RESET_TTC0 44
+#define ZYNQMP_RESET_TTC1 45
+#define ZYNQMP_RESET_TTC2 46
+#define ZYNQMP_RESET_TTC3 47
+#define ZYNQMP_RESET_SWDT_CRL 48
+#define ZYNQMP_RESET_NAND 49
+#define ZYNQMP_RESET_ADMA 50
+#define ZYNQMP_RESET_GPIO 51
+#define ZYNQMP_RESET_IOU_CC 52
+#define ZYNQMP_RESET_TIMESTAMP 53
+#define ZYNQMP_RESET_RPU_R50 54
+#define ZYNQMP_RESET_RPU_R51 55
+#define ZYNQMP_RESET_RPU_AMBA 56
+#define ZYNQMP_RESET_OCM 57
+#define ZYNQMP_RESET_RPU_PGE 58
+#define ZYNQMP_RESET_USB0_CORERESET 59
+#define ZYNQMP_RESET_USB1_CORERESET 60
+#define ZYNQMP_RESET_USB0_HIBERRESET 61
+#define ZYNQMP_RESET_USB1_HIBERRESET 62
+#define ZYNQMP_RESET_USB0_APB 63
+#define ZYNQMP_RESET_USB1_APB 64
+#define ZYNQMP_RESET_IPI 65
+#define ZYNQMP_RESET_APM_LPD 66
+#define ZYNQMP_RESET_RTC 67
+#define ZYNQMP_RESET_SYSMON 68
+#define ZYNQMP_RESET_AFI_FM6 69
+#define ZYNQMP_RESET_LPD_SWDT 70
+#define ZYNQMP_RESET_FPD 71
+#define ZYNQMP_RESET_RPU_DBG1 72
+#define ZYNQMP_RESET_RPU_DBG0 73
+#define ZYNQMP_RESET_DBG_LPD 74
+#define ZYNQMP_RESET_DBG_FPD 75
+#define ZYNQMP_RESET_APLL 76
+#define ZYNQMP_RESET_DPLL 77
+#define ZYNQMP_RESET_VPLL 78
+#define ZYNQMP_RESET_IOPLL 79
+#define ZYNQMP_RESET_RPLL 80
+#define ZYNQMP_RESET_GPO3_PL_0 81
+#define ZYNQMP_RESET_GPO3_PL_1 82
+#define ZYNQMP_RESET_GPO3_PL_2 83
+#define ZYNQMP_RESET_GPO3_PL_3 84
+#define ZYNQMP_RESET_GPO3_PL_4 85
+#define ZYNQMP_RESET_GPO3_PL_5 86
+#define ZYNQMP_RESET_GPO3_PL_6 87
+#define ZYNQMP_RESET_GPO3_PL_7 88
+#define ZYNQMP_RESET_GPO3_PL_8 89
+#define ZYNQMP_RESET_GPO3_PL_9 90
+#define ZYNQMP_RESET_GPO3_PL_10 91
+#define ZYNQMP_RESET_GPO3_PL_11 92
+#define ZYNQMP_RESET_GPO3_PL_12 93
+#define ZYNQMP_RESET_GPO3_PL_13 94
+#define ZYNQMP_RESET_GPO3_PL_14 95
+#define ZYNQMP_RESET_GPO3_PL_15 96
+#define ZYNQMP_RESET_GPO3_PL_16 97
+#define ZYNQMP_RESET_GPO3_PL_17 98
+#define ZYNQMP_RESET_GPO3_PL_18 99
+#define ZYNQMP_RESET_GPO3_PL_19 100
+#define ZYNQMP_RESET_GPO3_PL_20 101
+#define ZYNQMP_RESET_GPO3_PL_21 102
+#define ZYNQMP_RESET_GPO3_PL_22 103
+#define ZYNQMP_RESET_GPO3_PL_23 104
+#define ZYNQMP_RESET_GPO3_PL_24 105
+#define ZYNQMP_RESET_GPO3_PL_25 106
+#define ZYNQMP_RESET_GPO3_PL_26 107
+#define ZYNQMP_RESET_GPO3_PL_27 108
+#define ZYNQMP_RESET_GPO3_PL_28 109
+#define ZYNQMP_RESET_GPO3_PL_29 110
+#define ZYNQMP_RESET_GPO3_PL_30 111
+#define ZYNQMP_RESET_GPO3_PL_31 112
+#define ZYNQMP_RESET_RPU_LS 113
+#define ZYNQMP_RESET_PS_ONLY 114
+#define ZYNQMP_RESET_PL 115
+#define ZYNQMP_RESET_PS_PL0 116
+#define ZYNQMP_RESET_PS_PL1 117
+#define ZYNQMP_RESET_PS_PL2 118
+#define ZYNQMP_RESET_PS_PL3 119
+
+#endif
diff --git a/include/dt-bindings/soc/bcm2835-pm.h b/include/dt-bindings/soc/bcm2835-pm.h
new file mode 100644
index 000000000000..153d75b8d99f
--- /dev/null
+++ b/include/dt-bindings/soc/bcm2835-pm.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef _DT_BINDINGS_ARM_BCM2835_PM_H
+#define _DT_BINDINGS_ARM_BCM2835_PM_H
+
+#define BCM2835_POWER_DOMAIN_GRAFX 0
+#define BCM2835_POWER_DOMAIN_GRAFX_V3D 1
+#define BCM2835_POWER_DOMAIN_IMAGE 2
+#define BCM2835_POWER_DOMAIN_IMAGE_PERI 3
+#define BCM2835_POWER_DOMAIN_IMAGE_ISP 4
+#define BCM2835_POWER_DOMAIN_IMAGE_H264 5
+#define BCM2835_POWER_DOMAIN_USB 6
+#define BCM2835_POWER_DOMAIN_DSI0 7
+#define BCM2835_POWER_DOMAIN_DSI1 8
+#define BCM2835_POWER_DOMAIN_CAM0 9
+#define BCM2835_POWER_DOMAIN_CAM1 10
+#define BCM2835_POWER_DOMAIN_CCP2TX 11
+#define BCM2835_POWER_DOMAIN_HDMI 12
+
+#define BCM2835_POWER_DOMAIN_COUNT 13
+
+#define BCM2835_RESET_V3D 0
+#define BCM2835_RESET_ISP 1
+#define BCM2835_RESET_H264 2
+
+#define BCM2835_RESET_COUNT 3
+
+#endif /* _DT_BINDINGS_ARM_BCM2835_PM_H */
diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h
new file mode 100644
index 000000000000..a726dd3f1dc6
--- /dev/null
+++ b/include/keys/request_key_auth-type.h
@@ -0,0 +1,36 @@
+/* request_key authorisation token key type
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H
+#define _KEYS_REQUEST_KEY_AUTH_TYPE_H
+
+#include <linux/key.h>
+
+/*
+ * Authorisation record for request_key().
+ */
+struct request_key_auth {
+ struct key *target_key;
+ struct key *dest_keyring;
+ const struct cred *cred;
+ void *callout_info;
+ size_t callout_len;
+ pid_t pid;
+ char op[8];
+} __randomize_layout;
+
+static inline struct request_key_auth *get_request_key_auth(const struct key *key)
+{
+ return key->payload.data[0];
+}
+
+
+#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */
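As an aside (not part of the patch above), a minimal sketch of how the new accessor is intended to be used in place of open-coded key->payload.data[0] dereferences; the function name below is hypothetical and included only for illustration:

#include <linux/printk.h>
#include <keys/request_key_auth-type.h>

/* Hypothetical helper: dump a few fields of an authorisation key. */
static void example_describe_authkey(const struct key *authkey)
{
	const struct request_key_auth *rka = get_request_key_auth(authkey);

	pr_info("auth key %d targets key %d (op %s)\n",
		authkey->serial, rka->target_key->serial, rka->op);
}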
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index e098cbe27db5..12babe991594 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -31,7 +31,7 @@
struct user_key_payload {
struct rcu_head rcu; /* RCU destructor */
unsigned short datalen; /* length of this data */
- char data[0]; /* actual data */
+ char data[0] __aligned(__alignof__(u64)); /* actual data */
};
extern struct key_type key_type_user;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4f31f96bbfab..c36c86f1ec9a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -100,7 +100,7 @@ enum vgic_irq_config {
};
struct vgic_irq {
- spinlock_t irq_lock; /* Protects the content of the struct */
+ raw_spinlock_t irq_lock; /* Protects the content of the struct */
struct list_head lpi_list; /* Used to link all LPIs together */
struct list_head ap_list;
@@ -256,7 +256,7 @@ struct vgic_dist {
u64 propbaser;
/* Protects the lpi_list and the count value below. */
- spinlock_t lpi_list_lock;
+ raw_spinlock_t lpi_list_lock;
struct list_head lpi_list_head;
int lpi_list_count;
@@ -307,7 +307,7 @@ struct vgic_cpu {
unsigned int used_lrs;
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
- spinlock_t ap_list_lock; /* Protects the ap_list */
+ raw_spinlock_t ap_list_lock; /* Protects the ap_list */
/*
* List of IRQs that this VCPU should consider because they are either
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 87715f20b69a..03b4c4f225d0 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1014,6 +1014,13 @@ struct acpi_gpio_mapping {
/* Ignore IoRestriction field */
#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0)
+/*
+ * When an ACPI GPIO mapping table is in use, the index parameter inside
+ * it refers to a GPIO resource in the _CRS method. That index does not
+ * distinguish the actual type of the resource. When the consumer wants
+ * to get a GpioIo type explicitly, this quirk may be used.
+ */
+#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1)
unsigned int quirks;
};
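For illustration (not from the patch above), a sketch of a driver-side mapping table that sets the new quirk, assuming the existing acpi_gpio_params / acpi_gpio_mapping / devm_acpi_dev_add_driver_gpios() API; the property name, _CRS index and probe function are hypothetical:

#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* _CRS entry 0, line 0, not active-low (hypothetical values). */
static const struct acpi_gpio_params reset_gpio = { 0, 0, false };

static const struct acpi_gpio_mapping example_acpi_gpios[] = {
	/*
	 * Ask for _CRS index 0 to be resolved as GpioIo only, even if a
	 * GpioInt resource shares that index.
	 */
	{ "reset-gpios", &reset_gpio, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
	{ }
};

static int example_probe(struct device *dev)
{
	struct gpio_desc *reset;
	int ret;

	ret = devm_acpi_dev_add_driver_gpios(dev, example_acpi_gpios);
	if (ret)
		return ret;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	return PTR_ERR_OR_ZERO(reset);
}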
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 942afbd544b7..3305ea7f9dc7 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -11,7 +11,11 @@ enum sdei_conduit_types {
CONDUIT_HVC,
};
+#include <acpi/ghes.h>
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
#include <asm/sdei.h>
+#endif
/* Arch code should override this to set the entry point from firmware... */
#ifndef sdei_arch_get_entry_point
@@ -39,6 +43,11 @@ int sdei_event_unregister(u32 event_num);
int sdei_event_enable(u32 event_num);
int sdei_event_disable(u32 event_num);
+/* GHES register/unregister helpers */
+int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
+ sdei_event_callback *critical_cb);
+int sdei_unregister_ghes(struct ghes *ghes);
+
#ifdef CONFIG_ARM_SDE_INTERFACE
/* For use by arch code when CPU hotplug notifiers are not appropriate. */
int sdei_mask_local_cpu(void);
diff --git a/include/linux/async.h b/include/linux/async.h
index 6b0226bdaadc..f81d6dbffe68 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -14,6 +14,8 @@
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/numa.h>
+#include <linux/device.h>
typedef u64 async_cookie_t;
typedef void (*async_func_t) (void *data, async_cookie_t cookie);
@@ -37,9 +39,83 @@ struct async_domain {
struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
.registered = 0 }
-extern async_cookie_t async_schedule(async_func_t func, void *data);
-extern async_cookie_t async_schedule_domain(async_func_t func, void *data,
- struct async_domain *domain);
+async_cookie_t async_schedule_node(async_func_t func, void *data,
+ int node);
+async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+ int node,
+ struct async_domain *domain);
+
+/**
+ * async_schedule - schedule a function for asynchronous execution
+ * @func: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t async_schedule(async_func_t func, void *data)
+{
+ return async_schedule_node(func, data, NUMA_NO_NODE);
+}
+
+/**
+ * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
+ * @func: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ * @domain: the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_domain(async_func_t func, void *data,
+ struct async_domain *domain)
+{
+ return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain);
+}
+
+/**
+ * async_schedule_dev - A device specific version of async_schedule
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function. By doing this we can try to
+ * provide for the best possible outcome by operating on the device on the
+ * CPUs closest to the device.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_dev(async_func_t func, struct device *dev)
+{
+ return async_schedule_node(func, dev, dev_to_node(dev));
+}
+
+/**
+ * async_schedule_dev_domain - A device specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ * @domain: the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function. By doing this we can try to
+ * provide for the best possible outcome by operating on the device on the
+ * CPUs closest to the device.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_dev_domain(async_func_t func, struct device *dev,
+ struct async_domain *domain)
+{
+ return async_schedule_node_domain(func, dev, dev_to_node(dev), domain);
+}
+
void async_unregister_domain(struct async_domain *domain);
extern void async_synchronize_full(void);
extern void async_synchronize_full_domain(struct async_domain *domain);
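To illustrate the new device-aware helpers (a sketch, not part of the patch above; the callback and callers are hypothetical), a driver might defer slow initialisation with async_schedule_dev() so the work is queued near the device's NUMA node and then wait for it with the usual synchronisation calls:

#include <linux/async.h>
#include <linux/device.h>

/* Hypothetical deferred-init callback; @data is the struct device. */
static void example_deferred_init(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	dev_info(dev, "deferred init running (cookie %llu)\n",
		 (unsigned long long)cookie);
}

static void example_kick_off(struct device *dev)
{
	/*
	 * Unlike async_schedule(), async_schedule_dev() picks a CPU close
	 * to dev's NUMA node and passes dev as the callback argument.
	 */
	async_schedule_dev(example_deferred_init, dev);
}

static void example_wait(void)
{
	/* Wait for all outstanding async work, including the above. */
	async_synchronize_full();
}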
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 23f805562f4e..5a90f28d5ff2 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -158,7 +158,7 @@ extern int sysctl_aarp_retransmit_limit;
extern int sysctl_aarp_resolve_time;
#ifdef CONFIG_SYSCTL
-extern void atalk_register_sysctl(void);
+extern int atalk_register_sysctl(void);
extern void atalk_unregister_sysctl(void);
#else
#define atalk_register_sysctl() do { } while(0)
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
new file mode 100644
index 000000000000..a7d240e465c0
--- /dev/null
+++ b/include/linux/atomic-fallback.h
@@ -0,0 +1,2295 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#ifndef xchg_relaxed
+#define xchg_relaxed xchg
+#define xchg_acquire xchg
+#define xchg_release xchg
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define xchg_acquire(...) \
+ __atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define xchg_release(...) \
+ __atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define xchg(...) \
+ __atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+
+#endif /* xchg_relaxed */
+
+#ifndef cmpxchg_relaxed
+#define cmpxchg_relaxed cmpxchg
+#define cmpxchg_acquire cmpxchg
+#define cmpxchg_release cmpxchg
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define cmpxchg_acquire(...) \
+ __atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define cmpxchg_release(...) \
+ __atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define cmpxchg(...) \
+ __atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* cmpxchg_relaxed */
+
+#ifndef cmpxchg64_relaxed
+#define cmpxchg64_relaxed cmpxchg64
+#define cmpxchg64_acquire cmpxchg64
+#define cmpxchg64_release cmpxchg64
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define cmpxchg64_acquire(...) \
+ __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define cmpxchg64_release(...) \
+ __atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define cmpxchg64(...) \
+ __atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* cmpxchg64_relaxed */
+
+#ifndef atomic_read_acquire
+static inline int
+atomic_read_acquire(const atomic_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+#define atomic_read_acquire atomic_read_acquire
+#endif
+
+#ifndef atomic_set_release
+static inline void
+atomic_set_release(atomic_t *v, int i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+#define atomic_set_release atomic_set_release
+#endif
+
+#ifndef atomic_add_return_relaxed
+#define atomic_add_return_acquire atomic_add_return
+#define atomic_add_return_release atomic_add_return
+#define atomic_add_return_relaxed atomic_add_return
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+static inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+ int ret = atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_add_return_acquire atomic_add_return_acquire
+#endif
+
+#ifndef atomic_add_return_release
+static inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_add_return_relaxed(i, v);
+}
+#define atomic_add_return_release atomic_add_return_release
+#endif
+
+#ifndef atomic_add_return
+static inline int
+atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_add_return atomic_add_return
+#endif
+
+#endif /* atomic_add_return_relaxed */
+
+#ifndef atomic_fetch_add_relaxed
+#define atomic_fetch_add_acquire atomic_fetch_add
+#define atomic_fetch_add_release atomic_fetch_add
+#define atomic_fetch_add_relaxed atomic_fetch_add
+#else /* atomic_fetch_add_relaxed */
+
+#ifndef atomic_fetch_add_acquire
+static inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ int ret = atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_fetch_add_acquire atomic_fetch_add_acquire
+#endif
+
+#ifndef atomic_fetch_add_release
+static inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_fetch_add_relaxed(i, v);
+}
+#define atomic_fetch_add_release atomic_fetch_add_release
+#endif
+
+#ifndef atomic_fetch_add
+static inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_fetch_add atomic_fetch_add
+#endif
+
+#endif /* atomic_fetch_add_relaxed */
+
+#ifndef atomic_sub_return_relaxed
+#define atomic_sub_return_acquire atomic_sub_return
+#define atomic_sub_return_release atomic_sub_return
+#define atomic_sub_return_relaxed atomic_sub_return
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+static inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ int ret = atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_sub_return_acquire atomic_sub_return_acquire
+#endif
+
+#ifndef atomic_sub_return_release
+static inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_sub_return_relaxed(i, v);
+}
+#define atomic_sub_return_release atomic_sub_return_release
+#endif
+
+#ifndef atomic_sub_return
+static inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_sub_return atomic_sub_return
+#endif
+
+#endif /* atomic_sub_return_relaxed */
+
+#ifndef atomic_fetch_sub_relaxed
+#define atomic_fetch_sub_acquire atomic_fetch_sub
+#define atomic_fetch_sub_release atomic_fetch_sub
+#define atomic_fetch_sub_relaxed atomic_fetch_sub
+#else /* atomic_fetch_sub_relaxed */
+
+#ifndef atomic_fetch_sub_acquire
+static inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ int ret = atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#endif
+
+#ifndef atomic_fetch_sub_release
+static inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_fetch_sub_relaxed(i, v);
+}
+#define atomic_fetch_sub_release atomic_fetch_sub_release
+#endif
+
+#ifndef atomic_fetch_sub
+static inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_fetch_sub atomic_fetch_sub
+#endif
+
+#endif /* atomic_fetch_sub_relaxed */
+
+#ifndef atomic_inc
+static inline void
+atomic_inc(atomic_t *v)
+{
+ atomic_add(1, v);
+}
+#define atomic_inc atomic_inc
+#endif
+
+#ifndef atomic_inc_return_relaxed
+#ifdef atomic_inc_return
+#define atomic_inc_return_acquire atomic_inc_return
+#define atomic_inc_return_release atomic_inc_return
+#define atomic_inc_return_relaxed atomic_inc_return
+#endif /* atomic_inc_return */
+
+#ifndef atomic_inc_return
+static inline int
+atomic_inc_return(atomic_t *v)
+{
+ return atomic_add_return(1, v);
+}
+#define atomic_inc_return atomic_inc_return
+#endif
+
+#ifndef atomic_inc_return_acquire
+static inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+ return atomic_add_return_acquire(1, v);
+}
+#define atomic_inc_return_acquire atomic_inc_return_acquire
+#endif
+
+#ifndef atomic_inc_return_release
+static inline int
+atomic_inc_return_release(atomic_t *v)
+{
+ return atomic_add_return_release(1, v);
+}
+#define atomic_inc_return_release atomic_inc_return_release
+#endif
+
+#ifndef atomic_inc_return_relaxed
+static inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+ return atomic_add_return_relaxed(1, v);
+}
+#define atomic_inc_return_relaxed atomic_inc_return_relaxed
+#endif
+
+#else /* atomic_inc_return_relaxed */
+
+#ifndef atomic_inc_return_acquire
+static inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+ int ret = atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_inc_return_acquire atomic_inc_return_acquire
+#endif
+
+#ifndef atomic_inc_return_release
+static inline int
+atomic_inc_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_inc_return_relaxed(v);
+}
+#define atomic_inc_return_release atomic_inc_return_release
+#endif
+
+#ifndef atomic_inc_return
+static inline int
+atomic_inc_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_inc_return atomic_inc_return
+#endif
+
+#endif /* atomic_inc_return_relaxed */
+
+#ifndef atomic_fetch_inc_relaxed
+#ifdef atomic_fetch_inc
+#define atomic_fetch_inc_acquire atomic_fetch_inc
+#define atomic_fetch_inc_release atomic_fetch_inc
+#define atomic_fetch_inc_relaxed atomic_fetch_inc
+#endif /* atomic_fetch_inc */
+
+#ifndef atomic_fetch_inc
+static inline int
+atomic_fetch_inc(atomic_t *v)
+{
+ return atomic_fetch_add(1, v);
+}
+#define atomic_fetch_inc atomic_fetch_inc
+#endif
+
+#ifndef atomic_fetch_inc_acquire
+static inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+ return atomic_fetch_add_acquire(1, v);
+}
+#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#endif
+
+#ifndef atomic_fetch_inc_release
+static inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+ return atomic_fetch_add_release(1, v);
+}
+#define atomic_fetch_inc_release atomic_fetch_inc_release
+#endif
+
+#ifndef atomic_fetch_inc_relaxed
+static inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ return atomic_fetch_add_relaxed(1, v);
+}
+#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
+#endif
+
+#else /* atomic_fetch_inc_relaxed */
+
+#ifndef atomic_fetch_inc_acquire
+static inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+ int ret = atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#endif
+
+#ifndef atomic_fetch_inc_release
+static inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_fetch_inc_relaxed(v);
+}
+#define atomic_fetch_inc_release atomic_fetch_inc_release
+#endif
+
+#ifndef atomic_fetch_inc
+static inline int
+atomic_fetch_inc(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_fetch_inc atomic_fetch_inc
+#endif
+
+#endif /* atomic_fetch_inc_relaxed */
+
+#ifndef atomic_dec
+static inline void
+atomic_dec(atomic_t *v)
+{
+ atomic_sub(1, v);
+}
+#define atomic_dec atomic_dec
+#endif
+
+#ifndef atomic_dec_return_relaxed
+#ifdef atomic_dec_return
+#define atomic_dec_return_acquire atomic_dec_return
+#define atomic_dec_return_release atomic_dec_return
+#define atomic_dec_return_relaxed atomic_dec_return
+#endif /* atomic_dec_return */
+
+#ifndef atomic_dec_return
+static inline int
+atomic_dec_return(atomic_t *v)
+{
+ return atomic_sub_return(1, v);
+}
+#define atomic_dec_return atomic_dec_return
+#endif
+
+#ifndef atomic_dec_return_acquire
+static inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+ return atomic_sub_return_acquire(1, v);
+}
+#define atomic_dec_return_acquire atomic_dec_return_acquire
+#endif
+
+#ifndef atomic_dec_return_release
+static inline int
+atomic_dec_return_release(atomic_t *v)
+{
+ return atomic_sub_return_release(1, v);
+}
+#define atomic_dec_return_release atomic_dec_return_release
+#endif
+
+#ifndef atomic_dec_return_relaxed
+static inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+ return atomic_sub_return_relaxed(1, v);
+}
+#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#endif
+
+#else /* atomic_dec_return_relaxed */
+
+#ifndef atomic_dec_return_acquire
+static inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+ int ret = atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_dec_return_acquire atomic_dec_return_acquire
+#endif
+
+#ifndef atomic_dec_return_release
+static inline int
+atomic_dec_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_dec_return_relaxed(v);
+}
+#define atomic_dec_return_release atomic_dec_return_release
+#endif
+
+#ifndef atomic_dec_return
+static inline int
+atomic_dec_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_dec_return atomic_dec_return
+#endif
+
+#endif /* atomic_dec_return_relaxed */
+
+#ifndef atomic_fetch_dec_relaxed
+#ifdef atomic_fetch_dec
+#define atomic_fetch_dec_acquire atomic_fetch_dec
+#define atomic_fetch_dec_release atomic_fetch_dec
+#define atomic_fetch_dec_relaxed atomic_fetch_dec
+#endif /* atomic_fetch_dec */
+
+#ifndef atomic_fetch_dec
+static inline int
+atomic_fetch_dec(atomic_t *v)
+{
+ return atomic_fetch_sub(1, v);
+}
+#define atomic_fetch_dec atomic_fetch_dec
+#endif
+
+#ifndef atomic_fetch_dec_acquire
+static inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+ return atomic_fetch_sub_acquire(1, v);
+}
+#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#endif
+
+#ifndef atomic_fetch_dec_release
+static inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+ return atomic_fetch_sub_release(1, v);
+}
+#define atomic_fetch_dec_release atomic_fetch_dec_release
+#endif
+
+#ifndef atomic_fetch_dec_relaxed
+static inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ return atomic_fetch_sub_relaxed(1, v);
+}
+#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
+#endif
+
+#else /* atomic_fetch_dec_relaxed */
+
+#ifndef atomic_fetch_dec_acquire
+static inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+ int ret = atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#endif
+
+#ifndef atomic_fetch_dec_release
+static inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_fetch_dec_relaxed(v);
+}
+#define atomic_fetch_dec_release atomic_fetch_dec_release
+#endif
+
+#ifndef atomic_fetch_dec
+static inline int
+atomic_fetch_dec(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_fetch_dec atomic_fetch_dec
+#endif
+
+#endif /* atomic_fetch_dec_relaxed */
+
+#ifndef atomic_fetch_and_relaxed
+#define atomic_fetch_and_acquire atomic_fetch_and
+#define atomic_fetch_and_release atomic_fetch_and
+#define atomic_fetch_and_relaxed atomic_fetch_and
+#else /* atomic_fetch_and_relaxed */
+
+#ifndef atomic_fetch_and_acquire
+static inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ int ret = atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_fetch_and_acquire atomic_fetch_and_acquire
+#endif
+
+#ifndef atomic_fetch_and_release
+static inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_fetch_and_relaxed(i, v);
+}
+#define atomic_fetch_and_release atomic_fetch_and_release
+#endif
+
+#ifndef atomic_fetch_and
+static inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_fetch_and atomic_fetch_and
+#endif
+
+#endif /* atomic_fetch_and_relaxed */
+
+#ifndef atomic_andnot
+static inline void
+atomic_andnot(int i, atomic_t *v)
+{
+ atomic_and(~i, v);
+}
+#define atomic_andnot atomic_andnot
+#endif
+
+#ifndef atomic_fetch_andnot_relaxed
+#ifdef atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot
+#define atomic_fetch_andnot_release atomic_fetch_andnot
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
+
+#ifndef atomic_fetch_andnot
+static inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+ return atomic_fetch_and(~i, v);
+}
+#define atomic_fetch_andnot atomic_fetch_andnot
+#endif
+
+#ifndef atomic_fetch_andnot_acquire
+static inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ return atomic_fetch_and_acquire(~i, v);
+}
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#endif
+
+#ifndef atomic_fetch_andnot_release
+static inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ return atomic_fetch_and_release(~i, v);
+}
+#define atomic_fetch_andnot_release atomic_fetch_andnot_release
+#endif
+
+#ifndef atomic_fetch_andnot_relaxed
+static inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ return atomic_fetch_and_relaxed(~i, v);
+}
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#endif
+
+#else /* atomic_fetch_andnot_relaxed */
+
+#ifndef atomic_fetch_andnot_acquire
+static inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ int ret = atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#endif
+
+#ifndef atomic_fetch_andnot_release
+static inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_fetch_andnot_relaxed(i, v);
+}
+#define atomic_fetch_andnot_release atomic_fetch_andnot_release
+#endif
+
+#ifndef atomic_fetch_andnot
+static inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_fetch_andnot atomic_fetch_andnot
+#endif
+
+#endif /* atomic_fetch_andnot_relaxed */
+
+#ifndef atomic_fetch_or_relaxed
+#define atomic_fetch_or_acquire atomic_fetch_or
+#define atomic_fetch_or_release atomic_fetch_or
+#define atomic_fetch_or_relaxed atomic_fetch_or
+#else /* atomic_fetch_or_relaxed */
+
+#ifndef atomic_fetch_or_acquire
+static inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ int ret = atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_fetch_or_acquire atomic_fetch_or_acquire
+#endif
+
+#ifndef atomic_fetch_or_release
+static inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_fetch_or_relaxed(i, v);
+}
+#define atomic_fetch_or_release atomic_fetch_or_release
+#endif
+
+#ifndef atomic_fetch_or
+static inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_fetch_or atomic_fetch_or
+#endif
+
+#endif /* atomic_fetch_or_relaxed */
+
+#ifndef atomic_fetch_xor_relaxed
+#define atomic_fetch_xor_acquire atomic_fetch_xor
+#define atomic_fetch_xor_release atomic_fetch_xor
+#define atomic_fetch_xor_relaxed atomic_fetch_xor
+#else /* atomic_fetch_xor_relaxed */
+
+#ifndef atomic_fetch_xor_acquire
+static inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ int ret = atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
+#endif
+
+#ifndef atomic_fetch_xor_release
+static inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return atomic_fetch_xor_relaxed(i, v);
+}
+#define atomic_fetch_xor_release atomic_fetch_xor_release
+#endif
+
+#ifndef atomic_fetch_xor
+static inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_fetch_xor atomic_fetch_xor
+#endif
+
+#endif /* atomic_fetch_xor_relaxed */
+
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_acquire atomic_xchg
+#define atomic_xchg_release atomic_xchg
+#define atomic_xchg_relaxed atomic_xchg
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+static inline int
+atomic_xchg_acquire(atomic_t *v, int i)
+{
+ int ret = atomic_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_xchg_acquire atomic_xchg_acquire
+#endif
+
+#ifndef atomic_xchg_release
+static inline int
+atomic_xchg_release(atomic_t *v, int i)
+{
+ __atomic_release_fence();
+ return atomic_xchg_relaxed(v, i);
+}
+#define atomic_xchg_release atomic_xchg_release
+#endif
+
+#ifndef atomic_xchg
+static inline int
+atomic_xchg(atomic_t *v, int i)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_xchg atomic_xchg
+#endif
+
+#endif /* atomic_xchg_relaxed */
+
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_acquire atomic_cmpxchg
+#define atomic_cmpxchg_release atomic_cmpxchg
+#define atomic_cmpxchg_relaxed atomic_cmpxchg
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+static inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ int ret = atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#endif
+
+#ifndef atomic_cmpxchg_release
+static inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ __atomic_release_fence();
+ return atomic_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#endif
+
+#ifndef atomic_cmpxchg
+static inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_cmpxchg atomic_cmpxchg
+#endif
+
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_try_cmpxchg_relaxed
+#ifdef atomic_try_cmpxchg
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
+#endif /* atomic_try_cmpxchg */
+
+#ifndef atomic_try_cmpxchg
+static inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+#endif
+
+#ifndef atomic_try_cmpxchg_acquire
+static inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef atomic_try_cmpxchg_release
+static inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#endif
+
+#ifndef atomic_try_cmpxchg_relaxed
+static inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* atomic_try_cmpxchg_relaxed */
+
+#ifndef atomic_try_cmpxchg_acquire
+static inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef atomic_try_cmpxchg_release
+static inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ __atomic_release_fence();
+ return atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#endif
+
+#ifndef atomic_try_cmpxchg
+static inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+#endif
+
+#endif /* atomic_try_cmpxchg_relaxed */
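As an illustration of why the try_cmpxchg() form exists (an editor sketch, not part of the generated header; the function below is hypothetical), it folds the "compare, and on failure reload the observed value" step into one call, which keeps bounded-update loops compact:

/* Increment @v only while it stays below @limit; true if incremented. */
static bool example_inc_below(atomic_t *v, int limit)
{
	int c = atomic_read(v);

	do {
		if (c >= limit)
			return false;
	} while (!atomic_try_cmpxchg(v, &c, c + 1));

	return true;
}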
+
+#ifndef atomic_sub_and_test
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+ return atomic_sub_return(i, v) == 0;
+}
+#define atomic_sub_and_test atomic_sub_and_test
+#endif
+
+#ifndef atomic_dec_and_test
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+ return atomic_dec_return(v) == 0;
+}
+#define atomic_dec_and_test atomic_dec_and_test
+#endif
+
+#ifndef atomic_inc_and_test
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+ return atomic_inc_return(v) == 0;
+}
+#define atomic_inc_and_test atomic_inc_and_test
+#endif
+
+#ifndef atomic_add_negative
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+#define atomic_add_negative atomic_add_negative
+#endif
+
+#ifndef atomic_fetch_add_unless
+/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns original value of @v
+ */
+static inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+#endif
+
+#ifndef atomic_add_unless
+/**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+ return atomic_fetch_add_unless(v, a, u) != u;
+}
+#define atomic_add_unless atomic_add_unless
+#endif
+
+#ifndef atomic_inc_not_zero
+/**
+ * atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+ return atomic_add_unless(v, 1, 0);
+}
+#define atomic_inc_not_zero atomic_inc_not_zero
+#endif
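For illustration (not part of the generated header; the structure and helper below are hypothetical), atomic_inc_not_zero() is the usual building block for the "take a reference only if the object is still live" pattern, since a refcount that has already dropped to zero must never be resurrected:

struct example_obj {
	atomic_t refs;
	/* ... payload ... */
};

/* Return @obj with an extra reference, or NULL if it is already dying. */
static struct example_obj *example_get(struct example_obj *obj)
{
	if (obj && !atomic_inc_not_zero(&obj->refs))
		return NULL;
	return obj;
}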
+
+#ifndef atomic_inc_unless_negative
+static inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define atomic_inc_unless_negative atomic_inc_unless_negative
+#endif
+
+#ifndef atomic_dec_unless_positive
+static inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define atomic_dec_unless_positive atomic_dec_unless_positive
+#endif
+
+#ifndef atomic_dec_if_positive
+static inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+ int dec, c = atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define atomic_dec_if_positive atomic_dec_if_positive
+#endif
+
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+#ifndef atomic64_read_acquire
+static inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+#define atomic64_read_acquire atomic64_read_acquire
+#endif
+
+#ifndef atomic64_set_release
+static inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+#define atomic64_set_release atomic64_set_release
+#endif
+
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_acquire atomic64_add_return
+#define atomic64_add_return_release atomic64_add_return
+#define atomic64_add_return_relaxed atomic64_add_return
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+static inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_add_return_acquire atomic64_add_return_acquire
+#endif
+
+#ifndef atomic64_add_return_release
+static inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_add_return_relaxed(i, v);
+}
+#define atomic64_add_return_release atomic64_add_return_release
+#endif
+
+#ifndef atomic64_add_return
+static inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_add_return atomic64_add_return
+#endif
+
+#endif /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_fetch_add_relaxed
+#define atomic64_fetch_add_acquire atomic64_fetch_add
+#define atomic64_fetch_add_release atomic64_fetch_add
+#define atomic64_fetch_add_relaxed atomic64_fetch_add
+#else /* atomic64_fetch_add_relaxed */
+
+#ifndef atomic64_fetch_add_acquire
+static inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#endif
+
+#ifndef atomic64_fetch_add_release
+static inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_fetch_add_relaxed(i, v);
+}
+#define atomic64_fetch_add_release atomic64_fetch_add_release
+#endif
+
+#ifndef atomic64_fetch_add
+static inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_fetch_add atomic64_fetch_add
+#endif
+
+#endif /* atomic64_fetch_add_relaxed */
+
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_acquire atomic64_sub_return
+#define atomic64_sub_return_release atomic64_sub_return
+#define atomic64_sub_return_relaxed atomic64_sub_return
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+static inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_sub_return_acquire atomic64_sub_return_acquire
+#endif
+
+#ifndef atomic64_sub_return_release
+static inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_sub_return_relaxed(i, v);
+}
+#define atomic64_sub_return_release atomic64_sub_return_release
+#endif
+
+#ifndef atomic64_sub_return
+static inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_sub_return atomic64_sub_return
+#endif
+
+#endif /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_fetch_sub_relaxed
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub
+#define atomic64_fetch_sub_release atomic64_fetch_sub
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
+#else /* atomic64_fetch_sub_relaxed */
+
+#ifndef atomic64_fetch_sub_acquire
+static inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#endif
+
+#ifndef atomic64_fetch_sub_release
+static inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_fetch_sub_relaxed(i, v);
+}
+#define atomic64_fetch_sub_release atomic64_fetch_sub_release
+#endif
+
+#ifndef atomic64_fetch_sub
+static inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_fetch_sub atomic64_fetch_sub
+#endif
+
+#endif /* atomic64_fetch_sub_relaxed */
+
+#ifndef atomic64_inc
+static inline void
+atomic64_inc(atomic64_t *v)
+{
+ atomic64_add(1, v);
+}
+#define atomic64_inc atomic64_inc
+#endif
+
+#ifndef atomic64_inc_return_relaxed
+#ifdef atomic64_inc_return
+#define atomic64_inc_return_acquire atomic64_inc_return
+#define atomic64_inc_return_release atomic64_inc_return
+#define atomic64_inc_return_relaxed atomic64_inc_return
+#endif /* atomic64_inc_return */
+
+#ifndef atomic64_inc_return
+static inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+ return atomic64_add_return(1, v);
+}
+#define atomic64_inc_return atomic64_inc_return
+#endif
+
+#ifndef atomic64_inc_return_acquire
+static inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+ return atomic64_add_return_acquire(1, v);
+}
+#define atomic64_inc_return_acquire atomic64_inc_return_acquire
+#endif
+
+#ifndef atomic64_inc_return_release
+static inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+ return atomic64_add_return_release(1, v);
+}
+#define atomic64_inc_return_release atomic64_inc_return_release
+#endif
+
+#ifndef atomic64_inc_return_relaxed
+static inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ return atomic64_add_return_relaxed(1, v);
+}
+#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+#endif
+
+#else /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_inc_return_acquire
+static inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+ s64 ret = atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_inc_return_acquire atomic64_inc_return_acquire
+#endif
+
+#ifndef atomic64_inc_return_release
+static inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_inc_return_relaxed(v);
+}
+#define atomic64_inc_return_release atomic64_inc_return_release
+#endif
+
+#ifndef atomic64_inc_return
+static inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_inc_return atomic64_inc_return
+#endif
+
+#endif /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_fetch_inc_relaxed
+#ifdef atomic64_fetch_inc
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc
+#define atomic64_fetch_inc_release atomic64_fetch_inc
+#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
+#endif /* atomic64_fetch_inc */
+
+#ifndef atomic64_fetch_inc
+static inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+ return atomic64_fetch_add(1, v);
+}
+#define atomic64_fetch_inc atomic64_fetch_inc
+#endif
+
+#ifndef atomic64_fetch_inc_acquire
+static inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ return atomic64_fetch_add_acquire(1, v);
+}
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#endif
+
+#ifndef atomic64_fetch_inc_release
+static inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+ return atomic64_fetch_add_release(1, v);
+}
+#define atomic64_fetch_inc_release atomic64_fetch_inc_release
+#endif
+
+#ifndef atomic64_fetch_inc_relaxed
+static inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ return atomic64_fetch_add_relaxed(1, v);
+}
+#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
+#endif
+
+#else /* atomic64_fetch_inc_relaxed */
+
+#ifndef atomic64_fetch_inc_acquire
+static inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ s64 ret = atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#endif
+
+#ifndef atomic64_fetch_inc_release
+static inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_fetch_inc_relaxed(v);
+}
+#define atomic64_fetch_inc_release atomic64_fetch_inc_release
+#endif
+
+#ifndef atomic64_fetch_inc
+static inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_fetch_inc atomic64_fetch_inc
+#endif
+
+#endif /* atomic64_fetch_inc_relaxed */
+
+#ifndef atomic64_dec
+static inline void
+atomic64_dec(atomic64_t *v)
+{
+ atomic64_sub(1, v);
+}
+#define atomic64_dec atomic64_dec
+#endif
+
+#ifndef atomic64_dec_return_relaxed
+#ifdef atomic64_dec_return
+#define atomic64_dec_return_acquire atomic64_dec_return
+#define atomic64_dec_return_release atomic64_dec_return
+#define atomic64_dec_return_relaxed atomic64_dec_return
+#endif /* atomic64_dec_return */
+
+#ifndef atomic64_dec_return
+static inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+ return atomic64_sub_return(1, v);
+}
+#define atomic64_dec_return atomic64_dec_return
+#endif
+
+#ifndef atomic64_dec_return_acquire
+static inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+ return atomic64_sub_return_acquire(1, v);
+}
+#define atomic64_dec_return_acquire atomic64_dec_return_acquire
+#endif
+
+#ifndef atomic64_dec_return_release
+static inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+ return atomic64_sub_return_release(1, v);
+}
+#define atomic64_dec_return_release atomic64_dec_return_release
+#endif
+
+#ifndef atomic64_dec_return_relaxed
+static inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ return atomic64_sub_return_relaxed(1, v);
+}
+#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#endif
+
+#else /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_dec_return_acquire
+static inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+ s64 ret = atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_dec_return_acquire atomic64_dec_return_acquire
+#endif
+
+#ifndef atomic64_dec_return_release
+static inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_dec_return_relaxed(v);
+}
+#define atomic64_dec_return_release atomic64_dec_return_release
+#endif
+
+#ifndef atomic64_dec_return
+static inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_dec_return atomic64_dec_return
+#endif
+
+#endif /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_fetch_dec_relaxed
+#ifdef atomic64_fetch_dec
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec
+#define atomic64_fetch_dec_release atomic64_fetch_dec
+#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
+#endif /* atomic64_fetch_dec */
+
+#ifndef atomic64_fetch_dec
+static inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+ return atomic64_fetch_sub(1, v);
+}
+#define atomic64_fetch_dec atomic64_fetch_dec
+#endif
+
+#ifndef atomic64_fetch_dec_acquire
+static inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ return atomic64_fetch_sub_acquire(1, v);
+}
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#endif
+
+#ifndef atomic64_fetch_dec_release
+static inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+ return atomic64_fetch_sub_release(1, v);
+}
+#define atomic64_fetch_dec_release atomic64_fetch_dec_release
+#endif
+
+#ifndef atomic64_fetch_dec_relaxed
+static inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ return atomic64_fetch_sub_relaxed(1, v);
+}
+#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
+#endif
+
+#else /* atomic64_fetch_dec_relaxed */
+
+#ifndef atomic64_fetch_dec_acquire
+static inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ s64 ret = atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#endif
+
+#ifndef atomic64_fetch_dec_release
+static inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_fetch_dec_relaxed(v);
+}
+#define atomic64_fetch_dec_release atomic64_fetch_dec_release
+#endif
+
+#ifndef atomic64_fetch_dec
+static inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_fetch_dec atomic64_fetch_dec
+#endif
+
+#endif /* atomic64_fetch_dec_relaxed */
+
+#ifndef atomic64_fetch_and_relaxed
+#define atomic64_fetch_and_acquire atomic64_fetch_and
+#define atomic64_fetch_and_release atomic64_fetch_and
+#define atomic64_fetch_and_relaxed atomic64_fetch_and
+#else /* atomic64_fetch_and_relaxed */
+
+#ifndef atomic64_fetch_and_acquire
+static inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#endif
+
+#ifndef atomic64_fetch_and_release
+static inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_fetch_and_relaxed(i, v);
+}
+#define atomic64_fetch_and_release atomic64_fetch_and_release
+#endif
+
+#ifndef atomic64_fetch_and
+static inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_fetch_and atomic64_fetch_and
+#endif
+
+#endif /* atomic64_fetch_and_relaxed */
+
+#ifndef atomic64_andnot
+static inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+ atomic64_and(~i, v);
+}
+#define atomic64_andnot atomic64_andnot
+#endif
+
+#ifndef atomic64_fetch_andnot_relaxed
+#ifdef atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
+
+#ifndef atomic64_fetch_andnot
+static inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ return atomic64_fetch_and(~i, v);
+}
+#define atomic64_fetch_andnot atomic64_fetch_andnot
+#endif
+
+#ifndef atomic64_fetch_andnot_acquire
+static inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ return atomic64_fetch_and_acquire(~i, v);
+}
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef atomic64_fetch_andnot_release
+static inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ return atomic64_fetch_and_release(~i, v);
+}
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#endif
+
+#ifndef atomic64_fetch_andnot_relaxed
+static inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ return atomic64_fetch_and_relaxed(~i, v);
+}
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* atomic64_fetch_andnot_relaxed */
+
+#ifndef atomic64_fetch_andnot_acquire
+static inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef atomic64_fetch_andnot_release
+static inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_fetch_andnot_relaxed(i, v);
+}
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#endif
+
+#ifndef atomic64_fetch_andnot
+static inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_fetch_andnot atomic64_fetch_andnot
+#endif
+
+#endif /* atomic64_fetch_andnot_relaxed */
+
+#ifndef atomic64_fetch_or_relaxed
+#define atomic64_fetch_or_acquire atomic64_fetch_or
+#define atomic64_fetch_or_release atomic64_fetch_or
+#define atomic64_fetch_or_relaxed atomic64_fetch_or
+#else /* atomic64_fetch_or_relaxed */
+
+#ifndef atomic64_fetch_or_acquire
+static inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
+#endif
+
+#ifndef atomic64_fetch_or_release
+static inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_fetch_or_relaxed(i, v);
+}
+#define atomic64_fetch_or_release atomic64_fetch_or_release
+#endif
+
+#ifndef atomic64_fetch_or
+static inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_fetch_or atomic64_fetch_or
+#endif
+
+#endif /* atomic64_fetch_or_relaxed */
+
+#ifndef atomic64_fetch_xor_relaxed
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor
+#define atomic64_fetch_xor_release atomic64_fetch_xor
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
+#else /* atomic64_fetch_xor_relaxed */
+
+#ifndef atomic64_fetch_xor_acquire
+static inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
+#endif
+
+#ifndef atomic64_fetch_xor_release
+static inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return atomic64_fetch_xor_relaxed(i, v);
+}
+#define atomic64_fetch_xor_release atomic64_fetch_xor_release
+#endif
+
+#ifndef atomic64_fetch_xor
+static inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_fetch_xor atomic64_fetch_xor
+#endif
+
+#endif /* atomic64_fetch_xor_relaxed */
+
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_acquire atomic64_xchg
+#define atomic64_xchg_release atomic64_xchg
+#define atomic64_xchg_relaxed atomic64_xchg
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+static inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ s64 ret = atomic64_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_xchg_acquire atomic64_xchg_acquire
+#endif
+
+#ifndef atomic64_xchg_release
+static inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ __atomic_release_fence();
+ return atomic64_xchg_relaxed(v, i);
+}
+#define atomic64_xchg_release atomic64_xchg_release
+#endif
+
+#ifndef atomic64_xchg
+static inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_xchg atomic64_xchg
+#endif
+
+#endif /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg
+#define atomic64_cmpxchg_release atomic64_cmpxchg
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+static inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
+#endif
+
+#ifndef atomic64_cmpxchg_release
+static inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ __atomic_release_fence();
+ return atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define atomic64_cmpxchg_release atomic64_cmpxchg_release
+#endif
+
+#ifndef atomic64_cmpxchg
+static inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_cmpxchg atomic64_cmpxchg
+#endif
+
+#endif /* atomic64_cmpxchg_relaxed */
+
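The cmpxchg family above is the usual building block for one-time initialization: a full-fence compare-and-swap claims the slot, and a release operation publishes the finished state. A hedged sketch using only operations defined in this header; the state machine and helper names are hypothetical:

#include <linux/atomic.h>
#include <linux/types.h>

enum { STATE_UNINIT = 0, STATE_BUSY = 1, STATE_READY = 2 };

static atomic64_t init_state = ATOMIC64_INIT(STATE_UNINIT);

/* Claim one-time initialization; true only for the caller that won the race. */
static bool claim_init(void)
{
        return atomic64_cmpxchg(&init_state, STATE_UNINIT, STATE_BUSY) == STATE_UNINIT;
}

/* Publish the finished state; release ordering makes the setup visible first. */
static void publish_init(void)
{
        atomic64_xchg_release(&init_state, STATE_READY);
}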
+#ifndef atomic64_try_cmpxchg_relaxed
+#ifdef atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
+#endif /* atomic64_try_cmpxchg */
+
+#ifndef atomic64_try_cmpxchg
+static inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+#endif
+
+#ifndef atomic64_try_cmpxchg_acquire
+static inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef atomic64_try_cmpxchg_release
+static inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#endif
+
+#ifndef atomic64_try_cmpxchg_relaxed
+static inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* atomic64_try_cmpxchg_relaxed */
+
+#ifndef atomic64_try_cmpxchg_acquire
+static inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef atomic64_try_cmpxchg_release
+static inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ __atomic_release_fence();
+ return atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#endif
+
+#ifndef atomic64_try_cmpxchg
+static inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+#endif
+
+#endif /* atomic64_try_cmpxchg_relaxed */
+
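The try_cmpxchg variants above refresh *old with the current value on failure, so retry loops never have to re-read the atomic by hand; the loops later in this header (inc_unless_negative and friends) all follow that shape. A minimal sketch of the same pattern as a hypothetical saturating increment:

#include <linux/atomic.h>
#include <linux/types.h>

/* Increment @v unless it has already reached @limit; false once the limit is hit. */
static bool inc64_below(atomic64_t *v, s64 limit)
{
        s64 old = atomic64_read(v);

        do {
                if (old >= limit)
                        return false;
                /* On failure, try_cmpxchg has refreshed old with the current value. */
        } while (!atomic64_try_cmpxchg(v, &old, old + 1));

        return true;
}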
+#ifndef atomic64_sub_and_test
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
+#define atomic64_sub_and_test atomic64_sub_and_test
+#endif
+
+#ifndef atomic64_dec_and_test
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+ return atomic64_dec_return(v) == 0;
+}
+#define atomic64_dec_and_test atomic64_dec_and_test
+#endif
+
+#ifndef atomic64_inc_and_test
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+ return atomic64_inc_return(v) == 0;
+}
+#define atomic64_inc_and_test atomic64_inc_and_test
+#endif
+
+#ifndef atomic64_add_negative
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when the
+ * result is greater than or equal to zero.
+ */
+static inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+#define atomic64_add_negative atomic64_add_negative
+#endif
+
+#ifndef atomic64_fetch_add_unless
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 c = atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#endif
+
+#ifndef atomic64_add_unless
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ return atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define atomic64_add_unless atomic64_add_unless
+#endif
+
+#ifndef atomic64_inc_not_zero
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+ return atomic64_add_unless(v, 1, 0);
+}
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+#endif
+
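atomic64_inc_not_zero() is the classic guard for taking a reference on an object whose last reference may be dropped concurrently: the increment only happens while the count is still non-zero. A hedged sketch with a made-up object type (real users would normally reach for refcount_t; this is only to illustrate the atomic64 helpers):

#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
        atomic64_t refcnt;      /* assumed to start at 1 on creation */
};

/* True if a reference was taken, false if the object is already on its way out. */
static bool obj_tryget(struct obj *o)
{
        return atomic64_inc_not_zero(&o->refcnt);
}

static void obj_put(struct obj *o)
{
        if (atomic64_dec_and_test(&o->refcnt))
                kfree(o);
}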
+#ifndef atomic64_inc_unless_negative
+static inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 c = atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define atomic64_inc_unless_negative atomic64_inc_unless_negative
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 c = atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define atomic64_dec_unless_positive atomic64_dec_unless_positive
+#endif
+
+#ifndef atomic64_dec_if_positive
+static inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 dec, c = atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+#endif
+
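atomic64_dec_if_positive() returns the would-be decremented value even when the decrement is skipped (the return is then negative), which makes it handy for consuming from a budget that must never go below zero. A small sketch; the credit counter and helpers are hypothetical:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic64_t tx_credits = ATOMIC64_INIT(64);

/* Consume one transmit credit; false once the budget is exhausted. */
static bool take_tx_credit(void)
{
        return atomic64_dec_if_positive(&tx_credits) >= 0;
}

static void return_tx_credit(void)
{
        atomic64_inc(&tx_credits);
}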
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// 25de4a2804d70f57e994fe3b419148658bb5378a
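The generated header only provides the ordering variants; pairing them correctly is up to the caller. A common pairing is a release operation to publish data and atomic64_cond_read_acquire() (defined just above) to consume it. A hedged message-passing sketch with hypothetical names:

#include <linux/atomic.h>
#include <linux/types.h>

static u64 payload;
static atomic64_t ready = ATOMIC64_INIT(0);

static void producer(u64 value)
{
        payload = value;
        /* Release: the payload store is ordered before ready becoming non-zero. */
        atomic64_fetch_or_release(1, &ready);
}

static u64 consumer(void)
{
        /* Acquire: spin until ready != 0; the payload read is ordered after it. */
        atomic64_cond_read_acquire(&ready, VAL != 0);
        return payload;
}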
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 1e8e88bdaf09..4c0d009a46f0 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -25,14 +25,6 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
-#ifndef atomic_read_acquire
-#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
-#endif
-
-#ifndef atomic_set_release
-#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
-#endif
-
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
@@ -79,1238 +71,7 @@
__ret; \
})
-/* atomic_add_return_relaxed */
-#ifndef atomic_add_return_relaxed
-#define atomic_add_return_relaxed atomic_add_return
-#define atomic_add_return_acquire atomic_add_return
-#define atomic_add_return_release atomic_add_return
-
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-#define atomic_add_return_acquire(...) \
- __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_add_return_release
-#define atomic_add_return_release(...) \
- __atomic_op_release(atomic_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_add_return
-#define atomic_add_return(...) \
- __atomic_op_fence(atomic_add_return, __VA_ARGS__)
-#endif
-#endif /* atomic_add_return_relaxed */
-
-#ifndef atomic_inc
-#define atomic_inc(v) atomic_add(1, (v))
-#endif
-
-/* atomic_inc_return_relaxed */
-#ifndef atomic_inc_return_relaxed
-
-#ifndef atomic_inc_return
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
-#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
-#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
-#else /* atomic_inc_return */
-#define atomic_inc_return_relaxed atomic_inc_return
-#define atomic_inc_return_acquire atomic_inc_return
-#define atomic_inc_return_release atomic_inc_return
-#endif /* atomic_inc_return */
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-#define atomic_inc_return_acquire(...) \
- __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_inc_return_release
-#define atomic_inc_return_release(...) \
- __atomic_op_release(atomic_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_inc_return
-#define atomic_inc_return(...) \
- __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
-#endif
-#endif /* atomic_inc_return_relaxed */
-
-/* atomic_sub_return_relaxed */
-#ifndef atomic_sub_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return
-#define atomic_sub_return_acquire atomic_sub_return
-#define atomic_sub_return_release atomic_sub_return
-
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-#define atomic_sub_return_acquire(...) \
- __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_sub_return_release
-#define atomic_sub_return_release(...) \
- __atomic_op_release(atomic_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_sub_return
-#define atomic_sub_return(...) \
- __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
-#endif
-#endif /* atomic_sub_return_relaxed */
-
-#ifndef atomic_dec
-#define atomic_dec(v) atomic_sub(1, (v))
-#endif
-
-/* atomic_dec_return_relaxed */
-#ifndef atomic_dec_return_relaxed
-
-#ifndef atomic_dec_return
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
-#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
-#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
-#else /* atomic_dec_return */
-#define atomic_dec_return_relaxed atomic_dec_return
-#define atomic_dec_return_acquire atomic_dec_return
-#define atomic_dec_return_release atomic_dec_return
-#endif /* atomic_dec_return */
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-#define atomic_dec_return_acquire(...) \
- __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_dec_return_release
-#define atomic_dec_return_release(...) \
- __atomic_op_release(atomic_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_dec_return
-#define atomic_dec_return(...) \
- __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
-#endif
-#endif /* atomic_dec_return_relaxed */
-
-
-/* atomic_fetch_add_relaxed */
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add
-#define atomic_fetch_add_acquire atomic_fetch_add
-#define atomic_fetch_add_release atomic_fetch_add
-
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-#define atomic_fetch_add_acquire(...) \
- __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_add_release
-#define atomic_fetch_add_release(...) \
- __atomic_op_release(atomic_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(...) \
- __atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_add_relaxed */
-
-/* atomic_fetch_inc_relaxed */
-#ifndef atomic_fetch_inc_relaxed
-
-#ifndef atomic_fetch_inc
-#define atomic_fetch_inc(v) atomic_fetch_add(1, (v))
-#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v))
-#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v))
-#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v))
-#else /* atomic_fetch_inc */
-#define atomic_fetch_inc_relaxed atomic_fetch_inc
-#define atomic_fetch_inc_acquire atomic_fetch_inc
-#define atomic_fetch_inc_release atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-#define atomic_fetch_inc_acquire(...) \
- __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_inc_release
-#define atomic_fetch_inc_release(...) \
- __atomic_op_release(atomic_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_inc
-#define atomic_fetch_inc(...) \
- __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_inc_relaxed */
-
-/* atomic_fetch_sub_relaxed */
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub
-#define atomic_fetch_sub_acquire atomic_fetch_sub
-#define atomic_fetch_sub_release atomic_fetch_sub
-
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-#define atomic_fetch_sub_acquire(...) \
- __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_sub_release
-#define atomic_fetch_sub_release(...) \
- __atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_sub
-#define atomic_fetch_sub(...) \
- __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_sub_relaxed */
-
-/* atomic_fetch_dec_relaxed */
-#ifndef atomic_fetch_dec_relaxed
-
-#ifndef atomic_fetch_dec
-#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v))
-#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v))
-#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v))
-#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v))
-#else /* atomic_fetch_dec */
-#define atomic_fetch_dec_relaxed atomic_fetch_dec
-#define atomic_fetch_dec_acquire atomic_fetch_dec
-#define atomic_fetch_dec_release atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-#define atomic_fetch_dec_acquire(...) \
- __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_dec_release
-#define atomic_fetch_dec_release(...) \
- __atomic_op_release(atomic_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_dec
-#define atomic_fetch_dec(...) \
- __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_dec_relaxed */
-
-/* atomic_fetch_or_relaxed */
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or
-#define atomic_fetch_or_acquire atomic_fetch_or
-#define atomic_fetch_or_release atomic_fetch_or
-
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-#define atomic_fetch_or_acquire(...) \
- __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_or_release
-#define atomic_fetch_or_release(...) \
- __atomic_op_release(atomic_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_or
-#define atomic_fetch_or(...) \
- __atomic_op_fence(atomic_fetch_or, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_or_relaxed */
-
-/* atomic_fetch_and_relaxed */
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and
-#define atomic_fetch_and_acquire atomic_fetch_and
-#define atomic_fetch_and_release atomic_fetch_and
-
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-#define atomic_fetch_and_acquire(...) \
- __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_and_release
-#define atomic_fetch_and_release(...) \
- __atomic_op_release(atomic_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_and
-#define atomic_fetch_and(...) \
- __atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_andnot
-#define atomic_andnot(i, v) atomic_and(~(int)(i), (v))
-#endif
-
-#ifndef atomic_fetch_andnot_relaxed
-
-#ifndef atomic_fetch_andnot
-#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v))
-#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v))
-#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v))
-#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v))
-#else /* atomic_fetch_andnot */
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
-#endif /* atomic_fetch_andnot */
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_acquire(...) \
- __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_andnot_release
-#define atomic_fetch_andnot_release(...) \
- __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_andnot
-#define atomic_fetch_andnot(...) \
- __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_andnot_relaxed */
-
-/* atomic_fetch_xor_relaxed */
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor
-#define atomic_fetch_xor_acquire atomic_fetch_xor
-#define atomic_fetch_xor_release atomic_fetch_xor
-
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-#define atomic_fetch_xor_acquire(...) \
- __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_xor_release
-#define atomic_fetch_xor_release(...) \
- __atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_xor
-#define atomic_fetch_xor(...) \
- __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_xor_relaxed */
-
-
-/* atomic_xchg_relaxed */
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_relaxed atomic_xchg
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-#define atomic_xchg_acquire(...) \
- __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_xchg_release
-#define atomic_xchg_release(...) \
- __atomic_op_release(atomic_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_xchg
-#define atomic_xchg(...) \
- __atomic_op_fence(atomic_xchg, __VA_ARGS__)
-#endif
-#endif /* atomic_xchg_relaxed */
-
-/* atomic_cmpxchg_relaxed */
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
-
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-#define atomic_cmpxchg_acquire(...) \
- __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_cmpxchg_release
-#define atomic_cmpxchg_release(...) \
- __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_cmpxchg
-#define atomic_cmpxchg(...) \
- __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
-#endif
-#endif /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg
-
-#define __atomic_try_cmpxchg(type, _p, _po, _n) \
-({ \
- typeof(_po) __po = (_po); \
- typeof(*(_po)) __r, __o = *__po; \
- __r = atomic_cmpxchg##type((_p), __o, (_n)); \
- if (unlikely(__r != __o)) \
- *__po = __r; \
- likely(__r == __o); \
-})
-
-#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
-#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
-#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n)
-#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n)
-
-#else /* atomic_try_cmpxchg */
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
-
-/* cmpxchg_relaxed */
-#ifndef cmpxchg_relaxed
-#define cmpxchg_relaxed cmpxchg
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
-
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define cmpxchg_acquire(...) \
- __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define cmpxchg_release(...) \
- __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define cmpxchg(...) \
- __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-#endif /* cmpxchg_relaxed */
-
-/* cmpxchg64_relaxed */
-#ifndef cmpxchg64_relaxed
-#define cmpxchg64_relaxed cmpxchg64
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
-
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define cmpxchg64_acquire(...) \
- __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define cmpxchg64_release(...) \
- __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define cmpxchg64(...) \
- __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-#endif /* cmpxchg64_relaxed */
-
-/* xchg_relaxed */
-#ifndef xchg_relaxed
-#define xchg_relaxed xchg
-#define xchg_acquire xchg
-#define xchg_release xchg
-
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-#endif /* xchg_relaxed */
-
-/**
- * atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns the original value of @v.
- */
-#ifndef atomic_fetch_add_unless
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#endif
-
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static inline bool atomic_add_unless(atomic_t *v, int a, int u)
-{
- return atomic_fetch_add_unless(v, a, u) != u;
-}
-
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-#ifndef atomic_inc_not_zero
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#endif
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#ifndef atomic_inc_and_test
-static inline bool atomic_inc_and_test(atomic_t *v)
-{
- return atomic_inc_return(v) == 0;
-}
-#endif
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#ifndef atomic_dec_and_test
-static inline bool atomic_dec_and_test(atomic_t *v)
-{
- return atomic_dec_return(v) == 0;
-}
-#endif
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#ifndef atomic_sub_and_test
-static inline bool atomic_sub_and_test(int i, atomic_t *v)
-{
- return atomic_sub_return(i, v) == 0;
-}
-#endif
-
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#ifndef atomic_add_negative
-static inline bool atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-#endif
-
-#ifndef atomic_inc_unless_negative
-static inline bool atomic_inc_unless_negative(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#endif
-
-#ifndef atomic_dec_unless_positive
-static inline bool atomic_dec_unless_positive(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#endif
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-#ifndef atomic_dec_if_positive
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
- int dec, c = atomic_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#endif
-
-#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef atomic64_read_acquire
-#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
-#endif
-
-#ifndef atomic64_set_release
-#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
-#endif
-
-/* atomic64_add_return_relaxed */
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
-
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-#define atomic64_add_return_acquire(...) \
- __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_add_return_release
-#define atomic64_add_return_release(...) \
- __atomic_op_release(atomic64_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_add_return
-#define atomic64_add_return(...) \
- __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_inc
-#define atomic64_inc(v) atomic64_add(1, (v))
-#endif
-
-/* atomic64_inc_return_relaxed */
-#ifndef atomic64_inc_return_relaxed
-
-#ifndef atomic64_inc_return
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
-#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
-#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
-#else /* atomic64_inc_return */
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
-#endif /* atomic64_inc_return */
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-#define atomic64_inc_return_acquire(...) \
- __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_inc_return_release
-#define atomic64_inc_return_release(...) \
- __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_inc_return
-#define atomic64_inc_return(...) \
- __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_inc_return_relaxed */
-
-
-/* atomic64_sub_return_relaxed */
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
-
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-#define atomic64_sub_return_acquire(...) \
- __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_sub_return_release
-#define atomic64_sub_return_release(...) \
- __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_sub_return
-#define atomic64_sub_return(...) \
- __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_dec
-#define atomic64_dec(v) atomic64_sub(1, (v))
-#endif
-
-/* atomic64_dec_return_relaxed */
-#ifndef atomic64_dec_return_relaxed
-
-#ifndef atomic64_dec_return
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
-#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
-#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
-#else /* atomic64_dec_return */
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
-#endif /* atomic64_dec_return */
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-#define atomic64_dec_return_acquire(...) \
- __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_dec_return_release
-#define atomic64_dec_return_release(...) \
- __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_dec_return
-#define atomic64_dec_return(...) \
- __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_dec_return_relaxed */
-
-
-/* atomic64_fetch_add_relaxed */
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add
-#define atomic64_fetch_add_acquire atomic64_fetch_add
-#define atomic64_fetch_add_release atomic64_fetch_add
-
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-#define atomic64_fetch_add_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_add_release
-#define atomic64_fetch_add_release(...) \
- __atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_add
-#define atomic64_fetch_add(...) \
- __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_add_relaxed */
-
-/* atomic64_fetch_inc_relaxed */
-#ifndef atomic64_fetch_inc_relaxed
-
-#ifndef atomic64_fetch_inc
-#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v))
-#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v))
-#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v))
-#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v))
-#else /* atomic64_fetch_inc */
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc
-#define atomic64_fetch_inc_release atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-#define atomic64_fetch_inc_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_inc_release
-#define atomic64_fetch_inc_release(...) \
- __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_inc
-#define atomic64_fetch_inc(...) \
- __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_inc_relaxed */
-
-/* atomic64_fetch_sub_relaxed */
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub
-#define atomic64_fetch_sub_release atomic64_fetch_sub
-
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_sub_release
-#define atomic64_fetch_sub_release(...) \
- __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_sub
-#define atomic64_fetch_sub(...) \
- __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_sub_relaxed */
-
-/* atomic64_fetch_dec_relaxed */
-#ifndef atomic64_fetch_dec_relaxed
-
-#ifndef atomic64_fetch_dec
-#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v))
-#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v))
-#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v))
-#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v))
-#else /* atomic64_fetch_dec */
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec
-#define atomic64_fetch_dec_release atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-#define atomic64_fetch_dec_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_dec_release
-#define atomic64_fetch_dec_release(...) \
- __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_dec
-#define atomic64_fetch_dec(...) \
- __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_dec_relaxed */
-
-/* atomic64_fetch_or_relaxed */
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or
-#define atomic64_fetch_or_acquire atomic64_fetch_or
-#define atomic64_fetch_or_release atomic64_fetch_or
-
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-#define atomic64_fetch_or_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_or_release
-#define atomic64_fetch_or_release(...) \
- __atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_or
-#define atomic64_fetch_or(...) \
- __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_or_relaxed */
-
-/* atomic64_fetch_and_relaxed */
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_relaxed atomic64_fetch_and
-#define atomic64_fetch_and_acquire atomic64_fetch_and
-#define atomic64_fetch_and_release atomic64_fetch_and
-
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-#define atomic64_fetch_and_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_and_release
-#define atomic64_fetch_and_release(...) \
- __atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_and
-#define atomic64_fetch_and(...) \
- __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_andnot
-#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v))
-#endif
-
-#ifndef atomic64_fetch_andnot_relaxed
-
-#ifndef atomic64_fetch_andnot
-#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v))
-#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v))
-#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v))
-#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v))
-#else /* atomic64_fetch_andnot */
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
-#endif /* atomic64_fetch_andnot */
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot_release(...) \
- __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_andnot
-#define atomic64_fetch_andnot(...) \
- __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_andnot_relaxed */
-
-/* atomic64_fetch_xor_relaxed */
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor
-#define atomic64_fetch_xor_release atomic64_fetch_xor
-
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_xor_release
-#define atomic64_fetch_xor_release(...) \
- __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_xor
-#define atomic64_fetch_xor(...) \
- __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_xor_relaxed */
-
-
-/* atomic64_xchg_relaxed */
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_relaxed atomic64_xchg
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-#define atomic64_xchg_acquire(...) \
- __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_xchg_release
-#define atomic64_xchg_release(...) \
- __atomic_op_release(atomic64_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_xchg
-#define atomic64_xchg(...) \
- __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
-#endif
-#endif /* atomic64_xchg_relaxed */
-
-/* atomic64_cmpxchg_relaxed */
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
-
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-#define atomic64_cmpxchg_acquire(...) \
- __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_cmpxchg_release
-#define atomic64_cmpxchg_release(...) \
- __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_cmpxchg
-#define atomic64_cmpxchg(...) \
- __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-#endif /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg
-
-#define __atomic64_try_cmpxchg(type, _p, _po, _n) \
-({ \
- typeof(_po) __po = (_po); \
- typeof(*(_po)) __r, __o = *__po; \
- __r = atomic64_cmpxchg##type((_p), __o, (_n)); \
- if (unlikely(__r != __o)) \
- *__po = __r; \
- likely(__r == __o); \
-})
-
-#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n)
-#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
-#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
-#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n)
-
-#else /* atomic64_try_cmpxchg */
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-/**
- * atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns the original value of @v.
- */
-#ifndef atomic64_fetch_add_unless
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
- long long u)
-{
- long long c = atomic64_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#endif
-
-/**
- * atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
-{
- return atomic64_fetch_add_unless(v, a, u) != u;
-}
-
-/**
- * atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-#ifndef atomic64_inc_not_zero
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-#endif
-
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#ifndef atomic64_inc_and_test
-static inline bool atomic64_inc_and_test(atomic64_t *v)
-{
- return atomic64_inc_return(v) == 0;
-}
-#endif
-
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#ifndef atomic64_dec_and_test
-static inline bool atomic64_dec_and_test(atomic64_t *v)
-{
- return atomic64_dec_return(v) == 0;
-}
-#endif
-
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#ifndef atomic64_sub_and_test
-static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
-{
- return atomic64_sub_return(i, v) == 0;
-}
-#endif
-
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#ifndef atomic64_add_negative
-static inline bool atomic64_add_negative(long long i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-#endif
-
-#ifndef atomic64_inc_unless_negative
-static inline bool atomic64_inc_unless_negative(atomic64_t *v)
-{
- long long c = atomic64_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#endif
-
-#ifndef atomic64_dec_unless_positive
-static inline bool atomic64_dec_unless_positive(atomic64_t *v)
-{
- long long c = atomic64_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#endif
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic64_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic64 variable, v, was not decremented.
- */
-#ifndef atomic64_dec_if_positive
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
-{
- long long dec, c = atomic64_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#endif
-
-#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#include <linux/atomic-fallback.h>
#include <asm-generic/atomic-long.h>
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c31157135598..07e02d6df5ad 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -190,6 +190,7 @@ struct backing_dev_info {
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
+ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c28a47cbe355..f9b029180241 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -365,7 +365,7 @@ unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
rcu_read_lock();
/*
- * Paired with store_release in inode_switch_wb_work_fn() and
+ * Paired with store_release in inode_switch_wbs_work_fn() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 53051f3d8f25..f111c780ef1d 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -4,15 +4,18 @@
*
* Common interface definitions for making balloon pages movable by compaction.
*
- * Despite being perfectly possible to perform ballooned pages migration, they
- * make a special corner case to compaction scans because balloon pages are not
- * enlisted at any LRU list like the other pages we do compact / migrate.
+ * Balloon page migration makes use of the general non-lru movable page
+ * feature.
+ *
+ * page->private is used to reference the responsible balloon device.
+ * page->mapping is used in context of non-lru page migration to reference
+ * the address space operations for page isolation/migration/compaction.
*
* As the page isolation scanning step a compaction thread does is a lockless
* procedure (from a page standpoint), it might bring some racy situations while
* performing balloon page compaction. In order to sort out these racy scenarios
* and safely perform balloon's page compaction and migration we must, always,
- * ensure following these three simple rules:
+ * ensure following these simple rules:
*
* i. when updating a balloon's page ->mapping element, strictly do it under
* the following lock order, independently of the far superior
@@ -21,19 +24,8 @@
* +--spin_lock_irq(&b_dev_info->pages_lock);
* ... page->mapping updates here ...
*
- * ii. before isolating or dequeueing a balloon page from the balloon device
- * pages list, the page reference counter must be raised by one and the
- * extra refcount must be dropped when the page is enqueued back into
- * the balloon device page list, thus a balloon page keeps its reference
- * counter raised only while it is under our special handling;
- *
- * iii. after the lockless scan step have selected a potential balloon page for
- * isolation, re-test the PageBalloon mark and the PagePrivate flag
- * under the proper page lock, to ensure isolating a valid balloon page
- * (not yet isolated, nor under release procedure)
- *
- * iv. isolation or dequeueing procedure must clear PagePrivate flag under
- * page lock together with removing page from balloon device page list.
+ * ii. isolation or dequeueing procedure must remove the page from balloon
+ * device page list under b_dev_info->pages_lock.
*
* The functions provided by this interface are placed to help on coping with
* the aforementioned balloon page corner case, as well as to ensure the simple
@@ -103,7 +95,7 @@ extern int balloon_page_migrate(struct address_space *mapping,
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
- __SetPageBalloon(page);
+ __SetPageOffline(page);
__SetPageMovable(page, balloon->inode->i_mapping);
set_page_private(page, (unsigned long)balloon);
list_add(&page->lru, &balloon->pages);
@@ -119,7 +111,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
*/
static inline void balloon_page_delete(struct page *page)
{
- __ClearPageBalloon(page);
+ __ClearPageOffline(page);
__ClearPageMovable(page);
set_page_private(page, 0);
/*
@@ -149,13 +141,13 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
- __SetPageBalloon(page);
+ __SetPageOffline(page);
list_add(&page->lru, &balloon->pages);
}
static inline void balloon_page_delete(struct page *page)
{
- __ClearPageBalloon(page);
+ __ClearPageOffline(page);
list_del(&page->lru);
}
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index ef61f3607e99..60b94b944e9f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -332,6 +332,8 @@ extern int bcma_arch_register_fallback_sprom(
struct ssb_sprom *out));
struct bcma_bus {
+ struct device *dev;
+
/* The MMIO area. */
void __iomem *mmio;
@@ -339,14 +341,7 @@ struct bcma_bus {
enum bcma_hosttype hosttype;
bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
- union {
- /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
- struct pci_dev *host_pci;
- /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
- struct sdio_func *host_sdio;
- /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
- struct platform_device *host_pdev;
- };
+ struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */
struct bcma_chipinfo chipinfo;
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 7cca5f859a90..f3c43519baa7 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -6,6 +6,7 @@
struct bcma_soc {
struct bcma_bus bus;
+ struct device *dev;
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5c7e7f859a24..d66bf5f32610 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -287,7 +287,7 @@ enum req_opf {
REQ_OP_DISCARD = 3,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = 5,
- /* seset a zone write pointer */
+ /* reset a zone write pointer */
REQ_OP_ZONE_RESET = 6,
/* write the same sector many times */
REQ_OP_WRITE_SAME = 7,
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 8804753805ac..7bb2d8de9f30 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{
- return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
+ /*
+ * Tracing should ignore the starting sector for passthrough requests and
+ * requests where the starting sector didn't get set.
+ */
+ if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
+ return 0;
+ return blk_rq_pos(rq);
}
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 588dd5f0bd85..695b2a880d9a 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -78,7 +78,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 flags);
+ enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
union bpf_attr __user *uattr);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e734f163bd0b..a2132e09dc1c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -16,6 +16,7 @@
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
+#include <linux/u64_stats_sync.h>
struct bpf_verifier_env;
struct perf_event;
@@ -72,14 +73,15 @@ struct bpf_map {
u32 value_size;
u32 max_entries;
u32 map_flags;
- u32 pages;
+ int spin_lock_off; /* >=0 valid offset, <0 error */
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
+ u32 pages;
bool unpriv_array;
- /* 55 bytes hole */
+ /* 51 bytes hole */
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
@@ -91,6 +93,36 @@ struct bpf_map {
char name[BPF_OBJ_NAME_LEN];
};
+static inline bool map_value_has_spin_lock(const struct bpf_map *map)
+{
+ return map->spin_lock_off >= 0;
+}
+
+static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
+{
+ if (likely(!map_value_has_spin_lock(map)))
+ return;
+ *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
+ (struct bpf_spin_lock){};
+}
+
+/* copy everything but bpf_spin_lock */
+static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
+{
+ if (unlikely(map_value_has_spin_lock(map))) {
+ u32 off = map->spin_lock_off;
+
+ memcpy(dst, src, off);
+ memcpy(dst + off + sizeof(struct bpf_spin_lock),
+ src + off + sizeof(struct bpf_spin_lock),
+ map->value_size - off - sizeof(struct bpf_spin_lock));
+ } else {
+ memcpy(dst, src, map->value_size);
+ }
+}
+void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
+ bool lock_src);
+
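copy_map_value() depends on spin_lock_off pointing at the bpf_spin_lock embedded in the map value, so the copy is done in two halves around that field and never overwrites the destination's lock word. A hedged illustration with a made-up value layout (not a structure this patch defines):

#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical map value; spin_lock_off would be offsetof(struct my_value, lock). */
struct my_value {
        u64 packets;
        struct bpf_spin_lock lock;
        u64 bytes;
};

static void copy_my_value(struct my_value *dst, const struct my_value *src)
{
        size_t off = offsetof(struct my_value, lock);

        /* Mirror copy_map_value(): copy the bytes before and after the lock. */
        memcpy(dst, src, off);
        memcpy((char *)dst + off + sizeof(struct bpf_spin_lock),
               (const char *)src + off + sizeof(struct bpf_spin_lock),
               sizeof(*src) - off - sizeof(struct bpf_spin_lock));
}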
struct bpf_offload_dev;
struct bpf_offloaded_map;
@@ -162,6 +194,8 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
+ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
+ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
};
/* type of values returned from helper functions */
@@ -171,6 +205,7 @@ enum bpf_return_type {
RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
+ RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -224,6 +259,10 @@ enum bpf_reg_type {
PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
PTR_TO_SOCKET, /* reg points to struct bpf_sock */
PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
+ PTR_TO_SOCK_COMMON, /* reg points to sock_common */
+ PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
+ PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
+ PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
};
/* The information passed from prog-specific *_is_valid_access
@@ -268,9 +307,15 @@ struct bpf_verifier_ops {
};
struct bpf_prog_offload_ops {
+ /* verifier basic callbacks */
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env);
+ /* verifier optimization callbacks (called after .finalize) */
+ int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+ int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
+ /* program management callbacks */
int (*prepare)(struct bpf_prog *prog);
int (*translate)(struct bpf_prog *prog);
void (*destroy)(struct bpf_prog *prog);
@@ -283,6 +328,7 @@ struct bpf_prog_offload {
void *dev_priv;
struct list_head offloads;
bool dev_state;
+ bool opt_failed;
void *jited_image;
u32 jited_len;
};
@@ -295,6 +341,12 @@ enum bpf_cgroup_storage_type {
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+struct bpf_prog_stats {
+ u64 cnt;
+ u64 nsecs;
+ struct u64_stats_sync syncp;
+};
+
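bpf_prog_stats is kept per CPU (see the __percpu pointer added to bpf_prog_aux below), and syncp exists so the two 64-bit counters can be read consistently on 32-bit hosts. A hedged sketch of the usual u64_stats update/read pattern such a structure implies; the helper names are illustrative, not ones this patch adds:

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Writer: bump this CPU's counters inside an update section. */
static void prog_stats_update(struct bpf_prog_stats __percpu *pstats, u64 delta_ns)
{
        struct bpf_prog_stats *stats = this_cpu_ptr(pstats);

        u64_stats_update_begin(&stats->syncp);
        stats->cnt++;
        stats->nsecs += delta_ns;
        u64_stats_update_end(&stats->syncp);
}

/* Reader: retry until a consistent snapshot of one CPU's counters is seen. */
static void prog_stats_read_cpu(const struct bpf_prog_stats *stats, u64 *cnt, u64 *nsecs)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&stats->syncp);
                *cnt = stats->cnt;
                *nsecs = stats->nsecs;
        } while (u64_stats_fetch_retry(&stats->syncp, start));
}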
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
@@ -344,6 +396,7 @@ struct bpf_prog_aux {
* main prog always has linfo_idx == 0
*/
u32 linfo_idx;
+ struct bpf_prog_stats __percpu *stats;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -397,6 +450,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
/* an array of programs to be executed under rcu_lock.
*
@@ -511,6 +567,7 @@ void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
extern int sysctl_unprivileged_bpf_disabled;
+extern int sysctl_bpf_stats_enabled;
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
@@ -725,8 +782,9 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
struct bpf_offload_dev *
-bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops);
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
@@ -869,7 +927,8 @@ extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
-
+extern const struct bpf_func_proto bpf_spin_lock_proto;
+extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
/* Shared helpers among cBPF and eBPF. */
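For illustration only, a minimal BPF-C sketch of how a program might use the bpf_spin_lock()/bpf_spin_unlock() helpers exported above, assuming the usual libbpf headers and a BTF-described map; the map, struct, and section names here are made up.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

struct counter_val {
	struct bpf_spin_lock lock;
	__u64 packets;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct counter_val);
} counters SEC(".maps");

SEC("tc")
int count_packets(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct counter_val *val;

	val = bpf_map_lookup_elem(&counters, &key);
	if (!val)
		return 0;

	bpf_spin_lock(&val->lock);	/* tracked by the verifier's active_spin_lock */
	val->packets++;
	bpf_spin_unlock(&val->lock);
	return 0;
}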
@@ -877,6 +936,9 @@ void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#if defined(CONFIG_NET)
+bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
@@ -885,6 +947,12 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_prog *prog,
u32 *target_size);
#else
+static inline bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
static inline bool bpf_sock_is_valid_access(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
@@ -901,4 +969,31 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
}
#endif
+#ifdef CONFIG_INET
+bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+
+u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+#else
+static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+
+static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+#endif /* CONFIG_INET */
+
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 44d9ab4809bd..08bf2f1fe553 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -6,9 +6,11 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter)
BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act)
BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act)
BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
+#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr)
+#endif
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 27b74947cd2b..69f7a3449eda 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -148,6 +148,7 @@ struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
u32 curframe;
+ u32 active_spin_lock;
bool speculative;
};
@@ -172,6 +173,7 @@ struct bpf_verifier_state_list {
#define BPF_ALU_SANITIZE_SRC 1U
#define BPF_ALU_SANITIZE_DST 2U
#define BPF_ALU_NEG_VALUE (1U << 2)
+#define BPF_ALU_NON_POINTER (1U << 3)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
@@ -186,6 +188,7 @@ struct bpf_insn_aux_data {
int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
u8 alu_state; /* used in combination with alu_limit */
+ unsigned int orig_idx; /* original instruction index */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -264,5 +267,10 @@ int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index f02cee0225d4..d815622cd31e 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -3,13 +3,22 @@
#define _LINUX_BPFILTER_H
#include <uapi/linux/bpfilter.h>
+#include <linux/umh.h>
struct sock;
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
-extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
- char __user *optval,
- unsigned int optlen, bool is_set);
+struct bpfilter_umh_ops {
+ struct umh_info info;
+ /* since ip_getsockopt() can run in parallel, serialize access to umh */
+ struct mutex lock;
+ int (*sockopt)(struct sock *sk, int optname,
+ char __user *optval,
+ unsigned int optlen, bool is_set);
+ int (*start)(void);
+ bool stop;
+};
+extern struct bpfilter_umh_ops bpfilter_ops;
#endif
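A hedged sketch (kernel context assumed) of how a caller might serialize sockopt forwarding through the new bpfilter_ops, following the locking comment above; bpfilter_do_sockopt() is a made-up name, not the actual net/ipv4 implementation.

static int bpfilter_do_sockopt(struct sock *sk, int optname, char __user *optval,
			       unsigned int optlen, bool is_set)
{
	int err = -ENOPROTOOPT;

	mutex_lock(&bpfilter_ops.lock);		/* ip_getsockopt() paths can race */
	if (bpfilter_ops.sockopt)
		err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set);
	mutex_unlock(&bpfilter_ops.lock);
	return err;
}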
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 12502e25e767..455d31b55828 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -50,6 +50,7 @@ u32 btf_id(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
+int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
diff --git a/include/linux/capability.h b/include/linux/capability.h
index b769330e9380..ecce0f43c73a 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -210,6 +210,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
+extern bool ns_capable_setid(struct user_namespace *ns, int cap);
#else
static inline bool has_capability(struct task_struct *t, int cap)
{
@@ -241,6 +242,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
{
return true;
}
+static inline bool ns_capable_setid(struct user_namespace *ns, int cap)
+{
+ return true;
+}
#endif /* CONFIG_MULTIUSER */
extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 68bb09c29ce8..a420c07904bc 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -35,6 +35,7 @@
#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
+#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -53,7 +54,7 @@ struct ceph_options {
unsigned long osd_request_timeout; /* jiffies */
/*
- * any type that can't be simply compared or doesn't need need
+ * any type that can't be simply compared or doesn't need
* to be compared should go beyond this point,
* ceph_compare_options() should be updated accordingly
*/
@@ -281,7 +282,8 @@ extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
void *private);
-int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
+ bool show_all);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7a2af5034278..2294f963dab7 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -354,7 +354,6 @@ struct ceph_osd_client {
struct rb_root linger_map_checks;
atomic_t num_requests;
atomic_t num_homeless;
- bool abort_on_full; /* abort w/ ENOSPC when full */
int abort_err;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8fcbae1b8db0..1c70803e9f77 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -32,6 +32,7 @@ struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
+struct poll_table_struct;
#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
@@ -574,6 +575,9 @@ struct cftype {
ssize_t (*write)(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
+ __poll_t (*poll)(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lockdep_key;
#endif
@@ -602,7 +606,7 @@ struct cgroup_subsys {
void (*cancel_fork)(struct task_struct *task);
void (*fork)(struct task_struct *task);
void (*exit)(struct task_struct *task);
- void (*free)(struct task_struct *task);
+ void (*release)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);
bool early_init:1;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 9968332cceed..81f58b4a5418 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -121,6 +121,7 @@ extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
+void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);
int cgroup_init_early(void);
@@ -697,6 +698,7 @@ static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 68250a57aace..9569e7c786d3 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -88,14 +88,13 @@ extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
-extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_compact_unevictable_allowed;
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, unsigned int alloc_flags,
- const struct alloc_context *ac, enum compact_priority prio);
+ const struct alloc_context *ac, enum compact_priority prio,
+ struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
unsigned int alloc_flags, int classzone_idx);
@@ -227,8 +226,8 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_i
#endif /* CONFIG_COMPACTION */
-#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
struct node;
+#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 056be0d03722..ebddcb6cfcf8 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -132,37 +132,6 @@ struct compat_tms {
compat_clock_t tms_cstime;
};
-struct compat_timex {
- compat_uint_t modes;
- compat_long_t offset;
- compat_long_t freq;
- compat_long_t maxerror;
- compat_long_t esterror;
- compat_int_t status;
- compat_long_t constant;
- compat_long_t precision;
- compat_long_t tolerance;
- struct old_timeval32 time;
- compat_long_t tick;
- compat_long_t ppsfreq;
- compat_long_t jitter;
- compat_int_t shift;
- compat_long_t stabil;
- compat_long_t jitcnt;
- compat_long_t calcnt;
- compat_long_t errcnt;
- compat_long_t stbcnt;
- compat_int_t tai;
-
- compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
- compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
- compat_int_t:32; compat_int_t:32; compat_int_t:32;
-};
-
-struct timex;
-int compat_get_timex(struct timex *, const struct compat_timex __user *);
-int compat_put_timex(struct compat_timex __user *, const struct timex *);
-
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
typedef struct {
@@ -551,11 +520,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
u32 __user *iocb);
-asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
- compat_long_t min_nr,
- compat_long_t nr,
- struct io_event __user *events,
- struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
@@ -648,7 +612,7 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
compat_loff_t __user *offset, compat_size_t count);
/* fs/select.c */
-asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
+asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
struct old_timespec32 __user *tsp,
@@ -658,7 +622,7 @@ asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *exp,
struct __kernel_timespec __user *tsp,
void __user *sig);
-asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
+asmlinkage long compat_sys_ppoll_time32(struct pollfd __user *ufds,
unsigned int nfds,
struct old_timespec32 __user *tsp,
const compat_sigset_t __user *sigmask,
@@ -688,19 +652,6 @@ asmlinkage long compat_sys_newfstat(unsigned int fd,
/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */
-/* fs/timerfd.c */
-asmlinkage long compat_sys_timerfd_gettime(int ufd,
- struct old_itimerspec32 __user *otmr);
-asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
- const struct old_itimerspec32 __user *utmr,
- struct old_itimerspec32 __user *otmr);
-
-/* fs/utimes.c */
-asmlinkage long compat_sys_utimensat(unsigned int dfd,
- const char __user *filename,
- struct old_timespec32 __user *t,
- int flags);
-
/* kernel/exit.c */
asmlinkage long compat_sys_waitid(int, compat_pid_t,
struct compat_siginfo __user *, int,
@@ -709,9 +660,6 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t,
/* kernel/futex.c */
-asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
- struct old_timespec32 __user *utime, u32 __user *uaddr2,
- u32 val3);
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
compat_size_t len);
@@ -719,10 +667,6 @@ asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
-/* kernel/hrtimer.c */
-asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp,
- struct old_timespec32 __user *rmtp);
-
/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
struct compat_itimerval __user *it);
@@ -740,20 +684,6 @@ asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
-asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
- struct old_itimerspec32 __user *setting);
-asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct old_itimerspec32 __user *new,
- struct old_itimerspec32 __user *old);
-asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
- struct old_timespec32 __user *tp);
-asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
- struct old_timespec32 __user *tp);
-asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
- struct old_timespec32 __user *tp);
-asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
- struct old_timespec32 __user *rqtp,
- struct old_timespec32 __user *rmtp);
/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
@@ -766,8 +696,6 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
-asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct old_timespec32 __user *interval);
/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
@@ -785,7 +713,7 @@ asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
compat_size_t sigsetsize);
-asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
+asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese,
struct compat_siginfo __user *uinfo,
struct old_timespec32 __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese,
@@ -808,7 +736,6 @@ asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
-asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
/* kernel/timer.c */
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
@@ -817,14 +744,6 @@ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
asmlinkage long compat_sys_mq_open(const char __user *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr __user *u_attr);
-asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
- const char __user *u_msg_ptr,
- compat_size_t msg_len, unsigned int msg_prio,
- const struct old_timespec32 __user *u_abs_timeout);
-asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
- char __user *u_msg_ptr,
- compat_size_t msg_len, unsigned int __user *u_msg_prio,
- const struct old_timespec32 __user *u_abs_timeout);
asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
const struct compat_sigevent __user *u_notification);
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
@@ -840,8 +759,6 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsems, const struct old_timespec32 __user *timeout);
/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
@@ -899,7 +816,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
struct __kernel_timespec __user *timeout);
-asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_wait4(compat_pid_t pid,
@@ -910,8 +827,6 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
-asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock,
- struct compat_timex __user *tp);
asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags);
asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
@@ -952,8 +867,6 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
/* __ARCH_WANT_SYSCALL_NO_AT */
asmlinkage long compat_sys_open(const char __user *filename, int flags,
umode_t mode);
-asmlinkage long compat_sys_utimes(const char __user *filename,
- struct old_timeval32 __user *t);
/* __ARCH_WANT_SYSCALL_NO_FLAGS */
asmlinkage long compat_sys_signalfd(int ufd,
@@ -967,12 +880,6 @@ asmlinkage long compat_sys_newlstat(const char __user *filename,
struct compat_stat __user *statbuf);
/* __ARCH_WANT_SYSCALL_DEPRECATED */
-asmlinkage long compat_sys_time(old_time32_t __user *tloc);
-asmlinkage long compat_sys_utime(const char __user *filename,
- struct old_utimbuf32 __user *t);
-asmlinkage long compat_sys_futimesat(unsigned int dfd,
- const char __user *filename,
- struct old_timeval32 __user *t);
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
struct old_timeval32 __user *tvp);
@@ -1007,9 +914,6 @@ asmlinkage long compat_sys_sigaction(int sig,
struct compat_old_sigaction __user *oact);
#endif
-/* obsolete: kernel/time/time.c */
-asmlinkage long compat_sys_stime(old_time32_t __user *tptr);
-
/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 39f668d5066b..333a6695a918 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -3,9 +3,8 @@
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
-/* Some compiler specific definitions are overwritten here
- * for Clang compiler
- */
+/* Compiler specific definitions for Clang compiler */
+
#define uninitialized_var(x) x = *(&(x))
/* same as gcc, this was present in clang-2.6 so we can assume it works
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 5776da43da97..e8579412ad21 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -58,17 +58,13 @@
(typeof(ptr)) (__ptr + (off)); \
})
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) \
- __asm__ ("" : "=r" (var) : "0" (var))
-
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 517bd14e1222..b17f3cd18334 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -5,9 +5,7 @@
#ifdef __ECC
-/* Some compiler specific definitions are overwritten here
- * for Intel ECC compiler
- */
+/* Compiler specific definitions for Intel ECC compiler */
#include <asm/intrinsics.h>
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index fc5004a4b07d..445348facea9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
#ifndef OPTIMIZER_HIDE_VAR
-#define OPTIMIZER_HIDE_VAR(var) barrier()
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#endif
/* Not-quite-unique ID. */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 19f32b0c29af..6b318efd8a74 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -34,6 +34,7 @@
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
+# define __GCC4_has_attribute___copy__ 0
# define __GCC4_has_attribute___designated_init__ 0
# define __GCC4_has_attribute___externally_visible__ 1
# define __GCC4_has_attribute___noclone__ 1
@@ -101,6 +102,19 @@
#define __attribute_const__ __attribute__((__const__))
/*
+ * Optional: only supported since gcc >= 9
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
+ */
+#if __has_attribute(__copy__)
+# define __copy(symbol) __attribute__((__copy__(symbol)))
+#else
+# define __copy(symbol)
+#endif
+
+/*
* Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
* attribute warnings entirely and for good") for more information.
*
diff --git a/include/linux/component.h b/include/linux/component.h
index e71fbbbc74e2..16de18f473d7 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -4,16 +4,38 @@
#include <linux/stddef.h>
+
struct device;
+/**
+ * struct component_ops - callbacks for component drivers
+ *
+ * Components are registered with component_add() and unregistered with
+ * component_del().
+ */
struct component_ops {
+ /**
+ * @bind:
+ *
+ * Called through component_bind_all() when the aggregate driver is
+ * ready to bind the overall driver.
+ */
int (*bind)(struct device *comp, struct device *master,
void *master_data);
+ /**
+ * @unbind:
+ *
+ * Called through component_unbind_all() when the aggregate driver is
+ * ready to bind the overall driver, or when component_bind_all() fails
+ * part-ways through and needs to unbind some already bound components.
+ */
void (*unbind)(struct device *comp, struct device *master,
void *master_data);
};
int component_add(struct device *, const struct component_ops *);
+int component_add_typed(struct device *dev, const struct component_ops *ops,
+ int subcomponent);
void component_del(struct device *, const struct component_ops *);
int component_bind_all(struct device *master, void *master_data);
@@ -21,8 +43,42 @@ void component_unbind_all(struct device *master, void *master_data);
struct master;
+/**
+ * struct component_master_ops - callback for the aggregate driver
+ *
+ * Aggregate drivers are registered with component_master_add_with_match() and
+ * unregistered with component_master_del().
+ */
struct component_master_ops {
+ /**
+ * @bind:
+ *
+	 * Called when all components of the aggregate driver, as specified in
+ * the match list passed to component_master_add_with_match(), are
+ * ready. Usually there are 3 steps to bind an aggregate driver:
+ *
+ * 1. Allocate a structure for the aggregate driver.
+ *
+ * 2. Bind all components to the aggregate driver by calling
+ * component_bind_all() with the aggregate driver structure as opaque
+ * pointer data.
+ *
+ * 3. Register the aggregate driver with the subsystem to publish its
+ * interfaces.
+ *
+ * Note that the lifetime of the aggregate driver does not align with
+ * any of the underlying &struct device instances. Therefore devm cannot
+ * be used and all resources acquired or allocated in this callback must
+ * be explicitly released in the @unbind callback.
+ */
int (*bind)(struct device *master);
+ /**
+ * @unbind:
+ *
+ * Called when either the aggregate driver, using
+ * component_master_del(), or one of its components, using
+ * component_del(), is unregistered.
+ */
void (*unbind)(struct device *master);
};
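To make the three documented steps concrete, a hypothetical aggregate driver's bind/unbind pair might look like the sketch below; my_agg, my_subsys_register() and my_subsys_unregister() are invented names, and resources are released by hand because devm cannot be used here.

struct my_agg {
	struct device *dev;
};

static int my_agg_bind(struct device *master)
{
	struct my_agg *agg;
	int ret;

	agg = kzalloc(sizeof(*agg), GFP_KERNEL);	/* step 1: allocate (no devm) */
	if (!agg)
		return -ENOMEM;
	agg->dev = master;
	dev_set_drvdata(master, agg);

	ret = component_bind_all(master, agg);		/* step 2: bind every component */
	if (ret)
		goto err_free;

	ret = my_subsys_register(agg);			/* step 3: publish the interfaces */
	if (ret)
		goto err_unbind;
	return 0;

err_unbind:
	component_unbind_all(master, agg);
err_free:
	kfree(agg);
	return ret;
}

static void my_agg_unbind(struct device *master)
{
	struct my_agg *agg = dev_get_drvdata(master);

	my_subsys_unregister(agg);
	component_unbind_all(master, agg);
	kfree(agg);					/* everything released by hand */
}

static const struct component_master_ops my_agg_ops = {
	.bind	= my_agg_bind,
	.unbind	= my_agg_unbind,
};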
@@ -37,7 +93,27 @@ void component_match_add_release(struct device *master,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data);
+void component_match_add_typed(struct device *master,
+ struct component_match **matchptr,
+ int (*compare_typed)(struct device *, int, void *), void *compare_data);
+/**
+ * component_match_add - add a component match entry
+ * @master: device with the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @compare: compare function to match against all components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the @master
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions.
+ *
+ * See also component_match_add_release() and component_match_add_typed().
+ */
static inline void component_match_add(struct device *master,
struct component_match **matchptr,
int (*compare)(struct device *, void *), void *compare_data)
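And a hypothetical probe routine building the match list for the aggregate driver sketched earlier (my_compare(), my_first_child_node() and my_agg_ops are invented names); note the match list pointer must start out NULL, as documented above.

static int my_compare(struct device *dev, void *data)
{
	return dev->of_node == data;	/* e.g. match on a firmware node */
}

static int my_platform_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;	/* must be initialized to NULL */

	component_match_add(&pdev->dev, &match, my_compare,
			    my_first_child_node(pdev));
	return component_master_add_with_match(&pdev->dev, &my_agg_ops, match);
}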
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index ab137f97ecbd..ed798e114663 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -119,7 +119,7 @@ struct vc_data {
unsigned int vc_s_blink : 1;
unsigned int vc_s_reverse : 1;
/* misc */
- unsigned int vc_ques : 1;
+ unsigned int vc_priv : 3;
unsigned int vc_need_wrap : 1;
unsigned int vc_can_do_color : 1;
unsigned int vc_report_mouse : 2;
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 46c67a764877..7b87965f7a65 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -154,8 +154,9 @@ struct coresight_connection {
* @orphan: true if the component has connections that haven't been linked.
* @enable: 'true' if component is currently part of an active path.
* @activated: 'true' only if a _sink_ has been activated. A sink can be
- activated but not yet enabled. Enabling for a _sink_
- happens when a source has been selected for that it.
+ * activated but not yet enabled. Enabling for a _sink_
+ *		  happens when a source has been selected for it.
+ * @ea: Device attribute for sink representation under PMU directory.
*/
struct coresight_device {
struct coresight_connection *conns;
@@ -168,7 +169,9 @@ struct coresight_device {
atomic_t *refcnt;
bool orphan;
bool enable; /* true only if configured as part of a path */
+ /* sink specific fields */
bool activated; /* true only if a sink is part of a path */
+ struct dev_ext_attribute *ea;
};
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 218df7f4d3e1..5041357d0297 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology_early(void);
extern void cpu_smt_check_topology(void);
#else
# define cpu_smt_control (CPU_SMT_ENABLED)
static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology_early(void) { }
static inline void cpu_smt_check_topology(void) { }
#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c86d6d8bdfed..b160e98076e3 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -151,6 +151,9 @@ struct cpufreq_policy {
/* For cpufreq driver's internal use */
void *driver_data;
+
+ /* Pointer to the cooling device if used for thermal mitigation */
+ struct thermal_cooling_device *cdev;
};
/* Only for ACPI */
@@ -254,20 +257,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
static struct freq_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
-};
-
#define define_one_global_ro(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define define_one_global_rw(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
@@ -330,6 +325,8 @@ struct cpufreq_driver {
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
+ int (*online)(struct cpufreq_policy *policy);
+ int (*offline)(struct cpufreq_policy *policy);
int (*exit)(struct cpufreq_policy *policy);
void (*stop_cpu)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
@@ -346,14 +343,15 @@ struct cpufreq_driver {
};
/* flags */
-#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
- all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other
- kernel "constants" aren't
- affected by frequency
- transitions */
-#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume
- speed mismatches */
+
+/* driver isn't removed even if all ->init() calls failed */
+#define CPUFREQ_STICKY BIT(0)
+
+/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
+#define CPUFREQ_CONST_LOOPS BIT(1)
+
+/* don't warn on suspend/resume speed mismatches */
+#define CPUFREQ_PM_NO_WARN BIT(2)
/*
* This should be set by platforms having multiple clock-domains, i.e.
@@ -361,14 +359,14 @@ struct cpufreq_driver {
* be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
* governor with different tunables for different clusters.
*/
-#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3)
/*
* Driver will do POSTCHANGE notifications from outside of their ->target()
* routine and so must set cpufreq_driver->flags with this flag, so that core
* can handle them specially.
*/
-#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4)
+#define CPUFREQ_ASYNC_NOTIFICATION BIT(4)
/*
* Set by drivers which want cpufreq core to check if CPU is running at a
@@ -377,13 +375,19 @@ struct cpufreq_driver {
* from the table. And if that fails, we will stop further boot process by
* issuing a BUG_ON().
*/
-#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
+#define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5)
/*
* Set by drivers to disallow use of governors with "dynamic_switching" flag
* set.
*/
-#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6)
+#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
+
+/*
+ * Set by drivers that want the core to automatically register the cpufreq
+ * driver as a thermal cooling device.
+ */
+#define CPUFREQ_IS_COOLING_DEV BIT(7)
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index fd586d0301e7..e78281d07b70 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -121,6 +121,7 @@ enum cpuhp_state {
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
+ CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4dff74f48d4b..3b39472324a3 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -69,11 +69,9 @@ struct cpuidle_state {
/* Idle State Flags */
#define CPUIDLE_FLAG_NONE (0x00)
-#define CPUIDLE_FLAG_POLLING (0x01) /* polling state */
-#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
-
-#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
+#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4907c9df86b3..ddd45bb74887 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -15,7 +15,6 @@
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/key.h>
-#include <linux/selinux.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 902ec171fc6d..f2565a103158 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -118,7 +118,7 @@
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000
-#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
+#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
@@ -188,14 +188,6 @@ struct blkcipher_desc {
u32 flags;
};
-struct cipher_desc {
- struct crypto_tfm *tfm;
- void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
- const u8 *src, unsigned int nbytes);
- void *info;
-};
-
/**
* DOC: Block Cipher Algorithm Definitions
*
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h
index 05b97144d342..28e6cf1356da 100644
--- a/include/linux/davinci_emac.h
+++ b/include/linux/davinci_emac.h
@@ -46,5 +46,4 @@ enum {
EMAC_VERSION_2, /* DM646x */
};
-void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context);
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index ef4b70f64f33..60996e64c579 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -62,9 +62,10 @@ extern const struct qstr slash_name;
struct dentry_stat_t {
long nr_dentry;
long nr_unused;
- long age_limit; /* age in seconds */
- long want_pages; /* pages requested by system */
- long dummy[2];
+ long age_limit; /* age in seconds */
+ long want_pages; /* pages requested by system */
+ long nr_negative; /* # of unused negative dentries */
+ long dummy; /* Reserved for future use */
};
extern struct dentry_stat_t dentry_stat;
diff --git a/include/linux/device.h b/include/linux/device.h
index 6cb4640b6160..54b586105179 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -341,6 +341,7 @@ struct device *driver_find_device(struct device_driver *drv,
struct device *start, void *data,
int (*match)(struct device *dev, void *data));
+void driver_deferred_probe_add(struct device *dev);
int driver_deferred_probe_check_state(struct device *dev);
/**
@@ -757,11 +758,17 @@ struct device_dma_parameters {
/**
* struct device_connection - Device Connection Descriptor
+ * @fwnode: The device node of the connected device
* @endpoint: The names of the two devices connected together
* @id: Unique identifier for the connection
* @list: List head, private, for internal use only
+ *
+ * NOTE: @fwnode is not used together with @endpoint. @fwnode is used when
+ * platform firmware defines the connection. When the connection is registered
+ * with device_connection_add() @endpoint is used instead.
*/
struct device_connection {
+ struct fwnode_handle *fwnode;
const char *endpoint[2];
const char *id;
struct list_head list;
@@ -827,12 +834,14 @@ enum device_link_state {
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
* AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
+ * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
#define DL_FLAG_PM_RUNTIME BIT(2)
#define DL_FLAG_RPM_ACTIVE BIT(3)
#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
+#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
/**
* struct device_link - Device link representation.
@@ -845,6 +854,7 @@ enum device_link_state {
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
* @kref: Count repeated addition of the same link.
* @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ * @supplier_preactivated: Supplier has been made active before consumer probe.
*/
struct device_link {
struct device *supplier;
@@ -853,11 +863,12 @@ struct device_link {
struct list_head c_node;
enum device_link_state status;
u32 flags;
- bool rpm_active;
+ refcount_t rpm_active;
struct kref kref;
#ifdef CONFIG_SRCU
struct rcu_head rcu_head;
#endif
+ bool supplier_preactivated; /* Owned by consumer probe. */
};
/**
@@ -985,7 +996,7 @@ struct device {
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
- dev_set/get_drvdata */
+ dev_set_drvdata/dev_get_drvdata */
struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
@@ -1035,7 +1046,6 @@ struct device {
spinlock_t devres_lock;
struct list_head devres_head;
- struct klist_node knode_class;
struct class *class;
const struct attribute_group **groups; /* optional groups */
@@ -1095,7 +1105,7 @@ static inline void set_dev_node(struct device *dev, int node)
#else
static inline int dev_to_node(struct device *dev)
{
- return -1;
+ return NUMA_NO_NODE;
}
static inline void set_dev_node(struct device *dev, int node)
{
@@ -1165,6 +1175,16 @@ static inline bool device_async_suspend_enabled(struct device *dev)
return !!dev->power.async_suspend;
}
+static inline bool device_pm_not_required(struct device *dev)
+{
+ return dev->power.no_pm;
+}
+
+static inline void device_set_pm_not_required(struct device *dev)
+{
+ dev->power.no_pm = true;
+}
+
static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
@@ -1382,28 +1402,28 @@ void device_link_remove(void *consumer, struct device *supplier);
#ifdef CONFIG_PRINTK
-__printf(3, 0)
+__printf(3, 0) __cold
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args);
-__printf(3, 4)
+__printf(3, 4) __cold
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
-__printf(3, 4)
+__printf(3, 4) __cold
void dev_printk(const char *level, const struct device *dev,
const char *fmt, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void _dev_emerg(const struct device *dev, const char *fmt, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void _dev_alert(const struct device *dev, const char *fmt, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void _dev_crit(const struct device *dev, const char *fmt, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void _dev_err(const struct device *dev, const char *fmt, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void _dev_warn(const struct device *dev, const char *fmt, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void _dev_notice(const struct device *dev, const char *fmt, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void _dev_info(const struct device *dev, const char *fmt, ...);
#else
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index cef2127e1d70..f6ded992c183 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -717,15 +717,6 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#endif
-/*
- * Please always use dma_alloc_coherent instead as it already zeroes the memory!
- */
-static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 45ff763fba76..ae96ea145ae3 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -48,7 +48,20 @@ typedef u16 efi_char16_t; /* UNICODE character */
typedef u64 efi_physical_addr_t;
typedef void *efi_handle_t;
-typedef guid_t efi_guid_t;
+/*
+ * The UEFI spec and EDK2 reference implementation both define EFI_GUID as
+ * struct { u32 a; u16 b; u16 c; u8 d[8]; }; and so the implied alignment
+ * is 32 bits not 8 bits like our guid_t. In some cases (i.e., on 32-bit ARM),
+ * this means that firmware services invoked by the kernel may assume that
+ * efi_guid_t* arguments are 32-bit aligned, and use memory accessors that
+ * do not tolerate misalignment. So let's set the minimum alignment to 32 bits.
+ *
+ * Note that the UEFI spec as well as some comments in the EDK2 code base
+ * suggest that EFI_GUID should be 64-bit aligned, but this appears to be
+ * a mistake, given that no code seems to exist that actually enforces that
+ * or relies on it.
+ */
+typedef guid_t efi_guid_t __aligned(__alignof__(u32));
#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
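As a sanity check, the relaxed alignment described in the comment above can be verified at build time with a plain C11 assertion (illustrative only):

_Static_assert(__alignof__(efi_guid_t) == __alignof__(u32),
	       "efi_guid_t must only require 32-bit alignment");
_Static_assert(sizeof(efi_guid_t) == 16, "efi_guid_t must stay 16 bytes");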
@@ -1198,8 +1211,6 @@ static inline bool efi_enabled(int feature)
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
extern bool efi_is_table_address(unsigned long phys_addr);
-
-extern int efi_apply_persistent_mem_reservations(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1218,11 +1229,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
-
-static inline int efi_apply_persistent_mem_reservations(void)
-{
- return 0;
-}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1706,19 +1712,19 @@ extern int efi_tpm_eventlog_init(void);
* fault happened while executing an efi runtime service.
*/
enum efi_rts_ids {
- NONE,
- GET_TIME,
- SET_TIME,
- GET_WAKEUP_TIME,
- SET_WAKEUP_TIME,
- GET_VARIABLE,
- GET_NEXT_VARIABLE,
- SET_VARIABLE,
- QUERY_VARIABLE_INFO,
- GET_NEXT_HIGH_MONO_COUNT,
- RESET_SYSTEM,
- UPDATE_CAPSULE,
- QUERY_CAPSULE_CAPS,
+ EFI_NONE,
+ EFI_GET_TIME,
+ EFI_SET_TIME,
+ EFI_GET_WAKEUP_TIME,
+ EFI_SET_WAKEUP_TIME,
+ EFI_GET_VARIABLE,
+ EFI_GET_NEXT_VARIABLE,
+ EFI_SET_VARIABLE,
+ EFI_QUERY_VARIABLE_INFO,
+ EFI_GET_NEXT_HIGH_MONO_COUNT,
+ EFI_RESET_SYSTEM,
+ EFI_UPDATE_CAPSULE,
+ EFI_QUERY_CAPSULE_CAPS,
};
/*
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2c0af7b00715..e2f3b21cd72a 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -44,6 +44,7 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
const unsigned char *haddr);
+__be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index afd9596ce636..e6ebc9761822 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -98,10 +98,6 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
return index % n_rx_rings;
}
-/* number of link mode bits/ulongs handled internally by kernel */
-#define __ETHTOOL_LINK_MODE_MASK_NBITS \
- (__ETHTOOL_LINK_MODE_LAST + 1)
-
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
@@ -400,4 +396,19 @@ struct ethtool_ops {
void (*get_ethtool_phy_stats)(struct net_device *,
struct ethtool_stats *, u64 *);
};
+
+struct ethtool_rx_flow_rule {
+ struct flow_rule *rule;
+ unsigned long priv[0];
+};
+
+struct ethtool_rx_flow_spec_input {
+ const struct ethtool_rx_flow_spec *fs;
+ u32 rss_ctx;
+};
+
+struct ethtool_rx_flow_rule *
+ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input);
+void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule);
+
#endif /* _LINUX_ETHTOOL_H */
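A hedged driver-side sketch of the new translation helpers declared above: build the intermediate flow_rule from an ethtool_rx_flow_spec, hand it to hardware-specific code (my_hw_program() is a made-up hook), then free the intermediate representation.

static int my_set_rxnfc_rule(struct net_device *dev,
			     const struct ethtool_rx_flow_spec *fs, u32 rss_ctx)
{
	struct ethtool_rx_flow_spec_input input = {
		.fs	 = fs,
		.rss_ctx = rss_ctx,
	};
	struct ethtool_rx_flow_rule *erule;
	int err;

	erule = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(erule))
		return PTR_ERR(erule);

	err = my_hw_program(dev, erule->rule);	/* consume the generic flow_rule */
	ethtool_rx_flow_rule_destroy(erule);
	return err;
}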
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 9e2142795335..b79fa9bb7359 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -19,7 +19,7 @@
FAN_CLASS_PRE_CONTENT)
#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \
- FAN_REPORT_TID | \
+ FAN_REPORT_TID | FAN_REPORT_FID | \
FAN_CLOEXEC | FAN_NONBLOCK | \
FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
@@ -35,10 +35,28 @@
FAN_MARK_IGNORED_SURV_MODIFY | \
FAN_MARK_FLUSH)
-/* Events that user can request to be notified on */
-#define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \
+/*
+ * Events that can be reported with data type FSNOTIFY_EVENT_PATH.
+ * Note that FAN_MODIFY can also be reported with data type
+ * FSNOTIFY_EVENT_INODE.
+ */
+#define FANOTIFY_PATH_EVENTS (FAN_ACCESS | FAN_MODIFY | \
FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC)
+/*
+ * Directory entry modification events - reported only to directory
+ * where entry is modified and not to a watching parent.
+ */
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
+
+/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
+#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
+ FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF)
+
+/* Events that user can request to be notified on */
+#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
+ FANOTIFY_INODE_EVENTS)
+
/* Events that require a permission response from user */
#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
FAN_OPEN_EXEC_PERM)
@@ -49,7 +67,7 @@
/* Events that may be reported to user */
#define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \
FANOTIFY_PERM_EVENTS | \
- FAN_Q_OVERFLOW)
+ FAN_Q_OVERFLOW | FAN_ONDIR)
#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
FANOTIFY_EVENT_FLAGS)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 7cdd31a69719..f52ef0ad6781 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info);
extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
+extern bool fb_center_logo;
extern struct class *fb_class;
#define for_each_registered_fb(i) \
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 27dc7a60693e..d019df946cb2 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -12,7 +12,7 @@
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
-#define force_o_largefile() (BITS_PER_LONG != 32)
+#define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T))
#endif
#if BITS_PER_LONG == 32
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ad106d845b22..6074aa064b54 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -277,6 +277,26 @@ struct sock_reuseport;
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
/* Unconditional jumps, goto pc + off16 */
#define BPF_JMP_A(OFF) \
@@ -513,7 +533,24 @@ struct sk_filter {
struct bpf_prog *prog;
};
-#define BPF_PROG_RUN(filter, ctx) (*(filter)->bpf_func)(ctx, (filter)->insnsi)
+DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
+
+#define BPF_PROG_RUN(prog, ctx) ({ \
+ u32 ret; \
+ cant_sleep(); \
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
+ struct bpf_prog_stats *stats; \
+ u64 start = sched_clock(); \
+ ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
+ stats = this_cpu_ptr(prog->aux->stats); \
+ u64_stats_update_begin(&stats->syncp); \
+ stats->cnt++; \
+ stats->nsecs += sched_clock() - start; \
+ u64_stats_update_end(&stats->syncp); \
+ } else { \
+ ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
+ } \
+ ret; })
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
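A sketch of the matching reader for the per-CPU counters that BPF_PROG_RUN updates above (kernel context assumed; sum_prog_stats() is a made-up name). The syncp seqcount keeps the 64-bit reads consistent on 32-bit architectures.

static void sum_prog_stats(const struct bpf_prog *prog, u64 *cnt, u64 *nsecs)
{
	int cpu;

	*cnt = 0;
	*nsecs = 0;
	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st = per_cpu_ptr(prog->aux->stats, cpu);
		unsigned int start;
		u64 c, n;

		do {
			start = u64_stats_fetch_begin(&st->syncp);
			c = st->cnt;
			n = st->nsecs;
		} while (u64_stats_fetch_retry(&st->syncp, start));
		*cnt += c;
		*nsecs += n;
	}
}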
@@ -591,8 +628,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
return qdisc_skb_cb(skb)->data;
}
-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
- struct sk_buff *skb)
+static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
+ struct sk_buff *skb)
{
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
@@ -611,15 +648,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
return res;
}
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ u32 res;
+
+ preempt_disable();
+ res = __bpf_prog_run_save_cb(prog, skb);
+ preempt_enable();
+ return res;
+}
+
static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u8 *cb_data = bpf_skb_cb(skb);
+ u32 res;
if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);
- return BPF_PROG_RUN(prog, skb);
+ preempt_disable();
+ res = BPF_PROG_RUN(prog, skb);
+ preempt_enable();
+ return res;
}
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
@@ -729,6 +781,7 @@ void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
+struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);
@@ -778,6 +831,7 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
void bpf_clear_redirect_map(struct bpf_map *map);
@@ -859,7 +913,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
-
+u64 bpf_jit_alloc_exec_limit(void);
+void *bpf_jit_alloc_exec(unsigned long size);
+void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
@@ -951,6 +1007,7 @@ bpf_address_lookup(unsigned long addr, unsigned long *size,
void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
+void bpf_get_prog_name(const struct bpf_prog *prog, char *sym);
#else /* CONFIG_BPF_JIT */
@@ -1006,6 +1063,12 @@ static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
+
+static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
+{
+ sym[0] = '\0';
+}
+
#endif /* CONFIG_BPF_JIT */
void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h
index e21c49aba92f..031dd4d3c766 100644
--- a/include/linux/firmware/imx/svc/misc.h
+++ b/include/linux/firmware/imx/svc/misc.h
@@ -52,4 +52,7 @@ int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 *val);
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr);
+
#endif /* _SC_MISC_API_H */
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 3c3c28eff56a..642dab10f65d 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -28,12 +28,35 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
#define PM_GET_TRUSTZONE_VERSION 0xa03
+#define PM_SET_SUSPEND_MODE 0xa02
+#define GET_CALLBACK_DATA 0xa01
/* Number of 32bits values in payload */
#define PAYLOAD_ARG_CNT 4U
+/* Number of arguments for a callback */
+#define CB_ARG_CNT 4
+
+/* Payload size (consists of callback API ID + arguments) */
+#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
+
+#define ZYNQMP_PM_MAX_QOS 100U
+
+/* Node capabilities */
+#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
+#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
+#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
+#define ZYNQMP_PM_CAPABILITY_POWER 0x8U
+
enum pm_api_id {
PM_GET_API_VERSION = 1,
+ PM_REQUEST_NODE = 13,
+ PM_RELEASE_NODE,
+ PM_SET_REQUIREMENT,
+ PM_RESET_ASSERT = 17,
+ PM_RESET_GET_STATUS,
+ PM_PM_INIT_FINALIZE = 21,
+ PM_GET_CHIPID = 24,
PM_IOCTL = 34,
PM_QUERY_DATA,
PM_CLOCK_ENABLE,
@@ -75,6 +98,149 @@ enum pm_query_id {
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
};
+enum zynqmp_pm_reset_action {
+ PM_RESET_ACTION_RELEASE,
+ PM_RESET_ACTION_ASSERT,
+ PM_RESET_ACTION_PULSE,
+};
+
+enum zynqmp_pm_reset {
+ ZYNQMP_PM_RESET_START = 1000,
+ ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
+ ZYNQMP_PM_RESET_PCIE_BRIDGE,
+ ZYNQMP_PM_RESET_PCIE_CTRL,
+ ZYNQMP_PM_RESET_DP,
+ ZYNQMP_PM_RESET_SWDT_CRF,
+ ZYNQMP_PM_RESET_AFI_FM5,
+ ZYNQMP_PM_RESET_AFI_FM4,
+ ZYNQMP_PM_RESET_AFI_FM3,
+ ZYNQMP_PM_RESET_AFI_FM2,
+ ZYNQMP_PM_RESET_AFI_FM1,
+ ZYNQMP_PM_RESET_AFI_FM0,
+ ZYNQMP_PM_RESET_GDMA,
+ ZYNQMP_PM_RESET_GPU_PP1,
+ ZYNQMP_PM_RESET_GPU_PP0,
+ ZYNQMP_PM_RESET_GPU,
+ ZYNQMP_PM_RESET_GT,
+ ZYNQMP_PM_RESET_SATA,
+ ZYNQMP_PM_RESET_ACPU3_PWRON,
+ ZYNQMP_PM_RESET_ACPU2_PWRON,
+ ZYNQMP_PM_RESET_ACPU1_PWRON,
+ ZYNQMP_PM_RESET_ACPU0_PWRON,
+ ZYNQMP_PM_RESET_APU_L2,
+ ZYNQMP_PM_RESET_ACPU3,
+ ZYNQMP_PM_RESET_ACPU2,
+ ZYNQMP_PM_RESET_ACPU1,
+ ZYNQMP_PM_RESET_ACPU0,
+ ZYNQMP_PM_RESET_DDR,
+ ZYNQMP_PM_RESET_APM_FPD,
+ ZYNQMP_PM_RESET_SOFT,
+ ZYNQMP_PM_RESET_GEM0,
+ ZYNQMP_PM_RESET_GEM1,
+ ZYNQMP_PM_RESET_GEM2,
+ ZYNQMP_PM_RESET_GEM3,
+ ZYNQMP_PM_RESET_QSPI,
+ ZYNQMP_PM_RESET_UART0,
+ ZYNQMP_PM_RESET_UART1,
+ ZYNQMP_PM_RESET_SPI0,
+ ZYNQMP_PM_RESET_SPI1,
+ ZYNQMP_PM_RESET_SDIO0,
+ ZYNQMP_PM_RESET_SDIO1,
+ ZYNQMP_PM_RESET_CAN0,
+ ZYNQMP_PM_RESET_CAN1,
+ ZYNQMP_PM_RESET_I2C0,
+ ZYNQMP_PM_RESET_I2C1,
+ ZYNQMP_PM_RESET_TTC0,
+ ZYNQMP_PM_RESET_TTC1,
+ ZYNQMP_PM_RESET_TTC2,
+ ZYNQMP_PM_RESET_TTC3,
+ ZYNQMP_PM_RESET_SWDT_CRL,
+ ZYNQMP_PM_RESET_NAND,
+ ZYNQMP_PM_RESET_ADMA,
+ ZYNQMP_PM_RESET_GPIO,
+ ZYNQMP_PM_RESET_IOU_CC,
+ ZYNQMP_PM_RESET_TIMESTAMP,
+ ZYNQMP_PM_RESET_RPU_R50,
+ ZYNQMP_PM_RESET_RPU_R51,
+ ZYNQMP_PM_RESET_RPU_AMBA,
+ ZYNQMP_PM_RESET_OCM,
+ ZYNQMP_PM_RESET_RPU_PGE,
+ ZYNQMP_PM_RESET_USB0_CORERESET,
+ ZYNQMP_PM_RESET_USB1_CORERESET,
+ ZYNQMP_PM_RESET_USB0_HIBERRESET,
+ ZYNQMP_PM_RESET_USB1_HIBERRESET,
+ ZYNQMP_PM_RESET_USB0_APB,
+ ZYNQMP_PM_RESET_USB1_APB,
+ ZYNQMP_PM_RESET_IPI,
+ ZYNQMP_PM_RESET_APM_LPD,
+ ZYNQMP_PM_RESET_RTC,
+ ZYNQMP_PM_RESET_SYSMON,
+ ZYNQMP_PM_RESET_AFI_FM6,
+ ZYNQMP_PM_RESET_LPD_SWDT,
+ ZYNQMP_PM_RESET_FPD,
+ ZYNQMP_PM_RESET_RPU_DBG1,
+ ZYNQMP_PM_RESET_RPU_DBG0,
+ ZYNQMP_PM_RESET_DBG_LPD,
+ ZYNQMP_PM_RESET_DBG_FPD,
+ ZYNQMP_PM_RESET_APLL,
+ ZYNQMP_PM_RESET_DPLL,
+ ZYNQMP_PM_RESET_VPLL,
+ ZYNQMP_PM_RESET_IOPLL,
+ ZYNQMP_PM_RESET_RPLL,
+ ZYNQMP_PM_RESET_GPO3_PL_0,
+ ZYNQMP_PM_RESET_GPO3_PL_1,
+ ZYNQMP_PM_RESET_GPO3_PL_2,
+ ZYNQMP_PM_RESET_GPO3_PL_3,
+ ZYNQMP_PM_RESET_GPO3_PL_4,
+ ZYNQMP_PM_RESET_GPO3_PL_5,
+ ZYNQMP_PM_RESET_GPO3_PL_6,
+ ZYNQMP_PM_RESET_GPO3_PL_7,
+ ZYNQMP_PM_RESET_GPO3_PL_8,
+ ZYNQMP_PM_RESET_GPO3_PL_9,
+ ZYNQMP_PM_RESET_GPO3_PL_10,
+ ZYNQMP_PM_RESET_GPO3_PL_11,
+ ZYNQMP_PM_RESET_GPO3_PL_12,
+ ZYNQMP_PM_RESET_GPO3_PL_13,
+ ZYNQMP_PM_RESET_GPO3_PL_14,
+ ZYNQMP_PM_RESET_GPO3_PL_15,
+ ZYNQMP_PM_RESET_GPO3_PL_16,
+ ZYNQMP_PM_RESET_GPO3_PL_17,
+ ZYNQMP_PM_RESET_GPO3_PL_18,
+ ZYNQMP_PM_RESET_GPO3_PL_19,
+ ZYNQMP_PM_RESET_GPO3_PL_20,
+ ZYNQMP_PM_RESET_GPO3_PL_21,
+ ZYNQMP_PM_RESET_GPO3_PL_22,
+ ZYNQMP_PM_RESET_GPO3_PL_23,
+ ZYNQMP_PM_RESET_GPO3_PL_24,
+ ZYNQMP_PM_RESET_GPO3_PL_25,
+ ZYNQMP_PM_RESET_GPO3_PL_26,
+ ZYNQMP_PM_RESET_GPO3_PL_27,
+ ZYNQMP_PM_RESET_GPO3_PL_28,
+ ZYNQMP_PM_RESET_GPO3_PL_29,
+ ZYNQMP_PM_RESET_GPO3_PL_30,
+ ZYNQMP_PM_RESET_GPO3_PL_31,
+ ZYNQMP_PM_RESET_RPU_LS,
+ ZYNQMP_PM_RESET_PS_ONLY,
+ ZYNQMP_PM_RESET_PL,
+ ZYNQMP_PM_RESET_PS_PL0,
+ ZYNQMP_PM_RESET_PS_PL1,
+ ZYNQMP_PM_RESET_PS_PL2,
+ ZYNQMP_PM_RESET_PS_PL3,
+ ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
+};
+
+enum zynqmp_pm_suspend_reason {
+ SUSPEND_POWER_REQUEST = 201,
+ SUSPEND_ALERT,
+ SUSPEND_SYSTEM_SHUTDOWN,
+};
+
+enum zynqmp_pm_request_ack {
+ ZYNQMP_PM_REQUEST_ACK_NO = 1,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING,
+ ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -91,6 +257,7 @@ struct zynqmp_pm_query_data {
struct zynqmp_eemi_ops {
int (*get_api_version)(u32 *version);
+ int (*get_chipid)(u32 *idcode, u32 *version);
int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
int (*clock_enable)(u32 clock_id);
int (*clock_disable)(u32 clock_id);
@@ -102,8 +269,25 @@ struct zynqmp_eemi_ops {
int (*clock_setparent)(u32 clock_id, u32 parent_id);
int (*clock_getparent)(u32 clock_id, u32 *parent_id);
int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out);
+ int (*reset_assert)(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag);
+ int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status);
+ int (*init_finalize)(void);
+ int (*set_suspend_mode)(u32 mode);
+ int (*request_node)(const u32 node,
+ const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack);
+ int (*release_node)(const u32 node);
+ int (*set_requirement)(const u32 node,
+ const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack);
};
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
+ u32 arg2, u32 arg3, u32 *ret_payload);
+
#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
#else
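As a consumer-side illustration of the expanded EEMI ops table, a hypothetical driver snippet (the NULL checks and the error code are assumptions, not part of this hunk) might look like:

	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	u32 idcode, version;

	if (!eemi_ops || !eemi_ops->get_chipid || !eemi_ops->reset_assert)
		return -ENXIO;

	eemi_ops->get_chipid(&idcode, &version);

	/* pulse a peripheral reset through the PMU firmware interface */
	eemi_ops->reset_assert(ZYNQMP_PM_RESET_GEM0, PM_RESET_ACTION_PULSE);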
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 011965c08b93..6d775984905b 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -7,6 +7,13 @@
#include <linux/bitops.h>
#include <linux/jump_label.h>
+/*
+ * Return code to denote that the requested number of
+ * frontswap pages are unused (moved to the page cache).
+ * Used in shmem_unuse and try_to_unuse.
+ */
+#define FRONTSWAP_PAGES_UNUSED 2
+
struct frontswap_ops {
void (*init)(unsigned); /* this swap type was just swapon'ed */
int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 811c77743dad..4ede9ffa362a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -37,6 +37,7 @@
#include <linux/uuid.h>
#include <linux/errseq.h>
#include <linux/ioprio.h>
+#include <linux/fs_types.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -304,13 +305,19 @@ enum rw_hint {
struct kiocb {
struct file *ki_filp;
+
+ /* The 'ki_filp' pointer is shared in a union for aio */
+ randomized_struct_fields_start
+
loff_t ki_pos;
void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
void *private;
int ki_flags;
u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
-} __randomize_layout;
+
+ randomized_struct_fields_end
+};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
@@ -1479,11 +1486,12 @@ struct super_block {
struct user_namespace *s_user_ns;
/*
- * Keep the lru lists last in the structure so they always sit on their
- * own individual cachelines.
+ * The list_lru structure is essentially just a pointer to a table
+ * of per-node lru lists, each of which has its own spinlock.
+ * There is no need to put them into separate cachelines.
*/
- struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
- struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
+ struct list_lru s_dentry_lru;
+ struct list_lru s_inode_lru;
struct rcu_head rcu;
struct work_struct destroy_work;
@@ -1700,22 +1708,6 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
/*
- * File types
- *
- * NOTE! These match bits 12..15 of stat.st_mode
- * (ie "(i_mode >> 12) & 15").
- */
-#define DT_UNKNOWN 0
-#define DT_FIFO 1
-#define DT_CHR 2
-#define DT_DIR 4
-#define DT_BLK 6
-#define DT_REG 8
-#define DT_LNK 10
-#define DT_SOCK 12
-#define DT_WHT 14
-
-/*
* This is the "filldir" function type, used by readdir() to let
* the kernel specify what kind of dirent layout it wants to have.
* This allows the kernel to read directories into kernel space or
@@ -2084,7 +2076,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
* I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
* synchronize competing switching instances and to tell
* wb stat updates to grab the i_pages lock. See
- * inode_switch_wb_work_fn() for details.
+ * inode_switch_wbs_work_fn() for details.
*
* I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
* and work dirs among overlayfs mounts.
diff --git a/include/linux/fs_types.h b/include/linux/fs_types.h
new file mode 100644
index 000000000000..54816791196f
--- /dev/null
+++ b/include/linux/fs_types.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_TYPES_H
+#define _LINUX_FS_TYPES_H
+
+/*
+ * This is a header for the common implementation of dirent
+ * to fs on-disk file type conversion. Although the fs on-disk
+ * bits are specific to every file system, in practice, many
+ * file systems use the exact same on-disk format to describe
+ * the lower 3 file type bits that represent the 7 POSIX file
+ * types.
+ *
+ * It is important to note that the definitions in this
+ * header MUST NOT change. This would break both the
+ * userspace ABI and the on-disk format of filesystems
+ * using this code.
+ *
+ * All those file systems can use this generic code for the
+ * conversions.
+ */
+
+/*
+ * struct dirent file types
+ * exposed to user via getdents(2), readdir(3)
+ *
+ * These match bits 12..15 of stat.st_mode
+ * (ie "(i_mode >> 12) & 15").
+ */
+#define S_DT_SHIFT 12
+#define S_DT(mode) (((mode) & S_IFMT) >> S_DT_SHIFT)
+#define S_DT_MASK (S_IFMT >> S_DT_SHIFT)
+
+/* these are defined by POSIX and also present in glibc's dirent.h */
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+
+#define DT_MAX (S_DT_MASK + 1) /* 16 */
+
+/*
+ * fs on-disk file types.
+ * Only the low 3 bits are used for the POSIX file types.
+ * Other bits are reserved for fs private use.
+ * These definitions are shared and used by multiple filesystems,
+ * and MUST NOT change under any circumstances.
+ *
+ * Note that no fs currently stores the whiteout type on-disk,
+ * so whiteout dirents are exposed to user as DT_CHR.
+ */
+#define FT_UNKNOWN 0
+#define FT_REG_FILE 1
+#define FT_DIR 2
+#define FT_CHRDEV 3
+#define FT_BLKDEV 4
+#define FT_FIFO 5
+#define FT_SOCK 6
+#define FT_SYMLINK 7
+
+#define FT_MAX 8
+
+/*
+ * declarations for helper functions, accompanying implementation
+ * is in fs/fs_types.c
+ */
+extern unsigned char fs_ftype_to_dtype(unsigned int filetype);
+extern unsigned char fs_umode_to_ftype(umode_t mode);
+extern unsigned char fs_umode_to_dtype(umode_t mode);
+
+#endif
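A short worked example of the mappings defined above (the values follow directly from the macros; the helpers themselves are implemented in fs/fs_types.c):

	umode_t mode = S_IFDIR | 0755;

	/* S_DT() keeps bits 12..15 of i_mode: S_IFDIR >> 12 == 4 == DT_DIR */
	unsigned char d_type = S_DT(mode);                /* DT_DIR */

	/* round-trip between umode_t, on-disk FT_* and dirent DT_* */
	unsigned char ftype = fs_umode_to_ftype(mode);    /* FT_DIR */
	unsigned char back  = fs_ftype_to_dtype(ftype);   /* DT_DIR again */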
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 941b11811f85..1fc0edd71c52 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -135,8 +135,6 @@ struct ccsr_guts {
u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */
} __attribute__ ((packed));
-u32 fsl_guts_get_svr(void);
-
/* Alternate function signal multiplex control */
#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 741f567253ef..975553a9f75d 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -193,6 +193,7 @@ struct fsl_mc_device {
struct resource *regions;
struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
+ struct device_link *consumer_link;
};
#define to_fsl_mc_device(_dev) \
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index c1f003aadcce..992bf9fa1729 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -7,6 +7,7 @@
#define __PTP_QORIQ_H__
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/ptp_clock_kernel.h>
/*
@@ -49,7 +50,7 @@ struct etts_regs {
u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
};
-struct qoriq_ptp_registers {
+struct ptp_qoriq_registers {
struct ctrl_regs __iomem *ctrl_regs;
struct alarm_regs __iomem *alarm_regs;
struct fiper_regs __iomem *fiper_regs;
@@ -57,15 +58,15 @@ struct qoriq_ptp_registers {
};
/* Offset definitions for the four register groups */
-#define CTRL_REGS_OFFSET 0x0
-#define ALARM_REGS_OFFSET 0x40
-#define FIPER_REGS_OFFSET 0x80
-#define ETTS_REGS_OFFSET 0xa0
+#define ETSEC_CTRL_REGS_OFFSET 0x0
+#define ETSEC_ALARM_REGS_OFFSET 0x40
+#define ETSEC_FIPER_REGS_OFFSET 0x80
+#define ETSEC_ETTS_REGS_OFFSET 0xa0
-#define FMAN_CTRL_REGS_OFFSET 0x80
-#define FMAN_ALARM_REGS_OFFSET 0xb8
-#define FMAN_FIPER_REGS_OFFSET 0xd0
-#define FMAN_ETTS_REGS_OFFSET 0xe0
+#define CTRL_REGS_OFFSET 0x80
+#define ALARM_REGS_OFFSET 0xb8
+#define FIPER_REGS_OFFSET 0xd0
+#define ETTS_REGS_OFFSET 0xe0
/* Bit definitions for the TMR_CTRL register */
@@ -120,6 +121,8 @@ struct qoriq_ptp_registers {
/* Bit definitions for the TMR_STAT register */
#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
#define STAT_VEC_MASK (0x3f)
+#define ETS1_VLD (1<<24)
+#define ETS2_VLD (1<<25)
/* Bit definitions for the TMR_PRSC register */
#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
@@ -134,13 +137,16 @@ struct qoriq_ptp_registers {
#define DEFAULT_FIPER1_PERIOD 1000000000
#define DEFAULT_FIPER2_PERIOD 100000
-struct qoriq_ptp {
+struct ptp_qoriq {
void __iomem *base;
- struct qoriq_ptp_registers regs;
+ struct ptp_qoriq_registers regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
struct resource *rsrc;
+ struct dentry *debugfs_root;
+ struct device *dev;
+ bool extts_fifo_support;
int irq;
int phc_index;
u64 alarm_interval; /* for periodic alarm */
@@ -151,19 +157,49 @@ struct qoriq_ptp {
u32 cksel;
u32 tmr_fiper1;
u32 tmr_fiper2;
+ u32 (*read)(unsigned __iomem *addr);
+ void (*write)(unsigned __iomem *addr, u32 val);
};
-static inline u32 qoriq_read(unsigned __iomem *addr)
+static inline u32 qoriq_read_be(unsigned __iomem *addr)
{
- u32 val;
-
- val = ioread32be(addr);
- return val;
+ return ioread32be(addr);
}
-static inline void qoriq_write(unsigned __iomem *addr, u32 val)
+static inline void qoriq_write_be(unsigned __iomem *addr, u32 val)
{
iowrite32be(val, addr);
}
+static inline u32 qoriq_read_le(unsigned __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void qoriq_write_le(unsigned __iomem *addr, u32 val)
+{
+ iowrite32(val, addr);
+}
+
+irqreturn_t ptp_qoriq_isr(int irq, void *priv);
+int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ const struct ptp_clock_info *caps);
+void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq);
+int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
+int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta);
+int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int ptp_qoriq_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts);
+int ptp_qoriq_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+#ifdef CONFIG_DEBUG_FS
+void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
+void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);
+#else
+static inline void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
+{ }
+static inline void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
+{ }
+#endif
+
#endif
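The new read/write hooks let a single driver serve both big- and little-endian register blocks. A hypothetical probe-time sketch (the "little-endian" device-tree property, np, base and the caps structure are assumptions here, not part of this hunk):

	if (of_property_read_bool(np, "little-endian")) {
		ptp_qoriq->read = qoriq_read_le;
		ptp_qoriq->write = qoriq_write_le;
	} else {
		ptp_qoriq->read = qoriq_read_be;
		ptp_qoriq->write = qoriq_write_be;
	}

	err = ptp_qoriq_init(ptp_qoriq, base, &ptp_qoriq_caps);
	if (err)
		return err;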
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 60cef8227534..5da56a674f2f 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -98,10 +98,11 @@ struct fsl_usb2_platform_data {
unsigned suspended:1;
unsigned already_suspended:1;
- unsigned has_fsl_erratum_a007792:1;
- unsigned has_fsl_erratum_a005275:1;
+ unsigned has_fsl_erratum_a007792:1;
+ unsigned has_fsl_erratum_14:1;
+ unsigned has_fsl_erratum_a005275:1;
unsigned has_fsl_erratum_a005697:1;
- unsigned check_phy_clk_valid:1;
+ unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
u32 pm_command;
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 2ccb08cb5d6a..09587e2860b5 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -17,8 +17,22 @@
#include <linux/slab.h>
#include <linux/bug.h>
+/*
+ * Notify this @dir inode about a change in the directory entry @dentry.
+ *
+ * Unlike fsnotify_parent(), the event will be reported regardless of the
+ * FS_EVENT_ON_CHILD mask on the parent inode.
+ */
+static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry,
+ __u32 mask)
+{
+ return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE,
+ dentry->d_name.name, 0);
+}
+
/* Notify this dentry's parent about a child's events. */
-static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
+static inline int fsnotify_parent(const struct path *path,
+ struct dentry *dentry, __u32 mask)
{
if (!dentry)
dentry = path->dentry;
@@ -65,6 +79,9 @@ static inline int fsnotify_perm(struct file *file, int mask)
fsnotify_mask = FS_ACCESS_PERM;
}
+ if (S_ISDIR(inode->i_mode))
+ fsnotify_mask |= FS_ISDIR;
+
return fsnotify_path(inode, path, fsnotify_mask);
}
@@ -73,7 +90,12 @@ static inline int fsnotify_perm(struct file *file, int mask)
*/
static inline void fsnotify_link_count(struct inode *inode)
{
- fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ __u32 mask = FS_ATTRIB;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -81,12 +103,14 @@ static inline void fsnotify_link_count(struct inode *inode)
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
const unsigned char *old_name,
- int isdir, struct inode *target, struct dentry *moved)
+ int isdir, struct inode *target,
+ struct dentry *moved)
{
struct inode *source = moved->d_inode;
u32 fs_cookie = fsnotify_get_cookie();
- __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
- __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
+ __u32 old_dir_mask = FS_MOVED_FROM;
+ __u32 new_dir_mask = FS_MOVED_TO;
+ __u32 mask = FS_MOVE_SELF;
const unsigned char *new_name = moved->d_name.name;
if (old_dir == new_dir)
@@ -95,6 +119,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
if (isdir) {
old_dir_mask |= FS_ISDIR;
new_dir_mask |= FS_ISDIR;
+ mask |= FS_ISDIR;
}
fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
@@ -106,7 +131,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
fsnotify_link_count(target);
if (source)
- fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, NULL, 0);
audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE);
}
@@ -128,15 +153,35 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
/*
* fsnotify_nameremove - a filename was removed from a directory
+ *
+ * This is mostly called under the parent vfs inode lock, so name and
+ * dentry->d_parent should be stable. However, there are some corner cases where
+ * the inode lock is not held. So, to be on the safe side and be resilient to
+ * future callers and out-of-tree users of d_delete(), we do not assume that
+ * d_parent and d_name are stable and instead use dget_parent() and
+ * take_dentry_name_snapshot() to grab stable references.
*/
static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
{
+ struct dentry *parent;
+ struct name_snapshot name;
__u32 mask = FS_DELETE;
+ /* d_delete() of pseudo inode? (e.g. __ns_get_path() playing tricks) */
+ if (IS_ROOT(dentry))
+ return;
+
if (isdir)
mask |= FS_ISDIR;
- fsnotify_parent(NULL, dentry, mask);
+ parent = dget_parent(dentry);
+ take_dentry_name_snapshot(&name, dentry);
+
+ fsnotify(d_inode(parent), mask, d_inode(dentry), FSNOTIFY_EVENT_INODE,
+ name.name, 0);
+
+ release_dentry_name_snapshot(&name);
+ dput(parent);
}
/*
@@ -144,7 +189,12 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
*/
static inline void fsnotify_inoderemove(struct inode *inode)
{
- fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ __u32 mask = FS_DELETE_SELF;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
__fsnotify_inode_delete(inode);
}
@@ -155,7 +205,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
+ fsnotify_dirent(inode, dentry, FS_CREATE);
}
/*
@@ -176,12 +226,9 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
*/
static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
{
- __u32 mask = (FS_CREATE | FS_ISDIR);
- struct inode *d_inode = dentry->d_inode;
-
audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
+ fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR);
}
/*
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 7639774e7475..dfc28fcb4de8 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -59,27 +59,33 @@
* dnotify and inotify. */
#define FS_EVENT_ON_CHILD 0x08000000
-/* This is a list of all events that may get sent to a parernt based on fs event
- * happening to inodes inside that directory */
-#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
- FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
- FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
- FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM | \
- FS_OPEN_EXEC | FS_OPEN_EXEC_PERM)
-
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
+/*
+ * Directory entry modification events - reported only to the directory
+ * where the entry is modified and not to a watching parent.
+ * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
+ * when a directory entry inside a child subdir changes.
+ */
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE)
+
#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
FS_OPEN_EXEC_PERM)
+/*
+ * This is a list of all events that may get sent to a parent based on fs event
+ * happening to inodes inside that directory.
+ */
+#define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \
+ FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
+ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \
+ FS_OPEN | FS_OPEN_EXEC)
+
/* Events that can be reported to backends */
-#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
- FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
- FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
- FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
- FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
- FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME | \
- FS_OPEN_EXEC | FS_OPEN_EXEC_PERM)
+#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
+ FS_EVENTS_POSS_ON_CHILD | \
+ FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \
+ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED)
/* Extra flags that may be reported with event or control handling of events */
#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
@@ -129,7 +135,6 @@ struct fsnotify_event {
struct list_head list;
/* inode may ONLY be dereferenced during handle_event(). */
struct inode *inode; /* either the inode the event happened to or its parent */
- u32 mask; /* the type of access, bitwise OR for FS_* event types */
};
/*
@@ -288,6 +293,7 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
struct fsnotify_mark_connector {
spinlock_t lock;
unsigned int type; /* Type of object [lock] */
+ __kernel_fsid_t fsid; /* fsid of filesystem containing object */
union {
/* Object pointer [lock] */
fsnotify_connp_t *obj;
@@ -416,6 +422,9 @@ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
/* return AND dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);
+/* Remove event queued in the notification list */
+extern void fsnotify_remove_queued_event(struct fsnotify_group *group,
+ struct fsnotify_event *event);
/* functions used to manipulate the marks attached to inodes */
@@ -428,28 +437,35 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark,
/* Find mark belonging to given group in the list of marks */
extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
struct fsnotify_group *group);
+/* Get cached fsid of filesystem containing object */
+extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn,
+ __kernel_fsid_t *fsid);
/* attach the mark to the object */
extern int fsnotify_add_mark(struct fsnotify_mark *mark,
fsnotify_connp_t *connp, unsigned int type,
- int allow_dups);
+ int allow_dups, __kernel_fsid_t *fsid);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
- fsnotify_connp_t *connp, unsigned int type,
- int allow_dups);
+ fsnotify_connp_t *connp,
+ unsigned int type, int allow_dups,
+ __kernel_fsid_t *fsid);
+
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct inode *inode,
int allow_dups)
{
return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, allow_dups);
+ FSNOTIFY_OBJ_TYPE_INODE, allow_dups, NULL);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
int allow_dups)
{
return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, allow_dups);
+ FSNOTIFY_OBJ_TYPE_INODE, allow_dups,
+ NULL);
}
+
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
@@ -479,9 +495,12 @@ extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
-/* put here because inotify does some weird stuff when destroying watches */
-extern void fsnotify_init_event(struct fsnotify_event *event,
- struct inode *to_tell, u32 mask);
+static inline void fsnotify_init_event(struct fsnotify_event *event,
+ struct inode *inode)
+{
+ INIT_LIST_HEAD(&event->list);
+ event->inode = inode;
+}
#else
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5f5e25fd6149..fdab7de7490d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -24,21 +24,21 @@ struct vm_area_struct;
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
-#define ___GFP_WRITE 0x100u
-#define ___GFP_NOWARN 0x200u
-#define ___GFP_RETRY_MAYFAIL 0x400u
-#define ___GFP_NOFAIL 0x800u
-#define ___GFP_NORETRY 0x1000u
-#define ___GFP_MEMALLOC 0x2000u
-#define ___GFP_COMP 0x4000u
-#define ___GFP_ZERO 0x8000u
-#define ___GFP_NOMEMALLOC 0x10000u
-#define ___GFP_HARDWALL 0x20000u
-#define ___GFP_THISNODE 0x40000u
-#define ___GFP_ATOMIC 0x80000u
-#define ___GFP_ACCOUNT 0x100000u
-#define ___GFP_DIRECT_RECLAIM 0x200000u
-#define ___GFP_KSWAPD_RECLAIM 0x400000u
+#define ___GFP_ZERO 0x100u
+#define ___GFP_ATOMIC 0x200u
+#define ___GFP_DIRECT_RECLAIM 0x400u
+#define ___GFP_KSWAPD_RECLAIM 0x800u
+#define ___GFP_WRITE 0x1000u
+#define ___GFP_NOWARN 0x2000u
+#define ___GFP_RETRY_MAYFAIL 0x4000u
+#define ___GFP_NOFAIL 0x8000u
+#define ___GFP_NORETRY 0x10000u
+#define ___GFP_MEMALLOC 0x20000u
+#define ___GFP_COMP 0x40000u
+#define ___GFP_NOMEMALLOC 0x80000u
+#define ___GFP_HARDWALL 0x100000u
+#define ___GFP_THISNODE 0x200000u
+#define ___GFP_ACCOUNT 0x400000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP 0x800000u
#else
diff --git a/include/linux/gnss.h b/include/linux/gnss.h
index 43546977098c..36968a0f33e8 100644
--- a/include/linux/gnss.h
+++ b/include/linux/gnss.h
@@ -22,6 +22,7 @@ enum gnss_type {
GNSS_TYPE_NMEA = 0,
GNSS_TYPE_SIRF,
GNSS_TYPE_UBX,
+ GNSS_TYPE_MTK,
GNSS_TYPE_COUNT
};
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f216c563..2d6100edf204 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -24,7 +24,10 @@
#ifdef CONFIG_DEBUG_FS
+#include <linux/kfifo.h>
+
#define HID_DEBUG_BUFSIZE 512
+#define HID_DEBUG_FIFOSIZE 512
void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@ void hid_debug_init(void);
void hid_debug_exit(void);
void hid_debug_event(struct hid_device *, char *);
-
struct hid_debug_list {
- char *hid_debug_buf;
- int head;
- int tail;
+ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
struct fasync_struct *fasync;
struct hid_device *hdev;
struct list_head node;
@@ -64,4 +64,3 @@ struct hid_debug_list {
#endif
#endif
-
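With the buffer/head/tail trio replaced by a kfifo, per-reader setup and event delivery reduce to the standard kfifo calls. A hedged sketch of the expected usage (error paths trimmed; buf is an assumed message buffer):

	struct hid_debug_list *list;

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;
	if (kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL)) {
		kfree(list);
		return -ENOMEM;
	}

	/* producer side, e.g. from hid_debug_event() */
	kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));

	/* the reader copies out with kfifo_to_user() and tears down with
	 * kfifo_free() followed by kfree() */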
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d99287327ef2..f9707d1dcb58 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -430,7 +430,7 @@ struct hid_local {
*/
struct hid_collection {
- struct hid_collection *parent;
+ int parent_idx; /* device->collection */
unsigned type;
unsigned usage;
unsigned level;
@@ -658,7 +658,6 @@ struct hid_parser {
unsigned int *collection_stack;
unsigned int collection_stack_ptr;
unsigned int collection_stack_size;
- struct hid_collection *active_collection;
struct hid_device *device;
unsigned int scan_flags;
};
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 087fd5f48c91..ea35263eb76b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -371,6 +371,8 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
+struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+ int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
@@ -493,17 +495,54 @@ static inline pgoff_t basepage_index(struct page *page)
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
-static inline bool hugepage_migration_supported(struct hstate *h)
-{
+
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#ifndef arch_hugetlb_migration_supported
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
if ((huge_page_shift(h) == PMD_SHIFT) ||
- (huge_page_shift(h) == PGDIR_SHIFT))
+ (huge_page_shift(h) == PUD_SHIFT) ||
+ (huge_page_shift(h) == PGDIR_SHIFT))
return true;
else
return false;
+}
+#endif
#else
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
return false;
+}
#endif
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+ return arch_hugetlb_migration_supported(h);
+}
+
+/*
+ * The movability check is different from the migration check.
+ * It determines whether or not a huge page should be placed in
+ * a movable zone. Movability of a huge page is only relevant if
+ * the huge page size is supported for migration in the first
+ * place: there is no reason for a huge page to be movable if it
+ * is not migratable to start with. The huge page size should
+ * also be large enough to be placed in a movable zone and yet
+ * still feasible to migrate; just being present in a movable
+ * zone does not make migration feasible.
+ *
+ * So even though large huge page sizes such as the gigantic ones
+ * are migratable, they should not be movable, because it is not
+ * feasible to migrate them out of a movable zone.
+ */
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+ if (!hugepage_migration_supported(h))
+ return false;
+
+ if (hstate_is_gigantic(h))
+ return false;
+ return true;
}
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
@@ -543,6 +582,26 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
set_huge_pte_at(mm, addr, ptep, pte);
}
#endif
+
+#ifndef huge_ptep_modify_prot_start
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+#endif
+
+#ifndef huge_ptep_modify_prot_commit
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
+{
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+#endif
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
@@ -602,6 +661,11 @@ static inline bool hugepage_migration_supported(struct hstate *h)
return false;
}
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+ return false;
+}
+
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
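The huge_ptep_modify_prot_start/commit pair mirrors the base-page ptep_modify_prot_*() API. A sketch of how a protection-change path might use it (vma, address, ptep and newprot are assumed loop context, not part of this hunk):

	pte_t old_pte, pte;

	old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
	pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
	huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);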
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f0885cc01db6..64698ec8f2ac 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -222,8 +222,8 @@ static inline u32 hv_get_avail_to_write_percent(
* struct contains the fundamental information about an offer.
*/
struct vmbus_channel_offer {
- uuid_le if_type;
- uuid_le if_instance;
+ guid_t if_type;
+ guid_t if_instance;
/*
* These two fields are not currently used.
@@ -614,8 +614,8 @@ struct vmbus_channel_initiate_contact {
/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
struct vmbus_channel_message_header header;
- uuid_le guest_endpoint_id;
- uuid_le host_service_id;
+ guid_t guest_endpoint_id;
+ guid_t host_service_id;
} __packed;
struct vmbus_channel_version_response {
@@ -714,7 +714,7 @@ enum vmbus_device_type {
struct vmbus_device {
u16 dev_type;
- uuid_le guid;
+ guid_t guid;
bool perf_device;
};
@@ -751,6 +751,19 @@ struct vmbus_channel {
u64 interrupts; /* Host to Guest interrupts */
u64 sig_events; /* Guest to Host events */
+ /*
+ * Guest to host interrupts caused by the outbound ring buffer changing
+ * from empty to not empty.
+ */
+ u64 intr_out_empty;
+
+ /*
+ * Indicates that a full outbound ring buffer was encountered. The flag
+ * is set to true when a full outbound ring buffer is encountered and
+ * set to false when a write to the outbound ring buffer is completed.
+ */
+ bool out_full_flag;
+
/* Channel callback's invoked in softirq context */
struct tasklet_struct callback_event;
void (*onchannel_callback)(void *context);
@@ -903,6 +916,24 @@ struct vmbus_channel {
* vmbus_connection.work_queue and hang: see vmbus_process_offer().
*/
struct work_struct add_channel_work;
+
+ /*
+ * Guest to host interrupts caused by the inbound ring buffer changing
+ * from full to not full while a packet is waiting.
+ */
+ u64 intr_in_full;
+
+ /*
+ * The total number of write operations that encountered a full
+ * outbound ring buffer.
+ */
+ u64 out_full_total;
+
+ /*
+ * The number of write operations that were the first to encounter a
+ * full outbound ring buffer.
+ */
+ u64 out_full_first;
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -936,6 +967,21 @@ static inline void *get_per_channel_state(struct vmbus_channel *c)
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
u32 size)
{
+ unsigned long flags;
+
+ if (size) {
+ spin_lock_irqsave(&c->outbound.ring_lock, flags);
+ ++c->out_full_total;
+
+ if (!c->out_full_flag) {
+ ++c->out_full_first;
+ c->out_full_flag = true;
+ }
+ spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
+ } else {
+ c->out_full_flag = false;
+ }
+
c->outbound.ring_buffer->pending_send_sz = size;
}
@@ -1096,7 +1142,7 @@ struct hv_driver {
bool hvsock;
/* the device type supported by this driver */
- uuid_le dev_type;
+ guid_t dev_type;
const struct hv_vmbus_device_id *id_table;
struct device_driver driver;
@@ -1116,10 +1162,10 @@ struct hv_driver {
/* Base device object */
struct hv_device {
/* the device type id of this device */
- uuid_le dev_type;
+ guid_t dev_type;
/* the device instance id of this device */
- uuid_le dev_instance;
+ guid_t dev_instance;
u16 vendor_id;
u16 device_id;
@@ -1159,8 +1205,9 @@ struct hv_ring_buffer_debug_info {
u32 bytes_avail_towrite;
};
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
- struct hv_ring_buffer_debug_info *debug_info);
+
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info);
/* Vmbus interface */
#define vmbus_driver_register(driver) \
@@ -1187,102 +1234,102 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
* {f8615163-df3e-46c5-913f-f2d2f965ed0e}
*/
#define HV_NIC_GUID \
- .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
- 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
+ .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
+ 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
/*
* IDE GUID
* {32412632-86cb-44a2-9b5c-50d1417354f5}
*/
#define HV_IDE_GUID \
- .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
- 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
+ .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
+ 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
/*
* SCSI GUID
* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
*/
#define HV_SCSI_GUID \
- .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
- 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
+ .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
+ 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
/*
* Shutdown GUID
* {0e0b6031-5213-4934-818b-38d90ced39db}
*/
#define HV_SHUTDOWN_GUID \
- .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
- 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
+ .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
+ 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
/*
* Time Synch GUID
* {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
*/
#define HV_TS_GUID \
- .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
- 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
+ .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
+ 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
/*
* Heartbeat GUID
* {57164f39-9115-4e78-ab55-382f3bd5422d}
*/
#define HV_HEART_BEAT_GUID \
- .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
- 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
+ .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
+ 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
/*
* KVP GUID
* {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
*/
#define HV_KVP_GUID \
- .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
- 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
+ .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
+ 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
/*
* Dynamic memory GUID
* {525074dc-8985-46e2-8057-a307dc18a502}
*/
#define HV_DM_GUID \
- .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
- 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
+ .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
+ 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
/*
* Mouse GUID
* {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
*/
#define HV_MOUSE_GUID \
- .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
- 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
+ .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
+ 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
/*
* Keyboard GUID
* {f912ad6d-2b17-48ea-bd65-f927a61c7684}
*/
#define HV_KBD_GUID \
- .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
- 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
+ .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
+ 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
/*
* VSS (Backup/Restore) GUID
*/
#define HV_VSS_GUID \
- .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
- 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
+ .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
+ 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
* Synthetic Video GUID
* {DA0A7802-E377-4aac-8E77-0558EB1073F8}
*/
#define HV_SYNTHVID_GUID \
- .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
- 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
+ .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
+ 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
/*
* Synthetic FC GUID
* {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
*/
#define HV_SYNTHFC_GUID \
- .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
- 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
+ .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
+ 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
/*
* Guest File Copy Service
@@ -1290,16 +1337,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
*/
#define HV_FCOPY_GUID \
- .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
- 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
+ .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
+ 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
/*
* NetworkDirect. This is the guest RDMA service.
* {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
*/
#define HV_ND_GUID \
- .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
- 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
+ .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
+ 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
/*
* PCI Express Pass Through
@@ -1307,8 +1354,8 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
*/
#define HV_PCIE_GUID \
- .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
- 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
+ .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
+ 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
* Linux doesn't support the 3 devices: the first two are for
@@ -1320,16 +1367,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
*/
#define HV_AVMA1_GUID \
- .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
- 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
+ .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
+ 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
#define HV_AVMA2_GUID \
- .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
- 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
+ .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
+ 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
#define HV_RDV_GUID \
- .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
- 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+ .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
+ 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
/*
* Common header for Hyper-V ICs
@@ -1431,7 +1478,7 @@ struct ictimesync_ref_data {
struct hyperv_service_callback {
u8 msg_type;
char *log_msg;
- uuid_le data;
+ guid_t data;
struct vmbus_channel *channel;
void (*callback)(void *context);
};
@@ -1451,8 +1498,8 @@ void vmbus_setevent(struct vmbus_channel *channel);
extern __u32 vmbus_proto_version;
-int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
- const uuid_le *shv_host_servie_id);
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+ const guid_t *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
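On the driver side the GUID_INIT-based macros are consumed exactly as before when populating the VMBus match table; a minimal sketch with placeholder driver name and callbacks:

static const struct hv_vmbus_device_id id_table[] = {
	/* synthetic networking class */
	{ HV_NIC_GUID, },
	{ },
};

static struct hv_driver sample_hv_drv = {
	.name		= "sample_hv",
	.id_table	= id_table,
	/* .probe, .remove, .shutdown as usual */
};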
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e7d29ae633cd..971cf76a78a0 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -615,6 +615,7 @@ struct ide_drive_s {
/* current sense rq and buffer */
bool sense_rq_armed;
+ bool sense_rq_active;
struct request *sense_rq;
struct request_sense sense_data;
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
extern void ide_timer_expiry(struct timer_list *t);
extern irqreturn_t ide_intr(int irq, void *dev_id);
extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
void ide_init_disk(struct gendisk *, ide_drive_t *);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 3b04e72315e1..48703ec60d06 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -8,7 +8,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 Intel Corporation
+ * Copyright (c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1803,6 +1803,9 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04
#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
+#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
+#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40
+#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80
/* 802.11ax HE PHY capabilities */
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
@@ -1926,11 +1929,11 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ 0x00
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ 0x40
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ 0x80
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ 0xc0
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK 0xc0
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0
#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
@@ -1938,6 +1941,11 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
+#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US 0x00
+#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US 0x40
+#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US 0x80
+#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0
+#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0
/* 802.11ax HE TX/RX MCS NSS Support */
#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
@@ -2016,7 +2024,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
-#define IEEE80211_HE_OPERATION_CO_LOCATED_BSS 0x00008000
+#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000
#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
@@ -2046,7 +2054,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
he_oper_params = le32_to_cpu(he_oper->he_oper_params);
if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
oper_len += 3;
- if (he_oper_params & IEEE80211_HE_OPERATION_CO_LOCATED_BSS)
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
oper_len++;
/* Add the first byte (extension ID) to the total length */
@@ -2118,6 +2126,8 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0
#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1
#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_LCI 8
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC 11
/* 802.11g ERP information element */
#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
@@ -2475,6 +2485,8 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_HE_OPERATION = 36,
WLAN_EID_EXT_UORA = 37,
WLAN_EID_EXT_HE_MU_EDCA = 38,
+ WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52,
+ WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55,
};
/* Action category code */
@@ -2656,6 +2668,11 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
+/* Multiple BSSID capability is set in the 6th bit of the 3rd byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element.
+ */
+#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6)
+
/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
@@ -2691,6 +2708,9 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5)
#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6)
+/* Defines support for enhanced multi-bssid advertisement */
+#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(1)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
@@ -2882,6 +2902,34 @@ enum ieee80211_sa_query_action {
WLAN_ACTION_SA_QUERY_RESPONSE = 1,
};
+/**
+ * struct ieee80211_bssid_index
+ *
+ * This structure refers to "Multiple BSSID-index element"
+ *
+ * @bssid_index: BSSID index
+ * @dtim_period: optional, overrides transmitted BSS dtim period
+ * @dtim_count: optional, overrides transmitted BSS dtim count
+ */
+struct ieee80211_bssid_index {
+ u8 bssid_index;
+ u8 dtim_period;
+ u8 dtim_count;
+};
+
+/**
+ * struct ieee80211_multiple_bssid_configuration
+ *
+ * This structure refers to "Multiple BSSID Configuration element"
+ *
+ * @bssid_count: total number of active BSSIDs in the set
+ * @profile_periodicity: the minimum number of beacon frames that need to be
+ * received in order to discover all the nontransmitted BSSIDs in the set.
+ */
+struct ieee80211_multiple_bssid_configuration {
+ u8 bssid_count;
+ u8 profile_periodicity;
+};
#define SUITE(oui, id) (((oui) << 8) | (id))
@@ -3243,4 +3291,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+struct element {
+ u8 id;
+ u8 datalen;
+ u8 data[];
+} __packed;
+
+/* element iteration helpers */
+#define for_each_element(_elem, _data, _datalen) \
+ for (_elem = (const struct element *)(_data); \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) && \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) + _elem->datalen; \
+ _elem = (const struct element *)(_elem->data + _elem->datalen))
+
+#define for_each_element_id(element, _id, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, _data, _datalen) \
+ for_each_element(element, _data, _datalen) \
+ if (element->id == WLAN_EID_EXTENSION && \
+ element->datalen > 0 && \
+ element->data[0] == (extid))
+
+#define for_each_subelement(sub, element) \
+ for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element) \
+ for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element) \
+ for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of, otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+ const void *data, size_t datalen)
+{
+ return (const u8 *)element == (const u8 *)data + datalen;
+}
+
#endif /* LINUX_IEEE80211_H */
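The element iteration helpers are typically used as below; ies and ies_len stand for an information-element buffer and its length and are assumptions, not part of this hunk:

	const struct element *elem;

	for_each_element_id(elem, WLAN_EID_SSID, ies, ies_len) {
		/* elem->data points at elem->datalen bytes of SSID */
	}

	/* the loop above is never broken out of, so this check is valid */
	if (!for_each_element_completed(elem, ies, ies_len))
		pr_debug("malformed IEs\n");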
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 6756fea18b69..e44746de95cd 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_IPGRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
+ case ARPHRD_RAWIP:
return false;
default:
return true;
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 119f53941c12..cc85f4524dbf 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -18,6 +18,7 @@
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/in.h>
+#include <linux/ip.h>
#include <linux/refcount.h>
#include <uapi/linux/igmp.h>
@@ -106,6 +107,14 @@ struct ip_mc_list {
#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
+static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (skb_transport_offset(skb) + ip_transport_len(skb) < len)
+ return -EINVAL;
+
+ return pskb_may_pull(skb, len);
+}
+
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
@@ -128,8 +137,14 @@ extern void ip_mc_up(struct in_device *);
extern void ip_mc_down(struct in_device *);
extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
-extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
+extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp);
+static inline void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
+{
+ return __ip_mc_dec_group(in_dev, addr, GFP_KERNEL);
+}
+extern void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
+ gfp_t gfp);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
+int ip_mc_check_igmp(struct sk_buff *skb);
#endif
diff --git a/include/linux/ihex.h b/include/linux/ihex.h
index 75c194391869..98cb5ce0b0a0 100644
--- a/include/linux/ihex.h
+++ b/include/linux/ihex.h
@@ -21,12 +21,24 @@ struct ihex_binrec {
uint8_t data[0];
} __attribute__((packed));
+static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p)
+{
+ return be16_to_cpu(p->len) + sizeof(*p);
+}
+
/* Find the next record, taking into account the 4-byte alignment */
static inline const struct ihex_binrec *
+__ihex_next_binrec(const struct ihex_binrec *rec)
+{
+ const void *p = rec;
+
+ return p + ALIGN(ihex_binrec_size(rec), 4);
+}
+
+static inline const struct ihex_binrec *
ihex_next_binrec(const struct ihex_binrec *rec)
{
- int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
- rec = (void *)&rec->data[next];
+ rec = __ihex_next_binrec(rec);
return be16_to_cpu(rec->len) ? rec : NULL;
}
@@ -34,18 +46,15 @@ ihex_next_binrec(const struct ihex_binrec *rec)
/* Check that ihex_next_binrec() won't take us off the end of the image... */
static inline int ihex_validate_fw(const struct firmware *fw)
{
- const struct ihex_binrec *rec;
- size_t ofs = 0;
+ const struct ihex_binrec *end, *rec;
- while (ofs <= fw->size - sizeof(*rec)) {
- rec = (void *)&fw->data[ofs];
+ rec = (const void *)fw->data;
+ end = (const void *)&fw->data[fw->size - sizeof(*end)];
+ for (; rec <= end; rec = __ihex_next_binrec(rec)) {
/* Zero length marks end of records */
- if (!be16_to_cpu(rec->len))
+ if (rec == end && !be16_to_cpu(rec->len))
return 0;
-
- /* Point to next record... */
- ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
}
return -EINVAL;
}
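A loader would normally validate once and then walk the records with the shared stride helper. A hedged sketch where "sample/fw.fw" and write_block() are hypothetical placeholders:

	const struct ihex_binrec *rec;
	const struct firmware *fw;
	int err;

	err = request_ihex_firmware(&fw, "sample/fw.fw", dev);
	if (err)
		return err;	/* request_ihex_firmware() validates internally */

	for (rec = (const void *)fw->data; rec; rec = ihex_next_binrec(rec))
		write_block(dev, be32_to_cpu(rec->addr), rec->data,
			    be16_to_cpu(rec->len));

	release_firmware(fw);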
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 8092b8e7f37e..45e9667f0a8c 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -260,6 +260,7 @@ struct st_sensor_settings {
struct st_sensor_data {
struct device *dev;
struct iio_trigger *trig;
+ struct iio_mount_matrix *mount_matrix;
struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
struct regulator *vdd;
diff --git a/include/linux/in.h b/include/linux/in.h
index 31b493734763..435e7f2a513a 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -60,6 +60,11 @@ static inline bool ipv4_is_lbcast(__be32 addr)
return addr == htonl(INADDR_BROADCAST);
}
+static inline bool ipv4_is_all_snoopers(__be32 addr)
+{
+ return addr == htonl(INADDR_ALLSNOOPERS_GROUP);
+}
+
static inline bool ipv4_is_zeronet(__be32 addr)
{
return (addr & htonl(0xff000000)) == htonl(0x00000000);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a7083a45a26c..6049baa5b8bc 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -13,6 +13,7 @@
#include <linux/securebits.h>
#include <linux/seqlock.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/sched/autogroup.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index 14beaff9b445..d77fe34fb00a 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -25,3 +25,6 @@ extern phys_addr_t phys_initrd_start;
extern unsigned long phys_initrd_size;
extern unsigned int real_root_dev;
+
+extern char __initramfs_start[];
+extern unsigned long __initramfs_size;
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
new file mode 100644
index 000000000000..63caccadc2db
--- /dev/null
+++ b/include/linux/interconnect-provider.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __LINUX_INTERCONNECT_PROVIDER_H
+#define __LINUX_INTERCONNECT_PROVIDER_H
+
+#include <linux/interconnect.h>
+
+#define icc_units_to_bps(bw) ((bw) * 1000ULL)
+
+struct icc_node;
+struct of_phandle_args;
+
+/**
+ * struct icc_onecell_data - driver data for onecell interconnect providers
+ *
+ * @num_nodes: number of nodes in this device
+ * @nodes: array of pointers to the nodes in this device
+ */
+struct icc_onecell_data {
+ unsigned int num_nodes;
+ struct icc_node *nodes[];
+};
+
+struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+ void *data);
+
+/**
+ * struct icc_provider - interconnect provider (controller) entity that might
+ * provide multiple interconnect controls
+ *
+ * @provider_list: list of the registered interconnect providers
+ * @nodes: internal list of the interconnect provider nodes
+ * @set: pointer to device specific set operation function
+ * @aggregate: pointer to device specific aggregate operation function
+ * @xlate: provider-specific callback for mapping nodes from phandle arguments
+ * @dev: the device this interconnect provider belongs to
+ * @users: count of active users
+ * @data: pointer to private data
+ */
+struct icc_provider {
+ struct list_head provider_list;
+ struct list_head nodes;
+ int (*set)(struct icc_node *src, struct icc_node *dst);
+ int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw,
+ u32 *agg_avg, u32 *agg_peak);
+ struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
+ struct device *dev;
+ int users;
+ void *data;
+};
+
+/**
+ * struct icc_node - entity that is part of the interconnect topology
+ *
+ * @id: platform specific node id
+ * @name: node name used in debugfs
+ * @links: a list of targets pointing to where we can go next when traversing
+ * @num_links: number of links to other interconnect nodes
+ * @provider: points to the interconnect provider of this node
+ * @node_list: the list entry in the parent provider's "nodes" list
+ * @search_list: list used when walking the nodes graph
+ * @reverse: pointer to previous node when walking the nodes graph
+ * @is_traversed: flag that is used when walking the nodes graph
+ * @req_list: a list of QoS constraint requests associated with this node
+ * @avg_bw: aggregated value of average bandwidth requests from all consumers
+ * @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @data: pointer to private data
+ */
+struct icc_node {
+ int id;
+ const char *name;
+ struct icc_node **links;
+ size_t num_links;
+
+ struct icc_provider *provider;
+ struct list_head node_list;
+ struct list_head search_list;
+ struct icc_node *reverse;
+ u8 is_traversed:1;
+ struct hlist_head req_list;
+ u32 avg_bw;
+ u32 peak_bw;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+struct icc_node *icc_node_create(int id);
+void icc_node_destroy(int id);
+int icc_link_create(struct icc_node *node, const int dst_id);
+int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
+void icc_node_add(struct icc_node *node, struct icc_provider *provider);
+void icc_node_del(struct icc_node *node);
+int icc_provider_add(struct icc_provider *provider);
+int icc_provider_del(struct icc_provider *provider);
+
+#else
+
+static inline struct icc_node *icc_node_create(int id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void icc_node_destroy(int id)
+{
+}
+
+static inline int icc_link_create(struct icc_node *node, const int dst_id)
+{
+ return -ENOTSUPP;
+}
+
+static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+{
+ return -ENOTSUPP;
+}
+
+static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+{
+}
+
+static inline void icc_node_del(struct icc_node *node)
+{
+}
+
+static inline int icc_provider_add(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+static inline int icc_provider_del(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_PROVIDER_H */
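
Provider-side sketch of the API above, assuming a made-up platform driver with one CPU master and one DDR slave node; the IDs, node names, callbacks and error handling are illustrative only, while the registration order (provider first, then nodes and links) follows what the framework expects.

#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Hypothetical node IDs for this example topology */
#define EX_MASTER_CPU	0
#define EX_SLAVE_DDR	1

static int example_icc_set(struct icc_node *src, struct icc_node *dst)
{
	/* program the hardware from src/dst ->avg_bw and ->peak_bw here */
	return 0;
}

static int example_icc_aggregate(struct icc_node *node, u32 avg_bw, u32 peak_bw,
				 u32 *agg_avg, u32 *agg_peak)
{
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);
	return 0;
}

static int example_icc_probe(struct platform_device *pdev)
{
	struct icc_provider *provider;
	struct icc_node *cpu, *ddr;
	int ret;

	provider = devm_kzalloc(&pdev->dev, sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return -ENOMEM;

	provider->dev = &pdev->dev;
	provider->set = example_icc_set;
	provider->aggregate = example_icc_aggregate;
	/* ->xlate/->data (e.g. of_icc_xlate_onecell) omitted for brevity */

	ret = icc_provider_add(provider);
	if (ret)
		return ret;

	cpu = icc_node_create(EX_MASTER_CPU);
	ddr = icc_node_create(EX_SLAVE_DDR);
	if (IS_ERR(cpu) || IS_ERR(ddr)) {
		icc_provider_del(provider);
		return -ENOMEM;
	}

	cpu->name = "cpu";
	ddr->name = "ddr";
	icc_node_add(cpu, provider);
	icc_node_add(ddr, provider);
	icc_link_create(cpu, EX_SLAVE_DDR);

	return 0;
}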
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
new file mode 100644
index 000000000000..dc25864755ba
--- /dev/null
+++ b/include/linux/interconnect.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __LINUX_INTERCONNECT_H
+#define __LINUX_INTERCONNECT_H
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* macros for converting to icc units */
+#define Bps_to_icc(x) ((x) / 1000)
+#define kBps_to_icc(x) (x)
+#define MBps_to_icc(x) ((x) * 1000)
+#define GBps_to_icc(x) ((x) * 1000 * 1000)
+#define bps_to_icc(x) (1)
+#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0))
+#define Mbps_to_icc(x) ((x) * 1000 / 8)
+#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8)
+
+struct icc_path;
+struct device;
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+struct icc_path *icc_get(struct device *dev, const int src_id,
+ const int dst_id);
+struct icc_path *of_icc_get(struct device *dev, const char *name);
+void icc_put(struct icc_path *path);
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
+
+#else
+
+static inline struct icc_path *icc_get(struct device *dev, const int src_id,
+ const int dst_id)
+{
+ return NULL;
+}
+
+static inline struct icc_path *of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline void icc_put(struct icc_path *path)
+{
+}
+
+static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+ return 0;
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_H */
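
Consumer-side counterpart, as a sketch: the "cpu-mem" path name and the bandwidth figures are invented, and the unit helpers above convert MB/s into the framework's kBps-based units.

#include <linux/device.h>
#include <linux/interconnect.h>

static int example_request_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* "cpu-mem" would be an interconnect-names entry in the DT node */
	path = of_icc_get(dev, "cpu-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* request 100 MB/s average, 200 MB/s peak */
	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
	if (ret)
		dev_err(dev, "interconnect bandwidth request failed\n");

	icc_put(path);
	return ret;
}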
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c672f34235e7..690b238a44d5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -156,6 +156,10 @@ __request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
void __percpu *percpu_dev_id);
+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev);
+
static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
@@ -164,9 +168,16 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
devname, percpu_dev_id);
}
+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *dev);
+
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
+
struct device;
extern int __must_check
@@ -217,6 +228,13 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);
+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
@@ -241,25 +259,35 @@ struct irq_affinity_notify {
void (*release)(struct kref *ref);
};
+#define IRQ_AFFINITY_MAX_SETS 4
+
/**
* struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
- * @nr_sets: Length of passed in *sets array
- * @sets: Number of affinitized sets
+ * @nr_sets: The number of interrupt sets for which affinity
+ * spreading is required
+ * @set_size: Array holding the size of each interrupt set
+ * @calc_sets: Callback for calculating the number and size
+ * of interrupt sets
+ * @priv: Private data for usage by @calc_sets, usually a
+ * pointer to driver/device specific data.
*/
struct irq_affinity {
- int pre_vectors;
- int post_vectors;
- int nr_sets;
- int *sets;
+ unsigned int pre_vectors;
+ unsigned int post_vectors;
+ unsigned int nr_sets;
+ unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
+ void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+ void *priv;
};
/**
* struct irq_affinity_desc - Interrupt affinity descriptor
* @mask: cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
*/
struct irq_affinity_desc {
struct cpumask mask;
@@ -313,9 +341,10 @@ extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
struct irq_affinity_desc *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -349,13 +378,14 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
}
static inline struct irq_affinity_desc *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
return NULL;
}
-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd)
{
return maxvec;
}
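
A sketch of how a multiqueue PCI driver might use the reworked struct irq_affinity: the callback splits the spreadable vectors into two hypothetical queue sets, and pci_alloc_irq_vectors_affinity() consumes the descriptor. Names and the split policy are made up.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical policy: half the vectors for "read" queues, the rest for "write" */
static void example_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	affd->nr_sets = 2;
	affd->set_size[0] = nvecs / 2;
	affd->set_size[1] = nvecs - nvecs / 2;
}

static int example_setup_irqs(struct pci_dev *pdev, unsigned int max_vecs)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* one non-spread admin interrupt */
		.calc_sets	= example_calc_sets,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}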
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index 862d786a904f..ae21b72cce85 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -55,4 +55,68 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#define writeq_relaxed hi_lo_writeq_relaxed
#endif
+#ifndef ioread64_hi_lo
+#define ioread64_hi_lo ioread64_hi_lo
+static inline u64 ioread64_hi_lo(void __iomem *addr)
+{
+ u32 low, high;
+
+ high = ioread32(addr + sizeof(u32));
+ low = ioread32(addr);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64_hi_lo
+#define iowrite64_hi_lo iowrite64_hi_lo
+static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ iowrite32(val >> 32, addr + sizeof(u32));
+ iowrite32(val, addr);
+}
+#endif
+
+#ifndef ioread64be_hi_lo
+#define ioread64be_hi_lo ioread64be_hi_lo
+static inline u64 ioread64be_hi_lo(void __iomem *addr)
+{
+ u32 low, high;
+
+ high = ioread32be(addr);
+ low = ioread32be(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64be_hi_lo
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ iowrite32be(val >> 32, addr);
+ iowrite32be(val, addr + sizeof(u32));
+}
+#endif
+
+#ifndef ioread64
+#define ioread64_is_nonatomic
+#define ioread64 ioread64_hi_lo
+#endif
+
+#ifndef iowrite64
+#define iowrite64_is_nonatomic
+#define iowrite64 iowrite64_hi_lo
+#endif
+
+#ifndef ioread64be
+#define ioread64be_is_nonatomic
+#define ioread64be ioread64be_hi_lo
+#endif
+
+#ifndef iowrite64be
+#define iowrite64be_is_nonatomic
+#define iowrite64be iowrite64be_hi_lo
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
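
Illustration of the fallback these additions provide (register offset and helper name invented): on architectures without native 64-bit MMIO, ioread64() below resolves to ioread64_hi_lo(), i.e. two 32-bit reads, high word first.

#include <linux/io-64-nonatomic-hi-lo.h>

#define EXAMPLE_COUNTER_REG	0x40	/* hypothetical 64-bit MMIO register */

static u64 example_read_counter(void __iomem *base)
{
	return ioread64(base + EXAMPLE_COUNTER_REG);
}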
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index d042e7bb5adb..faaa842dbdb9 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -55,4 +55,68 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#define writeq_relaxed lo_hi_writeq_relaxed
#endif
+#ifndef ioread64_lo_hi
+#define ioread64_lo_hi ioread64_lo_hi
+static inline u64 ioread64_lo_hi(void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32(addr);
+ high = ioread32(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64_lo_hi
+#define iowrite64_lo_hi iowrite64_lo_hi
+static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+ iowrite32(val >> 32, addr + sizeof(u32));
+}
+#endif
+
+#ifndef ioread64be_lo_hi
+#define ioread64be_lo_hi ioread64be_lo_hi
+static inline u64 ioread64be_lo_hi(void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32be(addr + sizeof(u32));
+ high = ioread32be(addr);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64be_lo_hi
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32be(val, addr + sizeof(u32));
+ iowrite32be(val >> 32, addr);
+}
+#endif
+
+#ifndef ioread64
+#define ioread64_is_nonatomic
+#define ioread64 ioread64_lo_hi
+#endif
+
+#ifndef iowrite64
+#define iowrite64_is_nonatomic
+#define iowrite64 iowrite64_lo_hi
+#endif
+
+#ifndef ioread64be
+#define ioread64be_is_nonatomic
+#define ioread64be ioread64be_lo_hi
+#endif
+
+#ifndef iowrite64be
+#define iowrite64be_is_nonatomic
+#define iowrite64be iowrite64be_lo_hi
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 492bc6513533..482b7b7c9f30 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -34,4 +34,9 @@ static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_transport_header(skb);
}
+
+static inline unsigned int ip_transport_len(const struct sk_buff *skb)
+{
+ return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
+}
#endif /* _LINUX_IP_H */
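
A small usage sketch for ip_transport_len(), assuming the skb's network and transport headers are already set (as in the IGMP snooping paths this helper is aimed at); the function name is invented.

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/udp.h>

/* sanity-check that the IP payload is at least a UDP header long */
static bool example_udp_len_ok(const struct sk_buff *skb)
{
	return ip_transport_len(skb) >= sizeof(struct udphdr);
}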
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 495e834c1367..ea7c7906591e 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -104,6 +104,12 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
return (struct ipv6hdr *)skb_transport_header(skb);
}
+static inline unsigned int ipv6_transport_len(const struct sk_buff *skb)
+{
+ return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) -
+ skb_network_header_len(skb);
+}
+
/*
This structure contains results of exthdrs parsing
as offsets from skb->nh.
@@ -275,7 +281,8 @@ struct ipv6_pinfo {
dontfrag:1,
autoflowlabel:1,
autoflowlabel_set:1,
- mc_all:1;
+ mc_all:1,
+ rtalert_isolate:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index def2b2aac8b1..5e91f6bcaacd 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -442,6 +442,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
* @ipi_send_single: send a single IPI to destination cpus
* @ipi_send_mask: send an IPI to destination cpus in cpumask
+ * @irq_nmi_setup: function called from core code before enabling an NMI
+ * @irq_nmi_teardown: function called from core code after disabling an NMI
* @flags: chip specific flags
*/
struct irq_chip {
@@ -490,6 +492,9 @@ struct irq_chip {
void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
+ int (*irq_nmi_setup)(struct irq_data *data);
+ void (*irq_nmi_teardown)(struct irq_data *data);
+
unsigned long flags;
};
@@ -505,6 +510,7 @@ struct irq_chip {
* IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
* IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
* IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs
+ * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
@@ -515,6 +521,7 @@ enum {
IRQCHIP_ONESHOT_SAFE = (1 << 5),
IRQCHIP_EOI_THREADED = (1 << 6),
IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
+ IRQCHIP_SUPPORTS_NMI = (1 << 8),
};
#include <linux/irqdesc.h>
@@ -594,6 +601,9 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
+extern void handle_fasteoi_nmi(struct irq_desc *desc);
+extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
+
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
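
An irqchip driver opting into NMI delivery would, roughly, provide the two new callbacks and advertise IRQCHIP_SUPPORTS_NMI; the chip name and the callback bodies here are placeholders.

#include <linux/irq.h>

static int example_irq_nmi_setup(struct irq_data *d)
{
	/* switch this line to NMI (or pseudo-NMI) priority in hardware */
	return 0;
}

static void example_irq_nmi_teardown(struct irq_data *d)
{
	/* restore normal interrupt priority */
}

static struct irq_chip example_chip = {
	.name			= "example",
	/* a real chip also fills .irq_mask/.irq_unmask/.irq_eoi etc. */
	.irq_nmi_setup		= example_irq_nmi_setup,
	.irq_nmi_teardown	= example_irq_nmi_teardown,
	.flags			= IRQCHIP_SUPPORTS_NMI,
};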
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 071b4cbdf010..c848a7cc502e 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -319,7 +319,7 @@
#define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
-#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
diff --git a/include/linux/irqchip/irq-davinci-aintc.h b/include/linux/irqchip/irq-davinci-aintc.h
new file mode 100644
index 000000000000..ea4e087fac98
--- /dev/null
+++ b/include/linux/irqchip/irq-davinci-aintc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#ifndef _LINUX_IRQ_DAVINCI_AINTC_
+#define _LINUX_IRQ_DAVINCI_AINTC_
+
+#include <linux/ioport.h>
+
+/**
+ * struct davinci_aintc_config - configuration data for davinci-aintc driver.
+ *
+ * @reg: register range to map
+ * @num_irqs: number of HW interrupts supported by the controller
+ * @prios: an array of size num_irqs containing priority settings for
+ * each interrupt
+ */
+struct davinci_aintc_config {
+ struct resource reg;
+ unsigned int num_irqs;
+ u8 *prios;
+};
+
+void davinci_aintc_init(const struct davinci_aintc_config *config);
+
+#endif /* _LINUX_IRQ_DAVINCI_AINTC_ */
diff --git a/include/linux/irqchip/irq-davinci-cp-intc.h b/include/linux/irqchip/irq-davinci-cp-intc.h
new file mode 100644
index 000000000000..8d71ed5b5a61
--- /dev/null
+++ b/include/linux/irqchip/irq-davinci-cp-intc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#ifndef _LINUX_IRQ_DAVINCI_CP_INTC_
+#define _LINUX_IRQ_DAVINCI_CP_INTC_
+
+#include <linux/ioport.h>
+
+/**
+ * struct davinci_cp_intc_config - configuration data for davinci-cp-intc
+ * driver.
+ *
+ * @reg: register range to map
+ * @num_irqs: number of HW interrupts supported by the controller
+ */
+struct davinci_cp_intc_config {
+ struct resource reg;
+ unsigned int num_irqs;
+};
+
+int davinci_cp_intc_init(const struct davinci_cp_intc_config *config);
+
+#endif /* _LINUX_IRQ_DAVINCI_CP_INTC_ */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dd1e40ddac7d..d6e2ab538ef2 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -28,6 +28,7 @@ struct pt_regs;
* @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
+ * @tot_count: stats field for non-percpu irqs
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
@@ -65,6 +66,7 @@ struct irq_desc {
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
+ unsigned int tot_count;
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
@@ -171,6 +173,11 @@ static inline int handle_domain_irq(struct irq_domain *domain,
{
return __handle_domain_irq(domain, hwirq, true, regs);
}
+
+#ifdef CONFIG_IRQ_DOMAIN
+int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+ struct pt_regs *regs);
+#endif
#endif
/* Test to see if a driver has successfully requested an irq */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 35965f41d7be..d2130dc7c0e6 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -265,6 +265,7 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
+extern struct irq_domain *irq_get_default_host(void);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node,
const struct irq_affinity_desc *affinity);
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index d314150658a4..a61dc075e2ce 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_KASAN_CHECKS_H
#define _LINUX_KASAN_CHECKS_H
-#ifdef CONFIG_KASAN
+#if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL)
void kasan_check_read(const volatile void *p, unsigned int size);
void kasan_check_write(const volatile void *p, unsigned int size);
#else
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 8f0e68e250a7..a8868a32098c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -245,8 +245,10 @@ extern int _cond_resched(void);
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- void ___might_sleep(const char *file, int line, int preempt_offset);
- void __might_sleep(const char *file, int line, int preempt_offset);
+extern void ___might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_sleep(const char *file, int line, int preempt_offset);
+
/**
* might_sleep - annotation for functions that can sleep
*
@@ -259,6 +261,13 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+/**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+ * this macro will print a stack trace if it is executed with preemption enabled
+ */
+# define cant_sleep() \
+ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
@@ -266,6 +275,7 @@ extern int _cond_resched(void);
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
+# define cant_sleep() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif
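
Quick illustration of the new annotation (helper name invented): cant_sleep() documents, and with CONFIG_DEBUG_ATOMIC_SLEEP enforces, that a function is only entered with preemption disabled.

#include <linux/kernel.h>

/* hypothetical helper that relies on staying on the current CPU */
static void example_update_percpu_state(void)
{
	cant_sleep();

	/* ... manipulate per-cpu state that must not be preempted ... */
}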
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 5b36b1287a5a..0cac1207bb00 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -25,6 +25,7 @@ struct seq_file;
struct vm_area_struct;
struct super_block;
struct file_system_type;
+struct poll_table_struct;
struct kernfs_open_node;
struct kernfs_iattrs;
@@ -261,6 +262,9 @@ struct kernfs_ops {
ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
loff_t off);
+ __poll_t (*poll)(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
+
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -350,6 +354,8 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns);
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
void kernfs_notify(struct kernfs_node *kn);
const void *kernfs_super_ns(struct super_block *sb);
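
Sketch of wiring the new ->poll() op: kernfs_generic_poll() keeps the default kernfs semantics (EPOLLERR|EPOLLPRI on kernfs_notify()), and a custom implementation could add its own wait queues on top. The ops and show routine are invented.

#include <linux/kernfs.h>
#include <linux/poll.h>
#include <linux/seq_file.h>

static int example_kf_seq_show(struct seq_file *sf, void *v)
{
	seq_puts(sf, "example\n");
	return 0;
}

static __poll_t example_kf_poll(struct kernfs_open_file *of,
				struct poll_table_struct *pt)
{
	/* delegate to the default behaviour provided by the core */
	return kernfs_generic_poll(of, pt);
}

static const struct kernfs_ops example_kf_ops = {
	.seq_show	= example_kf_seq_show,
	.poll		= example_kf_poll,
};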
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index bc9af551fc83..e49d1de0614e 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -21,15 +21,6 @@ struct kernel_pkey_query;
struct kernel_pkey_params;
/*
- * key under-construction record
- * - passed to the request_key actor if supplied
- */
-struct key_construction {
- struct key *key; /* key being constructed */
- struct key *authkey;/* authorisation for key being constructed */
-};
-
-/*
* Pre-parsed payload, used by key add, update and instantiate.
*
* This struct will be cleared and data and datalen will be set with the data
@@ -50,8 +41,7 @@ struct key_preparsed_payload {
time64_t expiry; /* Expiry time of key */
} __randomize_layout;
-typedef int (*request_key_actor_t)(struct key_construction *key,
- const char *op, void *aux);
+typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
/*
* Preparsed matching criterion.
@@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key,
const void *data,
size_t datalen,
struct key *keyring,
- struct key *instkey);
+ struct key *authkey);
extern int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
- struct key *instkey);
-extern void complete_request_key(struct key_construction *cons, int error);
+ struct key *authkey);
+extern void complete_request_key(struct key *authkey, int error);
static inline int key_negate_and_link(struct key *key,
unsigned timeout,
struct key *keyring,
- struct key *instkey)
+ struct key *authkey)
{
- return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey);
+ return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
}
extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
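
With struct key_construction gone, a request_key actor now receives the authorisation key directly; a minimal sketch follows (how the key actually gets instantiated is elided, and the actor name is invented).

#include <linux/key-type.h>

static int example_request_key_actor(struct key *authkey, void *aux)
{
	int ret;

	/* ... kick off whatever will instantiate the key under construction ... */
	ret = 0;

	/* on failure, let the core wake up anyone waiting on the key */
	if (ret < 0)
		complete_request_key(authkey, ret);

	return ret;
}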
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 161e8164abcf..e48b1e453ff5 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -53,6 +53,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+bool reuse_ksm_page(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
#else /* !CONFIG_KSM */
@@ -86,6 +88,11 @@ static inline void rmap_walk_ksm(struct page *page,
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
+static inline bool reuse_ksm_page(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ return false;
+}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..2c89e60bc752 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -56,6 +56,7 @@ void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
+bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
@@ -85,7 +86,7 @@ enum {
struct kthread_worker {
unsigned int flags;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct list_head work_list;
struct list_head delayed_work_list;
struct task_struct *task;
@@ -106,7 +107,7 @@ struct kthread_delayed_work {
};
#define KTHREAD_WORKER_INIT(worker) { \
- .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
.work_list = LIST_HEAD_INIT((worker).work_list), \
.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
}
@@ -164,9 +165,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
#define kthread_init_delayed_work(dwork, fn) \
do { \
kthread_init_work(&(dwork)->work, (fn)); \
- __init_timer(&(dwork)->timer, \
- kthread_delayed_work_timer_fn, \
- TIMER_IRQSAFE); \
+ timer_setup(&(dwork)->timer, \
+ kthread_delayed_work_timer_fn, 0); \
} while (0)
int kthread_worker_fn(void *worker_ptr);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 5263f87e1d2c..78204650fe2a 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -219,6 +219,19 @@ extern int led_set_brightness_sync(struct led_classdev *led_cdev,
extern int led_update_brightness(struct led_classdev *led_cdev);
/**
+ * led_get_default_pattern - return default pattern
+ *
+ * @led_cdev: the LED to get default pattern for
+ * @size: pointer for storing the number of elements in returned array,
+ * modified only if return != NULL
+ *
+ * Return: Allocated array of integers with default pattern from device tree
+ * or NULL. Caller is responsible for kfree().
+ */
+extern u32 *led_get_default_pattern(struct led_classdev *led_cdev,
+ unsigned int *size);
+
+/**
* led_sysfs_disable - disable LED sysfs interface
* @led_cdev: the LED to set
*
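
Trigger-side sketch for led_get_default_pattern() (error handling trimmed, consumer code invented): the returned array is allocated by the core and must be freed by the caller, as the kernel-doc above notes.

#include <linux/leds.h>
#include <linux/slab.h>

static void example_apply_default_pattern(struct led_classdev *led_cdev)
{
	unsigned int size = 0;
	u32 *pattern;

	pattern = led_get_default_pattern(led_cdev, &size);
	if (!pattern)
		return;

	/* ... hand the size-element pattern over to the trigger ... */

	kfree(pattern);
}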
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 5440f11b0907..ad609617aeb8 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
enum nvdimm_security_state {
+ NVDIMM_SECURITY_ERROR = -1,
NVDIMM_SECURITY_DISABLED,
NVDIMM_SECURITY_UNLOCKED,
NVDIMM_SECURITY_LOCKED,
@@ -234,7 +235,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
cmd_mask, num_flush, flush_wpq, NULL, NULL);
}
-int nvdimm_security_setup_events(struct nvdimm *nvdimm);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
diff --git a/include/linux/list.h b/include/linux/list.h
index edb7628e46ed..79626b5ab36c 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -207,6 +207,17 @@ static inline void list_bulk_move_tail(struct list_head *head,
}
/**
+ * list_is_first - tests whether @list is the first entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_first(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->prev == head;
+}
+
+/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
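
For symmetry with list_is_last(), a tiny usage sketch (the item type is invented): peek at the previous element only when the current one is not the list's first entry.

#include <linux/list.h>

struct example_item {
	struct list_head node;
	int value;
};

static struct example_item *example_prev(struct example_item *it,
					 struct list_head *head)
{
	if (list_is_first(&it->node, head))
		return NULL;
	return list_prev_entry(it, node);
}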
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c5335df2372f..79c3873d58ac 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -46,16 +46,22 @@ extern int lock_stat;
#define NR_LOCKDEP_CACHING_CLASSES 2
/*
- * Lock-classes are keyed via unique addresses, by embedding the
- * lockclass-key into the kernel (or module) .data section. (For
- * static locks we use the lock address itself as the key.)
+ * A lockdep key is associated with each lock object. For static locks we use
+ * the lock address itself as the key. Dynamically allocated lock objects can
+ * have a statically or dynamically allocated key. Dynamically allocated lock
+ * keys must be registered before being used and must be unregistered before
+ * the key memory is freed.
*/
struct lockdep_subclass_key {
char __one_byte;
} __attribute__ ((__packed__));
+/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
- struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+ union {
+ struct hlist_node hash_entry;
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+ };
};
extern struct lock_class_key __lockdep_no_validate__;
@@ -63,7 +69,8 @@ extern struct lock_class_key __lockdep_no_validate__;
#define LOCKSTAT_POINTS 4
/*
- * The lock-class itself:
+ * The lock-class itself. The order of the structure members matters.
+ * reinit_class() zeroes the key member and all subsequent members.
*/
struct lock_class {
/*
@@ -72,10 +79,19 @@ struct lock_class {
struct hlist_node hash_entry;
/*
- * global list of all lock-classes:
+ * Entry in all_lock_classes when in use. Entry in free_lock_classes
+ * when not in use. Instances that are being freed are on one of the
+ * zapped_classes lists.
*/
struct list_head lock_entry;
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
struct lockdep_subclass_key *key;
unsigned int subclass;
unsigned int dep_gen_id;
@@ -87,13 +103,6 @@ struct lock_class {
struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
/*
- * These fields represent a directed graph of lock dependencies,
- * to every node we attach a list of "forward" and a list of
- * "backward" graph nodes.
- */
- struct list_head locks_after, locks_before;
-
- /*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
@@ -104,7 +113,7 @@ struct lock_class {
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
#endif
-};
+} __no_randomize_layout;
#ifdef CONFIG_LOCK_STAT
struct lock_time {
@@ -178,6 +187,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
struct lock_list {
struct list_head entry;
struct lock_class *class;
+ struct lock_class *links_to;
struct stack_trace trace;
int distance;
@@ -264,10 +274,14 @@ extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
+extern void lockdep_set_selftest_task(struct task_struct *task);
extern void lockdep_off(void);
extern void lockdep_on(void);
+extern void lockdep_register_key(struct lock_class_key *key);
+extern void lockdep_unregister_key(struct lock_class_key *key);
+
/*
* These methods are used by specific locking variants (spinlocks,
* rwlocks, mutexes and rwsems) to pass init/acquire/release events
@@ -394,6 +408,10 @@ static inline void lockdep_on(void)
{
}
+static inline void lockdep_set_selftest_task(struct task_struct *task)
+{
+}
+
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
@@ -425,6 +443,14 @@ static inline void lockdep_on(void)
*/
struct lock_class_key { };
+static inline void lockdep_register_key(struct lock_class_key *key)
+{
+}
+
+static inline void lockdep_unregister_key(struct lock_class_key *key)
+{
+}
+
/*
* The lockdep_map takes no space if lockdep is disabled:
*/
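
Sketch of the dynamic-key API for a lock embedded in a dynamically allocated object (struct and functions invented): the key is registered before first use and unregistered before the memory holding it is freed, as the comment above requires.

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj {
	spinlock_t		lock;
	struct lock_class_key	key;
};

static struct example_obj *example_obj_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	lockdep_register_key(&obj->key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
	return obj;
}

static void example_obj_free(struct example_obj *obj)
{
	/* must run before the memory containing the key disappears */
	lockdep_unregister_key(&obj->key);
	kfree(obj);
}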
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index d0b5c7a05832..85a301632cf1 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1270,7 +1270,7 @@
* @cred contains the credentials to use.
* @ns contains the user namespace we want the capability in
* @cap contains the capability <include/linux/capability.h>.
- * @audit contains whether to write an audit message or not
+ * @opts contains options for the capable check <include/linux/security.h>
* Return 0 if the capability is granted for @tsk.
* @syslog:
* Check permission before accessing the kernel message ring or changing
@@ -1445,8 +1445,10 @@ union security_list_options {
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
- int (*capable)(const struct cred *cred, struct user_namespace *ns,
- int cap, int audit);
+ int (*capable)(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts);
int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
int (*quota_on)(struct dentry *dentry);
int (*syslog)(int type);
@@ -2026,6 +2028,18 @@ struct security_hook_list {
} __randomize_layout;
/*
+ * Security blob size or offset data.
+ */
+struct lsm_blob_sizes {
+ int lbs_cred;
+ int lbs_file;
+ int lbs_inode;
+ int lbs_ipc;
+ int lbs_msg_msg;
+ int lbs_task;
+};
+
+/*
* Initializing a security_hook_list structure takes
* up a lot of space in a source file. This macro takes
* care of the common case and reduces the amount of
@@ -2040,9 +2054,21 @@ extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
char *lsm);
+#define LSM_FLAG_LEGACY_MAJOR BIT(0)
+#define LSM_FLAG_EXCLUSIVE BIT(1)
+
+enum lsm_order {
+ LSM_ORDER_FIRST = -1, /* This is only for capabilities. */
+ LSM_ORDER_MUTABLE = 0,
+};
+
struct lsm_info {
const char *name; /* Required. */
+ enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */
+ unsigned long flags; /* Optional: flags describing LSM */
+ int *enabled; /* Optional: controlled by CONFIG_LSM */
int (*init)(void); /* Required. */
+ struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */
};
extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
@@ -2082,17 +2108,6 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
#define __lsm_ro_after_init __ro_after_init
#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */
-extern int __init security_module_enable(const char *module);
-extern void __init capability_add_hooks(void);
-#ifdef CONFIG_SECURITY_YAMA
-extern void __init yama_add_hooks(void);
-#else
-static inline void __init yama_add_hooks(void) { }
-#endif
-#ifdef CONFIG_SECURITY_LOADPIN
-void __init loadpin_add_hooks(void);
-#else
-static inline void loadpin_add_hooks(void) { };
-#endif
+extern int lsm_inode_alloc(struct inode *inode);
#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 1eb6f244588d..73d04743a2bb 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -20,6 +20,8 @@
#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E1545 0x01410ea0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+#define MARVELL_PHY_ID_88X3310 0x002b09a0
+#define MARVELL_PHY_ID_88E2110 0x002b09b0
/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
* not have a model ID. So the switch driver traps reads to the ID2
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index b6e048e1045f..d7aee90e5da5 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -120,7 +120,7 @@ struct mdev_driver {
extern void *mdev_get_drvdata(struct mdev_device *mdev);
extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
-extern uuid_le mdev_uuid(struct mdev_device *mdev);
+extern const guid_t *mdev_uuid(struct mdev_device *mdev);
extern struct bus_type mdev_bus_type;
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index bfa7114167d7..3e99ae3ed87f 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -261,6 +261,50 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
return reg;
}
+/**
+ * linkmode_adv_to_mii_10gbt_adv_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the C45
+ * 10GBASE-T AN CONTROL (7.32) register.
+ */
+static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV2_5G;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV5G;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+ return result;
+}
+
+/**
+ * mii_10gbt_stat_mod_linkmode_lpa_t
+ * @advertising: target the linkmode advertisement settings
+ * @adv: value of the C45 10GBASE-T AN STATUS register
+ *
+ * A small helper function that translates C45 10GBASE-T AN STATUS register bits
+ * to linkmode advertisement settings. Other bits in advertising aren't changed.
+ */
+static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
+ u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP2_5G);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP5G);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
+}
+
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
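
A read_status-style sketch for the 10GBASE-T helpers (driver boilerplate omitted): MDIO_AN_10GBT_STAT is the C45 7.33 status register, and phydev->lp_advertising is assumed to be a linkmode bitmap as in current phylib.

#include <linux/mdio.h>
#include <linux/phy.h>

static int example_read_10gbt_lpa(struct phy_device *phydev)
{
	int lpa;

	lpa = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
	if (lpa < 0)
		return lpa;

	/* fold the partner's 2.5G/5G/10G abilities into lp_advertising */
	mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
	return 0;
}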
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 7fde40e17c8b..03b6ba2a63f8 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -55,6 +55,8 @@ struct mei_cl_device {
void *priv_data;
};
+#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
+
struct mei_cl_driver {
struct device_driver driver;
const char *name;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 64c41cf45590..859b55b66db2 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -29,9 +29,6 @@ extern unsigned long max_pfn;
*/
extern unsigned long long max_possible_pfn;
-#define INIT_MEMBLOCK_REGIONS 128
-#define INIT_PHYSMEM_REGIONS 4
-
/**
* enum memblock_flags - definition of memory region attributes
* @MEMBLOCK_NONE: no special request
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 83ae11cbd12c..1f3d880b7ca1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -429,6 +429,11 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+ return mem_cgroup_from_css(seq_css(m));
+}
+
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
struct mem_cgroup_per_node *mz;
@@ -937,6 +942,11 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+ return NULL;
+}
+
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
return NULL;
@@ -1273,12 +1283,12 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
-int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
- struct mem_cgroup *memcg);
#ifdef CONFIG_MEMCG_KMEM
-int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void memcg_kmem_uncharge(struct page *page, int order);
+int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge(struct page *page, int order);
+int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+ struct mem_cgroup *memcg);
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;
@@ -1300,6 +1310,26 @@ static inline bool memcg_kmem_enabled(void)
return static_branch_unlikely(&memcg_kmem_enabled_key);
}
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+ if (memcg_kmem_enabled())
+ return __memcg_kmem_charge(page, gfp, order);
+ return 0;
+}
+
+static inline void memcg_kmem_uncharge(struct page *page, int order)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_uncharge(page, order);
+}
+
+static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
+ int order, struct mem_cgroup *memcg)
+{
+ if (memcg_kmem_enabled())
+ return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
+ return 0;
+}
/*
* helper for accessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function
@@ -1325,6 +1355,15 @@ static inline void memcg_kmem_uncharge(struct page *page, int order)
{
}
+static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+ return 0;
+}
+
+static inline void __memcg_kmem_uncharge(struct page *page, int order)
+{
+}
+
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 07da5c6c5ba0..52869d6d38b3 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -21,14 +21,16 @@ struct vmem_altmap;
* walkers which rely on the fully initialized page->flags and others
* should use this rather than pfn_valid && pfn_to_page
*/
-#define pfn_to_online_page(pfn) \
-({ \
- struct page *___page = NULL; \
- unsigned long ___nr = pfn_to_section_nr(pfn); \
- \
- if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
- ___page = pfn_to_page(pfn); \
- ___page; \
+#define pfn_to_online_page(pfn) \
+({ \
+ struct page *___page = NULL; \
+ unsigned long ___pfn = pfn; \
+ unsigned long ___nr = pfn_to_section_nr(___pfn); \
+ \
+ if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
+ pfn_valid_within(___pfn)) \
+ ___page = pfn_to_page(___pfn); \
+ ___page; \
})
/*
@@ -87,7 +89,7 @@ extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);
-typedef void (*online_page_callback_t)(struct page *page);
+typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h
new file mode 100644
index 000000000000..ed37dc40e82a
--- /dev/null
+++ b/include/linux/mfd/bcm2835-pm.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef BCM2835_MFD_PM_H
+#define BCM2835_MFD_PM_H
+
+#include <linux/regmap.h>
+
+struct bcm2835_pm {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *asb;
+};
+
+#endif /* BCM2835_MFD_PM_H */
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 9a9631f0559e..fc91082d4c35 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2791,6 +2791,100 @@ struct ec_response_battery_vendor_param {
} __packed;
/*****************************************************************************/
+/* Commands for I2S recording on audio codec. */
+
+#define EC_CMD_CODEC_I2S 0x00BC
+
+enum ec_codec_i2s_subcmd {
+ EC_CODEC_SET_SAMPLE_DEPTH = 0x0,
+ EC_CODEC_SET_GAIN = 0x1,
+ EC_CODEC_GET_GAIN = 0x2,
+ EC_CODEC_I2S_ENABLE = 0x3,
+ EC_CODEC_I2S_SET_CONFIG = 0x4,
+ EC_CODEC_I2S_SET_TDM_CONFIG = 0x5,
+ EC_CODEC_I2S_SET_BCLK = 0x6,
+};
+
+enum ec_sample_depth_value {
+ EC_CODEC_SAMPLE_DEPTH_16 = 0,
+ EC_CODEC_SAMPLE_DEPTH_24 = 1,
+};
+
+enum ec_i2s_config {
+ EC_DAI_FMT_I2S = 0,
+ EC_DAI_FMT_RIGHT_J = 1,
+ EC_DAI_FMT_LEFT_J = 2,
+ EC_DAI_FMT_PCM_A = 3,
+ EC_DAI_FMT_PCM_B = 4,
+ EC_DAI_FMT_PCM_TDM = 5,
+};
+
+struct ec_param_codec_i2s {
+ /*
+ * enum ec_codec_i2s_subcmd
+ */
+ uint8_t cmd;
+ union {
+ /*
+ * EC_CODEC_SET_SAMPLE_DEPTH
+ * Value should be one of ec_sample_depth_value.
+ */
+ uint8_t depth;
+
+ /*
+ * EC_CODEC_SET_GAIN
+ * Value should be 0~43 for both channels.
+ */
+ struct ec_param_codec_i2s_set_gain {
+ uint8_t left;
+ uint8_t right;
+ } __packed gain;
+
+ /*
+ * EC_CODEC_I2S_ENABLE
+ * 1 to enable, 0 to disable.
+ */
+ uint8_t i2s_enable;
+
+ /*
+ * EC_CODEC_I2S_SET_CONFIG
+ * Value should be one of ec_i2s_config.
+ */
+ uint8_t i2s_config;
+
+ /*
+ * EC_CODEC_I2S_SET_TDM_CONFIG
+ * Value should be one of ec_i2s_config.
+ */
+ struct ec_param_codec_i2s_tdm {
+ /*
+ * 0 to 496
+ */
+ int16_t ch0_delay;
+ /*
+ * -1 to 496
+ */
+ int16_t ch1_delay;
+ uint8_t adjacent_to_ch0;
+ uint8_t adjacent_to_ch1;
+ } __packed tdm_param;
+
+ /*
+ * EC_CODEC_I2S_SET_BCLK
+ */
+ uint32_t bclk;
+ };
+} __packed;
+
+/*
+ * For subcommand EC_CODEC_GET_GAIN.
+ */
+struct ec_response_codec_gain {
+ uint8_t left;
+ uint8_t right;
+} __packed;
+
+/*****************************************************************************/
/* System commands */
/*
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
index ab16ad283def..2083fa20821d 100644
--- a/include/linux/mfd/ingenic-tcu.h
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -41,7 +41,7 @@
#define TCU_TCSR_PRESCALE_LSB 3
#define TCU_TCSR_PRESCALE_MASK 0x38
-#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */
+#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */
#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index fe69c0f4398f..4d5d51a9c8a6 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -15,6 +15,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/madera/pdata.h>
+#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -37,6 +38,8 @@ enum madera_type {
#define MADERA_MAX_MICBIAS 4
+#define MADERA_MAX_HP_OUTPUT 3
+
/* Notifier events */
#define MADERA_NOTIFY_VOICE_TRIGGER 0x1
#define MADERA_NOTIFY_HPDET 0x2
@@ -183,6 +186,10 @@ struct madera {
unsigned int num_childbias[MADERA_MAX_MICBIAS];
struct snd_soc_dapm_context *dapm;
+ struct mutex dapm_ptr_lock;
+ unsigned int hp_ena;
+ bool out_clamp[MADERA_MAX_HP_OUTPUT];
+ bool out_shorted[MADERA_MAX_HP_OUTPUT];
struct blocking_notifier_head notifier;
};
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index b9a53e013bff..483168403ae5 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -78,6 +78,8 @@
#define STEPCONFIG_YNN BIT(8)
#define STEPCONFIG_XNP BIT(9)
#define STEPCONFIG_YPN BIT(10)
+#define STEPCONFIG_RFP(val) ((val) << 12)
+#define STEPCONFIG_RFP_VREFP (0x3 << 12)
#define STEPCONFIG_INM_MASK (0xF << 15)
#define STEPCONFIG_INM(val) ((val) << 15)
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
@@ -86,6 +88,8 @@
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
#define STEPCONFIG_FIFO1 BIT(26)
+#define STEPCONFIG_RFM(val) ((val) << 23)
+#define STEPCONFIG_RFM_VREFN (0x3 << 23)
/* Delay register */
#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index e2687a30e5a1..739b7bf37eaa 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -79,7 +79,7 @@
/* Some controllers have a CBSY bit */
#define TMIO_MMC_HAVE_CBSY BIT(11)
-/* Some controllers that support HS400 use use 4 taps while others use 8. */
+/* Some controllers that support HS400 use 4 taps while others use 8. */
#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13)
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index c204d9a79436..45cdcd0fee53 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -208,6 +208,7 @@ enum tps65218_regulator_id {
/* LDOs */
TPS65218_LDO_1,
/* LS's */
+ TPS65218_LS_2,
TPS65218_LS_3,
};
@@ -218,7 +219,7 @@ enum tps65218_regulator_id {
/* Number of LDO voltage regulators available */
#define TPS65218_NUM_LDO 1
/* Number of total LS current regulators available */
-#define TPS65218_NUM_LS 1
+#define TPS65218_NUM_LS 2
/* Number of total regulators available */
#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \
+ TPS65218_NUM_LS)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8c4a820bd4c1..f93a5598b942 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -67,7 +67,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
-#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
@@ -342,6 +342,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+ MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe,
+
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
@@ -591,7 +593,7 @@ struct mlx5_eqe_cmd {
};
struct mlx5_eqe_page_req {
- u8 rsvd0[2];
+ __be16 ec_function;
__be16 func_id;
__be32 num_pages;
__be32 rsvd1[5];
@@ -1201,6 +1203,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
+#define MLX5_CAP_ODP_MAX(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)
+
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 54299251d40d..5ffb5df1a2c2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -195,6 +195,7 @@ struct mlx5_rsc_debug {
enum mlx5_dev_event {
MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
+ MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};
enum mlx5_port_status {
@@ -522,6 +523,7 @@ struct mlx5_priv {
atomic_t reg_pages;
struct list_head free_list;
int vfs_pages;
+ int peer_pf_pages;
struct mlx5_core_health health;
@@ -652,6 +654,7 @@ struct mlx5_core_dev {
u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
+ u8 embedded_cpu;
} caps;
u64 sys_image_guid;
phys_addr_t iseg_base;
@@ -850,11 +853,30 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+struct mlx5_async_ctx {
+ struct mlx5_core_dev *dev;
+ atomic_t num_inflight;
+ struct wait_queue_head wait;
+};
+
+struct mlx5_async_work;
+
+typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
+
+struct mlx5_async_work {
+ struct mlx5_async_ctx *ctx;
+ mlx5_async_cbk_t user_callback;
+};
+
+void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+ struct mlx5_async_ctx *ctx);
+void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
+int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+ void *out, int out_size, mlx5_async_cbk_t callback,
+ struct mlx5_async_work *work);
+
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
- void *out, int out_size, mlx5_cmd_cbk_t callback,
- void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
@@ -885,9 +907,10 @@ void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
- u32 *in, int inlen,
- u32 *out, int outlen,
- mlx5_cmd_cbk_t callback, void *context);
+ struct mlx5_async_ctx *async_ctx, u32 *in,
+ int inlen, u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen);
@@ -897,14 +920,12 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
- u16 opmod, u8 port);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages);
+ s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@@ -1021,6 +1042,7 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
@@ -1058,11 +1080,29 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
-#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
-#define MLX5_VPORT_MANAGER(mdev) \
- (MLX5_CAP_GEN(mdev, vport_group_manager) && \
- (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
- mlx5_core_is_pf(mdev))
+static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
+{
+ return dev->caps.embedded_cpu;
+}
+
+static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev)
+{
+ return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
+}
+
+static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
+}
+
+#define MLX5_HOST_PF_MAX_VFS (127u)
+static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_ecpf_esw_manager(dev))
+ return MLX5_HOST_PF_MAX_VFS;
+ else
+ return pci_sriov_get_totalvfs(dev->pdev);
+}
static inline int mlx5_get_gid_table_len(u16 param)
{
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index fab5121ffb8f..96d8435421de 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -22,6 +22,12 @@ enum {
NUM_REP_TYPES,
};
+enum {
+ REP_UNREGISTERED,
+ REP_REGISTERED,
+ REP_LOADED,
+};
+
struct mlx5_eswitch_rep;
struct mlx5_eswitch_rep_if {
int (*load)(struct mlx5_core_dev *dev,
@@ -29,7 +35,7 @@ struct mlx5_eswitch_rep_if {
void (*unload)(struct mlx5_eswitch_rep *rep);
void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
void *priv;
- bool valid;
+ u8 state;
};
struct mlx5_eswitch_rep {
@@ -40,13 +46,10 @@ struct mlx5_eswitch_rep {
u32 vlan_refcount;
};
-void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
- int vport_index,
- struct mlx5_eswitch_rep_if *rep_if,
- u8 rep_type);
-void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index,
- u8 rep_type);
+void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep_if *rep_if,
+ u8 rep_type);
+void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
int vport,
u8 rep_type);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 35fe5217b244..3b83288749c6 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -72,6 +72,7 @@ enum {
enum {
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
+ MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
};
@@ -141,6 +142,7 @@ enum {
MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
+ MLX5_CMD_OP_QUERY_HOST_PARAMS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@@ -629,7 +631,8 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x17];
+ u8 reserved_at_5[0x16];
+ u8 ecpf_vport_exists[0x1];
u8 counter_eswitch_affinity[0x1];
u8 merged_eswitch[0x1];
u8 nic_vport_node_guid_modify[0x1];
@@ -831,7 +834,9 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
- u8 reserved_at_e0[0x720];
+ struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;
+
+ u8 reserved_at_100[0x700];
};
struct mlx5_ifc_calc_op {
@@ -4438,7 +4443,8 @@ struct mlx5_ifc_query_pages_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 num_pages[0x20];
@@ -4457,7 +4463,8 @@ struct mlx5_ifc_query_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -5877,7 +5884,8 @@ struct mlx5_ifc_manage_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 input_num_entries[0x20];
@@ -6055,7 +6063,8 @@ struct mlx5_ifc_enable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -6099,7 +6108,8 @@ struct mlx5_ifc_disable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -7817,21 +7827,23 @@ struct mlx5_ifc_ptys_reg_bits {
u8 proto_mask[0x3];
u8 an_status[0x4];
- u8 reserved_at_24[0x3c];
+ u8 reserved_at_24[0x1c];
+
+ u8 ext_eth_proto_capability[0x20];
u8 eth_proto_capability[0x20];
u8 ib_link_width_capability[0x10];
u8 ib_proto_capability[0x10];
- u8 reserved_at_a0[0x20];
+ u8 ext_eth_proto_admin[0x20];
u8 eth_proto_admin[0x20];
u8 ib_link_width_admin[0x10];
u8 ib_proto_admin[0x10];
- u8 reserved_at_100[0x20];
+ u8 ext_eth_proto_oper[0x20];
u8 eth_proto_oper[0x20];
@@ -8280,7 +8292,9 @@ struct mlx5_ifc_mpegc_reg_bits {
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x6d];
u8 rx_icrc_encapsulated_counter[0x1];
- u8 reserved_at_6e[0x8];
+ u8 reserved_at_6e[0x4];
+ u8 ptys_extended_ethernet[0x1];
+ u8 reserved_at_73[0x3];
u8 pfcc_mask[0x1];
u8 reserved_at_77[0x3];
u8 per_lane_error_counters[0x1];
@@ -8459,9 +8473,17 @@ struct mlx5_ifc_pamp_reg_bits {
struct mlx5_ifc_pcmr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_at_10[0x2e];
+ u8 reserved_at_10[0x10];
+ u8 entropy_force_cap[0x1];
+ u8 entropy_calc_cap[0x1];
+ u8 entropy_gre_calc_cap[0x1];
+ u8 reserved_at_23[0x1b];
u8 fcs_cap[0x1];
- u8 reserved_at_3f[0x1f];
+ u8 reserved_at_3f[0x1];
+ u8 entropy_force[0x1];
+ u8 entropy_calc[0x1];
+ u8 entropy_gre_calc[0x1];
+ u8 reserved_at_43[0x1b];
u8 fcs_chk[0x1];
u8 reserved_at_5f[0x1];
};
@@ -8746,7 +8768,8 @@ struct mlx5_ifc_initial_seg_bits {
u8 initializing[0x1];
u8 reserved_at_fe1[0x4];
u8 nic_interface_supported[0x3];
- u8 reserved_at_fe8[0x18];
+ u8 embedded_cpu[0x1];
+ u8 reserved_at_fe9[0x17];
struct mlx5_ifc_health_buffer_bits health_buffer;
@@ -9513,4 +9536,44 @@ struct mlx5_ifc_mtrc_ctrl_bits {
u8 reserved_at_80[0x180];
};
+struct mlx5_ifc_host_params_context_bits {
+ u8 host_number[0x8];
+ u8 reserved_at_8[0x8];
+ u8 host_num_of_vfs[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 host_pci_bus[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 host_pci_device[0x10];
+
+ u8 reserved_at_60[0x10];
+ u8 host_pci_function[0x10];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_query_host_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_host_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_host_params_context_bits host_params_context;
+
+ u8 reserved_at_280[0x180];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index bf4bc01ffb0c..64e78394fc9c 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -92,6 +92,22 @@ enum mlx5e_link_mode {
MLX5E_LINK_MODES_NUMBER,
};
+enum mlx5e_ext_link_mode {
+ MLX5E_SGMII_100M = 0,
+ MLX5E_1000BASE_X_SGMII = 1,
+ MLX5E_5GBASE_R = 3,
+ MLX5E_10GBASE_XFI_XAUI_1 = 4,
+ MLX5E_40GBASE_XLAUI_4_XLPPI_4 = 5,
+ MLX5E_25GAUI_1_25GBASE_CR_KR = 6,
+ MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 = 7,
+ MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8,
+ MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9,
+ MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10,
+ MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
+ MLX5E_400GAUI_8 = 15,
+ MLX5E_EXT_LINK_MODES_NUMBER,
+};
+
enum mlx5e_connector_type {
MLX5E_PORT_UNKNOWN = 0,
MLX5E_PORT_NONE = 1,
@@ -106,31 +122,23 @@ enum mlx5e_connector_type {
};
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
+ (ext ? MLX5_GET(reg, out, ext_##field) : \
+ MLX5_GET(reg, out, field))
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
- u32 *proto_cap, int proto_mask);
-int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
- u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, u8 local_port);
-int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
- u32 *proto_oper, u8 local_port);
-int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
- u32 proto_admin, int proto_mask);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
-void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
- u8 *an_status,
- u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
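
The MLX5_GET_ETH_PROTO() selector added above pairs with the new ext_eth_proto_* PTYS fields from the mlx5_ifc.h hunk earlier in this patch. A hypothetical helper (sketch, not part of the patch) shows the intended pattern; the ext flag and proto_mask are assumed to be obtained by the caller elsewhere (e.g. from the new ptys_extended_ethernet PCAM bit).

#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>

static int my_query_eth_proto_oper(struct mlx5_core_dev *dev, bool ext,
				   int proto_mask, u8 local_port,
				   u32 *proto_oper)
{
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
	int err;

	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask,
				   local_port);
	if (err)
		return err;

	/* reads ext_eth_proto_oper when ext is set, eth_proto_oper otherwise */
	*proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
	return 0;
}
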
@@ -174,6 +182,8 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
bool *enabled);
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 9c694808c212..0eef548b9946 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -36,15 +36,38 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
+#define MLX5_VPORT_PF_PLACEHOLDER (1u)
+#define MLX5_VPORT_UPLINK_PLACEHOLDER (1u)
+#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev))
+
+#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \
+ MLX5_VPORT_UPLINK_PLACEHOLDER + \
+ MLX5_VPORT_ECPF_PLACEHOLDER(mdev))
+
+#define MLX5_TOTAL_VPORTS(mdev) (MLX5_SPECIAL_VPORTS(mdev) + \
+ mlx5_core_max_vfs(mdev))
+
+#define MLX5_VPORT_MANAGER(mdev) \
+ (MLX5_CAP_GEN(mdev, vport_group_manager) && \
+ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
+ mlx5_core_is_pf(mdev))
+
enum {
MLX5_CAP_INLINE_MODE_L2,
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
};
+enum {
+ MLX5_VPORT_PF = 0x0,
+ MLX5_VPORT_FIRST_VF = 0x1,
+ MLX5_VPORT_ECPF = 0xfffe,
+ MLX5_VPORT_UPLINK = 0xffff
+};
+
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport, u8 state);
+ u16 vport, u8 other_vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
@@ -60,7 +83,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
- u32 vport, u64 node_guid);
+ u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
@@ -78,7 +101,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
- u32 vport,
+ u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size);
@@ -87,7 +110,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
- u32 vport,
+ u16 vport,
int *promisc_uc,
int *promisc_mc,
int *promisc_all);
@@ -96,7 +119,7 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int promisc_mc,
int promisc_all);
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
- u32 vport,
+ u16 vport,
u16 vlans[],
int *size);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
@@ -106,7 +129,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
- u64 *rx_discard_vport_down,
+ u8 other_vport, u64 *rx_discard_vport_down,
u64 *tx_discard_vport_down);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
int vf, u8 port_num, void *out,
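
A small sketch (not part of the patch) of the relocated vport accounting macros and the new explicit vport numbers added above; mlx5_core_max_vfs() is the helper introduced in the driver.h hunk earlier in this patch, and the loop body is a placeholder.

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

static void my_walk_vf_vports(struct mlx5_core_dev *mdev)
{
	u16 max_vfs = mlx5_core_max_vfs(mdev);
	u16 vport;

	pr_info("%d vports total (PF %#x, ECPF %#x, uplink %#x)\n",
		MLX5_TOTAL_VPORTS(mdev), MLX5_VPORT_PF, MLX5_VPORT_ECPF,
		MLX5_VPORT_UPLINK);

	for (vport = MLX5_VPORT_FIRST_VF;
	     vport < MLX5_VPORT_FIRST_VF + max_vfs; vport++)
		pr_info("VF vport %#x\n", vport);
}
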
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80bb6408fe73..20ec56f8e2bb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1536,7 +1536,8 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
-#ifdef CONFIG_FS_DAX
+
+#if defined(CONFIG_FS_DAX) || defined(CONFIG_CMA)
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2c471a2c43fa..ab9b48420200 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -80,7 +80,7 @@ struct page {
struct { /* Page cache and anonymous pages */
/**
* @lru: Pageout list, eg. active_list protected by
- * zone_lru_lock. Sometimes used as a generic list
+ * pgdat->lru_lock. Sometimes used as a generic list
* by the page owner.
*/
struct list_head lru;
@@ -95,6 +95,13 @@ struct page {
*/
unsigned long private;
};
+ struct { /* page_pool used by netstack */
+ /**
+ * @dma_addr: might require a 64-bit value even on
+ * 32-bit architectures.
+ */
+ dma_addr_t dma_addr;
+ };
struct { /* slab, slob and slub */
union {
struct list_head slab_list; /* uses lru */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index de7377815b6b..19566ab9decb 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -133,6 +133,8 @@ struct mmc_ext_csd {
struct sd_scr {
unsigned char sda_vsn;
unsigned char sda_spec3;
+ unsigned char sda_spec4;
+ unsigned char sda_specx;
unsigned char bus_widths;
#define SD_SCR_BUS_WIDTH_1 (1<<0)
#define SD_SCR_BUS_WIDTH_4 (1<<2)
@@ -277,6 +279,7 @@ struct mmc_card {
unsigned int erase_shift; /* if erase unit is power 2 */
unsigned int pref_erase; /* in sectors */
unsigned int eg_boundary; /* don't cross erase-group boundaries */
+ unsigned int erase_arg; /* erase / trim / discard */
u8 erased_byte; /* value of erased bytes */
u32 raw_cid[4]; /* raw card CID */
@@ -308,6 +311,7 @@ struct mmc_card {
unsigned int nr_parts;
unsigned int bouncesz; /* Bounce buffer size */
+ struct workqueue_struct *complete_wq; /* Private workqueue */
};
static inline bool mmc_large_sector(struct mmc_card *card)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4d35ff36ceff..43d0f0c496f6 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -478,6 +478,11 @@ static inline void *mmc_priv(struct mmc_host *host)
return (void *)host->private;
}
+static inline struct mmc_host *mmc_from_priv(void *priv)
+{
+ return container_of(priv, struct mmc_host, private);
+}
+
#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
#define mmc_dev(x) ((x)->parent)
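
The new mmc_from_priv() above is the inverse of mmc_priv(). Illustrative sketch (not part of the patch): a host driver whose helpers receive only the private data it placed via mmc_alloc_host() can map back to the mmc_host; struct my_host and the card-detect handler are placeholders.

#include <linux/jiffies.h>
#include <linux/mmc/host.h>

struct my_host {
	void __iomem *base;	/* controller registers (placeholder) */
};

static void my_card_detect_event(struct my_host *priv)
{
	struct mmc_host *mmc = mmc_from_priv(priv);

	/* schedule a rescan of the slot on the recovered mmc_host */
	mmc_detect_change(mmc, msecs_to_jiffies(200));
}
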
@@ -502,17 +507,11 @@ void sdio_run_irqs(struct mmc_host *host);
void sdio_signal_irq(struct mmc_host *host);
#ifdef CONFIG_REGULATOR
-int mmc_regulator_get_ocrmask(struct regulator *supply);
int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit);
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
#else
-static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
-{
- return 0;
-}
-
static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit)
@@ -527,7 +526,6 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
}
#endif
-u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
int mmc_regulator_get_supply(struct mmc_host *mmc);
static inline int mmc_card_is_removable(struct mmc_host *host)
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 1ebcf9ba1256..ec94a5aa02bb 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -91,4 +91,10 @@
#define SD_SWITCH_ACCESS_DEF 0
#define SD_SWITCH_ACCESS_HS 1
+/*
+ * Erase/discard
+ */
+#define SD_ERASE_ARG 0x00000000
+#define SD_DISCARD_ARG 0x00000001
+
#endif /* LINUX_MMC_SD_H */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index feebd7aa6f5c..9fd3ce64a885 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -22,7 +22,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce, bool *gpio_invert);
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx, bool override_active_level,
+ unsigned int idx,
unsigned int debounce, bool *gpio_invert);
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cc4a507d7ca4..fba7741533be 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -480,6 +480,8 @@ struct zone {
unsigned long compact_cached_free_pfn;
/* pfn where async and sync compaction migration scanner should start */
unsigned long compact_cached_migrate_pfn[2];
+ unsigned long compact_init_migrate_pfn;
+ unsigned long compact_init_free_pfn;
#endif
#ifdef CONFIG_COMPACTION
@@ -520,6 +522,12 @@ enum pgdat_flags {
PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
};
+enum zone_flags {
+ ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
+ * Cleared when kswapd is woken.
+ */
+};
+
static inline unsigned long zone_managed_pages(struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
@@ -722,10 +730,6 @@ typedef struct pglist_data {
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
-static inline spinlock_t *zone_lru_lock(struct zone *zone)
-{
- return &zone->zone_pgdat->lru_lock;
-}
static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
@@ -1293,7 +1297,7 @@ void memory_present(int nid, unsigned long start, unsigned long end);
/*
* If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validility within that MAX_ORDER_NR_PAGES block.
+ * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
* pfn_valid_within() should be used in this case; we optimise this away
* when we have no holes within a MAX_ORDER_NR_PAGES block.
*/
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index f9bd2f34b99f..14eaeeb46f41 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -779,4 +779,13 @@ struct typec_device_id {
kernel_ulong_t driver_data;
};
+/**
+ * struct tee_client_device_id - tee based device identifier
+ * @uuid: For TEE based client devices we use the device uuid as
+ * the identifier.
+ */
+struct tee_client_device_id {
+ uuid_t uuid;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 9a21fe3509af..f5bc4c046461 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -129,13 +129,13 @@ extern void cleanup_module(void);
#define module_init(initfn) \
static inline initcall_t __maybe_unused __inittest(void) \
{ return initfn; } \
- int init_module(void) __attribute__((alias(#initfn)));
+ int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
static inline exitcall_t __maybe_unused __exittest(void) \
{ return exitfn; } \
- void cleanup_module(void) __attribute__((alias(#exitfn)));
+ void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
#endif
@@ -828,7 +828,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
extern bool retpoline_module_ok(bool has_retpoline);
#else
static inline bool retpoline_module_ok(bool has_retpoline)
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 33e240acdc6d..b7445a44a814 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -16,13 +16,12 @@
#ifndef __LINUX_MTD_RAWNAND_H
#define __LINUX_MTD_RAWNAND_H
-#include <linux/wait.h>
-#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
#include <linux/mtd/jedec.h>
#include <linux/mtd/onfi.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/types.h>
@@ -897,25 +896,17 @@ struct nand_controller_ops {
/**
* struct nand_controller - Structure used to describe a NAND controller
*
- * @lock: protection lock
- * @active: the mtd device which holds the controller currently
- * @wq: wait queue to sleep on if a NAND operation is in
- * progress used instead of the per chip wait queue
- * when a hw controller is available.
+ * @lock: lock used to serialize accesses to the NAND controller
* @ops: NAND controller operations.
*/
struct nand_controller {
- spinlock_t lock;
- struct nand_chip *active;
- wait_queue_head_t wq;
+ struct mutex lock;
const struct nand_controller_ops *ops;
};
static inline void nand_controller_init(struct nand_controller *nfc)
{
- nfc->active = NULL;
- spin_lock_init(&nfc->lock);
- init_waitqueue_head(&nfc->wq);
+ mutex_init(&nfc->lock);
}
/**
@@ -936,7 +927,6 @@ static inline void nand_controller_init(struct nand_controller *nfc)
* @waitfunc: hardware specific function for wait on ready.
* @block_bad: check if a block is bad, using OOB markers
* @block_markbad: mark a block bad
- * @erase: erase function
* @set_features: set the NAND chip features
* @get_features: get the NAND chip features
* @chip_delay: chip dependent delay for transferring data from array to read
@@ -962,7 +952,6 @@ struct nand_legacy {
int (*waitfunc)(struct nand_chip *chip);
int (*block_bad)(struct nand_chip *chip, loff_t ofs);
int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
- int (*erase)(struct nand_chip *chip, int page);
int (*set_features)(struct nand_chip *chip, int feature_addr,
u8 *subfeature_para);
int (*get_features)(struct nand_chip *chip, int feature_addr,
@@ -983,7 +972,6 @@ struct nand_legacy {
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
- * @state: [INTERN] the current state of the NAND device
* @oob_poi: "poison value buffer," used for laying out OOB data
* before writing
* @page_shift: [INTERN] number of address bits in a page (column
@@ -1034,6 +1022,9 @@ struct nand_legacy {
* cur_cs < numchips. NAND Controller drivers should not
* modify this value, but they're allowed to read it.
* @read_retries: [INTERN] the number of read retry modes supported
+ * @lock: lock protecting the suspended field. Also used to
+ * serialize accesses to the NAND device.
+ * @suspended: set to 1 when the device is suspended, 0 when it's not.
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -1088,7 +1079,8 @@ struct nand_chip {
int read_retries;
- flstate_t state;
+ struct mutex lock;
+ unsigned int suspended : 1;
uint8_t *oob_poi;
struct nand_controller *controller;
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index fa2d89e38e40..b3d360b0ee3d 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -46,9 +46,13 @@
#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */
#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */
#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_READ_1_1_8 0x8b /* Read data bytes (Octal Output SPI) */
+#define SPINOR_OP_READ_1_8_8 0xcb /* Read data bytes (Octal I/O SPI) */
#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */
#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */
#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */
+#define SPINOR_OP_PP_1_1_8 0x82 /* Octal page program */
+#define SPINOR_OP_PP_1_8_8 0xc2 /* Octal page program */
#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */
#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */
@@ -69,9 +73,13 @@
#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */
#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_READ_1_1_8_4B 0x7c /* Read data bytes (Octal Output SPI) */
+#define SPINOR_OP_READ_1_8_8_4B 0xcc /* Read data bytes (Octal I/O SPI) */
#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */
#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */
#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */
+#define SPINOR_OP_PP_1_1_8_4B 0x84 /* Octal page program */
+#define SPINOR_OP_PP_1_8_8_4B 0x8e /* Octal page program */
#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */
#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */
#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
@@ -458,7 +466,7 @@ struct spi_nor_hwcaps {
/*
*(Fast) Read capabilities.
* MUST be ordered by priority: the higher bit position, the higher priority.
- * As a matter of performances, it is relevant to use Octo SPI protocols first,
+ * As a matter of performances, it is relevant to use Octal SPI protocols first,
* then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
* (Slow) Read.
*/
@@ -479,7 +487,7 @@ struct spi_nor_hwcaps {
#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
-#define SNOR_HWCPAS_READ_OCTO GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11)
#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
@@ -488,7 +496,7 @@ struct spi_nor_hwcaps {
/*
* Page Program capabilities.
* MUST be ordered by priority: the higher bit position, the higher priority.
- * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the
+ * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the
* legacy SPI 1-1-1 protocol.
* Note that Dual Page Programs are not supported because there is no existing
* JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
@@ -502,7 +510,7 @@ struct spi_nor_hwcaps {
#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
-#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20)
+#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20)
#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2b2a6dce1630..4c76fe2c8488 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -11,6 +11,8 @@
#define _LINUX_NETDEV_FEATURES_H
#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
typedef u64 netdev_features_t;
@@ -154,8 +156,26 @@ enum {
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
-#define for_each_netdev_feature(mask_addr, bit) \
- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Finds the next feature with the highest number of the range of start till 0.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+ /* like BITMAP_LAST_WORD_MASK() for u64
+ * this sets the most significant 64 - start to 0.
+ */
+ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+ return fls64(feature) - 1;
+}
+
+/* This goes for the MSB to the LSB through the set feature bits,
+ * mask_addr should be a u64 and bit an int
+ */
+#define for_each_netdev_feature(mask_addr, bit) \
+ for ((bit) = find_next_netdev_feature((mask_addr), \
+ NETDEV_FEATURE_COUNT); \
+ (bit) >= 0; \
+ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1377d085ef99..26f69cf763f4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -274,6 +274,7 @@ struct header_ops {
const struct net_device *dev,
const unsigned char *haddr);
bool (*validate)(const char *ll_header, unsigned int len);
+ __be16 (*parse_protocol)(const struct sk_buff *skb);
};
/* These flag bits are private to the generic network queueing
@@ -630,6 +631,7 @@ struct netdev_queue {
} ____cacheline_aligned_in_smp;
extern int sysctl_fb_tunnels_only_for_init_net;
+extern int sysctl_devconf_inherit_init_net;
static inline bool net_has_fallback_tunnels(const struct net *net)
{
@@ -867,7 +869,6 @@ enum bpf_netdev_command {
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
- XDP_QUERY_XSK_UMEM,
XDP_SETUP_XSK_UMEM,
};
@@ -894,10 +895,10 @@ struct netdev_bpf {
struct {
struct bpf_offloaded_map *offmap;
};
- /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
+ /* XDP_SETUP_XSK_UMEM */
struct {
- struct xdp_umem *umem; /* out for query*/
- u16 queue_id; /* in for query */
+ struct xdp_umem *umem;
+ u16 queue_id;
} xsk;
};
};
@@ -940,6 +941,8 @@ struct dev_ifalias {
char ifalias[];
};
+struct devlink;
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -1152,7 +1155,8 @@ struct dev_ifalias {
*
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 vid, u16 flags)
+ * const unsigned char *addr, u16 vid, u16 flags,
+ * struct netlink_ext_ack *extack);
* Adds an FDB entry to dev for addr.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
@@ -1186,6 +1190,10 @@ struct dev_ifalias {
* not implement this, it is assumed that the hw is not able to have
* multiple net devices on single physical port.
*
+ * int (*ndo_get_port_parent_id)(struct net_device *dev,
+ * struct netdev_phys_item_id *ppid)
+ * Called to get the parent ID of the physical port of this device.
+ *
* void (*ndo_udp_tunnel_add)(struct net_device *dev,
* struct udp_tunnel_info *ti);
* Called by UDP tunnel to notify a driver about the UDP port and socket
@@ -1243,6 +1251,10 @@ struct dev_ifalias {
* that got dropped are freed/returned via xdp_return_frame().
* Returns negative number, means general error invoking ndo, meaning
* no frames were xmit'ed and core-caller will free all frames.
+ * struct devlink *(*ndo_get_devlink)(struct net_device *dev);
+ * Get devlink instance associated with a given netdev.
+ * Called with a reference on the netdevice and devlink locks only,
+ * rtnl_lock is not held.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1376,7 +1388,8 @@ struct net_device_ops {
struct net_device *dev,
const unsigned char *addr,
u16 vid,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
@@ -1409,6 +1422,8 @@ struct net_device_ops {
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+ int (*ndo_get_port_parent_id)(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
void (*ndo_udp_tunnel_add)(struct net_device *dev,
@@ -1438,6 +1453,7 @@ struct net_device_ops {
u32 flags);
int (*ndo_xsk_async_xmit)(struct net_device *dev,
u32 queue_id);
+ struct devlink * (*ndo_get_devlink)(struct net_device *dev);
};
/**
@@ -1483,6 +1499,7 @@ struct net_device_ops {
* @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1514,6 +1531,7 @@ enum netdev_priv_flags {
IFF_NO_RX_HANDLER = 1<<26,
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
+ IFF_L3MDEV_RX_HANDLER = 1<<29,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1544,6 +1562,7 @@ enum netdev_priv_flags {
#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
+#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
/**
* struct net_device - The DEVICE structure.
@@ -1824,9 +1843,6 @@ struct net_device {
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
-#ifdef CONFIG_NET_SWITCHDEV
- const struct switchdev_ops *switchdev_ops;
-#endif
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
#endif
@@ -2928,6 +2944,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->parse_protocol)
+ return 0;
+ return dev->header_ops->parse_protocol(skb);
+}
+
/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
char *ll_header, int len)
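
Sketch only (not part of the patch) of the parse_protocol path added above: a caller asks the device to identify the protocol from the link-layer header it built, and falls back to skb->protocol when the new header op is absent.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static __be16 my_skb_l3_protocol(const struct sk_buff *skb)
{
	__be16 proto = dev_parse_header_protocol(skb);

	/* dev_parse_header_protocol() returns 0 when the device cannot tell */
	return proto ? proto : skb->protocol;
}
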
@@ -3648,7 +3673,11 @@ int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
+int dev_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid, bool recurse);
+bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
+int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3858,7 +3887,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
if (debug_value == 0) /* no output */
return 0;
/* set low N bits */
- return (1 << debug_value) - 1;
+ return (1U << debug_value) - 1;
}
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
@@ -4549,6 +4578,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
static inline bool netif_is_l3_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_MASTER;
@@ -4660,22 +4694,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
return " (unknown)";
}
-__printf(3, 4)
+__printf(3, 4) __cold
void netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void netdev_emerg(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void netdev_alert(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void netdev_crit(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void netdev_err(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void netdev_warn(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void netdev_notice(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
+__printf(2, 3) __cold
void netdev_info(const struct net_device *dev, const char *format, ...);
#define netdev_level_once(level, dev, fmt, ...) \
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index 6989e2e4eabf..25f9a770fb84 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -19,27 +19,18 @@ struct nf_conn;
struct nf_ct_gre_keymap {
struct list_head list;
struct nf_conntrack_tuple tuple;
-};
-
-enum grep_conntrack {
- GRE_CT_UNREPLIED,
- GRE_CT_REPLIED,
- GRE_CT_MAX
-};
-
-struct netns_proto_gre {
- struct nf_proto_net nf;
- rwlock_t keymap_lock;
- struct list_head keymap_list;
- unsigned int gre_timeouts[GRE_CT_MAX];
+ struct rcu_head rcu;
};
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
+void nf_ct_gre_keymap_flush(struct net *net);
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
+bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+ struct net *net, struct nf_conntrack_tuple *tuple);
#endif /* __KERNEL__ */
#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 9077b3ebea08..bf384b3eedb8 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -289,9 +289,9 @@ bool xt_find_jump_offset(const unsigned int *offsets,
int xt_check_proc_name(const char *name, unsigned int size);
-int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u16 proto,
bool inv_proto);
-int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u16 proto,
bool inv_proto);
int xt_match_to_user(const struct xt_entry_match *m,
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 95ab5cc64422..082e2c41b7ff 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -25,7 +25,6 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
-int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
#else
static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol)
@@ -37,11 +36,6 @@ static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
{
return -EOPNOTSUPP;
}
-static inline int nf_ip_reroute(struct sk_buff *skb,
- const struct nf_queue_entry *entry)
-{
- return -EOPNOTSUPP;
-}
#endif /* CONFIG_INET */
#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index c0dc4dd78887..471e9467105b 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -25,23 +25,24 @@ struct nf_queue_entry;
* if IPv6 is a module.
*/
struct nf_ipv6_ops {
+#if IS_MODULE(CONFIG_IPV6)
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
+ int (*route_me_harder)(struct net *net, struct sk_buff *skb);
+ int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+ struct in6_addr *saddr);
+ int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict);
+#endif
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
- int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
- bool strict);
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
};
#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
+#include <net/addrconf.h>
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
@@ -49,6 +50,49 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
return rcu_dereference(nf_ipv6_ops);
}
+static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict)
+{
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (!v6_ops)
+ return 1;
+
+ return v6_ops->chk_addr(net, addr, dev, strict);
+#else
+ return ipv6_chk_addr(net, addr, dev, strict);
+#endif
+}
+
+int __nf_ip6_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict);
+
+static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
+{
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+
+ if (v6ops)
+ return v6ops->route(net, dst, fl, strict);
+
+ return -EHOSTUNREACH;
+#endif
+#if IS_BUILTIN(CONFIG_IPV6)
+ return __nf_ip6_route(net, dst, fl, strict);
+#else
+ return -EHOSTUNREACH;
+#endif
+}
+
+int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
#else /* CONFIG_NETFILTER */
static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 4e8add270200..593d1b9c33a8 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -126,6 +126,7 @@ void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
const struct netlink_ext_ack *extack);
int netlink_has_listeners(struct sock *sk, unsigned int group);
+bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 5a30ad594ccc..27e7fa36f707 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -444,8 +444,8 @@ static inline int next_memory_node(int nid)
return next_node(nid, node_states[N_MEMORY]);
}
-extern int nr_node_ids;
-extern int nr_online_nodes;
+extern unsigned int nr_node_ids;
+extern unsigned int nr_online_nodes;
static inline void node_set_online(int nid)
{
@@ -485,8 +485,8 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
-#define nr_node_ids 1
-#define nr_online_nodes 1
+#define nr_node_ids 1U
+#define nr_online_nodes 1U
#define node_set_online(node) node_set_state((node), N_ONLINE)
#define node_set_offline(node) node_clear_state((node), N_ONLINE)
diff --git a/include/linux/nvram.h b/include/linux/nvram.h
index 28bfb9ab94ca..d29d9c93a927 100644
--- a/include/linux/nvram.h
+++ b/include/linux/nvram.h
@@ -2,13 +2,132 @@
#ifndef _LINUX_NVRAM_H
#define _LINUX_NVRAM_H
+#include <linux/errno.h>
#include <uapi/linux/nvram.h>
-/* __foo is foo without grabbing the rtc_lock - get it yourself */
-extern unsigned char __nvram_read_byte(int i);
-extern unsigned char nvram_read_byte(int i);
-extern void __nvram_write_byte(unsigned char c, int i);
-extern void nvram_write_byte(unsigned char c, int i);
-extern int __nvram_check_checksum(void);
-extern int nvram_check_checksum(void);
+#ifdef CONFIG_PPC
+#include <asm/machdep.h>
+#endif
+
+/**
+ * struct nvram_ops - NVRAM functionality made available to drivers
+ * @read: validate checksum (if any) then load a range of bytes from NVRAM
+ * @write: store a range of bytes to NVRAM then update checksum (if any)
+ * @read_byte: load a single byte from NVRAM
+ * @write_byte: store a single byte to NVRAM
+ * @get_size: return the fixed number of bytes in the NVRAM
+ *
+ * Architectures which provide an nvram ops struct need not implement all
+ * of these methods. If the NVRAM hardware can be accessed only one byte
+ * at a time then it may be sufficient to provide .read_byte and .write_byte.
+ * If the NVRAM has a checksum (and it is to be checked) the .read and
+ * .write methods can be used to implement that efficiently.
+ *
+ * Portable drivers may use the wrapper functions defined here.
+ * The nvram_read() and nvram_write() functions call the .read and .write
+ * methods when available and fall back on the .read_byte and .write_byte
+ * methods otherwise.
+ */
+
+struct nvram_ops {
+ ssize_t (*get_size)(void);
+ unsigned char (*read_byte)(int);
+ void (*write_byte)(unsigned char, int);
+ ssize_t (*read)(char *, size_t, loff_t *);
+ ssize_t (*write)(char *, size_t, loff_t *);
+#if defined(CONFIG_X86) || defined(CONFIG_M68K)
+ long (*initialize)(void);
+ long (*set_checksum)(void);
+#endif
+};
+
+extern const struct nvram_ops arch_nvram_ops;
+
+static inline ssize_t nvram_get_size(void)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_size)
+ return ppc_md.nvram_size();
+#else
+ if (arch_nvram_ops.get_size)
+ return arch_nvram_ops.get_size();
+#endif
+ return -ENODEV;
+}
+
+static inline unsigned char nvram_read_byte(int addr)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_read_val)
+ return ppc_md.nvram_read_val(addr);
+#else
+ if (arch_nvram_ops.read_byte)
+ return arch_nvram_ops.read_byte(addr);
+#endif
+ return 0xFF;
+}
+
+static inline void nvram_write_byte(unsigned char val, int addr)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_write_val)
+ ppc_md.nvram_write_val(addr, val);
+#else
+ if (arch_nvram_ops.write_byte)
+ arch_nvram_ops.write_byte(val, addr);
+#endif
+}
+
+static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos)
+{
+ ssize_t nvram_size = nvram_get_size();
+ loff_t i;
+ char *p = buf;
+
+ if (nvram_size < 0)
+ return nvram_size;
+ for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
+ *p = nvram_read_byte(i);
+ *ppos = i;
+ return p - buf;
+}
+
+static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos)
+{
+ ssize_t nvram_size = nvram_get_size();
+ loff_t i;
+ char *p = buf;
+
+ if (nvram_size < 0)
+ return nvram_size;
+ for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
+ nvram_write_byte(*p, i);
+ *ppos = i;
+ return p - buf;
+}
+
+static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_read)
+ return ppc_md.nvram_read(buf, count, ppos);
+#else
+ if (arch_nvram_ops.read)
+ return arch_nvram_ops.read(buf, count, ppos);
+#endif
+ return nvram_read_bytes(buf, count, ppos);
+}
+
+static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_write)
+ return ppc_md.nvram_write(buf, count, ppos);
+#else
+ if (arch_nvram_ops.write)
+ return arch_nvram_ops.write(buf, count, ppos);
+#endif
+ return nvram_write_bytes(buf, count, ppos);
+}
+
#endif /* _LINUX_NVRAM_H */
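
A sketch (not part of the patch) of the portable-driver usage the comment block above describes: read a small range through nvram_read(), which dispatches to the arch .read method where available and otherwise falls back to byte-at-a-time access. The buffer layout is a placeholder.

#include <linux/nvram.h>

static int my_load_nvram_config(char *buf, size_t len)
{
	loff_t pos = 0;
	ssize_t ret;

	if (nvram_get_size() < 0)
		return -ENODEV;	/* no NVRAM on this machine */

	ret = nvram_read(buf, len, &pos);
	if (ret < 0)
		return ret;

	return (size_t)ret == len ? 0 : -EIO;
}
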
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
index 34f38c186ea0..78021777df46 100644
--- a/include/linux/objagg.h
+++ b/include/linux/objagg.h
@@ -6,14 +6,19 @@
struct objagg_ops {
size_t obj_size;
+ bool (*delta_check)(void *priv, const void *parent_obj,
+ const void *obj);
+ int (*hints_obj_cmp)(const void *obj1, const void *obj2);
void * (*delta_create)(void *priv, void *parent_obj, void *obj);
void (*delta_destroy)(void *priv, void *delta_priv);
- void * (*root_create)(void *priv, void *obj);
+ void * (*root_create)(void *priv, void *obj, unsigned int root_id);
+#define OBJAGG_OBJ_ROOT_ID_INVALID UINT_MAX
void (*root_destroy)(void *priv, void *root_priv);
};
struct objagg;
struct objagg_obj;
+struct objagg_hints;
const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj);
const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj);
@@ -21,7 +26,8 @@ const void *objagg_obj_raw(const struct objagg_obj *objagg_obj);
struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj);
void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj);
-struct objagg *objagg_create(const struct objagg_ops *ops, void *priv);
+struct objagg *objagg_create(const struct objagg_ops *ops,
+ struct objagg_hints *hints, void *priv);
void objagg_destroy(struct objagg *objagg);
struct objagg_obj_stats {
@@ -36,6 +42,7 @@ struct objagg_obj_stats_info {
};
struct objagg_stats {
+ unsigned int root_count;
unsigned int stats_info_count;
struct objagg_obj_stats_info stats_info[];
};
@@ -43,4 +50,14 @@ struct objagg_stats {
const struct objagg_stats *objagg_stats_get(struct objagg *objagg);
void objagg_stats_put(const struct objagg_stats *objagg_stats);
+enum objagg_opt_algo_type {
+ OBJAGG_OPT_ALGO_SIMPLE_GREEDY,
+};
+
+struct objagg_hints *objagg_hints_get(struct objagg *objagg,
+ enum objagg_opt_algo_type opt_algo_type);
+void objagg_hints_put(struct objagg_hints *objagg_hints);
+const struct objagg_stats *
+objagg_hints_stats_get(struct objagg_hints *objagg_hints);
+
#endif
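
A sketch (not part of the patch) of the new hints interface above: compute hints for an existing aggregation with the simple-greedy algorithm and read back the per-root statistics. The ERR_PTR return convention is an assumption, not visible in this header.

#include <linux/err.h>
#include <linux/objagg.h>
#include <linux/printk.h>

static void my_dump_objagg_hints(struct objagg *objagg)
{
	const struct objagg_stats *stats;
	struct objagg_hints *hints;

	hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
	if (IS_ERR(hints))
		return;

	stats = objagg_hints_stats_get(hints);
	if (!IS_ERR(stats)) {
		pr_info("objagg hints: %u roots over %u objects\n",
			stats->root_count, stats->stats_info_count);
		objagg_stats_put(stats);
	}
	objagg_hints_put(hints);
}
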
diff --git a/include/linux/of.h b/include/linux/of.h
index fe472e5195a9..e240992e5cb6 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -50,7 +50,6 @@ struct of_irq_controller;
struct device_node {
const char *name;
- const char *type;
phandle phandle;
const char *full_name;
struct fwnode_handle fwnode;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 39b4494e29f1..9f8712a4b1a5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -17,8 +17,37 @@
/*
* Various page->flags bits:
*
- * PG_reserved is set for special pages, which can never be swapped out. Some
- * of them might not even exist...
+ * PG_reserved is set for special pages. The "struct page" of such a page
+ * should in general not be touched (e.g. set dirty) except by its owner.
+ * Pages marked as PG_reserved include:
+ * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
+ * initrd, HW tables)
+ * - Pages reserved or allocated early during boot (before the page allocator
+ * was initialized). This includes (depending on the architecture) the
+ * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
+ * much more. Once (if ever) freed, PG_reserved is cleared and they will
+ * be given to the page allocator.
+ * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
+ * to read/write these pages might end badly. Don't touch!
+ * - The zero page(s)
+ * - Pages not added to the page allocator when onlining a section because
+ * they were excluded via the online_page_callback() or because they are
+ * PG_hwpoison.
+ * - Pages allocated in the context of kexec/kdump (loaded kernel image,
+ * control pages, vmcoreinfo)
+ * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
+ * not marked PG_reserved (as they might be in use by somebody else who does
+ * not respect the caching strategy).
+ * - Pages part of an offline section (struct pages of offline sections should
+ * not be trusted as they will be initialized when first onlined).
+ * - MCA pages on ia64
+ * - Pages holding CPU notes for POWER Firmware Assisted Dump
+ * - Device memory (e.g. PMEM, DAX, HMM)
+ * Some PG_reserved pages will be excluded from the hibernation image.
+ * PG_reserved does in general not hinder anybody from dumping or swapping
+ * and is no longer required for remap_pfn_range(). ioremap might require it.
+ * Consequently, PG_reserved for a page mapped into user space can indicate
+ * the zero page, the vDSO, MMIO pages or device memory.
*
* The PG_private bitflag is set on pagecache pages if they contain filesystem
* specific data (which is normally at page->private). It can be used by
@@ -671,7 +700,7 @@ PAGEFLAG_FALSE(DoubleMap)
/* Reserve 0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
-#define PG_balloon 0x00000100
+#define PG_offline 0x00000100
#define PG_kmemcg 0x00000200
#define PG_table 0x00000400
@@ -706,10 +735,13 @@ static __always_inline void __ClearPage##uname(struct page *page) \
PAGE_TYPE_OPS(Buddy, buddy)
/*
- * PageBalloon() is true for pages that are on the balloon page list
- * (see mm/balloon_compaction.c).
+ * PageOffline() indicates that the page is logically offline although the
+ * containing section is online. (e.g. inflated in a balloon driver or
+ * not onlined when onlining the section).
+ * The content of these pages is effectively stale. Such pages should not
+ * be touched (read/write/dump/save) except by their owner.
*/
-PAGE_TYPE_OPS(Balloon, balloon)
+PAGE_TYPE_OPS(Offline, offline)
/*
* If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
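
An illustrative sketch (not part of the patch) of the renamed page type in a balloon-style driver, following the comment above; the __SetPageOffline()/__ClearPageOffline() names are assumed from the PAGE_TYPE_OPS() generator.

#include <linux/page-flags.h>

static void my_balloon_inflate_page(struct page *page)
{
	/* content is now stale; only the balloon driver may touch the page */
	__SetPageOffline(page);
}

static void my_balloon_deflate_page(struct page *page)
{
	/* page returns to normal use */
	__ClearPageOffline(page);
}
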
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e2d7039af6a3..b477a70cc2e4 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -164,7 +164,7 @@ void release_pages(struct page **pages, int nr);
* will find the page or it will not. Likewise, the old find_get_page could run
* either before the insertion or afterwards, depending on timing.
*/
-static inline int page_cache_get_speculative(struct page *page)
+static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
@@ -180,10 +180,10 @@ static inline int page_cache_get_speculative(struct page *page)
* SMP requires.
*/
VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_inc(page);
+ page_ref_add(page, count);
#else
- if (unlikely(!get_page_unless_zero(page))) {
+ if (unlikely(!page_ref_add_unless(page, count, 0))) {
/*
* Either the page has been freed, or will be freed.
* In either case, retry here and the caller should
@@ -197,27 +197,14 @@ static inline int page_cache_get_speculative(struct page *page)
return 1;
}
-/*
- * Same as above, but add instead of inc (could just be merged)
- */
-static inline int page_cache_add_speculative(struct page *page, int count)
+static inline int page_cache_get_speculative(struct page *page)
{
- VM_BUG_ON(in_interrupt());
-
-#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_add(page, count);
-
-#else
- if (unlikely(!page_ref_add_unless(page, count, 0)))
- return 0;
-#endif
- VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
+ return __page_cache_add_speculative(page, 1);
+}
- return 1;
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+ return __page_cache_add_speculative(page, count);
}
#ifdef CONFIG_NUMA
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 397607a0c0eb..f41f1d041e2c 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,6 +460,7 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
void *, size_t, int);
/* IEEE1284.3 functions */
+#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
@@ -468,6 +469,18 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
extern void parport_daisy_deselect_all (struct parport *port);
extern int parport_daisy_select (struct parport *port, int daisy, int mode);
+#ifdef CONFIG_PARPORT_1284
+extern int daisy_drv_init(void);
+extern void daisy_drv_exit(void);
+#else
+static inline int daisy_drv_init(void)
+{
+ return 0;
+}
+
+static inline void daisy_drv_exit(void) {}
+#endif
+
/* Lowlevel drivers _can_ call this support function to handle irqs. */
static inline void parport_generic_irq(struct parport *port)
{
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index cb1adf0b78a9..249d4d7fbf18 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -24,7 +24,7 @@ static inline void *
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
- return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
+ return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
static inline void
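The wrapper can switch to dma_alloc_coherent() because that function now returns zeroed memory, so callers of the compat API see no behavioural change. A short hypothetical caller for context:

/* Hypothetical driver setup: the returned buffer is still zero-filled. */
static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma)
{
	return pci_zalloc_consistent(pdev, size, dma);
}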
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 65f1d8c2f082..e7c51b00cdfe 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1393,7 +1393,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
}
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
- const struct irq_affinity *affd);
+ struct irq_affinity *affd);
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
@@ -1419,7 +1419,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
- const struct irq_affinity *aff_desc)
+ struct irq_affinity *aff_desc)
{
if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
return 1;
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 71b75643c432..03cb4b6f842e 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \
extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);
-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
might_sleep();
@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
__this_cpu_inc(*sem->read_count);
if (unlikely(!rcu_sync_is_idle(&sem->rss)))
__percpu_down_read(sem, false); /* Unconditional memory barrier */
- barrier();
/*
- * The barrier() prevents the compiler from
+ * The preempt_enable() prevents the compiler from
* bleeding the critical section out.
*/
-}
-
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
-{
- percpu_down_read_preempt_disable(sem);
preempt_enable();
}
@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
return ret;
}
-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
- /*
- * The barrier() prevents the compiler from
- * bleeding the critical section out.
- */
- barrier();
+ preempt_disable();
/*
* Same as in percpu_down_read().
*/
@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}
-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
-{
- preempt_disable();
- percpu_up_read_preempt_enable(sem);
-}
-
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
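With the preempt-disable variants folded in, the reader side is just a down/up pair. A minimal sketch, assuming an illustrative semaphore name:

#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);

static void example_reader(void)
{
	percpu_down_read(&example_rwsem);
	/* read-side critical section */
	percpu_up_read(&example_rwsem);
}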
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1d5c551a5add..e47ef764f613 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -53,8 +53,8 @@ struct perf_guest_info_callbacks {
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
-#include <linux/workqueue.h>
#include <linux/cgroup.h>
+#include <linux/refcount.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -244,6 +244,7 @@ struct perf_event;
#define PERF_PMU_CAP_EXCLUSIVE 0x10
#define PERF_PMU_CAP_ITRACE 0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
+#define PERF_PMU_CAP_NO_EXCLUDE 0x80
/**
* struct pmu - generic performance monitoring unit
@@ -409,7 +410,7 @@ struct pmu {
/*
* Set up pmu-private data structures for an AUX area
*/
- void *(*setup_aux) (int cpu, void **pages,
+ void *(*setup_aux) (struct perf_event *event, void **pages,
int nr_pages, bool overwrite);
/* optional */
@@ -447,6 +448,11 @@ struct pmu {
* Filter events for PMU-specific reasons.
*/
int (*filter_match) (struct perf_event *event); /* optional */
+
+ /*
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+ */
+ int (*check_period) (struct perf_event *event, u64 value); /* optional */
};
enum perf_addr_filter_action_t {
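A hedged sketch of the new optional callback: a PMU driver that cannot program arbitrary periods can veto PERF_EVENT_IOC_PERIOD values. The limit macro below is hypothetical:

#define EXAMPLE_PMU_MAX_PERIOD	0xffffffffULL	/* made-up hardware limit */

static int example_pmu_check_period(struct perf_event *event, u64 value)
{
	return value > EXAMPLE_PMU_MAX_PERIOD ? -EINVAL : 0;
}

/* wired up in the driver as:  .check_period = example_pmu_check_period,  */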
@@ -489,6 +495,11 @@ struct perf_addr_filters_head {
unsigned int nr_file_filters;
};
+struct perf_addr_filter_range {
+ unsigned long start;
+ unsigned long size;
+};
+
/**
* enum perf_event_state - the states of an event:
*/
@@ -665,7 +676,7 @@ struct perf_event {
/* address range filters */
struct perf_addr_filters_head addr_filters;
/* vma address array for file-based filters */

- unsigned long *addr_filters_offs;
+ struct perf_addr_filter_range *addr_filter_ranges;
unsigned long addr_filters_gen;
void (*destroy)(struct perf_event *);
@@ -737,7 +748,7 @@ struct perf_event_context {
int nr_stat;
int nr_freq;
int rotate_disable;
- atomic_t refcount;
+ refcount_t refcount;
struct task_struct *task;
/*
@@ -978,9 +989,9 @@ extern void perf_event_output_forward(struct perf_event *event,
extern void perf_event_output_backward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
-extern void perf_event_output(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs);
+extern int perf_event_output(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
static inline bool
is_default_overflow_handler(struct perf_event *event)
@@ -1004,6 +1015,15 @@ perf_event__output_id_sample(struct perf_event *event,
extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);
+static inline bool event_has_any_exclude_flag(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+
+ return attr->exclude_idle || attr->exclude_user ||
+ attr->exclude_kernel || attr->exclude_hv ||
+ attr->exclude_guest || attr->exclude_host;
+}
+
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
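PMUs that cannot honour the exclude_* attribute bits can either advertise PERF_PMU_CAP_NO_EXCLUDE and let the core reject such events, or keep an explicit check in their init path. A hypothetical driver-side check using the new helper:

static int example_pmu_event_init(struct perf_event *event)
{
	/* this PMU counts everything; refuse any exclusion request */
	if (event_has_any_exclude_flag(event))
		return -EINVAL;

	return 0;
}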
@@ -1113,6 +1133,13 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
}
extern void perf_event_mmap(struct vm_area_struct *vma);
+
+extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
+ bool unregister, const char *sym);
+extern void perf_event_bpf_event(struct bpf_prog *prog,
+ enum perf_bpf_event_type type,
+ u16 flags);
+
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1333,6 +1360,13 @@ static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
+
+typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
+static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
+ bool unregister, const char *sym) { }
+static inline void perf_event_bpf_event(struct bpf_prog *prog,
+ enum perf_bpf_event_type type,
+ u16 flags) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3b051f761450..34084892a466 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
@@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
+#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
extern const int phy_10_100_features_array[4];
@@ -304,11 +306,6 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
* - irq or timer will set NOLINK if link goes down
* - phy_stop moves to HALTED
*
- * CHANGELINK: PHY experienced a change in link state
- * - timer moves to RUNNING if link
- * - timer moves to NOLINK if the link is down
- * - phy_stop moves to HALTED
- *
* HALTED: PHY is up, but no polling or interrupts are done. Or
* PHY is in an error state.
*
@@ -327,7 +324,6 @@ enum phy_state {
PHY_RUNNING,
PHY_NOLINK,
PHY_FORCING,
- PHY_CHANGELINK,
PHY_RESUMING
};
@@ -467,8 +463,8 @@ struct phy_device {
* only works for PHYs with IDs which match this field
* name: The friendly name of this PHY type
* phy_id_mask: Defines the important bits of the phy_id
- * features: A list of features (speed, duplex, etc) supported
- * by this PHY
+ * features: A mandatory list of features (speed, duplex, etc)
+ * supported by this PHY
* flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
*
@@ -506,6 +502,12 @@ struct phy_driver {
*/
int (*probe)(struct phy_device *phydev);
+ /*
+ * Probe the hardware to determine what abilities it has.
+ * Should only set phydev->supported.
+ */
+ int (*get_features)(struct phy_device *phydev);
+
/* PHY Power Management */
int (*suspend)(struct phy_device *phydev);
int (*resume)(struct phy_device *phydev);
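A minimal sketch of the new hook for a Clause 45 device, reusing the genphy_c45_pma_read_abilities() helper added elsewhere in this series; the driver function name is hypothetical:

static int example_c45_get_features(struct phy_device *phydev)
{
	/* fill phydev->supported from the PMA/PMD ability registers */
	return genphy_c45_pma_read_abilities(phydev);
}

/* in the phy_driver:  .get_features = example_c45_get_features,  */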
@@ -671,13 +673,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
bool exact);
size_t phy_speeds(unsigned int *speeds, size_t size,
unsigned long *mask);
-
-static inline bool __phy_is_started(struct phy_device *phydev)
-{
- WARN_ON(!mutex_is_locked(&phydev->lock));
-
- return phydev->state >= PHY_UP;
-}
+void of_set_phy_supported(struct phy_device *phydev);
+void of_set_phy_eee_broken(struct phy_device *phydev);
/**
* phy_is_started - Convenience function to check whether PHY is started
@@ -685,29 +682,12 @@ static inline bool __phy_is_started(struct phy_device *phydev)
*/
static inline bool phy_is_started(struct phy_device *phydev)
{
- bool started;
-
- mutex_lock(&phydev->lock);
- started = __phy_is_started(phydev);
- mutex_unlock(&phydev->lock);
-
- return started;
+ return phydev->state >= PHY_UP;
}
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
/**
- * phy_read_mmd - Convenience function for reading a register
- * from an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for phy_read();
- */
-int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
-
-/**
* phy_read - Convenience function for reading a given PHY register
* @phydev: the phy_device struct
* @regnum: register number to read
@@ -762,9 +742,68 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
val);
}
+/**
+ * phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Same rules as for phy_read();
+ */
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+
+/**
+ * __phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Same rules as for __phy_read();
+ */
+int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+
+/**
+ * phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to read
+ * @val: value to write to @regnum
+ *
+ * Same rules as for phy_write();
+ */
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
+
+/**
+ * __phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to read
+ * @val: value to write to @regnum
+ *
+ * Same rules as for __phy_write();
+ */
+int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
+
+int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
+ u16 set);
+int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
+ u16 set);
int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+
/**
* __phy_set_bits - Convenience function for setting bits in a PHY register
* @phydev: the phy_device struct
@@ -815,6 +854,66 @@ static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
}
/**
+ * __phy_set_bits_mmd - Convenience function for setting bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to set
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_set_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return __phy_modify_mmd(phydev, devad, regnum, 0, val);
+}
+
+/**
+ * __phy_clear_bits_mmd - Convenience function for clearing bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to clear
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_clear_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return __phy_modify_mmd(phydev, devad, regnum, val, 0);
+}
+
+/**
+ * phy_set_bits_mmd - Convenience function for setting bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to set
+ */
+static inline int phy_set_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return phy_modify_mmd(phydev, devad, regnum, 0, val);
+}
+
+/**
+ * phy_clear_bits_mmd - Convenience function for clearing bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to clear
+ */
+static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return phy_modify_mmd(phydev, devad, regnum, val, 0);
+}
+
+/**
* phy_interrupt_is_valid - Convenience function for testing a given PHY irq
* @phydev: the phy_device struct
*
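A hedged usage sketch for the new MMD bit helpers; only the helper signature and MDIO_MMD_PMAPMD come from the kernel headers, the register address and bit are made up:

#include <linux/mdio.h>
#include <linux/phy.h>

static int example_enable_vendor_feature(struct phy_device *phydev)
{
	/* set bit 0 of an illustrative vendor register on the PMA/PMD MMD */
	return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, 0xa000, BIT(0));
}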
@@ -890,18 +989,6 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
return phydev->is_pseudo_fixed_link;
}
-/**
- * phy_write_mmd - Convenience function for writing a register
- * on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for phy_write();
- */
-int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
-
int phy_save_page(struct phy_device *phydev);
int phy_select_page(struct phy_device *phydev, int page);
int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
@@ -957,7 +1044,6 @@ int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
int phy_speed_up(struct phy_device *phydev);
-int phy_stop_interrupts(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
int phy_reset_after_clk_enable(struct phy_device *phydev);
@@ -991,6 +1077,7 @@ void phy_attached_info(struct phy_device *phydev);
int genphy_config_init(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
+int genphy_config_eee_advert(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
@@ -1003,6 +1090,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
{
return 0;
}
+static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
+{
+ return 0;
+}
+static inline int genphy_no_config_intr(struct phy_device *phydev)
+{
+ return 0;
+}
int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
u16 regnum);
int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
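Drivers for PHYs without usable interrupts can wire the new no-op helpers straight into their phy_driver. Everything below except the helper names and the features macro is hypothetical:

static struct phy_driver example_phy_driver = {
	.phy_id		= 0x00112233,		/* made-up ID */
	.phy_id_mask	= 0xffffffff,
	.name		= "Example 10/100 PHY",
	.features	= PHY_BASIC_FEATURES,
	.ack_interrupt	= genphy_no_ack_interrupt,
	.config_intr	= genphy_no_config_intr,
};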
@@ -1010,21 +1105,20 @@ int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
+int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart);
int genphy_c45_aneg_done(struct phy_device *phydev);
-int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask);
+int genphy_c45_read_link(struct phy_device *phydev);
int genphy_c45_read_lpa(struct phy_device *phydev);
int genphy_c45_read_pma(struct phy_device *phydev);
int genphy_c45_pma_setup_forced(struct phy_device *phydev);
+int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
+int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_read_status(struct phy_device *phydev);
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
-int gen10g_read_status(struct phy_device *phydev);
-int gen10g_no_soft_reset(struct phy_device *phydev);
-int gen10g_config_init(struct phy_device *phydev);
-int gen10g_suspend(struct phy_device *phydev);
-int gen10g_resume(struct phy_device *phydev);
static inline int phy_read_status(struct phy_device *phydev)
{
@@ -1052,7 +1146,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
-int phy_start_interrupts(struct phy_device *phydev);
+void phy_request_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
@@ -1183,4 +1277,7 @@ module_exit(phy_module_exit)
#define module_phy_driver(__phy_drivers) \
phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
+bool phy_driver_is_genphy(struct phy_device *phydev);
+bool phy_driver_is_genphy_10g(struct phy_device *phydev);
+
#endif /* __PHY_H */
diff --git a/include/linux/phy/phy-mipi-dphy.h b/include/linux/phy/phy-mipi-dphy.h
index c08aacc0ac35..a877ffee845d 100644
--- a/include/linux/phy/phy-mipi-dphy.h
+++ b/include/linux/phy/phy-mipi-dphy.h
@@ -6,8 +6,6 @@
#ifndef __PHY_MIPI_DPHY_H_
#define __PHY_MIPI_DPHY_H_
-#include <video/videomode.h>
-
/**
* struct phy_configure_opts_mipi_dphy - MIPI D-PHY configuration set
*
@@ -192,10 +190,10 @@ struct phy_configure_opts_mipi_dphy {
/**
* @init:
*
- * Time, in picoseconds for the initialization period to
+ * Time, in microseconds for the initialization period to
* complete.
*
- * Minimum value: 100000000 ps
+ * Minimum value: 100 us
*/
unsigned int init;
@@ -246,11 +244,11 @@ struct phy_configure_opts_mipi_dphy {
/**
* @wakeup:
*
- * Time, in picoseconds, that a transmitter drives a Mark-1
+ * Time, in microseconds, that a transmitter drives a Mark-1
* state prior to a Stop state in order to initiate an exit
* from ULPS.
*
- * Minimum value: 1000000000 ps
+ * Minimum value: 1000 us
*/
unsigned int wakeup;
@@ -271,7 +269,8 @@ struct phy_configure_opts_mipi_dphy {
/**
* @lanes:
*
- * Number of active data lanes used for the transmissions.
+ * Number of active, consecutive, data lanes, starting from
+ * lane 0, used for the transmissions.
*/
unsigned char lanes;
};
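After the unit change both fields are filled in microseconds directly. The struct instance below is purely illustrative; the values are simply the spec minima quoted in the comments above:

static const struct phy_configure_opts_mipi_dphy example_dphy_cfg = {
	.init	= 100,		/* >= 100 us initialization period */
	.wakeup	= 1000,		/* >= 1000 us Mark-1 drive time for ULPS exit */
};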
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index e8e118d70fd7..3f350e2749fe 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -42,6 +42,7 @@ enum phy_mode {
PHY_MODE_PCIE,
PHY_MODE_ETHERNET,
PHY_MODE_MIPI_DPHY,
+ PHY_MODE_SATA
};
/**
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 9525567b1951..1e5d86ebdaeb 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -15,30 +15,41 @@ struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status,
- int link_gpio);
+ struct fixed_phy_status *status);
extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
- int link_gpio,
struct device_node *np);
+
+extern struct phy_device *
+fixed_phy_register_with_gpiod(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct gpio_desc *gpiod);
+
extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status,
- int link_gpio)
+ struct fixed_phy_status *status)
{
return -ENODEV;
}
static inline struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
- int gpio_link,
struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
+
+static inline struct phy_device *
+fixed_phy_register_with_gpiod(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct gpio_desc *gpiod)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void fixed_phy_unregister(struct phy_device *phydev)
{
}
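A hypothetical caller of the new GPIO-descriptor variant; the status values and the origin of the descriptor are assumptions:

#include <linux/gpio/consumer.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

static struct fixed_phy_status example_status = {
	.link	= 1,
	.speed	= 1000,
	.duplex	= 1,
};

static struct phy_device *example_register_fixed(struct gpio_desc *link_gpiod)
{
	return fixed_phy_register_with_gpiod(PHY_POLL, &example_status,
					     link_gpiod);
}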
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 021fc6595856..6411c624f63a 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -149,6 +149,13 @@ int mac_link_state(struct net_device *ndev,
* configuration word. Nothing is advertised by the MAC. The MAC is
* responsible for reading the configuration word and configuring
* itself accordingly.
+ *
+ * Implementations are expected to update the MAC to reflect the
+ * requested settings - i.o.w., if nothing has changed between two
+ * calls, no action is expected. If only flow control settings have
+ * changed, flow control should be updated *without* taking the link
+ * down. This "update" behaviour is critical to avoid bouncing the
+ * link up status.
*/
void mac_config(struct net_device *ndev, unsigned int mode,
const struct phylink_link_state *state);
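A hedged sketch of the documented "update, don't reset" expectation: the MAC driver caches what it last programmed so that a pause-only change does not bounce the link. All example_* names and the private struct are hypothetical:

static void example_mac_config(struct net_device *ndev, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct example_priv *priv = netdev_priv(ndev);

	if (priv->pause != state->pause) {
		example_set_flow_control(ndev, state->pause);
		priv->pause = state->pause;
	}

	if (priv->speed != state->speed || priv->duplex != state->duplex) {
		example_reprogram_mac(ndev, state);	/* may take the link down */
		priv->speed = state->speed;
		priv->duplex = state->duplex;
	}
}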
@@ -220,6 +227,7 @@ void phylink_ethtool_get_pauseparam(struct phylink *,
int phylink_ethtool_set_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
int phylink_get_eee_err(struct phylink *);
+int phylink_init_eee(struct phylink *, bool);
int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
index 8eaef2f2b691..c3b61ead41f2 100644
--- a/include/linux/platform_data/b53.h
+++ b/include/linux/platform_data/b53.h
@@ -20,7 +20,7 @@
#define __B53_H
#include <linux/kernel.h>
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
struct b53_platform_data {
/* Must be first such that dsa_register_switch() can access it */
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h
new file mode 100644
index 000000000000..3fbf9f2793b5
--- /dev/null
+++ b/include/linux/platform_data/davinci-cpufreq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI DaVinci CPUFreq platform support.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ */
+
+#ifndef _MACH_DAVINCI_CPUFREQ_H
+#define _MACH_DAVINCI_CPUFREQ_H
+
+#include <linux/cpufreq.h>
+
+struct davinci_cpufreq_config {
+ struct cpufreq_frequency_table *freq_table;
+ int (*set_voltage)(unsigned int index);
+ int (*init)(void);
+};
+
+#endif /* _MACH_DAVINCI_CPUFREQ_H */
diff --git a/include/linux/platform_data/dsa.h b/include/linux/platform_data/dsa.h
new file mode 100644
index 000000000000..d4d9bf2060a6
--- /dev/null
+++ b/include/linux/platform_data/dsa.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DSA_PDATA_H
+#define __DSA_PDATA_H
+
+struct device;
+struct net_device;
+
+#define DSA_MAX_SWITCHES 4
+#define DSA_MAX_PORTS 12
+#define DSA_RTABLE_NONE -1
+
+struct dsa_chip_data {
+ /*
+ * How to access the switch configuration registers.
+ */
+ struct device *host_dev;
+ int sw_addr;
+
+ /*
+ * Reference to network devices
+ */
+ struct device *netdev[DSA_MAX_PORTS];
+
+ /* set to size of eeprom if supported by the switch */
+ int eeprom_len;
+
+ /* Device tree node pointer for this specific switch chip
+ * used during switch setup in case additional properties
+ * and resources needs to be used
+ */
+ struct device_node *of_node;
+
+ /*
+ * The names of the switch's ports. Use "cpu" to
+ * designate the switch port that the cpu is connected to,
+ * "dsa" to indicate that this port is a DSA link to
+ * another switch, NULL to indicate the port is unused,
+ * or any other string to indicate this is a physical port.
+ */
+ char *port_names[DSA_MAX_PORTS];
+ struct device_node *port_dn[DSA_MAX_PORTS];
+
+ /*
+ * An array of which element [a] indicates which port on this
+ * switch should be used to send packets to that are destined
+ * for switch a. Can be NULL if there is only one switch chip.
+ */
+ s8 rtable[DSA_MAX_SWITCHES];
+};
+
+struct dsa_platform_data {
+ /*
+ * Reference to a Linux network interface that connects
+ * to the root switch chip of the tree.
+ */
+ struct device *netdev;
+ struct net_device *of_netdev;
+
+ /*
+ * Info structs describing each of the switch chips
+ * connected via this network interface.
+ */
+ int nr_chips;
+ struct dsa_chip_data *chip;
+};
+
+
+#endif /* __DSA_PDATA_H */
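A hypothetical board file using the relocated definitions; the device pointers are omitted and the port names are placeholders:

#include <linux/platform_data/dsa.h>

static struct dsa_chip_data example_switch_chip = {
	.port_names = {
		[0] = "lan1",
		[1] = "lan2",
		[5] = "cpu",
	},
};

static struct dsa_platform_data example_dsa_pdata = {
	.nr_chips	= 1,
	.chip		= &example_switch_chip,
};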
diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h
index f63af2955ea0..963730b44aea 100644
--- a/include/linux/platform_data/mv88e6xxx.h
+++ b/include/linux/platform_data/mv88e6xxx.h
@@ -2,7 +2,7 @@
#ifndef __DSA_MV88E6XXX_H
#define __DSA_MV88E6XXX_H
-#include <net/dsa.h>
+#include <linux/platform_data/dsa.h>
struct dsa_mv88e6xxx_pdata {
/* Must be first, such that dsa_register_switch() can access this
diff --git a/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h b/include/linux/platform_data/spi-ath79.h
index aa71216edf99..aa71216edf99 100644
--- a/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h
+++ b/include/linux/platform_data/spi-ath79.h
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h
index 0926e99f2e8f..879f5c78b91a 100644
--- a/include/linux/platform_data/usb-davinci.h
+++ b/include/linux/platform_data/usb-davinci.h
@@ -11,22 +11,8 @@
#ifndef __ASM_ARCH_USB_H
#define __ASM_ARCH_USB_H
-struct da8xx_ohci_root_hub;
-
-typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub,
- unsigned port);
-
/* Passed as the platform data to the OHCI driver */
struct da8xx_ohci_root_hub {
- /* Switch the port power on/off */
- int (*set_power)(unsigned port, int on);
- /* Read the port power status */
- int (*get_power)(unsigned port);
- /* Read the port over-current indicator */
- int (*get_oci)(unsigned port);
- /* Over-current indicator change notification (pass NULL to disable) */
- int (*ocic_notify)(da8xx_ocic_handler_t handler);
-
/* Time from power on to power good (in 2 ms units) */
u8 potpgt;
};
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index c7c081dc6034..466a8d02e298 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -63,6 +63,7 @@ extern int platform_add_devices(struct platform_device **, int);
struct platform_device_info {
struct device *parent;
struct fwnode_handle *fwnode;
+ bool of_node_reused;
const char *name;
int id;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0bd9de116826..06f7ed893928 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -592,6 +592,7 @@ struct dev_pm_info {
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
bool is_late_suspended:1;
+ bool no_pm:1;
bool early_init:1; /* Owned by the PM core */
bool direct_complete:1; /* Owned by the PM core */
u32 driver_flags;
@@ -633,9 +634,9 @@ struct dev_pm_info {
int runtime_error;
int autosuspend_delay;
u64 last_busy;
- unsigned long active_jiffies;
- unsigned long suspended_jiffies;
- unsigned long accounting_timestamp;
+ u64 active_time;
+ u64 suspended_time;
+ u64 accounting_timestamp;
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
void (*set_latency_tolerance)(struct device *, s32);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index dd364abb649a..1ed5874bcee0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -271,7 +271,7 @@ int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
unsigned int index);
struct device *genpd_dev_pm_attach_by_name(struct device *dev,
- char *name);
+ const char *name);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
static inline int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
@@ -324,7 +324,7 @@ static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
}
static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
- char *name)
+ const char *name)
{
return NULL;
}
@@ -341,7 +341,7 @@ int dev_pm_domain_attach(struct device *dev, bool power_on);
struct device *dev_pm_domain_attach_by_id(struct device *dev,
unsigned int index);
struct device *dev_pm_domain_attach_by_name(struct device *dev,
- char *name);
+ const char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
@@ -355,7 +355,7 @@ static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
return NULL;
}
static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
- char *name)
+ const char *name)
{
return NULL;
}
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0a2a88e5a383..24c757a32a7b 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -86,6 +86,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
+
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
int dev_pm_opp_get_opp_count(struct device *dev);
@@ -108,6 +110,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp);
int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt);
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+void dev_pm_opp_remove_all_dynamic(struct device *dev);
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
@@ -157,6 +160,11 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
+static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
return false;
@@ -217,6 +225,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
}
+static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
@@ -322,6 +334,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
+void dev_pm_opp_of_register_em(struct cpumask *cpus);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -360,6 +373,11 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
return NULL;
}
+
+static inline void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+}
+
static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
return -ENOTSUPP;
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 54af4eef169f..9dc6eebf62d2 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
- WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
+ WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}
static inline bool pm_runtime_is_irq_safe(struct device *dev)
@@ -113,6 +113,8 @@ static inline bool pm_runtime_is_irq_safe(struct device *dev)
return dev->power.irq_safe;
}
+extern u64 pm_runtime_suspended_time(struct device *dev);
+
#else /* !CONFIG_PM */
static inline bool queue_pm_work(struct work_struct *work) { return false; }
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 15927ebc22f2..5046bad0c1c5 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -30,7 +30,7 @@
*/
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
-/********** mm/debug-pagealloc.c **********/
+/********** mm/page_poison.c **********/
#ifdef CONFIG_PAGE_POISONING_ZERO
#define PAGE_POISON 0x00
#else
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 3a3bc71017d5..18674d7d5b1c 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -51,7 +51,7 @@ struct posix_clock;
struct posix_clock_operations {
struct module *owner;
- int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
+ int (*clock_adjtime)(struct posix_clock *pc, struct __kernel_timex *tx);
int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts);
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index e96581ca7c9d..b20798fc5191 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -12,7 +12,7 @@ struct siginfo;
struct cpu_timer_list {
struct list_head entry;
- u64 expires, incr;
+ u64 expires;
struct task_struct *task;
int firing;
};
diff --git a/include/linux/property.h b/include/linux/property.h
index 3789ec755fb6..65d3420dd5d1 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -258,7 +258,7 @@ struct property_entry {
#define PROPERTY_ENTRY_STRING(_name_, _val_) \
(struct property_entry) { \
.name = _name_, \
- .length = sizeof(_val_), \
+ .length = sizeof(const char *), \
.type = DEV_PROP_STRING, \
{ .value = { .str = _val_ } }, \
}
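Worked example of why the length changes: for a string-literal argument, sizeof(_val_) is the size of the character array, not of the stored pointer, while the value actually recorded is the .str pointer. Assuming a 64-bit build and a made-up property:

#include <linux/property.h>

static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_STRING("model", "example-board"),
	{ }
};
/* old macro: .length = sizeof("example-board") = 14 (the literal's array size)
 * new macro: .length = sizeof(const char *)    =  8, matching the stored .str */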
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 186cd8e970c7..8da46ac44a2e 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -26,7 +26,6 @@
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
-#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 1637385bcc17..d0aecc04c54b 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -13,6 +13,7 @@
#ifndef __QCOM_SCM_H
#define __QCOM_SCM_H
+#include <linux/err.h>
#include <linux/types.h>
#include <linux/cpumask.h>
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 59ddf9af909e..2dd0a9ed5b36 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -663,6 +663,37 @@ out:
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ u32 cur_prod, page_mask, page_cnt, page_diff;
+
+ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
+ p_chain->u.chain32.prod_idx;
+
+ /* Assume that number of elements in a page is power of 2 */
+ page_mask = ~p_chain->elem_per_page_mask;
+
+ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
+ * reaches the first element of next page before the page index
+ * is incremented. See qed_chain_produce().
+ * Index wrap around is not a problem because the difference
+ * between current and given producer indices is always
+ * positive and lower than the chain's capacity.
+ */
+ page_diff = (((cur_prod - 1) & page_mask) -
+ ((prod_idx - 1) & page_mask)) /
+ p_chain->elem_per_page;
+
+ page_cnt = qed_chain_get_page_cnt(p_chain);
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.prod_page_idx =
+ (p_chain->pbl.c.u16.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ else
+ p_chain->pbl.c.u32.prod_page_idx =
+ (p_chain->pbl.c.u32.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ }
+
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16) prod_idx;
else
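A worked example with assumed numbers, to make the rewind arithmetic concrete:

/* Assume 8 elements per page (elem_per_page = 8, page_mask = ~7),
 * cur_prod = 17 and a requested prod_idx = 10:
 *
 *	page_diff = (((17 - 1) & ~7) - ((10 - 1) & ~7)) / 8
 *		  = (16 - 8) / 8
 *		  = 1
 *
 * so the PBL producer page index is wound back by one page, modulo the
 * page count, matching the element-level producer rewind. */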
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 91c536a01b56..f6165d304b4d 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -38,7 +38,6 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
-#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
@@ -644,6 +643,7 @@ struct qed_dev_info {
u16 mtu;
bool wol_support;
+ bool smart_an;
/* MBI version */
u32 mbi_version;
@@ -764,6 +764,7 @@ struct qed_probe_params {
u32 dp_module;
u8 dp_level;
bool is_vf;
+ bool recov_in_prog;
};
#define QED_DRV_VER_STR_SIZE 12
@@ -810,6 +811,7 @@ struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev,
struct qed_link_output *link);
+ void (*schedule_recovery_handler)(void *dev);
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
void (*get_protocol_tlv_data)(void *dev, void *data);
@@ -1058,6 +1060,24 @@ struct qed_common_ops {
void __iomem *db_addr, void *db_data);
/**
+ * @brief recovery_process - Trigger a recovery process
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*recovery_process)(struct qed_dev *cdev);
+
+/**
+ * @brief recovery_prolog - Execute the prolog operations of a recovery process
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*recovery_prolog)(struct qed_dev *cdev);
+
+/**
* @brief update_drv_state - API to inform the change in the driver state.
*
* @param cdev
diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h
index 9904617a9730..5a00c7a473bf 100644
--- a/include/linux/qed/qede_rdma.h
+++ b/include/linux/qed/qede_rdma.h
@@ -74,21 +74,23 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv);
bool qede_rdma_supported(struct qede_dev *dev);
#if IS_ENABLED(CONFIG_QED_RDMA)
-int qede_rdma_dev_add(struct qede_dev *dev);
+int qede_rdma_dev_add(struct qede_dev *dev, bool recovery);
void qede_rdma_dev_event_open(struct qede_dev *dev);
void qede_rdma_dev_event_close(struct qede_dev *dev);
-void qede_rdma_dev_remove(struct qede_dev *dev);
+void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery);
void qede_rdma_event_changeaddr(struct qede_dev *edr);
#else
-static inline int qede_rdma_dev_add(struct qede_dev *dev)
+static inline int qede_rdma_dev_add(struct qede_dev *dev,
+ bool recovery)
{
return 0;
}
static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {}
static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {}
-static inline void qede_rdma_dev_remove(struct qede_dev *dev) {}
+static inline void qede_rdma_dev_remove(struct qede_dev *dev,
+ bool recovery) {}
static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {}
#endif
#endif
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
index 426cee67f0e2..b8e094b125ee 100644
--- a/include/linux/rcu_node_tree.h
+++ b/include/linux/rcu_node_tree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU node combining tree definitions. These are used to compute
* global attributes while avoiding common-case global contention. A key
@@ -11,23 +12,9 @@
* because the size of the TREE SRCU srcu_struct structure depends
* on these definitions.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2017
*
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#ifndef __LINUX_RCU_NODE_TREE_H
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index c3ad00e63556..87404cb015f1 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU segmented callback lists
*
@@ -5,23 +6,9 @@
* because the size of the TREE SRCU srcu_struct structure depends
* on these definitions.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2017
*
- * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index ece7ed9a4a70..6fc53a1345b3 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU-based infrastructure for lightweight reader-writer locking
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (c) 2015, Red Hat, Inc.
*
* Author: Oleg Nesterov <oleg@redhat.com>
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4db8bcacc51a..6cdb1db776cf 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2001
*
* Author: Dipankar Sarma <dipankar@in.ibm.com>
*
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -89,7 +76,7 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active __read_mostly;
-void rcu_check_callbacks(int user);
+void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
@@ -309,16 +296,16 @@ static inline void rcu_preempt_sleep_check(void) { }
*/
#ifdef __CHECKER__
-#define rcu_dereference_sparse(p, space) \
+#define rcu_check_sparse(p, space) \
((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
-#define rcu_dereference_sparse(p, space)
+#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */
#define __rcu_access_pointer(p, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
- rcu_dereference_sparse(p, space); \
+ rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
@@ -326,13 +313,13 @@ static inline void rcu_preempt_sleep_check(void) { }
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
- rcu_dereference_sparse(p, space); \
+ rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
- rcu_dereference_sparse(p, space); \
+ rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
#define rcu_dereference_raw(p) \
@@ -382,6 +369,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_assign_pointer(p, v) \
({ \
uintptr_t _r_a_p__v = (uintptr_t)(v); \
+ rcu_check_sparse(p, __rcu); \
\
if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
@@ -785,7 +773,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*/
#define RCU_INIT_POINTER(p, v) \
do { \
- rcu_dereference_sparse(p, __rcu); \
+ rcu_check_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
} while (0)
@@ -859,7 +847,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/* Has the specified rcu_head structure been handed to call_rcu()? */
-/*
+/**
* rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
* @rhp: The rcu_head structure to initialize.
*
@@ -874,10 +862,10 @@ static inline void rcu_head_init(struct rcu_head *rhp)
rhp->func = (rcu_callback_t)~0L;
}
-/*
+/**
* rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
* @rhp: The rcu_head structure to test.
- * @func: The function passed to call_rcu() along with @rhp.
+ * @f: The function passed to call_rcu() along with @rhp.
*
* Returns @true if the @rhp has been passed to call_rcu() with @func,
* and @false otherwise. Emits a warning in any other case, including
@@ -896,57 +884,4 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
return false;
}
-
-/* Transitional pre-consolidation compatibility definitions. */
-
-static inline void synchronize_rcu_bh(void)
-{
- synchronize_rcu();
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_rcu_expedited();
-}
-
-static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
-{
- call_rcu(head, func);
-}
-
-static inline void rcu_barrier_bh(void)
-{
- rcu_barrier();
-}
-
-static inline void synchronize_sched(void)
-{
- synchronize_rcu();
-}
-
-static inline void synchronize_sched_expedited(void)
-{
- synchronize_rcu_expedited();
-}
-
-static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
- call_rcu(head, func);
-}
-
-static inline void rcu_barrier_sched(void)
-{
- rcu_barrier();
-}
-
-static inline unsigned long get_state_synchronize_sched(void)
-{
- return get_state_synchronize_rcu();
-}
-
-static inline void cond_synchronize_sched(unsigned long oldstate)
-{
- cond_synchronize_rcu(oldstate);
-}
-
#endif /* __LINUX_RCUPDATE_H */
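With the RCU flavours consolidated, former users of the removed transitional wrappers call the core API directly; a minimal before/after sketch (obj and cb are placeholders):

/* before:  synchronize_rcu_bh();            after:  synchronize_rcu();
 * before:  call_rcu_sched(&obj->rcu, cb);   after:  call_rcu(&obj->rcu, cb);
 * before:  rcu_barrier_bh();                after:  rcu_barrier();           */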
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index af65d1f36ddb..8e727f57d814 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2008
*
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 7f83179177d1..735601ac27d3 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -1,26 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion (tree-based version)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2008
*
* Author: Dipankar Sarma <dipankar@in.ibm.com>
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
*
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
*
* For detailed explanation of Read-Copy Update mechanism see -
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 1781b6cb793c..daeec7dbd65c 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1131,11 +1131,37 @@ struct regmap_irq {
.reg_offset = (_id) / (_reg_bits), \
}
+#define REGMAP_IRQ_MAIN_REG_OFFSET(arr) \
+ { .num_regs = ARRAY_SIZE((arr)), .offset = &(arr)[0] }
+
+struct regmap_irq_sub_irq_map {
+ unsigned int num_regs;
+ unsigned int *offset;
+};
+
/**
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
* @name: Descriptive name for IRQ controller.
*
+ * @main_status: Base main status register address. For chips which have
+ * interrupts arranged in separate sub-irq blocks with own IRQ
+ * registers and which have a main IRQ registers indicating
+ * sub-irq blocks with unhandled interrupts. For such chips fill
+ * sub-irq register information in status_base, mask_base and
+ * ack_base.
+ * @num_main_status_bits: Should be given to chips where number of meaningful
+ * main status bits differs from num_regs.
+ * @sub_reg_offsets: arrays of mappings from main register bits to sub irq
+ * registers. First item in array describes the registers
+ * for first main status bit. Second array for second bit etc.
+ * Offset is given as sub register status offset to
+ * status_base. Should contain num_regs arrays.
+ * Can be provided for chips with more complex mapping than
+ * 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ...
+ * @num_main_regs: Number of 'main status' irq registers for chips which have
+ * main_status set.
+ *
* @status_base: Base status register address.
* @mask_base: Base mask register address.
* @mask_writeonly: Base mask register is write only.
@@ -1181,6 +1207,11 @@ struct regmap_irq {
struct regmap_irq_chip {
const char *name;
+ unsigned int main_status;
+ unsigned int num_main_status_bits;
+ struct regmap_irq_sub_irq_map *sub_reg_offsets;
+ int num_main_regs;
+
unsigned int status_base;
unsigned int mask_base;
unsigned int unmask_base;
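A hedged sketch of a chip description with one main status register fanning out to two sub-irq status registers; every register address below is made up:

static unsigned int example_sub0_offsets[] = { 0 };
static unsigned int example_sub1_offsets[] = { 1 };

static struct regmap_irq_sub_irq_map example_sub_maps[] = {
	REGMAP_IRQ_MAIN_REG_OFFSET(example_sub0_offsets),
	REGMAP_IRQ_MAIN_REG_OFFSET(example_sub1_offsets),
};

static struct regmap_irq_chip example_irq_chip = {
	.name			= "example",
	.main_status		= 0x10,
	.num_main_regs		= 1,
	.sub_reg_offsets	= example_sub_maps,
	.status_base		= 0x20,
	.mask_base		= 0x30,
	.ack_base		= 0x20,
	.num_regs		= 2,
	/* .irqs / .num_irqs omitted from this sketch */
};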
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 389bcaf7900f..377da2357118 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -264,6 +264,7 @@ enum regulator_type {
* @continuous_voltage_range: Indicates if the regulator can set any
* voltage within constrains range.
* @n_voltages: Number of selectors available for ops.list_voltage().
+ * @n_current_limits: Number of selectors available for current limits
*
* @min_uV: Voltage given by the lowest selector (if linear mapping)
* @uV_step: Voltage increase with each selector (if linear mapping)
@@ -278,14 +279,15 @@ enum regulator_type {
* @n_linear_ranges: Number of entries in the @linear_ranges (and in
* linear_range_selectors if used) table(s).
* @volt_table: Voltage mapping table (if table based mapping)
+ * @curr_table: Current limit mapping table (if table based mapping)
*
* @vsel_range_reg: Register for range selector when using pickable ranges
* and regulator_regmap_X_voltage_X_pickable functions.
* @vsel_range_mask: Mask for register bitfield used for range selector
* @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
* @vsel_mask: Mask for register bitfield used for selector
- * @csel_reg: Register for TPS65218 LS3 current regulator
- * @csel_mask: Mask for TPS65218 LS3 current regulator
+ * @csel_reg: Register for the current limit selector when using regulator_set_current_limit_regmap
+ * @csel_mask: Mask for the register bitfield used for the current limit selector
* @apply_reg: Register for initiate voltage change on the output when
* using regulator_set_voltage_sel_regmap
* @apply_bit: Register bitfield used for initiate voltage change on the
@@ -333,6 +335,7 @@ struct regulator_desc {
int id;
unsigned int continuous_voltage_range:1;
unsigned n_voltages;
+ unsigned int n_current_limits;
const struct regulator_ops *ops;
int irq;
enum regulator_type type;
@@ -351,6 +354,7 @@ struct regulator_desc {
int n_linear_ranges;
const unsigned int *volt_table;
+ const unsigned int *curr_table;
unsigned int vsel_range_reg;
unsigned int vsel_range_mask;
@@ -401,13 +405,7 @@ struct regulator_desc {
* NULL).
* @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
* insufficient.
- * @ena_gpio_initialized: GPIO controlling regulator enable was properly
- * initialized, meaning that >= 0 is a valid gpio
- * identifier and < 0 is a non existent gpio.
- * @ena_gpio: GPIO controlling regulator enable.
- * @ena_gpiod: GPIO descriptor controlling regulator enable.
- * @ena_gpio_invert: Sense for GPIO enable control.
- * @ena_gpio_flags: Flags to use when calling gpio_request_one()
+ * @ena_gpiod: GPIO descriptor controlling regulator enable.
*/
struct regulator_config {
struct device *dev;
@@ -416,11 +414,7 @@ struct regulator_config {
struct device_node *of_node;
struct regmap *regmap;
- bool ena_gpio_initialized;
- int ena_gpio;
struct gpio_desc *ena_gpiod;
- unsigned int ena_gpio_invert:1;
- unsigned int ena_gpio_flags;
};
/*
@@ -503,6 +497,7 @@ int regulator_notifier_call_chain(struct regulator_dev *rdev,
void *rdev_get_drvdata(struct regulator_dev *rdev);
struct device *rdev_get_dev(struct regulator_dev *rdev);
+struct regmap *rdev_get_regmap(struct regulator_dev *rdev);
int rdev_get_id(struct regulator_dev *rdev);
int regulator_mode_to_status(unsigned int);
@@ -543,9 +538,18 @@ int regulator_set_pull_down_regmap(struct regulator_dev *rdev);
int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
bool enable);
+int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
+ int min_uA, int max_uA);
+int regulator_get_current_limit_regmap(struct regulator_dev *rdev);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
void regulator_lock(struct regulator_dev *rdev);
void regulator_unlock(struct regulator_dev *rdev);
+/*
+ * Helper functions intended to be used by regulator drivers prior to
+ * registering their regulators.
+ */
+int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
+ unsigned int selector);
#endif
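To illustrate the new current-limit helpers declared above, a hypothetical regulator_desc (register addresses, table values and names are illustrative only) could be wired up like this:

static const unsigned int example_curr_table[] = {
        500000, 1000000, 1500000, 2000000,      /* uA, ordered to match the selector values */
};

static const struct regulator_ops example_ops = {
        .set_current_limit      = regulator_set_current_limit_regmap,
        .get_current_limit      = regulator_get_current_limit_regmap,
        /* voltage ops omitted for brevity */
};

static const struct regulator_desc example_desc = {
        .name                   = "example-ldo",
        .ops                    = &example_ops,
        .curr_table             = example_curr_table,
        .n_current_limits       = ARRAY_SIZE(example_curr_table),
        .csel_reg               = 0x12,         /* hypothetical current limit register */
        .csel_mask              = 0x03,         /* hypothetical selector bitfield */
};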
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 1a4340ed8e2b..f10140da7145 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -25,14 +25,6 @@ struct regulator_init_data;
* @input_supply: Name of the input regulator supply
* @microvolts: Output voltage of regulator
* @startup_delay: Start-up time in microseconds
- * @gpio_is_open_drain: Gpio pin is open drain or normal type.
- * If it is open drain type then HIGH will be set
- * through PULL-UP with setting gpio as input
- * and low will be set as gpio-output with driven
- * to low. For non-open-drain case, the gpio will
- * will be in output and drive to low/high accordingly.
- * @enable_high: Polarity of enable GPIO
- * 1 = Active high, 0 = Active low
* @enabled_at_boot: Whether regulator has been enabled at
* boot or not. 1 = Yes, 0 = No
* This is used to keep the regulator at
@@ -48,8 +40,6 @@ struct fixed_voltage_config {
const char *input_supply;
int microvolts;
unsigned startup_delay;
- unsigned gpio_is_open_drain:1;
- unsigned enable_high:1;
unsigned enabled_at_boot:1;
struct regulator_init_data *init_data;
};
diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h
index 19fbd267406d..11cd6375215d 100644
--- a/include/linux/regulator/gpio-regulator.h
+++ b/include/linux/regulator/gpio-regulator.h
@@ -21,6 +21,8 @@
#ifndef __REGULATOR_GPIO_H
#define __REGULATOR_GPIO_H
+#include <linux/gpio/consumer.h>
+
struct regulator_init_data;
enum regulator_type;
@@ -44,18 +46,14 @@ struct gpio_regulator_state {
/**
* struct gpio_regulator_config - config structure
* @supply_name: Name of the regulator supply
- * @enable_gpio: GPIO to use for enable control
- * set to -EINVAL if not used
- * @enable_high: Polarity of enable GPIO
- * 1 = Active high, 0 = Active low
* @enabled_at_boot: Whether regulator has been enabled at
* boot or not. 1 = Yes, 0 = No
* This is used to keep the regulator at
* the default state
* @startup_delay: Start-up time in microseconds
- * @gpios: Array containing the gpios needed to control
- * the setting of the regulator
- * @nr_gpios: Number of gpios
+ * @gflags: Array of GPIO configuration flags for initial
+ * states
+ * @ngpios: Number of GPIOs and configurations available
* @states: Array of gpio_regulator_state entries describing
* the gpio state for specific voltages
* @nr_states: Number of states available
@@ -69,13 +67,11 @@ struct gpio_regulator_state {
struct gpio_regulator_config {
const char *supply_name;
- int enable_gpio;
- unsigned enable_high:1;
unsigned enabled_at_boot:1;
unsigned startup_delay;
- struct gpio *gpios;
- int nr_gpios;
+ enum gpiod_flags *gflags;
+ int ngpios;
struct gpio_regulator_state *states;
int nr_states;
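A minimal sketch of platform data using the new descriptor-based fields (all names and values are hypothetical); the GPIO descriptors themselves are now looked up by the driver, and @gflags only supplies the initial configuration of each line:

static enum gpiod_flags example_gflags[] = {
        GPIOD_OUT_LOW,
        GPIOD_OUT_HIGH,
};

static struct gpio_regulator_state example_states[] = {
        { .value = 1800000, .gpios = 0x0 },     /* both lines low  -> 1.8 V */
        { .value = 3300000, .gpios = 0x3 },     /* both lines high -> 3.3 V */
};

static struct gpio_regulator_config example_config = {
        .supply_name    = "vdd-example",
        .enabled_at_boot = 1,
        .startup_delay  = 100,                  /* microseconds */
        .gflags         = example_gflags,
        .ngpios         = ARRAY_SIZE(example_gflags),
        .states         = example_states,
        .nr_states      = ARRAY_SIZE(example_states),
};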
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 29af6d6b2f4b..c1901b61ca30 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -32,6 +32,8 @@ struct reset_control *devm_reset_control_array_get(struct device *dev,
struct reset_control *of_reset_control_array_get(struct device_node *np,
bool shared, bool optional);
+int reset_control_get_count(struct device *dev);
+
#else
static inline int reset_control_reset(struct reset_control *rstc)
@@ -97,6 +99,11 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline int reset_control_get_count(struct device *dev)
+{
+ return -ENOENT;
+}
+
#endif /* CONFIG_RESET_CONTROLLER */
static inline int __must_check device_reset(struct device *dev)
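A minimal usage sketch for the new reset_control_get_count() helper (the wrapper function and message are hypothetical); with CONFIG_RESET_CONTROLLER disabled the stub above simply returns -ENOENT:

static int example_count_resets(struct device *dev)
{
        int count = reset_control_get_count(dev);

        if (count < 0)
                dev_warn(dev, "no reset lines described: %d\n", count);

        return count;
}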
@@ -138,7 +145,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
*
* Returns a struct reset_control or IS_ERR() condition containing errno.
* This function is intended for use with reset-controls which are shared
- * between hardware-blocks.
+ * between hardware blocks.
*
* When a reset-control is shared, the behavior of reset_control_assert /
* deassert is changed, the reset-core will keep track of a deassert_count
@@ -187,7 +194,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
}
/**
- * of_reset_control_get_shared - Lookup and obtain an shared reference
+ * of_reset_control_get_shared - Lookup and obtain a shared reference
* to a reset controller.
* @node: device to be reset by the controller
* @id: reset line name
@@ -229,7 +236,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
}
/**
- * of_reset_control_get_shared_by_index - Lookup and obtain an shared
+ * of_reset_control_get_shared_by_index - Lookup and obtain a shared
* reference to a reset controller
* by index.
* @node: device to be reset by the controller
@@ -322,7 +329,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
/**
* devm_reset_control_get_shared_by_index - resource managed
- * reset_control_get_shared
+ * reset_control_get_shared
* @dev: device to be reset by the controller
* @index: index of the reset controller
*
diff --git a/include/linux/reset/socfpga.h b/include/linux/reset/socfpga.h
new file mode 100644
index 000000000000..b11a2047c342
--- /dev/null
+++ b/include/linux/reset/socfpga.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_RESET_SOCFPGA_H__
+#define __LINUX_RESET_SOCFPGA_H__
+
+void __init socfpga_reset_init(void);
+
+#endif /* __LINUX_RESET_SOCFPGA_H__ */
diff --git a/include/linux/reset/sunxi.h b/include/linux/reset/sunxi.h
new file mode 100644
index 000000000000..1ad7fffb413e
--- /dev/null
+++ b/include/linux/reset/sunxi.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_RESET_SUNXI_H__
+#define __LINUX_RESET_SUNXI_H__
+
+void __init sun6i_reset_init(void);
+
+#endif /* __LINUX_RESET_SUNXI_H__ */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 20f9c6af7473..ae9c0f71f311 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1113,14 +1113,6 @@ static inline int rhashtable_replace_fast(
return err;
}
-/* Obsolete function, do not use in new code. */
-static inline int rhashtable_walk_init(struct rhashtable *ht,
- struct rhashtable_iter *iter, gfp_t gfp)
-{
- rhashtable_walk_enter(ht, iter);
- return 0;
-}
-
/**
* rhltable_walk_enter - Initialise an iterator
* @hlt: Table to walk over
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 765119df759a..1549584a1538 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -21,6 +21,7 @@
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
@@ -47,6 +48,7 @@ struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
+struct capture_control;
struct robust_list_head;
struct sched_attr;
struct sched_param;
@@ -356,12 +358,6 @@ struct util_est {
* For cfs_rq, it is the aggregated load_avg of all runnable and
* blocked sched_entities.
*
- * load_avg may also take frequency scaling into account:
- *
- * load_avg = runnable% * scale_load_down(load) * freq%
- *
- * where freq% is the CPU frequency normalized to the highest frequency.
- *
* [util_avg definition]
*
* util_avg = running% * SCHED_CAPACITY_SCALE
@@ -370,17 +366,14 @@ struct util_est {
* a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
* and blocked sched_entities.
*
- * util_avg may also factor frequency scaling and CPU capacity scaling:
- *
- * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
+ * load_avg and util_avg don't directly factor in frequency scaling and
+ * CPU capacity scaling. The scaling is done through rq_clock_pelt, which
+ * is used for computing those signals (see update_rq_clock_pelt())
*
- * where freq% is the same as above, and capacity% is the CPU capacity
- * normalized to the greatest capacity (due to uarch differences, etc).
- *
- * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
- * themselves are in the range of [0, 1]. To do fixed point arithmetics,
- * we therefore scale them to as large a range as necessary. This is for
- * example reflected by util_avg's SCHED_CAPACITY_SCALE.
+ * N.B., the above ratios (runnable% and running%) themselves are in the
+ * range of [0, 1]. To do fixed-point arithmetic, we therefore scale them
+ * to as large a range as necessary. This is for example reflected by
+ * util_avg's SCHED_CAPACITY_SCALE.
*
* [Overflow issue]
*
@@ -607,7 +600,7 @@ struct task_struct {
randomized_struct_fields_start
void *stack;
- atomic_t usage;
+ refcount_t usage;
/* Per task flags (PF_*), defined further below: */
unsigned int flags;
unsigned int ptrace;
@@ -739,12 +732,6 @@ struct task_struct {
unsigned use_memdelay:1;
#endif
- /*
- * May usercopy functions fault on kernel addresses?
- * This is not just a single bit because this can potentially nest.
- */
- unsigned int kernel_uaccess_faults_ok;
-
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
@@ -966,6 +953,9 @@ struct task_struct {
struct io_context *io_context;
+#ifdef CONFIG_COMPACTION
+ struct capture_control *capture_control;
+#endif
/* Ptrace state: */
unsigned long ptrace_message;
kernel_siginfo_t *last_siginfo;
@@ -997,7 +987,7 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_RESCTRL
+#ifdef CONFIG_X86_CPU_RESCTRL
u32 closid;
u32 rmid;
#endif
@@ -1195,7 +1185,7 @@ struct task_struct {
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* A live task holds one reference: */
- atomic_t stack_refcount;
+ refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
int patch_state;
@@ -1408,9 +1398,10 @@ extern struct pid *cad_pid;
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
+#define PF_UMH 0x02000000 /* I'm a usermodehelper process */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
-#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation requests will have __GFP_MOVABLE cleared */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
@@ -1460,6 +1451,7 @@ static inline bool is_percpu_thread(void)
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
+#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1488,6 +1480,10 @@ TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
@@ -1755,9 +1751,9 @@ static __always_inline bool need_resched(void)
static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
- return p->cpu;
+ return READ_ONCE(p->cpu);
#else
- return task_thread_info(p)->cpu;
+ return READ_ONCE(task_thread_info(p)->cpu);
#endif
}
@@ -1906,6 +1902,14 @@ static inline void rseq_execve(struct task_struct *t)
#endif
+void __exit_umh(struct task_struct *tsk);
+
+static inline void exit_umh(struct task_struct *tsk)
+{
+ if (unlikely(tsk->flags & PF_UMH))
+ __exit_umh(tsk);
+}
+
#ifdef CONFIG_DEBUG_RSEQ
void rseq_syscall(struct pt_regs *regs);
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ec912d01126f..ecdc6542070f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
+#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3bfa6a0cbba4..0cd9f10423fb 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -148,17 +148,25 @@ static inline bool in_vfork(struct task_struct *tsk)
* Applies per-task gfp context to the given allocation flags.
* PF_MEMALLOC_NOIO implies GFP_NOIO
* PF_MEMALLOC_NOFS implies GFP_NOFS
+ * PF_MEMALLOC_NOCMA implies no allocation from the CMA region.
*/
static inline gfp_t current_gfp_context(gfp_t flags)
{
- /*
- * NOIO implies both NOIO and NOFS and it is a weaker context
- * so always make sure it makes precedence
- */
- if (unlikely(current->flags & PF_MEMALLOC_NOIO))
- flags &= ~(__GFP_IO | __GFP_FS);
- else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
- flags &= ~__GFP_FS;
+ if (unlikely(current->flags &
+ (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
+ /*
+ * NOIO implies both NOIO and NOFS and it is a weaker context
+ * so always make sure it takes precedence
+ */
+ if (current->flags & PF_MEMALLOC_NOIO)
+ flags &= ~(__GFP_IO | __GFP_FS);
+ else if (current->flags & PF_MEMALLOC_NOFS)
+ flags &= ~__GFP_FS;
+#ifdef CONFIG_CMA
+ if (current->flags & PF_MEMALLOC_NOCMA)
+ flags &= ~__GFP_MOVABLE;
+#endif
+ }
return flags;
}
@@ -248,6 +256,30 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
+#ifdef CONFIG_CMA
+static inline unsigned int memalloc_nocma_save(void)
+{
+ unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
+
+ current->flags |= PF_MEMALLOC_NOCMA;
+ return flags;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+ current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
+}
+#else
+static inline unsigned int memalloc_nocma_save(void)
+{
+ return 0;
+}
+
+static inline void memalloc_nocma_restore(unsigned int flags)
+{
+}
+#endif
+
#ifdef CONFIG_MEMCG
/**
* memalloc_use_memcg - Starts the remote memcg charging scope.
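A minimal sketch of the intended save/restore pattern (the wrapper function is hypothetical); inside the scope, current_gfp_context() clears __GFP_MOVABLE so allocations are not served from CMA:

static struct page *example_alloc_non_cma(gfp_t gfp)
{
        unsigned int nocma_flag;
        struct page *page;

        nocma_flag = memalloc_nocma_save();
        page = alloc_page(gfp);         /* will not be allocated from CMA */
        memalloc_nocma_restore(nocma_flag);

        return page;
}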
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 13789d10a50e..ae5655197698 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -8,13 +8,14 @@
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
+#include <linux/refcount.h>
/*
* Types defining task->signal and task->sighand and APIs using them:
*/
struct sighand_struct {
- atomic_t count;
+ refcount_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
wait_queue_head_t signalfd_wqh;
@@ -82,7 +83,7 @@ struct multiprocess_signals {
* the locking of signal_struct.
*/
struct signal_struct {
- atomic_t sigcnt;
+ refcount_t sigcnt;
atomic_t live;
int nr_threads;
struct list_head thread_head;
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index a9c32daeb9d8..99ce6d728df7 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -83,4 +83,11 @@ extern int sysctl_schedstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+extern unsigned int sysctl_sched_energy_aware;
+extern int sched_energy_aware_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
#endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 44c6f15800ff..2e97a2227045 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -88,13 +88,13 @@ extern void sched_exec(void);
#define sched_exec() {}
#endif
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0)
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
{
- if (atomic_dec_and_test(&t->usage))
+ if (refcount_dec_and_test(&t->usage))
__put_task_struct(t);
}
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index 6a841929073f..2413427e439c 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -61,7 +61,7 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
- return atomic_inc_not_zero(&tsk->stack_refcount) ?
+ return refcount_inc_not_zero(&tsk->stack_refcount) ?
task_stack_page(tsk) : NULL;
}
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index c31d3a47a47c..57c7ed3fe465 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -176,10 +176,10 @@ typedef int (*sched_domain_flags_f)(void);
#define SDTL_OVERLAP 0x01
struct sd_data {
- struct sched_domain **__percpu sd;
- struct sched_domain_shared **__percpu sds;
- struct sched_group **__percpu sg;
- struct sched_group_capacity **__percpu sgc;
+ struct sched_domain *__percpu *sd;
+ struct sched_domain_shared *__percpu *sds;
+ struct sched_group *__percpu *sg;
+ struct sched_group_capacity *__percpu *sgc;
};
struct sched_domain_topology_level {
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 10b19a192b2d..ad826d2a4557 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -24,9 +24,13 @@
* called near the end of a function. Otherwise, the list can be
* re-initialized for later re-use by wake_q_init().
*
- * Note that this can cause spurious wakeups. schedule() callers
+ * NOTE that this can cause spurious wakeups. schedule() callers
* must ensure the call is done inside a loop, confirming that the
* wakeup condition has in fact occurred.
+ *
+ * NOTE that there is no guarantee the wakeup will happen any later than the
+ * wake_q_add() location. Therefore the task must be ready to be woken at the
+ * location of the wake_q_add().
*/
#include <linux/sched.h>
@@ -47,8 +51,8 @@ static inline void wake_q_init(struct wake_q_head *head)
head->lastp = &head->first;
}
-extern void wake_q_add(struct wake_q_head *head,
- struct task_struct *task);
+extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
+extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
#endif /* _LINUX_SCHED_WAKE_Q_H */
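A minimal sketch of the usual pattern (the locking scheme and names are hypothetical): wakeups are queued while holding a lock and issued after it is dropped, and, per the notes above, the queued task must already be in a state where being woken, possibly immediately and spuriously, is safe:

static void example_wake_waiter(spinlock_t *lock, struct task_struct *waiter)
{
        DEFINE_WAKE_Q(wake_q);

        spin_lock(lock);
        /* the wakeup may happen as early as this call */
        wake_q_add(&wake_q, waiter);
        spin_unlock(lock);

        wake_up_q(&wake_q);
}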
diff --git a/include/linux/security.h b/include/linux/security.h
index e8febec62ffb..2b35a43d11d6 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -54,9 +54,12 @@ struct xattr;
struct xfrm_sec_ctx;
struct mm_struct;
+/* Default (no) options for the capable function */
+#define CAP_OPT_NONE 0x0
/* If capable should audit the security request */
-#define SECURITY_CAP_NOAUDIT 0
-#define SECURITY_CAP_AUDIT 1
+#define CAP_OPT_NOAUDIT BIT(1)
+/* If capable is being called by a setid function */
+#define CAP_OPT_INSETID BIT(2)
/* LSM Agnostic defines for sb_set_mnt_opts */
#define SECURITY_LSM_NATIVE_LABELS 1
@@ -72,7 +75,7 @@ enum lsm_event {
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
- int cap, int audit);
+ int cap, unsigned int opts);
extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
@@ -207,10 +210,10 @@ int security_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-int security_capable(const struct cred *cred, struct user_namespace *ns,
- int cap);
-int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
- int cap);
+int security_capable(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
@@ -366,8 +369,10 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, char *name, char **value);
-int security_setprocattr(const char *name, void *value, size_t size);
+int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
+ char **value);
+int security_setprocattr(const char *lsm, const char *name, void *value,
+ size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
@@ -462,14 +467,11 @@ static inline int security_capset(struct cred *new,
}
static inline int security_capable(const struct cred *cred,
- struct user_namespace *ns, int cap)
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts)
{
- return cap_capable(cred, ns, cap, SECURITY_CAP_AUDIT);
-}
-
-static inline int security_capable_noaudit(const struct cred *cred,
- struct user_namespace *ns, int cap) {
- return cap_capable(cred, ns, cap, SECURITY_CAP_NOAUDIT);
+ return cap_capable(cred, ns, cap, opts);
}
static inline int security_quotactl(int cmds, int type, int id,
@@ -1112,15 +1114,18 @@ static inline int security_sem_semop(struct kern_ipc_perm *sma,
return 0;
}
-static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode)
+static inline void security_d_instantiate(struct dentry *dentry,
+ struct inode *inode)
{ }
-static inline int security_getprocattr(struct task_struct *p, char *name, char **value)
+static inline int security_getprocattr(struct task_struct *p, const char *lsm,
+ char *name, char **value)
{
return -EINVAL;
}
-static inline int security_setprocattr(char *name, void *value, size_t size)
+static inline int security_setprocattr(const char *lsm, char *name,
+ void *value, size_t size)
{
return -EINVAL;
}
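A minimal sketch of the reworked interface (the caller is hypothetical), using the new opts argument to suppress audit records for a speculative capability check:

static bool example_may_admin_quietly(struct user_namespace *ns)
{
        return security_capable(current_cred(), ns, CAP_SYS_ADMIN,
                                CAP_OPT_NOAUDIT) == 0;
}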
diff --git a/include/linux/selinux.h b/include/linux/selinux.h
deleted file mode 100644
index 44f459612690..000000000000
--- a/include/linux/selinux.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * SELinux services exported to the rest of the kernel.
- *
- * Author: James Morris <jmorris@redhat.com>
- *
- * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
- * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
- * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
- */
-#ifndef _LINUX_SELINUX_H
-#define _LINUX_SELINUX_H
-
-struct selinux_audit_rule;
-struct audit_context;
-struct kern_ipc_perm;
-
-#ifdef CONFIG_SECURITY_SELINUX
-
-/**
- * selinux_is_enabled - is SELinux enabled?
- */
-bool selinux_is_enabled(void);
-#else
-
-static inline bool selinux_is_enabled(void)
-{
- return false;
-}
-#endif /* CONFIG_SECURITY_SELINUX */
-
-#endif /* _LINUX_SELINUX_H */
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index f155dc607112..f3fb1edb3526 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -72,7 +72,8 @@ extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
-extern int shmem_unuse(swp_entry_t entry, struct page *page);
+extern int shmem_unuse(unsigned int type, bool frontswap,
+ unsigned long *fs_pages_to_unuse);
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
diff --git a/include/linux/signal.h b/include/linux/signal.h
index cc7e2c1cd444..9702016734b1 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig);
#endif
#define siginmask(sig, mask) \
- ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
+ ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
#define SIG_KERNEL_ONLY_MASK (\
rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 93f56fddd92a..27beb549ffbe 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1221,6 +1221,11 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
}
#endif
+struct bpf_flow_keys;
+bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
+ const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ struct bpf_flow_keys *flow_keys);
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
@@ -1884,12 +1889,12 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
*
* A buffer cannot be placed on two lists at the same time.
*/
-void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
/**
* __skb_queue_tail - queue a buffer at the list tail
@@ -1901,12 +1906,12 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
*
* A buffer cannot be placed on two lists at the same time.
*/
-void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
/*
* remove sk_buff from list. _Must_ be called atomically, and with
@@ -1933,7 +1938,6 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
* so must be used with appropriate locks held only. The head item is
* returned or %NULL if the list is empty.
*/
-struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek(list);
@@ -1941,6 +1945,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
__skb_unlink(skb, list);
return skb;
}
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
/**
* __skb_dequeue_tail - remove from the tail of the queue
@@ -1950,7 +1955,6 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
* so must be used with appropriate locks held only. The tail item is
* returned or %NULL if the list is empty.
*/
-struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
@@ -1958,6 +1962,7 @@ static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
__skb_unlink(skb, list);
return skb;
}
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline bool skb_is_nonlinear(const struct sk_buff *skb)
@@ -2424,8 +2429,7 @@ static inline void skb_pop_mac_header(struct sk_buff *skb)
skb->mac_header = skb->network_header;
}
-static inline void skb_probe_transport_header(struct sk_buff *skb,
- const int offset_hint)
+static inline void skb_probe_transport_header(struct sk_buff *skb)
{
struct flow_keys_basic keys;
@@ -2434,8 +2438,6 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
- else
- skb_set_transport_header(skb, offset_hint);
}
static inline void skb_mac_header_rebuild(struct sk_buff *skb)
@@ -2648,13 +2650,13 @@ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
-void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
kfree_skb(skb);
}
+void skb_queue_purge(struct sk_buff_head *list);
unsigned int skb_rbtree_purge(struct rb_root *root);
@@ -3023,7 +3025,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
}
/**
- * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * __skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
* @free_on_error: free buffer on error
@@ -3218,6 +3220,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
*
* This is exactly the same as pskb_trim except that it ensures the
* checksum of received packets are still valid after the operation.
+ * It can change skb pointers.
*/
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
@@ -3480,16 +3483,25 @@ static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
/**
* skb_get_timestamp - get timestamp from a skb
* @skb: skb to get stamp from
- * @stamp: pointer to struct timeval to store stamp in
+ * @stamp: pointer to struct __kernel_old_timeval to store stamp in
*
* Timestamps are stored in the skb as offsets to a base timestamp.
* This function converts the offset back to a struct timeval and stores
* it in stamp.
*/
static inline void skb_get_timestamp(const struct sk_buff *skb,
- struct timeval *stamp)
+ struct __kernel_old_timeval *stamp)
{
- *stamp = ktime_to_timeval(skb->tstamp);
+ *stamp = ns_to_kernel_old_timeval(skb->tstamp);
+}
+
+static inline void skb_get_new_timestamp(const struct sk_buff *skb,
+ struct __kernel_sock_timeval *stamp)
+{
+ struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+ stamp->tv_sec = ts.tv_sec;
+ stamp->tv_usec = ts.tv_nsec / 1000;
}
static inline void skb_get_timestampns(const struct sk_buff *skb,
@@ -3498,6 +3510,15 @@ static inline void skb_get_timestampns(const struct sk_buff *skb,
*stamp = ktime_to_timespec(skb->tstamp);
}
+static inline void skb_get_new_timestampns(const struct sk_buff *skb,
+ struct __kernel_timespec *stamp)
+{
+ struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+ stamp->tv_sec = ts.tv_sec;
+ stamp->tv_nsec = ts.tv_nsec;
+}
+
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
@@ -4211,6 +4232,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}
+static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
+{
+ return skb_is_gso(skb) &&
+ skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
+}
+
static inline void skb_gso_reset(struct sk_buff *skb)
{
skb_shinfo(skb)->gso_size = 0;
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 3a1a1dbc6f49..d2153789bd9f 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -81,12 +81,12 @@ struct kmem_cache_order_objects {
*/
struct kmem_cache {
struct kmem_cache_cpu __percpu *cpu_slab;
- /* Used for retriving partial slabs etc */
+ /* Used for retrieving partial slabs, etc. */
slab_flags_t flags;
unsigned long min_partial;
- unsigned int size; /* The size of an object including meta data */
- unsigned int object_size;/* The size of an object without meta data */
- unsigned int offset; /* Free pointer offset. */
+ unsigned int size; /* The size of an object including metadata */
+ unsigned int object_size;/* The size of an object without metadata */
+ unsigned int offset; /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
@@ -110,7 +110,7 @@ struct kmem_cache {
#endif
#ifdef CONFIG_MEMCG
struct memcg_cache_params memcg_params;
- /* for propagation, maximum size of a stored attr */
+ /* For propagation, maximum size of a stored attr */
unsigned int max_attr_size;
#ifdef CONFIG_SYSFS
struct kset *memcg_kset;
@@ -151,7 +151,7 @@ struct kmem_cache {
#else
#define slub_cpu_partial(s) (0)
#define slub_set_cpu_partial(s, n)
-#endif // CONFIG_SLUB_CPU_PARTIAL
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 69c285b1c990..eb71a50b8afc 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -162,6 +162,12 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc);
*/
int qcom_llcc_probe(struct platform_device *pdev,
const struct llcc_slice_config *table, u32 sz);
+
+/**
+ * qcom_llcc_remove - remove the sct table
+ * @pdev: Platform device pointer
+ */
+int qcom_llcc_remove(struct platform_device *pdev);
#else
static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
{
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ab2041a00e01..6016daeecee4 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -349,9 +349,17 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+struct timespec64;
struct __kernel_timespec;
struct old_timespec32;
+struct scm_timestamping_internal {
+ struct timespec64 ts[3];
+};
+
+extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss);
+extern void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss);
+
/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
* forbid_cmsg_compat==false
*/
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index b0674e330ef6..c1c59473cef9 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -22,7 +22,7 @@
struct dma_chan;
/* device.platform_data for SSP controller devices */
-struct pxa2xx_spi_master {
+struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
bool is_slave;
@@ -54,7 +54,7 @@ struct pxa2xx_spi_chip {
#include <linux/clk.h>
-extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
+extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
#endif
#endif
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 3fe24500c5ee..3703d0dcac2e 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -330,6 +330,11 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf);
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
+struct spi_mem_dirmap_desc *
+devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info);
+void devm_spi_mem_dirmap_destroy(struct device *dev,
+ struct spi_mem_dirmap_desc *desc);
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 314d922ca607..662b336aa2e4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -12,6 +12,7 @@
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
+#include <linux/gpio/consumer.h>
struct dma_chan;
struct property_entry;
@@ -116,8 +117,13 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
* @modalias: Name of the driver to use with this device, or an alias
* for that name. This appears in the sysfs "modalias" attribute
* for driver coldplugging, and in uevents used for hotplugging
- * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
+ * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
+ * not using a GPIO line); use cs_gpiod in new drivers by opting in
+ * via use_gpio_descriptors on the spi_controller.
+ * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
* not using a GPIO line)
+ * @word_delay_usecs: microsecond delay to be inserted between consecutive
+ * words of a transfer
*
* @statistics: statistics for the spi_device
*
@@ -163,7 +169,9 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- int cs_gpio; /* chip select gpio */
+ int cs_gpio; /* LEGACY: chip select gpio */
+ struct gpio_desc *cs_gpiod; /* chip select gpio desc */
+ uint8_t word_delay_usecs; /* inter-word delay */
/* the statistics */
struct spi_statistics statistics;
@@ -376,9 +384,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* controller has native support for memory like operations.
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
- * number. Any individual value may be -ENOENT for CS lines that
+ * @cs_gpios: LEGACY: array of GPIO numbers to use as chip select lines; one
+ * per CS number. Any individual value may be -ENOENT for CS lines that
+ * are not GPIOs (driven by the SPI controller itself). Use cs_gpiods
+ * in new drivers.
+ * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per
+ * CS number. Any individual value may be NULL for CS lines that
+ * are not GPIOs (driven by the SPI controller itself).
+ * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
+ * GPIO descriptors rather than using global GPIO numbers grabbed by the
+ * driver. This will fill in @cs_gpiods; @cs_gpios should not be used,
+ * and SPI devices will have cs_gpiod assigned rather than cs_gpio.
* @statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
@@ -557,6 +573,8 @@ struct spi_controller {
/* gpio chip select */
int *cs_gpios;
+ struct gpio_desc **cs_gpiods;
+ bool use_gpio_descriptors;
/* statistics */
struct spi_statistics statistics;
@@ -706,6 +724,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
+ * @word_delay_usecs: inter-word delay in microseconds, inserted after each
+ * word (of size set by bits_per_word) is transmitted.
* @word_delay: clock cycles to inter word delay after each word size
* (set by bits_per_word) transmission.
* @transfer_list: transfers are sequenced through @spi_message.transfers
@@ -788,6 +808,7 @@ struct spi_transfer {
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
u8 bits_per_word;
+ u8 word_delay_usecs;
u16 delay_usecs;
u32 speed_hz;
u16 word_delay;
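A minimal controller-probe sketch (driver structure and names are hypothetical) showing how a driver opts in to descriptor-based chip selects; the SPI core then fills @cs_gpiods and assigns cs_gpiod on the child spi_device instead of cs_gpio:

static int example_spi_probe(struct platform_device *pdev)
{
        struct spi_controller *ctlr;

        ctlr = spi_alloc_master(&pdev->dev, 0);
        if (!ctlr)
                return -ENOMEM;

        /* let the core parse cs-gpios as GPIO descriptors */
        ctlr->use_gpio_descriptors = true;
        /* ... set num_chipselect, mode bits, transfer hooks, etc. ... */

        return devm_spi_register_controller(&pdev->dev, ctlr);
}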
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index c614375cd264..c495b2d51569 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -1,24 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Sleepable Read-Copy Update mechanism for mutual exclusion
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2006
* Copyright (C) Fujitsu, 2012
*
- * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Author: Paul McKenney <paulmck@linux.ibm.com>
* Lai Jiangshan <laijs@cn.fujitsu.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
@@ -223,6 +210,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
+ WARN_ON_ONCE(idx & ~0x1);
rcu_lock_release(&(ssp)->dep_map);
__srcu_read_unlock(ssp, idx);
}
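A minimal reader sketch (names are hypothetical): srcu_read_lock() returns an index of 0 or 1, which is exactly what the new WARN_ON_ONCE() in srcu_read_unlock() sanity-checks:

DEFINE_SRCU(example_srcu);

static void example_reader(void)
{
        int idx;

        idx = srcu_read_lock(&example_srcu);
        /* read-side critical section; sleeping is allowed */
        srcu_read_unlock(&example_srcu, idx);
}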
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index b19216aaaef2..5a5a1941ca15 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -1,24 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Sleepable Read-Copy Update mechanism for mutual exclusion,
* tiny variant.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2017
*
- * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Author: Paul McKenney <paulmck@linux.ibm.com>
*/
#ifndef _LINUX_SRCU_TINY_H
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 6f292bd3e7db..7f7c8c050f63 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -1,24 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Sleepable Read-Copy Update mechanism for mutual exclusion,
* tree variant.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2017
*
- * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Author: Paul McKenney <paulmck@linux.ibm.com>
*/
#ifndef _LINUX_SRCU_TREE_H
@@ -45,7 +32,8 @@ struct srcu_data {
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
bool srcu_cblist_invoking; /* Invoking these CBs? */
- struct delayed_work work; /* Context for CB invoking. */
+ struct timer_list delay_work; /* Delay for CB invoking */
+ struct work_struct work; /* Context for CB invoking. */
struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
struct srcu_node *mynode; /* Leaf srcu_node. */
unsigned long grpmask; /* Mask for leaf srcu_node */
diff --git a/include/linux/statfs.h b/include/linux/statfs.h
index 3142e98546ac..9bc69edb8f18 100644
--- a/include/linux/statfs.h
+++ b/include/linux/statfs.h
@@ -41,4 +41,7 @@ struct kstatfs {
#define ST_NODIRATIME 0x0800 /* do not update directory access times */
#define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */
+struct dentry;
+extern int vfs_get_fsid(struct dentry *dentry, __kernel_fsid_t *fsid);
+
#endif
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7ddfc65586b0..4335bd771ce5 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
struct clk *pclk;
struct clk *clk_ptp_ref;
unsigned int clk_ptp_rate;
+ unsigned int clk_ref_rate;
struct reset_control *stmmac_rst;
struct stmmac_axi *axi;
int has_gmac4;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 622025ac1461..fc50e21b3b88 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -307,7 +307,7 @@ struct vma_swap_readahead {
};
/* linux/mm/workingset.c */
-void *workingset_eviction(struct address_space *mapping, struct page *page);
+void *workingset_eviction(struct page *page);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);
@@ -625,7 +625,7 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
return vm_swappiness;
/* root ? */
- if (mem_cgroup_disabled() || !memcg->css.parent)
+ if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
return vm_swappiness;
return memcg->swappiness;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 257cccba3062..94369f5bd8e5 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -54,7 +54,7 @@ struct __sysctl_args;
struct sysinfo;
struct timespec;
struct timeval;
-struct timex;
+struct __kernel_timex;
struct timezone;
struct tms;
struct utimbuf;
@@ -297,6 +297,11 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id,
long nr,
struct io_event __user *events,
struct __kernel_timespec __user *timeout);
+asmlinkage long sys_io_getevents_time32(__u32 ctx_id,
+ __s32 min_nr,
+ __s32 nr,
+ struct io_event __user *events,
+ struct old_timespec32 __user *timeout);
asmlinkage long sys_io_pgetevents(aio_context_t ctx_id,
long min_nr,
long nr,
@@ -522,11 +527,19 @@ asmlinkage long sys_timerfd_settime(int ufd, int flags,
const struct __kernel_itimerspec __user *utmr,
struct __kernel_itimerspec __user *otmr);
asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime32(int ufd,
+ struct old_itimerspec32 __user *otmr);
+asmlinkage long sys_timerfd_settime32(int ufd, int flags,
+ const struct old_itimerspec32 __user *utmr,
+ struct old_itimerspec32 __user *otmr);
/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
struct __kernel_timespec __user *utimes,
int flags);
+asmlinkage long sys_utimensat_time32(unsigned int dfd,
+ const char __user *filename,
+ struct old_timespec32 __user *t, int flags);
/* kernel/acct.c */
asmlinkage long sys_acct(const char __user *name);
@@ -555,6 +568,9 @@ asmlinkage long sys_unshare(unsigned long unshare_flags);
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
struct __kernel_timespec __user *utime, u32 __user *uaddr2,
u32 val3);
+asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val,
+ struct old_timespec32 __user *utime, u32 __user *uaddr2,
+ u32 val3);
asmlinkage long sys_get_robust_list(int pid,
struct robust_list_head __user * __user *head_ptr,
size_t __user *len_ptr);
@@ -564,6 +580,8 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
/* kernel/hrtimer.c */
asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
+asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
+ struct old_timespec32 __user *rmtp);
/* kernel/itimer.c */
asmlinkage long sys_getitimer(int which, struct itimerval __user *value);
@@ -591,7 +609,7 @@ asmlinkage long sys_timer_gettime(timer_t timer_id,
asmlinkage long sys_timer_getoverrun(timer_t timer_id);
asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
const struct __kernel_itimerspec __user *new_setting,
- struct itimerspec __user *old_setting);
+ struct __kernel_itimerspec __user *old_setting);
asmlinkage long sys_timer_delete(timer_t timer_id);
asmlinkage long sys_clock_settime(clockid_t which_clock,
const struct __kernel_timespec __user *tp);
@@ -602,6 +620,20 @@ asmlinkage long sys_clock_getres(clockid_t which_clock,
asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags,
const struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
+asmlinkage long sys_timer_gettime32(timer_t timer_id,
+ struct old_itimerspec32 __user *setting);
+asmlinkage long sys_timer_settime32(timer_t timer_id, int flags,
+ struct old_itimerspec32 __user *new,
+ struct old_itimerspec32 __user *old);
+asmlinkage long sys_clock_settime32(clockid_t which_clock,
+ struct old_timespec32 __user *tp);
+asmlinkage long sys_clock_gettime32(clockid_t which_clock,
+ struct old_timespec32 __user *tp);
+asmlinkage long sys_clock_getres_time32(clockid_t which_clock,
+ struct old_timespec32 __user *tp);
+asmlinkage long sys_clock_nanosleep_time32(clockid_t which_clock, int flags,
+ struct old_timespec32 __user *rqtp,
+ struct old_timespec32 __user *rmtp);
/* kernel/printk.c */
asmlinkage long sys_syslog(int type, char __user *buf, int len);
@@ -627,6 +659,8 @@ asmlinkage long sys_sched_get_priority_max(int policy);
asmlinkage long sys_sched_get_priority_min(int policy);
asmlinkage long sys_sched_rr_get_interval(pid_t pid,
struct __kernel_timespec __user *interval);
+asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid,
+ struct old_timespec32 __user *interval);
/* kernel/signal.c */
asmlinkage long sys_restart_syscall(void);
@@ -695,7 +729,8 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_settimeofday(struct timeval __user *tv,
struct timezone __user *tz);
-asmlinkage long sys_adjtimex(struct timex __user *txc_p);
+asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p);
+asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p);
/* kernel/timer.c */
asmlinkage long sys_getpid(void);
@@ -714,9 +749,18 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t
asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct __kernel_timespec __user *abs_timeout);
asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification);
asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat);
+asmlinkage long sys_mq_timedreceive_time32(mqd_t mqdes,
+ char __user *u_msg_ptr,
+ unsigned int msg_len, unsigned int __user *u_msg_prio,
+ const struct old_timespec32 __user *u_abs_timeout);
+asmlinkage long sys_mq_timedsend_time32(mqd_t mqdes,
+ const char __user *u_msg_ptr,
+ unsigned int msg_len, unsigned int msg_prio,
+ const struct old_timespec32 __user *u_abs_timeout);
/* ipc/msg.c */
asmlinkage long sys_msgget(key_t key, int msgflg);
+asmlinkage long sys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp,
size_t msgsz, long msgtyp, int msgflg);
@@ -726,14 +770,19 @@ asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
/* ipc/sem.c */
asmlinkage long sys_semget(key_t key, int nsems, int semflg);
asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
+asmlinkage long sys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
unsigned nsops,
const struct __kernel_timespec __user *timeout);
+asmlinkage long sys_semtimedop_time32(int semid, struct sembuf __user *sops,
+ unsigned nsops,
+ const struct old_timespec32 __user *timeout);
asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
unsigned nsops);
/* ipc/shm.c */
asmlinkage long sys_shmget(key_t key, size_t size, int flag);
+asmlinkage long sys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
asmlinkage long sys_shmdt(char __user *shmaddr);
@@ -867,7 +916,9 @@ asmlinkage long sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
asmlinkage long sys_clock_adjtime(clockid_t which_clock,
- struct timex __user *tx);
+ struct __kernel_timex __user *tx);
+asmlinkage long sys_clock_adjtime32(clockid_t which_clock,
+ struct old_timex32 __user *tx);
asmlinkage long sys_syncfs(int fd);
asmlinkage long sys_setns(int fd, int nstype);
asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
@@ -1003,6 +1054,7 @@ asmlinkage long sys_alarm(unsigned int seconds);
asmlinkage long sys_getpgrp(void);
asmlinkage long sys_pause(void);
asmlinkage long sys_time(time_t __user *tloc);
+asmlinkage long sys_time32(old_time32_t __user *tloc);
#ifdef __ARCH_WANT_SYS_UTIME
asmlinkage long sys_utime(char __user *filename,
struct utimbuf __user *times);
@@ -1011,6 +1063,13 @@ asmlinkage long sys_utimes(char __user *filename,
asmlinkage long sys_futimesat(int dfd, const char __user *filename,
struct timeval __user *utimes);
#endif
+asmlinkage long sys_futimesat_time32(unsigned int dfd,
+ const char __user *filename,
+ struct old_timeval32 __user *t);
+asmlinkage long sys_utime32(const char __user *filename,
+ struct old_utimbuf32 __user *t);
+asmlinkage long sys_utimes_time32(const char __user *filename,
+ struct old_timeval32 __user *t);
asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
asmlinkage long sys_getdents(unsigned int fd,
struct linux_dirent __user *dirent,
@@ -1035,6 +1094,7 @@ asmlinkage long sys_fork(void);
/* obsolete: kernel/time/time.c */
asmlinkage long sys_stime(time_t __user *tptr);
+asmlinkage long sys_stime32(old_time32_t __user *tptr);
/* obsolete: kernel/signal.c */
asmlinkage long sys_sigpending(old_sigset_t __user *uset);
@@ -1185,6 +1245,10 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
ssize_t ksys_readahead(int fd, loff_t offset, size_t count);
+int ksys_ipc(unsigned int call, int first, unsigned long second,
+ unsigned long third, void __user * ptr, long fifth);
+int compat_ksys_ipc(u32 call, int first, int second,
+ u32 third, u32 ptr, u32 fifth);
/*
* The following kernel syscall equivalents are just wrappers to fs-internal
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 6cfe05893a76..4a49f80e7f71 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -15,11 +15,14 @@
#ifndef __TEE_DRV_H
#define __TEE_DRV_H
-#include <linux/types.h>
+#include <linux/device.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <linux/tee.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
/*
* The file describes the API provided by the generic TEE driver to the
@@ -47,6 +50,11 @@ struct tee_shm_pool;
* @releasing: flag that indicates if context is being released right now.
* It is needed to break circular dependency on context during
* shared memory release.
+ * @supp_nowait: flag that indicates that requests in this context should not
+ * wait for the tee-supplicant daemon to be started if it is not present,
+ * and should just return with an error code instead. It is needed for
+ * requests that arise from TEE-based kernel drivers that should be
+ * non-blocking in nature.
*/
struct tee_context {
struct tee_device *teedev;
@@ -54,6 +62,7 @@ struct tee_context {
void *data;
struct kref refcount;
bool releasing;
+ bool supp_nowait;
};
struct tee_param_memref {
@@ -526,6 +535,18 @@ int tee_client_invoke_func(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
+/**
+ * tee_client_cancel_req() - Request cancellation of the previous open-session
+ * or invoke-command operations in a Trusted Application
+ * @ctx: TEE Context
+ * @arg: Cancellation arguments, see description of
+ * struct tee_ioctl_cancel_arg
+ *
+ * Returns < 0 on error, or 0 if the cancellation was successfully requested.
+ */
+int tee_client_cancel_req(struct tee_context *ctx,
+ struct tee_ioctl_cancel_arg *arg);
+
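For reference, a minimal sketch (not part of this patch) of how an in-kernel client might use the new cancellation hook; the function name and the session/cancel_id values are illustrative placeholders:

#include <linux/printk.h>
#include <linux/tee_drv.h>

static void example_cancel_invocation(struct tee_context *ctx,
                                      u32 session, u32 cancel_id)
{
        /* Identify the pending open-session/invoke-command to cancel. */
        struct tee_ioctl_cancel_arg arg = {
                .cancel_id = cancel_id,
                .session = session,
        };

        if (tee_client_cancel_req(ctx, &arg))
                pr_warn("TEE cancellation request failed\n");
}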
static inline bool tee_param_is_memref(struct tee_param *param)
{
switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
@@ -538,4 +559,31 @@ static inline bool tee_param_is_memref(struct tee_param *param)
}
}
+extern struct bus_type tee_bus_type;
+
+/**
+ * struct tee_client_device - tee based device
+ * @id: device identifier
+ * @dev: device structure
+ */
+struct tee_client_device {
+ struct tee_client_device_id id;
+ struct device dev;
+};
+
+#define to_tee_client_device(d) container_of(d, struct tee_client_device, dev)
+
+/**
+ * struct tee_client_driver - tee client driver
+ * @id_table: device id table supported by this driver
+ * @driver: driver structure
+ */
+struct tee_client_driver {
+ const struct tee_client_device_id *id_table;
+ struct device_driver driver;
+};
+
+#define to_tee_client_driver(d) \
+ container_of(d, struct tee_client_driver, driver)
+
#endif /*__TEE_DRV_H*/
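For reference, a minimal sketch of how a kernel-side client driver could bind to the new tee_bus_type via a tee_client_device_id table; every name and the UUID below are illustrative placeholders, not part of this patch:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>

static int example_tee_probe(struct device *dev)
{
        struct tee_client_device *tee_dev = to_tee_client_device(dev);

        dev_info(dev, "bound to TA %pUb\n", tee_dev->id.uuid.b);
        return 0;
}

/* Placeholder UUID: a real driver matches on its Trusted Application UUID. */
static const struct tee_client_device_id example_id_table[] = {
        { UUID_INIT(0x12345678, 0x0000, 0x0000,
                    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) },
        { }
};

static struct tee_client_driver example_tee_driver = {
        .id_table = example_id_table,
        .driver = {
                .name = "example-tee-client",
                .bus = &tee_bus_type,
                .probe = example_tee_probe,
        },
};

static int __init example_tee_init(void)
{
        return driver_register(&example_tee_driver.driver);
}
module_init(example_tee_init);

static void __exit example_tee_exit(void)
{
        driver_unregister(&example_tee_driver.driver);
}
module_exit(example_tee_exit);

MODULE_LICENSE("GPL");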
diff --git a/include/linux/time32.h b/include/linux/time32.h
index 118b9977080c..0a1f302a1753 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -10,6 +10,7 @@
*/
#include <linux/time64.h>
+#include <linux/timex.h>
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
@@ -35,13 +36,42 @@ struct old_utimbuf32 {
old_time32_t modtime;
};
+struct old_timex32 {
+ u32 modes;
+ s32 offset;
+ s32 freq;
+ s32 maxerror;
+ s32 esterror;
+ s32 status;
+ s32 constant;
+ s32 precision;
+ s32 tolerance;
+ struct old_timeval32 time;
+ s32 tick;
+ s32 ppsfreq;
+ s32 jitter;
+ s32 shift;
+ s32 stabil;
+ s32 jitcnt;
+ s32 calcnt;
+ s32 errcnt;
+ s32 stbcnt;
+ s32 tai;
+
+ s32:32; s32:32; s32:32; s32:32;
+ s32:32; s32:32; s32:32; s32:32;
+ s32:32; s32:32; s32:32;
+};
+
extern int get_old_timespec32(struct timespec64 *, const void __user *);
extern int put_old_timespec32(const struct timespec64 *, void __user *);
extern int get_old_itimerspec32(struct itimerspec64 *its,
const struct old_itimerspec32 __user *uits);
extern int put_old_itimerspec32(const struct itimerspec64 *its,
struct old_itimerspec32 __user *uits);
-
+struct __kernel_timex;
+int get_old_timex32(struct __kernel_timex *, const struct old_timex32 __user *);
+int put_old_timex32(struct old_timex32 __user *, const struct __kernel_timex *);
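For reference, a hedged sketch of how a 32-bit-time wrapper would typically use these conversion helpers around do_adjtimex(); the function name and exact error handling are illustrative, not the actual syscall implementation:

#include <linux/errno.h>
#include <linux/time32.h>
#include <linux/timex.h>

static long example_adjtimex_time32(struct old_timex32 __user *utp)
{
        struct __kernel_timex txc;
        int ret;

        /* Widen the 32-bit user structure into the 64-bit kernel one. */
        ret = get_old_timex32(&txc, utp);
        if (ret)
                return ret;

        ret = do_adjtimex(&txc);        /* clock state (>= 0) or -errno */
        if (ret < 0)
                return ret;

        /* Copy the updated values back in the 32-bit layout. */
        if (put_old_timex32(utp, &txc))
                return -EFAULT;

        return ret;
}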
#if __BITS_PER_LONG == 64
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 05634afba0db..f38d382ffec1 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -7,14 +7,6 @@
typedef __s64 time64_t;
typedef __u64 timeu64_t;
-/* CONFIG_64BIT_TIME enables new 64 bit time_t syscalls in the compat path
- * and 32-bit emulation.
- */
-#ifndef CONFIG_64BIT_TIME
-#define __kernel_timespec timespec
-#define __kernel_itimerspec itimerspec
-#endif
-
#include <uapi/linux/time.h>
struct timespec64 {
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 39c25dbebfe8..ce0859763670 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -151,7 +151,9 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
#define NTP_INTERVAL_FREQ (HZ)
#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
-extern int do_adjtimex(struct timex *);
+extern int do_adjtimex(struct __kernel_timex *);
+extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *ktx);
+
extern void hardpps(const struct timespec64 *, const struct timespec64 *);
int read_current_timer(unsigned long *timer_val);
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 48fad21109fc..23d80db426d7 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common functions for in-kernel torture tests.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2014
*
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#ifndef __LINUX_TORTURE_H
@@ -50,11 +37,12 @@
do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
/* Definitions for online/offline exerciser. */
+typedef void torture_ofl_func(void);
bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
unsigned long *sum_offl, int *min_onl, int *max_onl);
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
unsigned long *sum_onl, int *min_onl, int *max_onl);
-int torture_onoff_init(long ooholdoff, long oointerval);
+int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f);
void torture_onoff_stats(void);
bool torture_onoff_failures(void);
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 235f51b62c71..0c08de356d0d 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -47,6 +47,8 @@ struct umh_info {
const char *cmdline;
struct file *pipe_to_umh;
struct file *pipe_from_umh;
+ struct list_head list;
+ void (*cleanup)(struct umh_info *info);
pid_t pid;
};
int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 7dc3a411bece..695931b03684 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -72,6 +72,12 @@ struct giveback_urb_bh {
struct usb_host_endpoint *completing_ep;
};
+enum usb_dev_authorize_policy {
+ USB_DEVICE_AUTHORIZE_NONE = 0,
+ USB_DEVICE_AUTHORIZE_ALL = 1,
+ USB_DEVICE_AUTHORIZE_INTERNAL = 2,
+};
+
struct usb_hcd {
/*
@@ -117,7 +123,6 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
-#define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -142,8 +147,7 @@ struct usb_hcd {
* or they require explicit user space authorization; this bit is
* settable through /sys/class/usb_host/X/authorized_default
*/
-#define HCD_DEV_AUTHORIZED(hcd) \
- ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED))
+ enum usb_dev_authorize_policy dev_policy;
/* Flags that get set only during HCD registration or removal. */
unsigned rh_registered:1;/* is root hub registered? */
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index edc51be4a77c..c05ffa6abda9 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -18,6 +18,7 @@ typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev);
/**
* struct usb_role_switch_desc - USB Role Switch Descriptor
+ * @fwnode: The device node to be associated with the role switch
* @usb2_port: Optional reference to the host controller port device (USB2)
* @usb3_port: Optional reference to the host controller port device (USB3)
* @udc: Optional reference to the peripheral controller device
@@ -32,6 +33,7 @@ typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev);
* usb_role_switch_register() before registering the switch.
*/
struct usb_role_switch_desc {
+ struct fwnode_handle *fwnode;
struct device *usb2_port;
struct device *usb3_port;
struct device *udc;
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 50c74a77db55..0c532ca3f079 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -159,12 +159,6 @@ struct tcpm_port;
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc);
void tcpm_unregister_port(struct tcpm_port *port);
-int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo);
-int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo,
- unsigned int operating_snk_mw);
-
void tcpm_vbus_change(struct tcpm_port *port);
void tcpm_cc_change(struct tcpm_port *port);
void tcpm_pd_receive(struct tcpm_port *port,
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index 55ae781d60a9..7fa12ef8d09a 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -92,4 +92,8 @@ enum {
#define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8
#define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8)
+/* Helpers for setting/getting the pin assignment value in the configuration */
+#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
+#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
+
#endif /* __USB_TYPEC_DP_H */
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index 79293f630ee1..43f40685e53c 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -47,7 +47,8 @@ void typec_switch_put(struct typec_switch *sw);
int typec_switch_register(struct typec_switch *sw);
void typec_switch_unregister(struct typec_switch *sw);
-struct typec_mux *typec_mux_get(struct device *dev, const char *name);
+struct typec_mux *
+typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc);
void typec_mux_put(struct typec_mux *mux);
int typec_mux_register(struct typec_mux *mux);
void typec_mux_unregister(struct typec_mux *mux);
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 9e4a3213f2c2..65adee629106 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -236,22 +236,6 @@ enum {
WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
};
-static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size,
- const struct wusb_ckhdid *ckhdid)
-{
- return scnprintf(pr_ckhdid, size,
- "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx "
- "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx",
- ckhdid->data[0], ckhdid->data[1],
- ckhdid->data[2], ckhdid->data[3],
- ckhdid->data[4], ckhdid->data[5],
- ckhdid->data[6], ckhdid->data[7],
- ckhdid->data[8], ckhdid->data[9],
- ckhdid->data[10], ckhdid->data[11],
- ckhdid->data[12], ckhdid->data[13],
- ckhdid->data[14], ckhdid->data[15]);
-}
-
/*
* WUSB Crypto stuff (WUSB1.0[6])
*/
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 32baf8e26735..987b6491b946 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -12,6 +12,11 @@ struct irq_affinity;
/**
* virtio_config_ops - operations for configuring a virtio device
+ * Note: Do not assume that a transport implements all of the operations
+ * for getting/setting a value as a simple read/write! Generally speaking,
+ * any of @get/@set, @get_status/@set_status, or @get_features/
+ * @finalize_features are NOT safe to be called from an atomic
+ * context.
* @get: read the value of a configuration field
* vdev: the virtio_device
* offset: the offset of the configuration field
@@ -22,7 +27,7 @@ struct irq_affinity;
* offset: the offset of the configuration field
* buf: the buffer to read the field value from.
* len: the length of the buffer
- * @generation: config generation counter
+ * @generation: config generation counter (optional)
* vdev: the virtio_device
* Returns the config generation counter
* @get_status: read the status byte
@@ -48,17 +53,17 @@ struct irq_affinity;
* @del_vqs: free virtqueues found by find_vqs().
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
- * Returns the first 32 feature bits (all we currently need).
+ * Returns the first 64 feature bits (all we currently need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
* Returns 0 on success or error status
- * @bus_name: return the bus name associated with the device
+ * @bus_name: return the bus name associated with the device (optional)
* vdev: the virtio_device
* This returns a pointer to the bus name a la pci_name from which
* the caller can then copy.
- * @set_vq_affinity: set the affinity for a virtqueue.
+ * @set_vq_affinity: set the affinity for a virtqueue (optional).
* @get_vq_affinity: get the affinity for a virtqueue (optional).
*/
typedef void vq_callback_t(struct virtqueue *);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index cb462f9ab7dd..0d1fe9297ac6 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
+ } else {
+ /* gso packets without NEEDS_CSUM do not set transport_offset.
+ * probe and drop if it does not match one of the above types.
+ */
+ if (gso_type && skb->network_header) {
+ if (!skb->protocol)
+ virtio_net_hdr_set_proto(skb, hdr);
+retry:
+ skb_probe_transport_header(skb);
+ if (!skb_transport_header_was_set(skb)) {
+ /* UFO does not specify ipv4 or 6: try both */
+ if (gso_type & SKB_GSO_UDP &&
+ skb->protocol == htons(ETH_P_IP)) {
+ skb->protocol = htons(ETH_P_IPV6);
+ goto retry;
+ }
+ return -EINVAL;
+ }
+ }
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index b724ef7005de..eaa1e762bf06 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -45,6 +45,7 @@
#define VMCI_CAPS_GUESTCALL 0x2
#define VMCI_CAPS_DATAGRAM 0x4
#define VMCI_CAPS_NOTIFICATIONS 0x8
+#define VMCI_CAPS_PPN64 0x10
/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM 0x1
@@ -569,8 +570,10 @@ struct vmci_resource_query_msg {
*/
struct vmci_notify_bm_set_msg {
struct vmci_datagram hdr;
- u32 bitmap_ppn;
- u32 _pad;
+ union {
+ u32 bitmap_ppn32;
+ u64 bitmap_ppn64;
+ };
};
/*
diff --git a/include/linux/wait.h b/include/linux/wait.h
index ed7c122cb31f..5f3efabc36f4 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -308,7 +308,7 @@ do { \
#define __wait_event_freezable(wq_head, condition) \
___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
- schedule(); try_to_freeze())
+ freezable_schedule())
/**
* wait_event_freezable - sleep (or freeze) until a condition gets true
@@ -367,7 +367,7 @@ do { \
#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_INTERRUPTIBLE, 0, timeout, \
- __ret = schedule_timeout(__ret); try_to_freeze())
+ __ret = freezable_schedule_timeout(__ret))
/*
* like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
@@ -588,7 +588,7 @@ do { \
#define __wait_event_freezable_exclusive(wq, condition) \
___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
- schedule(); try_to_freeze())
+ freezable_schedule())
#define wait_event_freezable_exclusive(wq, condition) \
({ \
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 60d673e15632..d59525fca4d3 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -390,43 +390,23 @@ extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
-extern struct workqueue_struct *
-__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
- struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
-
/**
* alloc_workqueue - allocate a workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
* @max_active: max in-flight work items, 0 for default
- * @args...: args for @fmt
+ * remaining args: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
* information on WQ_* flags, please refer to
* Documentation/core-api/workqueue.rst.
*
- * The __lock_name macro dance is to guarantee that single lock_class_key
- * doesn't end up with different namesm, which isn't allowed by lockdep.
- *
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
-#ifdef CONFIG_LOCKDEP
-#define alloc_workqueue(fmt, flags, max_active, args...) \
-({ \
- static struct lock_class_key __key; \
- const char *__lock_name; \
- \
- __lock_name = "(wq_completion)"#fmt#args; \
- \
- __alloc_workqueue_key((fmt), (flags), (max_active), \
- &__key, __lock_name, ##args); \
-})
-#else
-#define alloc_workqueue(fmt, flags, max_active, args...) \
- __alloc_workqueue_key((fmt), (flags), (max_active), \
- NULL, NULL, ##args)
-#endif
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+ unsigned int flags,
+ int max_active, ...);
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
@@ -463,6 +443,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
+extern bool queue_work_node(int node, struct workqueue_struct *wq,
+ struct work_struct *work);
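For reference, a short sketch (not part of this patch) showing that call sites are unchanged by the alloc_workqueue() conversion, and how the new queue_work_node() helper expresses a NUMA preference; all names below are placeholders:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
        pr_info("example work ran\n");
}

static int example_setup(int preferred_node)
{
        /* Same call as before; lockdep keys are now handled internally. */
        example_wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
        if (!example_wq)
                return -ENOMEM;

        INIT_WORK(&example_work, example_work_fn);

        /* Prefer (but do not require) execution near @preferred_node. */
        if (!queue_work_node(preferred_node, example_wq, &example_work))
                pr_debug("example work was already pending\n");

        return 0;
}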
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index f492e21c4aa2..5d9d318bcf7a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
*/
static inline bool xa_is_err(const void *entry)
{
- return unlikely(xa_is_internal(entry));
+ return unlikely(xa_is_internal(entry) &&
+ entry >= xa_mk_internal(-MAX_ERRNO));
}
/**
@@ -286,7 +287,6 @@ struct xarray {
*/
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
-void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
@@ -304,6 +304,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
void xa_destroy(struct xarray *);
/**
+ * xa_init_flags() - Initialise an empty XArray with flags.
+ * @xa: XArray.
+ * @flags: XA_FLAG values.
+ *
+ * If you need to initialise an XArray with special flags (eg you need
+ * to take the lock from interrupt context), use this function instead
+ * of xa_init().
+ *
+ * Context: Any context.
+ */
+static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+ spin_lock_init(&xa->xa_lock);
+ xa->xa_flags = flags;
+ xa->xa_head = NULL;
+}
+
+/**
* xa_init() - Initialise an empty XArray.
* @xa: XArray.
*
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
}
/**
- * xa_for_each() - Iterate over a portion of an XArray.
+ * xa_for_each_start() - Iterate over a portion of an XArray.
* @xa: XArray.
+ * @index: Index of @entry.
* @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you
+ * want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_start() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_start().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_start(xa, index, entry, start) \
+ for (index = start, \
+ entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
+ entry; \
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+
+/**
+ * xa_for_each() - Iterate over present entries in an XArray.
+ * @xa: XArray.
* @index: Index of @entry.
- * @max: Maximum index to retrieve from array.
- * @filter: Selection criterion.
+ * @entry: Entry retrieved from array.
*
- * Initialise @index to the lowest index you want to retrieve from the
- * array. During the iteration, @entry will have the value of the entry
- * stored in @xa at @index. The iteration will skip all entries in the
- * array which do not match @filter. You may modify @index during the
- * iteration if you want to skip or reprocess indices. It is safe to modify
- * the array during the iteration. At the end of the iteration, @entry will
- * be set to NULL and @index will have a value less than or equal to max.
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you want
+ * to skip or reprocess indices. It is safe to modify the array during the
+ * iteration. At the end of the iteration, @entry will be set to NULL and
+ * @index will have a value less than or equal to max.
*
* xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
* to handle your own locking with xas_for_each(), and if you have to unlock
@@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
*
* Context: Any context. Takes and releases the RCU lock.
*/
-#define xa_for_each(xa, entry, index, max, filter) \
- for (entry = xa_find(xa, &index, max, filter); entry; \
- entry = xa_find_after(xa, &index, max, filter))
+#define xa_for_each(xa, index, entry) \
+ xa_for_each_start(xa, index, entry, 0)
+
+/**
+ * xa_for_each_marked() - Iterate over marked entries in an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @filter: Selection criterion.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. The iteration will skip all entries in the array
+ * which do not match @filter. You may modify @index during the iteration
+ * if you want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set to
+ * NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
+ * You have to handle your own locking with xas_for_each(), and if you have
+ * to unlock after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each_marked() iterator
+ * instead. The xas_for_each_marked() iterator will expand into more inline
+ * code than xa_for_each_marked().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_marked(xa, index, entry, filter) \
+ for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
+ entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
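For reference, a minimal usage sketch (not part of this patch) of the reworked iterators, showing the new argument order with the index before the entry; the function name is illustrative:

#include <linux/xarray.h>

static void example_dump(struct xarray *xa)
{
        unsigned long index;
        void *entry;

        /* New signature: index before entry, no explicit max or filter. */
        xa_for_each(xa, index, entry)
                pr_info("index %lu -> %p\n", index, entry);

        /* The marked variant keeps the filter as the last argument. */
        xa_for_each_marked(xa, index, entry, XA_MARK_0)
                pr_info("marked index %lu -> %p\n", index, entry);
}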
#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
@@ -393,40 +463,13 @@ void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
+int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
/**
- * __xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use __xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Any context. Expects xa_lock to be held on entry. May
- * release and reacquire xa_lock if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int __xa_insert(struct xarray *xa, unsigned long index,
- void *entry, gfp_t gfp)
-{
- void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
- if (!curr)
- return 0;
- if (xa_is_err(curr))
- return xa_err(curr);
- return -EEXIST;
-}
-
-/**
* xa_store_bh() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
}
/**
- * xa_store_irq() - Erase this entry from the XArray.
+ * xa_store_irq() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
* @entry: New entry.
* @gfp: Memory allocation flags.
*
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
*
- * Context: Process context. Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
* Return: 0 if the store succeeded. -EEXIST if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
static inline int xa_insert(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
- void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
- if (!curr)
- return 0;
- if (xa_is_err(curr))
- return xa_err(curr);
- return -EEXIST;
+ int err;
+
+ xa_lock(xa);
+ err = __xa_insert(xa, index, entry, gfp);
+ xa_unlock(xa);
+
+ return err;
+}
+
+/**
+ * xa_insert_bh() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ int err;
+
+ xa_lock_bh(xa);
+ err = __xa_insert(xa, index, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return err;
+}
+
+/**
+ * xa_insert_irq() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ int err;
+
+ xa_lock_irq(xa);
+ err = __xa_insert(xa, index, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return err;
}
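For reference, a short sketch (not part of this patch) of the xa_insert() calling convention documented above; the function name is illustrative:

#include <linux/xarray.h>

static int example_track(struct xarray *xa, unsigned long id, void *obj)
{
        int err;

        /* Fails with -EEXIST if an entry or reserved slot is already present. */
        err = xa_insert(xa, id, obj, GFP_KERNEL);
        if (err == -EEXIST)
                pr_warn("id %lu already in use\n", id);

        return err;
}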
/**
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}
-#define XA_ZERO_ENTRY xa_mk_internal(256)
-#define XA_RETRY_ENTRY xa_mk_internal(257)
+#define XA_RETRY_ENTRY xa_mk_internal(256)
+#define XA_ZERO_ENTRY xa_mk_internal(257)
/**
* xa_is_zero() - Is the entry a zero entry?
@@ -996,6 +1098,17 @@ static inline bool xa_is_retry(const void *entry)
}
/**
+ * xa_is_advanced() - Is the entry only permitted for the advanced API?
+ * @entry: Entry to be stored in the XArray.
+ *
+ * Return: %true if the entry cannot be stored by the normal API.
+ */
+static inline bool xa_is_advanced(const void *entry)
+{
+ return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
+}
+
+/**
* typedef xa_update_node_t - A callback function from the XArray.
* @node: The node which is being processed
*
diff --git a/include/net/act_api.h b/include/net/act_api.h
index dbc795ec659e..c745e9ccfab2 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -80,7 +80,7 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
struct tc_action_ops {
struct list_head head;
char kind[IFNAMSIZ];
- __u32 type; /* TBD to match kind */
+ enum tca_id id; /* identifier should match kind */
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 1656c5978498..269ec27385e9 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -49,6 +49,7 @@ struct prefix_info {
struct in6_addr prefix;
};
+#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>
@@ -201,6 +202,15 @@ u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
/*
* multicast prototypes (mcast.c)
*/
+static inline int ipv6_mc_may_pull(struct sk_buff *skb,
+ unsigned int len)
+{
+ if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
+ return -EINVAL;
+
+ return pskb_may_pull(skb, len);
+}
+
int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
@@ -219,7 +229,8 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
void ipv6_mc_remap(struct inet6_dev *idev);
void ipv6_mc_init_dev(struct inet6_dev *idev);
void ipv6_mc_destroy_dev(struct inet6_dev *idev);
-int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);
+int ipv6_mc_check_icmpv6(struct sk_buff *skb);
+int ipv6_mc_check_mld(struct sk_buff *skb);
void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
@@ -237,6 +248,7 @@ struct ipv6_stub {
const struct in6_addr *addr);
int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
struct dst_entry **dst, struct flowi6 *fl6);
+ int (*ipv6_route_input)(struct sk_buff *skb);
struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
struct fib6_info *(*fib6_lookup)(struct net *net, int oif,
@@ -489,6 +501,20 @@ static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
#endif
}
+static inline bool ipv6_addr_is_all_snoopers(const struct in6_addr *addr)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ __be64 *p = (__be64 *)addr;
+
+ return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
+ (p[1] ^ cpu_to_be64(0x6a))) == 0UL;
+#else
+ return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+ addr->s6_addr32[1] | addr->s6_addr32[2] |
+ (addr->s6_addr32[3] ^ htonl(0x0000006a))) == 0;
+#endif
+}
+
#ifdef CONFIG_PROC_FS
int if6_proc_init(void);
void if6_proc_exit(void);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 1adefe42c0a6..2bfb87eb98ce 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -21,18 +21,6 @@ struct socket;
struct rxrpc_call;
/*
- * Call completion condition (state == RXRPC_CALL_COMPLETE).
- */
-enum rxrpc_call_completion {
- RXRPC_CALL_SUCCEEDED, /* - Normal termination */
- RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
- RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
- RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
- RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
- NR__RXRPC_CALL_COMPLETIONS
-};
-
-/*
* Debug ID counter for tracing.
*/
extern atomic_t rxrpc_debug_id;
@@ -73,10 +61,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
- struct sockaddr_rxrpc *, struct key *);
-int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
- enum rxrpc_call_completion *, u32 *);
u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 3f9aea8087e3..8b7eb46ad72d 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
void __ax25_put_route(ax25_route *ax25_rt);
+extern rwlock_t ax25_route_lock;
+
+static inline void ax25_route_lock_use(void)
+{
+ read_lock(&ax25_route_lock);
+}
+
+static inline void ax25_route_lock_unuse(void)
+{
+ read_unlock(&ax25_route_lock);
+}
+
static inline void ax25_put_route(ax25_route *ax25_rt)
{
if (refcount_dec_and_test(&ax25_rt->refcount))
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index ec9d6bc65855..fabee6db0abb 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -276,7 +276,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
void bt_accept_unlink(struct sock *sk);
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index c36dc1e20556..fbba43e9bef5 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -158,6 +158,18 @@ enum {
*/
HCI_QUIRK_INVALID_BDADDR,
+ /* When this quirk is set, the public Bluetooth address
+ * initially reported by HCI Read BD Address command
+ * is considered invalid. The public BD Address can be
+ * specified in the fwnode property 'local-bd-address'.
+ * If this property does not exist or is invalid, controller
+ * configuration is required before this device can be used.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_USE_BDADDR_PROPERTY,
+
/* When this quirk is set, the duplicate filtering during
* scanning is based on Bluetooth devices addresses. To allow
* RSSI based updates, restart scanning if needed.
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e5ea633ea368..094e61e07030 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -437,6 +437,7 @@ struct hci_dev {
int (*post_init)(struct hci_dev *hdev);
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+ void (*cmd_timeout)(struct hci_dev *hdev);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index fc3111515f5c..c781e1afd683 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -180,6 +180,19 @@ struct port;
#pragma pack(8)
#endif
+struct bond_3ad_stats {
+ atomic64_t lacpdu_rx;
+ atomic64_t lacpdu_tx;
+ atomic64_t lacpdu_unknown_rx;
+ atomic64_t lacpdu_illegal_rx;
+
+ atomic64_t marker_rx;
+ atomic64_t marker_tx;
+ atomic64_t marker_resp_rx;
+ atomic64_t marker_resp_tx;
+ atomic64_t marker_unknown_rx;
+};
+
/* aggregator structure(43.4.5 in the 802.3ad standard) */
typedef struct aggregator {
struct mac_addr aggregator_mac_address;
@@ -265,6 +278,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
+ struct bond_3ad_stats stats;
u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
u16 aggregator_identifier;
};
@@ -272,6 +286,7 @@ struct ad_bond_info {
struct ad_slave_info {
struct aggregator aggregator; /* 802.3ad aggregator structure */
struct port port; /* 802.3ad port structure */
+ struct bond_3ad_stats stats;
u16 id;
};
@@ -307,5 +322,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+size_t bond_3ad_stats_size(void);
#endif /* _NET_BOND_3AD_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e0c41eb1c860..bb307a11ee63 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -6,7 +6,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -836,6 +836,17 @@ struct cfg80211_bitrate_mask {
};
/**
+ * enum cfg80211_ap_settings_flags - AP settings flags
+ *
+ * Used by cfg80211_ap_settings
+ *
+ * @AP_SETTINGS_EXTERNAL_AUTH_SUPPORT: AP supports external authentication
+ */
+enum cfg80211_ap_settings_flags {
+ AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = BIT(0),
+};
+
+/**
* struct cfg80211_ap_settings - AP configuration
*
* Used to configure an AP interface.
@@ -865,6 +876,7 @@ struct cfg80211_bitrate_mask {
* @he_cap: HE capabilities (or %NULL if HE isn't enabled)
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
+ * @flags: flags, as defined in enum cfg80211_ap_settings_flags
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -890,6 +902,7 @@ struct cfg80211_ap_settings {
const struct ieee80211_vht_cap *vht_cap;
const struct ieee80211_he_cap_elem *he_cap;
bool ht_required, vht_required;
+ u32 flags;
};
/**
@@ -1003,6 +1016,7 @@ enum station_parameters_apply_mask {
* @support_p2p_ps: information if station supports P2P PS mechanism
* @he_capa: HE capabilities of station
* @he_capa_len: the length of the HE capabilities
+ * @airtime_weight: airtime scheduler weight for this station
*/
struct station_parameters {
const u8 *supported_rates;
@@ -1032,6 +1046,7 @@ struct station_parameters {
int support_p2p_ps;
const struct ieee80211_he_cap_elem *he_capa;
u8 he_capa_len;
+ u16 airtime_weight;
};
/**
@@ -1300,6 +1315,8 @@ struct cfg80211_tid_stats {
* from this peer
* @connected_to_gate: true if mesh STA has a path to mesh gate
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
+ * @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer
+ * @airtime_weight: current airtime scheduling weight
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
* Note that this doesn't use the @filled bit, but is used if non-NULL.
@@ -1350,8 +1367,9 @@ struct station_info {
u32 expected_throughput;
- u64 rx_beacon;
+ u64 tx_duration;
u64 rx_duration;
+ u64 rx_beacon;
u8 rx_beacon_signal_avg;
u8 connected_to_gate;
@@ -1359,6 +1377,8 @@ struct station_info {
s8 ack_signal;
s8 avg_ack_signal;
+ u16 airtime_weight;
+
u32 rx_mpdu_count;
u32 fcs_err_count;
};
@@ -1422,6 +1442,8 @@ enum monitor_flags {
* @MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled
* @MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled
* @MPATH_INFO_FLAGS: @flags filled
+ * @MPATH_INFO_HOP_COUNT: @hop_count filled
+ * @MPATH_INFO_PATH_CHANGE: @path_change_count filled
*/
enum mpath_info_flags {
MPATH_INFO_FRAME_QLEN = BIT(0),
@@ -1431,6 +1453,8 @@ enum mpath_info_flags {
MPATH_INFO_DISCOVERY_TIMEOUT = BIT(4),
MPATH_INFO_DISCOVERY_RETRIES = BIT(5),
MPATH_INFO_FLAGS = BIT(6),
+ MPATH_INFO_HOP_COUNT = BIT(7),
+ MPATH_INFO_PATH_CHANGE = BIT(8),
};
/**
@@ -1450,6 +1474,8 @@ enum mpath_info_flags {
* This number should increase every time the list of mesh paths
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
+ * @hop_count: hops to destination
+ * @path_change_count: total number of path changes to destination
*/
struct mpath_info {
u32 filled;
@@ -1460,6 +1486,8 @@ struct mpath_info {
u32 discovery_timeout;
u8 discovery_retries;
u8 flags;
+ u8 hop_count;
+ u32 path_change_count;
int generation;
};
@@ -2007,9 +2035,15 @@ struct cfg80211_bss_ies {
* a BSS that hides the SSID in its beacon, this points to the BSS struct
* that holds the beacon data. @beacon_ies is still valid, of course, and
* points to the same data as hidden_beacon_bss->beacon_ies in that case.
+ * @transmitted_bss: pointer to the transmitted BSS, if this is a
+ * non-transmitted one (multi-BSSID support)
+ * @nontrans_list: list of non-transmitted BSS, if this is a transmitted one
+ * (multi-BSSID support)
* @signal: signal strength value (type depends on the wiphy's signal_type)
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
+ * @bssid_index: index in the multiple BSS set
+ * @max_bssid_indicator: max number of members in the BSS set
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
@@ -2021,6 +2055,8 @@ struct cfg80211_bss {
const struct cfg80211_bss_ies __rcu *proberesp_ies;
struct cfg80211_bss *hidden_beacon_bss;
+ struct cfg80211_bss *transmitted_bss;
+ struct list_head nontrans_list;
s32 signal;
@@ -2031,19 +2067,36 @@ struct cfg80211_bss {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+ u8 bssid_index;
+ u8 max_bssid_indicator;
+
u8 priv[0] __aligned(sizeof(void *));
};
/**
+ * ieee80211_bss_get_elem - find element with given ID
+ * @bss: the bss to search
+ * @id: the element ID
+ *
+ * Note that the return value is an RCU-protected pointer, so
+ * rcu_read_lock() must be held when calling this function.
+ * Return: %NULL if not found.
+ */
+const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id);
+
+/**
* ieee80211_bss_get_ie - find IE with given ID
* @bss: the bss to search
- * @ie: the IE ID
+ * @id: the element ID
*
* Note that the return value is an RCU-protected pointer, so
* rcu_read_lock() must be held when calling this function.
* Return: %NULL if not found.
*/
-const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
+static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
+{
+ return (void *)ieee80211_bss_get_elem(bss, id);
+}
/**
@@ -2391,6 +2444,8 @@ enum wiphy_params_flags {
WIPHY_PARAM_TXQ_QUANTUM = 1 << 8,
};
+#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256
+
/**
* struct cfg80211_pmksa - PMK Security Association
*
@@ -2815,6 +2870,7 @@ struct cfg80211_pmk_conf {
* use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you
* the real status code for failures. Used only for the authentication
* response command interface (user space to driver).
+ * @pmkid: The identifier used to refer to a PMKSA.
*/
struct cfg80211_external_auth_params {
enum nl80211_external_auth_action action;
@@ -2822,6 +2878,7 @@ struct cfg80211_external_auth_params {
struct cfg80211_ssid ssid;
unsigned int key_mgmt_suite;
u16 status;
+ const u8 *pmkid;
};
/**
@@ -4112,6 +4169,8 @@ struct cfg80211_pmsr_capabilities {
* @signal_type: signal type reported in &struct cfg80211_bss.
* @cipher_suites: supported cipher suites
* @n_cipher_suites: number of supported cipher suites
+ * @akm_suites: supported AKM suites
+ * @n_akm_suites: number of supported AKM suites
* @retry_short: Retry limit for short frames (dot11ShortRetryLimit)
* @retry_long: Retry limit for long frames (dot11LongRetryLimit)
* @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold);
@@ -4265,6 +4324,11 @@ struct cfg80211_pmsr_capabilities {
* @txq_memory_limit: configuration internal TX queue memory limit
* @txq_quantum: configuration of internal TX queue scheduler quantum
*
+ * @support_mbssid: can HW support association with a nontransmitted AP
+ * @support_only_he_mbssid: don't parse MBSSID elements if it is not an
+ * HE AP, in order to avoid compatibility issues.
+ * @support_mbssid must be set for this to have any effect.
+ *
* @pmsr_capa: peer measurement capabilities
*/
struct wiphy {
@@ -4310,6 +4374,9 @@ struct wiphy {
int n_cipher_suites;
const u32 *cipher_suites;
+ int n_akm_suites;
+ const u32 *akm_suites;
+
u8 retry_short;
u8 retry_long;
u32 frag_threshold;
@@ -4402,6 +4469,9 @@ struct wiphy {
u32 txq_memory_limit;
u32 txq_quantum;
+ u8 support_mbssid:1,
+ support_only_he_mbssid:1;
+
const struct cfg80211_pmsr_capabilities *pmsr_capa;
char priv[0] __aligned(NETDEV_ALIGN);
@@ -4573,6 +4643,17 @@ struct cfg80211_cqm_config;
* @mesh_id_len: (private) Used by the internal configuration code
* @mesh_id_up_len: (private) Used by the internal configuration code
* @wext: (private) Used by the internal wireless extensions compat code
+ * @wext.ibss: (private) IBSS data part of wext handling
+ * @wext.connect: (private) connection handling data
+ * @wext.keys: (private) (WEP) key data
+ * @wext.ie: (private) extra elements for association
+ * @wext.ie_len: (private) length of extra elements
+ * @wext.bssid: (private) selected network BSSID
+ * @wext.ssid: (private) selected network SSID
+ * @wext.default_key: (private) selected default key index
+ * @wext.default_mgmt_key: (private) selected default management key index
+ * @wext.prev_bssid: (private) previous BSSID for reassociation
+ * @wext.prev_bssid_valid: (private) previous BSSID validity
* @use_4addr: indicates 4addr mode is used on this interface, must be
* set by driver (if supported) on add_interface BEFORE registering the
* netdev and may otherwise be used by driver read-only, will be update
@@ -4672,7 +4753,8 @@ struct wireless_dev {
struct cfg80211_cached_keys *keys;
const u8 *ie;
size_t ie_len;
- u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 prev_bssid[ETH_ALEN];
u8 ssid[IEEE80211_MAX_SSID_LEN];
s8 default_key, default_mgmt_key;
bool prev_bssid_valid;
@@ -4951,6 +5033,33 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
struct cfg80211_qos_map *qos_map);
/**
+ * cfg80211_find_elem_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE data where the byte array should match.
+ * Note the difference to cfg80211_find_ie_match() which considers
+ * the offset to start from the element ID byte, but here we take
+ * the data portion instead.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+const struct element *
+cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len,
+ const u8 *match, unsigned int match_len,
+ unsigned int match_offset);
+
+/**
* cfg80211_find_ie_match - match information element and byte array in data
*
* @eid: element ID
@@ -4974,9 +5083,44 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
* having to fit into the given data and being large enough for the
* byte array to match.
*/
-const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
- const u8 *match, int match_len,
- int match_offset);
+static inline const u8 *
+cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len,
+ const u8 *match, unsigned int match_len,
+ unsigned int match_offset)
+{
+ /* match_offset can't be smaller than 2, unless match_len is
+ * zero, in which case match_offset must be zero as well.
+ */
+ if (WARN_ON((match_len && match_offset < 2) ||
+ (!match_len && match_offset)))
+ return NULL;
+
+ return (void *)cfg80211_find_elem_match(eid, ies, len,
+ match, match_len,
+ match_offset ?
+ match_offset - 2 : 0);
+}
+
+/**
+ * cfg80211_find_elem - find information element in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the element ID could not be found or if the element
+ * is invalid (claims to be longer than the given data); otherwise
+ * return the requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const struct element *
+cfg80211_find_elem(u8 eid, const u8 *ies, int len)
+{
+ return cfg80211_find_elem_match(eid, ies, len, NULL, 0, 0);
+}
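For reference, a minimal sketch (not part of this patch) of consuming the struct element returned by the new accessors; it assumes the element layout (id, datalen, data) from linux/ieee80211.h, and the function name is illustrative:

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static void example_log_ssid(const u8 *ies, int len)
{
        const struct element *elem;

        elem = cfg80211_find_elem(WLAN_EID_SSID, ies, len);
        if (elem && elem->datalen <= IEEE80211_MAX_SSID_LEN)
                pr_info("SSID: %.*s\n", elem->datalen, elem->data);
}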
/**
* cfg80211_find_ie - find information element in data
@@ -4999,6 +5143,28 @@ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
}
/**
+ * cfg80211_find_ext_elem - find information element with EID Extension in data
+ *
+ * @ext_eid: element ID Extension
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the extended element could not be found or if the
+ * element is invalid (claims to be longer than the given data);
+ * otherwise return the requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const struct element *
+cfg80211_find_ext_elem(u8 ext_eid, const u8 *ies, int len)
+{
+ return cfg80211_find_elem_match(WLAN_EID_EXTENSION, ies, len,
+ &ext_eid, 1, 0);
+}
+
+/**
* cfg80211_find_ext_ie - find information element with EID Extension in data
*
* @ext_eid: element ID Extension
@@ -5020,6 +5186,25 @@ static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len)
}
/**
+ * cfg80211_find_vendor_elem - find vendor specific information element in data
+ *
+ * @oui: vendor OUI
+ * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the vendor specific element ID could not be found or if the
+ * element is invalid (claims to be longer than the given data); otherwise
+ * return the element structure for the requested element.
+ *
+ * Note: There are no checks on the element length other than having to fit into
+ * the given data.
+ */
+const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type,
+ const u8 *ies,
+ unsigned int len);
+
+/**
* cfg80211_find_vendor_ie - find vendor specific information element in data
*
* @oui: vendor OUI
@@ -5035,8 +5220,12 @@ static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len)
* Note: There are no checks on the element length other than having to fit into
* the given data.
*/
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
- const u8 *ies, int len);
+static inline const u8 *
+cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
+ const u8 *ies, unsigned int len)
+{
+ return (void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
+}
/**
* cfg80211_send_layer2_update - send layer 2 update frame
@@ -5282,6 +5471,27 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
}
/**
+ * cfg80211_gen_new_bssid - generate a nontransmitted BSSID for multi-BSSID
+ * @bssid: transmitter BSSID
+ * @max_bssid: max BSSID indicator, taken from Multiple BSSID element
+ * @mbssid_index: BSSID index, taken from Multiple BSSID index element
+ * @new_bssid: calculated nontransmitted BSSID
+ */
+static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
+ u8 mbssid_index, u8 *new_bssid)
+{
+ u64 bssid_u64 = ether_addr_to_u64(bssid);
+ u64 mask = GENMASK_ULL(max_bssid - 1, 0);
+ u64 new_bssid_u64;
+
+ new_bssid_u64 = bssid_u64 & ~mask;
+
+ new_bssid_u64 |= ((bssid_u64 & mask) + mbssid_index) & mask;
+
+ u64_to_ether_addr(new_bssid_u64, new_bssid);
+}
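For reference, a short sketch (not part of this patch) of deriving a nontransmitted BSSID with the helper above; the function and variable names are illustrative:

#include <linux/etherdevice.h>
#include <net/cfg80211.h>

static void example_derive_bssid(const u8 *tx_bssid, u8 max_bssid_indicator,
                                 u8 mbssid_index)
{
        u8 nontrans_bssid[ETH_ALEN];

        /* max_bssid_indicator and mbssid_index come from the scanned elements. */
        cfg80211_gen_new_bssid(tx_bssid, max_bssid_indicator, mbssid_index,
                               nontrans_bssid);
        pr_info("nontransmitted BSSID: %pM\n", nontrans_bssid);
}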
+
+/**
* enum cfg80211_bss_frame_type - frame type that the BSS data came from
* @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
* from a beacon or probe response
@@ -5466,10 +5676,12 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
* @dev: network device
* @bss: the BSS that association was requested with, ownership of the pointer
* moves to cfg80211 in this call
- * @buf: authentication frame (header + body)
+ * @buf: (Re)Association Response frame (header + body)
* @len: length of the frame data
* @uapsd_queues: bitmap of queues configured for uapsd. Same format
* as the AC bitmap in the QoS info field
+ * @req_ies: information elements from the (Re)Association Request frame
+ * @req_ies_len: length of req_ies data
*
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
@@ -5479,7 +5691,8 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
void cfg80211_rx_assoc_resp(struct net_device *dev,
struct cfg80211_bss *bss,
const u8 *buf, size_t len,
- int uapsd_queues);
+ int uapsd_queues,
+ const u8 *req_ies, size_t req_ies_len);
/**
* cfg80211_assoc_timeout - notification of timed out association
@@ -5568,7 +5781,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
* @dev: network device
* @macaddr: the MAC address of the new candidate
* @ie: information elements advertised by the peer candidate
- * @ie_len: lenght of the information elements buffer
+ * @ie_len: length of the information elements buffer
* @gfp: allocation flags
*
* This function notifies cfg80211 that the mesh peer candidate has been
@@ -5641,6 +5854,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
struct wireless_dev *wdev,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
+ unsigned int portid,
int vendor_event_idx,
int approxlen, gfp_t gfp);
@@ -5691,6 +5905,15 @@ cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
/**
+ * cfg80211_vendor_cmd_get_sender - get the netlink port ID of the sender
+ * @wiphy: the wiphy
+ *
+ * Return the netlink port ID of the sender of the vendor command that is
+ * currently being handled. It is only valid to call this function from
+ * within a vendor command handler.
+ */
+unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy);
+
+/**
* cfg80211_vendor_event_alloc - allocate vendor-specific event skb
* @wiphy: the wiphy
* @wdev: the wireless device
@@ -5717,7 +5940,42 @@ cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev,
{
return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
NL80211_ATTR_VENDOR_DATA,
- event_idx, approxlen, gfp);
+ 0, event_idx, approxlen, gfp);
+}
+
+/**
+ * cfg80211_vendor_event_alloc_ucast - alloc unicast vendor-specific event skb
+ * @wiphy: the wiphy
+ * @wdev: the wireless device
+ * @event_idx: index of the vendor event in the wiphy's vendor_events
+ * @portid: port ID of the receiver
+ * @approxlen: an upper bound of the length of the data that will
+ * be put into the skb
+ * @gfp: allocation flags
+ *
+ * This function allocates and pre-fills an skb for an event to send to
+ * a specific (userland) socket. This socket would previously have been
+ * obtained by cfg80211_vendor_cmd_get_sender(), and the caller MUST take
+ * care to register a netlink notifier to see when the socket closes.
+ *
+ * If wdev != NULL, both the ifindex and identifier of the specified
+ * wireless device are added to the event message before the vendor data
+ * attribute.
+ *
+ * When done filling the skb, call cfg80211_vendor_event() with the
+ * skb to send the event.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_event_alloc_ucast(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ unsigned int portid, int approxlen,
+ int event_idx, gfp_t gfp)
+{
+ return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
+ NL80211_ATTR_VENDOR_DATA,
+ portid, event_idx, approxlen, gfp);
}
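
A hedged sketch of the intended unicast-event flow: the driver records the sender in its vendor command handler and later targets events at that socket. The my_drv structure, its wiphy/event_portid fields, the event index and the attribute are invented for the example.

static int my_drv_vendor_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
			     const void *data, int data_len)
{
	struct my_drv *drv = wiphy_priv(wiphy);

	/* remember who asked so later events can be unicast to them */
	drv->event_portid = cfg80211_vendor_cmd_get_sender(wiphy);
	return 0;
}

static void my_drv_send_event(struct my_drv *drv, struct wireless_dev *wdev,
			      u32 value)
{
	struct sk_buff *skb;

	skb = cfg80211_vendor_event_alloc_ucast(drv->wiphy, wdev,
						drv->event_portid, 16,
						MY_DRV_EVENT_IDX, GFP_KERNEL);
	if (!skb)
		return;

	if (nla_put_u32(skb, MY_DRV_ATTR_VALUE, value)) {
		kfree_skb(skb);
		return;
	}

	cfg80211_vendor_event(skb, GFP_KERNEL);
}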
/**
@@ -5817,7 +6075,7 @@ static inline struct sk_buff *
cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
{
return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE,
- NL80211_ATTR_TESTDATA, -1,
+ NL80211_ATTR_TESTDATA, 0, -1,
approxlen, gfp);
}
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 67f4293bc970..63de99e09f04 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -30,6 +30,7 @@ struct devlink {
struct list_head param_list;
struct list_head region_list;
u32 snapshot_id;
+ struct list_head reporter_list;
struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
@@ -48,6 +49,7 @@ struct devlink_port_attrs {
struct devlink_port {
struct list_head list;
+ struct list_head param_list;
struct devlink *devlink;
unsigned index;
bool registered;
@@ -61,6 +63,7 @@ struct devlink_sb_pool_info {
enum devlink_sb_pool_type pool_type;
u32 size;
enum devlink_sb_threshold_type threshold_type;
+ u32 cell_size;
};
/**
@@ -355,6 +358,7 @@ struct devlink_param_item {
const struct devlink_param *param;
union devlink_param_value driverinit_value;
bool driverinit_value_valid;
+ bool published;
};
enum devlink_param_generic_id {
@@ -419,10 +423,55 @@ enum devlink_param_generic_id {
.validate = _validate, \
}
+/* Part number, identifier of board design */
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_ID "board.id"
+/* Revision of board design */
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_REV "board.rev"
+/* Maker of the board */
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE "board.manufacture"
+
+/* Control processor FW version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt"
+/* Data path microcode controlling high-speed packet processing */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_APP "fw.app"
+/* UNDI software version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_UNDI "fw.undi"
+/* NCSI support/handler version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_NCSI "fw.ncsi"
+
struct devlink_region;
+struct devlink_info_req;
typedef void devlink_snapshot_data_dest_t(const void *data);
+struct devlink_fmsg;
+struct devlink_health_reporter;
+
+enum devlink_health_reporter_state {
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY,
+ DEVLINK_HEALTH_REPORTER_STATE_ERROR,
+};
+
+/**
+ * struct devlink_health_reporter_ops - Reporter operations
+ * @name: reporter name
+ * @recover: callback to recover from reported error
+ * if priv_ctx is NULL, run a full recovery
+ * @dump: callback to dump an object
+ * if priv_ctx is NULL, run a full dump
+ * @diagnose: callback to diagnose the current status
+ */
+
+struct devlink_health_reporter_ops {
+ char *name;
+ int (*recover)(struct devlink_health_reporter *reporter,
+ void *priv_ctx);
+ int (*dump)(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx);
+ int (*diagnose)(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg);
+};
+
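A hedged sketch of how a driver would be expected to wire up a reporter: fill an ops structure, create the reporter at probe time and raise a report when an error is detected. Everything prefixed my_nic_ is invented, as are the 5000 ms grace period and the simplified error handling.

static int my_nic_tx_recover(struct devlink_health_reporter *reporter,
			     void *priv_ctx)
{
	struct my_nic *nic = devlink_health_reporter_priv(reporter);

	return my_nic_reset_tx_queues(nic);	/* hypothetical helper */
}

static const struct devlink_health_reporter_ops my_nic_tx_reporter_ops = {
	.name = "tx",
	.recover = my_nic_tx_recover,
};

static int my_nic_health_init(struct my_nic *nic, struct devlink *devlink)
{
	nic->tx_reporter = devlink_health_reporter_create(devlink,
							  &my_nic_tx_reporter_ops,
							  5000, true, nic);
	if (IS_ERR_OR_NULL(nic->tx_reporter))
		return -EINVAL;		/* simplified error handling */
	return 0;
}

static void my_nic_tx_timeout(struct my_nic *nic, void *err_ctx)
{
	devlink_health_report(nic->tx_reporter, "TX timeout", err_ctx);
}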
struct devlink_ops {
int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
@@ -475,6 +524,11 @@ struct devlink_ops {
int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode,
struct netlink_ext_ack *extack);
+ int (*info_get)(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+ int (*flash_update)(struct devlink *devlink, const char *file_name,
+ const char *component,
+ struct netlink_ext_ack *extack);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -489,6 +543,15 @@ static inline struct devlink *priv_to_devlink(void *priv)
return container_of(priv, struct devlink, priv);
}
+static inline struct devlink *netdev_to_devlink(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+ if (dev->netdev_ops->ndo_get_devlink)
+ return dev->netdev_ops->ndo_get_devlink(dev);
+#endif
+ return NULL;
+}
+
struct ib_device;
#if IS_ENABLED(CONFIG_NET_DEVLINK)
@@ -567,11 +630,28 @@ int devlink_params_register(struct devlink *devlink,
void devlink_params_unregister(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
+void devlink_params_publish(struct devlink *devlink);
+void devlink_params_unpublish(struct devlink *devlink);
+int devlink_port_params_register(struct devlink_port *devlink_port,
+ const struct devlink_param *params,
+ size_t params_count);
+void devlink_port_params_unregister(struct devlink_port *devlink_port,
+ const struct devlink_param *params,
+ size_t params_count);
int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
union devlink_param_value *init_val);
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val);
+int
+devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
+ u32 param_id,
+ union devlink_param_value *init_val);
+int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
+ u32 param_id,
+ union devlink_param_value init_val);
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+void devlink_port_param_value_changed(struct devlink_port *devlink_port,
+ u32 param_id);
void devlink_param_value_str_fill(union devlink_param_value *dst_val,
const char *src);
struct devlink_region *devlink_region_create(struct devlink *devlink,
@@ -583,6 +663,70 @@ u32 devlink_region_shapshot_id_get(struct devlink *devlink);
int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
u8 *data, u32 snapshot_id,
devlink_snapshot_data_dest_t *data_destructor);
+int devlink_info_serial_number_put(struct devlink_info_req *req,
+ const char *sn);
+int devlink_info_driver_name_put(struct devlink_info_req *req,
+ const char *name);
+int devlink_info_version_fixed_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value);
+int devlink_info_version_stored_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value);
+int devlink_info_version_running_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value);
+
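A hedged sketch of an .info_get implementation built from the helpers above; the driver name, serial number and version strings are placeholders.

static int my_nic_devlink_info_get(struct devlink *devlink,
				   struct devlink_info_req *req,
				   struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_info_driver_name_put(req, "my_nic");
	if (err)
		return err;

	err = devlink_info_serial_number_put(req, "ABC123456");
	if (err)
		return err;

	/* never changes at runtime -> "fixed" */
	err = devlink_info_version_fixed_put(req,
					     DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
					     "rev2");
	if (err)
		return err;

	/* firmware currently executing on the device -> "running" */
	return devlink_info_version_running_put(req,
						DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
						"1.2.3");
}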
+int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg);
+int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg);
+
+int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name);
+int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg);
+
+int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg);
+
+int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value);
+int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
+int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
+int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
+int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
+int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len);
+
+int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ bool value);
+int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u8 value);
+int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u32 value);
+int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u64 value);
+int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const char *value);
+int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const void *value, u16 value_len);
+
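A hedged sketch of a .diagnose callback emitting name/value pairs with the fmsg helpers; the my_nic counters are invented, and any additional nesting the devlink core may expect around these pairs is not shown here.

static int my_nic_tx_diagnose(struct devlink_health_reporter *reporter,
			      struct devlink_fmsg *fmsg)
{
	struct my_nic *nic = devlink_health_reporter_priv(reporter);
	int err;

	err = devlink_fmsg_u32_pair_put(fmsg, "tx_timeouts", nic->tx_timeouts);
	if (err)
		return err;

	return devlink_fmsg_bool_pair_put(fmsg, "queues_stopped",
					  nic->queues_stopped);
}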
+struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, bool auto_recover,
+ void *priv);
+void
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
+
+void *
+devlink_health_reporter_priv(struct devlink_health_reporter *reporter);
+int devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx);
+void
+devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
+ enum devlink_health_reporter_state state);
+
+void devlink_compat_running_version(struct net_device *dev,
+ char *buf, size_t len);
+int devlink_compat_flash_update(struct net_device *dev, const char *file_name);
#else
@@ -601,6 +745,14 @@ static inline void devlink_unregister(struct devlink *devlink)
{
}
+static inline void devlink_params_publish(struct devlink *devlink)
+{
+}
+
+static inline void devlink_params_unpublish(struct devlink *devlink)
+{
+}
+
static inline void devlink_free(struct devlink *devlink)
{
kfree(devlink);
@@ -792,6 +944,21 @@ devlink_params_unregister(struct devlink *devlink,
}
static inline int
+devlink_port_params_register(struct devlink_port *devlink_port,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return 0;
+}
+
+static inline void
+devlink_port_params_unregister(struct devlink_port *devlink_port,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+}
+
+static inline int
devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
union devlink_param_value *init_val)
{
@@ -805,12 +972,34 @@ devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
return -EOPNOTSUPP;
}
+static inline int
+devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
+ u32 param_id,
+ union devlink_param_value *init_val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
+ u32 param_id,
+ union devlink_param_value init_val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void
devlink_param_value_changed(struct devlink *devlink, u32 param_id)
{
}
static inline void
+devlink_port_param_value_changed(struct devlink_port *devlink_port,
+ u32 param_id)
+{
+}
+
+static inline void
devlink_param_value_str_fill(union devlink_param_value *dst_val,
const char *src)
{
@@ -844,6 +1033,201 @@ devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
return 0;
}
+static inline int
+devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
+{
+ return 0;
+}
+
+static inline int
+devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
+{
+ return 0;
+}
+
+static inline int
+devlink_info_version_fixed_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return 0;
+}
+
+static inline int
+devlink_info_version_stored_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return 0;
+}
+
+static inline int
+devlink_info_version_running_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ bool value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u8 value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u32 value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u64 value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const char *value)
+{
+ return 0;
+}
+
+static inline int
+devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const void *value, u16 value_len)
+{
+ return 0;
+}
+
+static inline struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, bool auto_recover,
+ void *priv)
+{
+ return NULL;
+}
+
+static inline void
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
+{
+}
+
+static inline void *
+devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
+{
+ return NULL;
+}
+
+static inline int
+devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ return 0;
+}
+
+static inline void
+devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
+ enum devlink_health_reporter_state state)
+{
+}
+
+static inline void
+devlink_compat_running_version(struct net_device *dev, char *buf, size_t len)
+{
+}
+
+static inline int
+devlink_compat_flash_update(struct net_device *dev, const char *file_name)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif /* _NET_DEVLINK_H_ */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b3eefe8e18fd..ae480bba11f5 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -21,6 +21,7 @@
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
+#include <linux/platform_data/dsa.h>
#include <net/devlink.h>
#include <net/switchdev.h>
@@ -37,6 +38,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_GSWIP,
DSA_TAG_PROTO_KSZ9477,
+ DSA_TAG_PROTO_KSZ9893,
DSA_TAG_PROTO_LAN9303,
DSA_TAG_PROTO_MTK,
DSA_TAG_PROTO_QCA,
@@ -44,66 +46,6 @@ enum dsa_tag_protocol {
DSA_TAG_LAST, /* MUST BE LAST */
};
-#define DSA_MAX_SWITCHES 4
-#define DSA_MAX_PORTS 12
-
-#define DSA_RTABLE_NONE -1
-
-struct dsa_chip_data {
- /*
- * How to access the switch configuration registers.
- */
- struct device *host_dev;
- int sw_addr;
-
- /*
- * Reference to network devices
- */
- struct device *netdev[DSA_MAX_PORTS];
-
- /* set to size of eeprom if supported by the switch */
- int eeprom_len;
-
- /* Device tree node pointer for this specific switch chip
- * used during switch setup in case additional properties
- * and resources needs to be used
- */
- struct device_node *of_node;
-
- /*
- * The names of the switch's ports. Use "cpu" to
- * designate the switch port that the cpu is connected to,
- * "dsa" to indicate that this port is a DSA link to
- * another switch, NULL to indicate the port is unused,
- * or any other string to indicate this is a physical port.
- */
- char *port_names[DSA_MAX_PORTS];
- struct device_node *port_dn[DSA_MAX_PORTS];
-
- /*
- * An array of which element [a] indicates which port on this
- * switch should be used to send packets to that are destined
- * for switch a. Can be NULL if there is only one switch chip.
- */
- s8 rtable[DSA_MAX_SWITCHES];
-};
-
-struct dsa_platform_data {
- /*
- * Reference to a Linux network interface that connects
- * to the root switch chip of the tree.
- */
- struct device *netdev;
- struct net_device *of_netdev;
-
- /*
- * Info structs describing each of the switch chips
- * connected via this network interface.
- */
- int nr_chips;
- struct dsa_chip_data *chip;
-};
-
struct packet_type;
struct dsa_switch;
@@ -208,6 +150,11 @@ struct dsa_port {
* Original copy of the master netdev ethtool_ops
*/
const struct ethtool_ops *orig_ethtool_ops;
+
+ /*
+ * Original copy of the master netdev net_device_ops
+ */
+ const struct net_device_ops *orig_ndo_ops;
};
struct dsa_switch {
@@ -418,8 +365,7 @@ struct dsa_switch_ops {
*/
int (*port_enable)(struct dsa_switch *ds, int port,
struct phy_device *phy);
- void (*port_disable)(struct dsa_switch *ds, int port,
- struct phy_device *phy);
+ void (*port_disable)(struct dsa_switch *ds, int port);
/*
* Port's MAC EEE settings
@@ -454,6 +400,8 @@ struct dsa_switch_ops {
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
void (*port_fast_age)(struct dsa_switch *ds, int port);
+ int (*port_egress_floods)(struct dsa_switch *ds, int port,
+ bool unicast, bool multicast);
/*
* VLAN support
diff --git a/include/net/flow.h b/include/net/flow.h
index 93f2c9a0f098..a50fb77a0b27 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -40,6 +40,7 @@ struct flowi_common {
__u32 flowic_secid;
kuid_t flowic_uid;
struct flowi_tunnel flowic_tun_key;
+ __u32 flowic_multipath_hash;
};
union flowi_uli {
@@ -78,6 +79,7 @@ struct flowi4 {
#define flowi4_secid __fl_common.flowic_secid
#define flowi4_tun_key __fl_common.flowic_tun_key
#define flowi4_uid __fl_common.flowic_uid
+#define flowi4_multipath_hash __fl_common.flowic_multipath_hash
/* (saddr,daddr) must be grouped, same order as in IP header */
__be32 saddr;
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
new file mode 100644
index 000000000000..d035183c8d03
--- /dev/null
+++ b/include/net/flow_offload.h
@@ -0,0 +1,203 @@
+#ifndef _NET_FLOW_OFFLOAD_H
+#define _NET_FLOW_OFFLOAD_H
+
+#include <net/flow_dissector.h>
+
+struct flow_match {
+ struct flow_dissector *dissector;
+ void *mask;
+ void *key;
+};
+
+struct flow_match_basic {
+ struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+ struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+ struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+struct flow_match_vlan {
+ struct flow_dissector_key_vlan *key, *mask;
+};
+
+struct flow_match_ipv4_addrs {
+ struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+ struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+struct flow_match_ip {
+ struct flow_dissector_key_ip *key, *mask;
+};
+
+struct flow_match_ports {
+ struct flow_dissector_key_ports *key, *mask;
+};
+
+struct flow_match_icmp {
+ struct flow_dissector_key_icmp *key, *mask;
+};
+
+struct flow_match_tcp {
+ struct flow_dissector_key_tcp *key, *mask;
+};
+
+struct flow_match_mpls {
+ struct flow_dissector_key_mpls *key, *mask;
+};
+
+struct flow_match_enc_keyid {
+ struct flow_dissector_key_keyid *key, *mask;
+};
+
+struct flow_match_enc_opts {
+ struct flow_dissector_key_enc_opts *key, *mask;
+};
+
+struct flow_rule;
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+ struct flow_match_basic *out);
+void flow_rule_match_control(const struct flow_rule *rule,
+ struct flow_match_control *out);
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+ struct flow_match_eth_addrs *out);
+void flow_rule_match_vlan(const struct flow_rule *rule,
+ struct flow_match_vlan *out);
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out);
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out);
+void flow_rule_match_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out);
+void flow_rule_match_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out);
+void flow_rule_match_tcp(const struct flow_rule *rule,
+ struct flow_match_tcp *out);
+void flow_rule_match_icmp(const struct flow_rule *rule,
+ struct flow_match_icmp *out);
+void flow_rule_match_mpls(const struct flow_rule *rule,
+ struct flow_match_mpls *out);
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+ struct flow_match_control *out);
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out);
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out);
+void flow_rule_match_enc_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out);
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out);
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+ struct flow_match_enc_keyid *out);
+void flow_rule_match_enc_opts(const struct flow_rule *rule,
+ struct flow_match_enc_opts *out);
+
+enum flow_action_id {
+ FLOW_ACTION_ACCEPT = 0,
+ FLOW_ACTION_DROP,
+ FLOW_ACTION_TRAP,
+ FLOW_ACTION_GOTO,
+ FLOW_ACTION_REDIRECT,
+ FLOW_ACTION_MIRRED,
+ FLOW_ACTION_VLAN_PUSH,
+ FLOW_ACTION_VLAN_POP,
+ FLOW_ACTION_VLAN_MANGLE,
+ FLOW_ACTION_TUNNEL_ENCAP,
+ FLOW_ACTION_TUNNEL_DECAP,
+ FLOW_ACTION_MANGLE,
+ FLOW_ACTION_ADD,
+ FLOW_ACTION_CSUM,
+ FLOW_ACTION_MARK,
+ FLOW_ACTION_WAKE,
+ FLOW_ACTION_QUEUE,
+};
+
+/* This mirrors the enum pedit_header_type definition so that it can be mapped
+ * easily to the tc pedit action. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK
+ * is mapped to FLOW_ACT_MANGLE_UNSPEC, which no driver supports.
+ */
+enum flow_action_mangle_base {
+ FLOW_ACT_MANGLE_UNSPEC = 0,
+ FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ FLOW_ACT_MANGLE_HDR_TYPE_IP4,
+ FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+ FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+};
+
+struct flow_action_entry {
+ enum flow_action_id id;
+ union {
+ u32 chain_index; /* FLOW_ACTION_GOTO */
+ struct net_device *dev; /* FLOW_ACTION_REDIRECT */
+ struct { /* FLOW_ACTION_VLAN */
+ u16 vid;
+ __be16 proto;
+ u8 prio;
+ } vlan;
+ struct { /* FLOW_ACTION_PACKET_EDIT */
+ enum flow_action_mangle_base htype;
+ u32 offset;
+ u32 mask;
+ u32 val;
+ } mangle;
+ const struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */
+ u32 csum_flags; /* FLOW_ACTION_CSUM */
+ u32 mark; /* FLOW_ACTION_MARK */
+ struct { /* FLOW_ACTION_QUEUE */
+ u32 ctx;
+ u32 index;
+ u8 vf;
+ } queue;
+ };
+};
+
+struct flow_action {
+ unsigned int num_entries;
+ struct flow_action_entry entries[0];
+};
+
+static inline bool flow_action_has_entries(const struct flow_action *action)
+{
+ return action->num_entries;
+}
+
+#define flow_action_for_each(__i, __act, __actions) \
+ for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
+
+struct flow_rule {
+ struct flow_match match;
+ struct flow_action action;
+};
+
+struct flow_rule *flow_rule_alloc(unsigned int num_actions);
+
+static inline bool flow_rule_match_key(const struct flow_rule *rule,
+ enum flow_dissector_key_id key)
+{
+ return dissector_uses_key(rule->match.dissector, key);
+}
+
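A hedged sketch of how a driver offloading a classifier might consume a struct flow_rule: test for a key, extract it with the match helper, then walk the actions; the hardware-programming steps are left as comments.

static int example_parse_rule(const struct flow_rule *rule)
{
	struct flow_match_eth_addrs eth;
	const struct flow_action_entry *act;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		flow_rule_match_eth_addrs(rule, &eth);
		/* eth.key->dst masked by eth.mask->dst describes the match */
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			/* program a drop rule in hardware */
			break;
		case FLOW_ACTION_REDIRECT:
			/* act->dev is the target net_device */
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}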
+struct flow_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 lastused;
+};
+
+static inline void flow_stats_update(struct flow_stats *flow_stats,
+ u64 bytes, u64 pkts, u64 lastused)
+{
+ flow_stats->pkts += pkts;
+ flow_stats->bytes += bytes;
+ flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused);
+}
+
+#endif /* _NET_FLOW_OFFLOAD_H */
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 6ac3a5bd0117..e0f709d26dde 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -22,6 +22,7 @@
#include <net/inet_sock.h>
#include <net/snmp.h>
+#include <net/ip.h>
struct icmp_err {
int errno;
@@ -39,7 +40,13 @@ struct net_proto_family;
struct sk_buff;
struct net;
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}
+
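The wrapper keeps existing callers unchanged: they still pass three arguments and the options are taken from IPCB(skb_in)->opt, while callers that have parsed the options themselves can call __icmp_send() directly. A familiar example of the wrapper form, as used on forwarding paths:

/* Illustration only: the classic "fragmentation needed" error sent when a
 * packet exceeds the outgoing MTU.
 */
static void example_send_frag_needed(struct sk_buff *skb, unsigned int mtu)
{
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
}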
int icmp_rcv(struct sk_buff *skb);
int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 8014153bdd49..459d355f6506 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 Intel Corporation
+ * Copyright (c) 2018-2019 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -291,6 +291,12 @@ enum ieee80211_radiotap_he_bits {
IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f,
IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW = 0x00c0,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_20MHZ = 0,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_40MHZ = 1,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_80MHZ = 2,
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_160MHZ = 3,
IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00,
IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000,
};
@@ -343,6 +349,7 @@ struct ieee80211_radiotap_lsig {
enum ieee80211_radiotap_zero_len_psdu_type {
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING = 0,
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED = 1,
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 0xff,
};
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 371b3b45fd5c..ff40e1d08157 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -139,8 +139,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;
- u64 icsk_ca_priv[88 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
+ u64 icsk_ca_priv[104 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
@@ -314,4 +314,29 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+
+#define TCP_PINGPONG_THRESH 3
+
+static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
+{
+ inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
+}
+
+static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
+{
+ inet_csk(sk)->icsk_ack.pingpong = 0;
+}
+
+static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
+{
+ return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
+}
+
+static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ack.pingpong < U8_MAX)
+ icsk->icsk_ack.pingpong++;
+}
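A hedged sketch of how the counter is meant to be used now that pingpong is a count rather than a flag; the surrounding function is a simplified stand-in for the delayed-ACK heuristics.

static bool example_note_interactive(struct sock *sk)
{
	/* count another data-after-ACK event; saturates at U8_MAX */
	inet_csk_inc_pingpong_cnt(sk);

	/* interactive ("pingpong") mode is only reported once the count
	 * reaches TCP_PINGPONG_THRESH
	 */
	return inet_csk_in_pingpong_mode(sk);
}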
#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 1662cbc0b46b..378904ee9129 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -56,7 +56,6 @@ struct frag_v6_compare_key {
* @timer: queue expiration timer
* @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
- * @fragments: received fragments head
* @rb_fragments: received fragments rb-tree root
* @fragments_tail: received fragments tail
* @last_run_head: the head of the last "run". see ip_fragment.c
@@ -77,8 +76,7 @@ struct inet_frag_queue {
struct timer_list timer;
spinlock_t lock;
refcount_t refcnt;
- struct sk_buff *fragments; /* Used in IPv6. */
- struct rb_root rb_fragments; /* Used in IPv4. */
+ struct rb_root rb_fragments;
struct sk_buff *fragments_tail;
struct sk_buff *last_run_head;
ktime_t stamp;
@@ -153,4 +151,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
extern const u8 ip_frag_ecn_table[16];
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK 0
+#define IPFRAG_DUP 1
+#define IPFRAG_OVERLAP 2
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
#endif
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 00b5e7825508..74ff688568a0 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -39,6 +39,7 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ u32 n_redirects;
unsigned long rate_last;
/*
* Once inet_peer is queued for deletion (refcnt == 0), following field
diff --git a/include/net/ip.h b/include/net/ip.h
index 8866bfce6121..be3cad9c2e4c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -667,6 +667,8 @@ static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
}
void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
@@ -716,7 +718,7 @@ extern int sysctl_icmp_msgs_burst;
int ip_misc_proc_init(void);
#endif
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack);
#endif /* _IP_H */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index c5969762a8f4..9c8214d2116d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb, struct fib_dump_filter *filter);
-int fib_table_flush(struct net *net, struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 34f019650941..af645604f328 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -241,7 +241,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
int proto,
__be32 daddr, __be32 saddr,
__be32 key, __u8 tos, int oif,
- __u32 mark)
+ __u32 mark, __u32 tun_inner_hash)
{
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_oif = oif;
@@ -251,6 +251,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
fl4->flowi4_proto = proto;
fl4->fl4_gre_key = key;
fl4->flowi4_mark = mark;
+ fl4->flowi4_multipath_hash = tun_inner_hash;
}
int ip_tunnel_init(struct net_device *dev);
@@ -267,7 +268,7 @@ void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
- const u8 proto);
+ const u8 proto, int tunnel_hlen);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a0d2e0bb9a94..047f9a5ccaad 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -453,9 +453,6 @@ struct ip_vs_protocol {
int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
- int (*csum_check)(int af, struct sk_buff *skb,
- struct ip_vs_protocol *pp);
-
const char *(*state_name)(int state);
void (*state_transition)(struct ip_vs_conn *cp, int direction,
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 6ced1e6899b6..28aa9b30aece 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -82,8 +82,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
/* Don't send error if the first segment did not arrive. */
- head = fq->q.fragments;
- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN))
+ goto out;
+
+ /* sk_buff::dev and sk_buff::rbnode share a union, so we
+ * pull the head out of the tree in order to be able to
+ * deal with head->dev.
+ */
+ head = inet_frag_pull_head(&fq->q);
+ if (!head)
goto out;
head->dev = dev;
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 78fa0ac4613c..5175fd63cd82 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
if (netif_is_l3_slave(skb->dev))
master = netdev_master_upper_dev_get_rcu(skb->dev);
- else if (netif_is_l3_master(skb->dev))
+ else if (netif_is_l3_master(skb->dev) ||
+ netif_has_l3_rx_handler(skb->dev))
master = skb->dev;
if (master && master->l3mdev_ops->l3mdev_l3_rcv)
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 33fd9ba7e0e5..671113bcb2cc 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -126,6 +126,8 @@ int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int lwtunnel_input(struct sk_buff *skb);
int lwtunnel_xmit(struct sk_buff *skb);
+int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
+ bool ingress);
static inline void lwtunnel_set_redirect(struct dst_entry *dst)
{
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 88219cc137c3..ac2ed8ec662b 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -108,9 +108,15 @@
* The driver is expected to initialize its private per-queue data for stations
* and interfaces in the .add_interface and .sta_add ops.
*
- * The driver can't access the queue directly. To dequeue a frame, it calls
- * ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it
- * calls the .wake_tx_queue driver op.
+ * The driver can't access the queue directly. To dequeue a frame from a
+ * txq, it calls ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a
+ * queue, it calls the .wake_tx_queue driver op.
+ *
+ * Drivers can optionally delegate responsibility for scheduling queues to
+ * mac80211, to take advantage of airtime fairness accounting. In this case, to
+ * obtain the next queue to pull frames from, the driver calls
+ * ieee80211_next_txq(). The driver is then expected to return the txq using
+ * ieee80211_return_txq().
*
* For AP powersave TIM handling, the driver only needs to indicate if it has
* buffered packets in the driver specific data structures by calling
@@ -585,6 +591,14 @@ struct ieee80211_ftm_responder_params {
* @ftm_responder: whether to enable or disable fine timing measurement FTM
* responder functionality.
* @ftmr_params: configurable lci/civic parameter when enabling FTM responder.
+ * @nontransmitted: this BSS is a nontransmitted BSS profile
+ * @transmitter_bssid: the address of transmitter AP
+ * @bssid_index: index inside the multiple BSSID set
+ * @bssid_indicator: 2^bssid_indicator is the maximum number of APs in set
+ * @ema_ap: AP supports enhancements of discovery and advertisement of
+ * nontransmitted BSSIDs
+ * @profile_periodicity: the minimum number of beacon frames that need to be
+ * received in order to discover all the nontransmitted BSSIDs in the set.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -638,6 +652,13 @@ struct ieee80211_bss_conf {
bool protected_keep_alive;
bool ftm_responder;
struct ieee80211_ftm_responder_params *ftmr_params;
+ /* Multiple BSSID data */
+ bool nontransmitted;
+ u8 transmitter_bssid[ETH_ALEN];
+ u8 bssid_index;
+ u8 bssid_indicator;
+ bool ema_ap;
+ u8 profile_periodicity;
};
/**
@@ -936,8 +957,32 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @band: the band to transmit on (use for checking for races)
* @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
* @ack_frame_id: internal frame ID for TX status, used internally
- * @control: union for control data
- * @status: union for status data
+ * @control: union part for control data
+ * @control.rates: TX rates array to try
+ * @control.rts_cts_rate_idx: rate for RTS or CTS
+ * @control.use_rts: use RTS
+ * @control.use_cts_prot: use RTS/CTS
+ * @control.short_preamble: use short preamble (CCK only)
+ * @control.skip_table: skip externally configured rate table
+ * @control.jiffies: timestamp for expiry on powersave clients
+ * @control.vif: virtual interface (may be NULL)
+ * @control.hw_key: key to encrypt with (may be NULL)
+ * @control.flags: control flags, see &enum mac80211_tx_control_flags
+ * @control.enqueue_time: enqueue time (for iTXQs)
+ * @driver_rates: alias to @control.rates to reserve space
+ * @pad: padding
+ * @rate_driver_data: driver use area if driver needs @control.rates
+ * @status: union part for status data
+ * @status.rates: attempted rates
+ * @status.ack_signal: ACK signal
+ * @status.ampdu_ack_len: AMPDU ack length
+ * @status.ampdu_len: AMPDU length
+ * @status.antenna: (legacy, kept only for iwlegacy)
+ * @status.tx_time: airtime consumed for transmission
+ * @status.is_valid_ack_signal: ACK signal is valid
+ * @status.status_driver_data: driver use area
+ * @ack: union part for pure ACK data
+ * @ack.cookie: cookie for the ACK
* @driver_data: array of driver_data pointers
* @ampdu_ack_len: number of acked aggregated frames.
* relevant only if IEEE80211_TX_STAT_AMPDU was set.
@@ -1157,6 +1202,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
* @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
* (&struct ieee80211_radiotap_he, mac80211 will fill in
+ *
* - DATA3_DATA_MCS
* - DATA3_DATA_DCM
* - DATA3_CODING
@@ -1164,6 +1210,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* - DATA5_DATA_BW_RU_ALLOC
* - DATA6_NSTS
* - DATA3_STBC
+ *
* from the RX info data, so leave those zeroed when building this data)
* @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
* (&struct ieee80211_radiotap_he_mu)
@@ -1214,7 +1261,7 @@ enum mac80211_rx_flags {
* @RX_ENC_FLAG_HT_GF: This frame was received in a HT-greenfield transmission,
* if the driver fills this value it should add
* %IEEE80211_RADIOTAP_MCS_HAVE_FMT
- * to hw.radiotap_mcs_details to advertise that fact
+ * to @hw.radiotap_mcs_details to advertise that fact.
* @RX_ENC_FLAG_LDPC: LDPC was used
* @RX_ENC_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
* @RX_ENC_FLAG_BF: packet was beamformed
@@ -1472,6 +1519,9 @@ struct ieee80211_conf {
* scheduled channel switch, as indicated by the AP.
* @chandef: the new channel to switch to
* @count: the number of TBTT's until the channel switch event
+ * @delay: maximum delay between the time the AP transmitted the last beacon in
+ * current channel and the expected time of the first beacon in the new
+ * channel, expressed in TU.
*/
struct ieee80211_channel_switch {
u64 timestamp;
@@ -1479,6 +1529,7 @@ struct ieee80211_channel_switch {
bool block_tx;
struct cfg80211_chan_def chandef;
u8 count;
+ u32 delay;
};
/**
@@ -2184,6 +2235,14 @@ struct ieee80211_txq {
* MMPDUs on station interfaces. This of course requires the driver to use
* TXQs to start with.
*
+ * @IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN: Driver does not report accurate A-MPDU
+ * length in tx status information
+ *
+ * @IEEE80211_HW_SUPPORTS_MULTI_BSSID: Hardware supports multi BSSID
+ *
+ * @IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID: Hardware supports multi BSSID
+ * only for HE APs. Applies if @IEEE80211_HW_SUPPORTS_MULTI_BSSID is set.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2232,6 +2291,9 @@ enum ieee80211_hw_flags {
IEEE80211_HW_BUFF_MMPDU_TXQ,
IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
IEEE80211_HW_STA_MMPDU_TXQ,
+ IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN,
+ IEEE80211_HW_SUPPORTS_MULTI_BSSID,
+ IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2323,12 +2385,14 @@ enum ieee80211_hw_flags {
* @radiotap_he: HE radiotap validity flags
*
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
- * 'units_pos' member is set to a non-negative value it must be set to
- * a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
- * IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value, and then the timestamp
+ * @units_pos member is set to a non-negative value then the timestamp
* field will be added and populated from the &struct ieee80211_rx_status
- * device_timestamp. If the 'accuracy' member is non-negative, it's put
- * into the accuracy radiotap field and the accuracy known flag is set.
+ * device_timestamp.
+ * @radiotap_timestamp.units_pos: Must be set to a combination of a
+ * IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
+ * IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value.
+ * @radiotap_timestamp.accuracy: If non-negative, fills the accuracy in the
+ * radiotap field and the accuracy known flag will be set.
*
* @netdev_features: netdev features to be set in each netdev created
* from this HW. Note that not all features are usable with mac80211,
@@ -2354,6 +2418,9 @@ enum ieee80211_hw_flags {
* @tx_sk_pacing_shift: Pacing shift to set on TCP sockets when frames from
* them are encountered. The default should typically not be changed,
* unless the driver has good reasons for needing more buffers.
+ *
+ * @weight_multiplier: Driver specific airtime weight multiplier used while
+ * refilling deficit of each TXQ.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2390,6 +2457,7 @@ struct ieee80211_hw {
const struct ieee80211_cipher_scheme *cipher_schemes;
u8 max_nan_de_entries;
u8 tx_sk_pacing_shift;
+ u8 weight_multiplier;
};
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
@@ -3575,7 +3643,12 @@ enum ieee80211_reconfig_type {
* @post_channel_switch: This is an optional callback that is called
* after a channel switch procedure is completed, allowing the
* driver to go back to a normal configuration.
- *
+ * @abort_channel_switch: This is an optional callback that is called
+ * when a channel switch procedure is aborted, allowing the
+ * driver to go back to a normal configuration.
+ * @channel_switch_rx_beacon: This is an optional callback that is called
+ * when a channel switch procedure is in progress and an additional beacon
+ * with a CSA IE is received, allowing the driver to track changes in the count.
* @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
* information in bss_conf is set up and the beacon can be retrieved. A
* channel context is bound before this is called.
@@ -3878,6 +3951,11 @@ struct ieee80211_ops {
int (*post_channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+ void (*abort_channel_switch)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *ch_switch);
int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
@@ -5402,6 +5480,34 @@ void ieee80211_sta_eosp(struct ieee80211_sta *pubsta);
void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid);
/**
+ * ieee80211_sta_register_airtime - register airtime usage for a sta/tid
+ *
+ * Register airtime usage for a given sta on a given tid. The driver can call
+ * this function to notify mac80211 that a station used a certain amount of
+ * airtime. This information will be used by the TXQ scheduler to schedule
+ * stations in a way that ensures airtime fairness.
+ *
+ * The reported airtime should as a minimum include all time that is spent
+ * transmitting to the remote station, including overhead and padding, but not
+ * including time spent waiting for a TXOP. If the time is not reported by the
+ * hardware it can in some cases be calculated from the rate and known frame
+ * composition. When possible, the time should include any failed transmission
+ * attempts.
+ *
+ * The driver can either call this function synchronously for every packet or
+ * aggregate, or asynchronously as airtime usage information becomes available.
+ * TX and RX airtime can be reported together, or separately by setting one of
+ * them to 0.
+ *
+ * @pubsta: the station
+ * @tid: the TID to register airtime for
+ * @tx_airtime: airtime used during TX (in usec)
+ * @rx_airtime: airtime used during RX (in usec)
+ */
+void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
+ u32 tx_airtime, u32 rx_airtime);
+
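A hedged sketch of a driver reporting TX airtime from its TX-status path; my_tx_status and its fields are invented for the example.

static void example_report_airtime(struct ieee80211_sta *sta,
				   const struct my_tx_status *status)
{
	/* usecs spent on the air for this TID; no RX airtime reported here */
	ieee80211_sta_register_airtime(sta, status->tid,
				       status->tx_airtime_usec, 0);
}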
+/**
* ieee80211_iter_keys - iterate keys programmed into the device
* @hw: pointer obtained from ieee80211_alloc_hw()
* @vif: virtual interface to iterate, may be %NULL for all
@@ -6103,7 +6209,8 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
* ieee80211_tx_dequeue - dequeue a packet from a software tx queue
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
+ * @txq: pointer obtained from station or virtual interface, or from
+ * ieee80211_next_txq()
*
* Returns the skb if successful, %NULL if no frame was available.
*
@@ -6119,6 +6226,94 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
/**
+ * ieee80211_next_txq - get next tx queue to pull packets from
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @ac: AC number to return packets from.
+ *
+ * Should only be called between calls to ieee80211_txq_schedule_start()
+ * and ieee80211_txq_schedule_end().
+ * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
+ * is returned, it should be returned with ieee80211_return_txq() after the
+ * driver has finished scheduling it.
+ */
+struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
+
+/**
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ *
+ * Should only be called between calls to ieee80211_txq_schedule_start()
+ * and ieee80211_txq_schedule_end().
+ */
+void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @ac: AC number to acquire locks for
+ *
+ * Acquire locks needed to schedule TXQs from the given AC. Should be called
+ * before ieee80211_next_txq() or ieee80211_return_txq().
+ */
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
+ __acquires(txq_lock);
+
+/**
+ * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @ac: AC number to acquire locks for
+ *
+ * Release locks previously acquired by ieee80211_txq_schedule_start().
+ */
+void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+ __releases(txq_lock);
+
+/**
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ *
+ * Schedules a TXQ for transmission if it is not already scheduled. Takes a
+ * lock, which means it must *not* be called between
+ * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ */
+void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+ __acquires(txq_lock) __releases(txq_lock);
+
+/**
+ * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
+ *
+ * This function is used to check whether a given txq is allowed to transmit by
+ * the airtime scheduler, and can be used by drivers to access the airtime
+ * fairness accounting without using the scheduling order enforced by
+ * next_txq().
+ *
+ * Returns %true if the airtime scheduler thinks the TXQ should be allowed to
+ * transmit, and %false if it should be throttled. This function can also have
+ * the side effect of rotating the TXQ in the scheduler rotation, which will
+ * eventually bring the deficit to positive and allow the station to transmit
+ * again.
+ *
+ * The API ieee80211_txq_may_transmit() also ensures that the TXQ list will be
+ * aligned against the driver's own round-robin scheduler list, i.e. it rotates
+ * the TXQ list until the requested node becomes the first entry
+ * in the TXQ list. Thus both the TXQ list and the driver's list are in sync. If this
+ * function returns %true, the driver is expected to schedule packets
+ * for transmission, and then return the TXQ through ieee80211_return_txq().
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ */
+bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+
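A hedged sketch of the driver-side scheduling loop the API above implies for one AC: bracket the loop with schedule_start/end, pull TXQs with next_txq(), dequeue a bounded number of frames from each, and hand the TXQ back with return_txq(). my_drv_tx_one() and the 16-frame budget are assumptions, not part of mac80211.

static void example_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);

	while ((txq = ieee80211_next_txq(hw, ac))) {
		struct sk_buff *skb;
		int budget = 16;

		while (budget-- > 0 &&
		       (skb = ieee80211_tx_dequeue(hw, txq)))
			my_drv_tx_one(hw, txq, skb);	/* hypothetical */

		/* hand the txq back so airtime accounting can rotate it */
		ieee80211_return_txq(hw, txq);
	}

	ieee80211_txq_schedule_end(hw, ac);
}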
+/**
* ieee80211_txq_get_depth - get pending frame/byte count of given txq
*
* The values are not guaranteed to be coherent with regard to each other, i.e.
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 99d4148e0f90..a68ced28d8f4 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -31,6 +31,7 @@
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
+#include <net/netns/xdp.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
@@ -161,6 +162,9 @@ struct net {
#if IS_ENABLED(CONFIG_CAN)
struct netns_can can;
#endif
+#ifdef CONFIG_XDP_SOCKETS
+ struct netns_xdp xdp;
+#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
} __randomize_layout;
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 4cd56808ac4e..89808ce293c4 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -43,7 +43,6 @@ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
}
struct net_device *setup_pre_routing(struct sk_buff *skb);
-void br_netfilter_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
int br_validate_ipv6(struct net *net, struct sk_buff *skb);
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 135ee702c7b0..2c8c2b023848 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -22,5 +22,8 @@ extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite;
#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre;
+#endif
#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 2eb43fcefc50..40e0e0623f46 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -5,6 +5,7 @@
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/icmp.h>
+#include <net/netfilter/nf_reject.h>
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook);
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 3a5a9a36a0b2..4a3ef9ebdf6f 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -3,6 +3,7 @@
#define _IPV6_NF_REJECT_H
#include <linux/icmpv6.h>
+#include <net/netfilter/nf_reject.h>
void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
unsigned int hooknum);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 249d0a5b12b8..5ee7b30b4917 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -187,28 +187,26 @@ bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num, struct net *net,
struct nf_conntrack_tuple *tuple);
-bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
- unsigned long extra_jiffies, int do_acct);
+ u32 extra_jiffies, bool do_acct);
/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
- unsigned long extra_jiffies)
+ u32 extra_jiffies)
{
- __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
+ __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
}
/* Refresh conntrack for this many jiffies */
static inline void nf_ct_refresh(struct nf_conn *ct,
const struct sk_buff *skb,
- unsigned long extra_jiffies)
+ u32 extra_jiffies)
{
- __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
+ __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
}
/* kill conntrack and do accounting */
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index afc9b3620473..ae41e92251dd 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -26,7 +26,7 @@ int nf_conntrack_init_net(struct net *net);
void nf_conntrack_cleanup_net(struct net *net);
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
-int nf_conntrack_proto_pernet_init(struct net *net);
+void nf_conntrack_proto_pernet_init(struct net *net);
void nf_conntrack_proto_pernet_fini(struct net *net);
int nf_conntrack_proto_init(void);
@@ -39,8 +39,7 @@ void nf_conntrack_init_end(void);
void nf_conntrack_cleanup_end(void);
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l4proto *l4proto);
+ const struct nf_conntrack_tuple *orig);
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index ae7b86f587f2..778087591983 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -27,27 +27,6 @@ struct nf_conntrack_l4proto {
/* protoinfo nlattr size, closes a hole */
u16 nlattr_size;
- /* Try to fill in the third arg: dataoff is offset past network protocol
- hdr. Return true if possible. */
- bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple);
-
- /* Invert the per-proto part of the tuple: ie. turn xmit into reply.
- * Only used by icmp, most protocols use a generic version.
- */
- bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
-
- /* Returns verdict for packet, or -1 for invalid. */
- int (*packet)(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state);
-
- /* Called when a conntrack entry is destroyed */
- void (*destroy)(struct nf_conn *ct);
-
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
@@ -79,16 +58,22 @@ struct nf_conntrack_l4proto {
/* Print out the private part of the conntrack. */
void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
#endif
- unsigned int *net_id;
- /* Init l4proto pernet data */
- int (*init_net)(struct net *net);
+};
- /* Return the per-net protocol part. */
- struct nf_proto_net *(*get_net_proto)(struct net *net);
+bool icmp_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple);
- /* Module (if any) which this is connected to. */
- struct module *me;
-};
+bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple);
+
+bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig);
+bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig);
int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
struct sk_buff *skb,
@@ -99,31 +84,63 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state);
+
+int nf_conntrack_icmp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_udp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_udplite_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_dccp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+int nf_conntrack_gre_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
+
+void nf_conntrack_generic_init_net(struct net *net);
+void nf_conntrack_tcp_init_net(struct net *net);
+void nf_conntrack_udp_init_net(struct net *net);
+void nf_conntrack_gre_init_net(struct net *net);
+void nf_conntrack_dccp_init_net(struct net *net);
+void nf_conntrack_sctp_init_net(struct net *net);
+void nf_conntrack_icmp_init_net(struct net *net);
+void nf_conntrack_icmpv6_init_net(struct net *net);
+
/* Existing built-in generic protocol */
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
#define MAX_NF_CT_PROTO IPPROTO_UDPLITE
-const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto);
-
-const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4proto);
-void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p);
-
-/* Protocol pernet registration. */
-int nf_ct_l4proto_pernet_register_one(struct net *net,
- const struct nf_conntrack_l4proto *proto);
-void nf_ct_l4proto_pernet_unregister_one(struct net *net,
- const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_pernet_register(struct net *net,
- const struct nf_conntrack_l4proto *const proto[],
- unsigned int num_proto);
-void nf_ct_l4proto_pernet_unregister(struct net *net,
- const struct nf_conntrack_l4proto *const proto[],
- unsigned int num_proto);
-
-/* Protocol global registration. */
-int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
-void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto);
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
@@ -192,4 +209,11 @@ static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
}
#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+static inline struct nf_gre_net *nf_gre_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.gre;
+}
+#endif
+
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
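
The header now exposes a single const lookup, nf_ct_l4proto_find(), in place of the register/unregister and find_get/put pairs removed above. A minimal user-space sketch of that lookup-table pattern follows; the l4proto descriptor fields, the table contents and the generic fallback entry are invented here for illustration and are not the kernel's definitions.

	/* build: cc -o l4find l4find.c */
	#include <netinet/in.h>		/* IPPROTO_* constants */
	#include <stdio.h>

	struct l4proto {		/* stand-in for struct nf_conntrack_l4proto */
		unsigned char l4proto;
		const char *name;
	};

	static const struct l4proto proto_generic = { 255, "generic" };
	static const struct l4proto proto_tcp  = { IPPROTO_TCP,  "tcp"  };
	static const struct l4proto proto_udp  = { IPPROTO_UDP,  "udp"  };
	static const struct l4proto proto_icmp = { IPPROTO_ICMP, "icmp" };

	/* Built-in table indexed by protocol number; empty slots fall back to
	 * the generic tracker, mirroring how nf_ct_l4proto_find() can return a
	 * plain const pointer with no refcounting.
	 */
	static const struct l4proto *l4proto_tbl[256] = {
		[IPPROTO_TCP]  = &proto_tcp,
		[IPPROTO_UDP]  = &proto_udp,
		[IPPROTO_ICMP] = &proto_icmp,
	};

	static const struct l4proto *l4proto_find(unsigned char num)
	{
		return l4proto_tbl[num] ? l4proto_tbl[num] : &proto_generic;
	}

	int main(void)
	{
		printf("%d -> %s\n", IPPROTO_TCP, l4proto_find(IPPROTO_TCP)->name);
		printf("%d -> %s\n", IPPROTO_GRE, l4proto_find(IPPROTO_GRE)->name);
		return 0;
	}

With the trackers compiled in, nothing needs to take or drop a module reference, which is why the old nf_ct_l4proto_put() calls can go away.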
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 7d5cda7ce32a..3e370cb36263 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -84,7 +84,6 @@ struct flow_offload {
struct nf_flow_route {
struct {
struct dst_entry *dst;
- int ifindex;
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index a17eb2f8d40e..cf332c4e0b32 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -31,8 +31,7 @@ struct nf_conn;
/* The structure embedded in the conntrack structure. */
struct nf_conn_nat {
union nf_conntrack_nat_help help;
-#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
- IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE)
int masq_index;
#endif
};
@@ -47,10 +46,6 @@ extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct);
-/* Is this tuple already taken? (not by us)*/
-int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
- const struct nf_conn *ignored_conntrack);
-
static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
{
#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
@@ -65,8 +60,7 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
struct nf_conn_nat *nat,
const struct net_device *out)
{
-#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
- IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
+#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE)
return nat && nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL &&
nat->masq_index != out->ifindex;
@@ -79,4 +73,43 @@ int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
const struct nf_hook_ops *nat_ops, unsigned int ops_count);
void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
unsigned int ops_count);
+
+unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int hooknum, struct sk_buff *skb);
+
+unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
+ enum nf_nat_manip_type mtype,
+ enum ip_conntrack_dir dir);
+void nf_nat_csum_recalc(struct sk_buff *skb,
+ u8 nfproto, u8 proto, void *data, __sum16 *check,
+ int datalen, int oldlen);
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum);
+
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum, unsigned int hdrlen);
+
+int nf_nat_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
+
+int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
+
+unsigned int
+nf_nat_inet_fn(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+int nf_xfrm_me_harder(struct net *n, struct sk_buff *s, unsigned int family);
+
+static inline int nf_nat_initialized(struct nf_conn *ct,
+ enum nf_nat_manip_type manip)
+{
+ if (manip == NF_NAT_MANIP_SRC)
+ return ct->status & IPS_SRC_NAT_DONE;
+ else
+ return ct->status & IPS_DST_NAT_DONE;
+}
#endif
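
nf_nat_initialized() now lives here (moved from the deleted nf_nat_core.h below). Its logic is a per-direction status-bit test; the following user-space sketch mirrors it, with the IPS_*_DONE bit positions copied from the conntrack uapi header purely for illustration.

	/* build: cc -o natinit natinit.c */
	#include <stdio.h>

	/* Bit positions copied from the conntrack uapi header for illustration. */
	#define IPS_SRC_NAT_DONE	(1u << 7)
	#define IPS_DST_NAT_DONE	(1u << 8)

	enum nat_manip_type { NAT_MANIP_SRC, NAT_MANIP_DST };

	struct conn { unsigned int status; };

	/* Mirrors nf_nat_initialized(): one "done" bit per manip direction says
	 * whether a NAT binding has already been chosen for this flow.
	 */
	static int nat_initialized(const struct conn *ct, enum nat_manip_type manip)
	{
		if (manip == NAT_MANIP_SRC)
			return ct->status & IPS_SRC_NAT_DONE;
		return ct->status & IPS_DST_NAT_DONE;
	}

	int main(void)
	{
		struct conn ct = { .status = IPS_SRC_NAT_DONE };

		printf("snat done: %d, dnat done: %d\n",
		       !!nat_initialized(&ct, NAT_MANIP_SRC),
		       !!nat_initialized(&ct, NAT_MANIP_DST));
		return 0;
	}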
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
deleted file mode 100644
index dc7cd0440229..000000000000
--- a/include/net/netfilter/nf_nat_core.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_NAT_CORE_H
-#define _NF_NAT_CORE_H
-#include <linux/list.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-
-/* This header used to share core functionality between the standalone
- NAT module, and the compatibility layer's use of NAT for masquerading. */
-
-unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int hooknum, struct sk_buff *skb);
-
-unsigned int
-nf_nat_inet_fn(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state);
-
-int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family);
-
-static inline int nf_nat_initialized(struct nf_conn *ct,
- enum nf_nat_manip_type manip)
-{
- if (manip == NF_NAT_MANIP_SRC)
- return ct->status & IPS_SRC_NAT_DONE;
- else
- return ct->status & IPS_DST_NAT_DONE;
-}
-
-#endif /* _NF_NAT_CORE_H */
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
deleted file mode 100644
index d774ca0c4c5e..000000000000
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_NAT_L3PROTO_H
-#define _NF_NAT_L3PROTO_H
-
-struct nf_nat_l3proto {
- u8 l3proto;
-
- bool (*manip_pkt)(struct sk_buff *skb,
- unsigned int iphdroff,
- const struct nf_conntrack_tuple *target,
- enum nf_nat_manip_type maniptype);
-
- void (*csum_update)(struct sk_buff *skb, unsigned int iphdroff,
- __sum16 *check,
- const struct nf_conntrack_tuple *t,
- enum nf_nat_manip_type maniptype);
-
- void (*csum_recalc)(struct sk_buff *skb, u8 proto,
- void *data, __sum16 *check,
- int datalen, int oldlen);
-
- void (*decode_session)(struct sk_buff *skb,
- const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- unsigned long statusbit,
- struct flowi *fl);
-
- int (*nlattr_to_range)(struct nlattr *tb[],
- struct nf_nat_range2 *range);
-};
-
-int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
-void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
-const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
-
-int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum);
-
-int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum, unsigned int hdrlen);
-
-int nf_nat_l3proto_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops);
-void nf_nat_l3proto_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
-
-int nf_nat_l3proto_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops);
-void nf_nat_l3proto_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
-
-#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
deleted file mode 100644
index 95a4655bd1ad..000000000000
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Header for use in defining a given protocol. */
-#ifndef _NF_NAT_L4PROTO_H
-#define _NF_NAT_L4PROTO_H
-#include <net/netfilter/nf_nat.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-struct nf_nat_l3proto;
-
-/* Translate a packet to the target according to manip type. Return on success. */
-bool nf_nat_l4proto_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype);
-#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h
new file mode 100644
index 000000000000..221f877f29d1
--- /dev/null
+++ b/include/net/netfilter/nf_reject.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_REJECT_H
+#define _NF_REJECT_H
+
+static inline bool nf_reject_verify_csum(__u8 proto)
+{
+ /* Skip protocols that don't use 16-bit one's complement checksum
+ * of the entire payload.
+ */
+ switch (proto) {
+ /* Protocols with other integrity checks. */
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ case IPPROTO_SCTP:
+
+ /* Protocols with partial checksums. */
+ case IPPROTO_UDPLITE:
+ case IPPROTO_DCCP:
+
+ /* Protocols with optional checksums. */
+ case IPPROTO_GRE:
+ return false;
+ }
+ return true;
+}
+
+#endif /* _NF_REJECT_H */
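
The new nf_reject_verify_csum() helper is self-contained enough to exercise outside the kernel. Below is a user-space copy of the same switch, assuming only glibc's IPPROTO_* constants; it shows which protocols the reject paths will skip checksum validation for.

	/* build: cc -o rejcsum rejcsum.c */
	#include <netinet/in.h>		/* IPPROTO_AH, IPPROTO_ESP, IPPROTO_SCTP, ... */
	#include <stdbool.h>
	#include <stdio.h>

	/* User-space copy of nf_reject_verify_csum(): false means the protocol's
	 * integrity check is not a plain 16-bit one's complement sum over the
	 * whole payload, so the reject path skips checksum validation.
	 */
	static bool reject_verify_csum(unsigned char proto)
	{
		switch (proto) {
		case IPPROTO_AH:	/* own ICV */
		case IPPROTO_ESP:
		case IPPROTO_SCTP:	/* CRC32c */
		case IPPROTO_UDPLITE:	/* partial coverage */
		case IPPROTO_DCCP:
		case IPPROTO_GRE:	/* checksum optional */
			return false;
		}
		return true;
	}

	int main(void)
	{
		printf("tcp: %d sctp: %d gre: %d\n",
		       reject_verify_csum(IPPROTO_TCP),
		       reject_verify_csum(IPPROTO_SCTP),
		       reject_verify_csum(IPPROTO_GRE));
		return 0;
	}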
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 841835a387e1..c331e96a713b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -469,9 +469,7 @@ struct nft_set_binding {
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
- struct nft_set_binding *binding);
-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
- struct nft_set_binding *binding);
+ struct nft_set_binding *binding, bool commit);
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
/**
@@ -692,10 +690,12 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
gcb->elems[gcb->head.cnt++] = elem;
}
+struct nft_expr_ops;
/**
* struct nft_expr_type - nf_tables expression type
*
* @select_ops: function to select nft_expr_ops
+ * @release_ops: release nft_expr_ops
* @ops: default ops, used when no select_ops functions is present
* @list: used internally
* @name: Identifier
@@ -708,6 +708,7 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
struct nft_expr_type {
const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
const struct nlattr * const tb[]);
+ void (*release_ops)(const struct nft_expr_ops *ops);
const struct nft_expr_ops *ops;
struct list_head list;
const char *name;
@@ -721,6 +722,13 @@ struct nft_expr_type {
#define NFT_EXPR_STATEFUL 0x1
#define NFT_EXPR_GC 0x2
+enum nft_trans_phase {
+ NFT_TRANS_PREPARE,
+ NFT_TRANS_ABORT,
+ NFT_TRANS_COMMIT,
+ NFT_TRANS_RELEASE
+};
+
/**
* struct nft_expr_ops - nf_tables expression operations
*
@@ -750,7 +758,8 @@ struct nft_expr_ops {
void (*activate)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
void (*deactivate)(const struct nft_ctx *ctx,
- const struct nft_expr *expr);
+ const struct nft_expr *expr,
+ enum nft_trans_phase phase);
void (*destroy)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
void (*destroy_clone)(const struct nft_ctx *ctx,
@@ -1012,21 +1021,32 @@ int nft_verdict_dump(struct sk_buff *skb, int type,
const struct nft_verdict *v);
/**
+ * struct nft_object_hash_key - key to lookup nft_object
+ *
+ * @name: name of the stateful object to look up
+ * @table: table the object belongs to
+ */
+struct nft_object_hash_key {
+ const char *name;
+ const struct nft_table *table;
+};
+
+/**
* struct nft_object - nf_tables stateful object
*
* @list: table stateful object list node
- * @table: table this object belongs to
- * @name: name of this stateful object
+ * @key: keys that identify this object
+ * @rhlhead: nft_objname_ht node
* @genmask: generation mask
* @use: number of references to this stateful object
* @handle: unique object handle
* @ops: object operations
- * @data: object data, layout depends on type
+ * @data: object data, layout depends on type
*/
struct nft_object {
struct list_head list;
- char *name;
- struct nft_table *table;
+ struct rhlist_head rhlhead;
+ struct nft_object_hash_key key;
u32 genmask:2,
use:30;
u64 handle;
@@ -1043,11 +1063,12 @@ static inline void *nft_obj_data(const struct nft_object *obj)
#define nft_expr_obj(expr) *((struct nft_object **)nft_expr_priv(expr))
-struct nft_object *nft_obj_lookup(const struct nft_table *table,
+struct nft_object *nft_obj_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla, u32 objtype,
u8 genmask);
-void nft_obj_notify(struct net *net, struct nft_table *table,
+void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq,
int event, int family, int report, gfp_t gfp);
@@ -1323,12 +1344,15 @@ struct nft_trans_rule {
struct nft_trans_set {
struct nft_set *set;
u32 set_id;
+ bool bound;
};
#define nft_trans_set(trans) \
(((struct nft_trans_set *)trans->data)->set)
#define nft_trans_set_id(trans) \
(((struct nft_trans_set *)trans->data)->set_id)
+#define nft_trans_set_bound(trans) \
+ (((struct nft_trans_set *)trans->data)->bound)
struct nft_trans_chain {
bool update;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 2046d104f323..7281895fa6d9 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -80,6 +80,22 @@ struct nft_regs;
struct nft_pktinfo;
void nft_meta_get_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_cmp_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
void nft_lookup_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_payload_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_immediate_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_bitwise_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_range_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_byteorder_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_dynset_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_rt_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h
deleted file mode 100644
index e51ab3815797..000000000000
--- a/include/net/netfilter/nft_masq.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NFT_MASQ_H_
-#define _NFT_MASQ_H_
-
-struct nft_masq {
- u32 flags;
- enum nft_registers sreg_proto_min:8;
- enum nft_registers sreg_proto_max:8;
-};
-
-extern const struct nla_policy nft_masq_policy[];
-
-int nft_masq_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[]);
-
-int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr);
-
-int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data);
-
-#endif /* _NFT_MASQ_H_ */
diff --git a/include/net/netfilter/nft_redir.h b/include/net/netfilter/nft_redir.h
deleted file mode 100644
index 4a970737c03c..000000000000
--- a/include/net/netfilter/nft_redir.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NFT_REDIR_H_
-#define _NFT_REDIR_H_
-
-struct nft_redir {
- enum nft_registers sreg_proto_min:8;
- enum nft_registers sreg_proto_max:8;
- u16 flags;
-};
-
-extern const struct nla_policy nft_redir_policy[];
-
-int nft_redir_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[]);
-
-int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr);
-
-int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data);
-
-#endif /* _NFT_REDIR_H_ */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 4c1e99303b5a..23f27b0b3cef 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -306,10 +306,14 @@ struct nla_policy {
#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
-#define NLA_POLICY_NESTED(maxattr, policy) \
+#define _NLA_POLICY_NESTED(maxattr, policy) \
{ .type = NLA_NESTED, .validation_data = policy, .len = maxattr }
-#define NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
+#define _NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
{ .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr }
+#define NLA_POLICY_NESTED(policy) \
+ _NLA_POLICY_NESTED(ARRAY_SIZE(policy) - 1, policy)
+#define NLA_POLICY_NESTED_ARRAY(policy) \
+ _NLA_POLICY_NESTED_ARRAY(ARRAY_SIZE(policy) - 1, policy)
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_INT_TYPE(tp) \
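
The reworked NLA_POLICY_NESTED()/NLA_POLICY_NESTED_ARRAY() macros derive maxattr from ARRAY_SIZE(policy) - 1 instead of taking it as an argument. A small stand-alone C sketch of the same trick follows, using an invented struct policy rather than the real struct nla_policy.

	/* build: cc -o nested nested.c */
	#include <stdio.h>

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	struct policy { int type; int len; };	/* stand-in for struct nla_policy */

	/* A policy array is sized [MAXATTR + 1] and indexed by attribute id, so
	 * ARRAY_SIZE(policy) - 1 recovers the maximum attribute number and the
	 * caller no longer has to pass it, which is the point of the reworked
	 * NLA_POLICY_NESTED() above.
	 */
	#define POLICY_NESTED(policy) \
		{ .type = 1 /* nested */, .len = ARRAY_SIZE(policy) - 1 }

	enum { ATTR_UNSPEC, ATTR_FOO, ATTR_BAR, __ATTR_MAX };
	#define ATTR_MAX	(__ATTR_MAX - 1)

	static const struct policy inner_policy[ATTR_MAX + 1] = {
		[ATTR_FOO] = { .type = 2, .len = 4 },
		[ATTR_BAR] = { .type = 3, .len = 8 },
	};

	static const struct policy outer = POLICY_NESTED(inner_policy);

	int main(void)
	{
		printf("derived maxattr = %d (expected %d)\n", outer.len, ATTR_MAX);
		return 0;
	}

Deriving the bound from the array itself removes one place where a policy and its maxattr could drift out of sync.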
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 51cba0b8adf5..f19b53130bf7 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -18,21 +18,11 @@
struct ctl_table_header;
struct nf_conntrack_ecache;
-struct nf_proto_net {
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header *ctl_table_header;
- struct ctl_table *ctl_table;
-#endif
- unsigned int users;
-};
-
struct nf_generic_net {
- struct nf_proto_net pn;
unsigned int timeout;
};
struct nf_tcp_net {
- struct nf_proto_net pn;
unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
unsigned int tcp_loose;
unsigned int tcp_be_liberal;
@@ -46,18 +36,15 @@ enum udp_conntrack {
};
struct nf_udp_net {
- struct nf_proto_net pn;
unsigned int timeouts[UDP_CT_MAX];
};
struct nf_icmp_net {
- struct nf_proto_net pn;
unsigned int timeout;
};
#ifdef CONFIG_NF_CT_PROTO_DCCP
struct nf_dccp_net {
- struct nf_proto_net pn;
int dccp_loose;
unsigned int dccp_timeout[CT_DCCP_MAX + 1];
};
@@ -65,11 +52,23 @@ struct nf_dccp_net {
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net {
- struct nf_proto_net pn;
unsigned int timeouts[SCTP_CONNTRACK_MAX];
};
#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+enum gre_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+struct nf_gre_net {
+ struct list_head keymap_list;
+ unsigned int timeouts[GRE_CT_MAX];
+};
+#endif
+
struct nf_ip_net {
struct nf_generic_net generic;
struct nf_tcp_net tcp;
@@ -82,6 +81,9 @@ struct nf_ip_net {
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net sctp;
#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ struct nf_gre_net gre;
+#endif
};
struct ct_pcpu {
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index ef1ed529f33c..b028a1dc150d 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -83,7 +83,7 @@ struct netns_ipv6 {
struct fib6_table *fib6_local_tbl;
struct fib_rules_ops *fib6_rules_ops;
#endif
- struct sock **icmp_sk;
+ struct sock * __percpu *icmp_sk;
struct sock *ndisc_sk;
struct sock *tcp_sk;
struct sock *igmp_sk;
diff --git a/include/net/netns/xdp.h b/include/net/netns/xdp.h
new file mode 100644
index 000000000000..e5734261ba0a
--- /dev/null
+++ b/include/net/netns/xdp.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NETNS_XDP_H__
+#define __NETNS_XDP_H__
+
+#include <linux/rculist.h>
+#include <linux/mutex.h>
+
+struct netns_xdp {
+ struct mutex lock;
+ struct hlist_head list;
+};
+
+#endif /* __NETNS_XDP_H__ */
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b669fe6dbc3b..98f31c7ea23d 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -63,10 +63,11 @@ struct pnpipehdr {
u8 state_after_reset; /* reset request */
u8 error_code; /* any response */
u8 pep_type; /* status indication */
- u8 data[1];
+ u8 data0; /* anything else */
};
+ u8 data[];
};
-#define other_pep_type data[1]
+#define other_pep_type data[0]
static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
{
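
The pnpipehdr change above swaps a one-element trailing array for a named first byte plus a C99 flexible array member, so payload bytes are indexed from data[0]. A simplified user-space illustration of the same layout follows; the struct fields here are made up, not phonet's.

	/* build: cc -o flexhdr flexhdr.c */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Simplified version of the pnpipehdr layout change: the old one-element
	 * "data[1]" array becomes a named first byte plus a C99 flexible array
	 * member, so any extra payload is indexed from data[0].
	 */
	struct hdr {
		unsigned char utid;
		unsigned char msg_id;
		unsigned char data0;	/* first payload byte, always present */
		unsigned char data[];	/* anything after it */
	};

	int main(void)
	{
		size_t extra = 3;
		struct hdr *h = malloc(sizeof(*h) + extra);

		if (!h)
			return 1;
		h->data0 = 0xaa;
		memcpy(h->data, "\x01\x02\x03", extra);
		printf("first=0x%02x next=0x%02x\n", h->data0, h->data[0]);
		free(h);
		return 0;
	}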
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 40965fbbcd31..d5e7a1af346f 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -6,6 +6,7 @@
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
+#include <net/flow_offload.h>
/* TC action not accessible from user space */
#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
@@ -16,6 +17,7 @@ struct tcf_walker {
int stop;
int skip;
int count;
+ bool nonempty;
unsigned long cookie;
int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
@@ -43,6 +45,10 @@ bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
+struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
+ struct tcf_chain *chain);
+struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
+ struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -284,12 +290,13 @@ struct tcf_exts {
int police;
};
-static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
+static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
+ int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
exts->nr_actions = 0;
- exts->net = NULL;
+ exts->net = net;
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL);
if (!exts->actions)
@@ -411,7 +418,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
- struct tcf_exts *exts, bool ovr,
+ struct tcf_exts *exts, bool ovr, bool rtnl_held,
struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
@@ -619,8 +626,11 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */
+int tc_setup_flow_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
void *type_data, bool err_stop);
+unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
enum tc_block_command {
TC_BLOCK_BIND,
@@ -760,13 +770,17 @@ struct tc_cls_flower_offload {
struct tc_cls_common_offload common;
enum tc_fl_command command;
unsigned long cookie;
- struct flow_dissector *dissector;
- struct fl_flow_key *mask;
- struct fl_flow_key *key;
- struct tcf_exts *exts;
+ struct flow_rule *rule;
+ struct flow_stats stats;
u32 classid;
};
+static inline struct flow_rule *
+tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
+{
+ return tc_flow_cmd->rule;
+}
+
enum tc_matchall_command {
TC_CLSMATCHALL_REPLACE,
TC_CLSMATCHALL_DESTROY,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 9481f2c142e2..31284c078d06 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -51,7 +52,10 @@ struct qdisc_size_table {
struct qdisc_skb_head {
struct sk_buff *head;
struct sk_buff *tail;
- __u32 qlen;
+ union {
+ u32 qlen;
+ atomic_t atomic_qlen;
+ };
spinlock_t lock;
};
@@ -178,6 +182,7 @@ static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
}
struct Qdisc_class_ops {
+ unsigned int flags;
/* Child qdisc manipulation */
struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
int (*graft)(struct Qdisc *, unsigned long cl,
@@ -209,6 +214,13 @@ struct Qdisc_class_ops {
struct gnet_dump *);
};
+/* Qdisc_class_ops flag values */
+
+/* Implements API that doesn't require rtnl lock */
+enum qdisc_class_ops_flags {
+ QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
+};
+
struct Qdisc_ops {
struct Qdisc_ops *next;
const struct Qdisc_class_ops *cl_ops;
@@ -272,19 +284,21 @@ struct tcf_proto_ops {
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- void (*destroy)(struct tcf_proto *tp,
+ void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack);
void* (*get)(struct tcf_proto*, u32 handle);
+ void (*put)(struct tcf_proto *tp, void *f);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- void **, bool,
+ void **, bool, bool,
struct netlink_ext_ack *);
int (*delete)(struct tcf_proto *tp, void *arg,
- bool *last,
+ bool *last, bool rtnl_held,
struct netlink_ext_ack *);
- void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+ void (*walk)(struct tcf_proto *tp,
+ struct tcf_walker *arg, bool rtnl_held);
int (*reoffload)(struct tcf_proto *tp, bool add,
tc_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack);
@@ -297,12 +311,18 @@ struct tcf_proto_ops {
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
- struct sk_buff *skb, struct tcmsg*);
+ struct sk_buff *skb, struct tcmsg*,
+ bool);
int (*tmplt_dump)(struct sk_buff *skb,
struct net *net,
void *tmplt_priv);
struct module *owner;
+ int flags;
+};
+
+enum tcf_proto_ops_flags {
+ TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
struct tcf_proto {
@@ -321,6 +341,12 @@ struct tcf_proto {
void *data;
const struct tcf_proto_ops *ops;
struct tcf_chain *chain;
+ /* Lock protects tcf_proto shared state and can be used by unlocked
+ * classifiers to protect their private data.
+ */
+ spinlock_t lock;
+ bool deleting;
+ refcount_t refcnt;
struct rcu_head rcu;
};
@@ -340,6 +366,8 @@ struct qdisc_skb_cb {
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
+ /* Protects filter_chain. */
+ struct mutex filter_chain_lock;
struct tcf_proto __rcu *filter_chain;
struct list_head list;
struct tcf_block *block;
@@ -347,11 +375,16 @@ struct tcf_chain {
unsigned int refcnt;
unsigned int action_refcnt;
bool explicitly_created;
+ bool flushing;
const struct tcf_proto_ops *tmplt_ops;
void *tmplt_priv;
};
struct tcf_block {
+ /* Lock protects tcf_block and lifetime-management data of chains
+ * attached to the block (refcnt, action_refcnt, explicitly_created).
+ */
+ struct mutex lock;
struct list_head chain_list;
u32 index; /* block index for shared blocks */
refcount_t refcnt;
@@ -369,6 +402,34 @@ struct tcf_block {
struct rcu_head rcu;
};
+#ifdef CONFIG_PROVE_LOCKING
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
+{
+ return lockdep_is_held(&chain->filter_chain_lock);
+}
+
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
+{
+ return lockdep_is_held(&tp->lock);
+}
+#else
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_block *chain)
+{
+ return true;
+}
+
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
+{
+ return true;
+}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
+#define tcf_chain_dereference(p, chain) \
+ rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
+
+#define tcf_proto_dereference(p, tp) \
+ rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
+
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
if (*flags & TCA_CLS_FLAGS_IN_HW)
@@ -408,27 +469,19 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
- return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
-static inline int qdisc_qlen_sum(const struct Qdisc *q)
+static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
{
- __u32 qlen = q->qstats.qlen;
- int i;
+ u32 qlen = q->qstats.qlen;
- if (q->flags & TCQ_F_NOLOCK) {
- for_each_possible_cpu(i)
- qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
- } else {
+ if (q->flags & TCQ_F_NOLOCK)
+ qlen += atomic_read(&q->q.atomic_qlen);
+ else
qlen += q->q.qlen;
- }
return qlen;
}
@@ -580,8 +633,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
-void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
- unsigned int len);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
void *type_data);
@@ -825,14 +877,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}
-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
{
- this_cpu_inc(sch->cpu_qstats->qlen);
+ atomic_inc(&sch->q.atomic_qlen);
}
-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
{
- this_cpu_dec(sch->cpu_qstats->qlen);
+ atomic_dec(&sch->q.atomic_qlen);
}
static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
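
For TCQ_F_NOLOCK qdiscs, the per-CPU qlen counters are replaced above by a single atomic_qlen that qdisc_qlen_sum() reads directly. The sketch below reproduces that read path in user space with C11 atomics; it is simplified (no qstats base value) and the struct is illustrative only.

	/* build: cc -std=c11 -o qlen qlen.c */
	#include <stdatomic.h>
	#include <stdio.h>

	#define TCQ_F_NOLOCK	0x100	/* flag value used here for illustration */

	struct qdisc {
		unsigned int flags;
		unsigned int qlen;		/* protected by the qdisc lock */
		atomic_uint atomic_qlen;	/* used instead when TCQ_F_NOLOCK */
	};

	/* Simplified qdisc_qlen_sum(): lockless qdiscs keep one shared atomic
	 * counter instead of per-CPU counters that had to be summed.
	 */
	static unsigned int qdisc_qlen_sum(struct qdisc *q)
	{
		if (q->flags & TCQ_F_NOLOCK)
			return atomic_load(&q->atomic_qlen);
		return q->qlen;
	}

	int main(void)
	{
		struct qdisc q = { .flags = TCQ_F_NOLOCK };

		atomic_fetch_add(&q.atomic_qlen, 2);	/* two enqueues */
		atomic_fetch_sub(&q.atomic_qlen, 1);	/* one dequeue */
		printf("qlen = %u\n", qdisc_qlen_sum(&q));
		return 0;
	}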
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 003020eb6e66..58e4b23cecf4 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -199,6 +199,8 @@ struct sctp_sock {
__u32 flowlabel;
__u8 dscp;
+ int pf_retrans;
+
/* The initial Path MTU to use for new associations. */
__u32 pathmtu;
@@ -209,6 +211,8 @@ struct sctp_sock {
/* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
__u32 param_flags;
+ __u32 default_ss;
+
struct sctp_rtoinfo rtoinfo;
struct sctp_paddrparams paddrparam;
struct sctp_assocparams assocparams;
diff --git a/include/net/smc.h b/include/net/smc.h
index 9ef49f8b1002..bd9c0fb3b577 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -74,6 +74,7 @@ struct smcd_dev {
struct list_head vlan;
struct workqueue_struct *event_wq;
u8 pnetid[SMC_MAX_PNETID_LEN];
+ bool pnetid_by_user;
};
struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
diff --git a/include/net/sock.h b/include/net/sock.h
index 2b229f7be8eb..328cb7cb7b0b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -805,6 +805,7 @@ enum sock_flags {
SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
SOCK_TXTIME,
SOCK_XDP, /* XDP is attached */
+ SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1277,7 +1278,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
percpu_counter_inc(sk->sk_prot->sockets_allocated);
}
-static inline int
+static inline u64
sk_sockets_allocated_read_positive(struct sock *sk)
{
return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index a7fdab5ee6c3..0ebd67ae7012 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -20,14 +20,7 @@
#define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1)
#define SWITCHDEV_F_DEFER BIT(2)
-struct switchdev_trans_item {
- struct list_head list;
- void *data;
- void (*destructor)(const void *data);
-};
-
struct switchdev_trans {
- struct list_head item_list;
bool ph_prepare;
};
@@ -43,10 +36,9 @@ static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans)
enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_UNDEFINED,
- SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
SWITCHDEV_ATTR_ID_PORT_STP_STATE,
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
- SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
+ SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,
SWITCHDEV_ATTR_ID_PORT_MROUTER,
SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
@@ -61,10 +53,8 @@ struct switchdev_attr {
void *complete_priv;
void (*complete)(struct net_device *dev, int err, void *priv);
union {
- struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
- unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
- unsigned long brport_flags_support; /* PORT_BRIDGE_FLAGS_SUPPORT */
+ unsigned long brport_flags; /* PORT_{PRE}_BRIDGE_FLAGS */
bool mrouter; /* PORT_MROUTER */
clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
@@ -108,28 +98,8 @@ struct switchdev_obj_port_mdb {
#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
container_of((OBJ), struct switchdev_obj_port_mdb, obj)
-void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
- void *data, void (*destructor)(void const *),
- struct switchdev_trans_item *tritem);
-void *switchdev_trans_item_dequeue(struct switchdev_trans *trans);
-
typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
-/**
- * struct switchdev_ops - switchdev operations
- *
- * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
- *
- * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
- */
-struct switchdev_ops {
- int (*switchdev_port_attr_get)(struct net_device *dev,
- struct switchdev_attr *attr);
- int (*switchdev_port_attr_set)(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans);
-};
-
enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
SWITCHDEV_FDB_DEL_TO_BRIDGE,
@@ -139,6 +109,7 @@ enum switchdev_notifier_type {
SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
+	SWITCHDEV_PORT_ATTR_SET, /* May be blocking. */
SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE,
SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE,
@@ -167,6 +138,13 @@ struct switchdev_notifier_port_obj_info {
bool handled;
};
+struct switchdev_notifier_port_attr_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_attr *attr;
+ struct switchdev_trans *trans;
+ bool handled;
+};
+
static inline struct net_device *
switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
@@ -182,8 +160,6 @@ switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info)
#ifdef CONFIG_NET_SWITCHDEV
void switchdev_deferred_process(void);
-int switchdev_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr);
int switchdev_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr);
int switchdev_port_obj_add(struct net_device *dev,
@@ -195,7 +171,8 @@ int switchdev_port_obj_del(struct net_device *dev,
int register_switchdev_notifier(struct notifier_block *nb);
int unregister_switchdev_notifier(struct notifier_block *nb);
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
- struct switchdev_notifier_info *info);
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack);
int register_switchdev_blocking_notifier(struct notifier_block *nb);
int unregister_switchdev_blocking_notifier(struct notifier_block *nb);
@@ -207,9 +184,6 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
-bool switchdev_port_same_parent_id(struct net_device *a,
- struct net_device *b);
-
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
@@ -223,19 +197,18 @@ int switchdev_handle_port_obj_del(struct net_device *dev,
int (*del_cb)(struct net_device *dev,
const struct switchdev_obj *obj));
-#define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops))
+int switchdev_handle_port_attr_set(struct net_device *dev,
+ struct switchdev_notifier_port_attr_info *port_attr_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*set_cb)(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans));
#else
static inline void switchdev_deferred_process(void)
{
}
-static inline int switchdev_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- return -EOPNOTSUPP;
-}
-
static inline int switchdev_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr)
{
@@ -267,7 +240,8 @@ static inline int unregister_switchdev_notifier(struct notifier_block *nb)
static inline int call_switchdev_notifiers(unsigned long val,
struct net_device *dev,
- struct switchdev_notifier_info *info)
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
{
return NOTIFY_DONE;
}
@@ -293,12 +267,6 @@ call_switchdev_blocking_notifiers(unsigned long val,
return NOTIFY_DONE;
}
-static inline bool switchdev_port_same_parent_id(struct net_device *a,
- struct net_device *b)
-{
- return false;
-}
-
static inline int
switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
@@ -321,8 +289,16 @@ switchdev_handle_port_obj_del(struct net_device *dev,
return 0;
}
-#define SWITCHDEV_SET_OPS(netdev, ops) do {} while (0)
-
+static inline int
+switchdev_handle_port_attr_set(struct net_device *dev,
+ struct switchdev_notifier_port_attr_info *port_attr_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*set_cb)(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans))
+{
+ return 0;
+}
#endif
#endif /* _LINUX_SWITCHDEV_H_ */
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 32d2454c0479..68269e4581b7 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -21,7 +21,7 @@ struct tcf_csum {
static inline bool is_tcf_csum(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->type == TCA_ACT_CSUM)
+ if (a->ops && a->ops->id == TCA_ID_CSUM)
return true;
#endif
return false;
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index ef8dd0db70ce..ee8d005f56fc 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -22,7 +22,7 @@ static inline bool __is_tcf_gact_act(const struct tc_action *a, int act,
#ifdef CONFIG_NET_CLS_ACT
struct tcf_gact *gact;
- if (a->ops && a->ops->type != TCA_ACT_GACT)
+ if (a->ops && a->ops->id != TCA_ID_GACT)
return false;
gact = to_gact(a);
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index a2e9cbca5c9e..c757585a05b0 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -17,7 +17,7 @@ struct tcf_mirred {
static inline bool is_tcf_mirred_egress_redirect(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->type == TCA_ACT_MIRRED)
+ if (a->ops && a->ops->id == TCA_ID_MIRRED)
return to_mirred(a)->tcfm_eaction == TCA_EGRESS_REDIR;
#endif
return false;
@@ -26,7 +26,7 @@ static inline bool is_tcf_mirred_egress_redirect(const struct tc_action *a)
static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->type == TCA_ACT_MIRRED)
+ if (a->ops && a->ops->id == TCA_ID_MIRRED)
return to_mirred(a)->tcfm_eaction == TCA_EGRESS_MIRROR;
#endif
return false;
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index fac3ad4a86de..748cf87a4d7e 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -23,7 +23,7 @@ struct tcf_pedit {
static inline bool is_tcf_pedit(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->type == TCA_ACT_PEDIT)
+ if (a->ops && a->ops->id == TCA_ID_PEDIT)
return true;
#endif
return false;
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
index 01dbfea32672..0a559d4b6f0f 100644
--- a/include/net/tc_act/tc_sample.h
+++ b/include/net/tc_act/tc_sample.h
@@ -20,7 +20,7 @@ struct tcf_sample {
static inline bool is_tcf_sample(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- return a->ops && a->ops->type == TCA_ACT_SAMPLE;
+ return a->ops && a->ops->id == TCA_ID_SAMPLE;
#else
return false;
#endif
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 911bbac838a2..85c5c4756d92 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -44,7 +44,7 @@ static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
#ifdef CONFIG_NET_CLS_ACT
u32 flags;
- if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
+ if (a->ops && a->ops->id == TCA_ID_SKBEDIT) {
rcu_read_lock();
flags = rcu_dereference(to_skbedit(a)->params)->flags;
rcu_read_unlock();
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index 46b8c7f1c8d5..23d5b8b19f3e 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -34,7 +34,7 @@ static inline bool is_tcf_tunnel_set(const struct tc_action *a)
struct tcf_tunnel_key *t = to_tunnel_key(a);
struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
- if (a->ops && a->ops->type == TCA_ACT_TUNNEL_KEY)
+ if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY)
return params->tcft_action == TCA_TUNNEL_KEY_ACT_SET;
#endif
return false;
@@ -46,7 +46,7 @@ static inline bool is_tcf_tunnel_release(const struct tc_action *a)
struct tcf_tunnel_key *t = to_tunnel_key(a);
struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
- if (a->ops && a->ops->type == TCA_ACT_TUNNEL_KEY)
+ if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY)
return params->tcft_action == TCA_TUNNEL_KEY_ACT_RELEASE;
#endif
return false;
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 22ae260d6869..fe39ed502bef 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -30,7 +30,7 @@ struct tcf_vlan {
static inline bool is_tcf_vlan(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->type == TCA_ACT_VLAN)
+ if (a->ops && a->ops->id == TCA_ID_VLAN)
return true;
#endif
return false;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e0a65c067662..68ee02523b87 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -406,8 +406,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
void tcp_data_ready(struct sock *sk);
+#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
+#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
@@ -1556,7 +1558,7 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
#ifdef CONFIG_TCP_MD5SIG
#include <linux/jump_label.h>
-extern struct static_key tcp_md5_needed;
+extern struct static_key_false tcp_md5_needed;
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr,
int family);
@@ -1565,7 +1567,7 @@ tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr,
int family)
{
- if (!static_key_false(&tcp_md5_needed))
+ if (!static_branch_unlikely(&tcp_md5_needed))
return NULL;
return __tcp_md5_do_lookup(sk, addr, family);
}
@@ -1606,6 +1608,7 @@ struct tcp_fastopen_request {
struct msghdr *data; /* data in MSG_FASTOPEN */
size_t size;
int copied; /* queued in tcp_connect() */
+ struct ubuf_info *uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
@@ -1713,20 +1716,9 @@ static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}
-static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
-{
- if (tcp_write_queue_empty(sk))
- tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
-}
-
-static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
-{
- __skb_queue_tail(&sk->sk_write_queue, skb);
-}
-
static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
- __tcp_add_write_queue_tail(sk, skb);
+ __skb_queue_tail(&sk->sk_write_queue, skb);
/* Queue it, remembering where we must start sending. */
if (sk->sk_write_queue.next == skb)
diff --git a/include/net/tls.h b/include/net/tls.h
index 2a6ac8d642af..a5a938583295 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -119,11 +119,21 @@ struct tls_rec {
/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
struct scatterlist sg_aead_out[2];
+ char content_type;
+ struct scatterlist sg_content_type;
+
char aad_space[TLS_AAD_SPACE_SIZE];
+ u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
+ TLS_CIPHER_AES_GCM_128_SALT_SIZE];
struct aead_request aead_req;
u8 aead_req_ctx[];
};
+struct tls_msg {
+ struct strp_msg rxm;
+ u8 control;
+};
+
struct tx_work {
struct delayed_work work;
struct sock *sk;
@@ -137,6 +147,7 @@ struct tls_sw_context_tx {
struct list_head tx_list;
atomic_t encrypt_pending;
int async_notify;
+ int async_capable;
#define BIT_TX_SCHEDULED 0
unsigned long tx_bitmask;
@@ -145,12 +156,13 @@ struct tls_sw_context_tx {
struct tls_sw_context_rx {
struct crypto_aead *aead_recv;
struct crypto_wait async_wait;
-
struct strparser strp;
+ struct sk_buff_head rx_list; /* list of decrypted 'data' records */
void (*saved_data_ready)(struct sock *sk);
struct sk_buff *recv_pkt;
u8 control;
+ int async_capable;
bool decrypted;
atomic_t decrypt_pending;
bool async_notify;
@@ -187,26 +199,34 @@ struct tls_offload_context_tx {
(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
-enum {
- TLS_PENDING_CLOSED_RECORD
-};
-
struct cipher_context {
- u16 prepend_size;
- u16 tag_size;
- u16 overhead_size;
- u16 iv_size;
char *iv;
- u16 rec_seq_size;
char *rec_seq;
};
union tls_crypto_context {
struct tls_crypto_info info;
- struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ };
+};
+
+struct tls_prot_info {
+ u16 version;
+ u16 cipher_type;
+ u16 prepend_size;
+ u16 tag_size;
+ u16 overhead_size;
+ u16 iv_size;
+ u16 rec_seq_size;
+ u16 aad_size;
+ u16 tail_size;
};
struct tls_context {
+ struct tls_prot_info prot_info;
+
union tls_crypto_context crypto_send;
union tls_crypto_context crypto_recv;
@@ -311,12 +331,14 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
int flags);
-int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
- int flags, long *timeo);
+static inline struct tls_msg *tls_msg(struct sk_buff *skb)
+{
+ return (struct tls_msg *)strp_msg(skb);
+}
-static inline bool tls_is_pending_closed_record(struct tls_context *ctx)
+static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
- return test_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
+ return !!ctx->partially_sent_record;
}
static inline int tls_complete_pending_work(struct sock *sk,
@@ -328,17 +350,12 @@ static inline int tls_complete_pending_work(struct sock *sk,
if (unlikely(sk->sk_write_pending))
rc = wait_on_pending_writer(sk, timeo);
- if (!rc && tls_is_pending_closed_record(ctx))
- rc = tls_push_pending_closed_record(sk, ctx, flags, timeo);
+ if (!rc && tls_is_partially_sent_record(ctx))
+ rc = tls_push_partial_record(sk, ctx, flags);
return rc;
}
-static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
-{
- return !!ctx->partially_sent_record;
-}
-
static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
return tls_ctx->pending_open_record_frags;
@@ -389,59 +406,92 @@ static inline bool tls_bigint_increment(unsigned char *seq, int len)
return (i == -1);
}
+static inline struct tls_context *tls_get_ctx(const struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ return icsk->icsk_ulp_data;
+}
+
static inline void tls_advance_record_sn(struct sock *sk,
- struct cipher_context *ctx)
+ struct cipher_context *ctx,
+ int version)
{
- if (tls_bigint_increment(ctx->rec_seq, ctx->rec_seq_size))
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+
+ if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
tls_err_abort(sk, EBADMSG);
- tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
- ctx->iv_size);
+
+ if (version != TLS_1_3_VERSION) {
+ tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ prot->iv_size);
+ }
}
static inline void tls_fill_prepend(struct tls_context *ctx,
char *buf,
size_t plaintext_len,
- unsigned char record_type)
+ unsigned char record_type,
+ int version)
{
- size_t pkt_len, iv_size = ctx->tx.iv_size;
+ struct tls_prot_info *prot = &ctx->prot_info;
+ size_t pkt_len, iv_size = prot->iv_size;
+
+ pkt_len = plaintext_len + prot->tag_size;
+ if (version != TLS_1_3_VERSION) {
+ pkt_len += iv_size;
- pkt_len = plaintext_len + iv_size + ctx->tx.tag_size;
+ memcpy(buf + TLS_NONCE_OFFSET,
+ ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
+ }
/* we cover nonce explicit here as well, so buf should be of
* size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
*/
- buf[0] = record_type;
- buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
- buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
+ buf[0] = version == TLS_1_3_VERSION ?
+ TLS_RECORD_TYPE_DATA : record_type;
+ /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
+ buf[1] = TLS_1_2_VERSION_MINOR;
+ buf[2] = TLS_1_2_VERSION_MAJOR;
/* we can use IV for nonce explicit according to spec */
buf[3] = pkt_len >> 8;
buf[4] = pkt_len & 0xFF;
- memcpy(buf + TLS_NONCE_OFFSET,
- ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
}
static inline void tls_make_aad(char *buf,
size_t size,
char *record_sequence,
int record_sequence_size,
- unsigned char record_type)
+ unsigned char record_type,
+ int version)
{
- memcpy(buf, record_sequence, record_sequence_size);
+ if (version != TLS_1_3_VERSION) {
+ memcpy(buf, record_sequence, record_sequence_size);
+ buf += 8;
+ } else {
+ size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+ }
- buf[8] = record_type;
- buf[9] = TLS_1_2_VERSION_MAJOR;
- buf[10] = TLS_1_2_VERSION_MINOR;
- buf[11] = size >> 8;
- buf[12] = size & 0xFF;
+ buf[0] = version == TLS_1_3_VERSION ?
+ TLS_RECORD_TYPE_DATA : record_type;
+ buf[1] = TLS_1_2_VERSION_MAJOR;
+ buf[2] = TLS_1_2_VERSION_MINOR;
+ buf[3] = size >> 8;
+ buf[4] = size & 0xFF;
}
-static inline struct tls_context *tls_get_ctx(const struct sock *sk)
+static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
+ int i;
- return icsk->icsk_ulp_data;
+ if (version == TLS_1_3_VERSION) {
+ for (i = 0; i < 8; i++)
+ iv[i + 4] ^= seq[i];
+ }
}
+
static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
const struct tls_context *tls_ctx)
{
@@ -469,6 +519,9 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
return !!tls_sw_ctx_tx(ctx);
}
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
+
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
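
The TLS changes above teach tls_fill_prepend() and the new xor_iv_with_seq() about TLS 1.3: the outer record header always carries the legacy 0x0303 version and the "data" content type, and the per-record nonce is the static IV XORed with the 64-bit sequence number. A stand-alone sketch of just those two branches follows; the constants are written out literally and the length math is simplified.

	/* build: cc -o tls13hdr tls13hdr.c */
	#include <stdio.h>

	#define TLS_RECORD_TYPE_DATA	23
	#define TAG_SIZE		16	/* AES-GCM tag */

	/* Sketch of the TLS 1.3 branch of tls_fill_prepend(): the record header
	 * always claims the legacy 0x0303 version and type "data"; no explicit
	 * IV is sent on the wire.
	 */
	static void fill_prepend_tls13(unsigned char *buf, size_t plaintext_len)
	{
		size_t pkt_len = plaintext_len + TAG_SIZE;

		buf[0] = TLS_RECORD_TYPE_DATA;
		buf[1] = 0x03;			/* legacy_record_version 0x0303 */
		buf[2] = 0x03;
		buf[3] = pkt_len >> 8;
		buf[4] = pkt_len & 0xff;
	}

	/* Per-record nonce: XOR the 64-bit sequence into the static IV, skipping
	 * the 4-byte salt, as xor_iv_with_seq() does for TLS 1.3.
	 */
	static void xor_iv_with_seq13(unsigned char *iv, const unsigned char *seq)
	{
		for (int i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}

	int main(void)
	{
		unsigned char hdr[5], iv[12] = { 0 };
		unsigned char seq[8] = { 0, 0, 0, 0, 0, 0, 0, 1 };

		fill_prepend_tls13(hdr, 100);
		xor_iv_with_seq13(iv, seq);
		printf("header: %02x %02x %02x %02x %02x, nonce[11]=%02x\n",
		       hdr[0], hdr[1], hdr[2], hdr[3], hdr[4], iv[11]);
		return 0;
	}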
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 236403eb5ba6..00254a58824b 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -428,7 +428,8 @@ struct switchdev_notifier_vxlan_fdb_info {
int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
struct switchdev_notifier_vxlan_fdb_info *fdb_info);
int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
- struct notifier_block *nb);
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni);
#else
@@ -440,7 +441,8 @@ vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
}
static inline int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
- struct notifier_block *nb)
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -451,4 +453,35 @@ vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
}
#endif
+static inline void vxlan_flag_attr_error(int attrtype,
+ struct netlink_ext_ack *extack)
+{
+#define VXLAN_FLAG(flg) \
+ case IFLA_VXLAN_##flg: \
+ NL_SET_ERR_MSG_MOD(extack, \
+ "cannot change " #flg " flag"); \
+ break
+ switch (attrtype) {
+ VXLAN_FLAG(TTL_INHERIT);
+ VXLAN_FLAG(LEARNING);
+ VXLAN_FLAG(PROXY);
+ VXLAN_FLAG(RSC);
+ VXLAN_FLAG(L2MISS);
+ VXLAN_FLAG(L3MISS);
+ VXLAN_FLAG(COLLECT_METADATA);
+ VXLAN_FLAG(UDP_ZERO_CSUM6_TX);
+ VXLAN_FLAG(UDP_ZERO_CSUM6_RX);
+ VXLAN_FLAG(REMCSUM_TX);
+ VXLAN_FLAG(REMCSUM_RX);
+ VXLAN_FLAG(GBP);
+ VXLAN_FLAG(GPE);
+ VXLAN_FLAG(REMCSUM_NOPARTIAL);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, \
+ "cannot change flag");
+ break;
+ }
+#undef VXLAN_FLAG
+}
+
#endif
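
For reference, each VXLAN_FLAG(...) entry above expands into one case of the switch with a flag-specific extack message; e.g. VXLAN_FLAG(LEARNING) becomes roughly:

	case IFLA_VXLAN_LEARNING:
		NL_SET_ERR_MSG_MOD(extack,
				   "cannot change LEARNING flag");
		break;
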
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 13acb9803a6d..61cf7dbb6782 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -42,6 +42,7 @@ struct xdp_umem {
struct work_struct work;
struct page **pgs;
u32 npgs;
+ int id;
struct net_device *dev;
struct xdp_umem_fq_reuse *fq_reuse;
u16 queue_id;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 7298a53b9702..85386becbaea 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
xfrm_pol_put(pols[i]);
}
-void __xfrm_state_destroy(struct xfrm_state *);
+void __xfrm_state_destroy(struct xfrm_state *, bool);
static inline void __xfrm_state_put(struct xfrm_state *x)
{
@@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
static inline void xfrm_state_put(struct xfrm_state *x)
{
if (refcount_dec_and_test(&x->refcnt))
- __xfrm_state_destroy(x);
+ __xfrm_state_destroy(x, false);
+}
+
+static inline void xfrm_state_put_sync(struct xfrm_state *x)
+{
+ if (refcount_dec_and_test(&x->refcnt))
+ __xfrm_state_destroy(x, true);
}
static inline void xfrm_state_hold(struct xfrm_state *x)
@@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo {
struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_delete(struct xfrm_state *x);
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
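
The new bool on __xfrm_state_destroy() and xfrm_state_flush() selects between the existing deferred teardown and a synchronous one, and xfrm_state_put_sync() is the put variant that requests the synchronous path. A hedged sketch of how a caller might choose between the two (the wrapper and flag below are hypothetical; the exact policy lives in net/xfrm/xfrm_state.c):

	/* Sketch only: drop a state reference, forcing synchronous
	 * destruction where deferred freeing would be too late
	 * (e.g. on net namespace teardown).
	 */
	static void drop_state_ref(struct xfrm_state *x, bool net_exiting)
	{
		if (net_exiting)
			xfrm_state_put_sync(x);
		else
			xfrm_state_put(x);
	}
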
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a3ceed3a040a..80debf5982ac 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2579,9 +2579,10 @@ struct ib_device {
const struct uapi_definition *driver_def;
enum rdma_driver_id driver_id;
+
/*
- * Provides synchronization between device unregistration and netlink
- * commands on a device. To be used only by core.
+ * Positive refcount indicates that the device is currently
+ * registered and cannot be unregistered.
*/
refcount_t refcount;
struct completion unreg_completion;
@@ -3926,6 +3927,25 @@ static inline bool ib_access_writable(int access_flags)
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
+/**
+ * ib_device_try_get - Hold a registration lock
+ * @device: The device to lock
+ *
+ * A device under an active registration lock cannot become unregistered. It
+ * is only possible to obtain a registration lock on a device that is fully
+ * registered, otherwise this function returns false.
+ *
+ * The registration lock is only necessary for actions which require the
+ * device to still be registered. Uses that only require the device pointer to
+ * be valid should use get_device(&ibdev->dev) to hold the memory.
+ *
+ */
+static inline bool ib_device_try_get(struct ib_device *dev)
+{
+ return refcount_inc_not_zero(&dev->refcount);
+}
+
+void ib_device_put(struct ib_device *device);
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
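
A minimal usage sketch for the new registration-lock helpers; the surrounding function is hypothetical:

	static int query_registered_device(struct ib_device *ibdev)
	{
		int ret = 0;

		if (!ib_device_try_get(ibdev))
			return -ENODEV;	/* not (or no longer) registered */

		/* ... work that requires the device to stay registered ... */

		ib_device_put(ibdev);
		return ret;
	}
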
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 4be1aa4435ae..7800e12ee042 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -73,6 +73,8 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_GET_CUSTOMER_OTP = 0x00030021,
RPI_FIRMWARE_GET_DOMAIN_STATE = 0x00030030,
RPI_FIRMWARE_GET_THROTTLED = 0x00030046,
+ RPI_FIRMWARE_GET_CLOCK_MEASURED = 0x00030047,
+ RPI_FIRMWARE_NOTIFY_REBOOT = 0x00030048,
RPI_FIRMWARE_SET_CLOCK_STATE = 0x00038001,
RPI_FIRMWARE_SET_CLOCK_RATE = 0x00038002,
RPI_FIRMWARE_SET_VOLTAGE = 0x00038003,
@@ -86,6 +88,8 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_SET_GPIO_CONFIG = 0x00038043,
RPI_FIRMWARE_GET_PERIPH_REG = 0x00030045,
RPI_FIRMWARE_SET_PERIPH_REG = 0x00038045,
+ RPI_FIRMWARE_GET_POE_HAT_VAL = 0x00030049,
+ RPI_FIRMWARE_SET_POE_HAT_VAL = 0x00030050,
/* Dispmanx TAGS */
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index 3fbd71c27ba3..672cfb58046f 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -57,7 +57,8 @@ struct dpaa2_io_desc {
u32 qman_version;
};
-struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
+ struct device *dev);
void dpaa2_io_down(struct dpaa2_io *d);
@@ -90,10 +91,14 @@ struct dpaa2_io_notification_ctx {
void *dpio_private;
};
+int dpaa2_io_get_cpu(struct dpaa2_io *d);
+
int dpaa2_io_service_register(struct dpaa2_io *service,
- struct dpaa2_io_notification_ctx *ctx);
+ struct dpaa2_io_notification_ctx *ctx,
+ struct device *dev);
void dpaa2_io_service_deregister(struct dpaa2_io *service,
- struct dpaa2_io_notification_ctx *ctx);
+ struct dpaa2_io_notification_ctx *ctx,
+ struct device *dev);
int dpaa2_io_service_rearm(struct dpaa2_io *service,
struct dpaa2_io_notification_ctx *ctx);
@@ -106,9 +111,9 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
const struct dpaa2_fd *fd);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
u16 qdbin, const struct dpaa2_fd *fd);
-int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
+int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid,
const u64 *buffers, unsigned int num_buffers);
-int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
+int dpaa2_io_service_acquire(struct dpaa2_io *d, u16 bpid,
u64 *buffers, unsigned int num_buffers);
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index b02f926a0216..45960aa08f4a 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -23,6 +23,7 @@
#include <soc/tegra/bpmp-abi.h>
struct tegra_bpmp_clk;
+struct tegra_bpmp_ops;
struct tegra_bpmp_soc {
struct {
@@ -32,6 +33,8 @@ struct tegra_bpmp_soc {
unsigned int timeout;
} cpu_tx, thread, cpu_rx;
} channels;
+
+ const struct tegra_bpmp_ops *ops;
unsigned int num_resets;
};
@@ -47,6 +50,7 @@ struct tegra_bpmp_channel {
struct tegra_bpmp_mb_data *ob;
struct completion completion;
struct tegra_ivc *ivc;
+ unsigned int index;
};
typedef void (*tegra_bpmp_mrq_handler_t)(unsigned int mrq,
@@ -63,12 +67,7 @@ struct tegra_bpmp_mrq {
struct tegra_bpmp {
const struct tegra_bpmp_soc *soc;
struct device *dev;
-
- struct {
- struct gen_pool *pool;
- dma_addr_t phys;
- void *virt;
- } tx, rx;
+ void *priv;
struct {
struct mbox_client client;
@@ -173,6 +172,8 @@ static inline bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp,
}
#endif
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp);
+
#if IS_ENABLED(CONFIG_CLK_TEGRA_BPMP)
int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp);
#else
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index a9db1b501de1..b32ee5d82dd4 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -161,7 +161,6 @@ enum tegra_io_pad {
#define TEGRA_IO_RAIL_LVDS TEGRA_IO_PAD_LVDS
#ifdef CONFIG_SOC_TEGRA_PMC
-int tegra_powergate_is_powered(unsigned int id);
int tegra_powergate_power_on(unsigned int id);
int tegra_powergate_power_off(unsigned int id);
int tegra_powergate_remove_clamping(unsigned int id);
@@ -182,11 +181,6 @@ void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
#else
-static inline int tegra_powergate_is_powered(unsigned int id)
-{
- return -ENOSYS;
-}
-
static inline int tegra_powergate_power_on(unsigned int id)
{
return -ENOSYS;
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 0cdc3999ecfa..c5188ff724d1 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -173,7 +173,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ if (stream->direction == SND_COMPRESS_PLAYBACK)
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ else
+ stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+
wake_up(&stream->runtime->sleep);
}
diff --git a/include/sound/core.h b/include/sound/core.h
index 36a5934cf4b1..e923c23e05dd 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -120,7 +120,6 @@ struct snd_card {
struct list_head ctl_files; /* active control files */
struct snd_info_entry *proc_root; /* root for soundcard specific files */
- struct snd_info_entry *proc_id; /* the card id */
struct proc_dir_entry *proc_root_link; /* number link to real id */
struct list_head files_list; /* all files associated to this card */
diff --git a/include/sound/cs35l36.h b/include/sound/cs35l36.h
new file mode 100644
index 000000000000..8f8049d390f0
--- /dev/null
+++ b/include/sound/cs35l36.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/sound/cs35l36.h -- Platform data for CS35L36
+ *
+ * Copyright 2018 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ *
+ */
+
+#ifndef __CS35L36_H
+#define __CS35L36_H
+
+struct cs35l36_vpbr_cfg {
+ bool is_present;
+ bool vpbr_en;
+ int vpbr_thld;
+ int vpbr_atk_rate;
+ int vpbr_atk_vol;
+ int vpbr_max_attn;
+ int vpbr_wait;
+ int vpbr_rel_rate;
+ int vpbr_mute_en;
+};
+
+struct cs35l36_platform_data {
+ bool multi_amp_mode;
+ bool dcm_mode;
+ bool amp_pcm_inv;
+ bool imon_pol_inv;
+ bool vmon_pol_inv;
+ int boost_ind;
+ int bst_vctl;
+ int bst_vctl_sel;
+ int bst_ipk;
+ bool extern_boost;
+ int temp_warn_thld;
+ int irq_drv_sel;
+ int irq_gpio_sel;
+ struct cs35l36_vpbr_cfg vpbr_config;
+};
+
+#endif /* __CS35L36_H */
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 2c4cfaa135a6..c679f6116580 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -99,10 +99,6 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
* playback.
*/
#define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
-/*
- * The PCM streams have custom channel names specified.
- */
-#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4)
/**
* struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 7fa48b100936..cc7c8d42d4fd 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -68,6 +68,7 @@ struct hda_bus {
unsigned int response_reset:1; /* controller was reset */
unsigned int in_reset:1; /* during reset operation */
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
+ unsigned int bus_probing:1; /* during probing process */
int primary_dig_out_type; /* primary digital out PCM type */
unsigned int mixer_assigned; /* codec addr for mixer name */
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
index 2ec31b358950..d4804c72d959 100644
--- a/include/sound/hda_component.h
+++ b/include/sound/hda_component.h
@@ -20,7 +20,7 @@ int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_acomp_init(struct hdac_bus *bus,
const struct drm_audio_component_audio_ops *aops,
- int (*match_master)(struct device *, void *),
+ int (*match_master)(struct device *, int, void *),
size_t extra_size);
int snd_hdac_acomp_exit(struct hdac_bus *bus);
int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
@@ -47,7 +47,8 @@ static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t ni
}
static inline int snd_hdac_acomp_init(struct hdac_bus *bus,
const struct drm_audio_component_audio_ops *aops,
- int (*match_master)(struct device *, void *),
+ int (*match_master)(struct device *,
+ int, void *),
size_t extra_size)
{
return -ENODEV;
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 2ab39fb52d7a..0fd39295b426 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -79,6 +79,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
/* stream register offsets from stream base */
#define AZX_REG_SD_CTL 0x00
+#define AZX_REG_SD_CTL_3B 0x02 /* 3rd byte of SD_CTL register */
#define AZX_REG_SD_STS 0x03
#define AZX_REG_SD_LPIB 0x04
#define AZX_REG_SD_CBL 0x08
@@ -165,6 +166,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define SD_INT_COMPLETE 0x04 /* completion interrupt */
#define SD_INT_MASK (SD_INT_DESC_ERR|SD_INT_FIFO_ERR|\
SD_INT_COMPLETE)
+#define SD_CTL_STRIPE_MASK 0x3 /* stripe control mask */
/* SD_STS */
#define SD_STS_FIFO_READY 0x20 /* FIFO ready */
diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h
index 2a8573a00ea6..e36b77531c5c 100644
--- a/include/sound/hda_verbs.h
+++ b/include/sound/hda_verbs.h
@@ -66,6 +66,7 @@ enum {
#define AC_VERB_GET_CONFIG_DEFAULT 0x0f1c
/* f20: AFG/MFG */
#define AC_VERB_GET_SUBSYSTEM_ID 0x0f20
+#define AC_VERB_GET_STRIPE_CONTROL 0x0f24
#define AC_VERB_GET_CVT_CHAN_COUNT 0x0f2d
#define AC_VERB_GET_HDMI_DIP_SIZE 0x0f2e
#define AC_VERB_GET_HDMI_ELDD 0x0f2f
@@ -110,6 +111,7 @@ enum {
#define AC_VERB_SET_CONFIG_DEFAULT_BYTES_3 0x71f
#define AC_VERB_SET_EAPD 0x788
#define AC_VERB_SET_CODEC_RESET 0x7ff
+#define AC_VERB_SET_STRIPE_CONTROL 0x724
#define AC_VERB_SET_CVT_CHAN_COUNT 0x72d
#define AC_VERB_SET_HDMI_DIP_INDEX 0x730
#define AC_VERB_SET_HDMI_DIP_DATA 0x731
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index b4fa1c775251..45f944d57982 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -539,6 +539,9 @@ void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
unsigned int streams);
void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
unsigned int streams);
+int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
+ struct snd_pcm_substream *substream);
+
/*
* macros for easy use
*/
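
The new stripe-control plumbing (AZX_REG_SD_CTL_3B, SD_CTL_STRIPE_MASK, the AC_VERB_*_STRIPE_CONTROL verbs and snd_hdac_get_stream_stripe_ctl()) lets a controller driver program how an output stream is striped across SDO lines. A hedged controller-side sketch, assuming the existing snd_hdac_stream_updateb() helper and an azx_dev stream object:

	/* Sketch only: program the stream's stripe bits before starting it. */
	unsigned int stripe_ctl;

	stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
	snd_hdac_stream_updateb(azx_dev, SD_CTL_3B,
				SD_CTL_STRIPE_MASK, stripe_ctl);
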
diff --git a/include/sound/info.h b/include/sound/info.h
index becdf66d2825..97fdda41e076 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -82,7 +82,6 @@ struct snd_info_entry {
struct snd_info_entry_ops *ops;
} c;
struct snd_info_entry *parent;
- struct snd_card *card;
struct module *module;
void *private_data;
void (*private_free)(struct snd_info_entry *entry);
@@ -160,6 +159,13 @@ static inline void snd_info_set_text_ops(struct snd_info_entry *entry,
entry->c.text.read = read;
}
+int snd_card_rw_proc_new(struct snd_card *card, const char *name,
+ void *private_data,
+ void (*read)(struct snd_info_entry *,
+ struct snd_info_buffer *),
+ void (*write)(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer));
+
int snd_info_check_reserved_words(const char *str);
#else
@@ -189,10 +195,38 @@ static inline int snd_card_proc_new(struct snd_card *card, const char *name,
static inline void snd_info_set_text_ops(struct snd_info_entry *entry __attribute__((unused)),
void *private_data,
void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) {}
+static inline int snd_card_rw_proc_new(struct snd_card *card, const char *name,
+ void *private_data,
+ void (*read)(struct snd_info_entry *,
+ struct snd_info_buffer *),
+ void (*write)(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer))
+{
+ return 0;
+}
static inline int snd_info_check_reserved_words(const char *str) { return 1; }
#endif
+/**
+ * snd_card_ro_proc_new - Create a read-only text proc file entry for the card
+ * @card: the card instance
+ * @name: the file name
+ * @private_data: the arbitrary private data
+ * @read: the read callback
+ *
+ * The proc file entry is registered when snd_card_register() is called, and
+ * it is removed automatically at card removal, too.
+ */
+static inline int
+snd_card_ro_proc_new(struct snd_card *card, const char *name,
+ void *private_data,
+ void (*read)(struct snd_info_entry *,
+ struct snd_info_buffer *))
+{
+ return snd_card_rw_proc_new(card, name, private_data, read, NULL);
+}
+
/*
* OSS info part
*/
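
A short usage sketch for the new proc helpers; the chip structure, file name and callback below are hypothetical:

	static void foo_proc_read(struct snd_info_entry *entry,
				  struct snd_info_buffer *buffer)
	{
		struct foo_chip *chip = entry->private_data;

		snd_iprintf(buffer, "firmware version: %u\n", chip->fw_version);
	}

	/* read-only entry; pass a write callback to snd_card_rw_proc_new()
	 * instead if the file should also accept writes */
	snd_card_ro_proc_new(card, "firmware", chip, foo_proc_read);
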
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index af3fa577fa06..1ac0dd82a916 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -37,7 +37,6 @@ struct snd_dma_device {
};
#define snd_dma_pci_data(pci) (&(pci)->dev)
-#define snd_dma_isa_data() NULL
#define snd_dma_continuous_data(x) ((struct device *)(__force unsigned long)(x))
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index d6bd3caf6878..465d7d033c4c 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -30,6 +30,7 @@
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/pm_qos.h>
+#include <linux/refcount.h>
#define snd_pcm_substream_chip(substream) ((substream)->private_data)
#define snd_pcm_chip(pcm) ((pcm)->private_data)
@@ -439,7 +440,7 @@ struct snd_pcm_group { /* keep linked substreams */
spinlock_t lock;
struct mutex mutex;
struct list_head substreams;
- int count;
+ refcount_t refs;
};
struct pid;
@@ -470,7 +471,6 @@ struct snd_pcm_substream {
struct snd_pcm_group self_group; /* fake group for non linked substream (with substream lock inside) */
struct snd_pcm_group *group; /* pointer to current group */
/* -- assigned files -- */
- void *file;
int ref_count;
atomic_t mmap_count;
unsigned int f_flags;
@@ -482,15 +482,6 @@ struct snd_pcm_substream {
#endif
#ifdef CONFIG_SND_VERBOSE_PROCFS
struct snd_info_entry *proc_root;
- struct snd_info_entry *proc_info_entry;
- struct snd_info_entry *proc_hw_params_entry;
- struct snd_info_entry *proc_sw_params_entry;
- struct snd_info_entry *proc_status_entry;
- struct snd_info_entry *proc_prealloc_entry;
- struct snd_info_entry *proc_prealloc_max_entry;
-#ifdef CONFIG_SND_PCM_XRUN_DEBUG
- struct snd_info_entry *proc_xrun_injection_entry;
-#endif
#endif /* CONFIG_SND_VERBOSE_PROCFS */
/* misc flags */
unsigned int hw_opened: 1;
@@ -512,10 +503,8 @@ struct snd_pcm_str {
#endif
#ifdef CONFIG_SND_VERBOSE_PROCFS
struct snd_info_entry *proc_root;
- struct snd_info_entry *proc_info_entry;
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
unsigned int xrun_debug; /* 0 = disabled, 1 = verbose, 2 = stacktrace */
- struct snd_info_entry *proc_xrun_debug_entry;
#endif
#endif
struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */
@@ -538,6 +527,7 @@ struct snd_pcm {
void (*private_free) (struct snd_pcm *pcm);
bool internal; /* pcm is for internal use only */
bool nonatomic; /* whole PCM operations are in non-atomic context */
+ bool no_device_suspend; /* don't invoke device PM suspend */
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
struct snd_pcm_oss oss;
#endif
@@ -581,13 +571,8 @@ int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t status);
int snd_pcm_drain_done(struct snd_pcm_substream *substream);
int snd_pcm_stop_xrun(struct snd_pcm_substream *substream);
#ifdef CONFIG_PM
-int snd_pcm_suspend(struct snd_pcm_substream *substream);
int snd_pcm_suspend_all(struct snd_pcm *pcm);
#else
-static inline int snd_pcm_suspend(struct snd_pcm_substream *substream)
-{
- return 0;
-}
static inline int snd_pcm_suspend_all(struct snd_pcm *pcm)
{
return 0;
@@ -1200,12 +1185,12 @@ static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime,
* Memory
*/
-int snd_pcm_lib_preallocate_free(struct snd_pcm_substream *substream);
-int snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm);
-int snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream,
+void snd_pcm_lib_preallocate_free(struct snd_pcm_substream *substream);
+void snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm);
+void snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream,
int type, struct device *data,
size_t size, size_t max);
-int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
+void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int type, void *data,
size_t size, size_t max);
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 6d69ed2bd7b1..7afe45389972 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -75,7 +75,7 @@ void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai);
&dai_link->codec_dai_name, \
list_name, cells_name, NULL)
#define asoc_simple_card_parse_platform(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, dai_link->platform, \
+ asoc_simple_card_parse_dai(node, dai_link->platforms, \
&dai_link->platform_of_node, \
NULL, list_name, cells_name, NULL)
int asoc_simple_card_parse_dai(struct device_node *node,
@@ -108,7 +108,7 @@ int asoc_simple_card_parse_graph_dai(struct device_node *ep,
int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
struct asoc_simple_dai *simple_dai);
-int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link);
+void asoc_simple_card_canonicalize_platform(struct snd_soc_dai_link *dai_link);
void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
int is_single_links);
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 266e64e3c24c..35b38e41e5b2 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -22,20 +22,37 @@ struct snd_soc_acpi_package_context {
#define SND_ACPI_I2C_ID_LEN (4 + ACPI_ID_LEN + 3 + 1)
#if IS_ENABLED(CONFIG_ACPI)
+/* acpi match */
+struct snd_soc_acpi_mach *
+snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
+
bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx);
+
+/* check all codecs */
+struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg);
+
#else
+/* acpi match */
+static inline struct snd_soc_acpi_mach *
+snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
+{
+ return NULL;
+}
+
static inline bool
snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx)
{
return false;
}
-#endif
-/* acpi match */
-struct snd_soc_acpi_mach *
-snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
+/* check all codecs */
+static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
+{
+ return NULL;
+}
+#endif
/**
* snd_soc_acpi_mach_params: interface for machine driver configuration
@@ -69,9 +86,6 @@ struct snd_soc_acpi_mach_params {
* is not constant since this field may be updated at run-time
* @sof_fw_filename: Sound Open Firmware file name, if enabled
* @sof_tplg_filename: Sound Open Firmware topology file name, if enabled
- * @asoc_plat_name: ASoC platform name, used for binding machine drivers
- * if non NULL
- * @new_mach_data: machine driver private data fixup
*/
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
@@ -85,8 +99,6 @@ struct snd_soc_acpi_mach {
struct snd_soc_acpi_mach_params mach_params;
const char *sof_fw_filename;
const char *sof_tplg_filename;
- const char *asoc_plat_name;
- struct platform_device * (*new_mach_data)(void *pdata);
};
#define SND_SOC_ACPI_MAX_CODECS 3
@@ -105,7 +117,4 @@ struct snd_soc_acpi_codecs {
u8 codecs[SND_SOC_ACPI_MAX_CODECS][ACPI_ID_LEN];
};
-/* check all codecs */
-struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg);
-
#endif
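
With the stubs above, a machine-enumeration call site no longer needs its own #ifdef CONFIG_ACPI guard. A hedged fragment from a driver probe path (the machine table name is hypothetical):

	struct snd_soc_acpi_mach *mach;

	/* foo_acpi_machines is a hypothetical snd_soc_acpi_mach table */
	mach = snd_soc_acpi_find_machine(foo_acpi_machines);
	if (!mach)		/* also what the !CONFIG_ACPI stub returns */
		return -ENODEV;
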
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index bd8163f151cb..c00a0b8ade08 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -214,21 +214,21 @@ struct device;
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */
-#define SND_SOC_DAPM_AIF_IN(wname, stname, wslot, wreg, wshift, winvert) \
+#define SND_SOC_DAPM_AIF_IN(wname, stname, wchan, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
- SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
-#define SND_SOC_DAPM_AIF_IN_E(wname, stname, wslot, wreg, wshift, winvert, \
+ .channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
+#define SND_SOC_DAPM_AIF_IN_E(wname, stname, wchan, wreg, wshift, winvert, \
wevent, wflags) \
{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
- SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
-#define SND_SOC_DAPM_AIF_OUT(wname, stname, wslot, wreg, wshift, winvert) \
+#define SND_SOC_DAPM_AIF_OUT(wname, stname, wchan, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
- SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
-#define SND_SOC_DAPM_AIF_OUT_E(wname, stname, wslot, wreg, wshift, winvert, \
+ .channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
+#define SND_SOC_DAPM_AIF_OUT_E(wname, stname, wchan, wreg, wshift, winvert, \
wevent, wflags) \
{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
- SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
@@ -407,6 +407,10 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
+int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
@@ -519,6 +523,9 @@ enum snd_soc_dapm_type {
snd_soc_dapm_asrc, /* DSP/CODEC ASRC component */
snd_soc_dapm_encoder, /* FW/SW audio encoder component */
snd_soc_dapm_decoder, /* FW/SW audio decoder component */
+
+ /* Don't edit below this line */
+ SND_SOC_DAPM_TYPE_COUNT
};
enum snd_soc_dapm_subclass {
@@ -540,6 +547,8 @@ struct snd_soc_dapm_route {
/* Note: currently only supported for links where source is a supply */
int (*connected)(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
+
+ struct snd_soc_dobj dobj;
};
/* dapm audio path between two widgets */
@@ -625,6 +634,8 @@ struct snd_soc_dapm_widget {
int endpoints[2];
struct clk *clk;
+
+ int channel;
};
struct snd_soc_dapm_update {
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index fa4b8413d2e2..5223896de26f 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -38,12 +38,14 @@ struct snd_soc_dapm_route;
enum snd_soc_dobj_type {
SND_SOC_DOBJ_NONE = 0, /* object is not dynamic */
SND_SOC_DOBJ_MIXER,
- SND_SOC_DOBJ_ENUM,
SND_SOC_DOBJ_BYTES,
- SND_SOC_DOBJ_PCM,
+ SND_SOC_DOBJ_ENUM,
+ SND_SOC_DOBJ_GRAPH,
+ SND_SOC_DOBJ_WIDGET,
SND_SOC_DOBJ_DAI_LINK,
+ SND_SOC_DOBJ_PCM,
SND_SOC_DOBJ_CODEC_LINK,
- SND_SOC_DOBJ_WIDGET,
+ SND_SOC_DOBJ_BACKEND_LINK,
};
/* dynamic control object */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 8ec1de856ee7..eb7db605955b 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -802,6 +802,9 @@ struct snd_soc_component_driver {
int probe_order;
int remove_order;
+ /* signal if the module handling the component cannot be removed */
+ unsigned int ignore_module_refcount:1;
+
/* bits */
unsigned int idle_bias_on:1;
unsigned int suspend_bias_off:1;
@@ -891,6 +894,18 @@ struct snd_soc_dai_link {
/* config - must be set by machine driver */
const char *name; /* Codec name */
const char *stream_name; /* Stream name */
+
+ /*
+ * cpu_name
+ * cpu_of_node
+ * cpu_dai_name
+ *
+ * These are the legacy style and will be replaced by the modern
+ * style (= snd_soc_dai_link_component) in the future, but the
+ * modern style is not supported for the CPU side yet.
+ * Once it is, all drivers will switch to it and the legacy style
+ * code will be removed from ALSA SoC.
+ */
/*
* You MAY specify the link's CPU-side device, either by device name,
* or by DT/OF node, but not both. If this information is omitted,
@@ -906,6 +921,19 @@ struct snd_soc_dai_link {
* only, which only works well when that device exposes a single DAI.
*/
const char *cpu_dai_name;
+
+ /*
+ * codec_name
+ * codec_of_node
+ * codec_dai_name
+ *
+ * These are legacy style, it will be converted to modern style
+ * (= snd_soc_dai_link_component) automatically in soc-core
+ * if driver is using legacy style.
+ * Driver shouldn't use both legacy and modern style in the same time.
+ * If modern style was supported for CPU, all driver will switch
+ * to use it, and, legacy style code will be removed from ALSA SoC.
+ */
/*
* You MUST specify the link's codec, either by device name, or by
* DT/OF node, but not both.
@@ -919,13 +947,25 @@ struct snd_soc_dai_link {
unsigned int num_codecs;
/*
+ * platform_name
+ * platform_of_node
+ *
+ * These are the legacy style; soc-core converts them to the modern
+ * style (= snd_soc_dai_link_component) automatically when a driver
+ * uses the legacy style.
+ * A driver should not mix the legacy and modern styles at the same time.
+ * Once the modern style is supported for the CPU side, all drivers will
+ * switch to it and the legacy style code will be removed from ALSA SoC.
+ */
+ /*
* You MAY specify the link's platform/PCM/DMA driver, either by
* device name, or by DT/OF node, but not both. Some forms of link
* do not need a platform.
*/
const char *platform_name;
struct device_node *platform_of_node;
- struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link_component *platforms;
+ unsigned int num_platforms;
int id; /* optional ID for machine driver link identification */
@@ -985,6 +1025,12 @@ struct snd_soc_dai_link {
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int ignore:1;
+ /*
+ * This driver uses legacy platform naming. Set by the core, machine
+ * drivers should not modify this value.
+ */
+ unsigned int legacy_platform:1;
+
struct list_head list; /* DAI link list of the soc card */
struct snd_soc_dobj dobj; /* For topology */
};
@@ -1537,6 +1583,37 @@ struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
return NULL;
}
+static inline
+int snd_soc_fixup_dai_links_platform_name(struct snd_soc_card *card,
+ const char *platform_name)
+{
+ struct snd_soc_dai_link *dai_link;
+ const char *name;
+ int i;
+
+ if (!platform_name) /* nothing to do */
+ return 0;
+
+ /* set platform name for each dailink */
+ for_each_card_prelinks(card, i, dai_link) {
+ name = devm_kstrdup(card->dev, platform_name, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ if (dai_link->platforms)
+ /* only single platform is supported for now */
+ dai_link->platforms->name = name;
+ else
+ /*
+ * legacy mode, this case will be removed when all
+ * derivers are switched to modern style dai_link.
+ */
+ dai_link->platform_name = name;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_FS
extern struct dentry *snd_soc_debugfs_root;
#endif
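
A usage sketch for snd_soc_fixup_dai_links_platform_name(), e.g. from a machine driver probe (the card and function names are hypothetical):

	static int foo_mach_probe(struct platform_device *pdev)
	{
		struct snd_soc_card *card = &foo_card;
		int ret;

		card->dev = &pdev->dev;

		/* point every DAI link at the platform driver that registered us */
		ret = snd_soc_fixup_dai_links_platform_name(card,
							    dev_name(&pdev->dev));
		if (ret)
			return ret;

		return devm_snd_soc_register_card(&pdev->dev, card);
	}
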
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 33d291888ba9..e3f005eae1f7 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -25,6 +25,7 @@
enum afs_call_trace {
afs_call_trace_alloc,
afs_call_trace_free,
+ afs_call_trace_get,
afs_call_trace_put,
afs_call_trace_wake,
afs_call_trace_work,
@@ -159,6 +160,7 @@ enum afs_file_error {
#define afs_call_traces \
EM(afs_call_trace_alloc, "ALLOC") \
EM(afs_call_trace_free, "FREE ") \
+ EM(afs_call_trace_get, "GET ") \
EM(afs_call_trace_put, "PUT ") \
EM(afs_call_trace_wake, "WAKE ") \
E_(afs_call_trace_work, "WORK ")
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 2887503e4d12..ab1cc33adbac 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1051,6 +1051,7 @@ TRACE_EVENT(btrfs_trigger_flush,
{ FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \
{ FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS"}, \
{ ALLOC_CHUNK, "ALLOC_CHUNK"}, \
+ { ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE"}, \
{ COMMIT_TRANS, "COMMIT_TRANS"})
TRACE_EVENT(btrfs_flush_space,
@@ -1512,35 +1513,6 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
TP_ARGS(inode, start, len, reserved, op)
);
-DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
-
- TP_PROTO(const struct btrfs_fs_info *fs_info,
- u64 ref_root, u64 reserved),
-
- TP_ARGS(fs_info, ref_root, reserved),
-
- TP_STRUCT__entry_btrfs(
- __field( u64, ref_root )
- __field( u64, reserved )
- ),
-
- TP_fast_assign_btrfs(fs_info,
- __entry->ref_root = ref_root;
- __entry->reserved = reserved;
- ),
-
- TP_printk_btrfs("root=%llu reserved=%llu op=free",
- __entry->ref_root, __entry->reserved)
-);
-
-DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
-
- TP_PROTO(const struct btrfs_fs_info *fs_info,
- u64 ref_root, u64 reserved),
-
- TP_ARGS(fs_info, ref_root, reserved)
-);
-
DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct btrfs_qgroup_extent_record *rec),
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 44acfbca1266..6f60a78d9a7e 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -46,6 +46,131 @@ TRACE_EVENT(devlink_hwmsg,
(int) __entry->len, __get_dynamic_array(buf), __entry->len)
);
+/*
+ * Tracepoint for devlink hardware error:
+ */
+TRACE_EVENT(devlink_hwerr,
+ TP_PROTO(const struct devlink *devlink, int err, const char *msg),
+
+ TP_ARGS(devlink, err, msg),
+
+ TP_STRUCT__entry(
+ __string(bus_name, devlink->dev->bus->name)
+ __string(dev_name, dev_name(devlink->dev))
+ __string(driver_name, devlink->dev->driver->name)
+ __field(int, err)
+ __string(msg, msg)
+ ),
+
+ TP_fast_assign(
+ __assign_str(bus_name, devlink->dev->bus->name);
+ __assign_str(dev_name, dev_name(devlink->dev));
+ __assign_str(driver_name, devlink->dev->driver->name);
+ __entry->err = err;
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("bus_name=%s dev_name=%s driver_name=%s err=%d %s",
+ __get_str(bus_name), __get_str(dev_name),
+ __get_str(driver_name), __entry->err, __get_str(msg))
+);
+
+/*
+ * Tracepoint for devlink health message:
+ */
+TRACE_EVENT(devlink_health_report,
+ TP_PROTO(const struct devlink *devlink, const char *reporter_name,
+ const char *msg),
+
+ TP_ARGS(devlink, reporter_name, msg),
+
+ TP_STRUCT__entry(
+ __string(bus_name, devlink->dev->bus->name)
+ __string(dev_name, dev_name(devlink->dev))
+ __string(driver_name, devlink->dev->driver->name)
+ __string(reporter_name, msg)
+ __string(msg, msg)
+ ),
+
+ TP_fast_assign(
+ __assign_str(bus_name, devlink->dev->bus->name);
+ __assign_str(dev_name, dev_name(devlink->dev));
+ __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(reporter_name, reporter_name);
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("bus_name=%s dev_name=%s driver_name=%s reporter_name=%s: %s",
+ __get_str(bus_name), __get_str(dev_name),
+ __get_str(driver_name), __get_str(reporter_name),
+ __get_str(msg))
+);
+
+/*
+ * Tracepoint for devlink health recover aborted message:
+ */
+TRACE_EVENT(devlink_health_recover_aborted,
+ TP_PROTO(const struct devlink *devlink, const char *reporter_name,
+ bool health_state, u64 time_since_last_recover),
+
+ TP_ARGS(devlink, reporter_name, health_state, time_since_last_recover),
+
+ TP_STRUCT__entry(
+ __string(bus_name, devlink->dev->bus->name)
+ __string(dev_name, dev_name(devlink->dev))
+ __string(driver_name, devlink->dev->driver->name)
+ __string(reporter_name, reporter_name)
+ __field(bool, health_state)
+ __field(u64, time_since_last_recover)
+ ),
+
+ TP_fast_assign(
+ __assign_str(bus_name, devlink->dev->bus->name);
+ __assign_str(dev_name, dev_name(devlink->dev));
+ __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(reporter_name, reporter_name);
+ __entry->health_state = health_state;
+ __entry->time_since_last_recover = time_since_last_recover;
+ ),
+
+ TP_printk("bus_name=%s dev_name=%s driver_name=%s reporter_name=%s: health_state=%d time_since_last_recover=%llu recover aborted",
+ __get_str(bus_name), __get_str(dev_name),
+ __get_str(driver_name), __get_str(reporter_name),
+ __entry->health_state,
+ __entry->time_since_last_recover)
+);
+
+/*
+ * Tracepoint for devlink health reporter state update:
+ */
+TRACE_EVENT(devlink_health_reporter_state_update,
+ TP_PROTO(const struct devlink *devlink, const char *reporter_name,
+ bool new_state),
+
+ TP_ARGS(devlink, reporter_name, new_state),
+
+ TP_STRUCT__entry(
+ __string(bus_name, devlink->dev->bus->name)
+ __string(dev_name, dev_name(devlink->dev))
+ __string(driver_name, devlink->dev->driver->name)
+ __string(reporter_name, reporter_name)
+ __field(u8, new_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(bus_name, devlink->dev->bus->name);
+ __assign_str(dev_name, dev_name(devlink->dev));
+ __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(reporter_name, reporter_name);
+ __entry->new_state = new_state;
+ ),
+
+ TP_printk("bus_name=%s dev_name=%s driver_name=%s reporter_name=%s: new_state=%d",
+ __get_str(bus_name), __get_str(dev_name),
+ __get_str(driver_name), __get_str(reporter_name),
+ __entry->new_state)
+);
+
#endif /* _TRACE_DEVLINK_H */
/* This part must be outside protection */
@@ -64,6 +189,10 @@ static inline void trace_devlink_hwmsg(const struct devlink *devlink,
{
}
+static inline void trace_devlink_hwerr(const struct devlink *devlink,
+ int err, const char *msg)
+{
+}
#endif /* _TRACE_DEVLINK_H */
#endif
diff --git a/include/trace/events/mlxsw.h b/include/trace/events/mlxsw.h
new file mode 100644
index 000000000000..6a4cfaef33a2
--- /dev/null
+++ b/include/trace/events/mlxsw.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlxsw
+
+#if !defined(_MLXSW_TRACEPOINT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLXSW_TRACEPOINT_H
+
+#include <linux/tracepoint.h>
+
+struct mlxsw_sp;
+struct mlxsw_sp_acl_atcam_region;
+struct mlxsw_sp_acl_tcam_vregion;
+
+TRACE_EVENT(mlxsw_sp_acl_atcam_entry_add_ctcam_spill,
+ TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_acl_atcam_region *aregion),
+
+ TP_ARGS(mlxsw_sp, aregion),
+
+ TP_STRUCT__entry(
+ __field(const void *, mlxsw_sp)
+ __field(const void *, aregion)
+ ),
+
+ TP_fast_assign(
+ __entry->mlxsw_sp = mlxsw_sp;
+ __entry->aregion = aregion;
+ ),
+
+ TP_printk("mlxsw_sp %p, aregion %p",
+ __entry->mlxsw_sp, __entry->aregion)
+);
+
+TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_rehash,
+ TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_acl_tcam_vregion *vregion),
+
+ TP_ARGS(mlxsw_sp, vregion),
+
+ TP_STRUCT__entry(
+ __field(const void *, mlxsw_sp)
+ __field(const void *, vregion)
+ ),
+
+ TP_fast_assign(
+ __entry->mlxsw_sp = mlxsw_sp;
+ __entry->vregion = vregion;
+ ),
+
+ TP_printk("mlxsw_sp %p, vregion %p",
+ __entry->mlxsw_sp, __entry->vregion)
+);
+
+TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_migrate,
+ TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_acl_tcam_vregion *vregion),
+
+ TP_ARGS(mlxsw_sp, vregion),
+
+ TP_STRUCT__entry(
+ __field(const void *, mlxsw_sp)
+ __field(const void *, vregion)
+ ),
+
+ TP_fast_assign(
+ __entry->mlxsw_sp = mlxsw_sp;
+ __entry->vregion = vregion;
+ ),
+
+ TP_printk("mlxsw_sp %p, vregion %p",
+ __entry->mlxsw_sp, __entry->vregion)
+);
+
+TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_migrate_end,
+ TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_acl_tcam_vregion *vregion),
+
+ TP_ARGS(mlxsw_sp, vregion),
+
+ TP_STRUCT__entry(
+ __field(const void *, mlxsw_sp)
+ __field(const void *, vregion)
+ ),
+
+ TP_fast_assign(
+ __entry->mlxsw_sp = mlxsw_sp;
+ __entry->vregion = vregion;
+ ),
+
+ TP_printk("mlxsw_sp %p, vregion %p",
+ __entry->mlxsw_sp, __entry->vregion)
+);
+
+TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_rehash_dis,
+ TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_acl_tcam_vregion *vregion),
+
+ TP_ARGS(mlxsw_sp, vregion),
+
+ TP_STRUCT__entry(
+ __field(const void *, mlxsw_sp)
+ __field(const void *, vregion)
+ ),
+
+ TP_fast_assign(
+ __entry->mlxsw_sp = mlxsw_sp;
+ __entry->vregion = vregion;
+ ),
+
+ TP_printk("mlxsw_sp %p, vregion %p",
+ __entry->mlxsw_sp, __entry->vregion)
+);
+
+#endif /* _MLXSW_TRACEPOINT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
new file mode 100644
index 000000000000..0bdb08557763
--- /dev/null
+++ b/include/trace/events/neigh.h
@@ -0,0 +1,206 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM neigh
+
+#if !defined(_TRACE_NEIGH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NEIGH_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/tracepoint.h>
+#include <net/neighbour.h>
+
+#define neigh_state_str(state) \
+ __print_symbolic(state, \
+ { NUD_INCOMPLETE, "incomplete" }, \
+ { NUD_REACHABLE, "reachable" }, \
+ { NUD_STALE, "stale" }, \
+ { NUD_DELAY, "delay" }, \
+ { NUD_PROBE, "probe" }, \
+ { NUD_FAILED, "failed" }, \
+ { NUD_NOARP, "noarp" }, \
+ { NUD_PERMANENT, "permanent"})
+
+TRACE_EVENT(neigh_update,
+
+ TP_PROTO(struct neighbour *n, const u8 *lladdr, u8 new,
+ u32 flags, u32 nlmsg_pid),
+
+ TP_ARGS(n, lladdr, new, flags, nlmsg_pid),
+
+ TP_STRUCT__entry(
+ __field(u32, family)
+ __string(dev, (n->dev ? n->dev->name : "NULL"))
+ __array(u8, lladdr, MAX_ADDR_LEN)
+ __field(u8, lladdr_len)
+ __field(u8, flags)
+ __field(u8, nud_state)
+ __field(u8, type)
+ __field(u8, dead)
+ __field(int, refcnt)
+ __array(__u8, primary_key4, 4)
+ __array(__u8, primary_key6, 16)
+ __field(unsigned long, confirmed)
+ __field(unsigned long, updated)
+ __field(unsigned long, used)
+ __array(u8, new_lladdr, MAX_ADDR_LEN)
+ __field(u8, new_state)
+ __field(u32, update_flags)
+ __field(u32, pid)
+ ),
+
+ TP_fast_assign(
+ int lladdr_len = (n->dev ? n->dev->addr_len : MAX_ADDR_LEN);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->family = n->tbl->family;
+ __assign_str(dev, (n->dev ? n->dev->name : "NULL"));
+ __entry->lladdr_len = lladdr_len;
+ memcpy(__entry->lladdr, n->ha, lladdr_len);
+ __entry->flags = n->flags;
+ __entry->nud_state = n->nud_state;
+ __entry->type = n->type;
+ __entry->dead = n->dead;
+ __entry->refcnt = refcount_read(&n->refcnt);
+ pin6 = (struct in6_addr *)__entry->primary_key6;
+ p32 = (__be32 *)__entry->primary_key4;
+
+ if (n->tbl->family == AF_INET)
+ *p32 = *(__be32 *)n->primary_key;
+ else
+ *p32 = 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (n->tbl->family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->primary_key6;
+ *pin6 = *(struct in6_addr *)n->primary_key;
+ } else
+#endif
+ {
+ ipv6_addr_set_v4mapped(*p32, pin6);
+ }
+ __entry->confirmed = n->confirmed;
+ __entry->updated = n->updated;
+ __entry->used = n->used;
+ if (lladdr)
+ memcpy(__entry->new_lladdr, lladdr, lladdr_len);
+ __entry->new_state = new;
+ __entry->update_flags = flags;
+ __entry->pid = nlmsg_pid;
+ ),
+
+ TP_printk("family %d dev %s lladdr %s flags %02x nud_state %s type %02x "
+ "dead %d refcnt %d primary_key4 %pI4 primary_key6 %pI6c "
+ "confirmed %lu updated %lu used %lu new_lladdr %s "
+ "new_state %s update_flags %02x pid %d",
+ __entry->family, __get_str(dev),
+ __print_hex_str(__entry->lladdr, __entry->lladdr_len),
+ __entry->flags, neigh_state_str(__entry->nud_state),
+ __entry->type, __entry->dead, __entry->refcnt,
+ __entry->primary_key4, __entry->primary_key6,
+ __entry->confirmed, __entry->updated, __entry->used,
+ __print_hex_str(__entry->new_lladdr, __entry->lladdr_len),
+ neigh_state_str(__entry->new_state),
+ __entry->update_flags, __entry->pid)
+);
+
+DECLARE_EVENT_CLASS(neigh__update,
+ TP_PROTO(struct neighbour *n, int err),
+ TP_ARGS(n, err),
+ TP_STRUCT__entry(
+ __field(u32, family)
+ __string(dev, (n->dev ? n->dev->name : "NULL"))
+ __array(u8, lladdr, MAX_ADDR_LEN)
+ __field(u8, lladdr_len)
+ __field(u8, flags)
+ __field(u8, nud_state)
+ __field(u8, type)
+ __field(u8, dead)
+ __field(int, refcnt)
+ __array(__u8, primary_key4, 4)
+ __array(__u8, primary_key6, 16)
+ __field(unsigned long, confirmed)
+ __field(unsigned long, updated)
+ __field(unsigned long, used)
+ __field(u32, err)
+ ),
+
+ TP_fast_assign(
+ int lladdr_len = (n->dev ? n->dev->addr_len : MAX_ADDR_LEN);
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __entry->family = n->tbl->family;
+ __assign_str(dev, (n->dev ? n->dev->name : "NULL"));
+ __entry->lladdr_len = lladdr_len;
+ memcpy(__entry->lladdr, n->ha, lladdr_len);
+ __entry->flags = n->flags;
+ __entry->nud_state = n->nud_state;
+ __entry->type = n->type;
+ __entry->dead = n->dead;
+ __entry->refcnt = refcount_read(&n->refcnt);
+ pin6 = (struct in6_addr *)__entry->primary_key6;
+ p32 = (__be32 *)__entry->primary_key4;
+
+ if (n->tbl->family == AF_INET)
+ *p32 = *(__be32 *)n->primary_key;
+ else
+ *p32 = 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (n->tbl->family == AF_INET6) {
+ pin6 = (struct in6_addr *)__entry->primary_key6;
+ *pin6 = *(struct in6_addr *)n->primary_key;
+ } else
+#endif
+ {
+ ipv6_addr_set_v4mapped(*p32, pin6);
+ }
+
+ __entry->confirmed = n->confirmed;
+ __entry->updated = n->updated;
+ __entry->used = n->used;
+ __entry->err = err;
+ ),
+
+ TP_printk("family %d dev %s lladdr %s flags %02x nud_state %s type %02x "
+ "dead %d refcnt %d primary_key4 %pI4 primary_key6 %pI6c "
+ "confirmed %lu updated %lu used %lu err %d",
+ __entry->family, __get_str(dev),
+ __print_hex_str(__entry->lladdr, __entry->lladdr_len),
+ __entry->flags, neigh_state_str(__entry->nud_state),
+ __entry->type, __entry->dead, __entry->refcnt,
+ __entry->primary_key4, __entry->primary_key6,
+ __entry->confirmed, __entry->updated, __entry->used,
+ __entry->err)
+);
+
+DEFINE_EVENT(neigh__update, neigh_update_done,
+ TP_PROTO(struct neighbour *neigh, int err),
+ TP_ARGS(neigh, err)
+);
+
+DEFINE_EVENT(neigh__update, neigh_timer_handler,
+ TP_PROTO(struct neighbour *neigh, int err),
+ TP_ARGS(neigh, err)
+);
+
+DEFINE_EVENT(neigh__update, neigh_event_send_done,
+ TP_PROTO(struct neighbour *neigh, int err),
+ TP_ARGS(neigh, err)
+);
+
+DEFINE_EVENT(neigh__update, neigh_event_send_dead,
+ TP_PROTO(struct neighbour *neigh, int err),
+ TP_ARGS(neigh, err)
+);
+
+DEFINE_EVENT(neigh__update, neigh_cleanup_and_release,
+ TP_PROTO(struct neighbour *neigh, int rc),
+ TP_ARGS(neigh, rc)
+);
+
+#endif /* _TRACE_NEIGH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index 277bb9d25779..aef6869f563d 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -109,6 +109,16 @@ TRACE_EVENT(spi_message_done,
(unsigned)__entry->actual, (unsigned)__entry->frame)
);
+/*
+ * consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
+ * that only exists to work with controllers that have SPI_CONTROLLER_MUST_TX or
+ * SPI_CONTROLLER_MUST_RX.
+ */
+#define spi_valid_txbuf(msg, xfer) \
+ (xfer->tx_buf && xfer->tx_buf != msg->spi->controller->dummy_tx)
+#define spi_valid_rxbuf(msg, xfer) \
+ (xfer->rx_buf && xfer->rx_buf != msg->spi->controller->dummy_rx)
+
DECLARE_EVENT_CLASS(spi_transfer,
TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
@@ -120,6 +130,10 @@ DECLARE_EVENT_CLASS(spi_transfer,
__field( int, chip_select )
__field( struct spi_transfer *, xfer )
__field( int, len )
+ __dynamic_array(u8, rx_buf,
+ spi_valid_rxbuf(msg, xfer) ? xfer->len : 0)
+ __dynamic_array(u8, tx_buf,
+ spi_valid_txbuf(msg, xfer) ? xfer->len : 0)
),
TP_fast_assign(
@@ -127,12 +141,21 @@ DECLARE_EVENT_CLASS(spi_transfer,
__entry->chip_select = msg->spi->chip_select;
__entry->xfer = xfer;
__entry->len = xfer->len;
+
+ if (spi_valid_txbuf(msg, xfer))
+ memcpy(__get_dynamic_array(tx_buf),
+ xfer->tx_buf, xfer->len);
+
+ if (spi_valid_rxbuf(msg, xfer))
+ memcpy(__get_dynamic_array(rx_buf),
+ xfer->rx_buf, xfer->len);
),
- TP_printk("spi%d.%d %p len=%d", (int)__entry->bus_num,
- (int)__entry->chip_select,
- (struct spi_message *)__entry->xfer,
- (int)__entry->len)
+ TP_printk("spi%d.%d %p len=%d tx=[%*phD] rx=[%*phD]",
+ __entry->bus_num, __entry->chip_select,
+ __entry->xfer, __entry->len,
+ __get_dynamic_array_len(tx_buf), __get_dynamic_array(tx_buf),
+ __get_dynamic_array_len(rx_buf), __get_dynamic_array(rx_buf))
);
DEFINE_EVENT(spi_transfer, spi_transfer_start,
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index e7ee32861d51..abd238d0f7a4 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -15,9 +15,7 @@
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index a12692e5f7a8..c8b430cb6dc4 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -3,6 +3,7 @@
#define __ASM_GENERIC_SOCKET_H
#include <asm/sockios.h>
+#include <asm/bitsperlong.h>
/* For setsockopt(2) */
#define SOL_SOCKET 1
@@ -29,8 +30,8 @@
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
+#define SO_RCVTIMEO_OLD 20
+#define SO_SNDTIMEO_OLD 21
#endif
/* Security levels - as per NRL IPv6 - don't actually do anything */
@@ -46,21 +47,14 @@
#define SO_GET_FILTER SO_ATTACH_FILTER
#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
#define SO_ACCEPTCONN 30
#define SO_PEERSEC 31
#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
#define SO_MARK 36
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
#define SO_PROTOCOL 38
#define SO_DOMAIN 39
@@ -110,4 +104,42 @@
#define SO_TXTIME 61
#define SCM_TXTIME SO_TXTIME
+#define SO_BINDTOIFINDEX 62
+
+#define SO_TIMESTAMP_OLD 29
+#define SO_TIMESTAMPNS_OLD 35
+#define SO_TIMESTAMPING_OLD 37
+
+#define SO_TIMESTAMP_NEW 63
+#define SO_TIMESTAMPNS_NEW 64
+#define SO_TIMESTAMPING_NEW 65
+
+#define SO_RCVTIMEO_NEW 66
+#define SO_SNDTIMEO_NEW 67
+
+#if !defined(__KERNEL__)
+
+#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
+/* on 64-bit and x32, avoid the ?: operator */
+#define SO_TIMESTAMP SO_TIMESTAMP_OLD
+#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD
+#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD
+
+#define SO_RCVTIMEO SO_RCVTIMEO_OLD
+#define SO_SNDTIMEO SO_SNDTIMEO_OLD
+#else
+#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW)
+#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW)
+#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW)
+
+#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW)
+#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW)
+#endif
+
+#define SCM_TIMESTAMP SO_TIMESTAMP
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#endif
+
#endif /* __ASM_GENERIC_SOCKET_H */
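
From userspace nothing changes syntactically: SO_TIMESTAMP and friends now resolve to the _OLD or _NEW value at compile time, depending on whether time_t matches the kernel long type. A small sketch (the function name is illustrative):

	#include <sys/socket.h>

	static int enable_rx_timestamps(int fd)
	{
		int on = 1;

		/* expands to SO_TIMESTAMP_OLD or SO_TIMESTAMP_NEW as selected
		 * by the #if block above */
		return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
	}
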
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index d90127298f12..12cdf611d217 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -38,8 +38,10 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
-__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
+__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
+#endif
/* fs/xattr.c */
#define __NR_setxattr 5
@@ -179,7 +181,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
#define __NR_fchown 55
__SYSCALL(__NR_fchown, sys_fchown)
#define __NR_openat 56
-__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
+__SYSCALL(__NR_openat, sys_openat)
#define __NR_close 57
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
@@ -222,10 +224,12 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
/* fs/select.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
-__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
+__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
#define __NR_ppoll 73
-__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
+__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
+#endif
/* fs/signalfd.c */
#define __NR_signalfd4 74
@@ -269,16 +273,20 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
-__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
- compat_sys_timerfd_settime)
+__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
+ sys_timerfd_settime)
#define __NR_timerfd_gettime 87
-__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
- compat_sys_timerfd_gettime)
+__SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
+ sys_timerfd_gettime)
+#endif
/* fs/utimes.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
-__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
+__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
+#endif
/* kernel/acct.c */
#define __NR_acct 89
@@ -309,8 +317,10 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address)
__SYSCALL(__NR_unshare, sys_unshare)
/* kernel/futex.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
-__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
+__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
+#endif
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@@ -319,8 +329,10 @@ __SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
/* kernel/hrtimer.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
-__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
+__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
+#endif
/* kernel/itimer.c */
#define __NR_getitimer 102
@@ -341,23 +353,29 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
-__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
+__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
+#endif
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
-__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
+__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
+#endif
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
-__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
+__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
#define __NR_clock_gettime 113
-__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
+__SC_3264(__NR_clock_gettime, sys_clock_gettime32, sys_clock_gettime)
#define __NR_clock_getres 114
-__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
+__SC_3264(__NR_clock_getres, sys_clock_getres_time32, sys_clock_getres)
#define __NR_clock_nanosleep 115
-__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
- compat_sys_clock_nanosleep)
+__SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
+ sys_clock_nanosleep)
+#endif
/* kernel/printk.c */
#define __NR_syslog 116
@@ -388,9 +406,11 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
-__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
- compat_sys_sched_rr_get_interval)
+__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
+ sys_sched_rr_get_interval)
+#endif
/* kernel/signal.c */
#define __NR_restart_syscall 128
@@ -411,9 +431,11 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
-__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
- compat_sys_rt_sigtimedwait)
+__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
+ sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
+#endif
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
@@ -467,10 +489,15 @@ __SYSCALL(__NR_uname, sys_newuname)
__SYSCALL(__NR_sethostname, sys_sethostname)
#define __NR_setdomainname 162
__SYSCALL(__NR_setdomainname, sys_setdomainname)
+
+#ifdef __ARCH_WANT_SET_GET_RLIMIT
+/* getrlimit and setrlimit are superseded by prlimit64 */
#define __NR_getrlimit 163
__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
#define __NR_setrlimit 164
__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#endif
+
#define __NR_getrusage 165
__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
#define __NR_umask 166
@@ -481,12 +508,14 @@ __SYSCALL(__NR_prctl, sys_prctl)
__SYSCALL(__NR_getcpu, sys_getcpu)
/* kernel/time.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
#define __NR_settimeofday 170
__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
#define __NR_adjtimex 171
-__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
+#endif
/* kernel/timer.c */
#define __NR_getpid 172
@@ -511,11 +540,13 @@ __SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
-__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
#define __NR_mq_timedreceive 183
-__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
- compat_sys_mq_timedreceive)
+__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
+ sys_mq_timedreceive)
+#endif
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
@@ -536,8 +567,10 @@ __SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+#endif
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
@@ -658,8 +691,10 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
-__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
+#endif
/*
* Architectures may provide up to 16 syscalls of their own
@@ -667,8 +702,10 @@ __SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
*/
#define __NR_arch_specific_syscall 244
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#endif
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@@ -678,10 +715,11 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
#define __NR_name_to_handle_at 264
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
-__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
- compat_sys_open_by_handle_at)
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
-__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
+#endif
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@@ -734,15 +772,60 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
-__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
+#endif
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+#if __BITS_PER_LONG == 32
+#define __NR_clock_gettime64 403
+__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
+#define __NR_clock_settime64 404
+__SYSCALL(__NR_clock_settime64, sys_clock_settime)
+#define __NR_clock_adjtime64 405
+__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
+#define __NR_clock_getres_time64 406
+__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
+#define __NR_clock_nanosleep_time64 407
+__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
+#define __NR_timer_gettime64 408
+__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
+#define __NR_timer_settime64 409
+__SYSCALL(__NR_timer_settime64, sys_timer_settime)
+#define __NR_timerfd_gettime64 410
+__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
+#define __NR_timerfd_settime64 411
+__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
+#define __NR_utimensat_time64 412
+__SYSCALL(__NR_utimensat_time64, sys_utimensat)
+#define __NR_pselect6_time64 413
+__SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+#define __NR_ppoll_time64 414
+__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+#define __NR_io_pgetevents_time64 416
+__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+#define __NR_recvmmsg_time64 417
+__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+#define __NR_mq_timedsend_time64 418
+__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
+#define __NR_mq_timedreceive_time64 419
+__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
+#define __NR_semtimedop_time64 420
+__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
+#define __NR_rt_sigtimedwait_time64 421
+__SC_COMP(__NR_rt_sigtimedwait_time64, sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time64)
+#define __NR_futex_time64 422
+__SYSCALL(__NR_futex_time64, sys_futex)
+#define __NR_sched_rr_get_interval_time64 423
+__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#endif
#undef __NR_syscalls
-#define __NR_syscalls 295
+#define __NR_syscalls 424
/*
* 32 bit systems traditionally used different
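The time64 block above gives 32-bit architectures new syscall numbers (403-423) that take 64-bit time_t arguments, while the old time32 entries move behind __ARCH_WANT_TIME32_SYSCALLS. Below is a minimal sketch of calling the new clock_gettime64 entry directly from a 32-bit program; the fallback number and the local mirror of struct __kernel_timespec are illustration-only assumptions, not part of the patch.

    /* Sketch: invoke clock_gettime64 on a 32-bit build; older kernels
     * return ENOSYS and a real program would fall back to the classic
     * 32-bit clock_gettime.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <time.h>

    #ifndef __NR_clock_gettime64
    #define __NR_clock_gettime64 403	/* generic number from the hunk above */
    #endif

    struct kernel_timespec64 {		/* mirrors struct __kernel_timespec */
    	int64_t tv_sec;
    	long long tv_nsec;
    };

    int main(void)
    {
    	struct kernel_timespec64 ts;

    	if (syscall(__NR_clock_gettime64, CLOCK_MONOTONIC, &ts) == 0)
    		printf("monotonic: %lld.%09lld\n",
    		       (long long)ts.tv_sec, ts.tv_nsec);
    	else
    		perror("clock_gettime64");	/* ENOSYS on older kernels */
    	return 0;
    }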
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index b9ba520f7e4b..2832134e5397 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -41,6 +41,14 @@ enum {
enum {
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+
+ /**
+ * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
+ *
+	 * When set, causes senders to include their security
+	 * context
+ */
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};
#ifdef BINDER_IPC_32BIT
@@ -218,6 +226,7 @@ struct binder_node_info_for_ref {
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
+#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
/*
* NOTE: Two special error codes you should check for when calling
@@ -276,6 +285,11 @@ struct binder_transaction_data {
} data;
};
+struct binder_transaction_data_secctx {
+ struct binder_transaction_data transaction_data;
+ binder_uintptr_t secctx;
+};
+
struct binder_transaction_data_sg {
struct binder_transaction_data transaction_data;
binder_size_t buffers_size;
@@ -311,6 +325,11 @@ enum binder_driver_return_protocol {
BR_OK = _IO('r', 1),
/* No parameters! */
+ BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
+ struct binder_transaction_data_secctx),
+ /*
+ * binder_transaction_data_secctx: the received command.
+ */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*
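The binder changes above add FLAT_BINDER_FLAG_TXN_SECURITY_CTX, a BINDER_SET_CONTEXT_MGR_EXT ioctl, and a BR_TRANSACTION_SEC_CTX return carrying struct binder_transaction_data_secctx. A rough sketch of how a context manager might opt in, falling back to the old ioctl on kernels that predate the flag; the helper name is invented for illustration.

    /* Sketch: register as binder context manager with security contexts
     * enabled. The rest of the binder protocol and error handling are
     * omitted.
     */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>

    int become_context_manager(int binder_fd)
    {
    	struct flat_binder_object obj;

    	memset(&obj, 0, sizeof(obj));
    	obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;

    	/* Fall back to the pre-existing ioctl if the extended one is absent. */
    	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &obj) < 0)
    		return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
    	return 0;
    }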
diff --git a/include/uapi/linux/android/binder_ctl.h b/include/uapi/linux/android/binderfs.h
index 65b2efd1a0a5..87410477aea9 100644
--- a/include/uapi/linux/android/binder_ctl.h
+++ b/include/uapi/linux/android/binderfs.h
@@ -4,8 +4,8 @@
*
*/
-#ifndef _UAPI_LINUX_BINDER_CTL_H
-#define _UAPI_LINUX_BINDER_CTL_H
+#ifndef _UAPI_LINUX_BINDERFS_H
+#define _UAPI_LINUX_BINDERFS_H
#include <linux/android/binder.h>
#include <linux/types.h>
@@ -22,8 +22,8 @@
*/
struct binderfs_device {
char name[BINDERFS_MAX_NAME + 1];
- __u8 major;
- __u8 minor;
+ __u32 major;
+ __u32 minor;
};
/**
@@ -31,5 +31,5 @@ struct binderfs_device {
*/
#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
-#endif /* _UAPI_LINUX_BINDER_CTL_H */
+#endif /* _UAPI_LINUX_BINDERFS_H */
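With the header renamed to binderfs.h and major/minor widened to __u32, device allocation still goes through BINDER_CTL_ADD on the binder-control node. A minimal sketch, assuming binderfs is already mounted at /dev/binderfs; the device name is only an example.

    /* Sketch: allocate a new binder device through binderfs. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/android/binderfs.h>

    int main(void)
    {
    	struct binderfs_device device;
    	int fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);

    	if (fd < 0)
    		return 1;

    	memset(&device, 0, sizeof(device));
    	strncpy(device.name, "my-binder", sizeof(device.name) - 1);

    	if (ioctl(fd, BINDER_CTL_ADD, &device) < 0)
    		perror("BINDER_CTL_ADD");
    	else
    		printf("allocated %s as %u:%u\n",
    		       device.name, device.major, device.minor);
    	close(fd);
    	return 0;
    }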
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 36a7e3f18e69..f28acd952d03 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -400,6 +400,8 @@ enum {
/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */
#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_RISCV32 (EM_RISCV|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_RISCV64 (EM_RISCV|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_S390 (EM_S390)
#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT)
#define AUDIT_ARCH_SH (EM_SH)
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 894d8d2f713d..c99336f4eefe 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -229,7 +229,7 @@ struct batadv_ogm_packet {
* @packet_type: batman-adv packet type, part of the general header
* @version: batman-adv protocol version, part of the general header
* @ttl: time to live for this packet, part of the general header
- * @flags: reseved for routing relevant flags - currently always 0
+ * @flags: reserved for routing relevant flags - currently always 0
* @seqno: sequence number
* @orig: originator mac address
* @tvlv_len: length of the appended tvlv buffer (in bytes)
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 324a0e1143e7..305bf316dd03 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: MIT */
-/* Copyright (C) 2016-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2019 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*
@@ -27,6 +27,7 @@
#define BATADV_NL_NAME "batadv"
+#define BATADV_NL_MCAST_GROUP_CONFIG "config"
#define BATADV_NL_MCAST_GROUP_TPMETER "tpmeter"
/**
@@ -139,6 +140,20 @@ enum batadv_mcast_flags_priv {
};
/**
+ * enum batadv_gw_modes - gateway mode of node
+ */
+enum batadv_gw_modes {
+ /** @BATADV_GW_MODE_OFF: gw mode disabled */
+ BATADV_GW_MODE_OFF,
+
+ /** @BATADV_GW_MODE_CLIENT: send DHCP requests to gw servers */
+ BATADV_GW_MODE_CLIENT,
+
+	/** @BATADV_GW_MODE_SERVER: announce itself as gateway server */
+ BATADV_GW_MODE_SERVER,
+};
+
+/**
* enum batadv_nl_attrs - batman-adv netlink attributes
*/
enum batadv_nl_attrs {
@@ -344,6 +359,138 @@ enum batadv_nl_attrs {
*/
BATADV_ATTR_MCAST_FLAGS_PRIV,
+ /**
+ * @BATADV_ATTR_VLANID: VLAN id on top of soft interface
+ */
+ BATADV_ATTR_VLANID,
+
+ /**
+ * @BATADV_ATTR_AGGREGATED_OGMS_ENABLED: whether the batman protocol
+ * messages of the mesh interface shall be aggregated or not.
+ */
+ BATADV_ATTR_AGGREGATED_OGMS_ENABLED,
+
+ /**
+ * @BATADV_ATTR_AP_ISOLATION_ENABLED: whether the data traffic going
+ * from a wireless client to another wireless client will be silently
+ * dropped.
+ */
+ BATADV_ATTR_AP_ISOLATION_ENABLED,
+
+ /**
+ * @BATADV_ATTR_ISOLATION_MARK: the isolation mark which is used to
+ * classify clients as "isolated" by the Extended Isolation feature.
+ */
+ BATADV_ATTR_ISOLATION_MARK,
+
+ /**
+ * @BATADV_ATTR_ISOLATION_MASK: the isolation (bit)mask which is used to
+ * classify clients as "isolated" by the Extended Isolation feature.
+ */
+ BATADV_ATTR_ISOLATION_MASK,
+
+ /**
+ * @BATADV_ATTR_BONDING_ENABLED: whether the data traffic going through
+ * the mesh will be sent using multiple interfaces at the same time.
+ */
+ BATADV_ATTR_BONDING_ENABLED,
+
+ /**
+ * @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED: whether the bridge loop
+ * avoidance feature is enabled. This feature detects and avoids loops
+ * between the mesh and devices bridged with the soft interface
+ */
+ BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED,
+
+ /**
+ * @BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED: whether the distributed
+ * arp table feature is enabled. This feature uses a distributed hash
+ * table to answer ARP requests without flooding the request through
+ * the whole mesh.
+ */
+ BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED,
+
+ /**
+ * @BATADV_ATTR_FRAGMENTATION_ENABLED: whether the data traffic going
+ * through the mesh will be fragmented or silently discarded if the
+ * packet size exceeds the outgoing interface MTU.
+ */
+ BATADV_ATTR_FRAGMENTATION_ENABLED,
+
+ /**
+ * @BATADV_ATTR_GW_BANDWIDTH_DOWN: defines the download bandwidth which
+	 * is propagated by this node if %BATADV_ATTR_GW_MODE was set
+ * to 'server'.
+ */
+ BATADV_ATTR_GW_BANDWIDTH_DOWN,
+
+ /**
+ * @BATADV_ATTR_GW_BANDWIDTH_UP: defines the upload bandwidth which
+	 * is propagated by this node if %BATADV_ATTR_GW_MODE was set
+ * to 'server'.
+ */
+ BATADV_ATTR_GW_BANDWIDTH_UP,
+
+ /**
+ * @BATADV_ATTR_GW_MODE: defines the state of the gateway features.
+ * Possible values are specified in enum batadv_gw_modes
+ */
+ BATADV_ATTR_GW_MODE,
+
+ /**
+ * @BATADV_ATTR_GW_SEL_CLASS: defines the selection criteria this node
+ * will use to choose a gateway if gw_mode was set to 'client'.
+ */
+ BATADV_ATTR_GW_SEL_CLASS,
+
+ /**
+ * @BATADV_ATTR_HOP_PENALTY: defines the penalty which will be applied
+ * to an originator message's tq-field on every hop.
+ */
+ BATADV_ATTR_HOP_PENALTY,
+
+ /**
+	 * @BATADV_ATTR_LOG_LEVEL: bitmask to define which debug messages
+	 * should be sent to the debug log/trace ring buffer
+ */
+ BATADV_ATTR_LOG_LEVEL,
+
+ /**
+ * @BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED: whether multicast
+ * optimizations should be replaced by simple broadcast-like flooding
+ * of multicast packets. If set to non-zero then all nodes in the mesh
+ * are going to use classic flooding for any multicast packet with no
+ * optimizations.
+ */
+ BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED,
+
+ /**
+ * @BATADV_ATTR_NETWORK_CODING_ENABLED: whether Network Coding (using
+ * some magic to send fewer wifi packets but still the same content) is
+ * enabled or not.
+ */
+ BATADV_ATTR_NETWORK_CODING_ENABLED,
+
+ /**
+ * @BATADV_ATTR_ORIG_INTERVAL: defines the interval in milliseconds in
+ * which batman sends its protocol messages.
+ */
+ BATADV_ATTR_ORIG_INTERVAL,
+
+ /**
+ * @BATADV_ATTR_ELP_INTERVAL: defines the interval in milliseconds in
+ * which batman emits probing packets for neighbor sensing (ELP).
+ */
+ BATADV_ATTR_ELP_INTERVAL,
+
+ /**
+ * @BATADV_ATTR_THROUGHPUT_OVERRIDE: defines the throughput value to be
+ * used by B.A.T.M.A.N. V when estimating the link throughput using
+ * this interface. If the value is set to 0 then batman-adv will try to
+ * estimate the throughput by itself.
+ */
+ BATADV_ATTR_THROUGHPUT_OVERRIDE,
+
/* add attributes above here, update the policy in netlink.c */
/**
@@ -372,10 +519,14 @@ enum batadv_nl_commands {
BATADV_CMD_UNSPEC,
/**
- * @BATADV_CMD_GET_MESH_INFO: Query basic information about batman-adv
- * device
+ * @BATADV_CMD_GET_MESH: Get attributes from softif/mesh
+ */
+ BATADV_CMD_GET_MESH,
+
+ /**
+ * @BATADV_CMD_GET_MESH_INFO: Alias for @BATADV_CMD_GET_MESH
*/
- BATADV_CMD_GET_MESH_INFO,
+ BATADV_CMD_GET_MESH_INFO = BATADV_CMD_GET_MESH,
/**
* @BATADV_CMD_TP_METER: Start a tp meter session
@@ -393,9 +544,15 @@ enum batadv_nl_commands {
BATADV_CMD_GET_ROUTING_ALGOS,
/**
- * @BATADV_CMD_GET_HARDIFS: Query list of hard interfaces
+ * @BATADV_CMD_GET_HARDIF: Get attributes from a hardif of the
+ * current softif
*/
- BATADV_CMD_GET_HARDIFS,
+ BATADV_CMD_GET_HARDIF,
+
+ /**
+ * @BATADV_CMD_GET_HARDIFS: Alias for @BATADV_CMD_GET_HARDIF
+ */
+ BATADV_CMD_GET_HARDIFS = BATADV_CMD_GET_HARDIF,
/**
* @BATADV_CMD_GET_TRANSTABLE_LOCAL: Query list of local translations
@@ -443,6 +600,29 @@ enum batadv_nl_commands {
*/
BATADV_CMD_GET_MCAST_FLAGS,
+ /**
+ * @BATADV_CMD_SET_MESH: Set attributes for softif/mesh
+ */
+ BATADV_CMD_SET_MESH,
+
+ /**
+ * @BATADV_CMD_SET_HARDIF: Set attributes for hardif of the
+ * current softif
+ */
+ BATADV_CMD_SET_HARDIF,
+
+ /**
+ * @BATADV_CMD_GET_VLAN: Get attributes from a VLAN of the
+ * current softif
+ */
+ BATADV_CMD_GET_VLAN,
+
+ /**
+ * @BATADV_CMD_SET_VLAN: Set attributes for VLAN of the
+ * current softif
+ */
+ BATADV_CMD_SET_VLAN,
+
/* add new commands above here */
/**
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 6fa38d001d84..498eec813494 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -138,6 +138,7 @@ struct blk_zone_range {
* @BLKRESETZONE: Reset the write pointer of the zones in the specified
* sector range. The sector range must be zone aligned.
* @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
+ * @BLKGETNRZONES: Get the total number of zones of the device.
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
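BLKGETNRZONES complements BLKGETZONESZ, so userspace can size a zone report buffer without issuing BLKREPORTZONE twice. A small sketch; both ioctls take a __u32 and the device path is only an example.

    /* Sketch: query zone geometry of a zoned block device. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/blkzoned.h>

    int main(int argc, char **argv)
    {
    	unsigned int nr_zones = 0, zone_sectors = 0;
    	int fd = open(argc > 1 ? argv[1] : "/dev/nullb0", O_RDONLY);

    	if (fd < 0)
    		return 1;
    	if (ioctl(fd, BLKGETZONESZ, &zone_sectors) == 0 &&
    	    ioctl(fd, BLKGETNRZONES, &nr_zones) == 0)
    		printf("%u zones of %u sectors each\n", nr_zones, zone_sectors);
    	close(fd);
    	return 0;
    }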
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 91c43884f295..3c38ac9a92a7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -14,6 +14,7 @@
/* Extended instruction set based on top of classic BPF */
/* instruction classes */
+#define BPF_JMP32 0x06 /* jmp mode in word width */
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
@@ -266,6 +267,7 @@ enum bpf_attach_type {
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
+#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC (1U << 0)
@@ -2014,6 +2016,19 @@ union bpf_attr {
* Only works if *skb* contains an IPv6 packet. Insert a
* Segment Routing Header (**struct ipv6_sr_hdr**) inside
* the IPv6 header.
+ * **BPF_LWT_ENCAP_IP**
+ * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
+ * must be IPv4 or IPv6, followed by zero or more
+ * additional headers, up to LWT_BPF_MAX_HEADROOM total
+ * bytes in all prepended headers. Please note that
+ * if skb_is_gso(skb) is true, no more than two headers
+ * can be prepended, and the inner header, if present,
+ * should be either GRE or UDP/GUE.
+ *
+ * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of
+ * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called
+ * by bpf programs of types BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT.
*
 *		A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
@@ -2327,6 +2342,30 @@ union bpf_attr {
* "**y**".
* Return
* 0
+ *
+ * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_sock** pointer such
+ * that all the fields in bpf_sock can be accessed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or NULL in
+ * case of failure.
+ *
+ * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_tcp_sock** pointer from a
+ * **struct bpf_sock** pointer.
+ *
+ * Return
+ * A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * case of failure.
+ *
+ * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * Description
+ * Sets ECN of IP header to ce (congestion encountered) if
+ * current value is ect (ECN capable). Works with IPv6 and IPv4.
+ * Return
+ * 1 if set, 0 if not set.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2421,7 +2460,12 @@ union bpf_attr {
FN(map_peek_elem), \
FN(msg_push_data), \
FN(msg_pop_data), \
- FN(rc_pointer_rel),
+ FN(rc_pointer_rel), \
+ FN(spin_lock), \
+ FN(spin_unlock), \
+ FN(sk_fullsock), \
+ FN(tcp_sock), \
+ FN(skb_ecn_set_ce),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2494,7 +2538,8 @@ enum bpf_hdr_start_off {
/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6,
- BPF_LWT_ENCAP_SEG6_INLINE
+ BPF_LWT_ENCAP_SEG6_INLINE,
+ BPF_LWT_ENCAP_IP,
};
#define __bpf_md_ptr(type, name) \
@@ -2540,6 +2585,8 @@ struct __sk_buff {
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
__u64 tstamp;
__u32 wire_len;
+ __u32 gso_segs;
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
struct bpf_tunnel_key {
@@ -2581,7 +2628,15 @@ enum bpf_ret_code {
BPF_DROP = 2,
/* 3-6 reserved */
BPF_REDIRECT = 7,
- /* >127 are reserved for prog type specific return codes */
+ /* >127 are reserved for prog type specific return codes.
+ *
+ * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
+ * changed and should be routed based on its new L3 header.
+ * (This is an L3 redirect, as opposed to L2 redirect
+ * represented by BPF_REDIRECT above).
+ */
+ BPF_LWT_REROUTE = 128,
};
struct bpf_sock {
@@ -2591,14 +2646,52 @@ struct bpf_sock {
__u32 protocol;
__u32 mark;
__u32 priority;
- __u32 src_ip4; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ /* IP address also allows 1 and 2 bytes access */
+ __u32 src_ip4;
+ __u32 src_ip6[4];
+ __u32 src_port; /* host byte order */
+ __u32 dst_port; /* network byte order */
+ __u32 dst_ip4;
+ __u32 dst_ip6[4];
+ __u32 state;
+};
+
+struct bpf_tcp_sock {
+ __u32 snd_cwnd; /* Sending congestion window */
+ __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ __u32 rtt_min;
+ __u32 snd_ssthresh; /* Slow start size threshold */
+ __u32 rcv_nxt; /* What we want to receive next */
+ __u32 snd_nxt; /* Next sequence we send */
+ __u32 snd_una; /* First byte we want an ack for */
+ __u32 mss_cache; /* Cached effective mss, not including SACKS */
+ __u32 ecn_flags; /* ECN status bits. */
+ __u32 rate_delivered; /* saved rate sample: packets delivered */
+ __u32 rate_interval_us; /* saved rate sample: time elapsed */
+ __u32 packets_out; /* Packets which are "in flight" */
+ __u32 retrans_out; /* Retransmitted packets out */
+ __u32 total_retrans; /* Total retransmits for entire connection */
+ __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
*/
- __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
+ * total number of data segments in.
*/
- __u32 src_port; /* Allows 4-byte read.
- * Stored in host byte order
+ __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
+ __u32 lost_out; /* Lost packets */
+ __u32 sacked_out; /* SACK'd packets */
+ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+ * were acked.
+ */
+ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
*/
};
@@ -2728,6 +2821,8 @@ struct bpf_prog_info {
__u32 jited_line_info_rec_size;
__u32 nr_prog_tags;
__aligned_u64 prog_tags;
+ __u64 run_time_ns;
+ __u64 run_cnt;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -3054,4 +3149,7 @@ struct bpf_line_info {
__u32 line_col;
};
+struct bpf_spin_lock {
+ __u32 val;
+};
#endif /* _UAPI__LINUX_BPF_H__ */
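The new sk_fullsock/tcp_sock/skb_ecn_set_ce helpers documented above are aimed at cgroup skb programs. A rough sketch of how they might be combined, assuming the helper declarations and SEC() macro from the kernel selftests' bpf_helpers.h; the congestion-window threshold is arbitrary.

    /* Sketch of a BPF program (clang -target bpf): look up the full
     * socket behind skb->sk, read TCP state, and mark CE on small-cwnd
     * flows. Return 1 to allow the packet in cgroup_skb programs.
     */
    #include <linux/bpf.h>
    #include <linux/in.h>
    #include "bpf_helpers.h"

    SEC("cgroup_skb/egress")
    int mark_ce(struct __sk_buff *skb)
    {
    	struct bpf_sock *sk = skb->sk;
    	struct bpf_tcp_sock *tp;

    	if (!sk)
    		return 1;
    	sk = bpf_sk_fullsock(sk);
    	if (!sk || sk->protocol != IPPROTO_TCP)
    		return 1;
    	tp = bpf_tcp_sock(sk);
    	if (tp && tp->snd_cwnd < 10)		/* arbitrary threshold */
    		bpf_skb_ecn_set_ce(skb);
    	return 1;
    }

    char _license[] SEC("license") = "GPL";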
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index e0763bc4158e..c195896d478f 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -837,6 +837,8 @@ enum btrfs_err_code {
struct btrfs_ioctl_vol_args)
#define BTRFS_IOC_SCAN_DEV _IOW(BTRFS_IOCTL_MAGIC, 4, \
struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_FORGET_DEV _IOW(BTRFS_IOCTL_MAGIC, 5, \
+ struct btrfs_ioctl_vol_args)
/* trans start and trans end are dangerous, and only for
* use by applications that know how to avoid the
* resulting deadlocks
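BTRFS_IOC_FORGET_DEV is issued against the btrfs control node rather than a mounted filesystem, and an empty path asks the kernel to forget every scanned, unmounted device. A hedged sketch; the helper name is invented.

    /* Sketch: drop a scanned btrfs device (or all of them) from the
     * kernel's device cache via /dev/btrfs-control.
     */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/btrfs.h>

    int btrfs_forget(const char *path)	/* NULL means "forget all" */
    {
    	struct btrfs_ioctl_vol_args args;
    	int ret, fd = open("/dev/btrfs-control", O_RDWR);

    	if (fd < 0)
    		return -1;
    	memset(&args, 0, sizeof(args));
    	if (path)
    		strncpy(args.name, path, sizeof(args.name) - 1);
    	ret = ioctl(fd, BTRFS_IOC_FORGET_DEV, &args);
    	close(fd);
    	return ret;
    }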
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 6e52d3660654..5bb4ea67d84f 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -89,6 +89,22 @@ enum devlink_command {
DEVLINK_CMD_REGION_DEL,
DEVLINK_CMD_REGION_READ,
+ DEVLINK_CMD_PORT_PARAM_GET, /* can dump */
+ DEVLINK_CMD_PORT_PARAM_SET,
+ DEVLINK_CMD_PORT_PARAM_NEW,
+ DEVLINK_CMD_PORT_PARAM_DEL,
+
+ DEVLINK_CMD_INFO_GET, /* can dump */
+
+ DEVLINK_CMD_HEALTH_REPORTER_GET,
+ DEVLINK_CMD_HEALTH_REPORTER_SET,
+ DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
+ DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
+ DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
+ DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
+
+ DEVLINK_CMD_FLASH_UPDATE,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -285,6 +301,37 @@ enum devlink_attr {
DEVLINK_ATTR_REGION_CHUNK_ADDR, /* u64 */
DEVLINK_ATTR_REGION_CHUNK_LEN, /* u64 */
+ DEVLINK_ATTR_INFO_DRIVER_NAME, /* string */
+ DEVLINK_ATTR_INFO_SERIAL_NUMBER, /* string */
+ DEVLINK_ATTR_INFO_VERSION_FIXED, /* nested */
+ DEVLINK_ATTR_INFO_VERSION_RUNNING, /* nested */
+ DEVLINK_ATTR_INFO_VERSION_STORED, /* nested */
+ DEVLINK_ATTR_INFO_VERSION_NAME, /* string */
+ DEVLINK_ATTR_INFO_VERSION_VALUE, /* string */
+
+ DEVLINK_ATTR_SB_POOL_CELL_SIZE, /* u32 */
+
+ DEVLINK_ATTR_FMSG, /* nested */
+ DEVLINK_ATTR_FMSG_OBJ_NEST_START, /* flag */
+ DEVLINK_ATTR_FMSG_PAIR_NEST_START, /* flag */
+ DEVLINK_ATTR_FMSG_ARR_NEST_START, /* flag */
+ DEVLINK_ATTR_FMSG_NEST_END, /* flag */
+ DEVLINK_ATTR_FMSG_OBJ_NAME, /* string */
+ DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE, /* u8 */
+ DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA, /* dynamic */
+
+ DEVLINK_ATTR_HEALTH_REPORTER, /* nested */
+ DEVLINK_ATTR_HEALTH_REPORTER_NAME, /* string */
+ DEVLINK_ATTR_HEALTH_REPORTER_STATE, /* u8 */
+ DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT, /* u64 */
+ DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT, /* u64 */
+ DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS, /* u64 */
+ DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD, /* u64 */
+ DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER, /* u8 */
+
+ DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME, /* string */
+ DEVLINK_ATTR_FLASH_UPDATE_COMPONENT, /* string */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index c0151200f7d1..28491dac074b 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -3,6 +3,7 @@
#define _UAPI_LINUX_ERRQUEUE_H
#include <linux/types.h>
+#include <linux/time_types.h>
struct sock_extended_err {
__u32 ee_errno;
@@ -41,6 +42,10 @@ struct scm_timestamping {
struct timespec ts[3];
};
+struct scm_timestamping64 {
+ struct __kernel_timespec ts[3];
+};
+
/* The type of scm_timestamping, passed in sock_extended_err ee_info.
* This defines the type of ts[0]. For SCM_TSTAMP_SND only, if ts[0]
* is zero, then this is a hardware timestamp and recorded in ts[2].
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 17be76aeb468..3652b239dad1 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1432,6 +1432,13 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
+
+ /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
+ * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
+ * macro for bits > 31. The only way to use indices > 31 is to
+ * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API.
+ */
+
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
@@ -1453,15 +1460,24 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49,
ETHTOOL_LINK_MODE_FEC_RS_BIT = 50,
ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51,
-
- /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
- * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
- * macro for bits > 31. The only way to use indices > 31 is to
- * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API.
- */
-
- __ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61,
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66,
+
+ /* must be last entry */
+ __ETHTOOL_LINK_MODE_MASK_NBITS
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
@@ -1569,6 +1585,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_50000 50000
#define SPEED_56000 56000
#define SPEED_100000 100000
+#define SPEED_200000 200000
#define SPEED_UNKNOWN -1
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index 909c98fcace2..b9effa6f8503 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -7,9 +7,16 @@
/* the following events that user-space can register for */
#define FAN_ACCESS 0x00000001 /* File was accessed */
#define FAN_MODIFY 0x00000002 /* File was modified */
+#define FAN_ATTRIB 0x00000004 /* Metadata changed */
#define FAN_CLOSE_WRITE		0x00000008	/* Writable file closed */
#define FAN_CLOSE_NOWRITE	0x00000010	/* Unwritable file closed */
#define FAN_OPEN 0x00000020 /* File was opened */
+#define FAN_MOVED_FROM 0x00000040 /* File was moved from X */
+#define FAN_MOVED_TO 0x00000080 /* File was moved to Y */
+#define FAN_CREATE 0x00000100 /* Subfile was created */
+#define FAN_DELETE 0x00000200 /* Subfile was deleted */
+#define FAN_DELETE_SELF 0x00000400 /* Self was deleted */
+#define FAN_MOVE_SELF 0x00000800 /* Self was moved */
#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
@@ -24,6 +31,7 @@
/* helper events */
#define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */
+#define FAN_MOVE (FAN_MOVED_FROM | FAN_MOVED_TO) /* moves */
/* flags used for fanotify_init() */
#define FAN_CLOEXEC 0x00000001
@@ -44,6 +52,7 @@
/* Flags to determine fanotify event format */
#define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */
+#define FAN_REPORT_FID 0x00000200 /* Report unique file id */
/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \
@@ -106,6 +115,26 @@ struct fanotify_event_metadata {
__s32 pid;
};
+#define FAN_EVENT_INFO_TYPE_FID 1
+
+/* Variable length info record following event metadata */
+struct fanotify_event_info_header {
+ __u8 info_type;
+ __u8 pad;
+ __u16 len;
+};
+
+/* Unique file identifier info record */
+struct fanotify_event_info_fid {
+ struct fanotify_event_info_header hdr;
+ __kernel_fsid_t fsid;
+ /*
+ * Following is an opaque struct file_handle that can be passed as
+ * an argument to open_by_handle_at(2).
+ */
+ unsigned char handle[0];
+};
+
struct fanotify_response {
__s32 fd;
__u32 response;
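FAN_REPORT_FID changes the event format: each event is followed by a fanotify_event_info_fid record whose handle bytes form a struct file_handle that can be passed to open_by_handle_at(2) (which needs CAP_DAC_READ_SEARCH). A rough sketch of consuming FAN_CREATE events on /tmp under that format; the path and the single read() are illustrative only.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/fanotify.h>
    #include <linux/fanotify.h>

    int main(void)
    {
    	char buf[4096];
    	int fan = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);
    	int mount_fd = open("/tmp", O_RDONLY | O_DIRECTORY);

    	if (fan < 0 || mount_fd < 0)
    		return 1;
    	fanotify_mark(fan, FAN_MARK_ADD, FAN_CREATE | FAN_ONDIR,
    		      AT_FDCWD, "/tmp");

    	ssize_t len = read(fan, buf, sizeof(buf));
    	struct fanotify_event_metadata *md = (void *)buf;

    	while (FAN_EVENT_OK(md, len)) {
    		struct fanotify_event_info_fid *fid = (void *)(md + 1);

    		if (fid->hdr.info_type == FAN_EVENT_INFO_TYPE_FID) {
    			struct file_handle *fh = (void *)fid->handle;
    			int fd = open_by_handle_at(mount_fd, fh, O_RDONLY);

    			if (fd >= 0)
    				close(fd);	/* object re-opened by handle */
    		}
    		md = FAN_EVENT_NEXT(md, len);
    	}
    	return 0;
    }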
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 6448cdd9a350..a2f8658f1c55 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -41,6 +41,7 @@
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
+#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
/* (1U << 31) is reserved for signed error codes */
/*
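F_SEAL_FUTURE_WRITE forbids new writable mappings and write() calls while leaving already-existing mappings usable, which lets a producer keep writing into shared memory that later consumers can only read. A minimal sketch, with the seal value defined locally in case libc headers predate it.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/mman.h>

    #ifndef F_SEAL_FUTURE_WRITE
    #define F_SEAL_FUTURE_WRITE 0x0010	/* value from the hunk above */
    #endif

    int make_sealed_buffer(size_t size, void **out_map)
    {
    	int fd = memfd_create("shared-buf", MFD_ALLOW_SEALING);

    	if (fd < 0)
    		return -1;
    	if (ftruncate(fd, size) < 0)
    		goto err;

    	/* Map writable *before* sealing: existing mappings stay usable. */
    	*out_map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    	if (*out_map == MAP_FAILED)
    		goto err;

    	/* From now on, no new writable maps or write()s are allowed. */
    	if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
    		goto err;
    	return fd;	/* recipients can mmap PROT_READ, not PROT_WRITE */
    err:
    	close(fd);
    	return -1;
    }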
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index caf8dc019250..325395f56bfa 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -108,6 +108,8 @@ struct icmp6hdr {
#define ICMPV6_MOBILE_PREFIX_SOL 146
#define ICMPV6_MOBILE_PREFIX_ADV 147
+#define ICMPV6_MRDISC_ADV 151
+
/*
* Codes for Destination Unreachable
*/
diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h
index 61a1bf6e865e..790585f0e61b 100644
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -117,6 +117,30 @@ struct ad_info {
__u8 partner_system[ETH_ALEN];
};
+/* Embedded inside LINK_XSTATS_TYPE_BOND */
+enum {
+ BOND_XSTATS_UNSPEC,
+ BOND_XSTATS_3AD,
+ __BOND_XSTATS_MAX
+};
+#define BOND_XSTATS_MAX (__BOND_XSTATS_MAX - 1)
+
+/* Embedded inside BOND_XSTATS_3AD */
+enum {
+ BOND_3AD_STAT_LACPDU_RX,
+ BOND_3AD_STAT_LACPDU_TX,
+ BOND_3AD_STAT_LACPDU_UNKNOWN_RX,
+ BOND_3AD_STAT_LACPDU_ILLEGAL_RX,
+ BOND_3AD_STAT_MARKER_RX,
+ BOND_3AD_STAT_MARKER_TX,
+ BOND_3AD_STAT_MARKER_RESP_RX,
+ BOND_3AD_STAT_MARKER_RESP_TX,
+ BOND_3AD_STAT_MARKER_UNKNOWN_RX,
+ BOND_3AD_STAT_PAD,
+ __BOND_3AD_STAT_MAX
+};
+#define BOND_3AD_STAT_MAX (__BOND_3AD_STAT_MAX - 1)
+
#endif /* _LINUX_IF_BONDING_H */
/*
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index d6533828123a..5b225ff63b48 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -925,6 +925,7 @@ enum {
enum {
LINK_XSTATS_TYPE_UNSPEC,
LINK_XSTATS_TYPE_BRIDGE,
+ LINK_XSTATS_TYPE_BOND,
__LINK_XSTATS_TYPE_MAX
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
diff --git a/include/uapi/linux/igmp.h b/include/uapi/linux/igmp.h
index 7e44ac02ca18..90c28bc466c6 100644
--- a/include/uapi/linux/igmp.h
+++ b/include/uapi/linux/igmp.h
@@ -93,6 +93,7 @@ struct igmpv3_query {
#define IGMP_MTRACE_RESP 0x1e
#define IGMP_MTRACE 0x1f
+#define IGMP_MRDISC_ADV 0x30 /* From RFC4286 */
/*
* Use the BSD names for these for compatibility
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 92baabc103ac..fdd81affca4b 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -46,6 +46,7 @@ enum iio_chan_type {
IIO_GRAVITY,
IIO_POSITIONRELATIVE,
IIO_PHASE,
+ IIO_MASSCONCENTRATION,
};
enum iio_modifier {
@@ -87,6 +88,12 @@ enum iio_modifier {
IIO_MOD_VOC,
IIO_MOD_LIGHT_UV,
IIO_MOD_LIGHT_DUV,
+ IIO_MOD_PM1,
+ IIO_MOD_PM2P5,
+ IIO_MOD_PM4,
+ IIO_MOD_PM10,
+ IIO_MOD_ETHANOL,
+ IIO_MOD_H2,
};
enum iio_event_type {
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index f6052e70bf40..e7ad9d350a28 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -268,7 +268,7 @@ struct sockaddr_in {
#define IN_MULTICAST(a) IN_CLASSD(a)
#define IN_MULTICAST_NET 0xe0000000
-#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff)
+#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
@@ -292,10 +292,11 @@ struct sockaddr_in {
#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
/* Defines for Multicast INADDR */
-#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
-#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
-#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
-#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
+#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
+#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
+#define INADDR_ALLSNOOPERS_GROUP 0xe000006aU /* 224.0.0.106 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 71d82fe15b03..9f2273a08356 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -178,6 +178,7 @@ struct in6_flowlabel_req {
#define IPV6_JOIN_ANYCAST 27
#define IPV6_LEAVE_ANYCAST 28
#define IPV6_MULTICAST_ALL 29
+#define IPV6_ROUTER_ALERT_ISOLATE 30
/* IPV6_MTU_DISCOVER values */
#define IPV6_PMTUDISC_DONT 0
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 14565d703291..e8baca85bac6 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -137,15 +137,21 @@ enum {
INET_DIAG_TCLASS,
INET_DIAG_SKMEMINFO,
INET_DIAG_SHUTDOWN,
- INET_DIAG_DCTCPINFO,
- INET_DIAG_PROTOCOL, /* response attribute only */
+
+ /*
+	 * Next extensions cannot be requested in struct inet_diag_req_v2:
+ * its field idiag_ext has only 8 bits.
+ */
+
+ INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
+ INET_DIAG_PROTOCOL, /* response attribute only */
INET_DIAG_SKV6ONLY,
INET_DIAG_LOCALS,
INET_DIAG_PEERS,
INET_DIAG_PAD,
- INET_DIAG_MARK,
- INET_DIAG_BBRINFO,
- INET_DIAG_CLASS_ID,
+ INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
+ INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
+ INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
INET_DIAG_MD5SIG,
__INET_DIAG_MAX,
};
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index fb78f6f500f3..f056b2a00d5c 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -26,13 +26,17 @@
*/
struct input_event {
-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
+#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
struct timeval time;
#define input_event_sec time.tv_sec
#define input_event_usec time.tv_usec
#else
__kernel_ulong_t __sec;
+#if defined(__sparc__) && defined(__arch64__)
+ unsigned int __usec;
+#else
__kernel_ulong_t __usec;
+#endif
#define input_event_sec __sec
#define input_event_usec __usec
#endif
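Because the 32-bit struct input_event loses its embedded timeval here (and sparc64 keeps a 32-bit __usec), portable readers should go through the input_event_sec/input_event_usec accessors rather than a timeval member. A small sketch; the event node is just an example.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/input.h>

    int main(void)
    {
    	struct input_event ev;
    	int fd = open("/dev/input/event0", O_RDONLY);

    	if (fd < 0)
    		return 1;
    	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
    		printf("%llu.%06llu type %u code %u value %d\n",
    		       (unsigned long long)ev.input_event_sec,
    		       (unsigned long long)ev.input_event_usec,
    		       ev.type, ev.code, ev.value);
    	close(fd);
    	return 0;
    }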
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 21b9113c69da..6f2f2720f3ac 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -32,7 +32,7 @@
#define KPF_KSM 21
#define KPF_THP 22
-#define KPF_BALLOON 23
+#define KPF_OFFLINE 23
#define KPF_ZERO_PAGE 24
#define KPF_IDLE 25
#define KPF_PGTABLE 26
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index d435b00d64ad..0a552061ff1c 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -45,6 +45,7 @@
#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */
+#define MDIO_PMA_NG_EXTABLE 21 /* 2.5G/5G PMA/PMD extended ability */
#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */
#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
@@ -92,6 +93,10 @@
#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
/* 10PASS-TS/2BASE-TL */
#define MDIO_CTRL1_SPEED10P2B (MDIO_CTRL1_SPEEDSELEXT | 0x04)
+/* 2.5 Gb/s */
+#define MDIO_CTRL1_SPEED2_5G (MDIO_CTRL1_SPEEDSELEXT | 0x18)
+/* 5 Gb/s */
+#define MDIO_CTRL1_SPEED5G (MDIO_CTRL1_SPEEDSELEXT | 0x1c)
/* Status register 1. */
#define MDIO_STAT1_LPOWERABLE 0x0002 /* Low-power ability */
@@ -115,6 +120,7 @@
/* Device present registers. */
#define MDIO_DEVS_PRESENT(devad) (1 << (devad))
+#define MDIO_DEVS_C22PRESENT MDIO_DEVS_PRESENT(0)
#define MDIO_DEVS_PMAPMD MDIO_DEVS_PRESENT(MDIO_MMD_PMAPMD)
#define MDIO_DEVS_WIS MDIO_DEVS_PRESENT(MDIO_MMD_WIS)
#define MDIO_DEVS_PCS MDIO_DEVS_PRESENT(MDIO_MMD_PCS)
@@ -123,6 +129,8 @@
#define MDIO_DEVS_TC MDIO_DEVS_PRESENT(MDIO_MMD_TC)
#define MDIO_DEVS_AN MDIO_DEVS_PRESENT(MDIO_MMD_AN)
#define MDIO_DEVS_C22EXT MDIO_DEVS_PRESENT(MDIO_MMD_C22EXT)
+#define MDIO_DEVS_VEND1 MDIO_DEVS_PRESENT(MDIO_MMD_VEND1)
+#define MDIO_DEVS_VEND2 MDIO_DEVS_PRESENT(MDIO_MMD_VEND2)
/* Control register 2. */
#define MDIO_PMA_CTRL2_TYPE 0x000f /* PMA/PMD type selection */
@@ -142,6 +150,8 @@
#define MDIO_PMA_CTRL2_1000BKX 0x000d /* 1000BASE-KX type */
#define MDIO_PMA_CTRL2_100BTX 0x000e /* 100BASE-TX type */
#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */
+#define MDIO_PMA_CTRL2_2_5GBT 0x0030 /* 2.5GBaseT type */
+#define MDIO_PMA_CTRL2_5GBT 0x0031 /* 5GBaseT type */
#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */
#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */
#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */
@@ -195,6 +205,7 @@
#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */
#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */
#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */
+#define MDIO_PMA_EXTABLE_NBT 0x4000 /* 2.5/5GBASE-T ability */
/* PHY XGXS lane state register. */
#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
@@ -231,9 +242,13 @@
#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00
/* AN 10GBASE-T control register. */
+#define MDIO_AN_10GBT_CTRL_ADV2_5G 0x0080 /* Advertise 2.5GBASE-T */
+#define MDIO_AN_10GBT_CTRL_ADV5G 0x0100 /* Advertise 5GBASE-T */
#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */
/* AN 10GBASE-T status register. */
+#define MDIO_AN_10GBT_STAT_LP2_5G 0x0020 /* LP is 2.5GBT capable */
+#define MDIO_AN_10GBT_STAT_LP5G 0x0040 /* LP is 5GBT capable */
#define MDIO_AN_10GBT_STAT_LPTRR 0x0200 /* LP training reset req. */
#define MDIO_AN_10GBT_STAT_LPLTABLE 0x0400 /* LP loop timing ability */
#define MDIO_AN_10GBT_STAT_LP10G 0x0800 /* LP is 10GBT capable */
@@ -262,6 +277,10 @@
#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
+/* 2.5G/5G Extended abilities register. */
+#define MDIO_PMA_NG_EXTABLE_2_5GBT 0x0001 /* 2.5GBASET ability */
+#define MDIO_PMA_NG_EXTABLE_5GBT 0x0002 /* 5GBASET ability */
+
/* LASI RX_ALARM control/status registers. */
#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */
#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index d0f515d53299..fc1a64c3447b 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -12,6 +12,10 @@
#define OVERCOMMIT_ALWAYS 1
#define OVERCOMMIT_NEVER 2
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+
/*
* Huge page size encoding when MAP_HUGETLB is specified, and a huge page
* size other than the default is desired. See hugetlb_encode.h.
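MAP_SHARED_VALIDATE (now exposed in the uapi header) makes mmap() fail on flags it does not understand instead of silently ignoring them, which is what MAP_SYNC relies on. A sketch with fallback definitions in case older libc headers lack the constants; the pmem path is only an example.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_SHARED_VALIDATE
    #define MAP_SHARED_VALIDATE 0x03	/* from the hunk above */
    #endif
    #ifndef MAP_SYNC
    #define MAP_SYNC 0x80000
    #endif

    int main(void)
    {
    	int fd = open("/mnt/pmem/data", O_RDWR);
    	void *p;

    	if (fd < 0)
    		return 1;
    	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
    		 MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
    	if (p == MAP_FAILED)
    		perror("mmap");	/* EOPNOTSUPP if MAP_SYNC is unsupported */
    	return 0;
    }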
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 5d37a9ccce63..11c8c1fc1124 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -28,12 +28,19 @@
#define MRT_TABLE (MRT_BASE+9) /* Specify mroute table ID */
#define MRT_ADD_MFC_PROXY (MRT_BASE+10) /* Add a (*,*|G) mfc entry */
#define MRT_DEL_MFC_PROXY (MRT_BASE+11) /* Del a (*,*|G) mfc entry */
-#define MRT_MAX (MRT_BASE+11)
+#define MRT_FLUSH (MRT_BASE+12) /* Flush all mfc entries and/or vifs */
+#define MRT_MAX (MRT_BASE+12)
#define SIOCGETVIFCNT SIOCPROTOPRIVATE /* IP protocol privates */
#define SIOCGETSGCNT (SIOCPROTOPRIVATE+1)
#define SIOCGETRPF (SIOCPROTOPRIVATE+2)
+/* MRT_FLUSH optional flags */
+#define MRT_FLUSH_MFC 1 /* Flush multicast entries */
+#define MRT_FLUSH_MFC_STATIC 2 /* Flush static multicast entries */
+#define MRT_FLUSH_VIFS 4 /* Flush multicast vifs */
+#define MRT_FLUSH_VIFS_STATIC 8 /* Flush static multicast vifs */
+
#define MAXVIFS 32
typedef unsigned long vifbitmap_t; /* User mode code depends on this lot */
typedef unsigned short vifi_t;
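MRT_FLUSH lets a multicast routing daemon drop forwarding-cache entries and/or vifs selectively (via the MRT_FLUSH_* flags above) without tearing the routing socket down. A rough sketch against an IGMP routing socket, which needs CAP_NET_ADMIN; kernels without this option reject the setsockopt.

    #include <stdio.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/mroute.h>

    int main(void)
    {
    	int on = 1;
    	int flags = MRT_FLUSH_MFC;	/* dynamic cache entries only */
    	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);

    	if (s < 0)
    		return 1;
    	if (setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on)) < 0)
    		perror("MRT_INIT");
    	if (setsockopt(s, IPPROTO_IP, MRT_FLUSH, &flags, sizeof(flags)) < 0)
    		perror("MRT_FLUSH");	/* fails on kernels without this patch */
    	return 0;
    }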
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index 9999cc006390..c36177a86516 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -31,12 +31,19 @@
#define MRT6_TABLE (MRT6_BASE+9) /* Specify mroute table ID */
#define MRT6_ADD_MFC_PROXY (MRT6_BASE+10) /* Add a (*,*|G) mfc entry */
#define MRT6_DEL_MFC_PROXY (MRT6_BASE+11) /* Del a (*,*|G) mfc entry */
-#define MRT6_MAX (MRT6_BASE+11)
+#define MRT6_FLUSH (MRT6_BASE+12) /* Flush all mfc entries and/or vifs */
+#define MRT6_MAX (MRT6_BASE+12)
#define SIOCGETMIFCNT_IN6 SIOCPROTOPRIVATE /* IP protocol privates */
#define SIOCGETSGCNT_IN6 (SIOCPROTOPRIVATE+1)
#define SIOCGETRPF (SIOCPROTOPRIVATE+2)
+/* MRT6_FLUSH optional flags */
+#define MRT6_FLUSH_MFC 1 /* Flush multicast entries */
+#define MRT6_FLUSH_MFC_STATIC 2 /* Flush static multicast entries */
+#define MRT6_FLUSH_MIFS	4	/* Flush multicast vifs */
+#define MRT6_FLUSH_MIFS_STATIC 8 /* Flush static multicast vifs */
+
#define MAXMIFS 32
typedef unsigned long mifbitmap_t; /* User mode code depends on this lot */
typedef unsigned short mifi_t;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 7de4f1bdaf06..a66c8de006cc 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -219,6 +219,7 @@ enum nft_chain_attributes {
* @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64)
* @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
* @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32)
+ * @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32)
*/
enum nft_rule_attributes {
NFTA_RULE_UNSPEC,
@@ -231,6 +232,7 @@ enum nft_rule_attributes {
NFTA_RULE_USERDATA,
NFTA_RULE_PAD,
NFTA_RULE_ID,
+ NFTA_RULE_POSITION_ID,
__NFTA_RULE_MAX
};
#define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1)
@@ -789,6 +791,8 @@ enum nft_exthdr_attributes {
* @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
* @NFT_META_PRANDOM: a 32bit pseudo-random number
* @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp)
+ * @NFT_META_IIFKIND: packet input interface kind name (dev->rtnl_link_ops->kind)
+ * @NFT_META_OIFKIND: packet output interface kind name (dev->rtnl_link_ops->kind)
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -817,6 +821,8 @@ enum nft_meta_keys {
NFT_META_CGROUP,
NFT_META_PRANDOM,
NFT_META_SECPATH,
+ NFT_META_IIFKIND,
+ NFT_META_OIFKIND,
};
/**
@@ -871,8 +877,8 @@ enum nft_hash_attributes {
NFTA_HASH_SEED,
NFTA_HASH_OFFSET,
NFTA_HASH_TYPE,
- NFTA_HASH_SET_NAME,
- NFTA_HASH_SET_ID,
+ NFTA_HASH_SET_NAME, /* deprecated */
+ NFTA_HASH_SET_ID, /* deprecated */
__NFTA_HASH_MAX,
};
#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
@@ -1721,10 +1727,19 @@ enum nft_tunnel_keys {
};
#define NFT_TUNNEL_MAX (__NFT_TUNNEL_MAX - 1)
+enum nft_tunnel_mode {
+ NFT_TUNNEL_MODE_NONE,
+ NFT_TUNNEL_MODE_RX,
+ NFT_TUNNEL_MODE_TX,
+ __NFT_TUNNEL_MODE_MAX
+};
+#define NFT_TUNNEL_MODE_MAX (__NFT_TUNNEL_MODE_MAX - 1)
+
enum nft_tunnel_attributes {
NFTA_TUNNEL_UNSPEC,
NFTA_TUNNEL_KEY,
NFTA_TUNNEL_DREG,
+ NFTA_TUNNEL_MODE,
__NFTA_TUNNEL_MAX
};
#define NFTA_TUNNEL_MAX (__NFTA_TUNNEL_MAX - 1)
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 31ae5c7f10e3..dd4f86ee286e 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1565,6 +1565,12 @@ enum nl80211_commands {
* (a u32 with flags from &enum nl80211_wpa_versions).
* @NL80211_ATTR_AKM_SUITES: Used with CONNECT, ASSOCIATE, and NEW_BEACON to
* indicate which key management algorithm(s) to use (an array of u32).
+ * This attribute is also sent in response to @NL80211_CMD_GET_WIPHY,
+ * indicating the supported AKM suites, intended for specific drivers which
+ * implement SME and have constraints on which AKMs are supported and also
+ * the cases where an AKM support is offloaded to the driver/firmware.
+ * If there is no such notification from the driver, user space should
+ * assume the driver supports all the AKM suites.
*
* @NL80211_ATTR_REQ_IE: (Re)association request information elements as
* sent out by the card, for ROAM and successful CONNECT events.
@@ -2260,10 +2266,10 @@ enum nl80211_commands {
* &enum nl80211_external_auth_action value). This is used with the
* %NL80211_CMD_EXTERNAL_AUTH request event.
* @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user
- * space supports external authentication. This attribute shall be used
- * only with %NL80211_CMD_CONNECT request. The driver may offload
- * authentication processing to user space if this capability is indicated
- * in NL80211_CMD_CONNECT requests from the user space.
+ * space supports external authentication. This attribute shall be used
+ * with %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP request. The driver
+ * may offload authentication processing to user space if this capability
+ * is indicated in the respective requests from the user space.
*
* @NL80211_ATTR_NSS: Station's New/updated RX_NSS value notified using this
* u8 attribute. This is used with %NL80211_CMD_STA_OPMODE_CHANGED.
@@ -2299,6 +2305,9 @@ enum nl80211_commands {
* This is also used for capability advertisement in the wiphy information,
* with the appropriate sub-attributes.
*
+ * @NL80211_ATTR_AIRTIME_WEIGHT: Station's weight when scheduled by the airtime
+ * scheduler.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2748,6 +2757,8 @@ enum nl80211_attrs {
NL80211_ATTR_PEER_MEASUREMENTS,
+ NL80211_ATTR_AIRTIME_WEIGHT,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3125,6 +3136,9 @@ enum nl80211_sta_bss_param {
* might not be fully accurate.
* @NL80211_STA_INFO_CONNECTED_TO_GATE: set to true if STA has a path to a
* mesh gate (u8, 0 or 1)
+ * @NL80211_STA_INFO_TX_DURATION: aggregate PPDU duration for all frames
+ * sent to the station (u64, usec)
+ * @NL80211_STA_INFO_AIRTIME_WEIGHT: current airtime weight for station (u16)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3168,6 +3182,8 @@ enum nl80211_sta_info {
NL80211_STA_INFO_RX_MPDUS,
NL80211_STA_INFO_FCS_ERROR_COUNT,
NL80211_STA_INFO_CONNECTED_TO_GATE,
+ NL80211_STA_INFO_TX_DURATION,
+ NL80211_STA_INFO_AIRTIME_WEIGHT,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -3277,8 +3293,10 @@ enum nl80211_mpath_flags {
* &enum nl80211_mpath_flags;
* @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec
* @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries
+ * @NL80211_MPATH_INFO_HOP_COUNT: hop count to destination
+ * @NL80211_MPATH_INFO_PATH_CHANGE: total number of path changes to destination
* @NL80211_MPATH_INFO_MAX: highest mesh path information attribute number
- * currently defind
+ * currently defined
* @__NL80211_MPATH_INFO_AFTER_LAST: internal use
*/
enum nl80211_mpath_info {
@@ -3290,6 +3308,8 @@ enum nl80211_mpath_info {
NL80211_MPATH_INFO_FLAGS,
NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
NL80211_MPATH_INFO_DISCOVERY_RETRIES,
+ NL80211_MPATH_INFO_HOP_COUNT,
+ NL80211_MPATH_INFO_PATH_CHANGE,
/* keep last */
__NL80211_MPATH_INFO_AFTER_LAST,
@@ -5316,6 +5336,13 @@ enum nl80211_feature_flags {
* if this flag is not set. Ignoring this can leak clear text packets and/or
* freeze the connection.
*
+ * @NL80211_EXT_FEATURE_AIRTIME_FAIRNESS: Driver supports getting airtime
+ * fairness for transmitted packets and has enabled airtime fairness
+ * scheduling.
+ *
+ * @NL80211_EXT_FEATURE_AP_PMKSA_CACHING: Driver/device supports PMKSA caching
+ * (set/del PMKSA operations) in AP mode.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5355,6 +5382,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
NL80211_EXT_FEATURE_CAN_REPLACE_PTK0,
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS,
+ NL80211_EXT_FEATURE_AP_PMKSA_CACHING,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5606,9 +5635,14 @@ enum nl80211_crit_proto_id {
* Used by cfg80211_rx_mgmt()
*
* @NL80211_RXMGMT_FLAG_ANSWERED: frame was answered by device/driver.
+ * @NL80211_RXMGMT_FLAG_EXTERNAL_AUTH: Host driver intends to offload
+ * the authentication. Exclusively defined for host drivers that
+ *	advertise the SME functionality but would like the userspace
+ * to handle certain authentication algorithms (e.g. SAE).
*/
enum nl80211_rxmgmt_flags {
NL80211_RXMGMT_FLAG_ANSWERED = 1 << 0,
+ NL80211_RXMGMT_FLAG_EXTERNAL_AUTH = 1 << 1,
};
/*
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9de8780ac8d9..7198ddd0c6b1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -372,7 +372,9 @@ struct perf_event_attr {
context_switch : 1, /* context switch data */
write_backward : 1, /* Write ring buffer from end to beginning */
namespaces : 1, /* include namespaces data */
- __reserved_1 : 35;
+ ksymbol : 1, /* include ksymbol events */
+ bpf_event : 1, /* include bpf events */
+ __reserved_1 : 33;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -445,8 +447,6 @@ struct perf_event_query_bpf {
__u32 ids[0];
};
-#define perf_flags(attr) (*(&(attr)->read_format + 1))
-
/*
* Ioctls that can be done on a perf event fd:
*/
@@ -965,9 +965,58 @@ enum perf_event_type {
*/
PERF_RECORD_NAMESPACES = 16,
+ /*
+ * Record ksymbol register/unregister events:
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 addr;
+ * u32 len;
+ * u16 ksym_type;
+ * u16 flags;
+ * char name[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_KSYMBOL = 17,
+
+ /*
+ * Record bpf events:
+ * enum perf_bpf_event_type {
+ * PERF_BPF_EVENT_UNKNOWN = 0,
+ * PERF_BPF_EVENT_PROG_LOAD = 1,
+ * PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ * };
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u16 type;
+ * u16 flags;
+ * u32 id;
+ * u8 tag[BPF_TAG_SIZE];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_BPF_EVENT = 18,
+
PERF_RECORD_MAX, /* non-ABI */
};
+enum perf_record_ksymbol_type {
+ PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
+ PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
+ PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */
+};
+
+#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0)
+
+enum perf_bpf_event_type {
+ PERF_BPF_EVENT_UNKNOWN = 0,
+ PERF_BPF_EVENT_PROG_LOAD = 1,
+ PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ PERF_BPF_EVENT_MAX, /* non-ABI */
+};
+
#define PERF_MAX_STACK_DEPTH 127
#define PERF_MAX_CONTEXTS_PER_STACK 8
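The two new attr bits opt a profiler into the matching record types, so it can track kernel symbols that appear and disappear at runtime (BPF programs in particular). A minimal sketch of opening such an event; perf_event_open() has no libc wrapper, so the raw syscall is used, and the software dummy event is just a convenient carrier here:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_DUMMY;
        attr.ksymbol = 1;       /* deliver PERF_RECORD_KSYMBOL */
        attr.bpf_event = 1;     /* deliver PERF_RECORD_BPF_EVENT */

        /* pid = -1, cpu = 0: all tasks on CPU 0 (needs suitable privileges) */
        int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        /* mmap the ring buffer and watch for the two new record types ... */
        close(fd);
        return 0;
}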
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 95d0db2a8350..51a0496f78ea 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -63,12 +63,49 @@ enum {
#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
+/* These macros are put here for binary compatibility with userspace apps that
+ * make use of them. For kernel code and new userspace apps, use the TCA_ID_*
+ * versions.
+ */
+#define TCA_ACT_GACT 5
+#define TCA_ACT_IPT 6
+#define TCA_ACT_PEDIT 7
+#define TCA_ACT_MIRRED 8
+#define TCA_ACT_NAT 9
+#define TCA_ACT_XT 10
+#define TCA_ACT_SKBEDIT 11
+#define TCA_ACT_VLAN 12
+#define TCA_ACT_BPF 13
+#define TCA_ACT_CONNMARK 14
+#define TCA_ACT_SKBMOD 15
+#define TCA_ACT_CSUM 16
+#define TCA_ACT_TUNNEL_KEY 17
+#define TCA_ACT_SIMP 22
+#define TCA_ACT_IFE 25
+#define TCA_ACT_SAMPLE 26
+
/* Action type identifiers*/
-enum {
- TCA_ID_UNSPEC=0,
- TCA_ID_POLICE=1,
+enum tca_id {
+ TCA_ID_UNSPEC = 0,
+ TCA_ID_POLICE = 1,
+ TCA_ID_GACT = TCA_ACT_GACT,
+ TCA_ID_IPT = TCA_ACT_IPT,
+ TCA_ID_PEDIT = TCA_ACT_PEDIT,
+ TCA_ID_MIRRED = TCA_ACT_MIRRED,
+ TCA_ID_NAT = TCA_ACT_NAT,
+ TCA_ID_XT = TCA_ACT_XT,
+ TCA_ID_SKBEDIT = TCA_ACT_SKBEDIT,
+ TCA_ID_VLAN = TCA_ACT_VLAN,
+ TCA_ID_BPF = TCA_ACT_BPF,
+ TCA_ID_CONNMARK = TCA_ACT_CONNMARK,
+ TCA_ID_SKBMOD = TCA_ACT_SKBMOD,
+ TCA_ID_CSUM = TCA_ACT_CSUM,
+ TCA_ID_TUNNEL_KEY = TCA_ACT_TUNNEL_KEY,
+ TCA_ID_SIMP = TCA_ACT_SIMP,
+ TCA_ID_IFE = TCA_ACT_IFE,
+ TCA_ID_SAMPLE = TCA_ACT_SAMPLE,
/* other actions go here */
- __TCA_ID_MAX=255
+ __TCA_ID_MAX = 255
};
#define TCA_ID_MAX __TCA_ID_MAX
@@ -333,12 +370,19 @@ enum {
/* Basic filter */
+struct tc_basic_pcnt {
+ __u64 rcnt;
+ __u64 rhit;
+};
+
enum {
TCA_BASIC_UNSPEC,
TCA_BASIC_CLASSID,
TCA_BASIC_EMATCHES,
TCA_BASIC_ACT,
TCA_BASIC_POLICE,
+ TCA_BASIC_PCNT,
+ TCA_BASIC_PAD,
__TCA_BASIC_MAX
};
@@ -527,11 +571,17 @@ enum {
/* Match-all classifier */
+struct tc_matchall_pcnt {
+ __u64 rhit;
+};
+
enum {
TCA_MATCHALL_UNSPEC,
TCA_MATCHALL_CLASSID,
TCA_MATCHALL_ACT,
TCA_MATCHALL_FLAGS,
+ TCA_MATCHALL_PCNT,
+ TCA_MATCHALL_PAD,
__TCA_MATCHALL_MAX,
};
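Since the TCA_ACT_* values above are kept only for binary compatibility and the tca_id enumerators now alias them, the relationship can be pinned down with compile-time checks; this is just an illustration of the guarantee stated in the comment, not part of the header:

#include <linux/pkt_cls.h>

/* The legacy defines and the new enum tca_id entries must stay equal. */
_Static_assert(TCA_ID_GACT == TCA_ACT_GACT, "gact id changed");
_Static_assert(TCA_ID_MIRRED == TCA_ACT_MIRRED, "mirred id changed");
_Static_assert(TCA_ID_VLAN == TCA_ACT_VLAN, "vlan id changed");
_Static_assert(TCA_ID_SAMPLE == TCA_ACT_SAMPLE, "sample id changed");

int main(void)
{
        return 0;
}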
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 0d18b1d1fbbc..7ee74c3474bf 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -954,7 +954,7 @@ enum {
#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
struct tc_pie_xstats {
- __u32 prob; /* current probability */
+ __u64 prob; /* current probability */
__u32 delay; /* current delay in ms */
__u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
__u32 packets_in; /* total number of packets enqueued */
@@ -1021,6 +1021,7 @@ enum {
TCA_CAKE_INGRESS,
TCA_CAKE_ACK_FILTER,
TCA_CAKE_SPLIT_GSO,
+ TCA_CAKE_FWMARK,
__TCA_CAKE_MAX
};
#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
diff --git a/include/uapi/linux/pmu.h b/include/uapi/linux/pmu.h
index 97256f90e6df..f2fc1bd80017 100644
--- a/include/uapi/linux/pmu.h
+++ b/include/uapi/linux/pmu.h
@@ -19,7 +19,9 @@
#define PMU_POWER_CTRL 0x11 /* control power of some devices */
#define PMU_ADB_CMD 0x20 /* send ADB packet */
#define PMU_ADB_POLL_OFF 0x21 /* disable ADB auto-poll */
+#define PMU_WRITE_XPRAM 0x32 /* write eXtended Parameter RAM */
#define PMU_WRITE_NVRAM 0x33 /* write non-volatile RAM */
+#define PMU_READ_XPRAM 0x3a /* read eXtended Parameter RAM */
#define PMU_READ_NVRAM 0x3b /* read non-volatile RAM */
#define PMU_SET_RTC 0x30 /* set real-time clock */
#define PMU_READ_RTC 0x38 /* read real-time clock */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index b4875a93363a..094bb03b9cc2 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -219,6 +219,7 @@ struct prctl_mm_map {
# define PR_SPEC_ENABLE (1UL << 1)
# define PR_SPEC_DISABLE (1UL << 2)
# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+# define PR_SPEC_DISABLE_NOEXEC (1UL << 4)
/* Reset arm64 pointer authentication keys */
#define PR_PAC_RESET_KEYS 54
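PR_SPEC_DISABLE_NOEXEC requests the speculative-store-bypass mitigation for the current task but lets the kernel drop it again on execve(). A minimal sketch, assuming the existing PR_SET_SPECULATION_CTRL / PR_SPEC_STORE_BYPASS constants, which live in this header but outside the hunk shown:

#include <linux/prctl.h>
#include <sys/prctl.h>
#include <stdio.h>

int main(void)
{
        /* Disable speculative store bypass for this task, but let the
         * protection be cleared automatically across execve(). */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE_NOEXEC, 0, 0))
                perror("PR_SPEC_DISABLE_NOEXEC not supported");
        return 0;
}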
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index d73d83950265..1bc794ad957a 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -147,7 +147,7 @@ struct ptp_pin_desc {
#define PTP_SYS_OFFSET_PRECISE \
_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
#define PTP_SYS_OFFSET_EXTENDED \
- _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
+ _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
struct ptp_extts_event {
struct ptp_clock_time t; /* Time event occurred. */
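Direction fix aside, the extended offset ioctl is used like its older siblings: userspace fills in how many (system, PHC, system) timestamp triplets it wants and reads them back from the same structure. A rough sketch, assuming a clock node such as /dev/ptp0 and the n_samples field of struct ptp_sys_offset_extended (both conventions/definitions sit outside the hunk shown):

#include <linux/ptp_clock.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct ptp_sys_offset_extended req;
        int fd = open("/dev/ptp0", O_RDWR);     /* device path is an assumption */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&req, 0, sizeof(req));
        req.n_samples = 5;      /* ask for 5 (sys, phc, sys) triplets */
        if (ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &req))
                perror("PTP_SYS_OFFSET_EXTENDED");
        close(fd);
        return 0;
}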
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 8b73cb603c5f..5d0f76c780e5 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -69,6 +69,12 @@
#define RDS_TRANS_COUNT 3
#define RDS_TRANS_NONE (~0)
+/* IOCTL commands for SOL_RDS */
+#define SIOCRDSSETTOS (SIOCPROTOPRIVATE)
+#define SIOCRDSGETTOS (SIOCPROTOPRIVATE + 1)
+
+typedef __u8 rds_tos_t;
+
/*
* Control message types for SOL_RDS.
*
@@ -149,6 +155,7 @@ struct rds_info_connection {
__be32 faddr;
__u8 transport[TRANSNAMSIZ]; /* null term ascii */
__u8 flags;
+ __u8 tos;
} __attribute__((packed));
struct rds6_info_connection {
@@ -171,6 +178,7 @@ struct rds_info_message {
__be16 lport;
__be16 fport;
__u8 flags;
+ __u8 tos;
} __attribute__((packed));
struct rds6_info_message {
@@ -214,6 +222,7 @@ struct rds_info_tcp_socket {
__u32 last_sent_nxt;
__u32 last_expected_una;
__u32 last_seen_una;
+ __u8 tos;
} __attribute__((packed));
struct rds6_info_tcp_socket {
@@ -240,6 +249,7 @@ struct rds_info_rdma_connection {
__u32 max_send_sge;
__u32 rdma_mr_max;
__u32 rdma_mr_size;
+ __u8 tos;
};
struct rds6_info_rdma_connection {
@@ -253,6 +263,7 @@ struct rds6_info_rdma_connection {
__u32 max_send_sge;
__u32 rdma_mr_max;
__u32 rdma_mr_size;
+ __u8 tos;
};
/* RDS message Receive Path Latency points */
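The new ioctls set or read a per-socket TOS, which must happen before the first transmission creates the underlying connection; the same tos byte is then reported back through the rds_info_* records extended above. A minimal sketch, assuming an AF_RDS seqpacket socket (AF_RDS itself comes from the socket headers, with a fallback define used below):

#include <linux/rds.h>
#include <linux/sockios.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef AF_RDS
#define AF_RDS 21               /* fallback for older libc headers */
#endif

int main(void)
{
        rds_tos_t tos = 3;      /* example service level */
        int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);

        if (fd < 0) {
                perror("socket(AF_RDS)");
                return 1;
        }
        /* Must be done before the first sendmsg() creates the connection. */
        if (ioctl(fd, SIOCRDSSETTOS, &tos))
                perror("SIOCRDSSETTOS");
        if (ioctl(fd, SIOCRDSGETTOS, &tos) == 0)
                printf("tos = %u\n", (unsigned)tos);
        close(fd);
        return 0;
}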
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d584073532b8..b8f2c4d56532 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -59,6 +59,10 @@
typedef __s32 sctp_assoc_t;
+#define SCTP_FUTURE_ASSOC 0
+#define SCTP_CURRENT_ASSOC 1
+#define SCTP_ALL_ASSOC 2
+
/* The following symbols come from the Sockets API Extensions for
* SCTP <draft-ietf-tsvwg-sctpsocket-07.txt>.
*/
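On one-to-many SCTP sockets these wildcard IDs go into the assoc_id field of the existing option structures, so a single call can target future associations, all current ones, or both. A rough sketch using the long-standing SCTP_ASSOCINFO option and struct sctp_assocparams from elsewhere in this header (only the three constants come from this hunk):

#include <linux/sctp.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct sctp_assocparams ap;
        int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&ap, 0, sizeof(ap));
        /* Apply to associations created from now on, not to one assoc id. */
        ap.sasoc_assoc_id = SCTP_FUTURE_ASSOC;
        ap.sasoc_asocmaxrxt = 4;
        if (setsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap)))
                perror("SCTP_ASSOCINFO");
        close(fd);
        return 0;
}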
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index df4a7534e239..6009ee2c2e99 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -79,6 +79,9 @@
/* Nuvoton UART */
#define PORT_NPCM 40
+/* NVIDIA Tegra Combined UART */
+#define PORT_TEGRA_TCU 41
+
/* Intel EG20 */
#define PORT_PCH_8LINE 44
#define PORT_PCH_2LINE 45
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 6e89a5df49a4..653c4f94f76e 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -13,8 +13,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_BPF 13
-
struct tc_act_bpf {
tc_gen;
};
diff --git a/include/uapi/linux/tc_act/tc_connmark.h b/include/uapi/linux/tc_act/tc_connmark.h
index 80caa47b1933..9f8f6f709feb 100644
--- a/include/uapi/linux/tc_act/tc_connmark.h
+++ b/include/uapi/linux/tc_act/tc_connmark.h
@@ -5,8 +5,6 @@
#include <linux/types.h>
#include <linux/pkt_cls.h>
-#define TCA_ACT_CONNMARK 14
-
struct tc_connmark {
tc_gen;
__u16 zone;
diff --git a/include/uapi/linux/tc_act/tc_csum.h b/include/uapi/linux/tc_act/tc_csum.h
index 0ecf4d29e2f3..94b2044929de 100644
--- a/include/uapi/linux/tc_act/tc_csum.h
+++ b/include/uapi/linux/tc_act/tc_csum.h
@@ -5,8 +5,6 @@
#include <linux/types.h>
#include <linux/pkt_cls.h>
-#define TCA_ACT_CSUM 16
-
enum {
TCA_CSUM_UNSPEC,
TCA_CSUM_PARMS,
diff --git a/include/uapi/linux/tc_act/tc_gact.h b/include/uapi/linux/tc_act/tc_gact.h
index 94273c3b81b0..37e5392e02c7 100644
--- a/include/uapi/linux/tc_act/tc_gact.h
+++ b/include/uapi/linux/tc_act/tc_gact.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/pkt_cls.h>
-#define TCA_ACT_GACT 5
struct tc_gact {
tc_gen;
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index 2f48490ef386..8c401f185675 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -6,7 +6,6 @@
#include <linux/pkt_cls.h>
#include <linux/ife.h>
-#define TCA_ACT_IFE 25
/* Flag bits for now just encoding/decoding; mutually exclusive */
#define IFE_ENCODE 1
#define IFE_DECODE 0
diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h
index b743c8bddd13..c48d7da6750d 100644
--- a/include/uapi/linux/tc_act/tc_ipt.h
+++ b/include/uapi/linux/tc_act/tc_ipt.h
@@ -4,9 +4,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_IPT 6
-#define TCA_ACT_XT 10
-
enum {
TCA_IPT_UNSPEC,
TCA_IPT_TABLE,
diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h
index 5dd671cf5776..2500a0005d05 100644
--- a/include/uapi/linux/tc_act/tc_mirred.h
+++ b/include/uapi/linux/tc_act/tc_mirred.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/pkt_cls.h>
-#define TCA_ACT_MIRRED 8
#define TCA_EGRESS_REDIR 1 /* packet redirect to EGRESS*/
#define TCA_EGRESS_MIRROR 2 /* mirror packet to EGRESS */
#define TCA_INGRESS_REDIR 3 /* packet redirect to INGRESS*/
diff --git a/include/uapi/linux/tc_act/tc_nat.h b/include/uapi/linux/tc_act/tc_nat.h
index 086be842587b..21399c2c6130 100644
--- a/include/uapi/linux/tc_act/tc_nat.h
+++ b/include/uapi/linux/tc_act/tc_nat.h
@@ -5,8 +5,6 @@
#include <linux/pkt_cls.h>
#include <linux/types.h>
-#define TCA_ACT_NAT 9
-
enum {
TCA_NAT_UNSPEC,
TCA_NAT_PARMS,
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index 24ec792dacc1..f3e61b04fa01 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -5,8 +5,6 @@
#include <linux/types.h>
#include <linux/pkt_cls.h>
-#define TCA_ACT_PEDIT 7
-
enum {
TCA_PEDIT_UNSPEC,
TCA_PEDIT_TM,
diff --git a/include/uapi/linux/tc_act/tc_sample.h b/include/uapi/linux/tc_act/tc_sample.h
index bd7e9f03abd2..fee1bcc20793 100644
--- a/include/uapi/linux/tc_act/tc_sample.h
+++ b/include/uapi/linux/tc_act/tc_sample.h
@@ -6,8 +6,6 @@
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
-#define TCA_ACT_SAMPLE 26
-
struct tc_sample {
tc_gen;
};
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index 6de6071ebed6..800e93377218 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -23,8 +23,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_SKBEDIT 11
-
#define SKBEDIT_F_PRIORITY 0x1
#define SKBEDIT_F_QUEUE_MAPPING 0x2
#define SKBEDIT_F_MARK 0x4
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
index 38c072f66f2f..c525b3503797 100644
--- a/include/uapi/linux/tc_act/tc_skbmod.h
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -13,8 +13,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_SKBMOD 15
-
#define SKBMOD_F_DMAC 0x1
#define SKBMOD_F_SMAC 0x2
#define SKBMOD_F_ETYPE 0x4
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index be384d63e1b5..41c8b462c177 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -14,8 +14,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_TUNNEL_KEY 17
-
#define TCA_TUNNEL_KEY_ACT_SET 1
#define TCA_TUNNEL_KEY_ACT_RELEASE 2
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 0d7b5fd6605b..168995b54a70 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -13,8 +13,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_VLAN 12
-
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
#define TCA_VLAN_ACT_MODIFY 3
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 6b56a2208be7..958932effc5e 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -3,7 +3,7 @@
#define _UAPI_LINUX_TIME_H
#include <linux/types.h>
-
+#include <linux/time_types.h>
#ifndef _STRUCT_TIMESPEC
#define _STRUCT_TIMESPEC
@@ -23,7 +23,6 @@ struct timezone {
int tz_dsttime; /* type of dst correction */
};
-
/*
* Names of the interval timers, and structure
* defining a timer setting:
@@ -42,32 +41,6 @@ struct itimerval {
struct timeval it_value; /* current value */
};
-#ifndef __kernel_timespec
-struct __kernel_timespec {
- __kernel_time64_t tv_sec; /* seconds */
- long long tv_nsec; /* nanoseconds */
-};
-#endif
-
-#ifndef __kernel_itimerspec
-struct __kernel_itimerspec {
- struct __kernel_timespec it_interval; /* timer period */
- struct __kernel_timespec it_value; /* timer expiration */
-};
-#endif
-
-/*
- * legacy timeval structure, only embedded in structures that
- * traditionally used 'timeval' to pass time intervals (not absolute
- * times). Do not add new users. If user space fails to compile
- * here, this is probably because it is not y2038 safe and needs to
- * be changed to use another interface.
- */
-struct __kernel_old_timeval {
- __kernel_long_t tv_sec;
- __kernel_long_t tv_usec;
-};
-
/*
* The IDs of the various system clocks (for POSIX.1b interval timers):
*/
diff --git a/include/uapi/linux/time_types.h b/include/uapi/linux/time_types.h
new file mode 100644
index 000000000000..27bfc8fc6904
--- /dev/null
+++ b/include/uapi/linux/time_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_TIME_TYPES_H
+#define _UAPI_LINUX_TIME_TYPES_H
+
+#include <linux/types.h>
+
+struct __kernel_timespec {
+ __kernel_time64_t tv_sec; /* seconds */
+ long long tv_nsec; /* nanoseconds */
+};
+
+struct __kernel_itimerspec {
+ struct __kernel_timespec it_interval; /* timer period */
+ struct __kernel_timespec it_value; /* timer expiration */
+};
+
+/*
+ * legacy timeval structure, only embedded in structures that
+ * traditionally used 'timeval' to pass time intervals (not absolute
+ * times). Do not add new users. If user space fails to compile
+ * here, this is probably because it is not y2038 safe and needs to
+ * be changed to use another interface.
+ */
+#ifndef __kernel_old_timeval
+struct __kernel_old_timeval {
+ __kernel_long_t tv_sec;
+ __kernel_long_t tv_usec;
+};
+#endif
+
+struct __kernel_sock_timeval {
+ __s64 tv_sec;
+ __s64 tv_usec;
+};
+
+#endif /* _UAPI_LINUX_TIME_TYPES_H */
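Splitting these definitions into their own header lets other uapi headers pull in the y2038-safe types without dragging in all of <linux/time.h>. Their defining property, a 64-bit seconds field on every architecture, can be checked at compile time; the 16-byte totals below assume the plain two-field layouts shown above with no extra padding:

#include <linux/time_types.h>

/* tv_sec is 64-bit even for 32-bit userspace, which is the whole point. */
_Static_assert(sizeof(((struct __kernel_timespec *)0)->tv_sec) == 8,
               "__kernel_timespec.tv_sec must be 64-bit");
_Static_assert(sizeof(struct __kernel_timespec) == 16,
               "unexpected __kernel_timespec layout");
_Static_assert(sizeof(struct __kernel_sock_timeval) == 16,
               "unexpected __kernel_sock_timeval layout");

int main(void)
{
        return 0;
}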
diff --git a/include/uapi/linux/timex.h b/include/uapi/linux/timex.h
index 92685d826444..9f517f9010bb 100644
--- a/include/uapi/linux/timex.h
+++ b/include/uapi/linux/timex.h
@@ -92,6 +92,45 @@ struct timex {
int :32; int :32; int :32;
};
+struct __kernel_timex_timeval {
+ __kernel_time64_t tv_sec;
+ long long tv_usec;
+};
+
+struct __kernel_timex {
+ unsigned int modes; /* mode selector */
+ int :32; /* pad */
+ long long offset; /* time offset (usec) */
+ long long freq; /* frequency offset (scaled ppm) */
+ long long maxerror;/* maximum error (usec) */
+ long long esterror;/* estimated error (usec) */
+ int status; /* clock command/status */
+ int :32; /* pad */
+ long long constant;/* pll time constant */
+ long long precision;/* clock precision (usec) (read only) */
+ long long tolerance;/* clock frequency tolerance (ppm)
+ * (read only)
+ */
+ struct __kernel_timex_timeval time; /* (read only, except for ADJ_SETOFFSET) */
+ long long tick; /* (modified) usecs between clock ticks */
+
+ long long ppsfreq;/* pps frequency (scaled ppm) (ro) */
+ long long jitter; /* pps jitter (us) (ro) */
+ int shift; /* interval duration (s) (shift) (ro) */
+ int :32; /* pad */
+ long long stabil; /* pps stability (scaled ppm) (ro) */
+ long long jitcnt; /* jitter limit exceeded (ro) */
+ long long calcnt; /* calibration intervals (ro) */
+ long long errcnt; /* calibration errors (ro) */
+ long long stbcnt; /* stability limit exceeded (ro) */
+
+ int tai; /* TAI offset (ro) */
+
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32;
+};
+
/*
* Mode codes (timex.mode)
*/
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index ff02287495ac..401d6f01de6a 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -51,6 +51,10 @@
#define TLS_1_2_VERSION_MINOR 0x3
#define TLS_1_2_VERSION TLS_VERSION_NUMBER(TLS_1_2)
+#define TLS_1_3_VERSION_MAJOR 0x3
+#define TLS_1_3_VERSION_MINOR 0x4
+#define TLS_1_3_VERSION TLS_VERSION_NUMBER(TLS_1_3)
+
/* Supported ciphers */
#define TLS_CIPHER_AES_GCM_128 51
#define TLS_CIPHER_AES_GCM_128_IV_SIZE 8
@@ -59,6 +63,13 @@
#define TLS_CIPHER_AES_GCM_128_TAG_SIZE 16
#define TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE 8
+#define TLS_CIPHER_AES_GCM_256 52
+#define TLS_CIPHER_AES_GCM_256_IV_SIZE 8
+#define TLS_CIPHER_AES_GCM_256_KEY_SIZE 32
+#define TLS_CIPHER_AES_GCM_256_SALT_SIZE 4
+#define TLS_CIPHER_AES_GCM_256_TAG_SIZE 16
+#define TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE 8
+
#define TLS_SET_RECORD_TYPE 1
#define TLS_GET_RECORD_TYPE 2
@@ -75,4 +86,12 @@ struct tls12_crypto_info_aes_gcm_128 {
unsigned char rec_seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE];
};
+struct tls12_crypto_info_aes_gcm_256 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_AES_GCM_256_IV_SIZE];
+ unsigned char key[TLS_CIPHER_AES_GCM_256_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_AES_GCM_256_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE];
+};
+
#endif /* _UAPI_LINUX_TLS_H */
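Taken together, the additions let kernel TLS carry TLS 1.3 record framing and AES-256-GCM keys. A minimal transmit-side sketch, assuming the established kTLS flow (TCP_ULP "tls", then SOL_TLS/TLS_TX from earlier in this header), a connected TCP socket, and key material produced by a userspace handshake; the SOL_TLS and TCP_ULP fallback values are assumptions of this sketch that mirror the kernel's definitions:

#include <linux/tls.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <string.h>

#ifndef SOL_TLS
#define SOL_TLS 282             /* fallback; matches the kernel's definition */
#endif
#ifndef TCP_ULP
#define TCP_ULP 31              /* fallback; matches the kernel's definition */
#endif

/* tcp_fd: connected TCP socket; key/iv/salt/seq: from the TLS handshake. */
int install_tx_key(int tcp_fd,
                   const unsigned char key[TLS_CIPHER_AES_GCM_256_KEY_SIZE],
                   const unsigned char iv[TLS_CIPHER_AES_GCM_256_IV_SIZE],
                   const unsigned char salt[TLS_CIPHER_AES_GCM_256_SALT_SIZE],
                   const unsigned char seq[TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE])
{
        struct tls12_crypto_info_aes_gcm_256 ci;

        if (setsockopt(tcp_fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
                return -1;

        memset(&ci, 0, sizeof(ci));
        ci.info.version = TLS_1_3_VERSION;
        ci.info.cipher_type = TLS_CIPHER_AES_GCM_256;
        memcpy(ci.key, key, TLS_CIPHER_AES_GCM_256_KEY_SIZE);
        memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_256_IV_SIZE);
        memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_256_SALT_SIZE);
        memcpy(ci.rec_seq, seq, TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);

        /* From here on, plain send()/write() on tcp_fd is encrypted. */
        return setsockopt(tcp_fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}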
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 1196e1c1d4f6..ff8e7dc9d4dd 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -79,6 +79,12 @@
#define VIRTIO_F_RING_PACKED 34
/*
+ * This feature indicates that memory accesses by the driver and the
+ * device are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM 36
+
+/*
* Does the device support Single Root I/O Virtualization?
*/
#define VIRTIO_F_SR_IOV 37
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 2414f8af26b3..4c4e24c291a5 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -213,14 +213,4 @@ struct vring_packed_desc {
__le16 flags;
};
-struct vring_packed {
- unsigned int num;
-
- struct vring_packed_desc *desc;
-
- struct vring_packed_desc_event *driver;
-
- struct vring_packed_desc_event *device;
-};
-
#endif /* _UAPI_LINUX_VIRTIO_RING_H */
diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h
new file mode 100644
index 000000000000..78b2591a7782
--- /dev/null
+++ b/include/uapi/linux/xdp_diag.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * xdp_diag: interface for querying/monitoring XDP sockets
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef _LINUX_XDP_DIAG_H
+#define _LINUX_XDP_DIAG_H
+
+#include <linux/types.h>
+
+struct xdp_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+ __u16 pad;
+ __u32 xdiag_ino;
+ __u32 xdiag_show;
+ __u32 xdiag_cookie[2];
+};
+
+struct xdp_diag_msg {
+ __u8 xdiag_family;
+ __u8 xdiag_type;
+ __u16 pad;
+ __u32 xdiag_ino;
+ __u32 xdiag_cookie[2];
+};
+
+#define XDP_SHOW_INFO (1 << 0) /* Basic information */
+#define XDP_SHOW_RING_CFG (1 << 1)
+#define XDP_SHOW_UMEM (1 << 2)
+#define XDP_SHOW_MEMINFO (1 << 3)
+
+enum {
+ XDP_DIAG_NONE,
+ XDP_DIAG_INFO,
+ XDP_DIAG_UID,
+ XDP_DIAG_RX_RING,
+ XDP_DIAG_TX_RING,
+ XDP_DIAG_UMEM,
+ XDP_DIAG_UMEM_FILL_RING,
+ XDP_DIAG_UMEM_COMPLETION_RING,
+ XDP_DIAG_MEMINFO,
+ __XDP_DIAG_MAX,
+};
+
+#define XDP_DIAG_MAX (__XDP_DIAG_MAX - 1)
+
+struct xdp_diag_info {
+ __u32 ifindex;
+ __u32 queue_id;
+};
+
+struct xdp_diag_ring {
+ __u32 entries; /*num descs */
+};
+
+#define XDP_DU_F_ZEROCOPY (1 << 0)
+
+struct xdp_diag_umem {
+ __u64 size;
+ __u32 id;
+ __u32 num_pages;
+ __u32 chunk_size;
+ __u32 headroom;
+ __u32 ifindex;
+ __u32 queue_id;
+ __u32 flags;
+ __u32 refs;
+};
+
+#endif /* _LINUX_XDP_DIAG_H */
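These structures ride on the generic sock_diag netlink channel: the request is an xdp_diag_req behind an nlmsghdr of type SOCK_DIAG_BY_FAMILY, and the kernel replies with xdp_diag_msg followed by the XDP_DIAG_* attributes selected by xdiag_show. A rough sketch of issuing the dump request, assuming AF_XDP and the standard sock_diag plumbing (only the request and flag definitions come from this file); parsing the reply is omitted:

#include <linux/xdp_diag.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <sys/socket.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifndef AF_XDP
#define AF_XDP 44               /* fallback for older libc headers */
#endif

int main(void)
{
        struct {
                struct nlmsghdr nlh;
                struct xdp_diag_req req;
        } msg;
        struct sockaddr_nl sa;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

        if (fd < 0) {
                perror("socket(NETLINK_SOCK_DIAG)");
                return 1;
        }
        memset(&sa, 0, sizeof(sa));
        sa.nl_family = AF_NETLINK;      /* destination: the kernel */

        memset(&msg, 0, sizeof(msg));
        msg.nlh.nlmsg_len = sizeof(msg);
        msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
        msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        msg.req.sdiag_family = AF_XDP;
        msg.req.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_RING_CFG |
                             XDP_SHOW_UMEM | XDP_SHOW_MEMINFO;

        if (sendto(fd, &msg, sizeof(msg), 0,
                   (struct sockaddr *)&sa, sizeof(sa)) < 0)
                perror("sendto");
        /* recv() loop would then parse xdp_diag_msg + nested XDP_DIAG_* attrs */
        close(fd);
        return 0;
}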
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
new file mode 100644
index 000000000000..6d701af9fc42
--- /dev/null
+++ b/include/uapi/misc/fastrpc.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_FASTRPC_H__
+#define __QCOM_FASTRPC_H__
+
+#include <linux/types.h>
+
+#define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf)
+#define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32)
+#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
+#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
+#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
+
+struct fastrpc_invoke_args {
+ __u64 ptr;
+ __u64 length;
+ __s32 fd;
+ __u32 reserved;
+};
+
+struct fastrpc_invoke {
+ __u32 handle;
+ __u32 sc;
+ __u64 args;
+};
+
+struct fastrpc_init_create {
+ __u32 filelen; /* elf file length */
+ __s32 filefd; /* fd for the file */
+ __u32 attrs;
+ __u32 siglen;
+ __u64 file; /* pointer to elf file */
+};
+
+struct fastrpc_alloc_dma_buf {
+ __s32 fd; /* fd */
+ __u32 flags; /* flags to map with */
+ __u64 size; /* size */
+};
+
+#endif /* __QCOM_FASTRPC_H__ */
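The driver exposes one character device per DSP channel; a client first attaches to (or spawns) a remote process context and then issues calls through FASTRPC_IOCTL_INVOKE, where sc packs the method index and argument counts and args points to an array of fastrpc_invoke_args. A rough sketch, assuming a node such as /dev/fastrpc-adsp; the node name and the handle/sc values are placeholders normally supplied by a higher-level RPC library, not defined in this header:

#include <misc/fastrpc.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Device node name is an assumption; the driver creates one per
         * DSP channel (e.g. adsp, cdsp). */
        int fd = open("/dev/fastrpc-adsp", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Attach to the DSP's root process context. */
        if (ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH)) {
                perror("FASTRPC_IOCTL_INIT_ATTACH");
                close(fd);
                return 1;
        }

        struct fastrpc_invoke_args args[1];
        struct fastrpc_invoke inv;

        memset(args, 0, sizeof(args));          /* in/out buffers, if any */
        memset(&inv, 0, sizeof(inv));
        inv.handle = 1;                         /* placeholder remote handle */
        inv.sc = 0;                             /* placeholder scalars */
        inv.args = (__u64)(unsigned long)args;
        if (ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv))
                perror("FASTRPC_IOCTL_INVOKE");

        close(fd);
        return 0;
}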
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
new file mode 100644
index 000000000000..7fd6f633534c
--- /dev/null
+++ b/include/uapi/misc/habanalabs.h
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HABANALABS_H_
+#define HABANALABS_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Defines that are ASIC-specific but constitute the ABI between the kernel
+ * driver and userspace
+ */
+#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
+
+/*
+ * Queue Numbering
+ *
+ * The external queues (DMA channels + CPU) MUST be before the internal queues
+ * and each group (DMA channels + CPU and internal) must be contiguous inside
+ * itself but there can be a gap between the two groups (although not
+ * recommended)
+ */
+
+enum goya_queue_id {
+ GOYA_QUEUE_ID_DMA_0 = 0,
+ GOYA_QUEUE_ID_DMA_1,
+ GOYA_QUEUE_ID_DMA_2,
+ GOYA_QUEUE_ID_DMA_3,
+ GOYA_QUEUE_ID_DMA_4,
+ GOYA_QUEUE_ID_CPU_PQ,
+ GOYA_QUEUE_ID_MME,
+ GOYA_QUEUE_ID_TPC0,
+ GOYA_QUEUE_ID_TPC1,
+ GOYA_QUEUE_ID_TPC2,
+ GOYA_QUEUE_ID_TPC3,
+ GOYA_QUEUE_ID_TPC4,
+ GOYA_QUEUE_ID_TPC5,
+ GOYA_QUEUE_ID_TPC6,
+ GOYA_QUEUE_ID_TPC7,
+ GOYA_QUEUE_ID_SIZE
+};
+
+/* Opcode for management ioctl */
+#define HL_INFO_HW_IP_INFO 0
+#define HL_INFO_HW_EVENTS 1
+#define HL_INFO_DRAM_USAGE 2
+#define HL_INFO_HW_IDLE 3
+
+#define HL_INFO_VERSION_MAX_LEN 128
+
+struct hl_info_hw_ip_info {
+ __u64 sram_base_address;
+ __u64 dram_base_address;
+ __u64 dram_size;
+ __u32 sram_size;
+ __u32 num_of_events;
+ __u32 device_id; /* PCI Device ID */
+ __u32 reserved[3];
+ __u32 armcp_cpld_version;
+ __u32 psoc_pci_pll_nr;
+ __u32 psoc_pci_pll_nf;
+ __u32 psoc_pci_pll_od;
+ __u32 psoc_pci_pll_div_factor;
+ __u8 tpc_enabled_mask;
+ __u8 dram_enabled;
+ __u8 pad[2];
+ __u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
+};
+
+struct hl_info_dram_usage {
+ __u64 dram_free_mem;
+ __u64 ctx_dram_mem;
+};
+
+struct hl_info_hw_idle {
+ __u32 is_idle;
+ __u32 pad;
+};
+
+struct hl_info_args {
+ /* Location of relevant struct in userspace */
+ __u64 return_pointer;
+ /*
+ * The size of the return value. Just like "size" in "snprintf",
+ * it limits how many bytes the kernel can write
+ *
+ * For hw_events array, the size should be
+ * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
+ */
+ __u32 return_size;
+
+ /* HL_INFO_* */
+ __u32 op;
+
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+/* Opcode to create a new command buffer */
+#define HL_CB_OP_CREATE 0
+/* Opcode to destroy previously created command buffer */
+#define HL_CB_OP_DESTROY 1
+
+struct hl_cb_in {
+ /* Handle of CB or 0 if we want to create one */
+ __u64 cb_handle;
+ /* HL_CB_OP_* */
+ __u32 op;
+ /* Size of CB. Maximum size is 2MB. The minimum size that will be
+ * allocated, regardless of this parameter's value, is PAGE_SIZE
+ */
+ __u32 cb_size;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct hl_cb_out {
+ /* Handle of CB */
+ __u64 cb_handle;
+};
+
+union hl_cb_args {
+ struct hl_cb_in in;
+ struct hl_cb_out out;
+};
+
+/*
+ * This structure size must always be fixed to 64 bytes for backward
+ * compatibility
+ */
+struct hl_cs_chunk {
+ /*
+ * For external queue, this represents a Handle of CB on the Host
+ * For internal queue, this represents an SRAM or DRAM address of the
+ * internal CB
+ */
+ __u64 cb_handle;
+ /* Index of queue to put the CB on */
+ __u32 queue_index;
+ /*
+ * Size of command buffer with valid packets
+ * Can be smaller than the actual CB size
+ */
+ __u32 cb_size;
+ /* HL_CS_CHUNK_FLAGS_* */
+ __u32 cs_chunk_flags;
+ /* Align structure to 64 bytes */
+ __u32 pad[11];
+};
+
+#define HL_CS_FLAGS_FORCE_RESTORE 0x1
+
+#define HL_CS_STATUS_SUCCESS 0
+
+struct hl_cs_in {
+ /* this holds address of array of hl_cs_chunk for restore phase */
+ __u64 chunks_restore;
+ /* this holds address of array of hl_cs_chunk for execution phase */
+ __u64 chunks_execute;
+ /* this holds address of array of hl_cs_chunk for store phase -
+ * Currently not in use
+ */
+ __u64 chunks_store;
+ /* Number of chunks in restore phase array */
+ __u32 num_chunks_restore;
+ /* Number of chunks in execution array */
+ __u32 num_chunks_execute;
+ /* Number of chunks in restore phase array - Currently not in use */
+ __u32 num_chunks_store;
+ /* HL_CS_FLAGS_* */
+ __u32 cs_flags;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+};
+
+struct hl_cs_out {
+ /* this holds the sequence number of the CS to pass to wait ioctl */
+ __u64 seq;
+ /* HL_CS_STATUS_* */
+ __u32 status;
+ __u32 pad;
+};
+
+union hl_cs_args {
+ struct hl_cs_in in;
+ struct hl_cs_out out;
+};
+
+struct hl_wait_cs_in {
+ /* Command submission sequence number */
+ __u64 seq;
+ /* Absolute timeout to wait in microseconds */
+ __u64 timeout_us;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+#define HL_WAIT_CS_STATUS_COMPLETED 0
+#define HL_WAIT_CS_STATUS_BUSY 1
+#define HL_WAIT_CS_STATUS_TIMEDOUT 2
+#define HL_WAIT_CS_STATUS_ABORTED 3
+#define HL_WAIT_CS_STATUS_INTERRUPTED 4
+
+struct hl_wait_cs_out {
+ /* HL_WAIT_CS_STATUS_* */
+ __u32 status;
+ __u32 pad;
+};
+
+union hl_wait_cs_args {
+ struct hl_wait_cs_in in;
+ struct hl_wait_cs_out out;
+};
+
+/* Opcode to alloc device memory */
+#define HL_MEM_OP_ALLOC 0
+/* Opcode to free previously allocated device memory */
+#define HL_MEM_OP_FREE 1
+/* Opcode to map host memory */
+#define HL_MEM_OP_MAP 2
+/* Opcode to unmap previously mapped host memory */
+#define HL_MEM_OP_UNMAP 3
+
+/* Memory flags */
+#define HL_MEM_CONTIGUOUS 0x1
+#define HL_MEM_SHARED 0x2
+#define HL_MEM_USERPTR 0x4
+
+struct hl_mem_in {
+ union {
+ /* HL_MEM_OP_ALLOC- allocate device memory */
+ struct {
+ /* Size to alloc */
+ __u64 mem_size;
+ } alloc;
+
+ /* HL_MEM_OP_FREE - free device memory */
+ struct {
+ /* Handle returned from HL_MEM_OP_ALLOC */
+ __u64 handle;
+ } free;
+
+ /* HL_MEM_OP_MAP - map device memory */
+ struct {
+ /*
+ * Requested virtual address of mapped memory.
+ * KMD will try to map the requested region to this
+ * hint address, as long as the address is valid and
+ * not already mapped. The user should check the
+ * returned address of the IOCTL to make sure he got
+ * the hint address. Passing 0 here means that KMD
+ * will choose the address itself.
+ */
+ __u64 hint_addr;
+ /* Handle returned from HL_MEM_OP_ALLOC */
+ __u64 handle;
+ } map_device;
+
+ /* HL_MEM_OP_MAP - map host memory */
+ struct {
+ /* Address of allocated host memory */
+ __u64 host_virt_addr;
+ /*
+ * Requested virtual address of mapped memory.
+ * KMD will try to map the requested region to this
+ * hint address, as long as the address is valid and
+ * not already mapped. The user should check the
+ * returned address of the IOCTL to make sure he got
+ * the hint address. Passing 0 here means that KMD
+ * will choose the address itself.
+ */
+ __u64 hint_addr;
+ /* Size of allocated host memory */
+ __u64 mem_size;
+ } map_host;
+
+ /* HL_MEM_OP_UNMAP - unmap host memory */
+ struct {
+ /* Virtual address returned from HL_MEM_OP_MAP */
+ __u64 device_virt_addr;
+ } unmap;
+ };
+
+ /* HL_MEM_OP_* */
+ __u32 op;
+ /* HL_MEM_* flags */
+ __u32 flags;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct hl_mem_out {
+ union {
+ /*
+ * Used for HL_MEM_OP_MAP as the virtual address that was
+ * assigned in the device VA space.
+ * A value of 0 means the requested operation failed.
+ */
+ __u64 device_virt_addr;
+
+ /*
+ * Used for HL_MEM_OP_ALLOC. This is the assigned
+ * handle for the allocated memory
+ */
+ __u64 handle;
+ };
+};
+
+union hl_mem_args {
+ struct hl_mem_in in;
+ struct hl_mem_out out;
+};
+
+/*
+ * Various information operations such as:
+ * - H/W IP information
+ * - Current dram usage
+ *
+ * The user calls this IOCTL with an opcode that describes the required
+ * information. The user should supply a pointer to a user-allocated memory
+ * chunk, which will be filled by the driver with the requested information.
+ *
+ * The user supplies the maximum size to copy into the user's memory,
+ * in order to prevent data corruption in case of differences between the
+ * definitions of structures in kernel and userspace, e.g. in case of old
+ * userspace and new kernel driver
+ */
+#define HL_IOCTL_INFO \
+ _IOWR('H', 0x01, struct hl_info_args)
+
+/*
+ * Command Buffer
+ * - Request a Command Buffer
+ * - Destroy a Command Buffer
+ *
+ * The command buffers are memory blocks that reside in DMA-able address
+ * space and are physically contiguous so they can be accessed by the device
+ * directly. They are allocated using the coherent DMA API.
+ *
+ * When creating a new CB, the IOCTL returns a handle of it, and the user-space
+ * process needs to use that handle to mmap the buffer so it can access it.
+ *
+ */
+#define HL_IOCTL_CB \
+ _IOWR('H', 0x02, union hl_cb_args)
+
+/*
+ * Command Submission
+ *
+ * To submit work to the device, the user needs to call this IOCTL with a set
+ * of JOBS. That set of JOBS constitutes a CS object.
+ * Each JOB will be enqueued on a specific queue, according to the user's input.
+ * There can be more than one JOB per queue.
+ *
+ * There are two types of queues - external and internal. External queues
+ * are DMA queues which transfer data from/to the Host. All other queues are
+ * internal. The driver will get completion notifications from the device only
+ * on JOBS which are enqueued in the external queues.
+ *
+ * For jobs on external queues, the user needs to create command buffers
+ * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
+ * internal queues, the user needs to prepare a "command buffer" with packets
+ * on either the SRAM or DRAM, and give the device address of that buffer to
+ * the CS ioctl.
+ *
+ * This IOCTL is asynchronous in regard to the actual execution of the CS. This
+ * means it returns immediately after ALL the JOBS were enqueued on their
+ * relevant queues. Therefore, the user mustn't assume the CS has been completed
+ * or has even started to execute.
+ *
+ * Upon successful enqueue, the IOCTL returns an opaque handle which the user
+ * can use with the "Wait for CS" IOCTL to check whether the handle's CS
+ * external JOBS have been completed. Note that if the CS has internal JOBS
+ * which can execute AFTER the external JOBS have finished, the driver might
+ * report that the CS has finished executing BEFORE the internal JOBS have
+ * actually finished executing.
+ *
+ * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
+ * a second set is for "execution" phase and a third set is for "store" phase.
+ * The JOBS on the "restore" phase are enqueued only after context-switch
+ * (or if it's the first CS for this context). The user can also order the
+ * driver to run the "restore" phase explicitly
+ *
+ */
+#define HL_IOCTL_CS \
+ _IOWR('H', 0x03, union hl_cs_args)
+
+/*
+ * Wait for Command Submission
+ *
+ * The user can call this IOCTL with a handle it received from the CS IOCTL
+ * to wait until the handle's CS has finished executing. The user will wait
+ * inside the kernel until the CS has finished or until the user-requested
+ * timeout has expired.
+ *
+ * The return value of the IOCTL is a standard Linux error code. The possible
+ * values are:
+ *
+ * EINTR - Kernel waiting has been interrupted, e.g. due to OS signal
+ * that the user process received
+ * ETIMEDOUT - The CS has caused a timeout on the device
+ * EIO - The CS was aborted (usually because the device was reset)
+ * ENODEV - The device wants to do a hard reset (so the user needs to close the FD)
+ *
+ * The driver also returns a custom define inside the IOCTL which can be:
+ *
+ * HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
+ * HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
+ * HL_WAIT_CS_STATUS_TIMEDOUT - The CS has caused a timeout on the device
+ * (ETIMEDOUT)
+ * HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
+ * device was reset (EIO)
+ * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
+ *
+ */
+
+#define HL_IOCTL_WAIT_CS \
+ _IOWR('H', 0x04, union hl_wait_cs_args)
+
+/*
+ * Memory
+ * - Map host memory to device MMU
+ * - Unmap host memory from device MMU
+ *
+ * This IOCTL allows the user to map host memory to the device MMU
+ *
+ * For host memory, the IOCTL doesn't allocate memory. The user is supposed
+ * to allocate the memory in user-space (malloc/new). The driver pins the
+ * physical pages (up to the limit allowed by the OS), assigns a virtual
+ * address in the device VA space and initializes the device MMU.
+ *
+ * There is an option for the user to specify the requested virtual address.
+ *
+ */
+#define HL_IOCTL_MEMORY \
+ _IOWR('H', 0x05, union hl_mem_args)
+
+#define HL_COMMAND_START 0x01
+#define HL_COMMAND_END 0x06
+
+#endif /* HABANALABS_H_ */
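The INFO ioctl is the natural first call against the new driver: userspace points return_pointer at a buffer it owns, bounds the copy with return_size, and selects the query with op. A minimal sketch, assuming a device node such as /dev/hl0 (node naming is a driver convention, not part of this header):

#include <misc/habanalabs.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct hl_info_hw_ip_info hw_ip;
        struct hl_info_args args;
        int fd = open("/dev/hl0", O_RDWR);      /* device path is an assumption */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&hw_ip, 0, sizeof(hw_ip));
        memset(&args, 0, sizeof(args));
        args.op = HL_INFO_HW_IP_INFO;
        args.return_pointer = (__u64)(unsigned long)&hw_ip;
        args.return_size = sizeof(hw_ip);       /* kernel will not write past this */

        if (ioctl(fd, HL_IOCTL_INFO, &args)) {
                perror("HL_IOCTL_INFO");
                close(fd);
                return 1;
        }
        printf("DRAM: %llu bytes at 0x%llx, ArmCP %s\n",
               (unsigned long long)hw_ip.dram_size,
               (unsigned long long)hw_ip.dram_base_address,
               (const char *)hw_ip.armcp_version);
        close(fd);
        return 0;
}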
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index ef3c7ec793a7..eb76b38a00d4 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -52,6 +52,11 @@ struct hns_roce_ib_create_srq {
__aligned_u64 que_addr;
};
+struct hns_roce_ib_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
struct hns_roce_ib_create_qp {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index d13fd490b66d..6e73f0274e41 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
PVRDMA_WR_BIND_MW,
PVRDMA_WR_REG_SIG_MR,
+ PVRDMA_WR_ERROR,
};
enum pvrdma_wc_status {
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index 59a260712a56..2ca9164a79bf 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -1,17 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <asm/dma-mapping.h>
-#include <linux/dma-mapping.h>
-
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
- if (dev && dev->archdata.dev_dma_ops)
- return dev->archdata.dev_dma_ops;
- return get_arch_dma_ops(NULL);
-}
+#ifndef _XEN_ARM_PAGE_COHERENT_H
+#define _XEN_ARM_PAGE_COHERENT_H
void __xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
@@ -21,87 +10,7 @@ void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
void __xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir);
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
- return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
- xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
- dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long page_pfn = page_to_xen_pfn(page);
- unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
- unsigned long compound_pages =
- (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
- bool local = (page_pfn <= dev_pfn) &&
- (dev_pfn - page_pfn < compound_pages);
-
- /*
- * Dom0 is mapped 1:1, while the Linux page can span across
- * multiple Xen pages, it's not possible for it to contain a
- * mix of local and foreign Xen pages. So if the first xen_pfn
- * == mfn the page is local otherwise it's a foreign page
- * grant-mapped in dom0. If the page is local we can safely
- * call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (local)
- xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
- else
- __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long pfn = PFN_DOWN(handle);
- /*
- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
- * multiple Xen page, it's not possible to have a mix of local and
- * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
- * foreign mfn will always return false. If the page is local we can
- * safely call the native dma_ops function, otherwise we call the xen
- * specific function.
- */
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->unmap_page)
- xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
- } else
- __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
- xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(handle);
- if (pfn_valid(pfn)) {
- if (xen_get_dma_ops(hwdev)->sync_single_for_device)
- xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
- } else
- __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#endif /* _XEN_ARM_PAGE_COHERENT_H */
diff --git a/init/Kconfig b/init/Kconfig
index d47cb77a220e..c9386a365eea 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -512,6 +512,17 @@ config PSI_DEFAULT_DISABLED
per default but can be enabled through passing psi=1 on the
kernel commandline during boot.
+ This feature adds some code to the task wakeup and sleep
+ paths of the scheduler. The overhead is too low to affect
+ common scheduling-intense workloads in practice (such as
+ webservers, memcache), but it does show up in artificial
+ scheduler stress tests, such as hackbench.
+
+ If you are paranoid and not sure what the kernel will be
+ used for, say Y.
+
+ Say N if unsure.
+
endmenu # "CPU/Task time and stats accounting"
config CPU_ISOLATION
@@ -825,7 +836,7 @@ config CGROUP_PIDS
PIDs controller is designed to stop this from happening.
It should be noted that organisational operations (such as attaching
- to a cgroup hierarchy will *not* be blocked by the PIDs controller),
+ to a cgroup hierarchy) will *not* be blocked by the PIDs controller,
since the PIDs limit only affects a process's ability to fork, not to
attach to a cgroup.
@@ -1124,6 +1135,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
bool "Dead code and data elimination (EXPERIMENTAL)"
depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
depends on EXPERT
+ depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
depends on $(cc-option,-ffunction-sections -fdata-sections)
depends on $(ld-option,--gc-sections)
help
diff --git a/init/init_task.c b/init/init_task.c
index 39c3109acc1a..c70ef656d0f4 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/audit.h>
+#include <linux/numa.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
@@ -44,7 +45,7 @@ static struct signal_struct init_signals = {
};
static struct sighand_struct init_sighand = {
- .count = ATOMIC_INIT(1),
+ .count = REFCOUNT_INIT(1),
.action = { { { .sa_handler = SIG_DFL, } }, },
.siglock = __SPIN_LOCK_UNLOCKED(init_sighand.siglock),
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
@@ -61,11 +62,11 @@ struct task_struct init_task
= {
#ifdef CONFIG_THREAD_INFO_IN_TASK
.thread_info = INIT_THREAD_INFO(init_task),
- .stack_refcount = ATOMIC_INIT(1),
+ .stack_refcount = REFCOUNT_INIT(1),
#endif
.state = 0,
.stack = init_stack,
- .usage = ATOMIC_INIT(2),
+ .usage = REFCOUNT_INIT(2),
.flags = PF_KTHREAD,
.prio = MAX_PRIO - 20,
.static_prio = MAX_PRIO - 20,
@@ -154,7 +155,7 @@ struct task_struct init_task
.vtime.state = VTIME_SYS,
#endif
#ifdef CONFIG_NUMA_BALANCING
- .numa_preferred_nid = -1,
+ .numa_preferred_nid = NUMA_NO_NODE,
.numa_group = NULL,
.numa_faults = NULL,
#endif
diff --git a/init/initramfs.c b/init/initramfs.c
index 7cea802d00ef..fca899622937 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -550,6 +550,7 @@ skip:
initrd_end = 0;
}
+#ifdef CONFIG_BLK_DEV_RAM
#define BUF_SIZE 1024
static void __init clean_rootfs(void)
{
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void)
ksys_close(fd);
kfree(buf);
}
+#endif
static int __init populate_rootfs(void)
{
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void)
printk(KERN_INFO "Unpacking initramfs...\n");
err = unpack_to_rootfs((char *)initrd_start,
initrd_end - initrd_start);
- if (err) {
+ if (err)
printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
- clean_rootfs();
- }
free_initrd();
#endif
}
diff --git a/init/main.c b/init/main.c
index e2e80ca3165a..c86a1c8f19f4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void)
initrd_start = 0;
}
#endif
- page_ext_init();
kmemleak_init();
setup_per_cpu_pageset();
numa_policy_init();
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void)
sched_init_smp();
page_alloc_init_late();
+ /* Initialize page ext after all struct pages are initialized. */
+ page_ext_init();
do_basic_setup();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c595bed7bfcb..c839bf83231d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1471,10 +1471,10 @@ static int compat_prepare_timeout(const struct old_timespec32 __user *p,
return 0;
}
-COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
- const char __user *, u_msg_ptr,
- compat_size_t, msg_len, unsigned int, msg_prio,
- const struct old_timespec32 __user *, u_abs_timeout)
+SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
+ const char __user *, u_msg_ptr,
+ unsigned int, msg_len, unsigned int, msg_prio,
+ const struct old_timespec32 __user *, u_abs_timeout)
{
struct timespec64 ts, *p = NULL;
if (u_abs_timeout) {
@@ -1486,10 +1486,10 @@ COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}
-COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
- char __user *, u_msg_ptr,
- compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
- const struct old_timespec32 __user *, u_abs_timeout)
+SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
+ char __user *, u_msg_ptr,
+ unsigned int, msg_len, unsigned int __user *, u_msg_prio,
+ const struct old_timespec32 __user *, u_abs_timeout)
{
struct timespec64 ts, *p = NULL;
if (u_abs_timeout) {
diff --git a/ipc/msg.c b/ipc/msg.c
index 0833c6405915..8dec945fa030 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -567,9 +567,8 @@ out_unlock:
return err;
}
-long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
+static long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf, int version)
{
- int version;
struct ipc_namespace *ns;
struct msqid64_ds msqid64;
int err;
@@ -577,7 +576,6 @@ long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
if (msqid < 0 || cmd < 0)
return -EINVAL;
- version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
@@ -613,9 +611,23 @@ long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
- return ksys_msgctl(msqid, cmd, buf);
+ return ksys_msgctl(msqid, cmd, buf, IPC_64);
}
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
+long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
+{
+ int version = ipc_parse_version(&cmd);
+
+ return ksys_msgctl(msqid, cmd, buf, version);
+}
+
+SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
+{
+ return ksys_old_msgctl(msqid, cmd, buf);
+}
+#endif
+
#ifdef CONFIG_COMPAT
struct compat_msqid_ds {
@@ -689,12 +701,11 @@ static int copy_compat_msqid_to_user(void __user *buf, struct msqid64_ds *in,
}
}
-long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr)
+static long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr, int version)
{
struct ipc_namespace *ns;
int err;
struct msqid64_ds msqid64;
- int version = compat_ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
@@ -734,8 +745,22 @@ long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr)
COMPAT_SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, void __user *, uptr)
{
- return compat_ksys_msgctl(msqid, cmd, uptr);
+ return compat_ksys_msgctl(msqid, cmd, uptr, IPC_64);
}
+
+#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr)
+{
+ int version = compat_ipc_parse_version(&cmd);
+
+ return compat_ksys_msgctl(msqid, cmd, uptr, version);
+}
+
+COMPAT_SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, void __user *, uptr)
+{
+ return compat_ksys_old_msgctl(msqid, cmd, uptr);
+}
+#endif
#endif
static int testmsg(struct msg_msg *msg, long type, int mode)
diff --git a/ipc/sem.c b/ipc/sem.c
index 745dc6187e84..80909464acff 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1634,9 +1634,8 @@ out_up:
return err;
}
-long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
+static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
{
- int version;
struct ipc_namespace *ns;
void __user *p = (void __user *)arg;
struct semid64_ds semid64;
@@ -1645,7 +1644,6 @@ long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
if (semid < 0)
return -EINVAL;
- version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
@@ -1691,9 +1689,23 @@ long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg)
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
- return ksys_semctl(semid, semnum, cmd, arg);
+ return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
+long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
+{
+ int version = ipc_parse_version(&cmd);
+
+ return ksys_semctl(semid, semnum, cmd, arg, version);
+}
+
+SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
+{
+ return ksys_old_semctl(semid, semnum, cmd, arg);
+}
+#endif
+
#ifdef CONFIG_COMPAT
struct compat_semid_ds {
@@ -1744,12 +1756,11 @@ static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
}
}
-long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
+static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
{
void __user *p = compat_ptr(arg);
struct ipc_namespace *ns;
struct semid64_ds semid64;
- int version = compat_ipc_parse_version(&cmd);
int err;
ns = current->nsproxy->ipc_ns;
@@ -1792,8 +1803,22 @@ long compat_ksys_semctl(int semid, int semnum, int cmd, int arg)
COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
- return compat_ksys_semctl(semid, semnum, cmd, arg);
+ return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
+
+#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
+{
+ int version = compat_ipc_parse_version(&cmd);
+
+ return compat_ksys_semctl(semid, semnum, cmd, arg, version);
+}
+
+COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
+{
+ return compat_ksys_old_semctl(semid, semnum, cmd, arg);
+}
+#endif
#endif
/* If the task doesn't already have a undo_list, then allocate one
@@ -2225,7 +2250,7 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
return do_semtimedop(semid, tsems, nsops, NULL);
}
-COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
+SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
unsigned int, nsops,
const struct old_timespec32 __user *, timeout)
{
diff --git a/ipc/shm.c b/ipc/shm.c
index 0842411cb0e9..ce1ca9f7c6e9 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1137,16 +1137,15 @@ out_unlock1:
return err;
}
-long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
+static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
{
- int err, version;
+ int err;
struct ipc_namespace *ns;
struct shmid64_ds sem64;
if (cmd < 0 || shmid < 0)
return -EINVAL;
- version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
@@ -1194,8 +1193,22 @@ long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
- return ksys_shmctl(shmid, cmd, buf);
+ return ksys_shmctl(shmid, cmd, buf, IPC_64);
+}
+
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
+long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
+{
+ int version = ipc_parse_version(&cmd);
+
+ return ksys_shmctl(shmid, cmd, buf, version);
+}
+
+SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+{
+ return ksys_old_shmctl(shmid, cmd, buf);
}
+#endif
#ifdef CONFIG_COMPAT
@@ -1319,11 +1332,10 @@ static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
}
}
-long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr)
+long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
{
struct ipc_namespace *ns;
struct shmid64_ds sem64;
- int version = compat_ipc_parse_version(&cmd);
int err;
ns = current->nsproxy->ipc_ns;
@@ -1378,8 +1390,22 @@ long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr)
COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
{
- return compat_ksys_shmctl(shmid, cmd, uptr);
+ return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
}
+
+#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
+{
+ int version = compat_ipc_parse_version(&cmd);
+
+ return compat_ksys_shmctl(shmid, cmd, uptr, version);
+}
+
+COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
+{
+ return compat_ksys_old_shmctl(shmid, cmd, uptr);
+}
+#endif
#endif
/*
diff --git a/ipc/syscall.c b/ipc/syscall.c
index 1ac06e3983c0..581bdff4e7c5 100644
--- a/ipc/syscall.c
+++ b/ipc/syscall.c
@@ -17,8 +17,8 @@
#include <linux/shm.h>
#include <linux/uaccess.h>
-SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
- unsigned long, third, void __user *, ptr, long, fifth)
+int ksys_ipc(unsigned int call, int first, unsigned long second,
+ unsigned long third, void __user * ptr, long fifth)
{
int version, ret;
@@ -47,7 +47,7 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
return -EINVAL;
if (get_user(arg, (unsigned long __user *) ptr))
return -EFAULT;
- return ksys_semctl(first, second, third, arg);
+ return ksys_old_semctl(first, second, third, arg);
}
case MSGSND:
@@ -75,7 +75,7 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
case MSGGET:
return ksys_msgget((key_t) first, second);
case MSGCTL:
- return ksys_msgctl(first, second,
+ return ksys_old_msgctl(first, second,
(struct msqid_ds __user *)ptr);
case SHMAT:
@@ -100,12 +100,18 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
case SHMGET:
return ksys_shmget(first, second, third);
case SHMCTL:
- return ksys_shmctl(first, second,
+ return ksys_old_shmctl(first, second,
(struct shmid_ds __user *) ptr);
default:
return -ENOSYS;
}
}
+
+SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
+ unsigned long, third, void __user *, ptr, long, fifth)
+{
+ return ksys_ipc(call, first, second, third, ptr, fifth);
+}
#endif
#ifdef CONFIG_COMPAT
@@ -121,8 +127,8 @@ struct compat_ipc_kludge {
};
#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
-COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
- u32, third, compat_uptr_t, ptr, u32, fifth)
+int compat_ksys_ipc(u32 call, int first, int second,
+ u32 third, compat_uptr_t ptr, u32 fifth)
{
int version;
u32 pad;
@@ -146,7 +152,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
return -EINVAL;
if (get_user(pad, (u32 __user *) compat_ptr(ptr)))
return -EFAULT;
- return compat_ksys_semctl(first, second, third, pad);
+ return compat_ksys_old_semctl(first, second, third, pad);
case MSGSND:
return compat_ksys_msgsnd(first, ptr, second, third);
@@ -171,7 +177,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
case MSGGET:
return ksys_msgget(first, second);
case MSGCTL:
- return compat_ksys_msgctl(first, second, compat_ptr(ptr));
+ return compat_ksys_old_msgctl(first, second, compat_ptr(ptr));
case SHMAT: {
int err;
@@ -190,10 +196,16 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
case SHMGET:
return ksys_shmget(first, (unsigned int)second, third);
case SHMCTL:
- return compat_ksys_shmctl(first, second, compat_ptr(ptr));
+ return compat_ksys_old_shmctl(first, second, compat_ptr(ptr));
}
return -ENOSYS;
}
+
+COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
+ u32, third, compat_uptr_t, ptr, u32, fifth)
+{
+ return compat_ksys_ipc(call, first, second, third, ptr, fifth);
+}
#endif
#endif
diff --git a/ipc/util.h b/ipc/util.h
index d768fdbed515..e272be622ae7 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -160,10 +160,7 @@ static inline void ipc_update_pid(struct pid **pos, struct pid *pid)
}
}
-#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
-/* On IA-64, we always use the "64-bit version" of the IPC structures. */
-# define ipc_parse_version(cmd) IPC_64
-#else
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
int ipc_parse_version(int *cmd);
#endif
@@ -246,13 +243,9 @@ int get_compat_ipc64_perm(struct ipc64_perm *,
static inline int compat_ipc_parse_version(int *cmd)
{
-#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
int version = *cmd & IPC_64;
*cmd &= ~IPC_64;
return version;
-#else
- return IPC_64;
-#endif
}
#endif
@@ -261,29 +254,29 @@ long ksys_semtimedop(int semid, struct sembuf __user *tsops,
unsigned int nsops,
const struct __kernel_timespec __user *timeout);
long ksys_semget(key_t key, int nsems, int semflg);
-long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg);
+long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
long ksys_msgget(key_t key, int msgflg);
-long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
long msgtyp, int msgflg);
long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
int msgflg);
long ksys_shmget(key_t key, size_t size, int shmflg);
long ksys_shmdt(char __user *shmaddr);
-long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
+long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
const struct old_timespec32 __user *timeout);
#ifdef CONFIG_COMPAT
-long compat_ksys_semctl(int semid, int semnum, int cmd, int arg);
-long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr);
+long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg);
+long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr);
long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
compat_long_t msgtyp, int msgflg);
long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
-long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr);
+long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr);
#endif /* CONFIG_COMPAT */
#endif
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 84d882f3e299..fbba478ae522 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -242,6 +242,9 @@ config QUEUED_SPINLOCKS
def_bool y if ARCH_USE_QUEUED_SPINLOCKS
depends on SMP
+config BPF_ARCH_SPINLOCK
+ bool
+
config ARCH_USE_QUEUED_RWLOCKS
bool
diff --git a/kernel/async.c b/kernel/async.c
index a893d6170944..f6bd0d9885e1 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -149,7 +149,25 @@ static void async_run_entry_fn(struct work_struct *work)
wake_up(&async_done);
}
-static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain)
+/**
+ * async_schedule_node_domain - NUMA specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ * @node: NUMA node that we want to schedule this on or close to
+ * @domain: the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.
+ *
+ * Note: This function may be called from atomic or non-atomic contexts.
+ *
+ * The node requested will be honored on a best effort basis. If the node
+ * has no CPUs associated with it then the work is distributed among all
+ * available CPUs.
+ */
+async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+ int node, struct async_domain *domain)
{
struct async_entry *entry;
unsigned long flags;
@@ -195,43 +213,30 @@ static async_cookie_t __async_schedule(async_func_t func, void *data, struct asy
current->flags |= PF_USED_ASYNC;
/* schedule for execution */
- queue_work(system_unbound_wq, &entry->work);
+ queue_work_node(node, system_unbound_wq, &entry->work);
return newcookie;
}
+EXPORT_SYMBOL_GPL(async_schedule_node_domain);
/**
- * async_schedule - schedule a function for asynchronous execution
+ * async_schedule_node - NUMA specific version of async_schedule
* @func: function to execute asynchronously
* @data: data pointer to pass to the function
+ * @node: NUMA node that we want to schedule this on or close to
*
* Returns an async_cookie_t that may be used for checkpointing later.
* Note: This function may be called from atomic or non-atomic contexts.
- */
-async_cookie_t async_schedule(async_func_t func, void *data)
-{
- return __async_schedule(func, data, &async_dfl_domain);
-}
-EXPORT_SYMBOL_GPL(async_schedule);
-
-/**
- * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
- * @func: function to execute asynchronously
- * @data: data pointer to pass to the function
- * @domain: the domain
*
- * Returns an async_cookie_t that may be used for checkpointing later.
- * @domain may be used in the async_synchronize_*_domain() functions to
- * wait within a certain synchronization domain rather than globally. A
- * synchronization domain is specified via @domain. Note: This function
- * may be called from atomic or non-atomic contexts.
+ * The node requested will be honored on a best effort basis. If the node
+ * has no CPUs associated with it then the work is distributed among all
+ * available CPUs.
*/
-async_cookie_t async_schedule_domain(async_func_t func, void *data,
- struct async_domain *domain)
+async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
- return __async_schedule(func, data, domain);
+ return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
-EXPORT_SYMBOL_GPL(async_schedule_domain);
+EXPORT_SYMBOL_GPL(async_schedule_node);
/**
* async_synchronize_full - synchronize all asynchronous function calls
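
The kernel-doc above documents async_schedule_node_domain(), and async_schedule_node() wraps it with the default domain. As a hedged sketch of how a caller might use the new NUMA-aware entry points (probe_one(), struct my_dev and my_domain are invented names, not part of this patch):

#include <linux/async.h>
#include <linux/device.h>

static ASYNC_DOMAIN(my_domain);

struct my_dev {
	struct device dev;
	/* ... */
};

static void probe_one(void *data, async_cookie_t cookie)
{
	struct my_dev *d = data;

	/* slow, independent init work for one device */
	(void)d;
}

static void probe_all(struct my_dev *devs, int ndev)
{
	int i;

	for (i = 0; i < ndev; i++)
		/* best effort: run on (or near) the device's home node */
		async_schedule_node_domain(probe_one, &devs[i],
					   dev_to_node(&devs[i].dev),
					   &my_domain);

	/* wait only for work queued in this domain */
	async_synchronize_full_domain(&my_domain);
}
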
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 25632a75d630..c72e0d8e1e65 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -253,8 +253,9 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
u32 index = *(u32 *)key;
+ char *val;
- if (unlikely(map_flags > BPF_EXIST))
+ if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
/* unknown flags */
return -EINVAL;
@@ -262,17 +263,25 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
/* all elements were pre-allocated, cannot insert a new one */
return -E2BIG;
- if (unlikely(map_flags == BPF_NOEXIST))
+ if (unlikely(map_flags & BPF_NOEXIST))
/* all elements already exist */
return -EEXIST;
- if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+ if (unlikely((map_flags & BPF_F_LOCK) &&
+ !map_value_has_spin_lock(map)))
+ return -EINVAL;
+
+ if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
value, map->value_size);
- else
- memcpy(array->value +
- array->elem_size * (index & array->index_mask),
- value, map->value_size);
+ } else {
+ val = array->value +
+ array->elem_size * (index & array->index_mask);
+ if (map_flags & BPF_F_LOCK)
+ copy_map_value_locked(map, val, value, false);
+ else
+ copy_map_value(map, val, value);
+ }
return 0;
}
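
array_map_update_elem() now tolerates BPF_F_LOCK in map_flags and, when the flag is set, copies the new value through copy_map_value_locked() so the bpf_spin_lock embedded in the value serializes the update. A hedged user-space sketch of driving that path, assuming a libbpf that exposes bpf_map_lookup_elem_flags(); map_fd, struct val_t and its fields are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf.h>

struct val_t {
	struct bpf_spin_lock lock;
	__u64 counter;
};

static int bump(int map_fd, __u32 key)
{
	struct val_t val;

	/* read the element while holding its bpf_spin_lock */
	if (bpf_map_lookup_elem_flags(map_fd, &key, &val, BPF_F_LOCK))
		return -1;

	val.counter++;

	/* write it back under the same per-element lock */
	return bpf_map_update_elem(map_fd, &key, &val, BPF_F_LOCK);
}
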
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 715f9fcf4712..bd3921b1514b 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -157,7 +157,7 @@
*
*/
-#define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
+#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
@@ -355,6 +355,11 @@ static bool btf_type_is_struct(const struct btf_type *t)
return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
}
+static bool __btf_type_is_struct(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
+}
+
static bool btf_type_is_array(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
@@ -467,7 +472,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
return kind_ops[BTF_INFO_KIND(t->info)];
}
-bool btf_name_offset_valid(const struct btf *btf, u32 offset)
+static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
return BTF_STR_OFFSET_VALID(offset) &&
offset < btf->hdr.str_len;
@@ -525,7 +530,7 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
/*
* Regular int is not a bit field and it must be either
- * u8/u16/u32/u64.
+ * u8/u16/u32/u64 or __int128.
*/
static bool btf_type_int_is_regular(const struct btf_type *t)
{
@@ -538,7 +543,8 @@ static bool btf_type_int_is_regular(const struct btf_type *t)
if (BITS_PER_BYTE_MASKED(nr_bits) ||
BTF_INT_OFFSET(int_data) ||
(nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
- nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
+ nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
+ nr_bytes != (2 * sizeof(u64)))) {
return false;
}
@@ -1063,9 +1069,9 @@ static int btf_int_check_member(struct btf_verifier_env *env,
nr_copy_bits = BTF_INT_BITS(int_data) +
BITS_PER_BYTE_MASKED(struct_bits_off);
- if (nr_copy_bits > BITS_PER_U64) {
+ if (nr_copy_bits > BITS_PER_U128) {
btf_verifier_log_member(env, struct_type, member,
- "nr_copy_bits exceeds 64");
+ "nr_copy_bits exceeds 128");
return -EINVAL;
}
@@ -1119,9 +1125,9 @@ static int btf_int_check_kflag_member(struct btf_verifier_env *env,
bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
- if (nr_copy_bits > BITS_PER_U64) {
+ if (nr_copy_bits > BITS_PER_U128) {
btf_verifier_log_member(env, struct_type, member,
- "nr_copy_bits exceeds 64");
+ "nr_copy_bits exceeds 128");
return -EINVAL;
}
@@ -1168,9 +1174,9 @@ static s32 btf_int_check_meta(struct btf_verifier_env *env,
nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
- if (nr_bits > BITS_PER_U64) {
+ if (nr_bits > BITS_PER_U128) {
btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
- BITS_PER_U64);
+ BITS_PER_U128);
return -EINVAL;
}
@@ -1211,33 +1217,93 @@ static void btf_int_log(struct btf_verifier_env *env,
btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}
+static void btf_int128_print(struct seq_file *m, void *data)
+{
+ /* data points to a __int128 number.
+ * Suppose
+ * int128_num = *(__int128 *)data;
+	 * The formulas below show what upper_num and lower_num represent:
+ * upper_num = int128_num >> 64;
+ * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
+ */
+ u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = *(u64 *)data;
+ lower_num = *(u64 *)(data + 8);
+#else
+ upper_num = *(u64 *)(data + 8);
+ lower_num = *(u64 *)data;
+#endif
+ if (upper_num == 0)
+ seq_printf(m, "0x%llx", lower_num);
+ else
+ seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
+}
+
+static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
+ u16 right_shift_bits)
+{
+ u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = print_num[0];
+ lower_num = print_num[1];
+#else
+ upper_num = print_num[1];
+ lower_num = print_num[0];
+#endif
+
+ /* shake out un-needed bits by shift/or operations */
+ if (left_shift_bits >= 64) {
+ upper_num = lower_num << (left_shift_bits - 64);
+ lower_num = 0;
+ } else {
+ upper_num = (upper_num << left_shift_bits) |
+ (lower_num >> (64 - left_shift_bits));
+ lower_num = lower_num << left_shift_bits;
+ }
+
+ if (right_shift_bits >= 64) {
+ lower_num = upper_num >> (right_shift_bits - 64);
+ upper_num = 0;
+ } else {
+ lower_num = (lower_num >> right_shift_bits) |
+ (upper_num << (64 - right_shift_bits));
+ upper_num = upper_num >> right_shift_bits;
+ }
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ print_num[0] = upper_num;
+ print_num[1] = lower_num;
+#else
+ print_num[0] = lower_num;
+ print_num[1] = upper_num;
+#endif
+}
+
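
A hedged user-space replica of the shake-out above, little-endian only, for a 3-bit field starting at bit offset 2 (so nr_copy_bits = 5); it shows how the pair of u64s stands in for a single 128-bit shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t num[2] = { 0x14, 0 };	/* bits 2..4 hold the value 5 */
	unsigned int left = 128 - 5;	/* BITS_PER_U128 - nr_copy_bits = 123 */
	unsigned int right = 128 - 3;	/* BITS_PER_U128 - nr_bits = 125 */

	/* left shift by >= 64: the low word moves into the high word */
	num[1] = num[0] << (left - 64);
	num[0] = 0;

	/* right shift by >= 64: the high word moves back down */
	num[0] = num[1] >> (right - 64);
	num[1] = 0;

	printf("0x%llx\n", (unsigned long long)num[0]);	/* prints 0x5 */
	return 0;
}
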
static void btf_bitfield_seq_show(void *data, u8 bits_offset,
u8 nr_bits, struct seq_file *m)
{
u16 left_shift_bits, right_shift_bits;
u8 nr_copy_bytes;
u8 nr_copy_bits;
- u64 print_num;
+ u64 print_num[2] = {};
- data += BITS_ROUNDDOWN_BYTES(bits_offset);
- bits_offset = BITS_PER_BYTE_MASKED(bits_offset);
nr_copy_bits = nr_bits + bits_offset;
nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
- print_num = 0;
- memcpy(&print_num, data, nr_copy_bytes);
+ memcpy(print_num, data, nr_copy_bytes);
#ifdef __BIG_ENDIAN_BITFIELD
left_shift_bits = bits_offset;
#else
- left_shift_bits = BITS_PER_U64 - nr_copy_bits;
+ left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
- right_shift_bits = BITS_PER_U64 - nr_bits;
-
- print_num <<= left_shift_bits;
- print_num >>= right_shift_bits;
+ right_shift_bits = BITS_PER_U128 - nr_bits;
- seq_printf(m, "0x%llx", print_num);
+ btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
+ btf_int128_print(m, print_num);
}
@@ -1252,10 +1318,12 @@ static void btf_int_bits_seq_show(const struct btf *btf,
/*
* bits_offset is at most 7.
- * BTF_INT_OFFSET() cannot exceed 64 bits.
+ * BTF_INT_OFFSET() cannot exceed 128 bits.
*/
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
- btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m);
+ data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+ bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
}
static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
@@ -1274,6 +1342,9 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
}
switch (nr_bits) {
+ case 128:
+ btf_int128_print(m, data);
+ break;
case 64:
if (sign)
seq_printf(m, "%lld", *(s64 *)data);
@@ -1459,7 +1530,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
/* "typedef void new_void", "const void"...etc */
if (!btf_type_is_void(next_type) &&
- !btf_type_is_fwd(next_type)) {
+ !btf_type_is_fwd(next_type) &&
+ !btf_type_is_func_proto(next_type)) {
btf_verifier_log_type(env, v->t, "Invalid type_id");
return -EINVAL;
}
@@ -1979,6 +2051,43 @@ static void btf_struct_log(struct btf_verifier_env *env,
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}
+/* find 'struct bpf_spin_lock' in map value.
+ * return >= 0 offset if found
+ * and < 0 in case of error
+ */
+int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
+{
+ const struct btf_member *member;
+ u32 i, off = -ENOENT;
+
+ if (!__btf_type_is_struct(t))
+ return -EINVAL;
+
+ for_each_member(i, t, member) {
+ const struct btf_type *member_type = btf_type_by_id(btf,
+ member->type);
+ if (!__btf_type_is_struct(member_type))
+ continue;
+ if (member_type->size != sizeof(struct bpf_spin_lock))
+ continue;
+ if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
+ "bpf_spin_lock"))
+ continue;
+ if (off != -ENOENT)
+ /* only one 'struct bpf_spin_lock' is allowed */
+ return -E2BIG;
+ off = btf_member_bit_offset(t, member);
+ if (off % 8)
+ /* valid C code cannot generate such BTF */
+ return -EINVAL;
+ off /= 8;
+ if (off % __alignof__(struct bpf_spin_lock))
+ /* valid struct bpf_spin_lock will be 4 byte aligned */
+ return -EINVAL;
+ }
+ return off;
+}
+
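
btf_find_spin_lock() is what later lets map_check_btf() record spin_lock_off for a map. A hedged sketch of the value layout it accepts (struct name and extra fields are illustrative): exactly one struct bpf_spin_lock member, placed on a byte boundary and 4-byte aligned; a second lock, or one buried in a bitfield, makes the walk above return -E2BIG or -EINVAL.

/* value type of a BPF_MAP_TYPE_HASH/ARRAY/CGROUP_STORAGE map */
struct my_map_value {
	struct bpf_spin_lock lock;	/* spin_lock_off will be 0 */
	__u64 packets;
	__u64 bytes;
};
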
static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
u32 type_id, void *data, u8 bits_offset,
struct seq_file *m)
@@ -2001,12 +2110,12 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
member_offset = btf_member_bit_offset(t, member);
bitfield_size = btf_member_bitfield_size(t, member);
+ bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
+ bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
if (bitfield_size) {
- btf_bitfield_seq_show(data, member_offset,
+ btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
bitfield_size, m);
} else {
- bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
- bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
ops = btf_type_ops(member_type);
ops->seq_show(btf, member_type, member->type,
data + bytes_offset, bits8_offset, m);
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 9425c2fb872f..4e807973aa80 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -230,6 +230,7 @@ cleanup:
* @cgrp: The cgroup which descendants to traverse
* @prog: A program to attach
* @type: Type of attach operation
+ * @flags: Option flags
*
* Must be called with cgroup_mutex held.
*/
@@ -363,7 +364,7 @@ cleanup:
* Must be called with cgroup_mutex held.
*/
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 unused_flags)
+ enum bpf_attach_type type)
{
struct list_head *progs = &cgrp->bpf.progs[type];
enum bpf_cgroup_storage_type stype;
@@ -572,7 +573,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
bpf_compute_and_save_data_end(skb, &saved_data_end);
ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
- bpf_prog_run_save_cb);
+ __bpf_prog_run_save_cb);
bpf_restore_data_end(skb, saved_data_end);
__skb_pull(skb, offset);
skb->sk = save_sk;
@@ -718,6 +719,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
+ /* fall through */
default:
return NULL;
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f908b9356025..ff09d32a8a1b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -78,7 +78,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
return NULL;
}
-struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
+struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
struct bpf_prog_aux *aux;
@@ -104,6 +104,32 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
return fp;
}
+
+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
+{
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
+ struct bpf_prog *prog;
+ int cpu;
+
+ prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
+ if (!prog)
+ return NULL;
+
+ prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
+ if (!prog->aux->stats) {
+ kfree(prog->aux);
+ vfree(prog);
+ return NULL;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct bpf_prog_stats *pstats;
+
+ pstats = per_cpu_ptr(prog->aux->stats, cpu);
+ u64_stats_init(&pstats->syncp);
+ }
+ return prog;
+}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
@@ -231,7 +257,10 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
void __bpf_prog_free(struct bpf_prog *fp)
{
- kfree(fp->aux);
+ if (fp->aux) {
+ free_percpu(fp->aux->stats);
+ kfree(fp->aux);
+ }
vfree(fp);
}
@@ -307,15 +336,16 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
return 0;
}
-static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
- u32 curr, const bool probe_pass)
+static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
+ s32 end_new, u32 curr, const bool probe_pass)
{
const s64 imm_min = S32_MIN, imm_max = S32_MAX;
+ s32 delta = end_new - end_old;
s64 imm = insn->imm;
- if (curr < pos && curr + imm + 1 > pos)
+ if (curr < pos && curr + imm + 1 >= end_old)
imm += delta;
- else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
+ else if (curr >= end_new && curr + imm + 1 < end_new)
imm -= delta;
if (imm < imm_min || imm > imm_max)
return -ERANGE;
@@ -324,15 +354,16 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
return 0;
}
-static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
- u32 curr, const bool probe_pass)
+static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
+ s32 end_new, u32 curr, const bool probe_pass)
{
const s32 off_min = S16_MIN, off_max = S16_MAX;
+ s32 delta = end_new - end_old;
s32 off = insn->off;
- if (curr < pos && curr + off + 1 > pos)
+ if (curr < pos && curr + off + 1 >= end_old)
off += delta;
- else if (curr > pos + delta && curr + off + 1 <= pos + delta)
+ else if (curr >= end_new && curr + off + 1 < end_new)
off -= delta;
if (off < off_min || off > off_max)
return -ERANGE;
@@ -341,10 +372,10 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
return 0;
}
-static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
- const bool probe_pass)
+static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
+ s32 end_new, const bool probe_pass)
{
- u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
+ u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
struct bpf_insn *insn = prog->insnsi;
int ret = 0;
@@ -356,22 +387,23 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
* do any other adjustments. Therefore skip the patchlet.
*/
if (probe_pass && i == pos) {
- i += delta + 1;
- insn++;
+ i = end_new;
+ insn = prog->insnsi + end_old;
}
code = insn->code;
- if (BPF_CLASS(code) != BPF_JMP ||
+ if ((BPF_CLASS(code) != BPF_JMP &&
+ BPF_CLASS(code) != BPF_JMP32) ||
BPF_OP(code) == BPF_EXIT)
continue;
/* Adjust offset of jmps if we cross patch boundaries. */
if (BPF_OP(code) == BPF_CALL) {
if (insn->src_reg != BPF_PSEUDO_CALL)
continue;
- ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
- probe_pass);
+ ret = bpf_adj_delta_to_imm(insn, pos, end_old,
+ end_new, i, probe_pass);
} else {
- ret = bpf_adj_delta_to_off(insn, pos, delta, i,
- probe_pass);
+ ret = bpf_adj_delta_to_off(insn, pos, end_old,
+ end_new, i, probe_pass);
}
if (ret)
break;
@@ -421,7 +453,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* we afterwards may not fail anymore.
*/
if (insn_adj_cnt > cnt_max &&
- bpf_adj_branches(prog, off, insn_delta, true))
+ bpf_adj_branches(prog, off, off + 1, off + len, true))
return NULL;
/* Several new instructions need to be inserted. Make room
@@ -453,13 +485,25 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* the ship has sailed to reverse to the original state. An
* overflow cannot happen at this point.
*/
- BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
+ BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
bpf_adj_linfo(prog_adj, off, insn_delta);
return prog_adj;
}
+int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
+{
+	/* Branch offsets can't overflow when the program is shrinking, so there
+	 * is no need to call bpf_adj_branches(..., true) here
+ */
+ memmove(prog->insnsi + off, prog->insnsi + off + cnt,
+ sizeof(struct bpf_insn) * (prog->len - off - cnt));
+ prog->len -= cnt;
+
+ return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
+}
+
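
A hedged worked example of the adjustment bpf_remove_insns() delegates to bpf_adj_branches(); the instruction listing is invented for illustration:

	before bpf_remove_insns(prog, 2, 2):
	  0: ja +3        ; targets insn 4
	  1: r0 = 0
	  2: r1 = 1       ; removed
	  3: r2 = 2       ; removed
	  4: exit

	after:
	  0: ja +1        ; still targets exit
	  1: r0 = 0
	  2: exit

bpf_adj_branches(prog, 2, 4, 2, false) visits the jump at curr = 0 with off = 3: curr < pos and curr + off + 1 == end_old, so the offset gains delta = end_new - end_old = -2 and the branch keeps landing on exit.
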
void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
int i;
@@ -495,7 +539,7 @@ bpf_get_prog_addr_region(const struct bpf_prog *prog,
*symbol_end = addr + hdr->pages * PAGE_SIZE;
}
-static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
+void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
const char *end = sym + KSYM_NAME_LEN;
const struct btf_type *type;
@@ -934,6 +978,27 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
break;
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ /* Accommodate for extra offset in case of a backjump. */
+ off = from->off;
+ if (off < 0)
+ off -= 2;
+ *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+ *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+ *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
+ off);
+ break;
+
case BPF_LD | BPF_IMM | BPF_DW:
*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
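
The new BPF_JMP32 immediate cases above blind their 32-bit constant the same way the existing 64-bit cases do. A hedged illustration of the rewrite (imm_rnd is the per-program random value, and wAX stands for the hidden auxiliary register reserved for blinding):

	if w2 > 0x1234 goto pc-4

becomes

	wAX = 0x1234 ^ imm_rnd
	wAX ^= imm_rnd
	if w2 > wAX goto pc-6	; the backward offset absorbs the two extra insns

A forward offset is left alone because the inserted pair sits before the compare, outside the region it jumps over.
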
@@ -1130,6 +1195,31 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
INSN_2(JMP, CALL), \
/* Exit instruction. */ \
INSN_2(JMP, EXIT), \
+ /* 32-bit Jump instructions. */ \
+ /* Register based. */ \
+ INSN_3(JMP32, JEQ, X), \
+ INSN_3(JMP32, JNE, X), \
+ INSN_3(JMP32, JGT, X), \
+ INSN_3(JMP32, JLT, X), \
+ INSN_3(JMP32, JGE, X), \
+ INSN_3(JMP32, JLE, X), \
+ INSN_3(JMP32, JSGT, X), \
+ INSN_3(JMP32, JSLT, X), \
+ INSN_3(JMP32, JSGE, X), \
+ INSN_3(JMP32, JSLE, X), \
+ INSN_3(JMP32, JSET, X), \
+ /* Immediate based. */ \
+ INSN_3(JMP32, JEQ, K), \
+ INSN_3(JMP32, JNE, K), \
+ INSN_3(JMP32, JGT, K), \
+ INSN_3(JMP32, JLT, K), \
+ INSN_3(JMP32, JGE, K), \
+ INSN_3(JMP32, JLE, K), \
+ INSN_3(JMP32, JSGT, K), \
+ INSN_3(JMP32, JSLT, K), \
+ INSN_3(JMP32, JSGE, K), \
+ INSN_3(JMP32, JSLE, K), \
+ INSN_3(JMP32, JSET, K), \
/* Jump instructions. */ \
/* Register based. */ \
INSN_3(JMP, JEQ, X), \
@@ -1202,8 +1292,9 @@ bool bpf_opcode_in_insntable(u8 code)
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
* __bpf_prog_run - run eBPF program on a given context
- * @ctx: is the data we are operating on
+ * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
* @insn: is the array of eBPF instructions
+ * @stack: is the eBPF storage stack
*
* Decode and execute eBPF instructions.
*/
@@ -1390,145 +1481,49 @@ select_insn:
out:
CONT;
}
- /* JMP */
JMP_JA:
insn += insn->off;
CONT;
- JMP_JEQ_X:
- if (DST == SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JEQ_K:
- if (DST == IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JNE_X:
- if (DST != SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JNE_K:
- if (DST != IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JGT_X:
- if (DST > SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JGT_K:
- if (DST > IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JLT_X:
- if (DST < SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JLT_K:
- if (DST < IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JGE_X:
- if (DST >= SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JGE_K:
- if (DST >= IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JLE_X:
- if (DST <= SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JLE_K:
- if (DST <= IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSGT_X:
- if (((s64) DST) > ((s64) SRC)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSGT_K:
- if (((s64) DST) > ((s64) IMM)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSLT_X:
- if (((s64) DST) < ((s64) SRC)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSLT_K:
- if (((s64) DST) < ((s64) IMM)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSGE_X:
- if (((s64) DST) >= ((s64) SRC)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSGE_K:
- if (((s64) DST) >= ((s64) IMM)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSLE_X:
- if (((s64) DST) <= ((s64) SRC)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSLE_K:
- if (((s64) DST) <= ((s64) IMM)) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSET_X:
- if (DST & SRC) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
- JMP_JSET_K:
- if (DST & IMM) {
- insn += insn->off;
- CONT_JMP;
- }
- CONT;
JMP_EXIT:
return BPF_R0;
-
+ /* JMP */
+#define COND_JMP(SIGN, OPCODE, CMP_OP) \
+ JMP_##OPCODE##_X: \
+ if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
+ insn += insn->off; \
+ CONT_JMP; \
+ } \
+ CONT; \
+ JMP32_##OPCODE##_X: \
+ if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
+ insn += insn->off; \
+ CONT_JMP; \
+ } \
+ CONT; \
+ JMP_##OPCODE##_K: \
+ if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
+ insn += insn->off; \
+ CONT_JMP; \
+ } \
+ CONT; \
+ JMP32_##OPCODE##_K: \
+ if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
+ insn += insn->off; \
+ CONT_JMP; \
+ } \
+ CONT;
+ COND_JMP(u, JEQ, ==)
+ COND_JMP(u, JNE, !=)
+ COND_JMP(u, JGT, >)
+ COND_JMP(u, JLT, <)
+ COND_JMP(u, JGE, >=)
+ COND_JMP(u, JLE, <=)
+ COND_JMP(u, JSET, &)
+ COND_JMP(s, JSGT, >)
+ COND_JMP(s, JSLT, <)
+ COND_JMP(s, JSGE, >=)
+ COND_JMP(s, JSLE, <=)
+#undef COND_JMP
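
For reference, a hedged expansion of a single instance, COND_JMP(u, JGT, >); the JMP32 labels differ from the JMP ones only in truncating both operands to 32 bits:

	JMP_JGT_X:
		if ((u64) DST > (u64) SRC) { insn += insn->off; CONT_JMP; }
		CONT;
	JMP32_JGT_X:
		if ((u32) DST > (u32) SRC) { insn += insn->off; CONT_JMP; }
		CONT;
	JMP_JGT_K:
		if ((u64) DST > (u64) IMM) { insn += insn->off; CONT_JMP; }
		CONT;
	JMP32_JGT_K:
		if ((u32) DST > (u32) IMM) { insn += insn->off; CONT_JMP; }
		CONT;
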
/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE) \
STX_MEM_##SIZEOP: \
@@ -2036,6 +2031,8 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
+const struct bpf_func_proto bpf_spin_lock_proto __weak;
+const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
@@ -2101,6 +2098,10 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
return -EFAULT;
}
+DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
+EXPORT_SYMBOL(bpf_stats_enabled_key);
+int sysctl_bpf_stats_enabled __read_mostly;
+
/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index d6b76377cb6e..de73f55e42fd 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -67,7 +67,7 @@ const char *const bpf_class_string[8] = {
[BPF_STX] = "stx",
[BPF_ALU] = "alu",
[BPF_JMP] = "jmp",
- [BPF_RET] = "BUG",
+ [BPF_JMP32] = "jmp32",
[BPF_ALU64] = "alu64",
};
@@ -136,23 +136,22 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
else
print_bpf_end_insn(verbose, cbs->private_data, insn);
} else if (BPF_OP(insn->code) == BPF_NEG) {
- verbose(cbs->private_data, "(%02x) r%d = %s-r%d\n",
- insn->code, insn->dst_reg,
- class == BPF_ALU ? "(u32) " : "",
+ verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n",
+ insn->code, class == BPF_ALU ? 'w' : 'r',
+ insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(cbs->private_data, "(%02x) %sr%d %s %sr%d\n",
- insn->code, class == BPF_ALU ? "(u32) " : "",
+ verbose(cbs->private_data, "(%02x) %c%d %s %c%d\n",
+ insn->code, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
- class == BPF_ALU ? "(u32) " : "",
+ class == BPF_ALU ? 'w' : 'r',
insn->src_reg);
} else {
- verbose(cbs->private_data, "(%02x) %sr%d %s %s%d\n",
- insn->code, class == BPF_ALU ? "(u32) " : "",
+ verbose(cbs->private_data, "(%02x) %c%d %s %d\n",
+ insn->code, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
- class == BPF_ALU ? "(u32) " : "",
insn->imm);
}
} else if (class == BPF_STX) {
@@ -220,7 +219,7 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
return;
}
- } else if (class == BPF_JMP) {
+ } else if (class == BPF_JMP32 || class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
@@ -244,13 +243,18 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
verbose(cbs->private_data, "(%02x) exit\n", insn->code);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(cbs->private_data, "(%02x) if r%d %s r%d goto pc%+d\n",
- insn->code, insn->dst_reg,
+ verbose(cbs->private_data,
+ "(%02x) if %c%d %s %c%d goto pc%+d\n",
+ insn->code, class == BPF_JMP32 ? 'w' : 'r',
+ insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
+ class == BPF_JMP32 ? 'w' : 'r',
insn->src_reg, insn->off);
} else {
- verbose(cbs->private_data, "(%02x) if r%d %s 0x%x goto pc%+d\n",
- insn->code, insn->dst_reg,
+ verbose(cbs->private_data,
+ "(%02x) if %c%d %s 0x%x goto pc%+d\n",
+ insn->code, class == BPF_JMP32 ? 'w' : 'r',
+ insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->imm, insn->off);
}
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 4b7c76765d9d..fed15cf94dca 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
}
if (htab_is_prealloc(htab)) {
- pcpu_freelist_push(&htab->freelist, &l->fnode);
+ __pcpu_freelist_push(&htab->freelist, &l->fnode);
} else {
atomic_dec(&htab->count);
l->htab = htab;
@@ -718,21 +718,12 @@ static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
BITS_PER_LONG == 64;
}
-static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
-{
- u32 size = htab->map.value_size;
-
- if (percpu || fd_htab_map_needs_adjust(htab))
- size = round_up(size, 8);
- return size;
-}
-
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
void *value, u32 key_size, u32 hash,
bool percpu, bool onallcpus,
struct htab_elem *old_elem)
{
- u32 size = htab_size_value(htab, percpu);
+ u32 size = htab->map.value_size;
bool prealloc = htab_is_prealloc(htab);
struct htab_elem *l_new, **pl_new;
void __percpu *pptr;
@@ -748,7 +739,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
} else {
struct pcpu_freelist_node *l;
- l = pcpu_freelist_pop(&htab->freelist);
+ l = __pcpu_freelist_pop(&htab->freelist);
if (!l)
return ERR_PTR(-E2BIG);
l_new = container_of(l, struct htab_elem, fnode);
@@ -770,10 +761,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
l_new = ERR_PTR(-ENOMEM);
goto dec_count;
}
+ check_and_init_map_lock(&htab->map,
+ l_new->key + round_up(key_size, 8));
}
memcpy(l_new->key, key, key_size);
if (percpu) {
+ size = round_up(size, 8);
if (prealloc) {
pptr = htab_elem_get_ptr(l_new, key_size);
} else {
@@ -791,8 +785,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
if (!prealloc)
htab_elem_set_ptr(l_new, key_size, pptr);
- } else {
+ } else if (fd_htab_map_needs_adjust(htab)) {
+ size = round_up(size, 8);
memcpy(l_new->key + round_up(key_size, 8), value, size);
+ } else {
+ copy_map_value(&htab->map,
+ l_new->key + round_up(key_size, 8),
+ value);
}
l_new->hash = hash;
@@ -805,11 +804,11 @@ dec_count:
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
u64 map_flags)
{
- if (l_old && map_flags == BPF_NOEXIST)
+ if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
/* elem already exists */
return -EEXIST;
- if (!l_old && map_flags == BPF_EXIST)
+ if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
/* elem doesn't exist, cannot update it */
return -ENOENT;
@@ -828,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
u32 key_size, hash;
int ret;
- if (unlikely(map_flags > BPF_EXIST))
+ if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
/* unknown flags */
return -EINVAL;
@@ -841,6 +840,28 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
b = __select_bucket(htab, hash);
head = &b->head;
+ if (unlikely(map_flags & BPF_F_LOCK)) {
+ if (unlikely(!map_value_has_spin_lock(map)))
+ return -EINVAL;
+ /* find an element without taking the bucket lock */
+ l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
+ htab->n_buckets);
+ ret = check_flags(htab, l_old, map_flags);
+ if (ret)
+ return ret;
+ if (l_old) {
+ /* grab the element lock and update value in place */
+ copy_map_value_locked(map,
+ l_old->key + round_up(key_size, 8),
+ value, false);
+ return 0;
+ }
+		/* fall through and grab the bucket lock, then look up again.
+		 * There is a 99.9% chance that the element won't be found,
+		 * but the second lookup under the lock still has to be done.
+ */
+ }
+
/* bpf_map_update_elem() can be called in_irq() */
raw_spin_lock_irqsave(&b->lock, flags);
@@ -850,6 +871,20 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
if (ret)
goto err;
+ if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
+ /* first lookup without the bucket lock didn't find the element,
+ * but second lookup with the bucket lock found it.
+ * This case is highly unlikely, but has to be dealt with:
+ * grab the element lock in addition to the bucket lock
+ * and update element in place
+ */
+ copy_map_value_locked(map,
+ l_old->key + round_up(key_size, 8),
+ value, false);
+ ret = 0;
+ goto err;
+ }
+
l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
l_old);
if (IS_ERR(l_new)) {
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index a74972b07e74..a411fc17d265 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -221,6 +221,102 @@ const struct bpf_func_proto bpf_get_current_comm_proto = {
.arg2_type = ARG_CONST_SIZE,
};
+#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
+
+static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
+{
+ arch_spinlock_t *l = (void *)lock;
+ union {
+ __u32 val;
+ arch_spinlock_t lock;
+ } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
+
+ compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
+ BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
+ BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
+ arch_spin_lock(l);
+}
+
+static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
+{
+ arch_spinlock_t *l = (void *)lock;
+
+ arch_spin_unlock(l);
+}
+
+#else
+
+static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
+{
+ atomic_t *l = (void *)lock;
+
+ BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
+ do {
+ atomic_cond_read_relaxed(l, !VAL);
+ } while (atomic_xchg(l, 1));
+}
+
+static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
+{
+ atomic_t *l = (void *)lock;
+
+ atomic_set_release(l, 0);
+}
+
+#endif
+
+static DEFINE_PER_CPU(unsigned long, irqsave_flags);
+
+notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __bpf_spin_lock(lock);
+ __this_cpu_write(irqsave_flags, flags);
+ return 0;
+}
+
+const struct bpf_func_proto bpf_spin_lock_proto = {
+ .func = bpf_spin_lock,
+ .gpl_only = false,
+ .ret_type = RET_VOID,
+ .arg1_type = ARG_PTR_TO_SPIN_LOCK,
+};
+
+notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+{
+ unsigned long flags;
+
+ flags = __this_cpu_read(irqsave_flags);
+ __bpf_spin_unlock(lock);
+ local_irq_restore(flags);
+ return 0;
+}
+
+const struct bpf_func_proto bpf_spin_unlock_proto = {
+ .func = bpf_spin_unlock,
+ .gpl_only = false,
+ .ret_type = RET_VOID,
+ .arg1_type = ARG_PTR_TO_SPIN_LOCK,
+};
+
+void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
+ bool lock_src)
+{
+ struct bpf_spin_lock *lock;
+
+ if (lock_src)
+ lock = src + map->spin_lock_off;
+ else
+ lock = dst + map->spin_lock_off;
+ preempt_disable();
+ ____bpf_spin_lock(lock);
+ copy_map_value(map, dst, src);
+ ____bpf_spin_unlock(lock);
+ preempt_enable();
+}
+
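
bpf_spin_lock()/bpf_spin_unlock() are the program-facing side of the lock, while copy_map_value_locked() is the kernel-side path used by the syscall and map-update code. A hedged sketch of program usage in the selftests style; the map name, value layout and section name are illustrative, and the map's value type must be described by BTF so the verifier can locate the lock:

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC() and helper declarations */

struct val_t {
	struct bpf_spin_lock lock;
	__u64 cnt;
};

struct bpf_map_def SEC("maps") counters = {
	.type		= BPF_MAP_TYPE_ARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(struct val_t),
	.max_entries	= 1,
};

SEC("cgroup/skb")
int bump(struct __sk_buff *skb)
{
	struct val_t *val;
	__u32 key = 0;

	val = bpf_map_lookup_elem(&counters, &key);
	if (!val)
		return 1;

	bpf_spin_lock(&val->lock);
	val->cnt++;
	bpf_spin_unlock(&val->lock);

	return 1;
}
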
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 07a34ef562a0..6b572e2de7fb 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -131,7 +131,14 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
struct bpf_cgroup_storage *storage;
struct bpf_storage_buffer *new;
- if (flags != BPF_ANY && flags != BPF_EXIST)
+ if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST | BPF_NOEXIST)))
+ return -EINVAL;
+
+ if (unlikely(flags & BPF_NOEXIST))
+ return -EINVAL;
+
+ if (unlikely((flags & BPF_F_LOCK) &&
+ !map_value_has_spin_lock(map)))
return -EINVAL;
storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -139,6 +146,11 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
if (!storage)
return -ENOENT;
+ if (flags & BPF_F_LOCK) {
+ copy_map_value_locked(map, storage->buf->data, value, false);
+ return 0;
+ }
+
new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
map->value_size,
__GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
@@ -147,6 +159,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
return -ENOMEM;
memcpy(&new->data[0], value, map->value_size);
+ check_and_init_map_lock(map, new->data);
new = xchg(&storage->buf, new);
kfree_rcu(new, rcu);
@@ -483,6 +496,7 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
storage->buf = kmalloc_node(size, flags, map->numa_node);
if (!storage->buf)
goto enomem;
+ check_and_init_map_lock(map, storage->buf->data);
} else {
storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
if (!storage->percpu_buf)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index abf1002080df..93a5cbbde421 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
}
if (!node || node->prefixlen != key->prefixlen ||
+ node->prefixlen != matchlen ||
(node->flags & LPM_TREE_NODE_FLAG_IM)) {
ret = -ENOENT;
goto out;
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 99d243e1ad6e..3dff41403583 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -12,6 +12,7 @@
struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
+ u32 inner_map_meta_size;
struct fd f;
f = fdget(inner_map_ufd);
@@ -36,7 +37,17 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
return ERR_PTR(-EINVAL);
}
- inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+ if (map_value_has_spin_lock(inner_map)) {
+ fdput(f);
+ return ERR_PTR(-ENOTSUPP);
+ }
+
+ inner_map_meta_size = sizeof(*inner_map_meta);
+	/* In some cases the verifier needs to access beyond just the base map. */
+ if (inner_map->ops == &array_map_ops)
+ inner_map_meta_size = sizeof(struct bpf_array);
+
+ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
if (!inner_map_meta) {
fdput(f);
return ERR_PTR(-ENOMEM);
@@ -46,8 +57,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta->key_size = inner_map->key_size;
inner_map_meta->value_size = inner_map->value_size;
inner_map_meta->map_flags = inner_map->map_flags;
- inner_map_meta->ops = inner_map->ops;
inner_map_meta->max_entries = inner_map->max_entries;
+ inner_map_meta->spin_lock_off = inner_map->spin_lock_off;
+
+ /* Misc members not needed in bpf_map_meta_equal() check. */
+ inner_map_meta->ops = inner_map->ops;
+ if (inner_map->ops == &array_map_ops) {
+ inner_map_meta->unpriv_array = inner_map->unpriv_array;
+ container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+ container_of(inner_map, struct bpf_array, map)->index_mask;
+ }
fdput(f);
return inner_map_meta;
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 54cf2b9c44a4..ba635209ae9a 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -35,6 +35,7 @@ static DECLARE_RWSEM(bpf_devs_lock);
struct bpf_offload_dev {
const struct bpf_prog_offload_ops *ops;
struct list_head netdevs;
+ void *priv;
};
struct bpf_offload_netdev {
@@ -173,6 +174,41 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
return ret;
}
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn)
+{
+ const struct bpf_prog_offload_ops *ops;
+ struct bpf_prog_offload *offload;
+ int ret = -EOPNOTSUPP;
+
+ down_read(&bpf_devs_lock);
+ offload = env->prog->aux->offload;
+ if (offload) {
+ ops = offload->offdev->ops;
+ if (!offload->opt_failed && ops->replace_insn)
+ ret = ops->replace_insn(env, off, insn);
+ offload->opt_failed |= ret;
+ }
+ up_read(&bpf_devs_lock);
+}
+
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
+{
+ struct bpf_prog_offload *offload;
+ int ret = -EOPNOTSUPP;
+
+ down_read(&bpf_devs_lock);
+ offload = env->prog->aux->offload;
+ if (offload) {
+ if (!offload->opt_failed && offload->offdev->ops->remove_insns)
+ ret = offload->offdev->ops->remove_insns(env, off, cnt);
+ offload->opt_failed |= ret;
+ }
+ up_read(&bpf_devs_lock);
+}
+
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
struct bpf_prog_offload *offload = prog->aux->offload;
@@ -634,7 +670,7 @@ unlock:
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
struct bpf_offload_dev *
-bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
struct bpf_offload_dev *offdev;
int err;
@@ -653,6 +689,7 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
return ERR_PTR(-ENOMEM);
offdev->ops = ops;
+ offdev->priv = priv;
INIT_LIST_HEAD(&offdev->netdevs);
return offdev;
@@ -665,3 +702,9 @@ void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
+
+void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
+{
+ return offdev->priv;
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 673fa6fe2d73..0c1b4ba9e90e 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
free_percpu(s->freelist);
}
-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
- struct pcpu_freelist_node *node)
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+ struct pcpu_freelist_node *node)
{
raw_spin_lock(&head->lock);
node->next = head->first;
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
raw_spin_unlock(&head->lock);
}
-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
struct pcpu_freelist_node *node)
{
struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
- __pcpu_freelist_push(head, node);
+ ___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+ struct pcpu_freelist_node *node)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __pcpu_freelist_push(s, node);
+ local_irq_restore(flags);
}
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
for_each_possible_cpu(cpu) {
again:
head = per_cpu_ptr(s->freelist, cpu);
- __pcpu_freelist_push(head, buf);
+ ___pcpu_freelist_push(head, buf);
i++;
buf += elem_size;
if (i == nr_elems)
@@ -74,14 +84,12 @@ again:
local_irq_restore(flags);
}
-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
struct pcpu_freelist_head *head;
struct pcpu_freelist_node *node;
- unsigned long flags;
int orig_cpu, cpu;
- local_irq_save(flags);
orig_cpu = cpu = raw_smp_processor_id();
while (1) {
head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
node = head->first;
if (node) {
head->first = node->next;
- raw_spin_unlock_irqrestore(&head->lock, flags);
+ raw_spin_unlock(&head->lock);
return node;
}
raw_spin_unlock(&head->lock);
cpu = cpumask_next(cpu, cpu_possible_mask);
if (cpu >= nr_cpu_ids)
cpu = 0;
- if (cpu == orig_cpu) {
- local_irq_restore(flags);
+ if (cpu == orig_cpu)
return NULL;
- }
}
}
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+ struct pcpu_freelist_node *ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = __pcpu_freelist_pop(s);
+ local_irq_restore(flags);
+ return ret;
+}
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
index 3049aae8ea1e..c3960118e617 100644
--- a/kernel/bpf/percpu_freelist.h
+++ b/kernel/bpf/percpu_freelist.h
@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
struct pcpu_freelist_node *next;
};
+/* pcpu_freelist_* do spin_lock_irqsave. */
void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
+/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
+void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
u32 nr_elems);
int pcpu_freelist_init(struct pcpu_freelist *);
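
The comments above capture the new split: pcpu_freelist_push()/pcpu_freelist_pop() now disable interrupts themselves, while the __-prefixed variants only take the per-CPU spinlock and expect the caller to have IRQs off already. A hedged, fragmentary sketch of the two calling conventions, reusing the htab names from the hunks above:

	/* ordinary, maybe-interruptible context: use the irqsave variants */
	pcpu_freelist_push(&htab->freelist, &l->fnode);

	/* under raw_spin_lock_irqsave(&b->lock, flags), as in
	 * htab_map_update_elem(): IRQs are already off, so the lock-only
	 * variant avoids a redundant save/restore
	 */
	l = __pcpu_freelist_pop(&htab->freelist);
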
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 90daf285de03..950ab2f28922 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
struct stack_map_irq_work *work;
work = container_of(entry, struct stack_map_irq_work, irq_work);
- up_read(work->sem);
+ up_read_non_owner(work->sem);
work->sem = NULL;
}
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
if (nhdr->n_type == BPF_BUILD_ID &&
nhdr->n_namesz == sizeof("GNU") &&
- nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
+ nhdr->n_descsz > 0 &&
+ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
memcpy(build_id,
note_start + note_offs +
ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
- BPF_BUILD_ID_SIZE);
+ nhdr->n_descsz);
+ memset(build_id + nhdr->n_descsz, 0,
+ BPF_BUILD_ID_SIZE - nhdr->n_descsz);
return 0;
}
new_offs = note_offs + sizeof(Elf32_Nhdr) +
@@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
return -EFAULT; /* page not mapped */
ret = -EINVAL;
- page_addr = page_address(page);
+ page_addr = kmap_atomic(page);
ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
@@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
ret = stack_map_get_build_id_64(page_addr, build_id);
out:
+ kunmap_atomic(page_addr);
put_page(page);
return ret;
}
@@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
}
return;
}
@@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
/* per entry fall back to ips */
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
continue;
}
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
@@ -332,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
} else {
work->sem = &current->mm->mmap_sem;
irq_work_queue(&work->irq_work);
+ /*
+ * The irq_work will release the mmap_sem with
+ * up_read_non_owner(). The rwsem_release() is called
+ * here to release the lock from lockdep's perspective.
+ */
+ rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
}
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index b155cd17c1bd..62f6bced3a3c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -463,7 +463,7 @@ int map_check_no_btf(const struct bpf_map *map,
return -ENOTSUPP;
}
-static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
+static int map_check_btf(struct bpf_map *map, const struct btf *btf,
u32 btf_key_id, u32 btf_value_id)
{
const struct btf_type *key_type, *value_type;
@@ -478,6 +478,22 @@ static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
if (!value_type || value_size != map->value_size)
return -EINVAL;
+ map->spin_lock_off = btf_find_spin_lock(btf, value_type);
+
+ if (map_value_has_spin_lock(map)) {
+ if (map->map_type != BPF_MAP_TYPE_HASH &&
+ map->map_type != BPF_MAP_TYPE_ARRAY &&
+ map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
+ return -ENOTSUPP;
+ if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
+ map->value_size) {
+ WARN_ONCE(1,
+ "verifier bug spin_lock_off %d value_size %d\n",
+ map->spin_lock_off, map->value_size);
+ return -EFAULT;
+ }
+ }
+
if (map->ops->map_check_btf)
ret = map->ops->map_check_btf(map, btf, key_type, value_type);
@@ -542,6 +558,8 @@ static int map_create(union bpf_attr *attr)
map->btf = btf;
map->btf_key_type_id = attr->btf_key_type_id;
map->btf_value_type_id = attr->btf_value_type_id;
+ } else {
+ map->spin_lock_off = -EINVAL;
}
err = security_bpf_map_alloc(map);
@@ -559,12 +577,12 @@ static int map_create(union bpf_attr *attr)
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
/* failed to allocate fd.
- * bpf_map_put() is needed because the above
+ * bpf_map_put_with_uref() is needed because the above
* bpf_map_alloc_id() has published the map
* to the userspace and the userspace may
* have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
*/
- bpf_map_put(map);
+ bpf_map_put_with_uref(map);
return err;
}
@@ -664,7 +682,7 @@ static void *__bpf_copy_key(void __user *ukey, u64 key_size)
}
/* last field in 'union bpf_attr' used by this command */
-#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
+#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
static int map_lookup_elem(union bpf_attr *attr)
{
@@ -680,6 +698,9 @@ static int map_lookup_elem(union bpf_attr *attr)
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
return -EINVAL;
+ if (attr->flags & ~BPF_F_LOCK)
+ return -EINVAL;
+
f = fdget(ufd);
map = __bpf_map_get(f);
if (IS_ERR(map))
@@ -690,6 +711,12 @@ static int map_lookup_elem(union bpf_attr *attr)
goto err_put;
}
+ if ((attr->flags & BPF_F_LOCK) &&
+ !map_value_has_spin_lock(map)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
key = __bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
@@ -713,8 +740,13 @@ static int map_lookup_elem(union bpf_attr *attr)
if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_lookup_elem(map, key, value);
- } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
- map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+ goto done;
+ }
+
+ preempt_disable();
+ this_cpu_inc(bpf_prog_active);
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value);
@@ -740,11 +772,20 @@ static int map_lookup_elem(union bpf_attr *attr)
err = -ENOENT;
} else {
err = 0;
- memcpy(value, ptr, value_size);
+ if (attr->flags & BPF_F_LOCK)
+ /* lock 'ptr' and copy everything but lock */
+ copy_map_value_locked(map, value, ptr, true);
+ else
+ copy_map_value(map, value, ptr);
+ /* mask lock, since value wasn't zero inited */
+ check_and_init_map_lock(map, value);
}
rcu_read_unlock();
}
+ this_cpu_dec(bpf_prog_active);
+ preempt_enable();
+done:
if (err)
goto free_value;
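
BPF_MAP_LOOKUP_ELEM now accepts attr.flags (hence the new LAST_FIELD), and with BPF_F_LOCK the value is copied out under the element's bpf_spin_lock with the lock bytes masked. A hedged raw-syscall sketch of that lookup, which is roughly what a libbpf wrapper boils down to; map_fd, key and value come from the caller:

#include <unistd.h>
#include <string.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int lookup_locked(int map_fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)key;
	attr.value = (__u64)(unsigned long)value;
	attr.flags = BPF_F_LOCK;	/* rejected unless the value has a bpf_spin_lock */

	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
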
@@ -800,6 +841,12 @@ static int map_update_elem(union bpf_attr *attr)
goto err_put;
}
+ if ((attr->flags & BPF_F_LOCK) &&
+ !map_value_has_spin_lock(map)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
key = __bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
@@ -1211,6 +1258,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
+ perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
bpf_prog_kallsyms_del_all(prog);
@@ -1236,24 +1284,54 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
return 0;
}
+static void bpf_prog_get_stats(const struct bpf_prog *prog,
+ struct bpf_prog_stats *stats)
+{
+ u64 nsecs = 0, cnt = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ const struct bpf_prog_stats *st;
+ unsigned int start;
+ u64 tnsecs, tcnt;
+
+ st = per_cpu_ptr(prog->aux->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&st->syncp);
+ tnsecs = st->nsecs;
+ tcnt = st->cnt;
+ } while (u64_stats_fetch_retry_irq(&st->syncp, start));
+ nsecs += tnsecs;
+ cnt += tcnt;
+ }
+ stats->nsecs = nsecs;
+ stats->cnt = cnt;
+}
+
#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_prog *prog = filp->private_data;
char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
+ struct bpf_prog_stats stats;
+ bpf_prog_get_stats(prog, &stats);
bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
seq_printf(m,
"prog_type:\t%u\n"
"prog_jited:\t%u\n"
"prog_tag:\t%s\n"
"memlock:\t%llu\n"
- "prog_id:\t%u\n",
+ "prog_id:\t%u\n"
+ "run_time_ns:\t%llu\n"
+ "run_cnt:\t%llu\n",
prog->type,
prog->jited,
prog_tag,
prog->pages * 1ULL << PAGE_SHIFT,
- prog->aux->id);
+ prog->aux->id,
+ stats.nsecs,
+ stats.cnt);
}
#endif
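
bpf_prog_get_stats() folds the per-CPU counters allocated in bpf_prog_alloc() earlier in this patch into one snapshot, and the fdinfo hunk above exposes it. A hedged sample of what reading the file might look like once the static key is flipped (values invented; the sysctl is assumed to be wired up as kernel.bpf_stats_enabled elsewhere in the series):

	# sysctl -w kernel.bpf_stats_enabled=1
	# cat /proc/<pid>/fdinfo/<bpf-prog-fd>
	...
	prog_tag:	9a2df3d4c1e2f8a0
	memlock:	4096
	prog_id:	37
	run_time_ns:	8123456
	run_cnt:	104211
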
@@ -1554,6 +1632,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
}
bpf_prog_kallsyms_add(prog);
+ perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
return err;
free_used_maps:
@@ -1978,7 +2057,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
fd = bpf_map_new_fd(map, f_flags);
if (fd < 0)
- bpf_map_put(map);
+ bpf_map_put_with_uref(map);
return fd;
}
@@ -2075,6 +2154,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
struct bpf_prog_info info = {};
u32 info_len = attr->info.info_len;
+ struct bpf_prog_stats stats;
char __user *uinsns;
u32 ulen;
int err;
@@ -2114,6 +2194,10 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
if (err)
return err;
+ bpf_prog_get_stats(prog, &stats);
+ info.run_time_ns = stats.nsecs;
+ info.run_cnt = stats.cnt;
+
if (!capable(CAP_SYS_ADMIN)) {
info.jited_prog_len = 0;
info.xlated_prog_len = 0;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f6bc62a9ee8e..a7b96bf0e654 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -213,6 +213,7 @@ struct bpf_call_arg_meta {
s64 msize_smax_value;
u64 msize_umax_value;
int ptr_id;
+ int func_id;
};
static DEFINE_MUTEX(bpf_verifier_lock);
@@ -330,10 +331,19 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type)
type == PTR_TO_PACKET_META;
}
+static bool type_is_sk_pointer(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET ||
+ type == PTR_TO_SOCK_COMMON ||
+ type == PTR_TO_TCP_SOCK;
+}
+
static bool reg_type_may_be_null(enum bpf_reg_type type)
{
return type == PTR_TO_MAP_VALUE_OR_NULL ||
- type == PTR_TO_SOCKET_OR_NULL;
+ type == PTR_TO_SOCKET_OR_NULL ||
+ type == PTR_TO_SOCK_COMMON_OR_NULL ||
+ type == PTR_TO_TCP_SOCK_OR_NULL;
}
static bool type_is_refcounted(enum bpf_reg_type type)
@@ -351,6 +361,12 @@ static bool reg_is_refcounted(const struct bpf_reg_state *reg)
return type_is_refcounted(reg->type);
}
+static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
+{
+ return reg->type == PTR_TO_MAP_VALUE &&
+ map_value_has_spin_lock(reg->map_ptr);
+}
+
static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
{
return type_is_refcounted_or_null(reg->type);
@@ -370,6 +386,12 @@ static bool is_release_function(enum bpf_func_id func_id)
return func_id == BPF_FUNC_sk_release;
}
+static bool is_acquire_function(enum bpf_func_id func_id)
+{
+ return func_id == BPF_FUNC_sk_lookup_tcp ||
+ func_id == BPF_FUNC_sk_lookup_udp;
+}
+
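
For illustration, the acquire/release pairing these two predicates describe looks roughly as follows from BPF C. This is a hedged sketch, not part of the patch: section and include names follow the selftests conventions of the same tree, and the tuple is left empty so the lookup simply fails at run time.

#include <linux/bpf.h>
#include "bpf_helpers.h"        /* helper declarations, as in the bpf selftests */

SEC("classifier")
int ref_pairing_example(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;

        /* acquire function: returns a referenced socket or NULL */
        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                               BPF_F_CURRENT_NETNS, 0);
        if (!sk)
                return 0;
        /* ... use sk ... */
        bpf_sk_release(sk);     /* release function: omitting it fails verification */
        return 0;
}

char _license[] SEC("license") = "GPL";
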
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
@@ -385,6 +407,10 @@ static const char * const reg_type_str[] = {
[PTR_TO_FLOW_KEYS] = "flow_keys",
[PTR_TO_SOCKET] = "sock",
[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
+ [PTR_TO_SOCK_COMMON] = "sock_common",
+ [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
+ [PTR_TO_TCP_SOCK] = "tcp_sock",
+ [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
};
static char slot_type_char[] = {
@@ -611,13 +637,10 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
}
/* release function corresponding to acquire_reference_state(). Idempotent. */
-static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
+static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
int i, last_idx;
- if (!ptr_id)
- return -EFAULT;
-
last_idx = state->acquired_refs - 1;
for (i = 0; i < state->acquired_refs; i++) {
if (state->refs[i].id == ptr_id) {
@@ -629,21 +652,7 @@ static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
return 0;
}
}
- return -EFAULT;
-}
-
-/* variation on the above for cases where we expect that there must be an
- * outstanding reference for the specified ptr_id.
- */
-static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
-{
- struct bpf_func_state *state = cur_func(env);
- int err;
-
- err = __release_reference_state(state, ptr_id);
- if (WARN_ON_ONCE(err != 0))
- verbose(env, "verifier internal error: can't release reference\n");
- return err;
+ return -EINVAL;
}
static int transfer_reference_state(struct bpf_func_state *dst,
@@ -712,6 +721,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
}
dst_state->speculative = src->speculative;
dst_state->curframe = src->curframe;
+ dst_state->active_spin_lock = src->active_spin_lock;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
@@ -1095,7 +1105,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
- if (BPF_CLASS(code) != BPF_JMP)
+ if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
@@ -1201,6 +1211,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case CONST_PTR_TO_MAP:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
+ case PTR_TO_SOCK_COMMON:
+ case PTR_TO_SOCK_COMMON_OR_NULL:
+ case PTR_TO_TCP_SOCK:
+ case PTR_TO_TCP_SOCK_OR_NULL:
return true;
default:
return false;
@@ -1483,6 +1497,21 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
if (err)
verbose(env, "R%d max value is outside of the array range\n",
regno);
+
+ if (map_value_has_spin_lock(reg->map_ptr)) {
+ u32 lock = reg->map_ptr->spin_lock_off;
+
+ /* if any part of struct bpf_spin_lock can be touched by
+ * load/store, reject this program.
+ * To check that [x1, x2) overlaps with [y1, y2)
+ * it is sufficient to check x1 < y2 && y1 < x2.
+ */
+ if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
+ lock < reg->umax_value + off + size) {
+ verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
+ return -EACCES;
+ }
+ }
return err;
}
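
The overlap test in the new check is the standard half-open interval condition; a stand-alone sanity check of it (ordinary userspace C, not kernel code) is:

/* [x1, x2) overlaps [y1, y2) iff x1 < y2 && y1 < x2 */
#include <assert.h>
#include <stdbool.h>

static bool ranges_overlap(unsigned long x1, unsigned long x2,
                           unsigned long y1, unsigned long y2)
{
        return x1 < y2 && y1 < x2;
}

int main(void)
{
        assert(ranges_overlap(0, 4, 3, 8));     /* share byte 3 */
        assert(!ranges_overlap(0, 4, 4, 8));    /* touch but don't overlap */
        return 0;
}
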
@@ -1617,12 +1646,14 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
return 0;
}
-static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
- int size, enum bpf_access_type t)
+static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+ u32 regno, int off, int size,
+ enum bpf_access_type t)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
- struct bpf_insn_access_aux info;
+ struct bpf_insn_access_aux info = {};
+ bool valid;
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1630,13 +1661,31 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
return -EACCES;
}
- if (!bpf_sock_is_valid_access(off, size, t, &info)) {
- verbose(env, "invalid bpf_sock access off=%d size=%d\n",
- off, size);
- return -EACCES;
+ switch (reg->type) {
+ case PTR_TO_SOCK_COMMON:
+ valid = bpf_sock_common_is_valid_access(off, size, t, &info);
+ break;
+ case PTR_TO_SOCKET:
+ valid = bpf_sock_is_valid_access(off, size, t, &info);
+ break;
+ case PTR_TO_TCP_SOCK:
+ valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
+ break;
+ default:
+ valid = false;
}
- return 0;
+
+ if (valid) {
+ env->insn_aux_data[insn_idx].ctx_field_size =
+ info.ctx_field_size;
+ return 0;
+ }
+
+ verbose(env, "R%d invalid %s access off=%d size=%d\n",
+ regno, reg_type_str[reg->type], off, size);
+
+ return -EACCES;
}
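
What the new switch enables, seen from BPF C, is roughly the following. This is a hedged sketch that assumes the companion pieces of the same series (the __sk_buff->sk field and the bpf_sk_fullsock()/bpf_tcp_sock() helpers); program and section names are made up.

#include <linux/bpf.h>
#include "bpf_helpers.h"        /* helper declarations, as in the bpf selftests */

SEC("cgroup_skb/ingress")
int read_cwnd(struct __sk_buff *skb)
{
        struct bpf_sock *sk = skb->sk;          /* PTR_TO_SOCK_COMMON_OR_NULL */
        struct bpf_tcp_sock *tp;

        if (!sk)
                return 1;
        sk = bpf_sk_fullsock(sk);               /* PTR_TO_SOCKET_OR_NULL */
        if (!sk)
                return 1;
        tp = bpf_tcp_sock(sk);                  /* PTR_TO_TCP_SOCK_OR_NULL */
        if (!tp)
                return 1;
        /* this load is what check_sock_access() validates for PTR_TO_TCP_SOCK */
        return tp->snd_cwnd != 0;
}

char _license[] SEC("license") = "GPL";
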
static bool __is_pointer_value(bool allow_ptr_leaks,
@@ -1662,8 +1711,14 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
const struct bpf_reg_state *reg = reg_state(env, regno);
- return reg->type == PTR_TO_CTX ||
- reg->type == PTR_TO_SOCKET;
+ return reg->type == PTR_TO_CTX;
+}
+
+static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
+{
+ const struct bpf_reg_state *reg = reg_state(env, regno);
+
+ return type_is_sk_pointer(reg->type);
}
static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
@@ -1774,6 +1829,12 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
case PTR_TO_SOCKET:
pointer_desc = "sock ";
break;
+ case PTR_TO_SOCK_COMMON:
+ pointer_desc = "sock_common ";
+ break;
+ case PTR_TO_TCP_SOCK:
+ pointer_desc = "tcp_sock ";
+ break;
default:
break;
}
@@ -1977,11 +2038,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
* PTR_TO_PACKET[_META,_END]. In the latter
* case, we know the offset is zero.
*/
- if (reg_type == SCALAR_VALUE)
+ if (reg_type == SCALAR_VALUE) {
mark_reg_unknown(env, regs, value_regno);
- else
+ } else {
mark_reg_known_zero(env, regs,
value_regno);
+ if (reg_type_may_be_null(reg_type))
+ regs[value_regno].id = ++env->id_gen;
+ }
regs[value_regno].type = reg_type;
}
@@ -2027,12 +2091,13 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
err = check_flow_keys_access(env, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
- } else if (reg->type == PTR_TO_SOCKET) {
+ } else if (type_is_sk_pointer(reg->type)) {
if (t == BPF_WRITE) {
- verbose(env, "cannot write into socket\n");
+ verbose(env, "R%d cannot write into %s\n",
+ regno, reg_type_str[reg->type]);
return -EACCES;
}
- err = check_sock_access(env, regno, off, size, t);
+ err = check_sock_access(env, insn_idx, regno, off, size, t);
if (!err && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else {
@@ -2076,7 +2141,8 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
if (is_ctx_reg(env, insn->dst_reg) ||
is_pkt_reg(env, insn->dst_reg) ||
- is_flow_key_reg(env, insn->dst_reg)) {
+ is_flow_key_reg(env, insn->dst_reg) ||
+ is_sk_reg(env, insn->dst_reg)) {
verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
insn->dst_reg,
reg_type_str[reg_state(env, insn->dst_reg)->type]);
@@ -2192,6 +2258,91 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
}
}
+/* Implementation details:
+ * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
+ * Two bpf_map_lookups (even with the same key) will have different reg->id.
+ * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
+ * value_or_null->value transition, since the verifier only cares about
+ * the range of access to a valid map value pointer and doesn't care about
+ * the actual address of the map element.
+ * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
+ * reg->id > 0 after value_or_null->value transition. By doing so
+ * two bpf_map_lookups will be considered two different pointers that
+ * point to different bpf_spin_locks.
+ * The verifier allows taking only one bpf_spin_lock at a time to avoid
+ * deadlocks.
+ * Since only one bpf_spin_lock is allowed, the checks are simpler than
+ * the reg_is_refcounted() logic. The verifier needs to remember only
+ * one spin_lock instead of an array of acquired_refs.
+ * cur_state->active_spin_lock remembers which map value element got locked
+ * and is cleared after bpf_spin_unlock.
+ */
+static int process_spin_lock(struct bpf_verifier_env *env, int regno,
+ bool is_lock)
+{
+ struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+ struct bpf_verifier_state *cur = env->cur_state;
+ bool is_const = tnum_is_const(reg->var_off);
+ struct bpf_map *map = reg->map_ptr;
+ u64 val = reg->var_off.value;
+
+ if (reg->type != PTR_TO_MAP_VALUE) {
+ verbose(env, "R%d is not a pointer to map_value\n", regno);
+ return -EINVAL;
+ }
+ if (!is_const) {
+ verbose(env,
+ "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
+ regno);
+ return -EINVAL;
+ }
+ if (!map->btf) {
+ verbose(env,
+ "map '%s' has to have BTF in order to use bpf_spin_lock\n",
+ map->name);
+ return -EINVAL;
+ }
+ if (!map_value_has_spin_lock(map)) {
+ if (map->spin_lock_off == -E2BIG)
+ verbose(env,
+ "map '%s' has more than one 'struct bpf_spin_lock'\n",
+ map->name);
+ else if (map->spin_lock_off == -ENOENT)
+ verbose(env,
+ "map '%s' doesn't have 'struct bpf_spin_lock'\n",
+ map->name);
+ else
+ verbose(env,
+ "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
+ map->name);
+ return -EINVAL;
+ }
+ if (map->spin_lock_off != val + reg->off) {
+ verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
+ val + reg->off);
+ return -EINVAL;
+ }
+ if (is_lock) {
+ if (cur->active_spin_lock) {
+ verbose(env,
+ "Locking two bpf_spin_locks are not allowed\n");
+ return -EINVAL;
+ }
+ cur->active_spin_lock = reg->id;
+ } else {
+ if (!cur->active_spin_lock) {
+ verbose(env, "bpf_spin_unlock without taking a lock\n");
+ return -EINVAL;
+ }
+ if (cur->active_spin_lock != reg->id) {
+ verbose(env, "bpf_spin_unlock of different lock\n");
+ return -EINVAL;
+ }
+ cur->active_spin_lock = 0;
+ }
+ return 0;
+}
+
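
For reference, the shape of a program that passes the checks in process_spin_lock() is sketched below. Map, section and function names are invented; the essential points are that the map value embeds a single struct bpf_spin_lock at a constant offset, the map carries BTF (here via the selftests' BPF_ANNOTATE_KV_PAIR()), and only one lock is held at a time with no helper calls inside the critical section.

#include <linux/bpf.h>
#include "bpf_helpers.h"        /* helper declarations, as in the bpf selftests */

struct hash_elem {
        struct bpf_spin_lock lock;      /* exactly one lock, at a fixed offset */
        int cnt;
};

struct bpf_map_def SEC("maps") counters = {
        .type = BPF_MAP_TYPE_HASH,
        .key_size = sizeof(int),
        .value_size = sizeof(struct hash_elem),
        .max_entries = 1,
};
BPF_ANNOTATE_KV_PAIR(counters, int, struct hash_elem); /* attaches the BTF the verifier requires */

SEC("classifier")
int spin_lock_example(struct __sk_buff *skb)
{
        struct hash_elem *val;
        int key = 0;

        val = bpf_map_lookup_elem(&counters, &key);
        if (!val)
                return 0;
        bpf_spin_lock(&val->lock);      /* taking a second lock here would be rejected */
        val->cnt++;                     /* direct loads/stores of 'lock' itself are rejected */
        bpf_spin_unlock(&val->lock);
        return 0;
}

char _license[] SEC("license") = "GPL";
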
static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_MEM ||
@@ -2258,6 +2409,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
+ } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
+ expected_type = PTR_TO_SOCK_COMMON;
+ /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
+ if (!type_is_sk_pointer(type))
+ goto err_type;
} else if (arg_type == ARG_PTR_TO_SOCKET) {
expected_type = PTR_TO_SOCKET;
if (type != expected_type)
@@ -2268,6 +2424,17 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
return -EFAULT;
}
meta->ptr_id = reg->id;
+ } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
+ if (meta->func_id == BPF_FUNC_spin_lock) {
+ if (process_spin_lock(env, regno, true))
+ return -EACCES;
+ } else if (meta->func_id == BPF_FUNC_spin_unlock) {
+ if (process_spin_lock(env, regno, false))
+ return -EACCES;
+ } else {
+ verbose(env, "verifier internal error\n");
+ return -EFAULT;
+ }
} else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
@@ -2661,7 +2828,7 @@ static int release_reference(struct bpf_verifier_env *env,
for (i = 0; i <= vstate->curframe; i++)
release_reg_references(env, vstate->frame[i], meta->ptr_id);
- return release_reference_state(env, meta->ptr_id);
+ return release_reference_state(cur_func(env), meta->ptr_id);
}
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -2887,6 +3054,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return err;
}
+ meta.func_id = func_id;
/* check args */
err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
if (err)
@@ -2926,8 +3094,11 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
}
} else if (is_release_function(func_id)) {
err = release_reference(env, &meta);
- if (err)
+ if (err) {
+ verbose(env, "func %s#%d reference has not been acquired before\n",
+ func_id_name(func_id), func_id);
return err;
+ }
}
regs = cur_regs(env);
@@ -2969,17 +3140,30 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
regs[BPF_REG_0].map_ptr = meta.map_ptr;
if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
+ if (map_value_has_spin_lock(meta.map_ptr))
+ regs[BPF_REG_0].id = ++env->id_gen;
} else {
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
regs[BPF_REG_0].id = ++env->id_gen;
}
} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
- int id = acquire_reference_state(env, insn_idx);
- if (id < 0)
- return id;
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
- regs[BPF_REG_0].id = id;
+ if (is_acquire_function(func_id)) {
+ int id = acquire_reference_state(env, insn_idx);
+
+ if (id < 0)
+ return id;
+ /* For release_reference() */
+ regs[BPF_REG_0].id = id;
+ } else {
+ /* For mark_ptr_or_null_reg() */
+ regs[BPF_REG_0].id = ++env->id_gen;
+ }
+ } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
+ mark_reg_known_zero(env, regs, BPF_REG_0);
+ regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
+ regs[BPF_REG_0].id = ++env->id_gen;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
@@ -3103,6 +3287,40 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
}
}
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+ const struct bpf_insn *insn)
+{
+ return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
+}
+
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
+ u32 alu_state, u32 alu_limit)
+{
+ /* If we arrived here from different branches with different
+ * state or limits to sanitize, then this won't work.
+ */
+ if (aux->alu_state &&
+ (aux->alu_state != alu_state ||
+ aux->alu_limit != alu_limit))
+ return -EACCES;
+
+ /* Corresponding fixup done in fixup_bpf_calls(). */
+ aux->alu_state = alu_state;
+ aux->alu_limit = alu_limit;
+ return 0;
+}
+
+static int sanitize_val_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
+{
+ struct bpf_insn_aux_data *aux = cur_aux(env);
+
+ if (can_skip_alu_sanitation(env, insn))
+ return 0;
+
+ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+}
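
The BPF_ALU_NON_POINTER marker matters when the same ALU instruction is reachable with a pointer in one branch and a plain scalar in another; update_alu_sanitation_state() then sees conflicting states and rejects the program (this is where the "tried to add/sub from different pointers or scalars" messages further down come from). An illustrative instruction-stream fragment, assuming an unprivileged load so can_skip_alu_sanitation() does not short-circuit:

        /* macros from include/linux/filter.h; r2 holds a map value pointer,
         * r3/r4 hold scalars
         */
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 2),          /* if r3 == 0 goto +2   */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),            /* r1 = map value ptr   */
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),                   /* goto +1              */
        BPF_MOV64_IMM(BPF_REG_1, 42),                   /* r1 = scalar          */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_4),   /* reached both ways    */
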
+
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
@@ -3117,7 +3335,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_reg_state tmp;
bool ret;
- if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
+ if (can_skip_alu_sanitation(env, insn))
return 0;
/* We already marked aux for masking from non-speculative
@@ -3133,19 +3351,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
return 0;
-
- /* If we arrived here from different branches with different
- * limits to sanitize, then this won't work.
- */
- if (aux->alu_state &&
- (aux->alu_state != alu_state ||
- aux->alu_limit != alu_limit))
+ if (update_alu_sanitation_state(aux, alu_state, alu_limit))
return -EACCES;
-
- /* Corresponding fixup done in fixup_bpf_calls(). */
- aux->alu_state = alu_state;
- aux->alu_limit = alu_limit;
-
do_sim:
/* Simulate and find potential out-of-bounds access under
* speculative execution from truncation as a result of
@@ -3216,6 +3423,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
case PTR_TO_PACKET_END:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
+ case PTR_TO_SOCK_COMMON:
+ case PTR_TO_SOCK_COMMON_OR_NULL:
+ case PTR_TO_TCP_SOCK:
+ case PTR_TO_TCP_SOCK_OR_NULL:
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
@@ -3418,6 +3629,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+ u32 dst = insn->dst_reg;
+ int ret;
if (insn_bitness == 32) {
/* Relevant for 32-bit RSH: Information can propagate towards
@@ -3452,6 +3665,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
switch (opcode) {
case BPF_ADD:
+ ret = sanitize_val_alu(env, insn);
+ if (ret < 0) {
+ verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+ return ret;
+ }
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
@@ -3471,6 +3689,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
+ ret = sanitize_val_alu(env, insn);
+ if (ret < 0) {
+ verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+ return ret;
+ }
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
@@ -3996,11 +4219,50 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
* 0 - branch will not be taken and fall-through to next insn
* -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
*/
-static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
+ bool is_jmp32)
{
+ struct bpf_reg_state reg_lo;
+ s64 sval;
+
if (__is_pointer_value(false, reg))
return -1;
+ if (is_jmp32) {
+ reg_lo = *reg;
+ reg = &reg_lo;
+ /* For JMP32, only the low 32 bits are compared; coerce_reg_to_size()
+ * truncates the high bits and updates umin/umax according to the
+ * information in the low bits.
+ */
+ coerce_reg_to_size(reg, 4);
+ /* smin/smax need special handling. For example, after coerce,
+ * if smin_value is 0x00000000ffffffffLL, the value is -1 when
+ * used as operand to JMP32. It is a negative number from s32's
+ * point of view, while it is a positive number when seen as
+ * s64. The smin/smax are kept as s64, therefore, when used with
+ * JMP32, they need to be transformed into s32, then sign
+ * extended back to s64.
+ *
+ * Also, smin/smax were copied from umin/umax. If umin/umax have
+ * different sign bits, the min/max relationship is not maintained
+ * after casting into s32; in this case, set smin/smax to the
+ * safest range.
+ */
+ if ((reg->umax_value ^ reg->umin_value) &
+ (1ULL << 31)) {
+ reg->smin_value = S32_MIN;
+ reg->smax_value = S32_MAX;
+ }
+ reg->smin_value = (s64)(s32)reg->smin_value;
+ reg->smax_value = (s64)(s32)reg->smax_value;
+
+ val = (u32)val;
+ sval = (s64)(s32)val;
+ } else {
+ sval = (s64)val;
+ }
+
switch (opcode) {
case BPF_JEQ:
if (tnum_is_const(reg->var_off))
@@ -4023,9 +4285,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return 0;
break;
case BPF_JSGT:
- if (reg->smin_value > (s64)val)
+ if (reg->smin_value > sval)
return 1;
- else if (reg->smax_value < (s64)val)
+ else if (reg->smax_value < sval)
return 0;
break;
case BPF_JLT:
@@ -4035,9 +4297,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return 0;
break;
case BPF_JSLT:
- if (reg->smax_value < (s64)val)
+ if (reg->smax_value < sval)
return 1;
- else if (reg->smin_value >= (s64)val)
+ else if (reg->smin_value >= sval)
return 0;
break;
case BPF_JGE:
@@ -4047,9 +4309,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return 0;
break;
case BPF_JSGE:
- if (reg->smin_value >= (s64)val)
+ if (reg->smin_value >= sval)
return 1;
- else if (reg->smax_value < (s64)val)
+ else if (reg->smax_value < sval)
return 0;
break;
case BPF_JLE:
@@ -4059,9 +4321,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return 0;
break;
case BPF_JSLE:
- if (reg->smax_value <= (s64)val)
+ if (reg->smax_value <= sval)
return 1;
- else if (reg->smin_value > (s64)val)
+ else if (reg->smin_value > sval)
return 0;
break;
}
@@ -4069,6 +4331,29 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return -1;
}
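
The coercion rules in the comment above can be sanity-checked with ordinary C arithmetic (stand-alone userspace code, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int64_t smin = 0x00000000ffffffffLL;    /* +4294967295 as an s64 bound */
        uint64_t umin = 0x7ffffff0, umax = 0x80000010;

        /* as a JMP32 operand only the low 32 bits matter: the bound becomes -1 */
        assert((int64_t)(int32_t)smin == -1);

        /* umin/umax differing in bit 31 is what forces smin/smax to S32_MIN/S32_MAX */
        assert(((umin ^ umax) & (1ULL << 31)) != 0);
        return 0;
}
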
+/* Generate min value of the high 32-bit from TNUM info. */
+static u64 gen_hi_min(struct tnum var)
+{
+ return var.value & ~0xffffffffULL;
+}
+
+/* Generate max value of the high 32-bit from TNUM info. */
+static u64 gen_hi_max(struct tnum var)
+{
+ return (var.value | var.mask) & ~0xffffffffULL;
+}
+
+/* Return true if VAL is compared with an s64 sign-extended from s32, and they
+ * have the same signedness.
+ */
+static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
+{
+ return ((s32)sval >= 0 &&
+ reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
+ ((s32)sval < 0 &&
+ reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
+}
+
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
@@ -4076,8 +4361,10 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
*/
static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
- u8 opcode)
+ u8 opcode, bool is_jmp32)
{
+ s64 sval;
+
/* If the dst_reg is a pointer, we can't learn anything about its
* variable offset from the compare (unless src_reg were a pointer into
* the same object, but we don't bother with that.
@@ -4087,19 +4374,31 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
if (__is_pointer_value(false, false_reg))
return;
+ val = is_jmp32 ? (u32)val : val;
+ sval = is_jmp32 ? (s64)(s32)val : (s64)val;
+
switch (opcode) {
case BPF_JEQ:
- /* If this is false then we know nothing Jon Snow, but if it is
- * true then we know for sure.
- */
- __mark_reg_known(true_reg, val);
- break;
case BPF_JNE:
- /* If this is true we know nothing Jon Snow, but if it is false
- * we know the value for sure;
+ {
+ struct bpf_reg_state *reg =
+ opcode == BPF_JEQ ? true_reg : false_reg;
+
+ /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
+ * if it is true we know the value for sure. Likewise for
+ * BPF_JNE.
*/
- __mark_reg_known(false_reg, val);
+ if (is_jmp32) {
+ u64 old_v = reg->var_off.value;
+ u64 hi_mask = ~0xffffffffULL;
+
+ reg->var_off.value = (old_v & hi_mask) | val;
+ reg->var_off.mask &= hi_mask;
+ } else {
+ __mark_reg_known(reg, val);
+ }
break;
+ }
case BPF_JSET:
false_reg->var_off = tnum_and(false_reg->var_off,
tnum_const(~val));
@@ -4107,38 +4406,61 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
true_reg->var_off = tnum_or(true_reg->var_off,
tnum_const(val));
break;
- case BPF_JGT:
- false_reg->umax_value = min(false_reg->umax_value, val);
- true_reg->umin_value = max(true_reg->umin_value, val + 1);
- break;
- case BPF_JSGT:
- false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
- true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
- break;
- case BPF_JLT:
- false_reg->umin_value = max(false_reg->umin_value, val);
- true_reg->umax_value = min(true_reg->umax_value, val - 1);
- break;
- case BPF_JSLT:
- false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
- true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
- break;
case BPF_JGE:
- false_reg->umax_value = min(false_reg->umax_value, val - 1);
- true_reg->umin_value = max(true_reg->umin_value, val);
+ case BPF_JGT:
+ {
+ u64 false_umax = opcode == BPF_JGT ? val : val - 1;
+ u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
+
+ if (is_jmp32) {
+ false_umax += gen_hi_max(false_reg->var_off);
+ true_umin += gen_hi_min(true_reg->var_off);
+ }
+ false_reg->umax_value = min(false_reg->umax_value, false_umax);
+ true_reg->umin_value = max(true_reg->umin_value, true_umin);
break;
+ }
case BPF_JSGE:
- false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
- true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
+ case BPF_JSGT:
+ {
+ s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
+ s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
+
+ /* If the full s64 was not sign-extended from s32 then don't
+ * deduce further info.
+ */
+ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+ break;
+ false_reg->smax_value = min(false_reg->smax_value, false_smax);
+ true_reg->smin_value = max(true_reg->smin_value, true_smin);
break;
+ }
case BPF_JLE:
- false_reg->umin_value = max(false_reg->umin_value, val + 1);
- true_reg->umax_value = min(true_reg->umax_value, val);
+ case BPF_JLT:
+ {
+ u64 false_umin = opcode == BPF_JLT ? val : val + 1;
+ u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
+
+ if (is_jmp32) {
+ false_umin += gen_hi_min(false_reg->var_off);
+ true_umax += gen_hi_max(true_reg->var_off);
+ }
+ false_reg->umin_value = max(false_reg->umin_value, false_umin);
+ true_reg->umax_value = min(true_reg->umax_value, true_umax);
break;
+ }
case BPF_JSLE:
- false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
- true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
+ case BPF_JSLT:
+ {
+ s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
+ s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
+
+ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+ break;
+ false_reg->smin_value = max(false_reg->smin_value, false_smin);
+ true_reg->smax_value = min(true_reg->smax_value, true_smax);
break;
+ }
default:
break;
}
@@ -4161,24 +4483,34 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
*/
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
- u8 opcode)
+ u8 opcode, bool is_jmp32)
{
+ s64 sval;
+
if (__is_pointer_value(false, false_reg))
return;
+ val = is_jmp32 ? (u32)val : val;
+ sval = is_jmp32 ? (s64)(s32)val : (s64)val;
+
switch (opcode) {
case BPF_JEQ:
- /* If this is false then we know nothing Jon Snow, but if it is
- * true then we know for sure.
- */
- __mark_reg_known(true_reg, val);
- break;
case BPF_JNE:
- /* If this is true we know nothing Jon Snow, but if it is false
- * we know the value for sure;
- */
- __mark_reg_known(false_reg, val);
+ {
+ struct bpf_reg_state *reg =
+ opcode == BPF_JEQ ? true_reg : false_reg;
+
+ if (is_jmp32) {
+ u64 old_v = reg->var_off.value;
+ u64 hi_mask = ~0xffffffffULL;
+
+ reg->var_off.value = (old_v & hi_mask) | val;
+ reg->var_off.mask &= hi_mask;
+ } else {
+ __mark_reg_known(reg, val);
+ }
break;
+ }
case BPF_JSET:
false_reg->var_off = tnum_and(false_reg->var_off,
tnum_const(~val));
@@ -4186,38 +4518,58 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
true_reg->var_off = tnum_or(true_reg->var_off,
tnum_const(val));
break;
- case BPF_JGT:
- true_reg->umax_value = min(true_reg->umax_value, val - 1);
- false_reg->umin_value = max(false_reg->umin_value, val);
- break;
- case BPF_JSGT:
- true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
- false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
- break;
- case BPF_JLT:
- true_reg->umin_value = max(true_reg->umin_value, val + 1);
- false_reg->umax_value = min(false_reg->umax_value, val);
- break;
- case BPF_JSLT:
- true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
- false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
- break;
case BPF_JGE:
- true_reg->umax_value = min(true_reg->umax_value, val);
- false_reg->umin_value = max(false_reg->umin_value, val + 1);
+ case BPF_JGT:
+ {
+ u64 false_umin = opcode == BPF_JGT ? val : val + 1;
+ u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
+
+ if (is_jmp32) {
+ false_umin += gen_hi_min(false_reg->var_off);
+ true_umax += gen_hi_max(true_reg->var_off);
+ }
+ false_reg->umin_value = max(false_reg->umin_value, false_umin);
+ true_reg->umax_value = min(true_reg->umax_value, true_umax);
break;
+ }
case BPF_JSGE:
- true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
- false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
+ case BPF_JSGT:
+ {
+ s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
+ s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
+
+ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+ break;
+ false_reg->smin_value = max(false_reg->smin_value, false_smin);
+ true_reg->smax_value = min(true_reg->smax_value, true_smax);
break;
+ }
case BPF_JLE:
- true_reg->umin_value = max(true_reg->umin_value, val);
- false_reg->umax_value = min(false_reg->umax_value, val - 1);
+ case BPF_JLT:
+ {
+ u64 false_umax = opcode == BPF_JLT ? val : val - 1;
+ u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
+
+ if (is_jmp32) {
+ false_umax += gen_hi_max(false_reg->var_off);
+ true_umin += gen_hi_min(true_reg->var_off);
+ }
+ false_reg->umax_value = min(false_reg->umax_value, false_umax);
+ true_reg->umin_value = max(true_reg->umin_value, true_umin);
break;
+ }
case BPF_JSLE:
- true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
- false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
+ case BPF_JSLT:
+ {
+ s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
+ s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
+
+ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
+ break;
+ false_reg->smax_value = min(false_reg->smax_value, false_smax);
+ true_reg->smin_value = max(true_reg->smin_value, true_smin);
break;
+ }
default:
break;
}
@@ -4308,8 +4660,13 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
}
} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
reg->type = PTR_TO_SOCKET;
+ } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
+ reg->type = PTR_TO_SOCK_COMMON;
+ } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
+ reg->type = PTR_TO_TCP_SOCK;
}
- if (is_null || !reg_is_refcounted(reg)) {
+ if (is_null || !(reg_is_refcounted(reg) ||
+ reg_may_point_to_spin_lock(reg))) {
/* We don't need id from this point onwards anymore,
* thus we should better reset it, so that state
* pruning has chances to take effect.
@@ -4331,7 +4688,7 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
int i, j;
if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
- __release_reference_state(state, id);
+ release_reference_state(state, id);
for (i = 0; i < MAX_BPF_REG; i++)
mark_ptr_or_null_reg(state, &regs[i], id, is_null);
@@ -4355,6 +4712,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
if (BPF_SRC(insn->code) != BPF_X)
return false;
+ /* Pointers are always 64-bit. */
+ if (BPF_CLASS(insn->code) == BPF_JMP32)
+ return false;
+
switch (BPF_OP(insn->code)) {
case BPF_JGT:
if ((dst_reg->type == PTR_TO_PACKET &&
@@ -4447,16 +4808,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs;
u8 opcode = BPF_OP(insn->code);
+ bool is_jmp32;
int err;
- if (opcode > BPF_JSLE) {
- verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
+ /* Only conditional jumps are expected to reach here. */
+ if (opcode == BPF_JA || opcode > BPF_JSLE) {
+ verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
- verbose(env, "BPF_JMP uses reserved fields\n");
+ verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
return -EINVAL;
}
@@ -4472,7 +4835,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
} else {
if (insn->src_reg != BPF_REG_0) {
- verbose(env, "BPF_JMP uses reserved fields\n");
+ verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
return -EINVAL;
}
}
@@ -4483,9 +4846,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return err;
dst_reg = &regs[insn->dst_reg];
+ is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
if (BPF_SRC(insn->code) == BPF_K) {
- int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+ int pred = is_branch_taken(dst_reg, insn->imm, opcode,
+ is_jmp32);
if (pred == 1) {
/* only follow the goto, ignore fall-through */
@@ -4513,30 +4878,51 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
* comparable.
*/
if (BPF_SRC(insn->code) == BPF_X) {
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
+ struct bpf_reg_state lo_reg0 = *dst_reg;
+ struct bpf_reg_state lo_reg1 = *src_reg;
+ struct bpf_reg_state *src_lo, *dst_lo;
+
+ dst_lo = &lo_reg0;
+ src_lo = &lo_reg1;
+ coerce_reg_to_size(dst_lo, 4);
+ coerce_reg_to_size(src_lo, 4);
+
if (dst_reg->type == SCALAR_VALUE &&
- regs[insn->src_reg].type == SCALAR_VALUE) {
- if (tnum_is_const(regs[insn->src_reg].var_off))
+ src_reg->type == SCALAR_VALUE) {
+ if (tnum_is_const(src_reg->var_off) ||
+ (is_jmp32 && tnum_is_const(src_lo->var_off)))
reg_set_min_max(&other_branch_regs[insn->dst_reg],
- dst_reg, regs[insn->src_reg].var_off.value,
- opcode);
- else if (tnum_is_const(dst_reg->var_off))
+ dst_reg,
+ is_jmp32
+ ? src_lo->var_off.value
+ : src_reg->var_off.value,
+ opcode, is_jmp32);
+ else if (tnum_is_const(dst_reg->var_off) ||
+ (is_jmp32 && tnum_is_const(dst_lo->var_off)))
reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
- &regs[insn->src_reg],
- dst_reg->var_off.value, opcode);
- else if (opcode == BPF_JEQ || opcode == BPF_JNE)
+ src_reg,
+ is_jmp32
+ ? dst_lo->var_off.value
+ : dst_reg->var_off.value,
+ opcode, is_jmp32);
+ else if (!is_jmp32 &&
+ (opcode == BPF_JEQ || opcode == BPF_JNE))
/* Comparing for equality, we can combine knowledge */
reg_combine_min_max(&other_branch_regs[insn->src_reg],
&other_branch_regs[insn->dst_reg],
- &regs[insn->src_reg],
- &regs[insn->dst_reg], opcode);
+ src_reg, dst_reg, opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch_regs[insn->dst_reg],
- dst_reg, insn->imm, opcode);
+ dst_reg, insn->imm, opcode, is_jmp32);
}
- /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
- if (BPF_SRC(insn->code) == BPF_K &&
+ /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
+ * NOTE: the optimizations below are related to pointer comparison,
+ * which will never be JMP32.
+ */
+ if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
reg_type_may_be_null(dst_reg->type)) {
/* Mark all identical registers in each branch as either
@@ -4678,6 +5064,11 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
}
+ if (env->cur_state->active_spin_lock) {
+ verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
+ return -EINVAL;
+ }
+
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
@@ -4865,7 +5256,8 @@ peek_stack:
goto check_state;
t = insn_stack[cur_stack - 1];
- if (BPF_CLASS(insns[t].code) == BPF_JMP) {
+ if (BPF_CLASS(insns[t].code) == BPF_JMP ||
+ BPF_CLASS(insns[t].code) == BPF_JMP32) {
u8 opcode = BPF_OP(insns[t].code);
if (opcode == BPF_EXIT) {
@@ -4962,13 +5354,14 @@ static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- u32 i, nfuncs, urec_size, min_size, prev_offset;
+ u32 i, nfuncs, urec_size, min_size;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info *krecord;
const struct btf_type *type;
struct bpf_prog *prog;
const struct btf *btf;
void __user *urecord;
+ u32 prev_offset = 0;
int ret = 0;
nfuncs = attr->func_info_cnt;
@@ -5412,8 +5805,11 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and
* everything else matches, we are OK.
- * We don't care about the 'id' value, because nothing
- * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
+ * 'id' is not compared, since it's only used for maps with
+ * bpf_spin_lock inside map element and in such cases if
+ * the rest of the prog is valid for one map element then
+ * it's valid for all map elements regardless of the key
+ * used in bpf_map_lookup()
*/
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
range_within(rold, rcur) &&
@@ -5461,6 +5857,10 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
case PTR_TO_FLOW_KEYS:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
+ case PTR_TO_SOCK_COMMON:
+ case PTR_TO_SOCK_COMMON_OR_NULL:
+ case PTR_TO_TCP_SOCK:
+ case PTR_TO_TCP_SOCK_OR_NULL:
/* Only valid matches are exact, which memcmp() above
* would have accepted
*/
@@ -5616,6 +6016,9 @@ static bool states_equal(struct bpf_verifier_env *env,
if (old->speculative && !cur->speculative)
return false;
+ if (old->active_spin_lock != cur->active_spin_lock)
+ return false;
+
/* for states to be equal callsites have to be the same
* and all frame states need to be equivalent
*/
@@ -5778,6 +6181,10 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type)
case PTR_TO_CTX:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
+ case PTR_TO_SOCK_COMMON:
+ case PTR_TO_SOCK_COMMON_OR_NULL:
+ case PTR_TO_TCP_SOCK:
+ case PTR_TO_TCP_SOCK_OR_NULL:
return false;
default:
return true;
@@ -6020,7 +6427,7 @@ static int do_check(struct bpf_verifier_env *env)
if (err)
return err;
- } else if (class == BPF_JMP) {
+ } else if (class == BPF_JMP || class == BPF_JMP32) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
@@ -6028,11 +6435,18 @@ static int do_check(struct bpf_verifier_env *env)
insn->off != 0 ||
(insn->src_reg != BPF_REG_0 &&
insn->src_reg != BPF_PSEUDO_CALL) ||
- insn->dst_reg != BPF_REG_0) {
+ insn->dst_reg != BPF_REG_0 ||
+ class == BPF_JMP32) {
verbose(env, "BPF_CALL uses reserved fields\n");
return -EINVAL;
}
+ if (env->cur_state->active_spin_lock &&
+ (insn->src_reg == BPF_PSEUDO_CALL ||
+ insn->imm != BPF_FUNC_spin_unlock)) {
+ verbose(env, "function calls are not allowed while holding a lock\n");
+ return -EINVAL;
+ }
if (insn->src_reg == BPF_PSEUDO_CALL)
err = check_func_call(env, insn, &env->insn_idx);
else
@@ -6044,7 +6458,8 @@ static int do_check(struct bpf_verifier_env *env)
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
- insn->dst_reg != BPF_REG_0) {
+ insn->dst_reg != BPF_REG_0 ||
+ class == BPF_JMP32) {
verbose(env, "BPF_JA uses reserved fields\n");
return -EINVAL;
}
@@ -6056,11 +6471,17 @@ static int do_check(struct bpf_verifier_env *env)
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
- insn->dst_reg != BPF_REG_0) {
+ insn->dst_reg != BPF_REG_0 ||
+ class == BPF_JMP32) {
verbose(env, "BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
+ if (env->cur_state->active_spin_lock) {
+ verbose(env, "bpf_spin_unlock is missing\n");
+ return -EINVAL;
+ }
+
if (state->curframe) {
/* exit from nested function */
env->prev_insn_idx = env->insn_idx;
@@ -6158,6 +6579,19 @@ static int check_map_prealloc(struct bpf_map *map)
!(map->map_flags & BPF_F_NO_PREALLOC);
}
+static bool is_tracing_prog_type(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_KPROBE:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ case BPF_PROG_TYPE_PERF_EVENT:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
@@ -6180,6 +6614,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
}
}
+ if ((is_tracing_prog_type(prog->type) ||
+ prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
+ map_value_has_spin_lock(map)) {
+ verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
+ return -EINVAL;
+ }
+
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
!bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
@@ -6396,6 +6837,153 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return new_prog;
}
+static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
+ u32 off, u32 cnt)
+{
+ int i, j;
+
+ /* find first prog starting at or after off (first to remove) */
+ for (i = 0; i < env->subprog_cnt; i++)
+ if (env->subprog_info[i].start >= off)
+ break;
+ /* find first prog starting at or after off + cnt (first to stay) */
+ for (j = i; j < env->subprog_cnt; j++)
+ if (env->subprog_info[j].start >= off + cnt)
+ break;
+ /* if j doesn't start exactly at off + cnt, we are just removing
+ * the front of the previous prog
+ */
+ if (env->subprog_info[j].start != off + cnt)
+ j--;
+
+ if (j > i) {
+ struct bpf_prog_aux *aux = env->prog->aux;
+ int move;
+
+ /* move fake 'exit' subprog as well */
+ move = env->subprog_cnt + 1 - j;
+
+ memmove(env->subprog_info + i,
+ env->subprog_info + j,
+ sizeof(*env->subprog_info) * move);
+ env->subprog_cnt -= j - i;
+
+ /* remove func_info */
+ if (aux->func_info) {
+ move = aux->func_info_cnt - j;
+
+ memmove(aux->func_info + i,
+ aux->func_info + j,
+ sizeof(*aux->func_info) * move);
+ aux->func_info_cnt -= j - i;
+ /* func_info->insn_off is set after all code rewrites,
+ * in adjust_btf_func() - no need to adjust
+ */
+ }
+ } else {
+ /* convert i from "first prog to remove" to "first to adjust" */
+ if (env->subprog_info[i].start == off)
+ i++;
+ }
+
+ /* update fake 'exit' subprog as well */
+ for (; i <= env->subprog_cnt; i++)
+ env->subprog_info[i].start -= cnt;
+
+ return 0;
+}
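
A worked example of the bookkeeping above, with invented numbers:

/*
 * subprog_info[].start = { 0, 4, 10, 16 }, subprog_cnt = 3
 *                                          (16 is the fake 'exit' subprog)
 * remove off = 4, cnt = 6                  (exactly the subprog at insns 4..9)
 *
 * i = 1 (first start >= 4), j = 2 (first start >= 10 == off + cnt),
 * so entries 2..3 are moved down over entry 1 and subprog_cnt becomes 2;
 * the final loop then pulls the surviving starts in by cnt:
 *
 * subprog_info[].start = { 0, 4, 10 }      (old 10 and 16, each minus 6)
 */
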
+
+static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
+ u32 cnt)
+{
+ struct bpf_prog *prog = env->prog;
+ u32 i, l_off, l_cnt, nr_linfo;
+ struct bpf_line_info *linfo;
+
+ nr_linfo = prog->aux->nr_linfo;
+ if (!nr_linfo)
+ return 0;
+
+ linfo = prog->aux->linfo;
+
+ /* find first line info to remove, count lines to be removed */
+ for (i = 0; i < nr_linfo; i++)
+ if (linfo[i].insn_off >= off)
+ break;
+
+ l_off = i;
+ l_cnt = 0;
+ for (; i < nr_linfo; i++)
+ if (linfo[i].insn_off < off + cnt)
+ l_cnt++;
+ else
+ break;
+
+ /* If the first live insn doesn't match the first live linfo, it needs to
+ * "inherit" the last removed linfo. prog is already modified, so
+ * prog->len == off means there are no live instructions after it
+ * (the tail of the program was removed).
+ */
+ if (prog->len != off && l_cnt &&
+ (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
+ l_cnt--;
+ linfo[--i].insn_off = off + cnt;
+ }
+
+ /* remove the line info entries which refer to the removed instructions */
+ if (l_cnt) {
+ memmove(linfo + l_off, linfo + i,
+ sizeof(*linfo) * (nr_linfo - i));
+
+ prog->aux->nr_linfo -= l_cnt;
+ nr_linfo = prog->aux->nr_linfo;
+ }
+
+ /* pull all linfo[i].insn_off >= off + cnt in by cnt */
+ for (i = l_off; i < nr_linfo; i++)
+ linfo[i].insn_off -= cnt;
+
+ /* fix up all subprogs (incl. 'exit') which start >= off */
+ for (i = 0; i <= env->subprog_cnt; i++)
+ if (env->subprog_info[i].linfo_idx > l_off) {
+ /* program may have started in the removed region but
+ * may not be fully removed
+ */
+ if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
+ env->subprog_info[i].linfo_idx -= l_cnt;
+ else
+ env->subprog_info[i].linfo_idx = l_off;
+ }
+
+ return 0;
+}
+
+static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ unsigned int orig_prog_len = env->prog->len;
+ int err;
+
+ if (bpf_prog_is_dev_bound(env->prog->aux))
+ bpf_prog_offload_remove_insns(env, off, cnt);
+
+ err = bpf_remove_insns(env->prog, off, cnt);
+ if (err)
+ return err;
+
+ err = adjust_subprog_starts_after_remove(env, off, cnt);
+ if (err)
+ return err;
+
+ err = bpf_adj_linfo_after_remove(env, off, cnt);
+ if (err)
+ return err;
+
+ memmove(aux_data + off, aux_data + off + cnt,
+ sizeof(*aux_data) * (orig_prog_len - off - cnt));
+
+ return 0;
+}
+
/* The verifier does more data flow analysis than llvm and will not
* explore branches that are dead at run time. Malicious programs can
* have dead code too. Therefore replace all dead at-run-time code
@@ -6422,6 +7010,91 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
}
}
+static bool insn_is_cond_jump(u8 code)
+{
+ u8 op;
+
+ if (BPF_CLASS(code) == BPF_JMP32)
+ return true;
+
+ if (BPF_CLASS(code) != BPF_JMP)
+ return false;
+
+ op = BPF_OP(code);
+ return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
+}
+
+static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ const int insn_cnt = env->prog->len;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ if (!insn_is_cond_jump(insn->code))
+ continue;
+
+ if (!aux_data[i + 1].seen)
+ ja.off = insn->off;
+ else if (!aux_data[i + 1 + insn->off].seen)
+ ja.off = 0;
+ else
+ continue;
+
+ if (bpf_prog_is_dev_bound(env->prog->aux))
+ bpf_prog_offload_replace_insn(env, i, &ja);
+
+ memcpy(insn, &ja, sizeof(ja));
+ }
+}
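
The effect of this pass on an individual conditional jump, with invented offsets (macros from include/linux/filter.h):

/*
 * fall-through dead:
 *   before:  BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3)   // insn i+1 never seen
 *   after:   BPF_JMP_IMM(BPF_JA,  0,         0, 3)   // hard-wired to the target
 *
 * jump target dead:
 *   before:  BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3)   // insn i+1+3 never seen
 *   after:   BPF_JMP_IMM(BPF_JA,  0,         0, 0)   // a nop, later deleted by
 *                                                    // opt_remove_nops()
 */
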
+
+static int opt_remove_dead_code(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ int insn_cnt = env->prog->len;
+ int i, err;
+
+ for (i = 0; i < insn_cnt; i++) {
+ int j;
+
+ j = 0;
+ while (i + j < insn_cnt && !aux_data[i + j].seen)
+ j++;
+ if (!j)
+ continue;
+
+ err = verifier_remove_insns(env, i, j);
+ if (err)
+ return err;
+ insn_cnt = env->prog->len;
+ }
+
+ return 0;
+}
+
+static int opt_remove_nops(struct bpf_verifier_env *env)
+{
+ const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ int i, err;
+
+ for (i = 0; i < insn_cnt; i++) {
+ if (memcmp(&insn[i], &ja, sizeof(ja)))
+ continue;
+
+ err = verifier_remove_insns(env, i, 1);
+ if (err)
+ return err;
+ insn_cnt--;
+ i--;
+ }
+
+ return 0;
+}
+
/* convert load instructions that access fields of a context type into a
* sequence of instructions that access fields of the underlying structure:
* struct __sk_buff -> struct sk_buff
@@ -6514,8 +7187,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
convert_ctx_access = ops->convert_ctx_access;
break;
case PTR_TO_SOCKET:
+ case PTR_TO_SOCK_COMMON:
convert_ctx_access = bpf_sock_convert_ctx_access;
break;
+ case PTR_TO_TCP_SOCK:
+ convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
+ break;
default:
continue;
}
@@ -6643,7 +7320,12 @@ static int jit_subprogs(struct bpf_verifier_env *env)
subprog_end = env->subprog_info[i + 1].start;
len = subprog_end - subprog_start;
- func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
+ /* BPF_PROG_RUN doesn't call subprogs directly,
+ * hence main prog stats include the runtime of subprogs.
+ * subprogs don't have IDs and are not reachable via prog_get_next_id.
+ * func[i]->aux->stats will never be accessed and stays NULL.
+ */
+ func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
if (!func[i])
goto out_free;
memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
@@ -6882,7 +7564,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
u32 off_reg;
aux = &env->insn_aux_data[i + delta];
- if (!aux->alu_state)
+ if (!aux->alu_state ||
+ aux->alu_state == BPF_ALU_NON_POINTER)
continue;
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
@@ -7112,7 +7795,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
{
struct bpf_verifier_env *env;
struct bpf_verifier_log *log;
- int ret = -EINVAL;
+ int i, len, ret = -EINVAL;
+ bool is_priv;
/* no program is valid */
if (ARRAY_SIZE(bpf_verifier_ops) == 0)
@@ -7126,12 +7810,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
return -ENOMEM;
log = &env->log;
+ len = (*prog)->len;
env->insn_aux_data =
- vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
- (*prog)->len));
+ vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
+ for (i = 0; i < len; i++)
+ env->insn_aux_data[i].orig_idx = i;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
@@ -7159,6 +7845,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
env->strict_alignment = false;
+ is_priv = capable(CAP_SYS_ADMIN);
+ env->allow_ptr_leaks = is_priv;
+
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
@@ -7176,8 +7865,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (!env->explored_states)
goto skip_full_check;
- env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
-
ret = check_subprogs(env);
if (ret < 0)
goto skip_full_check;
@@ -7207,8 +7894,17 @@ skip_full_check:
ret = check_max_stack_depth(env);
/* instruction rewrites happen after this point */
- if (ret == 0)
- sanitize_dead_code(env);
+ if (is_priv) {
+ if (ret == 0)
+ opt_hard_wire_dead_code_branches(env);
+ if (ret == 0)
+ ret = opt_remove_dead_code(env);
+ if (ret == 0)
+ ret = opt_remove_nops(env);
+ } else {
+ if (ret == 0)
+ sanitize_dead_code(env);
+ }
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
diff --git a/kernel/capability.c b/kernel/capability.c
index 1e1c0236f55b..1444f3954d75 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -93,9 +93,7 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
break;
case _LINUX_CAPABILITY_VERSION_2:
warn_deprecated_v2();
- /*
- * fall through - v3 is otherwise equivalent to v2.
- */
+ /* fall through - v3 is otherwise equivalent to v2. */
case _LINUX_CAPABILITY_VERSION_3:
*tocopy = _LINUX_CAPABILITY_U32S_3;
break;
@@ -299,7 +297,7 @@ bool has_ns_capability(struct task_struct *t,
int ret;
rcu_read_lock();
- ret = security_capable(__task_cred(t), ns, cap);
+ ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NONE);
rcu_read_unlock();
return (ret == 0);
@@ -340,7 +338,7 @@ bool has_ns_capability_noaudit(struct task_struct *t,
int ret;
rcu_read_lock();
- ret = security_capable_noaudit(__task_cred(t), ns, cap);
+ ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NOAUDIT);
rcu_read_unlock();
return (ret == 0);
@@ -363,7 +361,9 @@ bool has_capability_noaudit(struct task_struct *t, int cap)
return has_ns_capability_noaudit(t, &init_user_ns, cap);
}
-static bool ns_capable_common(struct user_namespace *ns, int cap, bool audit)
+static bool ns_capable_common(struct user_namespace *ns,
+ int cap,
+ unsigned int opts)
{
int capable;
@@ -372,8 +372,7 @@ static bool ns_capable_common(struct user_namespace *ns, int cap, bool audit)
BUG();
}
- capable = audit ? security_capable(current_cred(), ns, cap) :
- security_capable_noaudit(current_cred(), ns, cap);
+ capable = security_capable(current_cred(), ns, cap, opts);
if (capable == 0) {
current->flags |= PF_SUPERPRIV;
return true;
@@ -394,7 +393,7 @@ static bool ns_capable_common(struct user_namespace *ns, int cap, bool audit)
*/
bool ns_capable(struct user_namespace *ns, int cap)
{
- return ns_capable_common(ns, cap, true);
+ return ns_capable_common(ns, cap, CAP_OPT_NONE);
}
EXPORT_SYMBOL(ns_capable);
@@ -412,11 +411,30 @@ EXPORT_SYMBOL(ns_capable);
*/
bool ns_capable_noaudit(struct user_namespace *ns, int cap)
{
- return ns_capable_common(ns, cap, false);
+ return ns_capable_common(ns, cap, CAP_OPT_NOAUDIT);
}
EXPORT_SYMBOL(ns_capable_noaudit);
/**
+ * ns_capable_setid - Determine if the current task has a superior capability
+ * in effect, while signalling that this check is being done from within a
+ * setid syscall.
+ * @ns: The usernamespace we want the capability in
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+bool ns_capable_setid(struct user_namespace *ns, int cap)
+{
+ return ns_capable_common(ns, cap, CAP_OPT_INSETID);
+}
+EXPORT_SYMBOL(ns_capable_setid);
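
A hedged sketch of the intended call-site shape (the actual set*id() callers live outside this hunk); the point is only that an LSM hooked into security_capable() can now tell this check apart from other capability checks via CAP_OPT_INSETID:

static int example_setuid_permission(struct user_namespace *ns)
{
        /* same as ns_capable(), but security_capable() sees CAP_OPT_INSETID */
        if (!ns_capable_setid(ns, CAP_SETUID))
                return -EPERM;
        return 0;
}
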
+
+/**
* capable - Determine if the current task has a superior capability in effect
* @cap: The capability to be tested for
*
@@ -448,10 +466,11 @@ EXPORT_SYMBOL(capable);
bool file_ns_capable(const struct file *file, struct user_namespace *ns,
int cap)
{
+
if (WARN_ON_ONCE(!cap_valid(cap)))
return false;
- if (security_capable(file->f_cred, ns, cap) == 0)
+ if (security_capable(file->f_cred, ns, cap, CAP_OPT_NONE) == 0)
return true;
return false;
@@ -500,10 +519,12 @@ bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
{
int ret = 0; /* An absent tracer adds no restrictions */
const struct cred *cred;
+
rcu_read_lock();
cred = rcu_dereference(tsk->ptracer_cred);
if (cred)
- ret = security_capable_noaudit(cred, ns, CAP_SYS_PTRACE);
+ ret = security_capable(cred, ns, CAP_SYS_PTRACE,
+ CAP_OPT_NOAUDIT);
rcu_read_unlock();
return (ret == 0);
}
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index c950864016e2..c9a35f09e4b9 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -198,7 +198,7 @@ int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts);
-int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags);
+int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
struct cgroup_root *root, unsigned long magic,
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 583b969b0c0e..f94a7229974e 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1116,13 +1116,11 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
void *data, unsigned long magic,
struct cgroup_namespace *ns)
{
- struct super_block *pinned_sb = NULL;
struct cgroup_sb_opts opts;
struct cgroup_root *root;
struct cgroup_subsys *ss;
struct dentry *dentry;
int i, ret;
- bool new_root = false;
cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
@@ -1184,29 +1182,6 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
if (root->flags ^ opts.flags)
pr_warn("new mount options do not match the existing superblock, will be ignored\n");
- /*
- * We want to reuse @root whose lifetime is governed by its
- * ->cgrp. Let's check whether @root is alive and keep it
- * that way. As cgroup_kill_sb() can happen anytime, we
- * want to block it by pinning the sb so that @root doesn't
- * get killed before mount is complete.
- *
- * With the sb pinned, tryget_live can reliably indicate
- * whether @root can be reused. If it's being killed,
- * drain it. We can use wait_queue for the wait but this
- * path is super cold. Let's just sleep a bit and retry.
- */
- pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
- if (IS_ERR(pinned_sb) ||
- !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
- mutex_unlock(&cgroup_mutex);
- if (!IS_ERR_OR_NULL(pinned_sb))
- deactivate_super(pinned_sb);
- msleep(10);
- ret = restart_syscall();
- goto out_free;
- }
-
ret = 0;
goto out_unlock;
}
@@ -1232,15 +1207,20 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
ret = -ENOMEM;
goto out_unlock;
}
- new_root = true;
init_cgroup_root(root, &opts);
- ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
+ ret = cgroup_setup_root(root, opts.subsys_mask);
if (ret)
cgroup_free_root(root);
out_unlock:
+ if (!ret && !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
+ mutex_unlock(&cgroup_mutex);
+ msleep(10);
+ ret = restart_syscall();
+ goto out_free;
+ }
mutex_unlock(&cgroup_mutex);
out_free:
kfree(opts.release_agent);
@@ -1252,25 +1232,13 @@ out_free:
dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
CGROUP_SUPER_MAGIC, ns);
- /*
- * There's a race window after we release cgroup_mutex and before
- * allocating a superblock. Make sure a concurrent process won't
- * be able to re-use the root during this window by delaying the
- * initialization of root refcnt.
- */
- if (new_root) {
- mutex_lock(&cgroup_mutex);
- percpu_ref_reinit(&root->cgrp.self.refcnt);
- mutex_unlock(&cgroup_mutex);
+ if (!IS_ERR(dentry) && percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
+ struct super_block *sb = dentry->d_sb;
+ dput(dentry);
+ deactivate_locked_super(sb);
+ msleep(10);
+ dentry = ERR_PTR(restart_syscall());
}
-
- /*
- * If @pinned_sb, we're reusing an existing root and holding an
- * extra ref on its sb. Mount is complete. Put the extra ref.
- */
- if (pinned_sb)
- deactivate_super(pinned_sb);
-
return dentry;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index f31bd61c9466..eef24a25bda7 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -197,7 +197,7 @@ static u64 css_serial_nr_next = 1;
*/
static u16 have_fork_callback __read_mostly;
static u16 have_exit_callback __read_mostly;
-static u16 have_free_callback __read_mostly;
+static u16 have_release_callback __read_mostly;
static u16 have_canfork_callback __read_mostly;
/* cgroup namespace for init task */
@@ -1927,7 +1927,7 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}
-int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags)
+int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
{
LIST_HEAD(tmp_links);
struct cgroup *root_cgrp = &root->cgrp;
@@ -1944,7 +1944,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags)
root_cgrp->ancestor_ids[0] = ret;
ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
- ref_flags, GFP_KERNEL);
+ 0, GFP_KERNEL);
if (ret)
goto out;
@@ -2033,7 +2033,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
struct cgroup_namespace *ns)
{
struct dentry *dentry;
- bool new_sb;
+ bool new_sb = false;
dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb);
@@ -2043,6 +2043,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
*/
if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
struct dentry *nsdentry;
+ struct super_block *sb = dentry->d_sb;
struct cgroup *cgrp;
mutex_lock(&cgroup_mutex);
@@ -2053,12 +2054,14 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
- nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
+ nsdentry = kernfs_node_dentry(cgrp->kn, sb);
dput(dentry);
+ if (IS_ERR(nsdentry))
+ deactivate_locked_super(sb);
dentry = nsdentry;
}
- if (IS_ERR(dentry) || !new_sb)
+ if (!new_sb)
cgroup_put(&root->cgrp);
return dentry;
@@ -2118,18 +2121,16 @@ static void cgroup_kill_sb(struct super_block *sb)
struct cgroup_root *root = cgroup_root_from_kf(kf_root);
/*
- * If @root doesn't have any mounts or children, start killing it.
+ * If @root doesn't have any children, start killing it.
* This prevents new mounts by disabling percpu_ref_tryget_live().
* cgroup_mount() may wait for @root's release.
*
* And don't kill the default root.
*/
- if (!list_empty(&root->cgrp.self.children) ||
- root == &cgrp_dfl_root)
- cgroup_put(&root->cgrp);
- else
+ if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
+ !percpu_ref_is_dying(&root->cgrp.self.refcnt))
percpu_ref_kill(&root->cgrp.self.refcnt);
-
+ cgroup_put(&root->cgrp);
kernfs_kill_sb(sb);
}
@@ -3533,6 +3534,16 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
return ret ?: nbytes;
}
+static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
+{
+ struct cftype *cft = of->kn->priv;
+
+ if (cft->poll)
+ return cft->poll(of, pt);
+
+ return kernfs_generic_poll(of, pt);
+}
+
static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
return seq_cft(seq)->seq_start(seq, ppos);
@@ -3571,6 +3582,7 @@ static struct kernfs_ops cgroup_kf_single_ops = {
.open = cgroup_file_open,
.release = cgroup_file_release,
.write = cgroup_file_write,
+ .poll = cgroup_file_poll,
.seq_show = cgroup_seqfile_show,
};
@@ -3579,6 +3591,7 @@ static struct kernfs_ops cgroup_kf_ops = {
.open = cgroup_file_open,
.release = cgroup_file_release,
.write = cgroup_file_write,
+ .poll = cgroup_file_poll,
.seq_start = cgroup_seqfile_start,
.seq_next = cgroup_seqfile_next,
.seq_stop = cgroup_seqfile_stop,
@@ -5313,7 +5326,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
have_fork_callback |= (bool)ss->fork << ss->id;
have_exit_callback |= (bool)ss->exit << ss->id;
- have_free_callback |= (bool)ss->free << ss->id;
+ have_release_callback |= (bool)ss->release << ss->id;
have_canfork_callback |= (bool)ss->can_fork << ss->id;
/* At system boot, before all subsystems have been
@@ -5399,7 +5412,7 @@ int __init cgroup_init(void)
hash_add(css_set_table, &init_css_set.hlist,
css_set_hash(init_css_set.subsys));
- BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0, 0));
+ BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
mutex_unlock(&cgroup_mutex);
@@ -5749,16 +5762,19 @@ void cgroup_exit(struct task_struct *tsk)
} while_each_subsys_mask();
}
-void cgroup_free(struct task_struct *task)
+void cgroup_release(struct task_struct *task)
{
- struct css_set *cset = task_css_set(task);
struct cgroup_subsys *ss;
int ssid;
- do_each_subsys_mask(ss, ssid, have_free_callback) {
- ss->free(task);
+ do_each_subsys_mask(ss, ssid, have_release_callback) {
+ ss->release(task);
} while_each_subsys_mask();
+}
+void cgroup_free(struct task_struct *task)
+{
+ struct css_set *cset = task_css_set(task);
put_css_set(cset);
}
@@ -5996,7 +6012,7 @@ int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
int ret;
mutex_lock(&cgroup_mutex);
- ret = __cgroup_bpf_detach(cgrp, prog, type, flags);
+ ret = __cgroup_bpf_detach(cgrp, prog, type);
mutex_unlock(&cgroup_mutex);
return ret;
}
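
The cgroup_file_poll() hook added above falls back to kernfs_generic_poll(), so cgroup control files whose content changes generate kernfs notifications (for example cgroup.events on cgroup v2) can be waited on with poll(2); a change is signalled sysfs-style as POLLPRI | POLLERR, and the file has to be read once before polling so the next notification is observed. A minimal userspace sketch of that usage follows; the cgroup path is an assumption, not something from this patch.

/* Wait for a change on a cgroup v2 control file via poll(2).
 * Illustrative only; the path below is made up. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/sys/fs/cgroup/mygroup/cgroup.events", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Read the current contents first so poll() blocks until the next change. */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0)
		fwrite(buf, 1, n, stdout);

	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & (POLLPRI | POLLERR))) {
		lseek(fd, 0, SEEK_SET);		/* rewind and re-read the new state */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0)
			fwrite(buf, 1, n, stdout);
	}
	close(fd);
	return 0;
}
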
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 479743db6c37..72afd55f70c6 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -203,19 +203,6 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
return css_cs(cs->css.parent);
}
-#ifdef CONFIG_NUMA
-static inline bool task_has_mempolicy(struct task_struct *task)
-{
- return task->mempolicy;
-}
-#else
-static inline bool task_has_mempolicy(struct task_struct *task)
-{
- return false;
-}
-#endif
-
-
/* bits in struct cpuset flags field */
typedef enum {
CS_ONLINE,
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index 9829c67ebc0a..c9960baaa14f 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -247,7 +247,7 @@ static void pids_cancel_fork(struct task_struct *task)
pids_uncharge(pids, 1);
}
-static void pids_free(struct task_struct *task)
+static void pids_release(struct task_struct *task)
{
struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
@@ -342,7 +342,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
.cancel_attach = pids_cancel_attach,
.can_fork = pids_can_fork,
.cancel_fork = pids_cancel_fork,
- .free = pids_free,
+ .release = pids_release,
.legacy_cftypes = pids_files,
.dfl_cftypes = pids_files,
.threaded = true,
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index d503d1a9007c..bb95a35e8c2d 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -87,7 +87,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
struct cgroup *root, int cpu)
{
struct cgroup_rstat_cpu *rstatc;
- struct cgroup *parent;
if (pos == root)
return NULL;
@@ -115,8 +114,8 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
* However, due to the way we traverse, @pos will be the first
* child in most cases. The only exception is @root.
*/
- parent = cgroup_parent(pos);
- if (parent && rstatc->updated_next) {
+ if (rstatc->updated_next) {
+ struct cgroup *parent = cgroup_parent(pos);
struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
struct cgroup_rstat_cpu *nrstatc;
struct cgroup **nextp;
@@ -140,9 +139,12 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
* updated stat.
*/
smp_mb();
+
+ return pos;
}
- return pos;
+ /* only happens for @root */
+ return NULL;
}
/* see cgroup_rstat_flush() */
diff --git a/kernel/compat.c b/kernel/compat.c
index f01affa17e22..d8a36c6ad7c9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -20,7 +20,6 @@
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
-#include <linux/timex.h>
#include <linux/export.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
@@ -30,69 +29,6 @@
#include <linux/uaccess.h>
-int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
-{
- struct compat_timex tx32;
-
- memset(txc, 0, sizeof(struct timex));
- if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
- return -EFAULT;
-
- txc->modes = tx32.modes;
- txc->offset = tx32.offset;
- txc->freq = tx32.freq;
- txc->maxerror = tx32.maxerror;
- txc->esterror = tx32.esterror;
- txc->status = tx32.status;
- txc->constant = tx32.constant;
- txc->precision = tx32.precision;
- txc->tolerance = tx32.tolerance;
- txc->time.tv_sec = tx32.time.tv_sec;
- txc->time.tv_usec = tx32.time.tv_usec;
- txc->tick = tx32.tick;
- txc->ppsfreq = tx32.ppsfreq;
- txc->jitter = tx32.jitter;
- txc->shift = tx32.shift;
- txc->stabil = tx32.stabil;
- txc->jitcnt = tx32.jitcnt;
- txc->calcnt = tx32.calcnt;
- txc->errcnt = tx32.errcnt;
- txc->stbcnt = tx32.stbcnt;
-
- return 0;
-}
-
-int compat_put_timex(struct compat_timex __user *utp, const struct timex *txc)
-{
- struct compat_timex tx32;
-
- memset(&tx32, 0, sizeof(struct compat_timex));
- tx32.modes = txc->modes;
- tx32.offset = txc->offset;
- tx32.freq = txc->freq;
- tx32.maxerror = txc->maxerror;
- tx32.esterror = txc->esterror;
- tx32.status = txc->status;
- tx32.constant = txc->constant;
- tx32.precision = txc->precision;
- tx32.tolerance = txc->tolerance;
- tx32.time.tv_sec = txc->time.tv_sec;
- tx32.time.tv_usec = txc->time.tv_usec;
- tx32.tick = txc->tick;
- tx32.ppsfreq = txc->ppsfreq;
- tx32.jitter = txc->jitter;
- tx32.shift = txc->shift;
- tx32.stabil = txc->stabil;
- tx32.jitcnt = txc->jitcnt;
- tx32.calcnt = txc->calcnt;
- tx32.errcnt = txc->errcnt;
- tx32.stbcnt = txc->stbcnt;
- tx32.tai = txc->tai;
- if (copy_to_user(utp, &tx32, sizeof(struct compat_timex)))
- return -EFAULT;
- return 0;
-}
-
static int __compat_get_timeval(struct timeval *tv, const struct old_timeval32 __user *ctv)
{
return (!access_ok(ctv, sizeof(*ctv)) ||
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 91d5c38eb7e5..025f419d16f6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -313,6 +313,15 @@ void cpus_write_unlock(void)
void lockdep_assert_cpus_held(void)
{
+ /*
+ * We can't have hotplug operations before userspace starts running,
+ * and some init codepaths will knowingly not take the hotplug lock.
+ * This is all valid, so mute lockdep until it makes sense to report
+ * unheld locks.
+ */
+ if (system_state < SYSTEM_RUNNING)
+ return;
+
percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
@@ -376,9 +385,6 @@ void __weak arch_smt_update(void) { }
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-EXPORT_SYMBOL_GPL(cpu_smt_control);
-
-static bool cpu_smt_available __read_mostly;
void __init cpu_smt_disable(bool force)
{
@@ -397,25 +403,11 @@ void __init cpu_smt_disable(bool force)
/*
* The decision whether SMT is supported can only be done after the full
- * CPU identification. Called from architecture code before non boot CPUs
- * are brought up.
- */
-void __init cpu_smt_check_topology_early(void)
-{
- if (!topology_smt_supported())
- cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-}
-
-/*
- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
- * brought online. This ensures the smt/l1tf sysfs entries are consistent
- * with reality. cpu_smt_available is set to true during the bringup of non
- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
- * cpu_smt_control's previous setting.
+ * CPU identification. Called from architecture code.
*/
void __init cpu_smt_check_topology(void)
{
- if (!cpu_smt_available)
+ if (!topology_smt_supported())
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}
@@ -428,18 +420,10 @@ early_param("nosmt", smt_cmdline_disable);
static inline bool cpu_smt_allowed(unsigned int cpu)
{
- if (topology_is_primary_thread(cpu))
+ if (cpu_smt_control == CPU_SMT_ENABLED)
return true;
- /*
- * If the CPU is not a 'primary' thread and the booted_once bit is
- * set then the processor has SMT support. Store this information
- * for the late check of SMT support in cpu_smt_check_topology().
- */
- if (per_cpu(cpuhp_state, cpu).booted_once)
- cpu_smt_available = true;
-
- if (cpu_smt_control == CPU_SMT_ENABLED)
+ if (topology_is_primary_thread(cpu))
return true;
/*
@@ -2090,10 +2074,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
*/
cpuhp_offline_cpu_device(cpu);
}
- if (!ret) {
+ if (!ret)
cpu_smt_control = ctrlval;
- arch_smt_update();
- }
cpu_maps_update_done();
return ret;
}
@@ -2104,7 +2086,6 @@ static int cpuhp_smt_enable(void)
cpu_maps_update_begin();
cpu_smt_control = CPU_SMT_ENABLED;
- arch_smt_update();
for_each_present_cpu(cpu) {
/* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 933cb3e45b98..093c9f917ed0 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -464,6 +464,8 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLB_PAGE
VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
+#define PAGE_OFFLINE_MAPCOUNT_VALUE (~PG_offline)
+ VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
#endif
arch_crash_save_vmcoreinfo();
diff --git a/kernel/cred.c b/kernel/cred.c
index 21f4a97085b4..45d77284aed0 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -760,19 +760,6 @@ bool creds_are_invalid(const struct cred *cred)
{
if (cred->magic != CRED_MAGIC)
return true;
-#ifdef CONFIG_SECURITY_SELINUX
- /*
- * cred->security == NULL if security_cred_alloc_blank() or
- * security_prepare_creds() returned an error.
- */
- if (selinux_is_enabled() && cred->security) {
- if ((unsigned long) cred->security < PAGE_SIZE)
- return true;
- if ((*(u32 *)cred->security & 0xffffff00) ==
- (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
- return true;
- }
-#endif
return false;
}
EXPORT_SYMBOL(creds_are_invalid);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d6361776dc5c..1fb6fd68b9c7 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -378,6 +378,8 @@ void __init swiotlb_exit(void)
memblock_free_late(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
+ io_tlb_start = 0;
+ io_tlb_end = 0;
io_tlb_nslabs = 0;
max_segment = 0;
}
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 24a77c34e9ad..c2b41a263166 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance events callchain code, extracted from core.c:
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- *
- * For licensing details see kernel-base/COPYING
*/
#include <linux/perf_event.h>
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3cd13a30f732..5f59d848171e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance events core code:
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- *
- * For licensing details see kernel-base/COPYING
*/
#include <linux/fs.h>
@@ -385,6 +384,8 @@ static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
+static atomic_t nr_ksymbol_events __read_mostly;
+static atomic_t nr_bpf_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
@@ -436,18 +437,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
- if (ret || !write)
- return ret;
-
+ int ret;
+ int perf_cpu = sysctl_perf_cpu_time_max_percent;
/*
* If throttling is disabled don't allow the write:
*/
- if (sysctl_perf_cpu_time_max_percent == 100 ||
- sysctl_perf_cpu_time_max_percent == 0)
+ if (write && (perf_cpu == 100 || perf_cpu == 0))
return -EINVAL;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret || !write)
+ return ret;
+
max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
update_perf_cpu_limits();
@@ -1171,7 +1172,7 @@ static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
static void get_ctx(struct perf_event_context *ctx)
{
- WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
+ refcount_inc(&ctx->refcount);
}
static void free_ctx(struct rcu_head *head)
@@ -1185,7 +1186,7 @@ static void free_ctx(struct rcu_head *head)
static void put_ctx(struct perf_event_context *ctx)
{
- if (atomic_dec_and_test(&ctx->refcount)) {
+ if (refcount_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
@@ -1254,6 +1255,7 @@ static void put_ctx(struct perf_event_context *ctx)
* perf_event_context::lock
* perf_event::mmap_mutex
* mmap_sem
+ * perf_addr_filters_head::lock
*
* cpu_hotplug_lock
* pmus_lock
@@ -1267,7 +1269,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
again:
rcu_read_lock();
ctx = READ_ONCE(event->ctx);
- if (!atomic_inc_not_zero(&ctx->refcount)) {
+ if (!refcount_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
}
@@ -1400,7 +1402,7 @@ retry:
}
if (ctx->task == TASK_TOMBSTONE ||
- !atomic_inc_not_zero(&ctx->refcount)) {
+ !refcount_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock(&ctx->lock);
ctx = NULL;
} else {
@@ -2797,7 +2799,7 @@ static int perf_event_stop(struct perf_event *event, int restart)
*
* (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
* we update the addresses of corresponding vmas in
- * event::addr_filters_offs array and bump the event::addr_filters_gen;
+ * event::addr_filter_ranges array and bump the event::addr_filters_gen;
* (p2) when an event is scheduled in (pmu::add), it calls
* perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
* if the generation has changed since the previous call.
@@ -4056,7 +4058,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
INIT_LIST_HEAD(&ctx->event_list);
INIT_LIST_HEAD(&ctx->pinned_active);
INIT_LIST_HEAD(&ctx->flexible_active);
- atomic_set(&ctx->refcount, 1);
+ refcount_set(&ctx->refcount, 1);
}
static struct perf_event_context *
@@ -4235,7 +4237,7 @@ static bool is_sb_event(struct perf_event *event)
if (attr->mmap || attr->mmap_data || attr->mmap2 ||
attr->comm || attr->comm_exec ||
- attr->task ||
+ attr->task || attr->ksymbol ||
attr->context_switch)
return true;
return false;
@@ -4305,6 +4307,10 @@ static void unaccount_event(struct perf_event *event)
dec = true;
if (has_branch_stack(event))
dec = true;
+ if (event->attr.ksymbol)
+ atomic_dec(&nr_ksymbol_events);
+ if (event->attr.bpf_event)
+ atomic_dec(&nr_bpf_events);
if (dec) {
if (!atomic_add_unless(&perf_sched_count, -1, 1))
@@ -4440,7 +4446,7 @@ static void _free_event(struct perf_event *event)
perf_event_free_bpf_prog(event);
perf_addr_filters_splice(event, NULL);
- kfree(event->addr_filters_offs);
+ kfree(event->addr_filter_ranges);
if (event->destroy)
event->destroy(event);
@@ -4963,6 +4969,11 @@ static void __perf_event_period(struct perf_event *event,
}
}
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+ return event->pmu->check_period(event, value);
+}
+
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
u64 value;
@@ -4979,6 +4990,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
if (event->attr.freq && value > sysctl_perf_event_sample_rate)
return -EINVAL;
+ if (perf_event_check_period(event, value))
+ return -EINVAL;
+
event_function_call(event, __perf_event_period, &value);
return 0;
@@ -5388,7 +5402,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
- if (!atomic_inc_not_zero(&rb->refcount))
+ if (!refcount_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
@@ -5398,7 +5412,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
void ring_buffer_put(struct ring_buffer *rb)
{
- if (!atomic_dec_and_test(&rb->refcount))
+ if (!refcount_dec_and_test(&rb->refcount))
return;
WARN_ON_ONCE(!list_empty(&rb->event_list));
@@ -5463,7 +5477,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
/* this has to be the last one */
rb_free_aux(rb);
- WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+ WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
mutex_unlock(&event->mmap_mutex);
}
@@ -6489,7 +6503,7 @@ void perf_prepare_sample(struct perf_event_header *header,
data->phys_addr = perf_virt_to_phys(data->addr);
}
-static __always_inline void
+static __always_inline int
__perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs,
@@ -6499,13 +6513,15 @@ __perf_event_output(struct perf_event *event,
{
struct perf_output_handle handle;
struct perf_event_header header;
+ int err;
/* protect the callchain buffers */
rcu_read_lock();
perf_prepare_sample(&header, data, event, regs);
- if (output_begin(&handle, event, header.size))
+ err = output_begin(&handle, event, header.size);
+ if (err)
goto exit;
perf_output_sample(&handle, &header, data, event);
@@ -6514,6 +6530,7 @@ __perf_event_output(struct perf_event *event,
exit:
rcu_read_unlock();
+ return err;
}
void
@@ -6532,12 +6549,12 @@ perf_event_output_backward(struct perf_event *event,
__perf_event_output(event, data, regs, perf_output_begin_backward);
}
-void
+int
perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
- __perf_event_output(event, data, regs, perf_output_begin);
+ return __perf_event_output(event, data, regs, perf_output_begin);
}
/*
@@ -6678,7 +6695,8 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
if (filter->path.dentry) {
- event->addr_filters_offs[count] = 0;
+ event->addr_filter_ranges[count].start = 0;
+ event->addr_filter_ranges[count].size = 0;
restart++;
}
@@ -7358,28 +7376,47 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
return true;
}
+static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
+ struct vm_area_struct *vma,
+ struct perf_addr_filter_range *fr)
+{
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ struct file *file = vma->vm_file;
+
+ if (!perf_addr_filter_match(filter, file, off, vma_size))
+ return false;
+
+ if (filter->offset < off) {
+ fr->start = vma->vm_start;
+ fr->size = min(vma_size, filter->size - (off - filter->offset));
+ } else {
+ fr->start = vma->vm_start + filter->offset - off;
+ fr->size = min(vma->vm_end - fr->start, filter->size);
+ }
+
+ return true;
+}
+
static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
{
struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
struct vm_area_struct *vma = data;
- unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
- struct file *file = vma->vm_file;
struct perf_addr_filter *filter;
unsigned int restart = 0, count = 0;
+ unsigned long flags;
if (!has_addr_filter(event))
return;
- if (!file)
+ if (!vma->vm_file)
return;
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
- if (perf_addr_filter_match(filter, file, off,
- vma->vm_end - vma->vm_start)) {
- event->addr_filters_offs[count] = vma->vm_start;
+ if (perf_addr_filter_vma_adjust(filter, vma,
+ &event->addr_filter_ranges[count]))
restart++;
- }
count++;
}
@@ -7650,6 +7687,207 @@ static void perf_log_throttle(struct perf_event *event, int enable)
perf_output_end(&handle);
}
+/*
+ * ksymbol register/unregister tracking
+ */
+
+struct perf_ksymbol_event {
+ const char *name;
+ int name_len;
+ struct {
+ struct perf_event_header header;
+ u64 addr;
+ u32 len;
+ u16 ksym_type;
+ u16 flags;
+ } event_id;
+};
+
+static int perf_event_ksymbol_match(struct perf_event *event)
+{
+ return event->attr.ksymbol;
+}
+
+static void perf_event_ksymbol_output(struct perf_event *event, void *data)
+{
+ struct perf_ksymbol_event *ksymbol_event = data;
+ struct perf_output_handle handle;
+ struct perf_sample_data sample;
+ int ret;
+
+ if (!perf_event_ksymbol_match(event))
+ return;
+
+ perf_event_header__init_id(&ksymbol_event->event_id.header,
+ &sample, event);
+ ret = perf_output_begin(&handle, event,
+ ksymbol_event->event_id.header.size);
+ if (ret)
+ return;
+
+ perf_output_put(&handle, ksymbol_event->event_id);
+ __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len);
+ perf_event__output_id_sample(event, &handle, &sample);
+
+ perf_output_end(&handle);
+}
+
+void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
+ const char *sym)
+{
+ struct perf_ksymbol_event ksymbol_event;
+ char name[KSYM_NAME_LEN];
+ u16 flags = 0;
+ int name_len;
+
+ if (!atomic_read(&nr_ksymbol_events))
+ return;
+
+ if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX ||
+ ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
+ goto err;
+
+ strlcpy(name, sym, KSYM_NAME_LEN);
+ name_len = strlen(name) + 1;
+ while (!IS_ALIGNED(name_len, sizeof(u64)))
+ name[name_len++] = '\0';
+ BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64));
+
+ if (unregister)
+ flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER;
+
+ ksymbol_event = (struct perf_ksymbol_event){
+ .name = name,
+ .name_len = name_len,
+ .event_id = {
+ .header = {
+ .type = PERF_RECORD_KSYMBOL,
+ .size = sizeof(ksymbol_event.event_id) +
+ name_len,
+ },
+ .addr = addr,
+ .len = len,
+ .ksym_type = ksym_type,
+ .flags = flags,
+ },
+ };
+
+ perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
+ return;
+err:
+ WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
+}
+
+/*
+ * bpf program load/unload tracking
+ */
+
+struct perf_bpf_event {
+ struct bpf_prog *prog;
+ struct {
+ struct perf_event_header header;
+ u16 type;
+ u16 flags;
+ u32 id;
+ u8 tag[BPF_TAG_SIZE];
+ } event_id;
+};
+
+static int perf_event_bpf_match(struct perf_event *event)
+{
+ return event->attr.bpf_event;
+}
+
+static void perf_event_bpf_output(struct perf_event *event, void *data)
+{
+ struct perf_bpf_event *bpf_event = data;
+ struct perf_output_handle handle;
+ struct perf_sample_data sample;
+ int ret;
+
+ if (!perf_event_bpf_match(event))
+ return;
+
+ perf_event_header__init_id(&bpf_event->event_id.header,
+ &sample, event);
+ ret = perf_output_begin(&handle, event,
+ bpf_event->event_id.header.size);
+ if (ret)
+ return;
+
+ perf_output_put(&handle, bpf_event->event_id);
+ perf_event__output_id_sample(event, &handle, &sample);
+
+ perf_output_end(&handle);
+}
+
+static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
+ enum perf_bpf_event_type type)
+{
+ bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
+ char sym[KSYM_NAME_LEN];
+ int i;
+
+ if (prog->aux->func_cnt == 0) {
+ bpf_get_prog_name(prog, sym);
+ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
+ (u64)(unsigned long)prog->bpf_func,
+ prog->jited_len, unregister, sym);
+ } else {
+ for (i = 0; i < prog->aux->func_cnt; i++) {
+ struct bpf_prog *subprog = prog->aux->func[i];
+
+ bpf_get_prog_name(subprog, sym);
+ perf_event_ksymbol(
+ PERF_RECORD_KSYMBOL_TYPE_BPF,
+ (u64)(unsigned long)subprog->bpf_func,
+ subprog->jited_len, unregister, sym);
+ }
+ }
+}
+
+void perf_event_bpf_event(struct bpf_prog *prog,
+ enum perf_bpf_event_type type,
+ u16 flags)
+{
+ struct perf_bpf_event bpf_event;
+
+ if (type <= PERF_BPF_EVENT_UNKNOWN ||
+ type >= PERF_BPF_EVENT_MAX)
+ return;
+
+ switch (type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ if (atomic_read(&nr_ksymbol_events))
+ perf_event_bpf_emit_ksymbols(prog, type);
+ break;
+ default:
+ break;
+ }
+
+ if (!atomic_read(&nr_bpf_events))
+ return;
+
+ bpf_event = (struct perf_bpf_event){
+ .prog = prog,
+ .event_id = {
+ .header = {
+ .type = PERF_RECORD_BPF_EVENT,
+ .size = sizeof(bpf_event.event_id),
+ },
+ .type = type,
+ .flags = flags,
+ .id = prog->aux->id,
+ },
+ };
+
+ BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
+
+ memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
+ perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
+}
+
void perf_event_itrace_started(struct perf_event *event)
{
event->attach_state |= PERF_ATTACH_ITRACE;
@@ -8768,26 +9006,19 @@ static void perf_addr_filters_splice(struct perf_event *event,
* @filter; if so, adjust filter's address range.
* Called with mm::mmap_sem down for reading.
*/
-static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
- struct mm_struct *mm)
+static void perf_addr_filter_apply(struct perf_addr_filter *filter,
+ struct mm_struct *mm,
+ struct perf_addr_filter_range *fr)
{
struct vm_area_struct *vma;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
- struct file *file = vma->vm_file;
- unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long vma_size = vma->vm_end - vma->vm_start;
-
- if (!file)
- continue;
-
- if (!perf_addr_filter_match(filter, file, off, vma_size))
+ if (!vma->vm_file)
continue;
- return vma->vm_start;
+ if (perf_addr_filter_vma_adjust(filter, vma, fr))
+ return;
}
-
- return 0;
}
/*
@@ -8821,15 +9052,15 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
- event->addr_filters_offs[count] = 0;
+ event->addr_filter_ranges[count].start = 0;
+ event->addr_filter_ranges[count].size = 0;
/*
* Adjust base offset if the filter is associated to a binary
* that needs to be mapped:
*/
if (filter->path.dentry)
- event->addr_filters_offs[count] =
- perf_addr_filter_apply(filter, mm);
+ perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
count++;
}
@@ -9391,6 +9622,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
return 0;
}
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+ return 0;
+}
+
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9927,9 @@ got_cpu_context:
pmu->pmu_disable = perf_pmu_nop_void;
}
+ if (!pmu->check_period)
+ pmu->check_period = perf_event_nop_int;
+
if (!pmu->event_idx)
pmu->event_idx = perf_event_idx_default;
@@ -9772,6 +10011,15 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
if (ctx)
perf_event_ctx_unlock(event->group_leader, ctx);
+ if (!ret) {
+ if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
+ event_has_any_exclude_flag(event)) {
+ if (event->destroy)
+ event->destroy(event);
+ ret = -EINVAL;
+ }
+ }
+
if (ret)
module_put(pmu->module);
@@ -9900,6 +10148,10 @@ static void account_event(struct perf_event *event)
inc = true;
if (is_cgroup_event(event))
inc = true;
+ if (event->attr.ksymbol)
+ atomic_inc(&nr_ksymbol_events);
+ if (event->attr.bpf_event)
+ atomic_inc(&nr_bpf_events);
if (inc) {
/*
@@ -10082,14 +10334,28 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
goto err_pmu;
if (has_addr_filter(event)) {
- event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!event->addr_filters_offs) {
+ event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
+ sizeof(struct perf_addr_filter_range),
+ GFP_KERNEL);
+ if (!event->addr_filter_ranges) {
err = -ENOMEM;
goto err_per_task;
}
+ /*
+ * Clone the parent's vma offsets: they are valid until exec()
+ * even if the mm is not shared with the parent.
+ */
+ if (event->parent) {
+ struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
+
+ raw_spin_lock_irq(&ifh->lock);
+ memcpy(event->addr_filter_ranges,
+ event->parent->addr_filter_ranges,
+ pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
+ raw_spin_unlock_irq(&ifh->lock);
+ }
+
/* force hw sync on the address filters */
event->addr_filters_gen = 1;
}
@@ -10108,7 +10374,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
return event;
err_addr_filters:
- kfree(event->addr_filters_offs);
+ kfree(event->addr_filter_ranges);
err_per_task:
exclusive_event_destroy(event);
@@ -10391,7 +10657,7 @@ __perf_event_ctx_lock_double(struct perf_event *group_leader,
again:
rcu_read_lock();
gctx = READ_ONCE(group_leader->ctx);
- if (!atomic_inc_not_zero(&gctx->refcount)) {
+ if (!refcount_inc_not_zero(&gctx->refcount)) {
rcu_read_unlock();
goto again;
}
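
Much of the churn above converts event->addr_filters_offs (one bare start address per filter) into addr_filter_ranges carrying both start and size, with perf_addr_filter_vma_adjust() clamping a file-offset-based filter against each VMA. That arithmetic can be checked on its own; the sketch below mirrors it with plain integers outside the kernel (the wrapper names and the test values are made up, only the clamping logic is taken from the patch).

/* Standalone check of the range clamping done by perf_addr_filter_vma_adjust():
 * given a mapping [vm_start, vm_end) whose start corresponds to file offset
 * `off`, and a filter covering [f_off, f_off + f_size) in the file, compute
 * the matching virtual address range. */
#include <assert.h>
#include <stdio.h>

struct range { unsigned long start, size; };

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static struct range filter_to_vma_range(unsigned long vm_start, unsigned long vm_end,
					unsigned long off,	/* file offset of vm_start */
					unsigned long f_off, unsigned long f_size)
{
	unsigned long vma_size = vm_end - vm_start;
	struct range r;

	if (f_off < off) {
		/* Filter starts before this mapping: begin at vm_start. */
		r.start = vm_start;
		r.size = min_ul(vma_size, f_size - (off - f_off));
	} else {
		/* Filter starts inside this mapping. */
		r.start = vm_start + f_off - off;
		r.size = min_ul(vm_end - r.start, f_size);
	}
	return r;
}

int main(void)
{
	/* 0x1000-byte filter at file offset 0x2000, text mapped at 0x400000 for offset 0x1000. */
	struct range r = filter_to_vma_range(0x400000, 0x410000, 0x1000, 0x2000, 0x1000);
	assert(r.start == 0x401000 && r.size == 0x1000);

	/* Filter begins one page before this mapping's file offset. */
	r = filter_to_vma_range(0x400000, 0x402000, 0x3000, 0x2000, 0x3000);
	assert(r.start == 0x400000 && r.size == 0x2000);

	printf("ok\n");
	return 0;
}
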
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 5befb338a18d..c5cd852fe86b 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -1,18 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) 2007 Alan Stern
* Copyright (C) IBM Corporation, 2009
* Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 6dc725a7e7bc..79c47076700a 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -4,13 +4,14 @@
#include <linux/hardirq.h>
#include <linux/uaccess.h>
+#include <linux/refcount.h>
/* Buffer handling */
#define RING_BUFFER_WRITABLE 0x01
struct ring_buffer {
- atomic_t refcount;
+ refcount_t refcount;
struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct work;
@@ -48,7 +49,7 @@ struct ring_buffer {
atomic_t aux_mmap_count;
unsigned long aux_mmap_locked;
void (*free_aux)(void *);
- atomic_t aux_refcount;
+ refcount_t aux_refcount;
void **aux_pages;
void *aux_priv;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 4a9937076331..678ccec60d8f 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance events ring-buffer code:
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- *
- * For licensing details see kernel-base/COPYING
*/
#include <linux/perf_event.h>
@@ -285,7 +284,7 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
else
rb->overwrite = 1;
- atomic_set(&rb->refcount, 1);
+ refcount_set(&rb->refcount, 1);
INIT_LIST_HEAD(&rb->event_list);
spin_lock_init(&rb->event_lock);
@@ -358,7 +357,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
if (!atomic_read(&rb->aux_mmap_count))
goto err;
- if (!atomic_inc_not_zero(&rb->aux_refcount))
+ if (!refcount_inc_not_zero(&rb->aux_refcount))
goto err;
/*
@@ -658,7 +657,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
goto out;
}
- rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
+ rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
overwrite);
if (!rb->aux_priv)
goto out;
@@ -671,7 +670,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
* we keep a refcount here to make sure either of the two can
* reference them safely.
*/
- atomic_set(&rb->aux_refcount, 1);
+ refcount_set(&rb->aux_refcount, 1);
rb->aux_overwrite = overwrite;
rb->aux_watermark = watermark;
@@ -690,7 +689,7 @@ out:
void rb_free_aux(struct ring_buffer *rb)
{
- if (atomic_dec_and_test(&rb->aux_refcount))
+ if (refcount_dec_and_test(&rb->aux_refcount))
__rb_free_aux(rb);
}
@@ -734,6 +733,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
size = sizeof(struct ring_buffer);
size += nr_pages * sizeof(void *);
+ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
+ goto fail;
+
rb = kzalloc(size, GFP_KERNEL);
if (!rb)
goto fail;
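
The new size check in rb_alloc() above rejects requests whose bookkeeping (the struct plus one pointer per data page) would need an allocation order at or above MAX_ORDER, refusing them up front rather than passing an impossible size to kzalloc(). A rough userspace recomputation of that bound, with assumed PAGE_SHIFT/MAX_ORDER values, looks like this.

/* Why the guard fires: the page allocator cannot serve anything at or above
 * order MAX_ORDER, so a size whose order_base_2() reaches PAGE_SHIFT + MAX_ORDER
 * can never be allocated. PAGE_SHIFT/MAX_ORDER here are typical x86-64 values,
 * not taken from the patch. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define MAX_ORDER  11

static int order_base_2(unsigned long n)	/* ceil(log2(n)) */
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long hdr = 192;		/* stand-in for sizeof(struct ring_buffer) */
	unsigned long nr_pages = 1UL << 20;	/* absurdly large user request */
	unsigned long size = hdr + nr_pages * sizeof(void *);

	if (order_base_2(size) >= PAGE_SHIFT + MAX_ORDER)
		printf("reject: need order %d, max usable order is %d\n",
		       order_base_2(size) - PAGE_SHIFT, MAX_ORDER - 1);
	return 0;
}
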
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 8aef47ee7bfa..affa830a198c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* User-space Probes (UProbes)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) IBM Corporation, 2008-2012
* Authors:
* Srikar Dronamraju
diff --git a/kernel/exit.c b/kernel/exit.c
index 2d14979577ee..2166c2d92ddc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -219,6 +219,7 @@ repeat:
}
write_unlock_irq(&tasklist_lock);
+ cgroup_release(p);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
@@ -307,7 +308,7 @@ void rcuwait_wake_up(struct rcuwait *w)
* MB (A) MB (B)
* [L] cond [L] tsk
*/
- smp_rmb(); /* (B) */
+ smp_mb(); /* (B) */
/*
* Avoid using task_rcu_dereference() magic as long as we are careful,
@@ -558,12 +559,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
return NULL;
}
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+ struct list_head *dead)
__releases(&tasklist_lock)
__acquires(&tasklist_lock)
{
struct pid_namespace *pid_ns = task_active_pid_ns(father);
struct task_struct *reaper = pid_ns->child_reaper;
+ struct task_struct *p, *n;
if (likely(reaper != father))
return reaper;
@@ -579,6 +582,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
panic("Attempted to kill init! exitcode=0x%08x\n",
father->signal->group_exit_code ?: father->exit_code);
}
+
+ list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+ list_del_init(&p->ptrace_entry);
+ release_task(p);
+ }
+
zap_pid_ns_processes(pid_ns);
write_lock_irq(&tasklist_lock);
@@ -668,7 +677,7 @@ static void forget_original_parent(struct task_struct *father,
exit_ptrace(father, dead);
/* Can drop and reacquire tasklist_lock */
- reaper = find_child_reaper(father);
+ reaper = find_child_reaper(father, dead);
if (list_empty(&father->children))
return;
@@ -866,6 +875,7 @@ void __noreturn do_exit(long code)
exit_task_namespaces(tsk);
exit_task_work(tsk);
exit_thread(tsk);
+ exit_umh(tsk);
/*
* Flush inherited counters to the parent - before the parent
diff --git a/kernel/fork.c b/kernel/fork.c
index a60459947f18..77059b211608 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -217,6 +217,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
memset(s->addr, 0, THREAD_SIZE);
tsk->stack_vm_area = s;
+ tsk->stack = s->addr;
return s->addr;
}
@@ -428,7 +429,7 @@ static void release_task_stack(struct task_struct *tsk)
#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
- if (atomic_dec_and_test(&tsk->stack_refcount))
+ if (refcount_dec_and_test(&tsk->stack_refcount))
release_task_stack(tsk);
}
#endif
@@ -446,7 +447,7 @@ void free_task(struct task_struct *tsk)
* If the task had a separate stack allocation, it should be gone
* by now.
*/
- WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
+ WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
@@ -709,14 +710,14 @@ static inline void free_signal_struct(struct signal_struct *sig)
static inline void put_signal_struct(struct signal_struct *sig)
{
- if (atomic_dec_and_test(&sig->sigcnt))
+ if (refcount_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
- WARN_ON(atomic_read(&tsk->usage));
+ WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);
cgroup_free(tsk);
@@ -866,7 +867,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
- atomic_set(&tsk->stack_refcount, 1);
+ refcount_set(&tsk->stack_refcount, 1);
#endif
if (err)
@@ -895,7 +896,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
* One for us, one for whoever does the "release_task()" (usually
* parent)
*/
- atomic_set(&tsk->usage, 2);
+ refcount_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
#endif
@@ -1462,7 +1463,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
- atomic_inc(&current->sighand->count);
+ refcount_inc(&current->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
@@ -1470,7 +1471,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
if (!sig)
return -ENOMEM;
- atomic_set(&sig->count, 1);
+ refcount_set(&sig->count, 1);
spin_lock_irq(&current->sighand->siglock);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
spin_unlock_irq(&current->sighand->siglock);
@@ -1479,7 +1480,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
void __cleanup_sighand(struct sighand_struct *sighand)
{
- if (atomic_dec_and_test(&sighand->count)) {
+ if (refcount_dec_and_test(&sighand->count)) {
signalfd_cleanup(sighand);
/*
* sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
@@ -1526,7 +1527,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
- atomic_set(&sig->sigcnt, 1);
+ refcount_set(&sig->sigcnt, 1);
/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
@@ -1833,8 +1834,6 @@ static __latent_entropy struct task_struct *copy_process(
posix_cpu_timers_init(p);
- p->start_time = ktime_get_ns();
- p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
audit_set_context(p, NULL);
cgroup_fork(p);
@@ -2001,6 +2000,17 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_free_pid;
/*
+ * From this point on we must avoid any synchronous user-space
+ * communication until we take the tasklist-lock. In particular, we do
+ * not want user-space to be able to predict the process start-time by
+ * stalling fork(2) after we recorded the start_time but before it is
+ * visible to the system.
+ */
+
+ p->start_time = ktime_get_ns();
+ p->real_start_time = ktime_get_boot_ns();
+
+ /*
* Make it visible to the rest of the system, but dont wake it up yet.
* Need tasklist lock for parent etc handling!
*/
@@ -2072,7 +2082,7 @@ static __latent_entropy struct task_struct *copy_process(
} else {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
- atomic_inc(&current->signal->sigcnt);
+ refcount_inc(&current->signal->sigcnt);
task_join_group_stop(p);
list_add_tail_rcu(&p->thread_group,
&p->group_leader->thread_group);
@@ -2429,7 +2439,7 @@ static int check_unshare_flags(unsigned long unshare_flags)
return -EINVAL;
}
if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
- if (atomic_read(&current->sighand->count) > 1)
+ if (refcount_read(&current->sighand->count) > 1)
return -EINVAL;
}
if (unshare_flags & CLONE_VM) {
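
Most of the fork.c changes above are mechanical atomic_t to refcount_t conversions (sighand->count, signal->sigcnt, task->usage, stack_refcount). The lifetime rule they protect is the usual get/put pattern; a userspace analogue with plain C11 atomics is sketched below, with the caveat that the kernel's refcount_t additionally saturates and warns on overflow and use-after-zero, which this sketch does not attempt to model.

/* Userspace analogue of the get/put pattern behind the refcount_t conversions:
 * the caller that drops the count to zero does the freeing. */
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	int payload;
};

static struct obj *obj_new(int payload)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return NULL;
	atomic_init(&o->refcount, 1);		/* like refcount_set(&o->refcount, 1) */
	o->payload = payload;
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);	/* like refcount_inc() */
}

static void obj_put(struct obj *o)
{
	/* like refcount_dec_and_test(): the last putter frees the object */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = obj_new(42);

	if (!o)
		return 1;
	obj_get(o);	/* second user takes a reference */
	obj_put(o);	/* first drop: object stays alive */
	obj_put(o);	/* second drop: freed here */
	return 0;
}
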
diff --git a/kernel/futex.c b/kernel/futex.c
index be3bff2315ff..c3b73b0311bc 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -68,6 +68,7 @@
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
+#include <linux/refcount.h>
#include <asm/futex.h>
@@ -212,7 +213,7 @@ struct futex_pi_state {
struct rt_mutex pi_mutex;
struct task_struct *owner;
- atomic_t refcount;
+ refcount_t refcount;
union futex_key key;
} __randomize_layout;
@@ -321,12 +322,8 @@ static int __init fail_futex_debugfs(void)
if (IS_ERR(dir))
return PTR_ERR(dir);
- if (!debugfs_create_bool("ignore-private", mode, dir,
- &fail_futex.ignore_private)) {
- debugfs_remove_recursive(dir);
- return -ENOMEM;
- }
-
+ debugfs_create_bool("ignore-private", mode, dir,
+ &fail_futex.ignore_private);
return 0;
}
@@ -803,7 +800,7 @@ static int refill_pi_state_cache(void)
INIT_LIST_HEAD(&pi_state->list);
/* pi_mutex gets initialized later */
pi_state->owner = NULL;
- atomic_set(&pi_state->refcount, 1);
+ refcount_set(&pi_state->refcount, 1);
pi_state->key = FUTEX_KEY_INIT;
current->pi_state_cache = pi_state;
@@ -823,7 +820,7 @@ static struct futex_pi_state *alloc_pi_state(void)
static void get_pi_state(struct futex_pi_state *pi_state)
{
- WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
+ WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
}
/*
@@ -835,7 +832,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
if (!pi_state)
return;
- if (!atomic_dec_and_test(&pi_state->refcount))
+ if (!refcount_dec_and_test(&pi_state->refcount))
return;
/*
@@ -865,7 +862,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
* refcount is at 0 - put it back to 1.
*/
pi_state->owner = NULL;
- atomic_set(&pi_state->refcount, 1);
+ refcount_set(&pi_state->refcount, 1);
current->pi_state_cache = pi_state;
}
}
@@ -908,7 +905,7 @@ void exit_pi_state_list(struct task_struct *curr)
* In that case; drop the locks to let put_pi_state() make
* progress and retry the loop.
*/
- if (!atomic_inc_not_zero(&pi_state->refcount)) {
+ if (!refcount_inc_not_zero(&pi_state->refcount)) {
raw_spin_unlock_irq(&curr->pi_lock);
cpu_relax();
raw_spin_lock_irq(&curr->pi_lock);
@@ -1064,7 +1061,7 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
* and futex_wait_requeue_pi() as it cannot go to 0 and consequently
* free pi_state before we can take a reference ourselves.
*/
- WARN_ON(!atomic_read(&pi_state->refcount));
+ WARN_ON(!refcount_read(&pi_state->refcount));
/*
* Now that we have a pi_state, we can acquire wait_lock
@@ -1452,11 +1449,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
return;
- /*
- * Queue the task for later wakeup for after we've released
- * the hb->lock. wake_q_add() grabs reference to p.
- */
- wake_q_add(wake_q, p);
+ get_task_struct(p);
__unqueue_futex(q);
/*
* The waiting task can free the futex_q as soon as q->lock_ptr = NULL
@@ -1466,6 +1459,12 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
* plist_del in __unqueue_futex().
*/
smp_store_release(&q->lock_ptr, NULL);
+
+ /*
+ * Queue the task for later wakeup for after we've released
+ * the hb->lock. wake_q_add() grabs reference to p.
+ */
+ wake_q_add_safe(wake_q, p);
}
/*
@@ -2218,11 +2217,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
* decrement the counter at queue_unlock() when some error has
* occurred and we don't end up adding the task to the list.
*/
- hb_waiters_inc(hb);
+ hb_waiters_inc(hb); /* implies smp_mb(); (A) */
q->lock_ptr = &hb->lock;
- spin_lock(&hb->lock); /* implies smp_mb(); (A) */
+ spin_lock(&hb->lock);
return hb;
}
@@ -2858,35 +2857,39 @@ retry_private:
* and BUG when futex_unlock_pi() interleaves with this.
*
* Therefore acquire wait_lock while holding hb->lock, but drop the
- * latter before calling rt_mutex_start_proxy_lock(). This still fully
- * serializes against futex_unlock_pi() as that does the exact same
- * lock handoff sequence.
+ * latter before calling __rt_mutex_start_proxy_lock(). This
+ * interleaves with futex_unlock_pi() -- which does a similar lock
+ * handoff -- such that the latter can observe the futex_q::pi_state
+ * before __rt_mutex_start_proxy_lock() is done.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
spin_unlock(q.lock_ptr);
+ /*
+ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+ * such that futex_unlock_pi() is guaranteed to observe the waiter when
+ * it sees the futex_q::pi_state.
+ */
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
if (ret) {
if (ret == 1)
ret = 0;
-
- spin_lock(q.lock_ptr);
- goto no_block;
+ goto cleanup;
}
-
if (unlikely(to))
hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+cleanup:
spin_lock(q.lock_ptr);
/*
- * If we failed to acquire the lock (signal/timeout), we must
+ * If we failed to acquire the lock (deadlock/signal/timeout), we must
* first acquire the hb->lock before removing the lock from the
- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
- * wait lists consistent.
+ * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+ * lists consistent.
*
* In particular; it is important that futex_unlock_pi() can not
* observe this inconsistency.
@@ -3010,6 +3013,10 @@ retry:
* there is no point where we hold neither; and therefore
* wake_futex_pi() must observe a state consistent with what we
* observed.
+ *
+ * In particular; this forces __rt_mutex_start_proxy() to
+ * complete such that we're guaranteed to observe the
+ * rt_waiter. Also see the WARN in wake_futex_pi().
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
@@ -3812,7 +3819,7 @@ err_unlock:
#endif /* CONFIG_COMPAT */
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 45b68b4ea48b..f18cd5aa33e8 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -9,7 +9,7 @@
#include <linux/cpu.h>
static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
- int cpus_per_vec)
+ unsigned int cpus_per_vec)
{
const struct cpumask *siblmsk;
int cpu, sibl;
@@ -95,15 +95,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
}
static int __irq_build_affinity_masks(const struct irq_affinity *affd,
- int startvec, int numvecs, int firstvec,
+ unsigned int startvec,
+ unsigned int numvecs,
+ unsigned int firstvec,
cpumask_var_t *node_to_cpumask,
const struct cpumask *cpu_mask,
struct cpumask *nmsk,
struct irq_affinity_desc *masks)
{
- int n, nodes, cpus_per_vec, extra_vecs, done = 0;
- int last_affv = firstvec + numvecs;
- int curvec = startvec;
+ unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0;
+ unsigned int last_affv = firstvec + numvecs;
+ unsigned int curvec = startvec;
nodemask_t nodemsk = NODE_MASK_NONE;
if (!cpumask_weight(cpu_mask))
@@ -117,18 +119,16 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
*/
if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
- cpumask_or(&masks[curvec].mask,
- &masks[curvec].mask,
- node_to_cpumask[n]);
+ cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
+ node_to_cpumask[n]);
if (++curvec == last_affv)
curvec = firstvec;
}
- done = numvecs;
- goto out;
+ return numvecs;
}
for_each_node_mask(n, nodemsk) {
- int ncpus, v, vecs_to_assign, vecs_per_node;
+ unsigned int ncpus, v, vecs_to_assign, vecs_per_node;
/* Spread the vectors per node */
vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
@@ -163,8 +163,6 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
curvec = firstvec;
--nodes;
}
-
-out:
return done;
}
@@ -174,19 +172,24 @@ out:
* 2) spread other possible CPUs on these vectors
*/
static int irq_build_affinity_masks(const struct irq_affinity *affd,
- int startvec, int numvecs, int firstvec,
- cpumask_var_t *node_to_cpumask,
+ unsigned int startvec, unsigned int numvecs,
+ unsigned int firstvec,
struct irq_affinity_desc *masks)
{
- int curvec = startvec, nr_present, nr_others;
- int ret = -ENOMEM;
+ unsigned int curvec = startvec, nr_present, nr_others;
+ cpumask_var_t *node_to_cpumask;
cpumask_var_t nmsk, npresmsk;
+ int ret = -ENOMEM;
if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
return ret;
if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
- goto fail;
+ goto fail_nmsk;
+
+ node_to_cpumask = alloc_node_to_cpumask();
+ if (!node_to_cpumask)
+ goto fail_npresmsk;
ret = 0;
/* Stabilize the cpumasks */
@@ -217,13 +220,22 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
if (nr_present < numvecs)
WARN_ON(nr_present + nr_others < numvecs);
+ free_node_to_cpumask(node_to_cpumask);
+
+ fail_npresmsk:
free_cpumask_var(npresmsk);
- fail:
+ fail_nmsk:
free_cpumask_var(nmsk);
return ret;
}
+static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
+{
+ affd->nr_sets = 1;
+ affd->set_size[0] = affvecs;
+}
+
/**
* irq_create_affinity_masks - Create affinity masks for multiqueue spreading
* @nvecs: The total number of vectors
@@ -232,50 +244,62 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
* Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/
struct irq_affinity_desc *
-irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
- int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
- int curvec, usedvecs;
- cpumask_var_t *node_to_cpumask;
+ unsigned int affvecs, curvec, usedvecs, i;
struct irq_affinity_desc *masks = NULL;
- int i, nr_sets;
/*
- * If there aren't any vectors left after applying the pre/post
- * vectors don't bother with assigning affinity.
+ * Determine the number of vectors which need interrupt affinities
+ * assigned. If the pre/post request exhausts the available vectors
+ * then nothing to do here except for invoking the calc_sets()
+ * callback so the device driver can adjust to the situation. If there
+ * is only a single vector, then managing the queue is pointless as
+ * well.
*/
- if (nvecs == affd->pre_vectors + affd->post_vectors)
+ if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
+ affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+ else
+ affvecs = 0;
+
+ /*
+ * Simple invocations do not provide a calc_sets() callback. Install
+ * the generic one.
+ */
+ if (!affd->calc_sets)
+ affd->calc_sets = default_calc_sets;
+
+ /* Recalculate the sets */
+ affd->calc_sets(affd, affvecs);
+
+ if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
return NULL;
- node_to_cpumask = alloc_node_to_cpumask();
- if (!node_to_cpumask)
+ /* Nothing to assign? */
+ if (!affvecs)
return NULL;
masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
if (!masks)
- goto outnodemsk;
+ return NULL;
/* Fill out vectors at the beginning that don't need affinity */
for (curvec = 0; curvec < affd->pre_vectors; curvec++)
cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+
/*
* Spread on present CPUs starting from affd->pre_vectors. If we
* have multiple sets, build each sets affinity mask separately.
*/
- nr_sets = affd->nr_sets;
- if (!nr_sets)
- nr_sets = 1;
-
- for (i = 0, usedvecs = 0; i < nr_sets; i++) {
- int this_vecs = affd->sets ? affd->sets[i] : affvecs;
+ for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
+ unsigned int this_vecs = affd->set_size[i];
int ret;
ret = irq_build_affinity_masks(affd, curvec, this_vecs,
- curvec, node_to_cpumask, masks);
+ curvec, masks);
if (ret) {
kfree(masks);
- masks = NULL;
- goto outnodemsk;
+ return NULL;
}
curvec += this_vecs;
usedvecs += this_vecs;
@@ -293,8 +317,6 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
masks[i].is_managed = 1;
-outnodemsk:
- free_node_to_cpumask(node_to_cpumask);
return masks;
}
@@ -304,25 +326,22 @@ outnodemsk:
* @maxvec: The maximum number of vectors available
* @affd: Description of the affinity requirements
*/
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd)
{
- int resv = affd->pre_vectors + affd->post_vectors;
- int vecs = maxvec - resv;
- int set_vecs;
+ unsigned int resv = affd->pre_vectors + affd->post_vectors;
+ unsigned int set_vecs;
if (resv > minvec)
return 0;
- if (affd->nr_sets) {
- int i;
-
- for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
- set_vecs += affd->sets[i];
+ if (affd->calc_sets) {
+ set_vecs = maxvec - resv;
} else {
get_online_cpus();
set_vecs = cpumask_weight(cpu_possible_mask);
put_online_cpus();
}
- return resv + min(set_vecs, vecs);
+ return resv + min(set_vecs, maxvec - resv);
}
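/*
 * A minimal usage sketch of the reworked interface above, not part of the
 * patch: a multiqueue driver supplies a calc_sets() callback so that the
 * spreadable vectors are (re)distributed over its interrupt sets. It assumes
 * the struct irq_affinity layout used here (nr_sets, set_size[], calc_sets)
 * plus a driver-private 'priv' pointer; the driver structure and vector
 * counts are illustrative only.
 */
struct my_mq_dev {
	unsigned int nr_read_queues;
	unsigned int nr_write_queues;
};

static void my_mq_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
	struct my_mq_dev *dev = affd->priv;

	/* Split the spreadable vectors between read and write queues. */
	affd->nr_sets = 2;
	affd->set_size[0] = min(dev->nr_read_queues, affvecs / 2);
	affd->set_size[1] = affvecs - affd->set_size[0];
}

static struct irq_affinity_desc *my_mq_make_masks(struct my_mq_dev *dev,
						  unsigned int nvecs)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,		/* e.g. one admin/config vector */
		.calc_sets	= my_mq_calc_sets,
		.priv		= dev,
	};

	return irq_create_affinity_masks(nvecs, &affd);
}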
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 34e969069488..99b7dd6982a4 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -730,6 +730,37 @@ out:
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
/**
+ * handle_fasteoi_nmi - irq handler for NMI interrupt lines
+ * @desc: the interrupt description structure for this irq
+ *
+ * A simple NMI-safe handler, considering the restrictions
+ * from request_nmi.
+ *
+ * Only a single callback will be issued to the chip: an ->eoi()
+ * call when the interrupt has been serviced. This enables support
+ * for modern forms of interrupt handlers, which handle the flow
+ * details in hardware, transparently.
+ */
+void handle_fasteoi_nmi(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irqaction *action = desc->action;
+ unsigned int irq = irq_desc_get_irq(desc);
+ irqreturn_t res;
+
+ trace_irq_handler_entry(irq, action);
+ /*
+ * NMIs cannot be shared, there is only one action.
+ */
+ res = action->handler(irq, action->dev_id);
+ trace_irq_handler_exit(irq, action, res);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
+
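/*
 * Not part of the patch: a minimal sketch of how an irqchip driver could
 * install this flow handler from its ->irq_nmi_setup() callback (invoked via
 * irq_nmi_setup() in kernel/irq/manage.c further down). The chip-specific
 * priority programming is omitted and the function name is illustrative.
 */
static int my_chip_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	/* Deliver this line through the NMI-safe flow handler. */
	desc->handle_irq = handle_fasteoi_nmi;

	/* ... raise the line's priority above the normal interrupt mask ... */
	return 0;
}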
+/**
* handle_edge_irq - edge type IRQ handler
* @desc: the interrupt description structure for this irq
*
@@ -855,7 +886,11 @@ void handle_percpu_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- kstat_incr_irqs_this_cpu(desc);
+ /*
+ * PER CPU interrupts are not serialized. Do not touch
+ * desc->tot_count.
+ */
+ __kstat_incr_irqs_this_cpu(desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
@@ -884,7 +919,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;
- kstat_incr_irqs_this_cpu(desc);
+ /*
+ * PER CPU interrupts are not serialized. Do not touch
+ * desc->tot_count.
+ */
+ __kstat_incr_irqs_this_cpu(desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
@@ -908,6 +947,29 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
+/**
+ * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
+ * dev ids
+ * @desc: the interrupt description structure for this irq
+ *
+ * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
+ * as a percpu pointer.
+ */
+void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irqaction *action = desc->action;
+ unsigned int irq = irq_desc_get_irq(desc);
+ irqreturn_t res;
+
+ trace_irq_handler_entry(irq, action);
+ res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
+ trace_irq_handler_exit(irq, action, res);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
int is_chained, const char *name)
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 6f636136cccc..516c00a5e867 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -56,6 +56,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
+ BIT_MASK_DESCR(IRQCHIP_SUPPORTS_NMI),
};
static void
@@ -140,6 +141,7 @@ static const struct irq_bit_descr irqdesc_istates[] = {
BIT_MASK_DESCR(IRQS_WAITING),
BIT_MASK_DESCR(IRQS_PENDING),
BIT_MASK_DESCR(IRQS_SUSPENDED),
+ BIT_MASK_DESCR(IRQS_NMI),
};
@@ -203,8 +205,8 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
- if (irq_settings_is_level(desc)) {
- /* Can't do level, sorry */
+ if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) {
+ /* Can't do level nor NMIs, sorry */
err = -EINVAL;
} else {
desc->istate |= IRQS_PENDING;
@@ -256,8 +258,6 @@ static int __init irq_debugfs_init(void)
int irq;
root_dir = debugfs_create_dir("irq", NULL);
- if (!root_dir)
- return -ENOMEM;
irq_domain_debugfs_init(root_dir);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 38554bc35375..6df5ddfdb0f8 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -166,7 +166,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
__irq_wake_thread(desc, action);
- /* Fall through to add to randomness */
+ /* Fall through - to add to randomness */
case IRQ_HANDLED:
*flags |= action->flags;
break;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index ca6afa267070..70c3053bc1f6 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -49,6 +49,7 @@ enum {
* IRQS_WAITING - irq is waiting
* IRQS_PENDING - irq is pending and replayed later
* IRQS_SUSPENDED - irq is suspended
+ * IRQS_NMI - irq line is used to deliver NMIs
*/
enum {
IRQS_AUTODETECT = 0x00000001,
@@ -60,6 +61,7 @@ enum {
IRQS_PENDING = 0x00000200,
IRQS_SUSPENDED = 0x00000800,
IRQS_TIMINGS = 0x00001000,
+ IRQS_NMI = 0x00002000,
};
#include "debug.h"
@@ -242,12 +244,18 @@ static inline void irq_state_set_masked(struct irq_desc *desc)
#undef __irqd_to_state
-static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
+static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
__this_cpu_inc(*desc->kstat_irqs);
__this_cpu_inc(kstat.irqs_sum);
}
+static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
+{
+ __kstat_incr_irqs_this_cpu(desc);
+ desc->tot_count++;
+}
+
static inline int irq_desc_get_node(struct irq_desc *desc)
{
return irq_common_data_get_node(&desc->irq_common_data);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index ee062b7939d3..13539e12cd80 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -119,6 +119,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
desc->depth = 1;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
+ desc->tot_count = 0;
desc->name = NULL;
desc->owner = owner;
for_each_possible_cpu(cpu)
@@ -457,7 +458,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
/* Validate affinity mask(s) */
if (affinity) {
- for (i = 0; i < cnt; i++, i++) {
+ for (i = 0; i < cnt; i++) {
if (cpumask_empty(&affinity[i].mask))
return -EINVAL;
}
@@ -669,6 +670,41 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
set_irq_regs(old_regs);
return ret;
}
+
+#ifdef CONFIG_IRQ_DOMAIN
+/**
+ * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
+ * @domain: The domain where to perform the lookup
+ * @hwirq: The HW irq number to convert to a logical one
+ * @regs: Register file coming from the low-level handling code
+ *
+ * Returns: 0 on success, or -EINVAL if conversion has failed
+ */
+int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+ struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ unsigned int irq;
+ int ret = 0;
+
+ nmi_enter();
+
+ irq = irq_find_mapping(domain, hwirq);
+
+ /*
+ * ack_bad_irq is not NMI-safe, just report
+ * an invalid interrupt.
+ */
+ if (likely(irq))
+ generic_handle_irq(irq);
+ else
+ ret = -EINVAL;
+
+ nmi_exit();
+ set_irq_regs(old_regs);
+ return ret;
+}
+#endif
#endif
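/*
 * Not part of the patch: a minimal sketch of an architecture's low-level
 * entry path handing a pseudo-NMI to the new helper; the root domain pointer
 * and the function name are placeholders.
 */
static struct irq_domain *my_root_irq_domain;

static void my_arch_handle_nmi(unsigned int hwirq, struct pt_regs *regs)
{
	/* nmi_enter()/nmi_exit() are done inside handle_domain_nmi(). */
	if (handle_domain_nmi(my_root_irq_domain, hwirq, regs))
		pr_warn_ratelimited("Spurious NMI, hwirq %u\n", hwirq);
}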
/* Dynamic interrupt handling */
@@ -919,11 +955,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
unsigned int kstat_irqs(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- int cpu;
unsigned int sum = 0;
+ int cpu;
if (!desc || !desc->kstat_irqs)
return 0;
+ if (!irq_settings_is_per_cpu_devid(desc) &&
+ !irq_settings_is_per_cpu(desc))
+ return desc->tot_count;
+
for_each_possible_cpu(cpu)
sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
return sum;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 8b0be4bd6565..3bf9793d8825 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -458,6 +458,20 @@ void irq_set_default_host(struct irq_domain *domain)
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
+/**
+ * irq_get_default_host() - Retrieve the "default" irq domain
+ *
+ * Returns: the default domain, if any.
+ *
+ * Modern code should never use this. This should only be used on
+ * systems that cannot implement a firmware->fwnode mapping (which
+ * both DT and ACPI provide).
+ */
+struct irq_domain *irq_get_default_host(void)
+{
+ return irq_default_domain;
+}
+
static void irq_domain_clear_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
@@ -1749,8 +1763,6 @@ void __init irq_domain_debugfs_init(struct dentry *root)
struct irq_domain *d;
domain_dir = debugfs_create_dir("domains", root);
- if (!domain_dir)
- return;
debugfs_create_file("default", 0444, domain_dir, NULL,
&irq_domain_debug_fops);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a4888ce4667a..9ec34a2a6638 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -341,7 +341,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
/* The release function is promised process context */
might_sleep();
- if (!desc)
+ if (!desc || desc->istate & IRQS_NMI)
return -EINVAL;
/* Complete initialisation of *notify */
@@ -393,6 +393,9 @@ int irq_setup_affinity(struct irq_desc *desc)
}
cpumask_and(&mask, cpu_online_mask, set);
+ if (cpumask_empty(&mask))
+ cpumask_copy(&mask, cpu_online_mask);
+
if (node != NUMA_NO_NODE) {
const struct cpumask *nodemask = cpumask_of_node(node);
@@ -550,6 +553,21 @@ bool disable_hardirq(unsigned int irq)
}
EXPORT_SYMBOL_GPL(disable_hardirq);
+/**
+ * disable_nmi_nosync - disable an nmi without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables and enables are
+ * nested.
+ * The interrupt to disable must have been requested through request_nmi.
+ * Unlike disable_nmi(), this function does not ensure existing
+ * instances of the IRQ handler have completed before returning.
+ */
+void disable_nmi_nosync(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+}
+
void __enable_irq(struct irq_desc *desc)
{
switch (desc->depth) {
@@ -606,6 +624,20 @@ out:
}
EXPORT_SYMBOL(enable_irq);
+/**
+ * enable_nmi - enable handling of an nmi
+ * @irq: Interrupt to enable
+ *
+ * The interrupt to enable must have been requested through request_nmi.
+ * Undoes the effect of one call to disable_nmi(). If this
+ * matches the last disable, processing of interrupts on this
+ * IRQ line is re-enabled.
+ */
+void enable_nmi(unsigned int irq)
+{
+ enable_irq(irq);
+}
+
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -641,6 +673,12 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
if (!desc)
return -EINVAL;
+ /* Don't use NMIs as wake up interrupts please */
+ if (desc->istate & IRQS_NMI) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
@@ -663,6 +701,8 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
}
}
+
+out_unlock:
irq_put_desc_busunlock(desc, flags);
return ret;
}
@@ -723,6 +763,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
case IRQ_SET_MASK_OK_DONE:
irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
irqd_set(&desc->irq_data, flags);
+ /* fall through */
case IRQ_SET_MASK_OK_NOCOPY:
flags = irqd_get_trigger_type(&desc->irq_data);
@@ -1125,6 +1166,39 @@ static void irq_release_resources(struct irq_desc *desc)
c->irq_release_resources(d);
}
+static bool irq_supports_nmi(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /* Only IRQs directly managed by the root irqchip can be set as NMI */
+ if (d->parent_data)
+ return false;
+#endif
+ /* Don't support NMIs for chips behind a slow bus */
+ if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
+ return false;
+
+ return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
+}
+
+static int irq_nmi_setup(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ struct irq_chip *c = d->chip;
+
+ return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
+}
+
+static void irq_nmi_teardown(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ struct irq_chip *c = d->chip;
+
+ if (c->irq_nmi_teardown)
+ c->irq_nmi_teardown(d);
+}
+
static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
@@ -1299,9 +1373,17 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* fields must have IRQF_SHARED set and the bits which
* set the trigger type must match. Also all must
* agree on ONESHOT.
+ * Interrupt lines used for NMIs cannot be shared.
*/
unsigned int oldtype;
+ if (desc->istate & IRQS_NMI) {
+ pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
+ new->name, irq, desc->irq_data.chip->name);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/*
* If nobody did set the configuration before, inherit
* the one provided by the requester.
@@ -1753,6 +1835,59 @@ const void *free_irq(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL(free_irq);
+/* This function must be called with desc->lock held */
+static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
+{
+ const char *devname = NULL;
+
+ desc->istate &= ~IRQS_NMI;
+
+ if (!WARN_ON(desc->action == NULL)) {
+ irq_pm_remove_action(desc, desc->action);
+ devname = desc->action->name;
+ unregister_handler_proc(irq, desc->action);
+
+ kfree(desc->action);
+ desc->action = NULL;
+ }
+
+ irq_settings_clr_disable_unlazy(desc);
+ irq_shutdown(desc);
+
+ irq_release_resources(desc);
+
+ irq_chip_pm_put(&desc->irq_data);
+ module_put(desc->owner);
+
+ return devname;
+}
+
+const void *free_nmi(unsigned int irq, void *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+ const void *devname;
+
+ if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
+ return NULL;
+
+ if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+ return NULL;
+
+ /* NMI still enabled */
+ if (WARN_ON(desc->depth == 0))
+ disable_nmi_nosync(irq);
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ irq_nmi_teardown(desc);
+ devname = __cleanup_nmi(irq, desc);
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ return devname;
+}
+
/**
* request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
@@ -1922,6 +2057,101 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
+/**
+ * request_nmi - allocate an interrupt line for NMI delivery
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Threaded handler for threaded interrupts.
+ * @irqflags: Interrupt type flags
+ * @name: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. It sets up the IRQ line
+ * to be handled as an NMI.
+ *
+ * An interrupt line delivering NMIs cannot be shared and IRQ handling
+ * cannot be threaded.
+ *
+ * Interrupt lines requested for NMI delivery must produce per-CPU
+ * interrupts and must have automatic enabling disabled.
+ *
+ * Dev_id must be globally unique. Normally the address of the
+ * device data structure is used as the cookie. Since the handler
+ * receives this value it makes sense to use it.
+ *
+ * If the interrupt line cannot be used to deliver NMIs, this function
+ * will fail and return a negative value.
+ */
+int request_nmi(unsigned int irq, irq_handler_t handler,
+ unsigned long irqflags, const char *name, void *dev_id)
+{
+ struct irqaction *action;
+ struct irq_desc *desc;
+ unsigned long flags;
+ int retval;
+
+ if (irq == IRQ_NOTCONNECTED)
+ return -ENOTCONN;
+
+ /* NMI cannot be shared, used for Polling */
+ if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
+ return -EINVAL;
+
+ if (!(irqflags & IRQF_PERCPU))
+ return -EINVAL;
+
+ if (!handler)
+ return -EINVAL;
+
+ desc = irq_to_desc(irq);
+
+ if (!desc || irq_settings_can_autoenable(desc) ||
+ !irq_settings_can_request(desc) ||
+ WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
+ !irq_supports_nmi(desc))
+ return -EINVAL;
+
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
+ action->name = name;
+ action->dev_id = dev_id;
+
+ retval = irq_chip_pm_get(&desc->irq_data);
+ if (retval < 0)
+ goto err_out;
+
+ retval = __setup_irq(irq, desc, action);
+ if (retval)
+ goto err_irq_setup;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ /* Setup NMI state */
+ desc->istate |= IRQS_NMI;
+ retval = irq_nmi_setup(desc);
+ if (retval) {
+ __cleanup_nmi(irq, desc);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return -EINVAL;
+ }
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ return 0;
+
+err_irq_setup:
+ irq_chip_pm_put(&desc->irq_data);
+err_out:
+ kfree(action);
+
+ return retval;
+}
+
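/*
 * Not part of the patch: a minimal sketch of requesting a first-level NMI,
 * assuming the line satisfies the constraints checked above (IRQF_PERCPU,
 * no auto-enable, root irqchip with IRQCHIP_SUPPORTS_NMI). The handler and
 * names are illustrative.
 */
static irqreturn_t my_nmi_handler(int irq, void *dev_id)
{
	/* Runs in NMI context: keep it short and lock-free. */
	return IRQ_HANDLED;
}

static int my_setup_nmi(unsigned int irq, void *cookie)
{
	int ret;

	ret = request_nmi(irq, my_nmi_handler, IRQF_PERCPU, "my-nmi", cookie);
	if (ret)
		return ret;

	/* NMI lines are requested with auto-enable off, so enable explicitly. */
	enable_nmi(irq);
	return 0;
}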
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
unsigned int cpu = smp_processor_id();
@@ -1956,6 +2186,11 @@ out:
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
+void enable_percpu_nmi(unsigned int irq, unsigned int type)
+{
+ enable_percpu_irq(irq, type);
+}
+
/**
* irq_percpu_is_enabled - Check whether the per cpu irq is enabled
* @irq: Linux irq number to check for
@@ -1995,6 +2230,11 @@ void disable_percpu_irq(unsigned int irq)
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);
+void disable_percpu_nmi(unsigned int irq)
+{
+ disable_percpu_irq(irq);
+}
+
/*
* Internal function to unregister a percpu irqaction.
*/
@@ -2026,6 +2266,8 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
/* Found it - now remove it from the list of entries: */
desc->action = NULL;
+ desc->istate &= ~IRQS_NMI;
+
raw_spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
@@ -2079,6 +2321,19 @@ void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
}
EXPORT_SYMBOL_GPL(free_percpu_irq);
+void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc || !irq_settings_is_per_cpu_devid(desc))
+ return;
+
+ if (WARN_ON(!(desc->istate & IRQS_NMI)))
+ return;
+
+ kfree(__free_percpu_irq(irq, dev_id));
+}
+
/**
* setup_percpu_irq - setup a per-cpu interrupt
* @irq: Interrupt line to setup
@@ -2169,6 +2424,158 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
EXPORT_SYMBOL_GPL(__request_percpu_irq);
/**
+ * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * @name: An ascii name for the claiming device
+ * @dev_id: A percpu cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
+ * have to be setup on each CPU by calling prepare_percpu_nmi() before
+ * being enabled on the same CPU by using enable_percpu_nmi().
+ *
+ * Dev_id must be globally unique. It is a per-cpu variable, and
+ * the handler gets called with the interrupted CPU's instance of
+ * that variable.
+ *
+ * Interrupt lines requested for NMI delivery should have automatic
+ * enabling disabled.
+ *
+ * If the interrupt line cannot be used to deliver NMIs, this function
+ * will fail and return a negative value.
+ */
+int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+ const char *name, void __percpu *dev_id)
+{
+ struct irqaction *action;
+ struct irq_desc *desc;
+ unsigned long flags;
+ int retval;
+
+ if (!handler)
+ return -EINVAL;
+
+ desc = irq_to_desc(irq);
+
+ if (!desc || !irq_settings_can_request(desc) ||
+ !irq_settings_is_per_cpu_devid(desc) ||
+ irq_settings_can_autoenable(desc) ||
+ !irq_supports_nmi(desc))
+ return -EINVAL;
+
+ /* The line cannot already be NMI */
+ if (desc->istate & IRQS_NMI)
+ return -EINVAL;
+
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
+ | IRQF_NOBALANCING;
+ action->name = name;
+ action->percpu_dev_id = dev_id;
+
+ retval = irq_chip_pm_get(&desc->irq_data);
+ if (retval < 0)
+ goto err_out;
+
+ retval = __setup_irq(irq, desc, action);
+ if (retval)
+ goto err_irq_setup;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ desc->istate |= IRQS_NMI;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ return 0;
+
+err_irq_setup:
+ irq_chip_pm_put(&desc->irq_data);
+err_out:
+ kfree(action);
+
+ return retval;
+}
+
+/**
+ * prepare_percpu_nmi - performs CPU local setup for NMI delivery
+ * @irq: Interrupt line to prepare for NMI delivery
+ *
+ * This call prepares an interrupt line to deliver NMI on the current CPU,
+ * before that interrupt line gets enabled with enable_percpu_nmi().
+ *
+ * As a CPU local operation, this should be called from non-preemptible
+ * context.
+ *
+ * If the interrupt line cannot be used to deliver NMIs, this function
+ * will fail and return a negative value.
+ */
+int prepare_percpu_nmi(unsigned int irq)
+{
+ unsigned long flags;
+ struct irq_desc *desc;
+ int ret = 0;
+
+ WARN_ON(preemptible());
+
+ desc = irq_get_desc_lock(irq, &flags,
+ IRQ_GET_DESC_CHECK_PERCPU);
+ if (!desc)
+ return -EINVAL;
+
+ if (WARN(!(desc->istate & IRQS_NMI),
+ KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
+ irq)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = irq_nmi_setup(desc);
+ if (ret) {
+ pr_err("Failed to setup NMI delivery: irq %u\n", irq);
+ goto out;
+ }
+
+out:
+ irq_put_desc_unlock(desc, flags);
+ return ret;
+}
+
+/**
+ * teardown_percpu_nmi - undoes NMI setup of IRQ line
+ * @irq: Interrupt line from which CPU local NMI configuration should be
+ * removed
+ *
+ * This call undoes the setup done by prepare_percpu_nmi().
+ *
+ * IRQ line should not be enabled for the current CPU.
+ *
+ * As a CPU local operation, this should be called from non-preemptible
+ * context.
+ */
+void teardown_percpu_nmi(unsigned int irq)
+{
+ unsigned long flags;
+ struct irq_desc *desc;
+
+ WARN_ON(preemptible());
+
+ desc = irq_get_desc_lock(irq, &flags,
+ IRQ_GET_DESC_CHECK_PERCPU);
+ if (!desc)
+ return;
+
+ if (WARN_ON(!(desc->istate & IRQS_NMI)))
+ goto out;
+
+ irq_nmi_teardown(desc);
+out:
+ irq_put_desc_unlock(desc, flags);
+}
+
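/*
 * Not part of the patch: a minimal sketch of the per-CPU NMI sequence
 * described above - request the line once, then prepare and enable it on
 * each CPU that should receive the NMI (typically from a CPU hotplug
 * "starting" callback). Names and the percpu cookie are illustrative.
 */
struct my_pcpu_state {
	unsigned long count;
};
static DEFINE_PER_CPU(struct my_pcpu_state, my_pcpu_state);

static irqreturn_t my_pcpu_nmi_handler(int irq, void *dev_id)
{
	struct my_pcpu_state *state = dev_id;	/* this CPU's instance */

	state->count++;
	return IRQ_HANDLED;
}

static int my_request_pcpu_nmi(unsigned int irq)
{
	return request_percpu_nmi(irq, my_pcpu_nmi_handler, "my-pcpu-nmi",
				  &my_pcpu_state);
}

/* Called on each CPU with preemption disabled. */
static void my_cpu_starting(unsigned int irq)
{
	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}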
+/**
* irq_get_irqchip_state - returns the irqchip state of a interrupt.
* @irq: Interrupt line that is forwarded to a VM
* @which: One of IRQCHIP_STATE_* the caller wants to know about
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index f3a04994e063..14934afa9e68 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -494,7 +494,7 @@ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
- iter->module_name[0] = '\0';
+ strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
iter->exported = 0;
return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
&iter->value, &iter->type,
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f4ddfdd2d07e..c83e54727131 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1396,7 +1396,7 @@ bool __weak arch_within_kprobe_blacklist(unsigned long addr)
addr < (unsigned long)__kprobes_text_end;
}
-bool within_kprobe_blacklist(unsigned long addr)
+static bool __within_kprobe_blacklist(unsigned long addr)
{
struct kprobe_blacklist_entry *ent;
@@ -1410,7 +1410,26 @@ bool within_kprobe_blacklist(unsigned long addr)
if (addr >= ent->start_addr && addr < ent->end_addr)
return true;
}
+ return false;
+}
+bool within_kprobe_blacklist(unsigned long addr)
+{
+ char symname[KSYM_NAME_LEN], *p;
+
+ if (__within_kprobe_blacklist(addr))
+ return true;
+
+ /* Check if the address is on a suffixed-symbol */
+ if (!lookup_symbol_name(addr, symname)) {
+ p = strchr(symname, '.');
+ if (!p)
+ return false;
+ *p = '\0';
+ addr = (unsigned long)kprobe_lookup_name(symname, 0);
+ if (addr)
+ return __within_kprobe_blacklist(addr);
+ }
return false;
}
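/*
 * Not part of the patch - for illustration with hypothetical symbol names:
 * if a blacklisted function foo() is emitted by the compiler as a suffixed
 * clone such as "foo.constprop.0", the lookup above strips everything from
 * the first '.' ("foo.constprop.0" -> "foo"), resolves "foo" and rejects
 * the probe when the base symbol is on the blacklist.
 */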
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b5..5942eeafb9ac 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,6 +20,7 @@
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/numa.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
@@ -101,6 +102,12 @@ bool kthread_should_stop(void)
}
EXPORT_SYMBOL(kthread_should_stop);
+bool __kthread_should_park(struct task_struct *k)
+{
+ return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
+}
+EXPORT_SYMBOL_GPL(__kthread_should_park);
+
/**
* kthread_should_park - should this kthread park now?
*
@@ -114,7 +121,7 @@ EXPORT_SYMBOL(kthread_should_stop);
*/
bool kthread_should_park(void)
{
- return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
+ return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
@@ -599,7 +606,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
struct lock_class_key *key)
{
memset(worker, 0, sizeof(struct kthread_worker));
- spin_lock_init(&worker->lock);
+ raw_spin_lock_init(&worker->lock);
lockdep_set_class_and_name(&worker->lock, key, name);
INIT_LIST_HEAD(&worker->work_list);
INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +648,21 @@ repeat:
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
- spin_lock_irq(&worker->lock);
+ raw_spin_lock_irq(&worker->lock);
worker->task = NULL;
- spin_unlock_irq(&worker->lock);
+ raw_spin_unlock_irq(&worker->lock);
return 0;
}
work = NULL;
- spin_lock_irq(&worker->lock);
+ raw_spin_lock_irq(&worker->lock);
if (!list_empty(&worker->work_list)) {
work = list_first_entry(&worker->work_list,
struct kthread_work, node);
list_del_init(&work->node);
}
worker->current_work = work;
- spin_unlock_irq(&worker->lock);
+ raw_spin_unlock_irq(&worker->lock);
if (work) {
__set_current_state(TASK_RUNNING);
@@ -675,7 +682,7 @@ __kthread_create_worker(int cpu, unsigned int flags,
{
struct kthread_worker *worker;
struct task_struct *task;
- int node = -1;
+ int node = NUMA_NO_NODE;
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
if (!worker)
@@ -812,12 +819,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
bool ret = false;
unsigned long flags;
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
if (!queuing_blocked(worker, work)) {
kthread_insert_work(worker, work, &worker->work_list);
ret = true;
}
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -835,6 +842,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
struct kthread_work *work = &dwork->work;
struct kthread_worker *worker = work->worker;
+ unsigned long flags;
/*
* This might happen when a pending work is reinitialized.
@@ -843,7 +851,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
if (WARN_ON_ONCE(!worker))
return;
- spin_lock(&worker->lock);
+ raw_spin_lock_irqsave(&worker->lock, flags);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
@@ -852,7 +860,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
list_del_init(&work->node);
kthread_insert_work(worker, work, &worker->work_list);
- spin_unlock(&worker->lock);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
@@ -908,14 +916,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
unsigned long flags;
bool ret = false;
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
if (!queuing_blocked(worker, work)) {
__kthread_queue_delayed_work(worker, dwork, delay);
ret = true;
}
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +959,7 @@ void kthread_flush_work(struct kthread_work *work)
if (!worker)
return;
- spin_lock_irq(&worker->lock);
+ raw_spin_lock_irq(&worker->lock);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
@@ -963,7 +971,7 @@ void kthread_flush_work(struct kthread_work *work)
else
noop = true;
- spin_unlock_irq(&worker->lock);
+ raw_spin_unlock_irq(&worker->lock);
if (!noop)
wait_for_completion(&fwork.done);
@@ -996,9 +1004,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
* any queuing is blocked by setting the canceling counter.
*/
work->canceling++;
- spin_unlock_irqrestore(&worker->lock, *flags);
+ raw_spin_unlock_irqrestore(&worker->lock, *flags);
del_timer_sync(&dwork->timer);
- spin_lock_irqsave(&worker->lock, *flags);
+ raw_spin_lock_irqsave(&worker->lock, *flags);
work->canceling--;
}
@@ -1045,7 +1053,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
unsigned long flags;
int ret = false;
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
/* Do not bother with canceling when never queued. */
if (!work->worker)
@@ -1062,7 +1070,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
fast_queue:
__kthread_queue_delayed_work(worker, dwork, delay);
out:
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1084,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
if (!worker)
goto out;
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
@@ -1090,13 +1098,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
* In the meantime, block any queuing by setting the canceling counter.
*/
work->canceling++;
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
kthread_flush_work(work);
- spin_lock_irqsave(&worker->lock, flags);
+ raw_spin_lock_irqsave(&worker->lock, flags);
work->canceling--;
out_fast:
- spin_unlock_irqrestore(&worker->lock, flags);
+ raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
return ret;
}
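/*
 * Not part of the patch: a minimal sketch of why worker->lock is converted
 * to a raw spinlock - kthread work can be queued from hard interrupt
 * context (for instance a hrtimer callback), which on PREEMPT_RT must not
 * take a sleeping spinlock. Names are illustrative and the worker/work
 * initialization is omitted.
 */
static struct kthread_worker *my_worker;
static struct kthread_work my_work;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* Hard IRQ context; ends up in kthread_queue_work() taking worker->lock. */
	kthread_queue_work(my_worker, &my_work);
	return HRTIMER_NORESTART;
}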
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 95932333a48b..21cb81fe6359 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -45,11 +45,14 @@
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
+#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/kprobes.h>
#include <asm/sections.h>
@@ -81,6 +84,7 @@ module_param(lock_stat, int, 0644);
* code to recurse back into the lockdep code...
*/
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static struct task_struct *lockdep_selftest_task_struct;
static int graph_lock(void)
{
@@ -130,13 +134,17 @@ static inline int debug_locks_off_graph_unlock(void)
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
/*
* All data structures here are protected by the global debug_lock.
*
- * Mutex key structs only get allocated, once during bootup, and never
- * get freed - this significantly simplifies the debugging code.
+ * nr_lock_classes is the number of elements of lock_classes[] that is
+ * in use.
*/
+#define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
+#define KEYHASH_SIZE (1UL << KEYHASH_BITS)
+static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
#ifndef CONFIG_DEBUG_LOCKDEP
static
@@ -277,11 +285,42 @@ static inline void lock_release_holdtime(struct held_lock *hlock)
#endif
/*
- * We keep a global list of all lock classes. The list only grows,
- * never shrinks. The list is only accessed with the lockdep
- * spinlock lock held.
+ * We keep a global list of all lock classes. The list is only accessed with
+ * the lockdep spinlock lock held. free_lock_classes is a list with free
+ * elements. These elements are linked together by the lock_entry member in
+ * struct lock_class.
*/
LIST_HEAD(all_lock_classes);
+static LIST_HEAD(free_lock_classes);
+
+/**
+ * struct pending_free - information about data structures about to be freed
+ * @zapped: Head of a list with struct lock_class elements.
+ * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
+ * are about to be freed.
+ */
+struct pending_free {
+ struct list_head zapped;
+ DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
+};
+
+/**
+ * struct delayed_free - data structures used for delayed freeing
+ *
+ * A data structure for delayed freeing of data structures that may be
+ * accessed by RCU readers at the time these were freed.
+ *
+ * @rcu_head: Used to schedule an RCU callback for freeing data structures.
+ * @index: Index of @pf to which freed data structures are added.
+ * @scheduled: Whether or not an RCU callback has been scheduled.
+ * @pf: Array with information about data structures about to be freed.
+ */
+static struct delayed_free {
+ struct rcu_head rcu_head;
+ int index;
+ int scheduled;
+ struct pending_free pf[2];
+} delayed_free;
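/*
 * Not part of the patch - a generic sketch (locking and bookkeeping omitted)
 * of how a two-slot delayed-free scheme of this shape is driven: removals
 * are collected in pf[index] and a single RCU callback is scheduled; when it
 * fires the slots are flipped so that new removals do not have to wait for
 * the grace period already in flight. The helpers below are illustrative,
 * not the ones added later in this series.
 */
static void example_free_zapped_rcu(struct rcu_head *ch)
{
	/* Free/reinit everything on delayed_free.pf[delayed_free.index].zapped. */
	delayed_free.index ^= 1;		/* flip slots for new removals */
	delayed_free.scheduled = false;
}

static void example_schedule_free(void)
{
	if (delayed_free.scheduled)
		return;
	delayed_free.scheduled = true;
	call_rcu(&delayed_free.rcu_head, example_free_zapped_rcu);
}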
/*
* The lockdep classes are in a hash-table as well, for fast lookup:
@@ -331,6 +370,11 @@ void lockdep_on(void)
}
EXPORT_SYMBOL(lockdep_on);
+void lockdep_set_selftest_task(struct task_struct *task)
+{
+ lockdep_selftest_task_struct = task;
+}
+
/*
* Debugging switches:
*/
@@ -599,7 +643,7 @@ static int very_verbose(struct lock_class *class)
* Is this the address of a static object:
*/
#ifdef __KERNEL__
-static int static_obj(void *obj)
+static int static_obj(const void *obj)
{
unsigned long start = (unsigned long) &_stext,
end = (unsigned long) &_end,
@@ -716,6 +760,17 @@ static bool assign_lock_key(struct lockdep_map *lock)
{
unsigned long can_addr, addr = (unsigned long)lock;
+#ifdef __KERNEL__
+ /*
+ * lockdep_free_key_range() assumes that struct lock_class_key
+ * objects do not overlap. Since we use the address of lock
+ * objects as class key for static objects, check whether the
+ * size of lock_class_key objects does not exceed the size of
+ * the smallest lock object.
+ */
+ BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
+#endif
+
if (__is_kernel_percpu_address(addr, &can_addr))
lock->key = (void *)can_addr;
else if (__is_module_percpu_address(addr, &can_addr))
@@ -735,6 +790,280 @@ static bool assign_lock_key(struct lockdep_map *lock)
return true;
}
+#ifdef CONFIG_DEBUG_LOCKDEP
+
+/* Check whether element @e occurs in list @h */
+static bool in_list(struct list_head *e, struct list_head *h)
+{
+ struct list_head *f;
+
+ list_for_each(f, h) {
+ if (e == f)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Check whether entry @e occurs in any of the locks_after or locks_before
+ * lists.
+ */
+static bool in_any_class_list(struct list_head *e)
+{
+ struct lock_class *class;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+ class = &lock_classes[i];
+ if (in_list(e, &class->locks_after) ||
+ in_list(e, &class->locks_before))
+ return true;
+ }
+ return false;
+}
+
+static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
+{
+ struct lock_list *e;
+
+ list_for_each_entry(e, h, entry) {
+ if (e->links_to != c) {
+ printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
+ c->name ? : "(?)",
+ (unsigned long)(e - list_entries),
+ e->links_to && e->links_to->name ?
+ e->links_to->name : "(?)",
+ e->class && e->class->name ? e->class->name :
+ "(?)");
+ return false;
+ }
+ }
+ return true;
+}
+
+static u16 chain_hlocks[];
+
+static bool check_lock_chain_key(struct lock_chain *chain)
+{
+#ifdef CONFIG_PROVE_LOCKING
+ u64 chain_key = 0;
+ int i;
+
+ for (i = chain->base; i < chain->base + chain->depth; i++)
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
+ /*
+ * The 'unsigned long long' casts avoid that a compiler warning
+ * is reported when building tools/lib/lockdep.
+ */
+ if (chain->chain_key != chain_key) {
+ printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
+ (unsigned long long)(chain - lock_chains),
+ (unsigned long long)chain->chain_key,
+ (unsigned long long)chain_key);
+ return false;
+ }
+#endif
+ return true;
+}
+
+static bool in_any_zapped_class_list(struct lock_class *class)
+{
+ struct pending_free *pf;
+ int i;
+
+ for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
+ if (in_list(&class->lock_entry, &pf->zapped))
+ return true;
+ }
+
+ return false;
+}
+
+static bool __check_data_structures(void)
+{
+ struct lock_class *class;
+ struct lock_chain *chain;
+ struct hlist_head *head;
+ struct lock_list *e;
+ int i;
+
+ /* Check whether all classes occur in a lock list. */
+ for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+ class = &lock_classes[i];
+ if (!in_list(&class->lock_entry, &all_lock_classes) &&
+ !in_list(&class->lock_entry, &free_lock_classes) &&
+ !in_any_zapped_class_list(class)) {
+ printk(KERN_INFO "class %px/%s is not in any class list\n",
+ class, class->name ? : "(?)");
+ return false;
+ }
+ }
+
+ /* Check whether all classes have valid lock lists. */
+ for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+ class = &lock_classes[i];
+ if (!class_lock_list_valid(class, &class->locks_before))
+ return false;
+ if (!class_lock_list_valid(class, &class->locks_after))
+ return false;
+ }
+
+ /* Check the chain_key of all lock chains. */
+ for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
+ head = chainhash_table + i;
+ hlist_for_each_entry_rcu(chain, head, entry) {
+ if (!check_lock_chain_key(chain))
+ return false;
+ }
+ }
+
+ /*
+ * Check whether all list entries that are in use occur in a class
+ * lock list.
+ */
+ for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+ e = list_entries + i;
+ if (!in_any_class_list(&e->entry)) {
+ printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ e->class->name ? : "(?)",
+ e->links_to->name ? : "(?)");
+ return false;
+ }
+ }
+
+ /*
+ * Check whether all list entries that are not in use do not occur in
+ * a class lock list.
+ */
+ for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+ e = list_entries + i;
+ if (in_any_class_list(&e->entry)) {
+ printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ e->class && e->class->name ? e->class->name :
+ "(?)",
+ e->links_to && e->links_to->name ?
+ e->links_to->name : "(?)");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int check_consistency = 0;
+module_param(check_consistency, int, 0644);
+
+static void check_data_structures(void)
+{
+ static bool once = false;
+
+ if (check_consistency && !once) {
+ if (!__check_data_structures()) {
+ once = true;
+ WARN_ON(once);
+ }
+ }
+}
+
+#else /* CONFIG_DEBUG_LOCKDEP */
+
+static inline void check_data_structures(void) { }
+
+#endif /* CONFIG_DEBUG_LOCKDEP */
+
+/*
+ * Initialize the lock_classes[] array elements, the free_lock_classes list
+ * and also the delayed_free structure.
+ */
+static void init_data_structures_once(void)
+{
+ static bool initialization_happened;
+ int i;
+
+ if (likely(initialization_happened))
+ return;
+
+ initialization_happened = true;
+
+ init_rcu_head(&delayed_free.rcu_head);
+ INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
+ INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
+
+ for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+ list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
+ INIT_LIST_HEAD(&lock_classes[i].locks_after);
+ INIT_LIST_HEAD(&lock_classes[i].locks_before);
+ }
+}
+
+static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
+{
+ unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
+
+ return lock_keys_hash + hash;
+}
+
+/* Register a dynamically allocated key. */
+void lockdep_register_key(struct lock_class_key *key)
+{
+ struct hlist_head *hash_head;
+ struct lock_class_key *k;
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(static_obj(key)))
+ return;
+ hash_head = keyhashentry(key);
+
+ raw_local_irq_save(flags);
+ if (!graph_lock())
+ goto restore_irqs;
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+ if (WARN_ON_ONCE(k == key))
+ goto out_unlock;
+ }
+ hlist_add_head_rcu(&key->hash_entry, hash_head);
+out_unlock:
+ graph_unlock();
+restore_irqs:
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_register_key);
+
+/* Check whether a key has been registered as a dynamic key. */
+static bool is_dynamic_key(const struct lock_class_key *key)
+{
+ struct hlist_head *hash_head;
+ struct lock_class_key *k;
+ bool found = false;
+
+ if (WARN_ON_ONCE(static_obj(key)))
+ return false;
+
+ /*
+ * If lock debugging is disabled lock_keys_hash[] may contain
+ * pointers to memory that has already been freed. Avoid triggering
+ * a use-after-free in that case by returning early.
+ */
+ if (!debug_locks)
+ return true;
+
+ hash_head = keyhashentry(key);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+ if (k == key) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return found;
+}
+
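/*
 * Not part of the patch: a minimal sketch of the intended use of a
 * dynamically registered key - a lock embedded in dynamically allocated
 * memory gets its own key so lockdep does not fold all instances into one
 * class. The structure is illustrative, and the matching
 * lockdep_unregister_key() (added elsewhere in this series) must be called
 * before the object is freed.
 */
struct my_obj {
	spinlock_t		lock;
	struct lock_class_key	key;
};

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	lockdep_register_key(&obj->key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
	return obj;
}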
/*
* Register a lock's class in the hash-table, if the class is not present
* yet. Otherwise we look it up. We cache the result in the lock object
@@ -756,7 +1085,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
if (!lock->key) {
if (!assign_lock_key(lock))
return NULL;
- } else if (!static_obj(lock->key)) {
+ } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
return NULL;
}
@@ -775,11 +1104,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
goto out_unlock_set;
}
- /*
- * Allocate a new key from the static array, and add it to
- * the hash:
- */
- if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
+ init_data_structures_once();
+
+ /* Allocate a new lock class and add it to the hash. */
+ class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
+ lock_entry);
+ if (!class) {
if (!debug_locks_off_graph_unlock()) {
return NULL;
}
@@ -788,13 +1118,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
dump_stack();
return NULL;
}
- class = lock_classes + nr_lock_classes++;
+ nr_lock_classes++;
debug_atomic_inc(nr_unused_locks);
class->key = key;
class->name = lock->name;
class->subclass = subclass;
- INIT_LIST_HEAD(&class->locks_before);
- INIT_LIST_HEAD(&class->locks_after);
+ WARN_ON_ONCE(!list_empty(&class->locks_before));
+ WARN_ON_ONCE(!list_empty(&class->locks_after));
class->name_version = count_matching_names(class);
/*
* We use RCU's safe list-add method to make
@@ -802,9 +1132,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
*/
hlist_add_head_rcu(&class->hash_entry, hash_head);
/*
- * Add it to the global list of classes:
+ * Remove the class from the free list and add it to the global list
+ * of classes.
*/
- list_add_tail(&class->lock_entry, &all_lock_classes);
+ list_move_tail(&class->lock_entry, &all_lock_classes);
if (verbose(class)) {
graph_unlock();
@@ -845,7 +1176,10 @@ out_set_class_cache:
*/
static struct lock_list *alloc_list_entry(void)
{
- if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
+ int idx = find_first_zero_bit(list_entries_in_use,
+ ARRAY_SIZE(list_entries));
+
+ if (idx >= ARRAY_SIZE(list_entries)) {
if (!debug_locks_off_graph_unlock())
return NULL;
@@ -853,13 +1187,16 @@ static struct lock_list *alloc_list_entry(void)
dump_stack();
return NULL;
}
- return list_entries + nr_list_entries++;
+ nr_list_entries++;
+ __set_bit(idx, list_entries_in_use);
+ return list_entries + idx;
}
/*
* Add a new dependency to the head of the list:
*/
-static int add_lock_to_list(struct lock_class *this, struct list_head *head,
+static int add_lock_to_list(struct lock_class *this,
+ struct lock_class *links_to, struct list_head *head,
unsigned long ip, int distance,
struct stack_trace *trace)
{
@@ -873,6 +1210,7 @@ static int add_lock_to_list(struct lock_class *this, struct list_head *head,
return 0;
entry->class = this;
+ entry->links_to = links_to;
entry->distance = distance;
entry->trace = *trace;
/*
@@ -955,7 +1293,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
unsigned long nr;
nr = lock - list_entries;
- WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+ WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
lock->parent = parent;
lock->class->dep_gen_id = lockdep_dependency_gen_id;
}
@@ -965,7 +1303,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
unsigned long nr;
nr = lock - list_entries;
- WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+ WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}
@@ -1624,29 +1962,18 @@ static const char *state_rnames[] = {
static inline const char *state_name(enum lock_usage_bit bit)
{
- return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+ return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}
static int exclusive_bit(int new_bit)
{
- /*
- * USED_IN
- * USED_IN_READ
- * ENABLED
- * ENABLED_READ
- *
- * bit 0 - write/read
- * bit 1 - used_in/enabled
- * bit 2+ state
- */
-
- int state = new_bit & ~3;
- int dir = new_bit & 2;
+ int state = new_bit & LOCK_USAGE_STATE_MASK;
+ int dir = new_bit & LOCK_USAGE_DIR_MASK;
/*
* keep state, bit flip the direction and strip read.
*/
- return state | (dir ^ 2);
+ return state | (dir ^ LOCK_USAGE_DIR_MASK);
}
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
@@ -1842,6 +2169,24 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct lock_list this;
int ret;
+ if (!hlock_class(prev)->key || !hlock_class(next)->key) {
+ /*
+ * The warning statements below may trigger a use-after-free
+ * of the class name. It is better to trigger a use-after free
+ * and to have the class name most of the time instead of not
+ * having the class name available.
+ */
+ WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
+ "Detected use-after-free of lock class %px/%s\n",
+ hlock_class(prev),
+ hlock_class(prev)->name);
+ WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
+ "Detected use-after-free of lock class %px/%s\n",
+ hlock_class(next),
+ hlock_class(next)->name);
+ return 2;
+ }
+
/*
* Prove that the new <prev> -> <next> dependency would not
* create a circular dependency in the graph. (We do this by
@@ -1918,14 +2263,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
- ret = add_lock_to_list(hlock_class(next),
+ ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
&hlock_class(prev)->locks_after,
next->acquire_ip, distance, trace);
if (!ret)
return 0;
- ret = add_lock_to_list(hlock_class(prev),
+ ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
&hlock_class(next)->locks_before,
next->acquire_ip, distance, trace);
if (!ret)
@@ -2018,8 +2363,8 @@ out_bug:
return 0;
}
-unsigned long nr_lock_chains;
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
@@ -2153,6 +2498,33 @@ static int check_no_collision(struct task_struct *curr,
}
/*
+ * Given an index that is >= -1, return the index of the next lock chain.
+ * Return -2 if there is no next lock chain.
+ */
+long lockdep_next_lockchain(long i)
+{
+ i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
+ return i < ARRAY_SIZE(lock_chains) ? i : -2;
+}
+
+unsigned long lock_chain_count(void)
+{
+ return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
+}
+
+/* Must be called with the graph lock held. */
+static struct lock_chain *alloc_lock_chain(void)
+{
+ int idx = find_first_zero_bit(lock_chains_in_use,
+ ARRAY_SIZE(lock_chains));
+
+ if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
+ return NULL;
+ __set_bit(idx, lock_chains_in_use);
+ return lock_chains + idx;
+}
+
+/*
* Adds a dependency chain into chain hashtable. And must be called with
* graph_lock held.
*
@@ -2169,19 +2541,15 @@ static inline int add_chain_cache(struct task_struct *curr,
int i, j;
/*
- * Allocate a new chain entry from the static array, and add
- * it to the hash:
- */
-
- /*
- * We might need to take the graph lock, ensure we've got IRQs
+ * The caller must hold the graph lock, ensure we've got IRQs
* disabled to make this an IRQ-safe lock.. for recursion reasons
* lockdep won't complain about its own locking errors.
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
- if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
+ chain = alloc_lock_chain();
+ if (!chain) {
if (!debug_locks_off_graph_unlock())
return 0;
@@ -2189,7 +2557,6 @@ static inline int add_chain_cache(struct task_struct *curr,
dump_stack();
return 0;
}
- chain = lock_chains + nr_lock_chains++;
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
i = get_first_held_lock(curr, hlock);
@@ -2206,16 +2573,8 @@ static inline int add_chain_cache(struct task_struct *curr,
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = class - lock_classes;
- }
-
- if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
nr_chain_hlocks += chain->depth;
-
-#ifdef CONFIG_DEBUG_LOCKDEP
- /*
- * Important for check_no_collision().
- */
- if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+ } else {
if (!debug_locks_off_graph_unlock())
return 0;
@@ -2223,7 +2582,6 @@ static inline int add_chain_cache(struct task_struct *curr,
dump_stack();
return 0;
}
-#endif
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
@@ -2233,19 +2591,16 @@ static inline int add_chain_cache(struct task_struct *curr,
}
/*
- * Look up a dependency chain.
+ * Look up a dependency chain. Must be called with either the graph lock or
+ * the RCU read lock held.
*/
static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
{
struct hlist_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
- /*
- * We can walk it lock-free, because entries only get added
- * to the hash:
- */
hlist_for_each_entry_rcu(chain, hash_head, entry) {
- if (chain->chain_key == chain_key) {
+ if (READ_ONCE(chain->chain_key) == chain_key) {
debug_atomic_inc(chain_lookup_hits);
return chain;
}
@@ -2662,8 +3017,8 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit)
{
int excl_bit = exclusive_bit(new_bit);
- int read = new_bit & 1;
- int dir = new_bit & 2;
+ int read = new_bit & LOCK_USAGE_READ_MASK;
+ int dir = new_bit & LOCK_USAGE_DIR_MASK;
/*
* mark USED_IN has to look forwards -- to ensure no dependency
@@ -2687,19 +3042,19 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
* states.
*/
if ((!read || !dir || STRICT_READ_CHECKS) &&
- !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
+ !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
return 0;
/*
* Check for read in write conflicts
*/
if (!read) {
- if (!valid_state(curr, this, new_bit, excl_bit + 1))
+ if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
return 0;
if (STRICT_READ_CHECKS &&
- !usage(curr, this, excl_bit + 1,
- state_name(new_bit + 1)))
+ !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
+ state_name(new_bit + LOCK_USAGE_READ_MASK)))
return 0;
}
@@ -2709,35 +3064,28 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
return 1;
}
-enum mark_type {
-#define LOCKDEP_STATE(__STATE) __STATE,
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
-};
-
/*
* Mark all held locks with a usage bit:
*/
static int
-mark_held_locks(struct task_struct *curr, enum mark_type mark)
+mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
{
- enum lock_usage_bit usage_bit;
struct held_lock *hlock;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
+ enum lock_usage_bit hlock_bit = base_bit;
hlock = curr->held_locks + i;
- usage_bit = 2 + (mark << 2); /* ENABLED */
if (hlock->read)
- usage_bit += 1; /* READ */
+ hlock_bit += LOCK_USAGE_READ_MASK;
- BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+ BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
if (!hlock->check)
continue;
- if (!mark_lock(curr, hlock, usage_bit))
+ if (!mark_lock(curr, hlock, hlock_bit))
return 0;
}
@@ -2758,7 +3106,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
* We are going to turn hardirqs on, so set the
* usage bit for all held locks:
*/
- if (!mark_held_locks(curr, HARDIRQ))
+ if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
return;
/*
* If we have softirqs enabled, then set the usage
@@ -2766,7 +3114,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
* this bit from being set before)
*/
if (curr->softirqs_enabled)
- if (!mark_held_locks(curr, SOFTIRQ))
+ if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
return;
curr->hardirq_enable_ip = ip;
@@ -2814,6 +3162,7 @@ void lockdep_hardirqs_on(unsigned long ip)
__trace_hardirqs_on_caller(ip);
current->lockdep_recursion = 0;
}
+NOKPROBE_SYMBOL(lockdep_hardirqs_on);
/*
* Hardirqs were disabled:
@@ -2843,6 +3192,7 @@ void lockdep_hardirqs_off(unsigned long ip)
} else
debug_atomic_inc(redundant_hardirqs_off);
}
+NOKPROBE_SYMBOL(lockdep_hardirqs_off);
/*
* Softirqs will be enabled:
@@ -2880,7 +3230,7 @@ void trace_softirqs_on(unsigned long ip)
* enabled too:
*/
if (curr->hardirqs_enabled)
- mark_held_locks(curr, SOFTIRQ);
+ mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
current->lockdep_recursion = 0;
}
@@ -3119,13 +3469,12 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
if (DEBUG_LOCKS_WARN_ON(!key))
return;
/*
- * Sanity check, the lock-class key must be persistent:
+ * Sanity check, the lock-class key must either have been allocated
+ * statically or must have been registered as a dynamic key.
*/
- if (!static_obj(key)) {
- printk("BUG: key %px not in .data!\n", key);
- /*
- * What it says above ^^^^^, I suggest you read it.
- */
+ if (!static_obj(key) && !is_dynamic_key(key)) {
+ if (debug_locks)
+ printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
DEBUG_LOCKS_WARN_ON(1);
return;
}
@@ -3335,6 +3684,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (nest_lock && !__lock_is_held(nest_lock, -1))
return print_lock_nested_lock_not_held(curr, hlock, ip);
+ if (!debug_locks_silent) {
+ WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
+ WARN_ON_ONCE(!hlock_class(hlock)->key);
+ }
+
if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
return 0;
@@ -3497,6 +3851,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
unsigned int depth;
int i;
+ if (unlikely(!debug_locks))
+ return 0;
+
depth = curr->lockdep_depth;
/*
* This function is about (re)setting the class of a held lock,
@@ -3535,6 +3892,9 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
unsigned int depth;
int i;
+ if (unlikely(!debug_locks))
+ return 0;
+
depth = curr->lockdep_depth;
/*
* This function is about (re)setting the class of a held lock,
@@ -3650,7 +4010,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
return 0;
}
-static int __lock_is_held(const struct lockdep_map *lock, int read)
+static nokprobe_inline
+int __lock_is_held(const struct lockdep_map *lock, int read)
{
struct task_struct *curr = current;
int i;
@@ -3883,6 +4244,7 @@ int lock_is_held_type(const struct lockdep_map *lock, int read)
return ret;
}
EXPORT_SYMBOL_GPL(lock_is_held_type);
+NOKPROBE_SYMBOL(lock_is_held_type);
struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
{
@@ -4123,29 +4485,131 @@ void lockdep_reset(void)
raw_local_irq_restore(flags);
}
+/* Remove a class from a lock chain. Must be called with the graph lock held. */
+static void remove_class_from_lock_chain(struct pending_free *pf,
+ struct lock_chain *chain,
+ struct lock_class *class)
+{
+#ifdef CONFIG_PROVE_LOCKING
+ struct lock_chain *new_chain;
+ u64 chain_key;
+ int i;
+
+ for (i = chain->base; i < chain->base + chain->depth; i++) {
+ if (chain_hlocks[i] != class - lock_classes)
+ continue;
+ /* The code below leaks one chain_hlocks[] entry. */
+ if (--chain->depth > 0) {
+ memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
+ (chain->base + chain->depth - i) *
+ sizeof(chain_hlocks[0]));
+ }
+ /*
+ * Each lock class occurs at most once in a lock chain so once
+ * we found a match we can break out of this loop.
+ */
+ goto recalc;
+ }
+ /* Since the chain has not been modified, return. */
+ return;
+
+recalc:
+ chain_key = 0;
+ for (i = chain->base; i < chain->base + chain->depth; i++)
+ chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
+ if (chain->depth && chain->chain_key == chain_key)
+ return;
+ /* Overwrite the chain key for concurrent RCU readers. */
+ WRITE_ONCE(chain->chain_key, chain_key);
+ /*
+ * Note: calling hlist_del_rcu() from inside a
+ * hlist_for_each_entry_rcu() loop is safe.
+ */
+ hlist_del_rcu(&chain->entry);
+ __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
+ if (chain->depth == 0)
+ return;
+ /*
+ * If the modified lock chain matches an existing lock chain, drop
+ * the modified lock chain.
+ */
+ if (lookup_chain_cache(chain_key))
+ return;
+ new_chain = alloc_lock_chain();
+ if (WARN_ON_ONCE(!new_chain)) {
+ debug_locks_off();
+ return;
+ }
+ *new_chain = *chain;
+ hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
+#endif
+}
+
+/* Must be called with the graph lock held. */
+static void remove_class_from_lock_chains(struct pending_free *pf,
+ struct lock_class *class)
+{
+ struct lock_chain *chain;
+ struct hlist_head *head;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
+ head = chainhash_table + i;
+ hlist_for_each_entry_rcu(chain, head, entry) {
+ remove_class_from_lock_chain(pf, chain, class);
+ }
+ }
+}
+
/*
* Remove all references to a lock class. The caller must hold the graph lock.
*/
-static void zap_class(struct lock_class *class)
+static void zap_class(struct pending_free *pf, struct lock_class *class)
{
+ struct lock_list *entry;
int i;
+ WARN_ON_ONCE(!class->key);
+
/*
* Remove all dependencies this lock is
* involved in:
*/
- for (i = 0; i < nr_list_entries; i++) {
- if (list_entries[i].class == class)
- list_del_rcu(&list_entries[i].entry);
+ for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+ entry = list_entries + i;
+ if (entry->class != class && entry->links_to != class)
+ continue;
+ __clear_bit(i, list_entries_in_use);
+ nr_list_entries--;
+ list_del_rcu(&entry->entry);
+ }
+ if (list_empty(&class->locks_after) &&
+ list_empty(&class->locks_before)) {
+ list_move_tail(&class->lock_entry, &pf->zapped);
+ hlist_del_rcu(&class->hash_entry);
+ WRITE_ONCE(class->key, NULL);
+ WRITE_ONCE(class->name, NULL);
+ nr_lock_classes--;
+ } else {
+ WARN_ONCE(true, "%s() failed for class %s\n", __func__,
+ class->name);
}
- /*
- * Unhash the class and remove it from the all_lock_classes list:
- */
- hlist_del_rcu(&class->hash_entry);
- list_del(&class->lock_entry);
- RCU_INIT_POINTER(class->key, NULL);
- RCU_INIT_POINTER(class->name, NULL);
+ remove_class_from_lock_chains(pf, class);
+}
+
+static void reinit_class(struct lock_class *class)
+{
+ void *const p = class;
+ const unsigned int offset = offsetof(struct lock_class, key);
+
+ WARN_ON_ONCE(!class->lock_entry.next);
+ WARN_ON_ONCE(!list_empty(&class->locks_after));
+ WARN_ON_ONCE(!list_empty(&class->locks_before));
+ memset(p + offset, 0, sizeof(*class) - offset);
+ WARN_ON_ONCE(!class->lock_entry.next);
+ WARN_ON_ONCE(!list_empty(&class->locks_after));
+ WARN_ON_ONCE(!list_empty(&class->locks_before));
}
static inline int within(const void *addr, void *start, unsigned long size)
@@ -4153,55 +4617,175 @@ static inline int within(const void *addr, void *start, unsigned long size)
return addr >= start && addr < start + size;
}
+static bool inside_selftest(void)
+{
+ return current == lockdep_selftest_task_struct;
+}
+
+/* The caller must hold the graph lock. */
+static struct pending_free *get_pending_free(void)
+{
+ return delayed_free.pf + delayed_free.index;
+}
+
+static void free_zapped_rcu(struct rcu_head *cb);
+
/*
- * Used in module.c to remove lock classes from memory that is going to be
- * freed; and possibly re-used by other modules.
- *
- * We will have had one sync_sched() before getting here, so we're guaranteed
- * nobody will look up these exact classes -- they're properly dead but still
- * allocated.
+ * Schedule an RCU callback if no RCU callback is pending. Must be called with
+ * the graph lock held.
*/
-void lockdep_free_key_range(void *start, unsigned long size)
+static void call_rcu_zapped(struct pending_free *pf)
+{
+ WARN_ON_ONCE(inside_selftest());
+
+ if (list_empty(&pf->zapped))
+ return;
+
+ if (delayed_free.scheduled)
+ return;
+
+ delayed_free.scheduled = true;
+
+ WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
+ delayed_free.index ^= 1;
+
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+}
+
+/* The caller must hold the graph lock. May be called from RCU context. */
+static void __free_zapped_classes(struct pending_free *pf)
{
struct lock_class *class;
- struct hlist_head *head;
+
+ check_data_structures();
+
+ list_for_each_entry(class, &pf->zapped, lock_entry)
+ reinit_class(class);
+
+ list_splice_init(&pf->zapped, &free_lock_classes);
+
+#ifdef CONFIG_PROVE_LOCKING
+ bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
+ pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
+ bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
+#endif
+}
+
+static void free_zapped_rcu(struct rcu_head *ch)
+{
+ struct pending_free *pf;
unsigned long flags;
- int i;
- int locked;
+
+ if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
+ return;
raw_local_irq_save(flags);
- locked = graph_lock();
+ if (!graph_lock())
+ goto out_irq;
+
+ /* closed head: the buffer that was handed to call_rcu() */
+ pf = delayed_free.pf + (delayed_free.index ^ 1);
+ __free_zapped_classes(pf);
+ delayed_free.scheduled = false;
/*
- * Unhash all classes that were created by this module:
+ * If there's anything on the open list, close and start a new callback.
*/
+ call_rcu_zapped(delayed_free.pf + delayed_free.index);
+
+ graph_unlock();
+out_irq:
+ raw_local_irq_restore(flags);
+}
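
To make the deferred-freeing machinery above easier to follow, here is a simplified sketch of the double-buffer scheme that get_pending_free(), call_rcu_zapped() and free_zapped_rcu() operate on. The field names mirror the patch, but the layout is abbreviated (the per-buffer chain bitmap is elided) and exact types are not asserted:

    struct pending_free {                  /* one buffer of zapped state      */
            struct list_head zapped;       /* lock classes awaiting an RCU GP */
            /* ... bitmap of lock chains being freed ... */
    };

    static struct delayed_free {
            struct rcu_head rcu_head;      /* at most one callback in flight  */
            int index;                     /* pf[index] is open for writers   */
            int scheduled;                 /* is an RCU callback pending?     */
            struct pending_free pf[2];     /* ping-pong buffers               */
    } delayed_free;

    /*
     * Writers (zap_class() callers) fill pf[index] under the graph lock.
     * call_rcu_zapped() closes that buffer by flipping 'index' and posting
     * one RCU callback; after a grace period, free_zapped_rcu() frees the
     * closed buffer and, if the newly opened buffer has meanwhile collected
     * more work, immediately posts the next callback.
     */
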
+
+/*
+ * Remove all lock classes from the class hash table and from the
+ * all_lock_classes list whose key or name is in the address range [start,
+ * start + size). Move these lock classes to the pf->zapped list. Must
+ * be called with the graph lock held.
+ */
+static void __lockdep_free_key_range(struct pending_free *pf, void *start,
+ unsigned long size)
+{
+ struct lock_class *class;
+ struct hlist_head *head;
+ int i;
+
+ /* Unhash all classes that were created by a module. */
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
hlist_for_each_entry_rcu(class, head, hash_entry) {
- if (within(class->key, start, size))
- zap_class(class);
- else if (within(class->name, start, size))
- zap_class(class);
+ if (!within(class->key, start, size) &&
+ !within(class->name, start, size))
+ continue;
+ zap_class(pf, class);
}
}
+}
- if (locked)
- graph_unlock();
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one synchronize_rcu() before getting here, so we're
+ * guaranteed nobody will look up these exact classes -- they're properly dead
+ * but still allocated.
+ */
+static void lockdep_free_key_range_reg(void *start, unsigned long size)
+{
+ struct pending_free *pf;
+ unsigned long flags;
+ int locked;
+
+ init_data_structures_once();
+
+ raw_local_irq_save(flags);
+ locked = graph_lock();
+ if (!locked)
+ goto out_irq;
+
+ pf = get_pending_free();
+ __lockdep_free_key_range(pf, start, size);
+ call_rcu_zapped(pf);
+
+ graph_unlock();
+out_irq:
raw_local_irq_restore(flags);
/*
* Wait for any possible iterators from look_up_lock_class() to pass
* before continuing to free the memory they refer to.
- *
- * sync_sched() is sufficient because the read-side is IRQ disable.
*/
synchronize_rcu();
+}
- /*
- * XXX at this point we could return the resources to the pool;
- * instead we leak them. We would need to change to bitmap allocators
- * instead of the linear allocators we have now.
- */
+/*
+ * Free all lockdep keys in the range [start, start+size). Does not sleep.
+ * Ignores debug_locks. Must only be used by the lockdep selftests.
+ */
+static void lockdep_free_key_range_imm(void *start, unsigned long size)
+{
+ struct pending_free *pf = delayed_free.pf;
+ unsigned long flags;
+
+ init_data_structures_once();
+
+ raw_local_irq_save(flags);
+ arch_spin_lock(&lockdep_lock);
+ __lockdep_free_key_range(pf, start, size);
+ __free_zapped_classes(pf);
+ arch_spin_unlock(&lockdep_lock);
+ raw_local_irq_restore(flags);
+}
+
+void lockdep_free_key_range(void *start, unsigned long size)
+{
+ init_data_structures_once();
+
+ if (inside_selftest())
+ lockdep_free_key_range_imm(start, size);
+ else
+ lockdep_free_key_range_reg(start, size);
}
/*
@@ -4226,14 +4810,12 @@ static bool lock_class_cache_is_registered(struct lockdep_map *lock)
return false;
}
-void lockdep_reset_lock(struct lockdep_map *lock)
+/* The caller must hold the graph lock. Does not sleep. */
+static void __lockdep_reset_lock(struct pending_free *pf,
+ struct lockdep_map *lock)
{
struct lock_class *class;
- unsigned long flags;
- int j, locked;
-
- raw_local_irq_save(flags);
- locked = graph_lock();
+ int j;
/*
* Remove all classes this lock might have:
@@ -4244,27 +4826,104 @@ void lockdep_reset_lock(struct lockdep_map *lock)
*/
class = look_up_lock_class(lock, j);
if (class)
- zap_class(class);
+ zap_class(pf, class);
}
/*
* Debug check: in the end all mapped classes should
* be gone.
*/
- if (unlikely(lock_class_cache_is_registered(lock))) {
- if (debug_locks_off_graph_unlock()) {
- /*
- * We all just reset everything, how did it match?
- */
- WARN_ON(1);
+ if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
+ debug_locks_off();
+}
+
+/*
+ * Remove all information lockdep has about a lock if debug_locks == 1. Free
+ * released data structures from RCU context.
+ */
+static void lockdep_reset_lock_reg(struct lockdep_map *lock)
+{
+ struct pending_free *pf;
+ unsigned long flags;
+ int locked;
+
+ raw_local_irq_save(flags);
+ locked = graph_lock();
+ if (!locked)
+ goto out_irq;
+
+ pf = get_pending_free();
+ __lockdep_reset_lock(pf, lock);
+ call_rcu_zapped(pf);
+
+ graph_unlock();
+out_irq:
+ raw_local_irq_restore(flags);
+}
+
+/*
+ * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
+ * lockdep selftests.
+ */
+static void lockdep_reset_lock_imm(struct lockdep_map *lock)
+{
+ struct pending_free *pf = delayed_free.pf;
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ arch_spin_lock(&lockdep_lock);
+ __lockdep_reset_lock(pf, lock);
+ __free_zapped_classes(pf);
+ arch_spin_unlock(&lockdep_lock);
+ raw_local_irq_restore(flags);
+}
+
+void lockdep_reset_lock(struct lockdep_map *lock)
+{
+ init_data_structures_once();
+
+ if (inside_selftest())
+ lockdep_reset_lock_imm(lock);
+ else
+ lockdep_reset_lock_reg(lock);
+}
+
+/* Unregister a dynamically allocated key. */
+void lockdep_unregister_key(struct lock_class_key *key)
+{
+ struct hlist_head *hash_head = keyhashentry(key);
+ struct lock_class_key *k;
+ struct pending_free *pf;
+ unsigned long flags;
+ bool found = false;
+
+ might_sleep();
+
+ if (WARN_ON_ONCE(static_obj(key)))
+ return;
+
+ raw_local_irq_save(flags);
+ if (!graph_lock())
+ goto out_irq;
+
+ pf = get_pending_free();
+ hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+ if (k == key) {
+ hlist_del_rcu(&k->hash_entry);
+ found = true;
+ break;
}
- goto out_restore;
}
- if (locked)
- graph_unlock();
-
-out_restore:
+ WARN_ON_ONCE(!found);
+ __lockdep_free_key_range(pf, key, 1);
+ call_rcu_zapped(pf);
+ graph_unlock();
+out_irq:
raw_local_irq_restore(flags);
+
+ /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
+ synchronize_rcu();
}
+EXPORT_SYMBOL_GPL(lockdep_unregister_key);
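
As a usage illustration for the new lockdep_unregister_key() API, here is a hedged sketch (not part of the patch; it assumes the lockdep_register_key() counterpart added by the same series): a lock_class_key embedded in dynamically allocated memory is registered before first use and unregistered, which may sleep, before that memory is freed.

    struct foo {
            spinlock_t lock;
            struct lock_class_key key;     /* key lives in heap memory */
    };

    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return NULL;
            lockdep_register_key(&f->key);         /* make the key known to lockdep */
            spin_lock_init(&f->lock);
            lockdep_set_class(&f->lock, &f->key);  /* attach the dynamic key */
            return f;
    }

    static void foo_free(struct foo *f)
    {
            /* Must not be called with f->lock held; may sleep. */
            lockdep_unregister_key(&f->key);
            kfree(f);
    }
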
void __init lockdep_init(void)
{
@@ -4278,20 +4937,24 @@ void __init lockdep_init(void)
printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
- printk(" memory used by lock dependency info: %lu kB\n",
- (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
- sizeof(struct list_head) * CLASSHASH_SIZE +
- sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
- sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
- sizeof(struct list_head) * CHAINHASH_SIZE
+ printk(" memory used by lock dependency info: %zu kB\n",
+ (sizeof(lock_classes) +
+ sizeof(classhash_table) +
+ sizeof(list_entries) +
+ sizeof(list_entries_in_use) +
+ sizeof(chainhash_table) +
+ sizeof(delayed_free)
#ifdef CONFIG_PROVE_LOCKING
- + sizeof(struct circular_queue)
+ + sizeof(lock_cq)
+ + sizeof(lock_chains)
+ + sizeof(lock_chains_in_use)
+ + sizeof(chain_hlocks)
#endif
) / 1024
);
- printk(" per task-struct memory footprint: %lu bytes\n",
- sizeof(struct held_lock) * MAX_LOCK_DEPTH);
+ printk(" per task-struct memory footprint: %zu bytes\n",
+ sizeof(((struct task_struct *)NULL)->held_locks));
}
static void
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 88c847a41c8a..d4c197425f68 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -22,6 +22,10 @@ enum lock_usage_bit {
LOCK_USAGE_STATES
};
+#define LOCK_USAGE_READ_MASK 1
+#define LOCK_USAGE_DIR_MASK 2
+#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
+
/*
* Usage-state bitmasks:
*/
@@ -96,7 +100,8 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);
extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
-extern unsigned long nr_lock_chains;
+long lockdep_next_lockchain(long i);
+unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 3d31f9b0059e..9c49ec645d8b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -104,18 +104,18 @@ static const struct seq_operations lockdep_ops = {
#ifdef CONFIG_PROVE_LOCKING
static void *lc_start(struct seq_file *m, loff_t *pos)
{
+ if (*pos < 0)
+ return NULL;
+
if (*pos == 0)
return SEQ_START_TOKEN;
- if (*pos - 1 < nr_lock_chains)
- return lock_chains + (*pos - 1);
-
- return NULL;
+ return lock_chains + (*pos - 1);
}
static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
- (*pos)++;
+ *pos = lockdep_next_lockchain(*pos - 1) + 1;
return lc_start(m, pos);
}
@@ -268,7 +268,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
- nr_lock_chains, MAX_LOCKDEP_CHAINS);
+ lock_chain_count(), MAX_LOCKDEP_CHAINS);
seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS);
#endif
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 7d0b0ed74404..ad40a2617063 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Module-based torture test facility for locking
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2014
*
- * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
* Davidlohr Bueso <dave@stgolabs.net>
* Based on kernel/rcu/torture.c.
*/
@@ -45,7 +32,7 @@
#include <linux/torture.h>
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
torture_param(int, nwriters_stress, -1,
"Number of write-locking stress-test threads");
@@ -970,7 +957,7 @@ static int __init lock_torture_init(void)
/* Prepare torture context. */
if (onoff_interval > 0) {
firsterr = torture_onoff_init(onoff_holdoff * HZ,
- onoff_interval * HZ);
+ onoff_interval * HZ, NULL);
if (firsterr)
goto unwind;
}
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 8a8c3c208c5e..5e9247dc2515 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -124,9 +124,6 @@ static inline __pure u32 encode_tail(int cpu, int idx)
{
u32 tail;
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(idx > 3);
-#endif
tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
@@ -412,12 +409,28 @@ pv_queue:
idx = node->count++;
tail = encode_tail(smp_processor_id(), idx);
+ /*
+ * 4 nodes are allocated based on the assumption that there will
+ * not be nested NMIs taking spinlocks. That may not be true on
+ * some architectures, even though the chance of needing more than
+ * 4 nodes is still extremely small. When that happens,
+ * we fall back to spinning on the lock directly without using
+ * any MCS node. This is not the most elegant solution, but is
+ * simple enough.
+ */
+ if (unlikely(idx >= MAX_NODES)) {
+ qstat_inc(qstat_lock_no_node, true);
+ while (!queued_spin_trylock(lock))
+ cpu_relax();
+ goto release;
+ }
+
node = grab_mcs_node(node, idx);
/*
* Keep counts of non-zero index values:
*/
- qstat_inc(qstat_lock_idx1 + idx - 1, idx);
+ qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
/*
* Ensure that we increment the head node->count before initialising
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index 42d3d8dc8f49..d73f85388d5c 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -30,6 +30,13 @@
* pv_wait_node - # of vCPU wait's at a non-head queue node
* lock_pending - # of locking operations via pending code
* lock_slowpath - # of locking operations via MCS lock queue
+ * lock_use_node2 - # of locking operations that use 2nd per-CPU node
+ * lock_use_node3 - # of locking operations that use 3rd per-CPU node
+ * lock_use_node4 - # of locking operations that use 4th per-CPU node
+ * lock_no_node - # of locking operations without using per-CPU node
+ *
+ * Subtracting lock_use_node[234] from lock_slowpath will give you
+ * lock_use_node1.
*
* Writing to the "reset_counters" file will reset all the above counter
* values.
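
A hypothetical worked example of that subtraction: with lock_slowpath = 1000, lock_use_node2 = 30, lock_use_node3 = 4 and lock_use_node4 = 0, the number of slowpath acquisitions that never needed more than the first per-CPU node is 1000 - (30 + 4 + 0) = 966.
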
@@ -55,9 +62,10 @@ enum qlock_stats {
qstat_pv_wait_node,
qstat_lock_pending,
qstat_lock_slowpath,
- qstat_lock_idx1,
- qstat_lock_idx2,
- qstat_lock_idx3,
+ qstat_lock_use_node2,
+ qstat_lock_use_node3,
+ qstat_lock_use_node4,
+ qstat_lock_no_node,
qstat_num, /* Total number of statistical counters */
qstat_reset_cnts = qstat_num,
};
@@ -85,9 +93,10 @@ static const char * const qstat_names[qstat_num + 1] = {
[qstat_pv_wait_node] = "pv_wait_node",
[qstat_lock_pending] = "lock_pending",
[qstat_lock_slowpath] = "lock_slowpath",
- [qstat_lock_idx1] = "lock_index1",
- [qstat_lock_idx2] = "lock_index2",
- [qstat_lock_idx3] = "lock_index3",
+ [qstat_lock_use_node2] = "lock_use_node2",
+ [qstat_lock_use_node3] = "lock_use_node3",
+ [qstat_lock_use_node4] = "lock_use_node4",
+ [qstat_lock_no_node] = "lock_no_node",
[qstat_reset_cnts] = "reset_counters",
};
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 581edcc63c26..978d63a8261c 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
rt_mutex_set_owner(lock, NULL);
}
+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock: the rt_mutex to take
+ * @waiter: the pre-initialized rt_mutex_waiter
+ * @task: the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait; see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: this does _NOT_ remove the @waiter on failure; the caller must
+ * either call rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock()
+ * after this.
+ *
+ * Returns:
+ * 0 - task blocked on lock
+ * 1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task)
{
int ret;
+ lockdep_assert_held(&lock->wait_lock);
+
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
ret = 0;
}
- if (unlikely(ret))
- remove_waiter(lock, waiter);
-
debug_rt_mutex_print_deadlock(waiter);
return ret;
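
The contract spelled out above implies a calling sequence along the following lines; this is a hedged sketch loosely modelled on the futex PI path (error handling and futex details omitted), not code from the patch:

    static int proxy_lock_example(struct rt_mutex *lock,
                                  struct rt_mutex_waiter *waiter,
                                  struct hrtimer_sleeper *to)
    {
            int ret;

            raw_spin_lock_irq(&lock->wait_lock);
            ret = __rt_mutex_start_proxy_lock(lock, waiter, current);
            raw_spin_unlock_irq(&lock->wait_lock);

            if (ret > 0)            /* lock was acquired immediately */
                    return 0;

            if (!ret)               /* enqueued: now actually block */
                    ret = rt_mutex_wait_proxy_lock(lock, to, waiter);

            /*
             * On failure the waiter is still enqueued, because
             * __rt_mutex_start_proxy_lock() no longer removes it; the
             * caller cleans up (and may find it won the lock after all).
             */
            if (ret && !rt_mutex_cleanup_proxy_lock(lock, waiter))
                    ret = 0;

            return ret;
    }
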
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
* @waiter: the pre-initialized rt_mutex_waiter
* @task: the task to prepare
*
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait; see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock(), this _DOES_ remove the
+ * @waiter on failure.
+ *
* Returns:
* 0 - task blocked on lock
* 1 - acquired the lock for task, caller should wake it up
* <0 - error
*
- * Special API call for FUTEX_REQUEUE_PI support.
+ * Special API call for PI-futex support.
*/
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
raw_spin_lock_irq(&lock->wait_lock);
ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+ if (unlikely(ret))
+ remove_waiter(lock, waiter);
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
* @lock: the rt_mutex we were woken on
* @waiter: the pre-initialized rt_mutex_waiter
*
- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
*
* Unless we acquired the lock; we're still enqueued on the wait-list and can
* in fact still be granted ownership until we're removed. Therefore we can
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 09b180063ee1..fbe96341beee 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -198,15 +198,20 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
woken++;
tsk = waiter->task;
- wake_q_add(wake_q, tsk);
+ get_task_struct(tsk);
list_del(&waiter->list);
/*
- * Ensure that the last operation is setting the reader
+ * Ensure that get_task_struct() is called before setting the reader
* waiter to nil such that rwsem_down_read_failed() cannot
* race with do_exit() by always holding a reference count
* to the task to wakeup.
*/
smp_store_release(&waiter->task, NULL);
+ /*
+ * Ensure that the wakeup (whether issued by us or by someone else)
+ * happens only after the reader waiter has been set to nil.
+ */
+ wake_q_add_safe(wake_q, tsk);
}
adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index d9dc2c38764a..7d66ee68aaaf 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -10,6 +10,7 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
+#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
@@ -23,6 +24,60 @@ static DEFINE_PER_CPU(struct em_perf_domain *, em_data);
*/
static DEFINE_MUTEX(em_pd_mutex);
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *rootdir;
+
+static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
+{
+ struct dentry *d;
+ char name[24];
+
+ snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+
+ /* Create per-cs directory */
+ d = debugfs_create_dir(name, pd);
+ debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
+ debugfs_create_ulong("power", 0444, d, &cs->power);
+ debugfs_create_ulong("cost", 0444, d, &cs->cost);
+}
+
+static int em_debug_cpus_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
+
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+{
+ struct dentry *d;
+ char name[8];
+ int i;
+
+ snprintf(name, sizeof(name), "pd%d", cpu);
+
+ /* Create the directory of the performance domain */
+ d = debugfs_create_dir(name, rootdir);
+
+ debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+
+ /* Create a sub-directory for each capacity state */
+ for (i = 0; i < pd->nr_cap_states; i++)
+ em_debug_create_cs(&pd->table[i], d);
+}
+
+static int __init em_debug_init(void)
+{
+ /* Create /sys/kernel/debug/energy_model directory */
+ rootdir = debugfs_create_dir("energy_model", NULL);
+
+ return 0;
+}
+core_initcall(em_debug_init);
+#else /* CONFIG_DEBUG_FS */
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+#endif
static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
struct em_data_callback *cb)
{
@@ -102,6 +157,8 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
pd->nr_cap_states = nr_states;
cpumask_copy(to_cpumask(pd->cpus), span);
+ em_debug_create_pd(pd, cpu);
+
return pd;
free_cs_table:
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index b7a82502857a..9d22131afc1e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -582,10 +582,8 @@ static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
qos->pm_qos_power_miscdev.name = qos->name;
qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
- if (d) {
- (void)debugfs_create_file(qos->name, S_IRUGO, d,
- (void *)qos, &pm_qos_debug_fops);
- }
+ debugfs_create_file(qos->name, S_IRUGO, d, (void *)qos,
+ &pm_qos_debug_fops);
return misc_register(&qos->pm_qos_power_miscdev);
}
@@ -685,8 +683,6 @@ static int __init pm_qos_power_init(void)
BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
d = debugfs_create_dir("pm_qos", NULL);
- if (IS_ERR_OR_NULL(d))
- d = NULL;
for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
ret = register_pm_qos_misc(pm_qos_array[i], d);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 640b2034edd6..4802b039b89f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1215,14 +1215,16 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
if (!pfn_valid(pfn))
return NULL;
- page = pfn_to_page(pfn);
- if (page_zone(page) != zone)
+ page = pfn_to_online_page(pfn);
+ if (!page || page_zone(page) != zone)
return NULL;
BUG_ON(!PageHighMem(page));
- if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
- PageReserved(page))
+ if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
+ return NULL;
+
+ if (PageReserved(page) || PageOffline(page))
return NULL;
if (page_is_guard(page))
@@ -1277,8 +1279,8 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
if (!pfn_valid(pfn))
return NULL;
- page = pfn_to_page(pfn);
- if (page_zone(page) != zone)
+ page = pfn_to_online_page(pfn);
+ if (!page || page_zone(page) != zone)
return NULL;
BUG_ON(PageHighMem(page));
@@ -1286,6 +1288,9 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
return NULL;
+ if (PageOffline(page))
+ return NULL;
+
if (PageReserved(page)
&& (!kernel_page_present(page) || pfn_is_nosave(pfn)))
return NULL;
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index 939a2056c87a..37301430970e 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -87,36 +87,6 @@ config RCU_STALL_COMMON
config RCU_NEED_SEGCBLIST
def_bool ( TREE_RCU || PREEMPT_RCU || TREE_SRCU )
-config CONTEXT_TRACKING
- bool
-
-config CONTEXT_TRACKING_FORCE
- bool "Force context tracking"
- depends on CONTEXT_TRACKING
- default y if !NO_HZ_FULL
- help
- The major pre-requirement for full dynticks to work is to
- support the context tracking subsystem. But there are also
- other dependencies to provide in order to make the full
- dynticks working.
-
- This option stands for testing when an arch implements the
- context tracking backend but doesn't yet fullfill all the
- requirements to make the full dynticks feature working.
- Without the full dynticks, there is no way to test the support
- for context tracking and the subsystems that rely on it: RCU
- userspace extended quiescent state and tickless cputime
- accounting. This option copes with the absence of the full
- dynticks subsystem by forcing the context tracking on all
- CPUs in the system.
-
- Say Y only if you're working on the development of an
- architecture backend for the context tracking.
-
- Say N otherwise, this option brings an overhead that you
- don't want in production.
-
-
config RCU_FANOUT
int "Tree-based hierarchical RCU fanout value"
range 2 64 if 64BIT
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index a393e24a9195..acee72c0b24b 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update definitions shared among RCU implementations.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2011
*
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#ifndef __LINUX_RCU_H
@@ -30,7 +17,7 @@
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
-/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
@@ -462,8 +449,6 @@ void rcu_request_urgent_qs_task(struct task_struct *t);
enum rcutorture_type {
RCU_FLAVOR,
- RCU_BH_FLAVOR,
- RCU_SCHED_FLAVOR,
RCU_TASKS_FLAVOR,
SRCU_FLAVOR,
INVALID_RCU_FLAVOR
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
index 5aff271adf1e..9bd5f6023c21 100644
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* RCU segmented callback lists, function definitions
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2017
*
- * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#include <linux/types.h>
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 948470cef385..71b64648464e 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU segmented callback lists, internal-to-rcu header file
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2017
*
- * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#include <linux/rcu_segcblist.h>
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index b459da70b4fc..c29761152874 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Read-Copy Update module-based performance-test facility
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2015
*
- * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#define pr_fmt(fmt) fmt
@@ -54,7 +41,7 @@
#include "rcu.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
@@ -83,13 +70,19 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
* Various other use cases may of course be specified.
*/
+#ifdef MODULE
+# define RCUPERF_SHUTDOWN 0
+#else
+# define RCUPERF_SHUTDOWN 1
+#endif
+
torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
-torture_param(bool, shutdown, !IS_ENABLED(MODULE),
+torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
"Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index f6e85faa4ff4..f14d1b18a74f 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Read-Copy Update module-based torture test facility
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2005, 2006
*
- * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
* Josh Triplett <josh@joshtriplett.org>
*
* See also: Documentation/RCU/torture.txt
@@ -61,7 +48,7 @@
#include "rcu.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
@@ -1630,21 +1617,34 @@ static bool rcu_fwd_emergency_stop;
#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
-static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)];
+struct rcu_launder_hist {
+ long n_launders;
+ unsigned long launder_gp_seq;
+};
+#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
+static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
+static unsigned long rcu_launder_gp_seq_start;
static void rcu_torture_fwd_cb_hist(void)
{
+ unsigned long gps;
+ unsigned long gps_old;
int i;
int j;
for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
- if (n_launders_hist[i] > 0)
+ if (n_launders_hist[i].n_launders > 0)
break;
pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
__func__, jiffies - rcu_fwd_startat);
- for (j = 0; j <= i; j++)
- pr_cont(" %ds/%d: %ld",
- j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]);
+ gps_old = rcu_launder_gp_seq_start;
+ for (j = 0; j <= i; j++) {
+ gps = n_launders_hist[j].launder_gp_seq;
+ pr_cont(" %ds/%d: %ld:%ld",
+ j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
+ rcutorture_seq_diff(gps, gps_old));
+ gps_old = gps;
+ }
pr_cont("\n");
}
@@ -1666,7 +1666,8 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
if (i >= ARRAY_SIZE(n_launders_hist))
i = ARRAY_SIZE(n_launders_hist) - 1;
- n_launders_hist[i]++;
+ n_launders_hist[i].n_launders++;
+ n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}
@@ -1786,9 +1787,10 @@ static void rcu_torture_fwd_prog_cr(void)
n_max_cbs = 0;
n_max_gps = 0;
for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
- n_launders_hist[i] = 0;
+ n_launders_hist[i].n_launders = 0;
cver = READ_ONCE(rcu_torture_current_version);
gps = cur_ops->get_gp_seq();
+ rcu_launder_gp_seq_start = gps;
while (time_before(jiffies, stopat) &&
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
rfcp = READ_ONCE(rcu_fwd_cb_head);
@@ -2228,6 +2230,14 @@ static void rcu_test_debug_objects(void)
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
+static void rcutorture_sync(void)
+{
+ static unsigned long n;
+
+ if (cur_ops->sync && !(++n & 0xfff))
+ cur_ops->sync();
+}
+
static int __init
rcu_torture_init(void)
{
@@ -2389,7 +2399,8 @@ rcu_torture_init(void)
firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
if (firsterr)
goto unwind;
- firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
+ firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
+ rcutorture_sync);
if (firsterr)
goto unwind;
firsterr = rcu_torture_stall_init();
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 32dfd6522548..5d4a39a6505a 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -1,24 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Sleepable Read-Copy Update mechanism for mutual exclusion,
* tiny version for non-preemptible single-CPU use.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2017
*
- * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Author: Paul McKenney <paulmck@linux.ibm.com>
*/
#include <linux/export.h>
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 3600d88d8956..a60b8ba9e1ac 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -1,24 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Sleepable Read-Copy Update mechanism for mutual exclusion.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2006
* Copyright (C) Fujitsu, 2012
*
- * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Author: Paul McKenney <paulmck@linux.ibm.com>
* Lai Jiangshan <laijs@cn.fujitsu.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
@@ -58,6 +45,7 @@ static bool __read_mostly srcu_init_done;
static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
+static void srcu_delay_timer(struct timer_list *t);
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p) \
@@ -156,7 +144,8 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
snp->grphi = cpu;
}
sdp->cpu = cpu;
- INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
+ INIT_WORK(&sdp->work, srcu_invoke_callbacks);
+ timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
sdp->ssp = ssp;
sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
if (is_static)
@@ -386,13 +375,19 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
} else {
flush_delayed_work(&ssp->work);
}
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
+
if (quiesced) {
- if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
+ if (WARN_ON(timer_pending(&sdp->delay_work)))
+ return; /* Just leak it! */
+ if (WARN_ON(work_pending(&sdp->work)))
return; /* Just leak it! */
} else {
- flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
+ del_timer_sync(&sdp->delay_work);
+ flush_work(&sdp->work);
}
+ }
if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
WARN_ON(srcu_readers_active(ssp))) {
pr_info("%s: Active srcu_struct %p state: %d\n",
@@ -463,39 +458,23 @@ static void srcu_gp_start(struct srcu_struct *ssp)
WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
-/*
- * Track online CPUs to guide callback workqueue placement.
- */
-DEFINE_PER_CPU(bool, srcu_online);
-void srcu_online_cpu(unsigned int cpu)
+static void srcu_delay_timer(struct timer_list *t)
{
- WRITE_ONCE(per_cpu(srcu_online, cpu), true);
-}
+ struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
-void srcu_offline_cpu(unsigned int cpu)
-{
- WRITE_ONCE(per_cpu(srcu_online, cpu), false);
+ queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}
-/*
- * Place the workqueue handler on the specified CPU if online, otherwise
- * just run it whereever. This is useful for placing workqueue handlers
- * that are to invoke the specified CPU's callbacks.
- */
-static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct delayed_work *dwork,
+static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
unsigned long delay)
{
- bool ret;
+ if (!delay) {
+ queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
+ return;
+ }
- preempt_disable();
- if (READ_ONCE(per_cpu(srcu_online, cpu)))
- ret = queue_delayed_work_on(cpu, wq, dwork, delay);
- else
- ret = queue_delayed_work(wq, dwork, delay);
- preempt_enable();
- return ret;
+ timer_reduce(&sdp->delay_work, jiffies + delay);
}
/*
@@ -504,7 +483,7 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
*/
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
- srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
+ srcu_queue_delayed_work_on(sdp, delay);
}
/*
@@ -1186,7 +1165,8 @@ static void srcu_invoke_callbacks(struct work_struct *work)
struct srcu_data *sdp;
struct srcu_struct *ssp;
- sdp = container_of(work, struct srcu_data, work.work);
+ sdp = container_of(work, struct srcu_data, work);
+
ssp = sdp->ssp;
rcu_cblist_init(&ready_cbs);
spin_lock_irq_rcu_node(sdp);
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index be10036fa621..a8304d90573f 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* RCU-based infrastructure for lightweight reader-writer locking
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (c) 2015, Red Hat, Inc.
*
* Author: Oleg Nesterov <oleg@redhat.com>
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 5f5963ba313e..911bd9076d43 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2008
*
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
@@ -76,7 +63,7 @@ void rcu_qs(void)
* be called from hardirq context. It is normally called from the
* scheduling-clock interrupt.
*/
-void rcu_check_callbacks(int user)
+void rcu_sched_clock_irq(int user)
{
if (user) {
rcu_qs();
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9180158756d2..acd6ccf56faf 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1,27 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Read-Copy Update mechanism for mutual exclusion
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2008
*
* Authors: Dipankar Sarma <dipankar@in.ibm.com>
* Manfred Spraul <manfred@colorfullife.com>
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
+ * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical version
*
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
*
* For detailed explanation of Read-Copy Update mechanism see -
@@ -62,6 +49,8 @@
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
+#include <linux/sysrq.h>
+#include <linux/kprobes.h>
#include "tree.h"
#include "rcu.h"
@@ -115,6 +104,9 @@ int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
+/* Commandeer a sysrq key to dump RCU's tree. */
+static bool sysrq_rcu;
+module_param(sysrq_rcu, bool, 0444);
/*
* The rcu_scheduler_active variable is initialized to the value
@@ -479,7 +471,6 @@ module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next
module_param(rcu_kick_kthreads, bool, 0644);
static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
-static void force_quiescent_state(void);
static int rcu_pending(void);
/*
@@ -504,13 +495,12 @@ unsigned long rcu_exp_batches_completed(void)
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
/*
- * Force a quiescent state.
+ * Return the root node of the rcu_state structure.
*/
-void rcu_force_quiescent_state(void)
+static struct rcu_node *rcu_get_root(void)
{
- force_quiescent_state();
+ return &rcu_state.node[0];
}
-EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
* Convert a ->gp_state value to a character string.
@@ -529,19 +519,30 @@ void show_rcu_gp_kthreads(void)
{
int cpu;
unsigned long j;
+ unsigned long ja;
+ unsigned long jr;
+ unsigned long jw;
struct rcu_data *rdp;
struct rcu_node *rnp;
- j = jiffies - READ_ONCE(rcu_state.gp_activity);
- pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n",
+ j = jiffies;
+ ja = j - READ_ONCE(rcu_state.gp_activity);
+ jr = j - READ_ONCE(rcu_state.gp_req_activity);
+ jw = j - READ_ONCE(rcu_state.gp_wake_time);
+ pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
rcu_state.name, gp_state_getname(rcu_state.gp_state),
- rcu_state.gp_state, rcu_state.gp_kthread->state, j);
+ rcu_state.gp_state,
+ rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
+ ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
+ (long)READ_ONCE(rcu_state.gp_seq),
+ (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
+ READ_ONCE(rcu_state.gp_flags));
rcu_for_each_node_breadth_first(rnp) {
if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
continue;
- pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
- rnp->grplo, rnp->grphi, rnp->gp_seq,
- rnp->gp_seq_needed);
+ pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
+ rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
+ (long)rnp->gp_seq_needed);
if (!rcu_is_leaf_node(rnp))
continue;
for_each_leaf_node_possible_cpu(rnp, cpu) {
@@ -550,14 +551,35 @@ void show_rcu_gp_kthreads(void)
ULONG_CMP_GE(rcu_state.gp_seq,
rdp->gp_seq_needed))
continue;
- pr_info("\tcpu %d ->gp_seq_needed %lu\n",
- cpu, rdp->gp_seq_needed);
+ pr_info("\tcpu %d ->gp_seq_needed %ld\n",
+ cpu, (long)rdp->gp_seq_needed);
}
}
/* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
+/* Dump grace-period-request information due to commandeered sysrq. */
+static void sysrq_show_rcu(int key)
+{
+ show_rcu_gp_kthreads();
+}
+
+static struct sysrq_key_op sysrq_rcudump_op = {
+ .handler = sysrq_show_rcu,
+ .help_msg = "show-rcu(y)",
+ .action_msg = "Show RCU tree",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init rcu_sysrq_init(void)
+{
+ if (sysrq_rcu)
+ return register_sysrq_key('y', &sysrq_rcudump_op);
+ return 0;
+}
+early_initcall(rcu_sysrq_init);
+
/*
* Send along grace-period-related data for rcutorture diagnostics.
*/
@@ -566,8 +588,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
{
switch (test_type) {
case RCU_FLAVOR:
- case RCU_BH_FLAVOR:
- case RCU_SCHED_FLAVOR:
*flags = READ_ONCE(rcu_state.gp_flags);
*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
break;
@@ -578,14 +598,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
/*
- * Return the root node of the rcu_state structure.
- */
-static struct rcu_node *rcu_get_root(void)
-{
- return &rcu_state.node[0];
-}
-
-/*
* Enter an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*
@@ -701,7 +713,6 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
/**
* rcu_nmi_exit - inform RCU of exit from NMI context
- * @irq: Is this call from rcu_irq_exit?
*
* If you add or remove a call to rcu_nmi_exit(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
@@ -872,6 +883,7 @@ void rcu_nmi_enter(void)
{
rcu_nmi_enter_common(false);
}
+NOKPROBE_SYMBOL(rcu_nmi_enter);
/**
* rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -1115,7 +1127,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
}
/*
- * NO_HZ_FULL CPUs can run in-kernel without rcu_check_callbacks!
+ * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
* The above code handles this, but only for straight cond_resched().
* And some in-kernel loops check need_resched() before calling
* cond_resched(), which defeats the above code for CPUs that are
@@ -1181,7 +1193,7 @@ static void rcu_check_gp_kthread_starvation(void)
pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
rcu_state.name, j,
(long)rcu_seq_current(&rcu_state.gp_seq),
- rcu_state.gp_flags,
+ READ_ONCE(rcu_state.gp_flags),
gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
if (gpk) {
@@ -1310,7 +1322,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
panic_on_rcu_stall();
- force_quiescent_state(); /* Kick them all. */
+ rcu_force_quiescent_state(); /* Kick them all. */
}
static void print_cpu_stall(void)
@@ -1557,17 +1569,28 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
}
/*
- * Awaken the grace-period kthread. Don't do a self-awaken, and don't
- * bother awakening when there is nothing for the grace-period kthread
- * to do (as in several CPUs raced to awaken, and we lost), and finally
- * don't try to awaken a kthread that has not yet been created.
+ * Awaken the grace-period kthread. Don't do a self-awaken (unless in
+ * an interrupt or softirq handler), and don't bother awakening when there
+ * is nothing for the grace-period kthread to do (as in several CPUs raced
+ * to awaken, and we lost), and finally don't try to awaken a kthread that
+ * has not yet been created. If all those checks are passed, track some
+ * debug information and awaken.
+ *
+ * So why do the self-wakeup when in an interrupt or softirq handler
+ * in the grace-period kthread's context? Because the kthread might have
+ * been interrupted just as it was going to sleep, and just after the final
+ * pre-sleep check of the awaken condition. In this case, a wakeup really
+ * is required, and is therefore supplied.
*/
static void rcu_gp_kthread_wake(void)
{
- if (current == rcu_state.gp_kthread ||
+ if ((current == rcu_state.gp_kthread &&
+ !in_interrupt() && !in_serving_softirq()) ||
!READ_ONCE(rcu_state.gp_flags) ||
!rcu_state.gp_kthread)
return;
+ WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
+ WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
swake_up_one(&rcu_state.gp_wq);
}
@@ -1711,7 +1734,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
zero_cpu_stall_ticks(rdp);
}
rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
- if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
+ if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
rdp->gp_seq_needed = rnp->gp_seq_needed;
WRITE_ONCE(rdp->gpwrap, false);
rcu_gpnum_ovf(rnp, rdp);
@@ -1939,7 +1962,7 @@ static void rcu_gp_fqs_loop(void)
if (!ret) {
rcu_state.jiffies_force_qs = jiffies + j;
WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
- jiffies + 3 * j);
+ jiffies + (j ? 3 * j : 2));
}
trace_rcu_grace_period(rcu_state.name,
READ_ONCE(rcu_state.gp_seq),
@@ -2497,14 +2520,14 @@ static void rcu_do_batch(struct rcu_data *rdp)
}
/*
- * Check to see if this CPU is in a non-context-switch quiescent state
- * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
- * Also schedule RCU core processing.
- *
- * This function must be called from hardirq context. It is normally
- * invoked from the scheduling-clock interrupt.
+ * This function is invoked from each scheduling-clock interrupt,
+ * and checks to see if this CPU is in a non-context-switch quiescent
+ * state, for example, user mode or idle loop. It also schedules RCU
+ * core processing. If the current grace period has gone on too long,
+ * it will ask the scheduler to manufacture a context switch for the sole
+ * purpose of providing the needed quiescent state.
*/
-void rcu_check_callbacks(int user)
+void rcu_sched_clock_irq(int user)
{
trace_rcu_utilization(TPS("Start scheduler-tick"));
raw_cpu_inc(rcu_data.ticks_this_gp);
@@ -2517,7 +2540,7 @@ void rcu_check_callbacks(int user)
}
__this_cpu_write(rcu_data.rcu_urgent_qs, false);
}
- rcu_flavor_check_callbacks(user);
+ rcu_flavor_sched_clock_irq(user);
if (rcu_pending())
invoke_rcu_core();
@@ -2578,7 +2601,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
* Force quiescent states on reluctant CPUs, and also detect which
* CPUs are in dyntick-idle mode.
*/
-static void force_quiescent_state(void)
+void rcu_force_quiescent_state(void)
{
unsigned long flags;
bool ret;
@@ -2610,6 +2633,7 @@ static void force_quiescent_state(void)
raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
rcu_gp_kthread_wake();
}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
* This function checks for grace-period requests that fail to motivate
@@ -2657,16 +2681,11 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
}
- pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
- __func__, (long)READ_ONCE(rcu_state.gp_seq),
- (long)READ_ONCE(rnp_root->gp_seq_needed),
- j - rcu_state.gp_req_activity, j - rcu_state.gp_activity,
- rcu_state.gp_flags, rcu_state.gp_state, rcu_state.name,
- rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL);
WARN_ON(1);
if (rnp_root != rnp)
raw_spin_unlock_rcu_node(rnp_root);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ show_rcu_gp_kthreads();
}
/*
@@ -2711,12 +2730,8 @@ void rcu_fwd_progress_check(unsigned long j)
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
-/*
- * This does the RCU core processing work for the specified rcu_data
- * structures. This may be called only from the CPU to whom the rdp
- * belongs.
- */
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
+/* Perform RCU core processing work for the current CPU. */
+static __latent_entropy void rcu_core(struct softirq_action *unused)
{
unsigned long flags;
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
@@ -2801,9 +2816,9 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
/*
* Force the grace period if too many callbacks or too long waiting.
- * Enforce hysteresis, and don't invoke force_quiescent_state()
+ * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
* if some other CPU has recently done so. Also, don't bother
- * invoking force_quiescent_state() if the newly enqueued callback
+ * invoking rcu_force_quiescent_state() if the newly enqueued callback
* is the only one waiting for a grace period to complete.
*/
if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
@@ -2820,7 +2835,7 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
rdp->blimit = LONG_MAX;
if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
- force_quiescent_state();
+ rcu_force_quiescent_state();
rdp->n_force_qs_snap = rcu_state.n_force_qs;
rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
}
@@ -2889,9 +2904,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
rcu_segcblist_init(&rdp->cblist);
}
rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
- if (!lazy)
- rcu_idle_count_callbacks_posted();
-
if (__is_kfree_rcu_offset((unsigned long)func))
trace_rcu_kfree_callback(rcu_state.name, head,
(unsigned long)func,
@@ -2961,6 +2973,79 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
+/*
+ * During early boot, any blocking grace-period wait automatically
+ * implies a grace period. Later on, this is never the case for PREEMPT.
+ *
+ * However, because a context switch is a grace period for !PREEMPT, any
+ * blocking grace-period wait automatically implies a grace period if
+ * there is only one CPU online at any point in time during execution of
+ * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
+ * occasionally incorrectly indicate that there are multiple CPUs online
+ * when there was in fact only one the whole time, as this just adds some
+ * overhead: RCU still operates correctly.
+ */
+static int rcu_blocking_is_gp(void)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PREEMPT))
+ return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
+ might_sleep(); /* Check for RCU read-side critical section. */
+ preempt_disable();
+ ret = num_online_cpus() <= 1;
+ preempt_enable();
+ return ret;
+}
+
+/**
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed. Note, however, that
+ * upon return from synchronize_rcu(), the caller might well be executing
+ * concurrently with new RCU read-side critical sections that began while
+ * synchronize_rcu() was waiting. RCU read-side critical sections are
+ * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
+ * In addition, regions of code across which interrupts, preemption, or
+ * softirqs have been disabled also serve as RCU read-side critical
+ * sections. This includes hardware interrupt handlers, softirq handlers,
+ * and NMI handlers.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since
+ * the end of its last RCU read-side critical section whose beginning
+ * preceded the call to synchronize_rcu(). In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_rcu() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_rcu() and before the beginning of
+ * that RCU read-side critical section. Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_rcu(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
+ */
+void synchronize_rcu(void)
+{
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_rcu() in RCU read-side critical section");
+ if (rcu_blocking_is_gp())
+ return;
+ if (rcu_gp_is_expedited())
+ synchronize_rcu_expedited();
+ else
+ wait_rcu_gp(call_rcu);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
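As a reminder of how the guarantee documented above is typically consumed, here is a hedged, self-contained sketch of the classic unlink/wait/free pattern; struct foo, foo_list, foo_lock, and the helper names are hypothetical.

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct foo {
            struct list_head list;
            int data;
    };

    static LIST_HEAD(foo_list);
    static DEFINE_SPINLOCK(foo_lock);

    /* Readers traverse foo_list under rcu_read_lock(). */
    static bool foo_lookup(int data)
    {
            struct foo *p;
            bool found = false;

            rcu_read_lock();
            list_for_each_entry_rcu(p, &foo_list, list) {
                    if (p->data == data) {
                            found = true;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }

    /* Updater unlinks, waits for a grace period, then frees. */
    static void foo_del(struct foo *p)
    {
            spin_lock(&foo_lock);
            list_del_rcu(&p->list);
            spin_unlock(&foo_lock);
            synchronize_rcu();      /* All pre-existing readers are done. */
            kfree(p);
    }

Once synchronize_rcu() returns, no reader can still hold a reference obtained before the list_del_rcu(), so the kfree() is safe.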
/**
* get_state_synchronize_rcu - Snapshot current RCU state
*
@@ -3049,28 +3134,6 @@ static int rcu_pending(void)
}
/*
- * Return true if the specified CPU has any callback. If all_lazy is
- * non-NULL, store an indication of whether all callbacks are lazy.
- * (If there are no callbacks, all of them are deemed to be lazy.)
- */
-static bool rcu_cpu_has_callbacks(bool *all_lazy)
-{
- bool al = true;
- bool hc = false;
- struct rcu_data *rdp;
-
- rdp = this_cpu_ptr(&rcu_data);
- if (!rcu_segcblist_empty(&rdp->cblist)) {
- hc = true;
- if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist))
- al = false;
- }
- if (all_lazy)
- *all_lazy = al;
- return hc;
-}
-
-/*
* Helper function for rcu_barrier() tracing. If tracing is disabled,
* the compiler is expected to optimize this away.
*/
@@ -3299,7 +3362,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rcu_prepare_kthreads(cpu);
- rcu_spawn_all_nocb_kthreads(cpu);
+ rcu_spawn_cpu_nocb_kthread(cpu);
return 0;
}
@@ -3329,8 +3392,6 @@ int rcutree_online_cpu(unsigned int cpu)
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp->ffmask |= rdp->grpmask;
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- if (IS_ENABLED(CONFIG_TREE_SRCU))
- srcu_online_cpu(cpu);
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return 0; /* Too early in boot for scheduler work. */
sync_sched_exp_online_cleanup(cpu);
@@ -3355,8 +3416,6 @@ int rcutree_offline_cpu(unsigned int cpu)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rcutree_affinity_setting(cpu, cpu);
- if (IS_ENABLED(CONFIG_TREE_SRCU))
- srcu_offline_cpu(cpu);
return 0;
}
@@ -3777,7 +3836,7 @@ void __init rcu_init(void)
rcu_init_one();
if (dump_tree)
rcu_dump_rcu_node_tree();
- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+ open_softirq(RCU_SOFTIRQ, rcu_core);
/*
* We don't need protection against CPU-hotplug here because
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d90b02b53c0e..bb4f995f2d3f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion (tree-based version)
* Internal non-public definitions.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2008
*
* Author: Ingo Molnar <mingo@elte.hu>
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Paul E. McKenney <paulmck@linux.ibm.com>
*/
#include <linux/cache.h>
@@ -36,7 +23,6 @@
/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
- smp_call_func_t rew_func;
unsigned long rew_s;
struct work_struct rew_work;
};
@@ -194,10 +180,7 @@ struct rcu_data {
bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
bool rcu_urgent_qs; /* GP old need light quiescent state. */
#ifdef CONFIG_RCU_FAST_NO_HZ
- bool all_lazy; /* Are all CPU's CBs lazy? */
- unsigned long nonlazy_posted; /* # times non-lazy CB posted to CPU. */
- unsigned long nonlazy_posted_snap;
- /* Nonlazy_posted snapshot. */
+ bool all_lazy; /* All CPU's CBs lazy at idle start? */
unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */
unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */
int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
@@ -234,7 +217,13 @@ struct rcu_data {
/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
- /* 6) Diagnostic data, including RCU CPU stall warnings. */
+ /* 6) RCU priority boosting. */
+ struct task_struct *rcu_cpu_kthread_task;
+ /* rcuc per-CPU kthread or NULL. */
+ unsigned int rcu_cpu_kthread_status;
+ char rcu_cpu_has_work;
+
+ /* 7) Diagnostic data, including RCU CPU stall warnings. */
unsigned int softirq_snap; /* Snapshot of softirq activity. */
/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
struct irq_work rcu_iw; /* Check for non-irq activity. */
@@ -303,6 +292,8 @@ struct rcu_state {
struct swait_queue_head gp_wq; /* Where GP task waits. */
short gp_flags; /* Commands for GP task. */
short gp_state; /* GP kthread sleep state. */
+ unsigned long gp_wake_time; /* Last GP kthread wake. */
+ unsigned long gp_wake_seq; /* ->gp_seq at ^^^. */
/* End of fields guarded by root rcu_node's lock. */
@@ -402,13 +393,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
int rcu_dynticks_snap(struct rcu_data *rdp);
-#ifdef CONFIG_RCU_BOOST
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DECLARE_PER_CPU(char, rcu_cpu_has_work);
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
@@ -420,7 +404,7 @@ static void rcu_print_detail_task_stall(void);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
-static void rcu_flavor_check_callbacks(int user);
+static void rcu_flavor_sched_clock_irq(int user);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
@@ -431,7 +415,6 @@ static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
-static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void rcu_preempt_deferred_qs(struct task_struct *t);
@@ -451,7 +434,7 @@ static bool rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
-static void rcu_spawn_all_nocb_kthreads(int cpu);
+static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);
@@ -462,11 +445,3 @@ static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
-
-#ifdef CONFIG_SRCU
-void srcu_online_cpu(unsigned int cpu);
-void srcu_offline_cpu(unsigned int cpu);
-#else /* #ifdef CONFIG_SRCU */
-void srcu_online_cpu(unsigned int cpu) { }
-void srcu_offline_cpu(unsigned int cpu) { }
-#endif /* #else #ifdef CONFIG_SRCU */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 928fe5893a57..4c2a0189e748 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -1,27 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU expedited grace periods
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2016
*
- * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#include <linux/lockdep.h>
+static void rcu_exp_handler(void *unused);
+
/*
* Record the start of an expedited grace period.
*/
@@ -344,7 +333,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
int cpu;
unsigned long flags;
- smp_call_func_t func;
unsigned long mask_ofl_test;
unsigned long mask_ofl_ipi;
int ret;
@@ -352,7 +340,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
container_of(wp, struct rcu_exp_work, rew_work);
struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
- func = rewp->rew_func;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
/* Each pass checks a CPU for identity, offline, and idle. */
@@ -396,7 +383,7 @@ retry_ipi:
mask_ofl_test |= mask;
continue;
}
- ret = smp_call_function_single(cpu, func, NULL, 0);
+ ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
if (!ret) {
mask_ofl_ipi &= ~mask;
continue;
@@ -426,7 +413,7 @@ retry_ipi:
* Select the nodes that the upcoming expedited grace period needs
* to wait for.
*/
-static void sync_rcu_exp_select_cpus(smp_call_func_t func)
+static void sync_rcu_exp_select_cpus(void)
{
int cpu;
struct rcu_node *rnp;
@@ -440,7 +427,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
rnp->exp_need_flush = false;
if (!READ_ONCE(rnp->expmask))
continue; /* Avoid early boot non-existent wq. */
- rnp->rew.rew_func = func;
if (!READ_ONCE(rcu_par_gp_wq) ||
rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
rcu_is_last_leaf_node(rnp)) {
@@ -449,7 +435,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
continue;
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
- preempt_disable();
cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
/* If all offline, queue the work on an unbound CPU. */
if (unlikely(cpu > rnp->grphi - rnp->grplo))
@@ -457,7 +442,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
else
cpu += rnp->grplo;
queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
- preempt_enable();
rnp->exp_need_flush = true;
}
@@ -580,10 +564,10 @@ static void rcu_exp_wait_wake(unsigned long s)
* Common code to drive an expedited grace period forward, used by
* workqueues and mid-boot-time tasks.
*/
-static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
+static void rcu_exp_sel_wait_wake(unsigned long s)
{
/* Initialize the rcu_node tree in preparation for the wait. */
- sync_rcu_exp_select_cpus(func);
+ sync_rcu_exp_select_cpus();
/* Wait and clean up, including waking everyone. */
rcu_exp_wait_wake(s);
@@ -597,52 +581,7 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
struct rcu_exp_work *rewp;
rewp = container_of(wp, struct rcu_exp_work, rew_work);
- rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
-}
-
-/*
- * Given a smp_call_function() handler, kick off the specified
- * implementation of expedited grace period.
- */
-static void _synchronize_rcu_expedited(smp_call_func_t func)
-{
- struct rcu_data *rdp;
- struct rcu_exp_work rew;
- struct rcu_node *rnp;
- unsigned long s;
-
- /* If expedited grace periods are prohibited, fall back to normal. */
- if (rcu_gp_is_normal()) {
- wait_rcu_gp(call_rcu);
- return;
- }
-
- /* Take a snapshot of the sequence number. */
- s = rcu_exp_gp_seq_snap();
- if (exp_funnel_lock(s))
- return; /* Someone else did our work for us. */
-
- /* Ensure that load happens before action based on it. */
- if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
- /* Direct call during scheduler init and early_initcalls(). */
- rcu_exp_sel_wait_wake(func, s);
- } else {
- /* Marshall arguments & schedule the expedited grace period. */
- rew.rew_func = func;
- rew.rew_s = s;
- INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
- queue_work(rcu_gp_wq, &rew.rew_work);
- }
-
- /* Wait for expedited grace period to complete. */
- rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
- rnp = rcu_get_root();
- wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
- sync_exp_work_done(s));
- smp_mb(); /* Workqueue actions happen before return. */
-
- /* Let the next expedited grace period start. */
- mutex_unlock(&rcu_state.exp_mutex);
+ rcu_exp_sel_wait_wake(rewp->rew_s);
}
#ifdef CONFIG_PREEMPT_RCU
@@ -654,7 +593,7 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
* ->expmask fields in the rcu_node tree. Otherwise, immediately
* report the quiescent state.
*/
-static void sync_rcu_exp_handler(void *unused)
+static void rcu_exp_handler(void *unused)
{
unsigned long flags;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -697,6 +636,7 @@ static void sync_rcu_exp_handler(void *unused)
WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return;
}
/*
@@ -730,43 +670,10 @@ static void sync_sched_exp_online_cleanup(int cpu)
{
}
-/**
- * synchronize_rcu_expedited - Brute-force RCU grace period
- *
- * Wait for an RCU-preempt grace period, but expedite it. The basic
- * idea is to IPI all non-idle non-nohz online CPUs. The IPI handler
- * checks whether the CPU is in an RCU-preempt critical section, and
- * if so, it sets a flag that causes the outermost rcu_read_unlock()
- * to report the quiescent state. On the other hand, if the CPU is
- * not in an RCU read-side critical section, the IPI handler reports
- * the quiescent state immediately.
- *
- * Although this is a greate improvement over previous expedited
- * implementations, it is still unfriendly to real-time workloads, so is
- * thus not recommended for any sort of common-case code. In fact, if
- * you are using synchronize_rcu_expedited() in a loop, please restructure
- * your code to batch your updates, and then Use a single synchronize_rcu()
- * instead.
- *
- * This has the same semantics as (but is more brutal than) synchronize_rcu().
- */
-void synchronize_rcu_expedited(void)
-{
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
- lock_is_held(&rcu_lock_map) ||
- lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
-
- if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
- return;
- _synchronize_rcu_expedited(sync_rcu_exp_handler);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-
#else /* #ifdef CONFIG_PREEMPT_RCU */
/* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *unused)
+static void rcu_exp_handler(void *unused)
{
struct rcu_data *rdp;
struct rcu_node *rnp;
@@ -798,44 +705,78 @@ static void sync_sched_exp_online_cleanup(int cpu)
rnp = rdp->mynode;
if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
return;
- ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
+ ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
WARN_ON_ONCE(ret);
}
-/*
- * Because a context switch is a grace period for !PREEMPT, any
- * blocking grace-period wait automatically implies a grace period if
- * there is only one CPU online at any point time during execution of
- * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
- * occasionally incorrectly indicate that there are multiple CPUs online
- * when there was in fact only one the whole time, as this just adds some
- * overhead: RCU still operates correctly.
- */
-static int rcu_blocking_is_gp(void)
-{
- int ret;
-
- might_sleep(); /* Check for RCU read-side critical section. */
- preempt_disable();
- ret = num_online_cpus() <= 1;
- preempt_enable();
- return ret;
-}
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-/* PREEMPT=n implementation of synchronize_rcu_expedited(). */
+/**
+ * synchronize_rcu_expedited - Brute-force RCU grace period
+ *
+ * Wait for an RCU grace period, but expedite it. The basic idea is to
+ * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
+ * the CPU is in an RCU critical section, and if so, it sets a flag that
+ * causes the outermost rcu_read_unlock() to report the quiescent state
+ * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
+ * other hand, if the CPU is not in an RCU read-side critical section,
+ * the IPI handler reports the quiescent state immediately.
+ *
+ * Although this is a great improvement over previous expedited
+ * implementations, it is still unfriendly to real-time workloads, and is
+ * thus not recommended for any sort of common-case code. In fact, if
+ * you are using synchronize_rcu_expedited() in a loop, please restructure
+ * your code to batch your updates, and then use a single synchronize_rcu()
+ * instead.
+ *
+ * This has the same semantics as (but is more brutal than) synchronize_rcu().
+ */
void synchronize_rcu_expedited(void)
{
+ struct rcu_data *rdp;
+ struct rcu_exp_work rew;
+ struct rcu_node *rnp;
+ unsigned long s;
+
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu_expedited() in RCU read-side critical section");
- /* If only one CPU, this is automatically a grace period. */
+ /* Is the state such that the call is a grace period? */
if (rcu_blocking_is_gp())
return;
- _synchronize_rcu_expedited(sync_sched_exp_handler);
+ /* If expedited grace periods are prohibited, fall back to normal. */
+ if (rcu_gp_is_normal()) {
+ wait_rcu_gp(call_rcu);
+ return;
+ }
+
+ /* Take a snapshot of the sequence number. */
+ s = rcu_exp_gp_seq_snap();
+ if (exp_funnel_lock(s))
+ return; /* Someone else did our work for us. */
+
+ /* Ensure that load happens before action based on it. */
+ if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
+ /* Direct call during scheduler init and early_initcalls(). */
+ rcu_exp_sel_wait_wake(s);
+ } else {
+ /* Marshal arguments & schedule the expedited grace period. */
+ rew.rew_s = s;
+ INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+ queue_work(rcu_gp_wq, &rew.rew_work);
+ }
+
+ /* Wait for expedited grace period to complete. */
+ rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
+ rnp = rcu_get_root();
+ wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
+ sync_exp_work_done(s));
+ smp_mb(); /* Workqueue actions happen before return. */
+
+ /* Let the next expedited grace period start. */
+ mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
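To make the batching advice in the synchronize_rcu_expedited() comment above concrete, here is a hedged sketch contrasting one grace period per element with a single grace period for a whole batch. struct bar, bar_lock, and the helpers are hypothetical; the second list_head exists only so elements can be queued for freeing without disturbing the RCU-visible linkage. Both functions assume they are the sole updater of the list.

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct bar {
            struct list_head list;          /* RCU-protected linkage. */
            struct list_head free_node;     /* Private linkage for deferred free. */
    };

    static DEFINE_SPINLOCK(bar_lock);

    /* Anti-pattern: one expedited grace period per element. */
    static void bar_del_all_slow(struct list_head *head)
    {
            struct bar *p, *tmp;

            list_for_each_entry_safe(p, tmp, head, list) {
                    spin_lock(&bar_lock);
                    list_del_rcu(&p->list);
                    spin_unlock(&bar_lock);
                    synchronize_rcu_expedited();    /* One GP per element. */
                    kfree(p);
            }
    }

    /* Batched: unlink everything, then wait for a single grace period. */
    static void bar_del_all_batched(struct list_head *head)
    {
            struct bar *p, *tmp;
            LIST_HEAD(doomed);

            spin_lock(&bar_lock);
            list_for_each_entry_safe(p, tmp, head, list) {
                    list_del_rcu(&p->list);
                    list_add(&p->free_node, &doomed);
            }
            spin_unlock(&bar_lock);

            synchronize_rcu();      /* One grace period covers all elements. */
            list_for_each_entry_safe(p, tmp, &doomed, free_node)
                    kfree(p);
    }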
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1b3dd2fc0cd6..97dba50f6fb2 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1,27 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion (tree-based version)
* Internal non-public definitions that provide either classic
* or preemptible semantics.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright Red Hat, 2009
* Copyright IBM Corporation, 2009
*
* Author: Ingo Molnar <mingo@elte.hu>
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Paul E. McKenney <paulmck@linux.ibm.com>
*/
#include <linux/delay.h>
@@ -34,17 +21,7 @@
#include "../time/tick-internal.h"
#ifdef CONFIG_RCU_BOOST
-
#include "../locking/rtmutex_common.h"
-
-/*
- * Control variables for per-CPU and per-rcu_node kthreads.
- */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
#else /* #ifdef CONFIG_RCU_BOOST */
/*
@@ -307,7 +284,7 @@ static void rcu_qs(void)
__this_cpu_read(rcu_data.gp_seq),
TPS("cpuqs"));
__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
- barrier(); /* Coordinate with rcu_flavor_check_callbacks(). */
+ barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
current->rcu_read_unlock_special.b.need_qs = false;
}
}
@@ -788,13 +765,13 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
}
/*
- * Check for a quiescent state from the current CPU. When a task blocks,
- * the task is recorded in the corresponding CPU's rcu_node structure,
- * which is checked elsewhere.
- *
- * Caller must disable hard irqs.
+ * Check for a quiescent state from the current CPU, including voluntary
+ * context switches for Tasks RCU. When a task blocks, the task is
+ * recorded in the corresponding CPU's rcu_node structure, which is checked
+ * elsewhere, hence this function need only check for quiescent states
+ * related to the current CPU, not to those related to tasks.
*/
-static void rcu_flavor_check_callbacks(int user)
+static void rcu_flavor_sched_clock_irq(int user)
{
struct task_struct *t = current;
@@ -825,54 +802,6 @@ static void rcu_flavor_check_callbacks(int user)
t->rcu_read_unlock_special.b.need_qs = true;
}
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. Note, however, that
- * upon return from synchronize_rcu(), the caller might well be executing
- * concurrently with new RCU read-side critical sections that began while
- * synchronize_rcu() was waiting. RCU read-side critical sections are
- * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
- * In addition, regions of code across which interrupts, preemption, or
- * softirqs have been disabled also serve as RCU read-side critical
- * sections. This includes hardware interrupt handlers, softirq handlers,
- * and NMI handlers.
- *
- * Note that this guarantee implies further memory-ordering guarantees.
- * On systems with more than one CPU, when synchronize_rcu() returns,
- * each CPU is guaranteed to have executed a full memory barrier since
- * the end of its last RCU read-side critical section whose beginning
- * preceded the call to synchronize_rcu(). In addition, each CPU having
- * an RCU read-side critical section that extends beyond the return from
- * synchronize_rcu() is guaranteed to have executed a full memory barrier
- * after the beginning of synchronize_rcu() and before the beginning of
- * that RCU read-side critical section. Note that these guarantees include
- * CPUs that are offline, idle, or executing in user mode, as well as CPUs
- * that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked synchronize_rcu(), which returned
- * to its caller on CPU B, then both CPU A and CPU B are guaranteed
- * to have executed a full memory barrier during the execution of
- * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
- * again only if the system has more than one CPU).
- */
-void synchronize_rcu(void)
-{
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
- lock_is_held(&rcu_lock_map) ||
- lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_rcu() in RCU read-side critical section");
- if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
- return;
- if (rcu_gp_is_expedited())
- synchronize_rcu_expedited();
- else
- wait_rcu_gp(call_rcu);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
/*
* Check for a task exiting while in a preemptible-RCU read-side
* critical section, clean up if so. No need to issue warnings,
@@ -1088,14 +1017,10 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
}
/*
- * Check to see if this CPU is in a non-context-switch quiescent state
- * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
- * Also schedule RCU core processing.
- *
- * This function must be called from hardirq context. It is normally
- * invoked from the scheduling-clock interrupt.
+ * Check to see if this CPU is in a non-context-switch quiescent state,
+ * namely user mode and idle loop.
*/
-static void rcu_flavor_check_callbacks(int user)
+static void rcu_flavor_sched_clock_irq(int user)
{
if (user || rcu_is_cpu_rrupt_from_idle()) {
@@ -1115,22 +1040,6 @@ static void rcu_flavor_check_callbacks(int user)
}
}
-/* PREEMPT=n implementation of synchronize_rcu(). */
-void synchronize_rcu(void)
-{
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
- lock_is_held(&rcu_lock_map) ||
- lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_rcu() in RCU read-side critical section");
- if (rcu_blocking_is_gp())
- return;
- if (rcu_gp_is_expedited())
- synchronize_rcu_expedited();
- else
- wait_rcu_gp(call_rcu);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
/*
* Because preemptible RCU does not exist, tasks cannot possibly exit
* while in preemptible RCU read-side critical sections.
@@ -1307,11 +1216,11 @@ static void invoke_rcu_callbacks_kthread(void)
unsigned long flags;
local_irq_save(flags);
- __this_cpu_write(rcu_cpu_has_work, 1);
- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
- current != __this_cpu_read(rcu_cpu_kthread_task)) {
- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
- __this_cpu_read(rcu_cpu_kthread_status));
+ __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
+ if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL &&
+ current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) {
+ rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task),
+ __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
}
local_irq_restore(flags);
}
@@ -1322,7 +1231,7 @@ static void invoke_rcu_callbacks_kthread(void)
*/
static bool rcu_is_callbacks_kthread(void)
{
- return __this_cpu_read(rcu_cpu_kthread_task) == current;
+ return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
}
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
@@ -1369,11 +1278,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
return 0;
}
-static void rcu_kthread_do_work(void)
-{
- rcu_do_batch(this_cpu_ptr(&rcu_data));
-}
-
static void rcu_cpu_kthread_setup(unsigned int cpu)
{
struct sched_param sp;
@@ -1384,12 +1288,12 @@ static void rcu_cpu_kthread_setup(unsigned int cpu)
static void rcu_cpu_kthread_park(unsigned int cpu)
{
- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+ per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}
static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
- return __this_cpu_read(rcu_cpu_has_work);
+ return __this_cpu_read(rcu_data.rcu_cpu_has_work);
}
/*
@@ -1399,21 +1303,20 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
*/
static void rcu_cpu_kthread(unsigned int cpu)
{
- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
+ unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
+ char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
int spincnt;
for (spincnt = 0; spincnt < 10; spincnt++) {
trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
local_bh_disable();
*statusp = RCU_KTHREAD_RUNNING;
- this_cpu_inc(rcu_cpu_kthread_loops);
local_irq_disable();
work = *workp;
*workp = 0;
local_irq_enable();
if (work)
- rcu_kthread_do_work();
+ rcu_do_batch(this_cpu_ptr(&rcu_data));
local_bh_enable();
if (*workp == 0) {
trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
@@ -1459,7 +1362,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
}
static struct smp_hotplug_thread rcu_cpu_thread_spec = {
- .store = &rcu_cpu_kthread_task,
+ .store = &rcu_data.rcu_cpu_kthread_task,
.thread_should_run = rcu_cpu_kthread_should_run,
.thread_fn = rcu_cpu_kthread,
.thread_comm = "rcuc/%u",
@@ -1476,7 +1379,7 @@ static void __init rcu_spawn_boost_kthreads(void)
int cpu;
for_each_possible_cpu(cpu)
- per_cpu(rcu_cpu_has_work, cpu) = 0;
+ per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
return;
rcu_for_each_leaf_node(rnp)
@@ -1543,7 +1446,7 @@ static void rcu_prepare_kthreads(int cpu)
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
*nextevt = KTIME_MAX;
- return rcu_cpu_has_callbacks(NULL);
+ return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist);
}
/*
@@ -1562,14 +1465,6 @@ static void rcu_prepare_for_idle(void)
{
}
-/*
- * Don't bother keeping a running count of the number of RCU callbacks
- * posted because CONFIG_RCU_FAST_NO_HZ=n.
- */
-static void rcu_idle_count_callbacks_posted(void)
-{
-}
-
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
/*
@@ -1652,11 +1547,8 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
lockdep_assert_irqs_disabled();
- /* Snapshot to detect later posting of non-lazy callback. */
- rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
-
/* If no callbacks, RCU doesn't need the CPU. */
- if (!rcu_cpu_has_callbacks(&rdp->all_lazy)) {
+ if (rcu_segcblist_empty(&rdp->cblist)) {
*nextevt = KTIME_MAX;
return 0;
}
@@ -1670,11 +1562,12 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
rdp->last_accelerate = jiffies;
/* Request timer delay depending on laziness, and round. */
- if (!rdp->all_lazy) {
+ rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist);
+ if (rdp->all_lazy) {
+ dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
+ } else {
dj = round_up(rcu_idle_gp_delay + jiffies,
rcu_idle_gp_delay) - jiffies;
- } else {
- dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
}
*nextevt = basemono + dj * TICK_NSEC;
return 0;
@@ -1704,7 +1597,7 @@ static void rcu_prepare_for_idle(void)
/* Handle nohz enablement switches conservatively. */
tne = READ_ONCE(tick_nohz_active);
if (tne != rdp->tick_nohz_enabled_snap) {
- if (rcu_cpu_has_callbacks(NULL))
+ if (!rcu_segcblist_empty(&rdp->cblist))
invoke_rcu_core(); /* force nohz to see update. */
rdp->tick_nohz_enabled_snap = tne;
return;
@@ -1717,10 +1610,8 @@ static void rcu_prepare_for_idle(void)
* callbacks, invoke RCU core for the side-effect of recalculating
* idle duration on re-entry to idle.
*/
- if (rdp->all_lazy &&
- rdp->nonlazy_posted != rdp->nonlazy_posted_snap) {
+ if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) {
rdp->all_lazy = false;
- rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
invoke_rcu_core();
return;
}
@@ -1756,19 +1647,6 @@ static void rcu_cleanup_after_idle(void)
invoke_rcu_core();
}
-/*
- * Keep a running count of the number of non-lazy callbacks posted
- * on this CPU. This running counter (which is never decremented) allows
- * rcu_prepare_for_idle() to detect when something out of the idle loop
- * posts a callback, even if an equal number of callbacks are invoked.
- * Of course, callbacks should only be posted from within a trace event
- * designed to be called from idle or from within RCU_NONIDLE().
- */
-static void rcu_idle_count_callbacks_posted(void)
-{
- __this_cpu_add(rcu_data.nonlazy_posted, 1);
-}
-
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
#ifdef CONFIG_RCU_FAST_NO_HZ
@@ -1776,13 +1654,12 @@ static void rcu_idle_count_callbacks_posted(void)
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- unsigned long nlpd = rdp->nonlazy_posted - rdp->nonlazy_posted_snap;
- sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
+ sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
rdp->last_accelerate & 0xffff, jiffies & 0xffff,
- ulong2long(nlpd),
- rdp->all_lazy ? 'L' : '.',
- rdp->tick_nohz_enabled_snap ? '.' : 'D');
+ ".l"[rdp->all_lazy],
+ ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
+ ".D"[!rdp->tick_nohz_enabled_snap]);
}
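The ".l"[rdp->all_lazy] expressions above use a compact C idiom: indexing a two-character string literal with a boolean, so index 0 yields '.' and index 1 yields the flag letter. A tiny stand-alone illustration (userspace C, purely illustrative):

    #include <stdio.h>

    int main(void)
    {
            int all_lazy = 1;
            int nohz_enabled = 0;

            /* ".l"[0] == '.', ".l"[1] == 'l'; likewise for ".D". */
            printf("%c%c\n", ".l"[all_lazy], ".D"[!nohz_enabled]);
            return 0;
    }

This prints "lD": each position is either a dot (flag clear) or the flag's letter (flag set), which keeps the stall-warning output fixed-width.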
#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
@@ -1868,22 +1745,24 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp)
/*
* Offload callback processing from the boot-time-specified set of CPUs
- * specified by rcu_nocb_mask. For each CPU in the set, there is a
- * kthread created that pulls the callbacks from the corresponding CPU,
- * waits for a grace period to elapse, and invokes the callbacks.
- * The no-CBs CPUs do a wake_up() on their kthread when they insert
- * a callback into any empty list, unless the rcu_nocb_poll boot parameter
- * has been specified, in which case each kthread actively polls its
- * CPU. (Which isn't so great for energy efficiency, but which does
- * reduce RCU's overhead on that CPU.)
+ * specified by rcu_nocb_mask. For the CPUs in the set, there are kthreads
+ * created that pull the callbacks from the corresponding CPU, wait for
+ * a grace period to elapse, and invoke the callbacks. These kthreads
+ * are organized into leaders, which manage incoming callbacks, wait for
+ * grace periods, and awaken followers, and the followers, which only
+ * invoke callbacks. Each leader is its own follower. The no-CBs CPUs
+ * do a wake_up() on their kthread when they insert a callback into any
+ * empty list, unless the rcu_nocb_poll boot parameter has been specified,
+ * in which case each kthread actively polls its CPU. (Which isn't so great
+ * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
*
* This is intended to be used in conjunction with Frederic Weisbecker's
* adaptive-idle work, which would seriously reduce OS jitter on CPUs
* running CPU-bound user-mode computations.
*
- * Offloading of callback processing could also in theory be used as
- * an energy-efficiency measure because CPUs with no RCU callbacks
- * queued are more aggressive about entering dyntick-idle mode.
+ * Offloading of callbacks can also be used as an energy-efficiency
+ * measure because CPUs with no RCU callbacks queued are more aggressive
+ * about entering dyntick-idle mode.
*/
@@ -1987,10 +1866,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}
-/*
- * Does the specified CPU need an RCU callback for this invocation
- * of rcu_barrier()?
- */
+/* Does rcu_barrier need to queue an RCU callback on the specified CPU? */
static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
@@ -2006,8 +1882,8 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
* callbacks would be posted. In the worst case, the first
* barrier in rcu_barrier() suffices (but the caller cannot
* necessarily rely on this, not a substitute for the caller
- * getting the concurrency design right!). There must also be
- * a barrier between the following load an posting of a callback
+ * getting the concurrency design right!). There must also be a
+ * barrier between the following load and posting of a callback
* (if a callback is in fact needed). This is associated with an
* atomic_inc() in the caller.
*/
@@ -2517,9 +2393,9 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
/*
* If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo kthreads, spawn them.
+ * rcuo kthread, spawn it.
*/
-static void rcu_spawn_all_nocb_kthreads(int cpu)
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
if (rcu_scheduler_fully_active)
rcu_spawn_one_nocb_kthread(cpu);
@@ -2536,7 +2412,7 @@ static void __init rcu_spawn_nocb_kthreads(void)
int cpu;
for_each_online_cpu(cpu)
- rcu_spawn_all_nocb_kthreads(cpu);
+ rcu_spawn_cpu_nocb_kthread(cpu);
}
/* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */
@@ -2670,7 +2546,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
}
-static void rcu_spawn_all_nocb_kthreads(int cpu)
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
}
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 1971869c4072..cbaa976c5945 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -1,26 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Read-Copy Update mechanism for mutual exclusion
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2001
*
* Authors: Dipankar Sarma <dipankar@in.ibm.com>
* Manfred Spraul <manfred@colorfullife.com>
*
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -52,6 +39,7 @@
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
+#include <linux/kprobes.h>
#define CREATE_TRACE_POINTS
@@ -249,6 +237,7 @@ int notrace debug_lockdep_rcu_enabled(void)
current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
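For context on the NOKPROBE_SYMBOL() annotations added in this series: they blacklist a function from kprobes, which matters for helpers that may themselves run on the breakpoint/kprobe handling path. A hedged, hypothetical sketch of the same annotation on an out-of-tree helper:

    #include <linux/kprobes.h>
    #include <linux/types.h>

    /*
     * Hypothetical helper reachable from the trap-handling path; placing a
     * kprobe on it could recurse, so it is excluded from kprobes.
     */
    static bool my_trap_helper(unsigned long addr)
    {
            return addr != 0;
    }
    NOKPROBE_SYMBOL(my_trap_helper);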
/**
* rcu_read_lock_held() - might we be in RCU read-side critical section?
diff --git a/kernel/relay.c b/kernel/relay.c
index 04f248644e06..9e0f52375487 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
dentry = chan->cb->create_buf_file(tmpname, chan->parent,
S_IRUSR, buf,
&chan->is_global);
+ if (IS_ERR(dentry))
+ dentry = NULL;
kfree(tmpname);
@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
dentry = chan->cb->create_buf_file(NULL, NULL,
S_IRUSR, buf,
&chan->is_global);
- if (WARN_ON(dentry))
+ if (IS_ERR_OR_NULL(dentry))
goto free_buf;
}
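The relay change above copes with create_buf_file callbacks that may now return error pointers from debugfs rather than NULL. A typical client callback, sketched here under the assumption that the buffer files live in debugfs (the function name is illustrative):

    #include <linux/debugfs.h>
    #include <linux/relay.h>

    static struct dentry *my_create_buf_file(const char *filename,
                                             struct dentry *parent,
                                             umode_t mode,
                                             struct rchan_buf *buf,
                                             int *is_global)
    {
            /*
             * debugfs_create_file() may return an ERR_PTR(); with the fix
             * above, relay treats that the same as "no dentry" instead of
             * dereferencing it.
             */
            return debugfs_create_file(filename, mode, parent, buf,
                                       &relay_file_operations);
    }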
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a674c7db2f29..ead464a0f2e5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -107,11 +107,12 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
* [L] ->on_rq
* RELEASE (rq->lock)
*
- * If we observe the old CPU in task_rq_lock, the acquire of
+ * If we observe the old CPU in task_rq_lock(), the acquire of
* the old rq->lock will fully serialize against the stores.
*
- * If we observe the new CPU in task_rq_lock, the acquire will
- * pair with the WMB to ensure we must then also see migrating.
+ * If we observe the new CPU in task_rq_lock(), the address
+ * dependency headed by '[L] rq = task_rq()' and the acquire
+ * will pair with the WMB to ensure we then also see migrating.
*/
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
rq_pin_lock(rq, rf);
@@ -180,6 +181,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
update_irq_load_avg(rq, irq_delta + steal);
#endif
+ update_rq_clock_pelt(rq, delta);
}
void update_rq_clock(struct rq *rq)
@@ -396,7 +398,7 @@ static bool set_nr_if_polling(struct task_struct *p)
#endif
#endif
-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
struct wake_q_node *node = &task->wake_q;
@@ -405,19 +407,60 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
* its already queued (either by us or someone else) and will get the
* wakeup due to that.
*
- * This cmpxchg() executes a full barrier, which pairs with the full
- * barrier executed by the wakeup in wake_up_q().
+ * In order to ensure that a pending wakeup will observe our pending
+ * state, even in the failed case, an explicit smp_mb() must be used.
*/
- if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
- return;
-
- get_task_struct(task);
+ smp_mb__before_atomic();
+ if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
+ return false;
/*
* The head is context local, there can be no concurrency.
*/
*head->lastp = node;
head->lastp = &node->next;
+ return true;
+}
+
+/**
+ * wake_q_add() - queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ */
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+ if (__wake_q_add(head, task))
+ get_task_struct(task);
+}
+
+/**
+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ *
+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
+ * that already hold a reference to @task can call the 'safe' version and trust
+ * wake_q to do the right thing depending on whether or not the @task is already
+ * queued for wakeup.
+ */
+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+{
+ if (!__wake_q_add(head, task))
+ put_task_struct(task);
}
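A hedged sketch of how wake_q_add()/wake_up_q() are meant to be used, per the comments above: queue wakeups while holding a lock, then issue them after the lock is dropped. struct my_waiter and the surrounding code are hypothetical.

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/sched/wake_q.h>
    #include <linux/spinlock.h>

    struct my_waiter {
            struct list_head node;
            struct task_struct *task;
    };

    static void wake_all_waiters(spinlock_t *lock, struct list_head *waiters)
    {
            DEFINE_WAKE_Q(wake_q);
            struct my_waiter *w, *tmp;

            spin_lock(lock);
            list_for_each_entry_safe(w, tmp, waiters, node) {
                    list_del_init(&w->node);
                    /*
                     * wake_q_add() takes its own task reference; a caller
                     * that already holds one could use wake_q_add_safe().
                     */
                    wake_q_add(&wake_q, w->task);
            }
            spin_unlock(lock);

            /* Wakeups may also happen earlier; see the comments above. */
            wake_up_q(&wake_q);
    }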
void wake_up_q(struct wake_q_head *head)
@@ -915,7 +958,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
{
lockdep_assert_held(&rq->lock);
- p->on_rq = TASK_ON_RQ_MIGRATING;
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
dequeue_task(rq, p, DEQUEUE_NOCLOCK);
set_task_cpu(p, new_cpu);
rq_unlock(rq, rf);
@@ -2177,6 +2220,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
+#ifdef CONFIG_COMPACTION
+ p->capture_control = NULL;
+#endif
init_numa_balancing(clone_flags, p);
}
@@ -2418,7 +2464,7 @@ void wake_up_new_task(struct task_struct *p)
#endif
rq = __task_rq_lock(p, &rf);
update_rq_clock(rq);
- post_init_entity_util_avg(&p->se);
+ post_init_entity_util_avg(p);
activate_task(rq, p, ENQUEUE_NOCLOCK);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -5252,9 +5298,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
}
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
- compat_pid_t, pid,
- struct old_timespec32 __user *, interval)
+SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
+ struct old_timespec32 __user *, interval)
{
struct timespec64 t;
int retval = sched_rr_get_interval(pid, &t);
@@ -5854,14 +5899,11 @@ void __init sched_init_smp(void)
/*
* There's no userspace yet to cause hotplug operations; hence all the
* CPU masks are stable and all blatant races in the below code cannot
- * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
- * but there won't be any contention on it.
+ * happen.
*/
- cpus_read_lock();
mutex_lock(&sched_domains_mutex);
sched_init_domains(cpu_active_mask);
mutex_unlock(&sched_domains_mutex);
- cpus_read_unlock();
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
@@ -6149,6 +6191,34 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL(___might_sleep);
+
+void __cant_sleep(const char *file, int line, int preempt_offset)
+{
+ static unsigned long prev_jiffy;
+
+ if (irqs_disabled())
+ return;
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ return;
+
+ if (preempt_count() > preempt_offset)
+ return;
+
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ return;
+ prev_jiffy = jiffies;
+
+ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
+ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(),
+ current->pid, current->comm);
+
+ debug_show_held_locks(current);
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+EXPORT_SYMBOL_GPL(__cant_sleep);
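The new __cant_sleep() is the inverse of ___might_sleep(): it complains when code that is assumed to run atomically is in fact preemptible. It is presumably reached through a cant_sleep() wrapper macro analogous to might_sleep(); the helper below is a hypothetical sketch of annotating such a section, with that wrapper treated as an assumption.

    #include <linux/kernel.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_event_count);

    /* Must be called with preemption (or interrupts) disabled. */
    static void my_count_event(void)
    {
            cant_sleep();   /* Assumed wrapper around __cant_sleep(). */
            __this_cpu_inc(my_event_count);
    }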
#endif
#ifdef CONFIG_MAGIC_SYSRQ
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 22bd8980f32f..835671f0f917 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -48,8 +48,8 @@ EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
*
* Clear the update_util_data pointer for the given CPU.
*
- * Callers must use RCU-sched callbacks to free any memory that might be
- * accessed via the old update_util_data pointer or invoke synchronize_sched()
+ * Callers must use RCU callbacks to free any memory that might be
+ * accessed via the old update_util_data pointer or invoke synchronize_rcu()
* right after this function to avoid use-after-free.
*/
void cpufreq_remove_update_util_hook(int cpu)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 033ec7c45f13..2efe629425be 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -859,7 +859,7 @@ static void sugov_stop(struct cpufreq_policy *policy)
for_each_cpu(cpu, policy->cpus)
cpufreq_remove_update_util_hook(cpu);
- synchronize_sched();
+ synchronize_rcu();
if (!policy->fast_switch_enabled) {
irq_work_sync(&sg_policy->irq_work);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fb8b7b5d745d..6a73e41a2016 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1767,7 +1767,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
deadline_queue_push_tasks(rq);
if (rq->curr->sched_class != &dl_sched_class)
- update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
return p;
}
@@ -1776,7 +1776,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
update_curr_dl(rq);
- update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
+ update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
@@ -1793,7 +1793,7 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
update_curr_dl(rq);
- update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
+ update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
/*
* Even when we have runtime, update_curr_dl() might have resulted in us
* not being the leftmost task anymore. In that case NEED_RESCHED will
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index de3de997e245..8039d62ae36e 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -315,6 +315,7 @@ void register_sched_domain_sysctl(void)
{
static struct ctl_table *cpu_entries;
static struct ctl_table **cpu_idx;
+ static bool init_done = false;
char buf[32];
int i;
@@ -344,7 +345,10 @@ void register_sched_domain_sysctl(void)
if (!cpumask_available(sd_sysctl_cpus)) {
if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
return;
+ }
+ if (!init_done) {
+ init_done = true;
/* init to possible to not have holes in @cpu_entries */
cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50aa2aba69bd..ea74d43924b2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -248,13 +248,6 @@ const struct sched_class fair_sched_class;
*/
#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/* cpu runqueue to which this cfs_rq is attached */
-static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
-{
- return cfs_rq->rq;
-}
-
static inline struct task_struct *task_of(struct sched_entity *se)
{
SCHED_WARN_ON(!entity_is_task(se));
@@ -282,79 +275,103 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
return grp->my_q;
}
-static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
- if (!cfs_rq->on_list) {
- struct rq *rq = rq_of(cfs_rq);
- int cpu = cpu_of(rq);
+ struct rq *rq = rq_of(cfs_rq);
+ int cpu = cpu_of(rq);
+
+ if (cfs_rq->on_list)
+ return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
+
+ cfs_rq->on_list = 1;
+
+ /*
+ * Ensure we either appear before our parent (if already
+ * enqueued) or force our parent to appear after us when it is
+ * enqueued. The fact that we always enqueue bottom-up
+ * reduces this to two cases and a special case for the root
+ * cfs_rq. Furthermore, it also means that we will always reset
+ * tmp_alone_branch either when the branch is connected
+ * to a tree or when we reach the top of the tree
+ */
+ if (cfs_rq->tg->parent &&
+ cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
/*
- * Ensure we either appear before our parent (if already
- * enqueued) or force our parent to appear after us when it is
- * enqueued. The fact that we always enqueue bottom-up
- * reduces this to two cases and a special case for the root
- * cfs_rq. Furthermore, it also means that we will always reset
- * tmp_alone_branch either when the branch is connected
- * to a tree or when we reach the beg of the tree
+ * If parent is already on the list, we add the child
+ * just before. Thanks to the circular linked property of
+ * the list, this means putting the child at the tail
+ * of the list that starts at the parent.
*/
- if (cfs_rq->tg->parent &&
- cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
- /*
- * If parent is already on the list, we add the child
- * just before. Thanks to circular linked property of
- * the list, this means to put the child at the tail
- * of the list that starts by parent.
- */
- list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
- &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
- /*
- * The branch is now connected to its tree so we can
- * reset tmp_alone_branch to the beginning of the
- * list.
- */
- rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
- } else if (!cfs_rq->tg->parent) {
- /*
- * cfs rq without parent should be put
- * at the tail of the list.
- */
- list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
- &rq->leaf_cfs_rq_list);
- /*
- * We have reach the beg of a tree so we can reset
- * tmp_alone_branch to the beginning of the list.
- */
- rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
- } else {
- /*
- * The parent has not already been added so we want to
- * make sure that it will be put after us.
- * tmp_alone_branch points to the beg of the branch
- * where we will add parent.
- */
- list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
- rq->tmp_alone_branch);
- /*
- * update tmp_alone_branch to points to the new beg
- * of the branch
- */
- rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
- }
+ list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+ &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
+ /*
+ * The branch is now connected to its tree so we can
+ * reset tmp_alone_branch to the beginning of the
+ * list.
+ */
+ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+ return true;
+ }
- cfs_rq->on_list = 1;
+ if (!cfs_rq->tg->parent) {
+ /*
+ * A cfs_rq without a parent should be put
+ * at the tail of the list.
+ */
+ list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+ &rq->leaf_cfs_rq_list);
+ /*
+ * We have reached the top of a tree so we can reset
+ * tmp_alone_branch to the beginning of the list.
+ */
+ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+ return true;
}
+
+ /*
+ * The parent has not already been added so we want to
+ * make sure that it will be put after us.
+ * tmp_alone_branch points to the beginning of the branch
+ * where we will add parent.
+ */
+ list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
+ /*
+ * Update tmp_alone_branch to point to the new beginning
+ * of the branch.
+ */
+ rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
+ return false;
}
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
if (cfs_rq->on_list) {
+ struct rq *rq = rq_of(cfs_rq);
+
+ /*
+ * With cfs_rq being unthrottled/throttled during an enqueue,
+ * it can happen that tmp_alone_branch points to a leaf that
+ * we finally want to delete. In this case, tmp_alone_branch moves
+ * to the prev element but it will point to rq->leaf_cfs_rq_list
+ * at the end of the enqueue.
+ */
+ if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
+ rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
+
list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
cfs_rq->on_list = 0;
}
}
-/* Iterate through all leaf cfs_rq's on a runqueue: */
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
- list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+static inline void assert_list_leaf_cfs_rq(struct rq *rq)
+{
+ SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
+}
+
+/* Iterate through all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
+ list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
+ leaf_cfs_rq_list)
/* Do the two (enqueued) entities belong to the same group ? */
static inline struct cfs_rq *
@@ -410,12 +427,6 @@ static inline struct task_struct *task_of(struct sched_entity *se)
return container_of(se, struct task_struct, se);
}
-static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
-{
- return container_of(cfs_rq, struct rq, cfs);
-}
-
-
#define for_each_sched_entity(se) \
for (; se; se = NULL)
@@ -438,16 +449,21 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
return NULL;
}
-static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
+ return true;
}
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
- for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+static inline void assert_list_leaf_cfs_rq(struct rq *rq)
+{
+}
+
+#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
+ for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
@@ -686,9 +702,8 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
-#ifdef CONFIG_SMP
#include "pelt.h"
-#include "sched-pelt.h"
+#ifdef CONFIG_SMP
static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
@@ -744,8 +759,9 @@ static void attach_entity_cfs_rq(struct sched_entity *se);
* Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
* if util_avg > util_avg_cap.
*/
-void post_init_entity_util_avg(struct sched_entity *se)
+void post_init_entity_util_avg(struct task_struct *p)
{
+ struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
@@ -763,22 +779,19 @@ void post_init_entity_util_avg(struct sched_entity *se)
}
}
- if (entity_is_task(se)) {
- struct task_struct *p = task_of(se);
- if (p->sched_class != &fair_sched_class) {
- /*
- * For !fair tasks do:
- *
- update_cfs_rq_load_avg(now, cfs_rq);
- attach_entity_load_avg(cfs_rq, se, 0);
- switched_from_fair(rq, p);
- *
- * such that the next switched_to_fair() has the
- * expected state.
- */
- se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
- return;
- }
+ if (p->sched_class != &fair_sched_class) {
+ /*
+ * For !fair tasks do:
+ *
+ update_cfs_rq_load_avg(now, cfs_rq);
+ attach_entity_load_avg(cfs_rq, se, 0);
+ switched_from_fair(rq, p);
+ *
+ * such that the next switched_to_fair() has the
+ * expected state.
+ */
+ se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
+ return;
}
attach_entity_cfs_rq(se);
@@ -788,7 +801,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
void init_entity_runnable_average(struct sched_entity *se)
{
}
-void post_init_entity_util_avg(struct sched_entity *se)
+void post_init_entity_util_avg(struct task_struct *p)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
@@ -1035,7 +1048,7 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
unsigned int sysctl_numa_balancing_scan_delay = 1000;
struct numa_group {
- atomic_t refcount;
+ refcount_t refcount;
spinlock_t lock; /* nr_tasks, tasks */
int nr_tasks;
@@ -1104,7 +1117,7 @@ static unsigned int task_scan_start(struct task_struct *p)
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
- period *= atomic_read(&ng->refcount);
+ period *= refcount_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
}
@@ -1127,7 +1140,7 @@ static unsigned int task_scan_max(struct task_struct *p)
unsigned long private = group_faults_priv(ng);
unsigned long period = smax;
- period *= atomic_read(&ng->refcount);
+ period *= refcount_read(&ng->refcount);
period *= shared + 1;
period /= private + shared + 1;
@@ -1160,7 +1173,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
/* New address space, reset the preferred nid */
if (!(clone_flags & CLONE_VM)) {
- p->numa_preferred_nid = -1;
+ p->numa_preferred_nid = NUMA_NO_NODE;
return;
}
@@ -1180,13 +1193,13 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
- rq->nr_numa_running += (p->numa_preferred_nid != -1);
+ rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}
static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
- rq->nr_numa_running -= (p->numa_preferred_nid != -1);
+ rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}
@@ -1400,7 +1413,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
* two full passes of the "multi-stage node selection" test that is
* executed below.
*/
- if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
+ if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
(cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
return true;
@@ -1848,7 +1861,7 @@ static void numa_migrate_preferred(struct task_struct *p)
unsigned long interval = HZ;
/* This task has no NUMA fault statistics yet */
- if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
+ if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
return;
/* Periodically retry migrating the task to the preferred node */
@@ -2095,7 +2108,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
static void task_numa_placement(struct task_struct *p)
{
- int seq, nid, max_nid = -1;
+ int seq, nid, max_nid = NUMA_NO_NODE;
unsigned long max_faults = 0;
unsigned long fault_types[2] = { 0, 0 };
unsigned long total_faults;
@@ -2203,12 +2216,12 @@ static void task_numa_placement(struct task_struct *p)
static inline int get_numa_group(struct numa_group *grp)
{
- return atomic_inc_not_zero(&grp->refcount);
+ return refcount_inc_not_zero(&grp->refcount);
}
static inline void put_numa_group(struct numa_group *grp)
{
- if (atomic_dec_and_test(&grp->refcount))
+ if (refcount_dec_and_test(&grp->refcount))
kfree_rcu(grp, rcu);
}
@@ -2229,7 +2242,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
if (!grp)
return;
- atomic_set(&grp->refcount, 1);
+ refcount_set(&grp->refcount, 1);
grp->active_nodes = 1;
grp->max_faults_cpu = 0;
spin_lock_init(&grp->lock);
@@ -2638,7 +2651,8 @@ static void update_scan_period(struct task_struct *p, int new_cpu)
* the preferred node.
*/
if (dst_nid == p->numa_preferred_nid ||
- (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
+ (p->numa_preferred_nid != NUMA_NO_NODE &&
+ src_nid != p->numa_preferred_nid))
return;
}
@@ -3122,7 +3136,7 @@ void set_task_rq_fair(struct sched_entity *se,
p_last_update_time = prev->avg.last_update_time;
n_last_update_time = next->avg.last_update_time;
#endif
- __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);
+ __update_load_avg_blocked_se(p_last_update_time, se);
se->avg.last_update_time = n_last_update_time;
}
@@ -3257,11 +3271,11 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
/*
* runnable_sum can't be lower than running_sum
- * As running sum is scale with CPU capacity wehreas the runnable sum
- * is not we rescale running_sum 1st
+ * Rescale running sum to be in the same range as runnable sum
+ * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT]
+ * runnable_sum is in [0 : LOAD_AVG_MAX]
*/
- running_sum = se->avg.util_sum /
- arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+ running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
runnable_sum = max(runnable_sum, running_sum);
load_sum = (s64)se_weight(se) * runnable_sum;
@@ -3364,7 +3378,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
/**
* update_cfs_rq_load_avg - update the cfs_rq's load/util averages
- * @now: current time, as per cfs_rq_clock_task()
+ * @now: current time, as per cfs_rq_clock_pelt()
* @cfs_rq: cfs_rq to update
*
* The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
@@ -3409,7 +3423,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
decayed = 1;
}
- decayed |= __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
+ decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
#ifndef CONFIG_64BIT
smp_wmb();
@@ -3499,9 +3513,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
/* Update task and its cfs_rq load average */
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- u64 now = cfs_rq_clock_task(cfs_rq);
- struct rq *rq = rq_of(cfs_rq);
- int cpu = cpu_of(rq);
+ u64 now = cfs_rq_clock_pelt(cfs_rq);
int decayed;
/*
@@ -3509,7 +3521,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
* track group sched_entity load average for task_h_load calc in migration
*/
if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
- __update_load_avg_se(now, cpu, cfs_rq, se);
+ __update_load_avg_se(now, cfs_rq, se);
decayed = update_cfs_rq_load_avg(now, cfs_rq);
decayed |= propagate_entity_load_avg(se);
@@ -3561,7 +3573,7 @@ void sync_entity_load_avg(struct sched_entity *se)
u64 last_update_time;
last_update_time = cfs_rq_last_update_time(cfs_rq);
- __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se);
+ __update_load_avg_blocked_se(last_update_time, se);
}
/*
@@ -3577,10 +3589,6 @@ void remove_entity_load_avg(struct sched_entity *se)
* tasks cannot exit without having gone through wake_up_new_task() ->
* post_init_entity_util_avg() which will have added things to the
* cfs_rq, so we can remove unconditionally.
- *
- * Similarly for groups, they will have passed through
- * post_init_entity_util_avg() before unregister_sched_fair_group()
- * calls this.
*/
sync_entity_load_avg(se);
@@ -3654,6 +3662,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
{
long last_ewma_diff;
struct util_est ue;
+ int cpu;
if (!sched_feat(UTIL_EST))
return;
@@ -3688,6 +3697,14 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
return;
/*
+ * To avoid overestimating the actual task utilization, skip the update if
+ * we cannot guarantee there is idle time on this CPU.
+ */
+ cpu = cpu_of(rq_of(cfs_rq));
+ if (task_util(p) > capacity_orig_of(cpu))
+ return;
+
+ /*
* Update Task's estimated utilization
*
* When *p completes an activation we can consolidate another sample
@@ -4429,6 +4446,10 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
/* adjust cfs_rq_clock_task() */
cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
cfs_rq->throttled_clock_task;
+
+ /* Add a cfs_rq with an already running entity to the list */
+ if (cfs_rq->nr_running >= 1)
+ list_add_leaf_cfs_rq(cfs_rq);
}
return 0;
@@ -4440,8 +4461,10 @@ static int tg_throttle_down(struct task_group *tg, void *data)
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
/* group is entering throttled state, stop time */
- if (!cfs_rq->throttle_count)
+ if (!cfs_rq->throttle_count) {
cfs_rq->throttled_clock_task = rq_clock_task(rq);
+ list_del_leaf_cfs_rq(cfs_rq);
+ }
cfs_rq->throttle_count++;
return 0;
@@ -4544,6 +4567,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
break;
}
+ assert_list_leaf_cfs_rq(rq);
+
if (!se)
add_nr_running(rq, task_delta);
@@ -4565,7 +4590,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
struct rq *rq = rq_of(cfs_rq);
struct rq_flags rf;
- rq_lock(rq, &rf);
+ rq_lock_irqsave(rq, &rf);
if (!cfs_rq_throttled(cfs_rq))
goto next;
@@ -4582,7 +4607,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
unthrottle_cfs_rq(cfs_rq);
next:
- rq_unlock(rq, &rf);
+ rq_unlock_irqrestore(rq, &rf);
if (!remaining)
break;
@@ -4598,7 +4623,7 @@ next:
* period the timer is deactivated until scheduling resumes; cfs_b->idle is
* used to track this state.
*/
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
{
u64 runtime, runtime_expires;
int throttled;
@@ -4640,11 +4665,11 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
runtime = cfs_b->runtime;
cfs_b->distribute_running = 1;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
/* we can't nest cfs_b->lock while distributing bandwidth */
runtime = distribute_cfs_runtime(cfs_b, runtime,
runtime_expires);
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
@@ -4753,17 +4778,18 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+ unsigned long flags;
u64 expires;
/* confirm we're still not at a refresh boundary */
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
if (cfs_b->distribute_running) {
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return;
}
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return;
}
@@ -4774,18 +4800,18 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
if (runtime)
cfs_b->distribute_running = 1;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
if (!runtime)
return;
runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
if (expires == cfs_b->runtime_expires)
lsub_positive(&cfs_b->runtime, runtime);
cfs_b->distribute_running = 0;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
}
/*
@@ -4863,20 +4889,21 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, period_timer);
+ unsigned long flags;
int overrun;
int idle = 0;
- raw_spin_lock(&cfs_b->lock);
+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
for (;;) {
overrun = hrtimer_forward_now(timer, cfs_b->period);
if (!overrun)
break;
- idle = do_sched_cfs_period_timer(cfs_b, overrun);
+ idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
}
if (idle)
cfs_b->period_active = 0;
- raw_spin_unlock(&cfs_b->lock);
+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
@@ -4986,6 +5013,12 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
}
#else /* CONFIG_CFS_BANDWIDTH */
+
+static inline bool cfs_bandwidth_used(void)
+{
+ return false;
+}
+
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
return rq_clock_task(rq_of(cfs_rq));
@@ -5177,6 +5210,23 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
}
+ if (cfs_bandwidth_used()) {
+ /*
+ * When bandwidth control is enabled, the cfs_rq_throttled()
+ * breaks in the above iteration can result in incomplete
+ * leaf list maintenance, resulting in triggering the assertion
+ * below.
+ */
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ if (list_add_leaf_cfs_rq(cfs_rq))
+ break;
+ }
+ }
+
+ assert_list_leaf_cfs_rq(rq);
+
hrtick_update(rq);
}
@@ -5556,11 +5606,6 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
-static unsigned long capacity_orig_of(int cpu)
-{
- return cpu_rq(cpu)->cpu_capacity_orig;
-}
-
static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -5980,6 +6025,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
static inline void set_idle_cores(int cpu, int val)
{
@@ -6052,7 +6098,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
bool idle = true;
for_each_cpu(cpu, cpu_smt_mask(core)) {
- cpumask_clear_cpu(cpu, cpus);
+ __cpumask_clear_cpu(cpu, cpus);
if (!available_idle_cpu(cpu))
idle = false;
}
@@ -6072,7 +6118,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
/*
* Scan the local SMT mask for idle CPUs.
*/
-static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_smt(struct task_struct *p, int target)
{
int cpu;
@@ -6096,7 +6142,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
return -1;
}
-static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+static inline int select_idle_smt(struct task_struct *p, int target)
{
return -1;
}
@@ -6201,7 +6247,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
if ((unsigned)i < nr_cpumask_bits)
return i;
- i = select_idle_smt(p, sd, target);
+ i = select_idle_smt(p, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
@@ -6607,7 +6653,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
- if (static_branch_unlikely(&sched_energy_present)) {
+ if (sched_energy_enabled()) {
new_cpu = find_energy_efficient_cpu(p, prev_cpu);
if (new_cpu >= 0)
return new_cpu;
@@ -7026,6 +7072,12 @@ idle:
if (new_tasks > 0)
goto again;
+ /*
+ * rq is about to be idle, check if we need to update the
+ * lost_idle_time of clock_pelt
+ */
+ update_idle_rq_clock_pelt(rq);
+
return NULL;
}
@@ -7646,10 +7698,27 @@ static inline bool others_have_blocked(struct rq *rq)
#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+ if (cfs_rq->load.weight)
+ return false;
+
+ if (cfs_rq->avg.load_sum)
+ return false;
+
+ if (cfs_rq->avg.util_sum)
+ return false;
+
+ if (cfs_rq->avg.runnable_load_sum)
+ return false;
+
+ return true;
+}
+
static void update_blocked_averages(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- struct cfs_rq *cfs_rq;
+ struct cfs_rq *cfs_rq, *pos;
const struct sched_class *curr_class;
struct rq_flags rf;
bool done = true;
@@ -7661,14 +7730,10 @@ static void update_blocked_averages(int cpu)
* Iterates the task_group tree in a bottom up fashion, see
* list_add_leaf_cfs_rq() for details.
*/
- for_each_leaf_cfs_rq(rq, cfs_rq) {
+ for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
struct sched_entity *se;
- /* throttled entities do not contribute to load */
- if (throttled_hierarchy(cfs_rq))
- continue;
-
- if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+ if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
update_tg_load_avg(cfs_rq, 0);
/* Propagate pending load changes to the parent, if any: */
@@ -7676,14 +7741,21 @@ static void update_blocked_averages(int cpu)
if (se && !skip_blocked_update(se))
update_load_avg(cfs_rq_of(se), se, 0);
+ /*
+ * There can be a lot of idle CPU cgroups. Don't let fully
+ * decayed cfs_rqs linger on the list.
+ */
+ if (cfs_rq_is_decayed(cfs_rq))
+ list_del_leaf_cfs_rq(cfs_rq);
+
/* Don't need periodic decay once load/util_avg are null */
if (cfs_rq_has_blocked(cfs_rq))
done = false;
}
curr_class = rq->curr->sched_class;
- update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
- update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
+ update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+ update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
update_irq_load_avg(rq, 0);
/* Don't need periodic decay once load/util_avg are null */
if (others_have_blocked(rq))
@@ -7753,11 +7825,11 @@ static inline void update_blocked_averages(int cpu)
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
- update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+ update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
curr_class = rq->curr->sched_class;
- update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
- update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
+ update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+ update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
update_irq_load_avg(rq, 0);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
@@ -8451,9 +8523,7 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
return 0;
- env->imbalance = DIV_ROUND_CLOSEST(
- sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
- SCHED_CAPACITY_SCALE);
+ env->imbalance = sds->busiest_stat.group_load;
return 1;
}
@@ -8635,7 +8705,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
*/
update_sd_lb_stats(env, &sds);
- if (static_branch_unlikely(&sched_energy_present)) {
+ if (sched_energy_enabled()) {
struct root_domain *rd = env->dst_rq->rd;
if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
@@ -8826,21 +8896,25 @@ static struct rq *find_busiest_queue(struct lb_env *env,
*/
#define MAX_PINNED_INTERVAL 512
-static int need_active_balance(struct lb_env *env)
+static inline bool
+asym_active_balance(struct lb_env *env)
{
- struct sched_domain *sd = env->sd;
+ /*
+ * ASYM_PACKING needs to force migrate tasks from busy but
+ * lower priority CPUs in order to pack all tasks in the
+ * highest priority CPUs.
+ */
+ return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
+ sched_asym_prefer(env->dst_cpu, env->src_cpu);
+}
- if (env->idle == CPU_NEWLY_IDLE) {
+static inline bool
+voluntary_active_balance(struct lb_env *env)
+{
+ struct sched_domain *sd = env->sd;
- /*
- * ASYM_PACKING needs to force migrate tasks from busy but
- * lower priority CPUs in order to pack all tasks in the
- * highest priority CPUs.
- */
- if ((sd->flags & SD_ASYM_PACKING) &&
- sched_asym_prefer(env->dst_cpu, env->src_cpu))
- return 1;
- }
+ if (asym_active_balance(env))
+ return 1;
/*
* The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
@@ -8858,6 +8932,16 @@ static int need_active_balance(struct lb_env *env)
if (env->src_grp_type == group_misfit_task)
return 1;
+ return 0;
+}
+
+static int need_active_balance(struct lb_env *env)
+{
+ struct sched_domain *sd = env->sd;
+
+ if (voluntary_active_balance(env))
+ return 1;
+
return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}
@@ -9022,7 +9106,7 @@ more_balance:
if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
/* Prevent to re-select dst_cpu via env's CPUs */
- cpumask_clear_cpu(env.dst_cpu, env.cpus);
+ __cpumask_clear_cpu(env.dst_cpu, env.cpus);
env.dst_rq = cpu_rq(env.new_dst_cpu);
env.dst_cpu = env.new_dst_cpu;
@@ -9049,7 +9133,7 @@ more_balance:
/* All tasks on this runqueue were pinned by CPU affinity */
if (unlikely(env.flags & LBF_ALL_PINNED)) {
- cpumask_clear_cpu(cpu_of(busiest), cpus);
+ __cpumask_clear_cpu(cpu_of(busiest), cpus);
/*
* Attempting to continue load balancing at the current
* sched_domain level only makes sense if there are
@@ -9119,7 +9203,7 @@ more_balance:
} else
sd->nr_balance_failed = 0;
- if (likely(!active_balance)) {
+ if (likely(!active_balance) || voluntary_active_balance(&env)) {
/* We were unbalanced, so reset the balancing interval */
sd->balance_interval = sd->min_interval;
} else {
@@ -9468,15 +9552,8 @@ static void kick_ilb(unsigned int flags)
}
/*
- * Current heuristic for kicking the idle load balancer in the presence
- * of an idle cpu in the system.
- * - This rq has more than one task.
- * - This rq has at least one CFS task and the capacity of the CPU is
- * significantly reduced because of RT tasks or IRQs.
- * - At parent of LLC scheduler domain level, this cpu's scheduler group has
- * multiple busy cpu.
- * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
- * domain span are idle.
+ * Current decision point for kicking the idle load balancer in the presence
+ * of idle CPUs in the system.
*/
static void nohz_balancer_kick(struct rq *rq)
{
@@ -9518,8 +9595,13 @@ static void nohz_balancer_kick(struct rq *rq)
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds) {
/*
- * XXX: write a coherent comment on why we do this.
- * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
+ * If there is an imbalance between LLC domains (IOW we could
+ * increase the overall cache use), we need some less-loaded LLC
+ * domain to pull some load. Likewise, we may need to spread
+ * load within the current LLC domain (e.g. packed SMT cores but
+ * other CPUs are idle). We can't really know from here how busy
+ * the others are - so just get a nohz balance going if it looks
+ * like this LLC domain has tasks we could move.
*/
nr_busy = atomic_read(&sds->nr_busy_cpus);
if (nr_busy > 1) {
@@ -9532,7 +9614,7 @@ static void nohz_balancer_kick(struct rq *rq)
sd = rcu_dereference(rq->sd);
if (sd) {
if ((rq->cfs.h_nr_running >= 1) &&
- check_cpu_capacity(rq, sd)) {
+ check_cpu_capacity(rq, sd)) {
flags = NOHZ_KICK_MASK;
goto unlock;
}
@@ -9540,11 +9622,7 @@ static void nohz_balancer_kick(struct rq *rq)
sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
if (sd) {
- for_each_cpu(i, sched_domain_span(sd)) {
- if (i == cpu ||
- !cpumask_test_cpu(i, nohz.idle_cpus_mask))
- continue;
-
+ for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
if (sched_asym_prefer(i, cpu)) {
flags = NOHZ_KICK_MASK;
goto unlock;
@@ -10545,10 +10623,10 @@ const struct sched_class fair_sched_class = {
#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
- struct cfs_rq *cfs_rq;
+ struct cfs_rq *cfs_rq, *pos;
rcu_read_lock();
- for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
+ for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
print_cfs_rq(m, cpu, cfs_rq);
rcu_read_unlock();
}
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 81faddba9e20..b02d148e7672 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -80,7 +80,7 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags)
cpumask_andnot(housekeeping_mask,
cpu_possible_mask, non_housekeeping_mask);
if (cpumask_empty(housekeeping_mask))
- cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
+ __cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
} else {
cpumask_var_t tmp;
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 90fb5bc12ad4..befce29bd882 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -26,7 +26,6 @@
#include <linux/sched.h>
#include "sched.h"
-#include "sched-pelt.h"
#include "pelt.h"
/*
@@ -106,16 +105,12 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
* n=1
*/
static __always_inline u32
-accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
+accumulate_sum(u64 delta, struct sched_avg *sa,
unsigned long load, unsigned long runnable, int running)
{
- unsigned long scale_freq, scale_cpu;
u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
u64 periods;
- scale_freq = arch_scale_freq_capacity(cpu);
- scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
-
delta += sa->period_contrib;
periods = delta / 1024; /* A period is 1024us (~1ms) */
@@ -137,13 +132,12 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
}
sa->period_contrib = delta;
- contrib = cap_scale(contrib, scale_freq);
if (load)
sa->load_sum += load * contrib;
if (runnable)
sa->runnable_load_sum += runnable * contrib;
if (running)
- sa->util_sum += contrib * scale_cpu;
+ sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;
return periods;
}
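With time invariance now handled when the rq clock itself advances (see update_rq_clock_pelt() in pelt.h below), accumulate_sum() no longer rescales contrib per call. As a worked check, assuming SCHED_CAPACITY_SHIFT == 10: at full capacity and full frequency the old code added contrib * scale_cpu = contrib * 1024 to util_sum, and the new contrib << SCHED_CAPACITY_SHIFT is the same value, so util_sum keeps the [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT] range quoted for running_sum earlier in this series.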
@@ -177,7 +171,7 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
* = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
*/
static __always_inline int
-___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
+___update_load_sum(u64 now, struct sched_avg *sa,
unsigned long load, unsigned long runnable, int running)
{
u64 delta;
@@ -221,7 +215,7 @@ ___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
* Step 1: accumulate *_sum since last_update_time. If we haven't
* crossed period boundaries, finish.
*/
- if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
+ if (!accumulate_sum(delta, sa, load, runnable, running))
return 0;
return 1;
@@ -267,9 +261,9 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
* runnable_load_avg = \Sum se->avg.runable_load_avg
*/
-int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
+int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
- if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
+ if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
return 1;
}
@@ -277,9 +271,9 @@ int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
return 0;
}
-int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
+int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
+ if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
cfs_rq->curr == se)) {
___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
@@ -290,9 +284,9 @@ int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_e
return 0;
}
-int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
+int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
- if (___update_load_sum(now, cpu, &cfs_rq->avg,
+ if (___update_load_sum(now, &cfs_rq->avg,
scale_load_down(cfs_rq->load.weight),
scale_load_down(cfs_rq->runnable_weight),
cfs_rq->curr != NULL)) {
@@ -317,7 +311,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
- if (___update_load_sum(now, rq->cpu, &rq->avg_rt,
+ if (___update_load_sum(now, &rq->avg_rt,
running,
running,
running)) {
@@ -340,7 +334,7 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
- if (___update_load_sum(now, rq->cpu, &rq->avg_dl,
+ if (___update_load_sum(now, &rq->avg_dl,
running,
running,
running)) {
@@ -365,22 +359,31 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
int update_irq_load_avg(struct rq *rq, u64 running)
{
int ret = 0;
+
+ /*
+ * We can't use clock_pelt because irq time is not accounted in
+ * clock_task. Instead we directly scale the running time to
+ * reflect the real amount of computation
+ */
+ running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
+ running = cap_scale(running, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+
/*
* We know the time that has been used by interrupt since last update
* but we don't know when. Let's be pessimistic and assume that the interrupt has
* happened just before the update. This is not so far from reality
* because the interrupt will most probably wake up a task and trigger an update
- * of rq clock during which the metric si updated.
+ * of rq clock during which the metric is updated.
* We start to decay with normal context time and then we add the
* interrupt context time.
* We can safely remove running from rq->clock because
* rq->clock += delta with delta >= running
*/
- ret = ___update_load_sum(rq->clock - running, rq->cpu, &rq->avg_irq,
+ ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
0,
0,
0);
- ret += ___update_load_sum(rq->clock, rq->cpu, &rq->avg_irq,
+ ret += ___update_load_sum(rq->clock, &rq->avg_irq,
1,
1,
1);
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 7e56b489ff32..7489d5f56960 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -1,8 +1,9 @@
#ifdef CONFIG_SMP
+#include "sched-pelt.h"
-int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
-int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
-int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
+int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
@@ -42,6 +43,101 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
+/*
+ * The clock_pelt scales the time to reflect the effective amount of
+ * computation done during the running delta time but then sync back to
+ * clock_task when rq is idle.
+ *
+ *
+ * absolute time | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
+ * @ max capacity ------******---------------******---------------
+ * @ half capacity ------************---------************---------
+ * clock pelt | 1| 2| 3| 4| 7| 8| 9| 10| 11|14|15|16
+ *
+ */
+static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
+{
+ if (unlikely(is_idle_task(rq->curr))) {
+ /* The rq is idle, we can sync to clock_task */
+ rq->clock_pelt = rq_clock_task(rq);
+ return;
+ }
+
+ /*
+ * When a rq runs at a lower compute capacity, it will need
+ * more time to do the same amount of work than at max
+ * capacity. In order to be invariant, we scale the delta to
+ * reflect how much work has been really done.
+ * Running longer results in stealing idle time that will
+ * disturb the load signal compared to max capacity. This
+ * stolen idle time will be automatically reflected when the
+ * rq will be idle and the clock will be synced with
+ * rq_clock_task.
+ */
+
+ /*
+ * Scale the elapsed time to reflect the real amount of
+ * computation
+ */
+ delta = cap_scale(delta, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+ delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
+
+ rq->clock_pelt += delta;
+}
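Reading the timeline above numerically (an illustrative example, assuming SCHED_CAPACITY_SCALE == 1024 and the CPU running at its maximum frequency): at half compute capacity arch_scale_cpu_capacity() returns 512, so a 2048us wall-clock delta becomes cap_scale(cap_scale(2048, 512), 1024) = 2048 * 512 / 1024 = 1024us of clock_pelt. During the running window, two wall-clock ticks therefore advance the PELT clock by one, which is what the "@ half capacity" row shows; once the rq goes idle, clock_pelt is synced back to clock_task.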
+
+/*
+ * When rq becomes idle, we have to check if it has lost idle time
+ * because it was fully busy. A rq is fully used when the /Sum util_sum
+ * is greater or equal to:
+ * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
+ * For optimization and rounding purposes, we don't take into account
+ * the position in the current window (period_contrib) and we use the higher
+ * bound of util_sum to decide.
+ */
+static inline void update_idle_rq_clock_pelt(struct rq *rq)
+{
+ u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
+ u32 util_sum = rq->cfs.avg.util_sum;
+ util_sum += rq->avg_rt.util_sum;
+ util_sum += rq->avg_dl.util_sum;
+
+ /*
+ * Reflecting stolen time makes sense only if the idle
+ * phase would be present at max capacity. As soon as the
+ * utilization of a rq has reached the maximum value, it is
+ * considered as an always running rq without idle time to
+ * steal. This potential idle time is considered as lost in
+ * this case. We keep track of this lost idle time compared to
+ * rq's clock_task.
+ */
+ if (util_sum >= divider)
+ rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
+}
+
+static inline u64 rq_clock_pelt(struct rq *rq)
+{
+ lockdep_assert_held(&rq->lock);
+ assert_clock_updated(rq);
+
+ return rq->clock_pelt - rq->lost_idle_time;
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
+static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+{
+ if (unlikely(cfs_rq->throttle_count))
+ return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
+
+ return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
+}
+#else
+static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+{
+ return rq_clock_pelt(rq_of(cfs_rq));
+}
+#endif
+
#else
static inline int
@@ -67,6 +163,18 @@ update_irq_load_avg(struct rq *rq, u64 running)
{
return 0;
}
+
+static inline u64 rq_clock_pelt(struct rq *rq)
+{
+ return rq_clock_task(rq);
+}
+
+static inline void
+update_rq_clock_pelt(struct rq *rq, s64 delta) { }
+
+static inline void
+update_idle_rq_clock_pelt(struct rq *rq) { }
+
#endif
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index fe24de3fbc93..0e97ca9306ef 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -124,6 +124,7 @@
* sampling of the aggregate task states would be.
*/
+#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
@@ -321,7 +322,7 @@ static bool update_stats(struct psi_group *group)
expires = group->next_update;
if (now < expires)
goto out;
- if (now - expires > psi_period)
+ if (now - expires >= psi_period)
missed_periods = div_u64(now - expires, psi_period);
/*
@@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
groupc->tasks[t]++;
write_seqcount_end(&groupc->seq);
-
- if (!delayed_work_pending(&group->clock_work))
- schedule_delayed_work(&group->clock_work, PSI_FREQ);
}
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
@@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set)
{
int cpu = task_cpu(task);
struct psi_group *group;
+ bool wake_clock = true;
void *iter = NULL;
if (!task->pid)
@@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set)
task->psi_flags &= ~clear;
task->psi_flags |= set;
- while ((group = iterate_groups(task, &iter)))
+ /*
+ * Periodic aggregation shuts off if there is a period of no
+ * task changes, so we wake it back up if necessary. However,
+ * don't do this if the task change is the aggregation worker
+ * itself going to sleep, or we'll ping-pong forever.
+ */
+ if (unlikely((clear & TSK_RUNNING) &&
+ (task->flags & PF_WQ_WORKER) &&
+ wq_worker_last_func(task) == psi_update_work))
+ wake_clock = false;
+
+ while ((group = iterate_groups(task, &iter))) {
psi_group_change(group, cpu, clear, set);
+ if (wake_clock && !delayed_work_pending(&group->clock_work))
+ schedule_delayed_work(&group->clock_work, PSI_FREQ);
+ }
}
void psi_memstall_tick(struct task_struct *task, int cpu)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e4f398ad9e73..90fa23d36565 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1587,7 +1587,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* rt task
*/
if (rq->curr->sched_class != &rt_sched_class)
- update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+ update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
return p;
}
@@ -1596,7 +1596,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
- update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+ update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
/*
* The previous task needs to be made eligible for pushing
@@ -2325,7 +2325,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
- update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+ update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
watchdog(rq, p);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d04530bf251f..efa686eeff26 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -861,7 +861,10 @@ struct rq {
unsigned int clock_update_flags;
u64 clock;
- u64 clock_task;
+ /* Ensure that all clocks are in the same cache line */
+ u64 clock_task ____cacheline_aligned;
+ u64 clock_pelt;
+ unsigned long lost_idle_time;
atomic_t nr_iowait;
@@ -951,6 +954,22 @@ struct rq {
#endif
};
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/* CPU runqueue to which this cfs_rq is attached */
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+ return cfs_rq->rq;
+}
+
+#else
+
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+ return container_of(cfs_rq, struct rq, cfs);
+}
+#endif
+
static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
@@ -1260,7 +1279,7 @@ extern void sched_ttwu_pending(void);
/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
- * See detach_destroy_domains: synchronize_sched for details.
+ * See destroy_sched_domains: call_rcu for details.
*
* The domain tree of any CPU may only be accessed from within
* preempt-disabled sections.
@@ -1460,9 +1479,9 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
*/
smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
- p->cpu = cpu;
+ WRITE_ONCE(p->cpu, cpu);
#else
- task_thread_info(p)->cpu = cpu;
+ WRITE_ONCE(task_thread_info(p)->cpu, cpu);
#endif
p->wake_cpu = cpu;
#endif
@@ -1563,7 +1582,7 @@ static inline int task_on_rq_queued(struct task_struct *p)
static inline int task_on_rq_migrating(struct task_struct *p)
{
- return p->on_rq == TASK_ON_RQ_MIGRATING;
+ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}
/*
@@ -1781,7 +1800,7 @@ extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
unsigned long to_ratio(u64 period, u64 runtime);
extern void init_entity_runnable_average(struct sched_entity *se);
-extern void post_init_entity_util_avg(struct sched_entity *se);
+extern void post_init_entity_util_avg(struct task_struct *p);
#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
@@ -2211,6 +2230,13 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
# define arch_scale_freq_invariant() false
#endif
+#ifdef CONFIG_SMP
+static inline unsigned long capacity_orig_of(int cpu)
+{
+ return cpu_rq(cpu)->cpu_capacity_orig;
+}
+#endif
+
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
/**
* enum schedutil_type - CPU utilization type
@@ -2299,11 +2325,19 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
#endif
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+
#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
-#else
+
+DECLARE_STATIC_KEY_FALSE(sched_energy_present);
+
+static inline bool sched_energy_enabled(void)
+{
+ return static_branch_unlikely(&sched_energy_present);
+}
+
+#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
+
#define perf_domain_span(pd) NULL
-#endif
+static inline bool sched_energy_enabled(void) { return false; }
-#ifdef CONFIG_SMP
-extern struct static_key_false sched_energy_present;
-#endif
+#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 3f35ba1d8fde..ab7f371a3a17 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -201,11 +201,37 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
return 1;
}
-DEFINE_STATIC_KEY_FALSE(sched_energy_present);
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+DEFINE_STATIC_KEY_FALSE(sched_energy_present);
+unsigned int sysctl_sched_energy_aware = 1;
DEFINE_MUTEX(sched_energy_mutex);
bool sched_energy_update;
+#ifdef CONFIG_PROC_SYSCTL
+int sched_energy_aware_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, state;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (!ret && write) {
+ state = static_branch_unlikely(&sched_energy_present);
+ if (state != sysctl_sched_energy_aware) {
+ mutex_lock(&sched_energy_mutex);
+ sched_energy_update = 1;
+ rebuild_sched_domains();
+ sched_energy_update = 0;
+ mutex_unlock(&sched_energy_mutex);
+ }
+ }
+
+ return ret;
+}
+#endif
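The handler above only has an effect once it is attached to a sysctl entry; the kernel/sysctl.c side is not part of this excerpt, so the table entry below is an illustrative sketch (the local bounds variables and the table name are assumptions):

/*
 * Hedged sketch: how sched_energy_aware_handler might be exposed as
 * /proc/sys/kernel/sched_energy_aware; not taken from this patch.
 */
static int sketch_zero;
static int sketch_one = 1;

static struct ctl_table sketch_sched_energy_table[] = {
	{
		.procname	= "sched_energy_aware",
		.data		= &sysctl_sched_energy_aware,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_energy_aware_handler,
		.extra1		= &sketch_zero,	/* clamp writes to 0..1 */
		.extra2		= &sketch_one,
	},
	{ }
};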
+
static void free_pd(struct perf_domain *pd)
{
struct perf_domain *tmp;
@@ -322,6 +348,9 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
struct cpufreq_policy *policy;
struct cpufreq_governor *gov;
+ if (!sysctl_sched_energy_aware)
+ goto free;
+
/* EAS is enabled for asymmetric CPU capacity topologies. */
if (!per_cpu(sd_asym_cpucapacity, cpu)) {
if (sched_debug()) {
@@ -442,7 +471,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
raw_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
- call_rcu_sched(&old_rd->rcu, free_rootdomain);
+ call_rcu(&old_rd->rcu, free_rootdomain);
}
void sched_get_rd(struct root_domain *rd)
@@ -455,7 +484,7 @@ void sched_put_rd(struct root_domain *rd)
if (!atomic_dec_and_test(&rd->refcount))
return;
- call_rcu_sched(&rd->rcu, free_rootdomain);
+ call_rcu(&rd->rcu, free_rootdomain);
}
static int init_rootdomain(struct root_domain *rd)
@@ -676,7 +705,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
}
struct s_data {
- struct sched_domain ** __percpu sd;
+ struct sched_domain * __percpu *sd;
struct root_domain *rd;
};
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d7f538847b84..54a0347ca812 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -267,6 +267,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
*/
+ preempt_disable();
for (; f; f = f->prev) {
u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
@@ -275,6 +276,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
*match = f;
}
}
+ preempt_enable();
return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */
@@ -443,8 +445,8 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
* behavior of privileged children.
*/
if (!task_no_new_privs(current) &&
- security_capable_noaudit(current_cred(), current_user_ns(),
- CAP_SYS_ADMIN) != 0)
+ security_capable(current_cred(), current_user_ns(),
+ CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) != 0)
return ERR_PTR(-EACCES);
/* Allocate a new seccomp_filter */
@@ -976,6 +978,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file)
struct seccomp_filter *filter = file->private_data;
struct seccomp_knotif *knotif;
+ if (!filter)
+ return 0;
+
mutex_lock(&filter->notify_lock);
/*
@@ -1300,6 +1305,7 @@ out:
out_put_fd:
if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
if (ret < 0) {
+ listener_f->private_data = NULL;
fput(listener_f);
put_unused_fd(listener);
} else {
diff --git a/kernel/signal.c b/kernel/signal.c
index e1d7ad8e6ab1..5d53183e2705 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
}
EXPORT_SYMBOL_GPL(dequeue_signal);
+static int dequeue_synchronous_signal(kernel_siginfo_t *info)
+{
+ struct task_struct *tsk = current;
+ struct sigpending *pending = &tsk->pending;
+ struct sigqueue *q, *sync = NULL;
+
+ /*
+ * Might a synchronous signal be in the queue?
+ */
+ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
+ return 0;
+
+ /*
+ * Return the first synchronous signal in the queue.
+ */
+ list_for_each_entry(q, &pending->list, list) {
+ /* Synchronous signals have a positive si_code */
+ if ((q->info.si_code > SI_USER) &&
+ (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
+ sync = q;
+ goto next;
+ }
+ }
+ return 0;
+next:
+ /*
+ * Check if there is another siginfo for the same signal.
+ */
+ list_for_each_entry_continue(q, &pending->list, list) {
+ if (q->info.si_signo == sync->info.si_signo)
+ goto still_pending;
+ }
+
+ sigdelset(&pending->signal, sync->info.si_signo);
+ recalc_sigpending();
+still_pending:
+ list_del_init(&sync->list);
+ copy_siginfo(info, &sync->info);
+ __sigqueue_free(sync);
+ return info->si_signo;
+}
+
/*
* Tell a process that it has a new active signal..
*
@@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
result = TRACE_SIGNAL_DELIVERED;
/*
- * Skip useless siginfo allocation for SIGKILL SIGSTOP,
- * and kernel threads.
+ * Skip useless siginfo allocation for SIGKILL and kernel threads.
*/
- if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
+ if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
goto out_set;
/*
@@ -2394,6 +2435,14 @@ relock:
goto relock;
}
+ /* Has this task already been marked for death? */
+ if (signal_group_exit(signal)) {
+ ksig->info.si_signo = signr = SIGKILL;
+ sigdelset(&current->pending.signal, SIGKILL);
+ recalc_sigpending();
+ goto fatal;
+ }
+
for (;;) {
struct k_sigaction *ka;
@@ -2407,7 +2456,15 @@ relock:
goto relock;
}
- signr = dequeue_signal(current, &current->blocked, &ksig->info);
+ /*
+ * Signals generated by the execution of an instruction
+ * need to be delivered before any other pending signals
+ * so that the instruction pointer in the signal stack
+ * frame points to the faulting instruction.
+ */
+ signr = dequeue_synchronous_signal(&ksig->info);
+ if (!signr)
+ signr = dequeue_signal(current, &current->blocked, &ksig->info);
if (!signr)
break; /* will return 0 */
@@ -2489,6 +2546,7 @@ relock:
continue;
}
+ fatal:
spin_unlock_irq(&sighand->siglock);
/*
@@ -3397,7 +3455,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
}
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
+COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
struct compat_siginfo __user *, uinfo,
struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
diff --git a/kernel/smp.c b/kernel/smp.c
index 163c451af42e..f4cf1b0bb3b8 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -584,8 +584,6 @@ void __init smp_init(void)
num_nodes, (num_nodes > 1 ? "s" : ""),
num_cpus, (num_cpus > 1 ? "s" : ""));
- /* Final decision about SMT support */
- cpu_smt_check_topology();
/* Any cleanup work */
smp_cpus_done(setup_max_cpus);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d28813306b2c..10277429ed84 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -89,7 +89,8 @@ static bool ksoftirqd_running(unsigned long pending)
if (pending & SOFTIRQ_NOW_MASK)
return false;
- return tsk && (tsk->state == TASK_RUNNING);
+ return tsk && (tsk->state == TASK_RUNNING) &&
+ !__kthread_should_park(tsk);
}
/*
diff --git a/kernel/sys.c b/kernel/sys.c
index a48cbf1414b8..c5f875048aef 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -516,7 +516,7 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
new->uid = kruid;
if (!uid_eq(old->uid, kruid) &&
!uid_eq(old->euid, kruid) &&
- !ns_capable(old->user_ns, CAP_SETUID))
+ !ns_capable_setid(old->user_ns, CAP_SETUID))
goto error;
}
@@ -525,7 +525,7 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
if (!uid_eq(old->uid, keuid) &&
!uid_eq(old->euid, keuid) &&
!uid_eq(old->suid, keuid) &&
- !ns_capable(old->user_ns, CAP_SETUID))
+ !ns_capable_setid(old->user_ns, CAP_SETUID))
goto error;
}
@@ -584,7 +584,7 @@ long __sys_setuid(uid_t uid)
old = current_cred();
retval = -EPERM;
- if (ns_capable(old->user_ns, CAP_SETUID)) {
+ if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
new->suid = new->uid = kuid;
if (!uid_eq(kuid, old->uid)) {
retval = set_user(new);
@@ -646,7 +646,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
old = current_cred();
retval = -EPERM;
- if (!ns_capable(old->user_ns, CAP_SETUID)) {
+ if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
goto error;
@@ -814,7 +814,7 @@ long __sys_setfsuid(uid_t uid)
if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
- ns_capable(old->user_ns, CAP_SETUID)) {
+ ns_capable_setid(old->user_ns, CAP_SETUID)) {
if (!uid_eq(kuid, old->fsuid)) {
new->fsuid = kuid;
if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
@@ -1207,7 +1207,8 @@ DECLARE_RWSEM(uts_sem);
/*
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
- * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
+ * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
+ * 2.6.60.
*/
static int override_release(char __user *release, size_t len)
{
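
The comment above states the UNAME26 mapping only in words. The following is an illustrative userspace sketch of the arithmetic the comment describes, not the kernel's actual override_release() implementation; the function name is made up:

#include <stdio.h>

/* Map "major.minor" to the fake 2.6.y release described above. */
static void fake_release(int major, int minor, char *buf, size_t len)
{
	int y;

	if (major == 3)
		y = 40 + minor;		/* 3.x  -> 2.6.(40+x) */
	else
		y = 60 + minor;		/* 4.x+ -> 2.6.(60+x) */
	snprintf(buf, len, "2.6.%d", y);
}

int main(void)
{
	char buf[16];

	fake_release(5, 0, buf, sizeof(buf));
	printf("%s\n", buf);		/* prints 2.6.60 */
	return 0;
}
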
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index ab9d0e3c6d50..62a6c8707799 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -42,9 +42,11 @@ COND_SYSCALL(io_destroy);
COND_SYSCALL(io_submit);
COND_SYSCALL_COMPAT(io_submit);
COND_SYSCALL(io_cancel);
+COND_SYSCALL(io_getevents_time32);
COND_SYSCALL(io_getevents);
+COND_SYSCALL(io_pgetevents_time32);
COND_SYSCALL(io_pgetevents);
-COND_SYSCALL_COMPAT(io_getevents);
+COND_SYSCALL_COMPAT(io_pgetevents_time32);
COND_SYSCALL_COMPAT(io_pgetevents);
/* fs/xattr.c */
@@ -114,9 +116,9 @@ COND_SYSCALL_COMPAT(signalfd4);
/* fs/timerfd.c */
COND_SYSCALL(timerfd_create);
COND_SYSCALL(timerfd_settime);
-COND_SYSCALL_COMPAT(timerfd_settime);
+COND_SYSCALL(timerfd_settime32);
COND_SYSCALL(timerfd_gettime);
-COND_SYSCALL_COMPAT(timerfd_gettime);
+COND_SYSCALL(timerfd_gettime32);
/* fs/utimes.c */
@@ -135,7 +137,7 @@ COND_SYSCALL(capset);
/* kernel/futex.c */
COND_SYSCALL(futex);
-COND_SYSCALL_COMPAT(futex);
+COND_SYSCALL(futex_time32);
COND_SYSCALL(set_robust_list);
COND_SYSCALL_COMPAT(set_robust_list);
COND_SYSCALL(get_robust_list);
@@ -187,9 +189,9 @@ COND_SYSCALL(mq_open);
COND_SYSCALL_COMPAT(mq_open);
COND_SYSCALL(mq_unlink);
COND_SYSCALL(mq_timedsend);
-COND_SYSCALL_COMPAT(mq_timedsend);
+COND_SYSCALL(mq_timedsend_time32);
COND_SYSCALL(mq_timedreceive);
-COND_SYSCALL_COMPAT(mq_timedreceive);
+COND_SYSCALL(mq_timedreceive_time32);
COND_SYSCALL(mq_notify);
COND_SYSCALL_COMPAT(mq_notify);
COND_SYSCALL(mq_getsetattr);
@@ -197,8 +199,10 @@ COND_SYSCALL_COMPAT(mq_getsetattr);
/* ipc/msg.c */
COND_SYSCALL(msgget);
+COND_SYSCALL(old_msgctl);
COND_SYSCALL(msgctl);
COND_SYSCALL_COMPAT(msgctl);
+COND_SYSCALL_COMPAT(old_msgctl);
COND_SYSCALL(msgrcv);
COND_SYSCALL_COMPAT(msgrcv);
COND_SYSCALL(msgsnd);
@@ -206,16 +210,20 @@ COND_SYSCALL_COMPAT(msgsnd);
/* ipc/sem.c */
COND_SYSCALL(semget);
+COND_SYSCALL(old_semctl);
COND_SYSCALL(semctl);
COND_SYSCALL_COMPAT(semctl);
+COND_SYSCALL_COMPAT(old_semctl);
COND_SYSCALL(semtimedop);
-COND_SYSCALL_COMPAT(semtimedop);
+COND_SYSCALL(semtimedop_time32);
COND_SYSCALL(semop);
/* ipc/shm.c */
COND_SYSCALL(shmget);
+COND_SYSCALL(old_shmctl);
COND_SYSCALL(shmctl);
COND_SYSCALL_COMPAT(shmctl);
+COND_SYSCALL_COMPAT(old_shmctl);
COND_SYSCALL(shmat);
COND_SYSCALL_COMPAT(shmat);
COND_SYSCALL(shmdt);
@@ -285,7 +293,7 @@ COND_SYSCALL(perf_event_open);
COND_SYSCALL(accept4);
COND_SYSCALL(recvmmsg);
COND_SYSCALL(recvmmsg_time32);
-COND_SYSCALL_COMPAT(recvmmsg);
+COND_SYSCALL_COMPAT(recvmmsg_time32);
COND_SYSCALL_COMPAT(recvmmsg_time64);
/*
@@ -366,6 +374,7 @@ COND_SYSCALL(kexec_file_load);
/* s390 */
COND_SYSCALL(s390_pci_mmio_read);
COND_SYSCALL(s390_pci_mmio_write);
+COND_SYSCALL(s390_ipc);
COND_SYSCALL_COMPAT(s390_ipc);
/* powerpc */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ba4d9e85feb8..14f30b4a1b64 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -224,6 +224,11 @@ static int proc_dostring_coredump(struct ctl_table *table, int write,
#endif
static int proc_dopipe_max_size(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
+#ifdef CONFIG_BPF_SYSCALL
+static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
#ifdef CONFIG_MAGIC_SYSRQ
/* Note: sysrq code uses its own private copy */
@@ -467,6 +472,17 @@ static struct ctl_table kern_table[] = {
.extra1 = &one,
},
#endif
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+ {
+ .procname = "sched_energy_aware",
+ .data = &sysctl_sched_energy_aware,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_energy_aware_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
@@ -1229,6 +1245,15 @@ static struct ctl_table kern_table[] = {
.extra1 = &one,
.extra2 = &one,
},
+ {
+ .procname = "bpf_stats_enabled",
+ .data = &sysctl_bpf_stats_enabled,
+ .maxlen = sizeof(sysctl_bpf_stats_enabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_bpf_stats,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
#endif
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
{
@@ -1446,7 +1471,7 @@ static struct ctl_table vm_table[] = {
.data = &sysctl_extfrag_threshold,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = sysctl_extfrag_handler,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &min_extfrag_threshold,
.extra2 = &max_extfrag_threshold,
},
@@ -3260,6 +3285,29 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
#endif /* CONFIG_PROC_SYSCTL */
+#ifdef CONFIG_BPF_SYSCALL
+static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret, bpf_stats = *(int *)table->data;
+ struct ctl_table tmp = *table;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ tmp.data = &bpf_stats;
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret) {
+ *(int *)table->data = bpf_stats;
+ if (bpf_stats)
+ static_branch_enable(&bpf_stats_enabled_key);
+ else
+ static_branch_disable(&bpf_stats_enabled_key);
+ }
+ return ret;
+}
+#endif
/*
* No sense putting this after each symbol definition, twice,
* exception granted :-)
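
Because the new entry sits in kern_table, it is expected to show up as /proc/sys/kernel/bpf_stats_enabled (the path is inferred from the table placement, not spelled out in the hunk), and the handler above rejects writes from tasks without CAP_SYS_ADMIN. A minimal userspace sketch that turns the statistics on:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path assumed from the kern_table entry above */
	int fd = open("/proc/sys/kernel/bpf_stats_enabled", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* requires CAP_SYS_ADMIN; the handler returns -EPERM otherwise */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
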
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 58b981f4bb5d..e2c038d6c13c 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -117,6 +117,35 @@ config NO_HZ_FULL
endchoice
+config CONTEXT_TRACKING
+ bool
+
+config CONTEXT_TRACKING_FORCE
+ bool "Force context tracking"
+ depends on CONTEXT_TRACKING
+ default y if !NO_HZ_FULL
+ help
+ The main prerequisite for full dynticks to work is support for
+ the context tracking subsystem, but other dependencies must be
+ provided as well before full dynticks can be used.
+
+ This option is meant for testing, when an architecture implements
+ the context tracking backend but does not yet fulfill all the
+ requirements needed to make the full dynticks feature work.
+ Without full dynticks there is no way to test the support
+ for context tracking and the subsystems that rely on it: RCU
+ userspace extended quiescent state and tickless cputime
+ accounting. This option copes with the absence of the full
+ dynticks subsystem by forcing context tracking on all
+ CPUs in the system.
+
+ Say Y only if you are working on the development of an
+ architecture backend for context tracking.
+
+ Say N otherwise; this option adds overhead that you do not
+ want in production.
+
config NO_HZ
bool "Old Idle dynticks config"
depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index f5cfa1b73d6f..41dfff23c1f9 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -364,7 +364,7 @@ static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
switch (state) {
case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
-
+ /* fall through */
default:
return false;
}
@@ -1771,7 +1771,7 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE2(nanosleep, struct old_timespec32 __user *, rqtp,
+SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
struct old_timespec32 __user *, rmtp)
{
struct timespec64 tu;
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 36a2bef00125..92a90014a925 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -188,13 +188,13 @@ static inline int is_error_status(int status)
&& (status & (STA_PPSWANDER|STA_PPSERROR)));
}
-static inline void pps_fill_timex(struct timex *txc)
+static inline void pps_fill_timex(struct __kernel_timex *txc)
{
txc->ppsfreq = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
PPM_SCALE_INV, NTP_SCALE_SHIFT);
txc->jitter = pps_jitter;
if (!(time_status & STA_NANO))
- txc->jitter /= NSEC_PER_USEC;
+ txc->jitter = pps_jitter / NSEC_PER_USEC;
txc->shift = pps_shift;
txc->stabil = pps_stabil;
txc->jitcnt = pps_jitcnt;
@@ -220,7 +220,7 @@ static inline int is_error_status(int status)
return status & (STA_UNSYNC|STA_CLOCKERR);
}
-static inline void pps_fill_timex(struct timex *txc)
+static inline void pps_fill_timex(struct __kernel_timex *txc)
{
/* PPS is not implemented, so these are zero */
txc->ppsfreq = 0;
@@ -633,7 +633,7 @@ void ntp_notify_cmos_timer(void)
/*
* Propagate a new txc->status value into the NTP state:
*/
-static inline void process_adj_status(const struct timex *txc)
+static inline void process_adj_status(const struct __kernel_timex *txc)
{
if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
time_state = TIME_OK;
@@ -656,7 +656,8 @@ static inline void process_adj_status(const struct timex *txc)
}
-static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai)
+static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
+ s32 *time_tai)
{
if (txc->modes & ADJ_STATUS)
process_adj_status(txc);
@@ -707,7 +708,8 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai
* adjtimex mainly allows reading (and writing, if superuser) of
* kernel time-keeping variables. used by xntpd.
*/
-int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai)
+int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
+ s32 *time_tai)
{
int result;
@@ -729,7 +731,7 @@ int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai)
txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
NTP_SCALE_SHIFT);
if (!(time_status & STA_NANO))
- txc->offset /= NSEC_PER_USEC;
+ txc->offset = (u32)txc->offset / NSEC_PER_USEC;
}
result = time_state; /* mostly `TIME_OK' */
@@ -754,7 +756,7 @@ int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai)
txc->time.tv_sec = (time_t)ts->tv_sec;
txc->time.tv_usec = ts->tv_nsec;
if (!(time_status & STA_NANO))
- txc->time.tv_usec /= NSEC_PER_USEC;
+ txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;
/* Handle leapsec adjustments */
if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index c24b0e13f011..40e6122e634e 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -8,6 +8,6 @@ extern void ntp_clear(void);
extern u64 ntp_tick_length(void);
extern ktime_t ntp_get_next_leap(void);
extern int second_overflow(time64_t secs);
-extern int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai);
+extern int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts, s32 *time_tai);
extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 425bbfce6819..ec960bb939fd 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -228,7 +228,7 @@ static void put_clock_desc(struct posix_clock_desc *cd)
fput(cd->fp);
}
-static int pc_clock_adjtime(clockid_t id, struct timex *tx)
+static int pc_clock_adjtime(clockid_t id, struct __kernel_timex *tx)
{
struct posix_clock_desc cd;
int err;
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 8f0644af40be..0a426f4e3125 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -67,13 +67,13 @@ static void bump_cpu_timer(struct k_itimer *timer, u64 now)
int i;
u64 delta, incr;
- if (timer->it.cpu.incr == 0)
+ if (!timer->it_interval)
return;
if (now < timer->it.cpu.expires)
return;
- incr = timer->it.cpu.incr;
+ incr = timer->it_interval;
delta = now + incr - timer->it.cpu.expires;
/* Don't use (incr*2 < delta), incr*2 might overflow. */
@@ -520,7 +520,7 @@ static void cpu_timer_fire(struct k_itimer *timer)
*/
wake_up_process(timer->it_process);
timer->it.cpu.expires = 0;
- } else if (timer->it.cpu.incr == 0) {
+ } else if (!timer->it_interval) {
/*
* One-shot timer. Clear it as soon as it's fired.
*/
@@ -606,7 +606,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
*/
ret = 0;
- old_incr = timer->it.cpu.incr;
+ old_incr = timer->it_interval;
old_expires = timer->it.cpu.expires;
if (unlikely(timer->it.cpu.firing)) {
timer->it.cpu.firing = -1;
@@ -684,7 +684,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
* Install the new reload setting, and
* set up the signal and overrun bookkeeping.
*/
- timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
+ timer->it_interval = timespec64_to_ktime(new->it_interval);
/*
* This acts as a modification timestamp for the timer,
@@ -723,7 +723,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
/*
* Easy part: convert the reload time.
*/
- itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);
+ itp->it_interval = ktime_to_timespec64(timer->it_interval);
if (!timer->it.cpu.expires)
return;
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index a51895486e5e..67df65f887ac 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -45,6 +45,7 @@ SYS_NI(timer_delete);
SYS_NI(clock_adjtime);
SYS_NI(getitimer);
SYS_NI(setitimer);
+SYS_NI(clock_adjtime32);
#ifdef __ARCH_WANT_SYS_ALARM
SYS_NI(alarm);
#endif
@@ -150,16 +151,16 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
#ifdef CONFIG_COMPAT
COMPAT_SYS_NI(timer_create);
-COMPAT_SYS_NI(clock_adjtime);
-COMPAT_SYS_NI(timer_settime);
-COMPAT_SYS_NI(timer_gettime);
COMPAT_SYS_NI(getitimer);
COMPAT_SYS_NI(setitimer);
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
- struct old_timespec32 __user *, tp)
+SYS_NI(timer_settime32);
+SYS_NI(timer_gettime32);
+
+SYSCALL_DEFINE2(clock_settime32, const clockid_t, which_clock,
+ struct old_timespec32 __user *, tp)
{
struct timespec64 new_tp;
@@ -171,8 +172,8 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
return do_sys_settimeofday64(&new_tp, NULL);
}
-COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
- struct old_timespec32 __user *, tp)
+SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
+ struct old_timespec32 __user *, tp)
{
int ret;
struct timespec64 kernel_tp;
@@ -186,8 +187,8 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
return 0;
}
-COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
- struct old_timespec32 __user *, tp)
+SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
+ struct old_timespec32 __user *, tp)
{
struct timespec64 rtn_tp = {
.tv_sec = 0,
@@ -206,9 +207,9 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
}
}
-COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
- struct old_timespec32 __user *, rqtp,
- struct old_timespec32 __user *, rmtp)
+SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
+ struct old_timespec32 __user *, rqtp,
+ struct old_timespec32 __user *, rmtp)
{
struct timespec64 t;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 0e84bb72a3da..29176635991f 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -179,7 +179,7 @@ static int posix_clock_realtime_set(const clockid_t which_clock,
}
static int posix_clock_realtime_adj(const clockid_t which_clock,
- struct timex *t)
+ struct __kernel_timex *t)
{
return do_adjtimex(t);
}
@@ -730,8 +730,8 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
- struct old_itimerspec32 __user *, setting)
+SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
+ struct old_itimerspec32 __user *, setting)
{
struct itimerspec64 cur_setting;
@@ -903,9 +903,9 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
}
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
- struct old_itimerspec32 __user *, new,
- struct old_itimerspec32 __user *, old)
+SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
+ struct old_itimerspec32 __user *, new,
+ struct old_itimerspec32 __user *, old)
{
struct itimerspec64 new_spec, old_spec;
struct itimerspec64 *rtn = old ? &old_spec : NULL;
@@ -1047,22 +1047,28 @@ SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
return error;
}
-SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
- struct timex __user *, utx)
+int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
- struct timex ktx;
- int err;
if (!kc)
return -EINVAL;
if (!kc->clock_adj)
return -EOPNOTSUPP;
+ return kc->clock_adj(which_clock, ktx);
+}
+
+SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
+ struct __kernel_timex __user *, utx)
+{
+ struct __kernel_timex ktx;
+ int err;
+
if (copy_from_user(&ktx, utx, sizeof(ktx)))
return -EFAULT;
- err = kc->clock_adj(which_clock, &ktx);
+ err = do_clock_adjtime(which_clock, &ktx);
if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
return -EFAULT;
@@ -1090,8 +1096,8 @@ SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
- struct old_timespec32 __user *, tp)
+SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1105,8 +1111,8 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
return kc->clock_set(which_clock, &ts);
}
-COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
- struct old_timespec32 __user *, tp)
+SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1123,40 +1129,26 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
return err;
}
-#endif
-
-#ifdef CONFIG_COMPAT
-
-COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
- struct compat_timex __user *, utp)
+SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
+ struct old_timex32 __user *, utp)
{
- const struct k_clock *kc = clockid_to_kclock(which_clock);
- struct timex ktx;
+ struct __kernel_timex ktx;
int err;
- if (!kc)
- return -EINVAL;
- if (!kc->clock_adj)
- return -EOPNOTSUPP;
-
- err = compat_get_timex(&ktx, utp);
+ err = get_old_timex32(&ktx, utp);
if (err)
return err;
- err = kc->clock_adj(which_clock, &ktx);
+ err = do_clock_adjtime(which_clock, &ktx);
if (err >= 0)
- err = compat_put_timex(utp, &ktx);
+ err = put_old_timex32(utp, &ktx);
return err;
}
-#endif
-
-#ifdef CONFIG_COMPAT_32BIT_TIME
-
-COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
- struct old_timespec32 __user *, tp)
+SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1212,9 +1204,9 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
- struct old_timespec32 __user *, rqtp,
- struct old_timespec32 __user *, rmtp)
+SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
+ struct old_timespec32 __user *, rqtp,
+ struct old_timespec32 __user *, rmtp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 t;
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index ddb21145211a..de5daa6d975a 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -8,7 +8,7 @@ struct k_clock {
const struct timespec64 *tp);
int (*clock_get)(const clockid_t which_clock,
struct timespec64 *tp);
- int (*clock_adj)(const clockid_t which_clock, struct timex *tx);
+ int (*clock_adj)(const clockid_t which_clock, struct __kernel_timex *tx);
int (*timer_create)(struct k_itimer *timer);
int (*nsleep)(const clockid_t which_clock, int flags,
const struct timespec64 *);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 803fa67aace9..ee834d4fb814 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -375,6 +375,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
switch (mode) {
case TICK_BROADCAST_FORCE:
tick_broadcast_forced = 1;
+ /* fall through */
case TICK_BROADCAST_ON:
cpumask_set_cpu(cpu, tick_broadcast_on);
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 2edb5088a70b..c3f756f8534b 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -98,11 +98,11 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
#endif /* __ARCH_WANT_SYS_TIME */
-#ifdef CONFIG_COMPAT
-#ifdef __ARCH_WANT_COMPAT_SYS_TIME
+#ifdef CONFIG_COMPAT_32BIT_TIME
+#ifdef __ARCH_WANT_SYS_TIME32
/* old_time32_t is a 32 bit "long" and needs to get converted. */
-COMPAT_SYSCALL_DEFINE1(time, old_time32_t __user *, tloc)
+SYSCALL_DEFINE1(time32, old_time32_t __user *, tloc)
{
old_time32_t i;
@@ -116,7 +116,7 @@ COMPAT_SYSCALL_DEFINE1(time, old_time32_t __user *, tloc)
return i;
}
-COMPAT_SYSCALL_DEFINE1(stime, old_time32_t __user *, tptr)
+SYSCALL_DEFINE1(stime32, old_time32_t __user *, tptr)
{
struct timespec64 tv;
int err;
@@ -134,7 +134,7 @@ COMPAT_SYSCALL_DEFINE1(stime, old_time32_t __user *, tptr)
return 0;
}
-#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
+#endif /* __ARCH_WANT_SYS_TIME32 */
#endif
SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
@@ -263,35 +263,99 @@ COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
}
#endif
-SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
+#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
+SYSCALL_DEFINE1(adjtimex, struct __kernel_timex __user *, txc_p)
{
- struct timex txc; /* Local copy of parameter */
+ struct __kernel_timex txc; /* Local copy of parameter */
int ret;
/* Copy the user data space into the kernel copy
* structure. But bear in mind that the structures
* may change
*/
- if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
+ if (copy_from_user(&txc, txc_p, sizeof(struct __kernel_timex)))
return -EFAULT;
ret = do_adjtimex(&txc);
- return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
+ return copy_to_user(txc_p, &txc, sizeof(struct __kernel_timex)) ? -EFAULT : ret;
}
+#endif
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
+int get_old_timex32(struct __kernel_timex *txc, const struct old_timex32 __user *utp)
+{
+ struct old_timex32 tx32;
+
+ memset(txc, 0, sizeof(struct __kernel_timex));
+ if (copy_from_user(&tx32, utp, sizeof(struct old_timex32)))
+ return -EFAULT;
+
+ txc->modes = tx32.modes;
+ txc->offset = tx32.offset;
+ txc->freq = tx32.freq;
+ txc->maxerror = tx32.maxerror;
+ txc->esterror = tx32.esterror;
+ txc->status = tx32.status;
+ txc->constant = tx32.constant;
+ txc->precision = tx32.precision;
+ txc->tolerance = tx32.tolerance;
+ txc->time.tv_sec = tx32.time.tv_sec;
+ txc->time.tv_usec = tx32.time.tv_usec;
+ txc->tick = tx32.tick;
+ txc->ppsfreq = tx32.ppsfreq;
+ txc->jitter = tx32.jitter;
+ txc->shift = tx32.shift;
+ txc->stabil = tx32.stabil;
+ txc->jitcnt = tx32.jitcnt;
+ txc->calcnt = tx32.calcnt;
+ txc->errcnt = tx32.errcnt;
+ txc->stbcnt = tx32.stbcnt;
+
+ return 0;
+}
+
+int put_old_timex32(struct old_timex32 __user *utp, const struct __kernel_timex *txc)
+{
+ struct old_timex32 tx32;
+
+ memset(&tx32, 0, sizeof(struct old_timex32));
+ tx32.modes = txc->modes;
+ tx32.offset = txc->offset;
+ tx32.freq = txc->freq;
+ tx32.maxerror = txc->maxerror;
+ tx32.esterror = txc->esterror;
+ tx32.status = txc->status;
+ tx32.constant = txc->constant;
+ tx32.precision = txc->precision;
+ tx32.tolerance = txc->tolerance;
+ tx32.time.tv_sec = txc->time.tv_sec;
+ tx32.time.tv_usec = txc->time.tv_usec;
+ tx32.tick = txc->tick;
+ tx32.ppsfreq = txc->ppsfreq;
+ tx32.jitter = txc->jitter;
+ tx32.shift = txc->shift;
+ tx32.stabil = txc->stabil;
+ tx32.jitcnt = txc->jitcnt;
+ tx32.calcnt = txc->calcnt;
+ tx32.errcnt = txc->errcnt;
+ tx32.stbcnt = txc->stbcnt;
+ tx32.tai = txc->tai;
+ if (copy_to_user(utp, &tx32, sizeof(struct old_timex32)))
+ return -EFAULT;
+ return 0;
+}
-COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
+SYSCALL_DEFINE1(adjtimex_time32, struct old_timex32 __user *, utp)
{
- struct timex txc;
+ struct __kernel_timex txc;
int err, ret;
- err = compat_get_timex(&txc, utp);
+ err = get_old_timex32(&txc, utp);
if (err)
return err;
ret = do_adjtimex(&txc);
- err = compat_put_timex(utp, &txc);
+ err = put_old_timex32(utp, &txc);
if (err)
return err;
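
The helpers above convert between struct old_timex32 and struct __kernel_timex; userspace keeps using adjtimex(2) with its libc struct timex. A minimal read-only query, shown only to make the units handling above concrete (offset is reported in microseconds unless STA_NANO is set):

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { 0 };	/* modes == 0: read-only query */
	int state = adjtimex(&tx);

	if (state < 0) {
		perror("adjtimex");
		return 1;
	}
	printf("clock state %d, offset %ld %s\n", state, tx.offset,
	       (tx.status & STA_NANO) ? "ns" : "us");
	return 0;
}
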
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index ac5dbf2cd4a2..f986e1918d12 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2234,7 +2234,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
/**
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
*/
-static int timekeeping_validate_timex(const struct timex *txc)
+static int timekeeping_validate_timex(const struct __kernel_timex *txc)
{
if (txc->modes & ADJ_ADJTIME) {
/* singleshot must not be used with any other mode bits */
@@ -2300,7 +2300,7 @@ static int timekeeping_validate_timex(const struct timex *txc)
/**
* do_adjtimex() - Accessor function to NTP __do_adjtimex function
*/
-int do_adjtimex(struct timex *txc)
+int do_adjtimex(struct __kernel_timex *txc)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 86489950d690..b73e8850e58d 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -37,15 +37,8 @@ DEFINE_SHOW_ATTRIBUTE(tk_debug_sleep_time);
static int __init tk_debug_sleep_time_init(void)
{
- struct dentry *d;
-
- d = debugfs_create_file("sleep_time", 0444, NULL, NULL,
- &tk_debug_sleep_time_fops);
- if (!d) {
- pr_err("Failed to create sleep_time debug file\n");
- return -ENOMEM;
- }
-
+ debugfs_create_file("sleep_time", 0444, NULL, NULL,
+ &tk_debug_sleep_time_fops);
return 0;
}
late_initcall(tk_debug_sleep_time_init);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 444156debfa0..2fce056f8a49 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -647,7 +647,7 @@ static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
-
+ /* fall through */
default:
return false;
}
@@ -1632,7 +1632,7 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
run_local_timers();
- rcu_check_callbacks(user_tick);
+ rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_tick();
diff --git a/kernel/torture.c b/kernel/torture.c
index bbf6d473e50c..8faa1a9aaeb9 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Common functions for in-kernel torture tests.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (C) IBM Corporation, 2014
*
- * Author: Paul E. McKenney <paulmck@us.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
* Based on kernel/rcu/torture.c.
*/
@@ -53,7 +40,7 @@
#include "rcu/rcu.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
static char *torture_type;
static int verbose;
@@ -75,6 +62,7 @@ static DEFINE_MUTEX(fullstop_mutex);
static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
+static torture_ofl_func *onoff_f;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
@@ -118,6 +106,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
pr_alert("%s" TORTURE_FLAG
"torture_onoff task: offlined %d\n",
torture_type, cpu);
+ if (onoff_f)
+ onoff_f();
(*n_offl_successes)++;
delta = jiffies - starttime;
*sum_offl += delta;
@@ -243,11 +233,12 @@ stop:
/*
* Initiate online-offline handling.
*/
-int torture_onoff_init(long ooholdoff, long oointerval)
+int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
onoff_holdoff = ooholdoff;
onoff_interval = oointerval;
+ onoff_f = f;
if (onoff_interval <= 0)
return 0;
return torture_create_kthread(torture_onoff, NULL, onoff_task);
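
torture_onoff_init() now takes a callback that is invoked after every successful CPU offline. A hedged sketch of a caller — the callback name, its body and the numeric arguments are hypothetical; only the new three-argument signature comes from this patch:

#include <linux/module.h>
#include <linux/torture.h>

/* hypothetical post-offline hook inside a torture test module */
static void my_post_offline_check(void)
{
	/* e.g. sanity-check per-CPU state after a successful offline */
	pr_info("torture: CPU offline hook ran\n");
}

static int __init my_torture_init(void)
{
	/* holdoff/interval keep their existing meaning; the callback is new */
	return torture_onoff_init(0, 10, my_post_offline_check);
}
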
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 8b068adb9da1..d64c00afceb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -431,8 +431,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
if (unlikely(event->oncpu != cpu))
return -EOPNOTSUPP;
- perf_event_output(event, sd, regs);
- return 0;
+ return perf_event_output(event, sd, regs);
}
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
@@ -1204,22 +1203,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
- int err;
-
- mutex_lock(&bpf_event_mutex);
- err = __bpf_probe_register(btp, prog);
- mutex_unlock(&bpf_event_mutex);
- return err;
+ return __bpf_probe_register(btp, prog);
}
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
- int err;
-
- mutex_lock(&bpf_event_mutex);
- err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
- mutex_unlock(&bpf_event_mutex);
- return err;
+ return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c521b7347482..c4238b441624 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
const char tgid_space[] = " ";
const char space[] = " ";
+ print_event_info(buf, m);
+
seq_printf(m, "# %s _-----=> irqs-off\n",
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 27821480105e..217ef481fbbb 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1301,7 +1301,7 @@ static int parse_pred(const char *str, void *data,
/* go past the last quote */
i++;
- } else if (isdigit(str[i])) {
+ } else if (isdigit(str[i]) || str[i] == '-') {
/* Make sure the field is not a string */
if (is_string_field(field)) {
@@ -1314,6 +1314,9 @@ static int parse_pred(const char *str, void *data,
goto err_free;
}
+ if (str[i] == '-')
+ i++;
+
/* We allow 0xDEADBEEF */
while (isalnum(str[i]))
i++;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d3294721f119..d42a473b8240 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -14,6 +14,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include "trace.h"
@@ -365,7 +366,7 @@ out:
__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
-static inline void
+static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
int cpu;
@@ -401,7 +402,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
atomic_dec(&data->disabled);
}
-static inline void
+static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
int cpu;
@@ -443,6 +444,7 @@ void start_critical_timings(void)
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
+NOKPROBE_SYMBOL(start_critical_timings);
void stop_critical_timings(void)
{
@@ -452,6 +454,7 @@ void stop_critical_timings(void)
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
+NOKPROBE_SYMBOL(stop_critical_timings);
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;
@@ -611,6 +614,7 @@ void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
if (!preempt_trace(pc) && irq_trace())
stop_critical_timing(a0, a1, pc);
}
+NOKPROBE_SYMBOL(tracer_hardirqs_on);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
@@ -619,6 +623,7 @@ void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
if (!preempt_trace(pc) && irq_trace())
start_critical_timing(a0, a1, pc);
}
+NOKPROBE_SYMBOL(tracer_hardirqs_off);
static int irqsoff_tracer_init(struct trace_array *tr)
{
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5c19b8c41c7e..99592c27465e 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -607,11 +607,17 @@ static int trace_kprobe_create(int argc, const char *argv[])
char buf[MAX_EVENT_NAME_LEN];
unsigned int flags = TPARG_FL_KERNEL;
- /* argc must be >= 1 */
- if (argv[0][0] == 'r') {
+ switch (argv[0][0]) {
+ case 'r':
is_return = true;
flags |= TPARG_FL_RETURN;
- } else if (argv[0][0] != 'p' || argc < 2)
+ break;
+ case 'p':
+ break;
+ default:
+ return -ECANCELED;
+ }
+ if (argc < 2)
return -ECANCELED;
event = strchr(&argv[0][1], ':');
@@ -855,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
- mm_segment_t old_fs;
int ret, len = 0;
u8 c;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- pagefault_disable();
-
do {
- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+ ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
len++;
} while (c && ret == 0 && len < MAX_STRING_SIZE);
- pagefault_enable();
- set_fs(old_fs);
-
return (ret < 0) ? ret : len;
}
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 71f553cceb3c..4d8e99fdbbbe 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -9,6 +9,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include "trace.h"
#define CREATE_TRACE_POINTS
@@ -30,6 +31,7 @@ void trace_hardirqs_on(void)
lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
+NOKPROBE_SYMBOL(trace_hardirqs_on);
void trace_hardirqs_off(void)
{
@@ -43,6 +45,7 @@ void trace_hardirqs_off(void)
lockdep_hardirqs_off(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);
+NOKPROBE_SYMBOL(trace_hardirqs_off);
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
@@ -56,6 +59,7 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
+NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
@@ -69,6 +73,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
lockdep_hardirqs_off(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
+NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */
#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
index 5c56afc17cf8..4737bb8c07a3 100644
--- a/kernel/trace/trace_probe_tmpl.h
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
if (unlikely(arg->dynamic))
*dl = make_data_loc(maxlen, dyndata - base);
ret = process_fetch_insn(arg->code, regs, dl, base);
- if (unlikely(ret < 0 && arg->dynamic))
+ if (unlikely(ret < 0 && arg->dynamic)) {
*dl = make_data_loc(0, dyndata - base);
- else
+ } else {
dyndata += ret;
+ maxlen -= ret;
+ }
}
}
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e335576b9411..9bde07c06362 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -5,7 +5,7 @@
* Copyright (C) IBM Corporation, 2010-2012
* Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
*/
-#define pr_fmt(fmt) "trace_kprobe: " fmt
+#define pr_fmt(fmt) "trace_uprobe: " fmt
#include <linux/ctype.h>
#include <linux/module.h>
@@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
if (ret >= 0) {
if (ret == maxlen)
dst[ret - 1] = '\0';
+ else
+ /*
+ * Include the terminating null byte. In this case it
+ * was copied by strncpy_from_user but not accounted
+ * for in ret.
+ */
+ ret++;
*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
}
diff --git a/kernel/umh.c b/kernel/umh.c
index 0baa672e023c..d937cbad903a 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -37,6 +37,8 @@ static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);
+static LIST_HEAD(umh_list);
+static DEFINE_MUTEX(umh_list_lock);
static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
@@ -100,10 +102,12 @@ static int call_usermodehelper_exec_async(void *data)
commit_creds(new);
sub_info->pid = task_pid_nr(current);
- if (sub_info->file)
+ if (sub_info->file) {
retval = do_execve_file(sub_info->file,
sub_info->argv, sub_info->envp);
- else
+ if (!retval)
+ current->flags |= PF_UMH;
+ } else
retval = do_execve(getname_kernel(sub_info->path),
(const char __user *const __user *)sub_info->argv,
(const char __user *const __user *)sub_info->envp);
@@ -517,6 +521,11 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
goto out;
err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+ if (!err) {
+ mutex_lock(&umh_list_lock);
+ list_add(&info->list, &umh_list);
+ mutex_unlock(&umh_list_lock);
+ }
out:
fput(file);
return err;
@@ -679,6 +688,26 @@ static int proc_cap_handler(struct ctl_table *table, int write,
return 0;
}
+void __exit_umh(struct task_struct *tsk)
+{
+ struct umh_info *info;
+ pid_t pid = tsk->pid;
+
+ mutex_lock(&umh_list_lock);
+ list_for_each_entry(info, &umh_list, list) {
+ if (info->pid == pid) {
+ list_del(&info->list);
+ mutex_unlock(&umh_list_lock);
+ goto out;
+ }
+ }
+ mutex_unlock(&umh_list_lock);
+ return;
+out:
+ if (info->cleanup)
+ info->cleanup(info);
+}
+
struct ctl_table usermodehelper_table[] = {
{
.procname = "bset",
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 392be4b252f6..69bd8083930c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -259,6 +259,8 @@ struct workqueue_struct {
struct wq_device *wq_dev; /* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
+ char *lock_name;
+ struct lock_class_key key;
struct lockdep_map lockdep_map;
#endif
char name[WQ_NAME_LEN]; /* I: workqueue name */
@@ -646,7 +648,7 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
* The following mb guarantees that previous clear of a PENDING bit
* will not be reordered with any speculative LOADS or STORES from
* work->current_func, which is executed afterwards. This possible
- * reordering can lead to a missed execution on attempt to qeueue
+ * reordering can lead to a missed execution on attempt to queue
* the same @work. E.g. consider this case:
*
* CPU#0 CPU#1
@@ -910,6 +912,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
}
/**
+ * wq_worker_last_func - retrieve worker's last work function
+ *
+ * Determine the last function a worker executed. This is called from
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
+ *
+ * Return:
+ * The last work function %current executed as a worker, NULL if it
+ * hasn't executed any work yet.
+ */
+work_func_t wq_worker_last_func(struct task_struct *task)
+{
+ struct worker *worker = kthread_data(task);
+
+ return worker->last_func;
+}
+
+/**
* worker_set_flags - set worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to set
@@ -1321,7 +1343,7 @@ static bool is_chained_work(struct workqueue_struct *wq)
worker = current_wq_worker();
/*
- * Return %true iff I'm a worker execuing a work item on @wq. If
+ * Return %true iff I'm a worker executing a work item on @wq. If
* I'm @worker, it's safe to dereference it without locking.
*/
return worker && worker->current_pwq->wq == wq;
@@ -1492,6 +1514,90 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
}
EXPORT_SYMBOL(queue_work_on);
+/**
+ * workqueue_select_cpu_near - Select a CPU based on NUMA node
+ * @node: NUMA node ID that we want to select a CPU from
+ *
+ * This function will attempt to find a "random" cpu available on a given
+ * node. If there are no CPUs available on the given node it will return
+ * WORK_CPU_UNBOUND indicating that we should just schedule to any
+ * available CPU if we need to schedule this work.
+ */
+static int workqueue_select_cpu_near(int node)
+{
+ int cpu;
+
+ /* No point in doing this if NUMA isn't enabled for workqueues */
+ if (!wq_numa_enabled)
+ return WORK_CPU_UNBOUND;
+
+ /* Delay binding to CPU if node is not valid or online */
+ if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
+ return WORK_CPU_UNBOUND;
+
+ /* Use local node/cpu if we are already there */
+ cpu = raw_smp_processor_id();
+ if (node == cpu_to_node(cpu))
+ return cpu;
+
+ /* Use "random", otherwise known as "first", online CPU of node */
+ cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
+
+ /* If CPU is valid return that, otherwise just defer */
+ return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
+}
+
+/**
+ * queue_work_node - queue work on a "random" cpu for a given NUMA node
+ * @node: NUMA node that we are targeting the work for
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * We queue the work to a "random" CPU within a given NUMA node. The basic
+ * idea here is to provide a way to somehow associate work with a given
+ * NUMA node.
+ *
+ * This function will only make a best effort attempt at getting this onto
+ * the right NUMA node. If no node is requested or the requested node is
+ * offline then we just fall back to standard queue_work behavior.
+ *
+ * Currently the "random" CPU ends up being the first available CPU in the
+ * intersection of cpu_online_mask and the cpumask of the node, unless we
+ * are running on the node. In that case we just use the current CPU.
+ *
+ * Return: %false if @work was already on a queue, %true otherwise.
+ */
+bool queue_work_node(int node, struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ /*
+ * This current implementation is specific to unbound workqueues.
+ * Specifically we only return the first available CPU for a given
+ * node instead of cycling through individual CPUs within the node.
+ *
+ * If this is used with a per-cpu workqueue then the logic in
+ * workqueue_select_cpu_near would need to be updated to allow for
+ * some round robin type logic.
+ */
+ WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
+
+ local_irq_save(flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ int cpu = workqueue_select_cpu_near(node);
+
+ __queue_work(cpu, wq, work);
+ ret = true;
+ }
+
+ local_irq_restore(flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(queue_work_node);
+
void delayed_work_timer_fn(struct timer_list *t)
{
struct delayed_work *dwork = from_timer(dwork, t, timer);
@@ -1619,7 +1725,7 @@ static void rcu_work_rcufn(struct rcu_head *rcu)
*
* Return: %false if @rwork was already pending, %true otherwise. Note
* that a full RCU grace period is guaranteed only after a %true return.
- * While @rwork is guarnateed to be executed after a %false return, the
+ * While @rwork is guaranteed to be executed after a %false return, the
* execution may happen before a full RCU grace period has passed.
*/
bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
@@ -2184,6 +2290,9 @@ __acquires(&pool->lock)
if (unlikely(cpu_intensive))
worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+ /* tag the worker for identification in schedule() */
+ worker->last_func = worker->current_func;
+
/* we're done with it, release */
hash_del(&worker->hentry);
worker->current_work = NULL;
@@ -2908,6 +3017,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
if (WARN_ON(!wq_online))
return false;
+ if (WARN_ON(!work->func))
+ return false;
+
if (!from_cancel) {
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
@@ -3314,11 +3426,49 @@ static int init_worker_pool(struct worker_pool *pool)
return 0;
}
+#ifdef CONFIG_LOCKDEP
+static void wq_init_lockdep(struct workqueue_struct *wq)
+{
+ char *lock_name;
+
+ lockdep_register_key(&wq->key);
+ lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
+ if (!lock_name)
+ lock_name = wq->name;
+ lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+}
+
+static void wq_unregister_lockdep(struct workqueue_struct *wq)
+{
+ lockdep_unregister_key(&wq->key);
+}
+
+static void wq_free_lockdep(struct workqueue_struct *wq)
+{
+ if (wq->lock_name != wq->name)
+ kfree(wq->lock_name);
+}
+#else
+static void wq_init_lockdep(struct workqueue_struct *wq)
+{
+}
+
+static void wq_unregister_lockdep(struct workqueue_struct *wq)
+{
+}
+
+static void wq_free_lockdep(struct workqueue_struct *wq)
+{
+}
+#endif
+
static void rcu_free_wq(struct rcu_head *rcu)
{
struct workqueue_struct *wq =
container_of(rcu, struct workqueue_struct, rcu);
+ wq_free_lockdep(wq);
+
if (!(wq->flags & WQ_UNBOUND))
free_percpu(wq->cpu_pwqs);
else
@@ -3509,8 +3659,10 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
* If we're the last pwq going away, @wq is already dead and no one
* is gonna access it anymore. Schedule RCU free.
*/
- if (is_last)
+ if (is_last) {
+ wq_unregister_lockdep(wq);
call_rcu(&wq->rcu, rcu_free_wq);
+ }
}
/**
@@ -4044,11 +4196,9 @@ static int init_rescuer(struct workqueue_struct *wq)
return 0;
}
-struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
- unsigned int flags,
- int max_active,
- struct lock_class_key *key,
- const char *lock_name, ...)
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+ unsigned int flags,
+ int max_active, ...)
{
size_t tbl_size = 0;
va_list args;
@@ -4083,7 +4233,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
goto err_free_wq;
}
- va_start(args, lock_name);
+ va_start(args, max_active);
vsnprintf(wq->name, sizeof(wq->name), fmt, args);
va_end(args);
@@ -4100,7 +4250,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
INIT_LIST_HEAD(&wq->flusher_overflow);
INIT_LIST_HEAD(&wq->maydays);
- lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
+ wq_init_lockdep(wq);
INIT_LIST_HEAD(&wq->list);
if (alloc_and_link_pwqs(wq) < 0)
@@ -4138,7 +4288,7 @@ err_destroy:
destroy_workqueue(wq);
return NULL;
}
-EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
+EXPORT_SYMBOL_GPL(alloc_workqueue);
/**
* destroy_workqueue - safely terminate a workqueue
@@ -4191,6 +4341,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
kthread_stop(wq->rescuer->task);
if (!(wq->flags & WQ_UNBOUND)) {
+ wq_unregister_lockdep(wq);
/*
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
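
With alloc_workqueue() now a real exported function and queue_work_node() available, a driver could use the NUMA-aware entry point roughly as follows. The workqueue, work item and function names are made up; the calls themselves are the ones touched above, and queue_work_node() warns unless the workqueue is unbound:

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	pr_info("running near the requested node\n");
}

static DECLARE_WORK(my_work, my_work_fn);
static struct workqueue_struct *my_wq;

static int my_setup(int nid)
{
	/* queue_work_node() expects an unbound workqueue */
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;

	/* best effort: falls back to any CPU if the node is offline */
	queue_work_node(nid, my_wq, &my_work);
	return 0;
}
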
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 66fbb5a9e633..cb68b03ca89a 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -53,6 +53,9 @@ struct worker {
/* used only by rescuers to point to the target workqueue */
struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
+
+ /* used by the scheduler to determine a worker's last known identity */
+ work_func_t last_func;
};
/**
@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void)
/*
* Scheduler hooks for concurrency managed workqueue. Only to be used from
- * sched/core.c and workqueue.c.
+ * sched/ and workqueue.c.
*/
void wq_worker_waking_up(struct task_struct *task, int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task);
+work_func_t wq_worker_last_func(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
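
wq_worker_last_func() goes through kthread_data(), so it is only meaningful for tasks that really are workqueue workers. A hedged sketch of the intended scheduler-side pattern — the PF_WQ_WORKER check, the helper name and the comparison target are assumptions; only wq_worker_last_func() itself is added here:

#include <linux/sched.h>
#include "workqueue_internal.h"	/* scheduler code includes this header */

/* hypothetical helper called with rq->lock held */
static bool task_last_ran(struct task_struct *p, work_func_t fn)
{
	if (!(p->flags & PF_WQ_WORKER))
		return false;
	return wq_worker_last_func(p) == fn;
}
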
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d4df5b24d75e..b19cc9c36475 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -222,7 +222,6 @@ config ENABLE_MUST_CHECK
config FRAME_WARN
int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192
- default 3072 if KASAN_EXTRA
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1280 if (!64BIT && PARISC)
default 1024 if (!64BIT && !PARISC)
@@ -266,23 +265,6 @@ config UNUSED_SYMBOLS
you really need it, and what the merge plan to the mainline kernel for
your module is.
-config PAGE_OWNER
- bool "Track page owner"
- depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
- select DEBUG_FS
- select STACKTRACE
- select STACKDEPOT
- select PAGE_EXTENSION
- help
- This keeps track of what call chain is the owner of a page, may
- help to find bare alloc_page(s) leaks. Even if you include this
- feature on your build, it is disabled in default. You should pass
- "page_owner=on" to boot parameter in order to enable it. Eats
- a fair amount of memory if enabled. See tools/vm/page_owner_sort.c
- for user-space helper.
-
- If unsure, say N.
-
config DEBUG_FS
bool "Debug Filesystem"
help
@@ -1700,7 +1682,6 @@ if RUNTIME_TESTING_MENU
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_FS
- depends on BLOCK
help
This module enables testing of the different dumping mechanisms by
inducing system failures at predefined crash points.
@@ -1876,6 +1857,19 @@ config TEST_LKM
If unsure, say N.
+config TEST_VMALLOC
+ tristate "Test module for stress/performance analysis of vmalloc allocator"
+ default n
+ depends on MMU
+ depends on m
+ help
+ This builds the "test_vmalloc" module that should be used for
+ stress and performance analysis, so that any new change to the
+ vmalloc subsystem can be evaluated from a performance and
+ stability point of view.
+
+ If unsure, say N.
+
config TEST_USER_COPY
tristate "Test user/kernel boundary protections"
depends on m
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index d8c474b6691e..9950b660e62d 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -78,16 +78,6 @@ config KASAN_SW_TAGS
endchoice
-config KASAN_EXTRA
- bool "KASAN: extra checks"
- depends on KASAN_GENERIC && DEBUG_KERNEL && !COMPILE_TEST
- help
- This enables further checks in generic KASAN, for now it only
- includes the address-use-after-scope check that can lead to
- excessive kernel stack usage, frame size warnings and longer
- compile time.
- See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715
-
choice
prompt "Instrumentation type"
depends on KASAN
@@ -113,6 +103,28 @@ config KASAN_INLINE
endchoice
+config KASAN_STACK_ENABLE
+ bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ default !(CLANG_VERSION < 90000)
+ depends on KASAN
+ help
+ The LLVM stack address sanitizer has a known problem that
+ causes excessive stack usage in a lot of functions; see
+ https://bugs.llvm.org/show_bug.cgi?id=38809
+ Disabling asan-stack makes it safe to run kernels built
+ with clang-8 with KASAN enabled, though it loses some of
+ the functionality.
+ This feature is always disabled when compile-testing with clang-8
+ or earlier to avoid cluttering the output with stack overflow
+ warnings, but clang-8 users can still enable it for builds without
+ CONFIG_COMPILE_TEST. On gcc and later clang versions it is
+ assumed to always be safe to use and is enabled by default.
+
+config KASAN_STACK
+ int
+ default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+ default 0
+
config KASAN_S390_4_LEVEL_PAGING
bool "KASan: use 4-level paging"
depends on KASAN && S390
diff --git a/lib/Makefile b/lib/Makefile
index e1b59da71418..cbfacd55aeca 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -60,6 +60,7 @@ UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_LKM) += test_module.o
+obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c6659cb37033..59875eb278ea 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -768,9 +768,11 @@ all_leaves_cluster_together:
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
- blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
- pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
- new_s0->index_key[keylen - 1] &= ~blank;
+ if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
+ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+ }
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
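
The new guard matters because the blanking mask degenerates when the key length is an exact multiple of the chunk size; a quick worked example of the old behaviour (plain C, illustrative only):

/* When level & ASSOC_ARRAY_KEY_CHUNK_MASK == 0, the old code computed: */
unsigned long blank = ULONG_MAX << 0;	/* == ULONG_MAX                      */
/* index_key[keylen - 1] &= ~blank;       i.e. &= 0 -> last chunk wiped out */
/* The added check skips the masking in that case, leaving a fully
 * populated final key chunk intact. */
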
diff --git a/lib/bsearch.c b/lib/bsearch.c
index 18b445b010c3..82512fe7b33c 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/bsearch.h>
+#include <linux/kprobes.h>
/*
* bsearch - binary search an array of elements
@@ -53,3 +54,4 @@ void *bsearch(const void *key, const void *base, size_t num, size_t size,
return NULL;
}
EXPORT_SYMBOL(bsearch);
+NOKPROBE_SYMBOL(bsearch);
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 8d666ab84b5c..087a3e9a0202 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -5,6 +5,7 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/numa.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -206,7 +207,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
/* Wrap: we always want a cpu. */
i %= num_online_cpus();
- if (node == -1) {
+ if (node == NUMA_NO_NODE) {
for_each_cpu(cpu, cpu_online_mask)
if (i-- == 0)
return cpu;
diff --git a/lib/crc32.c b/lib/crc32.c
index 45b1d67a1767..4a20455d1f61 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
-u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
-u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
/*
* This multiplies the polynomials x and y modulo the given modulus.
diff --git a/lib/devres.c b/lib/devres.c
index faccf1a037d0..69bed2f38306 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -134,7 +134,6 @@ EXPORT_SYMBOL(devm_iounmap);
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
resource_size_t size;
- const char *name;
void __iomem *dest_ptr;
BUG_ON(!dev);
@@ -145,9 +144,8 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
}
size = resource_size(res);
- name = res->name ?: dev_name(dev);
- if (!devm_request_mem_region(dev, res->start, size, name)) {
+ if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
dev_err(dev, "can't request region for resource %pR\n", res);
return IOMEM_ERR_PTR(-EBUSY);
}
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 14436f4ca6bd..30e0f9770f88 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
if (x <= ULONG_MAX)
return int_sqrt((unsigned long) x);
- m = 1ULL << (fls64(x) & ~1ULL);
+ m = 1ULL << ((fls64(x) - 1) & ~1ULL);
while (m != 0) {
b = y + m;
y >>= 1;
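The seed m is meant to be the largest power of four that does not exceed x; subtracting one from fls64(x) before rounding the shift down to an even value gives exactly that, whereas the old expression could overshoot x and, for inputs with bit 63 set, shift by 64. A hedged userspace sketch of the corrected routine, with fls64() approximated by a compiler builtin:

	#include <stdint.h>
	#include <stdio.h>

	static int fls64_like(uint64_t x)	/* 1-based index of the highest set bit */
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	static uint32_t int_sqrt64_sketch(uint64_t x)
	{
		uint64_t b, m, y = 0;

		if (x <= 1)
			return (uint32_t)x;

		m = 1ULL << ((fls64_like(x) - 1) & ~1ULL);	/* largest 4^k <= x */
		while (m != 0) {
			b = y + m;
			y >>= 1;
			if (x >= b) {
				x -= b;
				y += m;
			}
			m >>= 2;
		}
		return (uint32_t)y;
	}

	int main(void)
	{
		printf("%u\n", int_sqrt64_sketch(1ULL << 63));	/* 3037000499 */
		return 0;
	}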
diff --git a/lib/iomap.c b/lib/iomap.c
index 541d926da95e..e909ab71e995 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -65,8 +65,9 @@ static void bad_io_access(unsigned long port, const char *access)
#endif
#ifndef mmio_read16be
-#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
-#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
+#define mmio_read16be(addr) swab16(readw(addr))
+#define mmio_read32be(addr) swab32(readl(addr))
+#define mmio_read64be(addr) swab64(readq(addr))
#endif
unsigned int ioread8(void __iomem *addr)
@@ -100,14 +101,89 @@ EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
+#ifdef readq
+static u64 pio_read64_lo_hi(unsigned long port)
+{
+ u64 lo, hi;
+
+ lo = inl(port);
+ hi = inl(port + sizeof(u32));
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64_hi_lo(unsigned long port)
+{
+ u64 lo, hi;
+
+ hi = inl(port + sizeof(u32));
+ lo = inl(port);
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64be_lo_hi(unsigned long port)
+{
+ u64 lo, hi;
+
+ lo = pio_read32be(port + sizeof(u32));
+ hi = pio_read32be(port);
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64be_hi_lo(unsigned long port)
+{
+ u64 lo, hi;
+
+ hi = pio_read32be(port);
+ lo = pio_read32be(port + sizeof(u32));
+
+ return lo | (hi << 32);
+}
+
+u64 ioread64_lo_hi(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64_hi_lo(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64be_lo_hi(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64be_lo_hi(port),
+ return mmio_read64be(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64be_hi_lo(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64be_hi_lo(port),
+ return mmio_read64be(addr));
+ return 0xffffffffffffffffULL;
+}
+
+EXPORT_SYMBOL(ioread64_lo_hi);
+EXPORT_SYMBOL(ioread64_hi_lo);
+EXPORT_SYMBOL(ioread64be_lo_hi);
+EXPORT_SYMBOL(ioread64be_hi_lo);
+
+#endif /* readq */
+
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif
#ifndef mmio_write16be
-#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
-#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
+#define mmio_write16be(val,port) writew(swab16(val),port)
+#define mmio_write32be(val,port) writel(swab32(val),port)
+#define mmio_write64be(val,port) writeq(swab64(val),port)
#endif
void iowrite8(u8 val, void __iomem *addr)
@@ -136,6 +212,62 @@ EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
+#ifdef writeq
+static void pio_write64_lo_hi(u64 val, unsigned long port)
+{
+ outl(val, port);
+ outl(val >> 32, port + sizeof(u32));
+}
+
+static void pio_write64_hi_lo(u64 val, unsigned long port)
+{
+ outl(val >> 32, port + sizeof(u32));
+ outl(val, port);
+}
+
+static void pio_write64be_lo_hi(u64 val, unsigned long port)
+{
+ pio_write32be(val, port + sizeof(u32));
+ pio_write32be(val >> 32, port);
+}
+
+static void pio_write64be_hi_lo(u64 val, unsigned long port)
+{
+ pio_write32be(val >> 32, port);
+ pio_write32be(val, port + sizeof(u32));
+}
+
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64_lo_hi(val, port),
+ writeq(val, addr));
+}
+
+void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64_hi_lo(val, port),
+ writeq(val, addr));
+}
+
+void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64be_lo_hi(val, port),
+ mmio_write64be(val, addr));
+}
+
+void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64be_hi_lo(val, port),
+ mmio_write64be(val, addr));
+}
+
+EXPORT_SYMBOL(iowrite64_lo_hi);
+EXPORT_SYMBOL(iowrite64_hi_lo);
+EXPORT_SYMBOL(iowrite64be_lo_hi);
+EXPORT_SYMBOL(iowrite64be_hi_lo);
+
+#endif /* readq */
+
/*
* These are the "repeat MMIO read/write" functions.
* Note the "__raw" accesses, since we don't want to
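For callers, the _lo_hi/_hi_lo suffix on the new 64-bit helpers documents which 32-bit half is accessed first whenever the access has to be split (the port-I/O path above; more generally the io-64-nonatomic-* headers), while the MMIO path shown here is a single readq()/writeq(). A hedged sketch of how a driver might use them; the register offset and the latching behaviour are hypothetical:

	/* A device that latches the high half when the low half is read
	 * wants the low word accessed first, hence the _lo_hi variant.
	 */
	#define MY_CNT_OFF	0x40	/* hypothetical 64-bit counter register */

	static u64 mydev_read_counter(void __iomem *base)
	{
		return ioread64_lo_hi(base + MY_CNT_OFF);
	}

	static void mydev_write_counter(void __iomem *base, u64 val)
	{
		iowrite64_lo_hi(val, base + MY_CNT_OFF);
	}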
diff --git a/lib/kobject.c b/lib/kobject.c
index b72e00fd7d09..aa89edcd2b63 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -887,7 +887,7 @@ static void kset_release(struct kobject *kobj)
kfree(kset);
}
-void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+static void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
if (kobj->parent)
kobject_get_ownership(kobj->parent, uid, gid);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 27c6118afd1c..f05802687ba4 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -200,7 +200,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
r = kobject_action_type(buf, count, &action, &action_args);
if (r) {
- msg = "unknown uevent action string\n";
+ msg = "unknown uevent action string";
goto out;
}
@@ -212,7 +212,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
r = kobject_action_args(action_args,
count - (action_args - buf), &env);
if (r == -EINVAL) {
- msg = "incorrect uevent action arguments\n";
+ msg = "incorrect uevent action arguments";
goto out;
}
@@ -224,7 +224,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
out:
if (r) {
devpath = kobject_get_path(kobj, GFP_KERNEL);
- printk(KERN_WARNING "synth uevent: %s: %s",
+ pr_warn("synth uevent: %s: %s\n",
devpath ?: "unknown device",
msg ?: "failed to send uevent");
kfree(devpath);
@@ -765,8 +765,7 @@ static int uevent_net_init(struct net *net)
ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
if (!ue_sk->sk) {
- printk(KERN_ERR
- "kobject_uevent: unable to create netlink socket!\n");
+ pr_err("kobject_uevent: unable to create netlink socket!\n");
kfree(ue_sk);
return -ENODEV;
}
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 1e1bbf171eca..a1705545e6ac 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1989,6 +1989,7 @@ void locking_selftest(void)
init_shared_classes();
debug_locks_silent = !debug_locks_verbose;
+ lockdep_set_selftest_task(current);
DO_TESTCASE_6R("A-A deadlock", AA);
DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
@@ -2097,5 +2098,6 @@ void locking_selftest(void)
printk("---------------------------------\n");
debug_locks = 1;
}
+ lockdep_set_selftest_task(NULL);
debug_locks_silent = 0;
}
diff --git a/lib/objagg.c b/lib/objagg.c
index c9b457a91153..576be22e86de 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>
+#include <linux/idr.h>
#include <linux/list.h>
#include <linux/sort.h>
#include <linux/objagg.h>
@@ -11,6 +12,34 @@
#define CREATE_TRACE_POINTS
#include <trace/events/objagg.h>
+struct objagg_hints {
+ struct rhashtable node_ht;
+ struct rhashtable_params ht_params;
+ struct list_head node_list;
+ unsigned int node_count;
+ unsigned int root_count;
+ unsigned int refcount;
+ const struct objagg_ops *ops;
+};
+
+struct objagg_hints_node {
+ struct rhash_head ht_node; /* member of objagg_hints->node_ht */
+ struct list_head list; /* member of objagg_hints->node_list */
+ struct objagg_hints_node *parent;
+ unsigned int root_id;
+ struct objagg_obj_stats_info stats_info;
+ unsigned long obj[0];
+};
+
+static struct objagg_hints_node *
+objagg_hints_lookup(struct objagg_hints *objagg_hints, void *obj)
+{
+ if (!objagg_hints)
+ return NULL;
+ return rhashtable_lookup_fast(&objagg_hints->node_ht, obj,
+ objagg_hints->ht_params);
+}
+
struct objagg {
const struct objagg_ops *ops;
void *priv;
@@ -18,6 +47,8 @@ struct objagg {
struct rhashtable_params ht_params;
struct list_head obj_list;
unsigned int obj_count;
+ struct ida root_ida;
+ struct objagg_hints *hints;
};
struct objagg_obj {
@@ -30,6 +61,7 @@ struct objagg_obj {
void *delta_priv; /* user delta private */
void *root_priv; /* user root private */
};
+ unsigned int root_id;
unsigned int refcount; /* counts number of users of this object
* including nested objects
*/
@@ -130,7 +162,8 @@ static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj)
static int objagg_obj_parent_assign(struct objagg *objagg,
struct objagg_obj *objagg_obj,
- struct objagg_obj *parent)
+ struct objagg_obj *parent,
+ bool take_parent_ref)
{
void *delta_priv;
@@ -144,7 +177,8 @@ static int objagg_obj_parent_assign(struct objagg *objagg,
*/
objagg_obj->parent = parent;
objagg_obj->delta_priv = delta_priv;
- objagg_obj_ref_inc(objagg_obj->parent);
+ if (take_parent_ref)
+ objagg_obj_ref_inc(objagg_obj->parent);
trace_objagg_obj_parent_assign(objagg, objagg_obj,
parent,
parent->refcount);
@@ -164,7 +198,7 @@ static int objagg_obj_parent_lookup_assign(struct objagg *objagg,
if (!objagg_obj_is_root(objagg_obj_cur))
continue;
err = objagg_obj_parent_assign(objagg, objagg_obj,
- objagg_obj_cur);
+ objagg_obj_cur, true);
if (!err)
return 0;
}
@@ -184,16 +218,68 @@ static void objagg_obj_parent_unassign(struct objagg *objagg,
__objagg_obj_put(objagg, objagg_obj->parent);
}
+static int objagg_obj_root_id_alloc(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_hints_node *hnode)
+{
+ unsigned int min, max;
+ int root_id;
+
+ /* In case there are no hints available, the root id is invalid. */
+ if (!objagg->hints) {
+ objagg_obj->root_id = OBJAGG_OBJ_ROOT_ID_INVALID;
+ return 0;
+ }
+
+ if (hnode) {
+ min = hnode->root_id;
+ max = hnode->root_id;
+ } else {
+ /* For objects with no hint, start after the last
+ * hinted root_id.
+ */
+ min = objagg->hints->root_count;
+ max = ~0;
+ }
+
+ root_id = ida_alloc_range(&objagg->root_ida, min, max, GFP_KERNEL);
+
+ if (root_id < 0)
+ return root_id;
+ objagg_obj->root_id = root_id;
+ return 0;
+}
+
+static void objagg_obj_root_id_free(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg->hints)
+ return;
+ ida_free(&objagg->root_ida, objagg_obj->root_id);
+}
+
static int objagg_obj_root_create(struct objagg *objagg,
- struct objagg_obj *objagg_obj)
+ struct objagg_obj *objagg_obj,
+ struct objagg_hints_node *hnode)
{
- objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
- objagg_obj->obj);
- if (IS_ERR(objagg_obj->root_priv))
- return PTR_ERR(objagg_obj->root_priv);
+ int err;
+ err = objagg_obj_root_id_alloc(objagg, objagg_obj, hnode);
+ if (err)
+ return err;
+ objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
+ objagg_obj->obj,
+ objagg_obj->root_id);
+ if (IS_ERR(objagg_obj->root_priv)) {
+ err = PTR_ERR(objagg_obj->root_priv);
+ goto err_root_create;
+ }
trace_objagg_obj_root_create(objagg, objagg_obj);
return 0;
+
+err_root_create:
+ objagg_obj_root_id_free(objagg, objagg_obj);
+ return err;
}
static void objagg_obj_root_destroy(struct objagg *objagg,
@@ -201,19 +287,69 @@ static void objagg_obj_root_destroy(struct objagg *objagg,
{
trace_objagg_obj_root_destroy(objagg, objagg_obj);
objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv);
+ objagg_obj_root_id_free(objagg, objagg_obj);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj);
+
+static int objagg_obj_init_with_hints(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ bool *hint_found)
+{
+ struct objagg_hints_node *hnode;
+ struct objagg_obj *parent;
+ int err;
+
+ hnode = objagg_hints_lookup(objagg->hints, objagg_obj->obj);
+ if (!hnode) {
+ *hint_found = false;
+ return 0;
+ }
+ *hint_found = true;
+
+ if (!hnode->parent)
+ return objagg_obj_root_create(objagg, objagg_obj, hnode);
+
+ parent = __objagg_obj_get(objagg, hnode->parent->obj);
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
+
+ err = objagg_obj_parent_assign(objagg, objagg_obj, parent, false);
+ if (err) {
+ *hint_found = false;
+ err = 0;
+ goto err_parent_assign;
+ }
+
+ return 0;
+
+err_parent_assign:
+ objagg_obj_put(objagg, parent);
+ return err;
}
static int objagg_obj_init(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
+ bool hint_found;
int err;
+ /* First, try to use hints if they are available and
+	 * if they provide a result.
+ */
+ err = objagg_obj_init_with_hints(objagg, objagg_obj, &hint_found);
+ if (err)
+ return err;
+
+ if (hint_found)
+ return 0;
+
/* Try to find if the object can be aggregated under an existing one. */
err = objagg_obj_parent_lookup_assign(objagg, objagg_obj);
if (!err)
return 0;
/* If aggregation is not possible, make the object a root. */
- return objagg_obj_root_create(objagg, objagg_obj);
+ return objagg_obj_root_create(objagg, objagg_obj, NULL);
}
static void objagg_obj_fini(struct objagg *objagg,
@@ -349,8 +485,9 @@ EXPORT_SYMBOL(objagg_obj_put);
/**
* objagg_create - creates a new objagg instance
- * @ops: user-specific callbacks
- * @priv: pointer to a private data passed to the ops
+ * @ops: user-specific callbacks
+ * @objagg_hints: hints, can be NULL
+ * @priv: pointer to a private data passed to the ops
*
* Note: all locking must be provided by the caller.
*
@@ -374,18 +511,25 @@ EXPORT_SYMBOL(objagg_obj_put);
* Returns a pointer to newly created objagg instance in case of success,
* otherwise it returns pointer error using ERR_PTR macro.
*/
-struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
+struct objagg *objagg_create(const struct objagg_ops *ops,
+ struct objagg_hints *objagg_hints, void *priv)
{
struct objagg *objagg;
int err;
if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
- !ops->delta_create || !ops->delta_destroy))
+ !ops->delta_check || !ops->delta_create ||
+ !ops->delta_destroy))
return ERR_PTR(-EINVAL);
+
objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
if (!objagg)
return ERR_PTR(-ENOMEM);
objagg->ops = ops;
+ if (objagg_hints) {
+ objagg->hints = objagg_hints;
+ objagg_hints->refcount++;
+ }
objagg->priv = priv;
INIT_LIST_HEAD(&objagg->obj_list);
@@ -397,6 +541,8 @@ struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
if (err)
goto err_rhashtable_init;
+ ida_init(&objagg->root_ida);
+
trace_objagg_create(objagg);
return objagg;
@@ -415,8 +561,11 @@ EXPORT_SYMBOL(objagg_create);
void objagg_destroy(struct objagg *objagg)
{
trace_objagg_destroy(objagg);
+ ida_destroy(&objagg->root_ida);
WARN_ON(!list_empty(&objagg->obj_list));
rhashtable_destroy(&objagg->obj_ht);
+ if (objagg->hints)
+ objagg_hints_put(objagg->hints);
kfree(objagg);
}
EXPORT_SYMBOL(objagg_destroy);
@@ -472,6 +621,8 @@ const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
objagg_stats->stats_info[i].objagg_obj = objagg_obj;
objagg_stats->stats_info[i].is_root =
objagg_obj_is_root(objagg_obj);
+ if (objagg_stats->stats_info[i].is_root)
+ objagg_stats->root_count++;
i++;
}
objagg_stats->stats_info_count = i;
@@ -485,7 +636,7 @@ const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
EXPORT_SYMBOL(objagg_stats_get);
/**
- * objagg_stats_puts - puts stats of the objagg instance
+ * objagg_stats_put - puts stats of the objagg instance
* @objagg_stats: objagg instance stats
*
* Note: all locking must be provided by the caller.
@@ -496,6 +647,410 @@ void objagg_stats_put(const struct objagg_stats *objagg_stats)
}
EXPORT_SYMBOL(objagg_stats_put);
+static struct objagg_hints_node *
+objagg_hints_node_create(struct objagg_hints *objagg_hints,
+ struct objagg_obj *objagg_obj, size_t obj_size,
+ struct objagg_hints_node *parent_hnode)
+{
+ unsigned int user_count = objagg_obj->stats.user_count;
+ struct objagg_hints_node *hnode;
+ int err;
+
+ hnode = kzalloc(sizeof(*hnode) + obj_size, GFP_KERNEL);
+ if (!hnode)
+ return ERR_PTR(-ENOMEM);
+ memcpy(hnode->obj, &objagg_obj->obj, obj_size);
+ hnode->stats_info.stats.user_count = user_count;
+ hnode->stats_info.stats.delta_user_count = user_count;
+ if (parent_hnode) {
+ parent_hnode->stats_info.stats.delta_user_count += user_count;
+ } else {
+ hnode->root_id = objagg_hints->root_count++;
+ hnode->stats_info.is_root = true;
+ }
+ hnode->stats_info.objagg_obj = objagg_obj;
+
+ err = rhashtable_insert_fast(&objagg_hints->node_ht, &hnode->ht_node,
+ objagg_hints->ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ list_add(&hnode->list, &objagg_hints->node_list);
+ hnode->parent = parent_hnode;
+ objagg_hints->node_count++;
+
+ return hnode;
+
+err_ht_insert:
+ kfree(hnode);
+ return ERR_PTR(err);
+}
+
+static void objagg_hints_flush(struct objagg_hints *objagg_hints)
+{
+ struct objagg_hints_node *hnode, *tmp;
+
+ list_for_each_entry_safe(hnode, tmp, &objagg_hints->node_list, list) {
+ list_del(&hnode->list);
+ rhashtable_remove_fast(&objagg_hints->node_ht, &hnode->ht_node,
+ objagg_hints->ht_params);
+ kfree(hnode);
+ }
+}
+
+struct objagg_tmp_node {
+ struct objagg_obj *objagg_obj;
+ bool crossed_out;
+};
+
+struct objagg_tmp_graph {
+ struct objagg_tmp_node *nodes;
+ unsigned long nodes_count;
+ unsigned long *edges;
+};
+
+static int objagg_tmp_graph_edge_index(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ return index * graph->nodes_count + parent_index;
+}
+
+static void objagg_tmp_graph_edge_set(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ int edge_index = objagg_tmp_graph_edge_index(graph, index,
+ parent_index);
+
+ __set_bit(edge_index, graph->edges);
+}
+
+static bool objagg_tmp_graph_is_edge(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ int edge_index = objagg_tmp_graph_edge_index(graph, index,
+ parent_index);
+
+ return test_bit(edge_index, graph->edges);
+}
+
+static unsigned int objagg_tmp_graph_node_weight(struct objagg_tmp_graph *graph,
+ unsigned int index)
+{
+ struct objagg_tmp_node *node = &graph->nodes[index];
+ unsigned int weight = node->objagg_obj->stats.user_count;
+ int j;
+
+	/* The node weight is the sum of its own users and the users of all
+	 * other nodes that this node can represent with a delta.
+ */
+
+ for (j = 0; j < graph->nodes_count; j++) {
+ if (!objagg_tmp_graph_is_edge(graph, index, j))
+ continue;
+ node = &graph->nodes[j];
+ if (node->crossed_out)
+ continue;
+ weight += node->objagg_obj->stats.user_count;
+ }
+ return weight;
+}
+
+static int objagg_tmp_graph_node_max_weight(struct objagg_tmp_graph *graph)
+{
+ struct objagg_tmp_node *node;
+ unsigned int max_weight = 0;
+ unsigned int weight;
+ int max_index = -1;
+ int i;
+
+ for (i = 0; i < graph->nodes_count; i++) {
+ node = &graph->nodes[i];
+ if (node->crossed_out)
+ continue;
+ weight = objagg_tmp_graph_node_weight(graph, i);
+ if (weight >= max_weight) {
+ max_weight = weight;
+ max_index = i;
+ }
+ }
+ return max_index;
+}
+
+static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
+{
+ unsigned int nodes_count = objagg->obj_count;
+ struct objagg_tmp_graph *graph;
+ struct objagg_tmp_node *node;
+ struct objagg_tmp_node *pnode;
+ struct objagg_obj *objagg_obj;
+ size_t alloc_size;
+ int i, j;
+
+ graph = kzalloc(sizeof(*graph), GFP_KERNEL);
+ if (!graph)
+ return NULL;
+
+ graph->nodes = kcalloc(nodes_count, sizeof(*graph->nodes), GFP_KERNEL);
+ if (!graph->nodes)
+ goto err_nodes_alloc;
+ graph->nodes_count = nodes_count;
+
+ alloc_size = BITS_TO_LONGS(nodes_count * nodes_count) *
+ sizeof(unsigned long);
+ graph->edges = kzalloc(alloc_size, GFP_KERNEL);
+ if (!graph->edges)
+ goto err_edges_alloc;
+
+ i = 0;
+ list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+ node = &graph->nodes[i++];
+ node->objagg_obj = objagg_obj;
+ }
+
+ /* Assemble a temporary graph. Insert edge X->Y in case Y can be
+	 * expressed as a delta of X.
+ */
+ for (i = 0; i < nodes_count; i++) {
+ for (j = 0; j < nodes_count; j++) {
+ if (i == j)
+ continue;
+ pnode = &graph->nodes[i];
+ node = &graph->nodes[j];
+ if (objagg->ops->delta_check(objagg->priv,
+ pnode->objagg_obj->obj,
+ node->objagg_obj->obj)) {
+ objagg_tmp_graph_edge_set(graph, i, j);
+
+ }
+ }
+ }
+ return graph;
+
+err_edges_alloc:
+ kfree(graph->nodes);
+err_nodes_alloc:
+ kfree(graph);
+ return NULL;
+}
+
+static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph)
+{
+ kfree(graph->edges);
+ kfree(graph->nodes);
+ kfree(graph);
+}
+
+static int
+objagg_opt_simple_greedy_fillup_hints(struct objagg_hints *objagg_hints,
+ struct objagg *objagg)
+{
+ struct objagg_hints_node *hnode, *parent_hnode;
+ struct objagg_tmp_graph *graph;
+ struct objagg_tmp_node *node;
+ int index;
+ int j;
+ int err;
+
+ graph = objagg_tmp_graph_create(objagg);
+ if (!graph)
+ return -ENOMEM;
+
+	/* Repeatedly pick the node that can accommodate the most users,
+	 * cross it out of the graph together with the nodes it can cover,
+	 * and save them all to the hint list.
+ */
+ while ((index = objagg_tmp_graph_node_max_weight(graph)) != -1) {
+ node = &graph->nodes[index];
+ node->crossed_out = true;
+ hnode = objagg_hints_node_create(objagg_hints,
+ node->objagg_obj,
+ objagg->ops->obj_size,
+ NULL);
+ if (IS_ERR(hnode)) {
+ err = PTR_ERR(hnode);
+ goto out;
+ }
+ parent_hnode = hnode;
+ for (j = 0; j < graph->nodes_count; j++) {
+ if (!objagg_tmp_graph_is_edge(graph, index, j))
+ continue;
+ node = &graph->nodes[j];
+ if (node->crossed_out)
+ continue;
+ node->crossed_out = true;
+ hnode = objagg_hints_node_create(objagg_hints,
+ node->objagg_obj,
+ objagg->ops->obj_size,
+ parent_hnode);
+ if (IS_ERR(hnode)) {
+ err = PTR_ERR(hnode);
+ goto out;
+ }
+ }
+ }
+
+ err = 0;
+out:
+ objagg_tmp_graph_destroy(graph);
+ return err;
+}
+
+struct objagg_opt_algo {
+ int (*fillup_hints)(struct objagg_hints *objagg_hints,
+ struct objagg *objagg);
+};
+
+static const struct objagg_opt_algo objagg_opt_simple_greedy = {
+ .fillup_hints = objagg_opt_simple_greedy_fillup_hints,
+};
+
+
+static const struct objagg_opt_algo *objagg_opt_algos[] = {
+ [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
+};
+
+static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ struct rhashtable *ht = arg->ht;
+ struct objagg_hints *objagg_hints =
+ container_of(ht, struct objagg_hints, node_ht);
+ const struct objagg_ops *ops = objagg_hints->ops;
+ const char *ptr = obj;
+
+ ptr += ht->p.key_offset;
+ return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
+ memcmp(ptr, arg->key, ht->p.key_len);
+}
+
+/**
+ * objagg_hints_get - obtains hints instance
+ * @objagg: objagg instance
+ * @opt_algo_type: type of hints finding algorithm
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * According to the algo type, the existing objects of the objagg instance
+ * are walked to assemble an optimal tree. We call this tree "hints".
+ * These hints can later be used for the creation of a new objagg
+ * instance. There, every future object creation is checked against the
+ * hints in order to find out where exactly the new object should be
+ * placed, either as a root or as a delta.
+ *
+ * Returns a pointer to hints instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+struct objagg_hints *objagg_hints_get(struct objagg *objagg,
+ enum objagg_opt_algo_type opt_algo_type)
+{
+ const struct objagg_opt_algo *algo = objagg_opt_algos[opt_algo_type];
+ struct objagg_hints *objagg_hints;
+ int err;
+
+ objagg_hints = kzalloc(sizeof(*objagg_hints), GFP_KERNEL);
+ if (!objagg_hints)
+ return ERR_PTR(-ENOMEM);
+
+ objagg_hints->ops = objagg->ops;
+ objagg_hints->refcount = 1;
+
+ INIT_LIST_HEAD(&objagg_hints->node_list);
+
+ objagg_hints->ht_params.key_len = objagg->ops->obj_size;
+ objagg_hints->ht_params.key_offset =
+ offsetof(struct objagg_hints_node, obj);
+ objagg_hints->ht_params.head_offset =
+ offsetof(struct objagg_hints_node, ht_node);
+ objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
+
+ err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = algo->fillup_hints(objagg_hints, objagg);
+ if (err)
+ goto err_fillup_hints;
+
+ if (WARN_ON(objagg_hints->node_count != objagg->obj_count)) {
+ err = -EINVAL;
+ goto err_node_count_check;
+ }
+
+ return objagg_hints;
+
+err_node_count_check:
+err_fillup_hints:
+ objagg_hints_flush(objagg_hints);
+ rhashtable_destroy(&objagg_hints->node_ht);
+err_rhashtable_init:
+ kfree(objagg_hints);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_hints_get);
+
+/**
+ * objagg_hints_put - puts hints instance
+ * @objagg_hints: objagg hints instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_hints_put(struct objagg_hints *objagg_hints)
+{
+ if (--objagg_hints->refcount)
+ return;
+ objagg_hints_flush(objagg_hints);
+ rhashtable_destroy(&objagg_hints->node_ht);
+ kfree(objagg_hints);
+}
+EXPORT_SYMBOL(objagg_hints_put);
+
+/**
+ * objagg_hints_stats_get - obtains stats of the hints instance
+ * @objagg_hints: hints instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ * indexes.
+ * 3) In case multiple objects have the same delta user count,
+ * the objects are ordered by user count.
+ *
+ * Returns a pointer to stats instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+const struct objagg_stats *
+objagg_hints_stats_get(struct objagg_hints *objagg_hints)
+{
+ struct objagg_stats *objagg_stats;
+ struct objagg_hints_node *hnode;
+ int i;
+
+ objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
+ objagg_hints->node_count),
+ GFP_KERNEL);
+ if (!objagg_stats)
+ return ERR_PTR(-ENOMEM);
+
+ i = 0;
+ list_for_each_entry(hnode, &objagg_hints->node_list, list) {
+ memcpy(&objagg_stats->stats_info[i], &hnode->stats_info,
+ sizeof(objagg_stats->stats_info[0]));
+ if (objagg_stats->stats_info[i].is_root)
+ objagg_stats->root_count++;
+ i++;
+ }
+ objagg_stats->stats_info_count = i;
+
+ sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+ sizeof(struct objagg_obj_stats_info),
+ objagg_stats_info_sort_cmp_func, NULL);
+
+ return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_hints_stats_get);
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Object aggregation manager");
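Putting the new pieces together, the intended flow is to snapshot an existing instance into hints and then seed a replacement instance with them; the hints object is reference counted, so the caller can drop its own reference once objagg_create() has taken one. A hedged sketch, with my_ops and my_priv standing in for caller-provided callbacks and private data:

	static struct objagg *my_recreate_with_hints(struct objagg *old,
						     const struct objagg_ops *my_ops,
						     void *my_priv)
	{
		struct objagg_hints *hints;
		struct objagg *objagg_new;

		hints = objagg_hints_get(old, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
		if (IS_ERR(hints))
			return ERR_CAST(hints);

		objagg_new = objagg_create(my_ops, hints, my_priv);

		/* Drop the reference taken by objagg_hints_get(); on success
		 * the new instance holds its own reference to the hints.
		 */
		objagg_hints_put(hints);
		return objagg_new;
	}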
diff --git a/lib/refcount.c b/lib/refcount.c
index ebcf8cd49e05..6e904af0fb3e 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -33,6 +33,9 @@
* Note that the allocator is responsible for ordering things between free()
* and alloc().
*
+ * The decrements dec_and_test() and sub_and_test() also provide acquire
+ * ordering on success.
+ *
*/
#include <linux/mutex.h>
@@ -164,8 +167,8 @@ EXPORT_SYMBOL(refcount_inc_checked);
* at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
@@ -190,7 +193,12 @@ bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
- return !new;
+ if (!new) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+ return false;
+
}
EXPORT_SYMBOL(refcount_sub_and_test_checked);
@@ -202,8 +210,8 @@ EXPORT_SYMBOL(refcount_sub_and_test_checked);
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
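The updated wording matches what callers rely on in the usual put-and-free pattern: release ordering publishes the caller's prior stores, and the acquire on the final decrement ensures the free cannot be reordered before earlier accesses to the object. A hedged sketch of that pattern; struct my_obj is hypothetical:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct my_obj {
		refcount_t ref;
		/* payload ... */
	};

	static void my_obj_put(struct my_obj *obj)
	{
		if (refcount_dec_and_test(&obj->ref))
			kfree(obj);	/* ordered after all prior uses of *obj */
	}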
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 852ffa5160f1..0a105d4af166 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -682,7 +682,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
* rhashtable_walk_exit - Free an iterator
* @iter: Hash table Iterator
*
- * This function frees resources allocated by rhashtable_walk_init.
+ * This function frees resources allocated by rhashtable_walk_enter.
*/
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 65c2d06250a6..5b382c1244ed 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -26,14 +26,10 @@
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
unsigned long mask, val;
- unsigned long __maybe_unused flags;
bool ret = false;
+ unsigned long flags;
- /* Silence bogus lockdep warning */
-#if defined(CONFIG_LOCKDEP)
- local_irq_save(flags);
-#endif
- spin_lock(&sb->map[index].swap_lock);
+ spin_lock_irqsave(&sb->map[index].swap_lock, flags);
if (!sb->map[index].cleared)
goto out_unlock;
@@ -54,10 +50,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
ret = true;
out_unlock:
- spin_unlock(&sb->map[index].swap_lock);
-#if defined(CONFIG_LOCKDEP)
- local_irq_restore(flags);
-#endif
+ spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
return ret;
}
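The conversion above is the usual idiom when a spinlock can also be taken from interrupt context: spin_lock_irqsave() disables and saves local interrupts together with taking the lock, so no separate local_irq_save() (and no lockdep-only workaround) is needed. A minimal hedged sketch of the idiom:

	#include <linux/spinlock.h>

	static void my_counter_inc(spinlock_t *lock, unsigned int *counter)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		(*counter)++;
		spin_unlock_irqrestore(lock, flags);
	}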
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 85925aaa4fff..157d9e31f6c2 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -5,10 +5,11 @@
* DEBUG_PREEMPT variant of smp_processor_id().
*/
#include <linux/export.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
-notrace static unsigned int check_preemption_disabled(const char *what1,
- const char *what2)
+notrace static nokprobe_inline
+unsigned int check_preemption_disabled(const char *what1, const char *what2)
{
int this_cpu = raw_smp_processor_id();
@@ -56,9 +57,11 @@ notrace unsigned int debug_smp_processor_id(void)
return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);
+NOKPROBE_SYMBOL(debug_smp_processor_id);
notrace void __this_cpu_preempt_check(const char *op)
{
check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
+NOKPROBE_SYMBOL(__this_cpu_preempt_check);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index f3e570722a7e..0845f635f404 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6668,12 +6668,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
+ preempt_disable();
start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = BPF_PROG_RUN(fp, data);
finish = ktime_get_ns();
+ preempt_enable();
*duration = finish - start;
do_div(*duration, runs);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 51b78405bf24..7de2702621dc 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -480,29 +480,6 @@ static noinline void __init copy_user_test(void)
kfree(kmem);
}
-static noinline void __init use_after_scope_test(void)
-{
- volatile char *volatile p;
-
- pr_info("use-after-scope on int\n");
- {
- int local = 0;
-
- p = (char *)&local;
- }
- p[0] = 1;
- p[3] = 1;
-
- pr_info("use-after-scope on array\n");
- {
- char local[1024] = {0};
-
- p = local;
- }
- p[0] = 1;
- p[1023] = 1;
-}
-
static noinline void __init kasan_alloca_oob_left(void)
{
volatile int i = 10;
@@ -682,7 +659,6 @@ static int __init kmalloc_tests_init(void)
kasan_alloca_oob_right();
ksize_unpoisons_memory();
copy_user_test();
- use_after_scope_test();
kmem_cache_double_free();
kmem_cache_invalid_free();
kasan_memchr();
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index d82d022111e0..9cf77628fc91 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
config->test_driver = NULL;
kfree_const(config->test_fs);
- config->test_driver = NULL;
+ config->test_fs = NULL;
}
static void kmod_config_free(struct kmod_test_device *test_dev)
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index ab57144bb0cd..72c1abfa154d 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -87,6 +87,15 @@ static void world_obj_put(struct world *world, struct objagg *objagg,
#define MAX_KEY_ID_DIFF 5
+static bool delta_check(void *priv, const void *parent_obj, const void *obj)
+{
+ const struct tokey *parent_key = parent_obj;
+ const struct tokey *key = obj;
+ int diff = key->id - parent_key->id;
+
+ return diff >= 0 && diff <= MAX_KEY_ID_DIFF;
+}
+
static void *delta_create(void *priv, void *parent_obj, void *obj)
{
struct tokey *parent_key = parent_obj;
@@ -95,7 +104,7 @@ static void *delta_create(void *priv, void *parent_obj, void *obj)
int diff = key->id - parent_key->id;
struct delta *delta;
- if (diff < 0 || diff > MAX_KEY_ID_DIFF)
+ if (!delta_check(priv, parent_obj, obj))
return ERR_PTR(-EINVAL);
delta = kzalloc(sizeof(*delta), GFP_KERNEL);
@@ -115,7 +124,7 @@ static void delta_destroy(void *priv, void *delta_priv)
kfree(delta);
}
-static void *root_create(void *priv, void *obj)
+static void *root_create(void *priv, void *obj, unsigned int id)
{
struct world *world = priv;
struct tokey *key = obj;
@@ -268,6 +277,12 @@ stats_put:
return err;
}
+static bool delta_check_dummy(void *priv, const void *parent_obj,
+ const void *obj)
+{
+ return false;
+}
+
static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
{
return ERR_PTR(-EOPNOTSUPP);
@@ -279,6 +294,7 @@ static void delta_destroy_dummy(void *priv, void *delta_priv)
static const struct objagg_ops nodelta_ops = {
.obj_size = sizeof(struct tokey),
+ .delta_check = delta_check_dummy,
.delta_create = delta_create_dummy,
.delta_destroy = delta_destroy_dummy,
.root_create = root_create,
@@ -292,7 +308,7 @@ static int test_nodelta(void)
int i;
int err;
- objagg = objagg_create(&nodelta_ops, &world);
+ objagg = objagg_create(&nodelta_ops, NULL, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
@@ -357,6 +373,7 @@ err_stats_second_zero:
static const struct objagg_ops delta_ops = {
.obj_size = sizeof(struct tokey),
+ .delta_check = delta_check,
.delta_create = delta_create,
.delta_destroy = delta_destroy,
.root_create = root_create,
@@ -728,8 +745,10 @@ static int check_expect_stats(struct objagg *objagg,
int err;
stats = objagg_stats_get(objagg);
- if (IS_ERR(stats))
+ if (IS_ERR(stats)) {
+ *errmsg = "objagg_stats_get() failed.";
return PTR_ERR(stats);
+ }
err = __check_expect_stats(stats, expect_stats, errmsg);
objagg_stats_put(stats);
return err;
@@ -769,7 +788,6 @@ static int test_delta_action_item(struct world *world,
if (err)
goto errout;
- errmsg = NULL;
err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
if (err) {
pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
@@ -793,7 +811,7 @@ static int test_delta(void)
int i;
int err;
- objagg = objagg_create(&delta_ops, &world);
+ objagg = objagg_create(&delta_ops, NULL, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
@@ -815,6 +833,170 @@ err_do_action_item:
return err;
}
+struct hints_case {
+ const unsigned int *key_ids;
+ size_t key_ids_count;
+ struct expect_stats expect_stats;
+ struct expect_stats expect_stats_hints;
+};
+
+static const unsigned int hints_case_key_ids[] = {
+ 1, 7, 3, 5, 3, 1, 30, 8, 8, 5, 6, 8,
+};
+
+static const struct hints_case hints_case = {
+ .key_ids = hints_case_key_ids,
+ .key_ids_count = ARRAY_SIZE(hints_case_key_ids),
+ .expect_stats =
+ EXPECT_STATS(7, ROOT(1, 2, 7), ROOT(7, 1, 4), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(3, 2),
+ DELTA(5, 2), DELTA(6, 1)),
+ .expect_stats_hints =
+ EXPECT_STATS(7, ROOT(3, 2, 9), ROOT(1, 2, 2), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(5, 2),
+ DELTA(6, 1), DELTA(7, 1)),
+};
+
+static void __pr_debug_stats(const struct objagg_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < stats->stats_info_count; i++)
+ pr_debug("Stat index %d key %u: u %d, d %d, %s\n", i,
+ obj_to_key_id(stats->stats_info[i].objagg_obj),
+ stats->stats_info[i].stats.user_count,
+ stats->stats_info[i].stats.delta_user_count,
+ stats->stats_info[i].is_root ? "root" : "noroot");
+}
+
+static void pr_debug_stats(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return;
+ __pr_debug_stats(stats);
+ objagg_stats_put(stats);
+}
+
+static void pr_debug_hints_stats(struct objagg_hints *objagg_hints)
+{
+ const struct objagg_stats *stats;
+
+ stats = objagg_hints_stats_get(objagg_hints);
+ if (IS_ERR(stats))
+ return;
+ __pr_debug_stats(stats);
+ objagg_stats_put(stats);
+}
+
+static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ const struct objagg_stats *stats;
+ int err;
+
+ stats = objagg_hints_stats_get(objagg_hints);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+ err = __check_expect_stats(stats, expect_stats, errmsg);
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int test_hints_case(const struct hints_case *hints_case)
+{
+ struct objagg_obj *objagg_obj;
+ struct objagg_hints *hints;
+ struct world world2 = {};
+ struct world world = {};
+ struct objagg *objagg2;
+ struct objagg *objagg;
+ const char *errmsg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < hints_case->key_ids_count; i++) {
+ objagg_obj = world_obj_get(&world, objagg,
+ hints_case->key_ids[i]);
+ if (IS_ERR(objagg_obj)) {
+ err = PTR_ERR(objagg_obj);
+ goto err_world_obj_get;
+ }
+ }
+
+ pr_debug_stats(objagg);
+ err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Stats: %s\n", errmsg);
+ goto err_check_expect_stats;
+ }
+
+ hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ if (IS_ERR(hints)) {
+ err = PTR_ERR(hints);
+ goto err_hints_get;
+ }
+
+ pr_debug_hints_stats(hints);
+ err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints,
+ &errmsg);
+ if (err) {
+ pr_err("Hints stats: %s\n", errmsg);
+ goto err_check_expect_hints_stats;
+ }
+
+ objagg2 = objagg_create(&delta_ops, hints, &world2);
+ if (IS_ERR(objagg2))
+ return PTR_ERR(objagg2);
+
+ for (i = 0; i < hints_case->key_ids_count; i++) {
+ objagg_obj = world_obj_get(&world2, objagg2,
+ hints_case->key_ids[i]);
+ if (IS_ERR(objagg_obj)) {
+ err = PTR_ERR(objagg_obj);
+ goto err_world2_obj_get;
+ }
+ }
+
+ pr_debug_stats(objagg2);
+ err = check_expect_stats(objagg2, &hints_case->expect_stats_hints,
+ &errmsg);
+ if (err) {
+ pr_err("Stats2: %s\n", errmsg);
+ goto err_check_expect_stats2;
+ }
+
+ err = 0;
+
+err_check_expect_stats2:
+err_world2_obj_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world2, objagg, hints_case->key_ids[i]);
+ objagg_hints_put(hints);
+ objagg_destroy(objagg2);
+ i = hints_case->key_ids_count;
+err_check_expect_hints_stats:
+err_hints_get:
+err_check_expect_stats:
+err_world_obj_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, hints_case->key_ids[i]);
+
+ objagg_destroy(objagg);
+ return err;
+}
+static int test_hints(void)
+{
+ return test_hints_case(&hints_case);
+}
+
static int __init test_objagg_init(void)
{
int err;
@@ -822,7 +1004,10 @@ static int __init test_objagg_init(void)
err = test_nodelta();
if (err)
return err;
- return test_delta();
+ err = test_delta();
+ if (err)
+ return err;
+ return test_hints();
}
static void __exit test_objagg_exit(void)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 6a8ac7626797..3bd2e91bfc29 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -177,16 +177,11 @@ static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array,
static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
{
- unsigned int err, total = 0, chain_len = 0;
+ unsigned int total = 0, chain_len = 0;
struct rhashtable_iter hti;
struct rhash_head *pos;
- err = rhashtable_walk_init(ht, &hti, GFP_KERNEL);
- if (err) {
- pr_warn("Test failed: allocation error");
- return;
- }
-
+ rhashtable_walk_enter(ht, &hti);
rhashtable_walk_start(&hti);
while ((pos = rhashtable_walk_next(&hti))) {
@@ -395,7 +390,7 @@ static int __init test_rhltable(unsigned int entries)
if (WARN(err, "cannot remove element at slot %d", i))
continue;
} else {
- if (WARN(err != -ENOENT, "removed non-existant element %d, error %d not %d",
+ if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d",
i, err, -ENOENT))
continue;
}
@@ -440,7 +435,7 @@ static int __init test_rhltable(unsigned int entries)
if (WARN(err, "cannot remove element at slot %d", i))
continue;
} else {
- if (WARN(err != -ENOENT, "removed non-existant element, error %d not %d",
+ if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d",
err, -ENOENT))
continue;
}
@@ -541,38 +536,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
int cnt, bool slow)
{
- struct rhltable rhlt;
+ struct rhltable *rhlt;
unsigned int i, ret;
const char *key;
int err = 0;
- err = rhltable_init(&rhlt, &test_rht_params_dup);
- if (WARN_ON(err))
+ rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
+ if (WARN_ON(!rhlt))
+ return -EINVAL;
+
+ err = rhltable_init(rhlt, &test_rht_params_dup);
+ if (WARN_ON(err)) {
+ kfree(rhlt);
return err;
+ }
for (i = 0; i < cnt; i++) {
rhl_test_objects[i].value.tid = i;
- key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+ key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
key += test_rht_params_dup.key_offset;
if (slow) {
- err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+ err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
&rhl_test_objects[i].list_node.rhead));
if (err == -EAGAIN)
err = 0;
} else
- err = rhltable_insert(&rhlt,
+ err = rhltable_insert(rhlt,
&rhl_test_objects[i].list_node,
test_rht_params_dup);
if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
goto skip_print;
}
- ret = print_ht(&rhlt);
+ ret = print_ht(rhlt);
WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
skip_print:
- rhltable_destroy(&rhlt);
+ rhltable_destroy(rhlt);
+ kfree(rhlt);
return 0;
}
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
new file mode 100644
index 000000000000..83cdcaa82bf6
--- /dev/null
+++ b/lib/test_vmalloc.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Test module to stress and analyze vmalloc allocator performance.
+ * (C) 2018 Uladzislau Rezki (Sony) <urezki@gmail.com>
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/kthread.h>
+#include <linux/moduleparam.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/rwsem.h>
+#include <linux/mm.h>
+
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg) \
+
+__param(bool, single_cpu_test, false,
+ "Use single first online CPU to run tests");
+
+__param(bool, sequential_test_order, false,
+ "Use sequential stress tests order");
+
+__param(int, test_repeat_count, 1,
+ "Set test repeat counter");
+
+__param(int, test_loop_count, 1000000,
+ "Set test loop counter");
+
+__param(int, run_test_mask, INT_MAX,
+ "Set tests specified in the mask.\n\n"
+ "\t\tid: 1, name: fix_size_alloc_test\n"
+ "\t\tid: 2, name: full_fit_alloc_test\n"
+ "\t\tid: 4, name: long_busy_list_alloc_test\n"
+ "\t\tid: 8, name: random_size_alloc_test\n"
+ "\t\tid: 16, name: fix_align_alloc_test\n"
+ "\t\tid: 32, name: random_size_align_alloc_test\n"
+ "\t\tid: 64, name: align_shift_alloc_test\n"
+ "\t\tid: 128, name: pcpu_alloc_test\n"
+ /* Add a new test case description here. */
+);
+
+/*
+ * Depends on the single_cpu_test parameter. If it is true, run the
+ * tests only on the first online CPU, otherwise run them on all
+ * online CPUs.
+ */
+static cpumask_t cpus_run_test_mask = CPU_MASK_NONE;
+
+/*
+ * Read-write semaphore used to synchronize the setup phase
+ * between the main thread and the worker threads.
+ */
+static DECLARE_RWSEM(prepare_for_test_rwsem);
+
+/*
+ * Completion tracking for worker threads.
+ */
+static DECLARE_COMPLETION(test_all_done_comp);
+static atomic_t test_n_undone = ATOMIC_INIT(0);
+
+static inline void
+test_report_one_done(void)
+{
+ if (atomic_dec_and_test(&test_n_undone))
+ complete(&test_all_done_comp);
+}
+
+static int random_size_align_alloc_test(void)
+{
+ unsigned long size, align, rnd;
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ get_random_bytes(&rnd, sizeof(rnd));
+
+ /*
+ * Maximum 1024 pages, if PAGE_SIZE is 4096.
+ */
+ align = 1 << (rnd % 23);
+
+ /*
+ * Maximum 10 pages.
+ */
+ size = ((rnd % 10) + 1) * PAGE_SIZE;
+
+ ptr = __vmalloc_node_range(size, align,
+ VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_ZERO,
+ PAGE_KERNEL,
+ 0, 0, __builtin_return_address(0));
+
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+/*
+ * This test case is supposed to fail.
+ */
+static int align_shift_alloc_test(void)
+{
+ unsigned long align;
+ void *ptr;
+ int i;
+
+ for (i = 0; i < BITS_PER_LONG; i++) {
+ align = ((unsigned long) 1) << i;
+
+ ptr = __vmalloc_node_range(PAGE_SIZE, align,
+ VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_ZERO,
+ PAGE_KERNEL,
+ 0, 0, __builtin_return_address(0));
+
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int fix_align_alloc_test(void)
+{
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ ptr = __vmalloc_node_range(5 * PAGE_SIZE,
+ THREAD_ALIGN << 1,
+ VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_ZERO,
+ PAGE_KERNEL,
+ 0, 0, __builtin_return_address(0));
+
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int random_size_alloc_test(void)
+{
+ unsigned int n;
+ void *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ get_random_bytes(&n, sizeof(i));
+ n = (n % 100) + 1;
+
+ p = vmalloc(n * PAGE_SIZE);
+
+ if (!p)
+ return -1;
+
+ *((__u8 *)p) = 1;
+ vfree(p);
+ }
+
+ return 0;
+}
+
+static int long_busy_list_alloc_test(void)
+{
+ void *ptr_1, *ptr_2;
+ void **ptr;
+ int rv = -1;
+ int i;
+
+ ptr = vmalloc(sizeof(void *) * 15000);
+ if (!ptr)
+ return rv;
+
+ for (i = 0; i < 15000; i++)
+ ptr[i] = vmalloc(1 * PAGE_SIZE);
+
+ for (i = 0; i < test_loop_count; i++) {
+ ptr_1 = vmalloc(100 * PAGE_SIZE);
+ if (!ptr_1)
+ goto leave;
+
+ ptr_2 = vmalloc(1 * PAGE_SIZE);
+ if (!ptr_2) {
+ vfree(ptr_1);
+ goto leave;
+ }
+
+ *((__u8 *)ptr_1) = 0;
+ *((__u8 *)ptr_2) = 1;
+
+ vfree(ptr_1);
+ vfree(ptr_2);
+ }
+
+ /* Success */
+ rv = 0;
+
+leave:
+ for (i = 0; i < 15000; i++)
+ vfree(ptr[i]);
+
+ vfree(ptr);
+ return rv;
+}
+
+static int full_fit_alloc_test(void)
+{
+ void **ptr, **junk_ptr, *tmp;
+ int junk_length;
+ int rv = -1;
+ int i;
+
+ junk_length = fls(num_online_cpus());
+ junk_length *= (32 * 1024 * 1024 / PAGE_SIZE);
+
+ ptr = vmalloc(sizeof(void *) * junk_length);
+ if (!ptr)
+ return rv;
+
+ junk_ptr = vmalloc(sizeof(void *) * junk_length);
+ if (!junk_ptr) {
+ vfree(ptr);
+ return rv;
+ }
+
+ for (i = 0; i < junk_length; i++) {
+ ptr[i] = vmalloc(1 * PAGE_SIZE);
+ junk_ptr[i] = vmalloc(1 * PAGE_SIZE);
+ }
+
+ for (i = 0; i < junk_length; i++)
+ vfree(junk_ptr[i]);
+
+ for (i = 0; i < test_loop_count; i++) {
+ tmp = vmalloc(1 * PAGE_SIZE);
+
+ if (!tmp)
+ goto error;
+
+ *((__u8 *)tmp) = 1;
+ vfree(tmp);
+ }
+
+ /* Success */
+ rv = 0;
+
+error:
+ for (i = 0; i < junk_length; i++)
+ vfree(ptr[i]);
+
+ vfree(ptr);
+ vfree(junk_ptr);
+
+ return rv;
+}
+
+static int fix_size_alloc_test(void)
+{
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ ptr = vmalloc(3 * PAGE_SIZE);
+
+ if (!ptr)
+ return -1;
+
+ *((__u8 *)ptr) = 0;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int
+pcpu_alloc_test(void)
+{
+ int rv = 0;
+#ifndef CONFIG_NEED_PER_CPU_KM
+ void __percpu **pcpu;
+ size_t size, align;
+ int i;
+
+ pcpu = vmalloc(sizeof(void __percpu *) * 35000);
+ if (!pcpu)
+ return -1;
+
+ for (i = 0; i < 35000; i++) {
+ unsigned int r;
+
+ get_random_bytes(&r, sizeof(i));
+ size = (r % (PAGE_SIZE / 4)) + 1;
+
+ /*
+ * Maximum PAGE_SIZE
+ */
+ get_random_bytes(&r, sizeof(i));
+ align = 1 << ((i % 11) + 1);
+
+ pcpu[i] = __alloc_percpu(size, align);
+ if (!pcpu[i])
+ rv = -1;
+ }
+
+ for (i = 0; i < 35000; i++)
+ free_percpu(pcpu[i]);
+
+ vfree(pcpu);
+#endif
+ return rv;
+}
+
+struct test_case_desc {
+ const char *test_name;
+ int (*test_func)(void);
+};
+
+static struct test_case_desc test_case_array[] = {
+ { "fix_size_alloc_test", fix_size_alloc_test },
+ { "full_fit_alloc_test", full_fit_alloc_test },
+ { "long_busy_list_alloc_test", long_busy_list_alloc_test },
+ { "random_size_alloc_test", random_size_alloc_test },
+ { "fix_align_alloc_test", fix_align_alloc_test },
+ { "random_size_align_alloc_test", random_size_align_alloc_test },
+ { "align_shift_alloc_test", align_shift_alloc_test },
+ { "pcpu_alloc_test", pcpu_alloc_test },
+ /* Add a new test case here. */
+};
+
+struct test_case_data {
+ int test_failed;
+ int test_passed;
+ u64 time;
+};
+
+/* Split it to get rid of: WARNING: line over 80 characters */
+static struct test_case_data
+ per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)];
+
+static struct test_driver {
+ struct task_struct *task;
+ unsigned long start;
+ unsigned long stop;
+ int cpu;
+} per_cpu_test_driver[NR_CPUS];
+
+static void shuffle_array(int *arr, int n)
+{
+ unsigned int rnd;
+ int i, j, x;
+
+ for (i = n - 1; i > 0; i--) {
+ get_random_bytes(&rnd, sizeof(rnd));
+
+ /* Cut the range. */
+ j = rnd % i;
+
+ /* Swap indexes. */
+ x = arr[i];
+ arr[i] = arr[j];
+ arr[j] = x;
+ }
+}
+
+static int test_func(void *private)
+{
+ struct test_driver *t = private;
+ cpumask_t newmask = CPU_MASK_NONE;
+ int random_array[ARRAY_SIZE(test_case_array)];
+ int index, i, j, ret;
+ ktime_t kt;
+ u64 delta;
+
+ cpumask_set_cpu(t->cpu, &newmask);
+ set_cpus_allowed_ptr(current, &newmask);
+
+ for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
+ random_array[i] = i;
+
+ if (!sequential_test_order)
+ shuffle_array(random_array, ARRAY_SIZE(test_case_array));
+
+ /*
+ * Block until initialization is done.
+ */
+ down_read(&prepare_for_test_rwsem);
+
+ t->start = get_cycles();
+ for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
+ index = random_array[i];
+
+ /*
+		 * Skip the tests that are not selected in run_test_mask.
+ */
+ if (!((run_test_mask & (1 << index)) >> index))
+ continue;
+
+ kt = ktime_get();
+ for (j = 0; j < test_repeat_count; j++) {
+ ret = test_case_array[index].test_func();
+ if (!ret)
+ per_cpu_test_data[t->cpu][index].test_passed++;
+ else
+ per_cpu_test_data[t->cpu][index].test_failed++;
+ }
+
+ /*
+		 * Take the average time the test took.
+ */
+ delta = (u64) ktime_us_delta(ktime_get(), kt);
+ do_div(delta, (u32) test_repeat_count);
+
+ per_cpu_test_data[t->cpu][index].time = delta;
+ }
+ t->stop = get_cycles();
+
+ up_read(&prepare_for_test_rwsem);
+ test_report_one_done();
+
+ /*
+ * Wait for the kthread_stop() call.
+ */
+ while (!kthread_should_stop())
+ msleep(10);
+
+ return 0;
+}
+
+static void
+init_test_configuration(void)
+{
+ /*
+ * Reset all data of all CPUs.
+ */
+ memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data));
+
+ if (single_cpu_test)
+ cpumask_set_cpu(cpumask_first(cpu_online_mask),
+ &cpus_run_test_mask);
+ else
+ cpumask_and(&cpus_run_test_mask, cpu_online_mask,
+ cpu_online_mask);
+
+ if (test_repeat_count <= 0)
+ test_repeat_count = 1;
+
+ if (test_loop_count <= 0)
+ test_loop_count = 1;
+}
+
+static void do_concurrent_test(void)
+{
+ int cpu, ret;
+
+ /*
+ * Set some basic configurations plus sanity check.
+ */
+	init_test_configuration();
+
+ /*
+ * Put on hold all workers.
+ */
+ down_write(&prepare_for_test_rwsem);
+
+ for_each_cpu(cpu, &cpus_run_test_mask) {
+ struct test_driver *t = &per_cpu_test_driver[cpu];
+
+ t->cpu = cpu;
+ t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
+
+ if (!IS_ERR(t->task))
+ /* Success. */
+ atomic_inc(&test_n_undone);
+ else
+ pr_err("Failed to start kthread for %d CPU\n", cpu);
+ }
+
+ /*
+ * Now let the workers do their job.
+ */
+ up_write(&prepare_for_test_rwsem);
+
+ /*
+	 * Sleep quietly until all workers are done, polling at a one
+	 * second interval. Since the tests can take a long time, an
+	 * uninterruptible wait could trigger a hung-task stack trace;
+	 * that is why we use wait_for_completion_timeout() with an HZ
+	 * timeout.
+ */
+ do {
+ ret = wait_for_completion_timeout(&test_all_done_comp, HZ);
+ } while (!ret);
+
+ for_each_cpu(cpu, &cpus_run_test_mask) {
+ struct test_driver *t = &per_cpu_test_driver[cpu];
+ int i;
+
+ if (!IS_ERR(t->task))
+ kthread_stop(t->task);
+
+ for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
+ if (!((run_test_mask & (1 << i)) >> i))
+ continue;
+
+ pr_info(
+ "Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n",
+ test_case_array[i].test_name,
+ per_cpu_test_data[cpu][i].test_passed,
+ per_cpu_test_data[cpu][i].test_failed,
+ test_repeat_count, test_loop_count,
+ per_cpu_test_data[cpu][i].time);
+ }
+
+ pr_info("All test took CPU%d=%lu cycles\n",
+ cpu, t->stop - t->start);
+ }
+}
+
+static int vmalloc_test_init(void)
+{
+ do_concurrent_test();
+	return -EAGAIN; /* Failing here unloads the module right after the tests run */
+}
+
+static void vmalloc_test_exit(void)
+{
+}
+
+module_init(vmalloc_test_init)
+module_exit(vmalloc_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Uladzislau Rezki");
+MODULE_DESCRIPTION("vmalloc test module");
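Because the init function deliberately returns -EAGAIN, the tests run once at load time and the module does not stay loaded; the parameters defined above select and tune the runs. A hedged example invocation, assuming the module was built with CONFIG_TEST_VMALLOC=m and with arbitrarily chosen values, would be modprobe test_vmalloc run_test_mask=3 test_repeat_count=10 single_cpu_test=1, after which the per-test summary lines appear in the kernel log.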
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 4676c0a1eeca..c596a957f764 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -199,7 +199,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
xa_set_mark(xa, index + 1, XA_MARK_0);
XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
- xa_set_mark(xa, index + 2, XA_MARK_1);
+ xa_set_mark(xa, index + 2, XA_MARK_2);
XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
xa_store_order(xa, index, order, xa_mk_index(index),
GFP_KERNEL);
@@ -209,8 +209,8 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
void *entry;
XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
- XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
- XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
/* We should see two elements in the array */
rcu_read_lock();
@@ -357,7 +357,7 @@ static noinline void check_cmpxchg(struct xarray *xa)
static noinline void check_reserve(struct xarray *xa)
{
void *entry;
- unsigned long index = 0;
+ unsigned long index;
/* An array with a reserved entry is not empty */
XA_BUG_ON(xa, !xa_empty(xa));
@@ -382,10 +382,12 @@ static noinline void check_reserve(struct xarray *xa)
xa_erase_index(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
- /* And so does xa_insert */
+ /* But xa_insert does not */
xa_reserve(xa, 12345678, GFP_KERNEL);
- XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
- xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
+ -EEXIST);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
XA_BUG_ON(xa, !xa_empty(xa));
/* Can iterate through a reserved entry */
@@ -393,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa)
xa_reserve(xa, 6, GFP_KERNEL);
xa_store_index(xa, 7, GFP_KERNEL);
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, index != 5 && index != 7);
}
xa_destroy(xa);
@@ -812,17 +814,16 @@ static noinline void check_find_1(struct xarray *xa)
static noinline void check_find_2(struct xarray *xa)
{
void *entry;
- unsigned long i, j, index = 0;
+ unsigned long i, j, index;
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, true);
}
for (i = 0; i < 1024; i++) {
xa_store_index(xa, index, GFP_KERNEL);
j = 0;
- index = 0;
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, xa_mk_index(index) != entry);
XA_BUG_ON(xa, index != j++);
}
@@ -839,6 +840,7 @@ static noinline void check_find_3(struct xarray *xa)
for (i = 0; i < 100; i++) {
for (j = 0; j < 100; j++) {
+ rcu_read_lock();
for (k = 0; k < 100; k++) {
xas_set(&xas, j);
xas_for_each_marked(&xas, entry, k, XA_MARK_0)
@@ -847,6 +849,7 @@ static noinline void check_find_3(struct xarray *xa)
XA_BUG_ON(xa,
xas.xa_node != XAS_RESTART);
}
+ rcu_read_unlock();
}
xa_store_index(xa, i, GFP_KERNEL);
xa_set_mark(xa, i, XA_MARK_0);
@@ -1183,6 +1186,35 @@ static noinline void check_store_range(struct xarray *xa)
}
}
+static void check_align_1(struct xarray *xa, char *name)
+{
+ int i;
+ unsigned int id;
+ unsigned long index;
+ void *entry;
+
+ for (i = 0; i < 8; i++) {
+ id = 0;
+ XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL)
+ != 0);
+ XA_BUG_ON(xa, id != i);
+ }
+ xa_for_each(xa, index, entry)
+ XA_BUG_ON(xa, xa_is_err(entry));
+ xa_destroy(xa);
+}
+
+static noinline void check_align(struct xarray *xa)
+{
+ char name[] = "Motorola 68000";
+
+ check_align_1(xa, name);
+ check_align_1(xa, name + 1);
+ check_align_1(xa, name + 2);
+ check_align_1(xa, name + 3);
+// check_align_2(xa, name);
+}
+
static LIST_HEAD(shadow_nodes);
static void test_update_node(struct xa_node *node)
@@ -1332,6 +1364,7 @@ static int xarray_checks(void)
check_create_range(&array);
check_store_range(&array);
check_store_iter(&array);
+ check_align(&xa0);
check_workingset(&array, 0);
check_workingset(&array, 64);
diff --git a/lib/xarray.c b/lib/xarray.c
index 5f3f9311de89..81c3171ddde9 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -232,6 +232,8 @@ void *xas_load(struct xa_state *xas)
if (xas->xa_shift > node->shift)
break;
entry = xas_descend(xas, node);
+ if (node->shift == 0)
+ break;
}
return entry;
}
@@ -506,7 +508,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
for (;;) {
void *entry = xa_entry_locked(xas->xa, node, offset);
- if (xa_is_node(entry)) {
+ if (node->shift && xa_is_node(entry)) {
node = xa_to_node(entry);
offset = 0;
continue;
@@ -604,6 +606,7 @@ static int xas_expand(struct xa_state *xas, void *head)
/*
* xas_create() - Create a slot to store an entry in.
* @xas: XArray operation state.
+ * @allow_root: %true if we can store the entry in the root directly
*
* Most users will not need to call this function directly, as it is called
* by xas_store(). It is useful for doing conditional store operations
@@ -613,7 +616,7 @@ static int xas_expand(struct xa_state *xas, void *head)
* If the slot was newly created, returns %NULL. If it failed to create the
* slot, returns %NULL and indicates the error in @xas.
*/
-static void *xas_create(struct xa_state *xas)
+static void *xas_create(struct xa_state *xas, bool allow_root)
{
struct xarray *xa = xas->xa;
void *entry;
@@ -628,6 +631,8 @@ static void *xas_create(struct xa_state *xas)
shift = xas_expand(xas, entry);
if (shift < 0)
return NULL;
+ if (!shift && !allow_root)
+ shift = XA_CHUNK_SHIFT;
entry = xa_head_locked(xa);
slot = &xa->xa_head;
} else if (xas_error(xas)) {
@@ -687,7 +692,7 @@ void xas_create_range(struct xa_state *xas)
xas->xa_sibs = 0;
for (;;) {
- xas_create(xas);
+ xas_create(xas, true);
if (xas_error(xas))
goto restore;
if (xas->xa_index <= (index | XA_CHUNK_MASK))
@@ -754,7 +759,7 @@ void *xas_store(struct xa_state *xas, void *entry)
bool value = xa_is_value(entry);
if (entry)
- first = xas_create(xas);
+ first = xas_create(xas, !xa_is_node(entry));
else
first = xas_load(xas);
@@ -1251,35 +1256,6 @@ void *xas_find_conflict(struct xa_state *xas)
EXPORT_SYMBOL_GPL(xas_find_conflict);
/**
- * xa_init_flags() - Initialise an empty XArray with flags.
- * @xa: XArray.
- * @flags: XA_FLAG values.
- *
- * If you need to initialise an XArray with special flags (eg you need
- * to take the lock from interrupt context), use this function instead
- * of xa_init().
- *
- * Context: Any context.
- */
-void xa_init_flags(struct xarray *xa, gfp_t flags)
-{
- unsigned int lock_type;
- static struct lock_class_key xa_lock_irq;
- static struct lock_class_key xa_lock_bh;
-
- spin_lock_init(&xa->xa_lock);
- xa->xa_flags = flags;
- xa->xa_head = NULL;
-
- lock_type = xa_lock_type(xa);
- if (lock_type == XA_LOCK_IRQ)
- lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
- else if (lock_type == XA_LOCK_BH)
- lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
-}
-EXPORT_SYMBOL(xa_init_flags);
-
-/**
* xa_load() - Load an entry from an XArray.
* @xa: XArray.
* @index: index into array.
@@ -1308,7 +1284,6 @@ static void *xas_result(struct xa_state *xas, void *curr)
{
if (xa_is_zero(curr))
return NULL;
- XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
if (xas_error(xas))
curr = xas->xa_node;
return curr;
@@ -1378,7 +1353,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
XA_STATE(xas, xa, index);
void *curr;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
if (xa_track_free(xa) && !entry)
entry = XA_ZERO_ENTRY;
@@ -1444,7 +1419,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
XA_STATE(xas, xa, index);
void *curr;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
if (xa_track_free(xa) && !entry)
entry = XA_ZERO_ENTRY;
@@ -1465,6 +1440,47 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
EXPORT_SYMBOL(__xa_cmpxchg);
/**
+ * __xa_insert() - Store this entry in the XArray if no entry is present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+ if (!entry)
+ entry = XA_ZERO_ENTRY;
+
+ do {
+ curr = xas_load(&xas);
+ if (!curr) {
+ xas_store(&xas, entry);
+ if (xa_track_free(xa))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } else {
+ xas_set_err(&xas, -EEXIST);
+ }
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_error(&xas);
+}
+EXPORT_SYMBOL(__xa_insert);
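
__xa_insert() is the locked worker; most callers reach it through the xa_insert() wrapper in include/linux/xarray.h, which takes xa_lock itself. A minimal sketch of the insert-if-absent pattern and the return contract documented above (0, -EEXIST, -ENOMEM); the registry and helper names are illustrative only:

#include <linux/printk.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(sketch_registry);	/* illustrative, not part of the patch */

/* Publish @object at @id only if the slot is currently unused. */
static int sketch_publish(unsigned long id, void *object)
{
	int err = xa_insert(&sketch_registry, id, object, GFP_KERNEL);

	if (err == -EEXIST)
		pr_debug("id %lu is already taken\n", id);

	return err;	/* 0 on success, -EEXIST or -ENOMEM on failure */
}
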
+
+/**
* __xa_reserve() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
@@ -1567,7 +1583,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
if (last + 1)
order = __ffs(last + 1);
xas_set_order(&xas, last, order);
- xas_create(&xas);
+ xas_create(&xas, true);
if (xas_error(&xas))
goto unlock;
}
@@ -1609,7 +1625,7 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
XA_STATE(xas, xa, 0);
int err;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (WARN_ON_ONCE(!xa_track_free(xa)))
return -EINVAL;
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 9a7b8b049d04..e3df921208c0 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -39,6 +39,23 @@ config DEBUG_PAGEALLOC_ENABLE_DEFAULT
Enable debug page memory allocations by default? This value
can be overridden by debug_pagealloc=off|on.
+config PAGE_OWNER
+ bool "Track page owner"
+ depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+ select DEBUG_FS
+ select STACKTRACE
+ select STACKDEPOT
+ select PAGE_EXTENSION
+ help
+ This keeps track of which call chain is the owner of a page and may
+ help to find bare alloc_page(s) leaks. Even if you include this
+ feature in your build, it is disabled by default. You need to pass
+ "page_owner=on" on the kernel command line to enable it. It eats
+ a fair amount of memory when enabled. See tools/vm/page_owner_sort.c
+ for a user-space helper.
+
+ If unsure, say N.
+
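
As a usage note (not part of the Kconfig text): with this option built in and page_owner=on on the kernel command line, the recorded call chains can be read back from /sys/kernel/debug/page_owner and summarised with the tools/vm/page_owner_sort helper mentioned above.
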
config PAGE_POISONING
bool "Poison pages after freeing"
select PAGE_POISONING_NO_SANITY if HIBERNATION
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8a8bb8796c6c..72e6d0c55cfa 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
bdi->cgwb_congested_tree = RB_ROOT;
mutex_init(&bdi->cgwb_release_mutex);
+ init_rwsem(&bdi->wb_switch_rwsem);
ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (!ret) {
diff --git a/mm/cma.c b/mm/cma.c
index c7b39dd3b4f6..f4f3a8a57d86 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -353,12 +353,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
if (ret)
- goto err;
+ goto free_mem;
pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
&base);
return 0;
+free_mem:
+ memblock_free(base, size);
err:
pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
return ret;
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index ad6723e9d110..8d7b2fd52225 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -21,8 +21,6 @@ struct cma_mem {
unsigned long n;
};
-static struct dentry *cma_debugfs_root;
-
static int cma_debugfs_get(void *data, u64 *val)
{
unsigned long *p = data;
@@ -162,7 +160,7 @@ static int cma_alloc_write(void *data, u64 val)
}
DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
-static void cma_debugfs_add_one(struct cma *cma, int idx)
+static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
struct dentry *tmp;
char name[16];
@@ -170,7 +168,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
scnprintf(name, sizeof(name), "cma-%s", cma->name);
- tmp = debugfs_create_dir(name, cma_debugfs_root);
+ tmp = debugfs_create_dir(name, root_dentry);
debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
@@ -188,14 +186,13 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
static int __init cma_debugfs_init(void)
{
+ struct dentry *cma_debugfs_root;
int i;
cma_debugfs_root = debugfs_create_dir("cma", NULL);
- if (!cma_debugfs_root)
- return -ENOMEM;
for (i = 0; i < cma_area_count; i++)
- cma_debugfs_add_one(&cma_areas[i], i);
+ cma_debugfs_add_one(&cma_areas[i], cma_debugfs_root);
return 0;
}
diff --git a/mm/compaction.c b/mm/compaction.c
index ef29490b0f46..f171a83707ce 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -66,7 +66,7 @@ static unsigned long release_freepages(struct list_head *freelist)
return high_pfn;
}
-static void map_pages(struct list_head *list)
+static void split_map_pages(struct list_head *list)
{
unsigned int i, order, nr_pages;
struct page *page, *next;
@@ -237,6 +237,70 @@ static bool pageblock_skip_persistent(struct page *page)
return false;
}
+static bool
+__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
+ bool check_target)
+{
+ struct page *page = pfn_to_online_page(pfn);
+ struct page *end_page;
+ unsigned long block_pfn;
+
+ if (!page)
+ return false;
+ if (zone != page_zone(page))
+ return false;
+ if (pageblock_skip_persistent(page))
+ return false;
+
+ /*
+ * If skip is already cleared do no further checking once the
+ * restart points have been set.
+ */
+ if (check_source && check_target && !get_pageblock_skip(page))
+ return true;
+
+ /*
+ * If clearing skip for the target scanner, do not select a
+ * non-movable pageblock as the starting point.
+ */
+ if (!check_source && check_target &&
+ get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
+ return false;
+
+ /*
+ * Only clear the hint if a sample indicates there is either a
+ * free page or an LRU page in the block. One or the other condition
+ * is necessary for the block to be a migration source/target.
+ */
+ block_pfn = pageblock_start_pfn(pfn);
+ pfn = max(block_pfn, zone->zone_start_pfn);
+ page = pfn_to_page(pfn);
+ if (zone != page_zone(page))
+ return false;
+ pfn = block_pfn + pageblock_nr_pages;
+ pfn = min(pfn, zone_end_pfn(zone));
+ end_page = pfn_to_page(pfn);
+
+ do {
+ if (pfn_valid_within(pfn)) {
+ if (check_source && PageLRU(page)) {
+ clear_pageblock_skip(page);
+ return true;
+ }
+
+ if (check_target && PageBuddy(page)) {
+ clear_pageblock_skip(page);
+ return true;
+ }
+ }
+
+ page += (1 << PAGE_ALLOC_COSTLY_ORDER);
+ pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
+ } while (page < end_page);
+
+ return false;
+}
+
/*
* This function is called to clear all cached information on pageblocks that
* should be skipped for page isolation when the migrate and free page scanner
@@ -244,30 +308,54 @@ static bool pageblock_skip_persistent(struct page *page)
*/
static void __reset_isolation_suitable(struct zone *zone)
{
- unsigned long start_pfn = zone->zone_start_pfn;
- unsigned long end_pfn = zone_end_pfn(zone);
- unsigned long pfn;
+ unsigned long migrate_pfn = zone->zone_start_pfn;
+ unsigned long free_pfn = zone_end_pfn(zone);
+ unsigned long reset_migrate = free_pfn;
+ unsigned long reset_free = migrate_pfn;
+ bool source_set = false;
+ bool free_set = false;
+
+ if (!zone->compact_blockskip_flush)
+ return;
zone->compact_blockskip_flush = false;
- /* Walk the zone and mark every pageblock as suitable for isolation */
- for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
- struct page *page;
-
+ /*
+ * Walk the zone and update pageblock skip information. Source looks
+ * for PageLRU while target looks for PageBuddy. When the scanner
+ * is found, both PageBuddy and PageLRU are checked as the pageblock
+ * is suitable as both source and target.
+ */
+ for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
+ free_pfn -= pageblock_nr_pages) {
cond_resched();
- page = pfn_to_online_page(pfn);
- if (!page)
- continue;
- if (zone != page_zone(page))
- continue;
- if (pageblock_skip_persistent(page))
- continue;
+ /* Update the migrate PFN */
+ if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
+ migrate_pfn < reset_migrate) {
+ source_set = true;
+ reset_migrate = migrate_pfn;
+ zone->compact_init_migrate_pfn = reset_migrate;
+ zone->compact_cached_migrate_pfn[0] = reset_migrate;
+ zone->compact_cached_migrate_pfn[1] = reset_migrate;
+ }
- clear_pageblock_skip(page);
+ /* Update the free PFN */
+ if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
+ free_pfn > reset_free) {
+ free_set = true;
+ reset_free = free_pfn;
+ zone->compact_init_free_pfn = reset_free;
+ zone->compact_cached_free_pfn = reset_free;
+ }
}
- reset_cached_positions(zone);
+ /* Leave no distance if no suitable block was reset */
+ if (reset_migrate >= reset_free) {
+ zone->compact_cached_migrate_pfn[0] = migrate_pfn;
+ zone->compact_cached_migrate_pfn[1] = migrate_pfn;
+ zone->compact_cached_free_pfn = free_pfn;
+ }
}
void reset_isolation_suitable(pg_data_t *pgdat)
@@ -286,15 +374,53 @@ void reset_isolation_suitable(pg_data_t *pgdat)
}
/*
+ * Sets the pageblock skip bit if it was clear. Note that this is a hint as
+ * no lock is required by readers or writers. Returns true if it was already set.
+ */
+static bool test_and_set_skip(struct compact_control *cc, struct page *page,
+ unsigned long pfn)
+{
+ bool skip;
+
+ /* Do not update if the skip hint is being ignored */
+ if (cc->ignore_skip_hint)
+ return false;
+
+ if (!IS_ALIGNED(pfn, pageblock_nr_pages))
+ return false;
+
+ skip = get_pageblock_skip(page);
+ if (!skip && !cc->no_set_skip_hint)
+ set_pageblock_skip(page);
+
+ return skip;
+}
+
+static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
+{
+ struct zone *zone = cc->zone;
+
+ pfn = pageblock_end_pfn(pfn);
+
+ /* Set for isolation rather than compaction */
+ if (cc->no_set_skip_hint)
+ return;
+
+ if (pfn > zone->compact_cached_migrate_pfn[0])
+ zone->compact_cached_migrate_pfn[0] = pfn;
+ if (cc->mode != MIGRATE_ASYNC &&
+ pfn > zone->compact_cached_migrate_pfn[1])
+ zone->compact_cached_migrate_pfn[1] = pfn;
+}
+
+/*
* If no pages were isolated then mark this pageblock to be skipped in the
* future. The information is later cleared by __reset_isolation_suitable().
*/
static void update_pageblock_skip(struct compact_control *cc,
- struct page *page, unsigned long nr_isolated,
- bool migrate_scanner)
+ struct page *page, unsigned long pfn)
{
struct zone *zone = cc->zone;
- unsigned long pfn;
if (cc->no_set_skip_hint)
return;
@@ -302,24 +428,11 @@ static void update_pageblock_skip(struct compact_control *cc,
if (!page)
return;
- if (nr_isolated)
- return;
-
set_pageblock_skip(page);
- pfn = page_to_pfn(page);
-
/* Update where async and sync compaction should restart */
- if (migrate_scanner) {
- if (pfn > zone->compact_cached_migrate_pfn[0])
- zone->compact_cached_migrate_pfn[0] = pfn;
- if (cc->mode != MIGRATE_ASYNC &&
- pfn > zone->compact_cached_migrate_pfn[1])
- zone->compact_cached_migrate_pfn[1] = pfn;
- } else {
- if (pfn < zone->compact_cached_free_pfn)
- zone->compact_cached_free_pfn = pfn;
- }
+ if (pfn < zone->compact_cached_free_pfn)
+ zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
@@ -334,32 +447,42 @@ static inline bool pageblock_skip_persistent(struct page *page)
}
static inline void update_pageblock_skip(struct compact_control *cc,
- struct page *page, unsigned long nr_isolated,
- bool migrate_scanner)
+ struct page *page, unsigned long pfn)
+{
+}
+
+static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
+{
+}
+
+static bool test_and_set_skip(struct compact_control *cc, struct page *page,
+ unsigned long pfn)
{
+ return false;
}
#endif /* CONFIG_COMPACTION */
/*
* Compaction requires the taking of some coarse locks that are potentially
- * very heavily contended. For async compaction, back out if the lock cannot
- * be taken immediately. For sync compaction, spin on the lock if needed.
+ * very heavily contended. For async compaction, trylock and record if the
+ * lock is contended. The lock will still be acquired but compaction will
+ * abort when the current block is finished regardless of success rate.
+ * Sync compaction acquires the lock.
*
- * Returns true if the lock is held
- * Returns false if the lock is not held and compaction should abort
+ * Always returns true which makes it easier to track lock state in callers.
*/
-static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
+static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
struct compact_control *cc)
{
- if (cc->mode == MIGRATE_ASYNC) {
- if (!spin_trylock_irqsave(lock, *flags)) {
- cc->contended = true;
- return false;
- }
- } else {
- spin_lock_irqsave(lock, *flags);
+ /* Track if the lock is contended in async mode */
+ if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
+ if (spin_trylock_irqsave(lock, *flags))
+ return true;
+
+ cc->contended = true;
}
+ spin_lock_irqsave(lock, *flags);
return true;
}
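
The comment above describes the trylock-then-record pattern: async compaction notes that the lock was contended but still acquires it and finishes the current block. A userspace analogue of that pattern, with pthread mutexes standing in for the zone/LRU spinlocks and the MIGRATE_ASYNC check omitted; purely illustrative, not the kernel code:

#include <pthread.h>
#include <stdbool.h>

/* Take @lock unconditionally, remembering whether it was ever found contended. */
static bool lock_and_note_contention(pthread_mutex_t *lock, bool *contended)
{
	if (!*contended && pthread_mutex_trylock(lock) == 0)
		return true;

	*contended = true;
	pthread_mutex_lock(lock);
	return true;	/* mirrors compact_lock_irqsave() always returning true */
}
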
@@ -391,37 +514,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
return true;
}
- if (need_resched()) {
- if (cc->mode == MIGRATE_ASYNC) {
- cc->contended = true;
- return true;
- }
- cond_resched();
- }
-
- return false;
-}
-
-/*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and either schedules in sync compaction or aborts async
- * compaction. This is similar to what compact_unlock_should_abort() does, but
- * is used where no lock is concerned.
- *
- * Returns false when no scheduling was needed, or sync compaction scheduled.
- * Returns true when async compaction should abort.
- */
-static inline bool compact_should_abort(struct compact_control *cc)
-{
- /* async compaction aborts if contended */
- if (need_resched()) {
- if (cc->mode == MIGRATE_ASYNC) {
- cc->contended = true;
- return true;
- }
-
- cond_resched();
- }
+ cond_resched();
return false;
}
@@ -435,19 +528,24 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
unsigned long *start_pfn,
unsigned long end_pfn,
struct list_head *freelist,
+ unsigned int stride,
bool strict)
{
int nr_scanned = 0, total_isolated = 0;
- struct page *cursor, *valid_page = NULL;
+ struct page *cursor;
unsigned long flags = 0;
bool locked = false;
unsigned long blockpfn = *start_pfn;
unsigned int order;
+ /* Strict mode is for isolation, speed is secondary */
+ if (strict)
+ stride = 1;
+
cursor = pfn_to_page(blockpfn);
/* Isolate free pages. */
- for (; blockpfn < end_pfn; blockpfn++, cursor++) {
+ for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
int isolated;
struct page *page = cursor;
@@ -465,9 +563,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (!pfn_valid_within(blockpfn))
goto isolate_fail;
- if (!valid_page)
- valid_page = page;
-
/*
* For compound pages such as THP and hugetlbfs, we can save
* potentially a lot of iterations if we skip them at once.
@@ -495,18 +590,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
* recheck as well.
*/
if (!locked) {
- /*
- * The zone lock must be held to isolate freepages.
- * Unfortunately this is a very coarse lock and can be
- * heavily contended if there are parallel allocations
- * or parallel compactions. For async compaction do not
- * spin on the lock and we acquire the lock as late as
- * possible.
- */
- locked = compact_trylock_irqsave(&cc->zone->lock,
+ locked = compact_lock_irqsave(&cc->zone->lock,
&flags, cc);
- if (!locked)
- break;
/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
@@ -565,10 +650,6 @@ isolate_fail:
if (strict && blockpfn < end_pfn)
total_isolated = 0;
- /* Update the pageblock-skip if the whole pageblock was scanned */
- if (blockpfn == end_pfn)
- update_pageblock_skip(cc, valid_page, total_isolated, false);
-
cc->total_free_scanned += nr_scanned;
if (total_isolated)
count_compact_events(COMPACTISOLATED, total_isolated);
@@ -626,7 +707,7 @@ isolate_freepages_range(struct compact_control *cc,
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, &freelist, true);
+ block_end_pfn, &freelist, 0, true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
@@ -644,7 +725,7 @@ isolate_freepages_range(struct compact_control *cc,
}
/* __isolate_free_page() does not map the pages */
- map_pages(&freelist);
+ split_map_pages(&freelist);
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
@@ -657,16 +738,16 @@ isolate_freepages_range(struct compact_control *cc,
}
/* Similar to reclaim, but different enough that they don't share logic */
-static bool too_many_isolated(struct zone *zone)
+static bool too_many_isolated(pg_data_t *pgdat)
{
unsigned long active, inactive, isolated;
- inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
- node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
- active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
- node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
- isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
- node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
+ inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
+ node_page_state(pgdat, NR_INACTIVE_ANON);
+ active = node_page_state(pgdat, NR_ACTIVE_FILE) +
+ node_page_state(pgdat, NR_ACTIVE_ANON);
+ isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
+ node_page_state(pgdat, NR_ISOLATED_ANON);
return isolated > (inactive + active) / 2;
}
@@ -693,7 +774,7 @@ static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
unsigned long end_pfn, isolate_mode_t isolate_mode)
{
- struct zone *zone = cc->zone;
+ pg_data_t *pgdat = cc->zone->zone_pgdat;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct lruvec *lruvec;
unsigned long flags = 0;
@@ -702,13 +783,14 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
unsigned long start_pfn = low_pfn;
bool skip_on_failure = false;
unsigned long next_skip_pfn = 0;
+ bool skip_updated = false;
/*
* Ensure that there are not too many pages isolated from the LRU
* list by either parallel reclaimers or compaction. If there are,
* delay for some time until fewer pages are isolated
*/
- while (unlikely(too_many_isolated(zone))) {
+ while (unlikely(too_many_isolated(pgdat))) {
/* async migration should just abort */
if (cc->mode == MIGRATE_ASYNC)
return 0;
@@ -719,8 +801,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
return 0;
}
- if (compact_should_abort(cc))
- return 0;
+ cond_resched();
if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
skip_on_failure = true;
@@ -758,8 +839,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* if contended.
*/
if (!(low_pfn % SWAP_CLUSTER_MAX)
- && compact_unlock_should_abort(zone_lru_lock(zone), flags,
- &locked, cc))
+ && compact_unlock_should_abort(&pgdat->lru_lock,
+ flags, &locked, cc))
break;
if (!pfn_valid_within(low_pfn))
@@ -768,8 +849,19 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
page = pfn_to_page(low_pfn);
- if (!valid_page)
+ /*
+ * Check if the pageblock has already been marked skipped.
+ * Only the aligned PFN is checked as the caller isolates
+ * COMPACT_CLUSTER_MAX at a time so the second call must
+ * not falsely conclude that the block should be skipped.
+ */
+ if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
+ if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
+ low_pfn = end_pfn;
+ goto isolate_abort;
+ }
valid_page = page;
+ }
/*
* Skip if free. We read page order here without zone lock
@@ -818,7 +910,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (unlikely(__PageMovable(page)) &&
!PageIsolated(page)) {
if (locked) {
- spin_unlock_irqrestore(zone_lru_lock(zone),
+ spin_unlock_irqrestore(&pgdat->lru_lock,
flags);
locked = false;
}
@@ -848,10 +940,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* If we already hold the lock, we can skip some rechecking */
if (!locked) {
- locked = compact_trylock_irqsave(zone_lru_lock(zone),
+ locked = compact_lock_irqsave(&pgdat->lru_lock,
&flags, cc);
- if (!locked)
- break;
+
+ /* Try get exclusive access under lock */
+ if (!skip_updated) {
+ skip_updated = true;
+ if (test_and_set_skip(cc, page, low_pfn))
+ goto isolate_abort;
+ }
/* Recheck PageLRU and PageCompound under lock */
if (!PageLRU(page))
@@ -868,7 +965,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
}
- lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
/* Try isolate the page */
if (__isolate_lru_page(page, isolate_mode) != 0)
@@ -887,16 +984,13 @@ isolate_success:
nr_isolated++;
/*
- * Record where we could have freed pages by migration and not
- * yet flushed them to buddy allocator.
- * - this is the lowest page that was isolated and likely be
- * then freed by migration.
+ * Avoid isolating too much unless this block is being
+ * rescanned (e.g. dirty/writeback pages, parallel allocation)
+ * or a lock is contended. For contention, isolate quickly to
+ * potentially remove one source of contention.
*/
- if (!cc->last_migrated_pfn)
- cc->last_migrated_pfn = low_pfn;
-
- /* Avoid isolating too much */
- if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
+ if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
+ !cc->rescan && !cc->contended) {
++low_pfn;
break;
}
@@ -913,12 +1007,11 @@ isolate_fail:
*/
if (nr_isolated) {
if (locked) {
- spin_unlock_irqrestore(zone_lru_lock(zone), flags);
+ spin_unlock_irqrestore(&pgdat->lru_lock, flags);
locked = false;
}
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
- cc->last_migrated_pfn = 0;
nr_isolated = 0;
}
@@ -939,15 +1032,23 @@ isolate_fail:
if (unlikely(low_pfn > end_pfn))
low_pfn = end_pfn;
+isolate_abort:
if (locked)
- spin_unlock_irqrestore(zone_lru_lock(zone), flags);
+ spin_unlock_irqrestore(&pgdat->lru_lock, flags);
/*
- * Update the pageblock-skip information and cached scanner pfn,
- * if the whole pageblock was scanned without isolating any page.
+ * Update the cached scanner pfn once the pageblock has been scanned.
+ * Pages will either be migrated in which case there is no point
+ * scanning in the near future or migration failed in which case the
+ * failure reason may persist. The block is marked for skipping if
+ * there were no pages isolated in the block or if the block is
+ * rescanned twice in a row.
*/
- if (low_pfn == end_pfn)
- update_pageblock_skip(cc, valid_page, nr_isolated, true);
+ if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
+ if (valid_page && !skip_updated)
+ set_pageblock_skip(valid_page);
+ update_cached_migrate(cc, low_pfn);
+ }
trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
nr_scanned, nr_isolated);
@@ -1013,6 +1114,9 @@ static bool suitable_migration_source(struct compact_control *cc,
{
int block_mt;
+ if (pageblock_skip_persistent(page))
+ return false;
+
if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
return true;
@@ -1050,6 +1154,12 @@ static bool suitable_migration_target(struct compact_control *cc,
return false;
}
+static inline unsigned int
+freelist_scan_limit(struct compact_control *cc)
+{
+ return (COMPACT_CLUSTER_MAX >> cc->fast_search_fail) + 1;
+}
+
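
For scale (not part of the patch, and assuming COMPACT_CLUSTER_MAX is SWAP_CLUSTER_MAX, i.e. 32, as in the mainline tree): freelist_scan_limit() works out to 33, 17, 9, 5, 3, 2 and 1 entries for fast_search_fail values of 0 through 6, so repeated fast-search failures quickly shrink the search before the code falls back to the linear scanners.
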
/*
* Test whether the free scanner has reached the same or lower pageblock than
* the migration scanner, and compaction should thus terminate.
@@ -1061,6 +1171,248 @@ static inline bool compact_scanners_met(struct compact_control *cc)
}
/*
+ * Used when scanning for a suitable migration target which scans freelists
+ * in reverse. Reorders the list such that the unscanned pages are scanned
+ * first on the next iteration of the free scanner
+ */
+static void
+move_freelist_head(struct list_head *freelist, struct page *freepage)
+{
+ LIST_HEAD(sublist);
+
+ if (!list_is_last(freelist, &freepage->lru)) {
+ list_cut_before(&sublist, freelist, &freepage->lru);
+ if (!list_empty(&sublist))
+ list_splice_tail(&sublist, freelist);
+ }
+}
+
+/*
+ * Similar to move_freelist_head except used by the migration scanner
+ * when scanning forward. It's possible for these list operations to
+ * move against each other if they search the free list exactly in
+ * lockstep.
+ */
+static void
+move_freelist_tail(struct list_head *freelist, struct page *freepage)
+{
+ LIST_HEAD(sublist);
+
+ if (!list_is_first(freelist, &freepage->lru)) {
+ list_cut_position(&sublist, freelist, &freepage->lru);
+ if (!list_empty(&sublist))
+ list_splice_tail(&sublist, freelist);
+ }
+}
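
As a worked example of the reordering (illustrative, not from the patch): with a freelist A->B->C->D->E whose reverse walk by the free scanner stops at C, move_freelist_head() cuts A and B off the front and splices them onto the tail, leaving C->D->E->A->B, so the next reverse walk starts with the still-unscanned B and A. move_freelist_tail() is the mirror image for the migration scanner's forward walk: stopping at C leaves D->E->A->B->C, so the next forward walk starts with the unscanned D.
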
+
+static void
+fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
+{
+ unsigned long start_pfn, end_pfn;
+ struct page *page = pfn_to_page(pfn);
+
+ /* Do not search around if there are enough pages already */
+ if (cc->nr_freepages >= cc->nr_migratepages)
+ return;
+
+ /* Minimise scanning during async compaction */
+ if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
+ return;
+
+ /* Pageblock boundaries */
+ start_pfn = pageblock_start_pfn(pfn);
+ end_pfn = min(start_pfn + pageblock_nr_pages, zone_end_pfn(cc->zone));
+
+ /* Scan before */
+ if (start_pfn != pfn) {
+ isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
+ if (cc->nr_freepages >= cc->nr_migratepages)
+ return;
+ }
+
+ /* Scan after */
+ start_pfn = pfn + nr_isolated;
+ if (start_pfn != end_pfn)
+ isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
+
+ /* Skip this pageblock in the future as it's full or nearly full */
+ if (cc->nr_freepages < cc->nr_migratepages)
+ set_pageblock_skip(page);
+}
+
+/* Search orders in round-robin fashion */
+static int next_search_order(struct compact_control *cc, int order)
+{
+ order--;
+ if (order < 0)
+ order = cc->order - 1;
+
+ /* Search wrapped around? */
+ if (order == cc->search_order) {
+ cc->search_order--;
+ if (cc->search_order < 0)
+ cc->search_order = cc->order - 1;
+ return -1;
+ }
+
+ return order;
+}
+
+static unsigned long
+fast_isolate_freepages(struct compact_control *cc)
+{
+ unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
+ unsigned int nr_scanned = 0;
+ unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
+ unsigned long nr_isolated = 0;
+ unsigned long distance;
+ struct page *page = NULL;
+ bool scan_start = false;
+ int order;
+
+ /* Full compaction passes in a negative order */
+ if (cc->order <= 0)
+ return cc->free_pfn;
+
+ /*
+ * If starting the scan, use a deeper search and use the highest
+ * PFN found if a suitable one is not found.
+ */
+ if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
+ limit = pageblock_nr_pages >> 1;
+ scan_start = true;
+ }
+
+ /*
+ * Preferred point is in the top quarter of the scan space but take
+ * a pfn from the top half if the search is problematic.
+ */
+ distance = (cc->free_pfn - cc->migrate_pfn);
+ low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
+ min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
+
+ if (WARN_ON_ONCE(min_pfn > low_pfn))
+ low_pfn = min_pfn;
+
+ /*
+ * Search starts from the last successful isolation order or the next
+ * order to search after a previous failure
+ */
+ cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
+
+ for (order = cc->search_order;
+ !page && order >= 0;
+ order = next_search_order(cc, order)) {
+ struct free_area *area = &cc->zone->free_area[order];
+ struct list_head *freelist;
+ struct page *freepage;
+ unsigned long flags;
+ unsigned int order_scanned = 0;
+
+ if (!area->nr_free)
+ continue;
+
+ spin_lock_irqsave(&cc->zone->lock, flags);
+ freelist = &area->free_list[MIGRATE_MOVABLE];
+ list_for_each_entry_reverse(freepage, freelist, lru) {
+ unsigned long pfn;
+
+ order_scanned++;
+ nr_scanned++;
+ pfn = page_to_pfn(freepage);
+
+ if (pfn >= highest)
+ highest = pageblock_start_pfn(pfn);
+
+ if (pfn >= low_pfn) {
+ cc->fast_search_fail = 0;
+ cc->search_order = order;
+ page = freepage;
+ break;
+ }
+
+ if (pfn >= min_pfn && pfn > high_pfn) {
+ high_pfn = pfn;
+
+ /* Shorten the scan if a candidate is found */
+ limit >>= 1;
+ }
+
+ if (order_scanned >= limit)
+ break;
+ }
+
+ /* Use a minimum pfn if a preferred one was not found */
+ if (!page && high_pfn) {
+ page = pfn_to_page(high_pfn);
+
+ /* Update freepage for the list reorder below */
+ freepage = page;
+ }
+
+ /* Reorder so that a future search skips recent pages */
+ move_freelist_head(freelist, freepage);
+
+ /* Isolate the page if available */
+ if (page) {
+ if (__isolate_free_page(page, order)) {
+ set_page_private(page, order);
+ nr_isolated = 1 << order;
+ cc->nr_freepages += nr_isolated;
+ list_add_tail(&page->lru, &cc->freepages);
+ count_compact_events(COMPACTISOLATED, nr_isolated);
+ } else {
+ /* If isolation fails, abort the search */
+ order = -1;
+ page = NULL;
+ }
+ }
+
+ spin_unlock_irqrestore(&cc->zone->lock, flags);
+
+ /*
+ * Smaller scan on the next order so the total scan is related
+ * to freelist_scan_limit.
+ */
+ if (order_scanned >= limit)
+ limit = max(1U, limit >> 1);
+ }
+
+ if (!page) {
+ cc->fast_search_fail++;
+ if (scan_start) {
+ /*
+ * Use the highest PFN found above min. If one was
+ * not found, be pessimistic for direct compaction
+ * and use the min mark.
+ */
+ if (highest) {
+ page = pfn_to_page(highest);
+ cc->free_pfn = highest;
+ } else {
+ if (cc->direct_compaction) {
+ page = pfn_to_page(min_pfn);
+ cc->free_pfn = min_pfn;
+ }
+ }
+ }
+ }
+
+ if (highest && highest >= cc->zone->compact_cached_free_pfn) {
+ highest -= pageblock_nr_pages;
+ cc->zone->compact_cached_free_pfn = highest;
+ }
+
+ cc->total_free_scanned += nr_scanned;
+ if (!page)
+ return cc->free_pfn;
+
+ low_pfn = page_to_pfn(page);
+ fast_isolate_around(cc, low_pfn, nr_isolated);
+ return low_pfn;
+}
+
+/*
* Based on information in the current compact_control, find blocks
* suitable for isolating free pages from and then isolate them.
*/
@@ -1073,6 +1425,12 @@ static void isolate_freepages(struct compact_control *cc)
unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */
struct list_head *freelist = &cc->freepages;
+ unsigned int stride;
+
+ /* Try a small search of the free lists for a candidate */
+ isolate_start_pfn = fast_isolate_freepages(cc);
+ if (cc->nr_freepages)
+ goto splitmap;
/*
* Initialise the free scanner. The starting point is where we last
@@ -1086,10 +1444,11 @@ static void isolate_freepages(struct compact_control *cc)
* is using.
*/
isolate_start_pfn = cc->free_pfn;
- block_start_pfn = pageblock_start_pfn(cc->free_pfn);
+ block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
zone_end_pfn(zone));
low_pfn = pageblock_end_pfn(cc->migrate_pfn);
+ stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
/*
* Isolate free pages until enough are available to migrate the
@@ -1100,14 +1459,14 @@ static void isolate_freepages(struct compact_control *cc)
block_end_pfn = block_start_pfn,
block_start_pfn -= pageblock_nr_pages,
isolate_start_pfn = block_start_pfn) {
+ unsigned long nr_isolated;
+
/*
* This can iterate a massively long zone without finding any
- * suitable migration targets, so periodically check if we need
- * to schedule, or even abort async compaction.
+ * suitable migration targets, so periodically check resched.
*/
- if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
- && compact_should_abort(cc))
- break;
+ if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+ cond_resched();
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
@@ -1123,15 +1482,15 @@ static void isolate_freepages(struct compact_control *cc)
continue;
/* Found a block suitable for isolating free pages from. */
- isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
- freelist, false);
+ nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
+ block_end_pfn, freelist, stride, false);
- /*
- * If we isolated enough freepages, or aborted due to lock
- * contention, terminate.
- */
- if ((cc->nr_freepages >= cc->nr_migratepages)
- || cc->contended) {
+ /* Update the skip hint if the full pageblock was scanned */
+ if (isolate_start_pfn == block_end_pfn)
+ update_pageblock_skip(cc, page, block_start_pfn);
+
+ /* Are enough freepages isolated? */
+ if (cc->nr_freepages >= cc->nr_migratepages) {
if (isolate_start_pfn >= block_end_pfn) {
/*
* Restart at previous pageblock if more
@@ -1148,10 +1507,14 @@ static void isolate_freepages(struct compact_control *cc)
*/
break;
}
- }
- /* __isolate_free_page() does not map the pages */
- map_pages(freelist);
+ /* Adjust stride depending on isolation */
+ if (nr_isolated) {
+ stride = 1;
+ continue;
+ }
+ stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
+ }
/*
* Record where the free scanner will restart next time. Either we
@@ -1160,6 +1523,10 @@ static void isolate_freepages(struct compact_control *cc)
* and the loop terminated due to isolate_start_pfn < low_pfn
*/
cc->free_pfn = isolate_start_pfn;
+
+splitmap:
+ /* __isolate_free_page() does not map the pages */
+ split_map_pages(freelist);
}
/*
@@ -1172,13 +1539,8 @@ static struct page *compaction_alloc(struct page *migratepage,
struct compact_control *cc = (struct compact_control *)data;
struct page *freepage;
- /*
- * Isolate free pages if necessary, and if we are not aborting due to
- * contention.
- */
if (list_empty(&cc->freepages)) {
- if (!cc->contended)
- isolate_freepages(cc);
+ isolate_freepages(cc);
if (list_empty(&cc->freepages))
return NULL;
@@ -1217,6 +1579,147 @@ typedef enum {
*/
int sysctl_compact_unevictable_allowed __read_mostly = 1;
+static inline void
+update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
+{
+ if (cc->fast_start_pfn == ULONG_MAX)
+ return;
+
+ if (!cc->fast_start_pfn)
+ cc->fast_start_pfn = pfn;
+
+ cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
+}
+
+static inline unsigned long
+reinit_migrate_pfn(struct compact_control *cc)
+{
+ if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
+ return cc->migrate_pfn;
+
+ cc->migrate_pfn = cc->fast_start_pfn;
+ cc->fast_start_pfn = ULONG_MAX;
+
+ return cc->migrate_pfn;
+}
+
+/*
+ * Briefly search the free lists for a migration source that already has
+ * some free pages to reduce the number of pages that need migration
+ * before a pageblock is free.
+ */
+static unsigned long fast_find_migrateblock(struct compact_control *cc)
+{
+ unsigned int limit = freelist_scan_limit(cc);
+ unsigned int nr_scanned = 0;
+ unsigned long distance;
+ unsigned long pfn = cc->migrate_pfn;
+ unsigned long high_pfn;
+ int order;
+
+ /* Skip hints are relied on to avoid repeats on the fast search */
+ if (cc->ignore_skip_hint)
+ return pfn;
+
+ /*
+ * If the migrate_pfn is not at the start of a zone or the start
+ * of a pageblock then assume this is a continuation of a previous
+ * scan restarted due to COMPACT_CLUSTER_MAX.
+ */
+ if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
+ return pfn;
+
+ /*
+ * For smaller orders, just linearly scan as the number of pages
+ * to migrate should be relatively small and does not necessarily
+ * justify freeing up a large block for a small allocation.
+ */
+ if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
+ return pfn;
+
+ /*
+ * Only allow kcompactd and direct requests for movable pages to
+ * quickly clear out a MOVABLE pageblock for allocation. This
+ * reduces the risk that a large movable pageblock is freed for
+ * an unmovable/reclaimable small allocation.
+ */
+ if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
+ return pfn;
+
+ /*
+ * When starting the migration scanner, pick any pageblock within the
+ * first half of the search space. Otherwise try and pick a pageblock
+ * within the first eighth to reduce the chances that a migration
+ * target later becomes a source.
+ */
+ distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
+ if (cc->migrate_pfn != cc->zone->zone_start_pfn)
+ distance >>= 2;
+ high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
+
+ for (order = cc->order - 1;
+ order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
+ order--) {
+ struct free_area *area = &cc->zone->free_area[order];
+ struct list_head *freelist;
+ unsigned long flags;
+ struct page *freepage;
+
+ if (!area->nr_free)
+ continue;
+
+ spin_lock_irqsave(&cc->zone->lock, flags);
+ freelist = &area->free_list[MIGRATE_MOVABLE];
+ list_for_each_entry(freepage, freelist, lru) {
+ unsigned long free_pfn;
+
+ nr_scanned++;
+ free_pfn = page_to_pfn(freepage);
+ if (free_pfn < high_pfn) {
+ /*
+ * Avoid if skipped recently. Ideally it would
+ * move to the tail but even safe iteration of
+ * the list assumes an entry is deleted, not
+ * reordered.
+ */
+ if (get_pageblock_skip(freepage)) {
+ if (list_is_last(freelist, &freepage->lru))
+ break;
+
+ continue;
+ }
+
+ /* Reorder so that a future search skips recent pages */
+ move_freelist_tail(freelist, freepage);
+
+ update_fast_start_pfn(cc, free_pfn);
+ pfn = pageblock_start_pfn(free_pfn);
+ cc->fast_search_fail = 0;
+ set_pageblock_skip(freepage);
+ break;
+ }
+
+ if (nr_scanned >= limit) {
+ cc->fast_search_fail++;
+ move_freelist_tail(freelist, freepage);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cc->zone->lock, flags);
+ }
+
+ cc->total_migrate_scanned += nr_scanned;
+
+ /*
+ * If fast scanning failed then use a cached entry for a page block
+ * that had free pages as the basis for starting a linear scan.
+ */
+ if (pfn == cc->migrate_pfn)
+ pfn = reinit_migrate_pfn(cc);
+
+ return pfn;
+}
+
/*
* Isolate all pages that can be migrated from the first suitable block,
* starting at the block pointed to by the migrate scanner pfn within
@@ -1232,16 +1735,25 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
const isolate_mode_t isolate_mode =
(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
+ bool fast_find_block;
/*
* Start at where we last stopped, or beginning of the zone as
- * initialized by compact_zone()
+ * initialized by compact_zone(). The first failure will use
+ * the lowest PFN as the starting point for linear scanning.
*/
- low_pfn = cc->migrate_pfn;
+ low_pfn = fast_find_migrateblock(cc);
block_start_pfn = pageblock_start_pfn(low_pfn);
if (block_start_pfn < zone->zone_start_pfn)
block_start_pfn = zone->zone_start_pfn;
+ /*
+ * fast_find_migrateblock() marks a pageblock as skipped, so to avoid
+ * the isolation_suitable check below, check whether the fast
+ * search was successful.
+ */
+ fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
+
/* Only scan within a pageblock boundary */
block_end_pfn = pageblock_end_pfn(low_pfn);
@@ -1250,6 +1762,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
* Do not cross the free scanner.
*/
for (; block_end_pfn <= cc->free_pfn;
+ fast_find_block = false,
low_pfn = block_end_pfn,
block_start_pfn = block_end_pfn,
block_end_pfn += pageblock_nr_pages) {
@@ -1257,34 +1770,45 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
/*
* This can potentially iterate a massively long zone with
* many pageblocks unsuitable, so periodically check if we
- * need to schedule, or even abort async compaction.
+ * need to schedule.
*/
- if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
- && compact_should_abort(cc))
- break;
+ if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+ cond_resched();
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
if (!page)
continue;
- /* If isolation recently failed, do not retry */
- if (!isolation_suitable(cc, page))
+ /*
+ * If isolation recently failed, do not retry. Only check the
+ * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
+ * to be visited multiple times. Assume skip was checked
+ * before making it "skip" so other compaction instances do
+ * not scan the same block.
+ */
+ if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
+ !fast_find_block && !isolation_suitable(cc, page))
continue;
/*
- * For async compaction, also only scan in MOVABLE blocks.
- * Async compaction is optimistic to see if the minimum amount
- * of work satisfies the allocation.
+ * For async compaction, also only scan in MOVABLE blocks
+ * without huge pages. Async compaction is optimistic to see
+ * if the minimum amount of work satisfies the allocation.
+ * The cached PFN is updated as it's possible that all
+ * remaining blocks between source and target are unsuitable
+ * and the compaction scanners fail to meet.
*/
- if (!suitable_migration_source(cc, page))
+ if (!suitable_migration_source(cc, page)) {
+ update_cached_migrate(cc, block_end_pfn);
continue;
+ }
/* Perform the isolation */
low_pfn = isolate_migratepages_block(cc, low_pfn,
block_end_pfn, isolate_mode);
- if (!low_pfn || cc->contended)
+ if (!low_pfn)
return ISOLATE_ABORT;
/*
@@ -1310,19 +1834,16 @@ static inline bool is_via_compact_memory(int order)
return order == -1;
}
-static enum compact_result __compact_finished(struct zone *zone,
- struct compact_control *cc)
+static enum compact_result __compact_finished(struct compact_control *cc)
{
unsigned int order;
const int migratetype = cc->migratetype;
-
- if (cc->contended || fatal_signal_pending(current))
- return COMPACT_CONTENDED;
+ int ret;
/* Compaction run completes if the migrate and free scanner meet */
if (compact_scanners_met(cc)) {
/* Let the next compaction start anew. */
- reset_cached_positions(zone);
+ reset_cached_positions(cc->zone);
/*
* Mark that the PG_migrate_skip information should be cleared
@@ -1331,7 +1852,7 @@ static enum compact_result __compact_finished(struct zone *zone,
* based on an allocation request.
*/
if (cc->direct_compaction)
- zone->compact_blockskip_flush = true;
+ cc->zone->compact_blockskip_flush = true;
if (cc->whole_zone)
return COMPACT_COMPLETE;
@@ -1342,20 +1863,19 @@ static enum compact_result __compact_finished(struct zone *zone,
if (is_via_compact_memory(cc->order))
return COMPACT_CONTINUE;
- if (cc->finishing_block) {
- /*
- * We have finished the pageblock, but better check again that
- * we really succeeded.
- */
- if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
- cc->finishing_block = false;
- else
- return COMPACT_CONTINUE;
- }
+ /*
+ * Always finish scanning a pageblock to reduce the possibility of
+ * fallbacks in the future. This is particularly important when
+ * the migration source is unmovable/reclaimable, but it's not worth
+ * special casing.
+ */
+ if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
+ return COMPACT_CONTINUE;
/* Direct compactor: Is a suitable page free? */
+ ret = COMPACT_NO_SUITABLE_PAGE;
for (order = cc->order; order < MAX_ORDER; order++) {
- struct free_area *area = &zone->free_area[order];
+ struct free_area *area = &cc->zone->free_area[order];
bool can_steal;
/* Job done if page is free of the right migratetype */
@@ -1393,21 +1913,23 @@ static enum compact_result __compact_finished(struct zone *zone,
return COMPACT_SUCCESS;
}
- cc->finishing_block = true;
- return COMPACT_CONTINUE;
+ ret = COMPACT_CONTINUE;
+ break;
}
}
- return COMPACT_NO_SUITABLE_PAGE;
+ if (cc->contended || fatal_signal_pending(current))
+ ret = COMPACT_CONTENDED;
+
+ return ret;
}
-static enum compact_result compact_finished(struct zone *zone,
- struct compact_control *cc)
+static enum compact_result compact_finished(struct compact_control *cc)
{
int ret;
- ret = __compact_finished(zone, cc);
- trace_mm_compaction_finished(zone, cc->order, ret);
+ ret = __compact_finished(cc);
+ trace_mm_compaction_finished(cc->zone, cc->order, ret);
if (ret == COMPACT_NO_SUITABLE_PAGE)
ret = COMPACT_CONTINUE;
@@ -1534,15 +2056,18 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
return false;
}
-static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
+static enum compact_result
+compact_zone(struct compact_control *cc, struct capture_control *capc)
{
enum compact_result ret;
- unsigned long start_pfn = zone->zone_start_pfn;
- unsigned long end_pfn = zone_end_pfn(zone);
+ unsigned long start_pfn = cc->zone->zone_start_pfn;
+ unsigned long end_pfn = zone_end_pfn(cc->zone);
+ unsigned long last_migrated_pfn;
const bool sync = cc->mode != MIGRATE_ASYNC;
+ bool update_cached;
cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
- ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
+ ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
cc->classzone_idx);
/* Compaction is likely to fail */
if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
@@ -1555,8 +2080,8 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
* Clear pageblock skip if there were failures recently and compaction
* is about to be retried after being deferred.
*/
- if (compaction_restarting(zone, cc->order))
- __reset_isolation_suitable(zone);
+ if (compaction_restarting(cc->zone, cc->order))
+ __reset_isolation_suitable(cc->zone);
/*
* Setup to move all movable pages to the end of the zone. Used cached
@@ -1564,43 +2089,76 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
* want to compact the whole zone), but check that it is initialised
* by ensuring the values are within zone boundaries.
*/
+ cc->fast_start_pfn = 0;
if (cc->whole_zone) {
cc->migrate_pfn = start_pfn;
cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
} else {
- cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
- cc->free_pfn = zone->compact_cached_free_pfn;
+ cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
+ cc->free_pfn = cc->zone->compact_cached_free_pfn;
if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
- zone->compact_cached_free_pfn = cc->free_pfn;
+ cc->zone->compact_cached_free_pfn = cc->free_pfn;
}
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
cc->migrate_pfn = start_pfn;
- zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
- zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
+ cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+ cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
}
- if (cc->migrate_pfn == start_pfn)
+ if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
cc->whole_zone = true;
}
- cc->last_migrated_pfn = 0;
+ last_migrated_pfn = 0;
+
+ /*
+ * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
+ * the basis that some migrations will fail in ASYNC mode. However,
+ * if the cached PFNs match and pageblocks are skipped due to having
+ * no isolation candidates, then the sync state does not matter.
+ * Until a pageblock with isolation candidates is found, keep the
+ * cached PFNs in sync to avoid revisiting the same blocks.
+ */
+ update_cached = !sync &&
+ cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
cc->free_pfn, end_pfn, sync);
migrate_prep_local();
- while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
+ while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
int err;
+ unsigned long start_pfn = cc->migrate_pfn;
+
+ /*
+ * Avoid multiple rescans which can happen if a page cannot be
+ * isolated (dirty/writeback in async mode) or if the migrated
+ * pages are being allocated before the pageblock is cleared.
+ * The first rescan will capture the entire pageblock for
+ * migration. If it fails, it'll be marked skip and scanning
+ * will proceed as normal.
+ */
+ cc->rescan = false;
+ if (pageblock_start_pfn(last_migrated_pfn) ==
+ pageblock_start_pfn(start_pfn)) {
+ cc->rescan = true;
+ }
- switch (isolate_migratepages(zone, cc)) {
+ switch (isolate_migratepages(cc->zone, cc)) {
case ISOLATE_ABORT:
ret = COMPACT_CONTENDED;
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
+ last_migrated_pfn = 0;
goto out;
case ISOLATE_NONE:
+ if (update_cached) {
+ cc->zone->compact_cached_migrate_pfn[1] =
+ cc->zone->compact_cached_migrate_pfn[0];
+ }
+
/*
* We haven't isolated and migrated anything, but
* there might still be unflushed migrations from
@@ -1608,6 +2166,8 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
*/
goto check_drain;
case ISOLATE_SUCCESS:
+ update_cached = false;
+ last_migrated_pfn = start_pfn;
;
}
@@ -1639,8 +2199,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
cc->migrate_pfn = block_end_pfn(
cc->migrate_pfn - 1, cc->order);
/* Draining pcplists is useless in this case */
- cc->last_migrated_pfn = 0;
-
+ last_migrated_pfn = 0;
}
}
@@ -1652,21 +2211,26 @@ check_drain:
* compact_finished() can detect immediately if allocation
* would succeed.
*/
- if (cc->order > 0 && cc->last_migrated_pfn) {
+ if (cc->order > 0 && last_migrated_pfn) {
int cpu;
unsigned long current_block_start =
block_start_pfn(cc->migrate_pfn, cc->order);
- if (cc->last_migrated_pfn < current_block_start) {
+ if (last_migrated_pfn < current_block_start) {
cpu = get_cpu();
lru_add_drain_cpu(cpu);
- drain_local_pages(zone);
+ drain_local_pages(cc->zone);
put_cpu();
/* No more flushing until we migrate again */
- cc->last_migrated_pfn = 0;
+ last_migrated_pfn = 0;
}
}
+ /* Stop if a page has been captured */
+ if (capc && capc->page) {
+ ret = COMPACT_SUCCESS;
+ break;
+ }
}
out:
@@ -1685,8 +2249,8 @@ out:
* Only go back, not forward. The cached pfn might have been
* already reset to zone end in compact_finished()
*/
- if (free_pfn > zone->compact_cached_free_pfn)
- zone->compact_cached_free_pfn = free_pfn;
+ if (free_pfn > cc->zone->compact_cached_free_pfn)
+ cc->zone->compact_cached_free_pfn = free_pfn;
}
count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
@@ -1700,7 +2264,8 @@ out:
static enum compact_result compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask, enum compact_priority prio,
- unsigned int alloc_flags, int classzone_idx)
+ unsigned int alloc_flags, int classzone_idx,
+ struct page **capture)
{
enum compact_result ret;
struct compact_control cc = {
@@ -1709,6 +2274,7 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.total_migrate_scanned = 0,
.total_free_scanned = 0,
.order = order,
+ .search_order = order,
.gfp_mask = gfp_mask,
.zone = zone,
.mode = (prio == COMPACT_PRIO_ASYNC) ?
@@ -1720,14 +2286,24 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
};
+ struct capture_control capc = {
+ .cc = &cc,
+ .page = NULL,
+ };
+
+ if (capture)
+ current->capture_control = &capc;
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
- ret = compact_zone(zone, &cc);
+ ret = compact_zone(&cc, &capc);
VM_BUG_ON(!list_empty(&cc.freepages));
VM_BUG_ON(!list_empty(&cc.migratepages));
+ *capture = capc.page;
+ current->capture_control = NULL;
+
return ret;
}
@@ -1745,7 +2321,7 @@ int sysctl_extfrag_threshold = 500;
*/
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
- enum compact_priority prio)
+ enum compact_priority prio, struct page **capture)
{
int may_perform_io = gfp_mask & __GFP_IO;
struct zoneref *z;
@@ -1773,7 +2349,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
}
status = compact_zone_order(zone, order, gfp_mask, prio,
- alloc_flags, ac_classzone_idx(ac));
+ alloc_flags, ac_classzone_idx(ac), capture);
rc = max(status, rc);
/* The allocation should succeed, stop compacting */
@@ -1841,7 +2417,7 @@ static void compact_node(int nid)
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
- compact_zone(zone, &cc);
+ compact_zone(&cc, NULL);
VM_BUG_ON(!list_empty(&cc.freepages));
VM_BUG_ON(!list_empty(&cc.migratepages));
@@ -1876,14 +2452,6 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
return 0;
}
-int sysctl_extfrag_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos)
-{
- proc_dointvec_minmax(table, write, buffer, length, ppos);
-
- return 0;
-}
-
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
struct device_attribute *attr,
@@ -1948,6 +2516,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
struct zone *zone;
struct compact_control cc = {
.order = pgdat->kcompactd_max_order,
+ .search_order = pgdat->kcompactd_max_order,
.total_migrate_scanned = 0,
.total_free_scanned = 0,
.classzone_idx = pgdat->kcompactd_classzone_idx,
@@ -1983,7 +2552,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
if (kthread_should_stop())
return;
- status = compact_zone(zone, &cc);
+ status = compact_zone(&cc, NULL);
if (status == COMPACT_SUCCESS) {
compaction_defer_reset(zone, cc.order, false);
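The mm/compaction.c and mm/internal.h changes above add a per-task struct capture_control so that a suitable page freed while a task is direct-compacting can be handed straight to that task instead of going back through the free lists. A minimal sketch of such a hand-off check follows; the helper name and its placement on the free path are assumptions, only the structure fields come from the hunks.

/*
 * Illustrative sketch only, not code from this series: how the
 * capture_control declared in mm/internal.h above could be consulted
 * from the page-free path.  compaction_capture_page() is a hypothetical
 * helper; struct capture_control is the one added in the hunk.
 */
static inline bool compaction_capture_page(struct capture_control *capc,
					   struct page *page, int order)
{
	/* No direct compactor is waiting, or it already captured a page. */
	if (!capc || capc->page)
		return false;

	/* Only hand over a block large enough for the pending request. */
	if (order < capc->cc->order)
		return false;

	capc->page = page;	/* compactor takes the page; skip the buddy merge */
	return true;
}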
diff --git a/mm/debug.c b/mm/debug.c
index 0abb987dad9b..1611cf00a137 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
void __dump_page(struct page *page, const char *reason)
{
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping;
bool page_poisoned = PagePoisoned(page);
int mapcount;
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason)
goto hex_only;
}
+ mapping = page_mapping(page);
+
/*
* Avoid VM_BUG_ON() in page_mapcount().
* page->_mapcount space in struct page is used by sl[aou]b pages to
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 6d4b97e7e9e9..76a160083506 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -114,10 +114,9 @@ static DEVICE_ATTR(pools, 0444, show_pools, NULL);
* @size: size of the blocks in this pool.
* @align: alignment requirement for blocks; must be a power of two
* @boundary: returned blocks won't cross this power of two boundary
- * Context: !in_interrupt()
+ * Context: not in_interrupt()
*
- * Returns a dma allocation pool with the requested characteristics, or
- * null if one can't be created. Given one of these pools, dma_pool_alloc()
+ * Given one of these pools, dma_pool_alloc()
* may be used to allocate memory. Such memory will all have "consistent"
* DMA mappings, accessible by the device and its driver without using
* cache flushing primitives. The actual size of blocks allocated may be
@@ -127,6 +126,9 @@ static DEVICE_ATTR(pools, 0444, show_pools, NULL);
* cross that size boundary. This is useful for devices which have
* addressing restrictions on individual DMA transfers, such as not crossing
* boundaries of 4KBytes.
+ *
+ * Return: a dma allocation pool with the requested characteristics, or
+ * %NULL if one can't be created.
*/
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t boundary)
@@ -313,7 +315,7 @@ EXPORT_SYMBOL(dma_pool_destroy);
* @mem_flags: GFP_* bitmask
* @handle: pointer to dma address of block
*
- * This returns the kernel virtual address of a currently unused block,
+ * Return: the kernel virtual address of a currently unused block,
* and reports its dma address through the handle.
* If such a memory block can't be allocated, %NULL is returned.
*/
@@ -498,6 +500,9 @@ static int dmam_pool_match(struct device *dev, void *res, void *match_data)
*
* Managed dma_pool_create(). DMA pool created with this function is
* automatically destroyed on driver detach.
+ *
+ * Return: a managed dma allocation pool with the requested
+ * characteristics, or %NULL if one can't be created.
*/
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation)
diff --git a/mm/failslab.c b/mm/failslab.c
index b135ebb88b6f..ec5aad211c5b 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -48,18 +48,12 @@ static int __init failslab_debugfs_init(void)
if (IS_ERR(dir))
return PTR_ERR(dir);
- if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
- &failslab.ignore_gfp_reclaim))
- goto fail;
- if (!debugfs_create_bool("cache-filter", mode, dir,
- &failslab.cache_filter))
- goto fail;
+ debugfs_create_bool("ignore-gfp-wait", mode, dir,
+ &failslab.ignore_gfp_reclaim);
+ debugfs_create_bool("cache-filter", mode, dir,
+ &failslab.cache_filter);
return 0;
-fail:
- debugfs_remove_recursive(dir);
-
- return -ENOMEM;
}
late_initcall(failslab_debugfs_init);
diff --git a/mm/filemap.c b/mm/filemap.c
index 9f5e323e883e..a3b4021c448f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -98,8 +98,8 @@
* ->swap_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->i_pages lock (try_to_unmap_one)
- * ->zone_lru_lock(zone) (follow_page->mark_page_accessed)
- * ->zone_lru_lock(zone) (check_pte_range->isolate_lru_page)
+ * ->pgdat->lru_lock (follow_page->mark_page_accessed)
+ * ->pgdat->lru_lock (check_pte_range->isolate_lru_page)
* ->private_lock (page_remove_rmap->set_page_dirty)
* ->i_pages lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
@@ -392,6 +392,8 @@ static int filemap_check_and_keep_errors(struct address_space *mapping)
* opposed to a regular memory cleansing writeback. The difference between
* these two operations is that if a dirty page/buffer is encountered, it must
* be waited upon, and not just skipped over.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
loff_t end, int sync_mode)
@@ -438,6 +440,8 @@ EXPORT_SYMBOL(filemap_fdatawrite_range);
*
* This is a mostly non-blocking flush. Not suitable for data-integrity
* purposes - I/O may not be started against all dirty pages.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int filemap_flush(struct address_space *mapping)
{
@@ -453,6 +457,9 @@ EXPORT_SYMBOL(filemap_flush);
*
* Find at least one page in the range supplied, usually used to check if
* direct writing in this range will trigger a writeback.
+ *
+ * Return: %true if at least one page exists in the specified range,
+ * %false otherwise.
*/
bool filemap_range_has_page(struct address_space *mapping,
loff_t start_byte, loff_t end_byte)
@@ -529,6 +536,8 @@ static void __filemap_fdatawait_range(struct address_space *mapping,
* Since the error status of the address space is cleared by this function,
* callers are responsible for checking the return value and handling and/or
* reporting the error.
+ *
+ * Return: error status of the address space.
*/
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
loff_t end_byte)
@@ -551,6 +560,8 @@ EXPORT_SYMBOL(filemap_fdatawait_range);
* Since the error status of the file is advanced by this function,
* callers are responsible for checking the return value and handling and/or
* reporting the error.
+ *
+ * Return: error status of the address space vs. the file->f_wb_err cursor.
*/
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
@@ -572,6 +583,8 @@ EXPORT_SYMBOL(file_fdatawait_range);
* Use this function if callers don't handle errors themselves. Expected
* call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
* fsfreeze(8)
+ *
+ * Return: error status of the address space.
*/
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
@@ -623,6 +636,8 @@ EXPORT_SYMBOL(filemap_write_and_wait);
*
* Note that @lend is inclusive (describes the last byte to be written) so
* that this function can be used to write to the very end-of-file (end = -1).
+ *
+ * Return: error status of the address space.
*/
int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend)
@@ -678,6 +693,8 @@ EXPORT_SYMBOL(__filemap_set_wb_err);
* While we handle mapping->wb_err with atomic operations, the f_wb_err
* value is protected by the f_lock since we must ensure that it reflects
* the latest value swapped in for this file descriptor.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int file_check_and_advance_wb_err(struct file *file)
{
@@ -720,6 +737,8 @@ EXPORT_SYMBOL(file_check_and_advance_wb_err);
*
* After writing out and waiting on the data, we check and advance the
* f_wb_err cursor to the latest value, and return any errors detected there.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
@@ -753,6 +772,8 @@ EXPORT_SYMBOL(file_write_and_wait_range);
* caller must do that.
*
* The remove + add is atomic. This function cannot fail.
+ *
+ * Return: %0
*/
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
@@ -867,6 +888,8 @@ error:
*
* This function is used to add a page to the pagecache. It must be locked.
* This function does not add the page to the LRU. The caller must do that.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
@@ -1463,7 +1486,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
* If the slot holds a shadow entry of a previously evicted page, or a
* swap entry from shmem/tmpfs, it is returned.
*
- * Otherwise, %NULL is returned.
+ * Return: the found page or shadow entry, %NULL if nothing is found.
*/
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
@@ -1521,9 +1544,9 @@ EXPORT_SYMBOL(find_get_entry);
* If the slot holds a shadow entry of a previously evicted page, or a
* swap entry from shmem/tmpfs, it is returned.
*
- * Otherwise, %NULL is returned.
- *
* find_lock_entry() may sleep.
+ *
+ * Return: the found page or shadow entry, %NULL if nothing is found.
*/
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
@@ -1563,12 +1586,14 @@ EXPORT_SYMBOL(find_lock_entry);
* - FGP_CREAT: If page is not present then a new page is allocated using
* @gfp_mask and added to the page cache and the VM's LRU
* list. The page is returned locked and with an increased
- * refcount. Otherwise, NULL is returned.
+ * refcount.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
*
* If there is a page cache page, it is returned with an increased refcount.
+ *
+ * Return: the found page or %NULL otherwise.
*/
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
int fgp_flags, gfp_t gfp_mask)
@@ -1656,8 +1681,7 @@ EXPORT_SYMBOL(pagecache_get_page);
* Any shadow entries of evicted pages, or swap entries from
* shmem/tmpfs, are included in the returned array.
*
- * find_get_entries() returns the number of pages and shadow entries
- * which were found.
+ * Return: the number of pages and shadow entries which were found.
*/
unsigned find_get_entries(struct address_space *mapping,
pgoff_t start, unsigned int nr_entries,
@@ -1727,8 +1751,8 @@ retry:
* indexes. There may be holes in the indices due to not-present pages.
* We also update @start to index the next page for the traversal.
*
- * find_get_pages_range() returns the number of pages which were found. If this
- * number is smaller than @nr_pages, the end of specified range has been
+ * Return: the number of pages which were found. If this number is
+ * smaller than @nr_pages, the end of specified range has been
* reached.
*/
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
@@ -1765,7 +1789,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
pages[ret] = page;
if (++ret == nr_pages) {
- *start = page->index + 1;
+ *start = xas.xa_index + 1;
goto out;
}
continue;
@@ -1801,7 +1825,7 @@ out:
* find_get_pages_contig() works exactly like find_get_pages(), except
* that the returned number of pages are guaranteed to be contiguous.
*
- * find_get_pages_contig() returns the number of pages which were found.
+ * Return: the number of pages which were found.
*/
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
unsigned int nr_pages, struct page **pages)
@@ -1837,16 +1861,6 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
if (unlikely(page != xas_reload(&xas)))
goto put_page;
- /*
- * must check mapping and index after taking the ref.
- * otherwise we can get both false positives and false
- * negatives, which is just confusing to the caller.
- */
- if (!page->mapping || page_to_pgoff(page) != xas.xa_index) {
- put_page(page);
- break;
- }
-
pages[ret] = page;
if (++ret == nr_pages)
break;
@@ -1872,6 +1886,8 @@ EXPORT_SYMBOL(find_get_pages_contig);
*
* Like find_get_pages, except we only return pages which are tagged with
* @tag. We update @index to index the next page for the traversal.
+ *
+ * Return: the number of pages which were found.
*/
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
@@ -1911,7 +1927,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pages[ret] = page;
if (++ret == nr_pages) {
- *index = page->index + 1;
+ *index = xas.xa_index + 1;
goto out;
}
continue;
@@ -1949,6 +1965,8 @@ EXPORT_SYMBOL(find_get_pages_range_tag);
*
* Like find_get_entries, except we only return entries which are tagged with
* @tag.
+ *
+ * Return: the number of entries which were found.
*/
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
xa_mark_t tag, unsigned int nr_entries,
@@ -2034,6 +2052,10 @@ static void shrink_readahead_size_eio(struct file *filp,
*
* This is really ugly. But the goto's actually try to clarify some
* of the logic when it comes to error handling etc.
+ *
+ * Return:
+ * * total number of bytes copied, including those that were already @written
+ * * negative error code if nothing was copied
*/
static ssize_t generic_file_buffered_read(struct kiocb *iocb,
struct iov_iter *iter, ssize_t written)
@@ -2295,6 +2317,9 @@ out:
*
* This is the "read_iter()" routine for all filesystems
* that can use the page cache directly.
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code if nothing was read
*/
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -2362,6 +2387,8 @@ EXPORT_SYMBOL(generic_file_read_iter);
*
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
@@ -2476,6 +2503,8 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
* has not been released.
*
* We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
+ *
+ * Return: bitwise-OR of %VM_FAULT_ codes.
*/
vm_fault_t filemap_fault(struct vm_fault *vmf)
{
@@ -2861,6 +2890,8 @@ out:
* not set, try to fill the page and wait for it to become unlocked.
*
* If the page does not get brought uptodate, return -EIO.
+ *
+ * Return: up to date page on success, ERR_PTR() on failure.
*/
struct page *read_cache_page(struct address_space *mapping,
pgoff_t index,
@@ -2881,6 +2912,8 @@ EXPORT_SYMBOL(read_cache_page);
* any new page allocations done using the specified allocation flags.
*
* If the page does not get brought uptodate, return -EIO.
+ *
+ * Return: up to date page on success, ERR_PTR() on failure.
*/
struct page *read_cache_page_gfp(struct address_space *mapping,
pgoff_t index,
@@ -3081,7 +3114,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
if (iocb->ki_flags & IOCB_NOWAIT) {
/* If there are pages to writeback, return */
if (filemap_range_has_page(inode->i_mapping, pos,
- pos + write_len))
+ pos + write_len - 1))
return -EAGAIN;
} else {
written = filemap_write_and_wait_range(mapping, pos,
@@ -3264,6 +3297,10 @@ EXPORT_SYMBOL(generic_perform_write);
* This function does *not* take care of syncing data in case of O_SYNC write.
* A caller has to handle it. This is mainly due to the fact that we want to
* avoid syncing under i_mutex.
+ *
+ * Return:
+ * * number of bytes written, even for truncated writes
+ * * negative error code if no data has been written at all
*/
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
@@ -3348,6 +3385,10 @@ EXPORT_SYMBOL(__generic_file_write_iter);
* This is a wrapper around __generic_file_write_iter() to be used by most
* filesystems. It takes care of syncing the file in case of O_SYNC file
* and acquires i_mutex as needed.
+ * Return:
+ * * negative error code if no data has been written at all or
+ * vfs_fsync_range() failed for a synchronous write
+ * * number of bytes written, even for truncated writes
*/
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
@@ -3374,8 +3415,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
* @gfp_mask: memory allocation flags (and I/O mode)
*
* The address_space is to try to release any data against the page
- * (presumably at page->private). If the release was successful, return '1'.
- * Otherwise return zero.
+ * (presumably at page->private).
*
* This may also be called if PG_fscache is set on a page, indicating that the
* page is known to the local caching routines.
@@ -3383,6 +3423,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
* The @gfp_mask argument specifies whether I/O may be performed to release
* this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
*
+ * Return: %1 if the release was successful, otherwise return zero.
*/
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
diff --git a/mm/gup.c b/mm/gup.c
index 05acd7e2eb22..22291db50013 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -13,6 +13,9 @@
#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
+#include <linux/migrate.h>
+#include <linux/mm_inline.h>
+#include <linux/sched/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
@@ -1126,7 +1129,167 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
}
EXPORT_SYMBOL(get_user_pages);
+#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
+
#ifdef CONFIG_FS_DAX
+static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
+{
+ long i;
+ struct vm_area_struct *vma_prev = NULL;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct vm_area_struct *vma = vmas[i];
+
+ if (vma == vma_prev)
+ continue;
+
+ vma_prev = vma;
+
+ if (vma_is_fsdax(vma))
+ return true;
+ }
+ return false;
+}
+#else
+static inline bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_CMA
+static struct page *new_non_cma_page(struct page *page, unsigned long private)
+{
+ /*
+ * We want to make sure we allocate the new page from the same node
+ * as the source page.
+ */
+ int nid = page_to_nid(page);
+ /*
+ * Trying to allocate a page for migration. Ignore allocation
+ * failure warnings. We don't force __GFP_THISNODE here because
+ * this node is the node where we have the CMA reservation, and in
+ * some cases such nodes have very little non-movable memory
+ * available for allocation.
+ */
+ gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
+
+ if (PageHighMem(page))
+ gfp_mask |= __GFP_HIGHMEM;
+
+#ifdef CONFIG_HUGETLB_PAGE
+ if (PageHuge(page)) {
+ struct hstate *h = page_hstate(page);
+ /*
+ * We don't want to dequeue from the pool because pool pages will
+ * mostly be from the CMA region.
+ */
+ return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
+ }
+#endif
+ if (PageTransHuge(page)) {
+ struct page *thp;
+ /*
+ * ignore allocation failure warnings
+ */
+ gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
+
+ /*
+ * Remove the movable mask so that we don't allocate from
+ * CMA area again.
+ */
+ thp_gfpmask &= ~__GFP_MOVABLE;
+ thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
+ if (!thp)
+ return NULL;
+ prep_transhuge_page(thp);
+ return thp;
+ }
+
+ return __alloc_pages_node(nid, gfp_mask, 0);
+}
+
+static long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
+ unsigned int gup_flags,
+ struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ long i;
+ bool drain_allow = true;
+ bool migrate_allow = true;
+ LIST_HEAD(cma_page_list);
+
+check_again:
+ for (i = 0; i < nr_pages; i++) {
+ /*
+ * If we get a page from the CMA zone, since we are going to
+ * be pinning these entries, we might as well move them out
+ * of the CMA zone if possible.
+ */
+ if (is_migrate_cma_page(pages[i])) {
+
+ struct page *head = compound_head(pages[i]);
+
+ if (PageHuge(head)) {
+ isolate_huge_page(head, &cma_page_list);
+ } else {
+ if (!PageLRU(head) && drain_allow) {
+ lru_add_drain_all();
+ drain_allow = false;
+ }
+
+ if (!isolate_lru_page(head)) {
+ list_add_tail(&head->lru, &cma_page_list);
+ mod_node_page_state(page_pgdat(head),
+ NR_ISOLATED_ANON +
+ page_is_file_cache(head),
+ hpage_nr_pages(head));
+ }
+ }
+ }
+ }
+
+ if (!list_empty(&cma_page_list)) {
+ /*
+ * drop the above get_user_pages reference.
+ */
+ for (i = 0; i < nr_pages; i++)
+ put_page(pages[i]);
+
+ if (migrate_pages(&cma_page_list, new_non_cma_page,
+ NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+ /*
+ * some of the pages failed migration. Do get_user_pages
+ * without migration.
+ */
+ migrate_allow = false;
+
+ if (!list_empty(&cma_page_list))
+ putback_movable_pages(&cma_page_list);
+ }
+ /*
+ * We did migrate all the pages; try to get the page references again,
+ * migrating any new CMA pages which we failed to isolate earlier.
+ */
+ nr_pages = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+ if ((nr_pages > 0) && migrate_allow) {
+ drain_allow = true;
+ goto check_again;
+ }
+ }
+
+ return nr_pages;
+}
+#else
+static inline long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
+ unsigned int gup_flags,
+ struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ return nr_pages;
+}
+#endif
+
/*
* This is the same as get_user_pages() in that it assumes we are
* operating on the current task's mm, but it goes further to validate
@@ -1140,11 +1303,11 @@ EXPORT_SYMBOL(get_user_pages);
* Contrast this to iov_iter_get_pages() usages which are transient.
*/
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas_arg)
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas_arg)
{
struct vm_area_struct **vmas = vmas_arg;
- struct vm_area_struct *vma_prev = NULL;
+ unsigned long flags;
long rc, i;
if (!pages)
@@ -1157,31 +1320,20 @@ long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
return -ENOMEM;
}
+ flags = memalloc_nocma_save();
rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+ memalloc_nocma_restore(flags);
+ if (rc < 0)
+ goto out;
- for (i = 0; i < rc; i++) {
- struct vm_area_struct *vma = vmas[i];
-
- if (vma == vma_prev)
- continue;
-
- vma_prev = vma;
-
- if (vma_is_fsdax(vma))
- break;
- }
-
- /*
- * Either get_user_pages() failed, or the vma validation
- * succeeded, in either case we don't need to put_page() before
- * returning.
- */
- if (i >= rc)
+ if (check_dax_vmas(vmas, rc)) {
+ for (i = 0; i < rc; i++)
+ put_page(pages[i]);
+ rc = -EOPNOTSUPP;
goto out;
+ }
- for (i = 0; i < rc; i++)
- put_page(pages[i]);
- rc = -EOPNOTSUPP;
+ rc = check_and_migrate_cma_pages(start, rc, gup_flags, pages, vmas);
out:
if (vmas != vmas_arg)
kfree(vmas);
@@ -1674,7 +1826,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
if (!pmd_present(pmd))
return 0;
- if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
+ if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
+ pmd_devmap(pmd))) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
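The mm/gup.c changes wrap the long-term pin path in a memalloc_nocma scope and then migrate already-pinned CMA pages out of CMA areas. A hedged sketch of the scoping part is below, reusing the memalloc_nocma_save()/memalloc_nocma_restore() and get_user_pages() calls that appear in the hunk; the wrapper function itself is hypothetical.

#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * Hypothetical wrapper: pin user pages while new allocations made during
 * the walk avoid CMA areas, mirroring the scope used in
 * get_user_pages_longterm() above.
 */
static long pin_user_range_nocma(unsigned long start, unsigned long nr_pages,
				 unsigned int gup_flags, struct page **pages)
{
	unsigned long flags;
	long pinned;

	flags = memalloc_nocma_save();	/* mark this task's allocation scope */
	pinned = get_user_pages(start, nr_pages, gup_flags, pages, NULL);
	memalloc_nocma_restore(flags);	/* drop the scope again */

	return pinned;
}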
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 5b42d3d4b60a..6c0279e70cc4 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -122,12 +122,8 @@ static const struct file_operations gup_benchmark_fops = {
static int gup_benchmark_init(void)
{
- void *ret;
-
- ret = debugfs_create_file_unsafe("gup_benchmark", 0600, NULL, NULL,
- &gup_benchmark_fops);
- if (!ret)
- pr_warn("Failed to create gup_benchmark in debugfs");
+ debugfs_create_file_unsafe("gup_benchmark", 0600, NULL, NULL,
+ &gup_benchmark_fops);
return 0;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index faf357eaf0ce..404acdcd0455 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -33,6 +33,7 @@
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
+#include <linux/numa.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -616,6 +617,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
mm_inc_nr_ptes(vma->vm_mm);
spin_unlock(vmf->ptl);
count_vm_event(THP_FAULT_ALLOC);
+ count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
}
return 0;
@@ -1337,6 +1339,7 @@ alloc:
}
count_vm_event(THP_FAULT_ALLOC);
+ count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
if (!page)
clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
@@ -1475,7 +1478,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
struct anon_vma *anon_vma = NULL;
struct page *page;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
- int page_nid = -1, this_nid = numa_node_id();
+ int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
int target_nid, last_cpupid = -1;
bool page_locked;
bool migrated = false;
@@ -1520,7 +1523,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
*/
page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr);
- if (target_nid == -1) {
+ if (target_nid == NUMA_NO_NODE) {
/* If the page was locked, there are no parallel migrations */
if (page_locked)
goto clear_pmdnuma;
@@ -1528,7 +1531,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
- page_nid = -1;
+ page_nid = NUMA_NO_NODE;
if (!get_page_unless_zero(page))
goto out_unlock;
spin_unlock(vmf->ptl);
@@ -1549,14 +1552,14 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
unlock_page(page);
put_page(page);
- page_nid = -1;
+ page_nid = NUMA_NO_NODE;
goto out_unlock;
}
/* Bail if we fail to protect against THP splits for any reason */
if (unlikely(!anon_vma)) {
put_page(page);
- page_nid = -1;
+ page_nid = NUMA_NO_NODE;
goto clear_pmdnuma;
}
@@ -1618,7 +1621,7 @@ out:
if (anon_vma)
page_unlock_anon_vma_read(anon_vma);
- if (page_nid != -1)
+ if (page_nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
flags);
@@ -1979,7 +1982,6 @@ spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
pud_t *pud, unsigned long addr)
{
- pud_t orig_pud;
spinlock_t *ptl;
ptl = __pud_trans_huge_lock(pud, vma);
@@ -1991,8 +1993,7 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
* pgtable_trans_huge_withdraw after finishing pudp related
* operations.
*/
- orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
- tlb->fullmm);
+ pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
if (vma_is_dax(vma)) {
spin_unlock(ptl);
@@ -2437,11 +2438,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
pgoff_t end, unsigned long flags)
{
struct page *head = compound_head(page);
- struct zone *zone = page_zone(head);
+ pg_data_t *pgdat = page_pgdat(head);
struct lruvec *lruvec;
int i;
- lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
+ lruvec = mem_cgroup_page_lruvec(head, pgdat);
/* complete memcg works before add pages to LRU */
mem_cgroup_split_huge_fixup(head);
@@ -2472,7 +2473,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
xa_unlock(&head->mapping->i_pages);
}
- spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
+ spin_unlock_irqrestore(&pgdat->lru_lock, flags);
remap_page(head);
@@ -2683,7 +2684,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
lru_add_drain();
/* prevent PageLRU to go away from under us, and freeze lru stats */
- spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
+ spin_lock_irqsave(&pgdata->lru_lock, flags);
if (mapping) {
XA_STATE(xas, &mapping->i_pages, page_index(head));
@@ -2728,7 +2729,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
spin_unlock(&pgdata->split_queue_lock);
fail: if (mapping)
xa_unlock(&mapping->i_pages);
- spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
+ spin_unlock_irqrestore(&pgdata->lru_lock, flags);
remap_page(head);
ret = -EBUSY;
}
@@ -2886,12 +2887,8 @@ DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
static int __init split_huge_pages_debugfs(void)
{
- void *ret;
-
- ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
- &split_huge_pages_fops);
- if (!ret)
- pr_warn("Failed to create split_huge_pages in debugfs");
+ debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
+ &split_huge_pages_fops);
return 0;
}
late_initcall(split_huge_pages_debugfs);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 745088810965..97b1e0290c66 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -25,6 +25,7 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
+#include <linux/numa.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -887,7 +888,7 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
- int node = -1;
+ int node = NUMA_NO_NODE;
zonelist = node_zonelist(nid, gfp_mask);
@@ -919,7 +920,7 @@ retry_cpuset:
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
- if (hugepage_migration_supported(h))
+ if (hugepage_movable_supported(h))
return GFP_HIGHUSER_MOVABLE;
else
return GFP_HIGHUSER;
@@ -1586,8 +1587,8 @@ out_unlock:
return page;
}
-static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
- int nid, nodemask_t *nmask)
+struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+ int nid, nodemask_t *nmask)
{
struct page *page;
@@ -3238,7 +3239,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct page *ptepage;
unsigned long addr;
int cow;
- struct address_space *mapping = vma->vm_file->f_mapping;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mmu_notifier_range range;
@@ -3250,23 +3250,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
mmu_notifier_range_init(&range, src, vma->vm_start,
vma->vm_end);
mmu_notifier_invalidate_range_start(&range);
- } else {
- /*
- * For shared mappings i_mmap_rwsem must be held to call
- * huge_pte_alloc, otherwise the returned ptep could go
- * away if part of a shared pmd and another thread calls
- * huge_pmd_unshare.
- */
- i_mmap_lock_read(mapping);
}
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
spinlock_t *src_ptl, *dst_ptl;
-
src_pte = huge_pte_offset(src, addr, sz);
if (!src_pte)
continue;
-
dst_pte = huge_pte_alloc(dst, addr, sz);
if (!dst_pte) {
ret = -ENOMEM;
@@ -3337,8 +3327,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
if (cow)
mmu_notifier_invalidate_range_end(&range);
- else
- i_mmap_unlock_read(mapping);
return ret;
}
@@ -3637,7 +3625,6 @@ retry_avoidcopy:
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
- set_page_huge_active(new_page);
mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
mmu_notifier_invalidate_range_start(&range);
@@ -3658,6 +3645,7 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
+ set_page_huge_active(new_page);
/* Make the old page be freed below */
new_page = old_page;
}
@@ -3742,6 +3730,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
pte_t new_pte;
spinlock_t *ptl;
unsigned long haddr = address & huge_page_mask(h);
+ bool new_page = false;
/*
* Currently, we are forced to kill the process in the event the
@@ -3755,16 +3744,16 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
}
/*
- * We can not race with truncation due to holding i_mmap_rwsem.
- * Check once here for faults beyond end of file.
+ * Use page lock to guard against racing truncation
+ * before we get page_table_lock.
*/
- size = i_size_read(mapping->host) >> huge_page_shift(h);
- if (idx >= size)
- goto out;
-
retry:
page = find_lock_page(mapping, idx);
if (!page) {
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ if (idx >= size)
+ goto out;
+
/*
* Check for page in userfault range
*/
@@ -3784,18 +3773,14 @@ retry:
};
/*
- * hugetlb_fault_mutex and i_mmap_rwsem must be
- * dropped before handling userfault. Reacquire
- * after handling fault to make calling code simpler.
+ * hugetlb_fault_mutex must be dropped before
+ * handling userfault. Reacquire after handling
+ * fault to make calling code simpler.
*/
hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
idx, haddr);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
-
ret = handle_userfault(&vmf, VM_UFFD_MISSING);
-
- i_mmap_lock_read(mapping);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
goto out;
}
@@ -3807,7 +3792,7 @@ retry:
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
- set_page_huge_active(page);
+ new_page = true;
if (vma->vm_flags & VM_MAYSHARE) {
int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3854,6 +3839,9 @@ retry:
}
ptl = huge_pte_lock(h, mm, ptep);
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ if (idx >= size)
+ goto backout;
ret = 0;
if (!huge_pte_none(huge_ptep_get(ptep)))
@@ -3875,6 +3863,15 @@ retry:
}
spin_unlock(ptl);
+
+ /*
+ * Only make newly allocated pages active. Existing pages found
+ * in the pagecache could be !page_huge_active() if they have been
+ * isolated for migration.
+ */
+ if (new_page)
+ set_page_huge_active(page);
+
unlock_page(page);
out:
return ret;
@@ -3940,11 +3937,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (ptep) {
- /*
- * Since we hold no locks, ptep could be stale. That is
- * OK as we are only making decisions based on content and
- * not actually modifying content here.
- */
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
migration_entry_wait_huge(vma, mm, ptep);
@@ -3952,33 +3944,20 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
+ } else {
+ ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+ if (!ptep)
+ return VM_FAULT_OOM;
}
- /*
- * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
- * until finished with ptep. This serves two purposes:
- * 1) It prevents huge_pmd_unshare from being called elsewhere
- * and making the ptep no longer valid.
- * 2) It synchronizes us with file truncation.
- *
- * ptep could have already be assigned via huge_pte_offset. That
- * is OK, as huge_pte_alloc will return the same value unless
- * something changed.
- */
mapping = vma->vm_file->f_mapping;
- i_mmap_lock_read(mapping);
- ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
- if (!ptep) {
- i_mmap_unlock_read(mapping);
- return VM_FAULT_OOM;
- }
+ idx = vma_hugecache_offset(h, vma, haddr);
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
- idx = vma_hugecache_offset(h, vma, haddr);
hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -4066,7 +4045,6 @@ out_ptl:
}
out_mutex:
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
/*
* Generally it's safe to hold refcount during waiting page lock. But
* here we just wait to defer the next page fault to avoid busy loop and
@@ -4128,7 +4106,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
* the set_pte_at() write.
*/
__SetPageUptodate(page);
- set_page_huge_active(page);
mapping = dst_vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4196,6 +4173,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
+ set_page_huge_active(page);
if (vm_shared)
unlock_page(page);
ret = 0;
@@ -4301,7 +4279,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
break;
}
if (ret & VM_FAULT_RETRY) {
- if (nonblocking)
+ if (nonblocking &&
+ !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
*nonblocking = 0;
*nr_pages = 0;
/*
@@ -4420,10 +4399,12 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
continue;
}
if (!huge_pte_none(pte)) {
- pte = huge_ptep_get_and_clear(mm, address, ptep);
- pte = pte_mkhuge(huge_pte_modify(pte, newprot));
+ pte_t old_pte;
+
+ old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
+ pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
pte = arch_make_huge_pte(pte, vma, NULL, 0);
- set_huge_pte_at(mm, address, ptep, pte);
+ huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
pages++;
}
spin_unlock(ptl);
@@ -4671,12 +4652,10 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner.
- *
- * This routine must be called with i_mmap_rwsem held in at least read mode.
- * For hugetlbfs, this prevents removal of any page table entries associated
- * with the address space. This is important as we are setting up sharing
- * based on existing page table entries (mappings).
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_rwsem section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
*/
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
@@ -4693,6 +4672,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
if (!vma_shareable(vma, addr))
return (pte_t *)pmd_alloc(mm, pud, addr);
+ i_mmap_lock_write(mapping);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
if (svma == vma)
continue;
@@ -4722,6 +4702,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
spin_unlock(ptl);
out:
pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ i_mmap_unlock_write(mapping);
return pte;
}
@@ -4732,7 +4713,7 @@ out:
* indicated by page_count > 1, unmap is achieved by clearing pud and
* decrementing the ref count. If count == 1, the pte page is not shared.
*
- * Called with page table lock held and i_mmap_rwsem held in write mode.
+ * called with page table lock held.
*
* returns: 1 successfully unmapped a shared pte page
* 0 the underlying pte page is not shared, or it is the last user
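hugetlb_change_protection() above now goes through a huge_ptep_modify_prot_start()/huge_ptep_modify_prot_commit() pair instead of an open-coded get-and-clear plus set. As a sketch, a generic fallback for that pair could look like the following; architectures may override it to batch or defer TLB work, and these bodies are an assumption rather than a definition taken from the series.

/*
 * Sketch of a generic fallback for the start/commit pair used in
 * hugetlb_change_protection() above.  The helper names and call sequence
 * come from the hunk; the bodies are assumptions.
 */
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	/* Clear the entry so a racing hardware walker never sees a half update. */
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	/* Publish the new protection; old_pte lets an arch optimize the flush. */
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}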
diff --git a/mm/internal.h b/mm/internal.h
index f4a7bb02decf..9eeaf2b95166 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -163,6 +163,7 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
extern int __isolate_free_page(struct page *page, unsigned int order);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
+extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
@@ -183,14 +184,16 @@ extern int user_min_free_kbytes;
struct compact_control {
struct list_head freepages; /* List of free pages to migrate to */
struct list_head migratepages; /* List of pages being migrated */
+ unsigned int nr_freepages; /* Number of isolated free pages */
+ unsigned int nr_migratepages; /* Number of pages to migrate */
+ unsigned long free_pfn; /* isolate_freepages search base */
+ unsigned long migrate_pfn; /* isolate_migratepages search base */
+ unsigned long fast_start_pfn; /* a pfn to start linear scan from */
struct zone *zone;
- unsigned long nr_freepages; /* Number of isolated free pages */
- unsigned long nr_migratepages; /* Number of pages to migrate */
unsigned long total_migrate_scanned;
unsigned long total_free_scanned;
- unsigned long free_pfn; /* isolate_freepages search base */
- unsigned long migrate_pfn; /* isolate_migratepages search base */
- unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
+ unsigned short fast_search_fail;/* failures to use free list searches */
+ short search_order; /* order to start a fast search at */
const gfp_t gfp_mask; /* gfp mask of a direct compactor */
int order; /* order a direct compactor needs */
int migratetype; /* migratetype of direct compactor */
@@ -203,7 +206,16 @@ struct compact_control {
bool direct_compaction; /* False from kcompactd or /proc/... */
bool whole_zone; /* Whole zone should/has been scanned */
bool contended; /* Signal lock or sched contention */
- bool finishing_block; /* Finishing current pageblock */
+ bool rescan; /* Rescanning the same pageblock */
+};
+
+/*
+ * Used in direct compaction so that a suitable page created during the
+ * free path can be captured immediately instead of going onto the freelists.
+ */
+struct capture_control {
+ struct compact_control *cc;
+ struct page *page;
};
unsigned long
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 0a14fcff70ed..5d1065efbd47 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -5,7 +5,10 @@ UBSAN_SANITIZE_generic.o := n
UBSAN_SANITIZE_tags.o := n
KCOV_INSTRUMENT := n
+CFLAGS_REMOVE_common.o = -pg
CFLAGS_REMOVE_generic.o = -pg
+CFLAGS_REMOVE_tags.o = -pg
+
# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 03d5d1374ca7..80bbe62b16cd 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -14,6 +14,8 @@
*
*/
+#define __KASAN_INTERNAL
+
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -298,8 +300,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
return;
}
- cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
-
*flags |= SLAB_KASAN;
}
@@ -349,28 +349,48 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
}
/*
- * Since it's desirable to only call object contructors once during slab
- * allocation, we preassign tags to all such objects. Also preassign tags for
- * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
- * For SLAB allocator we can't preassign tags randomly since the freelist is
- * stored as an array of indexes instead of a linked list. Assign tags based
- * on objects indexes, so that objects that are next to each other get
- * different tags.
- * After a tag is assigned, the object always gets allocated with the same tag.
- * The reason is that we can't change tags for objects with constructors on
- * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
- * code can save the pointer to the object somewhere (e.g. in the object
- * itself). Then if we retag it, the old saved pointer will become invalid.
+ * This function assigns a tag to an object considering the following:
+ * 1. A cache might have a constructor, which might save a pointer to a slab
+ * object somewhere (e.g. in the object itself). We preassign a tag for
+ * each object in caches with constructors during slab creation and reuse
+ * the same tag each time a particular object is allocated.
+ * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
+ * accessed after being freed. We preassign tags for objects in these
+ * caches as well.
+ * 3. For SLAB allocator we can't preassign tags randomly since the freelist
+ * is stored as an array of indexes instead of a linked list. Assign tags
+ * based on object indexes, so that objects that are next to each other
+ * get different tags.
*/
-static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+static u8 assign_tag(struct kmem_cache *cache, const void *object,
+ bool init, bool keep_tag)
{
+ /*
+ * 1. When an object is kmalloc()'ed, two hooks are called:
+ * kasan_slab_alloc() and kasan_kmalloc(). We assign the
+ * tag only in the first one.
+ * 2. We reuse the same tag for krealloc'ed objects.
+ */
+ if (keep_tag)
+ return get_tag(object);
+
+ /*
+ * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
+ * set, assign a tag when the object is being allocated (init == false).
+ */
if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
- return new ? KASAN_TAG_KERNEL : random_tag();
+ return init ? KASAN_TAG_KERNEL : random_tag();
+ /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
+ /* For SLAB assign tags based on the object index in the freelist. */
return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
- return new ? random_tag() : get_tag(object);
+ /*
+ * For SLUB assign a random tag during slab creation, otherwise reuse
+ * the already assigned tag.
+ */
+ return init ? random_tag() : get_tag(object);
#endif
}
@@ -386,17 +406,12 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
__memset(alloc_info, 0, sizeof(*alloc_info));
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
- object = set_tag(object, assign_tag(cache, object, true));
+ object = set_tag(object,
+ assign_tag(cache, object, true, false));
return (void *)object;
}
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
- gfp_t flags)
-{
- return kasan_kmalloc(cache, object, cache->object_size, flags);
-}
-
static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -452,8 +467,8 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
return __kasan_slab_free(cache, object, ip, true);
}
-void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
- size_t size, gfp_t flags)
+static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
+ size_t size, gfp_t flags, bool keep_tag)
{
unsigned long redzone_start;
unsigned long redzone_end;
@@ -471,7 +486,7 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
KASAN_SHADOW_SCALE_SIZE);
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
- tag = assign_tag(cache, object, false);
+ tag = assign_tag(cache, object, false, keep_tag);
/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -483,6 +498,18 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
return set_tag(object, tag);
}
+
+void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
+ gfp_t flags)
+{
+ return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+}
+
+void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
+ size_t size, gfp_t flags)
+{
+ return __kasan_kmalloc(cache, object, size, flags, true);
+}
EXPORT_SYMBOL(kasan_kmalloc);
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
@@ -522,7 +549,8 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
if (unlikely(!PageSlab(page)))
return kasan_kmalloc_large(object, size, flags);
else
- return kasan_kmalloc(page->slab_cache, object, size, flags);
+ return __kasan_kmalloc(page->slab_cache, object, size,
+ flags, true);
}
void kasan_poison_kfree(void *ptr, unsigned long ip)
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index ccb6207276e3..504c79363a34 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -275,25 +275,6 @@ EXPORT_SYMBOL(__asan_storeN_noabort);
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);
-/* Emitted by compiler to poison large objects when they go out of scope. */
-void __asan_poison_stack_memory(const void *addr, size_t size)
-{
- /*
- * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
- * by redzones, so we simply round up size to simplify logic.
- */
- kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
- KASAN_USE_AFTER_SCOPE);
-}
-EXPORT_SYMBOL(__asan_poison_stack_memory);
-
-/* Emitted by compiler to unpoison large objects when they go into scope. */
-void __asan_unpoison_stack_memory(const void *addr, size_t size)
-{
- kasan_unpoison_shadow(addr, size);
-}
-EXPORT_SYMBOL(__asan_unpoison_stack_memory);
-
/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
diff --git a/mm/kasan/generic_report.c b/mm/kasan/generic_report.c
index 5e12035888f2..36c645939bc9 100644
--- a/mm/kasan/generic_report.c
+++ b/mm/kasan/generic_report.c
@@ -82,9 +82,6 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
case KASAN_KMALLOC_FREE:
bug_type = "use-after-free";
break;
- case KASAN_USE_AFTER_SCOPE:
- bug_type = "use-after-scope";
- break;
case KASAN_ALLOCA_LEFT:
case KASAN_ALLOCA_RIGHT:
bug_type = "alloca-out-of-bounds";
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index 45a1b5e38e1e..fcaa1ca03175 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -42,7 +42,7 @@ static inline bool kasan_p4d_table(pgd_t pgd)
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
- return 0;
+ return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
@@ -54,7 +54,7 @@ static inline bool kasan_pud_table(p4d_t p4d)
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
- return 0;
+ return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
@@ -66,7 +66,7 @@ static inline bool kasan_pmd_table(pud_t pud)
#else
static inline bool kasan_pmd_table(pud_t pud)
{
- return 0;
+ return false;
}
#endif
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index ea51b2d898ec..3e0c11f7d7a1 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -34,7 +34,6 @@
#define KASAN_STACK_MID 0xF2
#define KASAN_STACK_RIGHT 0xF3
#define KASAN_STACK_PARTIAL 0xF4
-#define KASAN_USE_AFTER_SCOPE 0xF8
/*
* alloca redzone shadow values
@@ -187,8 +186,6 @@ void __asan_unregister_globals(struct kasan_global *globals, size_t size);
void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);
void __asan_handle_no_return(void);
-void __asan_poison_stack_memory(const void *addr, size_t size);
-void __asan_unpoison_stack_memory(const void *addr, size_t size);
void __asan_alloca_poison(unsigned long addr, size_t size);
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 0777649e07c4..63fca3172659 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -46,7 +46,7 @@ void kasan_init_tags(void)
int cpu;
for_each_possible_cpu(cpu)
- per_cpu(prng_state, cpu) = get_random_u32();
+ per_cpu(prng_state, cpu) = (u32)get_cycles();
}
/*
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 4f017339ddb2..449044378782 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1074,6 +1074,7 @@ static void collapse_huge_page(struct mm_struct *mm,
BUG_ON(!pmd_none(*pmd));
page_add_new_anon_rmap(new_page, vma, address, true);
mem_cgroup_commit_charge(new_page, memcg, false, true);
+ count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
lru_cache_add_active_or_unevictable(new_page, vma);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
@@ -1502,6 +1503,7 @@ xa_unlocked:
page_ref_add(new_page, HPAGE_PMD_NR - 1);
set_page_dirty(new_page);
mem_cgroup_commit_charge(new_page, memcg, false, true);
+ count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
lru_cache_add_anon(new_page);
/*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f9d9dc250428..707fa5579f66 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
unsigned long flags;
struct kmemleak_object *object, *parent;
struct rb_node **link, *rb_parent;
+ unsigned long untagged_ptr;
object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
if (!object) {
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
write_lock_irqsave(&kmemleak_lock, flags);
- min_addr = min(min_addr, ptr);
- max_addr = max(max_addr, ptr + size);
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+ min_addr = min(min_addr, untagged_ptr);
+ max_addr = max(max_addr, untagged_ptr + size);
link = &object_tree_root.rb_node;
rb_parent = NULL;
while (*link) {
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end,
unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
unsigned long *end = _end - (BYTES_PER_POINTER - 1);
unsigned long flags;
+ unsigned long untagged_ptr;
read_lock_irqsave(&kmemleak_lock, flags);
for (ptr = start; ptr < end; ptr++) {
@@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end,
pointer = *ptr;
kasan_enable_current();
- if (pointer < min_addr || pointer >= max_addr)
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+ if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
continue;
/*
diff --git a/mm/ksm.c b/mm/ksm.c
index 6c48ad13b4c9..fc64874dc6f4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -598,7 +598,7 @@ static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
chain->chain_prune_time = jiffies;
chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
- chain->nid = -1; /* debug */
+ chain->nid = NUMA_NO_NODE; /* debug */
#endif
ksm_stable_node_chains++;
@@ -667,6 +667,12 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
free_stable_node(stable_node);
}
+enum get_ksm_page_flags {
+ GET_KSM_PAGE_NOLOCK,
+ GET_KSM_PAGE_LOCK,
+ GET_KSM_PAGE_TRYLOCK
+};
+
/*
* get_ksm_page: checks if the page indicated by the stable node
* is still its ksm page, despite having held no reference to it.
@@ -686,7 +692,8 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
* a page to put something that might look like our key in page->mapping.
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
+static struct page *get_ksm_page(struct stable_node *stable_node,
+ enum get_ksm_page_flags flags)
{
struct page *page;
void *expected_mapping;
@@ -706,8 +713,9 @@ again:
* case this node is no longer referenced, and should be freed;
* however, it might mean that the page is under page_ref_freeze().
* The __remove_mapping() case is easy, again the node is now stale;
- * but if page is swapcache in migrate_page_move_mapping(), it might
- * still be our page, in which case it's essential to keep the node.
+ * the same is true in the reuse_ksm_page() case; but if page is swapcache
+ * in migrate_page_move_mapping(), it might still be our page,
+ * in which case it's essential to keep the node.
*/
while (!get_page_unless_zero(page)) {
/*
@@ -728,8 +736,15 @@ again:
goto stale;
}
- if (lock_it) {
+ if (flags == GET_KSM_PAGE_TRYLOCK) {
+ if (!trylock_page(page)) {
+ put_page(page);
+ return ERR_PTR(-EBUSY);
+ }
+ } else if (flags == GET_KSM_PAGE_LOCK)
lock_page(page);
+
+ if (flags != GET_KSM_PAGE_NOLOCK) {
if (READ_ONCE(page->mapping) != expected_mapping) {
unlock_page(page);
put_page(page);
@@ -763,7 +778,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
struct page *page;
stable_node = rmap_item->head;
- page = get_ksm_page(stable_node, true);
+ page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
if (!page)
goto out;
@@ -863,7 +878,7 @@ static int remove_stable_node(struct stable_node *stable_node)
struct page *page;
int err;
- page = get_ksm_page(stable_node, true);
+ page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
if (!page) {
/*
* get_ksm_page did remove_node_from_stable_tree itself.
@@ -1385,7 +1400,7 @@ static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
* stable_node parameter itself will be freed from
* under us if it returns NULL.
*/
- _tree_page = get_ksm_page(dup, false);
+ _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
if (!_tree_page)
continue;
nr += 1;
@@ -1508,7 +1523,7 @@ static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
if (!is_stable_node_chain(stable_node)) {
if (is_page_sharing_candidate(stable_node)) {
*_stable_node_dup = stable_node;
- return get_ksm_page(stable_node, false);
+ return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
}
/*
* _stable_node_dup set to NULL means the stable_node
@@ -1613,7 +1628,8 @@ again:
* wrprotected at all times. Any will work
* fine to continue the walk.
*/
- tree_page = get_ksm_page(stable_node_any, false);
+ tree_page = get_ksm_page(stable_node_any,
+ GET_KSM_PAGE_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
if (!tree_page) {
@@ -1673,7 +1689,12 @@ again:
* It would be more elegant to return stable_node
* than kpage, but that involves more changes.
*/
- tree_page = get_ksm_page(stable_node_dup, true);
+ tree_page = get_ksm_page(stable_node_dup,
+ GET_KSM_PAGE_TRYLOCK);
+
+ if (PTR_ERR(tree_page) == -EBUSY)
+ return ERR_PTR(-EBUSY);
+
if (unlikely(!tree_page))
/*
* The tree may have been rebalanced,
@@ -1842,7 +1863,8 @@ again:
* wrprotected at all times. Any will work
* fine to continue the walk.
*/
- tree_page = get_ksm_page(stable_node_any, false);
+ tree_page = get_ksm_page(stable_node_any,
+ GET_KSM_PAGE_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
if (!tree_page) {
@@ -2068,6 +2090,9 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
remove_rmap_item_from_tree(rmap_item);
if (kpage) {
+ if (PTR_ERR(kpage) == -EBUSY)
+ return;
+
err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
if (!err) {
/*
@@ -2242,7 +2267,8 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
list_for_each_entry_safe(stable_node, next,
&migrate_nodes, list) {
- page = get_ksm_page(stable_node, false);
+ page = get_ksm_page(stable_node,
+ GET_KSM_PAGE_NOLOCK);
if (page)
put_page(page);
cond_resched();
@@ -2642,6 +2668,31 @@ again:
goto again;
}
+bool reuse_ksm_page(struct page *page,
+ struct vm_area_struct *vma,
+ unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+ if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+ WARN_ON(!page_mapped(page)) ||
+ WARN_ON(!PageLocked(page))) {
+ dump_page(page, "reuse_ksm_page");
+ return false;
+ }
+#endif
+
+ if (PageSwapCache(page) || !page_stable_node(page))
+ return false;
+ /* Prohibit parallel get_ksm_page() */
+ if (!page_ref_freeze(page, 1))
+ return false;
+
+ page_move_anon_rmap(page, vma);
+ page->index = linear_page_index(vma, address);
+ page_ref_unfreeze(page, 1);
+
+ return true;
+}
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
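Taken together, the ksm.c hunks turn get_ksm_page()'s bool parameter into a tristate and add a trylock mode that can now fail with ERR_PTR(-EBUSY), which stable_tree_search() and cmp_and_merge_page() propagate. A hedged sketch of the resulting caller pattern (the ksm_lookup_locked() wrapper is hypothetical):
/*
 * Illustrative only: how a caller distinguishes the three results of
 * GET_KSM_PAGE_TRYLOCK.
 */
static struct page *ksm_lookup_locked(struct stable_node *node)
{
	struct page *page = get_ksm_page(node, GET_KSM_PAGE_TRYLOCK);

	if (PTR_ERR(page) == -EBUSY)
		return ERR_PTR(-EBUSY);	/* page lock contended: back off, retry later */
	if (!page)
		return NULL;		/* stable node was stale and has been removed */
	return page;			/* page reference held and page locked */
}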
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 5b30625fd365..0730bf8ff39f 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -601,7 +601,6 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
struct lock_class_key *key, struct shrinker *shrinker)
{
int i;
- size_t size = sizeof(*lru->node) * nr_node_ids;
int err = -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
@@ -612,7 +611,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
#endif
memcg_get_cache_ids();
- lru->node = kzalloc(size, GFP_KERNEL);
+ lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
if (!lru->node)
goto out;
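The list_lru hunk is one instance of the kzalloc(n * size) to kcalloc(n, size) conversion: the zeroed allocation is unchanged, but the multiplication is now overflow-checked. A rough conceptual sketch of that guard (not the real implementation, which goes through kmalloc_array()):
/* Rough sketch only: kcalloc() refuses products that wrap size_t. */
static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return kzalloc(bytes, flags);
}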
diff --git a/mm/maccess.c b/mm/maccess.c
index f3416632e5a4..ec00be51a24f 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -30,10 +30,8 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
set_fs(KERNEL_DS);
pagefault_disable();
- current->kernel_uaccess_faults_ok++;
ret = __copy_from_user_inatomic(dst,
(__force const void __user *)src, size);
- current->kernel_uaccess_faults_ok--;
pagefault_enable();
set_fs(old_fs);
@@ -60,9 +58,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
set_fs(KERNEL_DS);
pagefault_disable();
- current->kernel_uaccess_faults_ok++;
ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
- current->kernel_uaccess_faults_ok--;
pagefault_enable();
set_fs(old_fs);
@@ -98,13 +94,11 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
set_fs(KERNEL_DS);
pagefault_disable();
- current->kernel_uaccess_faults_ok++;
do {
ret = __get_user(*dst++, (const char __user __force *)src++);
} while (dst[-1] && ret == 0 && src - unsafe_addr < count);
- current->kernel_uaccess_faults_ok--;
dst[-1] = '\0';
pagefault_enable();
set_fs(old_fs);
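With the kernel_uaccess_faults_ok bookkeeping gone, the probe helpers above are again plain pagefault-disabled copies. For context, a hedged sketch of how a caller typically uses the exported wrapper (dump_word() is a hypothetical helper):
/* Hypothetical helper: safely peek at a possibly-invalid kernel address. */
static void dump_word(const void *addr)
{
	unsigned long val;

	if (probe_kernel_read(&val, addr, sizeof(val)))
		pr_info("%px: <fault>\n", addr);	/* fault reported as an error, no oops */
	else
		pr_info("%px: %lx\n", addr, val);
}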
diff --git a/mm/memblock.c b/mm/memblock.c
index 022d4cbb3618..470601115892 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -26,6 +26,13 @@
#include "internal.h"
+#define INIT_MEMBLOCK_REGIONS 128
+#define INIT_PHYSMEM_REGIONS 4
+
+#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
+# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
+#endif
+
/**
* DOC: memblock overview
*
@@ -92,7 +99,7 @@ unsigned long max_pfn;
unsigned long long max_possible_pfn;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif
@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = {
.reserved.regions = memblock_reserved_init_regions,
.reserved.cnt = 1, /* empty dummy entry */
- .reserved.max = INIT_MEMBLOCK_REGIONS,
+ .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
.reserved.name = "reserved",
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -1998,8 +2005,7 @@ DEFINE_SHOW_ATTRIBUTE(memblock_debug);
static int __init memblock_init_debugfs(void)
{
struct dentry *root = debugfs_create_dir("memblock", NULL);
- if (!root)
- return -ENXIO;
+
debugfs_create_file("memory", 0444, root,
&memblock.memory, &memblock_debug_fops);
debugfs_create_file("reserved", 0444, root,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index af7f18b32389..532e0e2a4817 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -39,6 +39,7 @@
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
+#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
@@ -248,6 +249,12 @@ enum res_type {
iter != NULL; \
iter = mem_cgroup_iter(NULL, iter, NULL))
+static inline bool should_force_charge(void)
+{
+ return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
+ (current->flags & PF_EXITING);
+}
+
/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
@@ -1389,8 +1396,13 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
};
bool ret;
- mutex_lock(&oom_lock);
- ret = out_of_memory(&oc);
+ if (mutex_lock_killable(&oom_lock))
+ return true;
+ /*
+ * A few threads which were not waiting at mutex_lock_killable() can
+ * fail to bail out. Therefore, check again after holding oom_lock.
+ */
+ ret = should_force_charge() || out_of_memory(&oc);
mutex_unlock(&oom_lock);
return ret;
}
@@ -2209,9 +2221,7 @@ retry:
* bypass the last charges so that they can exit quickly and
* free their memory.
*/
- if (unlikely(tsk_is_oom_victim(current) ||
- fatal_signal_pending(current) ||
- current->flags & PF_EXITING))
+ if (unlikely(should_force_charge()))
goto force;
/*
@@ -2352,13 +2362,13 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
static void lock_page_lru(struct page *page, int *isolated)
{
- struct zone *zone = page_zone(page);
+ pg_data_t *pgdat = page_pgdat(page);
- spin_lock_irq(zone_lru_lock(zone));
+ spin_lock_irq(&pgdat->lru_lock);
if (PageLRU(page)) {
struct lruvec *lruvec;
- lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_lru(page));
*isolated = 1;
@@ -2368,17 +2378,17 @@ static void lock_page_lru(struct page *page, int *isolated)
static void unlock_page_lru(struct page *page, int isolated)
{
- struct zone *zone = page_zone(page);
+ pg_data_t *pgdat = page_pgdat(page);
if (isolated) {
struct lruvec *lruvec;
- lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
VM_BUG_ON_PAGE(PageLRU(page), page);
SetPageLRU(page);
add_page_to_lru_list(page, lruvec, page_lru(page));
}
- spin_unlock_irq(zone_lru_lock(zone));
+ spin_unlock_irq(&pgdat->lru_lock);
}
static void commit_charge(struct page *page, struct mem_cgroup *memcg,
@@ -2573,7 +2583,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
}
/**
- * memcg_kmem_charge_memcg: charge a kmem page
+ * __memcg_kmem_charge_memcg: charge a kmem page
* @page: page to charge
* @gfp: reclaim mode
* @order: allocation order
@@ -2581,7 +2591,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
*
* Returns 0 on success, an error code on failure.
*/
-int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
struct mem_cgroup *memcg)
{
unsigned int nr_pages = 1 << order;
@@ -2604,24 +2614,24 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
}
/**
- * memcg_kmem_charge: charge a kmem page to the current memory cgroup
+ * __memcg_kmem_charge: charge a kmem page to the current memory cgroup
* @page: page to charge
* @gfp: reclaim mode
* @order: allocation order
*
* Returns 0 on success, an error code on failure.
*/
-int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
struct mem_cgroup *memcg;
int ret = 0;
- if (mem_cgroup_disabled() || memcg_kmem_bypass())
+ if (memcg_kmem_bypass())
return 0;
memcg = get_mem_cgroup_from_current();
if (!mem_cgroup_is_root(memcg)) {
- ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+ ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
if (!ret)
__SetPageKmemcg(page);
}
@@ -2629,11 +2639,11 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
return ret;
}
/**
- * memcg_kmem_uncharge: uncharge a kmem page
+ * __memcg_kmem_uncharge: uncharge a kmem page
* @page: page to uncharge
* @order: allocation order
*/
-void memcg_kmem_uncharge(struct page *page, int order)
+void __memcg_kmem_uncharge(struct page *page, int order)
{
struct mem_cgroup *memcg = page->mem_cgroup;
unsigned int nr_pages = 1 << order;
@@ -2664,7 +2674,7 @@ void memcg_kmem_uncharge(struct page *page, int order)
/*
* Because tail pages are not marked as "used", set it. We're under
- * zone_lru_lock and migration entries setup in all page mappings.
+ * pgdat->lru_lock and migration entries setup in all page mappings.
*/
void mem_cgroup_split_huge_fixup(struct page *head)
{
@@ -3337,7 +3347,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
const struct numa_stat *stat;
int nid;
unsigned long nr;
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
@@ -3388,7 +3398,7 @@ static const char *const memcg1_event_names[] = {
static int memcg_stat_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
unsigned long memory, memsw;
struct mem_cgroup *mi;
unsigned int i;
@@ -3626,8 +3636,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
size = thresholds->primary ? thresholds->primary->size + 1 : 1;
/* Allocate memory for new array of thresholds */
- new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
- GFP_KERNEL);
+ new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
if (!new) {
ret = -ENOMEM;
goto unlock;
@@ -3821,7 +3830,7 @@ static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
@@ -4420,7 +4429,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
static struct mem_cgroup *mem_cgroup_alloc(void)
{
struct mem_cgroup *memcg;
- size_t size;
+ unsigned int size;
int node;
size = sizeof(struct mem_cgroup);
@@ -5354,6 +5363,16 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
root_mem_cgroup->use_hierarchy = false;
}
+static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
+{
+ if (value == PAGE_COUNTER_MAX)
+ seq_puts(m, "max\n");
+ else
+ seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
+
+ return 0;
+}
+
static u64 memory_current_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
@@ -5364,15 +5383,8 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
static int memory_min_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long min = READ_ONCE(memcg->memory.min);
-
- if (min == PAGE_COUNTER_MAX)
- seq_puts(m, "max\n");
- else
- seq_printf(m, "%llu\n", (u64)min * PAGE_SIZE);
-
- return 0;
+ return seq_puts_memcg_tunable(m,
+ READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
}
static ssize_t memory_min_write(struct kernfs_open_file *of,
@@ -5394,15 +5406,8 @@ static ssize_t memory_min_write(struct kernfs_open_file *of,
static int memory_low_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long low = READ_ONCE(memcg->memory.low);
-
- if (low == PAGE_COUNTER_MAX)
- seq_puts(m, "max\n");
- else
- seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
-
- return 0;
+ return seq_puts_memcg_tunable(m,
+ READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
}
static ssize_t memory_low_write(struct kernfs_open_file *of,
@@ -5424,15 +5429,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
static int memory_high_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long high = READ_ONCE(memcg->high);
-
- if (high == PAGE_COUNTER_MAX)
- seq_puts(m, "max\n");
- else
- seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
-
- return 0;
+ return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high));
}
static ssize_t memory_high_write(struct kernfs_open_file *of,
@@ -5461,15 +5458,8 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
static int memory_max_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long max = READ_ONCE(memcg->memory.max);
-
- if (max == PAGE_COUNTER_MAX)
- seq_puts(m, "max\n");
- else
- seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
-
- return 0;
+ return seq_puts_memcg_tunable(m,
+ READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
}
static ssize_t memory_max_write(struct kernfs_open_file *of,
@@ -5523,7 +5513,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
static int memory_events_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
seq_printf(m, "low %lu\n",
atomic_long_read(&memcg->memory_events[MEMCG_LOW]));
@@ -5541,7 +5531,7 @@ static int memory_events_show(struct seq_file *m, void *v)
static int memory_stat_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
struct accumulated_stats acc;
int i;
@@ -5582,6 +5572,15 @@ static int memory_stat_show(struct seq_file *m, void *v)
seq_printf(m, "file_writeback %llu\n",
(u64)acc.stat[NR_WRITEBACK] * PAGE_SIZE);
+ /*
+ * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
+ * with the NR_ANON_THP vm counter, but right now it's a pain in the
+ * arse because it requires migrating the work out of rmap to a place
+ * where the page->mem_cgroup is set up and stable.
+ */
+ seq_printf(m, "anon_thp %llu\n",
+ (u64)acc.stat[MEMCG_RSS_HUGE] * PAGE_SIZE);
+
for (i = 0; i < NR_LRU_LISTS; i++)
seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i],
(u64)acc.lru_pages[i] * PAGE_SIZE);
@@ -5613,12 +5612,18 @@ static int memory_stat_show(struct seq_file *m, void *v)
seq_printf(m, "pglazyfree %lu\n", acc.events[PGLAZYFREE]);
seq_printf(m, "pglazyfreed %lu\n", acc.events[PGLAZYFREED]);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ seq_printf(m, "thp_fault_alloc %lu\n", acc.events[THP_FAULT_ALLOC]);
+ seq_printf(m, "thp_collapse_alloc %lu\n",
+ acc.events[THP_COLLAPSE_ALLOC]);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
return 0;
}
static int memory_oom_group_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
seq_printf(m, "%d\n", memcg->oom_group);
@@ -5747,7 +5752,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
*
* | memory.current, if memory.current < memory.low
* low_usage = |
- | 0, otherwise.
+ * | 0, otherwise.
*
*
* Such definition of the effective memory.low provides the expected
@@ -6601,15 +6606,8 @@ static u64 swap_current_read(struct cgroup_subsys_state *css,
static int swap_max_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- unsigned long max = READ_ONCE(memcg->swap.max);
-
- if (max == PAGE_COUNTER_MAX)
- seq_puts(m, "max\n");
- else
- seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
-
- return 0;
+ return seq_puts_memcg_tunable(m,
+ READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
}
static ssize_t swap_max_write(struct kernfs_open_file *of,
@@ -6631,7 +6629,7 @@ static ssize_t swap_max_write(struct kernfs_open_file *of,
static int swap_events_show(struct seq_file *m, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
seq_printf(m, "max %lu\n",
atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
diff --git a/mm/memfd.c b/mm/memfd.c
index 97264c79d2cd..650e65a46b9c 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -131,7 +131,8 @@ static unsigned int *memfd_file_seals_ptr(struct file *file)
#define F_ALL_SEALS (F_SEAL_SEAL | \
F_SEAL_SHRINK | \
F_SEAL_GROW | \
- F_SEAL_WRITE)
+ F_SEAL_WRITE | \
+ F_SEAL_FUTURE_WRITE)
static int memfd_add_seals(struct file *file, unsigned int seals)
{
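Adding F_SEAL_FUTURE_WRITE to F_ALL_SEALS is what lets F_ADD_SEALS accept the new seal: existing writable mappings keep working, but new writes and new writable mappings are refused. A hedged userspace sketch (assumes glibc 2.27+ for the memfd_create() wrapper; the fallback #define mirrors the UAPI value):
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE	0x0010	/* define if headers predate the seal */
#endif
int main(void)
{
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0) {
		perror("F_ADD_SEALS");		/* e.g. a kernel without this seal */
		return 1;
	}

	/* New write access is refused; existing writable mappings keep working. */
	if (write(fd, "x", 1) < 0)
		printf("write refused: %s\n", strerror(errno));

	close(fd);
	return 0;
}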
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 6379fff1a5ff..fc8b51744579 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
if (fail || tk->addr_valid == 0) {
pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
pfn, tk->tsk->comm, tk->tsk->pid);
- force_sig(SIGKILL, tk->tsk);
+ do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+ tk->tsk, PIDTYPE_PID);
}
/*
@@ -966,7 +967,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
struct address_space *mapping;
LIST_HEAD(tokill);
- bool unmap_success = true;
+ bool unmap_success;
int kill = 1, forcekill;
struct page *hpage = *hpagep;
bool mlocked = PageMlocked(hpage);
@@ -1028,19 +1029,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (kill)
collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
- if (!PageHuge(hpage)) {
- unmap_success = try_to_unmap(hpage, ttu);
- } else if (mapping) {
- /*
- * For hugetlb pages, try_to_unmap could potentially call
- * huge_pmd_unshare. Because of this, take semaphore in
- * write mode here and set TTU_RMAP_LOCKED to indicate we
- * have taken the lock at this higer level.
- */
- i_mmap_lock_write(mapping);
- unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
- i_mmap_unlock_write(mapping);
- }
+ unmap_success = try_to_unmap(hpage, ttu);
if (!unmap_success)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
@@ -1836,19 +1825,17 @@ static int soft_offline_in_use_page(struct page *page, int flags)
struct page *hpage = compound_head(page);
if (!PageHuge(page) && PageTransHuge(hpage)) {
- lock_page(hpage);
- if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
- unlock_page(hpage);
- if (!PageAnon(hpage))
+ lock_page(page);
+ if (!PageAnon(page) || unlikely(split_huge_page(page))) {
+ unlock_page(page);
+ if (!PageAnon(page))
pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
else
pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
- put_hwpoison_page(hpage);
+ put_hwpoison_page(page);
return -EBUSY;
}
- unlock_page(hpage);
- get_hwpoison_page(page);
- put_hwpoison_page(hpage);
+ unlock_page(page);
}
/*
diff --git a/mm/memory.c b/mm/memory.c
index a52663c0612d..47fe250307c7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -69,6 +69,7 @@
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
+#include <linux/numa.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -1451,7 +1452,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
retval = -EINVAL;
- if (PageAnon(page))
+ if (PageAnon(page) || PageSlab(page) || page_has_type(page))
goto out;
retval = -ENOMEM;
flush_dcache_page(page);
@@ -1503,6 +1504,8 @@ out:
* under mm->mmap_sem write-lock, so it can change vma->vm_flags.
* Caller must set VM_MIXEDMAP on vma if it wants to call this
* function from other places, for example from page-fault handler.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
@@ -1830,7 +1833,9 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
* @size: size of map area
* @prot: page protection flags for this mapping
*
- * Note: this is only safe if the mm semaphore is held when called.
+ * Note: this is only safe if the mm semaphore is held when called.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
@@ -1903,6 +1908,8 @@ EXPORT_SYMBOL(remap_pfn_range);
*
* NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
* whatever write-combining details or similar.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
@@ -2381,12 +2388,13 @@ oom:
*
* This function handles all that is needed to finish a write page fault in a
* shared mapping due to PTE being read-only once the mapped page is prepared.
- * It handles locking of PTE and modifying it. The function returns
- * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE
- * lock.
+ * It handles locking of PTE and modifying it.
*
* The function expects the page to be locked or other protection against
* concurrent faults / writeback (such as DAX radix tree locks).
+ *
+ * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
+ * we acquired PTE lock.
*/
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
{
@@ -2504,8 +2512,11 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
- if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
+ if (PageAnon(vmf->page)) {
int total_map_swapcount;
+ if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
+ page_count(vmf->page) != 1))
+ goto copy;
if (!trylock_page(vmf->page)) {
get_page(vmf->page);
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2520,6 +2531,15 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
}
put_page(vmf->page);
}
+ if (PageKsm(vmf->page)) {
+ bool reused = reuse_ksm_page(vmf->page, vmf->vma,
+ vmf->address);
+ unlock_page(vmf->page);
+ if (!reused)
+ goto copy;
+ wp_page_reuse(vmf);
+ return VM_FAULT_WRITE;
+ }
if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
if (total_map_swapcount == 1) {
/*
@@ -2540,7 +2560,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
(VM_WRITE|VM_SHARED))) {
return wp_page_shared(vmf);
}
-
+copy:
/*
* Ok, we need to copy. Oh, well..
*/
@@ -2994,6 +3014,28 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret;
+ /*
+ * Preallocate pte before we take page_lock because this might lead to
+ * deadlocks for memcg reclaim which waits for pages under writeback:
+ * lock_page(A)
+ * SetPageWriteback(A)
+ * unlock_page(A)
+ * lock_page(B)
+ * lock_page(B)
+ * pte_alloc_pne
+ * shrink_page_list
+ * wait_on_page_writeback(A)
+ * SetPageWriteback(B)
+ * unlock_page(B)
+ * # flush A, B to clear the writeback
+ */
+ if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
+ vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
+ if (!vmf->prealloc_pte)
+ return VM_FAULT_OOM;
+ smp_wmb(); /* See comment in __pte_alloc() */
+ }
+
ret = vma->vm_ops->fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
VM_FAULT_DONE_COW)))
@@ -3179,6 +3221,8 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
*
* Target users are page handler itself and implementations of
* vm_ops->map_pages.
+ *
+ * Return: %0 on success, %VM_FAULT_ code in case of error.
*/
vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
struct page *page)
@@ -3239,11 +3283,12 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
* This function handles all that is needed to finish a page fault once the
* page to fault in is prepared. It handles locking of PTEs, inserts PTE for
* given page, adds reverse page mapping, handles memcg charges and LRU
- * addition. The function returns 0 on success, VM_FAULT_ code in case of
- * error.
+ * addition.
*
* The function expects the page to be locked and on success it consumes a
* reference of a page being mapped (for the PTE which maps it).
+ *
+ * Return: %0 on success, %VM_FAULT_ code in case of error.
*/
vm_fault_t finish_fault(struct vm_fault *vmf)
{
@@ -3299,12 +3344,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
static int __init fault_around_debugfs(void)
{
- void *ret;
-
- ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
- &fault_around_bytes_fops);
- if (!ret)
- pr_warn("Failed to create fault_around_bytes in debugfs");
+ debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
+ &fault_around_bytes_fops);
return 0;
}
late_initcall(fault_around_debugfs);
@@ -3495,10 +3536,13 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
* but allow concurrent faults).
* The mmap_sem may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
+ * If mmap_sem is released, vma may become invalid (for example
+ * by another thread calling munmap()).
*/
static vm_fault_t do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ struct mm_struct *vm_mm = vma->vm_mm;
vm_fault_t ret;
/*
@@ -3539,7 +3583,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
/* preallocated pagetable is unused: free it */
if (vmf->prealloc_pte) {
- pte_free(vma->vm_mm, vmf->prealloc_pte);
+ pte_free(vm_mm, vmf->prealloc_pte);
vmf->prealloc_pte = NULL;
}
return ret;
@@ -3564,11 +3608,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL;
- int page_nid = -1;
+ int page_nid = NUMA_NO_NODE;
int last_cpupid;
int target_nid;
bool migrated = false;
- pte_t pte;
+ pte_t pte, old_pte;
bool was_writable = pte_savedwrite(vmf->orig_pte);
int flags = 0;
@@ -3588,12 +3632,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
* Make it present again, Depending on how arch implementes non
* accessible ptes, some can allow access by kernel mode.
*/
- pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
- pte = pte_modify(pte, vma->vm_page_prot);
+ old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+ pte = pte_modify(old_pte, vma->vm_page_prot);
pte = pte_mkyoung(pte);
if (was_writable)
pte = pte_mkwrite(pte);
- ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
+ ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
update_mmu_cache(vma, vmf->address, vmf->pte);
page = vm_normal_page(vma, vmf->address, pte);
@@ -3631,7 +3675,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
&flags);
pte_unmap_unlock(vmf->pte, vmf->ptl);
- if (target_nid == -1) {
+ if (target_nid == NUMA_NO_NODE) {
put_page(page);
goto out;
}
@@ -3645,7 +3689,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
flags |= TNF_MIGRATE_FAIL;
out:
- if (page_nid != -1)
+ if (page_nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, page_nid, 1, flags);
return 0;
}
@@ -4077,8 +4121,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
goto out;
if (range) {
- range->start = address & PAGE_MASK;
- range->end = range->start + PAGE_SIZE;
+ mmu_notifier_range_init(range, mm, address & PAGE_MASK,
+ (address & PAGE_MASK) + PAGE_SIZE);
mmu_notifier_invalidate_range_start(range);
}
ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
@@ -4128,7 +4172,7 @@ EXPORT_SYMBOL(follow_pte_pmd);
*
* Only IO mappings and raw PFN mappings are allowed.
*
- * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ * Return: zero and the pfn at @pfn on success, -ve otherwise.
*/
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn)
@@ -4278,6 +4322,8 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
* @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
+ *
+ * Return: number of bytes copied from source to destination.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags)
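Several memory.c hunks above only add kernel-doc "Return:" lines; the convention they document is what drivers propagate. A hedged sketch of the usual remap_pfn_range() caller, a character-device ->mmap handler (foo_mmap(), FOO_MMIO_BASE and FOO_MMIO_SIZE are hypothetical):
/*
 * Illustrative only: map a hypothetical device's MMIO window into
 * userspace and propagate remap_pfn_range()'s 0 / -errno return.
 */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > FOO_MMIO_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* 0 on success, negative errno otherwise. */
	return remap_pfn_range(vma, vma->vm_start, FOO_MMIO_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}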
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b9a667d36c55..6b05576fb4ec 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -47,7 +47,7 @@
* and restore_online_page_callback() for generic callback restore.
*/
-static void generic_online_page(struct page *page);
+static void generic_online_page(struct page *page, unsigned int order);
static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);
@@ -656,26 +656,40 @@ void __online_page_free(struct page *page)
}
EXPORT_SYMBOL_GPL(__online_page_free);
-static void generic_online_page(struct page *page)
+static void generic_online_page(struct page *page, unsigned int order)
{
- __online_page_set_limits(page);
- __online_page_increment_counters(page);
- __online_page_free(page);
+ kernel_map_pages(page, 1 << order, 1);
+ __free_pages_core(page, order);
+ totalram_pages_add(1UL << order);
+#ifdef CONFIG_HIGHMEM
+ if (PageHighMem(page))
+ totalhigh_pages_add(1UL << order);
+#endif
+}
+
+static int online_pages_blocks(unsigned long start, unsigned long nr_pages)
+{
+ unsigned long end = start + nr_pages;
+ int order, onlined_pages = 0;
+
+ while (start < end) {
+ order = min(MAX_ORDER - 1,
+ get_order(PFN_PHYS(end) - PFN_PHYS(start)));
+ (*online_page_callback)(pfn_to_page(start), order);
+
+ onlined_pages += (1UL << order);
+ start += (1UL << order);
+ }
+ return onlined_pages;
}
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
{
- unsigned long i;
unsigned long onlined_pages = *(unsigned long *)arg;
- struct page *page;
if (PageReserved(pfn_to_page(start_pfn)))
- for (i = 0; i < nr_pages; i++) {
- page = pfn_to_page(start_pfn + i);
- (*online_page_callback)(page);
- onlined_pages++;
- }
+ onlined_pages += online_pages_blocks(start_pfn, nr_pages);
online_mem_sections(start_pfn, start_pfn + nr_pages);
@@ -689,9 +703,9 @@ static void node_states_check_changes_online(unsigned long nr_pages,
{
int nid = zone_to_nid(zone);
- arg->status_change_nid = -1;
- arg->status_change_nid_normal = -1;
- arg->status_change_nid_high = -1;
+ arg->status_change_nid = NUMA_NO_NODE;
+ arg->status_change_nid_normal = NUMA_NO_NODE;
+ arg->status_change_nid_high = NUMA_NO_NODE;
if (!node_state(nid, N_MEMORY))
arg->status_change_nid = nid;
@@ -1188,11 +1202,13 @@ static inline int pageblock_free(struct page *page)
return PageBuddy(page) && page_order(page) >= pageblock_order;
}
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
{
+ struct page *page = pfn_to_page(pfn);
+
/* Ensure the starting page is pageblock-aligned */
- BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+ BUG_ON(pfn & (pageblock_nr_pages - 1));
/* If the entire pageblock is free, move to the end of free page */
if (pageblock_free(page)) {
@@ -1200,16 +1216,16 @@ static struct page *next_active_pageblock(struct page *page)
/* be careful. we don't have locks, page_order can be changed.*/
order = page_order(page);
if ((order < MAX_ORDER) && (order >= pageblock_order))
- return page + (1 << order);
+ return pfn + (1 << order);
}
- return page + pageblock_nr_pages;
+ return pfn + pageblock_nr_pages;
}
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
{
+ struct page *page = pfn_to_page(pfn);
struct zone *zone;
- unsigned long pfn;
/*
* We have to be careful here because we are iterating over memory
@@ -1232,12 +1248,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
- struct page *page = pfn_to_page(start_pfn);
- struct page *end_page = page + nr_pages;
+ unsigned long end_pfn, pfn;
+
+ end_pfn = min(start_pfn + nr_pages,
+ zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
/* Check the starting page of each pageblock within the range */
- for (; page < end_page; page = next_active_pageblock(page)) {
- if (!is_pageblock_removable_nolock(page))
+ for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+ if (!is_pageblock_removable_nolock(pfn))
return false;
cond_resched();
}
@@ -1273,6 +1291,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
i++;
if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
continue;
+ /* Check if we got outside of the zone */
+ if (zone && !zone_spans_pfn(zone, pfn + i))
+ return 0;
page = pfn_to_page(pfn + i);
if (zone && page_zone(page) != zone)
return 0;
@@ -1301,23 +1322,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
unsigned long pfn;
- struct page *page;
+
for (pfn = start; pfn < end; pfn++) {
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (PageLRU(page))
- return pfn;
- if (__PageMovable(page))
- return pfn;
- if (PageHuge(page)) {
- if (hugepage_migration_supported(page_hstate(page)) &&
- page_huge_active(page))
- return pfn;
- else
- pfn = round_up(pfn + 1,
- 1 << compound_order(page)) - 1;
- }
- }
+ struct page *page, *head;
+ unsigned long skip;
+
+ if (!pfn_valid(pfn))
+ continue;
+ page = pfn_to_page(pfn);
+ if (PageLRU(page))
+ return pfn;
+ if (__PageMovable(page))
+ return pfn;
+
+ if (!PageHuge(page))
+ continue;
+ head = compound_head(page);
+ if (hugepage_migration_supported(page_hstate(head)) &&
+ page_huge_active(head))
+ return pfn;
+ skip = (1 << compound_order(head)) - (page - head);
+ pfn += skip - 1;
}
return 0;
}
@@ -1344,7 +1369,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
struct page *page;
- int not_managed = 0;
int ret = 0;
LIST_HEAD(source);
@@ -1355,12 +1379,12 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (PageHuge(page)) {
struct page *head = compound_head(page);
- pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
if (compound_order(head) > PFN_SECTION_SHIFT) {
ret = -EBUSY;
break;
}
- isolate_huge_page(page, &source);
+ pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+ isolate_huge_page(head, &source);
continue;
} else if (PageTransHuge(page))
pfn = page_to_pfn(compound_head(page))
@@ -1392,7 +1416,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
else
ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
if (!ret) { /* Success */
- put_page(page);
list_add_tail(&page->lru, &source);
if (!__PageMovable(page))
inc_node_page_state(page, NR_ISOLATED_ANON +
@@ -1401,22 +1424,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
} else {
pr_warn("failed to isolate pfn %lx\n", pfn);
dump_page(page, "isolation failed");
- put_page(page);
- /* Because we don't have big zone->lock. we should
- check this again here. */
- if (page_count(page)) {
- not_managed++;
- ret = -EBUSY;
- break;
- }
}
+ put_page(page);
}
if (!list_empty(&source)) {
- if (not_managed) {
- putback_movable_pages(&source);
- goto out;
- }
-
/* Allocate a new page from the nearest neighbor node */
ret = migrate_pages(&source, new_node_page, NULL, 0,
MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
@@ -1429,7 +1440,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
putback_movable_pages(&source);
}
}
-out:
+
return ret;
}
@@ -1499,9 +1510,9 @@ static void node_states_check_changes_offline(unsigned long nr_pages,
unsigned long present_pages = 0;
enum zone_type zt;
- arg->status_change_nid = -1;
- arg->status_change_nid_normal = -1;
- arg->status_change_nid_high = -1;
+ arg->status_change_nid = NUMA_NO_NODE;
+ arg->status_change_nid_normal = NUMA_NO_NODE;
+ arg->status_change_nid_high = NUMA_NO_NODE;
/*
* Check whether node_states[N_NORMAL_MEMORY] will be changed.
@@ -1576,7 +1587,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
we assume this for now. .*/
if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
&valid_end)) {
- mem_hotplug_done();
ret = -EINVAL;
reason = "multizone range";
goto failed_removal;
@@ -1591,7 +1601,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
MIGRATE_MOVABLE,
SKIP_HWPOISON | REPORT_FAILURE);
if (ret) {
- mem_hotplug_done();
reason = "failure to isolate range";
goto failed_removal;
}
@@ -1617,7 +1626,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
cond_resched();
lru_add_drain_all();
- drain_all_pages(zone);
pfn = scan_movable_pages(pfn, end_pfn);
if (pfn) {
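generic_online_page() now releases whole high-order blocks and online_pages_blocks() walks the range in the largest chunks it can. A simplified standalone model of that walk (it assumes an aligned range and picks the largest power-of-two order that fits, which approximates the kernel's get_order()-based computation):
#include <stdio.h>
#define MAX_ORDER 11	/* as on x86-64: orders 0..10 */
/* Largest order x such that (1 << x) pages fit in the remaining range. */
static int largest_fitting_order(unsigned long nr_pages)
{
	int order = 0;

	while (order + 1 < MAX_ORDER && (1UL << (order + 1)) <= nr_pages)
		order++;
	return order;
}
int main(void)
{
	unsigned long start = 0x40000, end = start + 0x8000; /* 32768 pages, one 128M section */

	while (start < end) {
		int order = largest_fitting_order(end - start);

		printf("online pfn %#lx as order-%d block (%lu pages)\n",
		       start, order, 1UL << order);
		start += 1UL << order;
	}
	return 0;
}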
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d4496d9d34f5..af171ccb56a2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -350,7 +350,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
if (!pol)
return;
- if (!mpol_store_user_nodemask(pol) &&
+ if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
nodemask_t *nodes)
{
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
if (copy > nbytes) {
if (copy > PAGE_SIZE)
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
int uninitialized_var(pval);
nodemask_t nodes;
- if (nmask != NULL && maxnode < MAX_NUMNODES)
+ if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask)
@@ -2304,7 +2304,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
unsigned long pgoff;
int thiscpu = raw_smp_processor_id();
int thisnid = cpu_to_node(thiscpu);
- int polnid = -1;
+ int polnid = NUMA_NO_NODE;
int ret = -1;
pol = get_vma_policy(vma, addr);
diff --git a/mm/mempool.c b/mm/mempool.c
index 0ef8cc8d1602..85efab3da720 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -222,6 +222,8 @@ EXPORT_SYMBOL(mempool_init_node);
*
* Like mempool_create(), but initializes the pool in (i.e. embedded in another
* structure).
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
@@ -245,6 +247,8 @@ EXPORT_SYMBOL(mempool_init);
* functions. This function might sleep. Both the alloc_fn() and the free_fn()
* functions might sleep - as long as the mempool_alloc() function is not called
* from IRQ contexts.
+ *
+ * Return: pointer to the created memory pool object or %NULL on error.
*/
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
@@ -289,6 +293,8 @@ EXPORT_SYMBOL(mempool_create_node);
* Note, the caller must guarantee that no mempool_destroy is called
* while this function is running. mempool_alloc() & mempool_free()
* might be called (eg. from IRQ contexts) while this function executes.
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
int mempool_resize(mempool_t *pool, int new_min_nr)
{
@@ -363,6 +369,8 @@ EXPORT_SYMBOL(mempool_resize);
* *never* fails when called from process contexts. (it might
* fail if called from an IRQ context.)
* Note: using __GFP_ZERO is not supported.
+ *
+ * Return: pointer to the allocated element or %NULL on error.
*/
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
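The mempool hunks only document return conventions; for reference, a hedged sketch of the usage pattern they describe (struct foo_request, foo_cache and foo_pool are hypothetical):
/*
 * Illustrative only: a mempool backed by a slab cache guarantees that
 * at least FOO_POOL_MIN elements can always be allocated.
 */
struct foo_request {
	struct list_head list;
	int status;
};
#define FOO_POOL_MIN	16
static struct kmem_cache *foo_cache;
static mempool_t *foo_pool;
static int __init foo_init(void)
{
	foo_cache = KMEM_CACHE(foo_request, 0);
	if (!foo_cache)
		return -ENOMEM;

	/* Return: pointer to the pool, or NULL on error. */
	foo_pool = mempool_create(FOO_POOL_MIN, mempool_alloc_slab,
				  mempool_free_slab, foo_cache);
	if (!foo_pool) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}
	return 0;
}
static void foo_submit(void)
{
	/* Never fails from process context as long as gfp allows reclaim. */
	struct foo_request *req = mempool_alloc(foo_pool, GFP_NOIO);

	/* ... use req ... */
	mempool_free(req, foo_pool);
}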
diff --git a/mm/migrate.c b/mm/migrate.c
index ccf8966caf6f..ac6f4939bb59 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,7 +100,7 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
/*
* Check PageMovable before holding a PG_lock because page's owner
* assumes anybody doesn't touch PG_lock of newly allocated page
- * so unconditionally grapping the lock ruins page's owner side.
+ * so unconditionally grabbing the lock ruins page's owner side.
*/
if (unlikely(!__PageMovable(page)))
goto out_putpage;
@@ -374,7 +374,7 @@ unlock:
}
#endif
-static int expected_page_refs(struct page *page)
+static int expected_page_refs(struct address_space *mapping, struct page *page)
{
int expected_count = 1;
@@ -384,7 +384,7 @@ static int expected_page_refs(struct page *page)
*/
expected_count += is_device_private_page(page);
expected_count += is_device_public_page(page);
- if (page_mapping(page))
+ if (mapping)
expected_count += hpage_nr_pages(page) + page_has_private(page);
return expected_count;
@@ -405,7 +405,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
XA_STATE(xas, &mapping->i_pages, page_index(page));
struct zone *oldzone, *newzone;
int dirty;
- int expected_count = expected_page_refs(page) + extra_count;
+ int expected_count = expected_page_refs(mapping, page) + extra_count;
if (!mapping) {
/* Anonymous page without mapping */
@@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
/* Simple case, sync compaction */
if (mode != MIGRATE_ASYNC) {
do {
- get_bh(bh);
lock_buffer(bh);
bh = bh->b_this_page;
@@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
/* async case, we cannot block on lock_buffer so use trylock_buffer */
do {
- get_bh(bh);
if (!trylock_buffer(bh)) {
/*
* We failed to lock the buffer and cannot stall in
* async migration. Release the taken locks
*/
struct buffer_head *failed_bh = bh;
- put_bh(failed_bh);
bh = head;
while (bh != failed_bh) {
unlock_buffer(bh);
- put_bh(bh);
bh = bh->b_this_page;
}
return false;
@@ -754,7 +750,7 @@ static int __buffer_migrate_page(struct address_space *mapping,
return migrate_page(mapping, newpage, page, mode);
/* Check whether page does not have extra refs before we do more work */
- expected_count = expected_page_refs(page);
+ expected_count = expected_page_refs(mapping, page);
if (page_count(page) != expected_count)
return -EAGAIN;
@@ -818,7 +814,6 @@ unlock_buffers:
bh = head;
do {
unlock_buffer(bh);
- put_bh(bh);
bh = bh->b_this_page;
} while (bh != head);
@@ -916,7 +911,7 @@ static int fallback_migrate_page(struct address_space *mapping,
*/
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
- return -EAGAIN;
+ return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
return migrate_page(mapping, newpage, page, mode);
}
@@ -1135,10 +1130,13 @@ out:
* If migration is successful, decrease refcount of the newpage
* which will not free the page because new page owner increased
* refcounter. As well, if it is LRU page, add the page to LRU
- * list in here.
+ * list in here. Use the old state of the isolated source page to
+ * determine if we migrated a LRU page. newpage was already unlocked
+ * and possibly modified by its owner - don't rely on the page
+ * state.
*/
if (rc == MIGRATEPAGE_SUCCESS) {
- if (unlikely(__PageMovable(newpage)))
+ if (unlikely(!is_lru))
put_page(newpage);
else
putback_lru_page(newpage);
@@ -1289,7 +1287,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
struct anon_vma *anon_vma = NULL;
/*
- * Movability of hugepages depends on architectures and hugepage size.
+ * Migratability of hugepages depends on architectures and their size.
* This check is necessary because some callers of hugepage migration
* like soft offline and memory hotremove don't walk through page
* tables or check whether the hugepage is pmd-based or not before
@@ -1317,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
lock_page(hpage);
}
+ /*
+ * Check for pages which are in the process of being freed. Without
+ * page_mapping() set, hugetlbfs specific move page routine will not
+ * be called and we could leak usage counts for subpools.
+ */
+ if (page_private(hpage) && !page_mapping(hpage)) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
@@ -1324,19 +1332,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
goto put_anon;
if (page_mapped(hpage)) {
- struct address_space *mapping = page_mapping(hpage);
-
- /*
- * try_to_unmap could potentially call huge_pmd_unshare.
- * Because of this, take semaphore in write mode here and
- * set TTU_RMAP_LOCKED to let lower levels know we have
- * taken the lock.
- */
- i_mmap_lock_write(mapping);
try_to_unmap(hpage,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
- TTU_RMAP_LOCKED);
- i_mmap_unlock_write(mapping);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
page_was_mapped = 1;
}
@@ -1358,6 +1355,7 @@ put_anon:
put_new_page = NULL;
}
+out_unlock:
unlock_page(hpage);
out:
if (rc != -EAGAIN)
diff --git a/mm/mincore.c b/mm/mincore.c
index f0f91461a9f4..218099b5ed31 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -42,14 +42,72 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
return 0;
}
-static int mincore_unmapped_range(unsigned long addr, unsigned long end,
- struct mm_walk *walk)
+/*
+ * Later we can get more picky about what "in core" means precisely.
+ * For now, simply check to see if the page is in the page cache,
+ * and is up to date; i.e. that no page-in operation would be required
+ * at this time if an application were to map and access this page.
+ */
+static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
+{
+ unsigned char present = 0;
+ struct page *page;
+
+ /*
+ * When tmpfs swaps out a page from a file, any process mapping that
+ * file will not get a swp_entry_t in its pte, but rather it is like
+ * any other file mapping (ie. marked !present and faulted in with
+ * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
+ */
+#ifdef CONFIG_SWAP
+ if (shmem_mapping(mapping)) {
+ page = find_get_entry(mapping, pgoff);
+ /*
+ * shmem/tmpfs may return swap: account for swapcache
+ * page too.
+ */
+ if (xa_is_value(page)) {
+ swp_entry_t swp = radix_to_swp_entry(page);
+ page = find_get_page(swap_address_space(swp),
+ swp_offset(swp));
+ }
+ } else
+ page = find_get_page(mapping, pgoff);
+#else
+ page = find_get_page(mapping, pgoff);
+#endif
+ if (page) {
+ present = PageUptodate(page);
+ put_page(page);
+ }
+
+ return present;
+}
+
+static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
+ struct vm_area_struct *vma, unsigned char *vec)
{
- unsigned char *vec = walk->private;
unsigned long nr = (end - addr) >> PAGE_SHIFT;
+ int i;
- memset(vec, 0, nr);
- walk->private += nr;
+ if (vma->vm_file) {
+ pgoff_t pgoff;
+
+ pgoff = linear_page_index(vma, addr);
+ for (i = 0; i < nr; i++, pgoff++)
+ vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+ } else {
+ for (i = 0; i < nr; i++)
+ vec[i] = 0;
+ }
+ return nr;
+}
+
+static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ walk->private += __mincore_unmapped_range(addr, end,
+ walk->vma, walk->private);
return 0;
}
@@ -69,9 +127,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
goto out;
}
- /* We'll consider a THP page under construction to be there */
if (pmd_trans_unstable(pmd)) {
- memset(vec, 1, nr);
+ __mincore_unmapped_range(addr, end, vma, vec);
goto out;
}
@@ -80,17 +137,28 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte_t pte = *ptep;
if (pte_none(pte))
- *vec = 0;
+ __mincore_unmapped_range(addr, addr + PAGE_SIZE,
+ vma, vec);
else if (pte_present(pte))
*vec = 1;
else { /* pte is a swap entry */
swp_entry_t entry = pte_to_swp_entry(pte);
- /*
- * migration or hwpoison entries are always
- * uptodate
- */
- *vec = !!non_swap_entry(entry);
+ if (non_swap_entry(entry)) {
+ /*
+ * migration or hwpoison entries are always
+ * uptodate
+ */
+ *vec = 1;
+ } else {
+#ifdef CONFIG_SWAP
+ *vec = mincore_page(swap_address_space(entry),
+ swp_offset(entry));
+#else
+ WARN_ON(1);
+ *vec = 1;
+#endif
+ }
}
vec++;
}
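With the mincore.c changes above, holes and swapped-out ptes in file-backed mappings are resolved through the page cache (and swap cache) instead of being reported as a flat 0 or 1. A hedged userspace sketch of the syscall this code serves (the default file path is arbitrary):
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/etc/hostname"; /* any readable file */
	int fd = open(path, O_RDONLY);
	struct stat st;

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	long page = sysconf(_SC_PAGESIZE);
	size_t pages = (st.st_size + page - 1) / page;
	void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	unsigned char *vec = calloc(pages, 1);

	if (map == MAP_FAILED || !vec || mincore(map, st.st_size, vec))
		return 1;

	size_t resident = 0;
	for (size_t i = 0; i < pages; i++)
		resident += vec[i] & 1;	/* bit 0: page resident and up to date */
	printf("%zu of %zu pages resident\n", resident, pages);
	return 0;
}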
diff --git a/mm/mlock.c b/mm/mlock.c
index 41cc47e28ad6..080f3b36415b 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -182,7 +182,7 @@ static void __munlock_isolation_failed(struct page *page)
unsigned int munlock_vma_page(struct page *page)
{
int nr_pages;
- struct zone *zone = page_zone(page);
+ pg_data_t *pgdat = page_pgdat(page);
/* For try_to_munlock() and to serialize with page migration */
BUG_ON(!PageLocked(page));
@@ -194,7 +194,7 @@ unsigned int munlock_vma_page(struct page *page)
* might otherwise copy PageMlocked to part of the tail pages before
* we clear it in the head page. It also stabilizes hpage_nr_pages().
*/
- spin_lock_irq(zone_lru_lock(zone));
+ spin_lock_irq(&pgdat->lru_lock);
if (!TestClearPageMlocked(page)) {
/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
@@ -203,17 +203,17 @@ unsigned int munlock_vma_page(struct page *page)
}
nr_pages = hpage_nr_pages(page);
- __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
+ __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
if (__munlock_isolate_lru_page(page, true)) {
- spin_unlock_irq(zone_lru_lock(zone));
+ spin_unlock_irq(&pgdat->lru_lock);
__munlock_isolated_page(page);
goto out;
}
__munlock_isolation_failed(page);
unlock_out:
- spin_unlock_irq(zone_lru_lock(zone));
+ spin_unlock_irq(&pgdat->lru_lock);
out:
return nr_pages - 1;
@@ -298,7 +298,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
pagevec_init(&pvec_putback);
/* Phase 1: page isolation */
- spin_lock_irq(zone_lru_lock(zone));
+ spin_lock_irq(&zone->zone_pgdat->lru_lock);
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];
@@ -325,7 +325,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
pvec->pages[i] = NULL;
}
__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
- spin_unlock_irq(zone_lru_lock(zone));
+ spin_unlock_irq(&zone->zone_pgdat->lru_lock);
/* Now we can release pins of pages that we are not munlocking */
pagevec_release(&pvec_putback);
diff --git a/mm/mmap.c b/mm/mmap.c
index f901065c4c64..41eb48d9b527 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -438,7 +438,7 @@ static void vma_gap_update(struct vm_area_struct *vma)
{
/*
* As it turns out, RB_DECLARE_CALLBACKS() already created a callback
- * function that does exacltly what we want.
+ * function that does exactly what we want.
*/
vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
@@ -1012,7 +1012,7 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
* VM_SOFTDIRTY should not prevent from VMA merging, if we
* match the flags but dirty bit -- the caller should mark
* merged VMA as dirty. If dirty bit won't be excluded from
- * comparison, we increase pressue on the memory system forcing
+ * comparison, we increase pressure on the memory system forcing
* the kernel to generate new VMAs when old one could be
* extended instead.
*/
@@ -1115,7 +1115,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
* PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN
* might become case 1 below case 2 below case 3 below
*
- * It is important for case 8 that the the vma NNNN overlapping the
+ * It is important for case 8 that the vma NNNN overlapping the
* region AAAA is never going to extended over XXXX. Instead XXXX must
* be extended in region AAAA and NNNN must be removed. This way in
* all cases where vma_merge succeeds, the moment vma_adjust drops the
@@ -1645,7 +1645,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
/*
- * Some shared mappigns will want the pages marked read-only
+ * Some shared mappings will want the pages marked read-only
* to track write events. If so, we'll downgrade vm_page_prot
* to the private version (using protection_map[] without the
* VM_SHARED bit).
@@ -2126,13 +2126,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
struct vm_unmapped_area_info info;
const unsigned long mmap_end = arch_get_mmap_end(addr);
@@ -2426,12 +2425,11 @@ int expand_downwards(struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *prev;
- int error;
+ int error = 0;
address &= PAGE_MASK;
- error = security_mmap_addr(address);
- if (error)
- return error;
+ if (address < mmap_min_addr)
+ return -EPERM;
/* Enforce stack_guard_gap */
prev = vma->vm_prev;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 36cb358db170..028c724dcb1a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -110,8 +110,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
continue;
}
- ptent = ptep_modify_prot_start(mm, addr, pte);
- ptent = pte_modify(ptent, newprot);
+ oldpte = ptep_modify_prot_start(vma, addr, pte);
+ ptent = pte_modify(oldpte, newprot);
if (preserve_write)
ptent = pte_mk_savedwrite(ptent);
@@ -121,7 +121,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
!(vma->vm_flags & VM_SOFTDIRTY))) {
ptent = pte_mkwrite(ptent);
}
- ptep_modify_prot_commit(mm, addr, pte, ptent);
+ ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
pages++;
} else if (IS_ENABLED(CONFIG_MIGRATION)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
diff --git a/mm/mremap.c b/mm/mremap.c
index 3320616ed93f..e3edef6b7a12 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -516,6 +516,23 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
if (addr + old_len > new_addr && new_addr + new_len > addr)
goto out;
+ /*
+ * move_vma() needs us to stay 4 maps below the threshold, otherwise
+ * it will bail out at the very beginning.
+ * That is a problem if we have already unmapped the regions here
+ * (new_addr and old_addr), because userspace will not know the
+ * state of the VMAs after it gets -ENOMEM.
+ * So, to avoid such a scenario we can pre-compute whether the whole
+ * operation has a high chance of succeeding map-wise.
+ * The worst case is when both VMAs (new_addr and old_addr) get
+ * split in 3 before unmapping them.
+ * That means 2 more maps (1 for each) on top of the ones we already hold.
+ * Check whether current map count plus 2 still leaves us 4 maps below
+ * the threshold, otherwise return -ENOMEM here to be safe.
+ */
+ if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
+ return -ENOMEM;
+
ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
if (ret)
goto out;
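/*
 * Editor's sketch (illustrative only, not part of the patch): the map-count
 * pre-check added above, written out as a stand-alone helper.  'map_count'
 * and 'max_map_count' stand in for mm->map_count and sysctl_max_map_count;
 * the helper name is hypothetical.
 */
static inline bool mremap_fixed_lacks_map_headroom(int map_count,
						   int max_map_count)
{
	/*
	 * Unmapping the old and the new range can split one VMA each
	 * (up to 2 extra maps), and move_vma() additionally wants to stay
	 * several maps below the limit -- hence the "+ 2" on one side and
	 * the "- 3" on the other, mirroring the check above.
	 */
	return (map_count + 2) >= max_map_count - 3;
}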
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f0e8cd9edb1a..3a2484884cfd 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -647,8 +647,8 @@ static int oom_reaper(void *unused)
static void wake_oom_reaper(struct task_struct *tsk)
{
- /* tsk is already queued? */
- if (tsk == oom_reaper_list || tsk->oom_reaper_list)
+ /* mm is already queued? */
+ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
return;
get_task_struct(tsk);
@@ -843,7 +843,7 @@ static bool task_will_free_mem(struct task_struct *task)
return ret;
}
-static void __oom_kill_process(struct task_struct *victim)
+static void __oom_kill_process(struct task_struct *victim, const char *message)
{
struct task_struct *p;
struct mm_struct *mm;
@@ -874,8 +874,9 @@ static void __oom_kill_process(struct task_struct *victim)
*/
do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
mark_oom_victim(victim);
- pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
- task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
+ pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
+ message, task_pid_nr(victim), victim->comm,
+ K(victim->mm->total_vm),
K(get_mm_counter(victim->mm, MM_ANONPAGES)),
K(get_mm_counter(victim->mm, MM_FILEPAGES)),
K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
@@ -926,24 +927,20 @@ static void __oom_kill_process(struct task_struct *victim)
* Kill provided task unless it's secured by setting
* oom_score_adj to OOM_SCORE_ADJ_MIN.
*/
-static int oom_kill_memcg_member(struct task_struct *task, void *unused)
+static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
- if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
+ if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
+ !is_global_init(task)) {
get_task_struct(task);
- __oom_kill_process(task);
+ __oom_kill_process(task, message);
}
return 0;
}
static void oom_kill_process(struct oom_control *oc, const char *message)
{
- struct task_struct *p = oc->chosen;
- unsigned int points = oc->chosen_points;
- struct task_struct *victim = p;
- struct task_struct *child;
- struct task_struct *t;
+ struct task_struct *victim = oc->chosen;
struct mem_cgroup *oom_group;
- unsigned int victim_points = 0;
static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
@@ -952,49 +949,18 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
* its children or threads, just give it access to memory reserves
* so it can die quickly
*/
- task_lock(p);
- if (task_will_free_mem(p)) {
- mark_oom_victim(p);
- wake_oom_reaper(p);
- task_unlock(p);
- put_task_struct(p);
+ task_lock(victim);
+ if (task_will_free_mem(victim)) {
+ mark_oom_victim(victim);
+ wake_oom_reaper(victim);
+ task_unlock(victim);
+ put_task_struct(victim);
return;
}
- task_unlock(p);
+ task_unlock(victim);
if (__ratelimit(&oom_rs))
- dump_header(oc, p);
-
- pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
- message, task_pid_nr(p), p->comm, points);
-
- /*
- * If any of p's children has a different mm and is eligible for kill,
- * the one with the highest oom_badness() score is sacrificed for its
- * parent. This attempts to lose the minimal amount of work done while
- * still freeing memory.
- */
- read_lock(&tasklist_lock);
- for_each_thread(p, t) {
- list_for_each_entry(child, &t->children, sibling) {
- unsigned int child_points;
-
- if (process_shares_mm(child, p->mm))
- continue;
- /*
- * oom_badness() returns 0 if the thread is unkillable
- */
- child_points = oom_badness(child,
- oc->memcg, oc->nodemask, oc->totalpages);
- if (child_points > victim_points) {
- put_task_struct(victim);
- victim = child;
- victim_points = child_points;
- get_task_struct(victim);
- }
- }
- }
- read_unlock(&tasklist_lock);
+ dump_header(oc, victim);
/*
* Do we need to kill the entire memory cgroup?
@@ -1003,14 +969,15 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
*/
oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
- __oom_kill_process(victim);
+ __oom_kill_process(victim, message);
/*
* If necessary, kill all tasks in the selected memory cgroup.
*/
if (oom_group) {
mem_cgroup_print_oom_group(oom_group);
- mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member, NULL);
+ mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
+ (void*)message);
mem_cgroup_put(oom_group);
}
}
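/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * wake_oom_reaper() change above relies on the test_and_set_bit() idiom --
 * the first caller to set the bit wins and queues the object, later callers
 * see the bit already set and back off, with no extra locking.  All names
 * below are hypothetical.
 */
struct work_item {
	unsigned long flags;
};
#define ITEM_QUEUED_BIT	0

extern void add_to_queue(struct work_item *item);	/* hypothetical */

static void queue_once(struct work_item *item)
{
	/* Atomic read-modify-write: returns the previous value of the bit. */
	if (test_and_set_bit(ITEM_QUEUED_BIT, &item->flags))
		return;		/* somebody else already queued it */

	add_to_queue(item);
}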
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7d1010453fb9..9f61dfec6a1f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -270,7 +270,7 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
* node_dirtyable_memory - number of dirtyable pages in a node
* @pgdat: the node
*
- * Returns the node's number of pages potentially available for dirty
+ * Return: the node's number of pages potentially available for dirty
* page cache. This is the base value for the per-node dirty limits.
*/
static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
@@ -355,7 +355,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
/**
* global_dirtyable_memory - number of globally dirtyable pages
*
- * Returns the global number of pages potentially available for dirty
+ * Return: the global number of pages potentially available for dirty
* page cache. This is the base value for the global dirty limits.
*/
static unsigned long global_dirtyable_memory(void)
@@ -470,7 +470,7 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
* node_dirty_limit - maximum number of dirty pages allowed in a node
* @pgdat: the node
*
- * Returns the maximum number of dirty pages allowed in a node, based
+ * Return: the maximum number of dirty pages allowed in a node, based
* on the node's dirtyable memory.
*/
static unsigned long node_dirty_limit(struct pglist_data *pgdat)
@@ -495,7 +495,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
* node_dirty_ok - tells whether a node is within its dirty limits
* @pgdat: the node to check
*
- * Returns %true when the dirty pages in @pgdat are within the node's
+ * Return: %true when the dirty pages in @pgdat are within the node's
* dirty limit, %false if the limit is exceeded.
*/
bool node_dirty_ok(struct pglist_data *pgdat)
@@ -743,9 +743,6 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
* __wb_calc_thresh - @wb's share of dirty throttling threshold
* @dtc: dirty_throttle_context of interest
*
- * Returns @wb's dirty limit in pages. The term "dirty" in the context of
- * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- *
* Note that balance_dirty_pages() will only seriously take it as a hard limit
* when sleeping max_pause per page is not enough to keep the dirty pages under
* control. For example, when the device is completely stalled due to some error
@@ -759,6 +756,9 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
*
* The wb's share of dirty limit will be adapting to its throughput and
* bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
+ *
+ * Return: @wb's dirty limit in pages. The term "dirty" in the context of
+ * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
*/
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
@@ -1918,7 +1918,9 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
* @wb: bdi_writeback of interest
*
* Determines whether background writeback should keep writing @wb or it's
- * clean enough. Returns %true if writeback should continue.
+ * clean enough.
+ *
+ * Return: %true if writeback should continue.
*/
bool wb_over_bg_thresh(struct bdi_writeback *wb)
{
@@ -2147,6 +2149,8 @@ EXPORT_SYMBOL(tag_pages_for_writeback);
* lock/page writeback access order inversion - we should only ever lock
* multiple pages in ascending page->index order, and looping back to the start
* of the file violates that rule and causes deadlocks.
+ *
+ * Return: %0 on success, negative error code otherwise
*/
int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
@@ -2305,6 +2309,8 @@ static int __writepage(struct page *page, struct writeback_control *wbc,
*
* This is a library function, which implements the writepages()
* address_space_operation.
+ *
+ * Return: %0 on success, negative error code otherwise
*/
int generic_writepages(struct address_space *mapping,
struct writeback_control *wbc)
@@ -2351,6 +2357,8 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
*
* Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
* function returns.
+ *
+ * Return: %0 on success, negative error code otherwise
*/
int write_one_page(struct page *page)
{
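/*
 * Editor's note (illustrative, not part of the patch): the hunks above
 * convert free-form "Returns ..." prose into the structured kernel-doc
 * "Return:" section.  A minimal kernel-doc comment in that style, for a
 * hypothetical function:
 */

/**
 * example_count_widgets - count widgets attached to a device
 * @dev: device to inspect
 *
 * Walks the widget list of @dev; may sleep.
 *
 * Return: the number of widgets, or a negative errno on failure.
 */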
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cde5dac6229a..3eb01dedfb50 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -289,8 +289,8 @@ EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
-int nr_online_nodes __read_mostly = 1;
+unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
+unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -789,6 +789,57 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
return 0;
}
+#ifdef CONFIG_COMPACTION
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+ struct capture_control *capc = current->capture_control;
+
+ return capc &&
+ !(current->flags & PF_KTHREAD) &&
+ !capc->page &&
+ capc->cc->zone == zone &&
+ capc->cc->direct_compaction ? capc : NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+ int order, int migratetype)
+{
+ if (!capc || order != capc->cc->order)
+ return false;
+
+ /* Do not accidentally pollute CMA or isolated regions */
+ if (is_migrate_cma(migratetype) ||
+ is_migrate_isolate(migratetype))
+ return false;
+
+ /*
+ * Do not let lower order allocations pollute a movable pageblock.
+ * This might let an unmovable request use a reclaimable pageblock
+ * and vice-versa but no more than normal fallback logic which can
+ * have trouble finding a high-order free page.
+ */
+ if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
+ return false;
+
+ capc->page = page;
+ return true;
+}
+
+#else
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+ return NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+ int order, int migratetype)
+{
+ return false;
+}
+#endif /* CONFIG_COMPACTION */
+
/*
* Freeing function for a buddy system allocator.
*
@@ -822,6 +873,7 @@ static inline void __free_one_page(struct page *page,
unsigned long uninitialized_var(buddy_pfn);
struct page *buddy;
unsigned int max_order;
+ struct capture_control *capc = task_capc(zone);
max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
@@ -837,6 +889,11 @@ static inline void __free_one_page(struct page *page,
continue_merging:
while (order < max_order - 1) {
+ if (compaction_capture(capc, page, order, migratetype)) {
+ __mod_zone_freepage_state(zone, -(1 << order),
+ migratetype);
+ return;
+ }
buddy_pfn = __find_buddy_pfn(pfn, order);
buddy = page + (buddy_pfn - pfn);
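/*
 * Editor's sketch (illustrative only, not part of the patch): roughly how
 * the compaction side is expected to use the capture hook added above.
 * The ->cc and ->page fields and current->capture_control come from the
 * hunks themselves; run_compaction() and the rest are hypothetical and
 * simplified (the real code lives in mm/compaction.c).
 */
static struct page *compact_and_capture(struct compact_control *cc)
{
	struct capture_control capc = {
		.cc = cc,
		.page = NULL,
	};

	/* Publish the capture slot so __free_one_page() can fill it. */
	current->capture_control = &capc;

	run_compaction(cc);

	/* Unpublish before anything else can free into a stale slot. */
	current->capture_control = NULL;

	/* If a page of cc->order was freed meanwhile, it is now ours. */
	return capc.page;
}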
@@ -1056,7 +1113,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
if (PageMappingFlags(page))
page->mapping = NULL;
if (memcg_kmem_enabled() && PageKmemcg(page))
- memcg_kmem_uncharge(page, order);
+ __memcg_kmem_uncharge(page, order);
if (check_free)
bad += free_pages_check(page);
if (bad)
@@ -1303,7 +1360,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}
-static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+void __free_pages_core(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
struct page *p = page;
@@ -1382,7 +1439,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
{
if (early_page_uninitialised(pfn))
return;
- return __free_pages_boot_core(page, order);
+ __free_pages_core(page, order);
}
/*
@@ -1472,14 +1529,14 @@ static void __init deferred_free_range(unsigned long pfn,
if (nr_pages == pageblock_nr_pages &&
(pfn & (pageblock_nr_pages - 1)) == 0) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
- __free_pages_boot_core(page, pageblock_order);
+ __free_pages_core(page, pageblock_order);
return;
}
for (i = 0; i < nr_pages; i++, page++, pfn++) {
if ((pfn & (pageblock_nr_pages - 1)) == 0)
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
- __free_pages_boot_core(page, 0);
+ __free_pages_core(page, 0);
}
}
@@ -1945,8 +2002,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
- kernel_poison_pages(page, 1 << order, 1);
kasan_alloc_pages(page, order);
+ kernel_poison_pages(page, 1 << order, 1);
set_page_owner(page, order, gfp_flags);
}
@@ -2170,6 +2227,18 @@ static inline void boost_watermark(struct zone *zone)
max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
watermark_boost_factor, 10000);
+
+ /*
+ * The high watermark may be uninitialised if fragmentation occurs
+ * very early in boot, so do not boost. We do not fall
+ * through and boost by pageblock_nr_pages because failing
+ * allocations that early means reclaim is not going
+ * to help, and it may even be impossible to reclaim the
+ * boosted watermark, resulting in a hang.
+ */
+ if (!max_boost)
+ return;
+
max_boost = max(pageblock_nr_pages, max_boost);
zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
@@ -2214,7 +2283,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
*/
boost_watermark(zone);
if (alloc_flags & ALLOC_KSWAPD)
- wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+ set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
/* We are not allowed to try stealing from the whole block */
if (!whole_block)
@@ -2950,7 +3019,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
* watermark, because we already know our high-order page
* exists.
*/
- watermark = min_wmark_pages(zone) + (1UL << order);
+ watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
return 0;
@@ -3102,6 +3171,12 @@ struct page *rmqueue(struct zone *preferred_zone,
local_irq_restore(flags);
out:
+ /* Separate test+clear to avoid unnecessary atomics */
+ if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
+ clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+ wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+ }
+
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
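/*
 * Editor's sketch (illustrative only, not part of the patch): the pattern
 * used above -- the context that must not do extra work under zone->lock
 * just sets a flag, and a later, safer context notices the flag and does
 * the real wakeup.  Testing the bit before clearing it keeps the common
 * "flag not set" path free of an atomic read-modify-write.  All names
 * below are hypothetical.
 */
struct my_zone {
	unsigned long flags;
};
#define MYZONE_NEEDS_KICK	0

extern void kick_background_worker(struct my_zone *z);	/* hypothetical */

static void producer_under_lock(struct my_zone *z)
{
	set_bit(MYZONE_NEEDS_KICK, &z->flags);	/* cheap, lock/irq safe */
}

static void consumer_in_safe_context(struct my_zone *z)
{
	/* Separate test+clear, as in rmqueue() above. */
	if (test_bit(MYZONE_NEEDS_KICK, &z->flags)) {
		clear_bit(MYZONE_NEEDS_KICK, &z->flags);
		kick_background_worker(z);
	}
}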
@@ -3155,24 +3230,14 @@ static int __init fail_page_alloc_debugfs(void)
dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
&fail_page_alloc.attr);
- if (IS_ERR(dir))
- return PTR_ERR(dir);
-
- if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
- &fail_page_alloc.ignore_gfp_reclaim))
- goto fail;
- if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
- &fail_page_alloc.ignore_gfp_highmem))
- goto fail;
- if (!debugfs_create_u32("min-order", mode, dir,
- &fail_page_alloc.min_order))
- goto fail;
- return 0;
-fail:
- debugfs_remove_recursive(dir);
+ debugfs_create_bool("ignore-gfp-wait", mode, dir,
+ &fail_page_alloc.ignore_gfp_reclaim);
+ debugfs_create_bool("ignore-gfp-highmem", mode, dir,
+ &fail_page_alloc.ignore_gfp_highmem);
+ debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
- return -ENOMEM;
+ return 0;
}
late_initcall(fail_page_alloc_debugfs);
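/*
 * Editor's note (illustrative, not part of the patch): the hunk above
 * follows the newer debugfs style -- debugfs_create_*() calls are
 * best-effort and their return values are not checked or unwound.
 * A minimal init in that style; all names below are hypothetical.
 */
static bool example_enabled;
static u32 example_threshold;

static int __init example_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	debugfs_create_bool("enabled", 0600, dir, &example_enabled);
	debugfs_create_u32("threshold", 0600, dir, &example_threshold);

	/* If debugfs is disabled or creation fails, these are no-ops. */
	return 0;
}
late_initcall(example_debugfs_init);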
@@ -3692,7 +3757,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
enum compact_priority prio, enum compact_result *compact_result)
{
- struct page *page;
+ struct page *page = NULL;
unsigned long pflags;
unsigned int noreclaim_flag;
@@ -3703,13 +3768,15 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
noreclaim_flag = memalloc_noreclaim_save();
*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
- prio);
+ prio, &page);
memalloc_noreclaim_restore(noreclaim_flag);
psi_memstall_leave(&pflags);
- if (*compact_result <= COMPACT_INACTIVE)
+ if (*compact_result <= COMPACT_INACTIVE) {
+ WARN_ON_ONCE(page);
return NULL;
+ }
/*
* At least in one zone compaction wasn't deferred or skipped, so let's
@@ -3717,7 +3784,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
*/
count_vm_event(COMPACTSTALL);
- page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
+ /* Prep a captured page if available */
+ if (page)
+ prep_new_page(page, order, gfp_mask, alloc_flags);
+
+ /* Try get a page from the freelist if available */
+ if (!page)
+ page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
if (page) {
struct zone *zone = page_zone(page);
@@ -4550,7 +4623,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
out:
if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
- unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+ unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
__free_pages(page, order);
page = NULL;
}
@@ -4669,11 +4742,11 @@ refill:
/* Even if we own the page, we do not use atomic_set().
* This would break get_page_unless_zero() users.
*/
- page_ref_add(page, size - 1);
+ page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
/* reset page count bias and offset to start of new frag */
nc->pfmemalloc = page_is_pfmemalloc(page);
- nc->pagecnt_bias = size;
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
nc->offset = size;
}
@@ -4689,10 +4762,10 @@ refill:
size = nc->size;
#endif
/* OK, page count is 0, we can safely set it */
- set_page_count(page, size);
+ set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
/* reset page count bias and offset to start of new frag */
- nc->pagecnt_bias = size;
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
offset = size - fragsz;
}
@@ -4743,6 +4816,8 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
* This function is also limited by MAX_ORDER.
*
* Memory allocated by this function must be released by free_pages_exact().
+ *
+ * Return: pointer to the allocated area or %NULL in case of error.
*/
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
@@ -4763,6 +4838,8 @@ EXPORT_SYMBOL(alloc_pages_exact);
*
* Like alloc_pages_exact(), but try to allocate on node nid first before falling
* back.
+ *
+ * Return: pointer to the allocated area or %NULL in case of error.
*/
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
@@ -4796,11 +4873,13 @@ EXPORT_SYMBOL(free_pages_exact);
* nr_free_zone_pages - count number of pages beyond high watermark
* @offset: The zone index of the highest zone
*
- * nr_free_zone_pages() counts the number of counts pages which are beyond the
+ * nr_free_zone_pages() counts the number of pages which are beyond the
* high watermark within all zones at or below a given zone index. For each
* zone, the number of pages is calculated as:
*
* nr_free_zone_pages = managed_pages - high_pages
+ *
+ * Return: number of pages beyond high watermark.
*/
static unsigned long nr_free_zone_pages(int offset)
{
@@ -4827,6 +4906,9 @@ static unsigned long nr_free_zone_pages(int offset)
*
* nr_free_buffer_pages() counts the number of pages which are beyond the high
* watermark within ZONE_DMA and ZONE_NORMAL.
+ *
+ * Return: number of pages beyond high watermark within ZONE_DMA and
+ * ZONE_NORMAL.
*/
unsigned long nr_free_buffer_pages(void)
{
@@ -4839,6 +4921,8 @@ EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
*
* nr_free_pagecache_pages() counts the number of pages which are beyond the
* high watermark within all zones.
+ *
+ * Return: number of pages beyond high watermark within all zones.
*/
unsigned long nr_free_pagecache_pages(void)
{
@@ -5285,7 +5369,8 @@ static int node_load[MAX_NUMNODES];
* from each node to each node in the system), and should also prefer nodes
* with no CPUs, since presumably they'll have very little allocation pressure
* on them otherwise.
- * It returns -1 if no node is found.
+ *
+ * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
*/
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
@@ -5591,7 +5676,7 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
else
page_group_by_mobility_disabled = 0;
- pr_info("Built %i zonelists, mobility grouping %s. Total pages: %ld\n",
+ pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
nr_online_nodes,
page_group_by_mobility_disabled ? "off" : "on",
vm_total_pages);
@@ -5695,18 +5780,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
cond_resched();
}
}
-#ifdef CONFIG_SPARSEMEM
- /*
- * If the zone does not span the rest of the section then
- * we should at least initialize those pages. Otherwise we
- * could blow up on a poisoned page in some paths which depend
- * on full sections being initialized (e.g. memory hotplug).
- */
- while (end_pfn % PAGES_PER_SECTION) {
- __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
- end_pfn++;
- }
-#endif
}
#ifdef CONFIG_ZONE_DEVICE
@@ -6010,7 +6083,7 @@ int __meminit __early_pfn_to_nid(unsigned long pfn,
return state->last_nid;
nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
- if (nid != -1) {
+ if (nid != NUMA_NO_NODE) {
state->last_start = start_pfn;
state->last_end = end_pfn;
state->last_nid = nid;
@@ -6208,7 +6281,7 @@ unsigned long __init __absent_pages_in_range(int nid,
* @start_pfn: The start PFN to start searching for holes
* @end_pfn: The end PFN to stop searching for holes
*
- * It returns the number of pages frames in memory holes within a range.
+ * Return: the number of page frames in memory holes within a range.
*/
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn)
@@ -6370,10 +6443,14 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
{
unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
zone->pageblock_flags = NULL;
- if (usemapsize)
+ if (usemapsize) {
zone->pageblock_flags =
memblock_alloc_node_nopanic(usemapsize,
pgdat->node_id);
+ if (!zone->pageblock_flags)
+ panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
+ usemapsize, zone->name, pgdat->node_id);
+ }
}
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
@@ -6603,6 +6680,9 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
end = ALIGN(end, MAX_ORDER_NR_PAGES);
size = (end - start) * sizeof(struct page);
map = memblock_alloc_node_nopanic(size, pgdat->node_id);
+ if (!map)
+ panic("Failed to allocate %ld bytes for node %d memory map\n",
+ size, pgdat->node_id);
pgdat->node_mem_map = map + offset;
}
pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
@@ -6758,14 +6838,14 @@ void __init setup_nr_node_ids(void)
* model has fine enough granularity to avoid incorrect mapping for the
* populated node map.
*
- * Returns the determined alignment in pfn's. 0 if there is no alignment
+ * Return: the determined alignment in pfn's. 0 if there is no alignment
* requirement (single node).
*/
unsigned long __init node_map_pfn_alignment(void)
{
unsigned long accl_mask = 0, last_end = 0;
unsigned long start, end, mask;
- int last_nid = -1;
+ int last_nid = NUMA_NO_NODE;
int i, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
@@ -6813,7 +6893,7 @@ static unsigned long __init find_min_pfn_for_node(int nid)
/**
* find_min_pfn_with_active_regions - Find the minimum PFN registered
*
- * It returns the minimum PFN based on information provided via
+ * Return: the minimum PFN based on information provided via
* memblock_set_node().
*/
unsigned long __init find_min_pfn_with_active_regions(void)
@@ -7261,7 +7341,6 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
return pages;
}
-EXPORT_SYMBOL(free_reserved_area);
#ifdef CONFIG_HIGHMEM
void free_highmem_page(struct page *page)
@@ -7490,7 +7569,7 @@ static void __setup_per_zone_wmarks(void)
* value here.
*
* The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
- * deltas control asynch page reclaim, and so should
+ * deltas control async page reclaim, and so should
* not be capped for highmem.
*/
unsigned long min_pages;
@@ -7967,7 +8046,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
/*
* Hugepages are not in LRU lists, but they're movable.
- * We need not scan over tail pages bacause we don't
+ * We need not scan over tail pages because we don't
* handle each tail page individually in migration.
*/
if (PageHuge(page)) {
@@ -8106,7 +8185,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
* pageblocks in the range. Once isolated, the pageblocks should not
* be modified by others.
*
- * Returns zero on success or negative error code. On success all
+ * Return: zero on success or negative error code. On success all
* pages which PFN is in [start, end) are allocated for the caller and
* need to be freed with free_contig_range().
*/
@@ -8190,7 +8269,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
*/
lru_add_drain_all();
- drain_all_pages(cc.zone);
order = 0;
outer_start = start;
diff --git a/mm/page_ext.c b/mm/page_ext.c
index ae44f7adbe07..ab4244920e0f 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -273,6 +273,7 @@ static void free_page_ext(void *addr)
table_size = get_entry_size() * PAGES_PER_SECTION;
BUG_ON(PageReserved(page));
+ kmemleak_free(addr);
free_pages_exact(addr, table_size);
}
}
@@ -300,7 +301,7 @@ static int __meminit online_page_ext(unsigned long start_pfn,
start = SECTION_ALIGN_DOWN(start_pfn);
end = SECTION_ALIGN_UP(start_pfn + nr_pages);
- if (nid == -1) {
+ if (nid == NUMA_NO_NODE) {
/*
* In this case, "nid" already exists and contains valid memory.
* "start_pfn" passed to us is a pfn which is an arg for
@@ -398,10 +399,8 @@ void __init page_ext_init(void)
* We know some arch can have a nodes layout such as
* -------------pfn-------------->
* N0 | N1 | N2 | N0 | N1 | N2|....
- *
- * Take into account DEFERRED_STRUCT_PAGE_INIT.
*/
- if (early_pfn_to_nid(pfn) != nid)
+ if (pfn_to_nid(pfn) != nid)
continue;
if (init_section_page_ext(pfn, nid))
goto oom;
diff --git a/mm/page_idle.c b/mm/page_idle.c
index b9e4b42b33ab..0b39ec0c945c 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -31,7 +31,7 @@
static struct page *page_idle_get_page(unsigned long pfn)
{
struct page *page;
- struct zone *zone;
+ pg_data_t *pgdat;
if (!pfn_valid(pfn))
return NULL;
@@ -41,13 +41,13 @@ static struct page *page_idle_get_page(unsigned long pfn)
!get_page_unless_zero(page))
return NULL;
- zone = page_zone(page);
- spin_lock_irq(zone_lru_lock(zone));
+ pgdat = page_pgdat(page);
+ spin_lock_irq(&pgdat->lru_lock);
if (unlikely(!PageLRU(page))) {
put_page(page);
page = NULL;
}
- spin_unlock_irq(zone_lru_lock(zone));
+ spin_unlock_irq(&pgdat->lru_lock);
return page;
}
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 28b06524939f..925b6f44a444 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -625,16 +625,14 @@ static const struct file_operations proc_page_owner_operations = {
static int __init pageowner_init(void)
{
- struct dentry *dentry;
-
if (!static_branch_unlikely(&page_owner_inited)) {
pr_info("page_owner is disabled\n");
return 0;
}
- dentry = debugfs_create_file("page_owner", 0400, NULL,
- NULL, &proc_page_owner_operations);
+ debugfs_create_file("page_owner", 0400, NULL, NULL,
+ &proc_page_owner_operations);
- return PTR_ERR_OR_ZERO(dentry);
+ return 0;
}
late_initcall(pageowner_init)
diff --git a/mm/page_poison.c b/mm/page_poison.c
index f0c15e9017c0..21d4f97cb49b 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -6,6 +6,7 @@
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
+#include <linux/kasan.h>
static bool want_page_poisoning __read_mostly;
@@ -40,7 +41,10 @@ static void poison_page(struct page *page)
{
void *addr = kmap_atomic(page);
+ /* KASAN still thinks the page is in use, so skip it. */
+ kasan_disable_current();
memset(addr, PAGE_POISON, PAGE_SIZE);
+ kasan_enable_current();
kunmap_atomic(addr);
}
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 0f643dc2dc65..b68d5df14731 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -67,7 +67,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
pcpu_set_page_chunk(nth_page(pages, i), chunk);
chunk->data = pages;
- chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
+ chunk->base_addr = page_address(pages);
spin_lock_irqsave(&pcpu_lock, flags);
pcpu_chunk_populated(chunk, 0, nr_pages, false);
diff --git a/mm/percpu.c b/mm/percpu.c
index db86282fd024..c5c750781628 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2384,7 +2384,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
ai->atom_size = atom_size;
ai->alloc_size = alloc_size;
- for (group = 0, unit = 0; group_cnt[group]; group++) {
+ for (group = 0, unit = 0; group < nr_groups; group++) {
struct pcpu_group_info *gi = &ai->groups[group];
/*
diff --git a/mm/readahead.c b/mm/readahead.c
index 1ae16522412a..a4593654a26c 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -81,6 +81,8 @@ static void read_cache_pages_invalidate_pages(struct address_space *mapping,
* @data: private data for the callback routine.
*
* Hides the details of the LRU cache etc from the filesystems.
+ *
+ * Return: %0 on success, or the error returned by @filler otherwise
*/
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
int (*filler)(void *, struct page *), void *data)
diff --git a/mm/rmap.c b/mm/rmap.c
index 21a26cf51114..b30c7c71d1d9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,10 +25,9 @@
* page->flags PG_locked (lock_page)
* hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
* mapping->i_mmap_rwsem
- * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
- * zone_lru_lock (in mark_page_accessed, isolate_lru_page)
+ * pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
* swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
* mapping->private_lock (in __set_page_dirty_buffers)
@@ -1372,16 +1371,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Note that the page can not be free in this function as call of
* try_to_unmap() must hold a reference on the page.
*/
- mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start,
- min(vma->vm_end, vma->vm_start +
+ mmu_notifier_range_init(&range, vma->vm_mm, address,
+ min(vma->vm_end, address +
(PAGE_SIZE << compound_order(page))));
if (PageHuge(page)) {
/*
* If sharing is possible, start and end will be adjusted
* accordingly.
- *
- * If called for a huge page, caller must hold i_mmap_rwsem
- * in write mode as it is possible to call huge_pmd_unshare.
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
diff --git a/mm/shmem.c b/mm/shmem.c
index 6ece1e2fe76e..b3db3779a30a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -36,6 +36,7 @@
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
+#include <linux/frontswap.h>
#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
@@ -123,6 +124,10 @@ static unsigned long shmem_default_max_inodes(void)
static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index);
+static int shmem_swapin_page(struct inode *inode, pgoff_t index,
+ struct page **pagep, enum sgp_type sgp,
+ gfp_t gfp, struct vm_area_struct *vma,
+ vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp,
gfp_t gfp, struct vm_area_struct *vma,
@@ -1089,159 +1094,184 @@ static void shmem_evict_inode(struct inode *inode)
clear_inode(inode);
}
-static unsigned long find_swap_entry(struct xarray *xa, void *item)
+extern struct swap_info_struct *swap_info[];
+
+static int shmem_find_swap_entries(struct address_space *mapping,
+ pgoff_t start, unsigned int nr_entries,
+ struct page **entries, pgoff_t *indices,
+ bool frontswap)
{
- XA_STATE(xas, xa, 0);
- unsigned int checked = 0;
- void *entry;
+ XA_STATE(xas, &mapping->i_pages, start);
+ struct page *page;
+ unsigned int ret = 0;
+
+ if (!nr_entries)
+ return 0;
rcu_read_lock();
- xas_for_each(&xas, entry, ULONG_MAX) {
- if (xas_retry(&xas, entry))
+ xas_for_each(&xas, page, ULONG_MAX) {
+ if (xas_retry(&xas, page))
continue;
- if (entry == item)
- break;
- checked++;
- if ((checked % XA_CHECK_SCHED) != 0)
+
+ if (!xa_is_value(page))
continue;
- xas_pause(&xas);
- cond_resched_rcu();
+
+ if (frontswap) {
+ swp_entry_t entry = radix_to_swp_entry(page);
+
+ if (!frontswap_test(swap_info[swp_type(entry)],
+ swp_offset(entry)))
+ continue;
+ }
+
+ indices[ret] = xas.xa_index;
+ entries[ret] = page;
+
+ if (need_resched()) {
+ xas_pause(&xas);
+ cond_resched_rcu();
+ }
+ if (++ret == nr_entries)
+ break;
}
rcu_read_unlock();
- return entry ? xas.xa_index : -1;
+ return ret;
}
/*
- * If swap found in inode, free it and move page from swapcache to filecache.
+ * Move the swapped pages for an inode to page cache. Returns the count
+ * of pages swapped in, or the error in case of failure.
*/
-static int shmem_unuse_inode(struct shmem_inode_info *info,
- swp_entry_t swap, struct page **pagep)
+static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
+ pgoff_t *indices)
{
- struct address_space *mapping = info->vfs_inode.i_mapping;
- void *radswap;
- pgoff_t index;
- gfp_t gfp;
+ int i = 0;
+ int ret = 0;
int error = 0;
+ struct address_space *mapping = inode->i_mapping;
- radswap = swp_to_radix_entry(swap);
- index = find_swap_entry(&mapping->i_pages, radswap);
- if (index == -1)
- return -EAGAIN; /* tell shmem_unuse we found nothing */
-
- /*
- * Move _head_ to start search for next from here.
- * But be careful: shmem_evict_inode checks list_empty without taking
- * mutex, and there's an instant in list_move_tail when info->swaplist
- * would appear empty, if it were the only one on shmem_swaplist.
- */
- if (shmem_swaplist.next != &info->swaplist)
- list_move_tail(&shmem_swaplist, &info->swaplist);
+ for (i = 0; i < pvec.nr; i++) {
+ struct page *page = pvec.pages[i];
- gfp = mapping_gfp_mask(mapping);
- if (shmem_should_replace_page(*pagep, gfp)) {
- mutex_unlock(&shmem_swaplist_mutex);
- error = shmem_replace_page(pagep, gfp, info, index);
- mutex_lock(&shmem_swaplist_mutex);
- /*
- * We needed to drop mutex to make that restrictive page
- * allocation, but the inode might have been freed while we
- * dropped it: although a racing shmem_evict_inode() cannot
- * complete without emptying the page cache, our page lock
- * on this swapcache page is not enough to prevent that -
- * free_swap_and_cache() of our swap entry will only
- * trylock_page(), removing swap from page cache whatever.
- *
- * We must not proceed to shmem_add_to_page_cache() if the
- * inode has been freed, but of course we cannot rely on
- * inode or mapping or info to check that. However, we can
- * safely check if our swap entry is still in use (and here
- * it can't have got reused for another page): if it's still
- * in use, then the inode cannot have been freed yet, and we
- * can safely proceed (if it's no longer in use, that tells
- * nothing about the inode, but we don't need to unuse swap).
- */
- if (!page_swapcount(*pagep))
- error = -ENOENT;
+ if (!xa_is_value(page))
+ continue;
+ error = shmem_swapin_page(inode, indices[i],
+ &page, SGP_CACHE,
+ mapping_gfp_mask(mapping),
+ NULL, NULL);
+ if (error == 0) {
+ unlock_page(page);
+ put_page(page);
+ ret++;
+ }
+ if (error == -ENOMEM)
+ break;
+ error = 0;
}
+ return error ? error : ret;
+}
- /*
- * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
- * but also to hold up shmem_evict_inode(): so inode cannot be freed
- * beneath us (pagelock doesn't help until the page is in pagecache).
- */
- if (!error)
- error = shmem_add_to_page_cache(*pagep, mapping, index,
- radswap, gfp);
- if (error != -ENOMEM) {
- /*
- * Truncation and eviction use free_swap_and_cache(), which
- * only does trylock page: if we raced, best clean up here.
- */
- delete_from_swap_cache(*pagep);
- set_page_dirty(*pagep);
- if (!error) {
- spin_lock_irq(&info->lock);
- info->swapped--;
- spin_unlock_irq(&info->lock);
- swap_free(swap);
+/*
+ * If swap found in inode, free it and move page from swapcache to filecache.
+ */
+static int shmem_unuse_inode(struct inode *inode, unsigned int type,
+ bool frontswap, unsigned long *fs_pages_to_unuse)
+{
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t start = 0;
+ struct pagevec pvec;
+ pgoff_t indices[PAGEVEC_SIZE];
+ bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
+ int ret = 0;
+
+ pagevec_init(&pvec);
+ do {
+ unsigned int nr_entries = PAGEVEC_SIZE;
+
+ if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
+ nr_entries = *fs_pages_to_unuse;
+
+ pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
+ pvec.pages, indices,
+ frontswap);
+ if (pvec.nr == 0) {
+ ret = 0;
+ break;
}
- }
- return error;
+
+ ret = shmem_unuse_swap_entries(inode, pvec, indices);
+ if (ret < 0)
+ break;
+
+ if (frontswap_partial) {
+ *fs_pages_to_unuse -= ret;
+ if (*fs_pages_to_unuse == 0) {
+ ret = FRONTSWAP_PAGES_UNUSED;
+ break;
+ }
+ }
+
+ start = indices[pvec.nr - 1];
+ } while (true);
+
+ return ret;
}
/*
- * Search through swapped inodes to find and replace swap by page.
+ * Read all the shared memory data that resides in the swap
+ * device 'type' back into memory, so the swap device can be
+ * unused.
*/
-int shmem_unuse(swp_entry_t swap, struct page *page)
+int shmem_unuse(unsigned int type, bool frontswap,
+ unsigned long *fs_pages_to_unuse)
{
- struct list_head *this, *next;
- struct shmem_inode_info *info;
- struct mem_cgroup *memcg;
+ struct shmem_inode_info *info, *next;
+ struct inode *inode;
+ struct inode *prev_inode = NULL;
int error = 0;
- /*
- * There's a faint possibility that swap page was replaced before
- * caller locked it: caller will come back later with the right page.
- */
- if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
- goto out;
+ if (list_empty(&shmem_swaplist))
+ return 0;
+
+ mutex_lock(&shmem_swaplist_mutex);
/*
- * Charge page using GFP_KERNEL while we can wait, before taking
- * the shmem_swaplist_mutex which might hold up shmem_writepage().
- * Charged back to the user (not to caller) when swap account is used.
+ * The extra refcount on the inode is necessary to safely dereference
+ * the next list entry after re-acquiring the lock. New shmem inodes with
+ * swap get added to the end of the list and we will scan them all.
*/
- error = mem_cgroup_try_charge_delay(page, current->mm, GFP_KERNEL,
- &memcg, false);
- if (error)
- goto out;
- /* No memory allocation: swap entry occupies the slot for the page */
- error = -EAGAIN;
-
- mutex_lock(&shmem_swaplist_mutex);
- list_for_each_safe(this, next, &shmem_swaplist) {
- info = list_entry(this, struct shmem_inode_info, swaplist);
- if (info->swapped)
- error = shmem_unuse_inode(info, swap, &page);
- else
+ list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
+ if (!info->swapped) {
list_del_init(&info->swaplist);
+ continue;
+ }
+
+ inode = igrab(&info->vfs_inode);
+ if (!inode)
+ continue;
+
+ mutex_unlock(&shmem_swaplist_mutex);
+ if (prev_inode)
+ iput(prev_inode);
+ prev_inode = inode;
+
+ error = shmem_unuse_inode(inode, type, frontswap,
+ fs_pages_to_unuse);
cond_resched();
- if (error != -EAGAIN)
+
+ mutex_lock(&shmem_swaplist_mutex);
+ next = list_next_entry(info, swaplist);
+ if (!info->swapped)
+ list_del_init(&info->swaplist);
+ if (error)
break;
- /* found nothing in this: move on to search the next */
}
mutex_unlock(&shmem_swaplist_mutex);
- if (error) {
- if (error != -ENOMEM)
- error = 0;
- mem_cgroup_cancel_charge(page, memcg, false);
- } else
- mem_cgroup_commit_charge(page, memcg, true, false);
-out:
- unlock_page(page);
- put_page(page);
+ if (prev_inode)
+ iput(prev_inode);
+
return error;
}
@@ -1325,7 +1355,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
*/
mutex_lock(&shmem_swaplist_mutex);
if (list_empty(&info->swaplist))
- list_add_tail(&info->swaplist, &shmem_swaplist);
+ list_add(&info->swaplist, &shmem_swaplist);
if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
spin_lock_irq(&info->lock);
@@ -1576,6 +1606,116 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
}
/*
+ * Swap in the page pointed to by *pagep.
+ * Caller has to make sure that *pagep contains a valid swapped page.
+ * Returns 0 and the page in pagep if success. On failure, returns the
+ * the error code and NULL in *pagep.
+ */
+static int shmem_swapin_page(struct inode *inode, pgoff_t index,
+ struct page **pagep, enum sgp_type sgp,
+ gfp_t gfp, struct vm_area_struct *vma,
+ vm_fault_t *fault_type)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
+ struct mem_cgroup *memcg;
+ struct page *page;
+ swp_entry_t swap;
+ int error;
+
+ VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
+ swap = radix_to_swp_entry(*pagep);
+ *pagep = NULL;
+
+ /* Look it up and read it in.. */
+ page = lookup_swap_cache(swap, NULL, 0);
+ if (!page) {
+ /* Or update major stats only when swapin succeeds?? */
+ if (fault_type) {
+ *fault_type |= VM_FAULT_MAJOR;
+ count_vm_event(PGMAJFAULT);
+ count_memcg_event_mm(charge_mm, PGMAJFAULT);
+ }
+ /* Here we actually start the io */
+ page = shmem_swapin(swap, gfp, info, index);
+ if (!page) {
+ error = -ENOMEM;
+ goto failed;
+ }
+ }
+
+ /* We have to do this with page locked to prevent races */
+ lock_page(page);
+ if (!PageSwapCache(page) || page_private(page) != swap.val ||
+ !shmem_confirm_swap(mapping, index, swap)) {
+ error = -EEXIST;
+ goto unlock;
+ }
+ if (!PageUptodate(page)) {
+ error = -EIO;
+ goto failed;
+ }
+ wait_on_page_writeback(page);
+
+ if (shmem_should_replace_page(page, gfp)) {
+ error = shmem_replace_page(&page, gfp, info, index);
+ if (error)
+ goto failed;
+ }
+
+ error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
+ false);
+ if (!error) {
+ error = shmem_add_to_page_cache(page, mapping, index,
+ swp_to_radix_entry(swap), gfp);
+ /*
+ * We already confirmed swap under page lock, and make
+ * no memory allocation here, so usually no possibility
+ * of error; but free_swap_and_cache() only trylocks a
+ * page, so it is just possible that the entry has been
+ * truncated or holepunched since swap was confirmed.
+ * shmem_undo_range() will have done some of the
+ * unaccounting, now delete_from_swap_cache() will do
+ * the rest.
+ */
+ if (error) {
+ mem_cgroup_cancel_charge(page, memcg, false);
+ delete_from_swap_cache(page);
+ }
+ }
+ if (error)
+ goto failed;
+
+ mem_cgroup_commit_charge(page, memcg, true, false);
+
+ spin_lock_irq(&info->lock);
+ info->swapped--;
+ shmem_recalc_inode(inode);
+ spin_unlock_irq(&info->lock);
+
+ if (sgp == SGP_WRITE)
+ mark_page_accessed(page);
+
+ delete_from_swap_cache(page);
+ set_page_dirty(page);
+ swap_free(swap);
+
+ *pagep = page;
+ return 0;
+failed:
+ if (!shmem_confirm_swap(mapping, index, swap))
+ error = -EEXIST;
+unlock:
+ if (page) {
+ unlock_page(page);
+ put_page(page);
+ }
+
+ return error;
+}
+
+/*
* shmem_getpage_gfp - find page in cache, or get from swap, or allocate
*
* If we allocate a new one we do not mark it dirty. That's up to the
@@ -1596,7 +1736,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct mm_struct *charge_mm;
struct mem_cgroup *memcg;
struct page *page;
- swp_entry_t swap;
enum sgp_type sgp_huge = sgp;
pgoff_t hindex = index;
int error;
@@ -1608,17 +1747,23 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
sgp = SGP_CACHE;
repeat:
- swap.val = 0;
+ if (sgp <= SGP_CACHE &&
+ ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
+ return -EINVAL;
+ }
+
+ sbinfo = SHMEM_SB(inode->i_sb);
+ charge_mm = vma ? vma->vm_mm : current->mm;
+
page = find_lock_entry(mapping, index);
if (xa_is_value(page)) {
- swap = radix_to_swp_entry(page);
- page = NULL;
- }
+ error = shmem_swapin_page(inode, index, &page,
+ sgp, gfp, vma, fault_type);
+ if (error == -EEXIST)
+ goto repeat;
- if (sgp <= SGP_CACHE &&
- ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
- error = -EINVAL;
- goto unlock;
+ *pagep = page;
+ return error;
}
if (page && sgp == SGP_WRITE)
@@ -1632,7 +1777,7 @@ repeat:
put_page(page);
page = NULL;
}
- if (page || (sgp == SGP_READ && !swap.val)) {
+ if (page || sgp == SGP_READ) {
*pagep = page;
return 0;
}
@@ -1641,215 +1786,138 @@ repeat:
* Fast cache lookup did not find it:
* bring it back from swap or allocate.
*/
- sbinfo = SHMEM_SB(inode->i_sb);
- charge_mm = vma ? vma->vm_mm : current->mm;
-
- if (swap.val) {
- /* Look it up and read it in.. */
- page = lookup_swap_cache(swap, NULL, 0);
- if (!page) {
- /* Or update major stats only when swapin succeeds?? */
- if (fault_type) {
- *fault_type |= VM_FAULT_MAJOR;
- count_vm_event(PGMAJFAULT);
- count_memcg_event_mm(charge_mm, PGMAJFAULT);
- }
- /* Here we actually start the io */
- page = shmem_swapin(swap, gfp, info, index);
- if (!page) {
- error = -ENOMEM;
- goto failed;
- }
- }
-
- /* We have to do this with page locked to prevent races */
- lock_page(page);
- if (!PageSwapCache(page) || page_private(page) != swap.val ||
- !shmem_confirm_swap(mapping, index, swap)) {
- error = -EEXIST; /* try again */
- goto unlock;
- }
- if (!PageUptodate(page)) {
- error = -EIO;
- goto failed;
- }
- wait_on_page_writeback(page);
-
- if (shmem_should_replace_page(page, gfp)) {
- error = shmem_replace_page(&page, gfp, info, index);
- if (error)
- goto failed;
- }
- error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
- false);
- if (!error) {
- error = shmem_add_to_page_cache(page, mapping, index,
- swp_to_radix_entry(swap), gfp);
- /*
- * We already confirmed swap under page lock, and make
- * no memory allocation here, so usually no possibility
- * of error; but free_swap_and_cache() only trylocks a
- * page, so it is just possible that the entry has been
- * truncated or holepunched since swap was confirmed.
- * shmem_undo_range() will have done some of the
- * unaccounting, now delete_from_swap_cache() will do
- * the rest.
- * Reset swap.val? No, leave it so "failed" goes back to
- * "repeat": reading a hole and writing should succeed.
- */
- if (error) {
- mem_cgroup_cancel_charge(page, memcg, false);
- delete_from_swap_cache(page);
- }
- }
- if (error)
- goto failed;
-
- mem_cgroup_commit_charge(page, memcg, true, false);
-
- spin_lock_irq(&info->lock);
- info->swapped--;
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
-
- if (sgp == SGP_WRITE)
- mark_page_accessed(page);
-
- delete_from_swap_cache(page);
- set_page_dirty(page);
- swap_free(swap);
-
- } else {
- if (vma && userfaultfd_missing(vma)) {
- *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
- return 0;
- }
+ if (vma && userfaultfd_missing(vma)) {
+ *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
+ return 0;
+ }
- /* shmem_symlink() */
- if (mapping->a_ops != &shmem_aops)
- goto alloc_nohuge;
- if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
- goto alloc_nohuge;
- if (shmem_huge == SHMEM_HUGE_FORCE)
+ /* shmem_symlink() */
+ if (mapping->a_ops != &shmem_aops)
+ goto alloc_nohuge;
+ if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
+ goto alloc_nohuge;
+ if (shmem_huge == SHMEM_HUGE_FORCE)
+ goto alloc_huge;
+ switch (sbinfo->huge) {
+ loff_t i_size;
+ pgoff_t off;
+ case SHMEM_HUGE_NEVER:
+ goto alloc_nohuge;
+ case SHMEM_HUGE_WITHIN_SIZE:
+ off = round_up(index, HPAGE_PMD_NR);
+ i_size = round_up(i_size_read(inode), PAGE_SIZE);
+ if (i_size >= HPAGE_PMD_SIZE &&
+ i_size >> PAGE_SHIFT >= off)
goto alloc_huge;
- switch (sbinfo->huge) {
- loff_t i_size;
- pgoff_t off;
- case SHMEM_HUGE_NEVER:
- goto alloc_nohuge;
- case SHMEM_HUGE_WITHIN_SIZE:
- off = round_up(index, HPAGE_PMD_NR);
- i_size = round_up(i_size_read(inode), PAGE_SIZE);
- if (i_size >= HPAGE_PMD_SIZE &&
- i_size >> PAGE_SHIFT >= off)
- goto alloc_huge;
- /* fallthrough */
- case SHMEM_HUGE_ADVISE:
- if (sgp_huge == SGP_HUGE)
- goto alloc_huge;
- /* TODO: implement fadvise() hints */
- goto alloc_nohuge;
- }
+ /* fallthrough */
+ case SHMEM_HUGE_ADVISE:
+ if (sgp_huge == SGP_HUGE)
+ goto alloc_huge;
+ /* TODO: implement fadvise() hints */
+ goto alloc_nohuge;
+ }
alloc_huge:
- page = shmem_alloc_and_acct_page(gfp, inode, index, true);
- if (IS_ERR(page)) {
-alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode,
- index, false);
- }
- if (IS_ERR(page)) {
- int retry = 5;
- error = PTR_ERR(page);
- page = NULL;
- if (error != -ENOSPC)
- goto failed;
- /*
- * Try to reclaim some spece by splitting a huge page
- * beyond i_size on the filesystem.
- */
- while (retry--) {
- int ret;
- ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
- if (ret == SHRINK_STOP)
- break;
- if (ret)
- goto alloc_nohuge;
- }
- goto failed;
- }
-
- if (PageTransHuge(page))
- hindex = round_down(index, HPAGE_PMD_NR);
- else
- hindex = index;
+ page = shmem_alloc_and_acct_page(gfp, inode, index, true);
+ if (IS_ERR(page)) {
+alloc_nohuge:
+ page = shmem_alloc_and_acct_page(gfp, inode,
+ index, false);
+ }
+ if (IS_ERR(page)) {
+ int retry = 5;
- if (sgp == SGP_WRITE)
- __SetPageReferenced(page);
+ error = PTR_ERR(page);
+ page = NULL;
+ if (error != -ENOSPC)
+ goto unlock;
+ /*
+ * Try to reclaim some space by splitting a huge page
+ * beyond i_size on the filesystem.
+ */
+ while (retry--) {
+ int ret;
- error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
- PageTransHuge(page));
- if (error)
- goto unacct;
- error = shmem_add_to_page_cache(page, mapping, hindex,
- NULL, gfp & GFP_RECLAIM_MASK);
- if (error) {
- mem_cgroup_cancel_charge(page, memcg,
- PageTransHuge(page));
- goto unacct;
+ ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
+ if (ret == SHRINK_STOP)
+ break;
+ if (ret)
+ goto alloc_nohuge;
}
- mem_cgroup_commit_charge(page, memcg, false,
- PageTransHuge(page));
- lru_cache_add_anon(page);
+ goto unlock;
+ }
- spin_lock_irq(&info->lock);
- info->alloced += 1 << compound_order(page);
- inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
- alloced = true;
+ if (PageTransHuge(page))
+ hindex = round_down(index, HPAGE_PMD_NR);
+ else
+ hindex = index;
- if (PageTransHuge(page) &&
- DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
- hindex + HPAGE_PMD_NR - 1) {
- /*
- * Part of the huge page is beyond i_size: subject
- * to shrink under memory pressure.
- */
- spin_lock(&sbinfo->shrinklist_lock);
- /*
- * _careful to defend against unlocked access to
- * ->shrink_list in shmem_unused_huge_shrink()
- */
- if (list_empty_careful(&info->shrinklist)) {
- list_add_tail(&info->shrinklist,
- &sbinfo->shrinklist);
- sbinfo->shrinklist_len++;
- }
- spin_unlock(&sbinfo->shrinklist_lock);
- }
+ if (sgp == SGP_WRITE)
+ __SetPageReferenced(page);
+
+ error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
+ PageTransHuge(page));
+ if (error)
+ goto unacct;
+ error = shmem_add_to_page_cache(page, mapping, hindex,
+ NULL, gfp & GFP_RECLAIM_MASK);
+ if (error) {
+ mem_cgroup_cancel_charge(page, memcg,
+ PageTransHuge(page));
+ goto unacct;
+ }
+ mem_cgroup_commit_charge(page, memcg, false,
+ PageTransHuge(page));
+ lru_cache_add_anon(page);
+
+ spin_lock_irq(&info->lock);
+ info->alloced += 1 << compound_order(page);
+ inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
+ shmem_recalc_inode(inode);
+ spin_unlock_irq(&info->lock);
+ alloced = true;
+ if (PageTransHuge(page) &&
+ DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
+ hindex + HPAGE_PMD_NR - 1) {
/*
- * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
+ * Part of the huge page is beyond i_size: subject
+ * to shrink under memory pressure.
*/
- if (sgp == SGP_FALLOC)
- sgp = SGP_WRITE;
-clear:
+ spin_lock(&sbinfo->shrinklist_lock);
/*
- * Let SGP_WRITE caller clear ends if write does not fill page;
- * but SGP_FALLOC on a page fallocated earlier must initialize
- * it now, lest undo on failure cancel our earlier guarantee.
+ * _careful to defend against unlocked access to
+ * ->shrink_list in shmem_unused_huge_shrink()
*/
- if (sgp != SGP_WRITE && !PageUptodate(page)) {
- struct page *head = compound_head(page);
- int i;
+ if (list_empty_careful(&info->shrinklist)) {
+ list_add_tail(&info->shrinklist,
+ &sbinfo->shrinklist);
+ sbinfo->shrinklist_len++;
+ }
+ spin_unlock(&sbinfo->shrinklist_lock);
+ }
- for (i = 0; i < (1 << compound_order(head)); i++) {
- clear_highpage(head + i);
- flush_dcache_page(head + i);
- }
- SetPageUptodate(head);
+ /*
+ * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
+ */
+ if (sgp == SGP_FALLOC)
+ sgp = SGP_WRITE;
+clear:
+ /*
+ * Let SGP_WRITE caller clear ends if write does not fill page;
+ * but SGP_FALLOC on a page fallocated earlier must initialize
+ * it now, lest undo on failure cancel our earlier guarantee.
+ */
+ if (sgp != SGP_WRITE && !PageUptodate(page)) {
+ struct page *head = compound_head(page);
+ int i;
+
+ for (i = 0; i < (1 << compound_order(head)); i++) {
+ clear_highpage(head + i);
+ flush_dcache_page(head + i);
}
+ SetPageUptodate(head);
}
/* Perhaps the file has been truncated since we checked */
@@ -1879,9 +1947,6 @@ unacct:
put_page(page);
goto alloc_nohuge;
}
-failed:
- if (swap.val && !shmem_confirm_swap(mapping, index, swap))
- error = -EEXIST;
unlock:
if (page) {
unlock_page(page);
@@ -2125,6 +2190,24 @@ out_nomem:
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct shmem_inode_info *info = SHMEM_I(file_inode(file));
+
+ if (info->seals & F_SEAL_FUTURE_WRITE) {
+ /*
+ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+ * "future write" seal active.
+ */
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+ return -EPERM;
+
+ /*
+ * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
+ * read-only mapping, take care to not allow mprotect to revert
+ * protections.
+ */
+ vma->vm_flags &= ~(VM_MAYWRITE);
+ }
+
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
@@ -2375,8 +2458,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index = pos >> PAGE_SHIFT;
/* i_mutex is held by caller */
- if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
- if (info->seals & F_SEAL_WRITE)
+ if (unlikely(info->seals & (F_SEAL_GROW |
+ F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
+ if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
return -EPERM;
if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
return -EPERM;
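/*
 * Editor's sketch (illustrative only, not part of the patch): how the
 * F_SEAL_FUTURE_WRITE checks above look from userspace, assuming a kernel
 * with this patch and a libc exposing memfd_create().  After the seal is
 * added, existing writable mappings keep working, but new write() calls and
 * new MAP_SHARED|PROT_WRITE mmaps of the file fail with EPERM.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010	/* value from include/uapi/linux/fcntl.h */
#endif

static int make_sealed_buffer(size_t size)
{
	int fd = memfd_create("buf", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd < 0)
		return -1;
	if (ftruncate(fd, size) < 0 ||
	    fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0) {
		close(fd);
		return -1;
	}

	/* Safe to hand to a less trusted process for read-only use. */
	return fd;
}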
@@ -2639,7 +2723,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
/* protected by i_mutex */
- if (info->seals & F_SEAL_WRITE) {
+ if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
error = -EPERM;
goto out;
}
@@ -2848,16 +2932,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(old_dentry);
- int ret;
+ int ret = 0;
/*
* No ordinary (disk based) filesystem counts links as inodes;
* but each new link needs a new dentry, pinning lowmem, and
* tmpfs dentries cannot be pruned until they are unlinked.
+ * But if an O_TMPFILE file is linked into the tmpfs, the
+ * first link must skip that, to get the accounting right.
*/
- ret = shmem_reserve_inode(inode->i_sb);
- if (ret)
- goto out;
+ if (inode->i_nlink) {
+ ret = shmem_reserve_inode(inode->i_sb);
+ if (ret)
+ goto out;
+ }
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
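
The shmem_link() hunk above skips shmem_reserve_inode() while i_nlink is still zero, i.e. when an O_TMPFILE inode that was already accounted for at creation is getting its first name. A minimal userspace illustration of that first-link path; the /tmp path is assumed to be a tmpfs mount and error handling is trimmed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumes /tmp is a tmpfs mount. */
	int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);
	char path[64];

	if (fd < 0)
		return 1;
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	/* Giving the unnamed inode its first name: i_nlink goes 0 -> 1,
	 * the case the hunk above exempts from shmem_reserve_inode(). */
	if (linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/named", AT_SYMLINK_FOLLOW))
		perror("linkat");
	close(fd);
	return 0;
}
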
@@ -3843,7 +3931,8 @@ int __init shmem_init(void)
return 0;
}
-int shmem_unuse(swp_entry_t swap, struct page *page)
+int shmem_unuse(unsigned int type, bool frontswap,
+ unsigned long *fs_pages_to_unuse)
{
return 0;
}
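
The shmem_mmap(), shmem_write_begin() and shmem_fallocate() hunks above together implement F_SEAL_FUTURE_WRITE: once the seal is set, new shared writable mappings, writes and fallocate are refused, and read-only shared mappings lose VM_MAYWRITE so mprotect() cannot upgrade them. A rough userspace sketch of the expected behaviour on a kernel with this series applied; the fallback #define mirrors the value the series uses:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010	/* value introduced by this series */
#endif

int main(void)
{
	int fd = memfd_create("buf", MFD_ALLOW_SEALING);
	void *p;

	if (fd < 0 || ftruncate(fd, 4096) ||
	    fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE))
		return 1;

	/* Expected to fail with EPERM: new shared writable mapping. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	printf("rw MAP_SHARED: %s\n", p == MAP_FAILED ? "refused" : "allowed");

	/* A read-only shared mapping is still allowed... */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* ...but cannot be upgraded, since the hunk drops VM_MAYWRITE. */
	printf("mprotect rw:   %s\n",
	       mprotect(p, 4096, PROT_READ | PROT_WRITE) ? "refused" : "allowed");
	return 0;
}
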
diff --git a/mm/slab.c b/mm/slab.c
index 73fe23e649c9..28652e4218e0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -550,14 +550,6 @@ static void start_cpu_timer(int cpu)
static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
- /*
- * The array_cache structures contain pointers to free object.
- * However, when such objects are allocated or transferred to another
- * cache the pointers are not cleared and they could be counted as
- * valid references during a kmemleak scan. Therefore, kmemleak must
- * not scan such objects.
- */
- kmemleak_no_scan(ac);
if (ac) {
ac->avail = 0;
ac->limit = limit;
@@ -573,6 +565,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
struct array_cache *ac = NULL;
ac = kmalloc_node(memsize, gfp, node);
+ /*
+ * The array_cache structures contain pointers to free object.
+ * However, when such objects are allocated or transferred to another
+ * cache the pointers are not cleared and they could be counted as
+ * valid references during a kmemleak scan. Therefore, kmemleak must
+ * not scan such objects.
+ */
+ kmemleak_no_scan(ac);
init_arraycache(ac, entries, batchcount);
return ac;
}
@@ -666,20 +666,22 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
struct alien_cache *alc = NULL;
alc = kmalloc_node(memsize, gfp, node);
- init_arraycache(&alc->ac, entries, batch);
- spin_lock_init(&alc->lock);
+ if (alc) {
+ kmemleak_no_scan(alc);
+ init_arraycache(&alc->ac, entries, batch);
+ spin_lock_init(&alc->lock);
+ }
return alc;
}
static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
struct alien_cache **alc_ptr;
- size_t memsize = sizeof(void *) * nr_node_ids;
int i;
if (limit > 1)
limit = 12;
- alc_ptr = kzalloc_node(memsize, gfp, node);
+ alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
if (!alc_ptr)
return NULL;
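
The alloc_alien_cache() hunk above also converts an open-coded `sizeof(void *) * nr_node_ids` allocation to kcalloc_node(), whose array form checks the multiplication for overflow. The small userspace analogue below (calloc plays the role of kcalloc_node) shows why the checked form is preferable; the values are deliberately pathological:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / 8 + 2;	/* deliberately pathological count */

	/* The open-coded multiplication silently wraps to a tiny size... */
	printf("n * 8 wraps to %zu\n", n * 8);
	/* ...while calloc() (like kcalloc_node()) detects the overflow
	 * and returns NULL instead of under-allocating. */
	printf("calloc(n, 8) = %p\n", calloc(n, 8));
	return 0;
}
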
@@ -1725,6 +1727,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
* This could be made much more intelligent. For now, try to avoid using
* high order pages for slabs. When the gfp() functions are more friendly
* towards high-order requests, this should be changed.
+ *
+ * Return: number of left-over bytes in a slab
*/
static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t size, slab_flags_t flags)
@@ -1973,6 +1977,8 @@ static bool set_on_slab_cache(struct kmem_cache *cachep,
* %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
+ *
+ * Return: a pointer to the created cache or %NULL in case of error
*/
int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
{
@@ -2357,7 +2363,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
void *freelist;
void *addr = page_address(page);
- page->s_mem = kasan_reset_tag(addr) + colour_off;
+ page->s_mem = addr + colour_off;
page->active = 0;
if (OBJFREELIST_SLAB(cachep))
@@ -2366,6 +2372,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
/* Slab management obj is off-slab. */
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
+ freelist = kasan_reset_tag(freelist);
if (!freelist)
return NULL;
} else {
@@ -2679,6 +2686,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
offset *= cachep->colour_off;
+ /*
+ * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
+ * page_address() in the latter returns a non-tagged pointer,
+ * as it should be for slab pages.
+ */
+ kasan_poison_slab(page);
+
/* Get slab management. */
freelist = alloc_slabmgmt(cachep, page, offset,
local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@@ -2687,7 +2701,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
slab_map_pages(cachep, page, freelist);
- kasan_poison_slab(page);
cache_init_objs(cachep, page);
if (gfpflags_allow_blocking(local_flags))
@@ -3533,12 +3546,13 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
*
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
+ *
+ * Return: pointer to the new object or %NULL in case of error
*/
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *ret = slab_alloc(cachep, flags, _RET_IP_);
- ret = kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
@@ -3623,12 +3637,13 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
* node, which can improve the performance for cpu bound structures.
*
* Fallback to other node is possible if __GFP_THISNODE is not set.
+ *
+ * Return: pointer to the new object or %NULL in case of error
*/
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
- ret = kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
flags, nodeid);
@@ -3692,6 +3707,8 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
* @caller: function caller for debug tracking of the caller
+ *
+ * Return: pointer to the allocated memory or %NULL in case of error
*/
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
unsigned long caller)
@@ -4157,6 +4174,8 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
* @buffer: user buffer
* @count: data length
* @ppos: unused
+ *
+ * Return: %0 on success, negative error code otherwise.
*/
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
@@ -4406,6 +4425,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
unsigned int objnr;
unsigned long offset;
+ ptr = kasan_reset_tag(ptr);
+
/* Find and validate object. */
cachep = page->slab_cache;
objnr = obj_to_index(cachep, page, (void *)ptr);
@@ -4448,6 +4469,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
* The caller must guarantee that objp points to a valid object previously
* allocated with either kmalloc() or kmem_cache_alloc(). The object
* must not be freed during the duration of the call.
+ *
+ * Return: size of the actual memory used by @objp in bytes
*/
size_t ksize(const void *objp)
{
diff --git a/mm/slab.h b/mm/slab.h
index 4190c24ef0e9..e5e6658eeacc 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -276,8 +276,6 @@ static __always_inline int memcg_charge_slab(struct page *page,
gfp_t gfp, int order,
struct kmem_cache *s)
{
- if (!memcg_kmem_enabled())
- return 0;
if (is_root_cache(s))
return 0;
return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
@@ -286,8 +284,6 @@ static __always_inline int memcg_charge_slab(struct page *page,
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
struct kmem_cache *s)
{
- if (!memcg_kmem_enabled())
- return;
memcg_kmem_uncharge(page, order);
}
@@ -437,11 +433,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
flags &= gfp_allowed_mask;
for (i = 0; i < size; i++) {
- void *object = p[i];
-
- kmemleak_alloc_recursive(object, s->object_size, 1,
+ p[i] = kasan_slab_alloc(s, p[i], flags);
+ /* As p[i] might get tagged, call kmemleak hook after KASAN. */
+ kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, flags);
- p[i] = kasan_slab_alloc(s, object, flags);
}
if (memcg_kmem_enabled())
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 81732d05e74a..03eeb8b7b4b1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -939,6 +939,8 @@ EXPORT_SYMBOL(kmem_cache_destroy);
*
* Releases as many slabs as possible for a cache.
* To help debugging, a zero exit status indicates all slabs were released.
+ *
+ * Return: %0 if all slabs were released, non-zero otherwise
*/
int kmem_cache_shrink(struct kmem_cache *cachep)
{
@@ -1228,8 +1230,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
flags |= __GFP_COMP;
page = alloc_pages(flags, order);
ret = page ? page_address(page) : NULL;
- kmemleak_alloc(ret, size, 1, flags);
ret = kasan_kmalloc_large(ret, size, flags);
+ /* As ret might get tagged, call kmemleak hook after KASAN. */
+ kmemleak_alloc(ret, size, 1, flags);
return ret;
}
EXPORT_SYMBOL(kmalloc_order);
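
The reordering above, and the matching changes in slab_post_alloc_hook() and kmalloc_large_node_hook(), make sure kmemleak records the pointer value callers will actually receive and later free, which under CONFIG_KASAN_SW_TAGS may carry a tag in the top address byte; that tag is what kasan_reset_tag() strips elsewhere in this series. The stand-alone model below only illustrates the top-byte-tag idea, not the kernel's implementation (the mask and tag value are made up):

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56			/* top byte of a 64-bit address */
#define TAG_MASK  (0xffULL << TAG_SHIFT)

static uint64_t set_tag(uint64_t addr, uint8_t tag)
{
	return (addr & ~TAG_MASK) | ((uint64_t)tag << TAG_SHIFT);
}

/* Roughly what kasan_reset_tag() does conceptually: drop the top byte. */
static uint64_t reset_tag(uint64_t addr)
{
	return addr & ~TAG_MASK;
}

int main(void)
{
	uint64_t obj = 0x0000ffff12345678ULL;
	uint64_t tagged = set_tag(obj, 0xab);

	/* Recording the untagged value but later seeing the tagged one
	 * would make the same object look like two different pointers. */
	printf("tagged   %#llx\n", (unsigned long long)tagged);
	printf("untagged %#llx\n", (unsigned long long)reset_tag(tagged));
	return 0;
}
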
@@ -1424,7 +1427,7 @@ void dump_unreclaimable_slab(void)
#if defined(CONFIG_MEMCG)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
mutex_lock(&slab_mutex);
return seq_list_start(&memcg->kmem_caches, *pos);
@@ -1432,7 +1435,7 @@ void *memcg_slab_start(struct seq_file *m, loff_t *pos)
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
return seq_list_next(p, &memcg->kmem_caches, pos);
}
@@ -1446,7 +1449,7 @@ int memcg_slab_show(struct seq_file *m, void *p)
{
struct kmem_cache *s = list_entry(p, struct kmem_cache,
memcg_params.kmem_caches_node);
- struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
if (p == memcg->kmem_caches.next)
print_slabinfo_header(m);
@@ -1527,6 +1530,8 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
* This function is like krealloc() except it never frees the originally
* allocated buffer. Use this if you don't want to free the buffer immediately
* like, for example, with RCU.
+ *
+ * Return: pointer to the allocated memory or %NULL in case of error
*/
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
@@ -1548,6 +1553,8 @@ EXPORT_SYMBOL(__krealloc);
* lesser of the new and old sizes. If @p is %NULL, krealloc()
* behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
* %NULL pointer, the object pointed to is freed.
+ *
+ * Return: pointer to the allocated memory or %NULL in case of error
*/
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
diff --git a/mm/slub.c b/mm/slub.c
index 36c0befeebd8..1b08fbcb7e61 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
- return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
+ /*
+ * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+ * Normally, this doesn't cause any issues, as both set_freepointer()
+ * and get_freepointer() are called with a pointer with the same tag.
+ * However, there are some issues with CONFIG_SLUB_DEBUG code. For
+ * example, when __free_slub() iterates over objects in a cache, it
+	 * passes untagged pointers to check_object(). check_object() in turn
+ * calls get_freepointer() with an untagged pointer, which causes the
+ * freepointer to be restored incorrectly.
+ */
+ return (void *)((unsigned long)ptr ^ s->random ^
+ (unsigned long)kasan_reset_tag((void *)ptr_addr));
#else
return ptr;
#endif
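
The new freelist_ptr() comment explains that the XOR obfuscation only round-trips when the same ptr_addr is used to store and to load the free pointer, which is why the (possibly tagged) storage address is normalised with kasan_reset_tag(). The worked example below uses made-up values to show the round-trip succeeding with a consistent address and failing when one side uses a tagged address:

#include <stdint.h>
#include <stdio.h>

/* SLAB_FREELIST_HARDENED-style obfuscation: store ptr ^ random ^ slot. */
static uint64_t fl(uint64_t ptr, uint64_t rnd, uint64_t slot_addr)
{
	return ptr ^ rnd ^ slot_addr;
}

int main(void)
{
	uint64_t next = 0xffff888012345678ULL;	/* pointer being stored */
	uint64_t rnd  = 0x5a5a5a5a5a5a5a5aULL;	/* plays the role of s->random */
	uint64_t slot = 0xffff888012345000ULL;	/* where it is stored */
	/* Same slot address, but with a software tag in the top byte. */
	uint64_t tagged_slot = (slot & ~(0xffULL << 56)) | (0xabULL << 56);

	uint64_t stored = fl(next, rnd, slot);

	/* Same address on store and load: the pointer round-trips. */
	printf("match    %#llx\n", (unsigned long long)fl(stored, rnd, slot));
	/* Tagged on one side only: the decoded pointer is garbage. */
	printf("mismatch %#llx\n", (unsigned long long)fl(stored, rnd, tagged_slot));
	return 0;
}
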
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
__p < (__addr) + (__objects) * (__s)->size; \
__p += (__s)->size)
-#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
- for (__p = fixup_red_left(__s, __addr), __idx = 1; \
- __idx <= __objects; \
- __p += (__s)->size, __idx++)
-
/* Determine object index from a given position */
static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
{
- return (p - addr) / s->size;
+ return (kasan_reset_tag(p) - addr) / s->size;
}
static inline unsigned int order_objects(unsigned int order, unsigned int size)
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
return 1;
base = page_address(page);
+ object = kasan_reset_tag(object);
object = restore_red_left(s, object);
if (object < base || object >= base + page->objects * s->size ||
(object - base) % s->size) {
@@ -1075,9 +1082,18 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
init_tracking(s, object);
}
+static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+{
+ if (!(s->flags & SLAB_POISON))
+ return;
+
+ metadata_access_enable();
+ memset(addr, POISON_INUSE, PAGE_SIZE << order);
+ metadata_access_disable();
+}
+
static inline int alloc_consistency_checks(struct kmem_cache *s,
- struct page *page,
- void *object, unsigned long addr)
+ struct page *page, void *object)
{
if (!check_slab(s, page))
return 0;
@@ -1098,7 +1114,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s,
void *object, unsigned long addr)
{
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
- if (!alloc_consistency_checks(s, page, object, addr))
+ if (!alloc_consistency_checks(s, page, object))
goto bad;
}
@@ -1330,6 +1346,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
struct page *page, void *object) {}
+static inline void setup_page_debug(struct kmem_cache *s,
+ void *addr, int order) {}
static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1374,8 +1392,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
*/
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
+ ptr = kasan_kmalloc_large(ptr, size, flags);
+ /* As ptr might get tagged, call kmemleak hook after KASAN. */
kmemleak_alloc(ptr, size, 1, flags);
- return kasan_kmalloc_large(ptr, size, flags);
+ return ptr;
}
static __always_inline void kfree_hook(void *x)
@@ -1641,27 +1661,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
- start = page_address(page);
+ kasan_poison_slab(page);
- if (unlikely(s->flags & SLAB_POISON))
- memset(start, POISON_INUSE, PAGE_SIZE << order);
+ start = page_address(page);
- kasan_poison_slab(page);
+ setup_page_debug(s, start, order);
shuffle = shuffle_freelist(s, page);
if (!shuffle) {
- for_each_object_idx(p, idx, s, start, page->objects) {
- if (likely(idx < page->objects)) {
- next = p + s->size;
- next = setup_object(s, page, next);
- set_freepointer(s, p, next);
- } else
- set_freepointer(s, p, NULL);
- }
start = fixup_red_left(s, start);
start = setup_object(s, page, start);
page->freelist = start;
+ for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+ next = p + s->size;
+ next = setup_object(s, page, next);
+ set_freepointer(s, p, next);
+ p = next;
+ }
+ set_freepointer(s, p, NULL);
}
page->inuse = page->objects;
@@ -2111,7 +2129,7 @@ redo:
if (!lock) {
lock = 1;
/*
- * Taking the spinlock removes the possiblity
+ * Taking the spinlock removes the possibility
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -2235,8 +2253,8 @@ static void unfreeze_partials(struct kmem_cache *s,
}
/*
- * Put a page that was just frozen (in __slab_free) into a partial page
- * slot if available.
+ * Put a page that was just frozen (in __slab_free|get_partial_node) into a
+ * partial page slot if available.
*
* If we did not find a slot then simply move all the partials to the
* per node partial list.
@@ -2463,8 +2481,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
stat(s, ALLOC_SLAB);
c->page = page;
*pc = c;
- } else
- freelist = NULL;
+ }
return freelist;
}
@@ -3846,6 +3863,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
unsigned int offset;
size_t object_size;
+ ptr = kasan_reset_tag(ptr);
+
/* Find object and usable object size. */
s = page->slab_cache;
@@ -4243,7 +4262,7 @@ void __init kmem_cache_init(void)
cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
slub_cpu_dead);
- pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
+ pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
cache_line_size(),
slub_min_order, slub_max_order, slub_min_objects,
nr_cpu_ids, nr_node_ids);
diff --git a/mm/sparse.c b/mm/sparse.c
index 7ea5dc6c6b19..77a0554fa5bd 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -197,7 +197,7 @@ static inline int next_present_section_nr(int section_nr)
}
#define for_each_present_section_nr(start, section_nr) \
for (section_nr = next_present_section_nr(start-1); \
- ((section_nr >= 0) && \
+ ((section_nr != -1) && \
(section_nr <= __highest_present_section_nr)); \
section_nr = next_present_section_nr(section_nr))
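
The for_each_present_section_nr() change compares against the -1 sentinel from next_present_section_nr() with `!= -1` instead of `>= 0`. The two are equivalent while section_nr is signed, but only the former keeps detecting the sentinel if the variable is ever made unsigned, where `>= 0` is trivially always true; that reading is a presumption here, not taken from the patch. A tiny illustration, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned int section_nr = (unsigned int)-1;	/* the -1 sentinel */

	/* Always 1 for an unsigned type, so it could never stop a loop
	 * (gcc points this out with -Wtype-limits). */
	printf("section_nr >= 0  : %d\n", section_nr >= 0);
	/* Evaluates to 0 at the sentinel: -1 converts to UINT_MAX here. */
	printf("section_nr != -1 : %d\n", section_nr != -1);
	return 0;
}
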
diff --git a/mm/swap.c b/mm/swap.c
index 4929bc1be60e..301ed4e04320 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -58,16 +58,16 @@ static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
static void __page_cache_release(struct page *page)
{
if (PageLRU(page)) {
- struct zone *zone = page_zone(page);
+ pg_data_t *pgdat = page_pgdat(page);
struct lruvec *lruvec;
unsigned long flags;
- spin_lock_irqsave(zone_lru_lock(zone), flags);
- lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+ spin_lock_irqsave(&pgdat->lru_lock, flags);
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_off_lru(page));
- spin_unlock_irqrestore(zone_lru_lock(zone), flags);
+ spin_unlock_irqrestore(&pgdat->lru_lock, flags);
}
__ClearPageWaiters(page);
mem_cgroup_uncharge(page);
@@ -320,19 +320,14 @@ static inline void activate_page_drain(int cpu)
{
}
-static bool need_activate_page_drain(int cpu)
-{
- return false;
-}
-
void activate_page(struct page *page)
{
- struct zone *zone = page_zone(page);
+ pg_data_t *pgdat = page_pgdat(page);
page = compound_head(page);
- spin_lock_irq(zone_lru_lock(zone));
- __activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL);
- spin_unlock_irq(zone_lru_lock(zone));
+ spin_lock_irq(&pgdat->lru_lock);
+ __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
+ spin_unlock_irq(&pgdat->lru_lock);
}
#endif
@@ -653,13 +648,15 @@ void lru_add_drain(void)
put_cpu();
}
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_drain();
}
-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-
/*
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
* kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -702,6 +699,12 @@ void lru_add_drain_all(void)
mutex_unlock(&lock);
}
+#else
+void lru_add_drain_all(void)
+{
+ lru_add_drain();
+}
+#endif
/**
* release_pages - batched put_page()
diff --git a/mm/swap_state.c b/mm/swap_state.c
index fd2f21e1c60a..85245fdec8d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -523,7 +523,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
* This has been extended to use the NUMA policies from the mm triggering
* the readahead.
*
- * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
+ * Caller must hold read mmap_sem if vmf->vma is not NULL.
*/
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
@@ -543,6 +543,13 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
if (!mask)
goto skip;
+ /* Test swap type to make sure the dereference is safe */
+ if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
+ struct inode *inode = si->swap_file->f_mapping->host;
+ if (inode_read_congested(inode))
+ goto skip;
+ }
+
do_poll = false;
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
@@ -691,6 +698,20 @@ static void swap_ra_info(struct vm_fault *vmf,
pte_unmap(orig_pte);
}
+/**
+ * swap_vma_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read in a few pages whose
+ * virtual addresses are around the fault address in the same vma.
+ *
+ * Caller must hold read mmap_sem if vmf->vma is not NULL.
+ *
+ */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
diff --git a/mm/swapfile.c b/mm/swapfile.c
index dbac1d49469d..2b8d9c3fbb47 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -98,6 +98,15 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0);
atomic_t nr_rotate_swap = ATOMIC_INIT(0);
+static struct swap_info_struct *swap_type_to_swap_info(int type)
+{
+ if (type >= READ_ONCE(nr_swapfiles))
+ return NULL;
+
+ smp_rmb(); /* Pairs with smp_wmb in alloc_swap_info. */
+ return READ_ONCE(swap_info[type]);
+}
+
static inline unsigned char swap_count(unsigned char ent)
{
return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
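
The new swap_type_to_swap_info() helper pairs its smp_rmb() with the smp_wmb() in alloc_swap_info(), which a later hunk in this patch also converts to publish the pointer with WRITE_ONCE(): the writer stores swap_info[type] before bumping nr_swapfiles, so a reader that observes the new count is guaranteed to observe the pointer. Below is a generic sketch of that publish/consume pattern using the kernel's barrier and annotation primitives; the structure and names are illustrative, not the swapfile code:

#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <asm/barrier.h>	/* smp_wmb()/smp_rmb() */

#define MAX_SLOTS 32
struct item { int val; };

static struct item *slots[MAX_SLOTS];
static unsigned int nr_slots;

/* Writer: make the slot contents visible before advertising the slot. */
static void publish(unsigned int n, struct item *it)
{
	slots[n] = it;
	smp_wmb();			/* pairs with smp_rmb() in lookup() */
	WRITE_ONCE(nr_slots, n + 1);
}

/* Reader: only dereference slots the writer has already advertised. */
static struct item *lookup(unsigned int idx)
{
	if (idx >= READ_ONCE(nr_slots))
		return NULL;
	smp_rmb();			/* pairs with smp_wmb() in publish() */
	return READ_ONCE(slots[idx]);
}
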
@@ -1044,12 +1053,14 @@ noswap:
/* The only caller of this function is now suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
- struct swap_info_struct *si;
+ struct swap_info_struct *si = swap_type_to_swap_info(type);
pgoff_t offset;
- si = swap_info[type];
+ if (!si)
+ goto fail;
+
spin_lock(&si->lock);
- if (si && (si->flags & SWP_WRITEOK)) {
+ if (si->flags & SWP_WRITEOK) {
atomic_long_dec(&nr_swap_pages);
/* This is called for allocating swap entry, not cache */
offset = scan_swap_map(si, 1);
@@ -1060,6 +1071,7 @@ swp_entry_t get_swap_page_of_type(int type)
atomic_long_inc(&nr_swap_pages);
}
spin_unlock(&si->lock);
+fail:
return (swp_entry_t) {0};
}
@@ -1071,9 +1083,9 @@ static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
if (!entry.val)
goto out;
type = swp_type(entry);
- if (type >= nr_swapfiles)
+ p = swap_type_to_swap_info(type);
+ if (!p)
goto bad_nofile;
- p = swap_info[type];
if (!(p->flags & SWP_USED))
goto bad_device;
offset = swp_offset(entry);
@@ -1697,10 +1709,9 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
sector_t swapdev_block(int type, pgoff_t offset)
{
struct block_device *bdev;
+ struct swap_info_struct *si = swap_type_to_swap_info(type);
- if ((unsigned int)type >= nr_swapfiles)
- return 0;
- if (!(swap_info[type]->flags & SWP_WRITEOK))
+ if (!si || !(si->flags & SWP_WRITEOK))
return 0;
return map_swap_entry(swp_entry(type, offset), &bdev);
}
@@ -1799,44 +1810,77 @@ out_nolock:
}
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- swp_entry_t entry, struct page *page)
+ unsigned long addr, unsigned long end,
+ unsigned int type, bool frontswap,
+ unsigned long *fs_pages_to_unuse)
{
- pte_t swp_pte = swp_entry_to_pte(entry);
+ struct page *page;
+ swp_entry_t entry;
pte_t *pte;
+ struct swap_info_struct *si;
+ unsigned long offset;
int ret = 0;
+ volatile unsigned char *swap_map;
- /*
- * We don't actually need pte lock while scanning for swp_pte: since
- * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
- * page table while we're scanning; though it could get zapped, and on
- * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
- * of unmatched parts which look like swp_pte, so unuse_pte must
- * recheck under pte lock. Scanning without pte lock lets it be
- * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
- */
+ si = swap_info[type];
pte = pte_offset_map(pmd, addr);
do {
- /*
- * swapoff spends a _lot_ of time in this loop!
- * Test inline before going to call unuse_pte.
- */
- if (unlikely(pte_same_as_swp(*pte, swp_pte))) {
- pte_unmap(pte);
- ret = unuse_pte(vma, pmd, addr, entry, page);
- if (ret)
- goto out;
- pte = pte_offset_map(pmd, addr);
+ struct vm_fault vmf;
+
+ if (!is_swap_pte(*pte))
+ continue;
+
+ entry = pte_to_swp_entry(*pte);
+ if (swp_type(entry) != type)
+ continue;
+
+ offset = swp_offset(entry);
+ if (frontswap && !frontswap_test(si, offset))
+ continue;
+
+ pte_unmap(pte);
+ swap_map = &si->swap_map[offset];
+ vmf.vma = vma;
+ vmf.address = addr;
+ vmf.pmd = pmd;
+ page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
+ if (!page) {
+ if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
+ goto try_next;
+ return -ENOMEM;
+ }
+
+ lock_page(page);
+ wait_on_page_writeback(page);
+ ret = unuse_pte(vma, pmd, addr, entry, page);
+ if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
+ goto out;
}
+
+ try_to_free_swap(page);
+ unlock_page(page);
+ put_page(page);
+
+ if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
+ ret = FRONTSWAP_PAGES_UNUSED;
+ goto out;
+ }
+try_next:
+ pte = pte_offset_map(pmd, addr);
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(pte - 1);
+
+ ret = 0;
out:
return ret;
}
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
- swp_entry_t entry, struct page *page)
+ unsigned int type, bool frontswap,
+ unsigned long *fs_pages_to_unuse)
{
pmd_t *pmd;
unsigned long next;
@@ -1848,7 +1892,8 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
next = pmd_addr_end(addr, end);
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
- ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
+ ret = unuse_pte_range(vma, pmd, addr, next, type,
+ frontswap, fs_pages_to_unuse);
if (ret)
return ret;
} while (pmd++, addr = next, addr != end);
@@ -1857,7 +1902,8 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
unsigned long addr, unsigned long end,
- swp_entry_t entry, struct page *page)
+ unsigned int type, bool frontswap,
+ unsigned long *fs_pages_to_unuse)
{
pud_t *pud;
unsigned long next;
@@ -1868,7 +1914,8 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
- ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
+ ret = unuse_pmd_range(vma, pud, addr, next, type,
+ frontswap, fs_pages_to_unuse);
if (ret)
return ret;
} while (pud++, addr = next, addr != end);
@@ -1877,7 +1924,8 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
- swp_entry_t entry, struct page *page)
+ unsigned int type, bool frontswap,
+ unsigned long *fs_pages_to_unuse)
{
p4d_t *p4d;
unsigned long next;
@@ -1888,78 +1936,66 @@ static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d))
continue;
- ret = unuse_pud_range(vma, p4d, addr, next, entry, page);
+ ret = unuse_pud_range(vma, p4d, addr, next, type,
+ frontswap, fs_pages_to_unuse);
if (ret)
return ret;
} while (p4d++, addr = next, addr != end);
return 0;
}
-static int unuse_vma(struct vm_area_struct *vma,
- swp_entry_t entry, struct page *page)
+static int unuse_vma(struct vm_area_struct *vma, unsigned int type,
+ bool frontswap, unsigned long *fs_pages_to_unuse)
{
pgd_t *pgd;
unsigned long addr, end, next;
int ret;
- if (page_anon_vma(page)) {
- addr = page_address_in_vma(page, vma);
- if (addr == -EFAULT)
- return 0;
- else
- end = addr + PAGE_SIZE;
- } else {
- addr = vma->vm_start;
- end = vma->vm_end;
- }
+ addr = vma->vm_start;
+ end = vma->vm_end;
pgd = pgd_offset(vma->vm_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- ret = unuse_p4d_range(vma, pgd, addr, next, entry, page);
+ ret = unuse_p4d_range(vma, pgd, addr, next, type,
+ frontswap, fs_pages_to_unuse);
if (ret)
return ret;
} while (pgd++, addr = next, addr != end);
return 0;
}
-static int unuse_mm(struct mm_struct *mm,
- swp_entry_t entry, struct page *page)
+static int unuse_mm(struct mm_struct *mm, unsigned int type,
+ bool frontswap, unsigned long *fs_pages_to_unuse)
{
struct vm_area_struct *vma;
int ret = 0;
- if (!down_read_trylock(&mm->mmap_sem)) {
- /*
- * Activate page so shrink_inactive_list is unlikely to unmap
- * its ptes while lock is dropped, so swapoff can make progress.
- */
- activate_page(page);
- unlock_page(page);
- down_read(&mm->mmap_sem);
- lock_page(page);
- }
+ down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
- break;
+ if (vma->anon_vma) {
+ ret = unuse_vma(vma, type, frontswap,
+ fs_pages_to_unuse);
+ if (ret)
+ break;
+ }
cond_resched();
}
up_read(&mm->mmap_sem);
- return (ret < 0)? ret: 0;
+ return ret;
}
/*
* Scan swap_map (or frontswap_map if frontswap parameter is true)
- * from current position to next entry still in use.
- * Recycle to start on reaching the end, returning 0 when empty.
+ * from current position to next entry still in use. Return 0
+ * if there are no inuse entries after prev till end of the map.
*/
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
unsigned int prev, bool frontswap)
{
- unsigned int max = si->max;
- unsigned int i = prev;
+ unsigned int i;
unsigned char count;
/*
@@ -1968,20 +2004,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
* hits are okay, and sys_swapoff() has already prevented new
* allocations from this area (while holding swap_lock).
*/
- for (;;) {
- if (++i >= max) {
- if (!prev) {
- i = 0;
- break;
- }
- /*
- * No entries in use at top of swap_map,
- * loop back to start and recheck there.
- */
- max = prev + 1;
- prev = 0;
- i = 1;
- }
+ for (i = prev + 1; i < si->max; i++) {
count = READ_ONCE(si->swap_map[i]);
if (count && swap_count(count) != SWAP_MAP_BAD)
if (!frontswap || frontswap_test(si, i))
@@ -1989,240 +2012,121 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
if ((i % LATENCY_LIMIT) == 0)
cond_resched();
}
+
+ if (i == si->max)
+ i = 0;
+
return i;
}
/*
- * We completely avoid races by reading each swap page in advance,
- * and then search for the process using it. All the necessary
- * page table adjustments can then be made atomically.
- *
- * if the boolean frontswap is true, only unuse pages_to_unuse pages;
+ * If the boolean frontswap is true, only unuse pages_to_unuse pages;
* pages_to_unuse==0 means all pages; ignored if frontswap is false
*/
+#define SWAP_UNUSE_MAX_TRIES 3
int try_to_unuse(unsigned int type, bool frontswap,
unsigned long pages_to_unuse)
{
+ struct mm_struct *prev_mm;
+ struct mm_struct *mm;
+ struct list_head *p;
+ int retval = 0;
struct swap_info_struct *si = swap_info[type];
- struct mm_struct *start_mm;
- volatile unsigned char *swap_map; /* swap_map is accessed without
- * locking. Mark it as volatile
- * to prevent compiler doing
- * something odd.
- */
- unsigned char swcount;
struct page *page;
swp_entry_t entry;
- unsigned int i = 0;
- int retval = 0;
+ unsigned int i;
+ int retries = 0;
- /*
- * When searching mms for an entry, a good strategy is to
- * start at the first mm we freed the previous entry from
- * (though actually we don't notice whether we or coincidence
- * freed the entry). Initialize this start_mm with a hold.
- *
- * A simpler strategy would be to start at the last mm we
- * freed the previous entry from; but that would take less
- * advantage of mmlist ordering, which clusters forked mms
- * together, child after parent. If we race with dup_mmap(), we
- * prefer to resolve parent before child, lest we miss entries
- * duplicated after we scanned child: using last mm would invert
- * that.
- */
- start_mm = &init_mm;
- mmget(&init_mm);
+ if (!si->inuse_pages)
+ return 0;
- /*
- * Keep on scanning until all entries have gone. Usually,
- * one pass through swap_map is enough, but not necessarily:
- * there are races when an instance of an entry might be missed.
- */
- while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
+ if (!frontswap)
+ pages_to_unuse = 0;
+
+retry:
+ retval = shmem_unuse(type, frontswap, &pages_to_unuse);
+ if (retval)
+ goto out;
+
+ prev_mm = &init_mm;
+ mmget(prev_mm);
+
+ spin_lock(&mmlist_lock);
+ p = &init_mm.mmlist;
+ while ((p = p->next) != &init_mm.mmlist) {
if (signal_pending(current)) {
retval = -EINTR;
break;
}
- /*
- * Get a page for the entry, using the existing swap
- * cache page if there is one. Otherwise, get a clean
- * page and read the swap into it.
- */
- swap_map = &si->swap_map[i];
- entry = swp_entry(type, i);
- page = read_swap_cache_async(entry,
- GFP_HIGHUSER_MOVABLE, NULL, 0, false);
- if (!page) {
- /*
- * Either swap_duplicate() failed because entry
- * has been freed independently, and will not be
- * reused since sys_swapoff() already disabled
- * allocation from here, or alloc_page() failed.
- */
- swcount = *swap_map;
- /*
- * We don't hold lock here, so the swap entry could be
- * SWAP_MAP_BAD (when the cluster is discarding).
- * Instead of fail out, We can just skip the swap
- * entry because swapoff will wait for discarding
- * finish anyway.
- */
- if (!swcount || swcount == SWAP_MAP_BAD)
- continue;
- retval = -ENOMEM;
- break;
- }
+ mm = list_entry(p, struct mm_struct, mmlist);
+ if (!mmget_not_zero(mm))
+ continue;
+ spin_unlock(&mmlist_lock);
+ mmput(prev_mm);
+ prev_mm = mm;
+ retval = unuse_mm(mm, type, frontswap, &pages_to_unuse);
- /*
- * Don't hold on to start_mm if it looks like exiting.
- */
- if (atomic_read(&start_mm->mm_users) == 1) {
- mmput(start_mm);
- start_mm = &init_mm;
- mmget(&init_mm);
+ if (retval) {
+ mmput(prev_mm);
+ goto out;
}
/*
- * Wait for and lock page. When do_swap_page races with
- * try_to_unuse, do_swap_page can handle the fault much
- * faster than try_to_unuse can locate the entry. This
- * apparently redundant "wait_on_page_locked" lets try_to_unuse
- * defer to do_swap_page in such a case - in some tests,
- * do_swap_page and try_to_unuse repeatedly compete.
- */
- wait_on_page_locked(page);
- wait_on_page_writeback(page);
- lock_page(page);
- wait_on_page_writeback(page);
-
- /*
- * Remove all references to entry.
+ * Make sure that we aren't completely killing
+ * interactive performance.
*/
- swcount = *swap_map;
- if (swap_count(swcount) == SWAP_MAP_SHMEM) {
- retval = shmem_unuse(entry, page);
- /* page has already been unlocked and released */
- if (retval < 0)
- break;
- continue;
- }
- if (swap_count(swcount) && start_mm != &init_mm)
- retval = unuse_mm(start_mm, entry, page);
-
- if (swap_count(*swap_map)) {
- int set_start_mm = (*swap_map >= swcount);
- struct list_head *p = &start_mm->mmlist;
- struct mm_struct *new_start_mm = start_mm;
- struct mm_struct *prev_mm = start_mm;
- struct mm_struct *mm;
-
- mmget(new_start_mm);
- mmget(prev_mm);
- spin_lock(&mmlist_lock);
- while (swap_count(*swap_map) && !retval &&
- (p = p->next) != &start_mm->mmlist) {
- mm = list_entry(p, struct mm_struct, mmlist);
- if (!mmget_not_zero(mm))
- continue;
- spin_unlock(&mmlist_lock);
- mmput(prev_mm);
- prev_mm = mm;
+ cond_resched();
+ spin_lock(&mmlist_lock);
+ }
+ spin_unlock(&mmlist_lock);
- cond_resched();
+ mmput(prev_mm);
- swcount = *swap_map;
- if (!swap_count(swcount)) /* any usage ? */
- ;
- else if (mm == &init_mm)
- set_start_mm = 1;
- else
- retval = unuse_mm(mm, entry, page);
-
- if (set_start_mm && *swap_map < swcount) {
- mmput(new_start_mm);
- mmget(mm);
- new_start_mm = mm;
- set_start_mm = 0;
- }
- spin_lock(&mmlist_lock);
- }
- spin_unlock(&mmlist_lock);
- mmput(prev_mm);
- mmput(start_mm);
- start_mm = new_start_mm;
- }
- if (retval) {
- unlock_page(page);
- put_page(page);
- break;
- }
+ i = 0;
+ while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
- /*
- * If a reference remains (rare), we would like to leave
- * the page in the swap cache; but try_to_unmap could
- * then re-duplicate the entry once we drop page lock,
- * so we might loop indefinitely; also, that page could
- * not be swapped out to other storage meanwhile. So:
- * delete from cache even if there's another reference,
- * after ensuring that the data has been saved to disk -
- * since if the reference remains (rarer), it will be
- * read from disk into another page. Splitting into two
- * pages would be incorrect if swap supported "shared
- * private" pages, but they are handled by tmpfs files.
- *
- * Given how unuse_vma() targets one particular offset
- * in an anon_vma, once the anon_vma has been determined,
- * this splitting happens to be just what is needed to
- * handle where KSM pages have been swapped out: re-reading
- * is unnecessarily slow, but we can fix that later on.
- */
- if (swap_count(*swap_map) &&
- PageDirty(page) && PageSwapCache(page)) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- };
-
- swap_writepage(compound_head(page), &wbc);
- lock_page(page);
- wait_on_page_writeback(page);
- }
+ entry = swp_entry(type, i);
+ page = find_get_page(swap_address_space(entry), i);
+ if (!page)
+ continue;
/*
* It is conceivable that a racing task removed this page from
- * swap cache just before we acquired the page lock at the top,
- * or while we dropped it in unuse_mm(). The page might even
- * be back in swap cache on another swap area: that we must not
- * delete, since it may not have been written out to swap yet.
- */
- if (PageSwapCache(page) &&
- likely(page_private(page) == entry.val) &&
- (!PageTransCompound(page) ||
- !swap_page_trans_huge_swapped(si, entry)))
- delete_from_swap_cache(compound_head(page));
-
- /*
- * So we could skip searching mms once swap count went
- * to 1, we did not mark any present ptes as dirty: must
- * mark page dirty so shrink_page_list will preserve it.
+ * swap cache just before we acquired the page lock. The page
+ * might even be back in swap cache on another swap area. But
+	 * that is okay: try_to_free_swap() only removes stale pages.
*/
- SetPageDirty(page);
+ lock_page(page);
+ wait_on_page_writeback(page);
+ try_to_free_swap(page);
unlock_page(page);
put_page(page);
/*
- * Make sure that we aren't completely killing
- * interactive performance.
+ * For frontswap, we just need to unuse pages_to_unuse, if
+ * it was specified. Need not check frontswap again here as
+ * we already zeroed out pages_to_unuse if not frontswap.
*/
- cond_resched();
- if (frontswap && pages_to_unuse > 0) {
- if (!--pages_to_unuse)
- break;
- }
+ if (pages_to_unuse && --pages_to_unuse == 0)
+ goto out;
}
- mmput(start_mm);
- return retval;
+ /*
+	 * Let's check again to see if there are still swap entries in the map.
+	 * If yes, we need to retry the unuse logic again.
+	 * Under global memory pressure, swap entries can be reinserted into
+	 * process space after the mmlist loop above passes over them.
+	 * It's not worth continuously retrying to unuse the swap in this case.
+ * So we try SWAP_UNUSE_MAX_TRIES times.
+ */
+ if (++retries >= SWAP_UNUSE_MAX_TRIES)
+ retval = -EBUSY;
+ else if (si->inuse_pages)
+ goto retry;
+
+out:
+ return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
}
/*
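
In outline, the rewritten try_to_unuse() above walks the users of the swap device instead of reading every swap slot up front: tmpfs mappings via shmem_unuse(), then each mm on init_mm.mmlist via unuse_mm(), then whatever remains in the swap cache, retrying the whole sequence up to SWAP_UNUSE_MAX_TRIES times because entries can be re-added under memory pressure. The comment below is only a compressed paraphrase of the hunk above, written for reading; it is not additional code:

/*
 * retry:
 *	shmem_unuse(type, frontswap, &pages_to_unuse)
 *	for each mm on init_mm.mmlist:
 *		unuse_mm(mm, type, frontswap, &pages_to_unuse)
 *	for each i = find_next_to_unuse(si, i, frontswap):
 *		lock page, wait on writeback, try_to_free_swap(), put page
 *	if (si->inuse_pages && ++retries < SWAP_UNUSE_MAX_TRIES)
 *		goto retry
 */
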
@@ -2258,7 +2162,7 @@ static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
struct swap_extent *se;
pgoff_t offset;
- sis = swap_info[swp_type(entry)];
+ sis = swp_swap_info(entry);
*bdev = sis->bdev;
offset = swp_offset(entry);
@@ -2700,9 +2604,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
if (!l)
return SEQ_START_TOKEN;
- for (type = 0; type < nr_swapfiles; type++) {
- smp_rmb(); /* read nr_swapfiles before swap_info[type] */
- si = swap_info[type];
+ for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
if (!--l)
@@ -2722,9 +2624,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
else
type = si->type + 1;
- for (; type < nr_swapfiles; type++) {
- smp_rmb(); /* read nr_swapfiles before swap_info[type] */
- si = swap_info[type];
+ for (; (si = swap_type_to_swap_info(type)); type++) {
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
++*pos;
@@ -2813,9 +2713,8 @@ static struct swap_info_struct *alloc_swap_info(void)
struct swap_info_struct *p;
unsigned int type;
int i;
- int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
- p = kvzalloc(size, GFP_KERNEL);
+ p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
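
The alloc_swap_info() hunk above replaces the open-coded `sizeof(*p) + nr_node_ids * sizeof(struct plist_node)` with struct_size(), which sizes a structure ending in a flexible array member and saturates instead of wrapping on overflow. A generic sketch of the idiom; the structure and helper below are made up for illustration:

#include <linux/mm.h>		/* kvzalloc() */
#include <linux/overflow.h>	/* struct_size() */

/* Illustrative structure with a trailing flexible array member. */
struct node_table {
	unsigned int	nr;
	unsigned long	slots[];
};

static struct node_table *node_table_alloc(unsigned int nr)
{
	struct node_table *t;

	/* sizeof(*t) + nr * sizeof(t->slots[0]), with overflow checking */
	t = kvzalloc(struct_size(t, slots, nr), GFP_KERNEL);
	if (t)
		t->nr = nr;
	return t;
}
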
@@ -2831,14 +2730,14 @@ static struct swap_info_struct *alloc_swap_info(void)
}
if (type >= nr_swapfiles) {
p->type = type;
- swap_info[type] = p;
+ WRITE_ONCE(swap_info[type], p);
/*
* Write swap_info[type] before nr_swapfiles, in case a
* racing procfs swap_start() or swap_next() is reading them.
* (We never shrink nr_swapfiles, we never free this entry.)
*/
smp_wmb();
- nr_swapfiles++;
+ WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
} else {
kvfree(p);
p = swap_info[type];
@@ -3358,7 +3257,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
{
struct swap_info_struct *p;
struct swap_cluster_info *ci;
- unsigned long offset, type;
+ unsigned long offset;
unsigned char count;
unsigned char has_cache;
int err = -EINVAL;
@@ -3366,10 +3265,10 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
if (non_swap_entry(entry))
goto out;
- type = swp_type(entry);
- if (type >= nr_swapfiles)
+ p = swp_swap_info(entry);
+ if (!p)
goto bad_file;
- p = swap_info[type];
+
offset = swp_offset(entry);
if (unlikely(offset >= p->max))
goto out;
@@ -3466,7 +3365,7 @@ int swapcache_prepare(swp_entry_t entry)
struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
- return swap_info[swp_type(entry)];
+ return swap_type_to_swap_info(swp_type(entry));
}
struct swap_info_struct *page_swap_info(struct page *page)
diff --git a/mm/truncate.c b/mm/truncate.c
index 798e7ccfb030..b7d3c99f00c9 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -539,6 +539,8 @@ EXPORT_SYMBOL(truncate_inode_pages_final);
* invalidate_mapping_pages() will not block on IO activity. It will not
* invalidate pages which are dirty, locked, under writeback or mapped into
* pagetables.
+ *
+ * Return: the number of pages that were invalidated
*/
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end)
@@ -664,7 +666,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
* Any pages which are found to be mapped into pagetables are unmapped prior to
* invalidation.
*
- * Returns -EBUSY if any pages could not be invalidated.
+ * Return: -EBUSY if any pages could not be invalidated.
*/
int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end)
@@ -761,7 +763,7 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
* Any pages which are found to be mapped into pagetables are unmapped prior to
* invalidation.
*
- * Returns -EBUSY if any pages could not be invalidated.
+ * Return: -EBUSY if any pages could not be invalidated.
*/
int invalidate_inode_pages2(struct address_space *mapping)
{
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 852eb4e53f06..14faadcedd06 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -247,7 +247,8 @@ static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
/*
* Validates that the given object is:
* - not bogus address
- * - known-safe heap or stack object
+ * - fully contained by stack (or stack frame, when available)
+ * - fully within SLAB object (or object whitelist area, when available)
* - not in kernel text
*/
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
@@ -262,9 +263,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
/* Check for invalid addresses. */
check_bogus_address((const unsigned long)ptr, n, to_user);
- /* Check for bad heap object. */
- check_heap_object(ptr, n, to_user);
-
/* Check for bad stack object. */
switch (check_stack_object(ptr, n)) {
case NOT_STACK:
@@ -282,6 +280,9 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
usercopy_abort("process stack", NULL, to_user, 0, n);
}
+ /* Check for bad heap object. */
+ check_heap_object(ptr, n, to_user);
+
/* Check for object in kernel to avoid text exposure. */
check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 065c1ce191c4..d59b5a73dfb3 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -267,14 +267,10 @@ retry:
VM_BUG_ON(dst_addr & ~huge_page_mask(h));
/*
- * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
- * i_mmap_rwsem ensures the dst_pte remains valid even
- * in the case of shared pmds. fault mutex prevents
- * races with other faulting threads.
+ * Serialize via hugetlb_fault_mutex
*/
- mapping = dst_vma->vm_file->f_mapping;
- i_mmap_lock_read(mapping);
idx = linear_page_index(dst_vma, dst_addr);
+ mapping = dst_vma->vm_file->f_mapping;
hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
idx, dst_addr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -283,7 +279,6 @@ retry:
dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
if (!dst_pte) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
goto out_unlock;
}
@@ -291,7 +286,6 @@ retry:
dst_pteval = huge_ptep_get(dst_pte);
if (!huge_pte_none(dst_pteval)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
goto out_unlock;
}
@@ -299,7 +293,6 @@ retry:
dst_addr, src_addr, &page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
vm_alloc_shared = vm_shared;
cond_resched();
diff --git a/mm/util.c b/mm/util.c
index 4df23d64aac7..d559bde497a9 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -36,6 +36,8 @@ EXPORT_SYMBOL(kfree_const);
* kstrdup - allocate space for and copy an existing string
* @s: the string to duplicate
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Return: newly allocated copy of @s or %NULL in case of error
*/
char *kstrdup(const char *s, gfp_t gfp)
{
@@ -58,9 +60,10 @@ EXPORT_SYMBOL(kstrdup);
* @s: the string to duplicate
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
- * Function returns source string if it is in .rodata section otherwise it
- * fallbacks to kstrdup.
- * Strings allocated by kstrdup_const should be freed by kfree_const.
+ * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
+ *
+ * Return: source string if it is in .rodata section, otherwise it
+ * falls back to kstrdup.
*/
const char *kstrdup_const(const char *s, gfp_t gfp)
{
@@ -78,6 +81,8 @@ EXPORT_SYMBOL(kstrdup_const);
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*
* Note: Use kmemdup_nul() instead if the size is known exactly.
+ *
+ * Return: newly allocated copy of @s or %NULL in case of error
*/
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
@@ -103,6 +108,8 @@ EXPORT_SYMBOL(kstrndup);
* @src: memory region to duplicate
* @len: memory region length
* @gfp: GFP mask to use
+ *
+ * Return: newly allocated copy of @src or %NULL in case of error
*/
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
@@ -120,6 +127,9 @@ EXPORT_SYMBOL(kmemdup);
* @s: The data to stringify
* @len: The size of the data
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Return: newly allocated copy of @s with NUL-termination or %NULL in
+ * case of error
*/
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
@@ -143,14 +153,14 @@ EXPORT_SYMBOL(kmemdup_nul);
* @src: source address in user space
* @len: number of bytes to copy
*
- * Returns an ERR_PTR() on failure. Result is physically
+ * Return: an ERR_PTR() on failure. Result is physically
* contiguous, to be freed by kfree().
*/
void *memdup_user(const void __user *src, size_t len)
{
void *p;
- p = kmalloc_track_caller(len, GFP_USER);
+ p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -169,7 +179,7 @@ EXPORT_SYMBOL(memdup_user);
* @src: source address in user space
* @len: number of bytes to copy
*
- * Returns an ERR_PTR() on failure. Result may be not
+ * Return: an ERR_PTR() on failure. Result may be not
* physically contiguous. Use kvfree() to free.
*/
void *vmemdup_user(const void __user *src, size_t len)
@@ -193,6 +203,8 @@ EXPORT_SYMBOL(vmemdup_user);
* strndup_user - duplicate an existing string from user space
* @s: The string to duplicate
* @n: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Return: newly allocated copy of @s or %NULL in case of error
*/
char *strndup_user(const char __user *s, long n)
{
@@ -224,7 +236,7 @@ EXPORT_SYMBOL(strndup_user);
* @src: source address in user space
* @len: number of bytes to copy
*
- * Returns an ERR_PTR() on failure.
+ * Return: an ERR_PTR() on failure.
*/
void *memdup_user_nul(const void __user *src, size_t len)
{
@@ -310,10 +322,6 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- *
* get_user_pages_fast provides equivalent functionality to get_user_pages,
* operating on current and current->mm, with force=0 and vma=NULL. However
* unlike get_user_pages, it must be called without mmap_sem held.
@@ -325,6 +333,10 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
* pages have to be faulted in, it may turn out to be slightly slower so
* callers need to carefully consider what to use. On many architectures,
* get_user_pages_fast simply falls back to get_user_pages.
+ *
+ * Return: number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
*/
int __weak get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages)
@@ -386,6 +398,8 @@ EXPORT_SYMBOL(vm_mmap);
*
* Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
* fall back to vmalloc.
+ *
+ * Return: pointer to the allocated memory or %NULL in case of failure
*/
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -478,7 +492,7 @@ bool page_mapped(struct page *page)
return true;
if (PageHuge(page))
return false;
- for (i = 0; i < hpage_nr_pages(page); i++) {
+ for (i = 0; i < (1 << compound_order(page)); i++) {
if (atomic_read(&page[i]._mapcount) >= 0)
return true;
}
@@ -729,7 +743,8 @@ error:
* @buffer: the buffer to copy to.
* @buflen: the length of the buffer. Larger cmdline values are truncated
* to this length.
- * Returns the size of the cmdline field copied. Note that the copy does
+ *
+ * Return: the size of the cmdline field copied. Note that the copy does
* not guarantee an ending NULL byte.
*/
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 871e41c55e23..e86ba6e74b50 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -498,7 +498,11 @@ nocache:
}
found:
- if (addr + size > vend)
+ /*
+	 * Also check the calculated address against vstart, because it
+	 * can wrap around to 0 due to a big align request.
+ */
+ if (addr + size > vend || addr < vstart)
goto overflow;
va->va_start = addr;
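
The extra `addr < vstart` check above guards against exactly that wrap: rounding the candidate address up to a huge alignment can overflow the unsigned arithmetic and come back as a small value (even 0), which would otherwise still pass the `addr + size > vend` test. A small arithmetic illustration; the ALIGN()-style helper and the constants are for demonstration only, on a 64-bit build where unsigned long is 64 bits:

#include <stdio.h>

/* Round addr up to a power-of-two alignment, like ALIGN() does. */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long vstart = 0xffffc90000000000UL;	/* example vmalloc start */
	unsigned long align  = 1UL << 63;		/* absurdly large alignment */

	/* The rounded address overflows and wraps to 0, i.e. below vstart,
	 * yet 0 + size would still pass an "addr + size > vend" check. */
	printf("aligned: %#lx\n", align_up(vstart, align));
	return 0;
}
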
@@ -840,7 +844,7 @@ static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
* @order: how many 2^order pages should be occupied in newly allocated block
* @gfp_mask: flags for the page level allocator
*
- * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
+ * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
*/
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
@@ -1187,6 +1191,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
EXPORT_SYMBOL(vm_map_ram);
static struct vm_struct *vmlist __initdata;
+
/**
* vm_area_add_early - add vmap area early during boot
* @vm: vm_struct to add
@@ -1421,13 +1426,15 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
}
/**
- * get_vm_area - reserve a contiguous kernel virtual area
- * @size: size of the area
- * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
+ * get_vm_area - reserve a contiguous kernel virtual area
+ * @size: size of the area
+ * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
*
- * Search an area of @size in the kernel virtual mapping area,
- * and reserved it for out purposes. Returns the area descriptor
- * on success or %NULL on failure.
+ * Search an area of @size in the kernel virtual mapping area,
+ * and reserve it for our purposes. Returns the area descriptor
+ * on success or %NULL on failure.
+ *
+ * Return: the area descriptor on success or %NULL on failure.
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
@@ -1444,12 +1451,14 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
}
/**
- * find_vm_area - find a continuous kernel virtual area
- * @addr: base address
+ * find_vm_area - find a continuous kernel virtual area
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and return it.
+ * It is up to the caller to do all required locking to keep the returned
+ * pointer valid.
*
- * Search for the kernel VM area starting at @addr, and return it.
- * It is up to the caller to do all required locking to keep the returned
- * pointer valid.
+ * Return: pointer to the found area or %NULL on failure
*/
struct vm_struct *find_vm_area(const void *addr)
{
@@ -1463,12 +1472,14 @@ struct vm_struct *find_vm_area(const void *addr)
}
/**
- * remove_vm_area - find and remove a continuous kernel virtual area
- * @addr: base address
+ * remove_vm_area - find and remove a continuous kernel virtual area
+ * @addr: base address
*
- * Search for the kernel VM area starting at @addr, and remove it.
- * This function returns the found VM area, but using it is NOT safe
- * on SMP machines, except for its size or flags.
+ * Search for the kernel VM area starting at @addr, and remove it.
+ * This function returns the found VM area, but using it is NOT safe
+ * on SMP machines, except for its size or flags.
+ *
+ * Return: pointer to the found area or %NULL on failure
*/
struct vm_struct *remove_vm_area(const void *addr)
{
@@ -1505,7 +1516,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
addr))
return;
- area = find_vmap_area((unsigned long)addr)->vm;
+ area = find_vm_area(addr);
if (unlikely(!area)) {
WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
addr);
@@ -1548,11 +1559,11 @@ static inline void __vfree_deferred(const void *addr)
}
/**
- * vfree_atomic - release memory allocated by vmalloc()
- * @addr: memory base address
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
*
- * This one is just like vfree() but can be called in any atomic context
- * except NMIs.
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
*/
void vfree_atomic(const void *addr)
{
@@ -1565,21 +1576,29 @@ void vfree_atomic(const void *addr)
__vfree_deferred(addr);
}
+static void __vfree(const void *addr)
+{
+ if (unlikely(in_interrupt()))
+ __vfree_deferred(addr);
+ else
+ __vunmap(addr, 1);
+}
+
/**
- * vfree - release memory allocated by vmalloc()
- * @addr: memory base address
+ * vfree - release memory allocated by vmalloc()
+ * @addr: memory base address
*
- * Free the virtually continuous memory area starting at @addr, as
- * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
- * NULL, no operation is performed.
+ * Free the virtually continuous memory area starting at @addr, as
+ * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
+ * NULL, no operation is performed.
*
- * Must not be called in NMI context (strictly speaking, only if we don't
- * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
- * conventions for vfree() arch-depenedent would be a really bad idea)
+ * Must not be called in NMI context (strictly speaking, only if we don't
+ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
+ * conventions for vfree() arch-dependent would be a really bad idea)
*
- * May sleep if called *not* from interrupt context.
+ * May sleep if called *not* from interrupt context.
*
- * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
+ * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
*/
void vfree(const void *addr)
{
@@ -1591,21 +1610,19 @@ void vfree(const void *addr)
if (!addr)
return;
- if (unlikely(in_interrupt()))
- __vfree_deferred(addr);
- else
- __vunmap(addr, 1);
+
+ __vfree(addr);
}
EXPORT_SYMBOL(vfree);
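A minimal caller sketch of the vmalloc()/vfree() pairing documented above (names are illustrative, not part of the patch): the allocation may sleep and so belongs in process context, while vfree() may be called from any context except NMI and silently ignores NULL.

#include <linux/vmalloc.h>

/* Hypothetical helpers: allocate and release a large, virtually
 * contiguous scratch buffer. */
static void *example_scratch_alloc(unsigned long nbytes)
{
        return vmalloc(nbytes);         /* NULL on failure */
}

static void example_scratch_free(void *buf)
{
        vfree(buf);                     /* vfree(NULL) is a no-op */
}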
/**
- * vunmap - release virtual mapping obtained by vmap()
- * @addr: memory base address
+ * vunmap - release virtual mapping obtained by vmap()
+ * @addr: memory base address
*
- * Free the virtually contiguous memory area starting at @addr,
- * which was created from the page array passed to vmap().
+ * Free the virtually contiguous memory area starting at @addr,
+ * which was created from the page array passed to vmap().
*
- * Must not be called in interrupt context.
+ * Must not be called in interrupt context.
*/
void vunmap(const void *addr)
{
@@ -1617,17 +1634,19 @@ void vunmap(const void *addr)
EXPORT_SYMBOL(vunmap);
/**
- * vmap - map an array of pages into virtually contiguous space
- * @pages: array of page pointers
- * @count: number of pages to map
- * @flags: vm_area->flags
- * @prot: page protection for the mapping
- *
- * Maps @count pages from @pages into contiguous kernel virtual
- * space.
+ * vmap - map an array of pages into virtually contiguous space
+ * @pages: array of page pointers
+ * @count: number of pages to map
+ * @flags: vm_area->flags
+ * @prot: page protection for the mapping
+ *
+ * Maps @count pages from @pages into contiguous kernel virtual
+ * space.
+ *
+ * Return: the address of the area or %NULL on failure
*/
void *vmap(struct page **pages, unsigned int count,
- unsigned long flags, pgprot_t prot)
+ unsigned long flags, pgprot_t prot)
{
struct vm_struct *area;
unsigned long size; /* In bytes */
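As an illustration of the vmap() interface documented above, a hypothetical sketch (not part of the patch) that maps two separately allocated pages into one contiguous virtual range and tears it down with vunmap(), which unmaps the range but does not free the pages.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void example_vmap_two_pages(void)
{
        struct page *pages[2];
        void *va;

        pages[0] = alloc_page(GFP_KERNEL);
        pages[1] = alloc_page(GFP_KERNEL);
        if (!pages[0] || !pages[1])
                goto out;

        va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);       /* NULL on failure */
        if (va) {
                memset(va, 0, 2 * PAGE_SIZE);   /* one contiguous view */
                vunmap(va);                     /* pages stay allocated */
        }
out:
        if (pages[0])
                __free_page(pages[0]);
        if (pages[1])
                __free_page(pages[1]);
}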
@@ -1709,25 +1728,27 @@ fail:
warn_alloc(gfp_mask, NULL,
"vmalloc: allocation failure, allocated %ld of %ld bytes",
(area->nr_pages*PAGE_SIZE), area->size);
- vfree(area->addr);
+ __vfree(area->addr);
return NULL;
}
/**
- * __vmalloc_node_range - allocate virtually contiguous memory
- * @size: allocation size
- * @align: desired alignment
- * @start: vm area range start
- * @end: vm area range end
- * @gfp_mask: flags for the page level allocator
- * @prot: protection mask for the allocated pages
- * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
- * @node: node to use for allocation or NUMA_NO_NODE
- * @caller: caller's return address
- *
- * Allocate enough pages to cover @size from the page level
- * allocator with @gfp_mask flags. Map them into contiguous
- * kernel virtual space, using a pagetable protection of @prot.
+ * __vmalloc_node_range - allocate virtually contiguous memory
+ * @size: allocation size
+ * @align: desired alignment
+ * @start: vm area range start
+ * @end: vm area range end
+ * @gfp_mask: flags for the page level allocator
+ * @prot: protection mask for the allocated pages
+ * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
+ * @node: node to use for allocation or NUMA_NO_NODE
+ * @caller: caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags. Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ *
+ * Return: the address of the area or %NULL on failure
*/
void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
@@ -1768,25 +1789,35 @@ fail:
return NULL;
}
+/*
+ * This is only for performance analysis and stress testing of vmalloc.
+ * It is required by the vmalloc test module and must not be used for
+ * anything else.
+ */
+#ifdef CONFIG_TEST_VMALLOC_MODULE
+EXPORT_SYMBOL_GPL(__vmalloc_node_range);
+#endif
+
/**
- * __vmalloc_node - allocate virtually contiguous memory
- * @size: allocation size
- * @align: desired alignment
- * @gfp_mask: flags for the page level allocator
- * @prot: protection mask for the allocated pages
- * @node: node to use for allocation or NUMA_NO_NODE
- * @caller: caller's return address
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size: allocation size
+ * @align: desired alignment
+ * @gfp_mask: flags for the page level allocator
+ * @prot: protection mask for the allocated pages
+ * @node: node to use for allocation or NUMA_NO_NODE
+ * @caller: caller's return address
*
- * Allocate enough pages to cover @size from the page level
- * allocator with @gfp_mask flags. Map them into contiguous
- * kernel virtual space, using a pagetable protection of @prot.
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags. Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
*
- * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
- * and __GFP_NOFAIL are not supported
+ * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
+ * and __GFP_NOFAIL are not supported
*
- * Any use of gfp flags outside of GFP_KERNEL should be consulted
- * with mm people.
+ * Any use of gfp flags outside of GFP_KERNEL should be discussed
+ * with the mm people.
*
+ * Return: pointer to the allocated memory or %NULL on error
*/
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
@@ -1818,13 +1849,16 @@ void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
}
/**
- * vmalloc - allocate virtually contiguous memory
- * @size: allocation size
- * Allocate enough pages to cover @size from the page level
- * allocator and map them into contiguous kernel virtual space.
+ * vmalloc - allocate virtually contiguous memory
+ * @size: allocation size
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
*
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
+ * Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc(unsigned long size)
{
@@ -1834,14 +1868,17 @@ void *vmalloc(unsigned long size)
EXPORT_SYMBOL(vmalloc);
/**
- * vzalloc - allocate virtually contiguous memory with zero fill
- * @size: allocation size
- * Allocate enough pages to cover @size from the page level
- * allocator and map them into contiguous kernel virtual space.
- * The memory allocated is set to zero.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
+ * vzalloc - allocate virtually contiguous memory with zero fill
+ * @size: allocation size
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
*/
void *vzalloc(unsigned long size)
{
@@ -1856,34 +1893,30 @@ EXPORT_SYMBOL(vzalloc);
*
* The resulting memory area is zeroed so it can be mapped to userspace
* without leaking data.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_user(unsigned long size)
{
- struct vm_struct *area;
- void *ret;
-
- ret = __vmalloc_node(size, SHMLBA,
- GFP_KERNEL | __GFP_ZERO,
- PAGE_KERNEL, NUMA_NO_NODE,
- __builtin_return_address(0));
- if (ret) {
- area = find_vm_area(ret);
- area->flags |= VM_USERMAP;
- }
- return ret;
+ return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
+ VM_USERMAP, NUMA_NO_NODE,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_user);
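The rewrite above folds the old find_vm_area()-and-set-VM_USERMAP sequence into a single __vmalloc_node_range() call. A hypothetical caller (names are illustrative, not part of the patch) allocating a zeroed buffer that will later be handed to userspace:

#include <linux/vmalloc.h>

/* Zeroed, VM_USERMAP-tagged buffer, suitable for remap_vmalloc_range() */
static void *example_shared_buf;

static int example_alloc_shared(void)
{
        example_shared_buf = vmalloc_user(1UL << 20);   /* 1 MiB */
        return example_shared_buf ? 0 : -ENOMEM;
}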
/**
- * vmalloc_node - allocate memory on a specific node
- * @size: allocation size
- * @node: numa node
+ * vmalloc_node - allocate memory on a specific node
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
*
- * Allocate enough pages to cover @size from the page level
- * allocator and map them into contiguous kernel virtual space.
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
*
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
+ * Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_node(unsigned long size, int node)
{
@@ -1903,6 +1936,8 @@ EXPORT_SYMBOL(vmalloc_node);
*
* For tight control over page level allocator and protection flags
* use __vmalloc_node() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
*/
void *vzalloc_node(unsigned long size, int node)
{
@@ -1912,17 +1947,18 @@ void *vzalloc_node(unsigned long size, int node)
EXPORT_SYMBOL(vzalloc_node);
/**
- * vmalloc_exec - allocate virtually contiguous, executable memory
- * @size: allocation size
+ * vmalloc_exec - allocate virtually contiguous, executable memory
+ * @size: allocation size
*
- * Kernel-internal function to allocate enough pages to cover @size
- * the page level allocator and map them into contiguous and
- * executable kernel virtual space.
+ * Kernel-internal function to allocate enough pages to cover @size
+ * from the page level allocator and map them into contiguous and
+ * executable kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
*
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
+ * Return: pointer to the allocated memory or %NULL on error
*/
-
void *vmalloc_exec(unsigned long size)
{
return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
@@ -1942,11 +1978,13 @@ void *vmalloc_exec(unsigned long size)
#endif
/**
- * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
- * @size: allocation size
+ * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
+ * @size: allocation size
*
- * Allocate enough 32bit PA addressable pages to cover @size from the
- * page level allocator and map them into contiguous kernel virtual space.
+ * Allocate enough 32bit PA addressable pages to cover @size from the
+ * page level allocator and map them into contiguous kernel virtual space.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_32(unsigned long size)
{
@@ -1957,23 +1995,19 @@ EXPORT_SYMBOL(vmalloc_32);
/**
* vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
- * @size: allocation size
+ * @size: allocation size
*
* The resulting memory area is 32bit addressable and zeroed so it can be
* mapped to userspace without leaking data.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
*/
void *vmalloc_32_user(unsigned long size)
{
- struct vm_struct *area;
- void *ret;
-
- ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
- NUMA_NO_NODE, __builtin_return_address(0));
- if (ret) {
- area = find_vm_area(ret);
- area->flags |= VM_USERMAP;
- }
- return ret;
+ return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
+ GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+ VM_USERMAP, NUMA_NO_NODE,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_user);
@@ -2059,31 +2093,29 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
}
/**
- * vread() - read vmalloc area in a safe way.
- * @buf: buffer for reading data
- * @addr: vm address.
- * @count: number of bytes to be read.
- *
- * Returns # of bytes which addr and buf should be increased.
- * (same number to @count). Returns 0 if [addr...addr+count) doesn't
- * includes any intersect with alive vmalloc area.
- *
- * This function checks that addr is a valid vmalloc'ed area, and
- * copy data from that area to a given buffer. If the given memory range
- * of [addr...addr+count) includes some valid address, data is copied to
- * proper area of @buf. If there are memory holes, they'll be zero-filled.
- * IOREMAP area is treated as memory hole and no copy is done.
- *
- * If [addr...addr+count) doesn't includes any intersects with alive
- * vm_struct area, returns 0. @buf should be kernel's buffer.
- *
- * Note: In usual ops, vread() is never necessary because the caller
- * should know vmalloc() area is valid and can use memcpy().
- * This is for routines which have to access vmalloc area without
- * any informaion, as /dev/kmem.
- *
+ * vread() - read vmalloc area in a safe way.
+ * @buf: buffer for reading data
+ * @addr: vm address.
+ * @count: number of bytes to be read.
+ *
+ * This function checks that addr is a valid vmalloc'ed area and
+ * copies data from that area to the given buffer. If the given memory
+ * range of [addr...addr+count) includes some valid address, data is
+ * copied to the proper area of @buf. If there are memory holes, they
+ * are zero-filled. IOREMAP areas are treated as memory holes and no
+ * copy is done.
+ *
+ * If [addr...addr+count) doesn't include any intersection with a live
+ * vm_struct area, 0 is returned. @buf should be a kernel buffer.
+ *
+ * Note: In usual ops, vread() is never necessary because the caller
+ * should know the vmalloc() area is valid and can use memcpy().
+ * This is for routines which have to access the vmalloc area without
+ * any information, such as /dev/kmem.
+ *
+ * Return: number of bytes for which addr and buf should be increased
+ * (same number as @count) or %0 if [addr...addr+count) doesn't
+ * include any intersection with a valid vmalloc area
*/
-
long vread(char *buf, char *addr, unsigned long count)
{
struct vmap_area *va;
@@ -2140,31 +2172,29 @@ finished:
}
/**
- * vwrite() - write vmalloc area in a safe way.
- * @buf: buffer for source data
- * @addr: vm address.
- * @count: number of bytes to be read.
- *
- * Returns # of bytes which addr and buf should be incresed.
- * (same number to @count).
- * If [addr...addr+count) doesn't includes any intersect with valid
- * vmalloc area, returns 0.
- *
- * This function checks that addr is a valid vmalloc'ed area, and
- * copy data from a buffer to the given addr. If specified range of
- * [addr...addr+count) includes some valid address, data is copied from
- * proper area of @buf. If there are memory holes, no copy to hole.
- * IOREMAP area is treated as memory hole and no copy is done.
- *
- * If [addr...addr+count) doesn't includes any intersects with alive
- * vm_struct area, returns 0. @buf should be kernel's buffer.
- *
- * Note: In usual ops, vwrite() is never necessary because the caller
- * should know vmalloc() area is valid and can use memcpy().
- * This is for routines which have to access vmalloc area without
- * any informaion, as /dev/kmem.
+ * vwrite() - write vmalloc area in a safe way.
+ * @buf: buffer for source data
+ * @addr: vm address.
+ * @count: number of bytes to be written.
+ *
+ * This function checks that addr is a valid vmalloc'ed area and
+ * copies data from a buffer to the given addr. If the specified range
+ * of [addr...addr+count) includes some valid address, data is copied
+ * from the proper area of @buf. If there are memory holes, no copy is
+ * done into them. IOREMAP areas are treated as memory holes and no
+ * copy is done.
+ *
+ * If [addr...addr+count) doesn't include any intersection with a live
+ * vm_struct area, 0 is returned. @buf should be a kernel buffer.
+ *
+ * Note: In usual ops, vwrite() is never necessary because the caller
+ * should know the vmalloc() area is valid and can use memcpy().
+ * This is for routines which have to access the vmalloc area without
+ * any information, such as /dev/kmem.
+ *
+ * Return: number of bytes for which addr and buf should be
+ * increased (same number as @count) or %0 if [addr...addr+count)
+ * doesn't include any intersection with a valid vmalloc area
*/
-
long vwrite(char *buf, char *addr, unsigned long count)
{
struct vmap_area *va;
@@ -2216,20 +2246,20 @@ finished:
}
/**
- * remap_vmalloc_range_partial - map vmalloc pages to userspace
- * @vma: vma to cover
- * @uaddr: target user address to start at
- * @kaddr: virtual address of vmalloc kernel memory
- * @size: size of map area
+ * remap_vmalloc_range_partial - map vmalloc pages to userspace
+ * @vma: vma to cover
+ * @uaddr: target user address to start at
+ * @kaddr: virtual address of vmalloc kernel memory
+ * @size: size of map area
*
- * Returns: 0 for success, -Exxx on failure
+ * Returns: 0 for success, -Exxx on failure
*
- * This function checks that @kaddr is a valid vmalloc'ed area,
- * and that it is big enough to cover the range starting at
- * @uaddr in @vma. Will return failure if that criteria isn't
- * met.
+ * This function checks that @kaddr is a valid vmalloc'ed area,
+ * and that it is big enough to cover the range starting at
+ * @uaddr in @vma. Will return failure if that criterion isn't
+ * met.
*
- * Similar to remap_pfn_range() (see mm/memory.c)
+ * Similar to remap_pfn_range() (see mm/memory.c)
*/
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
void *kaddr, unsigned long size)
@@ -2248,7 +2278,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
if (!(area->flags & VM_USERMAP))
return -EINVAL;
- if (kaddr + size > area->addr + area->size)
+ if (kaddr + size > area->addr + get_vm_area_size(area))
return -EINVAL;
do {
@@ -2271,18 +2301,18 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
EXPORT_SYMBOL(remap_vmalloc_range_partial);
/**
- * remap_vmalloc_range - map vmalloc pages to userspace
- * @vma: vma to cover (map full range of vma)
- * @addr: vmalloc memory
- * @pgoff: number of pages into addr before first page to map
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
*
- * Returns: 0 for success, -Exxx on failure
+ * Returns: 0 for success, -Exxx on failure
*
- * This function checks that addr is a valid vmalloc'ed area, and
- * that it is big enough to cover the vma. Will return failure if
- * that criteria isn't met.
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criterion isn't met.
*
- * Similar to remap_pfn_range() (see mm/memory.c)
+ * Similar to remap_pfn_range() (see mm/memory.c)
*/
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff)
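A minimal, hypothetical mmap() handler using remap_vmalloc_range() as documented above. example_shared_buf is assumed to be a vmalloc_user() allocation (which guarantees the VM_USERMAP flag that remap_vmalloc_range_partial() checks):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

static void *example_shared_buf;        /* from vmalloc_user(), see earlier sketch */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Maps the whole VMA; fails with -EINVAL if the buffer is too
         * small or was not allocated with VM_USERMAP. */
        return remap_vmalloc_range(vma, example_shared_buf, vma->vm_pgoff);
}

static const struct file_operations example_mmap_fops = {
        .owner  = THIS_MODULE,
        .mmap   = example_mmap,
};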
@@ -2314,18 +2344,18 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
}
/**
- * alloc_vm_area - allocate a range of kernel address space
- * @size: size of the area
- * @ptes: returns the PTEs for the address space
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ * @ptes: returns the PTEs for the address space
*
- * Returns: NULL on failure, vm_struct on success
+ * Returns: NULL on failure, vm_struct on success
*
- * This function reserves a range of kernel address space, and
- * allocates pagetables to map that range. No actual mappings
- * are created.
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created.
*
- * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
- * allocated for the VM area are returned.
+ * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ * allocated for the VM area are returned.
*/
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
@@ -2751,4 +2781,3 @@ static int __init proc_vmalloc_init(void)
module_init(proc_vmalloc_init);
#endif
-
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a714c4f800e9..a5ad0b35ab8e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -374,7 +374,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
*/
int prealloc_shrinker(struct shrinker *shrinker)
{
- size_t size = sizeof(*shrinker->nr_deferred);
+ unsigned int size = sizeof(*shrinker->nr_deferred);
if (shrinker->flags & SHRINKER_NUMA_AWARE)
size *= nr_node_ids;
@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
delta = freeable / 2;
}
- /*
- * Make sure we apply some minimal pressure on default priority
- * even on small cgroups. Stale objects are not only consuming memory
- * by themselves, but can also hold a reference to a dying cgroup,
- * preventing it from being reclaimed. A dying cgroup with all
- * corresponding structures like per-cpu stats and kmem caches
- * can be really big, so it may lead to a significant waste of memory.
- */
- delta = max_t(unsigned long long, delta, min(freeable, batch_size));
-
total_scan += delta;
if (total_scan < 0) {
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -962,7 +952,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
*/
if (reclaimed && page_is_file_cache(page) &&
!mapping_exiting(mapping) && !dax_mapping(mapping))
- shadow = workingset_eviction(mapping, page);
+ shadow = workingset_eviction(page);
__delete_from_page_cache(page, shadow);
xa_unlock_irqrestore(&mapping->i_pages, flags);
@@ -1116,16 +1106,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
{
LIST_HEAD(ret_pages);
LIST_HEAD(free_pages);
- int pgactivate = 0;
- unsigned nr_unqueued_dirty = 0;
- unsigned nr_dirty = 0;
- unsigned nr_congested = 0;
unsigned nr_reclaimed = 0;
- unsigned nr_writeback = 0;
- unsigned nr_immediate = 0;
- unsigned nr_ref_keep = 0;
- unsigned nr_unmap_fail = 0;
+ memset(stat, 0, sizeof(*stat));
cond_resched();
while (!list_empty(page_list)) {
@@ -1169,10 +1152,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
page_check_dirty_writeback(page, &dirty, &writeback);
if (dirty || writeback)
- nr_dirty++;
+ stat->nr_dirty++;
if (dirty && !writeback)
- nr_unqueued_dirty++;
+ stat->nr_unqueued_dirty++;
/*
* Treat this page as congested if the underlying BDI is or if
@@ -1184,7 +1167,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (((dirty || writeback) && mapping &&
inode_write_congested(mapping->host)) ||
(writeback && PageReclaim(page)))
- nr_congested++;
+ stat->nr_congested++;
/*
* If a page at the tail of the LRU is under writeback, there
@@ -1233,7 +1216,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (current_is_kswapd() &&
PageReclaim(page) &&
test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
- nr_immediate++;
+ stat->nr_immediate++;
goto activate_locked;
/* Case 2 above */
@@ -1251,7 +1234,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* and it's also appropriate in global reclaim.
*/
SetPageReclaim(page);
- nr_writeback++;
+ stat->nr_writeback++;
goto activate_locked;
/* Case 3 above */
@@ -1271,7 +1254,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
case PAGEREF_ACTIVATE:
goto activate_locked;
case PAGEREF_KEEP:
- nr_ref_keep++;
+ stat->nr_ref_keep++;
goto keep_locked;
case PAGEREF_RECLAIM:
case PAGEREF_RECLAIM_CLEAN:
@@ -1336,7 +1319,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (unlikely(PageTransHuge(page)))
flags |= TTU_SPLIT_HUGE_PMD;
if (!try_to_unmap(page, flags)) {
- nr_unmap_fail++;
+ stat->nr_unmap_fail++;
goto activate_locked;
}
}
@@ -1484,7 +1467,7 @@ activate_locked:
VM_BUG_ON_PAGE(PageActive(page), page);
if (!PageMlocked(page)) {
SetPageActive(page);
- pgactivate++;
+ stat->nr_activate++;
count_memcg_page_event(page, PGACTIVATE);
}
keep_locked:
@@ -1499,18 +1482,8 @@ keep:
free_unref_page_list(&free_pages);
list_splice(&ret_pages, page_list);
- count_vm_events(PGACTIVATE, pgactivate);
-
- if (stat) {
- stat->nr_dirty = nr_dirty;
- stat->nr_congested = nr_congested;
- stat->nr_unqueued_dirty = nr_unqueued_dirty;
- stat->nr_writeback = nr_writeback;
- stat->nr_immediate = nr_immediate;
- stat->nr_activate = pgactivate;
- stat->nr_ref_keep = nr_ref_keep;
- stat->nr_unmap_fail = nr_unmap_fail;
- }
+ count_vm_events(PGACTIVATE, stat->nr_activate);
+
return nr_reclaimed;
}
@@ -1522,6 +1495,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
.priority = DEF_PRIORITY,
.may_unmap = 1,
};
+ struct reclaim_stat dummy_stat;
unsigned long ret;
struct page *page, *next;
LIST_HEAD(clean_pages);
@@ -1535,7 +1509,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
}
ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
- TTU_IGNORE_ACCESS, NULL, true);
+ TTU_IGNORE_ACCESS, &dummy_stat, true);
list_splice(&clean_pages, page_list);
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
return ret;
@@ -1640,8 +1614,8 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
}
-/*
- * zone_lru_lock is heavily contended. Some of the functions that
+/**
+ * pgdat->lru_lock is heavily contended. Some of the functions that
* shrink the lists perform better by taking out a batch of pages
* and working on them outside the LRU lock.
*
@@ -1663,7 +1637,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct lruvec *lruvec, struct list_head *dst,
unsigned long *nr_scanned, struct scan_control *sc,
- isolate_mode_t mode, enum lru_list lru)
+ enum lru_list lru)
{
struct list_head *src = &lruvec->lists[lru];
unsigned long nr_taken = 0;
@@ -1672,6 +1646,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
unsigned long skipped = 0;
unsigned long scan, total_scan, nr_pages;
LIST_HEAD(pages_skipped);
+ isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
scan = 0;
for (total_scan = 0;
@@ -1775,11 +1750,11 @@ int isolate_lru_page(struct page *page)
WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
if (PageLRU(page)) {
- struct zone *zone = page_zone(page);
+ pg_data_t *pgdat = page_pgdat(page);
struct lruvec *lruvec;
- spin_lock_irq(zone_lru_lock(zone));
- lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+ spin_lock_irq(&pgdat->lru_lock);
+ lruvec = mem_cgroup_page_lruvec(page, pgdat);
if (PageLRU(page)) {
int lru = page_lru(page);
get_page(page);
@@ -1787,7 +1762,7 @@ int isolate_lru_page(struct page *page)
del_page_from_lru_list(page, lruvec, lru);
ret = 0;
}
- spin_unlock_irq(zone_lru_lock(zone));
+ spin_unlock_irq(&pgdat->lru_lock);
}
return ret;
}
@@ -1909,8 +1884,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
unsigned long nr_scanned;
unsigned long nr_reclaimed = 0;
unsigned long nr_taken;
- struct reclaim_stat stat = {};
- isolate_mode_t isolate_mode = 0;
+ struct reclaim_stat stat;
int file = is_file_lru(lru);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
@@ -1931,13 +1905,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
lru_add_drain();
- if (!sc->may_unmap)
- isolate_mode |= ISOLATE_UNMAPPED;
-
spin_lock_irq(&pgdat->lru_lock);
nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
- &nr_scanned, sc, isolate_mode, lru);
+ &nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
reclaim_stat->recent_scanned[file] += nr_taken;
@@ -2019,9 +1990,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* processes, from rmap.
*
* If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone_lru_lock across the whole operation. But if
+ * appropriate to hold pgdat->lru_lock across the whole operation. But if
* the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone_lru_lock around each page. It's impossible to balance
+ * should drop pgdat->lru_lock around each page. It's impossible to balance
* this, so instead we remove the pages from the LRU while processing them.
* It is safe to rely on PG_active against the non-LRU pages in here because
* nobody will play with that bit on a non-LRU page.
@@ -2094,19 +2065,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
unsigned nr_deactivate, nr_activate;
unsigned nr_rotated = 0;
- isolate_mode_t isolate_mode = 0;
int file = is_file_lru(lru);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
lru_add_drain();
- if (!sc->may_unmap)
- isolate_mode |= ISOLATE_UNMAPPED;
-
spin_lock_irq(&pgdat->lru_lock);
nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
- &nr_scanned, sc, isolate_mode, lru);
+ &nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
reclaim_stat->recent_scanned[file] += nr_taken;
@@ -2764,16 +2731,15 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
sc->nr_reclaimed - reclaimed);
/*
- * Direct reclaim and kswapd have to scan all memory
- * cgroups to fulfill the overall scan target for the
- * node.
+ * Kswapd has to scan all memory cgroups to fulfill
+ * the overall scan target for the node.
*
* Limit reclaim, on the other hand, only cares about
* nr_to_reclaim pages to be reclaimed and it will
* retry with decreasing priority if one round over the
* whole hierarchy is not sufficient.
*/
- if (!global_reclaim(sc) &&
+ if (!current_is_kswapd() &&
sc->nr_reclaimed >= sc->nr_to_reclaim) {
mem_cgroup_iter_break(root, memcg);
break;
@@ -3537,7 +3503,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
*
* kswapd scans the zones in the highmem->normal->dma direction. It skips
* zones which have free_pages > high_wmark_pages(zone), but once a zone is
- * found to have free_pages <= high_wmark_pages(zone), any page is that zone
+ * found to have free_pages <= high_wmark_pages(zone), any page in that zone
* or lower is eligible for reclaim until at least one usable zone is
* balanced.
*/
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 83b30edc2f7f..36b56f858f0f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -2121,21 +2121,14 @@ static int __init extfrag_debug_init(void)
struct dentry *extfrag_debug_root;
extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
- if (!extfrag_debug_root)
- return -ENOMEM;
- if (!debugfs_create_file("unusable_index", 0444,
- extfrag_debug_root, NULL, &unusable_file_ops))
- goto fail;
+ debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
+ &unusable_file_ops);
- if (!debugfs_create_file("extfrag_index", 0444,
- extfrag_debug_root, NULL, &extfrag_file_ops))
- goto fail;
+ debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
+ &extfrag_file_ops);
return 0;
-fail:
- debugfs_remove_recursive(extfrag_debug_root);
- return -ENOMEM;
}
module_init(extfrag_debug_init);
diff --git a/mm/workingset.c b/mm/workingset.c
index dcb994f2acc2..0bedf67502d5 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -215,13 +215,12 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
/**
* workingset_eviction - note the eviction of a page from memory
- * @mapping: address space the page was backing
* @page: the page being evicted
*
- * Returns a shadow entry to be stored in @mapping->i_pages in place
+ * Returns a shadow entry to be stored in @page->mapping->i_pages in place
* of the evicted @page so that a later refault can be detected.
*/
-void *workingset_eviction(struct address_space *mapping, struct page *page)
+void *workingset_eviction(struct page *page)
{
struct pglist_data *pgdat = page_pgdat(page);
struct mem_cgroup *memcg = page_memcg(page);
diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
index 6c152f9ea26e..536aae52eead 100644
--- a/net/6lowpan/debugfs.c
+++ b/net/6lowpan/debugfs.c
@@ -41,9 +41,9 @@ static int lowpan_ctx_flag_active_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(lowpan_ctx_flag_active_fops,
- lowpan_ctx_flag_active_get,
- lowpan_ctx_flag_active_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(lowpan_ctx_flag_active_fops,
+ lowpan_ctx_flag_active_get,
+ lowpan_ctx_flag_active_set, "%llu\n");
static int lowpan_ctx_flag_c_set(void *data, u64 val)
{
@@ -66,8 +66,8 @@ static int lowpan_ctx_flag_c_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(lowpan_ctx_flag_c_fops, lowpan_ctx_flag_c_get,
- lowpan_ctx_flag_c_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(lowpan_ctx_flag_c_fops, lowpan_ctx_flag_c_get,
+ lowpan_ctx_flag_c_set, "%llu\n");
static int lowpan_ctx_plen_set(void *data, u64 val)
{
@@ -97,8 +97,8 @@ static int lowpan_ctx_plen_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(lowpan_ctx_plen_fops, lowpan_ctx_plen_get,
- lowpan_ctx_plen_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(lowpan_ctx_plen_fops, lowpan_ctx_plen_get,
+ lowpan_ctx_plen_set, "%llu\n");
static int lowpan_ctx_pfx_show(struct seq_file *file, void *offset)
{
@@ -184,15 +184,15 @@ static int lowpan_dev_debugfs_ctx_init(struct net_device *dev,
if (!root)
return -EINVAL;
- dentry = debugfs_create_file("active", 0644, root,
- &ldev->ctx.table[id],
- &lowpan_ctx_flag_active_fops);
+ dentry = debugfs_create_file_unsafe("active", 0644, root,
+ &ldev->ctx.table[id],
+ &lowpan_ctx_flag_active_fops);
if (!dentry)
return -EINVAL;
- dentry = debugfs_create_file("compression", 0644, root,
- &ldev->ctx.table[id],
- &lowpan_ctx_flag_c_fops);
+ dentry = debugfs_create_file_unsafe("compression", 0644, root,
+ &ldev->ctx.table[id],
+ &lowpan_ctx_flag_c_fops);
if (!dentry)
return -EINVAL;
@@ -202,9 +202,9 @@ static int lowpan_dev_debugfs_ctx_init(struct net_device *dev,
if (!dentry)
return -EINVAL;
- dentry = debugfs_create_file("prefix_len", 0644, root,
- &ldev->ctx.table[id],
- &lowpan_ctx_plen_fops);
+ dentry = debugfs_create_file_unsafe("prefix_len", 0644, root,
+ &ldev->ctx.table[id],
+ &lowpan_ctx_plen_fops);
if (!dentry)
return -EINVAL;
@@ -245,8 +245,8 @@ static int lowpan_short_addr_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(lowpan_short_addr_fops, lowpan_short_addr_get,
- NULL, "0x%04llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(lowpan_short_addr_fops, lowpan_short_addr_get, NULL,
+ "0x%04llx\n");
static int lowpan_dev_debugfs_802154_init(const struct net_device *dev,
struct lowpan_dev *ldev)
@@ -260,9 +260,9 @@ static int lowpan_dev_debugfs_802154_init(const struct net_device *dev,
if (!root)
return -EINVAL;
- dentry = debugfs_create_file("short_addr", 0444, root,
- lowpan_802154_dev(dev)->wdev->ieee802154_ptr,
- &lowpan_short_addr_fops);
+ dentry = debugfs_create_file_unsafe("short_addr", 0444, root,
+ lowpan_802154_dev(dev)->wdev->ieee802154_ptr,
+ &lowpan_short_addr_fops);
if (!dentry)
return -EINVAL;
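The conversions above pair DEFINE_DEBUGFS_ATTRIBUTE() with debugfs_create_file_unsafe(): the generated fops already take the file's removal protection via debugfs_file_get()/debugfs_file_put(), so the "unsafe" creation helper avoids taking that protection twice. A self-contained sketch of the pattern (hypothetical names, not part of the patch):

#include <linux/debugfs.h>

static u64 example_val;

static int example_get(void *data, u64 *val)
{
        *val = *(u64 *)data;
        return 0;
}

static int example_set(void *data, u64 val)
{
        *(u64 *)data = val;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_attr_fops, example_get, example_set, "%llu\n");

static void example_debugfs_init(struct dentry *root)
{
        debugfs_create_file_unsafe("example", 0644, root, &example_val,
                                   &example_attr_fops);
}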
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b2d9c8f27cd7..15293c2a5dd8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -31,7 +31,6 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <net/arp.h>
-#include <net/switchdev.h>
#include "vlan.h"
#include "vlanproc.h"
diff --git a/net/Kconfig b/net/Kconfig
index 5cb9de1aaf88..1efe1f9ee492 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -403,7 +403,7 @@ config LWTUNNEL
config LWTUNNEL_BPF
bool "Execute BPF program as route nexthop action"
- depends on LWTUNNEL
+ depends on LWTUNNEL && INET
default y if LWTUNNEL=y
---help---
Allows to run BPF programs as a nexthop action following a route
@@ -429,21 +429,12 @@ config NET_SOCK_MSG
with the help of BPF programs.
config NET_DEVLINK
- tristate "Network physical/parent device Netlink interface"
+ bool "Network physical/parent device Netlink interface"
help
Network physical/parent device Netlink interface provides
infrastructure to support access to physical chip-wide config and
monitoring.
-config MAY_USE_DEVLINK
- tristate
- default m if NET_DEVLINK=m
- default y if NET_DEVLINK=y || NET_DEVLINK=n
- help
- Drivers using the devlink infrastructure should have a dependency
- on MAY_USE_DEVLINK to ensure they do not cause link errors when
- devlink is a loadable module and the driver using it is built-in.
-
config PAGE_POOL
bool
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 8006295f8bd7..77f203f1febc 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -210,56 +210,34 @@ static const struct seq_operations atalk_seq_socket_ops = {
.show = atalk_seq_socket_show,
};
-static struct proc_dir_entry *atalk_proc_dir;
-
int __init atalk_proc_init(void)
{
- struct proc_dir_entry *p;
- int rc = -ENOMEM;
+ if (!proc_mkdir("atalk", init_net.proc_net))
+ return -ENOMEM;
- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
- if (!atalk_proc_dir)
+ if (!proc_create_seq("atalk/interface", 0444, init_net.proc_net,
+ &atalk_seq_interface_ops))
goto out;
- p = proc_create_seq("interface", 0444, atalk_proc_dir,
- &atalk_seq_interface_ops);
- if (!p)
- goto out_interface;
-
- p = proc_create_seq("route", 0444, atalk_proc_dir,
- &atalk_seq_route_ops);
- if (!p)
- goto out_route;
+ if (!proc_create_seq("atalk/route", 0444, init_net.proc_net,
+ &atalk_seq_route_ops))
+ goto out;
- p = proc_create_seq("socket", 0444, atalk_proc_dir,
- &atalk_seq_socket_ops);
- if (!p)
- goto out_socket;
+ if (!proc_create_seq("atalk/socket", 0444, init_net.proc_net,
+ &atalk_seq_socket_ops))
+ goto out;
- p = proc_create_seq_private("arp", 0444, atalk_proc_dir, &aarp_seq_ops,
- sizeof(struct aarp_iter_state), NULL);
- if (!p)
- goto out_arp;
+ if (!proc_create_seq_private("atalk/arp", 0444, init_net.proc_net,
+ &aarp_seq_ops,
+ sizeof(struct aarp_iter_state), NULL))
+ goto out;
- rc = 0;
out:
- return rc;
-out_arp:
- remove_proc_entry("socket", atalk_proc_dir);
-out_socket:
- remove_proc_entry("route", atalk_proc_dir);
-out_route:
- remove_proc_entry("interface", atalk_proc_dir);
-out_interface:
- remove_proc_entry("atalk", init_net.proc_net);
- goto out;
+ remove_proc_subtree("atalk", init_net.proc_net);
+ return -ENOMEM;
}
-void __exit atalk_proc_exit(void)
+void atalk_proc_exit(void)
{
- remove_proc_entry("interface", atalk_proc_dir);
- remove_proc_entry("route", atalk_proc_dir);
- remove_proc_entry("socket", atalk_proc_dir);
- remove_proc_entry("arp", atalk_proc_dir);
- remove_proc_entry("atalk", init_net.proc_net);
+ remove_proc_subtree("atalk", init_net.proc_net);
}
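The rewrite above relies on the proc_create_*() helpers accepting a "dir/name" path relative to the per-net proc root, and on remove_proc_subtree() tearing the whole directory down in one call. A stripped-down sketch of the same pattern (hypothetical entry and show routine, not part of the patch):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_puts(m, "example stats\n");
        return 0;
}

static int __init example_proc_init(void)
{
        if (!proc_mkdir("example", init_net.proc_net))
                return -ENOMEM;

        if (!proc_create_single("example/stats", 0444, init_net.proc_net,
                                example_show))
                goto fail;

        return 0;
fail:
        remove_proc_subtree("example", init_net.proc_net);
        return -ENOMEM;
}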
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 9b6bc5abe946..795fbc6c06aa 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst =
/* Called by proto.c on kernel start up */
static int __init atalk_init(void)
{
- int rc = proto_register(&ddp_proto, 0);
+ int rc;
- if (rc != 0)
+ rc = proto_register(&ddp_proto, 0);
+ if (rc)
goto out;
- (void)sock_register(&atalk_family_ops);
+ rc = sock_register(&atalk_family_ops);
+ if (rc)
+ goto out_proto;
+
ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
if (!ddp_dl)
printk(atalk_err_snap);
@@ -1923,12 +1927,33 @@ static int __init atalk_init(void)
dev_add_pack(&ltalk_packet_type);
dev_add_pack(&ppptalk_packet_type);
- register_netdevice_notifier(&ddp_notifier);
+ rc = register_netdevice_notifier(&ddp_notifier);
+ if (rc)
+ goto out_sock;
+
aarp_proto_init();
- atalk_proc_init();
- atalk_register_sysctl();
+ rc = atalk_proc_init();
+ if (rc)
+ goto out_aarp;
+
+ rc = atalk_register_sysctl();
+ if (rc)
+ goto out_proc;
out:
return rc;
+out_proc:
+ atalk_proc_exit();
+out_aarp:
+ aarp_cleanup_module();
+ unregister_netdevice_notifier(&ddp_notifier);
+out_sock:
+ dev_remove_pack(&ppptalk_packet_type);
+ dev_remove_pack(&ltalk_packet_type);
+ unregister_snap_client(ddp_dl);
+ sock_unregister(PF_APPLETALK);
+out_proto:
+ proto_unregister(&ddp_proto);
+ goto out;
}
module_init(atalk_init);
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index c744a853fa5f..d945b7c0176d 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = {
static struct ctl_table_header *atalk_table_header;
-void atalk_register_sysctl(void)
+int __init atalk_register_sysctl(void)
{
atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
+ if (!atalk_table_header)
+ return -ENOMEM;
+ return 0;
}
void atalk_unregister_sysctl(void)
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 0b0495a41bbe..d79221fd4dae 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -134,7 +134,8 @@ static void vcc_seq_stop(struct seq_file *seq, void *v)
static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = vcc_walk(seq, 1);
- *pos += !!PTR_ERR(v);
+ if (v)
+ (*pos)++;
return v;
}
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 70417e9b932d..314bbc8010fb 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
dst = (ax25_address *)(bp + 1);
src = (ax25_address *)(bp + 8);
+ ax25_route_lock_use();
route = ax25_get_route(dst, NULL);
if (route) {
digipeat = route->digipeat;
@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
ax25_queue_xmit(skb, dev);
put:
- if (route)
- ax25_put_route(route);
+ ax25_route_lock_unuse();
return NETDEV_TX_OK;
}
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a0eff323af12..66f74c85cf6b 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -40,7 +40,7 @@
#include <linux/export.h>
static ax25_route *ax25_route_list;
-static DEFINE_RWLOCK(ax25_route_lock);
+DEFINE_RWLOCK(ax25_route_lock);
void ax25_rt_device_down(struct net_device *dev)
{
@@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = {
* Find AX.25 route
*
* Only routes with a reference count of zero can be destroyed.
+ * Must be called with ax25_route_lock read locked.
*/
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
{
@@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
ax25_route *ax25_def_rt = NULL;
ax25_route *ax25_rt;
- read_lock(&ax25_route_lock);
/*
* Bind to the physical interface we heard them on, or the default
* route if none is found;
@@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
if (ax25_spe_rt != NULL)
ax25_rt = ax25_spe_rt;
- if (ax25_rt != NULL)
- ax25_hold_route(ax25_rt);
-
- read_unlock(&ax25_route_lock);
-
return ax25_rt;
}
@@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
ax25_route *ax25_rt;
int err = 0;
- if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
+ ax25_route_lock_use();
+ ax25_rt = ax25_get_route(addr, NULL);
+ if (!ax25_rt) {
+ ax25_route_lock_unuse();
return -EHOSTUNREACH;
-
+ }
if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
err = -EHOSTUNREACH;
goto put;
@@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
}
put:
- ax25_put_route(ax25_rt);
-
+ ax25_route_lock_unuse();
return err;
}
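With ax25_get_route() no longer taking the route lock or a reference itself, every caller must hold ax25_route_lock across both the lookup and any use of the returned route, as ax25_ip_xmit() and ax25_rt_autobind() now do above. A hypothetical caller following the new contract:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/ax25.h>

static int example_route_mtu(ax25_address *dst)
{
        ax25_route *ax25_rt;
        int mtu = -EHOSTUNREACH;

        ax25_route_lock_use();
        ax25_rt = ax25_get_route(dst, NULL);
        if (ax25_rt && ax25_rt->dev)
                mtu = ax25_rt->dev->mtu;        /* route only valid while locked */
        ax25_route_lock_unuse();

        return mtu;
}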
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index c386e6981416..a31db5e9ac8e 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 9b58160fe485..a887ecc3efa1 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
index ea309ad06175..7b7e15641fef 100644
--- a/net/batman-adv/bat_algo.c
+++ b/net/batman-adv/bat_algo.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 534b790c3753..25e7bb51928c 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Linus Lüssing
*
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index f97e566f0402..de61091af666 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h
index 3dc6a7a43eb7..785f6666273c 100644
--- a/net/batman-adv/bat_iv_ogm.h
+++ b/net/batman-adv/bat_iv_ogm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 90e33f84d37a..445594ed58af 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2013-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2019 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
*
diff --git a/net/batman-adv/bat_v.h b/net/batman-adv/bat_v.h
index ec4a2a569750..465a4fc23354 100644
--- a/net/batman-adv/bat_v.h
+++ b/net/batman-adv/bat_v.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Linus Lüssing
*
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index e8090f099eb8..a9b7919c9de5 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2019 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
*
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
+ /* free the TID stats immediately */
+ cfg80211_sinfo_release_content(&sinfo);
+
dev_put(real_netdev);
if (ret == -ENOENT) {
/* Node is not associated anymore! It would be
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
index e8c7b7fd290d..75f189ee4a1c 100644
--- a/net/batman-adv/bat_v_elp.h
+++ b/net/batman-adv/bat_v_elp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2013-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2019 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
*
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 2948b41b06d4..c9698ad41854 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2013-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2019 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
index e5be14c908c6..f67cf7ee06b2 100644
--- a/net/batman-adv/bat_v_ogm.h
+++ b/net/batman-adv/bat_v_ogm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2013-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2019 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index a296a4d851f5..63e134e763e3 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2006-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2019 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 48f683289531..f3a05ad9afad 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2006-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2019 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5fdde2947802..ef39aabdb694 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2019 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 71f95a3e4d3f..31771c751efb 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2019 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index d4a7702e48d8..3b9d1ad2f467 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 8de018e5c577..c0b8694041ec 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index b9ffe1826527..310a4f353008 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2019 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
@@ -19,6 +19,7 @@
#include "distributed-arp-table.h"
#include "main.h"
+#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
@@ -29,6 +30,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
+#include <linux/ip.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -42,6 +44,7 @@
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <linux/udp.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
@@ -60,6 +63,49 @@
#include "translation-table.h"
#include "tvlv.h"
+enum batadv_bootpop {
+ BATADV_BOOTREPLY = 2,
+};
+
+enum batadv_boothtype {
+ BATADV_HTYPE_ETHERNET = 1,
+};
+
+enum batadv_dhcpoptioncode {
+ BATADV_DHCP_OPT_PAD = 0,
+ BATADV_DHCP_OPT_MSG_TYPE = 53,
+ BATADV_DHCP_OPT_END = 255,
+};
+
+enum batadv_dhcptype {
+ BATADV_DHCPACK = 5,
+};
+
+/* { 99, 130, 83, 99 } */
+#define BATADV_DHCP_MAGIC 1669485411
+
+struct batadv_dhcp_packet {
+ __u8 op;
+ __u8 htype;
+ __u8 hlen;
+ __u8 hops;
+ __be32 xid;
+ __be16 secs;
+ __be16 flags;
+ __be32 ciaddr;
+ __be32 yiaddr;
+ __be32 siaddr;
+ __be32 giaddr;
+ __u8 chaddr[16];
+ __u8 sname[64];
+ __u8 file[128];
+ __be32 magic;
+ __u8 options[0];
+};
+
+#define BATADV_DHCP_YIADDR_LEN sizeof(((struct batadv_dhcp_packet *)0)->yiaddr)
+#define BATADV_DHCP_CHADDR_LEN sizeof(((struct batadv_dhcp_packet *)0)->chaddr)
+
static void batadv_dat_purge(struct work_struct *work);
/**
@@ -1440,6 +1486,361 @@ out:
}
/**
+ * batadv_dat_check_dhcp_ipudp() - check skb for IP+UDP headers valid for DHCP
+ * @skb: the packet to check
+ * @ip_src: a buffer to store the IPv4 source address in
+ *
+ * Checks whether the given skb has an IP and UDP header valid for a DHCP
+ * message from a DHCP server and, if so, stores the IPv4 source address
+ * in the provided buffer.
+ *
+ * Return: True if valid, false otherwise.
+ */
+static bool
+batadv_dat_check_dhcp_ipudp(struct sk_buff *skb, __be32 *ip_src)
+{
+ unsigned int offset = skb_network_offset(skb);
+ struct udphdr *udphdr, _udphdr;
+ struct iphdr *iphdr, _iphdr;
+
+ iphdr = skb_header_pointer(skb, offset, sizeof(_iphdr), &_iphdr);
+ if (!iphdr || iphdr->version != 4 || iphdr->ihl * 4 < sizeof(_iphdr))
+ return false;
+
+ if (iphdr->protocol != IPPROTO_UDP)
+ return false;
+
+ offset += iphdr->ihl * 4;
+ skb_set_transport_header(skb, offset);
+
+ udphdr = skb_header_pointer(skb, offset, sizeof(_udphdr), &_udphdr);
+ if (!udphdr || udphdr->source != htons(67))
+ return false;
+
+ *ip_src = get_unaligned(&iphdr->saddr);
+
+ return true;
+}
+
+/**
+ * batadv_dat_check_dhcp() - examine packet for valid DHCP message
+ * @skb: the packet to check
+ * @proto: ethernet protocol hint (behind a potential vlan)
+ * @ip_src: a buffer to store the IPv4 source address in
+ *
+ * Checks whether the given skb is a valid DHCP packet and, if so, stores
+ * the IPv4 source address in the provided buffer.
+ *
+ * Caller needs to ensure that the skb network header is set correctly.
+ *
+ * Return: If skb is a valid DHCP packet, then returns its op code
+ * (e.g. BOOTREPLY vs. BOOTREQUEST). Otherwise returns -EINVAL.
+ */
+static int
+batadv_dat_check_dhcp(struct sk_buff *skb, __be16 proto, __be32 *ip_src)
+{
+ __be32 *magic, _magic;
+ unsigned int offset;
+ struct {
+ __u8 op;
+ __u8 htype;
+ __u8 hlen;
+ __u8 hops;
+ } *dhcp_h, _dhcp_h;
+
+ if (proto != htons(ETH_P_IP))
+ return -EINVAL;
+
+ if (!batadv_dat_check_dhcp_ipudp(skb, ip_src))
+ return -EINVAL;
+
+ offset = skb_transport_offset(skb) + sizeof(struct udphdr);
+ if (skb->len < offset + sizeof(struct batadv_dhcp_packet))
+ return -EINVAL;
+
+ dhcp_h = skb_header_pointer(skb, offset, sizeof(_dhcp_h), &_dhcp_h);
+ if (!dhcp_h || dhcp_h->htype != BATADV_HTYPE_ETHERNET ||
+ dhcp_h->hlen != ETH_ALEN)
+ return -EINVAL;
+
+ offset += offsetof(struct batadv_dhcp_packet, magic);
+
+ magic = skb_header_pointer(skb, offset, sizeof(_magic), &_magic);
+ if (!magic || get_unaligned(magic) != htonl(BATADV_DHCP_MAGIC))
+ return -EINVAL;
+
+ return dhcp_h->op;
+}
+
+/**
+ * batadv_dat_get_dhcp_message_type() - get message type of a DHCP packet
+ * @skb: the DHCP packet to parse
+ *
+ * Iterates over the DHCP options of the given DHCP packet to find a
+ * DHCP Message Type option and parse it.
+ *
+ * Caller needs to ensure that the given skb is a valid DHCP packet and
+ * that the skb transport header is set correctly.
+ *
+ * Return: The DHCP message type value, if found. -EINVAL otherwise.
+ */
+static int batadv_dat_get_dhcp_message_type(struct sk_buff *skb)
+{
+ unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
+ u8 *type, _type;
+ struct {
+ u8 type;
+ u8 len;
+ } *tl, _tl;
+
+ offset += sizeof(struct batadv_dhcp_packet);
+
+ while ((tl = skb_header_pointer(skb, offset, sizeof(_tl), &_tl))) {
+ if (tl->type == BATADV_DHCP_OPT_MSG_TYPE)
+ break;
+
+ if (tl->type == BATADV_DHCP_OPT_END)
+ break;
+
+ if (tl->type == BATADV_DHCP_OPT_PAD)
+ offset++;
+ else
+ offset += tl->len + sizeof(_tl);
+ }
+
+ /* Option Overload Code not supported */
+ if (!tl || tl->type != BATADV_DHCP_OPT_MSG_TYPE ||
+ tl->len != sizeof(_type))
+ return -EINVAL;
+
+ offset += sizeof(_tl);
+
+ type = skb_header_pointer(skb, offset, sizeof(_type), &_type);
+ if (!type)
+ return -EINVAL;
+
+ return *type;
+}
+
+/**
+ * batadv_dat_dhcp_get_yiaddr() - get yiaddr from a DHCP packet
+ * @skb: the DHCP packet to parse
+ * @buf: a buffer to store the yiaddr in
+ *
+ * Caller needs to ensure that the given skb is a valid DHCP packet and
+ * that the skb transport header is set correctly.
+ *
+ * Return: True on success, false otherwise.
+ */
+static bool batadv_dat_dhcp_get_yiaddr(struct sk_buff *skb, __be32 *buf)
+{
+ unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
+ __be32 *yiaddr;
+
+ offset += offsetof(struct batadv_dhcp_packet, yiaddr);
+ yiaddr = skb_header_pointer(skb, offset, BATADV_DHCP_YIADDR_LEN, buf);
+
+ if (!yiaddr)
+ return false;
+
+ if (yiaddr != buf)
+ *buf = get_unaligned(yiaddr);
+
+ return true;
+}
+
+/**
+ * batadv_dat_get_dhcp_chaddr() - get chaddr from a DHCP packet
+ * @skb: the DHCP packet to parse
+ * @buf: a buffer to store the chaddr in
+ *
+ * Caller needs to ensure that the given skb is a valid DHCP packet and
+ * that the skb transport header is set correctly.
+ *
+ * Return: True on success, false otherwise
+ */
+static bool batadv_dat_get_dhcp_chaddr(struct sk_buff *skb, u8 *buf)
+{
+ unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
+ u8 *chaddr;
+
+ offset += offsetof(struct batadv_dhcp_packet, chaddr);
+ chaddr = skb_header_pointer(skb, offset, BATADV_DHCP_CHADDR_LEN, buf);
+
+ if (!chaddr)
+ return false;
+
+ if (chaddr != buf)
+ memcpy(buf, chaddr, BATADV_DHCP_CHADDR_LEN);
+
+ return true;
+}
+
+/**
+ * batadv_dat_put_dhcp() - puts addresses from a DHCP packet into the DHT and
+ * DAT cache
+ * @bat_priv: the bat priv with all the soft interface information
+ * @chaddr: the DHCP client MAC address
+ * @yiaddr: the DHCP client IP address
+ * @hw_dst: the DHCP server MAC address
+ * @ip_dst: the DHCP server IP address
+ * @vid: VLAN identifier
+ *
+ * Adds given MAC/IP pairs to the local DAT cache and propagates them further
+ * into the DHT.
+ *
+ * For the DHT propagation, client MAC + IP will appear as the ARP Reply
+ * transmitter (and hw_dst/ip_dst as the target).
+ */
+static void batadv_dat_put_dhcp(struct batadv_priv *bat_priv, u8 *chaddr,
+ __be32 yiaddr, u8 *hw_dst, __be32 ip_dst,
+ unsigned short vid)
+{
+ struct sk_buff *skb;
+
+ skb = batadv_dat_arp_create_reply(bat_priv, yiaddr, ip_dst, chaddr,
+ hw_dst, vid);
+ if (!skb)
+ return;
+
+ skb_set_network_header(skb, ETH_HLEN);
+
+ batadv_dat_entry_add(bat_priv, yiaddr, chaddr, vid);
+ batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
+
+ batadv_dat_send_data(bat_priv, skb, yiaddr, vid, BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
+
+ consume_skb(skb);
+
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Snooped from outgoing DHCPACK (server address): %pI4, %pM (vid: %i)\n",
+ &ip_dst, hw_dst, batadv_print_vid(vid));
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Snooped from outgoing DHCPACK (client address): %pI4, %pM (vid: %i)\n",
+ &yiaddr, chaddr, batadv_print_vid(vid));
+}
+
+/**
+ * batadv_dat_check_dhcp_ack() - examine packet for valid DHCP message
+ * @skb: the packet to check
+ * @proto: ethernet protocol hint (behind a potential vlan)
+ * @ip_src: a buffer to store the IPv4 source address in
+ * @chaddr: a buffer to store the DHCP Client Hardware Address in
+ * @yiaddr: a buffer to store the DHCP Your IP Address in
+ *
+ * Checks whether the given skb is a valid DHCPACK and, if so, stores the
+ * IPv4 server source address (ip_src), client MAC address (chaddr) and client
+ * IPv4 address (yiaddr) in the provided buffers.
+ *
+ * Caller needs to ensure that the skb network header is set correctly.
+ *
+ * Return: True if the skb is a valid DHCPACK. False otherwise.
+ */
+static bool
+batadv_dat_check_dhcp_ack(struct sk_buff *skb, __be16 proto, __be32 *ip_src,
+ u8 *chaddr, __be32 *yiaddr)
+{
+ int type;
+
+ type = batadv_dat_check_dhcp(skb, proto, ip_src);
+ if (type != BATADV_BOOTREPLY)
+ return false;
+
+ type = batadv_dat_get_dhcp_message_type(skb);
+ if (type != BATADV_DHCPACK)
+ return false;
+
+ if (!batadv_dat_dhcp_get_yiaddr(skb, yiaddr))
+ return false;
+
+ if (!batadv_dat_get_dhcp_chaddr(skb, chaddr))
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_dat_snoop_outgoing_dhcp_ack() - snoop DHCPACK and fill DAT with it
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the packet to snoop
+ * @proto: ethernet protocol hint (behind a potential vlan)
+ * @vid: VLAN identifier
+ *
+ * This function first checks whether the given skb is a valid DHCPACK. If
+ * so, its source MAC and IP as well as its DHCP Client Hardware Address
+ * field and DHCP Your IP Address field are added to the local DAT cache and
+ * propagated into the DHT.
+ *
+ * Caller needs to ensure that the skb mac and network headers are set
+ * correctly.
+ */
+void batadv_dat_snoop_outgoing_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ __be16 proto,
+ unsigned short vid)
+{
+ u8 chaddr[BATADV_DHCP_CHADDR_LEN];
+ __be32 ip_src, yiaddr;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ return;
+
+ if (!batadv_dat_check_dhcp_ack(skb, proto, &ip_src, chaddr, &yiaddr))
+ return;
+
+ batadv_dat_put_dhcp(bat_priv, chaddr, yiaddr, eth_hdr(skb)->h_source,
+ ip_src, vid);
+}
+
+/**
+ * batadv_dat_snoop_incoming_dhcp_ack() - snoop DHCPACK and fill DAT cache
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the packet to snoop
+ * @hdr_size: header size, up to the tail of the batman-adv header
+ *
+ * This function first checks whether the given skb is a valid DHCPACK. If
+ * so, its source MAC and IP as well as its DHCP Client Hardware Address
+ * field and DHCP Your IP Address field are added to the local DAT cache.
+ */
+void batadv_dat_snoop_incoming_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+ u8 chaddr[BATADV_DHCP_CHADDR_LEN];
+ struct ethhdr *ethhdr;
+ __be32 ip_src, yiaddr;
+ unsigned short vid;
+ __be16 proto;
+ u8 *hw_src;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ return;
+
+ if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
+ return;
+
+ ethhdr = (struct ethhdr *)(skb->data + hdr_size);
+ skb_set_network_header(skb, hdr_size + ETH_HLEN);
+ proto = ethhdr->h_proto;
+
+ if (!batadv_dat_check_dhcp_ack(skb, proto, &ip_src, chaddr, &yiaddr))
+ return;
+
+ hw_src = ethhdr->h_source;
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
+ batadv_dat_entry_add(bat_priv, yiaddr, chaddr, vid);
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Snooped from incoming DHCPACK (server address): %pI4, %pM (vid: %i)\n",
+ &ip_src, hw_src, batadv_print_vid(vid));
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Snooped from incoming DHCPACK (client address): %pI4, %pM (vid: %i)\n",
+ &yiaddr, chaddr, batadv_print_vid(vid));
+}
+
+/**
* batadv_dat_drop_broadcast_packet() - check if an ARP request has to be
* dropped (because the node has already obtained the reply via DAT) or not
* @bat_priv: the bat priv with all the soft interface information
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index a04596028337..68c0ff321acd 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2011-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2019 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
@@ -46,6 +46,12 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb);
bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size);
+void batadv_dat_snoop_outgoing_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ __be16 proto,
+ unsigned short vid);
+void batadv_dat_snoop_incoming_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size);
bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet);
@@ -140,6 +146,19 @@ batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
return false;
}
+static inline void
+batadv_dat_snoop_outgoing_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, __be16 proto,
+ unsigned short vid)
+{
+}
+
+static inline void
+batadv_dat_snoop_incoming_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+}
+
static inline bool
batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 5b71a289d04f..b506d15b8230 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2013-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2019 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index 944512e07782..abdac26579bf 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2013-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2019 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 9d8e5eda2314..f5811f61aa92 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2009-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -47,7 +47,6 @@
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
-#include "gateway_common.h"
#include "hard-interface.h"
#include "log.h"
#include "netlink.h"
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index f0b86fcb2493..b5732c8be81a 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2009-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 936c107f3199..e064de45e22c 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2009-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -28,6 +28,7 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
#include "gateway_client.h"
#include "log.h"
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 80afb2793687..128467a0fb89 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2009-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -25,12 +25,6 @@
struct net_device;
-enum batadv_gw_modes {
- BATADV_GW_MODE_OFF,
- BATADV_GW_MODE_CLIENT,
- BATADV_GW_MODE_SERVER,
-};
-
/**
* enum batadv_bandwidth_units - bandwidth unit types
*/
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 508f4416dfc9..96ef7c70b4d9 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -20,7 +20,6 @@
#include "main.h"
#include <linux/atomic.h>
-#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/gfp.h>
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
parent_dev = __dev_get_by_index((struct net *)parent_net,
dev_get_iflink(net_dev));
/* if we got a NULL parent_dev there is something broken.. */
- if (WARN(!parent_dev, "Cannot find parent device"))
+ if (!parent_dev) {
+ pr_err("Cannot find parent device\n");
return false;
+ }
if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
return false;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index d1c0f6189301..48de28c83401 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 9194f4d891b1..56a08ce193d5 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2006-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2019 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 0e36fa1c7c3e..37507b6d4006 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2006-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2019 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 6d5859714f52..9859ababb82e 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 958be22beda9..5f8926522ff0 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 75f602e1ce94..3e610df8debf 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index 35f4f397ed57..660e9bcc85a2 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index d1ed839fd32b..75750870cf04 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index b572066325e4..3ed669d7dc6b 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -25,7 +25,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2019.0"
+#define BATADV_SOURCE_VERSION "2019.1"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 69244e4598f5..f91b1b6265cf 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2014-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2019 B.A.T.M.A.N. contributors:
*
* Linus Lüssing
*
@@ -674,7 +674,7 @@ static void batadv_mcast_mla_update(struct work_struct *work)
*/
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
- if (ip_mc_check_igmp(skb, NULL) < 0)
+ if (ip_mc_check_igmp(skb) < 0)
return false;
switch (igmp_hdr(skb)->type) {
@@ -741,7 +741,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
*/
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
- if (ipv6_mc_check_mld(skb, NULL) < 0)
+ if (ipv6_mc_check_mld(skb) < 0)
return false;
switch (icmp6_hdr(skb)->icmp6_type) {
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 3b04ab13f0eb..466013fe88af 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2014-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2019 B.A.T.M.A.N. contributors:
*
* Linus Lüssing
*
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 2dc3304cee54..67a58da2e6a0 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2016-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2019 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*
@@ -20,13 +20,17 @@
#include "main.h"
#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -47,21 +51,54 @@
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
+#include "gateway_common.h"
#include "hard-interface.h"
+#include "log.h"
#include "multicast.h"
+#include "network-coding.h"
#include "originator.h"
#include "soft-interface.h"
#include "tp_meter.h"
#include "translation-table.h"
+struct net;
+
struct genl_family batadv_netlink_family;
/* multicast groups */
enum batadv_netlink_multicast_groups {
+ BATADV_NL_MCGRP_CONFIG,
BATADV_NL_MCGRP_TPMETER,
};
+/**
+ * enum batadv_genl_ops_flags - flags for genl_ops's internal_flags
+ */
+enum batadv_genl_ops_flags {
+ /**
+ * @BATADV_FLAG_NEED_MESH: request requires valid soft interface in
+ * attribute BATADV_ATTR_MESH_IFINDEX and expects a pointer to it to be
+ * saved in info->user_ptr[0]
+ */
+ BATADV_FLAG_NEED_MESH = BIT(0),
+
+ /**
+ * @BATADV_FLAG_NEED_HARDIF: request requires valid hard interface in
+ * attribute BATADV_ATTR_HARD_IFINDEX and expects a pointer to it to be
+ * saved in info->user_ptr[1]
+ */
+ BATADV_FLAG_NEED_HARDIF = BIT(1),
+
+ /**
+ * @BATADV_FLAG_NEED_VLAN: request requires valid vlan in
+ * attribute BATADV_ATTR_VLANID and expects a pointer to it to be
+ * saved in info->user_ptr[1]
+ */
+ BATADV_FLAG_NEED_VLAN = BIT(2),
+};
+
static const struct genl_multicast_group batadv_netlink_mcgrps[] = {
+ [BATADV_NL_MCGRP_CONFIG] = { .name = BATADV_NL_MCAST_GROUP_CONFIG },
[BATADV_NL_MCGRP_TPMETER] = { .name = BATADV_NL_MCAST_GROUP_TPMETER },
};
@@ -104,6 +141,26 @@ static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
[BATADV_ATTR_DAT_CACHE_VID] = { .type = NLA_U16 },
[BATADV_ATTR_MCAST_FLAGS] = { .type = NLA_U32 },
[BATADV_ATTR_MCAST_FLAGS_PRIV] = { .type = NLA_U32 },
+ [BATADV_ATTR_VLANID] = { .type = NLA_U16 },
+ [BATADV_ATTR_AGGREGATED_OGMS_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_AP_ISOLATION_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_ISOLATION_MARK] = { .type = NLA_U32 },
+ [BATADV_ATTR_ISOLATION_MASK] = { .type = NLA_U32 },
+ [BATADV_ATTR_BONDING_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_FRAGMENTATION_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_GW_BANDWIDTH_DOWN] = { .type = NLA_U32 },
+ [BATADV_ATTR_GW_BANDWIDTH_UP] = { .type = NLA_U32 },
+ [BATADV_ATTR_GW_MODE] = { .type = NLA_U8 },
+ [BATADV_ATTR_GW_SEL_CLASS] = { .type = NLA_U32 },
+ [BATADV_ATTR_HOP_PENALTY] = { .type = NLA_U8 },
+ [BATADV_ATTR_LOG_LEVEL] = { .type = NLA_U32 },
+ [BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_NETWORK_CODING_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_ORIG_INTERVAL] = { .type = NLA_U32 },
+ [BATADV_ATTR_ELP_INTERVAL] = { .type = NLA_U32 },
+ [BATADV_ATTR_THROUGHPUT_OVERRIDE] = { .type = NLA_U32 },
};
/**
@@ -122,20 +179,75 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
}
/**
- * batadv_netlink_mesh_info_put() - fill in generic information about mesh
- * interface
- * @msg: netlink message to be sent back
- * @soft_iface: interface for which the data should be taken
+ * batadv_netlink_mesh_fill_ap_isolation() - Add ap_isolation softif attribute
+ * @msg: Netlink message to dump into
+ * @bat_priv: the bat priv with all the soft interface information
*
- * Return: 0 on success, < 0 on error
+ * Return: 0 on success or negative error number in case of failure
*/
-static int
-batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
+static int batadv_netlink_mesh_fill_ap_isolation(struct sk_buff *msg,
+ struct batadv_priv *bat_priv)
+{
+ struct batadv_softif_vlan *vlan;
+ u8 ap_isolation;
+
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (!vlan)
+ return 0;
+
+ ap_isolation = atomic_read(&vlan->ap_isolation);
+ batadv_softif_vlan_put(vlan);
+
+ return nla_put_u8(msg, BATADV_ATTR_AP_ISOLATION_ENABLED,
+ !!ap_isolation);
+}
+
+/**
+ * batadv_netlink_set_mesh_ap_isolation() - Set ap_isolation from genl msg
+ * @attr: parsed BATADV_ATTR_AP_ISOLATION_ENABLED attribute
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_set_mesh_ap_isolation(struct nlattr *attr,
+ struct batadv_priv *bat_priv)
+{
+ struct batadv_softif_vlan *vlan;
+
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (!vlan)
+ return -ENOENT;
+
+ atomic_set(&vlan->ap_isolation, !!nla_get_u8(attr));
+ batadv_softif_vlan_put(vlan);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_mesh_fill() - Fill message with mesh attributes
+ * @msg: Netlink message to dump into
+ * @bat_priv: the bat priv with all the soft interface information
+ * @cmd: type of message to generate
+ * @portid: Port making netlink request
+ * @seq: sequence number for message
+ * @flags: Additional flags for message
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_mesh_fill(struct sk_buff *msg,
+ struct batadv_priv *bat_priv,
+ enum batadv_nl_commands cmd,
+ u32 portid, u32 seq, int flags)
{
- struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct net_device *soft_iface = bat_priv->soft_iface;
struct batadv_hard_iface *primary_if = NULL;
struct net_device *hard_iface;
- int ret = -ENOBUFS;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
+ if (!hdr)
+ return -ENOBUFS;
if (nla_put_string(msg, BATADV_ATTR_VERSION, BATADV_SOURCE_VERSION) ||
nla_put_string(msg, BATADV_ATTR_ALGO_NAME,
@@ -146,16 +258,16 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
soft_iface->dev_addr) ||
nla_put_u8(msg, BATADV_ATTR_TT_TTVN,
(u8)atomic_read(&bat_priv->tt.vn)))
- goto out;
+ goto nla_put_failure;
#ifdef CONFIG_BATMAN_ADV_BLA
if (nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
ntohs(bat_priv->bla.claim_dest.group)))
- goto out;
+ goto nla_put_failure;
#endif
if (batadv_mcast_mesh_info_put(msg, bat_priv))
- goto out;
+ goto nla_put_failure;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if && primary_if->if_status == BATADV_IF_ACTIVE) {
@@ -167,77 +279,345 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
hard_iface->name) ||
nla_put(msg, BATADV_ATTR_HARD_ADDRESS, ETH_ALEN,
hard_iface->dev_addr))
- goto out;
+ goto nla_put_failure;
}
- ret = 0;
+ if (nla_put_u8(msg, BATADV_ATTR_AGGREGATED_OGMS_ENABLED,
+ !!atomic_read(&bat_priv->aggregated_ogms)))
+ goto nla_put_failure;
+
+ if (batadv_netlink_mesh_fill_ap_isolation(msg, bat_priv))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_ISOLATION_MARK,
+ bat_priv->isolation_mark))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_ISOLATION_MASK,
+ bat_priv->isolation_mark_mask))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, BATADV_ATTR_BONDING_ENABLED,
+ !!atomic_read(&bat_priv->bonding)))
+ goto nla_put_failure;
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+ if (nla_put_u8(msg, BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED,
+ !!atomic_read(&bat_priv->bridge_loop_avoidance)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_BLA */
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+ if (nla_put_u8(msg, BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED,
+ !!atomic_read(&bat_priv->distributed_arp_table)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_DAT */
+
+ if (nla_put_u8(msg, BATADV_ATTR_FRAGMENTATION_ENABLED,
+ !!atomic_read(&bat_priv->fragmentation)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_GW_BANDWIDTH_DOWN,
+ atomic_read(&bat_priv->gw.bandwidth_down)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_GW_BANDWIDTH_UP,
+ atomic_read(&bat_priv->gw.bandwidth_up)))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, BATADV_ATTR_GW_MODE,
+ atomic_read(&bat_priv->gw.mode)))
+ goto nla_put_failure;
+
+ if (bat_priv->algo_ops->gw.get_best_gw_node &&
+ bat_priv->algo_ops->gw.is_eligible) {
+ /* GW selection class is not available if the routing algorithm
+ * in use does not implement the GW API
+ */
+ if (nla_put_u32(msg, BATADV_ATTR_GW_SEL_CLASS,
+ atomic_read(&bat_priv->gw.sel_class)))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY,
+ atomic_read(&bat_priv->hop_penalty)))
+ goto nla_put_failure;
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ if (nla_put_u32(msg, BATADV_ATTR_LOG_LEVEL,
+ atomic_read(&bat_priv->log_level)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_DEBUG */
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ if (nla_put_u8(msg, BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED,
+ !atomic_read(&bat_priv->multicast_mode)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_MCAST */
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ if (nla_put_u8(msg, BATADV_ATTR_NETWORK_CODING_ENABLED,
+ !!atomic_read(&bat_priv->network_coding)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_NC */
+
+ if (nla_put_u32(msg, BATADV_ATTR_ORIG_INTERVAL,
+ atomic_read(&bat_priv->orig_interval)))
+ goto nla_put_failure;
- out:
if (primary_if)
batadv_hardif_put(primary_if);
- return ret;
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ if (primary_if)
+ batadv_hardif_put(primary_if);
+
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
}
/**
- * batadv_netlink_get_mesh_info() - handle incoming BATADV_CMD_GET_MESH_INFO
- * netlink request
- * @skb: received netlink message
- * @info: receiver information
+ * batadv_netlink_notify_mesh() - send softif attributes to listener
+ * @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success, < 0 on error
*/
-static int
-batadv_netlink_get_mesh_info(struct sk_buff *skb, struct genl_info *info)
+int batadv_netlink_notify_mesh(struct batadv_priv *bat_priv)
{
- struct net *net = genl_info_net(info);
- struct net_device *soft_iface;
- struct sk_buff *msg = NULL;
- void *msg_head;
- int ifindex;
+ struct sk_buff *msg;
int ret;
- if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
- return -EINVAL;
-
- ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
- if (!ifindex)
- return -EINVAL;
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
+ ret = batadv_netlink_mesh_fill(msg, bat_priv, BATADV_CMD_SET_MESH,
+ 0, 0, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
}
+ genlmsg_multicast_netns(&batadv_netlink_family,
+ dev_net(bat_priv->soft_iface), msg, 0,
+ BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_get_mesh() - Get softif attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_get_mesh(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct sk_buff *msg;
+ int ret;
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg) {
- ret = -ENOMEM;
- goto out;
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_mesh_fill(msg, bat_priv, BATADV_CMD_GET_MESH,
+ info->snd_portid, info->snd_seq, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
}
- msg_head = genlmsg_put(msg, info->snd_portid, info->snd_seq,
- &batadv_netlink_family, 0,
- BATADV_CMD_GET_MESH_INFO);
- if (!msg_head) {
- ret = -ENOBUFS;
- goto out;
+ ret = genlmsg_reply(msg, info);
+
+ return ret;
+}
+
+/**
+ * batadv_netlink_set_mesh() - Set softif attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct nlattr *attr;
+
+ if (info->attrs[BATADV_ATTR_AGGREGATED_OGMS_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_AGGREGATED_OGMS_ENABLED];
+
+ atomic_set(&bat_priv->aggregated_ogms, !!nla_get_u8(attr));
}
- ret = batadv_netlink_mesh_info_put(msg, soft_iface);
+ if (info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED];
- out:
- if (soft_iface)
- dev_put(soft_iface);
+ batadv_netlink_set_mesh_ap_isolation(attr, bat_priv);
+ }
- if (ret) {
- if (msg)
- nlmsg_free(msg);
- return ret;
+ if (info->attrs[BATADV_ATTR_ISOLATION_MARK]) {
+ attr = info->attrs[BATADV_ATTR_ISOLATION_MARK];
+
+ bat_priv->isolation_mark = nla_get_u32(attr);
}
- genlmsg_end(msg, msg_head);
- return genlmsg_reply(msg, info);
+ if (info->attrs[BATADV_ATTR_ISOLATION_MASK]) {
+ attr = info->attrs[BATADV_ATTR_ISOLATION_MASK];
+
+ bat_priv->isolation_mark_mask = nla_get_u32(attr);
+ }
+
+ if (info->attrs[BATADV_ATTR_BONDING_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_BONDING_ENABLED];
+
+ atomic_set(&bat_priv->bonding, !!nla_get_u8(attr));
+ }
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+ if (info->attrs[BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED];
+
+ atomic_set(&bat_priv->bridge_loop_avoidance,
+ !!nla_get_u8(attr));
+ batadv_bla_status_update(bat_priv->soft_iface);
+ }
+#endif /* CONFIG_BATMAN_ADV_BLA */
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+ if (info->attrs[BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED];
+
+ atomic_set(&bat_priv->distributed_arp_table,
+ !!nla_get_u8(attr));
+ batadv_dat_status_update(bat_priv->soft_iface);
+ }
+#endif /* CONFIG_BATMAN_ADV_DAT */
+
+ if (info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED];
+
+ atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr));
+ batadv_update_min_mtu(bat_priv->soft_iface);
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) {
+ attr = info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN];
+
+ atomic_set(&bat_priv->gw.bandwidth_down, nla_get_u32(attr));
+ batadv_gw_tvlv_container_update(bat_priv);
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_UP]) {
+ attr = info->attrs[BATADV_ATTR_GW_BANDWIDTH_UP];
+
+ atomic_set(&bat_priv->gw.bandwidth_up, nla_get_u32(attr));
+ batadv_gw_tvlv_container_update(bat_priv);
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_MODE]) {
+ u8 gw_mode;
+
+ attr = info->attrs[BATADV_ATTR_GW_MODE];
+ gw_mode = nla_get_u8(attr);
+
+ if (gw_mode <= BATADV_GW_MODE_SERVER) {
+ /* Invoking batadv_gw_reselect() is not enough to really
+ * de-select the current GW. It will only instruct the
+ * gateway client code to perform a re-election the next
+ * time that this is needed.
+ *
+ * When gw client mode is being switched off, the current
+ * GW must be de-selected explicitly; otherwise no GW_ADD
+ * uevent is thrown on client mode re-activation. This
+ * operation is performed in batadv_gw_check_client_stop().
+ */
+ batadv_gw_reselect(bat_priv);
+
+ /* always call batadv_gw_check_client_stop() before
+ * changing the gateway state
+ */
+ batadv_gw_check_client_stop(bat_priv);
+ atomic_set(&bat_priv->gw.mode, gw_mode);
+ batadv_gw_tvlv_container_update(bat_priv);
+ }
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_SEL_CLASS] &&
+ bat_priv->algo_ops->gw.get_best_gw_node &&
+ bat_priv->algo_ops->gw.is_eligible) {
+ /* setting the GW selection class is allowed only if the routing
+ * algorithm in use implements the GW API
+ */
+
+ u32 sel_class_max = 0xffffffffu;
+ u32 sel_class;
+
+ attr = info->attrs[BATADV_ATTR_GW_SEL_CLASS];
+ sel_class = nla_get_u32(attr);
+
+ if (!bat_priv->algo_ops->gw.store_sel_class)
+ sel_class_max = BATADV_TQ_MAX_VALUE;
+
+ if (sel_class >= 1 && sel_class <= sel_class_max) {
+ atomic_set(&bat_priv->gw.sel_class, sel_class);
+ batadv_gw_reselect(bat_priv);
+ }
+ }
+
+ if (info->attrs[BATADV_ATTR_HOP_PENALTY]) {
+ attr = info->attrs[BATADV_ATTR_HOP_PENALTY];
+
+ atomic_set(&bat_priv->hop_penalty, nla_get_u8(attr));
+ }
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ if (info->attrs[BATADV_ATTR_LOG_LEVEL]) {
+ attr = info->attrs[BATADV_ATTR_LOG_LEVEL];
+
+ atomic_set(&bat_priv->log_level,
+ nla_get_u32(attr) & BATADV_DBG_ALL);
+ }
+#endif /* CONFIG_BATMAN_ADV_DEBUG */
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ if (info->attrs[BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED];
+
+ atomic_set(&bat_priv->multicast_mode, !nla_get_u8(attr));
+ }
+#endif /* CONFIG_BATMAN_ADV_MCAST */
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ if (info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED];
+
+ atomic_set(&bat_priv->network_coding, !!nla_get_u8(attr));
+ batadv_nc_status_update(bat_priv->soft_iface);
+ }
+#endif /* CONFIG_BATMAN_ADV_NC */
+
+ if (info->attrs[BATADV_ATTR_ORIG_INTERVAL]) {
+ u32 orig_interval;
+
+ attr = info->attrs[BATADV_ATTR_ORIG_INTERVAL];
+ orig_interval = nla_get_u32(attr);
+
+ orig_interval = min_t(u32, orig_interval, INT_MAX);
+ orig_interval = max_t(u32, orig_interval, 2 * BATADV_JITTER);
+
+ atomic_set(&bat_priv->orig_interval, orig_interval);
+ }
+
+ batadv_netlink_notify_mesh(bat_priv);
+
+ return 0;
}
/**
@@ -329,40 +709,24 @@ err_genlmsg:
static int
batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
{
- struct net *net = genl_info_net(info);
- struct net_device *soft_iface;
- struct batadv_priv *bat_priv;
+ struct batadv_priv *bat_priv = info->user_ptr[0];
struct sk_buff *msg = NULL;
u32 test_length;
void *msg_head;
- int ifindex;
u32 cookie;
u8 *dst;
int ret;
- if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
- return -EINVAL;
-
if (!info->attrs[BATADV_ATTR_ORIG_ADDRESS])
return -EINVAL;
if (!info->attrs[BATADV_ATTR_TPMETER_TEST_TIME])
return -EINVAL;
- ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
- if (!ifindex)
- return -EINVAL;
-
dst = nla_data(info->attrs[BATADV_ATTR_ORIG_ADDRESS]);
test_length = nla_get_u32(info->attrs[BATADV_ATTR_TPMETER_TEST_TIME]);
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
@@ -377,15 +741,11 @@ batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- bat_priv = netdev_priv(soft_iface);
batadv_tp_start(bat_priv, dst, test_length, &cookie);
ret = batadv_netlink_tp_meter_put(msg, cookie);
out:
- if (soft_iface)
- dev_put(soft_iface);
-
if (ret) {
if (msg)
nlmsg_free(msg);
@@ -406,65 +766,53 @@ batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
static int
batadv_netlink_tp_meter_cancel(struct sk_buff *skb, struct genl_info *info)
{
- struct net *net = genl_info_net(info);
- struct net_device *soft_iface;
- struct batadv_priv *bat_priv;
- int ifindex;
+ struct batadv_priv *bat_priv = info->user_ptr[0];
u8 *dst;
int ret = 0;
- if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
- return -EINVAL;
-
if (!info->attrs[BATADV_ATTR_ORIG_ADDRESS])
return -EINVAL;
- ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
- if (!ifindex)
- return -EINVAL;
-
dst = nla_data(info->attrs[BATADV_ATTR_ORIG_ADDRESS]);
- soft_iface = dev_get_by_index(net, ifindex);
- if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
- ret = -ENODEV;
- goto out;
- }
-
- bat_priv = netdev_priv(soft_iface);
batadv_tp_stop(bat_priv, dst, BATADV_TP_REASON_CANCEL);
-out:
- if (soft_iface)
- dev_put(soft_iface);
-
return ret;
}
/**
- * batadv_netlink_dump_hardif_entry() - Dump one hard interface into a message
+ * batadv_netlink_hardif_fill() - Fill message with hardif attributes
* @msg: Netlink message to dump into
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hard_iface: hard interface which was modified
+ * @cmd: type of message to generate
* @portid: Port making netlink request
+ * @seq: sequence number for message
+ * @flags: Additional flags for message
* @cb: Control block containing additional options
- * @hard_iface: Hard interface to dump
*
- * Return: error code, or 0 on success
+ * Return: 0 on success or negative error number in case of failure
*/
-static int
-batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid,
- struct netlink_callback *cb,
- struct batadv_hard_iface *hard_iface)
+static int batadv_netlink_hardif_fill(struct sk_buff *msg,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ enum batadv_nl_commands cmd,
+ u32 portid, u32 seq, int flags,
+ struct netlink_callback *cb)
{
struct net_device *net_dev = hard_iface->net_dev;
void *hdr;
- hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
- &batadv_netlink_family, NLM_F_MULTI,
- BATADV_CMD_GET_HARDIFS);
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
if (!hdr)
- return -EMSGSIZE;
+ return -ENOBUFS;
+
+ if (cb)
+ genl_dump_check_consistent(cb, hdr);
- genl_dump_check_consistent(cb, hdr);
+ if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX,
+ bat_priv->soft_iface->ifindex))
+ goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
net_dev->ifindex) ||
@@ -479,27 +827,137 @@ batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid,
goto nla_put_failure;
}
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ if (nla_put_u32(msg, BATADV_ATTR_ELP_INTERVAL,
+ atomic_read(&hard_iface->bat_v.elp_interval)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_THROUGHPUT_OVERRIDE,
+ atomic_read(&hard_iface->bat_v.throughput_override)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
+
genlmsg_end(msg, hdr);
return 0;
- nla_put_failure:
+nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
- * batadv_netlink_dump_hardifs() - Dump all hard interface into a messages
+ * batadv_netlink_notify_hardif() - send hardif attributes to listener
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hard_iface: hard interface which was modified
+ *
+ * Return: 0 on success, < 0 on error
+ */
+int batadv_netlink_notify_hardif(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
+ BATADV_CMD_SET_HARDIF, 0, 0, 0, NULL);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ genlmsg_multicast_netns(&batadv_netlink_family,
+ dev_net(bat_priv->soft_iface), msg, 0,
+ BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_get_hardif() - Get hardif attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_get_hardif(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct batadv_hard_iface *hard_iface = info->user_ptr[1];
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
+ BATADV_CMD_GET_HARDIF,
+ info->snd_portid, info->snd_seq, 0,
+ NULL);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ ret = genlmsg_reply(msg, info);
+
+ return ret;
+}
+
+/**
+ * batadv_netlink_set_hardif() - Set hardif attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_set_hardif(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct batadv_hard_iface *hard_iface = info->user_ptr[1];
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ struct nlattr *attr;
+
+ if (info->attrs[BATADV_ATTR_ELP_INTERVAL]) {
+ attr = info->attrs[BATADV_ATTR_ELP_INTERVAL];
+
+ atomic_set(&hard_iface->bat_v.elp_interval, nla_get_u32(attr));
+ }
+
+ if (info->attrs[BATADV_ATTR_THROUGHPUT_OVERRIDE]) {
+ attr = info->attrs[BATADV_ATTR_THROUGHPUT_OVERRIDE];
+
+ atomic_set(&hard_iface->bat_v.throughput_override,
+ nla_get_u32(attr));
+ }
+#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
+
+ batadv_netlink_notify_hardif(bat_priv, hard_iface);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_dump_hardif() - Dump all hard interfaces into a message
* @msg: Netlink message to dump into
* @cb: Parameters from query
*
* Return: error code, or length of reply message on success
*/
static int
-batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
+batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb)
{
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hard_iface *hard_iface;
+ struct batadv_priv *bat_priv;
int ifindex;
int portid = NETLINK_CB(cb->skb).portid;
int skip = cb->args[0];
@@ -519,6 +977,8 @@ batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
return -ENODEV;
}
+ bat_priv = netdev_priv(soft_iface);
+
rtnl_lock();
cb->seq = batadv_hardif_generation << 1 | 1;
@@ -529,8 +989,10 @@ batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
if (i++ < skip)
continue;
- if (batadv_netlink_dump_hardif_entry(msg, portid, cb,
- hard_iface)) {
+ if (batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
+ BATADV_CMD_GET_HARDIF,
+ portid, cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, cb)) {
i--;
break;
}
@@ -545,24 +1007,361 @@ batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
return msg->len;
}
+/**
+ * batadv_netlink_vlan_fill() - Fill message with vlan attributes
+ * @msg: Netlink message to dump into
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: vlan which was modified
+ * @cmd: type of message to generate
+ * @portid: Port making netlink request
+ * @seq: sequence number for message
+ * @flags: Additional flags for message
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_vlan_fill(struct sk_buff *msg,
+ struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan,
+ enum batadv_nl_commands cmd,
+ u32 portid, u32 seq, int flags)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX,
+ bat_priv->soft_iface->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_VLANID, vlan->vid & VLAN_VID_MASK))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, BATADV_ATTR_AP_ISOLATION_ENABLED,
+ !!atomic_read(&vlan->ap_isolation)))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_netlink_notify_vlan() - send vlan attributes to listener
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: vlan which was modified
+ *
+ * Return: 0 on success, < 0 on error
+ */
+int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_vlan_fill(msg, bat_priv, vlan,
+ BATADV_CMD_SET_VLAN, 0, 0, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ genlmsg_multicast_netns(&batadv_netlink_family,
+ dev_net(bat_priv->soft_iface), msg, 0,
+ BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_get_vlan() - Get vlan attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_get_vlan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_softif_vlan *vlan = info->user_ptr[1];
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_vlan_fill(msg, bat_priv, vlan, BATADV_CMD_GET_VLAN,
+ info->snd_portid, info->snd_seq, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ ret = genlmsg_reply(msg, info);
+
+ return ret;
+}
+
+/**
+ * batadv_netlink_set_vlan() - Set vlan attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_set_vlan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_softif_vlan *vlan = info->user_ptr[1];
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct nlattr *attr;
+
+ if (info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED];
+
+ atomic_set(&vlan->ap_isolation, !!nla_get_u8(attr));
+ }
+
+ batadv_netlink_notify_vlan(bat_priv, vlan);
+
+ return 0;
+}
+
+/**
+ * batadv_get_softif_from_info() - Retrieve soft interface from genl attributes
+ * @net: the applicable net namespace
+ * @info: receiver information
+ *
+ * Return: Pointer to soft interface (with increased refcnt) on success, error
+ * pointer on error
+ */
+static struct net_device *
+batadv_get_softif_from_info(struct net *net, struct genl_info *info)
+{
+ struct net_device *soft_iface;
+ int ifindex;
+
+ if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
+ return ERR_PTR(-EINVAL);
+
+ ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface)
+ return ERR_PTR(-ENODEV);
+
+ if (!batadv_softif_is_valid(soft_iface))
+ goto err_put_softif;
+
+ return soft_iface;
+
+err_put_softif:
+ dev_put(soft_iface);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * batadv_get_hardif_from_info() - Retrieve hardif from genl attributes
+ * @bat_priv: the bat priv with all the soft interface information
+ * @net: the applicable net namespace
+ * @info: receiver information
+ *
+ * Return: Pointer to hard interface (with increased refcnt) on success, error
+ * pointer on error
+ */
+static struct batadv_hard_iface *
+batadv_get_hardif_from_info(struct batadv_priv *bat_priv, struct net *net,
+ struct genl_info *info)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct net_device *hard_dev;
+ unsigned int hardif_index;
+
+ if (!info->attrs[BATADV_ATTR_HARD_IFINDEX])
+ return ERR_PTR(-EINVAL);
+
+ hardif_index = nla_get_u32(info->attrs[BATADV_ATTR_HARD_IFINDEX]);
+
+ hard_dev = dev_get_by_index(net, hardif_index);
+ if (!hard_dev)
+ return ERR_PTR(-ENODEV);
+
+ hard_iface = batadv_hardif_get_by_netdev(hard_dev);
+ if (!hard_iface)
+ goto err_put_harddev;
+
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ goto err_put_hardif;
+
+ /* hard_dev is referenced by hard_iface and not needed here */
+ dev_put(hard_dev);
+
+ return hard_iface;
+
+err_put_hardif:
+ batadv_hardif_put(hard_iface);
+err_put_harddev:
+ dev_put(hard_dev);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * batadv_get_vlan_from_info() - Retrieve vlan from genl attributes
+ * @bat_priv: the bat priv with all the soft interface information
+ * @net: the applicable net namespace
+ * @info: receiver information
+ *
+ * Return: Pointer to vlan on success (with increased refcnt), error pointer
+ * on error
+ */
+static struct batadv_softif_vlan *
+batadv_get_vlan_from_info(struct batadv_priv *bat_priv, struct net *net,
+ struct genl_info *info)
+{
+ struct batadv_softif_vlan *vlan;
+ u16 vid;
+
+ if (!info->attrs[BATADV_ATTR_VLANID])
+ return ERR_PTR(-EINVAL);
+
+ vid = nla_get_u16(info->attrs[BATADV_ATTR_VLANID]);
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
+ if (!vlan)
+ return ERR_PTR(-ENOENT);
+
+ return vlan;
+}
+
+/**
+ * batadv_pre_doit() - Prepare batman-adv genl doit request
+ * @ops: requested netlink operation
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_priv *bat_priv = NULL;
+ struct batadv_softif_vlan *vlan;
+ struct net_device *soft_iface;
+ u8 user_ptr1_flags;
+ u8 mesh_dep_flags;
+ int ret;
+
+ user_ptr1_flags = BATADV_FLAG_NEED_HARDIF | BATADV_FLAG_NEED_VLAN;
+ if (WARN_ON(hweight8(ops->internal_flags & user_ptr1_flags) > 1))
+ return -EINVAL;
+
+ mesh_dep_flags = BATADV_FLAG_NEED_HARDIF | BATADV_FLAG_NEED_VLAN;
+ if (WARN_ON((ops->internal_flags & mesh_dep_flags) &&
+ (~ops->internal_flags & BATADV_FLAG_NEED_MESH)))
+ return -EINVAL;
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_MESH) {
+ soft_iface = batadv_get_softif_from_info(net, info);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
+
+ bat_priv = netdev_priv(soft_iface);
+ info->user_ptr[0] = bat_priv;
+ }
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_HARDIF) {
+ hard_iface = batadv_get_hardif_from_info(bat_priv, net, info);
+ if (IS_ERR(hard_iface)) {
+ ret = PTR_ERR(hard_iface);
+ goto err_put_softif;
+ }
+
+ info->user_ptr[1] = hard_iface;
+ }
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_VLAN) {
+ vlan = batadv_get_vlan_from_info(bat_priv, net, info);
+ if (IS_ERR(vlan)) {
+ ret = PTR_ERR(vlan);
+ goto err_put_softif;
+ }
+
+ info->user_ptr[1] = vlan;
+ }
+
+ return 0;
+
+err_put_softif:
+ if (bat_priv)
+ dev_put(bat_priv->soft_iface);
+
+ return ret;
+}
+
+/**
+ * batadv_post_doit() - End batman-adv genl doit request
+ * @ops: requested netlink operation
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ */
+static void batadv_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_softif_vlan *vlan;
+ struct batadv_priv *bat_priv;
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_HARDIF &&
+ info->user_ptr[1]) {
+ hard_iface = info->user_ptr[1];
+
+ batadv_hardif_put(hard_iface);
+ }
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_VLAN && info->user_ptr[1]) {
+ vlan = info->user_ptr[1];
+ batadv_softif_vlan_put(vlan);
+ }
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_MESH && info->user_ptr[0]) {
+ bat_priv = info->user_ptr[0];
+ dev_put(bat_priv->soft_iface);
+ }
+}
+
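Worth noting for the two helpers above: BATADV_FLAG_NEED_HARDIF and BATADV_FLAG_NEED_VLAN both park their lookup result in info->user_ptr[1], which is why batadv_pre_doit() rejects (WARN_ON) any op that sets more than one of them, and why both imply BATADV_FLAG_NEED_MESH so that user_ptr[0] always carries the bat_priv those lookups need. batadv_post_doit() then releases exactly the references that pre_doit took, keyed on the same flags.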
static const struct genl_ops batadv_netlink_ops[] = {
{
- .cmd = BATADV_CMD_GET_MESH_INFO,
- .flags = GENL_ADMIN_PERM,
+ .cmd = BATADV_CMD_GET_MESH,
+ /* can be retrieved by unprivileged users */
.policy = batadv_netlink_policy,
- .doit = batadv_netlink_get_mesh_info,
+ .doit = batadv_netlink_get_mesh,
+ .internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_TP_METER,
.flags = GENL_ADMIN_PERM,
.policy = batadv_netlink_policy,
.doit = batadv_netlink_tp_meter_start,
+ .internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_TP_METER_CANCEL,
.flags = GENL_ADMIN_PERM,
.policy = batadv_netlink_policy,
.doit = batadv_netlink_tp_meter_cancel,
+ .internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_GET_ROUTING_ALGOS,
@@ -571,10 +1370,13 @@ static const struct genl_ops batadv_netlink_ops[] = {
.dumpit = batadv_algo_dump,
},
{
- .cmd = BATADV_CMD_GET_HARDIFS,
- .flags = GENL_ADMIN_PERM,
+ .cmd = BATADV_CMD_GET_HARDIF,
+ /* can be retrieved by unprivileged users */
.policy = batadv_netlink_policy,
- .dumpit = batadv_netlink_dump_hardifs,
+ .dumpit = batadv_netlink_dump_hardif,
+ .doit = batadv_netlink_get_hardif,
+ .internal_flags = BATADV_FLAG_NEED_MESH |
+ BATADV_FLAG_NEED_HARDIF,
},
{
.cmd = BATADV_CMD_GET_TRANSTABLE_LOCAL,
@@ -630,7 +1432,37 @@ static const struct genl_ops batadv_netlink_ops[] = {
.policy = batadv_netlink_policy,
.dumpit = batadv_mcast_flags_dump,
},
-
+ {
+ .cmd = BATADV_CMD_SET_MESH,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .doit = batadv_netlink_set_mesh,
+ .internal_flags = BATADV_FLAG_NEED_MESH,
+ },
+ {
+ .cmd = BATADV_CMD_SET_HARDIF,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .doit = batadv_netlink_set_hardif,
+ .internal_flags = BATADV_FLAG_NEED_MESH |
+ BATADV_FLAG_NEED_HARDIF,
+ },
+ {
+ .cmd = BATADV_CMD_GET_VLAN,
+ /* can be retrieved by unprivileged users */
+ .policy = batadv_netlink_policy,
+ .doit = batadv_netlink_get_vlan,
+ .internal_flags = BATADV_FLAG_NEED_MESH |
+ BATADV_FLAG_NEED_VLAN,
+ },
+ {
+ .cmd = BATADV_CMD_SET_VLAN,
+ .flags = GENL_ADMIN_PERM,
+ .policy = batadv_netlink_policy,
+ .doit = batadv_netlink_set_vlan,
+ .internal_flags = BATADV_FLAG_NEED_MESH |
+ BATADV_FLAG_NEED_VLAN,
+ },
};
struct genl_family batadv_netlink_family __ro_after_init = {
@@ -639,6 +1471,8 @@ struct genl_family batadv_netlink_family __ro_after_init = {
.version = 1,
.maxattr = BATADV_ATTR_MAX,
.netnsok = true,
+ .pre_doit = batadv_pre_doit,
+ .post_doit = batadv_post_doit,
.module = THIS_MODULE,
.ops = batadv_netlink_ops,
.n_ops = ARRAY_SIZE(batadv_netlink_ops),
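For context, the new doit handlers are driven over generic netlink from userspace (batctl is the usual consumer). Below is a rough libnl-3 sketch of a BATADV_CMD_GET_MESH query against bat0; it assumes a <linux/batman_adv.h> uapi header recent enough to define BATADV_CMD_GET_MESH and the attributes used here (older headers only have the BATADV_CMD_GET_MESH_INFO spelling), and error handling is trimmed for brevity.

    /* Hedged libnl-3 sketch: query mesh attributes of bat0 through the new
     * BATADV_CMD_GET_MESH doit handler. Error handling omitted for brevity.
     */
    #include <net/if.h>
    #include <stdio.h>
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/batman_adv.h>

    static int ex_print_mesh(struct nl_msg *msg, void *arg)
    {
        struct nlattr *attrs[BATADV_ATTR_MAX + 1];

        if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, BATADV_ATTR_MAX, NULL))
            return NL_SKIP;

        if (attrs[BATADV_ATTR_ALGO_NAME])
            printf("algo: %s\n",
                   nla_get_string(attrs[BATADV_ATTR_ALGO_NAME]));
        if (attrs[BATADV_ATTR_ORIG_INTERVAL])
            printf("orig_interval: %u ms\n",
                   nla_get_u32(attrs[BATADV_ATTR_ORIG_INTERVAL]));

        return NL_OK;
    }

    int main(void)
    {
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg;
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, BATADV_NL_NAME);

        msg = nlmsg_alloc();
        /* version 1 matches batadv_netlink_family.version */
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    BATADV_CMD_GET_MESH, 1);
        nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, if_nametoindex("bat0"));

        nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, ex_print_mesh, NULL);
        nl_send_auto(sk, msg);
        nl_recvmsgs_default(sk);

        nlmsg_free(msg);
        nl_socket_free(sk);
        return 0;
    }

BATADV_CMD_SET_MESH works the same way on the request side but keeps GENL_ADMIN_PERM, so the setter needs CAP_NET_ADMIN while the getters above can now be issued by unprivileged users.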
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
index 571d9a5ae7aa..7273368544fc 100644
--- a/net/batman-adv/netlink.h
+++ b/net/batman-adv/netlink.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2016-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2019 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*
@@ -34,6 +34,12 @@ int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
u8 result, u32 test_time, u64 total_bytes,
u32 cookie);
+int batadv_netlink_notify_mesh(struct batadv_priv *bat_priv);
+int batadv_netlink_notify_hardif(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface);
+int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan);
+
extern struct genl_family batadv_netlink_family;
#endif /* _NET_BATMAN_ADV_NETLINK_H_ */
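The notify helpers declared above multicast the updated state on the new config group whenever one of the SET handlers changes something. A hedged libnl-3 sketch of a listener for those events follows; BATADV_NL_MCAST_GROUP_CONFIG is assumed to be exported by the updated uapi header, and error handling is kept minimal.

    /* Hedged libnl-3 sketch: subscribe to the batman-adv config change
     * notifications sent by batadv_netlink_notify_mesh() and friends.
     */
    #include <stdio.h>
    #include <linux/genetlink.h>
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/batman_adv.h>

    static int ex_on_config_change(struct nl_msg *msg, void *arg)
    {
        struct genlmsghdr *ghdr = genlmsg_hdr(nlmsg_hdr(msg));

        /* cmd says what changed: SET_MESH, SET_HARDIF or SET_VLAN */
        printf("batadv config notification, cmd=%u\n", ghdr->cmd);
        return NL_OK;
    }

    int main(void)
    {
        struct nl_sock *sk = nl_socket_alloc();
        int grp;

        genl_connect(sk);
        grp = genl_ctrl_resolve_grp(sk, BATADV_NL_NAME,
                                    BATADV_NL_MCAST_GROUP_CONFIG);
        nl_socket_add_membership(sk, grp);

        /* notifications are unsolicited, so drop sequence checking */
        nl_socket_disable_seq_check(sk);
        nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
                            ex_on_config_change, NULL);

        for (;;)
            nl_recvmsgs_default(sk);

        return 0;
    }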
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 34caf129a9bf..278762bd94c6 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2019 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
*
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 65c346812bc1..96ef0a511fc7 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2019 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
*
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 56a981af5c92..e5cdf89ef63c 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2009-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index a8b4c7b667ec..dca1e4a34ec6 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index cc3ed93a6d51..cae0e5dd0768 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -1043,6 +1043,8 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
hdr_size))
goto rx_success;
+ batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);
+
batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
orig_node);
@@ -1278,6 +1280,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
goto rx_success;
+ batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);
+
/* broadcast for me */
batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index db54c2d9b8bf..0102d69d345c 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 4a35f5c2f52b..66a8b3e44501 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 64cce07b8fe6..1f6132922e60 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5db5a0a4c959..2e367230376b 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -50,13 +50,13 @@
#include <linux/string.h>
#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
-#include "gateway_common.h"
#include "hard-interface.h"
#include "multicast.h"
#include "network-coding.h"
@@ -212,6 +212,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
enum batadv_forw_mode forw_mode;
struct batadv_orig_node *mcast_single_orig = NULL;
int network_offset = ETH_HLEN;
+ __be16 proto;
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dropped;
@@ -221,14 +222,21 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
netif_trans_update(soft_iface);
vid = batadv_get_vid(skb, 0);
+
+ skb_reset_mac_header(skb);
ethhdr = eth_hdr(skb);
- switch (ntohs(ethhdr->h_proto)) {
+ proto = ethhdr->h_proto;
+
+ switch (ntohs(proto)) {
case ETH_P_8021Q:
+ if (!pskb_may_pull(skb, sizeof(*vhdr)))
+ goto dropped;
vhdr = vlan_eth_hdr(skb);
+ proto = vhdr->h_vlan_encapsulated_proto;
/* drop batman-in-batman packets to prevent loops */
- if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN)) {
+ if (proto != htons(ETH_P_BATMAN)) {
network_offset += VLAN_HLEN;
break;
}
@@ -256,6 +264,9 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
goto dropped;
}
+ /* Snoop address candidates from DHCPACKs for early DAT filling */
+ batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);
+
/* don't accept stp packets. STP does not help in meshes.
* better use the bridge loop avoidance ...
*
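The soft-interface.c hunk above now pulls the VLAN header into the linear area with pskb_may_pull() before dereferencing it, and caches the (possibly encapsulated) protocol for the later DHCP-ACK snooping call. A minimal userspace sketch of the same parsing discipline follows; frame_get_proto() and the local constants are illustrative, not taken from the patch.

#include <arpa/inet.h>	/* ntohs() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_P_8021Q	0x8100

static uint16_t read_be16(const uint8_t *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));
	return ntohs(v);
}

/* Return the payload protocol of an Ethernet frame, looking through one
 * optional 802.1Q tag.  Returns 0 when the buffer is too short for the
 * headers it claims to carry -- the analogue of the pskb_may_pull() check
 * added above. */
static uint16_t frame_get_proto(const uint8_t *buf, size_t len)
{
	uint16_t proto;

	if (len < ETH_HLEN)
		return 0;
	proto = read_be16(buf + 12);			/* ethhdr->h_proto */

	if (proto == ETH_P_8021Q) {
		if (len < ETH_HLEN + VLAN_HLEN)		/* bounds check first */
			return 0;
		proto = read_be16(buf + ETH_HLEN + 2);	/* encapsulated proto */
	}
	return proto;
}

int main(void)
{
	uint8_t tagged[ETH_HLEN + VLAN_HLEN] = { 0 };

	tagged[12] = 0x81;	/* 802.1Q TPID */
	tagged[13] = 0x00;
	tagged[16] = 0x08;	/* encapsulated IPv4 (0x0800) */
	tagged[17] = 0x00;
	printf("proto 0x%04x\n", frame_get_proto(tagged, sizeof(tagged)));
	return 0;
}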
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index daf87f07fadd..538bb661878c 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 09427fc6494a..0b4b3fb778a6 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -40,6 +40,7 @@
#include <linux/stringify.h>
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
@@ -47,6 +48,7 @@
#include "gateway_common.h"
#include "hard-interface.h"
#include "log.h"
+#include "netlink.h"
#include "network-coding.h"
#include "soft-interface.h"
@@ -153,9 +155,14 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
{ \
struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
struct batadv_priv *bat_priv = netdev_priv(net_dev); \
+ ssize_t length; \
+ \
+ length = __batadv_store_bool_attr(buff, count, _post_func, attr,\
+ &bat_priv->_name, net_dev); \
\
- return __batadv_store_bool_attr(buff, count, _post_func, attr, \
- &bat_priv->_name, net_dev); \
+ batadv_netlink_notify_mesh(bat_priv); \
+ \
+ return length; \
}
#define BATADV_ATTR_SIF_SHOW_BOOL(_name) \
@@ -185,11 +192,16 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
{ \
struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
struct batadv_priv *bat_priv = netdev_priv(net_dev); \
+ ssize_t length; \
\
- return __batadv_store_uint_attr(buff, count, _min, _max, \
- _post_func, attr, \
- &bat_priv->_var, net_dev, \
- NULL); \
+ length = __batadv_store_uint_attr(buff, count, _min, _max, \
+ _post_func, attr, \
+ &bat_priv->_var, net_dev, \
+ NULL); \
+ \
+ batadv_netlink_notify_mesh(bat_priv); \
+ \
+ return length; \
}
#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
@@ -222,6 +234,11 @@ ssize_t batadv_store_vlan_##_name(struct kobject *kobj, \
attr, &vlan->_name, \
bat_priv->soft_iface); \
\
+ if (vlan->vid) \
+ batadv_netlink_notify_vlan(bat_priv, vlan); \
+ else \
+ batadv_netlink_notify_mesh(bat_priv); \
+ \
batadv_softif_vlan_put(vlan); \
return res; \
}
@@ -255,6 +272,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
{ \
struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
struct batadv_hard_iface *hard_iface; \
+ struct batadv_priv *bat_priv; \
ssize_t length; \
\
hard_iface = batadv_hardif_get_by_netdev(net_dev); \
@@ -267,6 +285,11 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
hard_iface->soft_iface, \
net_dev); \
\
+ if (hard_iface->soft_iface) { \
+ bat_priv = netdev_priv(hard_iface->soft_iface); \
+ batadv_netlink_notify_hardif(bat_priv, hard_iface); \
+ } \
+ \
batadv_hardif_put(hard_iface); \
return length; \
}
@@ -536,6 +559,9 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
batadv_gw_check_client_stop(bat_priv);
atomic_set(&bat_priv->gw.mode, (unsigned int)gw_mode_tmp);
batadv_gw_tvlv_container_update(bat_priv);
+
+ batadv_netlink_notify_mesh(bat_priv);
+
return count;
}
@@ -562,6 +588,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
size_t count)
{
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ ssize_t length;
/* setting the GW selection class is allowed only if the routing
* algorithm in use implements the GW API
@@ -577,10 +604,14 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
return bat_priv->algo_ops->gw.store_sel_class(bat_priv, buff,
count);
- return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
- batadv_post_gw_reselect, attr,
- &bat_priv->gw.sel_class,
- bat_priv->soft_iface, NULL);
+ length = __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
+ batadv_post_gw_reselect, attr,
+ &bat_priv->gw.sel_class,
+ bat_priv->soft_iface, NULL);
+
+ batadv_netlink_notify_mesh(bat_priv);
+
+ return length;
}
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -600,12 +631,18 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
struct attribute *attr, char *buff,
size_t count)
{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ ssize_t length;
if (buff[count - 1] == '\n')
buff[count - 1] = '\0';
- return batadv_gw_bandwidth_set(net_dev, buff, count);
+ length = batadv_gw_bandwidth_set(net_dev, buff, count);
+
+ batadv_netlink_notify_mesh(bat_priv);
+
+ return length;
}
/**
@@ -673,6 +710,8 @@ static ssize_t batadv_store_isolation_mark(struct kobject *kobj,
"New skb mark for extended isolation: %#.8x/%#.8x\n",
bat_priv->isolation_mark, bat_priv->isolation_mark_mask);
+ batadv_netlink_notify_mesh(bat_priv);
+
return count;
}
@@ -1077,6 +1116,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
struct attribute *attr,
char *buff, size_t count)
{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
struct batadv_hard_iface *hard_iface;
u32 tp_override;
@@ -1107,6 +1147,8 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
+ batadv_netlink_notify_hardif(bat_priv, hard_iface);
+
out:
batadv_hardif_put(hard_iface);
return count;
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index c1e3fb69952d..705ffbe763f4 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 11520de96ccb..500109bbd551 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2019 B.A.T.M.A.N. contributors:
*
* Edo Monticelli, Antonio Quartulli
*
diff --git a/net/batman-adv/tp_meter.h b/net/batman-adv/tp_meter.h
index 68e600974759..6b4d0f733896 100644
--- a/net/batman-adv/tp_meter.h
+++ b/net/batman-adv/tp_meter.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2019 B.A.T.M.A.N. contributors:
*
* Edo Monticelli, Antonio Quartulli
*
diff --git a/net/batman-adv/trace.c b/net/batman-adv/trace.c
index 8e1024217cff..f77c917ed20d 100644
--- a/net/batman-adv/trace.c
+++ b/net/batman-adv/trace.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2019 B.A.T.M.A.N. contributors:
*
* Sven Eckelmann
*
diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h
index 104784be94d7..5e5579051400 100644
--- a/net/batman-adv/trace.h
+++ b/net/batman-adv/trace.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2019 B.A.T.M.A.N. contributors:
*
* Sven Eckelmann
*
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8dcd4968cde7..f73d79139ae7 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 01b6c8eafaf9..61bca75e5911 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 40e69c9346d2..7e947b01919d 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
index ef5867f49824..c0f033b1acb8 100644
--- a/net/batman-adv/tvlv.h
+++ b/net/batman-adv/tvlv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index cbe17da36fcb..a21b34ed6548 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2007-2018 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 9d79c7de234a..a7cd23f00bde 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -1108,8 +1108,8 @@ static int lowpan_enable_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
- lowpan_enable_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
+ lowpan_enable_set, "%llu\n");
static ssize_t lowpan_control_write(struct file *fp,
const char __user *user_buffer,
@@ -1278,9 +1278,10 @@ static struct notifier_block bt_6lowpan_dev_notifier = {
static int __init bt_6lowpan_init(void)
{
- lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
- bt_debugfs, NULL,
- &lowpan_enable_fops);
+ lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable",
+ 0644, bt_debugfs,
+ NULL,
+ &lowpan_enable_fops);
lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
bt_debugfs, NULL,
&lowpan_control_fops);
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 58fc6333d412..5f918ea18b5a 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -174,7 +174,7 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
num_ctrl++;
}
- len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
+ len = struct_size(rsp, cl, num_ctrl);
rsp = kmalloc(len, GFP_ATOMIC);
if (!rsp) {
read_unlock(&hci_dev_list_lock);
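struct_size(rsp, cl, num_ctrl) replaces the open-coded num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp) and folds overflow protection into the flexible-array allocation. A hedged userspace approximation follows; the struct names and the saturating helper are invented, and the real kernel macro additionally derives the element size from the member itself.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cl_entry {
	uint8_t id;
	uint8_t type;
	uint8_t status;
};

struct discover_rsp {
	uint16_t mtu;
	uint16_t ext_feat;
	struct cl_entry cl[];		/* flexible array member */
};

/* Rough stand-in for the kernel's struct_size(): header plus n elements,
 * clamped to SIZE_MAX if the arithmetic would overflow. */
static size_t struct_size_approx(size_t hdr, size_t elem, size_t n)
{
	if (elem && n > (SIZE_MAX - hdr) / elem)
		return SIZE_MAX;
	return hdr + n * elem;
}

int main(void)
{
	size_t n = 3;
	size_t len = struct_size_approx(sizeof(struct discover_rsp),
					sizeof(struct cl_entry), n);
	struct discover_rsp *rsp = malloc(len);

	if (!rsp)
		return 1;
	memset(rsp, 0, len);
	printf("allocated %zu bytes for %zu entries\n", len, n);
	free(rsp);
	return 0;
}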
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index deacc52d7ff1..8d12198eaa94 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -154,15 +154,25 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
}
EXPORT_SYMBOL(bt_sock_unlink);
-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
{
BT_DBG("parent %p, sk %p", parent, sk);
sock_hold(sk);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (bh)
+ bh_lock_sock_nested(sk);
+ else
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
bt_sk(sk)->parent = parent;
- release_sock(sk);
+
+ if (bh)
+ bh_unlock_sock(sk);
+ else
+ release_sock(sk);
+
parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 7352fe85674b..d6b2540ba7f8 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -30,6 +30,7 @@
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
+#include <linux/property.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -1355,6 +1356,32 @@ done:
return err;
}
+/**
+ * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
+ * (BD_ADDR) for a HCI device from
+ * a firmware node property.
+ * @hdev: The HCI device
+ *
+ * Search the firmware node for 'local-bd-address'.
+ *
+ * All-zero BD addresses are rejected, because those could be properties
+ * that exist in the firmware tables, but were not updated by the firmware. For
+ * example, the DTS could define 'local-bd-address', with zero BD addresses.
+ */
+static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
+ bdaddr_t ba;
+ int ret;
+
+ ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
+ (u8 *)&ba, sizeof(ba));
+ if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
+ return;
+
+ bacpy(&hdev->public_addr, &ba);
+}
+
static int hci_dev_do_open(struct hci_dev *hdev)
{
int ret = 0;
@@ -1422,6 +1449,22 @@ static int hci_dev_do_open(struct hci_dev *hdev)
if (hdev->setup)
ret = hdev->setup(hdev);
+ if (ret)
+ goto setup_failed;
+
+ if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
+ if (!bacmp(&hdev->public_addr, BDADDR_ANY))
+ hci_dev_get_bd_addr_from_property(hdev);
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
+ hdev->set_bdaddr)
+ ret = hdev->set_bdaddr(hdev,
+ &hdev->public_addr);
+ else
+ ret = -EADDRNOTAVAIL;
+ }
+
+setup_failed:
/* The transport driver can set these quirks before
* creating the HCI device or in its setup callback.
*
@@ -2578,6 +2621,9 @@ static void hci_cmd_timeout(struct work_struct *work)
bt_dev_err(hdev, "command tx timeout");
}
+ if (hdev->cmd_timeout)
+ hdev->cmd_timeout(hdev);
+
atomic_set(&hdev->cmd_cnt, 1);
queue_work(hdev->workqueue, &hdev->cmd_work);
}
@@ -3401,7 +3447,7 @@ EXPORT_SYMBOL(hci_resume_dev);
/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
- const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
+ static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
struct sk_buff *skb;
skb = bt_skb_alloc(3, GFP_ATOMIC);
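hci_dev_get_bd_addr_from_property() only adopts the firmware-provided 'local-bd-address' when it is non-zero, and hci_dev_do_open() then reports -EADDRNOTAVAIL if no usable address turned up. A small userspace sketch of that acceptance test; only the property name comes from the patch, everything else is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Accept a candidate BD address only if it is present and not all zeroes,
 * mirroring the bacmp(&ba, BDADDR_ANY) check in the hunk above. */
static bool bd_addr_usable(const uint8_t addr[6])
{
	static const uint8_t any[6];	/* 00:00:00:00:00:00 */

	return memcmp(addr, any, 6) != 0;
}

int main(void)
{
	const uint8_t from_dts[6] = { 0 };	/* property present but never filled in */
	const uint8_t provisioned[6] = { 0xc0, 0xff, 0xee, 0x01, 0x02, 0x03 };

	printf("all-zero property usable: %d\n", bd_addr_usable(from_dts));
	printf("provisioned address usable: %d\n", bd_addr_usable(provisioned));
	return 0;
}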
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index ac2826ce162b..609fd6871c5a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3556,8 +3556,8 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
- ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
+ if (skb->len < sizeof(*ev) ||
+ skb->len < struct_size(ev, handles, ev->num_hndl)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
@@ -3644,8 +3644,8 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
- ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
+ if (skb->len < sizeof(*ev) ||
+ skb->len < struct_size(ev, handles, ev->num_hndl)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1506e1632394..d32077b28433 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -831,8 +831,6 @@ static int hci_sock_release(struct socket *sock)
if (!sk)
return 0;
- hdev = hci_pi(sk)->hdev;
-
switch (hci_pi(sk)->channel) {
case HCI_CHANNEL_MONITOR:
atomic_dec(&monitor_promisc);
@@ -854,6 +852,7 @@ static int hci_sock_release(struct socket *sock)
bt_sock_unlink(&hci_sk_list, sk);
+ hdev = hci_pi(sk)->hdev;
if (hdev) {
if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
/* When releasing a user channel exclusive access,
@@ -1383,9 +1382,9 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
- struct compat_timeval ctv;
+ struct old_timeval32 ctv;
#endif
- struct timeval tv;
+ struct __kernel_old_timeval tv;
void *data;
int len;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2a7fb517d460..f17e393b43b4 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3337,16 +3337,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
+ if (len < 0)
+ break;
hint = type & L2CAP_CONF_HINT;
type &= L2CAP_CONF_MASK;
switch (type) {
case L2CAP_CONF_MTU:
+ if (olen != 2)
+ break;
mtu = val;
break;
case L2CAP_CONF_FLUSH_TO:
+ if (olen != 2)
+ break;
chan->flush_to = val;
break;
@@ -3354,26 +3360,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
break;
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *) val, olen);
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *) val, olen);
break;
case L2CAP_CONF_FCS:
+ if (olen != 1)
+ break;
if (val == L2CAP_FCS_NONE)
set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
break;
case L2CAP_CONF_EFS:
- if (olen == sizeof(efs)) {
- remote_efs = 1;
- memcpy(&efs, (void *) val, olen);
- }
+ if (olen != sizeof(efs))
+ break;
+ remote_efs = 1;
+ memcpy(&efs, (void *) val, olen);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
return -ECONNREFUSED;
-
set_bit(FLAG_EXT_CTRL, &chan->flags);
set_bit(CONF_EWS_RECV, &chan->conf_state);
chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
@@ -3383,7 +3393,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
default:
if (hint)
break;
-
result = L2CAP_CONF_UNKNOWN;
*((u8 *) ptr++) = type;
break;
@@ -3548,58 +3557,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if (len < 0)
+ break;
switch (type) {
case L2CAP_CONF_MTU:
+ if (olen != 2)
+ break;
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
+ endptr - ptr);
break;
case L2CAP_CONF_FLUSH_TO:
+ if (olen != 2)
+ break;
chan->flush_to = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
+ chan->flush_to, endptr - ptr);
break;
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
-
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *)val, olen);
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
rfc.mode != chan->mode)
return -ECONNREFUSED;
-
chan->fcs = 0;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
chan->tx_win, endptr - ptr);
break;
case L2CAP_CONF_EFS:
- if (olen == sizeof(efs)) {
- memcpy(&efs, (void *)val, olen);
-
- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != chan->local_stype)
- return -ECONNREFUSED;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs, endptr - ptr);
- }
+ if (olen != sizeof(efs))
+ break;
+ memcpy(&efs, (void *)val, olen);
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs, endptr - ptr);
break;
case L2CAP_CONF_FCS:
+ if (olen != 1)
+ break;
if (*result == L2CAP_CONF_PENDING)
if (val == L2CAP_FCS_NONE)
set_bit(CONF_RECV_NO_FCS,
@@ -3728,13 +3744,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if (len < 0)
+ break;
switch (type) {
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *)val, olen);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
txwin_ext = val;
break;
}
@@ -4244,6 +4265,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
goto done;
break;
}
+ /* fall through */
default:
l2cap_chan_set_err(chan, ECONNRESET);
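The l2cap_parse_conf_* hunks above now verify each option's length field against the expected size before copying it, instead of acting on short or oversized options. A minimal userspace TLV walker with the same per-option check; the option codes and sizes are made up for illustration, and the value is printed in host order for brevity.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OPT_MTU		0x01	/* expects 2 bytes */
#define OPT_FLUSH_TO	0x02	/* expects 2 bytes */

/* Walk a type/length/value buffer; skip options whose length is wrong
 * rather than reading past what the sender actually supplied. */
static void parse_options(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (len - off >= 2) {
		uint8_t type = buf[off];
		uint8_t olen = buf[off + 1];

		if (olen > len - off - 2)
			break;				/* truncated option, stop */

		if (type == OPT_MTU || type == OPT_FLUSH_TO) {
			uint16_t val;

			if (olen != sizeof(val)) {	/* wrong size: ignore it */
				off += 2 + olen;
				continue;
			}
			memcpy(&val, buf + off + 2, sizeof(val));
			printf("option 0x%02x = %u\n", type, val);
		}
		off += 2 + olen;
	}
}

int main(void)
{
	/* MTU option with a bogus 1-byte length, then a well-formed flush timeout */
	const uint8_t req[] = { OPT_MTU, 1, 0xff,
				OPT_FLUSH_TO, 2, 0x34, 0x12 };

	parse_options(req, sizeof(req));
	return 0;
}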
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 686bdc6b35b0..a3a2cd55e23a 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1252,7 +1252,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
l2cap_sock_init(sk, parent);
- bt_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk, false);
release_sock(parent);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ccce954f8146..2457f408d17d 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -474,7 +474,6 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_rp_read_ext_index_list *rp;
struct hci_dev *d;
- size_t rp_len;
u16 count;
int err;
@@ -488,8 +487,7 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
count++;
}
- rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
- rp = kmalloc(rp_len, GFP_ATOMIC);
+ rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
if (!rp) {
read_unlock(&hci_dev_list_lock);
return -ENOMEM;
@@ -525,7 +523,6 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
}
rp->num_controllers = cpu_to_le16(count);
- rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
read_unlock(&hci_dev_list_lock);
@@ -538,7 +535,8 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
- MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
+ MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
+ struct_size(rp, entry, count));
kfree(rp);
@@ -551,7 +549,8 @@ static bool is_configured(struct hci_dev *hdev)
!hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
return false;
- if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
+ if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
+ test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
!bacmp(&hdev->public_addr, BDADDR_ANY))
return false;
@@ -566,7 +565,8 @@ static __le32 get_missing_options(struct hci_dev *hdev)
!hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
options |= MGMT_OPTION_EXTERNAL_CONFIG;
- if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
+ if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
+ test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
!bacmp(&hdev->public_addr, BDADDR_ANY))
options |= MGMT_OPTION_PUBLIC_ADDRESS;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 1a635df80643..3a9e9d9670be 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -483,6 +483,7 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
/* if closing a dlc in a session that hasn't been started,
* just close and unlink the dlc
*/
+ /* fall through */
default:
rfcomm_dlc_clear_timer(d);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index aa0db1d1bd9b..b1f49fcc0478 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -988,7 +988,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
rfcomm_pi(sk)->channel = channel;
sk->sk_state = BT_CONFIG;
- bt_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk, true);
/* Accept connection and return socket DLC */
*d = rfcomm_pi(sk)->dlc;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 529b38996d8b..9a580999ca57 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -193,7 +193,7 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
conn->sk = sk;
if (parent)
- bt_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk, true);
}
static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..da7051d62727 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -13,27 +13,13 @@
#include <net/sock.h>
#include <net/tcp.h>
-static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
- u32 ret;
-
- preempt_disable();
- rcu_read_lock();
- bpf_cgroup_storage_set(storage);
- ret = BPF_PROG_RUN(prog, ctx);
- rcu_read_unlock();
- preempt_enable();
-
- return ret;
-}
-
-static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
- u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+ u32 *retval, u32 *time)
{
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
enum bpf_cgroup_storage_type stype;
u64 time_start, time_spent = 0;
+ int ret = 0;
u32 i;
for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
if (!repeat)
repeat = 1;
+
+ rcu_read_lock();
+ preempt_disable();
time_start = ktime_get_ns();
for (i = 0; i < repeat; i++) {
- *ret = bpf_test_run_one(prog, ctx, storage);
+ bpf_cgroup_storage_set(storage);
+ *retval = BPF_PROG_RUN(prog, ctx);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
if (need_resched()) {
- if (signal_pending(current))
- break;
time_spent += ktime_get_ns() - time_start;
+ preempt_enable();
+ rcu_read_unlock();
+
cond_resched();
+
+ rcu_read_lock();
+ preempt_disable();
time_start = ktime_get_ns();
}
}
time_spent += ktime_get_ns() - time_start;
+ preempt_enable();
+ rcu_read_unlock();
+
do_div(time_spent, repeat);
*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
for_each_cgroup_storage_type(stype)
bpf_cgroup_storage_free(storage[stype]);
- return 0;
+ return ret;
}
static int bpf_test_finish(const union bpf_attr *kattr,
@@ -240,3 +243,99 @@ out:
kfree(data);
return ret;
}
+
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ u32 size = kattr->test.data_size_in;
+ u32 repeat = kattr->test.repeat;
+ struct bpf_flow_keys flow_keys;
+ u64 time_start, time_spent = 0;
+ struct bpf_skb_data_end *cb;
+ u32 retval, duration;
+ struct sk_buff *skb;
+ struct sock *sk;
+ void *data;
+ int ret;
+ u32 i;
+
+ if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
+ return -EINVAL;
+
+ data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ sk = kzalloc(sizeof(*sk), GFP_USER);
+ if (!sk) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ sock_net_set(sk, current->nsproxy->net_ns);
+ sock_init_data(NULL, sk);
+
+ skb = build_skb(data, 0);
+ if (!skb) {
+ kfree(data);
+ kfree(sk);
+ return -ENOMEM;
+ }
+ skb->sk = sk;
+
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ __skb_put(skb, size);
+ skb->protocol = eth_type_trans(skb,
+ current->nsproxy->net_ns->loopback_dev);
+ skb_reset_network_header(skb);
+
+ cb = (struct bpf_skb_data_end *)skb->cb;
+ cb->qdisc_cb.flow_keys = &flow_keys;
+
+ if (!repeat)
+ repeat = 1;
+
+ rcu_read_lock();
+ preempt_disable();
+ time_start = ktime_get_ns();
+ for (i = 0; i < repeat; i++) {
+ retval = __skb_flow_bpf_dissect(prog, skb,
+ &flow_keys_dissector,
+ &flow_keys);
+
+ if (signal_pending(current)) {
+ preempt_enable();
+ rcu_read_unlock();
+
+ ret = -EINTR;
+ goto out;
+ }
+
+ if (need_resched()) {
+ time_spent += ktime_get_ns() - time_start;
+ preempt_enable();
+ rcu_read_unlock();
+
+ cond_resched();
+
+ rcu_read_lock();
+ preempt_disable();
+ time_start = ktime_get_ns();
+ }
+ }
+ time_spent += ktime_get_ns() - time_start;
+ preempt_enable();
+ rcu_read_unlock();
+
+ do_div(time_spent, repeat);
+ duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+
+ ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+ retval, duration);
+
+out:
+ kfree_skb(skb);
+ kfree(sk);
+ return ret;
+}
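The reworked bpf_test_run() loop keeps RCU and preemption disabled only while the program runs, stops the clock around cond_resched(), and bails out with -EINTR on a pending signal. A userspace approximation of that timing bookkeeping; clock_gettime() and sched_yield() stand in for the kernel primitives, and the signal check is only noted in a comment.

#define _POSIX_C_SOURCE 200809L
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static volatile int work_sink;

static void run_once(void)
{
	for (int i = 0; i < 100000; i++)	/* the "program under test" */
		work_sink += i;
}

int main(void)
{
	uint64_t time_start, time_spent = 0;
	unsigned int repeat = 1000;

	time_start = now_ns();
	for (unsigned int i = 0; i < repeat; i++) {
		run_once();
		/* a pending-signal check (signal_pending() in the kernel)
		 * would abort here with -EINTR */

		/* Periodically stop the clock, yield, and restart it so the
		 * pause is not billed to the average -- the same
		 * time_spent/time_start bookkeeping as in the hunk above. */
		if ((i & 0xff) == 0xff) {
			time_spent += now_ns() - time_start;
			sched_yield();
			time_start = now_ns();
		}
	}
	time_spent += now_ns() - time_start;
	printf("avg %llu ns per run\n",
	       (unsigned long long)(time_spent / repeat));
	return 0;
}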
diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile
index 0947ee7f70d5..854395fb98cd 100644
--- a/net/bpfilter/Makefile
+++ b/net/bpfilter/Makefile
@@ -5,7 +5,7 @@
hostprogs-y := bpfilter_umh
bpfilter_umh-objs := main.o
-KBUILD_HOSTCFLAGS += -I. -Itools/include/ -Itools/include/uapi
+KBUILD_HOSTCFLAGS += -Itools/include/ -Itools/include/uapi
HOSTCC := $(CC)
ifeq ($(CONFIG_BPFILTER_UMH), y)
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index 7acfc83087d5..7ee4fea93637 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -13,39 +13,24 @@
extern char bpfilter_umh_start;
extern char bpfilter_umh_end;
-static struct umh_info info;
-/* since ip_getsockopt() can run in parallel, serialize access to umh */
-static DEFINE_MUTEX(bpfilter_lock);
-
-static void shutdown_umh(struct umh_info *info)
+static void shutdown_umh(void)
{
struct task_struct *tsk;
- if (!info->pid)
+ if (bpfilter_ops.stop)
return;
- tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
+
+ tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID);
if (tsk) {
force_sig(SIGKILL, tsk);
put_task_struct(tsk);
}
- fput(info->pipe_to_umh);
- fput(info->pipe_from_umh);
- info->pid = 0;
}
static void __stop_umh(void)
{
- if (IS_ENABLED(CONFIG_INET)) {
- bpfilter_process_sockopt = NULL;
- shutdown_umh(&info);
- }
-}
-
-static void stop_umh(void)
-{
- mutex_lock(&bpfilter_lock);
- __stop_umh();
- mutex_unlock(&bpfilter_lock);
+ if (IS_ENABLED(CONFIG_INET))
+ shutdown_umh();
}
static int __bpfilter_process_sockopt(struct sock *sk, int optname,
@@ -63,10 +48,10 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
req.cmd = optname;
req.addr = (long __force __user)optval;
req.len = optlen;
- mutex_lock(&bpfilter_lock);
- if (!info.pid)
+ if (!bpfilter_ops.info.pid)
goto out;
- n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos);
+ n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
+ &pos);
if (n != sizeof(req)) {
pr_err("write fail %zd\n", n);
__stop_umh();
@@ -74,7 +59,8 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
goto out;
}
pos = 0;
- n = kernel_read(info.pipe_from_umh, &reply, sizeof(reply), &pos);
+ n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply),
+ &pos);
if (n != sizeof(reply)) {
pr_err("read fail %zd\n", n);
__stop_umh();
@@ -83,37 +69,59 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
}
ret = reply.status;
out:
- mutex_unlock(&bpfilter_lock);
return ret;
}
-static int __init load_umh(void)
+static int start_umh(void)
{
int err;
/* fork usermode process */
- info.cmdline = "bpfilter_umh";
err = fork_usermode_blob(&bpfilter_umh_start,
&bpfilter_umh_end - &bpfilter_umh_start,
- &info);
+ &bpfilter_ops.info);
if (err)
return err;
- pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
+ bpfilter_ops.stop = false;
+ pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid);
/* health check that usermode process started correctly */
if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
- stop_umh();
+ shutdown_umh();
return -EFAULT;
}
- if (IS_ENABLED(CONFIG_INET))
- bpfilter_process_sockopt = &__bpfilter_process_sockopt;
return 0;
}
+static int __init load_umh(void)
+{
+ int err;
+
+ mutex_lock(&bpfilter_ops.lock);
+ if (!bpfilter_ops.stop) {
+ err = -EFAULT;
+ goto out;
+ }
+ err = start_umh();
+ if (!err && IS_ENABLED(CONFIG_INET)) {
+ bpfilter_ops.sockopt = &__bpfilter_process_sockopt;
+ bpfilter_ops.start = &start_umh;
+ }
+out:
+ mutex_unlock(&bpfilter_ops.lock);
+ return err;
+}
+
static void __exit fini_umh(void)
{
- stop_umh();
+ mutex_lock(&bpfilter_ops.lock);
+ if (IS_ENABLED(CONFIG_INET)) {
+ shutdown_umh();
+ bpfilter_ops.start = NULL;
+ bpfilter_ops.sockopt = NULL;
+ }
+ mutex_unlock(&bpfilter_ops.lock);
}
module_init(load_umh);
module_exit(fini_umh);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
index 40311d10d2f2..9ea6100dca87 100644
--- a/net/bpfilter/bpfilter_umh_blob.S
+++ b/net/bpfilter/bpfilter_umh_blob.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
- .section .init.rodata, "a"
+ .section .rodata, "a"
.global bpfilter_umh_start
bpfilter_umh_start:
.incbin "net/bpfilter/bpfilter_umh"
diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
index 1317f108df8a..61ce8454a88e 100644
--- a/net/bpfilter/main.c
+++ b/net/bpfilter/main.c
@@ -6,7 +6,7 @@
#include <sys/socket.h>
#include <fcntl.h>
#include <unistd.h>
-#include "include/uapi/linux/bpf.h"
+#include "../../include/uapi/linux/bpf.h"
#include <asm/unistd.h>
#include "msgfmt.h"
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index fe3c758791ca..00573cc46c98 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -915,7 +915,8 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
/* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 vid, u16 nlh_flags)
+ const unsigned char *addr, u16 vid, u16 nlh_flags,
+ struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
@@ -1128,6 +1129,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
err = -ENOMEM;
goto err_unlock;
}
+ if (swdev_notify)
+ fdb->added_by_user = 1;
fdb->added_by_external_learn = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
@@ -1147,6 +1150,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}
+ if (swdev_notify)
+ fdb->added_by_user = 1;
+
if (modified)
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 5372e2042adf..48ddc60b4fbd 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
goto drop;
- skb_push(skb, ETH_HLEN);
br_drop_fake_rtable(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
@@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ skb->tstamp = 0;
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
net, sk, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
@@ -97,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
net = dev_net(indev);
} else {
if (unlikely(netpoll_tx_running(to->br->dev))) {
- if (!is_skb_forwardable(skb->dev, skb)) {
+ skb_push(skb, ETH_HLEN);
+ if (!is_skb_forwardable(skb->dev, skb))
kfree_skb(skb);
- } else {
- skb_push(skb, ETH_HLEN);
+ else
br_netpoll_send_skb(to, skb);
- }
return;
}
br_hook = NF_BR_LOCAL_OUT;
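br_dev_queue_push_xmit() now pushes the Ethernet header back before calling is_skb_forwardable(), so the length check covers the whole frame rather than just the payload. A toy version of why the ordering matters; the device limit used here is a simplification of what is_skb_forwardable() actually allows.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ETH_HLEN 14

/* Forwardable if the complete frame fits the device limit (device MTU plus
 * link-layer header, roughly the bound is_skb_forwardable() applies). */
static bool forwardable(size_t frame_len, size_t mtu)
{
	return frame_len <= mtu + ETH_HLEN;
}

int main(void)
{
	size_t payload = 1500 + 10;	/* oversized payload */
	size_t mtu = 1500;

	/* checking the payload alone is too lenient once the header is added back */
	printf("payload-only check: %d\n", forwardable(payload, mtu));
	printf("full-frame check:   %d\n", forwardable(payload + ETH_HLEN, mtu));
	return 0;
}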
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3aeff0895669..a0e369179f6d 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
+#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
@@ -29,6 +30,7 @@
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
@@ -938,7 +940,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
for (i = 0; i < num; i++) {
len += sizeof(*grec);
- if (!pskb_may_pull(skb, len))
+ if (!ip_mc_may_pull(skb, len))
return -EINVAL;
grec = (void *)(skb->data + len - sizeof(*grec));
@@ -946,7 +948,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
type = grec->grec_type;
len += ntohs(grec->grec_nsrcs) * 4;
- if (!pskb_may_pull(skb, len))
+ if (!ip_mc_may_pull(skb, len))
return -EINVAL;
/* We treat this as an IGMPv2 report for now. */
@@ -985,15 +987,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ unsigned int nsrcs_offset;
const unsigned char *src;
struct icmp6hdr *icmp6h;
struct mld2_grec *grec;
+ unsigned int grec_len;
int i;
int len;
int num;
int err = 0;
- if (!pskb_may_pull(skb, sizeof(*icmp6h)))
+ if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
return -EINVAL;
icmp6h = icmp6_hdr(skb);
@@ -1003,21 +1007,24 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
for (i = 0; i < num; i++) {
__be16 *nsrcs, _nsrcs;
- nsrcs = skb_header_pointer(skb,
- len + offsetof(struct mld2_grec,
- grec_nsrcs),
+ nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
+
+ if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
+ nsrcs_offset + sizeof(_nsrcs))
+ return -EINVAL;
+
+ nsrcs = skb_header_pointer(skb, nsrcs_offset,
sizeof(_nsrcs), &_nsrcs);
if (!nsrcs)
return -EINVAL;
- if (!pskb_may_pull(skb,
- len + sizeof(*grec) +
- sizeof(struct in6_addr) * ntohs(*nsrcs)))
+ grec_len = struct_size(grec, grec_src, ntohs(*nsrcs));
+
+ if (!ipv6_mc_may_pull(skb, len + grec_len))
return -EINVAL;
grec = (struct mld2_grec *)(skb->data + len);
- len += sizeof(*grec) +
- sizeof(struct in6_addr) * ntohs(*nsrcs);
+ len += grec_len;
/* We treat these as MLDv1 reports for now. */
switch (grec->grec_type) {
@@ -1204,14 +1211,7 @@ static void br_multicast_query_received(struct net_bridge *br,
return;
br_multicast_update_query_timer(br, query, max_delay);
-
- /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
- * the arrival port for IGMP Queries where the source address
- * is 0.0.0.0 should not be added to router port list.
- */
- if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
- saddr->proto == htons(ETH_P_IPV6))
- br_multicast_mark_router(br, port);
+ br_multicast_mark_router(br, port);
}
static void br_ip4_multicast_query(struct net_bridge *br,
@@ -1219,6 +1219,7 @@ static void br_ip4_multicast_query(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ unsigned int transport_len = ip_transport_len(skb);
const struct iphdr *iph = ip_hdr(skb);
struct igmphdr *ih = igmp_hdr(skb);
struct net_bridge_mdb_entry *mp;
@@ -1228,7 +1229,6 @@ static void br_ip4_multicast_query(struct net_bridge *br,
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
- unsigned int offset = skb_transport_offset(skb);
__be32 group;
spin_lock(&br->multicast_lock);
@@ -1238,14 +1238,14 @@ static void br_ip4_multicast_query(struct net_bridge *br,
group = ih->group;
- if (skb->len == offset + sizeof(*ih)) {
+ if (transport_len == sizeof(*ih)) {
max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
if (!max_delay) {
max_delay = 10 * HZ;
group = 0;
}
- } else if (skb->len >= offset + sizeof(*ih3)) {
+ } else if (transport_len >= sizeof(*ih3)) {
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs)
goto out;
@@ -1296,6 +1296,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ unsigned int transport_len = ipv6_transport_len(skb);
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld_msg *mld;
struct net_bridge_mdb_entry *mp;
@@ -1315,7 +1316,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- if (skb->len == offset + sizeof(*mld)) {
+ if (transport_len == sizeof(*mld)) {
if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
err = -EINVAL;
goto out;
@@ -1576,17 +1577,29 @@ static void br_multicast_pim(struct net_bridge *br,
br_multicast_mark_router(br, port);
}
+static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
+ igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
+ return -ENOMSG;
+
+ br_multicast_mark_router(br, port);
+
+ return 0;
+}
+
static int br_multicast_ipv4_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
- struct sk_buff *skb_trimmed = NULL;
const unsigned char *src;
struct igmphdr *ih;
int err;
- err = ip_mc_check_igmp(skb, &skb_trimmed);
+ err = ip_mc_check_igmp(skb);
if (err == -ENOMSG) {
if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
@@ -1594,7 +1607,10 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
if (ip_hdr(skb)->protocol == IPPROTO_PIM)
br_multicast_pim(br, port, skb);
+ } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
+ br_ip4_multicast_mrd_rcv(br, port, skb);
}
+
return 0;
} else if (err < 0) {
br_multicast_err_count(br, port, skb->protocol);
@@ -1612,19 +1628,16 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
- err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
+ err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
- br_ip4_multicast_query(br, port, skb_trimmed, vid);
+ br_ip4_multicast_query(br, port, skb, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
break;
}
- if (skb_trimmed && skb_trimmed != skb)
- kfree_skb(skb_trimmed);
-
br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
BR_MCAST_DIR_RX);
@@ -1632,21 +1645,51 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
}
#if IS_ENABLED(CONFIG_IPV6)
+static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
+ return -ENOMSG;
+
+ ret = ipv6_mc_check_icmpv6(skb);
+ if (ret < 0)
+ return ret;
+
+ if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
+ return -ENOMSG;
+
+ br_multicast_mark_router(br, port);
+
+ return 0;
+}
+
static int br_multicast_ipv6_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
- struct sk_buff *skb_trimmed = NULL;
const unsigned char *src;
struct mld_msg *mld;
int err;
- err = ipv6_mc_check_mld(skb, &skb_trimmed);
+ err = ipv6_mc_check_mld(skb);
if (err == -ENOMSG) {
if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
+
+ if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
+ err = br_ip6_multicast_mrd_rcv(br, port, skb);
+
+ if (err < 0 && err != -ENOMSG) {
+ br_multicast_err_count(br, port, skb->protocol);
+ return err;
+ }
+ }
+
return 0;
} else if (err < 0) {
br_multicast_err_count(br, port, skb->protocol);
@@ -1664,10 +1707,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
src);
break;
case ICMPV6_MLD2_REPORT:
- err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
+ err = br_ip6_multicast_mld2_report(br, port, skb, vid);
break;
case ICMPV6_MGM_QUERY:
- err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
+ err = br_ip6_multicast_query(br, port, skb, vid);
break;
case ICMPV6_MGM_REDUCTION:
src = eth_hdr(skb)->h_source;
@@ -1675,9 +1718,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
break;
}
- if (skb_trimmed && skb_trimmed != skb)
- kfree_skb(skb_trimmed);
-
br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
BR_MCAST_DIR_RX);
@@ -1781,6 +1821,68 @@ void br_multicast_init(struct net_bridge *br)
INIT_HLIST_HEAD(&br->mdb_list);
}
+static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
+{
+ struct in_device *in_dev = in_dev_get(br->dev);
+
+ if (!in_dev)
+ return;
+
+ __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
+ in_dev_put(in_dev);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
+{
+ struct in6_addr addr;
+
+ ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
+ ipv6_dev_mc_inc(br->dev, &addr);
+}
+#else
+static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
+{
+}
+#endif
+
+static void br_multicast_join_snoopers(struct net_bridge *br)
+{
+ br_ip4_multicast_join_snoopers(br);
+ br_ip6_multicast_join_snoopers(br);
+}
+
+static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
+{
+ struct in_device *in_dev = in_dev_get(br->dev);
+
+ if (WARN_ON(!in_dev))
+ return;
+
+ __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
+ in_dev_put(in_dev);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
+{
+ struct in6_addr addr;
+
+ ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
+ ipv6_dev_mc_dec(br->dev, &addr);
+}
+#else
+static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
+{
+}
+#endif
+
+static void br_multicast_leave_snoopers(struct net_bridge *br)
+{
+ br_ip4_multicast_leave_snoopers(br);
+ br_ip6_multicast_leave_snoopers(br);
+}
+
static void __br_multicast_open(struct net_bridge *br,
struct bridge_mcast_own_query *query)
{
@@ -1794,6 +1896,9 @@ static void __br_multicast_open(struct net_bridge *br,
void br_multicast_open(struct net_bridge *br)
{
+ if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ br_multicast_join_snoopers(br);
+
__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
__br_multicast_open(br, &br->ip6_own_query);
@@ -1809,6 +1914,9 @@ void br_multicast_stop(struct net_bridge *br)
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
+
+ if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ br_multicast_leave_snoopers(br);
}
void br_multicast_dev_del(struct net_bridge *br)
@@ -1944,8 +2052,10 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
br_mc_disabled_update(br->dev, val);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
- if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
+ br_multicast_leave_snoopers(br);
goto unlock;
+ }
if (!netif_running(br->dev))
goto unlock;
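br_multicast_join_snoopers() subscribes the bridge to the "all snoopers" groups defined for Multicast Router Discovery (RFC 4286), so the advertisements handled by the new *_mrd_rcv() helpers are actually delivered: INADDR_ALLSNOOPERS_GROUP is 224.0.0.106 and the ipv6_addr_set() call above builds ff02::6a. A short check of those constants, purely for illustration.

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* IPv4 all-snoopers group: 0xe000006a == 224.0.0.106 */
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	v4.s_addr = htonl(0xe000006aU);

	/* IPv6 all-snoopers group, byte for byte what
	 * ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a)) produces */
	memset(&v6, 0, sizeof(v6));
	v6.s6_addr[0] = 0xff;
	v6.s6_addr[1] = 0x02;
	v6.s6_addr[15] = 0x6a;

	printf("%s\n", inet_ntoa(v4));					/* 224.0.0.106 */
	printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));	/* ff02::6a */
	return 0;
}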
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index d21a23698410..9d34de68571b 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -265,7 +265,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
int ret;
- if (neigh->hh.hh_len) {
+ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
neigh_hh_bridge(&neigh->hh, skb);
skb->dev = nf_bridge->physindev;
ret = br_handle_frame_finish(net, sk, skb);
@@ -831,7 +831,8 @@ static unsigned int ip_sabotage_in(void *priv,
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
if (nf_bridge && !nf_bridge->in_prerouting &&
- !netif_is_l3_master(skb->dev)) {
+ !netif_is_l3_master(skb->dev) &&
+ !netif_is_l3_slave(skb->dev)) {
state->okfn(state->net, state->sk, skb);
return NF_STOLEN;
}
@@ -881,11 +882,6 @@ static const struct nf_br_ops br_ops = {
.br_dev_xmit_hook = br_nf_dev_xmit,
};
-void br_netfilter_enable(void)
-{
-}
-EXPORT_SYMBOL_GPL(br_netfilter_enable);
-
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* br_dev_queue_push_xmit is called afterwards */
static const struct nf_hook_ops br_nf_ops[] = {
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 94039f588f1d..564710f88f93 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
IPSTATS_MIB_INDISCARDS);
goto drop;
}
+ hdr = ipv6_hdr(skb);
}
if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
goto drop;
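The added hdr = ipv6_hdr(skb) matters because pskb_may_pull() can reallocate the skb header data, leaving the pointer fetched earlier in br_validate_ipv6() dangling. The same hazard exists with any reallocating API; a userspace sketch with realloc(), purely illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	char *data;
	size_t len;
};

/* Grow the buffer; like pskb_may_pull(), this may move the data, so any
 * pointer computed from the old pkt->data must be re-derived afterwards. */
static int pkt_expand(struct pkt *p, size_t newlen)
{
	char *nd = realloc(p->data, newlen);

	if (!nd)
		return -1;
	p->data = nd;
	p->len = newlen;
	return 0;
}

int main(void)
{
	struct pkt p = { .data = malloc(8), .len = 8 };
	char *hdr;

	if (!p.data)
		return 1;
	memcpy(p.data, "IPv6hdr", 8);

	hdr = p.data;			/* cached header pointer */
	if (pkt_expand(&p, 4096))
		return 1;
	hdr = p.data;			/* must reload: the old pointer may be stale */

	printf("%s\n", hdr);
	free(p.data);
	return 0;
}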
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d240b3e7919f..00deef7fc1f3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -107,6 +107,7 @@ struct br_tunnel_info {
/* private vlan flags */
enum {
BR_VLFLAG_PER_PORT_STATS = BIT(0),
+ BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1),
};
/**
@@ -572,7 +573,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid);
int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
- const unsigned char *addr, u16 vid, u16 nlh_flags);
+ const unsigned char *addr, u16 vid, u16 nlh_flags,
+ struct netlink_ext_ack *extack);
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, struct net_device *fdev, int *idx);
int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 035ff59d9cbd..921310d3cbae 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -14,7 +14,7 @@ static int br_switchdev_mark_get(struct net_bridge *br, struct net_device *dev)
/* dev is yet to be added to the port list. */
list_for_each_entry(p, &br->port_list, list) {
- if (switchdev_port_same_parent_id(dev, p->dev))
+ if (netdev_port_same_parent_id(dev, p->dev))
return p->offload_fwd_mark;
}
@@ -23,15 +23,12 @@ static int br_switchdev_mark_get(struct net_bridge *br, struct net_device *dev)
int nbp_switchdev_mark_set(struct net_bridge_port *p)
{
- struct switchdev_attr attr = {
- .orig_dev = p->dev,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
+ struct netdev_phys_item_id ppid = { };
int err;
ASSERT_RTNL();
- err = switchdev_port_attr_get(p->dev, &attr);
+ err = dev_get_port_parent_id(p->dev, &ppid, true);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
@@ -67,21 +64,25 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
- .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
+ .id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,
+ .u.brport_flags = mask,
+ };
+ struct switchdev_notifier_port_attr_info info = {
+ .attr = &attr,
};
int err;
if (mask & ~BR_PORT_FLAGS_HW_OFFLOAD)
return 0;
- err = switchdev_port_attr_get(p->dev, &attr);
+ /* We run from atomic context here */
+ err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
+ &info.info, NULL);
+ err = notifier_to_errno(err);
if (err == -EOPNOTSUPP)
return 0;
- if (err)
- return err;
- /* Check if specific bridge flag attribute offload is supported */
- if (!(attr.u.brport_flags_support & mask)) {
+ if (err) {
br_warn(p->br, "bridge flag offload is not supported %u(%s)\n",
(unsigned int)p->port_no, p->dev->name);
return -EOPNOTSUPP;
@@ -90,6 +91,7 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
attr.flags = SWITCHDEV_F_DEFER;
attr.u.brport_flags = flags;
+
err = switchdev_port_attr_set(p->dev, &attr);
if (err) {
br_warn(p->br, "error setting offload flag on port %u(%s)\n",
@@ -113,7 +115,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
info.added_by_user = added_by_user;
info.offloaded = offloaded;
notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
- call_switchdev_notifiers(notifier_type, dev, &info.info);
+ call_switchdev_notifiers(notifier_type, dev, &info.info, NULL);
}
void
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4a2f31157ef5..96abf8feb9dc 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -80,16 +80,18 @@ static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
- u16 vid, u16 flags, struct netlink_ext_ack *extack)
+ struct net_bridge_vlan *v, u16 flags,
+ struct netlink_ext_ack *extack)
{
int err;
/* Try switchdev op first. In case it is not supported, fallback to
* 8021q add.
*/
- err = br_switchdev_port_vlan_add(dev, vid, flags, extack);
+ err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
if (err == -EOPNOTSUPP)
- return vlan_vid_add(dev, br->vlan_proto, vid);
+ return vlan_vid_add(dev, br->vlan_proto, v->vid);
+ v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
return err;
}
@@ -121,19 +123,17 @@ static void __vlan_del_list(struct net_bridge_vlan *v)
}
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
- u16 vid)
+ const struct net_bridge_vlan *v)
{
int err;
/* Try switchdev op first. In case it is not supported, fallback to
* 8021q del.
*/
- err = br_switchdev_port_vlan_del(dev, vid);
- if (err == -EOPNOTSUPP) {
- vlan_vid_del(dev, br->vlan_proto, vid);
- return 0;
- }
- return err;
+ err = br_switchdev_port_vlan_del(dev, v->vid);
+ if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
+ vlan_vid_del(dev, br->vlan_proto, v->vid);
+ return err == -EOPNOTSUPP ? 0 : err;
}
/* Returns a master vlan, if it didn't exist it gets created. In all cases a
@@ -242,7 +242,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
* This ensures tagged traffic enters the bridge when
* promiscuous mode is disabled by br_manage_promisc().
*/
- err = __vlan_vid_add(dev, br, v->vid, flags, extack);
+ err = __vlan_vid_add(dev, br, v, flags, extack);
if (err)
goto out;
@@ -305,7 +305,7 @@ out_fdb_insert:
out_filt:
if (p) {
- __vlan_vid_del(dev, br, v->vid);
+ __vlan_vid_del(dev, br, v);
if (masterv) {
if (v->stats && masterv->stats != v->stats)
free_percpu(v->stats);
@@ -338,7 +338,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
__vlan_delete_pvid(vg, v->vid);
if (p) {
- err = __vlan_vid_del(p->dev, p->br, v->vid);
+ err = __vlan_vid_del(p->dev, p->br, v);
if (err)
goto out;
} else {
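The new BR_VLFLAG_ADDED_BY_SWITCHDEV flag records whether the switchdev path accepted the VLAN, so __vlan_vid_del() only runs the 8021q fallback when that fallback was used for the add. A generic sketch of that pairing; all names and the -95 stand-in for -EOPNOTSUPP are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define ERR_NOT_SUPPORTED	(-95)	/* stand-in for -EOPNOTSUPP */

struct vlan_entry {
	int vid;
	bool added_by_offload;	/* set when the offload path handled the add */
};

static int offload_add(int vid)
{
	(void)vid;
	return ERR_NOT_SUPPORTED;	/* pretend the hardware cannot offload */
}

static int offload_del(int vid)
{
	(void)vid;
	return ERR_NOT_SUPPORTED;
}

static void fallback_add(int vid)
{
	printf("8021q add vid %d\n", vid);
}

static void fallback_del(int vid)
{
	printf("8021q del vid %d\n", vid);
}

static int vlan_add(struct vlan_entry *v)
{
	int err = offload_add(v->vid);

	if (err == ERR_NOT_SUPPORTED) {		/* not offloaded: use the fallback */
		fallback_add(v->vid);
		return 0;
	}
	v->added_by_offload = true;		/* remember who owns the entry */
	return err;
}

static int vlan_del(struct vlan_entry *v)
{
	int err = offload_del(v->vid);

	if (!v->added_by_offload)		/* only undo what vlan_add() really did */
		fallback_del(v->vid);
	return err == ERR_NOT_SUPPORTED ? 0 : err;
}

int main(void)
{
	struct vlan_entry v = { .vid = 10, .added_by_offload = false };

	vlan_add(&v);
	vlan_del(&v);
	return 0;
}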
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 491828713e0b..eb15891f8b9f 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -31,10 +31,6 @@
/* needed for logical [in,out]-dev filtering */
#include "../br_private.h"
-#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
- "report to author: "format, ## args)
-/* #define BUGPRINT(format, args...) */
-
/* Each cpu has its own set of counters, so there is no need for write_lock in
* the softirq
* For reading or updating the counters, the user context needs to
@@ -385,7 +381,7 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
par->match = match;
par->matchinfo = m->data;
ret = xt_check_match(par, m->match_size,
- e->ethproto, e->invflags & EBT_IPROTO);
+ ntohs(e->ethproto), e->invflags & EBT_IPROTO);
if (ret < 0) {
module_put(match->me);
return ret;
@@ -422,7 +418,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
par->target = watcher;
par->targinfo = w->data;
ret = xt_check_target(par, w->watcher_size,
- e->ethproto, e->invflags & EBT_IPROTO);
+ ntohs(e->ethproto), e->invflags & EBT_IPROTO);
if (ret < 0) {
module_put(watcher->me);
return ret;
@@ -466,8 +462,6 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
/* we make userspace set this right,
* so there is no misunderstanding
*/
- BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
- "in distinguisher\n");
return -EINVAL;
}
if (i != NF_BR_NUMHOOKS)
@@ -485,18 +479,14 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
offset += e->next_offset;
}
}
- if (offset != limit) {
- BUGPRINT("entries_size too small\n");
+ if (offset != limit)
return -EINVAL;
- }
/* check if all valid hooks have a chain */
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
if (!newinfo->hook_entry[i] &&
- (valid_hooks & (1 << i))) {
- BUGPRINT("Valid hook without chain\n");
+ (valid_hooks & (1 << i)))
return -EINVAL;
- }
}
return 0;
}
@@ -523,26 +513,20 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
/* this checks if the previous chain has as many entries
* as it said it has
*/
- if (*n != *cnt) {
- BUGPRINT("nentries does not equal the nr of entries "
- "in the chain\n");
+ if (*n != *cnt)
return -EINVAL;
- }
+
if (((struct ebt_entries *)e)->policy != EBT_DROP &&
((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
/* only RETURN from udc */
if (i != NF_BR_NUMHOOKS ||
- ((struct ebt_entries *)e)->policy != EBT_RETURN) {
- BUGPRINT("bad policy\n");
+ ((struct ebt_entries *)e)->policy != EBT_RETURN)
return -EINVAL;
- }
}
if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
(*udc_cnt)++;
- if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
- BUGPRINT("counter_offset != totalcnt");
+ if (((struct ebt_entries *)e)->counter_offset != *totalcnt)
return -EINVAL;
- }
*n = ((struct ebt_entries *)e)->nentries;
*cnt = 0;
return 0;
@@ -550,15 +534,13 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
/* a plain old entry, heh */
if (sizeof(struct ebt_entry) > e->watchers_offset ||
e->watchers_offset > e->target_offset ||
- e->target_offset >= e->next_offset) {
- BUGPRINT("entry offsets not in right order\n");
+ e->target_offset >= e->next_offset)
return -EINVAL;
- }
+
/* this is not checked anywhere else */
- if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
- BUGPRINT("target size too small\n");
+ if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target))
return -EINVAL;
- }
+
(*cnt)++;
(*totalcnt)++;
return 0;
@@ -678,18 +660,15 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
if (e->bitmask == 0)
return 0;
- if (e->bitmask & ~EBT_F_MASK) {
- BUGPRINT("Unknown flag for bitmask\n");
+ if (e->bitmask & ~EBT_F_MASK)
return -EINVAL;
- }
- if (e->invflags & ~EBT_INV_MASK) {
- BUGPRINT("Unknown flag for inv bitmask\n");
+
+ if (e->invflags & ~EBT_INV_MASK)
return -EINVAL;
- }
- if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
- BUGPRINT("NOPROTO & 802_3 not allowed\n");
+
+ if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3))
return -EINVAL;
- }
+
/* what hook do we belong to? */
for (i = 0; i < NF_BR_NUMHOOKS; i++) {
if (!newinfo->hook_entry[i])
@@ -748,13 +727,11 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
t->u.target = target;
if (t->u.target == &ebt_standard_target) {
if (gap < sizeof(struct ebt_standard_target)) {
- BUGPRINT("Standard target size too big\n");
ret = -EFAULT;
goto cleanup_watchers;
}
if (((struct ebt_standard_target *)t)->verdict <
-NUM_STANDARD_TARGETS) {
- BUGPRINT("Invalid standard target\n");
ret = -EFAULT;
goto cleanup_watchers;
}
@@ -767,7 +744,7 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
tgpar.target = target;
tgpar.targinfo = t->data;
ret = xt_check_target(&tgpar, t->target_size,
- e->ethproto, e->invflags & EBT_IPROTO);
+ ntohs(e->ethproto), e->invflags & EBT_IPROTO);
if (ret < 0) {
module_put(target->me);
goto cleanup_watchers;
@@ -813,10 +790,9 @@ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack
if (strcmp(t->u.name, EBT_STANDARD_TARGET))
goto letscontinue;
if (e->target_offset + sizeof(struct ebt_standard_target) >
- e->next_offset) {
- BUGPRINT("Standard target size too big\n");
+ e->next_offset)
return -1;
- }
+
verdict = ((struct ebt_standard_target *)t)->verdict;
if (verdict >= 0) { /* jump to another chain */
struct ebt_entries *hlp2 =
@@ -825,14 +801,12 @@ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack
if (hlp2 == cl_s[i].cs.chaininfo)
break;
/* bad destination or loop */
- if (i == udc_cnt) {
- BUGPRINT("bad destination\n");
+ if (i == udc_cnt)
return -1;
- }
- if (cl_s[i].cs.n) {
- BUGPRINT("loop\n");
+
+ if (cl_s[i].cs.n)
return -1;
- }
+
if (cl_s[i].hookmask & (1 << hooknr))
goto letscontinue;
/* this can't be 0, so the loop test is correct */
@@ -865,24 +839,21 @@ static int translate_table(struct net *net, const char *name,
i = 0;
while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
i++;
- if (i == NF_BR_NUMHOOKS) {
- BUGPRINT("No valid hooks specified\n");
+ if (i == NF_BR_NUMHOOKS)
return -EINVAL;
- }
- if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
- BUGPRINT("Chains don't start at beginning\n");
+
+ if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)
return -EINVAL;
- }
+
/* make sure chains are ordered after each other in same order
* as their corresponding hooks
*/
for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
if (!newinfo->hook_entry[j])
continue;
- if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
- BUGPRINT("Hook order must be followed\n");
+ if (newinfo->hook_entry[j] <= newinfo->hook_entry[i])
return -EINVAL;
- }
+
i = j;
}
@@ -900,15 +871,11 @@ static int translate_table(struct net *net, const char *name,
if (ret != 0)
return ret;
- if (i != j) {
- BUGPRINT("nentries does not equal the nr of entries in the "
- "(last) chain\n");
+ if (i != j)
return -EINVAL;
- }
- if (k != newinfo->nentries) {
- BUGPRINT("Total nentries is wrong\n");
+
+ if (k != newinfo->nentries)
return -EINVAL;
- }
/* get the location of the udc, put them in an array
* while we're at it, allocate the chainstack
@@ -942,7 +909,6 @@ static int translate_table(struct net *net, const char *name,
ebt_get_udc_positions, newinfo, &i, cl_s);
/* sanity check */
if (i != udc_cnt) {
- BUGPRINT("i != udc_cnt\n");
vfree(cl_s);
return -EFAULT;
}
@@ -1042,7 +1008,6 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
goto free_unlock;
if (repl->num_counters && repl->num_counters != t->private->nentries) {
- BUGPRINT("Wrong nr. of counters requested\n");
ret = -EINVAL;
goto free_unlock;
}
@@ -1118,15 +1083,12 @@ static int do_replace(struct net *net, const void __user *user,
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
- if (len != sizeof(tmp) + tmp.entries_size) {
- BUGPRINT("Wrong len argument\n");
+ if (len != sizeof(tmp) + tmp.entries_size)
return -EINVAL;
- }
- if (tmp.entries_size == 0) {
- BUGPRINT("Entries_size never zero\n");
+ if (tmp.entries_size == 0)
return -EINVAL;
- }
+
/* overflow check */
if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
@@ -1137,21 +1099,22 @@ static int do_replace(struct net *net, const void __user *user,
tmp.name[sizeof(tmp.name) - 1] = 0;
countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
- newinfo = vmalloc(sizeof(*newinfo) + countersize);
+ newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
+ PAGE_KERNEL);
if (!newinfo)
return -ENOMEM;
if (countersize)
memset(newinfo->counters, 0, countersize);
- newinfo->entries = vmalloc(tmp.entries_size);
+ newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
+ PAGE_KERNEL);
if (!newinfo->entries) {
ret = -ENOMEM;
goto free_newinfo;
}
if (copy_from_user(
newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
- BUGPRINT("Couldn't copy entries from userspace\n");
ret = -EFAULT;
goto free_entries;
}
@@ -1192,10 +1155,8 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
if (input_table == NULL || (repl = input_table->table) == NULL ||
repl->entries == NULL || repl->entries_size == 0 ||
- repl->counters != NULL || input_table->private != NULL) {
- BUGPRINT("Bad table data for ebt_register_table!!!\n");
+ repl->counters != NULL || input_table->private != NULL)
return -EINVAL;
- }
/* Don't add one table to multiple lists. */
table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
@@ -1233,13 +1194,10 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
((char *)repl->hook_entry[i] - repl->entries);
}
ret = translate_table(net, repl->name, newinfo);
- if (ret != 0) {
- BUGPRINT("Translate_table failed\n");
+ if (ret != 0)
goto free_chainstack;
- }
if (table->check && table->check(newinfo, table->valid_hooks)) {
- BUGPRINT("The table doesn't like its own initial data, lol\n");
ret = -EINVAL;
goto free_chainstack;
}
@@ -1250,7 +1208,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
if (strcmp(t->name, table->name) == 0) {
ret = -EEXIST;
- BUGPRINT("Table name already exists\n");
goto free_unlock;
}
}
@@ -1318,7 +1275,6 @@ static int do_update_counters(struct net *net, const char *name,
goto free_tmp;
if (num_counters != t->private->nentries) {
- BUGPRINT("Wrong nr of counters\n");
ret = -EINVAL;
goto unlock_mutex;
}
@@ -1445,10 +1401,8 @@ static int copy_counters_to_user(struct ebt_table *t,
if (num_counters == 0)
return 0;
- if (num_counters != nentries) {
- BUGPRINT("Num_counters wrong\n");
+ if (num_counters != nentries)
return -EINVAL;
- }
counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
if (!counterstmp)
@@ -1494,15 +1448,11 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
(tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
return -EINVAL;
- if (tmp.nentries != nentries) {
- BUGPRINT("Nentries wrong\n");
+ if (tmp.nentries != nentries)
return -EINVAL;
- }
- if (tmp.entries_size != entries_size) {
- BUGPRINT("Wrong size\n");
+ if (tmp.entries_size != entries_size)
return -EINVAL;
- }
ret = copy_counters_to_user(t, oldcounters, tmp.counters,
tmp.num_counters, nentries);
@@ -1574,7 +1524,6 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
mutex_unlock(&ebt_mutex);
if (copy_to_user(user, &tmp, *len) != 0) {
- BUGPRINT("c2u Didn't work\n");
ret = -EFAULT;
break;
}
@@ -2291,9 +2240,12 @@ static int compat_do_replace(struct net *net, void __user *user,
xt_compat_lock(NFPROTO_BRIDGE);
- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
- if (ret < 0)
- goto out_unlock;
+ if (tmp.nentries) {
+ ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
+ if (ret < 0)
+ goto out_unlock;
+ }
+
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
if (ret < 0)
goto out_unlock;
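/*
 * Editor's illustrative sketch (not part of the patch above): ebtables stores
 * ethproto in network byte order, while the xt_check_* helpers expect a
 * host-order value, hence the ntohs() added in the hunks above. A minimal
 * standalone demo of why the conversion matters on little-endian hosts:
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ethproto_be = htons(0x0800);   /* IPv4 EtherType as stored on the wire */

	/* comparing the raw big-endian value against 0x0800 fails on LE hosts */
	printf("raw   == 0x0800 ? %s\n", ethproto_be == 0x0800 ? "yes" : "no");
	printf("ntohs == 0x0800 ? %s\n", ntohs(ethproto_be) == 0x0800 ? "yes" : "no");
	return 0;
}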
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 08cbed7d940e..1b1856744c80 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -125,13 +125,10 @@ static void nft_reject_br_send_v4_unreach(struct net *net,
if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
return;
- if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
- ip_hdr(oldskb)->protocol == IPPROTO_UDP)
- proto = ip_hdr(oldskb)->protocol;
- else
- proto = 0;
+ proto = ip_hdr(oldskb)->protocol;
if (!skb_csum_unnecessary(oldskb) &&
+ nf_reject_verify_csum(proto) &&
nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
return;
@@ -229,10 +226,14 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
return false;
+ ip6h = ipv6_hdr(skb);
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
+ if (!nf_reject_verify_csum(proto))
+ return true;
+
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
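/*
 * Editor's illustrative sketch (not part of the patch above): instead of
 * special-casing TCP/UDP, the reject paths above now ask
 * nf_reject_verify_csum() whether the extracted protocol has a checksum worth
 * validating before calling the checksum helpers. Standalone model of that
 * gating pattern; the protocol list is chosen for illustration only and does
 * not claim to match the kernel helper exactly.
 */
#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool model_verify_csum(uint8_t proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_ICMP:
		return true;     /* checksum is meaningful, verify it */
	default:
		return false;    /* e.g. no checksum field; skip verification */
	}
}

int main(void)
{
	uint8_t protos[] = { IPPROTO_TCP, IPPROTO_UDP, IPPROTO_GRE };

	for (unsigned i = 0; i < sizeof(protos); i++)
		printf("proto %u: %s\n", protos[i],
		       model_verify_csum(protos[i]) ? "verify csum" : "skip csum check");
	return 0;
}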
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 38c2b7a890dd..37ac5ca0ffdf 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -319,16 +319,12 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
if (tmppkt == NULL)
return NULL;
tmp = pkt_to_skb(tmppkt);
- skb_set_tail_pointer(tmp, dstlen);
- tmp->len = dstlen;
- memcpy(tmp->data, dst->data, dstlen);
+ skb_put_data(tmp, dst->data, dstlen);
cfpkt_destroy(dstpkt);
dst = tmp;
}
- memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add));
+ skb_put_data(dst, add->data, skb_headlen(add));
cfpkt_destroy(addpkt);
- dst->tail += addlen;
- dst->len += addlen;
return skb_to_pkt(dst);
}
@@ -359,13 +355,11 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
if (skb2 == NULL)
return NULL;
+ skb_put_data(skb2, split, len2nd);
+
/* Reduce the length of the original packet */
- skb_set_tail_pointer(skb, pos);
- skb->len = pos;
+ skb_trim(skb, pos);
- memcpy(skb2->data, split, len2nd);
- skb2->tail += len2nd;
- skb2->len += len2nd;
skb2->priority = skb->priority;
return skb_to_pkt(skb2);
}
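/*
 * Editor's illustrative sketch (not part of the patch above): the caif change
 * replaces open-coded "copy + move tail pointer + bump len" with
 * skb_put_data(), which does all three consistently. A standalone buffer
 * model of the same helper pattern; model_buf and buf_put_data are invented
 * names, not kernel APIs.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

struct model_buf {
	unsigned char data[256];
	size_t len;                 /* bytes currently used; "tail" is data + len */
};

/* analogue of skb_put_data(): reserve room at the tail and copy in one step */
static void *buf_put_data(struct model_buf *b, const void *src, size_t n)
{
	void *tail = b->data + b->len;

	assert(b->len + n <= sizeof(b->data));   /* caller guarantees headroom */
	memcpy(tail, src, n);
	b->len += n;                             /* length and tail move together */
	return tail;
}

int main(void)
{
	struct model_buf b = { .len = 0 };

	buf_put_data(&b, "hello ", 6);
	buf_put_data(&b, "caif", 4);
	printf("%.*s (len=%zu)\n", (int)b.len, (char *)b.data, b.len);
	return 0;
}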
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 0af8f0db892a..79bb8afa9c0c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -67,6 +67,9 @@
*/
#define MAX_NFRAMES 256
+/* limit timers to 400 days for sending/timeouts */
+#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
+
/* use of last_frames[index].flags */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}
+/* check limitations for timeval provided by user */
+static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
+{
+ if ((msg_head->ival1.tv_sec < 0) ||
+ (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
+ (msg_head->ival1.tv_usec < 0) ||
+ (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
+ (msg_head->ival2.tv_sec < 0) ||
+ (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
+ (msg_head->ival2.tv_usec < 0) ||
+ (msg_head->ival2.tv_usec >= USEC_PER_SEC))
+ return true;
+
+ return false;
+}
+
#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)
@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
return -EINVAL;
+ /* check timeval limitations */
+ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+ return -EINVAL;
+
/* check the given can_id */
op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
if (op) {
@@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
(!(msg_head->can_id & CAN_RTR_FLAG))))
return -EINVAL;
+ /* check timeval limitations */
+ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+ return -EINVAL;
+
/* check the given can_id */
op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
if (op) {
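/*
 * Editor's illustrative sketch (not part of the patch above): the can/bcm.c
 * hunks cap user-supplied intervals at 400 days and reject negative or
 * out-of-range microsecond fields before arming timers. Standalone model of
 * that range check; names ending in _SIM are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define USEC_PER_SEC_SIM  1000000L
#define TIMER_SEC_MAX_SIM (400L * 24 * 60 * 60)   /* 400 days, as in the patch */

struct model_tv { long tv_sec; long tv_usec; };

static bool tv_is_invalid(const struct model_tv *tv)
{
	return tv->tv_sec < 0 || tv->tv_sec > TIMER_SEC_MAX_SIM ||
	       tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC_SIM;
}

int main(void)
{
	struct model_tv ok  = { 60, 0 };
	struct model_tv bad = { 500L * 24 * 60 * 60, 0 };   /* > 400 days */

	printf("ok:  %s\n", tv_is_invalid(&ok)  ? "rejected" : "accepted");
	printf("bad: %s\n", tv_is_invalid(&bad) ? "rejected" : "accepted");
	return 0;
}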
diff --git a/net/can/gw.c b/net/can/gw.c
index faa3da88a127..53859346dc9a 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
- /* check for checksum updates when the CAN frame has been modified */
+ /* Has the CAN frame been modified? */
if (modidx) {
- if (gwj->mod.csumfunc.crc8)
+ /* get available space for the processed CAN frame type */
+ int max_len = nskb->len - offsetof(struct can_frame, data);
+
+ /* dlc may have changed, make sure it fits the CAN frame */
+ if (cf->can_dlc > max_len)
+ goto out_delete;
+
+ /* check for checksum updates in classic CAN length only */
+ if (gwj->mod.csumfunc.crc8) {
+ if (cf->can_dlc > 8)
+ goto out_delete;
+
(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+ }
+
+ if (gwj->mod.csumfunc.xor) {
+ if (cf->can_dlc > 8)
+ goto out_delete;
- if (gwj->mod.csumfunc.xor)
(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+ }
}
/* clear the skb timestamp if not configured the other way */
@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
gwj->dropped_frames++;
else
gwj->handled_frames++;
+
+ return;
+
+ out_delete:
+ /* delete frame due to misconfiguration */
+ gwj->deleted_frames++;
+ kfree_skb(nskb);
+ return;
}
static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
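/*
 * Editor's illustrative sketch (not part of the patch above): after frame
 * modification, can/gw.c now verifies that the (possibly changed) data length
 * still fits the destination frame and that the classic-CAN checksum helpers
 * only run for lengths <= 8. Standalone model of that bounds check; gw_len_ok
 * is an invented name for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define CLASSIC_CAN_MAX_DLC 8

static bool gw_len_ok(unsigned int dlc, unsigned int max_len, bool wants_csum)
{
	if (dlc > max_len)                            /* modified length no longer fits */
		return false;
	if (wants_csum && dlc > CLASSIC_CAN_MAX_DLC)  /* csum mods are classic-CAN only */
		return false;
	return true;
}

int main(void)
{
	printf("dlc=8,  csum:    %s\n", gw_len_ok(8, 8, true)    ? "forward" : "drop");
	printf("dlc=12, csum:    %s\n", gw_len_ok(12, 64, true)  ? "forward" : "drop");
	printf("dlc=12, no csum: %s\n", gw_len_ok(12, 64, false) ? "forward" : "drop");
	return 0;
}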
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 87afb9ec4c68..9cab80207ced 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -255,6 +255,7 @@ enum {
Opt_nocephx_sign_messages,
Opt_tcp_nodelay,
Opt_notcp_nodelay,
+ Opt_abort_on_full,
};
static match_table_t opt_tokens = {
@@ -280,6 +281,7 @@ static match_table_t opt_tokens = {
{Opt_nocephx_sign_messages, "nocephx_sign_messages"},
{Opt_tcp_nodelay, "tcp_nodelay"},
{Opt_notcp_nodelay, "notcp_nodelay"},
+ {Opt_abort_on_full, "abort_on_full"},
{-1, NULL}
};
@@ -535,6 +537,10 @@ ceph_parse_options(char *options, const char *dev_name,
opt->flags &= ~CEPH_OPT_TCP_NODELAY;
break;
+ case Opt_abort_on_full:
+ opt->flags |= CEPH_OPT_ABORT_ON_FULL;
+ break;
+
default:
BUG_ON(token);
}
@@ -549,7 +555,8 @@ out:
}
EXPORT_SYMBOL(ceph_parse_options);
-int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
+ bool show_all)
{
struct ceph_options *opt = client->options;
size_t pos = m->count;
@@ -574,6 +581,8 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
seq_puts(m, "nocephx_sign_messages,");
if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
seq_puts(m, "notcp_nodelay,");
+ if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL))
+ seq_puts(m, "abort_on_full,");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
seq_printf(m, "mount_timeout=%d,",
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 02952605d121..46f65709a6ff 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -375,7 +375,7 @@ static int client_options_show(struct seq_file *s, void *p)
struct ceph_client *client = s->private;
int ret;
- ret = ceph_print_client_options(s, client);
+ ret = ceph_print_client_options(s, client, true);
if (ret)
return ret;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index d5718284db57..7e71b0df1fbc 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2058,6 +2058,8 @@ static int process_connect(struct ceph_connection *con)
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
if (con->auth) {
+ int len = le32_to_cpu(con->in_reply.authorizer_len);
+
/*
* Any connection that defines ->get_authorizer()
* should also define ->add_authorizer_challenge() and
@@ -2067,8 +2069,7 @@ static int process_connect(struct ceph_connection *con)
*/
if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
ret = con->ops->add_authorizer_challenge(
- con, con->auth->authorizer_reply_buf,
- le32_to_cpu(con->in_reply.authorizer_len));
+ con, con->auth->authorizer_reply_buf, len);
if (ret < 0)
return ret;
@@ -2078,10 +2079,12 @@ static int process_connect(struct ceph_connection *con)
return 0;
}
- ret = con->ops->verify_authorizer_reply(con);
- if (ret < 0) {
- con->error_msg = "bad authorize reply";
- return ret;
+ if (len) {
+ ret = con->ops->verify_authorizer_reply(con);
+ if (ret < 0) {
+ con->error_msg = "bad authorize reply";
+ return ret;
+ }
}
}
@@ -3206,9 +3209,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
dout("con_keepalive %p\n", con);
mutex_lock(&con->mutex);
clear_standby(con);
+ con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
mutex_unlock(&con->mutex);
- if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
- con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
+
+ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d23a9f81f3d7..fa9530dd876e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2315,7 +2315,7 @@ again:
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
pool_full(osdc, req->r_t.base_oloc.pool))) {
dout("req %p full/pool_full\n", req);
- if (osdc->abort_on_full) {
+ if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
err = -ENOSPC;
} else {
pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2545,7 +2545,7 @@ static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
{
bool victims = false;
- if (osdc->abort_on_full &&
+ if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
for_each_request(osdc, abort_on_full_fn, &victims);
}
diff --git a/net/compat.c b/net/compat.c
index 959d1c51826d..eeea5eb71639 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -209,8 +209,8 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
{
struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
struct compat_cmsghdr cmhdr;
- struct compat_timeval ctv;
- struct compat_timespec cts[3];
+ struct old_timeval32 ctv;
+ struct old_timespec32 cts[3];
int cmlen;
if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
@@ -219,16 +219,16 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
}
if (!COMPAT_USE_64BIT_TIME) {
- if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
- struct timeval *tv = (struct timeval *)data;
+ if (level == SOL_SOCKET && type == SO_TIMESTAMP_OLD) {
+ struct __kernel_old_timeval *tv = (struct __kernel_old_timeval *)data;
ctv.tv_sec = tv->tv_sec;
ctv.tv_usec = tv->tv_usec;
data = &ctv;
len = sizeof(ctv);
}
if (level == SOL_SOCKET &&
- (type == SCM_TIMESTAMPNS || type == SCM_TIMESTAMPING)) {
- int count = type == SCM_TIMESTAMPNS ? 1 : 3;
+ (type == SO_TIMESTAMPNS_OLD || type == SO_TIMESTAMPING_OLD)) {
+ int count = type == SO_TIMESTAMPNS_OLD ? 1 : 3;
int i;
struct timespec *ts = (struct timespec *)data;
for (i = 0; i < count; i++) {
@@ -348,28 +348,6 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
sizeof(struct sock_fprog));
}
-static int do_set_sock_timeout(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen)
-{
- struct compat_timeval __user *up = (struct compat_timeval __user *)optval;
- struct timeval ktime;
- mm_segment_t old_fs;
- int err;
-
- if (optlen < sizeof(*up))
- return -EINVAL;
- if (!access_ok(up, sizeof(*up)) ||
- __get_user(ktime.tv_sec, &up->tv_sec) ||
- __get_user(ktime.tv_usec, &up->tv_usec))
- return -EFAULT;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
- set_fs(old_fs);
-
- return err;
-}
-
static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
@@ -377,10 +355,6 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
optname == SO_ATTACH_REUSEPORT_CBPF)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
- if (!COMPAT_USE_64BIT_TIME &&
- (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
- return do_set_sock_timeout(sock, level, optname, optval, optlen);
-
return sock_setsockopt(sock, level, optname, optval, optlen);
}
@@ -388,8 +362,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
- struct socket *sock = sockfd_lookup(fd, &err);
+ struct socket *sock;
+
+ if (optlen > INT_MAX)
+ return -EINVAL;
+ sock = sockfd_lookup(fd, &err);
if (sock) {
err = security_socket_setsockopt(sock, level, optname);
if (err) {
@@ -417,44 +395,6 @@ COMPAT_SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
return __compat_sys_setsockopt(fd, level, optname, optval, optlen);
}
-static int do_get_sock_timeout(struct socket *sock, int level, int optname,
- char __user *optval, int __user *optlen)
-{
- struct compat_timeval __user *up;
- struct timeval ktime;
- mm_segment_t old_fs;
- int len, err;
-
- up = (struct compat_timeval __user *) optval;
- if (get_user(len, optlen))
- return -EFAULT;
- if (len < sizeof(*up))
- return -EINVAL;
- len = sizeof(ktime);
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
- set_fs(old_fs);
-
- if (!err) {
- if (put_user(sizeof(*up), optlen) ||
- !access_ok(up, sizeof(*up)) ||
- __put_user(ktime.tv_sec, &up->tv_sec) ||
- __put_user(ktime.tv_usec, &up->tv_usec))
- err = -EFAULT;
- }
- return err;
-}
-
-static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int __user *optlen)
-{
- if (!COMPAT_USE_64BIT_TIME &&
- (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
- return do_get_sock_timeout(sock, level, optname, optval, optlen);
- return sock_getsockopt(sock, level, optname, optval, optlen);
-}
-
int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
struct compat_timeval __user *ctv;
@@ -527,7 +467,7 @@ static int __compat_sys_getsockopt(int fd, int level, int optname,
}
if (level == SOL_SOCKET)
- err = compat_sock_getsockopt(sock, level,
+ err = sock_getsockopt(sock, level,
optname, optval, optlen);
else if (sock->ops->compat_getsockopt)
err = sock->ops->compat_getsockopt(sock, level,
@@ -585,7 +525,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
- struct compat_group_req __user *gr32 = (void *)optval;
+ struct compat_group_req __user *gr32 = (void __user *)optval;
struct group_req __user *kgr =
compat_alloc_user_space(sizeof(struct group_req));
u32 interface;
@@ -606,7 +546,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
case MCAST_BLOCK_SOURCE:
case MCAST_UNBLOCK_SOURCE:
{
- struct compat_group_source_req __user *gsr32 = (void *)optval;
+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
struct group_source_req __user *kgsr = compat_alloc_user_space(
sizeof(struct group_source_req));
u32 interface;
@@ -627,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
}
case MCAST_MSFILTER:
{
- struct compat_group_filter __user *gf32 = (void *)optval;
+ struct compat_group_filter __user *gf32 = (void __user *)optval;
struct group_filter __user *kgf;
u32 interface, fmode, numsrc;
@@ -665,7 +605,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
char __user *optval, int __user *optlen,
int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
{
- struct compat_group_filter __user *gf32 = (void *)optval;
+ struct compat_group_filter __user *gf32 = (void __user *)optval;
struct group_filter __user *kgf;
int __user *koptlen;
u32 interface, fmode, numsrc;
@@ -822,7 +762,7 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg_time64, int, fd, struct compat_mmsghdr __user *,
}
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+COMPAT_SYSCALL_DEFINE5(recvmmsg_time32, int, fd, struct compat_mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
struct old_timespec32 __user *, timeout)
{
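/*
 * Editor's illustrative sketch (not part of the patch above): the compat.c
 * hunks copy kernel timestamps into 32-bit layouts (old_timeval32 /
 * old_timespec32) field by field for 32-bit userspace. Standalone model of
 * that narrowing copy; the model_timeval* types are invented for
 * illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct model_timeval64 { int64_t tv_sec; int64_t tv_usec; };
struct model_timeval32 { int32_t tv_sec; int32_t tv_usec; };

static struct model_timeval32 to_compat(const struct model_timeval64 *tv)
{
	struct model_timeval32 ctv = {
		.tv_sec  = (int32_t)tv->tv_sec,    /* truncates after year 2038 */
		.tv_usec = (int32_t)tv->tv_usec,
	};
	return ctv;
}

int main(void)
{
	struct model_timeval64 now = { 1700000000, 123456 };
	struct model_timeval32 ctv = to_compat(&now);

	printf("compat: %d.%06d\n", (int)ctv.tv_sec, (int)ctv.tv_usec);
	return 0;
}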
diff --git a/net/core/Makefile b/net/core/Makefile
index fccd31e0e7f7..f97d6254e564 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
- fib_notifier.o xdp.o
+ fib_notifier.o xdp.o flow_offload.o
obj-y += net-sysfs.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 82f20022259d..2b67f2aa59dd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3421,7 +3421,7 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
/* To get more precise estimation of bytes sent on wire,
* we add to pkt_len the headers size of all segments
*/
- if (shinfo->gso_size) {
+ if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
unsigned int hdr_len;
u16 gso_segs = shinfo->gso_segs;
@@ -7878,6 +7878,63 @@ int dev_get_phys_port_name(struct net_device *dev,
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
+ * dev_get_port_parent_id - Get the device's port parent identifier
+ * @dev: network device
+ * @ppid: pointer to storage for the port's parent identifier
+ * @recurse: allow/disallow recursion to lower devices
+ *
+ * Get the device's port parent identifier
+ */
+int dev_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid,
+ bool recurse)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ struct netdev_phys_item_id first = { };
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ if (ops->ndo_get_port_parent_id)
+ return ops->ndo_get_port_parent_id(dev, ppid);
+
+ if (!recurse)
+ return err;
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = dev_get_port_parent_id(lower_dev, ppid, recurse);
+ if (err)
+ break;
+ if (!first.id_len)
+ first = *ppid;
+ else if (memcmp(&first, ppid, sizeof(*ppid)))
+ return -ENODATA;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(dev_get_port_parent_id);
+
+/**
+ * netdev_port_same_parent_id - Indicate if two network devices have
+ * the same port parent identifier
+ * @a: first network device
+ * @b: second network device
+ */
+bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
+{
+ struct netdev_phys_item_id a_id = { };
+ struct netdev_phys_item_id b_id = { };
+
+ if (dev_get_port_parent_id(a, &a_id, true) ||
+ dev_get_port_parent_id(b, &b_id, true))
+ return false;
+
+ return netdev_phys_item_id_same(&a_id, &b_id);
+}
+EXPORT_SYMBOL(netdev_port_same_parent_id);
+
+/**
* dev_change_proto_down - update protocol port state information
* @dev: device
* @proto_down: new value
@@ -7897,6 +7954,25 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
}
EXPORT_SYMBOL(dev_change_proto_down);
+/**
+ * dev_change_proto_down_generic - generic implementation for
+ * ndo_change_proto_down that sets carrier according to
+ * proto_down.
+ *
+ * @dev: device
+ * @proto_down: new value
+ */
+int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
+{
+ if (proto_down)
+ netif_carrier_off(dev);
+ else
+ netif_carrier_on(dev);
+ dev->proto_down = proto_down;
+ return 0;
+}
+EXPORT_SYMBOL(dev_change_proto_down_generic);
+
u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
enum bpf_netdev_command cmd)
{
@@ -7976,35 +8052,41 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
enum bpf_netdev_command query;
struct bpf_prog *prog = NULL;
bpf_op_t bpf_op, bpf_chk;
+ bool offload;
int err;
ASSERT_RTNL();
- query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
+ offload = flags & XDP_FLAGS_HW_MODE;
+ query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
bpf_op = bpf_chk = ops->ndo_bpf;
- if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
+ if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
+ NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
return -EOPNOTSUPP;
+ }
if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
bpf_op = generic_xdp_install;
if (bpf_op == bpf_chk)
bpf_chk = generic_xdp_install;
if (fd >= 0) {
- if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) ||
- __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW))
+ if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
+ NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
return -EEXIST;
+ }
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
- __dev_xdp_query(dev, bpf_op, query))
+ __dev_xdp_query(dev, bpf_op, query)) {
+ NL_SET_ERR_MSG(extack, "XDP program already attached");
return -EBUSY;
+ }
prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
bpf_op == ops->ndo_bpf);
if (IS_ERR(prog))
return PTR_ERR(prog);
- if (!(flags & XDP_FLAGS_HW_MODE) &&
- bpf_prog_is_dev_bound(prog->aux)) {
+ if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
bpf_prog_put(prog);
return -EINVAL;
@@ -8152,7 +8234,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(upper->wanted_features & feature)
&& (features & feature)) {
@@ -8172,7 +8254,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(features & feature) && (lower->features & feature)) {
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
@@ -8712,6 +8794,9 @@ int init_dummy_netdev(struct net_device *dev)
set_bit(__LINK_STATE_PRESENT, &dev->state);
set_bit(__LINK_STATE_START, &dev->state);
+ /* napi_busy_loop stats accounting wants this */
+ dev_net_set(dev, &init_net);
+
/* Note: We don't allocate pcpu_refcnt for dummy devices,
* because users of this 'device' don't need to change
* its refcount.
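/*
 * Editor's illustrative sketch (not part of the patch above):
 * dev_get_port_parent_id() uses the device's own callback when present and
 * otherwise walks the lower devices, requiring them all to report the same
 * parent ID (-ENODATA if they disagree). Standalone model of that
 * "first one wins, all must match" walk; model_dev, get_parent_id and the
 * error codes are invented for illustration.
 */
#include <stdio.h>
#include <string.h>

#define ID_LEN 4

struct model_dev {
	const char id[ID_LEN];            /* empty string = no parent ID of its own */
	const struct model_dev *lower[4];
	int n_lower;
};

static int get_parent_id(const struct model_dev *dev, char out[ID_LEN])
{
	char first[ID_LEN] = "";
	int err = -1;                     /* models -EOPNOTSUPP */

	if (dev->id[0]) {                 /* device answers directly */
		memcpy(out, dev->id, ID_LEN);
		return 0;
	}
	for (int i = 0; i < dev->n_lower; i++) {
		err = get_parent_id(dev->lower[i], out);
		if (err)
			break;
		if (!first[0])
			memcpy(first, out, ID_LEN);
		else if (memcmp(first, out, ID_LEN))
			return -2;        /* models -ENODATA: lowers disagree */
	}
	return err;
}

int main(void)
{
	struct model_dev a = { "sw0" }, b = { "sw0" }, c = { "sw1" };
	struct model_dev bond_ok  = { "", { &a, &b }, 2 };
	struct model_dev bond_bad = { "", { &a, &c }, 2 };
	char id[ID_LEN];

	printf("bond_ok:  %d (%s)\n", get_parent_id(&bond_ok, id), id);
	printf("bond_bad: %d\n", get_parent_id(&bond_bad, id));
	return 0;
}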
diff --git a/net/core/devlink.c b/net/core/devlink.c
index abb0da9d7b4b..78e22cea4cc7 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -81,6 +81,7 @@ struct devlink_dpipe_header devlink_dpipe_header_ipv6 = {
EXPORT_SYMBOL(devlink_dpipe_header_ipv6);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
static LIST_HEAD(devlink_list);
@@ -115,6 +116,8 @@ static struct devlink *devlink_get_from_attrs(struct net *net,
busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
+ lockdep_assert_held(&devlink_mutex);
+
list_for_each_entry(devlink, &devlink_list, list) {
if (strcmp(devlink->dev->bus->name, busname) == 0 &&
strcmp(dev_name(devlink->dev), devname) == 0 &&
@@ -720,7 +723,7 @@ static int devlink_port_type_set(struct devlink *devlink,
{
int err;
- if (devlink->ops && devlink->ops->port_type_set) {
+ if (devlink->ops->port_type_set) {
if (port_type == DEVLINK_PORT_TYPE_NOTSET)
return -EINVAL;
if (port_type == devlink_port->type)
@@ -757,7 +760,7 @@ static int devlink_port_split(struct devlink *devlink, u32 port_index,
u32 count, struct netlink_ext_ack *extack)
{
- if (devlink->ops && devlink->ops->port_split)
+ if (devlink->ops->port_split)
return devlink->ops->port_split(devlink, port_index, count,
extack);
return -EOPNOTSUPP;
@@ -783,7 +786,7 @@ static int devlink_port_unsplit(struct devlink *devlink, u32 port_index,
struct netlink_ext_ack *extack)
{
- if (devlink->ops && devlink->ops->port_unsplit)
+ if (devlink->ops->port_unsplit)
return devlink->ops->port_unsplit(devlink, port_index, extack);
return -EOPNOTSUPP;
}
@@ -932,6 +935,9 @@ static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
pool_info.threshold_type))
goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_CELL_SIZE,
+ pool_info.cell_size))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
@@ -955,7 +961,7 @@ static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
if (err)
return err;
- if (!devlink->ops || !devlink->ops->sb_pool_get)
+ if (!devlink->ops->sb_pool_get)
return -EOPNOTSUPP;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -1011,7 +1017,7 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
- !devlink->ops || !devlink->ops->sb_pool_get)
+ !devlink->ops->sb_pool_get)
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
@@ -1040,7 +1046,7 @@ static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
{
const struct devlink_ops *ops = devlink->ops;
- if (ops && ops->sb_pool_set)
+ if (ops->sb_pool_set)
return ops->sb_pool_set(devlink, sb_index, pool_index,
size, threshold_type);
return -EOPNOTSUPP;
@@ -1145,7 +1151,7 @@ static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
if (err)
return err;
- if (!devlink->ops || !devlink->ops->sb_port_pool_get)
+ if (!devlink->ops->sb_port_pool_get)
return -EOPNOTSUPP;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -1207,7 +1213,7 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
- !devlink->ops || !devlink->ops->sb_port_pool_get)
+ !devlink->ops->sb_port_pool_get)
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
@@ -1236,7 +1242,7 @@ static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
{
const struct devlink_ops *ops = devlink_port->devlink->ops;
- if (ops && ops->sb_port_pool_set)
+ if (ops->sb_port_pool_set)
return ops->sb_port_pool_set(devlink_port, sb_index,
pool_index, threshold);
return -EOPNOTSUPP;
@@ -1349,7 +1355,7 @@ static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
if (err)
return err;
- if (!devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
+ if (!devlink->ops->sb_tc_pool_bind_get)
return -EOPNOTSUPP;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -1433,7 +1439,7 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
- !devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
+ !devlink->ops->sb_tc_pool_bind_get)
continue;
mutex_lock(&devlink->lock);
@@ -1465,7 +1471,7 @@ static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
{
const struct devlink_ops *ops = devlink_port->devlink->ops;
- if (ops && ops->sb_tc_pool_bind_set)
+ if (ops->sb_tc_pool_bind_set)
return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
tc_index, pool_type,
pool_index, threshold);
@@ -1513,7 +1519,7 @@ static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
struct devlink_sb *devlink_sb = info->user_ptr[1];
const struct devlink_ops *ops = devlink->ops;
- if (ops && ops->sb_occ_snapshot)
+ if (ops->sb_occ_snapshot)
return ops->sb_occ_snapshot(devlink, devlink_sb->index);
return -EOPNOTSUPP;
}
@@ -1525,7 +1531,7 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
struct devlink_sb *devlink_sb = info->user_ptr[1];
const struct devlink_ops *ops = devlink->ops;
- if (ops && ops->sb_occ_max_clear)
+ if (ops->sb_occ_max_clear)
return ops->sb_occ_max_clear(devlink, devlink_sb->index);
return -EOPNOTSUPP;
}
@@ -1588,13 +1594,9 @@ static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
- const struct devlink_ops *ops = devlink->ops;
struct sk_buff *msg;
int err;
- if (!ops)
- return -EOPNOTSUPP;
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -1619,9 +1621,6 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
int err = 0;
u16 mode;
- if (!ops)
- return -EOPNOTSUPP;
-
if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
if (!ops->eswitch_mode_set)
return -EOPNOTSUPP;
@@ -2656,6 +2655,27 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return devlink->ops->reload(devlink, info->extack);
}
+static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ const char *file_name, *component;
+ struct nlattr *nla_component;
+
+ if (!devlink->ops->flash_update)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME])
+ return -EINVAL;
+ file_name = nla_data(info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]);
+
+ nla_component = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT];
+ component = nla_component ? nla_data(nla_component) : NULL;
+
+ return devlink->ops->flash_update(devlink, file_name, component,
+ info->extack);
+}
+
static const struct devlink_param devlink_param_generic[] = {
{
.id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
@@ -2843,11 +2863,13 @@ nla_put_failure:
}
static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
+ unsigned int port_index,
struct devlink_param_item *param_item,
enum devlink_command cmd,
u32 portid, u32 seq, int flags)
{
union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
+ bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
const struct devlink_param *param = param_item->param;
struct devlink_param_gset_ctx ctx;
struct nlattr *param_values_list;
@@ -2866,12 +2888,15 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
return -EOPNOTSUPP;
param_value[i] = param_item->driverinit_value;
} else {
+ if (!param_item->published)
+ continue;
ctx.cmode = i;
err = devlink_param_get(devlink, param, &ctx);
if (err)
return err;
param_value[i] = ctx.val;
}
+ param_value_set[i] = true;
}
hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
@@ -2880,6 +2905,13 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
if (devlink_nl_put_handle(msg, devlink))
goto genlmsg_cancel;
+
+ if (cmd == DEVLINK_CMD_PORT_PARAM_GET ||
+ cmd == DEVLINK_CMD_PORT_PARAM_NEW ||
+ cmd == DEVLINK_CMD_PORT_PARAM_DEL)
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index))
+ goto genlmsg_cancel;
+
param_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM);
if (!param_attr)
goto genlmsg_cancel;
@@ -2899,7 +2931,7 @@ static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
goto param_nest_cancel;
for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
- if (!devlink_param_cmode_is_supported(param, i))
+ if (!param_value_set[i])
continue;
err = devlink_nl_param_value_fill_one(msg, param->type,
i, param_value[i]);
@@ -2922,18 +2954,22 @@ genlmsg_cancel:
}
static void devlink_param_notify(struct devlink *devlink,
+ unsigned int port_index,
struct devlink_param_item *param_item,
enum devlink_command cmd)
{
struct sk_buff *msg;
int err;
- WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL);
+ WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL &&
+ cmd != DEVLINK_CMD_PORT_PARAM_NEW &&
+ cmd != DEVLINK_CMD_PORT_PARAM_DEL);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
- err = devlink_nl_param_fill(msg, devlink, param_item, cmd, 0, 0, 0);
+ err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd,
+ 0, 0, 0);
if (err) {
nlmsg_free(msg);
return;
@@ -2962,7 +2998,7 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
idx++;
continue;
}
- err = devlink_nl_param_fill(msg, devlink, param_item,
+ err = devlink_nl_param_fill(msg, devlink, 0, param_item,
DEVLINK_CMD_PARAM_GET,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
@@ -3051,7 +3087,7 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
}
static struct devlink_param_item *
-devlink_param_get_from_info(struct devlink *devlink,
+devlink_param_get_from_info(struct list_head *param_list,
struct genl_info *info)
{
char *param_name;
@@ -3060,7 +3096,7 @@ devlink_param_get_from_info(struct devlink *devlink,
return NULL;
param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
- return devlink_param_find_by_name(&devlink->param_list, param_name);
+ return devlink_param_find_by_name(param_list, param_name);
}
static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
@@ -3071,7 +3107,7 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
struct sk_buff *msg;
int err;
- param_item = devlink_param_get_from_info(devlink, info);
+ param_item = devlink_param_get_from_info(&devlink->param_list, info);
if (!param_item)
return -EINVAL;
@@ -3079,7 +3115,7 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
if (!msg)
return -ENOMEM;
- err = devlink_nl_param_fill(msg, devlink, param_item,
+ err = devlink_nl_param_fill(msg, devlink, 0, param_item,
DEVLINK_CMD_PARAM_GET,
info->snd_portid, info->snd_seq, 0);
if (err) {
@@ -3090,10 +3126,12 @@ static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
-static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
+ unsigned int port_index,
+ struct list_head *param_list,
+ struct genl_info *info,
+ enum devlink_command cmd)
{
- struct devlink *devlink = info->user_ptr[0];
enum devlink_param_type param_type;
struct devlink_param_gset_ctx ctx;
enum devlink_param_cmode cmode;
@@ -3102,7 +3140,7 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
union devlink_param_value value;
int err = 0;
- param_item = devlink_param_get_from_info(devlink, info);
+ param_item = devlink_param_get_from_info(param_list, info);
if (!param_item)
return -EINVAL;
param = param_item->param;
@@ -3142,17 +3180,28 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
return err;
}
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ devlink_param_notify(devlink, port_index, param_item, cmd);
return 0;
}
+static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+
+ return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->param_list,
+ info, DEVLINK_CMD_PARAM_NEW);
+}
+
static int devlink_param_register_one(struct devlink *devlink,
- const struct devlink_param *param)
+ unsigned int port_index,
+ struct list_head *param_list,
+ const struct devlink_param *param,
+ enum devlink_command cmd)
{
struct devlink_param_item *param_item;
- if (devlink_param_find_by_name(&devlink->param_list,
- param->name))
+ if (devlink_param_find_by_name(param_list, param->name))
return -EEXIST;
if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
@@ -3165,24 +3214,111 @@ static int devlink_param_register_one(struct devlink *devlink,
return -ENOMEM;
param_item->param = param;
- list_add_tail(&param_item->list, &devlink->param_list);
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ list_add_tail(&param_item->list, param_list);
+ devlink_param_notify(devlink, port_index, param_item, cmd);
return 0;
}
static void devlink_param_unregister_one(struct devlink *devlink,
- const struct devlink_param *param)
+ unsigned int port_index,
+ struct list_head *param_list,
+ const struct devlink_param *param,
+ enum devlink_command cmd)
{
struct devlink_param_item *param_item;
- param_item = devlink_param_find_by_name(&devlink->param_list,
- param->name);
+ param_item = devlink_param_find_by_name(param_list, param->name);
WARN_ON(!param_item);
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_DEL);
+ devlink_param_notify(devlink, port_index, param_item, cmd);
list_del(&param_item->list);
kfree(param_item);
}
+static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_param_item *param_item;
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(devlink_port, &devlink->port_list, list) {
+ list_for_each_entry(param_item,
+ &devlink_port->param_list, list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_param_fill(msg,
+ devlink_port->devlink,
+ devlink_port->index, param_item,
+ DEVLINK_CMD_PORT_PARAM_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ mutex_unlock(&devlink->lock);
+ goto out;
+ }
+ idx++;
+ }
+ }
+ mutex_unlock(&devlink->lock);
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[0];
+ struct devlink_param_item *param_item;
+ struct sk_buff *msg;
+ int err;
+
+ param_item = devlink_param_get_from_info(&devlink_port->param_list,
+ info);
+ if (!param_item)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_param_fill(msg, devlink_port->devlink,
+ devlink_port->index, param_item,
+ DEVLINK_CMD_PORT_PARAM_GET,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[0];
+
+ return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
+ devlink_port->index,
+ &devlink_port->param_list, info,
+ DEVLINK_CMD_PORT_PARAM_NEW);
+}
+
static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
struct devlink *devlink,
struct devlink_snapshot *snapshot)
@@ -3504,44 +3640,56 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
u64 ret_offset, start_offset, end_offset = 0;
- struct nlattr *attrs[DEVLINK_ATTR_MAX + 1];
const struct genl_ops *ops = cb->data;
struct devlink_region *region;
struct nlattr *chunks_attr;
const char *region_name;
struct devlink *devlink;
+ struct nlattr **attrs;
bool dump = true;
void *hdr;
int err;
start_offset = *((u64 *)&cb->args[0]);
+ attrs = kmalloc_array(DEVLINK_ATTR_MAX + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize,
attrs, DEVLINK_ATTR_MAX, ops->policy, cb->extack);
if (err)
- goto out;
+ goto out_free;
+ mutex_lock(&devlink_mutex);
devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
- if (IS_ERR(devlink))
- goto out;
+ if (IS_ERR(devlink)) {
+ err = PTR_ERR(devlink);
+ goto out_dev;
+ }
- mutex_lock(&devlink_mutex);
mutex_lock(&devlink->lock);
if (!attrs[DEVLINK_ATTR_REGION_NAME] ||
- !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID])
+ !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) {
+ err = -EINVAL;
goto out_unlock;
+ }
region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
region = devlink_region_get_by_name(devlink, region_name);
- if (!region)
+ if (!region) {
+ err = -EINVAL;
goto out_unlock;
+ }
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
DEVLINK_CMD_REGION_READ);
- if (!hdr)
+ if (!hdr) {
+ err = -EMSGSIZE;
goto out_unlock;
+ }
err = devlink_nl_put_handle(skb, devlink);
if (err)
@@ -3552,8 +3700,10 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
goto nla_put_failure;
chunks_attr = nla_nest_start(skb, DEVLINK_ATTR_REGION_CHUNKS);
- if (!chunks_attr)
+ if (!chunks_attr) {
+ err = -EMSGSIZE;
goto nla_put_failure;
+ }
if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
@@ -3576,8 +3726,10 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
goto nla_put_failure;
/* Check if there was any progress done to prevent infinite loop */
- if (ret_offset == start_offset)
+ if (ret_offset == start_offset) {
+ err = -EINVAL;
goto nla_put_failure;
+ }
*((u64 *)&cb->args[0]) = ret_offset;
@@ -3585,6 +3737,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
genlmsg_end(skb, hdr);
mutex_unlock(&devlink->lock);
mutex_unlock(&devlink_mutex);
+ kfree(attrs);
return skb->len;
@@ -3592,8 +3745,1144 @@ nla_put_failure:
genlmsg_cancel(skb, hdr);
out_unlock:
mutex_unlock(&devlink->lock);
+out_dev:
+ mutex_unlock(&devlink_mutex);
+out_free:
+ kfree(attrs);
+ return err;
+}
+
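/*
 * Editor's illustrative sketch (not part of the patch above): the region-read
 * dump path above now allocates the netlink attribute table with
 * kmalloc_array() instead of keeping DEVLINK_ATTR_MAX + 1 pointers on the
 * kernel stack, and frees it on every exit path. Standalone model of that
 * stack-to-heap move with a single cleanup label; sizes and names are
 * invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MODEL_ATTR_MAX 128

static int parse_into(void **attrs, int n)
{
	memset(attrs, 0, n * sizeof(*attrs));    /* stand-in for nlmsg_parse() */
	return 0;
}

static int region_read_model(void)
{
	void **attrs;
	int err;

	/* heap allocation keeps the (large) table off the stack */
	attrs = calloc(MODEL_ATTR_MAX + 1, sizeof(*attrs));
	if (!attrs)
		return -1;                       /* models -ENOMEM */

	err = parse_into(attrs, MODEL_ATTR_MAX + 1);
	if (err)
		goto out_free;

	/* ... use attrs ... */
	err = 0;
out_free:
	free(attrs);                             /* freed on every exit path */
	return err;
}

int main(void)
{
	printf("region_read_model() -> %d\n", region_read_model());
	return 0;
}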
+struct devlink_info_req {
+ struct sk_buff *msg;
+};
+
+int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
+{
+ return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name);
+}
+EXPORT_SYMBOL_GPL(devlink_info_driver_name_put);
+
+int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
+{
+ return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
+}
+EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
+
+static int devlink_info_version_put(struct devlink_info_req *req, int attr,
+ const char *version_name,
+ const char *version_value)
+{
+ struct nlattr *nest;
+ int err;
+
+ nest = nla_nest_start(req->msg, attr);
+ if (!nest)
+ return -EMSGSIZE;
+
+ err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
+ version_name);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
+ version_value);
+ if (err)
+ goto nla_put_failure;
+
+ nla_nest_end(req->msg, nest);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(req->msg, nest);
+ return err;
+}
+
+int devlink_info_version_fixed_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
+ version_name, version_value);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put);
+
+int devlink_info_version_stored_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
+ version_name, version_value);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_stored_put);
+
+int devlink_info_version_running_put(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value)
+{
+ return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
+ version_name, version_value);
+}
+EXPORT_SYMBOL_GPL(devlink_info_version_running_put);
+
+static int
+devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags, struct netlink_ext_ack *extack)
+{
+ struct devlink_info_req req;
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = -EMSGSIZE;
+ if (devlink_nl_put_handle(msg, devlink))
+ goto err_cancel_msg;
+
+ req.msg = msg;
+ err = devlink->ops->info_get(devlink, &req, extack);
+ if (err)
+ goto err_cancel_msg;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static int devlink_nl_cmd_info_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ if (!devlink->ops->info_get)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+
+ mutex_lock(&devlink->lock);
+ err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->extack);
+ mutex_unlock(&devlink->lock);
+ if (err)
+ break;
+ idx++;
+ }
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+struct devlink_fmsg_item {
+ struct list_head list;
+ int attrtype;
+ u8 nla_type;
+ u16 len;
+ int value[0];
+};
+
+struct devlink_fmsg {
+ struct list_head item_list;
+};
+
+static struct devlink_fmsg *devlink_fmsg_alloc(void)
+{
+ struct devlink_fmsg *fmsg;
+
+ fmsg = kzalloc(sizeof(*fmsg), GFP_KERNEL);
+ if (!fmsg)
+ return NULL;
+
+ INIT_LIST_HEAD(&fmsg->item_list);
+
+ return fmsg;
+}
+
+static void devlink_fmsg_free(struct devlink_fmsg *fmsg)
+{
+ struct devlink_fmsg_item *item, *tmp;
+
+ list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {
+ list_del(&item->list);
+ kfree(item);
+ }
+ kfree(fmsg);
+}
+
+static int devlink_fmsg_nest_common(struct devlink_fmsg *fmsg,
+ int attrtype)
+{
+ struct devlink_fmsg_item *item;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->attrtype = attrtype;
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
+{
+ return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
+
+static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg)
+{
+ return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END);
+}
+
+int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
+{
+ return devlink_fmsg_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end);
+
+#define DEVLINK_FMSG_MAX_SIZE (GENLMSG_DEFAULT_SIZE - GENL_HDRLEN - NLA_HDRLEN)
+
+static int devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name)
+{
+ struct devlink_fmsg_item *item;
+
+ if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE)
+ return -EMSGSIZE;
+
+ item = kzalloc(sizeof(*item) + strlen(name) + 1, GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->nla_type = NLA_NUL_STRING;
+ item->len = strlen(name) + 1;
+ item->attrtype = DEVLINK_ATTR_FMSG_OBJ_NAME;
+ memcpy(&item->value, name, item->len);
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_put_name(fmsg, name);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start);
+
+int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ return devlink_fmsg_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end);
+
+int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_ARR_NEST_START);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_start);
+
+int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
+
+static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
+ const void *value, u16 value_len,
+ u8 value_nla_type)
+{
+ struct devlink_fmsg_item *item;
+
+ if (value_len > DEVLINK_FMSG_MAX_SIZE)
+ return -EMSGSIZE;
+
+ item = kzalloc(sizeof(*item) + value_len, GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ item->nla_type = value_nla_type;
+ item->len = value_len;
+ item->attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
+ memcpy(&item->value, value, item->len);
+ list_add_tail(&item->list, &fmsg->item_list);
+
+ return 0;
+}
+
+int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
+{
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_bool_put);
+
+int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
+{
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u8_put);
+
+int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
+{
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
+
+int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
+{
+ return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u64_put);
+
+int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
+{
+ return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1,
+ NLA_NUL_STRING);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
+
+int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len)
+{
+ return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
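
To make the nesting contract concrete, a sketch (assumed, not taken from this patch) of a reporter ->dump() callback emitting an array of values under one name; foo_read_ring_reg() is a hypothetical driver helper:

static int foo_tx_dump(struct devlink_health_reporter *reporter,
		       struct devlink_fmsg *fmsg, void *priv_ctx)
{
	int i, err;

	err = devlink_fmsg_arr_pair_nest_start(fmsg, "ring_regs");
	if (err)
		return err;

	for (i = 0; i < 4; i++) {
		err = devlink_fmsg_u32_put(fmsg, foo_read_ring_reg(i));
		if (err)
			return err;
	}

	return devlink_fmsg_arr_pair_nest_end(fmsg);
}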
+
+int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ bool value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_bool_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_bool_pair_put);
+
+int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u8 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u8_pair_put);
+
+int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u32 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u32_pair_put);
+
+int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u64 value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_u64_pair_put);
+
+int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const char *value)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_string_put(fmsg, value);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
+
+int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const void *value, u16 value_len)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_binary_put(fmsg, value, value_len);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
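
Similarly, a hedged illustration (not from the patch) of a ->diagnose() callback composing its output with the pair helpers; the reporter and counter names are invented:

static int foo_tx_diagnose(struct devlink_health_reporter *reporter,
			   struct devlink_fmsg *fmsg)
{
	int err;

	/* the core already wraps this callback in an object nest */
	err = devlink_fmsg_u32_pair_put(fmsg, "tx_timeouts", 3);
	if (err)
		return err;

	err = devlink_fmsg_bool_pair_put(fmsg, "ring_stalled", true);
	if (err)
		return err;

	return devlink_fmsg_string_pair_put(fmsg, "last_event", "ring 0 stuck");
}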
+
+static int
+devlink_fmsg_item_fill_type(struct devlink_fmsg_item *msg, struct sk_buff *skb)
+{
+ switch (msg->nla_type) {
+ case NLA_FLAG:
+ case NLA_U8:
+ case NLA_U32:
+ case NLA_U64:
+ case NLA_NUL_STRING:
+ case NLA_BINARY:
+ return nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE,
+ msg->nla_type);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
+{
+ int attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
+ u8 tmp;
+
+ switch (msg->nla_type) {
+ case NLA_FLAG:
+ /* Always provide flag data, regardless of its value */
+ tmp = *(bool *) msg->value;
+
+ return nla_put_u8(skb, attrtype, tmp);
+ case NLA_U8:
+ return nla_put_u8(skb, attrtype, *(u8 *) msg->value);
+ case NLA_U32:
+ return nla_put_u32(skb, attrtype, *(u32 *) msg->value);
+ case NLA_U64:
+ return nla_put_u64_64bit(skb, attrtype, *(u64 *) msg->value,
+ DEVLINK_ATTR_PAD);
+ case NLA_NUL_STRING:
+ return nla_put_string(skb, attrtype, (char *) &msg->value);
+ case NLA_BINARY:
+ return nla_put(skb, attrtype, msg->len, (void *) &msg->value);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
+ int *start)
+{
+ struct devlink_fmsg_item *item;
+ struct nlattr *fmsg_nlattr;
+ int i = 0;
+ int err;
+
+ fmsg_nlattr = nla_nest_start(skb, DEVLINK_ATTR_FMSG);
+ if (!fmsg_nlattr)
+ return -EMSGSIZE;
+
+ list_for_each_entry(item, &fmsg->item_list, list) {
+ if (i < *start) {
+ i++;
+ continue;
+ }
+
+ switch (item->attrtype) {
+ case DEVLINK_ATTR_FMSG_OBJ_NEST_START:
+ case DEVLINK_ATTR_FMSG_PAIR_NEST_START:
+ case DEVLINK_ATTR_FMSG_ARR_NEST_START:
+ case DEVLINK_ATTR_FMSG_NEST_END:
+ err = nla_put_flag(skb, item->attrtype);
+ break;
+ case DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA:
+ err = devlink_fmsg_item_fill_type(item, skb);
+ if (err)
+ break;
+ err = devlink_fmsg_item_fill_data(item, skb);
+ break;
+ case DEVLINK_ATTR_FMSG_OBJ_NAME:
+ err = nla_put_string(skb, item->attrtype,
+ (char *) &item->value);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if (!err)
+ *start = ++i;
+ else
+ break;
+ }
+
+ nla_nest_end(skb, fmsg_nlattr);
+ return err;
+}
+
+static int devlink_fmsg_snd(struct devlink_fmsg *fmsg,
+ struct genl_info *info,
+ enum devlink_command cmd, int flags)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ bool last = false;
+ int index = 0;
+ void *hdr;
+ int err;
+
+ while (!last) {
+ int tmp_index = index;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, flags | NLM_F_MULTI, cmd);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
+ if (!err)
+ last = true;
+ else if (err != -EMSGSIZE || tmp_index == index)
+ goto nla_put_failure;
+
+ genlmsg_end(skb, hdr);
+ err = genlmsg_reply(skb, info);
+ if (err)
+ return err;
+ }
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+ NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ return genlmsg_reply(skb, info);
+
+nla_put_failure:
+ nlmsg_free(skb);
+ return err;
+}
+
+struct devlink_health_reporter {
+ struct list_head list;
+ void *priv;
+ const struct devlink_health_reporter_ops *ops;
+ struct devlink *devlink;
+ struct devlink_fmsg *dump_fmsg;
+	struct mutex dump_lock; /* serializes read/write access to the dump buffer */
+ u64 graceful_period;
+ bool auto_recover;
+ u8 health_state;
+ u64 dump_ts;
+ u64 error_count;
+ u64 recovery_count;
+ u64 last_recovery_ts;
+};
+
+void *
+devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
+{
+ return reporter->priv;
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_priv);
+
+static struct devlink_health_reporter *
+devlink_health_reporter_find_by_name(struct devlink *devlink,
+ const char *reporter_name)
+{
+ struct devlink_health_reporter *reporter;
+
+ list_for_each_entry(reporter, &devlink->reporter_list, list)
+ if (!strcmp(reporter->ops->name, reporter_name))
+ return reporter;
+ return NULL;
+}
+
+/**
+ * devlink_health_reporter_create - create devlink health reporter
+ *
+ * @devlink: devlink
+ * @ops: devlink health reporter ops
+ * @graceful_period: minimum time (in msecs) between recovery attempts, to avoid recovery loops
+ * @auto_recover: automatically invoke ->recover() when an error is reported
+ * @priv: driver private data, retrievable via devlink_health_reporter_priv()
+ */
+struct devlink_health_reporter *
+devlink_health_reporter_create(struct devlink *devlink,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, bool auto_recover,
+ void *priv)
+{
+ struct devlink_health_reporter *reporter;
+
+ mutex_lock(&devlink->lock);
+ if (devlink_health_reporter_find_by_name(devlink, ops->name)) {
+ reporter = ERR_PTR(-EEXIST);
+ goto unlock;
+ }
+
+ if (WARN_ON(auto_recover && !ops->recover) ||
+ WARN_ON(graceful_period && !ops->recover)) {
+ reporter = ERR_PTR(-EINVAL);
+ goto unlock;
+ }
+
+ reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
+ if (!reporter) {
+ reporter = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ reporter->priv = priv;
+ reporter->ops = ops;
+ reporter->devlink = devlink;
+ reporter->graceful_period = graceful_period;
+ reporter->auto_recover = auto_recover;
+ mutex_init(&reporter->dump_lock);
+ list_add_tail(&reporter->list, &devlink->reporter_list);
+unlock:
+ mutex_unlock(&devlink->lock);
+ return reporter;
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
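
A rough usage sketch, not part of the patch: a driver registers a named reporter at probe time with a grace period and auto-recovery. struct foo_priv and foo_tx_recover()/foo_tx_diagnose()/foo_tx_dump() are hypothetical driver symbols matching the ops signatures used above:

static const struct devlink_health_reporter_ops foo_tx_reporter_ops = {
	.name     = "tx",
	.recover  = foo_tx_recover,   /* int (*)(reporter, void *priv_ctx) */
	.diagnose = foo_tx_diagnose,  /* int (*)(reporter, fmsg) */
	.dump     = foo_tx_dump,      /* int (*)(reporter, fmsg, void *priv_ctx) */
};

static int foo_health_init(struct foo_priv *priv)
{
	priv->tx_reporter =
		devlink_health_reporter_create(priv->devlink,
					       &foo_tx_reporter_ops,
					       500 /* msecs */, true, priv);
	return PTR_ERR_OR_ZERO(priv->tx_reporter);
}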
+
+/**
+ * devlink_health_reporter_destroy - destroy devlink health reporter
+ *
+ * @reporter: devlink health reporter to destroy
+ */
+void
+devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
+{
+ mutex_lock(&reporter->devlink->lock);
+ list_del(&reporter->list);
+ mutex_unlock(&reporter->devlink->lock);
+ if (reporter->dump_fmsg)
+ devlink_fmsg_free(reporter->dump_fmsg);
+ kfree(reporter);
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);
+
+void
+devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
+ enum devlink_health_reporter_state state)
+{
+ if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
+ state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
+ return;
+
+ if (reporter->health_state == state)
+ return;
+
+ reporter->health_state = state;
+ trace_devlink_health_reporter_state_update(reporter->devlink,
+ reporter->ops->name, state);
+}
+EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
+
+static int
+devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ int err;
+
+ if (!reporter->ops->recover)
+ return -EOPNOTSUPP;
+
+ err = reporter->ops->recover(reporter, priv_ctx);
+ if (err)
+ return err;
+
+ reporter->recovery_count++;
+ reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+ reporter->last_recovery_ts = jiffies;
+
+ return 0;
+}
+
+static void
+devlink_health_dump_clear(struct devlink_health_reporter *reporter)
+{
+ if (!reporter->dump_fmsg)
+ return;
+ devlink_fmsg_free(reporter->dump_fmsg);
+ reporter->dump_fmsg = NULL;
+}
+
+static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ int err;
+
+ if (!reporter->ops->dump)
+ return 0;
+
+ if (reporter->dump_fmsg)
+ return 0;
+
+ reporter->dump_fmsg = devlink_fmsg_alloc();
+ if (!reporter->dump_fmsg) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ err = devlink_fmsg_obj_nest_start(reporter->dump_fmsg);
+ if (err)
+ goto dump_err;
+
+ err = reporter->ops->dump(reporter, reporter->dump_fmsg,
+ priv_ctx);
+ if (err)
+ goto dump_err;
+
+ err = devlink_fmsg_obj_nest_end(reporter->dump_fmsg);
+ if (err)
+ goto dump_err;
+
+ reporter->dump_ts = jiffies;
+
+ return 0;
+
+dump_err:
+ devlink_health_dump_clear(reporter);
+ return err;
+}
+
+int devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ enum devlink_health_reporter_state prev_health_state;
+ struct devlink *devlink = reporter->devlink;
+
+	/* log the current error */
+ WARN_ON(!msg);
+ trace_devlink_health_report(devlink, reporter->ops->name, msg);
+ reporter->error_count++;
+ prev_health_state = reporter->health_state;
+ reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+
+ /* abort if the previous error wasn't recovered */
+ if (reporter->auto_recover &&
+ (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
+ jiffies - reporter->last_recovery_ts <
+ msecs_to_jiffies(reporter->graceful_period))) {
+ trace_devlink_health_recover_aborted(devlink,
+ reporter->ops->name,
+ reporter->health_state,
+ jiffies -
+ reporter->last_recovery_ts);
+ return -ECANCELED;
+ }
+
+ reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+
+ mutex_lock(&reporter->dump_lock);
+	/* store a dump of the current error, for later analysis */
+ devlink_health_do_dump(reporter, priv_ctx);
+ mutex_unlock(&reporter->dump_lock);
+
+ if (reporter->auto_recover)
+ return devlink_health_reporter_recover(reporter, priv_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_health_report);
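
And when the driver detects the condition, it reports it; the sketch below is illustrative, with the priv_ctx pointer (NULL here) handed through to ->dump() and ->recover():

static void foo_tx_timeout(struct foo_priv *priv)
{
	/* returns -ECANCELED while within the graceful period */
	devlink_health_report(priv->tx_reporter, "TX timeout on ring 0", NULL);
}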
+
+static struct devlink_health_reporter *
+devlink_health_reporter_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ char *reporter_name;
+
+ if (!info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
+ return NULL;
+
+ reporter_name =
+ nla_data(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
+ return devlink_health_reporter_find_by_name(devlink, reporter_name);
+}
+
+static int
+devlink_nl_health_reporter_fill(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_health_reporter *reporter,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags)
+{
+ struct nlattr *reporter_attr;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto genlmsg_cancel;
+
+ reporter_attr = nla_nest_start(msg, DEVLINK_ATTR_HEALTH_REPORTER);
+ if (!reporter_attr)
+ goto genlmsg_cancel;
+ if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ reporter->ops->name))
+ goto reporter_nest_cancel;
+ if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE,
+ reporter->health_state))
+ goto reporter_nest_cancel;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
+ reporter->error_count, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
+ reporter->recovery_count, DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (reporter->ops->recover &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
+ reporter->graceful_period,
+ DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+ if (reporter->ops->recover &&
+ nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
+ reporter->auto_recover))
+ goto reporter_nest_cancel;
+ if (reporter->dump_fmsg &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
+ jiffies_to_msecs(reporter->dump_ts),
+ DEVLINK_ATTR_PAD))
+ goto reporter_nest_cancel;
+
+ nla_nest_end(msg, reporter_attr);
+ genlmsg_end(msg, hdr);
+ return 0;
+
+reporter_nest_cancel:
+ nla_nest_end(msg, reporter_attr);
+genlmsg_cancel:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ struct sk_buff *msg;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_health_reporter_fill(msg, devlink, reporter,
+ DEVLINK_CMD_HEALTH_REPORTER_GET,
+ info->snd_portid, info->snd_seq,
+ 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int
+devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink_health_reporter *reporter;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(reporter, &devlink->reporter_list,
+ list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_health_reporter_fill(msg, devlink,
+ reporter,
+ DEVLINK_CMD_HEALTH_REPORTER_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ mutex_unlock(&devlink->lock);
+ goto out;
+ }
+ idx++;
+ }
+ mutex_unlock(&devlink->lock);
+ }
+out:
mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int
+devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->recover &&
+ (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
+ info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]))
+ return -EOPNOTSUPP;
+
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
+ reporter->graceful_period =
+ nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]);
+
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])
+ reporter->auto_recover =
+ nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
+
+ return 0;
+}
+
+static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ return devlink_health_reporter_recover(reporter, NULL);
+}
+
+static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ struct devlink_fmsg *fmsg;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->diagnose)
+ return -EOPNOTSUPP;
+
+ fmsg = devlink_fmsg_alloc();
+ if (!fmsg)
+ return -ENOMEM;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ goto out;
+
+ err = reporter->ops->diagnose(reporter, fmsg);
+ if (err)
+ goto out;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ goto out;
+
+ err = devlink_fmsg_snd(fmsg, info,
+ DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 0);
+
+out:
+ devlink_fmsg_free(fmsg);
+ return err;
+}
+
+static int devlink_nl_cmd_health_reporter_dump_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->dump)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&reporter->dump_lock);
+ err = devlink_health_do_dump(reporter, NULL);
+ if (err)
+ goto out;
+
+ err = devlink_fmsg_snd(reporter->dump_fmsg, info,
+ DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET, 0);
+
out:
+ mutex_unlock(&reporter->dump_lock);
+ return err;
+}
+
+static int
+devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->dump)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&reporter->dump_lock);
+ devlink_health_dump_clear(reporter);
+ mutex_unlock(&reporter->dump_lock);
return 0;
}
@@ -3622,6 +4911,11 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
};
static const struct genl_ops devlink_nl_ops[] = {
@@ -3821,6 +5115,21 @@ static const struct genl_ops devlink_nl_ops[] = {
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
+ .cmd = DEVLINK_CMD_PORT_PARAM_GET,
+ .doit = devlink_nl_cmd_port_param_get_doit,
+ .dumpit = devlink_nl_cmd_port_param_get_dumpit,
+ .policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_PARAM_SET,
+ .doit = devlink_nl_cmd_port_param_set_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
.cmd = DEVLINK_CMD_REGION_GET,
.doit = devlink_nl_cmd_region_get_doit,
.dumpit = devlink_nl_cmd_region_get_dumpit,
@@ -3842,6 +5151,66 @@ static const struct genl_ops devlink_nl_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
+ {
+ .cmd = DEVLINK_CMD_INFO_GET,
+ .doit = devlink_nl_cmd_info_get_doit,
+ .dumpit = devlink_nl_cmd_info_get_dumpit,
+ .policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
+ .doit = devlink_nl_cmd_health_reporter_get_doit,
+ .dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
+ .policy = devlink_nl_policy,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
+ .doit = devlink_nl_cmd_health_reporter_set_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
+ .doit = devlink_nl_cmd_health_reporter_recover_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
+ .doit = devlink_nl_cmd_health_reporter_diagnose_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
+ .doit = devlink_nl_cmd_health_reporter_dump_get_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
+ .doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
+ },
+ {
+ .cmd = DEVLINK_CMD_FLASH_UPDATE,
+ .doit = devlink_nl_cmd_flash_update,
+ .policy = devlink_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
};
static struct genl_family devlink_nl_family __ro_after_init = {
@@ -3871,6 +5240,9 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
{
struct devlink *devlink;
+ if (WARN_ON(!ops))
+ return NULL;
+
devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
if (!devlink)
return NULL;
@@ -3882,6 +5254,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&devlink->resource_list);
INIT_LIST_HEAD(&devlink->param_list);
INIT_LIST_HEAD(&devlink->region_list);
+ INIT_LIST_HEAD(&devlink->reporter_list);
mutex_init(&devlink->lock);
return devlink;
}
@@ -3891,6 +5264,7 @@ EXPORT_SYMBOL_GPL(devlink_alloc);
* devlink_register - Register devlink instance
*
* @devlink: devlink
+ * @dev: parent device
*/
int devlink_register(struct devlink *devlink, struct device *dev)
{
@@ -3924,6 +5298,14 @@ EXPORT_SYMBOL_GPL(devlink_unregister);
*/
void devlink_free(struct devlink *devlink)
{
+ WARN_ON(!list_empty(&devlink->reporter_list));
+ WARN_ON(!list_empty(&devlink->region_list));
+ WARN_ON(!list_empty(&devlink->param_list));
+ WARN_ON(!list_empty(&devlink->resource_list));
+ WARN_ON(!list_empty(&devlink->dpipe_table_list));
+ WARN_ON(!list_empty(&devlink->sb_list));
+ WARN_ON(!list_empty(&devlink->port_list));
+
kfree(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
@@ -3933,7 +5315,7 @@ EXPORT_SYMBOL_GPL(devlink_free);
*
* @devlink: devlink
* @devlink_port: devlink port
- * @port_index
+ * @port_index: driver-specific numerical identifier of the port
*
* Register devlink port with provided port index. User can use
* any indexing, even hw-related one. devlink_port structure
@@ -3954,6 +5336,7 @@ int devlink_port_register(struct devlink *devlink,
devlink_port->index = port_index;
devlink_port->registered = true;
list_add_tail(&devlink_port->list, &devlink->port_list);
+ INIT_LIST_HEAD(&devlink_port->param_list);
mutex_unlock(&devlink->lock);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
return 0;
@@ -4262,13 +5645,10 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
*
* @devlink: devlink
* @resource_name: resource's name
- * @top_hierarchy: top hierarchy
- * @reload_required: reload is required for new configuration to
- * apply
* @resource_size: resource's size
* @resource_id: resource's id
- * @parent_reosurce_id: resource's parent id
- * @size params: size parameters
+ * @parent_resource_id: resource's parent id
+ * @size_params: size parameters
*/
int devlink_resource_register(struct devlink *devlink,
const char *resource_name,
@@ -4471,18 +5851,23 @@ out:
}
EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
-/**
- * devlink_params_register - register configuration parameters
- *
- * @devlink: devlink
- * @params: configuration parameters array
- * @params_count: number of parameters provided
- *
- * Register the configuration parameters supported by the driver.
- */
-int devlink_params_register(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
+static int devlink_param_verify(const struct devlink_param *param)
+{
+ if (!param || !param->name || !param->supported_cmodes)
+ return -EINVAL;
+ if (param->generic)
+ return devlink_param_generic_verify(param);
+ else
+ return devlink_param_driver_verify(param);
+}
+
+static int __devlink_params_register(struct devlink *devlink,
+ unsigned int port_index,
+ struct list_head *param_list,
+ const struct devlink_param *params,
+ size_t params_count,
+ enum devlink_command reg_cmd,
+ enum devlink_command unreg_cmd)
{
const struct devlink_param *param = params;
int i;
@@ -4490,20 +5875,12 @@ int devlink_params_register(struct devlink *devlink,
mutex_lock(&devlink->lock);
for (i = 0; i < params_count; i++, param++) {
- if (!param || !param->name || !param->supported_cmodes) {
- err = -EINVAL;
+ err = devlink_param_verify(param);
+ if (err)
goto rollback;
- }
- if (param->generic) {
- err = devlink_param_generic_verify(param);
- if (err)
- goto rollback;
- } else {
- err = devlink_param_driver_verify(param);
- if (err)
- goto rollback;
- }
- err = devlink_param_register_one(devlink, param);
+
+ err = devlink_param_register_one(devlink, port_index,
+ param_list, param, reg_cmd);
if (err)
goto rollback;
}
@@ -4515,11 +5892,48 @@ rollback:
if (!i)
goto unlock;
for (param--; i > 0; i--, param--)
- devlink_param_unregister_one(devlink, param);
+ devlink_param_unregister_one(devlink, port_index, param_list,
+ param, unreg_cmd);
unlock:
mutex_unlock(&devlink->lock);
return err;
}
+
+static void __devlink_params_unregister(struct devlink *devlink,
+ unsigned int port_index,
+ struct list_head *param_list,
+ const struct devlink_param *params,
+ size_t params_count,
+ enum devlink_command cmd)
+{
+ const struct devlink_param *param = params;
+ int i;
+
+ mutex_lock(&devlink->lock);
+ for (i = 0; i < params_count; i++, param++)
+		devlink_param_unregister_one(devlink, port_index, param_list, param,
+ cmd);
+ mutex_unlock(&devlink->lock);
+}
+
+/**
+ * devlink_params_register - register configuration parameters
+ *
+ * @devlink: devlink
+ * @params: configuration parameters array
+ * @params_count: number of parameters provided
+ *
+ * Register the configuration parameters supported by the driver.
+ */
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return __devlink_params_register(devlink, 0, &devlink->param_list,
+ params, params_count,
+ DEVLINK_CMD_PARAM_NEW,
+ DEVLINK_CMD_PARAM_DEL);
+}
EXPORT_SYMBOL_GPL(devlink_params_register);
/**
@@ -4532,36 +5946,103 @@ void devlink_params_unregister(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count)
{
- const struct devlink_param *param = params;
- int i;
-
- mutex_lock(&devlink->lock);
- for (i = 0; i < params_count; i++, param++)
- devlink_param_unregister_one(devlink, param);
- mutex_unlock(&devlink->lock);
+ return __devlink_params_unregister(devlink, 0, &devlink->param_list,
+ params, params_count,
+ DEVLINK_CMD_PARAM_DEL);
}
EXPORT_SYMBOL_GPL(devlink_params_unregister);
/**
- * devlink_param_driverinit_value_get - get configuration parameter
- * value for driver initializing
+ * devlink_params_publish - publish configuration parameters
*
* @devlink: devlink
- * @param_id: parameter ID
- * @init_val: value of parameter in driverinit configuration mode
*
- * This function should be used by the driver to get driverinit
- * configuration for initialization after reload command.
+ * Publish previously registered configuration parameters.
*/
-int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *init_val)
+void devlink_params_publish(struct devlink *devlink)
{
struct devlink_param_item *param_item;
- if (!devlink->ops || !devlink->ops->reload)
- return -EOPNOTSUPP;
+ list_for_each_entry(param_item, &devlink->param_list, list) {
+ if (param_item->published)
+ continue;
+ param_item->published = true;
+ devlink_param_notify(devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_NEW);
+ }
+}
+EXPORT_SYMBOL_GPL(devlink_params_publish);
- param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+/**
+ * devlink_params_unpublish - unpublish configuration parameters
+ *
+ * @devlink: devlink
+ *
+ * Unpublish previously registered configuration parameters.
+ */
+void devlink_params_unpublish(struct devlink *devlink)
+{
+ struct devlink_param_item *param_item;
+
+ list_for_each_entry(param_item, &devlink->param_list, list) {
+ if (!param_item->published)
+ continue;
+ param_item->published = false;
+ devlink_param_notify(devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_DEL);
+ }
+}
+EXPORT_SYMBOL_GPL(devlink_params_unpublish);
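
A sketch of the intended ordering (assumed, not spelled out in the patch): parameters are registered early in probe, driverinit defaults are seeded, and only then are they published so user space sees consistent values. foo_devlink_params is a hypothetical driver-defined array of struct devlink_param:

static int foo_params_init(struct devlink *devlink)
{
	int err;

	err = devlink_params_register(devlink, foo_devlink_params,
				      ARRAY_SIZE(foo_devlink_params));
	if (err)
		return err;

	/* ... seed driverinit defaults here ... */

	devlink_params_publish(devlink);
	return 0;
}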
+
+/**
+ * devlink_port_params_register - register port configuration parameters
+ *
+ * @devlink_port: devlink port
+ * @params: configuration parameters array
+ * @params_count: number of parameters provided
+ *
+ * Register the configuration parameters supported by the port.
+ */
+int devlink_port_params_register(struct devlink_port *devlink_port,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return __devlink_params_register(devlink_port->devlink,
+ devlink_port->index,
+ &devlink_port->param_list, params,
+ params_count,
+ DEVLINK_CMD_PORT_PARAM_NEW,
+ DEVLINK_CMD_PORT_PARAM_DEL);
+}
+EXPORT_SYMBOL_GPL(devlink_port_params_register);
+
+/**
+ * devlink_port_params_unregister - unregister port configuration
+ * parameters
+ *
+ * @devlink_port: devlink port
+ * @params: configuration parameters array
+ * @params_count: number of parameters provided
+ */
+void devlink_port_params_unregister(struct devlink_port *devlink_port,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return __devlink_params_unregister(devlink_port->devlink,
+ devlink_port->index,
+ &devlink_port->param_list,
+ params, params_count,
+ DEVLINK_CMD_PORT_PARAM_DEL);
+}
+EXPORT_SYMBOL_GPL(devlink_port_params_unregister);
+
+static int
+__devlink_param_driverinit_value_get(struct list_head *param_list, u32 param_id,
+ union devlink_param_value *init_val)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(param_list, param_id);
if (!param_item)
return -EINVAL;
@@ -4577,6 +6058,54 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
return 0;
}
+
+static int
+__devlink_param_driverinit_value_set(struct devlink *devlink,
+ unsigned int port_index,
+ struct list_head *param_list, u32 param_id,
+ union devlink_param_value init_val,
+ enum devlink_command cmd)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(param_list, param_id);
+ if (!param_item)
+ return -EINVAL;
+
+ if (!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT))
+ return -EOPNOTSUPP;
+
+ if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(param_item->driverinit_value.vstr, init_val.vstr);
+ else
+ param_item->driverinit_value = init_val;
+ param_item->driverinit_value_valid = true;
+
+ devlink_param_notify(devlink, port_index, param_item, cmd);
+ return 0;
+}
+
+/**
+ * devlink_param_driverinit_value_get - get configuration parameter
+ * value for driver initializing
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ * @init_val: value of parameter in driverinit configuration mode
+ *
+ * This function should be used by the driver to get driverinit
+ * configuration for initialization after reload command.
+ */
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val)
+{
+ if (!devlink->ops->reload)
+ return -EOPNOTSUPP;
+
+ return __devlink_param_driverinit_value_get(&devlink->param_list,
+ param_id, init_val);
+}
EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
/**
@@ -4594,26 +6123,61 @@ EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val)
{
- struct devlink_param_item *param_item;
+ return __devlink_param_driverinit_value_set(devlink, 0,
+ &devlink->param_list,
+ param_id, init_val,
+ DEVLINK_CMD_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
- param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
- if (!param_item)
- return -EINVAL;
+/**
+ * devlink_port_param_driverinit_value_get - get configuration parameter
+ * value for driver initializing
+ *
+ * @devlink_port: devlink_port
+ * @param_id: parameter ID
+ * @init_val: value of parameter in driverinit configuration mode
+ *
+ * This function should be used by the driver to get driverinit
+ * configuration for initialization after reload command.
+ */
+int devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
+ u32 param_id,
+ union devlink_param_value *init_val)
+{
+ struct devlink *devlink = devlink_port->devlink;
- if (!devlink_param_cmode_is_supported(param_item->param,
- DEVLINK_PARAM_CMODE_DRIVERINIT))
+ if (!devlink->ops->reload)
return -EOPNOTSUPP;
- if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
- strcpy(param_item->driverinit_value.vstr, init_val.vstr);
- else
- param_item->driverinit_value = init_val;
- param_item->driverinit_value_valid = true;
+ return __devlink_param_driverinit_value_get(&devlink_port->param_list,
+ param_id, init_val);
+}
+EXPORT_SYMBOL_GPL(devlink_port_param_driverinit_value_get);
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
- return 0;
+/**
+ * devlink_port_param_driverinit_value_set - set value of configuration
+ * parameter for driverinit
+ * configuration mode
+ *
+ * @devlink_port: devlink_port
+ * @param_id: parameter ID
+ * @init_val: value of parameter to set for driverinit configuration mode
+ *
+ * This function should be used by the driver to set driverinit
+ * configuration mode default value.
+ */
+int devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
+ u32 param_id,
+ union devlink_param_value init_val)
+{
+ return __devlink_param_driverinit_value_set(devlink_port->devlink,
+ devlink_port->index,
+ &devlink_port->param_list,
+ param_id, init_val,
+ DEVLINK_CMD_PORT_PARAM_NEW);
}
-EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
+EXPORT_SYMBOL_GPL(devlink_port_param_driverinit_value_set);
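
For completeness, a hedged sketch of seeding a per-port driverinit default at probe time; FOO_DEVLINK_PARAM_ID_WAKE_ON_LAN is an assumed driver-private parameter ID, not defined by this patch:

static int foo_port_params_init(struct devlink_port *devlink_port)
{
	union devlink_param_value value;

	value.vbool = false;
	return devlink_port_param_driverinit_value_set(devlink_port,
					FOO_DEVLINK_PARAM_ID_WAKE_ON_LAN,
					value);
}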
/**
* devlink_param_value_changed - notify devlink on a parameter's value
@@ -4626,7 +6190,6 @@ EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
* This function should be used by the driver to notify devlink on value
* change, excluding driverinit configuration mode.
* For driverinit configuration mode driver should use the function
- * devlink_param_driverinit_value_set() instead.
*/
void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
{
@@ -4635,11 +6198,38 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
WARN_ON(!param_item);
- devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
}
EXPORT_SYMBOL_GPL(devlink_param_value_changed);
/**
+ * devlink_port_param_value_changed - notify devlink on a parameter's value
+ * change. Should be called by the driver
+ * right after the change.
+ *
+ * @devlink_port: devlink_port
+ * @param_id: parameter ID
+ *
+ * This function should be used by the driver to notify devlink on value
+ * change, excluding driverinit configuration mode.
+ * For driverinit configuration mode driver should use the function
+ * devlink_port_param_driverinit_value_set() instead.
+ */
+void devlink_port_param_value_changed(struct devlink_port *devlink_port,
+ u32 param_id)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(&devlink_port->param_list,
+ param_id);
+ WARN_ON(!param_item);
+
+ devlink_param_notify(devlink_port->devlink, devlink_port->index,
+ param_item, DEVLINK_CMD_PORT_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devlink_port_param_value_changed);
+
+/**
* devlink_param_value_str_fill - Safely fill-up the string preventing
* from overflow of the preallocated buffer
*
@@ -4755,7 +6345,7 @@ EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get);
* Multiple snapshots can be created on a region.
* The @snapshot_id should be obtained using the getter function.
*
- * @devlink_region: devlink region of the snapshot
+ * @region: devlink region of the snapshot
* @data_len: size of snapshot data
* @data: snapshot data
* @snapshot_id: snapshot id to be created
@@ -4808,20 +6398,93 @@ unlock:
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
-static int __init devlink_module_init(void)
+static void __devlink_compat_running_version(struct devlink *devlink,
+ char *buf, size_t len)
{
- return genl_register_family(&devlink_nl_family);
+ const struct nlattr *nlattr;
+ struct devlink_info_req req;
+ struct sk_buff *msg;
+ int rem, err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ req.msg = msg;
+ err = devlink->ops->info_get(devlink, &req, NULL);
+ if (err)
+ goto free_msg;
+
+ nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) {
+ const struct nlattr *kv;
+ int rem_kv;
+
+ if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING)
+ continue;
+
+ nla_for_each_nested(kv, nlattr, rem_kv) {
+ if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
+ continue;
+
+ strlcat(buf, nla_data(kv), len);
+ strlcat(buf, " ", len);
+ }
+ }
+free_msg:
+ nlmsg_free(msg);
+}
+
+void devlink_compat_running_version(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct devlink *devlink;
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ mutex_lock(&devlink_mutex);
+ devlink = netdev_to_devlink(dev);
+ if (!devlink || !devlink->ops->info_get)
+ goto unlock_list;
+
+ mutex_lock(&devlink->lock);
+ __devlink_compat_running_version(devlink, buf, len);
+ mutex_unlock(&devlink->lock);
+unlock_list:
+ mutex_unlock(&devlink_mutex);
+
+ rtnl_lock();
+ dev_put(dev);
}
-static void __exit devlink_module_exit(void)
+int devlink_compat_flash_update(struct net_device *dev, const char *file_name)
{
- genl_unregister_family(&devlink_nl_family);
+ struct devlink *devlink;
+ int ret = -EOPNOTSUPP;
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ mutex_lock(&devlink_mutex);
+ devlink = netdev_to_devlink(dev);
+ if (!devlink || !devlink->ops->flash_update)
+ goto unlock_list;
+
+ mutex_lock(&devlink->lock);
+ ret = devlink->ops->flash_update(devlink, file_name, NULL, NULL);
+ mutex_unlock(&devlink->lock);
+unlock_list:
+ mutex_unlock(&devlink_mutex);
+
+ rtnl_lock();
+ dev_put(dev);
+
+ return ret;
}
-module_init(devlink_module_init);
-module_exit(devlink_module_exit);
+static int __init devlink_init(void)
+{
+ return genl_register_family(&devlink_nl_family);
+}
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
-MODULE_DESCRIPTION("Network physical device Netlink interface");
-MODULE_ALIAS_GENL_FAMILY(DEVLINK_GENL_NAME);
+subsys_initcall(devlink_init);
diff --git a/net/core/dst.c b/net/core/dst.c
index 81ccf20e2826..a263309df115 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -98,8 +98,12 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
struct dst_entry *dst;
if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
- if (ops->gc(ops))
+ if (ops->gc(ops)) {
+ printk_ratelimited(KERN_NOTICE "Route cache is full: "
+ "consider increasing sysctl "
+ "net.ipv[4|6].route.max_size.\n");
return NULL;
+ }
}
dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 158264f7cfaf..d4918ffddda8 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -27,7 +27,9 @@
#include <linux/rtnetlink.h>
#include <linux/sched/signal.h>
#include <linux/net.h>
+#include <net/devlink.h>
#include <net/xdp_sock.h>
+#include <net/flow_offload.h>
/*
* Some useful ethtool_ops methods that're device independent.
@@ -803,6 +805,10 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
if (ops->get_eeprom_len)
info.eedump_len = ops->get_eeprom_len(dev);
+ if (!info.fw_version[0])
+ devlink_compat_running_version(dev, info.fw_version,
+ sizeof(info.fw_version));
+
if (copy_to_user(useraddr, &info, sizeof(info)))
return -EFAULT;
return 0;
@@ -1348,12 +1354,9 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (regs.len > reglen)
regs.len = reglen;
- regbuf = NULL;
- if (reglen) {
- regbuf = vzalloc(reglen);
- if (!regbuf)
- return -ENOMEM;
- }
+ regbuf = vzalloc(reglen);
+ if (!regbuf)
+ return -ENOMEM;
ops->get_regs(dev, &regs, regbuf);
@@ -1714,7 +1717,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+ struct ethtool_pauseparam pauseparam = { .cmd = ETHTOOL_GPAUSEPARAM };
if (!dev->ethtool_ops->get_pauseparam)
return -EOPNOTSUPP;
@@ -2033,11 +2036,10 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
if (copy_from_user(&efl, useraddr, sizeof(efl)))
return -EFAULT;
+ efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
if (!dev->ethtool_ops->flash_device)
- return -EOPNOTSUPP;
-
- efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+ return devlink_compat_flash_update(dev, efl.data);
return dev->ethtool_ops->flash_device(dev, &efl);
}
@@ -2501,7 +2503,7 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM };
+ struct ethtool_fecparam fecparam = { .cmd = ETHTOOL_GFECPARAM };
int rc;
if (!dev->ethtool_ops->get_fecparam)
@@ -2816,3 +2818,241 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
return rc;
}
+
+struct ethtool_rx_flow_key {
+ struct flow_dissector_key_basic basic;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_ports tp;
+ struct flow_dissector_key_ip ip;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_eth_addrs eth_addrs;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct ethtool_rx_flow_match {
+ struct flow_dissector dissector;
+ struct ethtool_rx_flow_key key;
+ struct ethtool_rx_flow_key mask;
+};
+
+struct ethtool_rx_flow_rule *
+ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
+{
+ const struct ethtool_rx_flow_spec *fs = input->fs;
+ static struct in6_addr zero_addr = {};
+ struct ethtool_rx_flow_match *match;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_action_entry *act;
+
+ flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) +
+ sizeof(struct ethtool_rx_flow_match), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+	/* ethtool_rx supports only a single action per rule. */
+ flow->rule = flow_rule_alloc(1);
+ if (!flow->rule) {
+ kfree(flow);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ match = (struct ethtool_rx_flow_match *)flow->priv;
+ flow->rule->match.dissector = &match->dissector;
+ flow->rule->match.mask = &match->mask;
+ flow->rule->match.key = &match->key;
+
+ match->mask.basic.n_proto = htons(0xffff);
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+
+ match->key.basic.n_proto = htons(ETH_P_IP);
+
+ v4_spec = &fs->h_u.tcp_ip4_spec;
+ v4_m_spec = &fs->m_u.tcp_ip4_spec;
+
+ if (v4_m_spec->ip4src) {
+ match->key.ipv4.src = v4_spec->ip4src;
+ match->mask.ipv4.src = v4_m_spec->ip4src;
+ }
+ if (v4_m_spec->ip4dst) {
+ match->key.ipv4.dst = v4_spec->ip4dst;
+ match->mask.ipv4.dst = v4_m_spec->ip4dst;
+ }
+ if (v4_m_spec->ip4src ||
+ v4_m_spec->ip4dst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, ipv4);
+ }
+ if (v4_m_spec->psrc) {
+ match->key.tp.src = v4_spec->psrc;
+ match->mask.tp.src = v4_m_spec->psrc;
+ }
+ if (v4_m_spec->pdst) {
+ match->key.tp.dst = v4_spec->pdst;
+ match->mask.tp.dst = v4_m_spec->pdst;
+ }
+ if (v4_m_spec->psrc ||
+ v4_m_spec->pdst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
+ offsetof(struct ethtool_rx_flow_key, tp);
+ }
+ if (v4_m_spec->tos) {
+ match->key.ip.tos = v4_spec->tos;
+ match->mask.ip.tos = v4_m_spec->tos;
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IP);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
+ offsetof(struct ethtool_rx_flow_key, ip);
+ }
+ }
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW: {
+ const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+
+ match->key.basic.n_proto = htons(ETH_P_IPV6);
+
+ v6_spec = &fs->h_u.tcp_ip6_spec;
+ v6_m_spec = &fs->m_u.tcp_ip6_spec;
+ if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+ memcpy(&match->key.ipv6.src, v6_spec->ip6src,
+ sizeof(match->key.ipv6.src));
+ memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src,
+ sizeof(match->mask.ipv6.src));
+ }
+ if (memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
+ memcpy(&match->key.ipv6.dst, v6_spec->ip6dst,
+ sizeof(match->key.ipv6.dst));
+ memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst,
+ sizeof(match->mask.ipv6.dst));
+ }
+ if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
+		    memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, ipv6);
+ }
+ if (v6_m_spec->psrc) {
+ match->key.tp.src = v6_spec->psrc;
+ match->mask.tp.src = v6_m_spec->psrc;
+ }
+ if (v6_m_spec->pdst) {
+ match->key.tp.dst = v6_spec->pdst;
+ match->mask.tp.dst = v6_m_spec->pdst;
+ }
+ if (v6_m_spec->psrc ||
+ v6_m_spec->pdst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
+ offsetof(struct ethtool_rx_flow_key, tp);
+ }
+ if (v6_m_spec->tclass) {
+ match->key.ip.tos = v6_spec->tclass;
+ match->mask.ip.tos = v6_m_spec->tclass;
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IP);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
+ offsetof(struct ethtool_rx_flow_key, ip);
+ }
+ }
+ break;
+ default:
+ ethtool_rx_flow_rule_destroy(flow);
+ return ERR_PTR(-EINVAL);
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ match->key.basic.ip_proto = IPPROTO_TCP;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ match->key.basic.ip_proto = IPPROTO_UDP;
+ break;
+ }
+ match->mask.basic.ip_proto = 0xff;
+
+ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] =
+ offsetof(struct ethtool_rx_flow_key, basic);
+
+ if (fs->flow_type & FLOW_EXT) {
+ const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
+ const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
+
+ if (ext_m_spec->vlan_etype &&
+ ext_m_spec->vlan_tci) {
+ match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype;
+ match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype;
+
+ match->key.vlan.vlan_id =
+ ntohs(ext_h_spec->vlan_tci) & 0x0fff;
+ match->mask.vlan.vlan_id =
+ ntohs(ext_m_spec->vlan_tci) & 0x0fff;
+
+ match->key.vlan.vlan_priority =
+ (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13;
+ match->mask.vlan.vlan_priority =
+ (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13;
+
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_VLAN);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
+ offsetof(struct ethtool_rx_flow_key, vlan);
+ }
+ }
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
+ const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
+
+ memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest,
+ ETH_ALEN);
+ memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest,
+ ETH_ALEN);
+
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, eth_addrs);
+ }
+
+ act = &flow->rule->action.entries[0];
+ switch (fs->ring_cookie) {
+ case RX_CLS_FLOW_DISC:
+ act->id = FLOW_ACTION_DROP;
+ break;
+ case RX_CLS_FLOW_WAKE:
+ act->id = FLOW_ACTION_WAKE;
+ break;
+ default:
+ act->id = FLOW_ACTION_QUEUE;
+ if (fs->flow_type & FLOW_RSS)
+ act->queue.ctx = input->rss_ctx;
+
+ act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ break;
+ }
+
+ return flow;
+}
+EXPORT_SYMBOL(ethtool_rx_flow_rule_create);
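
A sketch of how a driver's ETHTOOL_SRXCLSRLINS path might consume this translation; foo_hw_program_rule() and struct foo_priv are assumptions, not from the patch. The driver reads the generated match/action out of flow->rule and frees the wrapper once the hardware has been programmed:

static int foo_insert_rx_rule(struct foo_priv *priv,
			      const struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = { .fs = fs };
	struct ethtool_rx_flow_rule *flow;
	const struct flow_action_entry *act;
	int err;

	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	act = &flow->rule->action.entries[0];
	/* program the dissected match and the single action into hardware */
	err = foo_hw_program_rule(priv, flow->rule->match.dissector,
				  flow->rule->match.key,
				  flow->rule->match.mask, act);

	ethtool_rx_flow_rule_destroy(flow);
	return err;
}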
+
+void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow)
+{
+ kfree(flow->rule);
+ kfree(flow);
+}
+EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy);
diff --git a/net/core/filter.c b/net/core/filter.c
index 447dd1bad31f..5ceba98069d4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -73,6 +73,7 @@
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
+#include <net/lwtunnel.h>
/**
* sk_filter_trim_cap - run a packet through a socket filter
@@ -1793,6 +1794,20 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
.arg2_type = ARG_ANYTHING,
};
+BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
+{
+ sk = sk_to_full_sk(sk);
+
+ return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
+}
+
+static const struct bpf_func_proto bpf_sk_fullsock_proto = {
+ .func = bpf_sk_fullsock,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+};
+
static inline int sk_skb_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
@@ -2020,18 +2035,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
- /* skb->mac_len is not set on normal egress */
- unsigned int mlen = skb->network_header - skb->mac_header;
+ unsigned int mlen = skb_network_offset(skb);
- __skb_pull(skb, mlen);
+ if (mlen) {
+ __skb_pull(skb, mlen);
- /* At ingress, the mac header has already been pulled once.
- * At egress, skb_pospull_rcsum has to be done in case that
- * the skb is originated from ingress (i.e. a forwarded skb)
- * to ensure that rcsum starts at net header.
- */
- if (!skb_at_tc_ingress(skb))
- skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ /* At ingress, the mac header has already been pulled once.
+	 * At egress, skb_postpull_rcsum has to be done in case
+	 * the skb originated from ingress (i.e. a forwarded skb)
+ * to ensure that rcsum starts at net header.
+ */
+ if (!skb_at_tc_ingress(skb))
+ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+ }
skb_pop_mac_header(skb);
skb_reset_mac_len(skb);
return flags & BPF_F_INGRESS ?
@@ -2788,8 +2804,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2830,8 +2845,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2956,8 +2970,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2986,8 +2999,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
@@ -4111,14 +4123,20 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
/* Only some socketops are supported */
switch (optname) {
case SO_RCVBUF:
+ val = min_t(u32, val, sysctl_rmem_max);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_SNDBUF:
+ val = min_t(u32, val, sysctl_wmem_max);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;
case SO_MAX_PACING_RATE: /* 32bit version */
+ if (val != ~0U)
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
@@ -4132,7 +4150,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_rcvlowat = val ? : 1;
break;
case SO_MARK:
- sk->sk_mark = val;
+ if (sk->sk_mark != val) {
+ sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
break;
default:
ret = -EINVAL;
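Since a changed SO_MARK now also drops the cached route, a sockops program that re-marks a flow takes effect on the next output route lookup. A minimal sketch of such a program, assuming libbpf's bpf_helpers.h declarations; the mark value and the callback it reacts to are purely illustrative:

#include <linux/bpf.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int remark_flow(struct bpf_sock_ops *skops)
{
	int mark = 42;	/* illustrative fwmark */

	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		bpf_setsockopt(skops, SOL_SOCKET, SO_MARK,
			       &mark, sizeof(mark));
	return 1;
}

char _license[] SEC("license") = "GPL";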
@@ -4203,7 +4224,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
/* Only some options are supported */
switch (optname) {
case TCP_BPF_IW:
- if (val <= 0 || tp->data_segs_out > 0)
+ if (val <= 0 || tp->data_segs_out > tp->syn_data)
ret = -EINVAL;
else
tp->snd_cwnd = val;
@@ -4793,7 +4814,15 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len
}
#endif /* CONFIG_IPV6_SEG6_BPF */
-BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
+#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
+static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
+ bool ingress)
+{
+ return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
+}
+#endif
+
+BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
u32, len)
{
switch (type) {
@@ -4802,13 +4831,40 @@ BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
case BPF_LWT_ENCAP_SEG6_INLINE:
return bpf_push_seg6_encap(skb, type, hdr, len);
#endif
+#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
+ case BPF_LWT_ENCAP_IP:
+ return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
+#endif
+ default:
+ return -EINVAL;
+ }
+}
+
+BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
+ void *, hdr, u32, len)
+{
+ switch (type) {
+#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
+ case BPF_LWT_ENCAP_IP:
+ return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
+#endif
default:
return -EINVAL;
}
}
-static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
- .func = bpf_lwt_push_encap,
+static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
+ .func = bpf_lwt_in_push_encap,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_CONST_SIZE
+};
+
+static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
+ .func = bpf_lwt_xmit_push_encap,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
@@ -5008,6 +5064,54 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
};
#endif /* CONFIG_IPV6_SEG6_BPF */
+#define CONVERT_COMMON_TCP_SOCK_FIELDS(md_type, CONVERT) \
+do { \
+ switch (si->off) { \
+ case offsetof(md_type, snd_cwnd): \
+ CONVERT(snd_cwnd); break; \
+ case offsetof(md_type, srtt_us): \
+ CONVERT(srtt_us); break; \
+ case offsetof(md_type, snd_ssthresh): \
+ CONVERT(snd_ssthresh); break; \
+ case offsetof(md_type, rcv_nxt): \
+ CONVERT(rcv_nxt); break; \
+ case offsetof(md_type, snd_nxt): \
+ CONVERT(snd_nxt); break; \
+ case offsetof(md_type, snd_una): \
+ CONVERT(snd_una); break; \
+ case offsetof(md_type, mss_cache): \
+ CONVERT(mss_cache); break; \
+ case offsetof(md_type, ecn_flags): \
+ CONVERT(ecn_flags); break; \
+ case offsetof(md_type, rate_delivered): \
+ CONVERT(rate_delivered); break; \
+ case offsetof(md_type, rate_interval_us): \
+ CONVERT(rate_interval_us); break; \
+ case offsetof(md_type, packets_out): \
+ CONVERT(packets_out); break; \
+ case offsetof(md_type, retrans_out): \
+ CONVERT(retrans_out); break; \
+ case offsetof(md_type, total_retrans): \
+ CONVERT(total_retrans); break; \
+ case offsetof(md_type, segs_in): \
+ CONVERT(segs_in); break; \
+ case offsetof(md_type, data_segs_in): \
+ CONVERT(data_segs_in); break; \
+ case offsetof(md_type, segs_out): \
+ CONVERT(segs_out); break; \
+ case offsetof(md_type, data_segs_out): \
+ CONVERT(data_segs_out); break; \
+ case offsetof(md_type, lost_out): \
+ CONVERT(lost_out); break; \
+ case offsetof(md_type, sacked_out): \
+ CONVERT(sacked_out); break; \
+ case offsetof(md_type, bytes_received): \
+ CONVERT(bytes_received); break; \
+ case offsetof(md_type, bytes_acked): \
+ CONVERT(bytes_acked); break; \
+ } \
+} while (0)
+
#ifdef CONFIG_INET
static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
int dif, int sdif, u8 family, u8 proto)
@@ -5245,6 +5349,105 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
.arg5_type = ARG_ANYTHING,
};
+bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, bytes_acked))
+ return false;
+
+ if (off % size != 0)
+ return false;
+
+ switch (off) {
+ case offsetof(struct bpf_tcp_sock, bytes_received):
+ case offsetof(struct bpf_tcp_sock, bytes_acked):
+ return size == sizeof(__u64);
+ default:
+ return size == sizeof(__u32);
+ }
+}
+
+u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog, u32 *target_size)
+{
+ struct bpf_insn *insn = insn_buf;
+
+#define BPF_TCP_SOCK_GET_COMMON(FIELD) \
+ do { \
+ BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) > \
+ FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
+ si->dst_reg, si->src_reg, \
+ offsetof(struct tcp_sock, FIELD)); \
+ } while (0)
+
+ CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_tcp_sock,
+ BPF_TCP_SOCK_GET_COMMON);
+
+ if (insn > insn_buf)
+ return insn - insn_buf;
+
+ switch (si->off) {
+ case offsetof(struct bpf_tcp_sock, rtt_min):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
+ sizeof(struct minmax));
+ BUILD_BUG_ON(sizeof(struct minmax) <
+ sizeof(struct minmax_sample));
+
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+ offsetof(struct tcp_sock, rtt_min) +
+ offsetof(struct minmax_sample, v));
+ break;
+ }
+
+ return insn - insn_buf;
+}
+
+BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
+{
+ sk = sk_to_full_sk(sk);
+
+ if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
+ return (unsigned long)sk;
+
+ return (unsigned long)NULL;
+}
+
+static const struct bpf_func_proto bpf_tcp_sock_proto = {
+ .func = bpf_tcp_sock,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL,
+ .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+};
+
+BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
+{
+ unsigned int iphdr_len;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_IP))
+ iphdr_len = sizeof(struct iphdr);
+ else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
+ iphdr_len = sizeof(struct ipv6hdr);
+ else
+ return 0;
+
+ if (skb_headlen(skb) < iphdr_len)
+ return 0;
+
+ if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
+ return 0;
+
+ return INET_ECN_set_ce(skb);
+}
+
+static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
+ .func = bpf_skb_ecn_set_ce,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
#endif /* CONFIG_INET */
bool bpf_helper_changes_pkt_data(void *func)
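Together with the new sk field in __sk_buff (added further below), these helpers let a cgroup skb program walk from the packet to full TCP socket state and, with bpf_skb_ecn_set_ce(), mark congestion. A minimal egress sketch, assuming libbpf's bpf_helpers.h; the cwnd threshold is purely illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int mark_ce_when_cwnd_small(struct __sk_buff *skb)
{
	struct bpf_sock *sk;
	struct bpf_tcp_sock *tp;

	sk = skb->sk;			/* may be a request/timewait sock */
	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);	/* upgrade to a full socket */
	if (!sk)
		return 1;
	tp = bpf_tcp_sock(sk);		/* NULL for non-TCP sockets */
	if (!tp)
		return 1;

	if (tp->snd_cwnd < 10)		/* illustrative threshold */
		bpf_skb_ecn_set_ce(skb);

	return 1;			/* 1 == allow the packet */
}

char _license[] SEC("license") = "GPL";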
@@ -5274,7 +5477,8 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_lwt_seg6_adjust_srh ||
func == bpf_lwt_seg6_action ||
#endif
- func == bpf_lwt_push_encap)
+ func == bpf_lwt_in_push_encap ||
+ func == bpf_lwt_xmit_push_encap)
return true;
return false;
@@ -5306,10 +5510,20 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_tail_call_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
+ default:
+ break;
+ }
+
+ if (!capable(CAP_SYS_ADMIN))
+ return NULL;
+
+ switch (func_id) {
+ case BPF_FUNC_spin_lock:
+ return &bpf_spin_lock_proto;
+ case BPF_FUNC_spin_unlock:
+ return &bpf_spin_unlock_proto;
case BPF_FUNC_trace_printk:
- if (capable(CAP_SYS_ADMIN))
- return bpf_get_trace_printk_proto();
- /* else: fall through */
+ return bpf_get_trace_printk_proto();
default:
return NULL;
}
@@ -5388,6 +5602,14 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
switch (func_id) {
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+ case BPF_FUNC_sk_fullsock:
+ return &bpf_sk_fullsock_proto;
+#ifdef CONFIG_INET
+ case BPF_FUNC_tcp_sock:
+ return &bpf_tcp_sock_proto;
+ case BPF_FUNC_skb_ecn_set_ce:
+ return &bpf_skb_ecn_set_ce_proto;
+#endif
default:
return sk_filter_func_proto(func_id, prog);
}
@@ -5459,6 +5681,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_uid_proto;
case BPF_FUNC_fib_lookup:
return &bpf_skb_fib_lookup_proto;
+ case BPF_FUNC_sk_fullsock:
+ return &bpf_sk_fullsock_proto;
#ifdef CONFIG_XFRM
case BPF_FUNC_skb_get_xfrm_state:
return &bpf_skb_get_xfrm_state_proto;
@@ -5476,6 +5700,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_lookup_udp_proto;
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
+ case BPF_FUNC_tcp_sock:
+ return &bpf_tcp_sock_proto;
#endif
default:
return bpf_base_func_proto(func_id);
@@ -5652,7 +5878,7 @@ lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_lwt_push_encap:
- return &bpf_lwt_push_encap_proto;
+ return &bpf_lwt_in_push_encap_proto;
default:
return lwt_out_func_proto(func_id, prog);
}
@@ -5688,6 +5914,8 @@ lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_l4_csum_replace_proto;
case BPF_FUNC_set_hash_invalid:
return &bpf_set_hash_invalid_proto;
+ case BPF_FUNC_lwt_push_encap:
+ return &bpf_lwt_xmit_push_encap_proto;
default:
return lwt_out_func_proto(func_id, prog);
}
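On the program side, the split means lwt_in programs get the ingress flavour of BPF_LWT_ENCAP_IP and lwt_xmit programs the egress one, both through the same bpf_lwt_push_encap() helper; after pushing a routable header the program returns BPF_LWT_REROUTE so the stack redoes the route lookup (see lwt_bpf.c below). A sketch of an lwt_xmit program pushing an outer IPv4/GRE header; the header contents are illustrative only, and a real program must fill in addresses, total length and so on (for example from a map):

#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

struct grehdr {			/* base GRE header, no options */
	__be16 flags;
	__be16 protocol;
};

SEC("lwt_xmit")
int encap_gre(struct __sk_buff *skb)
{
	struct {
		struct iphdr ip;
		struct grehdr gre;
	} __attribute__((packed)) hdr = {
		.ip = {
			.version  = 4,
			.ihl	  = 5,
			.ttl	  = 64,
			.protocol = IPPROTO_GRE,
			/* saddr/daddr/tot_len left out for brevity; the
			 * kernel fills iph->check if it is left at zero
			 */
		},
		.gre.protocol = bpf_htons(ETH_P_IP),
	};

	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr, sizeof(hdr)))
		return BPF_DROP;

	return BPF_LWT_REROUTE;	/* re-run the route lookup for the new header */
}

char _license[] SEC("license") = "GPL";

Such programs are attached through the lwtunnel machinery, e.g. iproute2's "encap bpf xmit obj prog.o section lwt_xmit" route attribute; the ingress case works the same way from an lwt_in section.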
@@ -5746,6 +5974,11 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (size != sizeof(__u64))
return false;
break;
+ case offsetof(struct __sk_buff, sk):
+ if (type == BPF_WRITE || size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
+ break;
default:
/* Only narrow read access allowed for now. */
if (type == BPF_WRITE) {
@@ -5917,31 +6150,44 @@ full_access:
return true;
}
-static bool __sock_filter_check_size(int off, int size,
+bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
- const int size_default = sizeof(__u32);
-
switch (off) {
- case bpf_ctx_range(struct bpf_sock, src_ip4):
- case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
- bpf_ctx_record_field_size(info, size_default);
- return bpf_ctx_narrow_access_ok(off, size, size_default);
+ case bpf_ctx_range_till(struct bpf_sock, type, priority):
+ return false;
+ default:
+ return bpf_sock_is_valid_access(off, size, type, info);
}
-
- return size == size_default;
}
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
+ const int size_default = sizeof(__u32);
+
if (off < 0 || off >= sizeof(struct bpf_sock))
return false;
if (off % size != 0)
return false;
- if (!__sock_filter_check_size(off, size, info))
- return false;
- return true;
+
+ switch (off) {
+ case offsetof(struct bpf_sock, state):
+ case offsetof(struct bpf_sock, family):
+ case offsetof(struct bpf_sock, type):
+ case offsetof(struct bpf_sock, protocol):
+ case offsetof(struct bpf_sock, dst_port):
+ case offsetof(struct bpf_sock, src_port):
+ case bpf_ctx_range(struct bpf_sock, src_ip4):
+ case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
+ case bpf_ctx_range(struct bpf_sock, dst_ip4):
+ case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
+ bpf_ctx_record_field_size(info, size_default);
+ return bpf_ctx_narrow_access_ok(off, size, size_default);
+ }
+
+ return size == size_default;
}
static bool sock_filter_is_valid_access(int off, int size,
@@ -6057,6 +6303,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, queue_mapping):
break;
default:
return false;
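With queue_mapping in the writable set for tc/clsact programs (the generated code below discards writes of NO_QUEUE_MAPPING and above), a classifier can steer packets to a specific TX queue. A minimal sketch, assuming libbpf conventions; the queue index is illustrative:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int pick_tx_queue(struct __sk_buff *skb)
{
	skb->queue_mapping = 1;		/* illustrative queue index */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";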
@@ -6461,9 +6708,18 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct __sk_buff, queue_mapping):
- *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
- bpf_target_off(struct sk_buff, queue_mapping, 2,
- target_size));
+ if (type == BPF_WRITE) {
+ *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
+ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
+ bpf_target_off(struct sk_buff,
+ queue_mapping,
+ 2, target_size));
+ } else {
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+ bpf_target_off(struct sk_buff,
+ queue_mapping,
+ 2, target_size));
+ }
break;
case offsetof(struct __sk_buff, vlan_present):
@@ -6700,6 +6956,27 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
target_size));
break;
+ case offsetof(struct __sk_buff, gso_segs):
+ /* si->dst_reg = skb_shinfo(SKB); */
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, head));
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ BPF_REG_AX, si->src_reg,
+ offsetof(struct sk_buff, end));
+ *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
+#else
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, end));
+#endif
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
+ si->dst_reg, si->dst_reg,
+ bpf_target_off(struct skb_shared_info,
+ gso_segs, 2,
+ target_size));
+ break;
case offsetof(struct __sk_buff, wire_len):
BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
@@ -6709,6 +6986,13 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
off += offsetof(struct qdisc_skb_cb, pkt_len);
*target_size = 4;
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
+ break;
+
+ case offsetof(struct __sk_buff, sk):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, sk));
+ break;
}
return insn - insn_buf;
@@ -6757,24 +7041,32 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct bpf_sock, family):
- BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);
-
- *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
- offsetof(struct sock, sk_family));
+ *insn++ = BPF_LDX_MEM(
+ BPF_FIELD_SIZEOF(struct sock_common, skc_family),
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common,
+ skc_family,
+ FIELD_SIZEOF(struct sock_common,
+ skc_family),
+ target_size));
break;
case offsetof(struct bpf_sock, type):
+ BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, __sk_flags_offset));
*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
+ *target_size = 2;
break;
case offsetof(struct bpf_sock, protocol):
+ BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, __sk_flags_offset));
*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
+ *target_size = 1;
break;
case offsetof(struct bpf_sock, src_ip4):
@@ -6786,6 +7078,15 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
target_size));
break;
+ case offsetof(struct bpf_sock, dst_ip4):
+ *insn++ = BPF_LDX_MEM(
+ BPF_SIZE(si->code), si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common, skc_daddr,
+ FIELD_SIZEOF(struct sock_common,
+ skc_daddr),
+ target_size));
+ break;
+
case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
off = si->off;
@@ -6804,6 +7105,23 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
#endif
break;
+ case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
+#if IS_ENABLED(CONFIG_IPV6)
+ off = si->off;
+ off -= offsetof(struct bpf_sock, dst_ip6[0]);
+ *insn++ = BPF_LDX_MEM(
+ BPF_SIZE(si->code), si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common,
+ skc_v6_daddr.s6_addr32[0],
+ FIELD_SIZEOF(struct sock_common,
+ skc_v6_daddr.s6_addr32[0]),
+ target_size) + off);
+#else
+ *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
+ *target_size = 4;
+#endif
+ break;
+
case offsetof(struct bpf_sock, src_port):
*insn++ = BPF_LDX_MEM(
BPF_FIELD_SIZEOF(struct sock_common, skc_num),
@@ -6813,6 +7131,26 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
skc_num),
target_size));
break;
+
+ case offsetof(struct bpf_sock, dst_port):
+ *insn++ = BPF_LDX_MEM(
+ BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common, skc_dport,
+ FIELD_SIZEOF(struct sock_common,
+ skc_dport),
+ target_size));
+ break;
+
+ case offsetof(struct bpf_sock, state):
+ *insn++ = BPF_LDX_MEM(
+ BPF_FIELD_SIZEOF(struct sock_common, skc_state),
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sock_common, skc_state,
+ FIELD_SIZEOF(struct sock_common,
+ skc_state),
+ target_size));
+ break;
}
return insn - insn_buf;
@@ -7060,6 +7398,85 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn = insn_buf;
int off;
+/* Helper macro for adding read access to tcp_sock or sock fields. */
+#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
+ do { \
+ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
+ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, \
+ is_fullsock), \
+ si->dst_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ is_fullsock)); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, sk),\
+ si->dst_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, sk));\
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
+ OBJ_FIELD), \
+ si->dst_reg, si->dst_reg, \
+ offsetof(OBJ, OBJ_FIELD)); \
+ } while (0)
+
+#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
+ SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
+
+/* Helper macro for adding write access to tcp_sock or sock fields.
+ * The macro is called with two registers, dst_reg which contains a pointer
+ * to ctx (context) and src_reg which contains the value that should be
+ * stored. However, we need an additional register since we cannot overwrite
+ * dst_reg because it may be used later in the program.
+ * Instead we "borrow" one of the other register. We first save its value
+ * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
+ * it at the end of the macro.
+ */
+#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
+ do { \
+ int reg = BPF_REG_9; \
+ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
+ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, \
+ is_fullsock), \
+ reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ is_fullsock)); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, sk),\
+ reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, sk));\
+ *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
+ reg, si->src_reg, \
+ offsetof(OBJ, OBJ_FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ } while (0)
+
+#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
+ do { \
+ if (TYPE == BPF_WRITE) \
+ SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
+ else \
+ SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
+ } while (0)
+
+ CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_sock_ops,
+ SOCK_OPS_GET_TCP_SOCK_FIELD);
+
+ if (insn > insn_buf)
+ return insn - insn_buf;
+
switch (si->off) {
case offsetof(struct bpf_sock_ops, op) ...
offsetof(struct bpf_sock_ops, replylong[3]):
@@ -7217,175 +7634,15 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
FIELD_SIZEOF(struct minmax_sample, t));
break;
-/* Helper macro for adding read access to tcp_sock or sock fields. */
-#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
- do { \
- BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
- FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
- struct bpf_sock_ops_kern, \
- is_fullsock), \
- si->dst_reg, si->src_reg, \
- offsetof(struct bpf_sock_ops_kern, \
- is_fullsock)); \
- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
- struct bpf_sock_ops_kern, sk),\
- si->dst_reg, si->src_reg, \
- offsetof(struct bpf_sock_ops_kern, sk));\
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
- OBJ_FIELD), \
- si->dst_reg, si->dst_reg, \
- offsetof(OBJ, OBJ_FIELD)); \
- } while (0)
-
-/* Helper macro for adding write access to tcp_sock or sock fields.
- * The macro is called with two registers, dst_reg which contains a pointer
- * to ctx (context) and src_reg which contains the value that should be
- * stored. However, we need an additional register since we cannot overwrite
- * dst_reg because it may be used later in the program.
- * Instead we "borrow" one of the other register. We first save its value
- * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
- * it at the end of the macro.
- */
-#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
- do { \
- int reg = BPF_REG_9; \
- BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
- FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
- if (si->dst_reg == reg || si->src_reg == reg) \
- reg--; \
- if (si->dst_reg == reg || si->src_reg == reg) \
- reg--; \
- *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
- offsetof(struct bpf_sock_ops_kern, \
- temp)); \
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
- struct bpf_sock_ops_kern, \
- is_fullsock), \
- reg, si->dst_reg, \
- offsetof(struct bpf_sock_ops_kern, \
- is_fullsock)); \
- *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
- struct bpf_sock_ops_kern, sk),\
- reg, si->dst_reg, \
- offsetof(struct bpf_sock_ops_kern, sk));\
- *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
- reg, si->src_reg, \
- offsetof(OBJ, OBJ_FIELD)); \
- *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
- offsetof(struct bpf_sock_ops_kern, \
- temp)); \
- } while (0)
-
-#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
- do { \
- if (TYPE == BPF_WRITE) \
- SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
- else \
- SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
- } while (0)
-
- case offsetof(struct bpf_sock_ops, snd_cwnd):
- SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, srtt_us):
- SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
- break;
-
case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
struct tcp_sock);
break;
- case offsetof(struct bpf_sock_ops, snd_ssthresh):
- SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, rcv_nxt):
- SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, snd_nxt):
- SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, snd_una):
- SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, mss_cache):
- SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, ecn_flags):
- SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, rate_delivered):
- SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, rate_interval_us):
- SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, packets_out):
- SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, retrans_out):
- SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, total_retrans):
- SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, segs_in):
- SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, data_segs_in):
- SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, segs_out):
- SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, data_segs_out):
- SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, lost_out):
- SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, sacked_out):
- SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
- break;
-
case offsetof(struct bpf_sock_ops, sk_txhash):
SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
struct sock, type);
break;
-
- case offsetof(struct bpf_sock_ops, bytes_received):
- SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
- struct tcp_sock);
- break;
-
- case offsetof(struct bpf_sock_ops, bytes_acked):
- SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
- break;
-
}
return insn - insn_buf;
}
@@ -7690,6 +7947,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
};
const struct bpf_prog_ops flow_dissector_prog_ops = {
+ .test_run = bpf_prog_test_run_flow_dissector,
};
int sk_detach_filter(struct sock *sk)
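With .test_run wired up, a flow dissector program can be exercised from userspace over a crafted packet, without generating any traffic, via BPF_PROG_TEST_RUN; the dissected bpf_flow_keys come back through the data_out buffer. A sketch using libbpf's bpf_prog_test_run() wrapper, assuming prog_fd and the packet buffer are prepared by the caller:

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int run_flow_dissector(int prog_fd, void *pkt, __u32 pkt_len,
			      struct bpf_flow_keys *keys, __u32 *retval)
{
	__u32 keys_len = sizeof(*keys);
	__u32 duration;

	/* retval holds the program's BPF_OK/BPF_DROP return code */
	return bpf_prog_test_run(prog_fd, 1, pkt, pkt_len,
				 keys, &keys_len, retval, &duration);
}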
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9f2840510e63..bb1a54747d64 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -683,6 +683,46 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
}
}
+bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
+ const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ struct bpf_flow_keys *flow_keys)
+{
+ struct bpf_skb_data_end cb_saved;
+ struct bpf_skb_data_end *cb;
+ u32 result;
+
+ /* Note that even though the const qualifier is discarded
+ * throughout the execution of the BPF program, all changes (to the
+ * control block) are reverted after the BPF program returns.
+ * Therefore, __skb_flow_dissect does not alter the skb.
+ */
+
+ cb = (struct bpf_skb_data_end *)skb->cb;
+
+ /* Save Control Block */
+ memcpy(&cb_saved, cb, sizeof(cb_saved));
+ memset(cb, 0, sizeof(*cb));
+
+ /* Pass parameters to the BPF program */
+ memset(flow_keys, 0, sizeof(*flow_keys));
+ cb->qdisc_cb.flow_keys = flow_keys;
+ flow_keys->nhoff = skb_network_offset(skb);
+ flow_keys->thoff = flow_keys->nhoff;
+
+ bpf_compute_data_pointers((struct sk_buff *)skb);
+ result = BPF_PROG_RUN(prog, skb);
+
+ /* Restore state */
+ memcpy(cb, &cb_saved, sizeof(cb_saved));
+
+ flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+ flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
+ flow_keys->nhoff, skb->len);
+
+ return result == BPF_OK;
+}
+
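For reference, the program run here sees the packet through a __sk_buff whose flow_keys pointer is the struct initialized above (nhoff/thoff preset to the network offset) and is expected to fill in the remaining fields before returning BPF_OK, or BPF_DROP on a parse failure. A bare skeleton, assuming libbpf's headers; a real dissector parses headers from keys->nhoff, as in the bpf_flow selftest:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("flow_dissector")
int dissect(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	keys->n_proto = skb->protocol;
	if (skb->protocol != bpf_htons(ETH_P_IP))
		return BPF_DROP;	/* only IPv4 in this sketch */

	/* parse the IPv4 header at keys->nhoff (e.g. with
	 * bpf_skb_load_bytes()), then set keys->addr_proto,
	 * keys->ip_proto, keys->thoff, addresses and ports here
	 */
	return BPF_OK;
}

char _license[] SEC("license") = "GPL";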
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -714,7 +754,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_vlan *key_vlan;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
- struct bpf_prog *attached = NULL;
int num_hdrs = 0;
u8 ip_proto = 0;
bool ret;
@@ -754,53 +793,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
- rcu_read_lock();
if (skb) {
+ struct bpf_flow_keys flow_keys;
+ struct bpf_prog *attached = NULL;
+
+ rcu_read_lock();
+
if (skb->dev)
attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
else if (skb->sk)
attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
else
WARN_ON_ONCE(1);
- }
- if (attached) {
- /* Note that even though the const qualifier is discarded
- * throughout the execution of the BPF program, all changes(the
- * control block) are reverted after the BPF program returns.
- * Therefore, __skb_flow_dissect does not alter the skb.
- */
- struct bpf_flow_keys flow_keys = {};
- struct bpf_skb_data_end cb_saved;
- struct bpf_skb_data_end *cb;
- u32 result;
-
- cb = (struct bpf_skb_data_end *)skb->cb;
-
- /* Save Control Block */
- memcpy(&cb_saved, cb, sizeof(cb_saved));
- memset(cb, 0, sizeof(cb_saved));
- /* Pass parameters to the BPF program */
- cb->qdisc_cb.flow_keys = &flow_keys;
- flow_keys.nhoff = nhoff;
- flow_keys.thoff = nhoff;
-
- bpf_compute_data_pointers((struct sk_buff *)skb);
- result = BPF_PROG_RUN(attached, skb);
-
- /* Restore state */
- memcpy(cb, &cb_saved, sizeof(cb_saved));
-
- flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
- flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
- flow_keys.nhoff, skb->len);
-
- __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
- target_container);
+ if (attached) {
+ ret = __skb_flow_bpf_dissect(attached, skb,
+ flow_dissector,
+ &flow_keys);
+ __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+ target_container);
+ rcu_read_unlock();
+ return ret;
+ }
rcu_read_unlock();
- return result == BPF_OK;
}
- rcu_read_unlock();
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
new file mode 100644
index 000000000000..c3a00eac4804
--- /dev/null
+++ b/net/core/flow_offload.c
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <net/flow_offload.h>
+
+struct flow_rule *flow_rule_alloc(unsigned int num_actions)
+{
+ struct flow_rule *rule;
+
+ rule = kzalloc(sizeof(struct flow_rule) +
+ sizeof(struct flow_action_entry) * num_actions,
+ GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ rule->action.num_entries = num_actions;
+
+ return rule;
+}
+EXPORT_SYMBOL(flow_rule_alloc);
+
+#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
+ const struct flow_match *__m = &(__rule)->match; \
+ struct flow_dissector *__d = (__m)->dissector; \
+ \
+ (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \
+ (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+ struct flow_match_basic *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
+}
+EXPORT_SYMBOL(flow_rule_match_basic);
+
+void flow_rule_match_control(const struct flow_rule *rule,
+ struct flow_match_control *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
+}
+EXPORT_SYMBOL(flow_rule_match_control);
+
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+ struct flow_match_eth_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_eth_addrs);
+
+void flow_rule_match_vlan(const struct flow_rule *rule,
+ struct flow_match_vlan *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
+}
+EXPORT_SYMBOL(flow_rule_match_vlan);
+
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);
+
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);
+
+void flow_rule_match_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ip);
+
+void flow_rule_match_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ports);
+
+void flow_rule_match_tcp(const struct flow_rule *rule,
+ struct flow_match_tcp *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_tcp);
+
+void flow_rule_match_icmp(const struct flow_rule *rule,
+ struct flow_match_icmp *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_icmp);
+
+void flow_rule_match_mpls(const struct flow_rule *rule,
+ struct flow_match_mpls *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_mpls);
+
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+ struct flow_match_control *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_control);
+
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);
+
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);
+
+void flow_rule_match_enc_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ip);
+
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ports);
+
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+ struct flow_match_enc_keyid *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_keyid);
+
+void flow_rule_match_enc_opts(const struct flow_rule *rule,
+ struct flow_match_enc_opts *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_opts);
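These typed accessors are the driver-facing half of the new representation: an offload callback checks which keys are present and extracts them through flow_rule_match_*() instead of poking at the dissector directly. A hypothetical sketch (foo_* is a placeholder driver; flow_rule_match_key() is the companion presence check from flow_offload.h):

static int foo_parse_flow_rule(struct foo_hw *hw, struct flow_rule *rule)
{
	struct flow_match_basic basic;
	struct flow_match_ipv4_addrs addrs;

	flow_rule_match_basic(rule, &basic);
	if (basic.key->n_proto != htons(ETH_P_IP))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &addrs);
		/* program addrs.key->src/dst, masked by addrs.mask,
		 * into hw's filter table
		 */
	}

	return 0;
}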
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 9bf1b9ad1780..ac679f74ba47 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -291,7 +291,6 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
for_each_possible_cpu(i) {
const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
- qstats->qlen = 0;
qstats->backlog += qcpu->backlog;
qstats->drops += qcpu->drops;
qstats->requeues += qcpu->requeues;
@@ -307,7 +306,6 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
if (cpu) {
__gnet_stats_copy_queue_cpu(qstats, cpu);
} else {
- qstats->qlen = q->qlen;
qstats->backlog = q->backlog;
qstats->drops = q->drops;
qstats->requeues = q->requeues;
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437f7106..cf2f8897ca19 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -16,6 +16,8 @@
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
+#include <net/gre.h>
+#include <net/ip6_route.h>
struct bpf_lwt_prog {
struct bpf_prog *prog;
@@ -55,6 +57,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
switch (ret) {
case BPF_OK:
+ case BPF_LWT_REROUTE:
break;
case BPF_REDIRECT:
@@ -63,6 +66,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
lwt->name ? : "<unknown>");
ret = BPF_OK;
} else {
+ skb_reset_mac_header(skb);
ret = skb_do_redirect(skb);
if (ret == 0)
ret = BPF_REDIRECT;
@@ -86,6 +90,30 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
return ret;
}
+static int bpf_lwt_input_reroute(struct sk_buff *skb)
+{
+ int err = -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ iph->tos, skb_dst(skb)->dev);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ err = ipv6_stub->ipv6_route_input(skb);
+ } else {
+ err = -EAFNOSUPPORT;
+ }
+
+ if (err)
+ goto err;
+ return dst_input(skb);
+
+err:
+ kfree_skb(skb);
+ return err;
+}
+
static int bpf_input(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -97,11 +125,11 @@ static int bpf_input(struct sk_buff *skb)
ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
if (ret < 0)
return ret;
+ if (ret == BPF_LWT_REROUTE)
+ return bpf_lwt_input_reroute(skb);
}
if (unlikely(!dst->lwtstate->orig_input)) {
- pr_warn_once("orig_input not set on dst for prog %s\n",
- bpf->out.name);
kfree_skb(skb);
return -EINVAL;
}
@@ -146,6 +174,102 @@ static int xmit_check_hhlen(struct sk_buff *skb)
return 0;
}
+static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
+{
+ struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
+ int oif = l3mdev ? l3mdev->ifindex : 0;
+ struct dst_entry *dst = NULL;
+ int err = -EAFNOSUPPORT;
+ struct sock *sk;
+ struct net *net;
+ bool ipv4;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ ipv4 = true;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ ipv4 = false;
+ else
+ goto err;
+
+ sk = sk_to_full_sk(skb->sk);
+ if (sk) {
+ if (sk->sk_bound_dev_if)
+ oif = sk->sk_bound_dev_if;
+ net = sock_net(sk);
+ } else {
+ net = dev_net(skb_dst(skb)->dev);
+ }
+
+ if (ipv4) {
+ struct iphdr *iph = ip_hdr(skb);
+ struct flowi4 fl4 = {};
+ struct rtable *rt;
+
+ fl4.flowi4_oif = oif;
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_uid = sock_net_uid(net, sk);
+ fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
+ fl4.flowi4_proto = iph->protocol;
+ fl4.daddr = iph->daddr;
+ fl4.saddr = iph->saddr;
+
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto err;
+ }
+ dst = &rt->dst;
+ } else {
+ struct ipv6hdr *iph6 = ipv6_hdr(skb);
+ struct flowi6 fl6 = {};
+
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_uid = sock_net_uid(net, sk);
+ fl6.flowlabel = ip6_flowinfo(iph6);
+ fl6.flowi6_proto = iph6->nexthdr;
+ fl6.daddr = iph6->daddr;
+ fl6.saddr = iph6->saddr;
+
+ err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
+ if (unlikely(err))
+ goto err;
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err;
+ }
+ }
+ if (unlikely(dst->error)) {
+ err = dst->error;
+ dst_release(dst);
+ goto err;
+ }
+
+ /* Although skb headroom was reserved in bpf_lwt_push_ip_encap(), it
+ * was done for the previous dst, so do it here again in case the new
+ * dst needs more headroom. The call below is a no-op if the skb
+ * already has enough headroom.
+ */
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto err;
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+
+ err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+ if (unlikely(err))
+ return err;
+
+ /* ip[6]_finish_output2 understands LWTUNNEL_XMIT_DONE */
+ return LWTUNNEL_XMIT_DONE;
+
+err:
+ kfree_skb(skb);
+ return err;
+}
+
static int bpf_xmit(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -153,11 +277,20 @@ static int bpf_xmit(struct sk_buff *skb)
bpf = bpf_lwt_lwtunnel(dst->lwtstate);
if (bpf->xmit.prog) {
+ __be16 proto = skb->protocol;
int ret;
ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
switch (ret) {
case BPF_OK:
+ /* If the program changed the header, e.g. via bpf_lwt_push_encap,
+ * and also changed the protocol, it should have returned
+ * BPF_LWT_REROUTE (handled below) instead of BPF_OK.
+ */
+ if (skb->protocol != proto) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
/* If the header was expanded, headroom might be too
* small for L2 header to come, expand as needed.
*/
@@ -168,6 +301,8 @@ static int bpf_xmit(struct sk_buff *skb)
return LWTUNNEL_XMIT_CONTINUE;
case BPF_REDIRECT:
return LWTUNNEL_XMIT_DONE;
+ case BPF_LWT_REROUTE:
+ return bpf_lwt_xmit_reroute(skb);
default:
return ret;
}
@@ -389,6 +524,133 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
.owner = THIS_MODULE,
};
+static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
+ int encap_len)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_type |= gso_type;
+ skb_decrease_gso_size(shinfo, encap_len);
+ shinfo->gso_segs = 0;
+ return 0;
+}
+
+static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
+{
+ int next_hdr_offset;
+ void *next_hdr;
+ __u8 protocol;
+
+ /* SCTP and UDP_L4 gso need more nuanced handling than what
+ * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
+ * So at the moment only TCP GSO packets are let through.
+ */
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -ENOTSUPP;
+
+ if (ipv4) {
+ protocol = ip_hdr(skb)->protocol;
+ next_hdr_offset = sizeof(struct iphdr);
+ next_hdr = skb_network_header(skb) + next_hdr_offset;
+ } else {
+ protocol = ipv6_hdr(skb)->nexthdr;
+ next_hdr_offset = sizeof(struct ipv6hdr);
+ next_hdr = skb_network_header(skb) + next_hdr_offset;
+ }
+
+ switch (protocol) {
+ case IPPROTO_GRE:
+ next_hdr_offset += sizeof(struct gre_base_hdr);
+ if (next_hdr_offset > encap_len)
+ return -EINVAL;
+
+ if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
+ return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
+ encap_len);
+ return handle_gso_type(skb, SKB_GSO_GRE, encap_len);
+
+ case IPPROTO_UDP:
+ next_hdr_offset += sizeof(struct udphdr);
+ if (next_hdr_offset > encap_len)
+ return -EINVAL;
+
+ if (((struct udphdr *)next_hdr)->check)
+ return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
+ encap_len);
+ return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);
+
+ case IPPROTO_IP:
+ case IPPROTO_IPV6:
+ if (ipv4)
+ return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
+ else
+ return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+}
+
+int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
+{
+ struct iphdr *iph;
+ bool ipv4;
+ int err;
+
+ if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
+ return -EINVAL;
+
+ /* validate protocol and length */
+ iph = (struct iphdr *)hdr;
+ if (iph->version == 4) {
+ ipv4 = true;
+ if (unlikely(len < iph->ihl * 4))
+ return -EINVAL;
+ } else if (iph->version == 6) {
+ ipv4 = false;
+ if (unlikely(len < sizeof(struct ipv6hdr)))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ if (ingress)
+ err = skb_cow_head(skb, len + skb->mac_len);
+ else
+ err = skb_cow_head(skb,
+ len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
+ if (unlikely(err))
+ return err;
+
+ /* push the encap headers and fix pointers */
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ skb_push(skb, len);
+ if (ingress)
+ skb_postpush_rcsum(skb, iph, len);
+ skb_reset_network_header(skb);
+ memcpy(skb_network_header(skb), hdr, len);
+ bpf_compute_data_pointers(skb);
+ skb_clear_hash(skb);
+
+ if (ipv4) {
+ skb->protocol = htons(ETH_P_IP);
+ iph = ip_hdr(skb);
+
+ if (!iph->check)
+ iph->check = ip_fast_csum((unsigned char *)iph,
+ iph->ihl);
+ } else {
+ skb->protocol = htons(ETH_P_IPV6);
+ }
+
+ if (skb_is_gso(skb))
+ return handle_gso_encap(skb, ipv4, len);
+
+ return 0;
+}
+
static int __init bpf_lwt_init(void)
{
return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 0b171756453c..19b557bd294b 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -122,18 +122,18 @@ int lwtunnel_build_state(u16 encap_type,
ret = -EOPNOTSUPP;
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[encap_type]);
- if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
+ if (likely(ops && ops->build_state && try_module_get(ops->owner)))
found = true;
+ rcu_read_unlock();
+
+ if (found) {
ret = ops->build_state(encap, family, cfg, lws, extack);
if (ret)
module_put(ops->owner);
- }
- rcu_read_unlock();
-
- /* don't rely on -EOPNOTSUPP to detect match as build_state
- * handlers could return it
- */
- if (!found) {
+ } else {
+ /* don't rely on -EOPNOTSUPP to detect a match, as build_state
+ * handlers could return it
+ */
NL_SET_ERR_MSG_ATTR(extack, encap,
"LWT encapsulation type not supported");
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 763a7b08df67..30f6fd8f68e0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -41,6 +42,8 @@
#include <linux/inetdevice.h>
#include <net/addrconf.h>
+#include <trace/events/neigh.h>
+
#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...) \
@@ -101,6 +104,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
if (neigh->parms->neigh_cleanup)
neigh->parms->neigh_cleanup(neigh);
+ trace_neigh_cleanup_and_release(neigh, 0);
__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
neigh_release(neigh);
@@ -443,12 +447,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
return NULL;
- if (size <= PAGE_SIZE)
+ if (size <= PAGE_SIZE) {
buckets = kzalloc(size, GFP_ATOMIC);
- else
+ } else {
buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
+ kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
+ }
if (!buckets) {
kfree(ret);
return NULL;
@@ -468,10 +474,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
struct neighbour __rcu **buckets = nht->hash_buckets;
- if (size <= PAGE_SIZE)
+ if (size <= PAGE_SIZE) {
kfree(buckets);
- else
+ } else {
+ kmemleak_free(buckets);
free_pages((unsigned long)buckets, get_order(size));
+ }
kfree(nht);
}
@@ -1002,7 +1010,7 @@ static void neigh_probe(struct neighbour *neigh)
if (neigh->ops->solicit)
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
- kfree_skb(skb);
+ consume_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */
@@ -1090,6 +1098,8 @@ out:
if (notify)
neigh_update_notify(neigh, 0);
+ trace_neigh_timer_handler(neigh, 0);
+
neigh_release(neigh);
}
@@ -1160,6 +1170,7 @@ out_unlock_bh:
else
write_unlock(&neigh->lock);
local_bh_enable();
+ trace_neigh_event_send_done(neigh, rc);
return rc;
out_dead:
@@ -1167,6 +1178,7 @@ out_dead:
goto out_unlock_bh;
write_unlock_bh(&neigh->lock);
kfree_skb(skb);
+ trace_neigh_event_send_dead(neigh, 1);
return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
@@ -1222,6 +1234,8 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
struct net_device *dev;
int update_isrouter = 0;
+ trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
+
write_lock_bh(&neigh->lock);
dev = neigh->dev;
@@ -1388,6 +1402,8 @@ out:
if (notify)
neigh_update_notify(neigh, nlmsg_pid);
+ trace_neigh_update_done(neigh, err);
+
return err;
}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index ff9fd2bb4ce4..4ff661f6f989 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -12,7 +12,6 @@
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
@@ -501,16 +500,11 @@ static ssize_t phys_switch_id_show(struct device *dev,
return restart_syscall();
if (dev_isalive(netdev)) {
- struct switchdev_attr attr = {
- .orig_dev = netdev,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- .flags = SWITCHDEV_F_NO_RECURSE,
- };
+ struct netdev_phys_item_id ppid = { };
- ret = switchdev_port_attr_get(netdev, &attr);
+ ret = dev_get_port_parent_id(netdev, &ppid, false);
if (!ret)
- ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
- attr.u.ppid.id);
+ ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
}
rtnl_unlock();
@@ -1342,8 +1336,7 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
if (tc < 0)
return -EINVAL;
}
- mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
- GFP_KERNEL);
+ mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
if (!mask)
return -ENOMEM;
@@ -1372,7 +1365,7 @@ out_no_maps:
rcu_read_unlock();
len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
- kfree(mask);
+ bitmap_free(mask);
return len < PAGE_SIZE ? len : -EINVAL;
}
@@ -1388,8 +1381,7 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
- GFP_KERNEL);
+ mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
if (!mask)
return -ENOMEM;
@@ -1397,7 +1389,7 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
if (err) {
- kfree(mask);
+ bitmap_free(mask);
return err;
}
@@ -1405,7 +1397,7 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
err = __netif_set_xps_queue(dev, mask, index, true);
cpus_read_unlock();
- kfree(mask);
+ bitmap_free(mask);
return err ? : len;
}
@@ -1547,6 +1539,9 @@ static int register_queue_kobjects(struct net_device *dev)
error:
netdev_queue_update_kobjects(dev, txq, 0);
net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+ kset_unregister(dev->queues_kset);
+#endif
return error;
}
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 419af6dfe29f..470b179d599e 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -43,6 +43,14 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(fdb_delete);
EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_update);
#endif
+#include <trace/events/neigh.h>
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_update_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_timer_handler);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_event_send_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_event_send_dead);
+EXPORT_TRACEPOINT_SYMBOL_GPL(neigh_cleanup_and_release);
+
EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
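Once exported, the neighbour tracepoints added above can be consumed like any other event group, for instance by enabling them under tracefs (typically /sys/kernel/debug/tracing/events/neigh/ or /sys/kernel/tracing/events/neigh/) or by attaching ftrace/BPF probes to them; the "neigh" group name is the one assumed to be declared in the new trace/events/neigh.h header.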
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b02fb19df2cc..17f36317363d 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -778,6 +778,41 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int rtnl_net_valid_getid_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETNSA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETNSA_PID:
+ case NETNSA_FD:
+ case NETNSA_NSID:
+ case NETNSA_TARGET_NSID:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -793,8 +828,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int err;
- err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
- rtnl_net_policy, extack);
+ err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
if (err < 0)
return err;
if (tb[NETNSA_PID]) {
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 43a932cb609b..5b2252c6d49b 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -136,17 +136,19 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
goto skip_dma_map;
- /* Setup DMA mapping: use page->private for DMA-addr
+ /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
+ * since dma_addr_t can be either 32 or 64 bits and does not always fit
+ * into the page private data (e.g. a 32-bit CPU with 64-bit DMA caps).
* This mapping is kept for lifetime of page, until leaving pool.
*/
- dma = dma_map_page(pool->p.dev, page, 0,
- (PAGE_SIZE << pool->p.order),
- pool->p.dma_dir);
+ dma = dma_map_page_attrs(pool->p.dev, page, 0,
+ (PAGE_SIZE << pool->p.order),
+ pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(pool->p.dev, dma)) {
put_page(page);
return NULL;
}
- set_page_private(page, dma); /* page->private = dma; */
+ page->dma_addr = dma;
skip_dma_map:
/* When the page is just alloc'ed, it should/must have refcnt 1. */
@@ -175,13 +177,17 @@ EXPORT_SYMBOL(page_pool_alloc_pages);
static void __page_pool_clean_page(struct page_pool *pool,
struct page *page)
{
+ dma_addr_t dma;
+
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
return;
+ dma = page->dma_addr;
/* DMA unmap */
- dma_unmap_page(pool->p.dev, page_private(page),
- PAGE_SIZE << pool->p.order, pool->p.dma_dir);
- set_page_private(page, 0);
+ dma_unmap_page_attrs(pool->p.dev, dma,
+ PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ page->dma_addr = 0;
}
/* Return a page to the page allocator, cleaning up our state */
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6ac919847ce6..f3f5a78cd062 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -158,6 +158,7 @@
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
+#include <linux/mmzone.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
@@ -3625,7 +3626,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->svlan_cfi = 0;
pkt_dev->svlan_id = 0xffff;
pkt_dev->burst = 1;
- pkt_dev->node = -1;
+ pkt_dev->node = NUMA_NO_NODE;
err = pktgen_setup_dev(t->net, pkt_dev, ifname);
if (err)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5ea1bed08ede..a51cab95ba64 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -46,7 +46,6 @@
#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <net/switchdev.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
@@ -1146,22 +1145,17 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
+ struct netdev_phys_item_id ppid = { };
int err;
- struct switchdev_attr attr = {
- .orig_dev = dev,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- .flags = SWITCHDEV_F_NO_RECURSE,
- };
- err = switchdev_port_attr_get(dev, &attr);
+ err = dev_get_port_parent_id(dev, &ppid, false);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
return err;
}
- if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
- attr.u.ppid.id))
+ if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
return -EMSGSIZE;
return 0;
@@ -3242,6 +3236,53 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
return ret;
}
+static int rtnl_valid_getlink_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct ifinfomsg *ifm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for get link");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
+ extack);
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+ ifm->ifi_change) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
+ extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= IFLA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case IFLA_IFNAME:
+ case IFLA_EXT_MASK:
+ case IFLA_TARGET_NETNSID:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -3256,7 +3297,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u32 ext_filter_mask = 0;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
+ err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
if (err < 0)
return err;
@@ -3639,7 +3680,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
const struct net_device_ops *ops = br_dev->netdev_ops;
err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
- nlh->nlmsg_flags);
+ nlh->nlmsg_flags, extack);
if (err)
goto out;
else
@@ -3651,7 +3692,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (dev->netdev_ops->ndo_fdb_add)
err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
vid,
- nlh->nlmsg_flags);
+ nlh->nlmsg_flags,
+ extack);
else
err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
nlh->nlmsg_flags);
@@ -4901,6 +4943,40 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
return size;
}
+static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
+ bool is_dump, struct netlink_ext_ack *extack)
+{
+ struct if_stats_msg *ifsm;
+
+ if (nlh->nlmsg_len < sizeof(*ifsm)) {
+ NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
+ return -EINVAL;
+ }
+
+ if (!strict_check)
+ return 0;
+
+ ifsm = nlmsg_data(nlh);
+
+ /* only requests using strict checks can pass data to influence
+ * the dump. The legacy exception is filter_mask.
+ */
+ if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
+ return -EINVAL;
+ }
+ if (nlmsg_attrlen(nlh, sizeof(*ifsm))) {
+ NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
+ return -EINVAL;
+ }
+ if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
+ NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -4912,8 +4988,10 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 filter_mask;
int err;
- if (nlmsg_len(nlh) < sizeof(*ifsm))
- return -EINVAL;
+ err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
+ false, extack);
+ if (err)
+ return err;
ifsm = nlmsg_data(nlh);
if (ifsm->ifindex > 0)
@@ -4965,27 +5043,11 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = net->dev_base_seq;
- if (nlmsg_len(cb->nlh) < sizeof(*ifsm)) {
- NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
- return -EINVAL;
- }
+ err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
+ if (err)
+ return err;
ifsm = nlmsg_data(cb->nlh);
-
- /* only requests using strict checks can pass data to influence
- * the dump. The legacy exception is filter_mask.
- */
- if (cb->strict_check) {
- if (ifsm->pad1 || ifsm->pad2 || ifsm->ifindex) {
- NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
- return -EINVAL;
- }
- if (nlmsg_attrlen(cb->nlh, sizeof(*ifsm))) {
- NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
- return -EINVAL;
- }
- }
-
filter_mask = ifsm->filter_mask;
if (!filter_mask) {
NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
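
Aside (not part of the patch): the new rtnl_valid_getlink_req()/rtnl_valid_stats_req() helpers only apply their stricter header and attribute checks when the requesting socket has opted in via netlink_strict_get_check(). A minimal userspace sketch of that opt-in follows; the fallback defines are assumptions mirroring the conventional values from <linux/netlink.h> for builds against older headers.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270			/* assumption: standard value */
#endif
#ifndef NETLINK_GET_STRICT_CHK
#define NETLINK_GET_STRICT_CHK 12	/* assumption: value in <linux/netlink.h> */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* GETLINK/stats requests on this socket now hit the strict paths. */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
		       &one, sizeof(one)) < 0)
		perror("NETLINK_GET_STRICT_CHK");
	close(fd);
	return 0;
}
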
diff --git a/net/core/scm.c b/net/core/scm.c
index b1ff8a441748..52ef219cf6df 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -29,6 +29,7 @@
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
+#include <linux/errqueue.h>
#include <linux/uaccess.h>
@@ -252,6 +253,32 @@ out:
}
EXPORT_SYMBOL(put_cmsg);
+void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
+{
+ struct scm_timestamping64 tss;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tss.ts); i++) {
+ tss.ts[i].tv_sec = tss_internal->ts[i].tv_sec;
+ tss.ts[i].tv_nsec = tss_internal->ts[i].tv_nsec;
+ }
+
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_NEW, sizeof(tss), &tss);
+}
+EXPORT_SYMBOL(put_cmsg_scm_timestamping64);
+
+void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
+{
+ struct scm_timestamping tss;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tss.ts); i++)
+ tss.ts[i] = timespec64_to_timespec(tss_internal->ts[i]);
+
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_OLD, sizeof(tss), &tss);
+}
+EXPORT_SYMBOL(put_cmsg_scm_timestamping);
+
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
struct cmsghdr __user *cm
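
Aside (not part of the patch): put_cmsg_scm_timestamping() and put_cmsg_scm_timestamping64() feed the SO_TIMESTAMPING_OLD/SO_TIMESTAMPING_NEW control messages. A hedged userspace sketch of requesting software RX timestamps is shown below; on kernels with the _OLD/_NEW split, the plain SO_TIMESTAMPING name resolves to the _OLD variant, and the fallback define is an assumption for older headers (asm-generic value).

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* assumption: asm-generic/socket.h value */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int flags = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Timestamps then arrive as scm_timestamping cmsgs on recvmsg(). */
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");
	close(fd);
	return 0;
}
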
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 37317ffec146..2415d9cb9b89 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
*/
void *netdev_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
void *napi_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);
@@ -5270,7 +5274,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
unsigned long chunk;
struct sk_buff *skb;
struct page *page;
- gfp_t gfp_head;
int i;
*errcode = -EMSGSIZE;
@@ -5280,12 +5283,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
if (npages > MAX_SKB_FRAGS)
return NULL;
- gfp_head = gfp_mask;
- if (gfp_head & __GFP_DIRECT_RECLAIM)
- gfp_head |= __GFP_RETRY_MAYFAIL;
-
*errcode = -ENOBUFS;
- skb = alloc_skb(header_len, gfp_head);
+ skb = alloc_skb(header_len, gfp_mask);
if (!skb)
return NULL;
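
Aside (not part of the patch): the netdev_alloc_frag()/napi_alloc_frag() change rounds the requested fragment size up with SKB_DATA_ALIGN before carving it out of the page-frag cache, so returned frags stay cacheline-aligned. A standalone sketch of that rounding follows; the 64-byte cacheline is an assumption standing in for SMP_CACHE_BYTES.

#include <stdio.h>

#define CACHE_BYTES	64UL			/* assumption: SMP_CACHE_BYTES */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long sizes[] = { 1, 60, 64, 100, 1500 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%4lu -> %4lu\n", sizes[i], ALIGN_UP(sizes[i], CACHE_BYTES));
	return 0;
}
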
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index d6d5c20d7044..ae6f06e45737 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -78,11 +78,9 @@ int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
{
int i = src->sg.start;
struct scatterlist *sge = sk_msg_elem(src, i);
+ struct scatterlist *sgd = NULL;
u32 sge_len, sge_off;
- if (sk_msg_full(dst))
- return -ENOSPC;
-
while (off) {
if (sge->length > off)
break;
@@ -94,16 +92,27 @@ int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
}
while (len) {
- if (sk_msg_full(dst))
- return -ENOSPC;
-
sge_len = sge->length - off;
- sge_off = sge->offset + off;
if (sge_len > len)
sge_len = len;
+
+ if (dst->sg.end)
+ sgd = sk_msg_elem(dst, dst->sg.end - 1);
+
+ if (sgd &&
+ (sg_page(sge) == sg_page(sgd)) &&
+ (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
+ sgd->length += sge_len;
+ dst->sg.size += sge_len;
+ } else if (!sk_msg_full(dst)) {
+ sge_off = sge->offset + off;
+ sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
+ } else {
+ return -ENOSPC;
+ }
+
off = 0;
len -= sge_len;
- sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
sk_mem_charge(sk, sge_len);
sk_msg_iter_var_next(i);
if (i == src->sg.end && len)
@@ -545,8 +554,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
/* No sk_callback_lock since already detached. */
- if (psock->parser.enabled)
- strp_done(&psock->parser.strp);
+ strp_done(&psock->parser.strp);
cancel_work_sync(&psock->work);
diff --git a/net/core/sock.c b/net/core/sock.c
index 6aa2e7e0b4fb..782343bb925b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -335,14 +335,68 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(__sk_backlog_rcv);
-static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
+static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
- struct timeval tv;
+ struct __kernel_sock_timeval tv;
+ int size;
- if (optlen < sizeof(tv))
- return -EINVAL;
- if (copy_from_user(&tv, optval, sizeof(tv)))
- return -EFAULT;
+ if (timeo == MAX_SCHEDULE_TIMEOUT) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ } else {
+ tv.tv_sec = timeo / HZ;
+ tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
+ }
+
+ if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+ struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
+ *(struct old_timeval32 *)optval = tv32;
+ return sizeof(tv32);
+ }
+
+ if (old_timeval) {
+ struct __kernel_old_timeval old_tv;
+ old_tv.tv_sec = tv.tv_sec;
+ old_tv.tv_usec = tv.tv_usec;
+ *(struct __kernel_old_timeval *)optval = old_tv;
+ size = sizeof(old_tv);
+ } else {
+ *(struct __kernel_sock_timeval *)optval = tv;
+ size = sizeof(tv);
+ }
+
+ return size;
+}
+
+static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval)
+{
+ struct __kernel_sock_timeval tv;
+
+ if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+ struct old_timeval32 tv32;
+
+ if (optlen < sizeof(tv32))
+ return -EINVAL;
+
+ if (copy_from_user(&tv32, optval, sizeof(tv32)))
+ return -EFAULT;
+ tv.tv_sec = tv32.tv_sec;
+ tv.tv_usec = tv32.tv_usec;
+ } else if (old_timeval) {
+ struct __kernel_old_timeval old_tv;
+
+ if (optlen < sizeof(old_tv))
+ return -EINVAL;
+ if (copy_from_user(&old_tv, optval, sizeof(old_tv)))
+ return -EFAULT;
+ tv.tv_sec = old_tv.tv_sec;
+ tv.tv_usec = old_tv.tv_usec;
+ } else {
+ if (optlen < sizeof(tv))
+ return -EINVAL;
+ if (copy_from_user(&tv, optval, sizeof(tv)))
+ return -EFAULT;
+ }
if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
return -EDOM;
@@ -360,8 +414,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
*timeo_p = MAX_SCHEDULE_TIMEOUT;
if (tv.tv_sec == 0 && tv.tv_usec == 0)
return 0;
- if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
- *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ);
+ if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
+ *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
return 0;
}
@@ -520,14 +574,11 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
}
EXPORT_SYMBOL(sk_dst_check);
-static int sock_setbindtodevice(struct sock *sk, char __user *optval,
- int optlen)
+static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
{
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
struct net *net = sock_net(sk);
- char devname[IFNAMSIZ];
- int index;
/* Sorry... */
ret = -EPERM;
@@ -535,6 +586,32 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
goto out;
ret = -EINVAL;
+ if (ifindex < 0)
+ goto out;
+
+ sk->sk_bound_dev_if = ifindex;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ sk_dst_reset(sk);
+
+ ret = 0;
+
+out:
+#endif
+
+ return ret;
+}
+
+static int sock_setbindtodevice(struct sock *sk, char __user *optval,
+ int optlen)
+{
+ int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+ struct net *net = sock_net(sk);
+ char devname[IFNAMSIZ];
+ int index;
+
+ ret = -EINVAL;
if (optlen < 0)
goto out;
@@ -566,14 +643,9 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
}
lock_sock(sk);
- sk->sk_bound_dev_if = index;
- if (sk->sk_prot->rehash)
- sk->sk_prot->rehash(sk);
- sk_dst_reset(sk);
+ ret = sock_setbindtodevice_locked(sk, index);
release_sock(sk);
- ret = 0;
-
out:
#endif
@@ -713,6 +785,10 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
*/
val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
+ /* Ensure val * 2 fits into an int, to prevent max_t()
+ * from treating it as a negative value.
+ */
+ val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
/* Wake up sending tasks if we upped the value. */
@@ -724,6 +800,12 @@ set_sndbuf:
ret = -EPERM;
break;
}
+
+ /* No negative values (to prevent underflow, as val will be
+ * multiplied by 2).
+ */
+ if (val < 0)
+ val = 0;
goto set_sndbuf;
case SO_RCVBUF:
@@ -734,6 +816,10 @@ set_sndbuf:
*/
val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
+ /* Ensure val * 2 fits into an int, to prevent max_t()
+ * from treating it as a negative value.
+ */
+ val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
* We double it on the way in to account for
@@ -758,6 +844,12 @@ set_rcvbuf:
ret = -EPERM;
break;
}
+
+ /* No negative values (to prevent underflow, as val will be
+ * multiplied by 2).
+ */
+ if (val < 0)
+ val = 0;
goto set_rcvbuf;
case SO_KEEPALIVE:
@@ -815,10 +907,17 @@ set_rcvbuf:
clear_bit(SOCK_PASSCRED, &sock->flags);
break;
- case SO_TIMESTAMP:
- case SO_TIMESTAMPNS:
+ case SO_TIMESTAMP_OLD:
+ case SO_TIMESTAMP_NEW:
+ case SO_TIMESTAMPNS_OLD:
+ case SO_TIMESTAMPNS_NEW:
if (valbool) {
- if (optname == SO_TIMESTAMP)
+ if (optname == SO_TIMESTAMP_NEW || optname == SO_TIMESTAMPNS_NEW)
+ sock_set_flag(sk, SOCK_TSTAMP_NEW);
+ else
+ sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+
+ if (optname == SO_TIMESTAMP_OLD || optname == SO_TIMESTAMP_NEW)
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
else
sock_set_flag(sk, SOCK_RCVTSTAMPNS);
@@ -827,10 +926,14 @@ set_rcvbuf:
} else {
sock_reset_flag(sk, SOCK_RCVTSTAMP);
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+ sock_reset_flag(sk, SOCK_TSTAMP_NEW);
}
break;
- case SO_TIMESTAMPING:
+ case SO_TIMESTAMPING_NEW:
+ sock_set_flag(sk, SOCK_TSTAMP_NEW);
+ /* fall through */
+ case SO_TIMESTAMPING_OLD:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
break;
@@ -861,9 +964,13 @@ set_rcvbuf:
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
sock_enable_timestamp(sk,
SOCK_TIMESTAMPING_RX_SOFTWARE);
- else
+ else {
+ if (optname == SO_TIMESTAMPING_NEW)
+ sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+
sock_disable_timestamp(sk,
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
+ }
break;
case SO_RCVLOWAT:
@@ -875,12 +982,14 @@ set_rcvbuf:
sk->sk_rcvlowat = val ? : 1;
break;
- case SO_RCVTIMEO:
- ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
+ case SO_RCVTIMEO_OLD:
+ case SO_RCVTIMEO_NEW:
+ ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD);
break;
- case SO_SNDTIMEO:
- ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
+ case SO_SNDTIMEO_OLD:
+ case SO_SNDTIMEO_NEW:
+ ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD);
break;
case SO_ATTACH_FILTER:
@@ -999,15 +1108,23 @@ set_rcvbuf:
#endif
case SO_MAX_PACING_RATE:
- if (val != ~0U)
+ {
+ unsigned long ulval = (val == ~0U) ? ~0UL : val;
+
+ if (sizeof(ulval) != sizeof(val) &&
+ optlen >= sizeof(ulval) &&
+ get_user(ulval, (unsigned long __user *)optval)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (ulval != ~0UL)
cmpxchg(&sk->sk_pacing_status,
SK_PACING_NONE,
SK_PACING_NEEDED);
- sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
- sk->sk_pacing_rate = min(sk->sk_pacing_rate,
- sk->sk_max_pacing_rate);
+ sk->sk_max_pacing_rate = ulval;
+ sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
break;
-
+ }
case SO_INCOMING_CPU:
sk->sk_incoming_cpu = val;
break;
@@ -1055,6 +1172,10 @@ set_rcvbuf:
}
break;
+ case SO_BINDTOIFINDEX:
+ ret = sock_setbindtodevice_locked(sk, val);
+ break;
+
default:
ret = -ENOPROTOOPT;
break;
@@ -1098,8 +1219,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
union {
int val;
u64 val64;
+ unsigned long ulval;
struct linger ling;
- struct timeval tm;
+ struct old_timeval32 tm32;
+ struct __kernel_old_timeval tm;
+ struct __kernel_sock_timeval stm;
struct sock_txtime txtime;
} v;
@@ -1186,39 +1310,36 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
sock_warn_obsolete_bsdism("getsockopt");
break;
- case SO_TIMESTAMP:
+ case SO_TIMESTAMP_OLD:
v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
+ !sock_flag(sk, SOCK_TSTAMP_NEW) &&
!sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
- case SO_TIMESTAMPNS:
- v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
+ case SO_TIMESTAMPNS_OLD:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
break;
- case SO_TIMESTAMPING:
+ case SO_TIMESTAMP_NEW:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
+ break;
+
+ case SO_TIMESTAMPNS_NEW:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
+ break;
+
+ case SO_TIMESTAMPING_OLD:
v.val = sk->sk_tsflags;
break;
- case SO_RCVTIMEO:
- lv = sizeof(struct timeval);
- if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
- v.tm.tv_sec = 0;
- v.tm.tv_usec = 0;
- } else {
- v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * USEC_PER_SEC) / HZ;
- }
+ case SO_RCVTIMEO_OLD:
+ case SO_RCVTIMEO_NEW:
+ lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
break;
- case SO_SNDTIMEO:
- lv = sizeof(struct timeval);
- if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
- v.tm.tv_sec = 0;
- v.tm.tv_usec = 0;
- } else {
- v.tm.tv_sec = sk->sk_sndtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * USEC_PER_SEC) / HZ;
- }
+ case SO_SNDTIMEO_OLD:
+ case SO_SNDTIMEO_NEW:
+ lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
break;
case SO_RCVLOWAT:
@@ -1344,8 +1465,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
#endif
case SO_MAX_PACING_RATE:
- /* 32bit version */
- v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
+ if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
+ lv = sizeof(v.ulval);
+ v.ulval = sk->sk_max_pacing_rate;
+ } else {
+ /* 32bit version */
+ v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
+ }
break;
case SO_INCOMING_CPU:
@@ -1399,6 +1525,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
SOF_TXTIME_REPORT_ERRORS : 0;
break;
+ case SO_BINDTOIFINDEX:
+ v.val = sk->sk_bound_dev_if;
+ break;
+
default:
/* We implement the SO_SNDLOWAT etc to not be settable
* (1003.1g 7).
@@ -1726,7 +1856,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_err_soft = 0;
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
- atomic64_set(&newsk->sk_cookie, 0);
if (likely(newsk->sk_net_refcnt))
sock_inuse_add(sock_net(newsk), 1);
@@ -1750,7 +1879,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
- newsk->sk_wq = NULL;
+ RCU_INIT_POINTER(newsk->sk_wq, NULL);
if (newsk->sk_prot->sockets_allocated)
sk_sockets_allocated_inc(newsk);
@@ -2122,7 +2251,7 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
return -EINVAL;
sockc->mark = *(u32 *)CMSG_DATA(cmsg);
break;
- case SO_TIMESTAMPING:
+ case SO_TIMESTAMPING_OLD:
if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
return -EINVAL;
@@ -2380,7 +2509,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
}
if (sk_has_memory_pressure(sk)) {
- int alloc;
+ u64 alloc;
if (!sk_under_memory_pressure(sk))
return 1;
@@ -2713,11 +2842,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
if (sock) {
sk->sk_type = sock->type;
- sk->sk_wq = sock->wq;
+ RCU_INIT_POINTER(sk->sk_wq, sock->wq);
sock->sk = sk;
sk->sk_uid = SOCK_INODE(sock)->i_uid;
} else {
- sk->sk_wq = NULL;
+ RCU_INIT_POINTER(sk->sk_wq, NULL);
sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
}
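
Aside (not part of the patch): SO_BINDTOIFINDEX added above is the ifindex-based counterpart to SO_BINDTODEVICE, reusing the same locked bind path. A minimal userspace sketch follows; the fallback define mirrors asm-generic/socket.h and "eth0" is a placeholder interface name.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if.h>

#ifndef SO_BINDTOIFINDEX
#define SO_BINDTOIFINDEX 62	/* assumption: asm-generic/socket.h value */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ifindex = (int)if_nametoindex("eth0");	/* placeholder name */

	if (fd < 0 || ifindex == 0) {
		perror("setup");
		return 1;
	}
	/* Bind by ifindex instead of passing an IFNAMSIZ string. */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX,
		       &ifindex, sizeof(ifindex)) < 0)
		perror("SO_BINDTOIFINDEX");
	close(fd);
	return 0;
}
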
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d67ec17f2cc8..84bf2861f45f 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -36,6 +36,15 @@ static int net_msg_warn; /* Unused, but still a sysctl */
int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0;
EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);
+/* 0 - Keep current behavior:
+ * IPv4: inherit all current settings from init_net
+ * IPv6: reset all settings to default
+ * 1 - Both inherit all current settings from init_net
+ * 2 - Both reset all settings to default
+ */
+int sysctl_devconf_inherit_init_net __read_mostly;
+EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
+
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -544,6 +553,15 @@ static struct ctl_table net_core_table[] = {
.extra1 = &zero,
.extra2 = &one,
},
+ {
+ .procname = "devconf_inherit_init_net",
+ .data = &sysctl_devconf_inherit_init_net,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &two,
+ },
{ }
};
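
Aside (not part of the patch): the new knob is registered under net.core, i.e. /proc/sys/net/core/devconf_inherit_init_net. A small sketch of flipping it from userspace; it assumes the running kernel exposes the file and the caller is allowed to write it.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/core/devconf_inherit_init_net", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* 1: both IPv4 and IPv6 inherit current settings from init_net */
	fputs("1\n", f);
	fclose(f);
	return 0;
}
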
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6eb837a47b5c..baaaeb2b2c42 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
u8 pkt, u8 opt, u8 *val, u8 len)
{
- if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+ if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
return 0;
return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
}
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
u8 pkt, u8 opt, u8 *val, u8 len)
{
- if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+ if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
return 0;
return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
}
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 85d6c879383d..8d03707abdac 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -480,7 +480,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
- if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
+ if (sk->sk_write_pending || inet_csk_in_pingpong_mode(sk) ||
icsk->icsk_accept_queue.rskq_defer_accept) {
/* Save one ACK. Data will be ready after
* several ticks, if write_pending is set.
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 1501a20a94ca..74e138495d67 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -199,7 +199,7 @@ static void dccp_delack_timer(struct timer_list *t)
icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
if (inet_csk_ack_scheduled(sk)) {
- if (!icsk->icsk_ack.pingpong) {
+ if (!inet_csk_in_pingpong_mode(sk)) {
/* Delayed ACK missed: inflate ATO. */
icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
icsk->icsk_rto);
@@ -207,7 +207,7 @@ static void dccp_delack_timer(struct timer_list *t)
/* Delayed ACK missed: leave pingpong mode and
* deflate ATO.
*/
- icsk->icsk_ack.pingpong = 0;
+ inet_csk_exit_pingpong_mode(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
dccp_send_ack(sk);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index d0b3e69c6b39..0962f9201baa 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -56,7 +56,7 @@
#include <net/dn_neigh.h>
#include <net/dn_fib.h>
-#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn))
+#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn))
static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index f78fe58eafc8..6cd3737593a6 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -282,7 +282,7 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct nlattr *att
(nhs = dn_fib_count_nhs(attrs[RTA_MULTIPATH])) == 0)
goto err_inval;
- fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
+ fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
err = -ENOBUFS;
if (fi == NULL)
goto failure;
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 91e52973ee13..fab49132345f 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -6,7 +6,7 @@ config HAVE_NET_DSA
config NET_DSA
tristate "Distributed Switch Architecture"
- depends on HAVE_NET_DSA && MAY_USE_DEVLINK
+ depends on HAVE_NET_DSA
depends on BRIDGE || BRIDGE=n
select NET_SWITCHDEV
select PHYLINK
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index aee909bcddc4..36de4f2a3366 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -57,6 +57,7 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ9477
[DSA_TAG_PROTO_KSZ9477] = &ksz9477_netdev_ops,
+ [DSA_TAG_PROTO_KSZ9893] = &ksz9893_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_LAN9303
[DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
@@ -93,6 +94,7 @@ const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ9477
[DSA_TAG_PROTO_KSZ9477] = "ksz9477",
+ [DSA_TAG_PROTO_KSZ9893] = "ksz9893",
#endif
#ifdef CONFIG_NET_DSA_TAG_LAN9303
[DSA_TAG_PROTO_LAN9303] = "lan9303",
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index a1917025e155..c00ee464afc7 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -612,8 +612,8 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
{
struct device_node *ports, *port;
struct dsa_port *dp;
+ int err = 0;
u32 reg;
- int err;
ports = of_get_child_by_name(dn, "ports");
if (!ports) {
@@ -624,19 +624,23 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
if (err)
- return err;
+ goto out_put_node;
- if (reg >= ds->num_ports)
- return -EINVAL;
+ if (reg >= ds->num_ports) {
+ err = -EINVAL;
+ goto out_put_node;
+ }
dp = &ds->ports[reg];
err = dsa_port_parse_of(dp, port);
if (err)
- return err;
+ goto out_put_node;
}
- return 0;
+out_put_node:
+ of_node_put(ports);
+ return err;
}
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
@@ -767,11 +771,10 @@ static int dsa_switch_probe(struct dsa_switch *ds)
struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
{
- size_t size = sizeof(struct dsa_switch) + n * sizeof(struct dsa_port);
struct dsa_switch *ds;
int i;
- ds = devm_kzalloc(dev, size, GFP_KERNEL);
+ ds = devm_kzalloc(dev, struct_size(ds, ports, n), GFP_KERNEL);
if (!ds)
return NULL;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 026a05774bf7..093b7d145eb1 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -103,7 +103,8 @@ static inline void dsa_legacy_unregister(void) { }
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid);
@@ -142,7 +143,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
int dsa_port_set_state(struct dsa_port *dp, u8 state,
struct switchdev_trans *trans);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
-void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy);
+void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
@@ -159,6 +160,10 @@ int dsa_port_mdb_add(const struct dsa_port *dp,
struct switchdev_trans *trans);
int dsa_port_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+ struct switchdev_trans *trans);
+int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+ struct switchdev_trans *trans);
int dsa_port_vlan_add(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
@@ -211,6 +216,7 @@ extern const struct dsa_device_ops gswip_netdev_ops;
/* tag_ksz.c */
extern const struct dsa_device_ops ksz9477_netdev_ops;
+extern const struct dsa_device_ops ksz9893_netdev_ops;
/* tag_lan9303.c */
extern const struct dsa_device_ops lan9303_netdev_ops;
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 71bb15f491c8..c58f33931be1 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -126,6 +126,17 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
}
}
+static int dsa_master_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+ if (snprintf(name, len, "p%d", cpu_dp->index) >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
static int dsa_master_ethtool_setup(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -158,6 +169,38 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
cpu_dp->orig_ethtool_ops = NULL;
}
+static int dsa_master_ndo_setup(struct net_device *dev)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct net_device_ops *ops;
+
+ if (dev->netdev_ops->ndo_get_phys_port_name)
+ return 0;
+
+ ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ cpu_dp->orig_ndo_ops = dev->netdev_ops;
+ if (cpu_dp->orig_ndo_ops)
+ memcpy(ops, cpu_dp->orig_ndo_ops, sizeof(*ops));
+
+ ops->ndo_get_phys_port_name = dsa_master_get_phys_port_name;
+
+ dev->netdev_ops = ops;
+
+ return 0;
+}
+
+static void dsa_master_ndo_teardown(struct net_device *dev)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+ dev->netdev_ops = cpu_dp->orig_ndo_ops;
+ cpu_dp->orig_ndo_ops = NULL;
+}
+
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
char *buf)
{
@@ -205,6 +248,8 @@ static void dsa_master_reset_mtu(struct net_device *dev)
rtnl_unlock();
}
+static struct lock_class_key dsa_master_addr_list_lock_key;
+
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
int ret;
@@ -218,21 +263,34 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
wmb();
dev->dsa_ptr = cpu_dp;
+ lockdep_set_class(&dev->addr_list_lock,
+ &dsa_master_addr_list_lock_key);
ret = dsa_master_ethtool_setup(dev);
if (ret)
return ret;
+ ret = dsa_master_ndo_setup(dev);
+ if (ret)
+ goto out_err_ethtool_teardown;
+
ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
if (ret)
- dsa_master_ethtool_teardown(dev);
+ goto out_err_ndo_teardown;
+
+ return ret;
+out_err_ndo_teardown:
+ dsa_master_ndo_teardown(dev);
+out_err_ethtool_teardown:
+ dsa_master_ethtool_teardown(dev);
return ret;
}
void dsa_master_teardown(struct net_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &dsa_group);
+ dsa_master_ndo_teardown(dev);
dsa_master_ethtool_teardown(dev);
dsa_master_reset_mtu(dev);
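
Aside (not part of the patch): the ndo_get_phys_port_name override installed on the DSA master backs the standard phys_port_name sysfs attribute, so the CPU port reports as "p<index>". A hedged userspace sketch reading it; "eth0" is a placeholder master device name.

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/net/eth0/phys_port_name", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("phys_port_name: %s", buf);	/* e.g. "p0" */
	fclose(f);
	return 0;
}
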
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 2d7e01b23572..caeef4c99dc0 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
- u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
struct dsa_switch *ds = dp->ds;
int port = dp->index;
int err;
@@ -80,20 +79,22 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
return err;
}
- dsa_port_set_state_now(dp, stp_state);
+ if (!dp->bridge_dev)
+ dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
return 0;
}
-void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy)
+void dsa_port_disable(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
- dsa_port_set_state_now(dp, BR_STATE_DISABLED);
+ if (!dp->bridge_dev)
+ dsa_port_set_state_now(dp, BR_STATE_DISABLED);
if (ds->ops->port_disable)
- ds->ops->port_disable(ds, port, phy);
+ ds->ops->port_disable(ds, port);
}
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
@@ -105,16 +106,23 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
};
int err;
- /* Here the port is already bridged. Reflect the current configuration
- * so that drivers can program their chips accordingly.
+ /* Set the flooding mode before joining the port in the switch */
+ err = dsa_port_bridge_flags(dp, BR_FLOOD | BR_MCAST_FLOOD, NULL);
+ if (err)
+ return err;
+
+ /* Here the interface is already bridged. Reflect the current
+ * configuration so that drivers can program their chips accordingly.
*/
dp->bridge_dev = br;
err = dsa_port_notify(dp, DSA_NOTIFIER_BRIDGE_JOIN, &info);
/* The bridging is rolled back on error */
- if (err)
+ if (err) {
+ dsa_port_bridge_flags(dp, 0, NULL);
dp->bridge_dev = NULL;
+ }
return err;
}
@@ -137,6 +145,9 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
if (err)
pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
+ /* Port is leaving the bridge, disable flooding */
+ dsa_port_bridge_flags(dp, 0, NULL);
+
/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
* so allow it to be in BR_STATE_FORWARDING to be kept functional
*/
@@ -177,6 +188,35 @@ int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
}
+int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_egress_floods ||
+ (flags & ~(BR_FLOOD | BR_MCAST_FLOOD)))
+ return -EINVAL;
+
+ return 0;
+}
+
+int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ int err = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (ds->ops->port_egress_floods)
+ err = ds->ops->port_egress_floods(ds, port, flags & BR_FLOOD,
+ flags & BR_MCAST_FLOOD);
+
+ return err;
+}
+
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
@@ -252,7 +292,10 @@ int dsa_port_vlan_add(struct dsa_port *dp,
.vlan = vlan,
};
- if (br_vlan_enabled(dp->bridge_dev))
+ /* Can be called from dsa_slave_port_obj_add() or
+ * dsa_slave_vlan_rx_add_vid()
+ */
+ if (!dp->bridge_dev || br_vlan_enabled(dp->bridge_dev))
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
return 0;
@@ -267,10 +310,13 @@ int dsa_port_vlan_del(struct dsa_port *dp,
.vlan = vlan,
};
- if (netif_is_bridge_master(vlan->obj.orig_dev))
+ if (vlan->obj.orig_dev && netif_is_bridge_master(vlan->obj.orig_dev))
return -EOPNOTSUPP;
- if (br_vlan_enabled(dp->bridge_dev))
+ /* Can be called from dsa_slave_port_obj_del() or
+ * dsa_slave_vlan_rx_kill_vid()
+ */
+ if (!dp->bridge_dev || br_vlan_enabled(dp->bridge_dev))
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
return 0;
@@ -291,6 +337,7 @@ static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
return ERR_PTR(-EPROBE_DEFER);
}
+ of_node_put(phy_dn);
return phydev;
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3fcc1d01615..093eef6f2599 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -122,7 +122,7 @@ static int dsa_slave_close(struct net_device *dev)
phylink_stop(dp->pl);
- dsa_port_disable(dp, dev->phydev);
+ dsa_port_disable(dp);
dev_mc_unsync(master, dev);
dev_uc_unsync(master, dev);
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *master = dsa_slave_to_master(dev);
-
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
- if (change & IFF_PROMISC)
- dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(master,
+ dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(master,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+ }
}
static void dsa_slave_set_rx_mode(struct net_device *dev)
@@ -292,6 +295,13 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
+ trans);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -362,24 +372,15 @@ static int dsa_slave_port_obj_del(struct net_device *dev,
return err;
}
-static int dsa_slave_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
+static int dsa_slave_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct dsa_switch_tree *dst = ds->dst;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(dst->index);
- memcpy(&attr->u.ppid.id, &dst->index, attr->u.ppid.id_len);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
- attr->u.brport_flags_support = 0;
- break;
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = sizeof(dst->index);
+ memcpy(&ppid->id, &dst->index, ppid->id_len);
return 0;
}
@@ -639,7 +640,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
int ret;
/* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev && !dp->pl)
+ if (!dev->phydev || !dp->pl)
return -ENODEV;
if (!ds->ops->set_mac_eee)
@@ -659,7 +660,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
int ret;
/* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev && !dp->pl)
+ if (!dev->phydev || !dp->pl)
return -ENODEV;
if (!ds->ops->get_mac_eee)
@@ -982,6 +983,75 @@ static int dsa_slave_get_ts_info(struct net_device *dev,
return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
+static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan vlan = {
+ .vid_begin = vid,
+ .vid_end = vid,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+ struct switchdev_trans trans;
+ struct bridge_vlan_info info;
+ int ret;
+
+ /* Check for a possible bridge VLAN entry now since there is no
+ * need to emulate the switchdev prepare + commit phase.
+ */
+ if (dp->bridge_dev) {
+ /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
+		 * device or the VID is not found; a return of 0 means the
+		 * VID already exists on the bridge, which is a failure for us here.
+ */
+ ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
+ if (ret == 0)
+ return -EBUSY;
+ }
+
+ trans.ph_prepare = true;
+ ret = dsa_port_vlan_add(dp, &vlan, &trans);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+
+ trans.ph_prepare = false;
+ return dsa_port_vlan_add(dp, &vlan, &trans);
+}
+
+static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan vlan = {
+ .vid_begin = vid,
+ .vid_end = vid,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+ struct bridge_vlan_info info;
+ int ret;
+
+ /* Check for a possible bridge VLAN entry now since there is no
+ * need to emulate the switchdev prepare + commit phase.
+ */
+ if (dp->bridge_dev) {
+ /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
+		 * device or the VID is not found; a return of 0 means the
+		 * VID already exists on the bridge, which is a failure for us here.
+ */
+ ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
+ if (ret == 0)
+ return -EBUSY;
+ }
+
+ ret = dsa_port_vlan_del(dp, &vlan);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+
+ return ret;
+}
+
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_drvinfo = dsa_slave_get_drvinfo,
.get_regs_len = dsa_slave_get_regs_len,
@@ -1009,7 +1079,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -1045,11 +1116,9 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
.ndo_setup_tc = dsa_slave_setup_tc,
.ndo_get_stats64 = dsa_slave_get_stats64,
-};
-
-static const struct switchdev_ops dsa_slave_switchdev_ops = {
- .switchdev_port_attr_get = dsa_slave_port_attr_get,
- .switchdev_port_attr_set = dsa_slave_port_attr_set,
+ .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
+ .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
};
static struct device_type dsa_type = {
@@ -1305,13 +1374,13 @@ int dsa_slave_create(struct dsa_port *port)
if (slave_dev == NULL)
return -ENOMEM;
- slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
+ slave_dev->features = master->vlan_features | NETIF_F_HW_TC |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
slave_dev->hw_features |= NETIF_F_HW_TC;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
eth_hw_addr_inherit(slave_dev, master);
slave_dev->priv_flags |= IFF_NO_QUEUE;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
- slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
slave_dev->min_mtu = 0;
slave_dev->max_mtu = ETH_MAX_MTU;
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
@@ -1406,20 +1475,66 @@ static int dsa_slave_changeupper(struct net_device *dev,
return err;
}
+static int dsa_slave_upper_vlan_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *
+ info)
+{
+ struct netlink_ext_ack *ext_ack;
+ struct net_device *slave;
+ struct dsa_port *dp;
+
+ ext_ack = netdev_notifier_info_to_extack(&info->info);
+
+ if (!is_vlan_dev(dev))
+ return NOTIFY_DONE;
+
+ slave = vlan_dev_real_dev(dev);
+ if (!dsa_slave_dev_check(slave))
+ return NOTIFY_DONE;
+
+ dp = dsa_slave_to_port(slave);
+ if (!dp->bridge_dev)
+ return NOTIFY_DONE;
+
+ /* Deny enslaving a VLAN device into a VLAN-aware bridge */
+ if (br_vlan_enabled(dp->bridge_dev) &&
+ netif_is_bridge_master(info->upper_dev) && info->linking) {
+ NL_SET_ERR_MSG_MOD(ext_ack,
+ "Cannot enslave VLAN device into VLAN aware bridge");
+ return notifier_from_errno(-EINVAL);
+ }
+
+ return NOTIFY_DONE;
+}
+
static int dsa_slave_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (!dsa_slave_dev_check(dev))
- return NOTIFY_DONE;
+ if (event == NETDEV_CHANGEUPPER) {
+ if (!dsa_slave_dev_check(dev))
+ return dsa_slave_upper_vlan_check(dev, ptr);
- if (event == NETDEV_CHANGEUPPER)
return dsa_slave_changeupper(dev, ptr);
+ }
return NOTIFY_DONE;
}
+static int
+dsa_slave_switchdev_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info *port_attr_info)
+{
+ int err;
+
+ err = dsa_slave_port_attr_set(netdev, port_attr_info->attr,
+ port_attr_info->trans);
+
+ port_attr_info->handled = true;
+ return notifier_from_errno(err);
+}
+
struct dsa_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
@@ -1450,7 +1565,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
}
fdb_info->offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
- &fdb_info->info);
+ &fdb_info->info, NULL);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
@@ -1498,6 +1613,9 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
if (!dsa_slave_dev_check(dev))
return NOTIFY_DONE;
+ if (event == SWITCHDEV_PORT_ATTR_SET)
+ return dsa_slave_switchdev_port_attr_set_event(dev, ptr);
+
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return NOTIFY_BAD;
@@ -1560,6 +1678,8 @@ static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
case SWITCHDEV_PORT_OBJ_DEL:
return dsa_slave_switchdev_port_obj_event(event, dev, ptr);
+ case SWITCHDEV_PORT_ATTR_SET:
+ return dsa_slave_switchdev_port_attr_set_event(dev, ptr);
}
return NOTIFY_DONE;
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 142b294d3446..e1fae969aa73 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -12,6 +12,7 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
+#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include "dsa_priv.h"
@@ -168,6 +169,43 @@ static int dsa_switch_mdb_del(struct dsa_switch *ds,
return 0;
}
+static int dsa_port_vlan_device_check(struct net_device *vlan_dev,
+ int vlan_dev_vid,
+ void *arg)
+{
+ struct switchdev_obj_port_vlan *vlan = arg;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ if (vid == vlan_dev_vid)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int dsa_port_vlan_check(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ const struct dsa_port *dp = dsa_to_port(ds, port);
+ int err = 0;
+
+ /* Device is not bridged, let it proceed with the VLAN device
+ * creation.
+ */
+ if (!dp->bridge_dev)
+ return err;
+
+	/* dsa_slave_vlan_rx_{add,kill}_vid() cannot use the prepare phase and
+	 * already check whether there is an overlapping bridge VLAN entry
+ * with the same VID, so here we only need to check that if we are
+ * adding a bridge VLAN entry there is not an overlapping VLAN device
+ * claiming that VID.
+ */
+ return vlan_for_each(dp->slave, dsa_port_vlan_device_check,
+ (void *)vlan);
+}
+
static int
dsa_switch_vlan_prepare_bitmap(struct dsa_switch *ds,
const struct switchdev_obj_port_vlan *vlan,
@@ -179,6 +217,10 @@ dsa_switch_vlan_prepare_bitmap(struct dsa_switch *ds,
return -EOPNOTSUPP;
for_each_set_bit(port, bitmap, ds->num_ports) {
+ err = dsa_port_vlan_check(ds, port, vlan);
+ if (err)
+ return err;
+
err = ds->ops->port_vlan_prepare(ds, port, vlan);
if (err)
return err;
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 8b2f92e3f3a2..67ff3fae18d8 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -146,8 +146,17 @@ static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
return skb;
}
+static int dsa_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+ int *offset)
+{
+ *offset = 4;
+ *proto = ((__be16 *)skb->data)[1];
+ return 0;
+}
+
const struct dsa_device_ops dsa_netdev_ops = {
.xmit = dsa_xmit,
.rcv = dsa_rcv,
+ .flow_dissect = dsa_tag_flow_dissect,
.overhead = DSA_HLEN,
};
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index f5b87ee5c94e..234585ec116e 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -165,8 +165,17 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
return skb;
}
+static int edsa_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+ int *offset)
+{
+ *offset = 8;
+ *proto = ((__be16 *)skb->data)[3];
+ return 0;
+}
+
const struct dsa_device_ops edsa_netdev_ops = {
.xmit = edsa_xmit,
.rcv = edsa_rcv,
+ .flow_dissect = edsa_tag_flow_dissect,
.overhead = EDSA_HLEN,
};
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index da71b9e2af52..de246c93d3bb 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -16,6 +16,7 @@
/* Typically only one byte is used for tail tag. */
#define KSZ_EGRESS_TAG_LEN 1
+#define KSZ_INGRESS_TAG_LEN 1
static struct sk_buff *ksz_common_xmit(struct sk_buff *skb,
struct net_device *dev, int len)
@@ -67,6 +68,8 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
pskb_trim_rcsum(skb, skb->len - len);
+ skb->offload_fwd_mark = true;
+
return skb;
}
@@ -139,3 +142,36 @@ const struct dsa_device_ops ksz9477_netdev_ops = {
.rcv = ksz9477_rcv,
.overhead = KSZ9477_INGRESS_TAG_LEN,
};
+
+#define KSZ9893_TAIL_TAG_OVERRIDE BIT(5)
+#define KSZ9893_TAIL_TAG_LOOKUP BIT(6)
+
+static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct sk_buff *nskb;
+ u8 *addr;
+ u8 *tag;
+
+ nskb = ksz_common_xmit(skb, dev, KSZ_INGRESS_TAG_LEN);
+ if (!nskb)
+ return NULL;
+
+ /* Tag encoding */
+ tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
+ addr = skb_mac_header(nskb);
+
+ *tag = BIT(dp->index);
+
+ if (is_link_local_ether_addr(addr))
+ *tag |= KSZ9893_TAIL_TAG_OVERRIDE;
+
+ return nskb;
+}
+
+const struct dsa_device_ops ksz9893_netdev_ops = {
+ .xmit = ksz9893_xmit,
+ .rcv = ksz9477_rcv,
+ .overhead = KSZ_INGRESS_TAG_LEN,
+};
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 4c520110b04f..f7a3d7a171c7 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -265,6 +265,18 @@ void eth_header_cache_update(struct hh_cache *hh,
EXPORT_SYMBOL(eth_header_cache_update);
/**
+ * eth_header_parse_protocol - extract protocol from L2 header
+ * @skb: packet to extract protocol from
+ */
+__be16 eth_header_parse_protocol(const struct sk_buff *skb)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+
+ return eth->h_proto;
+}
+EXPORT_SYMBOL(eth_header_parse_protocol);
+
+/**
* eth_prepare_mac_addr_change - prepare for mac change
* @dev: network device
* @p: socket address
@@ -346,6 +358,7 @@ const struct header_ops eth_header_ops ____cacheline_aligned = {
.parse = eth_header_parse,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
+ .parse_protocol = eth_header_parse_protocol,
};
/**
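
Aside (not part of the patch): eth_header_parse_protocol() simply returns the EtherType field of the frame it is handed. A userspace analogue for illustration only; it ignores 802.1Q tags and 802.3 length fields.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint16_t frame_ethertype(const uint8_t *frame)
{
	uint16_t proto;

	/* Two 6-byte MAC addresses precede the 2-byte EtherType. */
	memcpy(&proto, frame + 12, sizeof(proto));
	return ntohs(proto);
}

int main(void)
{
	uint8_t frame[14] = { 0 };

	frame[12] = 0x08;	/* 0x0800 = IPv4 */
	frame[13] = 0x00;
	printf("ethertype 0x%04x\n", frame_ethertype(frame));
	return 0;
}
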
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index d14226ecfde4..4196bcd4105a 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -27,6 +27,7 @@
#include <net/6lowpan.h>
#include <net/ipv6_frag.h>
#include <net/inet_frag.h>
+#include <net/ip.h>
#include "6lowpan_i.h"
@@ -34,8 +35,8 @@ static const char lowpan_frags_cache_name[] = "lowpan-frags";
static struct inet_frags lowpan_frags;
-static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
- struct sk_buff *prev, struct net_device *ldev);
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev, struct net_device *ldev);
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
@@ -88,9 +89,15 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
struct sk_buff *skb, u8 frag_type)
{
- struct sk_buff *prev, *next;
+ struct sk_buff *prev_tail;
struct net_device *ldev;
- int end, offset;
+ int end, offset, err;
+
+ /* inet_frag_queue_* functions use skb->cb; see struct ipfrag_skb_cb
+ * in inet_fragment.c
+ */
+ BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet_skb_parm));
+ BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet6_skb_parm));
if (fq->q.flags & INET_FRAG_COMPLETE)
goto err;
@@ -117,38 +124,15 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
}
}
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = fq->q.fragments_tail;
- if (!prev ||
- lowpan_802154_cb(prev)->d_offset <
- lowpan_802154_cb(skb)->d_offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = fq->q.fragments; next != NULL; next = next->next) {
- if (lowpan_802154_cb(next)->d_offset >=
- lowpan_802154_cb(skb)->d_offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- fq->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- fq->q.fragments = skb;
-
ldev = skb->dev;
if (ldev)
skb->dev = NULL;
+ barrier();
+
+ prev_tail = fq->q.fragments_tail;
+ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+ if (err)
+ goto err;
fq->q.stamp = skb->tstamp;
if (frag_type == LOWPAN_DISPATCH_FRAG1)
@@ -163,10 +147,11 @@ found:
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
- res = lowpan_frag_reasm(fq, prev, ldev);
+ res = lowpan_frag_reasm(fq, skb, prev_tail, ldev);
skb->_skb_refdst = orefdst;
return res;
}
+ skb_dst_drop(skb);
return -1;
err:
@@ -175,97 +160,28 @@ err:
}
/* Check if this packet is complete.
- * Returns NULL on failure by any reason, and pointer
- * to current nexthdr field in reassembled frame.
*
* It is called with locked fq, and caller must check that
* queue is eligible for reassembly i.e. it is not COMPLETE,
* the last and the first frames arrived and all the bits are here.
*/
-static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
- struct net_device *ldev)
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *ldev)
{
- struct sk_buff *fp, *head = fq->q.fragments;
- int sum_truesize;
+ void *reasm_data;
inet_frag_kill(&fq->q);
- /* Make the one we just received the head. */
- if (prev) {
- head = prev->next;
- fp = skb_clone(head, GFP_ATOMIC);
-
- if (!fp)
- goto out_oom;
-
- fp->next = head->next;
- if (!fp->next)
- fq->q.fragments_tail = fp;
- prev->next = fp;
-
- skb_morph(head, fq->q.fragments);
- head->next = fq->q.fragments->next;
-
- consume_skb(fq->q.fragments);
- fq->q.fragments = head;
- }
-
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
+ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+ if (!reasm_data)
goto out_oom;
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments.
- */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (!clone)
- goto out_oom;
- clone->next = head->next;
- head->next = clone;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = head->data_len - plen;
- clone->data_len = clone->len;
- head->data_len -= clone->len;
- head->len -= clone->len;
- add_frag_mem_limit(fq->q.net, clone->truesize);
- }
-
- WARN_ON(head == NULL);
-
- sum_truesize = head->truesize;
- for (fp = head->next; fp;) {
- bool headstolen;
- int delta;
- struct sk_buff *next = fp->next;
-
- sum_truesize += fp->truesize;
- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
- kfree_skb_partial(fp, headstolen);
- } else {
- if (!skb_shinfo(head)->frag_list)
- skb_shinfo(head)->frag_list = fp;
- head->data_len += fp->len;
- head->len += fp->len;
- head->truesize += fp->truesize;
- }
- fp = next;
- }
- sub_frag_mem_limit(fq->q.net, sum_truesize);
-
- skb_mark_not_on_list(head);
- head->dev = ldev;
- head->tstamp = fq->q.stamp;
-
- fq->q.fragments = NULL;
+ skb->dev = ldev;
+ skb->tstamp = fq->q.stamp;
+ fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
+ fq->q.last_run_head = NULL;
return 1;
out_oom:
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 0dfb72c46671..eab3ebde981e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1385,6 +1385,15 @@ out:
}
EXPORT_SYMBOL(inet_gso_segment);
+static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
+ return ERR_PTR(-EINVAL);
+
+ return inet_gso_segment(skb, features);
+}
+
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *,
struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
@@ -1861,7 +1870,7 @@ static struct packet_offload ip_packet_offload __read_mostly = {
static const struct net_offload ipip_offload = {
.callbacks = {
- .gso_segment = inet_gso_segment,
+ .gso_segment = ipip_gso_segment,
.gro_receive = ipip_gro_receive,
.gro_complete = ipip_gro_complete,
},
diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c
index 5e04ed25bc0e..1e976bb93d99 100644
--- a/net/ipv4/bpfilter/sockopt.c
+++ b/net/ipv4/bpfilter/sockopt.c
@@ -1,28 +1,54 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bpfilter.h>
#include <uapi/linux/bpf.h>
#include <linux/wait.h>
#include <linux/kmod.h>
+#include <linux/fs.h>
+#include <linux/file.h>
-int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
- char __user *optval,
- unsigned int optlen, bool is_set);
-EXPORT_SYMBOL_GPL(bpfilter_process_sockopt);
+struct bpfilter_umh_ops bpfilter_ops;
+EXPORT_SYMBOL_GPL(bpfilter_ops);
+
+static void bpfilter_umh_cleanup(struct umh_info *info)
+{
+ mutex_lock(&bpfilter_ops.lock);
+ bpfilter_ops.stop = true;
+ fput(info->pipe_to_umh);
+ fput(info->pipe_from_umh);
+ info->pid = 0;
+ mutex_unlock(&bpfilter_ops.lock);
+}
static int bpfilter_mbox_request(struct sock *sk, int optname,
char __user *optval,
unsigned int optlen, bool is_set)
{
- if (!bpfilter_process_sockopt) {
- int err = request_module("bpfilter");
+ int err;
+ mutex_lock(&bpfilter_ops.lock);
+ if (!bpfilter_ops.sockopt) {
+ mutex_unlock(&bpfilter_ops.lock);
+ err = request_module("bpfilter");
+ mutex_lock(&bpfilter_ops.lock);
if (err)
- return err;
- if (!bpfilter_process_sockopt)
- return -ECHILD;
+ goto out;
+ if (!bpfilter_ops.sockopt) {
+ err = -ECHILD;
+ goto out;
+ }
+ }
+ if (bpfilter_ops.stop) {
+ err = bpfilter_ops.start();
+ if (err)
+ goto out;
}
- return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set);
+ err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set);
+out:
+ mutex_unlock(&bpfilter_ops.lock);
+ return err;
}
int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
@@ -41,3 +67,15 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
return bpfilter_mbox_request(sk, optname, optval, len, false);
}
+
+static int __init bpfilter_sockopt_init(void)
+{
+ mutex_init(&bpfilter_ops.lock);
+ bpfilter_ops.stop = true;
+ bpfilter_ops.info.cmdline = "bpfilter_umh";
+ bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup;
+
+ return 0;
+}
+
+module_init(bpfilter_sockopt_init);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 777fa3b7fb13..f0165c5f376b 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
case CIPSO_V4_MAP_PASS:
return 0;
case CIPSO_V4_MAP_TRANS:
- if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
+ if ((level < doi_def->map.std->lvl.cipso_size) &&
+ (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
return 0;
break;
}
@@ -1735,13 +1736,26 @@ validate_return:
*/
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
+ unsigned char optbuf[sizeof(struct ip_options) + 40];
+ struct ip_options *opt = (struct ip_options *)optbuf;
+
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
return;
+ /*
+ * We might be called above the IP layer,
+	 * so we cannot use icmp_send and IPCB here.
+ */
+
+ memset(opt, 0, sizeof(struct ip_options));
+ opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+ if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+ return;
+
if (gateway)
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
else
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
}
/**
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 04ba321ae5ce..eb514f312e6f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1826,7 +1826,7 @@ put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
- return err < 0 ? err : skb->len;
+ return skb->len ? : err;
}
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
@@ -2063,13 +2063,49 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
};
+static int inet_netconf_valid_get_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_ipv4_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_ipv4_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETCONFA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETCONFA_IFINDEX:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet_netconf_get_devconf(struct sk_buff *in_skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[NETCONFA_MAX+1];
- struct netconfmsg *ncm;
struct sk_buff *skb;
struct ipv4_devconf *devconf;
struct in_device *in_dev;
@@ -2077,9 +2113,8 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
int ifindex;
int err;
- err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
- devconf_ipv4_policy, extack);
- if (err < 0)
+ err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
+ if (err)
goto errout;
err = -EINVAL;
@@ -2556,32 +2591,34 @@ static __net_init int devinet_init_net(struct net *net)
int err;
struct ipv4_devconf *all, *dflt;
#ifdef CONFIG_SYSCTL
- struct ctl_table *tbl = ctl_forward_entry;
+ struct ctl_table *tbl;
struct ctl_table_header *forw_hdr;
#endif
err = -ENOMEM;
- all = &ipv4_devconf;
- dflt = &ipv4_devconf_dflt;
+ all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
+ if (!all)
+ goto err_alloc_all;
- if (!net_eq(net, &init_net)) {
- all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
- if (!all)
- goto err_alloc_all;
-
- dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
- if (!dflt)
- goto err_alloc_dflt;
+ dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
+ if (!dflt)
+ goto err_alloc_dflt;
#ifdef CONFIG_SYSCTL
- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
- if (!tbl)
- goto err_alloc_ctl;
+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
+ if (!tbl)
+ goto err_alloc_ctl;
- tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
- tbl[0].extra1 = all;
- tbl[0].extra2 = net;
+ tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
+ tbl[0].extra1 = all;
+ tbl[0].extra2 = net;
#endif
+
+ if ((!IS_ENABLED(CONFIG_SYSCTL) ||
+ sysctl_devconf_inherit_init_net != 2) &&
+ !net_eq(net, &init_net)) {
+ memcpy(all, init_net.ipv4.devconf_all, sizeof(ipv4_devconf));
+ memcpy(dflt, init_net.ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt));
}
#ifdef CONFIG_SYSCTL
@@ -2611,15 +2648,12 @@ err_reg_ctl:
err_reg_dflt:
__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
err_reg_all:
- if (tbl != ctl_forward_entry)
- kfree(tbl);
+ kfree(tbl);
err_alloc_ctl:
#endif
- if (dflt != &ipv4_devconf_dflt)
- kfree(dflt);
+ kfree(dflt);
err_alloc_dflt:
- if (all != &ipv4_devconf)
- kfree(all);
+ kfree(all);
err_alloc_all:
return err;
}
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 5459f41fc26f..10e809b296ec 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
skb->len += tailen;
skb->data_len += tailen;
skb->truesize += tailen;
- if (sk)
+ if (sk && sk_fullsock(sk))
refcount_add(tailen, &sk->sk_wmem_alloc);
goto out;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6df95be96311..ed14ec245584 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
- flushed += fib_table_flush(net, tb);
+ flushed += fib_table_flush(net, tb, false);
}
if (flushed)
@@ -710,6 +710,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
case RTA_GATEWAY:
cfg->fc_gw = nla_get_be32(attr);
break;
+ case RTA_VIA:
+ NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
+ err = -EINVAL;
+ goto errout;
case RTA_PRIORITY:
cfg->fc_priority = nla_get_u32(attr);
break;
@@ -1463,7 +1467,7 @@ static void ip_fib_net_exit(struct net *net)
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
hlist_del(&tb->tb_hlist);
- fib_table_flush(net, tb);
+ fib_table_flush(net, tb, true);
fib_free_table(tb);
}
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5022bc63863a..8e185b5a2bf6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1072,7 +1072,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
goto failure;
}
- fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
+ fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
if (!fi)
goto failure;
fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
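
The fib_create_info() hunk above replaces the open-coded "sizeof(*fi) + nhs * sizeof(struct fib_nh)" with struct_size(). A minimal, hedged sketch of that allocation pattern follows; struct_size() is the real helper from <linux/overflow.h>, while demo_hop, demo_info and demo_alloc are invented names used only for illustration:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Hypothetical structure ending in a flexible array member. */
	struct demo_hop {
		int ifindex;
	};

	struct demo_info {
		int		nhs;
		struct demo_hop	hops[];
	};

	static struct demo_info *demo_alloc(unsigned int nhs)
	{
		struct demo_info *di;

		/*
		 * struct_size(di, hops, nhs) evaluates to
		 * sizeof(*di) + nhs * sizeof(*di->hops) and saturates to
		 * SIZE_MAX on overflow, so kzalloc() fails cleanly instead
		 * of returning a silently undersized buffer.  di appears
		 * only inside sizeof here, so it is not read before being
		 * initialized.
		 */
		di = kzalloc(struct_size(di, hops, nhs), GFP_KERNEL);
		if (di)
			di->nhs = nhs;
		return di;
	}

This is the same overflow-checked form the kzalloc() call in fib_create_info() now uses.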
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 237c9f72b265..a573e37e0615 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
}
/* Caller must hold RTNL. */
-int fib_table_flush(struct net *net, struct fib_table *tb)
+int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
- if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
- tb->tb_id != fa->tb_id) {
+ if (!fi || tb->tb_id != fa->tb_id ||
+ (!(fi->fib_flags & RTNH_F_DEAD) &&
+ !fib_props[fa->fa_type].error)) {
+ slen = fa->fa_slen;
+ continue;
+ }
+
+			/* Do not flush error routes if the network namespace
+			 * is not being dismantled.
+			 */
+ if (!flush_all && fib_props[fa->fa_type].error) {
slen = fa->fa_slen;
continue;
}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 0c9f171fb085..437070d1ffb1 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info)
{
int transport_offset = skb_transport_offset(skb);
struct guehdr *guehdr;
- size_t optlen;
+ size_t len, optlen;
int ret;
- if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ if (!pskb_may_pull(skb, len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info)
optlen = guehdr->hlen << 2;
+ if (!pskb_may_pull(skb, len + optlen))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
if (validate_gue_flags(guehdr, optlen))
return -EINVAL;
@@ -1065,7 +1070,8 @@ static int gue_err(struct sk_buff *skb, u32 info)
* recursion. Besides, this kind of encapsulation can't even be
* configured currently. Discard this.
*/
- if (guehdr->proto_ctype == IPPROTO_UDP)
+ if (guehdr->proto_ctype == IPPROTO_UDP ||
+ guehdr->proto_ctype == IPPROTO_UDPLITE)
return -EOPNOTSUPP;
skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
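
The gue_err() change above is the usual two-step header pull: reserve the fixed part, read the header length, then pull the variable part and re-read the header. A minimal sketch of the idiom, assuming a made-up demo_hdr/demo_parse (only pskb_may_pull() is the real API); the essential point is that a successful pskb_may_pull() may reallocate the skb head, so pointers into skb->data must be recomputed after each pull:

	#include <linux/skbuff.h>

	struct demo_hdr {
		__u8	hlen;		/* option length, in 32-bit words */
		__u8	opts[];
	};

	static int demo_parse(struct sk_buff *skb, unsigned int off)
	{
		struct demo_hdr *h;
		size_t len = off + sizeof(*h);

		if (!pskb_may_pull(skb, len))			/* fixed part */
			return -EINVAL;
		h = (struct demo_hdr *)(skb->data + off);

		if (!pskb_may_pull(skb, len + (h->hlen << 2)))	/* options */
			return -EINVAL;

		/* skb->data may have moved; re-derive the header pointer. */
		h = (struct demo_hdr *)(skb->data + off);

		return 0;
	}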
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index a4bf22ee3aed..7c4a41dc04bb 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
#include <net/gre.h>
+#include <net/erspan.h>
#include <net/icmp.h>
#include <net/route.h>
@@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
hdr_len += 4;
}
tpi->hdr_len = hdr_len;
+
+	/* ERSPAN versions 1 and 2 set the GRE key field
+	 * to 0 and carry the configured key in the
+	 * inner ERSPAN header field.
+	 */
+ if (greh->protocol == htons(ETH_P_ERSPAN) ||
+ greh->protocol == htons(ETH_P_ERSPAN2)) {
+ struct erspan_base_hdr *ershdr;
+
+ if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
+ return -EINVAL;
+
+ ershdr = (struct erspan_base_hdr *)options;
+ tpi->key = cpu_to_be32(get_session_id(ershdr));
+ }
+
return hdr_len;
}
EXPORT_SYMBOL(gre_parse_header);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 065997f414e6..f3a5893b1e86 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -570,7 +570,8 @@ relookup_failed:
* MUST reply to only the first fragment.
*/
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt)
{
struct iphdr *iph;
int room;
@@ -691,7 +692,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
iph->tos;
mark = IP4_REPLY_MARK(net, skb_in->mark);
- if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
+ if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
goto out_unlock;
@@ -742,7 +743,7 @@ out_bh_enable:
local_bh_enable();
out:;
}
-EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(__icmp_send);
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
@@ -1245,9 +1246,7 @@ static int __net_init icmp_sk_init(struct net *net)
return 0;
fail:
- for_each_possible_cpu(i)
- inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
- free_percpu(net->ipv4.icmp_sk);
+ icmp_sk_exit(net);
return err;
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 765b2b32c4a4..6c2febc39dca 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -159,7 +159,8 @@ static int unsolicited_report_interval(struct in_device *in_dev)
return interval_jiffies;
}
-static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
+static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
+ gfp_t gfp);
static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
@@ -1145,7 +1146,8 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
/*
* deleted ip_mc_list manipulation
*/
-static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
+ gfp_t gfp)
{
struct ip_mc_list *pmc;
struct net *net = dev_net(in_dev->dev);
@@ -1156,7 +1158,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
* for deleted items allows change reports to use common code with
* non-deleted or query-response MCA's.
*/
- pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
+ pmc = kzalloc(sizeof(*pmc), gfp);
if (!pmc)
return;
spin_lock_init(&pmc->lock);
@@ -1261,7 +1263,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
}
#endif
-static void igmp_group_dropped(struct ip_mc_list *im)
+static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
{
struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
@@ -1292,13 +1294,18 @@ static void igmp_group_dropped(struct ip_mc_list *im)
return;
}
/* IGMPv3 */
- igmpv3_add_delrec(in_dev, im);
+ igmpv3_add_delrec(in_dev, im, gfp);
igmp_ifc_event(in_dev);
}
#endif
}
+static void igmp_group_dropped(struct ip_mc_list *im)
+{
+ __igmp_group_dropped(im, GFP_KERNEL);
+}
+
static void igmp_group_added(struct ip_mc_list *im)
{
struct in_device *in_dev = im->interface;
@@ -1400,8 +1407,8 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
/*
* A socket has joined a multicast group on device dev.
*/
-static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
- unsigned int mode)
+static void ____ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
+ unsigned int mode, gfp_t gfp)
{
struct ip_mc_list *im;
@@ -1415,7 +1422,7 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
}
}
- im = kzalloc(sizeof(*im), GFP_KERNEL);
+ im = kzalloc(sizeof(*im), gfp);
if (!im)
goto out;
@@ -1448,6 +1455,12 @@ out:
return;
}
+void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
+{
+ ____ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE, gfp);
+}
+EXPORT_SYMBOL(__ip_mc_inc_group);
+
void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
__ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
@@ -1493,22 +1506,22 @@ static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
len += sizeof(struct igmpv3_report);
- return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+ return ip_mc_may_pull(skb, len) ? 0 : -EINVAL;
}
static int ip_mc_check_igmp_query(struct sk_buff *skb)
{
- unsigned int len = skb_transport_offset(skb);
-
- len += sizeof(struct igmphdr);
- if (skb->len < len)
- return -EINVAL;
+ unsigned int transport_len = ip_transport_len(skb);
+ unsigned int len;
/* IGMPv{1,2}? */
- if (skb->len != len) {
+ if (transport_len != sizeof(struct igmphdr)) {
/* or IGMPv3? */
- len += sizeof(struct igmpv3_query) - sizeof(struct igmphdr);
- if (skb->len < len || !pskb_may_pull(skb, len))
+ if (transport_len < sizeof(struct igmpv3_query))
+ return -EINVAL;
+
+ len = skb_transport_offset(skb) + sizeof(struct igmpv3_query);
+ if (!ip_mc_may_pull(skb, len))
return -EINVAL;
}
@@ -1528,7 +1541,6 @@ static int ip_mc_check_igmp_msg(struct sk_buff *skb)
case IGMP_HOST_LEAVE_MESSAGE:
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
- /* fall through */
return 0;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
return ip_mc_check_igmp_reportv3(skb);
@@ -1544,47 +1556,29 @@ static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
return skb_checksum_simple_validate(skb);
}
-static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
-
+static int ip_mc_check_igmp_csum(struct sk_buff *skb)
{
- struct sk_buff *skb_chk;
- unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
- int ret = -EINVAL;
+ unsigned int transport_len = ip_transport_len(skb);
+ struct sk_buff *skb_chk;
- transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
+ if (!ip_mc_may_pull(skb, len))
+ return -EINVAL;
skb_chk = skb_checksum_trimmed(skb, transport_len,
ip_mc_validate_checksum);
if (!skb_chk)
- goto err;
-
- if (!pskb_may_pull(skb_chk, len))
- goto err;
-
- ret = ip_mc_check_igmp_msg(skb_chk);
- if (ret)
- goto err;
-
- if (skb_trimmed)
- *skb_trimmed = skb_chk;
- /* free now unneeded clone */
- else if (skb_chk != skb)
- kfree_skb(skb_chk);
-
- ret = 0;
+ return -EINVAL;
-err:
- if (ret && skb_chk && skb_chk != skb)
+ if (skb_chk != skb)
kfree_skb(skb_chk);
- return ret;
+ return 0;
}
/**
* ip_mc_check_igmp - checks whether this is a sane IGMP packet
* @skb: the skb to validate
- * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
*
* Checks whether an IPv4 packet is a valid IGMP packet. If so sets
* skb transport header accordingly and returns zero.
@@ -1594,18 +1588,10 @@ err:
* -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
* -ENOMEM: A memory allocation failure happened.
*
- * Optionally, an skb pointer might be provided via skb_trimmed (or set it
- * to NULL): After parsing an IGMP packet successfully it will point to
- * an skb which has its tail aligned to the IP packet end. This might
- * either be the originally provided skb or a trimmed, cloned version if
- * the skb frame had data beyond the IP packet. A cloned skb allows us
- * to leave the original skb and its full frame unchanged (which might be
- * desirable for layer 2 frame jugglers).
- *
* Caller needs to set the skb network header and free any returned skb if it
* differs from the provided skb.
*/
-int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+int ip_mc_check_igmp(struct sk_buff *skb)
{
int ret = ip_mc_check_iphdr(skb);
@@ -1615,7 +1601,11 @@ int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
return -ENOMSG;
- return __ip_mc_check_igmp(skb, skb_trimmed);
+ ret = ip_mc_check_igmp_csum(skb);
+ if (ret < 0)
+ return ret;
+
+ return ip_mc_check_igmp_msg(skb);
}
EXPORT_SYMBOL(ip_mc_check_igmp);
@@ -1656,7 +1646,7 @@ static void ip_mc_rejoin_groups(struct in_device *in_dev)
* A socket has left a multicast group on device dev
*/
-void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
+void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
{
struct ip_mc_list *i;
struct ip_mc_list __rcu **ip;
@@ -1671,7 +1661,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
ip_mc_hash_remove(in_dev, i);
*ip = i->next_rcu;
in_dev->mc_count--;
- igmp_group_dropped(i);
+ __igmp_group_dropped(i, gfp);
ip_mc_clear_src(i);
if (!in_dev->dead)
@@ -1684,7 +1674,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
}
}
}
-EXPORT_SYMBOL(ip_mc_dec_group);
+EXPORT_SYMBOL(__ip_mc_dec_group);
/* Device changing type */
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1a4e9ff02762..5731670c560b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+ nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(sizeof(struct inet_diag_msg))
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto errout;
}
- if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
+ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+ ext & (1 << (INET_DIAG_TCLASS - 1))) {
u32 classid = 0;
#ifdef CONFIG_SOCK_CGROUP_DATA
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
#endif
+		/* Fall back to the socket priority if the class id isn't set.
+		 * Classful qdiscs use it as a direct reference to a class.
+		 * For cgroup2 the classid is always zero.
+ */
+ if (!classid)
+ classid = sk->sk_priority;
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
goto errout;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 760a9e52e02b..737808e27f8b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,6 +25,62 @@
#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ };
+ struct sk_buff *next_frag;
+ int frag_run_len;
+};
+
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void fragcb_clear(struct sk_buff *skb)
+{
+ RB_CLEAR_NODE(&skb->rbnode);
+ FRAG_CB(skb)->next_frag = NULL;
+ FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void fragrun_append_to_last(struct inet_frag_queue *q,
+ struct sk_buff *skb)
+{
+ fragcb_clear(skb);
+
+ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+ FRAG_CB(q->fragments_tail)->next_frag = skb;
+ q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+ fragcb_clear(skb);
+
+ if (q->last_run_head)
+ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+ &q->last_run_head->rbnode.rb_right);
+ else
+ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+ q->fragments_tail = skb;
+ q->last_run_head = skb;
+}
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
* Value : 0xff if frame should be dropped.
@@ -123,9 +179,30 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
kmem_cache_free(f->frags_cachep, q);
}
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
+
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ while (skb) {
+ struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+ sum += skb->truesize;
+ kfree_skb(skb);
+ skb = next;
+ }
+ }
+ return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
void inet_frag_destroy(struct inet_frag_queue *q)
{
- struct sk_buff *fp;
struct netns_frags *nf;
unsigned int sum, sum_truesize = 0;
struct inet_frags *f;
@@ -134,20 +211,9 @@ void inet_frag_destroy(struct inet_frag_queue *q)
WARN_ON(del_timer(&q->timer) != 0);
/* Release all fragment data. */
- fp = q->fragments;
nf = q->net;
f = nf->f;
- if (fp) {
- do {
- struct sk_buff *xp = fp->next;
-
- sum_truesize += fp->truesize;
- kfree_skb(fp);
- fp = xp;
- } while (fp);
- } else {
- sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
- }
+ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
sum = sum_truesize + f->qsize;
call_rcu(&q->rcu, inet_frag_destroy_rcu);
@@ -224,3 +290,212 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
return fq;
}
EXPORT_SYMBOL(inet_frag_find);
+
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ int offset, int end)
+{
+ struct sk_buff *last = q->fragments_tail;
+
+	/* RFC5722, Section 4, amended by Errata ID: 3089
+	 * When reassembling an IPv6 datagram, if
+	 * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+ * Duplicates, however, should be ignored (i.e. skb dropped, but the
+ * queue/fragments kept for later reassembly).
+ */
+ if (!last)
+ fragrun_create(q, skb); /* First fragment. */
+ else if (last->ip_defrag_offset + last->len < end) {
+ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+ if (offset < last->ip_defrag_offset + last->len)
+ return IPFRAG_OVERLAP;
+ if (offset == last->ip_defrag_offset + last->len)
+ fragrun_append_to_last(q, skb);
+ else
+ fragrun_create(q, skb);
+ } else {
+ /* Binary search. Note that skb can become the first fragment,
+ * but not the last (covered above).
+ */
+ struct rb_node **rbn, *parent;
+
+ rbn = &q->rb_fragments.rb_node;
+ do {
+ struct sk_buff *curr;
+ int curr_run_end;
+
+ parent = *rbn;
+ curr = rb_to_skb(parent);
+ curr_run_end = curr->ip_defrag_offset +
+ FRAG_CB(curr)->frag_run_len;
+ if (end <= curr->ip_defrag_offset)
+ rbn = &parent->rb_left;
+ else if (offset >= curr_run_end)
+ rbn = &parent->rb_right;
+ else if (offset >= curr->ip_defrag_offset &&
+ end <= curr_run_end)
+ return IPFRAG_DUP;
+ else
+ return IPFRAG_OVERLAP;
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+ * one of its NULL left/right children. Insert skb.
+ */
+ fragcb_clear(skb);
+ rb_link_node(&skb->rbnode, parent, rbn);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+ }
+
+ skb->ip_defrag_offset = offset;
+
+ return IPFRAG_OK;
+}
+EXPORT_SYMBOL(inet_frag_queue_insert);
+
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent)
+{
+ struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+ struct sk_buff **nextp;
+ int delta;
+
+ if (head != skb) {
+ fp = skb_clone(skb, GFP_ATOMIC);
+ if (!fp)
+ return NULL;
+ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ if (RB_EMPTY_NODE(&skb->rbnode))
+ FRAG_CB(parent)->next_frag = fp;
+ else
+ rb_replace_node(&skb->rbnode, &fp->rbnode,
+ &q->rb_fragments);
+ if (q->fragments_tail == skb)
+ q->fragments_tail = fp;
+ skb_morph(skb, head);
+ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &q->rb_fragments);
+ consume_skb(head);
+ head = skb;
+ }
+ WARN_ON(head->ip_defrag_offset != 0);
+
+ delta = -head->truesize;
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+ return NULL;
+
+ delta += head->truesize;
+ if (delta)
+ add_frag_mem_limit(q->net, delta);
+
+	/* If the first fragment is fragmented itself, we split
+	 * it into two chunks: the first with data and paged part
+	 * and the second holding only fragments.
+	 */
+ if (skb_has_frag_list(head)) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+ return NULL;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->data_len = head->data_len - plen;
+ clone->len = clone->data_len;
+ head->truesize += clone->truesize;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+ add_frag_mem_limit(q->net, clone->truesize);
+ skb_shinfo(head)->frag_list = clone;
+ nextp = &clone->next;
+ } else {
+ nextp = &skb_shinfo(head)->frag_list;
+ }
+
+ return nextp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_prepare);
+
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data)
+{
+ struct sk_buff **nextp = (struct sk_buff **)reasm_data;
+ struct rb_node *rbn;
+ struct sk_buff *fp;
+
+ skb_push(head, head->data - skb_network_header(head));
+
+ /* Traverse the tree in order, to build frag_list. */
+ fp = FRAG_CB(head)->next_frag;
+ rbn = rb_next(&head->rbnode);
+ rb_erase(&head->rbnode, &q->rb_fragments);
+ while (rbn || fp) {
+ /* fp points to the next sk_buff in the current run;
+ * rbn points to the next run.
+ */
+ /* Go through the current run. */
+ while (fp) {
+ *nextp = fp;
+ nextp = &fp->next;
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ fp = FRAG_CB(fp)->next_frag;
+ }
+ /* Move to the next run. */
+ if (rbn) {
+ struct rb_node *rbnext = rb_next(rbn);
+
+ fp = rb_to_skb(rbn);
+ rb_erase(rbn, &q->rb_fragments);
+ rbn = rbnext;
+ }
+ }
+ sub_frag_mem_limit(q->net, head->truesize);
+
+ *nextp = NULL;
+ skb_mark_not_on_list(head);
+ head->prev = NULL;
+ head->tstamp = q->stamp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_finish);
+
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
+{
+ struct sk_buff *head, *skb;
+
+ head = skb_rb_first(&q->rb_fragments);
+ if (!head)
+ return NULL;
+ skb = FRAG_CB(head)->next_frag;
+ if (skb)
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &q->rb_fragments);
+ else
+ rb_erase(&head->rbnode, &q->rb_fragments);
+ memset(&head->rbnode, 0, sizeof(head->rbnode));
+ barrier();
+
+ if (head == q->fragments_tail)
+ q->fragments_tail = NULL;
+
+ sub_frag_mem_limit(q->net, head->truesize);
+
+ return head;
+}
+EXPORT_SYMBOL(inet_frag_pull_head);
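
The helpers exported above (inet_frag_queue_insert(), inet_frag_reasm_prepare(), inet_frag_reasm_finish() and inet_frag_pull_head()) factor the rb-tree "run" handling out of the per-protocol reassembly code; the net/ipv4/ip_fragment.c conversion follows below. As a condensed, hedged outline of the expected call sequence only — demo_frag_queue() is an invented name and all real error and statistics handling is omitted:

	#include <net/inet_frag.h>

	static int demo_frag_queue(struct inet_frag_queue *q, struct sk_buff *skb,
				   int offset, int end)
	{
		struct sk_buff *prev_tail = q->fragments_tail;
		int err;

		/* Place the fragment into the rb-tree of "runs". */
		err = inet_frag_queue_insert(q, skb, offset, end);
		if (err)
			return err;	/* IPFRAG_DUP or IPFRAG_OVERLAP */

		q->stamp = skb->tstamp;
		q->meat += skb->len;

		/* All fragments present: make skb the head, build frag_list. */
		if (q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
		    q->meat == q->len) {
			void *reasm_data = inet_frag_reasm_prepare(q, skb,
								   prev_tail);

			if (!reasm_data)
				return -ENOMEM;
			inet_frag_reasm_finish(q, skb, reasm_data);
		}
		return 0;
	}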
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d757b9642d0d..be778599bfed 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
+ p->n_redirects = 0;
/* 60*HZ is arbitrary, but chosen enough high so that the first
* calculation of tokens is at its maximum.
*/
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 867be8f7f1fa..cf2b0a6a3337 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -57,57 +57,6 @@
*/
static const char ip_frag_cache_name[] = "ip4-frags";
-/* Use skb->cb to track consecutive/adjacent fragments coming at
- * the end of the queue. Nodes in the rb-tree queue will
- * contain "runs" of one or more adjacent fragments.
- *
- * Invariants:
- * - next_frag is NULL at the tail of a "run";
- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
- */
-struct ipfrag_skb_cb {
- struct inet_skb_parm h;
- struct sk_buff *next_frag;
- int frag_run_len;
-};
-
-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
-
-static void ip4_frag_init_run(struct sk_buff *skb)
-{
- BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
-
- FRAG_CB(skb)->next_frag = NULL;
- FRAG_CB(skb)->frag_run_len = skb->len;
-}
-
-/* Append skb to the last "run". */
-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
- struct sk_buff *skb)
-{
- RB_CLEAR_NODE(&skb->rbnode);
- FRAG_CB(skb)->next_frag = NULL;
-
- FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
- FRAG_CB(q->fragments_tail)->next_frag = skb;
- q->fragments_tail = skb;
-}
-
-/* Create a new "run" with the skb. */
-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
-{
- if (q->last_run_head)
- rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
- &q->last_run_head->rbnode.rb_right);
- else
- rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
- rb_insert_color(&skb->rbnode, &q->rb_fragments);
-
- ip4_frag_init_run(skb);
- q->fragments_tail = skb;
- q->last_run_head = skb;
-}
-
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
struct inet_frag_queue q;
@@ -212,27 +161,9 @@ static void ip_expire(struct timer_list *t)
* pull the head out of the tree in order to be able to
* deal with head->dev.
*/
- if (qp->q.fragments) {
- head = qp->q.fragments;
- qp->q.fragments = head->next;
- } else {
- head = skb_rb_first(&qp->q.rb_fragments);
- if (!head)
- goto out;
- if (FRAG_CB(head)->next_frag)
- rb_replace_node(&head->rbnode,
- &FRAG_CB(head)->next_frag->rbnode,
- &qp->q.rb_fragments);
- else
- rb_erase(&head->rbnode, &qp->q.rb_fragments);
- memset(&head->rbnode, 0, sizeof(head->rbnode));
- barrier();
- }
- if (head == qp->q.fragments_tail)
- qp->q.fragments_tail = NULL;
-
- sub_frag_mem_limit(qp->q.net, head->truesize);
-
+ head = inet_frag_pull_head(&qp->q);
+ if (!head)
+ goto out;
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out;
@@ -330,7 +261,6 @@ static int ip_frag_reinit(struct ipq *qp)
qp->q.flags = 0;
qp->q.len = 0;
qp->q.meat = 0;
- qp->q.fragments = NULL;
qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
qp->q.last_run_head = NULL;
@@ -344,12 +274,10 @@ static int ip_frag_reinit(struct ipq *qp)
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
- struct rb_node **rbn, *parent;
- struct sk_buff *skb1, *prev_tail;
- int ihl, end, skb1_run_end;
+ int ihl, end, flags, offset;
+ struct sk_buff *prev_tail;
struct net_device *dev;
unsigned int fragsize;
- int flags, offset;
int err = -ENOENT;
u8 ecn;
@@ -413,62 +341,13 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
/* Makes sure compiler wont do silly aliasing games */
barrier();
- /* RFC5722, Section 4, amended by Errata ID : 3089
- * When reassembling an IPv6 datagram, if
- * one or more its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments) MUST be silently discarded.
- *
- * We do the same here for IPv4 (and increment an snmp counter) but
- * we do not want to drop the whole queue in response to a duplicate
- * fragment.
- */
-
- err = -EINVAL;
- /* Find out where to put this fragment. */
prev_tail = qp->q.fragments_tail;
- if (!prev_tail)
- ip4_frag_create_run(&qp->q, skb); /* First fragment. */
- else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
- /* This is the common case: skb goes to the end. */
- /* Detect and discard overlaps. */
- if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
- goto overlap;
- if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
- ip4_frag_append_to_last_run(&qp->q, skb);
- else
- ip4_frag_create_run(&qp->q, skb);
- } else {
- /* Binary search. Note that skb can become the first fragment,
- * but not the last (covered above).
- */
- rbn = &qp->q.rb_fragments.rb_node;
- do {
- parent = *rbn;
- skb1 = rb_to_skb(parent);
- skb1_run_end = skb1->ip_defrag_offset +
- FRAG_CB(skb1)->frag_run_len;
- if (end <= skb1->ip_defrag_offset)
- rbn = &parent->rb_left;
- else if (offset >= skb1_run_end)
- rbn = &parent->rb_right;
- else if (offset >= skb1->ip_defrag_offset &&
- end <= skb1_run_end)
- goto err; /* No new data, potential duplicate */
- else
- goto overlap; /* Found an overlap */
- } while (*rbn);
- /* Here we have parent properly set, and rbn pointing to
- * one of its NULL left/right children. Insert skb.
- */
- ip4_frag_init_run(skb);
- rb_link_node(&skb->rbnode, parent, rbn);
- rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
- }
+ err = inet_frag_queue_insert(&qp->q, skb, offset, end);
+ if (err)
+ goto insert_error;
if (dev)
qp->iif = dev->ifindex;
- skb->ip_defrag_offset = offset;
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
@@ -501,10 +380,16 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
skb_dst_drop(skb);
return -EINPROGRESS;
-overlap:
+insert_error:
+ if (err == IPFRAG_DUP) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ err = -EINVAL;
__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
inet_frag_kill(&qp->q);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
kfree_skb(skb);
return err;
@@ -516,13 +401,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
- struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
- struct sk_buff **nextp; /* To build frag_list. */
- struct rb_node *rbn;
- int len;
- int ihlen;
- int delta;
- int err;
+ void *reasm_data;
+ int len, err;
u8 ecn;
ipq_kill(qp);
@@ -532,117 +412,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
err = -EINVAL;
goto out_fail;
}
- /* Make the one we just received the head. */
- if (head != skb) {
- fp = skb_clone(skb, GFP_ATOMIC);
- if (!fp)
- goto out_nomem;
- FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
- if (RB_EMPTY_NODE(&skb->rbnode))
- FRAG_CB(prev_tail)->next_frag = fp;
- else
- rb_replace_node(&skb->rbnode, &fp->rbnode,
- &qp->q.rb_fragments);
- if (qp->q.fragments_tail == skb)
- qp->q.fragments_tail = fp;
- skb_morph(skb, head);
- FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
- rb_replace_node(&head->rbnode, &skb->rbnode,
- &qp->q.rb_fragments);
- consume_skb(head);
- head = skb;
- }
- WARN_ON(head->ip_defrag_offset != 0);
-
- /* Allocate a new buffer for the datagram. */
- ihlen = ip_hdrlen(head);
- len = ihlen + qp->q.len;
+ /* Make the one we just received the head. */
+ reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
+ if (!reasm_data)
+ goto out_nomem;
+ len = ip_hdrlen(skb) + qp->q.len;
err = -E2BIG;
if (len > 65535)
goto out_oversize;
- delta = - head->truesize;
-
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- goto out_nomem;
-
- delta += head->truesize;
- if (delta)
- add_frag_mem_limit(qp->q.net, delta);
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (!clone)
- goto out_nomem;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->truesize += clone->truesize;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
- add_frag_mem_limit(qp->q.net, clone->truesize);
- skb_shinfo(head)->frag_list = clone;
- nextp = &clone->next;
- } else {
- nextp = &skb_shinfo(head)->frag_list;
- }
-
- skb_push(head, head->data - skb_network_header(head));
+ inet_frag_reasm_finish(&qp->q, skb, reasm_data);
- /* Traverse the tree in order, to build frag_list. */
- fp = FRAG_CB(head)->next_frag;
- rbn = rb_next(&head->rbnode);
- rb_erase(&head->rbnode, &qp->q.rb_fragments);
- while (rbn || fp) {
- /* fp points to the next sk_buff in the current run;
- * rbn points to the next run.
- */
- /* Go through the current run. */
- while (fp) {
- *nextp = fp;
- nextp = &fp->next;
- fp->prev = NULL;
- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
- fp->sk = NULL;
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
- fp = FRAG_CB(fp)->next_frag;
- }
- /* Move to the next run. */
- if (rbn) {
- struct rb_node *rbnext = rb_next(rbn);
-
- fp = rb_to_skb(rbn);
- rb_erase(rbn, &qp->q.rb_fragments);
- rbn = rbnext;
- }
- }
- sub_frag_mem_limit(qp->q.net, head->truesize);
-
- *nextp = NULL;
- skb_mark_not_on_list(head);
- head->prev = NULL;
- head->dev = dev;
- head->tstamp = qp->q.stamp;
- IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
+ skb->dev = dev;
+ IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
- iph = ip_hdr(head);
+ iph = ip_hdr(skb);
iph->tot_len = htons(len);
iph->tos |= ecn;
@@ -655,7 +441,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
* from one very small df-fragment and one large non-df frag.
*/
if (qp->max_df_size == qp->q.max_size) {
- IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+ IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
iph->frag_off = htons(IP_DF);
} else {
iph->frag_off = 0;
@@ -664,7 +450,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
ip_send_check(iph);
__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
- qp->q.fragments = NULL;
qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
qp->q.last_run_head = NULL;
@@ -753,28 +538,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
}
EXPORT_SYMBOL(ip_check_defrag);
-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
-{
- struct rb_node *p = rb_first(root);
- unsigned int sum = 0;
-
- while (p) {
- struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
-
- p = rb_next(p);
- rb_erase(&skb->rbnode, root);
- while (skb) {
- struct sk_buff *next = FRAG_CB(skb)->next_frag;
-
- sum += skb->truesize;
- kfree_skb(skb);
- skb = next;
- }
- }
- return sum;
-}
-EXPORT_SYMBOL(inet_frag_rbtree_purge);
-
#ifdef CONFIG_SYSCTL
static int dist_min;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d1d09f3e5f9e..fd219f7bd3ea 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -268,20 +268,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
int len;
itn = net_generic(net, erspan_net_id);
- len = gre_hdr_len + sizeof(*ershdr);
-
- /* Check based hdr len */
- if (unlikely(!pskb_may_pull(skb, len)))
- return PACKET_REJECT;
iph = ip_hdr(skb);
ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
ver = ershdr->ver;
- /* The original GRE header does not have key field,
- * Use ERSPAN 10-bit session ID as key.
- */
- tpi->key = cpu_to_be32(get_session_id(ershdr));
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags | TUNNEL_KEY,
iph->saddr, iph->daddr, tpi->key);
@@ -458,81 +449,14 @@ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
-static struct rtable *gre_get_rt(struct sk_buff *skb,
- struct net_device *dev,
- struct flowi4 *fl,
- const struct ip_tunnel_key *key)
-{
- struct net *net = dev_net(dev);
-
- memset(fl, 0, sizeof(*fl));
- fl->daddr = key->u.ipv4.dst;
- fl->saddr = key->u.ipv4.src;
- fl->flowi4_tos = RT_TOS(key->tos);
- fl->flowi4_mark = skb->mark;
- fl->flowi4_proto = IPPROTO_GRE;
-
- return ip_route_output_key(net, fl);
-}
-
-static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
- struct net_device *dev,
- struct flowi4 *fl,
- int tunnel_hlen)
-{
- struct ip_tunnel_info *tun_info;
- const struct ip_tunnel_key *key;
- struct rtable *rt = NULL;
- int min_headroom;
- bool use_cache;
- int err;
-
- tun_info = skb_tunnel_info(skb);
- key = &tun_info->key;
- use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
-
- if (use_cache)
- rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
- if (!rt) {
- rt = gre_get_rt(skb, dev, fl, key);
- if (IS_ERR(rt))
- goto err_free_skb;
- if (use_cache)
- dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
- fl->saddr);
- }
-
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + tunnel_hlen + sizeof(struct iphdr);
- if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
- int head_delta = SKB_DATA_ALIGN(min_headroom -
- skb_headroom(skb) +
- 16);
- err = pskb_expand_head(skb, max_t(int, head_delta, 0),
- 0, GFP_ATOMIC);
- if (unlikely(err))
- goto err_free_rt;
- }
- return rt;
-
-err_free_rt:
- ip_rt_put(rt);
-err_free_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NULL;
-}
-
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
- struct rtable *rt = NULL;
- struct flowi4 fl;
int tunnel_hlen;
- __be16 df, flags;
+ __be16 flags;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
@@ -542,13 +466,12 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
key = &tun_info->key;
tunnel_hlen = gre_calc_hlen(key->tun_flags);
- rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
- if (!rt)
- return;
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto err_free_skb;
/* Push Tunnel header. */
if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
- goto err_free_rt;
+ goto err_free_skb;
flags = tun_info->key.tun_flags &
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
@@ -556,32 +479,25 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
tunnel_id_to_key32(tun_info->key.tun_id),
(flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
- iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
- key->tos, key->ttl, df, false);
return;
-err_free_rt:
- ip_rt_put(rt);
err_free_skb:
kfree_skb(skb);
dev->stats.tx_dropped++;
}
-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
- __be16 proto)
+static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
struct erspan_metadata *md;
- struct rtable *rt = NULL;
bool truncate = false;
- struct flowi4 fl;
+ __be16 proto;
int tunnel_hlen;
int version;
- __be16 df;
int nhoff;
int thoff;
@@ -592,21 +508,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
key = &tun_info->key;
if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
- goto err_free_rt;
+ goto err_free_skb;
md = ip_tunnel_info_opts(tun_info);
if (!md)
- goto err_free_rt;
+ goto err_free_skb;
/* ERSPAN has fixed 8 byte GRE header */
version = md->version;
tunnel_hlen = 8 + erspan_hdr_len(version);
- rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
- if (!rt)
- return;
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto err_free_skb;
if (gre_handle_offloads(skb, false))
- goto err_free_rt;
+ goto err_free_skb;
if (skb->len > dev->mtu + dev->hard_header_len) {
pskb_trim(skb, dev->mtu + dev->hard_header_len);
@@ -626,27 +541,25 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
if (version == 1) {
erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
ntohl(md->u.index), truncate, true);
+ proto = htons(ETH_P_ERSPAN);
} else if (version == 2) {
erspan_build_header_v2(skb,
ntohl(tunnel_id_to_key32(key->tun_id)),
md->u.md2.dir,
get_hwid(&md->u.md2),
truncate, true);
+ proto = htons(ETH_P_ERSPAN2);
} else {
- goto err_free_rt;
+ goto err_free_skb;
}
gre_build_header(skb, 8, TUNNEL_SEQ,
- htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
+ proto, 0, htonl(tunnel->o_seqno++));
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
- iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
- key->tos, key->ttl, df, false);
return;
-err_free_rt:
- ip_rt_put(rt);
err_free_skb:
kfree_skb(skb);
dev->stats.tx_dropped++;
@@ -655,13 +568,18 @@ err_free_skb:
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ const struct ip_tunnel_key *key;
struct rtable *rt;
struct flowi4 fl4;
if (ip_tunnel_info_af(info) != AF_INET)
return -EINVAL;
- rt = gre_get_rt(skb, dev, &fl4, &info->key);
+ key = &info->key;
+ ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
+ tunnel_id_to_key32(key->tun_id), key->tos, 0,
+ skb->mark, skb_get_hash(skb));
+ rt = ip_route_output_key(dev_net(dev), &fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -721,12 +639,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
{
struct ip_tunnel *tunnel = netdev_priv(dev);
bool truncate = false;
+ __be16 proto;
if (!pskb_inet_may_pull(skb))
goto free_skb;
if (tunnel->collect_md) {
- erspan_fb_xmit(skb, dev, skb->protocol);
+ erspan_fb_xmit(skb, dev);
return NETDEV_TX_OK;
}
@@ -742,19 +661,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
}
/* Push ERSPAN header */
- if (tunnel->erspan_ver == 1)
+ if (tunnel->erspan_ver == 1) {
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
tunnel->index,
truncate, true);
- else if (tunnel->erspan_ver == 2)
+ proto = htons(ETH_P_ERSPAN);
+ } else if (tunnel->erspan_ver == 2) {
erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
tunnel->dir, tunnel->hwid,
truncate, true);
- else
+ proto = htons(ETH_P_ERSPAN2);
+ } else {
goto free_skb;
+ }
tunnel->parms.o_flags &= ~TUNNEL_KEY;
- __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
+ __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
return NETDEV_TX_OK;
free_skb:
@@ -1459,12 +1381,31 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm *p = &t->parms;
+ __be16 o_flags = p->o_flags;
+
+ if (t->erspan_ver == 1 || t->erspan_ver == 2) {
+ if (!t->collect_md)
+ o_flags |= TUNNEL_KEY;
+
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+ goto nla_put_failure;
+
+ if (t->erspan_ver == 1) {
+ if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+ goto nla_put_failure;
+ }
+ }
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
gre_tnl_flags_to_gre_flags(p->i_flags)) ||
nla_put_be16(skb, IFLA_GRE_OFLAGS,
- gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+ gre_tnl_flags_to_gre_flags(o_flags)) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
@@ -1494,19 +1435,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
}
- if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
- goto nla_put_failure;
-
- if (t->erspan_ver == 1) {
- if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
- goto nla_put_failure;
- } else if (t->erspan_ver == 2) {
- if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
- goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
- goto nla_put_failure;
- }
-
return 0;
nla_put_failure:
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26921f6b3b92..ecce2dc78f17 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -307,11 +307,10 @@ drop:
}
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct net_device *dev)
{
const struct iphdr *iph = ip_hdr(skb);
int (*edemux)(struct sk_buff *skb);
- struct net_device *dev = skb->dev;
struct rtable *rt;
int err;
@@ -400,6 +399,7 @@ drop_error:
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ struct net_device *dev = skb->dev;
int ret;
/* if ingress device is enslaved to an L3 master device pass the
@@ -409,7 +409,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
if (!skb)
return NET_RX_SUCCESS;
- ret = ip_rcv_finish_core(net, sk, skb);
+ ret = ip_rcv_finish_core(net, sk, skb, dev);
if (ret != NET_RX_DROP)
ret = dst_input(skb);
return ret;
@@ -429,7 +429,6 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
-
__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -488,6 +487,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
goto drop;
}
+ iph = ip_hdr(skb);
skb->transport_header = skb->network_header + iph->ihl*4;
/* Remove any debris in the socket control block */
@@ -520,6 +520,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
skb = ip_rcv_core(skb, net);
if (skb == NULL)
return NET_RX_DROP;
+
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
net, NULL, skb, dev, NULL,
ip_rcv_finish);
@@ -544,6 +545,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
+ struct net_device *dev = skb->dev;
struct dst_entry *dst;
skb_list_del_init(skb);
@@ -553,7 +555,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
skb = l3mdev_ip_rcv(skb);
if (!skb)
continue;
- if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+ if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
continue;
dst = skb_dst(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ed194d46c00e..32a35043c9f5 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -251,8 +251,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
* If opt == NULL, then skb->data should point to IP header.
*/
-int ip_options_compile(struct net *net,
- struct ip_options *opt, struct sk_buff *skb)
+int __ip_options_compile(struct net *net,
+ struct ip_options *opt, struct sk_buff *skb,
+ __be32 *info)
{
__be32 spec_dst = htonl(INADDR_ANY);
unsigned char *pp_ptr = NULL;
@@ -468,11 +469,22 @@ eol:
return 0;
error:
- if (skb) {
- icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
- }
+ if (info)
+ *info = htonl((pp_ptr-iph)<<24);
return -EINVAL;
}
+
+int ip_options_compile(struct net *net,
+ struct ip_options *opt, struct sk_buff *skb)
+{
+ int ret;
+ __be32 info;
+
+ ret = __ip_options_compile(net, opt, skb, &info);
+ if (ret != 0 && skb)
+ icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
+ return ret;
+}
EXPORT_SYMBOL(ip_options_compile);
/*
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fffcc130900e..82f341e84fae 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
+ __be16 _ports[2], *ports;
struct sockaddr_in sin;
- __be16 *ports;
- int end;
-
- end = skb_transport_offset(skb) + 4;
- if (end > 0 && !pskb_may_pull(skb, end))
- return;
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
- ports = (__be16 *)skb_transport_header(skb);
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_ports), &_ports);
+ if (!ports)
+ return;
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
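
The ip_cmsg_recv_dstaddr() rewrite above is the standard skb_header_pointer() idiom: rather than pskb_may_pull() plus a direct pointer into the linear area, the helper copies the requested bytes into a caller-supplied buffer when they are not linear and returns NULL when the packet is too short. A minimal sketch, with demo_dest_port() as an invented example around the real helper:

	static __be16 demo_dest_port(const struct sk_buff *skb)
	{
		__be16 _ports[2], *ports;

		/* Pointer into the skb if the bytes are linear, else into
		 * _ports (a copy); NULL if the four bytes are beyond the
		 * end of the packet.
		 */
		ports = skb_header_pointer(skb, skb_transport_offset(skb),
					   sizeof(_ports), &_ports);
		if (!ports)
			return 0;

		return ports[1];	/* destination port, UDP/TCP layout */
	}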
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c4f5602308ed..2756fb725bf0 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -310,7 +310,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
RT_TOS(iph->tos), tunnel->parms.link,
- tunnel->fwmark);
+ tunnel->fwmark, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
if (!IS_ERR(rt)) {
@@ -501,15 +501,19 @@ EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
struct rtable *rt, __be16 df,
- const struct iphdr *inner_iph)
+ const struct iphdr *inner_iph,
+ int tunnel_hlen, __be32 dst, bool md)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+ int pkt_size;
int mtu;
+ tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
+ pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;
+
if (df)
mtu = dst_mtu(&rt->dst) - dev->hard_header_len
- - sizeof(struct iphdr) - tunnel->hlen;
+ - sizeof(struct iphdr) - tunnel_hlen;
else
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
@@ -527,11 +531,13 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
+ __be32 daddr;
+
+ daddr = md ? dst : tunnel->parms.iph.daddr;
if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
mtu >= IPV6_MIN_MTU) {
- if ((tunnel->parms.iph.daddr &&
- !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
+ if ((daddr && !ipv4_is_multicast(daddr)) ||
rt6->rt6i_dst.plen == 128) {
rt6->rt6i_flags |= RTF_MODIFIED;
dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
@@ -548,17 +554,19 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ u8 proto, int tunnel_hlen)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
u32 headroom = sizeof(struct iphdr);
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
const struct iphdr *inner_iph;
- struct rtable *rt;
+ struct rtable *rt = NULL;
struct flowi4 fl4;
__be16 df = 0;
u8 tos, ttl;
+ bool use_cache;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
@@ -574,20 +582,39 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
else if (skb->protocol == htons(ETH_P_IPV6))
tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
}
- ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
- RT_TOS(tos), tunnel->parms.link, tunnel->fwmark);
+ ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
+ tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
+ 0, skb->mark, skb_get_hash(skb));
if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
goto tx_error;
- rt = ip_route_output_key(tunnel->net, &fl4);
- if (IS_ERR(rt)) {
- dev->stats.tx_carrier_errors++;
- goto tx_error;
+
+ use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
+ if (use_cache)
+ rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
+ if (!rt) {
+ rt = ip_route_output_key(tunnel->net, &fl4);
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
+ if (use_cache)
+ dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
+ fl4.saddr);
}
if (rt->dst.dev == dev) {
ip_rt_put(rt);
dev->stats.collisions++;
goto tx_error;
}
+
+ if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+ if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
+ key->u.ipv4.dst, true)) {
+ ip_rt_put(rt);
+ goto tx_error;
+ }
+
tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
ttl = key->ttl;
if (ttl == 0) {
@@ -598,10 +625,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
else
ttl = ip4_dst_hoplimit(&rt->dst);
}
- if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
- df = htons(IP_DF);
- else if (skb->protocol == htons(ETH_P_IP))
+
+ if (!df && skb->protocol == htons(ETH_P_IP))
df = inner_iph->frag_off & htons(IP_DF);
+
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
if (headroom > dev->needed_headroom)
dev->needed_headroom = headroom;
@@ -627,14 +654,17 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct ip_tunnel_info *tun_info = NULL;
const struct iphdr *inner_iph;
- struct flowi4 fl4;
- u8 tos, ttl;
- __be16 df;
- struct rtable *rt; /* Route to the other host */
unsigned int max_headroom; /* The extra header space needed */
- __be32 dst;
+ struct rtable *rt = NULL; /* Route to the other host */
+ bool use_cache = false;
+ struct flowi4 fl4;
+ bool md = false;
bool connected;
+ u8 tos, ttl;
+ __be32 dst;
+ __be16 df;
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
@@ -650,7 +680,15 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (skb->protocol == htons(ETH_P_IP)) {
+ tun_info = skb_tunnel_info(skb);
+ if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
+ ip_tunnel_info_af(tun_info) == AF_INET &&
+ tun_info->key.u.ipv4.dst) {
+ dst = tun_info->key.u.ipv4.dst;
+ md = true;
+ connected = true;
+ }
+ else if (skb->protocol == htons(ETH_P_IP)) {
rt = skb_rtable(skb);
dst = rt_nexthop(rt, inner_iph->daddr);
}
@@ -688,7 +726,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
else
goto tx_error;
- connected = false;
+ if (!md)
+ connected = false;
}
tos = tnl_params->tos;
@@ -705,13 +744,20 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
- tunnel->fwmark);
+ tunnel->fwmark, skb_get_hash(skb));
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
goto tx_error;
- rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr) :
- NULL;
+ if (connected && md) {
+ use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
+ if (use_cache)
+ rt = dst_cache_get_ip4(&tun_info->dst_cache,
+ &fl4.saddr);
+ } else {
+ rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache,
+ &fl4.saddr) : NULL;
+ }
if (!rt) {
rt = ip_route_output_key(tunnel->net, &fl4);
@@ -720,7 +766,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
dev->stats.tx_carrier_errors++;
goto tx_error;
}
- if (connected)
+ if (use_cache)
+ dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
+ fl4.saddr);
+ else if (!md && connected)
dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
fl4.saddr);
}
@@ -731,7 +780,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
+ if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
+ 0, 0, false)) {
ip_rt_put(rt);
goto tx_error;
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 9a0e67b52a4e..c3f3d28d1087 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -252,6 +252,14 @@ static int ip_tun_build_state(struct nlattr *attr,
tun_info = lwt_tun_info(new_state);
+#ifdef CONFIG_DST_CACHE
+ err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL);
+ if (err) {
+ lwtstate_free(new_state);
+ return err;
+ }
+#endif
+
if (tb[LWTUNNEL_IP_ID])
tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);
@@ -278,6 +286,15 @@ static int ip_tun_build_state(struct nlattr *attr,
return 0;
}
+static void ip_tun_destroy_state(struct lwtunnel_state *lwtstate)
+{
+#ifdef CONFIG_DST_CACHE
+ struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+ dst_cache_destroy(&tun_info->dst_cache);
+#endif
+}
+
static int ip_tun_fill_encap_info(struct sk_buff *skb,
struct lwtunnel_state *lwtstate)
{
@@ -313,6 +330,7 @@ static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
.build_state = ip_tun_build_state,
+ .destroy_state = ip_tun_destroy_state,
.fill_encap = ip_tun_fill_encap_info,
.get_encap_size = ip_tun_encap_nlsize,
.cmp_encap = ip_tun_cmp_encap,
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index d7b43e700023..68a21bf75dd0 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -74,6 +74,33 @@ drop:
return 0;
}
+static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type)
+{
+ struct ip_tunnel *tunnel;
+ const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
+ if (tunnel) {
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ goto drop;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+
+ skb->dev = tunnel->dev;
+
+ return xfrm_input(skb, nexthdr, spi, encap_type);
+ }
+
+ return -EINVAL;
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
static int vti_rcv(struct sk_buff *skb)
{
XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
}
+static int vti_rcv_ipip(struct sk_buff *skb)
+{
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+ return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+}
+
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
unsigned short family;
@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
.priority = 100,
};
+static struct xfrm_tunnel ipip_handler __read_mostly = {
+ .handler = vti_rcv_ipip,
+ .err_handler = vti4_err,
+ .priority = 0,
+};
+
static int __net_init vti_init_net(struct net *net)
{
int err;
@@ -603,6 +644,13 @@ static int __init vti_init(void)
if (err < 0)
goto xfrm_proto_comp_failed;
+ msg = "ipip tunnel";
+ err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+ if (err < 0) {
+ pr_info("%s: can't register tunnel\n", __func__);
+ goto xfrm_tunnel_failed;
+ }
+
msg = "netlink interface";
err = rtnl_link_register(&vti_link_ops);
if (err < 0)
@@ -612,6 +660,8 @@ static int __init vti_init(void)
rtnl_link_failed:
xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_tunnel_failed:
+ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_proto_comp_failed:
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index b9a9873c25c6..9bcca08efec9 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -85,7 +85,6 @@
/* Define the friendly delay before and after opening net devices */
#define CONF_POST_OPEN 10 /* After opening: 10 msecs */
-#define CONF_CARRIER_TIMEOUT 120000 /* Wait for carrier timeout */
/* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
#define CONF_OPEN_RETRIES 2 /* (Re)open devices twice */
@@ -101,6 +100,9 @@
#define NONE cpu_to_be32(INADDR_NONE)
#define ANY cpu_to_be32(INADDR_ANY)
+/* Wait for carrier timeout default in seconds */
+static unsigned int carrier_timeout = 120;
+
/*
* Public IP configuration
*/
@@ -268,9 +270,9 @@ static int __init ic_open_devs(void)
/* wait for a carrier on at least one device */
start = jiffies;
- next_msg = start + msecs_to_jiffies(CONF_CARRIER_TIMEOUT/12);
+ next_msg = start + msecs_to_jiffies(20000);
while (time_before(jiffies, start +
- msecs_to_jiffies(CONF_CARRIER_TIMEOUT))) {
+ msecs_to_jiffies(carrier_timeout * 1000))) {
int wait, elapsed;
for_each_netdev(&init_net, dev)
@@ -283,9 +285,9 @@ static int __init ic_open_devs(void)
continue;
elapsed = jiffies_to_msecs(jiffies - start);
- wait = (CONF_CARRIER_TIMEOUT - elapsed + 500)/1000;
+ wait = (carrier_timeout * 1000 - elapsed + 500) / 1000;
pr_info("Waiting up to %d more seconds for network.\n", wait);
- next_msg = jiffies + msecs_to_jiffies(CONF_CARRIER_TIMEOUT/12);
+ next_msg = jiffies + msecs_to_jiffies(20000);
}
have_carrier:
rtnl_unlock();
@@ -1780,3 +1782,18 @@ static int __init vendor_class_identifier_setup(char *addrs)
return 1;
}
__setup("dhcpclass=", vendor_class_identifier_setup);
+
+static int __init set_carrier_timeout(char *str)
+{
+ ssize_t ret;
+
+ if (!str)
+ return 0;
+
+ ret = kstrtouint(str, 0, &carrier_timeout);
+ if (ret)
+ return 0;
+
+ return 1;
+}
+__setup("carrier_timeout=", set_carrier_timeout);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 57c5dd283a2c..fe10b9a2efc8 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -302,7 +302,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
skb_set_inner_ipproto(skb, ipproto);
if (tunnel->collect_md)
- ip_md_tunnel_xmit(skb, dev, ipproto);
+ ip_md_tunnel_xmit(skb, dev, ipproto, 0);
else
ip_tunnel_xmit(skb, dev, tiph, ipproto);
return NETDEV_TX_OK;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ddbf8c9a1abb..2c931120c494 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -67,7 +67,6 @@
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
-#include <net/switchdev.h>
#include <linux/nospec.h>
@@ -111,7 +110,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
-static void mroute_clean_tables(struct mr_table *mrt, bool all);
+static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -416,7 +415,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
static void ipmr_free_table(struct mr_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
- mroute_clean_tables(mrt, true);
+ mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
+ MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC);
rhltable_destroy(&mrt->mfc_hash);
kfree(mrt);
}
@@ -837,10 +837,8 @@ static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
static int vif_add(struct net *net, struct mr_table *mrt,
struct vifctl *vifc, int mrtsock)
{
+ struct netdev_phys_item_id ppid = { };
int vifi = vifc->vifc_vifi;
- struct switchdev_attr attr = {
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
struct vif_device *v = &mrt->vif_table[vifi];
struct net_device *dev;
struct in_device *in_dev;
@@ -919,10 +917,10 @@ static int vif_add(struct net *net, struct mr_table *mrt,
vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
(VIFF_TUNNEL | VIFF_REGISTER));
- attr.orig_dev = dev;
- if (!switchdev_port_attr_get(dev, &attr)) {
- memcpy(v->dev_parent_id.id, attr.u.ppid.id, attr.u.ppid.id_len);
- v->dev_parent_id.id_len = attr.u.ppid.id_len;
+ err = dev_get_port_parent_id(dev, &ppid, true);
+ if (err == 0) {
+ memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
+ v->dev_parent_id.id_len = ppid.id_len;
} else {
v->dev_parent_id.id_len = 0;
}
@@ -1299,7 +1297,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
}
/* Close the multicast socket, and clear the vif tables etc */
-static void mroute_clean_tables(struct mr_table *mrt, bool all)
+static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
struct net *net = read_pnet(&mrt->net);
struct mr_mfc *c, *tmp;
@@ -1308,35 +1306,44 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
int i;
/* Shut down all active vif entries */
- for (i = 0; i < mrt->maxvif; i++) {
- if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
- continue;
- vif_delete(mrt, i, 0, &list);
+ if (flags & (MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC)) {
+ for (i = 0; i < mrt->maxvif; i++) {
+ if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
+ !(flags & MRT_FLUSH_VIFS_STATIC)) ||
+ (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT_FLUSH_VIFS)))
+ continue;
+ vif_delete(mrt, i, 0, &list);
+ }
+ unregister_netdevice_many(&list);
}
- unregister_netdevice_many(&list);
/* Wipe the cache */
- list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
- if (!all && (c->mfc_flags & MFC_STATIC))
- continue;
- rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
- list_del_rcu(&c->list);
- cache = (struct mfc_cache *)c;
- call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
- mrt->id);
- mroute_netlink_event(mrt, cache, RTM_DELROUTE);
- mr_cache_put(c);
- }
-
- if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
- spin_lock_bh(&mfc_unres_lock);
- list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
- list_del(&c->list);
+ if (flags & (MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC)) {
+ list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
+ if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC_STATIC)) ||
+ (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC)))
+ continue;
+ rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
+ list_del_rcu(&c->list);
cache = (struct mfc_cache *)c;
+ call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
+ mrt->id);
mroute_netlink_event(mrt, cache, RTM_DELROUTE);
- ipmr_destroy_unres(mrt, cache);
+ mr_cache_put(c);
+ }
+ }
+
+ if (flags & MRT_FLUSH_MFC) {
+ if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
+ spin_lock_bh(&mfc_unres_lock);
+ list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
+ list_del(&c->list);
+ cache = (struct mfc_cache *)c;
+ mroute_netlink_event(mrt, cache, RTM_DELROUTE);
+ ipmr_destroy_unres(mrt, cache);
+ }
+ spin_unlock_bh(&mfc_unres_lock);
}
- spin_unlock_bh(&mfc_unres_lock);
}
}
@@ -1357,7 +1364,7 @@ static void mrtsock_destruct(struct sock *sk)
NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all);
RCU_INIT_POINTER(mrt->mroute_sk, NULL);
- mroute_clean_tables(mrt, false);
+ mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC);
}
}
rtnl_unlock();
@@ -1482,6 +1489,17 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
sk == rtnl_dereference(mrt->mroute_sk),
parent);
break;
+ case MRT_FLUSH:
+ if (optlen != sizeof(val)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (get_user(val, (int __user *)optval)) {
+ ret = -EFAULT;
+ break;
+ }
+ mroute_clean_tables(mrt, val);
+ break;
/* Control PIM assert. */
case MRT_ASSERT:
if (optlen != sizeof(val)) {
@@ -2467,6 +2485,61 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}
+static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid header for multicast route get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
+ (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
+ rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
+ rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for multicast route get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+ if (err)
+ return err;
+
+ if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
+ (tb[RTA_DST] && !rtm->rtm_dst_len)) {
+ NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= RTA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_SRC:
+ case RTA_DST:
+ case RTA_TABLE:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in multicast route get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -2475,18 +2548,14 @@ static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct sk_buff *skb = NULL;
struct mfc_cache *cache;
struct mr_table *mrt;
- struct rtmsg *rtm;
__be32 src, grp;
u32 tableid;
int err;
- err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_ipv4_policy, extack);
+ err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;
- rtm = nlmsg_data(nlh);
-
src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
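Since mroute_clean_tables() now takes a flags mask, the new MRT_FLUSH socket option gives a multicast routing daemon a way to flush selected state without closing its mroute socket. Below is a minimal sketch of how such a daemon might drop dynamically learned VIFs and MFC entries while keeping static ones; it assumes the daemon already performed MRT_INIT on a raw IGMP socket and that the MRT_FLUSH* constants used by this series are exported through the uapi <linux/mroute.h>:

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/mroute.h>

/* 'mrt_sk' is assumed to be the raw IGMP socket on which MRT_INIT
 * was issued earlier; returns the setsockopt() result. */
static int flush_dynamic_mroute_state(int mrt_sk)
{
	/* Leave the MRT_FLUSH_*_STATIC bits out so static entries stay. */
	int flags = MRT_FLUSH_VIFS | MRT_FLUSH_MFC;

	return setsockopt(mrt_sk, IPPROTO_IP, MRT_FLUSH, &flags, sizeof(flags));
}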
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 8d2e5dc9a827..a058213b77a7 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -80,24 +80,6 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
}
EXPORT_SYMBOL(ip_route_me_harder);
-int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
-{
- const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
- if (entry->state.hook == NF_INET_LOCAL_OUT) {
- const struct iphdr *iph = ip_hdr(skb);
-
- if (!(iph->tos == rt_info->tos &&
- skb->mark == rt_info->mark &&
- iph->daddr == rt_info->daddr &&
- iph->saddr == rt_info->saddr))
- return ip_route_me_harder(entry->state.net, skb,
- RTN_UNSPEC);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(nf_ip_reroute);
-
int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict __always_unused)
{
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 80f72cc5ca8d..c98391d49200 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -94,50 +94,7 @@ config NF_REJECT_IPV4
tristate "IPv4 packet rejection"
default m if NETFILTER_ADVANCED=n
-config NF_NAT_IPV4
- tristate "IPv4 NAT"
- depends on NF_CONNTRACK
- default m if NETFILTER_ADVANCED=n
- select NF_NAT
- help
- The IPv4 NAT option allows masquerading, port forwarding and other
- forms of full Network Address Port Translation. This can be
- controlled by iptables or nft.
-
-if NF_NAT_IPV4
-
-config NF_NAT_MASQUERADE_IPV4
- bool
-
-if NF_TABLES
-config NFT_CHAIN_NAT_IPV4
- depends on NF_TABLES_IPV4
- tristate "IPv4 nf_tables nat chain support"
- help
- This option enables the "nat" chain for IPv4 in nf_tables. This
- chain type is used to perform Network Address Translation (NAT)
- packet transformations such as the source, destination address and
- source and destination ports.
-
-config NFT_MASQ_IPV4
- tristate "IPv4 masquerading support for nf_tables"
- depends on NF_TABLES_IPV4
- depends on NFT_MASQ
- select NF_NAT_MASQUERADE_IPV4
- help
- This is the expression that provides IPv4 masquerading support for
- nf_tables.
-
-config NFT_REDIR_IPV4
- tristate "IPv4 redirect support for nf_tables"
- depends on NF_TABLES_IPV4
- depends on NFT_REDIR
- select NF_NAT_REDIRECT
- help
- This is the expression that provides IPv4 redirect support for
- nf_tables.
-endif # NF_TABLES
-
+if NF_NAT
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
depends on NF_CONNTRACK_SNMP
@@ -166,7 +123,7 @@ config NF_NAT_H323
depends on NF_CONNTRACK
default NF_CONNTRACK_H323
-endif # NF_NAT_IPV4
+endif # NF_NAT
config IP_NF_IPTABLES
tristate "IP tables support (required for filtering/masq/NAT)"
@@ -263,7 +220,6 @@ config IP_NF_NAT
depends on NF_CONNTRACK
default m if NETFILTER_ADVANCED=n
select NF_NAT
- select NF_NAT_IPV4
select NETFILTER_XT_NAT
help
This enables the `nat' table in iptables. This allows masquerading,
@@ -276,7 +232,7 @@ if IP_NF_NAT
config IP_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
- select NF_NAT_MASQUERADE_IPV4
+ select NF_NAT_MASQUERADE
default m if NETFILTER_ADVANCED=n
help
Masquerading is a special case of NAT: all outgoing connections are
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index fd7122e0e2c9..e241f5188ebe 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -3,10 +3,6 @@
# Makefile for the netfilter modules on top of IPv4.
#
-nf_nat_ipv4-y := nf_nat_l3proto_ipv4.o
-nf_nat_ipv4-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o
-obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
-
# defrag
obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o
@@ -29,11 +25,8 @@ $(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic.asn1.h
obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
-obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
-obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
-obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
# flow table support
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index b61977db9b7f..835d50b279f5 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net)
static void clusterip_net_exit(struct net *net)
{
+#ifdef CONFIG_PROC_FS
struct clusterip_net *cn = clusterip_pernet(net);
-#ifdef CONFIG_PROC_FS
mutex_lock(&cn->mutex);
proc_remove(cn->procdir);
cn->procdir = NULL;
@@ -864,7 +864,7 @@ static struct pernet_operations clusterip_net_ops = {
.size = sizeof(struct clusterip_net),
};
-struct notifier_block cip_netdev_notifier = {
+static struct notifier_block cip_netdev_notifier = {
.notifier_call = clusterip_netdev_event
};
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index a317445448bf..007da0882412 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -15,8 +15,6 @@
#include <net/ip.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
static int __net_init iptable_nat_table_init(struct net *net);
@@ -70,10 +68,10 @@ static int ipt_nat_register_lookups(struct net *net)
int i, ret;
for (i = 0; i < ARRAY_SIZE(nf_nat_ipv4_ops); i++) {
- ret = nf_nat_l3proto_ipv4_register_fn(net, &nf_nat_ipv4_ops[i]);
+ ret = nf_nat_ipv4_register_fn(net, &nf_nat_ipv4_ops[i]);
if (ret) {
while (i)
- nf_nat_l3proto_ipv4_unregister_fn(net, &nf_nat_ipv4_ops[--i]);
+ nf_nat_ipv4_unregister_fn(net, &nf_nat_ipv4_ops[--i]);
return ret;
}
@@ -87,7 +85,7 @@ static void ipt_nat_unregister_lookups(struct net *net)
int i;
for (i = 0; i < ARRAY_SIZE(nf_nat_ipv4_ops); i++)
- nf_nat_l3proto_ipv4_unregister_fn(net, &nf_nat_ipv4_ops[i]);
+ nf_nat_ipv4_unregister_fn(net, &nf_nat_ipv4_ops[i]);
}
static int __net_init iptable_nat_table_init(struct net *net)
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
deleted file mode 100644
index 2687db015b6f..000000000000
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ /dev/null
@@ -1,387 +0,0 @@
-/*
- * (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2011 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/icmp.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <net/secure_seq.h>
-#include <net/checksum.h>
-#include <net/route.h>
-#include <net/ip.h>
-
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static const struct nf_nat_l3proto nf_nat_l3proto_ipv4;
-
-#ifdef CONFIG_XFRM
-static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
- const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- unsigned long statusbit,
- struct flowi *fl)
-{
- const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
- struct flowi4 *fl4 = &fl->u.ip4;
-
- if (ct->status & statusbit) {
- fl4->daddr = t->dst.u3.ip;
- if (t->dst.protonum == IPPROTO_TCP ||
- t->dst.protonum == IPPROTO_UDP ||
- t->dst.protonum == IPPROTO_UDPLITE ||
- t->dst.protonum == IPPROTO_DCCP ||
- t->dst.protonum == IPPROTO_SCTP)
- fl4->fl4_dport = t->dst.u.all;
- }
-
- statusbit ^= IPS_NAT_MASK;
-
- if (ct->status & statusbit) {
- fl4->saddr = t->src.u3.ip;
- if (t->dst.protonum == IPPROTO_TCP ||
- t->dst.protonum == IPPROTO_UDP ||
- t->dst.protonum == IPPROTO_UDPLITE ||
- t->dst.protonum == IPPROTO_DCCP ||
- t->dst.protonum == IPPROTO_SCTP)
- fl4->fl4_sport = t->src.u.all;
- }
-}
-#endif /* CONFIG_XFRM */
-
-static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
- unsigned int iphdroff,
- const struct nf_conntrack_tuple *target,
- enum nf_nat_manip_type maniptype)
-{
- struct iphdr *iph;
- unsigned int hdroff;
-
- if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
- return false;
-
- iph = (void *)skb->data + iphdroff;
- hdroff = iphdroff + iph->ihl * 4;
-
- if (!nf_nat_l4proto_manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff,
- hdroff, target, maniptype))
- return false;
- iph = (void *)skb->data + iphdroff;
-
- if (maniptype == NF_NAT_MANIP_SRC) {
- csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
- iph->saddr = target->src.u3.ip;
- } else {
- csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
- iph->daddr = target->dst.u3.ip;
- }
- return true;
-}
-
-static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
- unsigned int iphdroff, __sum16 *check,
- const struct nf_conntrack_tuple *t,
- enum nf_nat_manip_type maniptype)
-{
- struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
- __be32 oldip, newip;
-
- if (maniptype == NF_NAT_MANIP_SRC) {
- oldip = iph->saddr;
- newip = t->src.u3.ip;
- } else {
- oldip = iph->daddr;
- newip = t->dst.u3.ip;
- }
- inet_proto_csum_replace4(check, skb, oldip, newip, true);
-}
-
-static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
- u8 proto, void *data, __sum16 *check,
- int datalen, int oldlen)
-{
- if (skb->ip_summed != CHECKSUM_PARTIAL) {
- const struct iphdr *iph = ip_hdr(skb);
-
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
- ip_hdrlen(skb);
- skb->csum_offset = (void *)check - data;
- *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
- proto, 0);
- } else
- inet_proto_csum_replace2(check, skb,
- htons(oldlen), htons(datalen), true);
-}
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range2 *range)
-{
- if (tb[CTA_NAT_V4_MINIP]) {
- range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
- range->flags |= NF_NAT_RANGE_MAP_IPS;
- }
-
- if (tb[CTA_NAT_V4_MAXIP])
- range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
- else
- range->max_addr.ip = range->min_addr.ip;
-
- return 0;
-}
-#endif
-
-static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
- .l3proto = NFPROTO_IPV4,
- .manip_pkt = nf_nat_ipv4_manip_pkt,
- .csum_update = nf_nat_ipv4_csum_update,
- .csum_recalc = nf_nat_ipv4_csum_recalc,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_ipv4_nlattr_to_range,
-#endif
-#ifdef CONFIG_XFRM
- .decode_session = nf_nat_ipv4_decode_session,
-#endif
-};
-
-int nf_nat_icmp_reply_translation(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum)
-{
- struct {
- struct icmphdr icmp;
- struct iphdr ip;
- } *inside;
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
- unsigned int hdrlen = ip_hdrlen(skb);
- struct nf_conntrack_tuple target;
- unsigned long statusbit;
-
- WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
-
- if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
- return 0;
- if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
- return 0;
-
- inside = (void *)skb->data + hdrlen;
- if (inside->icmp.type == ICMP_REDIRECT) {
- if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
- return 0;
- if (ct->status & IPS_NAT_MASK)
- return 0;
- }
-
- if (manip == NF_NAT_MANIP_SRC)
- statusbit = IPS_SRC_NAT;
- else
- statusbit = IPS_DST_NAT;
-
- /* Invert if this is reply direction */
- if (dir == IP_CT_DIR_REPLY)
- statusbit ^= IPS_NAT_MASK;
-
- if (!(ct->status & statusbit))
- return 1;
-
- if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
- &ct->tuplehash[!dir].tuple, !manip))
- return 0;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL) {
- /* Reloading "inside" here since manip_pkt may reallocate */
- inside = (void *)skb->data + hdrlen;
- inside->icmp.checksum = 0;
- inside->icmp.checksum =
- csum_fold(skb_checksum(skb, hdrlen,
- skb->len - hdrlen, 0));
- }
-
- /* Change outer to look like the reply to an incoming packet */
- nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
- if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
- return 0;
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
-
-static unsigned int
-nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct)
- return NF_ACCEPT;
-
- if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) {
- if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
- if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
- state->hook))
- return NF_DROP;
- else
- return NF_ACCEPT;
- }
- }
-
- return nf_nat_inet_fn(priv, skb, state);
-}
-
-static unsigned int
-nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- unsigned int ret;
- __be32 daddr = ip_hdr(skb)->daddr;
-
- ret = nf_nat_ipv4_fn(priv, skb, state);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- daddr != ip_hdr(skb)->daddr)
- skb_dst_drop(skb);
-
- return ret;
-}
-
-static unsigned int
-nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
-#ifdef CONFIG_XFRM
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- int err;
-#endif
- unsigned int ret;
-
- ret = nf_nat_ipv4_fn(priv, skb, state);
-#ifdef CONFIG_XFRM
- if (ret != NF_DROP && ret != NF_STOLEN &&
- !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if ((ct->tuplehash[dir].tuple.src.u3.ip !=
- ct->tuplehash[!dir].tuple.dst.u3.ip) ||
- (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
- ct->tuplehash[dir].tuple.src.u.all !=
- ct->tuplehash[!dir].tuple.dst.u.all)) {
- err = nf_xfrm_me_harder(state->net, skb, AF_INET);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
- }
-#endif
- return ret;
-}
-
-static unsigned int
-nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- unsigned int ret;
- int err;
-
- ret = nf_nat_ipv4_fn(priv, skb, state);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if (ct->tuplehash[dir].tuple.dst.u3.ip !=
- ct->tuplehash[!dir].tuple.src.u3.ip) {
- err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
-#ifdef CONFIG_XFRM
- else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
- ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
- ct->tuplehash[dir].tuple.dst.u.all !=
- ct->tuplehash[!dir].tuple.src.u.all) {
- err = nf_xfrm_me_harder(state->net, skb, AF_INET);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
-#endif
- }
- return ret;
-}
-
-static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
- /* Before packet filtering, change destination */
- {
- .hook = nf_nat_ipv4_in,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP_PRI_NAT_DST,
- },
- /* After packet filtering, change source */
- {
- .hook = nf_nat_ipv4_out,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP_PRI_NAT_SRC,
- },
- /* Before packet filtering, change destination */
- {
- .hook = nf_nat_ipv4_local_fn,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP_PRI_NAT_DST,
- },
- /* After packet filtering, change source */
- {
- .hook = nf_nat_ipv4_fn,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP_PRI_NAT_SRC,
- },
-};
-
-int nf_nat_l3proto_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops)
-{
- return nf_nat_register_fn(net, ops, nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
-}
-EXPORT_SYMBOL_GPL(nf_nat_l3proto_ipv4_register_fn);
-
-void nf_nat_l3proto_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
-{
- nf_nat_unregister_fn(net, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
-}
-EXPORT_SYMBOL_GPL(nf_nat_l3proto_ipv4_unregister_fn);
-
-static int __init nf_nat_l3proto_ipv4_init(void)
-{
- return nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
-}
-
-static void __exit nf_nat_l3proto_ipv4_exit(void)
-{
- nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
-}
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("nf-nat-" __stringify(AF_INET));
-
-module_init(nf_nat_l3proto_ipv4_init);
-module_exit(nf_nat_l3proto_ipv4_exit);
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
index a0aa13bcabda..0a8a60c1bf9a 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
int snmp_version(void *context, size_t hdrlen, unsigned char tag,
const void *data, size_t datalen)
{
+ if (datalen != 1)
+ return -EINVAL;
if (*(unsigned char *)data > 1)
return -ENOTSUPP;
return 1;
@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
const void *data, size_t datalen)
{
struct snmp_ctx *ctx = (struct snmp_ctx *)context;
- __be32 *pdata = (__be32 *)data;
+ __be32 *pdata;
+ if (datalen != 4)
+ return -EINVAL;
+ pdata = (__be32 *)data;
if (*pdata == ctx->from) {
pr_debug("%s: %pI4 to %pI4\n", __func__,
(void *)&ctx->from, (void *)&ctx->to);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index aa8304c618b8..7dc3c324b911 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -173,21 +173,16 @@ EXPORT_SYMBOL_GPL(nf_send_reset);
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
{
struct iphdr *iph = ip_hdr(skb_in);
- u8 proto;
+ u8 proto = iph->protocol;
if (iph->frag_off & htons(IP_OFFSET))
return;
- if (skb_csum_unnecessary(skb_in)) {
+ if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
return;
}
- if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)
- proto = iph->protocol;
- else
- proto = 0;
-
if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
deleted file mode 100644
index a3c4ea303e3e..000000000000
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
- * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
- * Copyright (c) 2012 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables_ipv4.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/ip.h>
-
-static unsigned int nft_nat_do_chain(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nft_pktinfo pkt;
-
- nft_set_pktinfo(&pkt, skb, state);
- nft_set_pktinfo_ipv4(&pkt, skb);
-
- return nft_do_chain(&pkt, priv);
-}
-
-static int nft_nat_ipv4_reg(struct net *net, const struct nf_hook_ops *ops)
-{
- return nf_nat_l3proto_ipv4_register_fn(net, ops);
-}
-
-static void nft_nat_ipv4_unreg(struct net *net, const struct nf_hook_ops *ops)
-{
- nf_nat_l3proto_ipv4_unregister_fn(net, ops);
-}
-
-static const struct nft_chain_type nft_chain_nat_ipv4 = {
- .name = "nat",
- .type = NFT_CHAIN_T_NAT,
- .family = NFPROTO_IPV4,
- .owner = THIS_MODULE,
- .hook_mask = (1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_POST_ROUTING) |
- (1 << NF_INET_LOCAL_OUT) |
- (1 << NF_INET_LOCAL_IN),
- .hooks = {
- [NF_INET_PRE_ROUTING] = nft_nat_do_chain,
- [NF_INET_POST_ROUTING] = nft_nat_do_chain,
- [NF_INET_LOCAL_OUT] = nft_nat_do_chain,
- [NF_INET_LOCAL_IN] = nft_nat_do_chain,
- },
- .ops_register = nft_nat_ipv4_reg,
- .ops_unregister = nft_nat_ipv4_unreg,
-};
-
-static int __init nft_chain_nat_init(void)
-{
- nft_register_chain_type(&nft_chain_nat_ipv4);
-
- return 0;
-}
-
-static void __exit nft_chain_nat_exit(void)
-{
- nft_unregister_chain_type(&nft_chain_nat_ipv4);
-}
-
-module_init(nft_chain_nat_init);
-module_exit(nft_chain_nat_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
deleted file mode 100644
index 6847de1d1db8..000000000000
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nft_masq.h>
-#include <net/netfilter/ipv4/nf_nat_masquerade.h>
-
-static void nft_masq_ipv4_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_masq *priv = nft_expr_priv(expr);
- struct nf_nat_range2 range;
-
- memset(&range, 0, sizeof(range));
- range.flags = priv->flags;
- if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_min]);
- range.max_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_max]);
- }
- regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
- &range, nft_out(pkt));
-}
-
-static void
-nft_masq_ipv4_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
-{
- nf_ct_netns_put(ctx->net, NFPROTO_IPV4);
-}
-
-static struct nft_expr_type nft_masq_ipv4_type;
-static const struct nft_expr_ops nft_masq_ipv4_ops = {
- .type = &nft_masq_ipv4_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
- .eval = nft_masq_ipv4_eval,
- .init = nft_masq_init,
- .destroy = nft_masq_ipv4_destroy,
- .dump = nft_masq_dump,
- .validate = nft_masq_validate,
-};
-
-static struct nft_expr_type nft_masq_ipv4_type __read_mostly = {
- .family = NFPROTO_IPV4,
- .name = "masq",
- .ops = &nft_masq_ipv4_ops,
- .policy = nft_masq_policy,
- .maxattr = NFTA_MASQ_MAX,
- .owner = THIS_MODULE,
-};
-
-static int __init nft_masq_ipv4_module_init(void)
-{
- int ret;
-
- ret = nft_register_expr(&nft_masq_ipv4_type);
- if (ret < 0)
- return ret;
-
- ret = nf_nat_masquerade_ipv4_register_notifier();
- if (ret)
- nft_unregister_expr(&nft_masq_ipv4_type);
-
- return ret;
-}
-
-static void __exit nft_masq_ipv4_module_exit(void)
-{
- nft_unregister_expr(&nft_masq_ipv4_type);
- nf_nat_masquerade_ipv4_unregister_notifier();
-}
-
-module_init(nft_masq_ipv4_module_init);
-module_exit(nft_masq_ipv4_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org");
-MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "masq");
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
deleted file mode 100644
index 5120be1d3118..000000000000
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_redirect.h>
-#include <net/netfilter/nft_redir.h>
-
-static void nft_redir_ipv4_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_redir *priv = nft_expr_priv(expr);
- struct nf_nat_ipv4_multi_range_compat mr;
-
- memset(&mr, 0, sizeof(mr));
- if (priv->sreg_proto_min) {
- mr.range[0].min.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_min]);
- mr.range[0].max.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_max]);
- mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
- }
-
- mr.range[0].flags |= priv->flags;
-
- regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, nft_hook(pkt));
-}
-
-static void
-nft_redir_ipv4_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
-{
- nf_ct_netns_put(ctx->net, NFPROTO_IPV4);
-}
-
-static struct nft_expr_type nft_redir_ipv4_type;
-static const struct nft_expr_ops nft_redir_ipv4_ops = {
- .type = &nft_redir_ipv4_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
- .eval = nft_redir_ipv4_eval,
- .init = nft_redir_init,
- .destroy = nft_redir_ipv4_destroy,
- .dump = nft_redir_dump,
- .validate = nft_redir_validate,
-};
-
-static struct nft_expr_type nft_redir_ipv4_type __read_mostly = {
- .family = NFPROTO_IPV4,
- .name = "redir",
- .ops = &nft_redir_ipv4_ops,
- .policy = nft_redir_policy,
- .maxattr = NFTA_REDIR_MAX,
- .owner = THIS_MODULE,
-};
-
-static int __init nft_redir_ipv4_module_init(void)
-{
- return nft_register_expr(&nft_redir_ipv4_type);
-}
-
-static void __exit nft_redir_ipv4_module_exit(void)
-{
- nft_unregister_expr(&nft_redir_ipv4_type);
-}
-
-module_init(nft_redir_ipv4_module_init);
-module_exit(nft_redir_ipv4_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
-MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir");
diff --git a/net/ipv4/netlink.c b/net/ipv4/netlink.c
index f86bb4f06609..d8e3a1fb8e82 100644
--- a/net/ipv4/netlink.c
+++ b/net/ipv4/netlink.c
@@ -3,9 +3,10 @@
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
+#include <linux/in6.h>
#include <net/ip.h>
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack)
{
*ip_proto = nla_get_u8(attr);
@@ -13,11 +14,19 @@ int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
switch (*ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
+ return 0;
case IPPROTO_ICMP:
+ if (family != AF_INET)
+ break;
+ return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ if (family != AF_INET6)
+ break;
return 0;
- default:
- NL_SET_ERR_MSG(extack, "Unsupported ip proto");
- return -EOPNOTSUPP;
+#endif
}
+ NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+ return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ce92f73cf104..738ff0a1a048 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
*/
- if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
peer->rate_tokens = 0;
+ peer->n_redirects = 0;
+ }
/* Too many ignored redirects; do not send anything
* set dst.rate_last to the last seen redirected packet.
*/
- if (peer->rate_tokens >= ip_rt_redirect_number) {
+ if (peer->n_redirects >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
goto out_put_peer;
}
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->rate_tokens;
+ ++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number)
@@ -1608,7 +1611,8 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
return -EINVAL;
if (ipv4_is_zeronet(saddr)) {
- if (!ipv4_is_local_multicast(daddr))
+ if (!ipv4_is_local_multicast(daddr) &&
+ ip_hdr(skb)->protocol != IPPROTO_IGMP)
return -EINVAL;
} else {
err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
@@ -1816,6 +1820,7 @@ out:
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys)
{
+ u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
struct flow_keys hash_keys;
u32 mhash;
@@ -1866,6 +1871,9 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
}
mhash = flow_hash_from_keys(&hash_keys);
+ if (multipath_hash)
+ mhash = jhash_2words(mhash, multipath_hash, 0);
+
return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -2763,6 +2771,75 @@ static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
return skb;
}
+static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG(extack,
+ "ipv4: Invalid header for route get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
+ (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
+ rtm->rtm_table || rtm->rtm_protocol ||
+ rtm->rtm_scope || rtm->rtm_type) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
+ return -EINVAL;
+ }
+
+ if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
+ RTM_F_LOOKUP_TABLE |
+ RTM_F_FIB_MATCH)) {
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+ if (err)
+ return err;
+
+ if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
+ (tb[RTA_DST] && !rtm->rtm_dst_len)) {
+ NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= RTA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_IIF:
+ case RTA_OIF:
+ case RTA_SRC:
+ case RTA_DST:
+ case RTA_IP_PROTO:
+ case RTA_SPORT:
+ case RTA_DPORT:
+ case RTA_MARK:
+ case RTA_UID:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -2783,8 +2860,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
int err;
int mark;
- err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
- extack);
+ err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
@@ -2800,7 +2876,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
- &ip_proto, extack);
+ &ip_proto, AF_INET, extack);
if (err)
return err;
}
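inet_rtm_valid_getroute_req() spells out what a strictly validated RTM_GETROUTE request may contain: an otherwise-zero rtmsg header (only the three listed rtm_flags are allowed), rtm_src_len/rtm_dst_len of 0 or 32, and only the attributes in the switch statement. The sketch below hand-builds such a request on a NETLINK_ROUTE socket; the destination address is a placeholder, and neither the reply nor the NETLINK_GET_STRICT_CHK socket option (which is what makes netlink_strict_get_check() return true) is handled here:

#include <arpa/inet.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char buf[RTA_SPACE(sizeof(struct in_addr))];
	} req;
	struct rtattr *rta;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_INET;
	req.rtm.rtm_dst_len = 32;	/* must be 32 when RTA_DST is present */

	/* RTA_DST is one of the attributes the strict validator accepts. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len = RTA_LENGTH(sizeof(struct in_addr));
	inet_pton(AF_INET, "192.0.2.1", RTA_DATA(rta));	/* placeholder address */
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

	send(fd, &req, req.nlh.nlmsg_len, 0);
	/* ... recv() the RTM_NEWROUTE reply or a netlink error here ... */
	close(fd);
	return 0;
}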
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 27e2f6837062..ad07dd71063d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1127,7 +1127,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
}
static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
- int *copied, size_t size)
+ int *copied, size_t size,
+ struct ubuf_info *uarg)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
@@ -1147,6 +1148,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
return -ENOBUFS;
tp->fastopen_req->data = msg;
tp->fastopen_req->size = size;
+ tp->fastopen_req->uarg = uarg;
if (inet->defer_connect) {
err = tcp_connect(sk);
@@ -1186,11 +1188,6 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
flags = msg->msg_flags;
if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
- if (sk->sk_state != TCP_ESTABLISHED) {
- err = -EINVAL;
- goto out_err;
- }
-
skb = tcp_write_queue_tail(sk);
uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
if (!uarg) {
@@ -1205,7 +1202,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
!tp->repair) {
- err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
+ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
else if (err)
@@ -1415,7 +1412,8 @@ do_fault:
/* It is the one place in all of TCP, except connection
* reset, where we can be unlinking the send_head.
*/
- tcp_check_send_head(sk, skb);
+ if (tcp_write_queue_empty(sk))
+ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
sk_wmem_free_skb(sk, skb);
}
@@ -1554,7 +1552,7 @@ static void tcp_cleanup_rbuf(struct sock *sk, int copied)
(copied > 0 &&
((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
- !icsk->icsk_ack.pingpong)) &&
+ !inet_csk_in_pingpong_mode(sk))) &&
!atomic_read(&sk->sk_rmem_alloc)))
time_to_ack = true;
}
@@ -1847,57 +1845,78 @@ out:
#endif
static void tcp_update_recv_tstamps(struct sk_buff *skb,
- struct scm_timestamping *tss)
+ struct scm_timestamping_internal *tss)
{
if (skb->tstamp)
- tss->ts[0] = ktime_to_timespec(skb->tstamp);
+ tss->ts[0] = ktime_to_timespec64(skb->tstamp);
else
- tss->ts[0] = (struct timespec) {0};
+ tss->ts[0] = (struct timespec64) {0};
if (skb_hwtstamps(skb)->hwtstamp)
- tss->ts[2] = ktime_to_timespec(skb_hwtstamps(skb)->hwtstamp);
+ tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
else
- tss->ts[2] = (struct timespec) {0};
+ tss->ts[2] = (struct timespec64) {0};
}
/* Similar to __sock_recv_timestamp, but does not require an skb */
static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
- struct scm_timestamping *tss)
+ struct scm_timestamping_internal *tss)
{
- struct timeval tv;
+ int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
bool has_timestamping = false;
if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
if (sock_flag(sk, SOCK_RCVTSTAMP)) {
if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
- put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
- sizeof(tss->ts[0]), &tss->ts[0]);
- } else {
- tv.tv_sec = tss->ts[0].tv_sec;
- tv.tv_usec = tss->ts[0].tv_nsec / 1000;
+ if (new_tstamp) {
+ struct __kernel_timespec kts = {tss->ts[0].tv_sec, tss->ts[0].tv_nsec};
- put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
- sizeof(tv), &tv);
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
+ sizeof(kts), &kts);
+ } else {
+ struct timespec ts_old = timespec64_to_timespec(tss->ts[0]);
+
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
+ sizeof(ts_old), &ts_old);
+ }
+ } else {
+ if (new_tstamp) {
+ struct __kernel_sock_timeval stv;
+
+ stv.tv_sec = tss->ts[0].tv_sec;
+ stv.tv_usec = tss->ts[0].tv_nsec / 1000;
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
+ sizeof(stv), &stv);
+ } else {
+ struct __kernel_old_timeval tv;
+
+ tv.tv_sec = tss->ts[0].tv_sec;
+ tv.tv_usec = tss->ts[0].tv_nsec / 1000;
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
+ sizeof(tv), &tv);
+ }
}
}
if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
has_timestamping = true;
else
- tss->ts[0] = (struct timespec) {0};
+ tss->ts[0] = (struct timespec64) {0};
}
if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
has_timestamping = true;
else
- tss->ts[2] = (struct timespec) {0};
+ tss->ts[2] = (struct timespec64) {0};
}
if (has_timestamping) {
- tss->ts[1] = (struct timespec) {0};
- put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING,
- sizeof(*tss), tss);
+ tss->ts[1] = (struct timespec64) {0};
+ if (sock_flag(sk, SOCK_TSTAMP_NEW))
+ put_cmsg_scm_timestamping64(msg, tss);
+ else
+ put_cmsg_scm_timestamping(msg, tss);
}
}
@@ -1938,7 +1957,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
long timeo;
struct sk_buff *skb, *last;
u32 urg_hole = 0;
- struct scm_timestamping tss;
+ struct scm_timestamping_internal tss;
bool has_tss = false;
bool has_cmsg;
@@ -2528,6 +2547,7 @@ void tcp_write_queue_purge(struct sock *sk)
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
tcp_sk(sk)->packets_out = 0;
+ inet_csk(sk)->icsk_backoff = 0;
}
int tcp_disconnect(struct sock *sk, int flags)
@@ -2572,6 +2592,7 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
tp->srtt_us = 0;
+ tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
tp->rcv_rtt_last_tsecr = 0;
tp->write_seq += tp->max_window + 2;
if (tp->write_seq == 0)
@@ -2579,7 +2600,9 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
+ icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ tp->snd_cwnd = TCP_INIT_CWND;
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
tp->delivered_ce = 0;
@@ -2603,6 +2626,23 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->duplicate_sack[0].end_seq = 0;
tp->dsack_dups = 0;
tp->reord_seen = 0;
+ tp->retrans_out = 0;
+ tp->sacked_out = 0;
+ tp->tlp_high_seq = 0;
+ tp->last_oow_ack_time = 0;
+ /* There's a bubble in the pipe until at least the first ACK. */
+ tp->app_limited = ~0U;
+ tp->rack.mstamp = 0;
+ tp->rack.advanced = 0;
+ tp->rack.reo_wnd_steps = 1;
+ tp->rack.last_delivered = 0;
+ tp->rack.reo_wnd_persist = 0;
+ tp->rack.dsack_seen = 0;
+ tp->syn_data_acked = 0;
+ tp->rx_opt.saw_tstamp = 0;
+ tp->rx_opt.dsack = 0;
+ tp->rx_opt.num_sacks = 0;
+
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
@@ -2968,16 +3008,16 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_QUICKACK:
if (!val) {
- icsk->icsk_ack.pingpong = 1;
+ inet_csk_enter_pingpong_mode(sk);
} else {
- icsk->icsk_ack.pingpong = 0;
+ inet_csk_exit_pingpong_mode(sk);
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
inet_csk_ack_scheduled(sk)) {
icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
tcp_cleanup_rbuf(sk, 1);
if (!(val & 1))
- icsk->icsk_ack.pingpong = 1;
+ inet_csk_enter_pingpong_mode(sk);
}
}
break;
@@ -3391,7 +3431,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
return 0;
}
case TCP_QUICKACK:
- val = !icsk->icsk_ack.pingpong;
+ val = !inet_csk_in_pingpong_mode(sk);
break;
case TCP_CONGESTION:
@@ -3659,7 +3699,7 @@ bool tcp_alloc_md5sig_pool(void)
if (!tcp_md5sig_pool_populated) {
__tcp_alloc_md5sig_pool();
if (tcp_md5sig_pool_populated)
- static_key_slow_inc(&tcp_md5_needed);
+ static_branch_inc(&tcp_md5_needed);
}
mutex_unlock(&tcp_md5sig_mutex);
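
The change just above (together with the DEFINE_STATIC_KEY_FALSE() hunk in tcp_ipv4.c further down) moves tcp_md5_needed from the older static_key API to the static_branch wrappers. A rough sketch of the general pattern, with my_feature_key being a made-up key name rather than anything in this patch:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(my_feature_key);

/* Called when the feature is first configured; patches every branch
 * site from a no-op to the enabled path. */
static void my_feature_enable(void)
{
	static_branch_inc(&my_feature_key);
}

static int my_fast_path(void)
{
	/* Costs only a patched jump while the key stays disabled. */
	if (static_branch_unlikely(&my_feature_key))
		return 1;	/* rarely-taken feature work */
	return 0;
}
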
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 0f497fc49c3f..56be7d27f208 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -115,6 +115,14 @@ struct bbr {
unused_b:5;
u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
u32 full_bw; /* recent bw, to estimate if pipe is full */
+
+ /* For tracking ACK aggregation: */
+ u64 ack_epoch_mstamp; /* start of ACK sampling epoch */
+ u16 extra_acked[2]; /* max excess data ACKed in epoch */
+ u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */
+ extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
+ extra_acked_win_idx:1, /* current index in extra_acked array */
+ unused_c:6;
};
#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
@@ -182,6 +190,15 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
+/* Gain factor for adding extra_acked to target cwnd: */
+static const int bbr_extra_acked_gain = BBR_UNIT;
+/* Window length of extra_acked window. */
+static const u32 bbr_extra_acked_win_rtts = 5;
+/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
+static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
+/* Time period for clamping cwnd increment due to ack aggregation */
+static const u32 bbr_extra_acked_max_us = 100 * 1000;
+
static void bbr_check_probe_rtt_done(struct sock *sk);
/* Do we estimate that STARTUP filled the pipe? */
@@ -208,6 +225,16 @@ static u32 bbr_bw(const struct sock *sk)
return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}
+/* Return maximum extra acked in past k-2k round trips,
+ * where k = bbr_extra_acked_win_rtts.
+ */
+static u16 bbr_extra_acked(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return max(bbr->extra_acked[0], bbr->extra_acked[1]);
+}
+
/* Return rate in bytes per second, optionally with a gain.
* The order here is chosen carefully to avoid overflow of u64. This should
* work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
@@ -305,6 +332,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
if (event == CA_EVENT_TX_START && tp->app_limited) {
bbr->idle_restart = 1;
+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
/* Avoid pointless buffer overflows: pace at est. bw if we don't
* need more speed (we're restarting from idle and app-limited).
*/
@@ -315,30 +344,19 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
}
}
-/* Find target cwnd. Right-size the cwnd based on min RTT and the
- * estimated bottleneck bandwidth:
+/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
*
- * cwnd = bw * min_rtt * gain = BDP * gain
+ * bdp = bw * min_rtt * gain
*
* The key factor, gain, controls the amount of queue. While a small gain
* builds a smaller queue, it becomes more vulnerable to noise in RTT
* measurements (e.g., delayed ACKs or other ACK compression effects). This
* noise may cause BBR to under-estimate the rate.
- *
- * To achieve full performance in high-speed paths, we budget enough cwnd to
- * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
- * - one skb in sending host Qdisc,
- * - one skb in sending host TSO/GSO engine
- * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
- * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
- * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
- * which allows 2 outstanding 2-packet sequences, to try to keep pipe
- * full even with ACK-every-other-packet delayed ACKs.
*/
-static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
+static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
struct bbr *bbr = inet_csk_ca(sk);
- u32 cwnd;
+ u32 bdp;
u64 w;
/* If we've never had a valid RTT sample, cap cwnd at the initial
@@ -353,7 +371,24 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
w = (u64)bw * bbr->min_rtt_us;
/* Apply a gain to the given value, then remove the BW_SCALE shift. */
- cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+ bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+
+ return bdp;
+}
+
+/* To achieve full performance in high-speed paths, we budget enough cwnd to
+ * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
+ * - one skb in sending host Qdisc,
+ * - one skb in sending host TSO/GSO engine
+ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
+ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
+ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+ * full even with ACK-every-other-packet delayed ACKs.
+ */
+static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
/* Allow enough full-sized skbs in flight to utilize end systems. */
cwnd += 3 * bbr_tso_segs_goal(sk);
@@ -368,6 +403,17 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
return cwnd;
}
+/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
+static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
+{
+ u32 inflight;
+
+ inflight = bbr_bdp(sk, bw, gain);
+ inflight = bbr_quantization_budget(sk, inflight, gain);
+
+ return inflight;
+}
+
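
With the refactoring above, bbr_inflight() is simply the BDP estimate followed by the quantization budget. A plain-integer sketch of that composition, in packet units, with UNIT standing in for BBR_UNIT and a fixed three-segment allowance standing in for 3 * bbr_tso_segs_goal() (the real code works in BW_UNIT fixed point and also enforces a minimum cwnd):

#include <stdint.h>

#define UNIT 256	/* stand-in for BBR_UNIT gain scaling */

/* bdp = bw * min_rtt * gain, rounded up to whole packets. */
static uint32_t bdp_pkts(uint64_t bw_pps, uint32_t min_rtt_us, int gain)
{
	uint64_t w = bw_pps * min_rtt_us;	/* packet-microseconds per second */

	return (uint32_t)((w * gain / UNIT + 999999) / 1000000);
}

/* Head-room for skbs sitting in the qdisc, TSO engine and receiver GRO. */
static uint32_t quantization_budget(uint32_t cwnd)
{
	return cwnd + 3;
}

static uint32_t inflight_pkts(uint64_t bw_pps, uint32_t min_rtt_us, int gain)
{
	return quantization_budget(bdp_pkts(bw_pps, min_rtt_us, gain));
}
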
/* With pacing at lower layers, there's often less data "in the network" than
* "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
* we often have several skbs queued in the pacing layer with a pre-scheduled
@@ -401,6 +447,22 @@ static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
return inflight_at_edt - interval_delivered;
}
+/* Find the cwnd increment based on estimate of ack aggregation */
+static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
+{
+ u32 max_aggr_cwnd, aggr_cwnd = 0;
+
+ if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
+ max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
+ / BW_UNIT;
+ aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
+ >> BBR_SCALE;
+ aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
+ }
+
+ return aggr_cwnd;
+}
+
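
bbr_ack_aggregation_cwnd() above caps the aggregation allowance at roughly 100 ms of data at the estimated bandwidth. A standalone arithmetic sketch in plain packet units (the driver itself works in BW_UNIT fixed point and only applies this once the pipe is judged full):

#include <stdint.h>

#define UNIT 256	/* stand-in for BBR_UNIT */

/* bw_pps: estimated bandwidth in packets/sec; extra_acked: packets. */
static uint32_t aggr_cwnd_pkts(uint64_t bw_pps, uint32_t extra_acked, int gain)
{
	uint32_t max_aggr = (uint32_t)(bw_pps * 100000 / 1000000);	/* 100 ms of bw */
	uint32_t aggr = (uint32_t)((uint64_t)gain * extra_acked / UNIT);

	return aggr < max_aggr ? aggr : max_aggr;
}

With bbr_extra_acked_gain set to BBR_UNIT the gain term is a no-op, so this reduces to min(extra_acked, bw * 100ms).
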
/* An optimization in BBR to reduce losses: On the first round of recovery, we
* follow the packet conservation principle: send P packets per P packets acked.
* After that, we slow-start and send at most 2*P packets per P packets acked.
@@ -461,8 +523,15 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
goto done;
+ target_cwnd = bbr_bdp(sk, bw, gain);
+
+ /* Increment the cwnd to account for excess ACKed data that seems
+ * due to aggregation (of data and/or ACKs) visible in the ACK stream.
+ */
+ target_cwnd += bbr_ack_aggregation_cwnd(sk);
+ target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
+
/* If we're below target cwnd, slow start cwnd toward target cwnd. */
- target_cwnd = bbr_target_cwnd(sk, bw, gain);
if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
cwnd = min(cwnd + acked, target_cwnd);
else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
@@ -503,14 +572,14 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
if (bbr->pacing_gain > BBR_UNIT)
return is_full_length &&
(rs->losses || /* perhaps pacing_gain*BDP won't fit */
- inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
+ inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
* probing didn't find more bw. If inflight falls to match BDP then we
* estimate queue is drained; persisting would underutilize the pipe.
*/
return is_full_length ||
- inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
+ inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}
static void bbr_advance_cycle_phase(struct sock *sk)
@@ -727,6 +796,67 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
}
}
+/* Estimates the windowed max degree of ack aggregation.
+ * This is used to provision extra in-flight data to keep sending during
+ * inter-ACK silences.
+ *
+ * Degree of ack aggregation is estimated as extra data acked beyond expected.
+ *
+ * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
+ * cwnd += max_extra_acked
+ *
+ * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
+ * Max filter is an approximate sliding window of 5-10 (packet timed) round
+ * trips.
+ */
+static void bbr_update_ack_aggregation(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ u32 epoch_us, expected_acked, extra_acked;
+ struct bbr *bbr = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
+ rs->delivered < 0 || rs->interval_us <= 0)
+ return;
+
+ if (bbr->round_start) {
+ bbr->extra_acked_win_rtts = min(0x1F,
+ bbr->extra_acked_win_rtts + 1);
+ if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
+ 0 : 1;
+ bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
+ }
+ }
+
+ /* Compute how many packets we expected to be delivered over epoch. */
+ epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
+ bbr->ack_epoch_mstamp);
+ expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
+
+	/* Reset the aggregation epoch if the ACK rate is below the expected
+	 * rate, or a significantly large number of ACKs has arrived since the
+	 * epoch started (i.e. the epoch is potentially quite old).
+ */
+ if (bbr->ack_epoch_acked <= expected_acked ||
+ (bbr->ack_epoch_acked + rs->acked_sacked >=
+ bbr_ack_epoch_acked_reset_thresh)) {
+ bbr->ack_epoch_acked = 0;
+ bbr->ack_epoch_mstamp = tp->delivered_mstamp;
+ expected_acked = 0;
+ }
+
+ /* Compute excess data delivered, beyond what was expected. */
+ bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
+ bbr->ack_epoch_acked + rs->acked_sacked);
+ extra_acked = bbr->ack_epoch_acked - expected_acked;
+ extra_acked = min(extra_acked, tp->snd_cwnd);
+ if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
+ bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
+}
+
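
The two-entry extra_acked[] array in bbr_update_ack_aggregation() acts as an approximate sliding-window maximum: every bbr_extra_acked_win_rtts round trips the current bucket flips and is zeroed, and bbr_extra_acked() reports the larger of the two buckets, giving an effective window of roughly 5-10 round trips. A minimal generic sketch of that filter, independent of the BBR state layout:

#include <stdint.h>

struct win_max {
	uint32_t buf[2];	/* current and previous window maxima */
	unsigned int idx;	/* which bucket is "current" */
	unsigned int age;	/* rounds since the last bucket flip */
};

static void win_max_advance_round(struct win_max *w, unsigned int win_len)
{
	if (++w->age >= win_len) {
		w->age = 0;
		w->idx ^= 1;
		w->buf[w->idx] = 0;	/* start a fresh window */
	}
}

static void win_max_update(struct win_max *w, uint32_t sample)
{
	if (sample > w->buf[w->idx])
		w->buf[w->idx] = sample;
}

static uint32_t win_max_get(const struct win_max *w)
{
	return w->buf[0] > w->buf[1] ? w->buf[0] : w->buf[1];
}
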
/* Estimate when the pipe is full, using the change in delivery rate: BBR
* estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
* at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
@@ -762,11 +892,11 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
bbr->mode = BBR_DRAIN; /* drain queue we created */
tcp_sk(sk)->snd_ssthresh =
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT);
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
} /* fall through to check if in-flight is already small: */
if (bbr->mode == BBR_DRAIN &&
bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
}
@@ -881,6 +1011,7 @@ static void bbr_update_gains(struct sock *sk)
static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
bbr_update_bw(sk, rs);
+ bbr_update_ack_aggregation(sk, rs);
bbr_update_cycle_phase(sk, rs);
bbr_check_full_bw_reached(sk, rs);
bbr_check_drain(sk, rs);
@@ -932,6 +1063,13 @@ static void bbr_init(struct sock *sk)
bbr_reset_lt_bw_sampling(sk);
bbr_reset_startup_mode(sk);
+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = 0;
+ bbr->extra_acked[0] = 0;
+ bbr->extra_acked[1] = 0;
+
cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 76858b14ebe9..4eb0c8ca3c60 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -221,7 +221,7 @@ void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_incr_quickack(sk, max_quickacks);
- icsk->icsk_ack.pingpong = 0;
+ inet_csk_exit_pingpong_mode(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
EXPORT_SYMBOL(tcp_enter_quickack_mode);
@@ -236,7 +236,7 @@ static bool tcp_in_quickack_mode(struct sock *sk)
const struct dst_entry *dst = __sk_dst_get(sk);
return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
- (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
+ (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
}
static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
@@ -1574,9 +1574,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
return skb;
}
-static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk,
- struct tcp_sacktag_state *state,
- u32 seq)
+static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk, u32 seq)
{
struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
struct sk_buff *skb;
@@ -1598,13 +1596,12 @@ static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk,
}
static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
- struct tcp_sacktag_state *state,
u32 skip_to_seq)
{
if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq))
return skb;
- return tcp_sacktag_bsearch(sk, state, skip_to_seq);
+ return tcp_sacktag_bsearch(sk, skip_to_seq);
}
static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
@@ -1617,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
return skb;
if (before(next_dup->start_seq, skip_to_seq)) {
- skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
+ skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
skb = tcp_sacktag_walk(skb, sk, NULL, state,
next_dup->start_seq, next_dup->end_seq,
1);
@@ -1758,8 +1755,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
/* Head todo? */
if (before(start_seq, cache->start_seq)) {
- skb = tcp_sacktag_skip(skb, sk, state,
- start_seq);
+ skb = tcp_sacktag_skip(skb, sk, start_seq);
skb = tcp_sacktag_walk(skb, sk, next_dup,
state,
start_seq,
@@ -1785,7 +1781,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
goto walk;
}
- skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
+ skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
/* Check overlap against next cached too (past this one already) */
cache++;
continue;
@@ -1796,7 +1792,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (!skb)
break;
}
- skb = tcp_sacktag_skip(skb, sk, state, start_seq);
+ skb = tcp_sacktag_skip(skb, sk, start_seq);
walk:
skb = tcp_sacktag_walk(skb, sk, next_dup, state,
@@ -3595,7 +3591,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
* this segment (RFC793 Section 3.9).
*/
if (after(ack, tp->snd_nxt))
- goto invalid_ack;
+ return -1;
if (after(ack, prior_snd_una)) {
flag |= FLAG_SND_UNA_ADVANCED;
@@ -3714,10 +3710,6 @@ no_queue:
tcp_process_tlp_ack(sk, ack, flag);
return 1;
-invalid_ack:
- SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
- return -1;
-
old_ack:
/* If data was SACKed, tag it and see if we should send more data.
* If data was DSACKed, see if we can undo a cwnd reduction.
@@ -3731,7 +3723,6 @@ old_ack:
tcp_xmit_recovery(sk, rexmit);
}
- SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
return 0;
}
@@ -4094,7 +4085,7 @@ void tcp_fin(struct sock *sk)
case TCP_ESTABLISHED:
/* Move to CLOSE_WAIT */
tcp_set_state(sk, TCP_CLOSE_WAIT);
- inet_csk(sk)->icsk_ack.pingpong = 1;
+ inet_csk_enter_pingpong_mode(sk);
break;
case TCP_CLOSE_WAIT:
@@ -4432,13 +4423,9 @@ static void tcp_ofo_queue(struct sock *sk)
rb_erase(&skb->rbnode, &tp->out_of_order_queue);
if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
- SOCK_DEBUG(sk, "ofo packet was already received\n");
tcp_drop(sk, skb);
continue;
}
- SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
- tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
- TCP_SKB_CB(skb)->end_seq);
tail = skb_peek_tail(&sk->sk_receive_queue);
eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
@@ -4502,8 +4489,6 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
seq = TCP_SKB_CB(skb)->seq;
end_seq = TCP_SKB_CB(skb)->end_seq;
- SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
- tp->rcv_nxt, seq, end_seq);
p = &tp->out_of_order_queue.rb_node;
if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
@@ -4779,10 +4764,6 @@ drop:
if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
/* Partial packet, seq < rcv_next < end_seq */
- SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
- tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
- TCP_SKB_CB(skb)->end_seq);
-
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
/* If window is closed, drop tail of packet. But after
@@ -5061,8 +5042,6 @@ static int tcp_prune_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
-
NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
@@ -5889,7 +5868,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
return -1;
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
- icsk->icsk_ack.pingpong) {
+ inet_csk_in_pingpong_mode(sk)) {
/* Save one ACK. Data will be ready after
* several ticks, if write_pending is set.
*
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index efc6fef692ff..831d844a27ca 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
if (sock_owned_by_user(sk))
break;
+ skb = tcp_rtx_queue_head(sk);
+ if (WARN_ON_ONCE(!skb))
+ break;
+
icsk->icsk_backoff--;
icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
TCP_TIMEOUT_INIT;
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
- skb = tcp_rtx_queue_head(sk);
tcp_mstamp_refresh(tp);
delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
@@ -970,7 +973,7 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
* We need to maintain these in the sk structure.
*/
-struct static_key tcp_md5_needed __read_mostly;
+DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);
/* Find the Key structure for an address. */
@@ -2437,7 +2440,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
refcount_read(&sk->sk_refcnt), sk,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
- (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
+ (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
tp->snd_cwnd,
state == TCP_LISTEN ?
fastopenq->max_qlen :
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 12affb7864d9..79900f783e0d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -294,12 +294,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
* so the timewait ack generating code has the key.
*/
do {
- struct tcp_md5sig_key *key;
tcptw->tw_md5_key = NULL;
- key = tp->af_specific->md5_lookup(sk, sk);
- if (key) {
- tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
- BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
+ if (static_branch_unlikely(&tcp_md5_needed)) {
+ struct tcp_md5sig_key *key;
+
+ key = tp->af_specific->md5_lookup(sk, sk);
+ if (key) {
+ tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
+ BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
+ }
}
} while (0);
#endif
@@ -338,10 +341,12 @@ EXPORT_SYMBOL(tcp_time_wait);
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
- struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+ if (static_branch_unlikely(&tcp_md5_needed)) {
+ struct tcp_timewait_sock *twsk = tcp_twsk(sk);
- if (twsk->tw_md5_key)
- kfree_rcu(twsk->tw_md5_key, rcu);
+ if (twsk->tw_md5_key)
+ kfree_rcu(twsk->tw_md5_key, rcu);
+ }
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
@@ -479,43 +484,16 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
tcp_init_wl(newtp, treq->rcv_isn);
- newtp->srtt_us = 0;
- newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
- newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
- newtp->packets_out = 0;
- newtp->retrans_out = 0;
- newtp->sacked_out = 0;
- newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- newtp->tlp_high_seq = 0;
newtp->lsndtime = tcp_jiffies32;
newsk->sk_txhash = treq->txhash;
- newtp->last_oow_ack_time = 0;
newtp->total_retrans = req->num_retrans;
- /* So many TCP implementations out there (incorrectly) count the
- * initial SYN frame in their delayed-ACK and congestion control
- * algorithms that we must have the following bandaid to talk
- * efficiently to them. -DaveM
- */
- newtp->snd_cwnd = TCP_INIT_CWND;
- newtp->snd_cwnd_cnt = 0;
-
- /* There's a bubble in the pipe until at least the first ACK. */
- newtp->app_limited = ~0U;
-
tcp_init_xmit_timers(newsk);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
- newtp->rx_opt.saw_tstamp = 0;
-
- newtp->rx_opt.dsack = 0;
- newtp->rx_opt.num_sacks = 0;
-
- newtp->urg_data = 0;
-
if (sock_flag(newsk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
@@ -556,13 +534,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_req = NULL;
newtp->fastopen_rsk = NULL;
- newtp->syn_data_acked = 0;
- newtp->rack.mstamp = 0;
- newtp->rack.advanced = 0;
- newtp->rack.reo_wnd_steps = 1;
- newtp->rack.last_delivered = 0;
- newtp->rack.reo_wnd_persist = 0;
- newtp->rack.dsack_seen = 0;
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 730bc44dbad9..4522579aaca2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -165,13 +165,16 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
- tp->lsndtime = now;
-
- /* If it is a reply for ato after last received
- * packet, enter pingpong mode.
+	/* If this is the first data packet sent in response to the
+	 * previously received data, and it is sent within the ack
+	 * timeout (ato) window after the last received packet,
+	 * increase the pingpong count.
*/
- if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
- icsk->icsk_ack.pingpong = 1;
+ if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
+ (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+ inet_csk_inc_pingpong_cnt(sk);
+
+ tp->lsndtime = now;
}
/* Account for an ACK we sent. */
@@ -594,7 +597,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
- if (static_key_false(&tcp_md5_needed) &&
+ if (static_branch_unlikely(&tcp_md5_needed) &&
rcu_access_pointer(tp->md5sig_info)) {
*md5 = tp->af_specific->md5_lookup(sk, sk);
if (*md5) {
@@ -731,7 +734,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
- if (static_key_false(&tcp_md5_needed) &&
+ if (static_branch_unlikely(&tcp_md5_needed) &&
rcu_access_pointer(tp->md5sig_info)) {
*md5 = tp->af_specific->md5_lookup(sk, sk);
if (*md5) {
@@ -980,7 +983,6 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
- skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
if (sk->sk_pacing_status != SK_PACING_NONE) {
unsigned long rate = sk->sk_pacing_rate;
@@ -1028,7 +1030,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
BUG_ON(!skb || !tcp_skb_pcount(skb));
tp = tcp_sk(sk);
-
+ prior_wstamp = tp->tcp_wstamp_ns;
+ tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
+ skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
if (clone_it) {
TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
- tp->snd_una;
@@ -1045,11 +1049,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
return -ENOBUFS;
}
- prior_wstamp = tp->tcp_wstamp_ns;
- tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
-
- skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
-
inet = inet_sk(sk);
tcb = TCP_SKB_CB(skb);
memset(&opts, 0, sizeof(opts));
@@ -1847,17 +1846,17 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
* know that all the data is in scatter-gather pages, and that the
* packet has never been sent out before (and thus is not cloned).
*/
-static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
- struct sk_buff *skb, unsigned int len,
+static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
unsigned int mss_now, gfp_t gfp)
{
- struct sk_buff *buff;
int nlen = skb->len - len;
+ struct sk_buff *buff;
u8 flags;
/* All of a TSO frame must be composed of paged data. */
if (skb->len != skb->data_len)
- return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
+ return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
+ skb, len, mss_now, gfp);
buff = sk_stream_alloc_skb(sk, 0, gfp, true);
if (unlikely(!buff))
@@ -1893,7 +1892,7 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
/* Link BUFF into the send queue. */
__skb_header_release(buff);
- tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
+ tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
return 0;
}
@@ -2347,6 +2346,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
+ tcp_init_tso_segs(skb, mss_now);
goto repair; /* Skip network transmission */
}
@@ -2391,8 +2391,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
nonagle);
if (skb->len > limit &&
- unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
- skb, limit, mss_now, gfp)))
+ unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
if (tcp_small_queue_check(sk, skb, 0))
@@ -2937,12 +2936,16 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
+ /* To avoid taking spuriously low RTT samples based on a timestamp
+ * for a transmit that never happened, always mark EVER_RETRANS
+ */
+ TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+
if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
TCP_SKB_CB(skb)->seq, segs, err);
if (likely(!err)) {
- TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
trace_tcp_retransmit_skb(sk, skb);
} else if (err != -EBUSY) {
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
@@ -2963,13 +2966,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
#endif
TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
tp->retrans_out += tcp_skb_pcount(skb);
-
- /* Save stamp of the first retransmit. */
- if (!tp->retrans_stamp)
- tp->retrans_stamp = tcp_skb_timestamp(skb);
-
}
+ /* Save stamp of the first (attempted) retransmit. */
+ if (!tp->retrans_stamp)
+ tp->retrans_stamp = tcp_skb_timestamp(skb);
+
if (tp->undo_retrans < 0)
tp->undo_retrans = 0;
tp->undo_retrans += tcp_skb_pcount(skb);
@@ -3456,6 +3458,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
skb_trim(syn_data, copied);
space = copied;
}
+ skb_zcopy_set(syn_data, fo->uarg, NULL);
}
/* No more data pending in inet_wait_for_connect() */
if (space == fo->size)
@@ -3569,7 +3572,7 @@ void tcp_send_delayed_ack(struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ / 2;
- if (icsk->icsk_ack.pingpong ||
+ if (inet_csk_in_pingpong_mode(sk) ||
(icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
max_ato = TCP_DELACK_MAX;
@@ -3750,7 +3753,7 @@ void tcp_send_probe0(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
- unsigned long probe_max;
+ unsigned long timeout;
int err;
err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
@@ -3762,26 +3765,18 @@ void tcp_send_probe0(struct sock *sk)
return;
}
+ icsk->icsk_probes_out++;
if (err <= 0) {
if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
icsk->icsk_backoff++;
- icsk->icsk_probes_out++;
- probe_max = TCP_RTO_MAX;
+ timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
} else {
/* If packet was not sent due to local congestion,
- * do not backoff and do not remember icsk_probes_out.
- * Let local senders to fight for local resources.
- *
- * Use accumulated backoff yet.
+ * Let senders fight for local resources conservatively.
*/
- if (!icsk->icsk_probes_out)
- icsk->icsk_probes_out = 1;
- probe_max = TCP_RESOURCE_PROBE_INTERVAL;
- }
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_when(sk, probe_max),
- TCP_RTO_MAX,
- NULL);
+ timeout = TCP_RESOURCE_PROBE_INTERVAL;
+ }
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
}
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index f87dbc78b6bc..f0c86398e6a7 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -22,28 +22,14 @@
#include <linux/gfp.h>
#include <net/tcp.h>
-static u32 tcp_retransmit_stamp(const struct sock *sk)
-{
- u32 start_ts = tcp_sk(sk)->retrans_stamp;
-
- if (unlikely(!start_ts)) {
- struct sk_buff *head = tcp_rtx_queue_head(sk);
-
- if (!head)
- return 0;
- start_ts = tcp_skb_timestamp(head);
- }
- return start_ts;
-}
-
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
u32 elapsed, start_ts;
s32 remaining;
- start_ts = tcp_retransmit_stamp(sk);
- if (!icsk->icsk_user_timeout || !start_ts)
+ start_ts = tcp_sk(sk)->retrans_stamp;
+ if (!icsk->icsk_user_timeout)
return icsk->icsk_rto;
elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
remaining = icsk->icsk_user_timeout - elapsed;
@@ -173,7 +159,20 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
-
+static unsigned int tcp_model_timeout(struct sock *sk,
+ unsigned int boundary,
+ unsigned int rto_base)
+{
+ unsigned int linear_backoff_thresh, timeout;
+
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
+ if (boundary <= linear_backoff_thresh)
+ timeout = ((2 << boundary) - 1) * rto_base;
+ else
+ timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
+ (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ return jiffies_to_msecs(timeout);
+}
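
tcp_model_timeout() above computes the total time that `boundary` exponentially backed-off retransmissions would span, switching to linear TCP_RTO_MAX-sized steps once doubling would exceed the cap. A standalone sketch of the same arithmetic in millisecond units (rto_max_ms stands in for TCP_RTO_MAX):

#include <stdint.h>

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Total time covered by `boundary` backed-off timeouts, in ms. */
static uint64_t model_timeout_ms(unsigned int boundary,
				 uint64_t rto_base_ms, uint64_t rto_max_ms)
{
	unsigned int thresh = ilog2_u64(rto_max_ms / rto_base_ms);

	if (boundary <= thresh)
		return ((2ULL << boundary) - 1) * rto_base_ms;
	return ((2ULL << thresh) - 1) * rto_base_ms +
	       (boundary - thresh) * rto_max_ms;
}

For example, with a 200 ms base and a 120 s cap, boundary = 6 stays under the doubling threshold and models out to ((2 << 6) - 1) * 200 ms = 127 * 200 ms, about 25.4 s.
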
/**
* retransmits_timed_out() - returns true if this connection has timed out
* @sk: The current socket
@@ -191,26 +190,15 @@ static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
unsigned int timeout)
{
- const unsigned int rto_base = TCP_RTO_MIN;
- unsigned int linear_backoff_thresh, start_ts;
+ unsigned int start_ts;
if (!inet_csk(sk)->icsk_retransmits)
return false;
- start_ts = tcp_retransmit_stamp(sk);
- if (!start_ts)
- return false;
-
- if (likely(timeout == 0)) {
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
+ start_ts = tcp_sk(sk)->retrans_stamp;
+ if (likely(timeout == 0))
+ timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
- if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * rto_base;
- else
- timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
- timeout = jiffies_to_msecs(timeout);
- }
return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}
@@ -226,7 +214,7 @@ static int tcp_write_timeout(struct sock *sk)
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits) {
dst_negative_advice(sk);
- } else if (!tp->syn_data && !tp->syn_fastopen) {
+ } else {
sk_rethink_txhash(sk);
}
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
@@ -289,14 +277,14 @@ void tcp_delack_timer_handler(struct sock *sk)
icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
if (inet_csk_ack_scheduled(sk)) {
- if (!icsk->icsk_ack.pingpong) {
+ if (!inet_csk_in_pingpong_mode(sk)) {
/* Delayed ACK missed: inflate ATO. */
icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
} else {
/* Delayed ACK missed: leave pingpong mode and
* deflate ATO.
*/
- icsk->icsk_ack.pingpong = 0;
+ inet_csk_exit_pingpong_mode(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
tcp_mstamp_refresh(tcp_sk(sk));
@@ -345,7 +333,6 @@ static void tcp_probe_timer(struct sock *sk)
struct sk_buff *skb = tcp_send_head(sk);
struct tcp_sock *tp = tcp_sk(sk);
int max_probes;
- u32 start_ts;
if (tp->packets_out || !skb) {
icsk->icsk_probes_out = 0;
@@ -360,12 +347,13 @@ static void tcp_probe_timer(struct sock *sk)
* corresponding system limit. We also implement similar policy when
* we use RTO to probe window in tcp_retransmit_timer().
*/
- start_ts = tcp_skb_timestamp(skb);
- if (!start_ts)
- skb->skb_mstamp_ns = tp->tcp_clock_cache;
- else if (icsk->icsk_user_timeout &&
- (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
- goto abort;
+ if (icsk->icsk_user_timeout) {
+ u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
+ tcp_probe0_base(sk));
+
+ if (elapsed >= icsk->icsk_user_timeout)
+ goto abort;
+ }
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
@@ -395,6 +383,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
int max_retries = icsk->icsk_syn_retries ? :
sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+ struct tcp_sock *tp = tcp_sk(sk);
struct request_sock *req;
req = tcp_sk(sk)->fastopen_rsk;
@@ -412,6 +401,8 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
inet_rtx_syn_ack(sk, req);
req->num_timeout++;
icsk->icsk_retransmits++;
+ if (!tp->retrans_stamp)
+ tp->retrans_stamp = tcp_time_stamp(tp);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
@@ -443,10 +434,8 @@ void tcp_retransmit_timer(struct sock *sk)
*/
return;
}
- if (!tp->packets_out)
- goto out;
-
- WARN_ON(tcp_rtx_queue_empty(sk));
+ if (!tp->packets_out || WARN_ON_ONCE(tcp_rtx_queue_empty(sk)))
+ return;
tp->tlp_high_seq = 0;
@@ -511,14 +500,13 @@ void tcp_retransmit_timer(struct sock *sk)
tcp_enter_loss(sk);
+ icsk->icsk_retransmits++;
if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
/* Retransmission failed because of local congestion,
- * do not backoff.
+ * Let senders fight for local resources conservatively.
*/
- if (!icsk->icsk_retransmits)
- icsk->icsk_retransmits = 1;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
+ TCP_RESOURCE_PROBE_INTERVAL,
TCP_RTO_MAX);
goto out;
}
@@ -539,7 +527,6 @@ void tcp_retransmit_timer(struct sock *sk)
* the 120 second clamps though!
*/
icsk->icsk_backoff++;
- icsk->icsk_retransmits++;
out_reset_timer:
/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3fb0ed5e4789..372fdc5381a9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
int (*handler)(struct sk_buff *skb, u32 info);
+ const struct ip_tunnel_encap_ops *encap;
- if (!iptun_encaps[i])
+ encap = rcu_dereference(iptun_encaps[i]);
+ if (!encap)
continue;
- handler = rcu_dereference(iptun_encaps[i]->err_handler);
+ handler = encap->err_handler;
if (handler && !handler(skb, info))
return 0;
}
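
The hunk above fixes a lookup that tested the RCU-protected array slot with a plain load and then dereferenced it again; the replacement fetches the slot once with rcu_dereference() and reuses the local pointer. A rough sketch of the idiom with made-up my_ops/my_table names (the caller is assumed to hold rcu_read_lock()):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

struct my_ops {
	int (*err_handler)(struct sk_buff *skb, u32 info);
};

extern const struct my_ops __rcu *my_table[8];

static int my_dispatch(struct sk_buff *skb, u32 info)
{
	const struct my_ops *ops;
	int i;

	for (i = 0; i < 8; i++) {
		ops = rcu_dereference(my_table[i]);	/* single fetch */
		if (!ops || !ops->err_handler)
			continue;
		if (!ops->err_handler(skb, info))
			return 0;
	}
	return -ENOENT;
}
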
@@ -847,15 +849,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize)
+ if (hlen + cork->gso_size > cork->fragsize) {
+ kfree_skb(skb);
return -EINVAL;
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+ }
+ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
return -EINVAL;
- if (sk->sk_no_check_tx)
+ }
+ if (sk->sk_no_check_tx) {
+ kfree_skb(skb);
return -EINVAL;
+ }
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb)))
+ dst_xfrm(skb_dst(skb))) {
+ kfree_skb(skb);
return -EIO;
+ }
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1918,7 +1928,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash)
}
EXPORT_SYMBOL(udp_lib_rehash);
-static void udp_v4_rehash(struct sock *sk)
+void udp_v4_rehash(struct sock *sk)
{
u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 322672655419..6b2fa77eeb1c 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
int udp_v4_get_port(struct sock *sk, unsigned short snum);
+void udp_v4_rehash(struct sock *sk);
int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index be8b5b2157d8..e93cc0379201 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -21,18 +21,9 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
goto error;
if (cfg->bind_ifindex) {
- struct net_device *dev;
-
- dev = dev_get_by_index(net, cfg->bind_ifindex);
- if (!dev) {
- err = -ENODEV;
- goto error;
- }
-
- err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
- dev->name, strlen(dev->name) + 1);
- dev_put(dev);
-
+ err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTOIFINDEX,
+ (void *)&cfg->bind_ifindex,
+ sizeof(cfg->bind_ifindex));
if (err < 0)
goto error;
}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 39c7f17d916f..3c94b8f0ff27 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -53,6 +53,7 @@ struct proto udplite_prot = {
.sendpage = udp_sendpage,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8eeec6eb2bd3..4ae17a966ae3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -597,6 +597,43 @@ static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
};
+static int inet6_netconf_valid_get_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_ipv6_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_ipv6_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETCONFA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETCONFA_IFINDEX:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
@@ -605,14 +642,12 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
struct nlattr *tb[NETCONFA_MAX+1];
struct inet6_dev *in6_dev = NULL;
struct net_device *dev = NULL;
- struct netconfmsg *ncm;
struct sk_buff *skb;
struct ipv6_devconf *devconf;
int ifindex;
int err;
- err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
- devconf_ipv6_policy, extack);
+ err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
@@ -1165,7 +1200,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (ifa == ifp)
continue;
- if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
+ if (ifa->prefix_len != ifp->prefix_len ||
+ !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
ifp->prefix_len))
continue;
if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
@@ -3495,8 +3531,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (!addrconf_link_ready(dev)) {
/* device is not ready yet. */
- pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
- dev->name);
+ pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
+ dev->name);
break;
}
@@ -5120,6 +5156,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
if (idev) {
err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
&fillargs);
+ if (err > 0)
+ err = 0;
}
goto put_tgt_net;
}
@@ -5154,7 +5192,7 @@ put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
- return err < 0 ? err : skb->len;
+ return skb->len ? : err;
}
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
@@ -5179,6 +5217,52 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
return inet6_dump_addr(skb, cb, type);
}
+static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct ifaddrmsg *ifm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
+ return -EINVAL;
+ }
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv6_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv6_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= IFA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case IFA_TARGET_NETNSID:
+ case IFA_ADDRESS:
+ case IFA_LOCAL:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -5199,8 +5283,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct sk_buff *skb;
int err;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
- extack);
+ err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
@@ -6822,6 +6905,12 @@ static int __net_init addrconf_init_net(struct net *net)
if (!dflt)
goto err_alloc_dflt;
+ if (IS_ENABLED(CONFIG_SYSCTL) &&
+ sysctl_devconf_inherit_init_net == 1 && !net_eq(net, &init_net)) {
+ memcpy(all, init_net.ipv6.devconf_all, sizeof(ipv6_devconf));
+ memcpy(dflt, init_net.ipv6.devconf_dflt, sizeof(ipv6_devconf_dflt));
+ }
+
/* these will be inherited by all namespaces */
dflt->autoconf = ipv6_defaults.autoconf;
dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 5cd0029d930e..6c79af056d9b 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -134,6 +134,11 @@ static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
return -EAFNOSUPPORT;
}
+static int eafnosupport_ipv6_route_input(struct sk_buff *skb)
+{
+ return -EAFNOSUPPORT;
+}
+
static struct fib6_table *eafnosupport_fib6_get_table(struct net *net, u32 id)
{
return NULL;
@@ -170,6 +175,7 @@ eafnosupport_ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
.ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+ .ipv6_route_input = eafnosupport_ipv6_route_input,
.fib6_get_table = eafnosupport_fib6_get_table,
.fib6_table_lookup = eafnosupport_fib6_table_lookup,
.fib6_lookup = eafnosupport_fib6_lookup,
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 0d1ee82ee55b..d43d076c98f5 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -523,6 +523,50 @@ static inline int ip6addrlbl_msgsize(void)
+ nla_total_size(4); /* IFAL_LABEL */
}
+static int ip6addrlbl_valid_get_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct ifaddrlblmsg *ifal;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifal))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for addrlabel get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX,
+ ifal_policy, extack);
+
+ ifal = nlmsg_data(nlh);
+ if (ifal->__ifal_reserved || ifal->ifal_flags || ifal->ifal_seq) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for addrlabel get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifal), tb, IFAL_MAX,
+ ifal_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= IFAL_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case IFAL_ADDRESS:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in addrlabel get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -535,8 +579,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct ip6addrlbl_entry *p;
struct sk_buff *skb;
- err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy,
- extack);
+ err = ip6addrlbl_valid_get_req(in_skb, nlh, tb, extack);
if (err < 0)
return err;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0bfb6cc0a30a..2f45d2a3e3a3 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -310,6 +310,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
/* Check if the address belongs to the host. */
if (addr_type == IPV6_ADDR_MAPPED) {
+ struct net_device *dev = NULL;
int chk_addr_ret;
/* Binding to v4-mapped address on a v6-only socket
@@ -320,9 +321,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
goto out;
}
+ rcu_read_lock();
+ if (sk->sk_bound_dev_if) {
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
/* Reproduce AF_INET checks to make the bindings consistent */
v4addr = addr->sin6_addr.s6_addr32[3];
- chk_addr_ret = inet_addr_type(net, v4addr);
+ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
+ rcu_read_unlock();
+
if (!inet_can_nonlocal_bind(net, inet) &&
v4addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
@@ -888,10 +900,17 @@ static struct pernet_operations inet6_net_ops = {
.exit = inet6_net_exit,
};
+static int ipv6_route_input(struct sk_buff *skb)
+{
+ ip6_route_input(skb);
+ return skb_dst(skb)->error;
+}
+
static const struct ipv6_stub ipv6_stub_impl = {
.ipv6_sock_mc_join = ipv6_sock_mc_join,
.ipv6_sock_mc_drop = ipv6_sock_mc_drop,
.ipv6_dst_lookup = ip6_dst_lookup,
+ .ipv6_route_input = ipv6_route_input,
.fib6_get_table = fib6_get_table,
.fib6_table_lookup = fib6_table_lookup,
.fib6_lookup = fib6_lookup,
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index bde08aa549f3..ee4a4e54d016 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
iph->daddr = fl6->daddr;
+ ip6_flow_hdr(iph, 0, 0);
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
@@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
}
if (np->rxopt.bits.rxorigdstaddr) {
struct sockaddr_in6 sin6;
- __be16 *ports;
- int end;
+ __be16 _ports[2], *ports;
- end = skb_transport_offset(skb) + 4;
- if (end <= 0 || pskb_may_pull(skb, end)) {
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_ports), &_ports);
+ if (ports) {
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
- ports = (__be16 *)skb_transport_header(skb);
-
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ipv6_hdr(skb)->daddr;
sin6.sin6_port = ports[1];
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5afe9f83374d..239d4a65ad6e 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
skb->len += tailen;
skb->data_len += tailen;
skb->truesize += tailen;
- if (sk)
+ if (sk && sk_fullsock(sk))
refcount_add(tailen, &sk->sk_wmem_alloc);
goto out;
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index bd675c61deb1..867474abe269 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, u32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
const struct inet6_protocol *ipprot;
@@ -90,10 +90,11 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
{
int transport_offset = skb_transport_offset(skb);
struct guehdr *guehdr;
- size_t optlen;
+ size_t len, optlen;
int ret;
- if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ if (!pskb_may_pull(skb, len))
return -EINVAL;
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -128,9 +129,21 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
optlen = guehdr->hlen << 2;
+ if (!pskb_may_pull(skb, len + optlen))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
if (validate_gue_flags(guehdr, optlen))
return -EINVAL;
+ /* Handling exceptions for direct UDP encapsulation in GUE would lead to
+ * recursion. Besides, this kind of encapsulation can't even be
+ * configured currently. Discard this.
+ */
+ if (guehdr->proto_ctype == IPPROTO_UDP ||
+ guehdr->proto_ctype == IPPROTO_UDPLITE)
+ return -EOPNOTSUPP;
+
skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
ret = gue6_err_proto_handler(guehdr->proto_ctype, skb,
opt, type, code, offset, info);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 5d7aa2c2770c..802faa2fcc0e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -81,7 +81,7 @@
*/
static inline struct sock *icmpv6_sk(struct net *net)
{
- return net->ipv6.icmp_sk[smp_processor_id()];
+ return *this_cpu_ptr(net->ipv6.icmp_sk);
}
static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -423,10 +423,10 @@ static int icmp6_iif(const struct sk_buff *skb)
static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr)
{
- struct net *net = dev_net(skb->dev);
struct inet6_dev *idev = NULL;
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct sock *sk;
+ struct net *net;
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
struct dst_entry *dst;
@@ -437,12 +437,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
int iif = 0;
int addr_type = 0;
int len;
- u32 mark = IP6_REPLY_MARK(net, skb->mark);
+ u32 mark;
if ((u8 *)hdr < skb->head ||
(skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
return;
+ if (!skb->dev)
+ return;
+ net = dev_net(skb->dev);
+ mark = IP6_REPLY_MARK(net, skb->mark);
/*
* Make sure we respect the rules
* i.e. RFC 1885 2.4(e)
@@ -949,13 +953,21 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
}
+static void __net_exit icmpv6_sk_exit(struct net *net)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv6.icmp_sk, i));
+ free_percpu(net->ipv6.icmp_sk);
+}
+
static int __net_init icmpv6_sk_init(struct net *net)
{
struct sock *sk;
- int err, i, j;
+ int err, i;
- net->ipv6.icmp_sk =
- kcalloc(nr_cpu_ids, sizeof(struct sock *), GFP_KERNEL);
+ net->ipv6.icmp_sk = alloc_percpu(struct sock *);
if (!net->ipv6.icmp_sk)
return -ENOMEM;
@@ -968,7 +980,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
goto fail;
}
- net->ipv6.icmp_sk[i] = sk;
+ *per_cpu_ptr(net->ipv6.icmp_sk, i) = sk;
/* Enough space for 2 64K ICMP packets, including
* sk_buff struct overhead.
@@ -978,22 +990,10 @@ static int __net_init icmpv6_sk_init(struct net *net)
return 0;
fail:
- for (j = 0; j < i; j++)
- inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
- kfree(net->ipv6.icmp_sk);
+ icmpv6_sk_exit(net);
return err;
}
-static void __net_exit icmpv6_sk_exit(struct net *net)
-{
- int i;
-
- for_each_possible_cpu(i) {
- inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
- }
- kfree(net->ipv6.icmp_sk);
-}
-
static struct pernet_operations icmpv6_sk_ops = {
.init = icmpv6_sk_init,
.exit = icmpv6_sk_exit,
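
The icmp.c conversion above replaces a kcalloc'ed array of sockets indexed by smp_processor_id() with a true per-cpu variable, so the fast path becomes a this_cpu_ptr() load. A minimal sketch of the allocation and access pattern, with my_sk being an illustrative field rather than the per-netns one touched here:

#include <linux/errno.h>
#include <linux/percpu.h>

struct sock;

static struct sock * __percpu *my_sk;

static int my_init(void)
{
	my_sk = alloc_percpu(struct sock *);
	if (!my_sk)
		return -ENOMEM;
	return 0;
}

/* Setup and teardown paths address a specific CPU's slot... */
static void my_set(int cpu, struct sock *sk)
{
	*per_cpu_ptr(my_sk, cpu) = sk;
}

/* ...while the fast path reads the slot of the CPU it runs on. */
static struct sock *my_get_local(void)
{
	return *this_cpu_ptr(my_sk);
}
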
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 17c455ff69ff..79d2e43c05c5 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -383,12 +383,9 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
struct rhashtable_iter iter;
struct ila_map *ila;
spinlock_t *lock;
- int ret;
-
- ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter, GFP_KERNEL);
- if (ret)
- goto done;
+ int ret = 0;
+ rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
rhashtable_walk_start(&iter);
for (;;) {
@@ -509,23 +506,17 @@ int ila_xlat_nl_dump_start(struct netlink_callback *cb)
struct net *net = sock_net(cb->skb->sk);
struct ila_net *ilan = net_generic(net, ila_net_id);
struct ila_dump_iter *iter;
- int ret;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
- ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter->rhiter,
- GFP_KERNEL);
- if (ret) {
- kfree(iter);
- return ret;
- }
+ rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);
iter->skip = 0;
cb->args[0] = (long)iter;
- return ret;
+ return 0;
}
int ila_xlat_nl_dump_done(struct netlink_callback *cb)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 09d0826742f8..b32c95f02128 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -524,7 +524,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
return PACKET_REJECT;
}
-static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
+static int ip6erspan_rcv(struct sk_buff *skb,
struct tnl_ptk_info *tpi)
{
struct erspan_base_hdr *ershdr;
@@ -534,13 +534,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
struct ip6_tnl *tunnel;
u8 ver;
- if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
- return PACKET_REJECT;
-
ipv6h = ipv6_hdr(skb);
ershdr = (struct erspan_base_hdr *)skb->data;
ver = ershdr->ver;
- tpi->key = cpu_to_be32(get_session_id(ershdr));
tunnel = ip6gre_tunnel_lookup(skb->dev,
&ipv6h->saddr, &ipv6h->daddr, tpi->key,
@@ -611,7 +607,7 @@ static int gre_rcv(struct sk_buff *skb)
if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
tpi.proto == htons(ETH_P_ERSPAN2))) {
- if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
+ if (ip6erspan_rcv(skb, &tpi) == PACKET_RCVD)
return 0;
goto out;
}
@@ -922,6 +918,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
__u8 dsfield = false;
struct flowi6 fl6;
int err = -EINVAL;
+ __be16 proto;
__u32 mtu;
int nhoff;
int thoff;
@@ -1035,8 +1032,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
}
/* Push GRE header. */
- gre_build_header(skb, 8, TUNNEL_SEQ,
- htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
+ proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
+ : htons(ETH_P_ERSPAN2);
+ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@@ -1169,6 +1167,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
t->parms.i_flags = p->i_flags;
t->parms.o_flags = p->o_flags;
t->parms.fwmark = p->fwmark;
+ t->parms.erspan_ver = p->erspan_ver;
+ t->parms.index = p->index;
+ t->parms.dir = p->dir;
+ t->parms.hwid = p->hwid;
dst_cache_reset(&t->dst_cache);
}
@@ -1717,6 +1719,27 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static void ip6erspan_set_version(struct nlattr *data[],
+ struct __ip6_tnl_parm *parms)
+{
+ if (!data)
+ return;
+
+ parms->erspan_ver = 1;
+ if (data[IFLA_GRE_ERSPAN_VER])
+ parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+ if (parms->erspan_ver == 1) {
+ if (data[IFLA_GRE_ERSPAN_INDEX])
+ parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+ } else if (parms->erspan_ver == 2) {
+ if (data[IFLA_GRE_ERSPAN_DIR])
+ parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+ if (data[IFLA_GRE_ERSPAN_HWID])
+ parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+ }
+}
+
static void ip6gre_netlink_parms(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
{
@@ -1765,20 +1788,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
if (data[IFLA_GRE_COLLECT_METADATA])
parms->collect_md = true;
-
- parms->erspan_ver = 1;
- if (data[IFLA_GRE_ERSPAN_VER])
- parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
-
- if (parms->erspan_ver == 1) {
- if (data[IFLA_GRE_ERSPAN_INDEX])
- parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
- } else if (parms->erspan_ver == 2) {
- if (data[IFLA_GRE_ERSPAN_DIR])
- parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
- if (data[IFLA_GRE_ERSPAN_HWID])
- parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
- }
}
static int ip6gre_tap_init(struct net_device *dev)
@@ -2025,9 +2034,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
struct __ip6_tnl_parm p;
- struct ip6_tnl *t;
t = ip6gre_changelink_common(dev, tb, data, &p, extack);
if (IS_ERR(t))
@@ -2096,12 +2105,31 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm *p = &t->parms;
+ __be16 o_flags = p->o_flags;
+
+ if (p->erspan_ver == 1 || p->erspan_ver == 2) {
+ if (!p->collect_md)
+ o_flags |= TUNNEL_KEY;
+
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
+ goto nla_put_failure;
+
+ if (p->erspan_ver == 1) {
+ if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
+ goto nla_put_failure;
+ }
+ }
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
gre_tnl_flags_to_gre_flags(p->i_flags)) ||
nla_put_be16(skb, IFLA_GRE_OFLAGS,
- gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+ gre_tnl_flags_to_gre_flags(o_flags)) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
@@ -2110,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
- nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
- nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+ nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -2129,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
}
- if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
- goto nla_put_failure;
-
- if (p->erspan_ver == 1) {
- if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
- goto nla_put_failure;
- } else if (p->erspan_ver == 2) {
- if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
- goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
- goto nla_put_failure;
- }
-
return 0;
nla_put_failure:
@@ -2196,6 +2210,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
int err;
ip6gre_netlink_parms(data, &nt->parms);
+ ip6erspan_set_version(data, &nt->parms);
ign = net_generic(net, ip6gre_net_id);
if (nt->parms.collect_md) {
@@ -2241,6 +2256,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
if (IS_ERR(t))
return PTR_ERR(t);
+ ip6erspan_set_version(data, &p);
ip6gre_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 5c045691c302..345882d9c061 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -383,9 +383,36 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
},
};
+static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
+ return ERR_PTR(-EINVAL);
+
+ return ipv6_gso_segment(skb, features);
+}
+
+static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
+ return ERR_PTR(-EINVAL);
+
+ return inet_gso_segment(skb, features);
+}
+
+static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
+ return ERR_PTR(-EINVAL);
+
+ return ipv6_gso_segment(skb, features);
+}
+
static const struct net_offload sit_offload = {
.callbacks = {
- .gso_segment = ipv6_gso_segment,
+ .gso_segment = sit_gso_segment,
.gro_receive = sit_ip6ip6_gro_receive,
.gro_complete = sit_gro_complete,
},
@@ -393,7 +420,7 @@ static const struct net_offload sit_offload = {
static const struct net_offload ip4ip6_offload = {
.callbacks = {
- .gso_segment = inet_gso_segment,
+ .gso_segment = ip4ip6_gso_segment,
.gro_receive = ip4ip6_gro_receive,
.gro_complete = ip4ip6_gro_complete,
},
@@ -401,7 +428,7 @@ static const struct net_offload ip4ip6_offload = {
static const struct net_offload ip6ip6_offload = {
.callbacks = {
- .gso_segment = ipv6_gso_segment,
+ .gso_segment = ip6ip6_gso_segment,
.gro_receive = sit_ip6ip6_gro_receive,
.gro_complete = ip6ip6_gro_complete,
},
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5f9fa0302b5a..edbd12067170 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -300,6 +300,12 @@ static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
if (sk && ra->sel == sel &&
(!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == skb->dev->ifindex)) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (np && np->rtalert_isolate &&
+ !net_eq(sock_net(sk), dev_net(skb->dev))) {
+ continue;
+ }
if (last) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index ad1a9ccd4b44..25430c991cea 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -32,18 +32,9 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
goto error;
}
if (cfg->bind_ifindex) {
- struct net_device *dev;
-
- dev = dev_get_by_index(net, cfg->bind_ifindex);
- if (!dev) {
- err = -ENODEV;
- goto error;
- }
-
- err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
- dev->name, strlen(dev->name) + 1);
- dev_put(dev);
-
+ err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTOIFINDEX,
+ (void *)&cfg->bind_ifindex,
+ sizeof(cfg->bind_ifindex));
if (err < 0)
goto error;
}
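The ip6_udp_tunnel.c hunk above drops the dev_get_by_index()/SO_BINDTODEVICE round trip in favour of the newer SO_BINDTOIFINDEX option. For reference, a hedged userspace equivalent of binding by interface index; the interface name is a placeholder and the fallback #define is an assumption for older headers:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>

#ifndef SO_BINDTOIFINDEX
#define SO_BINDTOIFINDEX 62		/* assumed asm-generic value */
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int ifindex = if_nametoindex("eth0");	/* hypothetical interface */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (ifindex == 0) {
		perror("if_nametoindex");
		close(fd);
		return 1;
	}

	/* Bind to a device by index; no name lookup or device refcount needed. */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTOIFINDEX,
		       &ifindex, sizeof(ifindex)) < 0) {
		perror("setsockopt(SO_BINDTOIFINDEX)");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
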
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 30337b38274b..e4dd57976737 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -97,7 +97,7 @@ static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
struct netlink_callback *cb);
-static void mroute_clean_tables(struct mr_table *mrt, bool all);
+static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
@@ -393,7 +393,8 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
static void ip6mr_free_table(struct mr_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
- mroute_clean_tables(mrt, true);
+ mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
+ MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
rhltable_destroy(&mrt->mfc_hash);
kfree(mrt);
}
@@ -1496,43 +1497,51 @@ static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
* Close the multicast socket, and clear the vif tables etc
*/
-static void mroute_clean_tables(struct mr_table *mrt, bool all)
+static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
struct mr_mfc *c, *tmp;
LIST_HEAD(list);
int i;
/* Shut down all active vif entries */
- for (i = 0; i < mrt->maxvif; i++) {
- if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
- continue;
- mif6_delete(mrt, i, 0, &list);
+ if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
+ for (i = 0; i < mrt->maxvif; i++) {
+ if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
+ !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
+ (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
+ continue;
+ mif6_delete(mrt, i, 0, &list);
+ }
+ unregister_netdevice_many(&list);
}
- unregister_netdevice_many(&list);
/* Wipe the cache */
- list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
- if (!all && (c->mfc_flags & MFC_STATIC))
- continue;
- rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
- list_del_rcu(&c->list);
- mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
- mr_cache_put(c);
- }
-
- if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
- spin_lock_bh(&mfc_unres_lock);
- list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
- list_del(&c->list);
+ if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
+ list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
+ if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
+ (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
+ continue;
+ rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
+ list_del_rcu(&c->list);
call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
FIB_EVENT_ENTRY_DEL,
- (struct mfc6_cache *)c,
- mrt->id);
- mr6_netlink_event(mrt, (struct mfc6_cache *)c,
- RTM_DELROUTE);
- ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
+ (struct mfc6_cache *)c, mrt->id);
+ mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
+ mr_cache_put(c);
+ }
+ }
+
+ if (flags & MRT6_FLUSH_MFC) {
+ if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
+ spin_lock_bh(&mfc_unres_lock);
+ list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
+ list_del(&c->list);
+ mr6_netlink_event(mrt, (struct mfc6_cache *)c,
+ RTM_DELROUTE);
+ ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
+ }
+ spin_unlock_bh(&mfc_unres_lock);
}
- spin_unlock_bh(&mfc_unres_lock);
}
}
@@ -1588,7 +1597,7 @@ int ip6mr_sk_done(struct sock *sk)
NETCONFA_IFINDEX_ALL,
net->ipv6.devconf_all);
- mroute_clean_tables(mrt, false);
+ mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
err = 0;
break;
}
@@ -1704,6 +1713,20 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
rtnl_unlock();
return ret;
+ case MRT6_FLUSH:
+ {
+ int flags;
+
+ if (optlen != sizeof(flags))
+ return -EINVAL;
+ if (get_user(flags, (int __user *)optval))
+ return -EFAULT;
+ rtnl_lock();
+ mroute_clean_tables(mrt, flags);
+ rtnl_unlock();
+ return 0;
+ }
+
/*
* Control PIM assert (to activate pim will activate assert)
*/
@@ -1965,10 +1988,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTFORWDATAGRAMS);
- __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTOCTETS, skb->len);
+ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTOCTETS, skb->len);
return dst_output(net, sk, skb);
}
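The ip6mr.c changes above make mroute_clean_tables() flag-driven and expose a new MRT6_FLUSH socket option so userspace can flush multicast interfaces and/or cache entries selectively. A hedged sketch of using it, assuming uapi headers from a kernel that carries this series (MRT6_FLUSH and the MRT6_FLUSH_* flags), a reasonably recent glibc, and CAP_NET_ADMIN:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>	/* MRT6_FLUSH*, from headers carrying this series */

int main(void)
{
	/* ip6mr sockopts are only accepted on a raw ICMPv6 socket. */
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	/* Flush dynamic MIFs and MFC entries, keep static ones. */
	int flags = MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	if (setsockopt(fd, IPPROTO_IPV6, MRT6_FLUSH, &flags, sizeof(flags)) < 0)
		perror("setsockopt(MRT6_FLUSH)");

	close(fd);
	return 0;
}
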
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 973e215c3114..40f21fef25ff 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -787,6 +787,12 @@ done:
goto e_inval;
retv = ip6_ra_control(sk, val);
break;
+ case IPV6_ROUTER_ALERT_ISOLATE:
+ if (optlen < sizeof(int))
+ goto e_inval;
+ np->rtalert_isolate = valbool;
+ retv = 0;
+ break;
case IPV6_MTU_DISCOVER:
if (optlen < sizeof(int))
goto e_inval;
@@ -1358,6 +1364,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->rxopt.bits.recvfragsize;
break;
+ case IPV6_ROUTER_ALERT_ISOLATE:
+ val = np->rtalert_isolate;
+ break;
+
default:
return -ENOPROTOOPT;
}
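The new IPV6_ROUTER_ALERT_ISOLATE option above confines router-alert delivery to packets from the socket's own netns (see the ip6_call_ra_chain() hunk earlier in this diff). A hedged userspace sketch; the fallback option value is an assumption taken from linux/in6.h, and the raw socket requires root:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPV6_ROUTER_ALERT_ISOLATE
#define IPV6_ROUTER_ALERT_ISOLATE 30	/* assumed value from linux/in6.h */
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);	/* needs root */
	int sel = 0;	/* router alert value 0 (MLD) */
	int on = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Register for router-alert packets ... */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_ROUTER_ALERT, &sel, sizeof(sel)) < 0)
		perror("setsockopt(IPV6_ROUTER_ALERT)");

	/* ... and restrict delivery to packets arriving in this socket's netns. */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_ROUTER_ALERT_ISOLATE,
		       &on, sizeof(on)) < 0)
		perror("setsockopt(IPV6_ROUTER_ALERT_ISOLATE)");

	close(fd);
	return 0;
}
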
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 21f6deb2aec9..42f3f5cd349f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -940,6 +940,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
}
+EXPORT_SYMBOL(ipv6_dev_mc_inc);
/*
* device multicast group del
@@ -987,6 +988,7 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
return err;
}
+EXPORT_SYMBOL(ipv6_dev_mc_dec);
/*
* check if the interface/address pair is valid
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index 9405b04eecc6..dddd75d1be0e 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -41,6 +41,8 @@ static int ipv6_mc_check_ip6hdr(struct sk_buff *skb)
if (skb->len < len || len <= offset)
return -EINVAL;
+ skb_set_transport_header(skb, offset);
+
return 0;
}
@@ -77,27 +79,27 @@ static int ipv6_mc_check_mld_reportv2(struct sk_buff *skb)
len += sizeof(struct mld2_report);
- return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+ return ipv6_mc_may_pull(skb, len) ? 0 : -EINVAL;
}
static int ipv6_mc_check_mld_query(struct sk_buff *skb)
{
+ unsigned int transport_len = ipv6_transport_len(skb);
struct mld_msg *mld;
- unsigned int len = skb_transport_offset(skb);
+ unsigned int len;
/* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
return -EINVAL;
- len += sizeof(struct mld_msg);
- if (skb->len < len)
- return -EINVAL;
-
/* MLDv1? */
- if (skb->len != len) {
+ if (transport_len != sizeof(struct mld_msg)) {
/* or MLDv2? */
- len += sizeof(struct mld2_query) - sizeof(struct mld_msg);
- if (skb->len < len || !pskb_may_pull(skb, len))
+ if (transport_len < sizeof(struct mld2_query))
+ return -EINVAL;
+
+ len = skb_transport_offset(skb) + sizeof(struct mld2_query);
+ if (!ipv6_mc_may_pull(skb, len))
return -EINVAL;
}
@@ -115,12 +117,17 @@ static int ipv6_mc_check_mld_query(struct sk_buff *skb)
static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
{
- struct mld_msg *mld = (struct mld_msg *)skb_transport_header(skb);
+ unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
+ struct mld_msg *mld;
+
+ if (!ipv6_mc_may_pull(skb, len))
+ return -EINVAL;
+
+ mld = (struct mld_msg *)skb_transport_header(skb);
switch (mld->mld_type) {
case ICMPV6_MGM_REDUCTION:
case ICMPV6_MGM_REPORT:
- /* fall through */
return 0;
case ICMPV6_MLD2_REPORT:
return ipv6_mc_check_mld_reportv2(skb);
@@ -136,49 +143,30 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
}
-static int __ipv6_mc_check_mld(struct sk_buff *skb,
- struct sk_buff **skb_trimmed)
-
+int ipv6_mc_check_icmpv6(struct sk_buff *skb)
{
- struct sk_buff *skb_chk = NULL;
- unsigned int transport_len;
- unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
- int ret = -EINVAL;
+ unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
+ unsigned int transport_len = ipv6_transport_len(skb);
+ struct sk_buff *skb_chk;
- transport_len = ntohs(ipv6_hdr(skb)->payload_len);
- transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
+ if (!ipv6_mc_may_pull(skb, len))
+ return -EINVAL;
skb_chk = skb_checksum_trimmed(skb, transport_len,
ipv6_mc_validate_checksum);
if (!skb_chk)
- goto err;
-
- if (!pskb_may_pull(skb_chk, len))
- goto err;
-
- ret = ipv6_mc_check_mld_msg(skb_chk);
- if (ret)
- goto err;
-
- if (skb_trimmed)
- *skb_trimmed = skb_chk;
- /* free now unneeded clone */
- else if (skb_chk != skb)
- kfree_skb(skb_chk);
-
- ret = 0;
+ return -EINVAL;
-err:
- if (ret && skb_chk && skb_chk != skb)
+ if (skb_chk != skb)
kfree_skb(skb_chk);
- return ret;
+ return 0;
}
+EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
/**
* ipv6_mc_check_mld - checks whether this is a sane MLD packet
* @skb: the skb to validate
- * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
*
* Checks whether an IPv6 packet is a valid MLD packet. If so sets
* skb transport header accordingly and returns zero.
@@ -188,18 +176,10 @@ err:
* -ENOMSG: IP header validation succeeded but it is not an MLD packet.
* -ENOMEM: A memory allocation failure happened.
*
- * Optionally, an skb pointer might be provided via skb_trimmed (or set it
- * to NULL): After parsing an MLD packet successfully it will point to
- * an skb which has its tail aligned to the IP packet end. This might
- * either be the originally provided skb or a trimmed, cloned version if
- * the skb frame had data beyond the IP packet. A cloned skb allows us
- * to leave the original skb and its full frame unchanged (which might be
- * desirable for layer 2 frame jugglers).
- *
* Caller needs to set the skb network header and free any returned skb if it
* differs from the provided skb.
*/
-int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+int ipv6_mc_check_mld(struct sk_buff *skb)
{
int ret;
@@ -211,6 +191,10 @@ int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
if (ret < 0)
return ret;
- return __ipv6_mc_check_mld(skb, skb_trimmed);
+ ret = ipv6_mc_check_icmpv6(skb);
+ if (ret < 0)
+ return ret;
+
+ return ipv6_mc_check_mld_msg(skb);
}
EXPORT_SYMBOL(ipv6_mc_check_mld);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 8b075f0bc351..1240ccd57f39 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
struct sock *sk = sk_to_full_sk(skb->sk);
unsigned int hh_len;
struct dst_entry *dst;
+ int strict = (ipv6_addr_type(&iph->daddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
struct flowi6 fl6 = {
.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
- rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
+ strict ? skb_dst(skb)->dev->ifindex : 0,
.flowi6_mark = skb->mark,
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,
@@ -84,8 +86,8 @@ static int nf_ip6_reroute(struct sk_buff *skb,
return 0;
}
-static int nf_ip6_route(struct net *net, struct dst_entry **dst,
- struct flowi *fl, bool strict)
+int __nf_ip6_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
{
static const struct ipv6_pinfo fake_pinfo;
static const struct inet_sock fake_sk = {
@@ -105,12 +107,17 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
*dst = result;
return err;
}
+EXPORT_SYMBOL_GPL(__nf_ip6_route);
static const struct nf_ipv6_ops ipv6ops = {
+#if IS_MODULE(CONFIG_IPV6)
.chk_addr = ipv6_chk_addr,
- .route_input = ip6_route_input,
+ .route_me_harder = ip6_route_me_harder,
+ .dev_get_saddr = ipv6_dev_get_saddr,
+ .route = __nf_ip6_route,
+#endif
+ .route_input = ip6_route_input,
.fragment = ip6_fragment,
- .route = nf_ip6_route,
.reroute = nf_ip6_reroute,
};
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 339d0762b027..ddc99a1653aa 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -31,34 +31,6 @@ config NFT_CHAIN_ROUTE_IPV6
fields such as the source, destination, flowlabel, hop-limit and
the packet mark.
-if NF_NAT_IPV6
-
-config NFT_CHAIN_NAT_IPV6
- tristate "IPv6 nf_tables nat chain support"
- help
- This option enables the "nat" chain for IPv6 in nf_tables. This
- chain type is used to perform Network Address Translation (NAT)
- packet transformations such as the source, destination address and
- source and destination ports.
-
-config NFT_MASQ_IPV6
- tristate "IPv6 masquerade support for nf_tables"
- depends on NFT_MASQ
- select NF_NAT_MASQUERADE_IPV6
- help
- This is the expression that provides IPv4 masquerading support for
- nf_tables.
-
-config NFT_REDIR_IPV6
- tristate "IPv6 redirect support for nf_tables"
- depends on NFT_REDIR
- select NF_NAT_REDIRECT
- help
- This is the expression that provides IPv4 redirect support for
- nf_tables.
-
-endif # NF_NAT_IPV6
-
config NFT_REJECT_IPV6
select NF_REJECT_IPV6
default NFT_REJECT
@@ -106,23 +78,6 @@ config NF_LOG_IPV6
default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
-config NF_NAT_IPV6
- tristate "IPv6 NAT"
- depends on NF_CONNTRACK
- depends on NETFILTER_ADVANCED
- select NF_NAT
- help
- The IPv6 NAT option allows masquerading, port forwarding and other
- forms of full Network Address Port Translation. This can be
- controlled by iptables or nft.
-
-if NF_NAT_IPV6
-
-config NF_NAT_MASQUERADE_IPV6
- bool
-
-endif # NF_NAT_IPV6
-
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6
@@ -311,7 +266,6 @@ config IP6_NF_NAT
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_NAT
- select NF_NAT_IPV6
select NETFILTER_XT_NAT
help
This enables the `nat' table in ip6tables. This allows masquerading,
@@ -324,7 +278,7 @@ if IP6_NF_NAT
config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
- select NF_NAT_MASQUERADE_IPV6
+ select NF_NAT_MASQUERADE
help
Masquerading is a special case of NAT: all outgoing connections are
changed to seem to come from a particular interface's address, and
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 9ea43d5256e0..3853c648ebaa 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -11,10 +11,6 @@ obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o
-nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o
-nf_nat_ipv6-$(CONFIG_NF_NAT_MASQUERADE_IPV6) += nf_nat_masquerade_ipv6.o
-obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
-
# defrag
nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
@@ -32,10 +28,7 @@ obj-$(CONFIG_NF_DUP_IPV6) += nf_dup_ipv6.o
# nf_tables
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
-obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
-obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
-obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
obj-$(CONFIG_NFT_FIB_IPV6) += nft_fib_ipv6.o
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 67ba70ab9f5c..3e1fab9d7503 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -17,8 +17,6 @@
#include <net/ipv6.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
static int __net_init ip6table_nat_table_init(struct net *net);
@@ -72,10 +70,10 @@ static int ip6t_nat_register_lookups(struct net *net)
int i, ret;
for (i = 0; i < ARRAY_SIZE(nf_nat_ipv6_ops); i++) {
- ret = nf_nat_l3proto_ipv6_register_fn(net, &nf_nat_ipv6_ops[i]);
+ ret = nf_nat_ipv6_register_fn(net, &nf_nat_ipv6_ops[i]);
if (ret) {
while (i)
- nf_nat_l3proto_ipv6_unregister_fn(net, &nf_nat_ipv6_ops[--i]);
+ nf_nat_ipv6_unregister_fn(net, &nf_nat_ipv6_ops[--i]);
return ret;
}
@@ -89,7 +87,7 @@ static void ip6t_nat_unregister_lookups(struct net *net)
int i;
for (i = 0; i < ARRAY_SIZE(nf_nat_ipv6_ops); i++)
- nf_nat_l3proto_ipv6_unregister_fn(net, &nf_nat_ipv6_ops[i]);
+ nf_nat_ipv6_unregister_fn(net, &nf_nat_ipv6_ops[i]);
}
static int __net_init ip6table_nat_table_init(struct net *net)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 181da2c40f9a..3de0e9b0a482 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -136,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
}
#endif
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
+
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
@@ -177,9 +180,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
const struct frag_hdr *fhdr, int nhoff)
{
- struct sk_buff *prev, *next;
unsigned int payload_len;
- int offset, end;
+ struct net_device *dev;
+ struct sk_buff *prev;
+ int offset, end, err;
u8 ecn;
if (fq->q.flags & INET_FRAG_COMPLETE) {
@@ -254,55 +258,18 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
goto err;
}
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = fq->q.fragments_tail;
- if (!prev || prev->ip_defrag_offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = fq->q.fragments; next != NULL; next = next->next) {
- if (next->ip_defrag_offset >= offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* RFC5722, Section 4:
- * When reassembling an IPv6 datagram, if
- * one or more its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments, including those not yet received) MUST be silently
- * discarded.
- */
-
- /* Check for overlap with preceding fragment. */
- if (prev &&
- (prev->ip_defrag_offset + prev->len) > offset)
- goto discard_fq;
-
- /* Look for overlap with succeeding segment. */
- if (next && next->ip_defrag_offset < end)
- goto discard_fq;
-
- /* Note : skb->ip_defrag_offset and skb->dev share the same location */
- if (skb->dev)
- fq->iif = skb->dev->ifindex;
+ /* Note : skb->rbnode and skb->dev share the same location. */
+ dev = skb->dev;
/* Makes sure compiler wont do silly aliasing games */
barrier();
- skb->ip_defrag_offset = offset;
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- fq->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- fq->q.fragments = skb;
+ prev = fq->q.fragments_tail;
+ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+ if (err)
+ goto insert_error;
+
+ if (dev)
+ fq->iif = dev->ifindex;
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
@@ -319,11 +286,25 @@ found:
fq->q.flags |= INET_FRAG_FIRST_IN;
}
- return 0;
+ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ fq->q.meat == fq->q.len) {
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ err = nf_ct_frag6_reasm(fq, skb, prev, dev);
+ skb->_skb_refdst = orefdst;
+ return err;
+ }
+
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
-discard_fq:
+insert_error:
+ if (err == IPFRAG_DUP)
+ goto err;
inet_frag_kill(&fq->q);
err:
+ skb_dst_drop(skb);
return -EINVAL;
}
@@ -333,147 +314,66 @@ err:
* It is called with locked fq, and caller must check that
* queue is eligible for reassembly i.e. it is not COMPLETE,
* the last and the first frames arrived and all the bits are here.
- *
- * returns true if *prev skb has been transformed into the reassembled
- * skb, false otherwise.
*/
-static bool
-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
- struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len, delta;
+ void *reasm_data;
+ int payload_len;
u8 ecn;
inet_frag_kill(&fq->q);
- WARN_ON(head == NULL);
- WARN_ON(head->ip_defrag_offset != 0);
-
ecn = ip_frag_ecn_table[fq->ecn];
if (unlikely(ecn == 0xff))
- return false;
+ goto err;
+
+ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+ if (!reasm_data)
+ goto err;
- /* Unfragmented part is taken from the first segment. */
- payload_len = ((head->data - skb_network_header(head)) -
+ payload_len = ((skb->data - skb_network_header(skb)) -
sizeof(struct ipv6hdr) + fq->q.len -
sizeof(struct frag_hdr));
if (payload_len > IPV6_MAXPLEN) {
net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
payload_len);
- return false;
- }
-
- delta = - head->truesize;
-
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- return false;
-
- delta += head->truesize;
- if (delta)
- add_frag_mem_limit(fq->q.net, delta);
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (clone == NULL)
- return false;
-
- clone->next = head->next;
- head->next = clone;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
-
- add_frag_mem_limit(fq->q.net, clone->truesize);
- }
-
- /* morph head into last received skb: prev.
- *
- * This allows callers of ipv6 conntrack defrag to continue
- * to use the last skb(frag) passed into the reasm engine.
- * The last skb frag 'silently' turns into the full reassembled skb.
- *
- * Since prev is also part of q->fragments we have to clone it first.
- */
- if (head != prev) {
- struct sk_buff *iter;
-
- fp = skb_clone(prev, GFP_ATOMIC);
- if (!fp)
- return false;
-
- fp->next = prev->next;
-
- iter = head;
- while (iter) {
- if (iter->next == prev) {
- iter->next = fp;
- break;
- }
- iter = iter->next;
- }
-
- skb_morph(prev, head);
- prev->next = head->next;
- consume_skb(head);
- head = prev;
+ goto err;
}
/* We have to remove fragment header from datagram and to relocate
* header in order to calculate ICV correctly. */
- skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
- memmove(head->head + sizeof(struct frag_hdr), head->head,
- (head->data - head->head) - sizeof(struct frag_hdr));
- head->mac_header += sizeof(struct frag_hdr);
- head->network_header += sizeof(struct frag_hdr);
-
- skb_shinfo(head)->frag_list = head->next;
- skb_reset_transport_header(head);
- skb_push(head, head->data - skb_network_header(head));
-
- for (fp = head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
- fp->sk = NULL;
- }
- sub_frag_mem_limit(fq->q.net, head->truesize);
+ skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
+ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+ (skb->data - skb->head) - sizeof(struct frag_hdr));
+ skb->mac_header += sizeof(struct frag_hdr);
+ skb->network_header += sizeof(struct frag_hdr);
+
+ skb_reset_transport_header(skb);
+
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
- head->ignore_df = 1;
- skb_mark_not_on_list(head);
- head->dev = dev;
- head->tstamp = fq->q.stamp;
- ipv6_hdr(head)->payload_len = htons(payload_len);
- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
- IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
+ skb->ignore_df = 1;
+ skb->dev = dev;
+ ipv6_hdr(skb)->payload_len = htons(payload_len);
+ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+ IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
/* Yes, and fold redundant checksum back. 8) */
- if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_partial(skb_network_header(head),
- skb_network_header_len(head),
- head->csum);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_partial(skb_network_header(skb),
+ skb_network_header_len(skb),
+ skb->csum);
- fq->q.fragments = NULL;
fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
+ fq->q.last_run_head = NULL;
- return true;
+ return 0;
+
+err:
+ inet_frag_kill(&fq->q);
+ return -EINVAL;
}
/*
@@ -542,7 +442,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
u16 savethdr = skb->transport_header;
- struct net_device *dev = skb->dev;
int fhoff, nhoff, ret;
struct frag_hdr *fhdr;
struct frag_queue *fq;
@@ -565,10 +464,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
- fhdr->frag_off & htons(IP6_MF))
- return -EINVAL;
-
skb_orphan(skb);
fq = fq_find(net, fhdr->identification, user, hdr,
skb->dev ? skb->dev->ifindex : 0);
@@ -580,31 +475,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
spin_lock_bh(&fq->q.lock);
ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
- if (ret < 0) {
- if (ret == -EPROTO) {
- skb->transport_header = savethdr;
- ret = 0;
- }
- goto out_unlock;
+ if (ret == -EPROTO) {
+ skb->transport_header = savethdr;
+ ret = 0;
}
/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
* must be returned.
*/
- ret = -EINPROGRESS;
- if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
- fq->q.meat == fq->q.len) {
- unsigned long orefdst = skb->_skb_refdst;
-
- skb->_skb_refdst = 0UL;
- if (nf_ct_frag6_reasm(fq, skb, dev))
- ret = 0;
- skb->_skb_refdst = orefdst;
- } else {
- skb_dst_drop(skb);
- }
+ if (ret)
+ ret = -EINPROGRESS;
-out_unlock:
spin_unlock_bh(&fq->q.lock);
inet_frag_put(&fq->q);
return ret;
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
deleted file mode 100644
index 23022447eb49..000000000000
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Development of IPv6 NAT funded by Astaro.
- */
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ipv6.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv6.h>
-#include <net/secure_seq.h>
-#include <net/checksum.h>
-#include <net/ip6_checksum.h>
-#include <net/ip6_route.h>
-#include <net/ipv6.h>
-
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static const struct nf_nat_l3proto nf_nat_l3proto_ipv6;
-
-#ifdef CONFIG_XFRM
-static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
- const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- unsigned long statusbit,
- struct flowi *fl)
-{
- const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
- struct flowi6 *fl6 = &fl->u.ip6;
-
- if (ct->status & statusbit) {
- fl6->daddr = t->dst.u3.in6;
- if (t->dst.protonum == IPPROTO_TCP ||
- t->dst.protonum == IPPROTO_UDP ||
- t->dst.protonum == IPPROTO_UDPLITE ||
- t->dst.protonum == IPPROTO_DCCP ||
- t->dst.protonum == IPPROTO_SCTP)
- fl6->fl6_dport = t->dst.u.all;
- }
-
- statusbit ^= IPS_NAT_MASK;
-
- if (ct->status & statusbit) {
- fl6->saddr = t->src.u3.in6;
- if (t->dst.protonum == IPPROTO_TCP ||
- t->dst.protonum == IPPROTO_UDP ||
- t->dst.protonum == IPPROTO_UDPLITE ||
- t->dst.protonum == IPPROTO_DCCP ||
- t->dst.protonum == IPPROTO_SCTP)
- fl6->fl6_sport = t->src.u.all;
- }
-}
-#endif
-
-static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
- unsigned int iphdroff,
- const struct nf_conntrack_tuple *target,
- enum nf_nat_manip_type maniptype)
-{
- struct ipv6hdr *ipv6h;
- __be16 frag_off;
- int hdroff;
- u8 nexthdr;
-
- if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h)))
- return false;
-
- ipv6h = (void *)skb->data + iphdroff;
- nexthdr = ipv6h->nexthdr;
- hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
- &nexthdr, &frag_off);
- if (hdroff < 0)
- goto manip_addr;
-
- if ((frag_off & htons(~0x7)) == 0 &&
- !nf_nat_l4proto_manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
- target, maniptype))
- return false;
-
- /* must reload, offset might have changed */
- ipv6h = (void *)skb->data + iphdroff;
-
-manip_addr:
- if (maniptype == NF_NAT_MANIP_SRC)
- ipv6h->saddr = target->src.u3.in6;
- else
- ipv6h->daddr = target->dst.u3.in6;
-
- return true;
-}
-
-static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
- unsigned int iphdroff, __sum16 *check,
- const struct nf_conntrack_tuple *t,
- enum nf_nat_manip_type maniptype)
-{
- const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
- const struct in6_addr *oldip, *newip;
-
- if (maniptype == NF_NAT_MANIP_SRC) {
- oldip = &ipv6h->saddr;
- newip = &t->src.u3.in6;
- } else {
- oldip = &ipv6h->daddr;
- newip = &t->dst.u3.in6;
- }
- inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
- newip->s6_addr32, true);
-}
-
-static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
- u8 proto, void *data, __sum16 *check,
- int datalen, int oldlen)
-{
- if (skb->ip_summed != CHECKSUM_PARTIAL) {
- const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
- (data - (void *)skb->data);
- skb->csum_offset = (void *)check - data;
- *check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
- datalen, proto, 0);
- } else
- inet_proto_csum_replace2(check, skb,
- htons(oldlen), htons(datalen), true);
-}
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range2 *range)
-{
- if (tb[CTA_NAT_V6_MINIP]) {
- nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
- sizeof(struct in6_addr));
- range->flags |= NF_NAT_RANGE_MAP_IPS;
- }
-
- if (tb[CTA_NAT_V6_MAXIP])
- nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
- sizeof(struct in6_addr));
- else
- range->max_addr = range->min_addr;
-
- return 0;
-}
-#endif
-
-static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
- .l3proto = NFPROTO_IPV6,
- .manip_pkt = nf_nat_ipv6_manip_pkt,
- .csum_update = nf_nat_ipv6_csum_update,
- .csum_recalc = nf_nat_ipv6_csum_recalc,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_ipv6_nlattr_to_range,
-#endif
-#ifdef CONFIG_XFRM
- .decode_session = nf_nat_ipv6_decode_session,
-#endif
-};
-
-int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum,
- unsigned int hdrlen)
-{
- struct {
- struct icmp6hdr icmp6;
- struct ipv6hdr ip6;
- } *inside;
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
- struct nf_conntrack_tuple target;
- unsigned long statusbit;
-
- WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
-
- if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
- return 0;
- if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
- return 0;
-
- inside = (void *)skb->data + hdrlen;
- if (inside->icmp6.icmp6_type == NDISC_REDIRECT) {
- if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
- return 0;
- if (ct->status & IPS_NAT_MASK)
- return 0;
- }
-
- if (manip == NF_NAT_MANIP_SRC)
- statusbit = IPS_SRC_NAT;
- else
- statusbit = IPS_DST_NAT;
-
- /* Invert if this is reply direction */
- if (dir == IP_CT_DIR_REPLY)
- statusbit ^= IPS_NAT_MASK;
-
- if (!(ct->status & statusbit))
- return 1;
-
- if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
- &ct->tuplehash[!dir].tuple, !manip))
- return 0;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL) {
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
- inside = (void *)skb->data + hdrlen;
- inside->icmp6.icmp6_cksum = 0;
- inside->icmp6.icmp6_cksum =
- csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
- skb->len - hdrlen, IPPROTO_ICMPV6,
- skb_checksum(skb, hdrlen,
- skb->len - hdrlen, 0));
- }
-
- nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
- if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
- return 0;
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
-
-static unsigned int
-nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- __be16 frag_off;
- int hdrlen;
- u8 nexthdr;
-
- ct = nf_ct_get(skb, &ctinfo);
- /* Can't track? It's not due to stress, or conntrack would
- * have dropped it. Hence it's the user's responsibilty to
- * packet filter it out, or implement conntrack/NAT for that
- * protocol. 8) --RR
- */
- if (!ct)
- return NF_ACCEPT;
-
- if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) {
- nexthdr = ipv6_hdr(skb)->nexthdr;
- hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
- &nexthdr, &frag_off);
-
- if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
- if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
- state->hook,
- hdrlen))
- return NF_DROP;
- else
- return NF_ACCEPT;
- }
- }
-
- return nf_nat_inet_fn(priv, skb, state);
-}
-
-static unsigned int
-nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- unsigned int ret;
- struct in6_addr daddr = ipv6_hdr(skb)->daddr;
-
- ret = nf_nat_ipv6_fn(priv, skb, state);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
- skb_dst_drop(skb);
-
- return ret;
-}
-
-static unsigned int
-nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
-#ifdef CONFIG_XFRM
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- int err;
-#endif
- unsigned int ret;
-
- ret = nf_nat_ipv6_fn(priv, skb, state);
-#ifdef CONFIG_XFRM
- if (ret != NF_DROP && ret != NF_STOLEN &&
- !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
- &ct->tuplehash[!dir].tuple.dst.u3) ||
- (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
- ct->tuplehash[dir].tuple.src.u.all !=
- ct->tuplehash[!dir].tuple.dst.u.all)) {
- err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
- }
-#endif
- return ret;
-}
-
-static unsigned int
-nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- unsigned int ret;
- int err;
-
- ret = nf_nat_ipv6_fn(priv, skb, state);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
- &ct->tuplehash[!dir].tuple.src.u3)) {
- err = ip6_route_me_harder(state->net, skb);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
-#ifdef CONFIG_XFRM
- else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
- ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
- ct->tuplehash[dir].tuple.dst.u.all !=
- ct->tuplehash[!dir].tuple.src.u.all) {
- err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
-#endif
- }
- return ret;
-}
-
-static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
- /* Before packet filtering, change destination */
- {
- .hook = nf_nat_ipv6_in,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP6_PRI_NAT_DST,
- },
- /* After packet filtering, change source */
- {
- .hook = nf_nat_ipv6_out,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP6_PRI_NAT_SRC,
- },
- /* Before packet filtering, change destination */
- {
- .hook = nf_nat_ipv6_local_fn,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP6_PRI_NAT_DST,
- },
- /* After packet filtering, change source */
- {
- .hook = nf_nat_ipv6_fn,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP6_PRI_NAT_SRC,
- },
-};
-
-int nf_nat_l3proto_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops)
-{
- return nf_nat_register_fn(net, ops, nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops));
-}
-EXPORT_SYMBOL_GPL(nf_nat_l3proto_ipv6_register_fn);
-
-void nf_nat_l3proto_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
-{
- nf_nat_unregister_fn(net, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
-}
-EXPORT_SYMBOL_GPL(nf_nat_l3proto_ipv6_unregister_fn);
-
-static int __init nf_nat_l3proto_ipv6_init(void)
-{
- return nf_nat_l3proto_register(&nf_nat_l3proto_ipv6);
-}
-
-static void __exit nf_nat_l3proto_ipv6_exit(void)
-{
- nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv6);
-}
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("nf-nat-" __stringify(AF_INET6));
-
-module_init(nf_nat_l3proto_ipv6_init);
-module_exit(nf_nat_l3proto_ipv6_exit);
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
deleted file mode 100644
index 0ad0da5a2600..000000000000
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6
- * NAT funded by Astaro.
- */
-
-#include <linux/kernel.h>
-#include <linux/atomic.h>
-#include <linux/netdevice.h>
-#include <linux/ipv6.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv6.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/addrconf.h>
-#include <net/ipv6.h>
-#include <net/netfilter/ipv6/nf_nat_masquerade.h>
-
-#define MAX_WORK_COUNT 16
-
-static atomic_t v6_worker_count;
-
-unsigned int
-nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
- const struct net_device *out)
-{
- enum ip_conntrack_info ctinfo;
- struct nf_conn_nat *nat;
- struct in6_addr src;
- struct nf_conn *ct;
- struct nf_nat_range2 newrange;
-
- ct = nf_ct_get(skb, &ctinfo);
- WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
- ctinfo == IP_CT_RELATED_REPLY)));
-
- if (ipv6_dev_get_saddr(nf_ct_net(ct), out,
- &ipv6_hdr(skb)->daddr, 0, &src) < 0)
- return NF_DROP;
-
- nat = nf_ct_nat_ext_add(ct);
- if (nat)
- nat->masq_index = out->ifindex;
-
- newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
- newrange.min_addr.in6 = src;
- newrange.max_addr.in6 = src;
- newrange.min_proto = range->min_proto;
- newrange.max_proto = range->max_proto;
-
- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
-}
-EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
-
-static int device_cmp(struct nf_conn *ct, void *ifindex)
-{
- const struct nf_conn_nat *nat = nfct_nat(ct);
-
- if (!nat)
- return 0;
- if (nf_ct_l3num(ct) != NFPROTO_IPV6)
- return 0;
- return nat->masq_index == (int)(long)ifindex;
-}
-
-static int masq_device_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct net *net = dev_net(dev);
-
- if (event == NETDEV_DOWN)
- nf_ct_iterate_cleanup_net(net, device_cmp,
- (void *)(long)dev->ifindex, 0, 0);
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block masq_dev_notifier = {
- .notifier_call = masq_device_event,
-};
-
-struct masq_dev_work {
- struct work_struct work;
- struct net *net;
- struct in6_addr addr;
- int ifindex;
-};
-
-static int inet_cmp(struct nf_conn *ct, void *work)
-{
- struct masq_dev_work *w = (struct masq_dev_work *)work;
- struct nf_conntrack_tuple *tuple;
-
- if (!device_cmp(ct, (void *)(long)w->ifindex))
- return 0;
-
- tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-
- return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
-}
-
-static void iterate_cleanup_work(struct work_struct *work)
-{
- struct masq_dev_work *w;
-
- w = container_of(work, struct masq_dev_work, work);
-
- nf_ct_iterate_cleanup_net(w->net, inet_cmp, (void *)w, 0, 0);
-
- put_net(w->net);
- kfree(w);
- atomic_dec(&v6_worker_count);
- module_put(THIS_MODULE);
-}
-
-/* ipv6 inet notifier is an atomic notifier, i.e. we cannot
- * schedule.
- *
- * Unfortunately, nf_ct_iterate_cleanup_net can run for a long
- * time if there are lots of conntracks and the system
- * handles high softirq load, so it frequently calls cond_resched
- * while iterating the conntrack table.
- *
- * So we defer nf_ct_iterate_cleanup_net walk to the system workqueue.
- *
- * As we can have 'a lot' of inet_events (depending on amount
- * of ipv6 addresses being deleted), we also need to add an upper
- * limit to the number of queued work items.
- */
-static int masq_inet6_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct inet6_ifaddr *ifa = ptr;
- const struct net_device *dev;
- struct masq_dev_work *w;
- struct net *net;
-
- if (event != NETDEV_DOWN ||
- atomic_read(&v6_worker_count) >= MAX_WORK_COUNT)
- return NOTIFY_DONE;
-
- dev = ifa->idev->dev;
- net = maybe_get_net(dev_net(dev));
- if (!net)
- return NOTIFY_DONE;
-
- if (!try_module_get(THIS_MODULE))
- goto err_module;
-
- w = kmalloc(sizeof(*w), GFP_ATOMIC);
- if (w) {
- atomic_inc(&v6_worker_count);
-
- INIT_WORK(&w->work, iterate_cleanup_work);
- w->ifindex = dev->ifindex;
- w->net = net;
- w->addr = ifa->addr;
- schedule_work(&w->work);
-
- return NOTIFY_DONE;
- }
-
- module_put(THIS_MODULE);
- err_module:
- put_net(net);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block masq_inet6_notifier = {
- .notifier_call = masq_inet6_event,
-};
-
-static int masq_refcnt;
-static DEFINE_MUTEX(masq_mutex);
-
-int nf_nat_masquerade_ipv6_register_notifier(void)
-{
- int ret = 0;
-
- mutex_lock(&masq_mutex);
- /* check if the notifier is already set */
- if (++masq_refcnt > 1)
- goto out_unlock;
-
- ret = register_netdevice_notifier(&masq_dev_notifier);
- if (ret)
- goto err_dec;
-
- ret = register_inet6addr_notifier(&masq_inet6_notifier);
- if (ret)
- goto err_unregister;
-
- mutex_unlock(&masq_mutex);
- return ret;
-
-err_unregister:
- unregister_netdevice_notifier(&masq_dev_notifier);
-err_dec:
- masq_refcnt--;
-out_unlock:
- mutex_unlock(&masq_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
-
-void nf_nat_masquerade_ipv6_unregister_notifier(void)
-{
- mutex_lock(&masq_mutex);
- /* check if the notifier still has clients */
- if (--masq_refcnt > 0)
- goto out_unlock;
-
- unregister_inet6addr_notifier(&masq_inet6_notifier);
- unregister_netdevice_notifier(&masq_dev_notifier);
-out_unlock:
- mutex_unlock(&masq_mutex);
-}
-EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index b9c8a763c863..02e9228641e0 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -233,6 +233,9 @@ static bool reject6_csum_ok(struct sk_buff *skb, int hook)
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
+ if (!nf_reject_verify_csum(proto))
+ return true;
+
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
deleted file mode 100644
index 8a081ad7d5db..000000000000
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
- * Copyright (c) 2012 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv6.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables_ipv6.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/ipv6.h>
-
-static unsigned int nft_nat_do_chain(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nft_pktinfo pkt;
-
- nft_set_pktinfo(&pkt, skb, state);
- nft_set_pktinfo_ipv6(&pkt, skb);
-
- return nft_do_chain(&pkt, priv);
-}
-
-static int nft_nat_ipv6_reg(struct net *net, const struct nf_hook_ops *ops)
-{
- return nf_nat_l3proto_ipv6_register_fn(net, ops);
-}
-
-static void nft_nat_ipv6_unreg(struct net *net, const struct nf_hook_ops *ops)
-{
- nf_nat_l3proto_ipv6_unregister_fn(net, ops);
-}
-
-static const struct nft_chain_type nft_chain_nat_ipv6 = {
- .name = "nat",
- .type = NFT_CHAIN_T_NAT,
- .family = NFPROTO_IPV6,
- .owner = THIS_MODULE,
- .hook_mask = (1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_POST_ROUTING) |
- (1 << NF_INET_LOCAL_OUT) |
- (1 << NF_INET_LOCAL_IN),
- .hooks = {
- [NF_INET_PRE_ROUTING] = nft_nat_do_chain,
- [NF_INET_POST_ROUTING] = nft_nat_do_chain,
- [NF_INET_LOCAL_OUT] = nft_nat_do_chain,
- [NF_INET_LOCAL_IN] = nft_nat_do_chain,
- },
- .ops_register = nft_nat_ipv6_reg,
- .ops_unregister = nft_nat_ipv6_unreg,
-};
-
-static int __init nft_chain_nat_ipv6_init(void)
-{
- nft_register_chain_type(&nft_chain_nat_ipv6);
-
- return 0;
-}
-
-static void __exit nft_chain_nat_ipv6_exit(void)
-{
- nft_unregister_chain_type(&nft_chain_nat_ipv6);
-}
-
-module_init(nft_chain_nat_ipv6_init);
-module_exit(nft_chain_nat_ipv6_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
-MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 36be3cf0adef..73cdc0bc63f7 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -59,7 +59,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
struct ipv6hdr *iph)
{
const struct net_device *dev = NULL;
- const struct nf_ipv6_ops *v6ops;
int route_err, addrtype;
struct rt6_info *rt;
struct flowi6 fl6 = {
@@ -68,10 +67,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
};
u32 ret = 0;
- v6ops = nf_get_ipv6_ops();
- if (!v6ops)
- return RTN_UNREACHABLE;
-
if (priv->flags & NFTA_FIB_F_IIF)
dev = nft_in(pkt);
else if (priv->flags & NFTA_FIB_F_OIF)
@@ -79,10 +74,10 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
- if (dev && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
+ if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
ret = RTN_LOCAL;
- route_err = v6ops->route(nft_net(pkt), (struct dst_entry **)&rt,
+ route_err = nf_ip6_route(nft_net(pkt), (struct dst_entry **)&rt,
flowi6_to_flowi(&fl6), false);
if (route_err)
goto err;
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
deleted file mode 100644
index e06c82e9dfcd..000000000000
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nft_masq.h>
-#include <net/netfilter/ipv6/nf_nat_masquerade.h>
-
-static void nft_masq_ipv6_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_masq *priv = nft_expr_priv(expr);
- struct nf_nat_range2 range;
-
- memset(&range, 0, sizeof(range));
- range.flags = priv->flags;
- if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_min]);
- range.max_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_max]);
- }
- regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
- nft_out(pkt));
-}
-
-static void
-nft_masq_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
-{
- nf_ct_netns_put(ctx->net, NFPROTO_IPV6);
-}
-
-static struct nft_expr_type nft_masq_ipv6_type;
-static const struct nft_expr_ops nft_masq_ipv6_ops = {
- .type = &nft_masq_ipv6_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
- .eval = nft_masq_ipv6_eval,
- .init = nft_masq_init,
- .destroy = nft_masq_ipv6_destroy,
- .dump = nft_masq_dump,
- .validate = nft_masq_validate,
-};
-
-static struct nft_expr_type nft_masq_ipv6_type __read_mostly = {
- .family = NFPROTO_IPV6,
- .name = "masq",
- .ops = &nft_masq_ipv6_ops,
- .policy = nft_masq_policy,
- .maxattr = NFTA_MASQ_MAX,
- .owner = THIS_MODULE,
-};
-
-static int __init nft_masq_ipv6_module_init(void)
-{
- int ret;
-
- ret = nft_register_expr(&nft_masq_ipv6_type);
- if (ret < 0)
- return ret;
-
- ret = nf_nat_masquerade_ipv6_register_notifier();
- if (ret)
- nft_unregister_expr(&nft_masq_ipv6_type);
-
- return ret;
-}
-
-static void __exit nft_masq_ipv6_module_exit(void)
-{
- nft_unregister_expr(&nft_masq_ipv6_type);
- nf_nat_masquerade_ipv6_unregister_notifier();
-}
-
-module_init(nft_masq_ipv6_module_init);
-module_exit(nft_masq_ipv6_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
-MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "masq");
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
deleted file mode 100644
index 74269865acc8..000000000000
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netlink.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nft_redir.h>
-#include <net/netfilter/nf_nat_redirect.h>
-
-static void nft_redir_ipv6_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_redir *priv = nft_expr_priv(expr);
- struct nf_nat_range2 range;
-
- memset(&range, 0, sizeof(range));
- if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_min]);
- range.max_proto.all = (__force __be16)nft_reg_load16(
- &regs->data[priv->sreg_proto_max]);
- range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
- }
-
- range.flags |= priv->flags;
-
- regs->verdict.code =
- nf_nat_redirect_ipv6(pkt->skb, &range, nft_hook(pkt));
-}
-
-static void
-nft_redir_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
-{
- nf_ct_netns_put(ctx->net, NFPROTO_IPV6);
-}
-
-static struct nft_expr_type nft_redir_ipv6_type;
-static const struct nft_expr_ops nft_redir_ipv6_ops = {
- .type = &nft_redir_ipv6_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
- .eval = nft_redir_ipv6_eval,
- .init = nft_redir_init,
- .destroy = nft_redir_ipv6_destroy,
- .dump = nft_redir_dump,
- .validate = nft_redir_validate,
-};
-
-static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
- .family = NFPROTO_IPV6,
- .name = "redir",
- .ops = &nft_redir_ipv6_ops,
- .policy = nft_redir_policy,
- .maxattr = NFTA_REDIR_MAX,
- .owner = THIS_MODULE,
-};
-
-static int __init nft_redir_ipv6_module_init(void)
-{
- return nft_register_expr(&nft_redir_ipv6_type);
-}
-
-static void __exit nft_redir_ipv6_module_exit(void)
-{
- nft_unregister_expr(&nft_redir_ipv6_type);
-}
-
-module_init(nft_redir_ipv6_module_init);
-module_exit(nft_redir_ipv6_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
-MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir");
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 36a3d8dc61f5..1a832f5e190b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -69,8 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
static struct inet_frags ip6_frags;
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
- struct net_device *dev);
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
static void ip6_frag_expire(struct timer_list *t)
{
@@ -111,21 +111,26 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff,
u32 *prob_offset)
{
- struct sk_buff *prev, *next;
- struct net_device *dev;
- int offset, end, fragsize;
struct net *net = dev_net(skb_dst(skb)->dev);
+ int offset, end, fragsize;
+ struct sk_buff *prev_tail;
+ struct net_device *dev;
+ int err = -ENOENT;
u8 ecn;
if (fq->q.flags & INET_FRAG_COMPLETE)
goto err;
+ err = -EINVAL;
offset = ntohs(fhdr->frag_off) & ~0x7;
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
+ /* note that if prob_offset is set, the skb is freed elsewhere,
+ * we do not free it here.
+ */
return -1;
}
@@ -170,62 +175,27 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
if (end == offset)
goto discard_fq;
+ err = -ENOMEM;
/* Point into the IP datagram 'data' part. */
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
goto discard_fq;
- if (pskb_trim_rcsum(skb, end - offset))
+ err = pskb_trim_rcsum(skb, end - offset);
+ if (err)
goto discard_fq;
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = fq->q.fragments_tail;
- if (!prev || prev->ip_defrag_offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = fq->q.fragments; next != NULL; next = next->next) {
- if (next->ip_defrag_offset >= offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* RFC5722, Section 4, amended by Errata ID : 3089
- * When reassembling an IPv6 datagram, if
- * one or more its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments) MUST be silently discarded.
- */
-
- /* Check for overlap with preceding fragment. */
- if (prev &&
- (prev->ip_defrag_offset + prev->len) > offset)
- goto discard_fq;
-
- /* Look for overlap with succeeding segment. */
- if (next && next->ip_defrag_offset < end)
- goto discard_fq;
-
- /* Note : skb->ip_defrag_offset and skb->sk share the same location */
+ /* Note : skb->rbnode and skb->dev share the same location. */
dev = skb->dev;
- if (dev)
- fq->iif = dev->ifindex;
/* Makes sure compiler wont do silly aliasing games */
barrier();
- skb->ip_defrag_offset = offset;
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- fq->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- fq->q.fragments = skb;
+ prev_tail = fq->q.fragments_tail;
+ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+ if (err)
+ goto insert_error;
+
+ if (dev)
+ fq->iif = dev->ifindex;
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
@@ -246,44 +216,48 @@ found:
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len) {
- int res;
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
- res = ip6_frag_reasm(fq, prev, dev);
+ err = ip6_frag_reasm(fq, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
- return res;
+ return err;
}
skb_dst_drop(skb);
- return -1;
+ return -EINPROGRESS;
+insert_error:
+ if (err == IPFRAG_DUP) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ err = -EINVAL;
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_REASM_OVERLAPS);
discard_fq:
inet_frag_kill(&fq->q);
-err:
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
+err:
kfree_skb(skb);
- return -1;
+ return err;
}
/*
* Check if this packet is complete.
- * Returns NULL on failure by any reason, and pointer
- * to current nexthdr field in reassembled frame.
*
* It is called with locked fq, and caller must check that
* queue is eligible for reassembly i.e. it is not COMPLETE,
* the last and the first frames arrived and all the bits are here.
*/
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
- struct net_device *dev)
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
- struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len, delta;
unsigned int nhoff;
- int sum_truesize;
+ void *reasm_data;
+ int payload_len;
u8 ecn;
inet_frag_kill(&fq->q);
@@ -292,128 +266,47 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
if (unlikely(ecn == 0xff))
goto out_fail;
- /* Make the one we just received the head. */
- if (prev) {
- head = prev->next;
- fp = skb_clone(head, GFP_ATOMIC);
-
- if (!fp)
- goto out_oom;
-
- fp->next = head->next;
- if (!fp->next)
- fq->q.fragments_tail = fp;
- prev->next = fp;
-
- skb_morph(head, fq->q.fragments);
- head->next = fq->q.fragments->next;
-
- consume_skb(fq->q.fragments);
- fq->q.fragments = head;
- }
-
- WARN_ON(head == NULL);
- WARN_ON(head->ip_defrag_offset != 0);
+ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+ if (!reasm_data)
+ goto out_oom;
- /* Unfragmented part is taken from the first segment. */
- payload_len = ((head->data - skb_network_header(head)) -
+ payload_len = ((skb->data - skb_network_header(skb)) -
sizeof(struct ipv6hdr) + fq->q.len -
sizeof(struct frag_hdr));
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;
- delta = - head->truesize;
-
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- goto out_oom;
-
- delta += head->truesize;
- if (delta)
- add_frag_mem_limit(fq->q.net, delta);
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (!clone)
- goto out_oom;
- clone->next = head->next;
- head->next = clone;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
- add_frag_mem_limit(fq->q.net, clone->truesize);
- }
-
/* We have to remove fragment header from datagram and to relocate
* header in order to calculate ICV correctly. */
nhoff = fq->nhoffset;
- skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
- memmove(head->head + sizeof(struct frag_hdr), head->head,
- (head->data - head->head) - sizeof(struct frag_hdr));
- if (skb_mac_header_was_set(head))
- head->mac_header += sizeof(struct frag_hdr);
- head->network_header += sizeof(struct frag_hdr);
-
- skb_reset_transport_header(head);
- skb_push(head, head->data - skb_network_header(head));
-
- sum_truesize = head->truesize;
- for (fp = head->next; fp;) {
- bool headstolen;
- int delta;
- struct sk_buff *next = fp->next;
-
- sum_truesize += fp->truesize;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
-
- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
- kfree_skb_partial(fp, headstolen);
- } else {
- fp->sk = NULL;
- if (!skb_shinfo(head)->frag_list)
- skb_shinfo(head)->frag_list = fp;
- head->data_len += fp->len;
- head->len += fp->len;
- head->truesize += fp->truesize;
- }
- fp = next;
- }
- sub_frag_mem_limit(fq->q.net, sum_truesize);
+ skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
+ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+ (skb->data - skb->head) - sizeof(struct frag_hdr));
+ if (skb_mac_header_was_set(skb))
+ skb->mac_header += sizeof(struct frag_hdr);
+ skb->network_header += sizeof(struct frag_hdr);
+
+ skb_reset_transport_header(skb);
+
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
- skb_mark_not_on_list(head);
- head->dev = dev;
- head->tstamp = fq->q.stamp;
- ipv6_hdr(head)->payload_len = htons(payload_len);
- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
- IP6CB(head)->nhoff = nhoff;
- IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
- IP6CB(head)->frag_max_size = fq->q.max_size;
+ skb->dev = dev;
+ ipv6_hdr(skb)->payload_len = htons(payload_len);
+ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+ IP6CB(skb)->nhoff = nhoff;
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ IP6CB(skb)->frag_max_size = fq->q.max_size;
/* Yes, and fold redundant checksum back. 8) */
- skb_postpush_rcsum(head, skb_network_header(head),
- skb_network_header_len(head));
+ skb_postpush_rcsum(skb, skb_network_header(skb),
+ skb_network_header_len(skb));
rcu_read_lock();
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
rcu_read_unlock();
- fq->q.fragments = NULL;
fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
+ fq->q.last_run_head = NULL;
return 1;
out_oversize:
@@ -464,10 +357,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
return 1;
}
- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
- fhdr->frag_off & htons(IP6_MF))
- goto fail_hdr;
-
iif = skb->dev ? skb->dev->ifindex : 0;
fq = fq_find(net, fhdr->identification, hdr, iif);
if (fq) {
@@ -485,6 +374,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
if (prob_offset) {
__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
IPSTATS_MIB_INHDRERRORS);
+ /* icmpv6_param_prob() calls kfree_skb(skb) */
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
}
return ret;
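
The hunk above moves IPv6 fragment placement onto the shared inet_frag_queue_insert()/inet_frag_reasm_prepare() helpers, so the per-protocol linked-list walk and the RFC 5722 overlap check disappear from this file; overlapping fragments still discard the whole datagram and exact duplicates (IPFRAG_DUP) are dropped quietly. A minimal stand-alone sketch of that acceptance rule, with made-up types rather than the kernel's rbtree-backed frag_queue:

/* Hypothetical sketch of checking a new fragment [offset, end) against
 * fragments already accepted. Not kernel code. */
#include <stdio.h>

struct frag { int offset; int end; };

enum insert_result { INS_OK, INS_OVERLAP, INS_DUP };

static enum insert_result check_insert(const struct frag *frags, int n,
				       int offset, int end)
{
	for (int i = 0; i < n; i++) {
		if (frags[i].offset == offset && frags[i].end == end)
			return INS_DUP;		/* exact duplicate */
		if (offset < frags[i].end && frags[i].offset < end)
			return INS_OVERLAP;	/* any overlap: drop all */
	}
	return INS_OK;
}

int main(void)
{
	struct frag have[] = { { 0, 1280 }, { 1280, 2560 } };

	printf("%d\n", check_insert(have, 2, 2560, 2800)); /* INS_OK */
	printf("%d\n", check_insert(have, 2, 1000, 1400)); /* INS_OVERLAP */
	printf("%d\n", check_insert(have, 2, 0, 1280));    /* INS_DUP */
	return 0;
}
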
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 40b225f87d5e..4ef4bbdb49d4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
struct rt6_exception *rt6_ex)
{
+ struct fib6_info *from;
struct net *net;
if (!bucket || !rt6_ex)
return;
net = dev_net(rt6_ex->rt6i->dst.dev);
+ net->ipv6.rt6_stats->fib_rt_cache--;
+
+ /* purge completely the exception to allow releasing the held resources:
+ * some [sk] cache may keep the dst around for unlimited time
+ */
+ from = rcu_dereference_protected(rt6_ex->rt6i->from,
+ lockdep_is_held(&rt6_exception_lock));
+ rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+ fib6_info_release(from);
+ dst_dev_put(&rt6_ex->rt6i->dst);
+
hlist_del_rcu(&rt6_ex->hlist);
dst_release(&rt6_ex->rt6i->dst);
kfree_rcu(rt6_ex, rcu);
WARN_ON_ONCE(!bucket->depth);
bucket->depth--;
- net->ipv6.rt6_stats->fib_rt_cache--;
}
/* Remove oldest rt6_ex in bucket and free the memory
@@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
struct rt6_exception_bucket *bucket;
- struct fib6_info *from = rt->from;
struct in6_addr *src_key = NULL;
struct rt6_exception *rt6_ex;
-
- if (!from ||
- !(rt->rt6i_flags & RTF_CACHE))
- return;
+ struct fib6_info *from;
rcu_read_lock();
+ from = rcu_dereference(rt->from);
+ if (!from || !(rt->rt6i_flags & RTF_CACHE))
+ goto unlock;
+
bucket = rcu_dereference(from->rt6i_exception_bucket);
#ifdef CONFIG_IPV6_SUBTREES
@@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
if (rt6_ex)
rt6_ex->stamp = jiffies;
+unlock:
rcu_read_unlock();
}
@@ -2277,14 +2289,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
- bool from_set;
-
- rcu_read_lock();
- from_set = !!rcu_dereference(rt->from);
- rcu_read_unlock();
-
return !(rt->rt6i_flags & RTF_CACHE) &&
- (rt->rt6i_flags & RTF_PCPU || from_set);
+ (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
@@ -2742,20 +2748,24 @@ static int ip6_route_check_nh_onlink(struct net *net,
u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
const struct in6_addr *gw_addr = &cfg->fc_gateway;
u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+ struct fib6_info *from;
struct rt6_info *grt;
int err;
err = 0;
grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
if (grt) {
+ rcu_read_lock();
+ from = rcu_dereference(grt->from);
if (!grt->dst.error &&
/* ignore match if it is the default route */
- grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
+ from && !ipv6_addr_any(&from->fib6_dst.addr) &&
(grt->rt6i_flags & flags || dev != grt->dst.dev)) {
NL_SET_ERR_MSG(extack,
"Nexthop has invalid gateway or device mismatch");
err = -EINVAL;
}
+ rcu_read_unlock();
ip6_rt_put(grt);
}
@@ -4166,6 +4176,10 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
cfg->fc_flags |= RTF_GATEWAY;
}
+ if (tb[RTA_VIA]) {
+ NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
+ goto errout;
+ }
if (tb[RTA_DST]) {
int plen = (rtm->rtm_dst_len + 7) >> 3;
@@ -4251,17 +4265,6 @@ struct rt6_nh {
struct list_head next;
};
-static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
-{
- struct rt6_nh *nh;
-
- list_for_each_entry(nh, rt6_nh_list, next) {
- pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
- &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
- nh->r_cfg.fc_ifindex);
- }
-}
-
static int ip6_route_info_append(struct net *net,
struct list_head *rt6_nh_list,
struct fib6_info *rt,
@@ -4407,7 +4410,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
nh->fib6_info = NULL;
if (err) {
if (replace && nhn)
- ip6_print_replace_route_err(&rt6_nh_list);
+ NL_SET_ERR_MSG_MOD(extack,
+ "multipath route replace failed (check consistency of installed routes)");
err_nh = nh;
goto add_errout;
}
@@ -4659,7 +4663,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
table = rt->fib6_table->tb6_id;
else
table = RT6_TABLE_UNSPEC;
- rtm->rtm_table = table;
+ rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table))
goto nla_put_failure;
@@ -4822,6 +4826,73 @@ int rt6_dump_route(struct fib6_info *rt, void *p_arg)
arg->cb->nlh->nlmsg_seq, flags);
}
+static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid header for get route request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv6_policy, extack);
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
+ (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
+ rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
+ rtm->rtm_type) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
+ return -EINVAL;
+ }
+ if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid flags for get route request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv6_policy, extack);
+ if (err)
+ return err;
+
+ if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
+ (tb[RTA_DST] && !rtm->rtm_dst_len)) {
+ NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= RTA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_SRC:
+ case RTA_DST:
+ case RTA_IIF:
+ case RTA_OIF:
+ case RTA_MARK:
+ case RTA_UID:
+ case RTA_SPORT:
+ case RTA_DPORT:
+ case RTA_IP_PROTO:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -4836,8 +4907,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct flowi6 fl6 = {};
bool fibmatch;
- err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
- extack);
+ err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;
@@ -4883,7 +4953,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
- &fl6.flowi6_proto, extack);
+ &fl6.flowi6_proto, AF_INET6,
+ extack);
if (err)
goto errout;
}
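
inet6_rtm_valid_getroute_req() above is the strict-validation pattern: when the requester opted into strict checking, every header field and attribute of an RTM_GETROUTE request is matched against an allow-list and anything unexpected is rejected with an extack message. A small stand-alone sketch of the allow-list idea, with illustrative attribute IDs rather than the rtnetlink uapi values:

/* Hypothetical sketch: reject any attribute a GET request has no
 * business carrying. */
#include <stdbool.h>
#include <stdio.h>

enum { ATTR_SRC = 1, ATTR_DST, ATTR_IIF, ATTR_OIF, ATTR_MARK, ATTR_MAX = 16 };

static bool attr_allowed(int type)
{
	switch (type) {
	case ATTR_SRC:
	case ATTR_DST:
	case ATTR_IIF:
	case ATTR_OIF:
	case ATTR_MARK:
		return true;
	default:
		return false;
	}
}

static int validate_request(const bool *present /* indexed by type */)
{
	for (int type = 1; type <= ATTR_MAX; type++)
		if (present[type] && !attr_allowed(type))
			return -1;	/* -EINVAL in the real code */
	return 0;
}

int main(void)
{
	bool present[ATTR_MAX + 1] = { [ATTR_DST] = true, [ATTR_OIF] = true };

	printf("%d\n", validate_request(present)); /* 0: accepted */
	present[7] = true;			   /* unexpected attribute */
	printf("%d\n", validate_request(present)); /* -1: rejected */
	return 0;
}
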
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 8d0ba757a46c..9b2f272ca164 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
rcu_read_unlock();
genlmsg_end(msg, hdr);
- genlmsg_reply(msg, info);
-
- return 0;
+ return genlmsg_reply(msg, info);
nla_put_failure:
rcu_read_unlock();
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 8181ee7e1e27..ee5403cbe655 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
} else {
ip6_flow_hdr(hdr, 0, flowlabel);
hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
}
hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1e03305c0549..09e440e8dfae 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
}
err = 0;
- if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
+ if (__in6_dev_get(skb->dev) &&
+ !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
goto out;
if (t->parms.iph.daddr == 0)
@@ -1872,6 +1873,7 @@ static int __net_init sit_init_net(struct net *net)
err_reg_dev:
ipip6_dev_free(sitn->fb_tunnel_dev);
+ free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
}
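
The one-line sit_init_net() fix restores the usual unwind rule: a failed registration must release everything allocated before it, including the net_device itself. A toy sketch of that goto-based unwind, with hypothetical names:

/* Hypothetical sketch of reverse-order cleanup on an error path. */
#include <stdlib.h>

struct dev { void *priv; };

static int setup(struct dev **out, int register_ok)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return -1;

	d->priv = malloc(64);
	if (!d->priv)
		goto err_priv;

	if (!register_ok)	/* stand-in for register_netdevice() failing */
		goto err_register;

	*out = d;
	return 0;

err_register:
	free(d->priv);		/* analogue of ipip6_dev_free() */
err_priv:
	free(d);		/* analogue of the added free_netdev() */
	return -1;
}

int main(void)
{
	struct dev *d = NULL;

	if (setup(&d, 0) == 0) {	/* force the failure path */
		free(d->priv);
		free(d);
	}
	return 0;
}
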
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b81eb7cb815e..57ef69a10889 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -220,8 +220,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
- SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
-
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
@@ -1864,7 +1862,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
refcount_read(&sp->sk_refcnt), sp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
- (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
+ (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
tp->snd_cwnd,
state == TCP_LISTEN ?
fastopenq->max_qlen :
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9cbf363172bd..b444483cdb2b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -102,7 +102,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
-static void udp_v6_rehash(struct sock *sk)
+void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
@@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int peeked, peeking, off;
int err;
int is_udplite = IS_UDPLITE(sk);
+ struct udp_mib __percpu *mib;
bool checksum_valid = false;
- struct udp_mib *mib;
int is_udp4;
if (flags & MSG_ERRQUEUE)
@@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable);
*/
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, u32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
int i;
for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, u32 info);
+ u8 type, u8 code, int offset, __be32 info);
+ const struct ip6_tnl_encap_ops *encap;
- if (!ip6tun_encaps[i])
+ encap = rcu_dereference(ip6tun_encaps[i]);
+ if (!encap)
continue;
- handler = rcu_dereference(ip6tun_encaps[i]->err_handler);
+ handler = encap->err_handler;
if (handler && !handler(skb, opt, type, code, offset, info))
return 0;
}
@@ -1132,15 +1134,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
const int hlen = skb_network_header_len(skb) +
sizeof(struct udphdr);
- if (hlen + cork->gso_size > cork->fragsize)
+ if (hlen + cork->gso_size > cork->fragsize) {
+ kfree_skb(skb);
return -EINVAL;
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+ }
+ if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
return -EINVAL;
- if (udp_sk(sk)->no_check6_tx)
+ }
+ if (udp_sk(sk)->no_check6_tx) {
+ kfree_skb(skb);
return -EINVAL;
+ }
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb)))
+ dst_xfrm(skb_dst(skb))) {
+ kfree_skb(skb);
return -EIO;
+ }
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1390,10 +1400,7 @@ do_udp_sendmsg:
ipc6.opt = opt;
fl6.flowi6_proto = sk->sk_protocol;
- if (!ipv6_addr_any(daddr))
- fl6.daddr = *daddr;
- else
- fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+ fl6.daddr = *daddr;
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
@@ -1421,6 +1428,9 @@ do_udp_sendmsg:
}
}
+ if (ipv6_addr_any(&fl6.daddr))
+ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = false;
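
In udp_v6_send_skb() each early GSO sanity failure now frees the skb before returning instead of leaking it. The ownership rule, sketched stand-alone with an illustrative buffer type:

/* Hypothetical sketch: once a function owns the buffer, every early
 * error return must release it. */
#include <errno.h>
#include <stdlib.h>

struct buf { size_t len; unsigned char *data; };

static void buf_free(struct buf *b)
{
	if (b) {
		free(b->data);
		free(b);
	}
}

/* Consumes @b on both success and failure. */
static int send_buf(struct buf *b, size_t mtu)
{
	if (b->len > mtu) {
		buf_free(b);		/* the previously missing free */
		return -EINVAL;
	}
	/* ... transmit ... */
	buf_free(b);
	return 0;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->data = malloc(2000);
	b->len = 2000;
	return send_buf(b, 1500) == -EINVAL ? 0 : 1;
}
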
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 5730e6503cb4..20e324b6f358 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -13,6 +13,7 @@ int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
__be32, struct udp_table *);
int udp_v6_get_port(struct sock *sk, unsigned short snum);
+void udp_v6_rehash(struct sock *sk);
int udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index a125aebc29e5..f35907836444 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -49,6 +49,7 @@ struct proto udplitev6_prot = {
.recvmsg = udpv6_recvmsg,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
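
udplite's proto now wires up the same rehash hook as UDP, so address changes re-insert the socket into the right hash chain, as plain UDP already does. A sketch of that optional-hook wiring, with invented structure and function names:

/* Hypothetical sketch of an ops table with an optional rehash hook. */
#include <stdio.h>

struct sock;

struct proto_ops {
	const char *name;
	void (*rehash)(struct sock *sk);	/* optional */
};

struct sock { const struct proto_ops *ops; unsigned int hash; };

static void udp_rehash(struct sock *sk)
{
	sk->hash = sk->hash * 31 + 1;	/* stand-in for recomputing the hash */
}

static void sock_set_local_addr(struct sock *sk)
{
	/* ... update the address ... */
	if (sk->ops->rehash)		/* udplite now sets this too */
		sk->ops->rehash(sk);
}

int main(void)
{
	static const struct proto_ops udplite = { "udplite", udp_rehash };
	struct sock sk = { &udplite, 7 };

	sock_set_local_addr(&sk);
	printf("%s rehashed to %u\n", sk.ops->name, sk.hash);
	return 0;
}
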
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f5b4febeaa25..bc65db782bfb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
unsigned int i;
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
xfrm_flush_gc();
+ xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 571d824e4e24..c5c5ab6c5a1c 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -2036,13 +2036,13 @@ static int __init kcm_init(void)
kcm_muxp = kmem_cache_create("kcm_mux_cache",
sizeof(struct kcm_mux), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!kcm_muxp)
goto fail;
kcm_psockp = kmem_cache_create("kcm_psock_cache",
sizeof(struct kcm_psock), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!kcm_psockp)
goto fail;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 655c787f9d54..5651c29cb5bd 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
return 0;
}
-static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
- gfp_t allocation, struct sock *sk)
+static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
+ struct sock *sk)
{
int err = -ENOBUFS;
- sock_hold(sk);
- if (*skb2 == NULL) {
- if (refcount_read(&skb->users) != 1) {
- *skb2 = skb_clone(skb, allocation);
- } else {
- *skb2 = skb;
- refcount_inc(&skb->users);
- }
- }
- if (*skb2 != NULL) {
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
- skb_set_owner_r(*skb2, sk);
- skb_queue_tail(&sk->sk_receive_queue, *skb2);
- sk->sk_data_ready(sk);
- *skb2 = NULL;
- err = 0;
- }
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+ return err;
+
+ skb = skb_clone(skb, allocation);
+
+ if (skb) {
+ skb_set_owner_r(skb, sk);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk);
+ err = 0;
}
- sock_put(sk);
return err;
}
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
- struct sk_buff *skb2 = NULL;
int err = -ESRCH;
/* XXX Do we need something like netlink_overrun? I think
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
* socket.
*/
if (pfk->promisc)
- pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+ pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
/* the exact target will be processed later */
if (sk == one_sk)
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
continue;
}
- err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+ err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
/* Error is cleared after successful sending to at least one
* registered KM */
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
rcu_read_unlock();
if (one_sk != NULL)
- err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+ err = pfkey_broadcast_one(skb, allocation, one_sk);
- kfree_skb(skb2);
kfree_skb(skb);
return err;
}
@@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
if (proto == 0)
return -EINVAL;
- err = xfrm_state_flush(net, proto, true);
+ err = xfrm_state_flush(net, proto, true, false);
err2 = unicast_flush_resp(sk, hdr);
if (err || err2) {
if (err == -ESRCH) /* empty table - go quietly */
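
pfkey_broadcast_one() above drops the shared skb2 bookkeeping: each socket with receive-buffer room simply gets its own clone, and the caller still frees the original. The same idea in a stand-alone form, with illustrative types:

/* Hypothetical sketch: per-receiver copy of a broadcast message. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct receiver { size_t queued; size_t limit; };

static int deliver_one(const char *msg, size_t len, struct receiver *rx)
{
	char *copy;

	if (rx->queued + len > rx->limit)
		return -1;		/* receive buffer full */

	copy = malloc(len);
	if (!copy)
		return -1;
	memcpy(copy, msg, len);
	rx->queued += len;
	/* a real stack would queue @copy; here we just drop it */
	free(copy);
	return 0;
}

int main(void)
{
	struct receiver rx[2] = { { 0, 1024 }, { 1020, 1024 } };
	const char msg[] = "SADB_DUMP";
	int delivered = 0;

	for (int i = 0; i < 2; i++)
		if (deliver_one(msg, sizeof(msg), &rx[i]) == 0)
			delivered++;
	printf("delivered to %d of 2 receivers\n", delivered);
	return 0;
}
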
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 26f1d435696a..fed6becc5daf 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
#define L2TP_SLFLAG_S 0x40000000
#define L2TP_SL_SEQ_MASK 0x00ffffff
-#define L2TP_HDR_SIZE_SEQ 10
-#define L2TP_HDR_SIZE_NOSEQ 6
+#define L2TP_HDR_SIZE_MAX 14
/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
__skb_pull(skb, sizeof(struct udphdr));
/* Short packet? */
- if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
l2tp_info(tunnel, L2TP_MSG_DATA,
"%s: recv short packet (len=%d)\n",
tunnel->name, skb->len);
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
goto error;
}
+ if (tunnel->version == L2TP_HDR_VER_3 &&
+ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto error;
+
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
l2tp_session_dec_refcount(session);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9c9afe94d389..b2ce90260c35 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
}
#endif
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char **ptr, unsigned char **optr)
+{
+ int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+ if (opt_len > 0) {
+ int off = *ptr - *optr;
+
+ if (!pskb_may_pull(skb, off + opt_len))
+ return -1;
+
+ if (skb->data != *optr) {
+ *optr = skb->data;
+ *ptr = skb->data + off;
+ }
+ }
+
+ return 0;
+}
+
#define l2tp_printk(ptr, type, func, fmt, ...) \
do { \
if (((ptr)->debug) & (type)) \
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 35f6f86d4dcc..d4c60523c549 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto discard_sess;
+
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
l2tp_session_dec_refcount(session);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 237f1a4a0b0c..0ae6899edac0 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto discard_sess;
+
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
l2tp_session_dec_refcount(session);
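
l2tp_v3_ensure_opt_in_linear(), added in l2tp_core.h and called from the three receive paths above, guarantees the L2TPv3 cookie and L2-specific sublayer sit inside the skb's linear area before l2tp_recv_common() parses them, re-deriving the cached ptr/optr if the head was reallocated. A stand-alone sketch of that check, with made-up types:

/* Hypothetical sketch: verify the option region is contiguous and
 * recompute cached pointers from the saved offset if the head moved. */
#include <stdlib.h>

struct pkt {
	unsigned char *data;	/* start of linear area */
	size_t linear_len;	/* bytes contiguously available */
};

static int ensure_linear(struct pkt *p, unsigned char **ptr,
			 unsigned char **optr, size_t opt_len)
{
	size_t off = (size_t)(*ptr - *optr);

	if (off + opt_len > p->linear_len)
		return -1;	/* would read past the linear area */

	if (p->data != *optr) {	/* head moved: re-derive the pointers */
		*optr = p->data;
		*ptr = p->data + off;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct pkt p = { buf, sizeof(buf) };
	unsigned char *optr = buf, *ptr = buf + 12;

	return ensure_linear(&p, &ptr, &optr, 8); /* 0: safe to parse */
}
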
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 69e831bc317b..2c4cd4183bf9 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -8,7 +8,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -229,7 +229,7 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
local_bh_disable();
rcu_read_lock();
- drv_wake_tx_queue(sta->sdata->local, txqi);
+ schedule_and_wake_txq(sta->sdata->local, txqi);
rcu_read_unlock();
local_bh_enable();
}
@@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+ ieee80211_agg_stop_txq(sta, tid);
+
spin_unlock_bh(&sta->lock);
ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index de65fe3ed9cc..09dd1c2860fc 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
BSS_CHANGED_P2P_PS |
BSS_CHANGED_TXPOWER;
int err;
+ int prev_beacon_int;
old = sdata_dereference(sdata->u.ap.beacon, sdata);
if (old)
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->needed_rx_chains = sdata->local->rx_chains;
+ prev_beacon_int = sdata->vif.bss_conf.beacon_int;
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
if (params->he_cap)
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (!err)
ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
mutex_unlock(&local->mtx);
- if (err)
+ if (err) {
+ sdata->vif.bss_conf.beacon_int = prev_beacon_int;
return err;
+ }
/*
* Apply control port protocol, this allows us to
@@ -1231,6 +1235,11 @@ static void sta_apply_mesh_params(struct ieee80211_local *local,
ieee80211_mps_sta_status_update(sta);
changed |= ieee80211_mps_set_sta_local_pm(sta,
sdata->u.mesh.mshcfg.power_mode);
+
+ ewma_mesh_tx_rate_avg_init(&sta->mesh->tx_rate_avg);
+ /* init at low value */
+ ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg, 10);
+
break;
case NL80211_PLINK_LISTEN:
case NL80211_PLINK_BLOCKED:
@@ -1447,6 +1456,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
if (ieee80211_vif_is_mesh(&sdata->vif))
sta_apply_mesh_params(local, sta, params);
+ if (params->airtime_weight)
+ sta->airtime_weight = params->airtime_weight;
+
/* set the STA state after all sta info from usermode has been set */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
set & BIT(NL80211_STA_FLAG_ASSOCIATED)) {
@@ -1490,6 +1502,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
sta->sta.tdls = true;
+ if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated)
+ return -EINVAL;
+
err = sta_apply_parameters(local, sta, params);
if (err) {
sta_info_free(local, sta);
@@ -1742,7 +1758,9 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
MPATH_INFO_EXPTIME |
MPATH_INFO_DISCOVERY_TIMEOUT |
MPATH_INFO_DISCOVERY_RETRIES |
- MPATH_INFO_FLAGS;
+ MPATH_INFO_FLAGS |
+ MPATH_INFO_HOP_COUNT |
+ MPATH_INFO_PATH_CHANGE;
pinfo->frame_qlen = mpath->frame_queue.qlen;
pinfo->sn = mpath->sn;
@@ -1762,6 +1780,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
pinfo->flags |= NL80211_MPATH_FLAG_FIXED;
if (mpath->flags & MESH_PATH_RESOLVED)
pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED;
+ pinfo->hop_count = mpath->hop_count;
+ pinfo->path_change_count = mpath->path_change_count;
}
static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
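
sta_apply_mesh_params() now initializes the tx-rate EWMA for a new mesh peer and seeds it at a low value, so the airtime metric works from a running average rather than a single last-rate sample (see the mesh_hwmp.c hunk further down). A simple fixed-weight moving average of that general shape, with arbitrary weight and units rather than the mac80211 parameters:

/* Hypothetical fixed-weight EWMA: new = old - old/weight + sample/weight. */
#include <stdio.h>

struct ewma { unsigned long avg; unsigned int weight; };

static void ewma_init(struct ewma *e, unsigned int weight, unsigned long seed)
{
	e->weight = weight;
	e->avg = seed;
}

static void ewma_add(struct ewma *e, unsigned long sample)
{
	e->avg = e->avg - e->avg / e->weight + sample / e->weight;
}

int main(void)
{
	struct ewma rate;

	ewma_init(&rate, 8, 10);	/* seed low, like the patch does */
	for (int i = 0; i < 16; i++)
		ewma_add(&rate, 600);	/* 600 = 60.0 Mb/s in 100 kb/s units */
	printf("avg ~ %lu\n", rate.avg);
	return 0;
}
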
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 3fe541e358f3..2d43bc127043 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -3,7 +3,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* GPLv2
*
@@ -218,6 +218,9 @@ static const char *hw_flag_names[] = {
FLAG(BUFF_MMPDU_TXQ),
FLAG(SUPPORTS_VHT_EXT_NSS_BW),
FLAG(STA_MMPDU_TXQ),
+ FLAG(TX_STATUS_NO_AMPDU_LEN),
+ FLAG(SUPPORTS_MULTI_BSSID),
+ FLAG(SUPPORTS_ONLY_HE_MULTI_BSSID),
#undef FLAG
};
@@ -383,6 +386,9 @@ void debugfs_hw_add(struct ieee80211_local *local)
if (local->ops->wake_tx_queue)
DEBUGFS_ADD_MODE(aqm, 0600);
+ debugfs_create_u16("airtime_flags", 0600,
+ phyd, &local->airtime_flags);
+
statsd = debugfs_create_dir("statistics", phyd);
/* if the dir failed, don't put all the other things into the root! */
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index b753194710ad..8e921281e0d5 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -4,7 +4,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -181,9 +181,9 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
txqi->tin.tx_bytes,
txqi->tin.tx_packets,
txqi->flags,
- txqi->flags & (1<<IEEE80211_TXQ_STOP) ? "STOP" : "RUN",
- txqi->flags & (1<<IEEE80211_TXQ_AMPDU) ? " AMPDU" : "",
- txqi->flags & (1<<IEEE80211_TXQ_NO_AMSDU) ? " NO-AMSDU" : "");
+ test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
+ test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
+ test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "");
}
rcu_read_unlock();
@@ -195,6 +195,64 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
}
STA_OPS(aqm);
+static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct sta_info *sta = file->private_data;
+ struct ieee80211_local *local = sta->sdata->local;
+ size_t bufsz = 200;
+ char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
+ u64 rx_airtime = 0, tx_airtime = 0;
+ s64 deficit[IEEE80211_NUM_ACS];
+ ssize_t rv;
+ int ac;
+
+ if (!buf)
+ return -ENOMEM;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ spin_lock_bh(&local->active_txq_lock[ac]);
+ rx_airtime += sta->airtime[ac].rx_airtime;
+ tx_airtime += sta->airtime[ac].tx_airtime;
+ deficit[ac] = sta->airtime[ac].deficit;
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+ }
+
+ p += scnprintf(p, bufsz + buf - p,
+ "RX: %llu us\nTX: %llu us\nWeight: %u\n"
+ "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
+ rx_airtime,
+ tx_airtime,
+ sta->airtime_weight,
+ deficit[0],
+ deficit[1],
+ deficit[2],
+ deficit[3]);
+
+ rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ kfree(buf);
+ return rv;
+}
+
+static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct sta_info *sta = file->private_data;
+ struct ieee80211_local *local = sta->sdata->local;
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ spin_lock_bh(&local->active_txq_lock[ac]);
+ sta->airtime[ac].rx_airtime = 0;
+ sta->airtime[ac].tx_airtime = 0;
+ sta->airtime[ac].deficit = sta->airtime_weight;
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+ }
+
+ return count;
+}
+STA_OPS_RW(airtime);
+
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -627,6 +685,9 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
"SUBCHAN-SELECVITE-TRANSMISSION");
PFLAG(MAC, 5, UL_2x996_TONE_RU, "UL-2x996-TONE-RU");
PFLAG(MAC, 5, OM_CTRL_UL_MU_DATA_DIS_RX, "OM-CTRL-UL-MU-DATA-DIS-RX");
+ PFLAG(MAC, 5, HE_DYNAMIC_SM_PS, "HE-DYNAMIC-SM-PS");
+ PFLAG(MAC, 5, PUNCTURED_SOUNDING, "PUNCTURED-SOUNDING");
+ PFLAG(MAC, 5, HT_VHT_TRIG_FRAME_RX, "HT-VHT-TRIG-FRAME-RX");
cap = hec->he_cap_elem.phy_cap_info;
p += scnprintf(p, buf_sz + buf - p,
@@ -761,18 +822,18 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
PFLAG(PHY, 8, MIDAMBLE_RX_TX_2X_AND_1XLTF,
"MIDAMBLE-RX-TX-2X-AND-1XLTF");
- switch (cap[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK) {
- case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ:
- PRINT("DDCM-MAX-BW-20MHZ");
+ switch (cap[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK) {
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242:
+ PRINT("DCM-MAX-RU-242");
break;
- case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ:
- PRINT("DCM-MAX-BW-40MHZ");
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484:
+ PRINT("DCM-MAX-RU-484");
break;
- case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ:
- PRINT("DCM-MAX-BW-80MHZ");
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996:
+ PRINT("DCM-MAX-RU-996");
break;
- case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ:
- PRINT("DCM-MAX-BW-160-OR-80P80-MHZ");
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996:
+ PRINT("DCM-MAX-RU-2x996");
break;
}
@@ -789,6 +850,18 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
"RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB");
+ switch (cap[9] & IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) {
+ case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US:
+ PRINT("NOMINAL-PACKET-PADDING-0US");
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US:
+ PRINT("NOMINAL-PACKET-PADDING-8US");
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US:
+ PRINT("NOMINAL-PACKET-PADDING-16US");
+ break;
+ }
+
#undef PFLAG_RANGE_DEFAULT
#undef PFLAG_RANGE
#undef PFLAG
@@ -906,6 +979,10 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
if (local->ops->wake_tx_queue)
DEBUGFS_ADD(aqm);
+ if (wiphy_ext_feature_isset(local->hw.wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ DEBUGFS_ADD(airtime);
+
if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
debugfs_create_x32("driver_buffered_tids", 0400,
sta->debugfs_dir,
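
The new airtime debugfs file exposes accumulated rx/tx airtime and the per-AC scheduling deficit, and writing it resets each deficit back to the station weight. The fairness scheme behind those fields is a deficit round-robin, roughly as sketched below, with invented numbers and field names:

/* Hypothetical deficit round-robin: a station may transmit while its
 * deficit is positive; used airtime is charged against the deficit,
 * and skipped stations are topped up by their weight. */
#include <stdio.h>

struct station { const char *name; int weight; long deficit; };

static struct station *pick_next(struct station *stas, int n)
{
	for (;;) {
		for (int i = 0; i < n; i++)
			if (stas[i].deficit > 0)
				return &stas[i];
		for (int i = 0; i < n; i++)
			stas[i].deficit += stas[i].weight;
	}
}

int main(void)
{
	struct station stas[] = {
		{ "fast", 256, 256 },
		{ "slow", 256, 256 },
	};

	for (int round = 0; round < 6; round++) {
		struct station *s = pick_next(stas, 2);

		/* pretend the slow station burns 4x the airtime */
		long used = (s == &stas[1]) ? 4000 : 1000;
		s->deficit -= used;
		printf("served %s (%ld us)\n", s->name, used);
	}
	return 0;
}
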
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 3e0d5922a440..28d022a3eee3 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018 Intel Corporation
+* Copyright (C) 2018 - 2019 Intel Corporation
*/
#ifndef __MAC80211_DRIVER_OPS
@@ -1052,6 +1052,35 @@ drv_post_channel_switch(struct ieee80211_sub_if_data *sdata)
return ret;
}
+static inline void
+drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_abort_channel_switch(local, sdata);
+
+ if (local->ops->abort_channel_switch)
+ local->ops->abort_channel_switch(&local->hw, &sdata->vif);
+}
+
+static inline void
+drv_channel_switch_rx_beacon(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_channel_switch_rx_beacon(local, sdata, ch_switch);
+ if (local->ops->channel_switch_rx_beacon)
+ local->ops->channel_switch_rx_beacon(&local->hw, &sdata->vif,
+ ch_switch);
+}
+
static inline int drv_join_ibss(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
@@ -1173,6 +1202,13 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
local->ops->wake_tx_queue(&local->hw, &txq->txq);
}
+static inline void schedule_and_wake_txq(struct ieee80211_local *local,
+ struct txq_info *txqi)
+{
+ ieee80211_schedule_txq(&local->hw, &txqi->txq);
+ drv_wake_tx_queue(local, txqi);
+}
+
static inline int drv_can_aggregate_in_amsdu(struct ieee80211_local *local,
struct sk_buff *head,
struct sk_buff *skb)
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f849ea814993..e03c46ac8e4d 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -107,6 +107,14 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
__check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
IEEE80211_HT_CAP_40MHZ_INTOLERANT);
+ /* Allow user to enable TX STBC bit */
+ __check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_TX_STBC);
+
+ /* Allow user to configure RX STBC bits */
+ if (ht_capa_mask->cap_info & IEEE80211_HT_CAP_RX_STBC)
+ ht_cap->cap |= ht_capa->cap_info & IEEE80211_HT_CAP_RX_STBC;
+
/* Allow user to decrease AMPDU factor */
if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_FACTOR) {
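
The ieee80211_apply_htcap_overrides() additions let userspace force the TX STBC bit on and copy the RX STBC field through when the override mask selects it. The masked-override rule itself, sketched with made-up bit values:

/* Hypothetical masked override: only bits the mask selects may be
 * taken from the user's value; everything else keeps the hw cap. */
#include <stdint.h>
#include <stdio.h>

#define CAP_TX_STBC	0x0080
#define CAP_RX_STBC	0x0300		/* two-bit field */

static uint16_t apply_override(uint16_t hw_cap, uint16_t user_cap,
			       uint16_t user_mask)
{
	return (hw_cap & ~user_mask) | (user_cap & user_mask);
}

int main(void)
{
	uint16_t hw = CAP_TX_STBC;		/* hw: TX STBC only */
	uint16_t user = CAP_RX_STBC;		/* user asks for RX STBC */
	uint16_t mask = CAP_RX_STBC;		/* and is allowed to set it */

	printf("0x%04x\n", apply_override(hw, user, mask)); /* 0x0380 */
	return 0;
}
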
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 0d704e8d7078..4e4507115cf3 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -8,6 +8,7 @@
* Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1124,8 +1125,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
ieee80211_update_sta_info(sdata, mgmt, len, rx_status, elems, channel);
- bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
- channel);
+ bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, channel);
if (!bss)
return;
@@ -1604,7 +1604,7 @@ void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata,
return;
ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
- false, &elems);
+ false, &elems, mgmt->bssid, NULL);
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
}
@@ -1654,7 +1654,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee802_11_parse_elems(
mgmt->u.action.u.chan_switch.variable,
- ies_len, true, &elems);
+ ies_len, true, &elems, mgmt->bssid, NULL);
if (elems.parse_error)
break;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 7dfb4e2f98b2..e170f986d226 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -556,6 +556,12 @@ struct ieee80211_if_managed {
* get stuck in a downgraded situation and flush takes forever.
*/
struct delayed_work tx_tspec_wk;
+
+ /* Information elements from the last transmitted (Re)Association
+ * Request frame.
+ */
+ u8 *assoc_req_ies;
+ size_t assoc_req_ies_len;
};
struct ieee80211_if_ibss {
@@ -831,6 +837,8 @@ enum txq_info_flags {
* a fq_flow which is already owned by a different tin
* @def_cvars: codel vars for @def_flow
* @frags: used to keep fragments created after dequeue
+ * @schedule_order: used with ieee80211_local->active_txqs
+ * @schedule_round: counter to prevent infinite loops on TXQ scheduling
*/
struct txq_info {
struct fq_tin tin;
@@ -838,6 +846,8 @@ struct txq_info {
struct codel_vars def_cvars;
struct codel_stats cstats;
struct sk_buff_head frags;
+ struct list_head schedule_order;
+ u16 schedule_round;
unsigned long flags;
/* keep last! */
@@ -1129,6 +1139,13 @@ struct ieee80211_local {
struct codel_vars *cvars;
struct codel_params cparams;
+ /* protects active_txqs and txqi->schedule_order */
+ spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
+ struct list_head active_txqs[IEEE80211_NUM_ACS];
+ u16 schedule_round[IEEE80211_NUM_ACS];
+
+ u16 airtime_flags;
+
const struct ieee80211_ops *ops;
/*
@@ -1436,6 +1453,7 @@ struct ieee80211_csa_ie {
u8 ttl;
u16 pre_value;
u16 reason_code;
+ u32 max_switch_time;
};
/* Parsed Information Elements */
@@ -1476,6 +1494,7 @@ struct ieee802_11_elems {
const struct ieee80211_channel_sw_ie *ch_switch_ie;
const struct ieee80211_ext_chansw_ie *ext_chansw_ie;
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
+ const u8 *max_channel_switch_time;
const u8 *country_elem;
const u8 *pwr_constr_elem;
const u8 *cisco_dtpc_elem;
@@ -1484,6 +1503,12 @@ struct ieee802_11_elems {
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie;
+ const struct ieee80211_multiple_bssid_configuration *mbssid_config_ie;
+ const struct ieee80211_bssid_index *bssid_index;
+ const u8 *nontransmitted_bssid_profile;
+ u8 max_bssid_indicator;
+ u8 dtim_count;
+ u8 dtim_period;
/* length of them, respectively */
u8 ext_capab_len;
@@ -1502,6 +1527,7 @@ struct ieee802_11_elems {
u8 prep_len;
u8 perr_len;
u8 country_elem_len;
+ u8 bssid_index_len;
/* whether a parse error occurred while retrieving these elements */
bool parse_error;
@@ -1661,7 +1687,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct ieee80211_rx_status *rx_status,
struct ieee80211_mgmt *mgmt,
size_t len,
- struct ieee802_11_elems *elems,
struct ieee80211_channel *channel);
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss);
@@ -1945,12 +1970,16 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
struct ieee802_11_elems *elems,
- u64 filter, u32 crc);
+ u64 filter, u32 crc, u8 *transmitter_bssid,
+ u8 *bss_bssid);
static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
bool action,
- struct ieee802_11_elems *elems)
+ struct ieee802_11_elems *elems,
+ u8 *transmitter_bssid,
+ u8 *bss_bssid)
{
- ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
+ ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0,
+ transmitter_bssid, bss_bssid);
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 87a729926734..800e67615e2a 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -478,6 +478,8 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
IEEE80211_HT_CAP_MAX_AMSDU |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_TX_STBC |
+ IEEE80211_HT_CAP_RX_STBC |
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_40MHZ_INTOLERANT),
.mcs = {
@@ -615,13 +617,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
* We need a bit of data queued to build aggregates properly, so
* instruct the TCP stack to allow more than a single ms of data
* to be queued in the stack. The value is a bit-shift of 1
- * second, so 8 is ~4ms of queued data. Only affects local TCP
+ * second, so 7 is ~8ms of queued data. Only affects local TCP
* sockets.
* This is the default, anyhow - drivers may need to override it
* for local reasons (longer buffers, longer completion time, or
* similar).
*/
- local->hw.tx_sk_pacing_shift = 8;
+ local->hw.tx_sk_pacing_shift = 7;
/* set up some defaults */
local->hw.queues = 1;
@@ -663,6 +665,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
spin_lock_init(&local->rx_path_lock);
spin_lock_init(&local->queue_stop_reason_lock);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ INIT_LIST_HEAD(&local->active_txqs[i]);
+ spin_lock_init(&local->active_txq_lock[i]);
+ }
+ local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX;
+
INIT_LIST_HEAD(&local->chanctx_list);
mutex_init(&local->chanctx_mtx);
@@ -1104,6 +1112,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA))
local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING;
+ /* mac80211 supports multi BSSID, if the driver supports it */
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_MULTI_BSSID)) {
+ local->hw.wiphy->support_mbssid = true;
+ if (ieee80211_hw_check(&local->hw,
+ SUPPORTS_ONLY_HE_MULTI_BSSID))
+ local->hw.wiphy->support_only_he_mbssid = true;
+ else
+ local->ext_capa[2] |=
+ WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
+ }
+
local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
result = wiphy_register(local->hw.wiphy);
@@ -1148,6 +1167,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!local->hw.max_nan_de_entries)
local->hw.max_nan_de_entries = IEEE80211_MAX_NAN_INSTANCE_ID;
+ if (!local->hw.weight_multiplier)
+ local->hw.weight_multiplier = 1;
+
result = ieee80211_wep_init(local);
if (result < 0)
wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
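
The tx_sk_pacing_shift change from 8 to 7 doubles how much locally generated TCP data may sit queued below the socket: TSQ allows roughly a 1/(1 << shift) second's worth, i.e. about 3.9 ms at shift 8 and 7.8 ms at shift 7. A quick stand-alone computation, using an arbitrary 300 Mbit/s rate:

/* Illustrative only: queued budget is about rate >> shift bytes. */
#include <stdio.h>

int main(void)
{
	const unsigned long rate_bps = 300000000UL;	/* 300 Mbit/s */

	for (int shift = 6; shift <= 9; shift++) {
		double ms = 1000.0 / (1UL << shift);
		unsigned long bytes = (rate_bps / 8) >> shift;
		printf("shift %d: %6.2f ms, %lu bytes at 300 Mbit/s\n",
		       shift, ms, bytes);
	}
	return 0;
}
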
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c90452aa0c42..766e5e5bab8a 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*
@@ -1106,7 +1106,8 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
if (baselen > len)
return;
- ieee802_11_parse_elems(pos, len - baselen, false, &elems);
+ ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid,
+ NULL);
if (!elems.mesh_id)
return;
@@ -1170,7 +1171,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
return;
ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
- false, &elems);
+ false, &elems, mgmt->bssid, NULL);
/* ignore non-mesh or secure / unsecure mismatch */
if ((!elems.mesh_id || !elems.mesh_config) ||
@@ -1306,7 +1307,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
pos = mgmt->u.action.u.chan_switch.variable;
baselen = offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
- ieee802_11_parse_elems(pos, len - baselen, true, &elems);
+ ieee802_11_parse_elems(pos, len - baselen, true, &elems,
+ mgmt->bssid, NULL);
ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
if (!--ifmsh->chsw_ttl)
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index cad6592c52a1..574c3891c4b2 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
* @dst: mesh path destination mac address
* @mpp: mesh proxy mac address
* @rhash: rhashtable list pointer
+ * @walk_list: linked list containing all mesh_path objects.
* @gate_list: list pointer for known gates list
* @sdata: mesh subif
* @next_hop: mesh neighbor to which frames for this destination will be
@@ -94,6 +95,7 @@ enum mesh_deferred_task_flags {
* @last_preq_to_root: Timestamp of last PREQ sent to root
* @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
+ * @path_change_count: the number of path changes to destination
*
*
* The dst address is unique in the mesh path table. Since the mesh_path is
@@ -105,6 +107,7 @@ struct mesh_path {
u8 dst[ETH_ALEN];
u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
struct rhash_head rhash;
+ struct hlist_node walk_list;
struct hlist_node gate_list;
struct ieee80211_sub_if_data *sdata;
struct sta_info __rcu *next_hop;
@@ -124,6 +127,7 @@ struct mesh_path {
unsigned long last_preq_to_root;
bool is_root;
bool is_gate;
+ u32 path_change_count;
};
/**
@@ -133,12 +137,16 @@ struct mesh_path {
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
+ struct hlist_head walk_head;
+ spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
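
[Editor's sketch] The mesh.h hunks above pair the rhashtable with a plain walk list (walk_head) guarded by walk_lock, so the path-table code further down in this diff can iterate and flush entries without the rhashtable walker API. Below is a rough, self-contained userspace sketch of that pattern, assuming a toy bucket hash in place of rhashtable and a mutex in place of the spinlock/RCU pair; all names are illustrative, not mac80211 API.

#include <pthread.h>
#include <stddef.h>

#define NBUCKETS 16

struct path {
	unsigned char dst[6];
	struct path *hnext;	/* hash-bucket chain, used for lookups     */
	struct path *wnext;	/* walk list, used for iteration/flushing  */
};

struct table {
	struct path *bucket[NBUCKETS];
	struct path *walk_head;		/* every entry, newest first      */
	pthread_mutex_t walk_lock;	/* protects buckets and walk_head */
};

static unsigned int hash(const unsigned char *dst)
{
	return (dst[4] ^ dst[5]) % NBUCKETS;
}

/* Insert under walk_lock so index lookups and flushes stay coherent. */
static void table_add(struct table *t, struct path *p)
{
	unsigned int b = hash(p->dst);

	pthread_mutex_lock(&t->walk_lock);
	p->hnext = t->bucket[b];
	t->bucket[b] = p;
	p->wnext = t->walk_head;
	t->walk_head = p;
	pthread_mutex_unlock(&t->walk_lock);
}

/* Walk every entry without touching the hash structure at all. */
static struct path *table_lookup_by_idx(struct table *t, int idx)
{
	struct path *p;
	int i = 0;

	pthread_mutex_lock(&t->walk_lock);
	for (p = t->walk_head; p; p = p->wnext)
		if (i++ == idx)
			break;
	pthread_mutex_unlock(&t->walk_lock);
	return p;
}

The kernel version uses an hlist (for O(1) removal) and lets readers traverse the walk list under RCU while writers take walk_lock, which the mutex above only approximates.
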
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 6950cd0bf594..f7517668e77a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
+ * Copyright (C) 2019 Intel Corporation
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -300,6 +301,7 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
{
struct ieee80211_tx_info *txinfo = st->info;
int failed;
+ struct rate_info rinfo;
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
@@ -310,12 +312,15 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
LINK_FAIL_THRESH)
mesh_plink_broken(sta);
+
+ sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
+ ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg,
+ cfg80211_calculate_bitrate(&rinfo));
}
static u32 airtime_link_metric_get(struct ieee80211_local *local,
struct sta_info *sta)
{
- struct rate_info rinfo;
/* This should be adjusted for each device */
int device_constant = 1 << ARITH_SHIFT;
int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
@@ -339,8 +344,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
if (fail_avg > LINK_FAIL_THRESH)
return MAX_METRIC;
- sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
- rate = cfg80211_calculate_bitrate(&rinfo);
+ rate = ewma_mesh_tx_rate_avg_read(&sta->mesh->tx_rate_avg);
if (WARN_ON(!rate))
return MAX_METRIC;
@@ -386,6 +390,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
unsigned long orig_lifetime, exp_time;
u32 last_hop_metric, new_metric;
bool process = true;
+ u8 hopcount;
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
@@ -404,6 +409,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
orig_metric = PREQ_IE_METRIC(hwmp_ie);
+ hopcount = PREQ_IE_HOPCOUNT(hwmp_ie) + 1;
break;
case MPATH_PREP:
/* Originator here refers to the MP that was the target in the
@@ -415,6 +421,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
orig_metric = PREP_IE_METRIC(hwmp_ie);
+ hopcount = PREP_IE_HOPCOUNT(hwmp_ie) + 1;
break;
default:
rcu_read_unlock();
@@ -441,7 +448,10 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
(mpath->flags & MESH_PATH_SN_VALID)) {
if (SN_GT(mpath->sn, orig_sn) ||
(mpath->sn == orig_sn &&
- new_metric >= mpath->metric)) {
+ (rcu_access_pointer(mpath->next_hop) !=
+ sta ?
+ mult_frac(new_metric, 10, 9) :
+ new_metric) >= mpath->metric)) {
process = false;
fresh_info = false;
}
@@ -476,12 +486,15 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
}
if (fresh_info) {
+ if (rcu_access_pointer(mpath->next_hop) != sta)
+ mpath->path_change_count++;
mesh_path_assign_nexthop(mpath, sta);
mpath->flags |= MESH_PATH_SN_VALID;
mpath->metric = new_metric;
mpath->sn = orig_sn;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
+ mpath->hop_count = hopcount;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
@@ -506,8 +519,10 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (mpath) {
spin_lock_bh(&mpath->state_lock);
if ((mpath->flags & MESH_PATH_FIXED) ||
- ((mpath->flags & MESH_PATH_ACTIVE) &&
- (last_hop_metric > mpath->metric)))
+ ((mpath->flags & MESH_PATH_ACTIVE) &&
+ ((rcu_access_pointer(mpath->next_hop) != sta ?
+ mult_frac(last_hop_metric, 10, 9) :
+ last_hop_metric) > mpath->metric)))
fresh_info = false;
} else {
mpath = mesh_path_add(sdata, ta);
@@ -519,10 +534,13 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
}
if (fresh_info) {
+ if (rcu_access_pointer(mpath->next_hop) != sta)
+ mpath->path_change_count++;
mesh_path_assign_nexthop(mpath, sta);
mpath->metric = last_hop_metric;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
+ mpath->hop_count = 1;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
@@ -909,7 +927,7 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
- len - baselen, false, &elems);
+ len - baselen, false, &elems, mgmt->bssid, NULL);
if (elems.preq) {
if (elems.preq_len != 37)
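
[Editor's sketch] The hwmp_route_info_get() hunks above add hysteresis when considering a different next hop: the candidate metric is inflated by mult_frac(new_metric, 10, 9), so a path through a new neighbour only wins if it is more than roughly 10% better than the current one. A minimal standalone sketch of that comparison, with illustrative helper names rather than the kernel macros:

#include <stdbool.h>
#include <stdio.h>

/* Scale a metric by num/den while limiting intermediate overflow; the
 * kernel uses mult_frac() for the same purpose.
 */
static unsigned int scale_metric(unsigned int m, unsigned int num,
				 unsigned int den)
{
	return (m / den) * num + (m % den) * num / den;
}

/* Keep the current next hop unless the candidate via a *different*
 * neighbour still beats it after a ~10% penalty.
 */
static bool should_switch(unsigned int cur_metric, unsigned int new_metric,
			  bool same_next_hop)
{
	unsigned int cmp = same_next_hop ? new_metric
					 : scale_metric(new_metric, 10, 9);

	return cmp < cur_metric;
}

int main(void)
{
	/* 5% better via a new neighbour: not enough, stay put. */
	printf("%d\n", should_switch(100, 95, false));	/* prints 0 */
	/* 15% better via a new neighbour: switch. */
	printf("%d\n", should_switch(100, 85, false));	/* prints 1 */
	return 0;
}
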
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a5125624a76d..95eb5064fa91 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
return NULL;
INIT_HLIST_HEAD(&newtbl->known_gates);
+ INIT_HLIST_HEAD(&newtbl->walk_head);
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
+ spin_lock_init(&newtbl->walk_lock);
return newtbl;
}
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
- int i = 0, ret;
- struct mesh_path *mpath = NULL;
- struct rhashtable_iter iter;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return NULL;
-
- rhashtable_walk_start(&iter);
+ int i = 0;
+ struct mesh_path *mpath;
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (i++ == idx)
break;
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
- if (IS_ERR(mpath) || !mpath)
+ if (!mpath)
return NULL;
if (mpath_expired(mpath)) {
@@ -415,7 +404,6 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
{
struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath;
- int ret;
if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */
@@ -432,29 +420,23 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
return ERR_PTR(-ENOMEM);
tbl = sdata->u.mesh.mesh_paths;
- do {
- ret = rhashtable_lookup_insert_fast(&tbl->rhead,
- &new_mpath->rhash,
- mesh_rht_params);
-
- if (ret == -EEXIST)
- mpath = rhashtable_lookup_fast(&tbl->rhead,
- dst,
- mesh_rht_params);
-
- } while (unlikely(ret == -EEXIST && !mpath));
-
- if (ret && ret != -EEXIST)
- return ERR_PTR(ret);
-
- /* At this point either new_mpath was added, or we found a
- * matching entry already in the table; in the latter case
- * free the unnecessary new entry.
- */
- if (ret == -EEXIST) {
+ spin_lock_bh(&tbl->walk_lock);
+ mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
+ &new_mpath->rhash,
+ mesh_rht_params);
+ if (!mpath)
+ hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
+ spin_unlock_bh(&tbl->walk_lock);
+
+ if (mpath) {
kfree(new_mpath);
+
+ if (IS_ERR(mpath))
+ return mpath;
+
new_mpath = mpath;
}
+
sdata->u.mesh.mesh_paths_generation++;
return new_mpath;
}
@@ -480,9 +462,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
tbl = sdata->u.mesh.mpp_paths;
+
+ spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
mesh_rht_params);
+ if (!ret)
+ hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+ spin_unlock_bh(&tbl->walk_lock);
+
+ if (ret)
+ kfree(new_mpath);
sdata->u.mesh.mpp_paths_generation++;
return ret;
@@ -503,20 +493,9 @@ void mesh_plink_broken(struct sta_info *sta)
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +509,7 @@ void mesh_plink_broken(struct sta_info *sta)
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
}
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ rcu_read_unlock();
}
static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +529,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
+ hlist_del_rcu(&mpath->walk_list);
rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
mesh_path_free_rcu(tbl, mpath);
}
@@ -571,27 +550,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ struct hlist_node *n;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta)
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +565,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
{
struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ struct hlist_node *n;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (ether_addr_equal(mpath->mpp, proxy))
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
static void table_flush_by_iface(struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
+ struct hlist_node *n;
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
/**
@@ -675,15 +616,15 @@ static int table_path_del(struct mesh_table *tbl,
{
struct mesh_path *mpath;
- rcu_read_lock();
+ spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
if (!mpath) {
- rcu_read_unlock();
+ spin_unlock_bh(&tbl->walk_lock);
return -ENXIO;
}
__mesh_path_del(tbl, mpath);
- rcu_read_unlock();
+ spin_unlock_bh(&tbl->walk_lock);
return 0;
}
@@ -854,28 +795,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
+ struct hlist_node *n;
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 33055c8ed37e..8afd0ece94c9 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
+ * Copyright (C) 2019 Intel Corporation
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -1214,6 +1215,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
if (baselen > len)
return;
}
- ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
+ ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems,
+ mgmt->bssid, NULL);
mesh_process_plink_frame(sdata, mgmt, &elems, rx_status);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 687821567287..2dbcf5d5512e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -7,7 +7,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -644,7 +644,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos, qos_info;
+ u8 *pos, qos_info, *ie_start;
size_t offset = 0, noffset;
int i, count, rates_len, supp_rates_len, shift;
u16 capab;
@@ -752,6 +752,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
/* SSID */
pos = skb_put(skb, 2 + assoc_data->ssid_len);
+ ie_start = pos;
*pos++ = WLAN_EID_SSID;
*pos++ = assoc_data->ssid_len;
memcpy(pos, assoc_data->ssid, assoc_data->ssid_len);
@@ -813,6 +814,21 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
}
+ /* Set MBSSID support for HE AP if needed */
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) &&
+ !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && assoc_data->ie_len) {
+ struct element *elem;
+
+ /* we know it's writable, cast away the const */
+ elem = (void *)cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
+ assoc_data->ie,
+ assoc_data->ie_len);
+
+ /* We can probably assume both always true */
+ if (elem && elem->datalen >= 3)
+ elem->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
+ }
+
/* if present, add any custom IEs that go before HT */
if (assoc_data->ie_len) {
static const u8 before_ht[] = {
@@ -961,6 +977,11 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
return;
}
+ pos = skb_tail_pointer(skb);
+ kfree(ifmgd->assoc_req_ies);
+ ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
+ ifmgd->assoc_req_ies_len = pos - ie_start;
+
drv_mgd_prepare_tx(local, sdata, 0);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
@@ -1238,6 +1259,32 @@ static void ieee80211_chswitch_timer(struct timer_list *t)
}
static void
+ieee80211_sta_abort_chanswitch(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (!local->ops->abort_channel_switch)
+ return;
+
+ mutex_lock(&local->mtx);
+
+ mutex_lock(&local->chanctx_mtx);
+ ieee80211_vif_unreserve_chanctx(sdata);
+ mutex_unlock(&local->chanctx_mtx);
+
+ if (sdata->csa_block_tx)
+ ieee80211_wake_vif_queues(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ sdata->csa_block_tx = false;
+ sdata->vif.csa_active = false;
+
+ mutex_unlock(&local->mtx);
+
+ drv_abort_channel_switch(sdata);
+}
+
+static void
ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
u64 timestamp, u32 device_timestamp,
struct ieee802_11_elems *elems,
@@ -1261,19 +1308,36 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
if (local->scanning)
return;
- /* disregard subsequent announcements if we are already processing */
- if (sdata->vif.csa_active)
- return;
-
current_band = cbss->channel->band;
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
ifmgd->flags,
ifmgd->associated->bssid, &csa_ie);
- if (res < 0)
+
+ if (!res) {
+ ch_switch.timestamp = timestamp;
+ ch_switch.device_timestamp = device_timestamp;
+ ch_switch.block_tx = beacon ? csa_ie.mode : 0;
+ ch_switch.chandef = csa_ie.chandef;
+ ch_switch.count = csa_ie.count;
+ ch_switch.delay = csa_ie.max_switch_time;
+ }
+
+ if (res < 0) {
ieee80211_queue_work(&local->hw,
&ifmgd->csa_connection_drop_work);
- if (res)
return;
+ }
+
+ if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) {
+ if (res)
+ ieee80211_sta_abort_chanswitch(sdata);
+ else
+ drv_channel_switch_rx_beacon(sdata, &ch_switch);
+ return;
+ } else if (sdata->vif.csa_active || res) {
+ /* disregard subsequent announcements if already processing */
+ return;
+ }
if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
IEEE80211_CHAN_DISABLED)) {
@@ -1289,7 +1353,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
}
if (cfg80211_chandef_identical(&csa_ie.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chandef) &&
+ (!csa_ie.mode || !beacon)) {
if (ifmgd->csa_ignored_same_chan)
return;
sdata_info(sdata,
@@ -1326,12 +1391,6 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
goto drop_connection;
}
- ch_switch.timestamp = timestamp;
- ch_switch.device_timestamp = device_timestamp;
- ch_switch.block_tx = csa_ie.mode;
- ch_switch.chandef = csa_ie.chandef;
- ch_switch.count = csa_ie.count;
-
if (drv_pre_channel_switch(sdata, &ch_switch)) {
sdata_info(sdata,
"preparing for channel switch failed, disconnecting\n");
@@ -1350,7 +1409,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
sdata->vif.csa_active = true;
sdata->csa_chandef = csa_ie.chandef;
- sdata->csa_block_tx = csa_ie.mode;
+ sdata->csa_block_tx = ch_switch.block_tx;
ifmgd->csa_ignored_same_chan = false;
if (sdata->csa_block_tx)
@@ -1384,7 +1443,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
* reset when the disconnection worker runs.
*/
sdata->vif.csa_active = true;
- sdata->csa_block_tx = csa_ie.mode;
+ sdata->csa_block_tx = ch_switch.block_tx;
ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
mutex_unlock(&local->chanctx_mtx);
@@ -2762,7 +2821,8 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
u32 tx_flags = 0;
pos = mgmt->u.auth.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), false, &elems);
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
+ mgmt->bssid, auth_data->bss->bssid);
if (!elems.challenge)
return;
auth_data->expected_transaction = 4;
@@ -3130,7 +3190,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
pos = mgmt->u.assoc_resp.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), false, &elems);
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
+ mgmt->bssid, assoc_data->bss->bssid);
if (!elems.supp_rates) {
sdata_info(sdata, "no SuppRates element in AssocResp\n");
@@ -3167,7 +3228,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
return false;
ieee802_11_parse_elems(bss_ies->data, bss_ies->len,
- false, &bss_elems);
+ false, &bss_elems,
+ mgmt->bssid,
+ assoc_data->bss->bssid);
if (assoc_data->wmm &&
!elems.wmm_param && bss_elems.wmm_param) {
elems.wmm_param = bss_elems.wmm_param;
@@ -3304,6 +3367,14 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
/* TODO: OPEN: what happens if BSS color disable is set? */
}
+ if (cbss->transmitted_bss) {
+ bss_conf->nontransmitted = true;
+ ether_addr_copy(bss_conf->transmitter_bssid,
+ cbss->transmitted_bss->bssid);
+ bss_conf->bssid_indicator = cbss->max_bssid_indicator;
+ bss_conf->bssid_index = cbss->bssid_index;
+ }
+
/*
* Some APs, e.g. Netgear WNDR3700, report invalid HT operation data
* in their association response, so ignore that data for our own
@@ -3464,7 +3535,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
return;
pos = mgmt->u.assoc_resp.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), false, &elems);
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
+ mgmt->bssid, assoc_data->bss->bssid);
if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
elems.timeout_int &&
@@ -3516,13 +3588,13 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
}
- cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len, uapsd_queues);
+ cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len, uapsd_queues,
+ ifmgd->assoc_req_ies, ifmgd->assoc_req_ies_len);
}
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len,
- struct ieee80211_rx_status *rx_status,
- struct ieee802_11_elems *elems)
+ struct ieee80211_rx_status *rx_status)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_bss *bss;
@@ -3534,8 +3606,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
if (!channel)
return;
- bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
- channel);
+ bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, channel);
if (bss) {
sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
ieee80211_rx_bss_put(local, bss);
@@ -3550,7 +3621,6 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd;
struct ieee80211_rx_status *rx_status = (void *) skb->cb;
size_t baselen, len = skb->len;
- struct ieee802_11_elems elems;
ifmgd = &sdata->u.mgd;
@@ -3563,10 +3633,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
if (baselen > len)
return;
- ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
- false, &elems);
-
- ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
+ ieee80211_rx_bss_info(sdata, mgmt, len, rx_status);
if (ifmgd->associated &&
ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
@@ -3693,6 +3760,16 @@ static void ieee80211_handle_beacon_sig(struct ieee80211_sub_if_data *sdata,
}
}
+static bool ieee80211_rx_our_beacon(const u8 *tx_bssid,
+ struct cfg80211_bss *bss)
+{
+ if (ether_addr_equal(tx_bssid, bss->bssid))
+ return true;
+ if (!bss->transmitted_bss)
+ return false;
+ return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid);
+}
+
static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len,
struct ieee80211_rx_status *rx_status)
@@ -3734,15 +3811,16 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
- ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
+ ieee80211_rx_our_beacon(mgmt->bssid, ifmgd->assoc_data->bss)) {
ieee802_11_parse_elems(mgmt->u.beacon.variable,
- len - baselen, false, &elems);
+ len - baselen, false, &elems,
+ mgmt->bssid,
+ ifmgd->assoc_data->bss->bssid);
- ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
- if (elems.tim && !elems.parse_error) {
- const struct ieee80211_tim_ie *tim_ie = elems.tim;
- ifmgd->dtim_period = tim_ie->dtim_period;
- }
+ ieee80211_rx_bss_info(sdata, mgmt, len, rx_status);
+
+ if (elems.dtim_period)
+ ifmgd->dtim_period = elems.dtim_period;
ifmgd->have_beacon = true;
ifmgd->assoc_data->need_beacon = false;
if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
@@ -3750,12 +3828,17 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
le64_to_cpu(mgmt->u.beacon.timestamp);
sdata->vif.bss_conf.sync_device_ts =
rx_status->device_timestamp;
- if (elems.tim)
- sdata->vif.bss_conf.sync_dtim_count =
- elems.tim->dtim_count;
- else
- sdata->vif.bss_conf.sync_dtim_count = 0;
+ sdata->vif.bss_conf.sync_dtim_count = elems.dtim_count;
}
+
+ if (elems.mbssid_config_ie)
+ bss_conf->profile_periodicity =
+ elems.mbssid_config_ie->profile_periodicity;
+
+ if (elems.ext_capab_len >= 11 &&
+ (elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
+ bss_conf->ema_ap = true;
+
/* continue assoc process */
ifmgd->assoc_data->timeout = jiffies;
ifmgd->assoc_data->timeout_started = true;
@@ -3764,7 +3847,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (!ifmgd->associated ||
- !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
+ !ieee80211_rx_our_beacon(mgmt->bssid, ifmgd->associated))
return;
bssid = ifmgd->associated->bssid;
@@ -3787,7 +3870,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
len - baselen, false, &elems,
- care_about_ies, ncrc);
+ care_about_ies, ncrc,
+ mgmt->bssid, bssid);
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
ieee80211_check_tim(elems.tim, elems.tim_len, ifmgd->aid)) {
@@ -3859,11 +3943,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
le64_to_cpu(mgmt->u.beacon.timestamp);
sdata->vif.bss_conf.sync_device_ts =
rx_status->device_timestamp;
- if (elems.tim)
- sdata->vif.bss_conf.sync_dtim_count =
- elems.tim->dtim_count;
- else
- sdata->vif.bss_conf.sync_dtim_count = 0;
+ sdata->vif.bss_conf.sync_dtim_count = elems.dtim_count;
}
if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
@@ -3871,7 +3951,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->beacon_crc = ncrc;
ifmgd->beacon_crc_valid = true;
- ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
+ ieee80211_rx_bss_info(sdata, mgmt, len, rx_status);
ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
rx_status->device_timestamp,
@@ -3889,10 +3969,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
*/
if (!ifmgd->have_beacon) {
/* a few bogus AP send dtim_period = 0 or no TIM IE */
- if (elems.tim)
- bss_conf->dtim_period = elems.tim->dtim_period ?: 1;
- else
- bss_conf->dtim_period = 1;
+ bss_conf->dtim_period = elems.dtim_period ?: 1;
changed |= BSS_CHANGED_BEACON_INFO;
ifmgd->have_beacon = true;
@@ -3992,9 +4069,10 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
if (ies_len < 0)
break;
+ /* CSA IE cannot be overridden, no need for BSSID */
ieee802_11_parse_elems(
mgmt->u.action.u.chan_switch.variable,
- ies_len, true, &elems);
+ ies_len, true, &elems, mgmt->bssid, NULL);
if (elems.parse_error)
break;
@@ -4011,9 +4089,13 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
if (ies_len < 0)
break;
+ /*
+ * extended CSA IE can't be overridden, no need for
+ * BSSID
+ */
ieee802_11_parse_elems(
mgmt->u.action.u.ext_chan_switch.variable,
- ies_len, true, &elems);
+ ies_len, true, &elems, mgmt->bssid, NULL);
if (elems.parse_error)
break;
@@ -4754,6 +4836,40 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
return ret;
}
+static bool ieee80211_get_dtim(const struct cfg80211_bss_ies *ies,
+ u8 *dtim_count, u8 *dtim_period)
+{
+ const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ const u8 *idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, ies->data,
+ ies->len);
+ const struct ieee80211_tim_ie *tim = NULL;
+ const struct ieee80211_bssid_index *idx;
+ bool valid = tim_ie && tim_ie[1] >= 2;
+
+ if (valid)
+ tim = (void *)(tim_ie + 2);
+
+ if (dtim_count)
+ *dtim_count = valid ? tim->dtim_count : 0;
+
+ if (dtim_period)
+ *dtim_period = valid ? tim->dtim_period : 0;
+
+ /* Check if value is overridden by non-transmitted profile */
+ if (!idx_ie || idx_ie[1] < 3)
+ return valid;
+
+ idx = (void *)(idx_ie + 2);
+
+ if (dtim_count)
+ *dtim_count = idx->dtim_count;
+
+ if (dtim_period)
+ *dtim_period = idx->dtim_period;
+
+ return true;
+}
+
static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss, bool assoc,
bool override)
@@ -4845,17 +4961,13 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
ies = rcu_dereference(cbss->beacon_ies);
if (ies) {
- const u8 *tim_ie;
-
sdata->vif.bss_conf.sync_tsf = ies->tsf;
sdata->vif.bss_conf.sync_device_ts =
bss->device_ts_beacon;
- tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
- ies->data, ies->len);
- if (tim_ie && tim_ie[1] >= 2)
- sdata->vif.bss_conf.sync_dtim_count = tim_ie[2];
- else
- sdata->vif.bss_conf.sync_dtim_count = 0;
+
+ ieee80211_get_dtim(ies,
+ &sdata->vif.bss_conf.sync_dtim_count,
+ NULL);
} else if (!ieee80211_hw_check(&sdata->local->hw,
TIMING_BEACON_ONLY)) {
ies = rcu_dereference(cbss->proberesp_ies);
@@ -5325,17 +5437,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
assoc_data->timeout_started = true;
assoc_data->need_beacon = true;
} else if (beacon_ies) {
- const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
- beacon_ies->data,
- beacon_ies->len);
+ const u8 *ie;
u8 dtim_count = 0;
- if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) {
- const struct ieee80211_tim_ie *tim;
- tim = (void *)(tim_ie + 2);
- ifmgd->dtim_period = tim->dtim_period;
- dtim_count = tim->dtim_count;
- }
+ ieee80211_get_dtim(beacon_ies, &dtim_count,
+ &ifmgd->dtim_period);
+
ifmgd->have_beacon = true;
assoc_data->timeout = jiffies;
assoc_data->timeout_started = true;
@@ -5346,6 +5453,17 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
bss->device_ts_beacon;
sdata->vif.bss_conf.sync_dtim_count = dtim_count;
}
+
+ ie = cfg80211_find_ext_ie(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION,
+ beacon_ies->data, beacon_ies->len);
+ if (ie && ie[1] >= 3)
+ sdata->vif.bss_conf.profile_periodicity = ie[4];
+
+ ie = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY,
+ beacon_ies->data, beacon_ies->len);
+ if (ie && ie[1] >= 11 &&
+ (ie[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
+ sdata->vif.bss_conf.ema_ap = true;
} else {
assoc_data->timeout = jiffies;
assoc_data->timeout_started = true;
@@ -5503,6 +5621,9 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
ifmgd->teardown_skb = NULL;
ifmgd->orig_teardown_skb = NULL;
}
+ kfree(ifmgd->assoc_req_ies);
+ ifmgd->assoc_req_ies = NULL;
+ ifmgd->assoc_req_ies_len = 0;
spin_unlock_bh(&ifmgd->teardown_lock);
del_timer_sync(&ifmgd->timer);
sdata_unlock(sdata);
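
[Editor's sketch] The new ieee80211_get_dtim() above reads the DTIM count/period from the TIM element and lets a Multiple BSSID Index element override both for a non-transmitted profile. The following self-contained sketch mirrors that parsing over a raw IE buffer; the find_ie() helper, the element ID value, and the assumed Multiple-BSSID-Index layout are illustrative, not cfg80211 API, and both out-pointers are required here unlike in the kernel helper.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EID_TIM			5
#define EID_MBSSID_INDEX	85	/* element ID assumed for illustration */

/* Find an (id, len, data...) element in a flat IE buffer. */
static const uint8_t *find_ie(uint8_t eid, const uint8_t *ies, size_t len)
{
	while (len >= 2 && len >= 2u + ies[1]) {
		if (ies[0] == eid)
			return ies;
		len -= 2u + ies[1];
		ies += 2u + ies[1];
	}
	return NULL;
}

/* TIM gives the defaults; a Multiple BSSID Index element (non-transmitted
 * profile) overrides them when it carries DTIM information.
 */
static bool get_dtim(const uint8_t *ies, size_t len,
		     uint8_t *dtim_count, uint8_t *dtim_period)
{
	const uint8_t *tim = find_ie(EID_TIM, ies, len);
	const uint8_t *idx = find_ie(EID_MBSSID_INDEX, ies, len);
	bool valid = tim && tim[1] >= 2;

	*dtim_count  = valid ? tim[2] : 0;	/* DTIM Count  */
	*dtim_period = valid ? tim[3] : 0;	/* DTIM Period */

	if (!idx || idx[1] < 3)
		return valid;

	/* assumed layout: BSSID Index, DTIM Period, DTIM Count */
	*dtim_period = idx[3];
	*dtim_count  = idx[4];
	return true;
}
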
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index f466ec37d161..ccaf951e4e31 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -294,6 +294,15 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
}
+static unsigned int
+minstrel_ht_avg_ampdu_len(struct minstrel_ht_sta *mi)
+{
+ if (!mi->avg_ampdu_len)
+ return AVG_AMPDU_SIZE;
+
+ return MINSTREL_TRUNC(mi->avg_ampdu_len);
+}
+
/*
* Return current throughput based on the average A-MPDU length, taking into
* account the expected number of retransmissions and their expected length
@@ -309,7 +318,7 @@ minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
return 0;
if (group != MINSTREL_CCK_GROUP)
- nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
+ nsecs = 1000 * mi->overhead / minstrel_ht_avg_ampdu_len(mi);
nsecs += minstrel_mcs_groups[group].duration[rate] <<
minstrel_mcs_groups[group].shift;
@@ -503,8 +512,12 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
u16 tmp_cck_tp_rate[MAX_THR_RATES], index;
if (mi->ampdu_packets > 0) {
- mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
- MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
+ if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN))
+ mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
+ MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets),
+ EWMA_LEVEL);
+ else
+ mi->avg_ampdu_len = 0;
mi->ampdu_len = 0;
mi->ampdu_packets = 0;
}
@@ -709,7 +722,9 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
mi->ampdu_len += info->status.ampdu_len;
if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
- mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
+ int avg_ampdu_len = minstrel_ht_avg_ampdu_len(mi);
+
+ mi->sample_wait = 16 + 2 * avg_ampdu_len;
mi->sample_tries = 1;
mi->sample_count--;
}
@@ -777,7 +792,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
unsigned int cw = mp->cw_min;
unsigned int ctime = 0;
unsigned int t_slot = 9; /* FIXME */
- unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
+ unsigned int ampdu_len = minstrel_ht_avg_ampdu_len(mi);
unsigned int overhead = 0, overhead_rtscts = 0;
mrs = minstrel_get_ratestats(mi, index);
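
[Editor's sketch] The minstrel_ht changes above keep avg_ampdu_len at zero when the driver cannot report A-MPDU lengths (TX_STATUS_NO_AMPDU_LEN) and make every consumer go through minstrel_ht_avg_ampdu_len(), which falls back to a default aggregate size. A compact fixed-point sketch of that EWMA plus fallback; the scale, EWMA weight, and default below are illustrative, not the exact minstrel constants.

#include <stdio.h>

#define SCALE		12			/* fixed point: value << SCALE */
#define FRAC(v, d)	(((v) << SCALE) / (d))
#define TRUNC(v)	((v) >> SCALE)
#define EWMA_LEVEL	96			/* weight of the old value, of 100 */
#define DFL_AMPDU_LEN	16			/* fallback aggregate size */

/* ewma(old, new) = (old * level + new * (100 - level)) / 100 */
static unsigned int ewma(unsigned int old, unsigned int new, unsigned int level)
{
	return old ? (new * (100 - level) + old * level) / 100 : new;
}

struct ampdu_stats {
	unsigned int avg_ampdu_len;	/* fixed-point, 0 = "unknown"        */
	unsigned int ampdu_len;		/* MPDUs seen since the last update  */
	unsigned int ampdu_packets;	/* A-MPDUs seen since the last update */
};

static void update(struct ampdu_stats *s, int have_ampdu_len)
{
	if (!s->ampdu_packets)
		return;
	if (have_ampdu_len)
		s->avg_ampdu_len = ewma(s->avg_ampdu_len,
					FRAC(s->ampdu_len, s->ampdu_packets),
					EWMA_LEVEL);
	else
		s->avg_ampdu_len = 0;
	s->ampdu_len = 0;
	s->ampdu_packets = 0;
}

/* Consumers never see zero: fall back to the default aggregate size. */
static unsigned int avg_ampdu_len(const struct ampdu_stats *s)
{
	return s->avg_ampdu_len ? TRUNC(s->avg_ampdu_len) : DFL_AMPDU_LEN;
}

int main(void)
{
	struct ampdu_stats s = { 0, 96, 4 };	/* 96 MPDUs over 4 A-MPDUs */

	update(&s, 1);
	printf("%u\n", avg_ampdu_len(&s));	/* 24 */

	s.ampdu_len = 10;
	s.ampdu_packets = 2;
	update(&s, 0);				/* no A-MPDU length reported */
	printf("%u\n", avg_ampdu_len(&s));	/* falls back to 16 */
	return 0;
}
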
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 57820a5f2c16..31641d0b0f5c 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -160,9 +160,10 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
"lookaround %d\n",
max(0, (int) mi->total_packets - (int) mi->sample_packets),
mi->sample_packets);
- p += sprintf(p, "Average # of aggregated frames per A-MPDU: %d.%d\n",
- MINSTREL_TRUNC(mi->avg_ampdu_len),
- MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
+ if (mi->avg_ampdu_len)
+ p += sprintf(p, "Average # of aggregated frames per A-MPDU: %d.%d\n",
+ MINSTREL_TRUNC(mi->avg_ampdu_len),
+ MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
ms->len = p - ms->buf;
WARN_ON(ms->len + sizeof(*ms) > 32768);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 45aad3d3108c..7f8d93401ce0 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -5,7 +5,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -208,7 +208,24 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
}
if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
- struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
+ struct ieee80211_vendor_radiotap *rtap;
+ int vendor_data_offset = 0;
+
+ /*
+ * The position to look at depends on the existence (or non-
+ * existence) of other elements, so take that into account...
+ */
+ if (status->flag & RX_FLAG_RADIOTAP_HE)
+ vendor_data_offset +=
+ sizeof(struct ieee80211_radiotap_he);
+ if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ vendor_data_offset +=
+ sizeof(struct ieee80211_radiotap_he_mu);
+ if (status->flag & RX_FLAG_RADIOTAP_LSIG)
+ vendor_data_offset +=
+ sizeof(struct ieee80211_radiotap_lsig);
+
+ rtap = (void *)&skb->data[vendor_data_offset];
/* alignment for fixed 6-byte vendor data header */
len = ALIGN(len, 2);
@@ -231,7 +248,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr_3addr hdr;
u8 category;
u8 action_code;
- } __packed action;
+ } __packed __aligned(2) action;
if (!sdata)
return;
@@ -2644,6 +2661,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u16 ac, q, hdrlen;
+ int tailroom = 0;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2723,15 +2741,21 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
skb_set_queue_mapping(skb, q);
if (!--mesh_hdr->ttl) {
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+ if (!is_multicast_ether_addr(hdr->addr1))
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
+ dropped_frames_ttl);
goto out;
}
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto out;
+ if (sdata->crypto_tx_tailroom_needed_cnt)
+ tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
fwd_skb = skb_copy_expand(skb, local->tx_headroom +
- sdata->encrypt_headroom, 0, GFP_ATOMIC);
+ sdata->encrypt_headroom,
+ tailroom, GFP_ATOMIC);
if (!fwd_skb)
goto out;
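
[Editor's sketch] The radiotap change above no longer assumes the vendor header sits at the start of the frame data: its offset is the sum of the sizes of whichever optional fixed-size headers (HE, HE-MU, L-SIG) the driver put in front of it. A tiny standalone sketch of that offset calculation; the flag names and sizes here are made up for illustration, not the real struct sizes.

#include <stddef.h>

#define HAVE_HE		(1u << 0)
#define HAVE_HE_MU	(1u << 1)
#define HAVE_LSIG	(1u << 2)

#define HE_LEN		12	/* illustrative sizes only */
#define HE_MU_LEN	12
#define LSIG_LEN	4

/* The vendor data starts after whatever optional headers are present,
 * so the offset depends on the per-packet status flags.
 */
static size_t vendor_data_offset(unsigned int flags)
{
	size_t off = 0;

	if (flags & HAVE_HE)
		off += HE_LEN;
	if (flags & HAVE_HE_MU)
		off += HE_MU_LEN;
	if (flags & HAVE_LSIG)
		off += LSIG_LEN;
	return off;
}
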
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 95413413f98c..0cf066700623 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -8,6 +8,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -57,62 +58,14 @@ static bool is_uapsd_supported(struct ieee802_11_elems *elems)
return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
}
-struct ieee80211_bss *
-ieee80211_bss_info_update(struct ieee80211_local *local,
- struct ieee80211_rx_status *rx_status,
- struct ieee80211_mgmt *mgmt, size_t len,
- struct ieee802_11_elems *elems,
- struct ieee80211_channel *channel)
+static void
+ieee80211_update_bss_from_elems(struct ieee80211_local *local,
+ struct ieee80211_bss *bss,
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status,
+ bool beacon)
{
- bool beacon = ieee80211_is_beacon(mgmt->frame_control);
- struct cfg80211_bss *cbss;
- struct ieee80211_bss *bss;
int clen, srlen;
- struct cfg80211_inform_bss bss_meta = {
- .boottime_ns = rx_status->boottime_ns,
- };
- bool signal_valid;
- struct ieee80211_sub_if_data *scan_sdata;
-
- if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)
- bss_meta.signal = 0; /* invalid signal indication */
- else if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
- bss_meta.signal = rx_status->signal * 100;
- else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
- bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal;
-
- bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20;
- if (rx_status->bw == RATE_INFO_BW_5)
- bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5;
- else if (rx_status->bw == RATE_INFO_BW_10)
- bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10;
-
- bss_meta.chan = channel;
-
- rcu_read_lock();
- scan_sdata = rcu_dereference(local->scan_sdata);
- if (scan_sdata && scan_sdata->vif.type == NL80211_IFTYPE_STATION &&
- scan_sdata->vif.bss_conf.assoc &&
- ieee80211_have_rx_timestamp(rx_status)) {
- bss_meta.parent_tsf =
- ieee80211_calculate_rx_timestamp(local, rx_status,
- len + FCS_LEN, 24);
- ether_addr_copy(bss_meta.parent_bssid,
- scan_sdata->vif.bss_conf.bssid);
- }
- rcu_read_unlock();
-
- cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta,
- mgmt, len, GFP_ATOMIC);
- if (!cbss)
- return NULL;
- /* In case the signal is invalid update the status */
- signal_valid = abs(channel->center_freq - cbss->channel->center_freq)
- <= local->hw.wiphy->max_adj_channel_rssi_comp;
- if (!signal_valid)
- rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
-
- bss = (void *)cbss->priv;
if (beacon)
bss->device_ts_beacon = rx_status->device_timestamp;
@@ -182,6 +135,89 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
bss->beacon_rate =
&sband->bitrates[rx_status->rate_idx];
}
+}
+
+struct ieee80211_bss *
+ieee80211_bss_info_update(struct ieee80211_local *local,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee80211_channel *channel)
+{
+ bool beacon = ieee80211_is_beacon(mgmt->frame_control);
+ struct cfg80211_bss *cbss, *non_tx_cbss;
+ struct ieee80211_bss *bss, *non_tx_bss;
+ struct cfg80211_inform_bss bss_meta = {
+ .boottime_ns = rx_status->boottime_ns,
+ };
+ bool signal_valid;
+ struct ieee80211_sub_if_data *scan_sdata;
+ struct ieee802_11_elems elems;
+ size_t baselen;
+ u8 *elements;
+
+ if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)
+ bss_meta.signal = 0; /* invalid signal indication */
+ else if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
+ bss_meta.signal = rx_status->signal * 100;
+ else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
+ bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal;
+
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ if (rx_status->bw == RATE_INFO_BW_5)
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ else if (rx_status->bw == RATE_INFO_BW_10)
+ bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10;
+
+ bss_meta.chan = channel;
+
+ rcu_read_lock();
+ scan_sdata = rcu_dereference(local->scan_sdata);
+ if (scan_sdata && scan_sdata->vif.type == NL80211_IFTYPE_STATION &&
+ scan_sdata->vif.bss_conf.assoc &&
+ ieee80211_have_rx_timestamp(rx_status)) {
+ bss_meta.parent_tsf =
+ ieee80211_calculate_rx_timestamp(local, rx_status,
+ len + FCS_LEN, 24);
+ ether_addr_copy(bss_meta.parent_bssid,
+ scan_sdata->vif.bss_conf.bssid);
+ }
+ rcu_read_unlock();
+
+ cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta,
+ mgmt, len, GFP_ATOMIC);
+ if (!cbss)
+ return NULL;
+
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ elements = mgmt->u.probe_resp.variable;
+ baselen = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ } else {
+ baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ elements = mgmt->u.beacon.variable;
+ }
+
+ if (baselen > len)
+ return NULL;
+
+ ieee802_11_parse_elems(elements, len - baselen, false, &elems,
+ mgmt->bssid, cbss->bssid);
+
+ /* In case the signal is invalid update the status */
+ signal_valid = abs(channel->center_freq - cbss->channel->center_freq)
+ <= local->hw.wiphy->max_adj_channel_rssi_comp;
+ if (!signal_valid)
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ bss = (void *)cbss->priv;
+ ieee80211_update_bss_from_elems(local, bss, &elems, rx_status, beacon);
+
+ list_for_each_entry(non_tx_cbss, &cbss->nontrans_list, nontrans_list) {
+ non_tx_bss = (void *)non_tx_cbss->priv;
+
+ ieee80211_update_bss_from_elems(local, non_tx_bss, &elems,
+ rx_status, beacon);
+ }
return bss;
}
@@ -206,10 +242,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
struct ieee80211_sub_if_data *sdata1, *sdata2;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
- u8 *elements;
struct ieee80211_channel *channel;
- size_t baselen;
- struct ieee802_11_elems elems;
if (skb->len < 24 ||
(!ieee80211_is_probe_resp(mgmt->frame_control) &&
@@ -244,26 +277,15 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
!ieee80211_scan_accept_presp(sdata2, sched_scan_req_flags,
mgmt->da))
return;
-
- elements = mgmt->u.probe_resp.variable;
- baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
- } else {
- baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
- elements = mgmt->u.beacon.variable;
}
- if (baselen > skb->len)
- return;
-
- ieee802_11_parse_elems(elements, skb->len - baselen, false, &elems);
-
channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
return;
bss = ieee80211_bss_info_update(local, rx_status,
- mgmt, skb->len, &elems,
+ mgmt, skb->len,
channel);
if (bss)
ieee80211_rx_bss_put(local, bss);
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 4e4902bdbef8..3c644f14dd59 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -177,6 +177,12 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
csa_ie->chandef = new_vht_chandef;
}
+ if (elems->max_channel_switch_time)
+ csa_ie->max_switch_time =
+ (elems->max_channel_switch_time[0] << 0) |
+ (elems->max_channel_switch_time[1] << 8) |
+ (elems->max_channel_switch_time[2] << 16);
+
return 0;
}
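
[Editor's sketch] The spectmgmt.c hunk above assembles the optional Max Channel Switch Time value, carried as three octets least-significant first, into csa_ie->max_switch_time. The same decode as a tiny standalone helper (the function name is illustrative):

#include <stdint.h>

/* Decode a 24-bit little-endian field from three consecutive octets. */
static uint32_t decode_le24(const uint8_t b[3])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) | ((uint32_t)b[2] << 16);
}

/* usage (hypothetical): max_switch_time = decode_le24(elems->max_channel_switch_time); */
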
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c4a8f115ed33..11f058987a54 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -90,7 +90,6 @@ static void __cleanup_single_sta(struct sta_info *sta)
struct tid_ampdu_tx *tid_tx;
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- struct fq *fq = &local->fq;
struct ps_data *ps;
if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
@@ -120,9 +119,7 @@ static void __cleanup_single_sta(struct sta_info *sta)
txqi = to_txq_info(sta->sta.txq[i]);
- spin_lock_bh(&fq->lock);
ieee80211_txq_purge(local, txqi);
- spin_unlock_bh(&fq->lock);
}
}
@@ -387,9 +384,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
if (sta_prepare_rate_control(local, sta, gfp))
goto free_txq;
+ sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
+
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
skb_queue_head_init(&sta->ps_tx_buf[i]);
skb_queue_head_init(&sta->tx_filtered[i]);
+ sta->airtime[i].deficit = sta->airtime_weight;
}
for (i = 0; i < IEEE80211_NUM_TIDS; i++)
@@ -1249,7 +1249,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
continue;
- drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i]));
+ schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
}
skb_queue_head_init(&pending);
@@ -1826,6 +1826,27 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);
+void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
+ u32 tx_airtime, u32 rx_airtime)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_local *local = sta->sdata->local;
+ u8 ac = ieee80211_ac_from_tid(tid);
+ u32 airtime = 0;
+
+ if (sta->local->airtime_flags & AIRTIME_USE_TX)
+ airtime += tx_airtime;
+ if (sta->local->airtime_flags & AIRTIME_USE_RX)
+ airtime += rx_airtime;
+
+ spin_lock_bh(&local->active_txq_lock[ac]);
+ sta->airtime[ac].tx_airtime += tx_airtime;
+ sta->airtime[ac].rx_airtime += rx_airtime;
+ sta->airtime[ac].deficit -= airtime;
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+}
+EXPORT_SYMBOL(ieee80211_sta_register_airtime);
+
int sta_info_move_state(struct sta_info *sta,
enum ieee80211_sta_state new_state)
{
@@ -2188,6 +2209,23 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ sinfo->rx_duration += sta->airtime[ac].rx_airtime;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+ }
+
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ sinfo->tx_duration += sta->airtime[ac].tx_airtime;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+ }
+
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
+ sinfo->airtime_weight = sta->airtime_weight;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
+ }
+
sinfo->rx_dropped_misc = sta->rx_stats.dropped;
if (sta->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
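
[Editor's sketch] ieee80211_sta_register_airtime() above adds reported TX/RX airtime to per-AC totals and charges it against the station's deficit under the per-AC scheduler lock; the deficit is later refilled by airtime_weight in the round-robin scheduler (see ieee80211_next_txq() in the tx.c hunks further down). A simplified, self-contained sketch of that bookkeeping, using plain mutexes and illustrative names; the real scheduler rotates a negative-deficit txq to the tail of the active list instead of just refilling it.

#include <pthread.h>
#include <stdint.h>

#define NUM_ACS		4
#define USE_TX		(1u << 0)
#define USE_RX		(1u << 1)

struct ac_airtime {
	uint64_t tx_airtime;
	uint64_t rx_airtime;
	int64_t  deficit;	/* drops as airtime is consumed */
};

struct station {
	uint16_t weight;			/* refill quantum per round */
	struct ac_airtime airtime[NUM_ACS];
};

static unsigned int airtime_flags = USE_TX | USE_RX;
static pthread_mutex_t ac_lock[NUM_ACS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Called with the airtime (in usec) a frame consumed in either direction. */
static void register_airtime(struct station *sta, int ac,
			     uint32_t tx_airtime, uint32_t rx_airtime)
{
	uint32_t airtime = 0;

	if (airtime_flags & USE_TX)
		airtime += tx_airtime;
	if (airtime_flags & USE_RX)
		airtime += rx_airtime;

	pthread_mutex_lock(&ac_lock[ac]);
	sta->airtime[ac].tx_airtime += tx_airtime;
	sta->airtime[ac].rx_airtime += rx_airtime;
	sta->airtime[ac].deficit    -= airtime;
	pthread_mutex_unlock(&ac_lock[ac]);
}

/* Simplified DRR step: refill once when the deficit went negative and
 * report whether the station may transmit this round.
 */
static int may_transmit(struct station *sta, int ac)
{
	int ok;

	pthread_mutex_lock(&ac_lock[ac]);
	if (sta->airtime[ac].deficit < 0)
		sta->airtime[ac].deficit += sta->weight;
	ok = sta->airtime[ac].deficit >= 0;
	pthread_mutex_unlock(&ac_lock[ac]);
	return ok;
}
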
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 8eb29041be54..71f7e4973329 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -127,6 +127,16 @@ enum ieee80211_agg_stop_reason {
AGG_STOP_DESTROY_STA,
};
+/* Debugfs flags to enable/disable use of RX/TX airtime in scheduler */
+#define AIRTIME_USE_TX BIT(0)
+#define AIRTIME_USE_RX BIT(1)
+
+struct airtime_info {
+ u64 rx_airtime;
+ u64 tx_airtime;
+ s64 deficit;
+};
+
struct sta_info;
/**
@@ -343,6 +353,7 @@ struct ieee80211_fast_rx {
/* we use only values in the range 0-100, so pick a large precision */
DECLARE_EWMA(mesh_fail_avg, 20, 8)
+DECLARE_EWMA(mesh_tx_rate_avg, 8, 16)
/**
* struct mesh_sta - mesh STA information
@@ -366,6 +377,7 @@ DECLARE_EWMA(mesh_fail_avg, 20, 8)
* processed
* @connected_to_gate: true if mesh STA has a path to a mesh gate
* @fail_avg: moving percentage of failed MSDUs
+ * @tx_rate_avg: moving average of tx bitrate
*/
struct mesh_sta {
struct timer_list plink_timer;
@@ -394,6 +406,8 @@ struct mesh_sta {
/* moving percentage of failed MSDUs */
struct ewma_mesh_fail_avg fail_avg;
+ /* moving average of tx bitrate */
+ struct ewma_mesh_tx_rate_avg tx_rate_avg;
};
DECLARE_EWMA(signal, 10, 8)
@@ -459,6 +473,9 @@ struct ieee80211_sta_rx_stats {
* @last_seq_ctrl: last received seq/frag number from this STA (per TID
* plus one for non-QoS frames)
* @tid_seq: per-TID sequence numbers for sending to this STA
+ * @airtime: per-AC struct airtime_info describing airtime statistics for this
+ * station
+ * @airtime_weight: station weight for airtime fairness calculation purposes
* @ampdu_mlme: A-MPDU state machine state
* @mesh: mesh STA information
* @debugfs_dir: debug filesystem directory dentry
@@ -480,10 +497,28 @@ struct ieee80211_sta_rx_stats {
* @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
* the BSS one.
* @tx_stats: TX statistics
+ * @tx_stats.packets: # of packets transmitted
+ * @tx_stats.bytes: # of bytes in all packets transmitted
+ * @tx_stats.last_rate: last TX rate
+ * @tx_stats.msdu: # of transmitted MSDUs per TID
* @rx_stats: RX statistics
+ * @rx_stats_avg: averaged RX statistics
+ * @rx_stats_avg.signal: averaged signal
+ * @rx_stats_avg.chain_signal: averaged per-chain signal
* @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs
* this (by advertising the USES_RSS hw flag)
* @status_stats: TX status statistics
+ * @status_stats.filtered: # of filtered frames
+ * @status_stats.retry_failed: # of frames that failed after retry
+ * @status_stats.retry_count: # of retries attempted
+ * @status_stats.lost_packets: # of lost packets
+ * @status_stats.last_tdls_pkt_time: timestamp of last TDLS packet
+ * @status_stats.msdu_retries: # of MSDU retries
+ * @status_stats.msdu_failed: # of failed MSDUs
+ * @status_stats.last_ack: last ack timestamp (jiffies)
+ * @status_stats.last_ack_signal: last ACK signal
+ * @status_stats.ack_signal_filled: last ACK signal validity
+ * @status_stats.avg_ack_signal: average ACK signal
*/
struct sta_info {
/* General information, mostly static */
@@ -565,6 +600,9 @@ struct sta_info {
} tx_stats;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
+ struct airtime_info airtime[IEEE80211_NUM_ACS];
+ u16 airtime_weight;
+
/*
* Aggregation information, locked with lock.
*/
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 3f0b96e1e02f..5b9952b1caf3 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -823,6 +823,12 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
acked, info->status.tx_time);
+ if (info->status.tx_time &&
+ wiphy_ext_feature_isset(local->hw.wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ ieee80211_sta_register_airtime(&sta->sta, tid,
+ info->status.tx_time, 0);
+
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
if (info->flags & IEEE80211_TX_STAT_ACK) {
if (sta->status_stats.lost_packets)
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 6c647f425e05..d30690d79a58 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -5,6 +5,7 @@
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
* Copyright 2015 - 2016 Intel Deutschland GmbH
+ * Copyright (C) 2019 Intel Corporation
*
* This file is GPLv2 as found in COPYING.
*/
@@ -1716,7 +1717,8 @@ ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata,
}
ieee802_11_parse_elems(tf->u.chan_switch_resp.variable,
- skb->len - baselen, false, &elems);
+ skb->len - baselen, false, &elems,
+ NULL, NULL);
if (elems.parse_error) {
tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n");
ret = -EINVAL;
@@ -1828,7 +1830,7 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
}
ieee802_11_parse_elems(tf->u.chan_switch_req.variable,
- skb->len - baselen, false, &elems);
+ skb->len - baselen, false, &elems, NULL, NULL);
if (elems.parse_error) {
tdls_dbg(sdata, "Invalid IEs in TDLS channel switch req\n");
return -EINVAL;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 35ea0dcb55e6..8ba70d26b82e 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Portions of this file
-* Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018 Intel Corporation
+* Copyright(c) 2016-2017 Intel Deutschland GmbH
+* Copyright (C) 2018 - 2019 Intel Corporation
*/
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -2452,6 +2452,48 @@ DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch,
TP_ARGS(local, sdata)
);
+DEFINE_EVENT(local_sdata_evt, drv_abort_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
+TRACE_EVENT(drv_channel_switch_rx_beacon,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch),
+
+ TP_ARGS(local, sdata, ch_switch),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ CHANDEF_ENTRY
+ __field(u64, timestamp)
+ __field(u32, device_timestamp)
+ __field(bool, block_tx)
+ __field(u8, count)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ CHANDEF_ASSIGN(&ch_switch->chandef)
+ __entry->timestamp = ch_switch->timestamp;
+ __entry->device_timestamp = ch_switch->device_timestamp;
+ __entry->block_tx = ch_switch->block_tx;
+ __entry->count = ch_switch->count;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT
+ " received a channel switch beacon to "
+ CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
+ __entry->block_tx, __entry->timestamp
+ )
+);
+
TRACE_EVENT(drv_get_txpower,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f170d6c6629a..8a49a74c0a37 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1449,6 +1449,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
codel_vars_init(&txqi->def_cvars);
codel_stats_init(&txqi->cstats);
__skb_queue_head_init(&txqi->frags);
+ INIT_LIST_HEAD(&txqi->schedule_order);
txqi->txq.vif = &sdata->vif;
@@ -1487,8 +1488,14 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
struct fq *fq = &local->fq;
struct fq_tin *tin = &txqi->tin;
+ spin_lock_bh(&fq->lock);
fq_tin_reset(fq, tin, fq_skb_free_func);
ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
+ spin_unlock_bh(&fq->lock);
+
+ spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
+ list_del_init(&txqi->schedule_order);
+ spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
}
void ieee80211_txq_set_params(struct ieee80211_local *local)
@@ -1605,7 +1612,7 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
ieee80211_txq_enqueue(local, txqi, skb);
spin_unlock_bh(&fq->lock);
- drv_wake_tx_queue(local, txqi);
+ schedule_and_wake_txq(local, txqi);
return true;
}
@@ -1938,9 +1945,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
int head_need, bool may_encrypt)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_hdr *hdr;
+ bool enc_tailroom;
int tail_need = 0;
- if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
+ hdr = (struct ieee80211_hdr *) skb->data;
+ enc_tailroom = may_encrypt &&
+ (sdata->crypto_tx_tailroom_needed_cnt ||
+ ieee80211_is_mgmt(hdr->frame_control));
+
+ if (enc_tailroom) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
tail_need -= skb_tailroom(skb);
tail_need = max_t(int, tail_need, 0);
@@ -1948,8 +1962,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
if (skb_cloned(skb) &&
(!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
- !skb_clone_writable(skb, ETH_HLEN) ||
- (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
+ !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -3630,6 +3643,151 @@ out:
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);
+struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = NULL;
+
+ lockdep_assert_held(&local->active_txq_lock[ac]);
+
+ begin:
+ txqi = list_first_entry_or_null(&local->active_txqs[ac],
+ struct txq_info,
+ schedule_order);
+ if (!txqi)
+ return NULL;
+
+ if (txqi->txq.sta) {
+ struct sta_info *sta = container_of(txqi->txq.sta,
+ struct sta_info, sta);
+
+ if (sta->airtime[txqi->txq.ac].deficit < 0) {
+ sta->airtime[txqi->txq.ac].deficit +=
+ sta->airtime_weight;
+ list_move_tail(&txqi->schedule_order,
+ &local->active_txqs[txqi->txq.ac]);
+ goto begin;
+ }
+ }
+
+ if (txqi->schedule_round == local->schedule_round[ac])
+ return NULL;
+
+ list_del_init(&txqi->schedule_order);
+ txqi->schedule_round = local->schedule_round[ac];
+ return &txqi->txq;
+}
+EXPORT_SYMBOL(ieee80211_next_txq);
+
+void ieee80211_return_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = to_txq_info(txq);
+
+ lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+
+ if (list_empty(&txqi->schedule_order) &&
+ (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+ /* If airtime accounting is active, always enqueue STAs at the
+ * head of the list to ensure that they only get moved to the
+ * back by the airtime DRR scheduler once they have a negative
+ * deficit. A station that already has a negative deficit will
+ * get immediately moved to the back of the list on the next
+ * call to ieee80211_next_txq().
+ */
+ if (txqi->txq.sta &&
+ wiphy_ext_feature_isset(local->hw.wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ list_add(&txqi->schedule_order,
+ &local->active_txqs[txq->ac]);
+ else
+ list_add_tail(&txqi->schedule_order,
+ &local->active_txqs[txq->ac]);
+ }
+}
+EXPORT_SYMBOL(ieee80211_return_txq);
+
+void ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+ __acquires(txq_lock) __releases(txq_lock)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ spin_lock_bh(&local->active_txq_lock[txq->ac]);
+ ieee80211_return_txq(hw, txq);
+ spin_unlock_bh(&local->active_txq_lock[txq->ac]);
+}
+EXPORT_SYMBOL(ieee80211_schedule_txq);
+
+bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
+ struct sta_info *sta;
+ u8 ac = txq->ac;
+
+ lockdep_assert_held(&local->active_txq_lock[ac]);
+
+ if (!txqi->txq.sta)
+ goto out;
+
+ if (list_empty(&txqi->schedule_order))
+ goto out;
+
+ list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
+ schedule_order) {
+ if (iter == txqi)
+ break;
+
+ if (!iter->txq.sta) {
+ list_move_tail(&iter->schedule_order,
+ &local->active_txqs[ac]);
+ continue;
+ }
+ sta = container_of(iter->txq.sta, struct sta_info, sta);
+ if (sta->airtime[ac].deficit < 0)
+ sta->airtime[ac].deficit += sta->airtime_weight;
+ list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
+ }
+
+ sta = container_of(txqi->txq.sta, struct sta_info, sta);
+ if (sta->airtime[ac].deficit >= 0)
+ goto out;
+
+ sta->airtime[ac].deficit += sta->airtime_weight;
+ list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+
+ return false;
+out:
+ if (!list_empty(&txqi->schedule_order))
+ list_del_init(&txqi->schedule_order);
+
+ return true;
+}
+EXPORT_SYMBOL(ieee80211_txq_may_transmit);
+
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
+ __acquires(txq_lock)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ spin_lock_bh(&local->active_txq_lock[ac]);
+ local->schedule_round[ac]++;
+}
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
+
+void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+ __releases(txq_lock)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ spin_unlock_bh(&local->active_txq_lock[ac]);
+}
+EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags)
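
[Editorial aside on the TXQ scheduling hooks added in the tx.c hunk above (ieee80211_txq_schedule_start/end, ieee80211_next_txq, ieee80211_return_txq): a minimal sketch of how a driver might pump one access category through the new round-robin scheduler. It only illustrates the calling convention visible in the hunk; my_driver_tx_pump() and my_hw_push_frame() are hypothetical names, and a real driver would add its own queue-depth and flow-control checks.]

#include <net/mac80211.h>

/* assumed driver helper that hands one frame to hardware */
static void my_hw_push_frame(struct ieee80211_hw *hw,
			     struct ieee80211_txq *txq, struct sk_buff *skb);

static void my_driver_tx_pump(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);	/* takes active_txq_lock[ac] */

	while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
		struct sk_buff *skb;

		while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
			my_hw_push_frame(hw, txq, skb);

		/* re-queue the TXQ if it still has traffic; the airtime
		 * DRR logic decides head vs. tail placement */
		ieee80211_return_txq(hw, txq);
	}

	ieee80211_txq_schedule_end(hw, ac);	/* drops the lock */
}
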
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d0eb38b890aa..4c1655972565 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -5,7 +5,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -891,33 +891,24 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
-u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
- struct ieee802_11_elems *elems,
- u64 filter, u32 crc)
+static u32
+_ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ struct ieee802_11_elems *elems,
+ u64 filter, u32 crc, u8 *transmitter_bssid,
+ u8 *bss_bssid)
{
- size_t left = len;
- const u8 *pos = start;
+ const struct element *elem, *sub;
bool calc_crc = filter != 0;
DECLARE_BITMAP(seen_elems, 256);
const u8 *ie;
bitmap_zero(seen_elems, 256);
- memset(elems, 0, sizeof(*elems));
- elems->ie_start = start;
- elems->total_len = len;
- while (left >= 2) {
- u8 id, elen;
+ for_each_element(elem, start, len) {
bool elem_parse_failed;
-
- id = *pos++;
- elen = *pos++;
- left -= 2;
-
- if (elen > left) {
- elems->parse_error = true;
- break;
- }
+ u8 id = elem->id;
+ u8 elen = elem->datalen;
+ const u8 *pos = elem->data;
switch (id) {
case WLAN_EID_SSID:
@@ -960,8 +951,6 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
*/
if (test_bit(id, seen_elems)) {
elems->parse_error = true;
- left -= elen;
- pos += elen;
continue;
}
break;
@@ -1219,6 +1208,57 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
if (elen >= sizeof(*elems->max_idle_period_ie))
elems->max_idle_period_ie = (void *)pos;
break;
+ case WLAN_EID_MULTIPLE_BSSID:
+ if (!bss_bssid || !transmitter_bssid || elen < 4)
+ break;
+
+ elems->max_bssid_indicator = pos[0];
+
+ for_each_element(sub, pos + 1, elen - 1) {
+ u8 sub_len = sub->datalen;
+ u8 new_bssid[ETH_ALEN];
+ const u8 *index;
+
+ /*
+ * we only expect the "non-transmitted BSSID
+ * profile" subelement (subelement id 0)
+ */
+ if (sub->id != 0 || sub->datalen < 4) {
+ /* not a valid BSS profile */
+ continue;
+ }
+
+ if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ sub->data[1] != 2) {
+ /* The first element of the
+ * Nontransmitted BSSID Profile is not
+ * the Nontransmitted BSSID Capability
+ * element.
+ */
+ continue;
+ }
+
+ /* found a Nontransmitted BSSID Profile */
+ index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ sub->data, sub_len);
+ if (!index || index[1] < 1 || index[2] == 0) {
+ /* Invalid MBSSID Index element */
+ continue;
+ }
+
+ cfg80211_gen_new_bssid(transmitter_bssid,
+ pos[0],
+ index[2],
+ new_bssid);
+ if (ether_addr_equal(new_bssid, bss_bssid)) {
+ elems->nontransmitted_bssid_profile =
+ (void *)sub;
+ elems->bssid_index_len = index[1];
+ elems->bssid_index = (void *)&index[2];
+ break;
+ }
+ }
+ break;
case WLAN_EID_EXTENSION:
if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA &&
elen >= (sizeof(*elems->mu_edca_param_set) + 1)) {
@@ -1234,6 +1274,14 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elems->he_operation = (void *)&pos[1];
} else if (pos[0] == WLAN_EID_EXT_UORA && elen >= 1) {
elems->uora_element = (void *)&pos[1];
+ } else if (pos[0] ==
+ WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME &&
+ elen == 4) {
+ elems->max_channel_switch_time = pos + 1;
+ } else if (pos[0] ==
+ WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION &&
+ elen == 3) {
+ elems->mbssid_config_ie = (void *)&pos[1];
}
break;
default:
@@ -1244,17 +1292,56 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elems->parse_error = true;
else
__set_bit(id, seen_elems);
-
- left -= elen;
- pos += elen;
}
- if (left != 0)
+ if (!for_each_element_completed(elem, start, len))
elems->parse_error = true;
return crc;
}
+u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ struct ieee802_11_elems *elems,
+ u64 filter, u32 crc, u8 *transmitter_bssid,
+ u8 *bss_bssid)
+{
+ memset(elems, 0, sizeof(*elems));
+ elems->ie_start = start;
+ elems->total_len = len;
+
+ crc = _ieee802_11_parse_elems_crc(start, len, action, elems, filter,
+ crc, transmitter_bssid, bss_bssid);
+
+ /* Override with nontransmitted profile, if found */
+ if (transmitter_bssid && elems->nontransmitted_bssid_profile) {
+ const u8 *profile = elems->nontransmitted_bssid_profile;
+
+ _ieee802_11_parse_elems_crc(&profile[2], profile[1],
+ action, elems, 0, 0,
+ transmitter_bssid, bss_bssid);
+ }
+
+ if (elems->tim && !elems->parse_error) {
+ const struct ieee80211_tim_ie *tim_ie = elems->tim;
+
+ elems->dtim_period = tim_ie->dtim_period;
+ elems->dtim_count = tim_ie->dtim_count;
+ }
+
+ /* Override DTIM period and count if needed */
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_period))
+ elems->dtim_period = elems->bssid_index->dtim_period;
+
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_count))
+ elems->dtim_count = elems->bssid_index->dtim_count;
+
+ return crc;
+}
+
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_queue_params
*qparam, int ac)
@@ -2146,6 +2233,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MONITOR:
break;
+ case NL80211_IFTYPE_ADHOC:
+ if (sdata->vif.bss_conf.ibss_joined)
+ WARN_ON(drv_join_ibss(local, sdata));
+ /* fall through */
default:
ieee80211_reconfig_stations(sdata);
/* fall through */
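
[Editorial aside on the element-parsing rework in util.c above: the open-coded id/length walk is replaced by for_each_element()/for_each_element_completed(). The standalone sketch below spells out the same walk with the length checks made explicit; struct element mirrors the kernel's 1-byte id, 1-byte length layout, but the code is an illustration, not the kernel macros.]

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct element {
	uint8_t id;
	uint8_t datalen;
	uint8_t data[];
};

static bool parse_elems(const uint8_t *buf, size_t len)
{
	const uint8_t *pos = buf;

	while (len >= 2) {
		const struct element *elem = (const void *)pos;
		size_t elem_len = 2 + elem->datalen;

		if (elem_len > len)
			return false;		/* truncated element */

		/* ... dispatch on elem->id, read elem->data ... */

		pos += elem_len;
		len -= elem_len;
	}

	/* the equivalent of !for_each_element_completed(): any trailing
	 * bytes that do not form a full element are a parse error */
	return len == 0;
}
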
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7d55d4c04088..f7c544592ec8 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1209,21 +1209,57 @@ static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
};
+static int mpls_netconf_valid_get_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid header for netconf get request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_mpls_policy, extack);
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct netconfmsg), tb,
+ NETCONFA_MAX, devconf_mpls_policy, extack);
+ if (err)
+ return err;
+
+ for (i = 0; i <= NETCONFA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NETCONFA_IFINDEX:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[NETCONFA_MAX + 1];
- struct netconfmsg *ncm;
struct net_device *dev;
struct mpls_dev *mdev;
struct sk_buff *skb;
int ifindex;
int err;
- err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
- devconf_mpls_policy, extack);
+ err = mpls_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;
@@ -1838,6 +1874,9 @@ static int rtm_to_route_config(struct sk_buff *skb,
goto errout;
break;
}
+ case RTA_GATEWAY:
+ NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
+ goto errout;
case RTA_VIA:
{
if (nla_get_via(nla, &cfg->rc_via_alen,
@@ -2236,6 +2275,64 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
}
+static int mpls_valid_getroute_req(struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ struct rtmsg *rtm;
+ int i, err;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid header for get route request");
+ return -EINVAL;
+ }
+
+ if (!netlink_strict_get_check(skb))
+ return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_mpls_policy, extack);
+
+ rtm = nlmsg_data(nlh);
+ if ((rtm->rtm_dst_len && rtm->rtm_dst_len != 20) ||
+ rtm->rtm_src_len || rtm->rtm_tos || rtm->rtm_table ||
+ rtm->rtm_protocol || rtm->rtm_scope || rtm->rtm_type) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
+ return -EINVAL;
+ }
+ if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid flags for get route request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_mpls_policy, extack);
+ if (err)
+ return err;
+
+ if ((tb[RTA_DST] || tb[RTA_NEWDST]) && !rtm->rtm_dst_len) {
+ NL_SET_ERR_MSG_MOD(extack, "rtm_dst_len must be 20 for MPLS");
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= RTA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_DST:
+ case RTA_NEWDST:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
struct netlink_ext_ack *extack)
{
@@ -2255,8 +2352,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
u8 n_labels;
int err;
- err = nlmsg_parse(in_nlh, sizeof(*rtm), tb, RTA_MAX,
- rtm_mpls_policy, extack);
+ err = mpls_valid_getroute_req(in_skb, in_nlh, tb, extack);
if (err < 0)
goto errout;
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 94f53a9b7d1a..dda8930f20e7 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -183,8 +183,8 @@ static int mpls_build_state(struct nlattr *nla,
&n_labels, NULL, extack))
return -EINVAL;
- newts = lwtunnel_state_alloc(sizeof(*tun_encap_info) +
- n_labels * sizeof(u32));
+ newts = lwtunnel_state_alloc(struct_size(tun_encap_info, label,
+ n_labels));
if (!newts)
return -ENOMEM;
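
[Editorial aside on the struct_size() conversion in mpls_iptunnel.c above: the helper from linux/overflow.h computes sizeof(struct) plus n trailing array elements and saturates instead of wrapping, so an absurd count makes the allocation fail cleanly rather than under-allocate. A minimal sketch with an illustrative struct; demo_encap is not a real kernel type.]

#include <linux/types.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_encap {
	u8	n_labels;
	u32	label[];		/* flexible array member */
};

static struct demo_encap *demo_encap_alloc(u8 n_labels)
{
	struct demo_encap *e;

	/* struct_size(e, label, n) == sizeof(*e) + n * sizeof(e->label[0]),
	 * but saturating to SIZE_MAX on overflow */
	e = kzalloc(struct_size(e, label, n_labels), GFP_KERNEL);
	if (e)
		e->n_labels = n_labels;
	return e;
}
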
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index beb3a69ce1d4..d43ffb09939b 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -174,7 +174,7 @@ config NF_CT_PROTO_DCCP
If unsure, say Y.
config NF_CT_PROTO_GRE
- tristate
+ bool
config NF_CT_PROTO_SCTP
bool 'SCTP protocol connection tracking support'
@@ -396,7 +396,13 @@ config NETFILTER_NETLINK_GLUE_CT
the enqueued via NFNETLINK.
config NF_NAT
- tristate
+ tristate "Network Address Translation support"
+ depends on NF_CONNTRACK
+ default m if NETFILTER_ADVANCED=n
+ help
+ The NAT option allows masquerading, port forwarding and other
+ forms of full Network Address Port Translation. This can be
+ controlled by iptables, ip6tables or nft.
config NF_NAT_NEEDED
bool
@@ -431,6 +437,9 @@ config NF_NAT_TFTP
config NF_NAT_REDIRECT
bool
+config NF_NAT_MASQUERADE
+ bool
+
config NETFILTER_SYNPROXY
tristate
@@ -523,6 +532,7 @@ config NFT_LIMIT
config NFT_MASQ
depends on NF_CONNTRACK
depends on NF_NAT
+ select NF_NAT_MASQUERADE
tristate "Netfilter nf_tables masquerade support"
help
This option adds the "masquerade" expression that you can use
@@ -532,6 +542,7 @@ config NFT_REDIR
depends on NF_CONNTRACK
depends on NF_NAT
tristate "Netfilter nf_tables redirect support"
+ select NF_NAT_REDIRECT
help
This option adds the "redirect" expression that you can use
to perform NAT in the redirect flavour.
@@ -539,6 +550,7 @@ config NFT_REDIR
config NFT_NAT
depends on NF_CONNTRACK
select NF_NAT
+ depends on NF_TABLES_IPV4 || NF_TABLES_IPV6
tristate "Netfilter nf_tables nat module"
help
This option adds the "nat" expression that you can use to perform
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1ae65a314d7a..4894a85cdd0b 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -13,6 +13,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
+nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -25,8 +26,6 @@ obj-$(CONFIG_NETFILTER_NETLINK_OSF) += nfnetlink_osf.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
-obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
-
# netlink interface for nf_conntrack
obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
@@ -57,6 +56,7 @@ obj-$(CONFIG_NF_LOG_NETDEV) += nf_log_netdev.o
obj-$(CONFIG_NF_NAT) += nf_nat.o
nf_nat-$(CONFIG_NF_NAT_REDIRECT) += nf_nat_redirect.o
+nf_nat-$(CONFIG_NF_NAT_MASQUERADE) += nf_nat_masquerade.o
# NAT helpers
obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
@@ -110,6 +110,8 @@ obj-$(CONFIG_NFT_OSF) += nft_osf.o
obj-$(CONFIG_NFT_TPROXY) += nft_tproxy.o
obj-$(CONFIG_NFT_XFRM) += nft_xfrm.o
+obj-$(CONFIG_NFT_NAT) += nft_chain_nat.o
+
# nf_tables netdev
obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o
obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index cad48d07c818..8401cefd9f65 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -29,6 +29,7 @@ config IP_VS_IPV6
bool "IPv6 support for IPVS"
depends on IPV6 = y || IP_VS = IPV6
select IP6_NF_IPTABLES
+ select NF_DEFRAG_IPV6
---help---
Add IPv6 support to IPVS.
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index fe9abf3cc10a..43bbaa32b1d6 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -53,6 +53,7 @@
#endif
#include <net/ip_vs.h>
+#include <linux/indirect_call_wrapper.h>
EXPORT_SYMBOL(register_ip_vs_scheduler);
@@ -70,6 +71,29 @@ EXPORT_SYMBOL(ip_vs_get_debug_level);
#endif
EXPORT_SYMBOL(ip_vs_new_conn_out);
+#ifdef CONFIG_IP_VS_PROTO_TCP
+INDIRECT_CALLABLE_DECLARE(int
+ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+INDIRECT_CALLABLE_DECLARE(int
+ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
+#endif
+
+#if defined(CONFIG_IP_VS_PROTO_TCP) && defined(CONFIG_IP_VS_PROTO_UDP)
+#define SNAT_CALL(f, ...) \
+ INDIRECT_CALL_2(f, tcp_snat_handler, udp_snat_handler, __VA_ARGS__)
+#elif defined(CONFIG_IP_VS_PROTO_TCP)
+#define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, tcp_snat_handler, __VA_ARGS__)
+#elif defined(CONFIG_IP_VS_PROTO_UDP)
+#define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, udp_snat_handler, __VA_ARGS__)
+#else
+#define SNAT_CALL(f, ...) f(__VA_ARGS__)
+#endif
+
static unsigned int ip_vs_net_id __read_mostly;
/* netns cnt used for uniqueness */
static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
@@ -478,7 +502,9 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
*/
if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
iph->hdr_flags ^= IP_VS_HDR_INVERSE;
- cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph);
+ cp = INDIRECT_CALL_1(pp->conn_in_get,
+ ip_vs_conn_in_get_proto, svc->ipvs,
+ svc->af, skb, iph);
iph->hdr_flags ^= IP_VS_HDR_INVERSE;
if (cp) {
@@ -972,7 +998,8 @@ static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph);
+ cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
+ ipvs, AF_INET, skb, &ciph);
if (!cp)
return NF_ACCEPT;
@@ -1028,7 +1055,8 @@ static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
return NF_ACCEPT;
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph);
+ cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
+ ipvs, AF_INET6, skb, &ciph);
if (!cp)
return NF_ACCEPT;
@@ -1263,7 +1291,8 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
goto drop;
/* mangle the packet */
- if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
+ if (pp->snat_handler &&
+ !SNAT_CALL(pp->snat_handler, skb, pp, cp, iph))
goto drop;
#ifdef CONFIG_IP_VS_IPV6
@@ -1389,7 +1418,8 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
/*
* Check if the packet belongs to an existing entry
*/
- cp = pp->conn_out_get(ipvs, af, skb, &iph);
+ cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
+ ipvs, af, skb, &iph);
if (likely(cp)) {
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
@@ -1536,14 +1566,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
/* sorry, all this trouble for a no-hit :) */
IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
"ip_vs_in: packet continues traversal as normal");
- if (iph->fragoffs) {
- /* Fragment that couldn't be mapped to a conn entry
- * is missing module nf_defrag_ipv6
- */
- IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
+
+ /* Fragment couldn't be mapped to a conn entry */
+ if (iph->fragoffs)
IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
"unhandled fragment");
- }
+
*verdict = NF_ACCEPT;
return 0;
}
@@ -1644,7 +1672,8 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
/* The embedded headers contain source and dest in reverse order.
* For IPIP this is error for request, not for reply.
*/
- cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph);
+ cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
+ ipvs, AF_INET, skb, &ciph);
if (!cp) {
int v;
@@ -1796,7 +1825,8 @@ static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
/* The embedded headers contain source and dest in reverse order
* if not from localhost
*/
- cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph);
+ cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
+ ipvs, AF_INET6, skb, &ciph);
if (!cp) {
int v;
@@ -1925,7 +1955,8 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
/*
* Check if the packet belongs to an existing connection entry
*/
- cp = pp->conn_in_get(ipvs, af, skb, &iph);
+ cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
+ ipvs, af, skb, &iph);
conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
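
[Editorial aside on the INDIRECT_CALL_1/INDIRECT_CALL_2 wrappers used in ip_vs_core.c above (linux/indirect_call_wrapper.h): with retpolines enabled, indirect branches are expensive, so the macro compares the function pointer against its most likely targets and calls them directly, falling back to the indirect call otherwise. The sketch shows the idea with stand-in handlers (do_tcp/do_udp), not the macro's actual expansion.]

typedef int (*pkt_handler_t)(void *pkt);

static int do_tcp(void *pkt) { (void)pkt; return 0; }	/* stand-in handlers */
static int do_udp(void *pkt) { (void)pkt; return 0; }

static int dispatch(pkt_handler_t f, void *pkt)
{
	/* roughly what INDIRECT_CALL_2(f, do_tcp, do_udp, pkt) does
	 * when CONFIG_RETPOLINE is enabled */
	if (f == do_tcp)
		return do_tcp(pkt);	/* direct, branch-predictable call */
	if (f == do_udp)
		return do_udp(pkt);
	return f(pkt);			/* uncommon case: indirect call */
}
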
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 432141f04af3..053cd96b9c76 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -43,6 +43,7 @@
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
#include <net/ip6_route.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
#include <net/route.h>
#include <net/sock.h>
@@ -900,11 +901,17 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
#ifdef CONFIG_IP_VS_IPV6
if (udest->af == AF_INET6) {
+ int ret;
+
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
!__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
return -EINVAL;
+
+ ret = nf_defrag_ipv6_enable(svc->ipvs->net);
+ if (ret)
+ return ret;
} else
#endif
{
@@ -1228,6 +1235,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
ret = -EINVAL;
goto out_err;
}
+
+ ret = nf_defrag_ipv6_enable(ipvs->net);
+ if (ret)
+ goto out_err;
}
#endif
@@ -2221,6 +2232,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
u->udp_timeout);
#ifdef CONFIG_IP_VS_PROTO_TCP
+ if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
+ u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
+ return -EINVAL;
+ }
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
+ return -EINVAL;
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
@@ -2722,8 +2745,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
int size;
get = (struct ip_vs_get_services *)arg;
- size = sizeof(*get) +
- sizeof(struct ip_vs_service_entry) * get->num_services;
+ size = struct_size(get, entrytable, get->num_services);
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
@@ -2764,8 +2786,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
int size;
get = (struct ip_vs_get_dests *)arg;
- size = sizeof(*get) +
- sizeof(struct ip_vs_dest_entry) * get->num_dests;
+ size = struct_size(get, entrytable, get->num_dests);
if (*len != size) {
pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
@@ -3065,7 +3086,7 @@ static bool ip_vs_is_af_valid(int af)
static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs,
struct ip_vs_service_user_kern *usvc,
- struct nlattr *nla, int full_entry,
+ struct nlattr *nla, bool full_entry,
struct ip_vs_service **ret_svc)
{
struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1];
@@ -3152,7 +3173,7 @@ static struct ip_vs_service *ip_vs_genl_find_service(struct netns_ipvs *ipvs,
struct ip_vs_service *svc;
int ret;
- ret = ip_vs_genl_parse_service(ipvs, &usvc, nla, 0, &svc);
+ ret = ip_vs_genl_parse_service(ipvs, &usvc, nla, false, &svc);
return ret ? ERR_PTR(ret) : svc;
}
@@ -3262,7 +3283,7 @@ out_err:
}
static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
- struct nlattr *nla, int full_entry)
+ struct nlattr *nla, bool full_entry)
{
struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
struct nlattr *nla_addr, *nla_port;
@@ -3524,11 +3545,11 @@ out:
static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
{
+ bool need_full_svc = false, need_full_dest = false;
struct ip_vs_service *svc = NULL;
struct ip_vs_service_user_kern usvc;
struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
- int need_full_svc = 0, need_full_dest = 0;
struct net *net = sock_net(skb->sk);
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3552,7 +3573,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
* received a valid one. We need a full service specification when
* adding / editing a service. Only identifying members otherwise. */
if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
- need_full_svc = 1;
+ need_full_svc = true;
ret = ip_vs_genl_parse_service(ipvs, &usvc,
info->attrs[IPVS_CMD_ATTR_SERVICE],
@@ -3572,7 +3593,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST ||
cmd == IPVS_CMD_DEL_DEST) {
if (cmd != IPVS_CMD_DEL_DEST)
- need_full_dest = 1;
+ need_full_dest = true;
ret = ip_vs_genl_parse_dest(&udest,
info->attrs[IPVS_CMD_ATTR_DEST],
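
[Editorial aside on the new timeout bounds in ip_vs_set_timeout() above: the user-supplied values are in seconds and are later multiplied by HZ to obtain jiffies stored in an int, so anything above INT_MAX / HZ would wrap. The arithmetic below assumes HZ=1000 purely for illustration; HZ is configuration-dependent.]

#include <stdio.h>
#include <limits.h>

int main(void)
{
	const long long hz = 1000;		/* assumed CONFIG_HZ=1000 */
	long long max_ok = INT_MAX / hz;	/* 2147483 s, roughly 24.8 days */
	long long too_big = max_ok + 1;

	printf("max accepted timeout: %llds -> %lld jiffies (fits in int)\n",
	       max_ok, max_ok * hz);
	printf("one more second:      %llds -> %lld jiffies (> INT_MAX=%d)\n",
	       too_big, too_big * hz, INT_MAX);
	return 0;
}
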
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 4398a72edec5..fe69d46ff779 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -124,7 +124,7 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
}
s = data + plen;
if (skip) {
- int found = 0;
+ bool found = false;
for (;; s++) {
if (s == data_limit)
@@ -136,7 +136,7 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
if (!ext && isdigit(*s))
break;
if (*s == skip)
- found = 1;
+ found = true;
} else if (*s != skip) {
break;
}
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 5320d39976e1..480598cb0f05 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -129,7 +129,6 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
- .csum_check = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
@@ -152,7 +151,6 @@ struct ip_vs_protocol ip_vs_protocol_esp = {
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
- .csum_check = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index b0cd7d08f2a7..b58ddb7dffd1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -10,6 +10,9 @@
#include <net/ip_vs.h>
static int
+sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
+
+static int
sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
@@ -105,7 +108,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!sctp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
@@ -152,7 +155,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!sctp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
@@ -183,7 +186,7 @@ static int
sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
unsigned int sctphoff;
- struct sctphdr *sh, _sctph;
+ struct sctphdr *sh;
__le32 cmp, val;
#ifdef CONFIG_IP_VS_IPV6
@@ -193,10 +196,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
#endif
sctphoff = ip_hdrlen(skb);
- sh = skb_header_pointer(skb, sctphoff, sizeof(_sctph), &_sctph);
- if (sh == NULL)
- return 0;
-
+ sh = (struct sctphdr *)(skb->data + sctphoff);
cmp = sh->checksum;
val = sctp_compute_cksum(skb, sctphoff);
@@ -587,7 +587,6 @@ struct ip_vs_protocol ip_vs_protocol_sctp = {
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = sctp_snat_handler,
.dnat_handler = sctp_dnat_handler,
- .csum_check = sctp_csum_check,
.state_name = sctp_state_name,
.state_transition = sctp_state_transition,
.app_conn_bind = sctp_app_conn_bind,
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 1770fc6ce960..00ce07dda980 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -28,10 +28,14 @@
#include <net/ip6_checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/ip_vs.h>
static int
+tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
+
+static int
tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
@@ -143,14 +147,14 @@ tcp_partial_csum_update(int af, struct tcphdr *tcph,
}
-static int
+INDIRECT_CALLABLE_SCOPE int
tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
struct tcphdr *tcph;
unsigned int tcphoff = iph->len;
+ bool payload_csum = false;
int oldlen;
- int payload_csum = 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
@@ -166,7 +170,7 @@ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!tcp_csum_check(cp->af, skb, pp))
return 0;
/* Call application helper if needed */
@@ -176,7 +180,7 @@ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
if (ret == 1)
oldlen = skb->len - tcphoff;
else
- payload_csum = 1;
+ payload_csum = true;
}
tcph = (void *)skb_network_header(skb) + tcphoff;
@@ -192,7 +196,7 @@ tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
cp->dport, cp->vport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = (cp->app && pp->csum_check) ?
+ skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
@@ -227,8 +231,8 @@ tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
{
struct tcphdr *tcph;
unsigned int tcphoff = iph->len;
+ bool payload_csum = false;
int oldlen;
- int payload_csum = 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
@@ -244,7 +248,7 @@ tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!tcp_csum_check(cp->af, skb, pp))
return 0;
/*
@@ -257,7 +261,7 @@ tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
if (ret == 1)
oldlen = skb->len - tcphoff;
else
- payload_csum = 1;
+ payload_csum = true;
}
tcph = (void *)skb_network_header(skb) + tcphoff;
@@ -275,7 +279,7 @@ tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
cp->vport, cp->dport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = (cp->app && pp->csum_check) ?
+ skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
@@ -736,7 +740,6 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = tcp_snat_handler,
.dnat_handler = tcp_dnat_handler,
- .csum_check = tcp_csum_check,
.state_name = tcp_state_name,
.state_transition = tcp_state_transition,
.app_conn_bind = tcp_app_conn_bind,
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 0f53c49025f8..92c078abcb3e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -23,12 +23,16 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/udp.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/ip_vs.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
static int
+udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
+
+static int
udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp,
@@ -133,14 +137,14 @@ udp_partial_csum_update(int af, struct udphdr *uhdr,
}
-static int
+INDIRECT_CALLABLE_SCOPE int
udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
struct udphdr *udph;
unsigned int udphoff = iph->len;
+ bool payload_csum = false;
int oldlen;
- int payload_csum = 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
@@ -156,7 +160,7 @@ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!udp_csum_check(cp->af, skb, pp))
return 0;
/*
@@ -168,7 +172,7 @@ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
if (ret == 1)
oldlen = skb->len - udphoff;
else
- payload_csum = 1;
+ payload_csum = true;
}
udph = (void *)skb_network_header(skb) + udphoff;
@@ -186,7 +190,7 @@ udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
cp->dport, cp->vport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = (cp->app && pp->csum_check) ?
+ skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
@@ -222,8 +226,8 @@ udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
{
struct udphdr *udph;
unsigned int udphoff = iph->len;
+ bool payload_csum = false;
int oldlen;
- int payload_csum = 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6 && iph->fragoffs)
@@ -239,7 +243,7 @@ udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
int ret;
/* Some checks before mangling */
- if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ if (!udp_csum_check(cp->af, skb, pp))
return 0;
/*
@@ -252,7 +256,7 @@ udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
if (ret == 1)
oldlen = skb->len - udphoff;
else
- payload_csum = 1;
+ payload_csum = true;
}
udph = (void *)skb_network_header(skb) + udphoff;
@@ -270,7 +274,7 @@ udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
cp->vport, cp->dport);
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = (cp->app && pp->csum_check) ?
+ skb->ip_summed = cp->app ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
} else {
/* full checksum calculation */
@@ -494,7 +498,6 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
.conn_out_get = ip_vs_conn_out_get_proto,
.snat_handler = udp_snat_handler,
.dnat_handler = udp_dnat_handler,
- .csum_check = udp_csum_check,
.state_transition = udp_state_transition,
.state_name = udp_state_name,
.register_app = udp_register_app,
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 473cce2a5231..175349fcf91f 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -126,7 +126,7 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
{
struct flowi4 fl4;
struct rtable *rt;
- int loop = 0;
+ bool loop = false;
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
@@ -149,7 +149,7 @@ retry:
ip_rt_put(rt);
*saddr = fl4.saddr;
flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr);
- loop++;
+ loop = true;
goto retry;
}
*saddr = fl4.saddr;
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 20edd589fe06..f2681ec5b5f6 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -54,6 +54,7 @@ enum amanda_strings {
SEARCH_DATA,
SEARCH_MESG,
SEARCH_INDEX,
+ SEARCH_STATE,
};
static struct {
@@ -81,6 +82,10 @@ static struct {
.string = "INDEX ",
.len = 6,
},
+ [SEARCH_STATE] = {
+ .string = "STATE ",
+ .len = 6,
+ },
};
static int amanda_help(struct sk_buff *skb,
@@ -124,7 +129,7 @@ static int amanda_help(struct sk_buff *skb,
goto out;
stop += start;
- for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) {
+ for (i = SEARCH_DATA; i <= SEARCH_STATE; i++) {
off = skb_find_text(skb, start, stop, search[i].ts);
if (off == UINT_MAX)
continue;
@@ -168,7 +173,7 @@ out:
}
static const struct nf_conntrack_expect_policy amanda_exp_policy = {
- .max_expected = 3,
+ .max_expected = 4,
.timeout = 180,
};
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 741b533148ba..82bfbeef46af 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -51,7 +51,6 @@
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>
@@ -222,6 +221,24 @@ static u32 hash_conntrack(const struct net *net,
return scale_hash(hash_conntrack_raw(tuple, net));
}
+static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ struct {
+ __be16 sport;
+ __be16 dport;
+ } _inet_hdr, *inet_hdr;
+
+ /* Actually only need first 4 bytes to get ports. */
+ inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
+ if (!inet_hdr)
+ return false;
+
+ tuple->src.u.udp.port = inet_hdr->sport;
+ tuple->dst.u.udp.port = inet_hdr->dport;
+ return true;
+}
+
static bool
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
@@ -229,16 +246,11 @@ nf_ct_get_tuple(const struct sk_buff *skb,
u_int16_t l3num,
u_int8_t protonum,
struct net *net,
- struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l4proto *l4proto)
+ struct nf_conntrack_tuple *tuple)
{
unsigned int size;
const __be32 *ap;
__be32 _addrs[8];
- struct {
- __be16 sport;
- __be16 dport;
- } _inet_hdr, *inet_hdr;
memset(tuple, 0, sizeof(*tuple));
@@ -274,16 +286,36 @@ nf_ct_get_tuple(const struct sk_buff *skb,
tuple->dst.protonum = protonum;
tuple->dst.dir = IP_CT_DIR_ORIGINAL;
- if (unlikely(l4proto->pkt_to_tuple))
- return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
-
- /* Actually only need first 4 bytes to get ports. */
- inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
- if (!inet_hdr)
- return false;
+ switch (protonum) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+ case IPPROTO_ICMP:
+ return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ case IPPROTO_GRE:
+ return gre_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+ case IPPROTO_TCP:
+ case IPPROTO_UDP: /* fallthrough */
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ case IPPROTO_UDPLITE:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ case IPPROTO_SCTP:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ case IPPROTO_DCCP:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+ default:
+ break;
+ }
- tuple->src.u.udp.port = inet_hdr->sport;
- tuple->dst.u.udp.port = inet_hdr->dport;
return true;
}
@@ -366,33 +398,20 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num,
struct net *net, struct nf_conntrack_tuple *tuple)
{
- const struct nf_conntrack_l4proto *l4proto;
u8 protonum;
int protoff;
- int ret;
-
- rcu_read_lock();
protoff = get_l4proto(skb, nhoff, l3num, &protonum);
- if (protoff <= 0) {
- rcu_read_unlock();
+ if (protoff <= 0)
return false;
- }
-
- l4proto = __nf_ct_l4proto_find(protonum);
-
- ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
- l4proto);
- rcu_read_unlock();
- return ret;
+ return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l4proto *l4proto)
+ const struct nf_conntrack_tuple *orig)
{
memset(inverse, 0, sizeof(*inverse));
@@ -415,8 +434,14 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
inverse->dst.protonum = orig->dst.protonum;
- if (unlikely(l4proto->invert_tuple))
- return l4proto->invert_tuple(inverse, orig);
+ switch (orig->dst.protonum) {
+ case IPPROTO_ICMP:
+ return nf_conntrack_invert_icmp_tuple(inverse, orig);
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
+#endif
+ }
inverse->src.u.all = orig->dst.u.all;
inverse->dst.u.all = orig->src.u.all;
@@ -526,11 +551,20 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl)
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
+static void destroy_gre_conntrack(struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ struct nf_conn *master = ct->master;
+
+ if (master)
+ nf_ct_gre_keymap_destroy(master);
+#endif
+}
+
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
- const struct nf_conntrack_l4proto *l4proto;
pr_debug("destroy_conntrack(%p)\n", ct);
WARN_ON(atomic_read(&nfct->use) != 0);
@@ -539,9 +573,9 @@ destroy_conntrack(struct nf_conntrack *nfct)
nf_ct_tmpl_free(ct);
return;
}
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
- if (l4proto->destroy)
- l4proto->destroy(ct);
+
+ if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
+ destroy_gre_conntrack(ct);
local_bh_disable();
/* Expectations will have been removed in clean_from_lists,
@@ -840,7 +874,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
enum ip_conntrack_info oldinfo;
struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->allow_clash &&
!nf_ct_is_dying(ct) &&
atomic_inc_not_zero(&ct->ct_general.use)) {
@@ -901,10 +935,18 @@ __nf_conntrack_confirm(struct sk_buff *skb)
* REJECT will give spurious warnings here.
*/
- /* No external references means no one else could have
- * confirmed us.
+ /* Another skb with the same unconfirmed conntrack may
+ * win the race. This can happen when bridged (br_flood) or
+ * broadcast/multicast packets are skb_clone()d while the
+ * conntrack is still unconfirmed.
*/
- WARN_ON(nf_ct_is_confirmed(ct));
+ if (unlikely(nf_ct_is_confirmed(ct))) {
+ WARN_ON_ONCE(1);
+ nf_conntrack_double_unlock(hash, reply_hash);
+ local_bh_enable();
+ return NF_DROP;
+ }
+
pr_debug("Confirming conntrack %p\n", ct);
/* We have to check the DYING flag after unlink to prevent
* a race against nf_ct_get_next_corpse() possibly called from
@@ -1007,6 +1049,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
}
if (nf_ct_key_equal(h, tuple, zone, net)) {
+ /* Tuple is taken already, so caller will need to find
+ * a new source port to use.
+ *
+ * Only exception:
+ * If the *original tuples* are identical, then both
+ * conntracks refer to the same flow.
+ * This is a rare situation, it can occur e.g. when
+ * more than one UDP packet is sent from same socket
+ * in different threads.
+ *
+ * Let nf_ct_resolve_clash() deal with this later.
+ */
+ if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+ continue;
+
NF_CT_STAT_INC_ATOMIC(net, found);
rcu_read_unlock();
return 1;
@@ -1112,7 +1170,7 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
if (!test_bit(IPS_ASSURED_BIT, &ct->status))
return true;
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
return true;
@@ -1342,7 +1400,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l4proto *l4proto,
struct sk_buff *skb,
unsigned int dataoff, u32 hash)
{
@@ -1355,7 +1412,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conn_timeout *timeout_ext;
struct nf_conntrack_zone tmp;
- if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) {
+ if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
pr_debug("Can't invert tuple.\n");
return NULL;
}
@@ -1437,7 +1494,6 @@ resolve_normal_ct(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u_int8_t protonum,
- const struct nf_conntrack_l4proto *l4proto,
const struct nf_hook_state *state)
{
const struct nf_conntrack_zone *zone;
@@ -1450,7 +1506,7 @@ resolve_normal_ct(struct nf_conn *tmpl,
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
- &tuple, l4proto)) {
+ &tuple)) {
pr_debug("Can't get tuple\n");
return 0;
}
@@ -1460,7 +1516,7 @@ resolve_normal_ct(struct nf_conn *tmpl,
hash = hash_conntrack_raw(&tuple, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
if (!h) {
- h = init_conntrack(state->net, tmpl, &tuple, l4proto,
+ h = init_conntrack(state->net, tmpl, &tuple,
skb, dataoff, hash);
if (!h)
return 0;
@@ -1522,10 +1578,66 @@ nf_conntrack_handle_icmp(struct nf_conn *tmpl,
return ret;
}
+static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo)
+{
+ const unsigned int *timeout = nf_ct_timeout_lookup(ct);
+
+ if (!timeout)
+ timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
+
+ nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
+ return NF_ACCEPT;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int nf_conntrack_handle_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
+{
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ return nf_conntrack_tcp_packet(ct, skb, dataoff,
+ ctinfo, state);
+ case IPPROTO_UDP:
+ return nf_conntrack_udp_packet(ct, skb, dataoff,
+ ctinfo, state);
+ case IPPROTO_ICMP:
+ return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ case IPPROTO_UDPLITE:
+ return nf_conntrack_udplite_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ case IPPROTO_SCTP:
+ return nf_conntrack_sctp_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ case IPPROTO_DCCP:
+ return nf_conntrack_dccp_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ case IPPROTO_GRE:
+ return nf_conntrack_gre_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+ }
+
+ return generic_packet(ct, skb, ctinfo);
+}
+
unsigned int
nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{
- const struct nf_conntrack_l4proto *l4proto;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct, *tmpl;
u_int8_t protonum;
@@ -1552,8 +1664,6 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
goto out;
}
- l4proto = __nf_ct_l4proto_find(protonum);
-
if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
protonum, state);
@@ -1567,7 +1677,7 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
}
repeat:
ret = resolve_normal_ct(tmpl, skb, dataoff,
- protonum, l4proto, state);
+ protonum, state);
if (ret < 0) {
/* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(state->net, drop);
@@ -1583,7 +1693,7 @@ repeat:
goto out;
}
- ret = l4proto->packet(ct, skb, dataoff, ctinfo, state);
+ ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
@@ -1614,19 +1724,6 @@ out:
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
-bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig)
-{
- bool ret;
-
- rcu_read_lock();
- ret = nf_ct_invert_tuple(inverse, orig,
- __nf_ct_l4proto_find(orig->dst.protonum));
- rcu_read_unlock();
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
-
/* Alter reply tuple (maybe alter helper). This is for NAT, and is
implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
@@ -1654,11 +1751,9 @@ EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
void __nf_ct_refresh_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
- unsigned long extra_jiffies,
- int do_acct)
+ u32 extra_jiffies,
+ bool do_acct)
{
- WARN_ON(!skb);
-
/* Only update if this is not a fixed timeout */
if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
goto acct;
@@ -1667,7 +1762,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
if (nf_ct_is_confirmed(ct))
extra_jiffies += nfct_time_stamp;
- ct->timeout = extra_jiffies;
+ if (ct->timeout != extra_jiffies)
+ ct->timeout = extra_jiffies;
acct:
if (do_acct)
nf_ct_acct_update(ct, ctinfo, skb->len);
@@ -1757,7 +1853,6 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
- const struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
enum ip_conntrack_info ctinfo;
@@ -1778,10 +1873,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
if (dataoff <= 0)
return -1;
- l4proto = nf_ct_l4proto_find_get(l4num);
-
if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
- l4num, net, &tuple, l4proto))
+ l4num, net, &tuple))
return -1;
if (ct->status & IPS_SRC_NAT) {
@@ -2387,6 +2480,7 @@ int nf_conntrack_init_net(struct net *net)
int cpu;
BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
+ BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
atomic_set(&net->ct.count, 0);
net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
@@ -2413,15 +2507,10 @@ int nf_conntrack_init_net(struct net *net)
nf_conntrack_tstamp_pernet_init(net);
nf_conntrack_ecache_pernet_init(net);
nf_conntrack_helper_pernet_init(net);
+ nf_conntrack_proto_pernet_init(net);
- ret = nf_conntrack_proto_pernet_init(net);
- if (ret < 0)
- goto err_proto;
return 0;
-err_proto:
- nf_conntrack_ecache_pernet_fini(net);
- nf_conntrack_expect_pernet_fini(net);
err_expect:
free_percpu(net->ct.stat);
err_pcpu_lists:
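
[Editorial aside on the nf_conntrack_core.c rework above: rather than wrapping the indirect call as IPVS does, nf_ct_get_tuple(), nf_ct_invert_tuple() and nf_conntrack_handle_packet() drop the RCU-managed l4proto pointer table for built-in trackers and dispatch on the protocol number directly. A standalone sketch of that shape, with stand-in handler names:]

#include <netinet/in.h>			/* IPPROTO_TCP, IPPROTO_UDP */

struct pkt;				/* opaque stand-in for struct sk_buff */

static int handle_tcp(struct pkt *p)     { (void)p; return 1; }
static int handle_udp(struct pkt *p)     { (void)p; return 1; }
static int handle_generic(struct pkt *p) { (void)p; return 1; }

static int handle_packet_demo(unsigned char protonum, struct pkt *p)
{
	switch (protonum) {
	case IPPROTO_TCP:
		return handle_tcp(p);		/* direct call, no pointer table */
	case IPPROTO_UDP:
		return handle_udp(p);
	default:
		return handle_generic(p);	/* everything else keeps a generic path */
	}
}
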
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 3034038bfdf0..334d6e5b7762 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -610,7 +610,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
expect->tuple.src.l3num,
expect->tuple.dst.protonum);
print_tuple(s, &expect->tuple,
- __nf_ct_l4proto_find(expect->tuple.dst.protonum));
+ nf_ct_l4proto_find(expect->tuple.dst.protonum));
if (expect->flags & NF_CT_EXPECT_PERMANENT) {
seq_puts(s, "PERMANENT");
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1213beb5a714..66c596d287a5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -46,7 +46,7 @@
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#ifdef CONFIG_NF_NAT_NEEDED
-#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#endif
@@ -134,7 +134,7 @@ static int ctnetlink_dump_tuples(struct sk_buff *skb,
ret = ctnetlink_dump_tuples_ip(skb, tuple);
if (ret >= 0) {
- l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
+ l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
}
rcu_read_unlock();
@@ -182,7 +182,7 @@ static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
struct nlattr *nest_proto;
int ret;
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (!l4proto->to_nlattr)
return 0;
@@ -590,7 +590,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
len *= 3u; /* ORIG, REPLY, MASTER */
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
len += l4proto->nlattr_size;
if (l4proto->nlattr_tuple_size) {
len4 = l4proto->nlattr_tuple_size();
@@ -1059,7 +1059,7 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
rcu_read_lock();
- l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
+ l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
if (likely(l4proto->nlattr_to_tuple)) {
ret = nla_validate_nested(attr, CTA_PROTO_MAX,
@@ -1722,11 +1722,9 @@ static int ctnetlink_change_protoinfo(struct nf_conn *ct,
if (err < 0)
return err;
- rcu_read_lock();
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->from_nlattr)
err = l4proto->from_nlattr(tb, ct);
- rcu_read_unlock();
return err;
}
@@ -2676,8 +2674,8 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
rcu_read_lock();
ret = ctnetlink_dump_tuples_ip(skb, &m);
if (ret >= 0) {
- l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
- ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
+ l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
+ ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
}
rcu_read_unlock();
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 11562f2a08bb..976f1dcb97f0 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -121,7 +121,7 @@ static void pptp_expectfn(struct nf_conn *ct,
struct nf_conntrack_expect *exp_other;
/* obviously this tuple inversion only works until you do NAT */
- nf_ct_invert_tuplepr(&inv_t, &exp->tuple);
+ nf_ct_invert_tuple(&inv_t, &exp->tuple);
pr_debug("trying to unexpect other dir: ");
nf_ct_dump_tuple(&inv_t);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 859f5d07a915..b9403a266a2e 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -43,40 +43,9 @@
extern unsigned int nf_conntrack_net_id;
-static struct nf_conntrack_l4proto __rcu *nf_ct_protos[MAX_NF_CT_PROTO + 1] __read_mostly;
-
static DEFINE_MUTEX(nf_ct_proto_mutex);
#ifdef CONFIG_SYSCTL
-static int
-nf_ct_register_sysctl(struct net *net,
- struct ctl_table_header **header,
- const char *path,
- struct ctl_table *table)
-{
- if (*header == NULL) {
- *header = register_net_sysctl(net, path, table);
- if (*header == NULL)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void
-nf_ct_unregister_sysctl(struct ctl_table_header **header,
- struct ctl_table **table,
- unsigned int users)
-{
- if (users > 0)
- return;
-
- unregister_net_sysctl_table(*header);
- kfree(*table);
- *header = NULL;
- *table = NULL;
-}
-
__printf(5, 6)
void nf_l4proto_log_invalid(const struct sk_buff *skb,
struct net *net,
@@ -124,295 +93,82 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
EXPORT_SYMBOL_GPL(nf_ct_l4proto_log_invalid);
#endif
-const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto)
-{
- if (unlikely(l4proto >= ARRAY_SIZE(nf_ct_protos)))
- return &nf_conntrack_l4proto_generic;
-
- return rcu_dereference(nf_ct_protos[l4proto]);
-}
-EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
-
-const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4num)
-{
- const struct nf_conntrack_l4proto *p;
-
- rcu_read_lock();
- p = __nf_ct_l4proto_find(l4num);
- if (!try_module_get(p->me))
- p = &nf_conntrack_l4proto_generic;
- rcu_read_unlock();
-
- return p;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get);
-
-void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p)
-{
- module_put(p->me);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
-
-static int kill_l4proto(struct nf_conn *i, void *data)
-{
- const struct nf_conntrack_l4proto *l4proto;
- l4proto = data;
- return nf_ct_protonum(i) == l4proto->l4proto;
-}
-
-static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
- const struct nf_conntrack_l4proto *l4proto)
-{
- if (l4proto->get_net_proto) {
- /* statically built-in protocols use static per-net */
- return l4proto->get_net_proto(net);
- } else if (l4proto->net_id) {
- /* ... and loadable protocols use dynamic per-net */
- return net_generic(net, *l4proto->net_id);
- }
- return NULL;
-}
-
-static
-int nf_ct_l4proto_register_sysctl(struct net *net,
- struct nf_proto_net *pn)
-{
- int err = 0;
-
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table != NULL) {
- err = nf_ct_register_sysctl(net,
- &pn->ctl_table_header,
- "net/netfilter",
- pn->ctl_table);
- if (err < 0) {
- if (!pn->users) {
- kfree(pn->ctl_table);
- pn->ctl_table = NULL;
- }
- }
- }
-#endif /* CONFIG_SYSCTL */
- return err;
-}
-
-static
-void nf_ct_l4proto_unregister_sysctl(struct nf_proto_net *pn)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table_header != NULL)
- nf_ct_unregister_sysctl(&pn->ctl_table_header,
- &pn->ctl_table,
- pn->users);
-#endif /* CONFIG_SYSCTL */
-}
-
-/* FIXME: Allow NULL functions and sub in pointers to generic for
- them. --RR */
-int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto)
-{
- int ret = 0;
-
- if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) ||
- (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
- return -EINVAL;
-
- mutex_lock(&nf_ct_proto_mutex);
- if (rcu_dereference_protected(
- nf_ct_protos[l4proto->l4proto],
- lockdep_is_held(&nf_ct_proto_mutex)
- ) != &nf_conntrack_l4proto_generic) {
- ret = -EBUSY;
- goto out_unlock;
- }
-
- rcu_assign_pointer(nf_ct_protos[l4proto->l4proto], l4proto);
-out_unlock:
- mutex_unlock(&nf_ct_proto_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_register_one);
-
-int nf_ct_l4proto_pernet_register_one(struct net *net,
- const struct nf_conntrack_l4proto *l4proto)
-{
- int ret = 0;
- struct nf_proto_net *pn = NULL;
-
- if (l4proto->init_net) {
- ret = l4proto->init_net(net);
- if (ret < 0)
- goto out;
- }
-
- pn = nf_ct_l4proto_net(net, l4proto);
- if (pn == NULL)
- goto out;
-
- ret = nf_ct_l4proto_register_sysctl(net, pn);
- if (ret < 0)
- goto out;
-
- pn->users++;
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one);
-
-static void __nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto)
-
-{
- BUG_ON(l4proto->l4proto >= ARRAY_SIZE(nf_ct_protos));
-
- BUG_ON(rcu_dereference_protected(
- nf_ct_protos[l4proto->l4proto],
- lockdep_is_held(&nf_ct_proto_mutex)
- ) != l4proto);
- rcu_assign_pointer(nf_ct_protos[l4proto->l4proto],
- &nf_conntrack_l4proto_generic);
-}
-
-void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto)
-{
- mutex_lock(&nf_ct_proto_mutex);
- __nf_ct_l4proto_unregister_one(l4proto);
- mutex_unlock(&nf_ct_proto_mutex);
-
- synchronize_net();
- /* Remove all contrack entries for this protocol */
- nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister_one);
-
-void nf_ct_l4proto_pernet_unregister_one(struct net *net,
- const struct nf_conntrack_l4proto *l4proto)
-{
- struct nf_proto_net *pn = nf_ct_l4proto_net(net, l4proto);
-
- if (pn == NULL)
- return;
-
- pn->users--;
- nf_ct_l4proto_unregister_sysctl(pn);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
-
-static void
-nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[],
- unsigned int num_proto)
-{
- int i;
-
- mutex_lock(&nf_ct_proto_mutex);
- for (i = 0; i < num_proto; i++)
- __nf_ct_l4proto_unregister_one(l4proto[i]);
- mutex_unlock(&nf_ct_proto_mutex);
-
- synchronize_net();
-
- for (i = 0; i < num_proto; i++)
- nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto[i]);
-}
-
-static int
-nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
- unsigned int num_proto)
-{
- int ret = -EINVAL;
- unsigned int i;
-
- for (i = 0; i < num_proto; i++) {
- ret = nf_ct_l4proto_register_one(l4proto[i]);
- if (ret < 0)
- break;
- }
- if (i != num_proto) {
- pr_err("nf_conntrack: can't register l4 %d proto.\n",
- l4proto[i]->l4proto);
- nf_ct_l4proto_unregister(l4proto, i);
- }
- return ret;
-}
-
-int nf_ct_l4proto_pernet_register(struct net *net,
- const struct nf_conntrack_l4proto *const l4proto[],
- unsigned int num_proto)
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto)
{
- int ret = -EINVAL;
- unsigned int i;
-
- for (i = 0; i < num_proto; i++) {
- ret = nf_ct_l4proto_pernet_register_one(net, l4proto[i]);
- if (ret < 0)
- break;
- }
- if (i != num_proto) {
- pr_err("nf_conntrack %d: pernet registration failed\n",
- l4proto[i]->l4proto);
- nf_ct_l4proto_pernet_unregister(net, l4proto, i);
+ switch (l4proto) {
+ case IPPROTO_UDP: return &nf_conntrack_l4proto_udp;
+ case IPPROTO_TCP: return &nf_conntrack_l4proto_tcp;
+ case IPPROTO_ICMP: return &nf_conntrack_l4proto_icmp;
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ case IPPROTO_DCCP: return &nf_conntrack_l4proto_dccp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ case IPPROTO_SCTP: return &nf_conntrack_l4proto_sctp;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ case IPPROTO_UDPLITE: return &nf_conntrack_l4proto_udplite;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ case IPPROTO_GRE: return &nf_conntrack_l4proto_gre;
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6: return &nf_conntrack_l4proto_icmpv6;
+#endif /* CONFIG_IPV6 */
}
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
-void nf_ct_l4proto_pernet_unregister(struct net *net,
- const struct nf_conntrack_l4proto *const l4proto[],
- unsigned int num_proto)
-{
- while (num_proto-- != 0)
- nf_ct_l4proto_pernet_unregister_one(net, l4proto[num_proto]);
-}
-EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
+ return &nf_conntrack_l4proto_generic;
+};
+EXPORT_SYMBOL_GPL(nf_ct_l4proto_find);
-static unsigned int ipv4_helper(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+static unsigned int nf_confirm(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
const struct nf_conn_help *help;
- const struct nf_conntrack_helper *helper;
-
- /* This is where we call the helper: as the packet goes out. */
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- return NF_ACCEPT;
help = nfct_help(ct);
- if (!help)
- return NF_ACCEPT;
+ if (help) {
+ const struct nf_conntrack_helper *helper;
+ int ret;
+
+ /* rcu_read_lock()ed by nf_hook_thresh */
+ helper = rcu_dereference(help->helper);
+ if (helper) {
+ ret = helper->help(skb,
+ protoff,
+ ct, ctinfo);
+ if (ret != NF_ACCEPT)
+ return ret;
+ }
+ }
- /* rcu_read_lock()ed by nf_hook_thresh */
- helper = rcu_dereference(help->helper);
- if (!helper)
- return NF_ACCEPT;
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_is_loopback_packet(skb)) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
+ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+ return NF_DROP;
+ }
+ }
- return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
- ct, ctinfo);
+ /* We've seen it coming out the other side: confirm it */
+ return nf_conntrack_confirm(skb);
}
static unsigned int ipv4_confirm(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
+ return nf_conntrack_confirm(skb);
- /* adjust seqs for loopback traffic only in outgoing direction */
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
- !nf_is_loopback_packet(skb)) {
- if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
- NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
- return NF_DROP;
- }
- }
-out:
- /* We've seen it coming out the other side: confirm it */
- return nf_conntrack_confirm(skb);
+ return nf_confirm(skb,
+ skb_network_offset(skb) + ip_hdrlen(skb),
+ ct, ctinfo);
}
static unsigned int ipv4_conntrack_in(void *priv,
@@ -461,24 +217,12 @@ static const struct nf_hook_ops ipv4_conntrack_ops[] = {
.priority = NF_IP_PRI_CONNTRACK,
},
{
- .hook = ipv4_helper,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP_PRI_CONNTRACK_HELPER,
- },
- {
.hook = ipv4_confirm,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
- .hook = ipv4_helper,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP_PRI_CONNTRACK_HELPER,
- },
- {
.hook = ipv4_confirm,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
@@ -623,31 +367,21 @@ static unsigned int ipv6_confirm(void *priv,
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned char pnum = ipv6_hdr(skb)->nexthdr;
- int protoff;
__be16 frag_off;
+ int protoff;
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
+ return nf_conntrack_confirm(skb);
protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
&frag_off);
if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
pr_debug("proto header not found\n");
- goto out;
+ return nf_conntrack_confirm(skb);
}
- /* adjust seqs for loopback traffic only in outgoing direction */
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
- !nf_is_loopback_packet(skb)) {
- if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
- NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
- return NF_DROP;
- }
- }
-out:
- /* We've seen it coming out the other side: confirm it */
- return nf_conntrack_confirm(skb);
+ return nf_confirm(skb, protoff, ct, ctinfo);
}
static unsigned int ipv6_conntrack_in(void *priv,
@@ -664,42 +398,6 @@ static unsigned int ipv6_conntrack_local(void *priv,
return nf_conntrack_in(skb, state);
}
-static unsigned int ipv6_helper(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
-{
- struct nf_conn *ct;
- const struct nf_conn_help *help;
- const struct nf_conntrack_helper *helper;
- enum ip_conntrack_info ctinfo;
- __be16 frag_off;
- int protoff;
- u8 nexthdr;
-
- /* This is where we call the helper: as the packet goes out. */
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- return NF_ACCEPT;
-
- help = nfct_help(ct);
- if (!help)
- return NF_ACCEPT;
- /* rcu_read_lock()ed by nf_hook_thresh */
- helper = rcu_dereference(help->helper);
- if (!helper)
- return NF_ACCEPT;
-
- nexthdr = ipv6_hdr(skb)->nexthdr;
- protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
- &frag_off);
- if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
- pr_debug("proto header not found\n");
- return NF_ACCEPT;
- }
-
- return helper->help(skb, protoff, ct, ctinfo);
-}
-
static const struct nf_hook_ops ipv6_conntrack_ops[] = {
{
.hook = ipv6_conntrack_in,
@@ -714,24 +412,12 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
.priority = NF_IP6_PRI_CONNTRACK,
},
{
- .hook = ipv6_helper,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP6_PRI_CONNTRACK_HELPER,
- },
- {
.hook = ipv6_confirm,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_LAST,
},
{
- .hook = ipv6_helper,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP6_PRI_CONNTRACK_HELPER,
- },
- {
.hook = ipv6_confirm,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
@@ -874,27 +560,9 @@ void nf_ct_netns_put(struct net *net, uint8_t nfproto)
}
EXPORT_SYMBOL_GPL(nf_ct_netns_put);
-static const struct nf_conntrack_l4proto * const builtin_l4proto[] = {
- &nf_conntrack_l4proto_tcp,
- &nf_conntrack_l4proto_udp,
- &nf_conntrack_l4proto_icmp,
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- &nf_conntrack_l4proto_dccp,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
- &nf_conntrack_l4proto_sctp,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
- &nf_conntrack_l4proto_udplite,
-#endif
-#if IS_ENABLED(CONFIG_IPV6)
- &nf_conntrack_l4proto_icmpv6,
-#endif /* CONFIG_IPV6 */
-};
-
int nf_conntrack_proto_init(void)
{
- int ret = 0, i;
+ int ret;
ret = nf_register_sockopt(&so_getorigdst);
if (ret < 0)
@@ -906,18 +574,8 @@ int nf_conntrack_proto_init(void)
goto cleanup_sockopt;
#endif
- for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
- RCU_INIT_POINTER(nf_ct_protos[i],
- &nf_conntrack_l4proto_generic);
-
- ret = nf_ct_l4proto_register(builtin_l4proto,
- ARRAY_SIZE(builtin_l4proto));
- if (ret < 0)
- goto cleanup_sockopt2;
-
return ret;
-cleanup_sockopt2:
- nf_unregister_sockopt(&so_getorigdst);
+
#if IS_ENABLED(CONFIG_IPV6)
cleanup_sockopt:
nf_unregister_sockopt(&so_getorigdst6);
@@ -933,43 +591,33 @@ void nf_conntrack_proto_fini(void)
#endif
}
-int nf_conntrack_proto_pernet_init(struct net *net)
+void nf_conntrack_proto_pernet_init(struct net *net)
{
- int err;
- struct nf_proto_net *pn = nf_ct_l4proto_net(net,
- &nf_conntrack_l4proto_generic);
-
- err = nf_conntrack_l4proto_generic.init_net(net);
- if (err < 0)
- return err;
- err = nf_ct_l4proto_register_sysctl(net,
- pn);
- if (err < 0)
- return err;
-
- err = nf_ct_l4proto_pernet_register(net, builtin_l4proto,
- ARRAY_SIZE(builtin_l4proto));
- if (err < 0) {
- nf_ct_l4proto_unregister_sysctl(pn);
- return err;
- }
-
- pn->users++;
- return 0;
+ nf_conntrack_generic_init_net(net);
+ nf_conntrack_udp_init_net(net);
+ nf_conntrack_tcp_init_net(net);
+ nf_conntrack_icmp_init_net(net);
+#if IS_ENABLED(CONFIG_IPV6)
+ nf_conntrack_icmpv6_init_net(net);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ nf_conntrack_dccp_init_net(net);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ nf_conntrack_sctp_init_net(net);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ nf_conntrack_gre_init_net(net);
+#endif
}
void nf_conntrack_proto_pernet_fini(struct net *net)
{
- struct nf_proto_net *pn = nf_ct_l4proto_net(net,
- &nf_conntrack_l4proto_generic);
-
- nf_ct_l4proto_pernet_unregister(net, builtin_l4proto,
- ARRAY_SIZE(builtin_l4proto));
- pn->users--;
- nf_ct_l4proto_unregister_sysctl(pn);
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ nf_ct_gre_keymap_flush(net);
+#endif
}
-
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 023c1445bc39..6fca80587505 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -472,9 +472,10 @@ out_invalid:
return true;
}
-static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
- unsigned int dataoff, enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
@@ -723,123 +724,28 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-/* template, data assigned later */
-static struct ctl_table dccp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_dccp_timeout_request",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_respond",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_partopen",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_open",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_closereq",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_closing",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_timeout_timewait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_dccp_loose",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
- struct nf_dccp_net *dn)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table)
- return 0;
-
- pn->ctl_table = kmemdup(dccp_sysctl_table,
- sizeof(dccp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
- pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
- pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
- pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
- pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
- pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
- pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
- pn->ctl_table[7].data = &dn->dccp_loose;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- pn->ctl_table[0].procname = NULL;
-#endif
- return 0;
-}
-
-static int dccp_init_net(struct net *net)
+void nf_conntrack_dccp_init_net(struct net *net)
{
struct nf_dccp_net *dn = nf_dccp_pernet(net);
- struct nf_proto_net *pn = &dn->pn;
-
- if (!pn->users) {
- /* default values */
- dn->dccp_loose = 1;
- dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
-
- /* timeouts[0] is unused, make it same as SYN_SENT so
- * ->timeouts[0] contains 'new' timeout, like udp or icmp.
- */
- dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
- }
- return dccp_kmemdup_sysctl_table(net, pn, dn);
-}
-
-static struct nf_proto_net *dccp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.dccp.pn;
+ /* default values */
+ dn->dccp_loose = 1;
+ dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
+ dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
+ dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
+ dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
+
+ /* timeouts[0] is unused, make it same as SYN_SENT so
+ * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+ */
+ dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = {
.l4proto = IPPROTO_DCCP,
- .packet = dccp_packet,
.can_early_drop = dccp_can_early_drop,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = dccp_print_conntrack,
@@ -862,6 +768,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = {
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = dccp_init_net,
- .get_net_proto = dccp_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 5da19d5fbc76..0f526fafecae 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -15,50 +15,6 @@
static const unsigned int nf_ct_generic_timeout = 600*HZ;
-static bool nf_generic_should_process(u8 proto)
-{
- switch (proto) {
-#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
- case IPPROTO_GRE:
- return false;
-#endif
- default:
- return true;
- }
-}
-
-static bool generic_pkt_to_tuple(const struct sk_buff *skb,
- unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
-{
- tuple->src.u.all = 0;
- tuple->dst.u.all = 0;
-
- return true;
-}
-
-/* Returns verdict for packet, or -1 for invalid. */
-static int generic_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
-{
- const unsigned int *timeout = nf_ct_timeout_lookup(ct);
-
- if (!nf_generic_should_process(nf_ct_protonum(ct))) {
- pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n",
- nf_ct_protonum(ct));
- return -NF_ACCEPT;
- }
-
- if (!timeout)
- timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
-
- nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
- return NF_ACCEPT;
-}
-
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
@@ -104,53 +60,16 @@ generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table generic_sysctl_table[] = {
- {
- .procname = "nf_conntrack_generic_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_generic_net *gn)
-{
-#ifdef CONFIG_SYSCTL
- pn->ctl_table = kmemdup(generic_sysctl_table,
- sizeof(generic_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &gn->timeout;
-#endif
- return 0;
-}
-
-static int generic_init_net(struct net *net)
+void nf_conntrack_generic_init_net(struct net *net)
{
struct nf_generic_net *gn = nf_generic_pernet(net);
- struct nf_proto_net *pn = &gn->pn;
gn->timeout = nf_ct_generic_timeout;
-
- return generic_kmemdup_sysctl_table(pn, gn);
-}
-
-static struct nf_proto_net *generic_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.generic.pn;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
{
.l4proto = 255,
- .pkt_to_tuple = generic_pkt_to_tuple,
- .packet = generic_packet,
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = generic_timeout_nlattr_to_obj,
@@ -160,6 +79,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
.nla_policy = generic_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = generic_init_net,
- .get_net_proto = generic_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 8899b51aad44..ee9ab10a32e4 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -48,24 +48,25 @@ static const unsigned int gre_timeouts[GRE_CT_MAX] = {
[GRE_CT_REPLIED] = 180*HZ,
};
-static unsigned int proto_gre_net_id __read_mostly;
+/* used when expectation is added */
+static DEFINE_SPINLOCK(keymap_lock);
-static inline struct netns_proto_gre *gre_pernet(struct net *net)
+static inline struct nf_gre_net *gre_pernet(struct net *net)
{
- return net_generic(net, proto_gre_net_id);
+ return &net->ct.nf_ct_proto.gre;
}
-static void nf_ct_gre_keymap_flush(struct net *net)
+void nf_ct_gre_keymap_flush(struct net *net)
{
- struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_gre_net *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km, *tmp;
- write_lock_bh(&net_gre->keymap_lock);
+ spin_lock_bh(&keymap_lock);
list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) {
- list_del(&km->list);
- kfree(km);
+ list_del_rcu(&km->list);
+ kfree_rcu(km, rcu);
}
- write_unlock_bh(&net_gre->keymap_lock);
+ spin_unlock_bh(&keymap_lock);
}
static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
@@ -81,18 +82,16 @@ static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
/* look up the source key for a given tuple */
static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
{
- struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_gre_net *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km;
__be16 key = 0;
- read_lock_bh(&net_gre->keymap_lock);
- list_for_each_entry(km, &net_gre->keymap_list, list) {
+ list_for_each_entry_rcu(km, &net_gre->keymap_list, list) {
if (gre_key_cmpfn(km, t)) {
key = km->tuple.src.u.gre.key;
break;
}
}
- read_unlock_bh(&net_gre->keymap_lock);
pr_debug("lookup src key 0x%x for ", key);
nf_ct_dump_tuple(t);
@@ -105,21 +104,17 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t)
{
struct net *net = nf_ct_net(ct);
- struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_gre_net *net_gre = gre_pernet(net);
struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
struct nf_ct_gre_keymap **kmp, *km;
kmp = &ct_pptp_info->keymap[dir];
if (*kmp) {
/* check whether it's a retransmission */
- read_lock_bh(&net_gre->keymap_lock);
- list_for_each_entry(km, &net_gre->keymap_list, list) {
- if (gre_key_cmpfn(km, t) && km == *kmp) {
- read_unlock_bh(&net_gre->keymap_lock);
+ list_for_each_entry_rcu(km, &net_gre->keymap_list, list) {
+ if (gre_key_cmpfn(km, t) && km == *kmp)
return 0;
- }
}
- read_unlock_bh(&net_gre->keymap_lock);
pr_debug("trying to override keymap_%s for ct %p\n",
dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct);
return -EEXIST;
@@ -134,9 +129,9 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
pr_debug("adding new entry %p: ", km);
nf_ct_dump_tuple(&km->tuple);
- write_lock_bh(&net_gre->keymap_lock);
+ spin_lock_bh(&keymap_lock);
list_add_tail(&km->list, &net_gre->keymap_list);
- write_unlock_bh(&net_gre->keymap_lock);
+ spin_unlock_bh(&keymap_lock);
return 0;
}
@@ -145,32 +140,30 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);
/* destroy the keymap entries associated with specified master ct */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
{
- struct net *net = nf_ct_net(ct);
- struct netns_proto_gre *net_gre = gre_pernet(net);
struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
enum ip_conntrack_dir dir;
pr_debug("entering for ct %p\n", ct);
- write_lock_bh(&net_gre->keymap_lock);
+ spin_lock_bh(&keymap_lock);
for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
if (ct_pptp_info->keymap[dir]) {
pr_debug("removing %p from list\n",
ct_pptp_info->keymap[dir]);
- list_del(&ct_pptp_info->keymap[dir]->list);
- kfree(ct_pptp_info->keymap[dir]);
+ list_del_rcu(&ct_pptp_info->keymap[dir]->list);
+ kfree_rcu(ct_pptp_info->keymap[dir], rcu);
ct_pptp_info->keymap[dir] = NULL;
}
}
- write_unlock_bh(&net_gre->keymap_lock);
+ spin_unlock_bh(&keymap_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);
/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
/* gre hdr info to tuple */
-static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
+bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+ struct net *net, struct nf_conntrack_tuple *tuple)
{
const struct pptp_gre_header *pgrehdr;
struct pptp_gre_header _pgrehdr;
@@ -216,15 +209,15 @@ static void gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
static unsigned int *gre_get_timeouts(struct net *net)
{
- return gre_pernet(net)->gre_timeouts;
+ return gre_pernet(net)->timeouts;
}
/* Returns verdict for packet, and may modify conntrack */
-static int gre_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_gre_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
if (state->pf != NFPROTO_IPV4)
return -NF_ACCEPT;
@@ -256,19 +249,6 @@ static int gre_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-/* Called when a conntrack entry has already been removed from the hashes
- * and is about to be deleted from memory */
-static void gre_destroy(struct nf_conn *ct)
-{
- struct nf_conn *master = ct->master;
- pr_debug(" entering\n");
-
- if (!master)
- pr_debug("no master !?!\n");
- else
- nf_ct_gre_keymap_destroy(master);
-}
-
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
@@ -278,13 +258,13 @@ static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
- struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_gre_net *net_gre = gre_pernet(net);
if (!timeouts)
timeouts = gre_get_timeouts(net);
/* set default timeouts for GRE. */
- timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED];
- timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED];
+ timeouts[GRE_CT_UNREPLIED] = net_gre->timeouts[GRE_CT_UNREPLIED];
+ timeouts[GRE_CT_REPLIED] = net_gre->timeouts[GRE_CT_REPLIED];
if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
timeouts[GRE_CT_UNREPLIED] =
@@ -320,69 +300,22 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table gre_sysctl_table[] = {
- {
- .procname = "nf_conntrack_gre_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_gre_timeout_stream",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {}
-};
-#endif
-
-static int gre_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *nf,
- struct netns_proto_gre *net_gre)
-{
-#ifdef CONFIG_SYSCTL
- int i;
-
- if (nf->ctl_table)
- return 0;
-
- nf->ctl_table = kmemdup(gre_sysctl_table,
- sizeof(gre_sysctl_table),
- GFP_KERNEL);
- if (!nf->ctl_table)
- return -ENOMEM;
-
- for (i = 0; i < GRE_CT_MAX; i++)
- nf->ctl_table[i].data = &net_gre->gre_timeouts[i];
-#endif
- return 0;
-}
-
-static int gre_init_net(struct net *net)
+void nf_conntrack_gre_init_net(struct net *net)
{
- struct netns_proto_gre *net_gre = gre_pernet(net);
- struct nf_proto_net *nf = &net_gre->nf;
+ struct nf_gre_net *net_gre = gre_pernet(net);
int i;
- rwlock_init(&net_gre->keymap_lock);
INIT_LIST_HEAD(&net_gre->keymap_list);
for (i = 0; i < GRE_CT_MAX; i++)
- net_gre->gre_timeouts[i] = gre_timeouts[i];
-
- return gre_kmemdup_sysctl_table(net, nf, net_gre);
+ net_gre->timeouts[i] = gre_timeouts[i];
}
/* protocol helper struct */
-static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre = {
.l4proto = IPPROTO_GRE,
- .pkt_to_tuple = gre_pkt_to_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = gre_print_conntrack,
#endif
- .packet = gre_packet,
- .destroy = gre_destroy,
- .me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
@@ -398,61 +331,4 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
.nla_policy = gre_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .net_id = &proto_gre_net_id,
- .init_net = gre_init_net,
};
-
-static int proto_gre_net_init(struct net *net)
-{
- int ret = 0;
-
- ret = nf_ct_l4proto_pernet_register_one(net,
- &nf_conntrack_l4proto_gre4);
- if (ret < 0)
- pr_err("nf_conntrack_gre4: pernet registration failed.\n");
- return ret;
-}
-
-static void proto_gre_net_exit(struct net *net)
-{
- nf_ct_l4proto_pernet_unregister_one(net, &nf_conntrack_l4proto_gre4);
- nf_ct_gre_keymap_flush(net);
-}
-
-static struct pernet_operations proto_gre_net_ops = {
- .init = proto_gre_net_init,
- .exit = proto_gre_net_exit,
- .id = &proto_gre_net_id,
- .size = sizeof(struct netns_proto_gre),
-};
-
-static int __init nf_ct_proto_gre_init(void)
-{
- int ret;
-
- BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
-
- ret = register_pernet_subsys(&proto_gre_net_ops);
- if (ret < 0)
- goto out_pernet;
- ret = nf_ct_l4proto_register_one(&nf_conntrack_l4proto_gre4);
- if (ret < 0)
- goto out_gre4;
-
- return 0;
-out_gre4:
- unregister_pernet_subsys(&proto_gre_net_ops);
-out_pernet:
- return ret;
-}
-
-static void __exit nf_ct_proto_gre_fini(void)
-{
- nf_ct_l4proto_unregister_one(&nf_conntrack_l4proto_gre4);
- unregister_pernet_subsys(&proto_gre_net_ops);
-}
-
-module_init(nf_ct_proto_gre_init);
-module_exit(nf_ct_proto_gre_fini);
-
-MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index de64d8a5fdfd..7df477996b16 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -25,8 +25,8 @@
static const unsigned int nf_ct_icmp_timeout = 30*HZ;
-static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
- struct net *net, struct nf_conntrack_tuple *tuple)
+bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+ struct net *net, struct nf_conntrack_tuple *tuple)
{
const struct icmphdr *hp;
struct icmphdr _hdr;
@@ -54,8 +54,8 @@ static const u_int8_t invmap[] = {
[ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
};
-static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
+bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
{
if (orig->dst.u.icmp.type >= sizeof(invmap) ||
!invmap[orig->dst.u.icmp.type])
@@ -68,11 +68,10 @@ static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
}
/* Returns verdict for packet, or -1 for invalid. */
-static int icmp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_icmp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
@@ -110,7 +109,6 @@ icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_conntrack_tuple innertuple, origtuple;
- const struct nf_conntrack_l4proto *innerproto;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_zone *zone;
enum ip_conntrack_info ctinfo;
@@ -128,12 +126,9 @@ icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
return -NF_ACCEPT;
}
- /* rcu_read_lock()ed by nf_hook_thresh */
- innerproto = __nf_ct_l4proto_find(origtuple.dst.protonum);
-
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
- if (!nf_ct_invert_tuple(&innertuple, &origtuple, innerproto)) {
+ if (!nf_ct_invert_tuple(&innertuple, &origtuple)) {
pr_debug("icmp_error_message: no match\n");
return -NF_ACCEPT;
}
@@ -303,56 +298,16 @@ icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table icmp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_icmp_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_icmp_net *in)
-{
-#ifdef CONFIG_SYSCTL
- pn->ctl_table = kmemdup(icmp_sysctl_table,
- sizeof(icmp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &in->timeout;
-#endif
- return 0;
-}
-
-static int icmp_init_net(struct net *net)
+void nf_conntrack_icmp_init_net(struct net *net)
{
struct nf_icmp_net *in = nf_icmp_pernet(net);
- struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmp_timeout;
-
- return icmp_kmemdup_sysctl_table(pn, in);
-}
-
-static struct nf_proto_net *icmp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmp.pn;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
{
.l4proto = IPPROTO_ICMP,
- .pkt_to_tuple = icmp_pkt_to_tuple,
- .invert_tuple = icmp_invert_tuple,
- .packet = icmp_packet,
- .destroy = NULL,
- .me = NULL,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = icmp_tuple_to_nlattr,
.nlattr_tuple_size = icmp_nlattr_tuple_size,
@@ -368,6 +323,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
.nla_policy = icmp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = icmp_init_net,
- .get_net_proto = icmp_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index a15eefb8e317..bec4a3211658 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -30,10 +30,10 @@
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
-static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
- unsigned int dataoff,
- struct net *net,
- struct nf_conntrack_tuple *tuple)
+bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple)
{
const struct icmp6hdr *hp;
struct icmp6hdr _hdr;
@@ -67,8 +67,8 @@ static const u_int8_t noct_valid_new[] = {
[ICMPV6_MLD2_REPORT - 130] = 1
};
-static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *orig)
+bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
{
int type = orig->dst.u.icmp.type - 128;
if (type < 0 || type >= sizeof(invmap) || !invmap[type])
@@ -86,11 +86,10 @@ static unsigned int *icmpv6_get_timeouts(struct net *net)
}
/* Returns verdict for packet, or -1 for invalid. */
-static int icmpv6_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
unsigned int *timeout = nf_ct_timeout_lookup(ct);
static const u8 valid_new[] = {
@@ -131,7 +130,6 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
{
struct nf_conntrack_tuple intuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
- const struct nf_conntrack_l4proto *inproto;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
@@ -147,12 +145,9 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
return -NF_ACCEPT;
}
- /* rcu_read_lock()ed by nf_hook_thresh */
- inproto = __nf_ct_l4proto_find(origtuple.dst.protonum);
-
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
- if (!nf_ct_invert_tuple(&intuple, &origtuple, inproto)) {
+ if (!nf_ct_invert_tuple(&intuple, &origtuple)) {
pr_debug("icmpv6_error: Can't invert tuple\n");
return -NF_ACCEPT;
}
@@ -314,54 +309,16 @@ icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table icmpv6_sysctl_table[] = {
- {
- .procname = "nf_conntrack_icmpv6_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_icmp_net *in)
-{
-#ifdef CONFIG_SYSCTL
- pn->ctl_table = kmemdup(icmpv6_sysctl_table,
- sizeof(icmpv6_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &in->timeout;
-#endif
- return 0;
-}
-
-static int icmpv6_init_net(struct net *net)
+void nf_conntrack_icmpv6_init_net(struct net *net)
{
struct nf_icmp_net *in = nf_icmpv6_pernet(net);
- struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmpv6_timeout;
-
- return icmpv6_kmemdup_sysctl_table(pn, in);
-}
-
-static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmpv6.pn;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
{
.l4proto = IPPROTO_ICMPV6,
- .pkt_to_tuple = icmpv6_pkt_to_tuple,
- .invert_tuple = icmpv6_invert_tuple,
- .packet = icmpv6_packet,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = icmpv6_tuple_to_nlattr,
.nlattr_tuple_size = icmpv6_nlattr_tuple_size,
@@ -377,6 +334,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
.nla_policy = icmpv6_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = icmpv6_init_net,
- .get_net_proto = icmpv6_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index d53e3e78f605..a7818101ad80 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -357,11 +357,11 @@ out_invalid:
}
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
-static int sctp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
enum sctp_conntrack new_state, old_state;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -642,116 +642,18 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table sctp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_sctp_timeout_closed",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_cookie_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_cookie_echoed",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_established",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_shutdown_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_shutdown_recd",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_heartbeat_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif
-
-static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_sctp_net *sn)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table)
- return 0;
-
- pn->ctl_table = kmemdup(sctp_sysctl_table,
- sizeof(sctp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
- pn->ctl_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
- pn->ctl_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
- pn->ctl_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
- pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
- pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
- pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
- pn->ctl_table[7].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_SENT];
- pn->ctl_table[8].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_ACKED];
-#endif
- return 0;
-}
-
-static int sctp_init_net(struct net *net)
+void nf_conntrack_sctp_init_net(struct net *net)
{
struct nf_sctp_net *sn = nf_sctp_pernet(net);
- struct nf_proto_net *pn = &sn->pn;
-
- if (!pn->users) {
- int i;
-
- for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
- sn->timeouts[i] = sctp_timeouts[i];
-
- /* timeouts[0] is unused, init it so ->timeouts[0] contains
- * 'new' timeout, like udp or icmp.
- */
- sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
- }
+ int i;
- return sctp_kmemdup_sysctl_table(pn, sn);
-}
+ for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
+ sn->timeouts[i] = sctp_timeouts[i];
-static struct nf_proto_net *sctp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.sctp.pn;
+ /* timeouts[0] is unused, init it so ->timeouts[0] contains
+ * 'new' timeout, like udp or icmp.
+ */
+ sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
@@ -759,9 +661,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = sctp_print_conntrack,
#endif
- .packet = sctp_packet,
.can_early_drop = sctp_can_early_drop,
- .me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_size = SCTP_NLATTR_SIZE,
.to_nlattr = sctp_to_nlattr,
@@ -780,6 +680,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = sctp_init_net,
- .get_net_proto = sctp_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 4dcbd51a8e97..a06875a466a4 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -828,12 +828,18 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
}
+static bool nf_conntrack_tcp_established(const struct nf_conn *ct)
+{
+ return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
+ test_bit(IPS_ASSURED_BIT, &ct->status);
+}
+
/* Returns verdict for packet, or -1 for invalid. */
-static int tcp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
@@ -1030,16 +1036,38 @@ static int tcp_packet(struct nf_conn *ct,
new_state = TCP_CONNTRACK_ESTABLISHED;
break;
case TCP_CONNTRACK_CLOSE:
- if (index == TCP_RST_SET
- && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
- && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
- /* Invalid RST */
- spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
- return -NF_ACCEPT;
+ if (index != TCP_RST_SET)
+ break;
+
+ if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
+ u32 seq = ntohl(th->seq);
+
+ if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
+ /* Invalid RST */
+ spin_unlock_bh(&ct->lock);
+ nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
+ return -NF_ACCEPT;
+ }
+
+ if (!nf_conntrack_tcp_established(ct) ||
+ seq == ct->proto.tcp.seen[!dir].td_maxack)
+ break;
+
+ /* Check if rst is part of train, such as
+ * foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
+ * foo:80 > bar:4379: R, 235946602:235946602(0) ack 42
+ */
+ if (ct->proto.tcp.last_index == TCP_ACK_SET &&
+ ct->proto.tcp.last_dir == dir &&
+ seq == ct->proto.tcp.last_end)
+ break;
+
+ /* ... RST sequence number doesn't match exactly, keep
+ * established state to allow a possible challenge ACK.
+ */
+ new_state = old_state;
}
- if (index == TCP_RST_SET
- && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
+ if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
&& ct->proto.tcp.last_index == TCP_SYN_SET)
|| (!test_bit(IPS_ASSURED_BIT, &ct->status)
&& ct->proto.tcp.last_index == TCP_ACK_SET))
@@ -1055,7 +1083,7 @@ static int tcp_packet(struct nf_conn *ct,
* segments we ignored. */
goto in_window;
}
- /* Just fall through */
+ break;
default:
/* Keep compilers happy. */
break;
@@ -1090,6 +1118,8 @@ static int tcp_packet(struct nf_conn *ct,
if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
+ else if (unlikely(index == TCP_RST_SET))
+ timeout = timeouts[TCP_CONNTRACK_CLOSE];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
@@ -1387,146 +1417,21 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table tcp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_tcp_timeout_syn_sent",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_syn_recv",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_established",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_fin_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_close_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_last_ack",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_time_wait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_close",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_max_retrans",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_timeout_unacknowledged",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_tcp_loose",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "nf_conntrack_tcp_be_liberal",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "nf_conntrack_tcp_max_retrans",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_tcp_net *tn)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table)
- return 0;
-
- pn->ctl_table = kmemdup(tcp_sysctl_table,
- sizeof(tcp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
-
- pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
- pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
- pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
- pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
- pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
- pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
- pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
- pn->ctl_table[7].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
- pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
- pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK];
- pn->ctl_table[10].data = &tn->tcp_loose;
- pn->ctl_table[11].data = &tn->tcp_be_liberal;
- pn->ctl_table[12].data = &tn->tcp_max_retrans;
-#endif
- return 0;
-}
-
-static int tcp_init_net(struct net *net)
+void nf_conntrack_tcp_init_net(struct net *net)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
- struct nf_proto_net *pn = &tn->pn;
-
- if (!pn->users) {
- int i;
-
- for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
- tn->timeouts[i] = tcp_timeouts[i];
+ int i;
- /* timeouts[0] is unused, make it same as SYN_SENT so
- * ->timeouts[0] contains 'new' timeout, like udp or icmp.
- */
- tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
- tn->tcp_loose = nf_ct_tcp_loose;
- tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
- tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
- }
+ for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
+ tn->timeouts[i] = tcp_timeouts[i];
- return tcp_kmemdup_sysctl_table(pn, tn);
-}
-
-static struct nf_proto_net *tcp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.tcp.pn;
+ /* timeouts[0] is unused, make it same as SYN_SENT so
+ * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+ */
+ tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
+ tn->tcp_loose = nf_ct_tcp_loose;
+ tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
+ tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
@@ -1535,7 +1440,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = tcp_print_conntrack,
#endif
- .packet = tcp_packet,
.can_early_drop = tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
@@ -1556,6 +1460,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = tcp_init_net,
- .get_net_proto = tcp_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index b4f5d5e82031..951366dfbec3 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -85,11 +85,11 @@ static bool udp_error(struct sk_buff *skb,
}
/* Returns verdict for packet, and may modify conntracktype */
-static int udp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_udp_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
unsigned int *timeouts;
@@ -177,11 +177,11 @@ static bool udplite_error(struct sk_buff *skb,
}
/* Returns verdict for packet, and may modify conntracktype */
-static int udplite_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
+int nf_conntrack_udplite_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
unsigned int *timeouts;
@@ -260,66 +260,19 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
-static struct ctl_table udp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_udp_timeout",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "nf_conntrack_udp_timeout_stream",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
- struct nf_udp_net *un)
-{
-#ifdef CONFIG_SYSCTL
- if (pn->ctl_table)
- return 0;
- pn->ctl_table = kmemdup(udp_sysctl_table,
- sizeof(udp_sysctl_table),
- GFP_KERNEL);
- if (!pn->ctl_table)
- return -ENOMEM;
- pn->ctl_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
- pn->ctl_table[1].data = &un->timeouts[UDP_CT_REPLIED];
-#endif
- return 0;
-}
-
-static int udp_init_net(struct net *net)
+void nf_conntrack_udp_init_net(struct net *net)
{
struct nf_udp_net *un = nf_udp_pernet(net);
- struct nf_proto_net *pn = &un->pn;
+ int i;
- if (!pn->users) {
- int i;
-
- for (i = 0; i < UDP_CT_MAX; i++)
- un->timeouts[i] = udp_timeouts[i];
- }
-
- return udp_kmemdup_sysctl_table(pn, un);
-}
-
-static struct nf_proto_net *udp_get_net_proto(struct net *net)
-{
- return &net->ct.nf_ct_proto.udp.pn;
+ for (i = 0; i < UDP_CT_MAX; i++)
+ un->timeouts[i] = udp_timeouts[i];
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
{
.l4proto = IPPROTO_UDP,
.allow_clash = true,
- .packet = udp_packet,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
@@ -335,8 +288,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = udp_init_net,
- .get_net_proto = udp_get_net_proto,
};
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
@@ -344,7 +295,6 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite =
{
.l4proto = IPPROTO_UDPLITE,
.allow_clash = true,
- .packet = udplite_packet,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
@@ -360,7 +310,5 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite =
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = udp_init_net,
- .get_net_proto = udp_get_net_proto,
};
#endif
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index c8d2b6688a2a..f067c6b50857 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -21,6 +21,8 @@
#include <linux/tcp.h>
#include <linux/netfilter.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
@@ -54,6 +56,11 @@ module_param(sip_direct_media, int, 0600);
MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
"endpoints only (default 1)");
+static int sip_external_media __read_mostly = 0;
+module_param(sip_external_media, int, 0600);
+MODULE_PARM_DESC(sip_external_media, "Expect Media streams between external "
+ "endpoints (default 0)");
+
const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
@@ -861,6 +868,41 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
if (!nf_inet_addr_cmp(daddr, &ct->tuplehash[dir].tuple.src.u3))
return NF_ACCEPT;
saddr = &ct->tuplehash[!dir].tuple.src.u3;
+ } else if (sip_external_media) {
+ struct net_device *dev = skb_dst(skb)->dev;
+ struct net *net = dev_net(dev);
+ struct rtable *rt;
+ struct flowi4 fl4 = {};
+#if IS_ENABLED(CONFIG_IPV6)
+ struct flowi6 fl6 = {};
+#endif
+ struct dst_entry *dst = NULL;
+
+ switch (nf_ct_l3num(ct)) {
+ case NFPROTO_IPV4:
+ fl4.daddr = daddr->ip;
+ rt = ip_route_output_key(net, &fl4);
+ if (!IS_ERR(rt))
+ dst = &rt->dst;
+ break;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ case NFPROTO_IPV6:
+ fl6.daddr = daddr->in6;
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst->error) {
+ dst_release(dst);
+ dst = NULL;
+ }
+ break;
+#endif
+ }
+
+ /* Don't predict any conntracks when the media endpoint is reachable
+ * through the same interface as the signalling peer.
+ */
+ if (dst && dst->dev == dev)
+ return NF_ACCEPT;
}
/* We need to check whether the registration exists before attempting
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index b6177fd73304..c2ae14c720b4 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -24,6 +24,10 @@
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <linux/rculist_nulls.h>
+static bool enable_hooks __read_mostly;
+MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks");
+module_param(enable_hooks, bool, 0000);
+
unsigned int nf_conntrack_net_id __read_mostly;
#ifdef CONFIG_NF_CONNTRACK_PROCFS
@@ -310,8 +314,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
if (!net_eq(nf_ct_net(ct), net))
goto release;
- l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
- WARN_ON(!l4proto);
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
ret = -ENOSPC;
seq_printf(s, "%-8s %u %-8s %u ",
@@ -547,8 +550,55 @@ enum nf_ct_sysctl_index {
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
NF_SYSCTL_CT_TIMESTAMP,
#endif
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
+ NF_SYSCTL_CT_PROTO_TCP_LOOSE,
+ NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+ NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT,
+ NF_SYSCTL_CT_PROTO_DCCP_LOOSE,
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
+#endif
+
+ __NF_SYSCTL_CT_LAST_SYSCTL,
};
+#define NF_SYSCTL_CT_LAST_SYSCTL (__NF_SYSCTL_CT_LAST_SYSCTL + 1)
+
static struct ctl_table nf_ct_sysctl_table[] = {
[NF_SYSCTL_CT_MAX] = {
.procname = "nf_conntrack_max",
@@ -626,7 +676,235 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.proc_handler = proc_dointvec,
},
#endif
- { }
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC] = {
+ .procname = "nf_conntrack_generic_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT] = {
+ .procname = "nf_conntrack_tcp_timeout_syn_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV] = {
+ .procname = "nf_conntrack_tcp_timeout_syn_recv",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED] = {
+ .procname = "nf_conntrack_tcp_timeout_established",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT] = {
+ .procname = "nf_conntrack_tcp_timeout_fin_wait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT] = {
+ .procname = "nf_conntrack_tcp_timeout_close_wait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK] = {
+ .procname = "nf_conntrack_tcp_timeout_last_ack",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT] = {
+ .procname = "nf_conntrack_tcp_timeout_time_wait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE] = {
+ .procname = "nf_conntrack_tcp_timeout_close",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS] = {
+ .procname = "nf_conntrack_tcp_timeout_max_retrans",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK] = {
+ .procname = "nf_conntrack_tcp_timeout_unacknowledged",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
+ .procname = "nf_conntrack_tcp_loose",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ [NF_SYSCTL_CT_PROTO_TCP_LIBERAL] = {
+ .procname = "nf_conntrack_tcp_be_liberal",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ [NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = {
+ .procname = "nf_conntrack_tcp_max_retrans",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP] = {
+ .procname = "nf_conntrack_udp_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM] = {
+ .procname = "nf_conntrack_udp_timeout_stream",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
+ .procname = "nf_conntrack_icmp_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6] = {
+ .procname = "nf_conntrack_icmpv6_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED] = {
+ .procname = "nf_conntrack_sctp_timeout_closed",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT] = {
+ .procname = "nf_conntrack_sctp_timeout_cookie_wait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED] = {
+ .procname = "nf_conntrack_sctp_timeout_cookie_echoed",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED] = {
+ .procname = "nf_conntrack_sctp_timeout_established",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT] = {
+ .procname = "nf_conntrack_sctp_timeout_shutdown_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD] = {
+ .procname = "nf_conntrack_sctp_timeout_shutdown_recd",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = {
+ .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT] = {
+ .procname = "nf_conntrack_sctp_timeout_heartbeat_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
+ .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
+ .procname = "nf_conntrack_dccp_timeout_request",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND] = {
+ .procname = "nf_conntrack_dccp_timeout_respond",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN] = {
+ .procname = "nf_conntrack_dccp_timeout_partopen",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN] = {
+ .procname = "nf_conntrack_dccp_timeout_open",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ] = {
+ .procname = "nf_conntrack_dccp_timeout_closereq",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING] = {
+ .procname = "nf_conntrack_dccp_timeout_closing",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT] = {
+ .procname = "nf_conntrack_dccp_timeout_timewait",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_DCCP_LOOSE] = {
+ .procname = "nf_conntrack_dccp_loose",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_GRE] = {
+ .procname = "nf_conntrack_gre_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM] = {
+ .procname = "nf_conntrack_gre_timeout_stream",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
+ {}
};
static struct ctl_table nf_ct_netfilter_table[] = {
@@ -640,14 +918,103 @@ static struct ctl_table nf_ct_netfilter_table[] = {
{ }
};
+static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
+ struct ctl_table *table)
+{
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+#define XASSIGN(XNAME, tn) \
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ ## XNAME].data = \
+ &(tn)->timeouts[TCP_CONNTRACK_ ## XNAME]
+
+ XASSIGN(SYN_SENT, tn);
+ XASSIGN(SYN_RECV, tn);
+ XASSIGN(ESTABLISHED, tn);
+ XASSIGN(FIN_WAIT, tn);
+ XASSIGN(CLOSE_WAIT, tn);
+ XASSIGN(LAST_ACK, tn);
+ XASSIGN(TIME_WAIT, tn);
+ XASSIGN(CLOSE, tn);
+ XASSIGN(RETRANS, tn);
+ XASSIGN(UNACK, tn);
+#undef XASSIGN
+#define XASSIGN(XNAME, rval) \
+ table[NF_SYSCTL_CT_PROTO_TCP_ ## XNAME].data = (rval)
+
+ XASSIGN(LOOSE, &tn->tcp_loose);
+ XASSIGN(LIBERAL, &tn->tcp_be_liberal);
+ XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
+#undef XASSIGN
+}
+
+static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
+ struct ctl_table *table)
+{
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ struct nf_sctp_net *sn = nf_sctp_pernet(net);
+
+#define XASSIGN(XNAME, sn) \
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ ## XNAME].data = \
+ &(sn)->timeouts[SCTP_CONNTRACK_ ## XNAME]
+
+ XASSIGN(CLOSED, sn);
+ XASSIGN(COOKIE_WAIT, sn);
+ XASSIGN(COOKIE_ECHOED, sn);
+ XASSIGN(ESTABLISHED, sn);
+ XASSIGN(SHUTDOWN_SENT, sn);
+ XASSIGN(SHUTDOWN_RECD, sn);
+ XASSIGN(SHUTDOWN_ACK_SENT, sn);
+ XASSIGN(HEARTBEAT_SENT, sn);
+ XASSIGN(HEARTBEAT_ACKED, sn);
+#undef XASSIGN
+#endif
+}
+
+static void nf_conntrack_standalone_init_dccp_sysctl(struct net *net,
+ struct ctl_table *table)
+{
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ struct nf_dccp_net *dn = nf_dccp_pernet(net);
+
+#define XASSIGN(XNAME, dn) \
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_ ## XNAME].data = \
+ &(dn)->dccp_timeout[CT_DCCP_ ## XNAME]
+
+ XASSIGN(REQUEST, dn);
+ XASSIGN(RESPOND, dn);
+ XASSIGN(PARTOPEN, dn);
+ XASSIGN(OPEN, dn);
+ XASSIGN(CLOSEREQ, dn);
+ XASSIGN(CLOSING, dn);
+ XASSIGN(TIMEWAIT, dn);
+#undef XASSIGN
+
+ table[NF_SYSCTL_CT_PROTO_DCCP_LOOSE].data = &dn->dccp_loose;
+#endif
+}
+
+static void nf_conntrack_standalone_init_gre_sysctl(struct net *net,
+ struct ctl_table *table)
+{
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ struct nf_gre_net *gn = nf_gre_pernet(net);
+
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE].data = &gn->timeouts[GRE_CT_UNREPLIED];
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM].data = &gn->timeouts[GRE_CT_REPLIED];
+#endif
+}
+
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
+ struct nf_udp_net *un = nf_udp_pernet(net);
struct ctl_table *table;
+ BUILD_BUG_ON(ARRAY_SIZE(nf_ct_sysctl_table) != NF_SYSCTL_CT_LAST_SYSCTL);
+
table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
GFP_KERNEL);
if (!table)
- goto out_kmemdup;
+ return -ENOMEM;
table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
@@ -655,6 +1022,16 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
#endif
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED];
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
+
+ nf_conntrack_standalone_init_tcp_sysctl(net, table);
+ nf_conntrack_standalone_init_sctp_sysctl(net, table);
+ nf_conntrack_standalone_init_dccp_sysctl(net, table);
+ nf_conntrack_standalone_init_gre_sysctl(net, table);
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
@@ -680,7 +1057,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
out_unregister_netfilter:
kfree(table);
-out_kmemdup:
return -ENOMEM;
}
@@ -703,31 +1079,47 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net)
}
#endif /* CONFIG_SYSCTL */
+static void nf_conntrack_fini_net(struct net *net)
+{
+ if (enable_hooks)
+ nf_ct_netns_put(net, NFPROTO_INET);
+
+ nf_conntrack_standalone_fini_proc(net);
+ nf_conntrack_standalone_fini_sysctl(net);
+}
+
static int nf_conntrack_pernet_init(struct net *net)
{
int ret;
- ret = nf_conntrack_init_net(net);
+ net->ct.sysctl_checksum = 1;
+
+ ret = nf_conntrack_standalone_init_sysctl(net);
if (ret < 0)
- goto out_init;
+ return ret;
ret = nf_conntrack_standalone_init_proc(net);
if (ret < 0)
goto out_proc;
- net->ct.sysctl_checksum = 1;
- net->ct.sysctl_log_invalid = 0;
- ret = nf_conntrack_standalone_init_sysctl(net);
+ ret = nf_conntrack_init_net(net);
if (ret < 0)
- goto out_sysctl;
+ goto out_init_net;
+
+ if (enable_hooks) {
+ ret = nf_ct_netns_get(net, NFPROTO_INET);
+ if (ret < 0)
+ goto out_hooks;
+ }
return 0;
-out_sysctl:
+out_hooks:
+ nf_conntrack_cleanup_net(net);
+out_init_net:
nf_conntrack_standalone_fini_proc(net);
out_proc:
- nf_conntrack_cleanup_net(net);
-out_init:
+ nf_conntrack_standalone_fini_sysctl(net);
return ret;
}
@@ -735,10 +1127,9 @@ static void nf_conntrack_pernet_exit(struct list_head *net_exit_list)
{
struct net *net;
- list_for_each_entry(net, net_exit_list, exit_list) {
- nf_conntrack_standalone_fini_sysctl(net);
- nf_conntrack_standalone_fini_proc(net);
- }
+ list_for_each_entry(net, net_exit_list, exit_list)
+ nf_conntrack_fini_net(net);
+
nf_conntrack_cleanup_net_list(net_exit_list);
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index fa0844e2a68d..7aabfd4b1e50 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
+ struct dst_entry *other_dst = route->tuple[!dir].dst;
struct dst_entry *dst = route->tuple[dir].dst;
ft->dir = dir;
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;
- ft->iifidx = route->tuple[dir].ifindex;
- ft->oifidx = route->tuple[!dir].ifindex;
+ ft->iifidx = other_dst->dev->ifindex;
+ ft->oifidx = dst->dev->ifindex;
ft->dst_cache = dst;
}
@@ -120,7 +121,7 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
if (l4num == IPPROTO_TCP)
flow_offload_fixup_tcp(&ct->proto.tcp);
- l4proto = __nf_ct_l4proto_find(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
if (!l4proto)
return;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index d159e9e7835b..af7dc6537758 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -22,8 +22,6 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
@@ -35,8 +33,6 @@
static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];
static DEFINE_MUTEX(nf_nat_proto_mutex);
-static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
- __read_mostly;
static unsigned int nat_net_id __read_mostly;
static struct hlist_head *nf_nat_bysource __read_mostly;
@@ -58,16 +54,75 @@ struct nat_net {
struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};
-inline const struct nf_nat_l3proto *
-__nf_nat_l3proto_find(u8 family)
+#ifdef CONFIG_XFRM
+static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
+ const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ unsigned long statusbit,
+ struct flowi *fl)
{
- return rcu_dereference(nf_nat_l3protos[family]);
+ const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
+ struct flowi4 *fl4 = &fl->u.ip4;
+
+ if (ct->status & statusbit) {
+ fl4->daddr = t->dst.u3.ip;
+ if (t->dst.protonum == IPPROTO_TCP ||
+ t->dst.protonum == IPPROTO_UDP ||
+ t->dst.protonum == IPPROTO_UDPLITE ||
+ t->dst.protonum == IPPROTO_DCCP ||
+ t->dst.protonum == IPPROTO_SCTP)
+ fl4->fl4_dport = t->dst.u.all;
+ }
+
+ statusbit ^= IPS_NAT_MASK;
+
+ if (ct->status & statusbit) {
+ fl4->saddr = t->src.u3.ip;
+ if (t->dst.protonum == IPPROTO_TCP ||
+ t->dst.protonum == IPPROTO_UDP ||
+ t->dst.protonum == IPPROTO_UDPLITE ||
+ t->dst.protonum == IPPROTO_DCCP ||
+ t->dst.protonum == IPPROTO_SCTP)
+ fl4->fl4_sport = t->src.u.all;
+ }
+}
+
+static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
+ const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ unsigned long statusbit,
+ struct flowi *fl)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
+ struct flowi6 *fl6 = &fl->u.ip6;
+
+ if (ct->status & statusbit) {
+ fl6->daddr = t->dst.u3.in6;
+ if (t->dst.protonum == IPPROTO_TCP ||
+ t->dst.protonum == IPPROTO_UDP ||
+ t->dst.protonum == IPPROTO_UDPLITE ||
+ t->dst.protonum == IPPROTO_DCCP ||
+ t->dst.protonum == IPPROTO_SCTP)
+ fl6->fl6_dport = t->dst.u.all;
+ }
+
+ statusbit ^= IPS_NAT_MASK;
+
+ if (ct->status & statusbit) {
+ fl6->saddr = t->src.u3.in6;
+ if (t->dst.protonum == IPPROTO_TCP ||
+ t->dst.protonum == IPPROTO_UDP ||
+ t->dst.protonum == IPPROTO_UDPLITE ||
+ t->dst.protonum == IPPROTO_DCCP ||
+ t->dst.protonum == IPPROTO_SCTP)
+ fl6->fl6_sport = t->src.u.all;
+ }
+#endif
}
-#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
- const struct nf_nat_l3proto *l3proto;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
enum ip_conntrack_dir dir;
@@ -79,17 +134,20 @@ static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
return;
family = nf_ct_l3num(ct);
- l3proto = __nf_nat_l3proto_find(family);
- if (l3proto == NULL)
- return;
-
dir = CTINFO2DIR(ctinfo);
if (dir == IP_CT_DIR_ORIGINAL)
statusbit = IPS_DST_NAT;
else
statusbit = IPS_SRC_NAT;
- l3proto->decode_session(skb, ct, dir, statusbit, fl);
+ switch (family) {
+ case NFPROTO_IPV4:
+ nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
+ return;
+ case NFPROTO_IPV6:
+ nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
+ return;
+ }
}
int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
@@ -146,7 +204,7 @@ hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
}
/* Is this tuple already taken? (not by us) */
-int
+static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
@@ -158,10 +216,9 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
*/
struct nf_conntrack_tuple reply;
- nf_ct_invert_tuplepr(&reply, tuple);
+ nf_ct_invert_tuple(&reply, tuple);
return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
-EXPORT_SYMBOL(nf_nat_used_tuple);
static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
const struct nf_nat_range2 *range)
@@ -183,7 +240,7 @@ static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
__be16 port;
switch (tuple->dst.protonum) {
- case IPPROTO_ICMP: /* fallthrough */
+ case IPPROTO_ICMP:
case IPPROTO_ICMPV6:
return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
@@ -253,7 +310,7 @@ find_appropriate_src(struct net *net,
net_eq(net, nf_ct_net(ct)) &&
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
/* Copy source part from reply tuple. */
- nf_ct_invert_tuplepr(result,
+ nf_ct_invert_tuple(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
result->dst = tuple->dst;
@@ -560,8 +617,8 @@ nf_nat_setup_info(struct nf_conn *ct,
* manipulations (future optimization: if num_manips == 0,
* orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
*/
- nf_ct_invert_tuplepr(&curr_tuple,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ nf_ct_invert_tuple(&curr_tuple,
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);
@@ -569,7 +626,7 @@ nf_nat_setup_info(struct nf_conn *ct,
struct nf_conntrack_tuple reply;
/* Alter conntrack table so will recognize replies. */
- nf_ct_invert_tuplepr(&reply, &new_tuple);
+ nf_ct_invert_tuple(&reply, &new_tuple);
nf_conntrack_alter_reply(ct, &reply);
/* Non-atomic: we own this at the moment. */
@@ -632,23 +689,6 @@ nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
-static unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
- enum nf_nat_manip_type mtype,
- enum ip_conntrack_dir dir)
-{
- const struct nf_nat_l3proto *l3proto;
- struct nf_conntrack_tuple target;
-
- /* We are aiming to look like inverse of other direction. */
- nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
-
- l3proto = __nf_nat_l3proto_find(target.src.l3num);
- if (!l3proto->manip_pkt(skb, 0, &target, mtype))
- return NF_DROP;
-
- return NF_ACCEPT;
-}
-
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -799,33 +839,6 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
return 0;
}
-static void nf_nat_l3proto_clean(u8 l3proto)
-{
- struct nf_nat_proto_clean clean = {
- .l3proto = l3proto,
- };
-
- nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
-}
-
-int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
-{
- RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
- return 0;
-}
-EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);
-
-void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
-{
- mutex_lock(&nf_nat_proto_mutex);
- RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
- mutex_unlock(&nf_nat_proto_mutex);
- synchronize_rcu();
-
- nf_nat_l3proto_clean(l3proto->l3proto);
-}
-EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
-
/* No one using conntrack by the time this called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
@@ -888,10 +901,43 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
[CTA_NAT_PROTO] = { .type = NLA_NESTED },
};
+static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
+ struct nf_nat_range2 *range)
+{
+ if (tb[CTA_NAT_V4_MINIP]) {
+ range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
+ range->flags |= NF_NAT_RANGE_MAP_IPS;
+ }
+
+ if (tb[CTA_NAT_V4_MAXIP])
+ range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
+ else
+ range->max_addr.ip = range->min_addr.ip;
+
+ return 0;
+}
+
+static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
+ struct nf_nat_range2 *range)
+{
+ if (tb[CTA_NAT_V6_MINIP]) {
+ nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
+ sizeof(struct in6_addr));
+ range->flags |= NF_NAT_RANGE_MAP_IPS;
+ }
+
+ if (tb[CTA_NAT_V6_MAXIP])
+ nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
+ sizeof(struct in6_addr));
+ else
+ range->max_addr = range->min_addr;
+
+ return 0;
+}
+
static int
nfnetlink_parse_nat(const struct nlattr *nat,
- const struct nf_conn *ct, struct nf_nat_range2 *range,
- const struct nf_nat_l3proto *l3proto)
+ const struct nf_conn *ct, struct nf_nat_range2 *range)
{
struct nlattr *tb[CTA_NAT_MAX+1];
int err;
@@ -902,8 +948,19 @@ nfnetlink_parse_nat(const struct nlattr *nat,
if (err < 0)
return err;
- err = l3proto->nlattr_to_range(tb, range);
- if (err < 0)
+ switch (nf_ct_l3num(ct)) {
+ case NFPROTO_IPV4:
+ err = nf_nat_ipv4_nlattr_to_range(tb, range);
+ break;
+ case NFPROTO_IPV6:
+ err = nf_nat_ipv6_nlattr_to_range(tb, range);
+ break;
+ default:
+ err = -EPROTONOSUPPORT;
+ break;
+ }
+
+ if (err)
return err;
if (!tb[CTA_NAT_PROTO])
@@ -919,7 +976,6 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
const struct nlattr *attr)
{
struct nf_nat_range2 range;
- const struct nf_nat_l3proto *l3proto;
int err;
/* Should not happen, restricted to creating new conntracks
@@ -928,18 +984,11 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
return -EEXIST;
- /* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
- * attach the null binding, otherwise this may oops.
- */
- l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
- if (l3proto == NULL)
- return -EAGAIN;
-
/* No NAT information has been passed, allocate the null-binding */
if (attr == NULL)
return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;
- err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
+ err = nfnetlink_parse_nat(attr, ct, &range);
if (err < 0)
return err;
@@ -1036,7 +1085,6 @@ int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
mutex_unlock(&nf_nat_proto_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(nf_nat_register_fn);
void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
unsigned int ops_count)
@@ -1085,7 +1133,6 @@ void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
unlock:
mutex_unlock(&nf_nat_proto_mutex);
}
-EXPORT_SYMBOL_GPL(nf_nat_unregister_fn);
static struct pernet_operations nat_net_ops = {
.id = &nat_net_id,
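
The nf_nat_core.c changes drop the RCU-protected nf_nat_l3protos[] registry and dispatch on nf_ct_l3num() directly. As a sketch of that design trade-off (illustrative types, not kernel code): a table of function pointers that modules register into becomes a plain switch, which removes the register/unregister machinery and lets the compiler see both callees.

#include <stdio.h>

enum family { FAM_IPV4 = 2, FAM_IPV6 = 10 };

static void decode_v4(const char *pkt) { printf("v4: %s\n", pkt); }
static void decode_v6(const char *pkt) { printf("v6: %s\n", pkt); }

/* Before: indirect dispatch through a registry that modules fill in. */
typedef void (*decode_fn)(const char *);
static decode_fn registry[16];

static void dispatch_indirect(enum family f, const char *pkt)
{
	if (registry[f])
		registry[f](pkt);
}

/* After: both callees are built in, so a switch is enough. */
static void dispatch_direct(enum family f, const char *pkt)
{
	switch (f) {
	case FAM_IPV4: decode_v4(pkt); break;
	case FAM_IPV6: decode_v6(pkt); break;
	}
}

int main(void)
{
	registry[FAM_IPV4] = decode_v4;
	dispatch_indirect(FAM_IPV4, "old path");
	dispatch_direct(FAM_IPV6, "new path");
	return 0;
}
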
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index 38793b95d9bc..ccc06f7539d7 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -22,9 +22,6 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
/* Frobs data inside this packet, which is linear. */
@@ -98,7 +95,6 @@ bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
const char *rep_buffer,
unsigned int rep_len, bool adjust)
{
- const struct nf_nat_l3proto *l3proto;
struct tcphdr *tcph;
int oldlen, datalen;
@@ -118,9 +114,8 @@ bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
datalen = skb->len - protoff;
- l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
- l3proto->csum_recalc(skb, IPPROTO_TCP, tcph, &tcph->check,
- datalen, oldlen);
+ nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_TCP,
+ tcph, &tcph->check, datalen, oldlen);
if (adjust && rep_len != match_len)
nf_ct_seqadj_set(ct, ctinfo, tcph->seq,
@@ -150,7 +145,6 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
const char *rep_buffer,
unsigned int rep_len)
{
- const struct nf_nat_l3proto *l3proto;
struct udphdr *udph;
int datalen, oldlen;
@@ -176,9 +170,8 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
return true;
- l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
- l3proto->csum_recalc(skb, IPPROTO_UDP, udph, &udph->check,
- datalen, oldlen);
+ nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_UDP,
+ udph, &udph->check, datalen, oldlen);
return true;
}
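
Both mangle helpers now funnel through nf_nat_csum_recalc(), which, like inet_proto_csum_replace2(), patches the transport checksum incrementally instead of re-summing the payload. A small user-space demonstration of the underlying RFC 1624 update, with illustrative helper names:

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Full one's-complement checksum over 16-bit words. */
static uint16_t csum_full(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	return (uint16_t)~fold(sum);
}

/* RFC 1624: HC' = ~(~HC + ~m + m') */
static uint16_t csum_replace(uint16_t check, uint16_t oldw, uint16_t neww)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~oldw;
	sum += neww;
	return (uint16_t)~fold(sum);
}

int main(void)
{
	uint16_t hdr[4] = { 0x1234, 0xc0a8, 0x0001, 0x5678 };
	uint16_t check = csum_full(hdr, 4);
	uint16_t oldw = hdr[1], neww = 0x0a00;	/* "NAT" rewrites one word */

	hdr[1] = neww;
	printf("incremental %04x, recomputed %04x\n",
	       csum_replace(check, oldw, neww), csum_full(hdr, 4));
	return 0;
}
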
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/netfilter/nf_nat_masquerade.c
index 41327bb99093..86fa4dcc63c5 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/netfilter/nf_nat_masquerade.c
@@ -1,25 +1,17 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/inetdevice.h>
-#include <linux/ip.h>
-#include <linux/timer.h>
#include <linux/netfilter.h>
-#include <net/protocol.h>
-#include <net/ip.h>
-#include <net/checksum.h>
-#include <net/route.h>
#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_nat.h>
+#include <linux/netfilter_ipv6.h>
+
#include <net/netfilter/ipv4/nf_nat_masquerade.h>
+#include <net/netfilter/ipv6/nf_nat_masquerade.h>
+
+static DEFINE_MUTEX(masq_mutex);
+static unsigned int masq_refcnt __read_mostly;
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -78,8 +70,6 @@ static int device_cmp(struct nf_conn *i, void *ifindex)
if (!nat)
return 0;
- if (nf_ct_l3num(i) != NFPROTO_IPV4)
- return 0;
return nat->masq_index == (int)(long)ifindex;
}
@@ -95,7 +85,6 @@ static int masq_device_event(struct notifier_block *this,
* conntracks which were associated with that device,
* and forget them.
*/
- WARN_ON(dev->ifindex == 0);
nf_ct_iterate_cleanup_net(net, device_cmp,
(void *)(long)dev->ifindex, 0, 0);
@@ -147,9 +136,6 @@ static struct notifier_block masq_inet_notifier = {
.notifier_call = masq_inet_event,
};
-static int masq_refcnt;
-static DEFINE_MUTEX(masq_mutex);
-
int nf_nat_masquerade_ipv4_register_notifier(void)
{
int ret = 0;
@@ -194,3 +180,183 @@ out_unlock:
mutex_unlock(&masq_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
+
+#if IS_ENABLED(CONFIG_IPV6)
+static atomic_t v6_worker_count __read_mostly;
+
+static int
+nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+ struct in6_addr *saddr)
+{
+#ifdef CONFIG_IPV6_MODULE
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (!v6_ops)
+ return -EHOSTUNREACH;
+
+ return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr);
+#else
+ return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr);
+#endif
+}
+
+unsigned int
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ const struct net_device *out)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn_nat *nat;
+ struct in6_addr src;
+ struct nf_conn *ct;
+ struct nf_nat_range2 newrange;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+ ctinfo == IP_CT_RELATED_REPLY)));
+
+ if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out,
+ &ipv6_hdr(skb)->daddr, 0, &src) < 0)
+ return NF_DROP;
+
+ nat = nf_ct_nat_ext_add(ct);
+ if (nat)
+ nat->masq_index = out->ifindex;
+
+ newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr.in6 = src;
+ newrange.max_addr.in6 = src;
+ newrange.min_proto = range->min_proto;
+ newrange.max_proto = range->max_proto;
+
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
+
+struct masq_dev_work {
+ struct work_struct work;
+ struct net *net;
+ struct in6_addr addr;
+ int ifindex;
+};
+
+static int inet6_cmp(struct nf_conn *ct, void *work)
+{
+ struct masq_dev_work *w = (struct masq_dev_work *)work;
+ struct nf_conntrack_tuple *tuple;
+
+ if (!device_cmp(ct, (void *)(long)w->ifindex))
+ return 0;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
+}
+
+static void iterate_cleanup_work(struct work_struct *work)
+{
+ struct masq_dev_work *w;
+
+ w = container_of(work, struct masq_dev_work, work);
+
+ nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
+
+ put_net(w->net);
+ kfree(w);
+ atomic_dec(&v6_worker_count);
+ module_put(THIS_MODULE);
+}
+
+/* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
+ *
+ * Defer it to the system workqueue.
+ *
+ * As we can have 'a lot' of inet_events (depending on the number of
+ * ipv6 addresses being deleted), we also need to limit the number of
+ * queued work items.
+ */
+static int masq_inet6_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *ifa = ptr;
+ const struct net_device *dev;
+ struct masq_dev_work *w;
+ struct net *net;
+
+ if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
+ return NOTIFY_DONE;
+
+ dev = ifa->idev->dev;
+ net = maybe_get_net(dev_net(dev));
+ if (!net)
+ return NOTIFY_DONE;
+
+ if (!try_module_get(THIS_MODULE))
+ goto err_module;
+
+ w = kmalloc(sizeof(*w), GFP_ATOMIC);
+ if (w) {
+ atomic_inc(&v6_worker_count);
+
+ INIT_WORK(&w->work, iterate_cleanup_work);
+ w->ifindex = dev->ifindex;
+ w->net = net;
+ w->addr = ifa->addr;
+ schedule_work(&w->work);
+
+ return NOTIFY_DONE;
+ }
+
+ module_put(THIS_MODULE);
+ err_module:
+ put_net(net);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block masq_inet6_notifier = {
+ .notifier_call = masq_inet6_event,
+};
+
+int nf_nat_masquerade_ipv6_register_notifier(void)
+{
+ int ret = 0;
+
+ mutex_lock(&masq_mutex);
+ /* check if the notifier is already set */
+ if (++masq_refcnt > 1)
+ goto out_unlock;
+
+ ret = register_netdevice_notifier(&masq_dev_notifier);
+ if (ret)
+ goto err_dec;
+
+ ret = register_inet6addr_notifier(&masq_inet6_notifier);
+ if (ret)
+ goto err_unregister;
+
+ mutex_unlock(&masq_mutex);
+ return ret;
+
+err_unregister:
+ unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+ masq_refcnt--;
+out_unlock:
+ mutex_unlock(&masq_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
+
+void nf_nat_masquerade_ipv6_unregister_notifier(void)
+{
+ mutex_lock(&masq_mutex);
+ /* check if the notifier still has clients */
+ if (--masq_refcnt > 0)
+ goto out_unlock;
+
+ unregister_inet6addr_notifier(&masq_inet6_notifier);
+ unregister_netdevice_notifier(&masq_dev_notifier);
+out_unlock:
+ mutex_unlock(&masq_mutex);
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
+#endif
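
The merged masquerade code shares one masq_mutex/masq_refcnt pair so the netdevice and inet/inet6 address notifiers are registered only by the first user and torn down by the last. A compact user-space sketch of that refcounted register/unregister idiom (a pthread mutex stands in for masq_mutex; function names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t masq_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int masq_refcnt;

static void register_backend(void)   { puts("notifiers registered"); }
static void unregister_backend(void) { puts("notifiers unregistered"); }

static void masq_get(void)
{
	pthread_mutex_lock(&masq_mutex);
	if (++masq_refcnt == 1)		/* first user wires up the backend */
		register_backend();
	pthread_mutex_unlock(&masq_mutex);
}

static void masq_put(void)
{
	pthread_mutex_lock(&masq_mutex);
	if (--masq_refcnt == 0)		/* last user tears it down */
		unregister_backend();
	pthread_mutex_unlock(&masq_mutex);
}

int main(void)
{
	masq_get();	/* e.g. the ipv4 masquerade user appears */
	masq_get();	/* e.g. the ipv6 masquerade user appears */
	masq_put();
	masq_put();	/* only this last put unregisters */
	return 0;
}
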
diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
index f83bf9d8c9f5..62743da3004f 100644
--- a/net/netfilter/nf_nat_proto.c
+++ b/net/netfilter/nf_nat_proto.c
@@ -20,13 +20,26 @@
#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
+
+#include <linux/ipv6.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <net/ip6_route.h>
+#include <net/xfrm.h>
+#include <net/ipv6.h>
+
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+static void nf_csum_update(struct sk_buff *skb,
+ unsigned int iphdroff, __sum16 *check,
+ const struct nf_conntrack_tuple *t,
+ enum nf_nat_manip_type maniptype);
static void
__udp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, struct udphdr *hdr,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype, bool do_csum)
@@ -43,8 +56,7 @@ __udp_manip_pkt(struct sk_buff *skb,
portptr = &hdr->dest;
}
if (do_csum) {
- l3proto->csum_update(skb, iphdroff, &hdr->check,
- tuple, maniptype);
+ nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
false);
if (!hdr->check)
@@ -54,7 +66,6 @@ __udp_manip_pkt(struct sk_buff *skb,
}
static bool udp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
@@ -68,12 +79,11 @@ static bool udp_manip_pkt(struct sk_buff *skb,
hdr = (struct udphdr *)(skb->data + hdroff);
do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
- __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum);
+ __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
return true;
}
static bool udplite_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
@@ -85,14 +95,13 @@ static bool udplite_manip_pkt(struct sk_buff *skb,
return false;
hdr = (struct udphdr *)(skb->data + hdroff);
- __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, true);
+ __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, true);
#endif
return true;
}
static bool
sctp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
@@ -135,7 +144,6 @@ sctp_manip_pkt(struct sk_buff *skb,
static bool
tcp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
@@ -171,14 +179,13 @@ tcp_manip_pkt(struct sk_buff *skb,
if (hdrsize < sizeof(*hdr))
return true;
- l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
+ nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
return true;
}
static bool
dccp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
@@ -210,8 +217,7 @@ dccp_manip_pkt(struct sk_buff *skb,
if (hdrsize < sizeof(*hdr))
return true;
- l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
- tuple, maniptype);
+ nf_csum_update(skb, iphdroff, &hdr->dccph_checksum, tuple, maniptype);
inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
false);
#endif
@@ -220,7 +226,6 @@ dccp_manip_pkt(struct sk_buff *skb,
static bool
icmp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
@@ -239,7 +244,6 @@ icmp_manip_pkt(struct sk_buff *skb,
static bool
icmpv6_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
@@ -250,8 +254,7 @@ icmpv6_manip_pkt(struct sk_buff *skb,
return false;
hdr = (struct icmp6hdr *)(skb->data + hdroff);
- l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum,
- tuple, maniptype);
+ nf_csum_update(skb, iphdroff, &hdr->icmp6_cksum, tuple, maniptype);
if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST ||
hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
@@ -265,7 +268,6 @@ icmpv6_manip_pkt(struct sk_buff *skb,
/* manipulate a GRE packet according to maniptype */
static bool
gre_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
@@ -304,40 +306,718 @@ gre_manip_pkt(struct sk_buff *skb,
return true;
}
-bool nf_nat_l4proto_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
+static bool l4proto_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
switch (tuple->dst.protonum) {
case IPPROTO_TCP:
- return tcp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ return tcp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_UDP:
- return udp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ return udp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_UDPLITE:
- return udplite_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ return udplite_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_SCTP:
- return sctp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ return sctp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_ICMP:
- return icmp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ return icmp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_ICMPV6:
- return icmpv6_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ return icmpv6_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_DCCP:
- return dccp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ return dccp_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
case IPPROTO_GRE:
- return gre_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ return gre_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
}
/* If we don't know protocol -- no error, pass it unmodified. */
return true;
}
-EXPORT_SYMBOL_GPL(nf_nat_l4proto_manip_pkt);
+
+static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
+ unsigned int iphdroff,
+ const struct nf_conntrack_tuple *target,
+ enum nf_nat_manip_type maniptype)
+{
+ struct iphdr *iph;
+ unsigned int hdroff;
+
+ if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
+ return false;
+
+ iph = (void *)skb->data + iphdroff;
+ hdroff = iphdroff + iph->ihl * 4;
+
+ if (!l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype))
+ return false;
+ iph = (void *)skb->data + iphdroff;
+
+ if (maniptype == NF_NAT_MANIP_SRC) {
+ csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
+ iph->saddr = target->src.u3.ip;
+ } else {
+ csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
+ iph->daddr = target->dst.u3.ip;
+ }
+ return true;
+}
+
+static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
+ unsigned int iphdroff,
+ const struct nf_conntrack_tuple *target,
+ enum nf_nat_manip_type maniptype)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ipv6h;
+ __be16 frag_off;
+ int hdroff;
+ u8 nexthdr;
+
+ if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h)))
+ return false;
+
+ ipv6h = (void *)skb->data + iphdroff;
+ nexthdr = ipv6h->nexthdr;
+ hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
+ &nexthdr, &frag_off);
+ if (hdroff < 0)
+ goto manip_addr;
+
+ if ((frag_off & htons(~0x7)) == 0 &&
+ !l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype))
+ return false;
+
+ /* must reload, offset might have changed */
+ ipv6h = (void *)skb->data + iphdroff;
+
+manip_addr:
+ if (maniptype == NF_NAT_MANIP_SRC)
+ ipv6h->saddr = target->src.u3.in6;
+ else
+ ipv6h->daddr = target->dst.u3.in6;
+
+#endif
+ return true;
+}
+
+unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
+ enum nf_nat_manip_type mtype,
+ enum ip_conntrack_dir dir)
+{
+ struct nf_conntrack_tuple target;
+
+ /* We are aiming to look like inverse of other direction. */
+ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
+
+ switch (target.src.l3num) {
+ case NFPROTO_IPV6:
+ if (nf_nat_ipv6_manip_pkt(skb, 0, &target, mtype))
+ return NF_ACCEPT;
+ break;
+ case NFPROTO_IPV4:
+ if (nf_nat_ipv4_manip_pkt(skb, 0, &target, mtype))
+ return NF_ACCEPT;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return NF_DROP;
+}
+
+static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
+ unsigned int iphdroff, __sum16 *check,
+ const struct nf_conntrack_tuple *t,
+ enum nf_nat_manip_type maniptype)
+{
+ struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
+ __be32 oldip, newip;
+
+ if (maniptype == NF_NAT_MANIP_SRC) {
+ oldip = iph->saddr;
+ newip = t->src.u3.ip;
+ } else {
+ oldip = iph->daddr;
+ newip = t->dst.u3.ip;
+ }
+ inet_proto_csum_replace4(check, skb, oldip, newip, true);
+}
+
+static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
+ unsigned int iphdroff, __sum16 *check,
+ const struct nf_conntrack_tuple *t,
+ enum nf_nat_manip_type maniptype)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
+ const struct in6_addr *oldip, *newip;
+
+ if (maniptype == NF_NAT_MANIP_SRC) {
+ oldip = &ipv6h->saddr;
+ newip = &t->src.u3.in6;
+ } else {
+ oldip = &ipv6h->daddr;
+ newip = &t->dst.u3.in6;
+ }
+ inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
+ newip->s6_addr32, true);
+#endif
+}
+
+static void nf_csum_update(struct sk_buff *skb,
+ unsigned int iphdroff, __sum16 *check,
+ const struct nf_conntrack_tuple *t,
+ enum nf_nat_manip_type maniptype)
+{
+ switch (t->src.l3num) {
+ case NFPROTO_IPV4:
+ nf_nat_ipv4_csum_update(skb, iphdroff, check, t, maniptype);
+ return;
+ case NFPROTO_IPV6:
+ nf_nat_ipv6_csum_update(skb, iphdroff, check, t, maniptype);
+ return;
+ }
+}
+
+static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
+ u8 proto, void *data, __sum16 *check,
+ int datalen, int oldlen)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
+ ip_hdrlen(skb);
+ skb->csum_offset = (void *)check - data;
+ *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
+ proto, 0);
+ } else {
+ inet_proto_csum_replace2(check, skb,
+ htons(oldlen), htons(datalen), true);
+ }
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
+ u8 proto, void *data, __sum16 *check,
+ int datalen, int oldlen)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
+ (data - (void *)skb->data);
+ skb->csum_offset = (void *)check - data;
+ *check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ datalen, proto, 0);
+ } else {
+ inet_proto_csum_replace2(check, skb,
+ htons(oldlen), htons(datalen), true);
+ }
+}
+#endif
+
+void nf_nat_csum_recalc(struct sk_buff *skb,
+ u8 nfproto, u8 proto, void *data, __sum16 *check,
+ int datalen, int oldlen)
+{
+ switch (nfproto) {
+ case NFPROTO_IPV4:
+ nf_nat_ipv4_csum_recalc(skb, proto, data, check,
+ datalen, oldlen);
+ return;
+#if IS_ENABLED(CONFIG_IPV6)
+ case NFPROTO_IPV6:
+ nf_nat_ipv6_csum_recalc(skb, proto, data, check,
+ datalen, oldlen);
+ return;
+#endif
+ }
+
+ WARN_ON_ONCE(1);
+}
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum)
+{
+ struct {
+ struct icmphdr icmp;
+ struct iphdr ip;
+ } *inside;
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
+ unsigned int hdrlen = ip_hdrlen(skb);
+ struct nf_conntrack_tuple target;
+ unsigned long statusbit;
+
+ WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
+
+ if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+ return 0;
+ if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+ return 0;
+
+ inside = (void *)skb->data + hdrlen;
+ if (inside->icmp.type == ICMP_REDIRECT) {
+ if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
+ return 0;
+ if (ct->status & IPS_NAT_MASK)
+ return 0;
+ }
+
+ if (manip == NF_NAT_MANIP_SRC)
+ statusbit = IPS_SRC_NAT;
+ else
+ statusbit = IPS_DST_NAT;
+
+ /* Invert if this is reply direction */
+ if (dir == IP_CT_DIR_REPLY)
+ statusbit ^= IPS_NAT_MASK;
+
+ if (!(ct->status & statusbit))
+ return 1;
+
+ if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
+ &ct->tuplehash[!dir].tuple, !manip))
+ return 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ /* Reloading "inside" here since manip_pkt may reallocate */
+ inside = (void *)skb->data + hdrlen;
+ inside->icmp.checksum = 0;
+ inside->icmp.checksum =
+ csum_fold(skb_checksum(skb, hdrlen,
+ skb->len - hdrlen, 0));
+ }
+
+ /* Change outer to look like the reply to an incoming packet */
+ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
+ target.dst.protonum = IPPROTO_ICMP;
+ if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
+
+static unsigned int
+nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return NF_ACCEPT;
+
+ if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) {
+ if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+ if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
+ state->hook))
+ return NF_DROP;
+ else
+ return NF_ACCEPT;
+ }
+ }
+
+ return nf_nat_inet_fn(priv, skb, state);
+}
+
+static unsigned int
+nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ unsigned int ret;
+ __be32 daddr = ip_hdr(skb)->daddr;
+
+ ret = nf_nat_ipv4_fn(priv, skb, state);
+ if (ret == NF_ACCEPT && daddr != ip_hdr(skb)->daddr)
+ skb_dst_drop(skb);
+
+ return ret;
+}
+
+static unsigned int
+nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+#ifdef CONFIG_XFRM
+ const struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ int err;
+#endif
+ unsigned int ret;
+
+ ret = nf_nat_ipv4_fn(priv, skb, state);
+#ifdef CONFIG_XFRM
+ if (ret != NF_ACCEPT)
+ return ret;
+
+ if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+ return ret;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (ct->tuplehash[dir].tuple.src.u3.ip !=
+ ct->tuplehash[!dir].tuple.dst.u3.ip ||
+ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
+ ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all)) {
+ err = nf_xfrm_me_harder(state->net, skb, AF_INET);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+ }
+#endif
+ return ret;
+}
+
+static unsigned int
+nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ const struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ unsigned int ret;
+ int err;
+
+ ret = nf_nat_ipv4_fn(priv, skb, state);
+ if (ret != NF_ACCEPT)
+ return ret;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+ ct->tuplehash[!dir].tuple.src.u3.ip) {
+ err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+#ifdef CONFIG_XFRM
+ else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+ ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
+ ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all) {
+ err = nf_xfrm_me_harder(state->net, skb, AF_INET);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+#endif
+ }
+ return ret;
+}
+
+static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
+ /* Before packet filtering, change destination */
+ {
+ .hook = nf_nat_ipv4_in,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP_PRI_NAT_DST,
+ },
+ /* After packet filtering, change source */
+ {
+ .hook = nf_nat_ipv4_out,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_NAT_SRC,
+ },
+ /* Before packet filtering, change destination */
+ {
+ .hook = nf_nat_ipv4_local_fn,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP_PRI_NAT_DST,
+ },
+ /* After packet filtering, change source */
+ {
+ .hook = nf_nat_ipv4_fn,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_NAT_SRC,
+ },
+};
+
+int nf_nat_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops)
+{
+ return nf_nat_register_fn(net, ops, nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv4_register_fn);
+
+void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
+{
+ nf_nat_unregister_fn(net, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv4_unregister_fn);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum,
+ unsigned int hdrlen)
+{
+ struct {
+ struct icmp6hdr icmp6;
+ struct ipv6hdr ip6;
+ } *inside;
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
+ struct nf_conntrack_tuple target;
+ unsigned long statusbit;
+
+ WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
+
+ if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+ return 0;
+ if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
+ return 0;
+
+ inside = (void *)skb->data + hdrlen;
+ if (inside->icmp6.icmp6_type == NDISC_REDIRECT) {
+ if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
+ return 0;
+ if (ct->status & IPS_NAT_MASK)
+ return 0;
+ }
+
+ if (manip == NF_NAT_MANIP_SRC)
+ statusbit = IPS_SRC_NAT;
+ else
+ statusbit = IPS_DST_NAT;
+
+ /* Invert if this is reply direction */
+ if (dir == IP_CT_DIR_REPLY)
+ statusbit ^= IPS_NAT_MASK;
+
+ if (!(ct->status & statusbit))
+ return 1;
+
+ if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
+ &ct->tuplehash[!dir].tuple, !manip))
+ return 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ inside = (void *)skb->data + hdrlen;
+ inside->icmp6.icmp6_cksum = 0;
+ inside->icmp6.icmp6_cksum =
+ csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ skb->len - hdrlen, IPPROTO_ICMPV6,
+ skb_checksum(skb, hdrlen,
+ skb->len - hdrlen, 0));
+ }
+
+ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
+ target.dst.protonum = IPPROTO_ICMPV6;
+ if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
+
+static unsigned int
+nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ __be16 frag_off;
+ int hdrlen;
+ u8 nexthdr;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ /* Can't track? It's not due to stress, or conntrack would
+ * have dropped it. Hence it's the user's responsibility to
+ * packet filter it out, or implement conntrack/NAT for that
+ * protocol. 8) --RR
+ */
+ if (!ct)
+ return NF_ACCEPT;
+
+ if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) {
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+ &nexthdr, &frag_off);
+
+ if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
+ if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
+ state->hook,
+ hdrlen))
+ return NF_DROP;
+ else
+ return NF_ACCEPT;
+ }
+ }
+
+ return nf_nat_inet_fn(priv, skb, state);
+}
+
+static unsigned int
+nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ unsigned int ret;
+ struct in6_addr daddr = ipv6_hdr(skb)->daddr;
+
+ ret = nf_nat_ipv6_fn(priv, skb, state);
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
+ skb_dst_drop(skb);
+
+ return ret;
+}
+
+static unsigned int
+nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+#ifdef CONFIG_XFRM
+ const struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ int err;
+#endif
+ unsigned int ret;
+
+ ret = nf_nat_ipv6_fn(priv, skb, state);
+#ifdef CONFIG_XFRM
+ if (ret != NF_ACCEPT)
+ return ret;
+
+ if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+ return ret;
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+ &ct->tuplehash[!dir].tuple.dst.u3) ||
+ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
+ ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all)) {
+ err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+ }
+#endif
+
+ return ret;
+}
+
+static int nat_route_me_harder(struct net *net, struct sk_buff *skb)
+{
+#ifdef CONFIG_IPV6_MODULE
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (!v6_ops)
+ return -EHOSTUNREACH;
+
+ return v6_ops->route_me_harder(net, skb);
+#else
+ return ip6_route_me_harder(net, skb);
+#endif
+}
+
+static unsigned int
+nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ const struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ unsigned int ret;
+ int err;
+
+ ret = nf_nat_ipv6_fn(priv, skb, state);
+ if (ret != NF_ACCEPT)
+ return ret;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+ &ct->tuplehash[!dir].tuple.src.u3)) {
+ err = nat_route_me_harder(state->net, skb);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+#ifdef CONFIG_XFRM
+ else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
+ ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all) {
+ err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+#endif
+ }
+
+ return ret;
+}
+
+static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
+ /* Before packet filtering, change destination */
+ {
+ .hook = nf_nat_ipv6_in,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP6_PRI_NAT_DST,
+ },
+ /* After packet filtering, change source */
+ {
+ .hook = nf_nat_ipv6_out,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_NAT_SRC,
+ },
+ /* Before packet filtering, change destination */
+ {
+ .hook = nf_nat_ipv6_local_fn,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_OUT,
+ .priority = NF_IP6_PRI_NAT_DST,
+ },
+ /* After packet filtering, change source */
+ {
+ .hook = nf_nat_ipv6_fn,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP6_PRI_NAT_SRC,
+ },
+};
+
+int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops)
+{
+ return nf_nat_register_fn(net, ops, nf_nat_ipv6_ops,
+ ARRAY_SIZE(nf_nat_ipv6_ops));
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv6_register_fn);
+
+void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
+{
+ nf_nat_unregister_fn(net, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv6_unregister_fn);
+#endif /* CONFIG_IPV6 */
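For context, a minimal sketch of how a NAT backend is expected to plug into these hooks. Everything named my_* below is hypothetical and not part of this patch; only nf_nat_ipv4_register_fn()/nf_nat_ipv4_unregister_fn() and the nf_hook_ops layout come from the code above.

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_nat.h>

/* Hypothetical backend hook: invoked when a NAT decision is needed for
 * a packet traversing PRE_ROUTING.
 */
static unsigned int my_nat_do_chain(void *priv, struct sk_buff *skb,
				    const struct nf_hook_state *state)
{
	/* evaluate the backend's NAT ruleset here */
	return NF_ACCEPT;
}

static const struct nf_hook_ops my_nat_ops = {
	.hook		= my_nat_do_chain,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_NAT_DST,
};

static int my_backend_enable(struct net *net)
{
	/* the core is expected to install its own per-family wrapper
	 * (nf_nat_ipv4_in() above) on the real netfilter hook and call
	 * back into my_nat_do_chain() when making the NAT decision
	 */
	return nf_nat_ipv4_register_fn(net, &my_nat_ops);
}

static void my_backend_disable(struct net *net)
{
	nf_nat_ipv4_unregister_fn(net, &my_nat_ops);
}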
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2b0a93300dd7..faf6bd10a19f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -37,10 +37,16 @@ enum {
NFT_VALIDATE_DO,
};
+static struct rhltable nft_objname_ht;
+
static u32 nft_chain_hash(const void *data, u32 len, u32 seed);
static u32 nft_chain_hash_obj(const void *data, u32 len, u32 seed);
static int nft_chain_hash_cmp(struct rhashtable_compare_arg *, const void *);
+static u32 nft_objname_hash(const void *data, u32 len, u32 seed);
+static u32 nft_objname_hash_obj(const void *data, u32 len, u32 seed);
+static int nft_objname_hash_cmp(struct rhashtable_compare_arg *, const void *);
+
static const struct rhashtable_params nft_chain_ht_params = {
.head_offset = offsetof(struct nft_chain, rhlhead),
.key_offset = offsetof(struct nft_chain, name),
@@ -51,6 +57,15 @@ static const struct rhashtable_params nft_chain_ht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params nft_objname_ht_params = {
+ .head_offset = offsetof(struct nft_object, rhlhead),
+ .key_offset = offsetof(struct nft_object, key),
+ .hashfn = nft_objname_hash,
+ .obj_hashfn = nft_objname_hash_obj,
+ .obj_cmpfn = nft_objname_hash_cmp,
+ .automatic_shrinking = true,
+};
+
static void nft_validate_state_update(struct net *net, u8 new_validate_state)
{
switch (net->nft.validate_state) {
@@ -116,6 +131,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
kfree(trans);
}
+static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ struct net *net = ctx->net;
+ struct nft_trans *trans;
+
+ if (!nft_set_is_anonymous(set))
+ return;
+
+ list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
+ if (trans->msg_type == NFT_MSG_NEWSET &&
+ nft_trans_set(trans) == set) {
+ nft_trans_set_bound(trans) = true;
+ break;
+ }
+ }
+}
+
static int nf_tables_register_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
@@ -211,18 +243,6 @@ static int nft_delchain(struct nft_ctx *ctx)
return err;
}
-/* either expr ops provide both activate/deactivate, or neither */
-static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
-{
- if (!ops)
- return true;
-
- if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
- return false;
-
- return true;
-}
-
static void nft_rule_expr_activate(const struct nft_ctx *ctx,
struct nft_rule *rule)
{
@@ -238,14 +258,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
}
static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
- struct nft_rule *rule)
+ struct nft_rule *rule,
+ enum nft_trans_phase phase)
{
struct nft_expr *expr;
expr = nft_expr_first(rule);
while (expr != nft_expr_last(rule) && expr->ops) {
if (expr->ops->deactivate)
- expr->ops->deactivate(ctx, expr);
+ expr->ops->deactivate(ctx, expr, phase);
expr = nft_expr_next(expr);
}
@@ -296,7 +317,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
nft_trans_destroy(trans);
return err;
}
- nft_rule_expr_deactivate(ctx, rule);
+ nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
return 0;
}
@@ -307,6 +328,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
int err;
list_for_each_entry(rule, &ctx->chain->rules, list) {
+ if (!nft_is_active_next(ctx->net, rule))
+ continue;
+
err = nft_delrule(ctx, rule);
if (err < 0)
return err;
@@ -814,6 +838,34 @@ static int nft_chain_hash_cmp(struct rhashtable_compare_arg *arg,
return strcmp(chain->name, name);
}
+static u32 nft_objname_hash(const void *data, u32 len, u32 seed)
+{
+ const struct nft_object_hash_key *k = data;
+
+ seed ^= hash_ptr(k->table, 32);
+
+ return jhash(k->name, strlen(k->name), seed);
+}
+
+static u32 nft_objname_hash_obj(const void *data, u32 len, u32 seed)
+{
+ const struct nft_object *obj = data;
+
+ return nft_objname_hash(&obj->key, 0, seed);
+}
+
+static int nft_objname_hash_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct nft_object_hash_key *k = arg->key;
+ const struct nft_object *obj = ptr;
+
+ if (obj->key.table != k->table)
+ return -1;
+
+ return strcmp(obj->key.name, k->name);
+}
+
static int nf_tables_newtable(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[],
@@ -1070,7 +1122,7 @@ nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask)
return ERR_PTR(-ENOENT);
}
-static bool lockdep_commit_lock_is_held(struct net *net)
+static bool lockdep_commit_lock_is_held(const struct net *net)
{
#ifdef CONFIG_PROVE_LOCKING
return lockdep_is_held(&net->nft.commit_mutex);
@@ -1929,9 +1981,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
*/
int nft_register_expr(struct nft_expr_type *type)
{
- if (!nft_expr_check_ops(type->ops))
- return -EINVAL;
-
nfnl_lock(NFNL_SUBSYS_NFTABLES);
if (type->family == NFPROTO_UNSPEC)
list_add_tail_rcu(&type->list, &nf_tables_expressions);
@@ -2079,10 +2128,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
err = PTR_ERR(ops);
goto err1;
}
- if (!nft_expr_check_ops(ops)) {
- err = -EINVAL;
- goto err1;
- }
} else
ops = type->ops;
@@ -2127,6 +2172,7 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
{
struct nft_expr_info info;
struct nft_expr *expr;
+ struct module *owner;
int err;
err = nf_tables_expr_parse(ctx, nla, &info);
@@ -2146,7 +2192,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
err3:
kfree(expr);
err2:
- module_put(info.ops->type->owner);
+ owner = info.ops->type->owner;
+ if (info.ops->type->release_ops)
+ info.ops->type->release_ops(info.ops);
+
+ module_put(owner);
err1:
return ERR_PTR(err);
}
@@ -2196,6 +2246,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
[NFTA_RULE_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
[NFTA_RULE_ID] = { .type = NLA_U32 },
+ [NFTA_RULE_POSITION_ID] = { .type = NLA_U32 },
};
static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
@@ -2304,7 +2355,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
unsigned int s_idx = cb->args[0];
const struct nft_rule *rule;
- int rc = 1;
list_for_each_entry_rcu(rule, &chain->rules, list) {
if (!nft_is_active(net, rule))
@@ -2321,16 +2371,13 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
NLM_F_MULTI | NLM_F_APPEND,
table->family,
table, chain, rule) < 0)
- goto out_unfinished;
+ return 1;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
(*idx)++;
}
- rc = 0;
-out_unfinished:
- cb->args[0] = *idx;
- return rc;
+ return 0;
}
static int nf_tables_dump_rules(struct sk_buff *skb,
@@ -2354,7 +2401,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
continue;
- if (ctx && ctx->chain) {
+ if (ctx && ctx->table && ctx->chain) {
struct rhlist_head *list, *tmp;
list = rhltable_lookup(&table->chains_ht, ctx->chain,
@@ -2382,6 +2429,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
}
done:
rcu_read_unlock();
+
+ cb->args[0] = idx;
return skb->len;
}
@@ -2513,7 +2562,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
static void nf_tables_rule_release(const struct nft_ctx *ctx,
struct nft_rule *rule)
{
- nft_rule_expr_deactivate(ctx, rule);
+ nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
nf_tables_rule_destroy(ctx, rule);
}
@@ -2567,6 +2616,9 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
return 0;
}
+static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ const struct nlattr *nla);
+
#define NFT_RULE_MAXEXPRS 128
static int nf_tables_newrule(struct net *net, struct sock *nlsk,
@@ -2636,6 +2688,12 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
return PTR_ERR(old_rule);
}
+ } else if (nla[NFTA_RULE_POSITION_ID]) {
+ old_rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_POSITION_ID]);
+ if (IS_ERR(old_rule)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]);
+ return PTR_ERR(old_rule);
+ }
}
}
@@ -3710,39 +3768,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
bind:
binding->chain = ctx->chain;
list_add_tail_rcu(&binding->list, &set->bindings);
+ nft_set_trans_bind(ctx, set);
+
return 0;
}
EXPORT_SYMBOL_GPL(nf_tables_bind_set);
-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
- struct nft_set_binding *binding)
-{
- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
- nft_is_active(ctx->net, set))
- list_add_tail_rcu(&set->list, &ctx->table->sets);
-
- list_add_tail_rcu(&binding->list, &set->bindings);
-}
-EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
-
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
- struct nft_set_binding *binding)
+ struct nft_set_binding *binding, bool event)
{
list_del_rcu(&binding->list);
- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
- nft_is_active(ctx->net, set))
+ if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
list_del_rcu(&set->list);
+ if (event)
+ nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
+ GFP_KERNEL);
+ }
}
EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
{
- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
- nft_is_active(ctx->net, set)) {
- nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
+ if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
nft_set_destroy(set);
- }
}
EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
@@ -3853,7 +3902,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) &&
nla_put_string(skb, NFTA_SET_ELEM_OBJREF,
- (*nft_set_ext_obj(ext))->name) < 0)
+ (*nft_set_ext_obj(ext))->key.name) < 0)
goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
@@ -4386,7 +4435,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = -EINVAL;
goto err2;
}
- obj = nft_obj_lookup(ctx->table, nla[NFTA_SET_ELEM_OBJREF],
+ obj = nft_obj_lookup(ctx->net, ctx->table,
+ nla[NFTA_SET_ELEM_OBJREF],
set->objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -4508,6 +4558,8 @@ err6:
err5:
kfree(trans);
err4:
+ if (obj)
+ obj->use--;
kfree(elem.priv);
err3:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
@@ -4819,18 +4871,36 @@ void nft_unregister_obj(struct nft_object_type *obj_type)
}
EXPORT_SYMBOL_GPL(nft_unregister_obj);
-struct nft_object *nft_obj_lookup(const struct nft_table *table,
+struct nft_object *nft_obj_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla, u32 objtype,
u8 genmask)
{
+ struct nft_object_hash_key k = { .table = table };
+ char search[NFT_OBJ_MAXNAMELEN];
+ struct rhlist_head *tmp, *list;
struct nft_object *obj;
- list_for_each_entry_rcu(obj, &table->objects, list) {
- if (!nla_strcmp(nla, obj->name) &&
- objtype == obj->ops->type->type &&
- nft_active_genmask(obj, genmask))
+ nla_strlcpy(search, nla, sizeof(search));
+ k.name = search;
+
+ WARN_ON_ONCE(!rcu_read_lock_held() &&
+ !lockdep_commit_lock_is_held(net));
+
+ rcu_read_lock();
+ list = rhltable_lookup(&nft_objname_ht, &k, nft_objname_ht_params);
+ if (!list)
+ goto out;
+
+ rhl_for_each_entry_rcu(obj, tmp, list, rhlhead) {
+ if (objtype == obj->ops->type->type &&
+ nft_active_genmask(obj, genmask)) {
+ rcu_read_unlock();
return obj;
+ }
}
+out:
+ rcu_read_unlock();
return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(nft_obj_lookup);
@@ -4988,7 +5058,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
- obj = nft_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask);
+ obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
if (err != -ENOENT) {
@@ -5014,11 +5084,11 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
err = PTR_ERR(obj);
goto err1;
}
- obj->table = table;
+ obj->key.table = table;
obj->handle = nf_tables_alloc_handle(table);
- obj->name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL);
- if (!obj->name) {
+ obj->key.name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL);
+ if (!obj->key.name) {
err = -ENOMEM;
goto err2;
}
@@ -5027,11 +5097,20 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
if (err < 0)
goto err3;
+ err = rhltable_insert(&nft_objname_ht, &obj->rhlhead,
+ nft_objname_ht_params);
+ if (err < 0)
+ goto err4;
+
list_add_tail_rcu(&obj->list, &table->objects);
table->use++;
return 0;
+err4:
+ /* queued in transaction log */
+ INIT_LIST_HEAD(&obj->list);
+ return err;
err3:
- kfree(obj->name);
+ kfree(obj->key.name);
err2:
if (obj->ops->destroy)
obj->ops->destroy(&ctx, obj);
@@ -5060,7 +5139,7 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
- nla_put_string(skb, NFTA_OBJ_NAME, obj->name) ||
+ nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset) ||
@@ -5215,7 +5294,7 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
- obj = nft_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask);
+ obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
return PTR_ERR(obj);
@@ -5246,7 +5325,7 @@ static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
obj->ops->destroy(ctx, obj);
module_put(obj->ops->type->owner);
- kfree(obj->name);
+ kfree(obj->key.name);
kfree(obj);
}
@@ -5280,7 +5359,7 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
obj = nft_obj_lookup_byhandle(table, attr, objtype, genmask);
} else {
attr = nla[NFTA_OBJ_NAME];
- obj = nft_obj_lookup(table, attr, objtype, genmask);
+ obj = nft_obj_lookup(net, table, attr, objtype, genmask);
}
if (IS_ERR(obj)) {
@@ -5297,7 +5376,7 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
return nft_delobj(&ctx, obj);
}
-void nft_obj_notify(struct net *net, struct nft_table *table,
+void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq, int event,
int family, int report, gfp_t gfp)
{
@@ -6404,6 +6483,12 @@ static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
nf_tables_commit_chain_free_rules_old(g0);
}
+static void nft_obj_del(struct nft_object *obj)
+{
+ rhltable_remove(&nft_objname_ht, &obj->rhlhead, nft_objname_ht_params);
+ list_del_rcu(&obj->list);
+}
+
static void nft_chain_del(struct nft_chain *chain)
{
struct nft_table *table = chain->table;
@@ -6535,6 +6620,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nf_tables_rule_notify(&trans->ctx,
nft_trans_rule(trans),
NFT_MSG_DELRULE);
+ nft_rule_expr_deactivate(&trans->ctx,
+ nft_trans_rule(trans),
+ NFT_TRANS_COMMIT);
break;
case NFT_MSG_NEWSET:
nft_clear(net, nft_trans_set(trans));
@@ -6580,7 +6668,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_trans_destroy(trans);
break;
case NFT_MSG_DELOBJ:
- list_del_rcu(&nft_trans_obj(trans)->list);
+ nft_obj_del(nft_trans_obj(trans));
nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
NFT_MSG_DELOBJ);
break;
@@ -6621,7 +6709,8 @@ static void nf_tables_abort_release(struct nft_trans *trans)
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_NEWSET:
- nft_set_destroy(nft_trans_set(trans));
+ if (!nft_trans_set_bound(trans))
+ nft_set_destroy(nft_trans_set(trans));
break;
case NFT_MSG_NEWSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -6682,7 +6771,9 @@ static int __nf_tables_abort(struct net *net)
case NFT_MSG_NEWRULE:
trans->ctx.chain->use--;
list_del_rcu(&nft_trans_rule(trans)->list);
- nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
+ nft_rule_expr_deactivate(&trans->ctx,
+ nft_trans_rule(trans),
+ NFT_TRANS_ABORT);
break;
case NFT_MSG_DELRULE:
trans->ctx.chain->use++;
@@ -6692,7 +6783,8 @@ static int __nf_tables_abort(struct net *net)
break;
case NFT_MSG_NEWSET:
trans->ctx.table->use--;
- list_del_rcu(&nft_trans_set(trans)->list);
+ if (!nft_trans_set_bound(trans))
+ list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
trans->ctx.table->use++;
@@ -6716,7 +6808,7 @@ static int __nf_tables_abort(struct net *net)
break;
case NFT_MSG_NEWOBJ:
trans->ctx.table->use--;
- list_del_rcu(&nft_trans_obj(trans)->list);
+ nft_obj_del(nft_trans_obj(trans));
break;
case NFT_MSG_DELOBJ:
trans->ctx.table->use++;
@@ -7330,7 +7422,7 @@ static void __nft_release_tables(struct net *net)
nft_set_destroy(set);
}
list_for_each_entry_safe(obj, ne, &table->objects, list) {
- list_del(&obj->list);
+ nft_obj_del(obj);
table->use--;
nft_obj_destroy(&ctx, obj);
}
@@ -7392,12 +7484,18 @@ static int __init nf_tables_module_init(void)
if (err < 0)
goto err3;
+ err = rhltable_init(&nft_objname_ht, &nft_objname_ht_params);
+ if (err < 0)
+ goto err4;
+
/* must be last */
err = nfnetlink_subsys_register(&nf_tables_subsys);
if (err < 0)
- goto err4;
+ goto err5;
return err;
+err5:
+ rhltable_destroy(&nft_objname_ht);
err4:
unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
err3:
@@ -7417,6 +7515,7 @@ static void __exit nf_tables_module_exit(void)
unregister_pernet_subsys(&nf_tables_net_ops);
cancel_work_sync(&trans_destroy_work);
rcu_barrier();
+ rhltable_destroy(&nft_objname_ht);
nf_tables_core_module_exit();
}
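The object lookup above replaces a linear list walk with an rhltable keyed on the (table, name) pair, and nft_obj_lookup() now takes the net namespace so the lockdep assertion can check the per-netns commit mutex. A minimal, purely illustrative caller follows; the helper name and the choice of NFT_OBJECT_COUNTER are assumptions, not part of the patch.

/* Resolve a named counter object in one table; returns ERR_PTR(-ENOENT)
 * when no live object of that type and name exists in the generation
 * selected by genmask.  The caller must hold rcu_read_lock() or the
 * commit mutex, as asserted in nft_obj_lookup() above.
 */
static struct nft_object *
example_counter_lookup(struct net *net, const struct nft_table *table,
		       const struct nlattr *name_attr, u8 genmask)
{
	return nft_obj_lookup(net, table, name_attr,
			      NFT_OBJECT_COUNTER, genmask);
}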
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index a50500232b0a..d0f168c2670f 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -98,21 +98,23 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
const struct nft_pktinfo *pkt)
{
struct nft_base_chain *base_chain;
+ struct nft_stats __percpu *pstats;
struct nft_stats *stats;
base_chain = nft_base_chain(chain);
- if (!rcu_access_pointer(base_chain->stats))
- return;
- local_bh_disable();
- stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
- if (stats) {
+ rcu_read_lock();
+ pstats = READ_ONCE(base_chain->stats);
+ if (pstats) {
+ local_bh_disable();
+ stats = this_cpu_ptr(pstats);
u64_stats_update_begin(&stats->syncp);
stats->pkts++;
stats->bytes += pkt->skb->len;
u64_stats_update_end(&stats->syncp);
+ local_bh_enable();
}
- local_bh_enable();
+ rcu_read_unlock();
}
struct nft_jumpstack {
@@ -124,14 +126,25 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
struct nft_regs *regs,
struct nft_pktinfo *pkt)
{
+#ifdef CONFIG_RETPOLINE
unsigned long e = (unsigned long)expr->ops->eval;
-
- if (e == (unsigned long)nft_meta_get_eval)
- nft_meta_get_eval(expr, regs, pkt);
- else if (e == (unsigned long)nft_lookup_eval)
- nft_lookup_eval(expr, regs, pkt);
- else
- expr->ops->eval(expr, regs, pkt);
+#define X(e, fun) \
+ do { if ((e) == (unsigned long)(fun)) \
+ return fun(expr, regs, pkt); } while (0)
+
+ X(e, nft_payload_eval);
+ X(e, nft_cmp_eval);
+ X(e, nft_meta_get_eval);
+ X(e, nft_lookup_eval);
+ X(e, nft_range_eval);
+ X(e, nft_immediate_eval);
+ X(e, nft_byteorder_eval);
+ X(e, nft_dynset_eval);
+ X(e, nft_rt_get_eval);
+ X(e, nft_bitwise_eval);
+#undef X
+#endif /* CONFIG_RETPOLINE */
+ expr->ops->eval(expr, regs, pkt);
}
unsigned int
@@ -210,7 +223,6 @@ next_rule:
chain = regs.verdict.chain;
goto do_chain;
case NFT_CONTINUE:
- /* fall through */
case NFT_RETURN:
nft_trace_packet(&info, chain, rule,
NFT_TRACETYPE_RETURN);
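The X() macro above avoids retpoline-protected indirect calls for the most common expression evaluators by comparing the ops->eval pointer against a list of known functions and calling the match directly. Below is a stand-alone C illustration of the same pattern; nothing in it is kernel code, and the handler names are made up.

#include <stdio.h>

static void handler_a(int x) { printf("a: %d\n", x); }
static void handler_b(int x) { printf("b: %d\n", x); }

static void dispatch(void (*fn)(int), int x)
{
#define X(f, known) do { if ((f) == (known)) { known(x); return; } } while (0)
	X(fn, handler_a);
	X(fn, handler_b);
#undef X
	fn(x);			/* fall back to a genuine indirect call */
}

int main(void)
{
	dispatch(handler_a, 1);
	dispatch(handler_b, 2);
	return 0;
}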
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 109b0d27345a..c69b11ca5aad 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -122,7 +122,7 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
return -EBUSY;
}
- l4proto = nf_ct_l4proto_find_get(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
@@ -152,7 +152,6 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
err:
kfree(timeout);
err_proto_put:
- nf_ct_l4proto_put(l4proto);
return ret;
}
@@ -302,7 +301,6 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
if (refcount_dec_if_one(&timeout->refcnt)) {
/* We are protected by nfnl mutex. */
list_del_rcu(&timeout->head);
- nf_ct_l4proto_put(timeout->timeout.l4proto);
nf_ct_untimeout(net, &timeout->timeout);
kfree_rcu(timeout, rcu_head);
} else {
@@ -359,7 +357,7 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
return -EINVAL;
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
- l4proto = nf_ct_l4proto_find_get(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
@@ -372,10 +370,8 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
if (ret < 0)
goto err;
- nf_ct_l4proto_put(l4proto);
return 0;
err:
- nf_ct_l4proto_put(l4proto);
return ret;
}
@@ -442,7 +438,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
- l4proto = nf_ct_l4proto_find_get(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
err = -EOPNOTSUPP;
if (l4proto->l4proto != l4num)
@@ -474,12 +470,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
break;
case IPPROTO_GRE:
#ifdef CONFIG_NF_CT_PROTO_GRE
- if (l4proto->net_id) {
- struct netns_proto_gre *net_gre;
-
- net_gre = net_generic(net, *l4proto->net_id);
- timeouts = net_gre->gre_timeouts;
- }
+ timeouts = nf_gre_pernet(net)->timeouts;
#endif
break;
case 255:
@@ -516,7 +507,6 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
/* this avoids a loop in nfnetlink. */
return ret == -EAGAIN ? -ENOBUFS : ret;
err:
- nf_ct_l4proto_put(l4proto);
return err;
}
@@ -597,7 +587,6 @@ static void __net_exit cttimeout_net_exit(struct net *net)
list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) {
list_del_rcu(&cur->head);
- nf_ct_l4proto_put(cur->timeout.l4proto);
if (refcount_dec_and_test(&cur->refcnt))
kfree_rcu(cur, rcu_head);
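The cttimeout changes above rely on nf_ct_l4proto_find() no longer taking a module reference, so the paired nf_ct_l4proto_put() calls go away. A small sketch of the resulting calling convention; example_l4_supported() is illustrative only and assumes the usual <net/netfilter/nf_conntrack_l4proto.h> header.

/* nf_ct_l4proto_find() never returns NULL: unknown protocol numbers map
 * to the generic tracker, which is why callers compare l4proto->l4proto
 * against the requested number instead of checking for an error.
 */
static bool example_l4_supported(u8 l4num)
{
	const struct nf_conntrack_l4proto *l4proto = nf_ct_l4proto_find(l4num);

	return l4proto->l4proto == l4num;
}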
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 6f41dd74729d..1f1d90c1716b 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
int ttl_check,
struct nf_osf_hdr_ctx *ctx)
{
+ const __u8 *optpinit = ctx->optp;
unsigned int check_WSS = 0;
int fmatch = FMATCH_WRONG;
int foptsize, optnum;
@@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
}
}
+ if (fmatch != FMATCH_OK)
+ ctx->optp = optpinit;
+
return fmatch == FMATCH_OK;
}
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index fff8073e2a56..2c75b9e0474e 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -25,9 +25,8 @@ struct nft_bitwise {
struct nft_data xor;
};
-static void nft_bitwise_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_bitwise_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_bitwise *priv = nft_expr_priv(expr);
const u32 *src = &regs->data[priv->sreg];
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index 13d4e421a6b3..19dbc34cc75e 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -26,9 +26,9 @@ struct nft_byteorder {
u8 size;
};
-static void nft_byteorder_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_byteorder_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_byteorder *priv = nft_expr_priv(expr);
u32 *src = &regs->data[priv->sreg];
diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
new file mode 100644
index 000000000000..ee4852088d50
--- /dev/null
+++ b/net/netfilter/nft_chain_nat.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static unsigned int nft_nat_do_chain(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nft_pktinfo pkt;
+
+ nft_set_pktinfo(&pkt, skb, state);
+
+ switch (state->pf) {
+#ifdef CONFIG_NF_TABLES_IPV4
+ case NFPROTO_IPV4:
+ nft_set_pktinfo_ipv4(&pkt, skb);
+ break;
+#endif
+#ifdef CONFIG_NF_TABLES_IPV6
+ case NFPROTO_IPV6:
+ nft_set_pktinfo_ipv6(&pkt, skb);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ return nft_do_chain(&pkt, priv);
+}
+
+#ifdef CONFIG_NF_TABLES_IPV4
+static const struct nft_chain_type nft_chain_nat_ipv4 = {
+ .name = "nat",
+ .type = NFT_CHAIN_T_NAT,
+ .family = NFPROTO_IPV4,
+ .owner = THIS_MODULE,
+ .hook_mask = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_POST_ROUTING) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_LOCAL_IN),
+ .hooks = {
+ [NF_INET_PRE_ROUTING] = nft_nat_do_chain,
+ [NF_INET_POST_ROUTING] = nft_nat_do_chain,
+ [NF_INET_LOCAL_OUT] = nft_nat_do_chain,
+ [NF_INET_LOCAL_IN] = nft_nat_do_chain,
+ },
+ .ops_register = nf_nat_ipv4_register_fn,
+ .ops_unregister = nf_nat_ipv4_unregister_fn,
+};
+#endif
+
+#ifdef CONFIG_NF_TABLES_IPV6
+static const struct nft_chain_type nft_chain_nat_ipv6 = {
+ .name = "nat",
+ .type = NFT_CHAIN_T_NAT,
+ .family = NFPROTO_IPV6,
+ .owner = THIS_MODULE,
+ .hook_mask = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_POST_ROUTING) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_LOCAL_IN),
+ .hooks = {
+ [NF_INET_PRE_ROUTING] = nft_nat_do_chain,
+ [NF_INET_POST_ROUTING] = nft_nat_do_chain,
+ [NF_INET_LOCAL_OUT] = nft_nat_do_chain,
+ [NF_INET_LOCAL_IN] = nft_nat_do_chain,
+ },
+ .ops_register = nf_nat_ipv6_register_fn,
+ .ops_unregister = nf_nat_ipv6_unregister_fn,
+};
+#endif
+
+static int __init nft_chain_nat_init(void)
+{
+#ifdef CONFIG_NF_TABLES_IPV6
+ nft_register_chain_type(&nft_chain_nat_ipv6);
+#endif
+#ifdef CONFIG_NF_TABLES_IPV4
+ nft_register_chain_type(&nft_chain_nat_ipv4);
+#endif
+
+ return 0;
+}
+
+static void __exit nft_chain_nat_exit(void)
+{
+#ifdef CONFIG_NF_TABLES_IPV4
+ nft_unregister_chain_type(&nft_chain_nat_ipv4);
+#endif
+#ifdef CONFIG_NF_TABLES_IPV6
+ nft_unregister_chain_type(&nft_chain_nat_ipv6);
+#endif
+}
+
+module_init(nft_chain_nat_init);
+module_exit(nft_chain_nat_exit);
+
+MODULE_LICENSE("GPL");
+#ifdef CONFIG_NF_TABLES_IPV4
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
+#endif
+#ifdef CONFIG_NF_TABLES_IPV6
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
+#endif
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 79d48c1d06f4..f9f1fa66a16e 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -24,9 +24,9 @@ struct nft_cmp_expr {
enum nft_cmp_ops op:8;
};
-static void nft_cmp_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_cmp_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_cmp_expr *priv = nft_expr_priv(expr);
int d;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 7334e0b80a5e..469f9da5073b 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -23,19 +23,6 @@
#include <linux/netfilter_arp/arp_tables.h>
#include <net/netfilter/nf_tables.h>
-struct nft_xt {
- struct list_head head;
- struct nft_expr_ops ops;
- unsigned int refcnt;
-
- /* Unlike other expressions, ops doesn't have static storage duration.
- * nft core assumes they do. We use kfree_rcu so that nft core can
- * can check expr->ops->size even after nft_compat->destroy() frees
- * the nft_xt struct that holds the ops structure.
- */
- struct rcu_head rcu_head;
-};
-
/* Used for matches where *info is larger than X byte */
#define NFT_MATCH_LARGE_THRESH 192
@@ -43,17 +30,6 @@ struct nft_xt_match_priv {
void *info;
};
-static bool nft_xt_put(struct nft_xt *xt)
-{
- if (--xt->refcnt == 0) {
- list_del(&xt->head);
- kfree_rcu(xt, rcu_head);
- return true;
- }
-
- return false;
-}
-
static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
const char *tablename)
{
@@ -248,7 +224,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
struct xt_target *target = expr->ops->data;
struct xt_tgchk_param par;
size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
- struct nft_xt *nft_xt;
u16 proto = 0;
bool inv = false;
union nft_entry e = {};
@@ -272,8 +247,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
if (!target->target)
return -EINVAL;
- nft_xt = container_of(expr->ops, struct nft_xt, ops);
- nft_xt->refcnt++;
return 0;
}
@@ -282,6 +255,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
struct xt_target *target = expr->ops->data;
void *info = nft_expr_priv(expr);
+ struct module *me = target->me;
struct xt_tgdtor_param par;
par.net = ctx->net;
@@ -291,8 +265,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
if (par.target->destroy != NULL)
par.target->destroy(&par);
- if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
- module_put(target->me);
+ module_put(me);
+ kfree(expr->ops);
}
static int nft_extension_dump_info(struct sk_buff *skb, int attr,
@@ -465,7 +439,6 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
struct xt_match *match = expr->ops->data;
struct xt_mtchk_param par;
size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
- struct nft_xt *nft_xt;
u16 proto = 0;
bool inv = false;
union nft_entry e = {};
@@ -481,13 +454,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
- ret = xt_check_match(&par, size, proto, inv);
- if (ret < 0)
- return ret;
-
- nft_xt = container_of(expr->ops, struct nft_xt, ops);
- nft_xt->refcnt++;
- return 0;
+ return xt_check_match(&par, size, proto, inv);
}
static int
@@ -530,8 +497,8 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
if (par.match->destroy != NULL)
par.match->destroy(&par);
- if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
- module_put(me);
+ module_put(me);
+ kfree(expr->ops);
}
static void
@@ -734,22 +701,13 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
.cb = nfnl_nft_compat_cb,
};
-static LIST_HEAD(nft_match_list);
-
static struct nft_expr_type nft_match_type;
-static bool nft_match_cmp(const struct xt_match *match,
- const char *name, u32 rev, u32 family)
-{
- return strcmp(match->name, name) == 0 && match->revision == rev &&
- (match->family == NFPROTO_UNSPEC || match->family == family);
-}
-
static const struct nft_expr_ops *
nft_match_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
- struct nft_xt *nft_match;
+ struct nft_expr_ops *ops;
struct xt_match *match;
unsigned int matchsize;
char *mt_name;
@@ -765,14 +723,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
family = ctx->family;
- /* Re-use the existing match if it's already loaded. */
- list_for_each_entry(nft_match, &nft_match_list, head) {
- struct xt_match *match = nft_match->ops.data;
-
- if (nft_match_cmp(match, mt_name, rev, family))
- return &nft_match->ops;
- }
-
match = xt_request_find_match(family, mt_name, rev);
if (IS_ERR(match))
return ERR_PTR(-ENOENT);
@@ -782,66 +732,62 @@ nft_match_select_ops(const struct nft_ctx *ctx,
goto err;
}
- /* This is the first time we use this match, allocate operations */
- nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
- if (nft_match == NULL) {
+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+ if (!ops) {
err = -ENOMEM;
goto err;
}
- nft_match->refcnt = 0;
- nft_match->ops.type = &nft_match_type;
- nft_match->ops.eval = nft_match_eval;
- nft_match->ops.init = nft_match_init;
- nft_match->ops.destroy = nft_match_destroy;
- nft_match->ops.dump = nft_match_dump;
- nft_match->ops.validate = nft_match_validate;
- nft_match->ops.data = match;
+ ops->type = &nft_match_type;
+ ops->eval = nft_match_eval;
+ ops->init = nft_match_init;
+ ops->destroy = nft_match_destroy;
+ ops->dump = nft_match_dump;
+ ops->validate = nft_match_validate;
+ ops->data = match;
matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
if (matchsize > NFT_MATCH_LARGE_THRESH) {
matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
- nft_match->ops.eval = nft_match_large_eval;
- nft_match->ops.init = nft_match_large_init;
- nft_match->ops.destroy = nft_match_large_destroy;
- nft_match->ops.dump = nft_match_large_dump;
+ ops->eval = nft_match_large_eval;
+ ops->init = nft_match_large_init;
+ ops->destroy = nft_match_large_destroy;
+ ops->dump = nft_match_large_dump;
}
- nft_match->ops.size = matchsize;
-
- list_add(&nft_match->head, &nft_match_list);
+ ops->size = matchsize;
- return &nft_match->ops;
+ return ops;
err:
module_put(match->me);
return ERR_PTR(err);
}
+static void nft_match_release_ops(const struct nft_expr_ops *ops)
+{
+ struct xt_match *match = ops->data;
+
+ module_put(match->me);
+ kfree(ops);
+}
+
static struct nft_expr_type nft_match_type __read_mostly = {
.name = "match",
.select_ops = nft_match_select_ops,
+ .release_ops = nft_match_release_ops,
.policy = nft_match_policy,
.maxattr = NFTA_MATCH_MAX,
.owner = THIS_MODULE,
};
-static LIST_HEAD(nft_target_list);
-
static struct nft_expr_type nft_target_type;
-static bool nft_target_cmp(const struct xt_target *tg,
- const char *name, u32 rev, u32 family)
-{
- return strcmp(tg->name, name) == 0 && tg->revision == rev &&
- (tg->family == NFPROTO_UNSPEC || tg->family == family);
-}
-
static const struct nft_expr_ops *
nft_target_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
- struct nft_xt *nft_target;
+ struct nft_expr_ops *ops;
struct xt_target *target;
char *tg_name;
u32 rev, family;
@@ -861,17 +807,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
strcmp(tg_name, "standard") == 0)
return ERR_PTR(-EINVAL);
- /* Re-use the existing target if it's already loaded. */
- list_for_each_entry(nft_target, &nft_target_list, head) {
- struct xt_target *target = nft_target->ops.data;
-
- if (!target->target)
- continue;
-
- if (nft_target_cmp(target, tg_name, rev, family))
- return &nft_target->ops;
- }
-
target = xt_request_find_target(family, tg_name, rev);
if (IS_ERR(target))
return ERR_PTR(-ENOENT);
@@ -886,38 +821,43 @@ nft_target_select_ops(const struct nft_ctx *ctx,
goto err;
}
- /* This is the first time we use this target, allocate operations */
- nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
- if (nft_target == NULL) {
+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+ if (!ops) {
err = -ENOMEM;
goto err;
}
- nft_target->refcnt = 0;
- nft_target->ops.type = &nft_target_type;
- nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
- nft_target->ops.init = nft_target_init;
- nft_target->ops.destroy = nft_target_destroy;
- nft_target->ops.dump = nft_target_dump;
- nft_target->ops.validate = nft_target_validate;
- nft_target->ops.data = target;
+ ops->type = &nft_target_type;
+ ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
+ ops->init = nft_target_init;
+ ops->destroy = nft_target_destroy;
+ ops->dump = nft_target_dump;
+ ops->validate = nft_target_validate;
+ ops->data = target;
if (family == NFPROTO_BRIDGE)
- nft_target->ops.eval = nft_target_eval_bridge;
+ ops->eval = nft_target_eval_bridge;
else
- nft_target->ops.eval = nft_target_eval_xt;
-
- list_add(&nft_target->head, &nft_target_list);
+ ops->eval = nft_target_eval_xt;
- return &nft_target->ops;
+ return ops;
err:
module_put(target->me);
return ERR_PTR(err);
}
+static void nft_target_release_ops(const struct nft_expr_ops *ops)
+{
+ struct xt_target *target = ops->data;
+
+ module_put(target->me);
+ kfree(ops);
+}
+
static struct nft_expr_type nft_target_type __read_mostly = {
.name = "target",
.select_ops = nft_target_select_ops,
+ .release_ops = nft_target_release_ops,
.policy = nft_target_policy,
.maxattr = NFTA_TARGET_MAX,
.owner = THIS_MODULE,
@@ -942,7 +882,6 @@ static int __init nft_compat_module_init(void)
}
return ret;
-
err_target:
nft_unregister_expr(&nft_target_type);
err_match:
@@ -952,32 +891,6 @@ err_match:
static void __exit nft_compat_module_exit(void)
{
- struct nft_xt *xt, *next;
-
- /* list should be empty here, it can be non-empty only in case there
- * was an error that caused nft_xt expr to not be initialized fully
- * and noone else requested the same expression later.
- *
- * In this case, the lists contain 0-refcount entries that still
- * hold module reference.
- */
- list_for_each_entry_safe(xt, next, &nft_target_list, head) {
- struct xt_target *target = xt->ops.data;
-
- if (WARN_ON_ONCE(xt->refcnt))
- continue;
- module_put(target->me);
- kfree(xt);
- }
-
- list_for_each_entry_safe(xt, next, &nft_match_list, head) {
- struct xt_match *match = xt->ops.data;
-
- if (WARN_ON_ONCE(xt->refcnt))
- continue;
- module_put(match->me);
- kfree(xt);
- }
nfnetlink_subsys_unregister(&nfnl_compat_subsys);
nft_unregister_expr(&nft_target_type);
nft_unregister_expr(&nft_match_type);
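With the cached nft_xt wrappers gone, nft_compat allocates a fresh nft_expr_ops per expression and relies on the new release_ops hook to free it when setting the expression up fails, as in the nft_expr_init() error path earlier in this series. A hedged sketch of the contract for a hypothetical expression type "foo":

static const struct nft_expr_ops *
foo_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
	struct nft_expr_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

	if (!ops)
		return ERR_PTR(-ENOMEM);
	/* fill in ops->type, ops->size, ops->eval, ops->destroy, ... */
	return ops;
}

/* Called by the core when the ops were selected but the expression was
 * never fully initialised; the normal teardown path frees the ops from
 * ->destroy() instead, as nft_match_destroy()/nft_target_destroy() do
 * above.
 */
static void foo_release_ops(const struct nft_expr_ops *ops)
{
	kfree(ops);
}

static struct nft_expr_type foo_type __read_mostly = {
	.name		= "foo",
	.select_ops	= foo_select_ops,
	.release_ops	= foo_release_ops,
	.owner		= THIS_MODULE,
};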
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index a61d7edfc290..1a6b06ce6b5b 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -104,7 +104,7 @@ static void nft_counter_obj_destroy(const struct nft_ctx *ctx,
nft_counter_do_destroy(priv);
}
-static void nft_counter_reset(struct nft_counter_percpu_priv __percpu *priv,
+static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
struct nft_counter *total)
{
struct nft_counter *this_cpu;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 586627c361df..7b717fad6cdc 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -870,7 +870,7 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]);
priv->l4proto = l4num;
- l4proto = nf_ct_l4proto_find_get(l4num);
+ l4proto = nf_ct_l4proto_find(l4num);
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
@@ -902,7 +902,6 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
err_free_timeout:
kfree(timeout);
err_proto_put:
- nf_ct_l4proto_put(l4proto);
return ret;
}
@@ -913,7 +912,6 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
struct nf_ct_timeout *timeout = priv->timeout;
nf_ct_untimeout(ctx->net, timeout);
- nf_ct_l4proto_put(timeout->l4proto);
nf_ct_netns_put(ctx->net, ctx->family);
kfree(priv->timeout);
}
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 07d4efd3d851..a8a74a16f9c4 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -62,9 +62,8 @@ err1:
return NULL;
}
-static void nft_dynset_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_dynset_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_dynset *priv = nft_expr_priv(expr);
struct nft_set *set = priv->set;
@@ -235,20 +234,17 @@ err1:
return err;
}
-static void nft_dynset_activate(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
-{
- struct nft_dynset *priv = nft_expr_priv(expr);
-
- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
static void nft_dynset_deactivate(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
+ const struct nft_expr *expr,
+ enum nft_trans_phase phase)
{
struct nft_dynset *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+ if (phase == NFT_TRANS_PREPARE)
+ return;
+
+ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+ phase == NFT_TRANS_COMMIT);
}
static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -296,7 +292,6 @@ static const struct nft_expr_ops nft_dynset_ops = {
.eval = nft_dynset_eval,
.init = nft_dynset_init,
.destroy = nft_dynset_destroy,
- .activate = nft_dynset_activate,
.deactivate = nft_dynset_deactivate,
.dump = nft_dynset_dump,
};
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 974525eb92df..6e6b9adf7d38 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -12,6 +12,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_helper.h>
struct nft_flow_offload {
struct nft_flowtable *flowtable;
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
memset(&fl, 0, sizeof(fl));
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
- fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
break;
case NFPROTO_IPV6:
- fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
break;
}
@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
return -ENOENT;
route->tuple[dir].dst = this_dst;
- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
route->tuple[!dir].dst = other_dst;
- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
return 0;
}
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
struct nf_flowtable *flowtable = &priv->flowtable->data;
+ const struct nf_conn_help *help;
enum ip_conntrack_info ctinfo;
struct nf_flow_route route;
struct flow_offload *flow;
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
goto out;
}
- if (test_bit(IPS_HELPER_BIT, &ct->status))
+ help = nfct_help(ct);
+ if (help)
goto out;
if (ctinfo == IP_CT_NEW ||
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c2d237144f74..ea658e6c53e3 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -25,7 +25,6 @@ struct nft_jhash {
u32 modulus;
u32 seed;
u32 offset;
- struct nft_set *map;
};
static void nft_jhash_eval(const struct nft_expr *expr,
@@ -42,33 +41,10 @@ static void nft_jhash_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = h + priv->offset;
}
-static void nft_jhash_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_jhash *priv = nft_expr_priv(expr);
- const void *data = &regs->data[priv->sreg];
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = reciprocal_scale(jhash(data, priv->len, priv->seed),
- priv->modulus) + priv->offset;
-
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
- if (!found)
- return;
-
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
struct nft_symhash {
enum nft_registers dreg:8;
u32 modulus;
u32 offset;
- struct nft_set *map;
};
static void nft_symhash_eval(const struct nft_expr *expr,
@@ -84,28 +60,6 @@ static void nft_symhash_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = h + priv->offset;
}
-static void nft_symhash_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_symhash *priv = nft_expr_priv(expr);
- struct sk_buff *skb = pkt->skb;
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = reciprocal_scale(__skb_get_hash_symmetric(skb),
- priv->modulus) + priv->offset;
-
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
- if (!found)
- return;
-
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_SREG] = { .type = NLA_U32 },
[NFTA_HASH_DREG] = { .type = NLA_U32 },
@@ -114,9 +68,6 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_SEED] = { .type = NLA_U32 },
[NFTA_HASH_OFFSET] = { .type = NLA_U32 },
[NFTA_HASH_TYPE] = { .type = NLA_U32 },
- [NFTA_HASH_SET_NAME] = { .type = NLA_STRING,
- .len = NFT_SET_MAXNAMELEN - 1 },
- [NFTA_HASH_SET_ID] = { .type = NLA_U32 },
};
static int nft_jhash_init(const struct nft_ctx *ctx,
@@ -166,20 +117,6 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_jhash_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_jhash *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_jhash_init(ctx, expr, tb);
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_HASH_SET_NAME],
- tb[NFTA_HASH_SET_ID], genmask);
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_symhash_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -206,20 +143,6 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_symhash_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_jhash *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_symhash_init(ctx, expr, tb);
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_HASH_SET_NAME],
- tb[NFTA_HASH_SET_ID], genmask);
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_jhash_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
@@ -247,18 +170,6 @@ nla_put_failure:
return -1;
}
-static int nft_jhash_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_jhash *priv = nft_expr_priv(expr);
-
- if (nft_jhash_dump(skb, expr) ||
- nla_put_string(skb, NFTA_HASH_SET_NAME, priv->map->name))
- return -1;
-
- return 0;
-}
-
static int nft_symhash_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
@@ -279,18 +190,6 @@ nla_put_failure:
return -1;
}
-static int nft_symhash_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_symhash *priv = nft_expr_priv(expr);
-
- if (nft_symhash_dump(skb, expr) ||
- nla_put_string(skb, NFTA_HASH_SET_NAME, priv->map->name))
- return -1;
-
- return 0;
-}
-
static struct nft_expr_type nft_hash_type;
static const struct nft_expr_ops nft_jhash_ops = {
.type = &nft_hash_type,
@@ -300,14 +199,6 @@ static const struct nft_expr_ops nft_jhash_ops = {
.dump = nft_jhash_dump,
};
-static const struct nft_expr_ops nft_jhash_map_ops = {
- .type = &nft_hash_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_jhash)),
- .eval = nft_jhash_map_eval,
- .init = nft_jhash_map_init,
- .dump = nft_jhash_map_dump,
-};
-
static const struct nft_expr_ops nft_symhash_ops = {
.type = &nft_hash_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)),
@@ -316,14 +207,6 @@ static const struct nft_expr_ops nft_symhash_ops = {
.dump = nft_symhash_dump,
};
-static const struct nft_expr_ops nft_symhash_map_ops = {
- .type = &nft_hash_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)),
- .eval = nft_symhash_map_eval,
- .init = nft_symhash_map_init,
- .dump = nft_symhash_map_dump,
-};
-
static const struct nft_expr_ops *
nft_hash_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
@@ -336,12 +219,8 @@ nft_hash_select_ops(const struct nft_ctx *ctx,
type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE]));
switch (type) {
case NFT_HASH_SYM:
- if (tb[NFTA_HASH_SET_NAME])
- return &nft_symhash_map_ops;
return &nft_symhash_ops;
case NFT_HASH_JENKINS:
- if (tb[NFTA_HASH_SET_NAME])
- return &nft_jhash_map_ops;
return &nft_jhash_ops;
default:
break;
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 0777a93211e2..5ec43124cbca 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -17,9 +17,9 @@
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
-static void nft_immediate_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_immediate_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
}
static void nft_immediate_deactivate(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
+ const struct nft_expr *expr,
+ enum nft_trans_phase phase)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+ if (phase == NFT_TRANS_COMMIT)
+ return;
+
return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
}
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 227b2b15a19c..14496da5141d 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
return 0;
}
-static void nft_lookup_activate(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
-{
- struct nft_lookup *priv = nft_expr_priv(expr);
-
- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
static void nft_lookup_deactivate(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
+ const struct nft_expr *expr,
+ enum nft_trans_phase phase)
{
struct nft_lookup *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+ if (phase == NFT_TRANS_PREPARE)
+ return;
+
+ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+ phase == NFT_TRANS_COMMIT);
}
static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
.eval = nft_lookup_eval,
.init = nft_lookup_init,
- .activate = nft_lookup_activate,
.deactivate = nft_lookup_deactivate,
.destroy = nft_lookup_destroy,
.dump = nft_lookup_dump,
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index 9d8655bc1bea..bee156eaa400 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -14,18 +14,24 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nft_masq.h>
+#include <net/netfilter/ipv4/nf_nat_masquerade.h>
+#include <net/netfilter/ipv6/nf_nat_masquerade.h>
-const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
+struct nft_masq {
+ u32 flags;
+ enum nft_registers sreg_proto_min:8;
+ enum nft_registers sreg_proto_max:8;
+};
+
+static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
[NFTA_MASQ_FLAGS] = { .type = NLA_U32 },
[NFTA_MASQ_REG_PROTO_MIN] = { .type = NLA_U32 },
[NFTA_MASQ_REG_PROTO_MAX] = { .type = NLA_U32 },
};
-EXPORT_SYMBOL_GPL(nft_masq_policy);
-int nft_masq_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+static int nft_masq_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
{
int err;
@@ -36,11 +42,10 @@ int nft_masq_validate(const struct nft_ctx *ctx,
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_POST_ROUTING));
}
-EXPORT_SYMBOL_GPL(nft_masq_validate);
-int nft_masq_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_masq_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
{
u32 plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
struct nft_masq *priv = nft_expr_priv(expr);
@@ -75,9 +80,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
return nf_ct_netns_get(ctx->net, ctx->family);
}
-EXPORT_SYMBOL_GPL(nft_masq_init);
-int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_masq *priv = nft_expr_priv(expr);
@@ -98,7 +102,157 @@ int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr)
nla_put_failure:
return -1;
}
-EXPORT_SYMBOL_GPL(nft_masq_dump);
+
+static void nft_masq_ipv4_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_masq *priv = nft_expr_priv(expr);
+ struct nf_nat_range2 range;
+
+ memset(&range, 0, sizeof(range));
+ range.flags = priv->flags;
+ if (priv->sreg_proto_min) {
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
+ }
+ regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
+ &range, nft_out(pkt));
+}
+
+static void
+nft_masq_ipv4_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+{
+ nf_ct_netns_put(ctx->net, NFPROTO_IPV4);
+}
+
+static struct nft_expr_type nft_masq_ipv4_type;
+static const struct nft_expr_ops nft_masq_ipv4_ops = {
+ .type = &nft_masq_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
+ .eval = nft_masq_ipv4_eval,
+ .init = nft_masq_init,
+ .destroy = nft_masq_ipv4_destroy,
+ .dump = nft_masq_dump,
+ .validate = nft_masq_validate,
+};
+
+static struct nft_expr_type nft_masq_ipv4_type __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .name = "masq",
+ .ops = &nft_masq_ipv4_ops,
+ .policy = nft_masq_policy,
+ .maxattr = NFTA_MASQ_MAX,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_NF_TABLES_IPV6
+static void nft_masq_ipv6_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_masq *priv = nft_expr_priv(expr);
+ struct nf_nat_range2 range;
+
+ memset(&range, 0, sizeof(range));
+ range.flags = priv->flags;
+ if (priv->sreg_proto_min) {
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
+ }
+ regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
+ nft_out(pkt));
+}
+
+static void
+nft_masq_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+{
+ nf_ct_netns_put(ctx->net, NFPROTO_IPV6);
+}
+
+static struct nft_expr_type nft_masq_ipv6_type;
+static const struct nft_expr_ops nft_masq_ipv6_ops = {
+ .type = &nft_masq_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
+ .eval = nft_masq_ipv6_eval,
+ .init = nft_masq_init,
+ .destroy = nft_masq_ipv6_destroy,
+ .dump = nft_masq_dump,
+ .validate = nft_masq_validate,
+};
+
+static struct nft_expr_type nft_masq_ipv6_type __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .name = "masq",
+ .ops = &nft_masq_ipv6_ops,
+ .policy = nft_masq_policy,
+ .maxattr = NFTA_MASQ_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_masq_module_init_ipv6(void)
+{
+ int ret = nft_register_expr(&nft_masq_ipv6_type);
+
+ if (ret)
+ return ret;
+
+ ret = nf_nat_masquerade_ipv6_register_notifier();
+ if (ret < 0)
+ nft_unregister_expr(&nft_masq_ipv6_type);
+
+ return ret;
+}
+
+static void nft_masq_module_exit_ipv6(void)
+{
+ nft_unregister_expr(&nft_masq_ipv6_type);
+ nf_nat_masquerade_ipv6_unregister_notifier();
+}
+#else
+static inline int nft_masq_module_init_ipv6(void) { return 0; }
+static inline void nft_masq_module_exit_ipv6(void) {}
+#endif
+
+static int __init nft_masq_module_init(void)
+{
+ int ret;
+
+ ret = nft_masq_module_init_ipv6();
+ if (ret < 0)
+ return ret;
+
+ ret = nft_register_expr(&nft_masq_ipv4_type);
+ if (ret < 0) {
+ nft_masq_module_exit_ipv6();
+ return ret;
+ }
+
+ ret = nf_nat_masquerade_ipv4_register_notifier();
+ if (ret < 0) {
+ nft_masq_module_exit_ipv6();
+ nft_unregister_expr(&nft_masq_ipv4_type);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit nft_masq_module_exit(void)
+{
+ nft_masq_module_exit_ipv6();
+ nft_unregister_expr(&nft_masq_ipv4_type);
+ nf_nat_masquerade_ipv4_unregister_notifier();
+}
+
+module_init(nft_masq_module_init);
+module_exit(nft_masq_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "masq");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "masq");
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 6df486c5ebd3..987d2d6ce624 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -244,6 +244,16 @@ void nft_meta_get_eval(const struct nft_expr *expr,
strncpy((char *)dest, p->br->dev->name, IFNAMSIZ);
return;
#endif
+ case NFT_META_IIFKIND:
+ if (in == NULL || in->rtnl_link_ops == NULL)
+ goto err;
+ strncpy((char *)dest, in->rtnl_link_ops->kind, IFNAMSIZ);
+ break;
+ case NFT_META_OIFKIND:
+ if (out == NULL || out->rtnl_link_ops == NULL)
+ goto err;
+ strncpy((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ);
+ break;
default:
WARN_ON(1);
goto err;
@@ -340,6 +350,8 @@ static int nft_meta_get_init(const struct nft_ctx *ctx,
break;
case NFT_META_IIFNAME:
case NFT_META_OIFNAME:
+ case NFT_META_IIFKIND:
+ case NFT_META_OIFKIND:
len = IFNAMSIZ;
break;
case NFT_META_PRANDOM:
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index c15807d10b91..e93aed9bda88 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -21,9 +21,7 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nf_nat_l3proto.h>
#include <net/ip.h>
struct nft_nat {
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index a3185ca2a3a9..79ef074c18ca 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -38,7 +38,8 @@ static int nft_objref_init(const struct nft_ctx *ctx,
return -EINVAL;
objtype = ntohl(nla_get_be32(tb[NFTA_OBJREF_IMM_TYPE]));
- obj = nft_obj_lookup(ctx->table, tb[NFTA_OBJREF_IMM_NAME], objtype,
+ obj = nft_obj_lookup(ctx->net, ctx->table,
+ tb[NFTA_OBJREF_IMM_NAME], objtype,
genmask);
if (IS_ERR(obj))
return -ENOENT;
@@ -53,7 +54,7 @@ static int nft_objref_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_object *obj = nft_objref_priv(expr);
- if (nla_put_string(skb, NFTA_OBJREF_IMM_NAME, obj->name) ||
+ if (nla_put_string(skb, NFTA_OBJREF_IMM_NAME, obj->key.name) ||
nla_put_be32(skb, NFTA_OBJREF_IMM_TYPE,
htonl(obj->ops->type->type)))
goto nla_put_failure;
@@ -155,20 +156,17 @@ nla_put_failure:
return -1;
}
-static void nft_objref_map_activate(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
-{
- struct nft_objref_map *priv = nft_expr_priv(expr);
-
- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
+ const struct nft_expr *expr,
+ enum nft_trans_phase phase)
{
struct nft_objref_map *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+ if (phase == NFT_TRANS_PREPARE)
+ return;
+
+ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+ phase == NFT_TRANS_COMMIT);
}
static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -185,7 +183,6 @@ static const struct nft_expr_ops nft_objref_map_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
.eval = nft_objref_map_eval,
.init = nft_objref_map_init,
- .activate = nft_objref_map_activate,
.deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index e110b0ebbf58..54e15de4b79a 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -70,9 +70,9 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
-static void nft_payload_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_payload_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_payload *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index 0ed124a93fcf..354cde67bca9 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -61,7 +61,7 @@ static void nft_quota_obj_eval(struct nft_object *obj,
if (overquota &&
!test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
- nft_obj_notify(nft_net(pkt), obj->table, obj, 0, 0,
+ nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0,
NFT_MSG_NEWOBJ, nft_pf(pkt), 0, GFP_ATOMIC);
}
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index cedb96c3619f..529ac8acb19d 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -23,9 +23,8 @@ struct nft_range_expr {
enum nft_range_ops op:8;
};
-static void nft_range_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_range_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
int d1, d2;
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index c64cbe78dee7..f8092926f704 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -13,19 +13,24 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_redirect.h>
#include <net/netfilter/nf_tables.h>
-#include <net/netfilter/nft_redir.h>
-const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
+struct nft_redir {
+ enum nft_registers sreg_proto_min:8;
+ enum nft_registers sreg_proto_max:8;
+ u16 flags;
+};
+
+static const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
[NFTA_REDIR_REG_PROTO_MIN] = { .type = NLA_U32 },
[NFTA_REDIR_REG_PROTO_MAX] = { .type = NLA_U32 },
[NFTA_REDIR_FLAGS] = { .type = NLA_U32 },
};
-EXPORT_SYMBOL_GPL(nft_redir_policy);
-int nft_redir_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data)
+static int nft_redir_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
{
int err;
@@ -37,11 +42,10 @@ int nft_redir_validate(const struct nft_ctx *ctx,
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT));
}
-EXPORT_SYMBOL_GPL(nft_redir_validate);
-int nft_redir_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_redir_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
{
struct nft_redir *priv = nft_expr_priv(expr);
unsigned int plen;
@@ -77,7 +81,6 @@ int nft_redir_init(const struct nft_ctx *ctx,
return nf_ct_netns_get(ctx->net, ctx->family);
}
-EXPORT_SYMBOL_GPL(nft_redir_init);
int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
@@ -101,7 +104,134 @@ int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr)
nla_put_failure:
return -1;
}
-EXPORT_SYMBOL_GPL(nft_redir_dump);
+
+static void nft_redir_ipv4_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_redir *priv = nft_expr_priv(expr);
+ struct nf_nat_ipv4_multi_range_compat mr;
+
+ memset(&mr, 0, sizeof(mr));
+ if (priv->sreg_proto_min) {
+ mr.range[0].min.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ mr.range[0].max.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
+ mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ }
+
+ mr.range[0].flags |= priv->flags;
+
+ regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, nft_hook(pkt));
+}
+
+static void
+nft_redir_ipv4_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+{
+ nf_ct_netns_put(ctx->net, NFPROTO_IPV4);
+}
+
+static struct nft_expr_type nft_redir_ipv4_type;
+static const struct nft_expr_ops nft_redir_ipv4_ops = {
+ .type = &nft_redir_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+ .eval = nft_redir_ipv4_eval,
+ .init = nft_redir_init,
+ .destroy = nft_redir_ipv4_destroy,
+ .dump = nft_redir_dump,
+ .validate = nft_redir_validate,
+};
+
+static struct nft_expr_type nft_redir_ipv4_type __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .name = "redir",
+ .ops = &nft_redir_ipv4_ops,
+ .policy = nft_redir_policy,
+ .maxattr = NFTA_REDIR_MAX,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_NF_TABLES_IPV6
+static void nft_redir_ipv6_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_redir *priv = nft_expr_priv(expr);
+ struct nf_nat_range2 range;
+
+ memset(&range, 0, sizeof(range));
+ if (priv->sreg_proto_min) {
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
+ range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ }
+
+ range.flags |= priv->flags;
+
+ regs->verdict.code =
+ nf_nat_redirect_ipv6(pkt->skb, &range, nft_hook(pkt));
+}
+
+static void
+nft_redir_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+{
+ nf_ct_netns_put(ctx->net, NFPROTO_IPV6);
+}
+
+static struct nft_expr_type nft_redir_ipv6_type;
+static const struct nft_expr_ops nft_redir_ipv6_ops = {
+ .type = &nft_redir_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+ .eval = nft_redir_ipv6_eval,
+ .init = nft_redir_init,
+ .destroy = nft_redir_ipv6_destroy,
+ .dump = nft_redir_dump,
+ .validate = nft_redir_validate,
+};
+
+static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .name = "redir",
+ .ops = &nft_redir_ipv6_ops,
+ .policy = nft_redir_policy,
+ .maxattr = NFTA_REDIR_MAX,
+ .owner = THIS_MODULE,
+};
+#endif
+
+static int __init nft_redir_module_init(void)
+{
+ int ret = nft_register_expr(&nft_redir_ipv4_type);
+
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_NF_TABLES_IPV6
+ ret = nft_register_expr(&nft_redir_ipv6_type);
+ if (ret) {
+ nft_unregister_expr(&nft_redir_ipv4_type);
+ return ret;
+ }
+#endif
+
+ return ret;
+}
+
+static void __exit nft_redir_module_exit(void)
+{
+ nft_unregister_expr(&nft_redir_ipv4_type);
+#ifdef CONFIG_NF_TABLES_IPV6
+ nft_unregister_expr(&nft_redir_ipv6_type);
+#endif
+}
+
+module_init(nft_redir_module_init);
+module_exit(nft_redir_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir");
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index f35fa33913ae..c48daed5c46b 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -53,9 +53,9 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skb
return mtu - minlen;
}
-static void nft_rt_get_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_rt_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
const struct nft_rt *priv = nft_expr_priv(expr);
const struct sk_buff *skb = pkt->skb;
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 339a9dd1c832..03df08801e28 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -442,15 +442,6 @@ static void *nft_hash_get(const struct net *net, const struct nft_set *set,
return ERR_PTR(-ENOENT);
}
-/* nft_hash_select_ops() makes sure key size can be either 2 or 4 bytes . */
-static inline u32 nft_hash_key(const u32 *key, u32 klen)
-{
- if (klen == 4)
- return *key;
-
- return *(u16 *)key;
-}
-
static bool nft_hash_lookup_fast(const struct net *net,
const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
@@ -460,11 +451,11 @@ static bool nft_hash_lookup_fast(const struct net *net,
const struct nft_hash_elem *he;
u32 hash, k1, k2;
- k1 = nft_hash_key(key, set->klen);
+ k1 = *key;
hash = jhash_1word(k1, priv->seed);
hash = reciprocal_scale(hash, priv->buckets);
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
- k2 = nft_hash_key(nft_set_ext_key(&he->ext)->data, set->klen);
+ k2 = *(u32 *)nft_set_ext_key(&he->ext)->data;
if (k1 == k2 &&
nft_set_elem_active(&he->ext, genmask)) {
*ext = &he->ext;
@@ -474,6 +465,23 @@ static bool nft_hash_lookup_fast(const struct net *net,
return false;
}
+static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv,
+ const struct nft_set_ext *ext)
+{
+ const struct nft_data *key = nft_set_ext_key(ext);
+ u32 hash, k1;
+
+ if (set->klen == 4) {
+ k1 = *(u32 *)key;
+ hash = jhash_1word(k1, priv->seed);
+ } else {
+ hash = jhash(key, set->klen, priv->seed);
+ }
+ hash = reciprocal_scale(hash, priv->buckets);
+
+ return hash;
+}
+
static int nft_hash_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **ext)
@@ -483,8 +491,7 @@ static int nft_hash_insert(const struct net *net, const struct nft_set *set,
u8 genmask = nft_genmask_next(net);
u32 hash;
- hash = jhash(nft_set_ext_key(&this->ext), set->klen, priv->seed);
- hash = reciprocal_scale(hash, priv->buckets);
+ hash = nft_jhash(set, priv, &this->ext);
hlist_for_each_entry(he, &priv->table[hash], node) {
if (!memcmp(nft_set_ext_key(&this->ext),
nft_set_ext_key(&he->ext), set->klen) &&
@@ -523,10 +530,9 @@ static void *nft_hash_deactivate(const struct net *net,
u8 genmask = nft_genmask_next(net);
u32 hash;
- hash = jhash(nft_set_ext_key(&this->ext), set->klen, priv->seed);
- hash = reciprocal_scale(hash, priv->buckets);
+ hash = nft_jhash(set, priv, &this->ext);
hlist_for_each_entry(he, &priv->table[hash], node) {
- if (!memcmp(nft_set_ext_key(&this->ext), &elem->key.val,
+ if (!memcmp(nft_set_ext_key(&he->ext), &elem->key.val,
set->klen) &&
nft_set_elem_active(&he->ext, genmask)) {
nft_set_elem_change_active(net, set, &he->ext);
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 3a15f219e4e7..b113fcac94e1 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -15,6 +15,7 @@
struct nft_tunnel {
enum nft_tunnel_keys key:8;
enum nft_registers dreg:8;
+ enum nft_tunnel_mode mode:8;
};
static void nft_tunnel_get_eval(const struct nft_expr *expr,
@@ -29,14 +30,32 @@ static void nft_tunnel_get_eval(const struct nft_expr *expr,
switch (priv->key) {
case NFT_TUNNEL_PATH:
- nft_reg_store8(dest, !!tun_info);
+ if (!tun_info) {
+ nft_reg_store8(dest, false);
+ return;
+ }
+ if (priv->mode == NFT_TUNNEL_MODE_NONE ||
+ (priv->mode == NFT_TUNNEL_MODE_RX &&
+ !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
+ (priv->mode == NFT_TUNNEL_MODE_TX &&
+ (tun_info->mode & IP_TUNNEL_INFO_TX)))
+ nft_reg_store8(dest, true);
+ else
+ nft_reg_store8(dest, false);
break;
case NFT_TUNNEL_ID:
if (!tun_info) {
regs->verdict.code = NFT_BREAK;
return;
}
- *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
+ if (priv->mode == NFT_TUNNEL_MODE_NONE ||
+ (priv->mode == NFT_TUNNEL_MODE_RX &&
+ !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
+ (priv->mode == NFT_TUNNEL_MODE_TX &&
+ (tun_info->mode & IP_TUNNEL_INFO_TX)))
+ *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
+ else
+ regs->verdict.code = NFT_BREAK;
break;
default:
WARN_ON(1);
@@ -47,6 +66,7 @@ static void nft_tunnel_get_eval(const struct nft_expr *expr,
static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
[NFTA_TUNNEL_KEY] = { .type = NLA_U32 },
[NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
+ [NFTA_TUNNEL_MODE] = { .type = NLA_U32 },
};
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
@@ -74,6 +94,14 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);
+ if (tb[NFTA_TUNNEL_MODE]) {
+ priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
+ if (priv->mode > NFT_TUNNEL_MODE_MAX)
+ return -EOPNOTSUPP;
+ } else {
+ priv->mode = NFT_TUNNEL_MODE_NONE;
+ }
+
return nft_validate_register_store(ctx, priv->dreg, NULL,
NFT_DATA_VALUE, len);
}
@@ -87,6 +115,8 @@ static int nft_tunnel_get_dump(struct sk_buff *skb,
goto nla_put_failure;
if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
goto nla_put_failure;
+ if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -376,6 +406,13 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
return -ENOMEM;
memcpy(&md->u.tun_info, &info, sizeof(info));
+#ifdef CONFIG_DST_CACHE
+ err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
+ if (err < 0) {
+ metadata_dst_free(md);
+ return err;
+ }
+#endif
ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
priv->opts.flags);
priv->md = md;
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index e8da9a9bba73..06dc55590441 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(nf_checksum_partial);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict, unsigned short family)
{
- const struct nf_ipv6_ops *v6ops;
+ const struct nf_ipv6_ops *v6ops __maybe_unused;
int ret = 0;
switch (family) {
@@ -170,9 +170,7 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
ret = nf_ip_route(net, dst, fl, strict);
break;
case AF_INET6:
- v6ops = rcu_dereference(nf_ipv6_ops);
- if (v6ops)
- ret = v6ops->route(net, dst, fl, strict);
+ ret = nf_ip6_route(net, dst, fl, strict);
break;
}
@@ -180,6 +178,25 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
}
EXPORT_SYMBOL_GPL(nf_route);
+static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
+{
+#ifdef CONFIG_INET
+ const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (!(iph->tos == rt_info->tos &&
+ skb->mark == rt_info->mark &&
+ iph->daddr == rt_info->daddr &&
+ iph->saddr == rt_info->saddr))
+ return ip_route_me_harder(entry->state.net, skb,
+ RTN_UNSPEC);
+ }
+#endif
+ return 0;
+}
+
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
{
const struct nf_ipv6_ops *v6ops;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index aecadd471e1d..e5e5c64df8d1 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -461,7 +461,7 @@ int xt_check_proc_name(const char *name, unsigned int size)
EXPORT_SYMBOL(xt_check_proc_name);
int xt_check_match(struct xt_mtchk_param *par,
- unsigned int size, u_int8_t proto, bool inv_proto)
+ unsigned int size, u16 proto, bool inv_proto)
{
int ret;
@@ -984,7 +984,7 @@ bool xt_find_jump_offset(const unsigned int *offsets,
EXPORT_SYMBOL(xt_find_jump_offset);
int xt_check_target(struct xt_tgchk_param *par,
- unsigned int size, u_int8_t proto, bool inv_proto)
+ unsigned int size, u16 proto, bool inv_proto)
{
int ret;
@@ -1899,7 +1899,7 @@ static int __init xt_init(void)
seqcount_init(&per_cpu(xt_recseq, i));
}
- xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
+ xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
if (!xt)
return -ENOMEM;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 2c7a4b80206f..0fa863f57575 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -159,7 +159,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
/* Make sure the timeout policy matches any existing protocol tracker,
* otherwise default to generic.
*/
- l4proto = __nf_ct_l4proto_find(proto);
+ l4proto = nf_ct_l4proto_find(proto);
if (timeout->l4proto->l4proto != l4proto->l4proto) {
ret = -EINVAL;
pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index eb4cbd244c3d..5f9b37e12801 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -41,19 +41,13 @@
#include <linux/workqueue.h>
#include <linux/sysfs.h>
-struct idletimer_tg_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
-};
-
struct idletimer_tg {
struct list_head entry;
struct timer_list timer;
struct work_struct work;
struct kobject *kobj;
- struct idletimer_tg_attr attr;
+ struct device_attribute attr;
unsigned int refcnt;
};
@@ -76,15 +70,15 @@ struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
return NULL;
}
-static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
+static ssize_t idletimer_tg_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct idletimer_tg *timer;
unsigned long expires = 0;
mutex_lock(&list_mutex);
- timer = __idletimer_tg_find_by_label(attr->name);
+ timer = __idletimer_tg_find_by_label(attr->attr.name);
if (timer)
expires = timer->timer.expires;
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 89e281b3bfc2..29987ff03621 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -36,7 +36,6 @@ MODULE_ALIAS("ip6t_addrtype");
static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
const struct in6_addr *addr, u16 mask)
{
- const struct nf_ipv6_ops *v6ops;
struct flowi6 flow;
struct rt6_info *rt;
u32 ret = 0;
@@ -47,18 +46,13 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
if (dev)
flow.flowi6_oif = dev->ifindex;
- v6ops = nf_get_ipv6_ops();
- if (v6ops) {
- if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
- if (v6ops->chk_addr(net, addr, dev, true))
- ret = XT_ADDRTYPE_LOCAL;
- }
- route_err = v6ops->route(net, (struct dst_entry **)&rt,
- flowi6_to_flowi(&flow), false);
- } else {
- route_err = 1;
+ if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
+ if (nf_ipv6_chk_addr(net, addr, dev, true))
+ ret = XT_ADDRTYPE_LOCAL;
}
+ route_err = nf_ip6_route(net, (struct dst_entry **)&rt,
+ flowi6_to_flowi(&flow), false);
if (route_err)
return XT_ADDRTYPE_UNREACHABLE;
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index ac91170fc8c8..61eabd171186 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -14,7 +14,7 @@
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat.h>
static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
{
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 4034d70bff39..b2e39cb6a590 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -96,8 +96,7 @@ match_outdev:
static int physdev_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
-
- br_netfilter_enable();
+ static bool brnf_probed __read_mostly;
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
@@ -111,6 +110,12 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
return -EINVAL;
}
+
+ if (!brnf_probed) {
+ brnf_probed = true;
+ request_module("br_netfilter");
+ }
+
return 0;
}
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index f44de4bc2100..1664d2ec8b2f 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -337,7 +337,6 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
unsigned int nstamp_mask;
unsigned int i;
int ret = -EINVAL;
- size_t sz;
net_get_random_once(&hash_rnd, sizeof(hash_rnd));
@@ -387,8 +386,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
goto out;
}
- sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size;
- t = kvzalloc(sz, GFP_KERNEL);
+ t = kvzalloc(struct_size(t, iphash, ip_list_hash_size), GFP_KERNEL);
if (t == NULL) {
ret = -ENOMEM;
goto out;
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index ea7c67050792..ee3e5b6471a6 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
(state == 0 && (byte & bitmask) == 0))
return bit_spot;
- bit_spot++;
+ if (++bit_spot >= bitmap_len)
+ return -1;
bitmask >>= 1;
if (bitmask == 0) {
byte = bitmap[++byte_offset];
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 3c023d6120f6..f28e937320a3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1371,6 +1371,14 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
+bool netlink_strict_get_check(struct sk_buff *skb)
+{
+ const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
+
+ return nlk->flags & NETLINK_F_STRICT_CHK;
+}
+EXPORT_SYMBOL_GPL(netlink_strict_get_check);
+
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
@@ -2541,15 +2549,7 @@ struct nl_seq_iter {
static int netlink_walk_start(struct nl_seq_iter *iter)
{
- int err;
-
- err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti,
- GFP_KERNEL);
- if (err) {
- iter->link = MAX_LINKS;
- return err;
- }
-
+ rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
rhashtable_walk_start(&iter->hti);
return 0;
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index cbd51ed5a2d7..908e53ab47a4 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);
- mod_timer(&nr->t1timer, jiffies + nr->t1);
+ sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
}
void nr_start_t2timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);
- mod_timer(&nr->t2timer, jiffies + nr->t2);
+ sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
}
void nr_start_t4timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);
- mod_timer(&nr->t4timer, jiffies + nr->t4);
+ sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
}
void nr_start_idletimer(struct sock *sk)
@@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk)
struct nr_sock *nr = nr_sk(sk);
if (nr->idle > 0)
- mod_timer(&nr->idletimer, jiffies + nr->idle);
+ sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
}
void nr_start_heartbeat(struct sock *sk)
{
- mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
}
void nr_stop_t1timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t1timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t1timer);
}
void nr_stop_t2timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t2timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t2timer);
}
void nr_stop_t4timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t4timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t4timer);
}
void nr_stop_idletimer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->idletimer);
+ sk_stop_timer(sk, &nr_sk(sk)->idletimer);
}
void nr_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
}
int nr_t1timer_running(struct sock *sk)
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 6a196e438b6c..d1fc019e932e 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
sock->service_name,
sock->service_name_len,
&service_name_tlv_length);
+ if (!service_name_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += service_name_tlv_length;
}
@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
&miux_tlv_length);
+ if (!miux_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += miux_tlv_length;
rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ if (!rw_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += rw_tlv_length;
pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
&miux_tlv_length);
+ if (!miux_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += miux_tlv_length;
rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ if (!rw_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += rw_tlv_length;
skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index ef4026a23e80..4fa015208aab 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
{
- u8 *gb_cur, *version_tlv, version, version_length;
- u8 *lto_tlv, lto_length;
- u8 *wks_tlv, wks_length;
- u8 *miux_tlv, miux_length;
+ u8 *gb_cur, version, version_length;
+ u8 lto_length, wks_length, miux_length;
+ u8 *version_tlv = NULL, *lto_tlv = NULL,
+ *wks_tlv = NULL, *miux_tlv = NULL;
__be16 wks = cpu_to_be16(local->local_wks);
u8 gb_len = 0;
int ret = 0;
@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
version = LLCP_VERSION_11;
version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
1, &version_length);
+ if (!version_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += version_length;
lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
+ if (!lto_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += lto_length;
pr_debug("Local wks 0x%lx\n", local->local_wks);
wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
+ if (!wks_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += wks_length;
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
&miux_length);
+ if (!miux_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += miux_length;
gb_len += ARRAY_SIZE(llcp_magic);
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 89da9512ec1e..ac1cc6e38170 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -8,8 +8,6 @@ config OPENVSWITCH
depends on !NF_CONNTRACK || \
(NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \
(!NF_NAT || NF_NAT) && \
- (!NF_NAT_IPV4 || NF_NAT_IPV4) && \
- (!NF_NAT_IPV6 || NF_NAT_IPV6) && \
(!NETFILTER_CONNCOUNT || NETFILTER_CONNCOUNT)))
select LIBCRC32C
select MPLS
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index cd94f925495a..1b6896896fff 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -29,9 +29,7 @@
#include <net/ipv6_frag.h>
#ifdef CONFIG_NF_NAT_NEEDED
-#include <linux/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat.h>
#endif
#include "datapath.h"
@@ -622,7 +620,7 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
if (natted) {
struct nf_conntrack_tuple inverse;
- if (!nf_ct_invert_tuplepr(&inverse, &tuple)) {
+ if (!nf_ct_invert_tuple(&inverse, &tuple)) {
pr_debug("ovs_ct_find_existing: Inversion failed!\n");
return NULL;
}
@@ -745,14 +743,14 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
switch (ctinfo) {
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
- if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
+ if (IS_ENABLED(CONFIG_NF_NAT) &&
skb->protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
hooknum))
err = NF_DROP;
goto push;
- } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
+ } else if (IS_ENABLED(CONFIG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6)) {
__be16 frag_off;
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
@@ -1673,7 +1671,7 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
}
if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
- if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
+ if (IS_ENABLED(CONFIG_NF_NAT) &&
info->family == NFPROTO_IPV4) {
if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
info->range.min_addr.ip) ||
@@ -1682,7 +1680,7 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
(nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
info->range.max_addr.ip))))
return false;
- } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
+ } else if (IS_ENABLED(CONFIG_IPV6) &&
info->family == NFPROTO_IPV6) {
if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
&info->range.min_addr.in6) ||
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 57e07768c9d1..f54cf17ef7a8 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -276,10 +276,12 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
if (flags & IP6_FH_F_FRAG) {
- if (frag_off)
+ if (frag_off) {
key->ip.frag = OVS_FRAG_TYPE_LATER;
- else
- key->ip.frag = OVS_FRAG_TYPE_FIRST;
+ key->ip.proto = nexthdr;
+ return 0;
+ }
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
} else {
key->ip.frag = OVS_FRAG_TYPE_NONE;
}
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 435a4bdf8f89..691da853bef5 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
return -EINVAL;
}
- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
attrs |= 1 << type;
a[type] = nla;
}
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index c038e021a591..43849d752a1e 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -206,8 +206,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
return ERR_PTR(-EINVAL);
/* Allocate and set up the meter before locking anything. */
- meter = kzalloc(n_bands * sizeof(struct dp_meter_band) +
- sizeof(*meter), GFP_KERNEL);
+ meter = kzalloc(struct_size(meter, bands, n_bands), GFP_KERNEL);
if (!meter)
return ERR_PTR(-ENOMEM);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index eedacdebcd4c..8376bc1c1508 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1850,6 +1850,15 @@ oom:
return 0;
}
+static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
+{
+ if (!skb->protocol && sock->type == SOCK_RAW) {
+ skb_reset_mac_header(skb);
+ skb->protocol = dev_parse_header_protocol(skb);
+ }
+
+ skb_probe_transport_header(skb);
+}
/*
* Output a raw packet to a device layer. This bypasses all the other
@@ -1970,7 +1979,7 @@ retry:
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
- skb_probe_transport_header(skb, 0);
+ packet_parse_headers(skb, sock);
dev_queue_xmit(skb);
rcu_read_unlock();
@@ -2404,15 +2413,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
sock_wfree(skb);
}
-static void tpacket_set_protocol(const struct net_device *dev,
- struct sk_buff *skb)
-{
- if (dev->type == ARPHRD_ETHER) {
- skb_reset_mac_header(skb);
- skb->protocol = eth_hdr(skb)->h_proto;
- }
-}
-
static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
{
if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -2483,8 +2483,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
return err;
if (!dev_validate_header(dev, skb->data, hdrlen))
return -EINVAL;
- if (!skb->protocol)
- tpacket_set_protocol(dev, skb);
data += hdrlen;
to_write -= hdrlen;
@@ -2519,7 +2517,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
len = ((to_write > len_max) ? len_max : to_write);
}
- skb_probe_transport_header(skb, 0);
+ packet_parse_headers(skb, sock);
return tp_len;
}
@@ -2628,7 +2626,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out;
+ goto out_put;
}
err = -ENXIO;
@@ -2828,7 +2826,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out;
+ goto out_unlock;
}
err = -ENXIO;
@@ -2887,7 +2885,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
goto out_free;
} else if (reserve) {
skb_reserve(skb, -reserve);
- if (len < reserve)
+ if (len < reserve + sizeof(struct ipv6hdr) &&
+ dev->min_header_len != dev->hard_header_len)
skb_reset_network_header(skb);
}
@@ -2924,7 +2923,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
virtio_net_hdr_set_proto(skb, &vnet_hdr);
}
- skb_probe_transport_header(skb, reserve);
+ packet_parse_headers(skb, sock);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
@@ -4291,7 +4290,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9fc76b19cd3c..db3473540303 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
ph->utid = 0;
ph->message_id = id;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = code;
+ ph->error_code = code;
return pn_skb_send(sk, skb, NULL);
}
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
ph->utid = id; /* whatever */
ph->message_id = id;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = code;
+ ph->error_code = code;
return pn_skb_send(sk, skb, NULL);
}
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
struct pnpipehdr *ph;
struct sockaddr_pn dst;
u8 data[4] = {
- oph->data[0], /* PEP type */
+ oph->pep_type, /* PEP type */
code, /* error code, at an unusual offset */
PAD, PAD,
};
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
ph->utid = oph->utid;
ph->message_id = PNS_PEP_CTRL_RESP;
ph->pipe_handle = oph->pipe_handle;
- ph->data[0] = oph->data[1]; /* CTRL id */
+ ph->data0 = oph->data[0]; /* CTRL id */
pn_skb_get_src_sockaddr(oskb, &dst);
return pn_skb_send(sk, skb, &dst);
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
return -EINVAL;
hdr = pnp_hdr(skb);
- if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
+ if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
- (unsigned int)hdr->data[0]);
+ (unsigned int)hdr->pep_type);
return -EOPNOTSUPP;
}
- switch (hdr->data[1]) {
+ switch (hdr->data[0]) {
case PN_PEP_IND_FLOW_CONTROL:
switch (pn->tx_fc) {
case PN_LEGACY_FLOW_CONTROL:
- switch (hdr->data[4]) {
+ switch (hdr->data[3]) {
case PEP_IND_BUSY:
atomic_set(&pn->tx_credits, 0);
break;
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
}
break;
case PN_ONE_CREDIT_FLOW_CONTROL:
- if (hdr->data[4] == PEP_IND_READY)
+ if (hdr->data[3] == PEP_IND_READY)
atomic_set(&pn->tx_credits, wake = 1);
break;
}
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
break;
- atomic_add(wake = hdr->data[4], &pn->tx_credits);
+ atomic_add(wake = hdr->data[3], &pn->tx_credits);
break;
default:
net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
- (unsigned int)hdr->data[1]);
+ (unsigned int)hdr->data[0]);
return -EOPNOTSUPP;
}
if (wake)
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
struct pnpipehdr *hdr = pnp_hdr(skb);
- u8 n_sb = hdr->data[0];
+ u8 n_sb = hdr->data0;
pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
__skb_pull(skb, sizeof(*hdr));
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
return -ECONNREFUSED;
/* Parse sub-blocks */
- n_sb = hdr->data[4];
+ n_sb = hdr->data[3];
while (n_sb > 0) {
u8 type, buf[6], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
ph->utid = 0;
ph->message_id = PNS_PIPE_REMOVE_REQ;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = PAD;
+ ph->data0 = PAD;
return pn_skb_send(sk, skb, NULL);
}
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
peer_type = hdr->other_pep_type << 8;
/* Parse sub-blocks (options) */
- n_sb = hdr->data[4];
+ n_sb = hdr->data[3];
while (n_sb > 0) {
u8 type, buf[1], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
ph->utid = 0;
if (pn->aligned) {
ph->message_id = PNS_PIPE_ALIGNED_DATA;
- ph->data[0] = 0; /* padding */
+ ph->data0 = 0; /* padding */
} else
ph->message_id = PNS_PIPE_DATA;
ph->pipe_handle = pn->pipe_handle;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 86e1e37eb4e8..b37e6e0a1026 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -15,6 +15,7 @@
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
+#include <linux/numa.h>
#include <net/sock.h>
@@ -101,7 +102,7 @@ static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
return container_of(sk, struct qrtr_sock, sk);
}
-static unsigned int qrtr_local_nid = -1;
+static unsigned int qrtr_local_nid = NUMA_NO_NODE;
/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 65387e1e6964..d6cc97fbbbb0 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -254,7 +254,40 @@ static __poll_t rds_poll(struct file *file, struct socket *sock,
static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
- return -ENOIOCTLCMD;
+ struct rds_sock *rs = rds_sk_to_rs(sock->sk);
+ rds_tos_t utos, tos = 0;
+
+ switch (cmd) {
+ case SIOCRDSSETTOS:
+ if (get_user(utos, (rds_tos_t __user *)arg))
+ return -EFAULT;
+
+ if (rs->rs_transport &&
+ rs->rs_transport->get_tos_map)
+ tos = rs->rs_transport->get_tos_map(utos);
+ else
+ return -ENOIOCTLCMD;
+
+ spin_lock_bh(&rds_sock_lock);
+ if (rs->rs_tos || rs->rs_conn) {
+ spin_unlock_bh(&rds_sock_lock);
+ return -EINVAL;
+ }
+ rs->rs_tos = tos;
+ spin_unlock_bh(&rds_sock_lock);
+ break;
+ case SIOCRDSGETTOS:
+ spin_lock_bh(&rds_sock_lock);
+ tos = rs->rs_tos;
+ spin_unlock_bh(&rds_sock_lock);
+ if (put_user(tos, (rds_tos_t __user *)arg))
+ return -EFAULT;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
}
static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
@@ -348,7 +381,7 @@ static int rds_set_transport(struct rds_sock *rs, char __user *optval,
}
static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
- int optlen)
+ int optlen, int optname)
{
int val, valbool;
@@ -360,6 +393,9 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
valbool = val ? 1 : 0;
+ if (optname == SO_TIMESTAMP_NEW)
+ sock_set_flag(sk, SOCK_TSTAMP_NEW);
+
if (valbool)
sock_set_flag(sk, SOCK_RCVTSTAMP);
else
@@ -430,9 +466,10 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
ret = rds_set_transport(rs, optval, optlen);
release_sock(sock->sk);
break;
- case SO_TIMESTAMP:
+ case SO_TIMESTAMP_OLD:
+ case SO_TIMESTAMP_NEW:
lock_sock(sock->sk);
- ret = rds_enable_recvtstamp(sock->sk, optval, optlen);
+ ret = rds_enable_recvtstamp(sock->sk, optval, optlen, optname);
release_sock(sock->sk);
break;
case SO_RDS_MSG_RXPATH_LATENCY:
@@ -646,6 +683,8 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
spin_lock_init(&rs->rs_rdma_lock);
rs->rs_rdma_keys = RB_ROOT;
rs->rs_rx_traces = 0;
+ rs->rs_tos = 0;
+ rs->rs_conn = NULL;
spin_lock_bh(&rds_sock_lock);
list_add_tail(&rs->rs_item, &rds_sock_list);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 762d2c6788a3..17c9d9f0c848 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
__rds_create_bind_key(key, addr, port, scope_id);
rcu_read_lock();
rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
- if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
- rds_sock_addref(rs);
- else
+ if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
+ !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
rs = NULL;
+
rcu_read_unlock();
rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 3bd2f4a5a30d..7ea134f9a825 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -84,7 +84,7 @@ static struct rds_connection *rds_conn_lookup(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
- int dev_if)
+ u8 tos, int dev_if)
{
struct rds_connection *conn, *ret = NULL;
@@ -92,6 +92,7 @@ static struct rds_connection *rds_conn_lookup(struct net *net,
if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
ipv6_addr_equal(&conn->c_laddr, laddr) &&
conn->c_trans == trans &&
+ conn->c_tos == tos &&
net == rds_conn_net(conn) &&
conn->c_dev_if == dev_if) {
ret = conn;
@@ -139,6 +140,7 @@ static void __rds_conn_path_init(struct rds_connection *conn,
atomic_set(&cp->cp_state, RDS_CONN_DOWN);
cp->cp_send_gen = 0;
cp->cp_reconnect_jiffies = 0;
+ cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
@@ -159,7 +161,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
- gfp_t gfp,
+ gfp_t gfp, u8 tos,
int is_outgoing,
int dev_if)
{
@@ -171,7 +173,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
rcu_read_lock();
- conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if);
+ conn = rds_conn_lookup(net, head, laddr, faddr, trans, tos, dev_if);
if (conn &&
conn->c_loopback &&
conn->c_trans != &rds_loop_transport &&
@@ -205,6 +207,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
conn->c_faddr = *faddr;
conn->c_dev_if = dev_if;
+ conn->c_tos = tos;
#if IS_ENABLED(CONFIG_IPV6)
/* If the local address is link local, set c_bound_if to be the
@@ -297,7 +300,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
struct rds_connection *found;
found = rds_conn_lookup(net, head, laddr, faddr, trans,
- dev_if);
+ tos, dev_if);
if (found) {
struct rds_conn_path *cp;
int i;
@@ -332,10 +335,10 @@ out:
struct rds_connection *rds_conn_create(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
- struct rds_transport *trans, gfp_t gfp,
- int dev_if)
+ struct rds_transport *trans, u8 tos,
+ gfp_t gfp, int dev_if)
{
- return __rds_conn_create(net, laddr, faddr, trans, gfp, 0, dev_if);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
@@ -343,9 +346,9 @@ struct rds_connection *rds_conn_create_outgoing(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
- gfp_t gfp, int dev_if)
+ u8 tos, gfp_t gfp, int dev_if)
{
- return __rds_conn_create(net, laddr, faddr, trans, gfp, 1, dev_if);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 9d7b7586f240..2da9b75bad16 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -301,6 +301,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
iinfo->src_addr = conn->c_laddr.s6_addr32[3];
iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
+ iinfo->tos = conn->c_tos;
memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
@@ -514,6 +515,15 @@ void rds_ib_exit(void)
rds_ib_mr_exit();
}
+static u8 rds_ib_get_tos_map(u8 tos)
+{
+ /* 1:1 user to transport map for RDMA transport.
+ * In future, if custom map is desired, hook can export
+ * user configurable map.
+ */
+ return tos;
+}
+
struct rds_transport rds_ib_transport = {
.laddr_check = rds_ib_laddr_check,
.xmit_path_complete = rds_ib_xmit_path_complete,
@@ -536,6 +546,7 @@ struct rds_transport rds_ib_transport = {
.sync_mr = rds_ib_sync_mr,
.free_mr = rds_ib_free_mr,
.flush_mrs = rds_ib_flush_mrs,
+ .get_tos_map = rds_ib_get_tos_map,
.t_owner = THIS_MODULE,
.t_name = "infiniband",
.t_unloading = rds_ib_is_unloading,
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 71ff356ee702..752f92235a38 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -67,7 +67,9 @@ struct rds_ib_conn_priv_cmn {
u8 ricpc_protocol_major;
u8 ricpc_protocol_minor;
__be16 ricpc_protocol_minor_mask; /* bitmask */
- __be32 ricpc_reserved1;
+ u8 ricpc_dp_toss;
+ u8 ripc_reserved1;
+ __be16 ripc_reserved2;
__be64 ricpc_ack_seq;
__be32 ricpc_credit; /* non-zero enables flow ctl */
};
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index bfbb31f0c7fd..66c6eb56072b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -133,23 +133,24 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
rds_ib_set_flow_control(conn, be32_to_cpu(credit));
}
- if (conn->c_version < RDS_PROTOCOL(3, 1)) {
- pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
- &conn->c_laddr, &conn->c_faddr,
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version));
- set_bit(RDS_DESTROY_PENDING, &conn->c_path[0].cp_flags);
- rds_conn_destroy(conn);
- return;
- } else {
- pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c> version %u.%u%s\n",
- ic->i_active_side ? "Active" : "Passive",
- &conn->c_laddr, &conn->c_faddr,
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version),
- ic->i_flowctl ? ", flow control" : "");
+ if (conn->c_version < RDS_PROTOCOL_VERSION) {
+ if (conn->c_version != RDS_PROTOCOL_COMPAT_VERSION) {
+ pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
+ &conn->c_laddr, &conn->c_faddr,
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version));
+ rds_conn_destroy(conn);
+ return;
+ }
}
+ pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c,%d> version %u.%u%s\n",
+ ic->i_active_side ? "Active" : "Passive",
+ &conn->c_laddr, &conn->c_faddr, conn->c_tos,
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version),
+ ic->i_flowctl ? ", flow control" : "");
+
atomic_set(&ic->i_cq_quiesce, 0);
/* Init rings and fill recv. this needs to wait until protocol
@@ -184,6 +185,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
NULL);
}
+ conn->c_proposed_version = conn->c_version;
rds_connect_complete(conn);
}
@@ -220,6 +222,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
dp->ricp_v6.dp_ack_seq =
cpu_to_be64(rds_ib_piggyb_ack(ic));
+ dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos;
conn_param->private_data = &dp->ricp_v6;
conn_param->private_data_len = sizeof(dp->ricp_v6);
@@ -234,6 +237,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
dp->ricp_v4.dp_ack_seq =
cpu_to_be64(rds_ib_piggyb_ack(ic));
+ dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos;
conn_param->private_data = &dp->ricp_v4;
conn_param->private_data_len = sizeof(dp->ricp_v4);
@@ -389,10 +393,9 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
- rdsdebug("Fatal QP Event %u (%s) "
- "- connection %pI6c->%pI6c, reconnecting\n",
- event->event, ib_event_msg(event->event),
- &conn->c_laddr, &conn->c_faddr);
+ rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n",
+ event->event, ib_event_msg(event->event),
+ &conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
break;
}
@@ -660,13 +663,16 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
/* Even if len is crap *now* I still want to check it. -ASG */
if (event->param.conn.private_data_len < data_len || major == 0)
- return RDS_PROTOCOL_3_0;
+ return RDS_PROTOCOL_4_0;
common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS;
- if (major == 3 && common) {
- version = RDS_PROTOCOL_3_0;
+ if (major == 4 && common) {
+ version = RDS_PROTOCOL_4_0;
while ((common >>= 1) != 0)
version++;
+ } else if (RDS_PROTOCOL_COMPAT_VERSION ==
+ RDS_PROTOCOL(major, minor)) {
+ version = RDS_PROTOCOL_COMPAT_VERSION;
} else {
if (isv6)
printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI6c using incompatible protocol version %u.%u\n",
@@ -729,8 +735,10 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
/* Check whether the remote protocol version matches ours. */
version = rds_ib_protocol_compatible(event, isv6);
- if (!version)
+ if (!version) {
+ err = RDS_RDMA_REJ_INCOMPAT;
goto out;
+ }
dp = event->param.conn.private_data;
if (isv6) {
@@ -771,15 +779,16 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
daddr6 = &d_mapped_addr;
}
- rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid "
- "0x%llx\n", saddr6, daddr6,
- RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
+ rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid 0x%llx, tos:%d\n",
+ saddr6, daddr6, RDS_PROTOCOL_MAJOR(version),
+ RDS_PROTOCOL_MINOR(version),
(unsigned long long)be64_to_cpu(lguid),
- (unsigned long long)be64_to_cpu(fguid));
+ (unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss);
/* RDS/IB is not currently netns aware, thus init_net */
conn = rds_conn_create(&init_net, daddr6, saddr6,
- &rds_ib_transport, GFP_KERNEL, ifindex);
+ &rds_ib_transport, dp_cmn->ricpc_dp_toss,
+ GFP_KERNEL, ifindex);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
@@ -846,7 +855,7 @@ out:
if (conn)
mutex_unlock(&conn->c_cm_lock);
if (err)
- rdma_reject(cm_id, NULL, 0);
+ rdma_reject(cm_id, &err, sizeof(int));
return destroy;
}
@@ -861,7 +870,7 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
/* If the peer doesn't do protocol negotiation, we must
* default to RDSv3.0 */
- rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
+ rds_ib_set_protocol(conn, RDS_PROTOCOL_4_1);
ic->i_flowctl = rds_ib_sysctl_flow_control; /* advertise flow control */
ret = rds_ib_setup_qp(conn);
@@ -870,7 +879,8 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
goto out;
}
- rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
+ rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
+ conn->c_proposed_version,
UINT_MAX, UINT_MAX, isv6);
ret = rdma_connect(cm_id, &conn_param);
if (ret)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 2f16146e4ec9..d395eec98959 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -986,9 +986,9 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
} else {
/* We expect errors as the qp is drained during shutdown */
if (rds_conn_up(conn) || rds_conn_connecting(conn))
- rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
+ rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), disconnecting and reconnecting\n",
&conn->c_laddr, &conn->c_faddr,
- wc->status,
+ conn->c_tos, wc->status,
ib_wc_status_msg(wc->status));
}
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 2dcb555e6350..09c46f2e97fa 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -305,8 +305,9 @@ void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
/* We expect errors as the qp is drained during shutdown */
if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
- rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
- &conn->c_laddr, &conn->c_faddr, wc->status,
+ rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), disconnecting and reconnecting\n",
+ &conn->c_laddr, &conn->c_faddr,
+ conn->c_tos, wc->status,
ib_wc_status_msg(wc->status));
}
}
@@ -522,7 +523,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
- i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
+ i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
@@ -879,7 +880,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
* Instead of knowing how to return a partial rdma read/write we insist that there
* be enough work requests to send the entire message.
*/
- i = ceil(op->op_count, max_sge);
+ i = DIV_ROUND_UP(op->op_count, max_sge);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc != i) {
diff --git a/net/rds/message.c b/net/rds/message.c
index f139420ba1f6..50f13f1d4ae0 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -341,7 +341,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
{
struct rds_message *rm;
unsigned int i;
- int num_sgs = ceil(total_len, PAGE_SIZE);
+ int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
int extra_bytes = num_sgs * sizeof(struct scatterlist);
int ret;
@@ -351,7 +351,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
- rm->data.op_nents = ceil(total_len, PAGE_SIZE);
+ rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
if (!rm->data.op_sg) {
rds_message_put(rm);
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 6b0f57c83a2a..46bce8389066 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -51,6 +51,8 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
struct rds_connection *conn = cm_id->context;
struct rds_transport *trans;
int ret = 0;
+ int *err;
+ u8 len;
rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
event->event, rdma_event_msg(event->event));
@@ -81,6 +83,7 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_ADDR_RESOLVED:
+ rdma_set_service_type(cm_id, conn->c_tos);
/* XXX do we need to clean up if this fails? */
ret = rdma_resolve_route(cm_id,
RDS_RDMA_RESOLVE_TIMEOUT_MS);
@@ -106,8 +109,19 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_REJECTED:
+ if (!conn)
+ break;
+ err = (int *)rdma_consumer_reject_data(cm_id, event, &len);
+ if (!err || (err && ((*err) == RDS_RDMA_REJ_INCOMPAT))) {
+ pr_warn("RDS/RDMA: conn <%pI6c, %pI6c> rejected, dropping connection\n",
+ &conn->c_laddr, &conn->c_faddr);
+ conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
+ conn->c_tos = 0;
+ rds_conn_drop(conn);
+ }
rdsdebug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
+ break;
/* FALLTHROUGH */
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
diff --git a/net/rds/rdma_transport.h b/net/rds/rdma_transport.h
index 200d3134aaae..bfafd4a6d827 100644
--- a/net/rds/rdma_transport.h
+++ b/net/rds/rdma_transport.h
@@ -11,6 +11,12 @@
#define RDS_RDMA_RESOLVE_TIMEOUT_MS 5000
+/* Below reject reason is for legacy interoperability issue with non-linux
+ * RDS endpoints where older version incompatibility is conveyed via value 1.
+ * For future version(s), proper encoded reject reason should be used.
+ */
+#define RDS_RDMA_REJ_INCOMPAT 1
+
int rds_rdma_conn_connect(struct rds_connection *conn);
int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 02ec4a3b2799..0d8f67cadd74 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -19,10 +19,13 @@
*/
#define RDS_PROTOCOL_3_0 0x0300
#define RDS_PROTOCOL_3_1 0x0301
+#define RDS_PROTOCOL_4_0 0x0400
+#define RDS_PROTOCOL_4_1 0x0401
#define RDS_PROTOCOL_VERSION RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v) ((v) >> 8)
#define RDS_PROTOCOL_MINOR(v) ((v) & 255)
#define RDS_PROTOCOL(maj, min) (((maj) << 8) | min)
+#define RDS_PROTOCOL_COMPAT_VERSION RDS_PROTOCOL_3_1
/* The following ports, 16385, 18634, 18635, are registered with IANA as
* the ports to be used for RDS over TCP and UDP. Currently, only RDS over
@@ -48,10 +51,6 @@ void rdsdebug(char *fmt, ...)
}
#endif
-/* XXX is there one of these somewhere? */
-#define ceil(x, y) \
- ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
-
#define RDS_FRAG_SHIFT 12
#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
@@ -155,9 +154,13 @@ struct rds_connection {
struct rds_cong_map *c_fcong;
/* Protocol version */
+ unsigned int c_proposed_version;
unsigned int c_version;
possible_net_t c_net;
+ /* TOS */
+ u8 c_tos;
+
struct list_head c_map_item;
unsigned long c_map_queued;
@@ -571,6 +574,7 @@ struct rds_transport {
void (*free_mr)(void *trans_private, int invalidate);
void (*flush_mrs)(void);
bool (*t_unloading)(struct rds_connection *conn);
+ u8 (*get_tos_map)(u8 tos);
};
/* Bind hash table key length. It is the sum of the size of a struct
@@ -652,6 +656,7 @@ struct rds_sock {
u8 rs_rx_traces;
u8 rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
struct rds_msg_zcopy_queue rs_zcookie_queue;
+ u8 rs_tos;
};
static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
@@ -760,13 +765,14 @@ void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
- struct rds_transport *trans, gfp_t gfp,
+ struct rds_transport *trans,
+ u8 tos, gfp_t gfp,
int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
- gfp_t gfp, int dev_if);
+ u8 tos, gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 727639dac8a7..853de4876088 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -549,9 +549,21 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
if ((inc->i_rx_tstamp != 0) &&
sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
- struct timeval tv = ktime_to_timeval(inc->i_rx_tstamp);
- ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
- sizeof(tv), &tv);
+ struct __kernel_old_timeval tv = ns_to_kernel_old_timeval(inc->i_rx_tstamp);
+
+ if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) {
+ ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
+ sizeof(tv), &tv);
+ } else {
+ struct __kernel_sock_timeval sk_tv;
+
+ sk_tv.tv_sec = tv.tv_sec;
+ sk_tv.tv_usec = tv.tv_usec;
+
+ ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
+ sizeof(sk_tv), &sk_tv);
+ }
+
if (ret)
goto out;
}
@@ -770,6 +782,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
minfo.len = be32_to_cpu(inc->i_hdr.h_len);
+ minfo.tos = inc->i_conn->c_tos;
if (flip) {
minfo.laddr = daddr;
diff --git a/net/rds/send.c b/net/rds/send.c
index 3d822bad7de9..166dd578c1cc 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1107,7 +1107,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
size_t total_payload_len = payload_len, rdma_payload_len = 0;
bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
- int num_sgs = ceil(payload_len, PAGE_SIZE);
+ int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
int namelen;
struct rds_iov_vector_arr vct;
int ind;
@@ -1277,12 +1277,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
/* rds_conn_create has a spinlock that runs with IRQ off.
* Caching the conn in the socket helps a lot. */
- if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr))
+ if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
+ rs->rs_tos == rs->rs_conn->c_tos) {
conn = rs->rs_conn;
- else {
+ } else {
conn = rds_conn_create_outgoing(sock_net(sock->sk),
&rs->rs_bound_addr, &daddr,
- rs->rs_transport,
+ rs->rs_transport, rs->rs_tos,
sock->sk->sk_allocation,
scope_id);
if (IS_ERR(conn)) {
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index c16f0a362c32..fd2694174607 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -267,6 +267,7 @@ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
tsinfo.last_expected_una = tc->t_last_expected_una;
tsinfo.last_seen_una = tc->t_last_seen_una;
+ tsinfo.tos = tc->t_cpath->cp_conn->c_tos;
rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
}
@@ -452,6 +453,12 @@ static void rds_tcp_destroy_conns(void)
static void rds_tcp_exit(void);
+static u8 rds_tcp_get_tos_map(u8 tos)
+{
+ /* all user tos mapped to default 0 for TCP transport */
+ return 0;
+}
+
struct rds_transport rds_tcp_transport = {
.laddr_check = rds_tcp_laddr_check,
.xmit_path_prepare = rds_tcp_xmit_path_prepare,
@@ -466,6 +473,7 @@ struct rds_transport rds_tcp_transport = {
.inc_free = rds_tcp_inc_free,
.stats_info_copy = rds_tcp_stats_info_copy,
.exit = rds_tcp_exit,
+ .get_tos_map = rds_tcp_get_tos_map,
.t_owner = THIS_MODULE,
.t_name = "tcp",
.t_type = RDS_TRANS_TCP,
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index c12203f646da..810a3a49e947 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -200,7 +200,7 @@ int rds_tcp_accept_one(struct socket *sock)
conn = rds_conn_create(sock_net(sock->sk),
my_addr, peer_addr,
- &rds_tcp_transport, GFP_KERNEL, dev_if);
+ &rds_tcp_transport, 0, GFP_KERNEL, dev_if);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index e64f9e4c3cda..32dc50f0a303 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -93,6 +93,7 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
}
rcu_read_unlock();
+ cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
}
EXPORT_SYMBOL_GPL(rds_connect_path_complete);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d00a0ef39a56..c96f63ffe31e 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -689,8 +689,10 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
rose->source_call = user->call;
ax25_uid_put(user);
} else {
- if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
+ if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
+ dev_put(dev);
return -EACCES;
+ }
rose->source_call = *source;
}
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 77e9f85a2c92..f2ff21d7df08 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev)
/*
* Route a frame to an appropriate AX.25 connection.
+ * A NULL ax25_cb indicates an internally generated frame.
*/
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
@@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
if (skb->len < ROSE_MIN_LEN)
return res;
+
+ if (!ax25)
+ return rose_loopback_queue(skb, NULL);
+
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
if (frametype == ROSE_CALL_REQUEST &&
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index a2522f9d71e2..96f2952bbdfd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -419,76 +419,6 @@ u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
/**
- * rxrpc_kernel_check_call - Check a call's state
- * @sock: The socket the call is on
- * @call: The call to check
- * @_compl: Where to store the completion state
- * @_abort_code: Where to store any abort code
- *
- * Allow a kernel service to query the state of a call and find out the manner
- * of its termination if it has completed. Returns -EINPROGRESS if the call is
- * still going, 0 if the call finished successfully, -ECONNABORTED if the call
- * was aborted and an appropriate error if the call failed in some other way.
- */
-int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
- enum rxrpc_call_completion *_compl, u32 *_abort_code)
-{
- if (call->state != RXRPC_CALL_COMPLETE)
- return -EINPROGRESS;
- smp_rmb();
- *_compl = call->completion;
- *_abort_code = call->abort_code;
- return call->error;
-}
-EXPORT_SYMBOL(rxrpc_kernel_check_call);
-
-/**
- * rxrpc_kernel_retry_call - Allow a kernel service to retry a call
- * @sock: The socket the call is on
- * @call: The call to retry
- * @srx: The address of the peer to contact
- * @key: The security context to use (defaults to socket setting)
- *
- * Allow a kernel service to try resending a client call that failed due to a
- * network error to a new address. The Tx queue is maintained intact, thereby
- * relieving the need to re-encrypt any request data that has already been
- * buffered.
- */
-int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
- struct sockaddr_rxrpc *srx, struct key *key)
-{
- struct rxrpc_conn_parameters cp;
- struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- int ret;
-
- _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
-
- if (!key)
- key = rx->key;
- if (key && !key->payload.data[0])
- key = NULL; /* a no-security key */
-
- memset(&cp, 0, sizeof(cp));
- cp.local = rx->local;
- cp.key = key;
- cp.security_level = 0;
- cp.exclusive = false;
- cp.service_id = srx->srx_service;
-
- mutex_lock(&call->user_mutex);
-
- ret = rxrpc_prepare_call_for_retry(rx, call);
- if (ret == 0)
- ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);
-
- mutex_unlock(&call->user_mutex);
- rxrpc_put_peer(cp.peer);
- _leave(" = %d", ret);
- return ret;
-}
-EXPORT_SYMBOL(rxrpc_kernel_retry_call);
-
-/**
* rxrpc_kernel_new_call_notification - Get notifications of new calls
* @sock: The socket to intercept received messages on
* @notify_new_call: Function to be called when new calls appear
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index bc628acf4f4f..4b1a534d290a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -476,7 +476,6 @@ enum rxrpc_call_flag {
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
- RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
RXRPC_CALL_PINGING, /* Ping in process */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
@@ -518,6 +517,18 @@ enum rxrpc_call_state {
};
/*
+ * Call completion condition (state == RXRPC_CALL_COMPLETE).
+ */
+enum rxrpc_call_completion {
+ RXRPC_CALL_SUCCEEDED, /* - Normal termination */
+ RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
+ RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
+ RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
+ RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
+ NR__RXRPC_CALL_COMPLETIONS
+};
+
+/*
* Call Tx congestion management modes.
*/
enum rxrpc_congest_mode {
@@ -761,15 +772,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct sockaddr_rxrpc *,
struct rxrpc_call_params *, gfp_t,
unsigned int);
-int rxrpc_retry_client_call(struct rxrpc_sock *,
- struct rxrpc_call *,
- struct rxrpc_conn_parameters *,
- struct sockaddr_rxrpc *,
- gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
-int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8f1a8f85b1f9..8aa2937b069f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -325,48 +325,6 @@ error:
}
/*
- * Retry a call to a new address. It is expected that the Tx queue of the call
- * will contain data previously packaged for an old call.
- */
-int rxrpc_retry_client_call(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
-{
- const void *here = __builtin_return_address(0);
- int ret;
-
- /* Set up or get a connection record and set the protocol parameters,
- * including channel number and call ID.
- */
- ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
- if (ret < 0)
- goto error;
-
- trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
- here, NULL);
-
- rxrpc_start_call_timer(call);
-
- _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
-
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
- rxrpc_queue_call(call);
-
- _leave(" = 0");
- return 0;
-
-error:
- rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
- RX_CALL_DEAD, ret);
- trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
- here, ERR_PTR(ret));
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* Set up an incoming call. call->conn points to the connection.
* This is called in BH context and isn't allowed to fail.
*/
@@ -534,61 +492,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
}
/*
- * Prepare a kernel service call for retry.
- */
-int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
-{
- const void *here = __builtin_return_address(0);
- int i;
- u8 last = 0;
-
- _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
-
- trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
- here, (const void *)call->flags);
-
- ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
- ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
- ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
- ASSERT(list_empty(&call->recvmsg_link));
-
- del_timer_sync(&call->timer);
-
- _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);
-
- if (call->conn)
- rxrpc_disconnect_call(call);
-
- if (rxrpc_is_service_call(call) ||
- !call->tx_phase ||
- call->tx_hard_ack != 0 ||
- call->rx_hard_ack != 0 ||
- call->rx_top != 0)
- return -EINVAL;
-
- call->state = RXRPC_CALL_UNINITIALISED;
- call->completion = RXRPC_CALL_SUCCEEDED;
- call->call_id = 0;
- call->cid = 0;
- call->cong_cwnd = 0;
- call->cong_extra = 0;
- call->cong_ssthresh = 0;
- call->cong_mode = 0;
- call->cong_dup_acks = 0;
- call->cong_cumul_acks = 0;
- call->acks_lowest_nak = 0;
-
- for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
- last |= call->rxtx_annotations[i];
- call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
- call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
- }
-
- _leave(" = 0");
- return 0;
-}
-
-/*
* release all the calls associated with a socket
*/
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 521189f4b666..b2adfa825363 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -562,10 +562,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
write_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
- call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
- else
- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
write_unlock_bh(&call->state_lock);
rxrpc_see_call(call);
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 0906e51d3cfb..15cf42d5b53a 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -202,7 +202,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
/* We want receive timestamps. */
opt = 1;
- ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+ ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
(char *)&opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index eaf19ebaa964..3f7bb11f3290 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -596,6 +596,7 @@ error_requeue_call:
}
error_no_call:
release_sock(&rx->sk);
+error_trace:
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
return ret;
@@ -604,7 +605,7 @@ wait_interrupted:
wait_error:
finish_wait(sk_sleep(&rx->sk), &wait);
call = NULL;
- goto error_no_call;
+ goto error_trace;
}
/**
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index be01f9c5d963..46c9312085b1 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -169,10 +169,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
ASSERTCMP(seq, ==, call->tx_top + 1);
- if (last) {
+ if (last)
annotation |= RXRPC_TX_ANNO_LAST;
- set_bit(RXRPC_CALL_TX_LASTQ, &call->flags);
- }
/* We have to set the timestamp before queueing as the retransmit
* algorithm can see the packet as soon as we queue it.
@@ -386,6 +384,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
call->tx_total_len -= copy;
}
+ /* check for the far side aborting the call or a network error
+ * occurring */
+ if (call->state == RXRPC_CALL_COMPLETE)
+ goto call_terminated;
+
/* add the packet to the send queue if it's now full */
if (sp->remain <= 0 ||
(msg_data_left(msg) == 0 && !more)) {
@@ -425,16 +428,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
notify_end_tx);
skb = NULL;
}
-
- /* Check for the far side aborting the call or a network error
- * occurring. If this happens, save any packet that was under
- * construction so that in the case of a network error, the
- * call can be retried or redirected.
- */
- if (call->state == RXRPC_CALL_COMPLETE) {
- ret = call->error;
- goto out;
- }
} while (msg_data_left(msg) > 0);
success:
@@ -444,6 +437,11 @@ out:
_leave(" = %d", ret);
return ret;
+call_terminated:
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ _leave(" = %d", call->error);
+ return call->error;
+
maybe_error:
if (copied)
goto success;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index d4b8355737d8..aecf1bf233c8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -543,7 +543,7 @@ int tcf_register_action(struct tc_action_ops *act,
write_lock(&act_mod_lock);
list_for_each_entry(a, &act_base, head) {
- if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
+ if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
write_unlock(&act_mod_lock);
unregister_pernet_subsys(ops);
return -EEXIST;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index c7633843e223..aa5c38d11a30 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -396,7 +396,7 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_bpf_ops __read_mostly = {
.kind = "bpf",
- .type = TCA_ACT_BPF,
+ .id = TCA_ID_BPF,
.owner = THIS_MODULE,
.act = tcf_bpf_act,
.dump = tcf_bpf_dump,
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 8475913f2070..5d24993cccfe 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -204,7 +204,7 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_connmark_ops = {
.kind = "connmark",
- .type = TCA_ACT_CONNMARK,
+ .id = TCA_ID_CONNMARK,
.owner = THIS_MODULE,
.act = tcf_connmark_act,
.dump = tcf_connmark_dump,
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 3dc25b7806d7..c79aca29505e 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -559,8 +559,11 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_csum *p = to_tcf_csum(a);
+ bool orig_vlan_tag_present = false;
+ unsigned int vlan_hdr_count = 0;
struct tcf_csum_params *params;
u32 update_flags;
+ __be16 protocol;
int action;
params = rcu_dereference_bh(p->params);
@@ -573,7 +576,9 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
goto drop;
update_flags = params->update_flags;
- switch (tc_skb_protocol(skb)) {
+ protocol = tc_skb_protocol(skb);
+again:
+ switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (!tcf_csum_ipv4(skb, update_flags))
goto drop;
@@ -582,13 +587,35 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
if (!tcf_csum_ipv6(skb, update_flags))
goto drop;
break;
+ case cpu_to_be16(ETH_P_8021AD): /* fall through */
+ case cpu_to_be16(ETH_P_8021Q):
+ if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
+ protocol = skb->protocol;
+ orig_vlan_tag_present = true;
+ } else {
+ struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;
+
+ protocol = vlan->h_vlan_encapsulated_proto;
+ skb_pull(skb, VLAN_HLEN);
+ skb_reset_network_header(skb);
+ vlan_hdr_count++;
+ }
+ goto again;
+ }
+
+out:
+ /* Restore the skb for the pulled VLAN tags */
+ while (vlan_hdr_count--) {
+ skb_push(skb, VLAN_HLEN);
+ skb_reset_network_header(skb);
}
return action;
drop:
qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
- return TC_ACT_SHOT;
+ action = TC_ACT_SHOT;
+ goto out;
}
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
@@ -660,7 +687,7 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
static struct tc_action_ops act_csum_ops = {
.kind = "csum",
- .type = TCA_ACT_CSUM,
+ .id = TCA_ID_CSUM,
.owner = THIS_MODULE,
.act = tcf_csum_act,
.dump = tcf_csum_dump,
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b61c20ebb314..93da0004e9f4 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -253,7 +253,7 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
- .type = TCA_ACT_GACT,
+ .id = TCA_ID_GACT,
.owner = THIS_MODULE,
.act = tcf_gact_act,
.stats_update = tcf_gact_stats_update,
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 30b63fa23ee2..9b1f2b3990ee 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -864,7 +864,7 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_ife_ops = {
.kind = "ife",
- .type = TCA_ACT_IFE,
+ .id = TCA_ID_IFE,
.owner = THIS_MODULE,
.act = tcf_ife_act,
.dump = tcf_ife_dump,
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8af6c11d2482..98f5b6ea77b4 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -199,8 +199,7 @@ err3:
err2:
kfree(tname);
err1:
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return err;
}
@@ -338,7 +337,7 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_ipt_ops = {
.kind = "ipt",
- .type = TCA_ACT_IPT,
+ .id = TCA_ID_IPT,
.owner = THIS_MODULE,
.act = tcf_ipt_act,
.dump = tcf_ipt_dump,
@@ -387,7 +386,7 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_xt_ops = {
.kind = "xt",
- .type = TCA_ACT_XT,
+ .id = TCA_ID_XT,
.owner = THIS_MODULE,
.act = tcf_ipt_act,
.dump = tcf_ipt_dump,
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c8cf4d10c435..6692fd054617 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -400,7 +400,7 @@ static void tcf_mirred_put_dev(struct net_device *dev)
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
- .type = TCA_ACT_MIRRED,
+ .id = TCA_ID_MIRRED,
.owner = THIS_MODULE,
.act = tcf_mirred_act,
.stats_update = tcf_stats_update,
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index c5c1e23add77..543eab9193f1 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -304,7 +304,7 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_nat_ops = {
.kind = "nat",
- .type = TCA_ACT_NAT,
+ .id = TCA_ID_NAT,
.owner = THIS_MODULE,
.act = tcf_nat_act,
.dump = tcf_nat_dump,
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 2b372a06b432..a80373878df7 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -406,7 +406,7 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
struct tcf_t t;
int s;
- s = sizeof(*opt) + p->tcfp_nkeys * sizeof(struct tc_pedit_key);
+ s = struct_size(opt, keys, p->tcfp_nkeys);
/* netlink spinlocks held above us - must use ATOMIC */
opt = kzalloc(s, GFP_ATOMIC);
@@ -470,7 +470,7 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
- .type = TCA_ACT_PEDIT,
+ .id = TCA_ID_PEDIT,
.owner = THIS_MODULE,
.act = tcf_pedit_act,
.dump = tcf_pedit_dump,
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index ec8ec55e0fe8..8271a6263824 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -366,7 +366,7 @@ MODULE_LICENSE("GPL");
static struct tc_action_ops act_police_ops = {
.kind = "police",
- .type = TCA_ID_POLICE,
+ .id = TCA_ID_POLICE,
.owner = THIS_MODULE,
.act = tcf_police_act,
.dump = tcf_police_dump,
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 1a0c682fd734..203e399e5c85 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -233,7 +233,7 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_sample_ops = {
.kind = "sample",
- .type = TCA_ACT_SAMPLE,
+ .id = TCA_ID_SAMPLE,
.owner = THIS_MODULE,
.act = tcf_sample_act,
.dump = tcf_sample_dump,
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 902957beceb3..d54cb608dbaf 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -19,8 +19,6 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
-#define TCA_ACT_SIMP 22
-
#include <linux/tc_act/tc_defact.h>
#include <net/tc_act/tc_defact.h>
@@ -197,7 +195,7 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_simp_ops = {
.kind = "simple",
- .type = TCA_ACT_SIMP,
+ .id = TCA_ID_SIMP,
.owner = THIS_MODULE,
.act = tcf_simp_act,
.dump = tcf_simp_dump,
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 64dba3708fce..65879500b688 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -189,8 +189,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return -ENOMEM;
}
@@ -305,7 +304,7 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
- .type = TCA_ACT_SKBEDIT,
+ .id = TCA_ID_SKBEDIT,
.owner = THIS_MODULE,
.act = tcf_skbedit_act,
.dump = tcf_skbedit_dump,
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 59710a183bd3..7bac1d78e7a3 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -260,7 +260,7 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_skbmod_ops = {
.kind = "skbmod",
- .type = TCA_ACT_SKBMOD,
+ .id = TCA_ACT_SKBMOD,
.owner = THIS_MODULE,
.act = tcf_skbmod_act,
.dump = tcf_skbmod_dump,
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index c3b90fadaff6..3beb4717d3b7 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -197,6 +197,21 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};
+static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
+{
+ if (!p)
+ return;
+ if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
+#ifdef CONFIG_DST_CACHE
+ struct ip_tunnel_info *info = &p->tcft_enc_metadata->u.tun_info;
+
+ dst_cache_destroy(&info->dst_cache);
+#endif
+ dst_release(&p->tcft_enc_metadata->dst);
+ }
+ kfree_rcu(p, rcu);
+}
+
static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
@@ -312,12 +327,18 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
goto err_out;
}
+#ifdef CONFIG_DST_CACHE
+ ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
+ if (ret)
+ goto release_tun_meta;
+#endif
+
if (opts_len) {
ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
&metadata->u.tun_info,
opts_len, extack);
if (ret < 0)
- goto release_tun_meta;
+ goto release_dst_cache;
}
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@@ -333,14 +354,14 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
&act_tunnel_key_ops, bind, true);
if (ret) {
NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
- goto release_tun_meta;
+ goto release_dst_cache;
}
ret = ACT_P_CREATED;
} else if (!ovr) {
NL_SET_ERR_MSG(extack, "TC IDR already exists");
ret = -EEXIST;
- goto release_tun_meta;
+ goto release_dst_cache;
}
t = to_tunnel_key(*a);
@@ -350,7 +371,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
ret = -ENOMEM;
exists = true;
- goto release_tun_meta;
+ goto release_dst_cache;
}
params_new->tcft_action = parm->t_action;
params_new->tcft_enc_metadata = metadata;
@@ -360,16 +381,21 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
rcu_swap_protected(t->params, params_new,
lockdep_is_held(&t->tcf_lock));
spin_unlock_bh(&t->tcf_lock);
- if (params_new)
- kfree_rcu(params_new, rcu);
+ tunnel_key_release_params(params_new);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
return ret;
+release_dst_cache:
+#ifdef CONFIG_DST_CACHE
+ if (metadata)
+ dst_cache_destroy(&metadata->u.tun_info.dst_cache);
release_tun_meta:
- dst_release(&metadata->dst);
+#endif
+ if (metadata)
+ dst_release(&metadata->dst);
err_out:
if (exists)
@@ -385,12 +411,7 @@ static void tunnel_key_release(struct tc_action *a)
struct tcf_tunnel_key_params *params;
params = rcu_dereference_protected(t->params, 1);
- if (params) {
- if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
- dst_release(&params->tcft_enc_metadata->dst);
-
- kfree_rcu(params, rcu);
- }
+ tunnel_key_release_params(params);
}
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
@@ -560,7 +581,7 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_tunnel_key_ops = {
.kind = "tunnel_key",
- .type = TCA_ACT_TUNNEL_KEY,
+ .id = TCA_ID_TUNNEL_KEY,
.owner = THIS_MODULE,
.act = tunnel_key_act,
.dump = tunnel_key_dump,
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 93fdaf707313..ac0061599225 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -297,7 +297,7 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
- .type = TCA_ACT_VLAN,
+ .id = TCA_ID_VLAN,
.owner = THIS_MODULE,
.act = tcf_vlan_act,
.dump = tcf_vlan_dump,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8ce2a0507970..478095d50f95 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -31,6 +31,13 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
+#include <net/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
+#include <net/tc_act/tc_csum.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_skbedit.h>
extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
@@ -61,7 +68,8 @@ static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
}
static const struct tcf_proto_ops *
-tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack)
+tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
const struct tcf_proto_ops *ops;
@@ -69,9 +77,11 @@ tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack)
if (ops)
return ops;
#ifdef CONFIG_MODULES
- rtnl_unlock();
+ if (rtnl_held)
+ rtnl_unlock();
request_module("cls_%s", kind);
- rtnl_lock();
+ if (rtnl_held)
+ rtnl_lock();
ops = __tcf_proto_lookup_ops(kind);
/* We dropped the RTNL semaphore in order to perform
* the module load. So, even if we succeeded in loading
@@ -152,8 +162,26 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
return TC_H_MAJ(first);
}
+static bool tcf_proto_is_unlocked(const char *kind)
+{
+ const struct tcf_proto_ops *ops;
+ bool ret;
+
+ ops = tcf_proto_lookup_ops(kind, false, NULL);
+ /* On error return false to take rtnl lock. Proto lookup/create
+ * functions will perform lookup again and properly handle errors.
+ */
+ if (IS_ERR(ops))
+ return false;
+
+ ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
+ module_put(ops->owner);
+ return ret;
+}
+
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
u32 prio, struct tcf_chain *chain,
+ bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct tcf_proto *tp;
@@ -163,7 +191,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
if (!tp)
return ERR_PTR(-ENOBUFS);
- tp->ops = tcf_proto_lookup_ops(kind, extack);
+ tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
if (IS_ERR(tp->ops)) {
err = PTR_ERR(tp->ops);
goto errout;
@@ -172,6 +200,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
tp->protocol = protocol;
tp->prio = prio;
tp->chain = chain;
+ spin_lock_init(&tp->lock);
+ refcount_set(&tp->refcnt, 1);
err = tp->ops->init(tp);
if (err) {
@@ -185,14 +215,80 @@ errout:
return ERR_PTR(err);
}
-static void tcf_proto_destroy(struct tcf_proto *tp,
+static void tcf_proto_get(struct tcf_proto *tp)
+{
+ refcount_inc(&tp->refcnt);
+}
+
+static void tcf_chain_put(struct tcf_chain *chain);
+
+static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack)
{
- tp->ops->destroy(tp, extack);
+ tp->ops->destroy(tp, rtnl_held, extack);
+ tcf_chain_put(tp->chain);
module_put(tp->ops->owner);
kfree_rcu(tp, rcu);
}
+static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
+{
+ if (refcount_dec_and_test(&tp->refcnt))
+ tcf_proto_destroy(tp, rtnl_held, extack);
+}
+
+static int walker_check_empty(struct tcf_proto *tp, void *fh,
+ struct tcf_walker *arg)
+{
+ if (fh) {
+ arg->nonempty = true;
+ return -1;
+ }
+ return 0;
+}
+
+static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
+{
+ struct tcf_walker walker = { .fn = walker_check_empty, };
+
+ if (tp->ops->walk) {
+ tp->ops->walk(tp, &walker, rtnl_held);
+ return !walker.nonempty;
+ }
+ return true;
+}
+
+static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
+{
+ spin_lock(&tp->lock);
+ if (tcf_proto_is_empty(tp, rtnl_held))
+ tp->deleting = true;
+ spin_unlock(&tp->lock);
+ return tp->deleting;
+}
+
+static void tcf_proto_mark_delete(struct tcf_proto *tp)
+{
+ spin_lock(&tp->lock);
+ tp->deleting = true;
+ spin_unlock(&tp->lock);
+}
+
+static bool tcf_proto_is_deleting(struct tcf_proto *tp)
+{
+ bool deleting;
+
+ spin_lock(&tp->lock);
+ deleting = tp->deleting;
+ spin_unlock(&tp->lock);
+
+ return deleting;
+}
+
+#define ASSERT_BLOCK_LOCKED(block) \
+ lockdep_assert_held(&(block)->lock)
+
struct tcf_filter_chain_list_item {
struct list_head list;
tcf_chain_head_change_t *chain_head_change;
@@ -204,10 +300,13 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
{
struct tcf_chain *chain;
+ ASSERT_BLOCK_LOCKED(block);
+
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
list_add_tail(&chain->list, &block->chain_list);
+ mutex_init(&chain->filter_chain_lock);
chain->block = block;
chain->index = chain_index;
chain->refcnt = 1;
@@ -231,29 +330,59 @@ static void tcf_chain0_head_change(struct tcf_chain *chain,
if (chain->index)
return;
+
+ mutex_lock(&block->lock);
list_for_each_entry(item, &block->chain0.filter_chain_list, list)
tcf_chain_head_change_item(item, tp_head);
+ mutex_unlock(&block->lock);
}
-static void tcf_chain_destroy(struct tcf_chain *chain)
+/* Returns true if block can be safely freed. */
+
+static bool tcf_chain_detach(struct tcf_chain *chain)
{
struct tcf_block *block = chain->block;
+ ASSERT_BLOCK_LOCKED(block);
+
list_del(&chain->list);
if (!chain->index)
block->chain0.chain = NULL;
+
+ if (list_empty(&block->chain_list) &&
+ refcount_read(&block->refcnt) == 0)
+ return true;
+
+ return false;
+}
+
+static void tcf_block_destroy(struct tcf_block *block)
+{
+ mutex_destroy(&block->lock);
+ kfree_rcu(block, rcu);
+}
+
+static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
+{
+ struct tcf_block *block = chain->block;
+
+ mutex_destroy(&chain->filter_chain_lock);
kfree(chain);
- if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt))
- kfree_rcu(block, rcu);
+ if (free_block)
+ tcf_block_destroy(block);
}
static void tcf_chain_hold(struct tcf_chain *chain)
{
+ ASSERT_BLOCK_LOCKED(chain->block);
+
++chain->refcnt;
}
static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
+ ASSERT_BLOCK_LOCKED(chain->block);
+
/* In case all the references are action references, this
* chain should not be shown to the user.
*/
@@ -265,6 +394,8 @@ static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
{
struct tcf_chain *chain;
+ ASSERT_BLOCK_LOCKED(block);
+
list_for_each_entry(chain, &block->chain_list, list) {
if (chain->index == chain_index)
return chain;
@@ -279,31 +410,40 @@ static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
u32 chain_index, bool create,
bool by_act)
{
- struct tcf_chain *chain = tcf_chain_lookup(block, chain_index);
+ struct tcf_chain *chain = NULL;
+ bool is_first_reference;
+ mutex_lock(&block->lock);
+ chain = tcf_chain_lookup(block, chain_index);
if (chain) {
tcf_chain_hold(chain);
} else {
if (!create)
- return NULL;
+ goto errout;
chain = tcf_chain_create(block, chain_index);
if (!chain)
- return NULL;
+ goto errout;
}
if (by_act)
++chain->action_refcnt;
+ is_first_reference = chain->refcnt - chain->action_refcnt == 1;
+ mutex_unlock(&block->lock);
/* Send notification only in case we got the first
* non-action reference. Until then, the chain acts only as
* a placeholder for actions pointing to it and user ought
* not know about them.
*/
- if (chain->refcnt - chain->action_refcnt == 1 && !by_act)
+ if (is_first_reference && !by_act)
tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
RTM_NEWCHAIN, false);
return chain;
+
+errout:
+ mutex_unlock(&block->lock);
+ return chain;
}
static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
@@ -318,51 +458,94 @@ struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
-static void tc_chain_tmplt_del(struct tcf_chain *chain);
+static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
+ void *tmplt_priv);
+static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
+ void *tmplt_priv, u32 chain_index,
+ struct tcf_block *block, struct sk_buff *oskb,
+ u32 seq, u16 flags, bool unicast);
-static void __tcf_chain_put(struct tcf_chain *chain, bool by_act)
+static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
+ bool explicitly_created)
{
+ struct tcf_block *block = chain->block;
+ const struct tcf_proto_ops *tmplt_ops;
+ bool is_last, free_block = false;
+ unsigned int refcnt;
+ void *tmplt_priv;
+ u32 chain_index;
+
+ mutex_lock(&block->lock);
+ if (explicitly_created) {
+ if (!chain->explicitly_created) {
+ mutex_unlock(&block->lock);
+ return;
+ }
+ chain->explicitly_created = false;
+ }
+
if (by_act)
chain->action_refcnt--;
- chain->refcnt--;
+
+ /* tc_chain_notify_delete can't be called while holding block lock.
+ * However, when block is unlocked chain can be changed concurrently, so
+ * save these to temporary variables.
+ */
+ refcnt = --chain->refcnt;
+ is_last = refcnt - chain->action_refcnt == 0;
+ tmplt_ops = chain->tmplt_ops;
+ tmplt_priv = chain->tmplt_priv;
+ chain_index = chain->index;
+
+ if (refcnt == 0)
+ free_block = tcf_chain_detach(chain);
+ mutex_unlock(&block->lock);
/* The last dropped non-action reference will trigger notification. */
- if (chain->refcnt - chain->action_refcnt == 0 && !by_act)
- tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false);
+ if (is_last && !by_act) {
+ tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain_index,
+ block, NULL, 0, 0, false);
+ /* Last reference to chain, no need to lock. */
+ chain->flushing = false;
+ }
- if (chain->refcnt == 0) {
- tc_chain_tmplt_del(chain);
- tcf_chain_destroy(chain);
+ if (refcnt == 0) {
+ tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
+ tcf_chain_destroy(chain, free_block);
}
}
static void tcf_chain_put(struct tcf_chain *chain)
{
- __tcf_chain_put(chain, false);
+ __tcf_chain_put(chain, false, false);
}
void tcf_chain_put_by_act(struct tcf_chain *chain)
{
- __tcf_chain_put(chain, true);
+ __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);
static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
- if (chain->explicitly_created)
- tcf_chain_put(chain);
+ __tcf_chain_put(chain, false, true);
}
-static void tcf_chain_flush(struct tcf_chain *chain)
+static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
- struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
+ struct tcf_proto *tp, *tp_next;
+ mutex_lock(&chain->filter_chain_lock);
+ tp = tcf_chain_dereference(chain->filter_chain, chain);
+ RCU_INIT_POINTER(chain->filter_chain, NULL);
tcf_chain0_head_change(chain, NULL);
+ chain->flushing = true;
+ mutex_unlock(&chain->filter_chain_lock);
+
while (tp) {
- RCU_INIT_POINTER(chain->filter_chain, tp->next);
- tcf_proto_destroy(tp, NULL);
- tp = rtnl_dereference(chain->filter_chain);
- tcf_chain_put(chain);
+ tp_next = rcu_dereference_protected(tp->next, 1);
+ tcf_proto_put(tp, rtnl_held, NULL);
+ tp = tp_next;
}
}
@@ -684,8 +867,8 @@ tcf_chain0_head_change_cb_add(struct tcf_block *block,
struct tcf_block_ext_info *ei,
struct netlink_ext_ack *extack)
{
- struct tcf_chain *chain0 = block->chain0.chain;
struct tcf_filter_chain_list_item *item;
+ struct tcf_chain *chain0;
item = kmalloc(sizeof(*item), GFP_KERNEL);
if (!item) {
@@ -694,9 +877,32 @@ tcf_chain0_head_change_cb_add(struct tcf_block *block,
}
item->chain_head_change = ei->chain_head_change;
item->chain_head_change_priv = ei->chain_head_change_priv;
- if (chain0 && chain0->filter_chain)
- tcf_chain_head_change_item(item, chain0->filter_chain);
- list_add(&item->list, &block->chain0.filter_chain_list);
+
+ mutex_lock(&block->lock);
+ chain0 = block->chain0.chain;
+ if (chain0)
+ tcf_chain_hold(chain0);
+ else
+ list_add(&item->list, &block->chain0.filter_chain_list);
+ mutex_unlock(&block->lock);
+
+ if (chain0) {
+ struct tcf_proto *tp_head;
+
+ mutex_lock(&chain0->filter_chain_lock);
+
+ tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
+ if (tp_head)
+ tcf_chain_head_change_item(item, tp_head);
+
+ mutex_lock(&block->lock);
+ list_add(&item->list, &block->chain0.filter_chain_list);
+ mutex_unlock(&block->lock);
+
+ mutex_unlock(&chain0->filter_chain_lock);
+ tcf_chain_put(chain0);
+ }
+
return 0;
}
@@ -704,20 +910,23 @@ static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
struct tcf_block_ext_info *ei)
{
- struct tcf_chain *chain0 = block->chain0.chain;
struct tcf_filter_chain_list_item *item;
+ mutex_lock(&block->lock);
list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
(item->chain_head_change == ei->chain_head_change &&
item->chain_head_change_priv == ei->chain_head_change_priv)) {
- if (chain0)
+ if (block->chain0.chain)
tcf_chain_head_change_item(item, NULL);
list_del(&item->list);
+ mutex_unlock(&block->lock);
+
kfree(item);
return;
}
}
+ mutex_unlock(&block->lock);
WARN_ON(1);
}
@@ -764,6 +973,7 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
return ERR_PTR(-ENOMEM);
}
+ mutex_init(&block->lock);
INIT_LIST_HEAD(&block->chain_list);
INIT_LIST_HEAD(&block->cb_list);
INIT_LIST_HEAD(&block->owner_list);
@@ -799,157 +1009,241 @@ static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
return block;
}
-static void tcf_block_flush_all_chains(struct tcf_block *block)
+static struct tcf_chain *
+__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
- struct tcf_chain *chain;
+ mutex_lock(&block->lock);
+ if (chain)
+ chain = list_is_last(&chain->list, &block->chain_list) ?
+ NULL : list_next_entry(chain, list);
+ else
+ chain = list_first_entry_or_null(&block->chain_list,
+ struct tcf_chain, list);
- /* Hold a refcnt for all chains, so that they don't disappear
- * while we are iterating.
- */
- list_for_each_entry(chain, &block->chain_list, list)
+ /* skip all action-only chains */
+ while (chain && tcf_chain_held_by_acts_only(chain))
+ chain = list_is_last(&chain->list, &block->chain_list) ?
+ NULL : list_next_entry(chain, list);
+
+ if (chain)
tcf_chain_hold(chain);
+ mutex_unlock(&block->lock);
- list_for_each_entry(chain, &block->chain_list, list)
- tcf_chain_flush(chain);
+ return chain;
}
-static void tcf_block_put_all_chains(struct tcf_block *block)
+/* Function to be used by all clients that want to iterate over all chains on
+ * block. It properly obtains block->lock and takes reference to chain before
+ * returning it. Users of this function must be tolerant to concurrent chain
+ * insertion/deletion or ensure that no concurrent chain modification is
+ * possible. Note that all netlink dump callbacks cannot guarantee to provide
+ * consistent dump because rtnl lock is released each time skb is filled with
+ * data and sent to user-space.
+ */
+
+struct tcf_chain *
+tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
- struct tcf_chain *chain, *tmp;
+ struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
- /* At this point, all the chains should have refcnt >= 1. */
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
- tcf_chain_put_explicitly_created(chain);
+ if (chain)
tcf_chain_put(chain);
- }
+
+ return chain_next;
}
+EXPORT_SYMBOL(tcf_get_next_chain);
-static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
- struct tcf_block_ext_info *ei)
+static struct tcf_proto *
+__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
- if (refcount_dec_and_test(&block->refcnt)) {
- /* Flushing/putting all chains will cause the block to be
- * deallocated when last chain is freed. However, if chain_list
- * is empty, block has to be manually deallocated. After block
- * reference counter reached 0, it is no longer possible to
- * increment it or add new chains to block.
- */
- bool free_block = list_empty(&block->chain_list);
+ u32 prio = 0;
- if (tcf_block_shared(block))
- tcf_block_remove(block, block->net);
- if (!free_block)
- tcf_block_flush_all_chains(block);
+ ASSERT_RTNL();
+ mutex_lock(&chain->filter_chain_lock);
- if (q)
- tcf_block_offload_unbind(block, q, ei);
+ if (!tp) {
+ tp = tcf_chain_dereference(chain->filter_chain, chain);
+ } else if (tcf_proto_is_deleting(tp)) {
+ /* 'deleting' flag is set and chain->filter_chain_lock was
+ * unlocked, which means next pointer could be invalid. Restart
+ * search.
+ */
+ prio = tp->prio + 1;
+ tp = tcf_chain_dereference(chain->filter_chain, chain);
- if (free_block)
- kfree_rcu(block, rcu);
- else
- tcf_block_put_all_chains(block);
- } else if (q) {
- tcf_block_offload_unbind(block, q, ei);
+ for (; tp; tp = tcf_chain_dereference(tp->next, chain))
+ if (!tp->deleting && tp->prio >= prio)
+ break;
+ } else {
+ tp = tcf_chain_dereference(tp->next, chain);
}
+
+ if (tp)
+ tcf_proto_get(tp);
+
+ mutex_unlock(&chain->filter_chain_lock);
+
+ return tp;
}
-static void tcf_block_refcnt_put(struct tcf_block *block)
+/* Function to be used by all clients that want to iterate over all tp's on
+ * a chain. Users of this function must be tolerant to concurrent tp
+ * insertion/deletion or ensure that no concurrent chain modification is
+ * possible. Note that netlink dump callbacks cannot guarantee a consistent
+ * dump because the rtnl lock is released each time the skb is filled with
+ * data and sent to user-space.
+ */
+
+struct tcf_proto *
+tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
+ bool rtnl_held)
{
- __tcf_block_put(block, NULL, NULL);
+ struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
+
+ if (tp)
+ tcf_proto_put(tp, rtnl_held, NULL);
+
+ return tp_next;
}
+EXPORT_SYMBOL(tcf_get_next_proto);
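
(Editorial sketch, not part of the patch.) tcf_get_next_proto() follows the same convention as the chain iterator: it puts the tp it was given and returns the next tp with a reference held, skipping protos marked as deleting. A minimal, hypothetical caller, again assuming the net/pkt_cls.h declarations, is shown below; tfilter_notify_chain() later in this patch uses exactly this pattern.

#include <net/pkt_cls.h>

/* Hypothetical helper, for illustration only: visits every classifier on a
 * chain without holding filter_chain_lock across the loop body.
 */
static void example_walk_protos(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp;

        for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
             tp;
             tp = tcf_get_next_proto(chain, tp, rtnl_held)) {
                /* tp is referenced and not marked 'deleting' here */
        }
}
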
-/* Find tcf block.
- * Set q, parent, cl when appropriate.
+static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
+{
+ struct tcf_chain *chain;
+
+ /* Last reference to block. At this point chains cannot be added or
+ * removed concurrently.
+ */
+ for (chain = tcf_get_next_chain(block, NULL);
+ chain;
+ chain = tcf_get_next_chain(block, chain)) {
+ tcf_chain_put_explicitly_created(chain);
+ tcf_chain_flush(chain, rtnl_held);
+ }
+}
+
+/* Look up the Qdisc and increment its reference counter.
+ * Set parent, if necessary.
*/
-static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
- u32 *parent, unsigned long *cl,
- int ifindex, u32 block_index,
- struct netlink_ext_ack *extack)
+static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
+ u32 *parent, int ifindex, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
- struct tcf_block *block;
+ const struct Qdisc_class_ops *cops;
+ struct net_device *dev;
int err = 0;
- if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
- block = tcf_block_refcnt_get(net, block_index);
- if (!block) {
- NL_SET_ERR_MSG(extack, "Block of given index was not found");
- return ERR_PTR(-EINVAL);
- }
- } else {
- const struct Qdisc_class_ops *cops;
- struct net_device *dev;
-
- rcu_read_lock();
+ if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
+ return 0;
- /* Find link */
- dev = dev_get_by_index_rcu(net, ifindex);
- if (!dev) {
- rcu_read_unlock();
- return ERR_PTR(-ENODEV);
- }
+ rcu_read_lock();
- /* Find qdisc */
- if (!*parent) {
- *q = dev->qdisc;
- *parent = (*q)->handle;
- } else {
- *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
- if (!*q) {
- NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
- err = -EINVAL;
- goto errout_rcu;
- }
- }
+ /* Find link */
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (!dev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
- *q = qdisc_refcount_inc_nz(*q);
+ /* Find qdisc */
+ if (!*parent) {
+ *q = dev->qdisc;
+ *parent = (*q)->handle;
+ } else {
+ *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
if (!*q) {
NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
err = -EINVAL;
goto errout_rcu;
}
+ }
- /* Is it classful? */
- cops = (*q)->ops->cl_ops;
- if (!cops) {
- NL_SET_ERR_MSG(extack, "Qdisc not classful");
- err = -EINVAL;
- goto errout_rcu;
- }
+ *q = qdisc_refcount_inc_nz(*q);
+ if (!*q) {
+ NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
+ err = -EINVAL;
+ goto errout_rcu;
+ }
- if (!cops->tcf_block) {
- NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
- err = -EOPNOTSUPP;
- goto errout_rcu;
- }
+ /* Is it classful? */
+ cops = (*q)->ops->cl_ops;
+ if (!cops) {
+ NL_SET_ERR_MSG(extack, "Qdisc not classful");
+ err = -EINVAL;
+ goto errout_qdisc;
+ }
- /* At this point we know that qdisc is not noop_qdisc,
- * which means that qdisc holds a reference to net_device
- * and we hold a reference to qdisc, so it is safe to release
- * rcu read lock.
- */
- rcu_read_unlock();
+ if (!cops->tcf_block) {
+ NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
+ err = -EOPNOTSUPP;
+ goto errout_qdisc;
+ }
- /* Do we search for filter, attached to class? */
- if (TC_H_MIN(*parent)) {
- *cl = cops->find(*q, *parent);
- if (*cl == 0) {
- NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
- err = -ENOENT;
- goto errout_qdisc;
- }
+errout_rcu:
+ /* At this point we know that qdisc is not noop_qdisc,
+ * which means that qdisc holds a reference to net_device
+ * and we hold a reference to qdisc, so it is safe to release
+ * rcu read lock.
+ */
+ rcu_read_unlock();
+ return err;
+
+errout_qdisc:
+ rcu_read_unlock();
+
+ if (rtnl_held)
+ qdisc_put(*q);
+ else
+ qdisc_put_unlocked(*q);
+ *q = NULL;
+
+ return err;
+}
+
+static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
+ int ifindex, struct netlink_ext_ack *extack)
+{
+ if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
+ return 0;
+
+ /* Do we search for filter, attached to class? */
+ if (TC_H_MIN(parent)) {
+ const struct Qdisc_class_ops *cops = q->ops->cl_ops;
+
+ *cl = cops->find(q, parent);
+ if (*cl == 0) {
+ NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
+ return -ENOENT;
}
+ }
+
+ return 0;
+}
+
+static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
+ unsigned long cl, int ifindex,
+ u32 block_index,
+ struct netlink_ext_ack *extack)
+{
+ struct tcf_block *block;
- /* And the last stroke */
- block = cops->tcf_block(*q, *cl, extack);
+ if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+ block = tcf_block_refcnt_get(net, block_index);
if (!block) {
- err = -EINVAL;
- goto errout_qdisc;
+ NL_SET_ERR_MSG(extack, "Block of given index was not found");
+ return ERR_PTR(-EINVAL);
}
+ } else {
+ const struct Qdisc_class_ops *cops = q->ops->cl_ops;
+
+ block = cops->tcf_block(q, cl, extack);
+ if (!block)
+ return ERR_PTR(-EINVAL);
+
if (tcf_block_shared(block)) {
NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
- err = -EOPNOTSUPP;
- goto errout_qdisc;
+ return ERR_PTR(-EOPNOTSUPP);
}
/* Always take reference to block in order to support execution
@@ -962,24 +1256,91 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
}
return block;
+}
+
+static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
+ struct tcf_block_ext_info *ei, bool rtnl_held)
+{
+ if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
+ /* Flushing/putting all chains will cause the block to be
+ * deallocated when last chain is freed. However, if chain_list
+ * is empty, block has to be manually deallocated. After block
+ * reference counter reached 0, it is no longer possible to
+ * increment it or add new chains to block.
+ */
+ bool free_block = list_empty(&block->chain_list);
+
+ mutex_unlock(&block->lock);
+ if (tcf_block_shared(block))
+ tcf_block_remove(block, block->net);
+
+ if (q)
+ tcf_block_offload_unbind(block, q, ei);
+
+ if (free_block)
+ tcf_block_destroy(block);
+ else
+ tcf_block_flush_all_chains(block, rtnl_held);
+ } else if (q) {
+ tcf_block_offload_unbind(block, q, ei);
+ }
+}
+
+static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
+{
+ __tcf_block_put(block, NULL, NULL, rtnl_held);
+}
+
+/* Find tcf block.
+ * Set q, parent, cl when appropriate.
+ */
+
+static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
+ u32 *parent, unsigned long *cl,
+ int ifindex, u32 block_index,
+ struct netlink_ext_ack *extack)
+{
+ struct tcf_block *block;
+ int err = 0;
+
+ ASSERT_RTNL();
+
+ err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
+ if (err)
+ goto errout;
+
+ err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
+ if (err)
+ goto errout_qdisc;
+
+ block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
+ if (IS_ERR(block)) {
+ err = PTR_ERR(block);
+ goto errout_qdisc;
+ }
+
+ return block;
-errout_rcu:
- rcu_read_unlock();
errout_qdisc:
- if (*q) {
+ if (*q)
qdisc_put(*q);
- *q = NULL;
- }
+errout:
+ *q = NULL;
return ERR_PTR(err);
}
-static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
+static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
+ bool rtnl_held)
{
if (!IS_ERR_OR_NULL(block))
- tcf_block_refcnt_put(block);
+ tcf_block_refcnt_put(block, rtnl_held);
- if (q)
- qdisc_put(q);
+ if (q) {
+ if (rtnl_held)
+ qdisc_put(q);
+ else
+ qdisc_put_unlocked(q);
+ }
}
struct tcf_block_owner_item {
@@ -1087,7 +1448,7 @@ err_chain0_head_change_cb_add:
tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
- tcf_block_refcnt_put(block);
+ tcf_block_refcnt_put(block, true);
return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);
@@ -1124,7 +1485,7 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
tcf_chain0_head_change_cb_del(block, ei);
tcf_block_owner_del(block, q, ei->binder_type);
- __tcf_block_put(block, q, ei);
+ __tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);
@@ -1181,13 +1542,19 @@ tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
void *cb_priv, bool add, bool offload_in_use,
struct netlink_ext_ack *extack)
{
- struct tcf_chain *chain;
- struct tcf_proto *tp;
+ struct tcf_chain *chain, *chain_prev;
+ struct tcf_proto *tp, *tp_prev;
int err;
- list_for_each_entry(chain, &block->chain_list, list) {
- for (tp = rtnl_dereference(chain->filter_chain); tp;
- tp = rtnl_dereference(tp->next)) {
+ for (chain = __tcf_get_next_chain(block, NULL);
+ chain;
+ chain_prev = chain,
+ chain = __tcf_get_next_chain(block, chain),
+ tcf_chain_put(chain_prev)) {
+ for (tp = __tcf_get_next_proto(chain, NULL); tp;
+ tp_prev = tp,
+ tp = __tcf_get_next_proto(chain, tp),
+ tcf_proto_put(tp_prev, true, NULL)) {
if (tp->ops->reoffload) {
err = tp->ops->reoffload(tp, add, cb, cb_priv,
extack);
@@ -1204,6 +1571,8 @@ tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
return 0;
err_playback_remove:
+ tcf_proto_put(tp, true, NULL);
+ tcf_chain_put(chain);
tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
extack);
return err;
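
(Editorial sketch, not part of the patch.) The playback loop above uses the lower-level __tcf_get_next_chain()/__tcf_get_next_proto() helpers, which only take a reference on the element they return; the caller releases the previous element as part of the for-loop increment and must drop the current reference on any early exit, which is what the err_playback_remove path does. A stripped-down, hypothetical version of the same idiom, assuming it sits in cls_api.c next to those static helpers (the early-exit condition is made up for illustration):

/* Hypothetical walker showing the "put previous element" idiom. */
static int example_visit_chains(struct tcf_block *block)
{
        struct tcf_chain *chain, *chain_prev;

        for (chain = __tcf_get_next_chain(block, NULL);
             chain;
             chain_prev = chain,
             chain = __tcf_get_next_chain(block, chain),
             tcf_chain_put(chain_prev)) {
                if (chain->index == 0)  /* made-up early-exit condition */
                        goto err_out;
        }
        return 0;

err_out:
        tcf_chain_put(chain);           /* drop the ref the iterator took */
        return -EINVAL;
}
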
@@ -1277,7 +1646,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
- __be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
const int max_reclassify_loop = 4;
const struct tcf_proto *orig_tp = tp;
@@ -1287,6 +1655,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
+ __be16 protocol = tc_skb_protocol(skb);
int err;
if (tp->protocol != protocol &&
@@ -1319,7 +1688,6 @@ reset:
}
tp = first_tp;
- protocol = tc_skb_protocol(skb);
goto reclassify;
#endif
}
@@ -1330,32 +1698,116 @@ struct tcf_chain_info {
struct tcf_proto __rcu *next;
};
-static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
+static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info)
{
- return rtnl_dereference(*chain_info->pprev);
+ return tcf_chain_dereference(*chain_info->pprev, chain);
}
-static void tcf_chain_tp_insert(struct tcf_chain *chain,
- struct tcf_chain_info *chain_info,
- struct tcf_proto *tp)
+static int tcf_chain_tp_insert(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info,
+ struct tcf_proto *tp)
{
+ if (chain->flushing)
+ return -EAGAIN;
+
if (*chain_info->pprev == chain->filter_chain)
tcf_chain0_head_change(chain, tp);
- RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
+ tcf_proto_get(tp);
+ RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
rcu_assign_pointer(*chain_info->pprev, tp);
- tcf_chain_hold(chain);
+
+ return 0;
}
static void tcf_chain_tp_remove(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
struct tcf_proto *tp)
{
- struct tcf_proto *next = rtnl_dereference(chain_info->next);
+ struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
+ tcf_proto_mark_delete(tp);
if (tp == chain->filter_chain)
tcf_chain0_head_change(chain, next);
RCU_INIT_POINTER(*chain_info->pprev, next);
- tcf_chain_put(chain);
+}
+
+static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info,
+ u32 protocol, u32 prio,
+ bool prio_allocate);
+
+/* Try to insert a new proto.
+ * If a proto with the specified priority already exists, free the new proto
+ * and return the existing one.
+ */
+
+static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
+ struct tcf_proto *tp_new,
+ u32 protocol, u32 prio,
+ bool rtnl_held)
+{
+ struct tcf_chain_info chain_info;
+ struct tcf_proto *tp;
+ int err = 0;
+
+ mutex_lock(&chain->filter_chain_lock);
+
+ tp = tcf_chain_tp_find(chain, &chain_info,
+ protocol, prio, false);
+ if (!tp)
+ err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
+ mutex_unlock(&chain->filter_chain_lock);
+
+ if (tp) {
+ tcf_proto_destroy(tp_new, rtnl_held, NULL);
+ tp_new = tp;
+ } else if (err) {
+ tcf_proto_destroy(tp_new, rtnl_held, NULL);
+ tp_new = ERR_PTR(err);
+ }
+
+ return tp_new;
+}
+
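
(Editorial sketch, not part of the patch.) tcf_chain_tp_insert_unique() is meant to receive a classifier that was created without filter_chain_lock held; if a concurrent writer installed a proto with the same priority first, the freshly created one is destroyed and the winner is returned instead. A condensed, hypothetical wrapper around that pattern, assuming it lives in cls_api.c next to these static helpers (tc_new_tfilter() below does the real version, including the flushing checks and error handling omitted here):

/* Hypothetical wrapper around the create-then-insert pattern. */
static struct tcf_proto *
example_create_and_insert(struct tcf_chain *chain, const char *kind,
                          u32 protocol, u32 prio, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp_new;

        /* May sleep and may load a classifier module, hence no lock here. */
        tp_new = tcf_proto_create(kind, protocol, prio, chain, rtnl_held,
                                  extack);
        if (IS_ERR(tp_new))
                return tp_new;

        /* Returns either tp_new or the proto that won a concurrent insert;
         * in the latter case tp_new has already been destroyed.
         */
        return tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
                                          rtnl_held);
}
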
+static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
+ struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
+{
+ struct tcf_chain_info chain_info;
+ struct tcf_proto *tp_iter;
+ struct tcf_proto **pprev;
+ struct tcf_proto *next;
+
+ mutex_lock(&chain->filter_chain_lock);
+
+ /* Atomically find and remove tp from chain. */
+ for (pprev = &chain->filter_chain;
+ (tp_iter = tcf_chain_dereference(*pprev, chain));
+ pprev = &tp_iter->next) {
+ if (tp_iter == tp) {
+ chain_info.pprev = pprev;
+ chain_info.next = tp_iter->next;
+ WARN_ON(tp_iter->deleting);
+ break;
+ }
+ }
+ /* Verify that tp still exists and no new filters were inserted
+ * concurrently.
+ * Mark tp for deletion if it is empty.
+ */
+ if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
+ mutex_unlock(&chain->filter_chain_lock);
+ return;
+ }
+
+ next = tcf_chain_dereference(chain_info.next, chain);
+ if (tp == chain->filter_chain)
+ tcf_chain0_head_change(chain, next);
+ RCU_INIT_POINTER(*chain_info.pprev, next);
+ mutex_unlock(&chain->filter_chain_lock);
+
+ tcf_proto_put(tp, rtnl_held, extack);
}
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
@@ -1368,7 +1820,8 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
/* Check the chain for existence of proto-tcf with this priority */
for (pprev = &chain->filter_chain;
- (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
+ (tp = tcf_chain_dereference(*pprev, chain));
+ pprev = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
if (prio_allocate ||
@@ -1381,14 +1834,20 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
}
}
chain_info->pprev = pprev;
- chain_info->next = tp ? tp->next : NULL;
+ if (tp) {
+ chain_info->next = tp->next;
+ tcf_proto_get(tp);
+ } else {
+ chain_info->next = NULL;
+ }
return tp;
}
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
struct tcf_proto *tp, struct tcf_block *block,
struct Qdisc *q, u32 parent, void *fh,
- u32 portid, u32 seq, u16 flags, int event)
+ u32 portid, u32 seq, u16 flags, int event,
+ bool rtnl_held)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
@@ -1416,7 +1875,8 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
if (!fh) {
tcm->tcm_handle = 0;
} else {
- if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
+ if (tp->ops->dump &&
+ tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
goto nla_put_failure;
}
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -1431,7 +1891,8 @@ nla_put_failure:
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct tcf_proto *tp,
struct tcf_block *block, struct Qdisc *q,
- u32 parent, void *fh, int event, bool unicast)
+ u32 parent, void *fh, int event, bool unicast,
+ bool rtnl_held)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -1441,7 +1902,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
return -ENOBUFS;
if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
- n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
+ n->nlmsg_seq, n->nlmsg_flags, event,
+ rtnl_held) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -1457,7 +1919,7 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct tcf_proto *tp,
struct tcf_block *block, struct Qdisc *q,
u32 parent, void *fh, bool unicast, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -1468,13 +1930,14 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
return -ENOBUFS;
if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
- n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
+ n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
+ rtnl_held) <= 0) {
NL_SET_ERR_MSG(extack, "Failed to build del event notification");
kfree_skb(skb);
return -EINVAL;
}
- err = tp->ops->delete(tp, fh, last, extack);
+ err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
if (err) {
kfree_skb(skb);
return err;
@@ -1493,14 +1956,21 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
struct tcf_block *block, struct Qdisc *q,
u32 parent, struct nlmsghdr *n,
- struct tcf_chain *chain, int event)
+ struct tcf_chain *chain, int event,
+ bool rtnl_held)
{
struct tcf_proto *tp;
- for (tp = rtnl_dereference(chain->filter_chain);
- tp; tp = rtnl_dereference(tp->next))
+ for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
+ tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
tfilter_notify(net, oskb, n, tp, block,
- q, parent, NULL, event, false);
+ q, parent, NULL, event, false, rtnl_held);
+}
+
+static void tfilter_put(struct tcf_proto *tp, void *fh)
+{
+ if (tp->ops->put && fh)
+ tp->ops->put(tp, fh);
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@ -1523,6 +1993,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
void *fh;
int err;
int tp_created;
+ bool rtnl_held = false;
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@@ -1539,7 +2010,9 @@ replay:
prio = TC_H_MAJ(t->tcm_info);
prio_allocate = false;
parent = t->tcm_parent;
+ tp = NULL;
cl = 0;
+ block = NULL;
if (prio == 0) {
/* If no priority is provided by the user,
@@ -1556,8 +2029,27 @@ replay:
/* Find head of filter chain. */
- block = tcf_block_find(net, &q, &parent, &cl,
- t->tcm_ifindex, t->tcm_block_index, extack);
+ err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
+ if (err)
+ return err;
+
+ /* Take the rtnl mutex if rtnl_held was set to true on a previous iteration,
+ * the block is shared (no qdisc found), the qdisc is not unlocked, the
+ * classifier type is not specified, or the classifier is not unlocked.
+ */
+ if (rtnl_held ||
+ (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
+ !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ rtnl_held = true;
+ rtnl_lock();
+ }
+
+ err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
+ if (err)
+ goto errout;
+
+ block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
+ extack);
if (IS_ERR(block)) {
err = PTR_ERR(block);
goto errout;
@@ -1576,40 +2068,62 @@ replay:
goto errout;
}
+ mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
prio, prio_allocate);
if (IS_ERR(tp)) {
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
err = PTR_ERR(tp);
- goto errout;
+ goto errout_locked;
}
if (tp == NULL) {
+ struct tcf_proto *tp_new = NULL;
+
+ if (chain->flushing) {
+ err = -EAGAIN;
+ goto errout_locked;
+ }
+
/* Proto-tcf does not exist, create new one */
if (tca[TCA_KIND] == NULL || !protocol) {
NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
err = -EINVAL;
- goto errout;
+ goto errout_locked;
}
if (!(n->nlmsg_flags & NLM_F_CREATE)) {
NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
err = -ENOENT;
- goto errout;
+ goto errout_locked;
}
if (prio_allocate)
- prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
+ prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
+ &chain_info));
+
+ mutex_unlock(&chain->filter_chain_lock);
+ tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
+ protocol, prio, chain, rtnl_held,
+ extack);
+ if (IS_ERR(tp_new)) {
+ err = PTR_ERR(tp_new);
+ goto errout_tp;
+ }
- tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
- protocol, prio, chain, extack);
+ tp_created = 1;
+ tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
+ rtnl_held);
if (IS_ERR(tp)) {
err = PTR_ERR(tp);
- goto errout;
+ goto errout_tp;
}
- tp_created = 1;
- } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
+ } else {
+ mutex_unlock(&chain->filter_chain_lock);
+ }
+
+ if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
err = -EINVAL;
goto errout;
@@ -1624,6 +2138,7 @@ replay:
goto errout;
}
} else if (n->nlmsg_flags & NLM_F_EXCL) {
+ tfilter_put(tp, fh);
NL_SET_ERR_MSG(extack, "Filter already exists");
err = -EEXIST;
goto errout;
@@ -1637,25 +2152,41 @@ replay:
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
- extack);
+ rtnl_held, extack);
if (err == 0) {
- if (tp_created)
- tcf_chain_tp_insert(chain, &chain_info, tp);
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
- RTM_NEWTFILTER, false);
- } else {
- if (tp_created)
- tcf_proto_destroy(tp, NULL);
+ RTM_NEWTFILTER, false, rtnl_held);
+ tfilter_put(tp, fh);
}
errout:
- if (chain)
- tcf_chain_put(chain);
- tcf_block_release(q, block);
- if (err == -EAGAIN)
+ if (err && tp_created)
+ tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
+errout_tp:
+ if (chain) {
+ if (tp && !IS_ERR(tp))
+ tcf_proto_put(tp, rtnl_held, NULL);
+ if (!tp_created)
+ tcf_chain_put(chain);
+ }
+ tcf_block_release(q, block, rtnl_held);
+
+ if (rtnl_held)
+ rtnl_unlock();
+
+ if (err == -EAGAIN) {
+ /* Take rtnl lock in case EAGAIN is caused by concurrent flush
+ * of target chain.
+ */
+ rtnl_held = true;
/* Replay the request. */
goto replay;
+ }
return err;
+
+errout_locked:
+ mutex_unlock(&chain->filter_chain_lock);
+ goto errout;
}
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@ -1671,11 +2202,12 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
struct Qdisc *q = NULL;
struct tcf_chain_info chain_info;
struct tcf_chain *chain = NULL;
- struct tcf_block *block;
+ struct tcf_block *block = NULL;
struct tcf_proto *tp = NULL;
unsigned long cl = 0;
void *fh = NULL;
int err;
+ bool rtnl_held = false;
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@@ -1696,8 +2228,27 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
/* Find head of filter chain. */
- block = tcf_block_find(net, &q, &parent, &cl,
- t->tcm_ifindex, t->tcm_block_index, extack);
+ err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
+ if (err)
+ return err;
+
+ /* Take the rtnl mutex if flushing the whole chain, the block is shared
+ * (no qdisc found), the qdisc is not unlocked, the classifier type is not
+ * specified, or the classifier is not unlocked.
+ */
+ if (!prio ||
+ (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
+ !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ rtnl_held = true;
+ rtnl_lock();
+ }
+
+ err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
+ if (err)
+ goto errout;
+
+ block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
+ extack);
if (IS_ERR(block)) {
err = PTR_ERR(block);
goto errout;
@@ -1725,56 +2276,69 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
if (prio == 0) {
tfilter_notify_chain(net, skb, block, q, parent, n,
- chain, RTM_DELTFILTER);
- tcf_chain_flush(chain);
+ chain, RTM_DELTFILTER, rtnl_held);
+ tcf_chain_flush(chain, rtnl_held);
err = 0;
goto errout;
}
+ mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
prio, false);
if (!tp || IS_ERR(tp)) {
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
err = tp ? PTR_ERR(tp) : -ENOENT;
- goto errout;
+ goto errout_locked;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
err = -EINVAL;
+ goto errout_locked;
+ } else if (t->tcm_handle == 0) {
+ tcf_chain_tp_remove(chain, &chain_info, tp);
+ mutex_unlock(&chain->filter_chain_lock);
+
+ tcf_proto_put(tp, rtnl_held, NULL);
+ tfilter_notify(net, skb, n, tp, block, q, parent, fh,
+ RTM_DELTFILTER, false, rtnl_held);
+ err = 0;
goto errout;
}
+ mutex_unlock(&chain->filter_chain_lock);
fh = tp->ops->get(tp, t->tcm_handle);
if (!fh) {
- if (t->tcm_handle == 0) {
- tcf_chain_tp_remove(chain, &chain_info, tp);
- tfilter_notify(net, skb, n, tp, block, q, parent, fh,
- RTM_DELTFILTER, false);
- tcf_proto_destroy(tp, extack);
- err = 0;
- } else {
- NL_SET_ERR_MSG(extack, "Specified filter handle not found");
- err = -ENOENT;
- }
+ NL_SET_ERR_MSG(extack, "Specified filter handle not found");
+ err = -ENOENT;
} else {
bool last;
err = tfilter_del_notify(net, skb, n, tp, block,
q, parent, fh, false, &last,
- extack);
+ rtnl_held, extack);
+
if (err)
goto errout;
- if (last) {
- tcf_chain_tp_remove(chain, &chain_info, tp);
- tcf_proto_destroy(tp, extack);
- }
+ if (last)
+ tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
}
errout:
- if (chain)
+ if (chain) {
+ if (tp && !IS_ERR(tp))
+ tcf_proto_put(tp, rtnl_held, NULL);
tcf_chain_put(chain);
- tcf_block_release(q, block);
+ }
+ tcf_block_release(q, block, rtnl_held);
+
+ if (rtnl_held)
+ rtnl_unlock();
+
return err;
+
+errout_locked:
+ mutex_unlock(&chain->filter_chain_lock);
+ goto errout;
}
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@ -1790,11 +2354,12 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
struct Qdisc *q = NULL;
struct tcf_chain_info chain_info;
struct tcf_chain *chain = NULL;
- struct tcf_block *block;
+ struct tcf_block *block = NULL;
struct tcf_proto *tp = NULL;
unsigned long cl = 0;
void *fh = NULL;
int err;
+ bool rtnl_held = false;
err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
@@ -1812,8 +2377,26 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
/* Find head of filter chain. */
- block = tcf_block_find(net, &q, &parent, &cl,
- t->tcm_ifindex, t->tcm_block_index, extack);
+ err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
+ if (err)
+ return err;
+
+ /* Take the rtnl mutex if the block is shared (no qdisc found), the qdisc
+ * is not unlocked, the classifier type is not specified, or the classifier
+ * is not unlocked.
+ */
+ if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
+ !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+ rtnl_held = true;
+ rtnl_lock();
+ }
+
+ err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
+ if (err)
+ goto errout;
+
+ block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
+ extack);
if (IS_ERR(block)) {
err = PTR_ERR(block);
goto errout;
@@ -1832,8 +2415,10 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
+ mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
prio, false);
+ mutex_unlock(&chain->filter_chain_lock);
if (!tp || IS_ERR(tp)) {
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
err = tp ? PTR_ERR(tp) : -ENOENT;
@@ -1851,15 +2436,23 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
err = -ENOENT;
} else {
err = tfilter_notify(net, skb, n, tp, block, q, parent,
- fh, RTM_NEWTFILTER, true);
+ fh, RTM_NEWTFILTER, true, rtnl_held);
if (err < 0)
NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
}
+ tfilter_put(tp, fh);
errout:
- if (chain)
+ if (chain) {
+ if (tp && !IS_ERR(tp))
+ tcf_proto_put(tp, rtnl_held, NULL);
tcf_chain_put(chain);
- tcf_block_release(q, block);
+ }
+ tcf_block_release(q, block, rtnl_held);
+
+ if (rtnl_held)
+ rtnl_unlock();
+
return err;
}
@@ -1880,7 +2473,7 @@ static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
n, NETLINK_CB(a->cb->skb).portid,
a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWTFILTER);
+ RTM_NEWTFILTER, true);
}
static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
@@ -1890,11 +2483,15 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
struct net *net = sock_net(skb->sk);
struct tcf_block *block = chain->block;
struct tcmsg *tcm = nlmsg_data(cb->nlh);
+ struct tcf_proto *tp, *tp_prev;
struct tcf_dump_args arg;
- struct tcf_proto *tp;
- for (tp = rtnl_dereference(chain->filter_chain);
- tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
+ for (tp = __tcf_get_next_proto(chain, NULL);
+ tp;
+ tp_prev = tp,
+ tp = __tcf_get_next_proto(chain, tp),
+ tcf_proto_put(tp_prev, true, NULL),
+ (*p_index)++) {
if (*p_index < index_start)
continue;
if (TC_H_MAJ(tcm->tcm_info) &&
@@ -1910,9 +2507,8 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWTFILTER) <= 0)
- return false;
-
+ RTM_NEWTFILTER, true) <= 0)
+ goto errout;
cb->args[1] = 1;
}
if (!tp->ops->walk)
@@ -1927,23 +2523,27 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
arg.w.skip = cb->args[1] - 1;
arg.w.count = 0;
arg.w.cookie = cb->args[2];
- tp->ops->walk(tp, &arg.w);
+ tp->ops->walk(tp, &arg.w, true);
cb->args[2] = arg.w.cookie;
cb->args[1] = arg.w.count + 1;
if (arg.w.stop)
- return false;
+ goto errout;
}
return true;
+
+errout:
+ tcf_proto_put(tp, true, NULL);
+ return false;
}
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct tcf_chain *chain, *chain_prev;
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
struct Qdisc *q = NULL;
struct tcf_block *block;
- struct tcf_chain *chain;
struct tcmsg *tcm = nlmsg_data(cb->nlh);
long index_start;
long index;
@@ -2007,19 +2607,24 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
index_start = cb->args[0];
index = 0;
- list_for_each_entry(chain, &block->chain_list, list) {
+ for (chain = __tcf_get_next_chain(block, NULL);
+ chain;
+ chain_prev = chain,
+ chain = __tcf_get_next_chain(block, chain),
+ tcf_chain_put(chain_prev)) {
if (tca[TCA_CHAIN] &&
nla_get_u32(tca[TCA_CHAIN]) != chain->index)
continue;
if (!tcf_chain_dump(chain, q, parent, skb, cb,
index_start, &index)) {
+ tcf_chain_put(chain);
err = -EMSGSIZE;
break;
}
}
if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
- tcf_block_refcnt_put(block);
+ tcf_block_refcnt_put(block, true);
cb->args[0] = index;
out:
@@ -2029,8 +2634,10 @@ out:
return skb->len;
}
-static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net,
- struct sk_buff *skb, struct tcf_block *block,
+static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
+ void *tmplt_priv, u32 chain_index,
+ struct net *net, struct sk_buff *skb,
+ struct tcf_block *block,
u32 portid, u32 seq, u16 flags, int event)
{
unsigned char *b = skb_tail_pointer(skb);
@@ -2039,8 +2646,8 @@ static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net,
struct tcmsg *tcm;
void *priv;
- ops = chain->tmplt_ops;
- priv = chain->tmplt_priv;
+ ops = tmplt_ops;
+ priv = tmplt_priv;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
if (!nlh)
@@ -2058,7 +2665,7 @@ static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net,
tcm->tcm_block_index = block->index;
}
- if (nla_put_u32(skb, TCA_CHAIN, chain->index))
+ if (nla_put_u32(skb, TCA_CHAIN, chain_index))
goto nla_put_failure;
if (ops) {
@@ -2089,7 +2696,8 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
if (!skb)
return -ENOBUFS;
- if (tc_chain_fill_node(chain, net, skb, block, portid,
+ if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
+ chain->index, net, skb, block, portid,
seq, flags, event) <= 0) {
kfree_skb(skb);
return -EINVAL;
@@ -2101,6 +2709,31 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}
+static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
+ void *tmplt_priv, u32 chain_index,
+ struct tcf_block *block, struct sk_buff *oskb,
+ u32 seq, u16 flags, bool unicast)
+{
+ u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+ struct net *net = block->net;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
+ block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (unicast)
+ return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+
+ return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
+}
+
static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
struct nlattr **tca,
struct netlink_ext_ack *extack)
@@ -2112,7 +2745,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
if (!tca[TCA_KIND])
return 0;
- ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), extack);
+ ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
if (IS_ERR(ops))
return PTR_ERR(ops);
if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
@@ -2130,16 +2763,15 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
return 0;
}
-static void tc_chain_tmplt_del(struct tcf_chain *chain)
+static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
+ void *tmplt_priv)
{
- const struct tcf_proto_ops *ops = chain->tmplt_ops;
-
/* If template ops are set, no work to do for us. */
- if (!ops)
+ if (!tmplt_ops)
return;
- ops->tmplt_destroy(chain->tmplt_priv);
- module_put(ops->owner);
+ tmplt_ops->tmplt_destroy(tmplt_priv);
+ module_put(tmplt_ops->owner);
}
/* Add/delete/get a chain */
@@ -2182,6 +2814,8 @@ replay:
err = -EINVAL;
goto errout_block;
}
+
+ mutex_lock(&block->lock);
chain = tcf_chain_lookup(block, chain_index);
if (n->nlmsg_type == RTM_NEWCHAIN) {
if (chain) {
@@ -2193,54 +2827,61 @@ replay:
} else {
NL_SET_ERR_MSG(extack, "Filter chain already exists");
err = -EEXIST;
- goto errout_block;
+ goto errout_block_locked;
}
} else {
if (!(n->nlmsg_flags & NLM_F_CREATE)) {
NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
err = -ENOENT;
- goto errout_block;
+ goto errout_block_locked;
}
chain = tcf_chain_create(block, chain_index);
if (!chain) {
NL_SET_ERR_MSG(extack, "Failed to create filter chain");
err = -ENOMEM;
- goto errout_block;
+ goto errout_block_locked;
}
}
} else {
if (!chain || tcf_chain_held_by_acts_only(chain)) {
NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
err = -EINVAL;
- goto errout_block;
+ goto errout_block_locked;
}
tcf_chain_hold(chain);
}
+ if (n->nlmsg_type == RTM_NEWCHAIN) {
+ /* Modifying a chain requires holding its parent block's lock. In case
+ * the chain was successfully added, take a reference to the chain.
+ * This ensures that an empty chain does not disappear at the end of
+ * this function.
+ */
+ tcf_chain_hold(chain);
+ chain->explicitly_created = true;
+ }
+ mutex_unlock(&block->lock);
+
switch (n->nlmsg_type) {
case RTM_NEWCHAIN:
err = tc_chain_tmplt_add(chain, net, tca, extack);
- if (err)
+ if (err) {
+ tcf_chain_put_explicitly_created(chain);
goto errout;
- /* In case the chain was successfully added, take a reference
- * to the chain. This ensures that an empty chain
- * does not disappear at the end of this function.
- */
- tcf_chain_hold(chain);
- chain->explicitly_created = true;
+ }
+
tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
RTM_NEWCHAIN, false);
break;
case RTM_DELCHAIN:
tfilter_notify_chain(net, skb, block, q, parent, n,
- chain, RTM_DELTFILTER);
+ chain, RTM_DELTFILTER, true);
/* Flush the chain first as the user requested chain removal. */
- tcf_chain_flush(chain);
+ tcf_chain_flush(chain, true);
/* In case the chain was successfully deleted, put a reference
* to the chain previously taken during addition.
*/
tcf_chain_put_explicitly_created(chain);
- chain->explicitly_created = false;
break;
case RTM_GETCHAIN:
err = tc_chain_notify(chain, skb, n->nlmsg_seq,
@@ -2257,11 +2898,15 @@ replay:
errout:
tcf_chain_put(chain);
errout_block:
- tcf_block_release(q, block);
+ tcf_block_release(q, block, true);
if (err == -EAGAIN)
/* Replay the request. */
goto replay;
return err;
+
+errout_block_locked:
+ mutex_unlock(&block->lock);
+ goto errout_block;
}
/* called with RTNL */
@@ -2271,8 +2916,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
struct nlattr *tca[TCA_MAX + 1];
struct Qdisc *q = NULL;
struct tcf_block *block;
- struct tcf_chain *chain;
struct tcmsg *tcm = nlmsg_data(cb->nlh);
+ struct tcf_chain *chain;
long index_start;
long index;
u32 parent;
@@ -2335,6 +2980,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
index_start = cb->args[0];
index = 0;
+ mutex_lock(&block->lock);
list_for_each_entry(chain, &block->chain_list, list) {
if ((tca[TCA_CHAIN] &&
nla_get_u32(tca[TCA_CHAIN]) != chain->index))
@@ -2345,7 +2991,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
}
if (tcf_chain_held_by_acts_only(chain))
continue;
- err = tc_chain_fill_node(chain, net, skb, block,
+ err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
+ chain->index, net, skb, block,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWCHAIN);
@@ -2353,9 +3000,10 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
break;
index++;
}
+ mutex_unlock(&block->lock);
if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
- tcf_block_refcnt_put(block);
+ tcf_block_refcnt_put(block, true);
cb->args[0] = index;
out:
@@ -2377,7 +3025,7 @@ EXPORT_SYMBOL(tcf_exts_destroy);
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
{
@@ -2387,7 +3035,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
if (exts->police && tb[exts->police]) {
act = tcf_action_init_1(net, tp, tb[exts->police],
rate_tlv, "police", ovr,
- TCA_ACT_BIND, true, extack);
+ TCA_ACT_BIND, rtnl_held,
+ extack);
if (IS_ERR(act))
return PTR_ERR(act);
@@ -2399,13 +3048,12 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
err = tcf_action_init(net, tp, tb[exts->action],
rate_tlv, NULL, ovr, TCA_ACT_BIND,
- exts->actions, &attr_size, true,
- extack);
+ exts->actions, &attr_size,
+ rtnl_held, extack);
if (err < 0)
return err;
exts->nr_actions = err;
}
- exts->net = net;
}
#else
if ((exts->action && tb[exts->action]) ||
@@ -2516,6 +3164,114 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
}
EXPORT_SYMBOL(tc_setup_cb_call);
+int tc_setup_flow_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts)
+{
+ const struct tc_action *act;
+ int i, j, k;
+
+ if (!exts)
+ return 0;
+
+ j = 0;
+ tcf_exts_for_each_action(i, act, exts) {
+ struct flow_action_entry *entry;
+
+ entry = &flow_action->entries[j];
+ if (is_tcf_gact_ok(act)) {
+ entry->id = FLOW_ACTION_ACCEPT;
+ } else if (is_tcf_gact_shot(act)) {
+ entry->id = FLOW_ACTION_DROP;
+ } else if (is_tcf_gact_trap(act)) {
+ entry->id = FLOW_ACTION_TRAP;
+ } else if (is_tcf_gact_goto_chain(act)) {
+ entry->id = FLOW_ACTION_GOTO;
+ entry->chain_index = tcf_gact_goto_chain_index(act);
+ } else if (is_tcf_mirred_egress_redirect(act)) {
+ entry->id = FLOW_ACTION_REDIRECT;
+ entry->dev = tcf_mirred_dev(act);
+ } else if (is_tcf_mirred_egress_mirror(act)) {
+ entry->id = FLOW_ACTION_MIRRED;
+ entry->dev = tcf_mirred_dev(act);
+ } else if (is_tcf_vlan(act)) {
+ switch (tcf_vlan_action(act)) {
+ case TCA_VLAN_ACT_PUSH:
+ entry->id = FLOW_ACTION_VLAN_PUSH;
+ entry->vlan.vid = tcf_vlan_push_vid(act);
+ entry->vlan.proto = tcf_vlan_push_proto(act);
+ entry->vlan.prio = tcf_vlan_push_prio(act);
+ break;
+ case TCA_VLAN_ACT_POP:
+ entry->id = FLOW_ACTION_VLAN_POP;
+ break;
+ case TCA_VLAN_ACT_MODIFY:
+ entry->id = FLOW_ACTION_VLAN_MANGLE;
+ entry->vlan.vid = tcf_vlan_push_vid(act);
+ entry->vlan.proto = tcf_vlan_push_proto(act);
+ entry->vlan.prio = tcf_vlan_push_prio(act);
+ break;
+ default:
+ goto err_out;
+ }
+ } else if (is_tcf_tunnel_set(act)) {
+ entry->id = FLOW_ACTION_TUNNEL_ENCAP;
+ entry->tunnel = tcf_tunnel_info(act);
+ } else if (is_tcf_tunnel_release(act)) {
+ entry->id = FLOW_ACTION_TUNNEL_DECAP;
+ entry->tunnel = tcf_tunnel_info(act);
+ } else if (is_tcf_pedit(act)) {
+ for (k = 0; k < tcf_pedit_nkeys(act); k++) {
+ switch (tcf_pedit_cmd(act, k)) {
+ case TCA_PEDIT_KEY_EX_CMD_SET:
+ entry->id = FLOW_ACTION_MANGLE;
+ break;
+ case TCA_PEDIT_KEY_EX_CMD_ADD:
+ entry->id = FLOW_ACTION_ADD;
+ break;
+ default:
+ goto err_out;
+ }
+ entry->mangle.htype = tcf_pedit_htype(act, k);
+ entry->mangle.mask = tcf_pedit_mask(act, k);
+ entry->mangle.val = tcf_pedit_val(act, k);
+ entry->mangle.offset = tcf_pedit_offset(act, k);
+ entry = &flow_action->entries[++j];
+ }
+ } else if (is_tcf_csum(act)) {
+ entry->id = FLOW_ACTION_CSUM;
+ entry->csum_flags = tcf_csum_update_flags(act);
+ } else if (is_tcf_skbedit_mark(act)) {
+ entry->id = FLOW_ACTION_MARK;
+ entry->mark = tcf_skbedit_mark(act);
+ } else {
+ goto err_out;
+ }
+
+ if (!is_tcf_pedit(act))
+ j++;
+ }
+ return 0;
+err_out:
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(tc_setup_flow_action);
+
+unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
+{
+ unsigned int num_acts = 0;
+ struct tc_action *act;
+ int i;
+
+ tcf_exts_for_each_action(i, act, exts) {
+ if (is_tcf_pedit(act))
+ num_acts += tcf_pedit_nkeys(act);
+ else
+ num_acts++;
+ }
+ return num_acts;
+}
+EXPORT_SYMBOL(tcf_exts_num_actions);
+
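
(Editorial sketch, not part of the patch.) tcf_exts_num_actions() and tc_setup_flow_action() are meant to be used together: the first sizes the intermediate flow_rule, the second translates the tcf_exts actions into flow_action entries a driver callback can consume. The flower changes further down do exactly this in fl_hw_replace_filter(); a condensed, hypothetical version, where the 'consume' callback stands in for the tc_setup_cb_call() step, could look like:

#include <linux/slab.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int example_offload_exts(struct tcf_exts *exts,
                                int (*consume)(struct flow_rule *rule))
{
        struct flow_rule *rule;
        int err;

        /* Size the rule from the extension list, including pedit keys. */
        rule = flow_rule_alloc(tcf_exts_num_actions(exts));
        if (!rule)
                return -ENOMEM;

        err = tc_setup_flow_action(&rule->action, exts);
        if (!err)
                err = consume(rule);    /* hypothetical driver callback */

        kfree(rule);
        return err;
}
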
static __net_init int tcf_net_init(struct net *net)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
@@ -2556,10 +3312,12 @@ static int __init tc_filter_init(void)
if (err)
goto err_rhash_setup_block_ht;
- rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
+ rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
+ RTNL_FLAG_DOIT_UNLOCKED);
+ rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
+ RTNL_FLAG_DOIT_UNLOCKED);
rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
- tc_dump_tfilter, 0);
+ tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 6a5dce8baf19..687b0af67878 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -18,6 +18,7 @@
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/idr.h>
+#include <linux/percpu.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
@@ -35,6 +36,7 @@ struct basic_filter {
struct tcf_result res;
struct tcf_proto *tp;
struct list_head link;
+ struct tc_basic_pcnt __percpu *pf;
struct rcu_work rwork;
};
@@ -46,8 +48,10 @@ static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct basic_filter *f;
list_for_each_entry_rcu(f, &head->flist, link) {
+ __this_cpu_inc(f->pf->rcnt);
if (!tcf_em_tree_match(skb, &f->ematches, NULL))
continue;
+ __this_cpu_inc(f->pf->rhit);
*res = f->res;
r = tcf_exts_exec(skb, &f->exts, res);
if (r < 0)
@@ -89,6 +93,7 @@ static void __basic_delete_filter(struct basic_filter *f)
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
tcf_exts_put_net(&f->exts);
+ free_percpu(f->pf);
kfree(f);
}
@@ -102,7 +107,8 @@ static void basic_delete_filter_work(struct work_struct *work)
rtnl_unlock();
}
-static void basic_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void basic_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f, *n;
@@ -121,7 +127,7 @@ static void basic_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
}
static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f = arg;
@@ -148,7 +154,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
+ err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
if (err < 0)
return err;
@@ -168,7 +174,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
static int basic_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, void **arg, bool ovr,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
int err;
struct basic_head *head = rtnl_dereference(tp->root);
@@ -193,7 +199,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (!fnew)
return -ENOBUFS;
- err = tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
+ err = tcf_exts_init(&fnew->exts, net, TCA_BASIC_ACT, TCA_BASIC_POLICE);
if (err < 0)
goto errout;
@@ -208,6 +214,11 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (err)
goto errout;
fnew->handle = handle;
+ fnew->pf = alloc_percpu(struct tc_basic_pcnt);
+ if (!fnew->pf) {
+ err = -ENOMEM;
+ goto errout;
+ }
err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
extack);
@@ -231,12 +242,14 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
return 0;
errout:
+ free_percpu(fnew->pf);
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
return err;
}
-static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f;
@@ -263,10 +276,12 @@ static void basic_bind_class(void *fh, u32 classid, unsigned long cl)
}
static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
+ struct tc_basic_pcnt gpf = {};
struct basic_filter *f = fh;
struct nlattr *nest;
+ int cpu;
if (f == NULL)
return skb->len;
@@ -281,6 +296,18 @@ static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
goto nla_put_failure;
+ for_each_possible_cpu(cpu) {
+ struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);
+
+ gpf.rcnt += pf->rcnt;
+ gpf.rhit += pf->rhit;
+ }
+
+ if (nla_put_64bit(skb, TCA_BASIC_PCNT,
+ sizeof(struct tc_basic_pcnt),
+ &gpf, TCA_BASIC_PAD))
+ goto nla_put_failure;
+
if (tcf_exts_dump(skb, &f->exts) < 0 ||
tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
goto nla_put_failure;
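
(Editorial sketch, not part of the patch.) The cls_basic changes above follow the usual kernel per-CPU counter pattern: one struct tc_basic_pcnt per possible CPU, lockless increments on the classify fast path, and a fold over all CPUs at dump time. A self-contained illustration of the same scheme with made-up example_* names; this_cpu_inc() is used here as the preemption-safe form, while basic_classify() above can use __this_cpu_inc() because it runs in softirq context.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct example_pcnt {
        u64 seen;
        u64 hit;
};

static struct example_pcnt __percpu *example_counters;

static int example_init(void)
{
        example_counters = alloc_percpu(struct example_pcnt);
        return example_counters ? 0 : -ENOMEM;
}

/* Fast path: per-CPU increments without a shared lock, as in basic_classify(). */
static void example_record(bool matched)
{
        this_cpu_inc(example_counters->seen);
        if (matched)
                this_cpu_inc(example_counters->hit);
}

/* Slow path: fold all CPUs into one total, as in basic_dump(). */
static void example_fold(struct example_pcnt *total)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct example_pcnt *p = per_cpu_ptr(example_counters, cpu);

                total->seen += p->seen;
                total->hit += p->hit;
        }
}
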
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index a95cb240a606..b4ac58039cb1 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -298,7 +298,7 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
}
static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
@@ -307,7 +307,7 @@ static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
return 0;
}
-static void cls_bpf_destroy(struct tcf_proto *tp,
+static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
@@ -417,7 +417,8 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
return -EINVAL;
- ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
+ ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
+ extack);
if (ret < 0)
return ret;
@@ -455,7 +456,8 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr, struct netlink_ext_ack *extack)
+ void **arg, bool ovr, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *oldprog = *arg;
@@ -475,7 +477,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (!prog)
return -ENOBUFS;
- ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+ ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
if (ret < 0)
goto errout;
@@ -575,7 +577,7 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
}
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *tm)
+ struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
struct cls_bpf_prog *prog = fh;
struct nlattr *nest;
@@ -635,7 +637,8 @@ static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
prog->res.class = cl;
}
-static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 3bc01bdde165..4c1567854f95 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -78,7 +78,7 @@ static void cls_cgroup_destroy_work(struct work_struct *work)
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr,
+ void **arg, bool ovr, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_CGROUP_MAX + 1];
@@ -99,7 +99,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
if (!new)
return -ENOBUFS;
- err = tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
+ err = tcf_exts_init(&new->exts, net, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
if (err < 0)
goto errout;
new->handle = handle;
@@ -110,7 +110,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
goto errout;
err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr,
- extack);
+ true, extack);
if (err < 0)
goto errout;
@@ -130,7 +130,7 @@ errout:
return err;
}
-static void cls_cgroup_destroy(struct tcf_proto *tp,
+static void cls_cgroup_destroy(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
@@ -145,18 +145,21 @@ static void cls_cgroup_destroy(struct tcf_proto *tp,
}
static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
if (arg->count < arg->skip)
goto skip;
+ if (!head)
+ return;
if (arg->fn(tp, head, arg) < 0) {
arg->stop = 1;
return;
@@ -166,7 +169,7 @@ skip:
}
static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
struct nlattr *nest;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 2bb043cd436b..eece1ee26930 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -391,7 +391,8 @@ static void flow_destroy_filter_work(struct work_struct *work)
static int flow_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr, struct netlink_ext_ack *extack)
+ void **arg, bool ovr, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *fold, *fnew;
@@ -440,12 +441,12 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
goto err1;
- err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+ err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
if (err < 0)
goto err2;
err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
- extack);
+ true, extack);
if (err < 0)
goto err2;
@@ -566,7 +567,7 @@ err1:
}
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f = arg;
@@ -590,7 +591,8 @@ static int flow_init(struct tcf_proto *tp)
return 0;
}
-static void flow_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f, *next;
@@ -617,7 +619,7 @@ static void *flow_get(struct tcf_proto *tp, u32 handle)
}
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct flow_filter *f = fh;
struct nlattr *nest;
@@ -677,7 +679,8 @@ nla_put_failure:
return -1;
}
-static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index dad04e710493..27300a3e76c7 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -381,16 +381,31 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
bool skip_sw = tc_skip_sw(f->flags);
int err;
+ cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+ if (!cls_flower.rule)
+ return -ENOMEM;
+
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
cls_flower.command = TC_CLSFLOWER_REPLACE;
cls_flower.cookie = (unsigned long) f;
- cls_flower.dissector = &f->mask->dissector;
- cls_flower.mask = &f->mask->key;
- cls_flower.key = &f->mkey;
- cls_flower.exts = &f->exts;
+ cls_flower.rule->match.dissector = &f->mask->dissector;
+ cls_flower.rule->match.mask = &f->mask->key;
+ cls_flower.rule->match.key = &f->mkey;
cls_flower.classid = f->res.classid;
+ err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+ if (err) {
+ kfree(cls_flower.rule);
+ if (skip_sw) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+ return err;
+ }
+ return 0;
+ }
+
err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
+ kfree(cls_flower.rule);
+
if (err < 0) {
fl_hw_destroy_filter(tp, f, NULL);
return err;
@@ -413,10 +428,13 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f;
- cls_flower.exts = &f->exts;
cls_flower.classid = f->res.classid;
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
+
+ tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
+ cls_flower.stats.pkts,
+ cls_flower.stats.lastused);
}
static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
@@ -451,7 +469,8 @@ static void fl_destroy_sleepable(struct work_struct *work)
module_put(THIS_MODULE);
}
-static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct fl_flow_mask *mask, *next_mask;
@@ -1258,7 +1277,8 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
+ err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
+ extack);
if (err < 0)
return err;
@@ -1285,22 +1305,29 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
static int fl_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr, struct netlink_ext_ack *extack)
+ void **arg, bool ovr, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *fold = *arg;
struct cls_fl_filter *fnew;
+ struct fl_flow_mask *mask;
struct nlattr **tb;
- struct fl_flow_mask mask = {};
int err;
if (!tca[TCA_OPTIONS])
return -EINVAL;
- tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
- if (!tb)
+ mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
+ if (!mask)
return -ENOBUFS;
+ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+ if (!tb) {
+ err = -ENOBUFS;
+ goto errout_mask_alloc;
+ }
+
err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
fl_policy, NULL);
if (err < 0)
@@ -1317,7 +1344,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
goto errout_tb;
}
- err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
+ err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
if (err < 0)
goto errout;
@@ -1343,12 +1370,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
}
- err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
+ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
tp->chain->tmplt_priv, extack);
if (err)
goto errout_idr;
- err = fl_check_assign_mask(head, fnew, fold, &mask);
+ err = fl_check_assign_mask(head, fnew, fold, mask);
if (err)
goto errout_idr;
@@ -1365,7 +1392,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_skip_hw(fnew->flags)) {
err = fl_hw_replace_filter(tp, fnew, extack);
if (err)
- goto errout_mask;
+ goto errout_mask_ht;
}
if (!tc_in_hw(fnew->flags))
@@ -1392,8 +1419,13 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
kfree(tb);
+ kfree(mask);
return 0;
+errout_mask_ht:
+ rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
+ fnew->mask->filter_ht_params);
+
errout_mask:
fl_mask_put(head, fnew->mask, false);
@@ -1405,11 +1437,13 @@ errout:
kfree(fnew);
errout_tb:
kfree(tb);
+errout_mask_alloc:
+ kfree(mask);
return err;
}
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f = arg;
@@ -1421,7 +1455,8 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
return 0;
}
-static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f;
@@ -1454,18 +1489,36 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
if (tc_skip_hw(f->flags))
continue;
+ cls_flower.rule =
+ flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+ if (!cls_flower.rule)
+ return -ENOMEM;
+
tc_cls_common_offload_init(&cls_flower.common, tp,
f->flags, extack);
cls_flower.command = add ?
TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long)f;
- cls_flower.dissector = &mask->dissector;
- cls_flower.mask = &mask->key;
- cls_flower.key = &f->mkey;
- cls_flower.exts = &f->exts;
+ cls_flower.rule->match.dissector = &mask->dissector;
+ cls_flower.rule->match.mask = &mask->key;
+ cls_flower.rule->match.key = &f->mkey;
+
+ err = tc_setup_flow_action(&cls_flower.rule->action,
+ &f->exts);
+ if (err) {
+ kfree(cls_flower.rule);
+ if (tc_skip_sw(f->flags)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+ return err;
+ }
+ continue;
+ }
+
cls_flower.classid = f->res.classid;
err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+ kfree(cls_flower.rule);
+
if (err) {
if (add && tc_skip_sw(f->flags))
return err;
@@ -1480,25 +1533,30 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
return 0;
}
-static void fl_hw_create_tmplt(struct tcf_chain *chain,
- struct fl_flow_tmplt *tmplt)
+static int fl_hw_create_tmplt(struct tcf_chain *chain,
+ struct fl_flow_tmplt *tmplt)
{
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = chain->block;
- struct tcf_exts dummy_exts = { 0, };
+
+ cls_flower.rule = flow_rule_alloc(0);
+ if (!cls_flower.rule)
+ return -ENOMEM;
cls_flower.common.chain_index = chain->index;
cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
cls_flower.cookie = (unsigned long) tmplt;
- cls_flower.dissector = &tmplt->dissector;
- cls_flower.mask = &tmplt->mask;
- cls_flower.key = &tmplt->dummy_key;
- cls_flower.exts = &dummy_exts;
+ cls_flower.rule->match.dissector = &tmplt->dissector;
+ cls_flower.rule->match.mask = &tmplt->mask;
+ cls_flower.rule->match.key = &tmplt->dummy_key;
/* We don't care if driver (any of them) fails to handle this
* call. It serves just as a hint for it.
*/
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
+ kfree(cls_flower.rule);
+
+ return 0;
}
static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
@@ -1542,12 +1600,14 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
if (err)
goto errout_tmplt;
- kfree(tb);
fl_init_dissector(&tmplt->dissector, &tmplt->mask);
- fl_hw_create_tmplt(chain, tmplt);
+ err = fl_hw_create_tmplt(chain, tmplt);
+ if (err)
+ goto errout_tmplt;
+ kfree(tb);
return tmplt;
errout_tmplt:
@@ -1995,7 +2055,7 @@ nla_put_failure:
}
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct cls_fl_filter *f = fh;
struct nlattr *nest;
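The cls_flower hunks above stop handing drivers raw pointers into the filter (dissector, mask, key, exts) and instead build a short-lived intermediate rule: allocate it, fill in the match side, translate the tc actions into it, run the driver callback, and free it. The condensed fragment below illustrates that shape; it reuses only the calls visible in the hunks above, abbreviates the skip_sw error handling, and is not compilable on its own.

	/* Illustrative fragment condensed from the fl_hw_replace_filter() hunk
	 * above; not a standalone translation unit.
	 */
	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	/* Match side: point the rule at the filter's dissector, mask and key. */
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;

	/* Action side: translate tc actions into the driver-facing form. */
	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		/* Only fatal when there is no software fallback (skip_sw). */
		return skip_sw ? err : 0;
	}

	/* The rule only needs to live for the duration of the callback. */
	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
	kfree(cls_flower.rule);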
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 29eeeaf3ea44..ad036b00427d 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -139,7 +139,8 @@ static void fw_delete_filter_work(struct work_struct *work)
rtnl_unlock();
}
-static void fw_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void fw_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f;
@@ -163,7 +164,7 @@ static void fw_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
}
static int fw_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = arg;
@@ -217,7 +218,7 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
int err;
err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr,
- extack);
+ true, extack);
if (err < 0)
return err;
@@ -250,7 +251,8 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
static int fw_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca, void **arg,
- bool ovr, struct netlink_ext_ack *extack)
+ bool ovr, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = *arg;
@@ -283,7 +285,8 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
#endif /* CONFIG_NET_CLS_IND */
fnew->tp = f->tp;
- err = tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE);
+ err = tcf_exts_init(&fnew->exts, net, TCA_FW_ACT,
+ TCA_FW_POLICE);
if (err < 0) {
kfree(fnew);
return err;
@@ -332,7 +335,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
if (f == NULL)
return -ENOBUFS;
- err = tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
+ err = tcf_exts_init(&f->exts, net, TCA_FW_ACT, TCA_FW_POLICE);
if (err < 0)
goto errout;
f->id = handle;
@@ -354,7 +357,8 @@ errout:
return err;
}
-static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct fw_head *head = rtnl_dereference(tp->root);
int h;
@@ -384,7 +388,7 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
}
static int fw_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = fh;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 0e408ee9dcec..459921bd3d87 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/percpu.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
@@ -22,6 +23,7 @@ struct cls_mall_head {
u32 handle;
u32 flags;
unsigned int in_hw_count;
+ struct tc_matchall_pcnt __percpu *pf;
struct rcu_work rwork;
};
@@ -34,6 +36,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
return -1;
*res = head->res;
+ __this_cpu_inc(head->pf->rhit);
return tcf_exts_exec(skb, &head->exts, res);
}
@@ -46,6 +49,7 @@ static void __mall_destroy(struct cls_mall_head *head)
{
tcf_exts_destroy(&head->exts);
tcf_exts_put_net(&head->exts);
+ free_percpu(head->pf);
kfree(head);
}
@@ -105,7 +109,8 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
return 0;
}
-static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
@@ -141,7 +146,8 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, extack);
+ err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
+ extack);
if (err < 0)
return err;
@@ -155,7 +161,8 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
static int mall_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
- void **arg, bool ovr, struct netlink_ext_ack *extack)
+ void **arg, bool ovr, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
struct nlattr *tb[TCA_MATCHALL_MAX + 1];
@@ -184,7 +191,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
if (!new)
return -ENOBUFS;
- err = tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
+ err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
if (err)
goto err_exts_init;
@@ -192,6 +199,11 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
handle = 1;
new->handle = handle;
new->flags = flags;
+ new->pf = alloc_percpu(struct tc_matchall_pcnt);
+ if (!new->pf) {
+ err = -ENOMEM;
+ goto err_alloc_percpu;
+ }
err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
extack);
@@ -214,6 +226,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
err_replace_hw_filter:
err_set_parms:
+ free_percpu(new->pf);
+err_alloc_percpu:
tcf_exts_destroy(&new->exts);
err_exts_init:
kfree(new);
@@ -221,17 +235,21 @@ err_exts_init:
}
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
if (arg->count < arg->skip)
goto skip;
+
+ if (!head)
+ return;
if (arg->fn(tp, head, arg) < 0)
arg->stop = 1;
skip:
@@ -268,10 +286,12 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
}
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
+ struct tc_matchall_pcnt gpf = {};
struct cls_mall_head *head = fh;
struct nlattr *nest;
+ int cpu;
if (!head)
return skb->len;
@@ -289,6 +309,17 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
goto nla_put_failure;
+ for_each_possible_cpu(cpu) {
+ struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);
+
+ gpf.rhit += pf->rhit;
+ }
+
+ if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
+ sizeof(struct tc_matchall_pcnt),
+ &gpf, TCA_MATCHALL_PAD))
+ goto nla_put_failure;
+
if (tcf_exts_dump(skb, &head->exts))
goto nla_put_failure;
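The cls_matchall hunks above add a per-CPU packet-hit counter so the classify fast path never takes a lock or atomic. The fragment below sketches the counter's lifecycle using the same helpers the patch uses (alloc_percpu, __this_cpu_inc, per_cpu_ptr, for_each_possible_cpu, free_percpu); it is an illustrative kernel-context fragment, not standalone code.

	/* setup: one counter instance per possible CPU */
	head->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!head->pf)
		return -ENOMEM;

	/* classify fast path: each CPU bumps its own copy, locklessly */
	__this_cpu_inc(head->pf->rhit);

	/* dump: fold the per-CPU copies into one aggregate for netlink */
	struct tc_matchall_pcnt gpf = {};
	int cpu;

	for_each_possible_cpu(cpu)
		gpf.rhit += per_cpu_ptr(head->pf, cpu)->rhit;

	/* teardown */
	free_percpu(head->pf);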
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 0404aa5fa7cb..f006af23b64a 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -276,7 +276,8 @@ static void route4_queue_work(struct route4_filter *f)
tcf_queue_work(&f->rwork, route4_delete_filter_work);
}
-static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
int h1, h2;
@@ -312,7 +313,7 @@ static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
}
static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter *f = arg;
@@ -393,7 +394,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
struct route4_bucket *b;
int err;
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
+ err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
if (err < 0)
return err;
@@ -468,7 +469,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
static int route4_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, void **arg, bool ovr,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter __rcu **fp;
@@ -496,7 +497,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
if (!f)
goto errout;
- err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
+ err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
if (err < 0)
goto errout;
@@ -560,15 +561,13 @@ errout:
return err;
}
-static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct route4_head *head = rtnl_dereference(tp->root);
unsigned int h, h1;
- if (head == NULL)
- arg->stop = 1;
-
- if (arg->stop)
+ if (head == NULL || arg->stop)
return;
for (h = 0; h <= 256; h++) {
@@ -597,7 +596,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
}
static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct route4_filter *f = fh;
struct nlattr *nest;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index e9ccf7daea7d..0719a21d9c41 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -312,7 +312,8 @@ static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
__rsvp_delete_filter(f);
}
-static void rsvp_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct rsvp_head *data = rtnl_dereference(tp->root);
int h1, h2;
@@ -341,7 +342,7 @@ static void rsvp_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
}
static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct rsvp_head *head = rtnl_dereference(tp->root);
struct rsvp_filter *nfp, *f = arg;
@@ -477,7 +478,8 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
- void **arg, bool ovr, struct netlink_ext_ack *extack)
+ void **arg, bool ovr, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct rsvp_head *data = rtnl_dereference(tp->root);
struct rsvp_filter *f, *nfp;
@@ -499,10 +501,11 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
return err;
- err = tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+ err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
if (err < 0)
return err;
- err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, extack);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
+ extack);
if (err < 0)
goto errout2;
@@ -520,7 +523,8 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
goto errout2;
}
- err = tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+ err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
+ TCA_RSVP_POLICE);
if (err < 0) {
kfree(n);
goto errout2;
@@ -548,7 +552,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
if (f == NULL)
goto errout2;
- err = tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+ err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
if (err < 0)
goto errout;
h2 = 16;
@@ -654,7 +658,8 @@ errout2:
return err;
}
-static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct rsvp_head *head = rtnl_dereference(tp->root);
unsigned int h, h1;
@@ -688,7 +693,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
}
static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct rsvp_filter *f = fh;
struct rsvp_session *s;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 9ccc93f257db..24e0a62a65cc 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -48,7 +48,7 @@ struct tcindex_data {
u32 hash; /* hash table size; 0 if undefined */
u32 alloc_hash; /* allocated size */
u32 fall_through; /* 0: only classify if explicit match */
- struct rcu_head rcu;
+ struct rcu_work rwork;
};
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -173,7 +173,7 @@ static void tcindex_destroy_fexts_work(struct work_struct *work)
}
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = arg;
@@ -221,17 +221,11 @@ found:
return 0;
}
-static int tcindex_destroy_element(struct tcf_proto *tp,
- void *arg, struct tcf_walker *walker)
-{
- bool last;
-
- return tcindex_delete(tp, arg, &last, NULL);
-}
-
-static void __tcindex_destroy(struct rcu_head *head)
+static void tcindex_destroy_work(struct work_struct *work)
{
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+ struct tcindex_data *p = container_of(to_rcu_work(work),
+ struct tcindex_data,
+ rwork);
kfree(p->perfect);
kfree(p->h);
@@ -252,15 +246,19 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
[TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
};
-static int tcindex_filter_result_init(struct tcindex_filter_result *r)
+static int tcindex_filter_result_init(struct tcindex_filter_result *r,
+ struct net *net)
{
memset(r, 0, sizeof(*r));
- return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
+ TCA_TCINDEX_POLICE);
}
-static void __tcindex_partial_destroy(struct rcu_head *head)
+static void tcindex_partial_destroy_work(struct work_struct *work)
{
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+ struct tcindex_data *p = container_of(to_rcu_work(work),
+ struct tcindex_data,
+ rwork);
kfree(p->perfect);
kfree(p);
@@ -275,7 +273,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
kfree(cp->perfect);
}
-static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
+static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
int i, err = 0;
@@ -285,7 +283,7 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
return -ENOMEM;
for (i = 0; i < cp->hash; i++) {
- err = tcf_exts_init(&cp->perfect[i].exts,
+ err = tcf_exts_init(&cp->perfect[i].exts, net,
TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
if (err < 0)
goto errout;
@@ -305,16 +303,16 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
struct tcindex_filter_result new_filter_result, *old_r = r;
- struct tcindex_filter_result cr;
struct tcindex_data *cp = NULL, *oldp;
struct tcindex_filter *f = NULL; /* make gcc behave */
+ struct tcf_result cr = {};
int err, balloc = 0;
struct tcf_exts e;
- err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
if (err < 0)
return err;
- err = tcf_exts_validate(net, tp, tb, est, &e, ovr, extack);
+ err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
if (err < 0)
goto errout;
@@ -337,7 +335,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (p->perfect) {
int i;
- if (tcindex_alloc_perfect_hash(cp) < 0)
+ if (tcindex_alloc_perfect_hash(net, cp) < 0)
goto errout;
for (i = 0; i < cp->hash; i++)
cp->perfect[i].res = p->perfect[i].res;
@@ -345,14 +343,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}
cp->h = p->h;
- err = tcindex_filter_result_init(&new_filter_result);
- if (err < 0)
- goto errout1;
- err = tcindex_filter_result_init(&cr);
+ err = tcindex_filter_result_init(&new_filter_result, net);
if (err < 0)
goto errout1;
if (old_r)
- cr.res = r->res;
+ cr = r->res;
if (tb[TCA_TCINDEX_HASH])
cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -406,7 +401,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = -ENOMEM;
if (!cp->perfect && !cp->h) {
if (valid_perfect_hash(cp)) {
- if (tcindex_alloc_perfect_hash(cp) < 0)
+ if (tcindex_alloc_perfect_hash(net, cp) < 0)
goto errout_alloc;
balloc = 1;
} else {
@@ -435,7 +430,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
goto errout_alloc;
f->key = handle;
f->next = NULL;
- err = tcindex_filter_result_init(&f->result);
+ err = tcindex_filter_result_init(&f->result, net);
if (err < 0) {
kfree(f);
goto errout_alloc;
@@ -443,12 +438,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}
if (tb[TCA_TCINDEX_CLASSID]) {
- cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
- tcf_bind_filter(tp, &cr.res, base);
+ cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
+ tcf_bind_filter(tp, &cr, base);
}
if (old_r && old_r != r) {
- err = tcindex_filter_result_init(old_r);
+ err = tcindex_filter_result_init(old_r, net);
if (err < 0) {
kfree(f);
goto errout_alloc;
@@ -456,7 +451,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}
oldp = p;
- r->res = cr.res;
+ r->res = cr;
tcf_exts_change(&r->exts, &e);
rcu_assign_pointer(tp->root, cp);
@@ -475,10 +470,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
; /* nothing */
rcu_assign_pointer(*fp, f);
+ } else {
+ tcf_exts_destroy(&new_filter_result.exts);
}
if (oldp)
- call_rcu(&oldp->rcu, __tcindex_partial_destroy);
+ tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
return 0;
errout_alloc:
@@ -487,7 +484,6 @@ errout_alloc:
else if (balloc == 2)
kfree(cp->h);
errout1:
- tcf_exts_destroy(&cr.exts);
tcf_exts_destroy(&new_filter_result.exts);
errout:
kfree(cp);
@@ -499,7 +495,7 @@ static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, void **arg, bool ovr,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_TCINDEX_MAX + 1];
@@ -522,7 +518,8 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
tca[TCA_RATE], ovr, extack);
}
-static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
+static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
+ bool rtnl_held)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter *f, *next;
@@ -558,24 +555,43 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
}
}
-static void tcindex_destroy(struct tcf_proto *tp,
+static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcf_walker walker;
+ int i;
pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
- walker.count = 0;
- walker.skip = 0;
- walker.fn = tcindex_destroy_element;
- tcindex_walk(tp, &walker);
- call_rcu(&p->rcu, __tcindex_destroy);
+ if (p->perfect) {
+ for (i = 0; i < p->hash; i++) {
+ struct tcindex_filter_result *r = p->perfect + i;
+
+ tcf_unbind_filter(tp, &r->res);
+ if (tcf_exts_get_net(&r->exts))
+ tcf_queue_work(&r->rwork,
+ tcindex_destroy_rexts_work);
+ else
+ __tcindex_destroy_rexts(r);
+ }
+ }
+
+ for (i = 0; p->h && i < p->hash; i++) {
+ struct tcindex_filter *f, *next;
+ bool last;
+
+ for (f = rtnl_dereference(p->h[i]); f; f = next) {
+ next = rtnl_dereference(f->next);
+ tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
+ }
+ }
+
+ tcf_queue_work(&p->rwork, tcindex_destroy_work);
}
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = fh;
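The cls_tcindex hunks above convert the destroy path from call_rcu() callbacks to an rcu_work queued through tcf_queue_work(), so the freeing runs in process context after an RCU grace period instead of in softirq context. A minimal sketch of the pattern, with names taken from the patch:

	/* Kernel-context sketch of the call_rcu() -> rcu_work conversion used
	 * by cls_tcindex above; illustrative only.
	 */
	struct tcindex_data {
		/* ... */
		struct rcu_work rwork;		/* was: struct rcu_head rcu */
	};

	static void tcindex_destroy_work(struct work_struct *work)
	{
		struct tcindex_data *p = container_of(to_rcu_work(work),
						      struct tcindex_data,
						      rwork);

		/* runs in process context, after an RCU grace period */
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}

	/* instead of call_rcu(&p->rcu, __tcindex_destroy): */
	tcf_queue_work(&p->rwork, tcindex_destroy_work);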
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index dcea21004604..48e76a3acf8a 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -629,7 +629,8 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
return -ENOENT;
}
-static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
@@ -663,7 +664,7 @@ static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
}
static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
- struct netlink_ext_ack *extack)
+ bool rtnl_held, struct netlink_ext_ack *extack)
{
struct tc_u_hnode *ht = arg;
struct tc_u_common *tp_c = tp->data;
@@ -726,7 +727,7 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
+ err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
if (err < 0)
return err;
@@ -803,7 +804,7 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
rcu_assign_pointer(*ins, n);
}
-static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
+static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
struct tc_u_knode *n)
{
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
@@ -848,7 +849,7 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
#endif
memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
- if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
+ if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
kfree(new);
return NULL;
}
@@ -858,7 +859,7 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca, void **arg, bool ovr,
+ struct nlattr **tca, void **arg, bool ovr, bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
@@ -910,7 +911,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
}
- new = u32_init_knode(tp, n);
+ new = u32_init_knode(net, tp, n);
if (!new)
return -ENOMEM;
@@ -1060,7 +1061,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
n->flags = flags;
- err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
+ err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
if (err < 0)
goto errout;
@@ -1123,7 +1124,8 @@ erridr:
return err;
}
-static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+ bool rtnl_held)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
@@ -1281,7 +1283,7 @@ static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
}
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
struct tc_u_knode *n = fh;
struct tc_u_hnode *ht_up, *ht_down;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7e4d1ccf4c87..352b46f98440 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -526,11 +526,6 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
return stab;
}
-static void stab_kfree_rcu(struct rcu_head *head)
-{
- kfree(container_of(head, struct qdisc_size_table, rcu));
-}
-
void qdisc_put_stab(struct qdisc_size_table *tab)
{
if (!tab)
@@ -538,7 +533,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
if (--tab->refcnt == 0) {
list_del(&tab->list);
- call_rcu(&tab->rcu, stab_kfree_rcu);
+ kfree_rcu(tab, rcu);
}
}
EXPORT_SYMBOL(qdisc_put_stab);
@@ -758,8 +753,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
return 0;
}
-void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
- unsigned int len)
+void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
const struct Qdisc_class_ops *cops;
@@ -1202,9 +1196,11 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
} else {
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
- err = -ENOMEM;
- if (handle == 0)
+ if (handle == 0) {
+ NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
+ err = -ENOSPC;
goto err_out3;
+ }
}
if (!netif_is_multiqueue(dev))
sch->flags |= TCQ_F_ONETXQUEUE;
@@ -1910,17 +1906,19 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
block = cops->tcf_block(q, cl, NULL);
if (!block)
return;
- list_for_each_entry(chain, &block->chain_list, list) {
+ for (chain = tcf_get_next_chain(block, NULL);
+ chain;
+ chain = tcf_get_next_chain(block, chain)) {
struct tcf_proto *tp;
- for (tp = rtnl_dereference(chain->filter_chain);
- tp; tp = rtnl_dereference(tp->next)) {
+ for (tp = tcf_get_next_proto(chain, NULL, true);
+ tp; tp = tcf_get_next_proto(chain, tp, true)) {
struct tcf_bind_args arg = {};
arg.w.fn = tcf_node_bind;
arg.classid = clid;
arg.cl = new_cl;
- tp->ops->walk(tp, &arg.w);
+ tp->ops->walk(tp, &arg.w, true);
}
}
}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index b910cd5c56f7..1d2a12132abc 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -138,8 +138,8 @@ struct cake_flow {
struct cake_host {
u32 srchost_tag;
u32 dsthost_tag;
- u16 srchost_refcnt;
- u16 dsthost_refcnt;
+ u16 srchost_bulk_flow_count;
+ u16 dsthost_bulk_flow_count;
};
struct cake_heap_entry {
@@ -258,7 +258,8 @@ enum {
CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
CAKE_FLAG_INGRESS = BIT(2),
CAKE_FLAG_WASH = BIT(3),
- CAKE_FLAG_SPLIT_GSO = BIT(4)
+ CAKE_FLAG_SPLIT_GSO = BIT(4),
+ CAKE_FLAG_FWMARK = BIT(5)
};
/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
@@ -746,8 +747,10 @@ skip_hash:
* queue, accept the collision, update the host tags.
*/
q->way_collisions++;
- q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
- q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
+ if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+ q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+ q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+ }
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
found:
@@ -767,13 +770,14 @@ found:
}
for (i = 0; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
- if (!q->hosts[outer_hash + k].srchost_refcnt)
+ if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
break;
}
q->hosts[outer_hash + k].srchost_tag = srchost_hash;
found_src:
srchost_idx = outer_hash + k;
- q->hosts[srchost_idx].srchost_refcnt++;
+ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+ q->hosts[srchost_idx].srchost_bulk_flow_count++;
q->flows[reduced_hash].srchost = srchost_idx;
}
@@ -789,13 +793,14 @@ found_src:
}
for (i = 0; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
- if (!q->hosts[outer_hash + k].dsthost_refcnt)
+ if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
break;
}
q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
found_dst:
dsthost_idx = outer_hash + k;
- q->hosts[dsthost_idx].dsthost_refcnt++;
+ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+ q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
q->flows[reduced_hash].dsthost = dsthost_idx;
}
}
@@ -1508,20 +1513,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
return idx + (tin << 16);
}
-static void cake_wash_diffserv(struct sk_buff *skb)
-{
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
- break;
- case htons(ETH_P_IPV6):
- ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
- break;
- default:
- break;
- }
-}
-
static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
{
u8 dscp;
@@ -1553,25 +1544,32 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 tin;
+ u8 dscp;
+
+ /* Tin selection: Default to diffserv-based selection, allow overriding
+ * using firewall marks or skb->priority.
+ */
+ dscp = cake_handle_diffserv(skb,
+ q->rate_flags & CAKE_FLAG_WASH);
+
+ if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
+ tin = 0;
- if (TC_H_MAJ(skb->priority) == sch->handle &&
- TC_H_MIN(skb->priority) > 0 &&
- TC_H_MIN(skb->priority) <= q->tin_cnt) {
+ else if (q->rate_flags & CAKE_FLAG_FWMARK && /* use fw mark */
+ skb->mark &&
+ skb->mark <= q->tin_cnt)
+ tin = q->tin_order[skb->mark - 1];
+
+ else if (TC_H_MAJ(skb->priority) == sch->handle &&
+ TC_H_MIN(skb->priority) > 0 &&
+ TC_H_MIN(skb->priority) <= q->tin_cnt)
tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
- if (q->rate_flags & CAKE_FLAG_WASH)
- cake_wash_diffserv(skb);
- } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
- /* extract the Diffserv Precedence field, if it exists */
- /* and clear DSCP bits if washing */
- tin = q->tin_index[cake_handle_diffserv(skb,
- q->rate_flags & CAKE_FLAG_WASH)];
+ else {
+ tin = q->tin_index[dscp];
+
if (unlikely(tin >= q->tin_cnt))
tin = 0;
- } else {
- tin = 0;
- if (q->rate_flags & CAKE_FLAG_WASH)
- cake_wash_diffserv(skb);
}
return &q->tins[tin];
@@ -1667,7 +1665,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
- unsigned int slen = 0;
+ unsigned int slen = 0, numsegs = 0;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
@@ -1683,6 +1681,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
flow_queue_add(flow, segs);
sch->q.qlen++;
+ numsegs++;
slen += segs->len;
q->buffer_used += segs->truesize;
b->packets++;
@@ -1696,7 +1695,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch->qstats.backlog += slen;
q->avg_window_bytes += slen;
- qdisc_tree_reduce_backlog(sch, 1, len);
+ qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
consume_skb(skb);
} else {
/* not splitting */
@@ -1793,20 +1792,30 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
b->sparse_flow_count++;
if (cake_dsrc(q->flow_mode))
- host_load = max(host_load, srchost->srchost_refcnt);
+ host_load = max(host_load, srchost->srchost_bulk_flow_count);
if (cake_ddst(q->flow_mode))
- host_load = max(host_load, dsthost->dsthost_refcnt);
+ host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
flow->deficit = (b->flow_quantum *
quantum_div[host_load]) >> 16;
} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+ struct cake_host *srchost = &b->hosts[flow->srchost];
+ struct cake_host *dsthost = &b->hosts[flow->dsthost];
+
/* this flow was empty, accounted as a sparse flow, but actually
* in the bulk rotation.
*/
flow->set = CAKE_SET_BULK;
b->sparse_flow_count--;
b->bulk_flow_count++;
+
+ if (cake_dsrc(q->flow_mode))
+ srchost->srchost_bulk_flow_count++;
+
+ if (cake_ddst(q->flow_mode))
+ dsthost->dsthost_bulk_flow_count++;
+
}
if (q->buffer_used > q->buffer_max_used)
@@ -1974,23 +1983,8 @@ retry:
dsthost = &b->hosts[flow->dsthost];
host_load = 1;
- if (cake_dsrc(q->flow_mode))
- host_load = max(host_load, srchost->srchost_refcnt);
-
- if (cake_ddst(q->flow_mode))
- host_load = max(host_load, dsthost->dsthost_refcnt);
-
- WARN_ON(host_load > CAKE_QUEUES);
-
/* flow isolation (DRR++) */
if (flow->deficit <= 0) {
- /* The shifted prandom_u32() is a way to apply dithering to
- * avoid accumulating roundoff errors
- */
- flow->deficit += (b->flow_quantum * quantum_div[host_load] +
- (prandom_u32() >> 16)) >> 16;
- list_move_tail(&flow->flowchain, &b->old_flows);
-
/* Keep all flows with deficits out of the sparse and decaying
* rotations. No non-empty flow can go into the decaying
* rotation, so they can't get deficits
@@ -1999,6 +1993,13 @@ retry:
if (flow->head) {
b->sparse_flow_count--;
b->bulk_flow_count++;
+
+ if (cake_dsrc(q->flow_mode))
+ srchost->srchost_bulk_flow_count++;
+
+ if (cake_ddst(q->flow_mode))
+ dsthost->dsthost_bulk_flow_count++;
+
flow->set = CAKE_SET_BULK;
} else {
/* we've moved it to the bulk rotation for
@@ -2008,6 +2009,22 @@ retry:
flow->set = CAKE_SET_SPARSE_WAIT;
}
}
+
+ if (cake_dsrc(q->flow_mode))
+ host_load = max(host_load, srchost->srchost_bulk_flow_count);
+
+ if (cake_ddst(q->flow_mode))
+ host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+
+ WARN_ON(host_load > CAKE_QUEUES);
+
+ /* The shifted prandom_u32() is a way to apply dithering to
+ * avoid accumulating roundoff errors
+ */
+ flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+ (prandom_u32() >> 16)) >> 16;
+ list_move_tail(&flow->flowchain, &b->old_flows);
+
goto retry;
}
@@ -2028,6 +2045,13 @@ retry:
&b->decaying_flows);
if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
+
+ if (cake_dsrc(q->flow_mode))
+ srchost->srchost_bulk_flow_count--;
+
+ if (cake_ddst(q->flow_mode))
+ dsthost->dsthost_bulk_flow_count--;
+
b->decaying_flow_count++;
} else if (flow->set == CAKE_SET_SPARSE ||
flow->set == CAKE_SET_SPARSE_WAIT) {
@@ -2041,14 +2065,19 @@ retry:
if (flow->set == CAKE_SET_SPARSE ||
flow->set == CAKE_SET_SPARSE_WAIT)
b->sparse_flow_count--;
- else if (flow->set == CAKE_SET_BULK)
+ else if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- else
+
+ if (cake_dsrc(q->flow_mode))
+ srchost->srchost_bulk_flow_count--;
+
+ if (cake_ddst(q->flow_mode))
+ dsthost->dsthost_bulk_flow_count--;
+
+ } else
b->decaying_flow_count--;
flow->set = CAKE_SET_NONE;
- srchost->srchost_refcnt--;
- dsthost->dsthost_refcnt--;
}
goto begin;
}
@@ -2589,6 +2618,13 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
}
+ if (tb[TCA_CAKE_FWMARK]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_FWMARK]))
+ q->rate_flags |= CAKE_FLAG_FWMARK;
+ else
+ q->rate_flags &= ~CAKE_FLAG_FWMARK;
+ }
+
if (q->tins) {
sch_tree_lock(sch);
cake_reconfigure(sch);
@@ -2748,6 +2784,10 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_CAKE_FWMARK,
+ !!(q->rate_flags & CAKE_FLAG_FWMARK)))
+ goto nla_put_failure;
+
return nla_nest_end(skb, opts);
nla_put_failure:
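The sch_cake hunks above add a CAKE_FLAG_FWMARK mode and rework cake_select_tin() so tin selection follows a fixed precedence: best-effort mode short-circuits everything, then the firewall mark, then a matching skb->priority minor number, then the DSCP-derived index (with washing applied up front when requested). The user-space model below illustrates that ordering only; the field names and the priority check are simplified relative to the kernel code, which also requires the priority's major number to match the qdisc handle.

	/* Simplified model of the tin selection order introduced above. */
	#include <stdint.h>

	struct model_skb { uint32_t mark; uint32_t prio_minor; uint8_t dscp; };

	static uint32_t select_tin(const struct model_skb *skb, int besteffort,
				   int use_fwmark, uint32_t tin_cnt,
				   const uint32_t *tin_order,
				   const uint32_t *tin_index)
	{
		if (besteffort)
			return 0;				/* single tin */
		if (use_fwmark && skb->mark && skb->mark <= tin_cnt)
			return tin_order[skb->mark - 1];	/* fw mark override */
		if (skb->prio_minor && skb->prio_minor <= tin_cnt)
			return tin_order[skb->prio_minor - 1];	/* classid override */

		uint32_t tin = tin_index[skb->dscp];		/* diffserv default */
		return tin < tin_cnt ? tin : 0;
	}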
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index e689e11b6d0f..c6a502933fe7 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct Qdisc *child,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
int err;
err = child->ops->enqueue(skb, child, to_free);
if (err != NET_XMIT_SUCCESS)
return err;
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index cdebaed0f8cf..09b800991065 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
int err = 0;
+ bool first;
cl = drr_classify(skb, sch, &err);
if (cl == NULL) {
@@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- if (cl->qdisc->q.qlen == 1) {
+ if (first) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return err;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index f6f480784bc6..42471464ded3 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct dsmark_qdisc_data *p = qdisc_priv(sch);
int err;
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 66ba2ce2320f..a117d9260558 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
skb = __skb_dequeue(&q->skb_bad_txq);
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_dec(q, skb);
- qdisc_qstats_cpu_qlen_dec(q);
+ qdisc_qstats_atomic_qlen_dec(q);
} else {
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_inc(q, skb);
- qdisc_qstats_cpu_qlen_inc(q);
+ qdisc_qstats_atomic_qlen_inc(q);
} else {
qdisc_qstats_backlog_inc(q, skb);
q->q.qlen++;
@@ -147,7 +147,7 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
qdisc_qstats_cpu_requeues_inc(q);
qdisc_qstats_cpu_backlog_inc(q, skb);
- qdisc_qstats_cpu_qlen_inc(q);
+ qdisc_qstats_atomic_qlen_inc(q);
skb = next;
}
@@ -252,7 +252,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
skb = __skb_dequeue(&q->gso_skb);
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_dec(q, skb);
- qdisc_qstats_cpu_qlen_dec(q);
+ qdisc_qstats_atomic_qlen_dec(q);
} else {
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
@@ -500,7 +500,7 @@ static void dev_watchdog_down(struct net_device *dev)
* netif_carrier_on - set carrier
* @dev: network device
*
- * Device has detected that carrier.
+ * Device has detected acquisition of carrier.
*/
void netif_carrier_on(struct net_device *dev)
{
@@ -559,7 +559,7 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
};
static struct netdev_queue noop_netdev_queue = {
- .qdisc = &noop_qdisc,
+ RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
.qdisc_sleeping = &noop_qdisc,
};
@@ -645,7 +645,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
if (unlikely(err))
return qdisc_drop_cpu(skb, qdisc, to_free);
- qdisc_qstats_cpu_qlen_inc(qdisc);
+ qdisc_qstats_atomic_qlen_inc(qdisc);
/* Note: skb can not be used after skb_array_produce(),
* so we better not use qdisc_qstats_cpu_backlog_inc()
*/
@@ -670,7 +670,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
if (likely(skb)) {
qdisc_qstats_cpu_backlog_dec(qdisc, skb);
qdisc_bstats_cpu_update(qdisc, skb);
- qdisc_qstats_cpu_qlen_dec(qdisc);
+ qdisc_qstats_atomic_qlen_dec(qdisc);
}
return skb;
@@ -714,7 +714,6 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
q->backlog = 0;
- q->qlen = 0;
}
}
@@ -1366,7 +1365,11 @@ static void mini_qdisc_rcu_func(struct rcu_head *head)
void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
struct tcf_proto *tp_head)
{
- struct mini_Qdisc *miniq_old = rtnl_dereference(*miniqp->p_miniq);
+ /* Protected with chain0->filter_chain_lock.
+ * Can't access chain directly because tp_head can be NULL.
+ */
+ struct mini_Qdisc *miniq_old =
+ rcu_dereference_protected(*miniqp->p_miniq, 1);
struct mini_Qdisc *miniq;
if (!tp_head) {
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b18ec1f6de60..24cc220a3218 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct hfsc_class *cl;
int uninitialized_var(err);
+ bool first;
cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) {
@@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err;
}
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err;
}
- if (cl->qdisc->q.qlen == 1) {
- unsigned int len = qdisc_pkt_len(skb);
-
+ if (first) {
if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len);
if (cl->cl_flags & HFSC_FSC)
@@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 58b449490757..30f9da7e1076 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
int uninitialized_var(ret);
+ unsigned int len = qdisc_pkt_len(skb);
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_classify(skb, sch, &ret);
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
htb_activate(q, cl);
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 75046ec72144..cc9d8133afcd 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -447,6 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
int nb = 0;
int count = 1;
int rc = NET_XMIT_SUCCESS;
+ int rc_drop = NET_XMIT_DROP;
/* Do not fool qdisc_drop_all() */
skb->prev = NULL;
@@ -486,6 +487,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->duplicate = 0;
rootq->enqueue(skb2, rootq, to_free);
q->duplicate = dupsave;
+ rc_drop = NET_XMIT_SUCCESS;
}
/*
@@ -498,7 +500,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (skb_is_gso(skb)) {
segs = netem_segment(skb, sch, to_free);
if (!segs)
- return NET_XMIT_DROP;
+ return rc_drop;
} else {
segs = skb;
}
@@ -521,8 +523,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1<<(prandom_u32() % 8);
}
- if (unlikely(sch->q.qlen >= sch->limit))
- return qdisc_drop_all(skb, sch, to_free);
+ if (unlikely(sch->q.qlen >= sch->limit)) {
+ qdisc_drop_all(skb, sch, to_free);
+ return rc_drop;
+ }
qdisc_qstats_backlog_inc(sch, skb);
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index d1429371592f..1cc0c7b74aa3 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -17,9 +17,7 @@
* University of Oslo, Norway.
*
* References:
- * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
- * IEEE Conference on High Performance Switching and Routing 2013 :
- * "PIE: A * Lightweight Control Scheme to Address the Bufferbloat Problem"
+ * RFC 8033: https://tools.ietf.org/html/rfc8033
*/
#include <linux/module.h>
@@ -31,9 +29,9 @@
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
-#define QUEUE_THRESHOLD 10000
+#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
-#define MAX_PROB 0xffffffff
+#define MAX_PROB 0xffffffffffffffff
#define PIE_SCALE 8
/* parameters used */
@@ -49,14 +47,16 @@ struct pie_params {
/* variables used */
struct pie_vars {
- u32 prob; /* probability but scaled by u32 limit. */
+ u64 prob; /* probability but scaled by u64 limit. */
psched_time_t burst_time;
psched_time_t qdelay;
psched_time_t qdelay_old;
u64 dq_count; /* measured in bytes */
psched_time_t dq_tstamp; /* drain rate */
+ u64 accu_prob; /* accumulated drop probability */
u32 avg_dq_rate; /* bytes per pschedtime tick,scaled */
u32 qlen_old; /* in bytes */
+ u8 accu_prob_overflows; /* overflows of accu_prob */
};
/* statistics gathering */
@@ -81,9 +81,9 @@ static void pie_params_init(struct pie_params *params)
{
params->alpha = 2;
params->beta = 20;
- params->tupdate = usecs_to_jiffies(30 * USEC_PER_MSEC); /* 30 ms */
+ params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC); /* 15 ms */
params->limit = 1000; /* default of 1000 packets */
- params->target = PSCHED_NS2TICKS(20 * NSEC_PER_MSEC); /* 20 ms */
+ params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */
params->ecn = false;
params->bytemode = false;
}
@@ -91,16 +91,18 @@ static void pie_params_init(struct pie_params *params)
static void pie_vars_init(struct pie_vars *vars)
{
vars->dq_count = DQCOUNT_INVALID;
+ vars->accu_prob = 0;
vars->avg_dq_rate = 0;
- /* default of 100 ms in pschedtime */
- vars->burst_time = PSCHED_NS2TICKS(100 * NSEC_PER_MSEC);
+ /* default of 150 ms in pschedtime */
+ vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
+ vars->accu_prob_overflows = 0;
}
static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
struct pie_sched_data *q = qdisc_priv(sch);
- u32 rnd;
- u32 local_prob = q->vars.prob;
+ u64 rnd;
+ u64 local_prob = q->vars.prob;
u32 mtu = psched_mtu(qdisc_dev(sch));
/* If there is still burst allowance left skip random early drop */
@@ -124,13 +126,33 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size)
* probablity. Smaller packets will have lower drop prob in this case
*/
if (q->params.bytemode && packet_size <= mtu)
- local_prob = (local_prob / mtu) * packet_size;
+ local_prob = (u64)packet_size * div_u64(local_prob, mtu);
else
local_prob = q->vars.prob;
- rnd = prandom_u32();
- if (rnd < local_prob)
+ if (local_prob == 0) {
+ q->vars.accu_prob = 0;
+ q->vars.accu_prob_overflows = 0;
+ }
+
+ if (local_prob > MAX_PROB - q->vars.accu_prob)
+ q->vars.accu_prob_overflows++;
+
+ q->vars.accu_prob += local_prob;
+
+ if (q->vars.accu_prob_overflows == 0 &&
+ q->vars.accu_prob < (MAX_PROB / 100) * 85)
+ return false;
+ if (q->vars.accu_prob_overflows == 8 &&
+ q->vars.accu_prob >= MAX_PROB / 2)
+ return true;
+
+ prandom_bytes(&rnd, 8);
+ if (rnd < local_prob) {
+ q->vars.accu_prob = 0;
+ q->vars.accu_prob_overflows = 0;
return true;
+ }
return false;
}
@@ -168,6 +190,8 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
+ q->vars.accu_prob = 0;
+ q->vars.accu_prob_overflows = 0;
return qdisc_drop(skb, sch, to_free);
}
@@ -317,9 +341,10 @@ static void calculate_probability(struct Qdisc *sch)
u32 qlen = sch->qstats.backlog; /* queue size in bytes */
psched_time_t qdelay = 0; /* in pschedtime */
psched_time_t qdelay_old = q->vars.qdelay; /* in pschedtime */
- s32 delta = 0; /* determines the change in probability */
- u32 oldprob;
- u32 alpha, beta;
+ s64 delta = 0; /* determines the change in probability */
+ u64 oldprob;
+ u64 alpha, beta;
+ u32 power;
bool update_prob = true;
q->vars.qdelay_old = q->vars.qdelay;
@@ -339,38 +364,36 @@ static void calculate_probability(struct Qdisc *sch)
* value for alpha as 0.125. In this implementation, we use values 0-32
* passed from user space to represent this. Also, alpha and beta have
* unit of HZ and need to be scaled before they can used to update
- * probability. alpha/beta are updated locally below by 1) scaling them
- * appropriately 2) scaling down by 16 to come to 0-2 range.
- * Please see paper for details.
- *
- * We scale alpha and beta differently depending on whether we are in
- * light, medium or high dropping mode.
+ * probability. alpha/beta are updated locally below by scaling down
+ * by 16 to come to 0-2 range.
*/
- if (q->vars.prob < MAX_PROB / 100) {
- alpha =
- (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
- beta =
- (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
- } else if (q->vars.prob < MAX_PROB / 10) {
- alpha =
- (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
- beta =
- (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
- } else {
- alpha =
- (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
- beta =
- (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
+ alpha = ((u64)q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
+ beta = ((u64)q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
+
+ /* We scale alpha and beta differently depending on how heavy the
+ * congestion is. Please see RFC 8033 for details.
+ */
+ if (q->vars.prob < MAX_PROB / 10) {
+ alpha >>= 1;
+ beta >>= 1;
+
+ power = 100;
+ while (q->vars.prob < div_u64(MAX_PROB, power) &&
+ power <= 1000000) {
+ alpha >>= 2;
+ beta >>= 2;
+ power *= 10;
+ }
}
/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
- delta += alpha * ((qdelay - q->params.target));
- delta += beta * ((qdelay - qdelay_old));
+ delta += alpha * (u64)(qdelay - q->params.target);
+ delta += beta * (u64)(qdelay - qdelay_old);
oldprob = q->vars.prob;
/* to ensure we increase probability in steps of no more than 2% */
- if (delta > (s32)(MAX_PROB / (100 / 2)) &&
+ if (delta > (s64)(MAX_PROB / (100 / 2)) &&
q->vars.prob >= MAX_PROB / 10)
delta = (MAX_PROB / 100) * 2;
@@ -406,7 +429,8 @@ static void calculate_probability(struct Qdisc *sch)
*/
if (qdelay == 0 && qdelay_old == 0 && update_prob)
- q->vars.prob = (q->vars.prob * 98) / 100;
+ /* Reduce drop probability to 98.4% */
+ q->vars.prob -= q->vars.prob / 64u;
q->vars.qdelay = qdelay;
q->vars.qlen_old = qlen;
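The sch_pie hunks above widen the drop probability to 64 bits and, following RFC 8033, make drop_early() accumulate probability across packets: below roughly 0.85 of accumulated probability nothing is dropped, at or above roughly 8.5 a drop is forced, and in between a random draw decides. The floating-point model below shows the net effect only; the kernel code uses 64-bit fixed point with an explicit overflow counter, and the reset for the forced-drop case actually happens on the enqueue drop path.

	/* Floating-point model of the accumulated-probability drop decision;
	 * purely illustrative.
	 */
	#include <stdlib.h>

	static double accu_prob;		/* per-queue accumulator */

	static int pie_drop_early_model(double local_prob)
	{
		if (local_prob == 0.0)
			accu_prob = 0.0;

		accu_prob += local_prob;

		if (accu_prob < 0.85)		/* too little accumulated */
			return 0;
		if (accu_prob >= 8.5)		/* a lot accumulated: force drop */
			goto drop;
		if ((double)rand() / RAND_MAX >= local_prob)
			return 0;		/* random draw says keep */
	drop:
		accu_prob = 0.0;		/* reset once a drop is decided */
		return 1;
	}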
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index cdf68706e40f..847141cd900f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb);
struct Qdisc *qdisc;
int ret;
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
ret = qdisc_enqueue(skb, qdisc, to_free);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index dc37c4ead439..29f5c4a24688 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ unsigned int len = qdisc_pkt_len(skb), gso_segs;
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
struct qfq_aggregate *agg;
int err = 0;
+ bool first;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
@@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
- if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
+ if (unlikely(cl->agg->lmax < len)) {
pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
- cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
- err = qfq_change_agg(sch, cl, cl->agg->class_weight,
- qdisc_pkt_len(skb));
+ cl->agg->lmax, len, cl->common.classid);
+ err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
if (err) {
cl->qstats.drops++;
return qdisc_drop(skb, sch, to_free);
}
}
+ gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- bstats_update(&cl->bstats, skb);
- qdisc_qstats_backlog_inc(sch, skb);
+ cl->bstats.bytes += len;
+ cl->bstats.packets += gso_segs;
+ sch->qstats.backlog += len;
++sch->q.qlen;
agg = cl->agg;
/* if the queue was not empty, then done here */
- if (cl->qdisc->q.qlen != 1) {
+ if (!first) {
if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
list_first_entry(&agg->active, struct qfq_class, alist)
- == cl && cl->deficit < qdisc_pkt_len(skb))
+ == cl && cl->deficit < len)
list_move_tail(&cl->alist, &agg->active);
return err;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 942dcca09cf2..7f272a9070c5 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct tbf_sched_data *q = qdisc_priv(sch);
+ unsigned int len = qdisc_pkt_len(skb);
int ret;
if (qdisc_pkt_len(skb) > q->max_size) {
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return ret;
}
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
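
Annotation: the sch_prio, sch_qfq and sch_tbf enqueue hunks above share one idea: qdisc_pkt_len(skb) (and, for qfq, gso_segs and the queue-empty flag) is read before qdisc_enqueue() on the child qdisc, because the child may segment or free the skb, after which dereferencing it for stats is unsafe. Below is a minimal userspace sketch of that snapshot-before-handoff pattern; the types and helpers are invented stand-ins, not kernel APIs.

#include <stdlib.h>
#include <stdio.h>

struct pkt { unsigned int len; };

/* stand-in for qdisc_enqueue(): may consume (free) the packet */
static int child_enqueue(struct pkt *p)
{
	free(p);		/* packet is gone after this call */
	return 0;		/* pretend the enqueue succeeded */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	unsigned int backlog = 0;

	if (!p)
		return 1;
	p->len = 1500;

	unsigned int len = p->len;	/* snapshot before handing off */

	if (child_enqueue(p) == 0)
		backlog += len;		/* safe: uses the snapshot, not p->len */

	printf("backlog=%u\n", backlog);
	return 0;
}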
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 201c888604e4..d2c7d0d2abc1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -101,7 +101,7 @@ static struct sctp_association *sctp_association_init(
* socket values.
*/
asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
- asoc->pf_retrans = net->sctp.pf_retrans;
+ asoc->pf_retrans = sp->pf_retrans;
asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -1651,8 +1651,11 @@ int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
if (preload)
idr_preload(gfp);
spin_lock_bh(&sctp_assocs_id_lock);
- /* 0 is not a valid assoc_id, must be >= 1 */
- ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
+ /* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
+ * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
+ */
+ ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
+ GFP_NOWAIT);
spin_unlock_bh(&sctp_assocs_id_lock);
if (preload)
idr_preload_end();
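
Annotation: the comment above spells out the reserved assoc_id space: 0, 1 and 2 are the SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and SCTP_ALL_ASSOC wildcards, so idr_alloc_cyclic() now hands out ids starting at SCTP_ALL_ASSOC + 1, and sctp_id2assoc() further down rejects id <= SCTP_ALL_ASSOC. A small sketch of that convention follows; the enum values match the comment, the helper is illustrative only.

#include <stdbool.h>
#include <stdio.h>

enum {
	SCTP_FUTURE_ASSOC  = 0,	/* apply to socket defaults / future assocs */
	SCTP_CURRENT_ASSOC = 1,	/* apply to all currently existing assocs */
	SCTP_ALL_ASSOC     = 2,	/* both of the above */
};

/* a real association id must lie above the reserved wildcard range */
static bool assoc_id_is_real(int id)
{
	return id > SCTP_ALL_ASSOC;
}

int main(void)
{
	for (int id = 0; id < 4; id++)
		printf("id=%d real=%d\n", id, assoc_id_is_real(id));
	return 0;
}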
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 64bef313d436..5cb7c1ff97e9 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -192,7 +192,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
if (unlikely(!max_data)) {
max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
sctp_datachk_len(&asoc->stream));
- pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)",
+ pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%zu)",
__func__, asoc, max_data);
}
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 078f01a8d582..435847d98b51 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+ nla_total_size(addrlen * asoc->peer.transport_count)
+ nla_total_size(addrlen * addrcnt)
+ nla_total_size(sizeof(struct inet_diag_meminfo))
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index b9ed271b7ef7..6200cd2b4b99 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
- addr->a.v6.sin6_flowinfo = 0;
addr->a.v6.sin6_addr = ifa->addr;
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
addr->valid = 1;
@@ -282,7 +280,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (saddr) {
fl6->saddr = saddr->v6.sin6_addr;
- fl6->fl6_sport = saddr->v6.sin6_port;
+ if (!fl6->fl6_sport)
+ fl6->fl6_sport = saddr->v6.sin6_port;
pr_debug("src=%pI6 - ", &fl6->saddr);
}
@@ -434,7 +433,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifp->addr;
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 123e9f2dc226..edfcf16e704c 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
skb->csum_not_inet = 0;
+ gso_reset_checksum(skb, ~0);
return sctp_compute_cksum(skb, skb_transport_offset(skb));
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c37e1c2dec9d..fd33281999b5 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
- sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
+ sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);
}
/* Free the outqueue structure and any related pending chunks.
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d5878ae55840..6abc8b274270 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
@@ -441,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
}
if (saddr) {
fl4->saddr = saddr->v4.sin_addr.s_addr;
- fl4->fl4_sport = saddr->v4.sin_port;
+ if (!fl4->fl4_sport)
+ fl4->fl4_sport = saddr->v4.sin_port;
}
pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
@@ -776,10 +776,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
spin_lock_bh(&net->sctp.local_addr_lock);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f4ac6c592e13..d05c57664e36 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
*
* [INIT ACK back to where the INIT came from.]
*/
- retval->transport = chunk->transport;
+ if (chunk->transport)
+ retval->transport =
+ sctp_assoc_lookup_paddr(asoc,
+ &chunk->transport->ipaddr);
retval->subh.init_hdr =
sctp_addto_chunk(retval, sizeof(initack), &initack);
@@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
*
* [COOKIE ACK back to where the COOKIE ECHO came from.]
*/
- if (retval && chunk)
- retval->transport = chunk->transport;
+ if (retval && chunk && chunk->transport)
+ retval->transport =
+ sctp_assoc_lookup_paddr(asoc,
+ &chunk->transport->ipaddr);
return retval;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f93c3cf9e567..533207dbeae9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -248,7 +248,7 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
}
/* Otherwise this is a UDP-style socket. */
- if (!id || (id == (sctp_assoc_t)-1))
+ if (id <= SCTP_ALL_ASSOC)
return NULL;
spin_lock_bh(&sctp_assocs_id_lock);
@@ -1866,6 +1866,7 @@ static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
pr_debug("%s: aborting association:%p\n", __func__, asoc);
sctp_primitive_ABORT(net, asoc, chunk);
+ iov_iter_revert(&msg->msg_iter, msg_len);
return 0;
}
@@ -2027,7 +2028,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_transport *transport = NULL;
struct sctp_sndrcvinfo _sinfo, *sinfo;
- struct sctp_association *asoc;
+ struct sctp_association *asoc, *tmp;
struct sctp_cmsgs cmsgs;
union sctp_addr *daddr;
bool new = false;
@@ -2053,7 +2054,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
/* SCTP_SENDALL process */
if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
- list_for_each_entry(asoc, &ep->asocs, asocs) {
+ list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
msg_len);
if (err == 0)
@@ -2750,12 +2751,13 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
return -EINVAL;
}
- /* Get association, if assoc_id != 0 and the socket is a one
- * to many style socket, and an association was not found, then
- * the id was invalid.
+ /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
- if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
/* Heartbeat demand can only be sent on a transport or
@@ -2797,6 +2799,43 @@ static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
}
+static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info *params,
+ struct sctp_association *asoc)
+{
+ struct sctp_transport *trans;
+
+ if (params->sack_delay) {
+ asoc->sackdelay = msecs_to_jiffies(params->sack_delay);
+ asoc->param_flags =
+ sctp_spp_sackdelay_enable(asoc->param_flags);
+ }
+ if (params->sack_freq == 1) {
+ asoc->param_flags =
+ sctp_spp_sackdelay_disable(asoc->param_flags);
+ } else if (params->sack_freq > 1) {
+ asoc->sackfreq = params->sack_freq;
+ asoc->param_flags =
+ sctp_spp_sackdelay_enable(asoc->param_flags);
+ }
+
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ if (params->sack_delay) {
+ trans->sackdelay = msecs_to_jiffies(params->sack_delay);
+ trans->param_flags =
+ sctp_spp_sackdelay_enable(trans->param_flags);
+ }
+ if (params->sack_freq == 1) {
+ trans->param_flags =
+ sctp_spp_sackdelay_disable(trans->param_flags);
+ } else if (params->sack_freq > 1) {
+ trans->sackfreq = params->sack_freq;
+ trans->param_flags =
+ sctp_spp_sackdelay_enable(trans->param_flags);
+ }
+ }
+}
+
/*
* 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
*
@@ -2836,10 +2875,9 @@ static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
static int sctp_setsockopt_delayed_ack(struct sock *sk,
char __user *optval, unsigned int optlen)
{
- struct sctp_sack_info params;
- struct sctp_transport *trans = NULL;
- struct sctp_association *asoc = NULL;
- struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ struct sctp_sack_info params;
if (optlen == sizeof(struct sctp_sack_info)) {
if (copy_from_user(&params, optval, optlen))
@@ -2867,67 +2905,42 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
if (params.sack_delay > 500)
return -EINVAL;
- /* Get association, if sack_assoc_id != 0 and the socket is a one
- * to many style socket, and an association was not found, then
- * the id was invalid.
+ /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
- if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && params.sack_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
- if (params.sack_delay) {
- if (asoc) {
- asoc->sackdelay =
- msecs_to_jiffies(params.sack_delay);
- asoc->param_flags =
- sctp_spp_sackdelay_enable(asoc->param_flags);
- } else {
+ if (asoc) {
+ sctp_apply_asoc_delayed_ack(&params, asoc);
+
+ return 0;
+ }
+
+ if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
+ params.sack_assoc_id == SCTP_ALL_ASSOC) {
+ if (params.sack_delay) {
sp->sackdelay = params.sack_delay;
sp->param_flags =
sctp_spp_sackdelay_enable(sp->param_flags);
}
- }
-
- if (params.sack_freq == 1) {
- if (asoc) {
- asoc->param_flags =
- sctp_spp_sackdelay_disable(asoc->param_flags);
- } else {
+ if (params.sack_freq == 1) {
sp->param_flags =
sctp_spp_sackdelay_disable(sp->param_flags);
- }
- } else if (params.sack_freq > 1) {
- if (asoc) {
- asoc->sackfreq = params.sack_freq;
- asoc->param_flags =
- sctp_spp_sackdelay_enable(asoc->param_flags);
- } else {
+ } else if (params.sack_freq > 1) {
sp->sackfreq = params.sack_freq;
sp->param_flags =
sctp_spp_sackdelay_enable(sp->param_flags);
}
}
- /* If change is for association, also apply to each transport. */
- if (asoc) {
- list_for_each_entry(trans, &asoc->peer.transport_addr_list,
- transports) {
- if (params.sack_delay) {
- trans->sackdelay =
- msecs_to_jiffies(params.sack_delay);
- trans->param_flags =
- sctp_spp_sackdelay_enable(trans->param_flags);
- }
- if (params.sack_freq == 1) {
- trans->param_flags =
- sctp_spp_sackdelay_disable(trans->param_flags);
- } else if (params.sack_freq > 1) {
- trans->sackfreq = params.sack_freq;
- trans->param_flags =
- sctp_spp_sackdelay_enable(trans->param_flags);
- }
- }
- }
+ if (params.sack_assoc_id == SCTP_CURRENT_ASSOC ||
+ params.sack_assoc_id == SCTP_ALL_ASSOC)
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+ sctp_apply_asoc_delayed_ack(&params, asoc);
return 0;
}
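
Annotation: the delayed-ack hunk above is the template for most of the setsockopt refactors that follow: a real assoc_id applies the change to that association only; SCTP_FUTURE_ASSOC updates the socket defaults; SCTP_CURRENT_ASSOC walks ep->asocs; SCTP_ALL_ASSOC does both, with the per-association body factored into sctp_apply_asoc_delayed_ack() so the loop can reuse it. The sketch below shows only that dispatch shape; the list handling and the option type are simplified stand-ins.

#include <stdio.h>

enum { FUTURE = 0, CURRENT = 1, ALL = 2 };

struct assoc { int value; };

static void apply_to_assoc(struct assoc *a, int value)
{
	a->value = value;		/* per-association update */
}

static int do_setsockopt(struct assoc *found, int assoc_id,
			 struct assoc *live, int nlive,
			 int *sock_default, int value)
{
	if (found) {			/* explicit association id */
		apply_to_assoc(found, value);
		return 0;
	}
	if (assoc_id == FUTURE || assoc_id == ALL)
		*sock_default = value;	/* socket-level default */
	if (assoc_id == CURRENT || assoc_id == ALL)
		for (int i = 0; i < nlive; i++)
			apply_to_assoc(&live[i], value);
	return 0;
}

int main(void)
{
	struct assoc live[2] = { {0}, {0} };
	int def = 0;

	do_setsockopt(NULL, ALL, live, 2, &def, 7);
	printf("default=%d a0=%d a1=%d\n", def, live[0].value, live[1].value);
	return 0;
}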
@@ -2997,15 +3010,22 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
return -EINVAL;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
- if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && info.sinfo_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
+
if (asoc) {
asoc->default_stream = info.sinfo_stream;
asoc->default_flags = info.sinfo_flags;
asoc->default_ppid = info.sinfo_ppid;
asoc->default_context = info.sinfo_context;
asoc->default_timetolive = info.sinfo_timetolive;
- } else {
+
+ return 0;
+ }
+
+ if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
+ info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
sp->default_stream = info.sinfo_stream;
sp->default_flags = info.sinfo_flags;
sp->default_ppid = info.sinfo_ppid;
@@ -3013,6 +3033,17 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
sp->default_timetolive = info.sinfo_timetolive;
}
+ if (info.sinfo_assoc_id == SCTP_CURRENT_ASSOC ||
+ info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ asoc->default_stream = info.sinfo_stream;
+ asoc->default_flags = info.sinfo_flags;
+ asoc->default_ppid = info.sinfo_ppid;
+ asoc->default_context = info.sinfo_context;
+ asoc->default_timetolive = info.sinfo_timetolive;
+ }
+ }
+
return 0;
}
@@ -3037,20 +3068,37 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk,
return -EINVAL;
asoc = sctp_id2assoc(sk, info.snd_assoc_id);
- if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && info.snd_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
+
if (asoc) {
asoc->default_stream = info.snd_sid;
asoc->default_flags = info.snd_flags;
asoc->default_ppid = info.snd_ppid;
asoc->default_context = info.snd_context;
- } else {
+
+ return 0;
+ }
+
+ if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
+ info.snd_assoc_id == SCTP_ALL_ASSOC) {
sp->default_stream = info.snd_sid;
sp->default_flags = info.snd_flags;
sp->default_ppid = info.snd_ppid;
sp->default_context = info.snd_context;
}
+ if (info.snd_assoc_id == SCTP_CURRENT_ASSOC ||
+ info.snd_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ asoc->default_stream = info.snd_sid;
+ asoc->default_flags = info.snd_flags;
+ asoc->default_ppid = info.snd_ppid;
+ asoc->default_context = info.snd_context;
+ }
+ }
+
return 0;
}
@@ -3144,7 +3192,8 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
/* Set the values to the specific association */
- if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
rto_max = rtoinfo.srto_max;
@@ -3206,7 +3255,8 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsig
asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
- if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
/* Set the values to the specific association */
@@ -3319,7 +3369,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
current->comm, task_pid_nr(current));
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
- params.assoc_id = 0;
+ params.assoc_id = SCTP_FUTURE_ASSOC;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
@@ -3329,6 +3379,9 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
}
asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
if (val) {
int min_len, max_len;
@@ -3346,8 +3399,6 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
asoc->user_frag = val;
sctp_assoc_update_frag_point(asoc);
} else {
- if (params.assoc_id && sctp_style(sk, UDP))
- return -EINVAL;
sp->user_frag = val;
}
@@ -3460,8 +3511,8 @@ static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
unsigned int optlen)
{
+ struct sctp_sock *sp = sctp_sk(sk);
struct sctp_assoc_value params;
- struct sctp_sock *sp;
struct sctp_association *asoc;
if (optlen != sizeof(struct sctp_assoc_value))
@@ -3469,17 +3520,26 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
- sp = sctp_sk(sk);
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
- if (params.assoc_id != 0) {
- asoc = sctp_id2assoc(sk, params.assoc_id);
- if (!asoc)
- return -EINVAL;
+ if (asoc) {
asoc->default_rcv_context = params.assoc_value;
- } else {
- sp->default_rcv_context = params.assoc_value;
+
+ return 0;
}
+ if (params.assoc_id == SCTP_FUTURE_ASSOC ||
+ params.assoc_id == SCTP_ALL_ASSOC)
+ sp->default_rcv_context = params.assoc_value;
+
+ if (params.assoc_id == SCTP_CURRENT_ASSOC ||
+ params.assoc_id == SCTP_ALL_ASSOC)
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+ asoc->default_rcv_context = params.assoc_value;
+
return 0;
}
@@ -3580,11 +3640,9 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_sock *sp = sctp_sk(sk);
struct sctp_assoc_value params;
- struct sctp_sock *sp;
struct sctp_association *asoc;
- int val;
- int assoc_id = 0;
if (optlen == sizeof(int)) {
pr_warn_ratelimited(DEPRECATED
@@ -3592,25 +3650,34 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
"Use of int in max_burst socket option deprecated.\n"
"Use struct sctp_assoc_value instead\n",
current->comm, task_pid_nr(current));
- if (copy_from_user(&val, optval, optlen))
+ if (copy_from_user(&params.assoc_value, optval, optlen))
return -EFAULT;
+ params.assoc_id = SCTP_FUTURE_ASSOC;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
- val = params.assoc_value;
- assoc_id = params.assoc_id;
} else
return -EINVAL;
- sp = sctp_sk(sk);
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
- if (assoc_id != 0) {
- asoc = sctp_id2assoc(sk, assoc_id);
- if (!asoc)
- return -EINVAL;
- asoc->max_burst = val;
- } else
- sp->max_burst = val;
+ if (asoc) {
+ asoc->max_burst = params.assoc_value;
+
+ return 0;
+ }
+
+ if (params.assoc_id == SCTP_FUTURE_ASSOC ||
+ params.assoc_id == SCTP_ALL_ASSOC)
+ sp->max_burst = params.assoc_value;
+
+ if (params.assoc_id == SCTP_CURRENT_ASSOC ||
+ params.assoc_id == SCTP_ALL_ASSOC)
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+ asoc->max_burst = params.assoc_value;
return 0;
}
@@ -3702,7 +3769,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_authkey *authkey;
struct sctp_association *asoc;
- int ret;
+ int ret = -EINVAL;
if (!ep->auth_enable)
return -EACCES;
@@ -3712,25 +3779,44 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
/* authkey->sca_keylength is u16, so optlen can't be bigger than
* this.
*/
- optlen = min_t(unsigned int, optlen, USHRT_MAX +
- sizeof(struct sctp_authkey));
+ optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(*authkey));
authkey = memdup_user(optval, optlen);
if (IS_ERR(authkey))
return PTR_ERR(authkey);
- if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
- ret = -EINVAL;
+ if (authkey->sca_keylength > optlen - sizeof(*authkey))
goto out;
- }
asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
- if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
- ret = -EINVAL;
+ if (!asoc && authkey->sca_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ if (asoc) {
+ ret = sctp_auth_set_key(ep, asoc, authkey);
goto out;
}
- ret = sctp_auth_set_key(ep, asoc, authkey);
+ if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
+ authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
+ ret = sctp_auth_set_key(ep, asoc, authkey);
+ if (ret)
+ goto out;
+ }
+
+ ret = 0;
+
+ if (authkey->sca_assoc_id == SCTP_CURRENT_ASSOC ||
+ authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ int res = sctp_auth_set_key(ep, asoc, authkey);
+
+ if (res && !ret)
+ ret = res;
+ }
+ }
+
out:
kzfree(authkey);
return ret;
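
Annotation: when a wildcard id fans the key operation out over every live association, the loop above keeps going on failure and reports only the first error (if (res && !ret) ret = res). The same accumulation reappears in the active_key, del_key, deactivate_key and scheduler hunks below. A tiny sketch of the pattern, with an invented operation and error code:

#include <stdio.h>

static int apply(int i)
{
	return (i == 1) ? -22 : 0;	/* pretend the second item fails (-EINVAL) */
}

int main(void)
{
	int ret = 0;

	for (int i = 0; i < 3; i++) {
		int res = apply(i);

		if (res && !ret)	/* keep the first error, keep iterating */
			ret = res;
	}
	printf("ret=%d\n", ret);	/* -22: later items were still processed */
	return 0;
}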
@@ -3747,8 +3833,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
- struct sctp_authkeyid val;
struct sctp_association *asoc;
+ struct sctp_authkeyid val;
+ int ret = 0;
if (!ep->auth_enable)
return -EACCES;
@@ -3759,10 +3846,32 @@ static int sctp_setsockopt_active_key(struct sock *sk,
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
- if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
- return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
+ if (asoc)
+ return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
+
+ if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
+ val.scact_assoc_id == SCTP_ALL_ASSOC) {
+ ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
+ if (ret)
+ return ret;
+ }
+
+ if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
+ val.scact_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ int res = sctp_auth_set_active_key(ep, asoc,
+ val.scact_keynumber);
+
+ if (res && !ret)
+ ret = res;
+ }
+ }
+
+ return ret;
}
/*
@@ -3775,8 +3884,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
- struct sctp_authkeyid val;
struct sctp_association *asoc;
+ struct sctp_authkeyid val;
+ int ret = 0;
if (!ep->auth_enable)
return -EACCES;
@@ -3787,11 +3897,32 @@ static int sctp_setsockopt_del_key(struct sock *sk,
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
- if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
- return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
+ if (asoc)
+ return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
+
+ if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
+ val.scact_assoc_id == SCTP_ALL_ASSOC) {
+ ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
+ if (ret)
+ return ret;
+ }
+
+ if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
+ val.scact_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ int res = sctp_auth_del_key_id(ep, asoc,
+ val.scact_keynumber);
+ if (res && !ret)
+ ret = res;
+ }
+ }
+
+ return ret;
}
/*
@@ -3803,8 +3934,9 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
- struct sctp_authkeyid val;
struct sctp_association *asoc;
+ struct sctp_authkeyid val;
+ int ret = 0;
if (!ep->auth_enable)
return -EACCES;
@@ -3815,10 +3947,32 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
return -EFAULT;
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
- if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
- return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
+ if (asoc)
+ return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
+
+ if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
+ val.scact_assoc_id == SCTP_ALL_ASSOC) {
+ ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
+ if (ret)
+ return ret;
+ }
+
+ if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
+ val.scact_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ int res = sctp_auth_deact_key_id(ep, asoc,
+ val.scact_keynumber);
+
+ if (res && !ret)
+ ret = res;
+ }
+ }
+
+ return ret;
}
/*
@@ -3884,11 +4038,25 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
sizeof(struct sctp_paddrthlds)))
return -EFAULT;
-
- if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
- asoc = sctp_id2assoc(sk, val.spt_assoc_id);
- if (!asoc)
+ if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
+ trans = sctp_addr_id2transport(sk, &val.spt_address,
+ val.spt_assoc_id);
+ if (!trans)
return -ENOENT;
+
+ if (val.spt_pathmaxrxt)
+ trans->pathmaxrxt = val.spt_pathmaxrxt;
+ trans->pf_retrans = val.spt_pathpfthld;
+
+ return 0;
+ }
+
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports) {
if (val.spt_pathmaxrxt)
@@ -3900,14 +4068,11 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
asoc->pathmaxrxt = val.spt_pathmaxrxt;
asoc->pf_retrans = val.spt_pathpfthld;
} else {
- trans = sctp_addr_id2transport(sk, &val.spt_address,
- val.spt_assoc_id);
- if (!trans)
- return -ENOENT;
+ struct sctp_sock *sp = sctp_sk(sk);
if (val.spt_pathmaxrxt)
- trans->pathmaxrxt = val.spt_pathmaxrxt;
- trans->pf_retrans = val.spt_pathpfthld;
+ sp->pathmaxrxt = val.spt_pathmaxrxt;
+ sp->pf_retrans = val.spt_pathpfthld;
}
return 0;
@@ -3950,6 +4115,7 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
unsigned int optlen)
{
struct sctp_assoc_value params;
+ struct sctp_association *asoc;
if (optlen != sizeof(params))
return -EINVAL;
@@ -3957,6 +4123,11 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
return 0;
@@ -3966,6 +4137,7 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_sock *sp = sctp_sk(sk);
struct sctp_default_prinfo info;
struct sctp_association *asoc;
int retval = -EINVAL;
@@ -3985,19 +4157,31 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk,
info.pr_value = 0;
asoc = sctp_id2assoc(sk, info.pr_assoc_id);
+ if (!asoc && info.pr_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ retval = 0;
+
if (asoc) {
SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
asoc->default_timetolive = info.pr_value;
- } else if (!info.pr_assoc_id) {
- struct sctp_sock *sp = sctp_sk(sk);
+ goto out;
+ }
+ if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
+ info.pr_assoc_id == SCTP_ALL_ASSOC) {
SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
sp->default_timetolive = info.pr_value;
- } else {
- goto out;
}
- retval = 0;
+ if (info.pr_assoc_id == SCTP_CURRENT_ASSOC ||
+ info.pr_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
+ asoc->default_timetolive = info.pr_value;
+ }
+ }
out:
return retval;
@@ -4020,15 +4204,14 @@ static int sctp_setsockopt_reconfig_supported(struct sock *sk,
}
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (asoc) {
- asoc->reconf_enable = !!params.assoc_value;
- } else if (!params.assoc_id) {
- struct sctp_sock *sp = sctp_sk(sk);
-
- sp->ep->reconf_enable = !!params.assoc_value;
- } else {
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
goto out;
- }
+
+ if (asoc)
+ asoc->reconf_enable = !!params.assoc_value;
+ else
+ sctp_sk(sk)->ep->reconf_enable = !!params.assoc_value;
retval = 0;
@@ -4040,6 +4223,7 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EINVAL;
@@ -4056,17 +4240,25 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk,
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ retval = 0;
+
if (asoc) {
asoc->strreset_enable = params.assoc_value;
- } else if (!params.assoc_id) {
- struct sctp_sock *sp = sctp_sk(sk);
-
- sp->ep->strreset_enable = params.assoc_value;
- } else {
goto out;
}
- retval = 0;
+ if (params.assoc_id == SCTP_FUTURE_ASSOC ||
+ params.assoc_id == SCTP_ALL_ASSOC)
+ ep->strreset_enable = params.assoc_value;
+
+ if (params.assoc_id == SCTP_CURRENT_ASSOC ||
+ params.assoc_id == SCTP_ALL_ASSOC)
+ list_for_each_entry(asoc, &ep->asocs, asocs)
+ asoc->strreset_enable = params.assoc_value;
out:
return retval;
@@ -4161,29 +4353,44 @@ static int sctp_setsockopt_scheduler(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
+ struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
struct sctp_assoc_value params;
- int retval = -EINVAL;
+ int retval = 0;
if (optlen < sizeof(params))
- goto out;
+ return -EINVAL;
optlen = sizeof(params);
- if (copy_from_user(&params, optval, optlen)) {
- retval = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&params, optval, optlen))
+ return -EFAULT;
if (params.assoc_value > SCTP_SS_MAX)
- goto out;
+ return -EINVAL;
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (!asoc)
- goto out;
+ if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc)
+ return sctp_sched_set_sched(asoc, params.assoc_value);
- retval = sctp_sched_set_sched(asoc, params.assoc_value);
+ if (params.assoc_id == SCTP_FUTURE_ASSOC ||
+ params.assoc_id == SCTP_ALL_ASSOC)
+ sp->default_ss = params.assoc_value;
+
+ if (params.assoc_id == SCTP_CURRENT_ASSOC ||
+ params.assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ int ret = sctp_sched_set_sched(asoc,
+ params.assoc_value);
+
+ if (ret && !retval)
+ retval = ret;
+ }
+ }
-out:
return retval;
}
@@ -4191,8 +4398,8 @@ static int sctp_setsockopt_scheduler_value(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
- struct sctp_association *asoc;
struct sctp_stream_value params;
+ struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen < sizeof(params))
@@ -4205,11 +4412,24 @@ static int sctp_setsockopt_scheduler_value(struct sock *sk,
}
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (!asoc)
+ if (!asoc && params.assoc_id != SCTP_CURRENT_ASSOC &&
+ sctp_style(sk, UDP))
goto out;
- retval = sctp_sched_set_value(asoc, params.stream_id,
- params.stream_value, GFP_KERNEL);
+ if (asoc) {
+ retval = sctp_sched_set_value(asoc, params.stream_id,
+ params.stream_value, GFP_KERNEL);
+ goto out;
+ }
+
+ retval = 0;
+
+ list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) {
+ int ret = sctp_sched_set_value(asoc, params.stream_id,
+ params.stream_value, GFP_KERNEL);
+ if (ret && !retval) /* try to return the 1st error. */
+ retval = ret;
+ }
out:
return retval;
@@ -4220,8 +4440,8 @@ static int sctp_setsockopt_interleaving_supported(struct sock *sk,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
- struct net *net = sock_net(sk);
struct sctp_assoc_value params;
+ struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen < sizeof(params))
@@ -4233,10 +4453,12 @@ static int sctp_setsockopt_interleaving_supported(struct sock *sk,
goto out;
}
- if (params.assoc_id)
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
goto out;
- if (!net->sctp.intl_enable || !sp->frag_interleave) {
+ if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) {
retval = -EPERM;
goto out;
}
@@ -4271,54 +4493,69 @@ static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
return 0;
}
+static int sctp_assoc_ulpevent_type_set(struct sctp_event *param,
+ struct sctp_association *asoc)
+{
+ struct sctp_ulpevent *event;
+
+ sctp_ulpevent_type_set(&asoc->subscribe, param->se_type, param->se_on);
+
+ if (param->se_type == SCTP_SENDER_DRY_EVENT && param->se_on) {
+ if (sctp_outq_is_empty(&asoc->outqueue)) {
+ event = sctp_ulpevent_make_sender_dry_event(asoc,
+ GFP_USER | __GFP_NOWARN);
+ if (!event)
+ return -ENOMEM;
+
+ asoc->stream.si->enqueue_event(&asoc->ulpq, event);
+ }
+ }
+
+ return 0;
+}
+
static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
unsigned int optlen)
{
+ struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
- struct sctp_ulpevent *event;
struct sctp_event param;
int retval = 0;
- if (optlen < sizeof(param)) {
- retval = -EINVAL;
- goto out;
- }
+ if (optlen < sizeof(param))
+ return -EINVAL;
optlen = sizeof(param);
- if (copy_from_user(&param, optval, optlen)) {
- retval = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&param, optval, optlen))
+ return -EFAULT;
if (param.se_type < SCTP_SN_TYPE_BASE ||
- param.se_type > SCTP_SN_TYPE_MAX) {
- retval = -EINVAL;
- goto out;
- }
+ param.se_type > SCTP_SN_TYPE_MAX)
+ return -EINVAL;
asoc = sctp_id2assoc(sk, param.se_assoc_id);
- if (!asoc) {
- sctp_ulpevent_type_set(&sctp_sk(sk)->subscribe,
- param.se_type, param.se_on);
- goto out;
- }
+ if (!asoc && param.se_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
- sctp_ulpevent_type_set(&asoc->subscribe, param.se_type, param.se_on);
+ if (asoc)
+ return sctp_assoc_ulpevent_type_set(&param, asoc);
- if (param.se_type == SCTP_SENDER_DRY_EVENT && param.se_on) {
- if (sctp_outq_is_empty(&asoc->outqueue)) {
- event = sctp_ulpevent_make_sender_dry_event(asoc,
- GFP_USER | __GFP_NOWARN);
- if (!event) {
- retval = -ENOMEM;
- goto out;
- }
+ if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
+ param.se_assoc_id == SCTP_ALL_ASSOC)
+ sctp_ulpevent_type_set(&sp->subscribe,
+ param.se_type, param.se_on);
- asoc->stream.si->enqueue_event(&asoc->ulpq, event);
+ if (param.se_assoc_id == SCTP_CURRENT_ASSOC ||
+ param.se_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ int ret = sctp_assoc_ulpevent_type_set(&param, asoc);
+
+ if (ret && !retval)
+ retval = ret;
}
}
-out:
return retval;
}
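
Annotation: sctp_assoc_ulpevent_type_set() above keeps the original edge case: enabling SCTP_SENDER_DRY_EVENT while the out queue is already empty must deliver the event immediately, otherwise the subscriber would wait for an edge that has already passed. A minimal sketch of that subscribe-then-check pattern (names invented):

#include <stdbool.h>
#include <stdio.h>

struct sub { bool dry_event_on; };

static void deliver_dry_event(void)
{
	printf("sender dry event delivered\n");
}

static int enable_dry_event(struct sub *s, bool outq_empty)
{
	s->dry_event_on = true;
	if (outq_empty)			/* condition already true: fire now */
		deliver_dry_event();
	return 0;
}

int main(void)
{
	struct sub s = { false };

	return enable_dry_event(&s, true);
}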
@@ -4777,12 +5014,14 @@ static int sctp_init_sock(struct sock *sk)
*/
sp->hbinterval = net->sctp.hb_interval;
sp->pathmaxrxt = net->sctp.max_retrans_path;
+ sp->pf_retrans = net->sctp.pf_retrans;
sp->pathmtu = 0; /* allow default discovery */
sp->sackdelay = net->sctp.sack_timeout;
sp->sackfreq = 2;
sp->param_flags = SPP_HB_ENABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
+ sp->default_ss = SCTP_SS_DEFAULT;
/* If enabled no SCTP message fragmentation will be performed.
* Configure through SCTP_DISABLE_FRAGMENTS socket option.
@@ -5676,12 +5915,13 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
}
}
- /* Get association, if assoc_id != 0 and the socket is a one
- * to many style socket, and an association was not found, then
- * the id was invalid.
+ /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
- if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
+ if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
pr_debug("%s: failed no association\n", __func__);
return -EINVAL;
}
@@ -5810,19 +6050,19 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
} else
return -EINVAL;
- /* Get association, if sack_assoc_id != 0 and the socket is a one
- * to many style socket, and an association was not found, then
- * the id was invalid.
+ /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
*/
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
- if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && params.sack_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
/* Fetch association values. */
if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
- params.sack_delay = jiffies_to_msecs(
- asoc->sackdelay);
+ params.sack_delay = jiffies_to_msecs(asoc->sackdelay);
params.sack_freq = asoc->sackfreq;
} else {
@@ -6175,8 +6415,10 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
return -EFAULT;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
- if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && info.sinfo_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
+
if (asoc) {
info.sinfo_stream = asoc->default_stream;
info.sinfo_flags = asoc->default_flags;
@@ -6219,8 +6461,10 @@ static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
return -EFAULT;
asoc = sctp_id2assoc(sk, info.snd_assoc_id);
- if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && info.snd_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
+
if (asoc) {
info.snd_sid = asoc->default_stream;
info.snd_flags = asoc->default_flags;
@@ -6296,7 +6540,8 @@ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
- if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
/* Values corresponding to the specific association. */
@@ -6353,7 +6598,8 @@ static int sctp_getsockopt_associnfo(struct sock *sk, int len,
asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
- if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
/* Values corresponding to the specific association */
@@ -6428,7 +6674,6 @@ static int sctp_getsockopt_context(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
struct sctp_assoc_value params;
- struct sctp_sock *sp;
struct sctp_association *asoc;
if (len < sizeof(struct sctp_assoc_value))
@@ -6439,16 +6684,13 @@ static int sctp_getsockopt_context(struct sock *sk, int len,
if (copy_from_user(&params, optval, len))
return -EFAULT;
- sp = sctp_sk(sk);
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
- if (params.assoc_id != 0) {
- asoc = sctp_id2assoc(sk, params.assoc_id);
- if (!asoc)
- return -EINVAL;
- params.assoc_value = asoc->default_rcv_context;
- } else {
- params.assoc_value = sp->default_rcv_context;
- }
+ params.assoc_value = asoc ? asoc->default_rcv_context
+ : sctp_sk(sk)->default_rcv_context;
if (put_user(len, optlen))
return -EFAULT;
@@ -6497,7 +6739,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
"Use of int in maxseg socket option.\n"
"Use struct sctp_assoc_value instead\n",
current->comm, task_pid_nr(current));
- params.assoc_id = 0;
+ params.assoc_id = SCTP_FUTURE_ASSOC;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
if (copy_from_user(&params, optval, len))
@@ -6506,7 +6748,8 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
return -EINVAL;
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (!asoc && params.assoc_id && sctp_style(sk, UDP))
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
if (asoc)
@@ -6583,7 +6826,6 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
int __user *optlen)
{
struct sctp_assoc_value params;
- struct sctp_sock *sp;
struct sctp_association *asoc;
if (len == sizeof(int)) {
@@ -6592,7 +6834,7 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
"Use of int in max_burst socket option.\n"
"Use struct sctp_assoc_value instead\n",
current->comm, task_pid_nr(current));
- params.assoc_id = 0;
+ params.assoc_id = SCTP_FUTURE_ASSOC;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
if (copy_from_user(&params, optval, len))
@@ -6600,15 +6842,12 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
} else
return -EINVAL;
- sp = sctp_sk(sk);
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
- if (params.assoc_id != 0) {
- asoc = sctp_id2assoc(sk, params.assoc_id);
- if (!asoc)
- return -EINVAL;
- params.assoc_value = asoc->max_burst;
- } else
- params.assoc_value = sp->max_burst;
+ params.assoc_value = asoc ? asoc->max_burst : sctp_sk(sk)->max_burst;
if (len == sizeof(int)) {
if (copy_to_user(optval, &params.assoc_value, len))
@@ -6759,14 +6998,12 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
to = p->gauth_chunks;
asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
- if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
+ if (!asoc && val.gauth_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
return -EINVAL;
- if (asoc)
- ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
- else
- ch = ep->auth_chunk_list;
-
+ ch = asoc ? (struct sctp_chunks_param *)asoc->c.auth_chunks
+ : ep->auth_chunk_list;
if (!ch)
goto num;
@@ -6911,14 +7148,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
return -EFAULT;
- if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
- asoc = sctp_id2assoc(sk, val.spt_assoc_id);
- if (!asoc)
- return -ENOENT;
-
- val.spt_pathpfthld = asoc->pf_retrans;
- val.spt_pathmaxrxt = asoc->pathmaxrxt;
- } else {
+ if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
trans = sctp_addr_id2transport(sk, &val.spt_address,
val.spt_assoc_id);
if (!trans)
@@ -6926,6 +7156,23 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
val.spt_pathmaxrxt = trans->pathmaxrxt;
val.spt_pathpfthld = trans->pf_retrans;
+
+ return 0;
+ }
+
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ val.spt_pathpfthld = asoc->pf_retrans;
+ val.spt_pathmaxrxt = asoc->pathmaxrxt;
+ } else {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ val.spt_pathpfthld = sp->pf_retrans;
+ val.spt_pathmaxrxt = sp->pathmaxrxt;
}
if (put_user(len, optlen) || copy_to_user(optval, &val, len))
@@ -7056,17 +7303,15 @@ static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (asoc) {
- params.assoc_value = asoc->prsctp_enable;
- } else if (!params.assoc_id) {
- struct sctp_sock *sp = sctp_sk(sk);
-
- params.assoc_value = sp->ep->prsctp_enable;
- } else {
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
retval = -EINVAL;
goto out;
}
+ params.assoc_value = asoc ? asoc->prsctp_enable
+ : sctp_sk(sk)->ep->prsctp_enable;
+
if (put_user(len, optlen))
goto out;
@@ -7097,17 +7342,20 @@ static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
goto out;
asoc = sctp_id2assoc(sk, info.pr_assoc_id);
+ if (!asoc && info.pr_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
if (asoc) {
info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
info.pr_value = asoc->default_timetolive;
- } else if (!info.pr_assoc_id) {
+ } else {
struct sctp_sock *sp = sctp_sk(sk);
info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
info.pr_value = sp->default_timetolive;
- } else {
- retval = -EINVAL;
- goto out;
}
if (put_user(len, optlen))
@@ -7263,17 +7511,15 @@ static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (asoc) {
- params.assoc_value = asoc->reconf_enable;
- } else if (!params.assoc_id) {
- struct sctp_sock *sp = sctp_sk(sk);
-
- params.assoc_value = sp->ep->reconf_enable;
- } else {
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
retval = -EINVAL;
goto out;
}
+ params.assoc_value = asoc ? asoc->reconf_enable
+ : sctp_sk(sk)->ep->reconf_enable;
+
if (put_user(len, optlen))
goto out;
@@ -7304,17 +7550,15 @@ static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (asoc) {
- params.assoc_value = asoc->strreset_enable;
- } else if (!params.assoc_id) {
- struct sctp_sock *sp = sctp_sk(sk);
-
- params.assoc_value = sp->ep->strreset_enable;
- } else {
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
retval = -EINVAL;
goto out;
}
+ params.assoc_value = asoc ? asoc->strreset_enable
+ : sctp_sk(sk)->ep->strreset_enable;
+
if (put_user(len, optlen))
goto out;
@@ -7345,12 +7589,14 @@ static int sctp_getsockopt_scheduler(struct sock *sk, int len,
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (!asoc) {
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
retval = -EINVAL;
goto out;
}
- params.assoc_value = sctp_sched_get_sched(asoc);
+ params.assoc_value = asoc ? sctp_sched_get_sched(asoc)
+ : sctp_sk(sk)->default_ss;
if (put_user(len, optlen))
goto out;
@@ -7424,17 +7670,15 @@ static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
- if (asoc) {
- params.assoc_value = asoc->intl_enable;
- } else if (!params.assoc_id) {
- struct sctp_sock *sp = sctp_sk(sk);
-
- params.assoc_value = sp->strm_interleave;
- } else {
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
retval = -EINVAL;
goto out;
}
+ params.assoc_value = asoc ? asoc->intl_enable
+ : sctp_sk(sk)->strm_interleave;
+
if (put_user(len, optlen))
goto out;
@@ -7486,6 +7730,10 @@ static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval,
return -EINVAL;
asoc = sctp_id2assoc(sk, param.se_assoc_id);
+ if (!asoc && param.se_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
subscribe = asoc ? asoc->subscribe : sctp_sk(sk)->subscribe;
param.se_on = sctp_ulpevent_type_enabled(subscribe, param.se_type);
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 3892e7630f3a..2936ed17bf9e 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count)
}
}
+static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
+{
+ size_t index = 0;
+
+ while (count--) {
+ if (elem == flex_array_get(fa, index))
+ break;
+ index++;
+ }
+
+ return index;
+}
+
/* Migrates chunks from stream queues to new stream queues if needed,
* but not across associations. Also, removes those chunks to streams
* higher than the new max.
@@ -131,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
}
}
- for (i = outcnt; i < stream->outcnt; i++)
+ for (i = outcnt; i < stream->outcnt; i++) {
kfree(SCTP_SO(stream, i)->ext);
+ SCTP_SO(stream, i)->ext = NULL;
+ }
}
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
@@ -147,6 +162,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
if (stream->out) {
fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
+ if (stream->out_curr) {
+ size_t index = fa_index(stream->out, stream->out_curr,
+ stream->outcnt);
+
+ BUG_ON(index == stream->outcnt);
+ stream->out_curr = flex_array_get(out, index);
+ }
fa_free(stream->out);
}
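
Annotation: fa_index() above exists because flex_array elements live in separate part pages, so stream->out_curr cannot be turned into an index by pointer arithmetic; a linear scan finds its slot in the old array, and the pointer is re-resolved in the newly allocated array before the old one is freed, avoiding a dangling out_curr. The sketch below shows the same migration with plain contiguous arrays, where subtraction is enough:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct stream_out { int mid; };

int main(void)
{
	size_t oldcnt = 4, newcnt = 8;
	struct stream_out *oldarr = calloc(oldcnt, sizeof(*oldarr));
	struct stream_out *newarr = calloc(newcnt, sizeof(*newarr));

	if (!oldarr || !newarr)
		return 1;

	struct stream_out *cur = &oldarr[2];		/* cached "current" slot */

	memcpy(newarr, oldarr, oldcnt * sizeof(*oldarr));

	size_t index = cur - oldarr;			/* analogous to fa_index() */
	cur = &newarr[index];				/* re-point before freeing */

	free(oldarr);
	printf("out_curr re-resolved at index %zu, mid=%d\n", index, cur->mid);
	free(newarr);
	return 0;
}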
@@ -585,9 +607,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
struct sctp_strreset_outreq *outreq = param.v;
struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
- __u16 i, nums, flags = 0;
__be16 *str_p = NULL;
__u32 request_seq;
+ __u16 i, nums;
request_seq = ntohl(outreq->request_seq);
@@ -615,6 +637,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
goto out;
+ nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
+ str_p = outreq->list_of_streams;
+ for (i = 0; i < nums; i++) {
+ if (ntohs(str_p[i]) >= stream->incnt) {
+ result = SCTP_STRRESET_ERR_WRONG_SSN;
+ goto out;
+ }
+ }
+
if (asoc->strreset_chunk) {
if (!sctp_chunk_lookup_strreset_param(
asoc, outreq->response_seq,
@@ -637,32 +668,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
sctp_chunk_put(asoc->strreset_chunk);
asoc->strreset_chunk = NULL;
}
-
- flags = SCTP_STREAM_RESET_INCOMING_SSN;
}
- nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
- if (nums) {
- str_p = outreq->list_of_streams;
- for (i = 0; i < nums; i++) {
- if (ntohs(str_p[i]) >= stream->incnt) {
- result = SCTP_STRRESET_ERR_WRONG_SSN;
- goto out;
- }
- }
-
+ if (nums)
for (i = 0; i < nums; i++)
SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
- } else {
+ else
for (i = 0; i < stream->incnt; i++)
SCTP_SI(stream, i)->mid = 0;
- }
result = SCTP_STRRESET_PERFORMED;
*evp = sctp_ulpevent_make_stream_reset_event(asoc,
- flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
- GFP_ATOMIC);
+ SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
out:
sctp_update_strreset_result(asoc, result);
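
Annotation: the strreset_outreq hunk above moves the range check of list_of_streams ahead of any state change, so a request naming an out-of-range stream is rejected with SCTP_STRRESET_ERR_WRONG_SSN before anything is modified (the addstrm_out hunk below applies the same validate-then-commit reordering). A small sketch of the two-pass shape, with simplified structures:

#include <stdio.h>

#define INCNT 8		/* assumed number of incoming streams */

static int reset_streams(const unsigned short *list, int nums, int *mid)
{
	/* pass 1: reject the whole request if any stream is out of range */
	for (int i = 0; i < nums; i++)
		if (list[i] >= INCNT)
			return -1;		/* nothing was modified */

	/* pass 2: now it is safe to apply the reset */
	for (int i = 0; i < nums; i++)
		mid[list[i]] = 0;
	return 0;
}

int main(void)
{
	int mid[INCNT] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned short bad[] = { 1, 42 };
	unsigned short good[] = { 1, 3 };

	printf("bad request -> %d, mid[1]=%d (untouched)\n",
	       reset_streams(bad, 2, mid), mid[1]);
	printf("good request -> %d, mid[1]=%d mid[3]=%d\n",
	       reset_streams(good, 2, mid), mid[1], mid[3]);
	return 0;
}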
@@ -738,9 +756,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
result = SCTP_STRRESET_PERFORMED;
- *evp = sctp_ulpevent_make_stream_reset_event(asoc,
- SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
-
out:
sctp_update_strreset_result(asoc, result);
err:
@@ -873,6 +888,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
goto out;
+ in = ntohs(addstrm->number_of_streams);
+ incnt = stream->incnt + in;
+ if (!in || incnt > SCTP_MAX_STREAM)
+ goto out;
+
+ if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
+ goto out;
+
if (asoc->strreset_chunk) {
if (!sctp_chunk_lookup_strreset_param(
asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
@@ -896,14 +919,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
}
}
- in = ntohs(addstrm->number_of_streams);
- incnt = stream->incnt + in;
- if (!in || incnt > SCTP_MAX_STREAM)
- goto out;
-
- if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
- goto out;
-
stream->incnt = incnt;
result = SCTP_STRRESET_PERFORMED;
@@ -973,9 +988,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
result = SCTP_STRRESET_PERFORMED;
- *evp = sctp_ulpevent_make_stream_change_event(asoc,
- 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
-
out:
sctp_update_strreset_result(asoc, result);
err:
@@ -1036,10 +1048,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
sout->mid_uo = 0;
}
}
-
- flags = SCTP_STREAM_RESET_OUTGOING_SSN;
}
+ flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
+
for (i = 0; i < stream->outcnt; i++)
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
@@ -1058,6 +1070,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
sizeof(__u16);
+ flags |= SCTP_STREAM_RESET_INCOMING_SSN;
+
*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
nums, str_p, GFP_ATOMIC);
} else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 033696e6f74f..ad158d311ffa 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
/* When a data chunk is sent, reset the heartbeat interval. */
expires = jiffies + sctp_transport_timeout(transport);
- if (time_before(transport->hb_timer.expires, expires) &&
+ if ((time_before(transport->hb_timer.expires, expires) ||
+ !timer_pending(&transport->hb_timer)) &&
!mod_timer(&transport->hb_timer,
expires + prandom_u32_max(transport->rto)))
sctp_transport_hold(transport);
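
Annotation: the transport hunk above fixes the heartbeat re-arm condition: a timer that is not pending at all must be restarted even when its stale ->expires happens to compare as "later" than the new deadline. A sketch of just that decision, with simplified types and a wraparound-safe compare in the time_before() style:

#include <stdbool.h>
#include <stdio.h>

static bool should_rearm(bool pending, long cur_expires, long new_expires)
{
	/* old check: only "current expiry before new expiry";
	 * new check also re-arms whenever the timer is idle
	 */
	return (cur_expires - new_expires) < 0 || !pending;
}

int main(void)
{
	printf("%d\n", should_rearm(false, 1000, 500));	/* 1: idle timer, re-arm */
	printf("%d\n", should_rearm(true, 1000, 500));	/* 0: pending timer already later */
	printf("%d\n", should_rearm(true, 300, 500));	/* 1: push expiry out */
	return 0;
}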
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index c4da4a78d369..77ef53596d18 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -30,6 +30,10 @@
#include <net/smc.h>
#include <asm/ioctls.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include "smc_netns.h"
+
#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
@@ -42,8 +46,11 @@
#include "smc_rx.h"
#include "smc_close.h"
-static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group
- * creation
+static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group
+ * creation on server
+ */
+static DEFINE_MUTEX(smc_client_lgr_pending); /* serialize link group
+ * creation on client
*/
static void smc_tcp_listen_work(struct work_struct *);
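
Annotation: the af_smc hunks below split the single smc_create_lgr_pending mutex into server and client variants, so link-group creation for outgoing connections no longer serializes against listen-side processing; SMC-D, which has only one lgr role, takes the server lock on both paths (see the comments in smc_connect_abort() and smc_connect_ism()). A userspace sketch of the resulting independence, with pthread mutexes standing in for the kernel ones and placeholder critical sections (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t server_lgr_pending = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t client_lgr_pending = PTHREAD_MUTEX_INITIALIZER;

static void *server_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&server_lgr_pending);
	/* ... create or reuse a link group for an incoming connection ... */
	pthread_mutex_unlock(&server_lgr_pending);
	return NULL;
}

static void *client_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&client_lgr_pending);
	/* ... create or reuse a link group for an outgoing connection ... */
	pthread_mutex_unlock(&client_lgr_pending);
	return NULL;
}

int main(void)
{
	pthread_t s, c;

	pthread_create(&s, NULL, server_path, NULL);
	pthread_create(&c, NULL, client_path, NULL);	/* runs concurrently */
	pthread_join(s, NULL);
	pthread_join(c, NULL);
	puts("both roles completed without contending on one lock");
	return 0;
}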
@@ -145,32 +152,35 @@ static int smc_release(struct socket *sock)
rc = smc_close_active(smc);
sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
- }
- if (smc->clcsock) {
- if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+ } else {
+ if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+ sock_put(sk); /* passive closing */
+ if (sk->sk_state == SMC_LISTEN) {
/* wake up clcsock accept */
rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
}
- mutex_lock(&smc->clcsock_release_lock);
- sock_release(smc->clcsock);
- smc->clcsock = NULL;
- mutex_unlock(&smc->clcsock_release_lock);
- }
- if (smc->use_fallback) {
- if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
- sock_put(sk); /* passive closing */
sk->sk_state = SMC_CLOSED;
sk->sk_state_change(sk);
}
+ sk->sk_prot->unhash(sk);
+
+ if (sk->sk_state == SMC_CLOSED) {
+ if (smc->clcsock) {
+ mutex_lock(&smc->clcsock_release_lock);
+ sock_release(smc->clcsock);
+ smc->clcsock = NULL;
+ mutex_unlock(&smc->clcsock_release_lock);
+ }
+ if (!smc->use_fallback)
+ smc_conn_free(&smc->conn);
+ }
+
/* detach socket */
sock_orphan(sk);
sock->sk = NULL;
- if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
- smc_conn_free(&smc->conn);
release_sock(sk);
- sk->sk_prot->unhash(sk);
sock_put(sk); /* final sock_put */
out:
return rc;
@@ -289,7 +299,8 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
(1UL << SOCK_RXQ_OVFL) | \
(1UL << SOCK_WIFI_STATUS) | \
(1UL << SOCK_NOFCS) | \
- (1UL << SOCK_FILTER_LOCKED))
+ (1UL << SOCK_FILTER_LOCKED) | \
+ (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
* clc socket (since smc is not called for these options from net/core)
*/
@@ -473,7 +484,12 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
{
if (local_contact == SMC_FIRST_CONTACT)
smc_lgr_forget(smc->conn.lgr);
- mutex_unlock(&smc_create_lgr_pending);
+ if (smc->conn.lgr->is_smcd)
+ /* there is only one lgr role for SMC-D; use server lock */
+ mutex_unlock(&smc_server_lgr_pending);
+ else
+ mutex_unlock(&smc_client_lgr_pending);
+
smc_conn_free(&smc->conn);
return reason_code;
}
@@ -558,7 +574,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
struct smc_link *link;
int reason_code = 0;
- mutex_lock(&smc_create_lgr_pending);
+ mutex_lock(&smc_client_lgr_pending);
local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
ibport, ntoh24(aclc->qpn), &aclc->lcl,
NULL, 0);
@@ -569,7 +585,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
else
reason_code = SMC_CLC_DECL_INTERR; /* other error */
- return smc_connect_abort(smc, reason_code, 0);
+ mutex_unlock(&smc_client_lgr_pending);
+ return reason_code;
}
link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];
@@ -613,7 +630,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
return smc_connect_abort(smc, reason_code,
local_contact);
}
- mutex_unlock(&smc_create_lgr_pending);
+ mutex_unlock(&smc_client_lgr_pending);
smc_copy_sock_settings_to_clc(smc);
if (smc->sk.sk_state == SMC_INIT)
@@ -630,11 +647,14 @@ static int smc_connect_ism(struct smc_sock *smc,
int local_contact = SMC_FIRST_CONTACT;
int rc = 0;
- mutex_lock(&smc_create_lgr_pending);
+ /* there is only one lgr role for SMC-D; use server lock */
+ mutex_lock(&smc_server_lgr_pending);
local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
NULL, ismdev, aclc->gid);
- if (local_contact < 0)
- return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
+ if (local_contact < 0) {
+ mutex_unlock(&smc_server_lgr_pending);
+ return SMC_CLC_DECL_MEM;
+ }
/* Create send and receive buffers */
if (smc_buf_create(smc, true))
@@ -648,7 +668,7 @@ static int smc_connect_ism(struct smc_sock *smc,
rc = smc_clc_send_confirm(smc);
if (rc)
return smc_connect_abort(smc, rc, local_contact);
- mutex_unlock(&smc_create_lgr_pending);
+ mutex_unlock(&smc_server_lgr_pending);
smc_copy_sock_settings_to_clc(smc);
if (smc->sk.sk_state == SMC_INIT)
@@ -1247,7 +1267,7 @@ static void smc_listen_work(struct work_struct *work)
return;
}
- mutex_lock(&smc_create_lgr_pending);
+ mutex_lock(&smc_server_lgr_pending);
smc_close_init(new_smc);
smc_rx_init(new_smc);
smc_tx_init(new_smc);
@@ -1269,7 +1289,7 @@ static void smc_listen_work(struct work_struct *work)
&local_contact) ||
smc_listen_rdma_reg(new_smc, local_contact))) {
/* SMC not supported, decline */
- mutex_unlock(&smc_create_lgr_pending);
+ mutex_unlock(&smc_server_lgr_pending);
smc_listen_decline(new_smc, SMC_CLC_DECL_MODEUNSUPP,
local_contact);
return;
@@ -1278,29 +1298,33 @@ static void smc_listen_work(struct work_struct *work)
/* send SMC Accept CLC message */
rc = smc_clc_send_accept(new_smc, local_contact);
if (rc) {
- mutex_unlock(&smc_create_lgr_pending);
+ mutex_unlock(&smc_server_lgr_pending);
smc_listen_decline(new_smc, rc, local_contact);
return;
}
+ /* SMC-D does not need this lock any more */
+ if (ism_supported)
+ mutex_unlock(&smc_server_lgr_pending);
+
/* receive SMC Confirm CLC message */
reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
SMC_CLC_CONFIRM, CLC_WAIT_TIME);
if (reason_code) {
- mutex_unlock(&smc_create_lgr_pending);
+ if (!ism_supported)
+ mutex_unlock(&smc_server_lgr_pending);
smc_listen_decline(new_smc, reason_code, local_contact);
return;
}
/* finish worker */
if (!ism_supported) {
- if (smc_listen_rdma_finish(new_smc, &cclc, local_contact)) {
- mutex_unlock(&smc_create_lgr_pending);
+ rc = smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+ mutex_unlock(&smc_server_lgr_pending);
+ if (rc)
return;
- }
}
smc_conn_save_peer_info(new_smc, &cclc);
- mutex_unlock(&smc_create_lgr_pending);
smc_listen_out_connected(new_smc);
}
@@ -1503,6 +1527,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
smc = smc_sk(sk);
lock_sock(sk);
+ if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
+ /* socket was connected before, no more data to read */
+ rc = 0;
+ goto out;
+ }
if ((sk->sk_state == SMC_INIT) ||
(sk->sk_state == SMC_LISTEN) ||
(sk->sk_state == SMC_CLOSED))
@@ -1838,7 +1867,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
smc = smc_sk(sk);
lock_sock(sk);
-
+ if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
+ /* socket was connected before, no more data to read */
+ rc = 0;
+ goto out;
+ }
if (sk->sk_state == SMC_INIT ||
sk->sk_state == SMC_LISTEN ||
sk->sk_state == SMC_CLOSED)
@@ -1937,10 +1970,33 @@ static const struct net_proto_family smc_sock_family_ops = {
.create = smc_create,
};
+unsigned int smc_net_id;
+
+static __net_init int smc_net_init(struct net *net)
+{
+ return smc_pnet_net_init(net);
+}
+
+static void __net_exit smc_net_exit(struct net *net)
+{
+ smc_pnet_net_exit(net);
+}
+
+static struct pernet_operations smc_net_ops = {
+ .init = smc_net_init,
+ .exit = smc_net_exit,
+ .id = &smc_net_id,
+ .size = sizeof(struct smc_net),
+};
+
static int __init smc_init(void)
{
int rc;
+ rc = register_pernet_subsys(&smc_net_ops);
+ if (rc)
+ return rc;
+
rc = smc_pnet_init();
if (rc)
return rc;
@@ -2006,6 +2062,7 @@ static void __exit smc_exit(void)
proto_unregister(&smc_proto6);
proto_unregister(&smc_proto);
smc_pnet_exit();
+ unregister_pernet_subsys(&smc_net_ops);
}
module_init(smc_init);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5721416d0605..adbdf195eb08 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */
} __aligned(8);
enum smc_urg_state {
- SMC_URG_VALID, /* data present */
- SMC_URG_NOTYET, /* data pending */
- SMC_URG_READ /* data was already read */
+ SMC_URG_VALID = 1, /* data present */
+ SMC_URG_NOTYET = 2, /* data pending */
+ SMC_URG_READ = 3, /* data was already read */
};
struct smc_connection {
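Giving the enumerators explicit non-zero values leaves 0 unused, which is what a freshly zero-initialized connection carries before any urgent-data state has been set. A quick stand-alone sketch of the difference (type and field names are made up):

	#include <stdio.h>
	#include <string.h>

	/* old style: the first enumerator is implicitly 0 */
	enum urg_old { URG_OLD_VALID, URG_OLD_NOTYET, URG_OLD_READ };

	/* patched style: 0 is left unused, so "never set" stays distinguishable */
	enum urg_new { URG_NEW_VALID = 1, URG_NEW_NOTYET = 2, URG_NEW_READ = 3 };

	struct conn_old { enum urg_old urg_state; };
	struct conn_new { enum urg_new urg_state; };

	int main(void)
	{
		struct conn_old o;
		struct conn_new n;

		memset(&o, 0, sizeof(o));
		memset(&n, 0, sizeof(n));

		/* a freshly zeroed old-style connection already reads as "valid" */
		printf("old zeroed == VALID: %d\n", o.urg_state == URG_OLD_VALID);
		printf("new zeroed == VALID: %d\n", n.urg_state == URG_NEW_VALID);
		return 0;
	}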
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index db83332ac1c8..d0b0f4c865b4 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -21,13 +21,6 @@
/********************************** send *************************************/
-struct smc_cdc_tx_pend {
- struct smc_connection *conn; /* socket connection */
- union smc_host_cursor cursor; /* tx sndbuf cursor sent */
- union smc_host_cursor p_cursor; /* rx RMBE cursor produced */
- u16 ctrl_seq; /* conn. tx sequence # */
-};
-
/* handler for send/transmission completion of a CDC msg */
static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
struct smc_link *link,
@@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
int smc_cdc_get_free_slot(struct smc_connection *conn,
struct smc_wr_buf **wr_buf,
+ struct smc_rdma_wr **wr_rdma_buf,
struct smc_cdc_tx_pend **pend)
{
struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
int rc;
rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
+ wr_rdma_buf,
(struct smc_wr_tx_pend_priv **)pend);
if (!conn->alert_token_local)
/* abnormal termination */
@@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
struct smc_wr_buf *wr_buf,
struct smc_cdc_tx_pend *pend)
{
+ union smc_host_cursor cfed;
struct smc_link *link;
int rc;
@@ -105,12 +101,12 @@ int smc_cdc_msg_send(struct smc_connection *conn,
conn->tx_cdc_seq++;
conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
- smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
- &conn->local_tx_ctrl, conn);
+ smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
- if (!rc)
- smc_curs_copy(&conn->rx_curs_confirmed,
- &conn->local_tx_ctrl.cons, conn);
+ if (!rc) {
+ smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
+ conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
+ }
return rc;
}
@@ -121,11 +117,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
struct smc_wr_buf *wr_buf;
int rc;
- rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
+ rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
if (rc)
return rc;
- return smc_cdc_msg_send(conn, wr_buf, pend);
+ spin_lock_bh(&conn->send_lock);
+ rc = smc_cdc_msg_send(conn, wr_buf, pend);
+ spin_unlock_bh(&conn->send_lock);
+ return rc;
}
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
@@ -195,6 +194,7 @@ int smcd_cdc_msg_send(struct smc_connection *conn)
if (rc)
return rc;
smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
+ conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
/* Calculate transmitted data and increment free send buffer space */
diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
&conn->tx_curs_sent);
@@ -271,26 +271,18 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
smp_mb__after_atomic();
smc->sk.sk_data_ready(&smc->sk);
} else {
- if (conn->local_rx_ctrl.prod_flags.write_blocked ||
- conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
- conn->local_rx_ctrl.prod_flags.urg_data_pending) {
- if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
- conn->urg_state = SMC_URG_NOTYET;
- /* force immediate tx of current consumer cursor, but
- * under send_lock to guarantee arrival in seqno-order
- */
- if (smc->sk.sk_state != SMC_INIT)
- smc_tx_sndbuf_nonempty(conn);
- }
+ if (conn->local_rx_ctrl.prod_flags.write_blocked)
+ smc->sk.sk_data_ready(&smc->sk);
+ if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
+ conn->urg_state = SMC_URG_NOTYET;
}
- /* piggy backed tx info */
/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
- if (diff_cons && smc_tx_prepared_sends(conn)) {
+ if ((diff_cons && smc_tx_prepared_sends(conn)) ||
+ conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
+ conn->local_rx_ctrl.prod_flags.urg_data_pending)
smc_tx_sndbuf_nonempty(conn);
- /* trigger socket release if connection closed */
- smc_close_wake_tx_prepared(smc);
- }
+
if (diff_cons && conn->urg_tx_pend &&
atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
/* urg data confirmed by peer, indicate we're ready for more */
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index b5bfe38c7f9b..861dc24c588c 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
#endif
}
-/* calculate cursor difference between old and new, where old <= new */
+/* calculate cursor difference between old and new, where old <= new and
+ * difference cannot exceed size
+ */
static inline int smc_curs_diff(unsigned int size,
union smc_host_cursor *old,
union smc_host_cursor *new)
@@ -185,28 +187,51 @@ static inline int smc_curs_comp(unsigned int size,
return smc_curs_diff(size, old, new);
}
+/* calculate cursor difference between old and new, where old <= new and
+ * difference may exceed size
+ */
+static inline int smc_curs_diff_large(unsigned int size,
+ union smc_host_cursor *old,
+ union smc_host_cursor *new)
+{
+ if (old->wrap < new->wrap)
+ return min_t(int,
+ (size - old->count) + new->count +
+ (new->wrap - old->wrap - 1) * size,
+ size);
+
+ if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */
+ return min_t(int,
+ (size - old->count) + new->count +
+ (new->wrap + 0xffff - old->wrap) * size,
+ size);
+
+ return max_t(int, 0, (new->count - old->count));
+}
+
static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
union smc_host_cursor *local,
+ union smc_host_cursor *save,
struct smc_connection *conn)
{
- union smc_host_cursor temp;
-
- smc_curs_copy(&temp, local, conn);
- peer->count = htonl(temp.count);
- peer->wrap = htons(temp.wrap);
+ smc_curs_copy(save, local, conn);
+ peer->count = htonl(save->count);
+ peer->wrap = htons(save->wrap);
/* peer->reserved = htons(0); must be ensured by caller */
}
static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
- struct smc_host_cdc_msg *local,
- struct smc_connection *conn)
+ struct smc_connection *conn,
+ union smc_host_cursor *save)
{
+ struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;
+
peer->common.type = local->common.type;
peer->len = local->len;
peer->seqno = htons(local->seqno);
peer->token = htonl(local->token);
- smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn);
- smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn);
+ smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn);
+ smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn);
peer->prod_flags = local->prod_flags;
peer->conn_state_flags = local->conn_state_flags;
}
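The new smc_curs_diff_large() computes how far one ring-buffer cursor lies behind another when the wrap counters can differ by more than one, clamping the result to the buffer size. A worked stand-alone example (struct and helper names are made up; min_t()/max_t() replaced by plain helpers):

	#include <stdio.h>

	/* ring-buffer cursor: wrap counter plus byte offset in the current pass */
	struct cursor {
		unsigned short wrap;
		unsigned int count;
	};

	static int min_int(int a, int b) { return a < b ? a : b; }
	static int max_int(int a, int b) { return a > b ? a : b; }

	/* difference old -> new, clamped to size, mirroring smc_curs_diff_large() */
	static int curs_diff_large(unsigned int size, const struct cursor *old,
				   const struct cursor *new)
	{
		if (old->wrap < new->wrap)
			return min_int((size - old->count) + new->count +
				       (new->wrap - old->wrap - 1) * size, size);
		if (old->wrap > new->wrap)	/* wrap counter itself wrapped */
			return min_int((size - old->count) + new->count +
				       (new->wrap + 0xffff - old->wrap) * size, size);
		return max_int(0, (int)(new->count - old->count));
	}

	int main(void)
	{
		struct cursor old = { .wrap = 3, .count = 900 };
		struct cursor new = { .wrap = 5, .count = 100 };

		/* the producer is several laps ahead; the usable difference can */
		/* never exceed one buffer size, so the result is clamped        */
		printf("diff = %d (buffer size 1024)\n",
		       curs_diff_large(1024, &old, &new));
		return 0;
	}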
@@ -245,17 +270,18 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
}
static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
- struct smcd_cdc_msg *peer)
+ struct smcd_cdc_msg *peer,
+ struct smc_connection *conn)
{
union smc_host_cursor temp;
temp.wrap = peer->prod.wrap;
temp.count = peer->prod.count;
- atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));
+ smc_curs_copy(&local->prod, &temp, conn);
temp.wrap = peer->cons.wrap;
temp.count = peer->cons.count;
- atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
+ smc_curs_copy(&local->cons, &temp, conn);
local->prod_flags = peer->cons.prod_flags;
local->conn_state_flags = peer->cons.conn_state_flags;
}
@@ -265,15 +291,21 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
struct smc_connection *conn)
{
if (conn->lgr->is_smcd)
- smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer);
+ smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer, conn);
else
smcr_cdc_msg_to_host(local, peer, conn);
}
-struct smc_cdc_tx_pend;
+struct smc_cdc_tx_pend {
+ struct smc_connection *conn; /* socket connection */
+ union smc_host_cursor cursor; /* tx sndbuf cursor sent */
+ union smc_host_cursor p_cursor; /* rx RMBE cursor produced */
+ u16 ctrl_seq; /* conn. tx sequence # */
+};
int smc_cdc_get_free_slot(struct smc_connection *conn,
struct smc_wr_buf **wr_buf,
+ struct smc_rdma_wr **wr_rdma_buf,
struct smc_cdc_tx_pend **pend);
void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 776e9dfc915d..d53fd588d1f5 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
vec.iov_len = sizeof(struct smc_clc_msg_decline);
len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
sizeof(struct smc_clc_msg_decline));
- if (len < sizeof(struct smc_clc_msg_decline))
+ if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
len = -EPROTO;
return len > 0 ? 0 : len;
}
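The extra len < 0 test matters because sizeof yields an unsigned size_t, so a negative int compared against it is converted to a huge unsigned value and the short-write branch is skipped. A stand-alone illustration of that conversion (the struct and its 28-byte size are arbitrary stand-ins):

	#include <stdio.h>

	struct decline { char buf[28]; };	/* stand-in for the real CLC message */

	int main(void)
	{
		int len = -32;	/* a negative error code from a send path */

		/* the negative int converts to a huge size_t, so this prints 0 */
		printf("len < sizeof(struct decline):            %d\n",
		       len < sizeof(struct decline));
		/* checking for the error first keeps the length test meaningful */
		printf("len < 0 || len < sizeof(struct decline): %d\n",
		       len < 0 || len < sizeof(struct decline));
		return 0;
	}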
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index ea2b87f29469..2ad37e998509 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work)
switch (sk->sk_state) {
case SMC_INIT:
- if (atomic_read(&conn->bytes_to_rcv) ||
- (rxflags->peer_done_writing &&
- !smc_cdc_rxed_any_close(conn))) {
- sk->sk_state = SMC_APPCLOSEWAIT1;
- } else {
- sk->sk_state = SMC_CLOSED;
- sock_put(sk); /* passive closing */
- }
+ sk->sk_state = SMC_APPCLOSEWAIT1;
break;
case SMC_ACTIVE:
sk->sk_state = SMC_APPCLOSEWAIT1;
@@ -405,8 +398,13 @@ wakeup:
if (old_state != sk->sk_state) {
sk->sk_state_change(sk);
if ((sk->sk_state == SMC_CLOSED) &&
- (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket))
+ (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
smc_conn_free(conn);
+ if (smc->clcsock) {
+ sock_release(smc->clcsock);
+ smc->clcsock = NULL;
+ }
+ }
}
release_sock(sk);
sock_put(sk); /* sock_hold done by schedulers of close_work */
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 35c1cdc93e1c..53a17cfa61af 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -118,7 +118,6 @@ static void __smc_lgr_unregister_conn(struct smc_connection *conn)
rb_erase(&conn->alert_node, &lgr->conns_all);
lgr->conns_num--;
conn->alert_token_local = 0;
- conn->lgr = NULL;
sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}
@@ -128,6 +127,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
+ if (!lgr)
+ return;
write_lock_bh(&lgr->conns_lock);
if (conn->alert_token_local) {
__smc_lgr_unregister_conn(conn);
@@ -159,8 +160,6 @@ static void smc_lgr_free_work(struct work_struct *work)
bool conns;
spin_lock_bh(&smc_lgr_list.lock);
- if (list_empty(&lgr->list))
- goto free;
read_lock_bh(&lgr->conns_lock);
conns = RB_EMPTY_ROOT(&lgr->conns_all);
read_unlock_bh(&lgr->conns_lock);
@@ -168,8 +167,8 @@ static void smc_lgr_free_work(struct work_struct *work)
spin_unlock_bh(&smc_lgr_list.lock);
return;
}
- list_del_init(&lgr->list); /* remove from smc_lgr_list */
-free:
+ if (!list_empty(&lgr->list))
+ list_del_init(&lgr->list); /* remove from smc_lgr_list */
spin_unlock_bh(&smc_lgr_list.lock);
if (!lgr->is_smcd && !lgr->terminating) {
@@ -300,13 +299,13 @@ static void smc_buf_unuse(struct smc_connection *conn,
conn->sndbuf_desc->used = 0;
if (conn->rmb_desc) {
if (!conn->rmb_desc->regerr) {
- conn->rmb_desc->used = 0;
if (!lgr->is_smcd) {
/* unregister rmb with peer */
smc_llc_do_delete_rkey(
&lgr->lnk[SMC_SINGLE_LINK],
conn->rmb_desc);
}
+ conn->rmb_desc->used = 0;
} else {
/* buf registration failed, reuse not possible */
write_lock_bh(&lgr->rmbs_lock);
@@ -331,8 +330,9 @@ void smc_conn_free(struct smc_connection *conn)
} else {
smc_cdc_tx_dismiss_slots(conn);
}
- smc_lgr_unregister_conn(conn); /* unsets conn->lgr */
+ smc_lgr_unregister_conn(conn);
smc_buf_unuse(conn, lgr); /* allow buffer reuse */
+ conn->lgr = NULL;
if (!lgr->conns_num)
smc_lgr_schedule_free_work(lgr);
@@ -462,6 +462,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
sock_hold(&smc->sk); /* sock_put in close work */
conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
__smc_lgr_unregister_conn(conn);
+ conn->lgr = NULL;
write_unlock_bh(&lgr->conns_lock);
if (!schedule_work(&conn->close_work))
sock_put(&smc->sk);
@@ -628,6 +629,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
local_contact = SMC_REUSE_CONTACT;
conn->lgr = lgr;
smc_lgr_register_conn(conn); /* add smc conn to lgr */
+ if (delayed_work_pending(&lgr->free_work))
+ cancel_delayed_work(&lgr->free_work);
write_unlock_bh(&lgr->conns_lock);
break;
}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index b00287989a3d..8806d2afa6ed 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -52,6 +52,24 @@ enum smc_wr_reg_state {
FAILED /* ib_wr_reg_mr response: failure */
};
+struct smc_rdma_sge { /* sges for RDMA writes */
+ struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
+};
+
+#define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per
+ * message send
+ */
+
+struct smc_rdma_sges { /* sges per message send */
+ struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES];
+};
+
+struct smc_rdma_wr { /* work requests per message
+ * send
+ */
+ struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES];
+};
+
struct smc_link {
struct smc_ib_device *smcibdev; /* ib-device */
u8 ibport; /* port - values 1 | 2 */
@@ -64,6 +82,8 @@ struct smc_link {
struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */
struct ib_send_wr *wr_tx_ibs; /* WR send meta data */
struct ib_sge *wr_tx_sges; /* WR send gather meta data */
+ struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/
+ struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */
struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */
/* above four vectors have wr_tx_cnt elements and use the same index */
dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index dbf64a93d68a..371b4cf31fcd 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -38,6 +38,7 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
+ r->diag_family = sk->sk_family;
if (!smc->clcsock)
return;
r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
@@ -45,14 +46,12 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
sock_diag_save_cookie(sk, r->id.idiag_cookie);
if (sk->sk_protocol == SMCPROTO_SMC) {
- r->diag_family = PF_INET;
memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
} else if (sk->sk_protocol == SMCPROTO_SMC6) {
- r->diag_family = PF_INET6;
memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index e519ef29c0ff..53f429c04843 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -257,12 +257,20 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
smcibdev = container_of(handler, struct smc_ib_device, event_handler);
switch (ibevent->event) {
- case IB_EVENT_PORT_ERR:
case IB_EVENT_DEVICE_FATAL:
+ /* terminate all ports on device */
+ for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++)
+ set_bit(port_idx, &smcibdev->port_event_mask);
+ schedule_work(&smcibdev->port_event_work);
+ break;
+ case IB_EVENT_PORT_ERR:
case IB_EVENT_PORT_ACTIVE:
+ case IB_EVENT_GID_CHANGE:
port_idx = ibevent->element.port_num - 1;
- set_bit(port_idx, &smcibdev->port_event_mask);
- schedule_work(&smcibdev->port_event_work);
+ if (port_idx < SMC_MAX_PORTS) {
+ set_bit(port_idx, &smcibdev->port_event_mask);
+ schedule_work(&smcibdev->port_event_work);
+ }
break;
default:
break;
@@ -289,18 +297,18 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
- struct smc_ib_device *smcibdev =
- (struct smc_ib_device *)ibevent->device;
+ struct smc_link *lnk = (struct smc_link *)priv;
+ struct smc_ib_device *smcibdev = lnk->smcibdev;
u8 port_idx;
switch (ibevent->event) {
- case IB_EVENT_DEVICE_FATAL:
- case IB_EVENT_GID_CHANGE:
- case IB_EVENT_PORT_ERR:
+ case IB_EVENT_QP_FATAL:
case IB_EVENT_QP_ACCESS_ERR:
- port_idx = ibevent->element.port_num - 1;
- set_bit(port_idx, &smcibdev->port_event_mask);
- schedule_work(&smcibdev->port_event_work);
+ port_idx = ibevent->element.qp->port - 1;
+ if (port_idx < SMC_MAX_PORTS) {
+ set_bit(port_idx, &smcibdev->port_event_mask);
+ schedule_work(&smcibdev->port_event_work);
+ }
break;
default:
break;
@@ -556,7 +564,6 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
spin_lock(&smc_ib_devices.lock);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
spin_unlock(&smc_ib_devices.lock);
- smc_pnet_remove_by_ibdev(smcibdev);
smc_ib_cleanup_per_ibdev(smcibdev);
ib_unregister_event_handler(&smcibdev->event_handler);
kfree(smcibdev);
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index bac7fd65a4c0..da60ab9e8d70 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -42,6 +42,8 @@ struct smc_ib_device { /* ib-device infos for smc */
/* mac address per port*/
u8 pnetid[SMC_MAX_PORTS][SMC_MAX_PNETID_LEN];
/* pnetid per port */
+ bool pnetid_by_user[SMC_MAX_PORTS];
+ /* pnetid defined by user? */
u8 initialized : 1; /* ib dev CQ, evthdl done */
struct work_struct port_event_work;
unsigned long port_event_mask;
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index a6d3623d06f4..4fd60c522802 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link,
{
int rc;
- rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend);
+ rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
+ pend);
if (rc < 0)
return rc;
BUILD_BUG_ON_MSG(
diff --git a/net/smc/smc_netns.h b/net/smc/smc_netns.h
new file mode 100644
index 000000000000..e7a8fc4ae02f
--- /dev/null
+++ b/net/smc/smc_netns.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Shared Memory Communications
+ *
+ * Network namespace definitions.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef SMC_NETNS_H
+#define SMC_NETNS_H
+
+#include "smc_pnet.h"
+
+extern unsigned int smc_net_id;
+
+/* per-network namespace private data */
+struct smc_net {
+ struct smc_pnettable pnettable;
+};
+#endif
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 7cb3e4f07c10..8d2f6296279c 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -20,14 +20,21 @@
#include <rdma/ib_verbs.h>
+#include <net/netns/generic.h>
+#include "smc_netns.h"
+
#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_ism.h"
+#define SMC_ASCII_BLANK 32
+
+static struct net_device *pnet_find_base_ndev(struct net_device *ndev);
+
static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
[SMC_PNETID_NAME] = {
.type = NLA_NUL_STRING,
- .len = SMC_MAX_PNETID_LEN - 1
+ .len = SMC_MAX_PNETID_LEN
},
[SMC_PNETID_ETHNAME] = {
.type = NLA_NUL_STRING,
@@ -43,118 +50,127 @@ static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
static struct genl_family smc_pnet_nl_family;
/**
- * struct smc_pnettable - SMC PNET table anchor
- * @lock: Lock for list action
- * @pnetlist: List of PNETIDs
- */
-static struct smc_pnettable {
- rwlock_t lock;
- struct list_head pnetlist;
-} smc_pnettable = {
- .pnetlist = LIST_HEAD_INIT(smc_pnettable.pnetlist),
- .lock = __RW_LOCK_UNLOCKED(smc_pnettable.lock)
-};
-
-/**
- * struct smc_pnetentry - pnet identifier name entry
+ * struct smc_user_pnetentry - pnet identifier name entry for/from user
* @list: List node.
* @pnet_name: Pnet identifier name
* @ndev: pointer to network device.
* @smcibdev: Pointer to IB device.
+ * @ib_port: Port of IB device.
+ * @smcd_dev: Pointer to smcd device.
*/
-struct smc_pnetentry {
+struct smc_user_pnetentry {
struct list_head list;
char pnet_name[SMC_MAX_PNETID_LEN + 1];
struct net_device *ndev;
struct smc_ib_device *smcibdev;
u8 ib_port;
+ struct smcd_dev *smcd_dev;
};
-/* Check if two RDMA device entries are identical. Use device name and port
- * number for comparison.
- */
-static bool smc_pnet_same_ibname(struct smc_pnetentry *pnetelem, char *ibname,
- u8 ibport)
-{
- return pnetelem->ib_port == ibport &&
- !strncmp(pnetelem->smcibdev->ibdev->name, ibname,
- sizeof(pnetelem->smcibdev->ibdev->name));
-}
+/* pnet entry stored in pnet table */
+struct smc_pnetentry {
+ struct list_head list;
+ char pnet_name[SMC_MAX_PNETID_LEN + 1];
+ struct net_device *ndev;
+};
-/* Find a pnetid in the pnet table.
- */
-static struct smc_pnetentry *smc_pnet_find_pnetid(char *pnet_name)
+/* Check if two given pnetids match */
+static bool smc_pnet_match(u8 *pnetid1, u8 *pnetid2)
{
- struct smc_pnetentry *pnetelem, *found_pnetelem = NULL;
+ int i;
- read_lock(&smc_pnettable.lock);
- list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
- if (!strncmp(pnetelem->pnet_name, pnet_name,
- sizeof(pnetelem->pnet_name))) {
- found_pnetelem = pnetelem;
+ for (i = 0; i < SMC_MAX_PNETID_LEN; i++) {
+ if ((pnetid1[i] == 0 || pnetid1[i] == SMC_ASCII_BLANK) &&
+ (pnetid2[i] == 0 || pnetid2[i] == SMC_ASCII_BLANK))
break;
- }
+ if (pnetid1[i] != pnetid2[i])
+ return false;
}
- read_unlock(&smc_pnettable.lock);
- return found_pnetelem;
+ return true;
}
/* Remove a pnetid from the pnet table.
*/
-static int smc_pnet_remove_by_pnetid(char *pnet_name)
+static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
{
struct smc_pnetentry *pnetelem, *tmp_pe;
+ struct smc_pnettable *pnettable;
+ struct smc_ib_device *ibdev;
+ struct smcd_dev *smcd_dev;
+ struct smc_net *sn;
int rc = -ENOENT;
+ int ibport;
+
+ /* get pnettable for namespace */
+ sn = net_generic(net, smc_net_id);
+ pnettable = &sn->pnettable;
- write_lock(&smc_pnettable.lock);
- list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist,
+ /* remove netdevices */
+ write_lock(&pnettable->lock);
+ list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist,
list) {
- if (!strncmp(pnetelem->pnet_name, pnet_name,
- sizeof(pnetelem->pnet_name))) {
+ if (!pnet_name ||
+ smc_pnet_match(pnetelem->pnet_name, pnet_name)) {
list_del(&pnetelem->list);
dev_put(pnetelem->ndev);
kfree(pnetelem);
rc = 0;
- break;
}
}
- write_unlock(&smc_pnettable.lock);
- return rc;
-}
+ write_unlock(&pnettable->lock);
-/* Remove a pnet entry mentioning a given network device from the pnet table.
- */
-static int smc_pnet_remove_by_ndev(struct net_device *ndev)
-{
- struct smc_pnetentry *pnetelem, *tmp_pe;
- int rc = -ENOENT;
+ /* if this is not the initial namespace, stop here */
+ if (net != &init_net)
+ return rc;
- write_lock(&smc_pnettable.lock);
- list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist,
- list) {
- if (pnetelem->ndev == ndev) {
- list_del(&pnetelem->list);
- dev_put(pnetelem->ndev);
- kfree(pnetelem);
+ /* remove ib devices */
+ spin_lock(&smc_ib_devices.lock);
+ list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
+ for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) {
+ if (ibdev->pnetid_by_user[ibport] &&
+ (!pnet_name ||
+ smc_pnet_match(pnet_name,
+ ibdev->pnetid[ibport]))) {
+ memset(ibdev->pnetid[ibport], 0,
+ SMC_MAX_PNETID_LEN);
+ ibdev->pnetid_by_user[ibport] = false;
+ rc = 0;
+ }
+ }
+ }
+ spin_unlock(&smc_ib_devices.lock);
+ /* remove smcd devices */
+ spin_lock(&smcd_dev_list.lock);
+ list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
+ if (smcd_dev->pnetid_by_user &&
+ (!pnet_name ||
+ smc_pnet_match(pnet_name, smcd_dev->pnetid))) {
+ memset(smcd_dev->pnetid, 0, SMC_MAX_PNETID_LEN);
+ smcd_dev->pnetid_by_user = false;
rc = 0;
- break;
}
}
- write_unlock(&smc_pnettable.lock);
+ spin_unlock(&smcd_dev_list.lock);
return rc;
}
-/* Remove a pnet entry mentioning a given ib device from the pnet table.
+/* Remove a pnet entry mentioning a given network device from the pnet table.
*/
-int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev)
+static int smc_pnet_remove_by_ndev(struct net_device *ndev)
{
struct smc_pnetentry *pnetelem, *tmp_pe;
+ struct smc_pnettable *pnettable;
+ struct net *net = dev_net(ndev);
+ struct smc_net *sn;
int rc = -ENOENT;
- write_lock(&smc_pnettable.lock);
- list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist,
- list) {
- if (pnetelem->smcibdev == ibdev) {
+ /* get pnettable for namespace */
+ sn = net_generic(net, smc_net_id);
+ pnettable = &sn->pnettable;
+
+ write_lock(&pnettable->lock);
+ list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
+ if (pnetelem->ndev == ndev) {
list_del(&pnetelem->list);
dev_put(pnetelem->ndev);
kfree(pnetelem);
@@ -162,35 +178,84 @@ int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev)
break;
}
}
- write_unlock(&smc_pnettable.lock);
+ write_unlock(&pnettable->lock);
return rc;
}
/* Append a pnetid to the end of the pnet table if not already on this list.
*/
-static int smc_pnet_enter(struct smc_pnetentry *new_pnetelem)
+static int smc_pnet_enter(struct smc_pnettable *pnettable,
+ struct smc_user_pnetentry *new_pnetelem)
{
+ u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
+ u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+ struct smc_pnetentry *tmp_pnetelem;
struct smc_pnetentry *pnetelem;
- int rc = -EEXIST;
-
- write_lock(&smc_pnettable.lock);
- list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
- if (!strncmp(pnetelem->pnet_name, new_pnetelem->pnet_name,
- sizeof(new_pnetelem->pnet_name)) ||
- !strncmp(pnetelem->ndev->name, new_pnetelem->ndev->name,
- sizeof(new_pnetelem->ndev->name)) ||
- smc_pnet_same_ibname(pnetelem,
- new_pnetelem->smcibdev->ibdev->name,
- new_pnetelem->ib_port)) {
- dev_put(pnetelem->ndev);
- goto found;
+ bool new_smcddev = false;
+ struct net_device *ndev;
+ bool new_netdev = true;
+ bool new_ibdev = false;
+
+ if (new_pnetelem->smcibdev) {
+ struct smc_ib_device *ib_dev = new_pnetelem->smcibdev;
+ int ib_port = new_pnetelem->ib_port;
+
+ spin_lock(&smc_ib_devices.lock);
+ if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) {
+ memcpy(ib_dev->pnetid[ib_port - 1],
+ new_pnetelem->pnet_name, SMC_MAX_PNETID_LEN);
+ ib_dev->pnetid_by_user[ib_port - 1] = true;
+ new_ibdev = true;
}
+ spin_unlock(&smc_ib_devices.lock);
}
- list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist);
- rc = 0;
-found:
- write_unlock(&smc_pnettable.lock);
- return rc;
+ if (new_pnetelem->smcd_dev) {
+ struct smcd_dev *smcd_dev = new_pnetelem->smcd_dev;
+
+ spin_lock(&smcd_dev_list.lock);
+ if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) {
+ memcpy(smcd_dev->pnetid, new_pnetelem->pnet_name,
+ SMC_MAX_PNETID_LEN);
+ smcd_dev->pnetid_by_user = true;
+ new_smcddev = true;
+ }
+ spin_unlock(&smcd_dev_list.lock);
+ }
+
+ if (!new_pnetelem->ndev)
+ return (new_ibdev || new_smcddev) ? 0 : -EEXIST;
+
+ /* check if (base) netdev already has a pnetid. If there is one, we do
+ * not want to add a pnet table entry
+ */
+ ndev = pnet_find_base_ndev(new_pnetelem->ndev);
+ if (!smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
+ ndev_pnetid))
+ return (new_ibdev || new_smcddev) ? 0 : -EEXIST;
+
+ /* add a new netdev entry to the pnet table if there isn't one */
+ tmp_pnetelem = kzalloc(sizeof(*pnetelem), GFP_KERNEL);
+ if (!tmp_pnetelem)
+ return -ENOMEM;
+ memcpy(tmp_pnetelem->pnet_name, new_pnetelem->pnet_name,
+ SMC_MAX_PNETID_LEN);
+ tmp_pnetelem->ndev = new_pnetelem->ndev;
+
+ write_lock(&pnettable->lock);
+ list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
+ if (pnetelem->ndev == new_pnetelem->ndev)
+ new_netdev = false;
+ }
+ if (new_netdev) {
+ dev_hold(tmp_pnetelem->ndev);
+ list_add_tail(&tmp_pnetelem->list, &pnettable->pnetlist);
+ write_unlock(&pnettable->lock);
+ } else {
+ write_unlock(&pnettable->lock);
+ kfree(tmp_pnetelem);
+ }
+
+ return (new_netdev || new_ibdev || new_smcddev) ? 0 : -EEXIST;
}
/* The limit for pnetid is 16 characters.
@@ -228,7 +293,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
spin_lock(&smc_ib_devices.lock);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
if (!strncmp(ibdev->ibdev->name, ib_name,
- sizeof(ibdev->ibdev->name))) {
+ sizeof(ibdev->ibdev->name)) ||
+ !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
+ IB_DEVICE_NAME_MAX - 1)) {
goto out;
}
}
@@ -238,10 +305,28 @@ out:
return ibdev;
}
+/* Find an smcd device by a given name. The device might not exist. */
+static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
+{
+ struct smcd_dev *smcd_dev;
+
+ spin_lock(&smcd_dev_list.lock);
+ list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
+ if (!strncmp(dev_name(&smcd_dev->dev), smcd_name,
+ IB_DEVICE_NAME_MAX - 1))
+ goto out;
+ }
+ smcd_dev = NULL;
+out:
+ spin_unlock(&smcd_dev_list.lock);
+ return smcd_dev;
+}
+
/* Parse the supplied netlink attributes and fill a pnetentry structure.
* For ethernet and infiniband device names verify that the devices exist.
*/
-static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
+static int smc_pnet_fill_entry(struct net *net,
+ struct smc_user_pnetentry *pnetelem,
struct nlattr *tb[])
{
char *string, *ibname;
@@ -258,30 +343,34 @@ static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
goto error;
rc = -EINVAL;
- if (!tb[SMC_PNETID_ETHNAME])
- goto error;
- rc = -ENOENT;
- string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
- pnetelem->ndev = dev_get_by_name(net, string);
- if (!pnetelem->ndev)
- goto error;
+ if (tb[SMC_PNETID_ETHNAME]) {
+ string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
+ pnetelem->ndev = dev_get_by_name(net, string);
+ if (!pnetelem->ndev)
+ goto error;
+ }
- rc = -EINVAL;
- if (!tb[SMC_PNETID_IBNAME])
- goto error;
- rc = -ENOENT;
- ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
- ibname = strim(ibname);
- pnetelem->smcibdev = smc_pnet_find_ib(ibname);
- if (!pnetelem->smcibdev)
- goto error;
+ /* if this is not the initial namespace, stop here */
+ if (net != &init_net)
+ return 0;
rc = -EINVAL;
- if (!tb[SMC_PNETID_IBPORT])
- goto error;
- pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
- if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS)
- goto error;
+ if (tb[SMC_PNETID_IBNAME]) {
+ ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
+ ibname = strim(ibname);
+ pnetelem->smcibdev = smc_pnet_find_ib(ibname);
+ pnetelem->smcd_dev = smc_pnet_find_smcd(ibname);
+ if (!pnetelem->smcibdev && !pnetelem->smcd_dev)
+ goto error;
+ if (pnetelem->smcibdev) {
+ if (!tb[SMC_PNETID_IBPORT])
+ goto error;
+ pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
+ if (pnetelem->ib_port < 1 ||
+ pnetelem->ib_port > SMC_MAX_PORTS)
+ goto error;
+ }
+ }
return 0;
@@ -292,79 +381,65 @@ error:
}
/* Convert an smc_pnetentry to a netlink attribute sequence */
-static int smc_pnet_set_nla(struct sk_buff *msg, struct smc_pnetentry *pnetelem)
+static int smc_pnet_set_nla(struct sk_buff *msg,
+ struct smc_user_pnetentry *pnetelem)
{
- if (nla_put_string(msg, SMC_PNETID_NAME, pnetelem->pnet_name) ||
- nla_put_string(msg, SMC_PNETID_ETHNAME, pnetelem->ndev->name) ||
- nla_put_string(msg, SMC_PNETID_IBNAME,
- pnetelem->smcibdev->ibdev->name) ||
- nla_put_u8(msg, SMC_PNETID_IBPORT, pnetelem->ib_port))
+ if (nla_put_string(msg, SMC_PNETID_NAME, pnetelem->pnet_name))
return -1;
- return 0;
-}
-
-/* Retrieve one PNETID entry */
-static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
-{
- struct smc_pnetentry *pnetelem;
- struct sk_buff *msg;
- void *hdr;
- int rc;
-
- if (!info->attrs[SMC_PNETID_NAME])
- return -EINVAL;
- pnetelem = smc_pnet_find_pnetid(
- (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
- if (!pnetelem)
- return -ENOENT;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
- &smc_pnet_nl_family, 0, SMC_PNETID_GET);
- if (!hdr) {
- rc = -EMSGSIZE;
- goto err_out;
+ if (pnetelem->ndev) {
+ if (nla_put_string(msg, SMC_PNETID_ETHNAME,
+ pnetelem->ndev->name))
+ return -1;
+ } else {
+ if (nla_put_string(msg, SMC_PNETID_ETHNAME, "n/a"))
+ return -1;
}
-
- if (smc_pnet_set_nla(msg, pnetelem)) {
- rc = -ENOBUFS;
- goto err_out;
+ if (pnetelem->smcibdev) {
+ if (nla_put_string(msg, SMC_PNETID_IBNAME,
+ dev_name(pnetelem->smcibdev->ibdev->dev.parent)) ||
+ nla_put_u8(msg, SMC_PNETID_IBPORT, pnetelem->ib_port))
+ return -1;
+ } else if (pnetelem->smcd_dev) {
+ if (nla_put_string(msg, SMC_PNETID_IBNAME,
+ dev_name(&pnetelem->smcd_dev->dev)) ||
+ nla_put_u8(msg, SMC_PNETID_IBPORT, 1))
+ return -1;
+ } else {
+ if (nla_put_string(msg, SMC_PNETID_IBNAME, "n/a") ||
+ nla_put_u8(msg, SMC_PNETID_IBPORT, 0xff))
+ return -1;
}
- genlmsg_end(msg, hdr);
- return genlmsg_reply(msg, info);
-
-err_out:
- nlmsg_free(msg);
- return rc;
+ return 0;
}
static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
- struct smc_pnetentry *pnetelem;
+ struct smc_user_pnetentry pnetelem;
+ struct smc_pnettable *pnettable;
+ struct smc_net *sn;
int rc;
- pnetelem = kzalloc(sizeof(*pnetelem), GFP_KERNEL);
- if (!pnetelem)
- return -ENOMEM;
- rc = smc_pnet_fill_entry(net, pnetelem, info->attrs);
+ /* get pnettable for namespace */
+ sn = net_generic(net, smc_net_id);
+ pnettable = &sn->pnettable;
+
+ rc = smc_pnet_fill_entry(net, &pnetelem, info->attrs);
if (!rc)
- rc = smc_pnet_enter(pnetelem);
- if (rc) {
- kfree(pnetelem);
- return rc;
- }
+ rc = smc_pnet_enter(pnettable, &pnetelem);
+ if (pnetelem.ndev)
+ dev_put(pnetelem.ndev);
return rc;
}
static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
{
+ struct net *net = genl_info_net(info);
+
if (!info->attrs[SMC_PNETID_NAME])
return -EINVAL;
- return smc_pnet_remove_by_pnetid(
+ return smc_pnet_remove_by_pnetid(net,
(char *)nla_data(info->attrs[SMC_PNETID_NAME]));
}
@@ -376,7 +451,7 @@ static int smc_pnet_dump_start(struct netlink_callback *cb)
static int smc_pnet_dumpinfo(struct sk_buff *skb,
u32 portid, u32 seq, u32 flags,
- struct smc_pnetentry *pnetelem)
+ struct smc_user_pnetentry *pnetelem)
{
void *hdr;
@@ -392,42 +467,143 @@ static int smc_pnet_dumpinfo(struct sk_buff *skb,
return 0;
}
-static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb)
+static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
+ u32 seq, u8 *pnetid, int start_idx)
{
+ struct smc_user_pnetentry tmp_entry;
+ struct smc_pnettable *pnettable;
struct smc_pnetentry *pnetelem;
+ struct smc_ib_device *ibdev;
+ struct smcd_dev *smcd_dev;
+ struct smc_net *sn;
int idx = 0;
+ int ibport;
+
+ /* get pnettable for namespace */
+ sn = net_generic(net, smc_net_id);
+ pnettable = &sn->pnettable;
- read_lock(&smc_pnettable.lock);
- list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
- if (idx++ < cb->args[0])
+ /* dump netdevices */
+ read_lock(&pnettable->lock);
+ list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
+ if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid))
+ continue;
+ if (idx++ < start_idx)
continue;
- if (smc_pnet_dumpinfo(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- pnetelem)) {
+ memset(&tmp_entry, 0, sizeof(tmp_entry));
+ memcpy(&tmp_entry.pnet_name, pnetelem->pnet_name,
+ SMC_MAX_PNETID_LEN);
+ tmp_entry.ndev = pnetelem->ndev;
+ if (smc_pnet_dumpinfo(skb, portid, seq, NLM_F_MULTI,
+ &tmp_entry)) {
--idx;
break;
}
}
+ read_unlock(&pnettable->lock);
+
+ /* if this is not the initial namespace, stop here */
+ if (net != &init_net)
+ return idx;
+
+ /* dump ib devices */
+ spin_lock(&smc_ib_devices.lock);
+ list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
+ for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) {
+ if (ibdev->pnetid_by_user[ibport]) {
+ if (pnetid &&
+ !smc_pnet_match(ibdev->pnetid[ibport],
+ pnetid))
+ continue;
+ if (idx++ < start_idx)
+ continue;
+ memset(&tmp_entry, 0, sizeof(tmp_entry));
+ memcpy(&tmp_entry.pnet_name,
+ ibdev->pnetid[ibport],
+ SMC_MAX_PNETID_LEN);
+ tmp_entry.smcibdev = ibdev;
+ tmp_entry.ib_port = ibport + 1;
+ if (smc_pnet_dumpinfo(skb, portid, seq,
+ NLM_F_MULTI,
+ &tmp_entry)) {
+ --idx;
+ break;
+ }
+ }
+ }
+ }
+ spin_unlock(&smc_ib_devices.lock);
+
+ /* dump smcd devices */
+ spin_lock(&smcd_dev_list.lock);
+ list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
+ if (smcd_dev->pnetid_by_user) {
+ if (pnetid && !smc_pnet_match(smcd_dev->pnetid, pnetid))
+ continue;
+ if (idx++ < start_idx)
+ continue;
+ memset(&tmp_entry, 0, sizeof(tmp_entry));
+ memcpy(&tmp_entry.pnet_name, smcd_dev->pnetid,
+ SMC_MAX_PNETID_LEN);
+ tmp_entry.smcd_dev = smcd_dev;
+ if (smc_pnet_dumpinfo(skb, portid, seq, NLM_F_MULTI,
+ &tmp_entry)) {
+ --idx;
+ break;
+ }
+ }
+ }
+ spin_unlock(&smcd_dev_list.lock);
+
+ return idx;
+}
+
+static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ int idx;
+
+ idx = _smc_pnet_dump(net, skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NULL, cb->args[0]);
+
cb->args[0] = idx;
- read_unlock(&smc_pnettable.lock);
return skb->len;
}
+/* Retrieve one PNETID entry */
+static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct sk_buff *msg;
+ void *hdr;
+
+ if (!info->attrs[SMC_PNETID_NAME])
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ _smc_pnet_dump(net, msg, info->snd_portid, info->snd_seq,
+ nla_data(info->attrs[SMC_PNETID_NAME]), 0);
+
+ /* finish multi part message and send it */
+ hdr = nlmsg_put(msg, info->snd_portid, info->snd_seq, NLMSG_DONE, 0,
+ NLM_F_MULTI);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+ }
+ return genlmsg_reply(msg, info);
+}
+
/* Remove and delete all pnetids from pnet table.
*/
static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
{
- struct smc_pnetentry *pnetelem, *tmp_pe;
+ struct net *net = genl_info_net(info);
- write_lock(&smc_pnettable.lock);
- list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist,
- list) {
- list_del(&pnetelem->list);
- dev_put(pnetelem->ndev);
- kfree(pnetelem);
- }
- write_unlock(&smc_pnettable.lock);
- return 0;
+ return smc_pnet_remove_by_pnetid(net, NULL);
}
/* SMC_PNETID generic netlink operation definition */
@@ -491,6 +667,18 @@ static struct notifier_block smc_netdev_notifier = {
.notifier_call = smc_pnet_netdev_event
};
+/* init network namespace */
+int smc_pnet_net_init(struct net *net)
+{
+ struct smc_net *sn = net_generic(net, smc_net_id);
+ struct smc_pnettable *pnettable = &sn->pnettable;
+
+ INIT_LIST_HEAD(&pnettable->pnetlist);
+ rwlock_init(&pnettable->lock);
+
+ return 0;
+}
+
int __init smc_pnet_init(void)
{
int rc;
@@ -504,9 +692,15 @@ int __init smc_pnet_init(void)
return rc;
}
+/* exit network namespace */
+void smc_pnet_net_exit(struct net *net)
+{
+ /* flush pnet table */
+ smc_pnet_remove_by_pnetid(net, NULL);
+}
+
void smc_pnet_exit(void)
{
- smc_pnet_flush(NULL, NULL);
unregister_netdevice_notifier(&smc_netdev_notifier);
genl_unregister_family(&smc_pnet_nl_family);
}
@@ -534,9 +728,73 @@ static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
return ndev;
}
+static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
+ u8 *pnetid)
+{
+ struct smc_pnettable *pnettable;
+ struct net *net = dev_net(ndev);
+ struct smc_pnetentry *pnetelem;
+ struct smc_net *sn;
+ int rc = -ENOENT;
+
+ /* get pnettable for namespace */
+ sn = net_generic(net, smc_net_id);
+ pnettable = &sn->pnettable;
+
+ read_lock(&pnettable->lock);
+ list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
+ if (ndev == pnetelem->ndev) {
+ /* get pnetid of netdev device */
+ memcpy(pnetid, pnetelem->pnet_name, SMC_MAX_PNETID_LEN);
+ rc = 0;
+ break;
+ }
+ }
+ read_unlock(&pnettable->lock);
+ return rc;
+}
+
+/* if handshake network device belongs to a roce device, return its
+ * IB device and port
+ */
+static void smc_pnet_find_rdma_dev(struct net_device *netdev,
+ struct smc_ib_device **smcibdev,
+ u8 *ibport, unsigned short vlan_id, u8 gid[])
+{
+ struct smc_ib_device *ibdev;
+
+ spin_lock(&smc_ib_devices.lock);
+ list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
+ struct net_device *ndev;
+ int i;
+
+ for (i = 1; i <= SMC_MAX_PORTS; i++) {
+ if (!rdma_is_port_valid(ibdev->ibdev, i))
+ continue;
+ if (!ibdev->ibdev->ops.get_netdev)
+ continue;
+ ndev = ibdev->ibdev->ops.get_netdev(ibdev->ibdev, i);
+ if (!ndev)
+ continue;
+ dev_put(ndev);
+ if (netdev == ndev &&
+ smc_ib_port_active(ibdev, i) &&
+ !smc_ib_determine_gid(ibdev, i, vlan_id, gid,
+ NULL)) {
+ *smcibdev = ibdev;
+ *ibport = i;
+ break;
+ }
+ }
+ }
+ spin_unlock(&smc_ib_devices.lock);
+}
+
/* Determine the corresponding IB device port based on the hardware PNETID.
* Searching stops at the first matching active IB device port with vlan_id
* configured.
+ * If nothing found, check pnetid table.
+ * If nothing found, try to use handshake device
*/
static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
struct smc_ib_device **smcibdev,
@@ -549,16 +807,18 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
ndev = pnet_find_base_ndev(ndev);
if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
- ndev_pnetid))
+ ndev_pnetid) &&
+ smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) {
+ smc_pnet_find_rdma_dev(ndev, smcibdev, ibport, vlan_id, gid);
return; /* pnetid could not be determined */
+ }
spin_lock(&smc_ib_devices.lock);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
for (i = 1; i <= SMC_MAX_PORTS; i++) {
if (!rdma_is_port_valid(ibdev->ibdev, i))
continue;
- if (!memcmp(ibdev->pnetid[i - 1], ndev_pnetid,
- SMC_MAX_PNETID_LEN) &&
+ if (smc_pnet_match(ibdev->pnetid[i - 1], ndev_pnetid) &&
smc_ib_port_active(ibdev, i) &&
!smc_ib_determine_gid(ibdev, i, vlan_id, gid,
NULL)) {
@@ -580,12 +840,13 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
ndev = pnet_find_base_ndev(ndev);
if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
- ndev_pnetid))
+ ndev_pnetid) &&
+ smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid))
return; /* pnetid could not be determined */
spin_lock(&smcd_dev_list.lock);
list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
- if (!memcmp(ismdev->pnetid, ndev_pnetid, SMC_MAX_PNETID_LEN)) {
+ if (smc_pnet_match(ismdev->pnetid, ndev_pnetid)) {
*smcismdev = ismdev;
break;
}
@@ -593,31 +854,6 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
spin_unlock(&smcd_dev_list.lock);
}
-/* Lookup of coupled ib_device via SMC pnet table */
-static void smc_pnet_find_roce_by_table(struct net_device *netdev,
- struct smc_ib_device **smcibdev,
- u8 *ibport, unsigned short vlan_id,
- u8 gid[])
-{
- struct smc_pnetentry *pnetelem;
-
- read_lock(&smc_pnettable.lock);
- list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
- if (netdev == pnetelem->ndev) {
- if (smc_ib_port_active(pnetelem->smcibdev,
- pnetelem->ib_port) &&
- !smc_ib_determine_gid(pnetelem->smcibdev,
- pnetelem->ib_port, vlan_id,
- gid, NULL)) {
- *smcibdev = pnetelem->smcibdev;
- *ibport = pnetelem->ib_port;
- }
- break;
- }
- }
- read_unlock(&smc_pnettable.lock);
-}
-
/* PNET table analysis for a given sock:
* determine ib_device and port belonging to used internal TCP socket
* ethernet interface.
@@ -636,13 +872,7 @@ void smc_pnet_find_roce_resource(struct sock *sk,
if (!dst->dev)
goto out_rel;
- /* if possible, lookup via hardware-defined pnetid */
smc_pnet_find_roce_by_pnetid(dst->dev, smcibdev, ibport, vlan_id, gid);
- if (*smcibdev)
- goto out_rel;
-
- /* lookup via SMC PNET table */
- smc_pnet_find_roce_by_table(dst->dev, smcibdev, ibport, vlan_id, gid);
out_rel:
dst_release(dst);
@@ -660,7 +890,6 @@ void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev)
if (!dst->dev)
goto out_rel;
- /* if possible, lookup via hardware-defined pnetid */
smc_pnet_find_ism_by_pnetid(dst->dev, smcismdev);
out_rel:
diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h
index 8ff777636e32..5eac42fb45d0 100644
--- a/net/smc/smc_pnet.h
+++ b/net/smc/smc_pnet.h
@@ -19,6 +19,16 @@
struct smc_ib_device;
struct smcd_dev;
+/**
+ * struct smc_pnettable - SMC PNET table anchor
+ * @lock: Lock for list action
+ * @pnetlist: List of PNETIDs
+ */
+struct smc_pnettable {
+ rwlock_t lock;
+ struct list_head pnetlist;
+};
+
static inline int smc_pnetid_by_dev_port(struct device *dev,
unsigned short port, u8 *pnetid)
{
@@ -30,8 +40,9 @@ static inline int smc_pnetid_by_dev_port(struct device *dev,
}
int smc_pnet_init(void) __init;
+int smc_pnet_net_init(struct net *net);
void smc_pnet_exit(void);
-int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev);
+void smc_pnet_net_exit(struct net *net);
void smc_pnet_find_roce_resource(struct sock *sk,
struct smc_ib_device **smcibdev, u8 *ibport,
unsigned short vlan_id, u8 gid[]);
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index d8366ed51757..f0de323d15d6 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -24,10 +24,11 @@
#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
+#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"
-#define SMC_TX_WORK_DELAY HZ
+#define SMC_TX_WORK_DELAY 0
#define SMC_TX_CORK_DELAY (HZ >> 2) /* 250 ms */
/***************************** sndbuf producer *******************************/
@@ -165,12 +166,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;
if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
+ if (send_done)
+ return send_done;
rc = smc_tx_wait(smc, msg->msg_flags);
- if (rc) {
- if (send_done)
- return send_done;
+ if (rc)
goto out_err;
- }
continue;
}
@@ -267,27 +267,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
- int num_sges, struct ib_sge sges[])
+ int num_sges, struct ib_rdma_wr *rdma_wr)
{
struct smc_link_group *lgr = conn->lgr;
- struct ib_rdma_wr rdma_wr;
struct smc_link *link;
int rc;
- memset(&rdma_wr, 0, sizeof(rdma_wr));
link = &lgr->lnk[SMC_SINGLE_LINK];
- rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
- rdma_wr.wr.sg_list = sges;
- rdma_wr.wr.num_sge = num_sges;
- rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
- rdma_wr.remote_addr =
+ rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
+ rdma_wr->wr.num_sge = num_sges;
+ rdma_wr->remote_addr =
lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
/* RMBE within RMB */
conn->tx_off +
/* offset within RMBE */
peer_rmbe_offset;
- rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
- rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL);
+ rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
+ rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
if (rc) {
conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
smc_lgr_terminate(lgr);
@@ -314,24 +310,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
size_t src_off, size_t src_len,
- size_t dst_off, size_t dst_len)
+ size_t dst_off, size_t dst_len,
+ struct smc_rdma_wr *wr_rdma_buf)
{
dma_addr_t dma_addr =
sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
- struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
int src_len_sum = src_len, dst_len_sum = dst_len;
- struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
int sent_count = src_off;
int srcchunk, dstchunk;
int num_sges;
int rc;
for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+ struct ib_sge *sge =
+ wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;
+
num_sges = 0;
for (srcchunk = 0; srcchunk < 2; srcchunk++) {
- sges[srcchunk].addr = dma_addr + src_off;
- sges[srcchunk].length = src_len;
- sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
+ sge[srcchunk].addr = dma_addr + src_off;
+ sge[srcchunk].length = src_len;
num_sges++;
src_off += src_len;
@@ -344,7 +341,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
src_len = dst_len - src_len; /* remainder */
src_len_sum += src_len;
}
- rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
+ rc = smc_tx_rdma_write(conn, dst_off, num_sges,
+ &wr_rdma_buf->wr_tx_rdma[dstchunk]);
if (rc)
return rc;
if (dst_len_sum == len)
@@ -403,7 +401,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
* usable snd_wnd as max transmit
*/
-static int smc_tx_rdma_writes(struct smc_connection *conn)
+static int smc_tx_rdma_writes(struct smc_connection *conn,
+ struct smc_rdma_wr *wr_rdma_buf)
{
size_t len, src_len, dst_off, dst_len; /* current chunk values */
union smc_host_cursor sent, prep, prod, cons;
@@ -464,7 +463,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
dst_off, dst_len);
else
rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
- dst_off, dst_len);
+ dst_off, dst_len, wr_rdma_buf);
if (rc)
return rc;
@@ -484,32 +483,31 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
*/
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
- struct smc_cdc_producer_flags *pflags;
+ struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
+ struct smc_rdma_wr *wr_rdma_buf;
struct smc_cdc_tx_pend *pend;
struct smc_wr_buf *wr_buf;
int rc;
- spin_lock_bh(&conn->send_lock);
- rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
+ rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
if (rc < 0) {
if (rc == -EBUSY) {
struct smc_sock *smc =
container_of(conn, struct smc_sock, conn);
- if (smc->sk.sk_err == ECONNABORTED) {
- rc = sock_error(&smc->sk);
- goto out_unlock;
- }
+ if (smc->sk.sk_err == ECONNABORTED)
+ return sock_error(&smc->sk);
rc = 0;
if (conn->alert_token_local) /* connection healthy */
mod_delayed_work(system_wq, &conn->tx_work,
SMC_TX_WORK_DELAY);
}
- goto out_unlock;
+ return rc;
}
- if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
- rc = smc_tx_rdma_writes(conn);
+ spin_lock_bh(&conn->send_lock);
+ if (!pflags->urg_data_present) {
+ rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
if (rc) {
smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
(struct smc_wr_tx_pend_priv *)pend);
@@ -518,7 +516,6 @@ static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
}
rc = smc_cdc_msg_send(conn, wr_buf, pend);
- pflags = &conn->local_tx_ctrl.prod_flags;
if (!rc && pflags->urg_data_present) {
pflags->urg_data_pending = 0;
pflags->urg_data_present = 0;
@@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
spin_lock_bh(&conn->send_lock);
if (!pflags->urg_data_present)
- rc = smc_tx_rdma_writes(conn);
+ rc = smc_tx_rdma_writes(conn, NULL);
if (!rc)
rc = smcd_cdc_msg_send(conn);
@@ -557,6 +554,12 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
else
rc = smcr_tx_sndbuf_nonempty(conn);
+ if (!rc) {
+ /* trigger socket release if connection is closing */
+ struct smc_sock *smc = container_of(conn, struct smc_sock,
+ conn);
+ smc_close_wake_tx_prepared(smc);
+ }
return rc;
}
@@ -598,7 +601,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
if (to_confirm > conn->rmbe_update_limit) {
smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
sender_free = conn->rmb_desc->len -
- smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+ smc_curs_diff_large(conn->rmb_desc->len,
+ &cfed, &prod);
}
if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
@@ -612,9 +616,6 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
SMC_TX_WORK_DELAY);
return;
}
- smc_curs_copy(&conn->rx_curs_confirmed,
- &conn->local_tx_ctrl.cons, conn);
- conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
}
if (conn->local_rx_ctrl.prod_flags.write_blocked &&
!atomic_read(&conn->bytes_to_rcv))
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index c2694750a6a8..253aa75dc2b6 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
* @link: Pointer to smc_link used to later send the message.
* @handler: Send completion handler function pointer.
* @wr_buf: Out value returns pointer to message buffer.
+ * @wr_rdma_buf: Out value returns pointer to rdma work request.
* @wr_pend_priv: Out value returns pointer serving as handler context.
*
* Return: 0 on success, or -errno on error.
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
int smc_wr_tx_get_free_slot(struct smc_link *link,
smc_wr_tx_handler handler,
struct smc_wr_buf **wr_buf,
+ struct smc_rdma_wr **wr_rdma_buf,
struct smc_wr_tx_pend_priv **wr_pend_priv)
{
struct smc_wr_tx_pend *wr_pend;
@@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
wr_ib = &link->wr_tx_ibs[idx];
wr_ib->wr_id = wr_id;
*wr_buf = &link->wr_tx_bufs[idx];
+ if (wr_rdma_buf)
+ *wr_rdma_buf = &link->wr_tx_rdmas[idx];
*wr_pend_priv = &wr_pend->priv;
return 0;
}
@@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link,
u32 idx = pend->idx;
/* clear the full struct smc_wr_tx_pend including .priv */
- memset(&link->wr_tx_pends[pend->idx], 0,
- sizeof(link->wr_tx_pends[pend->idx]));
- memset(&link->wr_tx_bufs[pend->idx], 0,
- sizeof(link->wr_tx_bufs[pend->idx]));
+ memset(&link->wr_tx_pends[idx], 0,
+ sizeof(link->wr_tx_pends[idx]));
+ memset(&link->wr_tx_bufs[idx], 0,
+ sizeof(link->wr_tx_bufs[idx]));
test_and_clear_bit(idx, link->wr_tx_mask);
return 1;
}
@@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk)
lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
+ lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
+ lnk->roce_pd->local_dma_lkey;
+ lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
+ lnk->roce_pd->local_dma_lkey;
+ lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
+ lnk->roce_pd->local_dma_lkey;
+ lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
+ lnk->roce_pd->local_dma_lkey;
lnk->wr_tx_ibs[i].next = NULL;
lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
lnk->wr_tx_ibs[i].num_sge = 1;
lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
lnk->wr_tx_ibs[i].send_flags =
IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
+ lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
+ lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
+ lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
+ lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
+ lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
}
for (i = 0; i < lnk->wr_rx_cnt; i++) {
lnk->wr_rx_sges[i].addr =
@@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk)
lnk->wr_tx_mask = NULL;
kfree(lnk->wr_tx_sges);
lnk->wr_tx_sges = NULL;
+ kfree(lnk->wr_tx_rdma_sges);
+ lnk->wr_tx_rdma_sges = NULL;
kfree(lnk->wr_rx_sges);
lnk->wr_rx_sges = NULL;
+ kfree(lnk->wr_tx_rdmas);
+ lnk->wr_tx_rdmas = NULL;
kfree(lnk->wr_rx_ibs);
lnk->wr_rx_ibs = NULL;
kfree(lnk->wr_tx_ibs);
@@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
GFP_KERNEL);
if (!link->wr_rx_ibs)
goto no_mem_wr_tx_ibs;
+ link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
+ sizeof(link->wr_tx_rdmas[0]),
+ GFP_KERNEL);
+ if (!link->wr_tx_rdmas)
+ goto no_mem_wr_rx_ibs;
+ link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
+ sizeof(link->wr_tx_rdma_sges[0]),
+ GFP_KERNEL);
+ if (!link->wr_tx_rdma_sges)
+ goto no_mem_wr_tx_rdmas;
link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
GFP_KERNEL);
if (!link->wr_tx_sges)
- goto no_mem_wr_rx_ibs;
+ goto no_mem_wr_tx_rdma_sges;
link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
sizeof(link->wr_rx_sges[0]),
GFP_KERNEL);
@@ -579,6 +611,10 @@ no_mem_wr_rx_sges:
kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
kfree(link->wr_tx_sges);
+no_mem_wr_tx_rdma_sges:
+ kfree(link->wr_tx_rdma_sges);
+no_mem_wr_tx_rdmas:
+ kfree(link->wr_tx_rdmas);
no_mem_wr_rx_ibs:
kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 1d85bb14fd6f..09bf32fd3959 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev);
int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
struct smc_wr_buf **wr_buf,
+ struct smc_rdma_wr **wrs,
struct smc_wr_tx_pend_priv **wr_pend_priv);
int smc_wr_tx_put_slot(struct smc_link *link,
struct smc_wr_tx_pend_priv *wr_pend_priv);
diff --git a/net/socket.c b/net/socket.c
index e89884e2197b..3c176a12fe48 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -577,6 +577,7 @@ static void __sock_release(struct socket *sock, struct inode *inode)
if (inode)
inode_lock(inode);
sock->ops->release(sock);
+ sock->sk = NULL;
if (inode)
inode_unlock(inode);
sock->ops = NULL;
@@ -669,7 +670,7 @@ static bool skb_is_err_queue(const struct sk_buff *skb)
* before the software timestamp is received, a hardware TX timestamp may be
* returned only if there is no software TX timestamp. Ignore false software
* timestamps, which may be made in the __sock_recv_timestamp() call when the
- * option SO_TIMESTAMP(NS) is enabled on the socket, even when the skb has a
+ * option SO_TIMESTAMP_OLD(NS) is enabled on the socket, even when the skb has a
* hardware timestamp.
*/
static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
@@ -705,7 +706,9 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
- struct scm_timestamping tss;
+ int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
+ struct scm_timestamping_internal tss;
+
int empty = 1, false_tstamp = 0;
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
@@ -719,34 +722,54 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
if (need_software_tstamp) {
if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
- struct timeval tv;
- skb_get_timestamp(skb, &tv);
- put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
- sizeof(tv), &tv);
+ if (new_tstamp) {
+ struct __kernel_sock_timeval tv;
+
+ skb_get_new_timestamp(skb, &tv);
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
+ sizeof(tv), &tv);
+ } else {
+ struct __kernel_old_timeval tv;
+
+ skb_get_timestamp(skb, &tv);
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
+ sizeof(tv), &tv);
+ }
} else {
- struct timespec ts;
- skb_get_timestampns(skb, &ts);
- put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
- sizeof(ts), &ts);
+ if (new_tstamp) {
+ struct __kernel_timespec ts;
+
+ skb_get_new_timestampns(skb, &ts);
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
+ sizeof(ts), &ts);
+ } else {
+ struct timespec ts;
+
+ skb_get_timestampns(skb, &ts);
+ put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
+ sizeof(ts), &ts);
+ }
}
}
memset(&tss, 0, sizeof(tss));
if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
- ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
+ ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
!skb_is_swtx_tstamp(skb, false_tstamp) &&
- ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2)) {
+ ktime_to_timespec64_cond(shhwtstamps->hwtstamp, tss.ts + 2)) {
empty = 0;
if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
!skb_is_err_queue(skb))
put_ts_pktinfo(msg, skb);
}
if (!empty) {
- put_cmsg(msg, SOL_SOCKET,
- SCM_TIMESTAMPING, sizeof(tss), &tss);
+ if (sock_flag(sk, SOCK_TSTAMP_NEW))
+ put_cmsg_scm_timestamping64(msg, &tss);
+ else
+ put_cmsg_scm_timestamping(msg, &tss);
if (skb_is_err_queue(skb) && skb->len &&
SKB_EXT_ERR(skb)->opt_stats)
@@ -941,8 +964,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
EXPORT_SYMBOL(dlci_ioctl_set);
static long sock_do_ioctl(struct net *net, struct socket *sock,
- unsigned int cmd, unsigned long arg,
- unsigned int ifreq_size)
+ unsigned int cmd, unsigned long arg)
{
int err;
void __user *argp = (void __user *)arg;
@@ -968,11 +990,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
} else {
struct ifreq ifr;
bool need_copyout;
- if (copy_from_user(&ifr, argp, ifreq_size))
+ if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
if (!err && need_copyout)
- if (copy_to_user(argp, &ifr, ifreq_size))
+ if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
return -EFAULT;
}
return err;
@@ -1071,8 +1093,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
err = open_related_ns(&net->ns, get_net_ns);
break;
default:
- err = sock_do_ioctl(net, sock, cmd, arg,
- sizeof(struct ifreq));
+ err = sock_do_ioctl(net, sock, cmd, arg);
break;
}
return err;
@@ -2780,8 +2801,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
int err;
set_fs(KERNEL_DS);
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
- sizeof(struct compat_ifreq));
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
set_fs(old_fs);
if (!err)
err = compat_put_timeval(&ktv, up);
@@ -2797,8 +2817,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
int err;
set_fs(KERNEL_DS);
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
- sizeof(struct compat_ifreq));
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
set_fs(old_fs);
if (!err)
err = compat_put_timespec(&kts, up);
@@ -2994,6 +3013,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
return dev_ioctl(net, cmd, &ifreq, NULL);
}
+static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
+ unsigned int cmd,
+ struct compat_ifreq __user *uifr32)
+{
+ struct ifreq __user *uifr;
+ int err;
+
+ /* Handle the fact that while struct ifreq has the same *layout* on
+ * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
+ * which are handled elsewhere, it still has different *size* due to
+ * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
+ * resulting in struct ifreq being 32 and 40 bytes respectively).
+ * As a result, if the struct happens to be at the end of a page and
+ * the next page isn't readable/writable, we get a fault. To prevent
+ * that, copy back and forth to the full size.
+ */
+
+ uifr = compat_alloc_user_space(sizeof(*uifr));
+ if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
+ return -EFAULT;
+
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
+
+ if (!err) {
+ switch (cmd) {
+ case SIOCGIFFLAGS:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFINDEX:
+ case SIOCGIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFPFLAGS:
+ case SIOCGIFTXQLEN:
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCGIFNAME:
+ if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
+ err = -EFAULT;
+ break;
+ }
+ }
+ return err;
+}
+
static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
struct compat_ifreq __user *uifr32)
{
@@ -3109,8 +3176,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
}
set_fs(KERNEL_DS);
- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
- sizeof(struct compat_ifreq));
+ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
set_fs(old_fs);
out:
@@ -3210,21 +3276,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCSIFTXQLEN:
case SIOCBRADDIF:
case SIOCBRDELIF:
+ case SIOCGIFNAME:
case SIOCSIFNAME:
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- case SIOCSARP:
- case SIOCGARP:
- case SIOCDARP:
- case SIOCATMARK:
case SIOCBONDENSLAVE:
case SIOCBONDRELEASE:
case SIOCBONDSETHWADDR:
case SIOCBONDCHANGEACTIVE:
- case SIOCGIFNAME:
- return sock_do_ioctl(net, sock, cmd, arg,
- sizeof(struct compat_ifreq));
+ return compat_ifreq_ioctl(net, sock, cmd, argp);
+
+ case SIOCSARP:
+ case SIOCGARP:
+ case SIOCDARP:
+ case SIOCATMARK:
+ return sock_do_ioctl(net, sock, cmd, arg);
}
return -ENOIOCTLCMD;
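The __sock_recv_timestamp() hunks above split the timestamp control messages into _OLD and _NEW variants so that sockets which opt in via SOCK_TSTAMP_NEW receive 64-bit (y2038-safe) values, while everything else keeps the historical layout. As a point of reference only (this program is not part of the patch), the minimal userspace sketch below exercises the classic path that now maps to SO_TIMESTAMP_OLD/SCM_TIMESTAMP: enable SO_TIMESTAMP on a UDP socket and read the struct timeval from the cmsg returned by recvmsg(); error handling is trimmed for brevity.

/* Minimal sketch, not from the patch: receive the classic SCM_TIMESTAMP
 * control message that the hunks above now emit as the "_OLD" variant.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(5555),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char data[1500], cbuf[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	/* Ask the kernel to attach a receive timestamp to each datagram. */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("rx at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
	return 0;
}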
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 1ff9768f5456..f3023bbc0b7f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -41,6 +41,9 @@ static unsigned long number_cred_unused;
static struct cred machine_cred = {
.usage = ATOMIC_INIT(1),
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ .magic = CRED_MAGIC,
+#endif
};
/*
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dc86713b32b6..1531b0219344 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p)
cred_len = p++;
spin_lock(&ctx->gc_seq_lock);
- req->rq_seqno = ctx->gc_seq++;
+ req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
spin_unlock(&ctx->gc_seq_lock);
+ if (req->rq_seqno == MAXSEQ)
+ goto out_expired;
*p++ = htonl((u32) RPC_GSS_VERSION);
*p++ = htonl((u32) ctx->gc_proc);
@@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p)
mic.data = (u8 *)(p + 1);
maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
- clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ goto out_expired;
} else if (maj_stat != 0) {
- printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
+ pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
+ task->tk_status = -EIO;
goto out_put_ctx;
}
p = xdr_encode_opaque(p, NULL, mic.len);
gss_put_ctx(ctx);
return p;
+out_expired:
+ clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ task->tk_status = -EKEYEXPIRED;
out_put_ctx:
gss_put_ctx(ctx);
return NULL;
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index fb6656295204..507105127095 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
struct crypto_sync_skcipher *cipher;
- unsigned char plain[8];
+ unsigned char *plain;
s32 code;
dprintk("RPC: %s:\n", __func__);
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
if (IS_ERR(cipher))
return PTR_ERR(cipher);
+ plain = kmalloc(8, GFP_NOFS);
+ if (!plain)
+ return -ENOMEM;
+
plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
code = krb5_encrypt(cipher, cksum, plain, buf, 8);
out:
+ kfree(plain);
crypto_free_sync_skcipher(cipher);
return code;
}
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
u32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
- unsigned char plain[8];
+ unsigned char *plain;
+ s32 code;
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
return krb5_make_rc4_seq_num(kctx, direction, seqnum,
cksum, buf);
+ plain = kmalloc(8, GFP_NOFS);
+ if (!plain)
+ return -ENOMEM;
+
plain[0] = (unsigned char) (seqnum & 0xff);
plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
plain[6] = direction;
plain[7] = direction;
- return krb5_encrypt(key, cksum, plain, buf, 8);
+ code = krb5_encrypt(key, cksum, plain, buf, 8);
+ kfree(plain);
+ return code;
}
static s32
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
unsigned char *buf, int *direction, s32 *seqnum)
{
struct crypto_sync_skcipher *cipher;
- unsigned char plain[8];
+ unsigned char *plain;
s32 code;
dprintk("RPC: %s:\n", __func__);
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
if (code)
goto out;
+ plain = kmalloc(8, GFP_NOFS);
+ if (!plain) {
+ code = -ENOMEM;
+ goto out;
+ }
+
code = krb5_decrypt(cipher, cksum, buf, plain, 8);
if (code)
- goto out;
+ goto out_plain;
if ((plain[4] != plain[5]) || (plain[4] != plain[6])
|| (plain[4] != plain[7])) {
code = (s32)KG_BAD_SEQ;
- goto out;
+ goto out_plain;
}
*direction = plain[4];
*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
(plain[2] << 8) | (plain[3]));
+out_plain:
+ kfree(plain);
out:
crypto_free_sync_skcipher(cipher);
return code;
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
int *direction, u32 *seqnum)
{
s32 code;
- unsigned char plain[8];
+ unsigned char *plain;
struct crypto_sync_skcipher *key = kctx->seq;
dprintk("RPC: krb5_get_seq_num:\n");
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
return krb5_get_rc4_seq_num(kctx, cksum, buf,
direction, seqnum);
+ plain = kmalloc(8, GFP_NOFS);
+ if (!plain)
+ return -ENOMEM;
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
- return code;
+ goto out;
if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
- (plain[4] != plain[7]))
- return (s32)KG_BAD_SEQ;
+ (plain[4] != plain[7])) {
+ code = (s32)KG_BAD_SEQ;
+ goto out;
+ }
*direction = plain[4];
*seqnum = ((plain[0]) |
(plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
- return 0;
+out:
+ kfree(plain);
+ return code;
}
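The gss_krb5_seqnum.c hunks replace the on-stack plain[8] buffers with kmalloc(GFP_NOFS) allocations. The diff itself does not state the motivation, but this is the usual fix for handing stack memory to the crypto layer: the cipher path may build a scatterlist over the buffer, and with virtually mapped stacks (CONFIG_VMAP_STACK) mapping a stack address into a scatterlist is invalid. A schematic kernel-style sketch of the pattern follows; it is an illustration of that assumption, not code from this patch, and is not buildable on its own.

/* Schematic illustration only: scatterlists must reference directly
 * mapped pages, so buffers passed to the crypto API are heap-allocated
 * rather than declared on the (possibly vmalloc'ed) stack.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int demo_crypt(void (*do_crypt)(struct scatterlist *sg))
{
	struct scatterlist sg;
	u8 *buf;

	buf = kmalloc(8, GFP_NOFS);	/* was: u8 buf[8] on the stack */
	if (!buf)
		return -ENOMEM;

	sg_init_one(&sg, buf, 8);	/* safe: buf is not stack/vmalloc memory */
	do_crypt(&sg);

	kfree(buf);
	return 0;
}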
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 71d9599b5816..d7ec6132c046 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task)
xdr_buf_init(&req->rq_rcv_buf,
req->rq_rbuffer,
req->rq_rcvsize);
- req->rq_bytes_sent = 0;
p = rpc_encode_header(task);
- if (p == NULL) {
- printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
- rpc_exit(task, -EIO);
+ if (p == NULL)
return;
- }
encode = task->tk_msg.rpc_proc->p_encode;
if (encode == NULL)
@@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task)
/* Did the encode result in an error condition? */
if (task->tk_status != 0) {
/* Was the error nonfatal? */
- if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM)
+ switch (task->tk_status) {
+ case -EAGAIN:
+ case -ENOMEM:
rpc_delay(task, HZ >> 4);
- else
+ break;
+ case -EKEYEXPIRED:
+ task->tk_action = call_refresh;
+ break;
+ default:
rpc_exit(task, task->tk_status);
+ }
return;
}
@@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task)
*p++ = htonl(clnt->cl_vers); /* program version */
*p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
p = rpcauth_marshcred(task, p);
- req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
+ if (p)
+ req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
return p;
}
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index 45a033329cd4..19bb356230ed 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
rcu_read_lock();
xprt = rcu_dereference(clnt->cl_xprt);
/* no "debugfs" dentry? Don't bother with the symlink. */
- if (!xprt->debugfs) {
+ if (IS_ERR_OR_NULL(xprt->debugfs)) {
rcu_read_unlock();
return;
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 73547d17d3c6..f1ec2110efeb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
struct rpc_xprt *xprt = req->rq_xprt;
if (xprt_request_need_enqueue_transmit(task, req)) {
+ req->rq_bytes_sent = 0;
spin_lock(&xprt->queue_lock);
/*
* Requests that carry congestion control credits are added
@@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
INIT_LIST_HEAD(&req->rq_xmit2);
goto out;
}
- } else {
+ } else if (!req->rq_seqno) {
list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
if (pos->rq_task->tk_owner != task->tk_owner)
continue;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index cf51b8f9b15f..1f200119268c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
DMA_TO_DEVICE);
}
+/* If the xdr_buf has more elements than the device can
+ * transmit in a single RDMA Send, then the reply will
+ * have to be copied into a bounce buffer.
+ */
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+ struct xdr_buf *xdr,
+ __be32 *wr_lst)
+{
+ int elements;
+
+ /* xdr->head */
+ elements = 1;
+
+ /* xdr->pages */
+ if (!wr_lst) {
+ unsigned int remaining;
+ unsigned long pageoff;
+
+ pageoff = xdr->page_base & ~PAGE_MASK;
+ remaining = xdr->page_len;
+ while (remaining) {
+ ++elements;
+ remaining -= min_t(u32, PAGE_SIZE - pageoff,
+ remaining);
+ pageoff = 0;
+ }
+ }
+
+ /* xdr->tail */
+ if (xdr->tail[0].iov_len)
+ ++elements;
+
+ /* assume 1 SGE is needed for the transport header */
+ return elements >= rdma->sc_max_send_sges;
+}
+
+/* The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header
+ * buffer.
+ */
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt,
+ struct xdr_buf *xdr, __be32 *wr_lst)
+{
+ unsigned char *dst, *tailbase;
+ unsigned int taillen;
+
+ dst = ctxt->sc_xprt_buf;
+ dst += ctxt->sc_sges[0].length;
+
+ memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
+ dst += xdr->head[0].iov_len;
+
+ tailbase = xdr->tail[0].iov_base;
+ taillen = xdr->tail[0].iov_len;
+ if (wr_lst) {
+ u32 xdrpad;
+
+ xdrpad = xdr_padsize(xdr->page_len);
+ if (taillen && xdrpad) {
+ tailbase += xdrpad;
+ taillen -= xdrpad;
+ }
+ } else {
+ unsigned int len, remaining;
+ unsigned long pageoff;
+ struct page **ppages;
+
+ ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+ pageoff = xdr->page_base & ~PAGE_MASK;
+ remaining = xdr->page_len;
+ while (remaining) {
+ len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+ memcpy(dst, page_address(*ppages), len);
+ remaining -= len;
+ dst += len;
+ pageoff = 0;
+ }
+ }
+
+ if (taillen)
+ memcpy(dst, tailbase, taillen);
+
+ ctxt->sc_sges[0].length += xdr->len;
+ ib_dma_sync_single_for_device(rdma->sc_pd->device,
+ ctxt->sc_sges[0].addr,
+ ctxt->sc_sges[0].length,
+ DMA_TO_DEVICE);
+
+ return 0;
+}
+
/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
* @rdma: controlling transport
* @ctxt: send_ctxt for the Send WR
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
u32 xdr_pad;
int ret;
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
- return -EIO;
+ if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
+ return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+
+ ++ctxt->sc_cur_sge_no;
ret = svc_rdma_dma_map_buf(rdma, ctxt,
xdr->head[0].iov_base,
xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
while (remaining) {
len = min_t(u32, PAGE_SIZE - page_off, remaining);
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
- return -EIO;
+ ++ctxt->sc_cur_sge_no;
ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
page_off, len);
if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
len = xdr->tail[0].iov_len;
tail:
if (len) {
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
- return -EIO;
+ ++ctxt->sc_cur_sge_no;
ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
if (ret < 0)
return ret;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 924c17d46903..57f86c63a463 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
/* Transport header, head iovec, tail iovec */
newxprt->sc_max_send_sges = 3;
/* Add one SGE per page list entry */
- newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
- if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
- pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
- newxprt->sc_max_send_sges);
- goto errout;
- }
+ newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
+ if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
+ newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = svcrdma_max_requests;
newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 7749a2bf6887..21113bfd4eca 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -527,7 +527,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
sendcq = ib_alloc_cq(ia->ri_device, NULL,
ep->rep_attr.cap.max_send_wr + 1,
- 1, IB_POLL_WORKQUEUE);
+ ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
+ IB_POLL_WORKQUEUE);
if (IS_ERR(sendcq)) {
rc = PTR_ERR(sendcq);
goto out1;
@@ -845,17 +846,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
for (i = 0; i <= buf->rb_sc_last; i++) {
sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
if (!sc)
- goto out_destroy;
+ return -ENOMEM;
sc->sc_xprt = r_xprt;
buf->rb_sc_ctxs[i] = sc;
}
return 0;
-
-out_destroy:
- rpcrdma_sendctxs_destroy(buf);
- return -ENOMEM;
}
/* The sendctx queue is not guaranteed to have a size that is a
@@ -1113,8 +1110,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
WQ_MEM_RECLAIM | WQ_HIGHPRI,
0,
r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
- if (!buf->rb_completion_wq)
+ if (!buf->rb_completion_wq) {
+ rc = -ENOMEM;
goto out;
+ }
return 0;
out:
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 13559e6a460b..7754aa3e434f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -48,6 +48,7 @@
#include <net/udp.h>
#include <net/tcp.h>
#include <linux/bvec.h>
+#include <linux/highmem.h>
#include <linux/uio.h>
#include <trace/events/sunrpc.h>
@@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
return sock_recvmsg(sock, msg, flags);
}
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static void
+xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
+{
+ struct bvec_iter bi = {
+ .bi_size = count,
+ };
+ struct bio_vec bv;
+
+ bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
+ for_each_bvec(bv, bvec, bi, bi)
+ flush_dcache_page(bv.bv_page);
+}
+#else
+static inline void
+xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
+{
+}
+#endif
+
static ssize_t
xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
@@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
seek + buf->page_base);
if (ret <= 0)
goto sock_err;
+ xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
offset += ret - buf->page_base;
if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
goto out;
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 5df9d1138ac9..90ba4a1f0a6d 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -23,78 +23,6 @@
#include <linux/rtnetlink.h>
#include <net/switchdev.h>
-/**
- * switchdev_trans_item_enqueue - Enqueue data item to transaction queue
- *
- * @trans: transaction
- * @data: pointer to data being queued
- * @destructor: data destructor
- * @tritem: transaction item being queued
- *
- * Enqeueue data item to transaction queue. tritem is typically placed in
- * cointainter pointed at by data pointer. Destructor is called on
- * transaction abort and after successful commit phase in case
- * the caller did not dequeue the item before.
- */
-void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
- void *data, void (*destructor)(void const *),
- struct switchdev_trans_item *tritem)
-{
- tritem->data = data;
- tritem->destructor = destructor;
- list_add_tail(&tritem->list, &trans->item_list);
-}
-EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
-
-static struct switchdev_trans_item *
-__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
-{
- struct switchdev_trans_item *tritem;
-
- if (list_empty(&trans->item_list))
- return NULL;
- tritem = list_first_entry(&trans->item_list,
- struct switchdev_trans_item, list);
- list_del(&tritem->list);
- return tritem;
-}
-
-/**
- * switchdev_trans_item_dequeue - Dequeue data item from transaction queue
- *
- * @trans: transaction
- */
-void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
-{
- struct switchdev_trans_item *tritem;
-
- tritem = __switchdev_trans_item_dequeue(trans);
- BUG_ON(!tritem);
- return tritem->data;
-}
-EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
-
-static void switchdev_trans_init(struct switchdev_trans *trans)
-{
- INIT_LIST_HEAD(&trans->item_list);
-}
-
-static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
-{
- struct switchdev_trans_item *tritem;
-
- while ((tritem = __switchdev_trans_item_dequeue(trans)))
- tritem->destructor(tritem->data);
-}
-
-static void switchdev_trans_items_warn_destroy(struct net_device *dev,
- struct switchdev_trans *trans)
-{
- WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
- dev->name);
- switchdev_trans_items_destroy(trans);
-}
-
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);
@@ -174,81 +102,32 @@ static int switchdev_deferred_enqueue(struct net_device *dev,
return 0;
}
-/**
- * switchdev_port_attr_get - Get port attribute
- *
- * @dev: port device
- * @attr: attribute to get
- */
-int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
+ struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
{
- const struct switchdev_ops *ops = dev->switchdev_ops;
- struct net_device *lower_dev;
- struct list_head *iter;
- struct switchdev_attr first = {
- .id = SWITCHDEV_ATTR_ID_UNDEFINED
- };
- int err = -EOPNOTSUPP;
+ int err;
+ int rc;
- if (ops && ops->switchdev_port_attr_get)
- return ops->switchdev_port_attr_get(dev, attr);
+ struct switchdev_notifier_port_attr_info attr_info = {
+ .attr = attr,
+ .trans = trans,
+ .handled = false,
+ };
- if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+ rc = call_switchdev_blocking_notifiers(nt, dev,
+ &attr_info.info, NULL);
+ err = notifier_to_errno(rc);
+ if (err) {
+ WARN_ON(!attr_info.handled);
return err;
-
- /* Switch device port(s) may be stacked under
- * bond/team/vlan dev, so recurse down to get attr on
- * each port. Return -ENODATA if attr values don't
- * compare across ports.
- */
-
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = switchdev_port_attr_get(lower_dev, attr);
- if (err)
- break;
- if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
- first = *attr;
- else if (memcmp(&first, attr, sizeof(*attr)))
- return -ENODATA;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
-
-static int __switchdev_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
-{
- const struct switchdev_ops *ops = dev->switchdev_ops;
- struct net_device *lower_dev;
- struct list_head *iter;
- int err = -EOPNOTSUPP;
-
- if (ops && ops->switchdev_port_attr_set) {
- err = ops->switchdev_port_attr_set(dev, attr, trans);
- goto done;
}
- if (attr->flags & SWITCHDEV_F_NO_RECURSE)
- goto done;
-
- /* Switch device port(s) may be stacked under
- * bond/team/vlan dev, so recurse down to set attr on
- * each port.
- */
-
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = __switchdev_port_attr_set(lower_dev, attr, trans);
- if (err)
- break;
- }
-
-done:
- if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
- err = 0;
+ if (!attr_info.handled)
+ return -EOPNOTSUPP;
- return err;
+ return 0;
}
static int switchdev_port_attr_set_now(struct net_device *dev,
@@ -257,8 +136,6 @@ static int switchdev_port_attr_set_now(struct net_device *dev,
struct switchdev_trans trans;
int err;
- switchdev_trans_init(&trans);
-
/* Phase I: prepare for attr set. Driver/device should fail
* here if there are going to be issues in the commit phase,
* such as lack of resources or support. The driver/device
@@ -267,18 +144,10 @@ static int switchdev_port_attr_set_now(struct net_device *dev,
*/
trans.ph_prepare = true;
- err = __switchdev_port_attr_set(dev, attr, &trans);
- if (err) {
- /* Prepare phase failed: abort the transaction. Any
- * resources reserved in the prepare phase are
- * released.
- */
-
- if (err != -EOPNOTSUPP)
- switchdev_trans_items_destroy(&trans);
-
+ err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
+ &trans);
+ if (err)
return err;
- }
/* Phase II: commit attr set. This cannot fail as a fault
* of driver/device. If it does, it's a bug in the driver/device
@@ -286,10 +155,10 @@ static int switchdev_port_attr_set_now(struct net_device *dev,
*/
trans.ph_prepare = false;
- err = __switchdev_port_attr_set(dev, attr, &trans);
+ err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
+ &trans);
WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
dev->name, attr->id);
- switchdev_trans_items_warn_destroy(dev, &trans);
return err;
}
@@ -388,8 +257,6 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
ASSERT_RTNL();
- switchdev_trans_init(&trans);
-
/* Phase I: prepare for obj add. Driver/device should fail
* here if there are going to be issues in the commit phase,
* such as lack of resources or support. The driver/device
@@ -400,17 +267,8 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
trans.ph_prepare = true;
err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
dev, obj, &trans, extack);
- if (err) {
- /* Prepare phase failed: abort the transaction. Any
- * resources reserved in the prepare phase are
- * released.
- */
-
- if (err != -EOPNOTSUPP)
- switchdev_trans_items_destroy(&trans);
-
+ if (err)
return err;
- }
/* Phase II: commit obj add. This cannot fail as a fault
* of driver/device. If it does, it's a bug in the driver/device
@@ -421,7 +279,6 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
dev, obj, &trans, extack);
WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
- switchdev_trans_items_warn_destroy(dev, &trans);
return err;
}
@@ -556,10 +413,11 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
* Call all network notifier blocks.
*/
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
- struct switchdev_notifier_info *info)
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
{
info->dev = dev;
- info->extack = NULL;
+ info->extack = extack;
return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
@@ -591,26 +449,6 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
-bool switchdev_port_same_parent_id(struct net_device *a,
- struct net_device *b)
-{
- struct switchdev_attr a_attr = {
- .orig_dev = a,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
- struct switchdev_attr b_attr = {
- .orig_dev = b,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
-
- if (switchdev_port_attr_get(a, &a_attr) ||
- switchdev_port_attr_get(b, &b_attr))
- return false;
-
- return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
-}
-EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
-
static int __switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
@@ -716,3 +554,54 @@ int switchdev_handle_port_obj_del(struct net_device *dev,
return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
+
+static int __switchdev_handle_port_attr_set(struct net_device *dev,
+ struct switchdev_notifier_port_attr_info *port_attr_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*set_cb)(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans))
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ if (check_cb(dev)) {
+ port_attr_info->handled = true;
+ return set_cb(dev, port_attr_info->attr,
+ port_attr_info->trans);
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+ * unsupported devices, another driver might be able to handle them. But
+ * propagate to the callers any hard errors.
+ *
+ * If the driver does its own bookkeeping of stacked ports, it's not
+ * necessary to go through this helper.
+ */
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
+ check_cb, set_cb);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ return err;
+}
+
+int switchdev_handle_port_attr_set(struct net_device *dev,
+ struct switchdev_notifier_port_attr_info *port_attr_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*set_cb)(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans))
+{
+ int err;
+
+ err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
+ set_cb);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
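The switchdev.c changes above drop the switchdev_ops getter and the transaction-item queue, and route SWITCHDEV_PORT_ATTR_SET through the blocking notifier chain instead; switchdev_handle_port_attr_set() is the helper a driver can call from its notifier block to cover ports stacked under LAG/VLAN uppers. The sketch below shows how a driver-side notifier might dispatch to it, using only the signature introduced in this diff. The foo_* names are hypothetical placeholders, and a real driver handles more event types and registers the notifier block elsewhere.

/* Hypothetical driver glue (foo_* names are placeholders, not from the
 * patch); shows the intended call pattern for switchdev_handle_port_attr_set().
 */
static bool foo_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &foo_netdev_ops;	/* "is this one of ours?" */
}

static int foo_port_attr_set(struct net_device *dev,
			     const struct switchdev_attr *attr,
			     struct switchdev_trans *trans)
{
	/* validate in the trans->ph_prepare pass, apply in the commit pass */
	return -EOPNOTSUPP;
}

static int foo_switchdev_blocking_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     foo_port_dev_check,
						     foo_port_attr_set);
		return notifier_from_errno(err);
	}
	return NOTIFY_DONE;
}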
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2792a3cae682..341ecd796aa4 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1126,7 +1126,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
skb_queue_tail(mc_inputq, skb);
return true;
}
- /* else: fall through */
+ /* fall through */
case CONN_MANAGER:
skb_queue_tail(inputq, skb);
return true;
@@ -1145,7 +1145,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
default:
pr_warn("Dropping received illegal msg type\n");
kfree_skb(skb);
- return false;
+ return true;
};
}
@@ -1425,6 +1425,10 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
l->rcv_unacked = 0;
} else {
/* RESET_MSG or ACTIVATE_MSG */
+ if (mtyp == ACTIVATE_MSG) {
+ msg_set_dest_session_valid(hdr, 1);
+ msg_set_dest_session(hdr, l->peer_session);
+ }
msg_set_max_pkt(hdr, l->advertised_mtu);
strcpy(data, l->if_name);
msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
@@ -1642,6 +1646,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
break;
}
+
+ /* If this endpoint was re-created while peer was ESTABLISHING
+ * it doesn't know current session number. Force re-synch.
+ */
+ if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
+ l->session != msg_dest_session(hdr)) {
+ if (less(l->session, msg_dest_session(hdr)))
+ l->session = msg_dest_session(hdr) + 1;
+ break;
+ }
+
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
if (mtyp == RESET_MSG || !link_is_up(l))
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a0924956bb61..d7e4b8b93f9d 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -360,6 +360,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
msg_set_bits(m, 1, 0, 0xffff, n);
}
+/* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch
+ * link peer session number
+ */
+static inline bool msg_dest_session_valid(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 16, 0x1);
+}
+
+static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid)
+{
+ msg_set_bits(m, 1, 16, 0x1, valid);
+}
+
+static inline u16 msg_dest_session(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 0, 0xffff);
+}
+
+static inline void msg_set_dest_session(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 1, 0, 0xffff, n);
+}
/*
* Word 2
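The new msg_dest_session{,_valid} accessors reuse bit 16 and bits 0-15 of header word 1 for ACTIVATE_MSG, so the re-created endpoint in the link.c hunk above can learn the peer's current session number. For readers unfamiliar with msg_set_bits(), the standalone sketch below shows the same mask-and-shift packing in plain C; it is purely illustrative (the real accessors additionally convert the on-wire word with htonl/ntohl).

/* Plain-C illustration (not TIPC code) of packing a 1-bit "valid" flag at
 * bit 16 and a 16-bit session number at bits 0-15 of a 32-bit header word.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t set_bits(uint32_t w, int pos, uint32_t mask, uint32_t val)
{
	w &= ~(mask << pos);			/* clear the field */
	return w | ((val & mask) << pos);	/* write the new value */
}

static uint32_t get_bits(uint32_t w, int pos, uint32_t mask)
{
	return (w >> pos) & mask;
}

int main(void)
{
	uint32_t word1 = 0;

	word1 = set_bits(word1, 16, 0x1, 1);		/* dest_session_valid */
	word1 = set_bits(word1, 0, 0xffff, 0xbeef);	/* dest_session */

	printf("valid=%u session=0x%04x\n",
	       (unsigned)get_bits(word1, 16, 0x1),
	       (unsigned)get_bits(word1, 0, 0xffff));
	return 0;
}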
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 40f5cae623a7..4ad3586da8f0 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
return limit;
}
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
{
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
return buf;
}
+static inline bool string_is_valid(char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_bearer_config *b;
+ int len;
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(b->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
return -EMSGSIZE;
@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *bearer;
+ int len;
name = (char *)TLV_DATA(msg->req);
@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
return -EMSGSIZE;
@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
int err;
+ int len;
if (!attrs[TIPC_NLA_LINK])
return -EINVAL;
@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
return err;
name = (char *)TLV_DATA(msg->req);
+
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
return 0;
@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
struct nlattr *prop;
struct nlattr *media;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
if (!media)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
return -EMSGSIZE;
@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
return -EMSGSIZE;
@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
struct tipc_link_config *lc;
struct tipc_bearer *bearer;
struct tipc_media *media;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
media = tipc_media_find(lc->name);
if (media) {
cmd->doit = &__tipc_nl_media_set;
@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *link;
+ int len;
name = (char *)TLV_DATA(msg->req);
@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
if (!link)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
return -EMSGSIZE;
@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
};
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+ return -EINVAL;
depth = ntohl(ntq->depth);
@@ -904,8 +952,10 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
TIPC_NL_PUBL_GET);
- if (!hdr)
+ if (!hdr) {
+ kfree_skb(args);
return -EMSGSIZE;
+ }
nest = nla_nest_start(args, TIPC_NLA_SOCK);
if (!nest) {
@@ -1206,7 +1256,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
}
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (len && !TLV_OK(msg.req, len)) {
+ if (!len || !TLV_OK(msg.req, len)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
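Several of the netlink_compat.c hunks above bound the user-supplied TLV payload with TLV_GET_DATA_LEN() and then reject bearer/link/media names that are not NUL-terminated within that bound via the new string_is_valid() helper, which is simply a bounded memchr() scan. A userspace equivalent, for illustration only:

/* Userspace equivalent of the string_is_valid() helper added above: a name
 * is acceptable only if a terminating NUL appears within the length the
 * TLV actually carries.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool string_is_valid(const char *s, int len)
{
	return memchr(s, '\0', len) != NULL;
}

int main(void)
{
	char ok[16] = "eth0";			/* NUL-terminated within 16 bytes */
	char bad[4] = { 'e', 't', 'h', '0' };	/* no terminator in 4 bytes */

	printf("%d %d\n", string_is_valid(ok, sizeof(ok)),
	       string_is_valid(bad, sizeof(bad)));	/* prints "1 0" */
	return 0;
}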
diff --git a/net/tipc/node.c b/net/tipc/node.c
index db2a6c3e0be9..2dc4919ab23c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -830,15 +830,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
tipc_node_write_lock(n);
if (!tipc_link_is_establishing(l)) {
__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
- if (delete) {
- kfree(l);
- le->link = NULL;
- n->link_cnt--;
- }
} else {
/* Defuse pending tipc_node_link_up() */
+ tipc_link_reset(l);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
}
+ if (delete) {
+ kfree(l);
+ le->link = NULL;
+ n->link_cnt--;
+ }
trace_tipc_node_link_down(n, true, "node link down or deleted!");
tipc_node_write_unlock(n);
if (delete)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1217c90a363b..e482b342bfa8 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -379,16 +379,18 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
#define tipc_wait_for_cond(sock_, timeo_, condition_) \
({ \
+ DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
struct sock *sk_; \
int rc_; \
\
while ((rc_ = !(condition_))) { \
- DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
+ /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
+ smp_rmb(); \
sk_ = (sock_)->sk; \
rc_ = tipc_sk_sock_err((sock_), timeo_); \
if (rc_) \
break; \
- prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
+ add_wait_queue(sk_sleep(sk_), &wait_); \
release_sock(sk_); \
*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
sched_annotate_sleep(); \
@@ -735,7 +737,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
case TIPC_CONNECTING:
if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
revents |= EPOLLOUT;
- /* fall thru' */
+ /* fall through */
case TIPC_LISTEN:
if (!skb_queue_empty(&sk->sk_receive_queue))
revents |= EPOLLIN | EPOLLRDNORM;
@@ -1677,7 +1679,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
struct sock *sk = sock->sk;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
long timeo = *timeop;
int err = sock_error(sk);
@@ -1685,15 +1687,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
return err;
for (;;) {
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
if (sk->sk_shutdown & RCV_SHUTDOWN) {
err = -ENOTCONN;
break;
}
+ add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+ sched_annotate_sleep();
lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -1709,7 +1713,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
if (err)
break;
}
- finish_wait(sk_sleep(sk), &wait);
*timeop = timeo;
return err;
}
@@ -1982,6 +1985,8 @@ static void tipc_sk_proto_rcv(struct sock *sk,
return;
case SOCK_WAKEUP:
tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+ /* coupled with smp_rmb() in tipc_wait_for_cond() */
+ smp_wmb();
tsk->cong_link_cnt--;
wakeup = true;
break;
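The smp_wmb() added here pairs with the smp_rmb() inserted in tipc_wait_for_cond() above: the receive path publishes the cong_link_cnt update before the wakeup becomes visible, and the woken sender re-reads the counter only after observing the wakeup, so it cannot see a stale value. The standalone C11 sketch below illustrates the same publish/observe pairing with release/acquire atomics; it is an analogy for the ordering idea, not TIPC code (release/acquire is a slightly stronger tool than the raw barriers used in the kernel path).

/* Illustration of the barrier-pairing idea in portable C11 (not TIPC code):
 * the writer publishes data before raising the flag; the reader observes
 * the flag before reading the data.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int cong_links = 1;		/* plain data, updated before publish */
static atomic_int wakeup_flag;

static void *writer(void *arg)
{
	cong_links = 0;			/* the update the waiter must see ... */
	atomic_store_explicit(&wakeup_flag, 1,
			      memory_order_release);	/* ... before this */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);

	/* Once the flag is observed (acquire), the data write that preceded
	 * the release store is guaranteed to be visible as well.
	 */
	while (!atomic_load_explicit(&wakeup_flag, memory_order_acquire))
		;
	printf("cong_links=%d\n", cong_links);	/* prints 0, never the stale 1 */

	pthread_join(t, NULL);
	return 0;
}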
@@ -2416,7 +2421,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
* case is EINPROGRESS, rather than EALREADY.
*/
res = -EINPROGRESS;
- /* fall thru' */
+ /* fall through */
case TIPC_CONNECTING:
if (!timeout) {
if (previous == TIPC_CONNECTING)
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index efb16f69bd2c..4a708a4e8583 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -60,7 +60,6 @@
* @awork: accept work item
* @rcv_wq: receive workqueue
* @send_wq: send workqueue
- * @max_rcvbuf_size: maximum permitted receive message length
* @listener: topsrv listener socket
* @name: server name
*/
@@ -72,7 +71,6 @@ struct tipc_topsrv {
struct work_struct awork;
struct workqueue_struct *rcv_wq;
struct workqueue_struct *send_wq;
- int max_rcvbuf_size;
struct socket *listener;
char name[TIPC_SERVER_NAME_LEN];
};
@@ -398,7 +396,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK)
return -EWOULDBLOCK;
- if (ret > 0) {
+ if (ret == sizeof(s)) {
read_lock_bh(&sk->sk_callback_lock);
ret = tipc_conn_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock);
@@ -648,7 +646,6 @@ int tipc_topsrv_start(struct net *net)
return -ENOMEM;
srv->net = net;
- srv->max_rcvbuf_size = sizeof(struct tipc_subscr);
INIT_WORK(&srv->awork, tipc_topsrv_accept);
strscpy(srv->name, name, sizeof(srv->name));
diff --git a/net/tipc/trace.c b/net/tipc/trace.c
index 964823841efe..265f6a26aa3d 100644
--- a/net/tipc/trace.c
+++ b/net/tipc/trace.c
@@ -111,7 +111,7 @@ int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
break;
default:
break;
- };
+ }
i += scnprintf(buf + i, sz - i, " | %u",
msg_src_droppable(hdr));
i += scnprintf(buf + i, sz - i, " %u",
@@ -122,7 +122,7 @@ int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
default:
/* need more? */
break;
- };
+ }
i += scnprintf(buf + i, sz - i, "\n");
if (!more)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index d753e362d2d9..4a1da837a733 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -247,6 +247,7 @@ static int tls_push_record(struct sock *sk,
int flags,
unsigned char record_type)
{
+ struct tls_prot_info *prot = &ctx->prot_info;
struct tcp_sock *tp = tcp_sk(sk);
struct page_frag dummy_tag_frag;
skb_frag_t *frag;
@@ -256,21 +257,21 @@ static int tls_push_record(struct sock *sk,
frag = &record->frags[0];
tls_fill_prepend(ctx,
skb_frag_address(frag),
- record->len - ctx->tx.prepend_size,
- record_type);
+ record->len - prot->prepend_size,
+ record_type,
+ ctx->crypto_send.info.version);
/* HW doesn't care about the data in the tag, because it fills it. */
dummy_tag_frag.page = skb_frag_page(frag);
dummy_tag_frag.offset = 0;
- tls_append_frag(record, &dummy_tag_frag, ctx->tx.tag_size);
+ tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
record->end_seq = tp->write_seq + record->len;
spin_lock_irq(&offload_ctx->lock);
list_add_tail(&record->list, &offload_ctx->records_list);
spin_unlock_irq(&offload_ctx->lock);
offload_ctx->open_record = NULL;
- set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
- tls_advance_record_sn(sk, &ctx->tx);
+ tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);
for (i = 0; i < record->num_frags; i++) {
frag = &record->frags[i];
@@ -346,6 +347,7 @@ static int tls_push_data(struct sock *sk,
unsigned char record_type)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
@@ -365,9 +367,11 @@ static int tls_push_data(struct sock *sk,
return -sk->sk_err;
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- rc = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
- if (rc < 0)
- return rc;
+ if (tls_is_partially_sent_record(tls_ctx)) {
+ rc = tls_push_partial_record(sk, tls_ctx, flags);
+ if (rc < 0)
+ return rc;
+ }
pfrag = sk_page_frag(sk);
@@ -375,10 +379,10 @@ static int tls_push_data(struct sock *sk,
* we need to leave room for an authentication tag.
*/
max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
- tls_ctx->tx.prepend_size;
+ prot->prepend_size;
do {
rc = tls_do_allocation(sk, ctx, pfrag,
- tls_ctx->tx.prepend_size);
+ prot->prepend_size);
if (rc) {
rc = sk_stream_wait_memory(sk, &timeo);
if (!rc)
@@ -396,7 +400,7 @@ handle_error:
size = orig_size;
destroy_record(record);
ctx->open_record = NULL;
- } else if (record->len > tls_ctx->tx.prepend_size) {
+ } else if (record->len > prot->prepend_size) {
goto last_record;
}
@@ -542,6 +546,23 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
+{
+ int rc = 0;
+
+ if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
+ gfp_t sk_allocation = sk->sk_allocation;
+
+ sk->sk_allocation = GFP_ATOMIC;
+ rc = tls_push_partial_record(sk, ctx,
+ MSG_DONTWAIT | MSG_NOSIGNAL);
+ sk->sk_allocation = sk_allocation;
+ }
+
+ if (!rc)
+ ctx->sk_write_space(sk);
+}
+
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -657,6 +678,8 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
u16 nonce_size, tag_size, iv_size, rec_seq_size;
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_record_info *start_marker_record;
struct tls_offload_context_tx *offload_ctx;
struct tls_crypto_info *crypto_info;
@@ -702,10 +725,10 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
goto free_offload_ctx;
}
- ctx->tx.prepend_size = TLS_HEADER_SIZE + nonce_size;
- ctx->tx.tag_size = tag_size;
- ctx->tx.overhead_size = ctx->tx.prepend_size + ctx->tx.tag_size;
- ctx->tx.iv_size = iv_size;
+ prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
+ prot->tag_size = tag_size;
+ prot->overhead_size = prot->prepend_size + prot->tag_size;
+ prot->iv_size = iv_size;
ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
GFP_KERNEL);
if (!ctx->tx.iv) {
@@ -715,7 +738,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
- ctx->tx.rec_seq_size = rec_seq_size;
+ prot->rec_seq_size = rec_seq_size;
ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
if (!ctx->tx.rec_seq) {
rc = -ENOMEM;
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 450a6dbc5a88..54c3a758f2a7 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -73,7 +73,8 @@ static int tls_enc_record(struct aead_request *aead_req,
len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;
tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
- (char *)&rcd_sn, sizeof(rcd_sn), buf[0]);
+ (char *)&rcd_sn, sizeof(rcd_sn), buf[0],
+ TLS_1_2_VERSION);
memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
TLS_CIPHER_AES_GCM_128_IV_SIZE);
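The tls_device.c/tls_main.c changes in this range move the per-direction record-layout fields into struct tls_prot_info and extend the setsockopt path to accept AES-GCM-256 cipher info and TLS 1.3, with a check that both directions use the same version and cipher. From userspace the kTLS setup call itself is unchanged; the sketch below shows the long-standing TLS_TX configuration with AES-GCM-128/TLS 1.2 (key material left zeroed as an obvious placeholder; a real program copies the values negotiated by its handshake). Kernels carrying this series additionally accept struct tls12_crypto_info_aes_gcm_256 and TLS_1_3_VERSION through the same interface.

/* Userspace kTLS TX setup sketch (dummy key material). SOL_TLS and TCP_ULP
 * may not be exposed by older libc headers, hence the fallback defines.
 */
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif
#ifndef TCP_ULP
#define TCP_ULP 31
#endif

static int enable_ktls_tx(int sk)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	/* ci.iv, ci.rec_seq, ci.key, ci.salt left zeroed here for brevity */

	/* Attach the TLS upper-layer protocol, then hand over the TX keys. */
	if (setsockopt(sk, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sk, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}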
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 78cb4a584080..17e8667917aa 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -61,6 +61,8 @@ static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
+static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ struct proto *base);
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
@@ -207,23 +209,9 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
return tls_push_sg(sk, ctx, sg, offset, flags);
}
-int tls_push_pending_closed_record(struct sock *sk,
- struct tls_context *tls_ctx,
- int flags, long *timeo)
-{
- struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-
- if (tls_is_partially_sent_record(tls_ctx) ||
- !list_empty(&ctx->tx_list))
- return tls_tx_records(sk, flags);
- else
- return tls_ctx->push_pending_record(sk, flags);
-}
-
static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
- struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
/* If in_tcp_sendpages call lower protocol write space handler
* to ensure we wake up any waiting operations there. For example
@@ -234,14 +222,12 @@ static void tls_write_space(struct sock *sk)
return;
}
- /* Schedule the transmission if tx list is ready */
- if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
- /* Schedule the transmission */
- if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
- schedule_delayed_work(&tx_ctx->tx_work.work, 0);
- }
-
- ctx->sk_write_space(sk);
+#ifdef CONFIG_TLS_DEVICE
+ if (ctx->tx_conf == TLS_HW)
+ tls_device_write_space(sk, ctx);
+ else
+#endif
+ tls_sw_write_space(sk, ctx);
}
static void tls_ctx_free(struct tls_context *ctx)
@@ -264,8 +250,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
lock_sock(sk);
sk_proto_close = ctx->sk_proto_close;
- if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) ||
- (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) {
+ if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
+ goto skip_tx_cleanup;
+
+ if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
free_ctx = true;
goto skip_tx_cleanup;
}
@@ -368,6 +356,30 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
rc = -EFAULT;
break;
}
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *
+ crypto_info_aes_gcm_256 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_gcm_256,
+ info);
+
+ if (len != sizeof(*crypto_info_aes_gcm_256)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(crypto_info_aes_gcm_256->iv,
+ ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
+ TLS_CIPHER_AES_GCM_256_IV_SIZE);
+ memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
+ TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval,
+ crypto_info_aes_gcm_256,
+ sizeof(*crypto_info_aes_gcm_256)))
+ rc = -EFAULT;
+ break;
+ }
default:
rc = -EINVAL;
}
@@ -407,7 +419,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
unsigned int optlen, int tx)
{
struct tls_crypto_info *crypto_info;
+ struct tls_crypto_info *alt_crypto_info;
struct tls_context *ctx = tls_get_ctx(sk);
+ size_t optsize;
int rc = 0;
int conf;
@@ -416,10 +430,13 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
goto out;
}
- if (tx)
+ if (tx) {
crypto_info = &ctx->crypto_send.info;
- else
+ alt_crypto_info = &ctx->crypto_recv.info;
+ } else {
crypto_info = &ctx->crypto_recv.info;
+ alt_crypto_info = &ctx->crypto_send.info;
+ }
/* Currently we don't support set crypto info more than one time */
if (TLS_CRYPTO_INFO_READY(crypto_info)) {
@@ -434,14 +451,28 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
}
/* check version */
- if (crypto_info->version != TLS_1_2_VERSION) {
+ if (crypto_info->version != TLS_1_2_VERSION &&
+ crypto_info->version != TLS_1_3_VERSION) {
rc = -ENOTSUPP;
goto err_crypto_info;
}
+ /* Ensure that TLS version and ciphers are same in both directions */
+ if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
+ if (alt_crypto_info->version != crypto_info->version ||
+ alt_crypto_info->cipher_type != crypto_info->cipher_type) {
+ rc = -EINVAL;
+ goto err_crypto_info;
+ }
+ }
+
switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
+ case TLS_CIPHER_AES_GCM_128:
+ case TLS_CIPHER_AES_GCM_256: {
+ optsize = crypto_info->cipher_type == TLS_CIPHER_AES_GCM_128 ?
+ sizeof(struct tls12_crypto_info_aes_gcm_128) :
+ sizeof(struct tls12_crypto_info_aes_gcm_256);
+ if (optlen != optsize) {
rc = -EINVAL;
goto err_crypto_info;
}
@@ -551,6 +582,43 @@ static struct tls_context *create_ctx(struct sock *sk)
return ctx;
}
+static void tls_build_proto(struct sock *sk)
+{
+ int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+
+ /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
+ if (ip_ver == TLSV6 &&
+ unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+ mutex_lock(&tcpv6_prot_mutex);
+ if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+ build_protos(tls_prots[TLSV6], sk->sk_prot);
+ smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+ }
+ mutex_unlock(&tcpv6_prot_mutex);
+ }
+
+ if (ip_ver == TLSV4 &&
+ unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
+ mutex_lock(&tcpv4_prot_mutex);
+ if (likely(sk->sk_prot != saved_tcpv4_prot)) {
+ build_protos(tls_prots[TLSV4], sk->sk_prot);
+ smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
+ }
+ mutex_unlock(&tcpv4_prot_mutex);
+ }
+}
+
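The rebuild-and-publish pattern in tls_build_proto() above (acquire-load fast path, mutex plus re-check, release-store to publish) can be sketched with C11 atomics outside the kernel; all names below are illustrative and not part of the patch:

#include <pthread.h>
#include <stdatomic.h>

struct proto_table { int dummy; };                 /* stand-in for the per-family proto array */

static _Atomic(const void *) saved_base;           /* last base proto the table was built for */
static struct proto_table built;                   /* the rebuilt table */
static pthread_mutex_t build_mutex = PTHREAD_MUTEX_INITIALIZER;

static void build_table(struct proto_table *t, const void *base)
{
	t->dummy = 1;                              /* placeholder for the real rebuild */
	(void)base;
}

/* Rebuild only when the base pointer changed: cheap acquire-load fast path,
 * mutex plus re-check slow path, release-store to publish the result.
 */
static void maybe_rebuild(const void *base)
{
	if (base == atomic_load_explicit(&saved_base, memory_order_acquire))
		return;                            /* already built for this base */

	pthread_mutex_lock(&build_mutex);
	if (base != atomic_load_explicit(&saved_base, memory_order_relaxed)) {
		build_table(&built, base);         /* fill the table first ... */
		atomic_store_explicit(&saved_base, base,
				      memory_order_release); /* ... then publish */
	}
	pthread_mutex_unlock(&build_mutex);
}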
+static void tls_hw_sk_destruct(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ ctx->sk_destruct(sk);
+ /* Free ctx */
+ kfree(ctx);
+ icsk->icsk_ulp_data = NULL;
+}
+
static int tls_hw_prot(struct sock *sk)
{
struct tls_context *ctx;
@@ -564,12 +632,17 @@ static int tls_hw_prot(struct sock *sk)
if (!ctx)
goto out;
+ spin_unlock_bh(&device_spinlock);
+ tls_build_proto(sk);
ctx->hash = sk->sk_prot->hash;
ctx->unhash = sk->sk_prot->unhash;
ctx->sk_proto_close = sk->sk_prot->close;
+ ctx->sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = tls_hw_sk_destruct;
ctx->rx_conf = TLS_HW_RECORD;
ctx->tx_conf = TLS_HW_RECORD;
update_sk_prot(sk, ctx);
+ spin_lock_bh(&device_spinlock);
rc = 1;
break;
}
@@ -668,7 +741,6 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
static int tls_init(struct sock *sk)
{
- int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
struct tls_context *ctx;
int rc = 0;
@@ -691,27 +763,7 @@ static int tls_init(struct sock *sk)
goto out;
}
- /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
- if (ip_ver == TLSV6 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
- mutex_lock(&tcpv6_prot_mutex);
- if (likely(sk->sk_prot != saved_tcpv6_prot)) {
- build_protos(tls_prots[TLSV6], sk->sk_prot);
- smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
- }
- mutex_unlock(&tcpv6_prot_mutex);
- }
-
- if (ip_ver == TLSV4 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
- mutex_lock(&tcpv4_prot_mutex);
- if (likely(sk->sk_prot != saved_tcpv4_prot)) {
- build_protos(tls_prots[TLSV4], sk->sk_prot);
- smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
- }
- mutex_unlock(&tcpv4_prot_mutex);
- }
-
+ tls_build_proto(sk);
ctx->tx_conf = TLS_BASE;
ctx->rx_conf = TLS_BASE;
update_sk_prot(sk, ctx);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 11cdc8f7db63..425351ac2a9b 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -120,12 +120,42 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
return __skb_nsg(skb, offset, len, 0);
}
+static int padding_length(struct tls_sw_context_rx *ctx,
+ struct tls_context *tls_ctx, struct sk_buff *skb)
+{
+ struct strp_msg *rxm = strp_msg(skb);
+ int sub = 0;
+
+ /* Determine zero-padding length */
+ if (tls_ctx->prot_info.version == TLS_1_3_VERSION) {
+ char content_type = 0;
+ int err;
+ int back = 17;
+
+ while (content_type == 0) {
+ if (back > rxm->full_len)
+ return -EBADMSG;
+ err = skb_copy_bits(skb,
+ rxm->offset + rxm->full_len - back,
+ &content_type, 1);
+ if (content_type)
+ break;
+ sub++;
+ back++;
+ }
+ ctx->control = content_type;
+ }
+ return sub;
+}
+
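As a rough user-space illustration of the TLS 1.3 trailer handling in padding_length() above (payload, one content-type byte, then zero padding at the end of the decrypted plaintext), assuming a plain buffer instead of skb_copy_bits():

#include <stddef.h>

/* Walk backwards over a decrypted TLS 1.3 inner plaintext laid out as
 *   ... payload ... | content-type (1 byte) | zero padding ...
 * Returns the number of padding bytes and stores the content type,
 * or returns -1 if the record is all zeros (malformed).
 */
static int tls13_padding_length(const unsigned char *plaintext, size_t len,
				unsigned char *content_type)
{
	size_t i = len;

	while (i > 0) {
		if (plaintext[i - 1] != 0) {
			*content_type = plaintext[i - 1];
			return (int)(len - i);     /* bytes of zero padding */
		}
		i--;
	}
	return -1;                                 /* no content-type byte found */
}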
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
struct aead_request *aead_req = (struct aead_request *)req;
struct scatterlist *sgout = aead_req->dst;
+ struct scatterlist *sgin = aead_req->src;
struct tls_sw_context_rx *ctx;
struct tls_context *tls_ctx;
+ struct tls_prot_info *prot;
struct scatterlist *sg;
struct sk_buff *skb;
unsigned int pages;
@@ -134,12 +164,17 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
skb = (struct sk_buff *)req->data;
tls_ctx = tls_get_ctx(skb->sk);
ctx = tls_sw_ctx_rx(tls_ctx);
- pending = atomic_dec_return(&ctx->decrypt_pending);
+ prot = &tls_ctx->prot_info;
/* Propagate if there was an err */
if (err) {
ctx->async_wait.err = err;
tls_err_abort(skb->sk, err);
+ } else {
+ struct strp_msg *rxm = strp_msg(skb);
+ rxm->full_len -= padding_length(ctx, tls_ctx, skb);
+ rxm->offset += prot->prepend_size;
+ rxm->full_len -= prot->overhead_size;
}
/* After using skb->sk to propagate sk through crypto async callback
@@ -147,18 +182,21 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
*/
skb->sk = NULL;
- /* Release the skb, pages and memory allocated for crypto req */
- kfree_skb(skb);
- /* Skip the first S/G entry as it points to AAD */
- for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
- if (!sg)
- break;
- put_page(sg_page(sg));
+ /* Free the destination pages if skb was not decrypted inplace */
+ if (sgout != sgin) {
+ /* Skip the first S/G entry as it points to AAD */
+ for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+ if (!sg)
+ break;
+ put_page(sg_page(sg));
+ }
}
kfree(aead_req);
+ pending = atomic_dec_return(&ctx->decrypt_pending);
+
if (!pending && READ_ONCE(ctx->async_notify))
complete(&ctx->async_wait.completion);
}
@@ -173,13 +211,14 @@ static int tls_do_decryption(struct sock *sk,
bool async)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
int ret;
aead_request_set_tfm(aead_req, ctx->aead_recv);
- aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
+ aead_request_set_ad(aead_req, prot->aad_size);
aead_request_set_crypt(aead_req, sgin, sgout,
- data_len + tls_ctx->rx.tag_size,
+ data_len + prot->tag_size,
(u8 *)iv_recv);
if (async) {
@@ -217,12 +256,13 @@ static int tls_do_decryption(struct sock *sk,
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec;
sk_msg_trim(sk, &rec->msg_plaintext, target_size);
if (target_size > 0)
- target_size += tls_ctx->tx.overhead_size;
+ target_size += prot->overhead_size;
sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}
@@ -239,6 +279,7 @@ static int tls_alloc_encrypted_msg(struct sock *sk, int len)
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec;
struct sk_msg *msg_pl = &rec->msg_plaintext;
@@ -254,7 +295,7 @@ static int tls_clone_plaintext_msg(struct sock *sk, int required)
/* Skip initial bytes in msg_en's data to be able to use
* same offset of both plain and encrypted data.
*/
- skip = tls_ctx->tx.prepend_size + msg_pl->sg.size;
+ skip = prot->prepend_size + msg_pl->sg.size;
return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
@@ -262,6 +303,7 @@ static int tls_clone_plaintext_msg(struct sock *sk, int required)
static struct tls_rec *tls_get_rec(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct sk_msg *msg_pl, *msg_en;
struct tls_rec *rec;
@@ -280,13 +322,11 @@ static struct tls_rec *tls_get_rec(struct sock *sk)
sk_msg_init(msg_en);
sg_init_table(rec->sg_aead_in, 2);
- sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
- sizeof(rec->aad_space));
+ sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
sg_unmark_end(&rec->sg_aead_in[1]);
sg_init_table(rec->sg_aead_out, 2);
- sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
- sizeof(rec->aad_space));
+ sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
sg_unmark_end(&rec->sg_aead_out[1]);
return rec;
@@ -375,6 +415,7 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
struct aead_request *aead_req = (struct aead_request *)req;
struct sock *sk = req->data;
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct scatterlist *sge;
struct sk_msg *msg_en;
@@ -386,8 +427,8 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
msg_en = &rec->msg_encrypted;
sge = sk_msg_elem(msg_en, msg_en->sg.curr);
- sge->offset -= tls_ctx->tx.prepend_size;
- sge->length += tls_ctx->tx.prepend_size;
+ sge->offset -= prot->prepend_size;
+ sge->length += prot->prepend_size;
/* Check if error is previously set on socket */
if (err || sk->sk_err) {
@@ -434,21 +475,26 @@ static int tls_do_encryption(struct sock *sk,
struct aead_request *aead_req,
size_t data_len, u32 start)
{
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_rec *rec = ctx->open_rec;
struct sk_msg *msg_en = &rec->msg_encrypted;
struct scatterlist *sge = sk_msg_elem(msg_en, start);
int rc;
- sge->offset += tls_ctx->tx.prepend_size;
- sge->length -= tls_ctx->tx.prepend_size;
+ memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
+ xor_iv_with_seq(prot->version, rec->iv_data,
+ tls_ctx->tx.rec_seq);
+
+ sge->offset += prot->prepend_size;
+ sge->length -= prot->prepend_size;
msg_en->sg.curr = start;
aead_request_set_tfm(aead_req, ctx->aead_send);
- aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
+ aead_request_set_ad(aead_req, prot->aad_size);
aead_request_set_crypt(aead_req, rec->sg_aead_in,
rec->sg_aead_out,
- data_len, tls_ctx->tx.iv);
+ data_len, rec->iv_data);
aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tls_encrypt_done, sk);
@@ -460,8 +506,8 @@ static int tls_do_encryption(struct sock *sk,
rc = crypto_aead_encrypt(aead_req);
if (!rc || rc != -EINPROGRESS) {
atomic_dec(&ctx->encrypt_pending);
- sge->offset -= tls_ctx->tx.prepend_size;
- sge->length += tls_ctx->tx.prepend_size;
+ sge->offset -= prot->prepend_size;
+ sge->length += prot->prepend_size;
}
if (!rc) {
@@ -473,7 +519,7 @@ static int tls_do_encryption(struct sock *sk,
/* Unhook the record from context if encryption is not failure */
ctx->open_rec = NULL;
- tls_advance_record_sn(sk, &tls_ctx->tx);
+ tls_advance_record_sn(sk, &tls_ctx->tx, prot->version);
return rc;
}
@@ -599,6 +645,7 @@ static int tls_push_record(struct sock *sk, int flags,
unsigned char record_type)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
u32 i, split_point, uninitialized_var(orig_end);
@@ -617,12 +664,12 @@ static int tls_push_record(struct sock *sk, int flags,
split = split_point && split_point < msg_pl->sg.size;
if (split) {
rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
- split_point, tls_ctx->tx.overhead_size,
+ split_point, prot->overhead_size,
&orig_end);
if (rc < 0)
return rc;
sk_msg_trim(sk, msg_en, msg_pl->sg.size +
- tls_ctx->tx.overhead_size);
+ prot->overhead_size);
}
rec->tx_flags = flags;
@@ -630,7 +677,17 @@ static int tls_push_record(struct sock *sk, int flags,
i = msg_pl->sg.end;
sk_msg_iter_var_prev(i);
- sg_mark_end(sk_msg_elem(msg_pl, i));
+
+ rec->content_type = record_type;
+ if (prot->version == TLS_1_3_VERSION) {
+ /* Add content type to end of message. No padding added */
+ sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
+ sg_mark_end(&rec->sg_content_type);
+ sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
+ &rec->sg_content_type);
+ } else {
+ sg_mark_end(sk_msg_elem(msg_pl, i));
+ }
i = msg_pl->sg.start;
sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
@@ -643,18 +700,20 @@ static int tls_push_record(struct sock *sk, int flags,
i = msg_en->sg.start;
sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
- tls_make_aad(rec->aad_space, msg_pl->sg.size,
- tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
- record_type);
+ tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
+ tls_ctx->tx.rec_seq, prot->rec_seq_size,
+ record_type, prot->version);
tls_fill_prepend(tls_ctx,
page_address(sg_page(&msg_en->sg.data[i])) +
- msg_en->sg.data[i].offset, msg_pl->sg.size,
- record_type);
+ msg_en->sg.data[i].offset,
+ msg_pl->sg.size + prot->tail_size,
+ record_type, prot->version);
tls_ctx->pending_open_record_frags = false;
- rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i);
+ rc = tls_do_encryption(sk, tls_ctx, ctx, req,
+ msg_pl->sg.size + prot->tail_size, i);
if (rc < 0) {
if (rc != -EINPROGRESS) {
tls_err_abort(sk, EBADMSG);
@@ -663,12 +722,12 @@ static int tls_push_record(struct sock *sk, int flags,
tls_merge_open_record(sk, rec, tmp, orig_end);
}
}
+ ctx->async_capable = 1;
return rc;
} else if (split) {
msg_pl = &tmp->msg_plaintext;
msg_en = &tmp->msg_encrypted;
- sk_msg_trim(sk, msg_en, msg_pl->sg.size +
- tls_ctx->tx.overhead_size);
+ sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
tls_ctx->pending_open_record_frags = true;
ctx->open_rec = tmp;
}
@@ -803,9 +862,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
- bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
+ bool async_capable = ctx->async_capable;
unsigned char record_type = TLS_RECORD_TYPE_DATA;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool eor = !(msg->msg_flags & MSG_MORE);
@@ -870,7 +929,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
}
required_size = msg_pl->sg.size + try_to_copy +
- tls_ctx->tx.overhead_size;
+ prot->overhead_size;
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
@@ -939,8 +998,8 @@ fallback_to_reg_send:
*/
try_to_copy -= required_size - msg_pl->sg.size;
full_record = true;
- sk_msg_trim(sk, msg_en, msg_pl->sg.size +
- tls_ctx->tx.overhead_size);
+ sk_msg_trim(sk, msg_en,
+ msg_pl->sg.size + prot->overhead_size);
}
if (try_to_copy) {
@@ -1020,12 +1079,13 @@ send_end:
return copied ? copied : ret;
}
-int tls_sw_do_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
+static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
{
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
unsigned char record_type = TLS_RECORD_TYPE_DATA;
struct sk_msg *msg_pl;
struct tls_rec *rec;
@@ -1075,8 +1135,7 @@ int tls_sw_do_sendpage(struct sock *sk, struct page *page,
full_record = true;
}
- required_size = msg_pl->sg.size + copy +
- tls_ctx->tx.overhead_size;
+ required_size = msg_pl->sg.size + copy + prot->overhead_size;
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
@@ -1143,16 +1202,6 @@ sendpage_end:
return copied ? copied : ret;
}
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
-{
- if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
- MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
- return -ENOTSUPP;
-
- return tls_sw_do_sendpage(sk, page, offset, size, flags);
-}
-
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
@@ -1281,10 +1330,11 @@ out:
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
struct iov_iter *out_iov,
struct scatterlist *out_sg,
- int *chunk, bool *zc)
+ int *chunk, bool *zc, bool async)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct strp_msg *rxm = strp_msg(skb);
int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
struct aead_request *aead_req;
@@ -1292,15 +1342,16 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
u8 *aad, *iv, *mem = NULL;
struct scatterlist *sgin = NULL;
struct scatterlist *sgout = NULL;
- const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;
+ const int data_len = rxm->full_len - prot->overhead_size +
+ prot->tail_size;
if (*zc && (out_iov || out_sg)) {
if (out_iov)
n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
else
n_sgout = sg_nents(out_sg);
- n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
- rxm->full_len - tls_ctx->rx.prepend_size);
+ n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
+ rxm->full_len - prot->prepend_size);
} else {
n_sgout = 0;
*zc = false;
@@ -1317,7 +1368,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
mem_size = aead_size + (nsg * sizeof(struct scatterlist));
- mem_size = mem_size + TLS_AAD_SPACE_SIZE;
+ mem_size = mem_size + prot->aad_size;
mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
/* Allocate a single block of memory which contains
@@ -1333,29 +1384,35 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
sgin = (struct scatterlist *)(mem + aead_size);
sgout = sgin + n_sgin;
aad = (u8 *)(sgout + n_sgout);
- iv = aad + TLS_AAD_SPACE_SIZE;
+ iv = aad + prot->aad_size;
/* Prepare IV */
err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
- tls_ctx->rx.iv_size);
+ prot->iv_size);
if (err < 0) {
kfree(mem);
return err;
}
- memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ if (prot->version == TLS_1_3_VERSION)
+ memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv));
+ else
+ memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+
+ xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
/* Prepare AAD */
- tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
- tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
- ctx->control);
+ tls_make_aad(aad, rxm->full_len - prot->overhead_size +
+ prot->tail_size,
+ tls_ctx->rx.rec_seq, prot->rec_seq_size,
+ ctx->control, prot->version);
/* Prepare sgin */
sg_init_table(sgin, n_sgin);
- sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
+ sg_set_buf(&sgin[0], aad, prot->aad_size);
err = skb_to_sgvec(skb, &sgin[1],
- rxm->offset + tls_ctx->rx.prepend_size,
- rxm->full_len - tls_ctx->rx.prepend_size);
+ rxm->offset + prot->prepend_size,
+ rxm->full_len - prot->prepend_size);
if (err < 0) {
kfree(mem);
return err;
@@ -1364,7 +1421,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
if (n_sgout) {
if (out_iov) {
sg_init_table(sgout, n_sgout);
- sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
+ sg_set_buf(&sgout[0], aad, prot->aad_size);
*chunk = 0;
err = tls_setup_from_iter(sk, out_iov, data_len,
@@ -1381,13 +1438,13 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
fallback_to_reg_recv:
sgout = sgin;
pages = 0;
- *chunk = 0;
+ *chunk = data_len;
*zc = false;
}
/* Prepare and submit AEAD request */
err = tls_do_decryption(sk, skb, sgin, sgout, iv,
- data_len, aead_req, *zc);
+ data_len, aead_req, async);
if (err == -EINPROGRESS)
return err;
@@ -1400,36 +1457,45 @@ fallback_to_reg_recv:
}
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
- struct iov_iter *dest, int *chunk, bool *zc)
+ struct iov_iter *dest, int *chunk, bool *zc,
+ bool async)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ int version = prot->version;
struct strp_msg *rxm = strp_msg(skb);
int err = 0;
-#ifdef CONFIG_TLS_DEVICE
- err = tls_device_decrypted(sk, skb);
- if (err < 0)
- return err;
-#endif
if (!ctx->decrypted) {
- err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
- if (err < 0) {
- if (err == -EINPROGRESS)
- tls_advance_record_sn(sk, &tls_ctx->rx);
-
+#ifdef CONFIG_TLS_DEVICE
+ err = tls_device_decrypted(sk, skb);
+ if (err < 0)
return err;
+#endif
+ /* Still not decrypted after tls_device */
+ if (!ctx->decrypted) {
+ err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
+ async);
+ if (err < 0) {
+ if (err == -EINPROGRESS)
+ tls_advance_record_sn(sk, &tls_ctx->rx,
+ version);
+
+ return err;
+ }
}
+
+ rxm->full_len -= padding_length(ctx, tls_ctx, skb);
+ rxm->offset += prot->prepend_size;
+ rxm->full_len -= prot->overhead_size;
+ tls_advance_record_sn(sk, &tls_ctx->rx, version);
+ ctx->decrypted = true;
+ ctx->saved_data_ready(sk);
} else {
*zc = false;
}
- rxm->offset += tls_ctx->rx.prepend_size;
- rxm->full_len -= tls_ctx->rx.overhead_size;
- tls_advance_record_sn(sk, &tls_ctx->rx);
- ctx->decrypted = true;
- ctx->saved_data_ready(sk);
-
return err;
}
@@ -1439,7 +1505,7 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
bool zc = true;
int chunk;
- return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
+ return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
@@ -1466,6 +1532,115 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
return true;
}
+/* This function traverses the rx_list in the tls receive context to copy the
+ * decrypted records into the buffer provided by the caller when zero copy is
+ * not true. Further, a record is removed from the rx_list if it is not a peek
+ * case and the record has been consumed completely.
+ */
+static int process_rx_list(struct tls_sw_context_rx *ctx,
+ struct msghdr *msg,
+ u8 *control,
+ bool *cmsg,
+ size_t skip,
+ size_t len,
+ bool zc,
+ bool is_peek)
+{
+ struct sk_buff *skb = skb_peek(&ctx->rx_list);
+ u8 ctrl = *control;
+ u8 msgc = *cmsg;
+ struct tls_msg *tlm;
+ ssize_t copied = 0;
+
+ /* Set the record type in 'control' if caller didn't pass it */
+ if (!ctrl && skb) {
+ tlm = tls_msg(skb);
+ ctrl = tlm->control;
+ }
+
+ while (skip && skb) {
+ struct strp_msg *rxm = strp_msg(skb);
+ tlm = tls_msg(skb);
+
+ /* Cannot process a record of different type */
+ if (ctrl != tlm->control)
+ return 0;
+
+ if (skip < rxm->full_len)
+ break;
+
+ skip = skip - rxm->full_len;
+ skb = skb_peek_next(skb, &ctx->rx_list);
+ }
+
+ while (len && skb) {
+ struct sk_buff *next_skb;
+ struct strp_msg *rxm = strp_msg(skb);
+ int chunk = min_t(unsigned int, rxm->full_len - skip, len);
+
+ tlm = tls_msg(skb);
+
+ /* Cannot process a record of different type */
+ if (ctrl != tlm->control)
+ return 0;
+
+ /* Set record type if not already done. For a non-data record,
+ * do not proceed if record type could not be copied.
+ */
+ if (!msgc) {
+ int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
+ sizeof(ctrl), &ctrl);
+ msgc = true;
+ if (ctrl != TLS_RECORD_TYPE_DATA) {
+ if (cerr || msg->msg_flags & MSG_CTRUNC)
+ return -EIO;
+
+ *cmsg = msgc;
+ }
+ }
+
+ if (!zc || (rxm->full_len - skip) > len) {
+ int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ msg, chunk);
+ if (err < 0)
+ return err;
+ }
+
+ len = len - chunk;
+ copied = copied + chunk;
+
+ /* Consume the data from the record in the non-peek case */
+ if (!is_peek) {
+ rxm->offset = rxm->offset + chunk;
+ rxm->full_len = rxm->full_len - chunk;
+
+ /* Return if there is unconsumed data in the record */
+ if (rxm->full_len - skip)
+ break;
+ }
+
+ /* The remaining skip bytes must lie in the 1st record in rx_list,
+ * so from the 2nd record onwards 'skip' should be 0.
+ */
+ skip = 0;
+
+ if (msg)
+ msg->msg_flags |= MSG_EOR;
+
+ next_skb = skb_peek_next(skb, &ctx->rx_list);
+
+ if (!is_peek) {
+ skb_unlink(skb, &ctx->rx_list);
+ kfree_skb(skb);
+ }
+
+ skb = next_skb;
+ }
+
+ *control = ctrl;
+ return copied;
+}
+
int tls_sw_recvmsg(struct sock *sk,
struct msghdr *msg,
size_t len,
@@ -1475,15 +1650,19 @@ int tls_sw_recvmsg(struct sock *sk,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct sk_psock *psock;
- unsigned char control;
+ unsigned char control = 0;
+ ssize_t decrypted = 0;
struct strp_msg *rxm;
+ struct tls_msg *tlm;
struct sk_buff *skb;
ssize_t copied = 0;
bool cmsg = false;
int target, err = 0;
long timeo;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
+ bool is_peek = flags & MSG_PEEK;
int num_async = 0;
flags |= nonblock;
@@ -1494,12 +1673,31 @@ int tls_sw_recvmsg(struct sock *sk,
psock = sk_psock_get(sk);
lock_sock(sk);
- target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ /* Process pending decrypted records. It must be non-zero-copy */
+ err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
+ is_peek);
+ if (err < 0) {
+ tls_err_abort(sk, err);
+ goto end;
+ } else {
+ copied = err;
+ }
+
+ len = len - copied;
+ if (len) {
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ } else {
+ goto recv_end;
+ }
+
do {
+ bool retain_skb = false;
bool zc = false;
- bool async = false;
+ int to_decrypt;
int chunk = 0;
+ bool async_capable;
+ bool async = false;
skb = tls_wait_data(sk, psock, flags, timeo, &err);
if (!skb) {
@@ -1508,97 +1706,125 @@ int tls_sw_recvmsg(struct sock *sk,
msg, len, flags);
if (ret > 0) {
- copied += ret;
+ decrypted += ret;
len -= ret;
continue;
}
}
goto recv_end;
+ } else {
+ tlm = tls_msg(skb);
+ if (prot->version == TLS_1_3_VERSION)
+ tlm->control = 0;
+ else
+ tlm->control = ctx->control;
}
rxm = strp_msg(skb);
+ to_decrypt = rxm->full_len - prot->overhead_size;
+
+ if (to_decrypt <= len && !is_kvec && !is_peek &&
+ ctx->control == TLS_RECORD_TYPE_DATA &&
+ prot->version != TLS_1_3_VERSION)
+ zc = true;
+
+ /* Do not use async mode if record is non-data */
+ if (ctx->control == TLS_RECORD_TYPE_DATA)
+ async_capable = ctx->async_capable;
+ else
+ async_capable = false;
+
+ err = decrypt_skb_update(sk, skb, &msg->msg_iter,
+ &chunk, &zc, async_capable);
+ if (err < 0 && err != -EINPROGRESS) {
+ tls_err_abort(sk, EBADMSG);
+ goto recv_end;
+ }
+
+ if (err == -EINPROGRESS) {
+ async = true;
+ num_async++;
+ } else if (prot->version == TLS_1_3_VERSION) {
+ tlm->control = ctx->control;
+ }
+
+ /* If the type of records being processed is not known yet,
+ * set it to record type just dequeued. If it is already known,
+ * but does not match the record type just dequeued, go to end.
+ * We always get record type here since for tls1.2, record type
+ * is known just after record is dequeued from stream parser.
+ * For tls1.3, we disable async.
+ */
+
+ if (!control)
+ control = tlm->control;
+ else if (control != tlm->control)
+ goto recv_end;
+
if (!cmsg) {
int cerr;
cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
- sizeof(ctx->control), &ctx->control);
+ sizeof(control), &control);
cmsg = true;
- control = ctx->control;
- if (ctx->control != TLS_RECORD_TYPE_DATA) {
+ if (control != TLS_RECORD_TYPE_DATA) {
if (cerr || msg->msg_flags & MSG_CTRUNC) {
err = -EIO;
goto recv_end;
}
}
- } else if (control != ctx->control) {
- goto recv_end;
}
- if (!ctx->decrypted) {
- int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
+ if (async)
+ goto pick_next_record;
- if (!is_kvec && to_copy <= len &&
- likely(!(flags & MSG_PEEK)))
- zc = true;
+ if (!zc) {
+ if (rxm->full_len > len) {
+ retain_skb = true;
+ chunk = len;
+ } else {
+ chunk = rxm->full_len;
+ }
- err = decrypt_skb_update(sk, skb, &msg->msg_iter,
- &chunk, &zc);
- if (err < 0 && err != -EINPROGRESS) {
- tls_err_abort(sk, EBADMSG);
+ err = skb_copy_datagram_msg(skb, rxm->offset,
+ msg, chunk);
+ if (err < 0)
goto recv_end;
- }
- if (err == -EINPROGRESS) {
- async = true;
- num_async++;
- goto pick_next_record;
+ if (!is_peek) {
+ rxm->offset = rxm->offset + chunk;
+ rxm->full_len = rxm->full_len - chunk;
}
-
- ctx->decrypted = true;
}
- if (!zc) {
- chunk = min_t(unsigned int, rxm->full_len, len);
+pick_next_record:
+ if (chunk > len)
+ chunk = len;
- err = skb_copy_datagram_msg(skb, rxm->offset, msg,
- chunk);
- if (err < 0)
- goto recv_end;
+ decrypted += chunk;
+ len -= chunk;
+
+ /* For async or peek case, queue the current skb */
+ if (async || is_peek || retain_skb) {
+ skb_queue_tail(&ctx->rx_list, skb);
+ skb = NULL;
}
-pick_next_record:
- copied += chunk;
- len -= chunk;
- if (likely(!(flags & MSG_PEEK))) {
- u8 control = ctx->control;
-
- /* For async, drop current skb reference */
- if (async)
- skb = NULL;
-
- if (tls_sw_advance_skb(sk, skb, chunk)) {
- /* Return full control message to
- * userspace before trying to parse
- * another message type
- */
- msg->msg_flags |= MSG_EOR;
- if (control != TLS_RECORD_TYPE_DATA)
- goto recv_end;
- } else {
- break;
- }
- } else {
- /* MSG_PEEK right now cannot look beyond current skb
- * from strparser, meaning we cannot advance skb here
- * and thus unpause strparser since we'd loose original
- * one.
+ if (tls_sw_advance_skb(sk, skb, chunk)) {
+ /* Return full control message to
+ * userspace before trying to parse
+ * another message type
*/
+ msg->msg_flags |= MSG_EOR;
+ if (ctx->control != TLS_RECORD_TYPE_DATA)
+ goto recv_end;
+ } else {
break;
}
/* If we have a new message from strparser, continue now. */
- if (copied >= target && !ctx->recv_pkt)
+ if (decrypted >= target && !ctx->recv_pkt)
break;
} while (len);
@@ -1612,13 +1838,31 @@ recv_end:
/* one of async decrypt failed */
tls_err_abort(sk, err);
copied = 0;
+ decrypted = 0;
+ goto end;
}
} else {
reinit_completion(&ctx->async_wait.completion);
}
WRITE_ONCE(ctx->async_notify, false);
+
+ /* Drain records from the rx_list & copy if required */
+ if (is_peek || is_kvec)
+ err = process_rx_list(ctx, msg, &control, &cmsg, copied,
+ decrypted, false, is_peek);
+ else
+ err = process_rx_list(ctx, msg, &control, &cmsg, 0,
+ decrypted, true, is_peek);
+ if (err < 0) {
+ tls_err_abort(sk, err);
+ copied = 0;
+ goto end;
+ }
}
+ copied += decrypted;
+
+end:
release_sock(sk);
if (psock)
sk_psock_put(sk, psock);
@@ -1648,14 +1892,14 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
if (!skb)
goto splice_read_end;
- /* splice does not support reading control messages */
- if (ctx->control != TLS_RECORD_TYPE_DATA) {
- err = -ENOTSUPP;
- goto splice_read_end;
- }
-
if (!ctx->decrypted) {
- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);
+ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
+
+ /* splice does not support reading control messages */
+ if (ctx->control != TLS_RECORD_TYPE_DATA) {
+ err = -ENOTSUPP;
+ goto splice_read_end;
+ }
if (err < 0) {
tls_err_abort(sk, EBADMSG);
@@ -1698,6 +1942,7 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
struct strp_msg *rxm = strp_msg(skb);
size_t cipher_overhead;
@@ -1705,17 +1950,17 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
int ret;
/* Verify that we have a full TLS header, or wait for more data */
- if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
+ if (rxm->offset + prot->prepend_size > skb->len)
return 0;
/* Sanity-check size of on-stack buffer. */
- if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
+ if (WARN_ON(prot->prepend_size > sizeof(header))) {
ret = -EINVAL;
goto read_failure;
}
/* Linearize header to local buffer */
- ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
+ ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
if (ret < 0)
goto read_failure;
@@ -1724,9 +1969,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
data_len = ((header[4] & 0xFF) | (header[3] << 8));
- cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;
+ cipher_overhead = prot->tag_size;
+ if (prot->version != TLS_1_3_VERSION)
+ cipher_overhead += prot->iv_size;
- if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
+ if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
+ prot->tail_size) {
ret = -EMSGSIZE;
goto read_failure;
}
@@ -1735,12 +1983,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure;
}
- if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
- header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
+ /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
+ if (header[1] != TLS_1_2_VERSION_MINOR ||
+ header[2] != TLS_1_2_VERSION_MAJOR) {
ret = -EINVAL;
goto read_failure;
}
-
#ifdef CONFIG_TLS_DEVICE
handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
*(u64*)tls_ctx->rx.rec_seq);
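For reference, a hedged user-space sketch of the 5-byte record header layout that tls_read_size() parses above (type, legacy version bytes, big-endian length); the struct and function names are made up for the example:

#include <stdint.h>
#include <stddef.h>

struct tls_rec_hdr {
	uint8_t  type;          /* e.g. 0x17 = application_data */
	uint8_t  major, minor;  /* legacy record version: 0x03 0x03 for TLS 1.2/1.3 */
	uint16_t length;        /* payload length, big-endian on the wire */
};

/* Returns 0 on success, -1 if fewer than 5 bytes are available. */
static int parse_tls_rec_hdr(const uint8_t *buf, size_t len,
			     struct tls_rec_hdr *hdr)
{
	if (len < 5)
		return -1;

	hdr->type   = buf[0];
	hdr->major  = buf[1];
	hdr->minor  = buf[2];
	hdr->length = (uint16_t)((buf[3] << 8) | buf[4]);
	return 0;
}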
@@ -1792,7 +2040,9 @@ void tls_sw_free_resources_tx(struct sock *sk)
if (atomic_read(&ctx->encrypt_pending))
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ release_sock(sk);
cancel_delayed_work_sync(&ctx->tx_work.work);
+ lock_sock(sk);
/* Tx whatever records we can transmit and abandon the rest */
tls_tx_records(sk, -1);
@@ -1842,6 +2092,7 @@ void tls_sw_release_resources_rx(struct sock *sk)
if (ctx->aead_recv) {
kfree_skb(ctx->recv_pkt);
ctx->recv_pkt = NULL;
+ skb_queue_purge(&ctx->rx_list);
crypto_free_aead(ctx->aead_recv);
strp_stop(&ctx->strp);
write_lock_bh(&sk->sk_callback_lock);
@@ -1881,17 +2132,35 @@ static void tx_work_handler(struct work_struct *work)
release_sock(sk);
}
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
+{
+ struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
+
+ /* Schedule the transmission if tx list is ready */
+ if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
+ /* Schedule the transmission */
+ if (!test_and_set_bit(BIT_TX_SCHEDULED,
+ &tx_ctx->tx_bitmask))
+ schedule_delayed_work(&tx_ctx->tx_work.work, 0);
+ }
+}
+
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_crypto_info *crypto_info;
struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
+ struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
struct tls_sw_context_tx *sw_ctx_tx = NULL;
struct tls_sw_context_rx *sw_ctx_rx = NULL;
struct cipher_context *cctx;
struct crypto_aead **aead;
struct strp_callbacks cb;
u16 nonce_size, tag_size, iv_size, rec_seq_size;
- char *iv, *rec_seq;
+ struct crypto_tfm *tfm;
+ char *iv, *rec_seq, *key, *salt;
+ size_t keysize;
int rc = 0;
if (!ctx) {
@@ -1937,6 +2206,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
crypto_init_wait(&sw_ctx_rx->async_wait);
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
+ skb_queue_head_init(&sw_ctx_rx->rx_list);
aead = &sw_ctx_rx->aead_recv;
}
@@ -1951,6 +2221,24 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
gcm_128_info =
(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+ key = gcm_128_info->key;
+ salt = gcm_128_info->salt;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
+ tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
+ iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
+ iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
+ rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
+ rec_seq =
+ ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
+ gcm_256_info =
+ (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ key = gcm_256_info->key;
+ salt = gcm_256_info->salt;
break;
}
default:
@@ -1964,19 +2252,32 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_priv;
}
- cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
- cctx->tag_size = tag_size;
- cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
- cctx->iv_size = iv_size;
+ if (crypto_info->version == TLS_1_3_VERSION) {
+ nonce_size = 0;
+ prot->aad_size = TLS_HEADER_SIZE;
+ prot->tail_size = 1;
+ } else {
+ prot->aad_size = TLS_AAD_SPACE_SIZE;
+ prot->tail_size = 0;
+ }
+
+ prot->version = crypto_info->version;
+ prot->cipher_type = crypto_info->cipher_type;
+ prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
+ prot->tag_size = tag_size;
+ prot->overhead_size = prot->prepend_size +
+ prot->tag_size + prot->tail_size;
+ prot->iv_size = iv_size;
cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
GFP_KERNEL);
if (!cctx->iv) {
rc = -ENOMEM;
goto free_priv;
}
- memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ /* Note: 128 & 256 bit salt are the same size */
+ memcpy(cctx->iv, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
- cctx->rec_seq_size = rec_seq_size;
+ prot->rec_seq_size = rec_seq_size;
cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
if (!cctx->rec_seq) {
rc = -ENOMEM;
@@ -1994,16 +2295,24 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
ctx->push_pending_record = tls_sw_push_pending_record;
- rc = crypto_aead_setkey(*aead, gcm_128_info->key,
- TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ rc = crypto_aead_setkey(*aead, key, keysize);
+
if (rc)
goto free_aead;
- rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
+ rc = crypto_aead_setauthsize(*aead, prot->tag_size);
if (rc)
goto free_aead;
if (sw_ctx_rx) {
+ tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
+
+ if (crypto_info->version == TLS_1_3_VERSION)
+ sw_ctx_rx->async_capable = false;
+ else
+ sw_ctx_rx->async_capable =
+ tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
+
/* Set up strparser */
memset(&cb, 0, sizeof(cb));
cb.rcv_msg = tls_queue;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 74d1eed7cbd4..a95d479caeea 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -890,7 +890,7 @@ retry:
addr->hash ^= sk->sk_type;
__unix_remove_socket(sk);
- u->addr = addr;
+ smp_store_release(&u->addr, addr);
__unix_insert_socket(&unix_socket_table[addr->hash], sk);
spin_unlock(&unix_table_lock);
err = 0;
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = 0;
__unix_remove_socket(sk);
- u->addr = addr;
+ smp_store_release(&u->addr, addr);
__unix_insert_socket(list, sk);
out_unlock:
@@ -1331,15 +1331,29 @@ restart:
RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
- /* copy address information from listening to new sock*/
- if (otheru->addr) {
- refcount_inc(&otheru->addr->refcnt);
- newu->addr = otheru->addr;
- }
+ /* copy address information from listening to new sock
+ *
+ * The contents of *(otheru->addr) and otheru->path
+ * are seen fully set up here, since we have found
+ * otheru in hash under unix_table_lock. Insertion
+ * into the hash chain we'd found it in had been done
+ * in an earlier critical area protected by unix_table_lock,
+ * the same one where we'd set *(otheru->addr) contents,
+ * as well as otheru->path and otheru->addr itself.
+ *
+ * Using smp_store_release() here to set newu->addr
+ * is enough to make those stores, as well as stores
+ * to newu->path visible to anyone who gets newu->addr
+ * by smp_load_acquire(). IOW, the same warranties
+ * as for unix_sock instances bound in unix_bind() or
+ * in unix_autobind().
+ */
if (otheru->path.dentry) {
path_get(&otheru->path);
newu->path = otheru->path;
}
+ refcount_inc(&otheru->addr->refcnt);
+ smp_store_release(&newu->addr, otheru->addr);
/* Set credentials */
copy_peercred(sk, other);
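The release/acquire pairing described in the comment above (publish a fully initialized address with smp_store_release(), observe it with smp_load_acquire()) maps onto C11 atomics roughly as follows; the struct and field names are invented for the example:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct addr {                                /* stand-in for struct unix_address */
	char name[16];
	int  len;
};

static _Atomic(struct addr *) published;     /* like unix_sk(sk)->addr */

/* Writer: initialize everything, then publish with a release store. */
static void publish_addr(const char *name)
{
	struct addr *a = malloc(sizeof(*a));

	if (!a)
		return;
	a->len = (int)strnlen(name, sizeof(a->name) - 1);
	memcpy(a->name, name, (size_t)a->len);
	a->name[a->len] = '\0';
	atomic_store_explicit(&published, a, memory_order_release);
}

/* Reader: an acquire load of the pointer makes the writer's earlier
 * stores to *a visible without any further locking.
 */
static int read_addr_len(void)
{
	struct addr *a = atomic_load_explicit(&published, memory_order_acquire);

	return a ? a->len : 0;
}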
@@ -1453,7 +1467,7 @@ out:
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
struct sock *sk = sock->sk;
- struct unix_sock *u;
+ struct unix_address *addr;
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
int err = 0;
@@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
sock_hold(sk);
}
- u = unix_sk(sk);
- unix_state_lock(sk);
- if (!u->addr) {
+ addr = smp_load_acquire(&unix_sk(sk)->addr);
+ if (!addr) {
sunaddr->sun_family = AF_UNIX;
sunaddr->sun_path[0] = 0;
err = sizeof(short);
} else {
- struct unix_address *addr = u->addr;
-
err = addr->len;
memcpy(sunaddr, addr->name, addr->len);
}
- unix_state_unlock(sk);
sock_put(sk);
out:
return err;
@@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
- struct unix_sock *u = unix_sk(sk);
+ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
- if (u->addr) {
- msg->msg_namelen = u->addr->len;
- memcpy(msg->msg_name, u->addr->name, u->addr->len);
+ if (addr) {
+ msg->msg_namelen = addr->len;
+ memcpy(msg->msg_name, addr->name, addr->len);
}
}
@@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk)
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- unix_state_lock(sk);
+ if (!smp_load_acquire(&unix_sk(sk)->addr))
+ return -ENOENT;
+
path = unix_sk(sk)->path;
- if (!path.dentry) {
- unix_state_unlock(sk);
+ if (!path.dentry)
return -ENOENT;
- }
path_get(&path);
- unix_state_unlock(sk);
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
@@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
- if (u->addr) {
+ if (u->addr) { // under unix_table_lock here
int i, len;
seq_putc(seq, ' ');
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e83462..3183d9b8ab33 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
- struct unix_address *addr = unix_sk(sk)->addr;
+ /* might or might not have unix_table_lock */
+ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
if (!addr)
return 0;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 43a1dec08825..d892000770cf 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -505,7 +505,7 @@ out:
static int __vsock_bind_stream(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
- static u32 port = 0;
+ static u32 port;
struct sockaddr_vm new_addr;
if (!port)
@@ -1439,7 +1439,7 @@ static int vsock_stream_setsockopt(struct socket *sock,
break;
case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
- struct timeval tv;
+ struct __kernel_old_timeval tv;
COPY_IN(tv);
if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
@@ -1517,7 +1517,7 @@ static int vsock_stream_getsockopt(struct socket *sock,
break;
case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
- struct timeval tv;
+ struct __kernel_old_timeval tv;
tv.tv_sec = vsk->connect_timeout / HZ;
tv.tv_usec =
(vsk->connect_timeout -
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5d3cce9e8744..15eb5d3d4750 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
{
struct virtio_vsock *vsock = virtio_vsock_get();
+ if (!vsock)
+ return VMADDR_CID_ANY;
+
return vsock->guest_cid;
}
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
virtio_vsock_update_guest_cid(vsock);
- ret = vsock_core_init(&virtio_transport.transport);
- if (ret < 0)
- goto out_vqs;
-
vsock->rx_buf_nr = 0;
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
mutex_unlock(&the_virtio_vsock_mutex);
return 0;
-out_vqs:
- vsock->vdev->config->del_vqs(vsock->vdev);
out:
kfree(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
flush_work(&vsock->event_work);
flush_work(&vsock->send_pkt_work);
+ /* Reset all connected sockets when the device disappears */
+ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+
vdev->config->reset(vdev);
mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
mutex_lock(&the_virtio_vsock_mutex);
the_virtio_vsock = NULL;
- vsock_core_exit();
mutex_unlock(&the_virtio_vsock_mutex);
vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
if (!virtio_vsock_workqueue)
return -ENOMEM;
+
ret = register_virtio_driver(&virtio_vsock_driver);
if (ret)
- destroy_workqueue(virtio_vsock_workqueue);
+ goto out_wq;
+
+ ret = vsock_core_init(&virtio_transport.transport);
+ if (ret)
+ goto out_vdr;
+
+ return 0;
+
+out_vdr:
+ unregister_virtio_driver(&virtio_vsock_driver);
+out_wq:
+ destroy_workqueue(virtio_vsock_workqueue);
return ret;
+
}
static void __exit virtio_vsock_exit(void)
{
+ vsock_core_exit();
unregister_virtio_driver(&virtio_vsock_driver);
destroy_workqueue(virtio_vsock_workqueue);
}
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index c361ce782412..c3d5ab01fba7 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
static void vmci_transport_destruct(struct vsock_sock *vsk)
{
+ /* transport can be NULL if we hit a failure at init() time */
+ if (!vmci_trans(vsk))
+ return;
+
/* Ensure that the detach callback doesn't use the sk/vsk
* we are about to destruct.
*/
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 882d97bdc6bf..550ac9d827fe 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
cfg80211_sched_dfs_chan_update(rdev);
}
+ schedule_work(&cfg80211_disconnect_work);
+
return err;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 623dfe5e211c..b36ad8efb5e5 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1068,6 +1068,8 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync)
ASSERT_RTNL();
+ flush_work(&wdev->pmsr_free_wk);
+
nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
list_del_rcu(&wdev->list);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index c5d6f3418601..84d36ca7a7ab 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,7 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -182,12 +182,23 @@ static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pu
static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss)
{
atomic_inc(&bss->hold);
+ if (bss->pub.transmitted_bss) {
+ bss = container_of(bss->pub.transmitted_bss,
+ struct cfg80211_internal_bss, pub);
+ atomic_inc(&bss->hold);
+ }
}
static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
{
int r = atomic_dec_return(&bss->hold);
WARN_ON(r < 0);
+ if (bss->pub.transmitted_bss) {
+ bss = container_of(bss->pub.transmitted_bss,
+ struct cfg80211_internal_bss, pub);
+ r = atomic_dec_return(&bss->hold);
+ WARN_ON(r < 0);
+ }
}
@@ -445,6 +456,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
u32 center_freq_khz, u32 bw_khz);
+extern struct work_struct cfg80211_disconnect_work;
+
/**
* cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
* @wiphy: the wiphy to validate against
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1615e503f8e3..f9462010575f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -21,7 +21,8 @@
void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
- const u8 *buf, size_t len, int uapsd_queues)
+ const u8 *buf, size_t len, int uapsd_queues,
+ const u8 *req_ies, size_t req_ies_len)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -33,6 +34,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code);
cr.bssid = mgmt->bssid;
cr.bss = bss;
+ cr.req_ie = req_ies;
+ cr.req_ie_len = req_ies_len;
cr.resp_ie = mgmt->u.assoc_resp.variable;
cr.resp_ie_len =
len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
@@ -52,7 +55,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
return;
}
- nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues);
+ nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues,
+ req_ies, req_ies_len);
/* update current_bss etc., consumes the bss reference */
__cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS);
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5e49492d5911..25a9e3b5c154 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4,7 +4,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*/
#include <linux/if.h>
@@ -203,29 +203,17 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
static int validate_ie_attr(const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
- const u8 *pos;
- int len;
+ const u8 *data = nla_data(attr);
+ unsigned int len = nla_len(attr);
+ const struct element *elem;
- pos = nla_data(attr);
- len = nla_len(attr);
-
- while (len) {
- u8 elemlen;
-
- if (len < 2)
- goto error;
- len -= 2;
-
- elemlen = pos[1];
- if (elemlen > len)
- goto error;
-
- len -= elemlen;
- pos += 2 + elemlen;
+ for_each_element(elem, data, len) {
+ /* nothing */
}
- return 0;
-error:
+ if (for_each_element_completed(elem, data, len))
+ return 0;
+
NL_SET_ERR_MSG_ATTR(extack, attr, "malformed information elements");
return -EINVAL;
}
@@ -250,7 +238,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
NLA_POLICY_MAX(NLA_U8, 15),
[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
- NLA_POLICY_MAX(NLA_U8, 15),
+ NLA_POLICY_MAX(NLA_U8, 31),
[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
@@ -259,15 +247,13 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
static const struct nla_policy
nl80211_pmsr_req_data_policy[NL80211_PMSR_TYPE_MAX + 1] = {
[NL80211_PMSR_TYPE_FTM] =
- NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX,
- nl80211_pmsr_ftm_req_attr_policy),
+ NLA_POLICY_NESTED(nl80211_pmsr_ftm_req_attr_policy),
};
static const struct nla_policy
nl80211_pmsr_req_attr_policy[NL80211_PMSR_REQ_ATTR_MAX + 1] = {
[NL80211_PMSR_REQ_ATTR_DATA] =
- NLA_POLICY_NESTED(NL80211_PMSR_TYPE_MAX,
- nl80211_pmsr_req_data_policy),
+ NLA_POLICY_NESTED(nl80211_pmsr_req_data_policy),
[NL80211_PMSR_REQ_ATTR_GET_AP_TSF] = { .type = NLA_FLAG },
};
@@ -280,8 +266,7 @@ nl80211_psmr_peer_attr_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = {
*/
[NL80211_PMSR_PEER_ATTR_CHAN] = { .type = NLA_NESTED },
[NL80211_PMSR_PEER_ATTR_REQ] =
- NLA_POLICY_NESTED(NL80211_PMSR_REQ_ATTR_MAX,
- nl80211_pmsr_req_attr_policy),
+ NLA_POLICY_NESTED(nl80211_pmsr_req_attr_policy),
[NL80211_PMSR_PEER_ATTR_RESP] = { .type = NLA_REJECT },
};
@@ -292,8 +277,7 @@ nl80211_pmsr_attr_policy[NL80211_PMSR_ATTR_MAX + 1] = {
[NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_REJECT },
[NL80211_PMSR_ATTR_TYPE_CAPA] = { .type = NLA_REJECT },
[NL80211_PMSR_ATTR_PEERS] =
- NLA_POLICY_NESTED_ARRAY(NL80211_PMSR_PEER_ATTR_MAX,
- nl80211_psmr_peer_attr_policy),
+ NLA_POLICY_NESTED_ARRAY(nl80211_psmr_peer_attr_policy),
};
const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
@@ -555,8 +539,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
},
[NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
[NL80211_ATTR_PEER_MEASUREMENTS] =
- NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX,
- nl80211_pmsr_attr_policy),
+ NLA_POLICY_NESTED(nl80211_pmsr_attr_policy),
+ [NL80211_ATTR_AIRTIME_WEIGHT] = NLA_POLICY_MIN(NLA_U16, 1),
};
/* policy for the key attributes */
@@ -2278,6 +2262,15 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
if (nl80211_send_pmsr_capa(rdev, msg))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 15:
+ if (rdev->wiphy.akm_suites &&
+ nla_put(msg, NL80211_ATTR_AKM_SUITES,
+ sizeof(u32) * rdev->wiphy.n_akm_suites,
+ rdev->wiphy.akm_suites))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -4540,6 +4533,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
nl80211_calculate_ap_params(&params);
+ if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
+ params.flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;
+
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, &params);
if (!err) {
@@ -4851,6 +4847,11 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
PUT_SINFO(PLID, plid, u16);
PUT_SINFO(PLINK_STATE, plink_state, u8);
PUT_SINFO_U64(RX_DURATION, rx_duration);
+ PUT_SINFO_U64(TX_DURATION, tx_duration);
+
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ PUT_SINFO(AIRTIME_WEIGHT, airtime_weight, u16);
switch (rdev->wiphy.signal_type) {
case CFG80211_SIGNAL_TYPE_MBM:
@@ -5470,6 +5471,15 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
}
+ if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT])
+ params.airtime_weight =
+ nla_get_u16(info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]);
+
+ if (params.airtime_weight &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ return -EOPNOTSUPP;
+
/* Include parameters for TDLS peer (will check later) */
err = nl80211_set_station_tdls(info, &params);
if (err)
@@ -5598,6 +5608,15 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.plink_action =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
+ if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT])
+ params.airtime_weight =
+ nla_get_u16(info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]);
+
+ if (params.airtime_weight &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ return -EOPNOTSUPP;
+
err = nl80211_parse_sta_channel_info(info, &params);
if (err)
return err;
@@ -5803,7 +5822,13 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
pinfo->discovery_timeout)) ||
((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) &&
nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
- pinfo->discovery_retries)))
+ pinfo->discovery_retries)) ||
+ ((pinfo->filled & MPATH_INFO_HOP_COUNT) &&
+ nla_put_u8(msg, NL80211_MPATH_INFO_HOP_COUNT,
+ pinfo->hop_count)) ||
+ ((pinfo->filled & MPATH_INFO_PATH_CHANGE) &&
+ nla_put_u32(msg, NL80211_MPATH_INFO_PATH_CHANGE,
+ pinfo->path_change_count)))
goto nla_put_failure;
nla_nest_end(msg, pinfoattr);
@@ -9281,6 +9306,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
struct wireless_dev *wdev,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
+ unsigned int portid,
int vendor_event_idx,
int approxlen, gfp_t gfp)
{
@@ -9304,7 +9330,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
return NULL;
}
- return __cfg80211_alloc_vendor_skb(rdev, wdev, approxlen, 0, 0,
+ return __cfg80211_alloc_vendor_skb(rdev, wdev, approxlen, portid, 0,
cmd, attr, info, gfp);
}
EXPORT_SYMBOL(__cfg80211_alloc_event_skb);
@@ -9313,6 +9339,7 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
{
struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
void *hdr = ((void **)skb->cb)[1];
+ struct nlmsghdr *nlhdr = nlmsg_hdr(skb);
struct nlattr *data = ((void **)skb->cb)[2];
enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
@@ -9322,11 +9349,16 @@ void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
nla_nest_end(skb, data);
genlmsg_end(skb, hdr);
- if (data->nla_type == NL80211_ATTR_VENDOR_DATA)
- mcgrp = NL80211_MCGRP_VENDOR;
+ if (nlhdr->nlmsg_pid) {
+ genlmsg_unicast(wiphy_net(&rdev->wiphy), skb,
+ nlhdr->nlmsg_pid);
+ } else {
+ if (data->nla_type == NL80211_ATTR_VENDOR_DATA)
+ mcgrp = NL80211_MCGRP_VENDOR;
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), skb, 0,
- mcgrp, gfp);
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
+ skb, 0, mcgrp, gfp);
+ }
}
EXPORT_SYMBOL(__cfg80211_send_event_skb);
@@ -9857,7 +9889,10 @@ static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info)
}
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
+ !(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP &&
+ wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_AP_PMKSA_CACHING)))
return -EOPNOTSUPP;
switch (info->genlhdr->cmd) {
@@ -12708,6 +12743,17 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply);
+unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+ if (WARN_ON(!rdev->cur_cmd_info))
+ return 0;
+
+ return rdev->cur_cmd_info->snd_portid;
+}
+EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_get_sender);
+
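/*
 * Illustrative driver-side sketch, not part of the diff: record the
 * requester's netlink port ID while handling a vendor command, then
 * unicast a later vendor event back to that requester instead of
 * multicasting it.  Assumes the cfg80211_vendor_event_alloc_ucast()
 * wrapper that accompanies the new portid argument above; the event
 * index, length and driver state are placeholders.
 */
struct hyp_priv {
	unsigned int event_portid;	/* hypothetical driver state */
};

static int hyp_vendor_cmd_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
			       const void *data, int data_len)
{
	struct hyp_priv *priv = wiphy_priv(wiphy);

	priv->event_portid = cfg80211_vendor_cmd_get_sender(wiphy);
	return 0;
}

static void hyp_report_event(struct wiphy *wiphy, struct wireless_dev *wdev)
{
	struct hyp_priv *priv = wiphy_priv(wiphy);
	struct sk_buff *skb;

	skb = cfg80211_vendor_event_alloc_ucast(wiphy, wdev,
						priv->event_portid,
						100, /* approxlen */
						0,   /* event_idx */
						GFP_KERNEL);
	if (!skb)
		return;

	/* nla_put() the vendor payload here, then deliver it */
	cfg80211_vendor_event(skb, GFP_KERNEL);
}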
static int nl80211_set_qos_map(struct sk_buff *skb,
struct genl_info *info)
{
@@ -13047,7 +13093,9 @@ static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->external_auth)
return -EOPNOTSUPP;
- if (!info->attrs[NL80211_ATTR_SSID])
+ if (!info->attrs[NL80211_ATTR_SSID] &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
if (!info->attrs[NL80211_ATTR_BSSID])
@@ -13058,18 +13106,24 @@ static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info)
memset(&params, 0, sizeof(params));
- params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
- if (params.ssid.ssid_len == 0 ||
- params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
- return -EINVAL;
- memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]),
- params.ssid.ssid_len);
+ if (info->attrs[NL80211_ATTR_SSID]) {
+ params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+ if (params.ssid.ssid_len == 0 ||
+ params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
+ memcpy(params.ssid.ssid,
+ nla_data(info->attrs[NL80211_ATTR_SSID]),
+ params.ssid.ssid_len);
+ }
memcpy(params.bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]),
ETH_ALEN);
params.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+ if (info->attrs[NL80211_ATTR_PMKID])
+ params.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
+
return rdev_external_auth(rdev, dev, &params);
}
@@ -14455,12 +14509,13 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *buf, size_t len,
enum nl80211_commands cmd, gfp_t gfp,
- int uapsd_queues)
+ int uapsd_queues, const u8 *req_ies,
+ size_t req_ies_len)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(100 + len, gfp);
+ msg = nlmsg_new(100 + len + req_ies_len, gfp);
if (!msg)
return;
@@ -14472,7 +14527,9 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
- nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+ nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
+ (req_ies &&
+ nla_put(msg, NL80211_ATTR_REQ_IE, req_ies_len, req_ies)))
goto nla_put_failure;
if (uapsd_queues >= 0) {
@@ -14503,15 +14560,17 @@ void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_AUTHENTICATE, gfp, -1);
+ NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0);
}
void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *buf,
- size_t len, gfp_t gfp, int uapsd_queues)
+ size_t len, gfp_t gfp, int uapsd_queues,
+ const u8 *req_ies, size_t req_ies_len)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_ASSOCIATE, gfp, uapsd_queues);
+ NL80211_CMD_ASSOCIATE, gfp, uapsd_queues,
+ req_ies, req_ies_len);
}
void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
@@ -14519,7 +14578,7 @@ void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_DEAUTHENTICATE, gfp, -1);
+ NL80211_CMD_DEAUTHENTICATE, gfp, -1, NULL, 0);
}
void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
@@ -14527,7 +14586,7 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_DISASSOCIATE, gfp, -1);
+ NL80211_CMD_DISASSOCIATE, gfp, -1, NULL, 0);
}
void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
@@ -14548,7 +14607,8 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
cmd = NL80211_CMD_UNPROT_DISASSOCIATE;
trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len);
- nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1);
+ nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1,
+ NULL, 0);
}
EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 531c82dcba6b..a41e94a49a89 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -67,7 +67,8 @@ void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *buf, size_t len, gfp_t gfp,
- int uapsd_queues);
+ int uapsd_queues,
+ const u8 *req_ies, size_t req_ies_len);
void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *buf, size_t len, gfp_t gfp);
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index de9286703280..5e2ab01d325c 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -256,9 +256,8 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out_err;
} else {
- memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]),
- ETH_ALEN);
- memset(req->mac_addr_mask, 0xff, ETH_ALEN);
+ memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
+ eth_broadcast_addr(req->mac_addr_mask);
}
idx = 0;
@@ -272,6 +271,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
req->n_peers = count;
req->cookie = cfg80211_assign_cookie(rdev);
+ req->nl_portid = info->snd_portid;
err = rdev_start_pmsr(rdev, wdev, req);
if (err)
@@ -530,14 +530,14 @@ free:
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
-void cfg80211_pmsr_free_wk(struct work_struct *work)
+static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
{
- struct wireless_dev *wdev = container_of(work, struct wireless_dev,
- pmsr_free_wk);
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_pmsr_request *req, *tmp;
LIST_HEAD(free_list);
+ lockdep_assert_held(&wdev->mtx);
+
spin_lock_bh(&wdev->pmsr_lock);
list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
if (req->nl_portid)
@@ -547,14 +547,22 @@ void cfg80211_pmsr_free_wk(struct work_struct *work)
spin_unlock_bh(&wdev->pmsr_lock);
list_for_each_entry_safe(req, tmp, &free_list, list) {
- wdev_lock(wdev);
rdev_abort_pmsr(rdev, wdev, req);
- wdev_unlock(wdev);
kfree(req);
}
}
+void cfg80211_pmsr_free_wk(struct work_struct *work)
+{
+ struct wireless_dev *wdev = container_of(work, struct wireless_dev,
+ pmsr_free_wk);
+
+ wdev_lock(wdev);
+ cfg80211_pmsr_process_abort(wdev);
+ wdev_unlock(wdev);
+}
+
void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
{
struct cfg80211_pmsr_request *req;
@@ -568,8 +576,8 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
spin_unlock_bh(&wdev->pmsr_lock);
if (found)
- schedule_work(&wdev->pmsr_free_wk);
- flush_work(&wdev->pmsr_free_wk);
+ cfg80211_pmsr_process_abort(wdev);
+
WARN_ON(!list_empty(&wdev->pmsr_list));
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ecfb1a06dbb2..2f1bf91eb226 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -5,7 +5,7 @@
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -131,7 +131,8 @@ static spinlock_t reg_indoor_lock;
/* Used to track the userspace process controlling the indoor setting */
static u32 reg_is_indoor_portid;
-static void restore_regulatory_settings(bool reset_user);
+static void restore_regulatory_settings(bool reset_user, bool cached);
+static void print_regdomain(const struct ieee80211_regdomain *rd);
static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
{
@@ -263,6 +264,7 @@ static const struct ieee80211_regdomain *cfg80211_world_regdom =
static char *ieee80211_regdom = "00";
static char user_alpha2[2];
+static const struct ieee80211_regdomain *cfg80211_user_regdom;
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
@@ -445,6 +447,15 @@ reg_copy_regd(const struct ieee80211_regdomain *src_regd)
return regd;
}
+static void cfg80211_save_user_regdom(const struct ieee80211_regdomain *rd)
+{
+ ASSERT_RTNL();
+
+ if (!IS_ERR(cfg80211_user_regdom))
+ kfree(cfg80211_user_regdom);
+ cfg80211_user_regdom = reg_copy_regd(rd);
+}
+
struct reg_regdb_apply_request {
struct list_head list;
const struct ieee80211_regdomain *regdom;
@@ -510,7 +521,7 @@ static void crda_timeout_work(struct work_struct *work)
pr_debug("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
rtnl_lock();
reg_crda_timeouts++;
- restore_regulatory_settings(true);
+ restore_regulatory_settings(true, false);
rtnl_unlock();
}
@@ -1024,8 +1035,13 @@ static void regdb_fw_cb(const struct firmware *fw, void *context)
}
rtnl_lock();
- if (WARN_ON(regdb && !IS_ERR(regdb))) {
- /* just restore and free new db */
+ if (regdb && !IS_ERR(regdb)) {
+ /* negative case - a bug
+ * positive case - can happen due to a race when multiple callbacks
+ * are queued, since the firmware request uses an asynchronous callback
+ *
+ * In either case, just restore and free the new db.
+ */
} else if (set_error) {
regdb = ERR_PTR(set_error);
} else if (fw) {
@@ -1039,7 +1055,7 @@ static void regdb_fw_cb(const struct firmware *fw, void *context)
}
if (restore)
- restore_regulatory_settings(true);
+ restore_regulatory_settings(true, false);
rtnl_unlock();
@@ -1255,7 +1271,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
* definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
* however it is safe for now to assume that a frequency rule should not be
* part of a frequency's band if the start freq or end freq are off by more
- * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
* 60 GHz band.
* This resolution can be lowered and should be considered as we add
* regulatory rule support for other "bands".
@@ -1270,7 +1286,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
* with the Channel starting frequency above 45 GHz.
*/
u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
- 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+ 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
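/*
 * Illustrative arithmetic, not part of the diff: with the 802.11ay
 * channelization the 60 GHz band reaches roughly 70 GHz (channel 6 is
 * centred at 69.12 GHz), so a frequency in that band can sit more than
 * 10 GHz - but well under 20 GHz - from the lower edge of a rule in the
 * same band.  The figures below are only meant to show the scale.
 */
#include <stdio.h>

int main(void)
{
	const double band_low_ghz = 57.24;	/* lower edge of channel 1 */
	const double chan6_ghz = 69.12;		/* 802.11ay channel 6 centre */

	printf("delta = %.2f GHz (old limit 10 GHz, new limit 20 GHz)\n",
	       chan6_ghz - band_low_ghz);
	return 0;
}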
@@ -2724,9 +2740,7 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
wiphy = &rdev->wiphy;
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
- request->initiator == NL80211_REGDOM_SET_BY_USER &&
- request->user_reg_hint_type ==
- NL80211_USER_REG_HINT_CELL_BASE)
+ request->initiator == NL80211_REGDOM_SET_BY_USER)
reg_call_notifier(wiphy, request);
}
}
@@ -3114,7 +3128,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy)
* keep their own regulatory domain on wiphy->regd so that it does
* not need to be remembered.
*/
-static void restore_regulatory_settings(bool reset_user)
+static void restore_regulatory_settings(bool reset_user, bool cached)
{
char alpha2[2];
char world_alpha2[2];
@@ -3173,15 +3187,41 @@ static void restore_regulatory_settings(bool reset_user)
restore_custom_reg_settings(&rdev->wiphy);
}
- regulatory_hint_core(world_alpha2);
+ if (cached && (!is_an_alpha2(alpha2) ||
+ !IS_ERR_OR_NULL(cfg80211_user_regdom))) {
+ reset_regdomains(false, cfg80211_world_regdom);
+ update_all_wiphy_regulatory(NL80211_REGDOM_SET_BY_CORE);
+ print_regdomain(get_cfg80211_regdom());
+ nl80211_send_reg_change_event(&core_request_world);
+ reg_set_request_processed();
- /*
- * This restores the ieee80211_regdom module parameter
- * preference or the last user requested regulatory
- * settings, user regulatory settings takes precedence.
- */
- if (is_an_alpha2(alpha2))
- regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
+ if (is_an_alpha2(alpha2) &&
+ !regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER)) {
+ struct regulatory_request *ureq;
+
+ spin_lock(&reg_requests_lock);
+ ureq = list_last_entry(&reg_requests_list,
+ struct regulatory_request,
+ list);
+ list_del(&ureq->list);
+ spin_unlock(&reg_requests_lock);
+
+ notify_self_managed_wiphys(ureq);
+ reg_update_last_request(ureq);
+ set_regdom(reg_copy_regd(cfg80211_user_regdom),
+ REGD_SOURCE_CACHED);
+ }
+ } else {
+ regulatory_hint_core(world_alpha2);
+
+ /*
+ * This restores the ieee80211_regdom module parameter
+ * preference or the last user requested regulatory
+ * settings; user regulatory settings take precedence.
+ */
+ if (is_an_alpha2(alpha2))
+ regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
+ }
spin_lock(&reg_requests_lock);
list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
@@ -3241,7 +3281,7 @@ void regulatory_hint_disconnect(void)
}
pr_debug("All devices are disconnected, going to restore regulatory settings\n");
- restore_regulatory_settings(false);
+ restore_regulatory_settings(false, true);
}
static bool freq_is_chan_12_13_14(u32 freq)
@@ -3558,6 +3598,9 @@ int set_regdom(const struct ieee80211_regdomain *rd,
bool user_reset = false;
int r;
+ if (IS_ERR_OR_NULL(rd))
+ return -ENODATA;
+
if (!reg_is_valid_request(rd->alpha2)) {
kfree(rd);
return -EINVAL;
@@ -3574,6 +3617,7 @@ int set_regdom(const struct ieee80211_regdomain *rd,
r = reg_set_rd_core(rd);
break;
case NL80211_REGDOM_SET_BY_USER:
+ cfg80211_save_user_regdom(rd);
r = reg_set_rd_user(rd, lr);
user_reset = true;
break;
@@ -3596,7 +3640,7 @@ int set_regdom(const struct ieee80211_regdomain *rd,
break;
default:
/* Back to world regulatory in case of errors */
- restore_regulatory_settings(user_reset);
+ restore_regulatory_settings(user_reset, false);
}
kfree(rd);
@@ -3932,6 +3976,8 @@ void regulatory_exit(void)
if (!IS_ERR_OR_NULL(regdb))
kfree(regdb);
+ if (!IS_ERR_OR_NULL(cfg80211_user_regdom))
+ kfree(cfg80211_user_regdom);
free_regdb_keyring();
}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 9ceeb5f3a7cb..504133d76de4 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -5,6 +5,7 @@
/*
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
+ * Copyright (C) 2019 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +23,7 @@
enum ieee80211_regd_source {
REGD_SOURCE_INTERNAL_DB,
REGD_SOURCE_CRDA,
+ REGD_SOURCE_CACHED,
};
extern const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 5123667f4569..287518c6caa4 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,6 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018-2019 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -109,6 +110,12 @@ static inline void bss_ref_get(struct cfg80211_registered_device *rdev,
pub);
bss->refcount++;
}
+ if (bss->pub.transmitted_bss) {
+ bss = container_of(bss->pub.transmitted_bss,
+ struct cfg80211_internal_bss,
+ pub);
+ bss->refcount++;
+ }
}
static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
@@ -125,6 +132,18 @@ static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
if (hbss->refcount == 0)
bss_free(hbss);
}
+
+ if (bss->pub.transmitted_bss) {
+ struct cfg80211_internal_bss *tbss;
+
+ tbss = container_of(bss->pub.transmitted_bss,
+ struct cfg80211_internal_bss,
+ pub);
+ tbss->refcount--;
+ if (tbss->refcount == 0)
+ bss_free(tbss);
+ }
+
bss->refcount--;
if (bss->refcount == 0)
bss_free(bss);
@@ -150,6 +169,7 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
}
list_del_init(&bss->list);
+ list_del_init(&bss->pub.nontrans_list);
rb_erase(&bss->rbn, &rdev->bss_tree);
rdev->bss_entries--;
WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
@@ -159,6 +179,162 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
return true;
}
+static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
+ const u8 *subelement, size_t subie_len,
+ u8 *new_ie, gfp_t gfp)
+{
+ u8 *pos, *tmp;
+ const u8 *tmp_old, *tmp_new;
+ u8 *sub_copy;
+
+ /* copy subelement as we need to change its content to
+ * mark an ie after it is processed.
+ */
+ sub_copy = kmalloc(subie_len, gfp);
+ if (!sub_copy)
+ return 0;
+ memcpy(sub_copy, subelement, subie_len);
+
+ pos = &new_ie[0];
+
+ /* set new ssid */
+ tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len);
+ if (tmp_new) {
+ memcpy(pos, tmp_new, tmp_new[1] + 2);
+ pos += (tmp_new[1] + 2);
+ }
+
+ /* go through IEs in ie (skip SSID) and subelement,
+ * merge them into new_ie
+ */
+ tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+ tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie;
+
+ while (tmp_old + tmp_old[1] + 2 - ie <= ielen) {
+ if (tmp_old[0] == 0) {
+ tmp_old++;
+ continue;
+ }
+
+ if (tmp_old[0] == WLAN_EID_EXTENSION)
+ tmp = (u8 *)cfg80211_find_ext_ie(tmp_old[2], sub_copy,
+ subie_len);
+ else
+ tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy,
+ subie_len);
+
+ if (!tmp) {
+ /* ie in old ie but not in subelement */
+ if (tmp_old[0] != WLAN_EID_MULTIPLE_BSSID) {
+ memcpy(pos, tmp_old, tmp_old[1] + 2);
+ pos += tmp_old[1] + 2;
+ }
+ } else {
+ /* ie in transmitting ie also in subelement,
+ * copy from subelement and flag the ie in subelement
+ * as copied (by setting eid field to WLAN_EID_SSID,
+ * which is skipped anyway).
+ * For vendor ie, compare OUI + type + subType to
+ * determine if they are the same ie.
+ */
+ if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) {
+ if (!memcmp(tmp_old + 2, tmp + 2, 5)) {
+ /* same vendor ie, copy from
+ * subelement
+ */
+ memcpy(pos, tmp, tmp[1] + 2);
+ pos += tmp[1] + 2;
+ tmp[0] = WLAN_EID_SSID;
+ } else {
+ memcpy(pos, tmp_old, tmp_old[1] + 2);
+ pos += tmp_old[1] + 2;
+ }
+ } else {
+ /* copy ie from subelement into new ie */
+ memcpy(pos, tmp, tmp[1] + 2);
+ pos += tmp[1] + 2;
+ tmp[0] = WLAN_EID_SSID;
+ }
+ }
+
+ if (tmp_old + tmp_old[1] + 2 - ie == ielen)
+ break;
+
+ tmp_old += tmp_old[1] + 2;
+ }
+
+ /* go through subelement again to check if there is any ie not
+ * copied to new ie, skip ssid, capability, bssid-index ie
+ */
+ tmp_new = sub_copy;
+ while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
+ if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP ||
+ tmp_new[0] == WLAN_EID_SSID ||
+ tmp_new[0] == WLAN_EID_MULTI_BSSID_IDX)) {
+ memcpy(pos, tmp_new, tmp_new[1] + 2);
+ pos += tmp_new[1] + 2;
+ }
+ if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len)
+ break;
+ tmp_new += tmp_new[1] + 2;
+ }
+
+ kfree(sub_copy);
+ return pos - new_ie;
+}
+
+static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
+ const u8 *ssid, size_t ssid_len)
+{
+ const struct cfg80211_bss_ies *ies;
+ const u8 *ssidie;
+
+ if (bssid && !ether_addr_equal(a->bssid, bssid))
+ return false;
+
+ if (!ssid)
+ return true;
+
+ ies = rcu_access_pointer(a->ies);
+ if (!ies)
+ return false;
+ ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ if (!ssidie)
+ return false;
+ if (ssidie[1] != ssid_len)
+ return false;
+ return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+}
+
+static int
+cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
+ struct cfg80211_bss *nontrans_bss)
+{
+ const u8 *ssid;
+ size_t ssid_len;
+ struct cfg80211_bss *bss = NULL;
+
+ rcu_read_lock();
+ ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
+ if (!ssid) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ ssid_len = ssid[1];
+ ssid = ssid + 2;
+ rcu_read_unlock();
+
+ /* check if nontrans_bss is in the list */
+ list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
+ if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+ return 0;
+ }
+
+ /* add to the list */
+ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
+ return 0;
+}
+
static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
unsigned long expire_time)
{
@@ -480,73 +656,43 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *rdev)
__cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
}
-const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
- const u8 *match, int match_len,
- int match_offset)
+const struct element *
+cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len,
+ const u8 *match, unsigned int match_len,
+ unsigned int match_offset)
{
- /* match_offset can't be smaller than 2, unless match_len is
- * zero, in which case match_offset must be zero as well.
- */
- if (WARN_ON((match_len && match_offset < 2) ||
- (!match_len && match_offset)))
- return NULL;
+ const struct element *elem;
- while (len >= 2 && len >= ies[1] + 2) {
- if ((ies[0] == eid) &&
- (ies[1] + 2 >= match_offset + match_len) &&
- !memcmp(ies + match_offset, match, match_len))
- return ies;
-
- len -= ies[1] + 2;
- ies += ies[1] + 2;
+ for_each_element_id(elem, eid, ies, len) {
+ if (elem->datalen >= match_offset + match_len &&
+ !memcmp(elem->data + match_offset, match, match_len))
+ return elem;
}
return NULL;
}
-EXPORT_SYMBOL(cfg80211_find_ie_match);
+EXPORT_SYMBOL(cfg80211_find_elem_match);
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
- const u8 *ies, int len)
+const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type,
+ const u8 *ies,
+ unsigned int len)
{
- const u8 *ie;
+ const struct element *elem;
u8 match[] = { oui >> 16, oui >> 8, oui, oui_type };
int match_len = (oui_type < 0) ? 3 : sizeof(match);
if (WARN_ON(oui_type > 0xff))
return NULL;
- ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, ies, len,
- match, match_len, 2);
+ elem = cfg80211_find_elem_match(WLAN_EID_VENDOR_SPECIFIC, ies, len,
+ match, match_len, 0);
- if (ie && (ie[1] < 4))
+ if (!elem || elem->datalen < 4)
return NULL;
- return ie;
-}
-EXPORT_SYMBOL(cfg80211_find_vendor_ie);
-
-static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
- const u8 *ssid, size_t ssid_len)
-{
- const struct cfg80211_bss_ies *ies;
- const u8 *ssidie;
-
- if (bssid && !ether_addr_equal(a->bssid, bssid))
- return false;
-
- if (!ssid)
- return true;
-
- ies = rcu_access_pointer(a->ies);
- if (!ies)
- return false;
- ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
- if (!ssidie)
- return false;
- if (ssidie[1] != ssid_len)
- return false;
- return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+ return elem;
}
+EXPORT_SYMBOL(cfg80211_find_vendor_elem);
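/*
 * Illustrative sketch, not part of the diff: consuming the new
 * element-returning lookups.  struct element carries an explicit
 * datalen, so callers no longer index ie[1] by hand.  Assumes the
 * cfg80211_find_elem() inline wrapper introduced together with these
 * exports; the printing is only an example.
 */
static void hyp_print_ssid(const u8 *ies, unsigned int ielen)
{
	const struct element *ssid;

	ssid = cfg80211_find_elem(WLAN_EID_SSID, ies, ielen);
	if (!ssid || ssid->datalen > IEEE80211_MAX_SSID_LEN)
		return;

	pr_info("SSID: %.*s\n", ssid->datalen, ssid->data);
}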
/**
* enum bss_compare_mode - BSS compare mode
@@ -882,6 +1028,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
return true;
}
+struct cfg80211_non_tx_bss {
+ struct cfg80211_bss *tx_bss;
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+};
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_internal_bss *
cfg80211_bss_update(struct cfg80211_registered_device *rdev,
@@ -985,6 +1137,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
IEEE80211_MAX_CHAINS);
ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
+ found->pub.max_bssid_indicator = tmp->pub.max_bssid_indicator;
+ found->pub.bssid_index = tmp->pub.bssid_index;
} else {
struct cfg80211_internal_bss *new;
struct cfg80211_internal_bss *hidden;
@@ -1009,6 +1163,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
memcpy(new, tmp, sizeof(*new));
new->refcount = 1;
INIT_LIST_HEAD(&new->hidden_list);
+ INIT_LIST_HEAD(&new->pub.nontrans_list);
if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
@@ -1042,6 +1197,17 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
goto drop;
}
+ /* This must be before the call to bss_ref_get */
+ if (tmp->pub.transmitted_bss) {
+ struct cfg80211_internal_bss *pbss =
+ container_of(tmp->pub.transmitted_bss,
+ struct cfg80211_internal_bss,
+ pub);
+
+ new->pub.transmitted_bss = tmp->pub.transmitted_bss;
+ bss_ref_get(rdev, pbss);
+ }
+
list_add_tail(&new->list, &rdev->bss_list);
rdev->bss_entries++;
rb_insert_bss(rdev, new);
@@ -1130,14 +1296,16 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
}
/* Returned bss is reference counted and must be cleaned up appropriately. */
-struct cfg80211_bss *
-cfg80211_inform_bss_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- enum cfg80211_bss_frame_type ftype,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- gfp_t gfp)
+static struct cfg80211_bss *
+cfg80211_inform_single_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ struct cfg80211_non_tx_bss *non_tx_data,
+ gfp_t gfp)
{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
struct cfg80211_bss_ies *ies;
struct ieee80211_channel *channel;
struct cfg80211_internal_bss tmp = {}, *res;
@@ -1163,6 +1331,11 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
tmp.ts_boottime = data->boottime_ns;
+ if (non_tx_data) {
+ tmp.pub.transmitted_bss = non_tx_data->tx_bss;
+ tmp.pub.bssid_index = non_tx_data->bssid_index;
+ tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator;
+ }
/*
* If we do not know here whether the IEs are from a Beacon or Probe
@@ -1209,19 +1382,247 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
regulatory_hint_found_beacon(wiphy, channel, gfp);
}
+ if (non_tx_data && non_tx_data->tx_bss) {
+ /* this is a nontransmitting bss, we need to add it to
+ * transmitting bss' list if it is not there
+ */
+ if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
+ &res->pub)) {
+ if (__cfg80211_unlink_bss(rdev, res))
+ rdev->bss_generation++;
+ }
+ }
+
trace_cfg80211_return_bss(&res->pub);
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss_data);
-/* cfg80211_inform_bss_width_frame helper */
+static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf,
+ u16 beacon_interval, const u8 *ie,
+ size_t ielen,
+ struct cfg80211_non_tx_bss *non_tx_data,
+ gfp_t gfp)
+{
+ const u8 *mbssid_index_ie;
+ const struct element *elem, *sub;
+ size_t new_ie_len;
+ u8 new_bssid[ETH_ALEN];
+ u8 *new_ie;
+ u16 capability;
+ struct cfg80211_bss *bss;
+
+ if (!non_tx_data)
+ return;
+ if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ return;
+ if (!wiphy->support_mbssid)
+ return;
+ if (wiphy->support_only_he_mbssid &&
+ !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ return;
+
+ new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
+ if (!new_ie)
+ return;
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) {
+ if (elem->datalen < 4)
+ continue;
+ for_each_element(sub, elem->data + 1, elem->datalen - 1) {
+ if (sub->id != 0 || sub->datalen < 4) {
+ /* not a valid BSS profile */
+ continue;
+ }
+
+ if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ sub->data[1] != 2) {
+ /* The first element within the Nontransmitted
+ * BSSID Profile is not the Nontransmitted
+ * BSSID Capability element.
+ */
+ continue;
+ }
+
+ /* found a Nontransmitted BSSID Profile */
+ mbssid_index_ie = cfg80211_find_ie
+ (WLAN_EID_MULTI_BSSID_IDX,
+ sub->data, sub->datalen);
+ if (!mbssid_index_ie || mbssid_index_ie[1] < 1 ||
+ mbssid_index_ie[2] == 0) {
+ /* No valid Multiple BSSID-Index element */
+ continue;
+ }
+
+ non_tx_data->bssid_index = mbssid_index_ie[2];
+ non_tx_data->max_bssid_indicator = elem->data[0];
+
+ cfg80211_gen_new_bssid(bssid,
+ non_tx_data->max_bssid_indicator,
+ non_tx_data->bssid_index,
+ new_bssid);
+ memset(new_ie, 0, IEEE80211_MAX_DATA_LEN);
+ new_ie_len = cfg80211_gen_new_ie(ie, ielen, sub->data,
+ sub->datalen, new_ie,
+ gfp);
+ if (!new_ie_len)
+ continue;
+
+ capability = get_unaligned_le16(sub->data + 2);
+ bss = cfg80211_inform_single_bss_data(wiphy, data,
+ ftype,
+ new_bssid, tsf,
+ capability,
+ beacon_interval,
+ new_ie,
+ new_ie_len,
+ non_tx_data,
+ gfp);
+ if (!bss)
+ break;
+ cfg80211_put_bss(wiphy, bss);
+ }
+ }
+
+ kfree(new_ie);
+}
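/*
 * Illustrative sketch, not part of the diff: how a nontransmitted
 * profile's BSSID is derived per the IEEE 802.11 multiple-BSSID rules -
 * the low "max BSSID indicator" bits of the transmitter's address are
 * replaced by (reference + index) modulo 2^indicator.  This mirrors the
 * cfg80211_gen_new_bssid() helper called above, rewritten as standalone
 * userspace C for clarity.
 */
#include <stdint.h>
#include <stdio.h>

static void gen_new_bssid(const uint8_t *bssid, uint8_t max_bssid,
			  uint8_t index, uint8_t *new_bssid)
{
	uint64_t b = 0, mask, lsb;
	int i;

	for (i = 0; i < 6; i++)			/* MAC bytes to integer */
		b = (b << 8) | bssid[i];

	mask = (1ULL << max_bssid) - 1;
	lsb = ((b & mask) + index) & mask;	/* wraps modulo 2^max_bssid */
	b = (b & ~mask) | lsb;

	for (i = 5; i >= 0; i--) {		/* integer back to MAC bytes */
		new_bssid[i] = b & 0xff;
		b >>= 8;
	}
}

int main(void)
{
	const uint8_t tx[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x40 };
	uint8_t ntx[6];

	gen_new_bssid(tx, 4 /* MaxBSSID indicator */, 3 /* BSSID index */, ntx);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       ntx[0], ntx[1], ntx[2], ntx[3], ntx[4], ntx[5]);
	return 0;
}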
+
struct cfg80211_bss *
-cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ gfp_t gfp)
+{
+ struct cfg80211_bss *res;
+ struct cfg80211_non_tx_bss non_tx_data;
+
+ res = cfg80211_inform_single_bss_data(wiphy, data, ftype, bssid, tsf,
+ capability, beacon_interval, ie,
+ ielen, NULL, gfp);
+ non_tx_data.tx_bss = res;
+ cfg80211_parse_mbssid_data(wiphy, data, ftype, bssid, tsf,
+ beacon_interval, ie, ielen, &non_tx_data,
+ gfp);
+ return res;
+}
+EXPORT_SYMBOL(cfg80211_inform_bss_data);
+
+static void
+cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct cfg80211_non_tx_bss *non_tx_data,
+ gfp_t gfp)
+{
+ enum cfg80211_bss_frame_type ftype;
+ const u8 *ie = mgmt->u.probe_resp.variable;
+ size_t ielen = len - offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ ftype = ieee80211_is_beacon(mgmt->frame_control) ?
+ CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+
+ cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid,
+ le64_to_cpu(mgmt->u.probe_resp.timestamp),
+ le16_to_cpu(mgmt->u.probe_resp.beacon_int),
+ ie, ielen, non_tx_data, gfp);
+}
+
+static void
+cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
+ struct cfg80211_bss *nontrans_bss,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
+{
+ u8 *ie, *new_ie, *pos;
+ const u8 *nontrans_ssid, *trans_ssid, *mbssid;
+ size_t ielen = len - offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ size_t new_ie_len;
+ struct cfg80211_bss_ies *new_ies;
+ const struct cfg80211_bss_ies *old;
+ u8 cpy_len;
+
+ ie = mgmt->u.probe_resp.variable;
+
+ new_ie_len = ielen;
+ trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+ if (!trans_ssid)
+ return;
+ new_ie_len -= trans_ssid[1];
+ mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
+ if (!mbssid)
+ return;
+ new_ie_len -= mbssid[1];
+ rcu_read_lock();
+ nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
+ if (!nontrans_ssid) {
+ rcu_read_unlock();
+ return;
+ }
+ new_ie_len += nontrans_ssid[1];
+ rcu_read_unlock();
+
+ /* generate new ie for nontrans BSS
+ * 1. replace SSID with nontrans BSS' SSID
+ * 2. skip MBSSID IE
+ */
+ new_ie = kzalloc(new_ie_len, gfp);
+ if (!new_ie)
+ return;
+ new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp);
+ if (!new_ies)
+ goto out_free;
+
+ pos = new_ie;
+
+ /* copy the nontransmitted SSID */
+ cpy_len = nontrans_ssid[1] + 2;
+ memcpy(pos, nontrans_ssid, cpy_len);
+ pos += cpy_len;
+ /* copy the IEs between SSID and MBSSID */
+ cpy_len = trans_ssid[1] + 2;
+ memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len)));
+ pos += (mbssid - (trans_ssid + cpy_len));
+ /* copy the IEs after MBSSID */
+ cpy_len = mbssid[1] + 2;
+ memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len)));
+
+ /* update ie */
+ new_ies->len = new_ie_len;
+ new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
+ new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control);
+ memcpy(new_ies->data, new_ie, new_ie_len);
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ old = rcu_access_pointer(nontrans_bss->proberesp_ies);
+ rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies);
+ rcu_assign_pointer(nontrans_bss->ies, new_ies);
+ if (old)
+ kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+ } else {
+ old = rcu_access_pointer(nontrans_bss->beacon_ies);
+ rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies);
+ rcu_assign_pointer(nontrans_bss->ies, new_ies);
+ if (old)
+ kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+ }
+
+out_free:
+ kfree(new_ie);
+}
+
+/* cfg80211_inform_bss_width_frame helper */
+static struct cfg80211_bss *
+cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct cfg80211_non_tx_bss *non_tx_data,
+ gfp_t gfp)
{
struct cfg80211_internal_bss tmp = {}, *res;
struct cfg80211_bss_ies *ies;
@@ -1279,6 +1680,11 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
tmp.pub.chains = data->chains;
memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
+ if (non_tx_data) {
+ tmp.pub.transmitted_bss = non_tx_data->tx_bss;
+ tmp.pub.bssid_index = non_tx_data->bssid_index;
+ tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator;
+ }
signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
wiphy->max_adj_channel_rssi_comp;
@@ -1300,6 +1706,53 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
+
+struct cfg80211_bss *
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
+{
+ struct cfg80211_bss *res, *tmp_bss;
+ const u8 *ie = mgmt->u.probe_resp.variable;
+ const struct cfg80211_bss_ies *ies1, *ies2;
+ size_t ielen = len - offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ struct cfg80211_non_tx_bss non_tx_data;
+
+ res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
+ len, NULL, gfp);
+ if (!res || !wiphy->support_mbssid ||
+ !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ return res;
+ if (wiphy->support_only_he_mbssid &&
+ !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ return res;
+
+ non_tx_data.tx_bss = res;
+ /* process each non-transmitting bss */
+ cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
+ &non_tx_data, gfp);
+
+ /* check if the res has other nontransmitting bss which is not
+ * in MBSSID IE
+ */
+ ies1 = rcu_access_pointer(res->ies);
+
+ /* go through nontrans_list, if the timestamp of the BSS is
+ * earlier than the timestamp of the transmitting BSS then
+ * update it
+ */
+ list_for_each_entry(tmp_bss, &res->nontrans_list,
+ nontrans_list) {
+ ies2 = rcu_access_pointer(tmp_bss->ies);
+ if (ies2->tsf < ies1->tsf)
+ cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
+ mgmt, len, gfp);
+ }
+
+ return res;
+}
EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
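/*
 * Illustrative driver-side sketch, not part of the diff: the reporting
 * entry point is unchanged by the MBSSID work - a driver still informs
 * cfg80211 once per received frame, and any multiple-BSSID element is
 * expanded into additional BSS entries internally.  Signal units assume
 * CFG80211_SIGNAL_TYPE_MBM; names are placeholders.
 */
static void hyp_rx_scan_frame(struct wiphy *wiphy,
			      struct ieee80211_channel *chan,
			      struct ieee80211_mgmt *mgmt, size_t len,
			      s32 signal_mbm, u64 boottime_ns)
{
	struct cfg80211_inform_bss data = {
		.chan = chan,
		.signal = signal_mbm,
		.boottime_ns = boottime_ns,
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
					     GFP_ATOMIC);
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* drop our reference */
}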
void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
@@ -1337,7 +1790,8 @@ EXPORT_SYMBOL(cfg80211_put_bss);
void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct cfg80211_internal_bss *bss;
+ struct cfg80211_internal_bss *bss, *tmp1;
+ struct cfg80211_bss *nontrans_bss, *tmp;
if (WARN_ON(!pub))
return;
@@ -1345,10 +1799,21 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
bss = container_of(pub, struct cfg80211_internal_bss, pub);
spin_lock_bh(&rdev->bss_lock);
- if (!list_empty(&bss->list)) {
- if (__cfg80211_unlink_bss(rdev, bss))
+ if (list_empty(&bss->list))
+ goto out;
+
+ list_for_each_entry_safe(nontrans_bss, tmp,
+ &pub->nontrans_list,
+ nontrans_list) {
+ tmp1 = container_of(nontrans_bss,
+ struct cfg80211_internal_bss, pub);
+ if (__cfg80211_unlink_bss(rdev, tmp1))
rdev->bss_generation++;
}
+
+ if (__cfg80211_unlink_bss(rdev, bss))
+ rdev->bss_generation++;
+out:
spin_unlock_bh(&rdev->bss_lock);
}
EXPORT_SYMBOL(cfg80211_unlink_bss);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f741d8376a46..7d34cb884840 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
rtnl_unlock();
}
-static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
+DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
/*
diff --git a/net/wireless/util.c b/net/wireless/util.c
index cd48cdd582c0..e4b8db5e81ec 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -5,7 +5,7 @@
* Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*/
#include <linux/export.h>
#include <linux/bitops.h>
@@ -19,6 +19,7 @@
#include <linux/mpls.h>
#include <linux/gcd.h>
#include <linux/bitfield.h>
+#include <linux/nospec.h>
#include "core.h"
#include "rdev-ops.h"
@@ -715,20 +716,25 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
{
unsigned int dscp;
unsigned char vlan_priority;
+ unsigned int ret;
/* skb->priority values from 256->263 are magic values to
* directly indicate a specific 802.1d priority. This is used
* to allow 802.1d priority to be passed directly in from VLAN
* tags, etc.
*/
- if (skb->priority >= 256 && skb->priority <= 263)
- return skb->priority - 256;
+ if (skb->priority >= 256 && skb->priority <= 263) {
+ ret = skb->priority - 256;
+ goto out;
+ }
if (skb_vlan_tag_present(skb)) {
vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK)
>> VLAN_PRIO_SHIFT;
- if (vlan_priority > 0)
- return vlan_priority;
+ if (vlan_priority > 0) {
+ ret = vlan_priority;
+ goto out;
+ }
}
switch (skb->protocol) {
@@ -747,8 +753,9 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
if (!mpls)
return 0;
- return (ntohl(mpls->entry) & MPLS_LS_TC_MASK)
+ ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK)
>> MPLS_LS_TC_SHIFT;
+ goto out;
}
case htons(ETH_P_80221):
/* 802.21 is always network control traffic */
@@ -761,22 +768,28 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
unsigned int i, tmp_dscp = dscp >> 2;
for (i = 0; i < qos_map->num_des; i++) {
- if (tmp_dscp == qos_map->dscp_exception[i].dscp)
- return qos_map->dscp_exception[i].up;
+ if (tmp_dscp == qos_map->dscp_exception[i].dscp) {
+ ret = qos_map->dscp_exception[i].up;
+ goto out;
+ }
}
for (i = 0; i < 8; i++) {
if (tmp_dscp >= qos_map->up[i].low &&
- tmp_dscp <= qos_map->up[i].high)
- return i;
+ tmp_dscp <= qos_map->up[i].high) {
+ ret = i;
+ goto out;
+ }
}
}
- return dscp >> 5;
+ ret = dscp >> 5;
+out:
+ return array_index_nospec(ret, IEEE80211_NUM_TIDS);
}
EXPORT_SYMBOL(cfg80211_classify8021d);
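/*
 * Illustrative sketch, not part of the diff: the reason for funnelling
 * every return through one exit is so array_index_nospec() can clamp the
 * classification result before callers use it to index per-TID arrays.
 * The general pattern, with a hypothetical table and bound:
 */
#define HYP_NUM_ENTRIES 8

static u32 hyp_lookup(const u32 *table, unsigned int idx)
{
	if (idx >= HYP_NUM_ENTRIES)
		return 0;

	/* architecturally a no-op, but bounds idx under speculation */
	idx = array_index_nospec(idx, HYP_NUM_ENTRIES);
	return table[idx];
}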
-const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
+const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id)
{
const struct cfg80211_bss_ies *ies;
@@ -784,9 +797,9 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
if (!ies)
return NULL;
- return cfg80211_find_ie(ie, ies->data, ies->len);
+ return cfg80211_find_elem(id, ies->data, ies->len);
}
-EXPORT_SYMBOL(ieee80211_bss_get_ie);
+EXPORT_SYMBOL(ieee80211_bss_get_elem);
void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
{
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 06943d9c9835..d522787c7354 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1337,6 +1337,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
wstats.qual.qual = sig + 110;
break;
}
+ /* fall through */
case CFG80211_SIGNAL_TYPE_UNSPEC:
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) {
wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
@@ -1345,6 +1346,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
wstats.qual.qual = sinfo.signal;
break;
}
+ /* fall through */
default:
wstats.qual.updated |= IW_QUAL_LEVEL_INVALID;
wstats.qual.updated |= IW_QUAL_QUAL_INVALID;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5121729b8b63..eff31348e20b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
unsigned int lci = 1;
struct sock *sk;
- read_lock_bh(&x25_list_lock);
-
- while ((sk = __x25_find_socket(lci, nb)) != NULL) {
+ while ((sk = x25_find_socket(lci, nb)) != NULL) {
sock_put(sk);
if (++lci == 4096) {
lci = 0;
break;
}
+ cond_resched();
}
- read_unlock_bh(&x25_list_lock);
return lci;
}
@@ -681,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
int len, i, rc = 0;
- if (!sock_flag(sk, SOCK_ZAPPED) ||
- addr_len != sizeof(struct sockaddr_x25) ||
+ if (addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25) {
rc = -EINVAL;
goto out;
@@ -701,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
lock_sock(sk);
- x25_sk(sk)->source_addr = addr->sx25_addr;
- x25_insert_socket(sk);
- sock_reset_flag(sk, SOCK_ZAPPED);
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ x25_sk(sk)->source_addr = addr->sx25_addr;
+ x25_insert_socket(sk);
+ sock_reset_flag(sk, SOCK_ZAPPED);
+ } else {
+ rc = -EINVAL;
+ }
release_sock(sk);
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
diff --git a/net/xdp/Kconfig b/net/xdp/Kconfig
index 90e4a7152854..0255b33cff4b 100644
--- a/net/xdp/Kconfig
+++ b/net/xdp/Kconfig
@@ -5,3 +5,11 @@ config XDP_SOCKETS
help
XDP sockets allow a channel between XDP programs and
userspace applications.
+
+config XDP_SOCKETS_DIAG
+ tristate "XDP sockets: monitoring interface"
+ depends on XDP_SOCKETS
+ default n
+ help
+ Support for the PF_XDP sockets monitoring interface used by the ss tool.
+ If unsure, say Y.
diff --git a/net/xdp/Makefile b/net/xdp/Makefile
index 04f073146256..59dbfdf93dca 100644
--- a/net/xdp/Makefile
+++ b/net/xdp/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o xsk_queue.o
+obj-$(CONFIG_XDP_SOCKETS_DIAG) += xsk_diag.o
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a264cf2accd0..77520eacee8f 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -13,12 +13,15 @@
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
+#include <linux/idr.h>
#include "xdp_umem.h"
#include "xsk_queue.h"
#define XDP_UMEM_MIN_CHUNK_SIZE 2048
+static DEFINE_IDA(umem_ida);
+
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
unsigned long flags;
@@ -41,13 +44,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
* not know if the device has more tx queues than rx, or the opposite.
* This might also change during run time.
*/
-static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
- u16 queue_id)
+static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
+ u16 queue_id)
{
+ if (queue_id >= max_t(unsigned int,
+ dev->real_num_rx_queues,
+ dev->real_num_tx_queues))
+ return -EINVAL;
+
if (queue_id < dev->real_num_rx_queues)
dev->_rx[queue_id].umem = umem;
if (queue_id < dev->real_num_tx_queues)
dev->_tx[queue_id].umem = umem;
+
+ return 0;
}
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
@@ -60,6 +70,7 @@ struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
return NULL;
}
+EXPORT_SYMBOL(xdp_get_umem_from_qid);
static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
@@ -88,7 +99,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
goto out_rtnl_unlock;
}
- xdp_reg_umem_at_qid(dev, umem, queue_id);
+ err = xdp_reg_umem_at_qid(dev, umem, queue_id);
+ if (err)
+ goto out_rtnl_unlock;
+
umem->dev = dev;
umem->queue_id = queue_id;
if (force_copy)
@@ -115,9 +129,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
return 0;
err_unreg_umem:
- xdp_clear_umem_at_qid(dev, queue_id);
if (!force_zc)
err = 0; /* fallback to copy mode */
+ if (err)
+ xdp_clear_umem_at_qid(dev, queue_id);
out_rtnl_unlock:
rtnl_unlock();
return err;
@@ -183,6 +198,8 @@ static void xdp_umem_release(struct xdp_umem *umem)
xdp_umem_clear_dev(umem);
+ ida_simple_remove(&umem_ida, umem->id);
+
if (umem->fq) {
xskq_destroy(umem->fq);
umem->fq = NULL;
@@ -249,10 +266,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
if (!umem->pgs)
return -ENOMEM;
- down_write(&current->mm->mmap_sem);
- npgs = get_user_pages(umem->address, umem->npgs,
- gup_flags, &umem->pgs[0], NULL);
- up_write(&current->mm->mmap_sem);
+ down_read(&current->mm->mmap_sem);
+ npgs = get_user_pages_longterm(umem->address, umem->npgs,
+ gup_flags, &umem->pgs[0], NULL);
+ up_read(&current->mm->mmap_sem);
if (npgs != umem->npgs) {
if (npgs >= 0) {
@@ -389,8 +406,16 @@ struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
if (!umem)
return ERR_PTR(-ENOMEM);
+ err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
+ if (err < 0) {
+ kfree(umem);
+ return ERR_PTR(err);
+ }
+ umem->id = err;
+
err = xdp_umem_reg(umem, mr);
if (err) {
+ ida_simple_remove(&umem_ida, umem->id);
kfree(umem);
return ERR_PTR(err);
}
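/*
 * Illustrative sketch, not part of the diff: the ida_simple_get() /
 * ida_simple_remove() pairing used above, shown on a hypothetical object
 * so the lifetime rule is easy to see - the ID must be released on every
 * path that frees the object, including the error path right after
 * allocation.
 */
static DEFINE_IDA(hyp_ida);

struct hyp_obj {
	int id;
};

static struct hyp_obj *hyp_obj_create(void)
{
	struct hyp_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int id;

	if (!obj)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&hyp_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		kfree(obj);			/* release before returning */
		return ERR_PTR(id);
	}
	obj->id = id;
	return obj;
}

static void hyp_obj_destroy(struct hyp_obj *obj)
{
	ida_simple_remove(&hyp_ida, obj->id);	/* give the ID back */
	kfree(obj);
}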
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a03268454a27..6697084e3fdf 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -27,14 +27,10 @@
#include "xsk_queue.h"
#include "xdp_umem.h"
+#include "xsk.h"
#define TX_BATCH_SIZE 16
-static struct xdp_sock *xdp_sk(struct sock *sk)
-{
- return (struct xdp_sock *)sk;
-}
-
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
@@ -350,6 +346,10 @@ static int xsk_release(struct socket *sock)
net = sock_net(sk);
+ mutex_lock(&net->xdp.lock);
+ sk_del_node_init_rcu(sk);
+ mutex_unlock(&net->xdp.lock);
+
local_bh_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
local_bh_enable();
@@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock)
xskq_destroy(xs->rx);
xskq_destroy(xs->tx);
- xdp_put_umem(xs->umem);
sock_orphan(sk);
sock->sk = NULL;
@@ -669,6 +668,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
if (!umem)
return -EINVAL;
+ /* Matches the smp_wmb() in XDP_UMEM_REG */
+ smp_rmb();
if (offset == XDP_UMEM_PGOFF_FILL_RING)
q = READ_ONCE(umem->fq);
else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
@@ -678,6 +679,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
if (!q)
return -EINVAL;
+ /* Matches the smp_wmb() in xsk_init_queue */
+ smp_rmb();
qpg = virt_to_head_page(q->ring);
if (size > (PAGE_SIZE << compound_order(qpg)))
return -EINVAL;
@@ -714,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = {
.sendpage = sock_no_sendpage,
};
+static void xsk_destruct(struct sock *sk)
+{
+ struct xdp_sock *xs = xdp_sk(sk);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ return;
+
+ xdp_put_umem(xs->umem);
+
+ sk_refcnt_debug_dec(sk);
+}
+
static int xsk_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
@@ -740,12 +755,19 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk->sk_family = PF_XDP;
+ sk->sk_destruct = xsk_destruct;
+ sk_refcnt_debug_inc(sk);
+
sock_set_flag(sk, SOCK_RCU_FREE);
xs = xdp_sk(sk);
mutex_init(&xs->mutex);
spin_lock_init(&xs->tx_completion_lock);
+ mutex_lock(&net->xdp.lock);
+ sk_add_node_rcu(sk, &net->xdp.list);
+ mutex_unlock(&net->xdp.lock);
+
local_bh_disable();
sock_prot_inuse_add(net, &xsk_proto, 1);
local_bh_enable();
@@ -759,6 +781,23 @@ static const struct net_proto_family xsk_family_ops = {
.owner = THIS_MODULE,
};
+static int __net_init xsk_net_init(struct net *net)
+{
+ mutex_init(&net->xdp.lock);
+ INIT_HLIST_HEAD(&net->xdp.list);
+ return 0;
+}
+
+static void __net_exit xsk_net_exit(struct net *net)
+{
+ WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
+}
+
+static struct pernet_operations xsk_net_ops = {
+ .init = xsk_net_init,
+ .exit = xsk_net_exit,
+};
+
static int __init xsk_init(void)
{
int err;
@@ -771,8 +810,13 @@ static int __init xsk_init(void)
if (err)
goto out_proto;
+ err = register_pernet_subsys(&xsk_net_ops);
+ if (err)
+ goto out_sk;
return 0;
+out_sk:
+ sock_unregister(PF_XDP);
out_proto:
proto_unregister(&xsk_proto);
out:
diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h
new file mode 100644
index 000000000000..ba8120610426
--- /dev/null
+++ b/net/xdp/xsk.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. */
+
+#ifndef XSK_H_
+#define XSK_H_
+
+static inline struct xdp_sock *xdp_sk(struct sock *sk)
+{
+ return (struct xdp_sock *)sk;
+}
+
+#endif /* XSK_H_ */
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
new file mode 100644
index 000000000000..661d007c3b28
--- /dev/null
+++ b/net/xdp/xsk_diag.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/* XDP sockets monitoring support
+ *
+ * Copyright(c) 2019 Intel Corporation.
+ *
+ * Author: Björn Töpel <bjorn.topel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <net/xdp_sock.h>
+#include <linux/xdp_diag.h>
+#include <linux/sock_diag.h>
+
+#include "xsk_queue.h"
+#include "xsk.h"
+
+static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
+{
+ struct xdp_diag_info di = {};
+
+ di.ifindex = xs->dev ? xs->dev->ifindex : 0;
+ di.queue_id = xs->queue_id;
+ return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
+}
+
+static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
+ struct sk_buff *nlskb)
+{
+ struct xdp_diag_ring dr = {};
+
+ dr.entries = queue->nentries;
+ return nla_put(nlskb, nl_type, sizeof(dr), &dr);
+}
+
+static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
+ struct sk_buff *nlskb)
+{
+ int err = 0;
+
+ if (xs->rx)
+ err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
+ if (!err && xs->tx)
+ err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
+ return err;
+}
+
+static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
+{
+ struct xdp_umem *umem = xs->umem;
+ struct xdp_diag_umem du = {};
+ int err;
+
+ if (!umem)
+ return 0;
+
+ du.id = umem->id;
+ du.size = umem->size;
+ du.num_pages = umem->npgs;
+ du.chunk_size = (__u32)(~umem->chunk_mask + 1);
+ du.headroom = umem->headroom;
+ du.ifindex = umem->dev ? umem->dev->ifindex : 0;
+ du.queue_id = umem->queue_id;
+ du.flags = 0;
+ if (umem->zc)
+ du.flags |= XDP_DU_F_ZEROCOPY;
+ du.refs = refcount_read(&umem->users);
+
+ err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
+
+ if (!err && umem->fq)
+ err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
+ if (!err && umem->cq) {
+ err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
+ nlskb);
+ }
+ return err;
+}
+
+static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
+ struct xdp_diag_req *req,
+ struct user_namespace *user_ns,
+ u32 portid, u32 seq, u32 flags, int sk_ino)
+{
+ struct xdp_sock *xs = xdp_sk(sk);
+ struct xdp_diag_msg *msg;
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
+ flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ msg = nlmsg_data(nlh);
+ memset(msg, 0, sizeof(*msg));
+ msg->xdiag_family = AF_XDP;
+ msg->xdiag_type = sk->sk_type;
+ msg->xdiag_ino = sk_ino;
+ sock_diag_save_cookie(sk, msg->xdiag_cookie);
+
+ if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
+ goto out_nlmsg_trim;
+
+ if ((req->xdiag_show & XDP_SHOW_INFO) &&
+ nla_put_u32(nlskb, XDP_DIAG_UID,
+ from_kuid_munged(user_ns, sock_i_uid(sk))))
+ goto out_nlmsg_trim;
+
+ if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
+ xsk_diag_put_rings_cfg(xs, nlskb))
+ goto out_nlmsg_trim;
+
+ if ((req->xdiag_show & XDP_SHOW_UMEM) &&
+ xsk_diag_put_umem(xs, nlskb))
+ goto out_nlmsg_trim;
+
+ if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
+ sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
+ goto out_nlmsg_trim;
+
+ nlmsg_end(nlskb, nlh);
+ return 0;
+
+out_nlmsg_trim:
+ nlmsg_cancel(nlskb, nlh);
+ return -EMSGSIZE;
+}
+
+static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
+{
+ struct xdp_diag_req *req = nlmsg_data(cb->nlh);
+ struct net *net = sock_net(nlskb->sk);
+ int num = 0, s_num = cb->args[0];
+ struct sock *sk;
+
+ mutex_lock(&net->xdp.lock);
+
+ sk_for_each(sk, &net->xdp.list) {
+ if (!net_eq(sock_net(sk), net))
+ continue;
+ if (num++ < s_num)
+ continue;
+
+ if (xsk_diag_fill(sk, nlskb, req,
+ sk_user_ns(NETLINK_CB(cb->skb).sk),
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ sock_i_ino(sk)) < 0) {
+ num--;
+ break;
+ }
+ }
+
+ mutex_unlock(&net->xdp.lock);
+ cb->args[0] = num;
+ return nlskb->len;
+}
+
+static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
+{
+ struct netlink_dump_control c = { .dump = xsk_diag_dump };
+ int hdrlen = sizeof(struct xdp_diag_req);
+ struct net *net = sock_net(nlskb->sk);
+
+ if (nlmsg_len(hdr) < hdrlen)
+ return -EINVAL;
+
+ if (!(hdr->nlmsg_flags & NLM_F_DUMP))
+ return -EOPNOTSUPP;
+
+ return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
+}
+
+static const struct sock_diag_handler xsk_diag_handler = {
+ .family = AF_XDP,
+ .dump = xsk_diag_handler_dump,
+};
+
+static int __init xsk_diag_init(void)
+{
+ return sock_diag_register(&xsk_diag_handler);
+}
+
+static void __exit xsk_diag_exit(void)
+{
+ sock_diag_unregister(&xsk_diag_handler);
+}
+
+module_init(xsk_diag_init);
+module_exit(xsk_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);
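As a rough illustration (not part of the patch), the new handler can be exercised from user space with a plain NETLINK_SOCK_DIAG dump request. The sketch below assumes the matching <linux/xdp_diag.h> uapi header from this series is installed; error handling is intentionally minimal and the AF_XDP fallback define is only there for older libcs.

/* Minimal sketch: dump AF_XDP sockets via NETLINK_SOCK_DIAG.
 * Illustrative only; assumes the new <linux/xdp_diag.h> header.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/xdp_diag.h>

#ifndef AF_XDP
#define AF_XDP 44	/* fallback for older libc headers */
#endif

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct xdp_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_XDP,
			.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_RING_CFG |
				      XDP_SHOW_UMEM | XDP_SHOW_MEMINFO,
		},
	};
	char buf[8192];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0)
		return 1;

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
			struct xdp_diag_msg *m = NLMSG_DATA(h);

			if (h->nlmsg_type == NLMSG_DONE ||
			    h->nlmsg_type == NLMSG_ERROR)
				goto out;
			printf("AF_XDP socket, inode %u\n", m->xdiag_ino);
		}
	}
out:
	close(fd);
	return 0;
}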
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 6be8c7df15bb..dbb3c1945b5c 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
int ifindex;
struct xfrm_if *xi;
- if (!skb->dev)
+ if (!secpath_exists(skb) || !skb->dev)
return NULL;
- xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
+ xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
ifindex = skb->dev->ifindex;
for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 934492bad8e0..8d1a898d0ba5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -680,16 +680,6 @@ static void xfrm_hash_resize(struct work_struct *work)
mutex_unlock(&hash_resize_mutex);
}
-static void xfrm_hash_reset_inexact_table(struct net *net)
-{
- struct xfrm_pol_inexact_bin *b;
-
- lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
-
- list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins)
- INIT_HLIST_HEAD(&b->hhead);
-}
-
/* Make sure *pol can be inserted into fastbin.
* Useful to check that later insert requests will be sucessful
* (provided xfrm_policy_lock is held throughout).
@@ -833,13 +823,13 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
u16 family)
{
unsigned int matched_s, matched_d;
- struct hlist_node *newpos = NULL;
struct xfrm_policy *policy, *p;
matched_s = 0;
matched_d = 0;
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+ struct hlist_node *newpos = NULL;
bool matches_s, matches_d;
if (!policy->bydst_reinsert)
@@ -849,16 +839,19 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
policy->bydst_reinsert = false;
hlist_for_each_entry(p, &n->hhead, bydst) {
- if (policy->priority >= p->priority)
+ if (policy->priority > p->priority)
+ newpos = &p->bydst;
+ else if (policy->priority == p->priority &&
+ policy->pos > p->pos)
newpos = &p->bydst;
else
break;
}
if (newpos)
- hlist_add_behind(&policy->bydst, newpos);
+ hlist_add_behind_rcu(&policy->bydst, newpos);
else
- hlist_add_head(&policy->bydst, &n->hhead);
+ hlist_add_head_rcu(&policy->bydst, &n->hhead);
/* paranoia checks follow.
* Check that the reinserted policy matches at least
@@ -893,12 +886,13 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
struct rb_root *new,
u16 family)
{
- struct rb_node **p, *parent = NULL;
struct xfrm_pol_inexact_node *node;
+ struct rb_node **p, *parent;
/* we should not have another subtree here */
WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
-
+restart:
+ parent = NULL;
p = &new->rb_node;
while (*p) {
u8 prefixlen;
@@ -918,12 +912,11 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
} else {
struct xfrm_policy *tmp;
- hlist_for_each_entry(tmp, &node->hhead, bydst)
- tmp->bydst_reinsert = true;
- hlist_for_each_entry(tmp, &n->hhead, bydst)
+ hlist_for_each_entry(tmp, &n->hhead, bydst) {
tmp->bydst_reinsert = true;
+ hlist_del_rcu(&tmp->bydst);
+ }
- INIT_HLIST_HEAD(&node->hhead);
xfrm_policy_inexact_list_reinsert(net, node, family);
if (node->prefixlen == n->prefixlen) {
@@ -935,8 +928,7 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
kfree_rcu(n, rcu);
n = node;
n->prefixlen = prefixlen;
- *p = new->rb_node;
- parent = NULL;
+ goto restart;
}
}
@@ -965,12 +957,11 @@ static void xfrm_policy_inexact_node_merge(struct net *net,
family);
}
- hlist_for_each_entry(tmp, &v->hhead, bydst)
- tmp->bydst_reinsert = true;
- hlist_for_each_entry(tmp, &n->hhead, bydst)
+ hlist_for_each_entry(tmp, &v->hhead, bydst) {
tmp->bydst_reinsert = true;
+ hlist_del_rcu(&tmp->bydst);
+ }
- INIT_HLIST_HEAD(&n->hhead);
xfrm_policy_inexact_list_reinsert(net, n, family);
}
@@ -1235,6 +1226,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ write_seqcount_begin(&xfrm_policy_hash_generation);
/* make sure that we can insert the indirect policies again before
* we start with destructive action.
@@ -1278,10 +1270,14 @@ static void xfrm_hash_rebuild(struct work_struct *work)
}
/* reset the bydst and inexact table in all directions */
- xfrm_hash_reset_inexact_table(net);
-
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
- INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(policy, n,
+ &net->xfrm.policy_inexact[dir],
+ bydst_inexact_list)
+ hlist_del_init(&policy->bydst_inexact_list);
+
hmask = net->xfrm.policy_bydst[dir].hmask;
odst = net->xfrm.policy_bydst[dir].table;
for (i = hmask; i >= 0; i--)
@@ -1313,6 +1309,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
newpos = NULL;
chain = policy_hash_bysel(net, &policy->selector,
policy->family, dir);
+
+ hlist_del_rcu(&policy->bydst);
+
if (!chain) {
void *p = xfrm_policy_inexact_insert(policy, dir, 0);
@@ -1334,6 +1333,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
out_unlock:
__xfrm_policy_inexact_flush(net);
+ write_seqcount_end(&xfrm_policy_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
mutex_unlock(&hash_resize_mutex);
@@ -2600,7 +2600,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst_copy_metrics(dst1, dst);
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
- __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
+ __u32 mark = 0;
+
+ if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
+ mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
family = xfrm[i]->props.family;
dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
@@ -3311,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
if (ifcb) {
xi = ifcb->decode_session(skb);
- if (xi)
+ if (xi) {
if_id = xi->p.if_id;
+ net = xi->net;
+ }
}
rcu_read_unlock();
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 23c92891758a..1bb971f46fc6 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_free);
-static void xfrm_state_gc_destroy(struct xfrm_state *x)
+static void ___xfrm_state_destroy(struct xfrm_state *x)
{
tasklet_hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
synchronize_rcu();
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
- xfrm_state_gc_destroy(x);
+ ___xfrm_state_destroy(x);
}
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
}
EXPORT_SYMBOL(xfrm_state_alloc);
-void __xfrm_state_destroy(struct xfrm_state *x)
+void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
WARN_ON(x->km.state != XFRM_STATE_DEAD);
- spin_lock_bh(&xfrm_state_gc_lock);
- hlist_add_head(&x->gclist, &xfrm_state_gc_list);
- spin_unlock_bh(&xfrm_state_gc_lock);
- schedule_work(&xfrm_state_gc_work);
+ if (sync) {
+ synchronize_rcu();
+ ___xfrm_state_destroy(x);
+ } else {
+ spin_lock_bh(&xfrm_state_gc_lock);
+ hlist_add_head(&x->gclist, &xfrm_state_gc_list);
+ spin_unlock_bh(&xfrm_state_gc_lock);
+ schedule_work(&xfrm_state_gc_work);
+ }
}
EXPORT_SYMBOL(__xfrm_state_destroy);
@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
}
#endif
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
int i, err = 0, cnt = 0;
@@ -730,7 +735,10 @@ restart:
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
- xfrm_state_put(x);
+ if (sync)
+ xfrm_state_put_sync(x);
+ else
+ xfrm_state_put(x);
if (!err)
cnt++;
@@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
if (atomic_read(&t->tunnel_users) == 2)
xfrm_state_delete(t);
atomic_dec(&t->tunnel_users);
- xfrm_state_put(t);
+ xfrm_state_put_sync(t);
x->tunnel = NULL;
}
}
@@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net)
unsigned int sz;
flush_work(&net->xfrm.state_hash_work);
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
flush_work(&xfrm_state_gc_work);
+ xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
WARN_ON(!list_empty(&net->xfrm.state_all));
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 277c1c46fe94..a131f9ff979e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
if (!ut[i].family)
ut[i].family = family;
- if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
- (ut[i].family != prev_family))
- return -EINVAL;
-
+ switch (ut[i].mode) {
+ case XFRM_MODE_TUNNEL:
+ case XFRM_MODE_BEET:
+ break;
+ default:
+ if (ut[i].family != prev_family)
+ return -EINVAL;
+ break;
+ }
if (ut[i].mode >= XFRM_MODE_MAX)
return -EINVAL;
@@ -1927,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_usersa_flush *p = nlmsg_data(nlh);
int err;
- err = xfrm_state_flush(net, p->proto, true);
+ err = xfrm_state_flush(net, p->proto, true, false);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index 8ae4940025f8..dbb817dbacfc 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -1,7 +1,6 @@
cpustat
fds_example
lathist
-load_sock_ops
lwt_len_hist
map_perf_test
offwaketime
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 66ae15f27c70..65e667bdf979 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -40,7 +40,6 @@ hostprogs-y += lwt_len_hist
hostprogs-y += xdp_tx_iptunnel
hostprogs-y += test_map_in_map
hostprogs-y += per_socket_stats_example
-hostprogs-y += load_sock_ops
hostprogs-y += xdp_redirect
hostprogs-y += xdp_redirect_map
hostprogs-y += xdp_redirect_cpu
@@ -53,6 +52,7 @@ hostprogs-y += xdpsock
hostprogs-y += xdp_fwd
hostprogs-y += task_fd_query
hostprogs-y += xdp_sample_pkts
+hostprogs-y += hbm
# Libbpf dependencies
LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
@@ -60,9 +60,9 @@ LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
CGROUP_HELPERS := ../../tools/testing/selftests/bpf/cgroup_helpers.o
TRACE_HELPERS := ../../tools/testing/selftests/bpf/trace_helpers.o
-fds_example-objs := bpf_load.o fds_example.o
-sockex1-objs := bpf_load.o sockex1_user.o
-sockex2-objs := bpf_load.o sockex2_user.o
+fds_example-objs := fds_example.o
+sockex1-objs := sockex1_user.o
+sockex2-objs := sockex2_user.o
sockex3-objs := bpf_load.o sockex3_user.o
tracex1-objs := bpf_load.o tracex1_user.o
tracex2-objs := bpf_load.o tracex2_user.o
@@ -71,7 +71,6 @@ tracex4-objs := bpf_load.o tracex4_user.o
tracex5-objs := bpf_load.o tracex5_user.o
tracex6-objs := bpf_load.o tracex6_user.o
tracex7-objs := bpf_load.o tracex7_user.o
-load_sock_ops-objs := bpf_load.o load_sock_ops.o
test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
trace_output-objs := bpf_load.o trace_output_user.o $(TRACE_HELPERS)
lathist-objs := bpf_load.o lathist_user.o
@@ -87,18 +86,18 @@ test_cgrp2_sock2-objs := bpf_load.o test_cgrp2_sock2.o
xdp1-objs := xdp1_user.o
# reuse xdp1 source intentionally
xdp2-objs := xdp1_user.o
-xdp_router_ipv4-objs := bpf_load.o xdp_router_ipv4_user.o
+xdp_router_ipv4-objs := xdp_router_ipv4_user.o
test_current_task_under_cgroup-objs := bpf_load.o $(CGROUP_HELPERS) \
test_current_task_under_cgroup_user.o
trace_event-objs := bpf_load.o trace_event_user.o $(TRACE_HELPERS)
sampleip-objs := bpf_load.o sampleip_user.o $(TRACE_HELPERS)
tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
-xdp_tx_iptunnel-objs := bpf_load.o xdp_tx_iptunnel_user.o
+xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
test_map_in_map-objs := bpf_load.o test_map_in_map_user.o
per_socket_stats_example-objs := cookie_uid_helper_example.o
-xdp_redirect-objs := bpf_load.o xdp_redirect_user.o
-xdp_redirect_map-objs := bpf_load.o xdp_redirect_map_user.o
+xdp_redirect-objs := xdp_redirect_user.o
+xdp_redirect_map-objs := xdp_redirect_map_user.o
xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
xdp_rxq_info-objs := xdp_rxq_info_user.o
@@ -109,6 +108,7 @@ xdpsock-objs := xdpsock_user.o
xdp_fwd-objs := xdp_fwd_user.o
task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
+hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS)
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -163,10 +163,10 @@ always += xdp2skb_meta_kern.o
always += syscall_tp_kern.o
always += cpustat_kern.o
always += xdp_adjust_tail_kern.o
-always += xdpsock_kern.o
always += xdp_fwd_kern.o
always += task_fd_query_kern.o
always += xdp_sample_pkts_kern.o
+always += hbm_out_kern.o
KBUILD_HOSTCFLAGS += -I$(objtree)/usr/include
KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/
@@ -266,6 +266,8 @@ $(BPF_SAMPLES_PATH)/*.c: verify_target_bpf $(LIBBPF)
$(src)/*.c: verify_target_bpf $(LIBBPF)
$(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
+$(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
+$(obj)/hbm.o: $(src)/hbm.h
# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
# But, there is no easy way to fix it, so just exclude it since it is
@@ -279,6 +281,7 @@ $(obj)/%.o: $(src)/%.c
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
new file mode 100644
index 000000000000..5cd7c1d1a5d5
--- /dev/null
+++ b/samples/bpf/asm_goto_workaround.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+#ifndef __ASM_GOTO_WORKAROUND_H
+#define __ASM_GOTO_WORKAROUND_H
+
+/* this will bring in asm_volatile_goto macro definition
+ * if enabled by compiler and config options.
+ */
+#include <linux/types.h>
+
+#ifdef asm_volatile_goto
+#undef asm_volatile_goto
+#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
+#endif
+
+#endif
diff --git a/samples/bpf/bpf_insn.h b/samples/bpf/bpf_insn.h
index 20dc5cefec84..544237980582 100644
--- a/samples/bpf/bpf_insn.h
+++ b/samples/bpf/bpf_insn.h
@@ -164,6 +164,16 @@ struct bpf_insn;
.off = OFF, \
.imm = 0 })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
@@ -174,6 +184,16 @@ struct bpf_insn;
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
/* Raw code statement block */
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
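As a hedged illustration (not part of the patch), the new JMP32 macros drop into the same instruction arrays as the existing helpers. The snippet below builds a trivial socket filter with BPF_JMP32_IMM using libbpf's bpf_load_program(); it needs a kernel that already understands the BPF_JMP32 instruction class.

/* Illustrative only: a tiny program using the new 32-bit jump macro.
 * Only the low 32 bits of R0 are compared, which BPF_JMP_IMM cannot
 * express in a single instruction.
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "bpf_insn.h"

static int load_jmp32_example(void)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),		 /* r0 = 1 (accept) */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 1, 1), /* if ((u32)r0 == 1) skip next */
		BPF_MOV64_IMM(BPF_REG_0, 0),		 /* r0 = 0 (drop) */
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
				sizeof(prog) / sizeof(prog[0]),
				"GPL", 0, NULL, 0);
}

int main(void)
{
	return load_jmp32_example() < 0 ? 1 : 0;
}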
diff --git a/samples/bpf/do_hbm_test.sh b/samples/bpf/do_hbm_test.sh
new file mode 100755
index 000000000000..56c8b4115c95
--- /dev/null
+++ b/samples/bpf/do_hbm_test.sh
@@ -0,0 +1,436 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2019 Facebook
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+
+Usage() {
+ echo "Script for testing HBM (Host Bandwidth Manager) framework."
+ echo "It creates a cgroup to use for testing and loads a BPF program to limit"
+ echo "egress or ingress bandwidth. It then uses iperf3 or netperf to create"
+ echo "loads. The output is the goodput in Mbps (unless -D was used)."
+ echo ""
+ echo "USAGE: $name [out] [-b=<prog>|--bpf=<prog>] [-c=<cc>|--cc=<cc>] [-D]"
+ echo " [-d=<delay>|--delay=<delay>] [--debug] [-E]"
+ echo " [-f=<#flows>|--flows=<#flows>] [-h] [-i=<id>|--id=<id>]"
+ echo " [-l] [-N] [-p=<port>|--port=<port>] [-P]"
+ echo " [-q=<qdisc>] [-r=<rate>|--rate=<rate>] [-R] [-s=<server>|--server=<server>]"
+ echo " [-S|--stats] [-t=<time>|--time=<time>] [-w] [cubic|dctcp]"
+ echo " Where:"
+ echo " out egress (default)"
+ echo " -b or --bpf BPF program filename to load and attach."
+ echo " Default is hbm_out_kern.o for egress."
+ echo " -c or --cc TCP congestion control (cubic or dctcp)"
+ echo " --debug print BPF trace buffer"
+ echo " -d or --delay add a delay in ms using netem"
+ echo " -D In addition to the goodput in Mbps, it also outputs"
+ echo " other detailed information. This information is"
+ echo " test dependent (i.e. iperf3 or netperf)."
+ echo " -E enable ECN (not required for dctcp)"
+ echo " -f or --flows number of concurrent flows (default=1)"
+ echo " -i or --id cgroup id (an integer, default is 1)"
+ echo " -N use netperf instead of iperf3"
+ echo " -l also limit flows using loopback"
+ echo " -h Help"
+ echo " -p or --port iperf3 port (default is 5201)"
+ echo " -P use an iperf3 instance for each flow"
+ echo " -q use the specified qdisc"
+ echo " -r or --rate rate in Mbps (default is 1Gbps)"
+ echo " -R Use TCP_RR for netperf. 1st flow has req"
+ echo " size of 10KB, rest of 1MB. Reply in all"
+ echo " cases is 1 byte."
+ echo " More detailed output for each flow can be found"
+ echo " in the files netperf.<cg>.<flow>, where <cg> is the"
+ echo " cgroup id as specified with the -i flag, and <flow>"
+ echo " is the flow id starting at 1 and increasing by 1 for"
+ echo " each flow (as specified by -f)."
+ echo " -s or --server hostname of netperf server. Used to create netperf"
+ echo " test traffic between two hosts (default is within host)"
+ echo " netserver must be running on the host."
+ echo " -S or --stats whether to update hbm stats (default is no)."
+ echo " -t or --time duration of iperf3 in seconds (default=5)"
+ echo " -w Work conserving flag. cgroup can increase its"
+ echo " bandwidth beyond the rate limit specified"
+ echo " while there is available bandwidth. Current"
+ echo " implementation assumes there is only one NIC"
+ echo " (eth0), but can be extended to support multiple"
+ echo " NICs."
+ echo " cubic or dctcp specify which TCP CC to use"
+ echo " "
+ exit
+}
+
+#set -x
+
+debug_flag=0
+args="$@"
+name="$0"
+netem=0
+cc=x
+dir="-o"
+dir_name="out"
+dur=5
+flows=1
+id=1
+prog=""
+port=5201
+rate=1000
+multi_iperf=0
+flow_cnt=1
+use_netperf=0
+rr=0
+ecn=0
+details=0
+server=""
+qdisc=""
+flags=""
+do_stats=0
+
+function start_hbm () {
+ rm -f hbm.out
+ echo "./hbm $dir -n $id -r $rate -t $dur $flags $dbg $prog" > hbm.out
+ echo " " >> hbm.out
+ ./hbm $dir -n $id -r $rate -t $dur $flags $dbg $prog >> hbm.out 2>&1 &
+ echo $!
+}
+
+processArgs () {
+ for i in $args ; do
+ case $i in
+ # Support for upcoming ingress rate limiting
+ #in) # support for upcoming ingress rate limiting
+ # dir="-i"
+ # dir_name="in"
+ # ;;
+ out)
+ dir="-o"
+ dir_name="out"
+ ;;
+ -b=*|--bpf=*)
+ prog="${i#*=}"
+ ;;
+ -c=*|--cc=*)
+ cc="${i#*=}"
+ ;;
+ --debug)
+ flags="$flags -d"
+ debug_flag=1
+ ;;
+ -d=*|--delay=*)
+ netem="${i#*=}"
+ ;;
+ -D)
+ details=1
+ ;;
+ -E)
+ ecn=1
+ ;;
+ # Support for upcoming fq Early Departure Time egress rate limiting
+ #--edt)
+ # prog="hbm_out_edt_kern.o"
+ # qdisc="fq"
+ # ;;
+ -f=*|--flows=*)
+ flows="${i#*=}"
+ ;;
+ -i=*|--id=*)
+ id="${i#*=}"
+ ;;
+ -l)
+ flags="$flags -l"
+ ;;
+ -N)
+ use_netperf=1
+ ;;
+ -p=*|--port=*)
+ port="${i#*=}"
+ ;;
+ -P)
+ multi_iperf=1
+ ;;
+ -q=*)
+ qdisc="${i#*=}"
+ ;;
+ -r=*|--rate=*)
+ rate="${i#*=}"
+ ;;
+ -R)
+ rr=1
+ ;;
+ -s=*|--server=*)
+ server="${i#*=}"
+ ;;
+ -S|--stats)
+ flags="$flags -s"
+ do_stats=1
+ ;;
+ -t=*|--time=*)
+ dur="${i#*=}"
+ ;;
+ -w)
+ flags="$flags -w"
+ ;;
+ cubic)
+ cc=cubic
+ ;;
+ dctcp)
+ cc=dctcp
+ ;;
+ *)
+ echo "Unknown arg:$i"
+ Usage
+ ;;
+ esac
+ done
+}
+
+processArgs
+
+if [ $debug_flag -eq 1 ] ; then
+ rm -f hbm_out.log
+fi
+
+hbm_pid=$(start_hbm)
+usleep 100000
+
+host=`hostname`
+cg_base_dir=/sys/fs/cgroup
+cg_dir="$cg_base_dir/cgroup-test-work-dir/hbm$id"
+
+echo $$ >> $cg_dir/cgroup.procs
+
+ulimit -l unlimited
+
+rm -f ss.out
+rm -f hbm.[0-9]*.$dir_name
+if [ $ecn -ne 0 ] ; then
+ sysctl -w -q -n net.ipv4.tcp_ecn=1
+fi
+
+if [ $use_netperf -eq 0 ] ; then
+ cur_cc=`sysctl -n net.ipv4.tcp_congestion_control`
+ if [ "$cc" != "x" ] ; then
+ sysctl -w -q -n net.ipv4.tcp_congestion_control=$cc
+ fi
+fi
+
+if [ "$netem" -ne "0" ] ; then
+ if [ "$qdisc" != "" ] ; then
+ echo "WARNING: Ignoring -q option because the -d option was used"
+ fi
+ tc qdisc del dev lo root > /dev/null 2>&1
+ tc qdisc add dev lo root netem delay $netem\ms > /dev/null 2>&1
+elif [ "$qdisc" != "" ] ; then
+ tc qdisc del dev lo root > /dev/null 2>&1
+ tc qdisc add dev lo root $qdisc > /dev/null 2>&1
+fi
+
+n=0
+m=$[$dur * 5]
+hn="::1"
+if [ $use_netperf -ne 0 ] ; then
+ if [ "$server" != "" ] ; then
+ hn=$server
+ fi
+fi
+
+( ping6 -i 0.2 -c $m $hn > ping.out 2>&1 ) &
+
+if [ $use_netperf -ne 0 ] ; then
+ begNetserverPid=`ps ax | grep netserver | grep --invert-match "grep" | \
+ awk '{ print $1 }'`
+ if [ "$begNetserverPid" == "" ] ; then
+ if [ "$server" == "" ] ; then
+ ( ./netserver > /dev/null 2>&1) &
+ usleep 100000
+ fi
+ fi
+ flow_cnt=1
+ if [ "$server" == "" ] ; then
+ np_server=$host
+ else
+ np_server=$server
+ fi
+ if [ "$cc" == "x" ] ; then
+ np_cc=""
+ else
+ np_cc="-K $cc,$cc"
+ fi
+ replySize=1
+ while [ $flow_cnt -le $flows ] ; do
+ if [ $rr -ne 0 ] ; then
+ reqSize=1M
+ if [ $flow_cnt -eq 1 ] ; then
+ reqSize=10K
+ fi
+ if [ "$dir" == "-i" ] ; then
+ replySize=$reqSize
+ reqSize=1
+ fi
+ ( ./netperf -H $np_server -l $dur -f m -j -t TCP_RR -- -r $reqSize,$replySize $np_cc -k P50_LATENCY,P90_LATENCY,LOCAL_TRANSPORT_RETRANS,REMOTE_TRANSPORT_RETRANS,LOCAL_SEND_THROUGHPUT,LOCAL_RECV_THROUGHPUT,REQUEST_SIZE,RESPONSE_SIZE > netperf.$id.$flow_cnt ) &
+ else
+ if [ "$dir" == "-i" ] ; then
+ ( ./netperf -H $np_server -l $dur -f m -j -t TCP_RR -- -r 1,10M $np_cc -k P50_LATENCY,P90_LATENCY,LOCAL_TRANSPORT_RETRANS,LOCAL_SEND_THROUGHPUT,REMOTE_TRANSPORT_RETRANS,REMOTE_SEND_THROUGHPUT,REQUEST_SIZE,RESPONSE_SIZE > netperf.$id.$flow_cnt ) &
+ else
+ ( ./netperf -H $np_server -l $dur -f m -j -t TCP_STREAM -- $np_cc -k P50_LATENCY,P90_LATENCY,LOCAL_TRANSPORT_RETRANS,LOCAL_SEND_THROUGHPUT,REQUEST_SIZE,RESPONSE_SIZE > netperf.$id.$flow_cnt ) &
+ fi
+ fi
+ flow_cnt=$[flow_cnt+1]
+ done
+
+# sleep for duration of test (plus some buffer)
+ n=$[dur+2]
+ sleep $n
+
+# force graceful termination of netperf
+ pids=`pgrep netperf`
+ for p in $pids ; do
+ kill -SIGALRM $p
+ done
+
+ flow_cnt=1
+ rate=0
+ if [ $details -ne 0 ] ; then
+ echo ""
+ echo "Details for HBM in cgroup $id"
+ if [ $do_stats -eq 1 ] ; then
+ if [ -e hbm.$id.$dir_name ] ; then
+ cat hbm.$id.$dir_name
+ fi
+ fi
+ fi
+ while [ $flow_cnt -le $flows ] ; do
+ if [ "$dir" == "-i" ] ; then
+ r=`cat netperf.$id.$flow_cnt | grep -o "REMOTE_SEND_THROUGHPUT=[0-9]*" | grep -o "[0-9]*"`
+ else
+ r=`cat netperf.$id.$flow_cnt | grep -o "LOCAL_SEND_THROUGHPUT=[0-9]*" | grep -o "[0-9]*"`
+ fi
+ echo "rate for flow $flow_cnt: $r"
+ rate=$[rate+r]
+ if [ $details -ne 0 ] ; then
+ echo "-----"
+ echo "Details for cgroup $id, flow $flow_cnt"
+ cat netperf.$id.$flow_cnt
+ fi
+ flow_cnt=$[flow_cnt+1]
+ done
+ if [ $details -ne 0 ] ; then
+ echo ""
+ delay=`grep "avg" ping.out | grep -o "= [0-9.]*/[0-9.]*" | grep -o "[0-9.]*$"`
+ echo "PING AVG DELAY:$delay"
+ echo "AGGREGATE_GOODPUT:$rate"
+ else
+ echo $rate
+ fi
+elif [ $multi_iperf -eq 0 ] ; then
+ (iperf3 -s -p $port -1 > /dev/null 2>&1) &
+ usleep 100000
+ iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id
+ rates=`grep receiver iperf.$id | grep -o "[0-9.]* Mbits" | grep -o "^[0-9]*"`
+ rate=`echo $rates | grep -o "[0-9]*$"`
+
+ if [ $details -ne 0 ] ; then
+ echo ""
+ echo "Details for HBM in cgroup $id"
+ if [ $do_stats -eq 1 ] ; then
+ if [ -e hbm.$id.$dir_name ] ; then
+ cat hbm.$id.$dir_name
+ fi
+ fi
+ delay=`grep "avg" ping.out | grep -o "= [0-9.]*/[0-9.]*" | grep -o "[0-9.]*$"`
+ echo "PING AVG DELAY:$delay"
+ echo "AGGREGATE_GOODPUT:$rate"
+ else
+ echo $rate
+ fi
+else
+ flow_cnt=1
+ while [ $flow_cnt -le $flows ] ; do
+ (iperf3 -s -p $port -1 > /dev/null 2>&1) &
+ ( iperf3 -c $host -p $port -i 0 -P 1 -f m -t $dur | grep receiver | grep -o "[0-9.]* Mbits" | grep -o "^[0-9]*" | grep -o "[0-9]*$" > iperf3.$id.$flow_cnt ) &
+ port=$[port+1]
+ flow_cnt=$[flow_cnt+1]
+ done
+ n=$[dur+1]
+ sleep $n
+ flow_cnt=1
+ rate=0
+ if [ $details -ne 0 ] ; then
+ echo ""
+ echo "Details for HBM in cgroup $id"
+ if [ $do_stats -eq 1 ] ; then
+ if [ -e hbm.$id.$dir_name ] ; then
+ cat hbm.$id.$dir_name
+ fi
+ fi
+ fi
+
+ while [ $flow_cnt -le $flows ] ; do
+ r=`cat iperf3.$id.$flow_cnt`
+# echo "rate for flow $flow_cnt: $r"
+ if [ $details -ne 0 ] ; then
+ echo "Rate for cgroup $id, flow $flow_cnt LOCAL_SEND_THROUGHPUT=$r"
+ fi
+ rate=$[rate+r]
+ flow_cnt=$[flow_cnt+1]
+ done
+ if [ $details -ne 0 ] ; then
+ delay=`grep "avg" ping.out | grep -o "= [0-9.]*/[0-9.]*" | grep -o "[0-9.]*$"`
+ echo "PING AVG DELAY:$delay"
+ echo "AGGREGATE_GOODPUT:$rate"
+ else
+ echo $rate
+ fi
+fi
+
+if [ $use_netperf -eq 0 ] ; then
+ sysctl -w -q -n net.ipv4.tcp_congestion_control=$cur_cc
+fi
+if [ $ecn -ne 0 ] ; then
+ sysctl -w -q -n net.ipv4.tcp_ecn=0
+fi
+if [ "$netem" -ne "0" ] ; then
+ tc qdisc del dev lo root > /dev/null 2>&1
+fi
+
+sleep 2
+
+hbmPid=`ps ax | grep "hbm " | grep --invert-match "grep" | awk '{ print $1 }'`
+if [ "$hbmPid" == "$hbm_pid" ] ; then
+ kill $hbm_pid
+fi
+
+sleep 1
+
+# Detach any BPF programs that may have lingered
+ttx=`bpftool cgroup tree | grep hbm`
+v=2
+for x in $ttx ; do
+ if [ "${x:0:36}" == "/sys/fs/cgroup/cgroup-test-work-dir/" ] ; then
+ cg=$x ; v=0
+ else
+ if [ $v -eq 0 ] ; then
+ id=$x ; v=1
+ else
+ if [ $v -eq 1 ] ; then
+ type=$x ; bpftool cgroup detach $cg $type id $id
+ v=0
+ fi
+ fi
+ fi
+done
+
+if [ $use_netperf -ne 0 ] ; then
+ if [ "$server" == "" ] ; then
+ if [ "$begNetserverPid" == "" ] ; then
+ netserverPid=`ps ax | grep netserver | grep --invert-match "grep" | awk '{ print $1 }'`
+ if [ "$netserverPid" != "" ] ; then
+ kill $netserverPid
+ fi
+ fi
+ fi
+fi
+exit
diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c
index 9854854f05d1..e51eb060244e 100644
--- a/samples/bpf/fds_example.c
+++ b/samples/bpf/fds_example.c
@@ -14,8 +14,8 @@
#include <bpf/bpf.h>
+#include "bpf/libbpf.h"
#include "bpf_insn.h"
-#include "bpf_load.h"
#include "sock_example.h"
#define BPF_F_PIN (1 << 0)
@@ -57,10 +57,14 @@ static int bpf_prog_create(const char *object)
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn);
+ char bpf_log_buf[BPF_LOG_BUF_SIZE];
+ struct bpf_object *obj;
+ int prog_fd;
if (object) {
- assert(!load_bpf_file((char *)object));
- return prog_fd[0];
+ assert(!bpf_prog_load(object, BPF_PROG_TYPE_UNSPEC,
+ &obj, &prog_fd));
+ return prog_fd;
} else {
return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER,
insns, insns_cnt, "GPL", 0,
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
new file mode 100644
index 000000000000..8408ccb7409f
--- /dev/null
+++ b/samples/bpf/hbm.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Example program for Host Bandwidth Management
+ *
+ * This program loads a cgroup skb BPF program to enforce cgroup output
+ * (egress) or input (ingress) bandwidth limits.
+ *
+ * USAGE: hbm [-d] [-l] [-n <id>] [-r <rate>] [-s] [-t <secs>] [-w] [-h] [prog]
+ * Where:
+ * -d Print BPF trace debug buffer
+ * -l Also limit flows doing loopback
+ * -n <#> To create cgroup "/hbm#" and attach prog
+ * Default is /hbm1
+ * -r <rate> Rate limit in Mbps
+ * -s Get HBM stats (marked, dropped, etc.)
+ * -t <time> Exit after specified seconds (default is 0)
+ * -w Work conserving flag. cgroup can increase its bandwidth
+ * beyond the rate limit specified while there is available
+ * bandwidth. Current implementation assumes there is only one
+ * NIC (eth0), but can be extended to support multiple NICs.
+ * Currently only supported for egress.
+ * -h Print this info
+ * prog BPF program file name. Name defaults to hbm_out_kern.o
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/unistd.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+
+#include "bpf_load.h"
+#include "bpf_rlimit.h"
+#include "cgroup_helpers.h"
+#include "hbm.h"
+#include "bpf_util.h"
+#include "bpf/bpf.h"
+#include "bpf/libbpf.h"
+
+bool outFlag = true;
+int minRate = 1000; /* cgroup rate limit in Mbps */
+int rate = 1000; /* can grow if rate conserving is enabled */
+int dur = 1;
+bool stats_flag;
+bool loopback_flag;
+bool debugFlag;
+bool work_conserving_flag;
+
+static void Usage(void);
+static void read_trace_pipe2(void);
+static void do_error(char *msg, bool errno_flag);
+
+#define DEBUGFS "/sys/kernel/debug/tracing/"
+
+struct bpf_object *obj;
+int bpfprog_fd;
+int cgroup_storage_fd;
+
+static void read_trace_pipe2(void)
+{
+ int trace_fd;
+ FILE *outf;
+ char *outFname = "hbm_out.log";
+
+ trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
+ if (trace_fd < 0) {
+ printf("Error opening trace_pipe\n");
+ return;
+ }
+
+// Future support of ingress
+// if (!outFlag)
+// outFname = "hbm_in.log";
+ outf = fopen(outFname, "w");
+
+ if (outf == NULL)
+ printf("Error creating %s\n", outFname);
+
+ while (1) {
+ static char buf[4097];
+ ssize_t sz;
+
+ sz = read(trace_fd, buf, sizeof(buf) - 1);
+ if (sz > 0) {
+ buf[sz] = 0;
+ puts(buf);
+ if (outf != NULL) {
+ fprintf(outf, "%s\n", buf);
+ fflush(outf);
+ }
+ }
+ }
+}
+
+static void do_error(char *msg, bool errno_flag)
+{
+ if (errno_flag)
+ printf("ERROR: %s, errno: %d\n", msg, errno);
+ else
+ printf("ERROR: %s\n", msg);
+ exit(1);
+}
+
+static int prog_load(char *prog)
+{
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .file = prog,
+ .expected_attach_type = BPF_CGROUP_INET_EGRESS,
+ };
+ int map_fd;
+ struct bpf_map *map;
+
+ int ret = 0;
+
+ if (access(prog, R_OK) < 0) {
+ printf("Error accessing file %s: %s\n", prog, strerror(errno));
+ return 1;
+ }
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &bpfprog_fd))
+ ret = 1;
+ if (!ret) {
+ map = bpf_object__find_map_by_name(obj, "queue_stats");
+ map_fd = bpf_map__fd(map);
+ if (map_fd < 0) {
+ printf("Map not found: %s\n", strerror(map_fd));
+ ret = 1;
+ }
+ }
+
+ if (ret) {
+ printf("ERROR: bpf_prog_load_xattr failed for: %s\n", prog);
+ printf(" Output from verifier:\n%s\n------\n", bpf_log_buf);
+ ret = -1;
+ } else {
+ ret = map_fd;
+ }
+
+ return ret;
+}
+
+static int run_bpf_prog(char *prog, int cg_id)
+{
+ int map_fd;
+ int rc = 0;
+ int key = 0;
+ int cg1 = 0;
+ int type = BPF_CGROUP_INET_EGRESS;
+ char cg_dir[100];
+ struct hbm_queue_stats qstats = {0};
+
+ sprintf(cg_dir, "/hbm%d", cg_id);
+ map_fd = prog_load(prog);
+ if (map_fd == -1)
+ return 1;
+
+ if (setup_cgroup_environment()) {
+ printf("ERROR: setting cgroup environment\n");
+ goto err;
+ }
+ cg1 = create_and_get_cgroup(cg_dir);
+ if (!cg1) {
+ printf("ERROR: create_and_get_cgroup\n");
+ goto err;
+ }
+ if (join_cgroup(cg_dir)) {
+ printf("ERROR: join_cgroup\n");
+ goto err;
+ }
+
+ qstats.rate = rate;
+ qstats.stats = stats_flag ? 1 : 0;
+ qstats.loopback = loopback_flag ? 1 : 0;
+ if (bpf_map_update_elem(map_fd, &key, &qstats, BPF_ANY)) {
+ printf("ERROR: Could not update map element\n");
+ goto err;
+ }
+
+ if (!outFlag)
+ type = BPF_CGROUP_INET_INGRESS;
+ if (bpf_prog_attach(bpfprog_fd, cg1, type, 0)) {
+ printf("ERROR: bpf_prog_attach fails!\n");
+ log_err("Attaching prog");
+ goto err;
+ }
+
+ if (work_conserving_flag) {
+ struct timeval t0, t_last, t_new;
+ FILE *fin;
+ unsigned long long last_eth_tx_bytes, new_eth_tx_bytes;
+ signed long long last_cg_tx_bytes, new_cg_tx_bytes;
+ signed long long delta_time, delta_bytes, delta_rate;
+ int delta_ms;
+#define DELTA_RATE_CHECK 10000 /* in us */
+#define RATE_THRESHOLD 9500000000 /* 9.5 Gbps */
+
+ bpf_map_lookup_elem(map_fd, &key, &qstats);
+ if (gettimeofday(&t0, NULL) < 0)
+ do_error("gettimeofday failed", true);
+ t_last = t0;
+ fin = fopen("/sys/class/net/eth0/statistics/tx_bytes", "r");
+ if (fscanf(fin, "%llu", &last_eth_tx_bytes) != 1)
+ do_error("fscanf fails", false);
+ fclose(fin);
+ last_cg_tx_bytes = qstats.bytes_total;
+ while (true) {
+ usleep(DELTA_RATE_CHECK);
+ if (gettimeofday(&t_new, NULL) < 0)
+ do_error("gettimeofday failed", true);
+ delta_ms = (t_new.tv_sec - t0.tv_sec) * 1000 +
+ (t_new.tv_usec - t0.tv_usec)/1000;
+ if (delta_ms > dur * 1000)
+ break;
+ delta_time = (t_new.tv_sec - t_last.tv_sec) * 1000000 +
+ (t_new.tv_usec - t_last.tv_usec);
+ if (delta_time == 0)
+ continue;
+ t_last = t_new;
+ fin = fopen("/sys/class/net/eth0/statistics/tx_bytes",
+ "r");
+ if (fscanf(fin, "%llu", &new_eth_tx_bytes) != 1)
+ do_error("fscanf fails", false);
+ fclose(fin);
+ printf(" new_eth_tx_bytes:%llu\n",
+ new_eth_tx_bytes);
+ bpf_map_lookup_elem(map_fd, &key, &qstats);
+ new_cg_tx_bytes = qstats.bytes_total;
+ delta_bytes = new_eth_tx_bytes - last_eth_tx_bytes;
+ last_eth_tx_bytes = new_eth_tx_bytes;
+ delta_rate = (delta_bytes * 8000000) / delta_time;
+ printf("%5d - eth_rate:%.1fGbps cg_rate:%.3fGbps",
+ delta_ms, delta_rate/1000000000.0,
+ rate/1000.0);
+ if (delta_rate < RATE_THRESHOLD) {
+ /* can increase cgroup rate limit, but first
+ * check if we are using the current limit.
+ * Currently increasing by 6.25%, unknown
+ * if that is the optimal rate.
+ */
+ int rate_diff100;
+
+ delta_bytes = new_cg_tx_bytes -
+ last_cg_tx_bytes;
+ last_cg_tx_bytes = new_cg_tx_bytes;
+ delta_rate = (delta_bytes * 8000000) /
+ delta_time;
+ printf(" rate:%.3fGbps",
+ delta_rate/1000000000.0);
+ rate_diff100 = (((long long)rate)*1000000 -
+ delta_rate) * 100 /
+ (((long long) rate) * 1000000);
+ printf(" rdiff:%d", rate_diff100);
+ if (rate_diff100 <= 3) {
+ rate += (rate >> 4);
+ if (rate > RATE_THRESHOLD / 1000000)
+ rate = RATE_THRESHOLD / 1000000;
+ qstats.rate = rate;
+ printf(" INC\n");
+ } else {
+ printf("\n");
+ }
+ } else {
+ /* Need to decrease cgroup rate limit.
+ * Currently decreasing by 12.5%, unknown
+ * if that is optimal
+ */
+ printf(" DEC\n");
+ rate -= (rate >> 3);
+ if (rate < minRate)
+ rate = minRate;
+ qstats.rate = rate;
+ }
+ if (bpf_map_update_elem(map_fd, &key, &qstats, BPF_ANY))
+ do_error("update map element fails", false);
+ }
+ } else {
+ sleep(dur);
+ }
+ // Get stats!
+ if (stats_flag && bpf_map_lookup_elem(map_fd, &key, &qstats)) {
+ char fname[100];
+ FILE *fout;
+
+ if (!outFlag)
+ sprintf(fname, "hbm.%d.in", cg_id);
+ else
+ sprintf(fname, "hbm.%d.out", cg_id);
+ fout = fopen(fname, "w");
+ fprintf(fout, "id:%d\n", cg_id);
+ fprintf(fout, "ERROR: Could not lookup queue_stats\n");
+ } else if (stats_flag && qstats.lastPacketTime >
+ qstats.firstPacketTime) {
+ long long delta_us = (qstats.lastPacketTime -
+ qstats.firstPacketTime)/1000;
+ unsigned int rate_mbps = ((qstats.bytes_total -
+ qstats.bytes_dropped) * 8 /
+ delta_us);
+ double percent_pkts, percent_bytes;
+ char fname[100];
+ FILE *fout;
+
+// Future support of ingress
+// if (!outFlag)
+// sprintf(fname, "hbm.%d.in", cg_id);
+// else
+ sprintf(fname, "hbm.%d.out", cg_id);
+ fout = fopen(fname, "w");
+ fprintf(fout, "id:%d\n", cg_id);
+ fprintf(fout, "rate_mbps:%d\n", rate_mbps);
+ fprintf(fout, "duration:%.1f secs\n",
+ (qstats.lastPacketTime - qstats.firstPacketTime) /
+ 1000000000.0);
+ fprintf(fout, "packets:%d\n", (int)qstats.pkts_total);
+ fprintf(fout, "bytes_MB:%d\n", (int)(qstats.bytes_total /
+ 1000000));
+ fprintf(fout, "pkts_dropped:%d\n", (int)qstats.pkts_dropped);
+ fprintf(fout, "bytes_dropped_MB:%d\n",
+ (int)(qstats.bytes_dropped /
+ 1000000));
+ // Marked Pkts and Bytes
+ percent_pkts = (qstats.pkts_marked * 100.0) /
+ (qstats.pkts_total + 1);
+ percent_bytes = (qstats.bytes_marked * 100.0) /
+ (qstats.bytes_total + 1);
+ fprintf(fout, "pkts_marked_percent:%6.2f\n", percent_pkts);
+ fprintf(fout, "bytes_marked_percent:%6.2f\n", percent_bytes);
+
+ // Dropped Pkts and Bytes
+ percent_pkts = (qstats.pkts_dropped * 100.0) /
+ (qstats.pkts_total + 1);
+ percent_bytes = (qstats.bytes_dropped * 100.0) /
+ (qstats.bytes_total + 1);
+ fprintf(fout, "pkts_dropped_percent:%6.2f\n", percent_pkts);
+ fprintf(fout, "bytes_dropped_percent:%6.2f\n", percent_bytes);
+ fclose(fout);
+ }
+
+ if (debugFlag)
+ read_trace_pipe2();
+ return rc;
+err:
+ rc = 1;
+
+ if (cg1)
+ close(cg1);
+ cleanup_cgroup_environment();
+
+ return rc;
+}
+
+static void Usage(void)
+{
+ printf("This program loads a cgroup skb BPF program to enforce\n"
+ "cgroup output (egress) bandwidth limits.\n\n"
+ "USAGE: hbm [-o] [-d] [-l] [-n <id>] [-r <rate>] [-s]\n"
+ " [-t <secs>] [-w] [-h] [prog]\n"
+ " Where:\n"
+ " -o indicates egress direction (default)\n"
+ " -d print BPF trace debug buffer\n"
+ " -l also limit flows using loopback\n"
+ " -n <#> to create cgroup \"/hbm#\" and attach prog\n"
+ " Default is /hbm1\n"
+ " -r <rate> Rate in Mbps\n"
+ " -s Update HBM stats\n"
+ " -t <time> Exit after specified seconds (default is 0)\n"
+ " -w Work conserving flag. cgroup can increase\n"
+ " bandwidth beyond the rate limit specified\n"
+ " while there is available bandwidth. Current\n"
+ " implementation assumes there is only eth0\n"
+ " but can be extended to support multiple NICs\n"
+ " -h print this info\n"
+ " prog BPF program file name. Name defaults to\n"
+ " hbm_out_kern.o\n");
+}
+
+int main(int argc, char **argv)
+{
+ char *prog = "hbm_out_kern.o";
+ int k;
+ int cg_id = 1;
+ char *optstring = "iodln:r:st:wh";
+
+ while ((k = getopt(argc, argv, optstring)) != -1) {
+ switch (k) {
+ case'o':
+ break;
+ case 'd':
+ debugFlag = true;
+ break;
+ case 'l':
+ loopback_flag = true;
+ break;
+ case 'n':
+ cg_id = atoi(optarg);
+ break;
+ case 'r':
+ minRate = atoi(optarg) * 1.024;
+ rate = minRate;
+ break;
+ case 's':
+ stats_flag = true;
+ break;
+ case 't':
+ dur = atoi(optarg);
+ break;
+ case 'w':
+ work_conserving_flag = true;
+ break;
+ case '?':
+ if (optopt == 'n' || optopt == 'r' || optopt == 't')
+ fprintf(stderr,
+ "Option -%c requires an argument.\n\n",
+ optopt);
+ case 'h':
+ // fallthrough
+ default:
+ Usage();
+ return 0;
+ }
+ }
+
+ if (optind < argc)
+ prog = argv[optind];
+ printf("HBM prog: %s\n", prog != NULL ? prog : "NULL");
+
+ return run_bpf_prog(prog, cg_id);
+}
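The work-conserving loop above is effectively an AIMD controller: while eth0 stays below RATE_THRESHOLD and the cgroup is within about 3% of its current limit, the limit grows by rate >> 4 (6.25%); once the NIC saturates, it shrinks by rate >> 3 (12.5%), never dropping below the -r value. A stand-alone sketch of that step, with made-up sample numbers (illustrative only), is:

/* Hedged sketch of the rate-adjustment step used by the work-conserving
 * mode in hbm.c.  Rates are in the scaled Mbps units used by main();
 * the sample inputs below are made up.
 */
#include <stdio.h>

#define RATE_THRESHOLD 9500000000ULL	/* 9.5 Gbps, in bits/sec */

static int step_rate(int rate, int min_rate, unsigned long long eth_bps,
		     unsigned long long cg_bps)
{
	if (eth_bps < RATE_THRESHOLD) {
		/* rdiff: how far (in %) the cgroup sits below its limit */
		long long limit_bps = (long long)rate * 1000000;
		int rdiff = (limit_bps - (long long)cg_bps) * 100 / limit_bps;

		if (rdiff <= 3) {		/* using the limit: grow 6.25% */
			rate += rate >> 4;
			if (rate > RATE_THRESHOLD / 1000000)
				rate = RATE_THRESHOLD / 1000000;
		}
	} else {				/* NIC saturated: shrink 12.5% */
		rate -= rate >> 3;
		if (rate < min_rate)
			rate = min_rate;
	}
	return rate;
}

int main(void)
{
	int rate = 1024;			/* -r 1000 after 1.024 scaling */

	rate = step_rate(rate, 1024, 5000000000ULL, 1015000000ULL);
	printf("cgroup busy, NIC idle: %d\n", rate);	/* grows to 1088 */
	rate = step_rate(rate, 1024, 9700000000ULL, 900000000ULL);
	printf("NIC saturated:         %d\n", rate);	/* shrinks, floored at 1024 */
	return 0;
}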
diff --git a/samples/bpf/hbm.h b/samples/bpf/hbm.h
new file mode 100644
index 000000000000..518e8147d084
--- /dev/null
+++ b/samples/bpf/hbm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Include file for Host Bandwidth Management (HBM) programs
+ */
+struct hbm_vqueue {
+ struct bpf_spin_lock lock;
+ /* 4 byte hole */
+ unsigned long long lasttime; /* In ns */
+ int credit; /* In bytes */
+ unsigned int rate; /* In bytes per NS << 20 */
+};
+
+struct hbm_queue_stats {
+ unsigned long rate; /* in Mbps*/
+ unsigned long stats:1, /* get HBM stats (marked, dropped,..) */
+ loopback:1; /* also limit flows using loopback */
+ unsigned long long pkts_marked;
+ unsigned long long bytes_marked;
+ unsigned long long pkts_dropped;
+ unsigned long long bytes_dropped;
+ unsigned long long pkts_total;
+ unsigned long long bytes_total;
+ unsigned long long firstPacketTime;
+ unsigned long long lastPacketTime;
+};
diff --git a/samples/bpf/hbm_kern.h b/samples/bpf/hbm_kern.h
new file mode 100644
index 000000000000..c5635d924193
--- /dev/null
+++ b/samples/bpf/hbm_kern.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Include file for sample Host Bandwidth Manager (HBM) BPF programs
+ */
+#define KBUILD_MODNAME "foo"
+#include <stddef.h>
+#include <stdbool.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+#include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
+#include <net/inet_ecn.h>
+#include "bpf_endian.h"
+#include "bpf_helpers.h"
+#include "hbm.h"
+
+#define DROP_PKT 0
+#define ALLOW_PKT 1
+#define TCP_ECN_OK 1
+
+#define HBM_DEBUG 0 // Set to 1 to enable debugging
+#if HBM_DEBUG
+#define bpf_printk(fmt, ...) \
+({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+#else
+#define bpf_printk(fmt, ...)
+#endif
+
+#define INITIAL_CREDIT_PACKETS 100
+#define MAX_BYTES_PER_PACKET 1500
+#define MARK_THRESH (40 * MAX_BYTES_PER_PACKET)
+#define DROP_THRESH (80 * 5 * MAX_BYTES_PER_PACKET)
+#define LARGE_PKT_DROP_THRESH (DROP_THRESH - (15 * MAX_BYTES_PER_PACKET))
+#define MARK_REGION_SIZE (LARGE_PKT_DROP_THRESH - MARK_THRESH)
+#define LARGE_PKT_THRESH 120
+#define MAX_CREDIT (100 * MAX_BYTES_PER_PACKET)
+#define INIT_CREDIT (INITIAL_CREDIT_PACKETS * MAX_BYTES_PER_PACKET)
+
+// rate in bytes per ns << 20
+#define CREDIT_PER_NS(delta, rate) ((((u64)(delta)) * (rate)) >> 20)
+
+struct bpf_map_def SEC("maps") queue_state = {
+ .type = BPF_MAP_TYPE_CGROUP_STORAGE,
+ .key_size = sizeof(struct bpf_cgroup_storage_key),
+ .value_size = sizeof(struct hbm_vqueue),
+};
+BPF_ANNOTATE_KV_PAIR(queue_state, struct bpf_cgroup_storage_key,
+ struct hbm_vqueue);
+
+struct bpf_map_def SEC("maps") queue_stats = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(struct hbm_queue_stats),
+ .max_entries = 1,
+};
+BPF_ANNOTATE_KV_PAIR(queue_stats, int, struct hbm_queue_stats);
+
+struct hbm_pkt_info {
+ bool is_ip;
+ bool is_tcp;
+ short ecn;
+};
+
+static __always_inline void hbm_get_pkt_info(struct __sk_buff *skb,
+ struct hbm_pkt_info *pkti)
+{
+ struct iphdr iph;
+ struct ipv6hdr *ip6h;
+
+ bpf_skb_load_bytes(skb, 0, &iph, 12);
+ if (iph.version == 6) {
+ ip6h = (struct ipv6hdr *)&iph;
+ pkti->is_ip = true;
+ pkti->is_tcp = (ip6h->nexthdr == 6);
+ pkti->ecn = (ip6h->flow_lbl[0] >> 4) & INET_ECN_MASK;
+ } else if (iph.version == 4) {
+ pkti->is_ip = true;
+ pkti->is_tcp = (iph.protocol == 6);
+ pkti->ecn = iph.tos & INET_ECN_MASK;
+ } else {
+ pkti->is_ip = false;
+ pkti->is_tcp = false;
+ pkti->ecn = 0;
+ }
+}
+
+static __always_inline void hbm_init_vqueue(struct hbm_vqueue *qdp, int rate)
+{
+ bpf_printk("Initializing queue_state, rate:%d\n", rate * 128);
+ qdp->lasttime = bpf_ktime_get_ns();
+ qdp->credit = INIT_CREDIT;
+ qdp->rate = rate * 128;
+}
+
+static __always_inline void hbm_update_stats(struct hbm_queue_stats *qsp,
+ int len,
+ unsigned long long curtime,
+ bool congestion_flag,
+ bool drop_flag)
+{
+ if (qsp != NULL) {
+ // Following is needed for work conserving
+ __sync_add_and_fetch(&(qsp->bytes_total), len);
+ if (qsp->stats) {
+ // Optionally update statistics
+ if (qsp->firstPacketTime == 0)
+ qsp->firstPacketTime = curtime;
+ qsp->lastPacketTime = curtime;
+ __sync_add_and_fetch(&(qsp->pkts_total), 1);
+ if (congestion_flag || drop_flag) {
+ __sync_add_and_fetch(&(qsp->pkts_marked), 1);
+ __sync_add_and_fetch(&(qsp->bytes_marked), len);
+ }
+ if (drop_flag) {
+ __sync_add_and_fetch(&(qsp->pkts_dropped), 1);
+ __sync_add_and_fetch(&(qsp->bytes_dropped),
+ len);
+ }
+ }
+ }
+}
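CREDIT_PER_NS() works in a fixed-point unit of bytes per nanosecond scaled by 2^20, so 1 Mbps corresponds to roughly 131.072 in that unit; hbm.c multiplies the -r argument by 1.024 and hbm_init_vqueue() multiplies by 128, which together give exactly that factor (128 * 1.024 = 131.072). A small worked example of the arithmetic (values illustrative only):

/* Hedged sketch of the fixed-point units behind CREDIT_PER_NS(). */
#include <stdio.h>

#define CREDIT_PER_NS(delta, rate) ((((unsigned long long)(delta)) * (rate)) >> 20)

int main(void)
{
	unsigned int rate_mbps = 1000;			/* -r 1000 */
	unsigned int rate = (unsigned int)(rate_mbps * 1.024) * 128; /* 131072 */
	unsigned long long delta_ns = 1000000000ULL;	/* one second */

	/* prints 125000000 bytes, i.e. 1 Gbps expressed in bytes/sec */
	printf("credit per second: %llu bytes\n",
	       CREDIT_PER_NS(delta_ns, rate));
	return 0;
}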
diff --git a/samples/bpf/hbm_out_kern.c b/samples/bpf/hbm_out_kern.c
new file mode 100644
index 000000000000..f806863d0b79
--- /dev/null
+++ b/samples/bpf/hbm_out_kern.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Sample Host Bandwidth Manager (HBM) BPF program.
+ *
+ * A cgroup skb BPF egress program to limit cgroup output bandwidth.
+ * It uses a modified virtual token bucket queue to limit average
+ * egress bandwidth. The implementation uses credits instead of tokens.
+ * Negative credits imply that queueing would have happened (this is
+ * a virtual queue, so no queueing is done by it). However, queueing may
+ * occur at the actual qdisc (which is not used for rate limiting).
+ *
+ * This implementation uses 3 thresholds, one to start marking packets and
+ * the other two to drop packets:
+ * CREDIT
+ * - <--------------------------|------------------------> +
+ * | | | 0
+ * | Large pkt |
+ * | drop thresh |
+ * Small pkt drop Mark threshold
+ * thresh
+ *
+ * The effect of marking depends on the type of packet:
+ * a) If the packet is ECN enabled and it is a TCP packet, then the packet
+ * is ECN marked.
+ * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr
+ * to reduce the congestion window. The current implementation uses a linear
+ * distribution (0% probability at marking threshold, 100% probability
+ * at drop threshold).
+ * c) If the packet is not a TCP packet, then it is dropped.
+ *
+ * If the credit is below the drop threshold, the packet is dropped. If it
+ * is a TCP packet, then it also calls tcp_cwr since packets dropped by
+ * a cgroup skb BPF program do not automatically trigger a call to
+ * tcp_cwr in the current kernel code.
+ *
+ * This BPF program actually uses 2 drop thresholds, one threshold
+ * for larger packets (>= 120 bytes) and another for smaller packets. This
+ * protects smaller packets such as SYNs, ACKs, etc.
+ *
+ * The default bandwidth limit is set at 1Gbps but this can be changed by
+ * a user program through a shared BPF map. In addition, by default this BPF
+ * program does not limit connections using loopback. This behavior can be
+ * overwritten by the user program. There is also an option to calculate
+ * some statistics, such as percent of packets marked or dropped, which
+ * the user program can access.
+ *
+ * A later patch provides such a program (hbm.c).
+ */
+
+#include "hbm_kern.h"
+
+SEC("cgroup_skb/egress")
+int _hbm_out_cg(struct __sk_buff *skb)
+{
+ struct hbm_pkt_info pkti;
+ int len = skb->len;
+ unsigned int queue_index = 0;
+ unsigned long long curtime;
+ int credit;
+ signed long long delta = 0, zero = 0;
+ int max_credit = MAX_CREDIT;
+ bool congestion_flag = false;
+ bool drop_flag = false;
+ bool cwr_flag = false;
+ struct hbm_vqueue *qdp;
+ struct hbm_queue_stats *qsp = NULL;
+ int rv = ALLOW_PKT;
+
+ qsp = bpf_map_lookup_elem(&queue_stats, &queue_index);
+ if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
+ return ALLOW_PKT;
+
+ hbm_get_pkt_info(skb, &pkti);
+
+ // We may want to account for the length of headers in len
+ // calculation, like ETH header + overhead, specially if it
+ // is a gso packet. But I am not doing it right now.
+
+ qdp = bpf_get_local_storage(&queue_state, 0);
+ if (!qdp)
+ return ALLOW_PKT;
+ else if (qdp->lasttime == 0)
+ hbm_init_vqueue(qdp, 1024);
+
+ curtime = bpf_ktime_get_ns();
+
+ // Begin critical section
+ bpf_spin_lock(&qdp->lock);
+ credit = qdp->credit;
+ delta = curtime - qdp->lasttime;
+ /* delta < 0 implies that another process with a curtime greater
+ * than ours beat us to the critical section and already added
+ * the new credit, so we should not add it ourselves
+ */
+ if (delta > 0) {
+ qdp->lasttime = curtime;
+ credit += CREDIT_PER_NS(delta, qdp->rate);
+ if (credit > MAX_CREDIT)
+ credit = MAX_CREDIT;
+ }
+ credit -= len;
+ qdp->credit = credit;
+ bpf_spin_unlock(&qdp->lock);
+ // End critical section
+
+ // Check if we should update rate
+ if (qsp != NULL && (qsp->rate * 128) != qdp->rate) {
+ qdp->rate = qsp->rate * 128;
+ bpf_printk("Updating rate: %d (1sec:%llu bits)\n",
+ (int)qdp->rate,
+ CREDIT_PER_NS(1000000000, qdp->rate) * 8);
+ }
+
+ // Set flags (drop, congestion, cwr)
+ // Dropping => we are congested, so ignore congestion flag
+ if (credit < -DROP_THRESH ||
+ (len > LARGE_PKT_THRESH &&
+ credit < -LARGE_PKT_DROP_THRESH)) {
+ // Very congested, set drop flag
+ drop_flag = true;
+ } else if (credit < 0) {
+ // Congested, set congestion flag
+ if (pkti.ecn) {
+ if (credit < -MARK_THRESH)
+ congestion_flag = true;
+ else
+ congestion_flag = false;
+ } else {
+ congestion_flag = true;
+ }
+ }
+
+ if (congestion_flag) {
+ if (!bpf_skb_ecn_set_ce(skb)) {
+ if (len > LARGE_PKT_THRESH) {
+ // Problem if too many small packets?
+ drop_flag = true;
+ }
+ }
+ }
+
+ if (drop_flag)
+ rv = DROP_PKT;
+
+ hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag);
+
+ if (rv == DROP_PKT)
+ __sync_add_and_fetch(&(qdp->credit), len);
+
+ return rv;
+}
+char _license[] SEC("license") = "GPL";
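To make the threshold diagram in the header comment concrete, here is a hedged user-space re-statement of the same decision ladder (the ECN special-casing is folded into the labels, and the credit values are made up); the constants mirror hbm_kern.h.

/* Illustrative only: classify a credit level the way the comment above
 * describes.  Not part of the patch.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_BYTES_PER_PACKET	1500
#define MARK_THRESH		(40 * MAX_BYTES_PER_PACKET)
#define DROP_THRESH		(80 * 5 * MAX_BYTES_PER_PACKET)
#define LARGE_PKT_DROP_THRESH	(DROP_THRESH - (15 * MAX_BYTES_PER_PACKET))
#define LARGE_PKT_THRESH	120

static const char *classify(int credit, int len)
{
	bool large = len > LARGE_PKT_THRESH;

	if (credit < -DROP_THRESH || (large && credit < -LARGE_PKT_DROP_THRESH))
		return "drop";
	if (credit < -MARK_THRESH)
		return "mark (ECN CE or tcp_cwr)";
	if (credit < 0)
		return "mark non-ECN flows only";
	return "allow";
}

int main(void)
{
	int credits[] = { 10000, -10000, -100000, -590000, -700000 };
	unsigned int i;

	for (i = 0; i < sizeof(credits) / sizeof(credits[0]); i++)
		printf("credit %7d, 1500B pkt -> %s\n",
		       credits[i], classify(credits[i], 1500));
	return 0;
}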
diff --git a/samples/bpf/load_sock_ops.c b/samples/bpf/load_sock_ops.c
deleted file mode 100644
index 8ecb41ea0c03..000000000000
--- a/samples/bpf/load_sock_ops.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/* Copyright (c) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <linux/bpf.h>
-#include <bpf/bpf.h>
-#include "bpf_load.h"
-#include <unistd.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/unistd.h>
-
-static void usage(char *pname)
-{
- printf("USAGE:\n %s [-l] <cg-path> <prog filename>\n", pname);
- printf("\tLoad and attach a sock_ops program to the specified "
- "cgroup\n");
- printf("\tIf \"-l\" is used, the program will continue to run\n");
- printf("\tprinting the BPF log buffer\n");
- printf("\tIf the specified filename does not end in \".o\", it\n");
- printf("\tappends \"_kern.o\" to the name\n");
- printf("\n");
- printf(" %s -r <cg-path>\n", pname);
- printf("\tDetaches the currently attached sock_ops program\n");
- printf("\tfrom the specified cgroup\n");
- printf("\n");
- exit(1);
-}
-
-int main(int argc, char **argv)
-{
- int logFlag = 0;
- int error = 0;
- char *cg_path;
- char fn[500];
- char *prog;
- int cg_fd;
-
- if (argc < 3)
- usage(argv[0]);
-
- if (!strcmp(argv[1], "-r")) {
- cg_path = argv[2];
- cg_fd = open(cg_path, O_DIRECTORY, O_RDONLY);
- error = bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
- if (error) {
- printf("ERROR: bpf_prog_detach: %d (%s)\n",
- error, strerror(errno));
- return 2;
- }
- return 0;
- } else if (!strcmp(argv[1], "-h")) {
- usage(argv[0]);
- } else if (!strcmp(argv[1], "-l")) {
- logFlag = 1;
- if (argc < 4)
- usage(argv[0]);
- }
-
- prog = argv[argc - 1];
- cg_path = argv[argc - 2];
- if (strlen(prog) > 480) {
- fprintf(stderr, "ERROR: program name too long (> 480 chars)\n");
- return 3;
- }
- cg_fd = open(cg_path, O_DIRECTORY, O_RDONLY);
-
- if (!strcmp(prog + strlen(prog)-2, ".o"))
- strcpy(fn, prog);
- else
- sprintf(fn, "%s_kern.o", prog);
- if (logFlag)
- printf("loading bpf file:%s\n", fn);
- if (load_bpf_file(fn)) {
- printf("ERROR: load_bpf_file failed for: %s\n", fn);
- printf("%s", bpf_log_buf);
- return 4;
- }
- if (logFlag)
- printf("TCP BPF Loaded %s\n", fn);
-
- error = bpf_prog_attach(prog_fd[0], cg_fd, BPF_CGROUP_SOCK_OPS, 0);
- if (error) {
- printf("ERROR: bpf_prog_attach: %d (%s)\n",
- error, strerror(errno));
- return 5;
- } else if (logFlag) {
- read_trace_pipe();
- }
-
- return error;
-}
diff --git a/samples/bpf/sock_example.c b/samples/bpf/sock_example.c
index 60ec467c78ab..00aae1d33fca 100644
--- a/samples/bpf/sock_example.c
+++ b/samples/bpf/sock_example.c
@@ -99,7 +99,7 @@ int main(void)
{
FILE *f;
- f = popen("ping -c5 localhost", "r");
+ f = popen("ping -4 -c5 localhost", "r");
(void)f;
return test_sock();
diff --git a/samples/bpf/sockex1_user.c b/samples/bpf/sockex1_user.c
index 93ec01c56104..7f90796ae15a 100644
--- a/samples/bpf/sockex1_user.c
+++ b/samples/bpf/sockex1_user.c
@@ -3,30 +3,33 @@
#include <assert.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
+#include "bpf/libbpf.h"
#include "sock_example.h"
#include <unistd.h>
#include <arpa/inet.h>
int main(int ac, char **argv)
{
+ struct bpf_object *obj;
+ int map_fd, prog_fd;
char filename[256];
- FILE *f;
int i, sock;
+ FILE *f;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
+ if (bpf_prog_load(filename, BPF_PROG_TYPE_SOCKET_FILTER,
+ &obj, &prog_fd))
return 1;
- }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
sock = open_raw_sock("lo");
- assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, prog_fd,
- sizeof(prog_fd[0])) == 0);
+ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
+ sizeof(prog_fd)) == 0);
- f = popen("ping -c5 localhost", "r");
+ f = popen("ping -4 -c5 localhost", "r");
(void) f;
for (i = 0; i < 5; i++) {
@@ -34,13 +37,13 @@ int main(int ac, char **argv)
int key;
key = IPPROTO_TCP;
- assert(bpf_map_lookup_elem(map_fd[0], &key, &tcp_cnt) == 0);
+ assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
key = IPPROTO_UDP;
- assert(bpf_map_lookup_elem(map_fd[0], &key, &udp_cnt) == 0);
+ assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0);
key = IPPROTO_ICMP;
- assert(bpf_map_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0);
+ assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
printf("TCP %lld UDP %lld ICMP %lld bytes\n",
tcp_cnt, udp_cnt, icmp_cnt);
diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c
index 1d5c6e9a6d27..bc257333ad92 100644
--- a/samples/bpf/sockex2_user.c
+++ b/samples/bpf/sockex2_user.c
@@ -3,7 +3,7 @@
#include <assert.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
+#include "bpf/libbpf.h"
#include "sock_example.h"
#include <unistd.h>
#include <arpa/inet.h>
@@ -17,32 +17,35 @@ struct pair {
int main(int ac, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ struct bpf_object *obj;
+ int map_fd, prog_fd;
char filename[256];
- FILE *f;
int i, sock;
+ FILE *f;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
setrlimit(RLIMIT_MEMLOCK, &r);
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
+ if (bpf_prog_load(filename, BPF_PROG_TYPE_SOCKET_FILTER,
+ &obj, &prog_fd))
return 1;
- }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map");
sock = open_raw_sock("lo");
- assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, prog_fd,
- sizeof(prog_fd[0])) == 0);
+ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
+ sizeof(prog_fd)) == 0);
- f = popen("ping -c5 localhost", "r");
+ f = popen("ping -4 -c5 localhost", "r");
(void) f;
for (i = 0; i < 5; i++) {
int key = 0, next_key;
struct pair value;
- while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
- bpf_map_lookup_elem(map_fd[0], &next_key, &value);
+ while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(map_fd, &next_key, &value);
printf("ip %s bytes %lld packets %lld\n",
inet_ntoa((struct in_addr){htonl(next_key)}),
value.bytes, value.packets);
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index 9d02e0404719..bbb1cd0666a9 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -58,7 +58,7 @@ int main(int argc, char **argv)
sizeof(__u32)) == 0);
if (argc > 1)
- f = popen("ping -c5 localhost", "r");
+ f = popen("ping -4 -c5 localhost", "r");
else
f = popen("netperf -l 4 localhost", "r");
(void) f;
diff --git a/samples/bpf/task_fd_query_kern.c b/samples/bpf/task_fd_query_kern.c
index f4b0a9ea674d..fb56fc2a3e5d 100644
--- a/samples/bpf/task_fd_query_kern.c
+++ b/samples/bpf/task_fd_query_kern.c
@@ -4,7 +4,7 @@
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
-SEC("kprobe/blk_start_request")
+SEC("kprobe/blk_mq_start_request")
int bpf_prog1(struct pt_regs *ctx)
{
return 0;
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index 8381d792f138..aff2b4ae914e 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -311,7 +311,7 @@ int main(int argc, char **argv)
}
/* test two functions in the corresponding *_kern.c file */
- CHECK_AND_RET(test_debug_fs_kprobe(0, "blk_start_request",
+ CHECK_AND_RET(test_debug_fs_kprobe(0, "blk_mq_start_request",
BPF_FD_TYPE_KPROBE));
CHECK_AND_RET(test_debug_fs_kprobe(1, "blk_account_io_completion",
BPF_FD_TYPE_KRETPROBE));
diff --git a/samples/bpf/tcp_basertt_kern.c b/samples/bpf/tcp_basertt_kern.c
index 4bf4fc597db9..6ef1625e8b2c 100644
--- a/samples/bpf/tcp_basertt_kern.c
+++ b/samples/bpf/tcp_basertt_kern.c
@@ -7,7 +7,7 @@
* BPF program to set base_rtt to 80us when host is running TCP-NV and
* both hosts are in the same datacenter (as determined by IPv6 prefix).
*
- * Use load_sock_ops to load this BPF program.
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
diff --git a/samples/bpf/tcp_bpf.readme b/samples/bpf/tcp_bpf.readme
index 831fb601e3c9..fee746621aec 100644
--- a/samples/bpf/tcp_bpf.readme
+++ b/samples/bpf/tcp_bpf.readme
@@ -8,14 +8,16 @@ a cgroupv2 and attach a bash shell to the group.
bash
echo $$ >> /tmp/cgroupv2/foo/cgroup.procs
-Anything that runs under this shell belongs to the foo cgroupv2 To load
+Anything that runs under this shell belongs to the foo cgroupv2. To load
(attach) one of the tcp_*_kern.o programs:
- ./load_sock_ops -l /tmp/cgroupv2/foo tcp_basertt_kern.o
+ bpftool prog load tcp_basertt_kern.o /sys/fs/bpf/tcp_prog
+ bpftool cgroup attach /tmp/cgroupv2/foo sock_ops pinned /sys/fs/bpf/tcp_prog
+ bpftool prog tracelog
-If the "-l" flag is used, the load_sock_ops program will continue to run
-printing the BPF log buffer. The tcp_*_kern.o programs use special print
-functions to print logging information (if enabled by the ifdef).
+"bpftool prog tracelog" will continue to run printing the BPF log buffer.
+The tcp_*_kern.o programs use special print functions to print logging
+information (if enabled by the ifdef).
If using netperf/netserver to create traffic, you need to run them under the
cgroupv2 to which the BPF programs are attached (i.e. under bash shell
@@ -23,4 +25,4 @@ attached to the cgroupv2).
To remove (unattach) a socket_ops BPF program from a cgroupv2:
- ./load_sock_ops -r /tmp/cgroupv2/foo
+ bpftool cgroup detach /tmp/cgroupv2/foo sock_ops pinned /sys/fs/bpf/tcp_prog
diff --git a/samples/bpf/tcp_bufs_kern.c b/samples/bpf/tcp_bufs_kern.c
index 0566b7fa38a1..e03e204739fa 100644
--- a/samples/bpf/tcp_bufs_kern.c
+++ b/samples/bpf/tcp_bufs_kern.c
@@ -9,7 +9,7 @@
* doing appropriate checks that indicate the hosts are far enough
* away (i.e. large RTT).
*
- * Use load_sock_ops to load this BPF program.
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
diff --git a/samples/bpf/tcp_clamp_kern.c b/samples/bpf/tcp_clamp_kern.c
index f4225c9d2c0c..a0dc2d254aca 100644
--- a/samples/bpf/tcp_clamp_kern.c
+++ b/samples/bpf/tcp_clamp_kern.c
@@ -9,7 +9,7 @@
* the same datacenter. For this example, we assume they are within the same
* datacenter when the first 5.5 bytes of their IPv6 addresses are the same.
*
- * Use load_sock_ops to load this BPF program.
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
diff --git a/samples/bpf/tcp_cong_kern.c b/samples/bpf/tcp_cong_kern.c
index ad0f1ba8206a..4fd3ca979a06 100644
--- a/samples/bpf/tcp_cong_kern.c
+++ b/samples/bpf/tcp_cong_kern.c
@@ -7,7 +7,7 @@
* BPF program to set congestion control to dctcp when both hosts are
* in the same datacenter (as determined by IPv6 prefix).
*
- * Use load_sock_ops to load this BPF program.
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
diff --git a/samples/bpf/tcp_iw_kern.c b/samples/bpf/tcp_iw_kern.c
index 4ca5ecc9f580..9b139ec69560 100644
--- a/samples/bpf/tcp_iw_kern.c
+++ b/samples/bpf/tcp_iw_kern.c
@@ -9,7 +9,7 @@
* would usually be done after doing appropriate checks that indicate
* the hosts are far enough away (i.e. large RTT).
*
- * Use load_sock_ops to load this BPF program.
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
diff --git a/samples/bpf/tcp_rwnd_kern.c b/samples/bpf/tcp_rwnd_kern.c
index 09ff65b40b31..cc71ee96e044 100644
--- a/samples/bpf/tcp_rwnd_kern.c
+++ b/samples/bpf/tcp_rwnd_kern.c
@@ -8,7 +8,7 @@
* and the first 5.5 bytes of the IPv6 addresses are not the same (in this
* example that means both hosts are not in the same datacenter).
*
- * Use load_sock_ops to load this BPF program.
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
diff --git a/samples/bpf/tcp_synrto_kern.c b/samples/bpf/tcp_synrto_kern.c
index 232bb242823e..ca87ed34f896 100644
--- a/samples/bpf/tcp_synrto_kern.c
+++ b/samples/bpf/tcp_synrto_kern.c
@@ -8,7 +8,7 @@
* and the first 5.5 bytes of the IPv6 addresses are the same (in this example
* that means both hosts are in the same datacenter).
*
- * Use load_sock_ops to load this BPF program.
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
diff --git a/samples/bpf/tcp_tos_reflect_kern.c b/samples/bpf/tcp_tos_reflect_kern.c
index d51dab19eca6..de788be6f862 100644
--- a/samples/bpf/tcp_tos_reflect_kern.c
+++ b/samples/bpf/tcp_tos_reflect_kern.c
@@ -4,7 +4,7 @@
*
* BPF program to automatically reflect TOS option from received syn packet
*
- * Use load_sock_ops to load this BPF program.
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
*/
#include <uapi/linux/bpf.h>
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index d7b68ef5ba79..0bb6507256b7 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -77,7 +77,7 @@ static int test_foo_bar(void)
/* Create cgroup /foo, get fd, and join it */
foo = create_and_get_cgroup(FOO);
- if (!foo)
+ if (foo < 0)
goto err;
if (join_cgroup(FOO))
@@ -94,7 +94,7 @@ static int test_foo_bar(void)
/* Create cgroup /foo/bar, get fd, and join it */
bar = create_and_get_cgroup(BAR);
- if (!bar)
+ if (bar < 0)
goto err;
if (join_cgroup(BAR))
@@ -298,19 +298,19 @@ static int test_multiprog(void)
goto err;
cg1 = create_and_get_cgroup("/cg1");
- if (!cg1)
+ if (cg1 < 0)
goto err;
cg2 = create_and_get_cgroup("/cg1/cg2");
- if (!cg2)
+ if (cg2 < 0)
goto err;
cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
- if (!cg3)
+ if (cg3 < 0)
goto err;
cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
- if (!cg4)
+ if (cg4 < 0)
goto err;
cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
- if (!cg5)
+ if (cg5 < 0)
goto err;
if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
index 2259f997a26c..f082d6ac59f0 100644
--- a/samples/bpf/test_current_task_under_cgroup_user.c
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -32,7 +32,7 @@ int main(int argc, char **argv)
cg2 = create_and_get_cgroup(CGROUP_PATH);
- if (!cg2)
+ if (cg2 < 0)
goto err;
if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
index 1a81e6a5c2ea..c9544a4ce61a 100644
--- a/samples/bpf/tracex2_user.c
+++ b/samples/bpf/tracex2_user.c
@@ -131,7 +131,7 @@ int main(int ac, char **argv)
signal(SIGTERM, int_exit);
/* start 'ping' in the background to have some kfree_skb events */
- f = popen("ping -c5 localhost", "r");
+ f = popen("ping -4 -c5 localhost", "r");
(void) f;
/* start 'dd' in the background to have plenty of 'write' syscalls */
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
index 9974c3d7c18b..ea1d4c19c132 100644
--- a/samples/bpf/tracex3_kern.c
+++ b/samples/bpf/tracex3_kern.c
@@ -20,7 +20,7 @@ struct bpf_map_def SEC("maps") my_map = {
/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
* example will no longer be meaningful
*/
-SEC("kprobe/blk_start_request")
+SEC("kprobe/blk_mq_start_request")
int bpf_prog1(struct pt_regs *ctx)
{
long rq = PT_REGS_PARM1(ctx);
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index 0a197f86ac43..6a64e93365e1 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -22,11 +22,23 @@
#include "bpf/libbpf.h"
static int ifindex;
-static __u32 xdp_flags;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static __u32 prog_id;
static void int_exit(int sig)
{
- bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ __u32 curr_prog_id = 0;
+
+ if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on a given interface\n");
+ else
+ printf("program on interface changed, not removing\n");
exit(0);
}
@@ -63,7 +75,8 @@ static void usage(const char *prog)
"usage: %s [OPTS] IFACE\n\n"
"OPTS:\n"
" -S use skb-mode\n"
- " -N enforce native mode\n",
+ " -N enforce native mode\n"
+ " -F force loading prog\n",
prog);
}
@@ -73,11 +86,14 @@ int main(int argc, char **argv)
struct bpf_prog_load_attr prog_load_attr = {
.prog_type = BPF_PROG_TYPE_XDP,
};
- const char *optstr = "SN";
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ const char *optstr = "FSN";
int prog_fd, map_fd, opt;
struct bpf_object *obj;
struct bpf_map *map;
char filename[256];
+ int err;
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
@@ -87,6 +103,9 @@ int main(int argc, char **argv)
case 'N':
xdp_flags |= XDP_FLAGS_DRV_MODE;
break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
default:
usage(basename(argv[0]));
return 1;
@@ -103,7 +122,7 @@ int main(int argc, char **argv)
return 1;
}
- ifindex = if_nametoindex(argv[1]);
+ ifindex = if_nametoindex(argv[optind]);
if (!ifindex) {
perror("if_nametoindex");
return 1;
@@ -135,6 +154,13 @@ int main(int argc, char **argv)
return 1;
}
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id = info.id;
+
poll_stats(map_fd, 2);
return 0;
diff --git a/samples/bpf/xdp_adjust_tail_user.c b/samples/bpf/xdp_adjust_tail_user.c
index 3042ce37dae8..07e1b9269e49 100644
--- a/samples/bpf/xdp_adjust_tail_user.c
+++ b/samples/bpf/xdp_adjust_tail_user.c
@@ -24,12 +24,25 @@
#define STATS_INTERVAL_S 2U
static int ifindex = -1;
-static __u32 xdp_flags;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static __u32 prog_id;
static void int_exit(int sig)
{
- if (ifindex > -1)
- bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ __u32 curr_prog_id = 0;
+
+ if (ifindex > -1) {
+ if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on a given iface\n");
+ else
+ printf("program on interface changed, not removing\n");
+ }
exit(0);
}
@@ -60,6 +73,7 @@ static void usage(const char *cmd)
printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n");
printf(" -S use skb-mode\n");
printf(" -N enforce native mode\n");
+ printf(" -F force loading prog\n");
printf(" -h Display this help\n");
}
@@ -70,12 +84,15 @@ int main(int argc, char **argv)
.prog_type = BPF_PROG_TYPE_XDP,
};
unsigned char opt_flags[256] = {};
+ const char *optstr = "i:T:SNFh";
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
unsigned int kill_after_s = 0;
- const char *optstr = "i:T:SNh";
int i, prog_fd, map_fd, opt;
struct bpf_object *obj;
struct bpf_map *map;
char filename[256];
+ int err;
for (i = 0; i < strlen(optstr); i++)
if (optstr[i] != 'h' && 'a' <= optstr[i] && optstr[i] <= 'z')
@@ -96,6 +113,9 @@ int main(int argc, char **argv)
case 'N':
xdp_flags |= XDP_FLAGS_DRV_MODE;
break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
default:
usage(argv[0]);
return 1;
@@ -142,9 +162,15 @@ int main(int argc, char **argv)
return 1;
}
- poll_stats(map_fd, kill_after_s);
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return 1;
+ }
+ prog_id = info.id;
- bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ poll_stats(map_fd, kill_after_s);
+ int_exit(0);
return 0;
}
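
The two XDP user samples above, like the ones that follow, now share one lifecycle: attach the program, record its kernel-assigned program id, and on exit detach only if that id is still the one bound to the interface. The sketch below condenses that pattern using only the libbpf calls already visible in these hunks; ifindex, xdp_flags and prog_id stand in for the samples' globals.

#include <bpf/bpf.h>
#include "bpf/libbpf.h"

/* Right after a successful bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags). */
static int remember_prog_id(int prog_fd, __u32 *prog_id)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);

	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
		return -1;
	*prog_id = info.id;		/* compared again in the signal handler */
	return 0;
}

/* Signal-handler body: remove the program only if it is still ours. */
static void guarded_detach(int ifindex, __u32 xdp_flags, __u32 prog_id)
{
	__u32 curr_prog_id = 0;

	if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags))
		return;
	if (curr_prog_id == prog_id)
		bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
	/* else: nothing attached, or another program took over - leave it */
}
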
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index 2d23054aaccf..586b294d72d3 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -24,20 +24,26 @@ static const char *__doc__ =
/* How many xdp_progs are defined in _kern.c */
#define MAX_PROG 6
-/* Wanted to get rid of bpf_load.h and fake-"libbpf.h" (and instead
- * use bpf/libbpf.h), but cannot as (currently) needed for XDP
- * attaching to a device via bpf_set_link_xdp_fd()
- */
#include <bpf/bpf.h>
-#include "bpf_load.h"
+#include "bpf/libbpf.h"
#include "bpf_util.h"
static int ifindex = -1;
static char ifname_buf[IF_NAMESIZE];
static char *ifname;
-
-static __u32 xdp_flags;
+static __u32 prog_id;
+
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static int cpu_map_fd;
+static int rx_cnt_map_fd;
+static int redirect_err_cnt_map_fd;
+static int cpumap_enqueue_cnt_map_fd;
+static int cpumap_kthread_cnt_map_fd;
+static int cpus_available_map_fd;
+static int cpus_count_map_fd;
+static int cpus_iterator_map_fd;
+static int exception_cnt_map_fd;
/* Exit return codes */
#define EXIT_OK 0
@@ -51,27 +57,50 @@ static const struct option long_options[] = {
{"help", no_argument, NULL, 'h' },
{"dev", required_argument, NULL, 'd' },
{"skb-mode", no_argument, NULL, 'S' },
- {"debug", no_argument, NULL, 'D' },
{"sec", required_argument, NULL, 's' },
- {"prognum", required_argument, NULL, 'p' },
+ {"progname", required_argument, NULL, 'p' },
{"qsize", required_argument, NULL, 'q' },
{"cpu", required_argument, NULL, 'c' },
{"stress-mode", no_argument, NULL, 'x' },
{"no-separators", no_argument, NULL, 'z' },
+ {"force", no_argument, NULL, 'F' },
{0, 0, NULL, 0 }
};
static void int_exit(int sig)
{
- fprintf(stderr,
- "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
- ifindex, ifname);
- if (ifindex > -1)
- bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ __u32 curr_prog_id = 0;
+
+ if (ifindex > -1) {
+ if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(EXIT_FAIL);
+ }
+ if (prog_id == curr_prog_id) {
+ fprintf(stderr,
+ "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
+ ifindex, ifname);
+ bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ } else if (!curr_prog_id) {
+ printf("couldn't find a prog id on a given iface\n");
+ } else {
+ printf("program on interface changed, not removing\n");
+ }
+ }
exit(EXIT_OK);
}
-static void usage(char *argv[])
+static void print_avail_progs(struct bpf_object *obj)
+{
+ struct bpf_program *pos;
+
+ bpf_object__for_each_program(pos, obj) {
+ if (bpf_program__is_xdp(pos))
+ printf(" %s\n", bpf_program__title(pos, false));
+ }
+}
+
+static void usage(char *argv[], struct bpf_object *obj)
{
int i;
@@ -89,6 +118,8 @@ static void usage(char *argv[])
long_options[i].val);
printf("\n");
}
+ printf("\n Programs to be used for --progname:\n");
+ print_avail_progs(obj);
printf("\n");
}
@@ -263,7 +294,7 @@ static __u64 calc_errs_pps(struct datarec *r,
static void stats_print(struct stats_record *stats_rec,
struct stats_record *stats_prev,
- int prog_num)
+ char *prog_name)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
double pps = 0, drop = 0, err = 0;
@@ -273,7 +304,7 @@ static void stats_print(struct stats_record *stats_rec,
int i;
/* Header */
- printf("Running XDP/eBPF prog_num:%d\n", prog_num);
+ printf("Running XDP/eBPF prog_name:%s\n", prog_name);
printf("%-15s %-7s %-14s %-11s %-9s\n",
"XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");
@@ -424,20 +455,20 @@ static void stats_collect(struct stats_record *rec)
{
int fd, i;
- fd = map_fd[1]; /* map: rx_cnt */
+ fd = rx_cnt_map_fd;
map_collect_percpu(fd, 0, &rec->rx_cnt);
- fd = map_fd[2]; /* map: redirect_err_cnt */
+ fd = redirect_err_cnt_map_fd;
map_collect_percpu(fd, 1, &rec->redir_err);
- fd = map_fd[3]; /* map: cpumap_enqueue_cnt */
+ fd = cpumap_enqueue_cnt_map_fd;
for (i = 0; i < MAX_CPUS; i++)
map_collect_percpu(fd, i, &rec->enq[i]);
- fd = map_fd[4]; /* map: cpumap_kthread_cnt */
+ fd = cpumap_kthread_cnt_map_fd;
map_collect_percpu(fd, 0, &rec->kthread);
- fd = map_fd[8]; /* map: exception_cnt */
+ fd = exception_cnt_map_fd;
map_collect_percpu(fd, 0, &rec->exception);
}
@@ -462,7 +493,7 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
/* Add a CPU entry to cpumap, as this allocates a cpu entry in
* the kernel for the cpu.
*/
- ret = bpf_map_update_elem(map_fd[0], &cpu, &queue_size, 0);
+ ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
if (ret) {
fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
exit(EXIT_FAIL_BPF);
@@ -471,23 +502,22 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
/* Inform bpf_prog's that a new CPU is available to select
* from via some control maps.
*/
- /* map_fd[5] = cpus_available */
- ret = bpf_map_update_elem(map_fd[5], &avail_idx, &cpu, 0);
+ ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
if (ret) {
fprintf(stderr, "Add to avail CPUs failed\n");
exit(EXIT_FAIL_BPF);
}
/* When not replacing/updating existing entry, bump the count */
- /* map_fd[6] = cpus_count */
- ret = bpf_map_lookup_elem(map_fd[6], &key, &curr_cpus_count);
+ ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
if (ret) {
fprintf(stderr, "Failed reading curr cpus_count\n");
exit(EXIT_FAIL_BPF);
}
if (new) {
curr_cpus_count++;
- ret = bpf_map_update_elem(map_fd[6], &key, &curr_cpus_count, 0);
+ ret = bpf_map_update_elem(cpus_count_map_fd, &key,
+ &curr_cpus_count, 0);
if (ret) {
fprintf(stderr, "Failed write curr cpus_count\n");
exit(EXIT_FAIL_BPF);
@@ -510,8 +540,8 @@ static void mark_cpus_unavailable(void)
int ret, i;
for (i = 0; i < MAX_CPUS; i++) {
- /* map_fd[5] = cpus_available */
- ret = bpf_map_update_elem(map_fd[5], &i, &invalid_cpu, 0);
+ ret = bpf_map_update_elem(cpus_available_map_fd, &i,
+ &invalid_cpu, 0);
if (ret) {
fprintf(stderr, "Failed marking CPU unavailable\n");
exit(EXIT_FAIL_BPF);
@@ -531,7 +561,7 @@ static void stress_cpumap(void)
create_cpu_entry(1, 16000, 0, false);
}
-static void stats_poll(int interval, bool use_separators, int prog_num,
+static void stats_poll(int interval, bool use_separators, char *prog_name,
bool stress_mode)
{
struct stats_record *record, *prev;
@@ -547,7 +577,7 @@ static void stats_poll(int interval, bool use_separators, int prog_num,
while (1) {
swap(&prev, &record);
stats_collect(record);
- stats_print(record, prev, prog_num);
+ stats_print(record, prev, prog_name);
sleep(interval);
if (stress_mode)
stress_cpumap();
@@ -557,20 +587,55 @@ static void stats_poll(int interval, bool use_separators, int prog_num,
free_stats_record(prev);
}
+static int init_map_fds(struct bpf_object *obj)
+{
+ cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
+ rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
+ redirect_err_cnt_map_fd =
+ bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
+ cpumap_enqueue_cnt_map_fd =
+ bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
+ cpumap_kthread_cnt_map_fd =
+ bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");
+ cpus_available_map_fd =
+ bpf_object__find_map_fd_by_name(obj, "cpus_available");
+ cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
+ cpus_iterator_map_fd =
+ bpf_object__find_map_fd_by_name(obj, "cpus_iterator");
+ exception_cnt_map_fd =
+ bpf_object__find_map_fd_by_name(obj, "exception_cnt");
+
+ if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
+ redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
+ cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
+ cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
+ exception_cnt_map_fd < 0)
+ return -ENOENT;
+
+ return 0;
+}
+
int main(int argc, char **argv)
{
struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+ char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_UNSPEC,
+ };
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
bool use_separators = true;
bool stress_mode = false;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
char filename[256];
- bool debug = false;
int added_cpus = 0;
int longindex = 0;
int interval = 2;
- int prog_num = 5;
int add_cpu = -1;
+ int opt, err;
+ int prog_fd;
__u32 qsize;
- int opt;
/* Notice: choosing the queue size is very important with the
* ixgbe driver, because its driver page recycling trick is
@@ -581,26 +646,29 @@ int main(int argc, char **argv)
qsize = 128+64;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = filename;
if (setrlimit(RLIMIT_MEMLOCK, &r)) {
perror("setrlimit(RLIMIT_MEMLOCK)");
return 1;
}
- if (load_bpf_file(filename)) {
- fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
return EXIT_FAIL;
- }
- if (!prog_fd[0]) {
- fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
+ if (prog_fd < 0) {
+ fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
+ strerror(errno));
+ return EXIT_FAIL;
+ }
+ if (init_map_fds(obj) < 0) {
+ fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
return EXIT_FAIL;
}
-
mark_cpus_unavailable();
/* Parse commands line args */
- while ((opt = getopt_long(argc, argv, "hSd:",
+ while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF",
long_options, &longindex)) != -1) {
switch (opt) {
case 'd':
@@ -624,9 +692,6 @@ int main(int argc, char **argv)
case 'S':
xdp_flags |= XDP_FLAGS_SKB_MODE;
break;
- case 'D':
- debug = true;
- break;
case 'x':
stress_mode = true;
break;
@@ -635,13 +700,7 @@ int main(int argc, char **argv)
break;
case 'p':
/* Selecting eBPF prog to load */
- prog_num = atoi(optarg);
- if (prog_num < 0 || prog_num >= MAX_PROG) {
- fprintf(stderr,
- "--prognum too large err(%d):%s\n",
- errno, strerror(errno));
- goto error;
- }
+ prog_name = optarg;
break;
case 'c':
/* Add multiple CPUs */
@@ -658,24 +717,27 @@ int main(int argc, char **argv)
case 'q':
qsize = atoi(optarg);
break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
case 'h':
error:
default:
- usage(argv);
+ usage(argv, obj);
return EXIT_FAIL_OPTION;
}
}
/* Required option */
if (ifindex == -1) {
fprintf(stderr, "ERR: required option --dev missing\n");
- usage(argv);
+ usage(argv, obj);
return EXIT_FAIL_OPTION;
}
/* Required option */
if (add_cpu == -1) {
fprintf(stderr, "ERR: required option --cpu missing\n");
fprintf(stderr, " Specify multiple --cpu option to add more\n");
- usage(argv);
+ usage(argv, obj);
return EXIT_FAIL_OPTION;
}
@@ -683,16 +745,30 @@ int main(int argc, char **argv)
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
- if (bpf_set_link_xdp_fd(ifindex, prog_fd[prog_num], xdp_flags) < 0) {
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (!prog) {
+ fprintf(stderr, "bpf_object__find_program_by_title failed\n");
+ return EXIT_FAIL;
+ }
+
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ fprintf(stderr, "bpf_program__fd failed\n");
+ return EXIT_FAIL;
+ }
+
+ if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
fprintf(stderr, "link set xdp fd failed\n");
return EXIT_FAIL_XDP;
}
- if (debug) {
- printf("Debug-mode reading trace pipe (fix #define DEBUG)\n");
- read_trace_pipe();
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
}
+ prog_id = info.id;
- stats_poll(interval, use_separators, prog_num, stress_mode);
+ stats_poll(interval, use_separators, prog_name, stress_mode);
return EXIT_OK;
}
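
xdp_redirect_cpu now picks its program by ELF section title (--progname) instead of an index into bpf_load.h's prog_fd[] array. The lookup reduces to the few libbpf calls sketched below; this is an illustration of the hunk above, with the caller expected to pass the sample's default title ("xdp_cpu_map5_lb_hash_ip_pairs") or one of the names listed by print_avail_progs().

#include "bpf/libbpf.h"

static int find_xdp_prog_fd(struct bpf_object *obj, const char *title)
{
	struct bpf_program *prog;

	/* Section title is the SEC() string of the program in the _kern.o file. */
	prog = bpf_object__find_program_by_title(obj, title);
	if (!prog)
		return -1;

	/* The returned fd is what bpf_set_link_xdp_fd() attaches to the device. */
	return bpf_program__fd(prog);
}
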
diff --git a/samples/bpf/xdp_redirect_map_user.c b/samples/bpf/xdp_redirect_map_user.c
index 4445e76854b5..1dbe7fd3a1a8 100644
--- a/samples/bpf/xdp_redirect_map_user.c
+++ b/samples/bpf/xdp_redirect_map_user.c
@@ -22,21 +22,48 @@
#include <libgen.h>
#include <sys/resource.h>
-#include "bpf_load.h"
#include "bpf_util.h"
#include <bpf/bpf.h>
+#include "bpf/libbpf.h"
static int ifindex_in;
static int ifindex_out;
static bool ifindex_out_xdp_dummy_attached = true;
+static __u32 prog_id;
+static __u32 dummy_prog_id;
-static __u32 xdp_flags;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static int rxcnt_map_fd;
static void int_exit(int sig)
{
- bpf_set_link_xdp_fd(ifindex_in, -1, xdp_flags);
- if (ifindex_out_xdp_dummy_attached)
- bpf_set_link_xdp_fd(ifindex_out, -1, xdp_flags);
+ __u32 curr_prog_id = 0;
+
+ if (bpf_get_link_xdp_id(ifindex_in, &curr_prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(ifindex_in, -1, xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on iface IN\n");
+ else
+ printf("program on iface IN changed, not removing\n");
+
+ if (ifindex_out_xdp_dummy_attached) {
+ curr_prog_id = 0;
+ if (bpf_get_link_xdp_id(ifindex_out, &curr_prog_id,
+ xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (dummy_prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(ifindex_out, -1, xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on iface OUT\n");
+ else
+ printf("program on iface OUT changed, not removing\n");
+ }
exit(0);
}
@@ -53,7 +80,7 @@ static void poll_stats(int interval, int ifindex)
int i;
sleep(interval);
- assert(bpf_map_lookup_elem(map_fd[1], &key, values) == 0);
+ assert(bpf_map_lookup_elem(rxcnt_map_fd, &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[i]);
if (sum)
@@ -69,16 +96,26 @@ static void usage(const char *prog)
"usage: %s [OPTS] IFINDEX_IN IFINDEX_OUT\n\n"
"OPTS:\n"
" -S use skb-mode\n"
- " -N enforce native mode\n",
+ " -N enforce native mode\n"
+ " -F force loading prog\n",
prog);
}
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
- const char *optstr = "SN";
- char filename[256];
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
+ struct bpf_program *prog, *dummy_prog;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int prog_fd, dummy_prog_fd;
+ const char *optstr = "FSN";
+ struct bpf_object *obj;
int ret, opt, key = 0;
+ char filename[256];
+ int tx_port_map_fd;
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
@@ -88,6 +125,9 @@ int main(int argc, char **argv)
case 'N':
xdp_flags |= XDP_FLAGS_DRV_MODE;
break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
default:
usage(basename(argv[0]));
return 1;
@@ -109,37 +149,65 @@ int main(int argc, char **argv)
printf("input: %d output: %d\n", ifindex_in, ifindex_out);
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = filename;
+
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+ return 1;
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
+ prog = bpf_program__next(NULL, obj);
+ dummy_prog = bpf_program__next(prog, obj);
+ if (!prog || !dummy_prog) {
+ printf("finding a prog in obj file failed\n");
+ return 1;
+ }
+ /* bpf_prog_load_xattr gives us the pointer to the first prog's fd,
+ * so we're missing only the fd for the dummy prog
+ */
+ dummy_prog_fd = bpf_program__fd(dummy_prog);
+ if (prog_fd < 0 || dummy_prog_fd < 0) {
+ printf("bpf_prog_load_xattr: %s\n", strerror(errno));
return 1;
}
- if (!prog_fd[0]) {
- printf("load_bpf_file: %s\n", strerror(errno));
+ tx_port_map_fd = bpf_object__find_map_fd_by_name(obj, "tx_port");
+ rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
+ if (tx_port_map_fd < 0 || rxcnt_map_fd < 0) {
+ printf("bpf_object__find_map_fd_by_name failed\n");
return 1;
}
- if (bpf_set_link_xdp_fd(ifindex_in, prog_fd[0], xdp_flags) < 0) {
+ if (bpf_set_link_xdp_fd(ifindex_in, prog_fd, xdp_flags) < 0) {
printf("ERROR: link set xdp fd failed on %d\n", ifindex_in);
return 1;
}
+ ret = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (ret) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return ret;
+ }
+ prog_id = info.id;
+
/* Loading dummy XDP prog on out-device */
- if (bpf_set_link_xdp_fd(ifindex_out, prog_fd[1],
+ if (bpf_set_link_xdp_fd(ifindex_out, dummy_prog_fd,
(xdp_flags | XDP_FLAGS_UPDATE_IF_NOEXIST)) < 0) {
printf("WARN: link set xdp fd failed on %d\n", ifindex_out);
ifindex_out_xdp_dummy_attached = false;
}
+ memset(&info, 0, sizeof(info));
+ ret = bpf_obj_get_info_by_fd(dummy_prog_fd, &info, &info_len);
+ if (ret) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return ret;
+ }
+ dummy_prog_id = info.id;
+
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
- printf("map[0] (vports) = %i, map[1] (map) = %i, map[2] (count) = %i\n",
- map_fd[0], map_fd[1], map_fd[2]);
-
/* populate virtual to physical port map */
- ret = bpf_map_update_elem(map_fd[0], &key, &ifindex_out, 0);
+ ret = bpf_map_update_elem(tx_port_map_fd, &key, &ifindex_out, 0);
if (ret) {
perror("bpf_update_elem");
goto out;
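
xdp_redirect_map (and xdp_redirect below) load two programs from a single object: the redirect program for the ingress device and a dummy pass-through program for the egress device. bpf_prog_load_xattr() only hands back the first program's fd, so the second one is fetched by iterating the object, as in this condensed restatement of the hunk above.

#include "bpf/libbpf.h"

static int get_dummy_prog_fd(struct bpf_object *obj)
{
	struct bpf_program *prog, *dummy_prog;

	prog = bpf_program__next(NULL, obj);		/* first program: the redirector */
	dummy_prog = bpf_program__next(prog, obj);	/* second program: dummy XDP pass */
	if (!prog || !dummy_prog)
		return -1;
	return bpf_program__fd(dummy_prog);
}
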
diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
index 81a69e36cb78..e9054c0269ff 100644
--- a/samples/bpf/xdp_redirect_user.c
+++ b/samples/bpf/xdp_redirect_user.c
@@ -22,21 +22,48 @@
#include <libgen.h>
#include <sys/resource.h>
-#include "bpf_load.h"
#include "bpf_util.h"
#include <bpf/bpf.h>
+#include "bpf/libbpf.h"
static int ifindex_in;
static int ifindex_out;
static bool ifindex_out_xdp_dummy_attached = true;
+static __u32 prog_id;
+static __u32 dummy_prog_id;
-static __u32 xdp_flags;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static int rxcnt_map_fd;
static void int_exit(int sig)
{
- bpf_set_link_xdp_fd(ifindex_in, -1, xdp_flags);
- if (ifindex_out_xdp_dummy_attached)
- bpf_set_link_xdp_fd(ifindex_out, -1, xdp_flags);
+ __u32 curr_prog_id = 0;
+
+ if (bpf_get_link_xdp_id(ifindex_in, &curr_prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(ifindex_in, -1, xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on iface IN\n");
+ else
+ printf("program on iface IN changed, not removing\n");
+
+ if (ifindex_out_xdp_dummy_attached) {
+ curr_prog_id = 0;
+ if (bpf_get_link_xdp_id(ifindex_out, &curr_prog_id,
+ xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (dummy_prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(ifindex_out, -1, xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on iface OUT\n");
+ else
+ printf("program on iface OUT changed, not removing\n");
+ }
exit(0);
}
@@ -53,7 +80,7 @@ static void poll_stats(int interval, int ifindex)
int i;
sleep(interval);
- assert(bpf_map_lookup_elem(map_fd[1], &key, values) == 0);
+ assert(bpf_map_lookup_elem(rxcnt_map_fd, &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[i]);
if (sum)
@@ -69,7 +96,8 @@ static void usage(const char *prog)
"usage: %s [OPTS] IFINDEX_IN IFINDEX_OUT\n\n"
"OPTS:\n"
" -S use skb-mode\n"
- " -N enforce native mode\n",
+ " -N enforce native mode\n"
+ " -F force loading prog\n",
prog);
}
@@ -77,9 +105,18 @@ static void usage(const char *prog)
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
- const char *optstr = "SN";
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
+ struct bpf_program *prog, *dummy_prog;
+ int prog_fd, tx_port_map_fd, opt;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ const char *optstr = "FSN";
+ struct bpf_object *obj;
char filename[256];
- int ret, opt, key = 0;
+ int dummy_prog_fd;
+ int ret, key = 0;
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
@@ -89,6 +126,9 @@ int main(int argc, char **argv)
case 'N':
xdp_flags |= XDP_FLAGS_DRV_MODE;
break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
default:
usage(basename(argv[0]));
return 1;
@@ -110,34 +150,65 @@ int main(int argc, char **argv)
printf("input: %d output: %d\n", ifindex_in, ifindex_out);
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = filename;
+
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+ return 1;
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
+ prog = bpf_program__next(NULL, obj);
+ dummy_prog = bpf_program__next(prog, obj);
+ if (!prog || !dummy_prog) {
+ printf("finding a prog in obj file failed\n");
+ return 1;
+ }
+ /* bpf_prog_load_xattr gives us the pointer to the first prog's fd,
+ * so we're missing only the fd for the dummy prog
+ */
+ dummy_prog_fd = bpf_program__fd(dummy_prog);
+ if (prog_fd < 0 || dummy_prog_fd < 0) {
+ printf("bpf_prog_load_xattr: %s\n", strerror(errno));
return 1;
}
- if (!prog_fd[0]) {
- printf("load_bpf_file: %s\n", strerror(errno));
+ tx_port_map_fd = bpf_object__find_map_fd_by_name(obj, "tx_port");
+ rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
+ if (tx_port_map_fd < 0 || rxcnt_map_fd < 0) {
+ printf("bpf_object__find_map_fd_by_name failed\n");
return 1;
}
- if (bpf_set_link_xdp_fd(ifindex_in, prog_fd[0], xdp_flags) < 0) {
+ if (bpf_set_link_xdp_fd(ifindex_in, prog_fd, xdp_flags) < 0) {
printf("ERROR: link set xdp fd failed on %d\n", ifindex_in);
return 1;
}
+ ret = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (ret) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return ret;
+ }
+ prog_id = info.id;
+
/* Loading dummy XDP prog on out-device */
- if (bpf_set_link_xdp_fd(ifindex_out, prog_fd[1],
+ if (bpf_set_link_xdp_fd(ifindex_out, dummy_prog_fd,
(xdp_flags | XDP_FLAGS_UPDATE_IF_NOEXIST)) < 0) {
printf("WARN: link set xdp fd failed on %d\n", ifindex_out);
ifindex_out_xdp_dummy_attached = false;
}
+ memset(&info, 0, sizeof(info));
+ ret = bpf_obj_get_info_by_fd(dummy_prog_fd, &info, &info_len);
+ if (ret) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return ret;
+ }
+ dummy_prog_id = info.id;
+
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
/* bpf redirect port */
- ret = bpf_map_update_elem(map_fd[0], &key, &ifindex_out, 0);
+ ret = bpf_map_update_elem(tx_port_map_fd, &key, &ifindex_out, 0);
if (ret) {
perror("bpf_update_elem");
goto out;
diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
index b2b4dfa776c8..79fe7bc26ab4 100644
--- a/samples/bpf/xdp_router_ipv4_user.c
+++ b/samples/bpf/xdp_router_ipv4_user.c
@@ -15,7 +15,6 @@
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
-#include "bpf_load.h"
#include <bpf/bpf.h>
#include <arpa/inet.h>
#include <fcntl.h>
@@ -25,32 +24,52 @@
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include "bpf_util.h"
+#include "bpf/libbpf.h"
+#include <sys/resource.h>
+#include <libgen.h>
-int sock, sock_arp, flags = 0;
+int sock, sock_arp, flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static int total_ifindex;
-int *ifindex_list;
+static int *ifindex_list;
+static __u32 *prog_id_list;
char buf[8192];
+static int lpm_map_fd;
+static int rxcnt_map_fd;
+static int arp_table_map_fd;
+static int exact_match_map_fd;
+static int tx_port_map_fd;
static int get_route_table(int rtm_family);
static void int_exit(int sig)
{
+ __u32 prog_id = 0;
int i = 0;
- for (i = 0; i < total_ifindex; i++)
- bpf_set_link_xdp_fd(ifindex_list[i], -1, flags);
+ for (i = 0; i < total_ifindex; i++) {
+ if (bpf_get_link_xdp_id(ifindex_list[i], &prog_id, flags)) {
+ printf("bpf_get_link_xdp_id on iface %d failed\n",
+ ifindex_list[i]);
+ exit(1);
+ }
+ if (prog_id_list[i] == prog_id)
+ bpf_set_link_xdp_fd(ifindex_list[i], -1, flags);
+ else if (!prog_id)
+ printf("couldn't find a prog id on iface %d\n",
+ ifindex_list[i]);
+ else
+ printf("program on iface %d changed, not removing\n",
+ ifindex_list[i]);
+ prog_id = 0;
+ }
exit(0);
}
static void close_and_exit(int sig)
{
- int i = 0;
-
close(sock);
close(sock_arp);
- for (i = 0; i < total_ifindex; i++)
- bpf_set_link_xdp_fd(ifindex_list[i], -1, flags);
- exit(0);
+ int_exit(0);
}
/* Get the mac address of the interface given interface name */
@@ -179,14 +198,10 @@ static void read_route(struct nlmsghdr *nh, int nll)
route.iface_name = alloca(sizeof(char *) * IFNAMSIZ);
route.iface_name = if_indextoname(route.iface, route.iface_name);
route.mac = getmac(route.iface_name);
- if (route.mac == -1) {
- int i = 0;
-
- for (i = 0; i < total_ifindex; i++)
- bpf_set_link_xdp_fd(ifindex_list[i], -1, flags);
- exit(0);
- }
- assert(bpf_map_update_elem(map_fd[4], &route.iface, &route.iface, 0) == 0);
+ if (route.mac == -1)
+ int_exit(0);
+ assert(bpf_map_update_elem(tx_port_map_fd,
+ &route.iface, &route.iface, 0) == 0);
if (rtm_family == AF_INET) {
struct trie_value {
__u8 prefix[4];
@@ -207,11 +222,16 @@ static void read_route(struct nlmsghdr *nh, int nll)
direct_entry.arp.dst = 0;
if (route.dst_len == 32) {
if (nh->nlmsg_type == RTM_DELROUTE) {
- assert(bpf_map_delete_elem(map_fd[3], &route.dst) == 0);
+ assert(bpf_map_delete_elem(exact_match_map_fd,
+ &route.dst) == 0);
} else {
- if (bpf_map_lookup_elem(map_fd[2], &route.dst, &direct_entry.arp.mac) == 0)
+ if (bpf_map_lookup_elem(arp_table_map_fd,
+ &route.dst,
+ &direct_entry.arp.mac) == 0)
direct_entry.arp.dst = route.dst;
- assert(bpf_map_update_elem(map_fd[3], &route.dst, &direct_entry, 0) == 0);
+ assert(bpf_map_update_elem(exact_match_map_fd,
+ &route.dst,
+ &direct_entry, 0) == 0);
}
}
for (i = 0; i < 4; i++)
@@ -225,7 +245,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
route.gw, route.dst_len,
route.metric,
route.iface_name);
- if (bpf_map_lookup_elem(map_fd[0], prefix_key,
+ if (bpf_map_lookup_elem(lpm_map_fd, prefix_key,
prefix_value) < 0) {
for (i = 0; i < 4; i++)
prefix_value->prefix[i] = prefix_key->data[i];
@@ -234,7 +254,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
prefix_value->gw = route.gw;
prefix_value->metric = route.metric;
- assert(bpf_map_update_elem(map_fd[0],
+ assert(bpf_map_update_elem(lpm_map_fd,
prefix_key,
prefix_value, 0
) == 0);
@@ -247,7 +267,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
prefix_key->data[2],
prefix_key->data[3],
prefix_key->prefixlen);
- assert(bpf_map_delete_elem(map_fd[0],
+ assert(bpf_map_delete_elem(lpm_map_fd,
prefix_key
) == 0);
/* Rereading the route table to check if
@@ -275,8 +295,7 @@ static void read_route(struct nlmsghdr *nh, int nll)
prefix_value->ifindex = route.iface;
prefix_value->gw = route.gw;
prefix_value->metric = route.metric;
- assert(bpf_map_update_elem(
- map_fd[0],
+ assert(bpf_map_update_elem(lpm_map_fd,
prefix_key,
prefix_value,
0) == 0);
@@ -401,7 +420,8 @@ static void read_arp(struct nlmsghdr *nh, int nll)
arp_entry.mac = atol(mac);
printf("%x\t\t%llx\n", arp_entry.dst, arp_entry.mac);
if (ndm_family == AF_INET) {
- if (bpf_map_lookup_elem(map_fd[3], &arp_entry.dst,
+ if (bpf_map_lookup_elem(exact_match_map_fd,
+ &arp_entry.dst,
&direct_entry) == 0) {
if (nh->nlmsg_type == RTM_DELNEIGH) {
direct_entry.arp.dst = 0;
@@ -410,16 +430,17 @@ static void read_arp(struct nlmsghdr *nh, int nll)
direct_entry.arp.dst = arp_entry.dst;
direct_entry.arp.mac = arp_entry.mac;
}
- assert(bpf_map_update_elem(map_fd[3],
+ assert(bpf_map_update_elem(exact_match_map_fd,
&arp_entry.dst,
&direct_entry, 0
) == 0);
memset(&direct_entry, 0, sizeof(direct_entry));
}
if (nh->nlmsg_type == RTM_DELNEIGH) {
- assert(bpf_map_delete_elem(map_fd[2], &arp_entry.dst) == 0);
+ assert(bpf_map_delete_elem(arp_table_map_fd,
+ &arp_entry.dst) == 0);
} else if (nh->nlmsg_type == RTM_NEWNEIGH) {
- assert(bpf_map_update_elem(map_fd[2],
+ assert(bpf_map_update_elem(arp_table_map_fd,
&arp_entry.dst,
&arp_entry.mac, 0
) == 0);
@@ -553,7 +574,8 @@ static int monitor_route(void)
for (key = 0; key < nr_keys; key++) {
__u64 sum = 0;
- assert(bpf_map_lookup_elem(map_fd[1], &key, values) == 0);
+ assert(bpf_map_lookup_elem(rxcnt_map_fd,
+ &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[key][i]);
if (sum)
@@ -594,36 +616,87 @@ cleanup:
return ret;
}
+static void usage(const char *prog)
+{
+ fprintf(stderr,
+ "%s: %s [OPTS] interface name list\n\n"
+ "OPTS:\n"
+ " -S use skb-mode\n"
+ " -F force loading prog\n",
+ __func__, prog);
+}
+
int main(int ac, char **argv)
{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ const char *optstr = "SF";
+ struct bpf_object *obj;
char filename[256];
char **ifname_list;
- int i = 1;
+ int prog_fd, opt;
+ int err, i = 1;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
- if (ac < 2) {
- printf("usage: %s [-S] Interface name list\n", argv[0]);
- return 1;
+ prog_load_attr.file = filename;
+
+ total_ifindex = ac - 1;
+ ifname_list = (argv + 1);
+
+ while ((opt = getopt(ac, argv, optstr)) != -1) {
+ switch (opt) {
+ case 'S':
+ flags |= XDP_FLAGS_SKB_MODE;
+ total_ifindex--;
+ ifname_list++;
+ break;
+ case 'F':
+ flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ total_ifindex--;
+ ifname_list++;
+ break;
+ default:
+ usage(basename(argv[0]));
+ return 1;
+ }
}
- if (!strcmp(argv[1], "-S")) {
- flags = XDP_FLAGS_SKB_MODE;
- total_ifindex = ac - 2;
- ifname_list = (argv + 2);
- } else {
- flags = 0;
- total_ifindex = ac - 1;
- ifname_list = (argv + 1);
+
+ if (optind == ac) {
+ usage(basename(argv[0]));
+ return 1;
}
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
+
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK)");
return 1;
}
+
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+ return 1;
+
printf("\n**************loading bpf file*********************\n\n\n");
- if (!prog_fd[0]) {
- printf("load_bpf_file: %s\n", strerror(errno));
+ if (!prog_fd) {
+ printf("bpf_prog_load_xattr: %s\n", strerror(errno));
return 1;
}
- ifindex_list = (int *)malloc(total_ifindex * sizeof(int *));
+
+ lpm_map_fd = bpf_object__find_map_fd_by_name(obj, "lpm_map");
+ rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
+ arp_table_map_fd = bpf_object__find_map_fd_by_name(obj, "arp_table");
+ exact_match_map_fd = bpf_object__find_map_fd_by_name(obj,
+ "exact_match");
+ tx_port_map_fd = bpf_object__find_map_fd_by_name(obj, "tx_port");
+ if (lpm_map_fd < 0 || rxcnt_map_fd < 0 || arp_table_map_fd < 0 ||
+ exact_match_map_fd < 0 || tx_port_map_fd < 0) {
+ printf("bpf_object__find_map_fd_by_name failed\n");
+ return 1;
+ }
+
+ ifindex_list = (int *)calloc(total_ifindex, sizeof(int));
for (i = 0; i < total_ifindex; i++) {
ifindex_list[i] = if_nametoindex(ifname_list[i]);
if (!ifindex_list[i]) {
@@ -632,8 +705,9 @@ int main(int ac, char **argv)
return 1;
}
}
+ prog_id_list = (__u32 *)calloc(total_ifindex, sizeof(__u32));
for (i = 0; i < total_ifindex; i++) {
- if (bpf_set_link_xdp_fd(ifindex_list[i], prog_fd[0], flags) < 0) {
+ if (bpf_set_link_xdp_fd(ifindex_list[i], prog_fd, flags) < 0) {
printf("link set xdp fd failed\n");
int recovery_index = i;
@@ -642,6 +716,13 @@ int main(int ac, char **argv)
return 1;
}
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id_list[i] = info.id;
+ memset(&info, 0, sizeof(info));
printf("Attached to %d\n", ifindex_list[i]);
}
signal(SIGINT, int_exit);
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
index ef26f882f92f..1210f3b170f0 100644
--- a/samples/bpf/xdp_rxq_info_user.c
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -29,8 +29,9 @@ static const char *__doc__ = " XDP RX-queue info extract example\n\n"
static int ifindex = -1;
static char ifname_buf[IF_NAMESIZE];
static char *ifname;
+static __u32 prog_id;
-static __u32 xdp_flags;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static struct bpf_map *stats_global_map;
static struct bpf_map *rx_queue_index_map;
@@ -52,16 +53,30 @@ static const struct option long_options[] = {
{"action", required_argument, NULL, 'a' },
{"readmem", no_argument, NULL, 'r' },
{"swapmac", no_argument, NULL, 'm' },
+ {"force", no_argument, NULL, 'F' },
{0, 0, NULL, 0 }
};
static void int_exit(int sig)
{
- fprintf(stderr,
- "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
- ifindex, ifname);
- if (ifindex > -1)
- bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ __u32 curr_prog_id = 0;
+
+ if (ifindex > -1) {
+ if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(EXIT_FAIL);
+ }
+ if (prog_id == curr_prog_id) {
+ fprintf(stderr,
+ "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
+ ifindex, ifname);
+ bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ } else if (!curr_prog_id) {
+ printf("couldn't find a prog id on a given iface\n");
+ } else {
+ printf("program on interface changed, not removing\n");
+ }
+ }
exit(EXIT_OK);
}
@@ -446,6 +461,8 @@ int main(int argc, char **argv)
struct bpf_prog_load_attr prog_load_attr = {
.prog_type = BPF_PROG_TYPE_XDP,
};
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
int prog_fd, map_fd, opt, err;
bool use_separators = true;
struct config cfg = { 0 };
@@ -487,7 +504,7 @@ int main(int argc, char **argv)
}
/* Parse commands line args */
- while ((opt = getopt_long(argc, argv, "hSd:",
+ while ((opt = getopt_long(argc, argv, "FhSrmzd:s:a:",
long_options, &longindex)) != -1) {
switch (opt) {
case 'd':
@@ -524,6 +541,9 @@ int main(int argc, char **argv)
case 'm':
cfg_options |= SWAP_MAC;
break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
case 'h':
error:
default:
@@ -576,6 +596,13 @@ int main(int argc, char **argv)
return EXIT_FAIL_XDP;
}
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id = info.id;
+
stats_poll(interval, action, cfg_options);
return EXIT_OK;
}
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
index 8dd87c1eb560..dc66345a929a 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -12,6 +12,9 @@
#include <signal.h>
#include <libbpf.h>
#include <bpf/bpf.h>
+#include <sys/resource.h>
+#include <libgen.h>
+#include <linux/if_link.h>
#include "perf-sys.h"
#include "trace_helpers.h"
@@ -20,25 +23,50 @@
static int pmu_fds[MAX_CPUS], if_idx;
static struct perf_event_mmap_page *headers[MAX_CPUS];
static char *if_name;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static __u32 prog_id;
static int do_attach(int idx, int fd, const char *name)
{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
int err;
- err = bpf_set_link_xdp_fd(idx, fd, 0);
- if (err < 0)
+ err = bpf_set_link_xdp_fd(idx, fd, xdp_flags);
+ if (err < 0) {
printf("ERROR: failed to attach program to %s\n", name);
+ return err;
+ }
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id = info.id;
return err;
}
static int do_detach(int idx, const char *name)
{
- int err;
+ __u32 curr_prog_id = 0;
+ int err = 0;
- err = bpf_set_link_xdp_fd(idx, -1, 0);
- if (err < 0)
- printf("ERROR: failed to detach program from %s\n", name);
+ err = bpf_get_link_xdp_id(idx, &curr_prog_id, 0);
+ if (err) {
+ printf("bpf_get_link_xdp_id failed\n");
+ return err;
+ }
+ if (prog_id == curr_prog_id) {
+ err = bpf_set_link_xdp_fd(idx, -1, 0);
+ if (err < 0)
+ printf("ERROR: failed to detach prog from %s\n", name);
+ } else if (!curr_prog_id) {
+ printf("couldn't find a prog id on a %s\n", name);
+ } else {
+ printf("program on interface changed, not removing\n");
+ }
return err;
}
@@ -97,20 +125,47 @@ static void sig_handler(int signo)
exit(0);
}
+static void usage(const char *prog)
+{
+ fprintf(stderr,
+ "%s: %s [OPTS] <ifname|ifindex>\n\n"
+ "OPTS:\n"
+ " -F force loading prog\n",
+ __func__, prog);
+}
+
int main(int argc, char **argv)
{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
struct bpf_prog_load_attr prog_load_attr = {
.prog_type = BPF_PROG_TYPE_XDP,
};
+ const char *optstr = "F";
+ int prog_fd, map_fd, opt;
struct bpf_object *obj;
struct bpf_map *map;
- int prog_fd, map_fd;
char filename[256];
int ret, err, i;
int numcpus;
- if (argc < 2) {
- printf("Usage: %s <ifname>\n", argv[0]);
+ while ((opt = getopt(argc, argv, optstr)) != -1) {
+ switch (opt) {
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ default:
+ usage(basename(argv[0]));
+ return 1;
+ }
+ }
+
+ if (optind == argc) {
+ usage(basename(argv[0]));
+ return 1;
+ }
+
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK)");
return 1;
}
@@ -136,16 +191,16 @@ int main(int argc, char **argv)
}
map_fd = bpf_map__fd(map);
- if_idx = if_nametoindex(argv[1]);
+ if_idx = if_nametoindex(argv[optind]);
if (!if_idx)
- if_idx = strtoul(argv[1], NULL, 0);
+ if_idx = strtoul(argv[optind], NULL, 0);
if (!if_idx) {
fprintf(stderr, "Invalid ifname\n");
return 1;
}
- if_name = argv[1];
- err = do_attach(if_idx, prog_fd, argv[1]);
+ if_name = argv[optind];
+ err = do_attach(if_idx, prog_fd, if_name);
if (err)
return err;
diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c
index a4ccc33adac0..4a1511eb7812 100644
--- a/samples/bpf/xdp_tx_iptunnel_user.c
+++ b/samples/bpf/xdp_tx_iptunnel_user.c
@@ -17,7 +17,7 @@
#include <netinet/ether.h>
#include <unistd.h>
#include <time.h>
-#include "bpf_load.h"
+#include "bpf/libbpf.h"
#include <bpf/bpf.h>
#include "bpf_util.h"
#include "xdp_tx_iptunnel_common.h"
@@ -25,12 +25,26 @@
#define STATS_INTERVAL_S 2U
static int ifindex = -1;
-static __u32 xdp_flags = 0;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static int rxcnt_map_fd;
+static __u32 prog_id;
static void int_exit(int sig)
{
- if (ifindex > -1)
- bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ __u32 curr_prog_id = 0;
+
+ if (ifindex > -1) {
+ if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(1);
+ }
+ if (prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on a given iface\n");
+ else
+ printf("program on interface changed, not removing\n");
+ }
exit(0);
}
@@ -53,7 +67,8 @@ static void poll_stats(unsigned int kill_after_s)
for (proto = 0; proto < nr_protos; proto++) {
__u64 sum = 0;
- assert(bpf_map_lookup_elem(map_fd[0], &proto, values) == 0);
+ assert(bpf_map_lookup_elem(rxcnt_map_fd, &proto,
+ values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[proto][i]);
@@ -81,6 +96,7 @@ static void usage(const char *cmd)
printf(" -P <IP-Protocol> Default is TCP\n");
printf(" -S use skb-mode\n");
printf(" -N enforce native mode\n");
+ printf(" -F Force loading the XDP prog\n");
printf(" -h Display this help\n");
}
@@ -138,16 +154,22 @@ static int parse_ports(const char *port_str, int *min_port, int *max_port)
int main(int argc, char **argv)
{
+ struct bpf_prog_load_attr prog_load_attr = {
+ .prog_type = BPF_PROG_TYPE_XDP,
+ };
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ int min_port = 0, max_port = 0, vip2tnl_map_fd;
+ const char *optstr = "i:a:p:s:d:m:T:P:FSNh";
unsigned char opt_flags[256] = {};
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
unsigned int kill_after_s = 0;
- const char *optstr = "i:a:p:s:d:m:T:P:SNh";
- int min_port = 0, max_port = 0;
struct iptnl_info tnl = {};
- struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ struct bpf_object *obj;
struct vip vip = {};
char filename[256];
- int opt;
- int i;
+ int opt, prog_fd;
+ int i, err;
tnl.family = AF_UNSPEC;
vip.protocol = IPPROTO_TCP;
@@ -211,6 +233,9 @@ int main(int argc, char **argv)
case 'N':
xdp_flags |= XDP_FLAGS_DRV_MODE;
break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
default:
usage(argv[0]);
return 1;
@@ -232,33 +257,47 @@ int main(int argc, char **argv)
}
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ prog_load_attr.file = filename;
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
return 1;
- }
- if (!prog_fd[0]) {
+ if (!prog_fd) {
printf("load_bpf_file: %s\n", strerror(errno));
return 1;
}
+ rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
+ vip2tnl_map_fd = bpf_object__find_map_fd_by_name(obj, "vip2tnl");
+ if (vip2tnl_map_fd < 0 || rxcnt_map_fd < 0) {
+ printf("bpf_object__find_map_fd_by_name failed\n");
+ return 1;
+ }
+
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
while (min_port <= max_port) {
vip.dport = htons(min_port++);
- if (bpf_map_update_elem(map_fd[1], &vip, &tnl, BPF_NOEXIST)) {
+ if (bpf_map_update_elem(vip2tnl_map_fd, &vip, &tnl,
+ BPF_NOEXIST)) {
perror("bpf_map_update_elem(&vip2tnl)");
return 1;
}
}
- if (bpf_set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
+ if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
printf("link set xdp fd failed\n");
return 1;
}
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id = info.id;
+
poll_stats(kill_after_s);
bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
diff --git a/samples/bpf/xdpsock.h b/samples/bpf/xdpsock.h
deleted file mode 100644
index 533ab81adfa1..000000000000
--- a/samples/bpf/xdpsock.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef XDPSOCK_H_
-#define XDPSOCK_H_
-
-/* Power-of-2 number of sockets */
-#define MAX_SOCKS 4
-
-/* Round-robin receive */
-#define RR_LB 0
-
-#endif /* XDPSOCK_H_ */
diff --git a/samples/bpf/xdpsock_kern.c b/samples/bpf/xdpsock_kern.c
deleted file mode 100644
index b8ccd0802b3f..000000000000
--- a/samples/bpf/xdpsock_kern.c
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define KBUILD_MODNAME "foo"
-#include <uapi/linux/bpf.h>
-#include "bpf_helpers.h"
-
-#include "xdpsock.h"
-
-struct bpf_map_def SEC("maps") qidconf_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 1,
-};
-
-struct bpf_map_def SEC("maps") xsks_map = {
- .type = BPF_MAP_TYPE_XSKMAP,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = MAX_SOCKS,
-};
-
-struct bpf_map_def SEC("maps") rr_map = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(unsigned int),
- .max_entries = 1,
-};
-
-SEC("xdp_sock")
-int xdp_sock_prog(struct xdp_md *ctx)
-{
- int *qidconf, key = 0, idx;
- unsigned int *rr;
-
- qidconf = bpf_map_lookup_elem(&qidconf_map, &key);
- if (!qidconf)
- return XDP_ABORTED;
-
- if (*qidconf != ctx->rx_queue_index)
- return XDP_PASS;
-
-#if RR_LB /* NB! RR_LB is configured in xdpsock.h */
- rr = bpf_map_lookup_elem(&rr_map, &key);
- if (!rr)
- return XDP_ABORTED;
-
- *rr = (*rr + 1) & (MAX_SOCKS - 1);
- idx = *rr;
-#else
- idx = 0;
-#endif
-
- return bpf_redirect_map(&xsks_map, idx, 0);
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 57ecadc58403..d08ee1ab7bb4 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -1,37 +1,36 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */
-#include <assert.h>
+#include <asm/barrier.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
+#include <linux/compiler.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
+#include <locale.h>
+#include <net/ethernet.h>
#include <net/if.h>
+#include <poll.h>
+#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <net/ethernet.h>
+#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
-#include <sys/mman.h>
+#include <sys/types.h>
#include <time.h>
#include <unistd.h>
-#include <pthread.h>
-#include <locale.h>
-#include <sys/types.h>
-#include <poll.h>
#include "bpf/libbpf.h"
-#include "bpf_util.h"
+#include "bpf/xsk.h"
#include <bpf/bpf.h>
-#include "xdpsock.h"
-
#ifndef SOL_XDP
#define SOL_XDP 283
#endif
@@ -44,17 +43,11 @@
#define PF_XDP AF_XDP
#endif
-#define NUM_FRAMES 131072
-#define FRAME_HEADROOM 0
-#define FRAME_SHIFT 11
-#define FRAME_SIZE 2048
-#define NUM_DESCS 1024
-#define BATCH_SIZE 16
-
-#define FQ_NUM_DESCS 1024
-#define CQ_NUM_DESCS 1024
+#define NUM_FRAMES (4 * 1024)
+#define BATCH_SIZE 64
#define DEBUG_HEXDUMP 0
+#define MAX_SOCKS 8
typedef __u64 u64;
typedef __u32 u32;
@@ -68,58 +61,36 @@ enum benchmark_type {
};
static enum benchmark_type opt_bench = BENCH_RXDROP;
-static u32 opt_xdp_flags;
+static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
-static int opt_shared_packet_buffer;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags;
+static __u32 prog_id;
-struct xdp_umem_uqueue {
- u32 cached_prod;
- u32 cached_cons;
- u32 mask;
- u32 size;
- u32 *producer;
- u32 *consumer;
- u64 *ring;
- void *map;
-};
-
-struct xdp_umem {
- char *frames;
- struct xdp_umem_uqueue fq;
- struct xdp_umem_uqueue cq;
- int fd;
-};
-
-struct xdp_uqueue {
- u32 cached_prod;
- u32 cached_cons;
- u32 mask;
- u32 size;
- u32 *producer;
- u32 *consumer;
- struct xdp_desc *ring;
- void *map;
+struct xsk_umem_info {
+ struct xsk_ring_prod fq;
+ struct xsk_ring_cons cq;
+ struct xsk_umem *umem;
+ void *buffer;
};
-struct xdpsock {
- struct xdp_uqueue rx;
- struct xdp_uqueue tx;
- int sfd;
- struct xdp_umem *umem;
- u32 outstanding_tx;
+struct xsk_socket_info {
+ struct xsk_ring_cons rx;
+ struct xsk_ring_prod tx;
+ struct xsk_umem_info *umem;
+ struct xsk_socket *xsk;
unsigned long rx_npkts;
unsigned long tx_npkts;
unsigned long prev_rx_npkts;
unsigned long prev_tx_npkts;
+ u32 outstanding_tx;
};
static int num_socks;
-struct xdpsock *xsks[MAX_SOCKS];
+struct xsk_socket_info *xsks[MAX_SOCKS];
static unsigned long get_nsecs(void)
{
@@ -129,225 +100,124 @@ static unsigned long get_nsecs(void)
return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}
-static void dump_stats(void);
-
-#define lassert(expr) \
- do { \
- if (!(expr)) { \
- fprintf(stderr, "%s:%s:%i: Assertion failed: " \
- #expr ": errno: %d/\"%s\"\n", \
- __FILE__, __func__, __LINE__, \
- errno, strerror(errno)); \
- dump_stats(); \
- exit(EXIT_FAILURE); \
- } \
- } while (0)
-
-#define barrier() __asm__ __volatile__("": : :"memory")
-#ifdef __aarch64__
-#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
-#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
-#else
-#define u_smp_rmb() barrier()
-#define u_smp_wmb() barrier()
-#endif
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-
-static const char pkt_data[] =
- "\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
- "\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
- "\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
- "\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";
-
-static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
-{
- u32 free_entries = q->cached_cons - q->cached_prod;
-
- if (free_entries >= nb)
- return free_entries;
-
- /* Refresh the local tail pointer */
- q->cached_cons = *q->consumer + q->size;
-
- return q->cached_cons - q->cached_prod;
-}
-
-static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
+static void print_benchmark(bool running)
{
- u32 free_entries = q->cached_cons - q->cached_prod;
+ const char *bench_str = "INVALID";
- if (free_entries >= ndescs)
- return free_entries;
+ if (opt_bench == BENCH_RXDROP)
+ bench_str = "rxdrop";
+ else if (opt_bench == BENCH_TXONLY)
+ bench_str = "txonly";
+ else if (opt_bench == BENCH_L2FWD)
+ bench_str = "l2fwd";
- /* Refresh the local tail pointer */
- q->cached_cons = *q->consumer + q->size;
- return q->cached_cons - q->cached_prod;
-}
+ printf("%s:%d %s ", opt_if, opt_queue, bench_str);
+ if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
+ printf("xdp-skb ");
+ else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
+ printf("xdp-drv ");
+ else
+ printf(" ");
-static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
-{
- u32 entries = q->cached_prod - q->cached_cons;
+ if (opt_poll)
+ printf("poll() ");
- if (entries == 0) {
- q->cached_prod = *q->producer;
- entries = q->cached_prod - q->cached_cons;
+ if (running) {
+ printf("running...");
+ fflush(stdout);
}
-
- return (entries > nb) ? nb : entries;
}
-static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
+static void dump_stats(void)
{
- u32 entries = q->cached_prod - q->cached_cons;
+ unsigned long now = get_nsecs();
+ long dt = now - prev_time;
+ int i;
- if (entries == 0) {
- q->cached_prod = *q->producer;
- entries = q->cached_prod - q->cached_cons;
- }
+ prev_time = now;
- return (entries > ndescs) ? ndescs : entries;
-}
+ for (i = 0; i < num_socks && xsks[i]; i++) {
+ char *fmt = "%-15s %'-11.0f %'-11lu\n";
+ double rx_pps, tx_pps;
-static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
- struct xdp_desc *d,
- size_t nb)
-{
- u32 i;
+ rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
+ 1000000000. / dt;
+ tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
+ 1000000000. / dt;
- if (umem_nb_free(fq, nb) < nb)
- return -ENOSPC;
+ printf("\n sock%d@", i);
+ print_benchmark(false);
+ printf("\n");
- for (i = 0; i < nb; i++) {
- u32 idx = fq->cached_prod++ & fq->mask;
+ printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
+ dt / 1000000000.);
+ printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
+ printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);
- fq->ring[idx] = d[i].addr;
+ xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
+ xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
}
-
- u_smp_wmb();
-
- *fq->producer = fq->cached_prod;
-
- return 0;
}
-static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
- size_t nb)
+static void *poller(void *arg)
{
- u32 i;
-
- if (umem_nb_free(fq, nb) < nb)
- return -ENOSPC;
-
- for (i = 0; i < nb; i++) {
- u32 idx = fq->cached_prod++ & fq->mask;
-
- fq->ring[idx] = d[i];
+ (void)arg;
+ for (;;) {
+ sleep(opt_interval);
+ dump_stats();
}
- u_smp_wmb();
-
- *fq->producer = fq->cached_prod;
-
- return 0;
+ return NULL;
}
-static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
- u64 *d, size_t nb)
+static void remove_xdp_program(void)
{
- u32 idx, i, entries = umem_nb_avail(cq, nb);
-
- u_smp_rmb();
+ __u32 curr_prog_id = 0;
- for (i = 0; i < entries; i++) {
- idx = cq->cached_cons++ & cq->mask;
- d[i] = cq->ring[idx];
- }
-
- if (entries > 0) {
- u_smp_wmb();
-
- *cq->consumer = cq->cached_cons;
+ if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
+ printf("bpf_get_link_xdp_id failed\n");
+ exit(EXIT_FAILURE);
}
-
- return entries;
-}
-
-static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
-{
- return &xsk->umem->frames[addr];
+ if (prog_id == curr_prog_id)
+ bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on a given interface\n");
+ else
+ printf("program on interface changed, not removing\n");
}
-static inline int xq_enq(struct xdp_uqueue *uq,
- const struct xdp_desc *descs,
- unsigned int ndescs)
+static void int_exit(int sig)
{
- struct xdp_desc *r = uq->ring;
- unsigned int i;
-
- if (xq_nb_free(uq, ndescs) < ndescs)
- return -ENOSPC;
+ struct xsk_umem *umem = xsks[0]->umem->umem;
- for (i = 0; i < ndescs; i++) {
- u32 idx = uq->cached_prod++ & uq->mask;
-
- r[idx].addr = descs[i].addr;
- r[idx].len = descs[i].len;
- }
+ (void)sig;
- u_smp_wmb();
+ dump_stats();
+ xsk_socket__delete(xsks[0]->xsk);
+ (void)xsk_umem__delete(umem);
+ remove_xdp_program();
- *uq->producer = uq->cached_prod;
- return 0;
+ exit(EXIT_SUCCESS);
}
-static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
- unsigned int id, unsigned int ndescs)
+static void __exit_with_error(int error, const char *file, const char *func,
+ int line)
{
- struct xdp_desc *r = uq->ring;
- unsigned int i;
-
- if (xq_nb_free(uq, ndescs) < ndescs)
- return -ENOSPC;
-
- for (i = 0; i < ndescs; i++) {
- u32 idx = uq->cached_prod++ & uq->mask;
-
- r[idx].addr = (id + i) << FRAME_SHIFT;
- r[idx].len = sizeof(pkt_data) - 1;
- }
-
- u_smp_wmb();
-
- *uq->producer = uq->cached_prod;
- return 0;
+ fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
+ line, error, strerror(error));
+ dump_stats();
+ remove_xdp_program();
+ exit(EXIT_FAILURE);
}
-static inline int xq_deq(struct xdp_uqueue *uq,
- struct xdp_desc *descs,
- int ndescs)
-{
- struct xdp_desc *r = uq->ring;
- unsigned int idx;
- int i, entries;
-
- entries = xq_nb_avail(uq, ndescs);
+#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, \
+ __LINE__)
- u_smp_rmb();
-
- for (i = 0; i < entries; i++) {
- idx = uq->cached_cons++ & uq->mask;
- descs[i] = r[idx];
- }
-
- if (entries > 0) {
- u_smp_wmb();
-
- *uq->consumer = uq->cached_cons;
- }
-
- return entries;
-}
+static const char pkt_data[] =
+ "\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
+ "\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
+ "\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
+ "\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";
static void swap_mac_addresses(void *data)
{
@@ -396,247 +266,74 @@ static void hex_dump(void *pkt, size_t length, u64 addr)
printf("\n");
}
-static size_t gen_eth_frame(char *frame)
+static size_t gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
{
- memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
+ memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
+ sizeof(pkt_data) - 1);
return sizeof(pkt_data) - 1;
}
-static struct xdp_umem *xdp_umem_configure(int sfd)
+static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
{
- int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
- struct xdp_mmap_offsets off;
- struct xdp_umem_reg mr;
- struct xdp_umem *umem;
- socklen_t optlen;
- void *bufs;
+ struct xsk_umem_info *umem;
+ int ret;
umem = calloc(1, sizeof(*umem));
- lassert(umem);
-
- lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
- NUM_FRAMES * FRAME_SIZE) == 0);
-
- mr.addr = (__u64)bufs;
- mr.len = NUM_FRAMES * FRAME_SIZE;
- mr.chunk_size = FRAME_SIZE;
- mr.headroom = FRAME_HEADROOM;
-
- lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
- lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
- sizeof(int)) == 0);
- lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
- sizeof(int)) == 0);
-
- optlen = sizeof(off);
- lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
- &optlen) == 0);
-
- umem->fq.map = mmap(0, off.fr.desc +
- FQ_NUM_DESCS * sizeof(u64),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, sfd,
- XDP_UMEM_PGOFF_FILL_RING);
- lassert(umem->fq.map != MAP_FAILED);
-
- umem->fq.mask = FQ_NUM_DESCS - 1;
- umem->fq.size = FQ_NUM_DESCS;
- umem->fq.producer = umem->fq.map + off.fr.producer;
- umem->fq.consumer = umem->fq.map + off.fr.consumer;
- umem->fq.ring = umem->fq.map + off.fr.desc;
- umem->fq.cached_cons = FQ_NUM_DESCS;
-
- umem->cq.map = mmap(0, off.cr.desc +
- CQ_NUM_DESCS * sizeof(u64),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, sfd,
- XDP_UMEM_PGOFF_COMPLETION_RING);
- lassert(umem->cq.map != MAP_FAILED);
-
- umem->cq.mask = CQ_NUM_DESCS - 1;
- umem->cq.size = CQ_NUM_DESCS;
- umem->cq.producer = umem->cq.map + off.cr.producer;
- umem->cq.consumer = umem->cq.map + off.cr.consumer;
- umem->cq.ring = umem->cq.map + off.cr.desc;
-
- umem->frames = bufs;
- umem->fd = sfd;
+ if (!umem)
+ exit_with_error(errno);
- if (opt_bench == BENCH_TXONLY) {
- int i;
-
- for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
- (void)gen_eth_frame(&umem->frames[i]);
- }
+ ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
+ NULL);
+ if (ret)
+ exit_with_error(-ret);
+ umem->buffer = buffer;
return umem;
}
-static struct xdpsock *xsk_configure(struct xdp_umem *umem)
+static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem)
{
- struct sockaddr_xdp sxdp = {};
- struct xdp_mmap_offsets off;
- int sfd, ndescs = NUM_DESCS;
- struct xdpsock *xsk;
- bool shared = true;
- socklen_t optlen;
- u64 i;
-
- sfd = socket(PF_XDP, SOCK_RAW, 0);
- lassert(sfd >= 0);
+ struct xsk_socket_config cfg;
+ struct xsk_socket_info *xsk;
+ int ret;
+ u32 idx;
+ int i;
xsk = calloc(1, sizeof(*xsk));
- lassert(xsk);
-
- xsk->sfd = sfd;
- xsk->outstanding_tx = 0;
-
- if (!umem) {
- shared = false;
- xsk->umem = xdp_umem_configure(sfd);
- } else {
- xsk->umem = umem;
- }
-
- lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
- &ndescs, sizeof(int)) == 0);
- lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
- &ndescs, sizeof(int)) == 0);
- optlen = sizeof(off);
- lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
- &optlen) == 0);
-
- /* Rx */
- xsk->rx.map = mmap(NULL,
- off.rx.desc +
- NUM_DESCS * sizeof(struct xdp_desc),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, sfd,
- XDP_PGOFF_RX_RING);
- lassert(xsk->rx.map != MAP_FAILED);
-
- if (!shared) {
- for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
- lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
- == 0);
- }
-
- /* Tx */
- xsk->tx.map = mmap(NULL,
- off.tx.desc +
- NUM_DESCS * sizeof(struct xdp_desc),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, sfd,
- XDP_PGOFF_TX_RING);
- lassert(xsk->tx.map != MAP_FAILED);
-
- xsk->rx.mask = NUM_DESCS - 1;
- xsk->rx.size = NUM_DESCS;
- xsk->rx.producer = xsk->rx.map + off.rx.producer;
- xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
- xsk->rx.ring = xsk->rx.map + off.rx.desc;
-
- xsk->tx.mask = NUM_DESCS - 1;
- xsk->tx.size = NUM_DESCS;
- xsk->tx.producer = xsk->tx.map + off.tx.producer;
- xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
- xsk->tx.ring = xsk->tx.map + off.tx.desc;
- xsk->tx.cached_cons = NUM_DESCS;
-
- sxdp.sxdp_family = PF_XDP;
- sxdp.sxdp_ifindex = opt_ifindex;
- sxdp.sxdp_queue_id = opt_queue;
-
- if (shared) {
- sxdp.sxdp_flags = XDP_SHARED_UMEM;
- sxdp.sxdp_shared_umem_fd = umem->fd;
- } else {
- sxdp.sxdp_flags = opt_xdp_bind_flags;
- }
-
- lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);
+ if (!xsk)
+ exit_with_error(errno);
+
+ xsk->umem = umem;
+ cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg.libbpf_flags = 0;
+ cfg.xdp_flags = opt_xdp_flags;
+ cfg.bind_flags = opt_xdp_bind_flags;
+ ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
+ &xsk->rx, &xsk->tx, &cfg);
+ if (ret)
+ exit_with_error(-ret);
+
+ ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
+ if (ret)
+ exit_with_error(-ret);
+
+ ret = xsk_ring_prod__reserve(&xsk->umem->fq,
+ XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ &idx);
+ if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
+ exit_with_error(-ret);
+ for (i = 0;
+ i < XSK_RING_PROD__DEFAULT_NUM_DESCS *
+ XSK_UMEM__DEFAULT_FRAME_SIZE;
+ i += XSK_UMEM__DEFAULT_FRAME_SIZE)
+ *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) = i;
+ xsk_ring_prod__submit(&xsk->umem->fq,
+ XSK_RING_PROD__DEFAULT_NUM_DESCS);
return xsk;
}
-static void print_benchmark(bool running)
-{
- const char *bench_str = "INVALID";
-
- if (opt_bench == BENCH_RXDROP)
- bench_str = "rxdrop";
- else if (opt_bench == BENCH_TXONLY)
- bench_str = "txonly";
- else if (opt_bench == BENCH_L2FWD)
- bench_str = "l2fwd";
-
- printf("%s:%d %s ", opt_if, opt_queue, bench_str);
- if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
- printf("xdp-skb ");
- else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
- printf("xdp-drv ");
- else
- printf(" ");
-
- if (opt_poll)
- printf("poll() ");
-
- if (running) {
- printf("running...");
- fflush(stdout);
- }
-}
-
-static void dump_stats(void)
-{
- unsigned long now = get_nsecs();
- long dt = now - prev_time;
- int i;
-
- prev_time = now;
-
- for (i = 0; i < num_socks && xsks[i]; i++) {
- char *fmt = "%-15s %'-11.0f %'-11lu\n";
- double rx_pps, tx_pps;
-
- rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
- 1000000000. / dt;
- tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
- 1000000000. / dt;
-
- printf("\n sock%d@", i);
- print_benchmark(false);
- printf("\n");
-
- printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
- dt / 1000000000.);
- printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
- printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);
-
- xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
- xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
- }
-}
-
-static void *poller(void *arg)
-{
- (void)arg;
- for (;;) {
- sleep(opt_interval);
- dump_stats();
- }
-
- return NULL;
-}
-
-static void int_exit(int sig)
-{
- (void)sig;
- dump_stats();
- bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
- exit(EXIT_SUCCESS);
-}
-
static struct option long_options[] = {
{"rxdrop", no_argument, 0, 'r'},
{"txonly", no_argument, 0, 't'},
@@ -644,7 +341,6 @@ static struct option long_options[] = {
{"interface", required_argument, 0, 'i'},
{"queue", required_argument, 0, 'q'},
{"poll", no_argument, 0, 'p'},
- {"shared-buffer", no_argument, 0, 's'},
{"xdp-skb", no_argument, 0, 'S'},
{"xdp-native", no_argument, 0, 'N'},
{"interval", required_argument, 0, 'n'},
@@ -664,7 +360,6 @@ static void usage(const char *prog)
" -i, --interface=n Run on interface n\n"
" -q, --queue=n Use queue n (default 0)\n"
" -p, --poll Use poll syscall\n"
- " -s, --shared-buffer Use shared packet buffer\n"
" -S, --xdp-skb=n Use XDP skb-mod\n"
" -N, --xdp-native=n Enfore XDP native mode\n"
" -n, --interval=n Specify statistics update interval (default 1 sec).\n"
@@ -682,7 +377,7 @@ static void parse_command_line(int argc, char **argv)
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "rtli:q:psSNn:cz", long_options,
+ c = getopt_long(argc, argv, "Frtli:q:psSNn:cz", long_options,
&option_index);
if (c == -1)
break;
@@ -703,9 +398,6 @@ static void parse_command_line(int argc, char **argv)
case 'q':
opt_queue = atoi(optarg);
break;
- case 's':
- opt_shared_packet_buffer = 1;
- break;
case 'p':
opt_poll = 1;
break;
@@ -725,6 +417,9 @@ static void parse_command_line(int argc, char **argv)
case 'c':
opt_xdp_bind_flags |= XDP_COPY;
break;
+ case 'F':
+ opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
default:
usage(basename(argv[0]));
}
@@ -736,75 +431,104 @@ static void parse_command_line(int argc, char **argv)
opt_if);
usage(basename(argv[0]));
}
+
}
-static void kick_tx(int fd)
+static void kick_tx(struct xsk_socket_info *xsk)
{
int ret;
- ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
+ ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
return;
- lassert(0);
+ exit_with_error(errno);
}
-static inline void complete_tx_l2fwd(struct xdpsock *xsk)
+static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
{
- u64 descs[BATCH_SIZE];
+ u32 idx_cq = 0, idx_fq = 0;
unsigned int rcvd;
size_t ndescs;
if (!xsk->outstanding_tx)
return;
- kick_tx(xsk->sfd);
+ kick_tx(xsk);
ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
- xsk->outstanding_tx;
+ xsk->outstanding_tx;
/* re-add completed Tx buffers */
- rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
+ rcvd = xsk_ring_cons__peek(&xsk->umem->cq, ndescs, &idx_cq);
if (rcvd > 0) {
- umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
+ unsigned int i;
+ int ret;
+
+ ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+ while (ret != rcvd) {
+ if (ret < 0)
+ exit_with_error(-ret);
+ ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd,
+ &idx_fq);
+ }
+ for (i = 0; i < rcvd; i++)
+ *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) =
+ *xsk_ring_cons__comp_addr(&xsk->umem->cq,
+ idx_cq++);
+
+ xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
+ xsk_ring_cons__release(&xsk->umem->cq, rcvd);
xsk->outstanding_tx -= rcvd;
xsk->tx_npkts += rcvd;
}
}
-static inline void complete_tx_only(struct xdpsock *xsk)
+static inline void complete_tx_only(struct xsk_socket_info *xsk)
{
- u64 descs[BATCH_SIZE];
unsigned int rcvd;
+ u32 idx;
if (!xsk->outstanding_tx)
return;
- kick_tx(xsk->sfd);
+ kick_tx(xsk);
- rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
+ rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx);
if (rcvd > 0) {
+ xsk_ring_cons__release(&xsk->umem->cq, rcvd);
xsk->outstanding_tx -= rcvd;
xsk->tx_npkts += rcvd;
}
}
-static void rx_drop(struct xdpsock *xsk)
+static void rx_drop(struct xsk_socket_info *xsk)
{
- struct xdp_desc descs[BATCH_SIZE];
unsigned int rcvd, i;
+ u32 idx_rx = 0, idx_fq = 0;
+ int ret;
- rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
+ rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
if (!rcvd)
return;
+ ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+ while (ret != rcvd) {
+ if (ret < 0)
+ exit_with_error(-ret);
+ ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+ }
+
for (i = 0; i < rcvd; i++) {
- char *pkt = xq_get_data(xsk, descs[i].addr);
+ u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
+ u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
+ char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
- hex_dump(pkt, descs[i].len, descs[i].addr);
+ hex_dump(pkt, len, addr);
+ *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = addr;
}
+ xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
+ xsk_ring_cons__release(&xsk->rx, rcvd);
xsk->rx_npkts += rcvd;
-
- umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}
static void rx_drop_all(void)
@@ -815,7 +539,7 @@ static void rx_drop_all(void)
memset(fds, 0, sizeof(fds));
for (i = 0; i < num_socks; i++) {
- fds[i].fd = xsks[i]->sfd;
+ fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
fds[i].events = POLLIN;
timeout = 1000; /* 1sn */
}
@@ -832,14 +556,14 @@ static void rx_drop_all(void)
}
}
-static void tx_only(struct xdpsock *xsk)
+static void tx_only(struct xsk_socket_info *xsk)
{
int timeout, ret, nfds = 1;
struct pollfd fds[nfds + 1];
- unsigned int idx = 0;
+ u32 idx, frame_nb = 0;
memset(fds, 0, sizeof(fds));
- fds[0].fd = xsk->sfd;
+ fds[0].fd = xsk_socket__fd(xsk->xsk);
fds[0].events = POLLOUT;
timeout = 1000; /* 1sn */
@@ -849,50 +573,73 @@ static void tx_only(struct xdpsock *xsk)
if (ret <= 0)
continue;
- if (fds[0].fd != xsk->sfd ||
- !(fds[0].revents & POLLOUT))
+ if (!(fds[0].revents & POLLOUT))
continue;
}
- if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
- lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);
+ if (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) ==
+ BATCH_SIZE) {
+ unsigned int i;
+ for (i = 0; i < BATCH_SIZE; i++) {
+ xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->addr
+ = (frame_nb + i) <<
+ XSK_UMEM__DEFAULT_FRAME_SHIFT;
+ xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->len =
+ sizeof(pkt_data) - 1;
+ }
+
+ xsk_ring_prod__submit(&xsk->tx, BATCH_SIZE);
xsk->outstanding_tx += BATCH_SIZE;
- idx += BATCH_SIZE;
- idx %= NUM_FRAMES;
+ frame_nb += BATCH_SIZE;
+ frame_nb %= NUM_FRAMES;
}
complete_tx_only(xsk);
}
}
-static void l2fwd(struct xdpsock *xsk)
+static void l2fwd(struct xsk_socket_info *xsk)
{
for (;;) {
- struct xdp_desc descs[BATCH_SIZE];
unsigned int rcvd, i;
+ u32 idx_rx = 0, idx_tx = 0;
int ret;
for (;;) {
complete_tx_l2fwd(xsk);
- rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
+ rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE,
+ &idx_rx);
if (rcvd > 0)
break;
}
+ ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
+ while (ret != rcvd) {
+ if (ret < 0)
+ exit_with_error(-ret);
+ ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
+ }
+
for (i = 0; i < rcvd; i++) {
- char *pkt = xq_get_data(xsk, descs[i].addr);
+ u64 addr = xsk_ring_cons__rx_desc(&xsk->rx,
+ idx_rx)->addr;
+ u32 len = xsk_ring_cons__rx_desc(&xsk->rx,
+ idx_rx++)->len;
+ char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
swap_mac_addresses(pkt);
- hex_dump(pkt, descs[i].len, descs[i].addr);
+ hex_dump(pkt, len, addr);
+ xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = addr;
+ xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
}
- xsk->rx_npkts += rcvd;
+ xsk_ring_prod__submit(&xsk->tx, rcvd);
+ xsk_ring_cons__release(&xsk->rx, rcvd);
- ret = xq_enq(&xsk->tx, descs, rcvd);
- lassert(ret == 0);
+ xsk->rx_npkts += rcvd;
xsk->outstanding_tx += rcvd;
}
}
@@ -900,15 +647,10 @@ static void l2fwd(struct xdpsock *xsk)
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
- struct bpf_prog_load_attr prog_load_attr = {
- .prog_type = BPF_PROG_TYPE_XDP,
- };
- int prog_fd, qidconf_map, xsks_map;
- struct bpf_object *obj;
- char xdp_filename[256];
- struct bpf_map *map;
- int i, ret, key = 0;
+ struct xsk_umem_info *umem;
pthread_t pt;
+ void *bufs;
+ int ret;
parse_command_line(argc, argv);
@@ -918,60 +660,22 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
- snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
- prog_load_attr.file = xdp_filename;
-
- if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
- exit(EXIT_FAILURE);
- if (prog_fd < 0) {
- fprintf(stderr, "ERROR: no program found: %s\n",
- strerror(prog_fd));
- exit(EXIT_FAILURE);
- }
-
- map = bpf_object__find_map_by_name(obj, "qidconf_map");
- qidconf_map = bpf_map__fd(map);
- if (qidconf_map < 0) {
- fprintf(stderr, "ERROR: no qidconf map found: %s\n",
- strerror(qidconf_map));
- exit(EXIT_FAILURE);
- }
+ ret = posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
+ NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE);
+ if (ret)
+ exit_with_error(ret);
- map = bpf_object__find_map_by_name(obj, "xsks_map");
- xsks_map = bpf_map__fd(map);
- if (xsks_map < 0) {
- fprintf(stderr, "ERROR: no xsks map found: %s\n",
- strerror(xsks_map));
- exit(EXIT_FAILURE);
- }
+ /* Create sockets... */
+ umem = xsk_configure_umem(bufs,
+ NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE);
+ xsks[num_socks++] = xsk_configure_socket(umem);
- if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
- fprintf(stderr, "ERROR: link set xdp fd failed\n");
- exit(EXIT_FAILURE);
- }
-
- ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0);
- if (ret) {
- fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
- exit(EXIT_FAILURE);
- }
-
- /* Create sockets... */
- xsks[num_socks++] = xsk_configure(NULL);
-
-#if RR_LB
- for (i = 0; i < MAX_SOCKS - 1; i++)
- xsks[num_socks++] = xsk_configure(xsks[0]->umem);
-#endif
+ if (opt_bench == BENCH_TXONLY) {
+ int i;
- /* ...and insert them into the map. */
- for (i = 0; i < num_socks; i++) {
- key = i;
- ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0);
- if (ret) {
- fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
- exit(EXIT_FAILURE);
- }
+ for (i = 0; i < NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
+ i += XSK_UMEM__DEFAULT_FRAME_SIZE)
+ (void)gen_eth_frame(umem, i);
}
signal(SIGINT, int_exit);
@@ -981,7 +685,8 @@ int main(int argc, char **argv)
setlocale(LC_ALL, "");
ret = pthread_create(&pt, NULL, poller, NULL);
- lassert(ret == 0);
+ if (ret)
+ exit_with_error(ret);
prev_time = get_nsecs();
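
As a companion to the conversion above, the xsk_* setup sequence it relies on (umem registration, socket creation, initial fill-ring population) can be sketched on its own. This is a hedged sketch, not the sample itself: the interface name "eth0", queue 0, the struct and function names, and the reduced error handling are placeholders, and <bpf/xsk.h> stands in for the in-tree "bpf/xsk.h".

#include <stdlib.h>
#include <unistd.h>
#include <linux/types.h>
#include <bpf/xsk.h>

#define NFRAMES (4 * 1024)

struct xsk_ctx {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_umem *umem;
	struct xsk_socket *xsk;
	void *buf;
};

static int xsk_setup(struct xsk_ctx *c)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
	};
	__u32 idx;
	int i;

	if (posix_memalign(&c->buf, getpagesize(),
			   NFRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE))
		return -1;

	/* register the buffer as a umem; fq/cq rings are mapped by libbpf */
	if (xsk_umem__create(&c->umem, c->buf,
			     NFRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE,
			     &c->fq, &c->cq, NULL))
		return -1;

	/* create the AF_XDP socket; with libbpf_flags == 0 libbpf also
	 * loads and attaches its built-in XDP program and XSKMAP
	 */
	if (xsk_socket__create(&c->xsk, "eth0", 0, c->umem,
			       &c->rx, &c->tx, &cfg))
		return -1;

	/* hand the kernel an initial batch of receive buffers */
	if (xsk_ring_prod__reserve(&c->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS,
				   &idx) != XSK_RING_PROD__DEFAULT_NUM_DESCS)
		return -1;
	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
		*xsk_ring_prod__fill_addr(&c->fq, idx++) =
			(__u64)i * XSK_UMEM__DEFAULT_FRAME_SIZE;
	xsk_ring_prod__submit(&c->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);

	return 0;
}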
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index 33e67bd1dc34..32234481ad7d 100644
--- a/samples/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
me->verbose = verbose;
- me->fd = open("/dev/mei", O_RDWR);
+ me->fd = open("/dev/mei0", O_RDWR);
if (me->fd == -1) {
mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
goto err;
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index 4920903c8009..fb43a814d4c0 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -34,6 +34,7 @@ HOSTCFLAGS_bpf-direct.o += $(MFLAG)
HOSTCFLAGS_dropper.o += $(MFLAG)
HOSTCFLAGS_bpf-helper.o += $(MFLAG)
HOSTCFLAGS_bpf-fancy.o += $(MFLAG)
+HOSTCFLAGS_user-trap.o += $(MFLAG)
HOSTLDLIBS_bpf-direct += $(MFLAG)
HOSTLDLIBS_bpf-fancy += $(MFLAG)
HOSTLDLIBS_dropper += $(MFLAG)
diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
index ca7960adf5a3..b038aa9f5a70 100644
--- a/samples/vfio-mdev/mbochs.c
+++ b/samples/vfio-mdev/mbochs.c
@@ -1448,13 +1448,13 @@ static int __init mbochs_dev_init(void)
{
int ret = 0;
- ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK, MBOCHS_NAME);
+ ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK + 1, MBOCHS_NAME);
if (ret < 0) {
pr_err("Error: failed to register mbochs_dev, err: %d\n", ret);
return ret;
}
cdev_init(&mbochs_cdev, &vd_fops);
- cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK);
+ cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK + 1);
pr_info("%s: major %d\n", __func__, MAJOR(mbochs_devt));
mbochs_class = class_create(THIS_MODULE, MBOCHS_CLASS_NAME);
@@ -1483,7 +1483,7 @@ failed2:
class_destroy(mbochs_class);
failed1:
cdev_del(&mbochs_cdev);
- unregister_chrdev_region(mbochs_devt, MINORMASK);
+ unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
return ret;
}
@@ -1494,7 +1494,7 @@ static void __exit mbochs_dev_exit(void)
device_unregister(&mbochs_dev);
cdev_del(&mbochs_cdev);
- unregister_chrdev_region(mbochs_devt, MINORMASK);
+ unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
class_destroy(mbochs_class);
mbochs_class = NULL;
}
diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c
index 96e7969c473a..cc86bf6566e4 100644
--- a/samples/vfio-mdev/mdpy.c
+++ b/samples/vfio-mdev/mdpy.c
@@ -752,13 +752,13 @@ static int __init mdpy_dev_init(void)
{
int ret = 0;
- ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK, MDPY_NAME);
+ ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
if (ret < 0) {
pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
return ret;
}
cdev_init(&mdpy_cdev, &vd_fops);
- cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK);
+ cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));
mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
@@ -787,7 +787,7 @@ failed2:
class_destroy(mdpy_class);
failed1:
cdev_del(&mdpy_cdev);
- unregister_chrdev_region(mdpy_devt, MINORMASK);
+ unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
return ret;
}
@@ -798,7 +798,7 @@ static void __exit mdpy_dev_exit(void)
device_unregister(&mdpy_dev);
cdev_del(&mdpy_cdev);
- unregister_chrdev_region(mdpy_devt, MINORMASK);
+ unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
class_destroy(mdpy_class);
mdpy_class = NULL;
}
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index f6732aa16bb1..1c77c370c92f 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -156,15 +156,15 @@ static const struct file_operations vd_fops = {
/* function prototypes */
-static int mtty_trigger_interrupt(uuid_le uuid);
+static int mtty_trigger_interrupt(const guid_t *uuid);
/* Helper functions */
-static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
+static struct mdev_state *find_mdev_state_by_uuid(const guid_t *uuid)
{
struct mdev_state *mds;
list_for_each_entry(mds, &mdev_devices_list, next) {
- if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
+ if (guid_equal(mdev_uuid(mds->mdev), uuid))
return mds;
}
@@ -1032,7 +1032,7 @@ static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
return ret;
}
-static int mtty_trigger_interrupt(uuid_le uuid)
+static int mtty_trigger_interrupt(const guid_t *uuid)
{
int ret = -1;
struct mdev_state *mdev_state;
@@ -1442,7 +1442,8 @@ static int __init mtty_dev_init(void)
idr_init(&mtty_dev.vd_idr);
- ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
+ ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
+ MTTY_NAME);
if (ret < 0) {
pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
@@ -1450,7 +1451,7 @@ static int __init mtty_dev_init(void)
}
cdev_init(&mtty_dev.vd_cdev, &vd_fops);
- cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);
+ cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);
pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
@@ -1487,7 +1488,7 @@ failed2:
failed1:
cdev_del(&mtty_dev.vd_cdev);
- unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
+ unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
all_done:
return ret;
@@ -1501,7 +1502,7 @@ static void __exit mtty_dev_exit(void)
device_unregister(&mtty_dev.dev);
idr_destroy(&mtty_dev.vd_idr);
cdev_del(&mtty_dev.vd_cdev);
- unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
+ unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
class_destroy(mtty_dev.vd_class);
mtty_dev.vd_class = NULL;
pr_info("mtty_dev: Unloaded!\n");
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 525bff667a52..30816037036e 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d)
basetarget = $(basename $(notdir $@))
###
-# filename of first prerequisite with directory and extension stripped
-baseprereq = $(basename $(notdir $<))
-
-###
# Escape single quote for use in echo statements
escsq = $(subst $(squote),'\$(squote)',$1)
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 25c259df8ffa..6410bd22fe38 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -26,15 +26,10 @@ else
CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
$(call cc-param,asan-globals=1) \
$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
- $(call cc-param,asan-stack=1) \
- $(call cc-param,asan-use-after-scope=1) \
+ $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
$(call cc-param,asan-instrument-allocas=1)
endif
-ifdef CONFIG_KASAN_EXTRA
-CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope)
-endif
-
endif # CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_SW_TAGS
diff --git a/scripts/atomic/atomic-tbl.sh b/scripts/atomic/atomic-tbl.sh
new file mode 100755
index 000000000000..81d5c32039dd
--- /dev/null
+++ b/scripts/atomic/atomic-tbl.sh
@@ -0,0 +1,186 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# helpers for dealing with atomics.tbl
+
+#meta_in(meta, match)
+meta_in()
+{
+ case "$1" in
+ [$2]) return 0;;
+ esac
+
+ return 1
+}
+
+#meta_has_ret(meta)
+meta_has_ret()
+{
+ meta_in "$1" "bBiIfFlR"
+}
+
+#meta_has_acquire(meta)
+meta_has_acquire()
+{
+ meta_in "$1" "BFIlR"
+}
+
+#meta_has_release(meta)
+meta_has_release()
+{
+ meta_in "$1" "BFIRs"
+}
+
+#meta_has_relaxed(meta)
+meta_has_relaxed()
+{
+ meta_in "$1" "BFIR"
+}
+
+#find_fallback_template(pfx, name, sfx, order)
+find_fallback_template()
+{
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local order="$1"; shift
+
+ local base=""
+ local file=""
+
+ # We may have fallbacks for a specific case (e.g. read_acquire()), or
+ # an entire class, e.g. *inc*().
+ #
+ # Start at the most specific, and fall back to the most general. Once
+ # we find a specific fallback, don't bother looking for more.
+ for base in "${pfx}${name}${sfx}${order}" "${name}"; do
+ file="${ATOMICDIR}/fallbacks/${base}"
+
+ if [ -f "${file}" ]; then
+ printf "${file}"
+ break
+ fi
+ done
+}
+
+#gen_ret_type(meta, int)
+gen_ret_type() {
+ local meta="$1"; shift
+ local int="$1"; shift
+
+ case "${meta}" in
+ [sv]) printf "void";;
+ [bB]) printf "bool";;
+ [aiIfFlR]) printf "${int}";;
+ esac
+}
+
+#gen_ret_stmt(meta)
+gen_ret_stmt()
+{
+ if meta_has_ret "${meta}"; then
+ printf "return ";
+ fi
+}
+
+# gen_param_name(arg)
+gen_param_name()
+{
+ # strip off the leading 'c' for 'cv'
+ local name="${1#c}"
+ printf "${name#*:}"
+}
+
+# gen_param_type(arg, int, atomic)
+gen_param_type()
+{
+ local type="${1%%:*}"; shift
+ local int="$1"; shift
+ local atomic="$1"; shift
+
+ case "${type}" in
+ i) type="${int} ";;
+ p) type="${int} *";;
+ v) type="${atomic}_t *";;
+ cv) type="const ${atomic}_t *";;
+ esac
+
+ printf "${type}"
+}
+
+#gen_param(arg, int, atomic)
+gen_param()
+{
+ local arg="$1"; shift
+ local int="$1"; shift
+ local atomic="$1"; shift
+ local name="$(gen_param_name "${arg}")"
+ local type="$(gen_param_type "${arg}" "${int}" "${atomic}")"
+
+ printf "${type}${name}"
+}
+
+#gen_params(int, atomic, arg...)
+gen_params()
+{
+ local int="$1"; shift
+ local atomic="$1"; shift
+
+ while [ "$#" -gt 0 ]; do
+ gen_param "$1" "${int}" "${atomic}"
+ [ "$#" -gt 1 ] && printf ", "
+ shift;
+ done
+}
+
+#gen_args(arg...)
+gen_args()
+{
+ while [ "$#" -gt 0 ]; do
+ printf "$(gen_param_name "$1")"
+ [ "$#" -gt 1 ] && printf ", "
+ shift;
+ done
+}
+
+#gen_proto_order_variants(meta, pfx, name, sfx, ...)
+gen_proto_order_variants()
+{
+ local meta="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+ if meta_has_acquire "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+ fi
+ if meta_has_release "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+ fi
+ if meta_has_relaxed "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+ fi
+}
+
+#gen_proto_variants(meta, name, ...)
+gen_proto_variants()
+{
+ local meta="$1"; shift
+ local name="$1"; shift
+ local pfx=""
+ local sfx=""
+
+ meta_in "${meta}" "fF" && pfx="fetch_"
+ meta_in "${meta}" "R" && sfx="_return"
+
+ gen_proto_order_variants "${meta}" "${pfx}" "${name}" "${sfx}" "$@"
+}
+
+#gen_proto(meta, ...)
+gen_proto() {
+ local meta="$1"; shift
+ for m in $(echo "${meta}" | grep -o .); do
+ gen_proto_variants "${m}" "$@"
+ done
+}
diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl
new file mode 100755
index 000000000000..fbee2f6190d9
--- /dev/null
+++ b/scripts/atomic/atomics.tbl
@@ -0,0 +1,41 @@
+# name meta args...
+#
+# Where meta contains a string of variants to generate.
+# Upper-case implies _{acquire,release,relaxed} variants.
+# Valid meta values are:
+# * B/b - bool: returns bool
+# * v - void: returns void
+# * I/i - int: returns base type
+# * R - return: returns base type (has _return variants)
+# * F/f - fetch: returns base type (has fetch_ variants)
+# * l - load: returns base type (has _acquire order variant)
+# * s - store: returns void (has _release order variant)
+#
+# Where args contains list of type[:name], where type is:
+# * cv - const pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
+# * v - pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
+# * i - base type (int/s64/long)
+# * p - pointer to base type (int/s64/long)
+#
+read l cv
+set s v i
+add vRF i v
+sub vRF i v
+inc vRF v
+dec vRF v
+and vF i v
+andnot vF i v
+or vF i v
+xor vF i v
+xchg I v i
+cmpxchg I v i:old i:new
+try_cmpxchg B v p:old i:new
+sub_and_test b i v
+dec_and_test b v
+inc_and_test b v
+add_negative b i v
+add_unless fb v i:a i:u
+inc_not_zero b v
+inc_unless_negative b v
+dec_unless_positive b v
+dec_if_positive i v
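
To make the encoding described in the header comments concrete, here is one worked reading of a single table line. The bare prototypes below are only a sketch of the name set and signatures implied; the actual headers are emitted by the gen-atomic-*.sh scripts further down as inline wrappers and fallbacks, not as declarations.

/*
 * Worked example for "add vRF i v" in the 32-bit atomic_t case: 'v'
 * gives a void-returning op, upper-case 'R' the _return family and
 * upper-case 'F' the fetch_ family, each with the full set of ordering
 * suffixes; args "i v" map to (int i, atomic_t *v).
 */
void atomic_add(int i, atomic_t *v);

int atomic_add_return(int i, atomic_t *v);
int atomic_add_return_acquire(int i, atomic_t *v);
int atomic_add_return_release(int i, atomic_t *v);
int atomic_add_return_relaxed(int i, atomic_t *v);

int atomic_fetch_add(int i, atomic_t *v);
int atomic_fetch_add_acquire(int i, atomic_t *v);
int atomic_fetch_add_release(int i, atomic_t *v);
int atomic_fetch_add_relaxed(int i, atomic_t *v);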
diff --git a/scripts/atomic/check-atomics.sh b/scripts/atomic/check-atomics.sh
new file mode 100755
index 000000000000..cfa0c2f71c84
--- /dev/null
+++ b/scripts/atomic/check-atomics.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check if atomic headers are up-to-date
+
+ATOMICDIR=$(dirname $0)
+ATOMICTBL=${ATOMICDIR}/atomics.tbl
+LINUXDIR=${ATOMICDIR}/../..
+
+echo '' | sha1sum - > /dev/null 2>&1
+if [ $? -ne 0 ]; then
+ printf "sha1sum not available, skipping atomic header checks.\n"
+ exit 0
+fi
+
+cat <<EOF |
+asm-generic/atomic-instrumented.h
+asm-generic/atomic-long.h
+linux/atomic-fallback.h
+EOF
+while read header; do
+ OLDSUM="$(tail -n 1 ${LINUXDIR}/include/${header})"
+ OLDSUM="${OLDSUM#// }"
+
+ NEWSUM="$(head -n -1 ${LINUXDIR}/include/${header} | sha1sum)"
+ NEWSUM="${NEWSUM%% *}"
+
+ if [ "${OLDSUM}" != "${NEWSUM}" ]; then
+ printf "warning: generated include/${header} has been modified.\n"
+ fi
+done
+
+exit 0
diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire
new file mode 100755
index 000000000000..e38871e64db6
--- /dev/null
+++ b/scripts/atomic/fallbacks/acquire
@@ -0,0 +1,9 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}${name}${sfx}_acquire(${params})
+{
+ ${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+ __atomic_acquire_fence();
+ return ret;
+}
+EOF
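
Instantiated for one concrete operation, the acquire template above reads roughly as follows; atomic_fetch_add is chosen purely for illustration, and the surrounding #ifndef/#define guard is the wrapper that gen-atomic-fallback.sh (further down) places around every fallback it emits.

/*
 * Example expansion of the acquire fallback for atomic_fetch_add(),
 * used when an architecture supplies only atomic_fetch_add_relaxed().
 */
#ifndef atomic_fetch_add_acquire
static inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
	int ret = atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#endif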
diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative
new file mode 100755
index 000000000000..e6f4815637de
--- /dev/null
+++ b/scripts/atomic/fallbacks/add_negative
@@ -0,0 +1,16 @@
+cat <<EOF
+/**
+ * ${atomic}_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline bool
+${atomic}_add_negative(${int} i, ${atomic}_t *v)
+{
+ return ${atomic}_add_return(i, v) < 0;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
new file mode 100755
index 000000000000..792533885fbf
--- /dev/null
+++ b/scripts/atomic/fallbacks/add_unless
@@ -0,0 +1,16 @@
+cat << EOF
+/**
+ * ${atomic}_add_unless - add unless the number is already a given value
+ * @v: pointer of type ${atomic}_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool
+${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+{
+ return ${atomic}_fetch_add_unless(v, a, u) != u;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/andnot b/scripts/atomic/fallbacks/andnot
new file mode 100755
index 000000000000..9f3a3216b5e3
--- /dev/null
+++ b/scripts/atomic/fallbacks/andnot
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+{
+ ${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/dec b/scripts/atomic/fallbacks/dec
new file mode 100755
index 000000000000..10bbc82be31d
--- /dev/null
+++ b/scripts/atomic/fallbacks/dec
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+{
+ ${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/dec_and_test b/scripts/atomic/fallbacks/dec_and_test
new file mode 100755
index 000000000000..0ce7103b3df2
--- /dev/null
+++ b/scripts/atomic/fallbacks/dec_and_test
@@ -0,0 +1,15 @@
+cat <<EOF
+/**
+ * ${atomic}_dec_and_test - decrement and test
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline bool
+${atomic}_dec_and_test(${atomic}_t *v)
+{
+ return ${atomic}_dec_return(v) == 0;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive
new file mode 100755
index 000000000000..c52eacec43c8
--- /dev/null
+++ b/scripts/atomic/fallbacks/dec_if_positive
@@ -0,0 +1,15 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_dec_if_positive(${atomic}_t *v)
+{
+ ${int} dec, c = ${atomic}_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!${atomic}_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive
new file mode 100755
index 000000000000..8a2578f14268
--- /dev/null
+++ b/scripts/atomic/fallbacks/dec_unless_positive
@@ -0,0 +1,14 @@
+cat <<EOF
+static inline bool
+${atomic}_dec_unless_positive(${atomic}_t *v)
+{
+ ${int} c = ${atomic}_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!${atomic}_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence
new file mode 100755
index 000000000000..82f68fa6931a
--- /dev/null
+++ b/scripts/atomic/fallbacks/fence
@@ -0,0 +1,11 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}${name}${sfx}(${params})
+{
+ ${ret} ret;
+ __atomic_pre_full_fence();
+ ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+ __atomic_post_full_fence();
+ return ret;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
new file mode 100755
index 000000000000..d2c091db7eae
--- /dev/null
+++ b/scripts/atomic/fallbacks/fetch_add_unless
@@ -0,0 +1,23 @@
+cat << EOF
+/**
+ * ${atomic}_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type ${atomic}_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns original value of @v
+ */
+static inline ${int}
+${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+{
+ ${int} c = ${atomic}_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!${atomic}_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/inc b/scripts/atomic/fallbacks/inc
new file mode 100755
index 000000000000..f866b3ad2353
--- /dev/null
+++ b/scripts/atomic/fallbacks/inc
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+{
+ ${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/inc_and_test b/scripts/atomic/fallbacks/inc_and_test
new file mode 100755
index 000000000000..4e2068869f7e
--- /dev/null
+++ b/scripts/atomic/fallbacks/inc_and_test
@@ -0,0 +1,15 @@
+cat <<EOF
+/**
+ * ${atomic}_inc_and_test - increment and test
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+${atomic}_inc_and_test(${atomic}_t *v)
+{
+ return ${atomic}_inc_return(v) == 0;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/inc_not_zero b/scripts/atomic/fallbacks/inc_not_zero
new file mode 100755
index 000000000000..a7c45c8d107c
--- /dev/null
+++ b/scripts/atomic/fallbacks/inc_not_zero
@@ -0,0 +1,14 @@
+cat <<EOF
+/**
+ * ${atomic}_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static inline bool
+${atomic}_inc_not_zero(${atomic}_t *v)
+{
+ return ${atomic}_add_unless(v, 1, 0);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative
new file mode 100755
index 000000000000..0c266e71dbd4
--- /dev/null
+++ b/scripts/atomic/fallbacks/inc_unless_negative
@@ -0,0 +1,14 @@
+cat <<EOF
+static inline bool
+${atomic}_inc_unless_negative(${atomic}_t *v)
+{
+ ${int} c = ${atomic}_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!${atomic}_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire
new file mode 100755
index 000000000000..75863b5203f7
--- /dev/null
+++ b/scripts/atomic/fallbacks/read_acquire
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_read_acquire(const ${atomic}_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release
new file mode 100755
index 000000000000..3f628a3802d9
--- /dev/null
+++ b/scripts/atomic/fallbacks/release
@@ -0,0 +1,8 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}${name}${sfx}_release(${params})
+{
+ __atomic_release_fence();
+ ${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+}
+EOF
diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release
new file mode 100755
index 000000000000..45bb5e0cfc08
--- /dev/null
+++ b/scripts/atomic/fallbacks/set_release
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline void
+${atomic}_set_release(${atomic}_t *v, ${int} i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test
new file mode 100755
index 000000000000..289ef17a2d7a
--- /dev/null
+++ b/scripts/atomic/fallbacks/sub_and_test
@@ -0,0 +1,16 @@
+cat <<EOF
+/**
+ * ${atomic}_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+{
+ return ${atomic}_sub_return(i, v) == 0;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/try_cmpxchg b/scripts/atomic/fallbacks/try_cmpxchg
new file mode 100755
index 000000000000..4ed85e2f5378
--- /dev/null
+++ b/scripts/atomic/fallbacks/try_cmpxchg
@@ -0,0 +1,11 @@
+cat <<EOF
+static inline bool
+${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+{
+ ${int} r, o = *old;
+ r = ${atomic}_cmpxchg${order}(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+EOF
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
new file mode 100755
index 000000000000..1bd7c1707633
--- /dev/null
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -0,0 +1,181 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+ATOMICDIR=$(dirname $0)
+
+. ${ATOMICDIR}/atomic-tbl.sh
+
+#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
+gen_template_fallback()
+{
+ local template="$1"; shift
+ local meta="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local order="$1"; shift
+ local atomic="$1"; shift
+ local int="$1"; shift
+
+ local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
+
+ local ret="$(gen_ret_type "${meta}" "${int}")"
+ local retstmt="$(gen_ret_stmt "${meta}")"
+ local params="$(gen_params "${int}" "${atomic}" "$@")"
+ local args="$(gen_args "$@")"
+
+ if [ ! -z "${template}" ]; then
+ printf "#ifndef ${atomicname}\n"
+ . ${template}
+ printf "#define ${atomicname} ${atomicname}\n"
+ printf "#endif\n\n"
+ fi
+}
+
+#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
+gen_proto_fallback()
+{
+ local meta="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local order="$1"; shift
+
+ local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+ gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
+}
+
+#gen_basic_fallbacks(basename)
+gen_basic_fallbacks()
+{
+ local basename="$1"; shift
+cat << EOF
+#define ${basename}_acquire ${basename}
+#define ${basename}_release ${basename}
+#define ${basename}_relaxed ${basename}
+EOF
+}
+
+#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
+gen_proto_order_variants()
+{
+ local meta="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local atomic="$1"
+
+ local basename="${atomic}_${pfx}${name}${sfx}"
+
+ local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+
+ # If we don't have relaxed atomics, then we don't bother with ordering fallbacks
+ # read_acquire and set_release need to be templated, though
+ if ! meta_has_relaxed "${meta}"; then
+ gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+ if meta_has_acquire "${meta}"; then
+ gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+ fi
+
+ if meta_has_release "${meta}"; then
+ gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+ fi
+
+ return
+ fi
+
+ printf "#ifndef ${basename}_relaxed\n"
+
+ if [ ! -z "${template}" ]; then
+ printf "#ifdef ${basename}\n"
+ fi
+
+ gen_basic_fallbacks "${basename}"
+
+ if [ ! -z "${template}" ]; then
+ printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n"
+ gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+ gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+ gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+ gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+ fi
+
+ printf "#else /* ${basename}_relaxed */\n\n"
+
+ gen_template_fallback "${ATOMICDIR}/fallbacks/acquire" "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+ gen_template_fallback "${ATOMICDIR}/fallbacks/release" "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+ gen_template_fallback "${ATOMICDIR}/fallbacks/fence" "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+ printf "#endif /* ${basename}_relaxed */\n\n"
+}
+
+gen_xchg_fallbacks()
+{
+ local xchg="$1"; shift
+cat <<EOF
+#ifndef ${xchg}_relaxed
+#define ${xchg}_relaxed ${xchg}
+#define ${xchg}_acquire ${xchg}
+#define ${xchg}_release ${xchg}
+#else /* ${xchg}_relaxed */
+
+#ifndef ${xchg}_acquire
+#define ${xchg}_acquire(...) \\
+ __atomic_op_acquire(${xchg}, __VA_ARGS__)
+#endif
+
+#ifndef ${xchg}_release
+#define ${xchg}_release(...) \\
+ __atomic_op_release(${xchg}, __VA_ARGS__)
+#endif
+
+#ifndef ${xchg}
+#define ${xchg}(...) \\
+ __atomic_op_fence(${xchg}, __VA_ARGS__)
+#endif
+
+#endif /* ${xchg}_relaxed */
+
+EOF
+}
+
+cat << EOF
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by $0
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+EOF
+
+for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
+ gen_xchg_fallbacks "${xchg}"
+done
+
+grep '^[a-z]' "$1" | while read name meta args; do
+ gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+done
+
+cat <<EOF
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+ gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+done
+
+cat <<EOF
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+EOF
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
new file mode 100755
index 000000000000..e09812372b17
--- /dev/null
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -0,0 +1,182 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+ATOMICDIR=$(dirname $0)
+
+. ${ATOMICDIR}/atomic-tbl.sh
+
+#gen_param_check(arg)
+gen_param_check()
+{
+ local arg="$1"; shift
+ local type="${arg%%:*}"
+ local name="$(gen_param_name "${arg}")"
+ local rw="write"
+
+ case "${type#c}" in
+ i) return;;
+ esac
+
+ # We don't write to constant parameters
+ [ ${type#c} != ${type} ] && rw="read"
+
+ printf "\tkasan_check_${rw}(${name}, sizeof(*${name}));\n"
+}
+
+#gen_param_check(arg...)
+gen_params_checks()
+{
+ while [ "$#" -gt 0 ]; do
+ gen_param_check "$1"
+ shift;
+ done
+}
+
+# gen_guard(meta, atomic, pfx, name, sfx, order)
+gen_guard()
+{
+ local meta="$1"; shift
+ local atomic="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local order="$1"; shift
+
+ local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
+
+ local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+
+ # We definitely need a preprocessor symbol for this atomic if it is an
+ # ordering variant, or if there's a generic fallback.
+ if [ ! -z "${order}" ] || [ ! -z "${template}" ]; then
+ printf "defined(${atomicname})"
+ return
+ fi
+
+ # If this is a base variant, but a relaxed variant *may* exist, then we
+ # only have a preprocessor symbol if the relaxed variant isn't defined
+ if meta_has_relaxed "${meta}"; then
+ printf "!defined(${atomicname}_relaxed) || defined(${atomicname})"
+ fi
+}
+
+#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
+gen_proto_order_variant()
+{
+ local meta="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local order="$1"; shift
+ local atomic="$1"; shift
+ local int="$1"; shift
+
+ local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
+
+ local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")"
+
+ local ret="$(gen_ret_type "${meta}" "${int}")"
+ local params="$(gen_params "${int}" "${atomic}" "$@")"
+ local checks="$(gen_params_checks "$@")"
+ local args="$(gen_args "$@")"
+ local retstmt="$(gen_ret_stmt "${meta}")"
+
+ [ ! -z "${guard}" ] && printf "#if ${guard}\n"
+
+cat <<EOF
+static inline ${ret}
+${atomicname}(${params})
+{
+${checks}
+ ${retstmt}arch_${atomicname}(${args});
+}
+#define ${atomicname} ${atomicname}
+EOF
+
+ [ ! -z "${guard}" ] && printf "#endif\n"
+
+ printf "\n"
+}
+
+gen_xchg()
+{
+ local xchg="$1"; shift
+ local mult="$1"; shift
+
+cat <<EOF
+#define ${xchg}(ptr, ...) \\
+({ \\
+ typeof(ptr) __ai_ptr = (ptr); \\
+ kasan_check_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
+ arch_${xchg}(__ai_ptr, __VA_ARGS__); \\
+})
+EOF
+}
+
+gen_optional_xchg()
+{
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local guard="defined(arch_${name}${sfx})"
+
+ [ -z "${sfx}" ] && guard="!defined(arch_${name}_relaxed) || defined(arch_${name})"
+
+ printf "#if ${guard}\n"
+ gen_xchg "${name}${sfx}" ""
+ printf "#endif\n\n"
+}
+
+cat << EOF
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by $0
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality an arch's atomic.h file needs to define all
+ * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file provides atomic_read() that forwards to
+ * arch_atomic_read() for actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
+#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/kasan-checks.h>
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+ gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+done
+
+grep '^[a-z]' "$1" | while read name meta args; do
+ gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+done
+
+for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
+ for order in "" "_acquire" "_release" "_relaxed"; do
+ gen_optional_xchg "${xchg}" "${order}"
+ done
+done
+
+for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg"; do
+ gen_xchg "${xchg}" ""
+ printf "\n"
+done
+
+gen_xchg "cmpxchg_double" "2 * "
+
+printf "\n\n"
+
+gen_xchg "cmpxchg_double_local" "2 * "
+
+cat <<EOF
+
+#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
+EOF
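[Editor's note] As the header comment in the generated file explains, an architecture opts in by defining arch_-prefixed operations and including the generated header last. A minimal, hypothetical sketch of that pattern (the arch_atomic_read() body is illustrative only, not any particular architecture's implementation):

/* in some arch/<arch>/include/asm/atomic.h -- sketch only */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);	/* raw, uninstrumented access */
}
/* ... all other arch_atomic*() / arch_cmpxchg*() operations ... */

/* included last: provides the instrumented atomic_read() and friends,
 * each of which runs kasan_check_*() and forwards to the arch_ op */
#include <asm-generic/atomic-instrumented.h>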
diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh
new file mode 100755
index 000000000000..c240a7231b2e
--- /dev/null
+++ b/scripts/atomic/gen-atomic-long.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+ATOMICDIR=$(dirname $0)
+
+. ${ATOMICDIR}/atomic-tbl.sh
+
+#gen_cast(arg, int, atomic)
+gen_cast()
+{
+ local arg="$1"; shift
+ local int="$1"; shift
+ local atomic="$1"; shift
+
+ [ "${arg%%:*}" = "p" ] || return
+
+ printf "($(gen_param_type "${arg}" "${int}" "${atomic}"))"
+}
+
+#gen_args_cast(int, atomic, arg...)
+gen_args_cast()
+{
+ local int="$1"; shift
+ local atomic="$1"; shift
+
+ while [ "$#" -gt 0 ]; do
+ local cast="$(gen_cast "$1" "${int}" "${atomic}")"
+ local arg="$(gen_param_name "$1")"
+ printf "${cast}${arg}"
+ [ "$#" -gt 1 ] && printf ", "
+ shift;
+ done
+}
+
+#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
+gen_proto_order_variant()
+{
+ local meta="$1"; shift
+ local name="$1$2$3$4"; shift; shift; shift; shift
+ local atomic="$1"; shift
+ local int="$1"; shift
+
+ local ret="$(gen_ret_type "${meta}" "long")"
+ local params="$(gen_params "long" "atomic_long" "$@")"
+ local argscast="$(gen_args_cast "${int}" "${atomic}" "$@")"
+ local retstmt="$(gen_ret_stmt "${meta}")"
+
+cat <<EOF
+static inline ${ret}
+atomic_long_${name}(${params})
+{
+ ${retstmt}${atomic}_${name}(${argscast});
+}
+
+EOF
+}
+
+cat << EOF
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by $0
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _ASM_GENERIC_ATOMIC_LONG_H
+#define _ASM_GENERIC_ATOMIC_LONG_H
+
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
+#endif
+
+#ifdef CONFIG_64BIT
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+ gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+done
+
+cat <<EOF
+#else /* CONFIG_64BIT */
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+ gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+done
+
+cat <<EOF
+#endif /* CONFIG_64BIT */
+#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+EOF
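[Editor's note] For illustration, the forwarder shape produced by gen_proto_order_variant() above looks like this for atomic_long_read() in the CONFIG_64BIT branch (one example; the generated asm-generic/atomic-long.h contains one such wrapper per operation and ordering variant):

static inline long
atomic_long_read(const atomic_long_t *v)
{
	return atomic64_read(v);
}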
diff --git a/scripts/atomic/gen-atomics.sh b/scripts/atomic/gen-atomics.sh
new file mode 100644
index 000000000000..27400b0cd732
--- /dev/null
+++ b/scripts/atomic/gen-atomics.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate atomic headers
+
+ATOMICDIR=$(dirname $0)
+ATOMICTBL=${ATOMICDIR}/atomics.tbl
+LINUXDIR=${ATOMICDIR}/../..
+
+cat <<EOF |
+gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h
+gen-atomic-long.sh asm-generic/atomic-long.h
+gen-atomic-fallback.sh linux/atomic-fallback.h
+EOF
+while read script header; do
+ ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
+ HASH="$(sha1sum ${LINUXDIR}/include/${header})"
+ HASH="${HASH%% *}"
+ printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}
+done
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index cf931003395f..a18b47695f55 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -30,12 +30,14 @@ cat << EOF
#define __IGNORE_readlink /* readlinkat */
#define __IGNORE_symlink /* symlinkat */
#define __IGNORE_utimes /* futimesat */
-#if BITS_PER_LONG == 64
#define __IGNORE_stat /* fstatat */
#define __IGNORE_lstat /* fstatat */
-#else
#define __IGNORE_stat64 /* fstatat64 */
#define __IGNORE_lstat64 /* fstatat64 */
+
+#ifndef __ARCH_WANT_SET_GET_RLIMIT
+#define __IGNORE_getrlimit /* getrlimit */
+#define __IGNORE_setrlimit /* setrlimit */
#endif
/* Missing flags argument */
@@ -84,6 +86,26 @@ cat << EOF
#define __IGNORE_statfs64
#define __IGNORE_llseek
#define __IGNORE_mmap2
+#define __IGNORE_clock_gettime64
+#define __IGNORE_clock_settime64
+#define __IGNORE_clock_adjtime64
+#define __IGNORE_clock_getres_time64
+#define __IGNORE_clock_nanosleep_time64
+#define __IGNORE_timer_gettime64
+#define __IGNORE_timer_settime64
+#define __IGNORE_timerfd_gettime64
+#define __IGNORE_timerfd_settime64
+#define __IGNORE_utimensat_time64
+#define __IGNORE_pselect6_time64
+#define __IGNORE_ppoll_time64
+#define __IGNORE_io_pgetevents_time64
+#define __IGNORE_recvmmsg_time64
+#define __IGNORE_mq_timedsend_time64
+#define __IGNORE_mq_timedreceive_time64
+#define __IGNORE_semtimedop_time64
+#define __IGNORE_rt_sigtimedwait_time64
+#define __IGNORE_futex_time64
+#define __IGNORE_sched_rr_get_interval_time64
#else
#define __IGNORE_sendfile
#define __IGNORE_ftruncate
@@ -98,6 +120,33 @@ cat << EOF
#define __IGNORE_statfs
#define __IGNORE_lseek
#define __IGNORE_mmap
+#define __IGNORE_clock_gettime
+#define __IGNORE_clock_settime
+#define __IGNORE_clock_adjtime
+#define __IGNORE_clock_getres
+#define __IGNORE_clock_nanosleep
+#define __IGNORE_timer_gettime
+#define __IGNORE_timer_settime
+#define __IGNORE_timerfd_gettime
+#define __IGNORE_timerfd_settime
+#define __IGNORE_utimensat
+#define __IGNORE_pselect6
+#define __IGNORE_ppoll
+#define __IGNORE_io_pgetevents
+#define __IGNORE_recvmmsg
+#define __IGNORE_mq_timedsend
+#define __IGNORE_mq_timedreceive
+#define __IGNORE_semtimedop
+#define __IGNORE_rt_sigtimedwait
+#define __IGNORE_futex
+#define __IGNORE_sched_rr_get_interval
+#define __IGNORE_gettimeofday
+#define __IGNORE_settimeofday
+#define __IGNORE_wait4
+#define __IGNORE_adjtimex
+#define __IGNORE_nanosleep
+#define __IGNORE_io_getevents
+#define __IGNORE_recvmmsg
#endif
/* i386-specific or historical system calls */
diff --git a/scripts/coccinelle/api/alloc/alloc_cast.cocci b/scripts/coccinelle/api/alloc/alloc_cast.cocci
index 408ee3879f9b..18fedf7c60ed 100644
--- a/scripts/coccinelle/api/alloc/alloc_cast.cocci
+++ b/scripts/coccinelle/api/alloc/alloc_cast.cocci
@@ -32,7 +32,7 @@ type T;
(T *)
\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
- dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\|
+ dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -55,7 +55,7 @@ type r1.T;
* (T *)
\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
- dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\|
+ dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -78,7 +78,7 @@ type r1.T;
- (T *)
\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
- dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\|
+ dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -95,7 +95,7 @@ position p;
(T@p *)
\(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
- dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\|
+ dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
diff --git a/scripts/coccinelle/api/alloc/zalloc-simple.cocci b/scripts/coccinelle/api/alloc/zalloc-simple.cocci
index d819275b7fde..5cd1991c582e 100644
--- a/scripts/coccinelle/api/alloc/zalloc-simple.cocci
+++ b/scripts/coccinelle/api/alloc/zalloc-simple.cocci
@@ -69,15 +69,6 @@ statement S;
- x = (T)vmalloc(E1);
+ x = (T)vzalloc(E1);
|
-- x = dma_alloc_coherent(E2,E1,E3,E4);
-+ x = dma_zalloc_coherent(E2,E1,E3,E4);
-|
-- x = (T *)dma_alloc_coherent(E2,E1,E3,E4);
-+ x = dma_zalloc_coherent(E2,E1,E3,E4);
-|
-- x = (T)dma_alloc_coherent(E2,E1,E3,E4);
-+ x = (T)dma_zalloc_coherent(E2,E1,E3,E4);
-|
- x = kmalloc_node(E1,E2,E3);
+ x = kzalloc_node(E1,E2,E3);
|
@@ -225,7 +216,7 @@ p << r2.p;
x << r2.x;
@@
-msg="WARNING: dma_zalloc_coherent should be used for %s, instead of dma_alloc_coherent/memset" % (x)
+msg="WARNING: dma_alloc_coherent use in %s already zeroes out memory, so memset is not needed" % (x)
coccilib.report.print_report(p[0], msg)
//-----------------------------------------------------------------
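[Editor's note] Both coccinelle updates above reflect the same underlying API change: dma_alloc_coherent() now returns zeroed memory, so the dma_zalloc_coherent() wrapper and any trailing memset() are redundant. A hypothetical sketch, with example_alloc(), dev, size and dma_handle as placeholder names:

static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle)
{
	/* dma_alloc_coherent() already returns zeroed memory, so no
	 * dma_zalloc_coherent() and no follow-up memset() are needed */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}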
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 98a7d63a723e..bcdd45df3f51 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -37,6 +37,13 @@ parse_symbol() {
symbol=${symbol#\(}
symbol=${symbol%\)}
+ # Strip segment
+ local segment
+ if [[ $symbol == *:* ]] ; then
+ segment=${symbol%%:*}:
+ symbol=${symbol#*:}
+ fi
+
# Strip the symbol name so that we could look it up
local name=${symbol%+*}
@@ -84,7 +91,7 @@ parse_symbol() {
code=${code//$'\n'/' '}
# Replace old address with pretty line numbers
- symbol="$name ($code)"
+ symbol="$segment$name ($code)"
}
decode_code() {
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index d45f7f36b859..d9fd9988ef27 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
@@ -68,10 +68,6 @@ config GCC_PLUGIN_LATENT_ENTROPY
config GCC_PLUGIN_STRUCTLEAK
bool "Force initialization of variables containing userspace addresses"
- # Currently STRUCTLEAK inserts initialization out of live scope of
- # variables from KASAN point of view. This leads to KASAN false
- # positive reports. Prohibit this combination for now.
- depends on !KASAN_EXTRA
help
This plugin zero-initializes any structures containing a
__user attribute. This can prevent some classes of information
diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
index de70b8470971..89c47f57d1ce 100644
--- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
+++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
@@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
const char *sym;
rtx body;
- rtx masked_sp;
+ rtx mask, masked_sp;
/*
* Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
@@ -33,12 +33,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
* produces the address of the copy of the stack canary value
* stored in struct thread_info
*/
+ mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
masked_sp = gen_reg_rtx(Pmode);
emit_insn_before(gen_rtx_SET(masked_sp,
gen_rtx_AND(Pmode,
stack_pointer_rtx,
- GEN_INT(sp_mask))),
+ mask)),
insn);
SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
@@ -52,6 +53,19 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
#define NO_GATE
#include "gcc-generate-rtl-pass.h"
+#if BUILDING_GCC_VERSION >= 9000
+static bool no(void)
+{
+ return false;
+}
+
+static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data)
+{
+ targetm.have_stack_protect_combined_set = no;
+ targetm.have_stack_protect_combined_test = no;
+}
+#endif
+
__visible int plugin_init(struct plugin_name_args *plugin_info,
struct plugin_gcc_version *version)
{
@@ -99,5 +113,10 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
NULL, &arm_pertask_ssp_rtl_pass_info);
+#if BUILDING_GCC_VERSION >= 9000
+ register_callback(plugin_info->base_name, PLUGIN_START_UNIT,
+ arm_pertask_ssp_start_unit, NULL);
+#endif
+
return 0;
}
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 77cebad0474e..f75e7bda4889 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -118,8 +118,8 @@ static int read_symbol(FILE *in, struct sym_entry *s)
fprintf(stderr, "Read error or end of file.\n");
return -1;
}
- if (strlen(sym) > KSYM_NAME_LEN) {
- fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n"
+ if (strlen(sym) >= KSYM_NAME_LEN) {
+ fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n"
"Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
sym, strlen(sym), KSYM_NAME_LEN);
return -1;
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index c05ab001b54c..181973509a05 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -206,4 +206,4 @@ filechk_conf_cfg = $(CONFIG_SHELL) $<
$(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE
$(call filechk,conf_cfg)
-clean-files += conf-cfg
+clean-files += *conf-cfg
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index 293004499b4d..160718383a71 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -225,5 +225,8 @@ int main(void)
DEVID_FIELD(typec_device_id, svid);
DEVID_FIELD(typec_device_id, mode);
+ DEVID(tee_client_device_id);
+ DEVID_FIELD(tee_client_device_id, uuid);
+
return 0;
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index a37af7d71973..d0e41723627f 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -37,6 +37,9 @@ typedef unsigned char __u8;
typedef struct {
__u8 b[16];
} uuid_le;
+typedef struct {
+ __u8 b[16];
+} uuid_t;
/* Big exception to the "don't include kernel headers into userspace, which
* even potentially has different endianness and word sizes, since
@@ -1287,6 +1290,21 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
return 1;
}
+/* Looks like: tee:uuid */
+static int do_tee_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD(symval, tee_client_device_id, uuid);
+
+ sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+ uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
+ uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
+ uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
+ uuid.b[15]);
+
+ add_wildcard(alias);
+ return 1;
+}
+
/* Does namelen bytes of name exactly match the symbol? */
static bool sym_is(const char *name, unsigned namelen, const char *symbol)
{
@@ -1357,6 +1375,7 @@ static const struct devtable devtable[] = {
{"fslmc", SIZE_fsl_mc_device_id, do_fsl_mc_entry},
{"tbsvc", SIZE_tb_service_id, do_tbsvc_entry},
{"typec", SIZE_typec_device_id, do_typec_entry},
+ {"tee", SIZE_tee_client_device_id, do_tee_entry},
};
/* Create MODULE_ALIAS() statements.
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0de2fb236640..26bf886bd168 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2185,7 +2185,7 @@ static void add_intree_flag(struct buffer *b, int is_intree)
/* Cannot check for assembler */
static void add_retpoline(struct buffer *b)
{
- buf_printf(b, "\n#ifdef RETPOLINE\n");
+ buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
buf_printf(b, "#endif\n");
}
diff --git a/scripts/ver_linux b/scripts/ver_linux
index a6c728db05ce..810e608baa24 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -13,6 +13,8 @@ BEGIN {
system("uname -a")
printf("\n")
+ vernum = "[0-9]+([.]?[0-9]+)+"
+
printversion("GNU C", version("gcc -dumpversion"))
printversion("GNU Make", version("make --version"))
printversion("Binutils", version("ld -v"))
@@ -34,7 +36,7 @@ BEGIN {
while (getline <"/proc/self/maps" > 0) {
if (/libc.*\.so$/) {
n = split($0, procmaps, "/")
- if (match(procmaps[n], /[0-9]+([.]?[0-9]+)+/)) {
+ if (match(procmaps[n], vernum)) {
ver = substr(procmaps[n], RSTART, RLENGTH)
printversion("Linux C Library", ver)
break
@@ -70,7 +72,7 @@ BEGIN {
function version(cmd, ver) {
cmd = cmd " 2>&1"
while (cmd | getline > 0) {
- if (match($0, /[0-9]+([.]?[0-9]+)+/)) {
+ if (match($0, vernum)) {
ver = substr($0, RSTART, RLENGTH)
break
}
diff --git a/security/Kconfig b/security/Kconfig
index e4fe2f3c2c65..1d6463fb1450 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -40,8 +40,7 @@ config SECURITYFS
bool "Enable the securityfs filesystem"
help
This will build the securityfs filesystem. It is currently used by
- the TPM bios character driver and IMA, an integrity provider. It is
- not used by SELinux or SMACK.
+ various security modules (AppArmor, IMA, SafeSetID, TOMOYO, TPM).
If you are unsure how to answer this question, answer N.
@@ -236,45 +235,19 @@ source "security/tomoyo/Kconfig"
source "security/apparmor/Kconfig"
source "security/loadpin/Kconfig"
source "security/yama/Kconfig"
+source "security/safesetid/Kconfig"
source "security/integrity/Kconfig"
-choice
- prompt "Default security module"
- default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
- default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
- default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
- default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
- default DEFAULT_SECURITY_DAC
-
+config LSM
+ string "Ordered list of enabled LSMs"
+ default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
help
- Select the security module that will be used by default if the
- kernel parameter security= is not specified.
-
- config DEFAULT_SECURITY_SELINUX
- bool "SELinux" if SECURITY_SELINUX=y
-
- config DEFAULT_SECURITY_SMACK
- bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
-
- config DEFAULT_SECURITY_TOMOYO
- bool "TOMOYO" if SECURITY_TOMOYO=y
-
- config DEFAULT_SECURITY_APPARMOR
- bool "AppArmor" if SECURITY_APPARMOR=y
-
- config DEFAULT_SECURITY_DAC
- bool "Unix Discretionary Access Controls"
-
-endchoice
+ A comma-separated list of LSMs, in initialization order.
+ Any LSMs left off this list will be ignored. This can be
+ controlled at boot with the "lsm=" parameter.
-config DEFAULT_SECURITY
- string
- default "selinux" if DEFAULT_SECURITY_SELINUX
- default "smack" if DEFAULT_SECURITY_SMACK
- default "tomoyo" if DEFAULT_SECURITY_TOMOYO
- default "apparmor" if DEFAULT_SECURITY_APPARMOR
- default "" if DEFAULT_SECURITY_DAC
+ If unsure, leave this as the default.
endmenu
diff --git a/security/Makefile b/security/Makefile
index 4d2d3782ddef..c598b904938f 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -10,6 +10,7 @@ subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
+subdir-$(CONFIG_SECURITY_SAFESETID) += safesetid
# always enable default capabilities
obj-y += commoncap.o
@@ -25,6 +26,7 @@ obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
+obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index b6b68a7750ce..3de21f46c82a 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -14,22 +14,6 @@ config SECURITY_APPARMOR
If you are unsure how to answer this question, answer N.
-config SECURITY_APPARMOR_BOOTPARAM_VALUE
- int "AppArmor boot parameter default value"
- depends on SECURITY_APPARMOR
- range 0 1
- default 1
- help
- This option sets the default value for the kernel parameter
- 'apparmor', which allows AppArmor to be enabled or disabled
- at boot. If this option is set to 0 (zero), the AppArmor
- kernel parameter will default to 0, disabling AppArmor at
- boot. If this option is set to 1 (one), the AppArmor
- kernel parameter will default to 1, enabling AppArmor at
- boot.
-
- If you are unsure how to answer this question, answer 1.
-
config SECURITY_APPARMOR_HASH
bool "Enable introspection of sha1 hashes for loaded profiles"
depends on SECURITY_APPARMOR
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 253ef6e9d445..752f73980e30 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -110,13 +110,13 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
* profile_capable - test if profile allows use of capability @cap
* @profile: profile being enforced (NOT NULL, NOT unconfined)
* @cap: capability to test if allowed
- * @audit: whether an audit record should be generated
+ * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
* @sa: audit data (MAY BE NULL indicating no auditing)
*
* Returns: 0 if allowed else -EPERM
*/
-static int profile_capable(struct aa_profile *profile, int cap, int audit,
- struct common_audit_data *sa)
+static int profile_capable(struct aa_profile *profile, int cap,
+ unsigned int opts, struct common_audit_data *sa)
{
int error;
@@ -126,7 +126,7 @@ static int profile_capable(struct aa_profile *profile, int cap, int audit,
else
error = -EPERM;
- if (audit == SECURITY_CAP_NOAUDIT) {
+ if (opts & CAP_OPT_NOAUDIT) {
if (!COMPLAIN_MODE(profile))
return error;
/* audit the cap request in complain mode but note that it
@@ -142,13 +142,13 @@ static int profile_capable(struct aa_profile *profile, int cap, int audit,
* aa_capable - test permission to use capability
* @label: label being tested for capability (NOT NULL)
* @cap: capability to be tested
- * @audit: whether an audit record should be generated
+ * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
*
* Look up capability in profile capability set.
*
* Returns: 0 on success, or else an error code.
*/
-int aa_capable(struct aa_label *label, int cap, int audit)
+int aa_capable(struct aa_label *label, int cap, unsigned int opts)
{
struct aa_profile *profile;
int error = 0;
@@ -156,7 +156,7 @@ int aa_capable(struct aa_label *label, int cap, int audit)
sa.u.cap = cap;
error = fn_for_each_confined(label, profile,
- profile_capable(profile, cap, audit, &sa));
+ profile_capable(profile, cap, opts, &sa));
return error;
}
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 08c88de0ffda..ca2dccf5b445 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -572,7 +572,7 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
stack = NULL;
break;
}
- /* fall through to X_NAME */
+ /* fall through - to X_NAME */
case AA_X_NAME:
if (xindex & AA_X_CHILD)
/* released by caller */
@@ -975,7 +975,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
}
aa_put_label(cred_label(bprm->cred));
/* transfer reference, released when cred is freed */
- cred_label(bprm->cred) = new;
+ set_cred_label(bprm->cred, new);
done:
aa_put_label(label);
@@ -1444,7 +1444,10 @@ check:
new = aa_label_merge(label, target, GFP_KERNEL);
if (IS_ERR_OR_NULL(new)) {
info = "failed to build target label";
- error = PTR_ERR(new);
+ if (!new)
+ error = -ENOMEM;
+ else
+ error = PTR_ERR(new);
new = NULL;
perms.allow = 0;
goto audit;
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
index e0304e2aeb7f..1b3663b6ab12 100644
--- a/security/apparmor/include/capability.h
+++ b/security/apparmor/include/capability.h
@@ -40,7 +40,7 @@ struct aa_caps {
extern struct aa_sfs_entry aa_sfs_entry_caps[];
-int aa_capable(struct aa_label *label, int cap, int audit);
+int aa_capable(struct aa_label *label, int cap, unsigned int opts);
static inline void aa_free_cap_rules(struct aa_caps *caps)
{
diff --git a/security/apparmor/include/cred.h b/security/apparmor/include/cred.h
index 265ae6641a06..b9504a05fddc 100644
--- a/security/apparmor/include/cred.h
+++ b/security/apparmor/include/cred.h
@@ -23,8 +23,22 @@
#include "policy_ns.h"
#include "task.h"
-#define cred_label(X) ((X)->security)
+static inline struct aa_label *cred_label(const struct cred *cred)
+{
+ struct aa_label **blob = cred->security + apparmor_blob_sizes.lbs_cred;
+
+ AA_BUG(!blob);
+ return *blob;
+}
+static inline void set_cred_label(const struct cred *cred,
+ struct aa_label *label)
+{
+ struct aa_label **blob = cred->security + apparmor_blob_sizes.lbs_cred;
+
+ AA_BUG(!blob);
+ *blob = label;
+}
/**
* aa_cred_raw_label - obtain cred's label
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index 4c2c8ac8842f..8be09208cf7c 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -32,7 +32,10 @@ struct path;
AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_LOCK | \
AA_EXEC_MMAP | AA_MAY_LINK)
-#define file_ctx(X) ((struct aa_file_ctx *)(X)->f_security)
+static inline struct aa_file_ctx *file_ctx(struct file *file)
+{
+ return file->f_security + apparmor_blob_sizes.lbs_file;
+}
/* struct aa_file_ctx - the AppArmor context the file was opened in
* @lock: lock to update the ctx
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 6505e1ad9e23..bbe9b384d71d 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/fs.h>
+#include <linux/lsm_hooks.h>
#include "match.h"
@@ -55,6 +56,9 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
size_t *ns_len);
void aa_info_message(const char *str);
+/* Security blob offsets */
+extern struct lsm_blob_sizes apparmor_blob_sizes;
+
/**
* aa_strneq - compare null terminated @str to a non null terminated substring
* @str: a null terminated string
diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
index 55edaa1d83f8..311e652324e3 100644
--- a/security/apparmor/include/task.h
+++ b/security/apparmor/include/task.h
@@ -14,7 +14,10 @@
#ifndef __AA_TASK_H
#define __AA_TASK_H
-#define task_ctx(X) ((X)->security)
+static inline struct aa_task_ctx *task_ctx(struct task_struct *task)
+{
+ return task->security + apparmor_blob_sizes.lbs_task;
+}
/*
* struct aa_task_ctx - information for current task label change
@@ -37,17 +40,6 @@ int aa_restore_previous_label(u64 cookie);
struct aa_label *aa_get_task_label(struct task_struct *task);
/**
- * aa_alloc_task_ctx - allocate a new task_ctx
- * @flags: gfp flags for allocation
- *
- * Returns: allocated buffer or NULL on failure
- */
-static inline struct aa_task_ctx *aa_alloc_task_ctx(gfp_t flags)
-{
- return kzalloc(sizeof(struct aa_task_ctx), flags);
-}
-
-/**
* aa_free_task_ctx - free a task_ctx
* @ctx: task_ctx to free (MAYBE NULL)
*/
@@ -57,8 +49,6 @@ static inline void aa_free_task_ctx(struct aa_task_ctx *ctx)
aa_put_label(ctx->nnp);
aa_put_label(ctx->previous);
aa_put_label(ctx->onexec);
-
- kzfree(ctx);
}
}
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 527ea1557120..aacd1e95cb59 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -107,7 +107,8 @@ static int profile_tracer_perm(struct aa_profile *tracer,
aad(sa)->label = &tracer->label;
aad(sa)->peer = tracee;
aad(sa)->request = 0;
- aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1);
+ aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
+ CAP_OPT_NONE);
return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
}
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 2c010874329f..49d664ddff44 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -60,7 +60,7 @@ DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
static void apparmor_cred_free(struct cred *cred)
{
aa_put_label(cred_label(cred));
- cred_label(cred) = NULL;
+ set_cred_label(cred, NULL);
}
/*
@@ -68,7 +68,7 @@ static void apparmor_cred_free(struct cred *cred)
*/
static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
- cred_label(cred) = NULL;
+ set_cred_label(cred, NULL);
return 0;
}
@@ -78,7 +78,7 @@ static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- cred_label(new) = aa_get_newest_label(cred_label(old));
+ set_cred_label(new, aa_get_newest_label(cred_label(old)));
return 0;
}
@@ -87,26 +87,21 @@ static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
*/
static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
{
- cred_label(new) = aa_get_newest_label(cred_label(old));
+ set_cred_label(new, aa_get_newest_label(cred_label(old)));
}
static void apparmor_task_free(struct task_struct *task)
{
aa_free_task_ctx(task_ctx(task));
- task_ctx(task) = NULL;
}
static int apparmor_task_alloc(struct task_struct *task,
unsigned long clone_flags)
{
- struct aa_task_ctx *new = aa_alloc_task_ctx(GFP_KERNEL);
-
- if (!new)
- return -ENOMEM;
+ struct aa_task_ctx *new = task_ctx(task);
aa_dup_task_ctx(new, task_ctx(current));
- task_ctx(task) = new;
return 0;
}
@@ -177,14 +172,14 @@ static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective,
}
static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
- int cap, int audit)
+ int cap, unsigned int opts)
{
struct aa_label *label;
int error = 0;
label = aa_get_newest_cred_label(cred);
if (!unconfined(label))
- error = aa_capable(label, cap, audit);
+ error = aa_capable(label, cap, opts);
aa_put_label(label);
return error;
@@ -434,21 +429,21 @@ static int apparmor_file_open(struct file *file)
static int apparmor_file_alloc_security(struct file *file)
{
- int error = 0;
-
- /* freed by apparmor_file_free_security */
+ struct aa_file_ctx *ctx = file_ctx(file);
struct aa_label *label = begin_current_label_crit_section();
- file->f_security = aa_alloc_file_ctx(label, GFP_KERNEL);
- if (!file_ctx(file))
- error = -ENOMEM;
- end_current_label_crit_section(label);
- return error;
+ spin_lock_init(&ctx->lock);
+ rcu_assign_pointer(ctx->label, aa_get_label(label));
+ end_current_label_crit_section(label);
+ return 0;
}
static void apparmor_file_free_security(struct file *file)
{
- aa_free_file_ctx(file_ctx(file));
+ struct aa_file_ctx *ctx = file_ctx(file);
+
+ if (ctx)
+ aa_put_label(rcu_access_pointer(ctx->label));
}
static int common_file_perm(const char *op, struct file *file, u32 mask)
@@ -1151,6 +1146,15 @@ static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb,
}
#endif
+/*
+ * The cred blob is a pointer to, not an instance of, an aa_task_ctx.
+ */
+struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = {
+ .lbs_cred = sizeof(struct aa_task_ctx *),
+ .lbs_file = sizeof(struct aa_file_ctx),
+ .lbs_task = sizeof(struct aa_task_ctx),
+};
+
static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -1333,8 +1337,8 @@ bool aa_g_paranoid_load = true;
module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
/* Boot time disable flag */
-static bool apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE;
-module_param_named(enabled, apparmor_enabled, bool, S_IRUGO);
+static int apparmor_enabled __lsm_ro_after_init = 1;
+module_param_named(enabled, apparmor_enabled, int, 0444);
static int __init apparmor_enabled_setup(char *str)
{
@@ -1479,14 +1483,8 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
static int __init set_init_ctx(void)
{
struct cred *cred = (struct cred *)current->real_cred;
- struct aa_task_ctx *ctx;
-
- ctx = aa_alloc_task_ctx(GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- cred_label(cred) = aa_get_label(ns_unconfined(root_ns));
- task_ctx(current) = ctx;
+ set_cred_label(cred, aa_get_label(ns_unconfined(root_ns)));
return 0;
}
@@ -1599,12 +1597,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv,
return apparmor_ip_postroute(priv, skb, state);
}
+#if IS_ENABLED(CONFIG_IPV6)
static unsigned int apparmor_ipv6_postroute(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
return apparmor_ip_postroute(priv, skb, state);
}
+#endif
static const struct nf_hook_ops apparmor_nf_ops[] = {
{
@@ -1663,12 +1663,6 @@ static int __init apparmor_init(void)
{
int error;
- if (!apparmor_enabled || !security_module_enable("apparmor")) {
- aa_info_message("AppArmor disabled by boot time parameter");
- apparmor_enabled = false;
- return 0;
- }
-
aa_secids_init();
error = aa_setup_dfa_engine();
@@ -1729,5 +1723,8 @@ alloc_out:
DEFINE_LSM(apparmor) = {
.name = "apparmor",
+ .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
+ .enabled = &apparmor_enabled,
+ .blobs = &apparmor_blob_sizes,
.init = apparmor_init,
};
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 95fd26d09757..552ed09cb47e 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -124,7 +124,7 @@ int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
*/
if (label != peer &&
- aa_capable(label, CAP_SYS_RESOURCE, SECURITY_CAP_NOAUDIT) != 0)
+ aa_capable(label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
error = fn_for_each(label, profile,
audit_resource(profile, resource,
new_rlim->rlim_max, peer,
diff --git a/security/apparmor/task.c b/security/apparmor/task.c
index c6b78a14da91..4551110f0496 100644
--- a/security/apparmor/task.c
+++ b/security/apparmor/task.c
@@ -81,7 +81,7 @@ int aa_replace_current_label(struct aa_label *label)
*/
aa_get_label(label);
aa_put_label(cred_label(new));
- cred_label(new) = label;
+ set_cred_label(new, label);
commit_creds(new);
return 0;
@@ -138,7 +138,7 @@ int aa_set_current_hat(struct aa_label *label, u64 token)
return -EACCES;
}
- cred_label(new) = aa_get_newest_label(label);
+ set_cred_label(new, aa_get_newest_label(label));
/* clear exec on switching context */
aa_put_label(ctx->onexec);
ctx->onexec = NULL;
@@ -172,7 +172,7 @@ int aa_restore_previous_label(u64 token)
return -ENOMEM;
aa_put_label(cred_label(new));
- cred_label(new) = aa_get_newest_label(ctx->previous);
+ set_cred_label(new, aa_get_newest_label(ctx->previous));
AA_BUG(!cred_label(new));
/* clear exec && prev information when restoring to previous context */
aa_clear_task_ctx_trans(ctx);
diff --git a/security/commoncap.c b/security/commoncap.c
index c097f3568001..c477fb673701 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -57,7 +57,7 @@ static void warn_setuid_and_fcaps_mixed(const char *fname)
* @cred: The credentials to use
* @ns: The user namespace in which we need the capability
* @cap: The capability to check for
- * @audit: Whether to write an audit message or not
+ * @opts: Bitmask of options defined in include/linux/security.h
*
* Determine whether the nominated task has the specified capability amongst
* its effective set, returning 0 if it does, -ve if it does not.
@@ -68,7 +68,7 @@ static void warn_setuid_and_fcaps_mixed(const char *fname)
* kernel's capable() and has_capability() returns 1 for this case.
*/
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
- int cap, int audit)
+ int cap, unsigned int opts)
{
struct user_namespace *ns = targ_ns;
@@ -222,12 +222,11 @@ int cap_capget(struct task_struct *target, kernel_cap_t *effective,
*/
static inline int cap_inh_is_capped(void)
{
-
/* they are so limited unless the current task has the CAP_SETPCAP
* capability
*/
if (cap_capable(current_cred(), current_cred()->user_ns,
- CAP_SETPCAP, SECURITY_CAP_AUDIT) == 0)
+ CAP_SETPCAP, CAP_OPT_NONE) == 0)
return 0;
return 1;
}
@@ -1210,8 +1209,9 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
|| ((old->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
|| (cap_capable(current_cred(),
- current_cred()->user_ns, CAP_SETPCAP,
- SECURITY_CAP_AUDIT) != 0) /*[4]*/
+ current_cred()->user_ns,
+ CAP_SETPCAP,
+ CAP_OPT_NONE) != 0) /*[4]*/
/*
* [1] no changing of bits that are locked
* [2] no unlocking of locks
@@ -1306,9 +1306,10 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages)
{
int cap_sys_admin = 0;
- if (cap_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN,
- SECURITY_CAP_NOAUDIT) == 0)
+ if (cap_capable(current_cred(), &init_user_ns,
+ CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) == 0)
cap_sys_admin = 1;
+
return cap_sys_admin;
}
@@ -1327,7 +1328,7 @@ int cap_mmap_addr(unsigned long addr)
if (addr < dac_mmap_min_addr) {
ret = cap_capable(current_cred(), &init_user_ns, CAP_SYS_RAWIO,
- SECURITY_CAP_AUDIT);
+ CAP_OPT_NONE);
/* set PF_SUPERPRIV if it turns out we allow the low mmap */
if (ret == 0)
current->flags |= PF_SUPERPRIV;
@@ -1364,10 +1365,17 @@ struct security_hook_list capability_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(vm_enough_memory, cap_vm_enough_memory),
};
-void __init capability_add_hooks(void)
+static int __init capability_init(void)
{
security_add_hooks(capability_hooks, ARRAY_SIZE(capability_hooks),
"capability");
+ return 0;
}
+DEFINE_LSM(capability) = {
+ .name = "capability",
+ .order = LSM_ORDER_FIRST,
+ .init = capability_init,
+};
+
#endif /* CONFIG_SECURITY */
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 88f04b3380d4..423876fca8b4 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -200,7 +200,7 @@ int integrity_kernel_read(struct file *file, loff_t offset,
return -EBADF;
old_fs = get_fs();
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
ret = __vfs_read(file, buf, count, &offset);
set_fs(old_fs);
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index a2baa85ea2f5..5fb7127bbe68 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -114,6 +114,7 @@ static void ima_set_cache_status(struct integrity_iint_cache *iint,
break;
case CREDS_CHECK:
iint->ima_creds_status = status;
+ break;
case FILE_CHECK:
case POST_SETATTR:
iint->ima_file_status = status;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 26fa9d9723f6..e0cc323f948f 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -936,10 +936,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
case Opt_uid_gt:
case Opt_euid_gt:
entry->uid_op = &uid_gt;
+ /* fall through */
case Opt_uid_lt:
case Opt_euid_lt:
if ((token == Opt_uid_lt) || (token == Opt_euid_lt))
entry->uid_op = &uid_lt;
+ /* fall through */
case Opt_uid_eq:
case Opt_euid_eq:
uid_token = (token == Opt_uid_eq) ||
@@ -968,9 +970,11 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
break;
case Opt_fowner_gt:
entry->fowner_op = &uid_gt;
+ /* fall through */
case Opt_fowner_lt:
if (token == Opt_fowner_lt)
entry->fowner_op = &uid_lt;
+ /* fall through */
case Opt_fowner_eq:
ima_log_string_op(ab, "fowner", args[0].from,
entry->fowner_op);
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 43752002c222..513b457ae900 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -83,6 +83,7 @@ static void ima_show_template_data_ascii(struct seq_file *m,
/* skip ':' and '\0' */
buf_ptr += 2;
buflen -= buf_ptr - field_data->data;
+ /* fall through */
case DATA_FMT_DIGEST:
case DATA_FMT_HEX:
if (!buflen)
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 479909b858c7..8f533c81aa8d 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm)
return key_task_permission(key_ref, current_cred(), perm);
}
-/*
- * Authorisation record for request_key().
- */
-struct request_key_auth {
- struct key *target_key;
- struct key *dest_keyring;
- const struct cred *cred;
- void *callout_info;
- size_t callout_len;
- pid_t pid;
-} __randomize_layout;
-
extern struct key_type key_type_request_key_auth;
extern struct key *request_key_auth_new(struct key *target,
+ const char *op,
const void *callout_info,
size_t callout_len,
struct key *dest_keyring);
diff --git a/security/keys/key.c b/security/keys/key.c
index 44a80d6741a1..696f1c092c50 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
spin_lock(&user->lock);
if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
- if (user->qnkeys + 1 >= maxkeys ||
- user->qnbytes + quotalen >= maxbytes ||
+ if (user->qnkeys + 1 > maxkeys ||
+ user->qnbytes + quotalen > maxbytes ||
user->qnbytes + quotalen < user->qnbytes)
goto no_quota;
}
@@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->gid = gid;
key->perm = perm;
key->restrict_link = restrict_link;
+ key->last_used_at = ktime_get_real_seconds();
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index e8093d025966..3e4053a217c3 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -25,6 +25,7 @@
#include <linux/security.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
+#include <keys/request_key_auth-type.h>
#include "internal.h"
#define KEY_MAX_DESC_SIZE 4096
@@ -1751,7 +1752,7 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
return -EINVAL;
return keyctl_pkey_query((key_serial_t)arg2,
(const char __user *)arg4,
- (struct keyctl_pkey_query *)arg5);
+ (struct keyctl_pkey_query __user *)arg5);
case KEYCTL_PKEY_ENCRYPT:
case KEYCTL_PKEY_DECRYPT:
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index eadebb92986a..e14f09e3a4b0 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -246,6 +246,7 @@ static unsigned long keyring_get_key_chunk(const void *data, int level)
(ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
n--;
offset = 1;
+ /* fall through */
default:
offset += sizeof(chunk) - 1;
offset += (level - 3) * sizeof(chunk);
@@ -661,9 +662,6 @@ static bool search_nested_keyrings(struct key *keyring,
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
- if (ctx->index_key.description)
- ctx->index_key.desc_len = strlen(ctx->index_key.description);
-
/* Check to see if this top-level keyring is what we are looking for
* and whether it is valid or not.
*/
@@ -914,6 +912,7 @@ key_ref_t keyring_search(key_ref_t keyring,
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
+ .index_key.desc_len = strlen(description),
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
diff --git a/security/keys/proc.c b/security/keys/proc.c
index d2b802072693..78ac305d715e 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -165,8 +165,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
int rc;
struct keyring_search_context ctx = {
- .index_key.type = key->type,
- .index_key.description = key->description,
+ .index_key = key->index_key,
.cred = m->file->f_cred,
.match_data.cmp = lookup_user_key_possessed,
.match_data.raw_data = key,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 02c77e928f68..9320424c4a46 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -19,6 +19,7 @@
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <linux/uaccess.h>
+#include <keys/request_key_auth-type.h>
#include "internal.h"
/* Session keyring create vs join semaphore */
@@ -379,6 +380,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
case -EAGAIN: /* no key */
if (ret)
break;
+ /* fall through */
case -ENOKEY: /* negative key */
ret = key_ref;
break;
@@ -403,6 +405,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
case -EAGAIN: /* no key */
if (ret)
break;
+ /* fall through */
case -ENOKEY: /* negative key */
ret = key_ref;
break;
@@ -423,6 +426,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
case -EAGAIN: /* no key */
if (ret)
break;
+ /* fall through */
case -ENOKEY: /* negative key */
ret = key_ref;
break;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 301f0e300dbd..2f17d84d46f1 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -18,31 +18,30 @@
#include <linux/keyctl.h>
#include <linux/slab.h>
#include "internal.h"
+#include <keys/request_key_auth-type.h>
#define key_negative_timeout 60 /* default timeout on a negative key's existence */
/**
* complete_request_key - Complete the construction of a key.
- * @cons: The key construction record.
+ * @auth_key: The authorisation key.
* @error: The success or failure of the construction.
*
* Complete the attempt to construct a key. The key will be negated
* if an error is indicated. The authorisation key will be revoked
* unconditionally.
*/
-void complete_request_key(struct key_construction *cons, int error)
+void complete_request_key(struct key *authkey, int error)
{
- kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);
+ struct request_key_auth *rka = get_request_key_auth(authkey);
+ struct key *key = rka->target_key;
+
+ kenter("%d{%d},%d", authkey->serial, key->serial, error);
if (error < 0)
- key_negate_and_link(cons->key, key_negative_timeout, NULL,
- cons->authkey);
+ key_negate_and_link(key, key_negative_timeout, NULL, authkey);
else
- key_revoke(cons->authkey);
-
- key_put(cons->key);
- key_put(cons->authkey);
- kfree(cons);
+ key_revoke(authkey);
}
EXPORT_SYMBOL(complete_request_key);
@@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
* Request userspace finish the construction of a key
* - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
*/
-static int call_sbin_request_key(struct key_construction *cons,
- const char *op,
- void *aux)
+static int call_sbin_request_key(struct key *authkey, void *aux)
{
static char const request_key[] = "/sbin/request-key";
+ struct request_key_auth *rka = get_request_key_auth(authkey);
const struct cred *cred = current_cred();
key_serial_t prkey, sskey;
- struct key *key = cons->key, *authkey = cons->authkey, *keyring,
- *session;
+ struct key *key = rka->target_key, *keyring, *session;
char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
char desc[20];
int ret, i;
- kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
+ kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
ret = install_user_keyrings();
if (ret < 0)
@@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons,
/* set up the argument list */
i = 0;
argv[i++] = (char *)request_key;
- argv[i++] = (char *) op;
+ argv[i++] = (char *)rka->op;
argv[i++] = key_str;
argv[i++] = uid_str;
argv[i++] = gid_str;
@@ -191,7 +188,7 @@ error_link:
key_put(keyring);
error_alloc:
- complete_request_key(cons, ret);
+ complete_request_key(authkey, ret);
kleave(" = %d", ret);
return ret;
}
@@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info,
size_t callout_len, void *aux,
struct key *dest_keyring)
{
- struct key_construction *cons;
request_key_actor_t actor;
struct key *authkey;
int ret;
kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
- cons = kmalloc(sizeof(*cons), GFP_KERNEL);
- if (!cons)
- return -ENOMEM;
-
/* allocate an authorisation key */
- authkey = request_key_auth_new(key, callout_info, callout_len,
+ authkey = request_key_auth_new(key, "create", callout_info, callout_len,
dest_keyring);
- if (IS_ERR(authkey)) {
- kfree(cons);
- ret = PTR_ERR(authkey);
- authkey = NULL;
- } else {
- cons->authkey = key_get(authkey);
- cons->key = key_get(key);
+ if (IS_ERR(authkey))
+ return PTR_ERR(authkey);
- /* make the call */
- actor = call_sbin_request_key;
- if (key->type->request_key)
- actor = key->type->request_key;
+ /* Make the call */
+ actor = call_sbin_request_key;
+ if (key->type->request_key)
+ actor = key->type->request_key;
- ret = actor(cons, "create", aux);
+ ret = actor(authkey, aux);
- /* check that the actor called complete_request_key() prior to
- * returning an error */
- WARN_ON(ret < 0 &&
- !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
- key_put(authkey);
- }
+ /* check that the actor called complete_request_key() prior to
+ * returning an error */
+ WARN_ON(ret < 0 &&
+ !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
+ key_put(authkey);
kleave(" = %d", ret);
return ret;
}
@@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
if (cred->request_key_auth) {
authkey = cred->request_key_auth;
down_read(&authkey->sem);
- rka = authkey->payload.data[0];
+ rka = get_request_key_auth(authkey);
if (!test_bit(KEY_FLAG_REVOKED,
&authkey->flags))
dest_keyring =
@@ -287,16 +273,19 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
}
}
+ /* fall through */
case KEY_REQKEY_DEFL_THREAD_KEYRING:
dest_keyring = key_get(cred->thread_keyring);
if (dest_keyring)
break;
+ /* fall through */
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
dest_keyring = key_get(cred->process_keyring);
if (dest_keyring)
break;
+ /* fall through */
case KEY_REQKEY_DEFL_SESSION_KEYRING:
rcu_read_lock();
dest_keyring = key_get(
@@ -306,6 +295,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
if (dest_keyring)
break;
+ /* fall through */
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
dest_keyring =
key_get(cred->user->session_keyring);
@@ -545,6 +535,7 @@ struct key *request_key_and_link(struct key_type *type,
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
+ .index_key.desc_len = strlen(description),
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 87ea2f54dedc..bda6201c6c45 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "internal.h"
-#include <keys/user-type.h>
+#include <keys/request_key_auth-type.h>
static int request_key_auth_preparse(struct key_preparsed_payload *);
static void request_key_auth_free_preparse(struct key_preparsed_payload *);
@@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key,
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
seq_puts(m, "key:");
seq_puts(m, key->description);
@@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key,
static long request_key_auth_read(const struct key *key,
char __user *buffer, size_t buflen)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
size_t datalen;
long ret;
@@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key,
*/
static void request_key_auth_revoke(struct key *key)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
kenter("{%d}", key->serial);
@@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka)
*/
static void request_key_auth_destroy(struct key *key)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
kenter("{%d}", key->serial);
@@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key)
* Create an authorisation token for /sbin/request-key or whoever to gain
* access to the caller's security data.
*/
-struct key *request_key_auth_new(struct key *target, const void *callout_info,
- size_t callout_len, struct key *dest_keyring)
+struct key *request_key_auth_new(struct key *target, const char *op,
+ const void *callout_info, size_t callout_len,
+ struct key *dest_keyring)
{
struct request_key_auth *rka, *irka;
const struct cred *cred = current->cred;
@@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
if (!rka->callout_info)
goto error_free_rka;
rka->callout_len = callout_len;
+ strlcpy(rka->op, op, sizeof(rka->op));
/* see if the calling process is already servicing the key request of
* another process */
@@ -245,7 +247,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
struct key *authkey;
key_ref_t authkey_ref;
- sprintf(description, "%x", target_id);
+ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
authkey_ref = search_process_keyrings(&ctx);
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 48f39631b370..055fb0a64169 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -187,13 +187,19 @@ static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(kernel_load_data, loadpin_load_data),
};
-void __init loadpin_add_hooks(void)
+static int __init loadpin_init(void)
{
pr_info("ready to pin (currently %senforcing)\n",
enforce ? "" : "not ");
security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks), "loadpin");
+ return 0;
}
+DEFINE_LSM(loadpin) = {
+ .name = "loadpin",
+ .init = loadpin_init,
+};
+
/* Should not be mutable after boot, so not listed in sysfs (perm == 0). */
module_param(enforce, int, 0);
MODULE_PARM_DESC(enforce, "Enforce module/firmware pinning");
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index f84001019356..33028c098ef3 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
if (a->u.net->sk) {
struct sock *sk = a->u.net->sk;
struct unix_sock *u;
+ struct unix_address *addr;
int len = 0;
char *p = NULL;
@@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
#endif
case AF_UNIX:
u = unix_sk(sk);
+ addr = smp_load_acquire(&u->addr);
+ if (!addr)
+ break;
if (u->path.dentry) {
audit_log_d_path(ab, " path=", &u->path);
break;
}
- if (!u->addr)
- break;
- len = u->addr->len-sizeof(short);
- p = &u->addr->name->sun_path[0];
+ len = addr->len-sizeof(short);
+ p = &addr->name->sun_path[0];
audit_log_format(ab, " path=");
if (*p)
audit_log_untrustedstring(ab, p);
diff --git a/security/safesetid/Kconfig b/security/safesetid/Kconfig
new file mode 100644
index 000000000000..4f415c4e3f93
--- /dev/null
+++ b/security/safesetid/Kconfig
@@ -0,0 +1,14 @@
+config SECURITY_SAFESETID
+ bool "Gate setid transitions to limit CAP_SET{U/G}ID capabilities"
+ depends on SECURITY
+ select SECURITYFS
+ default n
+ help
+ SafeSetID is an LSM module that gates the setid family of syscalls to
+ restrict UID/GID transitions from a given UID/GID to only those
+ approved by a system-wide whitelist. These restrictions also prohibit
+ the given UIDs/GIDs from obtaining auxiliary privileges associated
+ with CAP_SET{U/G}ID, such as allowing a user to set up user namespace
+ UID mappings.
+
+ If you are unsure how to answer this question, answer N.
diff --git a/security/safesetid/Makefile b/security/safesetid/Makefile
new file mode 100644
index 000000000000..6b0660321164
--- /dev/null
+++ b/security/safesetid/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the safesetid LSM.
+#
+
+obj-$(CONFIG_SECURITY_SAFESETID) := safesetid.o
+safesetid-y := lsm.o securityfs.o
diff --git a/security/safesetid/lsm.c b/security/safesetid/lsm.c
new file mode 100644
index 000000000000..cecd38e2ac80
--- /dev/null
+++ b/security/safesetid/lsm.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SafeSetID Linux Security Module
+ *
+ * Author: Micah Morton <mortonm@chromium.org>
+ *
+ * Copyright (C) 2018 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "SafeSetID: " fmt
+
+#include <linux/hashtable.h>
+#include <linux/lsm_hooks.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/sched/task_stack.h>
+#include <linux/security.h>
+
+/* Flag indicating whether initialization completed */
+int safesetid_initialized;
+
+#define NUM_BITS 8 /* 128 buckets in hash table */
+
+static DEFINE_HASHTABLE(safesetid_whitelist_hashtable, NUM_BITS);
+
+/*
+ * Hash table entry to store safesetid policy signifying that 'parent' user
+ * can setid to 'child' user.
+ */
+struct entry {
+ struct hlist_node next;
+ struct hlist_node dlist; /* for deletion cleanup */
+ uint64_t parent_kuid;
+ uint64_t child_kuid;
+};
+
+static DEFINE_SPINLOCK(safesetid_whitelist_hashtable_spinlock);
+
+static bool check_setuid_policy_hashtable_key(kuid_t parent)
+{
+ struct entry *entry;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(safesetid_whitelist_hashtable,
+ entry, next, __kuid_val(parent)) {
+ if (entry->parent_kuid == __kuid_val(parent)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
+static bool check_setuid_policy_hashtable_key_value(kuid_t parent,
+ kuid_t child)
+{
+ struct entry *entry;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(safesetid_whitelist_hashtable,
+ entry, next, __kuid_val(parent)) {
+ if (entry->parent_kuid == __kuid_val(parent) &&
+ entry->child_kuid == __kuid_val(child)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
+static int safesetid_security_capable(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts)
+{
+ if (cap == CAP_SETUID &&
+ check_setuid_policy_hashtable_key(cred->uid)) {
+ if (!(opts & CAP_OPT_INSETID)) {
+ /*
+ * Deny if we're not in a set*uid() syscall to avoid
+ * giving powers gated by CAP_SETUID that are related
+ * to functionality other than calling set*uid() (e.g.
+ * allowing user to set up userns uid mappings).
+ */
+ pr_warn("Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions",
+ __kuid_val(cred->uid));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int check_uid_transition(kuid_t parent, kuid_t child)
+{
+ if (check_setuid_policy_hashtable_key_value(parent, child))
+ return 0;
+ pr_warn("UID transition (%d -> %d) blocked",
+ __kuid_val(parent),
+ __kuid_val(child));
+ /*
+ * Kill this process to avoid potential security vulnerabilities
+ * that could arise from a missing whitelist entry preventing a
+ * privileged process from dropping to a lesser-privileged one.
+ */
+ force_sig(SIGKILL, current);
+ return -EACCES;
+}
+
+/*
+ * Check whether there is either an exception for user under old cred struct to
+ * set*uid to user under new cred struct, or the UID transition is allowed (by
+ * Linux set*uid rules) even without CAP_SETUID.
+ */
+static int safesetid_task_fix_setuid(struct cred *new,
+ const struct cred *old,
+ int flags)
+{
+
+ /* Do nothing if there are no setuid restrictions for this UID. */
+ if (!check_setuid_policy_hashtable_key(old->uid))
+ return 0;
+
+ switch (flags) {
+ case LSM_SETID_RE:
+ /*
+ * Users for which setuid restrictions exist can only set the
+ * real UID to the real UID or the effective UID, unless an
+ * explicit whitelist policy allows the transition.
+ */
+ if (!uid_eq(old->uid, new->uid) &&
+ !uid_eq(old->euid, new->uid)) {
+ return check_uid_transition(old->uid, new->uid);
+ }
+ /*
+ * Users for which setuid restrictions exist can only set the
+ * effective UID to the real UID, the effective UID, or the
+ * saved set-UID, unless an explicit whitelist policy allows
+ * the transition.
+ */
+ if (!uid_eq(old->uid, new->euid) &&
+ !uid_eq(old->euid, new->euid) &&
+ !uid_eq(old->suid, new->euid)) {
+ return check_uid_transition(old->euid, new->euid);
+ }
+ break;
+ case LSM_SETID_ID:
+ /*
+ * Users for which setuid restrictions exist cannot change the
+ * real UID or saved set-UID unless an explicit whitelist
+ * policy allows the transition.
+ */
+ if (!uid_eq(old->uid, new->uid))
+ return check_uid_transition(old->uid, new->uid);
+ if (!uid_eq(old->suid, new->suid))
+ return check_uid_transition(old->suid, new->suid);
+ break;
+ case LSM_SETID_RES:
+ /*
+ * Users for which setuid restrictions exist cannot change the
+ * real UID, effective UID, or saved set-UID to anything but
+ * one of: the current real UID, the current effective UID or
+ * the current saved set-user-ID unless an explicit whitelist
+ * policy allows the transition.
+ */
+ if (!uid_eq(new->uid, old->uid) &&
+ !uid_eq(new->uid, old->euid) &&
+ !uid_eq(new->uid, old->suid)) {
+ return check_uid_transition(old->uid, new->uid);
+ }
+ if (!uid_eq(new->euid, old->uid) &&
+ !uid_eq(new->euid, old->euid) &&
+ !uid_eq(new->euid, old->suid)) {
+ return check_uid_transition(old->euid, new->euid);
+ }
+ if (!uid_eq(new->suid, old->uid) &&
+ !uid_eq(new->suid, old->euid) &&
+ !uid_eq(new->suid, old->suid)) {
+ return check_uid_transition(old->suid, new->suid);
+ }
+ break;
+ case LSM_SETID_FS:
+ /*
+ * Users for which setuid restrictions exist cannot change the
+ * filesystem UID to anything but one of: the current real UID,
+ * the current effective UID or the current saved set-UID
+ * unless an explicit whitelist policy allows the transition.
+ */
+ if (!uid_eq(new->fsuid, old->uid) &&
+ !uid_eq(new->fsuid, old->euid) &&
+ !uid_eq(new->fsuid, old->suid) &&
+ !uid_eq(new->fsuid, old->fsuid)) {
+ return check_uid_transition(old->fsuid, new->fsuid);
+ }
+ break;
+ default:
+ pr_warn("Unknown setid state %d\n", flags);
+ force_sig(SIGKILL, current);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int add_safesetid_whitelist_entry(kuid_t parent, kuid_t child)
+{
+ struct entry *new;
+
+ /* Return if entry already exists */
+ if (check_setuid_policy_hashtable_key_value(parent, child))
+ return 0;
+
+ new = kzalloc(sizeof(struct entry), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->parent_kuid = __kuid_val(parent);
+ new->child_kuid = __kuid_val(child);
+ spin_lock(&safesetid_whitelist_hashtable_spinlock);
+ hash_add_rcu(safesetid_whitelist_hashtable,
+ &new->next,
+ __kuid_val(parent));
+ spin_unlock(&safesetid_whitelist_hashtable_spinlock);
+ return 0;
+}
+
+void flush_safesetid_whitelist_entries(void)
+{
+ struct entry *entry;
+ struct hlist_node *hlist_node;
+ unsigned int bkt_loop_cursor;
+ HLIST_HEAD(free_list);
+
+ /*
+ * Could probably use hash_for_each_rcu here instead, but this should
+ * be fine as well.
+ */
+ spin_lock(&safesetid_whitelist_hashtable_spinlock);
+ hash_for_each_safe(safesetid_whitelist_hashtable, bkt_loop_cursor,
+ hlist_node, entry, next) {
+ hash_del_rcu(&entry->next);
+ hlist_add_head(&entry->dlist, &free_list);
+ }
+ spin_unlock(&safesetid_whitelist_hashtable_spinlock);
+ synchronize_rcu();
+ hlist_for_each_entry_safe(entry, hlist_node, &free_list, dlist) {
+ hlist_del(&entry->dlist);
+ kfree(entry);
+ }
+}
+
+static struct security_hook_list safesetid_security_hooks[] = {
+ LSM_HOOK_INIT(task_fix_setuid, safesetid_task_fix_setuid),
+ LSM_HOOK_INIT(capable, safesetid_security_capable)
+};
+
+static int __init safesetid_security_init(void)
+{
+ security_add_hooks(safesetid_security_hooks,
+ ARRAY_SIZE(safesetid_security_hooks), "safesetid");
+
+ /* Report that SafeSetID successfully initialized */
+ safesetid_initialized = 1;
+
+ return 0;
+}
+
+DEFINE_LSM(safesetid_security_init) = {
+ .init = safesetid_security_init,
+ .name = "safesetid",
+};
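Taken together, the two hooks above mean that a UID listed as a whitelist "parent" keeps CAP_SETUID only for the set*uid() calls themselves (the capable hook denies every other CAP_SETUID-gated operation), and those calls must land on a whitelisted child UID; any other transition gets -EACCES and the caller is killed with SIGKILL by check_uid_transition(). A hedged userspace sketch of what a restricted service would observe, with made-up UIDs and an assumed policy of "100:101":

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Assumption: this process runs as UID 100 with CAP_SETUID, and the
	 * only whitelisted transition is 100 -> 101 (policy "100:101").
	 */
	if (setuid(101) == 0)
		printf("dropped to whitelisted UID 101\n");
	else
		printf("setuid(101) failed: %s\n", strerror(errno));

	/*
	 * A non-whitelisted transition such as setuid(0) would not return
	 * here at all: SafeSetID rejects it with -EACCES and then delivers
	 * SIGKILL to the calling process.
	 */
	return 0;
}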
diff --git a/security/safesetid/lsm.h b/security/safesetid/lsm.h
new file mode 100644
index 000000000000..c1ea3c265fcf
--- /dev/null
+++ b/security/safesetid/lsm.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SafeSetID Linux Security Module
+ *
+ * Author: Micah Morton <mortonm@chromium.org>
+ *
+ * Copyright (C) 2018 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef _SAFESETID_H
+#define _SAFESETID_H
+
+#include <linux/types.h>
+
+/* Flag indicating whether initialization completed */
+extern int safesetid_initialized;
+
+/* Function type. */
+enum safesetid_whitelist_file_write_type {
+ SAFESETID_WHITELIST_ADD, /* Add whitelist policy. */
+ SAFESETID_WHITELIST_FLUSH, /* Flush whitelist policies. */
+};
+
+/* Add entry to safesetid whitelist to allow 'parent' to setid to 'child'. */
+int add_safesetid_whitelist_entry(kuid_t parent, kuid_t child);
+
+void flush_safesetid_whitelist_entries(void);
+
+#endif /* _SAFESETID_H */
diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
new file mode 100644
index 000000000000..2c6c829be044
--- /dev/null
+++ b/security/safesetid/securityfs.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SafeSetID Linux Security Module
+ *
+ * Author: Micah Morton <mortonm@chromium.org>
+ *
+ * Copyright (C) 2018 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/security.h>
+#include <linux/cred.h>
+
+#include "lsm.h"
+
+static struct dentry *safesetid_policy_dir;
+
+struct safesetid_file_entry {
+ const char *name;
+ enum safesetid_whitelist_file_write_type type;
+ struct dentry *dentry;
+};
+
+static struct safesetid_file_entry safesetid_files[] = {
+ {.name = "add_whitelist_policy",
+ .type = SAFESETID_WHITELIST_ADD},
+ {.name = "flush_whitelist_policies",
+ .type = SAFESETID_WHITELIST_FLUSH},
+};
+
+/*
+ * In the case the input buffer contains one or more invalid UIDs, the kuid_t
+ * variables pointed to by 'parent' and 'child' will get updated but this
+ * function will return an error.
+ */
+static int parse_safesetid_whitelist_policy(const char __user *buf,
+ size_t len,
+ kuid_t *parent,
+ kuid_t *child)
+{
+ char *kern_buf;
+ char *parent_buf;
+ char *child_buf;
+ const char separator[] = ":";
+ int ret;
+ size_t first_substring_length;
+ long parsed_parent;
+ long parsed_child;
+
+ /* Duplicate string from user memory and NULL-terminate */
+ kern_buf = memdup_user_nul(buf, len);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ /*
+ * Format of |buf| string should be <UID>:<UID>.
+ * Find location of ":" in kern_buf (copied from |buf|).
+ */
+ first_substring_length = strcspn(kern_buf, separator);
+ if (first_substring_length == 0 || first_substring_length == len) {
+ ret = -EINVAL;
+ goto free_kern;
+ }
+
+ parent_buf = kmemdup_nul(kern_buf, first_substring_length, GFP_KERNEL);
+ if (!parent_buf) {
+ ret = -ENOMEM;
+ goto free_kern;
+ }
+
+ ret = kstrtol(parent_buf, 0, &parsed_parent);
+ if (ret)
+ goto free_both;
+
+ child_buf = kern_buf + first_substring_length + 1;
+ ret = kstrtol(child_buf, 0, &parsed_child);
+ if (ret)
+ goto free_both;
+
+ *parent = make_kuid(current_user_ns(), parsed_parent);
+ if (!uid_valid(*parent)) {
+ ret = -EINVAL;
+ goto free_both;
+ }
+
+ *child = make_kuid(current_user_ns(), parsed_child);
+ if (!uid_valid(*child)) {
+ ret = -EINVAL;
+ goto free_both;
+ }
+
+free_both:
+ kfree(parent_buf);
+free_kern:
+ kfree(kern_buf);
+ return ret;
+}
+
+static ssize_t safesetid_file_write(struct file *file,
+ const char __user *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ struct safesetid_file_entry *file_entry =
+ file->f_inode->i_private;
+ kuid_t parent;
+ kuid_t child;
+ int ret;
+
+ if (!ns_capable(current_user_ns(), CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ switch (file_entry->type) {
+ case SAFESETID_WHITELIST_FLUSH:
+ flush_safesetid_whitelist_entries();
+ break;
+ case SAFESETID_WHITELIST_ADD:
+ ret = parse_safesetid_whitelist_policy(buf, len, &parent,
+ &child);
+ if (ret)
+ return ret;
+
+ ret = add_safesetid_whitelist_entry(parent, child);
+ if (ret)
+ return ret;
+ break;
+ default:
+ pr_warn("Unknown securityfs file %d\n", file_entry->type);
+ break;
+ }
+
+ /* Return len on success so caller won't keep trying to write */
+ return len;
+}
+
+static const struct file_operations safesetid_file_fops = {
+ .write = safesetid_file_write,
+};
+
+static void safesetid_shutdown_securityfs(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(safesetid_files); ++i) {
+ struct safesetid_file_entry *entry =
+ &safesetid_files[i];
+ securityfs_remove(entry->dentry);
+ entry->dentry = NULL;
+ }
+
+ securityfs_remove(safesetid_policy_dir);
+ safesetid_policy_dir = NULL;
+}
+
+static int __init safesetid_init_securityfs(void)
+{
+ int i;
+ int ret;
+
+ if (!safesetid_initialized)
+ return 0;
+
+ safesetid_policy_dir = securityfs_create_dir("safesetid", NULL);
+ if (IS_ERR(safesetid_policy_dir)) {
+ ret = PTR_ERR(safesetid_policy_dir);
+ goto error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(safesetid_files); ++i) {
+ struct safesetid_file_entry *entry =
+ &safesetid_files[i];
+ entry->dentry = securityfs_create_file(
+ entry->name, 0200, safesetid_policy_dir,
+ entry, &safesetid_file_fops);
+ if (IS_ERR(entry->dentry)) {
+ ret = PTR_ERR(entry->dentry);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ safesetid_shutdown_securityfs();
+ return ret;
+}
+fs_initcall(safesetid_init_securityfs);
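Putting the pieces together, policy reaches the LSM through securityfs: a write of "<parent UID>:<child UID>" to add_whitelist_policy installs one entry, and any write to flush_whitelist_policies drops them all. The writer needs CAP_MAC_ADMIN, must write at offset 0, and gets the full length back on success. A hedged sketch of a loader, assuming securityfs is mounted at the conventional /sys/kernel/security:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define POLICY_DIR "/sys/kernel/security/safesetid"

static int write_policy(const char *file, const char *buf)
{
	char path[128];
	ssize_t ret;
	int fd;

	snprintf(path, sizeof(path), "%s/%s", POLICY_DIR, file);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	ret = write(fd, buf, strlen(buf));	/* full length on success */
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	/* Allow UID 100 to transition to UID 101 (format "<UID>:<UID>"). */
	if (write_policy("add_whitelist_policy", "100:101"))
		return 1;

	/* Later, a single write drops every installed entry. */
	return write_policy("flush_whitelist_policies", "1") ? 1 : 0;
}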
diff --git a/security/security.c b/security/security.c
index 5f954b179a8e..301b141b9a32 100644
--- a/security/security.c
+++ b/security/security.c
@@ -30,20 +30,32 @@
#include <linux/personality.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
+#include <linux/msg.h>
#include <net/flow.h>
#define MAX_LSM_EVM_XATTR 2
-/* Maximum number of letters for an LSM name string */
-#define SECURITY_NAME_MAX 10
+/* How many LSMs were built into the kernel? */
+#define LSM_COUNT (__end_lsm_info - __start_lsm_info)
struct security_hook_heads security_hook_heads __lsm_ro_after_init;
static ATOMIC_NOTIFIER_HEAD(lsm_notifier_chain);
+static struct kmem_cache *lsm_file_cache;
+static struct kmem_cache *lsm_inode_cache;
+
char *lsm_names;
+static struct lsm_blob_sizes blob_sizes __lsm_ro_after_init;
+
/* Boot-time LSM user choice */
-static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
- CONFIG_DEFAULT_SECURITY;
+static __initdata const char *chosen_lsm_order;
+static __initdata const char *chosen_major_lsm;
+
+static __initconst const char * const builtin_lsm_order = CONFIG_LSM;
+
+/* Ordered list of LSMs to initialize. */
+static __initdata struct lsm_info **ordered_lsms;
+static __initdata struct lsm_info *exclusive;
static __initdata bool debug;
#define init_debug(...) \
@@ -52,18 +64,269 @@ static __initdata bool debug;
pr_info(__VA_ARGS__); \
} while (0)
-static void __init major_lsm_init(void)
+static bool __init is_enabled(struct lsm_info *lsm)
{
- struct lsm_info *lsm;
- int ret;
+ if (!lsm->enabled)
+ return false;
+
+ return *lsm->enabled;
+}
+
+/* Mark an LSM's enabled flag. */
+static int lsm_enabled_true __initdata = 1;
+static int lsm_enabled_false __initdata = 0;
+static void __init set_enabled(struct lsm_info *lsm, bool enabled)
+{
+ /*
+ * When an LSM hasn't configured an enable variable, we can use
+ * a hard-coded location for storing the default enabled state.
+ */
+ if (!lsm->enabled) {
+ if (enabled)
+ lsm->enabled = &lsm_enabled_true;
+ else
+ lsm->enabled = &lsm_enabled_false;
+ } else if (lsm->enabled == &lsm_enabled_true) {
+ if (!enabled)
+ lsm->enabled = &lsm_enabled_false;
+ } else if (lsm->enabled == &lsm_enabled_false) {
+ if (enabled)
+ lsm->enabled = &lsm_enabled_true;
+ } else {
+ *lsm->enabled = enabled;
+ }
+}
+
+/* Is an LSM already listed in the ordered LSMs list? */
+static bool __init exists_ordered_lsm(struct lsm_info *lsm)
+{
+ struct lsm_info **check;
+
+ for (check = ordered_lsms; *check; check++)
+ if (*check == lsm)
+ return true;
+
+ return false;
+}
+
+/* Append an LSM to the list of ordered LSMs to initialize. */
+static int last_lsm __initdata;
+static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
+{
+ /* Ignore duplicate selections. */
+ if (exists_ordered_lsm(lsm))
+ return;
+
+ if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
+ return;
+
+ /* Enable this LSM, if it is not already set. */
+ if (!lsm->enabled)
+ lsm->enabled = &lsm_enabled_true;
+ ordered_lsms[last_lsm++] = lsm;
+
+ init_debug("%s ordering: %s (%sabled)\n", from, lsm->name,
+ is_enabled(lsm) ? "en" : "dis");
+}
+
+/* Is an LSM allowed to be initialized? */
+static bool __init lsm_allowed(struct lsm_info *lsm)
+{
+ /* Skip if the LSM is disabled. */
+ if (!is_enabled(lsm))
+ return false;
+
+ /* Not allowed if another exclusive LSM already initialized. */
+ if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
+ init_debug("exclusive disabled: %s\n", lsm->name);
+ return false;
+ }
+
+ return true;
+}
+
+static void __init lsm_set_blob_size(int *need, int *lbs)
+{
+ int offset;
+
+ if (*need > 0) {
+ offset = *lbs;
+ *lbs += *need;
+ *need = offset;
+ }
+}
+
+static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
+{
+ if (!needed)
+ return;
+
+ lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
+ lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
+ /*
+ * The inode blob gets an rcu_head in addition to
+ * what the modules might need.
+ */
+ if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
+ blob_sizes.lbs_inode = sizeof(struct rcu_head);
+ lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
+ lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
+ lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
+ lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
+}
+
+/* Prepare LSM for initialization. */
+static void __init prepare_lsm(struct lsm_info *lsm)
+{
+ int enabled = lsm_allowed(lsm);
+
+ /* Record enablement (to handle any following exclusive LSMs). */
+ set_enabled(lsm, enabled);
+
+ /* If enabled, do pre-initialization work. */
+ if (enabled) {
+ if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
+ exclusive = lsm;
+ init_debug("exclusive chosen: %s\n", lsm->name);
+ }
+
+ lsm_set_blob_sizes(lsm->blobs);
+ }
+}
+
+/* Initialize a given LSM, if it is enabled. */
+static void __init initialize_lsm(struct lsm_info *lsm)
+{
+ if (is_enabled(lsm)) {
+ int ret;
- for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
init_debug("initializing %s\n", lsm->name);
ret = lsm->init();
WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
}
}
+/* Populate ordered LSMs list from comma-separated LSM name list. */
+static void __init ordered_lsm_parse(const char *order, const char *origin)
+{
+ struct lsm_info *lsm;
+ char *sep, *name, *next;
+
+ /* LSM_ORDER_FIRST is always first. */
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (lsm->order == LSM_ORDER_FIRST)
+ append_ordered_lsm(lsm, "first");
+ }
+
+ /* Process "security=", if given. */
+ if (chosen_major_lsm) {
+ struct lsm_info *major;
+
+ /*
+ * To match the original "security=" behavior, this
+ * explicitly does NOT fall back to another Legacy Major

+ * if the selected one was separately disabled: disable
+ * all non-matching Legacy Major LSMs.
+ */
+ for (major = __start_lsm_info; major < __end_lsm_info;
+ major++) {
+ if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
+ strcmp(major->name, chosen_major_lsm) != 0) {
+ set_enabled(major, false);
+ init_debug("security=%s disabled: %s\n",
+ chosen_major_lsm, major->name);
+ }
+ }
+ }
+
+ sep = kstrdup(order, GFP_KERNEL);
+ next = sep;
+ /* Walk the list, looking for matching LSMs. */
+ while ((name = strsep(&next, ",")) != NULL) {
+ bool found = false;
+
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (lsm->order == LSM_ORDER_MUTABLE &&
+ strcmp(lsm->name, name) == 0) {
+ append_ordered_lsm(lsm, origin);
+ found = true;
+ }
+ }
+
+ if (!found)
+ init_debug("%s ignored: %s\n", origin, name);
+ }
+
+ /* Process "security=", if given. */
+ if (chosen_major_lsm) {
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (exists_ordered_lsm(lsm))
+ continue;
+ if (strcmp(lsm->name, chosen_major_lsm) == 0)
+ append_ordered_lsm(lsm, "security=");
+ }
+ }
+
+ /* Disable all LSMs not in the ordered list. */
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (exists_ordered_lsm(lsm))
+ continue;
+ set_enabled(lsm, false);
+ init_debug("%s disabled: %s\n", origin, lsm->name);
+ }
+
+ kfree(sep);
+}
+
+static void __init lsm_early_cred(struct cred *cred);
+static void __init lsm_early_task(struct task_struct *task);
+
+static void __init ordered_lsm_init(void)
+{
+ struct lsm_info **lsm;
+
+ ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
+ GFP_KERNEL);
+
+ if (chosen_lsm_order) {
+ if (chosen_major_lsm) {
+ pr_info("security= is ignored because it is superseded by lsm=\n");
+ chosen_major_lsm = NULL;
+ }
+ ordered_lsm_parse(chosen_lsm_order, "cmdline");
+ } else
+ ordered_lsm_parse(builtin_lsm_order, "builtin");
+
+ for (lsm = ordered_lsms; *lsm; lsm++)
+ prepare_lsm(*lsm);
+
+ init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
+ init_debug("file blob size = %d\n", blob_sizes.lbs_file);
+ init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
+ init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
+ init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
+ init_debug("task blob size = %d\n", blob_sizes.lbs_task);
+
+ /*
+ * Create any kmem_caches needed for blobs
+ */
+ if (blob_sizes.lbs_file)
+ lsm_file_cache = kmem_cache_create("lsm_file_cache",
+ blob_sizes.lbs_file, 0,
+ SLAB_PANIC, NULL);
+ if (blob_sizes.lbs_inode)
+ lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
+ blob_sizes.lbs_inode, 0,
+ SLAB_PANIC, NULL);
+
+ lsm_early_cred((struct cred *) current->cred);
+ lsm_early_task(current);
+ for (lsm = ordered_lsms; *lsm; lsm++)
+ initialize_lsm(*lsm);
+
+ kfree(ordered_lsms);
+}
+
/**
* security_init - initializes the security framework
*
@@ -80,28 +343,27 @@ int __init security_init(void)
i++)
INIT_HLIST_HEAD(&list[i]);
- /*
- * Load minor LSMs, with the capability module always first.
- */
- capability_add_hooks();
- yama_add_hooks();
- loadpin_add_hooks();
-
- /*
- * Load all the remaining security modules.
- */
- major_lsm_init();
+ /* Load LSMs in specified order. */
+ ordered_lsm_init();
return 0;
}
/* Save user chosen LSM */
-static int __init choose_lsm(char *str)
+static int __init choose_major_lsm(char *str)
{
- strncpy(chosen_lsm, str, SECURITY_NAME_MAX);
+ chosen_major_lsm = str;
return 1;
}
-__setup("security=", choose_lsm);
+__setup("security=", choose_major_lsm);
+
+/* Explicitly choose LSM initialization order. */
+static int __init choose_lsm_order(char *str)
+{
+ chosen_lsm_order = str;
+ return 1;
+}
+__setup("lsm=", choose_lsm_order);
/* Enable LSM order debugging. */
static int __init enable_debug(char *str)
@@ -148,29 +410,6 @@ static int lsm_append(char *new, char **result)
}
/**
- * security_module_enable - Load given security module on boot ?
- * @module: the name of the module
- *
- * Each LSM must pass this method before registering its own operations
- * to avoid security registration races. This method may also be used
- * to check if your LSM is currently loaded during kernel initialization.
- *
- * Returns:
- *
- * true if:
- *
- * - The passed LSM is the one chosen by user at boot time,
- * - or the passed LSM is configured as the default and the user did not
- * choose an alternate LSM at boot time.
- *
- * Otherwise, return false.
- */
-int __init security_module_enable(const char *module)
-{
- return !strcmp(module, chosen_lsm);
-}
-
-/**
* security_add_hooks - Add a modules hooks to the hook lists.
* @hooks: the hooks to add
* @count: the number of hooks to add
@@ -209,6 +448,161 @@ int unregister_lsm_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_lsm_notifier);
+/**
+ * lsm_cred_alloc - allocate a composite cred blob
+ * @cred: the cred that needs a blob
+ * @gfp: allocation type
+ *
+ * Allocate the cred blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
+{
+ if (blob_sizes.lbs_cred == 0) {
+ cred->security = NULL;
+ return 0;
+ }
+
+ cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
+ if (cred->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_early_cred - during initialization allocate a composite cred blob
+ * @cred: the cred that needs a blob
+ *
+ * Allocate the cred blob for all the modules
+ */
+static void __init lsm_early_cred(struct cred *cred)
+{
+ int rc = lsm_cred_alloc(cred, GFP_KERNEL);
+
+ if (rc)
+ panic("%s: Early cred alloc failed.\n", __func__);
+}
+
+/**
+ * lsm_file_alloc - allocate a composite file blob
+ * @file: the file that needs a blob
+ *
+ * Allocate the file blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_file_alloc(struct file *file)
+{
+ if (!lsm_file_cache) {
+ file->f_security = NULL;
+ return 0;
+ }
+
+ file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
+ if (file->f_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_inode_alloc - allocate a composite inode blob
+ * @inode: the inode that needs a blob
+ *
+ * Allocate the inode blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+int lsm_inode_alloc(struct inode *inode)
+{
+ if (!lsm_inode_cache) {
+ inode->i_security = NULL;
+ return 0;
+ }
+
+ inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
+ if (inode->i_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_task_alloc - allocate a composite task blob
+ * @task: the task that needs a blob
+ *
+ * Allocate the task blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_task_alloc(struct task_struct *task)
+{
+ if (blob_sizes.lbs_task == 0) {
+ task->security = NULL;
+ return 0;
+ }
+
+ task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
+ if (task->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_ipc_alloc - allocate a composite ipc blob
+ * @kip: the ipc that needs a blob
+ *
+ * Allocate the ipc blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
+{
+ if (blob_sizes.lbs_ipc == 0) {
+ kip->security = NULL;
+ return 0;
+ }
+
+ kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
+ if (kip->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_msg_msg_alloc - allocate a composite msg_msg blob
+ * @mp: the msg_msg that needs a blob
+ *
+ * Allocate the msg_msg blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_msg_msg_alloc(struct msg_msg *mp)
+{
+ if (blob_sizes.lbs_msg_msg == 0) {
+ mp->security = NULL;
+ return 0;
+ }
+
+ mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
+ if (mp->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_early_task - during initialization allocate a composite task blob
+ * @task: the task that needs a blob
+ *
+ * Allocate the task blob for all the modules
+ */
+static void __init lsm_early_task(struct task_struct *task)
+{
+ int rc = lsm_task_alloc(task);
+
+ if (rc)
+ panic("%s: Early task alloc failed.\n", __func__);
+}
+
/*
* Hook list operation macros.
*
@@ -294,16 +688,12 @@ int security_capset(struct cred *new, const struct cred *old,
effective, inheritable, permitted);
}
-int security_capable(const struct cred *cred, struct user_namespace *ns,
- int cap)
-{
- return call_int_hook(capable, 0, cred, ns, cap, SECURITY_CAP_AUDIT);
-}
-
-int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
- int cap)
+int security_capable(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts)
{
- return call_int_hook(capable, 0, cred, ns, cap, SECURITY_CAP_NOAUDIT);
+ return call_int_hook(capable, 0, cred, ns, cap, opts);
}
int security_quotactl(int cmds, int type, int id, struct super_block *sb)
@@ -468,14 +858,40 @@ EXPORT_SYMBOL(security_add_mnt_opt);
int security_inode_alloc(struct inode *inode)
{
- inode->i_security = NULL;
- return call_int_hook(inode_alloc_security, 0, inode);
+ int rc = lsm_inode_alloc(inode);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(inode_alloc_security, 0, inode);
+ if (unlikely(rc))
+ security_inode_free(inode);
+ return rc;
+}
+
+static void inode_free_by_rcu(struct rcu_head *head)
+{
+ /*
+ * The rcu head is at the start of the inode blob
+ */
+ kmem_cache_free(lsm_inode_cache, head);
}
void security_inode_free(struct inode *inode)
{
integrity_inode_free(inode);
call_void_hook(inode_free_security, inode);
+ /*
+ * The inode may still be referenced in a path walk and
+ * a call to security_inode_permission() can be made
+ * after inode_free_security() is called. Ideally, the VFS
+ * wouldn't do this, but fixing that is a much harder
+ * job. For now, simply free the i_security via RCU, and
+ * leave the current inode->i_security pointer intact.
+ * The inode will be freed after the RCU grace period too.
+ */
+ if (inode->i_security)
+ call_rcu((struct rcu_head *)inode->i_security,
+ inode_free_by_rcu);
}
int security_dentry_init_security(struct dentry *dentry, int mode,
@@ -905,12 +1321,27 @@ int security_file_permission(struct file *file, int mask)
int security_file_alloc(struct file *file)
{
- return call_int_hook(file_alloc_security, 0, file);
+ int rc = lsm_file_alloc(file);
+
+ if (rc)
+ return rc;
+ rc = call_int_hook(file_alloc_security, 0, file);
+ if (unlikely(rc))
+ security_file_free(file);
+ return rc;
}
void security_file_free(struct file *file)
{
+ void *blob;
+
call_void_hook(file_free_security, file);
+
+ blob = file->f_security;
+ if (blob) {
+ file->f_security = NULL;
+ kmem_cache_free(lsm_file_cache, blob);
+ }
}
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -1012,27 +1443,63 @@ int security_file_open(struct file *file)
int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
{
- return call_int_hook(task_alloc, 0, task, clone_flags);
+ int rc = lsm_task_alloc(task);
+
+ if (rc)
+ return rc;
+ rc = call_int_hook(task_alloc, 0, task, clone_flags);
+ if (unlikely(rc))
+ security_task_free(task);
+ return rc;
}
void security_task_free(struct task_struct *task)
{
call_void_hook(task_free, task);
+
+ kfree(task->security);
+ task->security = NULL;
}
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
- return call_int_hook(cred_alloc_blank, 0, cred, gfp);
+ int rc = lsm_cred_alloc(cred, gfp);
+
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
+ if (unlikely(rc))
+ security_cred_free(cred);
+ return rc;
}
void security_cred_free(struct cred *cred)
{
+ /*
+ * There is a failure case in prepare_creds() that
+ * may result in a call here with ->security being NULL.
+ */
+ if (unlikely(cred->security == NULL))
+ return;
+
call_void_hook(cred_free, cred);
+
+ kfree(cred->security);
+ cred->security = NULL;
}
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
{
- return call_int_hook(cred_prepare, 0, new, old, gfp);
+ int rc = lsm_cred_alloc(new, gfp);
+
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(cred_prepare, 0, new, old, gfp);
+ if (unlikely(rc))
+ security_cred_free(new);
+ return rc;
}
void security_transfer_creds(struct cred *new, const struct cred *old)
@@ -1213,22 +1680,40 @@ void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
int security_msg_msg_alloc(struct msg_msg *msg)
{
- return call_int_hook(msg_msg_alloc_security, 0, msg);
+ int rc = lsm_msg_msg_alloc(msg);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(msg_msg_alloc_security, 0, msg);
+ if (unlikely(rc))
+ security_msg_msg_free(msg);
+ return rc;
}
void security_msg_msg_free(struct msg_msg *msg)
{
call_void_hook(msg_msg_free_security, msg);
+ kfree(msg->security);
+ msg->security = NULL;
}
int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
- return call_int_hook(msg_queue_alloc_security, 0, msq);
+ int rc = lsm_ipc_alloc(msq);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(msg_queue_alloc_security, 0, msq);
+ if (unlikely(rc))
+ security_msg_queue_free(msq);
+ return rc;
}
void security_msg_queue_free(struct kern_ipc_perm *msq)
{
call_void_hook(msg_queue_free_security, msq);
+ kfree(msq->security);
+ msq->security = NULL;
}
int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
@@ -1255,12 +1740,21 @@ int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
int security_shm_alloc(struct kern_ipc_perm *shp)
{
- return call_int_hook(shm_alloc_security, 0, shp);
+ int rc = lsm_ipc_alloc(shp);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(shm_alloc_security, 0, shp);
+ if (unlikely(rc))
+ security_shm_free(shp);
+ return rc;
}
void security_shm_free(struct kern_ipc_perm *shp)
{
call_void_hook(shm_free_security, shp);
+ kfree(shp->security);
+ shp->security = NULL;
}
int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
@@ -1280,12 +1774,21 @@ int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmf
int security_sem_alloc(struct kern_ipc_perm *sma)
{
- return call_int_hook(sem_alloc_security, 0, sma);
+ int rc = lsm_ipc_alloc(sma);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(sem_alloc_security, 0, sma);
+ if (unlikely(rc))
+ security_sem_free(sma);
+ return rc;
}
void security_sem_free(struct kern_ipc_perm *sma)
{
call_void_hook(sem_free_security, sma);
+ kfree(sma->security);
+ sma->security = NULL;
}
int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
@@ -1312,14 +1815,30 @@ void security_d_instantiate(struct dentry *dentry, struct inode *inode)
}
EXPORT_SYMBOL(security_d_instantiate);
-int security_getprocattr(struct task_struct *p, char *name, char **value)
+int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
+ char **value)
{
- return call_int_hook(getprocattr, -EINVAL, p, name, value);
+ struct security_hook_list *hp;
+
+ hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
+ if (lsm != NULL && strcmp(lsm, hp->lsm))
+ continue;
+ return hp->hook.getprocattr(p, name, value);
+ }
+ return -EINVAL;
}
-int security_setprocattr(const char *name, void *value, size_t size)
+int security_setprocattr(const char *lsm, const char *name, void *value,
+ size_t size)
{
- return call_int_hook(setprocattr, -EINVAL, name, value, size);
+ struct security_hook_list *hp;
+
+ hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
+ if (lsm != NULL && strcmp(lsm, hp->lsm))
+ continue;
+ return hp->hook.setprocattr(name, value, size);
+ }
+ return -EINVAL;
}
int security_netlink_send(struct sock *sk, struct sk_buff *skb)
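The infrastructure above replaces per-module caches and kzalloc()s with framework-managed security blobs: during ordered_lsm_init() each enabled LSM's lsm_blob_sizes request is folded into a running total, and the requested size is rewritten to that module's offset into the shared blob. Boot-time ordering now comes from CONFIG_LSM or the "lsm=" parameter, with "security=" kept only for selecting the legacy major module. A hedged sketch of how a hypothetical module would plug into this (names are illustrative; the accessor pattern mirrors the SELinux conversion below):

#include <linux/lsm_hooks.h>
#include <linux/sched.h>
#include <linux/types.h>

struct demo_task_data {
	u32 flags;			/* whatever per-task state is needed */
};

/* Tell the framework how much blob space this LSM needs per object. */
static struct lsm_blob_sizes demo_blob_sizes __lsm_ro_after_init = {
	.lbs_task = sizeof(struct demo_task_data),
};

/* After ordered init, lbs_task holds this LSM's offset into the shared blob. */
static inline struct demo_task_data *demo_task(const struct task_struct *task)
{
	return task->security + demo_blob_sizes.lbs_task;
}

static int demo_task_alloc(struct task_struct *task, unsigned long clone_flags)
{
	demo_task(task)->flags = 0;	/* the shared blob is already zeroed */
	return 0;
}

static struct security_hook_list demo_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(task_alloc, demo_task_alloc),
};

static int __init demo_init(void)
{
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), "demo");
	return 0;
}

DEFINE_LSM(demo) = {
	.name = "demo",
	.init = demo_init,
	.blobs = &demo_blob_sizes,
};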
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 8af7a690eb40..55f032f1fc2d 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -22,21 +22,6 @@ config SECURITY_SELINUX_BOOTPARAM
If you are unsure how to answer this question, answer N.
-config SECURITY_SELINUX_BOOTPARAM_VALUE
- int "NSA SELinux boot parameter default value"
- depends on SECURITY_SELINUX_BOOTPARAM
- range 0 1
- default 1
- help
- This option sets the default value for the kernel parameter
- 'selinux', which allows SELinux to be disabled at boot. If this
- option is set to 0 (zero), the SELinux kernel parameter will
- default to 0, disabling SELinux at bootup. If this option is
- set to 1 (one), the SELinux kernel parameter will default to 1,
- enabling SELinux at bootup.
-
- If you are unsure how to answer this question, answer 1.
-
config SECURITY_SELINUX_DISABLE
bool "NSA SELinux runtime disable"
depends on SECURITY_SELINUX
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index c7161f8792b2..ccf950409384 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
- netnode.o netport.o ibpkey.o exports.o \
+ netnode.o netport.o ibpkey.o \
ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 635e5c1e3e48..8346a4f7c5d7 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -130,75 +130,6 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
}
/**
- * avc_dump_av - Display an access vector in human-readable form.
- * @tclass: target security class
- * @av: access vector
- */
-static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
-{
- const char **perms;
- int i, perm;
-
- if (av == 0) {
- audit_log_format(ab, " null");
- return;
- }
-
- BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map));
- perms = secclass_map[tclass-1].perms;
-
- audit_log_format(ab, " {");
- i = 0;
- perm = 1;
- while (i < (sizeof(av) * 8)) {
- if ((perm & av) && perms[i]) {
- audit_log_format(ab, " %s", perms[i]);
- av &= ~perm;
- }
- i++;
- perm <<= 1;
- }
-
- if (av)
- audit_log_format(ab, " 0x%x", av);
-
- audit_log_format(ab, " }");
-}
-
-/**
- * avc_dump_query - Display a SID pair and a class in human-readable form.
- * @ssid: source security identifier
- * @tsid: target security identifier
- * @tclass: target security class
- */
-static void avc_dump_query(struct audit_buffer *ab, struct selinux_state *state,
- u32 ssid, u32 tsid, u16 tclass)
-{
- int rc;
- char *scontext;
- u32 scontext_len;
-
- rc = security_sid_to_context(state, ssid, &scontext, &scontext_len);
- if (rc)
- audit_log_format(ab, "ssid=%d", ssid);
- else {
- audit_log_format(ab, "scontext=%s", scontext);
- kfree(scontext);
- }
-
- rc = security_sid_to_context(state, tsid, &scontext, &scontext_len);
- if (rc)
- audit_log_format(ab, " tsid=%d", tsid);
- else {
- audit_log_format(ab, " tcontext=%s", scontext);
- kfree(scontext);
- }
-
- BUG_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map));
- audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
-}
-
-/**
* avc_init - Initialize the AVC.
*
* Initialize the access vector cache.
@@ -735,11 +666,36 @@ out:
static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
- audit_log_format(ab, "avc: %s ",
- ad->selinux_audit_data->denied ? "denied" : "granted");
- avc_dump_av(ab, ad->selinux_audit_data->tclass,
- ad->selinux_audit_data->audited);
- audit_log_format(ab, " for ");
+ struct selinux_audit_data *sad = ad->selinux_audit_data;
+ u32 av = sad->audited;
+ const char **perms;
+ int i, perm;
+
+ audit_log_format(ab, "avc: %s ", sad->denied ? "denied" : "granted");
+
+ if (av == 0) {
+ audit_log_format(ab, " null");
+ return;
+ }
+
+ perms = secclass_map[sad->tclass-1].perms;
+
+ audit_log_format(ab, " {");
+ i = 0;
+ perm = 1;
+ while (i < (sizeof(av) * 8)) {
+ if ((perm & av) && perms[i]) {
+ audit_log_format(ab, " %s", perms[i]);
+ av &= ~perm;
+ }
+ i++;
+ perm <<= 1;
+ }
+
+ if (av)
+ audit_log_format(ab, " 0x%x", av);
+
+ audit_log_format(ab, " } for ");
}
/**
@@ -751,14 +707,47 @@ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
- audit_log_format(ab, " ");
- avc_dump_query(ab, ad->selinux_audit_data->state,
- ad->selinux_audit_data->ssid,
- ad->selinux_audit_data->tsid,
- ad->selinux_audit_data->tclass);
- if (ad->selinux_audit_data->denied) {
- audit_log_format(ab, " permissive=%u",
- ad->selinux_audit_data->result ? 0 : 1);
+ struct selinux_audit_data *sad = ad->selinux_audit_data;
+ char *scontext;
+ u32 scontext_len;
+ int rc;
+
+ rc = security_sid_to_context(sad->state, sad->ssid, &scontext,
+ &scontext_len);
+ if (rc)
+ audit_log_format(ab, " ssid=%d", sad->ssid);
+ else {
+ audit_log_format(ab, " scontext=%s", scontext);
+ kfree(scontext);
+ }
+
+ rc = security_sid_to_context(sad->state, sad->tsid, &scontext,
+ &scontext_len);
+ if (rc)
+ audit_log_format(ab, " tsid=%d", sad->tsid);
+ else {
+ audit_log_format(ab, " tcontext=%s", scontext);
+ kfree(scontext);
+ }
+
+ audit_log_format(ab, " tclass=%s", secclass_map[sad->tclass-1].name);
+
+ if (sad->denied)
+ audit_log_format(ab, " permissive=%u", sad->result ? 0 : 1);
+
+ /* in case of an invalid context, also report the actual context string */
+ rc = security_sid_to_context_inval(sad->state, sad->ssid, &scontext,
+ &scontext_len);
+ if (!rc && scontext) {
+ audit_log_format(ab, " srawcon=%s", scontext);
+ kfree(scontext);
+ }
+
+ rc = security_sid_to_context_inval(sad->state, sad->tsid, &scontext,
+ &scontext_len);
+ if (!rc && scontext) {
+ audit_log_format(ab, " trawcon=%s", scontext);
+ kfree(scontext);
}
}
@@ -772,6 +761,9 @@ noinline int slow_avc_audit(struct selinux_state *state,
struct common_audit_data stack_data;
struct selinux_audit_data sad;
+ if (WARN_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map)))
+ return -EINVAL;
+
if (!a) {
a = &stack_data;
a->type = LSM_AUDIT_DATA_NONE;
@@ -838,6 +830,7 @@ out:
* @ssid,@tsid,@tclass : identifier of an AVC entry
* @seqno : sequence number when decision was made
* @xpd: extended_perms_decision to be added to the node
+ * @flags: the AVC_* flags, e.g. AVC_NONBLOCKING, AVC_EXTENDED_PERMS, or 0.
*
* if a valid AVC entry doesn't exist, this function returns -ENOENT.
* if kmalloc() called internally returns NULL, this function returns -ENOMEM.
@@ -856,6 +849,22 @@ static int avc_update_node(struct selinux_avc *avc,
struct hlist_head *head;
spinlock_t *lock;
+ /*
+ * If we are in a non-blocking code path, e.g. VFS RCU walk,
+ * then we must not add permissions to a cache entry
+ * because we cannot safely audit the denial. Otherwise,
+ * during the subsequent blocking retry (e.g. VFS ref walk), we
+ * will find the permissions already granted in the cache entry
+ * and won't audit anything at all, leading to silent denials in
+ * permissive mode that only appear when in enforcing mode.
+ *
+ * See the corresponding handling in slow_avc_audit(), and the
+ * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
+ * which is transliterated into AVC_NONBLOCKING.
+ */
+ if (flags & AVC_NONBLOCKING)
+ return 0;
+
node = avc_alloc_node(avc);
if (!node) {
rc = -ENOMEM;
@@ -1050,7 +1059,8 @@ int avc_has_extended_perms(struct selinux_state *state,
int rc = 0, rc2;
xp_node = &local_xp_node;
- BUG_ON(!requested);
+ if (WARN_ON(!requested))
+ return -EACCES;
rcu_read_lock();
@@ -1115,7 +1125,7 @@ decision:
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
- * @flags: AVC_STRICT or 0
+ * @flags: AVC_STRICT, AVC_NONBLOCKING, or 0
* @avd: access vector decisions
*
* Check the AVC to determine whether the @requested permissions are granted
@@ -1140,7 +1150,8 @@ inline int avc_has_perm_noaudit(struct selinux_state *state,
int rc = 0;
u32 denied;
- BUG_ON(!requested);
+ if (WARN_ON(!requested))
+ return -EACCES;
rcu_read_lock();
@@ -1191,24 +1202,6 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
return rc;
}
-int avc_has_perm_flags(struct selinux_state *state,
- u32 ssid, u32 tsid, u16 tclass, u32 requested,
- struct common_audit_data *auditdata,
- int flags)
-{
- struct av_decision avd;
- int rc, rc2;
-
- rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, 0,
- &avd);
-
- rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
- auditdata, flags);
- if (rc2)
- return rc2;
- return rc;
-}
-
u32 avc_policy_seqno(struct selinux_state *state)
{
return state->avc->avc_cache.latest_notif;
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
deleted file mode 100644
index e75dd94e2d2b..000000000000
--- a/security/selinux/exports.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * SELinux services exported to the rest of the kernel.
- *
- * Author: James Morris <jmorris@redhat.com>
- *
- * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
- * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
- * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/selinux.h>
-
-#include "security.h"
-
-bool selinux_is_enabled(void)
-{
- return selinux_enabled;
-}
-EXPORT_SYMBOL_GPL(selinux_is_enabled);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f0e36c3492ba..2f82a54f8703 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -79,7 +79,6 @@
#include <linux/personality.h>
#include <linux/audit.h>
#include <linux/string.h>
-#include <linux/selinux.h>
#include <linux/mutex.h>
#include <linux/posix-timers.h>
#include <linux/syslog.h>
@@ -121,9 +120,8 @@ __setup("enforcing=", enforcing_setup);
#define selinux_enforcing_boot 1
#endif
+int selinux_enabled __lsm_ro_after_init = 1;
#ifdef CONFIG_SECURITY_SELINUX_BOOTPARAM
-int selinux_enabled = CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE;
-
static int __init selinux_enabled_setup(char *str)
{
unsigned long enabled;
@@ -132,8 +130,6 @@ static int __init selinux_enabled_setup(char *str)
return 1;
}
__setup("selinux=", selinux_enabled_setup);
-#else
-int selinux_enabled = 1;
#endif
static unsigned int selinux_checkreqprot_boot =
@@ -149,9 +145,6 @@ static int __init checkreqprot_setup(char *str)
}
__setup("checkreqprot=", checkreqprot_setup);
-static struct kmem_cache *sel_inode_cache;
-static struct kmem_cache *file_security_cache;
-
/**
* selinux_secmark_enabled - Check to see if SECMARK is currently enabled
*
@@ -214,12 +207,8 @@ static void cred_init_security(void)
struct cred *cred = (struct cred *) current->real_cred;
struct task_security_struct *tsec;
- tsec = kzalloc(sizeof(struct task_security_struct), GFP_KERNEL);
- if (!tsec)
- panic("SELinux: Failed to initialize initial task.\n");
-
+ tsec = selinux_cred(cred);
tsec->osid = tsec->sid = SECINITSID_KERNEL;
- cred->security = tsec;
}
/*
@@ -229,7 +218,7 @@ static inline u32 cred_sid(const struct cred *cred)
{
const struct task_security_struct *tsec;
- tsec = cred->security;
+ tsec = selinux_cred(cred);
return tsec->sid;
}
@@ -250,13 +239,9 @@ static inline u32 task_sid(const struct task_struct *task)
static int inode_alloc_security(struct inode *inode)
{
- struct inode_security_struct *isec;
+ struct inode_security_struct *isec = selinux_inode(inode);
u32 sid = current_sid();
- isec = kmem_cache_zalloc(sel_inode_cache, GFP_NOFS);
- if (!isec)
- return -ENOMEM;
-
spin_lock_init(&isec->lock);
INIT_LIST_HEAD(&isec->list);
isec->inode = inode;
@@ -264,7 +249,6 @@ static int inode_alloc_security(struct inode *inode)
isec->sclass = SECCLASS_FILE;
isec->task_sid = sid;
isec->initialized = LABEL_INVALID;
- inode->i_security = isec;
return 0;
}
@@ -281,7 +265,7 @@ static int __inode_security_revalidate(struct inode *inode,
struct dentry *dentry,
bool may_sleep)
{
- struct inode_security_struct *isec = inode->i_security;
+ struct inode_security_struct *isec = selinux_inode(inode);
might_sleep_if(may_sleep);
@@ -302,7 +286,7 @@ static int __inode_security_revalidate(struct inode *inode,
static struct inode_security_struct *inode_security_novalidate(struct inode *inode)
{
- return inode->i_security;
+ return selinux_inode(inode);
}
static struct inode_security_struct *inode_security_rcu(struct inode *inode, bool rcu)
@@ -312,7 +296,7 @@ static struct inode_security_struct *inode_security_rcu(struct inode *inode, boo
error = __inode_security_revalidate(inode, NULL, !rcu);
if (error)
return ERR_PTR(error);
- return inode->i_security;
+ return selinux_inode(inode);
}
/*
@@ -321,14 +305,14 @@ static struct inode_security_struct *inode_security_rcu(struct inode *inode, boo
static struct inode_security_struct *inode_security(struct inode *inode)
{
__inode_security_revalidate(inode, NULL, true);
- return inode->i_security;
+ return selinux_inode(inode);
}
static struct inode_security_struct *backing_inode_security_novalidate(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
- return inode->i_security;
+ return selinux_inode(inode);
}
/*
@@ -339,22 +323,17 @@ static struct inode_security_struct *backing_inode_security(struct dentry *dentr
struct inode *inode = d_backing_inode(dentry);
__inode_security_revalidate(inode, dentry, true);
- return inode->i_security;
-}
-
-static void inode_free_rcu(struct rcu_head *head)
-{
- struct inode_security_struct *isec;
-
- isec = container_of(head, struct inode_security_struct, rcu);
- kmem_cache_free(sel_inode_cache, isec);
+ return selinux_inode(inode);
}
static void inode_free_security(struct inode *inode)
{
- struct inode_security_struct *isec = inode->i_security;
- struct superblock_security_struct *sbsec = inode->i_sb->s_security;
+ struct inode_security_struct *isec = selinux_inode(inode);
+ struct superblock_security_struct *sbsec;
+ if (!isec)
+ return;
+ sbsec = inode->i_sb->s_security;
/*
* As not all inode security structures are in a list, we check for
* empty list outside of the lock to make sure that we won't waste
@@ -370,42 +349,19 @@ static void inode_free_security(struct inode *inode)
list_del_init(&isec->list);
spin_unlock(&sbsec->isec_lock);
}
-
- /*
- * The inode may still be referenced in a path walk and
- * a call to selinux_inode_permission() can be made
- * after inode_free_security() is called. Ideally, the VFS
- * wouldn't do this, but fixing that is a much harder
- * job. For now, simply free the i_security via RCU, and
- * leave the current inode->i_security pointer intact.
- * The inode will be freed after the RCU grace period too.
- */
- call_rcu(&isec->rcu, inode_free_rcu);
}
static int file_alloc_security(struct file *file)
{
- struct file_security_struct *fsec;
+ struct file_security_struct *fsec = selinux_file(file);
u32 sid = current_sid();
- fsec = kmem_cache_zalloc(file_security_cache, GFP_KERNEL);
- if (!fsec)
- return -ENOMEM;
-
fsec->sid = sid;
fsec->fown_sid = sid;
- file->f_security = fsec;
return 0;
}
-static void file_free_security(struct file *file)
-{
- struct file_security_struct *fsec = file->f_security;
- file->f_security = NULL;
- kmem_cache_free(file_security_cache, fsec);
-}
-
static int superblock_alloc_security(struct super_block *sb)
{
struct superblock_security_struct *sbsec;
@@ -501,7 +457,7 @@ static int may_context_mount_sb_relabel(u32 sid,
struct superblock_security_struct *sbsec,
const struct cred *cred)
{
- const struct task_security_struct *tsec = cred->security;
+ const struct task_security_struct *tsec = selinux_cred(cred);
int rc;
rc = avc_has_perm(&selinux_state,
@@ -520,7 +476,7 @@ static int may_context_mount_inode_relabel(u32 sid,
struct superblock_security_struct *sbsec,
const struct cred *cred)
{
- const struct task_security_struct *tsec = cred->security;
+ const struct task_security_struct *tsec = selinux_cred(cred);
int rc;
rc = avc_has_perm(&selinux_state,
tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
@@ -534,16 +490,10 @@ static int may_context_mount_inode_relabel(u32 sid,
return rc;
}
-static int selinux_is_sblabel_mnt(struct super_block *sb)
+static int selinux_is_genfs_special_handling(struct super_block *sb)
{
- struct superblock_security_struct *sbsec = sb->s_security;
-
- return sbsec->behavior == SECURITY_FS_USE_XATTR ||
- sbsec->behavior == SECURITY_FS_USE_TRANS ||
- sbsec->behavior == SECURITY_FS_USE_TASK ||
- sbsec->behavior == SECURITY_FS_USE_NATIVE ||
- /* Special handling. Genfs but also in-core setxattr handler */
- !strcmp(sb->s_type->name, "sysfs") ||
+ /* Special handling. Genfs but also in-core setxattr handler */
+ return !strcmp(sb->s_type->name, "sysfs") ||
!strcmp(sb->s_type->name, "pstore") ||
!strcmp(sb->s_type->name, "debugfs") ||
!strcmp(sb->s_type->name, "tracefs") ||
@@ -553,6 +503,34 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
!strcmp(sb->s_type->name, "cgroup2")));
}
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = sb->s_security;
+
+ /*
+ * IMPORTANT: Double-check logic in this function when adding a new
+ * SECURITY_FS_USE_* definition!
+ */
+ BUILD_BUG_ON(SECURITY_FS_USE_MAX != 7);
+
+ switch (sbsec->behavior) {
+ case SECURITY_FS_USE_XATTR:
+ case SECURITY_FS_USE_TRANS:
+ case SECURITY_FS_USE_TASK:
+ case SECURITY_FS_USE_NATIVE:
+ return 1;
+
+ case SECURITY_FS_USE_GENFS:
+ return selinux_is_genfs_special_handling(sb);
+
+ /* Never allow relabeling on context mounts */
+ case SECURITY_FS_USE_MNTPOINT:
+ case SECURITY_FS_USE_NONE:
+ default:
+ return 0;
+ }
+}
+
static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
@@ -1374,7 +1352,7 @@ static int selinux_genfs_get_sid(struct dentry *dentry,
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry)
{
struct superblock_security_struct *sbsec = NULL;
- struct inode_security_struct *isec = inode->i_security;
+ struct inode_security_struct *isec = selinux_inode(inode);
u32 task_sid, sid = 0;
u16 sclass;
struct dentry *dentry;
@@ -1621,7 +1599,7 @@ static inline u32 signal_to_av(int sig)
/* Check whether a task is allowed to use a capability. */
static int cred_has_capability(const struct cred *cred,
- int cap, int audit, bool initns)
+ int cap, unsigned int opts, bool initns)
{
struct common_audit_data ad;
struct av_decision avd;
@@ -1648,7 +1626,7 @@ static int cred_has_capability(const struct cred *cred,
rc = avc_has_perm_noaudit(&selinux_state,
sid, sid, sclass, av, 0, &avd);
- if (audit == SECURITY_CAP_AUDIT) {
+ if (!(opts & CAP_OPT_NOAUDIT)) {
int rc2 = avc_audit(&selinux_state,
sid, sid, sclass, av, &avd, rc, &ad, 0);
if (rc2)
@@ -1674,7 +1652,7 @@ static int inode_has_perm(const struct cred *cred,
return 0;
sid = cred_sid(cred);
- isec = inode->i_security;
+ isec = selinux_inode(inode);
return avc_has_perm(&selinux_state,
sid, isec->sid, isec->sclass, perms, adp);
@@ -1740,7 +1718,7 @@ static int file_has_perm(const struct cred *cred,
struct file *file,
u32 av)
{
- struct file_security_struct *fsec = file->f_security;
+ struct file_security_struct *fsec = selinux_file(file);
struct inode *inode = file_inode(file);
struct common_audit_data ad;
u32 sid = cred_sid(cred);
@@ -1806,7 +1784,7 @@ static int may_create(struct inode *dir,
struct dentry *dentry,
u16 tclass)
{
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
u32 sid, newsid;
@@ -1828,7 +1806,7 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- rc = selinux_determine_inode_label(current_security(), dir,
+ rc = selinux_determine_inode_label(selinux_cred(current_cred()), dir,
&dentry->d_name, tclass, &newsid);
if (rc)
return rc;
@@ -2084,7 +2062,7 @@ static int selinux_binder_transfer_file(struct task_struct *from,
struct file *file)
{
u32 sid = task_sid(to);
- struct file_security_struct *fsec = file->f_security;
+ struct file_security_struct *fsec = selinux_file(file);
struct dentry *dentry = file->f_path.dentry;
struct inode_security_struct *isec;
struct common_audit_data ad;
@@ -2168,9 +2146,9 @@ static int selinux_capset(struct cred *new, const struct cred *old,
*/
static int selinux_capable(const struct cred *cred, struct user_namespace *ns,
- int cap, int audit)
+ int cap, unsigned int opts)
{
- return cred_has_capability(cred, cap, audit, ns == &init_user_ns);
+ return cred_has_capability(cred, cap, opts, ns == &init_user_ns);
}
static int selinux_quotactl(int cmds, int type, int id, struct super_block *sb)
@@ -2244,7 +2222,7 @@ static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
int rc, cap_sys_admin = 0;
rc = cred_has_capability(current_cred(), CAP_SYS_ADMIN,
- SECURITY_CAP_NOAUDIT, true);
+ CAP_OPT_NOAUDIT, true);
if (rc == 0)
cap_sys_admin = 1;
@@ -2335,8 +2313,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
if (bprm->called_set_creds)
return 0;
- old_tsec = current_security();
- new_tsec = bprm->cred->security;
+ old_tsec = selinux_cred(current_cred());
+ new_tsec = selinux_cred(bprm->cred);
isec = inode_security(inode);
/* Default to the current task SID. */
@@ -2500,7 +2478,7 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
struct rlimit *rlim, *initrlim;
int rc, i;
- new_tsec = bprm->cred->security;
+ new_tsec = selinux_cred(bprm->cred);
if (new_tsec->sid == new_tsec->osid)
return;
@@ -2543,7 +2521,7 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
*/
static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
{
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
struct itimerval itimer;
u32 osid, sid;
int rc, i;
@@ -2780,7 +2758,7 @@ static int selinux_dentry_init_security(struct dentry *dentry, int mode,
u32 newsid;
int rc;
- rc = selinux_determine_inode_label(current_security(),
+ rc = selinux_determine_inode_label(selinux_cred(current_cred()),
d_inode(dentry->d_parent), name,
inode_mode_to_security_class(mode),
&newsid);
@@ -2800,14 +2778,14 @@ static int selinux_dentry_create_files_as(struct dentry *dentry, int mode,
int rc;
struct task_security_struct *tsec;
- rc = selinux_determine_inode_label(old->security,
+ rc = selinux_determine_inode_label(selinux_cred(old),
d_inode(dentry->d_parent), name,
inode_mode_to_security_class(mode),
&newsid);
if (rc)
return rc;
- tsec = new->security;
+ tsec = selinux_cred(new);
tsec->create_sid = newsid;
return 0;
}
@@ -2817,7 +2795,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
const char **name,
void **value, size_t *len)
{
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
struct superblock_security_struct *sbsec;
u32 newsid, clen;
int rc;
@@ -2827,7 +2805,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
newsid = tsec->create_sid;
- rc = selinux_determine_inode_label(current_security(),
+ rc = selinux_determine_inode_label(selinux_cred(current_cred()),
dir, qstr,
inode_mode_to_security_class(inode->i_mode),
&newsid);
@@ -2836,7 +2814,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
/* Possibly defer initialization to selinux_complete_init. */
if (sbsec->flags & SE_SBINITIALIZED) {
- struct inode_security_struct *isec = inode->i_security;
+ struct inode_security_struct *isec = selinux_inode(inode);
isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = LABEL_INITIALIZED;
@@ -2925,9 +2903,8 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
if (IS_ERR(isec))
return PTR_ERR(isec);
- return avc_has_perm_flags(&selinux_state,
- sid, isec->sid, isec->sclass, FILE__READ, &ad,
- rcu ? MAY_NOT_BLOCK : 0);
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, FILE__READ, &ad);
}
static noinline int audit_inode_permission(struct inode *inode,
@@ -2936,7 +2913,7 @@ static noinline int audit_inode_permission(struct inode *inode,
unsigned flags)
{
struct common_audit_data ad;
- struct inode_security_struct *isec = inode->i_security;
+ struct inode_security_struct *isec = selinux_inode(inode);
int rc;
ad.type = LSM_AUDIT_DATA_INODE;
@@ -2982,7 +2959,9 @@ static int selinux_inode_permission(struct inode *inode, int mask)
return PTR_ERR(isec);
rc = avc_has_perm_noaudit(&selinux_state,
- sid, isec->sid, isec->sclass, perms, 0, &avd);
+ sid, isec->sid, isec->sclass, perms,
+ (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
+ &avd);
audited = avc_audit_required(perms, &avd, rc,
from_access ? FILE__AUDIT_ACCESS : 0,
&denied);
@@ -3031,11 +3010,11 @@ static int selinux_inode_getattr(const struct path *path)
static bool has_cap_mac_admin(bool audit)
{
const struct cred *cred = current_cred();
- int cap_audit = audit ? SECURITY_CAP_AUDIT : SECURITY_CAP_NOAUDIT;
+ unsigned int opts = audit ? CAP_OPT_NONE : CAP_OPT_NOAUDIT;
- if (cap_capable(cred, &init_user_ns, CAP_MAC_ADMIN, cap_audit))
+ if (cap_capable(cred, &init_user_ns, CAP_MAC_ADMIN, opts))
return false;
- if (cred_has_capability(cred, CAP_MAC_ADMIN, cap_audit, true))
+ if (cred_has_capability(cred, CAP_MAC_ADMIN, opts, true))
return false;
return true;
}
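With the switch from the SECURITY_CAP_AUDIT/SECURITY_CAP_NOAUDIT selector to a CAP_OPT_* bit mask, callers test individual bits (here !(opts & CAP_OPT_NOAUDIT)) instead of comparing one enumerated value, leaving room for further options without another parameter. A self-contained model of that calling convention, with locally defined flags standing in for the kernel's:

#include <stdio.h>

#define OPT_NONE	0x0u
#define OPT_NOAUDIT	0x1u	/* suppress the audit record on denial */

static int check_capability(int cap, unsigned int opts)
{
	int denied = 1;			/* pretend the permission check failed */

	if (denied && !(opts & OPT_NOAUDIT))
		printf("audit: capability %d denied\n", cap);
	return denied ? -1 : 0;
}

int main(void)
{
	check_capability(21, OPT_NONE);		/* audited denial */
	check_capability(21, OPT_NOAUDIT);	/* silent probe, as in has_cap_mac_admin() */
	return 0;
}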
@@ -3241,12 +3220,16 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct inode_security_struct *isec = inode_security_novalidate(inode);
+ struct superblock_security_struct *sbsec = inode->i_sb->s_security;
u32 newsid;
int rc;
if (strcmp(name, XATTR_SELINUX_SUFFIX))
return -EOPNOTSUPP;
+ if (!(sbsec->flags & SBLABEL_MNT))
+ return -EOPNOTSUPP;
+
if (!value || !size)
return -EACCES;
@@ -3289,7 +3272,7 @@ static int selinux_inode_copy_up(struct dentry *src, struct cred **new)
return -ENOMEM;
}
- tsec = new_creds->security;
+ tsec = selinux_cred(new_creds);
/* Get label from overlay inode and set it in create_sid */
selinux_inode_getsecid(d_inode(src), &sid);
tsec->create_sid = sid;
@@ -3330,7 +3313,7 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
static int selinux_file_permission(struct file *file, int mask)
{
struct inode *inode = file_inode(file);
- struct file_security_struct *fsec = file->f_security;
+ struct file_security_struct *fsec = selinux_file(file);
struct inode_security_struct *isec;
u32 sid = current_sid();
@@ -3352,11 +3335,6 @@ static int selinux_file_alloc_security(struct file *file)
return file_alloc_security(file);
}
-static void selinux_file_free_security(struct file *file)
-{
- file_free_security(file);
-}
-
/*
* Check whether a task has the ioctl permission and cmd
* operation to an inode.
@@ -3365,7 +3343,7 @@ static int ioctl_has_perm(const struct cred *cred, struct file *file,
u32 requested, u16 cmd)
{
struct common_audit_data ad;
- struct file_security_struct *fsec = file->f_security;
+ struct file_security_struct *fsec = selinux_file(file);
struct inode *inode = file_inode(file);
struct inode_security_struct *isec;
struct lsm_ioctlop_audit ioctl;
@@ -3435,7 +3413,7 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
case KDSKBENT:
case KDSKBSENT:
error = cred_has_capability(cred, CAP_SYS_TTY_CONFIG,
- SECURITY_CAP_AUDIT, true);
+ CAP_OPT_NONE, true);
break;
/* default case assumes that the command will go
@@ -3617,7 +3595,7 @@ static void selinux_file_set_fowner(struct file *file)
{
struct file_security_struct *fsec;
- fsec = file->f_security;
+ fsec = selinux_file(file);
fsec->fown_sid = current_sid();
}
@@ -3632,7 +3610,7 @@ static int selinux_file_send_sigiotask(struct task_struct *tsk,
/* struct fown_struct is never outside the context of a struct file */
file = container_of(fown, struct file, f_owner);
- fsec = file->f_security;
+ fsec = selinux_file(file);
if (!signum)
perm = signal_to_av(SIGIO); /* as per send_sigio_to_task */
@@ -3656,7 +3634,7 @@ static int selinux_file_open(struct file *file)
struct file_security_struct *fsec;
struct inode_security_struct *isec;
- fsec = file->f_security;
+ fsec = selinux_file(file);
isec = inode_security(file_inode(file));
/*
* Save inode label and policy sequence number
@@ -3690,52 +3668,15 @@ static int selinux_task_alloc(struct task_struct *task,
}
/*
- * allocate the SELinux part of blank credentials
- */
-static int selinux_cred_alloc_blank(struct cred *cred, gfp_t gfp)
-{
- struct task_security_struct *tsec;
-
- tsec = kzalloc(sizeof(struct task_security_struct), gfp);
- if (!tsec)
- return -ENOMEM;
-
- cred->security = tsec;
- return 0;
-}
-
-/*
- * detach and free the LSM part of a set of credentials
- */
-static void selinux_cred_free(struct cred *cred)
-{
- struct task_security_struct *tsec = cred->security;
-
- /*
- * cred->security == NULL if security_cred_alloc_blank() or
- * security_prepare_creds() returned an error.
- */
- BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
- cred->security = (void *) 0x7UL;
- kfree(tsec);
-}
-
-/*
* prepare a new set of credentials for modification
*/
static int selinux_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- const struct task_security_struct *old_tsec;
- struct task_security_struct *tsec;
-
- old_tsec = old->security;
-
- tsec = kmemdup(old_tsec, sizeof(struct task_security_struct), gfp);
- if (!tsec)
- return -ENOMEM;
+ const struct task_security_struct *old_tsec = selinux_cred(old);
+ struct task_security_struct *tsec = selinux_cred(new);
- new->security = tsec;
+ *tsec = *old_tsec;
return 0;
}
@@ -3744,8 +3685,8 @@ static int selinux_cred_prepare(struct cred *new, const struct cred *old,
*/
static void selinux_cred_transfer(struct cred *new, const struct cred *old)
{
- const struct task_security_struct *old_tsec = old->security;
- struct task_security_struct *tsec = new->security;
+ const struct task_security_struct *old_tsec = selinux_cred(old);
+ struct task_security_struct *tsec = selinux_cred(new);
*tsec = *old_tsec;
}
@@ -3761,7 +3702,7 @@ static void selinux_cred_getsecid(const struct cred *c, u32 *secid)
*/
static int selinux_kernel_act_as(struct cred *new, u32 secid)
{
- struct task_security_struct *tsec = new->security;
+ struct task_security_struct *tsec = selinux_cred(new);
u32 sid = current_sid();
int ret;
@@ -3786,7 +3727,7 @@ static int selinux_kernel_act_as(struct cred *new, u32 secid)
static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
{
struct inode_security_struct *isec = inode_security(inode);
- struct task_security_struct *tsec = new->security;
+ struct task_security_struct *tsec = selinux_cred(new);
u32 sid = current_sid();
int ret;
@@ -3832,7 +3773,7 @@ static int selinux_kernel_module_from_file(struct file *file)
ad.type = LSM_AUDIT_DATA_FILE;
ad.u.file = file;
- fsec = file->f_security;
+ fsec = selinux_file(file);
if (sid != fsec->sid) {
rc = avc_has_perm(&selinux_state,
sid, fsec->sid, SECCLASS_FD, FD__USE, &ad);
@@ -3998,7 +3939,7 @@ static int selinux_task_kill(struct task_struct *p, struct kernel_siginfo *info,
static void selinux_task_to_inode(struct task_struct *p,
struct inode *inode)
{
- struct inode_security_struct *isec = inode->i_security;
+ struct inode_security_struct *isec = selinux_inode(inode);
u32 sid = task_sid(p);
spin_lock(&isec->lock);
@@ -4335,7 +4276,7 @@ static int sock_has_perm(struct sock *sk, u32 perms)
static int selinux_socket_create(int family, int type,
int protocol, int kern)
{
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
u32 newsid;
u16 secclass;
int rc;
@@ -4355,7 +4296,7 @@ static int selinux_socket_create(int family, int type,
static int selinux_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
struct inode_security_struct *isec = inode_security_novalidate(SOCK_INODE(sock));
struct sk_security_struct *sksec;
u16 sclass = socket_type_to_security_class(family, type, protocol);
@@ -5236,7 +5177,7 @@ static int selinux_secmark_relabel_packet(u32 sid)
const struct task_security_struct *__tsec;
u32 tsid;
- __tsec = current_security();
+ __tsec = selinux_cred(current_cred());
tsid = __tsec->sid;
return avc_has_perm(&selinux_state,
@@ -5711,51 +5652,22 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
return selinux_nlmsg_perm(sk, skb);
}
-static int ipc_alloc_security(struct kern_ipc_perm *perm,
- u16 sclass)
+static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
{
- struct ipc_security_struct *isec;
-
- isec = kzalloc(sizeof(struct ipc_security_struct), GFP_KERNEL);
- if (!isec)
- return -ENOMEM;
-
isec->sclass = sclass;
isec->sid = current_sid();
- perm->security = isec;
-
- return 0;
-}
-
-static void ipc_free_security(struct kern_ipc_perm *perm)
-{
- struct ipc_security_struct *isec = perm->security;
- perm->security = NULL;
- kfree(isec);
}
static int msg_msg_alloc_security(struct msg_msg *msg)
{
struct msg_security_struct *msec;
- msec = kzalloc(sizeof(struct msg_security_struct), GFP_KERNEL);
- if (!msec)
- return -ENOMEM;
-
+ msec = selinux_msg_msg(msg);
msec->sid = SECINITSID_UNLABELED;
- msg->security = msec;
return 0;
}
-static void msg_msg_free_security(struct msg_msg *msg)
-{
- struct msg_security_struct *msec = msg->security;
-
- msg->security = NULL;
- kfree(msec);
-}
-
static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
u32 perms)
{
@@ -5763,7 +5675,7 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
struct common_audit_data ad;
u32 sid = current_sid();
- isec = ipc_perms->security;
+ isec = selinux_ipc(ipc_perms);
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = ipc_perms->key;
@@ -5777,11 +5689,6 @@ static int selinux_msg_msg_alloc_security(struct msg_msg *msg)
return msg_msg_alloc_security(msg);
}
-static void selinux_msg_msg_free_security(struct msg_msg *msg)
-{
- msg_msg_free_security(msg);
-}
-
/* message queue security operations */
static int selinux_msg_queue_alloc_security(struct kern_ipc_perm *msq)
{
@@ -5790,11 +5697,8 @@ static int selinux_msg_queue_alloc_security(struct kern_ipc_perm *msq)
u32 sid = current_sid();
int rc;
- rc = ipc_alloc_security(msq, SECCLASS_MSGQ);
- if (rc)
- return rc;
-
- isec = msq->security;
+ isec = selinux_ipc(msq);
+ ipc_init_security(isec, SECCLASS_MSGQ);
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->key;
@@ -5802,16 +5706,7 @@ static int selinux_msg_queue_alloc_security(struct kern_ipc_perm *msq)
rc = avc_has_perm(&selinux_state,
sid, isec->sid, SECCLASS_MSGQ,
MSGQ__CREATE, &ad);
- if (rc) {
- ipc_free_security(msq);
- return rc;
- }
- return 0;
-}
-
-static void selinux_msg_queue_free_security(struct kern_ipc_perm *msq)
-{
- ipc_free_security(msq);
+ return rc;
}
static int selinux_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
@@ -5820,7 +5715,7 @@ static int selinux_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
struct common_audit_data ad;
u32 sid = current_sid();
- isec = msq->security;
+ isec = selinux_ipc(msq);
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->key;
@@ -5869,8 +5764,8 @@ static int selinux_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *m
u32 sid = current_sid();
int rc;
- isec = msq->security;
- msec = msg->security;
+ isec = selinux_ipc(msq);
+ msec = selinux_msg_msg(msg);
/*
* First time through, need to assign label to the message
@@ -5917,8 +5812,8 @@ static int selinux_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *m
u32 sid = task_sid(target);
int rc;
- isec = msq->security;
- msec = msg->security;
+ isec = selinux_ipc(msq);
+ msec = selinux_msg_msg(msg);
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->key;
@@ -5941,11 +5836,8 @@ static int selinux_shm_alloc_security(struct kern_ipc_perm *shp)
u32 sid = current_sid();
int rc;
- rc = ipc_alloc_security(shp, SECCLASS_SHM);
- if (rc)
- return rc;
-
- isec = shp->security;
+ isec = selinux_ipc(shp);
+ ipc_init_security(isec, SECCLASS_SHM);
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = shp->key;
@@ -5953,16 +5845,7 @@ static int selinux_shm_alloc_security(struct kern_ipc_perm *shp)
rc = avc_has_perm(&selinux_state,
sid, isec->sid, SECCLASS_SHM,
SHM__CREATE, &ad);
- if (rc) {
- ipc_free_security(shp);
- return rc;
- }
- return 0;
-}
-
-static void selinux_shm_free_security(struct kern_ipc_perm *shp)
-{
- ipc_free_security(shp);
+ return rc;
}
static int selinux_shm_associate(struct kern_ipc_perm *shp, int shmflg)
@@ -5971,7 +5854,7 @@ static int selinux_shm_associate(struct kern_ipc_perm *shp, int shmflg)
struct common_audit_data ad;
u32 sid = current_sid();
- isec = shp->security;
+ isec = selinux_ipc(shp);
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = shp->key;
@@ -6038,11 +5921,8 @@ static int selinux_sem_alloc_security(struct kern_ipc_perm *sma)
u32 sid = current_sid();
int rc;
- rc = ipc_alloc_security(sma, SECCLASS_SEM);
- if (rc)
- return rc;
-
- isec = sma->security;
+ isec = selinux_ipc(sma);
+ ipc_init_security(isec, SECCLASS_SEM);
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = sma->key;
@@ -6050,16 +5930,7 @@ static int selinux_sem_alloc_security(struct kern_ipc_perm *sma)
rc = avc_has_perm(&selinux_state,
sid, isec->sid, SECCLASS_SEM,
SEM__CREATE, &ad);
- if (rc) {
- ipc_free_security(sma);
- return rc;
- }
- return 0;
-}
-
-static void selinux_sem_free_security(struct kern_ipc_perm *sma)
-{
- ipc_free_security(sma);
+ return rc;
}
static int selinux_sem_associate(struct kern_ipc_perm *sma, int semflg)
@@ -6068,7 +5939,7 @@ static int selinux_sem_associate(struct kern_ipc_perm *sma, int semflg)
struct common_audit_data ad;
u32 sid = current_sid();
- isec = sma->security;
+ isec = selinux_ipc(sma);
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = sma->key;
@@ -6154,7 +6025,7 @@ static int selinux_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
static void selinux_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
- struct ipc_security_struct *isec = ipcp->security;
+ struct ipc_security_struct *isec = selinux_ipc(ipcp);
*secid = isec->sid;
}
@@ -6173,7 +6044,7 @@ static int selinux_getprocattr(struct task_struct *p,
unsigned len;
rcu_read_lock();
- __tsec = __task_cred(p)->security;
+ __tsec = selinux_cred(__task_cred(p));
if (current != p) {
error = avc_has_perm(&selinux_state,
@@ -6296,7 +6167,7 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
operation. See selinux_bprm_set_creds for the execve
checks and may_create for the file creation checks. The
operation will then fail if the context is not permitted. */
- tsec = new->security;
+ tsec = selinux_cred(new);
if (!strcmp(name, "exec")) {
tsec->exec_sid = sid;
} else if (!strcmp(name, "fscreate")) {
@@ -6380,7 +6251,7 @@ static void selinux_release_secctx(char *secdata, u32 seclen)
static void selinux_inode_invalidate_secctx(struct inode *inode)
{
- struct inode_security_struct *isec = inode->i_security;
+ struct inode_security_struct *isec = selinux_inode(inode);
spin_lock(&isec->lock);
isec->initialized = LABEL_INVALID;
@@ -6392,7 +6263,10 @@ static void selinux_inode_invalidate_secctx(struct inode *inode)
*/
static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
- return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
+ int rc = selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX,
+ ctx, ctxlen, 0);
+ /* Do not return error when suppressing label (SBLABEL_MNT not set). */
+ return rc == -EOPNOTSUPP ? 0 : rc;
}
/*
@@ -6425,7 +6299,7 @@ static int selinux_key_alloc(struct key *k, const struct cred *cred,
if (!ksec)
return -ENOMEM;
- tsec = cred->security;
+ tsec = selinux_cred(cred);
if (tsec->keycreate_sid)
ksec->sid = tsec->keycreate_sid;
else
@@ -6688,6 +6562,14 @@ static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
}
#endif
+struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = {
+ .lbs_cred = sizeof(struct task_security_struct),
+ .lbs_file = sizeof(struct file_security_struct),
+ .lbs_inode = sizeof(struct inode_security_struct),
+ .lbs_ipc = sizeof(struct ipc_security_struct),
+ .lbs_msg_msg = sizeof(struct msg_security_struct),
+};
+
static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
@@ -6757,7 +6639,6 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(file_permission, selinux_file_permission),
LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
- LSM_HOOK_INIT(file_free_security, selinux_file_free_security),
LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
@@ -6771,8 +6652,6 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(file_open, selinux_file_open),
LSM_HOOK_INIT(task_alloc, selinux_task_alloc),
- LSM_HOOK_INIT(cred_alloc_blank, selinux_cred_alloc_blank),
- LSM_HOOK_INIT(cred_free, selinux_cred_free),
LSM_HOOK_INIT(cred_prepare, selinux_cred_prepare),
LSM_HOOK_INIT(cred_transfer, selinux_cred_transfer),
LSM_HOOK_INIT(cred_getsecid, selinux_cred_getsecid),
@@ -6800,24 +6679,20 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ipc_getsecid, selinux_ipc_getsecid),
LSM_HOOK_INIT(msg_msg_alloc_security, selinux_msg_msg_alloc_security),
- LSM_HOOK_INIT(msg_msg_free_security, selinux_msg_msg_free_security),
LSM_HOOK_INIT(msg_queue_alloc_security,
selinux_msg_queue_alloc_security),
- LSM_HOOK_INIT(msg_queue_free_security, selinux_msg_queue_free_security),
LSM_HOOK_INIT(msg_queue_associate, selinux_msg_queue_associate),
LSM_HOOK_INIT(msg_queue_msgctl, selinux_msg_queue_msgctl),
LSM_HOOK_INIT(msg_queue_msgsnd, selinux_msg_queue_msgsnd),
LSM_HOOK_INIT(msg_queue_msgrcv, selinux_msg_queue_msgrcv),
LSM_HOOK_INIT(shm_alloc_security, selinux_shm_alloc_security),
- LSM_HOOK_INIT(shm_free_security, selinux_shm_free_security),
LSM_HOOK_INIT(shm_associate, selinux_shm_associate),
LSM_HOOK_INIT(shm_shmctl, selinux_shm_shmctl),
LSM_HOOK_INIT(shm_shmat, selinux_shm_shmat),
LSM_HOOK_INIT(sem_alloc_security, selinux_sem_alloc_security),
- LSM_HOOK_INIT(sem_free_security, selinux_sem_free_security),
LSM_HOOK_INIT(sem_associate, selinux_sem_associate),
LSM_HOOK_INIT(sem_semctl, selinux_sem_semctl),
LSM_HOOK_INIT(sem_semop, selinux_sem_semop),
@@ -6928,16 +6803,6 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
static __init int selinux_init(void)
{
- if (!security_module_enable("selinux")) {
- selinux_enabled = 0;
- return 0;
- }
-
- if (!selinux_enabled) {
- pr_info("SELinux: Disabled at boot.\n");
- return 0;
- }
-
pr_info("SELinux: Initializing.\n");
memset(&selinux_state, 0, sizeof(selinux_state));
@@ -6951,12 +6816,6 @@ static __init int selinux_init(void)
default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC);
- sel_inode_cache = kmem_cache_create("selinux_inode_security",
- sizeof(struct inode_security_struct),
- 0, SLAB_PANIC, NULL);
- file_security_cache = kmem_cache_create("selinux_file_security",
- sizeof(struct file_security_struct),
- 0, SLAB_PANIC, NULL);
avc_init();
avtab_cache_init();
@@ -6999,6 +6858,9 @@ void selinux_complete_init(void)
all processes and objects when they are created. */
DEFINE_LSM(selinux) = {
.name = "selinux",
+ .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
+ .enabled = &selinux_enabled,
+ .blobs = &selinux_blob_sizes,
.init = selinux_init,
};
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
index e51a81ffb8c9..682e2b5de2a4 100644
--- a/security/selinux/include/audit.h
+++ b/security/selinux/include/audit.h
@@ -1,9 +1,6 @@
/*
* SELinux support for the Audit LSM hooks
*
- * Most of below header was moved from include/linux/selinux.h which
- * is released under below copyrights:
- *
* Author: James Morris <jmorris@redhat.com>
*
* Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index ef899bcfd2cb..7be0e1e90e8b 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -142,6 +142,7 @@ static inline int avc_audit(struct selinux_state *state,
#define AVC_STRICT 1 /* Ignore permissive mode. */
#define AVC_EXTENDED_PERMS 2 /* update extended permissions */
+#define AVC_NONBLOCKING 4 /* non blocking */
int avc_has_perm_noaudit(struct selinux_state *state,
u32 ssid, u32 tsid,
u16 tclass, u32 requested,
@@ -152,11 +153,6 @@ int avc_has_perm(struct selinux_state *state,
u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct common_audit_data *auditdata);
-int avc_has_perm_flags(struct selinux_state *state,
- u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct common_audit_data *auditdata,
- int flags);
int avc_has_extended_perms(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass, u32 requested,
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index cc5e26b0161b..231262d8eac9 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -25,6 +25,8 @@
#include <linux/binfmts.h>
#include <linux/in.h>
#include <linux/spinlock.h>
+#include <linux/lsm_hooks.h>
+#include <linux/msg.h>
#include <net/net_namespace.h>
#include "flask.h"
#include "avc.h"
@@ -56,10 +58,7 @@ enum label_initialized {
struct inode_security_struct {
struct inode *inode; /* back pointer to inode object */
- union {
- struct list_head list; /* list of inode_security_struct */
- struct rcu_head rcu; /* for freeing the inode_security_struct */
- };
+ struct list_head list; /* list of inode_security_struct */
u32 task_sid; /* SID of creating task */
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
@@ -158,4 +157,35 @@ struct bpf_security_struct {
u32 sid; /* SID of bpf obj creator */
};
+extern struct lsm_blob_sizes selinux_blob_sizes;
+static inline struct task_security_struct *selinux_cred(const struct cred *cred)
+{
+ return cred->security + selinux_blob_sizes.lbs_cred;
+}
+
+static inline struct file_security_struct *selinux_file(const struct file *file)
+{
+ return file->f_security + selinux_blob_sizes.lbs_file;
+}
+
+static inline struct inode_security_struct *selinux_inode(
+ const struct inode *inode)
+{
+ if (unlikely(!inode->i_security))
+ return NULL;
+ return inode->i_security + selinux_blob_sizes.lbs_inode;
+}
+
+static inline struct msg_security_struct *selinux_msg_msg(
+ const struct msg_msg *msg_msg)
+{
+ return msg_msg->security + selinux_blob_sizes.lbs_msg_msg;
+}
+
+static inline struct ipc_security_struct *selinux_ipc(
+ const struct kern_ipc_perm *ipc)
+{
+ return ipc->security + selinux_blob_sizes.lbs_ipc;
+}
+
#endif /* _SELINUX_OBJSEC_H_ */
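These accessors add an offset from selinux_blob_sizes to the object's security pointer because that pointer now refers to a single allocation shared by all stacked modules; the infrastructure is expected to turn each module's requested lbs_* size into that module's offset when the blob layout is finalized. A compact userspace model of the scheme, with invented structure and function names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct blob_sizes { size_t lbs_cred; };	/* requested size, rewritten as an offset */

static size_t total_cred_blob;

/* Registration pass: record this module's offset, then grow the total. */
static void reserve_cred_space(struct blob_sizes *bs, size_t need)
{
	bs->lbs_cred = total_cred_blob;
	total_cred_blob += need;
}

struct mod_a_sec { int sid; };
struct mod_b_sec { char label[16]; };

static struct blob_sizes mod_a, mod_b;

static struct mod_a_sec *mod_a_cred(void *security)
{
	return (struct mod_a_sec *)((char *)security + mod_a.lbs_cred);
}

static struct mod_b_sec *mod_b_cred(void *security)
{
	return (struct mod_b_sec *)((char *)security + mod_b.lbs_cred);
}

int main(void)
{
	void *security;

	reserve_cred_space(&mod_a, sizeof(struct mod_a_sec));
	reserve_cred_space(&mod_b, sizeof(struct mod_b_sec));

	security = calloc(1, total_cred_blob);	/* one allocation per cred */
	if (!security)
		return 1;
	mod_a_cred(security)->sid = 42;
	strcpy(mod_b_cred(security)->label, "floor");
	printf("%d %s\n", mod_a_cred(security)->sid, mod_b_cred(security)->label);
	free(security);
	return 0;
}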
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index ba8eedf42b90..f68fb25b5702 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -255,6 +255,9 @@ int security_sid_to_context(struct selinux_state *state, u32 sid,
int security_sid_to_context_force(struct selinux_state *state,
u32 sid, char **scontext, u32 *scontext_len);
+int security_sid_to_context_inval(struct selinux_state *state,
+ u32 sid, char **scontext, u32 *scontext_len);
+
int security_context_to_sid(struct selinux_state *state,
const char *scontext, u32 scontext_len,
u32 *out_sid, gfp_t gfp);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index f3a5a138a096..145ee62f205a 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1378,7 +1378,7 @@ static int sel_make_bools(struct selinux_fs_info *fsi)
goto out;
}
- isec = (struct inode_security_struct *)inode->i_security;
+ isec = selinux_inode(inode);
ret = security_genfs_sid(fsi->state, "selinuxfs", page,
SECCLASS_FILE, &sid);
if (ret) {
@@ -1953,7 +1953,7 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
}
inode->i_ino = ++fsi->last_ino;
- isec = (struct inode_security_struct *)inode->i_security;
+ isec = selinux_inode(inode);
isec->sid = SECINITSID_DEVNULL;
isec->sclass = SECCLASS_CHR_FILE;
isec->initialized = LABEL_INITIALIZED;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index a50d625e7946..c1c31e33657a 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
kfree(key);
if (datum) {
levdatum = datum;
- ebitmap_destroy(&levdatum->level->cat);
+ if (levdatum->level)
+ ebitmap_destroy(&levdatum->level->cat);
kfree(levdatum->level);
}
kfree(datum);
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 0b7e33f6aa59..1269e2be3c2d 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -49,7 +49,6 @@
#include <linux/sched.h>
#include <linux/audit.h>
#include <linux/mutex.h>
-#include <linux/selinux.h>
#include <linux/flex_array.h>
#include <linux/vmalloc.h>
#include <net/netlabel.h>
@@ -1281,7 +1280,8 @@ const char *security_get_initial_sid_context(u32 sid)
static int security_sid_to_context_core(struct selinux_state *state,
u32 sid, char **scontext,
- u32 *scontext_len, int force)
+ u32 *scontext_len, int force,
+ int only_invalid)
{
struct policydb *policydb;
struct sidtab *sidtab;
@@ -1326,8 +1326,14 @@ static int security_sid_to_context_core(struct selinux_state *state,
rc = -EINVAL;
goto out_unlock;
}
- rc = context_struct_to_string(policydb, context, scontext,
- scontext_len);
+ if (only_invalid && !context->len) {
+ scontext = NULL;
+ scontext_len = 0;
+ rc = 0;
+ } else {
+ rc = context_struct_to_string(policydb, context, scontext,
+ scontext_len);
+ }
out_unlock:
read_unlock(&state->ss->policy_rwlock);
out:
@@ -1349,14 +1355,34 @@ int security_sid_to_context(struct selinux_state *state,
u32 sid, char **scontext, u32 *scontext_len)
{
return security_sid_to_context_core(state, sid, scontext,
- scontext_len, 0);
+ scontext_len, 0, 0);
}
int security_sid_to_context_force(struct selinux_state *state, u32 sid,
char **scontext, u32 *scontext_len)
{
return security_sid_to_context_core(state, sid, scontext,
- scontext_len, 1);
+ scontext_len, 1, 0);
+}
+
+/**
+ * security_sid_to_context_inval - Obtain a context for a given SID if it
+ * is invalid.
+ * @sid: security identifier, SID
+ * @scontext: security context
+ * @scontext_len: length in bytes
+ *
+ * Write the string representation of the context associated with @sid
+ * into a dynamically allocated string of the correct size, but only if the
+ * context is invalid in the current policy. Set @scontext to point to
+ * this string (or NULL if the context is valid) and set @scontext_len to
+ * the length of the string (or 0 if the context is valid).
+ */
+int security_sid_to_context_inval(struct selinux_state *state, u32 sid,
+ char **scontext, u32 *scontext_len)
+{
+ return security_sid_to_context_core(state, sid, scontext,
+ scontext_len, 1, 1);
}
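As the comment above states, the helper only materializes a context string when the SID's context is invalid under the loaded policy, so a caller treats a NULL *scontext as "valid, nothing to report" and frees the string otherwise. A userspace sketch of that contract (the names and the SID-to-validity mapping below are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Model of the contract: hand back an allocated string only when the
 * context behind @sid is invalid; otherwise leave *scontext NULL and
 * *scontext_len 0 while still reporting success.
 */
static int sid_to_context_inval(unsigned int sid, char **scontext,
				size_t *scontext_len)
{
	const char *stale = "system_u:object_r:removed_type_t";
	int invalid = (sid == 7);	/* pretend SID 7 carries a stale context */

	*scontext = NULL;
	*scontext_len = 0;
	if (!invalid)
		return 0;
	*scontext = malloc(strlen(stale) + 1);
	if (!*scontext)
		return -1;
	strcpy(*scontext, stale);
	*scontext_len = strlen(stale);
	return 0;
}

int main(void)
{
	char *ctx;
	size_t len;

	if (sid_to_context_inval(7, &ctx, &len) == 0 && ctx) {
		printf("invalid context: %s (%zu bytes)\n", ctx, len);
		free(ctx);
	}
	return 0;
}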
/*
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index bd7d18bdb147..7c57cb7e4146 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -79,7 +79,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
gfp_t gfp)
{
int rc;
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
struct xfrm_sec_ctx *ctx = NULL;
u32 str_len;
@@ -138,7 +138,7 @@ static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
*/
static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
{
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
if (!ctx)
return 0;
diff --git a/security/smack/smack.h b/security/smack/smack.h
index f7db791fb566..9c7c95a5c497 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/lsm_audit.h>
+#include <linux/msg.h>
/*
* Use IPv6 port labeling if IPv6 is enabled and secmarks
@@ -336,6 +337,7 @@ extern struct smack_known *smack_syslog_label;
extern struct smack_known *smack_unconfined;
#endif
extern int smack_ptrace_rule;
+extern struct lsm_blob_sizes smack_blob_sizes;
extern struct smack_known smack_known_floor;
extern struct smack_known smack_known_hat;
@@ -356,12 +358,38 @@ extern struct list_head smack_onlycap_list;
#define SMACK_HASH_SLOTS 16
extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+static inline struct task_smack *smack_cred(const struct cred *cred)
+{
+ return cred->security + smack_blob_sizes.lbs_cred;
+}
+
+static inline struct smack_known **smack_file(const struct file *file)
+{
+ return (struct smack_known **)(file->f_security +
+ smack_blob_sizes.lbs_file);
+}
+
+static inline struct inode_smack *smack_inode(const struct inode *inode)
+{
+ return inode->i_security + smack_blob_sizes.lbs_inode;
+}
+
+static inline struct smack_known **smack_msg_msg(const struct msg_msg *msg)
+{
+ return msg->security + smack_blob_sizes.lbs_msg_msg;
+}
+
+static inline struct smack_known **smack_ipc(const struct kern_ipc_perm *ipc)
+{
+ return ipc->security + smack_blob_sizes.lbs_ipc;
+}
+
/*
* Is the directory transmuting?
*/
static inline int smk_inode_transmutable(const struct inode *isp)
{
- struct inode_smack *sip = isp->i_security;
+ struct inode_smack *sip = smack_inode(isp);
return (sip->smk_flags & SMK_INODE_TRANSMUTE) != 0;
}
@@ -370,7 +398,7 @@ static inline int smk_inode_transmutable(const struct inode *isp)
*/
static inline struct smack_known *smk_of_inode(const struct inode *isp)
{
- struct inode_smack *sip = isp->i_security;
+ struct inode_smack *sip = smack_inode(isp);
return sip->smk_inode;
}
@@ -382,13 +410,19 @@ static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
return tsp->smk_task;
}
-static inline struct smack_known *smk_of_task_struct(const struct task_struct *t)
+static inline struct smack_known *smk_of_task_struct(
+ const struct task_struct *t)
{
struct smack_known *skp;
+ const struct cred *cred;
rcu_read_lock();
- skp = smk_of_task(__task_cred(t)->security);
+
+ cred = __task_cred(t);
+ skp = smk_of_task(smack_cred(cred));
+
rcu_read_unlock();
+
return skp;
}
@@ -405,7 +439,7 @@ static inline struct smack_known *smk_of_forked(const struct task_smack *tsp)
*/
static inline struct smack_known *smk_of_current(void)
{
- return smk_of_task(current_security());
+ return smk_of_task(smack_cred(current_cred()));
}
/*
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 9a4c0ad46518..fe2ce3a65822 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -275,7 +275,7 @@ out_audit:
int smk_curacc(struct smack_known *obj_known,
u32 mode, struct smk_audit_info *a)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_tskacc(tsp, obj_known, mode, a);
}
@@ -635,12 +635,12 @@ DEFINE_MUTEX(smack_onlycap_lock);
*/
bool smack_privileged_cred(int cap, const struct cred *cred)
{
- struct task_smack *tsp = cred->security;
+ struct task_smack *tsp = smack_cred(cred);
struct smack_known *skp = tsp->smk_task;
struct smack_known_list_elem *sklep;
int rc;
- rc = cap_capable(cred, &init_user_ns, cap, SECURITY_CAP_AUDIT);
+ rc = cap_capable(cred, &init_user_ns, cap, CAP_OPT_NONE);
if (rc)
return false;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 403513df42fc..424bce4ef21d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -139,7 +139,7 @@ static int smk_bu_note(char *note, struct smack_known *sskp,
static int smk_bu_current(char *note, struct smack_known *oskp,
int mode, int rc)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
char acc[SMK_NUM_ACCESS_TYPE + 1];
if (rc <= 0)
@@ -160,7 +160,7 @@ static int smk_bu_current(char *note, struct smack_known *oskp,
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
static int smk_bu_task(struct task_struct *otp, int mode, int rc)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
struct smack_known *smk_task = smk_of_task_struct(otp);
char acc[SMK_NUM_ACCESS_TYPE + 1];
@@ -182,8 +182,8 @@ static int smk_bu_task(struct task_struct *otp, int mode, int rc)
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
static int smk_bu_inode(struct inode *inode, int mode, int rc)
{
- struct task_smack *tsp = current_security();
- struct inode_smack *isp = inode->i_security;
+ struct task_smack *tsp = smack_cred(current_cred());
+ struct inode_smack *isp = smack_inode(inode);
char acc[SMK_NUM_ACCESS_TYPE + 1];
if (isp->smk_flags & SMK_INODE_IMPURE)
@@ -212,10 +212,10 @@ static int smk_bu_inode(struct inode *inode, int mode, int rc)
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
static int smk_bu_file(struct file *file, int mode, int rc)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
struct smack_known *sskp = tsp->smk_task;
struct inode *inode = file_inode(file);
- struct inode_smack *isp = inode->i_security;
+ struct inode_smack *isp = smack_inode(inode);
char acc[SMK_NUM_ACCESS_TYPE + 1];
if (isp->smk_flags & SMK_INODE_IMPURE)
@@ -242,10 +242,10 @@ static int smk_bu_file(struct file *file, int mode, int rc)
static int smk_bu_credfile(const struct cred *cred, struct file *file,
int mode, int rc)
{
- struct task_smack *tsp = cred->security;
+ struct task_smack *tsp = smack_cred(cred);
struct smack_known *sskp = tsp->smk_task;
struct inode *inode = file_inode(file);
- struct inode_smack *isp = inode->i_security;
+ struct inode_smack *isp = smack_inode(inode);
char acc[SMK_NUM_ACCESS_TYPE + 1];
if (isp->smk_flags & SMK_INODE_IMPURE)
@@ -305,50 +305,35 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
}
/**
- * new_inode_smack - allocate an inode security blob
+ * init_inode_smack - initialize an inode security blob
+ * @inode: the inode whose security blob is initialized
* @skp: a pointer to the Smack label entry to use in the blob
*
- * Returns the new blob or NULL if there's no memory available
*/
-static struct inode_smack *new_inode_smack(struct smack_known *skp)
+static void init_inode_smack(struct inode *inode, struct smack_known *skp)
{
- struct inode_smack *isp;
-
- isp = kmem_cache_zalloc(smack_inode_cache, GFP_NOFS);
- if (isp == NULL)
- return NULL;
+ struct inode_smack *isp = smack_inode(inode);
isp->smk_inode = skp;
isp->smk_flags = 0;
mutex_init(&isp->smk_lock);
-
- return isp;
}
/**
- * new_task_smack - allocate a task security blob
+ * init_task_smack - initialize a task security blob
+ * @tsp: blob to initialize
* @task: a pointer to the Smack label for the running task
* @forked: a pointer to the Smack label for the forked task
- * @gfp: type of the memory for the allocation
*
- * Returns the new blob or NULL if there's no memory available
*/
-static struct task_smack *new_task_smack(struct smack_known *task,
- struct smack_known *forked, gfp_t gfp)
+static void init_task_smack(struct task_smack *tsp, struct smack_known *task,
+ struct smack_known *forked)
{
- struct task_smack *tsp;
-
- tsp = kzalloc(sizeof(struct task_smack), gfp);
- if (tsp == NULL)
- return NULL;
-
tsp->smk_task = task;
tsp->smk_forked = forked;
INIT_LIST_HEAD(&tsp->smk_rules);
INIT_LIST_HEAD(&tsp->smk_relabel);
mutex_init(&tsp->smk_rules_lock);
-
- return tsp;
}
/**
@@ -448,7 +433,7 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
rcu_read_lock();
tracercred = __task_cred(tracer);
- tsp = tracercred->security;
+ tsp = smack_cred(tracercred);
tracer_known = smk_of_task(tsp);
if ((mode & PTRACE_MODE_ATTACH) &&
@@ -515,7 +500,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
int rc;
struct smack_known *skp;
- skp = smk_of_task(current_security());
+ skp = smk_of_task(smack_cred(current_cred()));
rc = smk_ptrace_rule_check(ptp, skp, PTRACE_MODE_ATTACH, __func__);
return rc;
@@ -718,6 +703,13 @@ static int smack_set_mnt_opts(struct super_block *sb,
if (sp->smk_flags & SMK_SB_INITIALIZED)
return 0;
+ if (inode->i_security == NULL) {
+ int rc = lsm_inode_alloc(inode);
+
+ if (rc)
+ return rc;
+ }
+
if (!smack_privileged(CAP_MAC_ADMIN)) {
/*
* Unprivileged mounts don't get to specify Smack values.
@@ -782,17 +774,12 @@ static int smack_set_mnt_opts(struct super_block *sb,
/*
* Initialize the root inode.
*/
- isp = inode->i_security;
- if (isp == NULL) {
- isp = new_inode_smack(sp->smk_root);
- if (isp == NULL)
- return -ENOMEM;
- inode->i_security = isp;
- } else
- isp->smk_inode = sp->smk_root;
+ init_inode_smack(inode, sp->smk_root);
- if (transmute)
+ if (transmute) {
+ isp = smack_inode(inode);
isp->smk_flags |= SMK_INODE_TRANSMUTE;
+ }
return 0;
}
@@ -831,7 +818,7 @@ static int smack_sb_statfs(struct dentry *dentry)
static int smack_bprm_set_creds(struct linux_binprm *bprm)
{
struct inode *inode = file_inode(bprm->file);
- struct task_smack *bsp = bprm->cred->security;
+ struct task_smack *bsp = smack_cred(bprm->cred);
struct inode_smack *isp;
struct superblock_smack *sbsp;
int rc;
@@ -839,7 +826,7 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
if (bprm->called_set_creds)
return 0;
- isp = inode->i_security;
+ isp = smack_inode(inode);
if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
return 0;
@@ -890,49 +877,11 @@ static int smack_inode_alloc_security(struct inode *inode)
{
struct smack_known *skp = smk_of_current();
- inode->i_security = new_inode_smack(skp);
- if (inode->i_security == NULL)
- return -ENOMEM;
+ init_inode_smack(inode, skp);
return 0;
}
/**
- * smack_inode_free_rcu - Free inode_smack blob from cache
- * @head: the rcu_head for getting inode_smack pointer
- *
- * Call back function called from call_rcu() to free
- * the i_security blob pointer in inode
- */
-static void smack_inode_free_rcu(struct rcu_head *head)
-{
- struct inode_smack *issp;
-
- issp = container_of(head, struct inode_smack, smk_rcu);
- kmem_cache_free(smack_inode_cache, issp);
-}
-
-/**
- * smack_inode_free_security - free an inode blob using call_rcu()
- * @inode: the inode with a blob
- *
- * Clears the blob pointer in inode using RCU
- */
-static void smack_inode_free_security(struct inode *inode)
-{
- struct inode_smack *issp = inode->i_security;
-
- /*
- * The inode may still be referenced in a path walk and
- * a call to smack_inode_permission() can be made
- * after smack_inode_free_security() is called.
- * To avoid race condition free the i_security via RCU
- * and leave the current inode->i_security pointer intact.
- * The inode will be freed after the RCU grace period too.
- */
- call_rcu(&issp->smk_rcu, smack_inode_free_rcu);
-}
-
-/**
* smack_inode_init_security - copy out the smack from an inode
* @inode: the newly created inode
* @dir: containing directory object
@@ -947,7 +896,7 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, const char **name,
void **value, size_t *len)
{
- struct inode_smack *issp = inode->i_security;
+ struct inode_smack *issp = smack_inode(inode);
struct smack_known *skp = smk_of_current();
struct smack_known *isp = smk_of_inode(inode);
struct smack_known *dsp = smk_of_inode(dir);
@@ -1285,7 +1234,7 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct smack_known *skp;
- struct inode_smack *isp = d_backing_inode(dentry)->i_security;
+ struct inode_smack *isp = smack_inode(d_backing_inode(dentry));
if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
isp->smk_flags |= SMK_INODE_TRANSMUTE;
@@ -1366,7 +1315,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
if (rc != 0)
return rc;
- isp = d_backing_inode(dentry)->i_security;
+ isp = smack_inode(d_backing_inode(dentry));
/*
* Don't do anything special for these.
* XATTR_NAME_SMACKIPIN
@@ -1498,25 +1447,13 @@ static void smack_inode_getsecid(struct inode *inode, u32 *secid)
*/
static int smack_file_alloc_security(struct file *file)
{
- struct smack_known *skp = smk_of_current();
+ struct smack_known **blob = smack_file(file);
- file->f_security = skp;
+ *blob = smk_of_current();
return 0;
}
/**
- * smack_file_free_security - clear a file security blob
- * @file: the object
- *
- * The security blob for a file is a pointer to the master
- * label list, so no memory is freed.
- */
-static void smack_file_free_security(struct file *file)
-{
- file->f_security = NULL;
-}
-
-/**
* smack_file_ioctl - Smack check on ioctls
* @file: the object
* @cmd: what to do
@@ -1653,7 +1590,7 @@ static int smack_mmap_file(struct file *file,
if (unlikely(IS_PRIVATE(file_inode(file))))
return 0;
- isp = file_inode(file)->i_security;
+ isp = smack_inode(file_inode(file));
if (isp->smk_mmap == NULL)
return 0;
sbsp = file_inode(file)->i_sb->s_security;
@@ -1662,7 +1599,7 @@ static int smack_mmap_file(struct file *file,
return -EACCES;
mkp = isp->smk_mmap;
- tsp = current_security();
+ tsp = smack_cred(current_cred());
skp = smk_of_current();
rc = 0;
@@ -1740,7 +1677,9 @@ static int smack_mmap_file(struct file *file,
*/
static void smack_file_set_fowner(struct file *file)
{
- file->f_security = smk_of_current();
+ struct smack_known **blob = smack_file(file);
+
+ *blob = smk_of_current();
}
/**
@@ -1757,8 +1696,9 @@ static void smack_file_set_fowner(struct file *file)
static int smack_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int signum)
{
+ struct smack_known **blob;
struct smack_known *skp;
- struct smack_known *tkp = smk_of_task(tsk->cred->security);
+ struct smack_known *tkp = smk_of_task(smack_cred(tsk->cred));
const struct cred *tcred;
struct file *file;
int rc;
@@ -1770,7 +1710,8 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
file = container_of(fown, struct file, f_owner);
/* we don't log here as rc can be overridden */
- skp = file->f_security;
+ blob = smack_file(file);
+ skp = *blob;
rc = smk_access(skp, tkp, MAY_DELIVER, NULL);
rc = smk_bu_note("sigiotask", skp, tkp, MAY_DELIVER, rc);
@@ -1811,7 +1752,7 @@ static int smack_file_receive(struct file *file)
if (inode->i_sb->s_magic == SOCKFS_MAGIC) {
sock = SOCKET_I(inode);
ssp = sock->sk->sk_security;
- tsp = current_security();
+ tsp = smack_cred(current_cred());
/*
* If the receiving process can't write to the
* passed socket or if the passed socket can't
@@ -1853,7 +1794,7 @@ static int smack_file_receive(struct file *file)
*/
static int smack_file_open(struct file *file)
{
- struct task_smack *tsp = file->f_cred->security;
+ struct task_smack *tsp = smack_cred(file->f_cred);
struct inode *inode = file_inode(file);
struct smk_audit_info ad;
int rc;
@@ -1881,14 +1822,7 @@ static int smack_file_open(struct file *file)
*/
static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
- struct task_smack *tsp;
-
- tsp = new_task_smack(NULL, NULL, gfp);
- if (tsp == NULL)
- return -ENOMEM;
-
- cred->security = tsp;
-
+ init_task_smack(smack_cred(cred), NULL, NULL);
return 0;
}
@@ -1900,15 +1834,11 @@ static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
*/
static void smack_cred_free(struct cred *cred)
{
- struct task_smack *tsp = cred->security;
+ struct task_smack *tsp = smack_cred(cred);
struct smack_rule *rp;
struct list_head *l;
struct list_head *n;
- if (tsp == NULL)
- return;
- cred->security = NULL;
-
smk_destroy_label_list(&tsp->smk_relabel);
list_for_each_safe(l, n, &tsp->smk_rules) {
@@ -1916,7 +1846,6 @@ static void smack_cred_free(struct cred *cred)
list_del(&rp->list);
kfree(rp);
}
- kfree(tsp);
}
/**
@@ -1930,15 +1859,11 @@ static void smack_cred_free(struct cred *cred)
static int smack_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- struct task_smack *old_tsp = old->security;
- struct task_smack *new_tsp;
+ struct task_smack *old_tsp = smack_cred(old);
+ struct task_smack *new_tsp = smack_cred(new);
int rc;
- new_tsp = new_task_smack(old_tsp->smk_task, old_tsp->smk_task, gfp);
- if (new_tsp == NULL)
- return -ENOMEM;
-
- new->security = new_tsp;
+ init_task_smack(new_tsp, old_tsp->smk_task, old_tsp->smk_task);
rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
if (rc != 0)
@@ -1946,10 +1871,7 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
rc = smk_copy_relabel(&new_tsp->smk_relabel, &old_tsp->smk_relabel,
gfp);
- if (rc != 0)
- return rc;
-
- return 0;
+ return rc;
}
/**
@@ -1961,15 +1883,14 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
*/
static void smack_cred_transfer(struct cred *new, const struct cred *old)
{
- struct task_smack *old_tsp = old->security;
- struct task_smack *new_tsp = new->security;
+ struct task_smack *old_tsp = smack_cred(old);
+ struct task_smack *new_tsp = smack_cred(new);
new_tsp->smk_task = old_tsp->smk_task;
new_tsp->smk_forked = old_tsp->smk_task;
mutex_init(&new_tsp->smk_rules_lock);
INIT_LIST_HEAD(&new_tsp->smk_rules);
-
/* cbs copy rule list */
}
@@ -1980,12 +1901,12 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
*
* Sets the secid to contain a u32 version of the smack label.
*/
-static void smack_cred_getsecid(const struct cred *c, u32 *secid)
+static void smack_cred_getsecid(const struct cred *cred, u32 *secid)
{
struct smack_known *skp;
rcu_read_lock();
- skp = smk_of_task(c->security);
+ skp = smk_of_task(smack_cred(cred));
*secid = skp->smk_secid;
rcu_read_unlock();
}
@@ -1999,7 +1920,7 @@ static void smack_cred_getsecid(const struct cred *c, u32 *secid)
*/
static int smack_kernel_act_as(struct cred *new, u32 secid)
{
- struct task_smack *new_tsp = new->security;
+ struct task_smack *new_tsp = smack_cred(new);
new_tsp->smk_task = smack_from_secid(secid);
return 0;
@@ -2016,8 +1937,8 @@ static int smack_kernel_act_as(struct cred *new, u32 secid)
static int smack_kernel_create_files_as(struct cred *new,
struct inode *inode)
{
- struct inode_smack *isp = inode->i_security;
- struct task_smack *tsp = new->security;
+ struct inode_smack *isp = smack_inode(inode);
+ struct task_smack *tsp = smack_cred(new);
tsp->smk_forked = isp->smk_inode;
tsp->smk_task = tsp->smk_forked;
@@ -2201,7 +2122,7 @@ static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info,
* specific behavior. This is not clean. For one thing
* we can't take privilege into account.
*/
- skp = smk_of_task(cred->security);
+ skp = smk_of_task(smack_cred(cred));
rc = smk_access(skp, tkp, MAY_DELIVER, &ad);
rc = smk_bu_note("USB signal", skp, tkp, MAY_DELIVER, rc);
return rc;
@@ -2216,7 +2137,7 @@ static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info,
*/
static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
{
- struct inode_smack *isp = inode->i_security;
+ struct inode_smack *isp = smack_inode(inode);
struct smack_known *skp = smk_of_task_struct(p);
isp->smk_inode = skp;
@@ -2679,7 +2600,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct smack_known *skp;
- struct inode_smack *nsp = inode->i_security;
+ struct inode_smack *nsp = smack_inode(inode);
struct socket_smack *ssp;
struct socket *sock;
int rc = 0;
@@ -2888,24 +2809,13 @@ static int smack_flags_to_may(int flags)
*/
static int smack_msg_msg_alloc_security(struct msg_msg *msg)
{
- struct smack_known *skp = smk_of_current();
+ struct smack_known **blob = smack_msg_msg(msg);
- msg->security = skp;
+ *blob = smk_of_current();
return 0;
}
/**
- * smack_msg_msg_free_security - Clear the security blob for msg_msg
- * @msg: the object
- *
- * Clears the blob pointer
- */
-static void smack_msg_msg_free_security(struct msg_msg *msg)
-{
- msg->security = NULL;
-}
-
-/**
* smack_of_ipc - the smack pointer for the ipc
* @isp: the object
*
@@ -2913,7 +2823,9 @@ static void smack_msg_msg_free_security(struct msg_msg *msg)
*/
static struct smack_known *smack_of_ipc(struct kern_ipc_perm *isp)
{
- return (struct smack_known *)isp->security;
+ struct smack_known **blob = smack_ipc(isp);
+
+ return *blob;
}
/**
@@ -2924,24 +2836,13 @@ static struct smack_known *smack_of_ipc(struct kern_ipc_perm *isp)
*/
static int smack_ipc_alloc_security(struct kern_ipc_perm *isp)
{
- struct smack_known *skp = smk_of_current();
+ struct smack_known **blob = smack_ipc(isp);
- isp->security = skp;
+ *blob = smk_of_current();
return 0;
}
/**
- * smack_ipc_free_security - Clear the security blob for ipc
- * @isp: the object
- *
- * Clears the blob pointer
- */
-static void smack_ipc_free_security(struct kern_ipc_perm *isp)
-{
- isp->security = NULL;
-}
-
-/**
* smk_curacc_shm : check if current has access on shm
* @isp : the object
* @access : access requested
@@ -3238,7 +3139,8 @@ static int smack_msg_queue_msgrcv(struct kern_ipc_perm *isp, struct msg_msg *msg
*/
static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
{
- struct smack_known *iskp = ipp->security;
+ struct smack_known **blob = smack_ipc(ipp);
+ struct smack_known *iskp = *blob;
int may = smack_flags_to_may(flag);
struct smk_audit_info ad;
int rc;
@@ -3259,7 +3161,8 @@ static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
*/
static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid)
{
- struct smack_known *iskp = ipp->security;
+ struct smack_known **blob = smack_ipc(ipp);
+ struct smack_known *iskp = *blob;
*secid = iskp->smk_secid;
}
@@ -3287,7 +3190,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
if (inode == NULL)
return;
- isp = inode->i_security;
+ isp = smack_inode(inode);
mutex_lock(&isp->smk_lock);
/*
@@ -3390,13 +3293,12 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
final = &smack_known_star;
/*
- * Fall through.
- *
* If a smack value has been set we want to use it,
* but since tmpfs isn't giving us the opportunity
* to set mount options simulate setting the
* superblock default.
*/
+ /* Fall through */
default:
/*
* This isn't an understood special case.
@@ -3528,7 +3430,7 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value)
*/
static int smack_setprocattr(const char *name, void *value, size_t size)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
struct cred *new;
struct smack_known *skp;
struct smack_known_list_elem *sklep;
@@ -3569,7 +3471,7 @@ static int smack_setprocattr(const char *name, void *value, size_t size)
if (new == NULL)
return -ENOMEM;
- tsp = new->security;
+ tsp = smack_cred(new);
tsp->smk_task = skp;
/*
* process can change its label only once
@@ -4214,7 +4116,7 @@ static void smack_inet_csk_clone(struct sock *sk,
static int smack_key_alloc(struct key *key, const struct cred *cred,
unsigned long flags)
{
- struct smack_known *skp = smk_of_task(cred->security);
+ struct smack_known *skp = smk_of_task(smack_cred(cred));
key->security = skp;
return 0;
@@ -4245,7 +4147,7 @@ static int smack_key_permission(key_ref_t key_ref,
{
struct key *keyp;
struct smk_audit_info ad;
- struct smack_known *tkp = smk_of_task(cred->security);
+ struct smack_known *tkp = smk_of_task(smack_cred(cred));
int request = 0;
int rc;
@@ -4518,12 +4420,12 @@ static int smack_inode_copy_up(struct dentry *dentry, struct cred **new)
return -ENOMEM;
}
- tsp = new_creds->security;
+ tsp = smack_cred(new_creds);
/*
* Get label from overlay inode and set it in create_sid
*/
- isp = d_inode(dentry->d_parent)->i_security;
+ isp = smack_inode(d_inode(dentry->d_parent));
skp = isp->smk_inode;
tsp->smk_task = skp;
*new = new_creds;
@@ -4546,8 +4448,8 @@ static int smack_dentry_create_files_as(struct dentry *dentry, int mode,
const struct cred *old,
struct cred *new)
{
- struct task_smack *otsp = old->security;
- struct task_smack *ntsp = new->security;
+ struct task_smack *otsp = smack_cred(old);
+ struct task_smack *ntsp = smack_cred(new);
struct inode_smack *isp;
int may;
@@ -4560,7 +4462,7 @@ static int smack_dentry_create_files_as(struct dentry *dentry, int mode,
/*
* the attribute of the containing directory
*/
- isp = d_inode(dentry->d_parent)->i_security;
+ isp = smack_inode(d_inode(dentry->d_parent));
if (isp->smk_flags & SMK_INODE_TRANSMUTE) {
rcu_read_lock();
@@ -4580,6 +4482,14 @@ static int smack_dentry_create_files_as(struct dentry *dentry, int mode,
return 0;
}
+struct lsm_blob_sizes smack_blob_sizes __lsm_ro_after_init = {
+ .lbs_cred = sizeof(struct task_smack),
+ .lbs_file = sizeof(struct smack_known *),
+ .lbs_inode = sizeof(struct inode_smack),
+ .lbs_ipc = sizeof(struct smack_known *),
+ .lbs_msg_msg = sizeof(struct smack_known *),
+};
+
static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
@@ -4595,7 +4505,6 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(bprm_set_creds, smack_bprm_set_creds),
LSM_HOOK_INIT(inode_alloc_security, smack_inode_alloc_security),
- LSM_HOOK_INIT(inode_free_security, smack_inode_free_security),
LSM_HOOK_INIT(inode_init_security, smack_inode_init_security),
LSM_HOOK_INIT(inode_link, smack_inode_link),
LSM_HOOK_INIT(inode_unlink, smack_inode_unlink),
@@ -4614,7 +4523,6 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(inode_getsecid, smack_inode_getsecid),
LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security),
- LSM_HOOK_INIT(file_free_security, smack_file_free_security),
LSM_HOOK_INIT(file_ioctl, smack_file_ioctl),
LSM_HOOK_INIT(file_lock, smack_file_lock),
LSM_HOOK_INIT(file_fcntl, smack_file_fcntl),
@@ -4650,23 +4558,19 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ipc_getsecid, smack_ipc_getsecid),
LSM_HOOK_INIT(msg_msg_alloc_security, smack_msg_msg_alloc_security),
- LSM_HOOK_INIT(msg_msg_free_security, smack_msg_msg_free_security),
LSM_HOOK_INIT(msg_queue_alloc_security, smack_ipc_alloc_security),
- LSM_HOOK_INIT(msg_queue_free_security, smack_ipc_free_security),
LSM_HOOK_INIT(msg_queue_associate, smack_msg_queue_associate),
LSM_HOOK_INIT(msg_queue_msgctl, smack_msg_queue_msgctl),
LSM_HOOK_INIT(msg_queue_msgsnd, smack_msg_queue_msgsnd),
LSM_HOOK_INIT(msg_queue_msgrcv, smack_msg_queue_msgrcv),
LSM_HOOK_INIT(shm_alloc_security, smack_ipc_alloc_security),
- LSM_HOOK_INIT(shm_free_security, smack_ipc_free_security),
LSM_HOOK_INIT(shm_associate, smack_shm_associate),
LSM_HOOK_INIT(shm_shmctl, smack_shm_shmctl),
LSM_HOOK_INIT(shm_shmat, smack_shm_shmat),
LSM_HOOK_INIT(sem_alloc_security, smack_ipc_alloc_security),
- LSM_HOOK_INIT(sem_free_security, smack_ipc_free_security),
LSM_HOOK_INIT(sem_associate, smack_sem_associate),
LSM_HOOK_INIT(sem_semctl, smack_sem_semctl),
LSM_HOOK_INIT(sem_semop, smack_sem_semop),
@@ -4757,23 +4661,23 @@ static __init void init_smack_known_list(void)
*/
static __init int smack_init(void)
{
- struct cred *cred;
+ struct cred *cred = (struct cred *) current->cred;
struct task_smack *tsp;
- if (!security_module_enable("smack"))
- return 0;
-
smack_inode_cache = KMEM_CACHE(inode_smack, 0);
if (!smack_inode_cache)
return -ENOMEM;
- tsp = new_task_smack(&smack_known_floor, &smack_known_floor,
- GFP_KERNEL);
- if (tsp == NULL) {
- kmem_cache_destroy(smack_inode_cache);
- return -ENOMEM;
- }
+ /*
+ * Set the security state for the initial task.
+ */
+ tsp = smack_cred(cred);
+ init_task_smack(tsp, &smack_known_floor, &smack_known_floor);
+ /*
+ * Register with LSM
+ */
+ security_add_hooks(smack_hooks, ARRAY_SIZE(smack_hooks), "smack");
smack_enabled = 1;
pr_info("Smack: Initializing.\n");
@@ -4787,20 +4691,9 @@ static __init int smack_init(void)
pr_info("Smack: IPv6 Netfilter enabled.\n");
#endif
- /*
- * Set the security state for the initial task.
- */
- cred = (struct cred *) current->cred;
- cred->security = tsp;
-
/* initialize the smack_known_list */
init_smack_known_list();
- /*
- * Register with LSM
- */
- security_add_hooks(smack_hooks, ARRAY_SIZE(smack_hooks), "smack");
-
return 0;
}
@@ -4810,5 +4703,7 @@ static __init int smack_init(void)
*/
DEFINE_LSM(smack) = {
.name = "smack",
+ .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
+ .blobs = &smack_blob_sizes,
.init = smack_init,
};
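Note on the Smack conversion above: the lsm_blob_sizes table tells the LSM infrastructure how much space Smack needs in each shared security blob, so the hooks that only freed per-object memory (inode_free_security, file_free_security and the IPC/msg free hooks) can be dropped, and smack_init() no longer allocates a task_smack for the initial task; it just initializes the slice the infrastructure already reserved behind current->cred. The smack_cred(), smack_inode() and smack_ipc() helpers used throughout these hunks resolve that slice by offset. A minimal sketch of the pattern, assuming definitions along the lines of those added to security/smack/smack.h (not part of this excerpt):

/* Sketch only: each helper returns Smack's slice of the shared blob. */
static inline struct task_smack *smack_cred(const struct cred *cred)
{
	return cred->security + smack_blob_sizes.lbs_cred;
}

static inline struct inode_smack *smack_inode(const struct inode *inode)
{
	return inode->i_security + smack_blob_sizes.lbs_inode;
}

static inline struct smack_known **smack_ipc(const struct kern_ipc_perm *ipc)
{
	return ipc->security + smack_blob_sizes.lbs_ipc;
}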
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 06b517075ec0..faf2ea3968b3 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2208,14 +2208,14 @@ static const struct file_operations smk_logging_ops = {
static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_seq_start(s, pos, &tsp->smk_rules);
}
static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_seq_next(s, v, pos, &tsp->smk_rules);
}
@@ -2262,7 +2262,7 @@ static int smk_open_load_self(struct inode *inode, struct file *file)
static ssize_t smk_write_load_self(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
&tsp->smk_rules_lock, SMK_FIXED24_FMT);
@@ -2414,14 +2414,14 @@ static const struct file_operations smk_load2_ops = {
static void *load_self2_seq_start(struct seq_file *s, loff_t *pos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_seq_start(s, pos, &tsp->smk_rules);
}
static void *load_self2_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_seq_next(s, v, pos, &tsp->smk_rules);
}
@@ -2467,7 +2467,7 @@ static int smk_open_load_self2(struct inode *inode, struct file *file)
static ssize_t smk_write_load_self2(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
&tsp->smk_rules_lock, SMK_LONG_FMT);
@@ -2681,14 +2681,14 @@ static const struct file_operations smk_syslog_ops = {
static void *relabel_self_seq_start(struct seq_file *s, loff_t *pos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_seq_start(s, pos, &tsp->smk_relabel);
}
static void *relabel_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
return smk_seq_next(s, v, pos, &tsp->smk_relabel);
}
@@ -2736,7 +2736,7 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file)
static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_smack *tsp = current_security();
+ struct task_smack *tsp = smack_cred(current_cred());
char *data;
int rc;
LIST_HEAD(list_tmp);
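The smackfs handlers above used to reach the caller's rule lists through the current_security() shortcut, which simply read current_cred()->security; once the cred blob is shared between modules that raw pointer is no longer Smack's task_smack, so the handlers switch to the offset-aware smack_cred(current_cred()) form. For comparison only, the retired shortcut was roughly:

/* Old shortcut (from include/linux/cred.h), shown for comparison only;
 * it handed back the raw blob pointer, not Smack's slice of it. */
#define current_security()	(current_cred()->security)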
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 479b03a7a17e..3c96e8402e94 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -32,6 +32,7 @@ static char *tomoyo_print_bprm(struct linux_binprm *bprm,
int argv_count = bprm->argc;
int envp_count = bprm->envc;
bool truncated = false;
+
if (!buffer)
return NULL;
len = snprintf(buffer, tomoyo_buffer_len - 1, "argv[]={ ");
@@ -49,6 +50,7 @@ static char *tomoyo_print_bprm(struct linux_binprm *bprm,
while (offset < PAGE_SIZE) {
const char *kaddr = dump->data;
const unsigned char c = kaddr[offset++];
+
if (cp == last_start)
*cp++ = '"';
if (cp >= buffer + tomoyo_buffer_len - 32) {
@@ -154,19 +156,18 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
int pos;
u8 i;
+
if (!buffer)
return NULL;
tomoyo_convert_time(ktime_get_real_seconds(), &stamp);
pos = snprintf(buffer, tomoyo_buffer_len - 1,
- "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s "
- "granted=%s (global-pid=%u) task={ pid=%u ppid=%u "
- "uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u "
- "fsuid=%u fsgid=%u }", stamp.year, stamp.month,
- stamp.day, stamp.hour, stamp.min, stamp.sec, r->profile,
- tomoyo_mode[r->mode], tomoyo_yesno(r->granted), gpid,
- tomoyo_sys_getpid(), tomoyo_sys_getppid(),
+ "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s granted=%s (global-pid=%u) task={ pid=%u ppid=%u uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
+ stamp.year, stamp.month, stamp.day, stamp.hour,
+ stamp.min, stamp.sec, r->profile, tomoyo_mode[r->mode],
+ tomoyo_yesno(r->granted), gpid, tomoyo_sys_getpid(),
+ tomoyo_sys_getppid(),
from_kuid(&init_user_ns, current_uid()),
from_kgid(&init_user_ns, current_gid()),
from_kuid(&init_user_ns, current_euid()),
@@ -185,6 +186,7 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
struct tomoyo_mini_stat *stat;
unsigned int dev;
umode_t mode;
+
if (!obj->stat_valid[i])
continue;
stat = &obj->stat[i];
@@ -193,8 +195,8 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
if (i & 1) {
pos += snprintf(buffer + pos,
tomoyo_buffer_len - 1 - pos,
- " path%u.parent={ uid=%u gid=%u "
- "ino=%lu perm=0%o }", (i >> 1) + 1,
+ " path%u.parent={ uid=%u gid=%u ino=%lu perm=0%o }",
+ (i >> 1) + 1,
from_kuid(&init_user_ns, stat->uid),
from_kgid(&init_user_ns, stat->gid),
(unsigned long)stat->ino,
@@ -202,8 +204,8 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
continue;
}
pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
- " path%u={ uid=%u gid=%u ino=%lu major=%u"
- " minor=%u perm=0%o type=%s", (i >> 1) + 1,
+ " path%u={ uid=%u gid=%u ino=%lu major=%u minor=%u perm=0%o type=%s",
+ (i >> 1) + 1,
from_kuid(&init_user_ns, stat->uid),
from_kgid(&init_user_ns, stat->gid),
(unsigned long)stat->ino,
@@ -249,6 +251,7 @@ char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
const char *symlink = NULL;
int pos;
const char *domainname = r->domain->domainname->name;
+
header = tomoyo_print_header(r);
if (!header)
return NULL;
@@ -256,6 +259,7 @@ char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
len += strlen(domainname) + strlen(header) + 10;
if (r->ee) {
struct file *file = r->ee->bprm->file;
+
realpath = tomoyo_realpath_from_path(&file->f_path);
bprm_info = tomoyo_print_bprm(r->ee->bprm, &r->ee->dump);
if (!realpath || !bprm_info)
@@ -275,6 +279,7 @@ char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
pos = snprintf(buf, len, "%s", header);
if (realpath) {
struct linux_binprm *bprm = r->ee->bprm;
+
pos += snprintf(buf + pos, len - pos,
" exec={ realpath=\"%s\" argc=%d envc=%d %s }",
realpath, bprm->argc, bprm->envc, bprm_info);
@@ -328,6 +333,7 @@ static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
const u8 category = tomoyo_index2category[index] +
TOMOYO_MAX_MAC_INDEX;
struct tomoyo_profile *p;
+
if (!tomoyo_policy_loaded)
return false;
p = tomoyo_profile(ns, profile);
@@ -362,6 +368,7 @@ void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
char *buf;
struct tomoyo_log *entry;
bool quota_exceeded = false;
+
if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type,
r->matched_acl, r->granted))
goto out;
@@ -413,6 +420,7 @@ void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
{
va_list args;
int len;
+
va_start(args, fmt);
len = vsnprintf((char *) &len, 1, fmt, args) + 1;
va_end(args);
@@ -431,6 +439,7 @@ void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
void tomoyo_read_log(struct tomoyo_io_buffer *head)
{
struct tomoyo_log *ptr = NULL;
+
if (head->r.w_pos)
return;
kfree(head->read_buf);
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index c598aa00d5e3..57988d95d33d 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -197,6 +197,7 @@ static void tomoyo_addprintf(char *buffer, int len, const char *fmt, ...)
{
va_list args;
const int pos = strlen(buffer);
+
va_start(args, fmt);
vsnprintf(buffer + pos, len - pos - 1, fmt, args);
va_end(args);
@@ -214,6 +215,7 @@ static bool tomoyo_flush(struct tomoyo_io_buffer *head)
while (head->r.w_pos) {
const char *w = head->r.w[0];
size_t len = strlen(w);
+
if (len) {
if (len > head->read_user_buf_avail)
len = head->read_user_buf_avail;
@@ -279,6 +281,7 @@ static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
size_t len;
size_t pos = head->r.avail;
int size = head->readbuf_size - pos;
+
if (size <= 0)
return;
va_start(args, fmt);
@@ -344,13 +347,14 @@ static bool tomoyo_namespace_enabled;
void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns)
{
unsigned int idx;
+
for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++)
INIT_LIST_HEAD(&ns->acl_group[idx]);
for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++)
INIT_LIST_HEAD(&ns->group_list[idx]);
for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++)
INIT_LIST_HEAD(&ns->policy_list[idx]);
- ns->profile_version = 20110903;
+ ns->profile_version = 20150505;
tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list);
list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list);
}
@@ -433,6 +437,7 @@ static void tomoyo_print_number_union_nospace
u8 min_type = ptr->value_type[0];
const u8 max_type = ptr->value_type[1];
char buffer[128];
+
buffer[0] = '\0';
for (i = 0; i < 2; i++) {
switch (min_type) {
@@ -487,6 +492,7 @@ static struct tomoyo_profile *tomoyo_assign_profile
{
struct tomoyo_profile *ptr;
struct tomoyo_profile *entry;
+
if (profile >= TOMOYO_MAX_PROFILES)
return NULL;
ptr = ns->profile_ptr[profile];
@@ -530,6 +536,7 @@ struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
{
static struct tomoyo_profile tomoyo_null_profile;
struct tomoyo_profile *ptr = ns->profile_ptr[profile];
+
if (!ptr)
ptr = &tomoyo_null_profile;
return ptr;
@@ -546,6 +553,7 @@ struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
static s8 tomoyo_find_yesno(const char *string, const char *find)
{
const char *cp = strstr(string, find);
+
if (cp) {
cp += strlen(find);
if (!strncmp(cp, "=yes", 4))
@@ -569,6 +577,7 @@ static void tomoyo_set_uint(unsigned int *i, const char *string,
const char *find)
{
const char *cp = strstr(string, find);
+
if (cp)
sscanf(cp + strlen(find), "=%u", i);
}
@@ -587,6 +596,7 @@ static int tomoyo_set_mode(char *name, const char *value,
{
u8 i;
u8 config;
+
if (!strcmp(name, "CONFIG")) {
i = TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX;
config = profile->default_config;
@@ -595,10 +605,12 @@ static int tomoyo_set_mode(char *name, const char *value,
for (i = 0; i < TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX; i++) {
int len = 0;
+
if (i < TOMOYO_MAX_MAC_INDEX) {
const u8 c = tomoyo_index2category[i];
const char *category =
tomoyo_category_keywords[c];
+
len = strlen(category);
if (strncmp(name, category, len) ||
name[len++] != ':' || name[len++] != ':')
@@ -618,6 +630,7 @@ static int tomoyo_set_mode(char *name, const char *value,
config = TOMOYO_CONFIG_USE_DEFAULT;
} else {
u8 mode;
+
for (mode = 0; mode < 4; mode++)
if (strstr(value, tomoyo_mode[mode]))
/*
@@ -664,6 +677,7 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
unsigned int i;
char *cp;
struct tomoyo_profile *profile;
+
if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version)
== 1)
return 0;
@@ -683,6 +697,7 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
const struct tomoyo_path_info *new_comment
= tomoyo_get_name(cp);
const struct tomoyo_path_info *old_comment;
+
if (!new_comment)
return -ENOMEM;
spin_lock(&lock);
@@ -732,6 +747,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head)
struct tomoyo_policy_namespace *ns =
container_of(head->r.ns, typeof(*ns), namespace_list);
const struct tomoyo_profile *profile;
+
if (head->r.eof)
return;
next:
@@ -760,6 +776,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head)
u8 i;
const struct tomoyo_path_info *comment =
profile->comment;
+
tomoyo_print_namespace(head);
tomoyo_io_printf(head, "%u-COMMENT=", index);
tomoyo_set_string(head, comment ? comment->name : "");
@@ -788,6 +805,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head)
+ TOMOYO_MAX_MAC_CATEGORY_INDEX; head->r.bit++) {
const u8 i = head->r.bit;
const u8 config = profile->config[i];
+
if (config == TOMOYO_CONFIG_USE_DEFAULT)
continue;
tomoyo_print_namespace(head);
@@ -847,10 +865,10 @@ static int tomoyo_update_manager_entry(const char *manager,
struct tomoyo_acl_param param = {
/* .ns = &tomoyo_kernel_namespace, */
.is_delete = is_delete,
- .list = &tomoyo_kernel_namespace.
- policy_list[TOMOYO_ID_MANAGER],
+ .list = &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER],
};
int error = is_delete ? -ENOENT : -ENOMEM;
+
if (!tomoyo_correct_domain(manager) &&
!tomoyo_correct_word(manager))
return -EINVAL;
@@ -894,10 +912,10 @@ static void tomoyo_read_manager(struct tomoyo_io_buffer *head)
{
if (head->r.eof)
return;
- list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace.
- policy_list[TOMOYO_ID_MANAGER]) {
+ list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER]) {
struct tomoyo_manager *ptr =
list_entry(head->r.acl, typeof(*ptr), head.list);
+
if (ptr->head.is_deleted)
continue;
if (!tomoyo_flush(head))
@@ -933,8 +951,7 @@ static bool tomoyo_manager(void)
exe = tomoyo_get_exe();
if (!exe)
return false;
- list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.
- policy_list[TOMOYO_ID_MANAGER], head.list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) {
if (!ptr->head.is_deleted &&
(!tomoyo_pathcmp(domainname, ptr->manager) ||
!strcmp(exe, ptr->manager->name))) {
@@ -945,9 +962,10 @@ static bool tomoyo_manager(void)
if (!found) { /* Reduce error messages. */
static pid_t last_pid;
const pid_t pid = current->pid;
+
if (last_pid != pid) {
- printk(KERN_WARNING "%s ( %s ) is not permitted to "
- "update policies.\n", domainname->name, exe);
+ pr_warn("%s ( %s ) is not permitted to update policies.\n",
+ domainname->name, exe);
last_pid = pid;
}
}
@@ -974,19 +992,21 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
unsigned int pid;
struct tomoyo_domain_info *domain = NULL;
bool global_pid = false;
+
if (strncmp(data, "select ", 7))
return false;
data += 7;
if (sscanf(data, "pid=%u", &pid) == 1 ||
(global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) {
struct task_struct *p;
+
rcu_read_lock();
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
- domain = tomoyo_real_domain(p);
+ domain = tomoyo_task(p)->domain_info;
rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
if (tomoyo_domain_def(data + 7))
@@ -1020,10 +1040,11 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
* Returns true if @a == @b, false otherwise.
*/
static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
- const struct tomoyo_acl_info *b)
+ const struct tomoyo_acl_info *b)
{
const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
+
return p1->domainname == p2->domainname;
}
@@ -1039,11 +1060,13 @@ static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
static int tomoyo_write_task(struct tomoyo_acl_param *param)
{
int error = -EINVAL;
+
if (tomoyo_str_starts(&param->data, "manual_domain_transition ")) {
struct tomoyo_task_acl e = {
.head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
.domainname = tomoyo_get_domainname(param),
};
+
if (e.domainname)
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_task_acl,
@@ -1110,7 +1133,7 @@ static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns,
};
static const struct {
const char *keyword;
- int (*write) (struct tomoyo_acl_param *);
+ int (*write)(struct tomoyo_acl_param *param);
} tomoyo_callback[5] = {
{ "file ", tomoyo_write_file },
{ "network inet ", tomoyo_write_inet_network },
@@ -1151,9 +1174,11 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head)
struct tomoyo_domain_info *domain = head->w.domain;
const bool is_delete = head->w.is_delete;
bool is_select = !is_delete && tomoyo_str_starts(&data, "select ");
- unsigned int profile;
+ unsigned int idx;
+
if (*data == '<') {
int ret = 0;
+
domain = NULL;
if (is_delete)
ret = tomoyo_delete_domain(data);
@@ -1167,23 +1192,27 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head)
if (!domain)
return -EINVAL;
ns = domain->ns;
- if (sscanf(data, "use_profile %u", &profile) == 1
- && profile < TOMOYO_MAX_PROFILES) {
- if (!tomoyo_policy_loaded || ns->profile_ptr[profile])
- domain->profile = (u8) profile;
+ if (sscanf(data, "use_profile %u", &idx) == 1
+ && idx < TOMOYO_MAX_PROFILES) {
+ if (!tomoyo_policy_loaded || ns->profile_ptr[idx])
+ if (!is_delete)
+ domain->profile = (u8) idx;
return 0;
}
- if (sscanf(data, "use_group %u\n", &profile) == 1
- && profile < TOMOYO_MAX_ACL_GROUPS) {
+ if (sscanf(data, "use_group %u\n", &idx) == 1
+ && idx < TOMOYO_MAX_ACL_GROUPS) {
if (!is_delete)
- domain->group = (u8) profile;
+ set_bit(idx, domain->group);
+ else
+ clear_bit(idx, domain->group);
return 0;
}
- for (profile = 0; profile < TOMOYO_MAX_DOMAIN_INFO_FLAGS; profile++) {
- const char *cp = tomoyo_dif[profile];
+ for (idx = 0; idx < TOMOYO_MAX_DOMAIN_INFO_FLAGS; idx++) {
+ const char *cp = tomoyo_dif[idx];
+
if (strncmp(data, cp, strlen(cp) - 1))
continue;
- domain->flags[profile] = !is_delete;
+ domain->flags[idx] = !is_delete;
return 0;
}
return tomoyo_write_domain2(ns, &domain->acl_info_list, data,
@@ -1225,9 +1254,11 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
const struct tomoyo_envp *envp =
(typeof(envp)) (argv + cond->argc);
u16 skip;
+
for (skip = 0; skip < head->r.cond_index; skip++) {
const u8 left = condp->left;
const u8 right = condp->right;
+
condp++;
switch (left) {
case TOMOYO_ARGV_ENTRY:
@@ -1253,6 +1284,7 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
const u8 match = condp->equals;
const u8 left = condp->left;
const u8 right = condp->right;
+
if (!tomoyo_flush(head))
return false;
condp++;
@@ -1262,8 +1294,7 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
case TOMOYO_ARGV_ENTRY:
tomoyo_io_printf(head,
"exec.argv[%lu]%s=\"",
- argv->index, argv->
- is_not ? "!" : "");
+ argv->index, argv->is_not ? "!" : "");
tomoyo_set_string(head,
argv->value->name);
tomoyo_set_string(head, "\"");
@@ -1274,12 +1305,10 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
"exec.envp[\"");
tomoyo_set_string(head,
envp->name->name);
- tomoyo_io_printf(head, "\"]%s=", envp->
- is_not ? "!" : "");
+ tomoyo_io_printf(head, "\"]%s=", envp->is_not ? "!" : "");
if (envp->value) {
tomoyo_set_string(head, "\"");
- tomoyo_set_string(head, envp->
- value->name);
+ tomoyo_set_string(head, envp->value->name);
tomoyo_set_string(head, "\"");
} else {
tomoyo_set_string(head,
@@ -1375,6 +1404,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
struct tomoyo_path_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u16 perm = ptr->perm;
+
for (bit = 0; bit < TOMOYO_MAX_PATH_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
@@ -1395,6 +1425,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
} else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) {
struct tomoyo_task_acl *ptr =
container_of(acl, typeof(*ptr), head);
+
tomoyo_set_group(head, "task ");
tomoyo_set_string(head, "manual_domain_transition ");
tomoyo_set_string(head, ptr->domainname->name);
@@ -1404,6 +1435,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
struct tomoyo_path2_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u8 perm = ptr->perm;
+
for (bit = 0; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
@@ -1424,6 +1456,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
struct tomoyo_path_number_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u8 perm = ptr->perm;
+
for (bit = 0; bit < TOMOYO_MAX_PATH_NUMBER_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
@@ -1444,6 +1477,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
struct tomoyo_mkdev_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u8 perm = ptr->perm;
+
for (bit = 0; bit < TOMOYO_MAX_MKDEV_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
@@ -1490,6 +1524,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
->name);
} else {
char buf[128];
+
tomoyo_print_ip(buf, sizeof(buf), &ptr->address);
tomoyo_io_printf(head, "%s", buf);
}
@@ -1519,6 +1554,7 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
} else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) {
struct tomoyo_mount_acl *ptr =
container_of(acl, typeof(*ptr), head);
+
tomoyo_set_group(head, "file mount");
tomoyo_print_name_union(head, &ptr->dev_name);
tomoyo_print_name_union(head, &ptr->dir_name);
@@ -1562,6 +1598,7 @@ static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head,
list_for_each_cookie(head->r.acl, list) {
struct tomoyo_acl_info *ptr =
list_entry(head->r.acl, typeof(*ptr), list);
+
if (!tomoyo_print_entry(head, ptr))
return false;
}
@@ -1583,8 +1620,9 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head)
list_for_each_cookie(head->r.domain, &tomoyo_domain_list) {
struct tomoyo_domain_info *domain =
list_entry(head->r.domain, typeof(*domain), list);
+ u8 i;
+
switch (head->r.step) {
- u8 i;
case 0:
if (domain->is_deleted &&
!head->r.print_this_domain_only)
@@ -1594,22 +1632,33 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head)
tomoyo_set_lf(head);
tomoyo_io_printf(head, "use_profile %u\n",
domain->profile);
- tomoyo_io_printf(head, "use_group %u\n",
- domain->group);
for (i = 0; i < TOMOYO_MAX_DOMAIN_INFO_FLAGS; i++)
if (domain->flags[i])
tomoyo_set_string(head, tomoyo_dif[i]);
+ head->r.index = 0;
head->r.step++;
- tomoyo_set_lf(head);
/* fall through */
case 1:
+ while (head->r.index < TOMOYO_MAX_ACL_GROUPS) {
+ i = head->r.index++;
+ if (!test_bit(i, domain->group))
+ continue;
+ tomoyo_io_printf(head, "use_group %u\n", i);
+ if (!tomoyo_flush(head))
+ return;
+ }
+ head->r.index = 0;
+ head->r.step++;
+ tomoyo_set_lf(head);
+ /* fall through */
+ case 2:
if (!tomoyo_read_domain2(head, &domain->acl_info_list))
return;
head->r.step++;
if (!tomoyo_set_lf(head))
return;
/* fall through */
- case 2:
+ case 3:
head->r.step = 0;
if (head->r.print_this_domain_only)
goto done;
@@ -1668,7 +1717,7 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
else
p = find_task_by_vpid(pid);
if (p)
- domain = tomoyo_real_domain(p);
+ domain = tomoyo_task(p)->domain_info;
rcu_read_unlock();
if (!domain)
return;
@@ -1711,6 +1760,7 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
.data = head->write_buf,
};
u8 i;
+
if (tomoyo_str_starts(&param.data, "aggregator "))
return tomoyo_write_aggregator(&param);
for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++)
@@ -1722,6 +1772,7 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
if (tomoyo_str_starts(&param.data, "acl_group ")) {
unsigned int group;
char *data;
+
group = simple_strtoul(param.data, &data, 10);
if (group < TOMOYO_MAX_ACL_GROUPS && *data++ == ' ')
return tomoyo_write_domain2
@@ -1746,12 +1797,15 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
struct tomoyo_policy_namespace *ns =
container_of(head->r.ns, typeof(*ns), namespace_list);
struct list_head *list = &ns->group_list[idx];
+
list_for_each_cookie(head->r.group, list) {
struct tomoyo_group *group =
list_entry(head->r.group, typeof(*group), head.list);
+
list_for_each_cookie(head->r.acl, &group->member_list) {
struct tomoyo_acl_head *ptr =
list_entry(head->r.acl, typeof(*ptr), list);
+
if (ptr->is_deleted)
continue;
if (!tomoyo_flush(head))
@@ -1771,10 +1825,10 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
head)->number);
} else if (idx == TOMOYO_ADDRESS_GROUP) {
char buffer[128];
-
struct tomoyo_address_group *member =
container_of(ptr, typeof(*member),
head);
+
tomoyo_print_ip(buffer, sizeof(buffer),
&member->address);
tomoyo_io_printf(head, " %s", buffer);
@@ -1802,6 +1856,7 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx)
struct tomoyo_policy_namespace *ns =
container_of(head->r.ns, typeof(*ns), namespace_list);
struct list_head *list = &ns->policy_list[idx];
+
list_for_each_cookie(head->r.acl, list) {
struct tomoyo_acl_head *acl =
container_of(head->r.acl, typeof(*acl), list);
@@ -1814,6 +1869,7 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx)
{
struct tomoyo_transition_control *ptr =
container_of(acl, typeof(*ptr), head);
+
tomoyo_print_namespace(head);
tomoyo_set_string(head, tomoyo_transition_type
[ptr->type]);
@@ -1829,6 +1885,7 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx)
{
struct tomoyo_aggregator *ptr =
container_of(acl, typeof(*ptr), head);
+
tomoyo_print_namespace(head);
tomoyo_set_string(head, "aggregator ");
tomoyo_set_string(head,
@@ -1858,6 +1915,7 @@ static void tomoyo_read_exception(struct tomoyo_io_buffer *head)
{
struct tomoyo_policy_namespace *ns =
container_of(head->r.ns, typeof(*ns), namespace_list);
+
if (head->r.eof)
return;
while (head->r.step < TOMOYO_MAX_POLICY &&
@@ -1921,6 +1979,7 @@ static atomic_t tomoyo_query_observers = ATOMIC_INIT(0);
static int tomoyo_truncate(char *str)
{
char *start = str;
+
while (*(unsigned char *) str > (unsigned char) ' ')
str++;
*str = '\0';
@@ -1943,6 +2002,7 @@ static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header)
char *symlink = NULL;
char *cp = strchr(header, '\n');
int len;
+
if (!cp)
return;
cp = strchr(cp + 1, '\n');
@@ -2002,6 +2062,7 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
static unsigned int tomoyo_serial;
struct tomoyo_query entry = { };
bool quota_exceeded = false;
+
va_start(args, fmt);
len = vsnprintf((char *) &len, 1, fmt, args) + 1;
va_end(args);
@@ -2063,8 +2124,7 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
(tomoyo_answer_wait, entry.answer ||
!atomic_read(&tomoyo_query_observers), HZ))
break;
- else
- entry.timer++;
+ entry.timer++;
}
spin_lock(&tomoyo_query_list_lock);
list_del(&entry.list);
@@ -2100,6 +2160,7 @@ static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
{
struct tomoyo_query *ptr;
struct tomoyo_domain_info *domain = NULL;
+
spin_lock(&tomoyo_query_list_lock);
list_for_each_entry(ptr, &tomoyo_query_list, list) {
if (ptr->serial != serial)
@@ -2142,15 +2203,15 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head)
unsigned int pos = 0;
size_t len = 0;
char *buf;
+
if (head->r.w_pos)
return;
- if (head->read_buf) {
- kfree(head->read_buf);
- head->read_buf = NULL;
- }
+ kfree(head->read_buf);
+ head->read_buf = NULL;
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+
if (pos++ != head->r.query_index)
continue;
len = ptr->query_len;
@@ -2168,6 +2229,7 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head)
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+
if (pos++ != head->r.query_index)
continue;
/*
@@ -2202,9 +2264,11 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
struct list_head *tmp;
unsigned int serial;
unsigned int answer;
+
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+
ptr->timer = 0;
}
spin_unlock(&tomoyo_query_list_lock);
@@ -2213,6 +2277,7 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+
if (ptr->serial != serial)
continue;
ptr->answer = answer;
@@ -2235,7 +2300,7 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
static void tomoyo_read_version(struct tomoyo_io_buffer *head)
{
if (!head->r.eof) {
- tomoyo_io_printf(head, "2.5.0");
+ tomoyo_io_printf(head, "2.6.0");
head->r.eof = true;
}
}
@@ -2287,6 +2352,7 @@ static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
{
u8 i;
unsigned int total = 0;
+
if (head->r.eof)
return;
for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) {
@@ -2295,9 +2361,9 @@ static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
tomoyo_stat_updated[i]);
if (tomoyo_stat_modified[i]) {
struct tomoyo_time stamp;
+
tomoyo_convert_time(tomoyo_stat_modified[i], &stamp);
- tomoyo_io_printf(head, " (Last: %04u/%02u/%02u "
- "%02u:%02u:%02u)",
+ tomoyo_io_printf(head, " (Last: %04u/%02u/%02u %02u:%02u:%02u)",
stamp.year, stamp.month, stamp.day,
stamp.hour, stamp.min, stamp.sec);
}
@@ -2305,6 +2371,7 @@ static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
}
for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) {
unsigned int used = tomoyo_memory_used[i];
+
total += used;
tomoyo_io_printf(head, "Memory used by %-22s %10u",
tomoyo_memory_headers[i], used);
@@ -2329,6 +2396,7 @@ static int tomoyo_write_stat(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
u8 i;
+
if (tomoyo_str_starts(&data, "Memory used by "))
for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++)
if (tomoyo_str_starts(&data, tomoyo_memory_headers[i]))
@@ -2457,6 +2525,7 @@ int tomoyo_open_control(const u8 type, struct file *file)
__poll_t tomoyo_poll_control(struct file *file, poll_table *wait)
{
struct tomoyo_io_buffer *head = file->private_data;
+
if (head->poll)
return head->poll(file, wait) | EPOLLOUT | EPOLLWRNORM;
return EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM;
@@ -2472,6 +2541,7 @@ __poll_t tomoyo_poll_control(struct file *file, poll_table *wait)
static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head)
{
struct list_head *ns;
+
if (head->type != TOMOYO_EXCEPTIONPOLICY &&
head->type != TOMOYO_PROFILE)
return;
@@ -2517,7 +2587,7 @@ ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
int idx;
if (!head->read)
- return -ENOSYS;
+ return -EINVAL;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
head->read_user_buf = buffer;
@@ -2557,6 +2627,7 @@ static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line)
head->type == TOMOYO_PROFILE) {
if (*line == '<') {
char *cp = strchr(line, ' ');
+
if (cp) {
*cp++ = '\0';
head->w.ns = tomoyo_assign_namespace(line);
@@ -2589,8 +2660,9 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
size_t avail_len = buffer_len;
char *cp0 = head->write_buf;
int idx;
+
if (!head->write)
- return -ENOSYS;
+ return -EINVAL;
if (!access_ok(buffer, buffer_len))
return -EFAULT;
if (mutex_lock_interruptible(&head->io_sem))
@@ -2600,9 +2672,11 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
/* Read a line and dispatch it to the policy handler. */
while (avail_len > 0) {
char c;
+
if (head->w.avail >= head->writebuf_size - 1) {
const int len = head->writebuf_size * 2;
char *cp = kzalloc(len, GFP_NOFS);
+
if (!cp) {
error = -ENOMEM;
break;
@@ -2701,30 +2775,32 @@ void tomoyo_check_profile(void)
{
struct tomoyo_domain_info *domain;
const int idx = tomoyo_read_lock();
+
tomoyo_policy_loaded = true;
- printk(KERN_INFO "TOMOYO: 2.5.0\n");
+ pr_info("TOMOYO: 2.6.0\n");
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
const u8 profile = domain->profile;
- const struct tomoyo_policy_namespace *ns = domain->ns;
- if (ns->profile_version != 20110903)
- printk(KERN_ERR
- "Profile version %u is not supported.\n",
+ struct tomoyo_policy_namespace *ns = domain->ns;
+
+ if (ns->profile_version == 20110903) {
+ pr_info_once("Converting profile version from %u to %u.\n",
+ 20110903, 20150505);
+ ns->profile_version = 20150505;
+ }
+ if (ns->profile_version != 20150505)
+ pr_err("Profile version %u is not supported.\n",
ns->profile_version);
else if (!ns->profile_ptr[profile])
- printk(KERN_ERR
- "Profile %u (used by '%s') is not defined.\n",
+ pr_err("Profile %u (used by '%s') is not defined.\n",
profile, domain->domainname->name);
else
continue;
- printk(KERN_ERR
- "Userland tools for TOMOYO 2.5 must be installed and "
- "policy must be initialized.\n");
- printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.5/ "
- "for more information.\n");
+ pr_err("Userland tools for TOMOYO 2.6 must be installed and policy must be initialized.\n");
+ pr_err("Please see https://tomoyo.osdn.jp/2.6/ for more information.\n");
panic("STOP!");
}
tomoyo_read_unlock(idx);
- printk(KERN_INFO "Mandatory Access Control activated.\n");
+ pr_info("Mandatory Access Control activated.\n");
}
/**
@@ -2743,9 +2819,11 @@ void __init tomoyo_load_builtin_policy(void)
#include "builtin-policy.h"
u8 i;
const int idx = tomoyo_read_lock();
+
for (i = 0; i < 5; i++) {
struct tomoyo_io_buffer head = { };
char *start = "";
+
switch (i) {
case 0:
start = tomoyo_builtin_profile;
@@ -2775,6 +2853,7 @@ void __init tomoyo_load_builtin_policy(void)
}
while (1) {
char *end = strchr(start, '\n');
+
if (!end)
break;
*end = '\0';
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 539bcdd30bb8..050473df5809 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -10,6 +10,8 @@
#ifndef _SECURITY_TOMOYO_COMMON_H
#define _SECURITY_TOMOYO_COMMON_H
+#define pr_fmt(fmt) fmt
+
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/mm.h>
@@ -29,6 +31,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
+#include <linux/lsm_hooks.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/ip.h>
@@ -681,11 +684,12 @@ struct tomoyo_domain_info {
const struct tomoyo_path_info *domainname;
/* Namespace for this domain. Never NULL. */
struct tomoyo_policy_namespace *ns;
+ /* Group numbers to use. */
+ unsigned long group[TOMOYO_MAX_ACL_GROUPS / BITS_PER_LONG];
u8 profile; /* Profile number to use. */
- u8 group; /* Group number to use. */
bool is_deleted; /* Delete flag. */
bool flags[TOMOYO_MAX_DOMAIN_INFO_FLAGS];
- atomic_t users; /* Number of referring credentials. */
+ atomic_t users; /* Number of referring tasks. */
};
/*
@@ -787,9 +791,9 @@ struct tomoyo_acl_param {
* interfaces.
*/
struct tomoyo_io_buffer {
- void (*read) (struct tomoyo_io_buffer *);
- int (*write) (struct tomoyo_io_buffer *);
- __poll_t (*poll) (struct file *file, poll_table *wait);
+ void (*read)(struct tomoyo_io_buffer *head);
+ int (*write)(struct tomoyo_io_buffer *head);
+ __poll_t (*poll)(struct file *file, poll_table *wait);
/* Exclusive lock for this structure. */
struct mutex io_sem;
char __user *read_user_buf;
@@ -906,12 +910,18 @@ struct tomoyo_policy_namespace {
struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS];
/* List for connecting to tomoyo_namespace_list list. */
struct list_head namespace_list;
- /* Profile version. Currently only 20110903 is defined. */
+ /* Profile version. Currently only 20150505 is defined. */
unsigned int profile_version;
/* Name of this namespace (e.g. "<kernel>", "</usr/sbin/httpd>" ). */
const char *name;
};
+/* Structure for "struct task_struct"->security. */
+struct tomoyo_task {
+ struct tomoyo_domain_info *domain_info;
+ struct tomoyo_domain_info *old_domain_info;
+};
+
/********** Function prototypes. **********/
bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
@@ -1020,6 +1030,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param);
struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
const bool transit);
+struct tomoyo_domain_info *tomoyo_domain(void);
struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname);
struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
const u8 idx);
@@ -1034,8 +1045,8 @@ void *tomoyo_commit_ok(void *data, const unsigned int size);
void __init tomoyo_load_builtin_policy(void);
void __init tomoyo_mm_init(void);
void tomoyo_check_acl(struct tomoyo_request_info *r,
- bool (*check_entry) (struct tomoyo_request_info *,
- const struct tomoyo_acl_info *));
+ bool (*check_entry)(struct tomoyo_request_info *,
+ const struct tomoyo_acl_info *));
void tomoyo_check_profile(void);
void tomoyo_convert_time(time64_t time, struct tomoyo_time *stamp);
void tomoyo_del_condition(struct list_head *element);
@@ -1062,6 +1073,7 @@ void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
/********** External variable definitions. **********/
extern bool tomoyo_policy_loaded;
+extern int tomoyo_enabled;
extern const char * const tomoyo_condition_keyword
[TOMOYO_MAX_CONDITION_KEYWORD];
extern const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS];
@@ -1085,6 +1097,7 @@ extern struct tomoyo_domain_info tomoyo_kernel_domain;
extern struct tomoyo_policy_namespace tomoyo_kernel_namespace;
extern unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
extern unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
+extern struct lsm_blob_sizes tomoyo_blob_sizes;
/********** Inlined functions. **********/
@@ -1121,6 +1134,7 @@ static inline void tomoyo_read_unlock(int idx)
static inline pid_t tomoyo_sys_getppid(void)
{
pid_t pid;
+
rcu_read_lock();
pid = task_tgid_vnr(rcu_dereference(current->real_parent));
rcu_read_unlock();
@@ -1197,26 +1211,15 @@ static inline void tomoyo_put_group(struct tomoyo_group *group)
}
/**
- * tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.
- *
- * Returns pointer to "struct tomoyo_domain_info" for current thread.
- */
-static inline struct tomoyo_domain_info *tomoyo_domain(void)
-{
- return current_cred()->security;
-}
-
-/**
- * tomoyo_real_domain - Get "struct tomoyo_domain_info" for specified thread.
+ * tomoyo_task - Get "struct tomoyo_task" for specified thread.
*
- * @task: Pointer to "struct task_struct".
+ * @task - Pointer to "struct task_struct".
*
- * Returns pointer to "struct tomoyo_security" for specified thread.
+ * Returns pointer to "struct tomoyo_task" for specified thread.
*/
-static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
- *task)
+static inline struct tomoyo_task *tomoyo_task(struct task_struct *task)
{
- return task_cred_xxx(task, security);
+ return task->security + tomoyo_blob_sizes.lbs_task;
}
/**
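With the structure change above, a TOMOYO domain can belong to any combination of ACL groups instead of a single numbered group: "use_group" lines set and clear individual bits (see the set_bit()/clear_bit() calls in tomoyo_write_domain earlier in this diff) and readers walk the bitmap. A small usage sketch mirroring the test_bit() loops this patch adds, relying on the definitions in common.h; the function name here is hypothetical:

/* Hypothetical example: emit one "use_group N" line per bit set in the
 * domain's new group bitmap, as tomoyo_assign_domain() now does. */
static void example_dump_groups(const struct tomoyo_domain_info *domain)
{
	unsigned int i;

	for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
		if (test_bit(i, domain->group))
			pr_info("use_group %u\n", i);
}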
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
index 8d0e1b9c9c57..8f6d57c15df6 100644
--- a/security/tomoyo/condition.c
+++ b/security/tomoyo/condition.c
@@ -28,9 +28,11 @@ static bool tomoyo_argv(const unsigned int index, const char *arg_ptr,
{
int i;
struct tomoyo_path_info arg;
+
arg.name = arg_ptr;
for (i = 0; i < argc; argv++, checked++, i++) {
bool result;
+
if (index != argv->index)
continue;
*checked = 1;
@@ -62,12 +64,14 @@ static bool tomoyo_envp(const char *env_name, const char *env_value,
int i;
struct tomoyo_path_info name;
struct tomoyo_path_info value;
+
name.name = env_name;
tomoyo_fill_path_info(&name);
value.name = env_value;
tomoyo_fill_path_info(&value);
for (i = 0; i < envc; envp++, checked++, i++) {
bool result;
+
if (!tomoyo_path_matches_pattern(&name, envp->name))
continue;
*checked = 1;
@@ -113,6 +117,7 @@ static bool tomoyo_scan_bprm(struct tomoyo_execve *ee,
bool result = true;
u8 local_checked[32];
u8 *checked;
+
if (argc + envc <= sizeof(local_checked)) {
checked = local_checked;
memset(local_checked, 0, sizeof(local_checked));
@@ -131,6 +136,7 @@ static bool tomoyo_scan_bprm(struct tomoyo_execve *ee,
/* Read. */
const char *kaddr = dump->data;
const unsigned char c = kaddr[offset++];
+
if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
if (c == '\\') {
arg_ptr[arg_len++] = '\\';
@@ -160,6 +166,7 @@ static bool tomoyo_scan_bprm(struct tomoyo_execve *ee,
argv_count--;
} else if (envp_count) {
char *cp = strchr(arg_ptr, '=');
+
if (cp) {
*cp = '\0';
if (!tomoyo_envp(arg_ptr, cp + 1,
@@ -182,6 +189,7 @@ static bool tomoyo_scan_bprm(struct tomoyo_execve *ee,
out:
if (result) {
int i;
+
/* Check not-yet-checked entries. */
for (i = 0; i < argc; i++) {
if (checked[i])
@@ -229,6 +237,7 @@ static bool tomoyo_scan_exec_realpath(struct file *file,
{
bool result;
struct tomoyo_path_info exe;
+
if (!file)
return false;
exe.name = tomoyo_realpath_from_path(&file->f_path);
@@ -250,6 +259,7 @@ static bool tomoyo_scan_exec_realpath(struct file *file,
static const struct tomoyo_path_info *tomoyo_get_dqword(char *start)
{
char *cp = start + strlen(start) - 1;
+
if (cp == start || *start++ != '"' || *cp != '"')
return NULL;
*cp = '\0';
@@ -270,6 +280,7 @@ static bool tomoyo_parse_name_union_quoted(struct tomoyo_acl_param *param,
struct tomoyo_name_union *ptr)
{
char *filename = param->data;
+
if (*filename == '@')
return tomoyo_parse_name_union(param, ptr);
ptr->filename = tomoyo_get_dqword(filename);
@@ -310,6 +321,7 @@ static bool tomoyo_parse_envp(char *left, char *right,
const struct tomoyo_path_info *name;
const struct tomoyo_path_info *value;
char *cp = left + strlen(left) - 1;
+
if (*cp-- != ']' || *cp != '"')
goto out;
*cp = '\0';
@@ -364,6 +376,7 @@ static inline bool tomoyo_same_condition(const struct tomoyo_condition *a,
static u8 tomoyo_condition_type(const char *word)
{
u8 i;
+
for (i = 0; i < TOMOYO_MAX_CONDITION_KEYWORD; i++) {
if (!strcmp(word, tomoyo_condition_keyword[i]))
break;
@@ -395,6 +408,7 @@ static struct tomoyo_condition *tomoyo_commit_condition
{
struct tomoyo_condition *ptr;
bool found = false;
+
if (mutex_lock_interruptible(&tomoyo_policy_lock)) {
dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__);
ptr = NULL;
@@ -442,12 +456,14 @@ static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param,
{
char * const pos = param->data;
bool flag;
+
if (*pos == '<') {
e->transit = tomoyo_get_domainname(param);
goto done;
}
{
char *cp = strchr(pos, ' ');
+
if (cp)
*cp = '\0';
flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") ||
@@ -489,6 +505,7 @@ struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param)
tomoyo_get_transit_preference(param, &e);
char * const end_of_string = start_of_string + strlen(start_of_string);
char *pos;
+
rerun:
pos = start_of_string;
while (1) {
@@ -498,6 +515,7 @@ rerun:
char *cp;
char *right_word;
bool is_not;
+
if (!*left_word)
break;
/*
@@ -622,8 +640,8 @@ rerun:
}
store_value:
if (!condp) {
- dprintk(KERN_WARNING "%u: dry_run left=%u right=%u "
- "match=%u\n", __LINE__, left, right, !is_not);
+ dprintk(KERN_WARNING "%u: dry_run left=%u right=%u match=%u\n",
+ __LINE__, left, right, !is_not);
continue;
}
condp->left = left;
@@ -660,6 +678,7 @@ store_value:
envp = (struct tomoyo_envp *) (argv + e.argc);
{
bool flag = false;
+
for (pos = start_of_string; pos < end_of_string; pos++) {
if (*pos)
continue;
@@ -698,6 +717,7 @@ void tomoyo_get_attributes(struct tomoyo_obj_info *obj)
for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
struct inode *inode;
+
switch (i) {
case TOMOYO_PATH1:
dentry = obj->path1.dentry;
@@ -718,6 +738,7 @@ void tomoyo_get_attributes(struct tomoyo_obj_info *obj)
inode = d_backing_inode(dentry);
if (inode) {
struct tomoyo_mini_stat *stat = &obj->stat[i];
+
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
stat->ino = inode->i_ino;
@@ -726,8 +747,7 @@ void tomoyo_get_attributes(struct tomoyo_obj_info *obj)
stat->rdev = inode->i_rdev;
obj->stat_valid[i] = true;
}
- if (i & 1) /* i == TOMOYO_PATH1_PARENT ||
- i == TOMOYO_PATH2_PARENT */
+ if (i & 1) /* TOMOYO_PATH1_PARENT or TOMOYO_PATH2_PARENT */
dput(dentry);
}
}
@@ -758,6 +778,7 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
u16 argc;
u16 envc;
struct linux_binprm *bprm = NULL;
+
if (!cond)
return true;
condc = cond->condc;
@@ -780,6 +801,7 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
const u8 right = condp->right;
bool is_bitop[2] = { false, false };
u8 j;
+
condp++;
/* Check argv[] and envp[] later. */
if (left == TOMOYO_ARGV_ENTRY || left == TOMOYO_ENVP_ENTRY)
@@ -787,10 +809,11 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
/* Check string expressions. */
if (right == TOMOYO_NAME_UNION) {
const struct tomoyo_name_union *ptr = names_p++;
+ struct tomoyo_path_info *symlink;
+ struct tomoyo_execve *ee;
+ struct file *file;
+
switch (left) {
- struct tomoyo_path_info *symlink;
- struct tomoyo_execve *ee;
- struct file *file;
case TOMOYO_SYMLINK_TARGET:
symlink = obj ? obj->symlink_target : NULL;
if (!symlink ||
@@ -812,6 +835,7 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
for (j = 0; j < 2; j++) {
const u8 index = j ? right : left;
unsigned long value = 0;
+
switch (index) {
case TOMOYO_TASK_UID:
value = from_kuid(&init_user_ns, current_uid());
@@ -874,31 +898,31 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
value = S_ISVTX;
break;
case TOMOYO_MODE_OWNER_READ:
- value = S_IRUSR;
+ value = 0400;
break;
case TOMOYO_MODE_OWNER_WRITE:
- value = S_IWUSR;
+ value = 0200;
break;
case TOMOYO_MODE_OWNER_EXECUTE:
- value = S_IXUSR;
+ value = 0100;
break;
case TOMOYO_MODE_GROUP_READ:
- value = S_IRGRP;
+ value = 0040;
break;
case TOMOYO_MODE_GROUP_WRITE:
- value = S_IWGRP;
+ value = 0020;
break;
case TOMOYO_MODE_GROUP_EXECUTE:
- value = S_IXGRP;
+ value = 0010;
break;
case TOMOYO_MODE_OTHERS_READ:
- value = S_IROTH;
+ value = 0004;
break;
case TOMOYO_MODE_OTHERS_WRITE:
- value = S_IWOTH;
+ value = 0002;
break;
case TOMOYO_MODE_OTHERS_EXECUTE:
- value = S_IXOTH;
+ value = 0001;
break;
case TOMOYO_EXEC_ARGC:
if (!bprm)
@@ -923,6 +947,7 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
{
u8 stat_index;
struct tomoyo_mini_stat *stat;
+
switch (index) {
case TOMOYO_PATH1_UID:
case TOMOYO_PATH1_GID:
@@ -1036,12 +1061,14 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
if (left == TOMOYO_NUMBER_UNION) {
/* Fetch values now. */
const struct tomoyo_number_union *ptr = numbers_p++;
+
min_v[0] = ptr->values[0];
max_v[0] = ptr->values[1];
}
if (right == TOMOYO_NUMBER_UNION) {
/* Fetch values now. */
const struct tomoyo_number_union *ptr = numbers_p++;
+
if (ptr->group) {
if (tomoyo_number_matches_group(min_v[0],
max_v[0],
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index f6758dad981f..8526a0a74023 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -30,10 +30,10 @@ struct tomoyo_domain_info tomoyo_kernel_domain;
*/
int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
struct tomoyo_acl_param *param,
- bool (*check_duplicate) (const struct tomoyo_acl_head
- *,
- const struct tomoyo_acl_head
- *))
+ bool (*check_duplicate)(const struct tomoyo_acl_head
+ *,
+ const struct tomoyo_acl_head
+ *))
{
int error = param->is_delete ? -ENOENT : -ENOMEM;
struct tomoyo_acl_head *entry;
@@ -90,13 +90,13 @@ static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *a,
*/
int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
struct tomoyo_acl_param *param,
- bool (*check_duplicate) (const struct tomoyo_acl_info
- *,
- const struct tomoyo_acl_info
- *),
- bool (*merge_duplicate) (struct tomoyo_acl_info *,
- struct tomoyo_acl_info *,
- const bool))
+ bool (*check_duplicate)(const struct tomoyo_acl_info
+ *,
+ const struct tomoyo_acl_info
+ *),
+ bool (*merge_duplicate)(struct tomoyo_acl_info *,
+ struct tomoyo_acl_info *,
+ const bool))
{
const bool is_delete = param->is_delete;
int error = is_delete ? -ENOENT : -ENOMEM;
@@ -157,13 +157,13 @@ out:
* Caller holds tomoyo_read_lock().
*/
void tomoyo_check_acl(struct tomoyo_request_info *r,
- bool (*check_entry) (struct tomoyo_request_info *,
- const struct tomoyo_acl_info *))
+ bool (*check_entry)(struct tomoyo_request_info *,
+ const struct tomoyo_acl_info *))
{
const struct tomoyo_domain_info *domain = r->domain;
struct tomoyo_acl_info *ptr;
- bool retried = false;
const struct list_head *list = &domain->acl_info_list;
+ u16 i = 0;
retry:
list_for_each_entry_rcu(ptr, list, list) {
@@ -177,9 +177,10 @@ retry:
r->granted = true;
return;
}
- if (!retried) {
- retried = true;
- list = &domain->ns->acl_group[domain->group];
+ for (; i < TOMOYO_MAX_ACL_GROUPS; i++) {
+ if (!test_bit(i, domain->group))
+ continue;
+ list = &domain->ns->acl_group[i++];
goto retry;
}
r->granted = false;
@@ -198,6 +199,7 @@ LIST_HEAD(tomoyo_domain_list);
static const char *tomoyo_last_word(const char *name)
{
const char *cp = strrchr(name, ' ');
+
if (cp)
return cp + 1;
return name;
@@ -220,6 +222,7 @@ static bool tomoyo_same_transition_control(const struct tomoyo_acl_head *a,
const struct tomoyo_transition_control *p2 = container_of(b,
typeof(*p2),
head);
+
return p1->type == p2->type && p1->is_last_name == p2->is_last_name
&& p1->domainname == p2->domainname
&& p1->program == p2->program;
@@ -240,6 +243,7 @@ int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
int error = param->is_delete ? -ENOENT : -ENOMEM;
char *program = param->data;
char *domainname = strstr(program, " from ");
+
if (domainname) {
*domainname = '\0';
domainname += 6;
@@ -293,6 +297,7 @@ static inline bool tomoyo_scan_transition
const enum tomoyo_transition_type type)
{
const struct tomoyo_transition_control *ptr;
+
list_for_each_entry_rcu(ptr, list, head.list) {
if (ptr->head.is_deleted || ptr->type != type)
continue;
@@ -338,9 +343,11 @@ static enum tomoyo_transition_type tomoyo_transition_type
{
const char *last_name = tomoyo_last_word(domainname->name);
enum tomoyo_transition_type type = TOMOYO_TRANSITION_CONTROL_NO_RESET;
+
while (type < TOMOYO_MAX_TRANSITION_TYPE) {
const struct list_head * const list =
&ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL];
+
if (!tomoyo_scan_transition(list, domainname, program,
last_name, type)) {
type++;
@@ -375,6 +382,7 @@ static bool tomoyo_same_aggregator(const struct tomoyo_acl_head *a,
head);
const struct tomoyo_aggregator *p2 = container_of(b, typeof(*p2),
head);
+
return p1->original_name == p2->original_name &&
p1->aggregated_name == p2->aggregated_name;
}
@@ -394,6 +402,7 @@ int tomoyo_write_aggregator(struct tomoyo_acl_param *param)
int error = param->is_delete ? -ENOENT : -ENOMEM;
const char *original_name = tomoyo_read_token(param);
const char *aggregated_name = tomoyo_read_token(param);
+
if (!tomoyo_correct_word(original_name) ||
!tomoyo_correct_path(aggregated_name))
return -EINVAL;
@@ -426,6 +435,7 @@ static struct tomoyo_policy_namespace *tomoyo_find_namespace
(const char *name, const unsigned int len)
{
struct tomoyo_policy_namespace *ns;
+
list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
if (strncmp(name, ns->name, len) ||
(name[len] && name[len] != ' '))
@@ -451,6 +461,7 @@ struct tomoyo_policy_namespace *tomoyo_assign_namespace(const char *domainname)
struct tomoyo_policy_namespace *entry;
const char *cp = domainname;
unsigned int len = 0;
+
while (*cp && *cp++ != ' ')
len++;
ptr = tomoyo_find_namespace(domainname, len);
@@ -466,6 +477,7 @@ struct tomoyo_policy_namespace *tomoyo_assign_namespace(const char *domainname)
ptr = tomoyo_find_namespace(domainname, len);
if (!ptr && tomoyo_memory_ok(entry)) {
char *name = (char *) (entry + 1);
+
ptr = entry;
memmove(name, domainname, len);
name[len] = '\0';
@@ -490,6 +502,7 @@ static bool tomoyo_namespace_jump(const char *domainname)
{
const char *namespace = tomoyo_current_namespace()->name;
const int len = strlen(namespace);
+
return strncmp(domainname, namespace, len) ||
(domainname[len] && domainname[len] != ' ');
}
@@ -510,6 +523,7 @@ struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
struct tomoyo_domain_info e = { };
struct tomoyo_domain_info *entry = tomoyo_find_domain(domainname);
bool created = false;
+
if (entry) {
if (transit) {
/*
@@ -546,8 +560,9 @@ struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
*/
if (transit) {
const struct tomoyo_domain_info *domain = tomoyo_domain();
+
e.profile = domain->profile;
- e.group = domain->group;
+ memcpy(e.group, domain->group, sizeof(e.group));
}
e.domainname = tomoyo_get_name(domainname);
if (!e.domainname)
@@ -569,12 +584,17 @@ out:
if (entry && transit) {
if (created) {
struct tomoyo_request_info r;
+ int i;
+
tomoyo_init_request_info(&r, entry,
TOMOYO_MAC_FILE_EXECUTE);
r.granted = false;
tomoyo_write_log(&r, "use_profile %u\n",
entry->profile);
- tomoyo_write_log(&r, "use_group %u\n", entry->group);
+ for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
+ if (test_bit(i, entry->group))
+ tomoyo_write_log(&r, "use_group %u\n",
+ i);
tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
}
}
@@ -712,6 +732,7 @@ retry:
struct tomoyo_aggregator *ptr;
struct list_head *list =
&old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR];
+
/* Check 'aggregator' directive. */
candidate = &exename;
list_for_each_entry_rcu(ptr, list, head.list) {
@@ -747,6 +768,7 @@ retry:
*/
if (ee->transition) {
const char *domainname = ee->transition->name;
+
reject_on_transition_failure = true;
if (!strcmp(domainname, "keep"))
goto force_keep_domain;
@@ -758,6 +780,7 @@ retry:
goto force_initialize_domain;
if (!strcmp(domainname, "parent")) {
char *cp;
+
strncpy(ee->tmp, old_domain->domainname->name,
TOMOYO_EXEC_TMPSIZE - 1);
cp = strrchr(ee->tmp, ' ');
@@ -822,8 +845,7 @@ force_jump_domain:
if (domain)
retval = 0;
else if (reject_on_transition_failure) {
- printk(KERN_WARNING "ERROR: Domain '%s' not ready.\n",
- ee->tmp);
+ pr_warn("ERROR: Domain '%s' not ready.\n", ee->tmp);
retval = -ENOMEM;
} else if (ee->r.mode == TOMOYO_CONFIG_ENFORCING)
retval = -ENOMEM;
@@ -834,16 +856,20 @@ force_jump_domain:
ee->r.granted = false;
tomoyo_write_log(&ee->r, "%s", tomoyo_dif
[TOMOYO_DIF_TRANSITION_FAILED]);
- printk(KERN_WARNING
- "ERROR: Domain '%s' not defined.\n", ee->tmp);
+ pr_warn("ERROR: Domain '%s' not defined.\n", ee->tmp);
}
}
out:
if (!domain)
domain = old_domain;
/* Update reference count on "struct tomoyo_domain_info". */
- atomic_inc(&domain->users);
- bprm->cred->security = domain;
+ {
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ s->old_domain_info = s->domain_info;
+ s->domain_info = domain;
+ atomic_inc(&domain->users);
+ }
kfree(exename.name);
if (!retval) {
ee->r.domain = domain;
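
The domain.c hunks above move the result of tomoyo_find_next_domain() from bprm->cred->security into the per-task tomoyo_task blob, keeping both the candidate domain and the previous one until the execve() either commits or falls back. A stand-alone model of that reference handoff, written as plain user-space C; the structure and function names here are illustrative stand-ins, not the kernel types, and the mapping to the TOMOYO hooks noted in the comments is approximate:

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct domain { atomic_int users; };
struct task   { struct domain *domain_info, *old_domain_info; };

/* roughly tomoyo_find_next_domain(): remember the old domain, take a ref on the new one */
static void start_transition(struct task *t, struct domain *next)
{
	t->old_domain_info = t->domain_info;
	t->domain_info = next;
	atomic_fetch_add(&next->users, 1);
}

/* roughly tomoyo_bprm_committed_creds(): execve() succeeded, drop the old reference */
static void commit_transition(struct task *t)
{
	atomic_fetch_sub(&t->old_domain_info->users, 1);
	t->old_domain_info = NULL;
}

/* roughly tomoyo_cred_prepare() after a failed execve(): fall back to the old domain */
static void abort_transition(struct task *t)
{
	atomic_fetch_sub(&t->domain_info->users, 1);
	t->domain_info = t->old_domain_info;
	t->old_domain_info = NULL;
}

int main(void)
{
	struct domain a = { 1 }, b = { 0 };
	struct task t = { &a, NULL };

	start_transition(&t, &b);
	commit_transition(&t);
	assert(t.domain_info == &b && a.users == 0 && b.users == 1);

	start_transition(&t, &a);
	abort_transition(&t);
	assert(t.domain_info == &b && a.users == 0 && b.users == 1);
	return 0;
}
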
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 2a374b4da8f5..86f7d1b90212 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -214,6 +214,7 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r)
const u8 type = r->param.path_number.operation;
u8 radix;
char buffer[64];
+
switch (type) {
case TOMOYO_TYPE_CREATE:
case TOMOYO_TYPE_MKDIR:
@@ -253,6 +254,7 @@ static bool tomoyo_check_path_acl(struct tomoyo_request_info *r,
{
const struct tomoyo_path_acl *acl = container_of(ptr, typeof(*acl),
head);
+
if (acl->perm & (1 << r->param.path.operation)) {
r->param.path.matched_path =
tomoyo_compare_name_union(r->param.path.filename,
@@ -275,6 +277,7 @@ static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r,
{
const struct tomoyo_path_number_acl *acl =
container_of(ptr, typeof(*acl), head);
+
return (acl->perm & (1 << r->param.path_number.operation)) &&
tomoyo_compare_number_union(r->param.path_number.number,
&acl->number) &&
@@ -295,6 +298,7 @@ static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r,
{
const struct tomoyo_path2_acl *acl =
container_of(ptr, typeof(*acl), head);
+
return (acl->perm & (1 << r->param.path2.operation)) &&
tomoyo_compare_name_union(r->param.path2.filename1, &acl->name1)
&& tomoyo_compare_name_union(r->param.path2.filename2,
@@ -314,6 +318,7 @@ static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r,
{
const struct tomoyo_mkdev_acl *acl =
container_of(ptr, typeof(*acl), head);
+
return (acl->perm & (1 << r->param.mkdev.operation)) &&
tomoyo_compare_number_union(r->param.mkdev.mode,
&acl->mode) &&
@@ -338,6 +343,7 @@ static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a,
{
const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head);
+
return tomoyo_same_name_union(&p1->name, &p2->name);
}
@@ -358,6 +364,7 @@ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a,
->perm;
u16 perm = *a_perm;
const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm;
+
if (is_delete)
perm &= ~b_perm;
else
@@ -384,6 +391,7 @@ static int tomoyo_update_path_acl(const u16 perm,
.perm = perm
};
int error;
+
if (!tomoyo_parse_name_union(param, &e.name))
error = -EINVAL;
else
@@ -407,6 +415,7 @@ static bool tomoyo_same_mkdev_acl(const struct tomoyo_acl_info *a,
{
const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head);
+
return tomoyo_same_name_union(&p1->name, &p2->name) &&
tomoyo_same_number_union(&p1->mode, &p2->mode) &&
tomoyo_same_number_union(&p1->major, &p2->major) &&
@@ -431,6 +440,7 @@ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a,
u8 perm = *a_perm;
const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head)
->perm;
+
if (is_delete)
perm &= ~b_perm;
else
@@ -457,6 +467,7 @@ static int tomoyo_update_mkdev_acl(const u8 perm,
.perm = perm
};
int error;
+
if (!tomoyo_parse_name_union(param, &e.name) ||
!tomoyo_parse_number_union(param, &e.mode) ||
!tomoyo_parse_number_union(param, &e.major) ||
@@ -486,6 +497,7 @@ static bool tomoyo_same_path2_acl(const struct tomoyo_acl_info *a,
{
const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head);
+
return tomoyo_same_name_union(&p1->name1, &p2->name1) &&
tomoyo_same_name_union(&p1->name2, &p2->name2);
}
@@ -507,6 +519,7 @@ static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a,
->perm;
u8 perm = *a_perm;
const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm;
+
if (is_delete)
perm &= ~b_perm;
else
@@ -533,6 +546,7 @@ static int tomoyo_update_path2_acl(const u8 perm,
.perm = perm
};
int error;
+
if (!tomoyo_parse_name_union(param, &e.name1) ||
!tomoyo_parse_name_union(param, &e.name2))
error = -EINVAL;
@@ -621,6 +635,7 @@ static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a,
head);
const struct tomoyo_path_number_acl *p2 = container_of(b, typeof(*p2),
head);
+
return tomoyo_same_name_union(&p1->name, &p2->name) &&
tomoyo_same_number_union(&p1->number, &p2->number);
}
@@ -643,6 +658,7 @@ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a,
u8 perm = *a_perm;
const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head)
->perm;
+
if (is_delete)
perm &= ~b_perm;
else
@@ -667,6 +683,7 @@ static int tomoyo_update_path_number_acl(const u8 perm,
.perm = perm
};
int error;
+
if (!tomoyo_parse_name_union(param, &e.name) ||
!tomoyo_parse_number_union(param, &e.number))
error = -EINVAL;
@@ -947,6 +964,7 @@ static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
{
const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);
+
return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
@@ -966,6 +984,7 @@ static int tomoyo_update_mount_acl(struct tomoyo_acl_param *param)
{
struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
int error;
+
if (!tomoyo_parse_name_union(param, &e.dev_name) ||
!tomoyo_parse_name_union(param, &e.dir_name) ||
!tomoyo_parse_name_union(param, &e.fs_type) ||
@@ -995,6 +1014,7 @@ int tomoyo_write_file(struct tomoyo_acl_param *param)
u16 perm = 0;
u8 type;
const char *operation = tomoyo_read_token(param);
+
for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++)
if (tomoyo_permstr(operation, tomoyo_path_keyword[type]))
perm |= 1 << type;
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
index e22bea811c57..9537832fca18 100644
--- a/security/tomoyo/gc.c
+++ b/security/tomoyo/gc.c
@@ -77,11 +77,13 @@ static bool tomoyo_name_used_by_io_buffer(const char *string)
spin_lock(&tomoyo_io_buffer_list_lock);
list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
int i;
+
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
mutex_lock(&head->io_sem);
for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
const char *w = head->r.w[i];
+
if (w < string || w > string + size)
continue;
in_use = true;
@@ -108,6 +110,7 @@ static inline void tomoyo_del_transition_control(struct list_head *element)
{
struct tomoyo_transition_control *ptr =
container_of(element, typeof(*ptr), head.list);
+
tomoyo_put_name(ptr->domainname);
tomoyo_put_name(ptr->program);
}
@@ -123,6 +126,7 @@ static inline void tomoyo_del_aggregator(struct list_head *element)
{
struct tomoyo_aggregator *ptr =
container_of(element, typeof(*ptr), head.list);
+
tomoyo_put_name(ptr->original_name);
tomoyo_put_name(ptr->aggregated_name);
}
@@ -138,6 +142,7 @@ static inline void tomoyo_del_manager(struct list_head *element)
{
struct tomoyo_manager *ptr =
container_of(element, typeof(*ptr), head.list);
+
tomoyo_put_name(ptr->manager);
}
@@ -152,6 +157,7 @@ static void tomoyo_del_acl(struct list_head *element)
{
struct tomoyo_acl_info *acl =
container_of(element, typeof(*acl), list);
+
tomoyo_put_condition(acl->cond);
switch (acl->type) {
case TOMOYO_TYPE_PATH_ACL:
@@ -226,6 +232,7 @@ static void tomoyo_del_acl(struct list_head *element)
{
struct tomoyo_task_acl *entry =
container_of(acl, typeof(*entry), head);
+
tomoyo_put_name(entry->domainname);
}
break;
@@ -247,6 +254,7 @@ static inline void tomoyo_del_domain(struct list_head *element)
container_of(element, typeof(*domain), list);
struct tomoyo_acl_info *acl;
struct tomoyo_acl_info *tmp;
+
/*
* Since this domain is referenced from neither
* "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
@@ -286,6 +294,7 @@ void tomoyo_del_condition(struct list_head *element)
= (const struct tomoyo_argv *) (names_p + names_count);
const struct tomoyo_envp *envp
= (const struct tomoyo_envp *) (argv + argc);
+
for (i = 0; i < numbers_count; i++)
tomoyo_put_number_union(numbers_p++);
for (i = 0; i < names_count; i++)
@@ -321,6 +330,7 @@ static inline void tomoyo_del_path_group(struct list_head *element)
{
struct tomoyo_path_group *member =
container_of(element, typeof(*member), head.list);
+
tomoyo_put_name(member->member_name);
}
@@ -335,6 +345,7 @@ static inline void tomoyo_del_group(struct list_head *element)
{
struct tomoyo_group *group =
container_of(element, typeof(*group), head.list);
+
tomoyo_put_name(group->group_name);
}
@@ -476,6 +487,7 @@ static void tomoyo_collect_member(const enum tomoyo_policy_id id,
{
struct tomoyo_acl_head *member;
struct tomoyo_acl_head *tmp;
+
list_for_each_entry_safe(member, tmp, member_list, list) {
if (!member->is_deleted)
continue;
@@ -495,6 +507,7 @@ static void tomoyo_collect_acl(struct list_head *list)
{
struct tomoyo_acl_info *acl;
struct tomoyo_acl_info *tmp;
+
list_for_each_entry_safe(acl, tmp, list, list) {
if (!acl->is_deleted)
continue;
@@ -513,10 +526,12 @@ static void tomoyo_collect_entry(void)
int i;
enum tomoyo_policy_id id;
struct tomoyo_policy_namespace *ns;
+
mutex_lock(&tomoyo_policy_lock);
{
struct tomoyo_domain_info *domain;
struct tomoyo_domain_info *tmp;
+
list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
list) {
tomoyo_collect_acl(&domain->acl_info_list);
@@ -534,6 +549,7 @@ static void tomoyo_collect_entry(void)
{
struct tomoyo_shared_acl_head *ptr;
struct tomoyo_shared_acl_head *tmp;
+
list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
list) {
if (atomic_read(&ptr->users) > 0)
@@ -547,6 +563,7 @@ static void tomoyo_collect_entry(void)
struct list_head *list = &ns->group_list[i];
struct tomoyo_group *group;
struct tomoyo_group *tmp;
+
switch (i) {
case 0:
id = TOMOYO_ID_PATH_GROUP;
@@ -574,6 +591,7 @@ static void tomoyo_collect_entry(void)
struct list_head *list = &tomoyo_name_list[i];
struct tomoyo_shared_acl_head *ptr;
struct tomoyo_shared_acl_head *tmp;
+
list_for_each_entry_safe(ptr, tmp, list, list) {
if (atomic_read(&ptr->users) > 0)
continue;
@@ -595,6 +613,7 @@ static int tomoyo_gc_thread(void *unused)
{
/* Garbage collector thread is exclusive. */
static DEFINE_MUTEX(tomoyo_gc_mutex);
+
if (!mutex_trylock(&tomoyo_gc_mutex))
goto out;
tomoyo_collect_entry();
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
index 21b0cc3a7e1a..a37c7dc66e44 100644
--- a/security/tomoyo/group.c
+++ b/security/tomoyo/group.c
@@ -75,11 +75,13 @@ int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
{
struct tomoyo_group *group = tomoyo_get_group(param, type);
int error = -EINVAL;
+
if (!group)
return -ENOMEM;
param->list = &group->member_list;
if (type == TOMOYO_PATH_GROUP) {
struct tomoyo_path_group e = { };
+
e.member_name = tomoyo_get_name(tomoyo_read_token(param));
if (!e.member_name) {
error = -ENOMEM;
@@ -90,6 +92,7 @@ int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
tomoyo_put_name(e.member_name);
} else if (type == TOMOYO_NUMBER_GROUP) {
struct tomoyo_number_group e = { };
+
if (param->data[0] == '@' ||
!tomoyo_parse_number_union(param, &e.number))
goto out;
@@ -129,6 +132,7 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
const struct tomoyo_group *group)
{
struct tomoyo_path_group *member;
+
list_for_each_entry_rcu(member, &group->member_list, head.list) {
if (member->head.is_deleted)
continue;
@@ -156,6 +160,7 @@ bool tomoyo_number_matches_group(const unsigned long min,
{
struct tomoyo_number_group *member;
bool matched = false;
+
list_for_each_entry_rcu(member, &group->member_list, head.list) {
if (member->head.is_deleted)
continue;
diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c
index 81b951652051..3445ae6fd479 100644
--- a/security/tomoyo/load_policy.c
+++ b/security/tomoyo/load_policy.c
@@ -37,11 +37,12 @@ __setup("TOMOYO_loader=", tomoyo_loader_setup);
static bool tomoyo_policy_loader_exists(void)
{
struct path path;
+
if (!tomoyo_loader)
tomoyo_loader = CONFIG_SECURITY_TOMOYO_POLICY_LOADER;
if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) {
- printk(KERN_INFO "Not activating Mandatory Access Control "
- "as %s does not exist.\n", tomoyo_loader);
+ pr_info("Not activating Mandatory Access Control as %s does not exist.\n",
+ tomoyo_loader);
return false;
}
path_put(&path);
@@ -96,8 +97,7 @@ void tomoyo_load_policy(const char *filename)
if (!tomoyo_policy_loader_exists())
return;
done = true;
- printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
- tomoyo_loader);
+ pr_info("Calling %s to load policy. Please wait.\n", tomoyo_loader);
argv[0] = (char *) tomoyo_loader;
argv[1] = NULL;
envp[0] = "HOME=/";
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
index 12477e0b0a11..2e7fcfa923c9 100644
--- a/security/tomoyo/memory.c
+++ b/security/tomoyo/memory.c
@@ -19,9 +19,9 @@ void tomoyo_warn_oom(const char *function)
/* Reduce error messages. */
static pid_t tomoyo_last_pid;
const pid_t pid = current->pid;
+
if (tomoyo_last_pid != pid) {
- printk(KERN_WARNING "ERROR: Out of memory at %s.\n",
- function);
+ pr_warn("ERROR: Out of memory at %s.\n", function);
tomoyo_last_pid = pid;
}
if (!tomoyo_policy_loaded)
@@ -48,6 +48,7 @@ bool tomoyo_memory_ok(void *ptr)
{
if (ptr) {
const size_t s = ksize(ptr);
+
tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s;
if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
@@ -73,6 +74,7 @@ bool tomoyo_memory_ok(void *ptr)
void *tomoyo_commit_ok(void *data, const unsigned int size)
{
void *ptr = kzalloc(size, GFP_NOFS);
+
if (tomoyo_memory_ok(ptr)) {
memmove(ptr, data, size);
memset(data, 0, size);
@@ -98,6 +100,7 @@ struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
struct list_head *list;
const char *group_name = tomoyo_read_token(param);
bool found = false;
+
if (!tomoyo_correct_word(group_name) || idx >= TOMOYO_MAX_GROUP)
return NULL;
e.group_name = tomoyo_get_name(group_name);
@@ -116,6 +119,7 @@ struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
}
if (!found) {
struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e));
+
if (entry) {
INIT_LIST_HEAD(&entry->member_list);
atomic_set(&entry->head.users, 1);
@@ -191,6 +195,7 @@ struct tomoyo_policy_namespace tomoyo_kernel_namespace;
void __init tomoyo_mm_init(void)
{
int idx;
+
for (idx = 0; idx < TOMOYO_MAX_HASH; idx++)
INIT_LIST_HEAD(&tomoyo_name_list[idx]);
tomoyo_kernel_namespace.name = "<kernel>";
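
Several hunks in this patch (memory.c above, as well as domain.c, load_policy.c, realpath.c and util.c) replace multi-line printk(KERN_*) calls with the pr_*() helpers and a single unbroken format string. A minimal sketch of that idiom with an illustrative helper name; only pr_fmt(), pr_warn() and KBUILD_MODNAME are assumed, all of them standard kernel interfaces:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must be defined before the include */

#include <linux/printk.h>

static void report_oom(const char *function)
{
	/* one pr_warn() call, format string kept on a single line for grep-ability */
	pr_warn("ERROR: Out of memory at %s.\n", function);
}
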
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
index 7dc7f59b7dde..2755971f50df 100644
--- a/security/tomoyo/mount.c
+++ b/security/tomoyo/mount.c
@@ -49,6 +49,7 @@ static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
{
const struct tomoyo_mount_acl *acl =
container_of(ptr, typeof(*acl), head);
+
return tomoyo_compare_number_union(r->param.mount.flags,
&acl->flags) &&
tomoyo_compare_name_union(r->param.mount.type,
@@ -89,6 +90,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r,
struct tomoyo_path_info rdir;
int need_dev = 0;
int error = -ENOMEM;
+
r->obj = &obj;
/* Get fstype. */
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 6ff8c21e4fff..85e6e31dd1e5 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -94,11 +94,13 @@ static char *tomoyo_get_absolute_path(const struct path *path, char * const buff
const int buflen)
{
char *pos = ERR_PTR(-ENOMEM);
+
if (buflen >= 256) {
/* go to whatever namespace root we are under */
pos = d_absolute_path(path, buffer, buflen - 1);
if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
struct inode *inode = d_backing_inode(path->dentry);
+
if (inode && S_ISDIR(inode->i_mode)) {
buffer[buflen - 2] = '/';
buffer[buflen - 1] = '\0';
@@ -123,10 +125,12 @@ static char *tomoyo_get_dentry_path(struct dentry *dentry, char * const buffer,
const int buflen)
{
char *pos = ERR_PTR(-ENOMEM);
+
if (buflen >= 256) {
pos = dentry_path_raw(dentry, buffer, buflen - 1);
if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
struct inode *inode = d_backing_inode(dentry);
+
if (inode && S_ISDIR(inode->i_mode)) {
buffer[buflen - 2] = '/';
buffer[buflen - 1] = '\0';
@@ -150,12 +154,14 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
{
struct super_block *sb = dentry->d_sb;
char *pos = tomoyo_get_dentry_path(dentry, buffer, buflen);
+
if (IS_ERR(pos))
return pos;
/* Convert from $PID to self if $PID is current thread. */
if (sb->s_magic == PROC_SUPER_MAGIC && *pos == '/') {
char *ep;
const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10);
+
if (*ep == '/' && pid && pid ==
task_tgid_nr_ns(current, sb->s_fs_info)) {
pos = ep - 5;
@@ -170,6 +176,7 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
goto prepend_filesystem_name;
{
struct inode *inode = d_backing_inode(sb->s_root);
+
/*
* Use filesystem name if filesystem does not support rename()
* operation.
@@ -182,6 +189,7 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
char name[64];
int name_len;
const dev_t dev = sb->s_dev;
+
name[sizeof(name) - 1] = '\0';
snprintf(name, sizeof(name) - 1, "dev(%u,%u):", MAJOR(dev),
MINOR(dev));
@@ -197,6 +205,7 @@ prepend_filesystem_name:
{
const char *name = sb->s_type->name;
const int name_len = strlen(name);
+
pos -= name_len + 1;
if (pos < buffer)
goto out;
@@ -223,10 +232,10 @@ static char *tomoyo_get_socket_name(const struct path *path, char * const buffer
struct inode *inode = d_backing_inode(path->dentry);
struct socket *sock = inode ? SOCKET_I(inode) : NULL;
struct sock *sk = sock ? sock->sk : NULL;
+
if (sk) {
- snprintf(buffer, buflen, "socket:[family=%u:type=%u:"
- "protocol=%u]", sk->sk_family, sk->sk_type,
- sk->sk_protocol);
+ snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]",
+ sk->sk_family, sk->sk_type, sk->sk_protocol);
} else {
snprintf(buffer, buflen, "socket:[unknown]");
}
@@ -255,12 +264,14 @@ char *tomoyo_realpath_from_path(const struct path *path)
unsigned int buf_len = PAGE_SIZE / 2;
struct dentry *dentry = path->dentry;
struct super_block *sb;
+
if (!dentry)
return NULL;
sb = dentry->d_sb;
while (1) {
char *pos;
struct inode *inode;
+
buf_len <<= 1;
kfree(buf);
buf = kmalloc(buf_len, GFP_NOFS);
@@ -323,6 +334,7 @@ char *tomoyo_realpath_nofollow(const char *pathname)
if (pathname && kern_path(pathname, 0, &path) == 0) {
char *buf = tomoyo_realpath_from_path(&path);
+
path_put(&path);
return buf;
}
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index 1d3d7e7a1f05..546281c5b233 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -21,6 +21,7 @@ static bool tomoyo_check_task_acl(struct tomoyo_request_info *r,
{
const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl),
head);
+
return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
}
@@ -42,6 +43,7 @@ static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
{
char *data;
int error;
+
if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10)
return -ENOMEM;
data = memdup_user_nul(buf, count);
@@ -52,6 +54,7 @@ static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
const int idx = tomoyo_read_lock();
struct tomoyo_path_info name;
struct tomoyo_request_info r;
+
name.name = data;
tomoyo_fill_path_info(&name);
/* Check "task manual_domain_transition" permission. */
@@ -67,18 +70,14 @@ static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
if (!new_domain) {
error = -ENOENT;
} else {
- struct cred *cred = prepare_creds();
- if (!cred) {
- error = -ENOMEM;
- } else {
- struct tomoyo_domain_info *old_domain =
- cred->security;
- cred->security = new_domain;
- atomic_inc(&new_domain->users);
- atomic_dec(&old_domain->users);
- commit_creds(cred);
- error = 0;
- }
+ struct tomoyo_task *s = tomoyo_task(current);
+ struct tomoyo_domain_info *old_domain =
+ s->domain_info;
+
+ s->domain_info = new_domain;
+ atomic_inc(&new_domain->users);
+ atomic_dec(&old_domain->users);
+ error = 0;
}
}
tomoyo_read_unlock(idx);
@@ -104,6 +103,7 @@ static ssize_t tomoyo_read_self(struct file *file, char __user *buf,
const char *domain = tomoyo_domain()->domainname->name;
loff_t len = strlen(domain);
loff_t pos = *ppos;
+
if (pos >= len || !count)
return 0;
len -= pos;
@@ -234,10 +234,14 @@ static void __init tomoyo_create_entry(const char *name, const umode_t mode,
*/
static int __init tomoyo_initerface_init(void)
{
+ struct tomoyo_domain_info *domain;
struct dentry *tomoyo_dir;
+ if (!tomoyo_enabled)
+ return 0;
+ domain = tomoyo_domain();
/* Don't create securityfs entries unless registered. */
- if (current_cred()->security != &tomoyo_kernel_domain)
+ if (domain != &tomoyo_kernel_domain)
return 0;
tomoyo_dir = securityfs_create_dir("tomoyo", NULL);
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 1b5b5097efd7..716c92ec941a 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -9,17 +9,19 @@
#include "common.h"
/**
- * tomoyo_cred_alloc_blank - Target for security_cred_alloc_blank().
+ * tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.
*
- * @new: Pointer to "struct cred".
- * @gfp: Memory allocation flags.
- *
- * Returns 0.
+ * Returns pointer to "struct tomoyo_domain_info" for current thread.
*/
-static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
+struct tomoyo_domain_info *tomoyo_domain(void)
{
- new->security = NULL;
- return 0;
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ if (s->old_domain_info && !current->in_execve) {
+ atomic_dec(&s->old_domain_info->users);
+ s->old_domain_info = NULL;
+ }
+ return s->domain_info;
}
/**
@@ -34,42 +36,38 @@ static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- struct tomoyo_domain_info *domain = old->security;
- new->security = domain;
- if (domain)
- atomic_inc(&domain->users);
+ /* Restore old_domain_info saved by previous execve() request. */
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ if (s->old_domain_info && !current->in_execve) {
+ atomic_dec(&s->domain_info->users);
+ s->domain_info = s->old_domain_info;
+ s->old_domain_info = NULL;
+ }
return 0;
}
/**
- * tomoyo_cred_transfer - Target for security_transfer_creds().
+ * tomoyo_bprm_committed_creds - Target for security_bprm_committed_creds().
*
- * @new: Pointer to "struct cred".
- * @old: Pointer to "struct cred".
+ * @bprm: Pointer to "struct linux_binprm".
*/
-static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
+static void tomoyo_bprm_committed_creds(struct linux_binprm *bprm)
{
- tomoyo_cred_prepare(new, old, 0);
-}
+ /* Clear old_domain_info saved by execve() request. */
+ struct tomoyo_task *s = tomoyo_task(current);
-/**
- * tomoyo_cred_free - Target for security_cred_free().
- *
- * @cred: Pointer to "struct cred".
- */
-static void tomoyo_cred_free(struct cred *cred)
-{
- struct tomoyo_domain_info *domain = cred->security;
- if (domain)
- atomic_dec(&domain->users);
+ atomic_dec(&s->old_domain_info->users);
+ s->old_domain_info = NULL;
}
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
/**
* tomoyo_bprm_set_creds - Target for security_bprm_set_creds().
*
* @bprm: Pointer to "struct linux_binprm".
*
- * Returns 0 on success, negative value otherwise.
+ * Returns 0.
*/
static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
{
@@ -79,29 +77,15 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
*/
if (bprm->called_set_creds)
return 0;
-#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
/*
* Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
* for the first time.
*/
if (!tomoyo_policy_loaded)
tomoyo_load_policy(bprm->filename);
-#endif
- /*
- * Release reference to "struct tomoyo_domain_info" stored inside
- * "bprm->cred->security". New reference to "struct tomoyo_domain_info"
- * stored inside "bprm->cred->security" will be acquired later inside
- * tomoyo_find_next_domain().
- */
- atomic_dec(&((struct tomoyo_domain_info *)
- bprm->cred->security)->users);
- /*
- * Tell tomoyo_bprm_check_security() is called for the first time of an
- * execve operation.
- */
- bprm->cred->security = NULL;
return 0;
}
+#endif
/**
* tomoyo_bprm_check_security - Target for security_bprm_check().
@@ -112,23 +96,24 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
*/
static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
{
- struct tomoyo_domain_info *domain = bprm->cred->security;
+ struct tomoyo_task *s = tomoyo_task(current);
/*
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
- if (!domain) {
+ if (!s->old_domain_info) {
const int idx = tomoyo_read_lock();
const int err = tomoyo_find_next_domain(bprm);
+
tomoyo_read_unlock(idx);
return err;
}
/*
* Read permission is checked against interpreters using next domain.
*/
- return tomoyo_check_open_permission(domain, &bprm->file->f_path,
- O_RDONLY);
+ return tomoyo_check_open_permission(s->domain_info,
+ &bprm->file->f_path, O_RDONLY);
}
/**
@@ -167,6 +152,7 @@ static int tomoyo_path_truncate(const struct path *path)
static int tomoyo_path_unlink(const struct path *parent, struct dentry *dentry)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
+
return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
}
@@ -183,6 +169,7 @@ static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry,
umode_t mode)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
+
return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
mode & S_IALLUGO);
}
@@ -198,6 +185,7 @@ static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry,
static int tomoyo_path_rmdir(const struct path *parent, struct dentry *dentry)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
+
return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
}
@@ -214,6 +202,7 @@ static int tomoyo_path_symlink(const struct path *parent, struct dentry *dentry,
const char *old_name)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
+
return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name);
}
@@ -271,6 +260,7 @@ static int tomoyo_path_link(struct dentry *old_dentry, const struct path *new_di
{
struct path path1 = { .mnt = new_dir->mnt, .dentry = old_dentry };
struct path path2 = { .mnt = new_dir->mnt, .dentry = new_dentry };
+
return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
}
@@ -291,6 +281,7 @@ static int tomoyo_path_rename(const struct path *old_parent,
{
struct path path1 = { .mnt = old_parent->mnt, .dentry = old_dentry };
struct path path2 = { .mnt = new_parent->mnt, .dentry = new_dentry };
+
return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
}
@@ -322,11 +313,11 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
*/
static int tomoyo_file_open(struct file *f)
{
- int flags = f->f_flags;
/* Don't check read permission here if called from do_execve(). */
if (current->in_execve)
return 0;
- return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
+ return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path,
+ f->f_flags);
}
/**
@@ -370,6 +361,7 @@ static int tomoyo_path_chmod(const struct path *path, umode_t mode)
static int tomoyo_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
int error = 0;
+
if (uid_valid(uid))
error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path,
from_kuid(&init_user_ns, uid));
@@ -419,6 +411,7 @@ static int tomoyo_sb_mount(const char *dev_name, const struct path *path,
static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
{
struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
+
return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
}
@@ -493,16 +486,61 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
return tomoyo_socket_sendmsg_permission(sock, msg, size);
}
+struct lsm_blob_sizes tomoyo_blob_sizes __lsm_ro_after_init = {
+ .lbs_task = sizeof(struct tomoyo_task),
+};
+
+/**
+ * tomoyo_task_alloc - Target for security_task_alloc().
+ *
+ * @task: Pointer to "struct task_struct".
+ * @flags: clone() flags.
+ *
+ * Returns 0.
+ */
+static int tomoyo_task_alloc(struct task_struct *task,
+ unsigned long clone_flags)
+{
+ struct tomoyo_task *old = tomoyo_task(current);
+ struct tomoyo_task *new = tomoyo_task(task);
+
+ new->domain_info = old->domain_info;
+ atomic_inc(&new->domain_info->users);
+ new->old_domain_info = NULL;
+ return 0;
+}
+
+/**
+ * tomoyo_task_free - Target for security_task_free().
+ *
+ * @task: Pointer to "struct task_struct".
+ */
+static void tomoyo_task_free(struct task_struct *task)
+{
+ struct tomoyo_task *s = tomoyo_task(task);
+
+ if (s->domain_info) {
+ atomic_dec(&s->domain_info->users);
+ s->domain_info = NULL;
+ }
+ if (s->old_domain_info) {
+ atomic_dec(&s->old_domain_info->users);
+ s->old_domain_info = NULL;
+ }
+}
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
*/
static struct security_hook_list tomoyo_hooks[] __lsm_ro_after_init = {
- LSM_HOOK_INIT(cred_alloc_blank, tomoyo_cred_alloc_blank),
LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
- LSM_HOOK_INIT(cred_transfer, tomoyo_cred_transfer),
- LSM_HOOK_INIT(cred_free, tomoyo_cred_free),
+ LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
+ LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
+ LSM_HOOK_INIT(task_free, tomoyo_task_free),
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
LSM_HOOK_INIT(bprm_set_creds, tomoyo_bprm_set_creds),
+#endif
LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
LSM_HOOK_INIT(file_open, tomoyo_file_open),
@@ -531,6 +569,8 @@ static struct security_hook_list tomoyo_hooks[] __lsm_ro_after_init = {
/* Lock for GC. */
DEFINE_SRCU(tomoyo_ss);
+int tomoyo_enabled __lsm_ro_after_init = 1;
+
/**
* tomoyo_init - Register TOMOYO Linux as a LSM module.
*
@@ -538,19 +578,23 @@ DEFINE_SRCU(tomoyo_ss);
*/
static int __init tomoyo_init(void)
{
- struct cred *cred = (struct cred *) current_cred();
+ struct tomoyo_task *s = tomoyo_task(current);
- if (!security_module_enable("tomoyo"))
- return 0;
/* register ourselves with the security framework */
security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks), "tomoyo");
- printk(KERN_INFO "TOMOYO Linux initialized\n");
- cred->security = &tomoyo_kernel_domain;
+ pr_info("TOMOYO Linux initialized\n");
+ s->domain_info = &tomoyo_kernel_domain;
+ atomic_inc(&tomoyo_kernel_domain.users);
+ s->old_domain_info = NULL;
tomoyo_mm_init();
+
return 0;
}
DEFINE_LSM(tomoyo) = {
.name = "tomoyo",
+ .enabled = &tomoyo_enabled,
+ .flags = LSM_FLAG_LEGACY_MAJOR,
+ .blobs = &tomoyo_blob_sizes,
.init = tomoyo_init,
};
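
The tomoyo.c hunks above rely on the LSM framework reserving per-task blob space: lsm_blob_sizes.lbs_task declares how much room the module needs, and the framework hands back an offset inside task->security. A compact sketch of that pattern under illustrative "demo_" names; the accessor mirrors what tomoyo_task() does in common.h, which this diff does not show, so treat its exact form as an assumption:

#include <linux/lsm_hooks.h>
#include <linux/sched.h>

struct demo_task_blob { void *state; };

static struct lsm_blob_sizes demo_blob_sizes __lsm_ro_after_init = {
	.lbs_task = sizeof(struct demo_task_blob),
};

/* after boot, .lbs_task holds this module's offset into task->security */
static inline struct demo_task_blob *demo_task(struct task_struct *task)
{
	return task->security + demo_blob_sizes.lbs_task;
}

static int demo_task_alloc(struct task_struct *task, unsigned long clone_flags)
{
	demo_task(task)->state = NULL;	/* the blob memory is already zeroed */
	return 0;
}

static struct security_hook_list demo_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(task_alloc, demo_task_alloc),
};

static int __init demo_init(void)
{
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), "demo");
	return 0;
}

DEFINE_LSM(demo) = {
	.name  = "demo",
	.blobs = &demo_blob_sizes,
	.init  = demo_init,
};

DEFINE_LSM() with .blobs is what replaces the old pattern of stuffing a pointer into cred->security, which is exactly what the removed cred_alloc_blank/cred_transfer/cred_free hooks used to manage.
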
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index badffc8271c8..0517cbdd7275 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -91,6 +91,7 @@ const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
void tomoyo_convert_time(time64_t time64, struct tomoyo_time *stamp)
{
struct tm tm;
+
time64_to_tm(time64, 0, &tm);
stamp->sec = tm.tm_sec;
stamp->min = tm.tm_min;
@@ -113,6 +114,7 @@ void tomoyo_convert_time(time64_t time64, struct tomoyo_time *stamp)
bool tomoyo_permstr(const char *string, const char *keyword)
{
const char *cp = strstr(string, keyword);
+
if (cp)
return cp == string || *(cp - 1) == '/';
return false;
@@ -132,6 +134,7 @@ char *tomoyo_read_token(struct tomoyo_acl_param *param)
{
char *pos = param->data;
char *del = strchr(pos, ' ');
+
if (del)
*del++ = '\0';
else
@@ -152,6 +155,7 @@ const struct tomoyo_path_info *tomoyo_get_domainname
{
char *start = param->data;
char *pos = start;
+
while (*pos) {
if (*pos++ != ' ' || *pos++ == '/')
continue;
@@ -181,8 +185,10 @@ u8 tomoyo_parse_ulong(unsigned long *result, char **str)
const char *cp = *str;
char *ep;
int base = 10;
+
if (*cp == '0') {
char c = *(cp + 1);
+
if (c == 'x' || c == 'X') {
base = 16;
cp += 2;
@@ -240,6 +246,7 @@ bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
struct tomoyo_name_union *ptr)
{
char *filename;
+
if (param->data[0] == '@') {
param->data++;
ptr->group = tomoyo_get_group(param, TOMOYO_PATH_GROUP);
@@ -266,6 +273,7 @@ bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
char *data;
u8 type;
unsigned long v;
+
memset(ptr, 0, sizeof(*ptr));
if (param->data[0] == '@') {
param->data++;
@@ -429,6 +437,7 @@ static bool tomoyo_correct_word2(const char *string, size_t len)
unsigned char c;
unsigned char d;
unsigned char e;
+
if (!len)
goto out;
while (len--) {
@@ -533,6 +542,7 @@ bool tomoyo_correct_domain(const unsigned char *domainname)
return true;
while (1) {
const unsigned char *cp = strchr(domainname, ' ');
+
if (!cp)
break;
if (*domainname != '/' ||
@@ -554,6 +564,7 @@ bool tomoyo_domain_def(const unsigned char *buffer)
{
const unsigned char *cp;
int len;
+
if (*buffer != '<')
return false;
cp = strchr(buffer, ' ');
@@ -668,6 +679,9 @@ static bool tomoyo_file_matches_pattern2(const char *filename,
{
while (filename < filename_end && pattern < pattern_end) {
char c;
+ int i;
+ int j;
+
if (*pattern != '\\') {
if (*filename++ != *pattern++)
return false;
@@ -676,8 +690,6 @@ static bool tomoyo_file_matches_pattern2(const char *filename,
c = *filename;
pattern++;
switch (*pattern) {
- int i;
- int j;
case '?':
if (c == '/') {
return false;
@@ -985,6 +997,7 @@ int tomoyo_init_request_info(struct tomoyo_request_info *r,
struct tomoyo_domain_info *domain, const u8 index)
{
u8 profile;
+
memset(r, 0, sizeof(*r));
if (!domain)
domain = tomoyo_domain();
@@ -1018,6 +1031,7 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
u16 perm;
u8 i;
+
if (ptr->is_deleted)
continue;
switch (ptr->type) {
@@ -1062,9 +1076,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
/* r->granted = false; */
tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
- printk(KERN_WARNING "WARNING: "
- "Domain '%s' has too many ACLs to hold. "
- "Stopped learning mode.\n", domain->domainname->name);
+ pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
+ domain->domainname->name);
}
return false;
}
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index ffda91a4a1aa..57cc60722dd3 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
break;
case YAMA_SCOPE_RELATIONAL:
rcu_read_lock();
- if (!task_is_descendant(current, child) &&
+ if (!pid_alive(child))
+ rc = -EPERM;
+ if (!rc && !task_is_descendant(current, child) &&
!ptracer_exception_found(current, child) &&
!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
@@ -477,9 +479,15 @@ static void __init yama_init_sysctl(void)
static inline void yama_init_sysctl(void) { }
#endif /* CONFIG_SYSCTL */
-void __init yama_add_hooks(void)
+static int __init yama_init(void)
{
pr_info("Yama: becoming mindful.\n");
security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks), "yama");
yama_init_sysctl();
+ return 0;
}
+
+DEFINE_LSM(yama) = {
+ .name = "yama",
+ .init = yama_init,
+};
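
By contrast with the TOMOYO registration above, a stacking LSM such as Yama needs no blobs and no LSM_FLAG_LEGACY_MAJOR; DEFINE_LSM() carries only a name and an init callback. A bare-bones sketch with illustrative names and a deliberately permissive hook body:

#include <linux/lsm_hooks.h>
#include <linux/sched.h>

static int minor_ptrace_access_check(struct task_struct *child,
				     unsigned int mode)
{
	return 0;	/* placeholder: a real policy would decide here */
}

static struct security_hook_list minor_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(ptrace_access_check, minor_ptrace_access_check),
};

static int __init minor_init(void)
{
	security_add_hooks(minor_hooks, ARRAY_SIZE(minor_hooks), "minor");
	return 0;
}

DEFINE_LSM(minor) = {
	.name = "minor",
	.init = minor_init,
};
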
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
index 9f0c480489ef..9cbf6927abe9 100644
--- a/sound/ac97/bus.c
+++ b/sound/ac97/bus.c
@@ -84,7 +84,7 @@ ac97_of_get_child_device(struct ac97_controller *ac97_ctrl, int idx,
if ((idx != of_property_read_u32(node, "reg", &reg)) ||
!of_device_is_compatible(node, compat))
continue;
- return of_node_get(node);
+ return node;
}
return NULL;
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index 65557421fe0b..c3ff721e4660 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -82,6 +82,7 @@ static struct device_node *get_gpio(char *name,
if (altname && (strcmp(audio_gpio, altname) == 0))
break;
}
+ of_node_put(gpio);
/* still not found, assume not there */
if (!np)
return NULL;
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index c3f57a3fb1a5..904659d14988 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -47,8 +47,8 @@ static int alloc_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev,
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- r->space = dma_zalloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev,
- r->size, &r->bus_addr, GFP_KERNEL);
+ r->space = dma_alloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev,
+ r->size, &r->bus_addr, GFP_KERNEL);
if (!r->space)
return -ENOMEM;
@@ -380,10 +380,6 @@ static int i2sbus_suspend(struct macio_dev* dev, pm_message_t state)
int err, ret = 0;
list_for_each_entry(i2sdev, &control->list, item) {
- /* Notify Alsa */
- /* Suspend PCM streams */
- snd_pcm_suspend_all(i2sdev->sound.pcm);
-
/* Notify codecs */
list_for_each_entry(cii, &i2sdev->sound.codec_list, list) {
err = 0;
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 0114ffed56dd..a2d4b41096e0 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -757,7 +757,6 @@ static int aaci_do_suspend(struct snd_card *card)
{
struct aaci *aaci = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3cold);
- snd_pcm_suspend_all(aaci->pcm);
return 0;
}
@@ -942,7 +941,8 @@ static int aaci_init_pcm(struct aaci *aaci)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &aaci_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &aaci_capture_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- NULL, 0, 64 * 1024);
+ aaci->card->dev,
+ 0, 64 * 1024);
}
return ret;
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 1f72672262d0..68fe5bb11eea 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -124,7 +124,6 @@ static int pxa2xx_ac97_do_suspend(struct snd_card *card)
pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3cold);
- snd_pcm_suspend_all(pxa2xx_ac97_pcm);
snd_ac97_suspend(pxa2xx_ac97_ac97);
if (platform_ops && platform_ops->suspend)
platform_ops->suspend(platform_ops->priv);
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 380025887aef..33c87a0547a9 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -603,11 +603,9 @@ static int atmel_ac97c_pcm_new(struct atmel_ac97c *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &atmel_ac97_capture_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &atmel_ac97_playback_ops);
- retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pdev->dev, hw.periods_min * hw.period_bytes_min,
hw.buffer_bytes_max);
- if (retval)
- return retval;
pcm->private_data = chip;
pcm->info_flags = 0;
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index a5b09e75e787..a1a6fd75cfe5 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -541,7 +541,8 @@ static int snd_compress_check_input(struct snd_compr_params *params)
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
- params->buffer.fragments > INT_MAX / params->buffer.fragment_size)
+ params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
+ params->buffer.fragments == 0)
return -EINVAL;
/* now codec parameters */
@@ -1014,22 +1015,13 @@ static int snd_compress_proc_init(struct snd_compr *compr)
if (!entry)
return -ENOMEM;
entry->mode = S_IFDIR | 0555;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- return -ENOMEM;
- }
compr->proc_root = entry;
entry = snd_info_create_card_entry(compr->card, "info",
compr->proc_root);
- if (entry) {
+ if (entry)
snd_info_set_text_ops(entry, compr,
snd_compress_proc_info_read);
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
compr->proc_info_entry = entry;
return 0;
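
The first compress_offload.c hunk tightens snd_compress_check_input(): both buffer fields must now be non-zero and their product must not overflow a signed int. A small user-space model of that check; the field names follow struct snd_compr_params, but the struct below is a stand-in, not the UAPI definition:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct buffer_params { unsigned int fragment_size, fragments; };

static bool buffer_params_ok(const struct buffer_params *p)
{
	if (p->fragment_size == 0 || p->fragments == 0)
		return false;
	/* same guard as the hunk: fragments * fragment_size must fit in an int */
	return p->fragments <= INT_MAX / p->fragment_size;
}

int main(void)
{
	struct buffer_params bad = { .fragment_size = 4096, .fragments = 0 };
	struct buffer_params good = { .fragment_size = 4096, .fragments = 8 };

	printf("%d %d\n", buffer_params_ok(&bad), buffer_params_ok(&good));
	return 0;
}
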
diff --git a/sound/core/info.c b/sound/core/info.c
index fe502bc5e6d2..96a074019c33 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -463,11 +463,12 @@ static struct snd_info_entry *create_subdir(struct module *mod,
}
static struct snd_info_entry *
-snd_info_create_entry(const char *name, struct snd_info_entry *parent);
+snd_info_create_entry(const char *name, struct snd_info_entry *parent,
+ struct module *module);
int __init snd_info_init(void)
{
- snd_proc_root = snd_info_create_entry("asound", NULL);
+ snd_proc_root = snd_info_create_entry("asound", NULL, THIS_MODULE);
if (!snd_proc_root)
return -ENOMEM;
snd_proc_root->mode = S_IFDIR | 0555;
@@ -503,6 +504,14 @@ int __exit snd_info_done(void)
return 0;
}
+static void snd_card_id_read(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct snd_card *card = entry->private_data;
+
+ snd_iprintf(buffer, "%s\n", card->id);
+}
+
/*
* create a card proc file
* called from init.c
@@ -520,28 +529,8 @@ int snd_info_card_create(struct snd_card *card)
if (!entry)
return -ENOMEM;
card->proc_root = entry;
- return 0;
-}
-
-/* register all pending info entries */
-static int snd_info_register_recursive(struct snd_info_entry *entry)
-{
- struct snd_info_entry *p;
- int err;
- if (!entry->p) {
- err = snd_info_register(entry);
- if (err < 0)
- return err;
- }
-
- list_for_each_entry(p, &entry->children, list) {
- err = snd_info_register_recursive(p);
- if (err < 0)
- return err;
- }
-
- return 0;
+ return snd_card_ro_proc_new(card, "id", card, snd_card_id_read);
}
/*
@@ -557,7 +546,7 @@ int snd_info_card_register(struct snd_card *card)
if (snd_BUG_ON(!card))
return -ENXIO;
- err = snd_info_register_recursive(card->proc_root);
+ err = snd_info_register(card->proc_root);
if (err < 0)
return err;
@@ -705,7 +694,8 @@ EXPORT_SYMBOL(snd_info_get_str);
* Return: The pointer of the new instance, or %NULL on failure.
*/
static struct snd_info_entry *
-snd_info_create_entry(const char *name, struct snd_info_entry *parent)
+snd_info_create_entry(const char *name, struct snd_info_entry *parent,
+ struct module *module)
{
struct snd_info_entry *entry;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
@@ -722,6 +712,7 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent)
INIT_LIST_HEAD(&entry->children);
INIT_LIST_HEAD(&entry->list);
entry->parent = parent;
+ entry->module = module;
if (parent)
list_add_tail(&entry->list, &parent->children);
return entry;
@@ -741,10 +732,9 @@ struct snd_info_entry *snd_info_create_module_entry(struct module * module,
const char *name,
struct snd_info_entry *parent)
{
- struct snd_info_entry *entry = snd_info_create_entry(name, parent);
- if (entry)
- entry->module = module;
- return entry;
+ if (!parent)
+ parent = snd_proc_root;
+ return snd_info_create_entry(name, parent, module);
}
EXPORT_SYMBOL(snd_info_create_module_entry);
@@ -762,12 +752,9 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
const char *name,
struct snd_info_entry * parent)
{
- struct snd_info_entry *entry = snd_info_create_entry(name, parent);
- if (entry) {
- entry->module = card->module;
- entry->card = card;
- }
- return entry;
+ if (!parent)
+ parent = card->proc_root;
+ return snd_info_create_entry(name, parent, card->module);
}
EXPORT_SYMBOL(snd_info_create_card_entry);
@@ -813,15 +800,7 @@ void snd_info_free_entry(struct snd_info_entry * entry)
}
EXPORT_SYMBOL(snd_info_free_entry);
-/**
- * snd_info_register - register the info entry
- * @entry: the info entry
- *
- * Registers the proc info entry.
- *
- * Return: Zero if successful, or a negative error code on failure.
- */
-int snd_info_register(struct snd_info_entry * entry)
+static int __snd_info_register(struct snd_info_entry *entry)
{
struct proc_dir_entry *root, *p = NULL;
@@ -829,6 +808,8 @@ int snd_info_register(struct snd_info_entry * entry)
return -ENXIO;
root = entry->parent == NULL ? snd_proc_root->p : entry->parent->p;
mutex_lock(&info_mutex);
+ if (entry->p || !root)
+ goto unlock;
if (S_ISDIR(entry->mode)) {
p = proc_mkdir_mode(entry->name, entry->mode, root);
if (!p) {
@@ -850,11 +831,73 @@ int snd_info_register(struct snd_info_entry * entry)
proc_set_size(p, entry->size);
}
entry->p = p;
+ unlock:
mutex_unlock(&info_mutex);
return 0;
}
+
+/**
+ * snd_info_register - register the info entry
+ * @entry: the info entry
+ *
+ * Registers the proc info entry.
+ * All child entries are registered recursively.
+ *
+ * Return: Zero if successful, or a negative error code on failure.
+ */
+int snd_info_register(struct snd_info_entry *entry)
+{
+ struct snd_info_entry *p;
+ int err;
+
+ if (!entry->p) {
+ err = __snd_info_register(entry);
+ if (err < 0)
+ return err;
+ }
+
+ list_for_each_entry(p, &entry->children, list) {
+ err = snd_info_register(p);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
EXPORT_SYMBOL(snd_info_register);
+/**
+ * snd_card_rw_proc_new - Create a read/write text proc file entry for the card
+ * @card: the card instance
+ * @name: the file name
+ * @private_data: the arbitrary private data
+ * @read: the read callback
+ * @write: the write callback, NULL for read-only
+ *
+ * This proc file entry will be registered via the snd_card_register() call,
+ * and it will be removed automatically when the card is removed.
+ */
+int snd_card_rw_proc_new(struct snd_card *card, const char *name,
+ void *private_data,
+ void (*read)(struct snd_info_entry *,
+ struct snd_info_buffer *),
+ void (*write)(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer))
+{
+ struct snd_info_entry *entry;
+
+ entry = snd_info_create_card_entry(card, name, card->proc_root);
+ if (!entry)
+ return -ENOMEM;
+ snd_info_set_text_ops(entry, private_data, read);
+ if (write) {
+ entry->mode |= 0200;
+ entry->c.text.write = write;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_card_rw_proc_new);
+
/*
*/
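
From a driver's point of view the new helper collapses the old create/set_text_ops/register/free sequence into one call; registration is deferred to snd_card_register(), which now walks the proc tree recursively. A sketch of typical usage, where my_chip and the file name are illustrative and snd_card_ro_proc_new() is the read-only counterpart this patch pairs with snd_card_rw_proc_new():

#include <sound/core.h>
#include <sound/info.h>

struct my_chip { int level; };

static void my_chip_proc_read(struct snd_info_entry *entry,
			      struct snd_info_buffer *buffer)
{
	struct my_chip *chip = entry->private_data;

	snd_iprintf(buffer, "level: %d\n", chip->level);
}

static int my_chip_create_proc(struct snd_card *card, struct my_chip *chip)
{
	/* nothing to free by hand: the entry goes away with the card */
	return snd_card_ro_proc_new(card, "my-chip", chip, my_chip_proc_read);
}
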
diff --git a/sound/core/init.c b/sound/core/init.c
index 4849c611c0fe..0c4dc40376a7 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -100,31 +100,6 @@ int (*snd_mixer_oss_notify_callback)(struct snd_card *card, int free_flag);
EXPORT_SYMBOL(snd_mixer_oss_notify_callback);
#endif
-#ifdef CONFIG_SND_PROC_FS
-static void snd_card_id_read(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
-{
- snd_iprintf(buffer, "%s\n", entry->card->id);
-}
-
-static int init_info_for_card(struct snd_card *card)
-{
- struct snd_info_entry *entry;
-
- entry = snd_info_create_card_entry(card, "id", card->proc_root);
- if (!entry) {
- dev_dbg(card->dev, "unable to create card entry\n");
- return -ENOMEM;
- }
- entry->c.text.read = snd_card_id_read;
- card->proc_id = entry;
-
- return snd_info_card_register(card);
-}
-#else /* !CONFIG_SND_PROC_FS */
-#define init_info_for_card(card)
-#endif
-
static int check_empty_slot(struct module *module, int slot)
{
return !slots[slot] || !*slots[slot];
@@ -491,7 +466,6 @@ static int snd_card_do_free(struct snd_card *card)
snd_device_free_all(card);
if (card->private_free)
card->private_free(card);
- snd_info_free_entry(card->proc_id);
if (snd_info_card_free(card) < 0) {
dev_warn(card->dev, "unable to free card info\n");
/* Not fatal error */
@@ -795,7 +769,10 @@ int snd_card_register(struct snd_card *card)
}
snd_cards[card->number] = card;
mutex_unlock(&snd_card_mutex);
- init_info_for_card(card);
+ err = snd_info_card_register(card);
+ if (err < 0)
+ return err;
+
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
if (snd_mixer_oss_notify_callback)
snd_mixer_oss_notify_callback(card, SND_MIXER_OSS_NOTIFY_REGISTER);
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 59a4adc286ed..eb974235c92b 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -182,6 +182,8 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
+ if (WARN_ON(!device))
+ return -EINVAL;
dmab->dev.type = type;
dmab->dev.dev = device;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 467039b342b5..d5b0d7ba83c4 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2427,7 +2427,6 @@ static int snd_pcm_oss_open_file(struct file *file,
}
pcm_oss_file->streams[idx] = substream;
- substream->file = pcm_oss_file;
snd_pcm_oss_init_substream(substream, &setup[idx], minor);
}
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 01b9d62eef14..7b63aee124af 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -528,52 +528,44 @@ static int snd_pcm_stream_proc_init(struct snd_pcm_str *pstr)
if (!entry)
return -ENOMEM;
entry->mode = S_IFDIR | 0555;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- return -ENOMEM;
- }
pstr->proc_root = entry;
entry = snd_info_create_card_entry(pcm->card, "info", pstr->proc_root);
- if (entry) {
+ if (entry)
snd_info_set_text_ops(entry, pstr, snd_pcm_stream_proc_info_read);
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- pstr->proc_info_entry = entry;
-
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
entry = snd_info_create_card_entry(pcm->card, "xrun_debug",
pstr->proc_root);
if (entry) {
- entry->c.text.read = snd_pcm_xrun_debug_read;
+ snd_info_set_text_ops(entry, pstr, snd_pcm_xrun_debug_read);
entry->c.text.write = snd_pcm_xrun_debug_write;
entry->mode |= 0200;
- entry->private_data = pstr;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
}
- pstr->proc_xrun_debug_entry = entry;
#endif
return 0;
}
static int snd_pcm_stream_proc_done(struct snd_pcm_str *pstr)
{
-#ifdef CONFIG_SND_PCM_XRUN_DEBUG
- snd_info_free_entry(pstr->proc_xrun_debug_entry);
- pstr->proc_xrun_debug_entry = NULL;
-#endif
- snd_info_free_entry(pstr->proc_info_entry);
- pstr->proc_info_entry = NULL;
snd_info_free_entry(pstr->proc_root);
pstr->proc_root = NULL;
return 0;
}
+static struct snd_info_entry *
+create_substream_info_entry(struct snd_pcm_substream *substream,
+ const char *name,
+ void (*read)(struct snd_info_entry *,
+ struct snd_info_buffer *))
+{
+ struct snd_info_entry *entry;
+
+ entry = snd_info_create_card_entry(substream->pcm->card, name,
+ substream->proc_root);
+ if (entry)
+ snd_info_set_text_ops(entry, substream, read);
+ return entry;
+}
+
static int snd_pcm_substream_proc_init(struct snd_pcm_substream *substream)
{
struct snd_info_entry *entry;
@@ -588,101 +580,61 @@ static int snd_pcm_substream_proc_init(struct snd_pcm_substream *substream)
if (!entry)
return -ENOMEM;
entry->mode = S_IFDIR | 0555;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- return -ENOMEM;
- }
substream->proc_root = entry;
- entry = snd_info_create_card_entry(card, "info", substream->proc_root);
- if (entry) {
- snd_info_set_text_ops(entry, substream,
- snd_pcm_substream_proc_info_read);
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- substream->proc_info_entry = entry;
- entry = snd_info_create_card_entry(card, "hw_params",
- substream->proc_root);
- if (entry) {
- snd_info_set_text_ops(entry, substream,
- snd_pcm_substream_proc_hw_params_read);
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- substream->proc_hw_params_entry = entry;
- entry = snd_info_create_card_entry(card, "sw_params",
- substream->proc_root);
- if (entry) {
- snd_info_set_text_ops(entry, substream,
- snd_pcm_substream_proc_sw_params_read);
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- substream->proc_sw_params_entry = entry;
- entry = snd_info_create_card_entry(card, "status",
- substream->proc_root);
- if (entry) {
- snd_info_set_text_ops(entry, substream,
- snd_pcm_substream_proc_status_read);
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- substream->proc_status_entry = entry;
+
+ create_substream_info_entry(substream, "info",
+ snd_pcm_substream_proc_info_read);
+ create_substream_info_entry(substream, "hw_params",
+ snd_pcm_substream_proc_hw_params_read);
+ create_substream_info_entry(substream, "sw_params",
+ snd_pcm_substream_proc_sw_params_read);
+ create_substream_info_entry(substream, "status",
+ snd_pcm_substream_proc_status_read);
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
- entry = snd_info_create_card_entry(card, "xrun_injection",
- substream->proc_root);
+ entry = create_substream_info_entry(substream, "xrun_injection", NULL);
if (entry) {
- entry->private_data = substream;
- entry->c.text.read = NULL;
entry->c.text.write = snd_pcm_xrun_injection_write;
entry->mode = S_IFREG | 0200;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
}
- substream->proc_xrun_injection_entry = entry;
#endif /* CONFIG_SND_PCM_XRUN_DEBUG */
return 0;
}
-static int snd_pcm_substream_proc_done(struct snd_pcm_substream *substream)
-{
- snd_info_free_entry(substream->proc_info_entry);
- substream->proc_info_entry = NULL;
- snd_info_free_entry(substream->proc_hw_params_entry);
- substream->proc_hw_params_entry = NULL;
- snd_info_free_entry(substream->proc_sw_params_entry);
- substream->proc_sw_params_entry = NULL;
- snd_info_free_entry(substream->proc_status_entry);
- substream->proc_status_entry = NULL;
-#ifdef CONFIG_SND_PCM_XRUN_DEBUG
- snd_info_free_entry(substream->proc_xrun_injection_entry);
- substream->proc_xrun_injection_entry = NULL;
-#endif
- snd_info_free_entry(substream->proc_root);
- substream->proc_root = NULL;
- return 0;
-}
#else /* !CONFIG_SND_VERBOSE_PROCFS */
static inline int snd_pcm_stream_proc_init(struct snd_pcm_str *pstr) { return 0; }
static inline int snd_pcm_stream_proc_done(struct snd_pcm_str *pstr) { return 0; }
static inline int snd_pcm_substream_proc_init(struct snd_pcm_substream *substream) { return 0; }
-static inline int snd_pcm_substream_proc_done(struct snd_pcm_substream *substream) { return 0; }
#endif /* CONFIG_SND_VERBOSE_PROCFS */
static const struct attribute_group *pcm_dev_attr_groups[];
+/*
+ * PM callbacks: we need to deal only with suspend here, as the resume is
+ * triggered either from user-space or the driver's resume callback
+ */
+#ifdef CONFIG_PM_SLEEP
+static int do_pcm_suspend(struct device *dev)
+{
+ struct snd_pcm_str *pstr = container_of(dev, struct snd_pcm_str, dev);
+
+ if (!pstr->pcm->no_device_suspend)
+ snd_pcm_suspend_all(pstr->pcm);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops pcm_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(do_pcm_suspend, NULL)
+};
+
+/* device type for PCM -- basically only for passing PM callbacks */
+static const struct device_type pcm_dev_type = {
+ .name = "pcm",
+ .pm = &pcm_dev_pm_ops,
+};
+
/**
* snd_pcm_new_stream - create a new PCM stream
* @pcm: the pcm instance
@@ -713,6 +665,7 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
snd_device_initialize(&pstr->dev, pcm->card);
pstr->dev.groups = pcm_dev_attr_groups;
+ pstr->dev.type = &pcm_dev_type;
dev_set_name(&pstr->dev, "pcmC%iD%i%c", pcm->card->number, pcm->device,
stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c');
@@ -753,9 +706,7 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
}
}
substream->group = &substream->self_group;
- spin_lock_init(&substream->self_group.lock);
- mutex_init(&substream->self_group.mutex);
- INIT_LIST_HEAD(&substream->self_group.substreams);
+ snd_pcm_group_init(&substream->self_group);
list_add_tail(&substream->link_list, &substream->self_group.substreams);
atomic_set(&substream->mmap_count, 0);
prev = substream;
@@ -885,15 +836,17 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
struct snd_pcm_oss_setup *setup, *setupn;
#endif
+
+ /* free all proc files under the stream */
+ snd_pcm_stream_proc_done(pstr);
+
substream = pstr->substream;
while (substream) {
substream_next = substream->next;
snd_pcm_timer_done(substream);
- snd_pcm_substream_proc_done(substream);
kfree(substream);
substream = substream_next;
}
- snd_pcm_stream_proc_done(pstr);
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
for (setup = pstr->oss.setup_list; setup; setup = setupn) {
setupn = setup->next;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 40013b26f671..345ab1ab2cac 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2176,17 +2176,16 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
snd_pcm_update_hw_ptr(substream);
+ /*
+ * If size < start_threshold, wait indefinitely; another
+ * thread may start the capture stream instead.
+ */
if (!is_playback &&
- runtime->status->state == SNDRV_PCM_STATE_PREPARED) {
- if (size >= runtime->start_threshold) {
- err = snd_pcm_start(substream);
- if (err < 0)
- goto _end_unlock;
- } else {
- /* nothing to do */
- err = 0;
+ runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
+ size >= runtime->start_threshold) {
+ err = snd_pcm_start(substream);
+ if (err < 0)
goto _end_unlock;
- }
}
avail = snd_pcm_avail(substream);
@@ -2219,9 +2218,8 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
if (frames > cont)
frames = cont;
if (snd_BUG_ON(!frames)) {
- runtime->twake = 0;
- snd_pcm_stream_unlock_irq(substream);
- return -EINVAL;
+ err = -EINVAL;
+ goto _end_unlock;
}
snd_pcm_stream_unlock_irq(substream);
err = writer(substream, appl_ofs, data, offset, frames,
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
index c515612969a4..0b4b5dfaec18 100644
--- a/sound/core/pcm_local.h
+++ b/sound/core/pcm_local.h
@@ -66,5 +66,6 @@ static inline void snd_pcm_timer_done(struct snd_pcm_substream *substream) {}
#endif
void __snd_pcm_xrun(struct snd_pcm_substream *substream);
+void snd_pcm_group_init(struct snd_pcm_group *group);
#endif /* __SOUND_CORE_PCM_LOCAL_H */
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 4b5356a10315..ed73be80bd29 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -87,19 +87,10 @@ static void snd_pcm_lib_preallocate_dma_free(struct snd_pcm_substream *substream
* @substream: the pcm substream instance
*
* Releases the pre-allocated buffer of the given substream.
- *
- * Return: Zero if successful, or a negative error code on failure.
*/
-int snd_pcm_lib_preallocate_free(struct snd_pcm_substream *substream)
+void snd_pcm_lib_preallocate_free(struct snd_pcm_substream *substream)
{
snd_pcm_lib_preallocate_dma_free(substream);
-#ifdef CONFIG_SND_VERBOSE_PROCFS
- snd_info_free_entry(substream->proc_prealloc_max_entry);
- substream->proc_prealloc_max_entry = NULL;
- snd_info_free_entry(substream->proc_prealloc_entry);
- substream->proc_prealloc_entry = NULL;
-#endif
- return 0;
}
/**
@@ -107,10 +98,8 @@ int snd_pcm_lib_preallocate_free(struct snd_pcm_substream *substream)
* @pcm: the pcm instance
*
* Releases all the pre-allocated buffers on the given pcm.
- *
- * Return: Zero if successful, or a negative error code on failure.
*/
-int snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm)
+void snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int stream;
@@ -118,7 +107,6 @@ int snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm)
for (stream = 0; stream < 2; stream++)
for (substream = pcm->streams[stream].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_free(substream);
- return 0;
}
EXPORT_SYMBOL(snd_pcm_lib_preallocate_free_for_all);
@@ -198,26 +186,19 @@ static inline void preallocate_info_init(struct snd_pcm_substream *substream)
{
struct snd_info_entry *entry;
- if ((entry = snd_info_create_card_entry(substream->pcm->card, "prealloc", substream->proc_root)) != NULL) {
- entry->c.text.read = snd_pcm_lib_preallocate_proc_read;
+ entry = snd_info_create_card_entry(substream->pcm->card, "prealloc",
+ substream->proc_root);
+ if (entry) {
+ snd_info_set_text_ops(entry, substream,
+ snd_pcm_lib_preallocate_proc_read);
entry->c.text.write = snd_pcm_lib_preallocate_proc_write;
entry->mode |= 0200;
- entry->private_data = substream;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
}
- substream->proc_prealloc_entry = entry;
- if ((entry = snd_info_create_card_entry(substream->pcm->card, "prealloc_max", substream->proc_root)) != NULL) {
- entry->c.text.read = snd_pcm_lib_preallocate_max_proc_read;
- entry->private_data = substream;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- substream->proc_prealloc_max_entry = entry;
+ entry = snd_info_create_card_entry(substream->pcm->card, "prealloc_max",
+ substream->proc_root);
+ if (entry)
+ snd_info_set_text_ops(entry, substream,
+ snd_pcm_lib_preallocate_max_proc_read);
}
#else /* !CONFIG_SND_VERBOSE_PROCFS */
@@ -227,7 +208,7 @@ static inline void preallocate_info_init(struct snd_pcm_substream *substream)
/*
* pre-allocate the buffer and create a proc file for the substream
*/
-static int snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
+static void snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
size_t size, size_t max)
{
@@ -238,7 +219,6 @@ static int snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
substream->buffer_bytes_max = substream->dma_buffer.bytes;
substream->dma_max = max;
preallocate_info_init(substream);
- return 0;
}
@@ -251,16 +231,14 @@ static int snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
* @max: the max. allowed pre-allocation size
*
* Do pre-allocation for the given DMA buffer type.
- *
- * Return: Zero if successful, or a negative error code on failure.
*/
-int snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream,
+void snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream,
int type, struct device *data,
size_t size, size_t max)
{
substream->dma_buffer.dev.type = type;
substream->dma_buffer.dev.dev = data;
- return snd_pcm_lib_preallocate_pages1(substream, size, max);
+ snd_pcm_lib_preallocate_pages1(substream, size, max);
}
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages);
@@ -274,21 +252,17 @@ EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages);
*
* Do pre-allocation to all substreams of the given pcm for the
* specified DMA type.
- *
- * Return: Zero if successful, or a negative error code on failure.
*/
-int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
+void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int type, void *data,
size_t size, size_t max)
{
struct snd_pcm_substream *substream;
- int stream, err;
+ int stream;
for (stream = 0; stream < 2; stream++)
for (substream = pcm->streams[stream].substream; substream; substream = substream->next)
- if ((err = snd_pcm_lib_preallocate_pages(substream, type, data, size, max)) < 0)
- return err;
- return 0;
+ snd_pcm_lib_preallocate_pages(substream, type, data, size, max);
}
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 818dff1de545..f731f904e8cc 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -85,71 +85,30 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
*
*/
-static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);
-/* Writer in rwsem may block readers even during its waiting in queue,
- * and this may lead to a deadlock when the code path takes read sem
- * twice (e.g. one in snd_pcm_action_nonatomic() and another in
- * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
- * sleep until all the readers are completed without blocking by writer.
- */
-static inline void down_write_nonfifo(struct rw_semaphore *lock)
+void snd_pcm_group_init(struct snd_pcm_group *group)
{
- while (!down_write_trylock(lock))
- msleep(1);
+ spin_lock_init(&group->lock);
+ mutex_init(&group->mutex);
+ INIT_LIST_HEAD(&group->substreams);
+ refcount_set(&group->refs, 0);
}
-#define PCM_LOCK_DEFAULT 0
-#define PCM_LOCK_IRQ 1
-#define PCM_LOCK_IRQSAVE 2
-
-static unsigned long __snd_pcm_stream_lock_mode(struct snd_pcm_substream *substream,
- unsigned int mode)
-{
- unsigned long flags = 0;
- if (substream->pcm->nonatomic) {
- down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
- mutex_lock(&substream->self_group.mutex);
- } else {
- switch (mode) {
- case PCM_LOCK_DEFAULT:
- read_lock(&snd_pcm_link_rwlock);
- break;
- case PCM_LOCK_IRQ:
- read_lock_irq(&snd_pcm_link_rwlock);
- break;
- case PCM_LOCK_IRQSAVE:
- read_lock_irqsave(&snd_pcm_link_rwlock, flags);
- break;
- }
- spin_lock(&substream->self_group.lock);
- }
- return flags;
+/* define group lock helpers */
+#define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
+static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
+{ \
+ if (nonatomic) \
+ mutex_ ## mutex_action(&group->mutex); \
+ else \
+ spin_ ## action(&group->lock); \
}
-static void __snd_pcm_stream_unlock_mode(struct snd_pcm_substream *substream,
- unsigned int mode, unsigned long flags)
-{
- if (substream->pcm->nonatomic) {
- mutex_unlock(&substream->self_group.mutex);
- up_read(&snd_pcm_link_rwsem);
- } else {
- spin_unlock(&substream->self_group.lock);
-
- switch (mode) {
- case PCM_LOCK_DEFAULT:
- read_unlock(&snd_pcm_link_rwlock);
- break;
- case PCM_LOCK_IRQ:
- read_unlock_irq(&snd_pcm_link_rwlock);
- break;
- case PCM_LOCK_IRQSAVE:
- read_unlock_irqrestore(&snd_pcm_link_rwlock, flags);
- break;
- }
- }
-}
+DEFINE_PCM_GROUP_LOCK(lock, lock);
+DEFINE_PCM_GROUP_LOCK(unlock, unlock);
+DEFINE_PCM_GROUP_LOCK(lock_irq, lock);
+DEFINE_PCM_GROUP_LOCK(unlock_irq, unlock);
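For reference, a sketch of what the first invocation above expands to; nonatomic PCMs take the group mutex, atomic ones the spinlock (the *_irq variants differ only in using spin_lock_irq()/spin_unlock_irq() on the spinlock side):

static void snd_pcm_group_lock(struct snd_pcm_group *group, bool nonatomic)
{
	if (nonatomic)
		mutex_lock(&group->mutex);
	else
		spin_lock(&group->lock);
}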
/**
* snd_pcm_stream_lock - Lock the PCM stream
@@ -161,7 +120,7 @@ static void __snd_pcm_stream_unlock_mode(struct snd_pcm_substream *substream,
*/
void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
- __snd_pcm_stream_lock_mode(substream, PCM_LOCK_DEFAULT);
+ snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
@@ -173,7 +132,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
*/
void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
{
- __snd_pcm_stream_unlock_mode(substream, PCM_LOCK_DEFAULT, 0);
+ snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
@@ -187,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
*/
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
- __snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQ);
+ snd_pcm_group_lock_irq(&substream->self_group,
+ substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
@@ -199,13 +159,19 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
*/
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
- __snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQ, 0);
+ snd_pcm_group_unlock_irq(&substream->self_group,
+ substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
{
- return __snd_pcm_stream_lock_mode(substream, PCM_LOCK_IRQSAVE);
+ unsigned long flags = 0;
+ if (substream->pcm->nonatomic)
+ mutex_lock(&substream->self_group.mutex);
+ else
+ spin_lock_irqsave(&substream->self_group.lock, flags);
+ return flags;
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
@@ -219,7 +185,10 @@ EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags)
{
- __snd_pcm_stream_unlock_mode(substream, PCM_LOCK_IRQSAVE, flags);
+ if (substream->pcm->nonatomic)
+ mutex_unlock(&substream->self_group.mutex);
+ else
+ spin_unlock_irqrestore(&substream->self_group.lock, flags);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
@@ -1124,6 +1093,68 @@ static int snd_pcm_action_single(const struct action_ops *ops,
return res;
}
+static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
+ struct snd_pcm_group *new_group)
+{
+ substream->group = new_group;
+ list_move(&substream->link_list, &new_group->substreams);
+}
+
+/*
+ * Unref and unlock the group, but keep the stream lock;
+ * when the group becomes empty and no longer referred, destroy itself
+ */
+static void snd_pcm_group_unref(struct snd_pcm_group *group,
+ struct snd_pcm_substream *substream)
+{
+ bool do_free;
+
+ if (!group)
+ return;
+ do_free = refcount_dec_and_test(&group->refs) &&
+ list_empty(&group->substreams);
+ snd_pcm_group_unlock(group, substream->pcm->nonatomic);
+ if (do_free)
+ kfree(group);
+}
+
+/*
+ * Lock the group inside a stream lock and reference it;
+ * return the locked group object, or NULL if not linked
+ */
+static struct snd_pcm_group *
+snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
+{
+ bool nonatomic = substream->pcm->nonatomic;
+ struct snd_pcm_group *group;
+ bool trylock;
+
+ for (;;) {
+ if (!snd_pcm_stream_linked(substream))
+ return NULL;
+ group = substream->group;
+ /* block freeing the group object */
+ refcount_inc(&group->refs);
+
+ trylock = nonatomic ? mutex_trylock(&group->mutex) :
+ spin_trylock(&group->lock);
+ if (trylock)
+ break; /* OK */
+
+ /* re-lock to avoid an ABBA deadlock */
+ snd_pcm_stream_unlock(substream);
+ snd_pcm_group_lock(group, nonatomic);
+ snd_pcm_stream_lock(substream);
+
+ /* check the group again; the above opens a small race window */
+ if (substream->group == group)
+ break; /* OK */
+ /* group changed, try again */
+ snd_pcm_group_unref(group, substream);
+ }
+ return group;
+}
+
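The ref/unref pair above fixes the calling convention for group-wide operations: enter with the stream lock held, resolve and lock the group, act on it, then drop the reference, which releases the group lock but keeps the stream lock. A hypothetical caller (the real one is snd_pcm_action() just below) would look like:

static int example_group_wide_op(struct snd_pcm_substream *substream)
{
	struct snd_pcm_group *group;

	snd_pcm_stream_lock_irq(substream);
	group = snd_pcm_stream_group_ref(substream);
	if (group) {
		/* all linked substreams are stable here; act on the whole group */
	}
	snd_pcm_group_unref(group, substream);	/* NULL-safe when not linked */
	snd_pcm_stream_unlock_irq(substream);
	return 0;
}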
/*
* Note: call with stream lock
*/
@@ -1131,28 +1162,15 @@ static int snd_pcm_action(const struct action_ops *ops,
struct snd_pcm_substream *substream,
int state)
{
+ struct snd_pcm_group *group;
int res;
- if (!snd_pcm_stream_linked(substream))
- return snd_pcm_action_single(ops, substream, state);
-
- if (substream->pcm->nonatomic) {
- if (!mutex_trylock(&substream->group->mutex)) {
- mutex_unlock(&substream->self_group.mutex);
- mutex_lock(&substream->group->mutex);
- mutex_lock(&substream->self_group.mutex);
- }
+ group = snd_pcm_stream_group_ref(substream);
+ if (group)
res = snd_pcm_action_group(ops, substream, state, 1);
- mutex_unlock(&substream->group->mutex);
- } else {
- if (!spin_trylock(&substream->group->lock)) {
- spin_unlock(&substream->self_group.lock);
- spin_lock(&substream->group->lock);
- spin_lock(&substream->self_group.lock);
- }
- res = snd_pcm_action_group(ops, substream, state, 1);
- spin_unlock(&substream->group->lock);
- }
+ else
+ res = snd_pcm_action_single(ops, substream, state);
+ snd_pcm_group_unref(group, substream);
return res;
}
@@ -1179,6 +1197,7 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
{
int res;
+ /* Guarantee the group members won't change during non-atomic action */
down_read(&snd_pcm_link_rwsem);
if (snd_pcm_stream_linked(substream))
res = snd_pcm_action_group(ops, substream, state, 0);
@@ -1460,29 +1479,24 @@ static const struct action_ops snd_pcm_action_suspend = {
.post_action = snd_pcm_post_suspend
};
-/**
+/*
* snd_pcm_suspend - trigger SUSPEND to all linked streams
* @substream: the PCM substream
*
* After this call, all streams are changed to SUSPENDED state.
*
- * Return: Zero if successful (or @substream is %NULL), or a negative error
- * code.
+ * Return: Zero if successful, or a negative error code.
*/
-int snd_pcm_suspend(struct snd_pcm_substream *substream)
+static int snd_pcm_suspend(struct snd_pcm_substream *substream)
{
int err;
unsigned long flags;
- if (! substream)
- return 0;
-
snd_pcm_stream_lock_irqsave(substream, flags);
err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
snd_pcm_stream_unlock_irqrestore(substream, flags);
return err;
}
-EXPORT_SYMBOL(snd_pcm_suspend);
/**
* snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
@@ -1506,6 +1520,14 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
/* FIXME: the open/close code should lock this as well */
if (substream->runtime == NULL)
continue;
+
+ /*
+ * Skip BE DAI link PCMs that are internal and may
+ * not have their substream ops set.
+ */
+ if (!substream->ops)
+ continue;
+
err = snd_pcm_suspend(substream);
if (err < 0 && err != -EBUSY)
return err;
@@ -1792,8 +1814,6 @@ static const struct action_ops snd_pcm_action_drain_init = {
.post_action = snd_pcm_post_drain_init
};
-static int snd_pcm_drop(struct snd_pcm_substream *substream);
-
/*
* Drain the stream(s).
* When the substream is linked, sync until the draining of all playback streams
@@ -1807,6 +1827,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
struct snd_card *card;
struct snd_pcm_runtime *runtime;
struct snd_pcm_substream *s;
+ struct snd_pcm_group *group;
wait_queue_entry_t wait;
int result = 0;
int nonblock = 0;
@@ -1823,7 +1844,6 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
} else if (substream->f_flags & O_NONBLOCK)
nonblock = 1;
- down_read(&snd_pcm_link_rwsem);
snd_pcm_stream_lock_irq(substream);
/* resume pause */
if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
@@ -1848,6 +1868,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
}
/* find a substream to drain */
to_check = NULL;
+ group = snd_pcm_stream_group_ref(substream);
snd_pcm_group_for_each_entry(s, substream) {
if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
continue;
@@ -1857,12 +1878,12 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
break;
}
}
+ snd_pcm_group_unref(group, substream);
if (!to_check)
break; /* all drained */
init_waitqueue_entry(&wait, current);
add_wait_queue(&to_check->sleep, &wait);
snd_pcm_stream_unlock_irq(substream);
- up_read(&snd_pcm_link_rwsem);
if (runtime->no_period_wakeup)
tout = MAX_SCHEDULE_TIMEOUT;
else {
@@ -1874,9 +1895,17 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
tout = msecs_to_jiffies(tout * 1000);
}
tout = schedule_timeout_interruptible(tout);
- down_read(&snd_pcm_link_rwsem);
+
snd_pcm_stream_lock_irq(substream);
- remove_wait_queue(&to_check->sleep, &wait);
+ group = snd_pcm_stream_group_ref(substream);
+ snd_pcm_group_for_each_entry(s, substream) {
+ if (s->runtime == to_check) {
+ remove_wait_queue(&to_check->sleep, &wait);
+ break;
+ }
+ }
+ snd_pcm_group_unref(group, substream);
+
if (card->shutdown) {
result = -ENODEV;
break;
@@ -1896,7 +1925,6 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
unlock:
snd_pcm_stream_unlock_irq(substream);
- up_read(&snd_pcm_link_rwsem);
return result;
}
@@ -1935,13 +1963,19 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
static bool is_pcm_file(struct file *file)
{
struct inode *inode = file_inode(file);
+ struct snd_pcm *pcm;
unsigned int minor;
if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
return false;
minor = iminor(inode);
- return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) ||
- snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
+ pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
+ if (!pcm)
+ pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
+ if (!pcm)
+ return false;
+ snd_card_unref(pcm->card);
+ return true;
}
/*
@@ -1952,7 +1986,8 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
int res = 0;
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream1;
- struct snd_pcm_group *group;
+ struct snd_pcm_group *group, *target_group;
+ bool nonatomic = substream->pcm->nonatomic;
struct fd f = fdget(fd);
if (!f.file)
@@ -1963,13 +1998,14 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
}
pcm_file = f.file->private_data;
substream1 = pcm_file->substream;
- group = kmalloc(sizeof(*group), GFP_KERNEL);
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group) {
res = -ENOMEM;
goto _nolock;
}
- down_write_nonfifo(&snd_pcm_link_rwsem);
- write_lock_irq(&snd_pcm_link_rwlock);
+ snd_pcm_group_init(group);
+
+ down_write(&snd_pcm_link_rwsem);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
substream->runtime->status->state != substream1->runtime->status->state ||
substream->pcm->nonatomic != substream1->pcm->nonatomic) {
@@ -1980,23 +2016,23 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
res = -EALREADY;
goto _end;
}
+
+ snd_pcm_stream_lock_irq(substream);
if (!snd_pcm_stream_linked(substream)) {
- substream->group = group;
- group = NULL;
- spin_lock_init(&substream->group->lock);
- mutex_init(&substream->group->mutex);
- INIT_LIST_HEAD(&substream->group->substreams);
- list_add_tail(&substream->link_list, &substream->group->substreams);
- substream->group->count = 1;
- }
- list_add_tail(&substream1->link_list, &substream->group->substreams);
- substream->group->count++;
- substream1->group = substream->group;
+ snd_pcm_group_assign(substream, group);
+ group = NULL; /* assigned, don't free this one below */
+ }
+ target_group = substream->group;
+ snd_pcm_stream_unlock_irq(substream);
+
+ snd_pcm_group_lock_irq(target_group, nonatomic);
+ snd_pcm_stream_lock(substream1);
+ snd_pcm_group_assign(substream1, target_group);
+ snd_pcm_stream_unlock(substream1);
+ snd_pcm_group_unlock_irq(target_group, nonatomic);
_end:
- write_unlock_irq(&snd_pcm_link_rwlock);
up_write(&snd_pcm_link_rwsem);
_nolock:
- snd_card_unref(substream1->pcm->card);
kfree(group);
_badf:
fdput(f);
@@ -2005,34 +2041,43 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
static void relink_to_local(struct snd_pcm_substream *substream)
{
- substream->group = &substream->self_group;
- INIT_LIST_HEAD(&substream->self_group.substreams);
- list_add_tail(&substream->link_list, &substream->self_group.substreams);
+ snd_pcm_stream_lock(substream);
+ snd_pcm_group_assign(substream, &substream->self_group);
+ snd_pcm_stream_unlock(substream);
}
static int snd_pcm_unlink(struct snd_pcm_substream *substream)
{
- struct snd_pcm_substream *s;
+ struct snd_pcm_group *group;
+ bool nonatomic = substream->pcm->nonatomic;
+ bool do_free = false;
int res = 0;
- down_write_nonfifo(&snd_pcm_link_rwsem);
- write_lock_irq(&snd_pcm_link_rwlock);
+ down_write(&snd_pcm_link_rwsem);
+
if (!snd_pcm_stream_linked(substream)) {
res = -EALREADY;
goto _end;
}
- list_del(&substream->link_list);
- substream->group->count--;
- if (substream->group->count == 1) { /* detach the last stream, too */
- snd_pcm_group_for_each_entry(s, substream) {
- relink_to_local(s);
- break;
- }
- kfree(substream->group);
- }
+
+ group = substream->group;
+ snd_pcm_group_lock_irq(group, nonatomic);
+
relink_to_local(substream);
+
+ /* detach the last stream, too */
+ if (list_is_singular(&group->substreams)) {
+ relink_to_local(list_first_entry(&group->substreams,
+ struct snd_pcm_substream,
+ link_list));
+ do_free = !refcount_read(&group->refs);
+ }
+
+ snd_pcm_group_unlock_irq(group, nonatomic);
+ if (do_free)
+ kfree(group);
+
_end:
- write_unlock_irq(&snd_pcm_link_rwlock);
up_write(&snd_pcm_link_rwsem);
return res;
}
@@ -2457,10 +2502,8 @@ static int snd_pcm_open_file(struct file *file,
return -ENOMEM;
}
pcm_file->substream = substream;
- if (substream->ref_count == 1) {
- substream->file = pcm_file;
+ if (substream->ref_count == 1)
substream->pcm_release = pcm_release_private;
- }
file->private_data = pcm_file;
return 0;
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 1e34e6381baa..8c3fbe1276be 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -1133,16 +1133,10 @@ static void print_cable_info(struct snd_info_entry *entry,
static int loopback_proc_new(struct loopback *loopback, int cidx)
{
char name[32];
- struct snd_info_entry *entry;
- int err;
snprintf(name, sizeof(name), "cable#%d", cidx);
- err = snd_card_proc_new(loopback->card, name, &entry);
- if (err < 0)
- return err;
-
- snd_info_set_text_ops(entry, loopback, print_cable_info);
- return 0;
+ return snd_card_ro_proc_new(loopback->card, name, loopback,
+ print_cable_info);
}
static int loopback_probe(struct platform_device *devptr)
@@ -1200,12 +1194,8 @@ static int loopback_remove(struct platform_device *devptr)
static int loopback_suspend(struct device *pdev)
{
struct snd_card *card = dev_get_drvdata(pdev);
- struct loopback *loopback = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
-
- snd_pcm_suspend_all(loopback->pcm[0]);
- snd_pcm_suspend_all(loopback->pcm[1]);
return 0;
}
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 9af154db530a..2672c2e13334 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -1037,14 +1037,8 @@ static void dummy_proc_write(struct snd_info_entry *entry,
static void dummy_proc_init(struct snd_dummy *chip)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(chip->card, "dummy_pcm", &entry)) {
- snd_info_set_text_ops(entry, chip, dummy_proc_read);
- entry->c.text.write = dummy_proc_write;
- entry->mode |= 0200;
- entry->private_data = chip;
- }
+ snd_card_rw_proc_new(chip->card, "dummy_pcm", chip,
+ dummy_proc_read, dummy_proc_write);
}
#else
#define dummy_proc_init(x)
@@ -1138,10 +1132,8 @@ static int snd_dummy_remove(struct platform_device *devptr)
static int snd_dummy_suspend(struct device *pdev)
{
struct snd_card *card = dev_get_drvdata(pdev);
- struct snd_dummy *dummy = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(dummy->pcm);
return 0;
}
diff --git a/sound/drivers/opl4/opl4_proc.c b/sound/drivers/opl4/opl4_proc.c
index 16b24091d799..f1b839a0e7b7 100644
--- a/sound/drivers/opl4/opl4_proc.c
+++ b/sound/drivers/opl4/opl4_proc.c
@@ -114,10 +114,6 @@ int snd_opl4_create_proc(struct snd_opl4 *opl4)
entry->c.ops = &snd_opl4_mem_proc_ops;
entry->module = THIS_MODULE;
entry->private_data = opl4;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
}
opl4->proc_entry = entry;
return 0;
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 0dd3f46eb03e..d83ad3820f02 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -197,7 +197,6 @@ static int pcsp_suspend(struct device *dev)
{
struct snd_pcsp *chip = dev_get_drvdata(dev);
pcsp_stop_beep(chip);
- snd_pcm_suspend_all(chip->pcm);
return 0;
}
diff --git a/sound/drivers/vx/vx_core.c b/sound/drivers/vx/vx_core.c
index 04368dd59a4c..543945643a76 100644
--- a/sound/drivers/vx/vx_core.c
+++ b/sound/drivers/vx/vx_core.c
@@ -643,10 +643,7 @@ static void vx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *b
static void vx_proc_init(struct vx_core *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "vx-status", &entry))
- snd_info_set_text_ops(entry, chip, vx_proc_read);
+ snd_card_ro_proc_new(chip->card, "vx-status", chip, vx_proc_read);
}
@@ -732,12 +729,8 @@ EXPORT_SYMBOL(snd_vx_dsp_load);
*/
int snd_vx_suspend(struct vx_core *chip)
{
- unsigned int i;
-
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
chip->chip_status |= VX_STAT_IN_SUSPEND;
- for (i = 0; i < chip->hw->num_codecs; i++)
- snd_pcm_suspend_all(chip->pcm[i]);
return 0;
}
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 052e00590259..b9e96d0b3a0a 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -163,5 +163,6 @@ config SND_FIREFACE
Say Y here to include support for RME fireface series.
* Fireface 400
* Fireface 800
+ * Fireface UCX
endif # SND_FIREWIRE
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index d91874275d2c..5b46e8dcc2dd 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -448,7 +448,19 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* Focusrite, SaffirePro 26 I/O */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
/* Focusrite, SaffirePro 10 I/O */
- SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
+ {
+ // The combination of vendor_id and model_id is the same as the
+ // one of Liquid Saffire 56.
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = VEN_FOCUSRITE,
+ .model_id = 0x000006,
+ .specifier_id = 0x00a02d,
+ .version = 0x010001,
+ .driver_data = (kernel_ulong_t)&saffirepro_10_spec,
+ },
/* Focusrite, Saffire(no label and LE) */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
&saffire_spec),
diff --git a/sound/firewire/bebob/bebob_proc.c b/sound/firewire/bebob/bebob_proc.c
index 8096891af913..05e2a1c6326c 100644
--- a/sound/firewire/bebob/bebob_proc.c
+++ b/sound/firewire/bebob/bebob_proc.c
@@ -163,12 +163,8 @@ add_node(struct snd_bebob *bebob, struct snd_info_entry *root, const char *name,
struct snd_info_entry *entry;
entry = snd_info_create_card_entry(bebob->card, name, root);
- if (entry == NULL)
- return;
-
- snd_info_set_text_ops(entry, bebob, op);
- if (snd_info_register(entry) < 0)
- snd_info_free_entry(entry);
+ if (entry)
+ snd_info_set_text_ops(entry, bebob, op);
}
void snd_bebob_proc_init(struct snd_bebob *bebob)
@@ -184,10 +180,6 @@ void snd_bebob_proc_init(struct snd_bebob *bebob)
if (root == NULL)
return;
root->mode = S_IFDIR | 0555;
- if (snd_info_register(root) < 0) {
- snd_info_free_entry(root);
- return;
- }
add_node(bebob, root, "clock", proc_read_clock);
add_node(bebob, root, "firmware", proc_read_hw_info);
diff --git a/sound/firewire/dice/dice-proc.c b/sound/firewire/dice/dice-proc.c
index bb870fc73f99..9b1d509c6320 100644
--- a/sound/firewire/dice/dice-proc.c
+++ b/sound/firewire/dice/dice-proc.c
@@ -285,12 +285,8 @@ static void add_node(struct snd_dice *dice, struct snd_info_entry *root,
struct snd_info_entry *entry;
entry = snd_info_create_card_entry(dice->card, name, root);
- if (!entry)
- return;
-
- snd_info_set_text_ops(entry, dice, op);
- if (snd_info_register(entry) < 0)
- snd_info_free_entry(entry);
+ if (entry)
+ snd_info_set_text_ops(entry, dice, op);
}
void snd_dice_create_proc(struct snd_dice *dice)
@@ -306,10 +302,6 @@ void snd_dice_create_proc(struct snd_dice *dice)
if (!root)
return;
root->mode = S_IFDIR | 0555;
- if (snd_info_register(root) < 0) {
- snd_info_free_entry(root);
- return;
- }
add_node(dice, root, "dice", dice_proc_read);
add_node(dice, root, "formation", dice_proc_read_formation);
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index ed50b222d36e..eee184b05d93 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -18,6 +18,7 @@ MODULE_LICENSE("GPL v2");
#define OUI_ALESIS 0x000595
#define OUI_MAUDIO 0x000d6c
#define OUI_MYTEK 0x001ee8
+#define OUI_SSL 0x0050c2 // Actually an ID reserved by the IEEE.
#define DICE_CATEGORY_ID 0x04
#define WEISS_CATEGORY_ID 0x00
@@ -196,7 +197,7 @@ static int dice_probe(struct fw_unit *unit,
struct snd_dice *dice;
int err;
- if (!entry->driver_data) {
+ if (!entry->driver_data && entry->vendor_id != OUI_SSL) {
err = check_dice_category(unit);
if (err < 0)
return -ENODEV;
@@ -361,6 +362,15 @@ static const struct ieee1394_device_id dice_id_table[] = {
.model_id = 0x000002,
.driver_data = (kernel_ulong_t)snd_dice_detect_mytek_formats,
},
+ // Solid State Logic, Duende Classic and Mini.
+ // NOTE: each field of GUID in config ROM is not compliant to standard
+ // DICE scheme.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = OUI_SSL,
+ .model_id = 0x000070,
+ },
{
.match_flags = IEEE1394_MATCH_VERSION,
.version = DICE_INTERFACE,
diff --git a/sound/firewire/digi00x/digi00x-proc.c b/sound/firewire/digi00x/digi00x-proc.c
index 6996d5a6ff5f..d22e8675b10f 100644
--- a/sound/firewire/digi00x/digi00x-proc.c
+++ b/sound/firewire/digi00x/digi00x-proc.c
@@ -80,20 +80,8 @@ void snd_dg00x_proc_init(struct snd_dg00x *dg00x)
return;
root->mode = S_IFDIR | 0555;
- if (snd_info_register(root) < 0) {
- snd_info_free_entry(root);
- return;
- }
entry = snd_info_create_card_entry(dg00x->card, "clock", root);
- if (entry == NULL) {
- snd_info_free_entry(root);
- return;
- }
-
- snd_info_set_text_ops(entry, dg00x, proc_read_clock);
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- snd_info_free_entry(root);
- }
+ if (entry)
+ snd_info_set_text_ops(entry, dg00x, proc_read_clock);
}
diff --git a/sound/firewire/fireface/Makefile b/sound/firewire/fireface/Makefile
index 79a7d6d99d72..d64f4e2a1096 100644
--- a/sound/firewire/fireface/Makefile
+++ b/sound/firewire/fireface/Makefile
@@ -1,4 +1,4 @@
snd-fireface-objs := ff.o ff-transaction.o ff-midi.o ff-proc.o amdtp-ff.o \
- ff-stream.o ff-pcm.o ff-hwdep.o ff-protocol-ff400.o \
- ff-protocol-ff800.o
+ ff-stream.o ff-pcm.o ff-hwdep.o ff-protocol-former.o \
+ ff-protocol-latter.o
obj-$(CONFIG_SND_FIREFACE) += snd-fireface.o
diff --git a/sound/firewire/fireface/ff-midi.c b/sound/firewire/fireface/ff-midi.c
index 6a49611ee462..5b44e1c4569a 100644
--- a/sound/firewire/fireface/ff-midi.c
+++ b/sound/firewire/fireface/ff-midi.c
@@ -19,7 +19,7 @@ static int midi_playback_open(struct snd_rawmidi_substream *substream)
struct snd_ff *ff = substream->rmidi->private_data;
/* Initialize internal status. */
- ff->running_status[substream->number] = 0;
+ ff->on_sysex[substream->number] = 0;
ff->rx_midi_error[substream->number] = false;
WRITE_ONCE(ff->rx_midi_substreams[substream->number], substream);
diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c
index d0bc96b20a65..5adf04b95c04 100644
--- a/sound/firewire/fireface/ff-pcm.c
+++ b/sound/firewire/fireface/ff-pcm.c
@@ -152,7 +152,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto release_lock;
- err = snd_ff_transaction_get_clock(ff, &rate, &src);
+ err = ff->spec->protocol->get_clock(ff, &rate, &src);
if (err < 0)
goto release_lock;
diff --git a/sound/firewire/fireface/ff-proc.c b/sound/firewire/fireface/ff-proc.c
index a0c550dabe9a..b886b541c94b 100644
--- a/sound/firewire/fireface/ff-proc.c
+++ b/sound/firewire/fireface/ff-proc.c
@@ -8,209 +8,29 @@
#include "./ff.h"
-static void proc_dump_clock_config(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
+const char *snd_ff_proc_get_clk_label(enum snd_ff_clock_src src)
{
- struct snd_ff *ff = entry->private_data;
- __le32 reg;
- u32 data;
- unsigned int rate;
- const char *src;
- int err;
-
- err = snd_fw_transaction(ff->unit, TCODE_READ_BLOCK_REQUEST,
- SND_FF_REG_CLOCK_CONFIG, &reg, sizeof(reg), 0);
- if (err < 0)
- return;
-
- data = le32_to_cpu(reg);
-
- snd_iprintf(buffer, "Output S/PDIF format: %s (Emphasis: %s)\n",
- (data & 0x20) ? "Professional" : "Consumer",
- (data & 0x40) ? "on" : "off");
-
- snd_iprintf(buffer, "Optical output interface format: %s\n",
- ((data >> 8) & 0x01) ? "S/PDIF" : "ADAT");
-
- snd_iprintf(buffer, "Word output single speed: %s\n",
- ((data >> 8) & 0x20) ? "on" : "off");
-
- snd_iprintf(buffer, "S/PDIF input interface: %s\n",
- ((data >> 8) & 0x02) ? "Optical" : "Coaxial");
-
- switch ((data >> 1) & 0x03) {
- case 0x01:
- rate = 32000;
- break;
- case 0x00:
- rate = 44100;
- break;
- case 0x03:
- rate = 48000;
- break;
- case 0x02:
- default:
- return;
- }
-
- if (data & 0x08)
- rate *= 2;
- else if (data & 0x10)
- rate *= 4;
-
- snd_iprintf(buffer, "Sampling rate: %d\n", rate);
-
- if (data & 0x01) {
- src = "Internal";
- } else {
- switch ((data >> 10) & 0x07) {
- case 0x00:
- src = "ADAT1";
- break;
- case 0x01:
- src = "ADAT2";
- break;
- case 0x03:
- src = "S/PDIF";
- break;
- case 0x04:
- src = "Word";
- break;
- case 0x05:
- src = "LTC";
- break;
- default:
- return;
- }
- }
-
- snd_iprintf(buffer, "Sync to clock source: %s\n", src);
+ static const char *const labels[] = {
+ "Internal",
+ "S/PDIF",
+ "ADAT1",
+ "ADAT2",
+ "Word",
+ "LTC",
+ };
+
+ if (src >= ARRAY_SIZE(labels))
+ return NULL;
+
+ return labels[src];
}
-static void proc_dump_sync_status(struct snd_info_entry *entry,
- struct snd_info_buffer *buffer)
+static void proc_dump_status(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
{
struct snd_ff *ff = entry->private_data;
- __le32 reg;
- u32 data;
- int err;
- err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
- SND_FF_REG_SYNC_STATUS, &reg, sizeof(reg), 0);
- if (err < 0)
- return;
-
- data = le32_to_cpu(reg);
-
- snd_iprintf(buffer, "External source detection:\n");
-
- snd_iprintf(buffer, "Word Clock:");
- if ((data >> 24) & 0x20) {
- if ((data >> 24) & 0x40)
- snd_iprintf(buffer, "sync\n");
- else
- snd_iprintf(buffer, "lock\n");
- } else {
- snd_iprintf(buffer, "none\n");
- }
-
- snd_iprintf(buffer, "S/PDIF:");
- if ((data >> 16) & 0x10) {
- if ((data >> 16) & 0x04)
- snd_iprintf(buffer, "sync\n");
- else
- snd_iprintf(buffer, "lock\n");
- } else {
- snd_iprintf(buffer, "none\n");
- }
-
- snd_iprintf(buffer, "ADAT1:");
- if ((data >> 8) & 0x04) {
- if ((data >> 8) & 0x10)
- snd_iprintf(buffer, "sync\n");
- else
- snd_iprintf(buffer, "lock\n");
- } else {
- snd_iprintf(buffer, "none\n");
- }
-
- snd_iprintf(buffer, "ADAT2:");
- if ((data >> 8) & 0x08) {
- if ((data >> 8) & 0x20)
- snd_iprintf(buffer, "sync\n");
- else
- snd_iprintf(buffer, "lock\n");
- } else {
- snd_iprintf(buffer, "none\n");
- }
-
- snd_iprintf(buffer, "\nUsed external source:\n");
-
- if (((data >> 22) & 0x07) == 0x07) {
- snd_iprintf(buffer, "None\n");
- } else {
- switch ((data >> 22) & 0x07) {
- case 0x00:
- snd_iprintf(buffer, "ADAT1:");
- break;
- case 0x01:
- snd_iprintf(buffer, "ADAT2:");
- break;
- case 0x03:
- snd_iprintf(buffer, "S/PDIF:");
- break;
- case 0x04:
- snd_iprintf(buffer, "Word:");
- break;
- case 0x07:
- snd_iprintf(buffer, "Nothing:");
- break;
- case 0x02:
- case 0x05:
- case 0x06:
- default:
- snd_iprintf(buffer, "unknown:");
- break;
- }
-
- if ((data >> 25) & 0x07) {
- switch ((data >> 25) & 0x07) {
- case 0x01:
- snd_iprintf(buffer, "32000\n");
- break;
- case 0x02:
- snd_iprintf(buffer, "44100\n");
- break;
- case 0x03:
- snd_iprintf(buffer, "48000\n");
- break;
- case 0x04:
- snd_iprintf(buffer, "64000\n");
- break;
- case 0x05:
- snd_iprintf(buffer, "88200\n");
- break;
- case 0x06:
- snd_iprintf(buffer, "96000\n");
- break;
- case 0x07:
- snd_iprintf(buffer, "128000\n");
- break;
- case 0x08:
- snd_iprintf(buffer, "176400\n");
- break;
- case 0x09:
- snd_iprintf(buffer, "192000\n");
- break;
- case 0x00:
- snd_iprintf(buffer, "unknown\n");
- break;
- }
- }
- }
-
- snd_iprintf(buffer, "Multiplied:");
- snd_iprintf(buffer, "%d\n", (data & 0x3ff) * 250);
+ ff->spec->protocol->dump_status(ff, buffer);
}
static void add_node(struct snd_ff *ff, struct snd_info_entry *root,
@@ -221,12 +41,8 @@ static void add_node(struct snd_ff *ff, struct snd_info_entry *root,
struct snd_info_entry *entry;
entry = snd_info_create_card_entry(ff->card, name, root);
- if (entry == NULL)
- return;
-
- snd_info_set_text_ops(entry, ff, op);
- if (snd_info_register(entry) < 0)
- snd_info_free_entry(entry);
+ if (entry)
+ snd_info_set_text_ops(entry, ff, op);
}
void snd_ff_proc_init(struct snd_ff *ff)
@@ -242,11 +58,6 @@ void snd_ff_proc_init(struct snd_ff *ff)
if (root == NULL)
return;
root->mode = S_IFDIR | 0555;
- if (snd_info_register(root) < 0) {
- snd_info_free_entry(root);
- return;
- }
- add_node(ff, root, "clock-config", proc_dump_clock_config);
- add_node(ff, root, "sync-status", proc_dump_sync_status);
+ add_node(ff, root, "status", proc_dump_status);
}
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
deleted file mode 100644
index 2280fab9b3c7..000000000000
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * ff-protocol-ff400.c - a part of driver for RME Fireface series
- *
- * Copyright (c) 2015-2017 Takashi Sakamoto
- *
- * Licensed under the terms of the GNU General Public License, version 2.
- */
-
-#include <linux/delay.h>
-#include "ff.h"
-
-#define FF400_STF 0x000080100500ull
-#define FF400_RX_PACKET_FORMAT 0x000080100504ull
-#define FF400_ISOC_COMM_START 0x000080100508ull
-#define FF400_TX_PACKET_FORMAT 0x00008010050cull
-#define FF400_ISOC_COMM_STOP 0x000080100510ull
-
-/*
- * Fireface 400 manages isochronous channel number in 3 bit field. Therefore,
- * we can allocate between 0 and 7 channel.
- */
-static int keep_resources(struct snd_ff *ff, unsigned int rate)
-{
- enum snd_ff_stream_mode mode;
- int i;
- int err;
-
- // Check whether the given value is supported or not.
- for (i = 0; i < CIP_SFC_COUNT; i++) {
- if (amdtp_rate_table[i] == rate)
- break;
- }
- if (i >= CIP_SFC_COUNT)
- return -EINVAL;
-
- err = snd_ff_stream_get_multiplier_mode(i, &mode);
- if (err < 0)
- return err;
-
- /* Keep resources for in-stream. */
- ff->tx_resources.channels_mask = 0x00000000000000ffuLL;
- err = fw_iso_resources_allocate(&ff->tx_resources,
- amdtp_stream_get_max_payload(&ff->tx_stream),
- fw_parent_device(ff->unit)->max_speed);
- if (err < 0)
- return err;
-
- /* Keep resources for out-stream. */
- err = amdtp_ff_set_parameters(&ff->rx_stream, rate,
- ff->spec->pcm_playback_channels[mode]);
- if (err < 0)
- return err;
- ff->rx_resources.channels_mask = 0x00000000000000ffuLL;
- err = fw_iso_resources_allocate(&ff->rx_resources,
- amdtp_stream_get_max_payload(&ff->rx_stream),
- fw_parent_device(ff->unit)->max_speed);
- if (err < 0)
- fw_iso_resources_free(&ff->tx_resources);
-
- return err;
-}
-
-static int ff400_begin_session(struct snd_ff *ff, unsigned int rate)
-{
- __le32 reg;
- int err;
-
- err = keep_resources(ff, rate);
- if (err < 0)
- return err;
-
- /* Set the number of data blocks transferred in a second. */
- reg = cpu_to_le32(rate);
- err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF400_STF, &reg, sizeof(reg), 0);
- if (err < 0)
- return err;
-
- msleep(100);
-
- /*
- * Set isochronous channel and the number of quadlets of received
- * packets.
- */
- reg = cpu_to_le32(((ff->rx_stream.data_block_quadlets << 3) << 8) |
- ff->rx_resources.channel);
- err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF400_RX_PACKET_FORMAT, &reg, sizeof(reg), 0);
- if (err < 0)
- return err;
-
- /*
- * Set isochronous channel and the number of quadlets of transmitted
- * packet.
- */
- /* TODO: investigate the purpose of this 0x80. */
- reg = cpu_to_le32((0x80 << 24) |
- (ff->tx_resources.channel << 5) |
- (ff->tx_stream.data_block_quadlets));
- err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF400_TX_PACKET_FORMAT, &reg, sizeof(reg), 0);
- if (err < 0)
- return err;
-
- /* Allow to transmit packets. */
- reg = cpu_to_le32(0x00000001);
- return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF400_ISOC_COMM_START, &reg, sizeof(reg), 0);
-}
-
-static void ff400_finish_session(struct snd_ff *ff)
-{
- __le32 reg;
-
- reg = cpu_to_le32(0x80000000);
- snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF400_ISOC_COMM_STOP, &reg, sizeof(reg), 0);
-}
-
-static void ff400_handle_midi_msg(struct snd_ff *ff, __le32 *buf, size_t length)
-{
- int i;
-
- for (i = 0; i < length / 4; i++) {
- u32 quad = le32_to_cpu(buf[i]);
- u8 byte;
- unsigned int index;
- struct snd_rawmidi_substream *substream;
-
- /* Message in first port. */
- /*
- * This value may represent the index of this unit when the same
- * units are on the same IEEE 1394 bus. This driver doesn't use
- * it.
- */
- index = (quad >> 8) & 0xff;
- if (index > 0) {
- substream = READ_ONCE(ff->tx_midi_substreams[0]);
- if (substream != NULL) {
- byte = quad & 0xff;
- snd_rawmidi_receive(substream, &byte, 1);
- }
- }
-
- /* Message in second port. */
- index = (quad >> 24) & 0xff;
- if (index > 0) {
- substream = READ_ONCE(ff->tx_midi_substreams[1]);
- if (substream != NULL) {
- byte = (quad >> 16) & 0xff;
- snd_rawmidi_receive(substream, &byte, 1);
- }
- }
- }
-}
-
-const struct snd_ff_protocol snd_ff_protocol_ff400 = {
- .handle_midi_msg = ff400_handle_midi_msg,
- .begin_session = ff400_begin_session,
- .finish_session = ff400_finish_session,
-};
diff --git a/sound/firewire/fireface/ff-protocol-ff800.c b/sound/firewire/fireface/ff-protocol-ff800.c
deleted file mode 100644
index 2acbf6039770..000000000000
--- a/sound/firewire/fireface/ff-protocol-ff800.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * ff-protocol-ff800.c - a part of driver for RME Fireface series
- *
- * Copyright (c) 2018 Takashi Sakamoto
- *
- * Licensed under the terms of the GNU General Public License, version 2.
- */
-
-#include <linux/delay.h>
-
-#include "ff.h"
-
-#define FF800_STF 0x0000fc88f000
-#define FF800_RX_PACKET_FORMAT 0x0000fc88f004
-#define FF800_ALLOC_TX_STREAM 0x0000fc88f008
-#define FF800_ISOC_COMM_START 0x0000fc88f00c
-#define FF800_TX_S800_FLAG 0x00000800
-#define FF800_ISOC_COMM_STOP 0x0000fc88f010
-
-#define FF800_TX_PACKET_ISOC_CH 0x0000801c0008
-
-static int allocate_rx_resources(struct snd_ff *ff)
-{
- u32 data;
- __le32 reg;
- int err;
-
- // Controllers should allocate isochronous resources for rx stream.
- err = fw_iso_resources_allocate(&ff->rx_resources,
- amdtp_stream_get_max_payload(&ff->rx_stream),
- fw_parent_device(ff->unit)->max_speed);
- if (err < 0)
- return err;
-
- // Set isochronous channel and the number of quadlets of rx packets.
- data = ff->rx_stream.data_block_quadlets << 3;
- data = (data << 8) | ff->rx_resources.channel;
- reg = cpu_to_le32(data);
- return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF800_RX_PACKET_FORMAT, &reg, sizeof(reg), 0);
-}
-
-static int allocate_tx_resources(struct snd_ff *ff)
-{
- __le32 reg;
- unsigned int count;
- unsigned int tx_isoc_channel;
- int err;
-
- reg = cpu_to_le32(ff->tx_stream.data_block_quadlets);
- err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF800_ALLOC_TX_STREAM, &reg, sizeof(reg), 0);
- if (err < 0)
- return err;
-
- // Wait till the format of tx packet is available.
- count = 0;
- while (count++ < 10) {
- u32 data;
- err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
- FF800_TX_PACKET_ISOC_CH, &reg, sizeof(reg), 0);
- if (err < 0)
- return err;
-
- data = le32_to_cpu(reg);
- if (data != 0xffffffff) {
- tx_isoc_channel = data;
- break;
- }
-
- msleep(50);
- }
- if (count >= 10)
- return -ETIMEDOUT;
-
- // NOTE: this is a makeshift to start OHCI 1394 IR context in the
- // channel. On the other hand, 'struct fw_iso_resources.allocated' is
- // not true and it's not deallocated at stop.
- ff->tx_resources.channel = tx_isoc_channel;
-
- return 0;
-}
-
-static int ff800_begin_session(struct snd_ff *ff, unsigned int rate)
-{
- __le32 reg;
- int err;
-
- reg = cpu_to_le32(rate);
- err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF800_STF, &reg, sizeof(reg), 0);
- if (err < 0)
- return err;
-
- // If starting isochronous communication immediately, change of STF has
- // no effect. In this case, the communication runs based on former STF.
- // Let's sleep for a bit.
- msleep(100);
-
- err = allocate_rx_resources(ff);
- if (err < 0)
- return err;
-
- err = allocate_tx_resources(ff);
- if (err < 0)
- return err;
-
- reg = cpu_to_le32(0x80000000);
- reg |= cpu_to_le32(ff->tx_stream.data_block_quadlets);
- if (fw_parent_device(ff->unit)->max_speed == SCODE_800)
- reg |= cpu_to_le32(FF800_TX_S800_FLAG);
- return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF800_ISOC_COMM_START, &reg, sizeof(reg), 0);
-}
-
-static void ff800_finish_session(struct snd_ff *ff)
-{
- __le32 reg;
-
- reg = cpu_to_le32(0x80000000);
- snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- FF800_ISOC_COMM_STOP, &reg, sizeof(reg), 0);
-}
-
-static void ff800_handle_midi_msg(struct snd_ff *ff, __le32 *buf, size_t length)
-{
- int i;
-
- for (i = 0; i < length / 4; i++) {
- u8 byte = le32_to_cpu(buf[i]) & 0xff;
- struct snd_rawmidi_substream *substream;
-
- substream = READ_ONCE(ff->tx_midi_substreams[0]);
- if (substream)
- snd_rawmidi_receive(substream, &byte, 1);
- }
-}
-
-const struct snd_ff_protocol snd_ff_protocol_ff800 = {
- .handle_midi_msg = ff800_handle_midi_msg,
- .begin_session = ff800_begin_session,
- .finish_session = ff800_finish_session,
-};
diff --git a/sound/firewire/fireface/ff-protocol-former.c b/sound/firewire/fireface/ff-protocol-former.c
new file mode 100644
index 000000000000..8d1c2c6e907b
--- /dev/null
+++ b/sound/firewire/fireface/ff-protocol-former.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+// ff-protocol-former.c - a part of driver for RME Fireface series
+//
+// Copyright (c) 2019 Takashi Sakamoto
+//
+// Licensed under the terms of the GNU General Public License, version 2.
+
+#include <linux/delay.h>
+
+#include "ff.h"
+
+#define FORMER_REG_SYNC_STATUS 0x0000801c0000ull
+/* For block write request. */
+#define FORMER_REG_FETCH_PCM_FRAMES 0x0000801c0000ull
+#define FORMER_REG_CLOCK_CONFIG 0x0000801c0004ull
+
+static int parse_clock_bits(u32 data, unsigned int *rate,
+ enum snd_ff_clock_src *src)
+{
+ static const struct {
+ unsigned int rate;
+ u32 mask;
+ } *rate_entry, rate_entries[] = {
+ { 32000, 0x00000002, },
+ { 44100, 0x00000000, },
+ { 48000, 0x00000006, },
+ { 64000, 0x0000000a, },
+ { 88200, 0x00000008, },
+ { 96000, 0x0000000e, },
+ { 128000, 0x00000012, },
+ { 176400, 0x00000010, },
+ { 192000, 0x00000016, },
+ };
+ static const struct {
+ enum snd_ff_clock_src src;
+ u32 mask;
+ } *clk_entry, clk_entries[] = {
+ { SND_FF_CLOCK_SRC_ADAT1, 0x00000000, },
+ { SND_FF_CLOCK_SRC_ADAT2, 0x00000400, },
+ { SND_FF_CLOCK_SRC_SPDIF, 0x00000c00, },
+ { SND_FF_CLOCK_SRC_WORD, 0x00001000, },
+ { SND_FF_CLOCK_SRC_LTC, 0x00001800, },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rate_entries); ++i) {
+ rate_entry = rate_entries + i;
+ if ((data & 0x0000001e) == rate_entry->mask) {
+ *rate = rate_entry->rate;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(rate_entries))
+ return -EIO;
+
+ if (data & 0x00000001) {
+ *src = SND_FF_CLOCK_SRC_INTERNAL;
+ } else {
+ for (i = 0; i < ARRAY_SIZE(clk_entries); ++i) {
+ clk_entry = clk_entries + i;
+ if ((data & 0x00001c00) == clk_entry->mask) {
+ *src = clk_entry->src;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(clk_entries))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int former_get_clock(struct snd_ff *ff, unsigned int *rate,
+ enum snd_ff_clock_src *src)
+{
+ __le32 reg;
+ u32 data;
+ int err;
+
+ err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
+ FORMER_REG_CLOCK_CONFIG, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+ data = le32_to_cpu(reg);
+
+ return parse_clock_bits(data, rate, src);
+}
+
+static int former_switch_fetching_mode(struct snd_ff *ff, bool enable)
+{
+ unsigned int count;
+ __le32 *reg;
+ int i;
+ int err;
+
+ count = 0;
+ for (i = 0; i < SND_FF_STREAM_MODE_COUNT; ++i)
+ count = max(count, ff->spec->pcm_playback_channels[i]);
+
+ reg = kcalloc(count, sizeof(__le32), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ if (!enable) {
+ /*
+ * Each quadlet corresponds to a data channel in a data
+ * block, in reverse order. Strictly, only the quadlets for
+ * available data channels should be enabled. Here we take the
+ * second-best approach and fetch PCM frames from all data
+ * channels regardless of the STF.
+ */
+ for (i = 0; i < count; ++i)
+ reg[i] = cpu_to_le32(0x00000001);
+ }
+
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
+ FORMER_REG_FETCH_PCM_FRAMES, reg,
+ sizeof(__le32) * count, 0);
+ kfree(reg);
+ return err;
+}
+
+static void dump_clock_config(struct snd_ff *ff, struct snd_info_buffer *buffer)
+{
+ __le32 reg;
+ u32 data;
+ unsigned int rate;
+ enum snd_ff_clock_src src;
+ const char *label;
+ int err;
+
+ err = snd_fw_transaction(ff->unit, TCODE_READ_BLOCK_REQUEST,
+ FORMER_REG_CLOCK_CONFIG, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return;
+ data = le32_to_cpu(reg);
+
+ snd_iprintf(buffer, "Output S/PDIF format: %s (Emphasis: %s)\n",
+ (data & 0x00000020) ? "Professional" : "Consumer",
+ (data & 0x00000040) ? "on" : "off");
+
+ snd_iprintf(buffer, "Optical output interface format: %s\n",
+ (data & 0x00000100) ? "S/PDIF" : "ADAT");
+
+ snd_iprintf(buffer, "Word output single speed: %s\n",
+ (data & 0x00002000) ? "on" : "off");
+
+ snd_iprintf(buffer, "S/PDIF input interface: %s\n",
+ (data & 0x00000200) ? "Optical" : "Coaxial");
+
+ err = parse_clock_bits(data, &rate, &src);
+ if (err < 0)
+ return;
+ label = snd_ff_proc_get_clk_label(src);
+ if (!label)
+ return;
+
+ snd_iprintf(buffer, "Clock configuration: %d %s\n", rate, label);
+}
+
+static void dump_sync_status(struct snd_ff *ff, struct snd_info_buffer *buffer)
+{
+ static const struct {
+ char *const label;
+ u32 locked_mask;
+ u32 synced_mask;
+ } *clk_entry, clk_entries[] = {
+ { "WDClk", 0x40000000, 0x20000000, },
+ { "S/PDIF", 0x00080000, 0x00040000, },
+ { "ADAT1", 0x00000400, 0x00001000, },
+ { "ADAT2", 0x00000800, 0x00002000, },
+ };
+ static const struct {
+ char *const label;
+ u32 mask;
+ } *referred_entry, referred_entries[] = {
+ { "ADAT1", 0x00000000, },
+ { "ADAT2", 0x00400000, },
+ { "S/PDIF", 0x00c00000, },
+ { "WDclk", 0x01000000, },
+ { "TCO", 0x01400000, },
+ };
+ static const struct {
+ unsigned int rate;
+ u32 mask;
+ } *rate_entry, rate_entries[] = {
+ { 32000, 0x02000000, },
+ { 44100, 0x04000000, },
+ { 48000, 0x06000000, },
+ { 64000, 0x08000000, },
+ { 88200, 0x0a000000, },
+ { 96000, 0x0c000000, },
+ { 128000, 0x0e000000, },
+ { 176400, 0x10000000, },
+ { 192000, 0x12000000, },
+ };
+ __le32 reg[2];
+ u32 data[2];
+ int i;
+ int err;
+
+ err = snd_fw_transaction(ff->unit, TCODE_READ_BLOCK_REQUEST,
+ FORMER_REG_SYNC_STATUS, reg, sizeof(reg), 0);
+ if (err < 0)
+ return;
+ data[0] = le32_to_cpu(reg[0]);
+ data[1] = le32_to_cpu(reg[1]);
+
+ snd_iprintf(buffer, "External source detection:\n");
+
+ for (i = 0; i < ARRAY_SIZE(clk_entries); ++i) {
+ const char *state;
+
+ clk_entry = clk_entries + i;
+ if (data[0] & clk_entry->locked_mask) {
+ if (data[0] & clk_entry->synced_mask)
+ state = "sync";
+ else
+ state = "lock";
+ } else {
+ state = "none";
+ }
+
+ snd_iprintf(buffer, "%s: %s\n", clk_entry->label, state);
+ }
+
+ snd_iprintf(buffer, "Referred clock:\n");
+
+ if (data[1] & 0x00000001) {
+ snd_iprintf(buffer, "Internal\n");
+ } else {
+ unsigned int rate;
+ const char *label;
+
+ for (i = 0; i < ARRAY_SIZE(referred_entries); ++i) {
+ referred_entry = referred_entries + i;
+ if ((data[0] & 0x1e0000) == referred_entry->mask) {
+ label = referred_entry->label;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(referred_entries))
+ label = "none";
+
+ for (i = 0; i < ARRAY_SIZE(rate_entries); ++i) {
+ rate_entry = rate_entries + i;
+ if ((data[0] & 0x1e000000) == rate_entry->mask) {
+ rate = rate_entry->rate;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(rate_entries))
+ rate = 0;
+
+ snd_iprintf(buffer, "%s %d\n", label, rate);
+ }
+}
+
+static void former_dump_status(struct snd_ff *ff,
+ struct snd_info_buffer *buffer)
+{
+ dump_clock_config(ff, buffer);
+ dump_sync_status(ff, buffer);
+}
+
+static int former_fill_midi_msg(struct snd_ff *ff,
+ struct snd_rawmidi_substream *substream,
+ unsigned int port)
+{
+ u8 *buf = (u8 *)ff->msg_buf[port];
+ int len;
+ int i;
+
+ len = snd_rawmidi_transmit_peek(substream, buf,
+ SND_FF_MAXIMIM_MIDI_QUADS);
+ if (len <= 0)
+ return len;
+
+ // One quadlet includes one byte.
+ for (i = len - 1; i >= 0; --i)
+ ff->msg_buf[port][i] = cpu_to_le32(buf[i]);
+ ff->rx_bytes[port] = len;
+
+ return len;
+}
+
+#define FF800_STF 0x0000fc88f000
+#define FF800_RX_PACKET_FORMAT 0x0000fc88f004
+#define FF800_ALLOC_TX_STREAM 0x0000fc88f008
+#define FF800_ISOC_COMM_START 0x0000fc88f00c
+#define FF800_TX_S800_FLAG 0x00000800
+#define FF800_ISOC_COMM_STOP 0x0000fc88f010
+
+#define FF800_TX_PACKET_ISOC_CH 0x0000801c0008
+
+static int allocate_rx_resources(struct snd_ff *ff)
+{
+ u32 data;
+ __le32 reg;
+ int err;
+
+ // Controllers should allocate isochronous resources for rx stream.
+ err = fw_iso_resources_allocate(&ff->rx_resources,
+ amdtp_stream_get_max_payload(&ff->rx_stream),
+ fw_parent_device(ff->unit)->max_speed);
+ if (err < 0)
+ return err;
+
+ // Set isochronous channel and the number of quadlets of rx packets.
+ data = ff->rx_stream.data_block_quadlets << 3;
+ data = (data << 8) | ff->rx_resources.channel;
+ reg = cpu_to_le32(data);
+ return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_RX_PACKET_FORMAT, &reg, sizeof(reg), 0);
+}
+
+static int allocate_tx_resources(struct snd_ff *ff)
+{
+ __le32 reg;
+ unsigned int count;
+ unsigned int tx_isoc_channel;
+ int err;
+
+ reg = cpu_to_le32(ff->tx_stream.data_block_quadlets);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_ALLOC_TX_STREAM, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ // Wait till the format of tx packet is available.
+ count = 0;
+ while (count++ < 10) {
+ u32 data;
+ err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
+ FF800_TX_PACKET_ISOC_CH, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ data = le32_to_cpu(reg);
+ if (data != 0xffffffff) {
+ tx_isoc_channel = data;
+ break;
+ }
+
+ msleep(50);
+ }
+ if (count >= 10)
+ return -ETIMEDOUT;
+
+ // NOTE: this is a makeshift to start an OHCI 1394 IR context for the
+ // channel. On the other hand, 'struct fw_iso_resources.allocated' is
+ // not set to true, so the resource is not deallocated at stop.
+ ff->tx_resources.channel = tx_isoc_channel;
+
+ return 0;
+}
+
+static int ff800_begin_session(struct snd_ff *ff, unsigned int rate)
+{
+ __le32 reg;
+ int err;
+
+ reg = cpu_to_le32(rate);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_STF, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ // If isochronous communication starts immediately, the change of STF has
+ // no effect and the communication keeps running at the former STF.
+ // Sleep for a while so that the change takes effect.
+ msleep(100);
+
+ err = allocate_rx_resources(ff);
+ if (err < 0)
+ return err;
+
+ err = allocate_tx_resources(ff);
+ if (err < 0)
+ return err;
+
+ reg = cpu_to_le32(0x80000000);
+ reg |= cpu_to_le32(ff->tx_stream.data_block_quadlets);
+ if (fw_parent_device(ff->unit)->max_speed == SCODE_800)
+ reg |= cpu_to_le32(FF800_TX_S800_FLAG);
+ return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_ISOC_COMM_START, &reg, sizeof(reg), 0);
+}
+
+static void ff800_finish_session(struct snd_ff *ff)
+{
+ __le32 reg;
+
+ reg = cpu_to_le32(0x80000000);
+ snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_ISOC_COMM_STOP, &reg, sizeof(reg), 0);
+}
+
+// Fireface 800 doesn't allow drivers to register the lower 4 bytes of the
+// destination address.
+// A write transaction which clears the registered higher 4 bytes of the
+// destination address suppresses asynchronous transactions from the device.
+static void ff800_handle_midi_msg(struct snd_ff *ff, unsigned int offset,
+ __le32 *buf, size_t length)
+{
+ int i;
+
+ for (i = 0; i < length / 4; i++) {
+ u8 byte = le32_to_cpu(buf[i]) & 0xff;
+ struct snd_rawmidi_substream *substream;
+
+ substream = READ_ONCE(ff->tx_midi_substreams[0]);
+ if (substream)
+ snd_rawmidi_receive(substream, &byte, 1);
+ }
+}
+
+const struct snd_ff_protocol snd_ff_protocol_ff800 = {
+ .handle_midi_msg = ff800_handle_midi_msg,
+ .fill_midi_msg = former_fill_midi_msg,
+ .get_clock = former_get_clock,
+ .switch_fetching_mode = former_switch_fetching_mode,
+ .begin_session = ff800_begin_session,
+ .finish_session = ff800_finish_session,
+ .dump_status = former_dump_status,
+};
+
+#define FF400_STF 0x000080100500ull
+#define FF400_RX_PACKET_FORMAT 0x000080100504ull
+#define FF400_ISOC_COMM_START 0x000080100508ull
+#define FF400_TX_PACKET_FORMAT 0x00008010050cull
+#define FF400_ISOC_COMM_STOP 0x000080100510ull
+
+/*
+ * Fireface 400 manages the isochronous channel number in a 3 bit field.
+ * Therefore, channels 0 through 7 can be allocated.
+ */
+static int keep_resources(struct snd_ff *ff, unsigned int rate)
+{
+ enum snd_ff_stream_mode mode;
+ int i;
+ int err;
+
+ // Check whether the given value is supported or not.
+ for (i = 0; i < CIP_SFC_COUNT; i++) {
+ if (amdtp_rate_table[i] == rate)
+ break;
+ }
+ if (i >= CIP_SFC_COUNT)
+ return -EINVAL;
+
+ err = snd_ff_stream_get_multiplier_mode(i, &mode);
+ if (err < 0)
+ return err;
+
+ /* Keep resources for in-stream. */
+ ff->tx_resources.channels_mask = 0x00000000000000ffuLL;
+ err = fw_iso_resources_allocate(&ff->tx_resources,
+ amdtp_stream_get_max_payload(&ff->tx_stream),
+ fw_parent_device(ff->unit)->max_speed);
+ if (err < 0)
+ return err;
+
+ /* Keep resources for out-stream. */
+ ff->rx_resources.channels_mask = 0x00000000000000ffuLL;
+ err = fw_iso_resources_allocate(&ff->rx_resources,
+ amdtp_stream_get_max_payload(&ff->rx_stream),
+ fw_parent_device(ff->unit)->max_speed);
+ if (err < 0)
+ fw_iso_resources_free(&ff->tx_resources);
+
+ return err;
+}
+
+static int ff400_begin_session(struct snd_ff *ff, unsigned int rate)
+{
+ __le32 reg;
+ int err;
+
+ err = keep_resources(ff, rate);
+ if (err < 0)
+ return err;
+
+ /* Set the number of data blocks transferred in a second. */
+ reg = cpu_to_le32(rate);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF400_STF, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ msleep(100);
+
+ /*
+ * Set isochronous channel and the number of quadlets of received
+ * packets.
+ */
+ reg = cpu_to_le32(((ff->rx_stream.data_block_quadlets << 3) << 8) |
+ ff->rx_resources.channel);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF400_RX_PACKET_FORMAT, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ /*
+ * Set isochronous channel and the number of quadlets of transmitted
+ * packet.
+ */
+ /* TODO: investigate the purpose of this 0x80. */
+ reg = cpu_to_le32((0x80 << 24) |
+ (ff->tx_resources.channel << 5) |
+ (ff->tx_stream.data_block_quadlets));
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF400_TX_PACKET_FORMAT, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ /* Allow to transmit packets. */
+ reg = cpu_to_le32(0x00000001);
+ return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF400_ISOC_COMM_START, &reg, sizeof(reg), 0);
+}
+
+static void ff400_finish_session(struct snd_ff *ff)
+{
+ __le32 reg;
+
+ reg = cpu_to_le32(0x80000000);
+ snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF400_ISOC_COMM_STOP, &reg, sizeof(reg), 0);
+}
+
+// For Fireface 400, the lower 4 bytes of the destination address are configured
+// by bit flags in a quadlet register (little endian) at 0x'0000'8010'051c.
+// Drivers can select one of 4 options:
+//
+// bit flags: offset of destination address
+// - 0x04000000: 0x'....'....'0000'0000
+// - 0x08000000: 0x'....'....'0000'0080
+// - 0x10000000: 0x'....'....'0000'0100
+// - 0x20000000: 0x'....'....'0000'0180
+//
+// Drivers can keep the device from transferring asynchronous transactions by
+// using the 2 bits below.
+// - 0x01000000: suppress transmission
+// - 0x02000000: suppress transmission
+//
+// In practice, the register is write-only and includes other options such as
+// input attenuation. This driver allocates a destination address with '0000'0000
+// as its lower offset and expects a userspace application to configure the
+// register accordingly.
+static void ff400_handle_midi_msg(struct snd_ff *ff, unsigned int offset,
+ __le32 *buf, size_t length)
+{
+ int i;
+
+ for (i = 0; i < length / 4; i++) {
+ u32 quad = le32_to_cpu(buf[i]);
+ u8 byte;
+ unsigned int index;
+ struct snd_rawmidi_substream *substream;
+
+ /* Message in the first port. */
+ /*
+ * This value may represent the index of this unit when multiple
+ * identical units are on the same IEEE 1394 bus. This driver
+ * doesn't use it.
+ */
+ index = (quad >> 8) & 0xff;
+ if (index > 0) {
+ substream = READ_ONCE(ff->tx_midi_substreams[0]);
+ if (substream != NULL) {
+ byte = quad & 0xff;
+ snd_rawmidi_receive(substream, &byte, 1);
+ }
+ }
+
+ /* Message in the second port. */
+ index = (quad >> 24) & 0xff;
+ if (index > 0) {
+ substream = READ_ONCE(ff->tx_midi_substreams[1]);
+ if (substream != NULL) {
+ byte = (quad >> 16) & 0xff;
+ snd_rawmidi_receive(substream, &byte, 1);
+ }
+ }
+ }
+}
+
+const struct snd_ff_protocol snd_ff_protocol_ff400 = {
+ .handle_midi_msg = ff400_handle_midi_msg,
+ .fill_midi_msg = former_fill_midi_msg,
+ .get_clock = former_get_clock,
+ .switch_fetching_mode = former_switch_fetching_mode,
+ .begin_session = ff400_begin_session,
+ .finish_session = ff400_finish_session,
+ .dump_status = former_dump_status,
+};
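As a side note, not part of the patch: allocate_rx_resources() for Fireface 800
and ff400_begin_session() above program the rx packet format register with the
same layout, i.e. the number of quadlets per data block shifted left by 3,
placed above a low byte that carries the isochronous channel. A minimal sketch
of that packing, with the helper name invented purely for illustration:

	static u32 former_rx_packet_format(unsigned int data_block_quadlets,
					   unsigned int isoc_channel)
	{
		// Bits 8 and above: quadlets per data block, multiplied by 8.
		// Bits 0-7: isochronous channel number.
		return ((data_block_quadlets << 3) << 8) | isoc_channel;
	}

For instance, 10 quadlets per data block on channel 1 encodes to 0x00005001.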
diff --git a/sound/firewire/fireface/ff-protocol-latter.c b/sound/firewire/fireface/ff-protocol-latter.c
new file mode 100644
index 000000000000..c8236ff89b7f
--- /dev/null
+++ b/sound/firewire/fireface/ff-protocol-latter.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+// ff-protocol-latter - a part of driver for RME Fireface series
+//
+// Copyright (c) 2019 Takashi Sakamoto
+//
+// Licensed under the terms of the GNU General Public License, version 2.
+
+#include <linux/delay.h>
+
+#include "ff.h"
+
+#define LATTER_STF 0xffff00000004
+#define LATTER_ISOC_CHANNELS 0xffff00000008
+#define LATTER_ISOC_START 0xffff0000000c
+#define LATTER_FETCH_MODE 0xffff00000010
+#define LATTER_SYNC_STATUS 0x0000801c0000
+
+static int parse_clock_bits(u32 data, unsigned int *rate,
+ enum snd_ff_clock_src *src)
+{
+ static const struct {
+ unsigned int rate;
+ u32 flag;
+ } *rate_entry, rate_entries[] = {
+ { 32000, 0x00000000, },
+ { 44100, 0x01000000, },
+ { 48000, 0x02000000, },
+ { 64000, 0x04000000, },
+ { 88200, 0x05000000, },
+ { 96000, 0x06000000, },
+ { 128000, 0x08000000, },
+ { 176400, 0x09000000, },
+ { 192000, 0x0a000000, },
+ };
+ static const struct {
+ enum snd_ff_clock_src src;
+ u32 flag;
+ } *clk_entry, clk_entries[] = {
+ { SND_FF_CLOCK_SRC_SPDIF, 0x00000200, },
+ { SND_FF_CLOCK_SRC_ADAT1, 0x00000400, },
+ { SND_FF_CLOCK_SRC_WORD, 0x00000600, },
+ { SND_FF_CLOCK_SRC_INTERNAL, 0x00000e00, },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rate_entries); ++i) {
+ rate_entry = rate_entries + i;
+ if ((data & 0x0f000000) == rate_entry->flag) {
+ *rate = rate_entry->rate;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(rate_entries))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(clk_entries); ++i) {
+ clk_entry = clk_entries + i;
+ if ((data & 0x000e00) == clk_entry->flag) {
+ *src = clk_entry->src;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(clk_entries))
+ return -EIO;
+
+ return 0;
+}
+
+static int latter_get_clock(struct snd_ff *ff, unsigned int *rate,
+ enum snd_ff_clock_src *src)
+{
+ __le32 reg;
+ u32 data;
+ int err;
+
+ err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
+ LATTER_SYNC_STATUS, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+ data = le32_to_cpu(reg);
+
+ return parse_clock_bits(data, rate, src);
+}
+
+static int latter_switch_fetching_mode(struct snd_ff *ff, bool enable)
+{
+ u32 data;
+ __le32 reg;
+
+ if (enable)
+ data = 0x00000000;
+ else
+ data = 0xffffffff;
+ reg = cpu_to_le32(data);
+
+ return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ LATTER_FETCH_MODE, &reg, sizeof(reg), 0);
+}
+
+static int keep_resources(struct snd_ff *ff, unsigned int rate)
+{
+ enum snd_ff_stream_mode mode;
+ int i;
+ int err;
+
+ // Check whether the given value is supported or not.
+ for (i = 0; i < CIP_SFC_COUNT; i++) {
+ if (amdtp_rate_table[i] == rate)
+ break;
+ }
+ if (i >= CIP_SFC_COUNT)
+ return -EINVAL;
+
+ err = snd_ff_stream_get_multiplier_mode(i, &mode);
+ if (err < 0)
+ return err;
+
+ /* Keep resources for in-stream. */
+ ff->tx_resources.channels_mask = 0x00000000000000ffuLL;
+ err = fw_iso_resources_allocate(&ff->tx_resources,
+ amdtp_stream_get_max_payload(&ff->tx_stream),
+ fw_parent_device(ff->unit)->max_speed);
+ if (err < 0)
+ return err;
+
+ /* Keep resources for out-stream. */
+ ff->rx_resources.channels_mask = 0x00000000000000ffuLL;
+ err = fw_iso_resources_allocate(&ff->rx_resources,
+ amdtp_stream_get_max_payload(&ff->rx_stream),
+ fw_parent_device(ff->unit)->max_speed);
+ if (err < 0)
+ fw_iso_resources_free(&ff->tx_resources);
+
+ return err;
+}
+
+static int latter_begin_session(struct snd_ff *ff, unsigned int rate)
+{
+ static const struct {
+ unsigned int stf;
+ unsigned int code;
+ unsigned int flag;
+ } *entry, rate_table[] = {
+ { 32000, 0x00, 0x92, },
+ { 44100, 0x02, 0x92, },
+ { 48000, 0x04, 0x92, },
+ { 64000, 0x08, 0x8e, },
+ { 88200, 0x0a, 0x8e, },
+ { 96000, 0x0c, 0x8e, },
+ { 128000, 0x10, 0x8c, },
+ { 176400, 0x12, 0x8c, },
+ { 192000, 0x14, 0x8c, },
+ };
+ u32 data;
+ __le32 reg;
+ unsigned int count;
+ int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(rate_table); ++i) {
+ entry = rate_table + i;
+ if (entry->stf == rate)
+ break;
+ }
+ if (i == ARRAY_SIZE(rate_table))
+ return -EINVAL;
+
+ reg = cpu_to_le32(entry->code);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ LATTER_STF, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ // Wait for the transmission clock to shift to the requested rate.
+ count = 0;
+ while (count++ < 10) {
+ unsigned int curr_rate;
+ enum snd_ff_clock_src src;
+
+ err = latter_get_clock(ff, &curr_rate, &src);
+ if (err < 0)
+ return err;
+
+ if (curr_rate == rate)
+ break;
+ }
+ if (count == 10)
+ return -ETIMEDOUT;
+
+ err = keep_resources(ff, rate);
+ if (err < 0)
+ return err;
+
+ data = (ff->tx_resources.channel << 8) | ff->rx_resources.channel;
+ reg = cpu_to_le32(data);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ LATTER_ISOC_CHANNELS, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ // Always use the maximum number of data channels in the data blocks of
+ // packets.
+ reg = cpu_to_le32(entry->flag);
+ return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ LATTER_ISOC_START, &reg, sizeof(reg), 0);
+}
+
+static void latter_finish_session(struct snd_ff *ff)
+{
+ __le32 reg;
+
+ reg = cpu_to_le32(0x00000000);
+ snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ LATTER_ISOC_START, &reg, sizeof(reg), 0);
+}
+
+static void latter_dump_status(struct snd_ff *ff, struct snd_info_buffer *buffer)
+{
+ static const struct {
+ char *const label;
+ u32 locked_mask;
+ u32 synced_mask;
+ } *clk_entry, clk_entries[] = {
+ { "S/PDIF", 0x00000001, 0x00000010, },
+ { "ADAT", 0x00000002, 0x00000020, },
+ { "WDClk", 0x00000004, 0x00000040, },
+ };
+ __le32 reg;
+ u32 data;
+ unsigned int rate;
+ enum snd_ff_clock_src src;
+ const char *label;
+ int i;
+ int err;
+
+ err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
+ LATTER_SYNC_STATUS, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return;
+ data = le32_to_cpu(reg);
+
+ snd_iprintf(buffer, "External source detection:\n");
+
+ for (i = 0; i < ARRAY_SIZE(clk_entries); ++i) {
+ clk_entry = clk_entries + i;
+ snd_iprintf(buffer, "%s: ", clk_entry->label);
+ if (data & clk_entry->locked_mask) {
+ if (data & clk_entry->synced_mask)
+ snd_iprintf(buffer, "sync\n");
+ else
+ snd_iprintf(buffer, "lock\n");
+ } else {
+ snd_iprintf(buffer, "none\n");
+ }
+ }
+
+ err = parse_clock_bits(data, &rate, &src);
+ if (err < 0)
+ return;
+ label = snd_ff_proc_get_clk_label(src);
+ if (!label)
+ return;
+
+ snd_iprintf(buffer, "Referred clock: %s %d\n", label, rate);
+}
+
+// NOTE: transactions are transferred within 0x00-0x7f of the allocated address
+// range. This seems to be for detecting discontinuity on the receiver side.
+//
+// Like Fireface 400, drivers can select one of 4 options for the lower 4 bytes
+// of the destination address by bit flags in a quadlet register (little endian)
+// at 0x'ffff'0000'0014:
+//
+// bit flags: offset of destination address
+// - 0x00002000: 0x'....'....'0000'0000
+// - 0x00004000: 0x'....'....'0000'0080
+// - 0x00008000: 0x'....'....'0000'0100
+// - 0x00010000: 0x'....'....'0000'0180
+//
+// Drivers can keep the device from transferring asynchronous transactions by
+// clearing these bit flags.
+//
+// In practice, the register is write-only and includes other settings such as
+// input attenuation. This driver allocates the first option
+// (0x'....'....'0000'0000) and expects a userspace application to configure the
+// register accordingly.
+static void latter_handle_midi_msg(struct snd_ff *ff, unsigned int offset,
+ __le32 *buf, size_t length)
+{
+ u32 data = le32_to_cpu(*buf);
+ unsigned int index = (data & 0x000000f0) >> 4;
+ u8 byte[3];
+ struct snd_rawmidi_substream *substream;
+ unsigned int len;
+
+ if (index >= ff->spec->midi_in_ports)
+ return;
+
+ switch (data & 0x0000000f) {
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000e:
+ len = 3;
+ break;
+ case 0x0000000c:
+ case 0x0000000d:
+ len = 2;
+ break;
+ default:
+ len = data & 0x00000003;
+ if (len == 0)
+ len = 3;
+ break;
+ }
+
+ byte[0] = (data & 0x0000ff00) >> 8;
+ byte[1] = (data & 0x00ff0000) >> 16;
+ byte[2] = (data & 0xff000000) >> 24;
+
+ substream = READ_ONCE(ff->tx_midi_substreams[index]);
+ if (substream)
+ snd_rawmidi_receive(substream, byte, len);
+}
+
+/*
+ * A negative return value means the given argument is not a MIDI status byte.
+ * A return value of 0 means the argument begins a system exclusive message.
+ * Any other return value is the length in bytes of the message which begins
+ * with the given status byte.
+ */
+static inline int calculate_message_bytes(u8 status)
+{
+ switch (status) {
+ case 0xf6: /* Tune request. */
+ case 0xf8: /* Timing clock. */
+ case 0xfa: /* Start. */
+ case 0xfb: /* Continue. */
+ case 0xfc: /* Stop. */
+ case 0xfe: /* Active sensing. */
+ case 0xff: /* System reset. */
+ return 1;
+ case 0xf1: /* MIDI time code quarter frame. */
+ case 0xf3: /* Song select. */
+ return 2;
+ case 0xf2: /* Song position pointer. */
+ return 3;
+ case 0xf0: /* Exclusive. */
+ return 0;
+ case 0xf7: /* End of exclusive. */
+ break;
+ case 0xf4: /* Undefined. */
+ case 0xf5: /* Undefined. */
+ case 0xf9: /* Undefined. */
+ case 0xfd: /* Undefined. */
+ break;
+ default:
+ switch (status & 0xf0) {
+ case 0x80: /* Note off. */
+ case 0x90: /* Note on. */
+ case 0xa0: /* Polyphonic key pressure. */
+ case 0xb0: /* Control change and Mode change. */
+ case 0xe0: /* Pitch bend change. */
+ return 3;
+ case 0xc0: /* Program change. */
+ case 0xd0: /* Channel pressure. */
+ return 2;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
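As a quick illustration of the helper above (not part of the patch): a note-on
status byte gives calculate_message_bytes(0x90) == 3, a song select gives
calculate_message_bytes(0xf3) == 2, the start of an exclusive gives
calculate_message_bytes(0xf0) == 0, and a plain data byte such as 0x3c falls
through to -EINVAL.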
+
+static int latter_fill_midi_msg(struct snd_ff *ff,
+ struct snd_rawmidi_substream *substream,
+ unsigned int port)
+{
+ u32 data = 0;
+ u8 *buf = (u8 *)&data;
+ int consumed;
+
+ buf[0] = port << 4;
+ consumed = snd_rawmidi_transmit_peek(substream, buf + 1, 3);
+ if (consumed <= 0)
+ return consumed;
+
+ if (!ff->on_sysex[port]) {
+ if (buf[1] != 0xf0) {
+ if (consumed < calculate_message_bytes(buf[1]))
+ return 0;
+ } else {
+ // The beginning of a system exclusive message.
+ ff->on_sysex[port] = true;
+ }
+
+ buf[0] |= consumed;
+ } else {
+ if (buf[1] != 0xf7) {
+ if (buf[2] == 0xf7 || buf[3] == 0xf7) {
+ // Transfer the end code next time.
+ consumed -= 1;
+ }
+
+ buf[0] |= consumed;
+ } else {
+ // The end of the system exclusive message.
+ ff->on_sysex[port] = false;
+ consumed = 1;
+ buf[0] |= 0x0f;
+ }
+ }
+
+ ff->msg_buf[port][0] = cpu_to_le32(data);
+ ff->rx_bytes[port] = consumed;
+
+ return 1;
+}
+
+const struct snd_ff_protocol snd_ff_protocol_latter = {
+ .handle_midi_msg = latter_handle_midi_msg,
+ .fill_midi_msg = latter_fill_midi_msg,
+ .get_clock = latter_get_clock,
+ .switch_fetching_mode = latter_switch_fetching_mode,
+ .begin_session = latter_begin_session,
+ .finish_session = latter_finish_session,
+ .dump_status = latter_dump_status,
+};
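As a side note, not part of the patch: latter_handle_midi_msg() above decodes
each received quadlet into a port index, a length selector and up to three MIDI
bytes. A small sketch of that decode, with the example value chosen here purely
for illustration:

	u32 quad = 0x7f3c9009;				/* example quadlet */
	unsigned int port = (quad & 0x000000f0) >> 4;	/* 0 */
	/* the low nibble 0x9 selects a 3-byte message in the switch above */
	u8 bytes[3] = {
		(quad & 0x0000ff00) >> 8,		/* 0x90 */
		(quad & 0x00ff0000) >> 16,		/* 0x3c */
		(quad & 0xff000000) >> 24,		/* 0x7f */
	};

so this quadlet would be delivered to tx_midi_substreams[0] as the 3-byte
message 0x90 0x3c 0x7f. In the transmit direction, latter_fill_midi_msg()
builds the leading byte the same way: the port index in its upper nibble and
the consumed byte count (or 0x0f at the end of an exclusive) in its lower
nibble.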
diff --git a/sound/firewire/fireface/ff-stream.c b/sound/firewire/fireface/ff-stream.c
index a490e4553721..a8a90f1ae09e 100644
--- a/sound/firewire/fireface/ff-stream.c
+++ b/sound/firewire/fireface/ff-stream.c
@@ -37,44 +37,10 @@ static void release_resources(struct snd_ff *ff)
fw_iso_resources_free(&ff->rx_resources);
}
-static int switch_fetching_mode(struct snd_ff *ff, bool enable)
-{
- unsigned int count;
- __le32 *reg;
- int i;
- int err;
-
- count = 0;
- for (i = 0; i < SND_FF_STREAM_MODE_COUNT; ++i)
- count = max(count, ff->spec->pcm_playback_channels[i]);
-
- reg = kcalloc(count, sizeof(__le32), GFP_KERNEL);
- if (!reg)
- return -ENOMEM;
-
- if (!enable) {
- /*
- * Each quadlet is corresponding to data channels in a data
- * blocks in reverse order. Precisely, quadlets for available
- * data channels should be enabled. Here, I take second best
- * to fetch PCM frames from all of data channels regardless of
- * stf.
- */
- for (i = 0; i < count; ++i)
- reg[i] = cpu_to_le32(0x00000001);
- }
-
- err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
- SND_FF_REG_FETCH_PCM_FRAMES, reg,
- sizeof(__le32) * count, 0);
- kfree(reg);
- return err;
-}
-
static inline void finish_session(struct snd_ff *ff)
{
ff->spec->protocol->finish_session(ff);
- switch_fetching_mode(ff, false);
+ ff->spec->protocol->switch_fetching_mode(ff, false);
}
static int init_stream(struct snd_ff *ff, enum amdtp_stream_direction dir)
@@ -147,7 +113,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
if (ff->substreams_counter == 0)
return 0;
- err = snd_ff_transaction_get_clock(ff, &curr_rate, &src);
+ err = ff->spec->protocol->get_clock(ff, &curr_rate, &src);
if (err < 0)
return err;
if (curr_rate != rate ||
@@ -206,7 +172,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
goto error;
}
- err = switch_fetching_mode(ff, true);
+ err = ff->spec->protocol->switch_fetching_mode(ff, true);
if (err < 0)
goto error;
}
diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c
index 5f4ddfd55403..0d6ad19363b8 100644
--- a/sound/firewire/fireface/ff-transaction.c
+++ b/sound/firewire/fireface/ff-transaction.c
@@ -8,72 +8,6 @@
#include "ff.h"
-#define SND_FF_REG_MIDI_RX_PORT_0 0x000080180000ull
-#define SND_FF_REG_MIDI_RX_PORT_1 0x000080190000ull
-
-int snd_ff_transaction_get_clock(struct snd_ff *ff, unsigned int *rate,
- enum snd_ff_clock_src *src)
-{
- __le32 reg;
- u32 data;
- int err;
-
- err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
- SND_FF_REG_CLOCK_CONFIG, &reg, sizeof(reg), 0);
- if (err < 0)
- return err;
- data = le32_to_cpu(reg);
-
- /* Calculate sampling rate. */
- switch ((data >> 1) & 0x03) {
- case 0x01:
- *rate = 32000;
- break;
- case 0x00:
- *rate = 44100;
- break;
- case 0x03:
- *rate = 48000;
- break;
- case 0x02:
- default:
- return -EIO;
- }
-
- if (data & 0x08)
- *rate *= 2;
- else if (data & 0x10)
- *rate *= 4;
-
- /* Calculate source of clock. */
- if (data & 0x01) {
- *src = SND_FF_CLOCK_SRC_INTERNAL;
- } else {
- /* TODO: 0x02, 0x06, 0x07? */
- switch ((data >> 10) & 0x07) {
- case 0x00:
- *src = SND_FF_CLOCK_SRC_ADAT1;
- break;
- case 0x01:
- *src = SND_FF_CLOCK_SRC_ADAT2;
- break;
- case 0x03:
- *src = SND_FF_CLOCK_SRC_SPDIF;
- break;
- case 0x04:
- *src = SND_FF_CLOCK_SRC_WORD;
- break;
- case 0x05:
- *src = SND_FF_CLOCK_SRC_LTC;
- break;
- default:
- return -EIO;
- }
- }
-
- return 0;
-}
-
static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port,
int rcode)
{
@@ -117,23 +51,17 @@ static void finish_transmit_midi1_msg(struct fw_card *card, int rcode,
finish_transmit_midi_msg(ff, 1, rcode);
}
-static inline void fill_midi_buf(struct snd_ff *ff, unsigned int port,
- unsigned int index, u8 byte)
-{
- ff->msg_buf[port][index] = cpu_to_le32(byte);
-}
-
static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
{
struct snd_rawmidi_substream *substream =
READ_ONCE(ff->rx_midi_substreams[port]);
- u8 *buf = (u8 *)ff->msg_buf[port];
- int i, len;
+ int quad_count;
struct fw_device *fw_dev = fw_parent_device(ff->unit);
unsigned long long addr;
int generation;
fw_transaction_callback_t callback;
+ int tcode;
if (substream == NULL || snd_rawmidi_transmit_empty(substream))
return;
@@ -147,26 +75,26 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
return;
}
- len = snd_rawmidi_transmit_peek(substream, buf,
- SND_FF_MAXIMIM_MIDI_QUADS);
- if (len <= 0)
+ quad_count = ff->spec->protocol->fill_midi_msg(ff, substream, port);
+ if (quad_count <= 0)
return;
- for (i = len - 1; i >= 0; i--)
- fill_midi_buf(ff, port, i, buf[i]);
-
if (port == 0) {
- addr = SND_FF_REG_MIDI_RX_PORT_0;
+ addr = ff->spec->midi_rx_addrs[0];
callback = finish_transmit_midi0_msg;
} else {
- addr = SND_FF_REG_MIDI_RX_PORT_1;
+ addr = ff->spec->midi_rx_addrs[1];
callback = finish_transmit_midi1_msg;
}
/* Set interval to next transaction. */
ff->next_ktime[port] = ktime_add_ns(ktime_get(),
- len * 8 * NSEC_PER_SEC / 31250);
- ff->rx_bytes[port] = len;
+ ff->rx_bytes[port] * 8 * NSEC_PER_SEC / 31250);
+
+ if (quad_count == 1)
+ tcode = TCODE_WRITE_QUADLET_REQUEST;
+ else
+ tcode = TCODE_WRITE_BLOCK_REQUEST;
/*
* In Linux FireWire core, when generation is updated with memory
@@ -178,10 +106,9 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
*/
generation = fw_dev->generation;
smp_rmb();
- fw_send_request(fw_dev->card, &ff->transactions[port],
- TCODE_WRITE_BLOCK_REQUEST,
+ fw_send_request(fw_dev->card, &ff->transactions[port], tcode,
fw_dev->node_id, generation, fw_dev->max_speed,
- addr, &ff->msg_buf[port], len * 4,
+ addr, &ff->msg_buf[port], quad_count * 4,
callback, &ff->transactions[port]);
}
@@ -209,7 +136,9 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
fw_send_response(card, request, RCODE_COMPLETE);
- ff->spec->protocol->handle_midi_msg(ff, buf, length);
+ offset -= ff->async_handler.offset;
+ ff->spec->protocol->handle_midi_msg(ff, (unsigned int)offset, buf,
+ length);
}
static int allocate_own_address(struct snd_ff *ff, int i)
@@ -217,7 +146,7 @@ static int allocate_own_address(struct snd_ff *ff, int i)
struct fw_address_region midi_msg_region;
int err;
- ff->async_handler.length = SND_FF_MAXIMIM_MIDI_QUADS * 4;
+ ff->async_handler.length = ff->spec->midi_addr_range;
ff->async_handler.address_callback = handle_midi_msg;
ff->async_handler.callback_data = ff;
@@ -236,35 +165,13 @@ static int allocate_own_address(struct snd_ff *ff, int i)
return err;
}
-/*
- * Controllers are allowed to register higher 4 bytes of address to receive
- * the transactions. Different models have different registers for this purpose;
- * e.g. 0x'0000'8010'03f4 for Fireface 400.
- * The controllers are not allowed to register lower 4 bytes of the address.
- * They are forced to select one of 4 options for the part of address by writing
- * corresponding bits to 0x'0000'8010'051f.
- *
- * The 3rd-6th bits of this register are flags to indicate lower 4 bytes of
- * address to which the device transferrs the transactions. In short:
- * - 0x20: 0x'....'....'0000'0180
- * - 0x10: 0x'....'....'0000'0100
- * - 0x08: 0x'....'....'0000'0080
- * - 0x04: 0x'....'....'0000'0000
- *
- * This driver configure 0x'....'....'0000'0000 to receive MIDI messages from
- * units. The 3rd bit of the register should be configured, however this driver
- * deligates this task to userspace applications due to a restriction that this
- * register is write-only and the other bits have own effects.
- *
- * Unlike Fireface 800, Fireface 400 cancels transferring asynchronous
- * transactions when the 1st and 2nd of the register stand. These two bits have
- * the same effect.
- * - 0x02, 0x01: cancel transferring
- *
- * On the other hand, the bits have no effect on Fireface 800. This model
- * cancels asynchronous transactions when the higher 4 bytes of address is
- * overwritten with zero.
- */
+// Controllers are allowed to register the higher 4 bytes of the destination
+// address to receive asynchronous transactions for MIDI messages, while the way
+// to register the lower 4 bytes of the address differs between protocols. For
+// details, please refer to the comments in the protocol implementations.
+//
+// This driver expects userspace applications to configure the registers for the
+// lower address because in most cases such registers carry other settings as
+// well.
int snd_ff_transaction_reregister(struct snd_ff *ff)
{
struct fw_card *fw_card = fw_parent_device(ff->unit)->card;
diff --git a/sound/firewire/fireface/ff.c b/sound/firewire/fireface/ff.c
index 36575f4159d1..a9611157f4c8 100644
--- a/sound/firewire/fireface/ff.c
+++ b/sound/firewire/fireface/ff.c
@@ -153,6 +153,8 @@ static const struct snd_ff_spec spec_ff800 = {
.midi_out_ports = 1,
.protocol = &snd_ff_protocol_ff800,
.midi_high_addr = 0x000200000320ull,
+ .midi_addr_range = 12,
+ .midi_rx_addrs = {0x000080180000ull, 0},
};
static const struct snd_ff_spec spec_ff400 = {
@@ -163,6 +165,20 @@ static const struct snd_ff_spec spec_ff400 = {
.midi_out_ports = 2,
.protocol = &snd_ff_protocol_ff400,
.midi_high_addr = 0x0000801003f4ull,
+ .midi_addr_range = SND_FF_MAXIMIM_MIDI_QUADS * 4,
+ .midi_rx_addrs = {0x000080180000ull, 0x000080190000ull},
+};
+
+static const struct snd_ff_spec spec_ucx = {
+ .name = "FirefaceUCX",
+ .pcm_capture_channels = {18, 14, 12},
+ .pcm_playback_channels = {18, 14, 12},
+ .midi_in_ports = 2,
+ .midi_out_ports = 2,
+ .protocol = &snd_ff_protocol_latter,
+ .midi_high_addr = 0xffff00000034ull,
+ .midi_addr_range = 0x80,
+ .midi_rx_addrs = {0xffff00000030ull, 0xffff00000030ull},
};
static const struct ieee1394_device_id snd_ff_id_table[] = {
@@ -190,6 +206,18 @@ static const struct ieee1394_device_id snd_ff_id_table[] = {
.model_id = 0x101800,
.driver_data = (kernel_ulong_t)&spec_ff400,
},
+ // Fireface UCX.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = OUI_RME,
+ .specifier_id = OUI_RME,
+ .version = 0x000004,
+ .model_id = 0x101800,
+ .driver_data = (kernel_ulong_t)&spec_ucx,
+ },
{}
};
MODULE_DEVICE_TABLE(ieee1394, snd_ff_id_table);
diff --git a/sound/firewire/fireface/ff.h b/sound/firewire/fireface/ff.h
index 7dfc7745a914..ed8fea0ff5e1 100644
--- a/sound/firewire/fireface/ff.h
+++ b/sound/firewire/fireface/ff.h
@@ -35,11 +35,6 @@
#define SND_FF_IN_MIDI_PORTS 2
#define SND_FF_OUT_MIDI_PORTS 2
-#define SND_FF_REG_SYNC_STATUS 0x0000801c0000ull
-/* For block write request. */
-#define SND_FF_REG_FETCH_PCM_FRAMES 0x0000801c0000ull
-#define SND_FF_REG_CLOCK_CONFIG 0x0000801c0004ull
-
enum snd_ff_stream_mode {
SND_FF_STREAM_MODE_LOW = 0,
SND_FF_STREAM_MODE_MID,
@@ -59,6 +54,8 @@ struct snd_ff_spec {
const struct snd_ff_protocol *protocol;
u64 midi_high_addr;
+ u8 midi_addr_range;
+ u64 midi_rx_addrs[SND_FF_OUT_MIDI_PORTS];
};
struct snd_ff {
@@ -78,7 +75,7 @@ struct snd_ff {
/* TO handle MIDI rx. */
struct snd_rawmidi_substream *rx_midi_substreams[SND_FF_OUT_MIDI_PORTS];
- u8 running_status[SND_FF_OUT_MIDI_PORTS];
+ bool on_sysex[SND_FF_OUT_MIDI_PORTS];
__le32 msg_buf[SND_FF_OUT_MIDI_PORTS][SND_FF_MAXIMIM_MIDI_QUADS];
struct work_struct rx_midi_work[SND_FF_OUT_MIDI_PORTS];
struct fw_transaction transactions[SND_FF_OUT_MIDI_PORTS];
@@ -108,16 +105,23 @@ enum snd_ff_clock_src {
};
struct snd_ff_protocol {
- void (*handle_midi_msg)(struct snd_ff *ff, __le32 *buf, size_t length);
+ void (*handle_midi_msg)(struct snd_ff *ff, unsigned int offset,
+ __le32 *buf, size_t length);
+ int (*fill_midi_msg)(struct snd_ff *ff,
+ struct snd_rawmidi_substream *substream,
+ unsigned int port);
+ int (*get_clock)(struct snd_ff *ff, unsigned int *rate,
+ enum snd_ff_clock_src *src);
+ int (*switch_fetching_mode)(struct snd_ff *ff, bool enable);
int (*begin_session)(struct snd_ff *ff, unsigned int rate);
void (*finish_session)(struct snd_ff *ff);
+ void (*dump_status)(struct snd_ff *ff, struct snd_info_buffer *buffer);
};
extern const struct snd_ff_protocol snd_ff_protocol_ff800;
extern const struct snd_ff_protocol snd_ff_protocol_ff400;
+extern const struct snd_ff_protocol snd_ff_protocol_latter;
-int snd_ff_transaction_get_clock(struct snd_ff *ff, unsigned int *rate,
- enum snd_ff_clock_src *src);
int snd_ff_transaction_register(struct snd_ff *ff);
int snd_ff_transaction_reregister(struct snd_ff *ff);
void snd_ff_transaction_unregister(struct snd_ff *ff);
@@ -142,6 +146,7 @@ int snd_ff_stream_lock_try(struct snd_ff *ff);
void snd_ff_stream_lock_release(struct snd_ff *ff);
void snd_ff_proc_init(struct snd_ff *ff);
+const char *snd_ff_proc_get_clk_label(enum snd_ff_clock_src src);
int snd_ff_create_midi_devices(struct snd_ff *ff);
diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c
index 779ecec5af62..9fa5c34a9572 100644
--- a/sound/firewire/fireworks/fireworks_proc.c
+++ b/sound/firewire/fireworks/fireworks_proc.c
@@ -199,12 +199,8 @@ add_node(struct snd_efw *efw, struct snd_info_entry *root, const char *name,
struct snd_info_entry *entry;
entry = snd_info_create_card_entry(efw->card, name, root);
- if (entry == NULL)
- return;
-
- snd_info_set_text_ops(entry, efw, op);
- if (snd_info_register(entry) < 0)
- snd_info_free_entry(entry);
+ if (entry)
+ snd_info_set_text_ops(entry, efw, op);
}
void snd_efw_proc_init(struct snd_efw *efw)
@@ -220,10 +216,6 @@ void snd_efw_proc_init(struct snd_efw *efw)
if (root == NULL)
return;
root->mode = S_IFDIR | 0555;
- if (snd_info_register(root) < 0) {
- snd_info_free_entry(root);
- return;
- }
add_node(efw, root, "clock", proc_read_clock);
add_node(efw, root, "firmware", proc_read_hwinfo);
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index f0555a24d90e..6c9b743ea74b 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -136,7 +136,9 @@ static void read_pcm_s32(struct amdtp_stream *s,
byte = (u8 *)buffer + p->pcm_byte_offset;
for (c = 0; c < channels; ++c) {
- *dst = (byte[0] << 24) | (byte[1] << 16) | byte[2];
+ *dst = (byte[0] << 24) |
+ (byte[1] << 16) |
+ (byte[2] << 8);
byte += 3;
dst++;
}
diff --git a/sound/firewire/motu/motu-proc.c b/sound/firewire/motu/motu-proc.c
index ab6830a6d242..94327853620a 100644
--- a/sound/firewire/motu/motu-proc.c
+++ b/sound/firewire/motu/motu-proc.c
@@ -87,12 +87,8 @@ static void add_node(struct snd_motu *motu, struct snd_info_entry *root,
struct snd_info_entry *entry;
entry = snd_info_create_card_entry(motu->card, name, root);
- if (entry == NULL)
- return;
-
- snd_info_set_text_ops(entry, motu, op);
- if (snd_info_register(entry) < 0)
- snd_info_free_entry(entry);
+ if (entry)
+ snd_info_set_text_ops(entry, motu, op);
}
void snd_motu_proc_init(struct snd_motu *motu)
@@ -108,10 +104,6 @@ void snd_motu_proc_init(struct snd_motu *motu)
if (root == NULL)
return;
root->mode = S_IFDIR | 0555;
- if (snd_info_register(root) < 0) {
- snd_info_free_entry(root);
- return;
- }
add_node(motu, root, "clock", proc_read_clock);
add_node(motu, root, "format", proc_read_format);
diff --git a/sound/firewire/oxfw/oxfw-proc.c b/sound/firewire/oxfw/oxfw-proc.c
index 27dac071bc73..644107e3782e 100644
--- a/sound/firewire/oxfw/oxfw-proc.c
+++ b/sound/firewire/oxfw/oxfw-proc.c
@@ -83,12 +83,8 @@ static void add_node(struct snd_oxfw *oxfw, struct snd_info_entry *root,
struct snd_info_entry *entry;
entry = snd_info_create_card_entry(oxfw->card, name, root);
- if (entry == NULL)
- return;
-
- snd_info_set_text_ops(entry, oxfw, op);
- if (snd_info_register(entry) < 0)
- snd_info_free_entry(entry);
+ if (entry)
+ snd_info_set_text_ops(entry, oxfw, op);
}
void snd_oxfw_proc_init(struct snd_oxfw *oxfw)
@@ -104,10 +100,6 @@ void snd_oxfw_proc_init(struct snd_oxfw *oxfw)
if (root == NULL)
return;
root->mode = S_IFDIR | 0555;
- if (snd_info_register(root) < 0) {
- snd_info_free_entry(root);
- return;
- }
add_node(oxfw, root, "formation", proc_read_formation);
}
diff --git a/sound/firewire/tascam/tascam-proc.c b/sound/firewire/tascam/tascam-proc.c
index fee3bf32a0da..8bc8d277394a 100644
--- a/sound/firewire/tascam/tascam-proc.c
+++ b/sound/firewire/tascam/tascam-proc.c
@@ -58,12 +58,8 @@ static void add_node(struct snd_tscm *tscm, struct snd_info_entry *root,
struct snd_info_entry *entry;
entry = snd_info_create_card_entry(tscm->card, name, root);
- if (entry == NULL)
- return;
-
- snd_info_set_text_ops(entry, tscm, op);
- if (snd_info_register(entry) < 0)
- snd_info_free_entry(entry);
+ if (entry)
+ snd_info_set_text_ops(entry, tscm, op);
}
void snd_tscm_proc_init(struct snd_tscm *tscm)
@@ -79,10 +75,6 @@ void snd_tscm_proc_init(struct snd_tscm *tscm)
if (root == NULL)
return;
root->mode = S_IFDIR | 0555;
- if (snd_info_register(root) < 0) {
- snd_info_free_entry(root);
- return;
- }
add_node(tscm, root, "firmware", proc_read_firmware);
}
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c
index a6d37b9d6413..5c95933e739a 100644
--- a/sound/hda/hdac_component.c
+++ b/sound/hda/hdac_component.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_acomp_register_notifier);
*/
int snd_hdac_acomp_init(struct hdac_bus *bus,
const struct drm_audio_component_audio_ops *aops,
- int (*match_master)(struct device *, void *),
+ int (*match_master)(struct device *, int, void *),
size_t extra_size)
{
struct component_match *match = NULL;
@@ -288,7 +288,7 @@ int snd_hdac_acomp_init(struct hdac_bus *bus,
bus->audio_component = acomp;
devres_add(dev, acomp);
- component_match_add(dev, &match, match_master, bus);
+ component_match_add_typed(dev, &match, match_master, bus);
ret = component_master_add_with_match(dev, &hdac_component_master_ops,
match);
if (ret < 0)
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 74244d8e2909..b2e9454f5816 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -376,7 +376,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
{
unsigned long timeout;
- snd_hdac_chip_updateb(bus, GCTL, 0, AZX_GCTL_RESET);
+ snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET);
timeout = jiffies + msecs_to_jiffies(100);
while (!snd_hdac_chip_readb(bus, GCTL) && time_before(jiffies, timeout))
@@ -415,7 +415,7 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
}
/* Accept unsolicited responses */
- snd_hdac_chip_updatel(bus, GCTL, 0, AZX_GCTL_UNSOL);
+ snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
/* detect codecs */
if (!bus->codec_mask) {
@@ -431,7 +431,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
static void azx_int_enable(struct hdac_bus *bus)
{
/* enable controller CIE and GIE */
- snd_hdac_chip_updatel(bus, INTCTL, 0, AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
+ snd_hdac_chip_updatel(bus, INTCTL,
+ AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN,
+ AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
}
/* disable interrupts */
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 617ff1aa818f..575198bd3cd0 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -82,9 +82,11 @@ void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
}
EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
-static int i915_component_master_match(struct device *dev, void *data)
+static int i915_component_master_match(struct device *dev, int subcomponent,
+ void *data)
{
- return !strcmp(dev->driver->name, "i915");
+ return !strcmp(dev->driver->name, "i915") &&
+ subcomponent == I915_COMPONENT_AUDIO;
}
/* check whether intel graphics is present */
@@ -144,9 +146,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
return -ENODEV;
if (!acomp->ops) {
request_module("i915");
- /* 10s timeout */
+ /* 60s timeout */
wait_for_completion_timeout(&bind_complete,
- msecs_to_jiffies(10 * 1000));
+ msecs_to_jiffies(60 * 1000));
}
if (!acomp->ops) {
dev_info(bus->dev, "couldn't bind with audio component\n");
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index eee422390d8e..f5dd288d1a7a 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -13,6 +13,40 @@
#include "trace.h"
/**
+ * snd_hdac_get_stream_stripe_ctl - get stripe control value
+ * @bus: HD-audio core bus
+ * @substream: PCM substream
+ */
+int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ unsigned int channels = runtime->channels,
+ rate = runtime->rate,
+ bits_per_sample = runtime->sample_bits,
+ max_sdo_lines, value, sdo_line;
+
+ /* T_AZA_GCAP_NSDO is 1:2 bitfields in GCAP */
+ max_sdo_lines = snd_hdac_chip_readl(bus, GCAP) & AZX_GCAP_NSDO;
+
+ /* following is from HD audio spec */
+ for (sdo_line = max_sdo_lines; sdo_line > 0; sdo_line >>= 1) {
+ if (rate > 48000)
+ value = (channels * bits_per_sample *
+ (rate / 48000)) / sdo_line;
+ else
+ value = (channels * bits_per_sample) / sdo_line;
+
+ if (value >= 8)
+ break;
+ }
+
+ /* stripe value: 0 for 1SDO, 1 for 2SDO, 2 for 4SDO lines */
+ return sdo_line >> 1;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_get_stream_stripe_ctl);
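As a worked example of the loop above (not part of the patch): for a stereo
16-bit stream at 48 kHz on a controller where the masked GCAP value gives
max_sdo_lines == 4, the first pass computes (2 * 16) / 4 = 8, which already
meets the >= 8 threshold, so the function returns 4 >> 1 = 2, i.e. the 4-SDO
stripe setting described in the comment.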
+
+/**
* snd_hdac_stream_init - initialize each stream (aka device)
* @bus: HD-audio core bus
* @azx_dev: HD-audio core stream object to initialize
@@ -48,6 +82,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_stream_init);
void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start)
{
struct hdac_bus *bus = azx_dev->bus;
+ int stripe_ctl;
trace_snd_hdac_stream_start(bus, azx_dev);
@@ -56,7 +91,13 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start)
azx_dev->start_wallclk -= azx_dev->period_wallclk;
/* enable SIE */
- snd_hdac_chip_updatel(bus, INTCTL, 0, 1 << azx_dev->index);
+ snd_hdac_chip_updatel(bus, INTCTL,
+ 1 << azx_dev->index,
+ 1 << azx_dev->index);
+ /* set stripe control */
+ stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
+ snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
+ stripe_ctl);
/* set DMA start and interrupt mask */
snd_hdac_stream_updateb(azx_dev, SD_CTL,
0, SD_CTL_DMA_START | SD_INT_MASK);
@@ -73,6 +114,7 @@ void snd_hdac_stream_clear(struct hdac_stream *azx_dev)
snd_hdac_stream_updateb(azx_dev, SD_CTL,
SD_CTL_DMA_START | SD_INT_MASK, 0);
snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
+ snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
azx_dev->running = false;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_clear);
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 4099e6062d3c..573599d0378d 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -492,9 +492,8 @@ static void snd_ak4113_proc_regs_read(struct snd_info_entry *entry,
static void snd_ak4113_proc_init(struct ak4113 *ak4113)
{
- struct snd_info_entry *entry;
- if (!snd_card_proc_new(ak4113->card, "ak4113", &entry))
- snd_info_set_text_ops(entry, ak4113, snd_ak4113_proc_regs_read);
+ snd_card_ro_proc_new(ak4113->card, "ak4113", ak4113,
+ snd_ak4113_proc_regs_read);
}
int snd_ak4113_build(struct ak4113 *ak4113,
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 7fb1aeb46915..76afb975782d 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -465,9 +465,8 @@ static void snd_ak4114_proc_regs_read(struct snd_info_entry *entry,
static void snd_ak4114_proc_init(struct ak4114 *ak4114)
{
- struct snd_info_entry *entry;
- if (!snd_card_proc_new(ak4114->card, "ak4114", &entry))
- snd_info_set_text_ops(entry, ak4114, snd_ak4114_proc_regs_read);
+ snd_card_ro_proc_new(ak4114->card, "ak4114", ak4114,
+ snd_ak4114_proc_regs_read);
}
int snd_ak4114_build(struct ak4114 *ak4114,
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index 7f2761a2e7c8..62a6c5fa96b5 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -875,13 +875,7 @@ static void proc_regs_read(struct snd_info_entry *entry,
static int proc_init(struct snd_akm4xxx *ak)
{
- struct snd_info_entry *entry;
- int err;
- err = snd_card_proc_new(ak->card, ak->name, &entry);
- if (err < 0)
- return err;
- snd_info_set_text_ops(entry, ak, proc_regs_read);
- return 0;
+ return snd_card_ro_proc_new(ak->card, ak->name, ak, proc_regs_read);
}
int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
diff --git a/sound/isa/ad1816a/ad1816a_lib.c b/sound/isa/ad1816a/ad1816a_lib.c
index fba6d22f7f4b..94b381a78e9e 100644
--- a/sound/isa/ad1816a/ad1816a_lib.c
+++ b/sound/isa/ad1816a/ad1816a_lib.c
@@ -518,7 +518,6 @@ void snd_ad1816a_suspend(struct snd_ad1816a *chip)
int reg;
unsigned long flags;
- snd_pcm_suspend_all(chip->pcm);
spin_lock_irqsave(&chip->lock, flags);
for (reg = 0; reg < 48; reg++)
chip->image[reg] = snd_ad1816a_read(chip, reg);
@@ -694,7 +693,7 @@ int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device)
snd_ad1816a_init(chip);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ chip->card->dev,
64*1024, chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);
chip->pcm = pcm;
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index f63142ec287e..571108021e9d 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -322,7 +322,6 @@ static int snd_als100_pnp_suspend(struct pnp_card_link *pcard, pm_message_t stat
struct snd_sb *chip = acard->chip;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_sbmixer_suspend(chip);
return 0;
}
diff --git a/sound/isa/cmi8328.c b/sound/isa/cmi8328.c
index de6ef1b1cf0e..617977516201 100644
--- a/sound/isa/cmi8328.c
+++ b/sound/isa/cmi8328.c
@@ -434,7 +434,6 @@ static int snd_cmi8328_suspend(struct device *pdev, unsigned int n,
cmi = card->private_data;
snd_cmi8328_cfg_save(cmi->port, cmi->cfg);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(cmi->wss->pcm);
cmi->wss->suspend(cmi->wss);
return 0;
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index 6b8c46942efb..1868b73aa49c 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -470,7 +470,7 @@ static int snd_cmi8330_pcm(struct snd_card *card, struct snd_cmi8330 *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &chip->streams[SNDRV_PCM_STREAM_CAPTURE].ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ card->dev,
64*1024, 128*1024);
chip->pcm = pcm;
@@ -484,7 +484,6 @@ static int snd_cmi8330_suspend(struct snd_card *card)
struct snd_cmi8330 *acard = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(acard->pcm);
acard->wss->suspend(acard->wss);
snd_sbmixer_suspend(acard->sb);
return 0;
diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
index 3dfe7e592c25..87527627e059 100644
--- a/sound/isa/es1688/es1688.c
+++ b/sound/isa/es1688/es1688.c
@@ -301,10 +301,8 @@ static int snd_es968_pnp_suspend(struct pnp_card_link *pcard,
pm_message_t state)
{
struct snd_card *card = pnp_get_card_drvdata(pcard);
- struct snd_es1688 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
return 0;
}
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index 50cdce0e8946..1d9556c045e9 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -121,7 +121,7 @@ EXPORT_SYMBOL(snd_es1688_reset);
static int snd_es1688_probe(struct snd_es1688 *chip)
{
unsigned long flags;
- unsigned short major, minor, hw;
+ unsigned short major, minor;
int i;
/*
@@ -166,14 +166,12 @@ static int snd_es1688_probe(struct snd_es1688 *chip)
if (!chip->version)
return -ENODEV; /* probably SB */
- hw = ES1688_HW_AUTO;
switch (chip->version & 0xfff0) {
case 0x4880:
snd_printk(KERN_ERR "[0x%lx] ESS: AudioDrive ES488 detected, "
"but driver is in another place\n", chip->port);
return -ENODEV;
case 0x6880:
- hw = (chip->version & 0x0f) >= 8 ? ES1688_HW_1688 : ES1688_HW_688;
break;
default:
snd_printk(KERN_ERR "[0x%lx] ESS: unknown AudioDrive chip "
@@ -746,7 +744,7 @@ int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ card->dev,
64*1024, 64*1024);
return 0;
}
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index 0d103d6f805e..07abc7f7840c 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -1717,7 +1717,7 @@ static int snd_es18xx_pcm(struct snd_card *card, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ card->dev,
64*1024,
chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);
return 0;
@@ -1731,8 +1731,6 @@ static int snd_es18xx_suspend(struct snd_card *card, pm_message_t state)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
-
/* power down */
chip->pm_reg = (unsigned char)snd_es18xx_read(chip, ES18XX_PM);
chip->pm_reg |= (ES18XX_PM_FM | ES18XX_PM_SUS);
diff --git a/sound/isa/gus/gus_irq.c b/sound/isa/gus/gus_irq.c
index 2055aff71b50..0ca6c38e2ed9 100644
--- a/sound/isa/gus/gus_irq.c
+++ b/sound/isa/gus/gus_irq.c
@@ -140,10 +140,7 @@ static void snd_gus_irq_info_read(struct snd_info_entry *entry,
void snd_gus_irq_profile_init(struct snd_gus_card *gus)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(gus->card, "gusirq", &entry))
- snd_info_set_text_ops(entry, gus, snd_gus_irq_info_read);
+ snd_card_ro_proc_new(gus->card, "gusirq", gus, snd_gus_irq_info_read);
}
#endif
diff --git a/sound/isa/gus/gus_main.c b/sound/isa/gus/gus_main.c
index 3b8a0c880db5..33c8b66d5c8a 100644
--- a/sound/isa/gus/gus_main.c
+++ b/sound/isa/gus/gus_main.c
@@ -92,8 +92,17 @@ static const struct snd_kcontrol_new snd_gus_joystick_control = {
static void snd_gus_init_control(struct snd_gus_card *gus)
{
- if (!gus->ace_flag)
- snd_ctl_add(gus->card, snd_ctl_new1(&snd_gus_joystick_control, gus));
+ int ret;
+
+ if (!gus->ace_flag) {
+ ret =
+ snd_ctl_add(gus->card,
+ snd_ctl_new1(&snd_gus_joystick_control,
+ gus));
+ if (ret)
+ snd_printk(KERN_ERR "gus: snd_ctl_add failed: %d\n",
+ ret);
+ }
}
/*
diff --git a/sound/isa/gus/gus_mem.c b/sound/isa/gus/gus_mem.c
index af888a022fc0..4ac76f46dd76 100644
--- a/sound/isa/gus/gus_mem.c
+++ b/sound/isa/gus/gus_mem.c
@@ -238,9 +238,6 @@ int snd_gf1_mem_init(struct snd_gus_card * gus)
{
struct snd_gf1_mem *alloc;
struct snd_gf1_mem_block block;
-#ifdef CONFIG_SND_DEBUG
- struct snd_info_entry *entry;
-#endif
alloc = &gus->gf1.mem_alloc;
mutex_init(&alloc->memory_mutex);
@@ -263,8 +260,7 @@ int snd_gf1_mem_init(struct snd_gus_card * gus)
if (snd_gf1_mem_xalloc(alloc, &block) == NULL)
return -ENOMEM;
#ifdef CONFIG_SND_DEBUG
- if (! snd_card_proc_new(gus->card, "gusmem", &entry))
- snd_info_set_text_ops(entry, gus, snd_gf1_mem_info_read);
+ snd_card_ro_proc_new(gus->card, "gusmem", gus, snd_gf1_mem_info_read);
#endif
return 0;
}
diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c
index 131b28997e1d..b9efc6dff45d 100644
--- a/sound/isa/gus/gus_pcm.c
+++ b/sound/isa/gus/gus_pcm.c
@@ -891,7 +891,7 @@ int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index)
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ card->dev,
64*1024, gus->gf1.dma1 > 3 ? 128*1024 : 64*1024);
pcm->info_flags = 0;
@@ -901,7 +901,7 @@ int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index)
if (gus->gf1.dma2 == gus->gf1.dma1)
pcm->info_flags |= SNDRV_PCM_INFO_HALF_DUPLEX;
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
- SNDRV_DMA_TYPE_DEV, snd_dma_isa_data(),
+ SNDRV_DMA_TYPE_DEV, card->dev,
64*1024, gus->gf1.dma2 > 3 ? 128*1024 : 64*1024);
}
strcpy(pcm->name, pcm->id);
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
index c6136c6b0214..997cdfd7b1ea 100644
--- a/sound/isa/opti9xx/miro.c
+++ b/sound/isa/opti9xx/miro.c
@@ -997,10 +997,7 @@ static void snd_miro_proc_read(struct snd_info_entry * entry,
static void snd_miro_proc_init(struct snd_card *card,
struct snd_miro *miro)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(card, "miro", &entry))
- snd_info_set_text_ops(entry, miro, snd_miro_proc_read);
+ snd_card_ro_proc_new(card, "miro", miro, snd_miro_proc_read);
}
/*
diff --git a/sound/isa/sb/jazz16.c b/sound/isa/sb/jazz16.c
index bfa0055e1fd6..7a313ff589c7 100644
--- a/sound/isa/sb/jazz16.c
+++ b/sound/isa/sb/jazz16.c
@@ -356,7 +356,6 @@ static int snd_jazz16_suspend(struct device *pdev, unsigned int n,
struct snd_sb *chip = acard->chip;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_sbmixer_suspend(chip);
return 0;
}
diff --git a/sound/isa/sb/sb16.c b/sound/isa/sb/sb16.c
index 8f9ebeb998f6..3844d4c02f49 100644
--- a/sound/isa/sb/sb16.c
+++ b/sound/isa/sb/sb16.c
@@ -471,7 +471,6 @@ static int snd_sb16_suspend(struct snd_card *card, pm_message_t state)
struct snd_sb *chip = acard->chip;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_sbmixer_suspend(chip);
return 0;
}
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index bf3db0d2ea12..a09ad57b8313 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -1126,10 +1126,9 @@ static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p)
static int init_proc_entry(struct snd_sb_csp * p, int device)
{
char name[16];
- struct snd_info_entry *entry;
+
sprintf(name, "cspD%d", device);
- if (! snd_card_proc_new(p->chip->card, name, &entry))
- snd_info_set_text_ops(entry, p, info_read);
+ snd_card_ro_proc_new(p->chip->card, name, p, info_read);
return 0;
}
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index 37e6ce7b0b13..473ec74ae48c 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -879,13 +879,17 @@ int snd_sb16dsp_pcm(struct snd_sb *chip, int device)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb16_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb16_capture_ops);
- if (chip->dma16 >= 0 && chip->dma8 != chip->dma16)
- snd_ctl_add(card, snd_ctl_new1(&snd_sb16_dma_control, chip));
- else
+ if (chip->dma16 >= 0 && chip->dma8 != chip->dma16) {
+ err = snd_ctl_add(card, snd_ctl_new1(
+ &snd_sb16_dma_control, chip));
+ if (err)
+ return err;
+ } else {
pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
+ }
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ card->dev,
64*1024, 128*1024);
return 0;
}
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index d77dcba276b5..aa2a83eb81a9 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -218,7 +218,6 @@ static int snd_sb8_suspend(struct device *dev, unsigned int n,
struct snd_sb *chip = acard->chip;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_sbmixer_suspend(chip);
return 0;
}
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c
index 8288fae90085..97645a732a71 100644
--- a/sound/isa/sb/sb8_main.c
+++ b/sound/isa/sb/sb8_main.c
@@ -610,7 +610,7 @@ int snd_sb8dsp_pcm(struct snd_sb *chip, int device)
if (chip->dma8 > 3 || chip->dma16 >= 0)
max_prealloc = 128 * 1024;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ card->dev,
64*1024, max_prealloc);
return 0;
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index 733adee5afbf..8181db4db019 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -167,12 +167,13 @@ static inline struct soundscape *get_card_soundscape(struct snd_card *c)
* I think this means that the memory has to map to
* contiguous pages of physical memory.
*/
-static struct snd_dma_buffer *get_dmabuf(struct snd_dma_buffer *buf,
+static struct snd_dma_buffer *get_dmabuf(struct soundscape *s,
+ struct snd_dma_buffer *buf,
unsigned long size)
{
if (buf) {
if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ s->chip->card->dev,
size, buf) < 0) {
snd_printk(KERN_ERR "sscape: Failed to allocate "
"%lu bytes for DMA\n",
@@ -443,7 +444,7 @@ static int upload_dma_data(struct soundscape *s, const unsigned char *data,
int ret;
unsigned char val;
- if (!get_dmabuf(&dma, PAGE_ALIGN(32 * 1024)))
+ if (!get_dmabuf(s, &dma, PAGE_ALIGN(32 * 1024)))
return -ENOMEM;
spin_lock_irqsave(&s->lock, flags);
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 3a5008837576..0dfb8065b403 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1625,7 +1625,6 @@ static void snd_wss_suspend(struct snd_wss *chip)
int reg;
unsigned long flags;
- snd_pcm_suspend_all(chip->pcm);
spin_lock_irqsave(&chip->reg_lock, flags);
for (reg = 0; reg < 32; reg++)
chip->image[reg] = snd_wss_in(chip, reg);
@@ -1943,7 +1942,7 @@ int snd_wss_pcm(struct snd_wss *chip, int device)
strcpy(pcm->name, snd_wss_chip_id(chip));
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_isa_data(),
+ chip->card->dev,
64*1024, chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);
chip->pcm = pcm;
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index a4ed54aeaf1d..d63e1565b62b 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -454,21 +454,22 @@ static inline void hal2_stop_adc(struct snd_hal2 *hal2)
hal2->adc.pbus.pbus->pbdma_ctrl = HPC3_PDMACTRL_LD;
}
-static int hal2_alloc_dmabuf(struct hal2_codec *codec)
+static int hal2_alloc_dmabuf(struct snd_hal2 *hal2, struct hal2_codec *codec)
{
+ struct device *dev = hal2->card->dev;
struct hal2_desc *desc;
dma_addr_t desc_dma, buffer_dma;
int count = H2_BUF_SIZE / H2_BLOCK_SIZE;
int i;
- codec->buffer = dma_alloc_attrs(NULL, H2_BUF_SIZE, &buffer_dma,
+ codec->buffer = dma_alloc_attrs(dev, H2_BUF_SIZE, &buffer_dma,
GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if (!codec->buffer)
return -ENOMEM;
- desc = dma_alloc_attrs(NULL, count * sizeof(struct hal2_desc),
+ desc = dma_alloc_attrs(dev, count * sizeof(struct hal2_desc),
&desc_dma, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if (!desc) {
- dma_free_attrs(NULL, H2_BUF_SIZE, codec->buffer, buffer_dma,
+ dma_free_attrs(dev, H2_BUF_SIZE, codec->buffer, buffer_dma,
DMA_ATTR_NON_CONSISTENT);
return -ENOMEM;
}
@@ -482,17 +483,19 @@ static int hal2_alloc_dmabuf(struct hal2_codec *codec)
desc_dma : desc_dma + (i + 1) * sizeof(struct hal2_desc);
desc++;
}
- dma_cache_sync(NULL, codec->desc, count * sizeof(struct hal2_desc),
+ dma_cache_sync(dev, codec->desc, count * sizeof(struct hal2_desc),
DMA_TO_DEVICE);
codec->desc_count = count;
return 0;
}
-static void hal2_free_dmabuf(struct hal2_codec *codec)
+static void hal2_free_dmabuf(struct snd_hal2 *hal2, struct hal2_codec *codec)
{
- dma_free_attrs(NULL, codec->desc_count * sizeof(struct hal2_desc),
+ struct device *dev = hal2->card->dev;
+
+ dma_free_attrs(dev, codec->desc_count * sizeof(struct hal2_desc),
codec->desc, codec->desc_dma, DMA_ATTR_NON_CONSISTENT);
- dma_free_attrs(NULL, H2_BUF_SIZE, codec->buffer, codec->buffer_dma,
+ dma_free_attrs(dev, H2_BUF_SIZE, codec->buffer, codec->buffer_dma,
DMA_ATTR_NON_CONSISTENT);
}
@@ -540,7 +543,7 @@ static int hal2_playback_open(struct snd_pcm_substream *substream)
runtime->hw = hal2_pcm_hw;
- err = hal2_alloc_dmabuf(&hal2->dac);
+ err = hal2_alloc_dmabuf(hal2, &hal2->dac);
if (err)
return err;
return 0;
@@ -550,7 +553,7 @@ static int hal2_playback_close(struct snd_pcm_substream *substream)
{
struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream);
- hal2_free_dmabuf(&hal2->dac);
+ hal2_free_dmabuf(hal2, &hal2->dac);
return 0;
}
@@ -606,7 +609,7 @@ static void hal2_playback_transfer(struct snd_pcm_substream *substream,
unsigned char *buf = hal2->dac.buffer + rec->hw_data;
memcpy(buf, substream->runtime->dma_area + rec->sw_data, bytes);
- dma_cache_sync(NULL, buf, bytes, DMA_TO_DEVICE);
+ dma_cache_sync(hal2->card->dev, buf, bytes, DMA_TO_DEVICE);
}
@@ -629,7 +632,7 @@ static int hal2_capture_open(struct snd_pcm_substream *substream)
runtime->hw = hal2_pcm_hw;
- err = hal2_alloc_dmabuf(adc);
+ err = hal2_alloc_dmabuf(hal2, adc);
if (err)
return err;
return 0;
@@ -639,7 +642,7 @@ static int hal2_capture_close(struct snd_pcm_substream *substream)
{
struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream);
- hal2_free_dmabuf(&hal2->adc);
+ hal2_free_dmabuf(hal2, &hal2->adc);
return 0;
}
@@ -694,7 +697,7 @@ static void hal2_capture_transfer(struct snd_pcm_substream *substream,
struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream);
unsigned char *buf = hal2->adc.buffer + rec->hw_data;
- dma_cache_sync(NULL, buf, bytes, DMA_FROM_DEVICE);
+ dma_cache_sync(hal2->card->dev, buf, bytes, DMA_FROM_DEVICE);
memcpy(substream->runtime->dma_area + rec->sw_data, buf, bytes);
}
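
The hal2 hunks thread the card's struct device down into the DMA helpers instead of passing NULL, which the DMA-mapping core does not accept for real allocations. A reduced sketch of the allocate/sync/free trio under the same non-consistent-mapping assumption as the driver above (the buffer size is illustrative):

#include <linux/dma-mapping.h>

#define MY_BUF_SIZE     (16 * 1024)     /* illustrative */

static void *my_alloc_buf(struct device *dev, dma_addr_t *dma)
{
        /* a real device pointer is required here, not NULL */
        return dma_alloc_attrs(dev, MY_BUF_SIZE, dma, GFP_KERNEL,
                               DMA_ATTR_NON_CONSISTENT);
}

static void my_sync_to_device(struct device *dev, void *buf, size_t bytes)
{
        /* non-consistent memory must be synced before the device reads it */
        dma_cache_sync(dev, buf, bytes, DMA_TO_DEVICE);
}

static void my_free_buf(struct device *dev, void *buf, dma_addr_t dma)
{
        dma_free_attrs(dev, MY_BUF_SIZE, buf, dma, DMA_ATTR_NON_CONSISTENT);
}
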
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 3ec9391a4736..53a4ee01c522 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -805,7 +805,7 @@ static int snd_sgio2audio_free(struct snd_sgio2audio *chip)
free_irq(snd_sgio2_isr_table[i].irq,
&chip->channel[snd_sgio2_isr_table[i].idx]);
- dma_free_coherent(NULL, MACEISA_RINGBUFFERS_SIZE,
+ dma_free_coherent(chip->card->dev, MACEISA_RINGBUFFERS_SIZE,
chip->ring_base, chip->ring_base_dma);
/* release card data */
@@ -843,8 +843,9 @@ static int snd_sgio2audio_create(struct snd_card *card,
chip->card = card;
- chip->ring_base = dma_alloc_coherent(NULL, MACEISA_RINGBUFFERS_SIZE,
- &chip->ring_base_dma, GFP_USER);
+ chip->ring_base = dma_alloc_coherent(card->dev,
+ MACEISA_RINGBUFFERS_SIZE,
+ &chip->ring_base_dma, GFP_KERNEL);
if (chip->ring_base == NULL) {
printk(KERN_ERR
"sgio2audio: could not allocate ring buffers\n");
diff --git a/sound/parisc/harmony.c b/sound/parisc/harmony.c
index f36e7006e00c..a4264b8943f0 100644
--- a/sound/parisc/harmony.c
+++ b/sound/parisc/harmony.c
@@ -669,14 +669,8 @@ snd_harmony_pcm_init(struct snd_harmony *h)
}
/* pre-allocate space for DMA */
- err = snd_pcm_lib_preallocate_pages_for_all(pcm, h->dma.type,
- h->dma.dev,
- MAX_BUF_SIZE,
- MAX_BUF_SIZE);
- if (err < 0) {
- printk(KERN_ERR PFX "buffer allocation error: %d\n", err);
- return err;
- }
+ snd_pcm_lib_preallocate_pages_for_all(pcm, h->dma.type, h->dma.dev,
+ MAX_BUF_SIZE, MAX_BUF_SIZE);
h->st.format = snd_harmony_set_data_format(h,
SNDRV_PCM_FORMAT_S16_BE, 1);
diff --git a/sound/pci/ac97/ac97_proc.c b/sound/pci/ac97/ac97_proc.c
index e120a11c69e8..20516b6907b5 100644
--- a/sound/pci/ac97/ac97_proc.c
+++ b/sound/pci/ac97/ac97_proc.c
@@ -436,25 +436,20 @@ void snd_ac97_proc_init(struct snd_ac97 * ac97)
return;
prefix = ac97_is_audio(ac97) ? "ac97" : "mc97";
sprintf(name, "%s#%d-%d", prefix, ac97->addr, ac97->num);
- if ((entry = snd_info_create_card_entry(ac97->bus->card, name, ac97->bus->proc)) != NULL) {
+ entry = snd_info_create_card_entry(ac97->bus->card, name,
+ ac97->bus->proc);
+ if (entry)
snd_info_set_text_ops(entry, ac97, snd_ac97_proc_read);
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
ac97->proc = entry;
sprintf(name, "%s#%d-%d+regs", prefix, ac97->addr, ac97->num);
- if ((entry = snd_info_create_card_entry(ac97->bus->card, name, ac97->bus->proc)) != NULL) {
+ entry = snd_info_create_card_entry(ac97->bus->card, name,
+ ac97->bus->proc);
+ if (entry) {
snd_info_set_text_ops(entry, ac97, snd_ac97_proc_regs_read);
#ifdef CONFIG_SND_DEBUG
entry->mode |= 0200;
entry->c.text.write = snd_ac97_proc_regs_write;
#endif
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
}
ac97->proc_regs = entry;
}
@@ -473,13 +468,10 @@ void snd_ac97_bus_proc_init(struct snd_ac97_bus * bus)
char name[32];
sprintf(name, "codec97#%d", bus->num);
- if ((entry = snd_info_create_card_entry(bus->card, name, bus->card->proc_root)) != NULL) {
+ entry = snd_info_create_card_entry(bus->card, name,
+ bus->card->proc_root);
+ if (entry)
entry->mode = S_IFDIR | 0555;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
bus->proc = entry;
}
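
The ac97_proc hunks drop the snd_info_register()/snd_info_free_entry() error dance after creating each entry; with this series the explicit registration step is no longer needed here, since the ALSA core registers the entries along with the card. A minimal sketch of the simplified pattern (the entry name and read callback are illustrative):

#include <sound/core.h>
#include <sound/info.h>

static void my_proc_read(struct snd_info_entry *entry,
                         struct snd_info_buffer *buffer)
{
        snd_iprintf(buffer, "status of %s\n", entry->name);
}

static void my_proc_init(struct snd_card *card, void *chip)
{
        struct snd_info_entry *entry;

        entry = snd_info_create_card_entry(card, "my-status",
                                           card->proc_root);
        if (entry)      /* registration now happens with the card itself */
                snd_info_set_text_ops(entry, chip, my_proc_read);
}
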
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index d9c54c08e2db..fef07ae648e6 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -644,16 +644,11 @@ snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device)
chip->psubs = NULL;
chip->csubs = NULL;
- err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
BUFFER_BYTES_MAX / 2,
BUFFER_BYTES_MAX);
- if (err < 0) {
- dev_err(chip->card->dev, "buffer allocation error: %d\n", err);
- return err;
- }
-
return 0;
}
@@ -741,10 +736,8 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
static void
snd_ad1889_proc_init(struct snd_ad1889 *chip)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(chip->card, chip->card->driver, &entry))
- snd_info_set_text_ops(entry, chip, snd_ad1889_proc_read);
+ snd_card_ro_proc_new(chip->card, chip->card->driver,
+ chip, snd_ad1889_proc_read);
}
static const struct ac97_quirk ac97_quirks[] = {
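
From ad1889 onward the remaining proc boilerplate collapses into the new helpers snd_card_ro_proc_new() and snd_card_rw_proc_new(), which create a text entry and hook up the read (and optional write) callbacks in one call; the ca0106, cs46xx and emu10k1 hunks further down use the same two helpers. A hedged sketch of both forms with illustrative names:

#include <sound/core.h>
#include <sound/info.h>

static void my_regs_read(struct snd_info_entry *entry,
                         struct snd_info_buffer *buffer)
{
        snd_iprintf(buffer, "register dump...\n");
}

static void my_regs_write(struct snd_info_entry *entry,
                          struct snd_info_buffer *buffer)
{
        /* parse the user's input from 'buffer' and poke registers */
}

static void my_proc_init(struct snd_card *card, void *chip)
{
        /* read-only text entry */
        snd_card_ro_proc_new(card, "my-info", chip, my_regs_read);
        /* read/write entry; the read op may be NULL for write-only files */
        snd_card_rw_proc_new(card, "my-regs", chip,
                             my_regs_read, my_regs_write);
}
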
diff --git a/sound/pci/ak4531_codec.c b/sound/pci/ak4531_codec.c
index 2fb1fbba3e5e..11e902cac71b 100644
--- a/sound/pci/ak4531_codec.c
+++ b/sound/pci/ak4531_codec.c
@@ -481,8 +481,5 @@ static void snd_ak4531_proc_read(struct snd_info_entry *entry,
static void
snd_ak4531_proc_init(struct snd_card *card, struct snd_ak4531 *ak4531)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(card, "ak4531", &entry))
- snd_info_set_text_ops(entry, ak4531, snd_ak4531_proc_read);
+ snd_card_ro_proc_new(card, "ak4531", ak4531, snd_ak4531_proc_read);
}
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 9f569379b77e..f7fbe05836b3 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1882,10 +1882,8 @@ static int ali_suspend(struct device *dev)
return 0;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < chip->num_of_codecs; i++) {
- snd_pcm_suspend_all(chip->pcm[i]);
+ for (i = 0; i < chip->num_of_codecs; i++)
snd_ac97_suspend(chip->ac97[i]);
- }
spin_lock_irq(&chip->reg_lock);
@@ -2051,9 +2049,7 @@ static void snd_ali_proc_read(struct snd_info_entry *entry,
static void snd_ali_proc_init(struct snd_ali *codec)
{
- struct snd_info_entry *entry;
- if (!snd_card_proc_new(codec->card, "ali5451", &entry))
- snd_info_set_text_ops(entry, codec, snd_ali_proc_read);
+ snd_card_ro_proc_new(codec->card, "ali5451", codec, snd_ali_proc_read);
}
static int snd_ali_resources(struct snd_ali *codec)
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index eaa2d853d922..516b3d9cbfdf 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -731,7 +731,6 @@ static int snd_als300_suspend(struct device *dev)
struct snd_als300 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
return 0;
}
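
The als300 hunk above, like the wss_lib and ali5451 ones before it and dozens after it, simply deletes the driver-side snd_pcm_suspend_all() calls: with this series the PCM core suspends every substream of a card on its own during system suspend, so a driver's suspend callback is left with only its hardware-specific work. A hedged sketch of what such a callback looks like afterwards (the chip structure and AC'97 handling are illustrative):

#include <linux/device.h>
#include <sound/core.h>
#include <sound/ac97_codec.h>

struct my_chip {
        struct snd_ac97 *ac97;
};

#ifdef CONFIG_PM_SLEEP
static int my_suspend(struct device *dev)
{
        struct snd_card *card = dev_get_drvdata(dev);
        struct my_chip *chip = card->private_data;

        snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
        /* no snd_pcm_suspend_all() here any more: the PCM core has
         * already suspended this card's substreams */
        snd_ac97_suspend(chip->ac97);
        /* ... save registers, stop DMA, etc. ... */
        return 0;
}
#endif
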
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index 26b097edec8c..45fa38382e79 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -994,7 +994,6 @@ static int snd_als4000_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_sbmixer_suspend(chip);
return 0;
}
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index aad74e809797..32b2f9802479 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -2782,10 +2782,8 @@ snd_asihpi_proc_read(struct snd_info_entry *entry,
static void snd_asihpi_proc_init(struct snd_card_asihpi *asihpi)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(asihpi->card, "info", &entry))
- snd_info_set_text_ops(entry, asihpi, snd_asihpi_proc_read);
+ snd_card_ro_proc_new(asihpi->card, "info", asihpi,
+ snd_asihpi_proc_read);
}
/*------------------------------------------------------------
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 1a41f8c80243..169763c88f5e 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -733,6 +733,10 @@ static int snd_atiixp_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
+ if (dma->running && dma->suspended &&
+ cmd == SNDRV_PCM_TRIGGER_RESUME)
+ writel(dma->saved_curptr, chip->remap_addr +
+ dma->ops->dt_cur);
dma->ops->enable_transfer(chip, 1);
dma->running = 1;
dma->suspended = 0;
@@ -740,9 +744,12 @@ static int snd_atiixp_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
+ dma->suspended = cmd == SNDRV_PCM_TRIGGER_SUSPEND;
+ if (dma->running && dma->suspended)
+ dma->saved_curptr = readl(chip->remap_addr +
+ dma->ops->dt_cur);
dma->ops->enable_transfer(chip, 0);
dma->running = 0;
- dma->suspended = cmd == SNDRV_PCM_TRIGGER_SUSPEND;
break;
default:
err = -EINVAL;
@@ -1479,14 +1486,6 @@ static int snd_atiixp_suspend(struct device *dev)
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < NUM_ATI_PCMDEVS; i++)
- if (chip->pcmdevs[i]) {
- struct atiixp_dma *dma = &chip->dmas[i];
- if (dma->substream && dma->running)
- dma->saved_curptr = readl(chip->remap_addr +
- dma->ops->dt_cur);
- snd_pcm_suspend_all(chip->pcmdevs[i]);
- }
for (i = 0; i < NUM_ATI_CODECS; i++)
snd_ac97_suspend(chip->ac97[i]);
snd_atiixp_aclink_down(chip);
@@ -1514,8 +1513,6 @@ static int snd_atiixp_resume(struct device *dev)
dma->substream->ops->prepare(dma->substream);
writel((u32)dma->desc_buf.addr | ATI_REG_LINKPTR_EN,
chip->remap_addr + dma->ops->llp_offset);
- writel(dma->saved_curptr, chip->remap_addr +
- dma->ops->dt_cur);
}
}
@@ -1546,10 +1543,7 @@ static void snd_atiixp_proc_read(struct snd_info_entry *entry,
static void snd_atiixp_proc_init(struct atiixp *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "atiixp", &entry))
- snd_info_set_text_ops(entry, chip, snd_atiixp_proc_read);
+ snd_card_ro_proc_new(chip->card, "atiixp", chip, snd_atiixp_proc_read);
}
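
The atiixp change moves the DMA read-pointer bookkeeping out of the suspend/resume path and into the trigger callback: the current pointer is saved when a running stream gets SNDRV_PCM_TRIGGER_SUSPEND and written back on SNDRV_PCM_TRIGGER_RESUME, so the suspend handler no longer has to walk all PCM devices. A stripped-down sketch of just that save/restore step (the structure below is a stand-in, not the driver's real layout):

#include <linux/io.h>
#include <linux/types.h>

struct my_stream {
        unsigned int running:1;
        unsigned int suspended:1;
        u32 saved_curptr;
};

static void my_trigger_suspend(struct my_stream *s, void __iomem *cur_reg)
{
        s->suspended = 1;
        if (s->running)
                s->saved_curptr = readl(cur_reg);  /* remember hw position */
}

static void my_trigger_resume(struct my_stream *s, void __iomem *cur_reg)
{
        /* restore the pointer before re-enabling the transfer */
        if (s->running && s->suspended)
                writel(s->saved_curptr, cur_reg);
        s->suspended = 0;
}
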
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index dc1de860cedf..cece66bb3644 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -1125,8 +1125,6 @@ static int snd_atiixp_suspend(struct device *dev)
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < NUM_ATI_PCMDEVS; i++)
- snd_pcm_suspend_all(chip->pcmdevs[i]);
for (i = 0; i < NUM_ATI_CODECS; i++)
snd_ac97_suspend(chip->ac97[i]);
snd_atiixp_aclink_down(chip);
@@ -1172,10 +1170,8 @@ static void snd_atiixp_proc_read(struct snd_info_entry *entry,
static void snd_atiixp_proc_init(struct atiixp_modem *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "atiixp-modem", &entry))
- snd_info_set_text_ops(entry, chip, snd_atiixp_proc_read);
+ snd_card_ro_proc_new(chip->card, "atiixp-modem", chip,
+ snd_atiixp_proc_read);
}
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index 9a49e4243a9c..b07c5fc1da56 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -624,15 +624,10 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* pre-allocation of buffers */
/* Preallocate continuous pages. */
- err = snd_pcm_lib_preallocate_pages_for_all(pcm_playback_ana,
- SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data
- (chip->pci),
- 64 * 1024, 64 * 1024);
- if (err)
- dev_err(chip->card->dev,
- "snd_pcm_lib_preallocate_pages_for_all error (0x%X)\n",
- err);
+ snd_pcm_lib_preallocate_pages_for_all(pcm_playback_ana,
+ SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(chip->pci),
+ 64 * 1024, 64 * 1024);
err = snd_pcm_new(chip->card, "Audiowerk2 digital playback", 1, 1, 0,
&pcm_playback_num);
@@ -661,15 +656,10 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* pre-allocation of buffers */
/* Preallocate continuous pages. */
- err = snd_pcm_lib_preallocate_pages_for_all(pcm_playback_num,
- SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data
- (chip->pci),
- 64 * 1024, 64 * 1024);
- if (err)
- dev_err(chip->card->dev,
- "snd_pcm_lib_preallocate_pages_for_all error (0x%X)\n",
- err);
+ snd_pcm_lib_preallocate_pages_for_all(pcm_playback_num,
+ SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(chip->pci),
+ 64 * 1024, 64 * 1024);
err = snd_pcm_new(chip->card, "Audiowerk2 capture", 2, 0, 1,
&pcm_capture);
@@ -699,16 +689,10 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* pre-allocation of buffers */
/* Preallocate continuous pages. */
- err = snd_pcm_lib_preallocate_pages_for_all(pcm_capture,
- SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data
- (chip->pci),
- 64 * 1024, 64 * 1024);
- if (err)
- dev_err(chip->card->dev,
- "snd_pcm_lib_preallocate_pages_for_all error (0x%X)\n",
- err);
-
+ snd_pcm_lib_preallocate_pages_for_all(pcm_capture,
+ SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(chip->pci),
+ 64 * 1024, 64 * 1024);
/* Create control */
err = snd_ctl_add(chip->card, snd_ctl_new1(&aw2_control, chip));
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index fc18c29a8173..90348817f096 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -2699,10 +2699,6 @@ snd_azf3328_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- /* same pcm object for playback/capture */
- snd_pcm_suspend_all(chip->pcm[AZF_CODEC_PLAYBACK]);
- snd_pcm_suspend_all(chip->pcm[AZF_CODEC_I2S_OUT]);
-
snd_azf3328_suspend_ac97(chip);
snd_azf3328_suspend_regs(chip, chip->ctrl_io,
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index ba971042f871..0adcba10c067 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -714,11 +714,11 @@ static int snd_bt87x_pcm(struct snd_bt87x *chip, int device, char *name)
pcm->private_data = chip;
strcpy(pcm->name, name);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_bt87x_pcm_ops);
- return snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
- 128 * 1024,
- ALIGN(255 * 4092, 1024));
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
+ snd_dma_pci_data(chip->pci),
+ 128 * 1024,
+ ALIGN(255 * 4092, 1024));
+ return 0;
}
static int snd_bt87x_create(struct snd_card *card,
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index cd27b5536654..11ef0d636405 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1402,21 +1402,17 @@ static int snd_ca0106_pcm(struct snd_ca0106 *emu, int device)
for(substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
substream;
substream = substream->next) {
- if ((err = snd_pcm_lib_preallocate_pages(substream,
- SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
- 64*1024, 64*1024)) < 0) /* FIXME: 32*1024 for sound buffer, between 32and64 for Periods table. */
- return err;
+ snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(emu->pci),
+ 64*1024, 64*1024);
}
for (substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
substream;
substream = substream->next) {
- if ((err = snd_pcm_lib_preallocate_pages(substream,
- SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
- 64*1024, 64*1024)) < 0)
- return err;
+ snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(emu->pci),
+ 64*1024, 64*1024);
}
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2,
@@ -1910,11 +1906,8 @@ static int snd_ca0106_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct snd_ca0106 *chip = card->private_data;
- int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < 4; i++)
- snd_pcm_suspend_all(chip->pcm[i]);
if (chip->details->ac97)
snd_ac97_suspend(chip->ac97);
snd_ca0106_mixer_suspend(chip);
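
The ca0106 PCM hunk shows the per-substream variant of the same preallocation cleanup: snd_pcm_lib_preallocate_pages() is now called without checking its result, for each substream in turn. A compact sketch of that loop over both directions (device pointer and sizes are illustrative):

#include <sound/pcm.h>

static void my_preallocate(struct snd_pcm *pcm, struct device *dev)
{
        struct snd_pcm_substream *ss;
        int stream;

        for (stream = 0; stream < 2; stream++)
                for (ss = pcm->streams[stream].substream; ss; ss = ss->next)
                        /* best-effort; no error to propagate */
                        snd_pcm_lib_preallocate_pages(ss, SNDRV_DMA_TYPE_DEV,
                                                      dev, 64 * 1024,
                                                      64 * 1024);
}
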
diff --git a/sound/pci/ca0106/ca0106_proc.c b/sound/pci/ca0106/ca0106_proc.c
index a2c85cc37972..f5b8934db735 100644
--- a/sound/pci/ca0106/ca0106_proc.c
+++ b/sound/pci/ca0106/ca0106_proc.c
@@ -424,30 +424,20 @@ static void snd_ca0106_proc_i2c_write(struct snd_info_entry *entry,
int snd_ca0106_proc_init(struct snd_ca0106 *emu)
{
- struct snd_info_entry *entry;
-
- if(! snd_card_proc_new(emu->card, "iec958", &entry))
- snd_info_set_text_ops(entry, emu, snd_ca0106_proc_iec958);
- if(! snd_card_proc_new(emu->card, "ca0106_reg32", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read32);
- entry->c.text.write = snd_ca0106_proc_reg_write32;
- entry->mode |= 0200;
- }
- if(! snd_card_proc_new(emu->card, "ca0106_reg16", &entry))
- snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read16);
- if(! snd_card_proc_new(emu->card, "ca0106_reg8", &entry))
- snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read8);
- if(! snd_card_proc_new(emu->card, "ca0106_regs1", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read1);
- entry->c.text.write = snd_ca0106_proc_reg_write;
- entry->mode |= 0200;
- }
- if(! snd_card_proc_new(emu->card, "ca0106_i2c", &entry)) {
- entry->c.text.write = snd_ca0106_proc_i2c_write;
- entry->private_data = emu;
- entry->mode |= 0200;
- }
- if(! snd_card_proc_new(emu->card, "ca0106_regs2", &entry))
- snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read2);
+ snd_card_ro_proc_new(emu->card, "iec958", emu, snd_ca0106_proc_iec958);
+ snd_card_rw_proc_new(emu->card, "ca0106_reg32", emu,
+ snd_ca0106_proc_reg_read32,
+ snd_ca0106_proc_reg_write32);
+ snd_card_ro_proc_new(emu->card, "ca0106_reg16", emu,
+ snd_ca0106_proc_reg_read16);
+ snd_card_ro_proc_new(emu->card, "ca0106_reg8", emu,
+ snd_ca0106_proc_reg_read8);
+ snd_card_rw_proc_new(emu->card, "ca0106_regs1", emu,
+ snd_ca0106_proc_reg_read1,
+ snd_ca0106_proc_reg_write);
+ snd_card_rw_proc_new(emu->card, "ca0106_i2c", emu, NULL,
+ snd_ca0106_proc_i2c_write);
+ snd_card_ro_proc_new(emu->card, "ca0106_regs2", emu,
+ snd_ca0106_proc_reg_read2);
return 0;
}
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 452cc79b44af..701be04aed53 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -2792,10 +2792,7 @@ static void snd_cmipci_proc_read(struct snd_info_entry *entry,
static void snd_cmipci_proc_init(struct cmipci *cm)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(cm->card, "cmipci", &entry))
- snd_info_set_text_ops(entry, cm, snd_cmipci_proc_read);
+ snd_card_ro_proc_new(cm->card, "cmipci", cm, snd_cmipci_proc_read);
}
static const struct pci_device_id snd_cmipci_ids[] = {
@@ -3351,10 +3348,6 @@ static int snd_cmipci_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(cm->pcm);
- snd_pcm_suspend_all(cm->pcm2);
- snd_pcm_suspend_all(cm->pcm_spdif);
-
/* save registers */
for (i = 0; i < ARRAY_SIZE(saved_regs); i++)
cm->saved_regs[i] = snd_cmipci_read(cm, saved_regs[i]);
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index ec4247638fa1..15bbf9564c82 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1174,8 +1174,7 @@ static void snd_cs4281_proc_init(struct cs4281 *chip)
{
struct snd_info_entry *entry;
- if (! snd_card_proc_new(chip->card, "cs4281", &entry))
- snd_info_set_text_ops(entry, chip, snd_cs4281_proc_read);
+ snd_card_ro_proc_new(chip->card, "cs4281", chip, snd_cs4281_proc_read);
if (! snd_card_proc_new(chip->card, "cs4281_BA0", &entry)) {
entry->content = SNDRV_INFO_CONTENT_DATA;
entry->private_data = chip;
@@ -2002,8 +2001,6 @@ static int cs4281_suspend(struct device *dev)
unsigned int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
-
snd_ac97_suspend(chip->ac97);
snd_ac97_suspend(chip->ac97_secondary);
diff --git a/sound/pci/cs46xx/cs46xx_dsp_spos.h b/sound/pci/cs46xx/cs46xx_dsp_spos.h
index 8008c59288a6..a02e1e19c021 100644
--- a/sound/pci/cs46xx/cs46xx_dsp_spos.h
+++ b/sound/pci/cs46xx/cs46xx_dsp_spos.h
@@ -177,22 +177,16 @@ struct dsp_spos_instance {
/* proc fs */
struct snd_card *snd_card;
struct snd_info_entry * proc_dsp_dir;
- struct snd_info_entry * proc_sym_info_entry;
- struct snd_info_entry * proc_modules_info_entry;
- struct snd_info_entry * proc_parameter_dump_info_entry;
- struct snd_info_entry * proc_sample_dump_info_entry;
/* SCB's descriptors */
int nscb;
int scb_highest_frag_index;
struct dsp_scb_descriptor scbs[DSP_MAX_SCB_DESC];
- struct snd_info_entry * proc_scb_info_entry;
struct dsp_scb_descriptor * the_null_scb;
/* Task's descriptors */
int ntask;
struct dsp_task_descriptor tasks[DSP_MAX_TASK_DESC];
- struct snd_info_entry * proc_task_info_entry;
/* SPDIF status */
int spdif_status_out;
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 750eec437a79..a77d4cc44028 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -3781,12 +3781,6 @@ static int snd_cs46xx_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
chip->in_suspend = 1;
- snd_pcm_suspend_all(chip->pcm);
-#ifdef CONFIG_SND_CS46XX_NEW_DSP
- snd_pcm_suspend_all(chip->pcm_rear);
- snd_pcm_suspend_all(chip->pcm_center_lfe);
- snd_pcm_suspend_all(chip->pcm_iec958);
-#endif
// chip->ac97_powerdown = snd_cs46xx_codec_read(chip, AC97_POWER_CONTROL);
// chip->ac97_general_purpose = snd_cs46xx_codec_read(chip, BA0_AC97_GENERAL_PURPOSE);
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 598d140bb7cb..c28e58602679 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -799,92 +799,49 @@ int cs46xx_dsp_proc_init (struct snd_card *card, struct snd_cs46xx *chip)
ins->snd_card = card;
- if ((entry = snd_info_create_card_entry(card, "dsp", card->proc_root)) != NULL) {
- entry->content = SNDRV_INFO_CONTENT_TEXT;
+ entry = snd_info_create_card_entry(card, "dsp", card->proc_root);
+ if (entry)
entry->mode = S_IFDIR | 0555;
-
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
-
ins->proc_dsp_dir = entry;
if (!ins->proc_dsp_dir)
return -ENOMEM;
- if ((entry = snd_info_create_card_entry(card, "spos_symbols", ins->proc_dsp_dir)) != NULL) {
- entry->content = SNDRV_INFO_CONTENT_TEXT;
- entry->private_data = chip;
- entry->mode = S_IFREG | 0644;
- entry->c.text.read = cs46xx_dsp_proc_symbol_table_read;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- ins->proc_sym_info_entry = entry;
+ entry = snd_info_create_card_entry(card, "spos_symbols",
+ ins->proc_dsp_dir);
+ if (entry)
+ snd_info_set_text_ops(entry, chip,
+ cs46xx_dsp_proc_symbol_table_read);
- if ((entry = snd_info_create_card_entry(card, "spos_modules", ins->proc_dsp_dir)) != NULL) {
- entry->content = SNDRV_INFO_CONTENT_TEXT;
- entry->private_data = chip;
- entry->mode = S_IFREG | 0644;
- entry->c.text.read = cs46xx_dsp_proc_modules_read;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- ins->proc_modules_info_entry = entry;
-
- if ((entry = snd_info_create_card_entry(card, "parameter", ins->proc_dsp_dir)) != NULL) {
- entry->content = SNDRV_INFO_CONTENT_TEXT;
- entry->private_data = chip;
- entry->mode = S_IFREG | 0644;
- entry->c.text.read = cs46xx_dsp_proc_parameter_dump_read;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- ins->proc_parameter_dump_info_entry = entry;
-
- if ((entry = snd_info_create_card_entry(card, "sample", ins->proc_dsp_dir)) != NULL) {
- entry->content = SNDRV_INFO_CONTENT_TEXT;
- entry->private_data = chip;
- entry->mode = S_IFREG | 0644;
- entry->c.text.read = cs46xx_dsp_proc_sample_dump_read;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- ins->proc_sample_dump_info_entry = entry;
-
- if ((entry = snd_info_create_card_entry(card, "task_tree", ins->proc_dsp_dir)) != NULL) {
- entry->content = SNDRV_INFO_CONTENT_TEXT;
- entry->private_data = chip;
- entry->mode = S_IFREG | 0644;
- entry->c.text.read = cs46xx_dsp_proc_task_tree_read;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- ins->proc_task_info_entry = entry;
-
- if ((entry = snd_info_create_card_entry(card, "scb_info", ins->proc_dsp_dir)) != NULL) {
- entry->content = SNDRV_INFO_CONTENT_TEXT;
- entry->private_data = chip;
- entry->mode = S_IFREG | 0644;
- entry->c.text.read = cs46xx_dsp_proc_scb_read;
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- entry = NULL;
- }
- }
- ins->proc_scb_info_entry = entry;
+ entry = snd_info_create_card_entry(card, "spos_modules",
+ ins->proc_dsp_dir);
+ if (entry)
+ snd_info_set_text_ops(entry, chip,
+ cs46xx_dsp_proc_modules_read);
+
+ entry = snd_info_create_card_entry(card, "parameter",
+ ins->proc_dsp_dir);
+ if (entry)
+ snd_info_set_text_ops(entry, chip,
+ cs46xx_dsp_proc_parameter_dump_read);
+
+ entry = snd_info_create_card_entry(card, "sample",
+ ins->proc_dsp_dir);
+ if (entry)
+ snd_info_set_text_ops(entry, chip,
+ cs46xx_dsp_proc_sample_dump_read);
+
+ entry = snd_info_create_card_entry(card, "task_tree",
+ ins->proc_dsp_dir);
+ if (entry)
+ snd_info_set_text_ops(entry, chip,
+ cs46xx_dsp_proc_task_tree_read);
+
+ entry = snd_info_create_card_entry(card, "scb_info",
+ ins->proc_dsp_dir);
+ if (entry)
+ snd_info_set_text_ops(entry, chip,
+ cs46xx_dsp_proc_scb_read);
mutex_lock(&chip->spos_mutex);
/* register/update SCB's entries on proc */
@@ -903,23 +860,8 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
struct dsp_spos_instance * ins = chip->dsp_spos_instance;
int i;
- snd_info_free_entry(ins->proc_sym_info_entry);
- ins->proc_sym_info_entry = NULL;
-
- snd_info_free_entry(ins->proc_modules_info_entry);
- ins->proc_modules_info_entry = NULL;
-
- snd_info_free_entry(ins->proc_parameter_dump_info_entry);
- ins->proc_parameter_dump_info_entry = NULL;
-
- snd_info_free_entry(ins->proc_sample_dump_info_entry);
- ins->proc_sample_dump_info_entry = NULL;
-
- snd_info_free_entry(ins->proc_scb_info_entry);
- ins->proc_scb_info_entry = NULL;
-
- snd_info_free_entry(ins->proc_task_info_entry);
- ins->proc_task_info_entry = NULL;
+ if (!ins)
+ return 0;
mutex_lock(&chip->spos_mutex);
for (i = 0; i < ins->nscb; ++i) {
diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
index 8d0a3d357345..1d9d610262de 100644
--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
+++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
@@ -254,8 +254,9 @@ void cs46xx_dsp_proc_register_scb_desc (struct snd_cs46xx *chip,
if (ins->snd_card != NULL && ins->proc_dsp_dir != NULL &&
scb->proc_info == NULL) {
- if ((entry = snd_info_create_card_entry(ins->snd_card, scb->scb_name,
- ins->proc_dsp_dir)) != NULL) {
+ entry = snd_info_create_card_entry(ins->snd_card, scb->scb_name,
+ ins->proc_dsp_dir);
+ if (entry) {
scb_info = kmalloc(sizeof(struct proc_scb_info), GFP_KERNEL);
if (!scb_info) {
snd_info_free_entry(entry);
@@ -265,18 +266,8 @@ void cs46xx_dsp_proc_register_scb_desc (struct snd_cs46xx *chip,
scb_info->chip = chip;
scb_info->scb_desc = scb;
-
- entry->content = SNDRV_INFO_CONTENT_TEXT;
- entry->private_data = scb_info;
- entry->mode = S_IFREG | 0644;
-
- entry->c.text.read = cs46xx_dsp_proc_scb_info_read;
-
- if (snd_info_register(entry) < 0) {
- snd_info_free_entry(entry);
- kfree (scb_info);
- entry = NULL;
- }
+ snd_info_set_text_ops(entry, scb_info,
+ cs46xx_dsp_proc_scb_info_read);
}
out:
scb->proc_info = entry;
diff --git a/sound/pci/cs5535audio/cs5535audio_pm.c b/sound/pci/cs5535audio/cs5535audio_pm.c
index 82bd10b68a77..446ef1f1b45a 100644
--- a/sound/pci/cs5535audio/cs5535audio_pm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pm.c
@@ -62,7 +62,6 @@ static int __maybe_unused snd_cs5535audio_suspend(struct device *dev)
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(cs5535au->pcm);
snd_ac97_suspend(cs5535au->ac97);
for (i = 0; i < NUM_CS5535AUDIO_DMAS; i++) {
struct cs5535audio_dma *dma = &cs5535au->dmas[i];
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index 2ada8444abd9..e622613ea947 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -1548,18 +1548,10 @@ static void atc_connect_resources(struct ct_atc *atc)
#ifdef CONFIG_PM_SLEEP
static int atc_suspend(struct ct_atc *atc)
{
- int i;
struct hw *hw = atc->hw;
snd_power_change_state(atc->card, SNDRV_CTL_POWER_D3hot);
- for (i = FRONT; i < NUM_PCMS; i++) {
- if (!atc->pcms[i])
- continue;
-
- snd_pcm_suspend_all(atc->pcms[i]);
- }
-
atc_release_resources(atc);
hw->suspend(hw);
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 907cf1a46712..ea876b0b02b9 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -884,17 +884,15 @@ static const struct snd_pcm_ops digital_capture_ops = {
static int snd_echo_preallocate_pages(struct snd_pcm *pcm, struct device *dev)
{
struct snd_pcm_substream *ss;
- int stream, err;
+ int stream;
for (stream = 0; stream < 2; stream++)
- for (ss = pcm->streams[stream].substream; ss; ss = ss->next) {
- err = snd_pcm_lib_preallocate_pages(ss, SNDRV_DMA_TYPE_DEV_SG,
- dev,
- ss->number ? 0 : 128<<10,
- 256<<10);
- if (err < 0)
- return err;
- }
+ for (ss = pcm->streams[stream].substream; ss; ss = ss->next)
+ snd_pcm_lib_preallocate_pages(ss, SNDRV_DMA_TYPE_DEV_SG,
+ dev,
+ ss->number ? 0 : 128<<10,
+ 256<<10);
+
return 0;
}
@@ -2165,9 +2163,6 @@ static int snd_echo_suspend(struct device *dev)
{
struct echoaudio *chip = dev_get_drvdata(dev);
- snd_pcm_suspend_all(chip->analog_pcm);
- snd_pcm_suspend_all(chip->digital_pcm);
-
#ifdef ECHOCARD_HAS_MIDI
/* This call can sleep */
if (chip->midi_out)
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index d3203df50a1a..3c41a0edcfb0 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -224,12 +224,6 @@ static int snd_emu10k1_suspend(struct device *dev)
cancel_delayed_work_sync(&emu->emu1010.firmware_work);
- snd_pcm_suspend_all(emu->pcm);
- snd_pcm_suspend_all(emu->pcm_mic);
- snd_pcm_suspend_all(emu->pcm_efx);
- snd_pcm_suspend_all(emu->pcm_multi);
- snd_pcm_suspend_all(emu->pcm_p16v);
-
snd_ac97_suspend(emu->ac97);
snd_emu10k1_efx_suspend(emu);
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 611589cbdad6..576c7bd03a1a 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -1065,15 +1065,9 @@ static void snd_emu10k1x_proc_reg_write(struct snd_info_entry *entry,
static int snd_emu10k1x_proc_init(struct emu10k1x *emu)
{
- struct snd_info_entry *entry;
-
- if(! snd_card_proc_new(emu->card, "emu10k1x_regs", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_emu10k1x_proc_reg_read);
- entry->c.text.write = snd_emu10k1x_proc_reg_write;
- entry->mode |= 0200;
- entry->private_data = emu;
- }
-
+ snd_card_rw_proc_new(emu->card, "emu10k1x_regs", emu,
+ snd_emu10k1x_proc_reg_read,
+ snd_emu10k1x_proc_reg_write);
return 0;
}
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 30b3472d0b75..f6b4cb9ac75c 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1427,11 +1427,14 @@ int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device)
emu->pcm = pcm;
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
- if ((err = snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(emu->pci), 64*1024, 64*1024)) < 0)
- return err;
+ snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
+ snd_dma_pci_data(emu->pci),
+ 64*1024, 64*1024);
for (substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; substream; substream = substream->next)
- snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
+ snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(emu->pci),
+ 64*1024, 64*1024);
return 0;
}
@@ -1455,8 +1458,9 @@ int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device)
emu->pcm_multi = pcm;
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
- if ((err = snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(emu->pci), 64*1024, 64*1024)) < 0)
- return err;
+ snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
+ snd_dma_pci_data(emu->pci),
+ 64*1024, 64*1024);
return 0;
}
@@ -1489,7 +1493,9 @@ int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device)
strcpy(pcm->name, "Mic Capture");
emu->pcm_mic = pcm;
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(emu->pci),
+ 64*1024, 64*1024);
return 0;
}
@@ -1862,7 +1868,9 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
if (err < 0)
return err;
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(emu->pci),
+ 64*1024, 64*1024);
return 0;
}
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index b57008031792..a3d9f06e8e6a 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -568,55 +568,40 @@ int snd_emu10k1_proc_init(struct snd_emu10k1 *emu)
struct snd_info_entry *entry;
#ifdef CONFIG_SND_DEBUG
if (emu->card_capabilities->emu_model) {
- if (! snd_card_proc_new(emu->card, "emu1010_regs", &entry))
- snd_info_set_text_ops(entry, emu, snd_emu_proc_emu1010_reg_read);
- }
- if (! snd_card_proc_new(emu->card, "io_regs", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_emu_proc_io_reg_read);
- entry->c.text.write = snd_emu_proc_io_reg_write;
- entry->mode |= 0200;
- }
- if (! snd_card_proc_new(emu->card, "ptr_regs00a", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read00a);
- entry->c.text.write = snd_emu_proc_ptr_reg_write00;
- entry->mode |= 0200;
- }
- if (! snd_card_proc_new(emu->card, "ptr_regs00b", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read00b);
- entry->c.text.write = snd_emu_proc_ptr_reg_write00;
- entry->mode |= 0200;
- }
- if (! snd_card_proc_new(emu->card, "ptr_regs20a", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20a);
- entry->c.text.write = snd_emu_proc_ptr_reg_write20;
- entry->mode |= 0200;
- }
- if (! snd_card_proc_new(emu->card, "ptr_regs20b", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20b);
- entry->c.text.write = snd_emu_proc_ptr_reg_write20;
- entry->mode |= 0200;
- }
- if (! snd_card_proc_new(emu->card, "ptr_regs20c", &entry)) {
- snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20c);
- entry->c.text.write = snd_emu_proc_ptr_reg_write20;
- entry->mode |= 0200;
+ snd_card_ro_proc_new(emu->card, "emu1010_regs",
+ emu, snd_emu_proc_emu1010_reg_read);
}
+ snd_card_rw_proc_new(emu->card, "io_regs", emu,
+ snd_emu_proc_io_reg_read,
+ snd_emu_proc_io_reg_write);
+ snd_card_rw_proc_new(emu->card, "ptr_regs00a", emu,
+ snd_emu_proc_ptr_reg_read00a,
+ snd_emu_proc_ptr_reg_write00);
+ snd_card_rw_proc_new(emu->card, "ptr_regs00b", emu,
+ snd_emu_proc_ptr_reg_read00b,
+ snd_emu_proc_ptr_reg_write00);
+ snd_card_rw_proc_new(emu->card, "ptr_regs20a", emu,
+ snd_emu_proc_ptr_reg_read20a,
+ snd_emu_proc_ptr_reg_write20);
+ snd_card_rw_proc_new(emu->card, "ptr_regs20b", emu,
+ snd_emu_proc_ptr_reg_read20b,
+ snd_emu_proc_ptr_reg_write20);
+ snd_card_rw_proc_new(emu->card, "ptr_regs20c", emu,
+ snd_emu_proc_ptr_reg_read20c,
+ snd_emu_proc_ptr_reg_write20);
#endif
- if (! snd_card_proc_new(emu->card, "emu10k1", &entry))
- snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_read);
+ snd_card_ro_proc_new(emu->card, "emu10k1", emu, snd_emu10k1_proc_read);
- if (emu->card_capabilities->emu10k2_chip) {
- if (! snd_card_proc_new(emu->card, "spdif-in", &entry))
- snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_spdif_read);
- }
- if (emu->card_capabilities->ca0151_chip) {
- if (! snd_card_proc_new(emu->card, "capture-rates", &entry))
- snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_rates_read);
- }
+ if (emu->card_capabilities->emu10k2_chip)
+ snd_card_ro_proc_new(emu->card, "spdif-in", emu,
+ snd_emu10k1_proc_spdif_read);
+ if (emu->card_capabilities->ca0151_chip)
+ snd_card_ro_proc_new(emu->card, "capture-rates", emu,
+ snd_emu10k1_proc_rates_read);
- if (! snd_card_proc_new(emu->card, "voices", &entry))
- snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_voices_read);
+ snd_card_ro_proc_new(emu->card, "voices", emu,
+ snd_emu10k1_proc_voices_read);
if (! snd_card_proc_new(emu->card, "fx8010_gpr", &entry)) {
entry->content = SNDRV_INFO_CONTENT_DATA;
@@ -646,11 +631,7 @@ int snd_emu10k1_proc_init(struct snd_emu10k1 *emu)
entry->size = emu->audigy ? A_TOTAL_SIZE_CODE : TOTAL_SIZE_CODE;
entry->c.ops = &snd_emu10k1_proc_ops_fx8010;
}
- if (! snd_card_proc_new(emu->card, "fx8010_acode", &entry)) {
- entry->content = SNDRV_INFO_CONTENT_TEXT;
- entry->private_data = emu;
- entry->mode = S_IFREG | 0444 /*| S_IWUSR*/;
- entry->c.text.read = snd_emu10k1_proc_acode_read;
- }
+ snd_card_ro_proc_new(emu->card, "fx8010_acode", emu,
+ snd_emu10k1_proc_acode_read);
return 0;
}
diff --git a/sound/pci/emu10k1/p16v.c b/sound/pci/emu10k1/p16v.c
index 4948b95f6665..672017cac4c7 100644
--- a/sound/pci/emu10k1/p16v.c
+++ b/sound/pci/emu10k1/p16v.c
@@ -656,11 +656,10 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device)
for(substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
substream;
substream = substream->next) {
- if ((err = snd_pcm_lib_preallocate_pages(substream,
- SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
- ((65536 - 64) * 8), ((65536 - 64) * 8))) < 0)
- return err;
+ snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(emu->pci),
+ (65536 - 64) * 8,
+ (65536 - 64) * 8);
/*
dev_dbg(emu->card->dev,
"preallocate playback substream: err=%d\n", err);
@@ -670,11 +669,9 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device)
for (substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
substream;
substream = substream->next) {
- if ((err = snd_pcm_lib_preallocate_pages(substream,
- SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
- 65536 - 64, 65536 - 64)) < 0)
- return err;
+ snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(emu->pci),
+ 65536 - 64, 65536 - 64);
/*
dev_dbg(emu->card->dev,
"preallocate capture substream: err=%d\n", err);
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 727eb3da1fda..1cfff35e370e 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1902,10 +1902,8 @@ static void snd_ensoniq_proc_read(struct snd_info_entry *entry,
static void snd_ensoniq_proc_init(struct ensoniq *ensoniq)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(ensoniq->card, "audiopci", &entry))
- snd_info_set_text_ops(entry, ensoniq, snd_ensoniq_proc_read);
+ snd_card_ro_proc_new(ensoniq->card, "audiopci", ensoniq,
+ snd_ensoniq_proc_read);
}
/*
@@ -2037,9 +2035,6 @@ static int snd_ensoniq_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(ensoniq->pcm1);
- snd_pcm_suspend_all(ensoniq->pcm2);
-
#ifdef CHIP1371
snd_ac97_suspend(ensoniq->u.es1371.ac97);
#else
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 9d248eb2e26c..84d07bce581c 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -1475,7 +1475,6 @@ static int es1938_suspend(struct device *dev)
unsigned char *s, *d;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
/* save mixer-related registers */
for (s = saved_regs, d = chip->saved_regs; *s; s++, d++)
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 0b1845ca6005..9dcb698fc8c7 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -2392,7 +2392,6 @@ static int es1968_suspend(struct device *dev)
chip->in_suspend = 1;
cancel_work_sync(&chip->hwvol_work);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
snd_es1968_bob_stop(chip);
return 0;
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index e3fb9c61017c..1317f3183eb1 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1408,7 +1408,6 @@ static int snd_fm801_suspend(struct device *dev)
if (chip->tea575x_tuner & TUNER_ONLY) {
/* FIXME: tea575x suspend */
} else {
- snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
snd_ac97_suspend(chip->ac97_sec);
}
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 066b5b59c4d7..b7d9160ed868 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -127,44 +127,6 @@ static void turn_off_beep(struct hda_beep *beep)
}
}
-static void snd_hda_do_detach(struct hda_beep *beep)
-{
- if (beep->registered)
- input_unregister_device(beep->dev);
- else
- input_free_device(beep->dev);
- beep->dev = NULL;
- turn_off_beep(beep);
-}
-
-static int snd_hda_do_attach(struct hda_beep *beep)
-{
- struct input_dev *input_dev;
- struct hda_codec *codec = beep->codec;
-
- input_dev = input_allocate_device();
- if (!input_dev)
- return -ENOMEM;
-
- /* setup digital beep device */
- input_dev->name = "HDA Digital PCBeep";
- input_dev->phys = beep->phys;
- input_dev->id.bustype = BUS_PCI;
- input_dev->dev.parent = &codec->card->card_dev;
-
- input_dev->id.vendor = codec->core.vendor_id >> 16;
- input_dev->id.product = codec->core.vendor_id & 0xffff;
- input_dev->id.version = 0x01;
-
- input_dev->evbit[0] = BIT_MASK(EV_SND);
- input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
- input_dev->event = snd_hda_beep_event;
- input_set_drvdata(input_dev, beep);
-
- beep->dev = input_dev;
- return 0;
-}
-
/**
* snd_hda_enable_beep_device - Turn on/off beep sound
* @codec: the HDA codec
@@ -186,6 +148,38 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable)
}
EXPORT_SYMBOL_GPL(snd_hda_enable_beep_device);
+static int beep_dev_register(struct snd_device *device)
+{
+ struct hda_beep *beep = device->device_data;
+ int err;
+
+ err = input_register_device(beep->dev);
+ if (!err)
+ beep->registered = true;
+ return err;
+}
+
+static int beep_dev_disconnect(struct snd_device *device)
+{
+ struct hda_beep *beep = device->device_data;
+
+ if (beep->registered)
+ input_unregister_device(beep->dev);
+ else
+ input_free_device(beep->dev);
+ turn_off_beep(beep);
+ return 0;
+}
+
+static int beep_dev_free(struct snd_device *device)
+{
+ struct hda_beep *beep = device->device_data;
+
+ beep->codec->beep = NULL;
+ kfree(beep);
+ return 0;
+}
+
/**
* snd_hda_attach_beep_device - Attach a beep input device
* @codec: the HDA codec
@@ -194,14 +188,16 @@ EXPORT_SYMBOL_GPL(snd_hda_enable_beep_device);
* Attach a beep object to the given widget. If beep hint is turned off
 * explicitly or beep_mode of the codec is turned off, this does nothing.
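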
*
- * The attached beep device has to be registered via
- * snd_hda_register_beep_device() and released via snd_hda_detach_beep_device()
- * appropriately.
- *
* Currently, only one beep device is allowed to each codec.
*/
int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
{
+ static struct snd_device_ops ops = {
+ .dev_register = beep_dev_register,
+ .dev_disconnect = beep_dev_disconnect,
+ .dev_free = beep_dev_free,
+ };
+ struct input_dev *input_dev;
struct hda_beep *beep;
int err;
@@ -226,14 +222,41 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
INIT_WORK(&beep->beep_work, &snd_hda_generate_beep);
mutex_init(&beep->mutex);
- err = snd_hda_do_attach(beep);
- if (err < 0) {
- kfree(beep);
- codec->beep = NULL;
- return err;
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ err = -ENOMEM;
+ goto err_free;
}
+ /* setup digital beep device */
+ input_dev->name = "HDA Digital PCBeep";
+ input_dev->phys = beep->phys;
+ input_dev->id.bustype = BUS_PCI;
+ input_dev->dev.parent = &codec->card->card_dev;
+
+ input_dev->id.vendor = codec->core.vendor_id >> 16;
+ input_dev->id.product = codec->core.vendor_id & 0xffff;
+ input_dev->id.version = 0x01;
+
+ input_dev->evbit[0] = BIT_MASK(EV_SND);
+ input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
+ input_dev->event = snd_hda_beep_event;
+ input_set_drvdata(input_dev, beep);
+
+ beep->dev = input_dev;
+
+ err = snd_device_new(codec->card, SNDRV_DEV_JACK, beep, &ops);
+ if (err < 0)
+ goto err_input;
+
return 0;
+
+ err_input:
+ input_free_device(beep->dev);
+ err_free:
+ kfree(beep);
+ codec->beep = NULL;
+ return err;
}
EXPORT_SYMBOL_GPL(snd_hda_attach_beep_device);
@@ -243,41 +266,11 @@ EXPORT_SYMBOL_GPL(snd_hda_attach_beep_device);
*/
void snd_hda_detach_beep_device(struct hda_codec *codec)
{
- struct hda_beep *beep = codec->beep;
- if (beep) {
- if (beep->dev)
- snd_hda_do_detach(beep);
- codec->beep = NULL;
- kfree(beep);
- }
+ if (!codec->bus->shutdown && codec->beep)
+ snd_device_free(codec->card, codec->beep);
}
EXPORT_SYMBOL_GPL(snd_hda_detach_beep_device);
-/**
- * snd_hda_register_beep_device - Register the beep device
- * @codec: the HDA codec
- */
-int snd_hda_register_beep_device(struct hda_codec *codec)
-{
- struct hda_beep *beep = codec->beep;
- int err;
-
- if (!beep || !beep->dev)
- return 0;
-
- err = input_register_device(beep->dev);
- if (err < 0) {
- codec_err(codec, "hda_beep: unable to register input device\n");
- input_free_device(beep->dev);
- codec->beep = NULL;
- kfree(beep);
- return err;
- }
- beep->registered = true;
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_hda_register_beep_device);
-
static bool ctl_has_mute(struct snd_kcontrol *kcontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
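
The hda_beep rework above ties the beep input device to the card through a snd_device with its own ops: dev_register fires from snd_card_register(), dev_disconnect at card disconnect, and dev_free when the card is freed, so the old snd_hda_register_beep_device()/detach pairing becomes unnecessary. A minimal sketch of attaching an object to a card this way (the object, the callbacks and the reuse of SNDRV_DEV_JACK follow the hunk above but are otherwise illustrative):

#include <linux/slab.h>
#include <sound/core.h>

struct my_obj {
        struct snd_card *card;
        /* resources whose lifetime must follow the card */
};

static int my_dev_register(struct snd_device *device)
{
        /* runs from snd_card_register(): make the object visible */
        return 0;
}

static int my_dev_disconnect(struct snd_device *device)
{
        /* runs at card disconnect: undo whatever register did */
        return 0;
}

static int my_dev_free(struct snd_device *device)
{
        kfree(device->device_data);
        return 0;
}

static int my_attach(struct snd_card *card)
{
        static struct snd_device_ops ops = {
                .dev_register   = my_dev_register,
                .dev_disconnect = my_dev_disconnect,
                .dev_free       = my_dev_free,
        };
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        int err;

        if (!obj)
                return -ENOMEM;
        obj->card = card;
        err = snd_device_new(card, SNDRV_DEV_JACK, obj, &ops);
        if (err < 0)
                kfree(obj);     /* dev_free is not called on this failure */
        return err;
}
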
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index f1457c6b3969..a25358a4807a 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -34,7 +34,6 @@ struct hda_beep {
int snd_hda_enable_beep_device(struct hda_codec *codec, int enable);
int snd_hda_attach_beep_device(struct hda_codec *codec, int nid);
void snd_hda_detach_beep_device(struct hda_codec *codec);
-int snd_hda_register_beep_device(struct hda_codec *codec);
#else
static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
{
@@ -43,9 +42,5 @@ static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
static inline void snd_hda_detach_beep_device(struct hda_codec *codec)
{
}
-static inline int snd_hda_register_beep_device(struct hda_codec *codec)
-{
- return 0;
-}
#endif
#endif
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 9174f1b3a987..1ec706ced75c 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -115,7 +115,8 @@ static int hda_codec_driver_probe(struct device *dev)
err = snd_hda_codec_build_controls(codec);
if (err < 0)
goto error_module;
- if (codec->card->registered) {
+ /* only register after the bus probe finished; otherwise it's racy */
+ if (!codec->bus->bus_probing && codec->card->registered) {
err = snd_card_register(codec->card);
if (err < 0)
goto error_module;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9f8d59e7e89f..5f2005098a60 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -813,7 +813,6 @@ void snd_hda_codec_register(struct hda_codec *codec)
if (codec->registered)
return;
if (device_is_registered(hda_codec_dev(codec))) {
- snd_hda_register_beep_device(codec);
codec_display_power(codec, true);
pm_runtime_enable(hda_codec_dev(codec));
/* it was powered up in snd_hda_codec_new(), now all done */
@@ -828,14 +827,6 @@ static int snd_hda_codec_dev_register(struct snd_device *device)
return 0;
}
-static int snd_hda_codec_dev_disconnect(struct snd_device *device)
-{
- struct hda_codec *codec = device->device_data;
-
- snd_hda_detach_beep_device(codec);
- return 0;
-}
-
static int snd_hda_codec_dev_free(struct snd_device *device)
{
struct hda_codec *codec = device->device_data;
@@ -921,7 +912,6 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
int err;
static struct snd_device_ops dev_ops = {
.dev_register = snd_hda_codec_dev_register,
- .dev_disconnect = snd_hda_codec_dev_disconnect,
.dev_free = snd_hda_codec_dev_free,
};
@@ -2917,18 +2907,16 @@ static void hda_call_codec_resume(struct hda_codec *codec)
hda_jackpoll_work(&codec->jackpoll_work.work);
else
snd_hda_jack_report_sync(codec);
+ codec->core.dev.power.power_state = PMSG_ON;
snd_hdac_leave_pm(&codec->core);
}
static int hda_codec_runtime_suspend(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
- struct hda_pcm *pcm;
unsigned int state;
cancel_delayed_work_sync(&codec->jackpoll_work);
- list_for_each_entry(pcm, &codec->pcm_list_head, list)
- snd_pcm_suspend_all(pcm->pcm);
state = hda_call_codec_suspend(codec);
if (codec->link_down_at_suspend ||
(codec_has_clkstop(codec) && codec_has_epss(codec) &&
@@ -2950,10 +2938,48 @@ static int hda_codec_runtime_resume(struct device *dev)
}
#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_SLEEP
+static int hda_codec_pm_suspend(struct device *dev)
+{
+ dev->power.power_state = PMSG_SUSPEND;
+ return pm_runtime_force_suspend(dev);
+}
+
+static int hda_codec_pm_resume(struct device *dev)
+{
+ dev->power.power_state = PMSG_RESUME;
+ return pm_runtime_force_resume(dev);
+}
+
+static int hda_codec_pm_freeze(struct device *dev)
+{
+ dev->power.power_state = PMSG_FREEZE;
+ return pm_runtime_force_suspend(dev);
+}
+
+static int hda_codec_pm_thaw(struct device *dev)
+{
+ dev->power.power_state = PMSG_THAW;
+ return pm_runtime_force_resume(dev);
+}
+
+static int hda_codec_pm_restore(struct device *dev)
+{
+ dev->power.power_state = PMSG_RESTORE;
+ return pm_runtime_force_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
/* referred in hda_bind.c */
const struct dev_pm_ops hda_codec_driver_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = hda_codec_pm_suspend,
+ .resume = hda_codec_pm_resume,
+ .freeze = hda_codec_pm_freeze,
+ .thaw = hda_codec_pm_thaw,
+ .poweroff = hda_codec_pm_suspend,
+ .restore = hda_codec_pm_restore,
+#endif /* CONFIG_PM_SLEEP */
SET_RUNTIME_PM_OPS(hda_codec_runtime_suspend, hda_codec_runtime_resume,
NULL)
};
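
The new hda_codec sleep callbacks still reduce to pm_runtime_force_suspend()/pm_runtime_force_resume(), but they first record which transition is in progress in dev->power.power_state (PMSG_SUSPEND, PMSG_FREEZE, and so on); the codec resume path above then resets it to PMSG_ON. A generic sketch of that shape, assuming a driver that wants the same information available while resuming:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM_SLEEP
static int my_pm_suspend(struct device *dev)
{
        dev->power.power_state = PMSG_SUSPEND;  /* normal system suspend */
        return pm_runtime_force_suspend(dev);
}

static int my_pm_resume(struct device *dev)
{
        dev->power.power_state = PMSG_RESUME;
        return pm_runtime_force_resume(dev);
}

static int my_pm_freeze(struct device *dev)
{
        dev->power.power_state = PMSG_FREEZE;   /* hibernation image creation */
        return pm_runtime_force_suspend(dev);
}

static int my_pm_thaw(struct device *dev)
{
        dev->power.power_state = PMSG_THAW;
        return pm_runtime_force_resume(dev);
}

static int my_pm_restore(struct device *dev)
{
        dev->power.power_state = PMSG_RESTORE;  /* resume from hibernation */
        return pm_runtime_force_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops my_pm_ops = {
#ifdef CONFIG_PM_SLEEP
        .suspend  = my_pm_suspend,
        .resume   = my_pm_resume,
        .freeze   = my_pm_freeze,
        .thaw     = my_pm_thaw,
        .poweroff = my_pm_suspend,
        .restore  = my_pm_restore,
#endif
};
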
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e784130ea4e0..e5c49003e75f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2185,6 +2185,7 @@ static int azx_probe_continue(struct azx *chip)
int dev = chip->dev_index;
int err;
+ to_hda_bus(bus)->bus_probing = 1;
hda->probe_continued = 1;
/* bind with i915 if needed */
@@ -2269,6 +2270,7 @@ out_free:
if (err < 0)
hda->init_failed = 1;
complete_all(&hda->probe_wait);
+ to_hda_bus(bus)->bus_probing = 0;
return err;
}
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index a65740419650..853842987fa1 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -919,15 +919,8 @@ static void print_codec_info(struct snd_info_entry *entry,
int snd_hda_codec_proc_new(struct hda_codec *codec)
{
char name[32];
- struct snd_info_entry *entry;
- int err;
snprintf(name, sizeof(name), "codec#%d", codec->core.addr);
- err = snd_card_proc_new(codec->card, name, &entry);
- if (err < 0)
- return err;
-
- snd_info_set_text_ops(entry, codec, print_codec_info);
- return 0;
+ return snd_card_ro_proc_new(codec->card, name, codec, print_codec_info);
}
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 97a176d817a0..dbd8da5685cb 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/string.h>
+#include <linux/pm_runtime.h>
#include <sound/core.h>
#include <sound/initval.h>
@@ -232,40 +233,72 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
static int hda_tegra_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
- struct azx *chip = card->private_data;
- struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
- struct hdac_bus *bus = azx_bus(chip);
+ int rc;
+ rc = pm_runtime_force_suspend(dev);
+ if (rc < 0)
+ return rc;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- azx_stop_chip(chip);
- synchronize_irq(bus->irq);
- azx_enter_link_reset(chip);
- hda_tegra_disable_clocks(hda);
-
return 0;
}
static int hda_tegra_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
+ int rc;
+
+ rc = pm_runtime_force_resume(dev);
+ if (rc < 0)
+ return rc;
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int hda_tegra_runtime_suspend(struct device *dev)
+{
+ struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
+ struct hdac_bus *bus = azx_bus(chip);
- hda_tegra_enable_clocks(hda);
+ if (chip && chip->running) {
+ azx_stop_chip(chip);
+ synchronize_irq(bus->irq);
+ azx_enter_link_reset(chip);
+ }
+ hda_tegra_disable_clocks(hda);
- hda_tegra_init(hda);
+ return 0;
+}
- azx_init_chip(chip, 1);
+static int hda_tegra_runtime_resume(struct device *dev)
+{
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip = card->private_data;
+ struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
+ int rc;
- snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ rc = hda_tegra_enable_clocks(hda);
+ if (rc != 0)
+ return rc;
+ if (chip && chip->running) {
+ hda_tegra_init(hda);
+ azx_init_chip(chip, 1);
+ }
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
static const struct dev_pm_ops hda_tegra_pm = {
SET_SYSTEM_SLEEP_PM_OPS(hda_tegra_suspend, hda_tegra_resume)
+ SET_RUNTIME_PM_OPS(hda_tegra_runtime_suspend,
+ hda_tegra_runtime_resume,
+ NULL)
};
static int hda_tegra_dev_disconnect(struct snd_device *device)
@@ -303,7 +336,23 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
struct hdac_bus *bus = azx_bus(chip);
struct device *dev = hda->dev;
struct resource *res;
- int err;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hda->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hda->regs))
+ return PTR_ERR(hda->regs);
+
+ bus->remap_addr = hda->regs + HDA_BAR0;
+ bus->addr = res->start + HDA_BAR0;
+
+ hda_tegra_init(hda);
+
+ return 0;
+}
+
+static int hda_tegra_init_clk(struct hda_tegra *hda)
+{
+ struct device *dev = hda->dev;
hda->hda_clk = devm_clk_get(dev, "hda");
if (IS_ERR(hda->hda_clk)) {
@@ -321,22 +370,6 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
return PTR_ERR(hda->hda2hdmi_clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hda->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(hda->regs))
- return PTR_ERR(hda->regs);
-
- bus->remap_addr = hda->regs + HDA_BAR0;
- bus->addr = res->start + HDA_BAR0;
-
- err = hda_tegra_enable_clocks(hda);
- if (err) {
- dev_err(dev, "failed to get enable clocks\n");
- return err;
- }
-
- hda_tegra_init(hda);
-
return 0;
}
@@ -347,8 +380,8 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
int err;
unsigned short gcap;
int irq_id = platform_get_irq(pdev, 0);
- const char *sname;
- struct device_node *root;
+ const char *sname, *drv_name = "tegra-hda";
+ struct device_node *np = pdev->dev.of_node;
err = hda_tegra_init_chip(chip, pdev);
if (err)
@@ -407,17 +440,11 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
}
/* driver name */
- strcpy(card->driver, "tegra-hda");
-
- root = of_find_node_by_path("/");
- sname = of_get_property(root, "compatible", NULL);
- of_node_put(root);
- if (!sname) {
- dev_err(card->dev,
- "failed to get compatible property from root node\n");
- return -ENODEV;
- }
+ strncpy(card->driver, drv_name, sizeof(card->driver));
/* shortname for card */
+ sname = of_get_property(np, "nvidia,model", NULL);
+ if (!sname)
+ sname = drv_name;
if (strlen(sname) > sizeof(card->shortname))
dev_info(card->dev, "truncating shortname for card\n");
strncpy(card->shortname, sname, sizeof(card->shortname));
@@ -487,7 +514,8 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match);
static int hda_tegra_probe(struct platform_device *pdev)
{
- const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR;
+ const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR |
+ AZX_DCAPS_PM_RUNTIME;
struct snd_card *card;
struct azx *chip;
struct hda_tegra *hda;
@@ -506,12 +534,21 @@ static int hda_tegra_probe(struct platform_device *pdev)
return err;
}
+ err = hda_tegra_init_clk(hda);
+ if (err < 0)
+ goto out_free;
+
err = hda_tegra_create(card, driver_flags, hda);
if (err < 0)
goto out_free;
card->private_data = chip;
dev_set_drvdata(&pdev->dev, card);
+
+ pm_runtime_enable(hda->dev);
+ if (!azx_has_pm_runtime(chip))
+ pm_runtime_forbid(hda->dev);
+
schedule_work(&hda->probe_work);
return 0;
@@ -528,6 +565,7 @@ static void hda_tegra_probe_work(struct work_struct *work)
struct platform_device *pdev = to_platform_device(hda->dev);
int err;
+ pm_runtime_get_sync(hda->dev);
err = hda_tegra_first_init(chip, pdev);
if (err < 0)
goto out_free;
@@ -549,12 +587,18 @@ static void hda_tegra_probe_work(struct work_struct *work)
snd_hda_set_power_save(&chip->bus, power_save * 1000);
out_free:
+ pm_runtime_put(hda->dev);
return; /* no error return from async probe */
}
static int hda_tegra_remove(struct platform_device *pdev)
{
- return snd_card_free(dev_get_drvdata(&pdev->dev));
+ int ret;
+
+ ret = snd_card_free(dev_get_drvdata(&pdev->dev));
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
}
static void hda_tegra_shutdown(struct platform_device *pdev)
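The hda_tegra changes above follow the standard platform-driver runtime-PM wiring: runtime callbacks are exposed through SET_RUNTIME_PM_OPS(), runtime PM is enabled in probe (and forbidden when the chip lacks AZX_DCAPS_PM_RUNTIME), and the deferred probe work is bracketed by a pm_runtime_get_sync()/pm_runtime_put() pair so the device stays powered while it is initialised. The sketch below shows only that generic pattern; my_driver, my_runtime_suspend/resume and my_probe are illustrative names, and the system-sleep ops are simplified to the pm_runtime_force_* helpers rather than the dedicated suspend/resume paths used by the real driver.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_runtime_suspend(struct device *dev)
{
        /* stop the controller and gate its clocks */
        return 0;
}

static int my_runtime_resume(struct device *dev)
{
        /* ungate clocks and re-initialise the controller */
        return 0;
}

static const struct dev_pm_ops my_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};

static int my_probe(struct platform_device *pdev)
{
        pm_runtime_enable(&pdev->dev);

        /* keep the device powered while it is being initialised */
        pm_runtime_get_sync(&pdev->dev);
        /* ... controller setup ... */
        pm_runtime_put(&pdev->dev);

        return 0;
}

static int my_remove(struct platform_device *pdev)
{
        pm_runtime_disable(&pdev->dev);
        return 0;
}

static struct platform_driver my_driver = {
        .probe  = my_probe,
        .remove = my_remove,
        .driver = {
                .name = "my-hda",
                .pm   = &my_pm_ops,
        },
};
module_platform_driver(my_driver);
MODULE_LICENSE("GPL");

The pm_runtime_disable() in remove mirrors the enable in probe, which is also what the reworked hda_tegra_remove() now does after freeing the card.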
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index e5bdbc245682..29882bda7632 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -8451,8 +8451,10 @@ static void ca0132_free(struct hda_codec *codec)
ca0132_exit_chip(codec);
snd_hda_power_down(codec);
- if (IS_ENABLED(CONFIG_PCI) && spec->mem_base)
+#ifdef CONFIG_PCI
+ if (spec->mem_base)
pci_iounmap(codec->bus->pci, spec->mem_base);
+#endif
kfree(spec->spec_init_verbs);
kfree(codec->spec);
}
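The ca0132 hunk replaces an IS_ENABLED() test with a preprocessor guard. The practical difference, sketched below with a made-up CONFIG_FOO/foo_release() pair, is that IS_ENABLED(CONFIG_FOO) keeps the guarded statement visible to the compiler (it is merely dead-code-eliminated), so every symbol inside must still be declared when the option is off, whereas #ifdef removes the block before compilation; hiding pci_iounmap() behind CONFIG_PCI therefore avoids depending on a declaration that may not exist in PCI-less configurations.

/* Compiled (then optimised away) even when CONFIG_FOO=n, so
 * foo_release() must at least be declared in that configuration. */
if (IS_ENABLED(CONFIG_FOO) && res)
        foo_release(res);

/* Dropped by the preprocessor when CONFIG_FOO=n, so foo_release()
 * may be entirely absent from the headers. */
#ifdef CONFIG_FOO
if (res)
        foo_release(res);
#endif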
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 51cc6589443f..a4ee7656d9ee 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
@@ -931,6 +932,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 46f88dc7b7e8..73d7042ff884 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1865,7 +1865,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
hda_nid_t pin_nid;
struct snd_pcm_runtime *runtime = substream->runtime;
bool non_pcm;
- int pinctl;
+ int pinctl, stripe;
int err = 0;
mutex_lock(&spec->pcm_lock);
@@ -1909,6 +1909,14 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
per_pin->channels = substream->runtime->channels;
per_pin->setup = true;
+ if (get_wcaps(codec, cvt_nid) & AC_WCAP_STRIPE) {
+ stripe = snd_hdac_get_stream_stripe_ctl(&codec->bus->core,
+ substream);
+ snd_hda_codec_write(codec, cvt_nid, 0,
+ AC_VERB_SET_STRIPE_CONTROL,
+ stripe);
+ }
+
hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
mutex_unlock(&per_pin->lock);
if (spec->dyn_pin_out) {
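The patch_hdmi.c hunk programs the converter's stripe control before the audio infoframe is set up. Stripe control tells the codec how many SDO lines the stream is spread across, which matters for high-bandwidth (e.g. multichannel HBR) playback; snd_hdac_get_stream_stripe_ctl() asks the HDA core for the value appropriate to the current substream. The sketch below simply restates the calls from the hunk, with comments, for reference:

/* Only converters that advertise AC_WCAP_STRIPE support striping. */
if (get_wcaps(codec, cvt_nid) & AC_WCAP_STRIPE) {
        int stripe = snd_hdac_get_stream_stripe_ctl(&codec->bus->core,
                                                    substream);

        /* program the number of SDO lines to stripe the stream over */
        snd_hda_codec_write(codec, cvt_nid, 0,
                            AC_VERB_SET_STRIPE_CONTROL, stripe);
}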
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index aee4cbd29d53..c8413d44973c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -117,6 +117,8 @@ struct alc_spec {
int codec_variant; /* flag for other variants */
unsigned int has_alc5505_dsp:1;
unsigned int no_depop_delay:1;
+ unsigned int done_hp_init:1;
+ unsigned int no_shutup_pins:1;
/* for PLL fix */
hda_nid_t pll_nid;
@@ -475,6 +477,14 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
set_eapd(codec, *p, on);
}
+static void alc_shutup_pins(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (!spec->no_shutup_pins)
+ snd_hda_shutup_pins(codec);
+}
+
/* generic shutup callback;
* just turning off EAPD and a little pause for avoiding pop-noise
*/
@@ -485,7 +495,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
alc_auto_setup_eapd(codec, false);
if (!spec->no_depop_delay)
msleep(200);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
/* generic EAPD initialization */
@@ -514,6 +524,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
}
}
+/* get a primary headphone pin if available */
+static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
+{
+ if (spec->gen.autocfg.hp_pins[0])
+ return spec->gen.autocfg.hp_pins[0];
+ if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+ return spec->gen.autocfg.line_out_pins[0];
+ return 0;
+}
/*
* Realtek SSID verification
@@ -724,9 +743,7 @@ do_sku:
* 15 : 1 --> enable the function "Mute internal speaker
* when the external headphone out jack is plugged"
*/
- if (!spec->gen.autocfg.hp_pins[0] &&
- !(spec->gen.autocfg.line_out_pins[0] &&
- spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
+ if (!alc_get_hp_pin(spec)) {
hda_nid_t nid;
tmp = (ass >> 11) & 0x3; /* HP to chassis */
nid = ports[tmp];
@@ -806,7 +823,7 @@ static inline void alc_shutup(struct hda_codec *codec)
if (spec && spec->shutup)
spec->shutup(codec);
else
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static void alc_reboot_notify(struct hda_codec *codec)
@@ -1847,6 +1864,8 @@ enum {
ALC887_FIXUP_BASS_CHMAP,
ALC1220_FIXUP_GB_DUAL_CODECS,
ALC1220_FIXUP_CLEVO_P950,
+ ALC1220_FIXUP_SYSTEM76_ORYP5,
+ ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2048,6 +2067,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
}
+static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action);
+
+static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ alc1220_fixup_clevo_p950(codec, fix, action);
+ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+}
+
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = HDA_FIXUP_PINS,
@@ -2292,6 +2322,19 @@ static const struct hda_fixup alc882_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc1220_fixup_clevo_p950,
},
+ [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc1220_fixup_system76_oryp5,
+ },
+ [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2368,6 +2411,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -2914,7 +2959,7 @@ static void alc269_shutup(struct hda_codec *codec)
(alc_get_coef0(codec) & 0x00ff) == 0x018) {
msleep(150);
}
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static struct coef_fw alc282_coefs[] = {
@@ -2958,7 +3003,7 @@ static void alc282_restore_default_value(struct hda_codec *codec)
static void alc282_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
int coef78;
@@ -2995,7 +3040,7 @@ static void alc282_init(struct hda_codec *codec)
static void alc282_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
int coef78;
@@ -3017,14 +3062,15 @@ static void alc282_shutup(struct hda_codec *codec)
if (hp_pin_sense)
msleep(85);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
if (hp_pin_sense)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
alc_write_coef_idx(codec, 0x78, coef78);
}
@@ -3073,14 +3119,9 @@ static void alc283_restore_default_value(struct hda_codec *codec)
static void alc283_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
- if (!spec->gen.autocfg.hp_outs) {
- if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
- hp_pin = spec->gen.autocfg.line_out_pins[0];
- }
-
alc283_restore_default_value(codec);
if (!hp_pin)
@@ -3114,14 +3155,9 @@ static void alc283_init(struct hda_codec *codec)
static void alc283_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
- if (!spec->gen.autocfg.hp_outs) {
- if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
- hp_pin = spec->gen.autocfg.line_out_pins[0];
- }
-
if (!hp_pin) {
alc269_shutup(codec);
return;
@@ -3140,22 +3176,23 @@ static void alc283_shutup(struct hda_codec *codec)
if (hp_pin_sense)
msleep(100);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
if (hp_pin_sense)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
alc_write_coef_idx(codec, 0x43, 0x9614);
}
static void alc256_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
if (!hp_pin)
@@ -3191,7 +3228,7 @@ static void alc256_init(struct hda_codec *codec)
static void alc256_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
if (!hp_pin) {
@@ -3214,20 +3251,21 @@ static void alc256_shutup(struct hda_codec *codec)
/* NOTE: call this before clearing the pin, otherwise codec stalls */
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
if (hp_pin_sense)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static void alc225_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp1_pin_sense, hp2_pin_sense;
if (!hp_pin)
@@ -3270,7 +3308,7 @@ static void alc225_init(struct hda_codec *codec)
static void alc225_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp1_pin_sense, hp2_pin_sense;
if (!hp_pin) {
@@ -3308,13 +3346,13 @@ static void alc225_shutup(struct hda_codec *codec)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static void alc_default_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
if (!hp_pin)
@@ -3343,7 +3381,7 @@ static void alc_default_init(struct hda_codec *codec)
static void alc_default_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
if (!hp_pin) {
@@ -3362,14 +3400,60 @@ static void alc_default_shutup(struct hda_codec *codec)
if (hp_pin_sense)
msleep(85);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
if (hp_pin_sense)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
+}
+
+static void alc294_hp_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ int i, val;
+
+ if (!hp_pin)
+ return;
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ msleep(100);
+
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
+ alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+ /* Wait for depop procedure finish */
+ val = alc_read_coefex_idx(codec, 0x58, 0x01);
+ for (i = 0; i < 20 && val & 0x0080; i++) {
+ msleep(50);
+ val = alc_read_coefex_idx(codec, 0x58, 0x01);
+ }
+ /* Set HP depop to auto mode */
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+ msleep(50);
+}
+
+static void alc294_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ /* required only at boot or S4 resume time */
+ if (!spec->done_hp_init ||
+ codec->core.dev.power.power_state.event == PM_EVENT_RESTORE) {
+ alc294_hp_init(codec);
+ spec->done_hp_init = true;
+ }
+ alc_default_init(codec);
}
static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
@@ -4102,6 +4186,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
case 0x10ec0295:
case 0x10ec0289:
case 0x10ec0299:
+ alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
break;
case 0x10ec0867:
@@ -4736,7 +4821,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
int new_headset_mode;
@@ -4938,16 +5023,12 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
}
}
-static void alc_no_shutup(struct hda_codec *codec)
-{
-}
-
static void alc_fixup_no_shutup(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
struct alc_spec *spec = codec->spec;
- spec->shutup = alc_no_shutup;
+ spec->no_shutup_pins = 1;
}
}
@@ -5015,7 +5096,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
static void alc_shutup_dell_xps13(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- int hp_pin = spec->gen.autocfg.hp_pins[0];
+ int hp_pin = alc_get_hp_pin(spec);
/* Prevent pop noises when headphones are plugged in */
snd_hda_codec_write(codec, hp_pin, 0,
@@ -5108,7 +5189,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
if (action == HDA_FIXUP_ACT_PROBE) {
int mic_pin = find_ext_mic_pin(codec);
- int hp_pin = spec->gen.autocfg.hp_pins[0];
+ int hp_pin = alc_get_hp_pin(spec);
if (snd_BUG_ON(!mic_pin || !hp_pin))
return;
@@ -5440,6 +5521,13 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
}
}
+static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+}
+
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
@@ -5549,6 +5637,7 @@ enum {
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DISABLE_MIC_VREF,
ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_FIXUP_DISABLE_DAC3,
ALC280_FIXUP_HP_HEADSET_MIC,
@@ -5582,6 +5671,8 @@ enum {
ALC294_FIXUP_ASUS_HEADSET_MIC,
ALC294_FIXUP_ASUS_SPK,
ALC225_FIXUP_HEADSET_JACK,
+ ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6268,6 +6359,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
},
+ [ALC225_FIXUP_DISABLE_MIC_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_mic_vref,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
[ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -6277,7 +6374,7 @@ static const struct hda_fixup alc269_fixups[] = {
{}
},
.chained = true,
- .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
},
[ALC280_FIXUP_HP_HEADSET_MIC] = {
.type = HDA_FIXUP_FUNC,
@@ -6522,6 +6619,26 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_jack,
},
+ [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Disable PCBEEP-IN passthrough */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6572,6 +6689,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -6584,6 +6702,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6665,7 +6784,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
- SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -6699,6 +6817,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -6910,7 +7029,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
{.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
{.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
- {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
+ {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
{.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
{.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
{.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
@@ -7205,7 +7324,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60130},
{0x19, 0x03a11020},
{0x21, 0x0321101f}),
- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
{0x12, 0x90a60130},
{0x14, 0x90170110},
{0x19, 0x04a11040},
@@ -7284,6 +7403,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
{0x12, 0x90a60130},
{0x17, 0x90170110},
+ {0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
{0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_STANDARD_PINS,
@@ -7357,37 +7480,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x4, 0, 1<<11);
}
-static void alc294_hp_init(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
- int i, val;
-
- if (!hp_pin)
- return;
-
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
-
- msleep(100);
-
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
-
- alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
- alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
-
- /* Wait for depop procedure finish */
- val = alc_read_coefex_idx(codec, 0x58, 0x01);
- for (i = 0; i < 20 && val & 0x0080; i++) {
- msleep(50);
- val = alc_read_coefex_idx(codec, 0x58, 0x01);
- }
- /* Set HP depop to auto mode */
- alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
- msleep(50);
-}
-
/*
*/
static int patch_alc269(struct hda_codec *codec)
@@ -7513,7 +7605,7 @@ static int patch_alc269(struct hda_codec *codec)
spec->codec_variant = ALC269_TYPE_ALC294;
spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
- alc294_hp_init(codec);
+ spec->init_hook = alc294_init;
break;
case 0x10ec0300:
spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7525,7 +7617,7 @@ static int patch_alc269(struct hda_codec *codec)
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
- alc294_hp_init(codec);
+ spec->init_hook = alc294_init;
break;
}
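Most of the new patch_realtek.c entries above are chained fixups. A fixup entry applies its own pins/verbs/function and then, when .chained is set, follows .chain_id and applies that entry for the same action as well (the order is reversed only with .chained_before). The sketch below uses hypothetical MY_FIXUP_* IDs and a my_fixup_func() purely to show the shape; the real tables select entries via SND_PCI_QUIRK()/SND_HDA_PIN_QUIRK() matches on subsystem IDs and pin configurations.

static const struct hda_fixup my_fixups[] = {
        [MY_FIXUP_FUNC] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = my_fixup_func,
        },
        [MY_FIXUP_PINS] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
                        { 0x19, 0x01a1913c }, /* headset mic, no jack detect */
                        { }
                },
                /* after the pin override, also run MY_FIXUP_FUNC */
                .chained = true,
                .chain_id = MY_FIXUP_FUNC,
        },
};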
diff --git a/sound/pci/ice1712/ews.c b/sound/pci/ice1712/ews.c
index b8af747ecb43..7646c93e8268 100644
--- a/sound/pci/ice1712/ews.c
+++ b/sound/pci/ice1712/ews.c
@@ -826,7 +826,12 @@ static int snd_ice1712_6fire_read_pca(struct snd_ice1712 *ice, unsigned char reg
snd_i2c_lock(ice->i2c);
byte = reg;
- snd_i2c_sendbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1);
+ if (snd_i2c_sendbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1)) {
+ snd_i2c_unlock(ice->i2c);
+ dev_err(ice->card->dev, "cannot send pca\n");
+ return -EIO;
+ }
+
byte = 0;
if (snd_i2c_readbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1) != 1) {
snd_i2c_unlock(ice->i2c);
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index f1fe497c2f9d..fa7d90ee6e2d 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -1603,10 +1603,7 @@ static void snd_ice1712_proc_read(struct snd_info_entry *entry,
static void snd_ice1712_proc_init(struct snd_ice1712 *ice)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(ice->card, "ice1712", &entry))
- snd_info_set_text_ops(entry, ice, snd_ice1712_proc_read);
+ snd_card_ro_proc_new(ice->card, "ice1712", ice, snd_ice1712_proc_read);
}
/*
@@ -2792,9 +2789,6 @@ static int snd_ice1712_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(ice->pcm);
- snd_pcm_suspend_all(ice->pcm_pro);
- snd_pcm_suspend_all(ice->pcm_ds);
snd_ac97_suspend(ice->ac97);
spin_lock_irq(&ice->reg_lock);
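Two mechanical conversions repeat through the rest of this series, both visible in the ice1712.c hunks above: the snd_pcm_suspend_all() calls disappear because the PCM core now suspends the streams itself during system suspend, and the snd_card_proc_new() + snd_info_set_text_ops() boilerplate collapses into snd_card_ro_proc_new() (or snd_card_rw_proc_new(), which additionally hooks a write callback and marks the entry writable). A minimal sketch of the new proc helpers, with hypothetical my_chip/my_proc_* names:

#include <sound/core.h>
#include <sound/info.h>

struct my_chip {
        struct snd_card *card;
        unsigned int state;
};

static void my_proc_read(struct snd_info_entry *entry,
                         struct snd_info_buffer *buffer)
{
        struct my_chip *chip = entry->private_data;

        snd_iprintf(buffer, "chip state: %#x\n", chip->state);
}

static void my_proc_write(struct snd_info_entry *entry,
                          struct snd_info_buffer *buffer)
{
        /* parse lines from 'buffer' and update the hardware */
}

static void my_proc_init(struct my_chip *chip)
{
        /* read-only entry: the old snd_card_proc_new() + snd_info_set_text_ops() */
        snd_card_ro_proc_new(chip->card, "my_chip", chip, my_proc_read);

        /* read-write entry: also sets the write op and the 0200 mode bit */
        snd_card_rw_proc_new(chip->card, "my_chip_rw", chip,
                             my_proc_read, my_proc_write);
}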
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 057c2f394ea7..a7d640ee4a17 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -1571,10 +1571,7 @@ static void snd_vt1724_proc_read(struct snd_info_entry *entry,
static void snd_vt1724_proc_init(struct snd_ice1712 *ice)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(ice->card, "ice1724", &entry))
- snd_info_set_text_ops(entry, ice, snd_vt1724_proc_read);
+ snd_card_ro_proc_new(ice->card, "ice1724", ice, snd_vt1724_proc_read);
}
/*
@@ -2804,9 +2801,6 @@ static int snd_vt1724_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(ice->pcm);
- snd_pcm_suspend_all(ice->pcm_pro);
- snd_pcm_suspend_all(ice->pcm_ds);
snd_ac97_suspend(ice->ac97);
spin_lock_irq(&ice->reg_lock);
diff --git a/sound/pci/ice1712/pontis.c b/sound/pci/ice1712/pontis.c
index 93b8cfc6636f..f499f1e8d0c9 100644
--- a/sound/pci/ice1712/pontis.c
+++ b/sound/pci/ice1712/pontis.c
@@ -659,12 +659,8 @@ static void wm_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buff
static void wm_proc_init(struct snd_ice1712 *ice)
{
- struct snd_info_entry *entry;
- if (! snd_card_proc_new(ice->card, "wm_codec", &entry)) {
- snd_info_set_text_ops(entry, ice, wm_proc_regs_read);
- entry->mode |= 0200;
- entry->c.text.write = wm_proc_regs_write;
- }
+ snd_card_rw_proc_new(ice->card, "wm_codec", ice, wm_proc_regs_read,
+ wm_proc_regs_write);
}
static void cs_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
@@ -684,9 +680,7 @@ static void cs_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buff
static void cs_proc_init(struct snd_ice1712 *ice)
{
- struct snd_info_entry *entry;
- if (! snd_card_proc_new(ice->card, "cs_codec", &entry))
- snd_info_set_text_ops(entry, ice, cs_proc_regs_read);
+ snd_card_ro_proc_new(ice->card, "cs_codec", ice, cs_proc_regs_read);
}
diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c
index 3919aed39ca0..d243309029d3 100644
--- a/sound/pci/ice1712/prodigy192.c
+++ b/sound/pci/ice1712/prodigy192.c
@@ -651,9 +651,8 @@ static void stac9460_proc_regs_read(struct snd_info_entry *entry,
static void stac9460_proc_init(struct snd_ice1712 *ice)
{
- struct snd_info_entry *entry;
- if (!snd_card_proc_new(ice->card, "stac9460_codec", &entry))
- snd_info_set_text_ops(entry, ice, stac9460_proc_regs_read);
+ snd_card_ro_proc_new(ice->card, "stac9460_codec", ice,
+ stac9460_proc_regs_read);
}
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index c97b5528e4b8..72f252c936e5 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -904,12 +904,8 @@ static void wm_proc_regs_read(struct snd_info_entry *entry,
static void wm_proc_init(struct snd_ice1712 *ice)
{
- struct snd_info_entry *entry;
- if (!snd_card_proc_new(ice->card, "wm_codec", &entry)) {
- snd_info_set_text_ops(entry, ice, wm_proc_regs_read);
- entry->mode |= 0200;
- entry->c.text.write = wm_proc_regs_write;
- }
+ snd_card_rw_proc_new(ice->card, "wm_codec", ice, wm_proc_regs_read,
+ wm_proc_regs_write);
}
static int prodigy_hifi_add_controls(struct snd_ice1712 *ice)
diff --git a/sound/pci/ice1712/quartet.c b/sound/pci/ice1712/quartet.c
index 5bc836241c97..8ad964ee0b65 100644
--- a/sound/pci/ice1712/quartet.c
+++ b/sound/pci/ice1712/quartet.c
@@ -502,9 +502,7 @@ static void proc_regs_read(struct snd_info_entry *entry,
static void proc_init(struct snd_ice1712 *ice)
{
- struct snd_info_entry *entry;
- if (!snd_card_proc_new(ice->card, "quartet", &entry))
- snd_info_set_text_ops(entry, ice, proc_regs_read);
+ snd_card_ro_proc_new(ice->card, "quartet", ice, proc_regs_read);
}
static int qtet_mute_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index ffddcdfe0c66..2784bf48cf5a 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -2614,8 +2614,6 @@ static int intel8x0_suspend(struct device *dev)
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < chip->pcm_devs; i++)
- snd_pcm_suspend_all(chip->pcm[i]);
for (i = 0; i < chip->ncodecs; i++)
snd_ac97_suspend(chip->ac97[i]);
if (chip->device_type == DEVICE_INTEL_ICH4)
@@ -2865,10 +2863,8 @@ static void snd_intel8x0_proc_read(struct snd_info_entry * entry,
static void snd_intel8x0_proc_init(struct intel8x0 *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "intel8x0", &entry))
- snd_info_set_text_ops(entry, chip, snd_intel8x0_proc_read);
+ snd_card_ro_proc_new(chip->card, "intel8x0", chip,
+ snd_intel8x0_proc_read);
}
static int snd_intel8x0_dev_free(struct snd_device *device)
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index c84629190cba..43c654e15452 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -1025,11 +1025,8 @@ static int intel8x0m_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0m *chip = card->private_data;
- int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < chip->pcm_devs; i++)
- snd_pcm_suspend_all(chip->pcm[i]);
snd_ac97_suspend(chip->ac97);
if (chip->irq >= 0) {
free_irq(chip->irq, chip);
@@ -1087,10 +1084,8 @@ static void snd_intel8x0m_proc_read(struct snd_info_entry * entry,
static void snd_intel8x0m_proc_init(struct intel8x0m *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "intel8x0m", &entry))
- snd_info_set_text_ops(entry, chip, snd_intel8x0m_proc_read);
+ snd_card_ro_proc_new(chip->card, "intel8x0m", chip,
+ snd_intel8x0m_proc_read);
}
static int snd_intel8x0m_dev_free(struct snd_device *device)
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 4e189a93f475..fe4aba8a08ea 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2090,10 +2090,8 @@ static void snd_korg1212_proc_read(struct snd_info_entry *entry,
static void snd_korg1212_proc_init(struct snd_korg1212 *korg1212)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(korg1212->card, "korg1212", &entry))
- snd_info_set_text_ops(entry, korg1212, snd_korg1212_proc_read);
+ snd_card_ro_proc_new(korg1212->card, "korg1212", korg1212,
+ snd_korg1212_proc_read);
}
static int
diff --git a/sound/pci/lola/lola_proc.c b/sound/pci/lola/lola_proc.c
index 904e3c4f4dfe..1603f9c81897 100644
--- a/sound/pci/lola/lola_proc.c
+++ b/sound/pci/lola/lola_proc.c
@@ -208,15 +208,9 @@ static void lola_proc_regs_read(struct snd_info_entry *entry,
void lola_proc_debug_new(struct lola *chip)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(chip->card, "codec", &entry))
- snd_info_set_text_ops(entry, chip, lola_proc_codec_read);
- if (!snd_card_proc_new(chip->card, "codec_rw", &entry)) {
- snd_info_set_text_ops(entry, chip, lola_proc_codec_rw_read);
- entry->mode |= 0200;
- entry->c.text.write = lola_proc_codec_rw_write;
- }
- if (!snd_card_proc_new(chip->card, "regs", &entry))
- snd_info_set_text_ops(entry, chip, lola_proc_regs_read);
+ snd_card_ro_proc_new(chip->card, "codec", chip, lola_proc_codec_read);
+ snd_card_rw_proc_new(chip->card, "codec_rw", chip,
+ lola_proc_codec_rw_read,
+ lola_proc_codec_rw_write);
+ snd_card_ro_proc_new(chip->card, "regs", chip, lola_proc_regs_read);
}
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index 54f6252faca6..ae23a2dfbdea 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -854,11 +854,9 @@ static int lx_pcm_create(struct lx6464es *chip)
pcm->nonatomic = true;
strcpy(pcm->name, card_name);
- err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- size, size);
- if (err < 0)
- return err;
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(chip->pci),
+ size, size);
chip->pcm = pcm;
chip->capture_stream.is_capture = 1;
@@ -948,13 +946,7 @@ static void lx_proc_levels_read(struct snd_info_entry *entry,
static int lx_proc_create(struct snd_card *card, struct lx6464es *chip)
{
- struct snd_info_entry *entry;
- int err = snd_card_proc_new(card, "levels", &entry);
- if (err < 0)
- return err;
-
- snd_info_set_text_ops(entry, chip, lx_proc_levels_read);
- return 0;
+ return snd_card_ro_proc_new(card, "levels", chip, lx_proc_levels_read);
}
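The lx6464es hunk also shows the third recurring conversion: snd_pcm_lib_preallocate_pages_for_all() no longer returns an error, so callers drop their error handling. Preallocation is best-effort anyway; if it cannot be satisfied, the buffer is simply allocated later when hw_params runs. A minimal sketch (my_pcm_new(), chip and pcm are placeholders, chip->pci assumed to be the PCI device):

static int my_pcm_new(struct my_chip *chip, struct snd_pcm *pcm)
{
        /* fire-and-forget: nothing to check any more */
        snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                                              snd_dma_pci_data(chip->pci),
                                              64 * 1024, 128 * 1024);
        return 0;
}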
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 62962178a9d7..1a9468c14aaf 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -2422,7 +2422,6 @@ static int m3_suspend(struct device *dev)
chip->in_suspend = 1;
cancel_work_sync(&chip->hwvol_work);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
msleep(10); /* give the assp a chance to idle.. */
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 9cd297a42f24..92f616df3863 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -1220,10 +1220,8 @@ static void snd_mixart_proc_init(struct snd_mixart *chip)
struct snd_info_entry *entry;
/* text interface to read perf and temp meters */
- if (! snd_card_proc_new(chip->card, "board_info", &entry)) {
- entry->private_data = chip;
- entry->c.text.read = snd_mixart_proc_read;
- }
+ snd_card_ro_proc_new(chip->card, "board_info", chip,
+ snd_mixart_proc_read);
if (! snd_card_proc_new(chip->card, "mixart_BA0", &entry)) {
entry->content = SNDRV_INFO_CONTENT_DATA;
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index b97f4ea6b56c..85e46ff44ac3 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -1413,7 +1413,6 @@ static int nm256_suspend(struct device *dev)
struct nm256 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
chip->coeffs_current = 0;
return 0;
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index b4ef5804212d..3ae9dd4b39e8 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -244,10 +244,7 @@ static void oxygen_proc_read(struct snd_info_entry *entry,
static void oxygen_proc_init(struct oxygen *chip)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(chip->card, "oxygen", &entry))
- snd_info_set_text_ops(entry, chip, oxygen_proc_read);
+ snd_card_ro_proc_new(chip->card, "oxygen", chip, oxygen_proc_read);
}
static const struct pci_device_id *
@@ -373,7 +370,7 @@ static void oxygen_init(struct oxygen *chip)
for (i = 0; i < 8; ++i)
chip->dac_volume[i] = chip->model.dac_volume_min;
chip->dac_mute = 1;
- chip->spdif_playback_enable = 1;
+ chip->spdif_playback_enable = 0;
chip->spdif_bits = OXYGEN_SPDIF_C | OXYGEN_SPDIF_ORIGINAL |
(IEC958_AES1_CON_PCM_CODER << OXYGEN_SPDIF_CATEGORY_SHIFT);
chip->spdif_pcm_bits = chip->spdif_bits;
@@ -744,13 +741,10 @@ static int oxygen_pci_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct oxygen *chip = card->private_data;
- unsigned int i, saved_interrupt_mask;
+ unsigned int saved_interrupt_mask;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < PCM_COUNT; ++i)
- snd_pcm_suspend(chip->streams[i]);
-
if (chip->model.suspend)
chip->model.suspend(chip);
diff --git a/sound/pci/oxygen/pcm1796.h b/sound/pci/oxygen/pcm1796.h
index 34d07dd2d22e..d5dcb09e44cd 100644
--- a/sound/pci/oxygen/pcm1796.h
+++ b/sound/pci/oxygen/pcm1796.h
@@ -10,7 +10,6 @@
#define PCM1796_MUTE 0x01
#define PCM1796_DME 0x02
#define PCM1796_DMF_MASK 0x0c
-#define PCM1796_DMF_DISABLED 0x00
#define PCM1796_DMF_48 0x04
#define PCM1796_DMF_441 0x08
#define PCM1796_DMF_32 0x0c
diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
index 24109d37ca09..a1c6b98b191e 100644
--- a/sound/pci/oxygen/xonar_pcm179x.c
+++ b/sound/pci/oxygen/xonar_pcm179x.c
@@ -331,7 +331,7 @@ static void pcm1796_init(struct oxygen *chip)
struct xonar_pcm179x *data = chip->model_data;
data->pcm1796_regs[0][18 - PCM1796_REG_BASE] =
- PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD;
+ PCM1796_FMT_24_I2S | PCM1796_ATLD;
if (!data->broken_i2c)
data->pcm1796_regs[0][18 - PCM1796_REG_BASE] |= PCM1796_MUTE;
data->pcm1796_regs[0][19 - PCM1796_REG_BASE] =
@@ -621,6 +621,23 @@ static void update_pcm1796_oversampling(struct oxygen *chip)
pcm1796_write_cached(chip, i, 20, reg);
}
+static void update_pcm1796_deemph(struct oxygen *chip)
+{
+ struct xonar_pcm179x *data = chip->model_data;
+ unsigned int i;
+ u8 reg;
+
+ reg = data->pcm1796_regs[0][18 - PCM1796_REG_BASE] & ~PCM1796_DMF_MASK;
+ if (data->current_rate == 48000)
+ reg |= PCM1796_DMF_48;
+ else if (data->current_rate == 44100)
+ reg |= PCM1796_DMF_441;
+ else if (data->current_rate == 32000)
+ reg |= PCM1796_DMF_32;
+ for (i = 0; i < data->dacs; ++i)
+ pcm1796_write_cached(chip, i, 18, reg);
+}
+
static void set_pcm1796_params(struct oxygen *chip,
struct snd_pcm_hw_params *params)
{
@@ -629,6 +646,7 @@ static void set_pcm1796_params(struct oxygen *chip,
msleep(1);
data->current_rate = params_rate(params);
update_pcm1796_oversampling(chip);
+ update_pcm1796_deemph(chip);
}
static void update_pcm1796_volume(struct oxygen *chip)
@@ -653,9 +671,11 @@ static void update_pcm1796_mute(struct oxygen *chip)
unsigned int i;
u8 value;
- value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD;
+ value = data->pcm1796_regs[0][18 - PCM1796_REG_BASE];
if (chip->dac_mute)
value |= PCM1796_MUTE;
+ else
+ value &= ~PCM1796_MUTE;
for (i = 0; i < data->dacs; ++i)
pcm1796_write_cached(chip, i, 18, value);
}
@@ -777,6 +797,49 @@ static const struct snd_kcontrol_new rolloff_control = {
.put = rolloff_put,
};
+static int deemph_get(struct snd_kcontrol *ctl,
+ struct snd_ctl_elem_value *value)
+{
+ struct oxygen *chip = ctl->private_data;
+ struct xonar_pcm179x *data = chip->model_data;
+
+ value->value.integer.value[0] =
+ !!(data->pcm1796_regs[0][18 - PCM1796_REG_BASE] & PCM1796_DME);
+ return 0;
+}
+
+static int deemph_put(struct snd_kcontrol *ctl,
+ struct snd_ctl_elem_value *value)
+{
+ struct oxygen *chip = ctl->private_data;
+ struct xonar_pcm179x *data = chip->model_data;
+ unsigned int i;
+ int changed;
+ u8 reg;
+
+ mutex_lock(&chip->mutex);
+ reg = data->pcm1796_regs[0][18 - PCM1796_REG_BASE];
+ if (!value->value.integer.value[0])
+ reg &= ~PCM1796_DME;
+ else
+ reg |= PCM1796_DME;
+ changed = reg != data->pcm1796_regs[0][18 - PCM1796_REG_BASE];
+ if (changed) {
+ for (i = 0; i < data->dacs; ++i)
+ pcm1796_write(chip, i, 18, reg);
+ }
+ mutex_unlock(&chip->mutex);
+ return changed;
+}
+
+static const struct snd_kcontrol_new deemph_control = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "De-emphasis Playback Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = deemph_get,
+ .put = deemph_put,
+};
+
static const struct snd_kcontrol_new hdav_hdmi_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "HDMI Playback Switch",
@@ -1011,6 +1074,10 @@ static int add_pcm1796_controls(struct oxygen *chip)
snd_ctl_new1(&rolloff_control, chip));
if (err < 0)
return err;
+ err = snd_ctl_add(chip->card,
+ snd_ctl_new1(&deemph_control, chip));
+ if (err < 0)
+ return err;
}
return 0;
}
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index e57da4036231..4ab7efc6e9f7 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -1454,21 +1454,14 @@ static void pcxhr_proc_ltc(struct snd_info_entry *entry,
static void pcxhr_proc_init(struct snd_pcxhr *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "info", &entry))
- snd_info_set_text_ops(entry, chip, pcxhr_proc_info);
- if (! snd_card_proc_new(chip->card, "sync", &entry))
- snd_info_set_text_ops(entry, chip, pcxhr_proc_sync);
+ snd_card_ro_proc_new(chip->card, "info", chip, pcxhr_proc_info);
+ snd_card_ro_proc_new(chip->card, "sync", chip, pcxhr_proc_sync);
/* gpio available on stereo sound cards only */
- if (chip->mgr->is_hr_stereo &&
- !snd_card_proc_new(chip->card, "gpio", &entry)) {
- snd_info_set_text_ops(entry, chip, pcxhr_proc_gpio_read);
- entry->c.text.write = pcxhr_proc_gpo_write;
- entry->mode |= 0200;
- }
- if (!snd_card_proc_new(chip->card, "ltc", &entry))
- snd_info_set_text_ops(entry, chip, pcxhr_proc_ltc);
+ if (chip->mgr->is_hr_stereo)
+ snd_card_rw_proc_new(chip->card, "gpio", chip,
+ pcxhr_proc_gpio_read,
+ pcxhr_proc_gpo_write);
+ snd_card_ro_proc_new(chip->card, "ltc", chip, pcxhr_proc_ltc);
}
/* end of proc interface */
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 23017e3bc76c..8d1a56a9bcfd 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1158,7 +1158,6 @@ static int riptide_suspend(struct device *dev)
chip->in_suspend = 1;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
return 0;
}
@@ -1974,10 +1973,8 @@ snd_riptide_proc_read(struct snd_info_entry *entry,
static void snd_riptide_proc_init(struct snd_riptide *chip)
{
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(chip->card, "riptide", &entry))
- snd_info_set_text_ops(entry, chip, snd_riptide_proc_read);
+ snd_card_ro_proc_new(chip->card, "riptide", chip,
+ snd_riptide_proc_read);
}
static int snd_riptide_mixer(struct snd_riptide *chip)
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 3ac8c71d567c..c6bcc0715716 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1568,10 +1568,7 @@ snd_rme32_proc_read(struct snd_info_entry * entry, struct snd_info_buffer *buffe
static void snd_rme32_proc_init(struct rme32 *rme32)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(rme32->card, "rme32", &entry))
- snd_info_set_text_ops(entry, rme32, snd_rme32_proc_read);
+ snd_card_ro_proc_new(rme32->card, "rme32", rme32, snd_rme32_proc_read);
}
/*
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index dcfa4d7a73e2..42c6b5e09072 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -1868,10 +1868,7 @@ snd_rme96_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer
static void snd_rme96_proc_init(struct rme96 *rme96)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(rme96->card, "rme96", &entry))
- snd_info_set_text_ops(entry, rme96, snd_rme96_proc_read);
+ snd_card_ro_proc_new(rme96->card, "rme96", rme96, snd_rme96_proc_read);
}
/*
@@ -2388,8 +2385,6 @@ static int rme96_suspend(struct device *dev)
struct rme96 *rme96 = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend(rme96->playback_substream);
- snd_pcm_suspend(rme96->capture_substream);
/* save capture & playback pointers */
rme96->playback_pointer = readl(rme96->iobase + RME96_IO_GET_PLAY_POS)
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index ba99ff0e93e0..29bef48a3af3 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -3708,10 +3708,7 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
static void snd_hdsp_proc_init(struct hdsp *hdsp)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(hdsp->card, "hdsp", &entry))
- snd_info_set_text_ops(entry, hdsp, snd_hdsp_proc_read);
+ snd_card_ro_proc_new(hdsp->card, "hdsp", hdsp, snd_hdsp_proc_read);
}
static void snd_hdsp_free_buffers(struct hdsp *hdsp)
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 679ad0415e3b..1209cf0b05e0 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -5287,44 +5287,35 @@ static void snd_hdspm_proc_ports_out(struct snd_info_entry *entry,
static void snd_hdspm_proc_init(struct hdspm *hdspm)
{
- struct snd_info_entry *entry;
+ void (*read)(struct snd_info_entry *, struct snd_info_buffer *) = NULL;
- if (!snd_card_proc_new(hdspm->card, "hdspm", &entry)) {
- switch (hdspm->io_type) {
- case AES32:
- snd_info_set_text_ops(entry, hdspm,
- snd_hdspm_proc_read_aes32);
- break;
- case MADI:
- snd_info_set_text_ops(entry, hdspm,
- snd_hdspm_proc_read_madi);
- break;
- case MADIface:
- /* snd_info_set_text_ops(entry, hdspm,
- snd_hdspm_proc_read_madiface); */
- break;
- case RayDAT:
- snd_info_set_text_ops(entry, hdspm,
- snd_hdspm_proc_read_raydat);
- break;
- case AIO:
- break;
- }
- }
-
- if (!snd_card_proc_new(hdspm->card, "ports.in", &entry)) {
- snd_info_set_text_ops(entry, hdspm, snd_hdspm_proc_ports_in);
+ switch (hdspm->io_type) {
+ case AES32:
+ read = snd_hdspm_proc_read_aes32;
+ break;
+ case MADI:
+ read = snd_hdspm_proc_read_madi;
+ break;
+ case MADIface:
+ /* read = snd_hdspm_proc_read_madiface; */
+ break;
+ case RayDAT:
+ read = snd_hdspm_proc_read_raydat;
+ break;
+ case AIO:
+ break;
}
- if (!snd_card_proc_new(hdspm->card, "ports.out", &entry)) {
- snd_info_set_text_ops(entry, hdspm, snd_hdspm_proc_ports_out);
- }
+ snd_card_ro_proc_new(hdspm->card, "hdspm", hdspm, read);
+ snd_card_ro_proc_new(hdspm->card, "ports.in", hdspm,
+ snd_hdspm_proc_ports_in);
+ snd_card_ro_proc_new(hdspm->card, "ports.out", hdspm,
+ snd_hdspm_proc_ports_out);
#ifdef CONFIG_SND_DEBUG
/* debug file to read all hdspm registers */
- if (!snd_card_proc_new(hdspm->card, "debug", &entry))
- snd_info_set_text_ops(entry, hdspm,
- snd_hdspm_proc_read_debug);
+ snd_card_ro_proc_new(hdspm->card, "debug", hdspm,
+ snd_hdspm_proc_read_debug);
#endif
}
@@ -6411,7 +6402,6 @@ static int snd_hdspm_create_hwdep(struct snd_card *card,
------------------------------------------------------------*/
static int snd_hdspm_preallocate_memory(struct hdspm *hdspm)
{
- int err;
struct snd_pcm *pcm;
size_t wanted;
@@ -6419,21 +6409,10 @@ static int snd_hdspm_preallocate_memory(struct hdspm *hdspm)
wanted = HDSPM_DMA_AREA_BYTES;
- err =
- snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(hdspm->pci),
- wanted,
- wanted);
- if (err < 0) {
- dev_dbg(hdspm->card->dev,
- "Could not preallocate %zd Bytes\n", wanted);
-
- return err;
- } else
- dev_dbg(hdspm->card->dev,
- " Preallocated %zd Bytes\n", wanted);
-
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
+ snd_dma_pci_data(hdspm->pci),
+ wanted, wanted);
+ dev_dbg(hdspm->card->dev, " Preallocated %zd Bytes\n", wanted);
return 0;
}
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index edd765e22377..5228b982da5a 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -1737,10 +1737,8 @@ snd_rme9652_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buff
static void snd_rme9652_proc_init(struct snd_rme9652 *rme9652)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(rme9652->card, "rme9652", &entry))
- snd_info_set_text_ops(entry, rme9652, snd_rme9652_proc_read);
+ snd_card_ro_proc_new(rme9652->card, "rme9652", rme9652,
+ snd_rme9652_proc_read);
}
static void snd_rme9652_free_buffers(struct snd_rme9652 *rme9652)
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index 964acf302479..6b27980d77a8 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -1214,7 +1214,6 @@ static int sis_suspend(struct device *dev)
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(sis->pcm);
if (sis->codecs_present & SIS_PRIMARY_CODEC_PRESENT)
snd_ac97_suspend(sis->ac97[0]);
if (sis->codecs_present & SIS_SECONDARY_CODEC_PRESENT)
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 7218f38b59db..71d5ad3cffd6 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -1171,10 +1171,8 @@ static void snd_sonicvibes_proc_read(struct snd_info_entry *entry,
static void snd_sonicvibes_proc_init(struct sonicvibes *sonic)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(sonic->card, "sonicvibes", &entry))
- snd_info_set_text_ops(entry, sonic, snd_sonicvibes_proc_read);
+ snd_card_ro_proc_new(sonic->card, "sonicvibes", sonic,
+ snd_sonicvibes_proc_read);
}
/*
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 5523e193d556..0ff32d3f5d3b 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -3320,13 +3320,11 @@ static void snd_trident_proc_read(struct snd_info_entry *entry,
static void snd_trident_proc_init(struct snd_trident *trident)
{
- struct snd_info_entry *entry;
const char *s = "trident";
if (trident->device == TRIDENT_DEVICE_ID_SI7018)
s = "sis7018";
- if (! snd_card_proc_new(trident->card, s, &entry))
- snd_info_set_text_ops(entry, trident, snd_trident_proc_read);
+ snd_card_ro_proc_new(trident->card, s, trident, snd_trident_proc_read);
}
static int snd_trident_dev_free(struct snd_device *device)
@@ -3915,10 +3913,6 @@ static int snd_trident_suspend(struct device *dev)
trident->in_suspend = 1;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(trident->pcm);
- snd_pcm_suspend_all(trident->foldback);
- snd_pcm_suspend_all(trident->spdif);
-
snd_ac97_suspend(trident->ac97);
snd_ac97_suspend(trident->ac97_sec);
return 0;
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index c488c5afa195..dee1c487d6ba 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -2144,10 +2144,8 @@ static void snd_via82xx_proc_read(struct snd_info_entry *entry,
static void snd_via82xx_proc_init(struct via82xx *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "via82xx", &entry))
- snd_info_set_text_ops(entry, chip, snd_via82xx_proc_read);
+ snd_card_ro_proc_new(chip->card, "via82xx", chip,
+ snd_via82xx_proc_read);
}
/*
@@ -2278,8 +2276,6 @@ static int snd_via82xx_suspend(struct device *dev)
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < 2; i++)
- snd_pcm_suspend_all(chip->pcms[i]);
for (i = 0; i < chip->num_devs; i++)
snd_via82xx_channel_reset(chip, &chip->devs[i]);
synchronize_irq(chip->irq);
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index b13c8688cc8d..7e0bebce7b77 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -865,11 +865,9 @@ static int snd_via686_pcm_new(struct via82xx_modem *chip)
init_viadev(chip, 0, VIA_REG_MO_STATUS, 0);
init_viadev(chip, 1, VIA_REG_MI_STATUS, 1);
- if ((err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
- 64*1024, 128*1024)) < 0)
- return err;
-
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
+ snd_dma_pci_data(chip->pci),
+ 64*1024, 128*1024);
return 0;
}
@@ -937,10 +935,8 @@ static void snd_via82xx_proc_read(struct snd_info_entry *entry, struct snd_info_
static void snd_via82xx_proc_init(struct via82xx_modem *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "via82xx", &entry))
- snd_info_set_text_ops(entry, chip, snd_via82xx_proc_read);
+ snd_card_ro_proc_new(chip->card, "via82xx", chip,
+ snd_via82xx_proc_read);
}
/*
@@ -1038,8 +1034,6 @@ static int snd_via82xx_suspend(struct device *dev)
int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- for (i = 0; i < 2; i++)
- snd_pcm_suspend_all(chip->pcms[i]);
for (i = 0; i < chip->num_devs; i++)
snd_via82xx_channel_reset(chip, &chip->devs[i]);
synchronize_irq(chip->irq);
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index a4926fb03991..4d48877f211f 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -1985,11 +1985,7 @@ static void snd_ymfpci_proc_read(struct snd_info_entry *entry,
static int snd_ymfpci_proc_init(struct snd_card *card, struct snd_ymfpci *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(card, "ymfpci", &entry))
- snd_info_set_text_ops(entry, chip, snd_ymfpci_proc_read);
- return 0;
+ return snd_card_ro_proc_new(card, "ymfpci", chip, snd_ymfpci_proc_read);
}
/*
@@ -2304,10 +2300,6 @@ static int snd_ymfpci_suspend(struct device *dev)
unsigned int i;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
- snd_pcm_suspend_all(chip->pcm2);
- snd_pcm_suspend_all(chip->pcm_spdif);
- snd_pcm_suspend_all(chip->pcm_4ch);
snd_ac97_suspend(chip->ac97);
for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++)
chip->saved_regs[i] = snd_ymfpci_readl(chip, saved_regs_index[i]);
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_core.c b/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
index d724ab0653cf..910478275fd9 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
@@ -148,10 +148,7 @@ static void pdacf_proc_read(struct snd_info_entry * entry,
static void pdacf_proc_init(struct snd_pdacf *chip)
{
- struct snd_info_entry *entry;
-
- if (! snd_card_proc_new(chip->card, "pdaudiocf", &entry))
- snd_info_set_text_ops(entry, chip, pdacf_proc_read);
+ snd_card_ro_proc_new(chip->card, "pdaudiocf", chip, pdacf_proc_read);
}
struct snd_pdacf *snd_pdacf_create(struct snd_card *card)
@@ -265,7 +262,6 @@ int snd_pdacf_suspend(struct snd_pdacf *chip)
u16 val;
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
/* disable interrupts, but use direct write to preserve old register value in chip->regmap */
val = inw(chip->port + PDAUDIOCF_REG_IER);
val &= ~(PDAUDIOCF_IRQOVREN|PDAUDIOCF_IRQAKMEN|PDAUDIOCF_IRQLVLEN0|PDAUDIOCF_IRQLVLEN1);
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index d692e4070167..6d420bd3ae17 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -1365,7 +1365,6 @@ void snd_pmac_suspend(struct snd_pmac *chip)
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
if (chip->suspend)
chip->suspend(chip);
- snd_pcm_suspend_all(chip->pcm);
spin_lock_irqsave(&chip->reg_lock, flags);
snd_pmac_beep_stop(chip);
spin_unlock_irqrestore(&chip->reg_lock, flags);
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index abe031c9d592..521236efcc4d 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -1024,15 +1024,11 @@ static int snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
the_card.pcm->info_flags = SNDRV_PCM_INFO_NONINTERLEAVED;
/* pre-alloc PCM DMA buffer*/
- ret = snd_pcm_lib_preallocate_pages_for_all(the_card.pcm,
+ snd_pcm_lib_preallocate_pages_for_all(the_card.pcm,
SNDRV_DMA_TYPE_DEV,
&dev->core,
SND_PS3_PCM_PREALLOC_SIZE,
SND_PS3_PCM_PREALLOC_SIZE);
- if (ret < 0) {
- pr_info("%s: prealloc failed\n", __func__);
- goto clean_card;
- }
/*
* allocate null buffer
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 6d7ffffcce95..78e5798ae967 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -1371,6 +1371,7 @@ int snd_pmac_tumbler_init(struct snd_pmac *chip)
mix->anded_reset = 1;
if (of_get_property(np, "layout-id", NULL))
mix->reset_on_sleep = 0;
+ of_node_put(np);
break;
}
}
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index 2b26311405a4..e7fef3fce44a 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -464,14 +464,12 @@ static int __init snd_aicapcmchip(struct snd_card_aica
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&snd_aicapcm_playback_ops);
/* Allocate the DMA buffers */
- err =
- snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data
- (GFP_KERNEL),
- AICA_BUFFER_SIZE,
- AICA_BUFFER_SIZE);
- return err;
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ AICA_BUFFER_SIZE,
+ AICA_BUFFER_SIZE);
+ return 0;
}
/* Mixer controls */
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 6592a422a047..aa35940f5c50 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -64,6 +64,7 @@ source "sound/soc/samsung/Kconfig"
source "sound/soc/sh/Kconfig"
source "sound/soc/sirf/Kconfig"
source "sound/soc/spear/Kconfig"
+source "sound/soc/sprd/Kconfig"
source "sound/soc/sti/Kconfig"
source "sound/soc/stm/Kconfig"
source "sound/soc/sunxi/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 48c48c1c893c..974fb9821e17 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_SND_SOC) += samsung/
obj-$(CONFIG_SND_SOC) += sh/
obj-$(CONFIG_SND_SOC) += sirf/
obj-$(CONFIG_SND_SOC) += spear/
+obj-$(CONFIG_SND_SOC) += sprd/
obj-$(CONFIG_SND_SOC) += sti/
obj-$(CONFIG_SND_SOC) += stm/
obj-$(CONFIG_SND_SOC) += sunxi/
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index f4011bebc7ec..2391c7f1dd2d 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -1142,7 +1142,6 @@ static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
{
- int ret;
struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
DRV_NAME);
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
@@ -1150,24 +1149,21 @@ static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
switch (adata->asic_type) {
case CHIP_STONEY:
- ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
- SNDRV_DMA_TYPE_DEV,
- parent,
- ST_MIN_BUFFER,
- ST_MAX_BUFFER);
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_DEV,
+ parent,
+ ST_MIN_BUFFER,
+ ST_MAX_BUFFER);
break;
default:
- ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
- SNDRV_DMA_TYPE_DEV,
- parent,
- MIN_BUFFER,
- MAX_BUFFER);
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_DEV,
+ parent,
+ MIN_BUFFER,
+ MAX_BUFFER);
break;
}
- if (ret < 0)
- dev_err(component->dev,
- "buffer preallocation failure error:%d\n", ret);
- return ret;
+ return 0;
}
static int acp_dma_close(struct snd_pcm_substream *substream)
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index 022a8912c8a2..1a2e15ff1456 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -367,10 +367,10 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd)
{
- return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
- SNDRV_DMA_TYPE_DEV,
- NULL, MIN_BUFFER,
- MAX_BUFFER);
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
+ rtd->pcm->card->dev,
+ MIN_BUFFER, MAX_BUFFER);
+ return 0;
}
static int acp3x_dma_hw_free(struct snd_pcm_substream *substream)
@@ -611,14 +611,16 @@ static int acp3x_audio_probe(struct platform_device *pdev)
}
irqflags = *((unsigned int *)(pdev->dev.platform_data));
- adata = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dev_data),
- GFP_KERNEL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n");
return -ENODEV;
}
+ adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL);
+ if (!adata)
+ return -ENOMEM;
+
adata->acp3x_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 62bdb7e333b8..419114edfd57 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -50,10 +50,12 @@ config SND_SOC_ALL_CODECS
select SND_SOC_BT_SCO
select SND_SOC_BD28623
select SND_SOC_CQ0093VC
+ select SND_SOC_CROS_EC_CODEC if MFD_CROS_EC
select SND_SOC_CS35L32 if I2C
select SND_SOC_CS35L33 if I2C
select SND_SOC_CS35L34 if I2C
select SND_SOC_CS35L35 if I2C
+ select SND_SOC_CS35L36 if I2C
select SND_SOC_CS42L42 if I2C
select SND_SOC_CS42L51_I2C if I2C
select SND_SOC_CS42L52 if I2C && INPUT
@@ -65,6 +67,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CS4271_SPI if SPI_MASTER
select SND_SOC_CS42XX8_I2C if I2C
select SND_SOC_CS43130 if I2C
+ select SND_SOC_CS4341 if SND_SOC_I2C_AND_SPI
select SND_SOC_CS4349 if I2C
select SND_SOC_CS47L24 if MFD_CS47L24
select SND_SOC_CS53L30 if I2C
@@ -88,6 +91,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_INNO_RK3036
select SND_SOC_ISABELLE if I2C
select SND_SOC_JZ4740_CODEC
+ select SND_SOC_JZ4725B_CODEC
select SND_SOC_LM4857 if I2C
select SND_SOC_LM49453 if I2C
select SND_SOC_MAX98088 if I2C
@@ -109,6 +113,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MC13783 if MFD_MC13XXX
select SND_SOC_ML26124 if I2C
select SND_SOC_MT6351 if MTK_PMIC_WRAP
+ select SND_SOC_MT6358 if MTK_PMIC_WRAP
select SND_SOC_NAU8540 if I2C
select SND_SOC_NAU8810 if I2C
select SND_SOC_NAU8822 if I2C
@@ -129,6 +134,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_PCM5102A
select SND_SOC_PCM512x_I2C if I2C
select SND_SOC_PCM512x_SPI if SPI_MASTER
+ select SND_SOC_RK3328
select SND_SOC_RT274 if I2C
select SND_SOC_RT286 if I2C
select SND_SOC_RT298 if I2C
@@ -185,6 +191,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TWL6040 if TWL6040_CORE
select SND_SOC_UDA134X
select SND_SOC_UDA1380 if I2C
+ select SND_SOC_WCD9335 if SLIMBUS
select SND_SOC_WL1273 if MFD_WL1273_CORE
select SND_SOC_WM0010 if SPI_MASTER
select SND_SOC_WM1250_EV1 if I2C
@@ -455,6 +462,13 @@ config SND_SOC_CPCAP
config SND_SOC_CQ0093VC
tristate
+config SND_SOC_CROS_EC_CODEC
+ tristate "codec driver for ChromeOS EC"
+ depends on MFD_CROS_EC
+ help
+ If you say yes here you will get support for the
+ ChromeOS Embedded Controller's Audio Codec.
+
config SND_SOC_CS35L32
tristate "Cirrus Logic CS35L32 CODEC"
depends on I2C
@@ -471,6 +485,10 @@ config SND_SOC_CS35L35
tristate "Cirrus Logic CS35L35 CODEC"
depends on I2C
+config SND_SOC_CS35L36
+ tristate "Cirrus Logic CS35L36 CODEC"
+ depends on I2C
+
config SND_SOC_CS42L42
tristate "Cirrus Logic CS42L42 CODEC"
depends on I2C
@@ -542,6 +560,12 @@ config SND_SOC_CS43130
tristate "Cirrus Logic CS43130 CODEC"
depends on I2C
+config SND_SOC_CS4341
+ tristate "Cirrus Logic CS4341 CODEC"
+ depends on SND_SOC_I2C_AND_SPI
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
+
# Cirrus Logic CS4349 HiFi DAC
config SND_SOC_CS4349
tristate "Cirrus Logic CS4349 CODEC"
@@ -560,8 +584,26 @@ config SND_SOC_CX20442
depends on TTY
config SND_SOC_JZ4740_CODEC
+ depends on MIPS || COMPILE_TEST
select REGMAP_MMIO
- tristate
+ tristate "Ingenic JZ4740 internal CODEC"
+ help
+ Enable support for the internal CODEC found in the JZ4740 SoC
+ from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called snd-soc-jz4740-codec.
+
+config SND_SOC_JZ4725B_CODEC
+ depends on MIPS || COMPILE_TEST
+ select REGMAP
+ tristate "Ingenic JZ4725B internal CODEC"
+ help
+ Enable support for the internal CODEC found in the JZ4725B SoC
+ from Ingenic.
+
+ This driver can also be built as a module. If so, the module
+ will be called snd-soc-jz4725b-codec.
config SND_SOC_L3
tristate
@@ -698,6 +740,7 @@ config SND_SOC_MSM8916_WCD_ANALOG
config SND_SOC_MSM8916_WCD_DIGITAL
tristate "Qualcomm MSM8916 WCD DIGITAL Codec"
+ select REGMAP_MMIO
config SND_SOC_PCM1681
tristate "Texas Instruments PCM1681 CODEC"
@@ -799,6 +842,10 @@ config SND_SOC_PCM512x_SPI
select SND_SOC_PCM512x
select REGMAP_SPI
+config SND_SOC_RK3328
+ tristate "Rockchip RK3328 audio CODEC"
+ select REGMAP_MMIO
+
config SND_SOC_RL6231
tristate
default y if SND_SOC_RT5514=y
@@ -1100,6 +1147,15 @@ config SND_SOC_UDA1380
tristate
depends on I2C
+config SND_SOC_WCD9335
+ tristate "WCD9335 Codec"
+ depends on SLIMBUS
+ select REGMAP_SLIMBUS
+ help
+	  The WCD9335 is a standalone Hi-Fi audio CODEC IC that supports
+	  Qualcomm Technologies, Inc. (QTI) multimedia solutions,
+	  including the MSM8996, MSM8976, and MSM8956 chipsets.
+
config SND_SOC_WL1273
tristate
@@ -1211,7 +1267,8 @@ config SND_SOC_WM8903
depends on I2C
config SND_SOC_WM8904
- tristate
+ tristate "Wolfson Microelectronics WM8904 CODEC"
+ depends on I2C
config SND_SOC_WM8940
tristate
@@ -1325,6 +1382,12 @@ config SND_SOC_ML26124
config SND_SOC_MT6351
tristate "MediaTek MT6351 Codec"
+config SND_SOC_MT6358
+ tristate "MediaTek MT6358 Codec"
+ help
+	  Enable support for platforms that use the MT6358 as an
+	  external codec device.
+
config SND_SOC_NAU8540
tristate "Nuvoton Technology Corporation NAU85L40 CODEC"
depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 66f55d185620..aab2ad95a137 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -42,10 +42,12 @@ snd-soc-bd28623-objs := bd28623.o
snd-soc-bt-sco-objs := bt-sco.o
snd-soc-cpcap-objs := cpcap.o
snd-soc-cq93vc-objs := cq93vc.o
+snd-soc-cros-ec-codec-objs := cros_ec_codec.o
snd-soc-cs35l32-objs := cs35l32.o
snd-soc-cs35l33-objs := cs35l33.o
snd-soc-cs35l34-objs := cs35l34.o
snd-soc-cs35l35-objs := cs35l35.o
+snd-soc-cs35l36-objs := cs35l36.o
snd-soc-cs42l42-objs := cs42l42.o
snd-soc-cs42l51-objs := cs42l51.o
snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o
@@ -60,6 +62,7 @@ snd-soc-cs4271-spi-objs := cs4271-spi.o
snd-soc-cs42xx8-objs := cs42xx8.o
snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
snd-soc-cs43130-objs := cs43130.o
+snd-soc-cs4341-objs := cs4341.o
snd-soc-cs4349-objs := cs4349.o
snd-soc-cs47l24-objs := cs47l24.o
snd-soc-cs53l30-objs := cs53l30.o
@@ -84,6 +87,7 @@ snd-soc-ics43432-objs := ics43432.o
snd-soc-inno-rk3036-objs := inno_rk3036.o
snd-soc-isabelle-objs := isabelle.o
snd-soc-jz4740-codec-objs := jz4740.o
+snd-soc-jz4725b-codec-objs := jz4725b.o
snd-soc-l3-objs := l3.o
snd-soc-lm4857-objs := lm4857.o
snd-soc-lm49453-objs := lm49453.o
@@ -106,6 +110,7 @@ snd-soc-ml26124-objs := ml26124.o
snd-soc-msm8916-analog-objs := msm8916-wcd-analog.o
snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o
snd-soc-mt6351-objs := mt6351.o
+snd-soc-mt6358-objs := mt6358.o
snd-soc-nau8540-objs := nau8540.o
snd-soc-nau8810-objs := nau8810.o
snd-soc-nau8822-objs := nau8822.o
@@ -132,6 +137,7 @@ snd-soc-pcm5102a-objs := pcm5102a.o
snd-soc-pcm512x-objs := pcm512x.o
snd-soc-pcm512x-i2c-objs := pcm512x-i2c.o
snd-soc-pcm512x-spi-objs := pcm512x-spi.o
+snd-soc-rk3328-objs := rk3328_codec.o
snd-soc-rl6231-objs := rl6231.o
snd-soc-rl6347a-objs := rl6347a.o
snd-soc-rt1305-objs := rt1305.o
@@ -198,6 +204,7 @@ snd-soc-twl4030-objs := twl4030.o
snd-soc-twl6040-objs := twl6040.o
snd-soc-uda134x-objs := uda134x.o
snd-soc-uda1380-objs := uda1380.o
+snd-soc-wcd9335-objs := wcd-clsh-v2.o wcd9335.o
snd-soc-wl1273-objs := wl1273.o
snd-soc-wm-adsp-objs := wm_adsp.o
snd-soc-wm0010-objs := wm0010.o
@@ -308,10 +315,12 @@ obj-$(CONFIG_SND_SOC_BD28623) += snd-soc-bd28623.o
obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CPCAP) += snd-soc-cpcap.o
+obj-$(CONFIG_SND_SOC_CROS_EC_CODEC) += snd-soc-cros-ec-codec.o
obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o
obj-$(CONFIG_SND_SOC_CS35L33) += snd-soc-cs35l33.o
obj-$(CONFIG_SND_SOC_CS35L34) += snd-soc-cs35l34.o
obj-$(CONFIG_SND_SOC_CS35L35) += snd-soc-cs35l35.o
+obj-$(CONFIG_SND_SOC_CS35L36) += snd-soc-cs35l36.o
obj-$(CONFIG_SND_SOC_CS42L42) += snd-soc-cs42l42.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
obj-$(CONFIG_SND_SOC_CS42L51_I2C) += snd-soc-cs42l51-i2c.o
@@ -326,6 +335,7 @@ obj-$(CONFIG_SND_SOC_CS4271_SPI) += snd-soc-cs4271-spi.o
obj-$(CONFIG_SND_SOC_CS42XX8) += snd-soc-cs42xx8.o
obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
obj-$(CONFIG_SND_SOC_CS43130) += snd-soc-cs43130.o
+obj-$(CONFIG_SND_SOC_CS4341) += snd-soc-cs4341.o
obj-$(CONFIG_SND_SOC_CS4349) += snd-soc-cs4349.o
obj-$(CONFIG_SND_SOC_CS47L24) += snd-soc-cs47l24.o
obj-$(CONFIG_SND_SOC_CS53L30) += snd-soc-cs53l30.o
@@ -350,6 +360,7 @@ obj-$(CONFIG_SND_SOC_ICS43432) += snd-soc-ics43432.o
obj-$(CONFIG_SND_SOC_INNO_RK3036) += snd-soc-inno-rk3036.o
obj-$(CONFIG_SND_SOC_ISABELLE) += snd-soc-isabelle.o
obj-$(CONFIG_SND_SOC_JZ4740_CODEC) += snd-soc-jz4740-codec.o
+obj-$(CONFIG_SND_SOC_JZ4725B_CODEC) += snd-soc-jz4725b-codec.o
obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
obj-$(CONFIG_SND_SOC_LM4857) += snd-soc-lm4857.o
obj-$(CONFIG_SND_SOC_LM49453) += snd-soc-lm49453.o
@@ -372,6 +383,7 @@ obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
obj-$(CONFIG_SND_SOC_MSM8916_WCD_ANALOG) +=snd-soc-msm8916-analog.o
obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) +=snd-soc-msm8916-digital.o
obj-$(CONFIG_SND_SOC_MT6351) += snd-soc-mt6351.o
+obj-$(CONFIG_SND_SOC_MT6358) += snd-soc-mt6358.o
obj-$(CONFIG_SND_SOC_NAU8540) += snd-soc-nau8540.o
obj-$(CONFIG_SND_SOC_NAU8810) += snd-soc-nau8810.o
obj-$(CONFIG_SND_SOC_NAU8822) += snd-soc-nau8822.o
@@ -398,6 +410,7 @@ obj-$(CONFIG_SND_SOC_PCM5102A) += snd-soc-pcm5102a.o
obj-$(CONFIG_SND_SOC_PCM512x) += snd-soc-pcm512x.o
obj-$(CONFIG_SND_SOC_PCM512x_I2C) += snd-soc-pcm512x-i2c.o
obj-$(CONFIG_SND_SOC_PCM512x_SPI) += snd-soc-pcm512x-spi.o
+obj-$(CONFIG_SND_SOC_RK3328) += snd-soc-rk3328.o
obj-$(CONFIG_SND_SOC_RL6231) += snd-soc-rl6231.o
obj-$(CONFIG_SND_SOC_RL6347A) += snd-soc-rl6347a.o
obj-$(CONFIG_SND_SOC_RT1305) += snd-soc-rt1305.o
@@ -463,6 +476,7 @@ obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
obj-$(CONFIG_SND_SOC_UDA134X) += snd-soc-uda134x.o
obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
+obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o
obj-$(CONFIG_SND_SOC_WL1273) += snd-soc-wl1273.o
obj-$(CONFIG_SND_SOC_WM0010) += snd-soc-wm0010.o
obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index 4b60ebee491d..96d7cb2e4a56 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -37,6 +37,13 @@ static SOC_ENUM_SINGLE_DECL(ad193x_deemp_enum, AD193X_DAC_CTRL2, 1,
static const DECLARE_TLV_DB_MINMAX(adau193x_tlv, -9563, 0);
+static const unsigned int ad193x_sb[] = {32};
+
+static struct snd_pcm_hw_constraint_list constr = {
+ .list = ad193x_sb,
+ .count = ARRAY_SIZE(ad193x_sb),
+};
+
static const struct snd_kcontrol_new ad193x_snd_controls[] = {
/* DAC volume control */
SOC_DOUBLE_R_TLV("DAC1 Volume", AD193X_DAC_L1_VOL,
@@ -93,6 +100,15 @@ static const struct snd_soc_dapm_widget ad193x_adc_widgets[] = {
SND_SOC_DAPM_INPUT("ADC2IN"),
};
+static int ad193x_check_pll(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(source->dapm);
+ struct ad193x_priv *ad193x = snd_soc_component_get_drvdata(component);
+
+ return !!ad193x->sysclk;
+}
+
static const struct snd_soc_dapm_route audio_paths[] = {
{ "DAC", NULL, "SYSCLK" },
{ "DAC Output", NULL, "DAC" },
@@ -101,7 +117,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
{ "DAC2OUT", NULL, "DAC Output" },
{ "DAC3OUT", NULL, "DAC Output" },
{ "DAC4OUT", NULL, "DAC Output" },
- { "SYSCLK", NULL, "PLL_PWR" },
+ { "SYSCLK", NULL, "PLL_PWR", &ad193x_check_pll },
};
static const struct snd_soc_dapm_route ad193x_adc_audio_paths[] = {
@@ -181,23 +197,26 @@ static int ad193x_set_dai_fmt(struct snd_soc_dai *codec_dai,
{
struct ad193x_priv *ad193x = snd_soc_component_get_drvdata(codec_dai->component);
unsigned int adc_serfmt = 0;
+ unsigned int dac_serfmt = 0;
unsigned int adc_fmt = 0;
unsigned int dac_fmt = 0;
/* At present, the driver only support AUX ADC mode(SND_SOC_DAIFMT_I2S
- * with TDM) and ADC&DAC TDM mode(SND_SOC_DAIFMT_DSP_A)
+ * with TDM), ADC&DAC TDM mode(SND_SOC_DAIFMT_DSP_A) and DAC I2S mode
+ * (SND_SOC_DAIFMT_I2S)
*/
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
adc_serfmt |= AD193X_ADC_SERFMT_TDM;
+ dac_serfmt |= AD193X_DAC_SERFMT_STEREO;
break;
case SND_SOC_DAIFMT_DSP_A:
adc_serfmt |= AD193X_ADC_SERFMT_AUX;
+ dac_serfmt |= AD193X_DAC_SERFMT_TDM;
break;
default:
if (ad193x_has_adc(ad193x))
return -EINVAL;
- break;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -221,6 +240,12 @@ static int ad193x_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
+ /* For DSP_*, LRCLK's polarity must be inverted */
+ if (fmt & SND_SOC_DAIFMT_DSP_A) {
+ change_bit(ffs(AD193X_DAC_LEFT_HIGH) - 1,
+ (unsigned long *)&dac_fmt);
+ }
+
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & frm master */
adc_fmt |= AD193X_ADC_LCR_MASTER;
@@ -248,6 +273,8 @@ static int ad193x_set_dai_fmt(struct snd_soc_dai *codec_dai,
regmap_update_bits(ad193x->regmap, AD193X_ADC_CTRL2,
AD193X_ADC_FMT_MASK, adc_fmt);
}
+ regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL0,
+ AD193X_DAC_SERFMT_MASK, dac_serfmt);
regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL1,
AD193X_DAC_FMT_MASK, dac_fmt);
@@ -258,7 +285,22 @@ static int ad193x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_component *component = codec_dai->component;
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct ad193x_priv *ad193x = snd_soc_component_get_drvdata(component);
+
+ if (clk_id == AD193X_SYSCLK_MCLK) {
+ /* MCLK must be 512 x fs */
+ if (dir == SND_SOC_CLOCK_OUT || freq != 24576000)
+ return -EINVAL;
+
+ regmap_update_bits(ad193x->regmap, AD193X_PLL_CLK_CTRL1,
+ AD193X_PLL_SRC_MASK,
+ AD193X_PLL_DAC_SRC_MCLK |
+ AD193X_PLL_CLK_SRC_MCLK);
+
+ snd_soc_dapm_sync(dapm);
+ return 0;
+ }
switch (freq) {
case 12288000:
case 18432000:
@@ -321,7 +363,16 @@ static int ad193x_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static int ad193x_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
+ &constr);
+}
+
static const struct snd_soc_dai_ops ad193x_dai_ops = {
+ .startup = ad193x_startup,
.hw_params = ad193x_hw_params,
.digital_mute = ad193x_mute,
.set_tdm_slot = ad193x_set_tdm_slot,
@@ -351,6 +402,20 @@ static struct snd_soc_dai_driver ad193x_dai = {
.ops = &ad193x_dai_ops,
};
+/* codec DAI instance for DAC only */
+static struct snd_soc_dai_driver ad193x_no_adc_dai = {
+ .name = "ad193x-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &ad193x_dai_ops,
+};
+
static int ad193x_component_probe(struct snd_soc_component *component)
{
struct ad193x_priv *ad193x = snd_soc_component_get_drvdata(component);
@@ -444,8 +509,11 @@ int ad193x_probe(struct device *dev, struct regmap *regmap,
dev_set_drvdata(dev, ad193x);
+ if (ad193x_has_adc(ad193x))
+ return devm_snd_soc_register_component(dev, &soc_component_dev_ad193x,
+ &ad193x_dai, 1);
return devm_snd_soc_register_component(dev, &soc_component_dev_ad193x,
- &ad193x_dai, 1);
+ &ad193x_no_adc_dai, 1);
}
EXPORT_SYMBOL_GPL(ad193x_probe);
diff --git a/sound/soc/codecs/ad193x.h b/sound/soc/codecs/ad193x.h
index 8b1e65f928d2..27d6afbd7dfb 100644
--- a/sound/soc/codecs/ad193x.h
+++ b/sound/soc/codecs/ad193x.h
@@ -31,6 +31,11 @@ int ad193x_probe(struct device *dev, struct regmap *regmap,
#define AD193X_PLL_INPUT_512 (2 << 1)
#define AD193X_PLL_INPUT_768 (3 << 1)
#define AD193X_PLL_CLK_CTRL1 0x01
+#define AD193X_PLL_SRC_MASK 0x03
+#define AD193X_PLL_DAC_SRC_PLL 0
+#define AD193X_PLL_DAC_SRC_MCLK 1
+#define AD193X_PLL_CLK_SRC_PLL (0 << 1)
+#define AD193X_PLL_CLK_SRC_MCLK (1 << 1)
#define AD193X_DAC_CTRL0 0x02
#define AD193X_DAC_POWERDOWN 0x01
#define AD193X_DAC_SERFMT_MASK 0xC0
@@ -96,4 +101,7 @@ int ad193x_probe(struct device *dev, struct regmap *regmap,
#define AD193X_NUM_REGS 17
+#define AD193X_SYSCLK_PLL 0
+#define AD193X_SYSCLK_MCLK 1
+
#endif
diff --git a/sound/soc/codecs/adau1977.c b/sound/soc/codecs/adau1977.c
index 116af6a9ce3b..11c53bcb71dd 100644
--- a/sound/soc/codecs/adau1977.c
+++ b/sound/soc/codecs/adau1977.c
@@ -885,13 +885,15 @@ static int adau1977_setup_micbias(struct adau1977 *adau1977)
struct adau1977_platform_data *pdata = adau1977->dev->platform_data;
unsigned int micbias;
- if (pdata) {
+ if (pdata)
micbias = pdata->micbias;
- if (micbias > ADAU1977_MICBIAS_9V0)
- return -EINVAL;
-
- } else {
+ else if (device_property_read_u32(adau1977->dev, "adi,micbias",
+ &micbias))
micbias = ADAU1977_MICBIAS_8V5;
+
+ if (micbias > ADAU1977_MICBIAS_9V0) {
+ dev_err(adau1977->dev, "Invalid value for 'adi,micbias'\n");
+ return -EINVAL;
}
return regmap_update_bits(adau1977->regmap, ADAU1977_REG_MICBIAS,
diff --git a/sound/soc/codecs/adau7002.c b/sound/soc/codecs/adau7002.c
index fdff86878287..a8deb37fc78a 100644
--- a/sound/soc/codecs/adau7002.c
+++ b/sound/soc/codecs/adau7002.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -15,12 +16,55 @@
#include <sound/soc.h>
+struct adau7002_priv {
+ int wakeup_delay;
+};
+
+static int adau7002_aif_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct adau7002_priv *adau7002 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (adau7002->wakeup_delay)
+ msleep(adau7002->wakeup_delay);
+ break;
+ }
+
+ return 0;
+}
+
+static int adau7002_component_probe(struct snd_soc_component *component)
+{
+ struct adau7002_priv *adau7002;
+
+ adau7002 = devm_kzalloc(component->dev, sizeof(*adau7002),
+ GFP_KERNEL);
+ if (!adau7002)
+ return -ENOMEM;
+
+ device_property_read_u32(component->dev, "wakeup-delay-ms",
+ &adau7002->wakeup_delay);
+
+ snd_soc_component_set_drvdata(component, adau7002);
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget adau7002_widgets[] = {
+ SND_SOC_DAPM_AIF_OUT_E("ADAU AIF", "Capture", 0,
+ SND_SOC_NOPM, 0, 0, adau7002_aif_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_INPUT("PDM_DAT"),
SND_SOC_DAPM_REGULATOR_SUPPLY("IOVDD", 0, 0),
};
static const struct snd_soc_dapm_route adau7002_routes[] = {
+ { "ADAU AIF", NULL, "PDM_DAT"},
{ "Capture", NULL, "PDM_DAT" },
{ "Capture", NULL, "IOVDD" },
};
@@ -40,6 +84,7 @@ static struct snd_soc_dai_driver adau7002_dai = {
};
static const struct snd_soc_component_driver adau7002_component_driver = {
+ .probe = adau7002_component_probe,
.dapm_widgets = adau7002_widgets,
.num_dapm_widgets = ARRAY_SIZE(adau7002_widgets),
.dapm_routes = adau7002_routes,
diff --git a/sound/soc/codecs/ak4118.c b/sound/soc/codecs/ak4118.c
index 238ab29f2bf4..ce419e8cf890 100644
--- a/sound/soc/codecs/ak4118.c
+++ b/sound/soc/codecs/ak4118.c
@@ -6,6 +6,7 @@
*/
#include <linux/i2c.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
index 70d4c89bd6fc..eab7c76cfcd9 100644
--- a/sound/soc/codecs/ak4458.c
+++ b/sound/soc/codecs/ak4458.c
@@ -21,6 +21,11 @@
#include "ak4458.h"
+struct ak4458_drvdata {
+ struct snd_soc_dai_driver *dai_drv;
+ const struct snd_soc_component_driver *comp_drv;
+};
+
/* AK4458 Codec Private Data */
struct ak4458_priv {
struct device *dev;
@@ -258,6 +263,33 @@ static const struct snd_soc_dapm_route ak4458_intercon[] = {
{"AK4458 AOUTD", NULL, "AK4458 DAC4"},
};
+/* ak4497 controls */
+static const struct snd_kcontrol_new ak4497_snd_controls[] = {
+ SOC_DOUBLE_R_TLV("DAC Playback Volume", AK4458_03_LCHATT,
+ AK4458_04_RCHATT, 0, 0xFF, 0, dac_tlv),
+ SOC_ENUM("AK4497 De-emphasis Response DAC", ak4458_dac1_dem_enum),
+ SOC_ENUM_EXT("AK4497 Digital Filter Setting", ak4458_digfil_enum,
+ get_digfil, set_digfil),
+ SOC_ENUM("AK4497 Inverting Enable of DZFB", ak4458_dzfb_enum),
+ SOC_ENUM("AK4497 Sound Mode", ak4458_sm_enum),
+ SOC_ENUM("AK4497 Attenuation transition Time Setting",
+ ak4458_ats_enum),
+};
+
+/* ak4497 dapm widgets */
+static const struct snd_soc_dapm_widget ak4497_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("AK4497 DAC", NULL, AK4458_0A_CONTROL6, 2, 0),
+ SND_SOC_DAPM_AIF_IN("AK4497 SDTI", "Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUTPUT("AK4497 AOUT"),
+};
+
+/* ak4497 dapm routes */
+static const struct snd_soc_dapm_route ak4497_intercon[] = {
+ {"AK4497 DAC", NULL, "AK4497 SDTI"},
+ {"AK4497 AOUT", NULL, "AK4497 DAC"},
+
+};
+
static int ak4458_rstn_control(struct snd_soc_component *component, int bit)
{
int ret;
@@ -476,6 +508,18 @@ static struct snd_soc_dai_driver ak4458_dai = {
.ops = &ak4458_dai_ops,
};
+static struct snd_soc_dai_driver ak4497_dai = {
+ .name = "ak4497-aif",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .formats = AK4458_FORMATS,
+ },
+ .ops = &ak4458_dai_ops,
+};
+
static void ak4458_power_off(struct ak4458_priv *ak4458)
{
if (ak4458->reset_gpiod) {
@@ -573,6 +617,21 @@ static const struct snd_soc_component_driver soc_codec_dev_ak4458 = {
.non_legacy_dai_naming = 1,
};
+static const struct snd_soc_component_driver soc_codec_dev_ak4497 = {
+ .probe = ak4458_probe,
+ .remove = ak4458_remove,
+ .controls = ak4497_snd_controls,
+ .num_controls = ARRAY_SIZE(ak4497_snd_controls),
+ .dapm_widgets = ak4497_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4497_dapm_widgets),
+ .dapm_routes = ak4497_intercon,
+ .num_dapm_routes = ARRAY_SIZE(ak4497_intercon),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
static const struct regmap_config ak4458_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -583,6 +642,16 @@ static const struct regmap_config ak4458_regmap = {
.cache_type = REGCACHE_RBTREE,
};
+static const struct ak4458_drvdata ak4458_drvdata = {
+ .dai_drv = &ak4458_dai,
+ .comp_drv = &soc_codec_dev_ak4458,
+};
+
+static const struct ak4458_drvdata ak4497_drvdata = {
+ .dai_drv = &ak4497_dai,
+ .comp_drv = &soc_codec_dev_ak4497,
+};
+
static const struct dev_pm_ops ak4458_pm = {
SET_RUNTIME_PM_OPS(ak4458_runtime_suspend, ak4458_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
@@ -592,6 +661,7 @@ static const struct dev_pm_ops ak4458_pm = {
static int ak4458_i2c_probe(struct i2c_client *i2c)
{
struct ak4458_priv *ak4458;
+ const struct ak4458_drvdata *drvdata;
int ret;
ak4458 = devm_kzalloc(&i2c->dev, sizeof(*ak4458), GFP_KERNEL);
@@ -605,6 +675,8 @@ static int ak4458_i2c_probe(struct i2c_client *i2c)
i2c_set_clientdata(i2c, ak4458);
ak4458->dev = &i2c->dev;
+ drvdata = of_device_get_match_data(&i2c->dev);
+
ak4458->reset_gpiod = devm_gpiod_get_optional(ak4458->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ak4458->reset_gpiod))
@@ -615,8 +687,8 @@ static int ak4458_i2c_probe(struct i2c_client *i2c)
if (IS_ERR(ak4458->mute_gpiod))
return PTR_ERR(ak4458->mute_gpiod);
- ret = devm_snd_soc_register_component(ak4458->dev, &soc_codec_dev_ak4458,
- &ak4458_dai, 1);
+ ret = devm_snd_soc_register_component(ak4458->dev, drvdata->comp_drv,
+ drvdata->dai_drv, 1);
if (ret < 0) {
dev_err(ak4458->dev, "Failed to register CODEC: %d\n", ret);
return ret;
@@ -635,7 +707,8 @@ static int ak4458_i2c_remove(struct i2c_client *i2c)
}
static const struct of_device_id ak4458_of_match[] = {
- { .compatible = "asahi-kasei,ak4458", },
+ { .compatible = "asahi-kasei,ak4458", .data = &ak4458_drvdata},
+ { .compatible = "asahi-kasei,ak4497", .data = &ak4497_drvdata},
{ },
};
diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c
new file mode 100644
index 000000000000..99a3af8a15ff
--- /dev/null
+++ b/sound/soc/codecs/cros_ec_codec.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for ChromeOS Embedded Controller codec.
+ *
+ * This driver uses the cros-ec interface to communicate with the ChromeOS
+ * EC for audio function.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define DRV_NAME "cros-ec-codec"
+
+/**
+ * struct cros_ec_codec_data - ChromeOS EC codec driver data.
+ * @dev: Device structure used in sysfs.
+ * @ec_device: cros_ec_device structure to talk to the physical device.
+ * @component: Pointer to the component.
+ * @max_dmic_gain: Maximum gain in dB supported by EC codec.
+ */
+struct cros_ec_codec_data {
+ struct device *dev;
+ struct cros_ec_device *ec_device;
+ struct snd_soc_component *component;
+ unsigned int max_dmic_gain;
+};
+
+static const DECLARE_TLV_DB_SCALE(ec_mic_gain_tlv, 0, 100, 0);
+
+static int ec_command_get_gain(struct snd_soc_component *component,
+ struct ec_param_codec_i2s *param,
+ struct ec_response_codec_gain *resp)
+{
+ struct cros_ec_codec_data *codec_data =
+ snd_soc_component_get_drvdata(component);
+ struct cros_ec_device *ec_device = codec_data->ec_device;
+ u8 buffer[sizeof(struct cros_ec_command) +
+ max(sizeof(struct ec_param_codec_i2s),
+ sizeof(struct ec_response_codec_gain))];
+ struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
+ int ret;
+
+ msg->version = 0;
+ msg->command = EC_CMD_CODEC_I2S;
+ msg->outsize = sizeof(struct ec_param_codec_i2s);
+ msg->insize = sizeof(struct ec_response_codec_gain);
+
+ memcpy(msg->data, param, msg->outsize);
+
+ ret = cros_ec_cmd_xfer_status(ec_device, msg);
+ if (ret > 0)
+ memcpy(resp, msg->data, msg->insize);
+
+ return ret;
+}
+
+/*
+ * Wrapper for EC command without response.
+ */
+static int ec_command_no_resp(struct snd_soc_component *component,
+ struct ec_param_codec_i2s *param)
+{
+ struct cros_ec_codec_data *codec_data =
+ snd_soc_component_get_drvdata(component);
+ struct cros_ec_device *ec_device = codec_data->ec_device;
+ u8 buffer[sizeof(struct cros_ec_command) +
+ sizeof(struct ec_param_codec_i2s)];
+ struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
+
+ msg->version = 0;
+ msg->command = EC_CMD_CODEC_I2S;
+ msg->outsize = sizeof(struct ec_param_codec_i2s);
+ msg->insize = 0;
+
+ memcpy(msg->data, param, msg->outsize);
+
+ return cros_ec_cmd_xfer_status(ec_device, msg);
+}
+
+static int set_i2s_config(struct snd_soc_component *component,
+ enum ec_i2s_config i2s_config)
+{
+ struct ec_param_codec_i2s param;
+
+ dev_dbg(component->dev, "%s set I2S format to %u\n", __func__,
+ i2s_config);
+
+ param.cmd = EC_CODEC_I2S_SET_CONFIG;
+ param.i2s_config = i2s_config;
+
+ return ec_command_no_resp(component, &param);
+}
+
+static int cros_ec_i2s_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ enum ec_i2s_config i2s_config;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ i2s_config = EC_DAI_FMT_I2S;
+ break;
+
+ case SND_SOC_DAIFMT_RIGHT_J:
+ i2s_config = EC_DAI_FMT_RIGHT_J;
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+ i2s_config = EC_DAI_FMT_LEFT_J;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A:
+ i2s_config = EC_DAI_FMT_PCM_A;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B:
+ i2s_config = EC_DAI_FMT_PCM_B;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return set_i2s_config(component, i2s_config);
+}
+
+static int set_i2s_sample_depth(struct snd_soc_component *component,
+ enum ec_sample_depth_value depth)
+{
+ struct ec_param_codec_i2s param;
+
+ dev_dbg(component->dev, "%s set depth to %u\n", __func__, depth);
+
+ param.cmd = EC_CODEC_SET_SAMPLE_DEPTH;
+ param.depth = depth;
+
+ return ec_command_no_resp(component, &param);
+}
+
+static int set_i2s_bclk(struct snd_soc_component *component, uint32_t bclk)
+{
+ struct ec_param_codec_i2s param;
+
+ dev_dbg(component->dev, "%s set i2s bclk to %u\n", __func__, bclk);
+
+ param.cmd = EC_CODEC_I2S_SET_BCLK;
+ param.bclk = bclk;
+
+ return ec_command_no_resp(component, &param);
+}
+
+static int cros_ec_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ unsigned int rate, bclk;
+ int ret;
+
+ rate = params_rate(params);
+ if (rate != 48000)
+ return -EINVAL;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ ret = set_i2s_sample_depth(component, EC_CODEC_SAMPLE_DEPTH_16);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ret = set_i2s_sample_depth(component, EC_CODEC_SAMPLE_DEPTH_24);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret < 0)
+ return ret;
+
+ bclk = snd_soc_params_to_bclk(params);
+ return set_i2s_bclk(component, bclk);
+}
+
+static const struct snd_soc_dai_ops cros_ec_i2s_dai_ops = {
+ .hw_params = cros_ec_i2s_hw_params,
+ .set_fmt = cros_ec_i2s_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver cros_ec_dai[] = {
+ {
+ .name = "cros_ec_codec I2S",
+ .id = 0,
+ .capture = {
+ .stream_name = "I2S Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &cros_ec_i2s_dai_ops,
+ }
+};
+
+static int get_ec_mic_gain(struct snd_soc_component *component,
+ u8 *left, u8 *right)
+{
+ struct ec_param_codec_i2s param;
+ struct ec_response_codec_gain resp;
+ int ret;
+
+ param.cmd = EC_CODEC_GET_GAIN;
+
+ ret = ec_command_get_gain(component, &param, &resp);
+ if (ret < 0)
+ return ret;
+
+ *left = resp.left;
+ *right = resp.right;
+
+ return 0;
+}
+
+static int mic_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ u8 left, right;
+ int ret;
+
+ ret = get_ec_mic_gain(component, &left, &right);
+ if (ret)
+ return ret;
+
+ ucontrol->value.integer.value[0] = left;
+ ucontrol->value.integer.value[1] = right;
+
+ return 0;
+}
+
+static int set_ec_mic_gain(struct snd_soc_component *component,
+ u8 left, u8 right)
+{
+ struct ec_param_codec_i2s param;
+
+ dev_dbg(component->dev, "%s set mic gain to %u, %u\n",
+ __func__, left, right);
+
+ param.cmd = EC_CODEC_SET_GAIN;
+ param.gain.left = left;
+ param.gain.right = right;
+
+ return ec_command_no_resp(component, &param);
+}
+
+static int mic_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_data *codec_data =
+ snd_soc_component_get_drvdata(component);
+ int left = ucontrol->value.integer.value[0];
+ int right = ucontrol->value.integer.value[1];
+ unsigned int max_dmic_gain = codec_data->max_dmic_gain;
+
+ if (left > max_dmic_gain || right > max_dmic_gain)
+ return -EINVAL;
+
+ return set_ec_mic_gain(component, (u8)left, (u8)right);
+}
+
+static struct snd_kcontrol_new mic_gain_control =
+ SOC_DOUBLE_EXT_TLV("EC Mic Gain", SND_SOC_NOPM, SND_SOC_NOPM, 0, 0, 0,
+ mic_gain_get, mic_gain_put, ec_mic_gain_tlv);
+
+static int enable_i2s(struct snd_soc_component *component, int enable)
+{
+ struct ec_param_codec_i2s param;
+
+ dev_dbg(component->dev, "%s set i2s to %u\n", __func__, enable);
+
+ param.cmd = EC_CODEC_I2S_ENABLE;
+ param.i2s_enable = enable;
+
+ return ec_command_no_resp(component, &param);
+}
+
+static int cros_ec_i2s_enable_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ dev_dbg(component->dev,
+ "%s got SND_SOC_DAPM_PRE_PMU event\n", __func__);
+ return enable_i2s(component, 1);
+
+ case SND_SOC_DAPM_PRE_PMD:
+ dev_dbg(component->dev,
+ "%s got SND_SOC_DAPM_PRE_PMD event\n", __func__);
+ return enable_i2s(component, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * The goal of this DAPM route is to turn on/off I2S using EC
+ * host command when capture stream is started/stopped.
+ */
+static const struct snd_soc_dapm_widget cros_ec_codec_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("DMIC"),
+
+ /*
+ * Control EC to enable/disable I2S.
+ */
+ SND_SOC_DAPM_SUPPLY("I2S Enable", SND_SOC_NOPM,
+ 0, 0, cros_ec_i2s_enable_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_AIF_OUT("I2STX", "I2S Capture", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route cros_ec_codec_dapm_routes[] = {
+ { "I2STX", NULL, "DMIC" },
+ { "I2STX", NULL, "I2S Enable" },
+};
+
+/*
+ * Read maximum gain from device property and set it to mixer control.
+ */
+static int cros_ec_set_gain_range(struct device *dev)
+{
+ struct soc_mixer_control *control;
+ struct cros_ec_codec_data *codec_data = dev_get_drvdata(dev);
+ int rc;
+
+ rc = device_property_read_u32(dev, "max-dmic-gain",
+ &codec_data->max_dmic_gain);
+ if (rc)
+ return rc;
+
+ control = (struct soc_mixer_control *)
+ mic_gain_control.private_value;
+ control->max = codec_data->max_dmic_gain;
+ control->platform_max = codec_data->max_dmic_gain;
+
+ return 0;
+}
+
+static int cros_ec_codec_probe(struct snd_soc_component *component)
+{
+ int rc;
+
+ struct cros_ec_codec_data *codec_data =
+ snd_soc_component_get_drvdata(component);
+
+ rc = cros_ec_set_gain_range(codec_data->dev);
+ if (rc)
+ return rc;
+
+ return snd_soc_add_component_controls(component, &mic_gain_control, 1);
+}
+
+static const struct snd_soc_component_driver cros_ec_component_driver = {
+ .probe = cros_ec_codec_probe,
+ .dapm_widgets = cros_ec_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cros_ec_codec_dapm_widgets),
+ .dapm_routes = cros_ec_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cros_ec_codec_dapm_routes),
+};
+
+/*
+ * Platform device and platform driver for cros-ec-codec.
+ */
+static int cros_ec_codec_platform_probe(struct platform_device *pd)
+{
+ struct device *dev = &pd->dev;
+ struct cros_ec_device *ec_device = dev_get_drvdata(pd->dev.parent);
+ struct cros_ec_codec_data *codec_data;
+
+ codec_data = devm_kzalloc(dev, sizeof(struct cros_ec_codec_data),
+ GFP_KERNEL);
+ if (!codec_data)
+ return -ENOMEM;
+
+ codec_data->dev = dev;
+ codec_data->ec_device = ec_device;
+
+ platform_set_drvdata(pd, codec_data);
+
+ return snd_soc_register_component(dev, &cros_ec_component_driver,
+ cros_ec_dai, ARRAY_SIZE(cros_ec_dai));
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cros_ec_codec_of_match[] = {
+ { .compatible = "google,cros-ec-codec" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cros_ec_codec_of_match);
+#endif
+
+static struct platform_driver cros_ec_codec_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(cros_ec_codec_of_match),
+ },
+ .probe = cros_ec_codec_platform_probe,
+};
+
+module_platform_driver(cros_ec_codec_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS EC codec driver");
+MODULE_AUTHOR("Cheng-Yi Chiang <cychiang@chromium.org>");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/codecs/cs35l36.c b/sound/soc/codecs/cs35l36.c
new file mode 100644
index 000000000000..e9b5f76f27a8
--- /dev/null
+++ b/sound/soc/codecs/cs35l36.c
@@ -0,0 +1,1957 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// cs35l36.c -- CS35L36 ALSA SoC audio driver
+//
+// Copyright 2018 Cirrus Logic, Inc.
+//
+// Author: James Schulman <james.schulman@cirrus.com>
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <linux/gpio.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/cs35l36.h>
+#include <linux/of_irq.h>
+#include <linux/completion.h>
+
+#include "cs35l36.h"
+
+/*
+ * Some fields take zero as a valid value so use a high bit flag that won't
+ * get written to the device to mark those.
+ */
+#define CS35L36_VALID_PDATA 0x80000000
+
+static const char * const cs35l36_supplies[] = {
+ "VA",
+ "VP",
+};
+
+struct cs35l36_private {
+ struct device *dev;
+ struct cs35l36_platform_data pdata;
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[2];
+ int num_supplies;
+ int clksrc;
+ int chip_version;
+ int rev_id;
+ int ldm_mode_sel;
+ struct gpio_desc *reset_gpio;
+};
+
+struct cs35l36_pll_config {
+ int freq;
+ int clk_cfg;
+ int fll_igain;
+};
+
+static const struct cs35l36_pll_config cs35l36_pll_sysclk[] = {
+ {32768, 0x00, 0x05},
+ {8000, 0x01, 0x03},
+ {11025, 0x02, 0x03},
+ {12000, 0x03, 0x03},
+ {16000, 0x04, 0x04},
+ {22050, 0x05, 0x04},
+ {24000, 0x06, 0x04},
+ {32000, 0x07, 0x05},
+ {44100, 0x08, 0x05},
+ {48000, 0x09, 0x05},
+ {88200, 0x0A, 0x06},
+ {96000, 0x0B, 0x06},
+ {128000, 0x0C, 0x07},
+ {176400, 0x0D, 0x07},
+ {192000, 0x0E, 0x07},
+ {256000, 0x0F, 0x08},
+ {352800, 0x10, 0x08},
+ {384000, 0x11, 0x08},
+ {512000, 0x12, 0x09},
+ {705600, 0x13, 0x09},
+ {750000, 0x14, 0x09},
+ {768000, 0x15, 0x09},
+ {1000000, 0x16, 0x0A},
+ {1024000, 0x17, 0x0A},
+ {1200000, 0x18, 0x0A},
+ {1411200, 0x19, 0x0A},
+ {1500000, 0x1A, 0x0A},
+ {1536000, 0x1B, 0x0A},
+ {2000000, 0x1C, 0x0A},
+ {2048000, 0x1D, 0x0A},
+ {2400000, 0x1E, 0x0A},
+ {2822400, 0x1F, 0x0A},
+ {3000000, 0x20, 0x0A},
+ {3072000, 0x21, 0x0A},
+ {3200000, 0x22, 0x0A},
+ {4000000, 0x23, 0x0A},
+ {4096000, 0x24, 0x0A},
+ {4800000, 0x25, 0x0A},
+ {5644800, 0x26, 0x0A},
+ {6000000, 0x27, 0x0A},
+ {6144000, 0x28, 0x0A},
+ {6250000, 0x29, 0x08},
+ {6400000, 0x2A, 0x0A},
+ {6500000, 0x2B, 0x08},
+ {6750000, 0x2C, 0x09},
+ {7526400, 0x2D, 0x0A},
+ {8000000, 0x2E, 0x0A},
+ {8192000, 0x2F, 0x0A},
+ {9600000, 0x30, 0x0A},
+ {11289600, 0x31, 0x0A},
+ {12000000, 0x32, 0x0A},
+ {12288000, 0x33, 0x0A},
+ {12500000, 0x34, 0x08},
+ {12800000, 0x35, 0x0A},
+ {13000000, 0x36, 0x0A},
+ {13500000, 0x37, 0x0A},
+ {19200000, 0x38, 0x0A},
+ {22579200, 0x39, 0x0A},
+ {24000000, 0x3A, 0x0A},
+ {24576000, 0x3B, 0x0A},
+ {25000000, 0x3C, 0x0A},
+ {25600000, 0x3D, 0x0A},
+ {26000000, 0x3E, 0x0A},
+ {27000000, 0x3F, 0x0A},
+};
+
+static struct reg_default cs35l36_reg[] = {
+ {CS35L36_TESTKEY_CTRL, 0x00000000},
+ {CS35L36_USERKEY_CTL, 0x00000000},
+ {CS35L36_OTP_CTRL1, 0x00002460},
+ {CS35L36_OTP_CTRL2, 0x00000000},
+ {CS35L36_OTP_CTRL3, 0x00000000},
+ {CS35L36_OTP_CTRL4, 0x00000000},
+ {CS35L36_OTP_CTRL5, 0x00000000},
+ {CS35L36_PAC_CTL1, 0x00000004},
+ {CS35L36_PAC_CTL2, 0x00000000},
+ {CS35L36_PAC_CTL3, 0x00000000},
+ {CS35L36_PWR_CTRL1, 0x00000000},
+ {CS35L36_PWR_CTRL2, 0x00003321},
+ {CS35L36_PWR_CTRL3, 0x01000010},
+ {CS35L36_CTRL_OVRRIDE, 0x00000002},
+ {CS35L36_AMP_OUT_MUTE, 0x00000000},
+ {CS35L36_OTP_TRIM_STATUS, 0x00000000},
+ {CS35L36_DISCH_FILT, 0x00000000},
+ {CS35L36_PROTECT_REL_ERR, 0x00000000},
+ {CS35L36_PAD_INTERFACE, 0x00000038},
+ {CS35L36_PLL_CLK_CTRL, 0x00000010},
+ {CS35L36_GLOBAL_CLK_CTRL, 0x00000003},
+ {CS35L36_ADC_CLK_CTRL, 0x00000000},
+ {CS35L36_SWIRE_CLK_CTRL, 0x00000000},
+ {CS35L36_SP_SCLK_CLK_CTRL, 0x00000000},
+ {CS35L36_MDSYNC_EN, 0x00000000},
+ {CS35L36_MDSYNC_TX_ID, 0x00000000},
+ {CS35L36_MDSYNC_PWR_CTRL, 0x00000000},
+ {CS35L36_MDSYNC_DATA_TX, 0x00000000},
+ {CS35L36_MDSYNC_TX_STATUS, 0x00000002},
+ {CS35L36_MDSYNC_RX_STATUS, 0x00000000},
+ {CS35L36_MDSYNC_ERR_STATUS, 0x00000000},
+ {CS35L36_BSTCVRT_VCTRL1, 0x00000000},
+ {CS35L36_BSTCVRT_VCTRL2, 0x00000001},
+ {CS35L36_BSTCVRT_PEAK_CUR, 0x0000004A},
+ {CS35L36_BSTCVRT_SFT_RAMP, 0x00000003},
+ {CS35L36_BSTCVRT_COEFF, 0x00002424},
+ {CS35L36_BSTCVRT_SLOPE_LBST, 0x00005800},
+ {CS35L36_BSTCVRT_SW_FREQ, 0x00010000},
+ {CS35L36_BSTCVRT_DCM_CTRL, 0x00002001},
+ {CS35L36_BSTCVRT_DCM_MODE_FORCE, 0x00000000},
+ {CS35L36_BSTCVRT_OVERVOLT_CTRL, 0x00000130},
+ {CS35L36_VPI_LIMIT_MODE, 0x00000000},
+ {CS35L36_VPI_LIMIT_MINMAX, 0x00003000},
+ {CS35L36_VPI_VP_THLD, 0x00101010},
+ {CS35L36_VPI_TRACK_CTRL, 0x00000000},
+ {CS35L36_VPI_TRIG_MODE_CTRL, 0x00000000},
+ {CS35L36_VPI_TRIG_STEPS, 0x00000000},
+ {CS35L36_VI_SPKMON_FILT, 0x00000003},
+ {CS35L36_VI_SPKMON_GAIN, 0x00000909},
+ {CS35L36_VI_SPKMON_IP_SEL, 0x00000000},
+ {CS35L36_DTEMP_WARN_THLD, 0x00000002},
+ {CS35L36_DTEMP_STATUS, 0x00000000},
+ {CS35L36_VPVBST_FS_SEL, 0x00000001},
+ {CS35L36_VPVBST_VP_CTRL, 0x000001C0},
+ {CS35L36_VPVBST_VBST_CTRL, 0x000001C0},
+ {CS35L36_ASP_TX_PIN_CTRL, 0x00000028},
+ {CS35L36_ASP_RATE_CTRL, 0x00090000},
+ {CS35L36_ASP_FORMAT, 0x00000002},
+ {CS35L36_ASP_FRAME_CTRL, 0x00180018},
+ {CS35L36_ASP_TX1_TX2_SLOT, 0x00010000},
+ {CS35L36_ASP_TX3_TX4_SLOT, 0x00030002},
+ {CS35L36_ASP_TX5_TX6_SLOT, 0x00050004},
+ {CS35L36_ASP_TX7_TX8_SLOT, 0x00070006},
+ {CS35L36_ASP_RX1_SLOT, 0x00000000},
+ {CS35L36_ASP_RX_TX_EN, 0x00000000},
+ {CS35L36_ASP_RX1_SEL, 0x00000008},
+ {CS35L36_ASP_TX1_SEL, 0x00000018},
+ {CS35L36_ASP_TX2_SEL, 0x00000019},
+ {CS35L36_ASP_TX3_SEL, 0x00000028},
+ {CS35L36_ASP_TX4_SEL, 0x00000029},
+ {CS35L36_ASP_TX5_SEL, 0x00000020},
+ {CS35L36_ASP_TX6_SEL, 0x00000000},
+ {CS35L36_SWIRE_P1_TX1_SEL, 0x00000018},
+ {CS35L36_SWIRE_P1_TX2_SEL, 0x00000019},
+ {CS35L36_SWIRE_P2_TX1_SEL, 0x00000028},
+ {CS35L36_SWIRE_P2_TX2_SEL, 0x00000029},
+ {CS35L36_SWIRE_P2_TX3_SEL, 0x00000020},
+ {CS35L36_SWIRE_DP1_FIFO_CFG, 0x0000001B},
+ {CS35L36_SWIRE_DP2_FIFO_CFG, 0x0000001B},
+ {CS35L36_SWIRE_DP3_FIFO_CFG, 0x0000001B},
+ {CS35L36_SWIRE_PCM_RX_DATA, 0x00000000},
+ {CS35L36_SWIRE_FS_SEL, 0x00000001},
+ {CS35L36_AMP_DIG_VOL_CTRL, 0x00008000},
+ {CS35L36_VPBR_CFG, 0x02AA1905},
+ {CS35L36_VBBR_CFG, 0x02AA1905},
+ {CS35L36_VPBR_STATUS, 0x00000000},
+ {CS35L36_VBBR_STATUS, 0x00000000},
+ {CS35L36_OVERTEMP_CFG, 0x00000001},
+ {CS35L36_AMP_ERR_VOL, 0x00000000},
+ {CS35L36_CLASSH_CFG, 0x000B0405},
+ {CS35L36_CLASSH_FET_DRV_CFG, 0x00000111},
+ {CS35L36_NG_CFG, 0x00000033},
+ {CS35L36_AMP_GAIN_CTRL, 0x00000273},
+ {CS35L36_PWM_MOD_IO_CTRL, 0x00000000},
+ {CS35L36_PWM_MOD_STATUS, 0x00000000},
+ {CS35L36_DAC_MSM_CFG, 0x00000000},
+ {CS35L36_AMP_SLOPE_CTRL, 0x00000B00},
+ {CS35L36_AMP_PDM_VOLUME, 0x00000000},
+ {CS35L36_AMP_PDM_RATE_CTRL, 0x00000000},
+ {CS35L36_PDM_CH_SEL, 0x00000000},
+ {CS35L36_AMP_NG_CTRL, 0x0000212F},
+ {CS35L36_PDM_HIGHFILT_CTRL, 0x00000000},
+ {CS35L36_PAC_INT0_CTRL, 0x00000001},
+ {CS35L36_PAC_INT1_CTRL, 0x00000001},
+ {CS35L36_PAC_INT2_CTRL, 0x00000001},
+ {CS35L36_PAC_INT3_CTRL, 0x00000001},
+ {CS35L36_PAC_INT4_CTRL, 0x00000001},
+ {CS35L36_PAC_INT5_CTRL, 0x00000001},
+ {CS35L36_PAC_INT6_CTRL, 0x00000001},
+ {CS35L36_PAC_INT7_CTRL, 0x00000001},
+};
+
+static bool cs35l36_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L36_SW_RESET:
+ case CS35L36_SW_REV:
+ case CS35L36_HW_REV:
+ case CS35L36_TESTKEY_CTRL:
+ case CS35L36_USERKEY_CTL:
+ case CS35L36_OTP_MEM30:
+ case CS35L36_OTP_CTRL1:
+ case CS35L36_OTP_CTRL2:
+ case CS35L36_OTP_CTRL3:
+ case CS35L36_OTP_CTRL4:
+ case CS35L36_OTP_CTRL5:
+ case CS35L36_PAC_CTL1:
+ case CS35L36_PAC_CTL2:
+ case CS35L36_PAC_CTL3:
+ case CS35L36_DEVICE_ID:
+ case CS35L36_FAB_ID:
+ case CS35L36_REV_ID:
+ case CS35L36_PWR_CTRL1:
+ case CS35L36_PWR_CTRL2:
+ case CS35L36_PWR_CTRL3:
+ case CS35L36_CTRL_OVRRIDE:
+ case CS35L36_AMP_OUT_MUTE:
+ case CS35L36_OTP_TRIM_STATUS:
+ case CS35L36_DISCH_FILT:
+ case CS35L36_PROTECT_REL_ERR:
+ case CS35L36_PAD_INTERFACE:
+ case CS35L36_PLL_CLK_CTRL:
+ case CS35L36_GLOBAL_CLK_CTRL:
+ case CS35L36_ADC_CLK_CTRL:
+ case CS35L36_SWIRE_CLK_CTRL:
+ case CS35L36_SP_SCLK_CLK_CTRL:
+ case CS35L36_TST_FS_MON0:
+ case CS35L36_MDSYNC_EN:
+ case CS35L36_MDSYNC_TX_ID:
+ case CS35L36_MDSYNC_PWR_CTRL:
+ case CS35L36_MDSYNC_DATA_TX:
+ case CS35L36_MDSYNC_TX_STATUS:
+ case CS35L36_MDSYNC_RX_STATUS:
+ case CS35L36_MDSYNC_ERR_STATUS:
+ case CS35L36_BSTCVRT_VCTRL1:
+ case CS35L36_BSTCVRT_VCTRL2:
+ case CS35L36_BSTCVRT_PEAK_CUR:
+ case CS35L36_BSTCVRT_SFT_RAMP:
+ case CS35L36_BSTCVRT_COEFF:
+ case CS35L36_BSTCVRT_SLOPE_LBST:
+ case CS35L36_BSTCVRT_SW_FREQ:
+ case CS35L36_BSTCVRT_DCM_CTRL:
+ case CS35L36_BSTCVRT_DCM_MODE_FORCE:
+ case CS35L36_BSTCVRT_OVERVOLT_CTRL:
+ case CS35L36_BST_TST_MANUAL:
+ case CS35L36_BST_ANA2_TEST:
+ case CS35L36_VPI_LIMIT_MODE:
+ case CS35L36_VPI_LIMIT_MINMAX:
+ case CS35L36_VPI_VP_THLD:
+ case CS35L36_VPI_TRACK_CTRL:
+ case CS35L36_VPI_TRIG_MODE_CTRL:
+ case CS35L36_VPI_TRIG_STEPS:
+ case CS35L36_VI_SPKMON_FILT:
+ case CS35L36_VI_SPKMON_GAIN:
+ case CS35L36_VI_SPKMON_IP_SEL:
+ case CS35L36_DTEMP_WARN_THLD:
+ case CS35L36_DTEMP_STATUS:
+ case CS35L36_VPVBST_FS_SEL:
+ case CS35L36_VPVBST_VP_CTRL:
+ case CS35L36_VPVBST_VBST_CTRL:
+ case CS35L36_ASP_TX_PIN_CTRL:
+ case CS35L36_ASP_RATE_CTRL:
+ case CS35L36_ASP_FORMAT:
+ case CS35L36_ASP_FRAME_CTRL:
+ case CS35L36_ASP_TX1_TX2_SLOT:
+ case CS35L36_ASP_TX3_TX4_SLOT:
+ case CS35L36_ASP_TX5_TX6_SLOT:
+ case CS35L36_ASP_TX7_TX8_SLOT:
+ case CS35L36_ASP_RX1_SLOT:
+ case CS35L36_ASP_RX_TX_EN:
+ case CS35L36_ASP_RX1_SEL:
+ case CS35L36_ASP_TX1_SEL:
+ case CS35L36_ASP_TX2_SEL:
+ case CS35L36_ASP_TX3_SEL:
+ case CS35L36_ASP_TX4_SEL:
+ case CS35L36_ASP_TX5_SEL:
+ case CS35L36_ASP_TX6_SEL:
+ case CS35L36_SWIRE_P1_TX1_SEL:
+ case CS35L36_SWIRE_P1_TX2_SEL:
+ case CS35L36_SWIRE_P2_TX1_SEL:
+ case CS35L36_SWIRE_P2_TX2_SEL:
+ case CS35L36_SWIRE_P2_TX3_SEL:
+ case CS35L36_SWIRE_DP1_FIFO_CFG:
+ case CS35L36_SWIRE_DP2_FIFO_CFG:
+ case CS35L36_SWIRE_DP3_FIFO_CFG:
+ case CS35L36_SWIRE_PCM_RX_DATA:
+ case CS35L36_SWIRE_FS_SEL:
+ case CS35L36_AMP_DIG_VOL_CTRL:
+ case CS35L36_VPBR_CFG:
+ case CS35L36_VBBR_CFG:
+ case CS35L36_VPBR_STATUS:
+ case CS35L36_VBBR_STATUS:
+ case CS35L36_OVERTEMP_CFG:
+ case CS35L36_AMP_ERR_VOL:
+ case CS35L36_CLASSH_CFG:
+ case CS35L36_CLASSH_FET_DRV_CFG:
+ case CS35L36_NG_CFG:
+ case CS35L36_AMP_GAIN_CTRL:
+ case CS35L36_PWM_MOD_IO_CTRL:
+ case CS35L36_PWM_MOD_STATUS:
+ case CS35L36_DAC_MSM_CFG:
+ case CS35L36_AMP_SLOPE_CTRL:
+ case CS35L36_AMP_PDM_VOLUME:
+ case CS35L36_AMP_PDM_RATE_CTRL:
+ case CS35L36_PDM_CH_SEL:
+ case CS35L36_AMP_NG_CTRL:
+ case CS35L36_PDM_HIGHFILT_CTRL:
+ case CS35L36_INT1_STATUS:
+ case CS35L36_INT2_STATUS:
+ case CS35L36_INT3_STATUS:
+ case CS35L36_INT4_STATUS:
+ case CS35L36_INT1_RAW_STATUS:
+ case CS35L36_INT2_RAW_STATUS:
+ case CS35L36_INT3_RAW_STATUS:
+ case CS35L36_INT4_RAW_STATUS:
+ case CS35L36_INT1_MASK:
+ case CS35L36_INT2_MASK:
+ case CS35L36_INT3_MASK:
+ case CS35L36_INT4_MASK:
+ case CS35L36_INT1_EDGE_LVL_CTRL:
+ case CS35L36_INT3_EDGE_LVL_CTRL:
+ case CS35L36_PAC_INT_STATUS:
+ case CS35L36_PAC_INT_RAW_STATUS:
+ case CS35L36_PAC_INT_FLUSH_CTRL:
+ case CS35L36_PAC_INT0_CTRL:
+ case CS35L36_PAC_INT1_CTRL:
+ case CS35L36_PAC_INT2_CTRL:
+ case CS35L36_PAC_INT3_CTRL:
+ case CS35L36_PAC_INT4_CTRL:
+ case CS35L36_PAC_INT5_CTRL:
+ case CS35L36_PAC_INT6_CTRL:
+ case CS35L36_PAC_INT7_CTRL:
+ return true;
+ default:
+ if (reg >= CS35L36_PAC_PMEM_WORD0 &&
+ reg <= CS35L36_PAC_PMEM_WORD1023)
+ return true;
+ else
+ return false;
+ }
+}
+
+static bool cs35l36_precious_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L36_TESTKEY_CTRL:
+ case CS35L36_USERKEY_CTL:
+ case CS35L36_TST_FS_MON0:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs35l36_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L36_SW_RESET:
+ case CS35L36_SW_REV:
+ case CS35L36_HW_REV:
+ case CS35L36_TESTKEY_CTRL:
+ case CS35L36_USERKEY_CTL:
+ case CS35L36_DEVICE_ID:
+ case CS35L36_FAB_ID:
+ case CS35L36_REV_ID:
+ case CS35L36_INT1_STATUS:
+ case CS35L36_INT2_STATUS:
+ case CS35L36_INT3_STATUS:
+ case CS35L36_INT4_STATUS:
+ case CS35L36_INT1_RAW_STATUS:
+ case CS35L36_INT2_RAW_STATUS:
+ case CS35L36_INT3_RAW_STATUS:
+ case CS35L36_INT4_RAW_STATUS:
+ case CS35L36_INT1_MASK:
+ case CS35L36_INT2_MASK:
+ case CS35L36_INT3_MASK:
+ case CS35L36_INT4_MASK:
+ case CS35L36_INT1_EDGE_LVL_CTRL:
+ case CS35L36_INT3_EDGE_LVL_CTRL:
+ case CS35L36_PAC_INT_STATUS:
+ case CS35L36_PAC_INT_RAW_STATUS:
+ case CS35L36_PAC_INT_FLUSH_CTRL:
+ return true;
+ default:
+ if (reg >= CS35L36_PAC_PMEM_WORD0 &&
+ reg <= CS35L36_PAC_PMEM_WORD1023)
+ return true;
+ else
+ return false;
+ }
+}
+
+static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 25, 0);
+static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1);
+
+static const char * const cs35l36_pcm_sftramp_text[] = {
+ "Off", ".5ms", "1ms", "2ms", "4ms", "8ms", "15ms", "30ms"};
+
+static SOC_ENUM_SINGLE_DECL(pcm_sft_ramp, CS35L36_AMP_DIG_VOL_CTRL, 0,
+ cs35l36_pcm_sftramp_text);
+
+static int cs35l36_ldm_sel_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = cs35l36->ldm_mode_sel;
+
+ return 0;
+}
+
+static int cs35l36_ldm_sel_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(component);
+ int val = (ucontrol->value.integer.value[0]) ? CS35L36_NG_AMP_EN_MASK :
+ 0;
+
+ cs35l36->ldm_mode_sel = val;
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG,
+ CS35L36_NG_AMP_EN_MASK, val);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new cs35l36_aud_controls[] = {
+ SOC_SINGLE_SX_TLV("Digital PCM Volume", CS35L36_AMP_DIG_VOL_CTRL,
+ 3, 0x4D0, 0x390, dig_vol_tlv),
+ SOC_SINGLE_TLV("Analog PCM Volume", CS35L36_AMP_GAIN_CTRL, 5, 0x13, 0,
+ amp_gain_tlv),
+ SOC_ENUM("PCM Soft Ramp", pcm_sft_ramp),
+ SOC_SINGLE("Amp Gain Zero-Cross Switch", CS35L36_AMP_GAIN_CTRL,
+ CS35L36_AMP_ZC_SHIFT, 1, 0),
+ SOC_SINGLE("PDM LDM Enter Ramp Switch", CS35L36_DAC_MSM_CFG,
+ CS35L36_PDM_LDM_ENTER_SHIFT, 1, 0),
+ SOC_SINGLE("PDM LDM Exit Ramp Switch", CS35L36_DAC_MSM_CFG,
+ CS35L36_PDM_LDM_EXIT_SHIFT, 1, 0),
+ SOC_SINGLE_BOOL_EXT("LDM Select Switch", 0, cs35l36_ldm_sel_get,
+ cs35l36_ldm_sel_put),
+};
+
+static int cs35l36_main_amp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(component);
+ u32 reg;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL1,
+ CS35L36_GLOBAL_EN_MASK,
+ 1 << CS35L36_GLOBAL_EN_SHIFT);
+
+ usleep_range(2000, 2100);
+
+ regmap_read(cs35l36->regmap, CS35L36_INT4_RAW_STATUS, &reg);
+
+ if (WARN_ON_ONCE(reg & CS35L36_PLL_UNLOCK_MASK))
+ dev_crit(cs35l36->dev, "PLL Unlocked\n");
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RX1_SEL,
+ CS35L36_PCM_RX_SEL_MASK,
+ CS35L36_PCM_RX_SEL_PCM);
+ regmap_update_bits(cs35l36->regmap, CS35L36_AMP_OUT_MUTE,
+ CS35L36_AMP_MUTE_MASK,
+ 0 << CS35L36_AMP_MUTE_SHIFT);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RX1_SEL,
+ CS35L36_PCM_RX_SEL_MASK,
+ CS35L36_PCM_RX_SEL_ZERO);
+ regmap_update_bits(cs35l36->regmap, CS35L36_AMP_OUT_MUTE,
+ CS35L36_AMP_MUTE_MASK,
+ 1 << CS35L36_AMP_MUTE_SHIFT);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL1,
+ CS35L36_GLOBAL_EN_MASK,
+ 0 << CS35L36_GLOBAL_EN_SHIFT);
+
+ usleep_range(2000, 2100);
+ break;
+ default:
+ dev_dbg(component->dev, "Invalid event = 0x%x\n", event);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs35l36_boost_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (!cs35l36->pdata.extern_boost)
+ regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL2,
+ CS35L36_BST_EN_MASK,
+ CS35L36_BST_EN <<
+ CS35L36_BST_EN_SHIFT);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (!cs35l36->pdata.extern_boost)
+ regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL2,
+ CS35L36_BST_EN_MASK,
+ CS35L36_BST_DIS_VP <<
+ CS35L36_BST_EN_SHIFT);
+ break;
+ default:
+ dev_dbg(component->dev, "Invalid event = 0x%x\n", event);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const char * const cs35l36_chan_text[] = {
+ "RX1",
+ "RX2",
+};
+
+static SOC_ENUM_SINGLE_DECL(chansel_enum, CS35L36_ASP_RX1_SLOT, 0,
+ cs35l36_chan_text);
+
+static const struct snd_kcontrol_new cs35l36_chan_mux =
+ SOC_DAPM_ENUM("Input Mux", chansel_enum);
+
+static const struct snd_kcontrol_new amp_enable_ctrl =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", CS35L36_AMP_OUT_MUTE,
+ CS35L36_AMP_MUTE_SHIFT, 1, 1);
+
+static const struct snd_kcontrol_new boost_ctrl =
+ SOC_DAPM_SINGLE_VIRT("Switch", 1);
+
+static const char * const asp_tx_src_text[] = {
+ "Zero Fill", "ASPRX1", "VMON", "IMON", "ERRVOL", "VPMON", "VBSTMON"
+};
+
+static const unsigned int asp_tx_src_values[] = {
+ 0x00, 0x08, 0x18, 0x19, 0x20, 0x28, 0x29
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx1_src_enum, CS35L36_ASP_TX1_SEL, 0,
+ CS35L36_APS_TX_SEL_MASK, asp_tx_src_text,
+ asp_tx_src_values);
+
+static const struct snd_kcontrol_new asp_tx1_src =
+ SOC_DAPM_ENUM("ASPTX1SRC", asp_tx1_src_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx2_src_enum, CS35L36_ASP_TX2_SEL, 0,
+ CS35L36_APS_TX_SEL_MASK, asp_tx_src_text,
+ asp_tx_src_values);
+
+static const struct snd_kcontrol_new asp_tx2_src =
+ SOC_DAPM_ENUM("ASPTX2SRC", asp_tx2_src_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx3_src_enum, CS35L36_ASP_TX3_SEL, 0,
+ CS35L36_APS_TX_SEL_MASK, asp_tx_src_text,
+ asp_tx_src_values);
+
+static const struct snd_kcontrol_new asp_tx3_src =
+ SOC_DAPM_ENUM("ASPTX3SRC", asp_tx3_src_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx4_src_enum, CS35L36_ASP_TX4_SEL, 0,
+ CS35L36_APS_TX_SEL_MASK, asp_tx_src_text,
+ asp_tx_src_values);
+
+static const struct snd_kcontrol_new asp_tx4_src =
+ SOC_DAPM_ENUM("ASPTX4SRC", asp_tx4_src_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx5_src_enum, CS35L36_ASP_TX5_SEL, 0,
+ CS35L36_APS_TX_SEL_MASK, asp_tx_src_text,
+ asp_tx_src_values);
+
+static const struct snd_kcontrol_new asp_tx5_src =
+ SOC_DAPM_ENUM("ASPTX5SRC", asp_tx5_src_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(asp_tx6_src_enum, CS35L36_ASP_TX6_SEL, 0,
+ CS35L36_APS_TX_SEL_MASK, asp_tx_src_text,
+ asp_tx_src_values);
+
+static const struct snd_kcontrol_new asp_tx6_src =
+ SOC_DAPM_ENUM("ASPTX6SRC", asp_tx6_src_enum);
+
+static const struct snd_soc_dapm_widget cs35l36_dapm_widgets[] = {
+ SND_SOC_DAPM_MUX("Channel Mux", SND_SOC_NOPM, 0, 0, &cs35l36_chan_mux),
+ SND_SOC_DAPM_AIF_IN("SDIN", NULL, 0, CS35L36_ASP_RX_TX_EN, 16, 0),
+
+ SND_SOC_DAPM_OUT_DRV_E("Main AMP", CS35L36_PWR_CTRL2, 0, 0, NULL, 0,
+ cs35l36_main_amp_event, SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_OUTPUT("SPK"),
+ SND_SOC_DAPM_SWITCH("AMP Enable", SND_SOC_NOPM, 0, 1, &amp_enable_ctrl),
+ SND_SOC_DAPM_MIXER("CLASS H", CS35L36_PWR_CTRL3, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SWITCH_E("BOOST Enable", SND_SOC_NOPM, 0, 0, &boost_ctrl,
+ cs35l36_boost_event, SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_POST_PMU),
+
+ SND_SOC_DAPM_AIF_OUT("ASPTX1", NULL, 0, CS35L36_ASP_RX_TX_EN, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("ASPTX2", NULL, 1, CS35L36_ASP_RX_TX_EN, 1, 0),
+ SND_SOC_DAPM_AIF_OUT("ASPTX3", NULL, 2, CS35L36_ASP_RX_TX_EN, 2, 0),
+ SND_SOC_DAPM_AIF_OUT("ASPTX4", NULL, 3, CS35L36_ASP_RX_TX_EN, 3, 0),
+ SND_SOC_DAPM_AIF_OUT("ASPTX5", NULL, 4, CS35L36_ASP_RX_TX_EN, 4, 0),
+ SND_SOC_DAPM_AIF_OUT("ASPTX6", NULL, 5, CS35L36_ASP_RX_TX_EN, 5, 0),
+
+ SND_SOC_DAPM_MUX("ASPTX1SRC", SND_SOC_NOPM, 0, 0, &asp_tx1_src),
+ SND_SOC_DAPM_MUX("ASPTX2SRC", SND_SOC_NOPM, 0, 0, &asp_tx2_src),
+ SND_SOC_DAPM_MUX("ASPTX3SRC", SND_SOC_NOPM, 0, 0, &asp_tx3_src),
+ SND_SOC_DAPM_MUX("ASPTX4SRC", SND_SOC_NOPM, 0, 0, &asp_tx4_src),
+ SND_SOC_DAPM_MUX("ASPTX5SRC", SND_SOC_NOPM, 0, 0, &asp_tx5_src),
+ SND_SOC_DAPM_MUX("ASPTX6SRC", SND_SOC_NOPM, 0, 0, &asp_tx6_src),
+
+ SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L36_PWR_CTRL2, 12, 0),
+ SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L36_PWR_CTRL2, 13, 0),
+ SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L36_PWR_CTRL2, 8, 0),
+ SND_SOC_DAPM_ADC("VBSTMON ADC", NULL, CS35L36_PWR_CTRL2, 9, 0),
+
+ SND_SOC_DAPM_INPUT("VP"),
+ SND_SOC_DAPM_INPUT("VBST"),
+ SND_SOC_DAPM_INPUT("VSENSE"),
+};
+
+static const struct snd_soc_dapm_route cs35l36_audio_map[] = {
+ {"VPMON ADC", NULL, "VP"},
+ {"VBSTMON ADC", NULL, "VBST"},
+ {"IMON ADC", NULL, "VSENSE"},
+ {"VMON ADC", NULL, "VSENSE"},
+
+ {"ASPTX1SRC", "IMON", "IMON ADC"},
+ {"ASPTX1SRC", "VMON", "VMON ADC"},
+ {"ASPTX1SRC", "VBSTMON", "VBSTMON ADC"},
+ {"ASPTX1SRC", "VPMON", "VPMON ADC"},
+
+ {"ASPTX2SRC", "IMON", "IMON ADC"},
+ {"ASPTX2SRC", "VMON", "VMON ADC"},
+ {"ASPTX2SRC", "VBSTMON", "VBSTMON ADC"},
+ {"ASPTX2SRC", "VPMON", "VPMON ADC"},
+
+ {"ASPTX3SRC", "IMON", "IMON ADC"},
+ {"ASPTX3SRC", "VMON", "VMON ADC"},
+ {"ASPTX3SRC", "VBSTMON", "VBSTMON ADC"},
+ {"ASPTX3SRC", "VPMON", "VPMON ADC"},
+
+ {"ASPTX4SRC", "IMON", "IMON ADC"},
+ {"ASPTX4SRC", "VMON", "VMON ADC"},
+ {"ASPTX4SRC", "VBSTMON", "VBSTMON ADC"},
+ {"ASPTX4SRC", "VPMON", "VPMON ADC"},
+
+ {"ASPTX5SRC", "IMON", "IMON ADC"},
+ {"ASPTX5SRC", "VMON", "VMON ADC"},
+ {"ASPTX5SRC", "VBSTMON", "VBSTMON ADC"},
+ {"ASPTX5SRC", "VPMON", "VPMON ADC"},
+
+ {"ASPTX6SRC", "IMON", "IMON ADC"},
+ {"ASPTX6SRC", "VMON", "VMON ADC"},
+ {"ASPTX6SRC", "VBSTMON", "VBSTMON ADC"},
+ {"ASPTX6SRC", "VPMON", "VPMON ADC"},
+
+ {"ASPTX1", NULL, "ASPTX1SRC"},
+ {"ASPTX2", NULL, "ASPTX2SRC"},
+ {"ASPTX3", NULL, "ASPTX3SRC"},
+ {"ASPTX4", NULL, "ASPTX4SRC"},
+ {"ASPTX5", NULL, "ASPTX5SRC"},
+ {"ASPTX6", NULL, "ASPTX6SRC"},
+
+ {"AMP Capture", NULL, "ASPTX1"},
+ {"AMP Capture", NULL, "ASPTX2"},
+ {"AMP Capture", NULL, "ASPTX3"},
+ {"AMP Capture", NULL, "ASPTX4"},
+ {"AMP Capture", NULL, "ASPTX5"},
+ {"AMP Capture", NULL, "ASPTX6"},
+
+ {"AMP Enable", "Switch", "AMP Playback"},
+ {"SDIN", NULL, "AMP Enable"},
+ {"Channel Mux", "RX1", "SDIN"},
+ {"Channel Mux", "RX2", "SDIN"},
+ {"BOOST Enable", "Switch", "Channel Mux"},
+ {"CLASS H", NULL, "BOOST Enable"},
+ {"Main AMP", NULL, "Channel Mux"},
+ {"Main AMP", NULL, "CLASS H"},
+ {"SPK", NULL, "Main AMP"},
+};
+
+static int cs35l36_set_dai_fmt(struct snd_soc_dai *component_dai,
+ unsigned int fmt)
+{
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(component_dai->component);
+ unsigned int asp_fmt, lrclk_fmt, sclk_fmt, slave_mode, clk_frc;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ slave_mode = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ slave_mode = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL,
+ CS35L36_SCLK_MSTR_MASK,
+ slave_mode << CS35L36_SCLK_MSTR_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RATE_CTRL,
+ CS35L36_LRCLK_MSTR_MASK,
+ slave_mode << CS35L36_LRCLK_MSTR_SHIFT);
+
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) {
+ case SND_SOC_DAIFMT_CONT:
+ clk_frc = 1;
+ break;
+ case SND_SOC_DAIFMT_GATED:
+ clk_frc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL,
+ CS35L36_SCLK_FRC_MASK, clk_frc <<
+ CS35L36_SCLK_FRC_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RATE_CTRL,
+ CS35L36_LRCLK_FRC_MASK, clk_frc <<
+ CS35L36_LRCLK_FRC_SHIFT);
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ asp_fmt = 0;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ asp_fmt = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_IF:
+ lrclk_fmt = 1;
+ sclk_fmt = 0;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ lrclk_fmt = 0;
+ sclk_fmt = 1;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ lrclk_fmt = 1;
+ sclk_fmt = 1;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ lrclk_fmt = 0;
+ sclk_fmt = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_RATE_CTRL,
+ CS35L36_LRCLK_INV_MASK,
+ lrclk_fmt << CS35L36_LRCLK_INV_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL,
+ CS35L36_SCLK_INV_MASK,
+ sclk_fmt << CS35L36_SCLK_INV_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_FORMAT,
+ CS35L36_ASP_FMT_MASK, asp_fmt);
+
+ return 0;
+}
+
+struct cs35l36_global_fs_config {
+ int rate;
+ int fs_cfg;
+};
+
+static const struct cs35l36_global_fs_config cs35l36_fs_rates[] = {
+ {12000, 0x01},
+ {24000, 0x02},
+ {48000, 0x03},
+ {96000, 0x04},
+ {192000, 0x05},
+ {384000, 0x06},
+ {11025, 0x09},
+ {22050, 0x0A},
+ {44100, 0x0B},
+ {88200, 0x0C},
+ {176400, 0x0D},
+ {8000, 0x11},
+ {16000, 0x12},
+ {32000, 0x13},
+};
+
+static int cs35l36_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(dai->component);
+ unsigned int asp_width, global_fs = params_rate(params);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs35l36_fs_rates); i++) {
+ if (global_fs == cs35l36_fs_rates[i].rate)
+ regmap_update_bits(cs35l36->regmap,
+ CS35L36_GLOBAL_CLK_CTRL,
+ CS35L36_GLOBAL_FS_MASK,
+ cs35l36_fs_rates[i].fs_cfg <<
+ CS35L36_GLOBAL_FS_SHIFT);
+ }
+
+ switch (params_width(params)) {
+ case 16:
+ asp_width = CS35L36_ASP_WIDTH_16;
+ break;
+ case 24:
+ asp_width = CS35L36_ASP_WIDTH_24;
+ break;
+ case 32:
+ asp_width = CS35L36_ASP_WIDTH_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_FRAME_CTRL,
+ CS35L36_ASP_RX_WIDTH_MASK,
+ asp_width << CS35L36_ASP_RX_WIDTH_SHIFT);
+ } else {
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_FRAME_CTRL,
+ CS35L36_ASP_TX_WIDTH_MASK,
+ asp_width << CS35L36_ASP_TX_WIDTH_SHIFT);
+ }
+
+ return 0;
+}
+
+static int cs35l36_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_component *component = dai->component;
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(component);
+ int fs1, fs2;
+
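+ /*
+ * Size the PLL FS monitor windows: use the default values above
+ * 6 MHz, otherwise scale by ceil(4 * 6 MHz / freq), weighted by
+ * 3 (FS1) and 5 (FS2) plus a fixed offset of 4.
+ */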
+ if (freq > CS35L36_FS_NOM_6MHZ) {
+ fs1 = CS35L36_FS1_DEFAULT_VAL;
+ fs2 = CS35L36_FS2_DEFAULT_VAL;
+ } else {
+ fs1 = 3 * ((CS35L36_FS_NOM_6MHZ * 4 + freq - 1) / freq) + 4;
+ fs2 = 5 * ((CS35L36_FS_NOM_6MHZ * 4 + freq - 1) / freq) + 4;
+ }
+
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK2);
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_TST_FS_MON0,
+ CS35L36_FS1_WINDOW_MASK | CS35L36_FS2_WINDOW_MASK,
+ fs1 | (fs2 << CS35L36_FS2_WINDOW_SHIFT));
+
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK2);
+ return 0;
+}
+
+static const struct cs35l36_pll_config *cs35l36_get_clk_config(
+ struct cs35l36_private *cs35l36, int freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs35l36_pll_sysclk); i++) {
+ if (cs35l36_pll_sysclk[i].freq == freq)
+ return &cs35l36_pll_sysclk[i];
+ }
+
+ return NULL;
+}
+
+static const unsigned int cs35l36_src_rates[] = {
+ 8000, 12000, 11025, 16000, 22050, 24000, 32000,
+ 44100, 48000, 88200, 96000, 176400, 192000, 384000
+};
+
+static const struct snd_pcm_hw_constraint_list cs35l36_constraints = {
+ .count = ARRAY_SIZE(cs35l36_src_rates),
+ .list = cs35l36_src_rates,
+};
+
+static int cs35l36_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &cs35l36_constraints);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops cs35l36_ops = {
+ .startup = cs35l36_pcm_startup,
+ .set_fmt = cs35l36_set_dai_fmt,
+ .hw_params = cs35l36_pcm_hw_params,
+ .set_sysclk = cs35l36_dai_set_sysclk,
+};
+
+static struct snd_soc_dai_driver cs35l36_dai[] = {
+ {
+ .name = "cs35l36-pcm",
+ .id = 0,
+ .playback = {
+ .stream_name = "AMP Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .formats = CS35L36_RX_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AMP Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .formats = CS35L36_TX_FORMATS,
+ },
+ .ops = &cs35l36_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static int cs35l36_component_set_sysclk(struct snd_soc_component *component,
+ int clk_id, int source, unsigned int freq,
+ int dir)
+{
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(component);
+ const struct cs35l36_pll_config *clk_cfg;
+ int prev_clksrc;
+ bool pdm_switch;
+
+ prev_clksrc = cs35l36->clksrc;
+
+ switch (clk_id) {
+ case 0:
+ cs35l36->clksrc = CS35L36_PLLSRC_SCLK;
+ break;
+ case 1:
+ cs35l36->clksrc = CS35L36_PLLSRC_LRCLK;
+ break;
+ case 2:
+ cs35l36->clksrc = CS35L36_PLLSRC_PDMCLK;
+ break;
+ case 3:
+ cs35l36->clksrc = CS35L36_PLLSRC_SELF;
+ break;
+ case 4:
+ cs35l36->clksrc = CS35L36_PLLSRC_MCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ clk_cfg = cs35l36_get_clk_config(cs35l36, freq);
+ if (clk_cfg == NULL) {
+ dev_err(component->dev, "Invalid CLK Config Freq: %d\n", freq);
+ return -EINVAL;
+ }
+
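+ /*
+ * Switch the PLL to open loop and gate REFCLK while the reference
+ * frequency and clock source are updated, then close the loop and
+ * re-enable REFCLK.
+ */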
+ regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL,
+ CS35L36_PLL_OPENLOOP_MASK,
+ 1 << CS35L36_PLL_OPENLOOP_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL,
+ CS35L36_REFCLK_FREQ_MASK,
+ clk_cfg->clk_cfg << CS35L36_REFCLK_FREQ_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL,
+ CS35L36_PLL_REFCLK_EN_MASK,
+ 0 << CS35L36_PLL_REFCLK_EN_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL,
+ CS35L36_PLL_CLK_SEL_MASK,
+ cs35l36->clksrc);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL,
+ CS35L36_PLL_OPENLOOP_MASK,
+ 0 << CS35L36_PLL_OPENLOOP_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PLL_CLK_CTRL,
+ CS35L36_PLL_REFCLK_EN_MASK,
+ 1 << CS35L36_PLL_REFCLK_EN_SHIFT);
+
+ if (cs35l36->rev_id == CS35L36_REV_A0) {
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK2);
+
+ regmap_write(cs35l36->regmap, CS35L36_DCO_CTRL, 0x00036DA8);
+ regmap_write(cs35l36->regmap, CS35L36_MISC_CTRL, 0x0100EE0E);
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_PLL_LOOP_PARAMS,
+ CS35L36_PLL_IGAIN_MASK,
+ CS35L36_PLL_IGAIN <<
+ CS35L36_PLL_IGAIN_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PLL_LOOP_PARAMS,
+ CS35L36_PLL_FFL_IGAIN_MASK,
+ clk_cfg->fll_igain);
+
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK2);
+ }
+
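+ /*
+ * When the LDM noise gate is selected and the clock source changes
+ * to or from PDMCLK, drop the noise gate delay to zero around the
+ * PDM mode switch and then restore it.
+ */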
+ if (cs35l36->clksrc == CS35L36_PLLSRC_PDMCLK) {
+ pdm_switch = cs35l36->ldm_mode_sel &&
+ (prev_clksrc != CS35L36_PLLSRC_PDMCLK);
+
+ if (pdm_switch)
+ regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG,
+ CS35L36_NG_DELAY_MASK,
+ 0 << CS35L36_NG_DELAY_SHIFT);
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_DAC_MSM_CFG,
+ CS35L36_PDM_MODE_MASK,
+ 1 << CS35L36_PDM_MODE_SHIFT);
+
+ if (pdm_switch)
+ regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG,
+ CS35L36_NG_DELAY_MASK,
+ 3 << CS35L36_NG_DELAY_SHIFT);
+ } else {
+ pdm_switch = cs35l36->ldm_mode_sel &&
+ (prev_clksrc == CS35L36_PLLSRC_PDMCLK);
+
+ if (pdm_switch)
+ regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG,
+ CS35L36_NG_DELAY_MASK,
+ 0 << CS35L36_NG_DELAY_SHIFT);
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_DAC_MSM_CFG,
+ CS35L36_PDM_MODE_MASK,
+ 0 << CS35L36_PDM_MODE_SHIFT);
+
+ if (pdm_switch)
+ regmap_update_bits(cs35l36->regmap, CS35L36_NG_CFG,
+ CS35L36_NG_DELAY_MASK,
+ 3 << CS35L36_NG_DELAY_SHIFT);
+ }
+
+ return 0;
+}
+
+static int cs35l36_boost_inductor(struct cs35l36_private *cs35l36, int inductor)
+{
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_COEFF,
+ CS35L36_BSTCVRT_K1_MASK, 0x3C);
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_COEFF,
+ CS35L36_BSTCVRT_K2_MASK,
+ 0x3C << CS35L36_BSTCVRT_K2_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SW_FREQ,
+ CS35L36_BSTCVRT_CCMFREQ_MASK, 0x00);
+
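+ /* Select boost slope and LBST value for the fitted inductor (nH) */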
+ switch (inductor) {
+ case 1000: /* 1 uH */
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SLOPE_LBST,
+ CS35L36_BSTCVRT_SLOPE_MASK,
+ 0x75 << CS35L36_BSTCVRT_SLOPE_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SLOPE_LBST,
+ CS35L36_BSTCVRT_LBSTVAL_MASK, 0x00);
+ break;
+ case 1200: /* 1.2 uH */
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SLOPE_LBST,
+ CS35L36_BSTCVRT_SLOPE_MASK,
+ 0x6B << CS35L36_BSTCVRT_SLOPE_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_SLOPE_LBST,
+ CS35L36_BSTCVRT_LBSTVAL_MASK, 0x01);
+ break;
+ default:
+ dev_err(cs35l36->dev, "%s Invalid Inductor Value %d uH\n",
+ __func__, inductor);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs35l36_component_probe(struct snd_soc_component *component)
+{
+ struct cs35l36_private *cs35l36 =
+ snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ if ((cs35l36->rev_id == CS35L36_REV_A0) && cs35l36->pdata.dcm_mode) {
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_DCM_CTRL,
+ CS35L36_DCM_AUTO_MASK,
+ CS35L36_DCM_AUTO_MASK);
+
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK2);
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_BST_TST_MANUAL,
+ CS35L36_BST_MAN_IPKCOMP_MASK,
+ 0 << CS35L36_BST_MAN_IPKCOMP_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_BST_TST_MANUAL,
+ CS35L36_BST_MAN_IPKCOMP_EN_MASK,
+ CS35L36_BST_MAN_IPKCOMP_EN_MASK);
+
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK2);
+ }
+
+ if (cs35l36->pdata.amp_pcm_inv)
+ regmap_update_bits(cs35l36->regmap, CS35L36_AMP_DIG_VOL_CTRL,
+ CS35L36_AMP_PCM_INV_MASK,
+ CS35L36_AMP_PCM_INV_MASK);
+
+ if (cs35l36->pdata.multi_amp_mode)
+ regmap_update_bits(cs35l36->regmap, CS35L36_ASP_TX_PIN_CTRL,
+ CS35L36_ASP_TX_HIZ_MASK,
+ CS35L36_ASP_TX_HIZ_MASK);
+
+ if (cs35l36->pdata.imon_pol_inv)
+ regmap_update_bits(cs35l36->regmap, CS35L36_VI_SPKMON_FILT,
+ CS35L36_IMON_POL_MASK, 0);
+
+ if (cs35l36->pdata.vmon_pol_inv)
+ regmap_update_bits(cs35l36->regmap, CS35L36_VI_SPKMON_FILT,
+ CS35L36_VMON_POL_MASK, 0);
+
+ if (cs35l36->pdata.bst_vctl)
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_VCTRL1,
+ CS35L35_BSTCVRT_CTL_MASK,
+ cs35l36->pdata.bst_vctl);
+
+ if (cs35l36->pdata.bst_vctl_sel)
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_VCTRL2,
+ CS35L35_BSTCVRT_CTL_SEL_MASK,
+ cs35l36->pdata.bst_vctl_sel);
+
+ if (cs35l36->pdata.bst_ipk)
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_PEAK_CUR,
+ CS35L36_BST_IPK_MASK,
+ cs35l36->pdata.bst_ipk);
+
+ if (cs35l36->pdata.boost_ind) {
+ ret = cs35l36_boost_inductor(cs35l36, cs35l36->pdata.boost_ind);
+ if (ret < 0) {
+ dev_err(cs35l36->dev,
+ "Boost inductor config failed (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (cs35l36->pdata.temp_warn_thld)
+ regmap_update_bits(cs35l36->regmap, CS35L36_DTEMP_WARN_THLD,
+ CS35L36_TEMP_THLD_MASK,
+ cs35l36->pdata.temp_warn_thld);
+
+ if (cs35l36->pdata.irq_drv_sel)
+ regmap_update_bits(cs35l36->regmap, CS35L36_PAD_INTERFACE,
+ CS35L36_INT_DRV_SEL_MASK,
+ cs35l36->pdata.irq_drv_sel <<
+ CS35L36_INT_DRV_SEL_SHIFT);
+
+ if (cs35l36->pdata.irq_gpio_sel)
+ regmap_update_bits(cs35l36->regmap, CS35L36_PAD_INTERFACE,
+ CS35L36_INT_GPIO_SEL_MASK,
+ cs35l36->pdata.irq_gpio_sel <<
+ CS35L36_INT_GPIO_SEL_SHIFT);
+
+ /*
+ * Rev B0 comes in two versions:
+ * L36 is the 10V part, L37 is the 12V part.
+ * For the L36 we need to clamp some values for safety
+ * after probe has applied the devicetree values, so that
+ * we don't miss anything that was set up earlier in probe.
+ */
+ if (cs35l36->chip_version == CS35L36_10V_L36) {
+ regmap_update_bits(cs35l36->regmap,
+ CS35L36_BSTCVRT_OVERVOLT_CTRL,
+ CS35L36_BST_OVP_THLD_MASK,
+ CS35L36_BST_OVP_THLD_11V);
+
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK2);
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_BST_ANA2_TEST,
+ CS35L36_BST_OVP_TRIM_MASK,
+ CS35L36_BST_OVP_TRIM_11V <<
+ CS35L36_BST_OVP_TRIM_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_VCTRL2,
+ CS35L36_BST_CTRL_LIM_MASK,
+ 1 << CS35L36_BST_CTRL_LIM_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_BSTCVRT_VCTRL1,
+ CS35L35_BSTCVRT_CTL_MASK,
+ CS35L36_BST_CTRL_10V_CLAMP);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK2);
+ }
+
+ /*
+ * Rev A0 and B0 require SYNC_GLOBAL_OVR to be disabled
+ * whenever GLOBAL_EN = 0, so clear it from its default here.
+ */
+ regmap_update_bits(cs35l36->regmap, CS35L36_CTRL_OVRRIDE,
+ CS35L36_SYNC_GLOBAL_OVR_MASK,
+ 0 << CS35L36_SYNC_GLOBAL_OVR_SHIFT);
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver soc_component_dev_cs35l36 = {
+ .probe = &cs35l36_component_probe,
+ .set_sysclk = cs35l36_component_set_sysclk,
+ .dapm_widgets = cs35l36_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs35l36_dapm_widgets),
+ .dapm_routes = cs35l36_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs35l36_audio_map),
+ .controls = cs35l36_aud_controls,
+ .num_controls = ARRAY_SIZE(cs35l36_aud_controls),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static struct regmap_config cs35l36_regmap = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = CS35L36_PAC_PMEM_WORD1023,
+ .reg_defaults = cs35l36_reg,
+ .num_reg_defaults = ARRAY_SIZE(cs35l36_reg),
+ .precious_reg = cs35l36_precious_reg,
+ .volatile_reg = cs35l36_volatile_reg,
+ .readable_reg = cs35l36_readable_reg,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static irqreturn_t cs35l36_irq(int irq, void *data)
+{
+ struct cs35l36_private *cs35l36 = data;
+ unsigned int status[4];
+ unsigned int masks[4];
+ int ret = IRQ_NONE;
+
+ /* ack the irq by reading all status registers */
+ regmap_bulk_read(cs35l36->regmap, CS35L36_INT1_STATUS, status,
+ ARRAY_SIZE(status));
+
+ regmap_bulk_read(cs35l36->regmap, CS35L36_INT1_MASK, masks,
+ ARRAY_SIZE(masks));
+
+ /* Check to see if unmasked bits are active */
+ if (!(status[0] & ~masks[0]) && !(status[1] & ~masks[1]) &&
+ !(status[2] & ~masks[2]) && !(status[3] & ~masks[3])) {
+ return IRQ_NONE;
+ }
+
+ /*
+ * The following interrupts require a
+ * protection release cycle to get the
+ * speaker out of Safe-Mode.
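+ * Each release toggles the relevant *_ERR_RLS bit 0 -> 1 -> 0
+ * and then writes the status bit back to clear the latched
+ * interrupt.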
+ */
+ if (status[2] & CS35L36_AMP_SHORT_ERR) {
+ dev_crit(cs35l36->dev, "Amp short error\n");
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_AMP_SHORT_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_AMP_SHORT_ERR_RLS,
+ CS35L36_AMP_SHORT_ERR_RLS);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_AMP_SHORT_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_INT3_STATUS,
+ CS35L36_AMP_SHORT_ERR,
+ CS35L36_AMP_SHORT_ERR);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status[0] & CS35L36_TEMP_WARN) {
+ dev_crit(cs35l36->dev, "Over temperature warning\n");
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_WARN_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_WARN_ERR_RLS,
+ CS35L36_TEMP_WARN_ERR_RLS);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_WARN_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS,
+ CS35L36_TEMP_WARN, CS35L36_TEMP_WARN);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status[0] & CS35L36_TEMP_ERR) {
+ dev_crit(cs35l36->dev, "Over temperature error\n");
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_ERR_RLS, CS35L36_TEMP_ERR_RLS);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS,
+ CS35L36_TEMP_ERR, CS35L36_TEMP_ERR);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status[0] & CS35L36_BST_OVP_ERR) {
+ dev_crit(cs35l36->dev, "VBST Over Voltage error\n");
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_ERR_RLS, CS35L36_TEMP_ERR_RLS);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_TEMP_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS,
+ CS35L36_BST_OVP_ERR, CS35L36_BST_OVP_ERR);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status[0] & CS35L36_BST_DCM_UVP_ERR) {
+ dev_crit(cs35l36->dev, "DCM VBST Under Voltage Error\n");
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_BST_UVP_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_BST_UVP_ERR_RLS,
+ CS35L36_BST_UVP_ERR_RLS);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_BST_UVP_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS,
+ CS35L36_BST_DCM_UVP_ERR,
+ CS35L36_BST_DCM_UVP_ERR);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status[0] & CS35L36_BST_SHORT_ERR) {
+ dev_crit(cs35l36->dev, "LBST SHORT error!\n");
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_BST_SHORT_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_BST_SHORT_ERR_RLS,
+ CS35L36_BST_SHORT_ERR_RLS);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PROTECT_REL_ERR,
+ CS35L36_BST_SHORT_ERR_RLS, 0);
+ regmap_update_bits(cs35l36->regmap, CS35L36_INT1_STATUS,
+ CS35L36_BST_SHORT_ERR,
+ CS35L36_BST_SHORT_ERR);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int cs35l36_handle_of_data(struct i2c_client *i2c_client,
+ struct cs35l36_platform_data *pdata)
+{
+ struct device_node *np = i2c_client->dev.of_node;
+ struct cs35l36_vpbr_cfg *vpbr_config = &pdata->vpbr_config;
+ struct device_node *vpbr_node;
+ unsigned int val;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ ret = of_property_read_u32(np, "cirrus,boost-ctl-millivolt", &val);
+ if (!ret) {
+ if (val < 2550 || val > 12000) {
+ dev_err(&i2c_client->dev,
+ "Invalid Boost Voltage %d mV\n", val);
+ return -EINVAL;
+ }
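+ /*
+ * Encode as 100 mV steps above 2550 mV, pre-shifted left by one
+ * so the value can be written directly to BSTCVRT_VCTRL1.
+ */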
+ pdata->bst_vctl = (((val - 2550) / 100) + 1) << 1;
+ } else {
+ dev_err(&i2c_client->dev,
+ "Unable to find required parameter 'cirrus,boost-ctl-millivolt'\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "cirrus,boost-ctl-select", &val);
+ if (!ret)
+ pdata->bst_vctl_sel = val | CS35L36_VALID_PDATA;
+
+ ret = of_property_read_u32(np, "cirrus,boost-peak-milliamp", &val);
+ if (!ret) {
+ if (val < 1600 || val > 4500) {
+ dev_err(&i2c_client->dev,
+ "Invalid Boost Peak Current %u mA\n", val);
+ return -EINVAL;
+ }
+
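+ /* Encode as 50 mA steps above 1600 mA */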
+ pdata->bst_ipk = (val - 1600) / 50;
+ } else {
+ dev_err(&i2c_client->dev,
+ "Unable to find required parameter 'cirrus,boost-peak-milliamp'\n");
+ return -EINVAL;
+ }
+
+ pdata->multi_amp_mode = of_property_read_bool(np,
+ "cirrus,multi-amp-mode");
+
+ pdata->dcm_mode = of_property_read_bool(np,
+ "cirrus,dcm-mode-enable");
+
+ pdata->amp_pcm_inv = of_property_read_bool(np,
+ "cirrus,amp-pcm-inv");
+
+ pdata->imon_pol_inv = of_property_read_bool(np,
+ "cirrus,imon-pol-inv");
+
+ pdata->vmon_pol_inv = of_property_read_bool(np,
+ "cirrus,vmon-pol-inv");
+
+ if (of_property_read_u32(np, "cirrus,temp-warn-threshold", &val) >= 0)
+ pdata->temp_warn_thld = val | CS35L36_VALID_PDATA;
+
+ if (of_property_read_u32(np, "cirrus,boost-ind-nanohenry", &val) >= 0) {
+ pdata->boost_ind = val;
+ } else {
+ dev_err(&i2c_client->dev, "Inductor not specified.\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(np, "cirrus,irq-drive-select", &val) >= 0)
+ pdata->irq_drv_sel = val | CS35L36_VALID_PDATA;
+
+ if (of_property_read_u32(np, "cirrus,irq-gpio-select", &val) >= 0)
+ pdata->irq_gpio_sel = val | CS35L36_VALID_PDATA;
+
+ /* VPBR Config */
+ vpbr_node = of_get_child_by_name(np, "cirrus,vpbr-config");
+ vpbr_config->is_present = vpbr_node ? true : false;
+ if (vpbr_config->is_present) {
+ if (of_property_read_u32(vpbr_node, "cirrus,vpbr-en",
+ &val) >= 0)
+ vpbr_config->vpbr_en = val;
+ if (of_property_read_u32(vpbr_node, "cirrus,vpbr-thld",
+ &val) >= 0)
+ vpbr_config->vpbr_thld = val;
+ if (of_property_read_u32(vpbr_node, "cirrus,vpbr-atk-rate",
+ &val) >= 0)
+ vpbr_config->vpbr_atk_rate = val;
+ if (of_property_read_u32(vpbr_node, "cirrus,vpbr-atk-vol",
+ &val) >= 0)
+ vpbr_config->vpbr_atk_vol = val;
+ if (of_property_read_u32(vpbr_node, "cirrus,vpbr-max-attn",
+ &val) >= 0)
+ vpbr_config->vpbr_max_attn = val;
+ if (of_property_read_u32(vpbr_node, "cirrus,vpbr-wait",
+ &val) >= 0)
+ vpbr_config->vpbr_wait = val;
+ if (of_property_read_u32(vpbr_node, "cirrus,vpbr-rel-rate",
+ &val) >= 0)
+ vpbr_config->vpbr_rel_rate = val;
+ if (of_property_read_u32(vpbr_node, "cirrus,vpbr-mute-en",
+ &val) >= 0)
+ vpbr_config->vpbr_mute_en = val;
+ }
+ of_node_put(vpbr_node);
+
+ return 0;
+}
+
+static int cs35l36_pac(struct cs35l36_private *cs35l36)
+{
+ int ret, count;
+ unsigned int val;
+
+ if (cs35l36->rev_id != CS35L36_REV_B0)
+ return 0;
+
+ /*
+ * Magic code for internal PAC
+ */
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_UNLOCK2);
+
+ usleep_range(9500, 10500);
+
+ regmap_write(cs35l36->regmap, CS35L36_PAC_CTL1,
+ CS35L36_PAC_RESET);
+ regmap_write(cs35l36->regmap, CS35L36_PAC_CTL3,
+ CS35L36_PAC_MEM_ACCESS);
+ regmap_write(cs35l36->regmap, CS35L36_PAC_PMEM_WORD0,
+ CS35L36_B0_PAC_PATCH);
+
+ regmap_write(cs35l36->regmap, CS35L36_PAC_CTL3,
+ CS35L36_PAC_MEM_ACCESS_CLR);
+ regmap_write(cs35l36->regmap, CS35L36_PAC_CTL1,
+ CS35L36_PAC_ENABLE_MASK);
+
+ usleep_range(9500, 10500);
+
+ ret = regmap_read(cs35l36->regmap, CS35L36_INT4_STATUS, &val);
+ if (ret < 0) {
+ dev_err(cs35l36->dev, "Failed to read int4_status %d\n", ret);
+ return ret;
+ }
+
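+ /* Poll INT4_STATUS for MCU_CONFIG_CLR, giving up after ~10-20 ms */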
+ count = 0;
+ while (!(val & CS35L36_MCU_CONFIG_CLR)) {
+ usleep_range(100, 200);
+ count++;
+
+ ret = regmap_read(cs35l36->regmap, CS35L36_INT4_STATUS,
+ &val);
+ if (ret < 0) {
+ dev_err(cs35l36->dev, "Failed to read int4_status %d\n",
+ ret);
+ return ret;
+ }
+
+ if (count >= 100)
+ return -EINVAL;
+ }
+
+ regmap_write(cs35l36->regmap, CS35L36_INT4_STATUS,
+ CS35L36_MCU_CONFIG_CLR);
+ regmap_update_bits(cs35l36->regmap, CS35L36_PAC_CTL1,
+ CS35L36_PAC_ENABLE_MASK, 0);
+
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK1);
+ regmap_write(cs35l36->regmap, CS35L36_TESTKEY_CTRL,
+ CS35L36_TEST_LOCK2);
+
+ return 0;
+}
+
+static void cs35l36_apply_vpbr_config(struct cs35l36_private *cs35l36)
+{
+ struct cs35l36_platform_data *pdata = &cs35l36->pdata;
+ struct cs35l36_vpbr_cfg *vpbr_config = &pdata->vpbr_config;
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_PWR_CTRL3,
+ CS35L36_VPBR_EN_MASK,
+ vpbr_config->vpbr_en <<
+ CS35L36_VPBR_EN_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG,
+ CS35L36_VPBR_THLD_MASK,
+ vpbr_config->vpbr_thld <<
+ CS35L36_VPBR_THLD_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG,
+ CS35L36_VPBR_MAX_ATTN_MASK,
+ vpbr_config->vpbr_max_attn <<
+ CS35L36_VPBR_MAX_ATTN_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG,
+ CS35L36_VPBR_ATK_VOL_MASK,
+ vpbr_config->vpbr_atk_vol <<
+ CS35L36_VPBR_ATK_VOL_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG,
+ CS35L36_VPBR_ATK_RATE_MASK,
+ vpbr_config->vpbr_atk_rate <<
+ CS35L36_VPBR_ATK_RATE_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG,
+ CS35L36_VPBR_WAIT_MASK,
+ vpbr_config->vpbr_wait <<
+ CS35L36_VPBR_WAIT_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG,
+ CS35L36_VPBR_REL_RATE_MASK,
+ vpbr_config->vpbr_rel_rate <<
+ CS35L36_VPBR_REL_RATE_SHIFT);
+ regmap_update_bits(cs35l36->regmap, CS35L36_VPBR_CFG,
+ CS35L36_VPBR_MUTE_EN_MASK,
+ vpbr_config->vpbr_mute_en <<
+ CS35L36_VPBR_MUTE_EN_SHIFT);
+}
+
+static const struct reg_sequence cs35l36_reva0_errata_patch[] = {
+ { CS35L36_TESTKEY_CTRL, CS35L36_TEST_UNLOCK1 },
+ { CS35L36_TESTKEY_CTRL, CS35L36_TEST_UNLOCK2 },
+ /* Errata Writes */
+ { CS35L36_OTP_CTRL1, 0x00002060 },
+ { CS35L36_OTP_CTRL2, 0x00000001 },
+ { CS35L36_OTP_CTRL1, 0x00002460 },
+ { CS35L36_OTP_CTRL2, 0x00000001 },
+ { 0x00002088, 0x012A1838 },
+ { 0x00003014, 0x0100EE0E },
+ { 0x00003008, 0x0008184A },
+ { 0x00007418, 0x509001C8 },
+ { 0x00007064, 0x0929A800 },
+ { 0x00002D10, 0x0002C01C },
+ { 0x0000410C, 0x00000A11 },
+ { 0x00006E08, 0x8B19140C },
+ { 0x00006454, 0x0300000A },
+ { CS35L36_AMP_NG_CTRL, 0x000020EF },
+ { 0x00007E34, 0x0000000E },
+ { 0x0000410C, 0x00000A11 },
+ { 0x00007410, 0x20514B00 },
+ /* PAC Config */
+ { CS35L36_CTRL_OVRRIDE, 0x00000000 },
+ { CS35L36_PAC_INT0_CTRL, 0x00860001 },
+ { CS35L36_PAC_INT1_CTRL, 0x00860001 },
+ { CS35L36_PAC_INT2_CTRL, 0x00860001 },
+ { CS35L36_PAC_INT3_CTRL, 0x00860001 },
+ { CS35L36_PAC_INT4_CTRL, 0x00860001 },
+ { CS35L36_PAC_INT5_CTRL, 0x00860001 },
+ { CS35L36_PAC_INT6_CTRL, 0x00860001 },
+ { CS35L36_PAC_INT7_CTRL, 0x00860001 },
+ { CS35L36_PAC_INT_FLUSH_CTRL, 0x000000FF },
+ { CS35L36_TESTKEY_CTRL, CS35L36_TEST_LOCK1 },
+ { CS35L36_TESTKEY_CTRL, CS35L36_TEST_LOCK2 },
+};
+
+static const struct reg_sequence cs35l36_revb0_errata_patch[] = {
+ { CS35L36_TESTKEY_CTRL, CS35L36_TEST_UNLOCK1 },
+ { CS35L36_TESTKEY_CTRL, CS35L36_TEST_UNLOCK2 },
+ { 0x00007064, 0x0929A800 },
+ { 0x00007850, 0x00002FA9 },
+ { 0x00007854, 0x0003F1D5 },
+ { 0x00007858, 0x0003F5E3 },
+ { 0x0000785C, 0x00001137 },
+ { 0x00007860, 0x0001A7A5 },
+ { 0x00007864, 0x0002F16A },
+ { 0x00007868, 0x00003E21 },
+ { 0x00007848, 0x00000001 },
+ { 0x00003854, 0x05180240 },
+ { 0x00007418, 0x509001C8 },
+ { 0x0000394C, 0x028764BD },
+ { CS35L36_TESTKEY_CTRL, CS35L36_TEST_LOCK1 },
+ { CS35L36_TESTKEY_CTRL, CS35L36_TEST_LOCK2 },
+};
+
+static int cs35l36_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct cs35l36_private *cs35l36;
+ struct device *dev = &i2c_client->dev;
+ struct cs35l36_platform_data *pdata = dev_get_platdata(dev);
+ struct irq_data *irq_d;
+ int ret, irq_pol, chip_irq_pol, i;
+ u32 reg_id, reg_revid, l37_id_reg;
+
+ cs35l36 = devm_kzalloc(dev, sizeof(struct cs35l36_private), GFP_KERNEL);
+ if (!cs35l36)
+ return -ENOMEM;
+
+ cs35l36->dev = dev;
+
+ i2c_set_clientdata(i2c_client, cs35l36);
+ cs35l36->regmap = devm_regmap_init_i2c(i2c_client, &cs35l36_regmap);
+ if (IS_ERR(cs35l36->regmap)) {
+ ret = PTR_ERR(cs35l36->regmap);
+ dev_err(dev, "regmap_init() failed: %d\n", ret);
+ goto err;
+ }
+
+ cs35l36->num_supplies = ARRAY_SIZE(cs35l36_supplies);
+ for (i = 0; i < ARRAY_SIZE(cs35l36_supplies); i++)
+ cs35l36->supplies[i].supply = cs35l36_supplies[i];
+
+ ret = devm_regulator_bulk_get(dev, cs35l36->num_supplies,
+ cs35l36->supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to request core supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (pdata) {
+ cs35l36->pdata = *pdata;
+ } else {
+ pdata = devm_kzalloc(dev, sizeof(struct cs35l36_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (i2c_client->dev.of_node) {
+ ret = cs35l36_handle_of_data(i2c_client, pdata);
+ if (ret != 0)
+ return ret;
+
+ }
+
+ cs35l36->pdata = *pdata;
+ }
+
+ ret = regulator_bulk_enable(cs35l36->num_supplies, cs35l36->supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable core supplies: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * A NULL descriptor is acceptable here: in stereo mode the reset
+ * line may be shared with the second amplifier.
+ */
+ cs35l36->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(cs35l36->reset_gpio)) {
+ ret = PTR_ERR(cs35l36->reset_gpio);
+ cs35l36->reset_gpio = NULL;
+ if (ret == -EBUSY) {
+ dev_info(dev, "Reset line busy, assuming shared reset\n");
+ } else {
+ dev_err(dev, "Failed to get reset GPIO: %d\n", ret);
+ goto err_disable_regs;
+ }
+ }
+
+ if (cs35l36->reset_gpio)
+ gpiod_set_value_cansleep(cs35l36->reset_gpio, 1);
+
+ usleep_range(2000, 2100);
+
+ /* initialize amplifier */
+ ret = regmap_read(cs35l36->regmap, CS35L36_SW_RESET, &reg_id);
+ if (ret < 0) {
+ dev_err(dev, "Get Device ID failed %d\n", ret);
+ goto err;
+ }
+
+ if (reg_id != CS35L36_CHIP_ID) {
+ dev_err(dev, "Device ID (%X). Expected ID %X\n", reg_id,
+ CS35L36_CHIP_ID);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = regmap_read(cs35l36->regmap, CS35L36_REV_ID, &reg_revid);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Get Revision ID failed %d\n", ret);
+ goto err;
+ }
+
+ cs35l36->rev_id = reg_revid >> 8;
+
+ ret = regmap_read(cs35l36->regmap, CS35L36_OTP_MEM30, &l37_id_reg);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Failed to read otp_id Register %d\n",
+ ret);
+ return ret;
+ }
+
+ if ((l37_id_reg & CS35L36_OTP_REV_MASK) == CS35L36_OTP_REV_L37)
+ cs35l36->chip_version = CS35L36_12V_L37;
+ else
+ cs35l36->chip_version = CS35L36_10V_L36;
+
+ switch (cs35l36->rev_id) {
+ case CS35L36_REV_A0:
+ ret = regmap_register_patch(cs35l36->regmap,
+ cs35l36_reva0_errata_patch,
+ ARRAY_SIZE(cs35l36_reva0_errata_patch));
+ if (ret < 0) {
+ dev_err(dev, "Failed to apply A0 errata patch %d\n",
+ ret);
+ goto err;
+ }
+ break;
+ case CS35L36_REV_B0:
+ ret = cs35l36_pac(cs35l36);
+ if (ret < 0) {
+ dev_err(dev, "Failed to Trim OTP %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_register_patch(cs35l36->regmap,
+ cs35l36_revb0_errata_patch,
+ ARRAY_SIZE(cs35l36_revb0_errata_patch));
+ if (ret < 0) {
+ dev_err(dev, "Failed to apply B0 errata patch %d\n",
+ ret);
+ goto err;
+ }
+ break;
+ }
+
+ if (pdata->vpbr_config.is_present)
+ cs35l36_apply_vpbr_config(cs35l36);
+
+ irq_d = irq_get_irq_data(i2c_client->irq);
+ if (!irq_d) {
+ dev_err(&i2c_client->dev, "Invalid IRQ: %d\n", i2c_client->irq);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ irq_pol = irqd_get_trigger_type(irq_d);
+
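+ /* Mirror the host IRQ trigger polarity in the INT pad config */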
+ switch (irq_pol) {
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ chip_irq_pol = 0;
+ break;
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ chip_irq_pol = 1;
+ break;
+ default:
+ dev_err(cs35l36->dev, "Invalid IRQ polarity: %d\n", irq_pol);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_PAD_INTERFACE,
+ CS35L36_INT_POL_SEL_MASK,
+ chip_irq_pol << CS35L36_INT_POL_SEL_SHIFT);
+
+ ret = devm_request_threaded_irq(dev, i2c_client->irq, NULL, cs35l36_irq,
+ IRQF_ONESHOT | irq_pol, "cs35l36",
+ cs35l36);
+ if (ret != 0) {
+ dev_err(dev, "Failed to request IRQ: %d\n", ret);
+ goto err;
+ }
+
+ regmap_update_bits(cs35l36->regmap, CS35L36_PAD_INTERFACE,
+ CS35L36_INT_OUTPUT_EN_MASK, 1);
+
+ /* Set interrupt masks for critical errors */
+ regmap_write(cs35l36->regmap, CS35L36_INT1_MASK,
+ CS35L36_INT1_MASK_DEFAULT);
+ regmap_write(cs35l36->regmap, CS35L36_INT3_MASK,
+ CS35L36_INT3_MASK_DEFAULT);
+
+ dev_info(&i2c_client->dev, "Cirrus Logic CS35L%d, Revision: %02X\n",
+ cs35l36->chip_version, reg_revid >> 8);
+
+ ret = devm_snd_soc_register_component(dev, &soc_component_dev_cs35l36,
+ cs35l36_dai,
+ ARRAY_SIZE(cs35l36_dai));
+ if (ret < 0) {
+ dev_err(dev, "%s: Register component failed %d\n", __func__,
+ ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ gpiod_set_value_cansleep(cs35l36->reset_gpio, 0);
+
+err_disable_regs:
+ regulator_bulk_disable(cs35l36->num_supplies, cs35l36->supplies);
+ return ret;
+}
+
+static int cs35l36_i2c_remove(struct i2c_client *client)
+{
+ struct cs35l36_private *cs35l36 = i2c_get_clientdata(client);
+
+ /* Reset interrupt masks for device removal */
+ regmap_write(cs35l36->regmap, CS35L36_INT1_MASK,
+ CS35L36_INT1_MASK_RESET);
+ regmap_write(cs35l36->regmap, CS35L36_INT3_MASK,
+ CS35L36_INT3_MASK_RESET);
+
+ if (cs35l36->reset_gpio)
+ gpiod_set_value_cansleep(cs35l36->reset_gpio, 0);
+
+ regulator_bulk_disable(cs35l36->num_supplies, cs35l36->supplies);
+
+ return 0;
+}
+
+static const struct of_device_id cs35l36_of_match[] = {
+ {.compatible = "cirrus,cs35l36"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, cs35l36_of_match);
+
+static const struct i2c_device_id cs35l36_id[] = {
+ {"cs35l36", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs35l36_id);
+
+static struct i2c_driver cs35l36_i2c_driver = {
+ .driver = {
+ .name = "cs35l36",
+ .of_match_table = cs35l36_of_match,
+ },
+ .id_table = cs35l36_id,
+ .probe = cs35l36_i2c_probe,
+ .remove = cs35l36_i2c_remove,
+};
+module_i2c_driver(cs35l36_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS35L36 driver");
+MODULE_AUTHOR("James Schulman, Cirrus Logic Inc, <james.schulman@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs35l36.h b/sound/soc/codecs/cs35l36.h
new file mode 100644
index 000000000000..f6e38c633b93
--- /dev/null
+++ b/sound/soc/codecs/cs35l36.h
@@ -0,0 +1,446 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cs35l36.h -- CS35L36 ALSA SoC audio driver
+ *
+ * Copyright 2018 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ *
+ */
+
+#ifndef __CS35L36_H__
+#define __CS35L36_H__
+
+#include <linux/regmap.h>
+
+#define CS35L36_FIRSTREG 0x00000000
+#define CS35L36_LASTREG 0x00E037FC
+#define CS35L36_SW_RESET 0x00000000
+#define CS35L36_SW_REV 0x00000004
+#define CS35L36_HW_REV 0x00000008
+#define CS35L36_TESTKEY_CTRL 0x00000020
+#define CS35L36_USERKEY_CTL 0x00000024
+#define CS35L36_OTP_MEM30 0x00000478
+#define CS35L36_OTP_CTRL1 0x00000500
+#define CS35L36_OTP_CTRL2 0x00000504
+#define CS35L36_OTP_CTRL3 0x00000508
+#define CS35L36_OTP_CTRL4 0x0000050C
+#define CS35L36_OTP_CTRL5 0x00000510
+#define CS35L36_PAC_CTL1 0x00000C00
+#define CS35L36_PAC_CTL2 0x00000C04
+#define CS35L36_PAC_CTL3 0x00000C08
+#define CS35L36_DEVICE_ID 0x00002004
+#define CS35L36_FAB_ID 0x00002008
+#define CS35L36_REV_ID 0x0000200C
+#define CS35L36_PWR_CTRL1 0x00002014
+#define CS35L36_PWR_CTRL2 0x00002018
+#define CS35L36_PWR_CTRL3 0x0000201C
+#define CS35L36_CTRL_OVRRIDE 0x00002020
+#define CS35L36_AMP_OUT_MUTE 0x00002024
+#define CS35L36_OTP_TRIM_STATUS 0x00002028
+#define CS35L36_DISCH_FILT 0x0000202C
+#define CS35L36_OSC_TRIM 0x00002030
+#define CS35L36_PROTECT_REL_ERR 0x00002034
+#define CS35L36_PAD_INTERFACE 0x00002400
+#define CS35L36_PLL_CLK_CTRL 0x00002C04
+#define CS35L36_GLOBAL_CLK_CTRL 0x00002C0C
+#define CS35L36_ADC_CLK_CTRL 0x00002C10
+#define CS35L36_SWIRE_CLK_CTRL 0x00002C14
+#define CS35L36_SP_SCLK_CLK_CTRL 0x00002D00
+#define CS35L36_TST_FS_MON0 0x00002D10
+#define CS35L36_PLL_LOOP_PARAMS 0x00003008
+#define CS35L36_DCO_CTRL 0x00003010
+#define CS35L36_MISC_CTRL 0x00003014
+#define CS35L36_MDSYNC_EN 0x00003404
+#define CS35L36_MDSYNC_TX_ID 0x00003408
+#define CS35L36_MDSYNC_PWR_CTRL 0x0000340C
+#define CS35L36_MDSYNC_DATA_TX 0x00003410
+#define CS35L36_MDSYNC_TX_STATUS 0x0000341C
+#define CS35L36_MDSYNC_RX_STATUS 0x00003420
+#define CS35L36_MDSYNC_ERR_STATUS 0x00003424
+#define CS35L36_BSTCVRT_VCTRL1 0x00003800
+#define CS35L36_BSTCVRT_VCTRL2 0x00003804
+#define CS35L36_BSTCVRT_PEAK_CUR 0x00003808
+#define CS35L36_BSTCVRT_SFT_RAMP 0x0000380C
+#define CS35L36_BSTCVRT_COEFF 0x00003810
+#define CS35L36_BSTCVRT_SLOPE_LBST 0x00003814
+#define CS35L36_BSTCVRT_SW_FREQ 0x00003818
+#define CS35L36_BSTCVRT_DCM_CTRL 0x0000381C
+#define CS35L36_BSTCVRT_DCM_MODE_FORCE 0x00003820
+#define CS35L36_BSTCVRT_OVERVOLT_CTRL 0x00003830
+#define CS35L36_BST_TST_MANUAL 0x0000393C
+#define CS35L36_BST_ANA2_TEST 0x0000394C
+#define CS35L36_VPI_LIMIT_MODE 0x00003C04
+#define CS35L36_VPI_LIMIT_MINMAX 0x00003C08
+#define CS35L36_VPI_VP_THLD 0x00003C0C
+#define CS35L36_VPI_TRACK_CTRL 0x00003C10
+#define CS35L36_VPI_TRIG_MODE_CTRL 0x00003C14
+#define CS35L36_VPI_TRIG_STEPS 0x00003C18
+#define CS35L36_VI_SPKMON_FILT 0x00004004
+#define CS35L36_VI_SPKMON_GAIN 0x00004008
+#define CS35L36_VI_SPKMON_IP_SEL 0x00004100
+#define CS35L36_DTEMP_WARN_THLD 0x00004220
+#define CS35L36_DTEMP_STATUS 0x00004300
+#define CS35L36_VPVBST_FS_SEL 0x00004400
+#define CS35L36_VPVBST_VP_CTRL 0x00004440
+#define CS35L36_VPVBST_VBST_CTRL 0x00004444
+#define CS35L36_ASP_TX_PIN_CTRL 0x00004800
+#define CS35L36_ASP_RATE_CTRL 0x00004804
+#define CS35L36_ASP_FORMAT 0x00004808
+#define CS35L36_ASP_FRAME_CTRL 0x00004818
+#define CS35L36_ASP_TX1_TX2_SLOT 0x0000481C
+#define CS35L36_ASP_TX3_TX4_SLOT 0x00004820
+#define CS35L36_ASP_TX5_TX6_SLOT 0x00004824
+#define CS35L36_ASP_TX7_TX8_SLOT 0x00004828
+#define CS35L36_ASP_RX1_SLOT 0x0000482C
+#define CS35L36_ASP_RX_TX_EN 0x0000483C
+#define CS35L36_ASP_RX1_SEL 0x00004C00
+#define CS35L36_ASP_TX1_SEL 0x00004C20
+#define CS35L36_ASP_TX2_SEL 0x00004C24
+#define CS35L36_ASP_TX3_SEL 0x00004C28
+#define CS35L36_ASP_TX4_SEL 0x00004C2C
+#define CS35L36_ASP_TX5_SEL 0x00004C30
+#define CS35L36_ASP_TX6_SEL 0x00004C34
+#define CS35L36_SWIRE_P1_TX1_SEL 0x00004C40
+#define CS35L36_SWIRE_P1_TX2_SEL 0x00004C44
+#define CS35L36_SWIRE_P2_TX1_SEL 0x00004C60
+#define CS35L36_SWIRE_P2_TX2_SEL 0x00004C64
+#define CS35L36_SWIRE_P2_TX3_SEL 0x00004C68
+#define CS35L36_SWIRE_DP1_FIFO_CFG 0x00005000
+#define CS35L36_SWIRE_DP2_FIFO_CFG 0x00005004
+#define CS35L36_SWIRE_DP3_FIFO_CFG 0x00005008
+#define CS35L36_SWIRE_PCM_RX_DATA 0x0000500C
+#define CS35L36_SWIRE_FS_SEL 0x00005010
+#define CS35L36_SPARE_CP_BITS 0x00005C00
+#define CS35L36_AMP_DIG_VOL_CTRL 0x00006000
+#define CS35L36_VPBR_CFG 0x00006404
+#define CS35L36_VBBR_CFG 0x00006408
+#define CS35L36_VPBR_STATUS 0x0000640C
+#define CS35L36_VBBR_STATUS 0x00006410
+#define CS35L36_OVERTEMP_CFG 0x00006414
+#define CS35L36_AMP_ERR_VOL 0x00006418
+#define CS35L36_CLASSH_CFG 0x00006800
+#define CS35L36_CLASSH_FET_DRV_CFG 0x00006804
+#define CS35L36_NG_CFG 0x00006808
+#define CS35L36_AMP_GAIN_CTRL 0x00006C04
+#define CS35L36_PWM_MOD_IO_CTRL 0x0000706C
+#define CS35L36_PWM_MOD_STATUS 0x00007070
+#define CS35L36_DAC_MSM_CFG 0x00007400
+#define CS35L36_AMP_SLOPE_CTRL 0x00007410
+#define CS35L36_AMP_PDM_VOLUME 0x00007E04
+#define CS35L36_AMP_PDM_RATE_CTRL 0x00007E08
+#define CS35L36_PDM_CH_SEL 0x00007E10
+#define CS35L36_AMP_NG_CTRL 0x00007E14
+#define CS35L36_PDM_HIGHFILT_CTRL 0x00007E3C
+#define CS35L36_INT1_STATUS 0x00D00000
+#define CS35L36_INT2_STATUS 0x00D00004
+#define CS35L36_INT3_STATUS 0x00D00008
+#define CS35L36_INT4_STATUS 0x00D0000C
+#define CS35L36_INT1_RAW_STATUS 0x00D00020
+#define CS35L36_INT2_RAW_STATUS 0x00D00024
+#define CS35L36_INT3_RAW_STATUS 0x00D00028
+#define CS35L36_INT4_RAW_STATUS 0x00D0002C
+#define CS35L36_INT1_MASK 0x00D00040
+#define CS35L36_INT2_MASK 0x00D00044
+#define CS35L36_INT3_MASK 0x00D00048
+#define CS35L36_INT4_MASK 0x00D0004C
+#define CS35L36_INT1_EDGE_LVL_CTRL 0x00D00060
+#define CS35L36_INT3_EDGE_LVL_CTRL 0x00D00068
+#define CS35L36_PAC_INT_STATUS 0x00D00200
+#define CS35L36_PAC_INT_RAW_STATUS 0x00D00210
+#define CS35L36_PAC_INT_FLUSH_CTRL 0x00D00218
+#define CS35L36_PAC_INT0_CTRL 0x00D00220
+#define CS35L36_PAC_INT1_CTRL 0x00D00224
+#define CS35L36_PAC_INT2_CTRL 0x00D00228
+#define CS35L36_PAC_INT3_CTRL 0x00D0022C
+#define CS35L36_PAC_INT4_CTRL 0x00D00230
+#define CS35L36_PAC_INT5_CTRL 0x00D00234
+#define CS35L36_PAC_INT6_CTRL 0x00D00238
+#define CS35L36_PAC_INT7_CTRL 0x00D0023C
+#define CS35L36_PAC_PMEM_WORD0 0x00E02800
+#define CS35L36_PAC_PMEM_WORD1 0x00E02804
+#define CS35L36_PAC_PMEM_WORD1023 0x00E037FC
+
+#define CS35L36_INTPAC_REG_COUNT 25
+#define CS35L36_CHIP_ID 0x00035A36
+
+#define CS35L36_INT_OUTPUT_EN_MASK 0x01
+#define CS35L36_INT_GPIO_SEL_MASK 0x02
+#define CS35L36_INT_GPIO_SEL_SHIFT 1
+#define CS35L36_INT_POL_SEL_MASK 0x04
+#define CS35L36_INT_POL_SEL_SHIFT 2
+#define CS35L36_INT_DRV_SEL_MASK 0x20
+#define CS35L36_INT_DRV_SEL_SHIFT 5
+#define CS35L36_IRQ_SRC_MASK 0x08
+#define CS35L36_IRQ_SRC_SHIFT 3
+
+#define CS35L36_SCLK_MSTR_MASK 0x40
+#define CS35L36_SCLK_MSTR_SHIFT 6
+#define CS35L36_LRCLK_MSTR_MASK 0x01
+#define CS35L36_LRCLK_MSTR_SHIFT 0
+#define CS35L36_SCLK_INV_MASK 0x100
+#define CS35L36_SCLK_INV_SHIFT 8
+#define CS35L36_LRCLK_INV_MASK 0x04
+#define CS35L36_LRCLK_INV_SHIFT 2
+#define CS35L36_SCLK_FRC_MASK 0x80
+#define CS35L36_SCLK_FRC_SHIFT 7
+#define CS35L36_LRCLK_FRC_MASK 0x02
+#define CS35L36_LRCLK_FRC_SHIFT 1
+
+#define CS35L36_PDM_MODE_MASK 0x01
+#define CS35L36_PDM_MODE_SHIFT 0
+
+#define CS35L36_ASP_FMT_MASK 0x07
+#define CS35L36_ASP_FMT_SHIFT 0
+
+#define CS35L36_ASP_RX_WIDTH_MASK 0xFF0000
+#define CS35L36_ASP_RX_WIDTH_SHIFT 16
+#define CS35L36_ASP_TX_WIDTH_MASK 0xFF
+#define CS35L36_ASP_TX_WIDTH_SHIFT 0
+#define CS35L36_ASP_WIDTH_16 0x10
+#define CS35L36_ASP_WIDTH_24 0x18
+#define CS35L36_ASP_WIDTH_32 0x20
+
+#define CS35L36_ASP_RX1_SLOT_MASK 0x3F
+#define CS35L36_ASP_RX1_EN_MASK 0x00010000
+#define CS35L36_ASP_RX1_EN_SHIFT 16
+
+#define CS35L36_ASP_TX1_SLOT_MASK 0x3F
+#define CS35L36_ASP_TX2_SLOT_MASK 0x3F0000
+#define CS35L36_ASP_TX2_SLOT_SHIFT 16
+#define CS35L36_ASP_TX3_SLOT_MASK 0x3F
+#define CS35L36_ASP_TX4_SLOT_MASK 0x3F0000
+#define CS35L36_ASP_TX4_SLOT_SHIFT 16
+#define CS35L36_ASP_TX5_SLOT_MASK 0x3F
+#define CS35L36_ASP_TX6_SLOT_MASK 0x3F0000
+#define CS35L36_ASP_TX6_SLOT_SHIFT 16
+#define CS35L36_ASP_TX7_SLOT_MASK 0x3F
+#define CS35L36_ASP_TX8_SLOT_MASK 0x3F0000
+#define CS35L36_ASP_TX8_SLOT_SHIFT 16
+#define CS35L36_ASP_TX_HIZ_MASK 0x200000
+
+#define CS35L36_APS_TX_SEL_MASK 0x7F
+
+#define CS35L36_ASP_TX1_EN_MASK 0x01
+#define CS35L36_ASP_TX2_EN_MASK 0x02
+#define CS35L36_ASP_TX2_EN_SHIFT 1
+#define CS35L36_ASP_TX3_EN_MASK 0x04
+#define CS35L36_ASP_TX3_EN_SHIFT 2
+#define CS35L36_ASP_TX4_EN_MASK 0x08
+#define CS35L36_ASP_TX4_EN_SHIFT 3
+#define CS35L36_ASP_TX5_EN_MASK 0x10
+#define CS35L36_ASP_TX5_EN_SHIFT 4
+#define CS35L36_ASP_TX6_EN_MASK 0x20
+#define CS35L36_ASP_TX6_EN_SHIFT 5
+#define CS35L36_ASP_TX7_EN_MASK 0x40
+#define CS35L36_ASP_TX7_EN_SHIFT 6
+#define CS35L36_ASP_TX8_EN_MASK 0x80
+#define CS35L36_ASP_TX8_EN_SHIFT 7
+
+
+#define CS35L36_PLL_CLK_SEL_MASK 0x07
+#define CS35L36_PLL_CLK_SEL_SHIFT 0
+#define CS35L36_PLLSRC_SCLK 0
+#define CS35L36_PLLSRC_LRCLK 1
+#define CS35L36_PLLSRC_SELF 3
+#define CS35L36_PLLSRC_PDMCLK 4
+#define CS35L36_PLLSRC_MCLK 5
+#define CS35L36_PLLSRC_SWIRE 7
+#define CS35L36_REFCLK_FREQ_MASK 0x7E0
+#define CS35L36_REFCLK_FREQ_SHIFT 5
+#define CS35L36_PLL_OPENLOOP_MASK 0x800
+#define CS35L36_PLL_OPENLOOP_SHIFT 11
+#define CS35L36_PLL_REFCLK_EN_MASK 0x10
+#define CS35L36_PLL_REFCLK_EN_SHIFT 4
+
+
+#define CS35L36_GLOBAL_FS_MASK 0x1F
+#define CS35L36_GLOBAL_FS_SHIFT 0
+
+#define CS35L36_HPF_PCM_EN_MASK 0x800
+#define CS35L36_HPF_PCM_EN_SHIFT 15
+#define CS35L36_PCM_RX_SEL_MASK 0x7F
+#define CS35L36_PCM_RX_SEL_SHIFT 0
+
+#define CS35L36_PCM_RX_SEL_ZERO 0x00
+#define CS35L36_PCM_RX_SEL_PCM 0x08
+#define CS35L36_PCM_RX_SEL_SWIRE 0x10
+#define CS35L36_PCM_RX_SEL_DIAG 0x04
+
+#define CS35L36_GLOBAL_EN_MASK 0x01
+#define CS35L36_GLOBAL_EN_SHIFT 0x00
+
+#define CS35L36_AMP_PCM_INV_MASK 0x4000
+#define CS35L36_AMP_PCM_INV_SHIFT 14
+
+#define CS35L36_AMP_VOL_PCM_MASK 0x3FF8
+#define CS35L36_AMP_VOL_PCM_SHIFT 3
+#define CS35L36_DIGITAL_MUTE 0x04CF
+
+#define CS35L36_AMP_RAMP_MASK 0x0007
+#define CS35L36_AMP_RAMP_SHIFT 0
+
+#define CS35L36_AMP_MUTE_MASK 0x0010
+#define CS35L36_AMP_MUTE_SHIFT 4
+
+#define CS35L36_GLOBAL_RESYNC_FS1_MASK 0x00000200
+#define CS35L36_GLOBAL_RESYNC_FS2_MASK 0x00000400
+#define CS35L36_SYNC_GLOBAL_OVR_MASK 0x00000002
+#define CS35L36_SYNC_GLOBAL_OVR_SHIFT 1
+
+#define CS35L36_REFCLK_IN_MASK 0x00100000
+#define CS35L36_PLL_UNLOCK_MASK 0x00002000
+
+#define CS35L36_ASP_RX_UDF_MASK 0x00000040
+#define CS35L36_ASP_RX_OVF_MASK 0x00000080
+
+#define CS35L36_IMON_POL_MASK 0x02
+#define CS35L36_IMON_POL_SHIFT 1
+
+#define CS35L36_VMON_POL_MASK 0x01
+#define CS35L36_VMON_POL_SHIFT 0
+
+#define CS35L36_PDN_DONE 0x40
+#define CS35L36_PDN_DONE_SHIFT 6
+#define CS35L36_PUP_DONE 0x80
+#define CS35L36_PUP_DONE_SHIFT 7
+#define CS35L36_GLOBAL_EN_ASSRT 0x20
+#define CS35L36_PUP_DONE_IRQ_UNMASK 0x7F
+#define CS35L36_PUP_DONE_IRQ_MASK 0xBF
+
+#define CS35L36_FS1_WINDOW_MASK 0x000007FF
+#define CS35L36_FS2_WINDOW_MASK 0x00FFF800
+#define CS35L36_FS2_WINDOW_SHIFT 12
+
+#define CS35L36_PLL_FFL_IGAIN_MASK 0x0F
+#define CS35L36_PLL_IGAIN_MASK 0x3F0
+#define CS35L36_PLL_IGAIN_SHIFT 4
+#define CS35L36_PLL_IGAIN 0x04
+
+#define CS35L36_BST_EN_MASK 0x30
+#define CS35L36_BST_EN 0x02
+#define CS35L36_BST_DIS_VP 0x01
+#define CS35L36_BST_DIS_EXTN 0x00
+#define CS35L36_BST_EN_SHIFT 4
+#define CS35L36_BST_MAN_IPKCOMP_MASK 0x200
+#define CS35L36_BST_MAN_IPKCOMP_SHIFT 9
+
+#define CS35L36_BST_MAN_IPKCOMP_EN_MASK 0x100
+#define CS35L36_BST_MAN_IPKCOMP_EN_SHIFT 8
+
+#define CS35L36_BST_IPK_MASK 0x7F
+#define CS35L36_BST_OVP_THLD_MASK 0x3F
+#define CS35L36_BST_OVP_THLD_11V 0x10
+#define CS35L36_BST_OVP_TRIM_MASK 0x00078000
+#define CS35L36_BST_OVP_TRIM_SHIFT 15
+#define CS35L36_BST_OVP_TRIM_11V 0x0C
+#define CS35L36_BST_CTRL_LIM_MASK 0x04
+#define CS35L36_BST_CTRL_LIM_SHIFT 2
+#define CS35L36_BST_CTRL_10V_CLAMP 0x96
+
+#define CS35L36_NG_AMP_EN_MASK 0x3F00
+#define CS35L36_NG_DELAY_MASK 0x70
+#define CS35L36_NG_DELAY_SHIFT 4
+#define CS35L36_AMP_ZC_SHIFT 10
+#define CS35L36_PDM_LDM_ENTER_SHIFT 3
+#define CS35L36_PDM_LDM_EXIT_SHIFT 4
+
+#define CS35L36_BSTCVRT_K1_MASK 0xFF
+#define CS35L36_BSTCVRT_K2_MASK 0xFF00
+#define CS35L36_BSTCVRT_K2_SHIFT 8
+#define CS35L36_BSTCVRT_SLOPE_MASK 0xFF00
+#define CS35L36_BSTCVRT_SLOPE_SHIFT 8
+#define CS35L36_BSTCVRT_CCMFREQ_MASK 0x0F
+#define CS35L36_BSTCVRT_LBSTVAL_MASK 0x03
+#define CS35L35_BSTCVRT_CTL_MASK 0xFF
+#define CS35L35_BSTCVRT_CTL_SEL_MASK 0x03
+#define CS35L36_DCM_AUTO_MASK 0x01
+
+#define CS35L36_INT1_MASK_DEFAULT 0xF9BA7FFF
+#define CS35L36_INT1_MASK_RESET 0xFFFFFFFF
+#define CS35L36_INT3_MASK_DEFAULT 0xFFFFEFFF
+#define CS35L36_INT3_MASK_RESET 0xFFFFFFFF
+
+
+#define CS35L36_AMP_SHORT_ERR 0x1000
+#define CS35L36_BST_SHORT_ERR 0x40000
+#define CS35L36_TEMP_WARN 0x2000000
+#define CS35L36_TEMP_ERR 0x4000000
+#define CS35L36_BST_OVP_ERR 0x10000
+#define CS35L36_BST_DCM_UVP_ERR 0x20000
+
+#define CS35L36_AMP_SHORT_ERR_RLS 0x02
+#define CS35L36_BST_SHORT_ERR_RLS 0x04
+#define CS35L36_BST_OVP_ERR_RLS 0x08
+#define CS35L36_BST_UVP_ERR_RLS 0x10
+#define CS35L36_TEMP_WARN_ERR_RLS 0x20
+#define CS35L36_TEMP_ERR_RLS 0x40
+#define CS35L36_TEMP_THLD_MASK 0x03
+
+#define CS35L36_REV_B0 0xb0
+#define CS35L36_REV_A0 0xa0
+#define CS35L36_B0_PAC_PATCH 0x00DD0102
+
+#define CS35L36_OTP_ECC_EN_MASK 0x400
+#define CS35L36_OTP_ECC_EN_SHIFT 10
+#define CS35L36_OTP_RUN_BOOT_MASK 0x01
+#define CS35L36_OTP_BOOT_DONE 0x2000000
+#define CS35L36_PAC_RESET_MASK 0x04
+#define CS35L36_PAC_RESET_SHIFT 2
+#define CS35L36_PAC_STALL_MASK 0x02
+#define CS35L36_PAC_STALL_SHIFT 1
+#define CS35L36_PAC_ENABLE_MASK 0x00000001
+#define CS35L36_PAC_MEM_ACCESS 0x01
+#define CS35L36_PAC_MEM_ACCESS_CLR 0
+#define CS35L36_SOFT_RESET 0x5AAA
+#define CS35L36_MCU_BOOT_COMPLETE 0x02
+#define CS35L36_MCU_CONFIG_UNMASK 0x00FEFFFF
+#define CS35L36_MCU_CONFIG_CLR 0x00010000
+#define CS35L36_MCU_CONFIG_MASK 0x00FFFFFF
+#define CS35L36_GPIO_INT_SEL_MASK 0x0000003B
+#define CS35L36_GPIO_INT_SEL_UNMASK 0x0000003A
+#define CS35L36_PAC_RESET 0x00000000
+#define CS35L36_OTP_REV_MASK 0x00FF0000
+#define CS35L36_OTP_REV_L37 0x00CC0000
+#define CS35L36_12V_L37 37
+#define CS35L36_10V_L36 36
+
+#define CS35L36_VPBR_EN_MASK 0x00001000
+#define CS35L36_VPBR_EN_SHIFT 12
+
+#define CS35L36_VPBR_THLD_MASK 0x0000001F
+#define CS35L36_VPBR_THLD_SHIFT 0
+#define CS35L36_VPBR_MAX_ATTN_MASK 0x00000F00
+#define CS35L36_VPBR_MAX_ATTN_SHIFT 8
+#define CS35L36_VPBR_ATK_VOL_MASK 0x0000F000
+#define CS35L36_VPBR_ATK_VOL_SHIFT 12
+#define CS35L36_VPBR_ATK_RATE_MASK 0x00070000
+#define CS35L36_VPBR_ATK_RATE_SHIFT 16
+#define CS35L36_VPBR_WAIT_MASK 0x00180000
+#define CS35L36_VPBR_WAIT_SHIFT 19
+#define CS35L36_VPBR_REL_RATE_MASK 0x00E00000
+#define CS35L36_VPBR_REL_RATE_SHIFT 21
+#define CS35L36_VPBR_MUTE_EN_MASK 0x01000000
+#define CS35L36_VPBR_MUTE_EN_SHIFT 24
+
+#define CS35L36_OSC_FREQ_TRIM_MASK 0x070
+#define CS35L36_OSC_TRIM_DONE 0x08
+
+#define CS35L36_FS1_DEFAULT_VAL 16
+#define CS35L36_FS2_DEFAULT_VAL 36
+#define CS35L36_FS_NOM_6MHZ 6000000
+
+#define CS35L36_TEST_UNLOCK1 0x00005555
+#define CS35L36_TEST_UNLOCK2 0x0000AAAA
+#define CS35L36_TEST_LOCK1 0x0000CCCC
+#define CS35L36_TEST_LOCK2 0x00003333
+
+#define CS35L36_PAC_PROG_MEM 512
+
+#define CS35L36_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
+#define CS35L36_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE \
+ | SNDRV_PCM_FMTBIT_S32_LE)
+
+extern const int cs35l36_a0_pac_patch[CS35L36_PAC_PROG_MEM];
+
+#endif
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 849fdb2cb260..1104830edaf8 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -223,10 +223,10 @@ static int cs4271_set_dai_fmt(struct snd_soc_dai *codec_dai,
switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- cs4271->master = 0;
+ cs4271->master = false;
break;
case SND_SOC_DAIFMT_CBM_CFM:
- cs4271->master = 1;
+ cs4271->master = true;
val |= CS4271_MODE1_MASTER;
break;
default:
diff --git a/sound/soc/codecs/cs4341.c b/sound/soc/codecs/cs4341.c
new file mode 100644
index 000000000000..ade7477d04f1
--- /dev/null
+++ b/sound/soc/codecs/cs4341.c
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Cirrus Logic CS4341A ALSA SoC Codec Driver
+ * Author: Alexander Shiyan <shc_work@mail.ru>
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define CS4341_REG_MODE1 0x00
+#define CS4341_REG_MODE2 0x01
+#define CS4341_REG_MIX 0x02
+#define CS4341_REG_VOLA 0x03
+#define CS4341_REG_VOLB 0x04
+
+#define CS4341_MODE2_DIF (7 << 4)
+#define CS4341_MODE2_DIF_I2S_24 (0 << 4)
+#define CS4341_MODE2_DIF_I2S_16 (1 << 4)
+#define CS4341_MODE2_DIF_LJ_24 (2 << 4)
+#define CS4341_MODE2_DIF_RJ_24 (3 << 4)
+#define CS4341_MODE2_DIF_RJ_16 (5 << 4)
+#define CS4341_VOLX_MUTE (1 << 7)
+
+struct cs4341_priv {
+ unsigned int fmt;
+ struct regmap *regmap;
+ struct regmap_config regcfg;
+};
+
+static const struct reg_default cs4341_reg_defaults[] = {
+ { CS4341_REG_MODE1, 0x00 },
+ { CS4341_REG_MODE2, 0x82 },
+ { CS4341_REG_MIX, 0x49 },
+ { CS4341_REG_VOLA, 0x80 },
+ { CS4341_REG_VOLB, 0x80 },
+};
+
+static int cs4341_set_fmt(struct snd_soc_dai *dai, unsigned int format)
+{
+ struct snd_soc_component *component = dai->component;
+ struct cs4341_priv *cs4341 = snd_soc_component_get_drvdata(component);
+
+ switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (format & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ cs4341->fmt = format & SND_SOC_DAIFMT_FORMAT_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs4341_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct cs4341_priv *cs4341 = snd_soc_component_get_drvdata(component);
+ unsigned int mode = 0;
+ int b24 = 0;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ b24 = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ default:
+ dev_err(component->dev, "Unsupported PCM format 0x%08x.\n",
+ params_format(params));
+ return -EINVAL;
+ }
+
+ switch (cs4341->fmt) {
+ case SND_SOC_DAIFMT_I2S:
+ mode = b24 ? CS4341_MODE2_DIF_I2S_24 : CS4341_MODE2_DIF_I2S_16;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ mode = CS4341_MODE2_DIF_LJ_24;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ mode = b24 ? CS4341_MODE2_DIF_RJ_24 : CS4341_MODE2_DIF_RJ_16;
+ break;
+ default:
+ dev_err(component->dev, "Unsupported DAI format 0x%08x.\n",
+ cs4341->fmt);
+ return -EINVAL;
+ }
+
+ return snd_soc_component_update_bits(component, CS4341_REG_MODE2,
+ CS4341_MODE2_DIF, mode);
+}
+
+static int cs4341_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_component *component = dai->component;
+ int ret;
+
+ ret = snd_soc_component_update_bits(component, CS4341_REG_VOLA,
+ CS4341_VOLX_MUTE,
+ mute ? CS4341_VOLX_MUTE : 0);
+ if (ret < 0)
+ return ret;
+
+ return snd_soc_component_update_bits(component, CS4341_REG_VOLB,
+ CS4341_VOLX_MUTE,
+ mute ? CS4341_VOLX_MUTE : 0);
+}
+
+static DECLARE_TLV_DB_SCALE(out_tlv, -9000, 100, 0);
+
+static const char * const deemph[] = {
+ "None", "44.1k", "48k", "32k",
+};
+
+static const struct soc_enum deemph_enum =
+ SOC_ENUM_SINGLE(CS4341_REG_MODE2, 2, 4, deemph);
+
+static const char * const srzc[] = {
+ "Immediate", "Zero Cross", "Soft Ramp", "SR on ZC",
+};
+
+static const struct soc_enum srzc_enum =
+ SOC_ENUM_SINGLE(CS4341_REG_MIX, 5, 4, srzc);
+
+static const struct snd_soc_dapm_widget cs4341_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("HiFi DAC", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUTPUT("OutA"),
+ SND_SOC_DAPM_OUTPUT("OutB"),
+};
+
+static const struct snd_soc_dapm_route cs4341_routes[] = {
+ { "OutA", NULL, "HiFi DAC" },
+ { "OutB", NULL, "HiFi DAC" },
+ { "DAC Playback", NULL, "OutA" },
+ { "DAC Playback", NULL, "OutB" },
+};
+
+static const struct snd_kcontrol_new cs4341_controls[] = {
+ SOC_DOUBLE_R_TLV("Master Playback Volume",
+ CS4341_REG_VOLA, CS4341_REG_VOLB, 0, 90, 1, out_tlv),
+ SOC_ENUM("De-Emphasis Control", deemph_enum),
+ SOC_ENUM("Soft Ramp Zero Cross Control", srzc_enum),
+ SOC_SINGLE("Auto-Mute Switch", CS4341_REG_MODE2, 7, 1, 0),
+ SOC_SINGLE("Popguard Transient Switch", CS4341_REG_MODE2, 1, 1, 0),
+};
+
+static const struct snd_soc_dai_ops cs4341_dai_ops = {
+ .set_fmt = cs4341_set_fmt,
+ .hw_params = cs4341_hw_params,
+ .digital_mute = cs4341_digital_mute,
+};
+
+static struct snd_soc_dai_driver cs4341_dai = {
+ .name = "cs4341a-hifi",
+ .playback = {
+ .stream_name = "DAC Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &cs4341_dai_ops,
+ .symmetric_rates = 1,
+};
+
+static const struct snd_soc_component_driver soc_component_cs4341 = {
+ .controls = cs4341_controls,
+ .num_controls = ARRAY_SIZE(cs4341_controls),
+ .dapm_widgets = cs4341_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs4341_dapm_widgets),
+ .dapm_routes = cs4341_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs4341_routes),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct of_device_id __maybe_unused cs4341_dt_ids[] = {
+ { .compatible = "cirrus,cs4341a", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cs4341_dt_ids);
+
+static int cs4341_probe(struct device *dev)
+{
+ struct cs4341_priv *cs4341 = dev_get_drvdata(dev);
+ int i;
+
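+	/* Write out the reg_defaults so the device matches the regmap cache. */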
+ for (i = 0; i < ARRAY_SIZE(cs4341_reg_defaults); i++)
+ regmap_write(cs4341->regmap, cs4341_reg_defaults[i].reg,
+ cs4341_reg_defaults[i].def);
+
+ return devm_snd_soc_register_component(dev, &soc_component_cs4341,
+ &cs4341_dai, 1);
+}
+
+#if IS_ENABLED(CONFIG_I2C)
+static int cs4341_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct cs4341_priv *cs4341;
+
+ cs4341 = devm_kzalloc(&i2c->dev, sizeof(*cs4341), GFP_KERNEL);
+ if (!cs4341)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, cs4341);
+
+ cs4341->regcfg.reg_bits = 8;
+ cs4341->regcfg.val_bits = 8;
+ cs4341->regcfg.max_register = CS4341_REG_VOLB;
+ cs4341->regcfg.cache_type = REGCACHE_FLAT;
+ cs4341->regcfg.reg_defaults = cs4341_reg_defaults;
+ cs4341->regcfg.num_reg_defaults = ARRAY_SIZE(cs4341_reg_defaults);
+ cs4341->regmap = devm_regmap_init_i2c(i2c, &cs4341->regcfg);
+ if (IS_ERR(cs4341->regmap))
+ return PTR_ERR(cs4341->regmap);
+
+ return cs4341_probe(&i2c->dev);
+}
+
+static const struct i2c_device_id cs4341_i2c_id[] = {
+ { "cs4341", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cs4341_i2c_id);
+
+static struct i2c_driver cs4341_i2c_driver = {
+ .driver = {
+ .name = "cs4341-i2c",
+ .of_match_table = of_match_ptr(cs4341_dt_ids),
+ },
+ .probe = cs4341_i2c_probe,
+ .id_table = cs4341_i2c_id,
+};
+#endif
+
+#if defined(CONFIG_SPI_MASTER)
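+/*
+ * No readable registers are reported here, so regmap serves all reads from
+ * its cache (REGCACHE_FLAT seeded with cs4341_reg_defaults).
+ */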
+static bool cs4341_reg_readable(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static int cs4341_spi_probe(struct spi_device *spi)
+{
+ struct cs4341_priv *cs4341;
+ int ret;
+
+ cs4341 = devm_kzalloc(&spi->dev, sizeof(*cs4341), GFP_KERNEL);
+ if (!cs4341)
+ return -ENOMEM;
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = 6000000;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, cs4341);
+
+ cs4341->regcfg.reg_bits = 16;
+ cs4341->regcfg.val_bits = 8;
+ cs4341->regcfg.write_flag_mask = 0x20;
+ cs4341->regcfg.max_register = CS4341_REG_VOLB;
+ cs4341->regcfg.cache_type = REGCACHE_FLAT;
+ cs4341->regcfg.readable_reg = cs4341_reg_readable;
+ cs4341->regcfg.reg_defaults = cs4341_reg_defaults;
+ cs4341->regcfg.num_reg_defaults = ARRAY_SIZE(cs4341_reg_defaults);
+ cs4341->regmap = devm_regmap_init_spi(spi, &cs4341->regcfg);
+ if (IS_ERR(cs4341->regmap))
+ return PTR_ERR(cs4341->regmap);
+
+ return cs4341_probe(&spi->dev);
+}
+
+static struct spi_driver cs4341_spi_driver = {
+ .driver = {
+ .name = "cs4341-spi",
+ .of_match_table = of_match_ptr(cs4341_dt_ids),
+ },
+ .probe = cs4341_spi_probe,
+};
+#endif
+
+static int __init cs4341_init(void)
+{
+ int ret = 0;
+
+#if IS_ENABLED(CONFIG_I2C)
+ ret = i2c_add_driver(&cs4341_i2c_driver);
+ if (ret)
+ return ret;
+#endif
+#if defined(CONFIG_SPI_MASTER)
+ ret = spi_register_driver(&cs4341_spi_driver);
+#endif
+
+ return ret;
+}
+module_init(cs4341_init);
+
+static void __exit cs4341_exit(void)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ i2c_del_driver(&cs4341_i2c_driver);
+#endif
+#if defined(CONFIG_SPI_MASTER)
+ spi_unregister_driver(&cs4341_spi_driver);
+#endif
+}
+module_exit(cs4341_exit);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("Cirrus Logic CS4341 ALSA SoC Codec Driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 45e50fe3bf25..b16832a6a9af 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -500,72 +500,72 @@ SND_SOC_DAPM_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 1,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 2,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 3,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 4,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 5,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 6,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 7,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 1,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 2,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 3,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 4,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 5,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 6,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 7,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 1,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX3", NULL, 2,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX4", NULL, 3,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX5", NULL, 4,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX6", NULL, 5,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 1,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX3", NULL, 2,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX4", NULL, 3,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX5", NULL, 4,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX6", NULL, 5,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF3TX1", NULL, 0,
ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 1,
ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 1,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 2c7d5088e6f2..e0964b20a389 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -117,7 +117,7 @@ static void da7219_aad_hptest_work(struct work_struct *work)
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
- u16 tonegen_freq_hptest;
+ __le16 tonegen_freq_hptest;
u8 pll_srm_sts, pll_ctrl, gain_ramp_ctrl, accdet_cfg8;
int report = 0, ret = 0;
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index e46e9f4bc994..121a8190f93e 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -423,7 +423,7 @@ static int da7219_tonegen_freq_get(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mixer_ctrl =
(struct soc_mixer_control *) kcontrol->private_value;
unsigned int reg = mixer_ctrl->reg;
- u16 val;
+ __le16 val;
int ret;
mutex_lock(&da7219->ctrl_lock);
@@ -450,7 +450,7 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mixer_ctrl =
(struct soc_mixer_control *) kcontrol->private_value;
unsigned int reg = mixer_ctrl->reg;
- u16 val;
+ __le16 val;
int ret;
/*
@@ -838,7 +838,7 @@ static int da7219_dai_event(struct snd_soc_dapm_widget *w,
++i;
msleep(50);
}
- } while ((i < DA7219_SRM_CHECK_RETRIES) & (!srm_lock));
+ } while ((i < DA7219_SRM_CHECK_RETRIES) && (!srm_lock));
if (!srm_lock)
dev_warn(component->dev, "SRM failed to lock\n");
@@ -1376,11 +1376,7 @@ static int da7219_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
return -EINVAL;
}
- /* By default 64 BCLKs per WCLK is supported */
- dai_clk_mode |= DA7219_DAI_BCLKS_PER_WCLK_64;
-
snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE,
- DA7219_DAI_BCLKS_PER_WCLK_MASK |
DA7219_DAI_CLK_POL_MASK | DA7219_DAI_WCLK_POL_MASK,
dai_clk_mode);
snd_soc_component_update_bits(component, DA7219_DAI_CTRL, DA7219_DAI_FORMAT_MASK,
@@ -1395,69 +1391,83 @@ static int da7219_set_dai_tdm_slot(struct snd_soc_dai *dai,
{
struct snd_soc_component *component = dai->component;
struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
- u8 dai_bclks_per_wclk;
+ unsigned int ch_mask;
+ u8 dai_bclks_per_wclk, slot_offset;
u16 offset;
+ __le16 dai_offset;
u32 frame_size;
- /* No channels enabled so disable TDM, revert to 64-bit frames */
+ /* No channels enabled so disable TDM */
if (!tx_mask) {
snd_soc_component_update_bits(component, DA7219_DAI_TDM_CTRL,
DA7219_DAI_TDM_CH_EN_MASK |
DA7219_DAI_TDM_MODE_EN_MASK, 0);
- snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE,
- DA7219_DAI_BCLKS_PER_WCLK_MASK,
- DA7219_DAI_BCLKS_PER_WCLK_64);
+ da7219->tdm_en = false;
return 0;
}
/* Check we have valid slots */
- if (fls(tx_mask) > DA7219_DAI_TDM_MAX_SLOTS) {
- dev_err(component->dev, "Invalid number of slots, max = %d\n",
+ slot_offset = ffs(tx_mask) - 1;
+ ch_mask = (tx_mask >> slot_offset);
+ if (fls(ch_mask) > DA7219_DAI_TDM_MAX_SLOTS) {
+ dev_err(component->dev,
+ "Invalid number of slots, max = %d\n",
DA7219_DAI_TDM_MAX_SLOTS);
return -EINVAL;
}
- /* Check we have a valid offset given */
- if (rx_mask > DA7219_DAI_OFFSET_MAX) {
- dev_err(component->dev, "Invalid slot offset, max = %d\n",
- DA7219_DAI_OFFSET_MAX);
+ /*
+ * Ensure we have a valid offset into the frame, based on the slot
+ * width and the offset of the first slot we're interested in.
+ */
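+	/*
+	 * Worked example (hypothetical values): tx_mask = 0x0c with
+	 * slot_width = 32 gives slot_offset = 2, ch_mask = 0x3 and a frame
+	 * offset of 64 BCLKs, which must not exceed DA7219_DAI_OFFSET_MAX.
+	 */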
+ offset = slot_offset * slot_width;
+ if (offset > DA7219_DAI_OFFSET_MAX) {
+ dev_err(component->dev, "Invalid frame offset %d\n", offset);
return -EINVAL;
}
- /* Calculate & validate frame size based on slot info provided. */
- frame_size = slots * slot_width;
- switch (frame_size) {
- case 32:
- dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_32;
- break;
- case 64:
- dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_64;
- break;
- case 128:
- dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_128;
- break;
- case 256:
- dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_256;
- break;
- default:
- dev_err(component->dev, "Invalid frame size %d\n", frame_size);
- return -EINVAL;
- }
+ /*
+ * If we're master, calculate & validate frame size based on slot info
+ * provided as we have a limited set of rates available.
+ */
+ if (da7219->master) {
+ frame_size = slots * slot_width;
+ switch (frame_size) {
+ case 32:
+ dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_32;
+ break;
+ case 64:
+ dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_64;
+ break;
+ case 128:
+ dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_128;
+ break;
+ case 256:
+ dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_256;
+ break;
+ default:
+ dev_err(component->dev, "Invalid frame size %d\n",
+ frame_size);
+ return -EINVAL;
+ }
- snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE,
- DA7219_DAI_BCLKS_PER_WCLK_MASK,
- dai_bclks_per_wclk);
+ snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE,
+ DA7219_DAI_BCLKS_PER_WCLK_MASK,
+ dai_bclks_per_wclk);
+ }
- offset = cpu_to_le16(rx_mask);
+ dai_offset = cpu_to_le16(offset);
regmap_bulk_write(da7219->regmap, DA7219_DAI_OFFSET_LOWER,
- &offset, sizeof(offset));
+ &dai_offset, sizeof(dai_offset));
snd_soc_component_update_bits(component, DA7219_DAI_TDM_CTRL,
DA7219_DAI_TDM_CH_EN_MASK |
DA7219_DAI_TDM_MODE_EN_MASK,
- (tx_mask << DA7219_DAI_TDM_CH_EN_SHIFT) |
+ (ch_mask << DA7219_DAI_TDM_CH_EN_SHIFT) |
DA7219_DAI_TDM_MODE_EN_MASK);
+ da7219->tdm_en = true;
+
return 0;
}
@@ -1466,10 +1476,13 @@ static int da7219_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
- u8 dai_ctrl = 0, fs;
+ struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
+ u8 dai_ctrl = 0, dai_bclks_per_wclk = 0, fs;
unsigned int channels;
+ int word_len = params_width(params);
+ int frame_size;
- switch (params_width(params)) {
+ switch (word_len) {
case 16:
dai_ctrl |= DA7219_DAI_WORD_LENGTH_S16_LE;
break;
@@ -1533,6 +1546,23 @@ static int da7219_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ /*
+ * If we're master, then we have a limited set of BCLK rates we
+ * support. For slave mode this isn't the case and the codec can detect
+ * the BCLK rate automatically.
+ */
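+	/*
+	 * E.g. per the word_len * 2 calculation below: 16-bit words give a
+	 * 32 BCLK frame (DA7219_DAI_BCLKS_PER_WCLK_32), while 24-bit words
+	 * give 48 BCLKs and select DA7219_DAI_BCLKS_PER_WCLK_64.
+	 */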
+ if (da7219->master && !da7219->tdm_en) {
+ frame_size = word_len * 2;
+ if (frame_size <= 32)
+ dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_32;
+ else
+ dai_bclks_per_wclk = DA7219_DAI_BCLKS_PER_WCLK_64;
+
+ snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE,
+ DA7219_DAI_BCLKS_PER_WCLK_MASK,
+ dai_bclks_per_wclk);
+ }
+
snd_soc_component_update_bits(component, DA7219_DAI_CTRL,
DA7219_DAI_WORD_LENGTH_MASK |
DA7219_DAI_CH_NUM_MASK,
@@ -1767,7 +1797,7 @@ static int da7219_dai_clks_prepare(struct clk_hw *hw)
{
struct da7219_priv *da7219 =
container_of(hw, struct da7219_priv, dai_clks_hw);
- struct snd_soc_component *component = da7219->aad->component;
+ struct snd_soc_component *component = da7219->component;
snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE,
DA7219_DAI_CLK_EN_MASK,
@@ -1780,7 +1810,7 @@ static void da7219_dai_clks_unprepare(struct clk_hw *hw)
{
struct da7219_priv *da7219 =
container_of(hw, struct da7219_priv, dai_clks_hw);
- struct snd_soc_component *component = da7219->aad->component;
+ struct snd_soc_component *component = da7219->component;
snd_soc_component_update_bits(component, DA7219_DAI_CLK_MODE,
DA7219_DAI_CLK_EN_MASK, 0);
@@ -1790,7 +1820,7 @@ static int da7219_dai_clks_is_prepared(struct clk_hw *hw)
{
struct da7219_priv *da7219 =
container_of(hw, struct da7219_priv, dai_clks_hw);
- struct snd_soc_component *component = da7219->aad->component;
+ struct snd_soc_component *component = da7219->component;
u8 clk_reg;
clk_reg = snd_soc_component_read32(component, DA7219_DAI_CLK_MODE);
@@ -1798,13 +1828,50 @@ static int da7219_dai_clks_is_prepared(struct clk_hw *hw)
return !!(clk_reg & DA7219_DAI_CLK_EN_MASK);
}
+static unsigned long da7219_dai_clks_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct da7219_priv *da7219 =
+ container_of(hw, struct da7219_priv, dai_clks_hw);
+ struct snd_soc_component *component = da7219->component;
+ u8 fs = snd_soc_component_read32(component, DA7219_SR);
+
+ switch (fs & DA7219_SR_MASK) {
+ case DA7219_SR_8000:
+ return 8000;
+ case DA7219_SR_11025:
+ return 11025;
+ case DA7219_SR_12000:
+ return 12000;
+ case DA7219_SR_16000:
+ return 16000;
+ case DA7219_SR_22050:
+ return 22050;
+ case DA7219_SR_24000:
+ return 24000;
+ case DA7219_SR_32000:
+ return 32000;
+ case DA7219_SR_44100:
+ return 44100;
+ case DA7219_SR_48000:
+ return 48000;
+ case DA7219_SR_88200:
+ return 88200;
+ case DA7219_SR_96000:
+ return 96000;
+ default:
+ return 0;
+ }
+}
+
static const struct clk_ops da7219_dai_clks_ops = {
.prepare = da7219_dai_clks_prepare,
.unprepare = da7219_dai_clks_unprepare,
.is_prepared = da7219_dai_clks_is_prepared,
+ .recalc_rate = da7219_dai_clks_recalc_rate,
};
-static void da7219_register_dai_clks(struct snd_soc_component *component)
+static int da7219_register_dai_clks(struct snd_soc_component *component)
{
struct device *dev = component->dev;
struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
@@ -1812,18 +1879,27 @@ static void da7219_register_dai_clks(struct snd_soc_component *component)
struct clk_init_data init = {};
struct clk *dai_clks;
struct clk_lookup *dai_clks_lookup;
+ const char *parent_name;
+
+ if (da7219->mclk) {
+ parent_name = __clk_get_name(da7219->mclk);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ } else {
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ }
- init.parent_names = NULL;
- init.num_parents = 0;
init.name = pdata->dai_clks_name;
init.ops = &da7219_dai_clks_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
da7219->dai_clks_hw.init = &init;
dai_clks = devm_clk_register(dev, &da7219->dai_clks_hw);
if (IS_ERR(dai_clks)) {
dev_warn(dev, "Failed to register DAI clocks: %ld\n",
PTR_ERR(dai_clks));
- return;
+ return PTR_ERR(dai_clks);
}
da7219->dai_clks = dai_clks;
@@ -1835,13 +1911,18 @@ static void da7219_register_dai_clks(struct snd_soc_component *component)
dai_clks_lookup = clkdev_create(dai_clks, pdata->dai_clks_name,
"%s", dev_name(dev));
if (!dai_clks_lookup)
- dev_warn(dev, "Failed to create DAI clkdev");
+ return -ENOMEM;
else
da7219->dai_clks_lookup = dai_clks_lookup;
}
+
+ return 0;
}
#else
-static inline void da7219_register_dai_clks(struct snd_soc_component *component) {}
+static inline int da7219_register_dai_clks(struct snd_soc_component *component)
+{
+ return 0;
+}
#endif /* CONFIG_COMMON_CLK */
static void da7219_handle_pdata(struct snd_soc_component *component)
@@ -1854,8 +1935,6 @@ static void da7219_handle_pdata(struct snd_soc_component *component)
da7219->wakeup_source = pdata->wakeup_source;
- da7219_register_dai_clks(component);
-
/* Mic Bias voltages */
switch (pdata->micbias_lvl) {
case DA7219_MICBIAS_1_6V:
@@ -1901,6 +1980,7 @@ static int da7219_probe(struct snd_soc_component *component)
unsigned int rev;
int ret;
+ da7219->component = component;
mutex_init(&da7219->ctrl_lock);
mutex_init(&da7219->pll_lock);
@@ -1947,6 +2027,11 @@ static int da7219_probe(struct snd_soc_component *component)
}
}
+ /* Register CCF DAI clock control */
+ ret = da7219_register_dai_clks(component);
+ if (ret)
+ return ret;
+
/* Default PC counter to free-running */
snd_soc_component_update_bits(component, DA7219_PC_COUNT, DA7219_PC_FREERUN_MASK,
DA7219_PC_FREERUN_MASK);
diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h
index 3a006862f0e7..018819c631fb 100644
--- a/sound/soc/codecs/da7219.h
+++ b/sound/soc/codecs/da7219.h
@@ -809,6 +809,7 @@ struct da7219_aad_priv;
/* Private data */
struct da7219_priv {
+ struct snd_soc_component *component;
struct da7219_aad_priv *aad;
struct da7219_pdata *pdata;
@@ -829,6 +830,7 @@ struct da7219_priv {
int clk_src;
bool master;
+ bool tdm_en;
bool alc_en;
bool micbias_on_event;
unsigned int mic_pga_delay;
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index da921da50ef0..de041369e5a7 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -44,8 +44,8 @@ struct dmic {
int modeswitch_delay;
};
-int dmic_daiops_trigger(struct snd_pcm_substream *substream,
- int cmd, struct snd_soc_dai *dai)
+static int dmic_daiops_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
struct dmic *dmic = snd_soc_component_get_drvdata(component);
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index e97d12d578b0..6d4a323f786b 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -15,12 +15,14 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
+#include <sound/jack.h>
#include "es8316.h"
/* In slave mode at single speed, the codec is documented as accepting 5
@@ -33,6 +35,11 @@ static const unsigned int supported_mclk_lrck_ratios[] = {
};
struct es8316_priv {
+ struct mutex lock;
+ struct regmap *regmap;
+ struct snd_soc_component *component;
+ struct snd_soc_jack *jack;
+ int irq;
unsigned int sysclk;
unsigned int allowed_rates[NR_SUPPORTED_MCLK_LRCK_RATIOS];
struct snd_pcm_hw_constraint_list sysclk_constraints;
@@ -94,6 +101,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
SOC_SINGLE("DAC Notch Filter Switch", ES8316_DAC_SET2, 6, 1, 0),
SOC_SINGLE("DAC Double Fs Switch", ES8316_DAC_SET2, 7, 1, 0),
SOC_SINGLE("DAC Stereo Enhancement", ES8316_DAC_SET3, 0, 7, 0),
+ SOC_SINGLE("DAC Mono Mix Switch", ES8316_DAC_SET3, 3, 1, 0),
SOC_ENUM("Capture Polarity", adcpol),
SOC_SINGLE("Mic Boost Switch", ES8316_ADC_D2SEPGA, 0, 1, 0),
@@ -159,8 +167,6 @@ static const char * const es8316_hpmux_texts[] = {
"lin-rin with Boost and PGA"
};
-static const unsigned int es8316_hpmux_values[] = { 0, 1, 2, 3 };
-
static SOC_ENUM_SINGLE_DECL(es8316_left_hpmux_enum, ES8316_HPMIX_SEL,
4, es8316_hpmux_texts);
@@ -191,8 +197,6 @@ static const char * const es8316_dacsrc_texts[] = {
"RDATA TO LDAC, LDATA TO RDAC",
};
-static const unsigned int es8316_dacsrc_values[] = { 0, 1, 2, 3 };
-
static SOC_ENUM_SINGLE_DECL(es8316_dacsrc_mux_enum, ES8316_DAC_SET1,
6, es8316_dacsrc_texts);
@@ -529,8 +533,162 @@ static struct snd_soc_dai_driver es8316_dai = {
.symmetric_rates = 1,
};
+static void es8316_enable_micbias_for_mic_gnd_short_detect(
+ struct snd_soc_component *component)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+
+ snd_soc_dapm_mutex_lock(dapm);
+ snd_soc_dapm_force_enable_pin_unlocked(dapm, "Bias");
+ snd_soc_dapm_force_enable_pin_unlocked(dapm, "Analog power");
+ snd_soc_dapm_force_enable_pin_unlocked(dapm, "Mic Bias");
+ snd_soc_dapm_sync_unlocked(dapm);
+ snd_soc_dapm_mutex_unlock(dapm);
+
+ msleep(20);
+}
+
+static void es8316_disable_micbias_for_mic_gnd_short_detect(
+ struct snd_soc_component *component)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+
+ snd_soc_dapm_mutex_lock(dapm);
+ snd_soc_dapm_disable_pin_unlocked(dapm, "Mic Bias");
+ snd_soc_dapm_disable_pin_unlocked(dapm, "Analog power");
+ snd_soc_dapm_disable_pin_unlocked(dapm, "Bias");
+ snd_soc_dapm_sync_unlocked(dapm);
+ snd_soc_dapm_mutex_unlock(dapm);
+}
+
+static irqreturn_t es8316_irq(int irq, void *data)
+{
+ struct es8316_priv *es8316 = data;
+ struct snd_soc_component *comp = es8316->component;
+ unsigned int flags;
+
+ mutex_lock(&es8316->lock);
+
+ regmap_read(es8316->regmap, ES8316_GPIO_FLAG, &flags);
+ if (flags == 0x00)
+ goto out; /* Powered-down / reset */
+
+ /* Catch spurious IRQ before set_jack is called */
+ if (!es8316->jack)
+ goto out;
+
+ dev_dbg(comp->dev, "gpio flags %#04x\n", flags);
+ if (flags & ES8316_GPIO_FLAG_HP_NOT_INSERTED) {
+ /* Jack removed, or spurious IRQ? */
+ if (es8316->jack->status & SND_JACK_MICROPHONE)
+ es8316_disable_micbias_for_mic_gnd_short_detect(comp);
+
+ if (es8316->jack->status & SND_JACK_HEADPHONE) {
+ snd_soc_jack_report(es8316->jack, 0,
+ SND_JACK_HEADSET | SND_JACK_BTN_0);
+ dev_dbg(comp->dev, "jack unplugged\n");
+ }
+ } else if (!(es8316->jack->status & SND_JACK_HEADPHONE)) {
+ /* Jack inserted, determine type */
+ es8316_enable_micbias_for_mic_gnd_short_detect(comp);
+ regmap_read(es8316->regmap, ES8316_GPIO_FLAG, &flags);
+ dev_dbg(comp->dev, "gpio flags %#04x\n", flags);
+ if (flags & ES8316_GPIO_FLAG_HP_NOT_INSERTED) {
+ /* Jack unplugged underneath us */
+ es8316_disable_micbias_for_mic_gnd_short_detect(comp);
+ } else if (flags & ES8316_GPIO_FLAG_GM_NOT_SHORTED) {
+ /* Open, headset */
+ snd_soc_jack_report(es8316->jack,
+ SND_JACK_HEADSET,
+ SND_JACK_HEADSET);
+ /* Keep mic-gnd-short detection on for button press */
+ } else {
+ /* Shorted, headphones */
+ snd_soc_jack_report(es8316->jack,
+ SND_JACK_HEADPHONE,
+ SND_JACK_HEADSET);
+ /* No longer need mic-gnd-short detection */
+ es8316_disable_micbias_for_mic_gnd_short_detect(comp);
+ }
+ } else if (es8316->jack->status & SND_JACK_MICROPHONE) {
+ /* Interrupt while jack inserted, report button state */
+ if (flags & ES8316_GPIO_FLAG_GM_NOT_SHORTED) {
+ /* Open, button release */
+ snd_soc_jack_report(es8316->jack, 0, SND_JACK_BTN_0);
+ } else {
+ /* Short, button press */
+ snd_soc_jack_report(es8316->jack,
+ SND_JACK_BTN_0,
+ SND_JACK_BTN_0);
+ }
+ }
+
+out:
+ mutex_unlock(&es8316->lock);
+ return IRQ_HANDLED;
+}
+
+static void es8316_enable_jack_detect(struct snd_soc_component *component,
+ struct snd_soc_jack *jack)
+{
+ struct es8316_priv *es8316 = snd_soc_component_get_drvdata(component);
+
+ mutex_lock(&es8316->lock);
+
+ es8316->jack = jack;
+
+ if (es8316->jack->status & SND_JACK_MICROPHONE)
+ es8316_enable_micbias_for_mic_gnd_short_detect(component);
+
+ snd_soc_component_update_bits(component, ES8316_GPIO_DEBOUNCE,
+ ES8316_GPIO_ENABLE_INTERRUPT,
+ ES8316_GPIO_ENABLE_INTERRUPT);
+
+ mutex_unlock(&es8316->lock);
+
+ /* Enable irq and sync initial jack state */
+ enable_irq(es8316->irq);
+ es8316_irq(es8316->irq, es8316);
+}
+
+static void es8316_disable_jack_detect(struct snd_soc_component *component)
+{
+ struct es8316_priv *es8316 = snd_soc_component_get_drvdata(component);
+
+ disable_irq(es8316->irq);
+
+ mutex_lock(&es8316->lock);
+
+ snd_soc_component_update_bits(component, ES8316_GPIO_DEBOUNCE,
+ ES8316_GPIO_ENABLE_INTERRUPT, 0);
+
+ if (es8316->jack->status & SND_JACK_MICROPHONE) {
+ es8316_disable_micbias_for_mic_gnd_short_detect(component);
+ snd_soc_jack_report(es8316->jack, 0, SND_JACK_BTN_0);
+ }
+
+ es8316->jack = NULL;
+
+ mutex_unlock(&es8316->lock);
+}
+
+static int es8316_set_jack(struct snd_soc_component *component,
+ struct snd_soc_jack *jack, void *data)
+{
+ if (jack)
+ es8316_enable_jack_detect(component, jack);
+ else
+ es8316_disable_jack_detect(component);
+
+ return 0;
+}
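+
+/*
+ * Illustrative (hypothetical) machine-driver usage: create a jack able to
+ * report headset, headphone and button events, then hand it to this codec:
+ *
+ *	snd_soc_card_jack_new(card, "Headset",
+ *			      SND_JACK_HEADSET | SND_JACK_BTN_0,
+ *			      &jack, NULL, 0);
+ *	snd_soc_component_set_jack(component, &jack, NULL);
+ *
+ * Passing a NULL jack later disables detection again via
+ * es8316_disable_jack_detect().
+ */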
+
static int es8316_probe(struct snd_soc_component *component)
{
+ struct es8316_priv *es8316 = snd_soc_component_get_drvdata(component);
+
+ es8316->component = component;
+
/* Reset codec and enable current state machine */
snd_soc_component_write(component, ES8316_RESET, 0x3f);
usleep_range(5000, 5500);
@@ -555,6 +713,7 @@ static int es8316_probe(struct snd_soc_component *component)
static const struct snd_soc_component_driver soc_component_dev_es8316 = {
.probe = es8316_probe,
+ .set_jack = es8316_set_jack,
.controls = es8316_snd_controls,
.num_controls = ARRAY_SIZE(es8316_snd_controls),
.dapm_widgets = es8316_dapm_widgets,
@@ -566,18 +725,29 @@ static const struct snd_soc_component_driver soc_component_dev_es8316 = {
.non_legacy_dai_naming = 1,
};
+static const struct regmap_range es8316_volatile_ranges[] = {
+ regmap_reg_range(ES8316_GPIO_FLAG, ES8316_GPIO_FLAG),
+};
+
+static const struct regmap_access_table es8316_volatile_table = {
+ .yes_ranges = es8316_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(es8316_volatile_ranges),
+};
+
static const struct regmap_config es8316_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x53,
+ .volatile_table = &es8316_volatile_table,
.cache_type = REGCACHE_RBTREE,
};
static int es8316_i2c_probe(struct i2c_client *i2c_client,
const struct i2c_device_id *id)
{
+ struct device *dev = &i2c_client->dev;
struct es8316_priv *es8316;
- struct regmap *regmap;
+ int ret;
es8316 = devm_kzalloc(&i2c_client->dev, sizeof(struct es8316_priv),
GFP_KERNEL);
@@ -586,9 +756,23 @@ static int es8316_i2c_probe(struct i2c_client *i2c_client,
i2c_set_clientdata(i2c_client, es8316);
- regmap = devm_regmap_init_i2c(i2c_client, &es8316_regmap);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ es8316->regmap = devm_regmap_init_i2c(i2c_client, &es8316_regmap);
+ if (IS_ERR(es8316->regmap))
+ return PTR_ERR(es8316->regmap);
+
+ es8316->irq = i2c_client->irq;
+ mutex_init(&es8316->lock);
+
+ ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "es8316", es8316);
+ if (ret == 0) {
+ /* Gets re-enabled by es8316_set_jack() */
+ disable_irq(es8316->irq);
+ } else {
+ dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
+ es8316->irq = -ENXIO;
+ }
return devm_snd_soc_register_component(&i2c_client->dev,
&soc_component_dev_es8316,
diff --git a/sound/soc/codecs/es8316.h b/sound/soc/codecs/es8316.h
index 6bcdd63ea459..439a0130cbb7 100644
--- a/sound/soc/codecs/es8316.h
+++ b/sound/soc/codecs/es8316.h
@@ -126,4 +126,11 @@
#define ES8316_SERDATA2_LEN_16 0x0c
#define ES8316_SERDATA2_LEN_32 0x10
+/* ES8316_GPIO_DEBOUNCE */
+#define ES8316_GPIO_ENABLE_INTERRUPT 0x02
+
+/* ES8316_GPIO_FLAG */
+#define ES8316_GPIO_FLAG_GM_NOT_SHORTED 0x02
+#define ES8316_GPIO_FLAG_HP_NOT_INSERTED 0x04
+
#endif
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 3ab2949c1dfa..5eeb0fe836a9 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1176,13 +1176,15 @@ static int hdac_hdmi_add_cvt(struct hdac_device *hdev, hda_nid_t nid)
struct hdac_hdmi_cvt *cvt;
char name[NAME_SIZE];
- cvt = kzalloc(sizeof(*cvt), GFP_KERNEL);
+ cvt = devm_kzalloc(&hdev->dev, sizeof(*cvt), GFP_KERNEL);
if (!cvt)
return -ENOMEM;
cvt->nid = nid;
sprintf(name, "cvt %d", cvt->nid);
- cvt->name = kstrdup(name, GFP_KERNEL);
+ cvt->name = devm_kstrdup(&hdev->dev, name, GFP_KERNEL);
+ if (!cvt->name)
+ return -ENOMEM;
list_add_tail(&cvt->head, &hdmi->cvt_list);
hdmi->num_cvt++;
@@ -1287,8 +1289,8 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
mutex_unlock(&hdmi->pin_mutex);
}
-static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi,
- struct hdac_hdmi_pin *pin)
+static int hdac_hdmi_add_ports(struct hdac_device *hdev,
+ struct hdac_hdmi_pin *pin)
{
struct hdac_hdmi_port *ports;
int max_ports = HDA_MAX_PORTS;
@@ -1300,7 +1302,7 @@ static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi,
* implemented.
*/
- ports = kcalloc(max_ports, sizeof(*ports), GFP_KERNEL);
+ ports = devm_kcalloc(&hdev->dev, max_ports, sizeof(*ports), GFP_KERNEL);
if (!ports)
return -ENOMEM;
@@ -1319,14 +1321,14 @@ static int hdac_hdmi_add_pin(struct hdac_device *hdev, hda_nid_t nid)
struct hdac_hdmi_pin *pin;
int ret;
- pin = kzalloc(sizeof(*pin), GFP_KERNEL);
+ pin = devm_kzalloc(&hdev->dev, sizeof(*pin), GFP_KERNEL);
if (!pin)
return -ENOMEM;
pin->nid = nid;
pin->mst_capable = false;
pin->hdev = hdev;
- ret = hdac_hdmi_add_ports(hdmi, pin);
+ ret = hdac_hdmi_add_ports(hdev, pin);
if (ret < 0)
return ret;
@@ -1468,8 +1470,6 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_device *hdev,
{
hda_nid_t nid;
int i, num_nodes;
- struct hdac_hdmi_cvt *temp_cvt, *cvt_next;
- struct hdac_hdmi_pin *temp_pin, *pin_next;
struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
int ret;
@@ -1497,51 +1497,35 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_device *hdev,
case AC_WID_AUD_OUT:
ret = hdac_hdmi_add_cvt(hdev, nid);
if (ret < 0)
- goto free_widgets;
+ return ret;
break;
case AC_WID_PIN:
ret = hdac_hdmi_add_pin(hdev, nid);
if (ret < 0)
- goto free_widgets;
+ return ret;
break;
}
}
if (!hdmi->num_pin || !hdmi->num_cvt) {
ret = -EIO;
- goto free_widgets;
+ dev_err(&hdev->dev, "Bad pin/cvt setup in %s\n", __func__);
+ return ret;
}
ret = hdac_hdmi_create_dais(hdev, dais, hdmi, hdmi->num_cvt);
if (ret) {
dev_err(&hdev->dev, "Failed to create dais with err: %d\n",
- ret);
- goto free_widgets;
+ ret);
+ return ret;
}
*num_dais = hdmi->num_cvt;
ret = hdac_hdmi_init_dai_map(hdev);
if (ret < 0)
- goto free_widgets;
-
- return ret;
-
-free_widgets:
- list_for_each_entry_safe(temp_cvt, cvt_next, &hdmi->cvt_list, head) {
- list_del(&temp_cvt->head);
- kfree(temp_cvt->name);
- kfree(temp_cvt);
- }
-
- list_for_each_entry_safe(temp_pin, pin_next, &hdmi->pin_list, head) {
- for (i = 0; i < temp_pin->num_ports; i++)
- temp_pin->ports[i].pin = NULL;
- kfree(temp_pin->ports);
- list_del(&temp_pin->head);
- kfree(temp_pin);
- }
-
+ dev_err(&hdev->dev, "Failed to init DAI map with err: %d\n",
+ ret);
return ret;
}
@@ -1782,7 +1766,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
* this is a new PCM device, create new pcm and
* add to the pcm list
*/
- pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
+ pcm = devm_kzalloc(&hdev->dev, sizeof(*pcm), GFP_KERNEL);
if (!pcm)
return -ENOMEM;
pcm->pcm_id = device;
@@ -1798,7 +1782,6 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
dev_err(&hdev->dev,
"chmap control add failed with err: %d for pcm: %d\n",
err, device);
- kfree(pcm);
return err;
}
}
@@ -1890,51 +1873,31 @@ static void hdmi_codec_remove(struct snd_soc_component *component)
pm_runtime_disable(&hdev->dev);
}
-#ifdef CONFIG_PM
-static int hdmi_codec_prepare(struct device *dev)
-{
- struct hdac_device *hdev = dev_to_hdac_dev(dev);
-
- pm_runtime_get_sync(&hdev->dev);
-
- /*
- * Power down afg.
- * codec_read is preferred over codec_write to set the power state.
- * This way verb is send to set the power state and response
- * is received. So setting power state is ensured without using loop
- * to read the state.
- */
- snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
- AC_PWRST_D3);
-
- return 0;
-}
-
-static void hdmi_codec_complete(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int hdmi_codec_resume(struct device *dev)
{
struct hdac_device *hdev = dev_to_hdac_dev(dev);
struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
+ int ret;
- /* Power up afg */
- snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
- AC_PWRST_D0);
-
- hdac_hdmi_skl_enable_all_pins(hdev);
- hdac_hdmi_skl_enable_dp12(hdev);
-
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
/*
* As the ELD notify callback request is not entertained while the
* device is in suspend state. Need to manually check detection of
 * all pins here. Pin capability change is not supported, so use the
* already set pin caps.
+ *
+ * NOTE: this is safe to call even if the codec doesn't actually resume.
+ * The pin check involves only the DRM audio component hooks, so it
+ * works even if the HD-audio side is still dreaming peacefully.
*/
hdac_hdmi_present_sense_all_pins(hdev, hdmi, false);
-
- pm_runtime_put_sync(&hdev->dev);
+ return 0;
}
#else
-#define hdmi_codec_prepare NULL
-#define hdmi_codec_complete NULL
+#define hdmi_codec_resume NULL
#endif
static const struct snd_soc_component_driver hdmi_hda_codec = {
@@ -2095,115 +2058,12 @@ static int hdac_hdmi_dev_probe(struct hdac_device *hdev)
static int hdac_hdmi_dev_remove(struct hdac_device *hdev)
{
- struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
- struct hdac_hdmi_pin *pin, *pin_next;
- struct hdac_hdmi_cvt *cvt, *cvt_next;
- struct hdac_hdmi_pcm *pcm, *pcm_next;
- struct hdac_hdmi_port *port, *port_next;
- int i;
-
snd_hdac_display_power(hdev->bus, hdev->addr, false);
- list_for_each_entry_safe(pcm, pcm_next, &hdmi->pcm_list, head) {
- pcm->cvt = NULL;
- if (list_empty(&pcm->port_list))
- continue;
-
- list_for_each_entry_safe(port, port_next,
- &pcm->port_list, head)
- list_del(&port->head);
-
- list_del(&pcm->head);
- kfree(pcm);
- }
-
- list_for_each_entry_safe(cvt, cvt_next, &hdmi->cvt_list, head) {
- list_del(&cvt->head);
- kfree(cvt->name);
- kfree(cvt);
- }
-
- list_for_each_entry_safe(pin, pin_next, &hdmi->pin_list, head) {
- for (i = 0; i < pin->num_ports; i++)
- pin->ports[i].pin = NULL;
- kfree(pin->ports);
- list_del(&pin->head);
- kfree(pin);
- }
-
return 0;
}
#ifdef CONFIG_PM
-/*
- * Power management sequences
- * ==========================
- *
- * The following explains the PM handling of HDAC HDMI with its parent
- * device SKL and display power usage
- *
- * Probe
- * -----
- * In SKL probe,
- * 1. skl_probe_work() powers up the display (refcount++ -> 1)
- * 2. enumerates the codecs on the link
- * 3. powers down the display (refcount-- -> 0)
- *
- * In HDAC HDMI probe,
- * 1. hdac_hdmi_dev_probe() powers up the display (refcount++ -> 1)
- * 2. probe the codec
- * 3. put the HDAC HDMI device to runtime suspend
- * 4. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
- *
- * Once children are runtime suspended, SKL device also goes to runtime
- * suspend
- *
- * HDMI Playback
- * -------------
- * Open HDMI device,
- * 1. skl_runtime_resume() invoked
- * 2. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
- *
- * Close HDMI device,
- * 1. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
- * 2. skl_runtime_suspend() invoked
- *
- * S0/S3 Cycle with playback in progress
- * -------------------------------------
- * When the device is opened for playback, the device is runtime active
- * already and the display refcount is 1 as explained above.
- *
- * Entering to S3,
- * 1. hdmi_codec_prepare() invoke the runtime resume of codec which just
- * increments the PM runtime usage count of the codec since the device
- * is in use already
- * 2. skl_suspend() powers down the display (refcount-- -> 0)
- *
- * Wakeup from S3,
- * 1. skl_resume() powers up the display (refcount++ -> 1)
- * 2. hdmi_codec_complete() invokes the runtime suspend of codec which just
- * decrements the PM runtime usage count of the codec since the device
- * is in use already
- *
- * Once playback is stopped, the display refcount is set to 0 as explained
- * above in the HDMI playback sequence. The PM handlings are designed in
- * such way that to balance the refcount of display power when the codec
- * device put to S3 while playback is going on.
- *
- * S0/S3 Cycle without playback in progress
- * ----------------------------------------
- * Entering to S3,
- * 1. hdmi_codec_prepare() invoke the runtime resume of codec
- * 2. skl_runtime_resume() invoked
- * 3. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
- * 4. skl_suspend() powers down the display (refcount-- -> 0)
- *
- * Wakeup from S3,
- * 1. skl_resume() powers up the display (refcount++ -> 1)
- * 2. hdmi_codec_complete() invokes the runtime suspend of codec
- * 3. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
- * 4. skl_runtime_suspend() invoked
- */
static int hdac_hdmi_runtime_suspend(struct device *dev)
{
struct hdac_device *hdev = dev_to_hdac_dev(dev);
@@ -2277,8 +2137,7 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
static const struct dev_pm_ops hdac_hdmi_pm = {
SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
- .prepare = hdmi_codec_prepare,
- .complete = hdmi_codec_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, hdmi_codec_resume)
};
static const struct hda_device_id hdmi_list[] = {
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index d00734d31e04..e5b6769b9797 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
if (hcd->spdif)
hcp->daidrv[i] = hdmi_spdif_dai;
+ dev_set_drvdata(dev, hcp);
+
ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
dai_count);
if (ret) {
@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
__func__, ret);
return ret;
}
-
- dev_set_drvdata(dev, hcp);
return 0;
}
diff --git a/sound/soc/codecs/jz4725b.c b/sound/soc/codecs/jz4725b.c
new file mode 100644
index 000000000000..766354c73076
--- /dev/null
+++ b/sound/soc/codecs/jz4725b.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// JZ4725B CODEC driver
+//
+// Copyright (C) 2019, Paul Cercueil <paul@crapouillou.net>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#include <linux/delay.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define ICDC_RGADW_OFFSET 0x00
+#define ICDC_RGDATA_OFFSET 0x04
+
+/* ICDC internal register access control register(RGADW) */
+#define ICDC_RGADW_RGWR BIT(16)
+
+#define ICDC_RGADW_RGADDR_OFFSET 8
+#define ICDC_RGADW_RGADDR_MASK GENMASK(14, ICDC_RGADW_RGADDR_OFFSET)
+
+#define ICDC_RGADW_RGDIN_OFFSET 0
+#define ICDC_RGADW_RGDIN_MASK GENMASK(7, ICDC_RGADW_RGDIN_OFFSET)
+
+/* ICDC internal register data output register (RGDATA)*/
+#define ICDC_RGDATA_IRQ BIT(8)
+
+#define ICDC_RGDATA_RGDOUT_OFFSET 0
+#define ICDC_RGDATA_RGDOUT_MASK GENMASK(7, ICDC_RGDATA_RGDOUT_OFFSET)
+
+/* JZ internal register space */
+enum {
+ JZ4725B_CODEC_REG_AICR,
+ JZ4725B_CODEC_REG_CR1,
+ JZ4725B_CODEC_REG_CR2,
+ JZ4725B_CODEC_REG_CCR1,
+ JZ4725B_CODEC_REG_CCR2,
+ JZ4725B_CODEC_REG_PMR1,
+ JZ4725B_CODEC_REG_PMR2,
+ JZ4725B_CODEC_REG_CRR,
+ JZ4725B_CODEC_REG_ICR,
+ JZ4725B_CODEC_REG_IFR,
+ JZ4725B_CODEC_REG_CGR1,
+ JZ4725B_CODEC_REG_CGR2,
+ JZ4725B_CODEC_REG_CGR3,
+ JZ4725B_CODEC_REG_CGR4,
+ JZ4725B_CODEC_REG_CGR5,
+ JZ4725B_CODEC_REG_CGR6,
+ JZ4725B_CODEC_REG_CGR7,
+ JZ4725B_CODEC_REG_CGR8,
+ JZ4725B_CODEC_REG_CGR9,
+ JZ4725B_CODEC_REG_CGR10,
+ JZ4725B_CODEC_REG_TR1,
+ JZ4725B_CODEC_REG_TR2,
+ JZ4725B_CODEC_REG_CR3,
+ JZ4725B_CODEC_REG_AGC1,
+ JZ4725B_CODEC_REG_AGC2,
+ JZ4725B_CODEC_REG_AGC3,
+ JZ4725B_CODEC_REG_AGC4,
+ JZ4725B_CODEC_REG_AGC5,
+};
+
+#define REG_AICR_CONFIG1_OFFSET 0
+#define REG_AICR_CONFIG1_MASK (0xf << REG_AICR_CONFIG1_OFFSET)
+
+#define REG_CR1_SB_MICBIAS_OFFSET 7
+#define REG_CR1_MONO_OFFSET 6
+#define REG_CR1_DAC_MUTE_OFFSET 5
+#define REG_CR1_HP_DIS_OFFSET 4
+#define REG_CR1_DACSEL_OFFSET 3
+#define REG_CR1_BYPASS_OFFSET 2
+
+#define REG_CR2_DAC_DEEMP_OFFSET 7
+#define REG_CR2_DAC_ADWL_OFFSET 5
+#define REG_CR2_DAC_ADWL_MASK (0x3 << REG_CR2_DAC_ADWL_OFFSET)
+#define REG_CR2_ADC_ADWL_OFFSET 3
+#define REG_CR2_ADC_ADWL_MASK (0x3 << REG_CR2_ADC_ADWL_OFFSET)
+#define REG_CR2_ADC_HPF_OFFSET 2
+
+#define REG_CR3_SB_MIC1_OFFSET 7
+#define REG_CR3_SB_MIC2_OFFSET 6
+#define REG_CR3_SIDETONE1_OFFSET 5
+#define REG_CR3_SIDETONE2_OFFSET 4
+#define REG_CR3_MICDIFF_OFFSET 3
+#define REG_CR3_MICSTEREO_OFFSET 2
+#define REG_CR3_INSEL_OFFSET 0
+#define REG_CR3_INSEL_MASK (0x3 << REG_CR3_INSEL_OFFSET)
+
+#define REG_CCR1_CONFIG4_OFFSET 0
+#define REG_CCR1_CONFIG4_MASK (0xf << REG_CCR1_CONFIG4_OFFSET)
+
+#define REG_CCR2_DFREQ_OFFSET 4
+#define REG_CCR2_DFREQ_MASK (0xf << REG_CCR2_DFREQ_OFFSET)
+#define REG_CCR2_AFREQ_OFFSET 0
+#define REG_CCR2_AFREQ_MASK (0xf << REG_CCR2_AFREQ_OFFSET)
+
+#define REG_PMR1_SB_DAC_OFFSET 7
+#define REG_PMR1_SB_OUT_OFFSET 6
+#define REG_PMR1_SB_MIX_OFFSET 5
+#define REG_PMR1_SB_ADC_OFFSET 4
+#define REG_PMR1_SB_LIN_OFFSET 3
+#define REG_PMR1_SB_IND_OFFSET 0
+
+#define REG_PMR2_LRGI_OFFSET 7
+#define REG_PMR2_RLGI_OFFSET 6
+#define REG_PMR2_LRGOD_OFFSET 5
+#define REG_PMR2_RLGOD_OFFSET 4
+#define REG_PMR2_GIM_OFFSET 3
+#define REG_PMR2_SB_MC_OFFSET 2
+#define REG_PMR2_SB_OFFSET 1
+#define REG_PMR2_SB_SLEEP_OFFSET 0
+
+#define REG_IFR_RAMP_UP_DONE_OFFSET 3
+#define REG_IFR_RAMP_DOWN_DONE_OFFSET 2
+
+#define REG_CGR1_GODL_OFFSET 4
+#define REG_CGR1_GODL_MASK (0xf << REG_CGR1_GODL_OFFSET)
+#define REG_CGR1_GODR_OFFSET 0
+#define REG_CGR1_GODR_MASK (0xf << REG_CGR1_GODR_OFFSET)
+
+#define REG_CGR2_GO1R_OFFSET 0
+#define REG_CGR2_GO1R_MASK (0x1f << REG_CGR2_GO1R_OFFSET)
+
+#define REG_CGR3_GO1L_OFFSET 0
+#define REG_CGR3_GO1L_MASK (0x1f << REG_CGR3_GO1L_OFFSET)
+
+struct jz_icdc {
+ struct regmap *regmap;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_dac_tlv, -2250, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_line_tlv, -1500, 600);
+
+static const struct snd_kcontrol_new jz4725b_codec_controls[] = {
+ SOC_DOUBLE_TLV("Master Playback Volume",
+ JZ4725B_CODEC_REG_CGR1,
+ REG_CGR1_GODL_OFFSET,
+ REG_CGR1_GODR_OFFSET,
+ 0xf, 1, jz4725b_dac_tlv),
+ SOC_DOUBLE_R_TLV("Master Capture Volume",
+ JZ4725B_CODEC_REG_CGR3,
+ JZ4725B_CODEC_REG_CGR2,
+ REG_CGR2_GO1R_OFFSET,
+ 0x1f, 1, jz4725b_line_tlv),
+
+ SOC_SINGLE("Master Playback Switch", JZ4725B_CODEC_REG_CR1,
+ REG_CR1_DAC_MUTE_OFFSET, 1, 1),
+
+ SOC_SINGLE("Deemphasize Filter Playback Switch",
+ JZ4725B_CODEC_REG_CR2,
+ REG_CR2_DAC_DEEMP_OFFSET, 1, 0),
+
+ SOC_SINGLE("High-Pass Filter Capture Switch",
+ JZ4725B_CODEC_REG_CR2,
+ REG_CR2_ADC_HPF_OFFSET, 1, 0),
+};
+
+static const char * const jz4725b_codec_adc_src_texts[] = {
+ "Mic 1", "Mic 2", "Line In", "Mixer",
+};
+static const unsigned int jz4725b_codec_adc_src_values[] = { 0, 1, 2, 3, };
+static SOC_VALUE_ENUM_SINGLE_DECL(jz4725b_codec_adc_src_enum,
+ JZ4725B_CODEC_REG_CR3,
+ REG_CR3_INSEL_OFFSET,
+ REG_CR3_INSEL_MASK,
+ jz4725b_codec_adc_src_texts,
+ jz4725b_codec_adc_src_values);
+static const struct snd_kcontrol_new jz4725b_codec_adc_src_ctrl =
+ SOC_DAPM_ENUM("Route", jz4725b_codec_adc_src_enum);
+
+static const struct snd_kcontrol_new jz4725b_codec_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Line In Bypass", JZ4725B_CODEC_REG_CR1,
+ REG_CR1_BYPASS_OFFSET, 1, 0),
+};
+
+static int jz4725b_out_stage_enable(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *codec = snd_soc_dapm_to_component(w->dapm);
+ struct jz_icdc *icdc = snd_soc_component_get_drvdata(codec);
+ struct regmap *map = icdc->regmap;
+ unsigned int val;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ return regmap_update_bits(map, JZ4725B_CODEC_REG_IFR,
+ BIT(REG_IFR_RAMP_UP_DONE_OFFSET), 0);
+ case SND_SOC_DAPM_POST_PMU:
+ return regmap_read_poll_timeout(map, JZ4725B_CODEC_REG_IFR,
+ val, val & BIT(REG_IFR_RAMP_UP_DONE_OFFSET),
+ 100000, 500000);
+ case SND_SOC_DAPM_PRE_PMD:
+ return regmap_update_bits(map, JZ4725B_CODEC_REG_IFR,
+ BIT(REG_IFR_RAMP_DOWN_DONE_OFFSET), 0);
+ case SND_SOC_DAPM_POST_PMD:
+ return regmap_read_poll_timeout(map, JZ4725B_CODEC_REG_IFR,
+ val, val & BIT(REG_IFR_RAMP_DOWN_DONE_OFFSET),
+ 100000, 500000);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = {
+ /* DAC */
+ SND_SOC_DAPM_DAC("DAC", "Playback",
+ JZ4725B_CODEC_REG_PMR1, REG_PMR1_SB_DAC_OFFSET, 1),
+
+ /* ADC */
+ SND_SOC_DAPM_ADC("ADC", "Capture",
+ JZ4725B_CODEC_REG_PMR1, REG_PMR1_SB_ADC_OFFSET, 1),
+
+ SND_SOC_DAPM_MUX("ADC Source", SND_SOC_NOPM, 0, 0,
+ &jz4725b_codec_adc_src_ctrl),
+
+ /* Mixer */
+ SND_SOC_DAPM_MIXER("Mixer", JZ4725B_CODEC_REG_PMR1,
+ REG_PMR1_SB_MIX_OFFSET, 1,
+ jz4725b_codec_mixer_controls,
+ ARRAY_SIZE(jz4725b_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("DAC to Mixer", JZ4725B_CODEC_REG_CR1,
+ REG_CR1_DACSEL_OFFSET, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("Line In", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("HP Out", JZ4725B_CODEC_REG_CR1,
+ REG_CR1_HP_DIS_OFFSET, 1, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("Mic 1", JZ4725B_CODEC_REG_CR3,
+ REG_CR3_SB_MIC1_OFFSET, 1, NULL, 0),
+ SND_SOC_DAPM_MIXER("Mic 2", JZ4725B_CODEC_REG_CR3,
+ REG_CR3_SB_MIC2_OFFSET, 1, NULL, 0),
+
+ SND_SOC_DAPM_MIXER_E("Out Stage", JZ4725B_CODEC_REG_PMR1,
+ REG_PMR1_SB_OUT_OFFSET, 1, NULL, 0,
+ jz4725b_out_stage_enable,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER("Mixer to ADC", JZ4725B_CODEC_REG_PMR1,
+ REG_PMR1_SB_IND_OFFSET, 1, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("Mic Bias", JZ4725B_CODEC_REG_CR1,
+ REG_CR1_SB_MICBIAS_OFFSET, 1, NULL, 0),
+
+ /* Pins */
+ SND_SOC_DAPM_INPUT("MIC1P"),
+ SND_SOC_DAPM_INPUT("MIC1N"),
+ SND_SOC_DAPM_INPUT("MIC2P"),
+ SND_SOC_DAPM_INPUT("MIC2N"),
+
+ SND_SOC_DAPM_INPUT("LLINEIN"),
+ SND_SOC_DAPM_INPUT("RLINEIN"),
+
+ SND_SOC_DAPM_OUTPUT("LHPOUT"),
+ SND_SOC_DAPM_OUTPUT("RHPOUT"),
+};
+
+static const struct snd_soc_dapm_route jz4725b_codec_dapm_routes[] = {
+ {"Mic 1", NULL, "MIC1P"},
+ {"Mic 1", NULL, "MIC1N"},
+ {"Mic 2", NULL, "MIC2P"},
+ {"Mic 2", NULL, "MIC2N"},
+
+ {"Line In", NULL, "LLINEIN"},
+ {"Line In", NULL, "RLINEIN"},
+
+ {"Mixer", "Line In Bypass", "Line In"},
+ {"DAC to Mixer", NULL, "DAC"},
+ {"Mixer", NULL, "DAC to Mixer"},
+
+ {"Mixer to ADC", NULL, "Mixer"},
+ {"ADC Source", "Mixer", "Mixer to ADC"},
+ {"ADC Source", "Line In", "Line In"},
+ {"ADC Source", "Mic 1", "Mic 1"},
+ {"ADC Source", "Mic 2", "Mic 2"},
+ {"ADC", NULL, "ADC Source"},
+
+ {"Out Stage", NULL, "Mixer"},
+ {"HP Out", NULL, "Out Stage"},
+ {"LHPOUT", NULL, "HP Out"},
+ {"RHPOUT", NULL, "HP Out"},
+};
+
+static int jz4725b_codec_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct jz_icdc *icdc = snd_soc_component_get_drvdata(component);
+ struct regmap *map = icdc->regmap;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ regmap_update_bits(map, JZ4725B_CODEC_REG_PMR2,
+ BIT(REG_PMR2_SB_SLEEP_OFFSET), 0);
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ /* Enable sound hardware */
+ regmap_update_bits(map, JZ4725B_CODEC_REG_PMR2,
+ BIT(REG_PMR2_SB_OFFSET), 0);
+ msleep(224);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(map, JZ4725B_CODEC_REG_PMR2,
+ BIT(REG_PMR2_SB_SLEEP_OFFSET),
+ BIT(REG_PMR2_SB_SLEEP_OFFSET));
+ break;
+ case SND_SOC_BIAS_OFF:
+ regmap_update_bits(map, JZ4725B_CODEC_REG_PMR2,
+ BIT(REG_PMR2_SB_OFFSET),
+ BIT(REG_PMR2_SB_OFFSET));
+ break;
+ }
+
+ return 0;
+}
+
+static int jz4725b_codec_dev_probe(struct snd_soc_component *component)
+{
+ struct jz_icdc *icdc = snd_soc_component_get_drvdata(component);
+ struct regmap *map = icdc->regmap;
+
+ clk_prepare_enable(icdc->clk);
+
+ /* Write CONFIGn (n=1 to 8) bits.
+ * The value 0x0f is specified in the datasheet as a requirement.
+ */
+ regmap_write(map, JZ4725B_CODEC_REG_AICR,
+ 0xf << REG_AICR_CONFIG1_OFFSET);
+ regmap_write(map, JZ4725B_CODEC_REG_CCR1,
+ 0x0 << REG_CCR1_CONFIG4_OFFSET);
+
+ return 0;
+}
+
+static void jz4725b_codec_dev_remove(struct snd_soc_component *component)
+{
+ struct jz_icdc *icdc = snd_soc_component_get_drvdata(component);
+
+ clk_disable_unprepare(icdc->clk);
+}
+
+static const struct snd_soc_component_driver jz4725b_codec = {
+ .probe = jz4725b_codec_dev_probe,
+ .remove = jz4725b_codec_dev_remove,
+ .set_bias_level = jz4725b_codec_set_bias_level,
+ .controls = jz4725b_codec_controls,
+ .num_controls = ARRAY_SIZE(jz4725b_codec_controls),
+ .dapm_widgets = jz4725b_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(jz4725b_codec_dapm_widgets),
+ .dapm_routes = jz4725b_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(jz4725b_codec_dapm_routes),
+ .suspend_bias_off = 1,
+ .use_pmdown_time = 1,
+};
+
+static const unsigned int jz4725b_codec_sample_rates[] = {
+ 96000, 48000, 44100, 32000,
+ 24000, 22050, 16000, 12000,
+ 11025, 9600, 8000,
+};
+
+static int jz4725b_codec_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct jz_icdc *icdc = snd_soc_component_get_drvdata(dai->component);
+ unsigned int rate, bit_width;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bit_width = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S18_3LE:
+ bit_width = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ bit_width = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ bit_width = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
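+ /* Map the requested rate to its index in the table above; that index is
+ * what gets programmed into the CCR2 DFREQ/AFREQ field.
+ */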
+ for (rate = 0; rate < ARRAY_SIZE(jz4725b_codec_sample_rates); rate++) {
+ if (jz4725b_codec_sample_rates[rate] == params_rate(params))
+ break;
+ }
+
+ if (rate == ARRAY_SIZE(jz4725b_codec_sample_rates))
+ return -EINVAL;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(icdc->regmap,
+ JZ4725B_CODEC_REG_CR2,
+ REG_CR2_DAC_ADWL_MASK,
+ bit_width << REG_CR2_DAC_ADWL_OFFSET);
+
+ regmap_update_bits(icdc->regmap,
+ JZ4725B_CODEC_REG_CCR2,
+ REG_CCR2_DFREQ_MASK,
+ rate << REG_CCR2_DFREQ_OFFSET);
+ } else {
+ regmap_update_bits(icdc->regmap,
+ JZ4725B_CODEC_REG_CR2,
+ REG_CR2_ADC_ADWL_MASK,
+ bit_width << REG_CR2_ADC_ADWL_OFFSET);
+
+ regmap_update_bits(icdc->regmap,
+ JZ4725B_CODEC_REG_CCR2,
+ REG_CCR2_AFREQ_MASK,
+ rate << REG_CCR2_AFREQ_OFFSET);
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops jz4725b_codec_dai_ops = {
+ .hw_params = jz4725b_codec_hw_params,
+};
+
+#define JZ_ICDC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_3LE)
+
+static struct snd_soc_dai_driver jz4725b_codec_dai = {
+ .name = "jz4725b-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = JZ_ICDC_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = JZ_ICDC_FORMATS,
+ },
+ .ops = &jz4725b_codec_dai_ops,
+};
+
+static bool jz4725b_codec_volatile(struct device *dev, unsigned int reg)
+{
+ return reg == JZ4725B_CODEC_REG_IFR;
+}
+
+static bool jz4725b_codec_can_access_reg(struct device *dev, unsigned int reg)
+{
+ return (reg != JZ4725B_CODEC_REG_TR1) && (reg != JZ4725B_CODEC_REG_TR2);
+}
+
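+/*
+ * The codec registers are not memory-mapped directly: they are reached
+ * through the AIC's RGADW/RGDATA window, hence the custom regmap
+ * reg_read/reg_write callbacks below.
+ */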
+static int jz4725b_codec_io_wait(struct jz_icdc *icdc)
+{
+ u32 reg;
+
+ return readl_poll_timeout(icdc->base + ICDC_RGADW_OFFSET, reg,
+ !(reg & ICDC_RGADW_RGWR), 1000, 10000);
+}
+
+static int jz4725b_codec_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct jz_icdc *icdc = context;
+ unsigned int i;
+ u32 tmp;
+ int ret;
+
+ ret = jz4725b_codec_io_wait(icdc);
+ if (ret)
+ return ret;
+
+ tmp = readl(icdc->base + ICDC_RGADW_OFFSET);
+ tmp = (tmp & ~ICDC_RGADW_RGADDR_MASK)
+ | (reg << ICDC_RGADW_RGADDR_OFFSET);
+ writel(tmp, icdc->base + ICDC_RGADW_OFFSET);
+
+ /* wait 6+ cycles */
+ for (i = 0; i < 6; i++)
+ *val = readl(icdc->base + ICDC_RGDATA_OFFSET) &
+ ICDC_RGDATA_RGDOUT_MASK;
+
+ return 0;
+}
+
+static int jz4725b_codec_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct jz_icdc *icdc = context;
+ int ret;
+
+ ret = jz4725b_codec_io_wait(icdc);
+ if (ret)
+ return ret;
+
+ writel(ICDC_RGADW_RGWR | (reg << ICDC_RGADW_RGADDR_OFFSET) | val,
+ icdc->base + ICDC_RGADW_OFFSET);
+
+ ret = jz4725b_codec_io_wait(icdc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const u8 jz4725b_codec_reg_defaults[] = {
+ 0x0c, 0xaa, 0x78, 0x00, 0x00, 0xff, 0x03, 0x51,
+ 0x3f, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0xc0, 0x34,
+ 0x07, 0x44, 0x1f, 0x00,
+};
+
+static const struct regmap_config jz4725b_codec_regmap_config = {
+ .reg_bits = 7,
+ .val_bits = 8,
+
+ .max_register = JZ4725B_CODEC_REG_AGC5,
+ .volatile_reg = jz4725b_codec_volatile,
+ .readable_reg = jz4725b_codec_can_access_reg,
+ .writeable_reg = jz4725b_codec_can_access_reg,
+
+ .reg_read = jz4725b_codec_reg_read,
+ .reg_write = jz4725b_codec_reg_write,
+
+ .reg_defaults_raw = jz4725b_codec_reg_defaults,
+ .num_reg_defaults_raw = ARRAY_SIZE(jz4725b_codec_reg_defaults),
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int jz4725b_codec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct jz_icdc *icdc;
+ struct resource *mem;
+ int ret;
+
+ icdc = devm_kzalloc(dev, sizeof(*icdc), GFP_KERNEL);
+ if (!icdc)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ icdc->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(icdc->base))
+ return PTR_ERR(icdc->base);
+
+ icdc->regmap = devm_regmap_init(dev, NULL, icdc,
+ &jz4725b_codec_regmap_config);
+ if (IS_ERR(icdc->regmap))
+ return PTR_ERR(icdc->regmap);
+
+ icdc->clk = devm_clk_get(&pdev->dev, "aic");
+ if (IS_ERR(icdc->clk))
+ return PTR_ERR(icdc->clk);
+
+ platform_set_drvdata(pdev, icdc);
+
+ ret = devm_snd_soc_register_component(dev, &jz4725b_codec,
+ &jz4725b_codec_dai, 1);
+ if (ret)
+ dev_err(dev, "Failed to register codec\n");
+
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id jz4725b_codec_of_matches[] = {
+ { .compatible = "ingenic,jz4725b-codec", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, jz4725b_codec_of_matches);
+#endif
+
+static struct platform_driver jz4725b_codec_driver = {
+ .probe = jz4725b_codec_probe,
+ .driver = {
+ .name = "jz4725b-codec",
+ .of_match_table = of_match_ptr(jz4725b_codec_of_matches),
+ },
+};
+module_platform_driver(jz4725b_codec_driver);
+
+MODULE_DESCRIPTION("JZ4725B SoC internal codec driver");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 9395b583432c..974e17fa1911 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -1,15 +1,8 @@
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// JZ4740 CODEC driver
+//
+// Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -353,10 +346,19 @@ static int jz4740_codec_probe(struct platform_device *pdev)
return ret;
}
+#ifdef CONFIG_OF
+static const struct of_device_id jz4740_codec_of_matches[] = {
+ { .compatible = "ingenic,jz4740-codec", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, jz4740_codec_of_matches);
+#endif
+
static struct platform_driver jz4740_codec_driver = {
.probe = jz4740_codec_probe,
.driver = {
.name = "jz4740-codec",
+ .of_match_table = of_match_ptr(jz4740_codec_of_matches),
},
};
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index c97f21836c66..30c242c38d99 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -314,9 +314,6 @@ static const DECLARE_TLV_DB_SCALE(max98090_av_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(max98090_dvg_tlv, 0, 600, 0);
static const DECLARE_TLV_DB_SCALE(max98090_dv_tlv, -1500, 100, 0);
-static const DECLARE_TLV_DB_SCALE(max98090_sidetone_tlv, -6050, 200, 0);
-
-static const DECLARE_TLV_DB_SCALE(max98090_alc_tlv, -1500, 100, 0);
static const DECLARE_TLV_DB_SCALE(max98090_alcmakeup_tlv, 0, 100, 0);
static const DECLARE_TLV_DB_SCALE(max98090_alccomp_tlv, -3100, 100, 0);
static const DECLARE_TLV_DB_SCALE(max98090_drcexp_tlv, -6600, 100, 0);
@@ -817,18 +814,6 @@ static SOC_ENUM_SINGLE_VIRT_DECL(dmic_mux_enum, dmic_mux_text);
static const struct snd_kcontrol_new max98090_dmic_mux =
SOC_DAPM_ENUM("DMIC Mux", dmic_mux_enum);
-static const char *max98090_micpre_text[] = { "Off", "On" };
-
-static SOC_ENUM_SINGLE_DECL(max98090_pa1en_enum,
- M98090_REG_MIC1_INPUT_LEVEL,
- M98090_MIC_PA1EN_SHIFT,
- max98090_micpre_text);
-
-static SOC_ENUM_SINGLE_DECL(max98090_pa2en_enum,
- M98090_REG_MIC2_INPUT_LEVEL,
- M98090_MIC_PA2EN_SHIFT,
- max98090_micpre_text);
-
/* LINEA mixer switch */
static const struct snd_kcontrol_new max98090_linea_mixer_controls[] = {
SOC_DAPM_SINGLE("IN1 Switch", M98090_REG_LINE_INPUT_CONFIG,
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 9c8616a7b61c..528695cd6a1c 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -408,7 +408,7 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
regmap_update_bits(max98373->regmap,
MAX98373_R20FF_GLOBAL_SHDN,
MAX98373_GLOBAL_EN_MASK, 0);
- max98373->tdm_mode = 0;
+ max98373->tdm_mode = false;
break;
default:
return 0;
@@ -919,9 +919,9 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
/* update interleave mode info */
if (device_property_read_bool(&i2c->dev, "maxim,interleave_mode"))
- max98373->interleave_mode = 1;
+ max98373->interleave_mode = true;
else
- max98373->interleave_mode = 0;
+ max98373->interleave_mode = false;
/* regmap initialization */
diff --git a/sound/soc/codecs/max9860.c b/sound/soc/codecs/max9860.c
index de3d44e9199b..8be636fe6552 100644
--- a/sound/soc/codecs/max9860.c
+++ b/sound/soc/codecs/max9860.c
@@ -615,7 +615,8 @@ static int max9860_probe(struct i2c_client *i2c)
max9860->dvddio_nb.notifier_call = max9860_dvddio_event;
- ret = regulator_register_notifier(max9860->dvddio, &max9860->dvddio_nb);
+ ret = devm_regulator_register_notifier(max9860->dvddio,
+ &max9860->dvddio_nb);
if (ret)
dev_err(dev, "Failed to register DVDDIO notifier: %d\n", ret);
diff --git a/sound/soc/codecs/max98927.c b/sound/soc/codecs/max98927.c
index 065303a46535..e53d2007f3be 100644
--- a/sound/soc/codecs/max98927.c
+++ b/sound/soc/codecs/max98927.c
@@ -505,7 +505,7 @@ static int max98927_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- max98927->tdm_mode = 0;
+ max98927->tdm_mode = false;
break;
case SND_SOC_DAPM_POST_PMU:
regmap_update_bits(max98927->regmap,
@@ -886,11 +886,11 @@ static int max98927_i2c_probe(struct i2c_client *i2c,
if (!of_property_read_u32(i2c->dev.of_node,
"interleave_mode", &value)) {
if (value > 0)
- max98927->interleave_mode = 1;
+ max98927->interleave_mode = true;
else
- max98927->interleave_mode = 0;
+ max98927->interleave_mode = false;
} else
- max98927->interleave_mode = 0;
+ max98927->interleave_mode = false;
/* regmap initialization */
max98927->regmap
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index b7cf7cce95fe..368b6c09474b 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -1,3 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2016, The Linux Foundation. All rights reserved.
+
#include <linux/module.h>
#include <linux/err.h>
#include <linux/kernel.h>
diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
index 3063dedd21cf..a63961861e55 100644
--- a/sound/soc/codecs/msm8916-wcd-digital.c
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -1,14 +1,5 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2016, The Linux Foundation. All rights reserved.
#include <linux/module.h>
#include <linux/err.h>
@@ -220,8 +211,6 @@ static const char *const dec_mux_text[] = {
};
static const char *const cic_mux_text[] = { "AMIC", "DMIC" };
-static const char *const rx_mix2_text[] = { "ZERO", "IIR1", "IIR2" };
-static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
/* RX1 MIX1 */
static const struct soc_enum rx_mix1_inp_enum[] = {
@@ -230,10 +219,6 @@ static const struct soc_enum rx_mix1_inp_enum[] = {
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B2_CTL, 0, 6, rx_mix1_text),
};
-/* RX1 MIX2 */
-static const struct soc_enum rx_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
- LPASS_CDC_CONN_RX1_B3_CTL, 0, 3, rx_mix2_text);
-
/* RX2 MIX1 */
static const struct soc_enum rx2_mix1_inp_enum[] = {
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
@@ -241,10 +226,6 @@ static const struct soc_enum rx2_mix1_inp_enum[] = {
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B2_CTL, 0, 6, rx_mix1_text),
};
-/* RX2 MIX2 */
-static const struct soc_enum rx2_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
- LPASS_CDC_CONN_RX2_B3_CTL, 0, 3, rx_mix2_text);
-
/* RX3 MIX1 */
static const struct soc_enum rx3_mix1_inp_enum[] = {
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
diff --git a/sound/soc/codecs/mt6351.c b/sound/soc/codecs/mt6351.c
index f73dcd753584..4b3ce01c5a93 100644
--- a/sound/soc/codecs/mt6351.c
+++ b/sound/soc/codecs/mt6351.c
@@ -1415,8 +1415,6 @@ static const struct snd_soc_dapm_route mt6351_dapm_routes[] = {
static int mt6351_codec_init_reg(struct snd_soc_component *cmpnt)
{
- int ret = 0;
-
/* Disable CLKSQ 26MHz */
regmap_update_bits(cmpnt->regmap, MT6351_TOP_CLKSQ, 0x0001, 0x0);
/* disable AUDGLB */
@@ -1434,7 +1432,7 @@ static int mt6351_codec_init_reg(struct snd_soc_component *cmpnt)
/* Reverse the PMIC clock*/
regmap_update_bits(cmpnt->regmap, MT6351_AFE_PMIC_NEWIF_CFG2,
0x8000, 0x8000);
- return ret;
+ return 0;
}
static int mt6351_codec_probe(struct snd_soc_component *cmpnt)
diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c
new file mode 100644
index 000000000000..d4c4fee6d3d9
--- /dev/null
+++ b/sound/soc/codecs/mt6358.c
@@ -0,0 +1,2336 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt6358.c -- mt6358 ALSA SoC audio codec driver
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/regulator/consumer.h>
+
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "mt6358.h"
+
+enum {
+ AUDIO_ANALOG_VOLUME_HSOUTL,
+ AUDIO_ANALOG_VOLUME_HSOUTR,
+ AUDIO_ANALOG_VOLUME_HPOUTL,
+ AUDIO_ANALOG_VOLUME_HPOUTR,
+ AUDIO_ANALOG_VOLUME_LINEOUTL,
+ AUDIO_ANALOG_VOLUME_LINEOUTR,
+ AUDIO_ANALOG_VOLUME_MICAMP1,
+ AUDIO_ANALOG_VOLUME_MICAMP2,
+ AUDIO_ANALOG_VOLUME_TYPE_MAX
+};
+
+enum {
+ MUX_ADC_L,
+ MUX_ADC_R,
+ MUX_PGA_L,
+ MUX_PGA_R,
+ MUX_MIC_TYPE,
+ MUX_HP_L,
+ MUX_HP_R,
+ MUX_NUM,
+};
+
+enum {
+ DEVICE_HP,
+ DEVICE_LO,
+ DEVICE_RCV,
+ DEVICE_MIC1,
+ DEVICE_MIC2,
+ DEVICE_NUM
+};
+
+/* Supply widget subseq */
+enum {
+ /* common */
+ SUPPLY_SEQ_CLK_BUF,
+ SUPPLY_SEQ_AUD_GLB,
+ SUPPLY_SEQ_CLKSQ,
+ SUPPLY_SEQ_VOW_AUD_LPW,
+ SUPPLY_SEQ_AUD_VOW,
+ SUPPLY_SEQ_VOW_CLK,
+ SUPPLY_SEQ_VOW_LDO,
+ SUPPLY_SEQ_TOP_CK,
+ SUPPLY_SEQ_TOP_CK_LAST,
+ SUPPLY_SEQ_AUD_TOP,
+ SUPPLY_SEQ_AUD_TOP_LAST,
+ SUPPLY_SEQ_AFE,
+ /* capture */
+ SUPPLY_SEQ_ADC_SUPPLY,
+};
+
+enum {
+ CH_L = 0,
+ CH_R,
+ NUM_CH,
+};
+
+#define REG_STRIDE 2
+
+struct mt6358_priv {
+ struct device *dev;
+ struct regmap *regmap;
+
+ unsigned int dl_rate;
+ unsigned int ul_rate;
+
+ int ana_gain[AUDIO_ANALOG_VOLUME_TYPE_MAX];
+ unsigned int mux_select[MUX_NUM];
+
+ int dev_counter[DEVICE_NUM];
+
+ int mtkaif_protocol;
+
+ struct regulator *avdd_reg;
+};
+
+int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
+ int mtkaif_protocol)
+{
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ priv->mtkaif_protocol = mtkaif_protocol;
+ return 0;
+}
+
+static void playback_gpio_set(struct mt6358_priv *priv)
+{
+ /* set gpio mosi mode */
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2_CLR,
+ 0x01f8, 0x01f8);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2_SET,
+ 0xffff, 0x0249);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2,
+ 0xffff, 0x0249);
+}
+
+static void playback_gpio_reset(struct mt6358_priv *priv)
+{
+ /* set pad_aud_*_mosi to GPIO mode and direction input
+ * reason:
+ * pad_aud_dat_mosi*: the pin doubles as a boot strap
+ * clk/sync are left untouched, as required by mtkaif protocol 2
+ */
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2_CLR,
+ 0x01f8, 0x01f8);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE2,
+ 0x01f8, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_DIR0,
+ 0xf << 8, 0x0);
+}
+
+static void capture_gpio_set(struct mt6358_priv *priv)
+{
+ /* set gpio miso mode */
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3_CLR,
+ 0xffff, 0xffff);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3_SET,
+ 0xffff, 0x0249);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3,
+ 0xffff, 0x0249);
+}
+
+static void capture_gpio_reset(struct mt6358_priv *priv)
+{
+ /* set pad_aud_*_miso to GPIO mode and direction input
+ * reason:
+ * pad_aud_clk_miso: during playback-only use the miso clk
+ * would still toggle at 26M and leak power
+ * pad_aud_dat_miso*: the pin doubles as a boot strap
+ */
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3_CLR,
+ 0xffff, 0xffff);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_DIR0,
+ 0xf << 12, 0x0);
+}
+
+/* use only when not governed by DAPM */
+static int mt6358_set_dcxo(struct mt6358_priv *priv, bool enable)
+{
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW14,
+ 0x1 << RG_XO_AUDIO_EN_M_SFT,
+ (enable ? 1 : 0) << RG_XO_AUDIO_EN_M_SFT);
+ return 0;
+}
+
+/* use only when not governed by DAPM */
+static int mt6358_set_clksq(struct mt6358_priv *priv, bool enable)
+{
+ /* audio clk source from internal dcxo */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON6,
+ RG_CLKSQ_IN_SEL_TEST_MASK_SFT,
+ 0x0);
+
+ /* Enable/disable CLKSQ 26MHz */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON6,
+ RG_CLKSQ_EN_MASK_SFT,
+ (enable ? 1 : 0) << RG_CLKSQ_EN_SFT);
+ return 0;
+}
+
+/* use only when not governed by DAPM */
+static int mt6358_set_aud_global_bias(struct mt6358_priv *priv, bool enable)
+{
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ RG_AUDGLB_PWRDN_VA28_MASK_SFT,
+ (enable ? 0 : 1) << RG_AUDGLB_PWRDN_VA28_SFT);
+ return 0;
+}
+
+/* use only when not governed by DAPM */
+static int mt6358_set_topck(struct mt6358_priv *priv, bool enable)
+{
+ regmap_update_bits(priv->regmap, MT6358_AUD_TOP_CKPDN_CON0,
+ 0x0066, enable ? 0x0 : 0x66);
+ return 0;
+}
+
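+/*
+ * Select the MTKAIF TX format for the configured protocol and bring up the
+ * aud_pad TX fifos.
+ */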
+static int mt6358_mtkaif_tx_enable(struct mt6358_priv *priv)
+{
+ switch (priv->mtkaif_protocol) {
+ case MT6358_MTKAIF_PROTOCOL_2_CLK_P2:
+ /* MTKAIF TX format setting */
+ regmap_update_bits(priv->regmap,
+ MT6358_AFE_ADDA_MTKAIF_CFG0,
+ 0xffff, 0x0010);
+ /* enable aud_pad TX fifos */
+ regmap_update_bits(priv->regmap,
+ MT6358_AFE_AUD_PAD_TOP,
+ 0xff00, 0x3800);
+ regmap_update_bits(priv->regmap,
+ MT6358_AFE_AUD_PAD_TOP,
+ 0xff00, 0x3900);
+ break;
+ case MT6358_MTKAIF_PROTOCOL_2:
+ /* MTKAIF TX format setting */
+ regmap_update_bits(priv->regmap,
+ MT6358_AFE_ADDA_MTKAIF_CFG0,
+ 0xffff, 0x0010);
+ /* enable aud_pad TX fifos */
+ regmap_update_bits(priv->regmap,
+ MT6358_AFE_AUD_PAD_TOP,
+ 0xff00, 0x3100);
+ break;
+ case MT6358_MTKAIF_PROTOCOL_1:
+ default:
+ /* MTKAIF TX format setting */
+ regmap_update_bits(priv->regmap,
+ MT6358_AFE_ADDA_MTKAIF_CFG0,
+ 0xffff, 0x0000);
+ /* enable aud_pad TX fifos */
+ regmap_update_bits(priv->regmap,
+ MT6358_AFE_AUD_PAD_TOP,
+ 0xff00, 0x3100);
+ break;
+ }
+ return 0;
+}
+
+static int mt6358_mtkaif_tx_disable(struct mt6358_priv *priv)
+{
+ /* disable aud_pad TX fifos */
+ regmap_update_bits(priv->regmap, MT6358_AFE_AUD_PAD_TOP,
+ 0xff00, 0x3000);
+ return 0;
+}
+
+int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt)
+{
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ playback_gpio_set(priv);
+ capture_gpio_set(priv);
+ mt6358_mtkaif_tx_enable(priv);
+
+ mt6358_set_dcxo(priv, true);
+ mt6358_set_aud_global_bias(priv, true);
+ mt6358_set_clksq(priv, true);
+ mt6358_set_topck(priv, true);
+
+ /* set dat_miso_loopback on */
+ regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+ RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_MASK_SFT,
+ 1 << RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_SFT);
+ regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+ RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_MASK_SFT,
+ 1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
+ return 0;
+}
+
+int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt)
+{
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ /* set dat_miso_loopback off */
+ regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+ RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_MASK_SFT,
+ 0 << RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_SFT);
+ regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+ RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_MASK_SFT,
+ 0 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT);
+
+ mt6358_set_topck(priv, false);
+ mt6358_set_clksq(priv, false);
+ mt6358_set_aud_global_bias(priv, false);
+ mt6358_set_dcxo(priv, false);
+
+ mt6358_mtkaif_tx_disable(priv);
+ playback_gpio_reset(priv);
+ capture_gpio_reset(priv);
+ return 0;
+}
+
+int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
+ int phase_1, int phase_2)
+{
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+ RG_AUD_PAD_TOP_PHASE_MODE_MASK_SFT,
+ phase_1 << RG_AUD_PAD_TOP_PHASE_MODE_SFT);
+ regmap_update_bits(priv->regmap, MT6358_AUDIO_DIG_CFG,
+ RG_AUD_PAD_TOP_PHASE_MODE2_MASK_SFT,
+ phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT);
+ return 0;
+}
+
+/* dl pga gain */
+enum {
+ DL_GAIN_8DB = 0,
+ DL_GAIN_0DB = 8,
+ DL_GAIN_N_1DB = 9,
+ DL_GAIN_N_10DB = 18,
+ DL_GAIN_N_40DB = 0x1f,
+};
+
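+/* pack the same gain index into both the left and right fields */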
+#define DL_GAIN_N_10DB_REG (DL_GAIN_N_10DB << 7 | DL_GAIN_N_10DB)
+#define DL_GAIN_N_40DB_REG (DL_GAIN_N_40DB << 7 | DL_GAIN_N_40DB)
+#define DL_GAIN_REG_MASK 0x0f9f
+
+static void lo_store_gain(struct mt6358_priv *priv)
+{
+ unsigned int reg;
+ unsigned int gain_l, gain_r;
+
+ regmap_read(priv->regmap, MT6358_ZCD_CON1, &reg);
+ gain_l = (reg >> RG_AUDLOLGAIN_SFT) & RG_AUDLOLGAIN_MASK;
+ gain_r = (reg >> RG_AUDLORGAIN_SFT) & RG_AUDLORGAIN_MASK;
+
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_LINEOUTL] = gain_l;
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_LINEOUTR] = gain_r;
+}
+
+static void hp_store_gain(struct mt6358_priv *priv)
+{
+ unsigned int reg;
+ unsigned int gain_l, gain_r;
+
+ regmap_read(priv->regmap, MT6358_ZCD_CON2, &reg);
+ gain_l = (reg >> RG_AUDHPLGAIN_SFT) & RG_AUDHPLGAIN_MASK;
+ gain_r = (reg >> RG_AUDHPRGAIN_SFT) & RG_AUDHPRGAIN_MASK;
+
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL] = gain_l;
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTR] = gain_r;
+}
+
+static void hp_zcd_disable(struct mt6358_priv *priv)
+{
+ regmap_write(priv->regmap, MT6358_ZCD_CON0, 0x0000);
+}
+
+static void hp_main_output_ramp(struct mt6358_priv *priv, bool up)
+{
+ int i = 0, stage = 0;
+ int target = 7;
+
+ /* Enable/Reduce HPL/R main output stage step by step */
+ for (i = 0; i <= target; i++) {
+ stage = up ? i : target - i;
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+ 0x7 << 8, stage << 8);
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+ 0x7 << 11, stage << 11);
+ usleep_range(100, 150);
+ }
+}
+
+static void hp_aux_feedback_loop_gain_ramp(struct mt6358_priv *priv, bool up)
+{
+ int i = 0, stage = 0;
+
+ /* Ramp HP aux feedback loop gain up/down step by step */
+ for (i = 0; i <= 0xf; i++) {
+ stage = up ? i : 0xf - i;
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+ 0xf << 12, stage << 12);
+ usleep_range(100, 150);
+ }
+}
+
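+/* gradually apply or release the HPL/R pull-down to AVSS28_AUD */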
+static void hp_pull_down(struct mt6358_priv *priv, bool enable)
+{
+ int i;
+
+ if (enable) {
+ for (i = 0x0; i <= 0x6; i++) {
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+ 0x7, i);
+ usleep_range(600, 700);
+ }
+ } else {
+ for (i = 0x6; i >= 0x1; i--) {
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+ 0x7, i);
+ usleep_range(600, 700);
+ }
+ }
+}
+
+static bool is_valid_hp_pga_idx(int reg_idx)
+{
+ return (reg_idx >= DL_GAIN_8DB && reg_idx <= DL_GAIN_N_10DB) ||
+ reg_idx == DL_GAIN_N_40DB;
+}
+
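+/* walk the headphone gain one valid register step at a time */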
+static void headset_volume_ramp(struct mt6358_priv *priv,
+ int from, int to)
+{
+ int offset = 0, count = 1, reg_idx;
+
+ if (!is_valid_hp_pga_idx(from) || !is_valid_hp_pga_idx(to))
+ dev_warn(priv->dev, "%s(), volume index is not valid, from %d, to %d\n",
+ __func__, from, to);
+
+ dev_info(priv->dev, "%s(), from %d, to %d\n",
+ __func__, from, to);
+
+ if (to > from)
+ offset = to - from;
+ else
+ offset = from - to;
+
+ while (offset > 0) {
+ if (to > from)
+ reg_idx = from + count;
+ else
+ reg_idx = from - count;
+
+ if (is_valid_hp_pga_idx(reg_idx)) {
+ regmap_update_bits(priv->regmap,
+ MT6358_ZCD_CON2,
+ DL_GAIN_REG_MASK,
+ (reg_idx << 7) | reg_idx);
+ usleep_range(200, 300);
+ }
+ offset--;
+ count++;
+ }
+}
+
+static const DECLARE_TLV_DB_SCALE(playback_tlv, -1000, 100, 0);
+static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 600, 0);
+
+static const struct snd_kcontrol_new mt6358_snd_controls[] = {
+ /* dl pga gain */
+ SOC_DOUBLE_TLV("Headphone Volume",
+ MT6358_ZCD_CON2, 0, 7, 0x12, 1,
+ playback_tlv),
+ SOC_DOUBLE_TLV("Lineout Volume",
+ MT6358_ZCD_CON1, 0, 7, 0x12, 1,
+ playback_tlv),
+ SOC_SINGLE_TLV("Handset Volume",
+ MT6358_ZCD_CON3, 0, 0x12, 1,
+ playback_tlv),
+ /* ul pga gain */
+ SOC_DOUBLE_R_TLV("PGA Volume",
+ MT6358_AUDENC_ANA_CON0, MT6358_AUDENC_ANA_CON1,
+ 8, 4, 0,
+ pga_tlv),
+};
+
+/* MUX */
+/* LOL MUX */
+static const char * const lo_in_mux_map[] = {
+ "Open", "Mute", "Playback", "Test Mode"
+};
+
+static int lo_in_mux_map_value[] = {
+ 0x0, 0x1, 0x2, 0x3,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(lo_in_mux_map_enum,
+ MT6358_AUDDEC_ANA_CON7,
+ RG_AUDLOLMUXINPUTSEL_VAUDP15_SFT,
+ RG_AUDLOLMUXINPUTSEL_VAUDP15_MASK,
+ lo_in_mux_map,
+ lo_in_mux_map_value);
+
+static const struct snd_kcontrol_new lo_in_mux_control =
+ SOC_DAPM_ENUM("In Select", lo_in_mux_map_enum);
+
+/*HP MUX */
+enum {
+ HP_MUX_OPEN = 0,
+ HP_MUX_HPSPK,
+ HP_MUX_HP,
+ HP_MUX_TEST_MODE,
+ HP_MUX_HP_IMPEDANCE,
+ HP_MUX_MASK = 0x7,
+};
+
+static const char * const hp_in_mux_map[] = {
+ "Open",
+ "LoudSPK Playback",
+ "Audio Playback",
+ "Test Mode",
+ "HP Impedance",
+ "undefined1",
+ "undefined2",
+ "undefined3",
+};
+
+static int hp_in_mux_map_value[] = {
+ HP_MUX_OPEN,
+ HP_MUX_HPSPK,
+ HP_MUX_HP,
+ HP_MUX_TEST_MODE,
+ HP_MUX_HP_IMPEDANCE,
+ HP_MUX_OPEN,
+ HP_MUX_OPEN,
+ HP_MUX_OPEN,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hpl_in_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ HP_MUX_MASK,
+ hp_in_mux_map,
+ hp_in_mux_map_value);
+
+static const struct snd_kcontrol_new hpl_in_mux_control =
+ SOC_DAPM_ENUM("HPL Select", hpl_in_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hpr_in_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ HP_MUX_MASK,
+ hp_in_mux_map,
+ hp_in_mux_map_value);
+
+static const struct snd_kcontrol_new hpr_in_mux_control =
+ SOC_DAPM_ENUM("HPR Select", hpr_in_mux_map_enum);
+
+/* RCV MUX */
+enum {
+ RCV_MUX_OPEN = 0,
+ RCV_MUX_MUTE,
+ RCV_MUX_VOICE_PLAYBACK,
+ RCV_MUX_TEST_MODE,
+ RCV_MUX_MASK = 0x3,
+};
+
+static const char * const rcv_in_mux_map[] = {
+ "Open", "Mute", "Voice Playback", "Test Mode"
+};
+
+static int rcv_in_mux_map_value[] = {
+ RCV_MUX_OPEN,
+ RCV_MUX_MUTE,
+ RCV_MUX_VOICE_PLAYBACK,
+ RCV_MUX_TEST_MODE,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(rcv_in_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ RCV_MUX_MASK,
+ rcv_in_mux_map,
+ rcv_in_mux_map_value);
+
+static const struct snd_kcontrol_new rcv_in_mux_control =
+ SOC_DAPM_ENUM("RCV Select", rcv_in_mux_map_enum);
+
+/* DAC In MUX */
+static const char * const dac_in_mux_map[] = {
+ "Normal Path", "Sgen"
+};
+
+static int dac_in_mux_map_value[] = {
+ 0x0, 0x1,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(dac_in_mux_map_enum,
+ MT6358_AFE_TOP_CON0,
+ DL_SINE_ON_SFT,
+ DL_SINE_ON_MASK,
+ dac_in_mux_map,
+ dac_in_mux_map_value);
+
+static const struct snd_kcontrol_new dac_in_mux_control =
+ SOC_DAPM_ENUM("DAC Select", dac_in_mux_map_enum);
+
+/* AIF Out MUX */
+static SOC_VALUE_ENUM_SINGLE_DECL(aif_out_mux_map_enum,
+ MT6358_AFE_TOP_CON0,
+ UL_SINE_ON_SFT,
+ UL_SINE_ON_MASK,
+ dac_in_mux_map,
+ dac_in_mux_map_value);
+
+static const struct snd_kcontrol_new aif_out_mux_control =
+ SOC_DAPM_ENUM("AIF Out Select", aif_out_mux_map_enum);
+
+/* Mic Type MUX */
+enum {
+ MIC_TYPE_MUX_IDLE = 0,
+ MIC_TYPE_MUX_ACC,
+ MIC_TYPE_MUX_DMIC,
+ MIC_TYPE_MUX_DCC,
+ MIC_TYPE_MUX_DCC_ECM_DIFF,
+ MIC_TYPE_MUX_DCC_ECM_SINGLE,
+ MIC_TYPE_MUX_MASK = 0x7,
+};
+
+#define IS_DCC_BASE(type) ((type) == MIC_TYPE_MUX_DCC || \
+ (type) == MIC_TYPE_MUX_DCC_ECM_DIFF || \
+ (type) == MIC_TYPE_MUX_DCC_ECM_SINGLE)
+
+static const char * const mic_type_mux_map[] = {
+ "Idle",
+ "ACC",
+ "DMIC",
+ "DCC",
+ "DCC_ECM_DIFF",
+ "DCC_ECM_SINGLE",
+};
+
+static int mic_type_mux_map_value[] = {
+ MIC_TYPE_MUX_IDLE,
+ MIC_TYPE_MUX_ACC,
+ MIC_TYPE_MUX_DMIC,
+ MIC_TYPE_MUX_DCC,
+ MIC_TYPE_MUX_DCC_ECM_DIFF,
+ MIC_TYPE_MUX_DCC_ECM_SINGLE,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(mic_type_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ MIC_TYPE_MUX_MASK,
+ mic_type_mux_map,
+ mic_type_mux_map_value);
+
+static const struct snd_kcontrol_new mic_type_mux_control =
+ SOC_DAPM_ENUM("Mic Type Select", mic_type_mux_map_enum);
+
+/* ADC L MUX */
+enum {
+ ADC_MUX_IDLE = 0,
+ ADC_MUX_AIN0,
+ ADC_MUX_PREAMPLIFIER,
+ ADC_MUX_IDLE1,
+ ADC_MUX_MASK = 0x3,
+};
+
+static const char * const adc_left_mux_map[] = {
+ "Idle", "AIN0", "Left Preamplifier", "Idle_1"
+};
+
+static int adc_mux_map_value[] = {
+ ADC_MUX_IDLE,
+ ADC_MUX_AIN0,
+ ADC_MUX_PREAMPLIFIER,
+ ADC_MUX_IDLE1,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(adc_left_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ ADC_MUX_MASK,
+ adc_left_mux_map,
+ adc_mux_map_value);
+
+static const struct snd_kcontrol_new adc_left_mux_control =
+ SOC_DAPM_ENUM("ADC L Select", adc_left_mux_map_enum);
+
+/* ADC R MUX */
+static const char * const adc_right_mux_map[] = {
+ "Idle", "AIN0", "Right Preamplifier", "Idle_1"
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(adc_right_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ ADC_MUX_MASK,
+ adc_right_mux_map,
+ adc_mux_map_value);
+
+static const struct snd_kcontrol_new adc_right_mux_control =
+ SOC_DAPM_ENUM("ADC R Select", adc_right_mux_map_enum);
+
+/* PGA L MUX */
+enum {
+ PGA_MUX_NONE = 0,
+ PGA_MUX_AIN0,
+ PGA_MUX_AIN1,
+ PGA_MUX_AIN2,
+ PGA_MUX_MASK = 0x3,
+};
+
+static const char * const pga_mux_map[] = {
+ "None", "AIN0", "AIN1", "AIN2"
+};
+
+static int pga_mux_map_value[] = {
+ PGA_MUX_NONE,
+ PGA_MUX_AIN0,
+ PGA_MUX_AIN1,
+ PGA_MUX_AIN2,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(pga_left_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ PGA_MUX_MASK,
+ pga_mux_map,
+ pga_mux_map_value);
+
+static const struct snd_kcontrol_new pga_left_mux_control =
+ SOC_DAPM_ENUM("PGA L Select", pga_left_mux_map_enum);
+
+/* PGA R MUX */
+static SOC_VALUE_ENUM_SINGLE_DECL(pga_right_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ PGA_MUX_MASK,
+ pga_mux_map,
+ pga_mux_map_value);
+
+static const struct snd_kcontrol_new pga_right_mux_control =
+ SOC_DAPM_ENUM("PGA R Select", pga_right_mux_map_enum);
+
+static int mt_clksq_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_dbg(priv->dev, "%s(), event = 0x%x\n", __func__, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* audio clk source from internal dcxo */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON6,
+ RG_CLKSQ_IN_SEL_TEST_MASK_SFT,
+ 0x0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mt_sgen_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_dbg(priv->dev, "%s(), event = 0x%x\n", __func__, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* sdm audio fifo clock power on */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0006);
+ /* scrambler clock on enable */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON0, 0xCBA1);
+ /* sdm power on */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0003);
+ /* sdm fifo enable */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x000B);
+
+ regmap_update_bits(priv->regmap, MT6358_AFE_SGEN_CFG0,
+ 0xff3f,
+ 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_AFE_SGEN_CFG1,
+ 0xffff,
+ 0x0001);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* DL scrambler disabling sequence */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0000);
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON0, 0xcba0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mt_aif_in_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_info(priv->dev, "%s(), event 0x%x, rate %d\n",
+ __func__, event, priv->dl_rate);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ playback_gpio_set(priv);
+
+ /* sdm audio fifo clock power on */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0006);
+ /* scrambler clock on enable */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON0, 0xCBA1);
+ /* sdm power on */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0003);
+ /* sdm fifo enable */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x000B);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* DL scrambler disabling sequence */
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON2, 0x0000);
+ regmap_write(priv->regmap, MT6358_AFUNC_AUD_CON0, 0xcba0);
+
+ playback_gpio_reset(priv);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
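+/*
+ * Headphone analog power-up sequence: bring up the NCP and LDO supplies,
+ * enable the HP driver and aux stages, ramp the output and volume, then
+ * switch the HPL/HPR muxes to the audio DAC.
+ */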
+static int mtk_hp_enable(struct mt6358_priv *priv)
+{
+ /* Pull-down HPL/R to AVSS28_AUD */
+ hp_pull_down(priv, true);
+ /* release HP CMFB gate rstb */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+ 0x1 << 6, 0x1 << 6);
+
+ /* Reduce ESD resistance of AU_REFN */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4000);
+
+ /* save target gain to restore after hardware open complete */
+ hp_store_gain(priv);
+ /* Set HPR/HPL gain as minimum (~ -40dB) */
+ regmap_write(priv->regmap, MT6358_ZCD_CON2, DL_GAIN_N_40DB_REG);
+
+ /* Turn on DA_600K_NCP_VA18 */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON1, 0x0001);
+ /* Set NCP clock as 604kHz // 26MHz/43 = 604KHz */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON2, 0x002c);
+ /* Toggle RG_DIVCKS_CHG */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON0, 0x0001);
+ /* Set NCP soft start mode as default mode: 100us */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON4, 0x0003);
+ /* Enable NCP */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3, 0x0000);
+ usleep_range(250, 270);
+
+ /* Enable cap-less LDOs (1.5V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x1055, 0x1055);
+ /* Enable NV regulator (-1.2V) */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x0001);
+ usleep_range(100, 120);
+
+ /* Disable AUD_ZCD */
+ hp_zcd_disable(priv);
+
+ /* Disable headphone short-circuit protection */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x3000);
+
+ /* Enable IBIST */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+
+ /* Set HP DR bias current optimization, 010: 6uA */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON11, 0x4900);
+ /* Set HP & ZCD bias current optimization */
+ /* 01: ZCD: 4uA, HP/HS/LO: 5uA */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+ /* Set HPP/N STB enhance circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4033);
+
+ /* Enable HP aux output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x000c);
+ /* Enable HP aux feedback loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x003c);
+ /* Enable HP aux CMFB loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0c00);
+ /* Enable HP driver bias circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30c0);
+ /* Enable HP driver core circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30f0);
+ /* Short HP main output to HP aux output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x00fc);
+
+ /* Enable HP main CMFB loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0e00);
+ /* Disable HP aux CMFB loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0200);
+
+ /* Select CMFB resistor bulk to AC mode */
+ /* Select HS/LO cap size (6.5pF default) */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON10, 0x0000);
+
+ /* Enable HP main output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x00ff);
+ /* Enable HPR/L main output stage step by step */
+ hp_main_output_ramp(priv, true);
+
+ /* Reduce HP aux feedback loop gain */
+ hp_aux_feedback_loop_gain_ramp(priv, true);
+ /* Disable HP aux feedback loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fcf);
+
+ /* apply volume setting */
+ headset_volume_ramp(priv,
+ DL_GAIN_N_10DB,
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL]);
+
+ /* Disable HP aux output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fc3);
+ /* Unshort HP main output to HP aux output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3f03);
+ usleep_range(100, 120);
+
+ /* Enable AUD_CLK */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13, 0x1, 0x1);
+ /* Enable Audio DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30ff);
+ /* Enable low-noise mode of DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0xf201);
+ usleep_range(100, 120);
+
+ /* Switch HPL MUX to audio DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x32ff);
+ /* Switch HPR MUX to audio DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x3aff);
+
+ /* Disable Pull-down HPL/R to AVSS28_AUD */
+ hp_pull_down(priv, false);
+
+ return 0;
+}
+
+static int mtk_hp_disable(struct mt6358_priv *priv)
+{
+ /* Pull-down HPL/R to AVSS28_AUD */
+ hp_pull_down(priv, true);
+
+ /* HPR/HPL mux to open */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x0f00, 0x0000);
+
+ /* Disable low-noise mode of DAC */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+ 0x0001, 0x0000);
+
+ /* Disable Audio DAC */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x000f, 0x0000);
+
+ /* Disable AUD_CLK */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13, 0x1, 0x0);
+
+ /* Short HP main output to HP aux output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fc3);
+ /* Enable HP aux output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fcf);
+
+ /* decrease HPL/R gain to minimum gain step by step */
+ headset_volume_ramp(priv,
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL],
+ DL_GAIN_N_40DB);
+
+ /* Enable HP aux feedback loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fff);
+
+ /* Reduce HP aux feedback loop gain */
+ hp_aux_feedback_loop_gain_ramp(priv, false);
+
+ /* decrease HPR/L main output stage step by step */
+ hp_main_output_ramp(priv, false);
+
+ /* Disable HP main output stage */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3, 0x0);
+
+ /* Enable HP aux CMFB loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0e00);
+
+ /* Disable HP main CMFB loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0c00);
+
+ /* Unshort HP main output to HP aux output stage */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+ 0x3 << 6, 0x0);
+
+ /* Disable HP driver core circuits */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x3 << 4, 0x0);
+
+ /* Disable HP driver bias circuits */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x3 << 6, 0x0);
+
+ /* Disable HP aux CMFB loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0000);
+
+ /* Disable HP aux feedback loop */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+ 0x3 << 4, 0x0);
+
+ /* Disable HP aux output stage */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1,
+ 0x3 << 2, 0x0);
+
+ /* Disable IBIST */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON12,
+ 0x1 << 8, 0x1 << 8);
+
+ /* Disable NV regulator (-1.2V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x1, 0x0);
+ /* Disable cap-less LDOs (1.5V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x1055, 0x0);
+ /* Disable NCP */
+ regmap_update_bits(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3,
+ 0x1, 0x1);
+
+ /* Increase ESD resistance of AU_REFN */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON2,
+ 0x1 << 14, 0x0);
+
+ /* Set HP CMFB gate rstb */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+ 0x1 << 6, 0x0);
+ /* disable Pull-down HPL/R to AVSS28_AUD */
+ hp_pull_down(priv, false);
+
+ return 0;
+}
+
+static int mtk_hp_spk_enable(struct mt6358_priv *priv)
+{
+ /* Pull-down HPL/R to AVSS28_AUD */
+ hp_pull_down(priv, true);
+ /* release HP CMFB gate rstb */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+ 0x1 << 6, 0x1 << 6);
+
+ /* Reduce ESD resistance of AU_REFN */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4000);
+
+ /* save target gain to restore after hardware open complete */
+ hp_store_gain(priv);
+ /* Set HPR/HPL gain to -10dB */
+ regmap_write(priv->regmap, MT6358_ZCD_CON2, DL_GAIN_N_10DB_REG);
+
+ /* Turn on DA_600K_NCP_VA18 */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON1, 0x0001);
+ /* Set NCP clock as 604kHz // 26MHz/43 = 604KHz */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON2, 0x002c);
+ /* Toggle RG_DIVCKS_CHG */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON0, 0x0001);
+ /* Set NCP soft start mode as default mode: 100us */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON4, 0x0003);
+ /* Enable NCP */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3, 0x0000);
+ usleep_range(250, 270);
+
+ /* Enable cap-less LDOs (1.5V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x1055, 0x1055);
+ /* Enable NV regulator (-1.2V) */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x0001);
+ usleep_range(100, 120);
+
+ /* Disable AUD_ZCD */
+ hp_zcd_disable(priv);
+
+ /* Disable headphone short-circuit protection */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x3000);
+
+ /* Enable IBIST */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+
+ /* Set HP DR bias current optimization, 010: 6uA */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON11, 0x4900);
+ /* Set HP & ZCD bias current optimization */
+ /* 01: ZCD: 4uA, HP/HS/LO: 5uA */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+ /* Set HPP/N STB enhance circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4033);
+
+ /* Disable Pull-down HPL/R to AVSS28_AUD */
+ hp_pull_down(priv, false);
+
+ /* Enable HP driver bias circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30c0);
+ /* Enable HP driver core circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30f0);
+ /* Enable HP main CMFB loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0200);
+
+ /* Select CMFB resistor bulk to AC mode */
+ /* Select HS/LO cap size (6.5pF default) */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON10, 0x0000);
+
+ /* Enable HP main output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x0003);
+ /* Enable HPR/L main output stage step by step */
+ hp_main_output_ramp(priv, true);
+
+ /* Set LO gain as minimum (~ -40dB) */
+ lo_store_gain(priv);
+ regmap_write(priv->regmap, MT6358_ZCD_CON1, DL_GAIN_N_40DB_REG);
+ /* apply volume setting */
+ headset_volume_ramp(priv,
+ DL_GAIN_N_10DB,
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL]);
+
+ /* Set LO STB enhance circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON7, 0x0110);
+ /* Enable LO driver bias circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON7, 0x0112);
+ /* Enable LO driver core circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON7, 0x0113);
+
+ /* Set LOL gain to normal gain step by step */
+ regmap_update_bits(priv->regmap, MT6358_ZCD_CON1,
+ RG_AUDLOLGAIN_MASK_SFT,
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_LINEOUTL] <<
+ RG_AUDLOLGAIN_SFT);
+ regmap_update_bits(priv->regmap, MT6358_ZCD_CON1,
+ RG_AUDLORGAIN_MASK_SFT,
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_LINEOUTR] <<
+ RG_AUDLORGAIN_SFT);
+
+ /* Enable AUD_CLK */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13, 0x1, 0x1);
+ /* Enable Audio DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x30f9);
+ /* Enable low-noise mode of DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0201);
+ /* Switch LOL MUX to audio DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON7, 0x011b);
+ /* Switch HPL/R MUX to Line-out */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x35f9);
+
+ return 0;
+}
+
+static int mtk_hp_spk_disable(struct mt6358_priv *priv)
+{
+ /* HPR/HPL mux to open */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x0f00, 0x0000);
+ /* LOL mux to open */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON7,
+ 0x3 << 2, 0x0000);
+
+ /* Disable Audio DAC */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x000f, 0x0000);
+
+ /* Disable AUD_CLK */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13, 0x1, 0x0);
+
+ /* decrease HPL/R gain to minimum gain step by step */
+ headset_volume_ramp(priv,
+ priv->ana_gain[AUDIO_ANALOG_VOLUME_HPOUTL],
+ DL_GAIN_N_40DB);
+
+ /* decrease LOL gain to minimum gain step by step */
+ regmap_update_bits(priv->regmap, MT6358_ZCD_CON1,
+ DL_GAIN_REG_MASK, DL_GAIN_N_40DB_REG);
+
+ /* decrease HPR/L main output stage step by step */
+ hp_main_output_ramp(priv, false);
+
+ /* Disable HP main output stage */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3, 0x0);
+
+ /* Short HP main output to HP aux output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fc3);
+ /* Enable HP aux output stage */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fcf);
+
+ /* Enable HP aux feedback loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON1, 0x3fff);
+
+ /* Reduce HP aux feedback loop gain */
+ hp_aux_feedback_loop_gain_ramp(priv, false);
+
+ /* Disable HP driver core circuits */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x3 << 4, 0x0);
+ /* Disable LO driver core circuits */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON7,
+ 0x1, 0x0);
+
+ /* Disable HP driver bias circuits */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x3 << 6, 0x0);
+ /* Disable LO driver bias circuits */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON7,
+ 0x1 << 1, 0x0);
+
+ /* Disable HP aux CMFB loop */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+ 0xff << 8, 0x0000);
+
+ /* Disable IBIST */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON12,
+ 0x1 << 8, 0x1 << 8);
+ /* Disable NV regulator (-1.2V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x1, 0x0);
+ /* Disable cap-less LDOs (1.5V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14, 0x1055, 0x0);
+ /* Disable NCP */
+ regmap_update_bits(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3, 0x1, 0x1);
+
+ /* Set HP CMFB gate rstb */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON4,
+ 0x1 << 6, 0x0);
+ /* disable Pull-down HPL/R to AVSS28_AUD */
+ hp_pull_down(priv, false);
+
+ return 0;
+}
+
+static int mt_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+ int device = DEVICE_HP;
+
+ dev_info(priv->dev, "%s(), event 0x%x, dev_counter[DEV_HP] %d, mux %u\n",
+ __func__,
+ event,
+ priv->dev_counter[device],
+ mux);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ priv->dev_counter[device]++;
+ if (priv->dev_counter[device] > 1)
+ break; /* already enabled, do nothing */
+ else if (priv->dev_counter[device] <= 0)
+ dev_warn(priv->dev, "%s(), dev_counter[DEV_HP] %d <= 0\n",
+ __func__,
+ priv->dev_counter[device]);
+
+ priv->mux_select[MUX_HP_L] = mux;
+
+ if (mux == HP_MUX_HP)
+ mtk_hp_enable(priv);
+ else if (mux == HP_MUX_HPSPK)
+ mtk_hp_spk_enable(priv);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ priv->dev_counter[device]--;
+ if (priv->dev_counter[device] > 0) {
+ break; /* still being used, don't close */
+ } else if (priv->dev_counter[device] < 0) {
+ dev_warn(priv->dev, "%s(), dev_counter[DEV_HP] %d < 0\n",
+ __func__,
+ priv->dev_counter[device]);
+ priv->dev_counter[device] = 0;
+ break;
+ }
+
+ if (priv->mux_select[MUX_HP_L] == HP_MUX_HP)
+ mtk_hp_disable(priv);
+ else if (priv->mux_select[MUX_HP_L] == HP_MUX_HPSPK)
+ mtk_hp_spk_disable(priv);
+
+ priv->mux_select[MUX_HP_L] = mux;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mt_rcv_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_info(priv->dev, "%s(), event 0x%x, mux %u\n",
+ __func__,
+ event,
+ dapm_kcontrol_get_value(w->kcontrols[0]));
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Reduce ESD resistance of AU_REFN */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON2, 0x4000);
+
+ /* Turn on DA_600K_NCP_VA18 */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON1, 0x0001);
+ /* Set NCP clock as 604kHz // 26MHz/43 = 604KHz */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON2, 0x002c);
+ /* Toggle RG_DIVCKS_CHG */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON0, 0x0001);
+ /* Set NCP soft start mode as default mode: 100us */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON4, 0x0003);
+ /* Enable NCP */
+ regmap_write(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3, 0x0000);
+ usleep_range(250, 270);
+
+ /* Enable cap-less LDOs (1.5V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x1055, 0x1055);
+ /* Enable NV regulator (-1.2V) */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON15, 0x0001);
+ usleep_range(100, 120);
+
+ /* Disable AUD_ZCD */
+ hp_zcd_disable(priv);
+
+ /* Disable handset short-circuit protection */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x0010);
+
+ /* Enable IBIST */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+ /* Set HP DR bias current optimization, 010: 6uA */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON11, 0x4900);
+ /* Set HP & ZCD bias current optimization */
+ /* 01: ZCD: 4uA, HP/HS/LO: 5uA */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON12, 0x0055);
+ /* Set HS STB enhance circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x0090);
+
+ /* Disable HP main CMFB loop */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0000);
+ /* Select CMFB resistor bulk to AC mode */
+ /* Select HS/LO cap size (6.5pF default) */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON10, 0x0000);
+
+ /* Enable HS driver bias circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x0092);
+ /* Enable HS driver core circuits */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x0093);
+
+ /* Enable AUD_CLK */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ 0x1, 0x1);
+
+ /* Enable Audio DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON0, 0x0009);
+ /* Enable low-noise mode of DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON9, 0x0001);
+ /* Switch HS MUX to audio DAC */
+ regmap_write(priv->regmap, MT6358_AUDDEC_ANA_CON6, 0x009b);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ /* HS mux to open */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON6,
+ RG_AUDHSMUXINPUTSEL_VAUDP15_MASK_SFT,
+ RCV_MUX_OPEN);
+
+ /* Disable Audio DAC */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ 0x000f, 0x0000);
+
+ /* Disable AUD_CLK */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ 0x1, 0x0);
+
+ /* decrease HS gain to minimum gain step by step */
+ regmap_write(priv->regmap, MT6358_ZCD_CON3, DL_GAIN_N_40DB);
+
+ /* Disable HS driver core circuits */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON6,
+ 0x1, 0x0);
+
+ /* Disable HS driver bias circuits */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON6,
+ 0x1 << 1, 0x0000);
+
+ /* Disable HP aux CMFB loop */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+ 0xff << 8, 0x0);
+
+ /* Enable HP main CMFB Switch */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON9,
+ 0xff << 8, 0x2 << 8);
+
+ /* Disable IBIST */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON12,
+ 0x1 << 8, 0x1 << 8);
+
+ /* Disable NV regulator (-1.2V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON15,
+ 0x1, 0x0);
+ /* Disable cap-less LDOs (1.5V) */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x1055, 0x0);
+ /* Disable NCP */
+ regmap_update_bits(priv->regmap, MT6358_AUDNCP_CLKDIV_CON3,
+ 0x1, 0x1);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mt_aif_out_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_dbg(priv->dev, "%s(), event 0x%x, rate %d\n",
+ __func__, event, priv->ul_rate);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ capture_gpio_set(priv);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ capture_gpio_reset(priv);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mt_adc_supply_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_dbg(priv->dev, "%s(), event 0x%x\n",
+ __func__, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Enable audio ADC CLKGEN */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ 0x1 << 5, 0x1 << 5);
+ /* ADC CLK from CLKGEN (13MHz) */
+ regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON3,
+ 0x0000);
+ /* Enable LCLDO_ENC 1P8V */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x2500, 0x0100);
+ /* LCLDO_ENC remote sense */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x2500, 0x2500);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* LCLDO_ENC remote sense off */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x2500, 0x0100);
+ /* disable LCLDO_ENC 1P8V */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON14,
+ 0x2500, 0x0000);
+
+ /* ADC CLK from CLKGEN (13MHz) */
+ regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON3, 0x0000);
+ /* disable audio ADC CLKGEN */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ 0x1 << 5, 0x0 << 5);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
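+/*
+ * Analog microphone capture path: set up the mic bias, route and enable the
+ * preamplifiers and ADCs (with a DCC precharge sequence for DCC-based mic
+ * types), then enable the digital UL path.
+ */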
+static int mt6358_amic_enable(struct mt6358_priv *priv)
+{
+ unsigned int mic_type = priv->mux_select[MUX_MIC_TYPE];
+ unsigned int mux_pga_l = priv->mux_select[MUX_PGA_L];
+ unsigned int mux_pga_r = priv->mux_select[MUX_PGA_R];
+
+ dev_info(priv->dev, "%s(), mux, mic %u, pga l %u, pga r %u\n",
+ __func__, mic_type, mux_pga_l, mux_pga_r);
+
+ if (IS_DCC_BASE(mic_type)) {
+ /* DCC 50k CLK (from 26M) */
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2060);
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2061);
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG1, 0x0100);
+ }
+
+ /* mic bias 0 */
+ if (mux_pga_l == PGA_MUX_AIN0 || mux_pga_l == PGA_MUX_AIN2 ||
+ mux_pga_r == PGA_MUX_AIN0 || mux_pga_r == PGA_MUX_AIN2) {
+ switch (mic_type) {
+ case MIC_TYPE_MUX_DCC_ECM_DIFF:
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+ 0xff00, 0x7700);
+ break;
+ case MIC_TYPE_MUX_DCC_ECM_SINGLE:
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+ 0xff00, 0x1100);
+ break;
+ default:
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+ 0xff00, 0x0000);
+ break;
+ }
+ /* Enable MICBIAS0, MICBIAS0 = 1P9V */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+ 0xff, 0x21);
+ }
+
+ /* mic bias 1 */
+ if (mux_pga_l == PGA_MUX_AIN1 || mux_pga_r == PGA_MUX_AIN1) {
+ /* Enable MICBIAS1, MICBIAS1 = 2P6V */
+ if (mic_type == MIC_TYPE_MUX_DCC_ECM_SINGLE)
+ regmap_write(priv->regmap,
+ MT6358_AUDENC_ANA_CON10, 0x0161);
+ else
+ regmap_write(priv->regmap,
+ MT6358_AUDENC_ANA_CON10, 0x0061);
+ }
+
+ if (IS_DCC_BASE(mic_type)) {
+ /* Audio L/R preamplifier DCC precharge */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ 0xf8ff, 0x0004);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0xf8ff, 0x0004);
+ } else {
+ /* reset reg */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ 0xf8ff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0xf8ff, 0x0000);
+ }
+
+ if (mux_pga_l != PGA_MUX_NONE) {
+ /* L preamplifier input sel */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ RG_AUDPREAMPLINPUTSEL_MASK_SFT,
+ mux_pga_l << RG_AUDPREAMPLINPUTSEL_SFT);
+
+ /* L preamplifier enable */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ RG_AUDPREAMPLON_MASK_SFT,
+ 0x1 << RG_AUDPREAMPLON_SFT);
+
+ if (IS_DCC_BASE(mic_type)) {
+ /* L preamplifier DCCEN */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ RG_AUDPREAMPLDCCEN_MASK_SFT,
+ 0x1 << RG_AUDPREAMPLDCCEN_SFT);
+ }
+
+ /* L ADC input sel : L PGA. Enable audio L ADC */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ RG_AUDADCLINPUTSEL_MASK_SFT,
+ ADC_MUX_PREAMPLIFIER <<
+ RG_AUDADCLINPUTSEL_SFT);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ RG_AUDADCLPWRUP_MASK_SFT,
+ 0x1 << RG_AUDADCLPWRUP_SFT);
+ }
+
+ if (mux_pga_r != PGA_MUX_NONE) {
+ /* R preamplifier input sel */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ RG_AUDPREAMPRINPUTSEL_MASK_SFT,
+ mux_pga_r << RG_AUDPREAMPRINPUTSEL_SFT);
+
+ /* R preamplifier enable */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ RG_AUDPREAMPRON_MASK_SFT,
+ 0x1 << RG_AUDPREAMPRON_SFT);
+
+ if (IS_DCC_BASE(mic_type)) {
+ /* R preamplifier DCCEN */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ RG_AUDPREAMPRDCCEN_MASK_SFT,
+ 0x1 << RG_AUDPREAMPRDCCEN_SFT);
+ }
+
+ /* R ADC input sel : R PGA. Enable audio R ADC */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ RG_AUDADCRINPUTSEL_MASK_SFT,
+ ADC_MUX_PREAMPLIFIER <<
+ RG_AUDADCRINPUTSEL_SFT);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ RG_AUDADCRPWRUP_MASK_SFT,
+ 0x1 << RG_AUDADCRPWRUP_SFT);
+ }
+
+ if (IS_DCC_BASE(mic_type)) {
+ usleep_range(100, 150);
+ /* Audio L preamplifier DCC precharge off */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ RG_AUDPREAMPLDCPRECHARGE_MASK_SFT, 0x0);
+ /* Audio R preamplifier DCC precharge off */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ RG_AUDPREAMPRDCPRECHARGE_MASK_SFT, 0x0);
+
+ /* Short body to ground in PGA */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON3,
+ 0x1 << 12, 0x0);
+ }
+
+ /* here to set digital part */
+ mt6358_mtkaif_tx_enable(priv);
+
+ /* UL dmic setting off */
+ regmap_write(priv->regmap, MT6358_AFE_UL_SRC_CON0_H, 0x0000);
+
+ /* UL turn on */
+ regmap_write(priv->regmap, MT6358_AFE_UL_SRC_CON0_L, 0x0001);
+
+ return 0;
+}
+
+static void mt6358_amic_disable(struct mt6358_priv *priv)
+{
+ unsigned int mic_type = priv->mux_select[MUX_MIC_TYPE];
+ unsigned int mux_pga_l = priv->mux_select[MUX_PGA_L];
+ unsigned int mux_pga_r = priv->mux_select[MUX_PGA_R];
+
+ dev_info(priv->dev, "%s(), mux, mic %u, pga l %u, pga r %u\n",
+ __func__, mic_type, mux_pga_l, mux_pga_r);
+
+ /* UL turn off */
+ regmap_update_bits(priv->regmap, MT6358_AFE_UL_SRC_CON0_L,
+ 0x0001, 0x0000);
+
+ /* disable aud_pad TX fifos */
+ mt6358_mtkaif_tx_disable(priv);
+
+ /* L ADC input sel : off, disable L ADC */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ 0xf000, 0x0000);
+ /* L preamplifier DCCEN */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ 0x1 << 1, 0x0);
+ /* L preamplifier input sel : off, L PGA 0 dB gain */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ 0xfffb, 0x0000);
+
+ /* disable L preamplifier DCC precharge */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON0,
+ 0x1 << 2, 0x0);
+
+ /* R ADC input sel : off, disable R ADC */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0xf000, 0x0000);
+ /* R preamplifier DCCEN */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0x1 << 1, 0x0);
+ /* R preamplifier input sel : off, R PGA 0 dB gain */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0x0ffb, 0x0000);
+
+ /* disable R preamplifier DCC precharge */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0x1 << 2, 0x0);
+
+ /* mic bias */
+ /* Disable MICBIAS0, MICBIAS0 = 1P7V */
+ regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON9, 0x0000);
+
+ /* Disable MICBIAS1 */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON10,
+ 0x0001, 0x0000);
+
+ if (IS_DCC_BASE(mic_type)) {
+ /* dcclk_gen_on=1'b0 */
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2060);
+ /* dcclk_pdn=1'b1 */
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+ /* dcclk_ref_ck_sel=2'b00 */
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+ /* dcclk_div=11'b00100000011 */
+ regmap_write(priv->regmap, MT6358_AFE_DCCLK_CFG0, 0x2062);
+ }
+}
+
+static int mt6358_dmic_enable(struct mt6358_priv *priv)
+{
+ dev_info(priv->dev, "%s()\n", __func__);
+
+ /* mic bias */
+ /* Enable MICBIAS0, MICBIAS0 = 1P9V */
+ regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON9, 0x0021);
+
+ /* RG_BANDGAPGEN=1'b0 */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON10,
+ 0x1 << 12, 0x0);
+
+ /* DMIC enable */
+ regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON8, 0x0005);
+
+ /* here to set digital part */
+ mt6358_mtkaif_tx_enable(priv);
+
+ /* UL dmic setting */
+ regmap_write(priv->regmap, MT6358_AFE_UL_SRC_CON0_H, 0x0080);
+
+ /* UL turn on */
+ regmap_write(priv->regmap, MT6358_AFE_UL_SRC_CON0_L, 0x0003);
+ return 0;
+}
+
+static void mt6358_dmic_disable(struct mt6358_priv *priv)
+{
+ dev_info(priv->dev, "%s()\n", __func__);
+
+ /* UL turn off */
+ regmap_update_bits(priv->regmap, MT6358_AFE_UL_SRC_CON0_L,
+ 0x0003, 0x0000);
+
+ /* disable aud_pad TX fifos */
+ mt6358_mtkaif_tx_disable(priv);
+
+ /* DMIC disable */
+ regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON8, 0x0000);
+
+ /* mic bias */
+ /* MICBIAS0 = 1P7V */
+ regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON9, 0x0001);
+
+ /* RG_BANDGAPGEN=1'b0 */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON10,
+ 0x1 << 12, 0x0);
+
+ /* MICBIAS0 disable */
+ regmap_write(priv->regmap, MT6358_AUDENC_ANA_CON9, 0x0000);
+}
+
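+/*
+ * The "Mic Type Mux" selection decides which capture path is powered:
+ * DMIC uses the digital mic path, every other type the analog path.
+ * WILL_PMU records the new selection before any widget powers up;
+ * POST_PMD tears down whichever path was previously active.
+ */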
+static int mt_mic_type_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+ dev_dbg(priv->dev, "%s(), event 0x%x, mux %u\n",
+ __func__, event, mux);
+
+ switch (event) {
+ case SND_SOC_DAPM_WILL_PMU:
+ priv->mux_select[MUX_MIC_TYPE] = mux;
+ break;
+ case SND_SOC_DAPM_PRE_PMU:
+ switch (mux) {
+ case MIC_TYPE_MUX_DMIC:
+ mt6358_dmic_enable(priv);
+ break;
+ default:
+ mt6358_amic_enable(priv);
+ break;
+ }
+
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ switch (priv->mux_select[MUX_MIC_TYPE]) {
+ case MIC_TYPE_MUX_DMIC:
+ mt6358_dmic_disable(priv);
+ break;
+ default:
+ mt6358_amic_disable(priv);
+ break;
+ }
+
+ priv->mux_select[MUX_MIC_TYPE] = mux;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mt_adc_l_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+ dev_dbg(priv->dev, "%s(), event = 0x%x, mux %u\n",
+ __func__, event, mux);
+
+ priv->mux_select[MUX_ADC_L] = mux;
+
+ return 0;
+}
+
+static int mt_adc_r_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+ dev_dbg(priv->dev, "%s(), event = 0x%x, mux %u\n",
+ __func__, event, mux);
+
+ priv->mux_select[MUX_ADC_R] = mux;
+
+ return 0;
+}
+
+static int mt_pga_left_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+ dev_dbg(priv->dev, "%s(), event = 0x%x, mux %u\n",
+ __func__, event, mux);
+
+ priv->mux_select[MUX_PGA_L] = mux;
+
+ return 0;
+}
+
+static int mt_pga_right_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
+
+ dev_dbg(priv->dev, "%s(), event = 0x%x, mux %u\n",
+ __func__, event, mux);
+
+ priv->mux_select[MUX_PGA_R] = mux;
+
+ return 0;
+}
+
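+/* Wait ~250us after power-up and before power-down of the supplies using this */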
+static int mt_delay_250_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(250, 270);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ usleep_range(250, 270);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* DAPM Widgets */
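+/*
+ * Supplies declared with SND_SOC_DAPM_SUPPLY_S() carry a SUPPLY_SEQ_*
+ * subsequence number that controls the order in which DAPM powers them
+ * up and down.  An invert flag of 1 means the register bit is a
+ * power-down control (the *_PDN / *PWRDN bits).
+ */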
+static const struct snd_soc_dapm_widget mt6358_dapm_widgets[] = {
+ /* Global Supply */
+ SND_SOC_DAPM_SUPPLY_S("CLK_BUF", SUPPLY_SEQ_CLK_BUF,
+ MT6358_DCXO_CW14,
+ RG_XO_AUDIO_EN_M_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AUDGLB", SUPPLY_SEQ_AUD_GLB,
+ MT6358_AUDDEC_ANA_CON13,
+ RG_AUDGLB_PWRDN_VA28_SFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("CLKSQ Audio", SUPPLY_SEQ_CLKSQ,
+ MT6358_AUDENC_ANA_CON6,
+ RG_CLKSQ_EN_SFT, 0,
+ mt_clksq_event,
+ SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_SUPPLY_S("AUDNCP_CK", SUPPLY_SEQ_TOP_CK,
+ MT6358_AUD_TOP_CKPDN_CON0,
+ RG_AUDNCP_CK_PDN_SFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ZCD13M_CK", SUPPLY_SEQ_TOP_CK,
+ MT6358_AUD_TOP_CKPDN_CON0,
+ RG_ZCD13M_CK_PDN_SFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AUD_CK", SUPPLY_SEQ_TOP_CK_LAST,
+ MT6358_AUD_TOP_CKPDN_CON0,
+ RG_AUD_CK_PDN_SFT, 1,
+ mt_delay_250_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_SUPPLY_S("AUDIF_CK", SUPPLY_SEQ_TOP_CK,
+ MT6358_AUD_TOP_CKPDN_CON0,
+ RG_AUDIF_CK_PDN_SFT, 1, NULL, 0),
+
+ /* Digital Clock */
+ SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_AFE_CTL", SUPPLY_SEQ_AUD_TOP_LAST,
+ MT6358_AUDIO_TOP_CON0,
+ PDN_AFE_CTL_SFT, 1,
+ mt_delay_250_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_DAC_CTL", SUPPLY_SEQ_AUD_TOP,
+ MT6358_AUDIO_TOP_CON0,
+ PDN_DAC_CTL_SFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_ADC_CTL", SUPPLY_SEQ_AUD_TOP,
+ MT6358_AUDIO_TOP_CON0,
+ PDN_ADC_CTL_SFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_I2S_DL", SUPPLY_SEQ_AUD_TOP,
+ MT6358_AUDIO_TOP_CON0,
+ PDN_I2S_DL_CTL_SFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_PWR_CLK", SUPPLY_SEQ_AUD_TOP,
+ MT6358_AUDIO_TOP_CON0,
+ PWR_CLK_DIS_CTL_SFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_PDN_AFE_TESTMODEL", SUPPLY_SEQ_AUD_TOP,
+ MT6358_AUDIO_TOP_CON0,
+ PDN_AFE_TESTMODEL_CTL_SFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AUDIO_TOP_PDN_RESERVED", SUPPLY_SEQ_AUD_TOP,
+ MT6358_AUDIO_TOP_CON0,
+ PDN_RESERVED_SFT, 1, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("DL Digital Clock", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+
+ /* AFE ON */
+ SND_SOC_DAPM_SUPPLY_S("AFE_ON", SUPPLY_SEQ_AFE,
+ MT6358_AFE_UL_DL_CON0, AFE_ON_SFT, 0,
+ NULL, 0),
+
+ /* AIF Rx */
+ SND_SOC_DAPM_AIF_IN_E("AIF_RX", "AIF1 Playback", 0,
+ MT6358_AFE_DL_SRC2_CON0_L,
+ DL_2_SRC_ON_TMP_CTL_PRE_SFT, 0,
+ mt_aif_in_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* DL Supply */
+ SND_SOC_DAPM_SUPPLY("DL Power Supply", SND_SOC_NOPM,
+ 0, 0, NULL, 0),
+
+ /* DAC */
+ SND_SOC_DAPM_MUX("DAC In Mux", SND_SOC_NOPM, 0, 0, &dac_in_mux_control),
+
+ SND_SOC_DAPM_DAC("DACL", NULL, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_DAC("DACR", NULL, SND_SOC_NOPM, 0, 0),
+
+ /* LOL */
+ SND_SOC_DAPM_MUX("LOL Mux", SND_SOC_NOPM, 0, 0, &lo_in_mux_control),
+
+ SND_SOC_DAPM_SUPPLY("LO Stability Enh", MT6358_AUDDEC_ANA_CON7,
+ RG_LOOUTPUTSTBENH_VAUDP15_SFT, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUT_DRV("LOL Buffer", MT6358_AUDDEC_ANA_CON7,
+ RG_AUDLOLPWRUP_VAUDP15_SFT, 0, NULL, 0),
+
+ /* Headphone */
+ SND_SOC_DAPM_MUX_E("HPL Mux", SND_SOC_NOPM, 0, 0,
+ &hpl_in_mux_control,
+ mt_hp_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_MUX_E("HPR Mux", SND_SOC_NOPM, 0, 0,
+ &hpr_in_mux_control,
+ mt_hp_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_PRE_PMD),
+
+ /* Receiver */
+ SND_SOC_DAPM_MUX_E("RCV Mux", SND_SOC_NOPM, 0, 0,
+ &rcv_in_mux_control,
+ mt_rcv_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_PRE_PMD),
+
+ /* Outputs */
+ SND_SOC_DAPM_OUTPUT("Receiver"),
+ SND_SOC_DAPM_OUTPUT("Headphone L"),
+ SND_SOC_DAPM_OUTPUT("Headphone R"),
+ SND_SOC_DAPM_OUTPUT("Headphone L Ext Spk Amp"),
+ SND_SOC_DAPM_OUTPUT("Headphone R Ext Spk Amp"),
+ SND_SOC_DAPM_OUTPUT("LINEOUT L"),
+ SND_SOC_DAPM_OUTPUT("LINEOUT L HSSPK"),
+
+ /* SGEN */
+ SND_SOC_DAPM_SUPPLY("SGEN DL Enable", MT6358_AFE_SGEN_CFG0,
+ SGEN_DAC_EN_CTL_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("SGEN MUTE", MT6358_AFE_SGEN_CFG0,
+ SGEN_MUTE_SW_CTL_SFT, 1,
+ mt_sgen_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("SGEN DL SRC", MT6358_AFE_DL_SRC2_CON0_L,
+ DL_2_SRC_ON_TMP_CTL_PRE_SFT, 0, NULL, 0),
+
+ SND_SOC_DAPM_INPUT("SGEN DL"),
+
+ /* Uplinks */
+ SND_SOC_DAPM_AIF_OUT_E("AIF1TX", "AIF1 Capture", 0,
+ SND_SOC_NOPM, 0, 0,
+ mt_aif_out_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY_S("ADC Supply", SUPPLY_SEQ_ADC_SUPPLY,
+ SND_SOC_NOPM, 0, 0,
+ mt_adc_supply_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* Uplinks MUX */
+ SND_SOC_DAPM_MUX("AIF Out Mux", SND_SOC_NOPM, 0, 0,
+ &aif_out_mux_control),
+
+ SND_SOC_DAPM_MUX_E("Mic Type Mux", SND_SOC_NOPM, 0, 0,
+ &mic_type_mux_control,
+ mt_mic_type_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_WILL_PMU),
+
+ SND_SOC_DAPM_MUX_E("ADC L Mux", SND_SOC_NOPM, 0, 0,
+ &adc_left_mux_control,
+ mt_adc_l_event,
+ SND_SOC_DAPM_WILL_PMU),
+ SND_SOC_DAPM_MUX_E("ADC R Mux", SND_SOC_NOPM, 0, 0,
+ &adc_right_mux_control,
+ mt_adc_r_event,
+ SND_SOC_DAPM_WILL_PMU),
+
+ SND_SOC_DAPM_ADC("ADC L", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_ADC("ADC R", NULL, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_MUX_E("PGA L Mux", SND_SOC_NOPM, 0, 0,
+ &pga_left_mux_control,
+ mt_pga_left_event,
+ SND_SOC_DAPM_WILL_PMU),
+ SND_SOC_DAPM_MUX_E("PGA R Mux", SND_SOC_NOPM, 0, 0,
+ &pga_right_mux_control,
+ mt_pga_right_event,
+ SND_SOC_DAPM_WILL_PMU),
+
+ SND_SOC_DAPM_PGA("PGA L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("PGA R", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* UL input */
+ SND_SOC_DAPM_INPUT("AIN0"),
+ SND_SOC_DAPM_INPUT("AIN1"),
+ SND_SOC_DAPM_INPUT("AIN2"),
+};
+
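+/*
+ * Each route is {sink, control, source}; a NULL control means the
+ * connection is unconditional.
+ */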
+static const struct snd_soc_dapm_route mt6358_dapm_routes[] = {
+ /* Capture */
+ {"AIF1TX", NULL, "AIF Out Mux"},
+ {"AIF1TX", NULL, "CLK_BUF"},
+ {"AIF1TX", NULL, "AUDGLB"},
+ {"AIF1TX", NULL, "CLKSQ Audio"},
+
+ {"AIF1TX", NULL, "AUD_CK"},
+ {"AIF1TX", NULL, "AUDIF_CK"},
+
+ {"AIF1TX", NULL, "AUDIO_TOP_AFE_CTL"},
+ {"AIF1TX", NULL, "AUDIO_TOP_ADC_CTL"},
+ {"AIF1TX", NULL, "AUDIO_TOP_PWR_CLK"},
+ {"AIF1TX", NULL, "AUDIO_TOP_PDN_RESERVED"},
+ {"AIF1TX", NULL, "AUDIO_TOP_I2S_DL"},
+
+ {"AIF1TX", NULL, "AFE_ON"},
+
+ {"AIF Out Mux", NULL, "Mic Type Mux"},
+
+ {"Mic Type Mux", "ACC", "ADC L"},
+ {"Mic Type Mux", "ACC", "ADC R"},
+ {"Mic Type Mux", "DCC", "ADC L"},
+ {"Mic Type Mux", "DCC", "ADC R"},
+ {"Mic Type Mux", "DCC_ECM_DIFF", "ADC L"},
+ {"Mic Type Mux", "DCC_ECM_DIFF", "ADC R"},
+ {"Mic Type Mux", "DCC_ECM_SINGLE", "ADC L"},
+ {"Mic Type Mux", "DCC_ECM_SINGLE", "ADC R"},
+ {"Mic Type Mux", "DMIC", "AIN0"},
+ {"Mic Type Mux", "DMIC", "AIN2"},
+
+ {"ADC L", NULL, "ADC L Mux"},
+ {"ADC L", NULL, "ADC Supply"},
+ {"ADC R", NULL, "ADC R Mux"},
+ {"ADC R", NULL, "ADC Supply"},
+
+ {"ADC L Mux", "Left Preamplifier", "PGA L"},
+
+ {"ADC R Mux", "Right Preamplifier", "PGA R"},
+
+ {"PGA L", NULL, "PGA L Mux"},
+ {"PGA R", NULL, "PGA R Mux"},
+
+ {"PGA L Mux", "AIN0", "AIN0"},
+ {"PGA L Mux", "AIN1", "AIN1"},
+ {"PGA L Mux", "AIN2", "AIN2"},
+
+ {"PGA R Mux", "AIN0", "AIN0"},
+ {"PGA R Mux", "AIN1", "AIN1"},
+ {"PGA R Mux", "AIN2", "AIN2"},
+
+ /* DL Supply */
+ {"DL Power Supply", NULL, "CLK_BUF"},
+ {"DL Power Supply", NULL, "AUDGLB"},
+ {"DL Power Supply", NULL, "CLKSQ Audio"},
+
+ {"DL Power Supply", NULL, "AUDNCP_CK"},
+ {"DL Power Supply", NULL, "ZCD13M_CK"},
+ {"DL Power Supply", NULL, "AUD_CK"},
+ {"DL Power Supply", NULL, "AUDIF_CK"},
+
+ /* DL Digital Supply */
+ {"DL Digital Clock", NULL, "AUDIO_TOP_AFE_CTL"},
+ {"DL Digital Clock", NULL, "AUDIO_TOP_DAC_CTL"},
+ {"DL Digital Clock", NULL, "AUDIO_TOP_PWR_CLK"},
+
+ {"DL Digital Clock", NULL, "AFE_ON"},
+
+ {"AIF_RX", NULL, "DL Digital Clock"},
+
+ /* DL Path */
+ {"DAC In Mux", "Normal Path", "AIF_RX"},
+
+ {"DAC In Mux", "Sgen", "SGEN DL"},
+ {"SGEN DL", NULL, "SGEN DL SRC"},
+ {"SGEN DL", NULL, "SGEN MUTE"},
+ {"SGEN DL", NULL, "SGEN DL Enable"},
+ {"SGEN DL", NULL, "DL Digital Clock"},
+ {"SGEN DL", NULL, "AUDIO_TOP_PDN_AFE_TESTMODEL"},
+
+ {"DACL", NULL, "DAC In Mux"},
+ {"DACL", NULL, "DL Power Supply"},
+
+ {"DACR", NULL, "DAC In Mux"},
+ {"DACR", NULL, "DL Power Supply"},
+
+ /* Lineout Path */
+ {"LOL Mux", "Playback", "DACL"},
+
+ {"LOL Buffer", NULL, "LOL Mux"},
+ {"LOL Buffer", NULL, "LO Stability Enh"},
+
+ {"LINEOUT L", NULL, "LOL Buffer"},
+
+ /* Headphone Path */
+ {"HPL Mux", "Audio Playback", "DACL"},
+ {"HPR Mux", "Audio Playback", "DACR"},
+ {"HPL Mux", "HP Impedance", "DACL"},
+ {"HPR Mux", "HP Impedance", "DACR"},
+ {"HPL Mux", "LoudSPK Playback", "DACL"},
+ {"HPR Mux", "LoudSPK Playback", "DACR"},
+
+ {"Headphone L", NULL, "HPL Mux"},
+ {"Headphone R", NULL, "HPR Mux"},
+ {"Headphone L Ext Spk Amp", NULL, "HPL Mux"},
+ {"Headphone R Ext Spk Amp", NULL, "HPR Mux"},
+ {"LINEOUT L HSSPK", NULL, "HPL Mux"},
+
+ /* Receiver Path */
+ {"RCV Mux", "Voice Playback", "DACL"},
+ {"Receiver", NULL, "RCV Mux"},
+};
+
+static int mt6358_codec_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *cmpnt = dai->component;
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int rate = params_rate(params);
+
+ dev_info(priv->dev, "%s(), substream->stream %d, rate %d, number %d\n",
+ __func__,
+ substream->stream,
+ rate,
+ substream->number);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ priv->dl_rate = rate;
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ priv->ul_rate = rate;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mt6358_codec_dai_ops = {
+ .hw_params = mt6358_codec_dai_hw_params,
+};
+
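+/* 16/24/32-bit PCM, signed and unsigned, little- and big-endian */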
+#define MT6358_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |\
+ SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE |\
+ SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE |\
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE |\
+ SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE)
+
+static struct snd_soc_dai_driver mt6358_dai_driver[] = {
+ {
+ .name = "mt6358-snd-codec-aif1",
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = MT6358_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_48000,
+ .formats = MT6358_FORMATS,
+ },
+ .ops = &mt6358_codec_dai_ops,
+ },
+};
+
+static int mt6358_codec_init_reg(struct mt6358_priv *priv)
+{
+ int ret = 0;
+
+ /* Disable HeadphoneL/HeadphoneR short circuit protection */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ RG_AUDHPLSCDISABLE_VAUDP15_MASK_SFT,
+ 0x1 << RG_AUDHPLSCDISABLE_VAUDP15_SFT);
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON0,
+ RG_AUDHPRSCDISABLE_VAUDP15_MASK_SFT,
+ 0x1 << RG_AUDHPRSCDISABLE_VAUDP15_SFT);
+ /* Disable voice short circuit protection */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON6,
+ RG_AUDHSSCDISABLE_VAUDP15_MASK_SFT,
+ 0x1 << RG_AUDHSSCDISABLE_VAUDP15_SFT);
+ /* disable LO buffer left short circuit protection */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON7,
+ RG_AUDLOLSCDISABLE_VAUDP15_MASK_SFT,
+ 0x1 << RG_AUDLOLSCDISABLE_VAUDP15_SFT);
+
+ /* accdet s/w enable */
+ regmap_update_bits(priv->regmap, MT6358_ACCDET_CON13,
+ 0xFFFF, 0x700E);
+
+ /* gpio miso driving set to 4mA */
+ regmap_write(priv->regmap, MT6358_DRV_CON3, 0x8888);
+
+ /* set gpio */
+ playback_gpio_reset(priv);
+ capture_gpio_reset(priv);
+
+ return ret;
+}
+
+static int mt6358_codec_probe(struct snd_soc_component *cmpnt)
+{
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(cmpnt);
+ int ret;
+
+ snd_soc_component_init_regmap(cmpnt, priv->regmap);
+
+ mt6358_codec_init_reg(priv);
+
+ priv->avdd_reg = devm_regulator_get(priv->dev, "Avdd");
+ if (IS_ERR(priv->avdd_reg)) {
+ dev_err(priv->dev, "%s() has no Avdd supply\n", __func__);
+ return PTR_ERR(priv->avdd_reg);
+ }
+
+ ret = regulator_enable(priv->avdd_reg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver mt6358_soc_component_driver = {
+ .probe = mt6358_codec_probe,
+ .controls = mt6358_snd_controls,
+ .num_controls = ARRAY_SIZE(mt6358_snd_controls),
+ .dapm_widgets = mt6358_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt6358_dapm_widgets),
+ .dapm_routes = mt6358_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt6358_dapm_routes),
+};
+
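+/*
+ * Probed as a child of the MediaTek PMIC MFD: the regmap is borrowed
+ * from the parent mt6397_chip rather than created here.
+ */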
+static int mt6358_platform_driver_probe(struct platform_device *pdev)
+{
+ struct mt6358_priv *priv;
+ struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct mt6358_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ priv->dev = &pdev->dev;
+
+ priv->regmap = mt6397->regmap;
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ dev_info(priv->dev, "%s(), dev name %s\n",
+ __func__, dev_name(&pdev->dev));
+
+ return devm_snd_soc_register_component(&pdev->dev,
+ &mt6358_soc_component_driver,
+ mt6358_dai_driver,
+ ARRAY_SIZE(mt6358_dai_driver));
+}
+
+static const struct of_device_id mt6358_of_match[] = {
+ {.compatible = "mediatek,mt6358-sound",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6358_of_match);
+
+static struct platform_driver mt6358_platform_driver = {
+ .driver = {
+ .name = "mt6358-sound",
+ .of_match_table = mt6358_of_match,
+ },
+ .probe = mt6358_platform_driver_probe,
+};
+
+module_platform_driver(mt6358_platform_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT6358 ALSA SoC codec driver");
+MODULE_AUTHOR("KaiChieh Chuang <kaichieh.chuang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/mt6358.h b/sound/soc/codecs/mt6358.h
new file mode 100644
index 000000000000..a5953315eaa2
--- /dev/null
+++ b/sound/soc/codecs/mt6358.h
@@ -0,0 +1,2314 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt6358.h -- mt6358 ALSA SoC audio codec driver
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef __MT6358_H__
+#define __MT6358_H__
+
+/* Reg bit define */
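+/*
+ * Naming convention: *_SFT is a field's bit offset, *_MASK its unshifted
+ * mask, and *_MASK_SFT the mask already shifted into place (MASK << SFT).
+ */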
+/* MT6358_DCXO_CW14 */
+#define RG_XO_AUDIO_EN_M_SFT 13
+
+/* MT6358_DCXO_CW13 */
+#define RG_XO_VOW_EN_SFT 8
+
+/* MT6358_AUD_TOP_CKPDN_CON0 */
+#define RG_VOW13M_CK_PDN_SFT 13
+#define RG_VOW13M_CK_PDN_MASK 0x1
+#define RG_VOW13M_CK_PDN_MASK_SFT (0x1 << 13)
+#define RG_VOW32K_CK_PDN_SFT 12
+#define RG_VOW32K_CK_PDN_MASK 0x1
+#define RG_VOW32K_CK_PDN_MASK_SFT (0x1 << 12)
+#define RG_AUD_INTRP_CK_PDN_SFT 8
+#define RG_AUD_INTRP_CK_PDN_MASK 0x1
+#define RG_AUD_INTRP_CK_PDN_MASK_SFT (0x1 << 8)
+#define RG_PAD_AUD_CLK_MISO_CK_PDN_SFT 7
+#define RG_PAD_AUD_CLK_MISO_CK_PDN_MASK 0x1
+#define RG_PAD_AUD_CLK_MISO_CK_PDN_MASK_SFT (0x1 << 7)
+#define RG_AUDNCP_CK_PDN_SFT 6
+#define RG_AUDNCP_CK_PDN_MASK 0x1
+#define RG_AUDNCP_CK_PDN_MASK_SFT (0x1 << 6)
+#define RG_ZCD13M_CK_PDN_SFT 5
+#define RG_ZCD13M_CK_PDN_MASK 0x1
+#define RG_ZCD13M_CK_PDN_MASK_SFT (0x1 << 5)
+#define RG_AUDIF_CK_PDN_SFT 2
+#define RG_AUDIF_CK_PDN_MASK 0x1
+#define RG_AUDIF_CK_PDN_MASK_SFT (0x1 << 2)
+#define RG_AUD_CK_PDN_SFT 1
+#define RG_AUD_CK_PDN_MASK 0x1
+#define RG_AUD_CK_PDN_MASK_SFT (0x1 << 1)
+#define RG_ACCDET_CK_PDN_SFT 0
+#define RG_ACCDET_CK_PDN_MASK 0x1
+#define RG_ACCDET_CK_PDN_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUD_TOP_CKPDN_CON0_SET */
+#define RG_AUD_TOP_CKPDN_CON0_SET_SFT 0
+#define RG_AUD_TOP_CKPDN_CON0_SET_MASK 0x3fff
+#define RG_AUD_TOP_CKPDN_CON0_SET_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AUD_TOP_CKPDN_CON0_CLR */
+#define RG_AUD_TOP_CKPDN_CON0_CLR_SFT 0
+#define RG_AUD_TOP_CKPDN_CON0_CLR_MASK 0x3fff
+#define RG_AUD_TOP_CKPDN_CON0_CLR_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AUD_TOP_CKSEL_CON0 */
+#define RG_AUDIF_CK_CKSEL_SFT 3
+#define RG_AUDIF_CK_CKSEL_MASK 0x1
+#define RG_AUDIF_CK_CKSEL_MASK_SFT (0x1 << 3)
+#define RG_AUD_CK_CKSEL_SFT 2
+#define RG_AUD_CK_CKSEL_MASK 0x1
+#define RG_AUD_CK_CKSEL_MASK_SFT (0x1 << 2)
+
+/* MT6358_AUD_TOP_CKSEL_CON0_SET */
+#define RG_AUD_TOP_CKSEL_CON0_SET_SFT 0
+#define RG_AUD_TOP_CKSEL_CON0_SET_MASK 0xf
+#define RG_AUD_TOP_CKSEL_CON0_SET_MASK_SFT (0xf << 0)
+
+/* MT6358_AUD_TOP_CKSEL_CON0_CLR */
+#define RG_AUD_TOP_CKSEL_CON0_CLR_SFT 0
+#define RG_AUD_TOP_CKSEL_CON0_CLR_MASK 0xf
+#define RG_AUD_TOP_CKSEL_CON0_CLR_MASK_SFT (0xf << 0)
+
+/* MT6358_AUD_TOP_CKTST_CON0 */
+#define RG_VOW13M_CK_TSTSEL_SFT 9
+#define RG_VOW13M_CK_TSTSEL_MASK 0x1
+#define RG_VOW13M_CK_TSTSEL_MASK_SFT (0x1 << 9)
+#define RG_VOW13M_CK_TST_DIS_SFT 8
+#define RG_VOW13M_CK_TST_DIS_MASK 0x1
+#define RG_VOW13M_CK_TST_DIS_MASK_SFT (0x1 << 8)
+#define RG_AUD26M_CK_TSTSEL_SFT 4
+#define RG_AUD26M_CK_TSTSEL_MASK 0x1
+#define RG_AUD26M_CK_TSTSEL_MASK_SFT (0x1 << 4)
+#define RG_AUDIF_CK_TSTSEL_SFT 3
+#define RG_AUDIF_CK_TSTSEL_MASK 0x1
+#define RG_AUDIF_CK_TSTSEL_MASK_SFT (0x1 << 3)
+#define RG_AUD_CK_TSTSEL_SFT 2
+#define RG_AUD_CK_TSTSEL_MASK 0x1
+#define RG_AUD_CK_TSTSEL_MASK_SFT (0x1 << 2)
+#define RG_AUD26M_CK_TST_DIS_SFT 0
+#define RG_AUD26M_CK_TST_DIS_MASK 0x1
+#define RG_AUD26M_CK_TST_DIS_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUD_TOP_CLK_HWEN_CON0 */
+#define RG_AUD_INTRP_CK_PDN_HWEN_SFT 0
+#define RG_AUD_INTRP_CK_PDN_HWEN_MASK 0x1
+#define RG_AUD_INTRP_CK_PDN_HWEN_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUD_TOP_CLK_HWEN_CON0_SET */
+#define RG_AUD_INTRP_CK_PND_HWEN_CON0_SET_SFT 0
+#define RG_AUD_INTRP_CK_PND_HWEN_CON0_SET_MASK 0xffff
+#define RG_AUD_INTRP_CK_PND_HWEN_CON0_SET_MASK_SFT (0xffff << 0)
+
+/* MT6358_AUD_TOP_CLK_HWEN_CON0_CLR */
+#define RG_AUD_INTRP_CLK_PDN_HWEN_CON0_CLR_SFT 0
+#define RG_AUD_INTRP_CLK_PDN_HWEN_CON0_CLR_MASK 0xffff
+#define RG_AUD_INTRP_CLK_PDN_HWEN_CON0_CLR_MASK_SFT (0xffff << 0)
+
+/* MT6358_AUD_TOP_RST_CON0 */
+#define RG_AUDNCP_RST_SFT 3
+#define RG_AUDNCP_RST_MASK 0x1
+#define RG_AUDNCP_RST_MASK_SFT (0x1 << 3)
+#define RG_ZCD_RST_SFT 2
+#define RG_ZCD_RST_MASK 0x1
+#define RG_ZCD_RST_MASK_SFT (0x1 << 2)
+#define RG_ACCDET_RST_SFT 1
+#define RG_ACCDET_RST_MASK 0x1
+#define RG_ACCDET_RST_MASK_SFT (0x1 << 1)
+#define RG_AUDIO_RST_SFT 0
+#define RG_AUDIO_RST_MASK 0x1
+#define RG_AUDIO_RST_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUD_TOP_RST_CON0_SET */
+#define RG_AUD_TOP_RST_CON0_SET_SFT 0
+#define RG_AUD_TOP_RST_CON0_SET_MASK 0xf
+#define RG_AUD_TOP_RST_CON0_SET_MASK_SFT (0xf << 0)
+
+/* MT6358_AUD_TOP_RST_CON0_CLR */
+#define RG_AUD_TOP_RST_CON0_CLR_SFT 0
+#define RG_AUD_TOP_RST_CON0_CLR_MASK 0xf
+#define RG_AUD_TOP_RST_CON0_CLR_MASK_SFT (0xf << 0)
+
+/* MT6358_AUD_TOP_RST_BANK_CON0 */
+#define BANK_AUDZCD_SWRST_SFT 2
+#define BANK_AUDZCD_SWRST_MASK 0x1
+#define BANK_AUDZCD_SWRST_MASK_SFT (0x1 << 2)
+#define BANK_AUDIO_SWRST_SFT 1
+#define BANK_AUDIO_SWRST_MASK 0x1
+#define BANK_AUDIO_SWRST_MASK_SFT (0x1 << 1)
+#define BANK_ACCDET_SWRST_SFT 0
+#define BANK_ACCDET_SWRST_MASK 0x1
+#define BANK_ACCDET_SWRST_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUD_TOP_INT_CON0 */
+#define RG_INT_EN_AUDIO_SFT 0
+#define RG_INT_EN_AUDIO_MASK 0x1
+#define RG_INT_EN_AUDIO_MASK_SFT (0x1 << 0)
+#define RG_INT_EN_ACCDET_SFT 5
+#define RG_INT_EN_ACCDET_MASK 0x1
+#define RG_INT_EN_ACCDET_MASK_SFT (0x1 << 5)
+#define RG_INT_EN_ACCDET_EINT0_SFT 6
+#define RG_INT_EN_ACCDET_EINT0_MASK 0x1
+#define RG_INT_EN_ACCDET_EINT0_MASK_SFT (0x1 << 6)
+#define RG_INT_EN_ACCDET_EINT1_SFT 7
+#define RG_INT_EN_ACCDET_EINT1_MASK 0x1
+#define RG_INT_EN_ACCDET_EINT1_MASK_SFT (0x1 << 7)
+
+/* MT6358_AUD_TOP_INT_CON0_SET */
+#define RG_AUD_INT_CON0_SET_SFT 0
+#define RG_AUD_INT_CON0_SET_MASK 0xffff
+#define RG_AUD_INT_CON0_SET_MASK_SFT (0xffff << 0)
+
+/* MT6358_AUD_TOP_INT_CON0_CLR */
+#define RG_AUD_INT_CON0_CLR_SFT 0
+#define RG_AUD_INT_CON0_CLR_MASK 0xffff
+#define RG_AUD_INT_CON0_CLR_MASK_SFT (0xffff << 0)
+
+/* MT6358_AUD_TOP_INT_MASK_CON0 */
+#define RG_INT_MASK_AUDIO_SFT 0
+#define RG_INT_MASK_AUDIO_MASK 0x1
+#define RG_INT_MASK_AUDIO_MASK_SFT (0x1 << 0)
+#define RG_INT_MASK_ACCDET_SFT 5
+#define RG_INT_MASK_ACCDET_MASK 0x1
+#define RG_INT_MASK_ACCDET_MASK_SFT (0x1 << 5)
+#define RG_INT_MASK_ACCDET_EINT0_SFT 6
+#define RG_INT_MASK_ACCDET_EINT0_MASK 0x1
+#define RG_INT_MASK_ACCDET_EINT0_MASK_SFT (0x1 << 6)
+#define RG_INT_MASK_ACCDET_EINT1_SFT 7
+#define RG_INT_MASK_ACCDET_EINT1_MASK 0x1
+#define RG_INT_MASK_ACCDET_EINT1_MASK_SFT (0x1 << 7)
+
+/* MT6358_AUD_TOP_INT_MASK_CON0_SET */
+#define RG_AUD_INT_MASK_CON0_SET_SFT 0
+#define RG_AUD_INT_MASK_CON0_SET_MASK 0xff
+#define RG_AUD_INT_MASK_CON0_SET_MASK_SFT (0xff << 0)
+
+/* MT6358_AUD_TOP_INT_MASK_CON0_CLR */
+#define RG_AUD_INT_MASK_CON0_CLR_SFT 0
+#define RG_AUD_INT_MASK_CON0_CLR_MASK 0xff
+#define RG_AUD_INT_MASK_CON0_CLR_MASK_SFT (0xff << 0)
+
+/* MT6358_AUD_TOP_INT_STATUS0 */
+#define RG_INT_STATUS_AUDIO_SFT 0
+#define RG_INT_STATUS_AUDIO_MASK 0x1
+#define RG_INT_STATUS_AUDIO_MASK_SFT (0x1 << 0)
+#define RG_INT_STATUS_ACCDET_SFT 5
+#define RG_INT_STATUS_ACCDET_MASK 0x1
+#define RG_INT_STATUS_ACCDET_MASK_SFT (0x1 << 5)
+#define RG_INT_STATUS_ACCDET_EINT0_SFT 6
+#define RG_INT_STATUS_ACCDET_EINT0_MASK 0x1
+#define RG_INT_STATUS_ACCDET_EINT0_MASK_SFT (0x1 << 6)
+#define RG_INT_STATUS_ACCDET_EINT1_SFT 7
+#define RG_INT_STATUS_ACCDET_EINT1_MASK 0x1
+#define RG_INT_STATUS_ACCDET_EINT1_MASK_SFT (0x1 << 7)
+
+/* MT6358_AUD_TOP_INT_RAW_STATUS0 */
+#define RG_INT_RAW_STATUS_AUDIO_SFT 0
+#define RG_INT_RAW_STATUS_AUDIO_MASK 0x1
+#define RG_INT_RAW_STATUS_AUDIO_MASK_SFT (0x1 << 0)
+#define RG_INT_RAW_STATUS_ACCDET_SFT 5
+#define RG_INT_RAW_STATUS_ACCDET_MASK 0x1
+#define RG_INT_RAW_STATUS_ACCDET_MASK_SFT (0x1 << 5)
+#define RG_INT_RAW_STATUS_ACCDET_EINT0_SFT 6
+#define RG_INT_RAW_STATUS_ACCDET_EINT0_MASK 0x1
+#define RG_INT_RAW_STATUS_ACCDET_EINT0_MASK_SFT (0x1 << 6)
+#define RG_INT_RAW_STATUS_ACCDET_EINT1_SFT 7
+#define RG_INT_RAW_STATUS_ACCDET_EINT1_MASK 0x1
+#define RG_INT_RAW_STATUS_ACCDET_EINT1_MASK_SFT (0x1 << 7)
+
+/* MT6358_AUD_TOP_INT_MISC_CON0 */
+#define RG_AUD_TOP_INT_POLARITY_SFT 0
+#define RG_AUD_TOP_INT_POLARITY_MASK 0x1
+#define RG_AUD_TOP_INT_POLARITY_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON0 */
+#define RG_DIVCKS_CHG_SFT 0
+#define RG_DIVCKS_CHG_MASK 0x1
+#define RG_DIVCKS_CHG_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON1 */
+#define RG_DIVCKS_ON_SFT 0
+#define RG_DIVCKS_ON_MASK 0x1
+#define RG_DIVCKS_ON_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON2 */
+#define RG_DIVCKS_PRG_SFT 0
+#define RG_DIVCKS_PRG_MASK 0x1ff
+#define RG_DIVCKS_PRG_MASK_SFT (0x1ff << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON3 */
+#define RG_DIVCKS_PWD_NCP_SFT 0
+#define RG_DIVCKS_PWD_NCP_MASK 0x1
+#define RG_DIVCKS_PWD_NCP_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUDNCP_CLKDIV_CON4 */
+#define RG_DIVCKS_PWD_NCP_ST_SEL_SFT 0
+#define RG_DIVCKS_PWD_NCP_ST_SEL_MASK 0x3
+#define RG_DIVCKS_PWD_NCP_ST_SEL_MASK_SFT (0x3 << 0)
+
+/* MT6358_AUD_TOP_MON_CON0 */
+#define RG_AUD_TOP_MON_SEL_SFT 0
+#define RG_AUD_TOP_MON_SEL_MASK 0x7
+#define RG_AUD_TOP_MON_SEL_MASK_SFT (0x7 << 0)
+#define RG_AUD_CLK_INT_MON_FLAG_SEL_SFT 3
+#define RG_AUD_CLK_INT_MON_FLAG_SEL_MASK 0xff
+#define RG_AUD_CLK_INT_MON_FLAG_SEL_MASK_SFT (0xff << 3)
+#define RG_AUD_CLK_INT_MON_FLAG_EN_SFT 11
+#define RG_AUD_CLK_INT_MON_FLAG_EN_MASK 0x1
+#define RG_AUD_CLK_INT_MON_FLAG_EN_MASK_SFT (0x1 << 11)
+
+/* MT6358_AUDIO_DIG_DSN_ID */
+#define AUDIO_DIG_ANA_ID_SFT 0
+#define AUDIO_DIG_ANA_ID_MASK 0xff
+#define AUDIO_DIG_ANA_ID_MASK_SFT (0xff << 0)
+#define AUDIO_DIG_DIG_ID_SFT 8
+#define AUDIO_DIG_DIG_ID_MASK 0xff
+#define AUDIO_DIG_DIG_ID_MASK_SFT (0xff << 8)
+
+/* MT6358_AUDIO_DIG_DSN_REV0 */
+#define AUDIO_DIG_ANA_MINOR_REV_SFT 0
+#define AUDIO_DIG_ANA_MINOR_REV_MASK 0xf
+#define AUDIO_DIG_ANA_MINOR_REV_MASK_SFT (0xf << 0)
+#define AUDIO_DIG_ANA_MAJOR_REV_SFT 4
+#define AUDIO_DIG_ANA_MAJOR_REV_MASK 0xf
+#define AUDIO_DIG_ANA_MAJOR_REV_MASK_SFT (0xf << 4)
+#define AUDIO_DIG_DIG_MINOR_REV_SFT 8
+#define AUDIO_DIG_DIG_MINOR_REV_MASK 0xf
+#define AUDIO_DIG_DIG_MINOR_REV_MASK_SFT (0xf << 8)
+#define AUDIO_DIG_DIG_MAJOR_REV_SFT 12
+#define AUDIO_DIG_DIG_MAJOR_REV_MASK 0xf
+#define AUDIO_DIG_DIG_MAJOR_REV_MASK_SFT (0xf << 12)
+
+/* MT6358_AUDIO_DIG_DSN_DBI */
+#define AUDIO_DIG_DSN_CBS_SFT 0
+#define AUDIO_DIG_DSN_CBS_MASK 0x3
+#define AUDIO_DIG_DSN_CBS_MASK_SFT (0x3 << 0)
+#define AUDIO_DIG_DSN_BIX_SFT 2
+#define AUDIO_DIG_DSN_BIX_MASK 0x3
+#define AUDIO_DIG_DSN_BIX_MASK_SFT (0x3 << 2)
+#define AUDIO_DIG_ESP_SFT 8
+#define AUDIO_DIG_ESP_MASK 0xff
+#define AUDIO_DIG_ESP_MASK_SFT (0xff << 8)
+
+/* MT6358_AUDIO_DIG_DSN_DXI */
+#define AUDIO_DIG_DSN_FPI_SFT 0
+#define AUDIO_DIG_DSN_FPI_MASK 0xff
+#define AUDIO_DIG_DSN_FPI_MASK_SFT (0xff << 0)
+
+/* MT6358_AFE_UL_DL_CON0 */
+#define AFE_UL_LR_SWAP_SFT 15
+#define AFE_UL_LR_SWAP_MASK 0x1
+#define AFE_UL_LR_SWAP_MASK_SFT (0x1 << 15)
+#define AFE_DL_LR_SWAP_SFT 14
+#define AFE_DL_LR_SWAP_MASK 0x1
+#define AFE_DL_LR_SWAP_MASK_SFT (0x1 << 14)
+#define AFE_ON_SFT 0
+#define AFE_ON_MASK 0x1
+#define AFE_ON_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_DL_SRC2_CON0_L */
+#define DL_2_SRC_ON_TMP_CTL_PRE_SFT 0
+#define DL_2_SRC_ON_TMP_CTL_PRE_MASK 0x1
+#define DL_2_SRC_ON_TMP_CTL_PRE_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_UL_SRC_CON0_H */
+#define C_DIGMIC_PHASE_SEL_CH1_CTL_SFT 11
+#define C_DIGMIC_PHASE_SEL_CH1_CTL_MASK 0x7
+#define C_DIGMIC_PHASE_SEL_CH1_CTL_MASK_SFT (0x7 << 11)
+#define C_DIGMIC_PHASE_SEL_CH2_CTL_SFT 8
+#define C_DIGMIC_PHASE_SEL_CH2_CTL_MASK 0x7
+#define C_DIGMIC_PHASE_SEL_CH2_CTL_MASK_SFT (0x7 << 8)
+#define C_TWO_DIGITAL_MIC_CTL_SFT 7
+#define C_TWO_DIGITAL_MIC_CTL_MASK 0x1
+#define C_TWO_DIGITAL_MIC_CTL_MASK_SFT (0x1 << 7)
+
+/* MT6358_AFE_UL_SRC_CON0_L */
+#define DMIC_LOW_POWER_MODE_CTL_SFT 14
+#define DMIC_LOW_POWER_MODE_CTL_MASK 0x3
+#define DMIC_LOW_POWER_MODE_CTL_MASK_SFT (0x3 << 14)
+#define DIGMIC_3P25M_1P625M_SEL_CTL_SFT 5
+#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK 0x1
+#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK_SFT (0x1 << 5)
+#define UL_LOOP_BACK_MODE_CTL_SFT 2
+#define UL_LOOP_BACK_MODE_CTL_MASK 0x1
+#define UL_LOOP_BACK_MODE_CTL_MASK_SFT (0x1 << 2)
+#define UL_SDM_3_LEVEL_CTL_SFT 1
+#define UL_SDM_3_LEVEL_CTL_MASK 0x1
+#define UL_SDM_3_LEVEL_CTL_MASK_SFT (0x1 << 1)
+#define UL_SRC_ON_TMP_CTL_SFT 0
+#define UL_SRC_ON_TMP_CTL_MASK 0x1
+#define UL_SRC_ON_TMP_CTL_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_TOP_CON0 */
+#define MTKAIF_SINE_ON_SFT 2
+#define MTKAIF_SINE_ON_MASK 0x1
+#define MTKAIF_SINE_ON_MASK_SFT (0x1 << 2)
+#define UL_SINE_ON_SFT 1
+#define UL_SINE_ON_MASK 0x1
+#define UL_SINE_ON_MASK_SFT (0x1 << 1)
+#define DL_SINE_ON_SFT 0
+#define DL_SINE_ON_MASK 0x1
+#define DL_SINE_ON_MASK_SFT (0x1 << 0)
+
+/* MT6358_AUDIO_TOP_CON0 */
+#define PDN_AFE_CTL_SFT 7
+#define PDN_AFE_CTL_MASK 0x1
+#define PDN_AFE_CTL_MASK_SFT (0x1 << 7)
+#define PDN_DAC_CTL_SFT 6
+#define PDN_DAC_CTL_MASK 0x1
+#define PDN_DAC_CTL_MASK_SFT (0x1 << 6)
+#define PDN_ADC_CTL_SFT 5
+#define PDN_ADC_CTL_MASK 0x1
+#define PDN_ADC_CTL_MASK_SFT (0x1 << 5)
+#define PDN_I2S_DL_CTL_SFT 3
+#define PDN_I2S_DL_CTL_MASK 0x1
+#define PDN_I2S_DL_CTL_MASK_SFT (0x1 << 3)
+#define PWR_CLK_DIS_CTL_SFT 2
+#define PWR_CLK_DIS_CTL_MASK 0x1
+#define PWR_CLK_DIS_CTL_MASK_SFT (0x1 << 2)
+#define PDN_AFE_TESTMODEL_CTL_SFT 1
+#define PDN_AFE_TESTMODEL_CTL_MASK 0x1
+#define PDN_AFE_TESTMODEL_CTL_MASK_SFT (0x1 << 1)
+#define PDN_RESERVED_SFT 0
+#define PDN_RESERVED_MASK 0x1
+#define PDN_RESERVED_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_MON_DEBUG0 */
+#define AUDIO_SYS_TOP_MON_SWAP_SFT 14
+#define AUDIO_SYS_TOP_MON_SWAP_MASK 0x3
+#define AUDIO_SYS_TOP_MON_SWAP_MASK_SFT (0x3 << 14)
+#define AUDIO_SYS_TOP_MON_SEL_SFT 8
+#define AUDIO_SYS_TOP_MON_SEL_MASK 0x1f
+#define AUDIO_SYS_TOP_MON_SEL_MASK_SFT (0x1f << 8)
+#define AFE_MON_SEL_SFT 0
+#define AFE_MON_SEL_MASK 0xff
+#define AFE_MON_SEL_MASK_SFT (0xff << 0)
+
+/* MT6358_AFUNC_AUD_CON0 */
+#define CCI_AUD_ANACK_SEL_SFT 15
+#define CCI_AUD_ANACK_SEL_MASK 0x1
+#define CCI_AUD_ANACK_SEL_MASK_SFT (0x1 << 15)
+#define CCI_AUDIO_FIFO_WPTR_SFT 12
+#define CCI_AUDIO_FIFO_WPTR_MASK 0x7
+#define CCI_AUDIO_FIFO_WPTR_MASK_SFT (0x7 << 12)
+#define CCI_SCRAMBLER_CG_EN_SFT 11
+#define CCI_SCRAMBLER_CG_EN_MASK 0x1
+#define CCI_SCRAMBLER_CG_EN_MASK_SFT (0x1 << 11)
+#define CCI_LCH_INV_SFT 10
+#define CCI_LCH_INV_MASK 0x1
+#define CCI_LCH_INV_MASK_SFT (0x1 << 10)
+#define CCI_RAND_EN_SFT 9
+#define CCI_RAND_EN_MASK 0x1
+#define CCI_RAND_EN_MASK_SFT (0x1 << 9)
+#define CCI_SPLT_SCRMB_CLK_ON_SFT 8
+#define CCI_SPLT_SCRMB_CLK_ON_MASK 0x1
+#define CCI_SPLT_SCRMB_CLK_ON_MASK_SFT (0x1 << 8)
+#define CCI_SPLT_SCRMB_ON_SFT 7
+#define CCI_SPLT_SCRMB_ON_MASK 0x1
+#define CCI_SPLT_SCRMB_ON_MASK_SFT (0x1 << 7)
+#define CCI_AUD_IDAC_TEST_EN_SFT 6
+#define CCI_AUD_IDAC_TEST_EN_MASK 0x1
+#define CCI_AUD_IDAC_TEST_EN_MASK_SFT (0x1 << 6)
+#define CCI_ZERO_PAD_DISABLE_SFT 5
+#define CCI_ZERO_PAD_DISABLE_MASK 0x1
+#define CCI_ZERO_PAD_DISABLE_MASK_SFT (0x1 << 5)
+#define CCI_AUD_SPLIT_TEST_EN_SFT 4
+#define CCI_AUD_SPLIT_TEST_EN_MASK 0x1
+#define CCI_AUD_SPLIT_TEST_EN_MASK_SFT (0x1 << 4)
+#define CCI_AUD_SDM_MUTEL_SFT 3
+#define CCI_AUD_SDM_MUTEL_MASK 0x1
+#define CCI_AUD_SDM_MUTEL_MASK_SFT (0x1 << 3)
+#define CCI_AUD_SDM_MUTER_SFT 2
+#define CCI_AUD_SDM_MUTER_MASK 0x1
+#define CCI_AUD_SDM_MUTER_MASK_SFT (0x1 << 2)
+#define CCI_AUD_SDM_7BIT_SEL_SFT 1
+#define CCI_AUD_SDM_7BIT_SEL_MASK 0x1
+#define CCI_AUD_SDM_7BIT_SEL_MASK_SFT (0x1 << 1)
+#define CCI_SCRAMBLER_EN_SFT 0
+#define CCI_SCRAMBLER_EN_MASK 0x1
+#define CCI_SCRAMBLER_EN_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFUNC_AUD_CON1 */
+#define AUD_SDM_TEST_L_SFT 8
+#define AUD_SDM_TEST_L_MASK 0xff
+#define AUD_SDM_TEST_L_MASK_SFT (0xff << 8)
+#define AUD_SDM_TEST_R_SFT 0
+#define AUD_SDM_TEST_R_MASK 0xff
+#define AUD_SDM_TEST_R_MASK_SFT (0xff << 0)
+
+/* MT6358_AFUNC_AUD_CON2 */
+#define CCI_AUD_DAC_ANA_MUTE_SFT 7
+#define CCI_AUD_DAC_ANA_MUTE_MASK 0x1
+#define CCI_AUD_DAC_ANA_MUTE_MASK_SFT (0x1 << 7)
+#define CCI_AUD_DAC_ANA_RSTB_SEL_SFT 6
+#define CCI_AUD_DAC_ANA_RSTB_SEL_MASK 0x1
+#define CCI_AUD_DAC_ANA_RSTB_SEL_MASK_SFT (0x1 << 6)
+#define CCI_AUDIO_FIFO_CLKIN_INV_SFT 4
+#define CCI_AUDIO_FIFO_CLKIN_INV_MASK 0x1
+#define CCI_AUDIO_FIFO_CLKIN_INV_MASK_SFT (0x1 << 4)
+#define CCI_AUDIO_FIFO_ENABLE_SFT 3
+#define CCI_AUDIO_FIFO_ENABLE_MASK 0x1
+#define CCI_AUDIO_FIFO_ENABLE_MASK_SFT (0x1 << 3)
+#define CCI_ACD_MODE_SFT 2
+#define CCI_ACD_MODE_MASK 0x1
+#define CCI_ACD_MODE_MASK_SFT (0x1 << 2)
+#define CCI_AFIFO_CLK_PWDB_SFT 1
+#define CCI_AFIFO_CLK_PWDB_MASK 0x1
+#define CCI_AFIFO_CLK_PWDB_MASK_SFT (0x1 << 1)
+#define CCI_ACD_FUNC_RSTB_SFT 0
+#define CCI_ACD_FUNC_RSTB_MASK 0x1
+#define CCI_ACD_FUNC_RSTB_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFUNC_AUD_CON3 */
+#define SDM_ANA13M_TESTCK_SEL_SFT 15
+#define SDM_ANA13M_TESTCK_SEL_MASK 0x1
+#define SDM_ANA13M_TESTCK_SEL_MASK_SFT (0x1 << 15)
+#define SDM_ANA13M_TESTCK_SRC_SEL_SFT 12
+#define SDM_ANA13M_TESTCK_SRC_SEL_MASK 0x7
+#define SDM_ANA13M_TESTCK_SRC_SEL_MASK_SFT (0x7 << 12)
+#define SDM_TESTCK_SRC_SEL_SFT 8
+#define SDM_TESTCK_SRC_SEL_MASK 0x7
+#define SDM_TESTCK_SRC_SEL_MASK_SFT (0x7 << 8)
+#define DIGMIC_TESTCK_SRC_SEL_SFT 4
+#define DIGMIC_TESTCK_SRC_SEL_MASK 0x7
+#define DIGMIC_TESTCK_SRC_SEL_MASK_SFT (0x7 << 4)
+#define DIGMIC_TESTCK_SEL_SFT 0
+#define DIGMIC_TESTCK_SEL_MASK 0x1
+#define DIGMIC_TESTCK_SEL_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFUNC_AUD_CON4 */
+#define UL_FIFO_WCLK_INV_SFT 8
+#define UL_FIFO_WCLK_INV_MASK 0x1
+#define UL_FIFO_WCLK_INV_MASK_SFT (0x1 << 8)
+#define UL_FIFO_DIGMIC_WDATA_TESTSRC_SEL_SFT 6
+#define UL_FIFO_DIGMIC_WDATA_TESTSRC_SEL_MASK 0x1
+#define UL_FIFO_DIGMIC_WDATA_TESTSRC_SEL_MASK_SFT (0x1 << 6)
+#define UL_FIFO_WDATA_TESTEN_SFT 5
+#define UL_FIFO_WDATA_TESTEN_MASK 0x1
+#define UL_FIFO_WDATA_TESTEN_MASK_SFT (0x1 << 5)
+#define UL_FIFO_WDATA_TESTSRC_SEL_SFT 4
+#define UL_FIFO_WDATA_TESTSRC_SEL_MASK 0x1
+#define UL_FIFO_WDATA_TESTSRC_SEL_MASK_SFT (0x1 << 4)
+#define UL_FIFO_WCLK_6P5M_TESTCK_SEL_SFT 3
+#define UL_FIFO_WCLK_6P5M_TESTCK_SEL_MASK 0x1
+#define UL_FIFO_WCLK_6P5M_TESTCK_SEL_MASK_SFT (0x1 << 3)
+#define UL_FIFO_WCLK_6P5M_TESTCK_SRC_SEL_SFT 0
+#define UL_FIFO_WCLK_6P5M_TESTCK_SRC_SEL_MASK 0x7
+#define UL_FIFO_WCLK_6P5M_TESTCK_SRC_SEL_MASK_SFT (0x7 << 0)
+
+/* MT6358_AFUNC_AUD_CON5 */
+#define R_AUD_DAC_POS_LARGE_MONO_SFT 8
+#define R_AUD_DAC_POS_LARGE_MONO_MASK 0xff
+#define R_AUD_DAC_POS_LARGE_MONO_MASK_SFT (0xff << 8)
+#define R_AUD_DAC_NEG_LARGE_MONO_SFT 0
+#define R_AUD_DAC_NEG_LARGE_MONO_MASK 0xff
+#define R_AUD_DAC_NEG_LARGE_MONO_MASK_SFT (0xff << 0)
+
+/* MT6358_AFUNC_AUD_CON6 */
+#define R_AUD_DAC_POS_SMALL_MONO_SFT 12
+#define R_AUD_DAC_POS_SMALL_MONO_MASK 0xf
+#define R_AUD_DAC_POS_SMALL_MONO_MASK_SFT (0xf << 12)
+#define R_AUD_DAC_NEG_SMALL_MONO_SFT 8
+#define R_AUD_DAC_NEG_SMALL_MONO_MASK 0xf
+#define R_AUD_DAC_NEG_SMALL_MONO_MASK_SFT (0xf << 8)
+#define R_AUD_DAC_POS_TINY_MONO_SFT 6
+#define R_AUD_DAC_POS_TINY_MONO_MASK 0x3
+#define R_AUD_DAC_POS_TINY_MONO_MASK_SFT (0x3 << 6)
+#define R_AUD_DAC_NEG_TINY_MONO_SFT 4
+#define R_AUD_DAC_NEG_TINY_MONO_MASK 0x3
+#define R_AUD_DAC_NEG_TINY_MONO_MASK_SFT (0x3 << 4)
+#define R_AUD_DAC_MONO_SEL_SFT 3
+#define R_AUD_DAC_MONO_SEL_MASK 0x1
+#define R_AUD_DAC_MONO_SEL_MASK_SFT (0x1 << 3)
+#define R_AUD_DAC_SW_RSTB_SFT 0
+#define R_AUD_DAC_SW_RSTB_MASK 0x1
+#define R_AUD_DAC_SW_RSTB_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFUNC_AUD_MON0 */
+#define AUD_SCR_OUT_L_SFT 8
+#define AUD_SCR_OUT_L_MASK 0xff
+#define AUD_SCR_OUT_L_MASK_SFT (0xff << 8)
+#define AUD_SCR_OUT_R_SFT 0
+#define AUD_SCR_OUT_R_MASK 0xff
+#define AUD_SCR_OUT_R_MASK_SFT (0xff << 0)
+
+/* MT6358_AUDRC_TUNE_MON0 */
+#define ASYNC_TEST_OUT_BCK_SFT 15
+#define ASYNC_TEST_OUT_BCK_MASK 0x1
+#define ASYNC_TEST_OUT_BCK_MASK_SFT (0x1 << 15)
+#define RGS_AUDRCTUNE1READ_SFT 8
+#define RGS_AUDRCTUNE1READ_MASK 0x1f
+#define RGS_AUDRCTUNE1READ_MASK_SFT (0x1f << 8)
+#define RGS_AUDRCTUNE0READ_SFT 0
+#define RGS_AUDRCTUNE0READ_MASK 0x1f
+#define RGS_AUDRCTUNE0READ_MASK_SFT (0x1f << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_FIFO_CFG0 */
+#define AFE_RESERVED_SFT 1
+#define AFE_RESERVED_MASK 0x7fff
+#define AFE_RESERVED_MASK_SFT (0x7fff << 1)
+#define RG_MTKAIF_RXIF_FIFO_INTEN_SFT 0
+#define RG_MTKAIF_RXIF_FIFO_INTEN_MASK 0x1
+#define RG_MTKAIF_RXIF_FIFO_INTEN_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 */
+#define MTKAIF_RXIF_WR_FULL_STATUS_SFT 1
+#define MTKAIF_RXIF_WR_FULL_STATUS_MASK 0x1
+#define MTKAIF_RXIF_WR_FULL_STATUS_MASK_SFT (0x1 << 1)
+#define MTKAIF_RXIF_RD_EMPTY_STATUS_SFT 0
+#define MTKAIF_RXIF_RD_EMPTY_STATUS_MASK 0x1
+#define MTKAIF_RXIF_RD_EMPTY_STATUS_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_MON0 */
+#define MTKAIFTX_V3_SYNC_OUT_SFT 14
+#define MTKAIFTX_V3_SYNC_OUT_MASK 0x1
+#define MTKAIFTX_V3_SYNC_OUT_MASK_SFT (0x1 << 14)
+#define MTKAIFTX_V3_SDATA_OUT2_SFT 13
+#define MTKAIFTX_V3_SDATA_OUT2_MASK 0x1
+#define MTKAIFTX_V3_SDATA_OUT2_MASK_SFT (0x1 << 13)
+#define MTKAIFTX_V3_SDATA_OUT1_SFT 12
+#define MTKAIFTX_V3_SDATA_OUT1_MASK 0x1
+#define MTKAIFTX_V3_SDATA_OUT1_MASK_SFT (0x1 << 12)
+#define MTKAIF_RXIF_FIFO_STATUS_SFT 0
+#define MTKAIF_RXIF_FIFO_STATUS_MASK 0xfff
+#define MTKAIF_RXIF_FIFO_STATUS_MASK_SFT (0xfff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_MON1 */
+#define MTKAIFRX_V3_SYNC_IN_SFT 14
+#define MTKAIFRX_V3_SYNC_IN_MASK 0x1
+#define MTKAIFRX_V3_SYNC_IN_MASK_SFT (0x1 << 14)
+#define MTKAIFRX_V3_SDATA_IN2_SFT 13
+#define MTKAIFRX_V3_SDATA_IN2_MASK 0x1
+#define MTKAIFRX_V3_SDATA_IN2_MASK_SFT (0x1 << 13)
+#define MTKAIFRX_V3_SDATA_IN1_SFT 12
+#define MTKAIFRX_V3_SDATA_IN1_MASK 0x1
+#define MTKAIFRX_V3_SDATA_IN1_MASK_SFT (0x1 << 12)
+#define MTKAIF_RXIF_SEARCH_FAIL_FLAG_SFT 11
+#define MTKAIF_RXIF_SEARCH_FAIL_FLAG_MASK 0x1
+#define MTKAIF_RXIF_SEARCH_FAIL_FLAG_MASK_SFT (0x1 << 11)
+#define MTKAIF_RXIF_INVALID_FLAG_SFT 8
+#define MTKAIF_RXIF_INVALID_FLAG_MASK 0x1
+#define MTKAIF_RXIF_INVALID_FLAG_MASK_SFT (0x1 << 8)
+#define MTKAIF_RXIF_INVALID_CYCLE_SFT 0
+#define MTKAIF_RXIF_INVALID_CYCLE_MASK 0xff
+#define MTKAIF_RXIF_INVALID_CYCLE_MASK_SFT (0xff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_MON2 */
+#define MTKAIF_TXIF_IN_CH2_SFT 8
+#define MTKAIF_TXIF_IN_CH2_MASK 0xff
+#define MTKAIF_TXIF_IN_CH2_MASK_SFT (0xff << 8)
+#define MTKAIF_TXIF_IN_CH1_SFT 0
+#define MTKAIF_TXIF_IN_CH1_MASK 0xff
+#define MTKAIF_TXIF_IN_CH1_MASK_SFT (0xff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_MON3 */
+#define MTKAIF_RXIF_OUT_CH2_SFT 8
+#define MTKAIF_RXIF_OUT_CH2_MASK 0xff
+#define MTKAIF_RXIF_OUT_CH2_MASK_SFT (0xff << 8)
+#define MTKAIF_RXIF_OUT_CH1_SFT 0
+#define MTKAIF_RXIF_OUT_CH1_MASK 0xff
+#define MTKAIF_RXIF_OUT_CH1_MASK_SFT (0xff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_CFG0 */
+#define RG_MTKAIF_RXIF_CLKINV_SFT 15
+#define RG_MTKAIF_RXIF_CLKINV_MASK 0x1
+#define RG_MTKAIF_RXIF_CLKINV_MASK_SFT (0x1 << 15)
+#define RG_MTKAIF_RXIF_PROTOCOL2_SFT 8
+#define RG_MTKAIF_RXIF_PROTOCOL2_MASK 0x1
+#define RG_MTKAIF_RXIF_PROTOCOL2_MASK_SFT (0x1 << 8)
+#define RG_MTKAIF_BYPASS_SRC_MODE_SFT 6
+#define RG_MTKAIF_BYPASS_SRC_MODE_MASK 0x3
+#define RG_MTKAIF_BYPASS_SRC_MODE_MASK_SFT (0x3 << 6)
+#define RG_MTKAIF_BYPASS_SRC_TEST_SFT 5
+#define RG_MTKAIF_BYPASS_SRC_TEST_MASK 0x1
+#define RG_MTKAIF_BYPASS_SRC_TEST_MASK_SFT (0x1 << 5)
+#define RG_MTKAIF_TXIF_PROTOCOL2_SFT 4
+#define RG_MTKAIF_TXIF_PROTOCOL2_MASK 0x1
+#define RG_MTKAIF_TXIF_PROTOCOL2_MASK_SFT (0x1 << 4)
+#define RG_MTKAIF_PMIC_TXIF_8TO5_SFT 2
+#define RG_MTKAIF_PMIC_TXIF_8TO5_MASK 0x1
+#define RG_MTKAIF_PMIC_TXIF_8TO5_MASK_SFT (0x1 << 2)
+#define RG_MTKAIF_LOOPBACK_TEST2_SFT 1
+#define RG_MTKAIF_LOOPBACK_TEST2_MASK 0x1
+#define RG_MTKAIF_LOOPBACK_TEST2_MASK_SFT (0x1 << 1)
+#define RG_MTKAIF_LOOPBACK_TEST1_SFT 0
+#define RG_MTKAIF_LOOPBACK_TEST1_MASK 0x1
+#define RG_MTKAIF_LOOPBACK_TEST1_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_RX_CFG0 */
+#define RG_MTKAIF_RXIF_VOICE_MODE_SFT 12
+#define RG_MTKAIF_RXIF_VOICE_MODE_MASK 0xf
+#define RG_MTKAIF_RXIF_VOICE_MODE_MASK_SFT (0xf << 12)
+#define RG_MTKAIF_RXIF_DATA_BIT_SFT 8
+#define RG_MTKAIF_RXIF_DATA_BIT_MASK 0x7
+#define RG_MTKAIF_RXIF_DATA_BIT_MASK_SFT (0x7 << 8)
+#define RG_MTKAIF_RXIF_FIFO_RSP_SFT 4
+#define RG_MTKAIF_RXIF_FIFO_RSP_MASK 0x7
+#define RG_MTKAIF_RXIF_FIFO_RSP_MASK_SFT (0x7 << 4)
+#define RG_MTKAIF_RXIF_DETECT_ON_SFT 3
+#define RG_MTKAIF_RXIF_DETECT_ON_MASK 0x1
+#define RG_MTKAIF_RXIF_DETECT_ON_MASK_SFT (0x1 << 3)
+#define RG_MTKAIF_RXIF_DATA_MODE_SFT 0
+#define RG_MTKAIF_RXIF_DATA_MODE_MASK 0x1
+#define RG_MTKAIF_RXIF_DATA_MODE_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_RX_CFG1 */
+#define RG_MTKAIF_RXIF_SYNC_SEARCH_TABLE_SFT 12
+#define RG_MTKAIF_RXIF_SYNC_SEARCH_TABLE_MASK 0xf
+#define RG_MTKAIF_RXIF_SYNC_SEARCH_TABLE_MASK_SFT (0xf << 12)
+#define RG_MTKAIF_RXIF_INVALID_SYNC_CHECK_ROUND_SFT 8
+#define RG_MTKAIF_RXIF_INVALID_SYNC_CHECK_ROUND_MASK 0xf
+#define RG_MTKAIF_RXIF_INVALID_SYNC_CHECK_ROUND_MASK_SFT (0xf << 8)
+#define RG_MTKAIF_RXIF_SYNC_CHECK_ROUND_SFT 4
+#define RG_MTKAIF_RXIF_SYNC_CHECK_ROUND_MASK 0xf
+#define RG_MTKAIF_RXIF_SYNC_CHECK_ROUND_MASK_SFT (0xf << 4)
+#define RG_MTKAIF_RXIF_VOICE_MODE_PROTOCOL2_SFT 0
+#define RG_MTKAIF_RXIF_VOICE_MODE_PROTOCOL2_MASK 0xf
+#define RG_MTKAIF_RXIF_VOICE_MODE_PROTOCOL2_MASK_SFT (0xf << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_RX_CFG2 */
+#define RG_MTKAIF_RXIF_CLEAR_SYNC_FAIL_SFT 12
+#define RG_MTKAIF_RXIF_CLEAR_SYNC_FAIL_MASK 0x1
+#define RG_MTKAIF_RXIF_CLEAR_SYNC_FAIL_MASK_SFT (0x1 << 12)
+#define RG_MTKAIF_RXIF_SYNC_CNT_TABLE_SFT 0
+#define RG_MTKAIF_RXIF_SYNC_CNT_TABLE_MASK 0xfff
+#define RG_MTKAIF_RXIF_SYNC_CNT_TABLE_MASK_SFT (0xfff << 0)
+
+/* MT6358_AFE_ADDA_MTKAIF_RX_CFG3 */
+#define RG_MTKAIF_RXIF_LOOPBACK_USE_NLE_SFT 7
+#define RG_MTKAIF_RXIF_LOOPBACK_USE_NLE_MASK 0x1
+#define RG_MTKAIF_RXIF_LOOPBACK_USE_NLE_MASK_SFT (0x1 << 7)
+#define RG_MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_SFT 4
+#define RG_MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK 0x7
+#define RG_MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK_SFT (0x7 << 4)
+#define RG_MTKAIF_RXIF_DETECT_ON_PROTOCOL2_SFT 3
+#define RG_MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK 0x1
+#define RG_MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK_SFT (0x1 << 3)
+
+/* MT6358_AFE_ADDA_MTKAIF_TX_CFG1 */
+#define RG_MTKAIF_SYNC_WORD2_SFT 4
+#define RG_MTKAIF_SYNC_WORD2_MASK 0x7
+#define RG_MTKAIF_SYNC_WORD2_MASK_SFT (0x7 << 4)
+#define RG_MTKAIF_SYNC_WORD1_SFT 0
+#define RG_MTKAIF_SYNC_WORD1_MASK 0x7
+#define RG_MTKAIF_SYNC_WORD1_MASK_SFT (0x7 << 0)
+
+/* MT6358_AFE_SGEN_CFG0 */
+#define SGEN_AMP_DIV_CH1_CTL_SFT 12
+#define SGEN_AMP_DIV_CH1_CTL_MASK 0xf
+#define SGEN_AMP_DIV_CH1_CTL_MASK_SFT (0xf << 12)
+#define SGEN_DAC_EN_CTL_SFT 7
+#define SGEN_DAC_EN_CTL_MASK 0x1
+#define SGEN_DAC_EN_CTL_MASK_SFT (0x1 << 7)
+#define SGEN_MUTE_SW_CTL_SFT 6
+#define SGEN_MUTE_SW_CTL_MASK 0x1
+#define SGEN_MUTE_SW_CTL_MASK_SFT (0x1 << 6)
+#define R_AUD_SDM_MUTE_L_SFT 5
+#define R_AUD_SDM_MUTE_L_MASK 0x1
+#define R_AUD_SDM_MUTE_L_MASK_SFT (0x1 << 5)
+#define R_AUD_SDM_MUTE_R_SFT 4
+#define R_AUD_SDM_MUTE_R_MASK 0x1
+#define R_AUD_SDM_MUTE_R_MASK_SFT (0x1 << 4)
+
+/* MT6358_AFE_SGEN_CFG1 */
+#define C_SGEN_RCH_INV_5BIT_SFT 15
+#define C_SGEN_RCH_INV_5BIT_MASK 0x1
+#define C_SGEN_RCH_INV_5BIT_MASK_SFT (0x1 << 15)
+#define C_SGEN_RCH_INV_8BIT_SFT 14
+#define C_SGEN_RCH_INV_8BIT_MASK 0x1
+#define C_SGEN_RCH_INV_8BIT_MASK_SFT (0x1 << 14)
+#define SGEN_FREQ_DIV_CH1_CTL_SFT 0
+#define SGEN_FREQ_DIV_CH1_CTL_MASK 0x1f
+#define SGEN_FREQ_DIV_CH1_CTL_MASK_SFT (0x1f << 0)
+
+/* MT6358_AFE_ADC_ASYNC_FIFO_CFG */
+#define RG_UL_ASYNC_FIFO_SOFT_RST_EN_SFT 5
+#define RG_UL_ASYNC_FIFO_SOFT_RST_EN_MASK 0x1
+#define RG_UL_ASYNC_FIFO_SOFT_RST_EN_MASK_SFT (0x1 << 5)
+#define RG_UL_ASYNC_FIFO_SOFT_RST_SFT 4
+#define RG_UL_ASYNC_FIFO_SOFT_RST_MASK 0x1
+#define RG_UL_ASYNC_FIFO_SOFT_RST_MASK_SFT (0x1 << 4)
+#define RG_AMIC_UL_ADC_CLK_SEL_SFT 1
+#define RG_AMIC_UL_ADC_CLK_SEL_MASK 0x1
+#define RG_AMIC_UL_ADC_CLK_SEL_MASK_SFT (0x1 << 1)
+
+/* MT6358_AFE_DCCLK_CFG0 */
+#define DCCLK_DIV_SFT 5
+#define DCCLK_DIV_MASK 0x7ff
+#define DCCLK_DIV_MASK_SFT (0x7ff << 5)
+#define DCCLK_INV_SFT 4
+#define DCCLK_INV_MASK 0x1
+#define DCCLK_INV_MASK_SFT (0x1 << 4)
+#define DCCLK_PDN_SFT 1
+#define DCCLK_PDN_MASK 0x1
+#define DCCLK_PDN_MASK_SFT (0x1 << 1)
+#define DCCLK_GEN_ON_SFT 0
+#define DCCLK_GEN_ON_MASK 0x1
+#define DCCLK_GEN_ON_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_DCCLK_CFG1 */
+#define RESYNC_SRC_SEL_SFT 10
+#define RESYNC_SRC_SEL_MASK 0x3
+#define RESYNC_SRC_SEL_MASK_SFT (0x3 << 10)
+#define RESYNC_SRC_CK_INV_SFT 9
+#define RESYNC_SRC_CK_INV_MASK 0x1
+#define RESYNC_SRC_CK_INV_MASK_SFT (0x1 << 9)
+#define DCCLK_RESYNC_BYPASS_SFT 8
+#define DCCLK_RESYNC_BYPASS_MASK 0x1
+#define DCCLK_RESYNC_BYPASS_MASK_SFT (0x1 << 8)
+#define DCCLK_PHASE_SEL_SFT 4
+#define DCCLK_PHASE_SEL_MASK 0xf
+#define DCCLK_PHASE_SEL_MASK_SFT (0xf << 4)
+
+/* MT6358_AUDIO_DIG_CFG */
+#define RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_SFT 15
+#define RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_MASK 0x1
+#define RG_AUD_PAD_TOP_DAT_MISO2_LOOPBACK_MASK_SFT (0x1 << 15)
+#define RG_AUD_PAD_TOP_PHASE_MODE2_SFT 8
+#define RG_AUD_PAD_TOP_PHASE_MODE2_MASK 0x7f
+#define RG_AUD_PAD_TOP_PHASE_MODE2_MASK_SFT (0x7f << 8)
+#define RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT 7
+#define RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_MASK 0x1
+#define RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_MASK_SFT (0x1 << 7)
+#define RG_AUD_PAD_TOP_PHASE_MODE_SFT 0
+#define RG_AUD_PAD_TOP_PHASE_MODE_MASK 0x7f
+#define RG_AUD_PAD_TOP_PHASE_MODE_MASK_SFT (0x7f << 0)
+
+/* MT6358_AFE_AUD_PAD_TOP */
+#define RG_AUD_PAD_TOP_TX_FIFO_RSP_SFT 12
+#define RG_AUD_PAD_TOP_TX_FIFO_RSP_MASK 0x7
+#define RG_AUD_PAD_TOP_TX_FIFO_RSP_MASK_SFT (0x7 << 12)
+#define RG_AUD_PAD_TOP_MTKAIF_CLK_PROTOCOL2_SFT 11
+#define RG_AUD_PAD_TOP_MTKAIF_CLK_PROTOCOL2_MASK 0x1
+#define RG_AUD_PAD_TOP_MTKAIF_CLK_PROTOCOL2_MASK_SFT (0x1 << 11)
+#define RG_AUD_PAD_TOP_TX_FIFO_ON_SFT 8
+#define RG_AUD_PAD_TOP_TX_FIFO_ON_MASK 0x1
+#define RG_AUD_PAD_TOP_TX_FIFO_ON_MASK_SFT (0x1 << 8)
+
+/* MT6358_AFE_AUD_PAD_TOP_MON */
+#define ADDA_AUD_PAD_TOP_MON_SFT 0
+#define ADDA_AUD_PAD_TOP_MON_MASK 0xffff
+#define ADDA_AUD_PAD_TOP_MON_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_AUD_PAD_TOP_MON1 */
+#define ADDA_AUD_PAD_TOP_MON1_SFT 0
+#define ADDA_AUD_PAD_TOP_MON1_MASK 0xffff
+#define ADDA_AUD_PAD_TOP_MON1_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_DL_NLE_CFG */
+#define NLE_RCH_HPGAIN_SEL_SFT 10
+#define NLE_RCH_HPGAIN_SEL_MASK 0x1
+#define NLE_RCH_HPGAIN_SEL_MASK_SFT (0x1 << 10)
+#define NLE_RCH_CH_SEL_SFT 9
+#define NLE_RCH_CH_SEL_MASK 0x1
+#define NLE_RCH_CH_SEL_MASK_SFT (0x1 << 9)
+#define NLE_RCH_ON_SFT 8
+#define NLE_RCH_ON_MASK 0x1
+#define NLE_RCH_ON_MASK_SFT (0x1 << 8)
+#define NLE_LCH_HPGAIN_SEL_SFT 2
+#define NLE_LCH_HPGAIN_SEL_MASK 0x1
+#define NLE_LCH_HPGAIN_SEL_MASK_SFT (0x1 << 2)
+#define NLE_LCH_CH_SEL_SFT 1
+#define NLE_LCH_CH_SEL_MASK 0x1
+#define NLE_LCH_CH_SEL_MASK_SFT (0x1 << 1)
+#define NLE_LCH_ON_SFT 0
+#define NLE_LCH_ON_MASK 0x1
+#define NLE_LCH_ON_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_DL_NLE_MON */
+#define NLE_MONITOR_SFT 0
+#define NLE_MONITOR_MASK 0x3fff
+#define NLE_MONITOR_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_CG_EN_MON */
+#define CK_CG_EN_MON_SFT 0
+#define CK_CG_EN_MON_MASK 0x3f
+#define CK_CG_EN_MON_MASK_SFT (0x3f << 0)
+
+/* MT6358_AFE_VOW_TOP */
+#define PDN_VOW_SFT 15
+#define PDN_VOW_MASK 0x1
+#define PDN_VOW_MASK_SFT (0x1 << 15)
+#define VOW_1P6M_800K_SEL_SFT 14
+#define VOW_1P6M_800K_SEL_MASK 0x1
+#define VOW_1P6M_800K_SEL_MASK_SFT (0x1 << 14)
+#define VOW_DIGMIC_ON_SFT 13
+#define VOW_DIGMIC_ON_MASK 0x1
+#define VOW_DIGMIC_ON_MASK_SFT (0x1 << 13)
+#define VOW_CK_DIV_RST_SFT 12
+#define VOW_CK_DIV_RST_MASK 0x1
+#define VOW_CK_DIV_RST_MASK_SFT (0x1 << 12)
+#define VOW_ON_SFT 11
+#define VOW_ON_MASK 0x1
+#define VOW_ON_MASK_SFT (0x1 << 11)
+#define VOW_DIGMIC_CK_PHASE_SEL_SFT 8
+#define VOW_DIGMIC_CK_PHASE_SEL_MASK 0x7
+#define VOW_DIGMIC_CK_PHASE_SEL_MASK_SFT (0x7 << 8)
+#define MAIN_DMIC_CK_VOW_SEL_SFT 7
+#define MAIN_DMIC_CK_VOW_SEL_MASK 0x1
+#define MAIN_DMIC_CK_VOW_SEL_MASK_SFT (0x1 << 7)
+#define VOW_SDM_3_LEVEL_SFT 6
+#define VOW_SDM_3_LEVEL_MASK 0x1
+#define VOW_SDM_3_LEVEL_MASK_SFT (0x1 << 6)
+#define VOW_LOOP_BACK_MODE_SFT 5
+#define VOW_LOOP_BACK_MODE_MASK 0x1
+#define VOW_LOOP_BACK_MODE_MASK_SFT (0x1 << 5)
+#define VOW_INTR_SOURCE_SEL_SFT 4
+#define VOW_INTR_SOURCE_SEL_MASK 0x1
+#define VOW_INTR_SOURCE_SEL_MASK_SFT (0x1 << 4)
+#define VOW_INTR_CLR_SFT 3
+#define VOW_INTR_CLR_MASK 0x1
+#define VOW_INTR_CLR_MASK_SFT (0x1 << 3)
+#define S_N_VALUE_RST_SFT 2
+#define S_N_VALUE_RST_MASK 0x1
+#define S_N_VALUE_RST_MASK_SFT (0x1 << 2)
+#define SAMPLE_BASE_MODE_SFT 1
+#define SAMPLE_BASE_MODE_MASK 0x1
+#define SAMPLE_BASE_MODE_MASK_SFT (0x1 << 1)
+#define VOW_INTR_FLAG_SFT 0
+#define VOW_INTR_FLAG_MASK 0x1
+#define VOW_INTR_FLAG_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_VOW_CFG0 */
+#define AMPREF_SFT 0
+#define AMPREF_MASK 0xffff
+#define AMPREF_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_CFG1 */
+#define TIMERINI_SFT 0
+#define TIMERINI_MASK 0xffff
+#define TIMERINI_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_CFG2 */
+#define B_DEFAULT_SFT 12
+#define B_DEFAULT_MASK 0x7
+#define B_DEFAULT_MASK_SFT (0x7 << 12)
+#define A_DEFAULT_SFT 8
+#define A_DEFAULT_MASK 0x7
+#define A_DEFAULT_MASK_SFT (0x7 << 8)
+#define B_INI_SFT 4
+#define B_INI_MASK 0x7
+#define B_INI_MASK_SFT (0x7 << 4)
+#define A_INI_SFT 0
+#define A_INI_MASK 0x7
+#define A_INI_MASK_SFT (0x7 << 0)
+
+/* MT6358_AFE_VOW_CFG3 */
+#define K_BETA_RISE_SFT 12
+#define K_BETA_RISE_MASK 0xf
+#define K_BETA_RISE_MASK_SFT (0xf << 12)
+#define K_BETA_FALL_SFT 8
+#define K_BETA_FALL_MASK 0xf
+#define K_BETA_FALL_MASK_SFT (0xf << 8)
+#define K_ALPHA_RISE_SFT 4
+#define K_ALPHA_RISE_MASK 0xf
+#define K_ALPHA_RISE_MASK_SFT (0xf << 4)
+#define K_ALPHA_FALL_SFT 0
+#define K_ALPHA_FALL_MASK 0xf
+#define K_ALPHA_FALL_MASK_SFT (0xf << 0)
+
+/* MT6358_AFE_VOW_CFG4 */
+#define VOW_TXIF_SCK_INV_SFT 15
+#define VOW_TXIF_SCK_INV_MASK 0x1
+#define VOW_TXIF_SCK_INV_MASK_SFT (0x1 << 15)
+#define VOW_ADC_TESTCK_SRC_SEL_SFT 12
+#define VOW_ADC_TESTCK_SRC_SEL_MASK 0x7
+#define VOW_ADC_TESTCK_SRC_SEL_MASK_SFT (0x7 << 12)
+#define VOW_ADC_TESTCK_SEL_SFT 11
+#define VOW_ADC_TESTCK_SEL_MASK 0x1
+#define VOW_ADC_TESTCK_SEL_MASK_SFT (0x1 << 11)
+#define VOW_ADC_CLK_INV_SFT 10
+#define VOW_ADC_CLK_INV_MASK 0x1
+#define VOW_ADC_CLK_INV_MASK_SFT (0x1 << 10)
+#define VOW_TXIF_MONO_SFT 9
+#define VOW_TXIF_MONO_MASK 0x1
+#define VOW_TXIF_MONO_MASK_SFT (0x1 << 9)
+#define VOW_TXIF_SCK_DIV_SFT 4
+#define VOW_TXIF_SCK_DIV_MASK 0x1f
+#define VOW_TXIF_SCK_DIV_MASK_SFT (0x1f << 4)
+#define K_GAMMA_SFT 0
+#define K_GAMMA_MASK 0xf
+#define K_GAMMA_MASK_SFT (0xf << 0)
+
+/* MT6358_AFE_VOW_CFG5 */
+#define N_MIN_SFT 0
+#define N_MIN_MASK 0xffff
+#define N_MIN_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_CFG6 */
+#define RG_WINDOW_SIZE_SEL_SFT 12
+#define RG_WINDOW_SIZE_SEL_MASK 0x1
+#define RG_WINDOW_SIZE_SEL_MASK_SFT (0x1 << 12)
+#define RG_FLR_BYPASS_SFT 11
+#define RG_FLR_BYPASS_MASK 0x1
+#define RG_FLR_BYPASS_MASK_SFT (0x1 << 11)
+#define RG_FLR_RATIO_SFT 8
+#define RG_FLR_RATIO_MASK 0x7
+#define RG_FLR_RATIO_MASK_SFT (0x7 << 8)
+#define RG_BUCK_DVFS_DONE_SW_CTL_SFT 7
+#define RG_BUCK_DVFS_DONE_SW_CTL_MASK 0x1
+#define RG_BUCK_DVFS_DONE_SW_CTL_MASK_SFT (0x1 << 7)
+#define RG_BUCK_DVFS_DONE_HW_MODE_SFT 6
+#define RG_BUCK_DVFS_DONE_HW_MODE_MASK 0x1
+#define RG_BUCK_DVFS_DONE_HW_MODE_MASK_SFT (0x1 << 6)
+#define RG_BUCK_DVFS_HW_CNT_THR_SFT 0
+#define RG_BUCK_DVFS_HW_CNT_THR_MASK 0x3f
+#define RG_BUCK_DVFS_HW_CNT_THR_MASK_SFT (0x3f << 0)
+
+/* MT6358_AFE_VOW_MON0 */
+#define VOW_DOWNCNT_SFT 0
+#define VOW_DOWNCNT_MASK 0xffff
+#define VOW_DOWNCNT_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_MON1 */
+#define K_TMP_MON_SFT 10
+#define K_TMP_MON_MASK 0xf
+#define K_TMP_MON_MASK_SFT (0xf << 10)
+#define SLT_COUNTER_MON_SFT 7
+#define SLT_COUNTER_MON_MASK 0x7
+#define SLT_COUNTER_MON_MASK_SFT (0x7 << 7)
+#define VOW_B_SFT 4
+#define VOW_B_MASK 0x7
+#define VOW_B_MASK_SFT (0x7 << 4)
+#define VOW_A_SFT 1
+#define VOW_A_MASK 0x7
+#define VOW_A_MASK_SFT (0x7 << 1)
+#define SECOND_CNT_START_SFT 0
+#define SECOND_CNT_START_MASK 0x1
+#define SECOND_CNT_START_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_VOW_MON2 */
+#define VOW_S_L_SFT 0
+#define VOW_S_L_MASK 0xffff
+#define VOW_S_L_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_MON3 */
+#define VOW_S_H_SFT 0
+#define VOW_S_H_MASK 0xffff
+#define VOW_S_H_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_MON4 */
+#define VOW_N_L_SFT 0
+#define VOW_N_L_MASK 0xffff
+#define VOW_N_L_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_MON5 */
+#define VOW_N_H_SFT 0
+#define VOW_N_H_MASK 0xffff
+#define VOW_N_H_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_SN_INI_CFG */
+#define VOW_SN_INI_CFG_EN_SFT 15
+#define VOW_SN_INI_CFG_EN_MASK 0x1
+#define VOW_SN_INI_CFG_EN_MASK_SFT (0x1 << 15)
+#define VOW_SN_INI_CFG_VAL_SFT 0
+#define VOW_SN_INI_CFG_VAL_MASK 0x7fff
+#define VOW_SN_INI_CFG_VAL_MASK_SFT (0x7fff << 0)
+
+/* MT6358_AFE_VOW_TGEN_CFG0 */
+#define VOW_TGEN_EN_SFT 15
+#define VOW_TGEN_EN_MASK 0x1
+#define VOW_TGEN_EN_MASK_SFT (0x1 << 15)
+#define VOW_TGEN_MUTE_SW_SFT 14
+#define VOW_TGEN_MUTE_SW_MASK 0x1
+#define VOW_TGEN_MUTE_SW_MASK_SFT (0x1 << 14)
+#define VOW_TGEN_FREQ_DIV_SFT 0
+#define VOW_TGEN_FREQ_DIV_MASK 0x3fff
+#define VOW_TGEN_FREQ_DIV_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_POSDIV_CFG0 */
+#define BUCK_DVFS_DONE_SFT 15
+#define BUCK_DVFS_DONE_MASK 0x1
+#define BUCK_DVFS_DONE_MASK_SFT (0x1 << 15)
+#define VOW_32K_MODE_SFT 13
+#define VOW_32K_MODE_MASK 0x1
+#define VOW_32K_MODE_MASK_SFT (0x1 << 13)
+#define RG_BUCK_CLK_DIV_SFT 8
+#define RG_BUCK_CLK_DIV_MASK 0x1f
+#define RG_BUCK_CLK_DIV_MASK_SFT (0x1f << 8)
+#define RG_A1P6M_EN_SEL_SFT 7
+#define RG_A1P6M_EN_SEL_MASK 0x1
+#define RG_A1P6M_EN_SEL_MASK_SFT (0x1 << 7)
+#define VOW_CLK_SEL_SFT 6
+#define VOW_CLK_SEL_MASK 0x1
+#define VOW_CLK_SEL_MASK_SFT (0x1 << 6)
+#define VOW_INTR_SW_MODE_SFT 5
+#define VOW_INTR_SW_MODE_MASK 0x1
+#define VOW_INTR_SW_MODE_MASK_SFT (0x1 << 5)
+#define VOW_INTR_SW_VAL_SFT 4
+#define VOW_INTR_SW_VAL_MASK 0x1
+#define VOW_INTR_SW_VAL_MASK_SFT (0x1 << 4)
+#define VOW_CIC_MODE_SEL_SFT 2
+#define VOW_CIC_MODE_SEL_MASK 0x3
+#define VOW_CIC_MODE_SEL_MASK_SFT (0x3 << 2)
+#define RG_VOW_POSDIV_SFT 0
+#define RG_VOW_POSDIV_MASK 0x3
+#define RG_VOW_POSDIV_MASK_SFT (0x3 << 0)
+
+/* MT6358_AFE_VOW_HPF_CFG0 */
+#define VOW_HPF_DC_TEST_SFT 12
+#define VOW_HPF_DC_TEST_MASK 0xf
+#define VOW_HPF_DC_TEST_MASK_SFT (0xf << 12)
+#define VOW_IRQ_LATCH_SNR_EN_SFT 10
+#define VOW_IRQ_LATCH_SNR_EN_MASK 0x1
+#define VOW_IRQ_LATCH_SNR_EN_MASK_SFT (0x1 << 10)
+#define VOW_DMICCLK_PDN_SFT 9
+#define VOW_DMICCLK_PDN_MASK 0x1
+#define VOW_DMICCLK_PDN_MASK_SFT (0x1 << 9)
+#define VOW_POSDIVCLK_PDN_SFT 8
+#define VOW_POSDIVCLK_PDN_MASK 0x1
+#define VOW_POSDIVCLK_PDN_MASK_SFT (0x1 << 8)
+#define RG_BASELINE_ALPHA_ORDER_SFT 4
+#define RG_BASELINE_ALPHA_ORDER_MASK 0xf
+#define RG_BASELINE_ALPHA_ORDER_MASK_SFT (0xf << 4)
+#define RG_MTKAIF_HPF_BYPASS_SFT 2
+#define RG_MTKAIF_HPF_BYPASS_MASK 0x1
+#define RG_MTKAIF_HPF_BYPASS_MASK_SFT (0x1 << 2)
+#define RG_SNRDET_HPF_BYPASS_SFT 1
+#define RG_SNRDET_HPF_BYPASS_MASK 0x1
+#define RG_SNRDET_HPF_BYPASS_MASK_SFT (0x1 << 1)
+#define RG_HPF_ON_SFT 0
+#define RG_HPF_ON_MASK 0x1
+#define RG_HPF_ON_MASK_SFT (0x1 << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG0 */
+#define RG_PERIODIC_EN_SFT 15
+#define RG_PERIODIC_EN_MASK 0x1
+#define RG_PERIODIC_EN_MASK_SFT (0x1 << 15)
+#define RG_PERIODIC_CNT_CLR_SFT 14
+#define RG_PERIODIC_CNT_CLR_MASK 0x1
+#define RG_PERIODIC_CNT_CLR_MASK_SFT (0x1 << 14)
+#define RG_PERIODIC_CNT_PERIOD_SFT 0
+#define RG_PERIODIC_CNT_PERIOD_MASK 0x3fff
+#define RG_PERIODIC_CNT_PERIOD_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG1 */
+#define RG_PERIODIC_CNT_SET_SFT 15
+#define RG_PERIODIC_CNT_SET_MASK 0x1
+#define RG_PERIODIC_CNT_SET_MASK_SFT (0x1 << 15)
+#define RG_PERIODIC_CNT_PAUSE_SFT 14
+#define RG_PERIODIC_CNT_PAUSE_MASK 0x1
+#define RG_PERIODIC_CNT_PAUSE_MASK_SFT (0x1 << 14)
+#define RG_PERIODIC_CNT_SET_VALUE_SFT 0
+#define RG_PERIODIC_CNT_SET_VALUE_MASK 0x3fff
+#define RG_PERIODIC_CNT_SET_VALUE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG2 */
+#define AUDPREAMPLON_PERIODIC_MODE_SFT 15
+#define AUDPREAMPLON_PERIODIC_MODE_MASK 0x1
+#define AUDPREAMPLON_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define AUDPREAMPLON_PERIODIC_INVERSE_SFT 14
+#define AUDPREAMPLON_PERIODIC_INVERSE_MASK 0x1
+#define AUDPREAMPLON_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define AUDPREAMPLON_PERIODIC_ON_CYCLE_SFT 0
+#define AUDPREAMPLON_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define AUDPREAMPLON_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG3 */
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_MODE_SFT 15
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_MODE_MASK 0x1
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_INVERSE_SFT 14
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_INVERSE_MASK 0x1
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_ON_CYCLE_SFT 0
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG4 */
+#define AUDADCLPWRUP_PERIODIC_MODE_SFT 15
+#define AUDADCLPWRUP_PERIODIC_MODE_MASK 0x1
+#define AUDADCLPWRUP_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define AUDADCLPWRUP_PERIODIC_INVERSE_SFT 14
+#define AUDADCLPWRUP_PERIODIC_INVERSE_MASK 0x1
+#define AUDADCLPWRUP_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define AUDADCLPWRUP_PERIODIC_ON_CYCLE_SFT 0
+#define AUDADCLPWRUP_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define AUDADCLPWRUP_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG5 */
+#define AUDGLBVOWLPWEN_PERIODIC_MODE_SFT 15
+#define AUDGLBVOWLPWEN_PERIODIC_MODE_MASK 0x1
+#define AUDGLBVOWLPWEN_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define AUDGLBVOWLPWEN_PERIODIC_INVERSE_SFT 14
+#define AUDGLBVOWLPWEN_PERIODIC_INVERSE_MASK 0x1
+#define AUDGLBVOWLPWEN_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define AUDGLBVOWLPWEN_PERIODIC_ON_CYCLE_SFT 0
+#define AUDGLBVOWLPWEN_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define AUDGLBVOWLPWEN_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG6 */
+#define AUDDIGMICEN_PERIODIC_MODE_SFT 15
+#define AUDDIGMICEN_PERIODIC_MODE_MASK 0x1
+#define AUDDIGMICEN_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define AUDDIGMICEN_PERIODIC_INVERSE_SFT 14
+#define AUDDIGMICEN_PERIODIC_INVERSE_MASK 0x1
+#define AUDDIGMICEN_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define AUDDIGMICEN_PERIODIC_ON_CYCLE_SFT 0
+#define AUDDIGMICEN_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define AUDDIGMICEN_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG7 */
+#define AUDPWDBMICBIAS0_PERIODIC_MODE_SFT 15
+#define AUDPWDBMICBIAS0_PERIODIC_MODE_MASK 0x1
+#define AUDPWDBMICBIAS0_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define AUDPWDBMICBIAS0_PERIODIC_INVERSE_SFT 14
+#define AUDPWDBMICBIAS0_PERIODIC_INVERSE_MASK 0x1
+#define AUDPWDBMICBIAS0_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define AUDPWDBMICBIAS0_PERIODIC_ON_CYCLE_SFT 0
+#define AUDPWDBMICBIAS0_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define AUDPWDBMICBIAS0_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG8 */
+#define AUDPWDBMICBIAS1_PERIODIC_MODE_SFT 15
+#define AUDPWDBMICBIAS1_PERIODIC_MODE_MASK 0x1
+#define AUDPWDBMICBIAS1_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define AUDPWDBMICBIAS1_PERIODIC_INVERSE_SFT 14
+#define AUDPWDBMICBIAS1_PERIODIC_INVERSE_MASK 0x1
+#define AUDPWDBMICBIAS1_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define AUDPWDBMICBIAS1_PERIODIC_ON_CYCLE_SFT 0
+#define AUDPWDBMICBIAS1_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define AUDPWDBMICBIAS1_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG9 */
+#define XO_VOW_CK_EN_PERIODIC_MODE_SFT 15
+#define XO_VOW_CK_EN_PERIODIC_MODE_MASK 0x1
+#define XO_VOW_CK_EN_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define XO_VOW_CK_EN_PERIODIC_INVERSE_SFT 14
+#define XO_VOW_CK_EN_PERIODIC_INVERSE_MASK 0x1
+#define XO_VOW_CK_EN_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define XO_VOW_CK_EN_PERIODIC_ON_CYCLE_SFT 0
+#define XO_VOW_CK_EN_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define XO_VOW_CK_EN_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG10 */
+#define AUDGLB_PWRDN_PERIODIC_MODE_SFT 15
+#define AUDGLB_PWRDN_PERIODIC_MODE_MASK 0x1
+#define AUDGLB_PWRDN_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define AUDGLB_PWRDN_PERIODIC_INVERSE_SFT 14
+#define AUDGLB_PWRDN_PERIODIC_INVERSE_MASK 0x1
+#define AUDGLB_PWRDN_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define AUDGLB_PWRDN_PERIODIC_ON_CYCLE_SFT 0
+#define AUDGLB_PWRDN_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define AUDGLB_PWRDN_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG11 */
+#define VOW_ON_PERIODIC_MODE_SFT 15
+#define VOW_ON_PERIODIC_MODE_MASK 0x1
+#define VOW_ON_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define VOW_ON_PERIODIC_INVERSE_SFT 14
+#define VOW_ON_PERIODIC_INVERSE_MASK 0x1
+#define VOW_ON_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define VOW_ON_PERIODIC_ON_CYCLE_SFT 0
+#define VOW_ON_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define VOW_ON_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG12 */
+#define DMIC_ON_PERIODIC_MODE_SFT 15
+#define DMIC_ON_PERIODIC_MODE_MASK 0x1
+#define DMIC_ON_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define DMIC_ON_PERIODIC_INVERSE_SFT 14
+#define DMIC_ON_PERIODIC_INVERSE_MASK 0x1
+#define DMIC_ON_PERIODIC_INVERSE_MASK_SFT (0x1 << 14)
+#define DMIC_ON_PERIODIC_ON_CYCLE_SFT 0
+#define DMIC_ON_PERIODIC_ON_CYCLE_MASK 0x3fff
+#define DMIC_ON_PERIODIC_ON_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG13 */
+#define PDN_VOW_F32K_CK_SFT 15
+#define PDN_VOW_F32K_CK_MASK 0x1
+#define PDN_VOW_F32K_CK_MASK_SFT (0x1 << 15)
+#define AUDPREAMPLON_PERIODIC_OFF_CYCLE_SFT 0
+#define AUDPREAMPLON_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define AUDPREAMPLON_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG14 */
+#define VOW_SNRDET_PERIODIC_CFG_SFT 15
+#define VOW_SNRDET_PERIODIC_CFG_MASK 0x1
+#define VOW_SNRDET_PERIODIC_CFG_MASK_SFT (0x1 << 15)
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_OFF_CYCLE_SFT 0
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define AUDPREAMPLDCPRECHARGE_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG15 */
+#define AUDADCLPWRUP_PERIODIC_OFF_CYCLE_SFT 0
+#define AUDADCLPWRUP_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define AUDADCLPWRUP_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG16 */
+#define AUDGLBVOWLPWEN_PERIODIC_OFF_CYCLE_SFT 0
+#define AUDGLBVOWLPWEN_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define AUDGLBVOWLPWEN_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG17 */
+#define AUDDIGMICEN_PERIODIC_OFF_CYCLE_SFT 0
+#define AUDDIGMICEN_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define AUDDIGMICEN_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG18 */
+#define AUDPWDBMICBIAS0_PERIODIC_OFF_CYCLE_SFT 0
+#define AUDPWDBMICBIAS0_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define AUDPWDBMICBIAS0_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG19 */
+#define AUDPWDBMICBIAS1_PERIODIC_OFF_CYCLE_SFT 0
+#define AUDPWDBMICBIAS1_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define AUDPWDBMICBIAS1_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG20 */
+#define CLKSQ_EN_VOW_PERIODIC_MODE_SFT 15
+#define CLKSQ_EN_VOW_PERIODIC_MODE_MASK 0x1
+#define CLKSQ_EN_VOW_PERIODIC_MODE_MASK_SFT (0x1 << 15)
+#define XO_VOW_CK_EN_PERIODIC_OFF_CYCLE_SFT 0
+#define XO_VOW_CK_EN_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define XO_VOW_CK_EN_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG21 */
+#define AUDGLB_PWRDN_PERIODIC_OFF_CYCLE_SFT 0
+#define AUDGLB_PWRDN_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define AUDGLB_PWRDN_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG22 */
+#define VOW_ON_PERIODIC_OFF_CYCLE_SFT 0
+#define VOW_ON_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define VOW_ON_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_CFG23 */
+#define DMIC_ON_PERIODIC_OFF_CYCLE_SFT 0
+#define DMIC_ON_PERIODIC_OFF_CYCLE_MASK 0x3fff
+#define DMIC_ON_PERIODIC_OFF_CYCLE_MASK_SFT (0x3fff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_MON0 */
+#define VOW_PERIODIC_MON_SFT 0
+#define VOW_PERIODIC_MON_MASK 0xffff
+#define VOW_PERIODIC_MON_MASK_SFT (0xffff << 0)
+
+/* MT6358_AFE_VOW_PERIODIC_MON1 */
+#define VOW_PERIODIC_COUNT_MON_SFT 0
+#define VOW_PERIODIC_COUNT_MON_MASK 0xffff
+#define VOW_PERIODIC_COUNT_MON_MASK_SFT (0xffff << 0)
+
+/* MT6358_AUDENC_DSN_ID */
+#define AUDENC_ANA_ID_SFT 0
+#define AUDENC_ANA_ID_MASK 0xff
+#define AUDENC_ANA_ID_MASK_SFT (0xff << 0)
+#define AUDENC_DIG_ID_SFT 8
+#define AUDENC_DIG_ID_MASK 0xff
+#define AUDENC_DIG_ID_MASK_SFT (0xff << 8)
+
+/* MT6358_AUDENC_DSN_REV0 */
+#define AUDENC_ANA_MINOR_REV_SFT 0
+#define AUDENC_ANA_MINOR_REV_MASK 0xf
+#define AUDENC_ANA_MINOR_REV_MASK_SFT (0xf << 0)
+#define AUDENC_ANA_MAJOR_REV_SFT 4
+#define AUDENC_ANA_MAJOR_REV_MASK 0xf
+#define AUDENC_ANA_MAJOR_REV_MASK_SFT (0xf << 4)
+#define AUDENC_DIG_MINOR_REV_SFT 8
+#define AUDENC_DIG_MINOR_REV_MASK 0xf
+#define AUDENC_DIG_MINOR_REV_MASK_SFT (0xf << 8)
+#define AUDENC_DIG_MAJOR_REV_SFT 12
+#define AUDENC_DIG_MAJOR_REV_MASK 0xf
+#define AUDENC_DIG_MAJOR_REV_MASK_SFT (0xf << 12)
+
+/* MT6358_AUDENC_DSN_DBI */
+#define AUDENC_DSN_CBS_SFT 0
+#define AUDENC_DSN_CBS_MASK 0x3
+#define AUDENC_DSN_CBS_MASK_SFT (0x3 << 0)
+#define AUDENC_DSN_BIX_SFT 2
+#define AUDENC_DSN_BIX_MASK 0x3
+#define AUDENC_DSN_BIX_MASK_SFT (0x3 << 2)
+#define AUDENC_DSN_ESP_SFT 8
+#define AUDENC_DSN_ESP_MASK 0xff
+#define AUDENC_DSN_ESP_MASK_SFT (0xff << 8)
+
+/* MT6358_AUDENC_DSN_FPI */
+#define AUDENC_DSN_FPI_SFT 0
+#define AUDENC_DSN_FPI_MASK 0xff
+#define AUDENC_DSN_FPI_MASK_SFT (0xff << 0)
+
+/* MT6358_AUDENC_ANA_CON0 */
+#define RG_AUDPREAMPLON_SFT 0
+#define RG_AUDPREAMPLON_MASK 0x1
+#define RG_AUDPREAMPLON_MASK_SFT (0x1 << 0)
+#define RG_AUDPREAMPLDCCEN_SFT 1
+#define RG_AUDPREAMPLDCCEN_MASK 0x1
+#define RG_AUDPREAMPLDCCEN_MASK_SFT (0x1 << 1)
+#define RG_AUDPREAMPLDCPRECHARGE_SFT 2
+#define RG_AUDPREAMPLDCPRECHARGE_MASK 0x1
+#define RG_AUDPREAMPLDCPRECHARGE_MASK_SFT (0x1 << 2)
+#define RG_AUDPREAMPLPGATEST_SFT 3
+#define RG_AUDPREAMPLPGATEST_MASK 0x1
+#define RG_AUDPREAMPLPGATEST_MASK_SFT (0x1 << 3)
+#define RG_AUDPREAMPLVSCALE_SFT 4
+#define RG_AUDPREAMPLVSCALE_MASK 0x3
+#define RG_AUDPREAMPLVSCALE_MASK_SFT (0x3 << 4)
+#define RG_AUDPREAMPLINPUTSEL_SFT 6
+#define RG_AUDPREAMPLINPUTSEL_MASK 0x3
+#define RG_AUDPREAMPLINPUTSEL_MASK_SFT (0x3 << 6)
+#define RG_AUDPREAMPLGAIN_SFT 8
+#define RG_AUDPREAMPLGAIN_MASK 0x7
+#define RG_AUDPREAMPLGAIN_MASK_SFT (0x7 << 8)
+#define RG_AUDADCLPWRUP_SFT 12
+#define RG_AUDADCLPWRUP_MASK 0x1
+#define RG_AUDADCLPWRUP_MASK_SFT (0x1 << 12)
+#define RG_AUDADCLINPUTSEL_SFT 13
+#define RG_AUDADCLINPUTSEL_MASK 0x3
+#define RG_AUDADCLINPUTSEL_MASK_SFT (0x3 << 13)
+
+/* MT6358_AUDENC_ANA_CON1 */
+#define RG_AUDPREAMPRON_SFT 0
+#define RG_AUDPREAMPRON_MASK 0x1
+#define RG_AUDPREAMPRON_MASK_SFT (0x1 << 0)
+#define RG_AUDPREAMPRDCCEN_SFT 1
+#define RG_AUDPREAMPRDCCEN_MASK 0x1
+#define RG_AUDPREAMPRDCCEN_MASK_SFT (0x1 << 1)
+#define RG_AUDPREAMPRDCPRECHARGE_SFT 2
+#define RG_AUDPREAMPRDCPRECHARGE_MASK 0x1
+#define RG_AUDPREAMPRDCPRECHARGE_MASK_SFT (0x1 << 2)
+#define RG_AUDPREAMPRPGATEST_SFT 3
+#define RG_AUDPREAMPRPGATEST_MASK 0x1
+#define RG_AUDPREAMPRPGATEST_MASK_SFT (0x1 << 3)
+#define RG_AUDPREAMPRVSCALE_SFT 4
+#define RG_AUDPREAMPRVSCALE_MASK 0x3
+#define RG_AUDPREAMPRVSCALE_MASK_SFT (0x3 << 4)
+#define RG_AUDPREAMPRINPUTSEL_SFT 6
+#define RG_AUDPREAMPRINPUTSEL_MASK 0x3
+#define RG_AUDPREAMPRINPUTSEL_MASK_SFT (0x3 << 6)
+#define RG_AUDPREAMPRGAIN_SFT 8
+#define RG_AUDPREAMPRGAIN_MASK 0x7
+#define RG_AUDPREAMPRGAIN_MASK_SFT (0x7 << 8)
+#define RG_AUDIO_VOW_EN_SFT 11
+#define RG_AUDIO_VOW_EN_MASK 0x1
+#define RG_AUDIO_VOW_EN_MASK_SFT (0x1 << 11)
+#define RG_AUDADCRPWRUP_SFT 12
+#define RG_AUDADCRPWRUP_MASK 0x1
+#define RG_AUDADCRPWRUP_MASK_SFT (0x1 << 12)
+#define RG_AUDADCRINPUTSEL_SFT 13
+#define RG_AUDADCRINPUTSEL_MASK 0x3
+#define RG_AUDADCRINPUTSEL_MASK_SFT (0x3 << 13)
+#define RG_CLKSQ_EN_VOW_SFT 15
+#define RG_CLKSQ_EN_VOW_MASK 0x1
+#define RG_CLKSQ_EN_VOW_MASK_SFT (0x1 << 15)
+
+/* MT6358_AUDENC_ANA_CON2 */
+#define RG_AUDULHALFBIAS_SFT 0
+#define RG_AUDULHALFBIAS_MASK 0x1
+#define RG_AUDULHALFBIAS_MASK_SFT (0x1 << 0)
+#define RG_AUDGLBVOWLPWEN_SFT 1
+#define RG_AUDGLBVOWLPWEN_MASK 0x1
+#define RG_AUDGLBVOWLPWEN_MASK_SFT (0x1 << 1)
+#define RG_AUDPREAMPLPEN_SFT 2
+#define RG_AUDPREAMPLPEN_MASK 0x1
+#define RG_AUDPREAMPLPEN_MASK_SFT (0x1 << 2)
+#define RG_AUDADC1STSTAGELPEN_SFT 3
+#define RG_AUDADC1STSTAGELPEN_MASK 0x1
+#define RG_AUDADC1STSTAGELPEN_MASK_SFT (0x1 << 3)
+#define RG_AUDADC2NDSTAGELPEN_SFT 4
+#define RG_AUDADC2NDSTAGELPEN_MASK 0x1
+#define RG_AUDADC2NDSTAGELPEN_MASK_SFT (0x1 << 4)
+#define RG_AUDADCFLASHLPEN_SFT 5
+#define RG_AUDADCFLASHLPEN_MASK 0x1
+#define RG_AUDADCFLASHLPEN_MASK_SFT (0x1 << 5)
+#define RG_AUDPREAMPIDDTEST_SFT 6
+#define RG_AUDPREAMPIDDTEST_MASK 0x3
+#define RG_AUDPREAMPIDDTEST_MASK_SFT (0x3 << 6)
+#define RG_AUDADC1STSTAGEIDDTEST_SFT 8
+#define RG_AUDADC1STSTAGEIDDTEST_MASK 0x3
+#define RG_AUDADC1STSTAGEIDDTEST_MASK_SFT (0x3 << 8)
+#define RG_AUDADC2NDSTAGEIDDTEST_SFT 10
+#define RG_AUDADC2NDSTAGEIDDTEST_MASK 0x3
+#define RG_AUDADC2NDSTAGEIDDTEST_MASK_SFT (0x3 << 10)
+#define RG_AUDADCREFBUFIDDTEST_SFT 12
+#define RG_AUDADCREFBUFIDDTEST_MASK 0x3
+#define RG_AUDADCREFBUFIDDTEST_MASK_SFT (0x3 << 12)
+#define RG_AUDADCFLASHIDDTEST_SFT 14
+#define RG_AUDADCFLASHIDDTEST_MASK 0x3
+#define RG_AUDADCFLASHIDDTEST_MASK_SFT (0x3 << 14)
+
+/* MT6358_AUDENC_ANA_CON3 */
+#define RG_AUDADCDAC0P25FS_SFT 0
+#define RG_AUDADCDAC0P25FS_MASK 0x1
+#define RG_AUDADCDAC0P25FS_MASK_SFT (0x1 << 0)
+#define RG_AUDADCCLKSEL_SFT 1
+#define RG_AUDADCCLKSEL_MASK 0x1
+#define RG_AUDADCCLKSEL_MASK_SFT (0x1 << 1)
+#define RG_AUDADCCLKSOURCE_SFT 2
+#define RG_AUDADCCLKSOURCE_MASK 0x3
+#define RG_AUDADCCLKSOURCE_MASK_SFT (0x3 << 2)
+#define RG_AUDPREAMPAAFEN_SFT 8
+#define RG_AUDPREAMPAAFEN_MASK 0x1
+#define RG_AUDPREAMPAAFEN_MASK_SFT (0x1 << 8)
+#define RG_DCCVCMBUFLPMODSEL_SFT 9
+#define RG_DCCVCMBUFLPMODSEL_MASK 0x1
+#define RG_DCCVCMBUFLPMODSEL_MASK_SFT (0x1 << 9)
+#define RG_DCCVCMBUFLPSWEN_SFT 10
+#define RG_DCCVCMBUFLPSWEN_MASK 0x1
+#define RG_DCCVCMBUFLPSWEN_MASK_SFT (0x1 << 10)
+#define RG_CMSTBENH_SFT 11
+#define RG_CMSTBENH_MASK 0x1
+#define RG_CMSTBENH_MASK_SFT (0x1 << 11)
+#define RG_PGABODYSW_SFT 12
+#define RG_PGABODYSW_MASK 0x1
+#define RG_PGABODYSW_MASK_SFT (0x1 << 12)
+
+/* MT6358_AUDENC_ANA_CON4 */
+#define RG_AUDADC1STSTAGESDENB_SFT 0
+#define RG_AUDADC1STSTAGESDENB_MASK 0x1
+#define RG_AUDADC1STSTAGESDENB_MASK_SFT (0x1 << 0)
+#define RG_AUDADC2NDSTAGERESET_SFT 1
+#define RG_AUDADC2NDSTAGERESET_MASK 0x1
+#define RG_AUDADC2NDSTAGERESET_MASK_SFT (0x1 << 1)
+#define RG_AUDADC3RDSTAGERESET_SFT 2
+#define RG_AUDADC3RDSTAGERESET_MASK 0x1
+#define RG_AUDADC3RDSTAGERESET_MASK_SFT (0x1 << 2)
+#define RG_AUDADCFSRESET_SFT 3
+#define RG_AUDADCFSRESET_MASK 0x1
+#define RG_AUDADCFSRESET_MASK_SFT (0x1 << 3)
+#define RG_AUDADCWIDECM_SFT 4
+#define RG_AUDADCWIDECM_MASK 0x1
+#define RG_AUDADCWIDECM_MASK_SFT (0x1 << 4)
+#define RG_AUDADCNOPATEST_SFT 5
+#define RG_AUDADCNOPATEST_MASK 0x1
+#define RG_AUDADCNOPATEST_MASK_SFT (0x1 << 5)
+#define RG_AUDADCBYPASS_SFT 6
+#define RG_AUDADCBYPASS_MASK 0x1
+#define RG_AUDADCBYPASS_MASK_SFT (0x1 << 6)
+#define RG_AUDADCFFBYPASS_SFT 7
+#define RG_AUDADCFFBYPASS_MASK 0x1
+#define RG_AUDADCFFBYPASS_MASK_SFT (0x1 << 7)
+#define RG_AUDADCDACFBCURRENT_SFT 8
+#define RG_AUDADCDACFBCURRENT_MASK 0x1
+#define RG_AUDADCDACFBCURRENT_MASK_SFT (0x1 << 8)
+#define RG_AUDADCDACIDDTEST_SFT 9
+#define RG_AUDADCDACIDDTEST_MASK 0x3
+#define RG_AUDADCDACIDDTEST_MASK_SFT (0x3 << 9)
+#define RG_AUDADCDACNRZ_SFT 11
+#define RG_AUDADCDACNRZ_MASK 0x1
+#define RG_AUDADCDACNRZ_MASK_SFT (0x1 << 11)
+#define RG_AUDADCNODEM_SFT 12
+#define RG_AUDADCNODEM_MASK 0x1
+#define RG_AUDADCNODEM_MASK_SFT (0x1 << 12)
+#define RG_AUDADCDACTEST_SFT 13
+#define RG_AUDADCDACTEST_MASK 0x1
+#define RG_AUDADCDACTEST_MASK_SFT (0x1 << 13)
+
+/* MT6358_AUDENC_ANA_CON5 */
+#define RG_AUDRCTUNEL_SFT 0
+#define RG_AUDRCTUNEL_MASK 0x1f
+#define RG_AUDRCTUNEL_MASK_SFT (0x1f << 0)
+#define RG_AUDRCTUNELSEL_SFT 5
+#define RG_AUDRCTUNELSEL_MASK 0x1
+#define RG_AUDRCTUNELSEL_MASK_SFT (0x1 << 5)
+#define RG_AUDRCTUNER_SFT 8
+#define RG_AUDRCTUNER_MASK 0x1f
+#define RG_AUDRCTUNER_MASK_SFT (0x1f << 8)
+#define RG_AUDRCTUNERSEL_SFT 13
+#define RG_AUDRCTUNERSEL_MASK 0x1
+#define RG_AUDRCTUNERSEL_MASK_SFT (0x1 << 13)
+
+/* MT6358_AUDENC_ANA_CON6 */
+#define RG_CLKSQ_EN_SFT 0
+#define RG_CLKSQ_EN_MASK 0x1
+#define RG_CLKSQ_EN_MASK_SFT (0x1 << 0)
+#define RG_CLKSQ_IN_SEL_TEST_SFT 1
+#define RG_CLKSQ_IN_SEL_TEST_MASK 0x1
+#define RG_CLKSQ_IN_SEL_TEST_MASK_SFT (0x1 << 1)
+#define RG_CM_REFGENSEL_SFT 2
+#define RG_CM_REFGENSEL_MASK 0x1
+#define RG_CM_REFGENSEL_MASK_SFT (0x1 << 2)
+#define RG_AUDSPARE_SFT 4
+#define RG_AUDSPARE_MASK 0xf
+#define RG_AUDSPARE_MASK_SFT (0xf << 4)
+#define RG_AUDENCSPARE_SFT 8
+#define RG_AUDENCSPARE_MASK 0x3f
+#define RG_AUDENCSPARE_MASK_SFT (0x3f << 8)
+
+/* MT6358_AUDENC_ANA_CON7 */
+#define RG_AUDENCSPARE2_SFT 0
+#define RG_AUDENCSPARE2_MASK 0xff
+#define RG_AUDENCSPARE2_MASK_SFT (0xff << 0)
+
+/* MT6358_AUDENC_ANA_CON8 */
+#define RG_AUDDIGMICEN_SFT 0
+#define RG_AUDDIGMICEN_MASK 0x1
+#define RG_AUDDIGMICEN_MASK_SFT (0x1 << 0)
+#define RG_AUDDIGMICBIAS_SFT 1
+#define RG_AUDDIGMICBIAS_MASK 0x3
+#define RG_AUDDIGMICBIAS_MASK_SFT (0x3 << 1)
+#define RG_DMICHPCLKEN_SFT 3
+#define RG_DMICHPCLKEN_MASK 0x1
+#define RG_DMICHPCLKEN_MASK_SFT (0x1 << 3)
+#define RG_AUDDIGMICPDUTY_SFT 4
+#define RG_AUDDIGMICPDUTY_MASK 0x3
+#define RG_AUDDIGMICPDUTY_MASK_SFT (0x3 << 4)
+#define RG_AUDDIGMICNDUTY_SFT 6
+#define RG_AUDDIGMICNDUTY_MASK 0x3
+#define RG_AUDDIGMICNDUTY_MASK_SFT (0x3 << 6)
+#define RG_DMICMONEN_SFT 8
+#define RG_DMICMONEN_MASK 0x1
+#define RG_DMICMONEN_MASK_SFT (0x1 << 8)
+#define RG_DMICMONSEL_SFT 9
+#define RG_DMICMONSEL_MASK 0x7
+#define RG_DMICMONSEL_MASK_SFT (0x7 << 9)
+#define RG_AUDSPAREVMIC_SFT 12
+#define RG_AUDSPAREVMIC_MASK 0xf
+#define RG_AUDSPAREVMIC_MASK_SFT (0xf << 12)
+
+/* MT6358_AUDENC_ANA_CON9 */
+#define RG_AUDPWDBMICBIAS0_SFT 0
+#define RG_AUDPWDBMICBIAS0_MASK 0x1
+#define RG_AUDPWDBMICBIAS0_MASK_SFT (0x1 << 0)
+#define RG_AUDMICBIAS0BYPASSEN_SFT 1
+#define RG_AUDMICBIAS0BYPASSEN_MASK 0x1
+#define RG_AUDMICBIAS0BYPASSEN_MASK_SFT (0x1 << 1)
+#define RG_AUDMICBIAS0LOWPEN_SFT 2
+#define RG_AUDMICBIAS0LOWPEN_MASK 0x1
+#define RG_AUDMICBIAS0LOWPEN_MASK_SFT (0x1 << 2)
+#define RG_AUDMICBIAS0VREF_SFT 4
+#define RG_AUDMICBIAS0VREF_MASK 0x7
+#define RG_AUDMICBIAS0VREF_MASK_SFT (0x7 << 4)
+#define RG_AUDMICBIAS0DCSW0P1EN_SFT 8
+#define RG_AUDMICBIAS0DCSW0P1EN_MASK 0x1
+#define RG_AUDMICBIAS0DCSW0P1EN_MASK_SFT (0x1 << 8)
+#define RG_AUDMICBIAS0DCSW0P2EN_SFT 9
+#define RG_AUDMICBIAS0DCSW0P2EN_MASK 0x1
+#define RG_AUDMICBIAS0DCSW0P2EN_MASK_SFT (0x1 << 9)
+#define RG_AUDMICBIAS0DCSW0NEN_SFT 10
+#define RG_AUDMICBIAS0DCSW0NEN_MASK 0x1
+#define RG_AUDMICBIAS0DCSW0NEN_MASK_SFT (0x1 << 10)
+#define RG_AUDMICBIAS0DCSW2P1EN_SFT 12
+#define RG_AUDMICBIAS0DCSW2P1EN_MASK 0x1
+#define RG_AUDMICBIAS0DCSW2P1EN_MASK_SFT (0x1 << 12)
+#define RG_AUDMICBIAS0DCSW2P2EN_SFT 13
+#define RG_AUDMICBIAS0DCSW2P2EN_MASK 0x1
+#define RG_AUDMICBIAS0DCSW2P2EN_MASK_SFT (0x1 << 13)
+#define RG_AUDMICBIAS0DCSW2NEN_SFT 14
+#define RG_AUDMICBIAS0DCSW2NEN_MASK 0x1
+#define RG_AUDMICBIAS0DCSW2NEN_MASK_SFT (0x1 << 14)
+
+/* MT6358_AUDENC_ANA_CON10 */
+#define RG_AUDPWDBMICBIAS1_SFT 0
+#define RG_AUDPWDBMICBIAS1_MASK 0x1
+#define RG_AUDPWDBMICBIAS1_MASK_SFT (0x1 << 0)
+#define RG_AUDMICBIAS1BYPASSEN_SFT 1
+#define RG_AUDMICBIAS1BYPASSEN_MASK 0x1
+#define RG_AUDMICBIAS1BYPASSEN_MASK_SFT (0x1 << 1)
+#define RG_AUDMICBIAS1LOWPEN_SFT 2
+#define RG_AUDMICBIAS1LOWPEN_MASK 0x1
+#define RG_AUDMICBIAS1LOWPEN_MASK_SFT (0x1 << 2)
+#define RG_AUDMICBIAS1VREF_SFT 4
+#define RG_AUDMICBIAS1VREF_MASK 0x7
+#define RG_AUDMICBIAS1VREF_MASK_SFT (0x7 << 4)
+#define RG_AUDMICBIAS1DCSW1PEN_SFT 8
+#define RG_AUDMICBIAS1DCSW1PEN_MASK 0x1
+#define RG_AUDMICBIAS1DCSW1PEN_MASK_SFT (0x1 << 8)
+#define RG_AUDMICBIAS1DCSW1NEN_SFT 9
+#define RG_AUDMICBIAS1DCSW1NEN_MASK 0x1
+#define RG_AUDMICBIAS1DCSW1NEN_MASK_SFT (0x1 << 9)
+#define RG_BANDGAPGEN_SFT 12
+#define RG_BANDGAPGEN_MASK 0x1
+#define RG_BANDGAPGEN_MASK_SFT (0x1 << 12)
+#define RG_MTEST_EN_SFT 13
+#define RG_MTEST_EN_MASK 0x1
+#define RG_MTEST_EN_MASK_SFT (0x1 << 13)
+#define RG_MTEST_SEL_SFT 14
+#define RG_MTEST_SEL_MASK 0x1
+#define RG_MTEST_SEL_MASK_SFT (0x1 << 14)
+#define RG_MTEST_CURRENT_SFT 15
+#define RG_MTEST_CURRENT_MASK 0x1
+#define RG_MTEST_CURRENT_MASK_SFT (0x1 << 15)
+
+/* MT6358_AUDENC_ANA_CON11 */
+#define RG_AUDACCDETMICBIAS0PULLLOW_SFT 0
+#define RG_AUDACCDETMICBIAS0PULLLOW_MASK 0x1
+#define RG_AUDACCDETMICBIAS0PULLLOW_MASK_SFT (0x1 << 0)
+#define RG_AUDACCDETMICBIAS1PULLLOW_SFT 1
+#define RG_AUDACCDETMICBIAS1PULLLOW_MASK 0x1
+#define RG_AUDACCDETMICBIAS1PULLLOW_MASK_SFT (0x1 << 1)
+#define RG_AUDACCDETVIN1PULLLOW_SFT 2
+#define RG_AUDACCDETVIN1PULLLOW_MASK 0x1
+#define RG_AUDACCDETVIN1PULLLOW_MASK_SFT (0x1 << 2)
+#define RG_AUDACCDETVTHACAL_SFT 4
+#define RG_AUDACCDETVTHACAL_MASK 0x1
+#define RG_AUDACCDETVTHACAL_MASK_SFT (0x1 << 4)
+#define RG_AUDACCDETVTHBCAL_SFT 5
+#define RG_AUDACCDETVTHBCAL_MASK 0x1
+#define RG_AUDACCDETVTHBCAL_MASK_SFT (0x1 << 5)
+#define RG_AUDACCDETTVDET_SFT 6
+#define RG_AUDACCDETTVDET_MASK 0x1
+#define RG_AUDACCDETTVDET_MASK_SFT (0x1 << 6)
+#define RG_ACCDETSEL_SFT 7
+#define RG_ACCDETSEL_MASK 0x1
+#define RG_ACCDETSEL_MASK_SFT (0x1 << 7)
+#define RG_SWBUFMODSEL_SFT 8
+#define RG_SWBUFMODSEL_MASK 0x1
+#define RG_SWBUFMODSEL_MASK_SFT (0x1 << 8)
+#define RG_SWBUFSWEN_SFT 9
+#define RG_SWBUFSWEN_MASK 0x1
+#define RG_SWBUFSWEN_MASK_SFT (0x1 << 9)
+#define RG_EINTCOMPVTH_SFT 10
+#define RG_EINTCOMPVTH_MASK 0x1
+#define RG_EINTCOMPVTH_MASK_SFT (0x1 << 10)
+#define RG_EINTCONFIGACCDET_SFT 11
+#define RG_EINTCONFIGACCDET_MASK 0x1
+#define RG_EINTCONFIGACCDET_MASK_SFT (0x1 << 11)
+#define RG_EINTHIRENB_SFT 12
+#define RG_EINTHIRENB_MASK 0x1
+#define RG_EINTHIRENB_MASK_SFT (0x1 << 12)
+#define RG_ACCDET2AUXRESBYPASS_SFT 13
+#define RG_ACCDET2AUXRESBYPASS_MASK 0x1
+#define RG_ACCDET2AUXRESBYPASS_MASK_SFT (0x1 << 13)
+#define RG_ACCDET2AUXBUFFERBYPASS_SFT 14
+#define RG_ACCDET2AUXBUFFERBYPASS_MASK 0x1
+#define RG_ACCDET2AUXBUFFERBYPASS_MASK_SFT (0x1 << 14)
+#define RG_ACCDET2AUXSWEN_SFT 15
+#define RG_ACCDET2AUXSWEN_MASK 0x1
+#define RG_ACCDET2AUXSWEN_MASK_SFT (0x1 << 15)
+
+/* MT6358_AUDENC_ANA_CON12 */
+#define RGS_AUDRCTUNELREAD_SFT 0
+#define RGS_AUDRCTUNELREAD_MASK 0x1f
+#define RGS_AUDRCTUNELREAD_MASK_SFT (0x1f << 0)
+#define RGS_AUDRCTUNERREAD_SFT 8
+#define RGS_AUDRCTUNERREAD_MASK 0x1f
+#define RGS_AUDRCTUNERREAD_MASK_SFT (0x1f << 8)
+
+/* MT6358_AUDDEC_DSN_ID */
+#define AUDDEC_ANA_ID_SFT 0
+#define AUDDEC_ANA_ID_MASK 0xff
+#define AUDDEC_ANA_ID_MASK_SFT (0xff << 0)
+#define AUDDEC_DIG_ID_SFT 8
+#define AUDDEC_DIG_ID_MASK 0xff
+#define AUDDEC_DIG_ID_MASK_SFT (0xff << 8)
+
+/* MT6358_AUDDEC_DSN_REV0 */
+#define AUDDEC_ANA_MINOR_REV_SFT 0
+#define AUDDEC_ANA_MINOR_REV_MASK 0xf
+#define AUDDEC_ANA_MINOR_REV_MASK_SFT (0xf << 0)
+#define AUDDEC_ANA_MAJOR_REV_SFT 4
+#define AUDDEC_ANA_MAJOR_REV_MASK 0xf
+#define AUDDEC_ANA_MAJOR_REV_MASK_SFT (0xf << 4)
+#define AUDDEC_DIG_MINOR_REV_SFT 8
+#define AUDDEC_DIG_MINOR_REV_MASK 0xf
+#define AUDDEC_DIG_MINOR_REV_MASK_SFT (0xf << 8)
+#define AUDDEC_DIG_MAJOR_REV_SFT 12
+#define AUDDEC_DIG_MAJOR_REV_MASK 0xf
+#define AUDDEC_DIG_MAJOR_REV_MASK_SFT (0xf << 12)
+
+/* MT6358_AUDDEC_DSN_DBI */
+#define AUDDEC_DSN_CBS_SFT 0
+#define AUDDEC_DSN_CBS_MASK 0x3
+#define AUDDEC_DSN_CBS_MASK_SFT (0x3 << 0)
+#define AUDDEC_DSN_BIX_SFT 2
+#define AUDDEC_DSN_BIX_MASK 0x3
+#define AUDDEC_DSN_BIX_MASK_SFT (0x3 << 2)
+#define AUDDEC_DSN_ESP_SFT 8
+#define AUDDEC_DSN_ESP_MASK 0xff
+#define AUDDEC_DSN_ESP_MASK_SFT (0xff << 8)
+
+/* MT6358_AUDDEC_DSN_FPI */
+#define AUDDEC_DSN_FPI_SFT 0
+#define AUDDEC_DSN_FPI_MASK 0xff
+#define AUDDEC_DSN_FPI_MASK_SFT (0xff << 0)
+
+/* MT6358_AUDDEC_ANA_CON0 */
+#define RG_AUDDACLPWRUP_VAUDP15_SFT 0
+#define RG_AUDDACLPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDDACLPWRUP_VAUDP15_MASK_SFT (0x1 << 0)
+#define RG_AUDDACRPWRUP_VAUDP15_SFT 1
+#define RG_AUDDACRPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDDACRPWRUP_VAUDP15_MASK_SFT (0x1 << 1)
+#define RG_AUD_DAC_PWR_UP_VA28_SFT 2
+#define RG_AUD_DAC_PWR_UP_VA28_MASK 0x1
+#define RG_AUD_DAC_PWR_UP_VA28_MASK_SFT (0x1 << 2)
+#define RG_AUD_DAC_PWL_UP_VA28_SFT 3
+#define RG_AUD_DAC_PWL_UP_VA28_MASK 0x1
+#define RG_AUD_DAC_PWL_UP_VA28_MASK_SFT (0x1 << 3)
+#define RG_AUDHPLPWRUP_VAUDP15_SFT 4
+#define RG_AUDHPLPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDHPLPWRUP_VAUDP15_MASK_SFT (0x1 << 4)
+#define RG_AUDHPRPWRUP_VAUDP15_SFT 5
+#define RG_AUDHPRPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDHPRPWRUP_VAUDP15_MASK_SFT (0x1 << 5)
+#define RG_AUDHPLPWRUP_IBIAS_VAUDP15_SFT 6
+#define RG_AUDHPLPWRUP_IBIAS_VAUDP15_MASK 0x1
+#define RG_AUDHPLPWRUP_IBIAS_VAUDP15_MASK_SFT (0x1 << 6)
+#define RG_AUDHPRPWRUP_IBIAS_VAUDP15_SFT 7
+#define RG_AUDHPRPWRUP_IBIAS_VAUDP15_MASK 0x1
+#define RG_AUDHPRPWRUP_IBIAS_VAUDP15_MASK_SFT (0x1 << 7)
+#define RG_AUDHPLMUXINPUTSEL_VAUDP15_SFT 8
+#define RG_AUDHPLMUXINPUTSEL_VAUDP15_MASK 0x3
+#define RG_AUDHPLMUXINPUTSEL_VAUDP15_MASK_SFT (0x3 << 8)
+#define RG_AUDHPRMUXINPUTSEL_VAUDP15_SFT 10
+#define RG_AUDHPRMUXINPUTSEL_VAUDP15_MASK 0x3
+#define RG_AUDHPRMUXINPUTSEL_VAUDP15_MASK_SFT (0x3 << 10)
+#define RG_AUDHPLSCDISABLE_VAUDP15_SFT 12
+#define RG_AUDHPLSCDISABLE_VAUDP15_MASK 0x1
+#define RG_AUDHPLSCDISABLE_VAUDP15_MASK_SFT (0x1 << 12)
+#define RG_AUDHPRSCDISABLE_VAUDP15_SFT 13
+#define RG_AUDHPRSCDISABLE_VAUDP15_MASK 0x1
+#define RG_AUDHPRSCDISABLE_VAUDP15_MASK_SFT (0x1 << 13)
+#define RG_AUDHPLBSCCURRENT_VAUDP15_SFT 14
+#define RG_AUDHPLBSCCURRENT_VAUDP15_MASK 0x1
+#define RG_AUDHPLBSCCURRENT_VAUDP15_MASK_SFT (0x1 << 14)
+#define RG_AUDHPRBSCCURRENT_VAUDP15_SFT 15
+#define RG_AUDHPRBSCCURRENT_VAUDP15_MASK 0x1
+#define RG_AUDHPRBSCCURRENT_VAUDP15_MASK_SFT (0x1 << 15)
+
+/* MT6358_AUDDEC_ANA_CON1 */
+#define RG_AUDHPLOUTPWRUP_VAUDP15_SFT 0
+#define RG_AUDHPLOUTPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDHPLOUTPWRUP_VAUDP15_MASK_SFT (0x1 << 0)
+#define RG_AUDHPROUTPWRUP_VAUDP15_SFT 1
+#define RG_AUDHPROUTPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDHPROUTPWRUP_VAUDP15_MASK_SFT (0x1 << 1)
+#define RG_AUDHPLOUTAUXPWRUP_VAUDP15_SFT 2
+#define RG_AUDHPLOUTAUXPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDHPLOUTAUXPWRUP_VAUDP15_MASK_SFT (0x1 << 2)
+#define RG_AUDHPROUTAUXPWRUP_VAUDP15_SFT 3
+#define RG_AUDHPROUTAUXPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDHPROUTAUXPWRUP_VAUDP15_MASK_SFT (0x1 << 3)
+#define RG_HPLAUXFBRSW_EN_VAUDP15_SFT 4
+#define RG_HPLAUXFBRSW_EN_VAUDP15_MASK 0x1
+#define RG_HPLAUXFBRSW_EN_VAUDP15_MASK_SFT (0x1 << 4)
+#define RG_HPRAUXFBRSW_EN_VAUDP15_SFT 5
+#define RG_HPRAUXFBRSW_EN_VAUDP15_MASK 0x1
+#define RG_HPRAUXFBRSW_EN_VAUDP15_MASK_SFT (0x1 << 5)
+#define RG_HPLSHORT2HPLAUX_EN_VAUDP15_SFT 6
+#define RG_HPLSHORT2HPLAUX_EN_VAUDP15_MASK 0x1
+#define RG_HPLSHORT2HPLAUX_EN_VAUDP15_MASK_SFT (0x1 << 6)
+#define RG_HPRSHORT2HPRAUX_EN_VAUDP15_SFT 7
+#define RG_HPRSHORT2HPRAUX_EN_VAUDP15_MASK 0x1
+#define RG_HPRSHORT2HPRAUX_EN_VAUDP15_MASK_SFT (0x1 << 7)
+#define RG_HPLOUTSTGCTRL_VAUDP15_SFT 8
+#define RG_HPLOUTSTGCTRL_VAUDP15_MASK 0x7
+#define RG_HPLOUTSTGCTRL_VAUDP15_MASK_SFT (0x7 << 8)
+#define RG_HPROUTSTGCTRL_VAUDP15_SFT 11
+#define RG_HPROUTSTGCTRL_VAUDP15_MASK 0x7
+#define RG_HPROUTSTGCTRL_VAUDP15_MASK_SFT (0x7 << 11)
+
+/* MT6358_AUDDEC_ANA_CON2 */
+#define RG_HPLOUTPUTSTBENH_VAUDP15_SFT 0
+#define RG_HPLOUTPUTSTBENH_VAUDP15_MASK 0x7
+#define RG_HPLOUTPUTSTBENH_VAUDP15_MASK_SFT (0x7 << 0)
+#define RG_HPROUTPUTSTBENH_VAUDP15_SFT 4
+#define RG_HPROUTPUTSTBENH_VAUDP15_MASK 0x7
+#define RG_HPROUTPUTSTBENH_VAUDP15_MASK_SFT (0x7 << 4)
+#define RG_AUDHPSTARTUP_VAUDP15_SFT 13
+#define RG_AUDHPSTARTUP_VAUDP15_MASK 0x1
+#define RG_AUDHPSTARTUP_VAUDP15_MASK_SFT (0x1 << 13)
+#define RG_AUDREFN_DERES_EN_VAUDP15_SFT 14
+#define RG_AUDREFN_DERES_EN_VAUDP15_MASK 0x1
+#define RG_AUDREFN_DERES_EN_VAUDP15_MASK_SFT (0x1 << 14)
+#define RG_HPPSHORT2VCM_VAUDP15_SFT 15
+#define RG_HPPSHORT2VCM_VAUDP15_MASK 0x1
+#define RG_HPPSHORT2VCM_VAUDP15_MASK_SFT (0x1 << 15)
+
+/* MT6358_AUDDEC_ANA_CON3 */
+#define RG_HPINPUTSTBENH_VAUDP15_SFT 13
+#define RG_HPINPUTSTBENH_VAUDP15_MASK 0x1
+#define RG_HPINPUTSTBENH_VAUDP15_MASK_SFT (0x1 << 13)
+#define RG_HPINPUTRESET0_VAUDP15_SFT 14
+#define RG_HPINPUTRESET0_VAUDP15_MASK 0x1
+#define RG_HPINPUTRESET0_VAUDP15_MASK_SFT (0x1 << 14)
+#define RG_HPOUTPUTRESET0_VAUDP15_SFT 15
+#define RG_HPOUTPUTRESET0_VAUDP15_MASK 0x1
+#define RG_HPOUTPUTRESET0_VAUDP15_MASK_SFT (0x1 << 15)
+
+/* MT6358_AUDDEC_ANA_CON4 */
+#define RG_ABIDEC_RSVD0_VAUDP28_SFT 0
+#define RG_ABIDEC_RSVD0_VAUDP28_MASK 0xff
+#define RG_ABIDEC_RSVD0_VAUDP28_MASK_SFT (0xff << 0)
+
+/* MT6358_AUDDEC_ANA_CON5 */
+#define RG_AUDHPDECMGAINADJ_VAUDP15_SFT 0
+#define RG_AUDHPDECMGAINADJ_VAUDP15_MASK 0x7
+#define RG_AUDHPDECMGAINADJ_VAUDP15_MASK_SFT (0x7 << 0)
+#define RG_AUDHPDEDMGAINADJ_VAUDP15_SFT 4
+#define RG_AUDHPDEDMGAINADJ_VAUDP15_MASK 0x7
+#define RG_AUDHPDEDMGAINADJ_VAUDP15_MASK_SFT (0x7 << 4)
+
+/* MT6358_AUDDEC_ANA_CON6 */
+#define RG_AUDHSPWRUP_VAUDP15_SFT 0
+#define RG_AUDHSPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDHSPWRUP_VAUDP15_MASK_SFT (0x1 << 0)
+#define RG_AUDHSPWRUP_IBIAS_VAUDP15_SFT 1
+#define RG_AUDHSPWRUP_IBIAS_VAUDP15_MASK 0x1
+#define RG_AUDHSPWRUP_IBIAS_VAUDP15_MASK_SFT (0x1 << 1)
+#define RG_AUDHSMUXINPUTSEL_VAUDP15_SFT 2
+#define RG_AUDHSMUXINPUTSEL_VAUDP15_MASK 0x3
+#define RG_AUDHSMUXINPUTSEL_VAUDP15_MASK_SFT (0x3 << 2)
+#define RG_AUDHSSCDISABLE_VAUDP15_SFT 4
+#define RG_AUDHSSCDISABLE_VAUDP15_MASK 0x1
+#define RG_AUDHSSCDISABLE_VAUDP15_MASK_SFT (0x1 << 4)
+#define RG_AUDHSBSCCURRENT_VAUDP15_SFT 5
+#define RG_AUDHSBSCCURRENT_VAUDP15_MASK 0x1
+#define RG_AUDHSBSCCURRENT_VAUDP15_MASK_SFT (0x1 << 5)
+#define RG_AUDHSSTARTUP_VAUDP15_SFT 6
+#define RG_AUDHSSTARTUP_VAUDP15_MASK 0x1
+#define RG_AUDHSSTARTUP_VAUDP15_MASK_SFT (0x1 << 6)
+#define RG_HSOUTPUTSTBENH_VAUDP15_SFT 7
+#define RG_HSOUTPUTSTBENH_VAUDP15_MASK 0x1
+#define RG_HSOUTPUTSTBENH_VAUDP15_MASK_SFT (0x1 << 7)
+#define RG_HSINPUTSTBENH_VAUDP15_SFT 8
+#define RG_HSINPUTSTBENH_VAUDP15_MASK 0x1
+#define RG_HSINPUTSTBENH_VAUDP15_MASK_SFT (0x1 << 8)
+#define RG_HSINPUTRESET0_VAUDP15_SFT 9
+#define RG_HSINPUTRESET0_VAUDP15_MASK 0x1
+#define RG_HSINPUTRESET0_VAUDP15_MASK_SFT (0x1 << 9)
+#define RG_HSOUTPUTRESET0_VAUDP15_SFT 10
+#define RG_HSOUTPUTRESET0_VAUDP15_MASK 0x1
+#define RG_HSOUTPUTRESET0_VAUDP15_MASK_SFT (0x1 << 10)
+#define RG_HSOUT_SHORTVCM_VAUDP15_SFT 11
+#define RG_HSOUT_SHORTVCM_VAUDP15_MASK 0x1
+#define RG_HSOUT_SHORTVCM_VAUDP15_MASK_SFT (0x1 << 11)
+
+/* MT6358_AUDDEC_ANA_CON7 */
+#define RG_AUDLOLPWRUP_VAUDP15_SFT 0
+#define RG_AUDLOLPWRUP_VAUDP15_MASK 0x1
+#define RG_AUDLOLPWRUP_VAUDP15_MASK_SFT (0x1 << 0)
+#define RG_AUDLOLPWRUP_IBIAS_VAUDP15_SFT 1
+#define RG_AUDLOLPWRUP_IBIAS_VAUDP15_MASK 0x1
+#define RG_AUDLOLPWRUP_IBIAS_VAUDP15_MASK_SFT (0x1 << 1)
+#define RG_AUDLOLMUXINPUTSEL_VAUDP15_SFT 2
+#define RG_AUDLOLMUXINPUTSEL_VAUDP15_MASK 0x3
+#define RG_AUDLOLMUXINPUTSEL_VAUDP15_MASK_SFT (0x3 << 2)
+#define RG_AUDLOLSCDISABLE_VAUDP15_SFT 4
+#define RG_AUDLOLSCDISABLE_VAUDP15_MASK 0x1
+#define RG_AUDLOLSCDISABLE_VAUDP15_MASK_SFT (0x1 << 4)
+#define RG_AUDLOLBSCCURRENT_VAUDP15_SFT 5
+#define RG_AUDLOLBSCCURRENT_VAUDP15_MASK 0x1
+#define RG_AUDLOLBSCCURRENT_VAUDP15_MASK_SFT (0x1 << 5)
+#define RG_AUDLOSTARTUP_VAUDP15_SFT 6
+#define RG_AUDLOSTARTUP_VAUDP15_MASK 0x1
+#define RG_AUDLOSTARTUP_VAUDP15_MASK_SFT (0x1 << 6)
+#define RG_LOINPUTSTBENH_VAUDP15_SFT 7
+#define RG_LOINPUTSTBENH_VAUDP15_MASK 0x1
+#define RG_LOINPUTSTBENH_VAUDP15_MASK_SFT (0x1 << 7)
+#define RG_LOOUTPUTSTBENH_VAUDP15_SFT 8
+#define RG_LOOUTPUTSTBENH_VAUDP15_MASK 0x1
+#define RG_LOOUTPUTSTBENH_VAUDP15_MASK_SFT (0x1 << 8)
+#define RG_LOINPUTRESET0_VAUDP15_SFT 9
+#define RG_LOINPUTRESET0_VAUDP15_MASK 0x1
+#define RG_LOINPUTRESET0_VAUDP15_MASK_SFT (0x1 << 9)
+#define RG_LOOUTPUTRESET0_VAUDP15_SFT 10
+#define RG_LOOUTPUTRESET0_VAUDP15_MASK 0x1
+#define RG_LOOUTPUTRESET0_VAUDP15_MASK_SFT (0x1 << 10)
+#define RG_LOOUT_SHORTVCM_VAUDP15_SFT 11
+#define RG_LOOUT_SHORTVCM_VAUDP15_MASK 0x1
+#define RG_LOOUT_SHORTVCM_VAUDP15_MASK_SFT (0x1 << 11)
+
+/* MT6358_AUDDEC_ANA_CON8 */
+#define RG_AUDTRIMBUF_INPUTMUXSEL_VAUDP15_SFT 0
+#define RG_AUDTRIMBUF_INPUTMUXSEL_VAUDP15_MASK 0xf
+#define RG_AUDTRIMBUF_INPUTMUXSEL_VAUDP15_MASK_SFT (0xf << 0)
+#define RG_AUDTRIMBUF_GAINSEL_VAUDP15_SFT 4
+#define RG_AUDTRIMBUF_GAINSEL_VAUDP15_MASK 0x3
+#define RG_AUDTRIMBUF_GAINSEL_VAUDP15_MASK_SFT (0x3 << 4)
+#define RG_AUDTRIMBUF_EN_VAUDP15_SFT 6
+#define RG_AUDTRIMBUF_EN_VAUDP15_MASK 0x1
+#define RG_AUDTRIMBUF_EN_VAUDP15_MASK_SFT (0x1 << 6)
+#define RG_AUDHPSPKDET_INPUTMUXSEL_VAUDP15_SFT 8
+#define RG_AUDHPSPKDET_INPUTMUXSEL_VAUDP15_MASK 0x3
+#define RG_AUDHPSPKDET_INPUTMUXSEL_VAUDP15_MASK_SFT (0x3 << 8)
+#define RG_AUDHPSPKDET_OUTPUTMUXSEL_VAUDP15_SFT 10
+#define RG_AUDHPSPKDET_OUTPUTMUXSEL_VAUDP15_MASK 0x3
+#define RG_AUDHPSPKDET_OUTPUTMUXSEL_VAUDP15_MASK_SFT (0x3 << 10)
+#define RG_AUDHPSPKDET_EN_VAUDP15_SFT 12
+#define RG_AUDHPSPKDET_EN_VAUDP15_MASK 0x1
+#define RG_AUDHPSPKDET_EN_VAUDP15_MASK_SFT (0x1 << 12)
+
+/* MT6358_AUDDEC_ANA_CON9 */
+#define RG_ABIDEC_RSVD0_VA28_SFT 0
+#define RG_ABIDEC_RSVD0_VA28_MASK 0xff
+#define RG_ABIDEC_RSVD0_VA28_MASK_SFT (0xff << 0)
+#define RG_ABIDEC_RSVD0_VAUDP15_SFT 8
+#define RG_ABIDEC_RSVD0_VAUDP15_MASK 0xff
+#define RG_ABIDEC_RSVD0_VAUDP15_MASK_SFT (0xff << 8)
+
+/* MT6358_AUDDEC_ANA_CON10 */
+#define RG_ABIDEC_RSVD1_VAUDP15_SFT 0
+#define RG_ABIDEC_RSVD1_VAUDP15_MASK 0xff
+#define RG_ABIDEC_RSVD1_VAUDP15_MASK_SFT (0xff << 0)
+#define RG_ABIDEC_RSVD2_VAUDP15_SFT 8
+#define RG_ABIDEC_RSVD2_VAUDP15_MASK 0xff
+#define RG_ABIDEC_RSVD2_VAUDP15_MASK_SFT (0xff << 8)
+
+/* MT6358_AUDDEC_ANA_CON11 */
+#define RG_AUDZCDMUXSEL_VAUDP15_SFT 0
+#define RG_AUDZCDMUXSEL_VAUDP15_MASK 0x7
+#define RG_AUDZCDMUXSEL_VAUDP15_MASK_SFT (0x7 << 0)
+#define RG_AUDZCDCLKSEL_VAUDP15_SFT 3
+#define RG_AUDZCDCLKSEL_VAUDP15_MASK 0x1
+#define RG_AUDZCDCLKSEL_VAUDP15_MASK_SFT (0x1 << 3)
+#define RG_AUDBIASADJ_0_VAUDP15_SFT 7
+#define RG_AUDBIASADJ_0_VAUDP15_MASK 0x1ff
+#define RG_AUDBIASADJ_0_VAUDP15_MASK_SFT (0x1ff << 7)
+
+/* MT6358_AUDDEC_ANA_CON12 */
+#define RG_AUDBIASADJ_1_VAUDP15_SFT 0
+#define RG_AUDBIASADJ_1_VAUDP15_MASK 0xff
+#define RG_AUDBIASADJ_1_VAUDP15_MASK_SFT (0xff << 0)
+#define RG_AUDIBIASPWRDN_VAUDP15_SFT 8
+#define RG_AUDIBIASPWRDN_VAUDP15_MASK 0x1
+#define RG_AUDIBIASPWRDN_VAUDP15_MASK_SFT (0x1 << 8)
+
+/* MT6358_AUDDEC_ANA_CON13 */
+#define RG_RSTB_DECODER_VA28_SFT 0
+#define RG_RSTB_DECODER_VA28_MASK 0x1
+#define RG_RSTB_DECODER_VA28_MASK_SFT (0x1 << 0)
+#define RG_SEL_DECODER_96K_VA28_SFT 1
+#define RG_SEL_DECODER_96K_VA28_MASK 0x1
+#define RG_SEL_DECODER_96K_VA28_MASK_SFT (0x1 << 1)
+#define RG_SEL_DELAY_VCORE_SFT 2
+#define RG_SEL_DELAY_VCORE_MASK 0x1
+#define RG_SEL_DELAY_VCORE_MASK_SFT (0x1 << 2)
+#define RG_AUDGLB_PWRDN_VA28_SFT 4
+#define RG_AUDGLB_PWRDN_VA28_MASK 0x1
+#define RG_AUDGLB_PWRDN_VA28_MASK_SFT (0x1 << 4)
+#define RG_RSTB_ENCODER_VA28_SFT 5
+#define RG_RSTB_ENCODER_VA28_MASK 0x1
+#define RG_RSTB_ENCODER_VA28_MASK_SFT (0x1 << 5)
+#define RG_SEL_ENCODER_96K_VA28_SFT 6
+#define RG_SEL_ENCODER_96K_VA28_MASK 0x1
+#define RG_SEL_ENCODER_96K_VA28_MASK_SFT (0x1 << 6)
+
+/* MT6358_AUDDEC_ANA_CON14 */
+#define RG_HCLDO_EN_VA18_SFT 0
+#define RG_HCLDO_EN_VA18_MASK 0x1
+#define RG_HCLDO_EN_VA18_MASK_SFT (0x1 << 0)
+#define RG_HCLDO_PDDIS_EN_VA18_SFT 1
+#define RG_HCLDO_PDDIS_EN_VA18_MASK 0x1
+#define RG_HCLDO_PDDIS_EN_VA18_MASK_SFT (0x1 << 1)
+#define RG_HCLDO_REMOTE_SENSE_VA18_SFT 2
+#define RG_HCLDO_REMOTE_SENSE_VA18_MASK 0x1
+#define RG_HCLDO_REMOTE_SENSE_VA18_MASK_SFT (0x1 << 2)
+#define RG_LCLDO_EN_VA18_SFT 4
+#define RG_LCLDO_EN_VA18_MASK 0x1
+#define RG_LCLDO_EN_VA18_MASK_SFT (0x1 << 4)
+#define RG_LCLDO_PDDIS_EN_VA18_SFT 5
+#define RG_LCLDO_PDDIS_EN_VA18_MASK 0x1
+#define RG_LCLDO_PDDIS_EN_VA18_MASK_SFT (0x1 << 5)
+#define RG_LCLDO_REMOTE_SENSE_VA18_SFT 6
+#define RG_LCLDO_REMOTE_SENSE_VA18_MASK 0x1
+#define RG_LCLDO_REMOTE_SENSE_VA18_MASK_SFT (0x1 << 6)
+#define RG_LCLDO_ENC_EN_VA28_SFT 8
+#define RG_LCLDO_ENC_EN_VA28_MASK 0x1
+#define RG_LCLDO_ENC_EN_VA28_MASK_SFT (0x1 << 8)
+#define RG_LCLDO_ENC_PDDIS_EN_VA28_SFT 9
+#define RG_LCLDO_ENC_PDDIS_EN_VA28_MASK 0x1
+#define RG_LCLDO_ENC_PDDIS_EN_VA28_MASK_SFT (0x1 << 9)
+#define RG_LCLDO_ENC_REMOTE_SENSE_VA28_SFT 10
+#define RG_LCLDO_ENC_REMOTE_SENSE_VA28_MASK 0x1
+#define RG_LCLDO_ENC_REMOTE_SENSE_VA28_MASK_SFT (0x1 << 10)
+#define RG_VA33REFGEN_EN_VA18_SFT 12
+#define RG_VA33REFGEN_EN_VA18_MASK 0x1
+#define RG_VA33REFGEN_EN_VA18_MASK_SFT (0x1 << 12)
+#define RG_VA28REFGEN_EN_VA28_SFT 13
+#define RG_VA28REFGEN_EN_VA28_MASK 0x1
+#define RG_VA28REFGEN_EN_VA28_MASK_SFT (0x1 << 13)
+#define RG_HCLDO_VOSEL_VA18_SFT 14
+#define RG_HCLDO_VOSEL_VA18_MASK 0x1
+#define RG_HCLDO_VOSEL_VA18_MASK_SFT (0x1 << 14)
+#define RG_LCLDO_VOSEL_VA18_SFT 15
+#define RG_LCLDO_VOSEL_VA18_MASK 0x1
+#define RG_LCLDO_VOSEL_VA18_MASK_SFT (0x1 << 15)
+
+/* MT6358_AUDDEC_ANA_CON15 */
+#define RG_NVREG_EN_VAUDP15_SFT 0
+#define RG_NVREG_EN_VAUDP15_MASK 0x1
+#define RG_NVREG_EN_VAUDP15_MASK_SFT (0x1 << 0)
+#define RG_NVREG_PULL0V_VAUDP15_SFT 1
+#define RG_NVREG_PULL0V_VAUDP15_MASK 0x1
+#define RG_NVREG_PULL0V_VAUDP15_MASK_SFT (0x1 << 1)
+#define RG_AUDPMU_RSD0_VAUDP15_SFT 4
+#define RG_AUDPMU_RSD0_VAUDP15_MASK 0xf
+#define RG_AUDPMU_RSD0_VAUDP15_MASK_SFT (0xf << 4)
+#define RG_AUDPMU_RSD0_VA18_SFT 8
+#define RG_AUDPMU_RSD0_VA18_MASK 0xf
+#define RG_AUDPMU_RSD0_VA18_MASK_SFT (0xf << 8)
+#define RG_AUDPMU_RSD0_VA28_SFT 12
+#define RG_AUDPMU_RSD0_VA28_MASK 0xf
+#define RG_AUDPMU_RSD0_VA28_MASK_SFT (0xf << 12)
+
+/* MT6358_ZCD_CON0 */
+#define RG_AUDZCDENABLE_SFT 0
+#define RG_AUDZCDENABLE_MASK 0x1
+#define RG_AUDZCDENABLE_MASK_SFT (0x1 << 0)
+#define RG_AUDZCDGAINSTEPTIME_SFT 1
+#define RG_AUDZCDGAINSTEPTIME_MASK 0x7
+#define RG_AUDZCDGAINSTEPTIME_MASK_SFT (0x7 << 1)
+#define RG_AUDZCDGAINSTEPSIZE_SFT 4
+#define RG_AUDZCDGAINSTEPSIZE_MASK 0x3
+#define RG_AUDZCDGAINSTEPSIZE_MASK_SFT (0x3 << 4)
+#define RG_AUDZCDTIMEOUTMODESEL_SFT 6
+#define RG_AUDZCDTIMEOUTMODESEL_MASK 0x1
+#define RG_AUDZCDTIMEOUTMODESEL_MASK_SFT (0x1 << 6)
+
+/* MT6358_ZCD_CON1 */
+#define RG_AUDLOLGAIN_SFT 0
+#define RG_AUDLOLGAIN_MASK 0x1f
+#define RG_AUDLOLGAIN_MASK_SFT (0x1f << 0)
+#define RG_AUDLORGAIN_SFT 7
+#define RG_AUDLORGAIN_MASK 0x1f
+#define RG_AUDLORGAIN_MASK_SFT (0x1f << 7)
+
+/* MT6358_ZCD_CON2 */
+#define RG_AUDHPLGAIN_SFT 0
+#define RG_AUDHPLGAIN_MASK 0x1f
+#define RG_AUDHPLGAIN_MASK_SFT (0x1f << 0)
+#define RG_AUDHPRGAIN_SFT 7
+#define RG_AUDHPRGAIN_MASK 0x1f
+#define RG_AUDHPRGAIN_MASK_SFT (0x1f << 7)
+
+/* MT6358_ZCD_CON3 */
+#define RG_AUDHSGAIN_SFT 0
+#define RG_AUDHSGAIN_MASK 0x1f
+#define RG_AUDHSGAIN_MASK_SFT (0x1f << 0)
+
+/* MT6358_ZCD_CON4 */
+#define RG_AUDIVLGAIN_SFT 0
+#define RG_AUDIVLGAIN_MASK 0x7
+#define RG_AUDIVLGAIN_MASK_SFT (0x7 << 0)
+#define RG_AUDIVRGAIN_SFT 8
+#define RG_AUDIVRGAIN_MASK 0x7
+#define RG_AUDIVRGAIN_MASK_SFT (0x7 << 8)
+
+/* MT6358_ZCD_CON5 */
+#define RG_AUDINTGAIN1_SFT 0
+#define RG_AUDINTGAIN1_MASK 0x3f
+#define RG_AUDINTGAIN1_MASK_SFT (0x3f << 0)
+#define RG_AUDINTGAIN2_SFT 8
+#define RG_AUDINTGAIN2_MASK 0x3f
+#define RG_AUDINTGAIN2_MASK_SFT (0x3f << 8)
+
+/* audio register */
+#define MT6358_DRV_CON3 0x3c
+#define MT6358_GPIO_DIR0 0x88
+
+#define MT6358_GPIO_MODE2 0xd8 /* mosi */
+#define MT6358_GPIO_MODE2_SET 0xda
+#define MT6358_GPIO_MODE2_CLR 0xdc
+
+#define MT6358_GPIO_MODE3 0xde /* miso */
+#define MT6358_GPIO_MODE3_SET 0xe0
+#define MT6358_GPIO_MODE3_CLR 0xe2
+
+#define MT6358_TOP_CKPDN_CON0 0x10c
+#define MT6358_TOP_CKPDN_CON0_SET 0x10e
+#define MT6358_TOP_CKPDN_CON0_CLR 0x110
+
+#define MT6358_TOP_CKHWEN_CON0 0x12a
+#define MT6358_TOP_CKHWEN_CON0_SET 0x12c
+#define MT6358_TOP_CKHWEN_CON0_CLR 0x12e
+
+#define MT6358_OTP_CON0 0x38a
+#define MT6358_OTP_CON8 0x39a
+#define MT6358_OTP_CON11 0x3a0
+#define MT6358_OTP_CON12 0x3a2
+#define MT6358_OTP_CON13 0x3a4
+
+#define MT6358_DCXO_CW13 0x7aa
+#define MT6358_DCXO_CW14 0x7ac
+
+#define MT6358_AUXADC_CON10 0x11a0
+
+/* audio register */
+#define MT6358_AUD_TOP_ID 0x2200
+#define MT6358_AUD_TOP_REV0 0x2202
+#define MT6358_AUD_TOP_DBI 0x2204
+#define MT6358_AUD_TOP_DXI 0x2206
+#define MT6358_AUD_TOP_CKPDN_TPM0 0x2208
+#define MT6358_AUD_TOP_CKPDN_TPM1 0x220a
+#define MT6358_AUD_TOP_CKPDN_CON0 0x220c
+#define MT6358_AUD_TOP_CKPDN_CON0_SET 0x220e
+#define MT6358_AUD_TOP_CKPDN_CON0_CLR 0x2210
+#define MT6358_AUD_TOP_CKSEL_CON0 0x2212
+#define MT6358_AUD_TOP_CKSEL_CON0_SET 0x2214
+#define MT6358_AUD_TOP_CKSEL_CON0_CLR 0x2216
+#define MT6358_AUD_TOP_CKTST_CON0 0x2218
+#define MT6358_AUD_TOP_CLK_HWEN_CON0 0x221a
+#define MT6358_AUD_TOP_CLK_HWEN_CON0_SET 0x221c
+#define MT6358_AUD_TOP_CLK_HWEN_CON0_CLR 0x221e
+#define MT6358_AUD_TOP_RST_CON0 0x2220
+#define MT6358_AUD_TOP_RST_CON0_SET 0x2222
+#define MT6358_AUD_TOP_RST_CON0_CLR 0x2224
+#define MT6358_AUD_TOP_RST_BANK_CON0 0x2226
+#define MT6358_AUD_TOP_INT_CON0 0x2228
+#define MT6358_AUD_TOP_INT_CON0_SET 0x222a
+#define MT6358_AUD_TOP_INT_CON0_CLR 0x222c
+#define MT6358_AUD_TOP_INT_MASK_CON0 0x222e
+#define MT6358_AUD_TOP_INT_MASK_CON0_SET 0x2230
+#define MT6358_AUD_TOP_INT_MASK_CON0_CLR 0x2232
+#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+#define MT6358_AUD_TOP_INT_RAW_STATUS0 0x2236
+#define MT6358_AUD_TOP_INT_MISC_CON0 0x2238
+#define MT6358_AUDNCP_CLKDIV_CON0 0x223a
+#define MT6358_AUDNCP_CLKDIV_CON1 0x223c
+#define MT6358_AUDNCP_CLKDIV_CON2 0x223e
+#define MT6358_AUDNCP_CLKDIV_CON3 0x2240
+#define MT6358_AUDNCP_CLKDIV_CON4 0x2242
+#define MT6358_AUD_TOP_MON_CON0 0x2244
+#define MT6358_AUDIO_DIG_DSN_ID 0x2280
+#define MT6358_AUDIO_DIG_DSN_REV0 0x2282
+#define MT6358_AUDIO_DIG_DSN_DBI 0x2284
+#define MT6358_AUDIO_DIG_DSN_DXI 0x2286
+#define MT6358_AFE_UL_DL_CON0 0x2288
+#define MT6358_AFE_DL_SRC2_CON0_L 0x228a
+#define MT6358_AFE_UL_SRC_CON0_H 0x228c
+#define MT6358_AFE_UL_SRC_CON0_L 0x228e
+#define MT6358_AFE_TOP_CON0 0x2290
+#define MT6358_AUDIO_TOP_CON0 0x2292
+#define MT6358_AFE_MON_DEBUG0 0x2294
+#define MT6358_AFUNC_AUD_CON0 0x2296
+#define MT6358_AFUNC_AUD_CON1 0x2298
+#define MT6358_AFUNC_AUD_CON2 0x229a
+#define MT6358_AFUNC_AUD_CON3 0x229c
+#define MT6358_AFUNC_AUD_CON4 0x229e
+#define MT6358_AFUNC_AUD_CON5 0x22a0
+#define MT6358_AFUNC_AUD_CON6 0x22a2
+#define MT6358_AFUNC_AUD_MON0 0x22a4
+#define MT6358_AUDRC_TUNE_MON0 0x22a6
+#define MT6358_AFE_ADDA_MTKAIF_FIFO_CFG0 0x22a8
+#define MT6358_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x22aa
+#define MT6358_AFE_ADDA_MTKAIF_MON0 0x22ac
+#define MT6358_AFE_ADDA_MTKAIF_MON1 0x22ae
+#define MT6358_AFE_ADDA_MTKAIF_MON2 0x22b0
+#define MT6358_AFE_ADDA_MTKAIF_MON3 0x22b2
+#define MT6358_AFE_ADDA_MTKAIF_CFG0 0x22b4
+#define MT6358_AFE_ADDA_MTKAIF_RX_CFG0 0x22b6
+#define MT6358_AFE_ADDA_MTKAIF_RX_CFG1 0x22b8
+#define MT6358_AFE_ADDA_MTKAIF_RX_CFG2 0x22ba
+#define MT6358_AFE_ADDA_MTKAIF_RX_CFG3 0x22bc
+#define MT6358_AFE_ADDA_MTKAIF_TX_CFG1 0x22be
+#define MT6358_AFE_SGEN_CFG0 0x22c0
+#define MT6358_AFE_SGEN_CFG1 0x22c2
+#define MT6358_AFE_ADC_ASYNC_FIFO_CFG 0x22c4
+#define MT6358_AFE_DCCLK_CFG0 0x22c6
+#define MT6358_AFE_DCCLK_CFG1 0x22c8
+#define MT6358_AUDIO_DIG_CFG 0x22ca
+#define MT6358_AFE_AUD_PAD_TOP 0x22cc
+#define MT6358_AFE_AUD_PAD_TOP_MON 0x22ce
+#define MT6358_AFE_AUD_PAD_TOP_MON1 0x22d0
+#define MT6358_AFE_DL_NLE_CFG 0x22d2
+#define MT6358_AFE_DL_NLE_MON 0x22d4
+#define MT6358_AFE_CG_EN_MON 0x22d6
+#define MT6358_AUDIO_DIG_2ND_DSN_ID 0x2300
+#define MT6358_AUDIO_DIG_2ND_DSN_REV0 0x2302
+#define MT6358_AUDIO_DIG_2ND_DSN_DBI 0x2304
+#define MT6358_AUDIO_DIG_2ND_DSN_DXI 0x2306
+#define MT6358_AFE_PMIC_NEWIF_CFG3 0x2308
+#define MT6358_AFE_VOW_TOP 0x230a
+#define MT6358_AFE_VOW_CFG0 0x230c
+#define MT6358_AFE_VOW_CFG1 0x230e
+#define MT6358_AFE_VOW_CFG2 0x2310
+#define MT6358_AFE_VOW_CFG3 0x2312
+#define MT6358_AFE_VOW_CFG4 0x2314
+#define MT6358_AFE_VOW_CFG5 0x2316
+#define MT6358_AFE_VOW_CFG6 0x2318
+#define MT6358_AFE_VOW_MON0 0x231a
+#define MT6358_AFE_VOW_MON1 0x231c
+#define MT6358_AFE_VOW_MON2 0x231e
+#define MT6358_AFE_VOW_MON3 0x2320
+#define MT6358_AFE_VOW_MON4 0x2322
+#define MT6358_AFE_VOW_MON5 0x2324
+#define MT6358_AFE_VOW_SN_INI_CFG 0x2326
+#define MT6358_AFE_VOW_TGEN_CFG0 0x2328
+#define MT6358_AFE_VOW_POSDIV_CFG0 0x232a
+#define MT6358_AFE_VOW_HPF_CFG0 0x232c
+#define MT6358_AFE_VOW_PERIODIC_CFG0 0x232e
+#define MT6358_AFE_VOW_PERIODIC_CFG1 0x2330
+#define MT6358_AFE_VOW_PERIODIC_CFG2 0x2332
+#define MT6358_AFE_VOW_PERIODIC_CFG3 0x2334
+#define MT6358_AFE_VOW_PERIODIC_CFG4 0x2336
+#define MT6358_AFE_VOW_PERIODIC_CFG5 0x2338
+#define MT6358_AFE_VOW_PERIODIC_CFG6 0x233a
+#define MT6358_AFE_VOW_PERIODIC_CFG7 0x233c
+#define MT6358_AFE_VOW_PERIODIC_CFG8 0x233e
+#define MT6358_AFE_VOW_PERIODIC_CFG9 0x2340
+#define MT6358_AFE_VOW_PERIODIC_CFG10 0x2342
+#define MT6358_AFE_VOW_PERIODIC_CFG11 0x2344
+#define MT6358_AFE_VOW_PERIODIC_CFG12 0x2346
+#define MT6358_AFE_VOW_PERIODIC_CFG13 0x2348
+#define MT6358_AFE_VOW_PERIODIC_CFG14 0x234a
+#define MT6358_AFE_VOW_PERIODIC_CFG15 0x234c
+#define MT6358_AFE_VOW_PERIODIC_CFG16 0x234e
+#define MT6358_AFE_VOW_PERIODIC_CFG17 0x2350
+#define MT6358_AFE_VOW_PERIODIC_CFG18 0x2352
+#define MT6358_AFE_VOW_PERIODIC_CFG19 0x2354
+#define MT6358_AFE_VOW_PERIODIC_CFG20 0x2356
+#define MT6358_AFE_VOW_PERIODIC_CFG21 0x2358
+#define MT6358_AFE_VOW_PERIODIC_CFG22 0x235a
+#define MT6358_AFE_VOW_PERIODIC_CFG23 0x235c
+#define MT6358_AFE_VOW_PERIODIC_MON0 0x235e
+#define MT6358_AFE_VOW_PERIODIC_MON1 0x2360
+#define MT6358_AUDENC_DSN_ID 0x2380
+#define MT6358_AUDENC_DSN_REV0 0x2382
+#define MT6358_AUDENC_DSN_DBI 0x2384
+#define MT6358_AUDENC_DSN_FPI 0x2386
+#define MT6358_AUDENC_ANA_CON0 0x2388
+#define MT6358_AUDENC_ANA_CON1 0x238a
+#define MT6358_AUDENC_ANA_CON2 0x238c
+#define MT6358_AUDENC_ANA_CON3 0x238e
+#define MT6358_AUDENC_ANA_CON4 0x2390
+#define MT6358_AUDENC_ANA_CON5 0x2392
+#define MT6358_AUDENC_ANA_CON6 0x2394
+#define MT6358_AUDENC_ANA_CON7 0x2396
+#define MT6358_AUDENC_ANA_CON8 0x2398
+#define MT6358_AUDENC_ANA_CON9 0x239a
+#define MT6358_AUDENC_ANA_CON10 0x239c
+#define MT6358_AUDENC_ANA_CON11 0x239e
+#define MT6358_AUDENC_ANA_CON12 0x23a0
+#define MT6358_AUDDEC_DSN_ID 0x2400
+#define MT6358_AUDDEC_DSN_REV0 0x2402
+#define MT6358_AUDDEC_DSN_DBI 0x2404
+#define MT6358_AUDDEC_DSN_FPI 0x2406
+#define MT6358_AUDDEC_ANA_CON0 0x2408
+#define MT6358_AUDDEC_ANA_CON1 0x240a
+#define MT6358_AUDDEC_ANA_CON2 0x240c
+#define MT6358_AUDDEC_ANA_CON3 0x240e
+#define MT6358_AUDDEC_ANA_CON4 0x2410
+#define MT6358_AUDDEC_ANA_CON5 0x2412
+#define MT6358_AUDDEC_ANA_CON6 0x2414
+#define MT6358_AUDDEC_ANA_CON7 0x2416
+#define MT6358_AUDDEC_ANA_CON8 0x2418
+#define MT6358_AUDDEC_ANA_CON9 0x241a
+#define MT6358_AUDDEC_ANA_CON10 0x241c
+#define MT6358_AUDDEC_ANA_CON11 0x241e
+#define MT6358_AUDDEC_ANA_CON12 0x2420
+#define MT6358_AUDDEC_ANA_CON13 0x2422
+#define MT6358_AUDDEC_ANA_CON14 0x2424
+#define MT6358_AUDDEC_ANA_CON15 0x2426
+#define MT6358_AUDDEC_ELR_NUM 0x2428
+#define MT6358_AUDDEC_ELR_0 0x242a
+#define MT6358_AUDZCD_DSN_ID 0x2480
+#define MT6358_AUDZCD_DSN_REV0 0x2482
+#define MT6358_AUDZCD_DSN_DBI 0x2484
+#define MT6358_AUDZCD_DSN_FPI 0x2486
+#define MT6358_ZCD_CON0 0x2488
+#define MT6358_ZCD_CON1 0x248a
+#define MT6358_ZCD_CON2 0x248c
+#define MT6358_ZCD_CON3 0x248e
+#define MT6358_ZCD_CON4 0x2490
+#define MT6358_ZCD_CON5 0x2492
+#define MT6358_ACCDET_CON13 0x2522
+
+#define MT6358_MAX_REGISTER MT6358_ZCD_CON5
+
+enum {
+ MT6358_MTKAIF_PROTOCOL_1 = 0,
+ MT6358_MTKAIF_PROTOCOL_2,
+ MT6358_MTKAIF_PROTOCOL_2_CLK_P2,
+};
+
+/* set only during init */
+int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
+ int mtkaif_protocol);
+int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt);
+int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt);
+int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt,
+ int phase_1, int phase_2);
+#endif /* __MT6358_H__ */
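
Each bit field in the header above follows the same triplet convention: FIELD_SFT is the bit offset, FIELD_MASK is the unshifted mask, and FIELD_MASK_SFT is the mask pre-shifted into its register position. As a minimal sketch (illustrative only, not code from this change; the helper name is made up), a codec driver would typically combine these with regmap like so:

	#include <linux/regmap.h>

	/* write 'gain' into RG_AUDPREAMPLGAIN, bits [10:8] of AUDENC_ANA_CON0 */
	static int mt6358_sketch_set_preamp_gain(struct regmap *regmap,
						 unsigned int gain)
	{
		return regmap_update_bits(regmap, MT6358_AUDENC_ANA_CON0,
					  RG_AUDPREAMPLGAIN_MASK_SFT,
					  gain << RG_AUDPREAMPLGAIN_SFT);
	}
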
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 468d5143e2c4..87ed3dc496dc 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -807,7 +807,7 @@ static const struct snd_soc_dapm_route nau8824_dapm_routes[] = {
static bool nau8824_is_jack_inserted(struct nau8824 *nau8824)
{
struct snd_soc_jack *jack = nau8824->jack;
- bool insert = FALSE;
+ bool insert = false;
if (nau8824->irq && jack)
insert = jack->status & SND_JACK_HEADPHONE;
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 7bbcbf5f05c8..47e65cf99879 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -351,6 +351,7 @@ static void nau8825_hpvol_ramp(struct nau8825 *nau8825,
 * Computes log10 of a value; the result is rounded off to 3 decimal places.
 * This function is adapted from dvb-math; the reference source lives at
 * Linux/drivers/media/dvb-core/dvb_math.c
+ * @value: input for log10
*
* return log10(value) * 1000
*/
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
index 809b7e9f03ca..c5fcc632f670 100644
--- a/sound/soc/codecs/pcm186x.c
+++ b/sound/soc/codecs/pcm186x.c
@@ -42,7 +42,7 @@ struct pcm186x_priv {
bool is_master_mode;
};
-static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 4000, 50);
+static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
@@ -158,7 +158,7 @@ static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
* Put the codec into SLEEP mode when not in use, allowing the
* Energysense mechanism to operate.
*/
- SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 0),
+ SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 1),
};
static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
@@ -184,8 +184,8 @@ static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
* Put the codec into SLEEP mode when not in use, allowing the
* Energysense mechanism to operate.
*/
- SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 0),
- SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 0),
+ SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 1),
+ SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 1),
};
static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
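The TLV change above is easier to follow with the macro arguments spelled out: DECLARE_TLV_DB_SCALE(name, min, step, mute) takes min and step in 0.01 dB units, with mute selecting whether the lowest value means mute. Side by side, with the values copied from the hunk:

#include <sound/tlv.h>

/* Before: -12 dB minimum but an implausible 40 dB step, plus a stray
 * non-zero mute flag in the fourth argument.
 */
static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv_old, -1200, 4000, 50);

/* After: -12 dB minimum, 0.5 dB per register step, no mute point. */
static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv_new, -1200, 50, 0);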
diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c
index 6714aa8d9026..32b26f1c2282 100644
--- a/sound/soc/codecs/pcm3060.c
+++ b/sound/soc/codecs/pcm3060.c
@@ -18,12 +18,39 @@ static int pcm3060_set_sysclk(struct snd_soc_dai *dai, int clk_id,
{
struct snd_soc_component *comp = dai->component;
struct pcm3060_priv *priv = snd_soc_component_get_drvdata(comp);
+ unsigned int reg;
+ unsigned int val;
if (dir != SND_SOC_CLOCK_IN) {
dev_err(comp->dev, "unsupported sysclock dir: %d\n", dir);
return -EINVAL;
}
+ switch (clk_id) {
+ case PCM3060_CLK_DEF:
+ val = 0;
+ break;
+
+ case PCM3060_CLK1:
+ val = (dai->id == PCM3060_DAI_ID_DAC ? PCM3060_REG_CSEL : 0);
+ break;
+
+ case PCM3060_CLK2:
+ val = (dai->id == PCM3060_DAI_ID_DAC ? 0 : PCM3060_REG_CSEL);
+ break;
+
+ default:
+ dev_err(comp->dev, "unsupported sysclock id: %d\n", clk_id);
+ return -EINVAL;
+ }
+
+ if (dai->id == PCM3060_DAI_ID_DAC)
+ reg = PCM3060_REG67;
+ else
+ reg = PCM3060_REG72;
+
+ regmap_update_bits(priv->regmap, reg, PCM3060_REG_CSEL, val);
+
priv->dai[dai->id].sclk_freq = freq;
return 0;
@@ -287,6 +314,14 @@ int pcm3060_probe(struct device *dev)
int rc;
struct pcm3060_priv *priv = dev_get_drvdata(dev);
+ /* soft reset */
+ rc = regmap_update_bits(priv->regmap, PCM3060_REG64,
+ PCM3060_REG_MRST, 0);
+ if (rc) {
+ dev_err(dev, "failed to reset component, rc=%d\n", rc);
+ return rc;
+ }
+
if (dev->of_node)
pcm3060_parse_dt(dev->of_node, priv);
diff --git a/sound/soc/codecs/pcm3060.h b/sound/soc/codecs/pcm3060.h
index 6a027b4a845d..75931c9a9d85 100644
--- a/sound/soc/codecs/pcm3060.h
+++ b/sound/soc/codecs/pcm3060.h
@@ -17,6 +17,11 @@ extern const struct regmap_config pcm3060_regmap;
#define PCM3060_DAI_ID_ADC 1
#define PCM3060_DAI_IDS_NUM 2
+/* ADC and DAC can be clocked from the same or separate sources, CLK1 and CLK2 */
+#define PCM3060_CLK_DEF 0 /* default: CLK1->ADC, CLK2->DAC */
+#define PCM3060_CLK1 1
+#define PCM3060_CLK2 2
+
struct pcm3060_priv_dai {
bool is_master;
unsigned int sclk_freq;
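With the new PCM3060_CLK* identifiers a machine driver can choose which of CLK1/CLK2 feeds each converter through the standard sysclk call; the codec driver maps the request onto the CSEL bit as shown in the pcm3060.c hunk above. A hedged usage sketch (the DAI pointer and the 24.576 MHz rate are placeholders):

#include <sound/soc.h>

/* Ask the driver to take the DAC clock from CLK1 instead of the
 * default CLK2; the direction must be SND_SOC_CLOCK_IN or the
 * callback returns -EINVAL.
 */
static int example_pcm3060_clocking(struct snd_soc_dai *dac_dai)
{
        return snd_soc_dai_set_sysclk(dac_dai, PCM3060_CLK1,
                                      24576000, SND_SOC_CLOCK_IN);
}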
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 6cb1653be804..62d05b01711f 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -55,6 +55,7 @@ struct pcm512x_priv {
unsigned long overclock_dsp;
int mute;
struct mutex mutex;
+ unsigned int bclk_ratio;
};
/*
@@ -915,16 +916,21 @@ static int pcm512x_set_dividers(struct snd_soc_dai *dai,
int fssp;
int gpio;
- lrclk_div = snd_soc_params_to_frame_size(params);
- if (lrclk_div == 0) {
- dev_err(dev, "No LRCLK?\n");
- return -EINVAL;
+ if (pcm512x->bclk_ratio > 0) {
+ lrclk_div = pcm512x->bclk_ratio;
+ } else {
+ lrclk_div = snd_soc_params_to_frame_size(params);
+
+ if (lrclk_div == 0) {
+ dev_err(dev, "No LRCLK?\n");
+ return -EINVAL;
+ }
}
if (!pcm512x->pll_out) {
sck_rate = clk_get_rate(pcm512x->sclk);
- bclk_div = params->rate_den * 64 / lrclk_div;
- bclk_rate = DIV_ROUND_CLOSEST(sck_rate, bclk_div);
+ bclk_rate = params_rate(params) * lrclk_div;
+ bclk_div = DIV_ROUND_CLOSEST(sck_rate, bclk_rate);
mck_rate = sck_rate;
} else {
@@ -1383,6 +1389,19 @@ static int pcm512x_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
+static int pcm512x_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct snd_soc_component *component = dai->component;
+ struct pcm512x_priv *pcm512x = snd_soc_component_get_drvdata(component);
+
+ if (ratio > 256)
+ return -EINVAL;
+
+ pcm512x->bclk_ratio = ratio;
+
+ return 0;
+}
+
static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_component *component = dai->component;
@@ -1400,24 +1419,20 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
if (ret != 0) {
dev_err(component->dev,
"Failed to set digital mute: %d\n", ret);
- mutex_unlock(&pcm512x->mutex);
- return ret;
+ goto unlock;
}
regmap_read_poll_timeout(pcm512x->regmap,
PCM512x_ANALOG_MUTE_DET,
mute_det, (mute_det & 0x3) == 0,
200, 10000);
-
- mutex_unlock(&pcm512x->mutex);
} else {
pcm512x->mute &= ~0x1;
ret = pcm512x_update_mute(pcm512x);
if (ret != 0) {
dev_err(component->dev,
"Failed to update digital mute: %d\n", ret);
- mutex_unlock(&pcm512x->mutex);
- return ret;
+ goto unlock;
}
regmap_read_poll_timeout(pcm512x->regmap,
@@ -1428,9 +1443,10 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
200, 10000);
}
+unlock:
mutex_unlock(&pcm512x->mutex);
- return 0;
+ return ret;
}
static const struct snd_soc_dai_ops pcm512x_dai_ops = {
@@ -1438,6 +1454,7 @@ static const struct snd_soc_dai_ops pcm512x_dai_ops = {
.hw_params = pcm512x_hw_params,
.set_fmt = pcm512x_set_fmt,
.digital_mute = pcm512x_digital_mute,
+ .set_bclk_ratio = pcm512x_set_bclk_ratio,
};
static struct snd_soc_dai_driver pcm512x_dai = {
@@ -1523,8 +1540,9 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
pcm512x->supply_nb[2].notifier_call = pcm512x_regulator_event_2;
for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++) {
- ret = regulator_register_notifier(pcm512x->supplies[i].consumer,
- &pcm512x->supply_nb[i]);
+ ret = devm_regulator_register_notifier(
+ pcm512x->supplies[i].consumer,
+ &pcm512x->supply_nb[i]);
if (ret != 0) {
dev_err(dev,
"Failed to register regulator notifier: %d\n",
diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c
new file mode 100644
index 000000000000..24f8f86d58e9
--- /dev/null
+++ b/sound/soc/codecs/rk3328_codec.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// rk3328 ALSA SoC Audio driver
+//
+// Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm_params.h>
+#include "rk3328_codec.h"
+
+/*
+ * volume setting
+ * 0: -39dB
+ * 26: 0dB
+ * 31: 6dB
+ * Step: 1.5dB
+ */
+#define OUT_VOLUME (0x18)
+#define RK3328_GRF_SOC_CON2 (0x0408)
+#define RK3328_GRF_SOC_CON10 (0x0428)
+#define INITIAL_FREQ (11289600)
+
+struct rk3328_codec_priv {
+ struct regmap *regmap;
+ struct regmap *grf;
+ struct clk *mclk;
+ struct clk *pclk;
+ unsigned int sclk;
+ int spk_depop_time; /* msec */
+};
+
+static const struct reg_default rk3328_codec_reg_defaults[] = {
+ { CODEC_RESET, 0x03 },
+ { DAC_INIT_CTRL1, 0x00 },
+ { DAC_INIT_CTRL2, 0x50 },
+ { DAC_INIT_CTRL3, 0x0e },
+ { DAC_PRECHARGE_CTRL, 0x01 },
+ { DAC_PWR_CTRL, 0x00 },
+ { DAC_CLK_CTRL, 0x00 },
+ { HPMIX_CTRL, 0x00 },
+ { HPOUT_CTRL, 0x00 },
+ { HPOUTL_GAIN_CTRL, 0x00 },
+ { HPOUTR_GAIN_CTRL, 0x00 },
+ { HPOUT_POP_CTRL, 0x11 },
+};
+
+static int rk3328_codec_reset(struct rk3328_codec_priv *rk3328)
+{
+ regmap_write(rk3328->regmap, CODEC_RESET, 0x00);
+ mdelay(10);
+ regmap_write(rk3328->regmap, CODEC_RESET, 0x03);
+
+ return 0;
+}
+
+static int rk3328_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct rk3328_codec_priv *rk3328 =
+ snd_soc_component_get_drvdata(dai->component);
+ unsigned int val;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ val = PIN_DIRECTION_IN | DAC_I2S_MODE_SLAVE;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ val = PIN_DIRECTION_OUT | DAC_I2S_MODE_MASTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(rk3328->regmap, DAC_INIT_CTRL1,
+ PIN_DIRECTION_MASK | DAC_I2S_MODE_MASK, val);
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ val = DAC_MODE_PCM;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ val = DAC_MODE_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val = DAC_MODE_RJM;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val = DAC_MODE_LJM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(rk3328->regmap, DAC_INIT_CTRL2,
+ DAC_MODE_MASK, val);
+
+ return 0;
+}
+
+static void rk3328_analog_output(struct rk3328_codec_priv *rk3328, int mute)
+{
+ unsigned int val = BIT(17);
+
+ if (mute)
+ val |= BIT(1);
+
+ regmap_write(rk3328->grf, RK3328_GRF_SOC_CON10, val);
+}
+
+static int rk3328_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct rk3328_codec_priv *rk3328 =
+ snd_soc_component_get_drvdata(dai->component);
+ unsigned int val;
+
+ if (mute)
+ val = HPOUTL_MUTE | HPOUTR_MUTE;
+ else
+ val = HPOUTL_UNMUTE | HPOUTR_UNMUTE;
+
+ regmap_update_bits(rk3328->regmap, HPOUT_CTRL,
+ HPOUTL_MUTE_MASK | HPOUTR_MUTE_MASK, val);
+
+ return 0;
+}
+
+static int rk3328_codec_power_on(struct rk3328_codec_priv *rk3328, int wait_ms)
+{
+ regmap_update_bits(rk3328->regmap, DAC_PRECHARGE_CTRL,
+ DAC_CHARGE_XCHARGE_MASK, DAC_CHARGE_PRECHARGE);
+ mdelay(10);
+ regmap_update_bits(rk3328->regmap, DAC_PRECHARGE_CTRL,
+ DAC_CHARGE_CURRENT_ALL_MASK,
+ DAC_CHARGE_CURRENT_ALL_ON);
+ mdelay(wait_ms);
+
+ return 0;
+}
+
+static int rk3328_codec_power_off(struct rk3328_codec_priv *rk3328, int wait_ms)
+{
+ regmap_update_bits(rk3328->regmap, DAC_PRECHARGE_CTRL,
+ DAC_CHARGE_XCHARGE_MASK, DAC_CHARGE_DISCHARGE);
+ mdelay(10);
+ regmap_update_bits(rk3328->regmap, DAC_PRECHARGE_CTRL,
+ DAC_CHARGE_CURRENT_ALL_MASK,
+ DAC_CHARGE_CURRENT_ALL_ON);
+ mdelay(wait_ms);
+
+ return 0;
+}
+
+static const struct rk3328_reg_msk_val playback_open_list[] = {
+ { DAC_PWR_CTRL, DAC_PWR_MASK, DAC_PWR_ON },
+ { DAC_PWR_CTRL, DACL_PATH_REFV_MASK | DACR_PATH_REFV_MASK,
+ DACL_PATH_REFV_ON | DACR_PATH_REFV_ON },
+ { DAC_PWR_CTRL, HPOUTL_ZERO_CROSSING_MASK | HPOUTR_ZERO_CROSSING_MASK,
+ HPOUTL_ZERO_CROSSING_ON | HPOUTR_ZERO_CROSSING_ON },
+ { HPOUT_POP_CTRL, HPOUTR_POP_MASK | HPOUTL_POP_MASK,
+ HPOUTR_POP_WORK | HPOUTL_POP_WORK },
+ { HPMIX_CTRL, HPMIXL_MASK | HPMIXR_MASK, HPMIXL_EN | HPMIXR_EN },
+ { HPMIX_CTRL, HPMIXL_INIT_MASK | HPMIXR_INIT_MASK,
+ HPMIXL_INIT_EN | HPMIXR_INIT_EN },
+ { HPOUT_CTRL, HPOUTL_MASK | HPOUTR_MASK, HPOUTL_EN | HPOUTR_EN },
+ { HPOUT_CTRL, HPOUTL_INIT_MASK | HPOUTR_INIT_MASK,
+ HPOUTL_INIT_EN | HPOUTR_INIT_EN },
+ { DAC_CLK_CTRL, DACL_REFV_MASK | DACR_REFV_MASK,
+ DACL_REFV_ON | DACR_REFV_ON },
+ { DAC_CLK_CTRL, DACL_CLK_MASK | DACR_CLK_MASK,
+ DACL_CLK_ON | DACR_CLK_ON },
+ { DAC_CLK_CTRL, DACL_MASK | DACR_MASK, DACL_ON | DACR_ON },
+ { DAC_CLK_CTRL, DACL_INIT_MASK | DACR_INIT_MASK,
+ DACL_INIT_ON | DACR_INIT_ON },
+ { DAC_SELECT, DACL_SELECT_MASK | DACR_SELECT_MASK,
+ DACL_SELECT | DACR_SELECT },
+ { HPMIX_CTRL, HPMIXL_INIT2_MASK | HPMIXR_INIT2_MASK,
+ HPMIXL_INIT2_EN | HPMIXR_INIT2_EN },
+ { HPOUT_CTRL, HPOUTL_MUTE_MASK | HPOUTR_MUTE_MASK,
+ HPOUTL_UNMUTE | HPOUTR_UNMUTE },
+};
+
+static int rk3328_codec_open_playback(struct rk3328_codec_priv *rk3328)
+{
+ int i;
+
+ regmap_update_bits(rk3328->regmap, DAC_PRECHARGE_CTRL,
+ DAC_CHARGE_CURRENT_ALL_MASK,
+ DAC_CHARGE_CURRENT_I);
+
+ for (i = 0; i < ARRAY_SIZE(playback_open_list); i++) {
+ regmap_update_bits(rk3328->regmap,
+ playback_open_list[i].reg,
+ playback_open_list[i].msk,
+ playback_open_list[i].val);
+ mdelay(1);
+ }
+
+ msleep(rk3328->spk_depop_time);
+ rk3328_analog_output(rk3328, 1);
+
+ regmap_update_bits(rk3328->regmap, HPOUTL_GAIN_CTRL,
+ HPOUTL_GAIN_MASK, OUT_VOLUME);
+ regmap_update_bits(rk3328->regmap, HPOUTR_GAIN_CTRL,
+ HPOUTR_GAIN_MASK, OUT_VOLUME);
+
+ return 0;
+}
+
+static const struct rk3328_reg_msk_val playback_close_list[] = {
+ { HPMIX_CTRL, HPMIXL_INIT2_MASK | HPMIXR_INIT2_MASK,
+ HPMIXL_INIT2_DIS | HPMIXR_INIT2_DIS },
+ { DAC_SELECT, DACL_SELECT_MASK | DACR_SELECT_MASK,
+ DACL_UNSELECT | DACR_UNSELECT },
+ { HPOUT_CTRL, HPOUTL_MUTE_MASK | HPOUTR_MUTE_MASK,
+ HPOUTL_MUTE | HPOUTR_MUTE },
+ { HPOUT_CTRL, HPOUTL_INIT_MASK | HPOUTR_INIT_MASK,
+ HPOUTL_INIT_DIS | HPOUTR_INIT_DIS },
+ { HPOUT_CTRL, HPOUTL_MASK | HPOUTR_MASK, HPOUTL_DIS | HPOUTR_DIS },
+ { HPMIX_CTRL, HPMIXL_MASK | HPMIXR_MASK, HPMIXL_DIS | HPMIXR_DIS },
+ { DAC_CLK_CTRL, DACL_MASK | DACR_MASK, DACL_OFF | DACR_OFF },
+ { DAC_CLK_CTRL, DACL_CLK_MASK | DACR_CLK_MASK,
+ DACL_CLK_OFF | DACR_CLK_OFF },
+ { DAC_CLK_CTRL, DACL_REFV_MASK | DACR_REFV_MASK,
+ DACL_REFV_OFF | DACR_REFV_OFF },
+ { HPOUT_POP_CTRL, HPOUTR_POP_MASK | HPOUTL_POP_MASK,
+ HPOUTR_POP_XCHARGE | HPOUTL_POP_XCHARGE },
+ { DAC_PWR_CTRL, DACL_PATH_REFV_MASK | DACR_PATH_REFV_MASK,
+ DACL_PATH_REFV_OFF | DACR_PATH_REFV_OFF },
+ { DAC_PWR_CTRL, DAC_PWR_MASK, DAC_PWR_OFF },
+ { HPMIX_CTRL, HPMIXL_INIT_MASK | HPMIXR_INIT_MASK,
+ HPMIXL_INIT_DIS | HPMIXR_INIT_DIS },
+ { DAC_CLK_CTRL, DACL_INIT_MASK | DACR_INIT_MASK,
+ DACL_INIT_OFF | DACR_INIT_OFF },
+};
+
+static int rk3328_codec_close_playback(struct rk3328_codec_priv *rk3328)
+{
+ size_t i;
+
+ rk3328_analog_output(rk3328, 0);
+
+ regmap_update_bits(rk3328->regmap, HPOUTL_GAIN_CTRL,
+ HPOUTL_GAIN_MASK, 0);
+ regmap_update_bits(rk3328->regmap, HPOUTR_GAIN_CTRL,
+ HPOUTR_GAIN_MASK, 0);
+
+ for (i = 0; i < ARRAY_SIZE(playback_close_list); i++) {
+ regmap_update_bits(rk3328->regmap,
+ playback_close_list[i].reg,
+ playback_close_list[i].msk,
+ playback_close_list[i].val);
+ mdelay(1);
+ }
+
+ /* Workaround for silence when changing Fs from 48 kHz to 44.1 kHz */
+ rk3328_codec_reset(rk3328);
+
+ regmap_update_bits(rk3328->regmap, DAC_PRECHARGE_CTRL,
+ DAC_CHARGE_CURRENT_ALL_MASK,
+ DAC_CHARGE_CURRENT_ALL_ON);
+
+ return 0;
+}
+
+static int rk3328_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct rk3328_codec_priv *rk3328 =
+ snd_soc_component_get_drvdata(dai->component);
+ unsigned int val = 0;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ val = DAC_VDL_16BITS;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ val = DAC_VDL_20BITS;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ val = DAC_VDL_24BITS;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ val = DAC_VDL_32BITS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ regmap_update_bits(rk3328->regmap, DAC_INIT_CTRL2, DAC_VDL_MASK, val);
+
+ val = DAC_WL_32BITS | DAC_RST_DIS;
+ regmap_update_bits(rk3328->regmap, DAC_INIT_CTRL3,
+ DAC_WL_MASK | DAC_RST_MASK, val);
+
+ return 0;
+}
+
+static int rk3328_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct rk3328_codec_priv *rk3328 =
+ snd_soc_component_get_drvdata(dai->component);
+
+ return rk3328_codec_open_playback(rk3328);
+}
+
+static void rk3328_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct rk3328_codec_priv *rk3328 =
+ snd_soc_component_get_drvdata(dai->component);
+
+ rk3328_codec_close_playback(rk3328);
+}
+
+static const struct snd_soc_dai_ops rk3328_dai_ops = {
+ .hw_params = rk3328_hw_params,
+ .set_fmt = rk3328_set_dai_fmt,
+ .digital_mute = rk3328_digital_mute,
+ .startup = rk3328_pcm_startup,
+ .shutdown = rk3328_pcm_shutdown,
+};
+
+static struct snd_soc_dai_driver rk3328_dai[] = {
+ {
+ .name = "rk3328-hifi",
+ .id = RK3328_HIFI,
+ .playback = {
+ .stream_name = "HIFI Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ },
+ .capture = {
+ .stream_name = "HIFI Capture",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ },
+ .ops = &rk3328_dai_ops,
+ },
+};
+
+static int rk3328_codec_probe(struct snd_soc_component *component)
+{
+ struct rk3328_codec_priv *rk3328 =
+ snd_soc_component_get_drvdata(component);
+
+ rk3328_codec_reset(rk3328);
+ rk3328_codec_power_on(rk3328, 0);
+
+ return 0;
+}
+
+static void rk3328_codec_remove(struct snd_soc_component *component)
+{
+ struct rk3328_codec_priv *rk3328 =
+ snd_soc_component_get_drvdata(component);
+
+ rk3328_codec_close_playback(rk3328);
+ rk3328_codec_power_off(rk3328, 0);
+}
+
+static const struct snd_soc_component_driver soc_codec_rk3328 = {
+ .probe = rk3328_codec_probe,
+ .remove = rk3328_codec_remove,
+};
+
+static bool rk3328_codec_write_read_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CODEC_RESET:
+ case DAC_INIT_CTRL1:
+ case DAC_INIT_CTRL2:
+ case DAC_INIT_CTRL3:
+ case DAC_PRECHARGE_CTRL:
+ case DAC_PWR_CTRL:
+ case DAC_CLK_CTRL:
+ case HPMIX_CTRL:
+ case DAC_SELECT:
+ case HPOUT_CTRL:
+ case HPOUTL_GAIN_CTRL:
+ case HPOUTR_GAIN_CTRL:
+ case HPOUT_POP_CTRL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rk3328_codec_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CODEC_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config rk3328_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = HPOUT_POP_CTRL,
+ .writeable_reg = rk3328_codec_write_read_reg,
+ .readable_reg = rk3328_codec_write_read_reg,
+ .volatile_reg = rk3328_codec_volatile_reg,
+ .reg_defaults = rk3328_codec_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(rk3328_codec_reg_defaults),
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int rk3328_platform_probe(struct platform_device *pdev)
+{
+ struct device_node *rk3328_np = pdev->dev.of_node;
+ struct rk3328_codec_priv *rk3328;
+ struct resource *res;
+ struct regmap *grf;
+ void __iomem *base;
+ int ret = 0;
+
+ rk3328 = devm_kzalloc(&pdev->dev, sizeof(*rk3328), GFP_KERNEL);
+ if (!rk3328)
+ return -ENOMEM;
+
+ grf = syscon_regmap_lookup_by_phandle(rk3328_np,
+ "rockchip,grf");
+ if (IS_ERR(grf)) {
+ dev_err(&pdev->dev, "missing 'rockchip,grf'\n");
+ return PTR_ERR(grf);
+ }
+ rk3328->grf = grf;
+ /* enable i2s_acodec_en */
+ regmap_write(grf, RK3328_GRF_SOC_CON2,
+ (BIT(14) << 16 | BIT(14)));
+
+ ret = of_property_read_u32(rk3328_np, "spk-depop-time-ms",
+ &rk3328->spk_depop_time);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "spk_depop_time use default value.\n");
+ rk3328->spk_depop_time = 200;
+ }
+
+ rk3328_analog_output(rk3328, 0);
+
+ rk3328->mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(rk3328->mclk))
+ return PTR_ERR(rk3328->mclk);
+
+ ret = clk_prepare_enable(rk3328->mclk);
+ if (ret)
+ return ret;
+ clk_set_rate(rk3328->mclk, INITIAL_FREQ);
+
+ rk3328->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(rk3328->pclk)) {
+ dev_err(&pdev->dev, "can't get acodec pclk\n");
+ return PTR_ERR(rk3328->pclk);
+ }
+
+ ret = clk_prepare_enable(rk3328->pclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable acodec pclk\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ rk3328->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &rk3328_codec_regmap_config);
+ if (IS_ERR(rk3328->regmap))
+ return PTR_ERR(rk3328->regmap);
+
+ platform_set_drvdata(pdev, rk3328);
+
+ return devm_snd_soc_register_component(&pdev->dev, &soc_codec_rk3328,
+ rk3328_dai,
+ ARRAY_SIZE(rk3328_dai));
+}
+
+static const struct of_device_id rk3328_codec_of_match[] = {
+ { .compatible = "rockchip,rk3328-codec", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rk3328_codec_of_match);
+
+static struct platform_driver rk3328_codec_driver = {
+ .driver = {
+ .name = "rk3328-codec",
+ .of_match_table = of_match_ptr(rk3328_codec_of_match),
+ },
+ .probe = rk3328_platform_probe,
+};
+module_platform_driver(rk3328_codec_driver);
+
+MODULE_AUTHOR("Sugar Zhang <sugar.zhang@rock-chips.com>");
+MODULE_DESCRIPTION("ASoC rk3328 codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rk3328_codec.h b/sound/soc/codecs/rk3328_codec.h
new file mode 100644
index 000000000000..655103586241
--- /dev/null
+++ b/sound/soc/codecs/rk3328_codec.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * rk3328 ALSA SoC Audio driver
+ *
+ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd All rights reserved.
+ */
+
+#ifndef _RK3328_CODEC_H
+#define _RK3328_CODEC_H
+
+#include <linux/bitfield.h>
+
+/* codec register */
+#define CODEC_RESET (0x00 << 2)
+#define DAC_INIT_CTRL1 (0x03 << 2)
+#define DAC_INIT_CTRL2 (0x04 << 2)
+#define DAC_INIT_CTRL3 (0x05 << 2)
+#define DAC_PRECHARGE_CTRL (0x22 << 2)
+#define DAC_PWR_CTRL (0x23 << 2)
+#define DAC_CLK_CTRL (0x24 << 2)
+#define HPMIX_CTRL (0x25 << 2)
+#define DAC_SELECT (0x26 << 2)
+#define HPOUT_CTRL (0x27 << 2)
+#define HPOUTL_GAIN_CTRL (0x28 << 2)
+#define HPOUTR_GAIN_CTRL (0x29 << 2)
+#define HPOUT_POP_CTRL (0x2a << 2)
+
+/* REG00: CODEC_RESET */
+#define PWR_RST_BYPASS_DIS (0x0 << 6)
+#define PWR_RST_BYPASS_EN (0x1 << 6)
+#define DIG_CORE_RST (0x0 << 1)
+#define DIG_CORE_WORK (0x1 << 1)
+#define SYS_RST (0x0 << 0)
+#define SYS_WORK (0x1 << 0)
+
+/* REG03: DAC_INIT_CTRL1 */
+#define PIN_DIRECTION_MASK BIT(5)
+#define PIN_DIRECTION_IN (0x0 << 5)
+#define PIN_DIRECTION_OUT (0x1 << 5)
+#define DAC_I2S_MODE_MASK BIT(4)
+#define DAC_I2S_MODE_SLAVE (0x0 << 4)
+#define DAC_I2S_MODE_MASTER (0x1 << 4)
+
+/* REG04: DAC_INIT_CTRL2 */
+#define DAC_I2S_LRP_MASK BIT(7)
+#define DAC_I2S_LRP_NORMAL (0x0 << 7)
+#define DAC_I2S_LRP_REVERSAL (0x1 << 7)
+#define DAC_VDL_MASK GENMASK(6, 5)
+#define DAC_VDL_16BITS (0x0 << 5)
+#define DAC_VDL_20BITS (0x1 << 5)
+#define DAC_VDL_24BITS (0x2 << 5)
+#define DAC_VDL_32BITS (0x3 << 5)
+#define DAC_MODE_MASK GENMASK(4, 3)
+#define DAC_MODE_RJM (0x0 << 3)
+#define DAC_MODE_LJM (0x1 << 3)
+#define DAC_MODE_I2S (0x2 << 3)
+#define DAC_MODE_PCM (0x3 << 3)
+#define DAC_LR_SWAP_MASK BIT(2)
+#define DAC_LR_SWAP_DIS (0x0 << 2)
+#define DAC_LR_SWAP_EN (0x1 << 2)
+
+/* REG05: DAC_INIT_CTRL3 */
+#define DAC_WL_MASK GENMASK(3, 2)
+#define DAC_WL_16BITS (0x0 << 2)
+#define DAC_WL_20BITS (0x1 << 2)
+#define DAC_WL_24BITS (0x2 << 2)
+#define DAC_WL_32BITS (0x3 << 2)
+#define DAC_RST_MASK BIT(1)
+#define DAC_RST_EN (0x0 << 1)
+#define DAC_RST_DIS (0x1 << 1)
+#define DAC_BCP_MASK BIT(0)
+#define DAC_BCP_NORMAL (0x0 << 0)
+#define DAC_BCP_REVERSAL (0x1 << 0)
+
+/* REG22: DAC_PRECHARGE_CTRL */
+#define DAC_CHARGE_XCHARGE_MASK BIT(7)
+#define DAC_CHARGE_DISCHARGE (0x0 << 7)
+#define DAC_CHARGE_PRECHARGE (0x1 << 7)
+#define DAC_CHARGE_CURRENT_64I_MASK BIT(6)
+#define DAC_CHARGE_CURRENT_64I (0x1 << 6)
+#define DAC_CHARGE_CURRENT_32I_MASK BIT(5)
+#define DAC_CHARGE_CURRENT_32I (0x1 << 5)
+#define DAC_CHARGE_CURRENT_16I_MASK BIT(4)
+#define DAC_CHARGE_CURRENT_16I (0x1 << 4)
+#define DAC_CHARGE_CURRENT_08I_MASK BIT(3)
+#define DAC_CHARGE_CURRENT_08I (0x1 << 3)
+#define DAC_CHARGE_CURRENT_04I_MASK BIT(2)
+#define DAC_CHARGE_CURRENT_04I (0x1 << 2)
+#define DAC_CHARGE_CURRENT_02I_MASK BIT(1)
+#define DAC_CHARGE_CURRENT_02I (0x1 << 1)
+#define DAC_CHARGE_CURRENT_I_MASK BIT(0)
+#define DAC_CHARGE_CURRENT_I (0x1 << 0)
+#define DAC_CHARGE_CURRENT_ALL_MASK GENMASK(6, 0)
+#define DAC_CHARGE_CURRENT_ALL_OFF 0x00
+#define DAC_CHARGE_CURRENT_ALL_ON 0x7f
+
+/* REG23: DAC_PWR_CTRL */
+#define DAC_PWR_MASK BIT(6)
+#define DAC_PWR_OFF (0x0 << 6)
+#define DAC_PWR_ON (0x1 << 6)
+#define DACL_PATH_REFV_MASK BIT(5)
+#define DACL_PATH_REFV_OFF (0x0 << 5)
+#define DACL_PATH_REFV_ON (0x1 << 5)
+#define HPOUTL_ZERO_CROSSING_MASK BIT(4)
+#define HPOUTL_ZERO_CROSSING_OFF (0x0 << 4)
+#define HPOUTL_ZERO_CROSSING_ON (0x1 << 4)
+#define DACR_PATH_REFV_MASK BIT(1)
+#define DACR_PATH_REFV_OFF (0x0 << 1)
+#define DACR_PATH_REFV_ON (0x1 << 1)
+#define HPOUTR_ZERO_CROSSING_MASK BIT(0)
+#define HPOUTR_ZERO_CROSSING_OFF (0x0 << 0)
+#define HPOUTR_ZERO_CROSSING_ON (0x1 << 0)
+
+/* REG24: DAC_CLK_CTRL */
+#define DACL_REFV_MASK BIT(7)
+#define DACL_REFV_OFF (0x0 << 7)
+#define DACL_REFV_ON (0x1 << 7)
+#define DACL_CLK_MASK BIT(6)
+#define DACL_CLK_OFF (0x0 << 6)
+#define DACL_CLK_ON (0x1 << 6)
+#define DACL_MASK BIT(5)
+#define DACL_OFF (0x0 << 5)
+#define DACL_ON (0x1 << 5)
+#define DACL_INIT_MASK BIT(4)
+#define DACL_INIT_OFF (0x0 << 4)
+#define DACL_INIT_ON (0x1 << 4)
+#define DACR_REFV_MASK BIT(3)
+#define DACR_REFV_OFF (0x0 << 3)
+#define DACR_REFV_ON (0x1 << 3)
+#define DACR_CLK_MASK BIT(2)
+#define DACR_CLK_OFF (0x0 << 2)
+#define DACR_CLK_ON (0x1 << 2)
+#define DACR_MASK BIT(1)
+#define DACR_OFF (0x0 << 1)
+#define DACR_ON (0x1 << 1)
+#define DACR_INIT_MASK BIT(0)
+#define DACR_INIT_OFF (0x0 << 0)
+#define DACR_INIT_ON (0x1 << 0)
+
+/* REG25: HPMIX_CTRL*/
+#define HPMIXL_MASK BIT(6)
+#define HPMIXL_DIS (0x0 << 6)
+#define HPMIXL_EN (0x1 << 6)
+#define HPMIXL_INIT_MASK BIT(5)
+#define HPMIXL_INIT_DIS (0x0 << 5)
+#define HPMIXL_INIT_EN (0x1 << 5)
+#define HPMIXL_INIT2_MASK BIT(4)
+#define HPMIXL_INIT2_DIS (0x0 << 4)
+#define HPMIXL_INIT2_EN (0x1 << 4)
+#define HPMIXR_MASK BIT(2)
+#define HPMIXR_DIS (0x0 << 2)
+#define HPMIXR_EN (0x1 << 2)
+#define HPMIXR_INIT_MASK BIT(1)
+#define HPMIXR_INIT_DIS (0x0 << 1)
+#define HPMIXR_INIT_EN (0x1 << 1)
+#define HPMIXR_INIT2_MASK BIT(0)
+#define HPMIXR_INIT2_DIS (0x0 << 0)
+#define HPMIXR_INIT2_EN (0x1 << 0)
+
+/* REG26: DAC_SELECT */
+#define DACL_SELECT_MASK BIT(4)
+#define DACL_UNSELECT (0x0 << 4)
+#define DACL_SELECT (0x1 << 4)
+#define DACR_SELECT_MASK BIT(0)
+#define DACR_UNSELECT (0x0 << 0)
+#define DACR_SELECT (0x1 << 0)
+
+/* REG27: HPOUT_CTRL */
+#define HPOUTL_MASK BIT(7)
+#define HPOUTL_DIS (0x0 << 7)
+#define HPOUTL_EN (0x1 << 7)
+#define HPOUTL_INIT_MASK BIT(6)
+#define HPOUTL_INIT_DIS (0x0 << 6)
+#define HPOUTL_INIT_EN (0x1 << 6)
+#define HPOUTL_MUTE_MASK BIT(5)
+#define HPOUTL_MUTE (0x0 << 5)
+#define HPOUTL_UNMUTE (0x1 << 5)
+#define HPOUTR_MASK BIT(4)
+#define HPOUTR_DIS (0x0 << 4)
+#define HPOUTR_EN (0x1 << 4)
+#define HPOUTR_INIT_MASK BIT(3)
+#define HPOUTR_INIT_DIS (0x0 << 3)
+#define HPOUTR_INIT_EN (0x1 << 3)
+#define HPOUTR_MUTE_MASK BIT(2)
+#define HPOUTR_MUTE (0x0 << 2)
+#define HPOUTR_UNMUTE (0x1 << 2)
+
+/* REG28: HPOUTL_GAIN_CTRL */
+#define HPOUTL_GAIN_MASK GENMASK(4, 0)
+
+/* REG29: HPOUTR_GAIN_CTRL */
+#define HPOUTR_GAIN_MASK GENMASK(4, 0)
+
+/* REG2a: HPOUT_POP_CTRL */
+#define HPOUTR_POP_MASK GENMASK(5, 4)
+#define HPOUTR_POP_XCHARGE (0x1 << 4)
+#define HPOUTR_POP_WORK (0x2 << 4)
+#define HPOUTL_POP_MASK GENMASK(1, 0)
+#define HPOUTL_POP_XCHARGE (0x1 << 0)
+#define HPOUTL_POP_WORK (0x2 << 0)
+
+#define RK3328_HIFI 0
+
+struct rk3328_reg_msk_val {
+ unsigned int reg;
+ unsigned int msk;
+ unsigned int val;
+};
+
+#endif
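The register macros above are pre-shifted by two (0x2a << 2 and so on) because the regmap in rk3328_codec.c is declared with 32-bit registers and reg_stride = 4, so the macros must carry byte offsets rather than raw register indices. A quick compile-time sanity check, assuming that layout:

#include <linux/build_bug.h>

static inline void rk3328_codec_check_reg_layout(void)
{
        /* Raw index 0x2a scaled by the 4-byte stride gives byte offset 0xa8 */
        BUILD_BUG_ON(HPOUT_POP_CTRL != (0x2a << 2));
        BUILD_BUG_ON((0x2a << 2) != 0xa8);
}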
diff --git a/sound/soc/codecs/rl6347a.c b/sound/soc/codecs/rl6347a.c
index 8f571cf8edd4..c0d729b45277 100644
--- a/sound/soc/codecs/rl6347a.c
+++ b/sound/soc/codecs/rl6347a.c
@@ -64,8 +64,8 @@ int rl6347a_hw_read(void *context, unsigned int reg, unsigned int *value)
struct i2c_client *client = context;
struct i2c_msg xfer[2];
int ret;
- __be32 be_reg;
- unsigned int index, vid, buf = 0x0;
+ __be32 be_reg, buf = 0x0;
+ unsigned int index, vid;
/* handle index registers */
if (reg <= 0xff) {
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index 0ef966d56bac..adf59039a3b6 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -353,6 +353,7 @@ static void rt274_index_sync(struct snd_soc_component *component)
static int rt274_jack_detect(struct rt274_priv *rt274, bool *hp, bool *mic)
{
unsigned int buf;
+ int ret;
*hp = false;
*mic = false;
@@ -360,9 +361,15 @@ static int rt274_jack_detect(struct rt274_priv *rt274, bool *hp, bool *mic)
if (!rt274->component)
return -EINVAL;
- regmap_read(rt274->regmap, RT274_GET_HP_SENSE, &buf);
+ ret = regmap_read(rt274->regmap, RT274_GET_HP_SENSE, &buf);
+ if (ret)
+ return ret;
+
*hp = buf & 0x80000000;
- regmap_read(rt274->regmap, RT274_GET_MIC_SENSE, &buf);
+ ret = regmap_read(rt274->regmap, RT274_GET_MIC_SENSE, &buf);
+ if (ret)
+ return ret;
+
*mic = buf & 0x80000000;
pr_debug("*hp = %d *mic = %d\n", *hp, *mic);
@@ -381,10 +388,10 @@ static void rt274_jack_detect_work(struct work_struct *work)
if (rt274_jack_detect(rt274, &hp, &mic) < 0)
return;
- if (hp == true)
+ if (hp)
status |= SND_JACK_HEADPHONE;
- if (mic == true)
+ if (mic)
status |= SND_JACK_MICROPHONE;
snd_soc_jack_report(rt274->jack, status,
@@ -955,10 +962,10 @@ static irqreturn_t rt274_irq(int irq, void *data)
ret = rt274_jack_detect(rt274, &hp, &mic);
if (ret == 0) {
- if (hp == true)
+ if (hp)
status |= SND_JACK_HEADPHONE;
- if (mic == true)
+ if (mic)
status |= SND_JACK_MICROPHONE;
snd_soc_jack_report(rt274->jack, status,
@@ -1128,8 +1135,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c,
return ret;
}
- regmap_read(rt274->regmap,
+ ret = regmap_read(rt274->regmap,
RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
+ if (ret)
+ return ret;
+
if (val != RT274_VENDOR_ID) {
dev_err(&i2c->dev,
"Device with ID register %#x is not rt274\n", val);
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 0b0f748bffbe..c9457c247a03 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -296,10 +296,10 @@ static void rt286_jack_detect_work(struct work_struct *work)
rt286_jack_detect(rt286, &hp, &mic);
- if (hp == true)
+ if (hp)
status |= SND_JACK_HEADPHONE;
- if (mic == true)
+ if (mic)
status |= SND_JACK_MICROPHONE;
snd_soc_jack_report(rt286->jack, status,
@@ -924,10 +924,10 @@ static irqreturn_t rt286_irq(int irq, void *data)
/* Clear IRQ */
regmap_update_bits(rt286->regmap, RT286_IRQ_CTRL, 0x1, 0x1);
- if (hp == true)
+ if (hp)
status |= SND_JACK_HEADPHONE;
- if (mic == true)
+ if (mic)
status |= SND_JACK_MICROPHONE;
snd_soc_jack_report(rt286->jack, status,
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 06cdba4edfe2..bcf5bab31969 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -314,10 +314,10 @@ static void rt298_jack_detect_work(struct work_struct *work)
if (rt298_jack_detect(rt298, &hp, &mic) < 0)
return;
- if (hp == true)
+ if (hp)
status |= SND_JACK_HEADPHONE;
- if (mic == true)
+ if (mic)
status |= SND_JACK_MICROPHONE;
snd_soc_jack_report(rt298->jack, status,
@@ -345,10 +345,10 @@ int rt298_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *j
regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x2);
rt298_jack_detect(rt298, &hp, &mic);
- if (hp == true)
+ if (hp)
status |= SND_JACK_HEADPHONE;
- if (mic == true)
+ if (mic)
status |= SND_JACK_MICROPHONE;
snd_soc_jack_report(rt298->jack, status,
@@ -989,10 +989,10 @@ static irqreturn_t rt298_irq(int irq, void *data)
regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x1, 0x1);
if (ret == 0) {
- if (hp == true)
+ if (hp)
status |= SND_JACK_HEADPHONE;
- if (mic == true)
+ if (mic)
status |= SND_JACK_MICROPHONE;
snd_soc_jack_report(rt298->jack, status,
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 4d46f4567c3a..bec2eefa8b0f 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
GFP_KERNEL);
+ if (!rt5514_dsp)
+ return -ENOMEM;
rt5514_dsp->dev = &rt5514_spi->dev;
mutex_init(&rt5514_dsp->dma_lock);
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index a67de68b6da6..f9ad6e36ab16 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -489,6 +489,7 @@ static const struct snd_kcontrol_new rt5514_sto2_dmic_mux =
/**
* rt5514_calc_dmic_clk - Calculate the frequency divider parameter of dmic.
*
+ * @component: only used for dev_warn
* @rate: base clock rate.
*
* Choose divider parameter that gives the highest possible DMIC frequency in
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index fc530481a6e4..b3580ecadecf 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -977,11 +977,11 @@ static int rt5640_hp_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
rt5640_pmu_depop(component);
- rt5640->hp_mute = 0;
+ rt5640->hp_mute = false;
break;
case SND_SOC_DAPM_PRE_PMD:
- rt5640->hp_mute = 1;
+ rt5640->hp_mute = true;
msleep(70);
break;
@@ -2822,7 +2822,7 @@ static int rt5640_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5640->regmap, RT5640_DUMMY1,
RT5640_MCLK_DET, RT5640_MCLK_DET);
- rt5640->hp_mute = 1;
+ rt5640->hp_mute = true;
rt5640->irq = i2c->irq;
INIT_DELAYED_WORK(&rt5640->bp_work, rt5640_button_press_work);
INIT_WORK(&rt5640->jack_work, rt5640_jack_work);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index be674688dc40..9a0751978090 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -401,6 +401,11 @@ struct rt5645_eq_param_s {
unsigned short val;
};
+struct rt5645_eq_param_s_be16 {
+ __be16 reg;
+ __be16 val;
+};
+
static const char *const rt5645_supply_names[] = {
"avdd",
"cpvdd",
@@ -672,8 +677,8 @@ static int rt5645_hweq_get(struct snd_kcontrol *kcontrol,
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
- struct rt5645_eq_param_s *eq_param =
- (struct rt5645_eq_param_s *)ucontrol->value.bytes.data;
+ struct rt5645_eq_param_s_be16 *eq_param =
+ (struct rt5645_eq_param_s_be16 *)ucontrol->value.bytes.data;
int i;
for (i = 0; i < RT5645_HWEQ_NUM; i++) {
@@ -698,36 +703,33 @@ static int rt5645_hweq_put(struct snd_kcontrol *kcontrol,
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
- struct rt5645_eq_param_s *eq_param =
- (struct rt5645_eq_param_s *)ucontrol->value.bytes.data;
+ struct rt5645_eq_param_s_be16 *eq_param =
+ (struct rt5645_eq_param_s_be16 *)ucontrol->value.bytes.data;
int i;
for (i = 0; i < RT5645_HWEQ_NUM; i++) {
- eq_param[i].reg = be16_to_cpu(eq_param[i].reg);
- eq_param[i].val = be16_to_cpu(eq_param[i].val);
+ rt5645->eq_param[i].reg = be16_to_cpu(eq_param[i].reg);
+ rt5645->eq_param[i].val = be16_to_cpu(eq_param[i].val);
}
/* The final setting of the table should be RT5645_EQ_CTRL2 */
for (i = RT5645_HWEQ_NUM - 1; i >= 0; i--) {
- if (eq_param[i].reg == 0)
+ if (rt5645->eq_param[i].reg == 0)
continue;
- else if (eq_param[i].reg != RT5645_EQ_CTRL2)
+ else if (rt5645->eq_param[i].reg != RT5645_EQ_CTRL2)
return 0;
else
break;
}
for (i = 0; i < RT5645_HWEQ_NUM; i++) {
- if (!rt5645_validate_hweq(eq_param[i].reg) &&
- eq_param[i].reg != 0)
+ if (!rt5645_validate_hweq(rt5645->eq_param[i].reg) &&
+ rt5645->eq_param[i].reg != 0)
return 0;
- else if (eq_param[i].reg == 0)
+ else if (rt5645->eq_param[i].reg == 0)
break;
}
- memcpy(rt5645->eq_param, eq_param,
- RT5645_HWEQ_NUM * sizeof(struct rt5645_eq_param_s));
-
return 0;
}
@@ -1288,30 +1290,6 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5645_dac_r2_mux =
SOC_DAPM_ENUM("DAC2 R source", rt5645_dac2r_enum);
-
-/* INL/R source */
-static const char * const rt5645_inl_src[] = {
- "IN2P", "MonoP"
-};
-
-static SOC_ENUM_SINGLE_DECL(
- rt5645_inl_enum, RT5645_INL1_INR1_VOL,
- RT5645_INL_SEL_SFT, rt5645_inl_src);
-
-static const struct snd_kcontrol_new rt5645_inl_mux =
- SOC_DAPM_ENUM("INL source", rt5645_inl_enum);
-
-static const char * const rt5645_inr_src[] = {
- "IN2N", "MonoN"
-};
-
-static SOC_ENUM_SINGLE_DECL(
- rt5645_inr_enum, RT5645_INL1_INR1_VOL,
- RT5645_INR_SEL_SFT, rt5645_inr_src);
-
-static const struct snd_kcontrol_new rt5645_inr_mux =
- SOC_DAPM_ENUM("INR source", rt5645_inr_enum);
-
/* Stereo1 ADC source */
/* MX-27 [12] */
static const char * const rt5645_stereo_adc1_src[] = {
@@ -1611,18 +1589,6 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5645_if2_adc_in_mux =
SOC_DAPM_ENUM("IF2 ADC IN source", rt5645_if2_adc_in_enum);
-/* MX-2F [1:0] */
-static const char * const rt5645_if3_adc_in_src[] = {
- "IF_ADC1", "IF_ADC2", "VAD_ADC"
-};
-
-static SOC_ENUM_SINGLE_DECL(
- rt5645_if3_adc_in_enum, RT5645_DIG_INF1_DATA,
- RT5645_IF3_ADC_IN_SFT, rt5645_if3_adc_in_src);
-
-static const struct snd_kcontrol_new rt5645_if3_adc_in_mux =
- SOC_DAPM_ENUM("IF3 ADC IN source", rt5645_if3_adc_in_enum);
-
/* MX-31 [15] [13] [11] [9] */
static const char * const rt5645_pdm_src[] = {
"Mono DAC", "Stereo DAC"
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index b7ba64350a07..29b2d60076b0 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
@@ -747,11 +748,11 @@ static int rt5651_hp_event(struct snd_soc_dapm_widget *w,
RT5651_HP_CP_PD | RT5651_HP_SG_EN);
regmap_update_bits(rt5651->regmap, RT5651_PR_BASE +
RT5651_CHPUMP_INT_REG1, 0x0700, 0x0400);
- rt5651->hp_mute = 0;
+ rt5651->hp_mute = false;
break;
case SND_SOC_DAPM_PRE_PMD:
- rt5651->hp_mute = 1;
+ rt5651->hp_mute = true;
usleep_range(70000, 75000);
break;
@@ -1621,6 +1622,12 @@ static bool rt5651_jack_inserted(struct snd_soc_component *component)
struct rt5651_priv *rt5651 = snd_soc_component_get_drvdata(component);
int val;
+ if (rt5651->gpiod_hp_det) {
+ val = gpiod_get_value_cansleep(rt5651->gpiod_hp_det);
+ dev_dbg(component->dev, "jack-detect gpio %d\n", val);
+ return val;
+ }
+
val = snd_soc_component_read32(component, RT5651_INT_IRQ_ST);
dev_dbg(component->dev, "irq status %#04x\n", val);
@@ -1761,6 +1768,13 @@ static int rt5651_detect_headset(struct snd_soc_component *component)
return SND_JACK_HEADPHONE;
}
+static bool rt5651_support_button_press(struct rt5651_priv *rt5651)
+{
+ /* Button press support only works with internal jack-detection */
+ return (rt5651->hp_jack->status & SND_JACK_MICROPHONE) &&
+ rt5651->gpiod_hp_det == NULL;
+}
+
static void rt5651_jack_detect_work(struct work_struct *work)
{
struct rt5651_priv *rt5651 =
@@ -1785,15 +1799,15 @@ static void rt5651_jack_detect_work(struct work_struct *work)
WARN_ON(rt5651->ovcd_irq_enabled);
rt5651_enable_micbias1_for_ovcd(component);
report = rt5651_detect_headset(component);
- if (report == SND_JACK_HEADSET) {
+ dev_dbg(component->dev, "detect report %#02x\n", report);
+ snd_soc_jack_report(rt5651->hp_jack, report, SND_JACK_HEADSET);
+ if (rt5651_support_button_press(rt5651)) {
/* Enable ovcd IRQ for button press detect. */
rt5651_enable_micbias1_ovcd_irq(component);
} else {
/* No more need for overcurrent detect. */
rt5651_disable_micbias1_for_ovcd(component);
}
- dev_dbg(component->dev, "detect report %#02x\n", report);
- snd_soc_jack_report(rt5651->hp_jack, report, SND_JACK_HEADSET);
} else if (rt5651->ovcd_irq_enabled && rt5651_micbias1_ovcd(component)) {
dev_dbg(component->dev, "OVCD IRQ\n");
@@ -1837,16 +1851,20 @@ static void rt5651_cancel_work(void *data)
}
static void rt5651_enable_jack_detect(struct snd_soc_component *component,
- struct snd_soc_jack *hp_jack)
+ struct snd_soc_jack *hp_jack,
+ struct gpio_desc *gpiod_hp_det)
{
struct rt5651_priv *rt5651 = snd_soc_component_get_drvdata(component);
-
- /* IRQ output on GPIO1 */
- snd_soc_component_update_bits(component, RT5651_GPIO_CTRL1,
- RT5651_GP1_PIN_MASK, RT5651_GP1_PIN_IRQ);
+ bool using_internal_jack_detect = true;
/* Select jack detect source */
switch (rt5651->jd_src) {
+ case RT5651_JD_NULL:
+ rt5651->gpiod_hp_det = gpiod_hp_det;
+ if (!rt5651->gpiod_hp_det)
+ return; /* No jack detect */
+ using_internal_jack_detect = false;
+ break;
case RT5651_JD1_1:
snd_soc_component_update_bits(component, RT5651_JD_CTRL2,
RT5651_JD_TRG_SEL_MASK, RT5651_JD_TRG_SEL_JD1_1);
@@ -1865,16 +1883,20 @@ static void rt5651_enable_jack_detect(struct snd_soc_component *component,
snd_soc_component_update_bits(component, RT5651_IRQ_CTRL1,
RT5651_JD2_IRQ_EN, RT5651_JD2_IRQ_EN);
break;
- case RT5651_JD_NULL:
- return;
default:
dev_err(component->dev, "Currently only JD1_1 / JD1_2 / JD2 are supported\n");
return;
}
- /* Enable jack detect power */
- snd_soc_component_update_bits(component, RT5651_PWR_ANLG2,
- RT5651_PWR_JD_M, RT5651_PWR_JD_M);
+ if (using_internal_jack_detect) {
+ /* IRQ output on GPIO1 */
+ snd_soc_component_update_bits(component, RT5651_GPIO_CTRL1,
+ RT5651_GP1_PIN_MASK, RT5651_GP1_PIN_IRQ);
+
+ /* Enable jack detect power */
+ snd_soc_component_update_bits(component, RT5651_PWR_ANLG2,
+ RT5651_PWR_JD_M, RT5651_PWR_JD_M);
+ }
/* Set OVCD threshold current and scale-factor */
snd_soc_component_write(component, RT5651_PR_BASE + RT5651_BIAS_CUR4,
@@ -1903,7 +1925,7 @@ static void rt5651_enable_jack_detect(struct snd_soc_component *component,
RT5651_MB1_OC_STKY_MASK, RT5651_MB1_OC_STKY_EN);
rt5651->hp_jack = hp_jack;
- if (rt5651->hp_jack->status & SND_JACK_MICROPHONE) {
+ if (rt5651_support_button_press(rt5651)) {
rt5651_enable_micbias1_for_ovcd(component);
rt5651_enable_micbias1_ovcd_irq(component);
}
@@ -1920,7 +1942,7 @@ static void rt5651_disable_jack_detect(struct snd_soc_component *component)
disable_irq(rt5651->irq);
rt5651_cancel_work(rt5651);
- if (rt5651->hp_jack->status & SND_JACK_MICROPHONE) {
+ if (rt5651_support_button_press(rt5651)) {
rt5651_disable_micbias1_ovcd_irq(component);
rt5651_disable_micbias1_for_ovcd(component);
snd_soc_jack_report(rt5651->hp_jack, 0, SND_JACK_BTN_0);
@@ -1933,7 +1955,7 @@ static int rt5651_set_jack(struct snd_soc_component *component,
struct snd_soc_jack *jack, void *data)
{
if (jack)
- rt5651_enable_jack_detect(component, jack);
+ rt5651_enable_jack_detect(component, jack, data);
else
rt5651_disable_jack_detect(component);
@@ -2138,6 +2160,7 @@ MODULE_DEVICE_TABLE(of, rt5651_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id rt5651_acpi_match[] = {
{ "10EC5651", 0 },
+ { "10EC5640", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, rt5651_acpi_match);
@@ -2158,6 +2181,7 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
{
struct rt5651_priv *rt5651;
int ret;
+ int err;
rt5651 = devm_kzalloc(&i2c->dev, sizeof(*rt5651),
GFP_KERNEL);
@@ -2174,7 +2198,10 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
return ret;
}
- regmap_read(rt5651->regmap, RT5651_DEVICE_ID, &ret);
+ err = regmap_read(rt5651->regmap, RT5651_DEVICE_ID, &ret);
+ if (err)
+ return err;
+
if (ret != RT5651_DEVICE_ID_VALUE) {
dev_err(&i2c->dev,
"Device with ID register %#x is not rt5651\n", ret);
@@ -2189,7 +2216,7 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
rt5651->irq = i2c->irq;
- rt5651->hp_mute = 1;
+ rt5651->hp_mute = true;
INIT_DELAYED_WORK(&rt5651->bp_work, rt5651_button_press_work);
INIT_WORK(&rt5651->jack_detect_work, rt5651_jack_detect_work);
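With the new third parameter, a machine driver can hand a GPIO descriptor straight through snd_soc_component_set_jack(); when jd_src is RT5651_JD_NULL the codec driver then polls that GPIO instead of its internal jack-detect block. A hedged machine-driver sketch (the "hp-detect" con_id is an assumption; the set_jack data path is what the hunk above adds):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <sound/soc.h>

static int example_enable_hp_det(struct device *dev,
                                 struct snd_soc_component *codec,
                                 struct snd_soc_jack *jack)
{
        struct gpio_desc *gpiod;

        gpiod = devm_gpiod_get_optional(dev, "hp-detect", GPIOD_IN);
        if (IS_ERR(gpiod))
                return PTR_ERR(gpiod);

        /* NULL is fine: rt5651 falls back to its internal jack detect */
        return snd_soc_component_set_jack(codec, jack, gpiod);
}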
diff --git a/sound/soc/codecs/rt5651.h b/sound/soc/codecs/rt5651.h
index ac6de6fb5414..41fcb8b5eb40 100644
--- a/sound/soc/codecs/rt5651.h
+++ b/sound/soc/codecs/rt5651.h
@@ -2073,6 +2073,7 @@ struct rt5651_priv {
struct regmap *regmap;
/* Jack and button detect data */
struct snd_soc_jack *hp_jack;
+ struct gpio_desc *gpiod_hp_det;
struct work_struct jack_detect_work;
struct delayed_work bp_work;
bool ovcd_irq_enabled;
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 453328c988c0..9a037108b1ae 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -1057,20 +1057,6 @@ static const struct snd_kcontrol_new rt5670_lout_mix[] = {
RT5670_M_OV_R_LM_SFT, 1, 1),
};
-static const struct snd_kcontrol_new rt5670_hpl_mix[] = {
- SOC_DAPM_SINGLE("DAC L1 Switch", RT5670_HPO_MIXER,
- RT5670_M_DACL1_HML_SFT, 1, 1),
- SOC_DAPM_SINGLE("INL1 Switch", RT5670_HPO_MIXER,
- RT5670_M_INL1_HML_SFT, 1, 1),
-};
-
-static const struct snd_kcontrol_new rt5670_hpr_mix[] = {
- SOC_DAPM_SINGLE("DAC R1 Switch", RT5670_HPO_MIXER,
- RT5670_M_DACR1_HMR_SFT, 1, 1),
- SOC_DAPM_SINGLE("INR1 Switch", RT5670_HPO_MIXER,
- RT5670_M_INR1_HMR_SFT, 1, 1),
-};
-
static const struct snd_kcontrol_new lout_l_enable_control =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5670_LOUT1,
RT5670_L_MUTE_SFT, 1, 1);
@@ -1196,24 +1182,6 @@ static SOC_ENUM_SINGLE_DECL(rt5670_stereo2_adc2_enum, RT5670_STO2_ADC_MIXER,
static const struct snd_kcontrol_new rt5670_sto2_adc_2_mux =
SOC_DAPM_ENUM("Stereo2 ADC 2 Mux", rt5670_stereo2_adc2_enum);
-
-/* MX-27 MX26 [10] */
-static const char * const rt5670_stereo_adc_src[] = {
- "ADC1L ADC2R", "ADC3"
-};
-
-static SOC_ENUM_SINGLE_DECL(rt5670_stereo1_adc_enum, RT5670_STO1_ADC_MIXER,
- RT5670_ADC_SRC_SFT, rt5670_stereo_adc_src);
-
-static const struct snd_kcontrol_new rt5670_sto_adc_mux =
- SOC_DAPM_ENUM("Stereo1 ADC source", rt5670_stereo1_adc_enum);
-
-static SOC_ENUM_SINGLE_DECL(rt5670_stereo2_adc_enum, RT5670_STO2_ADC_MIXER,
- RT5670_ADC_SRC_SFT, rt5670_stereo_adc_src);
-
-static const struct snd_kcontrol_new rt5670_sto2_adc_mux =
- SOC_DAPM_ENUM("Stereo2 ADC source", rt5670_stereo2_adc_enum);
-
/* MX-27 MX-26 [9:8] */
static const char * const rt5670_stereo_dmic_src[] = {
"DMIC1", "DMIC2", "DMIC3"
@@ -1231,17 +1199,6 @@ static SOC_ENUM_SINGLE_DECL(rt5670_stereo2_dmic_enum, RT5670_STO2_ADC_MIXER,
static const struct snd_kcontrol_new rt5670_sto2_dmic_mux =
SOC_DAPM_ENUM("Stereo2 DMIC source", rt5670_stereo2_dmic_enum);
-/* MX-27 [0] */
-static const char * const rt5670_stereo_dmic3_src[] = {
- "DMIC3", "PDM ADC"
-};
-
-static SOC_ENUM_SINGLE_DECL(rt5670_stereo_dmic3_enum, RT5670_STO1_ADC_MIXER,
- RT5670_DMIC3_SRC_SFT, rt5670_stereo_dmic3_src);
-
-static const struct snd_kcontrol_new rt5670_sto_dmic3_mux =
- SOC_DAPM_ENUM("Stereo DMIC3 source", rt5670_stereo_dmic3_enum);
-
/* Mono ADC source */
/* MX-28 [12] */
static const char * const rt5670_mono_adc_l1_src[] = {
@@ -1334,17 +1291,6 @@ static SOC_ENUM_SINGLE_DECL(rt5670_if2_adc_in_enum, RT5670_DIG_INF1_DATA,
static const struct snd_kcontrol_new rt5670_if2_adc_in_mux =
SOC_DAPM_ENUM("IF2 ADC IN source", rt5670_if2_adc_in_enum);
-/* MX-30 [5:4] */
-static const char * const rt5670_if4_adc_in_src[] = {
- "IF_ADC1", "IF_ADC2", "IF_ADC3"
-};
-
-static SOC_ENUM_SINGLE_DECL(rt5670_if4_adc_in_enum, RT5670_DIG_INF2_DATA,
- RT5670_IF4_ADC_IN_SFT, rt5670_if4_adc_in_src);
-
-static const struct snd_kcontrol_new rt5670_if4_adc_in_mux =
- SOC_DAPM_ENUM("IF4 ADC IN source", rt5670_if4_adc_in_enum);
-
/* MX-31 [15] [13] [11] [9] */
static const char * const rt5670_pdm_src[] = {
"Mono DAC", "Stereo DAC"
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 9b7a1833d331..6fc70e441458 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -547,7 +547,7 @@ static bool rt5677_readable_register(struct device *dev, unsigned int reg)
* @rt5677: Private Data.
* @addr: Address index.
* @value: Address data.
- *
+ * @opcode: opcode value
*
* Returns 0 for success or negative error code.
*/
@@ -602,7 +602,7 @@ err:
/**
* rt5677_dsp_mode_i2c_read_addr - Read value from address on DSP mode.
- * rt5677: Private Data.
+ * @rt5677: Private Data.
* @addr: Address index.
* @value: Address data.
*
@@ -651,7 +651,7 @@ err:
/**
* rt5677_dsp_mode_i2c_write - Write register on DSP mode.
- * rt5677: Private Data.
+ * @rt5677: Private Data.
* @reg: Register index.
* @value: Register data.
*
@@ -667,7 +667,7 @@ static int rt5677_dsp_mode_i2c_write(struct rt5677_priv *rt5677,
/**
* rt5677_dsp_mode_i2c_read - Read register on DSP mode.
- * @codec: SoC audio codec device.
+ * @rt5677: Private Data
* @reg: Register index.
* @value: Register data.
*
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 34cfaf8f6f34..9d5acd2d04ab 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -43,6 +43,12 @@ static const char *rt5682_supply_names[RT5682_NUM_SUPPLIES] = {
"VBAT",
};
+static const struct rt5682_platform_data i2s_default_platform_data = {
+ .dmic1_data_pin = RT5682_DMIC1_DATA_GPIO2,
+ .dmic1_clk_pin = RT5682_DMIC1_CLK_GPIO3,
+ .jd_src = RT5682_JD1,
+};
+
struct rt5682_priv {
struct snd_soc_component *component;
struct rt5682_platform_data pdata;
@@ -1778,7 +1784,9 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
{"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
{"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
{"ADC STO1 ASRC", NULL, "AD ASRC"},
+ {"ADC STO1 ASRC", NULL, "DA ASRC"},
{"ADC STO1 ASRC", NULL, "CLKDET"},
+ {"DAC STO1 ASRC", NULL, "AD ASRC"},
{"DAC STO1 ASRC", NULL, "DA ASRC"},
{"DAC STO1 ASRC", NULL, "CLKDET"},
@@ -2512,6 +2520,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000);
regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
+ regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
mutex_unlock(&rt5682->calibrate_mutex);
@@ -2533,6 +2542,8 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, rt5682);
+ rt5682->pdata = i2s_default_platform_data;
+
if (pdata)
rt5682->pdata = *pdata;
else
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index d82a8301fd74..96944cff0ed7 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -849,18 +849,18 @@
#define RT5682_SCLK_SRC_PLL2 (0x2 << 13)
#define RT5682_SCLK_SRC_SDW (0x3 << 13)
#define RT5682_SCLK_SRC_RCCLK (0x4 << 13)
-#define RT5682_PLL1_SRC_MASK (0x3 << 10)
-#define RT5682_PLL1_SRC_SFT 10
-#define RT5682_PLL1_SRC_MCLK (0x0 << 10)
-#define RT5682_PLL1_SRC_BCLK1 (0x1 << 10)
-#define RT5682_PLL1_SRC_SDW (0x2 << 10)
-#define RT5682_PLL1_SRC_RC (0x3 << 10)
-#define RT5682_PLL2_SRC_MASK (0x3 << 8)
-#define RT5682_PLL2_SRC_SFT 8
-#define RT5682_PLL2_SRC_MCLK (0x0 << 8)
-#define RT5682_PLL2_SRC_BCLK1 (0x1 << 8)
-#define RT5682_PLL2_SRC_SDW (0x2 << 8)
-#define RT5682_PLL2_SRC_RC (0x3 << 8)
+#define RT5682_PLL2_SRC_MASK (0x3 << 10)
+#define RT5682_PLL2_SRC_SFT 10
+#define RT5682_PLL2_SRC_MCLK (0x0 << 10)
+#define RT5682_PLL2_SRC_BCLK1 (0x1 << 10)
+#define RT5682_PLL2_SRC_SDW (0x2 << 10)
+#define RT5682_PLL2_SRC_RC (0x3 << 10)
+#define RT5682_PLL1_SRC_MASK (0x3 << 8)
+#define RT5682_PLL1_SRC_SFT 8
+#define RT5682_PLL1_SRC_MCLK (0x0 << 8)
+#define RT5682_PLL1_SRC_BCLK1 (0x1 << 8)
+#define RT5682_PLL1_SRC_SDW (0x2 << 8)
+#define RT5682_PLL1_SRC_RC (0x3 << 8)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index add18d6d77da..a6a4748c97f9 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -116,6 +116,13 @@ enum {
I2S_LRCLK_STRENGTH_HIGH,
};
+enum {
+ I2S_SCLK_STRENGTH_DISABLE,
+ I2S_SCLK_STRENGTH_LOW,
+ I2S_SCLK_STRENGTH_MEDIUM,
+ I2S_SCLK_STRENGTH_HIGH,
+};
+
/* sgtl5000 private structure in codec */
struct sgtl5000_priv {
int sysclk; /* sysclk rate */
@@ -129,6 +136,7 @@ struct sgtl5000_priv {
u8 micbias_resistor;
u8 micbias_voltage;
u8 lrclk_strength;
+ u8 sclk_strength;
};
/*
@@ -1302,7 +1310,9 @@ static int sgtl5000_probe(struct snd_soc_component *component)
SGTL5000_DAC_MUTE_RIGHT |
SGTL5000_DAC_MUTE_LEFT);
- reg = ((sgtl5000->lrclk_strength) << SGTL5000_PAD_I2S_LRCLK_SHIFT | 0x5f);
+ reg = ((sgtl5000->lrclk_strength) << SGTL5000_PAD_I2S_LRCLK_SHIFT |
+ (sgtl5000->sclk_strength) << SGTL5000_PAD_I2S_SCLK_SHIFT |
+ 0x1f);
snd_soc_component_write(component, SGTL5000_CHIP_PAD_STRENGTH, reg);
snd_soc_component_write(component, SGTL5000_CHIP_ANA_CTRL,
@@ -1542,6 +1552,13 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
sgtl5000->lrclk_strength = value;
}
+ sgtl5000->sclk_strength = I2S_SCLK_STRENGTH_LOW;
+ if (!of_property_read_u32(np, "sclk-strength", &value)) {
+ if (value > I2S_SCLK_STRENGTH_HIGH)
+ value = I2S_SCLK_STRENGTH_LOW;
+ sgtl5000->sclk_strength = value;
+ }
+
/* Ensure sgtl5000 will start with sane register values */
sgtl5000_fill_defaults(client);
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 501a4e73b185..464a4d7873bb 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -1,31 +1,17 @@
-/*
- * File: sound/soc/codecs/ssm2602.c
- * Author: Cliff Cai <Cliff.Cai@analog.com>
- *
- * Created: Tue June 06 2008
- * Description: Driver for ssm2602 sound chip
- *
- * Modified:
- * Copyright 2008 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// File: sound/soc/codecs/ssm2602.c
+// Author: Cliff Cai <Cliff.Cai@analog.com>
+//
+// Created: Tue June 06 2008
+// Description: Driver for ssm2602 sound chip
+//
+// Modified:
+// Copyright 2008 Analog Devices Inc.
+//
+// Bugs: Enter bugs at http://blackfin.uclinux.org/
+
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -111,7 +97,6 @@ SOC_SINGLE_TLV("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1,
SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0),
SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0),
-SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1),
};
/* Output Mixer */
@@ -121,10 +106,31 @@ SOC_DAPM_SINGLE("HiFi Playback Switch", SSM2602_APANA, 4, 1, 0),
SOC_DAPM_SINGLE("Mic Sidetone Switch", SSM2602_APANA, 5, 1, 0),
};
+static const struct snd_kcontrol_new mic_ctl =
+ SOC_DAPM_SINGLE("Switch", SSM2602_APANA, 1, 1, 1);
+
/* Input mux */
static const struct snd_kcontrol_new ssm2602_input_mux_controls =
SOC_DAPM_ENUM("Input Select", ssm2602_enum[0]);
+static int ssm2602_mic_switch_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ /*
+ * According to the ssm2603 data sheet (control register sequencing),
+ * the digital core should be activated only after all necessary bits
+ * in the power register are enabled, and a delay determined by the
+ * decoupling capacitor on the VMID pin has passed. If the digital core
+ * is activated too early, or even before the ADC is powered up, audible
+ * artifacts appear at the beginning and end of the recorded signal.
+ *
+ * In practice, the artifacts disappear once the delay is well over 500 ms.
+ */
+ msleep(500);
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget ssm260x_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DAC", "HiFi Playback", SSM2602_PWR, 3, 1),
SND_SOC_DAPM_ADC("ADC", "HiFi Capture", SSM2602_PWR, 2, 1),
@@ -146,6 +152,9 @@ SND_SOC_DAPM_MIXER("Output Mixer", SSM2602_PWR, 4, 1,
SND_SOC_DAPM_MUX("Input Mux", SND_SOC_NOPM, 0, 0, &ssm2602_input_mux_controls),
SND_SOC_DAPM_MICBIAS("Mic Bias", SSM2602_PWR, 1, 1),
+SND_SOC_DAPM_SWITCH_E("Mic Switch", SSM2602_APANA, 1, 1, &mic_ctl,
+ ssm2602_mic_switch_event, SND_SOC_DAPM_PRE_PMU),
+
SND_SOC_DAPM_OUTPUT("LHPOUT"),
SND_SOC_DAPM_OUTPUT("RHPOUT"),
SND_SOC_DAPM_INPUT("MICIN"),
@@ -178,9 +187,11 @@ static const struct snd_soc_dapm_route ssm2602_routes[] = {
{"LHPOUT", NULL, "Output Mixer"},
{"Input Mux", "Line", "Line Input"},
- {"Input Mux", "Mic", "Mic Bias"},
+ {"Input Mux", "Mic", "Mic Switch"},
{"ADC", NULL, "Input Mux"},
+ {"Mic Switch", NULL, "Mic Bias"},
+
{"Mic Bias", NULL, "MICIN"},
};
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index c6048d95c6d3..c544a1e35f5e 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1274,8 +1274,9 @@ static int aic31xx_codec_probe(struct snd_soc_component *component)
aic31xx->disable_nb[i].nb.notifier_call =
aic31xx_regulator_event;
aic31xx->disable_nb[i].aic31xx = aic31xx;
- ret = regulator_register_notifier(aic31xx->supplies[i].consumer,
- &aic31xx->disable_nb[i].nb);
+ ret = devm_regulator_register_notifier(
+ aic31xx->supplies[i].consumer,
+ &aic31xx->disable_nb[i].nb);
if (ret) {
dev_err(component->dev,
"Failed to request regulator notifier: %d\n",
@@ -1298,19 +1299,8 @@ static int aic31xx_codec_probe(struct snd_soc_component *component)
return 0;
}
-static void aic31xx_codec_remove(struct snd_soc_component *component)
-{
- struct aic31xx_priv *aic31xx = snd_soc_component_get_drvdata(component);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
- regulator_unregister_notifier(aic31xx->supplies[i].consumer,
- &aic31xx->disable_nb[i].nb);
-}
-
static const struct snd_soc_component_driver soc_codec_driver_aic31xx = {
.probe = aic31xx_codec_probe,
- .remove = aic31xx_codec_remove,
.set_bias_level = aic31xx_set_bias_level,
.controls = common31xx_snd_controls,
.num_controls = ARRAY_SIZE(common31xx_snd_controls),
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index e2b5a11b16d1..96f1526cb258 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -79,6 +79,32 @@ struct aic32x4_priv {
struct device *dev;
};
+static int mic_bias_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+		/* Change Mic Bias Register */
+ snd_soc_component_update_bits(component, AIC32X4_MICBIAS,
+ AIC32x4_MICBIAS_MASK,
+ AIC32X4_MICBIAS_LDOIN |
+ AIC32X4_MICBIAS_2075V);
+ printk(KERN_DEBUG "%s: Mic Bias will be turned ON\n", __func__);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_component_update_bits(component, AIC32X4_MICBIAS,
+ AIC32x4_MICBIAS_MASK, 0);
+ printk(KERN_DEBUG "%s: Mic Bias will be turned OFF\n",
+ __func__);
+ break;
+ }
+
+ return 0;
+}
+
+
static int aic32x4_get_mfp1_gpio(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -450,7 +476,9 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
SND_SOC_DAPM_MUX("IN3_R to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
in3r_to_lmixer_controls),
- SND_SOC_DAPM_MICBIAS("Mic Bias", AIC32X4_MICBIAS, 6, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Bias", AIC32X4_MICBIAS, 6, 0, mic_bias_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
SND_SOC_DAPM_OUTPUT("HPL"),
SND_SOC_DAPM_OUTPUT("HPR"),
@@ -822,6 +850,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component,
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
+ /* Initial cold start */
+ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
+ break;
+
/* Switch off BCLK_N Divider */
snd_soc_component_update_bits(component, AIC32X4_BCLKN,
AIC32X4_BCLKEN, 0);
@@ -938,6 +970,7 @@ static int aic32x4_component_probe(struct snd_soc_component *component)
if (gpio_is_valid(aic32x4->rstn_gpio)) {
ndelay(10);
gpio_set_value(aic32x4->rstn_gpio, 1);
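+		/* Give the codec time to settle after the reset line is released */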
+ mdelay(1);
}
snd_soc_component_write(component, AIC32X4_RESET, 0x01);
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index e9df49edbf19..c2d74025bf4b 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -195,6 +195,7 @@ int aic32x4_remove(struct device *dev);
/* AIC32X4_MICBIAS */
#define AIC32X4_MICBIAS_LDOIN BIT(3)
#define AIC32X4_MICBIAS_2075V 0x60
+#define AIC32x4_MICBIAS_MASK GENMASK(6, 3)
/* AIC32X4_LMICPGANIN */
#define AIC32X4_LMICPGANIN_IN2R_10K 0x10
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 6aa0edf8c5ef..283583d1db60 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1615,13 +1615,14 @@ static int aic3x_probe(struct snd_soc_component *component)
for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) {
aic3x->disable_nb[i].nb.notifier_call = aic3x_regulator_event;
aic3x->disable_nb[i].aic3x = aic3x;
- ret = regulator_register_notifier(aic3x->supplies[i].consumer,
- &aic3x->disable_nb[i].nb);
+ ret = devm_regulator_register_notifier(
+ aic3x->supplies[i].consumer,
+ &aic3x->disable_nb[i].nb);
if (ret) {
dev_err(component->dev,
"Failed to request regulator notifier: %d\n",
ret);
- goto err_notif;
+ return ret;
}
}
@@ -1679,29 +1680,11 @@ static int aic3x_probe(struct snd_soc_component *component)
aic3x_add_widgets(component);
return 0;
-
-err_notif:
- while (i--)
- regulator_unregister_notifier(aic3x->supplies[i].consumer,
- &aic3x->disable_nb[i].nb);
- return ret;
-}
-
-static void aic3x_remove(struct snd_soc_component *component)
-{
- struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component);
- int i;
-
- list_del(&aic3x->list);
- for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
- regulator_unregister_notifier(aic3x->supplies[i].consumer,
- &aic3x->disable_nb[i].nb);
}
static const struct snd_soc_component_driver soc_component_dev_aic3x = {
.set_bias_level = aic3x_set_bias_level,
.probe = aic3x_probe,
- .remove = aic3x_remove,
.controls = aic3x_snd_controls,
.num_controls = ARRAY_SIZE(aic3x_snd_controls),
.dapm_widgets = aic3x_dapm_widgets,
diff --git a/sound/soc/codecs/tscs42xx.c b/sound/soc/codecs/tscs42xx.c
index 7396a6e5277e..27b8c6ba72fa 100644
--- a/sound/soc/codecs/tscs42xx.c
+++ b/sound/soc/codecs/tscs42xx.c
@@ -389,7 +389,7 @@ static int dac_event(struct snd_soc_dapm_widget *w,
mutex_lock(&tscs42xx->coeff_ram_lock);
- if (tscs42xx->coeff_ram_synced == false) {
+ if (!tscs42xx->coeff_ram_synced) {
ret = write_coeff_ram(component, tscs42xx->coeff_ram, 0x00,
COEFF_RAM_COEFF_COUNT);
if (ret < 0)
diff --git a/sound/soc/codecs/wcd-clsh-v2.c b/sound/soc/codecs/wcd-clsh-v2.c
new file mode 100644
index 000000000000..c397d713f01a
--- /dev/null
+++ b/sound/soc/codecs/wcd-clsh-v2.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+// Copyright (c) 2017-2018, Linaro Limited
+
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include "wcd9335.h"
+#include "wcd-clsh-v2.h"
+
+struct wcd_clsh_ctrl {
+ int state;
+ int mode;
+ int flyback_users;
+ int buck_users;
+ int clsh_users;
+ int codec_version;
+ struct snd_soc_component *comp;
+};
+
+/* Class-H registers for WCD9335 and later codecs */
+#define WCD9XXX_A_CDC_RX0_RX_PATH_CFG0 WCD9335_REG(0xB, 0x42)
+#define WCD9XXX_A_CDC_RX_PATH_CLSH_EN_MASK BIT(6)
+#define WCD9XXX_A_CDC_RX_PATH_CLSH_ENABLE BIT(6)
+#define WCD9XXX_A_CDC_RX_PATH_CLSH_DISABLE 0
+#define WCD9XXX_A_CDC_RX1_RX_PATH_CFG0 WCD9335_REG(0xB, 0x56)
+#define WCD9XXX_A_CDC_RX2_RX_PATH_CFG0 WCD9335_REG(0xB, 0x6A)
+#define WCD9XXX_A_CDC_CLSH_K1_MSB WCD9335_REG(0xC, 0x08)
+#define WCD9XXX_A_CDC_CLSH_K1_MSB_COEF_MASK GENMASK(3, 0)
+#define WCD9XXX_A_CDC_CLSH_K1_LSB WCD9335_REG(0xC, 0x09)
+#define WCD9XXX_A_CDC_CLSH_K1_LSB_COEF_MASK GENMASK(7, 0)
+#define WCD9XXX_A_ANA_RX_SUPPLIES WCD9335_REG(0x6, 0x08)
+#define WCD9XXX_A_ANA_RX_REGULATOR_MODE_MASK BIT(1)
+#define WCD9XXX_A_ANA_RX_REGULATOR_MODE_CLS_H 0
+#define WCD9XXX_A_ANA_RX_REGULATOR_MODE_CLS_AB BIT(1)
+#define WCD9XXX_A_ANA_RX_VNEG_PWR_LVL_MASK BIT(2)
+#define WCD9XXX_A_ANA_RX_VNEG_PWR_LVL_UHQA BIT(2)
+#define WCD9XXX_A_ANA_RX_VNEG_PWR_LVL_DEFAULT 0
+#define WCD9XXX_A_ANA_RX_VPOS_PWR_LVL_MASK BIT(3)
+#define WCD9XXX_A_ANA_RX_VPOS_PWR_LVL_UHQA BIT(3)
+#define WCD9XXX_A_ANA_RX_VPOS_PWR_LVL_DEFAULT 0
+#define WCD9XXX_A_ANA_RX_VNEG_EN_MASK BIT(6)
+#define WCD9XXX_A_ANA_RX_VNEG_EN_SHIFT 6
+#define WCD9XXX_A_ANA_RX_VNEG_ENABLE BIT(6)
+#define WCD9XXX_A_ANA_RX_VNEG_DISABLE 0
+#define WCD9XXX_A_ANA_RX_VPOS_EN_MASK BIT(7)
+#define WCD9XXX_A_ANA_RX_VPOS_EN_SHIFT 7
+#define WCD9XXX_A_ANA_RX_VPOS_ENABLE BIT(7)
+#define WCD9XXX_A_ANA_RX_VPOS_DISABLE 0
+#define WCD9XXX_A_ANA_HPH WCD9335_REG(0x6, 0x09)
+#define WCD9XXX_A_ANA_HPH_PWR_LEVEL_MASK GENMASK(3, 2)
+#define WCD9XXX_A_ANA_HPH_PWR_LEVEL_UHQA 0x08
+#define WCD9XXX_A_ANA_HPH_PWR_LEVEL_LP 0x04
+#define WCD9XXX_A_ANA_HPH_PWR_LEVEL_NORMAL 0x0
+#define WCD9XXX_A_CDC_CLSH_CRC WCD9335_REG(0xC, 0x01)
+#define WCD9XXX_A_CDC_CLSH_CRC_CLK_EN_MASK BIT(0)
+#define WCD9XXX_A_CDC_CLSH_CRC_CLK_ENABLE BIT(0)
+#define WCD9XXX_A_CDC_CLSH_CRC_CLK_DISABLE 0
+#define WCD9XXX_FLYBACK_EN WCD9335_REG(0x6, 0xA4)
+#define WCD9XXX_FLYBACK_EN_DELAY_SEL_MASK GENMASK(6, 5)
+#define WCD9XXX_FLYBACK_EN_DELAY_26P25_US 0x40
+#define WCD9XXX_FLYBACK_EN_RESET_BY_EXT_MASK BIT(4)
+#define WCD9XXX_FLYBACK_EN_PWDN_WITHOUT_DELAY BIT(4)
+#define WCD9XXX_FLYBACK_EN_PWDN_WITH_DELAY 0
+#define WCD9XXX_RX_BIAS_FLYB_BUFF WCD9335_REG(0x6, 0xC7)
+#define WCD9XXX_RX_BIAS_FLYB_VNEG_5_UA_MASK GENMASK(7, 4)
+#define WCD9XXX_RX_BIAS_FLYB_VPOS_5_UA_MASK	GENMASK(3, 0)
+#define WCD9XXX_HPH_L_EN WCD9335_REG(0x6, 0xD3)
+#define WCD9XXX_HPH_CONST_SEL_L_MASK GENMASK(7, 3)
+#define WCD9XXX_HPH_CONST_SEL_BYPASS 0
+#define WCD9XXX_HPH_CONST_SEL_LP_PATH 0x40
+#define WCD9XXX_HPH_CONST_SEL_HQ_PATH 0x80
+#define WCD9XXX_HPH_R_EN WCD9335_REG(0x6, 0xD6)
+#define WCD9XXX_HPH_REFBUFF_UHQA_CTL WCD9335_REG(0x6, 0xDD)
+#define WCD9XXX_HPH_REFBUFF_UHQA_GAIN_MASK GENMASK(2, 0)
+#define WCD9XXX_CLASSH_CTRL_VCL_2 WCD9335_REG(0x6, 0x9B)
+#define WCD9XXX_CLASSH_CTRL_VCL_2_VREF_FILT_1_MASK GENMASK(5, 4)
+#define WCD9XXX_CLASSH_CTRL_VCL_VREF_FILT_R_50KOHM 0x20
+#define WCD9XXX_CLASSH_CTRL_VCL_VREF_FILT_R_0KOHM 0x0
+#define WCD9XXX_CDC_RX1_RX_PATH_CTL WCD9335_REG(0xB, 0x55)
+#define WCD9XXX_CDC_RX2_RX_PATH_CTL WCD9335_REG(0xB, 0x69)
+#define WCD9XXX_CDC_CLK_RST_CTRL_MCLK_CONTROL WCD9335_REG(0xD, 0x41)
+#define WCD9XXX_CDC_CLK_RST_CTRL_MCLK_EN_MASK BIT(0)
+#define WCD9XXX_CDC_CLK_RST_CTRL_MCLK_11P3_EN_MASK BIT(1)
+#define WCD9XXX_CLASSH_CTRL_CCL_1 WCD9335_REG(0x6, 0x9C)
+#define WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_MASK GENMASK(7, 4)
+#define WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_50MA 0x50
+#define WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_30MA 0x30
+
+#define CLSH_REQ_ENABLE true
+#define CLSH_REQ_DISABLE false
+#define WCD_USLEEP_RANGE 50
+
+enum {
+ DAC_GAIN_0DB = 0,
+ DAC_GAIN_0P2DB,
+ DAC_GAIN_0P4DB,
+ DAC_GAIN_0P6DB,
+ DAC_GAIN_0P8DB,
+ DAC_GAIN_M0P2DB,
+ DAC_GAIN_M0P4DB,
+ DAC_GAIN_M0P6DB,
+};
+
+static inline void wcd_enable_clsh_block(struct wcd_clsh_ctrl *ctrl,
+ bool enable)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
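+	/* Gate the Class-H clock only on the first enable / last disable */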
+ if ((enable && ++ctrl->clsh_users == 1) ||
+ (!enable && --ctrl->clsh_users == 0))
+ snd_soc_component_update_bits(comp, WCD9XXX_A_CDC_CLSH_CRC,
+ WCD9XXX_A_CDC_CLSH_CRC_CLK_EN_MASK,
+ enable);
+ if (ctrl->clsh_users < 0)
+ ctrl->clsh_users = 0;
+}
+
+static inline bool wcd_clsh_enable_status(struct snd_soc_component *comp)
+{
+ return snd_soc_component_read32(comp, WCD9XXX_A_CDC_CLSH_CRC) &
+ WCD9XXX_A_CDC_CLSH_CRC_CLK_EN_MASK;
+}
+
+static inline void wcd_clsh_set_buck_mode(struct snd_soc_component *comp,
+ int mode)
+{
+ /* set to HIFI */
+ if (mode == CLS_H_HIFI)
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_RX_SUPPLIES,
+ WCD9XXX_A_ANA_RX_VPOS_PWR_LVL_MASK,
+ WCD9XXX_A_ANA_RX_VPOS_PWR_LVL_UHQA);
+ else
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_RX_SUPPLIES,
+ WCD9XXX_A_ANA_RX_VPOS_PWR_LVL_MASK,
+ WCD9XXX_A_ANA_RX_VPOS_PWR_LVL_DEFAULT);
+}
+
+static inline void wcd_clsh_set_flyback_mode(struct snd_soc_component *comp,
+ int mode)
+{
+ /* set to HIFI */
+ if (mode == CLS_H_HIFI)
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_RX_SUPPLIES,
+ WCD9XXX_A_ANA_RX_VNEG_PWR_LVL_MASK,
+ WCD9XXX_A_ANA_RX_VNEG_PWR_LVL_UHQA);
+ else
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_RX_SUPPLIES,
+ WCD9XXX_A_ANA_RX_VNEG_PWR_LVL_MASK,
+ WCD9XXX_A_ANA_RX_VNEG_PWR_LVL_DEFAULT);
+}
+
+static void wcd_clsh_buck_ctrl(struct wcd_clsh_ctrl *ctrl,
+ int mode,
+ bool enable)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
+ /* enable/disable buck */
+ if ((enable && (++ctrl->buck_users == 1)) ||
+ (!enable && (--ctrl->buck_users == 0)))
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_RX_SUPPLIES,
+ WCD9XXX_A_ANA_RX_VPOS_EN_MASK,
+ enable << WCD9XXX_A_ANA_RX_VPOS_EN_SHIFT);
+ /*
+ * 500us sleep is required after buck enable/disable
+ * as per HW requirement
+ */
+ usleep_range(500, 500 + WCD_USLEEP_RANGE);
+}
+
+static void wcd_clsh_flyback_ctrl(struct wcd_clsh_ctrl *ctrl,
+ int mode,
+ bool enable)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
+ /* enable/disable flyback */
+ if ((enable && (++ctrl->flyback_users == 1)) ||
+ (!enable && (--ctrl->flyback_users == 0))) {
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_RX_SUPPLIES,
+ WCD9XXX_A_ANA_RX_VNEG_EN_MASK,
+ enable << WCD9XXX_A_ANA_RX_VNEG_EN_SHIFT);
+ /* 100usec delay is needed as per HW requirement */
+ usleep_range(100, 110);
+ }
+ /*
+ * 500us sleep is required after flyback enable/disable
+ * as per HW requirement
+ */
+ usleep_range(500, 500 + WCD_USLEEP_RANGE);
+}
+
+static void wcd_clsh_set_gain_path(struct wcd_clsh_ctrl *ctrl, int mode)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+ int val = 0;
+
+ switch (mode) {
+ case CLS_H_NORMAL:
+ case CLS_AB:
+ val = WCD9XXX_HPH_CONST_SEL_BYPASS;
+ break;
+ case CLS_H_HIFI:
+ val = WCD9XXX_HPH_CONST_SEL_HQ_PATH;
+ break;
+ case CLS_H_LP:
+ val = WCD9XXX_HPH_CONST_SEL_LP_PATH;
+ break;
+ }
+
+ snd_soc_component_update_bits(comp, WCD9XXX_HPH_L_EN,
+ WCD9XXX_HPH_CONST_SEL_L_MASK,
+ val);
+
+ snd_soc_component_update_bits(comp, WCD9XXX_HPH_R_EN,
+ WCD9XXX_HPH_CONST_SEL_L_MASK,
+ val);
+}
+
+static void wcd_clsh_set_hph_mode(struct snd_soc_component *comp,
+ int mode)
+{
+ int val = 0, gain = 0, res_val;
+ int ipeak = WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_50MA;
+
+ res_val = WCD9XXX_CLASSH_CTRL_VCL_VREF_FILT_R_0KOHM;
+ switch (mode) {
+ case CLS_H_NORMAL:
+ res_val = WCD9XXX_CLASSH_CTRL_VCL_VREF_FILT_R_50KOHM;
+ val = WCD9XXX_A_ANA_HPH_PWR_LEVEL_NORMAL;
+ gain = DAC_GAIN_0DB;
+ ipeak = WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_50MA;
+ break;
+ case CLS_AB:
+ val = WCD9XXX_A_ANA_HPH_PWR_LEVEL_NORMAL;
+ gain = DAC_GAIN_0DB;
+ ipeak = WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_50MA;
+ break;
+ case CLS_H_HIFI:
+ val = WCD9XXX_A_ANA_HPH_PWR_LEVEL_UHQA;
+ gain = DAC_GAIN_M0P2DB;
+ ipeak = WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_50MA;
+ break;
+ case CLS_H_LP:
+ val = WCD9XXX_A_ANA_HPH_PWR_LEVEL_LP;
+ ipeak = WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_30MA;
+ break;
+ }
+
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_HPH,
+ WCD9XXX_A_ANA_HPH_PWR_LEVEL_MASK, val);
+ snd_soc_component_update_bits(comp, WCD9XXX_CLASSH_CTRL_VCL_2,
+ WCD9XXX_CLASSH_CTRL_VCL_2_VREF_FILT_1_MASK,
+ res_val);
+ if (mode != CLS_H_LP)
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_HPH_REFBUFF_UHQA_CTL,
+ WCD9XXX_HPH_REFBUFF_UHQA_GAIN_MASK,
+ gain);
+ snd_soc_component_update_bits(comp, WCD9XXX_CLASSH_CTRL_CCL_1,
+ WCD9XXX_CLASSH_CTRL_CCL_1_DELTA_IPEAK_MASK,
+ ipeak);
+}
+
+static void wcd_clsh_set_flyback_current(struct snd_soc_component *comp,
+ int mode)
+{
+
+ snd_soc_component_update_bits(comp, WCD9XXX_RX_BIAS_FLYB_BUFF,
+ WCD9XXX_RX_BIAS_FLYB_VPOS_5_UA_MASK, 0x0A);
+ snd_soc_component_update_bits(comp, WCD9XXX_RX_BIAS_FLYB_BUFF,
+ WCD9XXX_RX_BIAS_FLYB_VNEG_5_UA_MASK, 0x0A);
+ /* Sleep needed to avoid click and pop as per HW requirement */
+ usleep_range(100, 110);
+}
+
+static void wcd_clsh_set_buck_regulator_mode(struct snd_soc_component *comp,
+ int mode)
+{
+ if (mode == CLS_AB)
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_RX_SUPPLIES,
+ WCD9XXX_A_ANA_RX_REGULATOR_MODE_MASK,
+ WCD9XXX_A_ANA_RX_REGULATOR_MODE_CLS_AB);
+ else
+ snd_soc_component_update_bits(comp, WCD9XXX_A_ANA_RX_SUPPLIES,
+ WCD9XXX_A_ANA_RX_REGULATOR_MODE_MASK,
+ WCD9XXX_A_ANA_RX_REGULATOR_MODE_CLS_H);
+}
+
+static void wcd_clsh_state_lo(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
+ if (mode != CLS_AB) {
+ dev_err(comp->dev, "%s: LO cannot be in this mode: %d\n",
+ __func__, mode);
+ return;
+ }
+
+ if (is_enable) {
+ wcd_clsh_set_buck_regulator_mode(comp, mode);
+ wcd_clsh_set_buck_mode(comp, mode);
+ wcd_clsh_set_flyback_mode(comp, mode);
+ wcd_clsh_flyback_ctrl(ctrl, mode, true);
+ wcd_clsh_set_flyback_current(comp, mode);
+ wcd_clsh_buck_ctrl(ctrl, mode, true);
+ } else {
+ wcd_clsh_buck_ctrl(ctrl, mode, false);
+ wcd_clsh_flyback_ctrl(ctrl, mode, false);
+ wcd_clsh_set_flyback_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_set_buck_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_set_buck_regulator_mode(comp, CLS_H_NORMAL);
+ }
+}
+
+static void wcd_clsh_state_hph_r(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
+ if (mode == CLS_H_NORMAL) {
+ dev_err(comp->dev, "%s: Normal mode not applicable for hph_r\n",
+ __func__);
+ return;
+ }
+
+ if (is_enable) {
+ if (mode != CLS_AB) {
+ wcd_enable_clsh_block(ctrl, true);
+ /*
+			 * These K1 values depend on the headphone impedance;
+			 * for now it is assumed to be 16 ohm.
+ */
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_CLSH_K1_MSB,
+ WCD9XXX_A_CDC_CLSH_K1_MSB_COEF_MASK,
+ 0x00);
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_CLSH_K1_LSB,
+ WCD9XXX_A_CDC_CLSH_K1_LSB_COEF_MASK,
+ 0xC0);
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_EN_MASK,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_ENABLE);
+ }
+ wcd_clsh_set_buck_regulator_mode(comp, mode);
+ wcd_clsh_set_flyback_mode(comp, mode);
+ wcd_clsh_flyback_ctrl(ctrl, mode, true);
+ wcd_clsh_set_flyback_current(comp, mode);
+ wcd_clsh_set_buck_mode(comp, mode);
+ wcd_clsh_buck_ctrl(ctrl, mode, true);
+ wcd_clsh_set_hph_mode(comp, mode);
+ wcd_clsh_set_gain_path(ctrl, mode);
+ } else {
+ wcd_clsh_set_hph_mode(comp, CLS_H_NORMAL);
+
+ if (mode != CLS_AB) {
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_RX2_RX_PATH_CFG0,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_EN_MASK,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_DISABLE);
+ wcd_enable_clsh_block(ctrl, false);
+ }
+ /* buck and flyback set to default mode and disable */
+ wcd_clsh_buck_ctrl(ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_flyback_ctrl(ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_set_flyback_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_set_buck_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_set_buck_regulator_mode(comp, CLS_H_NORMAL);
+ }
+}
+
+static void wcd_clsh_state_hph_l(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
+ if (mode == CLS_H_NORMAL) {
+ dev_err(comp->dev, "%s: Normal mode not applicable for hph_l\n",
+ __func__);
+ return;
+ }
+
+ if (is_enable) {
+ if (mode != CLS_AB) {
+ wcd_enable_clsh_block(ctrl, true);
+ /*
+			 * These K1 values depend on the headphone impedance;
+			 * for now it is assumed to be 16 ohm.
+ */
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_CLSH_K1_MSB,
+ WCD9XXX_A_CDC_CLSH_K1_MSB_COEF_MASK,
+ 0x00);
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_CLSH_K1_LSB,
+ WCD9XXX_A_CDC_CLSH_K1_LSB_COEF_MASK,
+ 0xC0);
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_EN_MASK,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_ENABLE);
+ }
+ wcd_clsh_set_buck_regulator_mode(comp, mode);
+ wcd_clsh_set_flyback_mode(comp, mode);
+ wcd_clsh_flyback_ctrl(ctrl, mode, true);
+ wcd_clsh_set_flyback_current(comp, mode);
+ wcd_clsh_set_buck_mode(comp, mode);
+ wcd_clsh_buck_ctrl(ctrl, mode, true);
+ wcd_clsh_set_hph_mode(comp, mode);
+ wcd_clsh_set_gain_path(ctrl, mode);
+ } else {
+ wcd_clsh_set_hph_mode(comp, CLS_H_NORMAL);
+
+ if (mode != CLS_AB) {
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_RX1_RX_PATH_CFG0,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_EN_MASK,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_DISABLE);
+ wcd_enable_clsh_block(ctrl, false);
+ }
+ /* set buck and flyback to Default Mode */
+ wcd_clsh_buck_ctrl(ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_flyback_ctrl(ctrl, CLS_H_NORMAL, false);
+ wcd_clsh_set_flyback_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_set_buck_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_set_buck_regulator_mode(comp, CLS_H_NORMAL);
+ }
+}
+
+static void wcd_clsh_state_ear(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
+ if (mode != CLS_H_NORMAL) {
+ dev_err(comp->dev, "%s: mode: %d cannot be used for EAR\n",
+ __func__, mode);
+ return;
+ }
+
+ if (is_enable) {
+ wcd_enable_clsh_block(ctrl, true);
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_EN_MASK,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_ENABLE);
+ wcd_clsh_set_buck_mode(comp, mode);
+ wcd_clsh_set_flyback_mode(comp, mode);
+ wcd_clsh_flyback_ctrl(ctrl, mode, true);
+ wcd_clsh_set_flyback_current(comp, mode);
+ wcd_clsh_buck_ctrl(ctrl, mode, true);
+ } else {
+ snd_soc_component_update_bits(comp,
+ WCD9XXX_A_CDC_RX0_RX_PATH_CFG0,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_EN_MASK,
+ WCD9XXX_A_CDC_RX_PATH_CLSH_DISABLE);
+ wcd_enable_clsh_block(ctrl, false);
+ wcd_clsh_buck_ctrl(ctrl, mode, false);
+ wcd_clsh_flyback_ctrl(ctrl, mode, false);
+ wcd_clsh_set_flyback_mode(comp, CLS_H_NORMAL);
+ wcd_clsh_set_buck_mode(comp, CLS_H_NORMAL);
+ }
+}
+
+static int _wcd_clsh_ctrl_set_state(struct wcd_clsh_ctrl *ctrl, int req_state,
+ bool is_enable, int mode)
+{
+ switch (req_state) {
+ case WCD_CLSH_STATE_EAR:
+ wcd_clsh_state_ear(ctrl, req_state, is_enable, mode);
+ break;
+ case WCD_CLSH_STATE_HPHL:
+ wcd_clsh_state_hph_l(ctrl, req_state, is_enable, mode);
+ break;
+ case WCD_CLSH_STATE_HPHR:
+ wcd_clsh_state_hph_r(ctrl, req_state, is_enable, mode);
+		break;
+ case WCD_CLSH_STATE_LO:
+ wcd_clsh_state_lo(ctrl, req_state, is_enable, mode);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Function: wcd_clsh_is_state_valid
+ * Params: state
+ * Description:
+ * Provides information on valid states of Class H configuration
+ */
+static bool wcd_clsh_is_state_valid(int state)
+{
+ switch (state) {
+ case WCD_CLSH_STATE_IDLE:
+ case WCD_CLSH_STATE_EAR:
+ case WCD_CLSH_STATE_HPHL:
+ case WCD_CLSH_STATE_HPHR:
+ case WCD_CLSH_STATE_LO:
+ return true;
+ default:
+ return false;
+	}
+}
+
+/*
+ * Function: wcd_clsh_fsm
+ * Params: ctrl, req_state, req_type, clsh_event
+ * Description:
+ * This function handles the PRE DAC and POST DAC conditions of different
+ * devices and updates the Class-H configuration for different combinations
+ * of devices based on the validity of their states. ctrl holds the current
+ * Class-H state information.
+ */
+int wcd_clsh_ctrl_set_state(struct wcd_clsh_ctrl *ctrl,
+ enum wcd_clsh_event clsh_event,
+ int nstate,
+ enum wcd_clsh_mode mode)
+{
+ struct snd_soc_component *comp = ctrl->comp;
+
+ if (nstate == ctrl->state)
+ return 0;
+
+ if (!wcd_clsh_is_state_valid(nstate)) {
+		dev_err(comp->dev, "Class-H not a valid new state: %d\n",
+			nstate);
+ return -EINVAL;
+ }
+
+ switch (clsh_event) {
+ case WCD_CLSH_EVENT_PRE_DAC:
+ _wcd_clsh_ctrl_set_state(ctrl, nstate, CLSH_REQ_ENABLE, mode);
+ break;
+ case WCD_CLSH_EVENT_POST_PA:
+ _wcd_clsh_ctrl_set_state(ctrl, nstate, CLSH_REQ_DISABLE, mode);
+ break;
+ }
+
+ ctrl->state = nstate;
+ ctrl->mode = mode;
+
+ return 0;
+}
+
+int wcd_clsh_ctrl_get_state(struct wcd_clsh_ctrl *ctrl)
+{
+ return ctrl->state;
+}
+
+struct wcd_clsh_ctrl *wcd_clsh_ctrl_alloc(struct snd_soc_component *comp,
+ int version)
+{
+ struct wcd_clsh_ctrl *ctrl;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ ctrl->state = WCD_CLSH_STATE_IDLE;
+ ctrl->comp = comp;
+
+ return ctrl;
+}
+
+void wcd_clsh_ctrl_free(struct wcd_clsh_ctrl *ctrl)
+{
+ kfree(ctrl);
+}
diff --git a/sound/soc/codecs/wcd-clsh-v2.h b/sound/soc/codecs/wcd-clsh-v2.h
new file mode 100644
index 000000000000..a902f9893467
--- /dev/null
+++ b/sound/soc/codecs/wcd-clsh-v2.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _WCD_CLSH_V2_H_
+#define _WCD_CLSH_V2_H_
+#include <sound/soc.h>
+
+enum wcd_clsh_event {
+ WCD_CLSH_EVENT_PRE_DAC = 1,
+ WCD_CLSH_EVENT_POST_PA,
+};
+
+/*
+ * Basic states for the Class-H state machine,
+ * represented as a bit mask within a u8 data type:
+ * bit 0: EAR mode
+ * bit 1: HPH Left mode
+ * bit 2: HPH Right mode
+ * bit 3: Lineout mode
+ */
+#define WCD_CLSH_STATE_IDLE 0
+#define WCD_CLSH_STATE_EAR BIT(0)
+#define WCD_CLSH_STATE_HPHL BIT(1)
+#define WCD_CLSH_STATE_HPHR BIT(2)
+#define WCD_CLSH_STATE_LO BIT(3)
+#define WCD_CLSH_STATE_MAX 4
+#define NUM_CLSH_STATES_V2 BIT(WCD_CLSH_STATE_MAX)
+
+enum wcd_clsh_mode {
+ CLS_H_NORMAL = 0, /* Class-H Default */
+ CLS_H_HIFI, /* Class-H HiFi */
+ CLS_H_LP, /* Class-H Low Power */
+ CLS_AB, /* Class-AB */
+ CLS_H_LOHIFI, /* LoHIFI */
+ CLS_NONE, /* None of the above modes */
+};
+
+struct wcd_clsh_ctrl;
+
+extern struct wcd_clsh_ctrl *wcd_clsh_ctrl_alloc(
+ struct snd_soc_component *component,
+ int version);
+extern void wcd_clsh_ctrl_free(struct wcd_clsh_ctrl *ctrl);
+extern int wcd_clsh_ctrl_get_state(struct wcd_clsh_ctrl *ctrl);
+extern int wcd_clsh_ctrl_set_state(struct wcd_clsh_ctrl *ctrl,
+ enum wcd_clsh_event event,
+ int state,
+ enum wcd_clsh_mode mode);
+
+#endif /* _WCD_CLSH_V2_H_ */
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
new file mode 100644
index 000000000000..981f88a5f615
--- /dev/null
+++ b/sound/soc/codecs/wcd9335.c
@@ -0,0 +1,5244 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+// Copyright (c) 2017-2018, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slimbus.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <sound/soc-dapm.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <sound/tlv.h>
+#include <sound/info.h>
+#include "wcd9335.h"
+#include "wcd-clsh-v2.h"
+
+#define WCD9335_RATES_MASK (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
+/* Fractional Rates */
+#define WCD9335_FRAC_RATES_MASK (SNDRV_PCM_RATE_44100)
+#define WCD9335_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+/* Slave port watermark level
+ * (0: 6 bytes, 1: 9 bytes, 2: 12 bytes, 3: 15 bytes)
+ */
+#define SLAVE_PORT_WATER_MARK_6BYTES 0
+#define SLAVE_PORT_WATER_MARK_9BYTES 1
+#define SLAVE_PORT_WATER_MARK_12BYTES 2
+#define SLAVE_PORT_WATER_MARK_15BYTES 3
+#define SLAVE_PORT_WATER_MARK_SHIFT 1
+#define SLAVE_PORT_ENABLE 1
+#define SLAVE_PORT_DISABLE 0
+#define WCD9335_SLIM_WATER_MARK_VAL \
+ ((SLAVE_PORT_WATER_MARK_12BYTES << SLAVE_PORT_WATER_MARK_SHIFT) | \
+ (SLAVE_PORT_ENABLE))
+
+#define WCD9335_SLIM_NUM_PORT_REG 3
+#define WCD9335_SLIM_PGD_PORT_INT_TX_EN0 (WCD9335_SLIM_PGD_PORT_INT_EN0 + 2)
+
+#define WCD9335_MCLK_CLK_12P288MHZ 12288000
+#define WCD9335_MCLK_CLK_9P6MHZ 9600000
+
+#define WCD9335_SLIM_CLOSE_TIMEOUT 1000
+#define WCD9335_SLIM_IRQ_OVERFLOW (1 << 0)
+#define WCD9335_SLIM_IRQ_UNDERFLOW (1 << 1)
+#define WCD9335_SLIM_IRQ_PORT_CLOSED (1 << 2)
+
+#define WCD9335_NUM_INTERPOLATORS 9
+#define WCD9335_RX_START 16
+#define WCD9335_SLIM_CH_START 128
+#define WCD9335_MAX_MICBIAS 4
+#define WCD9335_MAX_VALID_ADC_MUX 13
+#define WCD9335_INVALID_ADC_MUX 9
+
+#define TX_HPF_CUT_OFF_FREQ_MASK 0x60
+#define CF_MIN_3DB_4HZ 0x0
+#define CF_MIN_3DB_75HZ 0x1
+#define CF_MIN_3DB_150HZ 0x2
+#define WCD9335_DMIC_CLK_DIV_2 0x0
+#define WCD9335_DMIC_CLK_DIV_3 0x1
+#define WCD9335_DMIC_CLK_DIV_4 0x2
+#define WCD9335_DMIC_CLK_DIV_6 0x3
+#define WCD9335_DMIC_CLK_DIV_8 0x4
+#define WCD9335_DMIC_CLK_DIV_16 0x5
+#define WCD9335_DMIC_CLK_DRIVE_DEFAULT 0x02
+#define WCD9335_AMIC_PWR_LEVEL_LP 0
+#define WCD9335_AMIC_PWR_LEVEL_DEFAULT 1
+#define WCD9335_AMIC_PWR_LEVEL_HP 2
+#define WCD9335_AMIC_PWR_LVL_MASK 0x60
+#define WCD9335_AMIC_PWR_LVL_SHIFT 0x5
+
+#define WCD9335_DEC_PWR_LVL_MASK 0x06
+#define WCD9335_DEC_PWR_LVL_LP 0x02
+#define WCD9335_DEC_PWR_LVL_HP 0x04
+#define WCD9335_DEC_PWR_LVL_DF 0x00
+
+#define TX_HPF_CUT_OFF_FREQ_MASK 0x60
+#define CF_MIN_3DB_4HZ 0x0
+#define CF_MIN_3DB_75HZ 0x1
+#define CF_MIN_3DB_150HZ 0x2
+
+#define WCD9335_SLIM_RX_CH(p) \
+ {.port = p + WCD9335_RX_START, .shift = p,}
+
+#define WCD9335_SLIM_TX_CH(p) \
+ {.port = p, .shift = p,}
+
+/* vout step value */
+#define WCD9335_CALCULATE_VOUT_D(req_mv) (((req_mv - 650) * 10) / 25)
+
+#define WCD9335_INTERPOLATOR_PATH(id) \
+ {"RX INT" #id "_1 MIX1 INP0", "RX0", "SLIM RX0"}, \
+ {"RX INT" #id "_1 MIX1 INP0", "RX1", "SLIM RX1"}, \
+ {"RX INT" #id "_1 MIX1 INP0", "RX2", "SLIM RX2"}, \
+ {"RX INT" #id "_1 MIX1 INP0", "RX3", "SLIM RX3"}, \
+ {"RX INT" #id "_1 MIX1 INP0", "RX4", "SLIM RX4"}, \
+ {"RX INT" #id "_1 MIX1 INP0", "RX5", "SLIM RX5"}, \
+ {"RX INT" #id "_1 MIX1 INP0", "RX6", "SLIM RX6"}, \
+ {"RX INT" #id "_1 MIX1 INP0", "RX7", "SLIM RX7"}, \
+ {"RX INT" #id "_1 MIX1 INP1", "RX0", "SLIM RX0"}, \
+ {"RX INT" #id "_1 MIX1 INP1", "RX1", "SLIM RX1"}, \
+ {"RX INT" #id "_1 MIX1 INP1", "RX2", "SLIM RX2"}, \
+ {"RX INT" #id "_1 MIX1 INP1", "RX3", "SLIM RX3"}, \
+ {"RX INT" #id "_1 MIX1 INP1", "RX4", "SLIM RX4"}, \
+ {"RX INT" #id "_1 MIX1 INP1", "RX5", "SLIM RX5"}, \
+ {"RX INT" #id "_1 MIX1 INP1", "RX6", "SLIM RX6"}, \
+ {"RX INT" #id "_1 MIX1 INP1", "RX7", "SLIM RX7"}, \
+ {"RX INT" #id "_1 MIX1 INP2", "RX0", "SLIM RX0"}, \
+ {"RX INT" #id "_1 MIX1 INP2", "RX1", "SLIM RX1"}, \
+ {"RX INT" #id "_1 MIX1 INP2", "RX2", "SLIM RX2"}, \
+ {"RX INT" #id "_1 MIX1 INP2", "RX3", "SLIM RX3"}, \
+ {"RX INT" #id "_1 MIX1 INP2", "RX4", "SLIM RX4"}, \
+ {"RX INT" #id "_1 MIX1 INP2", "RX5", "SLIM RX5"}, \
+ {"RX INT" #id "_1 MIX1 INP2", "RX6", "SLIM RX6"}, \
+ {"RX INT" #id "_1 MIX1 INP2", "RX7", "SLIM RX7"}, \
+ {"RX INT" #id "_2 MUX", "RX0", "SLIM RX0"}, \
+ {"RX INT" #id "_2 MUX", "RX1", "SLIM RX1"}, \
+ {"RX INT" #id "_2 MUX", "RX2", "SLIM RX2"}, \
+ {"RX INT" #id "_2 MUX", "RX3", "SLIM RX3"}, \
+ {"RX INT" #id "_2 MUX", "RX4", "SLIM RX4"}, \
+ {"RX INT" #id "_2 MUX", "RX5", "SLIM RX5"}, \
+ {"RX INT" #id "_2 MUX", "RX6", "SLIM RX6"}, \
+ {"RX INT" #id "_2 MUX", "RX7", "SLIM RX7"}, \
+ {"RX INT" #id "_1 MIX1", NULL, "RX INT" #id "_1 MIX1 INP0"}, \
+ {"RX INT" #id "_1 MIX1", NULL, "RX INT" #id "_1 MIX1 INP1"}, \
+ {"RX INT" #id "_1 MIX1", NULL, "RX INT" #id "_1 MIX1 INP2"}, \
+ {"RX INT" #id " SEC MIX", NULL, "RX INT" #id "_2 MUX"}, \
+ {"RX INT" #id " SEC MIX", NULL, "RX INT" #id "_1 MIX1"}, \
+ {"RX INT" #id " MIX2", NULL, "RX INT" #id " SEC MIX"}, \
+ {"RX INT" #id " INTERP", NULL, "RX INT" #id " MIX2"}
+
+#define WCD9335_ADC_MUX_PATH(id) \
+ {"AIF1_CAP Mixer", "SLIM TX" #id, "SLIM TX" #id " MUX"}, \
+ {"AIF2_CAP Mixer", "SLIM TX" #id, "SLIM TX" #id " MUX"}, \
+ {"AIF3_CAP Mixer", "SLIM TX" #id, "SLIM TX" #id " MUX"}, \
+ {"SLIM TX" #id " MUX", "DEC" #id, "ADC MUX" #id}, \
+ {"ADC MUX" #id, "DMIC", "DMIC MUX" #id}, \
+ {"ADC MUX" #id, "AMIC", "AMIC MUX" #id}, \
+ {"DMIC MUX" #id, "DMIC0", "DMIC0"}, \
+ {"DMIC MUX" #id, "DMIC1", "DMIC1"}, \
+ {"DMIC MUX" #id, "DMIC2", "DMIC2"}, \
+ {"DMIC MUX" #id, "DMIC3", "DMIC3"}, \
+ {"DMIC MUX" #id, "DMIC4", "DMIC4"}, \
+ {"DMIC MUX" #id, "DMIC5", "DMIC5"}, \
+ {"AMIC MUX" #id, "ADC1", "ADC1"}, \
+ {"AMIC MUX" #id, "ADC2", "ADC2"}, \
+ {"AMIC MUX" #id, "ADC3", "ADC3"}, \
+ {"AMIC MUX" #id, "ADC4", "ADC4"}, \
+ {"AMIC MUX" #id, "ADC5", "ADC5"}, \
+ {"AMIC MUX" #id, "ADC6", "ADC6"}
+
+enum {
+ WCD9335_RX0 = 0,
+ WCD9335_RX1,
+ WCD9335_RX2,
+ WCD9335_RX3,
+ WCD9335_RX4,
+ WCD9335_RX5,
+ WCD9335_RX6,
+ WCD9335_RX7,
+ WCD9335_RX8,
+ WCD9335_RX9,
+ WCD9335_RX10,
+ WCD9335_RX11,
+ WCD9335_RX12,
+ WCD9335_RX_MAX,
+};
+
+enum {
+ WCD9335_TX0 = 0,
+ WCD9335_TX1,
+ WCD9335_TX2,
+ WCD9335_TX3,
+ WCD9335_TX4,
+ WCD9335_TX5,
+ WCD9335_TX6,
+ WCD9335_TX7,
+ WCD9335_TX8,
+ WCD9335_TX9,
+ WCD9335_TX10,
+ WCD9335_TX11,
+ WCD9335_TX12,
+ WCD9335_TX13,
+ WCD9335_TX14,
+ WCD9335_TX15,
+ WCD9335_TX_MAX,
+};
+
+enum {
+ SIDO_SOURCE_INTERNAL = 0,
+ SIDO_SOURCE_RCO_BG,
+};
+
+enum wcd9335_sido_voltage {
+ SIDO_VOLTAGE_SVS_MV = 950,
+ SIDO_VOLTAGE_NOMINAL_MV = 1100,
+};
+
+enum {
+ AIF1_PB = 0,
+ AIF1_CAP,
+ AIF2_PB,
+ AIF2_CAP,
+ AIF3_PB,
+ AIF3_CAP,
+ AIF4_PB,
+ NUM_CODEC_DAIS,
+};
+
+enum {
+ COMPANDER_1, /* HPH_L */
+ COMPANDER_2, /* HPH_R */
+ COMPANDER_3, /* LO1_DIFF */
+ COMPANDER_4, /* LO2_DIFF */
+ COMPANDER_5, /* LO3_SE */
+ COMPANDER_6, /* LO4_SE */
+ COMPANDER_7, /* SWR SPK CH1 */
+ COMPANDER_8, /* SWR SPK CH2 */
+ COMPANDER_MAX,
+};
+
+enum {
+ INTn_2_INP_SEL_ZERO = 0,
+ INTn_2_INP_SEL_RX0,
+ INTn_2_INP_SEL_RX1,
+ INTn_2_INP_SEL_RX2,
+ INTn_2_INP_SEL_RX3,
+ INTn_2_INP_SEL_RX4,
+ INTn_2_INP_SEL_RX5,
+ INTn_2_INP_SEL_RX6,
+ INTn_2_INP_SEL_RX7,
+ INTn_2_INP_SEL_PROXIMITY,
+};
+
+enum {
+ INTn_1_MIX_INP_SEL_ZERO = 0,
+ INTn_1_MIX_INP_SEL_DEC0,
+ INTn_1_MIX_INP_SEL_DEC1,
+ INTn_1_MIX_INP_SEL_IIR0,
+ INTn_1_MIX_INP_SEL_IIR1,
+ INTn_1_MIX_INP_SEL_RX0,
+ INTn_1_MIX_INP_SEL_RX1,
+ INTn_1_MIX_INP_SEL_RX2,
+ INTn_1_MIX_INP_SEL_RX3,
+ INTn_1_MIX_INP_SEL_RX4,
+ INTn_1_MIX_INP_SEL_RX5,
+ INTn_1_MIX_INP_SEL_RX6,
+ INTn_1_MIX_INP_SEL_RX7,
+
+};
+
+enum {
+ INTERP_EAR = 0,
+ INTERP_HPHL,
+ INTERP_HPHR,
+ INTERP_LO1,
+ INTERP_LO2,
+ INTERP_LO3,
+ INTERP_LO4,
+ INTERP_SPKR1,
+ INTERP_SPKR2,
+};
+
+enum wcd_clock_type {
+ WCD_CLK_OFF,
+ WCD_CLK_RCO,
+ WCD_CLK_MCLK,
+};
+
+enum {
+ MIC_BIAS_1 = 1,
+ MIC_BIAS_2,
+ MIC_BIAS_3,
+ MIC_BIAS_4
+};
+
+enum {
+ MICB_PULLUP_ENABLE,
+ MICB_PULLUP_DISABLE,
+ MICB_ENABLE,
+ MICB_DISABLE,
+};
+
+struct wcd9335_slim_ch {
+ u32 ch_num;
+ u16 port;
+ u16 shift;
+ struct list_head list;
+};
+
+struct wcd_slim_codec_dai_data {
+ struct list_head slim_ch_list;
+ struct slim_stream_config sconfig;
+ struct slim_stream_runtime *sruntime;
+};
+
+struct wcd9335_codec {
+ struct device *dev;
+ struct clk *mclk;
+ struct clk *native_clk;
+ u32 mclk_rate;
+ u8 version;
+
+ struct slim_device *slim;
+ struct slim_device *slim_ifc_dev;
+ struct regmap *regmap;
+ struct regmap *if_regmap;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct wcd9335_slim_ch rx_chs[WCD9335_RX_MAX];
+ struct wcd9335_slim_ch tx_chs[WCD9335_TX_MAX];
+ u32 num_rx_port;
+ u32 num_tx_port;
+
+ int sido_input_src;
+ enum wcd9335_sido_voltage sido_voltage;
+
+ struct wcd_slim_codec_dai_data dai[NUM_CODEC_DAIS];
+ struct snd_soc_component *component;
+
+ int master_bias_users;
+ int clk_mclk_users;
+ int clk_rco_users;
+ int sido_ccl_cnt;
+ enum wcd_clock_type clk_type;
+
+ struct wcd_clsh_ctrl *clsh_ctrl;
+ u32 hph_mode;
+ int prim_int_users[WCD9335_NUM_INTERPOLATORS];
+
+ int comp_enabled[COMPANDER_MAX];
+
+ int intr1;
+ int reset_gpio;
+ struct regulator_bulk_data supplies[WCD9335_MAX_SUPPLY];
+
+ unsigned int rx_port_value;
+ unsigned int tx_port_value;
+ int hph_l_gain;
+ int hph_r_gain;
+ u32 rx_bias_count;
+
+ /*TX*/
+ int micb_ref[WCD9335_MAX_MICBIAS];
+ int pullup_ref[WCD9335_MAX_MICBIAS];
+
+ int dmic_0_1_clk_cnt;
+ int dmic_2_3_clk_cnt;
+ int dmic_4_5_clk_cnt;
+ int dmic_sample_rate;
+ int mad_dmic_sample_rate;
+
+ int native_clk_users;
+};
+
+struct wcd9335_irq {
+ int irq;
+ irqreturn_t (*handler)(int irq, void *data);
+ char *name;
+};
+
+static const struct wcd9335_slim_ch wcd9335_tx_chs[WCD9335_TX_MAX] = {
+ WCD9335_SLIM_TX_CH(0),
+ WCD9335_SLIM_TX_CH(1),
+ WCD9335_SLIM_TX_CH(2),
+ WCD9335_SLIM_TX_CH(3),
+ WCD9335_SLIM_TX_CH(4),
+ WCD9335_SLIM_TX_CH(5),
+ WCD9335_SLIM_TX_CH(6),
+ WCD9335_SLIM_TX_CH(7),
+ WCD9335_SLIM_TX_CH(8),
+ WCD9335_SLIM_TX_CH(9),
+ WCD9335_SLIM_TX_CH(10),
+ WCD9335_SLIM_TX_CH(11),
+ WCD9335_SLIM_TX_CH(12),
+ WCD9335_SLIM_TX_CH(13),
+ WCD9335_SLIM_TX_CH(14),
+ WCD9335_SLIM_TX_CH(15),
+};
+
+static const struct wcd9335_slim_ch wcd9335_rx_chs[WCD9335_RX_MAX] = {
+ WCD9335_SLIM_RX_CH(0), /* 16 */
+ WCD9335_SLIM_RX_CH(1), /* 17 */
+ WCD9335_SLIM_RX_CH(2),
+ WCD9335_SLIM_RX_CH(3),
+ WCD9335_SLIM_RX_CH(4),
+ WCD9335_SLIM_RX_CH(5),
+ WCD9335_SLIM_RX_CH(6),
+ WCD9335_SLIM_RX_CH(7),
+ WCD9335_SLIM_RX_CH(8),
+ WCD9335_SLIM_RX_CH(9),
+ WCD9335_SLIM_RX_CH(10),
+ WCD9335_SLIM_RX_CH(11),
+ WCD9335_SLIM_RX_CH(12),
+};
+
+struct interp_sample_rate {
+ int rate;
+ int rate_val;
+};
+
+static struct interp_sample_rate int_mix_rate_val[] = {
+ {48000, 0x4}, /* 48K */
+ {96000, 0x5}, /* 96K */
+ {192000, 0x6}, /* 192K */
+};
+
+static struct interp_sample_rate int_prim_rate_val[] = {
+ {8000, 0x0}, /* 8K */
+ {16000, 0x1}, /* 16K */
+ {24000, -EINVAL},/* 24K */
+ {32000, 0x3}, /* 32K */
+ {48000, 0x4}, /* 48K */
+ {96000, 0x5}, /* 96K */
+ {192000, 0x6}, /* 192K */
+ {384000, 0x7}, /* 384K */
+ {44100, 0x8}, /* 44.1K */
+};
+
+struct wcd9335_reg_mask_val {
+ u16 reg;
+ u8 mask;
+ u8 val;
+};
+
+static const struct wcd9335_reg_mask_val wcd9335_codec_reg_init[] = {
+ /* Rbuckfly/R_EAR(32) */
+ {WCD9335_CDC_CLSH_K2_MSB, 0x0F, 0x00},
+ {WCD9335_CDC_CLSH_K2_LSB, 0xFF, 0x60},
+ {WCD9335_CPE_SS_DMIC_CFG, 0x80, 0x00},
+ {WCD9335_CDC_BOOST0_BOOST_CTL, 0x70, 0x50},
+ {WCD9335_CDC_BOOST1_BOOST_CTL, 0x70, 0x50},
+ {WCD9335_CDC_RX7_RX_PATH_CFG1, 0x08, 0x08},
+ {WCD9335_CDC_RX8_RX_PATH_CFG1, 0x08, 0x08},
+	{WCD9335_ANA_LO_1_2, 0x3C, 0x3C},
+ {WCD9335_DIFF_LO_COM_SWCAP_REFBUF_FREQ, 0x70, 0x00},
+ {WCD9335_DIFF_LO_COM_PA_FREQ, 0x70, 0x40},
+ {WCD9335_SOC_MAD_AUDIO_CTL_2, 0x03, 0x03},
+ {WCD9335_CDC_TOP_TOP_CFG1, 0x02, 0x02},
+ {WCD9335_CDC_TOP_TOP_CFG1, 0x01, 0x01},
+ {WCD9335_EAR_CMBUFF, 0x08, 0x00},
+ {WCD9335_CDC_TX9_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_TX10_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_TX11_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_TX12_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_COMPANDER7_CTL3, 0x80, 0x80},
+ {WCD9335_CDC_COMPANDER8_CTL3, 0x80, 0x80},
+ {WCD9335_CDC_COMPANDER7_CTL7, 0x01, 0x01},
+ {WCD9335_CDC_COMPANDER8_CTL7, 0x01, 0x01},
+ {WCD9335_CDC_RX0_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX1_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX2_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX3_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX4_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX5_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX6_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX7_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX8_RX_PATH_CFG0, 0x01, 0x01},
+ {WCD9335_CDC_RX0_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_CDC_RX1_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_CDC_RX2_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_CDC_RX3_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_CDC_RX4_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_CDC_RX5_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_CDC_RX6_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_CDC_RX7_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_CDC_RX8_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {WCD9335_VBADC_IBIAS_FE, 0x0C, 0x08},
+ {WCD9335_RCO_CTRL_2, 0x0F, 0x08},
+ {WCD9335_RX_BIAS_FLYB_MID_RST, 0xF0, 0x10},
+ {WCD9335_FLYBACK_CTRL_1, 0x20, 0x20},
+ {WCD9335_HPH_OCP_CTL, 0xFF, 0x5A},
+ {WCD9335_HPH_L_TEST, 0x01, 0x01},
+ {WCD9335_HPH_R_TEST, 0x01, 0x01},
+ {WCD9335_CDC_BOOST0_BOOST_CFG1, 0x3F, 0x12},
+ {WCD9335_CDC_BOOST0_BOOST_CFG2, 0x1C, 0x08},
+ {WCD9335_CDC_COMPANDER7_CTL7, 0x1E, 0x18},
+ {WCD9335_CDC_BOOST1_BOOST_CFG1, 0x3F, 0x12},
+ {WCD9335_CDC_BOOST1_BOOST_CFG2, 0x1C, 0x08},
+ {WCD9335_CDC_COMPANDER8_CTL7, 0x1E, 0x18},
+ {WCD9335_CDC_TX0_TX_PATH_SEC7, 0xFF, 0x45},
+ {WCD9335_CDC_RX0_RX_PATH_SEC0, 0xFC, 0xF4},
+ {WCD9335_HPH_REFBUFF_LP_CTL, 0x08, 0x08},
+ {WCD9335_HPH_REFBUFF_LP_CTL, 0x06, 0x02},
+};
+
+/* Cutoff frequency for high pass filter */
+static const char * const cf_text[] = {
+ "CF_NEG_3DB_4HZ", "CF_NEG_3DB_75HZ", "CF_NEG_3DB_150HZ"
+};
+
+static const char * const rx_cf_text[] = {
+ "CF_NEG_3DB_4HZ", "CF_NEG_3DB_75HZ", "CF_NEG_3DB_150HZ",
+ "CF_NEG_3DB_0P48HZ"
+};
+
+static const char * const rx_int0_7_mix_mux_text[] = {
+ "ZERO", "RX0", "RX1", "RX2", "RX3", "RX4", "RX5",
+ "RX6", "RX7", "PROXIMITY"
+};
+
+static const char * const rx_int_mix_mux_text[] = {
+ "ZERO", "RX0", "RX1", "RX2", "RX3", "RX4", "RX5",
+ "RX6", "RX7"
+};
+
+static const char * const rx_prim_mix_text[] = {
+ "ZERO", "DEC0", "DEC1", "IIR0", "IIR1", "RX0", "RX1", "RX2",
+ "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const rx_int_dem_inp_mux_text[] = {
+ "NORMAL_DSM_OUT", "CLSH_DSM_OUT",
+};
+
+static const char * const rx_int0_interp_mux_text[] = {
+ "ZERO", "RX INT0 MIX2",
+};
+
+static const char * const rx_int1_interp_mux_text[] = {
+ "ZERO", "RX INT1 MIX2",
+};
+
+static const char * const rx_int2_interp_mux_text[] = {
+ "ZERO", "RX INT2 MIX2",
+};
+
+static const char * const rx_int3_interp_mux_text[] = {
+ "ZERO", "RX INT3 MIX2",
+};
+
+static const char * const rx_int4_interp_mux_text[] = {
+ "ZERO", "RX INT4 MIX2",
+};
+
+static const char * const rx_int5_interp_mux_text[] = {
+ "ZERO", "RX INT5 MIX2",
+};
+
+static const char * const rx_int6_interp_mux_text[] = {
+ "ZERO", "RX INT6 MIX2",
+};
+
+static const char * const rx_int7_interp_mux_text[] = {
+ "ZERO", "RX INT7 MIX2",
+};
+
+static const char * const rx_int8_interp_mux_text[] = {
+ "ZERO", "RX INT8 SEC MIX"
+};
+
+static const char * const rx_hph_mode_mux_text[] = {
+ "Class H Invalid", "Class-H Hi-Fi", "Class-H Low Power", "Class-AB",
+ "Class-H Hi-Fi Low Power"
+};
+
+static const char *const slim_rx_mux_text[] = {
+ "ZERO", "AIF1_PB", "AIF2_PB", "AIF3_PB", "AIF4_PB",
+};
+
+static const char * const adc_mux_text[] = {
+ "DMIC", "AMIC", "ANC_FB_TUNE1", "ANC_FB_TUNE2"
+};
+
+static const char * const dmic_mux_text[] = {
+ "ZERO", "DMIC0", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5",
+ "SMIC0", "SMIC1", "SMIC2", "SMIC3"
+};
+
+static const char * const dmic_mux_alt_text[] = {
+ "ZERO", "DMIC0", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5",
+};
+
+static const char * const amic_mux_text[] = {
+ "ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6"
+};
+
+static const char * const sb_tx0_mux_text[] = {
+ "ZERO", "RX_MIX_TX0", "DEC0", "DEC0_192"
+};
+
+static const char * const sb_tx1_mux_text[] = {
+ "ZERO", "RX_MIX_TX1", "DEC1", "DEC1_192"
+};
+
+static const char * const sb_tx2_mux_text[] = {
+ "ZERO", "RX_MIX_TX2", "DEC2", "DEC2_192"
+};
+
+static const char * const sb_tx3_mux_text[] = {
+ "ZERO", "RX_MIX_TX3", "DEC3", "DEC3_192"
+};
+
+static const char * const sb_tx4_mux_text[] = {
+ "ZERO", "RX_MIX_TX4", "DEC4", "DEC4_192"
+};
+
+static const char * const sb_tx5_mux_text[] = {
+ "ZERO", "RX_MIX_TX5", "DEC5", "DEC5_192"
+};
+
+static const char * const sb_tx6_mux_text[] = {
+ "ZERO", "RX_MIX_TX6", "DEC6", "DEC6_192"
+};
+
+static const char * const sb_tx7_mux_text[] = {
+ "ZERO", "RX_MIX_TX7", "DEC7", "DEC7_192"
+};
+
+static const char * const sb_tx8_mux_text[] = {
+ "ZERO", "RX_MIX_TX8", "DEC8", "DEC8_192"
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0);
+
+static const struct soc_enum cf_dec0_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX0_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX1_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec2_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX2_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec3_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX3_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec4_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX4_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec5_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX5_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec6_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX6_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec7_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX7_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_dec8_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX8_TX_PATH_CFG0, 5, 3, cf_text);
+
+static const struct soc_enum cf_int0_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX0_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int0_2_enum, WCD9335_CDC_RX0_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum cf_int1_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX1_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int1_2_enum, WCD9335_CDC_RX1_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum cf_int2_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX2_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int2_2_enum, WCD9335_CDC_RX2_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum cf_int3_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX3_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int3_2_enum, WCD9335_CDC_RX3_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum cf_int4_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX4_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int4_2_enum, WCD9335_CDC_RX4_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum cf_int5_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX5_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int5_2_enum, WCD9335_CDC_RX5_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum cf_int6_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX6_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int6_2_enum, WCD9335_CDC_RX6_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum cf_int7_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX7_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int7_2_enum, WCD9335_CDC_RX7_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum cf_int8_1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX8_RX_PATH_CFG2, 0, 4, rx_cf_text);
+
+static SOC_ENUM_SINGLE_DECL(cf_int8_2_enum, WCD9335_CDC_RX8_RX_PATH_MIX_CFG, 2,
+ rx_cf_text);
+
+static const struct soc_enum rx_hph_mode_mux_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(rx_hph_mode_mux_text),
+ rx_hph_mode_mux_text);
+
+static const struct soc_enum slim_rx_mux_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(slim_rx_mux_text), slim_rx_mux_text);
+
+static const struct soc_enum rx_int0_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1, 0, 10,
+ rx_int0_7_mix_mux_text);
+
+static const struct soc_enum rx_int1_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG1, 0, 9,
+ rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int2_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG1, 0, 9,
+ rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int3_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG1, 0, 9,
+ rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int4_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG1, 0, 9,
+ rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int5_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG1, 0, 9,
+ rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int6_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG1, 0, 9,
+ rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int7_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG1, 0, 10,
+ rx_int0_7_mix_mux_text);
+
+static const struct soc_enum rx_int8_2_mux_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG1, 0, 9,
+ rx_int_mix_mux_text);
+
+static const struct soc_enum rx_int0_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int0_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int0_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int1_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int1_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int1_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int2_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int2_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int2_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int3_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int3_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int3_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int4_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int4_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int4_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int5_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int5_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int5_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int6_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int6_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int6_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int7_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int7_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int7_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int8_1_mix_inp0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG0, 0, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int8_1_mix_inp1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG0, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int8_1_mix_inp2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG1, 4, 13,
+ rx_prim_mix_text);
+
+static const struct soc_enum rx_int0_dem_inp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX0_RX_PATH_SEC0, 0,
+ ARRAY_SIZE(rx_int_dem_inp_mux_text),
+ rx_int_dem_inp_mux_text);
+
+static const struct soc_enum rx_int1_dem_inp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX1_RX_PATH_SEC0, 0,
+ ARRAY_SIZE(rx_int_dem_inp_mux_text),
+ rx_int_dem_inp_mux_text);
+
+static const struct soc_enum rx_int2_dem_inp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX2_RX_PATH_SEC0, 0,
+ ARRAY_SIZE(rx_int_dem_inp_mux_text),
+ rx_int_dem_inp_mux_text);
+
+static const struct soc_enum rx_int0_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX0_RX_PATH_CTL, 5, 2,
+ rx_int0_interp_mux_text);
+
+static const struct soc_enum rx_int1_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX1_RX_PATH_CTL, 5, 2,
+ rx_int1_interp_mux_text);
+
+static const struct soc_enum rx_int2_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX2_RX_PATH_CTL, 5, 2,
+ rx_int2_interp_mux_text);
+
+static const struct soc_enum rx_int3_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX3_RX_PATH_CTL, 5, 2,
+ rx_int3_interp_mux_text);
+
+static const struct soc_enum rx_int4_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX4_RX_PATH_CTL, 5, 2,
+ rx_int4_interp_mux_text);
+
+static const struct soc_enum rx_int5_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX5_RX_PATH_CTL, 5, 2,
+ rx_int5_interp_mux_text);
+
+static const struct soc_enum rx_int6_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX6_RX_PATH_CTL, 5, 2,
+ rx_int6_interp_mux_text);
+
+static const struct soc_enum rx_int7_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX7_RX_PATH_CTL, 5, 2,
+ rx_int7_interp_mux_text);
+
+static const struct soc_enum rx_int8_interp_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_RX8_RX_PATH_CTL, 5, 2,
+ rx_int8_interp_mux_text);
+
+static const struct soc_enum tx_adc_mux0_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1, 0, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_adc_mux1_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG1, 0, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_adc_mux2_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG1, 0, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_adc_mux3_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG1, 0, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_adc_mux4_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0, 6, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_adc_mux5_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0, 6, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_adc_mux6_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0, 6, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_adc_mux7_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0, 6, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_adc_mux8_chain_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0, 6, 4,
+ adc_mux_text);
+
+static const struct soc_enum tx_dmic_mux0_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0, 3, 11,
+ dmic_mux_text);
+
+static const struct soc_enum tx_dmic_mux1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG0, 3, 11,
+ dmic_mux_text);
+
+static const struct soc_enum tx_dmic_mux2_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG0, 3, 11,
+ dmic_mux_text);
+
+static const struct soc_enum tx_dmic_mux3_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG0, 3, 11,
+ dmic_mux_text);
+
+static const struct soc_enum tx_dmic_mux4_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0, 3, 7,
+ dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux5_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0, 3, 7,
+ dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux6_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0, 3, 7,
+ dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux7_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0, 3, 7,
+ dmic_mux_alt_text);
+
+static const struct soc_enum tx_dmic_mux8_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0, 3, 7,
+ dmic_mux_alt_text);
+
+static const struct soc_enum tx_amic_mux0_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum tx_amic_mux1_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum tx_amic_mux2_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum tx_amic_mux3_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum tx_amic_mux4_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum tx_amic_mux5_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum tx_amic_mux6_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum tx_amic_mux7_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum tx_amic_mux8_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0, 0, 7,
+ amic_mux_text);
+
+static const struct soc_enum sb_tx0_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0, 0, 4,
+ sb_tx0_mux_text);
+
+static const struct soc_enum sb_tx1_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0, 2, 4,
+ sb_tx1_mux_text);
+
+static const struct soc_enum sb_tx2_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0, 4, 4,
+ sb_tx2_mux_text);
+
+static const struct soc_enum sb_tx3_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0, 6, 4,
+ sb_tx3_mux_text);
+
+static const struct soc_enum sb_tx4_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1, 0, 4,
+ sb_tx4_mux_text);
+
+static const struct soc_enum sb_tx5_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1, 2, 4,
+ sb_tx5_mux_text);
+
+static const struct soc_enum sb_tx6_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1, 4, 4,
+ sb_tx6_mux_text);
+
+static const struct soc_enum sb_tx7_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1, 6, 4,
+ sb_tx7_mux_text);
+
+static const struct soc_enum sb_tx8_mux_enum =
+ SOC_ENUM_SINGLE(WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2, 0, 4,
+ sb_tx8_mux_text);
+
+static const struct snd_kcontrol_new rx_int0_2_mux =
+ SOC_DAPM_ENUM("RX INT0_2 MUX Mux", rx_int0_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_2_mux =
+ SOC_DAPM_ENUM("RX INT1_2 MUX Mux", rx_int1_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_2_mux =
+ SOC_DAPM_ENUM("RX INT2_2 MUX Mux", rx_int2_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_2_mux =
+ SOC_DAPM_ENUM("RX INT3_2 MUX Mux", rx_int3_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_2_mux =
+ SOC_DAPM_ENUM("RX INT4_2 MUX Mux", rx_int4_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int5_2_mux =
+ SOC_DAPM_ENUM("RX INT5_2 MUX Mux", rx_int5_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int6_2_mux =
+ SOC_DAPM_ENUM("RX INT6_2 MUX Mux", rx_int6_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_2_mux =
+ SOC_DAPM_ENUM("RX INT7_2 MUX Mux", rx_int7_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int8_2_mux =
+ SOC_DAPM_ENUM("RX INT8_2 MUX Mux", rx_int8_2_mux_chain_enum);
+
+static const struct snd_kcontrol_new rx_int0_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT0_1 MIX1 INP0 Mux", rx_int0_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int0_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT0_1 MIX1 INP1 Mux", rx_int0_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int0_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT0_1 MIX1 INP2 Mux", rx_int0_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT1_1 MIX1 INP0 Mux", rx_int1_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT1_1 MIX1 INP1 Mux", rx_int1_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int1_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT1_1 MIX1 INP2 Mux", rx_int1_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT2_1 MIX1 INP0 Mux", rx_int2_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT2_1 MIX1 INP1 Mux", rx_int2_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int2_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT2_1 MIX1 INP2 Mux", rx_int2_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT3_1 MIX1 INP0 Mux", rx_int3_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT3_1 MIX1 INP1 Mux", rx_int3_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int3_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT3_1 MIX1 INP2 Mux", rx_int3_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT4_1 MIX1 INP0 Mux", rx_int4_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT4_1 MIX1 INP1 Mux", rx_int4_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int4_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT4_1 MIX1 INP2 Mux", rx_int4_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int5_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT5_1 MIX1 INP0 Mux", rx_int5_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int5_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT5_1 MIX1 INP1 Mux", rx_int5_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int5_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT5_1 MIX1 INP2 Mux", rx_int5_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int6_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT6_1 MIX1 INP0 Mux", rx_int6_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int6_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT6_1 MIX1 INP1 Mux", rx_int6_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int6_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT6_1 MIX1 INP2 Mux", rx_int6_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT7_1 MIX1 INP0 Mux", rx_int7_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT7_1 MIX1 INP1 Mux", rx_int7_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int7_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT7_1 MIX1 INP2 Mux", rx_int7_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int8_1_mix_inp0_mux =
+ SOC_DAPM_ENUM("RX INT8_1 MIX1 INP0 Mux", rx_int8_1_mix_inp0_chain_enum);
+
+static const struct snd_kcontrol_new rx_int8_1_mix_inp1_mux =
+ SOC_DAPM_ENUM("RX INT8_1 MIX1 INP1 Mux", rx_int8_1_mix_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx_int8_1_mix_inp2_mux =
+ SOC_DAPM_ENUM("RX INT8_1 MIX1 INP2 Mux", rx_int8_1_mix_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_int0_interp_mux =
+ SOC_DAPM_ENUM("RX INT0 INTERP Mux", rx_int0_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int1_interp_mux =
+ SOC_DAPM_ENUM("RX INT1 INTERP Mux", rx_int1_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int2_interp_mux =
+ SOC_DAPM_ENUM("RX INT2 INTERP Mux", rx_int2_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int3_interp_mux =
+ SOC_DAPM_ENUM("RX INT3 INTERP Mux", rx_int3_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int4_interp_mux =
+ SOC_DAPM_ENUM("RX INT4 INTERP Mux", rx_int4_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int5_interp_mux =
+ SOC_DAPM_ENUM("RX INT5 INTERP Mux", rx_int5_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int6_interp_mux =
+ SOC_DAPM_ENUM("RX INT6 INTERP Mux", rx_int6_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int7_interp_mux =
+ SOC_DAPM_ENUM("RX INT7 INTERP Mux", rx_int7_interp_mux_enum);
+
+static const struct snd_kcontrol_new rx_int8_interp_mux =
+ SOC_DAPM_ENUM("RX INT8 INTERP Mux", rx_int8_interp_mux_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux0 =
+ SOC_DAPM_ENUM("DMIC MUX0 Mux", tx_dmic_mux0_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux1 =
+ SOC_DAPM_ENUM("DMIC MUX1 Mux", tx_dmic_mux1_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux2 =
+ SOC_DAPM_ENUM("DMIC MUX2 Mux", tx_dmic_mux2_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux3 =
+ SOC_DAPM_ENUM("DMIC MUX3 Mux", tx_dmic_mux3_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux4 =
+ SOC_DAPM_ENUM("DMIC MUX4 Mux", tx_dmic_mux4_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux5 =
+ SOC_DAPM_ENUM("DMIC MUX5 Mux", tx_dmic_mux5_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux6 =
+ SOC_DAPM_ENUM("DMIC MUX6 Mux", tx_dmic_mux6_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux7 =
+ SOC_DAPM_ENUM("DMIC MUX7 Mux", tx_dmic_mux7_enum);
+
+static const struct snd_kcontrol_new tx_dmic_mux8 =
+ SOC_DAPM_ENUM("DMIC MUX8 Mux", tx_dmic_mux8_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux0 =
+ SOC_DAPM_ENUM("AMIC MUX0 Mux", tx_amic_mux0_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux1 =
+ SOC_DAPM_ENUM("AMIC MUX1 Mux", tx_amic_mux1_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux2 =
+ SOC_DAPM_ENUM("AMIC MUX2 Mux", tx_amic_mux2_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux3 =
+ SOC_DAPM_ENUM("AMIC MUX3 Mux", tx_amic_mux3_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux4 =
+ SOC_DAPM_ENUM("AMIC MUX4 Mux", tx_amic_mux4_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux5 =
+ SOC_DAPM_ENUM("AMIC MUX5 Mux", tx_amic_mux5_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux6 =
+ SOC_DAPM_ENUM("AMIC MUX6 Mux", tx_amic_mux6_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux7 =
+ SOC_DAPM_ENUM("AMIC MUX7 Mux", tx_amic_mux7_enum);
+
+static const struct snd_kcontrol_new tx_amic_mux8 =
+ SOC_DAPM_ENUM("AMIC MUX8 Mux", tx_amic_mux8_enum);
+
+static const struct snd_kcontrol_new sb_tx0_mux =
+ SOC_DAPM_ENUM("SLIM TX0 MUX Mux", sb_tx0_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx1_mux =
+ SOC_DAPM_ENUM("SLIM TX1 MUX Mux", sb_tx1_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx2_mux =
+ SOC_DAPM_ENUM("SLIM TX2 MUX Mux", sb_tx2_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx3_mux =
+ SOC_DAPM_ENUM("SLIM TX3 MUX Mux", sb_tx3_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx4_mux =
+ SOC_DAPM_ENUM("SLIM TX4 MUX Mux", sb_tx4_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx5_mux =
+ SOC_DAPM_ENUM("SLIM TX5 MUX Mux", sb_tx5_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx6_mux =
+ SOC_DAPM_ENUM("SLIM TX6 MUX Mux", sb_tx6_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx7_mux =
+ SOC_DAPM_ENUM("SLIM TX7 MUX Mux", sb_tx7_mux_enum);
+
+static const struct snd_kcontrol_new sb_tx8_mux =
+ SOC_DAPM_ENUM("SLIM TX8 MUX Mux", sb_tx8_mux_enum);
+
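+/*
+ * SLIM RX port routing: the get/put handlers below back the "SLIM RXn Mux"
+ * controls. The put handler detaches the RX channel from any playback DAI
+ * (selection 0) or appends it to the SLIM channel list of the selected
+ * AIF1..AIF4 playback DAI, then lets DAPM re-evaluate the path.
+ */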
+static int slim_rx_mux_get(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kc);
+ struct wcd9335_codec *wcd = dev_get_drvdata(dapm->dev);
+
+ ucontrol->value.enumerated.item[0] = wcd->rx_port_value;
+
+ return 0;
+}
+
+static int slim_rx_mux_put(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kc);
+ struct wcd9335_codec *wcd = dev_get_drvdata(w->dapm->dev);
+ struct soc_enum *e = (struct soc_enum *)kc->private_value;
+ struct snd_soc_dapm_update *update = NULL;
+ u32 port_id = w->shift;
+
+ wcd->rx_port_value = ucontrol->value.enumerated.item[0];
+
+ switch (wcd->rx_port_value) {
+ case 0:
+ list_del_init(&wcd->rx_chs[port_id].list);
+ break;
+ case 1:
+ list_add_tail(&wcd->rx_chs[port_id].list,
+ &wcd->dai[AIF1_PB].slim_ch_list);
+ break;
+ case 2:
+ list_add_tail(&wcd->rx_chs[port_id].list,
+ &wcd->dai[AIF2_PB].slim_ch_list);
+ break;
+ case 3:
+ list_add_tail(&wcd->rx_chs[port_id].list,
+ &wcd->dai[AIF3_PB].slim_ch_list);
+ break;
+ case 4:
+ list_add_tail(&wcd->rx_chs[port_id].list,
+ &wcd->dai[AIF4_PB].slim_ch_list);
+ break;
+ default:
+ dev_err(wcd->dev, "Unknown AIF %d\n", wcd->rx_port_value);
+ goto err;
+ }
+
+ snd_soc_dapm_mux_update_power(w->dapm, kc, wcd->rx_port_value,
+ e, update);
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
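+/*
+ * SLIM TX mixer controls: each switch maps one codec TX port onto a capture
+ * DAI. The put handler keeps a bitmask of enabled TX ports (tx_port_value)
+ * and adds/removes the port's channel on the DAI's SLIM channel list before
+ * letting DAPM update the mixer power state.
+ */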
+static int slim_tx_mixer_get(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kc);
+ struct wcd9335_codec *wcd = dev_get_drvdata(dapm->dev);
+
+ ucontrol->value.integer.value[0] = wcd->tx_port_value;
+
+ return 0;
+}
+
+static int slim_tx_mixer_put(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget *widget = snd_soc_dapm_kcontrol_widget(kc);
+ struct wcd9335_codec *wcd = dev_get_drvdata(widget->dapm->dev);
+ struct snd_soc_dapm_update *update = NULL;
+ struct soc_mixer_control *mixer =
+ (struct soc_mixer_control *)kc->private_value;
+ int enable = ucontrol->value.integer.value[0];
+ int dai_id = widget->shift;
+ int port_id = mixer->shift;
+
+ switch (dai_id) {
+ case AIF1_CAP:
+ case AIF2_CAP:
+ case AIF3_CAP:
+ /* only add to the list if value not set */
+ if (enable && !(wcd->tx_port_value & BIT(port_id))) {
+ wcd->tx_port_value |= BIT(port_id);
+ list_add_tail(&wcd->tx_chs[port_id].list,
+ &wcd->dai[dai_id].slim_ch_list);
+ } else if (!enable && (wcd->tx_port_value & BIT(port_id))) {
+ wcd->tx_port_value &= ~BIT(port_id);
+ list_del_init(&wcd->tx_chs[port_id].list);
+ }
+ break;
+ default:
+ dev_err(wcd->dev, "Unknown AIF %d\n", dai_id);
+ return -EINVAL;
+ }
+
+ snd_soc_dapm_mixer_update_power(widget->dapm, kc, enable, update);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new slim_rx_mux[WCD9335_RX_MAX] = {
+ SOC_DAPM_ENUM_EXT("SLIM RX0 Mux", slim_rx_mux_enum,
+ slim_rx_mux_get, slim_rx_mux_put),
+ SOC_DAPM_ENUM_EXT("SLIM RX1 Mux", slim_rx_mux_enum,
+ slim_rx_mux_get, slim_rx_mux_put),
+ SOC_DAPM_ENUM_EXT("SLIM RX2 Mux", slim_rx_mux_enum,
+ slim_rx_mux_get, slim_rx_mux_put),
+ SOC_DAPM_ENUM_EXT("SLIM RX3 Mux", slim_rx_mux_enum,
+ slim_rx_mux_get, slim_rx_mux_put),
+ SOC_DAPM_ENUM_EXT("SLIM RX4 Mux", slim_rx_mux_enum,
+ slim_rx_mux_get, slim_rx_mux_put),
+ SOC_DAPM_ENUM_EXT("SLIM RX5 Mux", slim_rx_mux_enum,
+ slim_rx_mux_get, slim_rx_mux_put),
+ SOC_DAPM_ENUM_EXT("SLIM RX6 Mux", slim_rx_mux_enum,
+ slim_rx_mux_get, slim_rx_mux_put),
+ SOC_DAPM_ENUM_EXT("SLIM RX7 Mux", slim_rx_mux_enum,
+ slim_rx_mux_get, slim_rx_mux_put),
+};
+
+static const struct snd_kcontrol_new aif1_cap_mixer[] = {
+ SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, WCD9335_TX0, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, WCD9335_TX1, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, WCD9335_TX2, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, WCD9335_TX3, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, WCD9335_TX4, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, WCD9335_TX5, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, WCD9335_TX6, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, WCD9335_TX7, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, WCD9335_TX8, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, WCD9335_TX9, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, WCD9335_TX10, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX11", SND_SOC_NOPM, WCD9335_TX11, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, WCD9335_TX13, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif2_cap_mixer[] = {
+ SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, WCD9335_TX0, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, WCD9335_TX1, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, WCD9335_TX2, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, WCD9335_TX3, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, WCD9335_TX4, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, WCD9335_TX5, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, WCD9335_TX6, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, WCD9335_TX7, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, WCD9335_TX8, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, WCD9335_TX9, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, WCD9335_TX10, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX11", SND_SOC_NOPM, WCD9335_TX11, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, WCD9335_TX13, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
+static const struct snd_kcontrol_new aif3_cap_mixer[] = {
+ SOC_SINGLE_EXT("SLIM TX0", SND_SOC_NOPM, WCD9335_TX0, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, WCD9335_TX1, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, WCD9335_TX2, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, WCD9335_TX3, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, WCD9335_TX4, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, WCD9335_TX5, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, WCD9335_TX6, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, WCD9335_TX7, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+ SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, WCD9335_TX8, 1, 0,
+ slim_tx_mixer_get, slim_tx_mixer_put),
+};
+
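+/*
+ * Custom put handler for the ADC MUX enums: besides the normal enum update,
+ * program the AMIC/DMIC select bit in the TX path config register of the
+ * decimator that the mux feeds.
+ */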
+static int wcd9335_put_dec_enum(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kc);
+ struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
+ struct soc_enum *e = (struct soc_enum *)kc->private_value;
+ unsigned int val, reg, sel;
+
+ val = ucontrol->value.enumerated.item[0];
+
+ switch (e->reg) {
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1:
+ reg = WCD9335_CDC_TX0_TX_PATH_CFG0;
+ break;
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG1:
+ reg = WCD9335_CDC_TX1_TX_PATH_CFG0;
+ break;
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG1:
+ reg = WCD9335_CDC_TX2_TX_PATH_CFG0;
+ break;
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG1:
+ reg = WCD9335_CDC_TX3_TX_PATH_CFG0;
+ break;
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0:
+ reg = WCD9335_CDC_TX4_TX_PATH_CFG0;
+ break;
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0:
+ reg = WCD9335_CDC_TX5_TX_PATH_CFG0;
+ break;
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0:
+ reg = WCD9335_CDC_TX6_TX_PATH_CFG0;
+ break;
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0:
+ reg = WCD9335_CDC_TX7_TX_PATH_CFG0;
+ break;
+ case WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0:
+ reg = WCD9335_CDC_TX8_TX_PATH_CFG0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* val 0 selects the DMIC path, any other mux input the AMIC path */
+ sel = val ? WCD9335_CDC_TX_ADC_AMIC_SEL : WCD9335_CDC_TX_ADC_DMIC_SEL;
+ snd_soc_component_update_bits(component, reg,
+ WCD9335_CDC_TX_ADC_AMIC_DMIC_SEL_MASK,
+ sel);
+
+ return snd_soc_dapm_put_enum_double(kc, ucontrol);
+}
+
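+/*
+ * RX INT0/1/2 DEM input muxes: selecting anything other than the first input
+ * also enables the look-ahead delay block of the matching RX path before the
+ * enum itself is updated.
+ */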
+static int wcd9335_int_dem_inp_mux_put(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_enum *e = (struct soc_enum *)kc->private_value;
+ struct snd_soc_component *component;
+ int reg, val;
+
+ component = snd_soc_dapm_kcontrol_component(kc);
+ val = ucontrol->value.enumerated.item[0];
+
+ if (e->reg == WCD9335_CDC_RX0_RX_PATH_SEC0)
+ reg = WCD9335_CDC_RX0_RX_PATH_CFG0;
+ else if (e->reg == WCD9335_CDC_RX1_RX_PATH_SEC0)
+ reg = WCD9335_CDC_RX1_RX_PATH_CFG0;
+ else if (e->reg == WCD9335_CDC_RX2_RX_PATH_SEC0)
+ reg = WCD9335_CDC_RX2_RX_PATH_CFG0;
+ else
+ return -EINVAL;
+
+ /* Set Look Ahead Delay */
+ snd_soc_component_update_bits(component, reg,
+ WCD9335_CDC_RX_PATH_CFG0_DLY_ZN_EN_MASK,
+ val ? WCD9335_CDC_RX_PATH_CFG0_DLY_ZN_EN : 0);
+ /* Set DEM INP Select */
+ return snd_soc_dapm_put_enum_double(kc, ucontrol);
+}
+
+static const struct snd_kcontrol_new rx_int0_dem_inp_mux =
+ SOC_DAPM_ENUM_EXT("RX INT0 DEM MUX Mux", rx_int0_dem_inp_mux_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_int_dem_inp_mux_put);
+
+static const struct snd_kcontrol_new rx_int1_dem_inp_mux =
+ SOC_DAPM_ENUM_EXT("RX INT1 DEM MUX Mux", rx_int1_dem_inp_mux_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_int_dem_inp_mux_put);
+
+static const struct snd_kcontrol_new rx_int2_dem_inp_mux =
+ SOC_DAPM_ENUM_EXT("RX INT2 DEM MUX Mux", rx_int2_dem_inp_mux_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_int_dem_inp_mux_put);
+
+static const struct snd_kcontrol_new tx_adc_mux0 =
+ SOC_DAPM_ENUM_EXT("ADC MUX0 Mux", tx_adc_mux0_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux1 =
+ SOC_DAPM_ENUM_EXT("ADC MUX1 Mux", tx_adc_mux1_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux2 =
+ SOC_DAPM_ENUM_EXT("ADC MUX2 Mux", tx_adc_mux2_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux3 =
+ SOC_DAPM_ENUM_EXT("ADC MUX3 Mux", tx_adc_mux3_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux4 =
+ SOC_DAPM_ENUM_EXT("ADC MUX4 Mux", tx_adc_mux4_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux5 =
+ SOC_DAPM_ENUM_EXT("ADC MUX5 Mux", tx_adc_mux5_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux6 =
+ SOC_DAPM_ENUM_EXT("ADC MUX6 Mux", tx_adc_mux6_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux7 =
+ SOC_DAPM_ENUM_EXT("ADC MUX7 Mux", tx_adc_mux7_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
+static const struct snd_kcontrol_new tx_adc_mux8 =
+ SOC_DAPM_ENUM_EXT("ADC MUX8 Mux", tx_adc_mux8_chain_enum,
+ snd_soc_dapm_get_enum_double,
+ wcd9335_put_dec_enum);
+
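+/*
+ * Walk the DAI's SLIM RX channels and, for every interpolator whose MIX
+ * input is fed by that RX port, program the mix path PCM sample rate.
+ */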
+static int wcd9335_set_mix_interpolator_rate(struct snd_soc_dai *dai,
+ int rate_val,
+ u32 rate)
+{
+ struct snd_soc_component *component = dai->component;
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ struct wcd9335_slim_ch *ch;
+ int val, j;
+
+ list_for_each_entry(ch, &wcd->dai[dai->id].slim_ch_list, list) {
+ for (j = 0; j < WCD9335_NUM_INTERPOLATORS; j++) {
+ val = snd_soc_component_read32(component,
+ WCD9335_CDC_RX_INP_MUX_RX_INT_CFG1(j)) &
+ WCD9335_CDC_RX_INP_MUX_RX_INT_SEL_MASK;
+
+ if (val == (ch->shift + INTn_2_INP_SEL_RX0))
+ snd_soc_component_update_bits(component,
+ WCD9335_CDC_RX_PATH_MIX_CTL(j),
+ WCD9335_CDC_MIX_PCM_RATE_MASK,
+ rate_val);
+ }
+ }
+
+ return 0;
+}
+
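+/*
+ * Same, for the primary interpolator path: all three INTn_1 MIX inputs of
+ * each interpolator are checked and RX_PATH_CTL is programmed on a match.
+ * 44.1 kHz on interpolator 0 is only reported, not programmed.
+ */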
+static int wcd9335_set_prim_interpolator_rate(struct snd_soc_dai *dai,
+ u8 rate_val,
+ u32 rate)
+{
+ struct snd_soc_component *comp = dai->component;
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+ struct wcd9335_slim_ch *ch;
+ u8 cfg0, cfg1, inp0_sel, inp1_sel, inp2_sel;
+ int inp, j;
+
+ list_for_each_entry(ch, &wcd->dai[dai->id].slim_ch_list, list) {
+ inp = ch->shift + INTn_1_MIX_INP_SEL_RX0;
+ /*
+ * Loop through all interpolator MUX inputs and find out
+ * to which interpolator input, the slim rx port
+ * is connected
+ */
+ for (j = 0; j < WCD9335_NUM_INTERPOLATORS; j++) {
+ cfg0 = snd_soc_component_read32(comp,
+ WCD9335_CDC_RX_INP_MUX_RX_INT_CFG0(j));
+ cfg1 = snd_soc_component_read32(comp,
+ WCD9335_CDC_RX_INP_MUX_RX_INT_CFG1(j));
+
+ inp0_sel = cfg0 &
+ WCD9335_CDC_RX_INP_MUX_RX_INT_SEL_MASK;
+ inp1_sel = (cfg0 >> 4) &
+ WCD9335_CDC_RX_INP_MUX_RX_INT_SEL_MASK;
+ inp2_sel = (cfg1 >> 4) &
+ WCD9335_CDC_RX_INP_MUX_RX_INT_SEL_MASK;
+
+ if ((inp0_sel == inp) || (inp1_sel == inp) ||
+ (inp2_sel == inp)) {
+ /* rate is in Hz */
+ if ((j == 0) && (rate == 44100))
+ dev_info(wcd->dev,
+ "Cannot set 44.1 kHz on INT0\n");
+ else
+ snd_soc_component_update_bits(comp,
+ WCD9335_CDC_RX_PATH_CTL(j),
+ WCD9335_CDC_MIX_PCM_RATE_MASK,
+ rate_val);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int wcd9335_set_interpolator_rate(struct snd_soc_dai *dai, u32 rate)
+{
+ int i;
+
+ /* set mixing path rate */
+ for (i = 0; i < ARRAY_SIZE(int_mix_rate_val); i++) {
+ if (rate == int_mix_rate_val[i].rate) {
+ wcd9335_set_mix_interpolator_rate(dai,
+ int_mix_rate_val[i].rate_val, rate);
+ break;
+ }
+ }
+
+ /* set primary path sample rate */
+ for (i = 0; i < ARRAY_SIZE(int_prim_rate_val); i++) {
+ if (rate == int_prim_rate_val[i].rate) {
+ wcd9335_set_prim_interpolator_rate(dai,
+ int_prim_rate_val[i].rate_val, rate);
+ break;
+ }
+ }
+
+ return 0;
+}
+
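+/*
+ * Build the slim_stream_config for a DAI from its channel list and program
+ * the SLIM interface device: multi-channel grouping plus watermark and port
+ * enable for every RX or TX port in use.
+ */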
+static int wcd9335_slim_set_hw_params(struct wcd9335_codec *wcd,
+ struct wcd_slim_codec_dai_data *dai_data,
+ int direction)
+{
+ struct list_head *slim_ch_list = &dai_data->slim_ch_list;
+ struct slim_stream_config *cfg = &dai_data->sconfig;
+ struct wcd9335_slim_ch *ch;
+ u16 payload = 0;
+ int ret, i;
+
+ cfg->ch_count = 0;
+ cfg->direction = direction;
+ cfg->port_mask = 0;
+
+ /* Configure slave interface device */
+ list_for_each_entry(ch, slim_ch_list, list) {
+ cfg->ch_count++;
+ payload |= 1 << ch->shift;
+ cfg->port_mask |= BIT(ch->port);
+ }
+
+ cfg->chs = kcalloc(cfg->ch_count, sizeof(unsigned int), GFP_KERNEL);
+ if (!cfg->chs)
+ return -ENOMEM;
+
+ i = 0;
+ list_for_each_entry(ch, slim_ch_list, list) {
+ cfg->chs[i++] = ch->ch_num;
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* write to interface device */
+ ret = regmap_write(wcd->if_regmap,
+ WCD9335_SLIM_PGD_RX_PORT_MULTI_CHNL_0(ch->port),
+ payload);
+
+ if (ret < 0)
+ goto err;
+
+ /* configure the slave port watermark and enable it */
+ ret = regmap_write(wcd->if_regmap,
+ WCD9335_SLIM_PGD_RX_PORT_CFG(ch->port),
+ WCD9335_SLIM_WATER_MARK_VAL);
+ if (ret < 0)
+ goto err;
+ } else {
+ ret = regmap_write(wcd->if_regmap,
+ WCD9335_SLIM_PGD_TX_PORT_MULTI_CHNL_0(ch->port),
+ payload & 0x00FF);
+ if (ret < 0)
+ goto err;
+
+ /* ports 8,9 */
+ ret = regmap_write(wcd->if_regmap,
+ WCD9335_SLIM_PGD_TX_PORT_MULTI_CHNL_1(ch->port),
+ (payload & 0xFF00)>>8);
+ if (ret < 0)
+ goto err;
+
+ /* configure the slave port watermark and enable it */
+ ret = regmap_write(wcd->if_regmap,
+ WCD9335_SLIM_PGD_TX_PORT_CFG(ch->port),
+ WCD9335_SLIM_WATER_MARK_VAL);
+
+ if (ret < 0)
+ goto err;
+ }
+ }
+
+ dai_data->sruntime = slim_stream_allocate(wcd->slim, "WCD9335-SLIM");
+
+ return 0;
+
+err:
+ dev_err(wcd->dev, "Error setting SLIM hw params\n");
+ kfree(cfg->chs);
+ cfg->chs = NULL;
+
+ return ret;
+}
+
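+/*
+ * For every SLIM TX channel of the DAI, work out which decimator feeds the
+ * SB TX mux of that port and program the decimator's TX path PCM rate.
+ */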
+static int wcd9335_set_decimator_rate(struct snd_soc_dai *dai,
+ u8 rate_val, u32 rate)
+{
+ struct snd_soc_component *comp = dai->component;
+ struct wcd9335_codec *wcd = snd_soc_component_get_drvdata(comp);
+ u8 shift = 0, shift_val = 0, tx_mux_sel;
+ struct wcd9335_slim_ch *ch;
+ int tx_port, tx_port_reg;
+ int decimator = -1;
+
+ list_for_each_entry(ch, &wcd->dai[dai->id].slim_ch_list, list) {
+ tx_port = ch->port;
+ if ((tx_port == 12) || (tx_port >= 14)) {
+ dev_err(wcd->dev, "Invalid SLIM TX%u port DAI ID:%d\n",
+ tx_port, dai->id);
+ return -EINVAL;
+ }
+ /* Find the SB TX MUX input - which decimator is connected */
+ if (tx_port < 4) {
+ tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0;
+ shift = (tx_port << 1);
+ shift_val = 0x03;
+ } else if ((tx_port >= 4) && (tx_port < 8)) {
+ tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1;
+ shift = ((tx_port - 4) << 1);
+ shift_val = 0x03;
+ } else if ((tx_port >= 8) && (tx_port < 11)) {
+ tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2;
+ shift = ((tx_port - 8) << 1);
+ shift_val = 0x03;
+ } else if (tx_port == 11) {
+ tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3;
+ shift = 0;
+ shift_val = 0x0F;
+ } else if (tx_port == 13) {
+ tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3;
+ shift = 4;
+ shift_val = 0x03;
+ } else {
+ return -EINVAL;
+ }
+
+ tx_mux_sel = snd_soc_component_read32(comp, tx_port_reg) &
+ (shift_val << shift);
+
+ tx_mux_sel = tx_mux_sel >> shift;
+ if (tx_port <= 8) {
+ if ((tx_mux_sel == 0x2) || (tx_mux_sel == 0x3))
+ decimator = tx_port;
+ } else if (tx_port <= 10) {
+ if ((tx_mux_sel == 0x1) || (tx_mux_sel == 0x2))
+ decimator = ((tx_port == 9) ? 7 : 6);
+ } else if (tx_port == 11) {
+ if ((tx_mux_sel >= 1) && (tx_mux_sel < 7))
+ decimator = tx_mux_sel - 1;
+ } else if (tx_port == 13) {
+ if ((tx_mux_sel == 0x1) || (tx_mux_sel == 0x2))
+ decimator = 5;
+ }
+
+ if (decimator >= 0) {
+ snd_soc_component_update_bits(comp,
+ WCD9335_CDC_TX_PATH_CTL(decimator),
+ WCD9335_CDC_TX_PATH_CTL_PCM_RATE_MASK,
+ rate_val);
+ } else if ((tx_port <= 8) && (tx_mux_sel == 0x01)) {
+ /* Check if the TX Mux input is RX MIX TXn */
+ dev_err(wcd->dev, "RX_MIX_TX%u going to SLIM TX%u\n",
+ tx_port, tx_port);
+ } else {
+ dev_err(wcd->dev, "ERROR: Invalid decimator: %d\n",
+ decimator);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
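+/*
+ * hw_params: set interpolator (playback) or decimator (capture) sample
+ * rates, validate the sample width and store the resulting SLIM stream
+ * configuration for the DAI.
+ */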
+static int wcd9335_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct wcd9335_codec *wcd;
+ int ret, tx_fs_rate = 0;
+
+ wcd = snd_soc_component_get_drvdata(dai->component);
+
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ ret = wcd9335_set_interpolator_rate(dai, params_rate(params));
+ if (ret) {
+ dev_err(wcd->dev, "cannot set sample rate: %u\n",
+ params_rate(params));
+ return ret;
+ }
+ switch (params_width(params)) {
+ case 16 ... 24:
+ wcd->dai[dai->id].sconfig.bps = params_width(params);
+ break;
+ default:
+ dev_err(wcd->dev, "%s: Invalid format 0x%x\n",
+ __func__, params_width(params));
+ return -EINVAL;
+ }
+ break;
+
+ case SNDRV_PCM_STREAM_CAPTURE:
+ switch (params_rate(params)) {
+ case 8000:
+ tx_fs_rate = 0;
+ break;
+ case 16000:
+ tx_fs_rate = 1;
+ break;
+ case 32000:
+ tx_fs_rate = 3;
+ break;
+ case 48000:
+ tx_fs_rate = 4;
+ break;
+ case 96000:
+ tx_fs_rate = 5;
+ break;
+ case 192000:
+ tx_fs_rate = 6;
+ break;
+ case 384000:
+ tx_fs_rate = 7;
+ break;
+ default:
+ dev_err(wcd->dev, "%s: Invalid TX sample rate: %d\n",
+ __func__, params_rate(params));
+ return -EINVAL;
+
+ }
+
+ ret = wcd9335_set_decimator_rate(dai, tx_fs_rate,
+ params_rate(params));
+ if (ret < 0) {
+ dev_err(wcd->dev, "Cannot set TX Decimator rate\n");
+ return ret;
+ }
+ switch (params_width(params)) {
+ case 16 ... 32:
+ wcd->dai[dai->id].sconfig.bps = params_width(params);
+ break;
+ default:
+ dev_err(wcd->dev, "%s: Invalid format 0x%x\n",
+ __func__, params_width(params));
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(wcd->dev, "Invalid stream type %d\n",
+ substream->stream);
+ return -EINVAL;
+ }
+
+ wcd->dai[dai->id].sconfig.rate = params_rate(params);
+ wcd9335_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
+
+ return 0;
+}
+
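+/*
+ * Trigger only handles the SLIM stream itself: prepare/enable on
+ * start/resume, unprepare/disable on stop/suspend.
+ */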
+static int wcd9335_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct wcd_slim_codec_dai_data *dai_data;
+ struct wcd9335_codec *wcd;
+ struct slim_stream_config *cfg;
+
+ wcd = snd_soc_component_get_drvdata(dai->component);
+
+ dai_data = &wcd->dai[dai->id];
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ cfg = &dai_data->sconfig;
+ slim_stream_prepare(dai_data->sruntime, cfg);
+ slim_stream_enable(dai_data->sruntime);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ slim_stream_unprepare(dai_data->sruntime);
+ slim_stream_disable(dai_data->sruntime);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
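+/*
+ * Channel map callbacks: the machine driver passes in the SLIM channel
+ * numbers for the RX/TX ports; they are cached per port here and reported
+ * back from the per-DAI channel lists.
+ */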
+static int wcd9335_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+ struct wcd9335_codec *wcd;
+ int i;
+
+ wcd = snd_soc_component_get_drvdata(dai->component);
+
+ if (!tx_slot || !rx_slot) {
+ dev_err(wcd->dev, "Invalid tx_slot=%p, rx_slot=%p\n",
+ tx_slot, rx_slot);
+ return -EINVAL;
+ }
+
+ wcd->num_rx_port = rx_num;
+ for (i = 0; i < rx_num; i++) {
+ wcd->rx_chs[i].ch_num = rx_slot[i];
+ INIT_LIST_HEAD(&wcd->rx_chs[i].list);
+ }
+
+ wcd->num_tx_port = tx_num;
+ for (i = 0; i < tx_num; i++) {
+ wcd->tx_chs[i].ch_num = tx_slot[i];
+ INIT_LIST_HEAD(&wcd->tx_chs[i].list);
+ }
+
+ return 0;
+}
+
+static int wcd9335_get_channel_map(struct snd_soc_dai *dai,
+ unsigned int *tx_num, unsigned int *tx_slot,
+ unsigned int *rx_num, unsigned int *rx_slot)
+{
+ struct wcd9335_slim_ch *ch;
+ struct wcd9335_codec *wcd;
+ int i = 0;
+
+ wcd = snd_soc_component_get_drvdata(dai->component);
+
+ switch (dai->id) {
+ case AIF1_PB:
+ case AIF2_PB:
+ case AIF3_PB:
+ case AIF4_PB:
+ if (!rx_slot || !rx_num) {
+ dev_err(wcd->dev, "Invalid rx_slot %p or rx_num %p\n",
+ rx_slot, rx_num);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(ch, &wcd->dai[dai->id].slim_ch_list, list)
+ rx_slot[i++] = ch->ch_num;
+
+ *rx_num = i;
+ break;
+ case AIF1_CAP:
+ case AIF2_CAP:
+ case AIF3_CAP:
+ if (!tx_slot || !tx_num) {
+ dev_err(wcd->dev, "Invalid tx_slot %p or tx_num %p\n",
+ tx_slot, tx_num);
+ return -EINVAL;
+ }
+ list_for_each_entry(ch, &wcd->dai[dai->id].slim_ch_list, list)
+ tx_slot[i++] = ch->ch_num;
+
+ *tx_num = i;
+ break;
+ default:
+ dev_err(wcd->dev, "Invalid DAI ID %x\n", dai->id);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops wcd9335_dai_ops = {
+ .hw_params = wcd9335_hw_params,
+ .trigger = wcd9335_trigger,
+ .set_channel_map = wcd9335_set_channel_map,
+ .get_channel_map = wcd9335_get_channel_map,
+};
+
+static struct snd_soc_dai_driver wcd9335_slim_dais[] = {
+ [0] = {
+ .name = "wcd9335_rx1",
+ .id = AIF1_PB,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+ .formats = WCD9335_FORMATS_S16_S24_LE,
+ .rate_max = 192000,
+ .rate_min = 8000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .ops = &wcd9335_dai_ops,
+ },
+ [1] = {
+ .name = "wcd9335_tx1",
+ .id = AIF1_CAP,
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .rates = WCD9335_RATES_MASK,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+ .ops = &wcd9335_dai_ops,
+ },
+ [2] = {
+ .name = "wcd9335_rx2",
+ .id = AIF2_PB,
+ .playback = {
+ .stream_name = "AIF2 Playback",
+ .rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+ .formats = WCD9335_FORMATS_S16_S24_LE,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .ops = &wcd9335_dai_ops,
+ },
+ [3] = {
+ .name = "wcd9335_tx2",
+ .id = AIF2_CAP,
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .rates = WCD9335_RATES_MASK,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+ .ops = &wcd9335_dai_ops,
+ },
+ [4] = {
+ .name = "wcd9335_rx3",
+ .id = AIF3_PB,
+ .playback = {
+ .stream_name = "AIF3 Playback",
+ .rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+ .formats = WCD9335_FORMATS_S16_S24_LE,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .ops = &wcd9335_dai_ops,
+ },
+ [5] = {
+ .name = "wcd9335_tx3",
+ .id = AIF3_CAP,
+ .capture = {
+ .stream_name = "AIF3 Capture",
+ .rates = WCD9335_RATES_MASK,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+ .ops = &wcd9335_dai_ops,
+ },
+ [6] = {
+ .name = "wcd9335_rx4",
+ .id = AIF4_PB,
+ .playback = {
+ .stream_name = "AIF4 Playback",
+ .rates = WCD9335_RATES_MASK | WCD9335_FRAC_RATES_MASK,
+ .formats = WCD9335_FORMATS_S16_S24_LE,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 2,
+ },
+ .ops = &wcd9335_dai_ops,
+ },
+};
+
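+/*
+ * Compander switches: the requested state is cached per compander and, for
+ * the headphone and single-ended line outputs, the gain source select is
+ * switched between register and compander control.
+ */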
+static int wcd9335_get_compander(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kc);
+ int comp = ((struct soc_mixer_control *)kc->private_value)->shift;
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+
+ ucontrol->value.integer.value[0] = wcd->comp_enabled[comp];
+ return 0;
+}
+
+static int wcd9335_set_compander(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kc);
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ int comp = ((struct soc_mixer_control *) kc->private_value)->shift;
+ int value = ucontrol->value.integer.value[0];
+ int sel;
+
+ wcd->comp_enabled[comp] = value;
+ sel = value ? WCD9335_HPH_GAIN_SRC_SEL_COMPANDER :
+ WCD9335_HPH_GAIN_SRC_SEL_REGISTER;
+
+ /* Any specific register configuration for compander */
+ switch (comp) {
+ case COMPANDER_1:
+ /* Set Gain Source Select based on compander enable/disable */
+ snd_soc_component_update_bits(component, WCD9335_HPH_L_EN,
+ WCD9335_HPH_GAIN_SRC_SEL_MASK, sel);
+ break;
+ case COMPANDER_2:
+ snd_soc_component_update_bits(component, WCD9335_HPH_R_EN,
+ WCD9335_HPH_GAIN_SRC_SEL_MASK, sel);
+ break;
+ case COMPANDER_5:
+ snd_soc_component_update_bits(component, WCD9335_SE_LO_LO3_GAIN,
+ WCD9335_HPH_GAIN_SRC_SEL_MASK, sel);
+ break;
+ case COMPANDER_6:
+ snd_soc_component_update_bits(component, WCD9335_SE_LO_LO4_GAIN,
+ WCD9335_HPH_GAIN_SRC_SEL_MASK, sel);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd9335_rx_hph_mode_get(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kc);
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+
+ ucontrol->value.enumerated.item[0] = wcd->hph_mode;
+
+ return 0;
+}
+
+static int wcd9335_rx_hph_mode_put(struct snd_kcontrol *kc,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kc);
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ u32 mode_val;
+
+ mode_val = ucontrol->value.enumerated.item[0];
+
+ if (mode_val == 0) {
+ dev_err(wcd->dev, "Invalid HPH mode, defaulting to Class-H HiFi\n");
+ mode_val = CLS_H_HIFI;
+ }
+ wcd->hph_mode = mode_val;
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new wcd9335_snd_controls[] = {
+ /* -84 dB min to +40 dB max */
+ SOC_SINGLE_SX_TLV("RX0 Digital Volume", WCD9335_CDC_RX0_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX1 Digital Volume", WCD9335_CDC_RX1_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX2 Digital Volume", WCD9335_CDC_RX2_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX3 Digital Volume", WCD9335_CDC_RX3_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX4 Digital Volume", WCD9335_CDC_RX4_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX5 Digital Volume", WCD9335_CDC_RX5_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX6 Digital Volume", WCD9335_CDC_RX6_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX7 Digital Volume", WCD9335_CDC_RX7_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX8 Digital Volume", WCD9335_CDC_RX8_RX_VOL_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX0 Mix Digital Volume",
+ WCD9335_CDC_RX0_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX1 Mix Digital Volume",
+ WCD9335_CDC_RX1_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX2 Mix Digital Volume",
+ WCD9335_CDC_RX2_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX3 Mix Digital Volume",
+ WCD9335_CDC_RX3_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX4 Mix Digital Volume",
+ WCD9335_CDC_RX4_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX5 Mix Digital Volume",
+ WCD9335_CDC_RX5_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX6 Mix Digital Volume",
+ WCD9335_CDC_RX6_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX7 Mix Digital Volume",
+ WCD9335_CDC_RX7_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX8 Mix Digital Volume",
+ WCD9335_CDC_RX8_RX_VOL_MIX_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_ENUM("RX INT0_1 HPF cut off", cf_int0_1_enum),
+ SOC_ENUM("RX INT0_2 HPF cut off", cf_int0_2_enum),
+ SOC_ENUM("RX INT1_1 HPF cut off", cf_int1_1_enum),
+ SOC_ENUM("RX INT1_2 HPF cut off", cf_int1_2_enum),
+ SOC_ENUM("RX INT2_1 HPF cut off", cf_int2_1_enum),
+ SOC_ENUM("RX INT2_2 HPF cut off", cf_int2_2_enum),
+ SOC_ENUM("RX INT3_1 HPF cut off", cf_int3_1_enum),
+ SOC_ENUM("RX INT3_2 HPF cut off", cf_int3_2_enum),
+ SOC_ENUM("RX INT4_1 HPF cut off", cf_int4_1_enum),
+ SOC_ENUM("RX INT4_2 HPF cut off", cf_int4_2_enum),
+ SOC_ENUM("RX INT5_1 HPF cut off", cf_int5_1_enum),
+ SOC_ENUM("RX INT5_2 HPF cut off", cf_int5_2_enum),
+ SOC_ENUM("RX INT6_1 HPF cut off", cf_int6_1_enum),
+ SOC_ENUM("RX INT6_2 HPF cut off", cf_int6_2_enum),
+ SOC_ENUM("RX INT7_1 HPF cut off", cf_int7_1_enum),
+ SOC_ENUM("RX INT7_2 HPF cut off", cf_int7_2_enum),
+ SOC_ENUM("RX INT8_1 HPF cut off", cf_int8_1_enum),
+ SOC_ENUM("RX INT8_2 HPF cut off", cf_int8_2_enum),
+ SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
+ wcd9335_get_compander, wcd9335_set_compander),
+ SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
+ wcd9335_get_compander, wcd9335_set_compander),
+ SOC_SINGLE_EXT("COMP3 Switch", SND_SOC_NOPM, COMPANDER_3, 1, 0,
+ wcd9335_get_compander, wcd9335_set_compander),
+ SOC_SINGLE_EXT("COMP4 Switch", SND_SOC_NOPM, COMPANDER_4, 1, 0,
+ wcd9335_get_compander, wcd9335_set_compander),
+ SOC_SINGLE_EXT("COMP5 Switch", SND_SOC_NOPM, COMPANDER_5, 1, 0,
+ wcd9335_get_compander, wcd9335_set_compander),
+ SOC_SINGLE_EXT("COMP6 Switch", SND_SOC_NOPM, COMPANDER_6, 1, 0,
+ wcd9335_get_compander, wcd9335_set_compander),
+ SOC_SINGLE_EXT("COMP7 Switch", SND_SOC_NOPM, COMPANDER_7, 1, 0,
+ wcd9335_get_compander, wcd9335_set_compander),
+ SOC_SINGLE_EXT("COMP8 Switch", SND_SOC_NOPM, COMPANDER_8, 1, 0,
+ wcd9335_get_compander, wcd9335_set_compander),
+ SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum,
+ wcd9335_rx_hph_mode_get, wcd9335_rx_hph_mode_put),
+
+ /* Gain Controls */
+ SOC_SINGLE_TLV("EAR PA Volume", WCD9335_ANA_EAR, 4, 4, 1,
+ ear_pa_gain),
+ SOC_SINGLE_TLV("HPHL Volume", WCD9335_HPH_L_EN, 0, 20, 1,
+ line_gain),
+ SOC_SINGLE_TLV("HPHR Volume", WCD9335_HPH_R_EN, 0, 20, 1,
+ line_gain),
+ SOC_SINGLE_TLV("LINEOUT1 Volume", WCD9335_DIFF_LO_LO1_COMPANDER,
+ 3, 16, 1, line_gain),
+ SOC_SINGLE_TLV("LINEOUT2 Volume", WCD9335_DIFF_LO_LO2_COMPANDER,
+ 3, 16, 1, line_gain),
+ SOC_SINGLE_TLV("LINEOUT3 Volume", WCD9335_SE_LO_LO3_GAIN, 0, 20, 1,
+ line_gain),
+ SOC_SINGLE_TLV("LINEOUT4 Volume", WCD9335_SE_LO_LO4_GAIN, 0, 20, 1,
+ line_gain),
+
+ SOC_SINGLE_TLV("ADC1 Volume", WCD9335_ANA_AMIC1, 0, 20, 0,
+ analog_gain),
+ SOC_SINGLE_TLV("ADC2 Volume", WCD9335_ANA_AMIC2, 0, 20, 0,
+ analog_gain),
+ SOC_SINGLE_TLV("ADC3 Volume", WCD9335_ANA_AMIC3, 0, 20, 0,
+ analog_gain),
+ SOC_SINGLE_TLV("ADC4 Volume", WCD9335_ANA_AMIC4, 0, 20, 0,
+ analog_gain),
+ SOC_SINGLE_TLV("ADC5 Volume", WCD9335_ANA_AMIC5, 0, 20, 0,
+ analog_gain),
+ SOC_SINGLE_TLV("ADC6 Volume", WCD9335_ANA_AMIC6, 0, 20, 0,
+ analog_gain),
+
+ SOC_ENUM("TX0 HPF cut off", cf_dec0_enum),
+ SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
+ SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
+ SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
+ SOC_ENUM("TX4 HPF cut off", cf_dec4_enum),
+ SOC_ENUM("TX5 HPF cut off", cf_dec5_enum),
+ SOC_ENUM("TX6 HPF cut off", cf_dec6_enum),
+ SOC_ENUM("TX7 HPF cut off", cf_dec7_enum),
+ SOC_ENUM("TX8 HPF cut off", cf_dec8_enum),
+};
+
+static const struct snd_soc_dapm_route wcd9335_audio_map[] = {
+ {"SLIM RX0 MUX", "AIF1_PB", "AIF1 PB"},
+ {"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"},
+ {"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"},
+ {"SLIM RX3 MUX", "AIF1_PB", "AIF1 PB"},
+ {"SLIM RX4 MUX", "AIF1_PB", "AIF1 PB"},
+ {"SLIM RX5 MUX", "AIF1_PB", "AIF1 PB"},
+ {"SLIM RX6 MUX", "AIF1_PB", "AIF1 PB"},
+ {"SLIM RX7 MUX", "AIF1_PB", "AIF1 PB"},
+
+ {"SLIM RX0 MUX", "AIF2_PB", "AIF2 PB"},
+ {"SLIM RX1 MUX", "AIF2_PB", "AIF2 PB"},
+ {"SLIM RX2 MUX", "AIF2_PB", "AIF2 PB"},
+ {"SLIM RX3 MUX", "AIF2_PB", "AIF2 PB"},
+ {"SLIM RX4 MUX", "AIF2_PB", "AIF2 PB"},
+ {"SLIM RX5 MUX", "AIF2_PB", "AIF2 PB"},
+ {"SLIM RX6 MUX", "AIF2_PB", "AIF2 PB"},
+ {"SLIM RX7 MUX", "AIF2_PB", "AIF2 PB"},
+
+ {"SLIM RX0 MUX", "AIF3_PB", "AIF3 PB"},
+ {"SLIM RX1 MUX", "AIF3_PB", "AIF3 PB"},
+ {"SLIM RX2 MUX", "AIF3_PB", "AIF3 PB"},
+ {"SLIM RX3 MUX", "AIF3_PB", "AIF3 PB"},
+ {"SLIM RX4 MUX", "AIF3_PB", "AIF3 PB"},
+ {"SLIM RX5 MUX", "AIF3_PB", "AIF3 PB"},
+ {"SLIM RX6 MUX", "AIF3_PB", "AIF3 PB"},
+ {"SLIM RX7 MUX", "AIF3_PB", "AIF3 PB"},
+
+ {"SLIM RX0 MUX", "AIF4_PB", "AIF4 PB"},
+ {"SLIM RX1 MUX", "AIF4_PB", "AIF4 PB"},
+ {"SLIM RX2 MUX", "AIF4_PB", "AIF4 PB"},
+ {"SLIM RX3 MUX", "AIF4_PB", "AIF4 PB"},
+ {"SLIM RX4 MUX", "AIF4_PB", "AIF4 PB"},
+ {"SLIM RX5 MUX", "AIF4_PB", "AIF4 PB"},
+ {"SLIM RX6 MUX", "AIF4_PB", "AIF4 PB"},
+ {"SLIM RX7 MUX", "AIF4_PB", "AIF4 PB"},
+
+ {"SLIM RX0", NULL, "SLIM RX0 MUX"},
+ {"SLIM RX1", NULL, "SLIM RX1 MUX"},
+ {"SLIM RX2", NULL, "SLIM RX2 MUX"},
+ {"SLIM RX3", NULL, "SLIM RX3 MUX"},
+ {"SLIM RX4", NULL, "SLIM RX4 MUX"},
+ {"SLIM RX5", NULL, "SLIM RX5 MUX"},
+ {"SLIM RX6", NULL, "SLIM RX6 MUX"},
+ {"SLIM RX7", NULL, "SLIM RX7 MUX"},
+
+ WCD9335_INTERPOLATOR_PATH(0),
+ WCD9335_INTERPOLATOR_PATH(1),
+ WCD9335_INTERPOLATOR_PATH(2),
+ WCD9335_INTERPOLATOR_PATH(3),
+ WCD9335_INTERPOLATOR_PATH(4),
+ WCD9335_INTERPOLATOR_PATH(5),
+ WCD9335_INTERPOLATOR_PATH(6),
+ WCD9335_INTERPOLATOR_PATH(7),
+ WCD9335_INTERPOLATOR_PATH(8),
+
+ /* EAR PA */
+ {"RX INT0 DEM MUX", "CLSH_DSM_OUT", "RX INT0 INTERP"},
+ {"RX INT0 DAC", NULL, "RX INT0 DEM MUX"},
+ {"RX INT0 DAC", NULL, "RX_BIAS"},
+ {"EAR PA", NULL, "RX INT0 DAC"},
+ {"EAR", NULL, "EAR PA"},
+
+ /* HPHL */
+ {"RX INT1 DEM MUX", "CLSH_DSM_OUT", "RX INT1 INTERP"},
+ {"RX INT1 DAC", NULL, "RX INT1 DEM MUX"},
+ {"RX INT1 DAC", NULL, "RX_BIAS"},
+ {"HPHL PA", NULL, "RX INT1 DAC"},
+ {"HPHL", NULL, "HPHL PA"},
+
+ /* HPHR */
+ {"RX INT2 DEM MUX", "CLSH_DSM_OUT", "RX INT2 INTERP"},
+ {"RX INT2 DAC", NULL, "RX INT2 DEM MUX"},
+ {"RX INT2 DAC", NULL, "RX_BIAS"},
+ {"HPHR PA", NULL, "RX INT2 DAC"},
+ {"HPHR", NULL, "HPHR PA"},
+
+ /* LINEOUT1 */
+ {"RX INT3 DAC", NULL, "RX INT3 INTERP"},
+ {"RX INT3 DAC", NULL, "RX_BIAS"},
+ {"LINEOUT1 PA", NULL, "RX INT3 DAC"},
+ {"LINEOUT1", NULL, "LINEOUT1 PA"},
+
+ /* LINEOUT2 */
+ {"RX INT4 DAC", NULL, "RX INT4 INTERP"},
+ {"RX INT4 DAC", NULL, "RX_BIAS"},
+ {"LINEOUT2 PA", NULL, "RX INT4 DAC"},
+ {"LINEOUT2", NULL, "LINEOUT2 PA"},
+
+ /* LINEOUT3 */
+ {"RX INT5 DAC", NULL, "RX INT5 INTERP"},
+ {"RX INT5 DAC", NULL, "RX_BIAS"},
+ {"LINEOUT3 PA", NULL, "RX INT5 DAC"},
+ {"LINEOUT3", NULL, "LINEOUT3 PA"},
+
+ /* LINEOUT4 */
+ {"RX INT6 DAC", NULL, "RX INT6 INTERP"},
+ {"RX INT6 DAC", NULL, "RX_BIAS"},
+ {"LINEOUT4 PA", NULL, "RX INT6 DAC"},
+ {"LINEOUT4", NULL, "LINEOUT4 PA"},
+
+ /* SLIMBUS Connections */
+ {"AIF1 CAP", NULL, "AIF1_CAP Mixer"},
+ {"AIF2 CAP", NULL, "AIF2_CAP Mixer"},
+ {"AIF3 CAP", NULL, "AIF3_CAP Mixer"},
+
+ /* ADC Mux */
+ WCD9335_ADC_MUX_PATH(0),
+ WCD9335_ADC_MUX_PATH(1),
+ WCD9335_ADC_MUX_PATH(2),
+ WCD9335_ADC_MUX_PATH(3),
+ WCD9335_ADC_MUX_PATH(4),
+ WCD9335_ADC_MUX_PATH(5),
+ WCD9335_ADC_MUX_PATH(6),
+ WCD9335_ADC_MUX_PATH(7),
+ WCD9335_ADC_MUX_PATH(8),
+
+ /* ADC Connections */
+ {"ADC1", NULL, "AMIC1"},
+ {"ADC2", NULL, "AMIC2"},
+ {"ADC3", NULL, "AMIC3"},
+ {"ADC4", NULL, "AMIC4"},
+ {"ADC5", NULL, "AMIC5"},
+ {"ADC6", NULL, "AMIC6"},
+};
+
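+/*
+ * Reference-counted micbias control, shared between DAPM and MBHC: the bias
+ * is enabled while micb_ref is non-zero, kept in pull-up mode while only
+ * pull-up users remain, and switched off once both counts reach zero
+ * (bits [7:6] of ANA_MICBn, as written below).
+ */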
+static int wcd9335_micbias_control(struct snd_soc_component *component,
+ int micb_num, int req, bool is_dapm)
+{
+ struct wcd9335_codec *wcd = snd_soc_component_get_drvdata(component);
+ int micb_index = micb_num - 1;
+ u16 micb_reg;
+
+ if ((micb_index < 0) || (micb_index > WCD9335_MAX_MICBIAS - 1)) {
+ dev_err(wcd->dev, "Invalid micbias index, micb_ind:%d\n",
+ micb_index);
+ return -EINVAL;
+ }
+
+ switch (micb_num) {
+ case MIC_BIAS_1:
+ micb_reg = WCD9335_ANA_MICB1;
+ break;
+ case MIC_BIAS_2:
+ micb_reg = WCD9335_ANA_MICB2;
+ break;
+ case MIC_BIAS_3:
+ micb_reg = WCD9335_ANA_MICB3;
+ break;
+ case MIC_BIAS_4:
+ micb_reg = WCD9335_ANA_MICB4;
+ break;
+ default:
+ dev_err(component->dev, "%s: Invalid micbias number: %d\n",
+ __func__, micb_num);
+ return -EINVAL;
+ }
+
+ switch (req) {
+ case MICB_PULLUP_ENABLE:
+ wcd->pullup_ref[micb_index]++;
+ if ((wcd->pullup_ref[micb_index] == 1) &&
+ (wcd->micb_ref[micb_index] == 0))
+ snd_soc_component_update_bits(component, micb_reg,
+ 0xC0, 0x80);
+ break;
+ case MICB_PULLUP_DISABLE:
+ wcd->pullup_ref[micb_index]--;
+ if ((wcd->pullup_ref[micb_index] == 0) &&
+ (wcd->micb_ref[micb_index] == 0))
+ snd_soc_component_update_bits(component, micb_reg,
+ 0xC0, 0x00);
+ break;
+ case MICB_ENABLE:
+ wcd->micb_ref[micb_index]++;
+ if (wcd->micb_ref[micb_index] == 1)
+ snd_soc_component_update_bits(component, micb_reg,
+ 0xC0, 0x40);
+ break;
+ case MICB_DISABLE:
+ wcd->micb_ref[micb_index]--;
+ if ((wcd->micb_ref[micb_index] == 0) &&
+ (wcd->pullup_ref[micb_index] > 0))
+ snd_soc_component_update_bits(component, micb_reg,
+ 0xC0, 0x80);
+ else if ((wcd->micb_ref[micb_index] == 0) &&
+ (wcd->pullup_ref[micb_index] == 0)) {
+ snd_soc_component_update_bits(component, micb_reg,
+ 0xC0, 0x00);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int __wcd9335_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+ int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ int micb_num;
+
+ if (strnstr(w->name, "MIC BIAS1", sizeof("MIC BIAS1")))
+ micb_num = MIC_BIAS_1;
+ else if (strnstr(w->name, "MIC BIAS2", sizeof("MIC BIAS2")))
+ micb_num = MIC_BIAS_2;
+ else if (strnstr(w->name, "MIC BIAS3", sizeof("MIC BIAS3")))
+ micb_num = MIC_BIAS_3;
+ else if (strnstr(w->name, "MIC BIAS4", sizeof("MIC BIAS4")))
+ micb_num = MIC_BIAS_4;
+ else
+ return -EINVAL;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /*
+ * MIC BIAS can also be requested by MBHC,
+ * so use ref count to handle micbias pullup
+ * and enable requests
+ */
+ wcd9335_micbias_control(comp, micb_num, MICB_ENABLE, true);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* wait for cnp time */
+ usleep_range(1000, 1100);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd9335_micbias_control(comp, micb_num, MICB_DISABLE, true);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd9335_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ return __wcd9335_codec_enable_micbias(w, event);
+}
+
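+/*
+ * The TX hold bit of an AMIC pair is set in the even-numbered ANA_AMIC
+ * register: bit 6 (0x40) for AMIC1/3/5, bit 5 (0x20) for AMIC2/4/6.
+ */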
+static void wcd9335_codec_set_tx_hold(struct snd_soc_component *comp,
+ u16 amic_reg, bool set)
+{
+ u8 mask = 0x20;
+ u8 val;
+
+ if (amic_reg == WCD9335_ANA_AMIC1 || amic_reg == WCD9335_ANA_AMIC3 ||
+ amic_reg == WCD9335_ANA_AMIC5)
+ mask = 0x40;
+
+ val = set ? mask : 0x00;
+
+ switch (amic_reg) {
+ case WCD9335_ANA_AMIC1:
+ case WCD9335_ANA_AMIC2:
+ snd_soc_component_update_bits(comp, WCD9335_ANA_AMIC2, mask,
+ val);
+ break;
+ case WCD9335_ANA_AMIC3:
+ case WCD9335_ANA_AMIC4:
+ snd_soc_component_update_bits(comp, WCD9335_ANA_AMIC4, mask,
+ val);
+ break;
+ case WCD9335_ANA_AMIC5:
+ case WCD9335_ANA_AMIC6:
+ snd_soc_component_update_bits(comp, WCD9335_ANA_AMIC6, mask,
+ val);
+ break;
+ default:
+ dev_err(comp->dev, "%s: invalid amic: %d\n",
+ __func__, amic_reg);
+ break;
+ }
+}
+
+static int wcd9335_codec_enable_adc(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd9335_codec_set_tx_hold(comp, w->reg, true);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
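+/*
+ * Return the AMIC number (1..6) feeding the given ADC mux, or 0 if the mux
+ * input is a DMIC or the mux index is invalid.
+ */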
+static int wcd9335_codec_find_amic_input(struct snd_soc_component *comp,
+ int adc_mux_n)
+{
+ int mux_sel, reg, mreg;
+
+ if (adc_mux_n < 0 || adc_mux_n > WCD9335_MAX_VALID_ADC_MUX ||
+ adc_mux_n == WCD9335_INVALID_ADC_MUX)
+ return 0;
+
+ /* Check whether adc mux input is AMIC or DMIC */
+ if (adc_mux_n < 4) {
+ reg = WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1 + 2 * adc_mux_n;
+ mreg = WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0 + 2 * adc_mux_n;
+ mux_sel = snd_soc_component_read32(comp, reg) & 0x3;
+ } else {
+ reg = WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0 + adc_mux_n - 4;
+ mreg = reg;
+ mux_sel = snd_soc_component_read32(comp, reg) >> 6;
+ }
+
+ if (mux_sel != WCD9335_CDC_TX_INP_MUX_SEL_AMIC)
+ return 0;
+
+ return snd_soc_component_read32(comp, mreg) & 0x07;
+}
+
+static u16 wcd9335_codec_get_amic_pwlvl_reg(struct snd_soc_component *comp,
+ int amic)
+{
+ u16 pwr_level_reg = 0;
+
+ switch (amic) {
+ case 1:
+ case 2:
+ pwr_level_reg = WCD9335_ANA_AMIC1;
+ break;
+
+ case 3:
+ case 4:
+ pwr_level_reg = WCD9335_ANA_AMIC3;
+ break;
+
+ case 5:
+ case 6:
+ pwr_level_reg = WCD9335_ANA_AMIC5;
+ break;
+ default:
+ dev_err(comp->dev, "invalid amic: %d\n", amic);
+ break;
+ }
+
+ return pwr_level_reg;
+}
+
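+/*
+ * Decimator DAPM handler: the decimator index is parsed from the widget
+ * name. On power-up the decimator power level follows the connected AMIC,
+ * the HPF cut-off is temporarily forced to 150 Hz and the TX PGA is muted;
+ * power-down restores the original HPF cut-off and unmutes.
+ */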
+static int wcd9335_codec_enable_dec(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ unsigned int decimator;
+ char *dec_adc_mux_name = NULL;
+ char *widget_name = NULL;
+ char *wname;
+ int ret = 0, amic_n;
+ u16 tx_vol_ctl_reg, pwr_level_reg = 0, dec_cfg_reg, hpf_gate_reg;
+ u16 tx_gain_ctl_reg;
+ char *dec;
+ u8 hpf_coff_freq;
+
+ widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+ if (!widget_name)
+ return -ENOMEM;
+
+ wname = widget_name;
+ dec_adc_mux_name = strsep(&widget_name, " ");
+ if (!dec_adc_mux_name) {
+ dev_err(comp->dev, "%s: Invalid decimator = %s\n",
+ __func__, w->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ dec_adc_mux_name = widget_name;
+
+ dec = strpbrk(dec_adc_mux_name, "012345678");
+ if (!dec) {
+ dev_err(comp->dev, "%s: decimator index not found\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = kstrtouint(dec, 10, &decimator);
+ if (ret < 0) {
+ dev_err(comp->dev, "%s: Invalid decimator = %s\n",
+ __func__, wname);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tx_vol_ctl_reg = WCD9335_CDC_TX0_TX_PATH_CTL + 16 * decimator;
+ hpf_gate_reg = WCD9335_CDC_TX0_TX_PATH_SEC2 + 16 * decimator;
+ dec_cfg_reg = WCD9335_CDC_TX0_TX_PATH_CFG0 + 16 * decimator;
+ tx_gain_ctl_reg = WCD9335_CDC_TX0_TX_VOL_CTL + 16 * decimator;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ amic_n = wcd9335_codec_find_amic_input(comp, decimator);
+ if (amic_n)
+ pwr_level_reg = wcd9335_codec_get_amic_pwlvl_reg(comp,
+ amic_n);
+
+ if (pwr_level_reg) {
+ switch ((snd_soc_component_read32(comp, pwr_level_reg) &
+ WCD9335_AMIC_PWR_LVL_MASK) >>
+ WCD9335_AMIC_PWR_LVL_SHIFT) {
+ case WCD9335_AMIC_PWR_LEVEL_LP:
+ snd_soc_component_update_bits(comp, dec_cfg_reg,
+ WCD9335_DEC_PWR_LVL_MASK,
+ WCD9335_DEC_PWR_LVL_LP);
+ break;
+
+ case WCD9335_AMIC_PWR_LEVEL_HP:
+ snd_soc_component_update_bits(comp, dec_cfg_reg,
+ WCD9335_DEC_PWR_LVL_MASK,
+ WCD9335_DEC_PWR_LVL_HP);
+ break;
+ case WCD9335_AMIC_PWR_LEVEL_DEFAULT:
+ default:
+ snd_soc_component_update_bits(comp, dec_cfg_reg,
+ WCD9335_DEC_PWR_LVL_MASK,
+ WCD9335_DEC_PWR_LVL_DF);
+ break;
+ }
+ }
+ hpf_coff_freq = (snd_soc_component_read32(comp, dec_cfg_reg) &
+ TX_HPF_CUT_OFF_FREQ_MASK) >> 5;
+
+ if (hpf_coff_freq != CF_MIN_3DB_150HZ)
+ snd_soc_component_update_bits(comp, dec_cfg_reg,
+ TX_HPF_CUT_OFF_FREQ_MASK,
+ CF_MIN_3DB_150HZ << 5);
+ /* Enable TX PGA Mute */
+ snd_soc_component_update_bits(comp, tx_vol_ctl_reg,
+ 0x10, 0x10);
+ /* Enable APC */
+ snd_soc_component_update_bits(comp, dec_cfg_reg, 0x08, 0x08);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_component_update_bits(comp, hpf_gate_reg, 0x01, 0x00);
+
+ if (decimator == 0) {
+ snd_soc_component_write(comp,
+ WCD9335_MBHC_ZDET_RAMP_CTL, 0x83);
+ snd_soc_component_write(comp,
+ WCD9335_MBHC_ZDET_RAMP_CTL, 0xA3);
+ snd_soc_component_write(comp,
+ WCD9335_MBHC_ZDET_RAMP_CTL, 0x83);
+ snd_soc_component_write(comp,
+ WCD9335_MBHC_ZDET_RAMP_CTL, 0x03);
+ }
+
+ snd_soc_component_update_bits(comp, hpf_gate_reg,
+ 0x01, 0x01);
+ snd_soc_component_update_bits(comp, tx_vol_ctl_reg,
+ 0x10, 0x00);
+ snd_soc_component_write(comp, tx_gain_ctl_reg,
+ snd_soc_component_read32(comp, tx_gain_ctl_reg));
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ hpf_coff_freq = (snd_soc_component_read32(comp, dec_cfg_reg) &
+ TX_HPF_CUT_OFF_FREQ_MASK) >> 5;
+ snd_soc_component_update_bits(comp, tx_vol_ctl_reg, 0x10, 0x10);
+ snd_soc_component_update_bits(comp, dec_cfg_reg, 0x08, 0x00);
+ if (hpf_coff_freq != CF_MIN_3DB_150HZ) {
+ snd_soc_component_update_bits(comp, dec_cfg_reg,
+ TX_HPF_CUT_OFF_FREQ_MASK,
+ hpf_coff_freq << 5);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_update_bits(comp, tx_vol_ctl_reg, 0x10, 0x00);
+ break;
+ }
+out:
+ kfree(wname);
+ return ret;
+}
+
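+/*
+ * Translate the MCLK/DMIC clock ratio into the register divider value;
+ * DIV_2 (9.6 MHz MCLK) or DIV_3 is returned as a fallback on invalid rates.
+ */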
+static u8 wcd9335_get_dmic_clk_val(struct snd_soc_component *component,
+ u32 mclk_rate, u32 dmic_clk_rate)
+{
+ u32 div_factor;
+ u8 dmic_ctl_val;
+
+ dev_dbg(component->dev, "%s: mclk_rate = %u, dmic_clk_rate = %u\n",
+ __func__, mclk_rate, dmic_clk_rate);
+
+ /* Default value to return in case of error */
+ if (mclk_rate == WCD9335_MCLK_CLK_9P6MHZ)
+ dmic_ctl_val = WCD9335_DMIC_CLK_DIV_2;
+ else
+ dmic_ctl_val = WCD9335_DMIC_CLK_DIV_3;
+
+ if (dmic_clk_rate == 0) {
+ dev_err(component->dev,
+ "%s: dmic_sample_rate cannot be 0\n",
+ __func__);
+ goto done;
+ }
+
+ div_factor = mclk_rate / dmic_clk_rate;
+ switch (div_factor) {
+ case 2:
+ dmic_ctl_val = WCD9335_DMIC_CLK_DIV_2;
+ break;
+ case 3:
+ dmic_ctl_val = WCD9335_DMIC_CLK_DIV_3;
+ break;
+ case 4:
+ dmic_ctl_val = WCD9335_DMIC_CLK_DIV_4;
+ break;
+ case 6:
+ dmic_ctl_val = WCD9335_DMIC_CLK_DIV_6;
+ break;
+ case 8:
+ dmic_ctl_val = WCD9335_DMIC_CLK_DIV_8;
+ break;
+ case 16:
+ dmic_ctl_val = WCD9335_DMIC_CLK_DIV_16;
+ break;
+ default:
+ dev_err(component->dev,
+ "%s: Invalid div_factor %u, clk_rate(%u), dmic_rate(%u)\n",
+ __func__, div_factor, mclk_rate, dmic_clk_rate);
+ break;
+ }
+
+done:
+ return dmic_ctl_val;
+}
+
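+/*
+ * DMICs share a clock per pair (0/1, 2/3, 4/5). The per-pair use count makes
+ * sure the divider is programmed and the clock enabled by the first user and
+ * disabled again by the last one.
+ */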
+static int wcd9335_codec_enable_dmic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = snd_soc_component_get_drvdata(comp);
+ u8 dmic_clk_en = 0x01;
+ u16 dmic_clk_reg;
+ s32 *dmic_clk_cnt;
+ u8 dmic_rate_val, dmic_rate_shift = 1;
+ unsigned int dmic;
+ int ret;
+ char *wname;
+
+ wname = strpbrk(w->name, "012345");
+ if (!wname) {
+ dev_err(comp->dev, "%s: widget not found\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = kstrtouint(wname, 10, &dmic);
+ if (ret < 0) {
+ dev_err(comp->dev, "%s: Invalid DMIC line on the codec\n",
+ __func__);
+ return -EINVAL;
+ }
+
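+ /* DMIC pairs 0/1, 2/3 and 4/5 share a clock enable/divider register */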
+ switch (dmic) {
+ case 0:
+ case 1:
+ dmic_clk_cnt = &(wcd->dmic_0_1_clk_cnt);
+ dmic_clk_reg = WCD9335_CPE_SS_DMIC0_CTL;
+ break;
+ case 2:
+ case 3:
+ dmic_clk_cnt = &(wcd->dmic_2_3_clk_cnt);
+ dmic_clk_reg = WCD9335_CPE_SS_DMIC1_CTL;
+ break;
+ case 4:
+ case 5:
+ dmic_clk_cnt = &(wcd->dmic_4_5_clk_cnt);
+ dmic_clk_reg = WCD9335_CPE_SS_DMIC2_CTL;
+ break;
+ default:
+ dev_err(comp->dev, "%s: Invalid DMIC Selection\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ dmic_rate_val =
+ wcd9335_get_dmic_clk_val(comp,
+ wcd->mclk_rate,
+ wcd->dmic_sample_rate);
+
+ (*dmic_clk_cnt)++;
+ if (*dmic_clk_cnt == 1) {
+ snd_soc_component_update_bits(comp, dmic_clk_reg,
+ 0x07 << dmic_rate_shift,
+ dmic_rate_val << dmic_rate_shift);
+ snd_soc_component_update_bits(comp, dmic_clk_reg,
+ dmic_clk_en, dmic_clk_en);
+ }
+
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ dmic_rate_val =
+ wcd9335_get_dmic_clk_val(comp,
+ wcd->mclk_rate,
+ wcd->mad_dmic_sample_rate);
+ (*dmic_clk_cnt)--;
+ if (*dmic_clk_cnt == 0) {
+ snd_soc_component_update_bits(comp, dmic_clk_reg,
+ dmic_clk_en, 0);
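+ /*
+ * Gather the RX and TX port interrupt status registers into one
+ * 32-bit bitmap: bits 0-15 are RX ports, bits 16-31 are TX ports.
+ */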
+ snd_soc_component_update_bits(comp, dmic_clk_reg,
+ 0x07 << dmic_rate_shift,
+ dmic_rate_val << dmic_rate_shift);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void wcd9335_codec_enable_int_port(struct wcd_slim_codec_dai_data *dai,
+ struct snd_soc_component *component)
+{
+ int port_num = 0;
+ unsigned short reg = 0;
+ unsigned int val = 0;
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ struct wcd9335_slim_ch *ch;
+
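+ /*
+ * Enable the port interrupt for every channel on this DAI; RX and
+ * TX ports use separate interrupt-enable register banks.
+ */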
+ list_for_each_entry(ch, &dai->slim_ch_list, list) {
+ if (ch->port >= WCD9335_RX_START) {
+ port_num = ch->port - WCD9335_RX_START;
+ reg = WCD9335_SLIM_PGD_PORT_INT_EN0 + (port_num / 8);
+ } else {
+ port_num = ch->port;
+ reg = WCD9335_SLIM_PGD_PORT_INT_TX_EN0 + (port_num / 8);
+ }
+
+ regmap_read(wcd->if_regmap, reg, &val);
+ if (!(val & BIT(port_num % 8)))
+ regmap_write(wcd->if_regmap, reg,
+ val | BIT(port_num % 8));
+ }
+}
+
+static int wcd9335_codec_enable_slim(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc,
+ int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = snd_soc_component_get_drvdata(comp);
+ struct wcd_slim_codec_dai_data *dai = &wcd->dai[w->shift];
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ wcd9335_codec_enable_int_port(dai, comp);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ kfree(dai->sconfig.chs);
+
+ break;
+ }
+
+ return ret;
+}
+
+static int wcd9335_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ u16 gain_reg;
+ int offset_val = 0;
+ int val = 0;
+
+ switch (w->reg) {
+ case WCD9335_CDC_RX0_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX0_RX_VOL_MIX_CTL;
+ break;
+ case WCD9335_CDC_RX1_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX1_RX_VOL_MIX_CTL;
+ break;
+ case WCD9335_CDC_RX2_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX2_RX_VOL_MIX_CTL;
+ break;
+ case WCD9335_CDC_RX3_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX3_RX_VOL_MIX_CTL;
+ break;
+ case WCD9335_CDC_RX4_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX4_RX_VOL_MIX_CTL;
+ break;
+ case WCD9335_CDC_RX5_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX5_RX_VOL_MIX_CTL;
+ break;
+ case WCD9335_CDC_RX6_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX6_RX_VOL_MIX_CTL;
+ break;
+ case WCD9335_CDC_RX7_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX7_RX_VOL_MIX_CTL;
+ break;
+ case WCD9335_CDC_RX8_RX_PATH_MIX_CTL:
+ gain_reg = WCD9335_CDC_RX8_RX_VOL_MIX_CTL;
+ break;
+ default:
+ dev_err(comp->dev, "%s: No gain register avail for %s\n",
+ __func__, w->name);
+ return 0;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ val = snd_soc_component_read32(comp, gain_reg);
+ val += offset_val;
+ snd_soc_component_write(comp, gain_reg, val);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ break;
+ }
+
+ return 0;
+}
+
+static u16 wcd9335_interp_get_primary_reg(u16 reg, u16 *ind)
+{
+ u16 prim_int_reg = WCD9335_CDC_RX0_RX_PATH_CTL;
+
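+ /*
+ * Both the main and the mix path registers map to the same primary
+ * interpolator control register and index.
+ */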
+ switch (reg) {
+ case WCD9335_CDC_RX0_RX_PATH_CTL:
+ case WCD9335_CDC_RX0_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX0_RX_PATH_CTL;
+ *ind = 0;
+ break;
+ case WCD9335_CDC_RX1_RX_PATH_CTL:
+ case WCD9335_CDC_RX1_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX1_RX_PATH_CTL;
+ *ind = 1;
+ break;
+ case WCD9335_CDC_RX2_RX_PATH_CTL:
+ case WCD9335_CDC_RX2_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX2_RX_PATH_CTL;
+ *ind = 2;
+ break;
+ case WCD9335_CDC_RX3_RX_PATH_CTL:
+ case WCD9335_CDC_RX3_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX3_RX_PATH_CTL;
+ *ind = 3;
+ break;
+ case WCD9335_CDC_RX4_RX_PATH_CTL:
+ case WCD9335_CDC_RX4_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX4_RX_PATH_CTL;
+ *ind = 4;
+ break;
+ case WCD9335_CDC_RX5_RX_PATH_CTL:
+ case WCD9335_CDC_RX5_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX5_RX_PATH_CTL;
+ *ind = 5;
+ break;
+ case WCD9335_CDC_RX6_RX_PATH_CTL:
+ case WCD9335_CDC_RX6_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX6_RX_PATH_CTL;
+ *ind = 6;
+ break;
+ case WCD9335_CDC_RX7_RX_PATH_CTL:
+ case WCD9335_CDC_RX7_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX7_RX_PATH_CTL;
+ *ind = 7;
+ break;
+ case WCD9335_CDC_RX8_RX_PATH_CTL:
+ case WCD9335_CDC_RX8_RX_PATH_MIX_CTL:
+ prim_int_reg = WCD9335_CDC_RX8_RX_PATH_CTL;
+ *ind = 8;
+ break;
+ }
+
+ return prim_int_reg;
+}
+
+static void wcd9335_codec_hd2_control(struct snd_soc_component *component,
+ u16 prim_int_reg, int event)
+{
+ u16 hd2_scale_reg;
+ u16 hd2_enable_reg = 0;
+
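+ /*
+ * HD2 configuration is only available on the RX1 and RX2
+ * (headphone) paths.
+ */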
+ if (prim_int_reg == WCD9335_CDC_RX1_RX_PATH_CTL) {
+ hd2_scale_reg = WCD9335_CDC_RX1_RX_PATH_SEC3;
+ hd2_enable_reg = WCD9335_CDC_RX1_RX_PATH_CFG0;
+ }
+ if (prim_int_reg == WCD9335_CDC_RX2_RX_PATH_CTL) {
+ hd2_scale_reg = WCD9335_CDC_RX2_RX_PATH_SEC3;
+ hd2_enable_reg = WCD9335_CDC_RX2_RX_PATH_CFG0;
+ }
+
+ if (hd2_enable_reg && SND_SOC_DAPM_EVENT_ON(event)) {
+ snd_soc_component_update_bits(component, hd2_scale_reg,
+ WCD9335_CDC_RX_PATH_SEC_HD2_ALPHA_MASK,
+ WCD9335_CDC_RX_PATH_SEC_HD2_ALPHA_0P2500);
+ snd_soc_component_update_bits(component, hd2_scale_reg,
+ WCD9335_CDC_RX_PATH_SEC_HD2_SCALE_MASK,
+ WCD9335_CDC_RX_PATH_SEC_HD2_SCALE_2);
+ snd_soc_component_update_bits(component, hd2_enable_reg,
+ WCD9335_CDC_RX_PATH_CFG_HD2_EN_MASK,
+ WCD9335_CDC_RX_PATH_CFG_HD2_ENABLE);
+ }
+
+ if (hd2_enable_reg && SND_SOC_DAPM_EVENT_OFF(event)) {
+ snd_soc_component_update_bits(component, hd2_enable_reg,
+ WCD9335_CDC_RX_PATH_CFG_HD2_EN_MASK,
+ WCD9335_CDC_RX_PATH_CFG_HD2_DISABLE);
+ snd_soc_component_update_bits(component, hd2_scale_reg,
+ WCD9335_CDC_RX_PATH_SEC_HD2_SCALE_MASK,
+ WCD9335_CDC_RX_PATH_SEC_HD2_SCALE_1);
+ snd_soc_component_update_bits(component, hd2_scale_reg,
+ WCD9335_CDC_RX_PATH_SEC_HD2_ALPHA_MASK,
+ WCD9335_CDC_RX_PATH_SEC_HD2_ALPHA_0P0000);
+ }
+}
+
+static int wcd9335_codec_enable_prim_interpolator(
+ struct snd_soc_component *comp,
+ u16 reg, int event)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+ u16 ind = 0;
+ int prim_int_reg = wcd9335_interp_get_primary_reg(reg, &ind);
+
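+ /*
+ * The primary interpolator is reference counted since the main and
+ * the mix path share it.
+ */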
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd->prim_int_users[ind]++;
+ if (wcd->prim_int_users[ind] == 1) {
+ snd_soc_component_update_bits(comp, prim_int_reg,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_ENABLE);
+ wcd9335_codec_hd2_control(comp, prim_int_reg, event);
+ snd_soc_component_update_bits(comp, prim_int_reg,
+ WCD9335_CDC_RX_CLK_EN_MASK,
+ WCD9335_CDC_RX_CLK_ENABLE);
+ }
+
+ if ((reg != prim_int_reg) &&
+ ((snd_soc_component_read32(comp, prim_int_reg)) &
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK))
+ snd_soc_component_update_bits(comp, reg,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_ENABLE);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd->prim_int_users[ind]--;
+ if (wcd->prim_int_users[ind] == 0) {
+ snd_soc_component_update_bits(comp, prim_int_reg,
+ WCD9335_CDC_RX_CLK_EN_MASK,
+ WCD9335_CDC_RX_CLK_DISABLE);
+ snd_soc_component_update_bits(comp, prim_int_reg,
+ WCD9335_CDC_RX_RESET_MASK,
+ WCD9335_CDC_RX_RESET_ENABLE);
+ snd_soc_component_update_bits(comp, prim_int_reg,
+ WCD9335_CDC_RX_RESET_MASK,
+ WCD9335_CDC_RX_RESET_DISABLE);
+ wcd9335_codec_hd2_control(comp, prim_int_reg, event);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd9335_config_compander(struct snd_soc_component *component,
+ int interp_n, int event)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ int comp;
+ u16 comp_ctl0_reg, rx_path_cfg0_reg;
+
+ /* EAR does not have compander */
+ if (!interp_n)
+ return 0;
+
+ comp = interp_n - 1;
+ if (!wcd->comp_enabled[comp])
+ return 0;
+
+ comp_ctl0_reg = WCD9335_CDC_COMPANDER1_CTL(comp);
+ rx_path_cfg0_reg = WCD9335_CDC_RX1_RX_PATH_CFG(comp);
+
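+ /*
+ * Power up: enable the compander clock, soft reset it and enable
+ * the compander in the RX path. Power down: halt the compander,
+ * disable it in the RX path, pulse soft reset, gate the clock and
+ * release the halt.
+ */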
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ /* Enable Compander Clock */
+ snd_soc_component_update_bits(component, comp_ctl0_reg,
+ WCD9335_CDC_COMPANDER_CLK_EN_MASK,
+ WCD9335_CDC_COMPANDER_CLK_ENABLE);
+ /* Reset compander */
+ snd_soc_component_update_bits(component, comp_ctl0_reg,
+ WCD9335_CDC_COMPANDER_SOFT_RST_MASK,
+ WCD9335_CDC_COMPANDER_SOFT_RST_ENABLE);
+ snd_soc_component_update_bits(component, comp_ctl0_reg,
+ WCD9335_CDC_COMPANDER_SOFT_RST_MASK,
+ WCD9335_CDC_COMPANDER_SOFT_RST_DISABLE);
+ /* Enable the compander (DRE) in this RX path */
+ snd_soc_component_update_bits(component, rx_path_cfg0_reg,
+ WCD9335_CDC_RX_PATH_CFG_CMP_EN_MASK,
+ WCD9335_CDC_RX_PATH_CFG_CMP_ENABLE);
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ snd_soc_component_update_bits(component, comp_ctl0_reg,
+ WCD9335_CDC_COMPANDER_HALT_MASK,
+ WCD9335_CDC_COMPANDER_HALT);
+ snd_soc_component_update_bits(component, rx_path_cfg0_reg,
+ WCD9335_CDC_RX_PATH_CFG_CMP_EN_MASK,
+ WCD9335_CDC_RX_PATH_CFG_CMP_DISABLE);
+
+ snd_soc_component_update_bits(component, comp_ctl0_reg,
+ WCD9335_CDC_COMPANDER_SOFT_RST_MASK,
+ WCD9335_CDC_COMPANDER_SOFT_RST_ENABLE);
+ snd_soc_component_update_bits(component, comp_ctl0_reg,
+ WCD9335_CDC_COMPANDER_SOFT_RST_MASK,
+ WCD9335_CDC_COMPANDER_SOFT_RST_DISABLE);
+ snd_soc_component_update_bits(component, comp_ctl0_reg,
+ WCD9335_CDC_COMPANDER_CLK_EN_MASK,
+ WCD9335_CDC_COMPANDER_CLK_DISABLE);
+ snd_soc_component_update_bits(component, comp_ctl0_reg,
+ WCD9335_CDC_COMPANDER_HALT_MASK,
+ WCD9335_CDC_COMPANDER_NOHALT);
+ }
+
+ return 0;
+}
+
+static int wcd9335_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ u16 gain_reg;
+ u16 reg;
+ int val;
+ int offset_val = 0;
+
+ if (!(strcmp(w->name, "RX INT0 INTERP"))) {
+ reg = WCD9335_CDC_RX0_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX0_RX_VOL_CTL;
+ } else if (!(strcmp(w->name, "RX INT1 INTERP"))) {
+ reg = WCD9335_CDC_RX1_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX1_RX_VOL_CTL;
+ } else if (!(strcmp(w->name, "RX INT2 INTERP"))) {
+ reg = WCD9335_CDC_RX2_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX2_RX_VOL_CTL;
+ } else if (!(strcmp(w->name, "RX INT3 INTERP"))) {
+ reg = WCD9335_CDC_RX3_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX3_RX_VOL_CTL;
+ } else if (!(strcmp(w->name, "RX INT4 INTERP"))) {
+ reg = WCD9335_CDC_RX4_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX4_RX_VOL_CTL;
+ } else if (!(strcmp(w->name, "RX INT5 INTERP"))) {
+ reg = WCD9335_CDC_RX5_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX5_RX_VOL_CTL;
+ } else if (!(strcmp(w->name, "RX INT6 INTERP"))) {
+ reg = WCD9335_CDC_RX6_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX6_RX_VOL_CTL;
+ } else if (!(strcmp(w->name, "RX INT7 INTERP"))) {
+ reg = WCD9335_CDC_RX7_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX7_RX_VOL_CTL;
+ } else if (!(strcmp(w->name, "RX INT8 INTERP"))) {
+ reg = WCD9335_CDC_RX8_RX_PATH_CTL;
+ gain_reg = WCD9335_CDC_RX8_RX_VOL_CTL;
+ } else {
+ dev_err(comp->dev, "%s: Interpolator reg not found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Power up the primary interpolator for this path */
+ wcd9335_codec_enable_prim_interpolator(comp, reg, event);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ wcd9335_config_compander(comp, w->shift, event);
+ val = snd_soc_component_read32(comp, gain_reg);
+ val += offset_val;
+ snd_soc_component_write(comp, gain_reg, val);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd9335_config_compander(comp, w->shift, event);
+ wcd9335_codec_enable_prim_interpolator(comp, reg, event);
+ break;
+ }
+
+ return 0;
+}
+
+static void wcd9335_codec_hph_mode_gain_opt(struct snd_soc_component *component,
+ u8 gain)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ u8 hph_l_en, hph_r_en;
+ u8 l_val, r_val;
+ u8 hph_pa_status;
+ bool is_hphl_pa, is_hphr_pa;
+
+ hph_pa_status = snd_soc_component_read32(component, WCD9335_ANA_HPH);
+ is_hphl_pa = hph_pa_status >> 7;
+ is_hphr_pa = (hph_pa_status & 0x40) >> 6;
+
+ hph_l_en = snd_soc_component_read32(component, WCD9335_HPH_L_EN);
+ hph_r_en = snd_soc_component_read32(component, WCD9335_HPH_R_EN);
+
+ l_val = (hph_l_en & 0xC0) | 0x20 | gain;
+ r_val = (hph_r_en & 0xC0) | 0x20 | gain;
+
+ /*
+ * Select REGISTER as the HPH_L & HPH_R gain source for better
+ * click-and-pop performance, but only if the corresponding PAs
+ * are not enabled. Also cache the HPHL/R PA gain values so they
+ * can be applied after the PAs are enabled.
+ */
+ if ((l_val != hph_l_en) && !is_hphl_pa) {
+ snd_soc_component_write(component, WCD9335_HPH_L_EN, l_val);
+ wcd->hph_l_gain = hph_l_en & 0x1F;
+ }
+
+ if ((r_val != hph_r_en) && !is_hphr_pa) {
+ snd_soc_component_write(component, WCD9335_HPH_R_EN, r_val);
+ wcd->hph_r_gain = hph_r_en & 0x1F;
+ }
+}
+
+static void wcd9335_codec_hph_lohifi_config(struct snd_soc_component *comp,
+ int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ snd_soc_component_update_bits(comp, WCD9335_RX_BIAS_HPH_PA,
+ WCD9335_RX_BIAS_HPH_PA_AMP_5_UA_MASK,
+ 0x06);
+ snd_soc_component_update_bits(comp,
+ WCD9335_RX_BIAS_HPH_RDACBUFF_CNP2,
+ 0xF0, 0x40);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_CNP_WG_CTL,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_MASK,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_RATIO_1000);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_MASK,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_ENABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL1,
+ WCD9335_HPH_PA_GM3_IB_SCALE_MASK,
+ 0x0C);
+ wcd9335_codec_hph_mode_gain_opt(comp, 0x11);
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_MASK,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_DISABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_CNP_WG_CTL,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_MASK,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_RATIO_500);
+ snd_soc_component_write(comp, WCD9335_RX_BIAS_HPH_RDACBUFF_CNP2,
+ 0x8A);
+ snd_soc_component_update_bits(comp, WCD9335_RX_BIAS_HPH_PA,
+ WCD9335_RX_BIAS_HPH_PA_AMP_5_UA_MASK,
+ 0x0A);
+ }
+}
+
+static void wcd9335_codec_hph_lp_config(struct snd_soc_component *comp,
+ int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL1,
+ WCD9335_HPH_PA_GM3_IB_SCALE_MASK,
+ 0x0C);
+ wcd9335_codec_hph_mode_gain_opt(comp, 0x10);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_CNP_WG_CTL,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_MASK,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_RATIO_1000);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_MASK,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_ENABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_FORCE_PSRREH_MASK,
+ WCD9335_HPH_PA_CTL2_FORCE_PSRREH_ENABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_HPH_PSRR_ENH_MASK,
+ WCD9335_HPH_PA_CTL2_HPH_PSRR_ENABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_RDAC_LDO_CTL,
+ WCD9335_HPH_RDAC_N1P65_LD_OUTCTL_MASK,
+ WCD9335_HPH_RDAC_N1P65_LD_OUTCTL_V_N1P60);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_RDAC_LDO_CTL,
+ WCD9335_HPH_RDAC_1P65_LD_OUTCTL_MASK,
+ WCD9335_HPH_RDAC_1P65_LD_OUTCTL_V_N1P60);
+ snd_soc_component_update_bits(comp,
+ WCD9335_RX_BIAS_HPH_RDAC_LDO, 0x0F, 0x01);
+ snd_soc_component_update_bits(comp,
+ WCD9335_RX_BIAS_HPH_RDAC_LDO, 0xF0, 0x10);
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ snd_soc_component_write(comp, WCD9335_RX_BIAS_HPH_RDAC_LDO,
+ 0x88);
+ snd_soc_component_write(comp, WCD9335_HPH_RDAC_LDO_CTL,
+ 0x33);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_HPH_PSRR_ENH_MASK,
+ WCD9335_HPH_PA_CTL2_HPH_PSRR_DISABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_FORCE_PSRREH_MASK,
+ WCD9335_HPH_PA_CTL2_FORCE_PSRREH_DISABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_MASK,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_DISABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_CNP_WG_CTL,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_MASK,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_RATIO_500);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_R_EN,
+ WCD9335_HPH_CONST_SEL_L_MASK,
+ WCD9335_HPH_CONST_SEL_L_HQ_PATH);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_L_EN,
+ WCD9335_HPH_CONST_SEL_L_MASK,
+ WCD9335_HPH_CONST_SEL_L_HQ_PATH);
+ }
+}
+
+static void wcd9335_codec_hph_hifi_config(struct snd_soc_component *comp,
+ int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ snd_soc_component_update_bits(comp, WCD9335_HPH_CNP_WG_CTL,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_MASK,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_RATIO_1000);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_MASK,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_ENABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL1,
+ WCD9335_HPH_PA_GM3_IB_SCALE_MASK,
+ 0x0C);
+ wcd9335_codec_hph_mode_gain_opt(comp, 0x11);
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ snd_soc_component_update_bits(comp, WCD9335_HPH_PA_CTL2,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_MASK,
+ WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_DISABLE);
+ snd_soc_component_update_bits(comp, WCD9335_HPH_CNP_WG_CTL,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_MASK,
+ WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_RATIO_500);
+ }
+}
+
+static void wcd9335_codec_hph_mode_config(struct snd_soc_component *component,
+ int event, int mode)
+{
+ switch (mode) {
+ case CLS_H_LP:
+ wcd9335_codec_hph_lp_config(component, event);
+ break;
+ case CLS_H_LOHIFI:
+ wcd9335_codec_hph_lohifi_config(component, event);
+ break;
+ case CLS_H_HIFI:
+ wcd9335_codec_hph_hifi_config(component, event);
+ break;
+ }
+}
+
+static int wcd9335_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc,
+ int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+ int hph_mode = wcd->hph_mode;
+ u8 dem_inp;
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Read DEM INP Select */
+ dem_inp = snd_soc_component_read32(comp,
+ WCD9335_CDC_RX1_RX_PATH_SEC0) & 0x03;
+ if (((hph_mode == CLS_H_HIFI) || (hph_mode == CLS_H_LOHIFI) ||
+ (hph_mode == CLS_H_LP)) && (dem_inp != 0x01)) {
+ dev_err(comp->dev, "Incorrect DEM Input\n");
+ return -EINVAL;
+ }
+ wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_PRE_DAC,
+ WCD_CLSH_STATE_HPHL,
+ ((hph_mode == CLS_H_LOHIFI) ?
+ CLS_H_HIFI : hph_mode));
+
+ wcd9335_codec_hph_mode_config(comp, event, hph_mode);
+
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(1000, 1100);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* 1000us required as per HW requirement */
+ usleep_range(1000, 1100);
+
+ if (!(wcd_clsh_ctrl_get_state(wcd->clsh_ctrl) &
+ WCD_CLSH_STATE_HPHR))
+ wcd9335_codec_hph_mode_config(comp, event, hph_mode);
+
+ wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_POST_PA,
+ WCD_CLSH_STATE_HPHL,
+ ((hph_mode == CLS_H_LOHIFI) ?
+ CLS_H_HIFI : hph_mode));
+ break;
+ }
+
+ return ret;
+}
+
+static int wcd9335_codec_lineout_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_PRE_DAC,
+ WCD_CLSH_STATE_LO, CLS_AB);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_POST_PA,
+ WCD_CLSH_STATE_LO, CLS_AB);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd9335_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_PRE_DAC,
+ WCD_CLSH_STATE_EAR, CLS_H_NORMAL);
+
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_POST_PA,
+ WCD_CLSH_STATE_EAR, CLS_H_NORMAL);
+ break;
+ }
+
+ return ret;
+}
+
+static void wcd9335_codec_hph_post_pa_config(struct wcd9335_codec *wcd,
+ int mode, int event)
+{
+ u8 scale_val = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ switch (mode) {
+ case CLS_H_HIFI:
+ scale_val = 0x3;
+ break;
+ case CLS_H_LOHIFI:
+ scale_val = 0x1;
+ break;
+ }
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ scale_val = 0x6;
+ break;
+ }
+
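+ /*
+ * Apply the GM3 bias scale chosen above; on power up also select
+ * the gain source and restore the cached HPH PA gains.
+ */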
+ if (scale_val)
+ snd_soc_component_update_bits(wcd->component,
+ WCD9335_HPH_PA_CTL1,
+ WCD9335_HPH_PA_GM3_IB_SCALE_MASK,
+ scale_val << 1);
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ if (wcd->comp_enabled[COMPANDER_1] ||
+ wcd->comp_enabled[COMPANDER_2]) {
+ /* Select the compander as the HPH gain source */
+ snd_soc_component_update_bits(wcd->component,
+ WCD9335_HPH_L_EN,
+ WCD9335_HPH_GAIN_SRC_SEL_MASK,
+ WCD9335_HPH_GAIN_SRC_SEL_COMPANDER);
+ snd_soc_component_update_bits(wcd->component,
+ WCD9335_HPH_R_EN,
+ WCD9335_HPH_GAIN_SRC_SEL_MASK,
+ WCD9335_HPH_GAIN_SRC_SEL_COMPANDER);
+ snd_soc_component_update_bits(wcd->component,
+ WCD9335_HPH_AUTO_CHOP,
+ WCD9335_HPH_AUTO_CHOP_MASK,
+ WCD9335_HPH_AUTO_CHOP_FORCE_ENABLE);
+ }
+ snd_soc_component_update_bits(wcd->component,
+ WCD9335_HPH_L_EN,
+ WCD9335_HPH_PA_GAIN_MASK,
+ wcd->hph_l_gain);
+ snd_soc_component_update_bits(wcd->component,
+ WCD9335_HPH_R_EN,
+ WCD9335_HPH_PA_GAIN_MASK,
+ wcd->hph_r_gain);
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event))
+ snd_soc_component_update_bits(wcd->component,
+ WCD9335_HPH_AUTO_CHOP,
+ WCD9335_HPH_AUTO_CHOP_MASK,
+ WCD9335_HPH_AUTO_CHOP_ENABLE_BY_CMPDR_GAIN);
+}
+
+static int wcd9335_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc,
+ int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+ int hph_mode = wcd->hph_mode;
+ u8 dem_inp;
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+
+ /* Read DEM INP Select */
+ dem_inp = snd_soc_component_read32(comp,
+ WCD9335_CDC_RX2_RX_PATH_SEC0) &
+ WCD9335_CDC_RX_PATH_DEM_INP_SEL_MASK;
+ if (((hph_mode == CLS_H_HIFI) || (hph_mode == CLS_H_LOHIFI) ||
+ (hph_mode == CLS_H_LP)) && (dem_inp != 0x01)) {
+ dev_err(comp->dev, "DEM Input not set correctly, hph_mode: %d\n",
+ hph_mode);
+ return -EINVAL;
+ }
+
+ wcd_clsh_ctrl_set_state(wcd->clsh_ctrl,
+ WCD_CLSH_EVENT_PRE_DAC,
+ WCD_CLSH_STATE_HPHR,
+ ((hph_mode == CLS_H_LOHIFI) ?
+ CLS_H_HIFI : hph_mode));
+
+ wcd9335_codec_hph_mode_config(comp, event, hph_mode);
+
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* 1000us required as per HW requirement */
+ usleep_range(1000, 1100);
+
+ if (!(wcd_clsh_ctrl_get_state(wcd->clsh_ctrl) &
+ WCD_CLSH_STATE_HPHL))
+ wcd9335_codec_hph_mode_config(comp, event, hph_mode);
+
+ wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_POST_PA,
+ WCD_CLSH_STATE_HPHR, ((hph_mode == CLS_H_LOHIFI) ?
+ CLS_H_HIFI : hph_mode));
+ break;
+ }
+
+ return ret;
+}
+
+static int wcd9335_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc,
+ int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+ int hph_mode = wcd->hph_mode;
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /*
+ * 7ms sleep is required after PA is enabled as per
+ * HW requirement
+ */
+ usleep_range(7000, 7100);
+
+ wcd9335_codec_hph_post_pa_config(wcd, hph_mode, event);
+ snd_soc_component_update_bits(comp,
+ WCD9335_CDC_RX1_RX_PATH_CTL,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_DISABLE);
+
+ /* Remove mix path mute if it is enabled */
+ if ((snd_soc_component_read32(comp,
+ WCD9335_CDC_RX1_RX_PATH_MIX_CTL)) &
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK)
+ snd_soc_component_update_bits(comp,
+ WCD9335_CDC_RX1_RX_PATH_MIX_CTL,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_DISABLE);
+
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ wcd9335_codec_hph_post_pa_config(wcd, hph_mode, event);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* 5ms sleep is required after PA is disabled as per
+ * HW requirement
+ */
+ usleep_range(5000, 5500);
+ break;
+ }
+
+ return ret;
+}
+
+static int wcd9335_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc,
+ int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ int vol_reg = 0, mix_vol_reg = 0;
+ int ret = 0;
+
+ if (w->reg == WCD9335_ANA_LO_1_2) {
+ if (w->shift == 7) {
+ vol_reg = WCD9335_CDC_RX3_RX_PATH_CTL;
+ mix_vol_reg = WCD9335_CDC_RX3_RX_PATH_MIX_CTL;
+ } else if (w->shift == 6) {
+ vol_reg = WCD9335_CDC_RX4_RX_PATH_CTL;
+ mix_vol_reg = WCD9335_CDC_RX4_RX_PATH_MIX_CTL;
+ }
+ } else if (w->reg == WCD9335_ANA_LO_3_4) {
+ if (w->shift == 7) {
+ vol_reg = WCD9335_CDC_RX5_RX_PATH_CTL;
+ mix_vol_reg = WCD9335_CDC_RX5_RX_PATH_MIX_CTL;
+ } else if (w->shift == 6) {
+ vol_reg = WCD9335_CDC_RX6_RX_PATH_CTL;
+ mix_vol_reg = WCD9335_CDC_RX6_RX_PATH_MIX_CTL;
+ }
+ } else {
+ dev_err(comp->dev, "Error enabling lineout PA\n");
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* 5ms sleep is required after PA is enabled as per
+ * HW requirement
+ */
+ usleep_range(5000, 5500);
+ snd_soc_component_update_bits(comp, vol_reg,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_DISABLE);
+
+ /* Remove mix path mute if it is enabled */
+ if ((snd_soc_component_read32(comp, mix_vol_reg)) &
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK)
+ snd_soc_component_update_bits(comp, mix_vol_reg,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_DISABLE);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* 5ms sleep is required after PA is disabled as per
+ * HW requirement
+ */
+ usleep_range(5000, 5500);
+ break;
+ }
+
+ return ret;
+}
+
+static void wcd9335_codec_init_flyback(struct snd_soc_component *component)
+{
+ snd_soc_component_update_bits(component, WCD9335_HPH_L_EN,
+ WCD9335_HPH_CONST_SEL_L_MASK,
+ WCD9335_HPH_CONST_SEL_L_BYPASS);
+ snd_soc_component_update_bits(component, WCD9335_HPH_R_EN,
+ WCD9335_HPH_CONST_SEL_L_MASK,
+ WCD9335_HPH_CONST_SEL_L_BYPASS);
+ snd_soc_component_update_bits(component, WCD9335_RX_BIAS_FLYB_BUFF,
+ WCD9335_RX_BIAS_FLYB_VPOS_5_UA_MASK,
+ WCD9335_RX_BIAS_FLYB_I_0P0_UA);
+ snd_soc_component_update_bits(component, WCD9335_RX_BIAS_FLYB_BUFF,
+ WCD9335_RX_BIAS_FLYB_VNEG_5_UA_MASK,
+ WCD9335_RX_BIAS_FLYB_I_0P0_UA);
+}
+
+static int wcd9335_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ wcd->rx_bias_count++;
+ if (wcd->rx_bias_count == 1) {
+ wcd9335_codec_init_flyback(comp);
+ snd_soc_component_update_bits(comp,
+ WCD9335_ANA_RX_SUPPLIES,
+ WCD9335_ANA_RX_BIAS_ENABLE_MASK,
+ WCD9335_ANA_RX_BIAS_ENABLE);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ wcd->rx_bias_count--;
+ if (!wcd->rx_bias_count)
+ snd_soc_component_update_bits(comp,
+ WCD9335_ANA_RX_SUPPLIES,
+ WCD9335_ANA_RX_BIAS_ENABLE_MASK,
+ WCD9335_ANA_RX_BIAS_DISABLE);
+ break;
+ }
+
+ return 0;
+}
+
+static int wcd9335_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+ int hph_mode = wcd->hph_mode;
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /*
+ * 7ms sleep is required after PA is enabled as per
+ * HW requirement
+ */
+ usleep_range(7000, 7100);
+ wcd9335_codec_hph_post_pa_config(wcd, hph_mode, event);
+ snd_soc_component_update_bits(comp,
+ WCD9335_CDC_RX2_RX_PATH_CTL,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_DISABLE);
+ /* Remove mix path mute if it is enabled */
+ if ((snd_soc_component_read32(comp,
+ WCD9335_CDC_RX2_RX_PATH_MIX_CTL)) &
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK)
+ snd_soc_component_update_bits(comp,
+ WCD9335_CDC_RX2_RX_PATH_MIX_CTL,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_DISABLE);
+
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ wcd9335_codec_hph_post_pa_config(wcd, hph_mode, event);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* 5ms sleep is required after PA is disabled as per
+ * HW requirement
+ */
+ usleep_range(5000, 5500);
+ break;
+ }
+
+ return ret;
+}
+
+static int wcd9335_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+ int ret = 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* 5ms sleep is required after PA is enabled as per
+ * HW requirement
+ */
+ usleep_range(5000, 5500);
+ snd_soc_component_update_bits(comp,
+ WCD9335_CDC_RX0_RX_PATH_CTL,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_DISABLE);
+ /* Remove mix path mute if it is enabled */
+ if ((snd_soc_component_read32(comp,
+ WCD9335_CDC_RX0_RX_PATH_MIX_CTL)) &
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK)
+ snd_soc_component_update_bits(comp,
+ WCD9335_CDC_RX0_RX_PATH_MIX_CTL,
+ WCD9335_CDC_RX_PGA_MUTE_EN_MASK,
+ WCD9335_CDC_RX_PGA_MUTE_DISABLE);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* 5ms sleep is required after PA is disabled as per
+ * HW requirement
+ */
+ usleep_range(5000, 5500);
+
+ break;
+ }
+
+ return ret;
+}
+
+static irqreturn_t wcd9335_slimbus_irq(int irq, void *data)
+{
+ struct wcd9335_codec *wcd = data;
+ unsigned long status = 0;
+ int i, j, port_id;
+ unsigned int val, int_val = 0;
+ irqreturn_t ret = IRQ_NONE;
+ bool tx;
+ unsigned short reg = 0;
+
+ for (i = WCD9335_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0;
+ i <= WCD9335_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) {
+ regmap_read(wcd->if_regmap, i, &val);
+ status |= ((u32)val << (8 * j));
+ }
+
+ for_each_set_bit(j, &status, 32) {
+ tx = (j >= 16 ? true : false);
+ port_id = (tx ? j - 16 : j);
+ regmap_read(wcd->if_regmap,
+ WCD9335_SLIM_PGD_PORT_INT_RX_SOURCE0 + j, &val);
+ if (val) {
+ if (!tx)
+ reg = WCD9335_SLIM_PGD_PORT_INT_EN0 +
+ (port_id / 8);
+ else
+ reg = WCD9335_SLIM_PGD_PORT_INT_TX_EN0 +
+ (port_id / 8);
+ regmap_read(
+ wcd->if_regmap, reg, &int_val);
+ /*
+ * Ignore interrupts for ports for which the
+ * interrupts are not specifically enabled.
+ */
+ if (!(int_val & (1 << (port_id % 8))))
+ continue;
+ }
+
+ if (val & WCD9335_SLIM_IRQ_OVERFLOW)
+ dev_err_ratelimited(wcd->dev,
+ "%s: overflow error on %s port %d, value %x\n",
+ __func__, (tx ? "TX" : "RX"), port_id, val);
+
+ if (val & WCD9335_SLIM_IRQ_UNDERFLOW)
+ dev_err_ratelimited(wcd->dev,
+ "%s: underflow error on %s port %d, value %x\n",
+ __func__, (tx ? "TX" : "RX"), port_id, val);
+
+ if ((val & WCD9335_SLIM_IRQ_OVERFLOW) ||
+ (val & WCD9335_SLIM_IRQ_UNDERFLOW)) {
+ if (!tx)
+ reg = WCD9335_SLIM_PGD_PORT_INT_EN0 +
+ (port_id / 8);
+ else
+ reg = WCD9335_SLIM_PGD_PORT_INT_TX_EN0 +
+ (port_id / 8);
+ regmap_read(
+ wcd->if_regmap, reg, &int_val);
+ if (int_val & (1 << (port_id % 8))) {
+ int_val = int_val ^ (1 << (port_id % 8));
+ regmap_write(wcd->if_regmap,
+ reg, int_val);
+ }
+ }
+
+ regmap_write(wcd->if_regmap,
+ WCD9335_SLIM_PGD_PORT_INT_CLR_RX_0 + (j / 8),
+ BIT(j % 8));
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static struct wcd9335_irq wcd9335_irqs[] = {
+ {
+ .irq = WCD9335_IRQ_SLIMBUS,
+ .handler = wcd9335_slimbus_irq,
+ .name = "SLIM Slave",
+ },
+};
+
+static int wcd9335_setup_irqs(struct wcd9335_codec *wcd)
+{
+ int irq, ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(wcd9335_irqs); i++) {
+ irq = regmap_irq_get_virq(wcd->irq_data, wcd9335_irqs[i].irq);
+ if (irq < 0) {
+ dev_err(wcd->dev, "Failed to get %s\n",
+ wcd9335_irqs[i].name);
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(wcd->dev, irq, NULL,
+ wcd9335_irqs[i].handler,
+ IRQF_TRIGGER_RISING,
+ wcd9335_irqs[i].name, wcd);
+ if (ret) {
+ dev_err(wcd->dev, "Failed to request %s\n",
+ wcd9335_irqs[i].name);
+ return ret;
+ }
+ }
+
+ /* enable interrupts on all slave ports */
+ for (i = 0; i < WCD9335_SLIM_NUM_PORT_REG; i++)
+ regmap_write(wcd->if_regmap, WCD9335_SLIM_PGD_PORT_INT_EN0 + i,
+ 0xFF);
+
+ return ret;
+}
+
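+ /* Reference-counted enable/disable of the SIDO CCL configuration */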
+static void wcd9335_cdc_sido_ccl_enable(struct wcd9335_codec *wcd,
+ bool ccl_flag)
+{
+ struct snd_soc_component *comp = wcd->component;
+
+ if (ccl_flag) {
+ if (++wcd->sido_ccl_cnt == 1)
+ snd_soc_component_write(comp, WCD9335_SIDO_SIDO_CCL_10,
+ WCD9335_SIDO_SIDO_CCL_DEF_VALUE);
+ } else {
+ if (wcd->sido_ccl_cnt == 0) {
+ dev_err(wcd->dev, "sido_ccl already disabled\n");
+ return;
+ }
+ if (--wcd->sido_ccl_cnt == 0)
+ snd_soc_component_write(comp, WCD9335_SIDO_SIDO_CCL_10,
+ WCD9335_SIDO_SIDO_CCL_10_ICHARG_PWR_SEL_C320FF);
+ }
+}
+
+static int wcd9335_enable_master_bias(struct wcd9335_codec *wcd)
+{
+ wcd->master_bias_users++;
+ if (wcd->master_bias_users == 1) {
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_BIAS,
+ WCD9335_ANA_BIAS_EN_MASK,
+ WCD9335_ANA_BIAS_ENABLE);
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_BIAS,
+ WCD9335_ANA_BIAS_PRECHRG_EN_MASK,
+ WCD9335_ANA_BIAS_PRECHRG_ENABLE);
+ /*
+ * 1ms delay is required after pre-charge is enabled
+ * as per HW requirement
+ */
+ usleep_range(1000, 1100);
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_BIAS,
+ WCD9335_ANA_BIAS_PRECHRG_EN_MASK,
+ WCD9335_ANA_BIAS_PRECHRG_DISABLE);
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_BIAS,
+ WCD9335_ANA_BIAS_PRECHRG_CTL_MODE,
+ WCD9335_ANA_BIAS_PRECHRG_CTL_MODE_MANUAL);
+ }
+
+ return 0;
+}
+
+static int wcd9335_enable_mclk(struct wcd9335_codec *wcd)
+{
+ /* Enabling MCLK requires the master bias to be enabled first */
+ if (wcd->master_bias_users <= 0)
+ return -EINVAL;
+
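+ /*
+ * The clock type must be consistent with the current MCLK user
+ * count before a new user is added.
+ */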
+ if (((wcd->clk_mclk_users == 0) && (wcd->clk_type == WCD_CLK_MCLK)) ||
+ ((wcd->clk_mclk_users > 0) && (wcd->clk_type != WCD_CLK_MCLK))) {
+ dev_err(wcd->dev, "Error enabling MCLK, clk_type: %d\n",
+ wcd->clk_type);
+ return -EINVAL;
+ }
+
+ if (++wcd->clk_mclk_users == 1) {
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_CLK_TOP,
+ WCD9335_ANA_CLK_EXT_CLKBUF_EN_MASK,
+ WCD9335_ANA_CLK_EXT_CLKBUF_ENABLE);
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_CLK_TOP,
+ WCD9335_ANA_CLK_MCLK_SRC_MASK,
+ WCD9335_ANA_CLK_MCLK_SRC_EXTERNAL);
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_CLK_TOP,
+ WCD9335_ANA_CLK_MCLK_EN_MASK,
+ WCD9335_ANA_CLK_MCLK_ENABLE);
+ regmap_update_bits(wcd->regmap,
+ WCD9335_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
+ WCD9335_CDC_CLK_RST_CTRL_FS_CNT_EN_MASK,
+ WCD9335_CDC_CLK_RST_CTRL_FS_CNT_ENABLE);
+ regmap_update_bits(wcd->regmap,
+ WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL,
+ WCD9335_CDC_CLK_RST_CTRL_MCLK_EN_MASK,
+ WCD9335_CDC_CLK_RST_CTRL_MCLK_ENABLE);
+ /*
+ * 10us sleep is required after clock is enabled
+ * as per HW requirement
+ */
+ usleep_range(10, 15);
+ }
+
+ wcd->clk_type = WCD_CLK_MCLK;
+
+ return 0;
+}
+
+static int wcd9335_disable_mclk(struct wcd9335_codec *wcd)
+{
+ if (wcd->clk_mclk_users <= 0)
+ return -EINVAL;
+
+ if (--wcd->clk_mclk_users == 0) {
+ if (wcd->clk_rco_users > 0) {
+ /* MCLK to RCO switch */
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_CLK_TOP,
+ WCD9335_ANA_CLK_MCLK_SRC_MASK,
+ WCD9335_ANA_CLK_MCLK_SRC_RCO);
+ wcd->clk_type = WCD_CLK_RCO;
+ } else {
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_CLK_TOP,
+ WCD9335_ANA_CLK_MCLK_EN_MASK,
+ WCD9335_ANA_CLK_MCLK_DISABLE);
+ wcd->clk_type = WCD_CLK_OFF;
+ }
+
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_CLK_TOP,
+ WCD9335_ANA_CLK_EXT_CLKBUF_EN_MASK,
+ WCD9335_ANA_CLK_EXT_CLKBUF_DISABLE);
+ }
+
+ return 0;
+}
+
+static int wcd9335_disable_master_bias(struct wcd9335_codec *wcd)
+{
+ if (wcd->master_bias_users <= 0)
+ return -EINVAL;
+
+ wcd->master_bias_users--;
+ if (wcd->master_bias_users == 0) {
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_BIAS,
+ WCD9335_ANA_BIAS_EN_MASK,
+ WCD9335_ANA_BIAS_DISABLE);
+ regmap_update_bits(wcd->regmap, WCD9335_ANA_BIAS,
+ WCD9335_ANA_BIAS_PRECHRG_CTL_MODE,
+ WCD9335_ANA_BIAS_PRECHRG_CTL_MODE_MANUAL);
+ }
+ return 0;
+}
+
+static int wcd9335_cdc_req_mclk_enable(struct wcd9335_codec *wcd,
+ bool enable)
+{
+ int ret = 0;
+
+ if (enable) {
+ wcd9335_cdc_sido_ccl_enable(wcd, true);
+ ret = clk_prepare_enable(wcd->mclk);
+ if (ret) {
+ dev_err(wcd->dev, "%s: ext clk enable failed\n",
+ __func__);
+ goto err;
+ }
+ /* get BG */
+ wcd9335_enable_master_bias(wcd);
+ /* get MCLK */
+ wcd9335_enable_mclk(wcd);
+
+ } else {
+ /* put MCLK */
+ wcd9335_disable_mclk(wcd);
+ /* put BG */
+ wcd9335_disable_master_bias(wcd);
+ clk_disable_unprepare(wcd->mclk);
+ wcd9335_cdc_sido_ccl_enable(wcd, false);
+ }
+err:
+ return ret;
+}
+
+static void wcd9335_codec_apply_sido_voltage(struct wcd9335_codec *wcd,
+ enum wcd9335_sido_voltage req_mv)
+{
+ struct snd_soc_component *comp = wcd->component;
+ int vout_d_val;
+
+ if (req_mv == wcd->sido_voltage)
+ return;
+
+ /* compute the vout_d step value */
+ vout_d_val = WCD9335_CALCULATE_VOUT_D(req_mv) &
+ WCD9335_ANA_BUCK_VOUT_MASK;
+ snd_soc_component_write(comp, WCD9335_ANA_BUCK_VOUT_D, vout_d_val);
+ snd_soc_component_update_bits(comp, WCD9335_ANA_BUCK_CTL,
+ WCD9335_ANA_BUCK_CTL_RAMP_START_MASK,
+ WCD9335_ANA_BUCK_CTL_RAMP_START_ENABLE);
+
+ /* 1 msec sleep required after SIDO Vout_D voltage change */
+ usleep_range(1000, 1100);
+ wcd->sido_voltage = req_mv;
+ snd_soc_component_update_bits(comp, WCD9335_ANA_BUCK_CTL,
+ WCD9335_ANA_BUCK_CTL_RAMP_START_MASK,
+ WCD9335_ANA_BUCK_CTL_RAMP_START_DISABLE);
+}
+
+static int wcd9335_codec_update_sido_voltage(struct wcd9335_codec *wcd,
+ enum wcd9335_sido_voltage req_mv)
+{
+ int ret = 0;
+
+ /* enable mclk before setting SIDO voltage */
+ ret = wcd9335_cdc_req_mclk_enable(wcd, true);
+ if (ret) {
+ dev_err(wcd->dev, "Ext clk enable failed\n");
+ goto err;
+ }
+
+ wcd9335_codec_apply_sido_voltage(wcd, req_mv);
+ wcd9335_cdc_req_mclk_enable(wcd, false);
+
+err:
+ return ret;
+}
+
+static int _wcd9335_codec_enable_mclk(struct snd_soc_component *component,
+ int enable)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ int ret;
+
+ if (enable) {
+ ret = wcd9335_cdc_req_mclk_enable(wcd, true);
+ if (ret)
+ return ret;
+
+ wcd9335_codec_apply_sido_voltage(wcd,
+ SIDO_VOLTAGE_NOMINAL_MV);
+ } else {
+ wcd9335_codec_update_sido_voltage(wcd,
+ wcd->sido_voltage);
+ wcd9335_cdc_req_mclk_enable(wcd, false);
+ }
+
+ return 0;
+}
+
+static int wcd9335_codec_enable_mclk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kc, int event)
+{
+ struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ return _wcd9335_codec_enable_mclk(comp, true);
+ case SND_SOC_DAPM_POST_PMD:
+ return _wcd9335_codec_enable_mclk(comp, false);
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget wcd9335_dapm_widgets[] = {
+ /* TODO SPK1 & SPK2 OUT */
+ SND_SOC_DAPM_OUTPUT("EAR"),
+ SND_SOC_DAPM_OUTPUT("HPHL"),
+ SND_SOC_DAPM_OUTPUT("HPHR"),
+ SND_SOC_DAPM_OUTPUT("LINEOUT1"),
+ SND_SOC_DAPM_OUTPUT("LINEOUT2"),
+ SND_SOC_DAPM_OUTPUT("LINEOUT3"),
+ SND_SOC_DAPM_OUTPUT("LINEOUT4"),
+ SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM,
+ AIF1_PB, 0, wcd9335_codec_enable_slim,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_IN_E("AIF2 PB", "AIF2 Playback", 0, SND_SOC_NOPM,
+ AIF2_PB, 0, wcd9335_codec_enable_slim,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_IN_E("AIF3 PB", "AIF3 Playback", 0, SND_SOC_NOPM,
+ AIF3_PB, 0, wcd9335_codec_enable_slim,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_IN_E("AIF4 PB", "AIF4 Playback", 0, SND_SOC_NOPM,
+ AIF4_PB, 0, wcd9335_codec_enable_slim,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("SLIM RX0 MUX", SND_SOC_NOPM, WCD9335_RX0, 0,
+ &slim_rx_mux[WCD9335_RX0]),
+ SND_SOC_DAPM_MUX("SLIM RX1 MUX", SND_SOC_NOPM, WCD9335_RX1, 0,
+ &slim_rx_mux[WCD9335_RX1]),
+ SND_SOC_DAPM_MUX("SLIM RX2 MUX", SND_SOC_NOPM, WCD9335_RX2, 0,
+ &slim_rx_mux[WCD9335_RX2]),
+ SND_SOC_DAPM_MUX("SLIM RX3 MUX", SND_SOC_NOPM, WCD9335_RX3, 0,
+ &slim_rx_mux[WCD9335_RX3]),
+ SND_SOC_DAPM_MUX("SLIM RX4 MUX", SND_SOC_NOPM, WCD9335_RX4, 0,
+ &slim_rx_mux[WCD9335_RX4]),
+ SND_SOC_DAPM_MUX("SLIM RX5 MUX", SND_SOC_NOPM, WCD9335_RX5, 0,
+ &slim_rx_mux[WCD9335_RX5]),
+ SND_SOC_DAPM_MUX("SLIM RX6 MUX", SND_SOC_NOPM, WCD9335_RX6, 0,
+ &slim_rx_mux[WCD9335_RX6]),
+ SND_SOC_DAPM_MUX("SLIM RX7 MUX", SND_SOC_NOPM, WCD9335_RX7, 0,
+ &slim_rx_mux[WCD9335_RX7]),
+ SND_SOC_DAPM_MIXER("SLIM RX0", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SLIM RX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SLIM RX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SLIM RX3", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SLIM RX4", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SLIM RX5", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SLIM RX6", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SLIM RX7", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MUX_E("RX INT0_2 MUX", WCD9335_CDC_RX0_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int0_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX_E("RX INT1_2 MUX", WCD9335_CDC_RX1_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int1_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX_E("RX INT2_2 MUX", WCD9335_CDC_RX2_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int2_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX_E("RX INT3_2 MUX", WCD9335_CDC_RX3_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int3_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX_E("RX INT4_2 MUX", WCD9335_CDC_RX4_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int4_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX_E("RX INT5_2 MUX", WCD9335_CDC_RX5_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int5_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX_E("RX INT6_2 MUX", WCD9335_CDC_RX6_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int6_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX_E("RX INT7_2 MUX", WCD9335_CDC_RX7_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int7_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX_E("RX INT8_2 MUX", WCD9335_CDC_RX8_RX_PATH_MIX_CTL,
+ 5, 0, &rx_int8_2_mux, wcd9335_codec_enable_mix_path,
+ SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_MUX("RX INT0_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int0_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT0_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int0_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT0_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int0_1_mix_inp2_mux),
+ SND_SOC_DAPM_MUX("RX INT1_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int1_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT1_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int1_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT1_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int1_1_mix_inp2_mux),
+ SND_SOC_DAPM_MUX("RX INT2_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int2_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT2_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int2_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT2_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int2_1_mix_inp2_mux),
+ SND_SOC_DAPM_MUX("RX INT3_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int3_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT3_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int3_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT3_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int3_1_mix_inp2_mux),
+ SND_SOC_DAPM_MUX("RX INT4_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int4_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT4_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int4_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT4_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int4_1_mix_inp2_mux),
+ SND_SOC_DAPM_MUX("RX INT5_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int5_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT5_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int5_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT5_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int5_1_mix_inp2_mux),
+ SND_SOC_DAPM_MUX("RX INT6_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int6_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT6_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int6_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT6_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int6_1_mix_inp2_mux),
+ SND_SOC_DAPM_MUX("RX INT7_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int7_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT7_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int7_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT7_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int7_1_mix_inp2_mux),
+ SND_SOC_DAPM_MUX("RX INT8_1 MIX1 INP0", SND_SOC_NOPM, 0, 0,
+ &rx_int8_1_mix_inp0_mux),
+ SND_SOC_DAPM_MUX("RX INT8_1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_int8_1_mix_inp1_mux),
+ SND_SOC_DAPM_MUX("RX INT8_1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_int8_1_mix_inp2_mux),
+
+ SND_SOC_DAPM_MIXER("RX INT0_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT0 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT1_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT1 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT2_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT2 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT3_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT3 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT4_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT4 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT5_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT5 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT6_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT6 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT7_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT7 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT8_1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT8 SEC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("RX INT0 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT1 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT2 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT3 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT4 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT5 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT6 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT7 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("RX INT8 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("RX INT0 DEM MUX", SND_SOC_NOPM, 0, 0,
+ &rx_int0_dem_inp_mux),
+ SND_SOC_DAPM_MUX("RX INT1 DEM MUX", SND_SOC_NOPM, 0, 0,
+ &rx_int1_dem_inp_mux),
+ SND_SOC_DAPM_MUX("RX INT2 DEM MUX", SND_SOC_NOPM, 0, 0,
+ &rx_int2_dem_inp_mux),
+
+ SND_SOC_DAPM_MUX_E("RX INT0 INTERP", SND_SOC_NOPM,
+ INTERP_EAR, 0, &rx_int0_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX INT1 INTERP", SND_SOC_NOPM,
+ INTERP_HPHL, 0, &rx_int1_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX INT2 INTERP", SND_SOC_NOPM,
+ INTERP_HPHR, 0, &rx_int2_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX INT3 INTERP", SND_SOC_NOPM,
+ INTERP_LO1, 0, &rx_int3_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX INT4 INTERP", SND_SOC_NOPM,
+ INTERP_LO2, 0, &rx_int4_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX INT5 INTERP", SND_SOC_NOPM,
+ INTERP_LO3, 0, &rx_int5_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX INT6 INTERP", SND_SOC_NOPM,
+ INTERP_LO4, 0, &rx_int6_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX INT7 INTERP", SND_SOC_NOPM,
+ INTERP_SPKR1, 0, &rx_int7_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX INT8 INTERP", SND_SOC_NOPM,
+ INTERP_SPKR2, 0, &rx_int8_interp_mux,
+ wcd9335_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_DAC_E("RX INT0 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, wcd9335_codec_ear_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD9335_ANA_HPH,
+ 5, 0, wcd9335_codec_hphl_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD9335_ANA_HPH,
+ 4, 0, wcd9335_codec_hphr_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, wcd9335_codec_lineout_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RX INT4 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, wcd9335_codec_lineout_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RX INT5 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, wcd9335_codec_lineout_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC_E("RX INT6 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, wcd9335_codec_lineout_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("HPHL PA", WCD9335_ANA_HPH, 7, 0, NULL, 0,
+ wcd9335_codec_enable_hphl_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("HPHR PA", WCD9335_ANA_HPH, 6, 0, NULL, 0,
+ wcd9335_codec_enable_hphr_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("EAR PA", WCD9335_ANA_EAR, 7, 0, NULL, 0,
+ wcd9335_codec_enable_ear_pa,
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("LINEOUT1 PA", WCD9335_ANA_LO_1_2, 7, 0, NULL, 0,
+ wcd9335_codec_enable_lineout_pa,
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("LINEOUT2 PA", WCD9335_ANA_LO_1_2, 6, 0, NULL, 0,
+ wcd9335_codec_enable_lineout_pa,
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("LINEOUT3 PA", WCD9335_ANA_LO_3_4, 7, 0, NULL, 0,
+ wcd9335_codec_enable_lineout_pa,
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("LINEOUT4 PA", WCD9335_ANA_LO_3_4, 6, 0, NULL, 0,
+ wcd9335_codec_enable_lineout_pa,
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_mclk, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ /* TX */
+ SND_SOC_DAPM_INPUT("AMIC1"),
+ SND_SOC_DAPM_INPUT("AMIC2"),
+ SND_SOC_DAPM_INPUT("AMIC3"),
+ SND_SOC_DAPM_INPUT("AMIC4"),
+ SND_SOC_DAPM_INPUT("AMIC5"),
+ SND_SOC_DAPM_INPUT("AMIC6"),
+
+ SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM,
+ AIF1_CAP, 0, wcd9335_codec_enable_slim,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("AIF2 CAP", "AIF2 Capture", 0, SND_SOC_NOPM,
+ AIF2_CAP, 0, wcd9335_codec_enable_slim,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT_E("AIF3 CAP", "AIF3 Capture", 0, SND_SOC_NOPM,
+ AIF3_CAP, 0, wcd9335_codec_enable_slim,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY("MIC BIAS1", SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_micbias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MIC BIAS2", SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_micbias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MIC BIAS3", SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_micbias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MIC BIAS4", SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_micbias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_ADC_E("ADC1", NULL, WCD9335_ANA_AMIC1, 7, 0,
+ wcd9335_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_ADC_E("ADC2", NULL, WCD9335_ANA_AMIC2, 7, 0,
+ wcd9335_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_ADC_E("ADC3", NULL, WCD9335_ANA_AMIC3, 7, 0,
+ wcd9335_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_ADC_E("ADC4", NULL, WCD9335_ANA_AMIC4, 7, 0,
+ wcd9335_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_ADC_E("ADC5", NULL, WCD9335_ANA_AMIC5, 7, 0,
+ wcd9335_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_ADC_E("ADC6", NULL, WCD9335_ANA_AMIC6, 7, 0,
+ wcd9335_codec_enable_adc, SND_SOC_DAPM_PRE_PMU),
+
+ /* Digital Mic Inputs */
+ SND_SOC_DAPM_ADC_E("DMIC0", NULL, SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_ADC_E("DMIC5", NULL, SND_SOC_NOPM, 0, 0,
+ wcd9335_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX("DMIC MUX0", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux0),
+ SND_SOC_DAPM_MUX("DMIC MUX1", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux1),
+ SND_SOC_DAPM_MUX("DMIC MUX2", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux2),
+ SND_SOC_DAPM_MUX("DMIC MUX3", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux3),
+ SND_SOC_DAPM_MUX("DMIC MUX4", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux4),
+ SND_SOC_DAPM_MUX("DMIC MUX5", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux5),
+ SND_SOC_DAPM_MUX("DMIC MUX6", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux6),
+ SND_SOC_DAPM_MUX("DMIC MUX7", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux7),
+ SND_SOC_DAPM_MUX("DMIC MUX8", SND_SOC_NOPM, 0, 0,
+ &tx_dmic_mux8),
+
+ SND_SOC_DAPM_MUX("AMIC MUX0", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux0),
+ SND_SOC_DAPM_MUX("AMIC MUX1", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux1),
+ SND_SOC_DAPM_MUX("AMIC MUX2", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux2),
+ SND_SOC_DAPM_MUX("AMIC MUX3", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux3),
+ SND_SOC_DAPM_MUX("AMIC MUX4", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux4),
+ SND_SOC_DAPM_MUX("AMIC MUX5", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux5),
+ SND_SOC_DAPM_MUX("AMIC MUX6", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux6),
+ SND_SOC_DAPM_MUX("AMIC MUX7", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux7),
+ SND_SOC_DAPM_MUX("AMIC MUX8", SND_SOC_NOPM, 0, 0,
+ &tx_amic_mux8),
+
+ SND_SOC_DAPM_MIXER("AIF1_CAP Mixer", SND_SOC_NOPM, AIF1_CAP, 0,
+ aif1_cap_mixer, ARRAY_SIZE(aif1_cap_mixer)),
+
+ SND_SOC_DAPM_MIXER("AIF2_CAP Mixer", SND_SOC_NOPM, AIF2_CAP, 0,
+ aif2_cap_mixer, ARRAY_SIZE(aif2_cap_mixer)),
+
+ SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0,
+ aif3_cap_mixer, ARRAY_SIZE(aif3_cap_mixer)),
+
+ SND_SOC_DAPM_MUX("SLIM TX0 MUX", SND_SOC_NOPM, WCD9335_TX0, 0,
+ &sb_tx0_mux),
+ SND_SOC_DAPM_MUX("SLIM TX1 MUX", SND_SOC_NOPM, WCD9335_TX1, 0,
+ &sb_tx1_mux),
+ SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, WCD9335_TX2, 0,
+ &sb_tx2_mux),
+ SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, WCD9335_TX3, 0,
+ &sb_tx3_mux),
+ SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, WCD9335_TX4, 0,
+ &sb_tx4_mux),
+ SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, WCD9335_TX5, 0,
+ &sb_tx5_mux),
+ SND_SOC_DAPM_MUX("SLIM TX6 MUX", SND_SOC_NOPM, WCD9335_TX6, 0,
+ &sb_tx6_mux),
+ SND_SOC_DAPM_MUX("SLIM TX7 MUX", SND_SOC_NOPM, WCD9335_TX7, 0,
+ &sb_tx7_mux),
+ SND_SOC_DAPM_MUX("SLIM TX8 MUX", SND_SOC_NOPM, WCD9335_TX8, 0,
+ &sb_tx8_mux),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX0", WCD9335_CDC_TX0_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux0, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX1", WCD9335_CDC_TX1_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux1, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX2", WCD9335_CDC_TX2_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux2, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX3", WCD9335_CDC_TX3_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux3, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX4", WCD9335_CDC_TX4_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux4, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX5", WCD9335_CDC_TX5_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux5, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX6", WCD9335_CDC_TX6_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux6, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX7", WCD9335_CDC_TX7_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux7, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("ADC MUX8", WCD9335_CDC_TX8_TX_PATH_CTL, 5, 0,
+ &tx_adc_mux8, wcd9335_codec_enable_dec,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+};
+
+static void wcd9335_enable_sido_buck(struct snd_soc_component *component)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+
+ snd_soc_component_update_bits(component, WCD9335_ANA_RCO,
+ WCD9335_ANA_RCO_BG_EN_MASK,
+ WCD9335_ANA_RCO_BG_ENABLE);
+ snd_soc_component_update_bits(component, WCD9335_ANA_BUCK_CTL,
+ WCD9335_ANA_BUCK_CTL_VOUT_D_IREF_MASK,
+ WCD9335_ANA_BUCK_CTL_VOUT_D_IREF_EXT);
+ /* 100us sleep needed after IREF settings */
+ usleep_range(100, 110);
+ snd_soc_component_update_bits(component, WCD9335_ANA_BUCK_CTL,
+ WCD9335_ANA_BUCK_CTL_VOUT_D_VREF_MASK,
+ WCD9335_ANA_BUCK_CTL_VOUT_D_VREF_EXT);
+ /* 100us sleep needed after VREF settings */
+ usleep_range(100, 110);
+ wcd->sido_input_src = SIDO_SOURCE_RCO_BG;
+}
+
+static int wcd9335_enable_efuse_sensing(struct snd_soc_component *comp)
+{
+ _wcd9335_codec_enable_mclk(comp, true);
+ snd_soc_component_update_bits(comp,
+ WCD9335_CHIP_TIER_CTRL_EFUSE_CTL,
+ WCD9335_CHIP_TIER_CTRL_EFUSE_EN_MASK,
+ WCD9335_CHIP_TIER_CTRL_EFUSE_ENABLE);
+ /*
+ * 5ms sleep required after enabling efuse control
+ * before checking the status.
+ */
+ usleep_range(5000, 5500);
+
+ if (!(snd_soc_component_read32(comp,
+ WCD9335_CHIP_TIER_CTRL_EFUSE_STATUS) &
+ WCD9335_CHIP_TIER_CTRL_EFUSE_EN_MASK))
+ WARN(1, "%s: Efuse sense is not complete\n", __func__);
+
+ wcd9335_enable_sido_buck(comp);
+ _wcd9335_codec_enable_mclk(comp, false);
+
+ return 0;
+}
+
+static void wcd9335_codec_init(struct snd_soc_component *component)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ int i;
+
+ /* ungate MCLK and set clk rate */
+ regmap_update_bits(wcd->regmap, WCD9335_CODEC_RPM_CLK_GATE,
+ WCD9335_CODEC_RPM_CLK_GATE_MCLK_GATE_MASK, 0);
+
+ regmap_update_bits(wcd->regmap, WCD9335_CODEC_RPM_CLK_MCLK_CFG,
+ WCD9335_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK,
+ WCD9335_CODEC_RPM_CLK_MCLK_CFG_9P6MHZ);
+
+ for (i = 0; i < ARRAY_SIZE(wcd9335_codec_reg_init); i++)
+ snd_soc_component_update_bits(component,
+ wcd9335_codec_reg_init[i].reg,
+ wcd9335_codec_reg_init[i].mask,
+ wcd9335_codec_reg_init[i].val);
+
+ wcd9335_enable_efuse_sensing(component);
+}
+
+static int wcd9335_codec_probe(struct snd_soc_component *component)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
+ int i;
+
+ snd_soc_component_init_regmap(component, wcd->regmap);
+	/* Class-H Init */
+ wcd->clsh_ctrl = wcd_clsh_ctrl_alloc(component, wcd->version);
+ if (IS_ERR(wcd->clsh_ctrl))
+ return PTR_ERR(wcd->clsh_ctrl);
+
+ /* Default HPH Mode to Class-H HiFi */
+ wcd->hph_mode = CLS_H_HIFI;
+ wcd->component = component;
+
+ wcd9335_codec_init(component);
+
+ for (i = 0; i < NUM_CODEC_DAIS; i++)
+ INIT_LIST_HEAD(&wcd->dai[i].slim_ch_list);
+
+ return wcd9335_setup_irqs(wcd);
+}
+
+static void wcd9335_codec_remove(struct snd_soc_component *comp)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+
+ wcd_clsh_ctrl_free(wcd->clsh_ctrl);
+ free_irq(regmap_irq_get_virq(wcd->irq_data, WCD9335_IRQ_SLIMBUS), wcd);
+}
+
+static int wcd9335_codec_set_sysclk(struct snd_soc_component *comp,
+ int clk_id, int source,
+ unsigned int freq, int dir)
+{
+ struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
+
+ wcd->mclk_rate = freq;
+
+ if (wcd->mclk_rate == WCD9335_MCLK_CLK_12P288MHZ)
+ snd_soc_component_update_bits(comp,
+ WCD9335_CODEC_RPM_CLK_MCLK_CFG,
+ WCD9335_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK,
+ WCD9335_CODEC_RPM_CLK_MCLK_CFG_12P288MHZ);
+ else if (wcd->mclk_rate == WCD9335_MCLK_CLK_9P6MHZ)
+ snd_soc_component_update_bits(comp,
+ WCD9335_CODEC_RPM_CLK_MCLK_CFG,
+ WCD9335_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK,
+ WCD9335_CODEC_RPM_CLK_MCLK_CFG_9P6MHZ);
+
+ return clk_set_rate(wcd->mclk, freq);
+}
+
+static const struct snd_soc_component_driver wcd9335_component_drv = {
+ .probe = wcd9335_codec_probe,
+ .remove = wcd9335_codec_remove,
+ .set_sysclk = wcd9335_codec_set_sysclk,
+ .controls = wcd9335_snd_controls,
+ .num_controls = ARRAY_SIZE(wcd9335_snd_controls),
+ .dapm_widgets = wcd9335_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wcd9335_dapm_widgets),
+ .dapm_routes = wcd9335_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wcd9335_audio_map),
+};
+
+static int wcd9335_probe(struct wcd9335_codec *wcd)
+{
+ struct device *dev = wcd->dev;
+
+ memcpy(wcd->rx_chs, wcd9335_rx_chs, sizeof(wcd9335_rx_chs));
+ memcpy(wcd->tx_chs, wcd9335_tx_chs, sizeof(wcd9335_tx_chs));
+
+ wcd->sido_input_src = SIDO_SOURCE_INTERNAL;
+ wcd->sido_voltage = SIDO_VOLTAGE_NOMINAL_MV;
+
+ return devm_snd_soc_register_component(dev, &wcd9335_component_drv,
+ wcd9335_slim_dais,
+ ARRAY_SIZE(wcd9335_slim_dais));
+}
+
+static const struct regmap_range_cfg wcd9335_ranges[] = {
+ {
+ .name = "WCD9335",
+ .range_min = 0x0,
+ .range_max = WCD9335_MAX_REGISTER,
+ .selector_reg = WCD9335_REG(0x0, 0),
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0x0,
+ .window_len = 0x1000,
+ },
+};
+
+static bool wcd9335_is_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+	case WCD9335_INTR_PIN1_STATUS0 ... WCD9335_INTR_PIN2_CLEAR3:
+ case WCD9335_ANA_MBHC_RESULT_3:
+ case WCD9335_ANA_MBHC_RESULT_2:
+ case WCD9335_ANA_MBHC_RESULT_1:
+ case WCD9335_ANA_MBHC_MECH:
+ case WCD9335_ANA_MBHC_ELECT:
+ case WCD9335_ANA_MBHC_ZDET:
+ case WCD9335_ANA_MICB2:
+ case WCD9335_ANA_RCO:
+ case WCD9335_ANA_BIAS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config wcd9335_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = WCD9335_MAX_REGISTER,
+ .can_multi_write = true,
+ .ranges = wcd9335_ranges,
+ .num_ranges = ARRAY_SIZE(wcd9335_ranges),
+ .volatile_reg = wcd9335_is_volatile_register,
+};
+
+static const struct regmap_range_cfg wcd9335_ifc_ranges[] = {
+ {
+ .name = "WCD9335-IFC-DEV",
+ .range_min = 0x0,
+ .range_max = WCD9335_REG(0, 0x7ff),
+ .selector_reg = WCD9335_REG(0, 0x0),
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0x0,
+ .window_len = 0x1000,
+ },
+};
+
+static struct regmap_config wcd9335_ifc_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .can_multi_write = true,
+ .max_register = WCD9335_REG(0, 0x7FF),
+ .ranges = wcd9335_ifc_ranges,
+ .num_ranges = ARRAY_SIZE(wcd9335_ifc_ranges),
+};
+
+static const struct regmap_irq wcd9335_codec_irqs[] = {
+ /* INTR_REG 0 */
+ [WCD9335_IRQ_SLIMBUS] = {
+ .reg_offset = 0,
+ .mask = BIT(0),
+ .type = {
+ .type_reg_offset = 0,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ .type_reg_mask = BIT(0),
+ },
+ },
+};
+
+static const struct regmap_irq_chip wcd9335_regmap_irq1_chip = {
+ .name = "wcd9335_pin1_irq",
+ .status_base = WCD9335_INTR_PIN1_STATUS0,
+ .mask_base = WCD9335_INTR_PIN1_MASK0,
+ .ack_base = WCD9335_INTR_PIN1_CLEAR0,
+ .type_base = WCD9335_INTR_LEVEL0,
+ .num_type_reg = 4,
+ .num_regs = 4,
+ .irqs = wcd9335_codec_irqs,
+ .num_irqs = ARRAY_SIZE(wcd9335_codec_irqs),
+};
+
+static int wcd9335_parse_dt(struct wcd9335_codec *wcd)
+{
+ struct device *dev = wcd->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ wcd->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
+ if (wcd->reset_gpio < 0) {
+ dev_err(dev, "Reset GPIO missing from DT\n");
+ return wcd->reset_gpio;
+ }
+
+ wcd->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(wcd->mclk)) {
+ dev_err(dev, "mclk not found\n");
+ return PTR_ERR(wcd->mclk);
+ }
+
+ wcd->native_clk = devm_clk_get(dev, "slimbus");
+ if (IS_ERR(wcd->native_clk)) {
+ dev_err(dev, "slimbus clock not found\n");
+ return PTR_ERR(wcd->native_clk);
+ }
+
+ wcd->supplies[0].supply = "vdd-buck";
+ wcd->supplies[1].supply = "vdd-buck-sido";
+ wcd->supplies[2].supply = "vdd-tx";
+ wcd->supplies[3].supply = "vdd-rx";
+ wcd->supplies[4].supply = "vdd-io";
+
+ ret = regulator_bulk_get(dev, WCD9335_MAX_SUPPLY, wcd->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to get supplies: err = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wcd9335_power_on_reset(struct wcd9335_codec *wcd)
+{
+ struct device *dev = wcd->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(WCD9335_MAX_SUPPLY, wcd->supplies);
+ if (ret) {
+		dev_err(dev, "Failed to enable supplies: err = %d\n", ret);
+ return ret;
+ }
+
+	/*
+	 * For WCD9335, it takes about 600us for the Vout_A and
+	 * Vout_D supplies to be ready after BUCK_SIDO is powered up.
+	 * SYS_RST_N should not be pulled high during this time.
+	 * Toggle the reset line afterwards to make sure the reset
+	 * pulse is applied correctly.
+	 */
+ usleep_range(600, 650);
+
+ gpio_direction_output(wcd->reset_gpio, 0);
+ msleep(20);
+ gpio_set_value(wcd->reset_gpio, 1);
+ msleep(20);
+
+ return 0;
+}
+
+static int wcd9335_bring_up(struct wcd9335_codec *wcd)
+{
+ struct regmap *rm = wcd->regmap;
+ int val, byte0;
+
+ regmap_read(rm, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, &val);
+ regmap_read(rm, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, &byte0);
+
+ if ((val < 0) || (byte0 < 0)) {
+		dev_err(wcd->dev, "WCD9335 CODEC version detection failed!\n");
+ return -EINVAL;
+ }
+
+ if (byte0 == 0x1) {
+ dev_info(wcd->dev, "WCD9335 CODEC version is v2.0\n");
+ wcd->version = WCD9335_VERSION_2_0;
+ regmap_write(rm, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+ regmap_write(rm, WCD9335_SIDO_SIDO_TEST_2, 0x00);
+ regmap_write(rm, WCD9335_SIDO_SIDO_CCL_8, 0x6F);
+ regmap_write(rm, WCD9335_BIAS_VBG_FINE_ADJ, 0x65);
+ regmap_write(rm, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
+ regmap_write(rm, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
+ regmap_write(rm, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+ regmap_write(rm, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+ } else {
+ dev_err(wcd->dev, "WCD9335 CODEC version not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wcd9335_irq_init(struct wcd9335_codec *wcd)
+{
+ int ret;
+
+	/*
+	 * INTR1 consists of all possible interrupt sources: Ear OCP,
+	 * HPH OCP, MBHC, MAD, VBAT and SVA.
+	 * INTR2 is a subset of the first set of sources: MAD, VBAT and SVA.
+	 */
+ wcd->intr1 = of_irq_get_byname(wcd->dev->of_node, "intr1");
+ if (wcd->intr1 < 0) {
+ if (wcd->intr1 != -EPROBE_DEFER)
+ dev_err(wcd->dev, "Unable to configure IRQ\n");
+
+ return wcd->intr1;
+ }
+
+ ret = devm_regmap_add_irq_chip(wcd->dev, wcd->regmap, wcd->intr1,
+ IRQF_TRIGGER_HIGH, 0,
+ &wcd9335_regmap_irq1_chip, &wcd->irq_data);
+ if (ret)
+ dev_err(wcd->dev, "Failed to register IRQ chip: %d\n", ret);
+
+ return ret;
+}
+
+static int wcd9335_slim_probe(struct slim_device *slim)
+{
+ struct device *dev = &slim->dev;
+ struct wcd9335_codec *wcd;
+ int ret;
+
+ wcd = devm_kzalloc(dev, sizeof(*wcd), GFP_KERNEL);
+ if (!wcd)
+ return -ENOMEM;
+
+ wcd->dev = dev;
+ ret = wcd9335_parse_dt(wcd);
+ if (ret) {
+ dev_err(dev, "Error parsing DT: %d\n", ret);
+ return ret;
+ }
+
+ ret = wcd9335_power_on_reset(wcd);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, wcd);
+
+ return 0;
+}
+
+static int wcd9335_slim_status(struct slim_device *sdev,
+ enum slim_device_status status)
+{
+ struct device *dev = &sdev->dev;
+ struct device_node *ifc_dev_np;
+ struct wcd9335_codec *wcd;
+ int ret;
+
+ wcd = dev_get_drvdata(dev);
+
+ ifc_dev_np = of_parse_phandle(dev->of_node, "slim-ifc-dev", 0);
+ if (!ifc_dev_np) {
+ dev_err(dev, "No Interface device found\n");
+ return -EINVAL;
+ }
+
+ wcd->slim = sdev;
+ wcd->slim_ifc_dev = of_slim_get_device(sdev->ctrl, ifc_dev_np);
+ if (!wcd->slim_ifc_dev) {
+ dev_err(dev, "Unable to get SLIM Interface device\n");
+ return -EINVAL;
+ }
+
+ slim_get_logical_addr(wcd->slim_ifc_dev);
+
+ wcd->regmap = regmap_init_slimbus(sdev, &wcd9335_regmap_config);
+ if (IS_ERR(wcd->regmap)) {
+ dev_err(dev, "Failed to allocate slim register map\n");
+ return PTR_ERR(wcd->regmap);
+ }
+
+ wcd->if_regmap = regmap_init_slimbus(wcd->slim_ifc_dev,
+ &wcd9335_ifc_regmap_config);
+ if (IS_ERR(wcd->if_regmap)) {
+ dev_err(dev, "Failed to allocate ifc register map\n");
+ return PTR_ERR(wcd->if_regmap);
+ }
+
+ ret = wcd9335_bring_up(wcd);
+ if (ret) {
+		dev_err(dev, "Failed to bring up WCD9335\n");
+ return ret;
+ }
+
+ ret = wcd9335_irq_init(wcd);
+ if (ret)
+ return ret;
+
+	return wcd9335_probe(wcd);
+}
+
+static const struct slim_device_id wcd9335_slim_id[] = {
+ {SLIM_MANF_ID_QCOM, SLIM_PROD_CODE_WCD9335, 0x1, 0x0},
+ {}
+};
+MODULE_DEVICE_TABLE(slim, wcd9335_slim_id);
+
+static struct slim_driver wcd9335_slim_driver = {
+ .driver = {
+ .name = "wcd9335-slim",
+ },
+ .probe = wcd9335_slim_probe,
+ .device_status = wcd9335_slim_status,
+ .id_table = wcd9335_slim_id,
+};
+
+module_slim_driver(wcd9335_slim_driver);
+MODULE_DESCRIPTION("WCD9335 slim driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("slim:217:1a0:*");
diff --git a/sound/soc/codecs/wcd9335.h b/sound/soc/codecs/wcd9335.h
new file mode 100644
index 000000000000..4d9be2496c30
--- /dev/null
+++ b/sound/soc/codecs/wcd9335.h
@@ -0,0 +1,640 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __WCD9335_H__
+#define __WCD9335_H__
+
+/*
+ * The WCD9335 register base changes according to the mode the codec
+ * works in: in SLIMbus mode the register base starts at 0x800, while
+ * in I2S/I2C mode it starts at 0x0.
+ */
+#define WCD9335_REG(pg, r)	(((pg) << 12) | (r) | 0x800)
+#define WCD9335_REG_OFFSET(r)	((r) & 0xFF)
+#define WCD9335_PAGE_OFFSET(r)	(((r) >> 12) & 0xFF)
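+/*
+ * Worked example (illustrative only, not an additional register): with
+ * the SLIMbus base of 0x800, WCD9335_REG(0x06, 0x001) expands to
+ * (0x06 << 12) | 0x001 | 0x800 = 0x6801, and conversely
+ * WCD9335_PAGE_OFFSET(0x6801) = 0x06, WCD9335_REG_OFFSET(0x6801) = 0x01.
+ */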
+
+/* Page-0 Registers */
+#define WCD9335_PAGE0_PAGE_REGISTER WCD9335_REG(0x00, 0x000)
+#define WCD9335_CODEC_RPM_CLK_GATE WCD9335_REG(0x00, 0x002)
+#define WCD9335_CODEC_RPM_CLK_GATE_MCLK_GATE_MASK GENMASK(1, 0)
+#define WCD9335_CODEC_RPM_CLK_MCLK_CFG WCD9335_REG(0x00, 0x003)
+#define WCD9335_CODEC_RPM_CLK_MCLK_CFG_9P6MHZ BIT(0)
+#define WCD9335_CODEC_RPM_CLK_MCLK_CFG_12P288MHZ	BIT(1)
+#define WCD9335_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK GENMASK(1, 0)
+#define WCD9335_CODEC_RPM_RST_CTL WCD9335_REG(0x00, 0x009)
+#define WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL WCD9335_REG(0x00, 0x011)
+#define WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0 WCD9335_REG(0x00, 0x021)
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_CTL WCD9335_REG(0x00, 0x025)
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_SSTATE_MASK GENMASK(4, 1)
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_EN_MASK BIT(0)
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_ENABLE BIT(0)
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0 WCD9335_REG(0x00, 0x029)
+#define WCD9335_CHIP_TIER_CTRL_EFUSE_STATUS WCD9335_REG(0x00, 0x039)
+#define WCD9335_INTR_CFG WCD9335_REG(0x00, 0x081)
+#define WCD9335_INTR_CLR_COMMIT WCD9335_REG(0x00, 0x082)
+#define WCD9335_INTR_PIN1_MASK0 WCD9335_REG(0x00, 0x089)
+#define WCD9335_INTR_PIN1_MASK1 WCD9335_REG(0x00, 0x08a)
+#define WCD9335_INTR_PIN1_MASK2 WCD9335_REG(0x00, 0x08b)
+#define WCD9335_INTR_PIN1_MASK3 WCD9335_REG(0x00, 0x08c)
+#define WCD9335_INTR_PIN1_STATUS0 WCD9335_REG(0x00, 0x091)
+#define WCD9335_INTR_PIN1_STATUS1 WCD9335_REG(0x00, 0x092)
+#define WCD9335_INTR_PIN1_STATUS2 WCD9335_REG(0x00, 0x093)
+#define WCD9335_INTR_PIN1_STATUS3 WCD9335_REG(0x00, 0x094)
+#define WCD9335_INTR_PIN1_CLEAR0 WCD9335_REG(0x00, 0x099)
+#define WCD9335_INTR_PIN1_CLEAR1 WCD9335_REG(0x00, 0x09a)
+#define WCD9335_INTR_PIN1_CLEAR2 WCD9335_REG(0x00, 0x09b)
+#define WCD9335_INTR_PIN1_CLEAR3 WCD9335_REG(0x00, 0x09c)
+#define WCD9335_INTR_PIN2_MASK0 WCD9335_REG(0x00, 0x0a1)
+#define WCD9335_INTR_PIN2_MASK1 WCD9335_REG(0x00, 0x0a2)
+#define WCD9335_INTR_PIN2_MASK2 WCD9335_REG(0x00, 0x0a3)
+#define WCD9335_INTR_PIN2_MASK3 WCD9335_REG(0x00, 0x0a4)
+#define WCD9335_INTR_PIN2_STATUS0 WCD9335_REG(0x00, 0x0a9)
+#define WCD9335_INTR_PIN2_STATUS1 WCD9335_REG(0x00, 0x0aa)
+#define WCD9335_INTR_PIN2_STATUS2 WCD9335_REG(0x00, 0x0ab)
+#define WCD9335_INTR_PIN2_STATUS3 WCD9335_REG(0x00, 0x0ac)
+#define WCD9335_INTR_PIN2_CLEAR0 WCD9335_REG(0x00, 0x0b1)
+#define WCD9335_INTR_PIN2_CLEAR1 WCD9335_REG(0x00, 0x0b2)
+#define WCD9335_INTR_PIN2_CLEAR2 WCD9335_REG(0x00, 0x0b3)
+#define WCD9335_INTR_PIN2_CLEAR3 WCD9335_REG(0x00, 0x0b4)
+#define WCD9335_INTR_LEVEL0 WCD9335_REG(0x00, 0x0e1)
+#define WCD9335_INTR_LEVEL1 WCD9335_REG(0x00, 0x0e2)
+#define WCD9335_INTR_LEVEL2 WCD9335_REG(0x00, 0x0e3)
+#define WCD9335_INTR_LEVEL3 WCD9335_REG(0x00, 0x0e4)
+
+/* Page-1 Registers */
+#define WCD9335_CPE_FLL_USER_CTL_0 WCD9335_REG(0x01, 0x001)
+#define WCD9335_CPE_FLL_USER_CTL_1 WCD9335_REG(0x01, 0x002)
+#define WCD9335_CPE_FLL_USER_CTL_2 WCD9335_REG(0x01, 0x003)
+#define WCD9335_CPE_FLL_USER_CTL_3 WCD9335_REG(0x01, 0x004)
+#define WCD9335_CPE_FLL_USER_CTL_4 WCD9335_REG(0x01, 0x005)
+#define WCD9335_CPE_FLL_USER_CTL_5 WCD9335_REG(0x01, 0x006)
+#define WCD9335_CPE_FLL_USER_CTL_6 WCD9335_REG(0x01, 0x007)
+#define WCD9335_CPE_FLL_USER_CTL_7 WCD9335_REG(0x01, 0x008)
+#define WCD9335_CPE_FLL_USER_CTL_8 WCD9335_REG(0x01, 0x009)
+#define WCD9335_CPE_FLL_USER_CTL_9 WCD9335_REG(0x01, 0x00a)
+#define WCD9335_CPE_FLL_L_VAL_CTL_0 WCD9335_REG(0x01, 0x00b)
+#define WCD9335_CPE_FLL_L_VAL_CTL_1 WCD9335_REG(0x01, 0x00c)
+#define WCD9335_CPE_FLL_DSM_FRAC_CTL_0 WCD9335_REG(0x01, 0x00d)
+#define WCD9335_CPE_FLL_DSM_FRAC_CTL_1 WCD9335_REG(0x01, 0x00e)
+#define WCD9335_CPE_FLL_CONFIG_CTL_0 WCD9335_REG(0x01, 0x00f)
+#define WCD9335_CPE_FLL_CONFIG_CTL_1 WCD9335_REG(0x01, 0x010)
+#define WCD9335_CPE_FLL_CONFIG_CTL_2 WCD9335_REG(0x01, 0x011)
+#define WCD9335_CPE_FLL_CONFIG_CTL_3 WCD9335_REG(0x01, 0x012)
+#define WCD9335_CPE_FLL_CONFIG_CTL_4 WCD9335_REG(0x01, 0x013)
+#define WCD9335_CPE_FLL_TEST_CTL_0 WCD9335_REG(0x01, 0x014)
+#define WCD9335_CPE_FLL_TEST_CTL_1 WCD9335_REG(0x01, 0x015)
+#define WCD9335_CPE_FLL_TEST_CTL_2 WCD9335_REG(0x01, 0x016)
+#define WCD9335_CPE_FLL_TEST_CTL_3 WCD9335_REG(0x01, 0x017)
+#define WCD9335_CPE_FLL_TEST_CTL_4 WCD9335_REG(0x01, 0x018)
+#define WCD9335_CPE_FLL_TEST_CTL_5 WCD9335_REG(0x01, 0x019)
+#define WCD9335_CPE_FLL_TEST_CTL_6 WCD9335_REG(0x01, 0x01a)
+#define WCD9335_CPE_FLL_TEST_CTL_7 WCD9335_REG(0x01, 0x01b)
+#define WCD9335_CPE_FLL_FREQ_CTL_0 WCD9335_REG(0x01, 0x01c)
+#define WCD9335_CPE_FLL_FREQ_CTL_1 WCD9335_REG(0x01, 0x01d)
+#define WCD9335_CPE_FLL_FREQ_CTL_2 WCD9335_REG(0x01, 0x01e)
+#define WCD9335_CPE_FLL_FREQ_CTL_3 WCD9335_REG(0x01, 0x01f)
+#define WCD9335_CPE_FLL_SSC_CTL_0 WCD9335_REG(0x01, 0x020)
+#define WCD9335_CPE_FLL_SSC_CTL_1 WCD9335_REG(0x01, 0x021)
+#define WCD9335_CPE_FLL_SSC_CTL_2 WCD9335_REG(0x01, 0x022)
+#define WCD9335_CPE_FLL_SSC_CTL_3 WCD9335_REG(0x01, 0x023)
+#define WCD9335_CPE_FLL_FLL_MODE WCD9335_REG(0x01, 0x024)
+#define WCD9335_CPE_FLL_STATUS_0 WCD9335_REG(0x01, 0x025)
+#define WCD9335_CPE_FLL_STATUS_1 WCD9335_REG(0x01, 0x026)
+#define WCD9335_CPE_FLL_STATUS_2 WCD9335_REG(0x01, 0x027)
+#define WCD9335_CPE_FLL_STATUS_3 WCD9335_REG(0x01, 0x028)
+#define WCD9335_I2S_FLL_USER_CTL_0 WCD9335_REG(0x01, 0x041)
+#define WCD9335_I2S_FLL_USER_CTL_1 WCD9335_REG(0x01, 0x042)
+#define WCD9335_I2S_FLL_USER_CTL_2 WCD9335_REG(0x01, 0x043)
+#define WCD9335_I2S_FLL_USER_CTL_3 WCD9335_REG(0x01, 0x044)
+#define WCD9335_I2S_FLL_USER_CTL_4 WCD9335_REG(0x01, 0x045)
+#define WCD9335_I2S_FLL_USER_CTL_5 WCD9335_REG(0x01, 0x046)
+#define WCD9335_I2S_FLL_USER_CTL_6 WCD9335_REG(0x01, 0x047)
+#define WCD9335_I2S_FLL_USER_CTL_7 WCD9335_REG(0x01, 0x048)
+#define WCD9335_I2S_FLL_USER_CTL_8 WCD9335_REG(0x01, 0x049)
+#define WCD9335_I2S_FLL_USER_CTL_9 WCD9335_REG(0x01, 0x04a)
+#define WCD9335_I2S_FLL_L_VAL_CTL_0 WCD9335_REG(0x01, 0x04b)
+#define WCD9335_I2S_FLL_L_VAL_CTL_1 WCD9335_REG(0x01, 0x04c)
+#define WCD9335_I2S_FLL_DSM_FRAC_CTL_0 WCD9335_REG(0x01, 0x04d)
+#define WCD9335_I2S_FLL_DSM_FRAC_CTL_1 WCD9335_REG(0x01, 0x04e)
+#define WCD9335_I2S_FLL_CONFIG_CTL_0 WCD9335_REG(0x01, 0x04f)
+#define WCD9335_I2S_FLL_CONFIG_CTL_1 WCD9335_REG(0x01, 0x050)
+#define WCD9335_I2S_FLL_CONFIG_CTL_2 WCD9335_REG(0x01, 0x051)
+#define WCD9335_I2S_FLL_CONFIG_CTL_3 WCD9335_REG(0x01, 0x052)
+#define WCD9335_I2S_FLL_CONFIG_CTL_4 WCD9335_REG(0x01, 0x053)
+#define WCD9335_I2S_FLL_TEST_CTL_0 WCD9335_REG(0x01, 0x054)
+#define WCD9335_I2S_FLL_TEST_CTL_1 WCD9335_REG(0x01, 0x055)
+#define WCD9335_I2S_FLL_TEST_CTL_2 WCD9335_REG(0x01, 0x056)
+#define WCD9335_I2S_FLL_TEST_CTL_3 WCD9335_REG(0x01, 0x057)
+#define WCD9335_I2S_FLL_TEST_CTL_4 WCD9335_REG(0x01, 0x058)
+#define WCD9335_I2S_FLL_TEST_CTL_5 WCD9335_REG(0x01, 0x059)
+#define WCD9335_I2S_FLL_TEST_CTL_6 WCD9335_REG(0x01, 0x05a)
+#define WCD9335_I2S_FLL_TEST_CTL_7 WCD9335_REG(0x01, 0x05b)
+#define WCD9335_I2S_FLL_FREQ_CTL_0 WCD9335_REG(0x01, 0x05c)
+#define WCD9335_I2S_FLL_FREQ_CTL_1 WCD9335_REG(0x01, 0x05d)
+#define WCD9335_I2S_FLL_FREQ_CTL_2 WCD9335_REG(0x01, 0x05e)
+#define WCD9335_I2S_FLL_FREQ_CTL_3 WCD9335_REG(0x01, 0x05f)
+#define WCD9335_I2S_FLL_SSC_CTL_0 WCD9335_REG(0x01, 0x060)
+#define WCD9335_I2S_FLL_SSC_CTL_1 WCD9335_REG(0x01, 0x061)
+#define WCD9335_I2S_FLL_SSC_CTL_2 WCD9335_REG(0x01, 0x062)
+#define WCD9335_I2S_FLL_SSC_CTL_3 WCD9335_REG(0x01, 0x063)
+#define WCD9335_I2S_FLL_FLL_MODE WCD9335_REG(0x01, 0x064)
+#define WCD9335_I2S_FLL_STATUS_0 WCD9335_REG(0x01, 0x065)
+#define WCD9335_I2S_FLL_STATUS_1 WCD9335_REG(0x01, 0x066)
+#define WCD9335_I2S_FLL_STATUS_2 WCD9335_REG(0x01, 0x067)
+#define WCD9335_I2S_FLL_STATUS_3 WCD9335_REG(0x01, 0x068)
+#define WCD9335_SB_FLL_USER_CTL_0 WCD9335_REG(0x01, 0x081)
+#define WCD9335_SB_FLL_USER_CTL_1 WCD9335_REG(0x01, 0x082)
+#define WCD9335_SB_FLL_USER_CTL_2 WCD9335_REG(0x01, 0x083)
+#define WCD9335_SB_FLL_USER_CTL_3 WCD9335_REG(0x01, 0x084)
+#define WCD9335_SB_FLL_USER_CTL_4 WCD9335_REG(0x01, 0x085)
+#define WCD9335_SB_FLL_USER_CTL_5 WCD9335_REG(0x01, 0x086)
+#define WCD9335_SB_FLL_USER_CTL_6 WCD9335_REG(0x01, 0x087)
+#define WCD9335_SB_FLL_USER_CTL_7 WCD9335_REG(0x01, 0x088)
+#define WCD9335_SB_FLL_USER_CTL_8 WCD9335_REG(0x01, 0x089)
+#define WCD9335_SB_FLL_USER_CTL_9 WCD9335_REG(0x01, 0x08a)
+#define WCD9335_SB_FLL_L_VAL_CTL_0 WCD9335_REG(0x01, 0x08b)
+#define WCD9335_SB_FLL_L_VAL_CTL_1 WCD9335_REG(0x01, 0x08c)
+#define WCD9335_SB_FLL_DSM_FRAC_CTL_0 WCD9335_REG(0x01, 0x08d)
+#define WCD9335_SB_FLL_DSM_FRAC_CTL_1 WCD9335_REG(0x01, 0x08e)
+#define WCD9335_SB_FLL_CONFIG_CTL_0 WCD9335_REG(0x01, 0x08f)
+#define WCD9335_SB_FLL_CONFIG_CTL_1 WCD9335_REG(0x01, 0x090)
+#define WCD9335_SB_FLL_CONFIG_CTL_2 WCD9335_REG(0x01, 0x091)
+#define WCD9335_SB_FLL_CONFIG_CTL_3 WCD9335_REG(0x01, 0x092)
+#define WCD9335_SB_FLL_CONFIG_CTL_4 WCD9335_REG(0x01, 0x093)
+#define WCD9335_SB_FLL_TEST_CTL_0 WCD9335_REG(0x01, 0x094)
+#define WCD9335_SB_FLL_TEST_CTL_1 WCD9335_REG(0x01, 0x095)
+#define WCD9335_SB_FLL_TEST_CTL_2 WCD9335_REG(0x01, 0x096)
+#define WCD9335_SB_FLL_TEST_CTL_3 WCD9335_REG(0x01, 0x097)
+#define WCD9335_SB_FLL_TEST_CTL_4 WCD9335_REG(0x01, 0x098)
+#define WCD9335_SB_FLL_TEST_CTL_5 WCD9335_REG(0x01, 0x099)
+#define WCD9335_SB_FLL_TEST_CTL_6 WCD9335_REG(0x01, 0x09a)
+#define WCD9335_SB_FLL_TEST_CTL_7 WCD9335_REG(0x01, 0x09b)
+#define WCD9335_SB_FLL_FREQ_CTL_0 WCD9335_REG(0x01, 0x09c)
+#define WCD9335_SB_FLL_FREQ_CTL_1 WCD9335_REG(0x01, 0x09d)
+#define WCD9335_SB_FLL_FREQ_CTL_2 WCD9335_REG(0x01, 0x09e)
+#define WCD9335_SB_FLL_FREQ_CTL_3 WCD9335_REG(0x01, 0x09f)
+#define WCD9335_SB_FLL_SSC_CTL_0 WCD9335_REG(0x01, 0x0a0)
+#define WCD9335_SB_FLL_SSC_CTL_1 WCD9335_REG(0x01, 0x0a1)
+#define WCD9335_SB_FLL_SSC_CTL_2 WCD9335_REG(0x01, 0x0a2)
+#define WCD9335_SB_FLL_SSC_CTL_3 WCD9335_REG(0x01, 0x0a3)
+#define WCD9335_SB_FLL_FLL_MODE WCD9335_REG(0x01, 0x0a4)
+#define WCD9335_SB_FLL_STATUS_0 WCD9335_REG(0x01, 0x0a5)
+#define WCD9335_SB_FLL_STATUS_1 WCD9335_REG(0x01, 0x0a6)
+#define WCD9335_SB_FLL_STATUS_2 WCD9335_REG(0x01, 0x0a7)
+#define WCD9335_SB_FLL_STATUS_3 WCD9335_REG(0x01, 0x0a8)
+
+/* Page-2 Registers */
+#define WCD9335_PAGE2_PAGE_REGISTER WCD9335_REG(0x02, 0x000)
+#define WCD9335_CPE_SS_DMIC0_CTL WCD9335_REG(0x02, 0x063)
+#define WCD9335_CPE_SS_DMIC1_CTL WCD9335_REG(0x02, 0x064)
+#define WCD9335_CPE_SS_DMIC2_CTL WCD9335_REG(0x02, 0x065)
+#define WCD9335_CPE_SS_DMIC_CFG WCD9335_REG(0x02, 0x066)
+#define WCD9335_SOC_MAD_AUDIO_CTL_2 WCD9335_REG(0x02, 0x084)
+
+/* Page-6 Registers */
+#define WCD9335_PAGE6_PAGE_REGISTER WCD9335_REG(0x06, 0x000)
+#define WCD9335_ANA_BIAS WCD9335_REG(0x06, 0x001)
+#define WCD9335_ANA_BIAS_EN_MASK BIT(7)
+#define WCD9335_ANA_BIAS_ENABLE BIT(7)
+#define WCD9335_ANA_BIAS_DISABLE 0
+#define WCD9335_ANA_BIAS_PRECHRG_EN_MASK BIT(6)
+#define WCD9335_ANA_BIAS_PRECHRG_ENABLE BIT(6)
+#define WCD9335_ANA_BIAS_PRECHRG_DISABLE 0
+#define WCD9335_ANA_BIAS_PRECHRG_CTL_MODE BIT(5)
+#define WCD9335_ANA_BIAS_PRECHRG_CTL_MODE_AUTO BIT(5)
+#define WCD9335_ANA_BIAS_PRECHRG_CTL_MODE_MANUAL 0
+#define WCD9335_ANA_CLK_TOP WCD9335_REG(0x06, 0x002)
+#define WCD9335_ANA_CLK_MCLK_EN_MASK BIT(2)
+#define WCD9335_ANA_CLK_MCLK_ENABLE BIT(2)
+#define WCD9335_ANA_CLK_MCLK_DISABLE 0
+#define WCD9335_ANA_CLK_MCLK_SRC_MASK BIT(3)
+#define WCD9335_ANA_CLK_MCLK_SRC_RCO BIT(3)
+#define WCD9335_ANA_CLK_MCLK_SRC_EXTERNAL 0
+#define WCD9335_ANA_CLK_EXT_CLKBUF_EN_MASK BIT(7)
+#define WCD9335_ANA_CLK_EXT_CLKBUF_ENABLE BIT(7)
+#define WCD9335_ANA_CLK_EXT_CLKBUF_DISABLE 0
+#define WCD9335_ANA_RCO WCD9335_REG(0x06, 0x003)
+#define WCD9335_ANA_RCO_BG_EN_MASK BIT(7)
+#define WCD9335_ANA_RCO_BG_ENABLE BIT(7)
+#define WCD9335_ANA_BUCK_VOUT_D WCD9335_REG(0x06, 0x005)
+#define WCD9335_ANA_BUCK_VOUT_MASK GENMASK(7, 0)
+#define WCD9335_ANA_BUCK_CTL WCD9335_REG(0x06, 0x006)
+#define WCD9335_ANA_BUCK_CTL_VOUT_D_IREF_MASK BIT(1)
+#define WCD9335_ANA_BUCK_CTL_VOUT_D_IREF_EXT BIT(1)
+#define WCD9335_ANA_BUCK_CTL_VOUT_D_IREF_INT 0
+#define WCD9335_ANA_BUCK_CTL_VOUT_D_VREF_MASK BIT(2)
+#define WCD9335_ANA_BUCK_CTL_VOUT_D_VREF_EXT BIT(2)
+#define WCD9335_ANA_BUCK_CTL_VOUT_D_VREF_INT 0
+#define WCD9335_ANA_BUCK_CTL_RAMP_START_MASK BIT(7)
+#define WCD9335_ANA_BUCK_CTL_RAMP_START_ENABLE BIT(7)
+#define WCD9335_ANA_BUCK_CTL_RAMP_START_DISABLE 0
+#define WCD9335_ANA_RX_SUPPLIES WCD9335_REG(0x06, 0x008)
+#define WCD9335_ANA_RX_BIAS_ENABLE_MASK BIT(0)
+#define WCD9335_ANA_RX_BIAS_ENABLE BIT(0)
+#define WCD9335_ANA_RX_BIAS_DISABLE 0
+#define WCD9335_ANA_HPH WCD9335_REG(0x06, 0x009)
+#define WCD9335_ANA_EAR WCD9335_REG(0x06, 0x00a)
+#define WCD9335_ANA_LO_1_2 WCD9335_REG(0x06, 0x00b)
+#define WCD9335_ANA_LO_3_4 WCD9335_REG(0x06, 0x00c)
+#define WCD9335_ANA_AMIC1 WCD9335_REG(0x06, 0x00e)
+#define WCD9335_ANA_AMIC2 WCD9335_REG(0x06, 0x00f)
+#define WCD9335_ANA_AMIC3 WCD9335_REG(0x06, 0x010)
+#define WCD9335_ANA_AMIC4 WCD9335_REG(0x06, 0x011)
+#define WCD9335_ANA_AMIC5 WCD9335_REG(0x06, 0x012)
+#define WCD9335_ANA_AMIC6 WCD9335_REG(0x06, 0x013)
+#define WCD9335_ANA_MBHC_MECH WCD9335_REG(0x06, 0x014)
+#define WCD9335_MBHC_L_DET_EN_MASK BIT(7)
+#define WCD9335_MBHC_L_DET_EN BIT(7)
+#define WCD9335_MBHC_GND_DET_EN_MASK BIT(6)
+#define WCD9335_MBHC_MECH_DETECT_TYPE_MASK BIT(5)
+#define WCD9335_MBHC_MECH_DETECT_TYPE_SHIFT 5
+#define WCD9335_MBHC_HPHL_PLUG_TYPE_MASK BIT(4)
+#define WCD9335_MBHC_HPHL_PLUG_TYPE_NO BIT(4)
+#define WCD9335_MBHC_GND_PLUG_TYPE_MASK BIT(3)
+#define WCD9335_MBHC_GND_PLUG_TYPE_NO BIT(3)
+#define WCD9335_MBHC_HSL_PULLUP_COMP_EN BIT(2)
+#define WCD9335_MBHC_HPHL_100K_TO_GND_EN BIT(0)
+
+#define WCD9335_ANA_MBHC_ELECT WCD9335_REG(0x06, 0x015)
+#define WCD9335_ANA_MBHC_BD_ISRC_CTL_MASK GENMASK(6, 4)
+#define WCD9335_ANA_MBHC_BD_ISRC_100UA GENMASK(5, 4)
+#define WCD9335_ANA_MBHC_BD_ISRC_OFF 0
+#define WCD9335_ANA_MBHC_BIAS_EN_MASK BIT(0)
+#define WCD9335_ANA_MBHC_BIAS_EN BIT(0)
+#define WCD9335_ANA_MBHC_ZDET WCD9335_REG(0x06, 0x016)
+#define WCD9335_ANA_MBHC_RESULT_1 WCD9335_REG(0x06, 0x017)
+#define WCD9335_ANA_MBHC_RESULT_2 WCD9335_REG(0x06, 0x018)
+#define WCD9335_ANA_MBHC_RESULT_3 WCD9335_REG(0x06, 0x019)
+#define WCD9335_MBHC_BTN_RESULT_MASK GENMASK(2, 0)
+#define WCD9335_ANA_MBHC_BTN0 WCD9335_REG(0x06, 0x01a)
+#define WCD9335_ANA_MBHC_BTN1 WCD9335_REG(0x06, 0x01b)
+#define WCD9335_ANA_MBHC_BTN2 WCD9335_REG(0x06, 0x01c)
+#define WCD9335_ANA_MBHC_BTN3 WCD9335_REG(0x06, 0x01d)
+#define WCD9335_ANA_MBHC_BTN4 WCD9335_REG(0x06, 0x01e)
+#define WCD9335_ANA_MBHC_BTN5 WCD9335_REG(0x06, 0x01f)
+#define WCD9335_ANA_MBHC_BTN6 WCD9335_REG(0x06, 0x020)
+#define WCD9335_ANA_MBHC_BTN7 WCD9335_REG(0x06, 0x021)
+#define WCD9335_ANA_MICB1 WCD9335_REG(0x06, 0x022)
+#define WCD9335_ANA_MICB2 WCD9335_REG(0x06, 0x023)
+#define WCD9335_ANA_MICB2_ENABLE BIT(6)
+#define WCD9335_ANA_MICB2_RAMP WCD9335_REG(0x06, 0x024)
+#define WCD9335_ANA_MICB3 WCD9335_REG(0x06, 0x025)
+#define WCD9335_ANA_MICB4 WCD9335_REG(0x06, 0x026)
+#define WCD9335_ANA_VBADC WCD9335_REG(0x06, 0x027)
+#define WCD9335_BIAS_VBG_FINE_ADJ WCD9335_REG(0x06, 0x029)
+#define WCD9335_RCO_CTRL_2 WCD9335_REG(0x06, 0x02f)
+#define WCD9335_SIDO_SIDO_CCL_2 WCD9335_REG(0x06, 0x042)
+#define WCD9335_SIDO_SIDO_CCL_4 WCD9335_REG(0x06, 0x044)
+#define WCD9335_SIDO_SIDO_CCL_8 WCD9335_REG(0x06, 0x048)
+#define WCD9335_SIDO_SIDO_CCL_10 WCD9335_REG(0x06, 0x04a)
+#define WCD9335_SIDO_SIDO_CCL_10_ICHARG_PWR_SEL_C320FF 0x2
+/* Comparator 1 and 2 Bias current at 1P0UA with start pulse width of C320FF */
+#define WCD9335_SIDO_SIDO_CCL_DEF_VALUE 0x6e
+#define WCD9335_SIDO_SIDO_TEST_2 WCD9335_REG(0x06, 0x055)
+#define WCD9335_MBHC_CTL_1 WCD9335_REG(0x06, 0x056)
+#define WCD9335_MBHC_BTN_DBNC_MASK GENMASK(1, 0)
+#define WCD9335_MBHC_BTN_DBNC_T_16_MS 0x2
+#define WCD9335_MBHC_CTL_RCO_EN_MASK BIT(7)
+#define WCD9335_MBHC_CTL_RCO_EN BIT(7)
+
+#define WCD9335_MBHC_CTL_2 WCD9335_REG(0x06, 0x057)
+#define WCD9335_MBHC_HS_VREF_CTL_MASK GENMASK(1, 0)
+#define WCD9335_MBHC_HS_VREF_1P5_V 0x1
+#define WCD9335_MBHC_PLUG_DETECT_CTL WCD9335_REG(0x06, 0x058)
+#define WCD9335_MBHC_HSDET_PULLUP_CTL_MASK GENMASK(7, 6)
+#define WCD9335_MBHC_HSDET_PULLUP_CTL_SHIFT 6
+#define WCD9335_MBHC_HSDET_PULLUP_CTL_1_2P0_UA 0x80
+#define WCD9335_MBHC_DBNC_TIMER_INSREM_DBNC_T_96_MS 0x6
+
+#define WCD9335_MBHC_ZDET_RAMP_CTL WCD9335_REG(0x06, 0x05a)
+#define WCD9335_VBADC_IBIAS_FE WCD9335_REG(0x06, 0x05e)
+#define WCD9335_FLYBACK_CTRL_1 WCD9335_REG(0x06, 0x0b1)
+#define WCD9335_RX_BIAS_HPH_PA WCD9335_REG(0x06, 0x0bb)
+#define WCD9335_RX_BIAS_HPH_PA_AMP_5_UA_MASK GENMASK(3, 0)
+#define WCD9335_RX_BIAS_HPH_RDACBUFF_CNP2 WCD9335_REG(0x06, 0x0bc)
+#define WCD9335_RX_BIAS_HPH_RDAC_LDO WCD9335_REG(0x06, 0x0bd)
+#define WCD9335_RX_BIAS_FLYB_BUFF WCD9335_REG(0x06, 0x0c7)
+#define WCD9335_RX_BIAS_FLYB_VPOS_5_UA_MASK GENMASK(3, 0)
+#define WCD9335_RX_BIAS_FLYB_I_0P0_UA 0
+#define WCD9335_RX_BIAS_FLYB_VNEG_5_UA_MASK GENMASK(7, 4)
+#define WCD9335_RX_BIAS_FLYB_MID_RST WCD9335_REG(0x06, 0x0c8)
+#define WCD9335_HPH_CNP_WG_CTL WCD9335_REG(0x06, 0x0cc)
+#define WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_MASK GENMASK(2, 0)
+#define WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_RATIO_500 0x2
+#define WCD9335_HPH_CNP_WG_CTL_CURR_LDIV_RATIO_1000 0x3
+#define WCD9335_HPH_OCP_CTL WCD9335_REG(0x06, 0x0ce)
+#define WCD9335_HPH_AUTO_CHOP WCD9335_REG(0x06, 0x0cf)
+#define WCD9335_HPH_AUTO_CHOP_MASK BIT(5)
+#define WCD9335_HPH_AUTO_CHOP_FORCE_ENABLE BIT(5)
+#define WCD9335_HPH_AUTO_CHOP_ENABLE_BY_CMPDR_GAIN 0
+#define WCD9335_HPH_PA_CTL1 WCD9335_REG(0x06, 0x0d1)
+#define WCD9335_HPH_PA_GM3_IB_SCALE_MASK GENMASK(3, 1)
+#define WCD9335_HPH_PA_CTL2 WCD9335_REG(0x06, 0x0d2)
+#define WCD9335_HPH_PA_CTL2_FORCE_PSRREH_MASK BIT(2)
+#define WCD9335_HPH_PA_CTL2_FORCE_PSRREH_ENABLE BIT(2)
+#define WCD9335_HPH_PA_CTL2_FORCE_PSRREH_DISABLE 0
+#define WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_MASK BIT(3)
+#define WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_ENABLE BIT(3)
+#define WCD9335_HPH_PA_CTL2_FORCE_IQCTRL_DISABLE 0
+#define WCD9335_HPH_PA_CTL2_HPH_PSRR_ENH_MASK BIT(5)
+#define WCD9335_HPH_PA_CTL2_HPH_PSRR_ENABLE BIT(5)
+#define WCD9335_HPH_PA_CTL2_HPH_PSRR_DISABLE 0
+#define WCD9335_HPH_L_EN WCD9335_REG(0x06, 0x0d3)
+#define WCD9335_HPH_CONST_SEL_L_MASK GENMASK(7, 6)
+#define WCD9335_HPH_CONST_SEL_L_BYPASS 0
+#define WCD9335_HPH_CONST_SEL_L_LP_PATH 0x40
+#define WCD9335_HPH_CONST_SEL_L_HQ_PATH 0x80
+#define WCD9335_HPH_PA_GAIN_MASK GENMASK(4, 0)
+#define WCD9335_HPH_GAIN_SRC_SEL_MASK BIT(5)
+#define WCD9335_HPH_GAIN_SRC_SEL_COMPANDER 0
+#define WCD9335_HPH_GAIN_SRC_SEL_REGISTER BIT(5)
+#define WCD9335_HPH_L_TEST WCD9335_REG(0x06, 0x0d4)
+#define WCD9335_HPH_R_EN WCD9335_REG(0x06, 0x0d6)
+#define WCD9335_HPH_R_TEST WCD9335_REG(0x06, 0x0d7)
+#define WCD9335_HPH_R_ATEST WCD9335_REG(0x06, 0x0d8)
+#define WCD9335_HPH_RDAC_LDO_CTL WCD9335_REG(0x06, 0x0db)
+#define WCD9335_HPH_RDAC_N1P65_LD_OUTCTL_MASK GENMASK(2, 0)
+#define WCD9335_HPH_RDAC_N1P65_LD_OUTCTL_V_N1P60 0x1
+#define WCD9335_HPH_RDAC_1P65_LD_OUTCTL_MASK GENMASK(6, 4)
+#define WCD9335_HPH_RDAC_1P65_LD_OUTCTL_V_N1P60 0x10
+#define WCD9335_HPH_REFBUFF_LP_CTL WCD9335_REG(0x06, 0x0de)
+#define WCD9335_HPH_L_DAC_CTL WCD9335_REG(0x06, 0x0df)
+#define WCD9335_HPH_DAC_LDO_POWERMODE_MASK BIT(0)
+#define WCD9335_HPH_DAC_LDO_POWERMODE_LOWPOWER 0
+#define WCD9335_HPH_DAC_LDO_POWERMODE_UHQA BIT(0)
+#define WCD9335_HPH_DAC_LDO_UHQA_OV_MASK BIT(1)
+#define WCD9335_HPH_DAC_LDO_UHQA_OV_ENABLE BIT(1)
+#define WCD9335_HPH_DAC_LDO_UHQA_OV_DISABLE 0
+
+#define WCD9335_EAR_CMBUFF WCD9335_REG(0x06, 0x0e2)
+#define WCD9335_DIFF_LO_LO2_COMPANDER WCD9335_REG(0x06, 0x0ea)
+#define WCD9335_DIFF_LO_LO1_COMPANDER WCD9335_REG(0x06, 0x0eb)
+#define WCD9335_DIFF_LO_COM_SWCAP_REFBUF_FREQ WCD9335_REG(0x06, 0x0f1)
+#define WCD9335_DIFF_LO_COM_PA_FREQ WCD9335_REG(0x06, 0x0f2)
+#define WCD9335_SE_LO_LO3_GAIN WCD9335_REG(0x06, 0x0f8)
+#define WCD9335_SE_LO_LO3_CTRL WCD9335_REG(0x06, 0x0f9)
+#define WCD9335_SE_LO_LO4_GAIN WCD9335_REG(0x06, 0x0fa)
+
+/* Page-10 Registers */
+#define WCD9335_CDC_TX0_TX_PATH_CTL WCD9335_REG(0x0a, 0x031)
+#define WCD9335_CDC_TX_PATH_CTL_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD9335_CDC_TX_PATH_CTL(dec) WCD9335_REG(0xa, (0x31 + dec * 0x10))
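+/*
+ * Example (illustrative only): each TX path block is spaced 0x10
+ * registers apart, so WCD9335_CDC_TX_PATH_CTL(1) evaluates to
+ * WCD9335_REG(0xa, 0x41), the same address as
+ * WCD9335_CDC_TX1_TX_PATH_CTL defined below.
+ */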
+#define WCD9335_CDC_TX0_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x032)
+#define WCD9335_CDC_TX_ADC_AMIC_DMIC_SEL_MASK BIT(7)
+#define WCD9335_CDC_TX_ADC_DMIC_SEL BIT(7)
+#define WCD9335_CDC_TX_ADC_AMIC_SEL 0
+#define WCD9335_CDC_TX0_TX_VOL_CTL WCD9335_REG(0x0a, 0x034)
+#define WCD9335_CDC_TX0_TX_PATH_SEC2 WCD9335_REG(0x0a, 0x039)
+#define WCD9335_CDC_TX0_TX_PATH_SEC7 WCD9335_REG(0x0a, 0x03e)
+#define WCD9335_CDC_TX1_TX_PATH_CTL WCD9335_REG(0x0a, 0x041)
+#define WCD9335_CDC_TX1_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x042)
+#define WCD9335_CDC_TX2_TX_PATH_CTL WCD9335_REG(0x0a, 0x051)
+#define WCD9335_CDC_TX2_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x052)
+#define WCD9335_CDC_TX2_TX_VOL_CTL WCD9335_REG(0x0a, 0x054)
+#define WCD9335_CDC_TX3_TX_PATH_CTL WCD9335_REG(0x0a, 0x061)
+#define WCD9335_CDC_TX3_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x062)
+#define WCD9335_CDC_TX3_TX_VOL_CTL WCD9335_REG(0x0a, 0x064)
+#define WCD9335_CDC_TX4_TX_PATH_CTL WCD9335_REG(0x0a, 0x071)
+#define WCD9335_CDC_TX4_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x072)
+#define WCD9335_CDC_TX4_TX_VOL_CTL WCD9335_REG(0x0a, 0x074)
+#define WCD9335_CDC_TX5_TX_PATH_CTL WCD9335_REG(0x0a, 0x081)
+#define WCD9335_CDC_TX5_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x082)
+#define WCD9335_CDC_TX5_TX_VOL_CTL WCD9335_REG(0x0a, 0x084)
+#define WCD9335_CDC_TX6_TX_PATH_CTL WCD9335_REG(0x0a, 0x091)
+#define WCD9335_CDC_TX6_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x092)
+#define WCD9335_CDC_TX6_TX_VOL_CTL WCD9335_REG(0x0a, 0x094)
+#define WCD9335_CDC_TX7_TX_PATH_CTL WCD9335_REG(0x0a, 0x0a1)
+#define WCD9335_CDC_TX7_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x0a2)
+#define WCD9335_CDC_TX7_TX_VOL_CTL WCD9335_REG(0x0a, 0x0a4)
+#define WCD9335_CDC_TX8_TX_PATH_CTL WCD9335_REG(0x0a, 0x0b1)
+#define WCD9335_CDC_TX8_TX_PATH_CFG0 WCD9335_REG(0x0a, 0x0b2)
+#define WCD9335_CDC_TX8_TX_VOL_CTL WCD9335_REG(0x0a, 0x0b4)
+#define WCD9335_CDC_TX9_SPKR_PROT_PATH_CFG0 WCD9335_REG(0x0a, 0x0c3)
+#define WCD9335_CDC_TX10_SPKR_PROT_PATH_CFG0 WCD9335_REG(0x0a, 0x0c7)
+#define WCD9335_CDC_TX11_SPKR_PROT_PATH_CFG0 WCD9335_REG(0x0a, 0x0cb)
+#define WCD9335_CDC_TX12_SPKR_PROT_PATH_CFG0 WCD9335_REG(0x0a, 0x0cf)
+
+/* Page-11 Registers */
+#define WCD9335_PAGE11_PAGE_REGISTER WCD9335_REG(0x0b, 0x000)
+#define WCD9335_CDC_COMPANDER1_CTL0 WCD9335_REG(0x0b, 0x001)
+#define WCD9335_CDC_COMPANDER1_CTL(c) WCD9335_REG(0x0b, (0x001 + c * 0x8))
+#define WCD9335_CDC_COMPANDER_CLK_EN_MASK BIT(0)
+#define WCD9335_CDC_COMPANDER_CLK_ENABLE BIT(0)
+#define WCD9335_CDC_COMPANDER_CLK_DISABLE 0
+#define WCD9335_CDC_COMPANDER_SOFT_RST_MASK BIT(1)
+#define WCD9335_CDC_COMPANDER_SOFT_RST_ENABLE BIT(1)
+#define WCD9335_CDC_COMPANDER_SOFT_RST_DISABLE 0
+#define WCD9335_CDC_COMPANDER_HALT_MASK BIT(2)
+#define WCD9335_CDC_COMPANDER_HALT BIT(2)
+#define WCD9335_CDC_COMPANDER_NOHALT 0
+#define WCD9335_CDC_COMPANDER7_CTL3 WCD9335_REG(0x0b, 0x034)
+#define WCD9335_CDC_COMPANDER7_CTL7 WCD9335_REG(0x0b, 0x038)
+#define WCD9335_CDC_COMPANDER8_CTL3 WCD9335_REG(0x0b, 0x03c)
+#define WCD9335_CDC_COMPANDER8_CTL7 WCD9335_REG(0x0b, 0x040)
+#define WCD9335_CDC_RX0_RX_PATH_CTL WCD9335_REG(0x0b, 0x041)
+#define WCD9335_CDC_RX_PGA_MUTE_EN_MASK BIT(4)
+#define WCD9335_CDC_RX_PGA_MUTE_ENABLE BIT(4)
+#define WCD9335_CDC_RX_PGA_MUTE_DISABLE 0
+#define WCD9335_CDC_RX_CLK_EN_MASK BIT(5)
+#define WCD9335_CDC_RX_CLK_ENABLE BIT(5)
+#define WCD9335_CDC_RX_CLK_DISABLE 0
+#define WCD9335_CDC_RX_RESET_MASK BIT(6)
+#define WCD9335_CDC_RX_RESET_ENABLE BIT(6)
+#define WCD9335_CDC_RX_RESET_DISABLE 0
+#define WCD9335_CDC_RX_PATH_CTL(rx) WCD9335_REG(0x0b, (0x041 + rx * 0x14))
+#define WCD9335_CDC_RX0_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x042)
+#define WCD9335_CDC_RX0_RX_PATH_CFG1 WCD9335_REG(0x0b, 0x043)
+#define WCD9335_CDC_RX0_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x044)
+#define WCD9335_CDC_RX0_RX_VOL_CTL WCD9335_REG(0x0b, 0x045)
+#define WCD9335_CDC_RX0_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x046)
+#define WCD9335_CDC_MIX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD9335_CDC_RX_PATH_MIX_CTL(rx) WCD9335_REG(0x0b, (0x46 + rx * 0x14))
+#define WCD9335_CDC_RX0_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x047)
+#define WCD9335_CDC_RX0_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x048)
+#define WCD9335_CDC_RX0_RX_PATH_SEC0 WCD9335_REG(0x0b, 0x049)
+#define WCD9335_CDC_RX0_RX_PATH_SEC7 WCD9335_REG(0x0b, 0x050)
+#define WCD9335_CDC_RX0_RX_PATH_MIX_SEC0 WCD9335_REG(0x0b, 0x051)
+#define WCD9335_CDC_RX1_RX_PATH_CTL WCD9335_REG(0x0b, 0x055)
+#define WCD9335_CDC_RX1_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x056)
+#define WCD9335_CDC_RX1_RX_PATH_CFG(c) WCD9335_REG(0x0b, (0x056 + c * 0x14))
+#define WCD9335_CDC_RX_PATH_CFG_CMP_EN_MASK BIT(1)
+#define WCD9335_CDC_RX_PATH_CFG_CMP_ENABLE BIT(1)
+#define WCD9335_CDC_RX_PATH_CFG_CMP_DISABLE 0
+#define WCD9335_CDC_RX_PATH_CFG_HD2_EN_MASK BIT(2)
+#define WCD9335_CDC_RX_PATH_CFG_HD2_ENABLE BIT(2)
+#define WCD9335_CDC_RX_PATH_CFG_HD2_DISABLE 0
+#define WCD9335_CDC_RX_PATH_CFG0_DLY_ZN_EN_MASK BIT(3)
+#define WCD9335_CDC_RX_PATH_CFG0_DLY_ZN_EN BIT(3)
+#define WCD9335_CDC_RX_PATH_CFG0_DLY_ZN_DISABLE 0
+#define WCD9335_CDC_RX1_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x058)
+#define WCD9335_CDC_RX1_RX_VOL_CTL WCD9335_REG(0x0b, 0x059)
+#define WCD9335_CDC_RX1_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x05a)
+#define WCD9335_CDC_RX1_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x05b)
+#define WCD9335_CDC_RX1_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x05c)
+#define WCD9335_CDC_RX1_RX_PATH_SEC0 WCD9335_REG(0x0b, 0x05d)
+#define WCD9335_CDC_RX1_RX_PATH_SEC3 WCD9335_REG(0x0b, 0x060)
+#define WCD9335_CDC_RX_PATH_SEC_HD2_SCALE_MASK GENMASK(1, 0)
+#define WCD9335_CDC_RX_PATH_SEC_HD2_SCALE_2 0x1
+#define WCD9335_CDC_RX_PATH_SEC_HD2_SCALE_1 0
+#define WCD9335_CDC_RX_PATH_SEC_HD2_ALPHA_MASK GENMASK(5, 2)
+#define WCD9335_CDC_RX_PATH_SEC_HD2_ALPHA_0P2500 0x10
+#define WCD9335_CDC_RX_PATH_SEC_HD2_ALPHA_0P0000 0
+#define WCD9335_CDC_RX2_RX_PATH_CTL WCD9335_REG(0x0b, 0x069)
+#define WCD9335_CDC_RX2_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x06a)
+#define WCD9335_CDC_RX2_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x06c)
+#define WCD9335_CDC_RX2_RX_VOL_CTL WCD9335_REG(0x0b, 0x06d)
+#define WCD9335_CDC_RX2_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x06e)
+#define WCD9335_CDC_RX2_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x06f)
+#define WCD9335_CDC_RX2_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x070)
+#define WCD9335_CDC_RX2_RX_PATH_SEC0 WCD9335_REG(0x0b, 0x071)
+#define WCD9335_CDC_RX_PATH_DEM_INP_SEL_MASK GENMASK(1, 0)
+#define WCD9335_CDC_RX2_RX_PATH_SEC3 WCD9335_REG(0x0b, 0x074)
+#define WCD9335_CDC_RX3_RX_PATH_CTL WCD9335_REG(0x0b, 0x07d)
+#define WCD9335_CDC_RX3_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x07e)
+#define WCD9335_CDC_RX3_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x080)
+#define WCD9335_CDC_RX3_RX_VOL_CTL WCD9335_REG(0x0b, 0x081)
+#define WCD9335_CDC_RX3_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x082)
+#define WCD9335_CDC_RX3_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x083)
+#define WCD9335_CDC_RX3_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x084)
+#define WCD9335_CDC_RX4_RX_PATH_CTL WCD9335_REG(0x0b, 0x091)
+#define WCD9335_CDC_RX4_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x092)
+#define WCD9335_CDC_RX4_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x094)
+#define WCD9335_CDC_RX4_RX_VOL_CTL WCD9335_REG(0x0b, 0x095)
+#define WCD9335_CDC_RX4_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x096)
+#define WCD9335_CDC_RX4_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x097)
+#define WCD9335_CDC_RX4_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x098)
+#define WCD9335_CDC_RX5_RX_PATH_CTL WCD9335_REG(0x0b, 0x0a5)
+#define WCD9335_CDC_RX5_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x0a6)
+#define WCD9335_CDC_RX5_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x0a8)
+#define WCD9335_CDC_RX5_RX_VOL_CTL WCD9335_REG(0x0b, 0x0a9)
+#define WCD9335_CDC_RX5_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x0aa)
+#define WCD9335_CDC_RX5_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x0ab)
+#define WCD9335_CDC_RX5_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x0ac)
+#define WCD9335_CDC_RX6_RX_PATH_CTL WCD9335_REG(0x0b, 0x0b9)
+#define WCD9335_CDC_RX6_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x0ba)
+#define WCD9335_CDC_RX6_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x0bc)
+#define WCD9335_CDC_RX6_RX_VOL_CTL WCD9335_REG(0x0b, 0x0bd)
+#define WCD9335_CDC_RX6_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x0be)
+#define WCD9335_CDC_RX6_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x0bf)
+#define WCD9335_CDC_RX6_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x0c0)
+#define WCD9335_CDC_RX7_RX_PATH_CTL WCD9335_REG(0x0b, 0x0cd)
+#define WCD9335_CDC_RX7_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x0ce)
+#define WCD9335_CDC_RX7_RX_PATH_CFG1 WCD9335_REG(0x0b, 0x0cf)
+#define WCD9335_CDC_RX7_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x0d0)
+#define WCD9335_CDC_RX7_RX_VOL_CTL WCD9335_REG(0x0b, 0x0d1)
+#define WCD9335_CDC_RX7_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x0d2)
+#define WCD9335_CDC_RX7_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x0d3)
+#define WCD9335_CDC_RX7_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x0d4)
+#define WCD9335_CDC_RX8_RX_PATH_CTL WCD9335_REG(0x0b, 0x0e1)
+#define WCD9335_CDC_RX8_RX_PATH_CFG0 WCD9335_REG(0x0b, 0x0e2)
+#define WCD9335_CDC_RX8_RX_PATH_CFG1 WCD9335_REG(0x0b, 0x0e3)
+#define WCD9335_CDC_RX8_RX_PATH_CFG2 WCD9335_REG(0x0b, 0x0e4)
+#define WCD9335_CDC_RX8_RX_VOL_CTL WCD9335_REG(0x0b, 0x0e5)
+#define WCD9335_CDC_RX8_RX_PATH_MIX_CTL WCD9335_REG(0x0b, 0x0e6)
+#define WCD9335_CDC_RX8_RX_PATH_MIX_CFG WCD9335_REG(0x0b, 0x0e7)
+#define WCD9335_CDC_RX8_RX_VOL_MIX_CTL WCD9335_REG(0x0b, 0x0e8)
+
+/* Page-12 Registers */
+#define WCD9335_PAGE12_PAGE_REGISTER WCD9335_REG(0x0c, 0x000)
+#define WCD9335_CDC_CLSH_K2_MSB WCD9335_REG(0x0c, 0x00a)
+#define WCD9335_CDC_CLSH_K2_LSB WCD9335_REG(0x0c, 0x00b)
+#define WCD9335_CDC_BOOST0_BOOST_CTL WCD9335_REG(0x0c, 0x01a)
+#define WCD9335_CDC_BOOST0_BOOST_CFG1 WCD9335_REG(0x0c, 0x01b)
+#define WCD9335_CDC_BOOST0_BOOST_CFG2 WCD9335_REG(0x0c, 0x01c)
+#define WCD9335_CDC_BOOST1_BOOST_CTL WCD9335_REG(0x0c, 0x022)
+#define WCD9335_CDC_BOOST1_BOOST_CFG1 WCD9335_REG(0x0c, 0x023)
+#define WCD9335_CDC_BOOST1_BOOST_CFG2 WCD9335_REG(0x0c, 0x024)
+
+/* Page-13 Registers */
+#define WCD9335_PAGE13_PAGE_REGISTER WCD9335_REG(0x0d, 0x000)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG0 WCD9335_REG(0x0d, 0x001)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT_CFG0(i) WCD9335_REG(0xd, (0x1 + i * 0x2))
+#define WCD9335_CDC_RX_INP_MUX_RX_INT0_CFG1 WCD9335_REG(0xd, 0x002)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT_SEL_MASK GENMASK(3, 0)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT_CFG1(i) WCD9335_REG(0xd, (0x2 + i * 0x2))
+
+#define WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG0 WCD9335_REG(0x0d, 0x003)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT1_CFG1 WCD9335_REG(0x0d, 0x004)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG0 WCD9335_REG(0x0d, 0x005)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT2_CFG1 WCD9335_REG(0x0d, 0x006)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG0 WCD9335_REG(0x0d, 0x007)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT3_CFG1 WCD9335_REG(0x0d, 0x008)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG0 WCD9335_REG(0x0d, 0x009)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT4_CFG1 WCD9335_REG(0x0d, 0x00a)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG0 WCD9335_REG(0x0d, 0x00b)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT5_CFG1 WCD9335_REG(0x0d, 0x00c)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG0 WCD9335_REG(0x0d, 0x00d)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT6_CFG1 WCD9335_REG(0x0d, 0x00e)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG0 WCD9335_REG(0x0d, 0x00f)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT7_CFG1 WCD9335_REG(0x0d, 0x010)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG0 WCD9335_REG(0x0d, 0x011)
+#define WCD9335_CDC_RX_INP_MUX_RX_INT8_CFG1 WCD9335_REG(0x0d, 0x012)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG0 WCD9335_REG(0x0d, 0x01d)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX0_CFG1 WCD9335_REG(0x0d, 0x01e)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG0 WCD9335_REG(0x0d, 0x01f)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX1_CFG1 WCD9335_REG(0x0d, 0x020)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG0 WCD9335_REG(0x0d, 0x021)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX2_CFG1 WCD9335_REG(0x0d, 0x022)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG0 WCD9335_REG(0x0d, 0x023)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX3_CFG1 WCD9335_REG(0x0d, 0x024)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX4_CFG0 WCD9335_REG(0x0d, 0x025)
+#define WCD9335_CDC_TX_INP_MUX_SEL_AMIC 0x1
+#define WCD9335_CDC_TX_INP_MUX_SEL_DMIC 0
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX5_CFG0 WCD9335_REG(0x0d, 0x026)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX6_CFG0 WCD9335_REG(0x0d, 0x027)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX7_CFG0 WCD9335_REG(0x0d, 0x028)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX8_CFG0 WCD9335_REG(0x0d, 0x029)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX10_CFG0 WCD9335_REG(0x0d, 0x02b)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX11_CFG0 WCD9335_REG(0x0d, 0x02c)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX12_CFG0 WCD9335_REG(0x0d, 0x02d)
+#define WCD9335_CDC_TX_INP_MUX_ADC_MUX13_CFG0 WCD9335_REG(0x0d, 0x02e)
+#define WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0 WCD9335_REG(0x0d, 0x03a)
+#define WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1 WCD9335_REG(0x0d, 0x03b)
+#define WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2 WCD9335_REG(0x0d, 0x03c)
+#define WCD9335_CDC_IF_ROUTER_TX_MUX_CFG3 WCD9335_REG(0x0d, 0x03d)
+#define WCD9335_CDC_CLK_RST_CTRL_MCLK_CONTROL WCD9335_REG(0x0d, 0x041)
+#define WCD9335_CDC_CLK_RST_CTRL_MCLK_EN_MASK BIT(0)
+#define WCD9335_CDC_CLK_RST_CTRL_MCLK_ENABLE BIT(0)
+#define WCD9335_CDC_CLK_RST_CTRL_MCLK_DISABLE 0
+#define WCD9335_CDC_CLK_RST_CTRL_FS_CNT_CONTROL WCD9335_REG(0x0d, 0x042)
+#define WCD9335_CDC_CLK_RST_CTRL_FS_CNT_EN_MASK BIT(0)
+#define WCD9335_CDC_CLK_RST_CTRL_FS_CNT_ENABLE BIT(0)
+#define WCD9335_CDC_CLK_RST_CTRL_FS_CNT_DISABLE 0
+#define WCD9335_CDC_TOP_TOP_CFG1 WCD9335_REG(0x0d, 0x082)
+#define WCD9335_MAX_REGISTER WCD9335_REG(0x80, 0x0FF)
+
+/* SLIMBUS Slave Registers */
+#define WCD9335_SLIM_PGD_PORT_INT_EN0 WCD9335_REG(0, 0x30)
+#define WCD9335_SLIM_PGD_PORT_INT_STATUS_RX_0 WCD9335_REG(0, 0x34)
+#define WCD9335_SLIM_PGD_PORT_INT_STATUS_RX_1 WCD9335_REG(0, 0x35)
+#define WCD9335_SLIM_PGD_PORT_INT_STATUS_TX_0 WCD9335_REG(0, 0x36)
+#define WCD9335_SLIM_PGD_PORT_INT_STATUS_TX_1 WCD9335_REG(0, 0x37)
+#define WCD9335_SLIM_PGD_PORT_INT_CLR_RX_0 WCD9335_REG(0, 0x38)
+#define WCD9335_SLIM_PGD_PORT_INT_CLR_RX_1 WCD9335_REG(0, 0x39)
+#define WCD9335_SLIM_PGD_PORT_INT_CLR_TX_0 WCD9335_REG(0, 0x3A)
+#define WCD9335_SLIM_PGD_PORT_INT_CLR_TX_1 WCD9335_REG(0, 0x3B)
+#define WCD9335_SLIM_PGD_PORT_INT_RX_SOURCE0 WCD9335_REG(0, 0x60)
+#define WCD9335_SLIM_PGD_PORT_INT_TX_SOURCE0 WCD9335_REG(0, 0x70)
+#define WCD9335_SLIM_PGD_RX_PORT_CFG(p) WCD9335_REG(0, (0x30 + p))
+#define WCD9335_SLIM_PGD_PORT_CFG(p) WCD9335_REG(0, (0x40 + p))
+#define WCD9335_SLIM_PGD_TX_PORT_CFG(p) WCD9335_REG(0, (0x50 + p))
+#define WCD9335_SLIM_PGD_PORT_INT_SRC(p) WCD9335_REG(0, (0x60 + p))
+#define WCD9335_SLIM_PGD_PORT_INT_STATUS(p) WCD9335_REG(0, (0x80 + p))
+#define WCD9335_SLIM_PGD_TX_PORT_MULTI_CHNL_0(p) WCD9335_REG(0, (0x100 + 4 * p))
+/* ports range from 10-16 */
+#define WCD9335_SLIM_PGD_TX_PORT_MULTI_CHNL_1(p) WCD9335_REG(0, (0x101 + 4 * p))
+#define WCD9335_SLIM_PGD_RX_PORT_MULTI_CHNL_0(p) WCD9335_REG(0, (0x140 + 4 * p))
+
+#define WCD9335_IRQ_SLIMBUS 0
+#define WCD9335_IRQ_MBHC_SW_DET 8
+#define WCD9335_IRQ_MBHC_ELECT_INS_REM_DET 9
+#define WCD9335_IRQ_MBHC_BUTTON_PRESS_DET 10
+#define WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET 11
+#define WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET 12
+
+#define SLIM_MANF_ID_QCOM 0x217
+#define SLIM_PROD_CODE_WCD9335 0x1a0
+
+#define WCD9335_VERSION_2_0 2
+#define WCD9335_MAX_SUPPLY 5
+
+#endif /* __WCD9335_H__ */
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 7e817e1877c2..4466e195b66d 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1214,105 +1214,105 @@ SND_SOC_DAPM_PGA("ISRC2DEC2", ARIZONA_ISRC_2_CTRL_3,
SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 1,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 2,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 3,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 4,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 5,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 6,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 7,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 1,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 2,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 3,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 4,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 5,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 6,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 7,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 1,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 1,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF3TX1", NULL, 0,
ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 1,
ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 1,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX1", NULL, 0,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 1,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 2,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 3,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 4,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 5,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 6,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 7,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX1", NULL, 0,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 1,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 2,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 3,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 4,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 5,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 6,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 7,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX8_ENA_SHIFT, 0),
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index b0789a03d699..b25877fa529d 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1348,122 +1348,122 @@ SND_SOC_DAPM_MUX("SPKDAT2R ANC Source", SND_SOC_NOPM, 0, 0,
SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 1,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 2,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 3,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 4,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 5,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 6,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 7,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 1,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 2,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 3,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 4,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 5,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 6,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 7,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 1,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX3", NULL, 2,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX4", NULL, 3,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX5", NULL, 4,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX6", NULL, 5,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 1,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX3", NULL, 2,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX4", NULL, 3,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX5", NULL, 4,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX6", NULL, 5,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX1", NULL, 0,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 1,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 2,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 3,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 4,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 5,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 6,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 7,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX1", NULL, 0,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 1,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 2,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 3,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 4,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 5,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 6,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 7,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF3TX1", NULL, 0,
ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 1,
ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 1,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index 1fedf74da705..546ea735f534 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -196,7 +196,7 @@ static int wm8741_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_component *component = dai->component;
struct wm8741_priv *wm8741 = snd_soc_component_get_drvdata(component);
- unsigned int iface;
+ unsigned int iface, mode;
int i;
/* The set of sample rates that can be supported depends on the
@@ -240,11 +240,21 @@ static int wm8741_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ /* oversampling rate */
+ if (params_rate(params) > 96000)
+ mode = 0x40;
+ else if (params_rate(params) > 48000)
+ mode = 0x20;
+ else
+ mode = 0x00;
+
dev_dbg(component->dev, "wm8741_hw_params: bit size param = %d, rate param = %d",
params_width(params), params_rate(params));
snd_soc_component_update_bits(component, WM8741_FORMAT_CONTROL, WM8741_IWL_MASK,
iface);
+ snd_soc_component_update_bits(component, WM8741_MODE_CONTROL_1, WM8741_OSR_MASK,
+ mode);
return 0;
}
@@ -358,6 +368,15 @@ static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
return 0;
}
+static int wm8741_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+ struct snd_soc_component *component = codec_dai->component;
+
+ snd_soc_component_update_bits(component, WM8741_VOLUME_CONTROL,
+ WM8741_SOFT_MASK, !!mute << WM8741_SOFT_SHIFT);
+ return 0;
+}
+
#define WM8741_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | \
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
@@ -371,6 +390,7 @@ static const struct snd_soc_dai_ops wm8741_dai_ops = {
.hw_params = wm8741_hw_params,
.set_sysclk = wm8741_set_dai_sysclk,
.set_fmt = wm8741_set_dai_fmt,
+ .digital_mute = wm8741_mute,
};
static struct snd_soc_dai_driver wm8741_dai = {
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index 806245c70f8b..37467c512597 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -666,8 +666,9 @@ static int wm8770_spi_probe(struct spi_device *spi)
/* This should really be moved into the regulator core */
for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) {
- ret = regulator_register_notifier(wm8770->supplies[i].consumer,
- &wm8770->disable_nb[i]);
+ ret = devm_regulator_register_notifier(
+ wm8770->supplies[i].consumer,
+ &wm8770->disable_nb[i]);
if (ret) {
dev_err(&spi->dev,
"Failed to register regulator notifier: %d\n",
@@ -687,25 +688,12 @@ static int wm8770_spi_probe(struct spi_device *spi)
return ret;
}
-static int wm8770_spi_remove(struct spi_device *spi)
-{
- struct wm8770_priv *wm8770 = spi_get_drvdata(spi);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i)
- regulator_unregister_notifier(wm8770->supplies[i].consumer,
- &wm8770->disable_nb[i]);
-
- return 0;
-}
-
static struct spi_driver wm8770_spi_driver = {
.driver = {
.name = "wm8770",
.of_match_table = wm8770_of_match,
},
.probe = wm8770_spi_probe,
- .remove = wm8770_spi_remove
};
module_spi_driver(wm8770_spi_driver);
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 2a3e5fbd04e4..9e0f96e0f8ec 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1837,9 +1837,6 @@ static int wm8904_set_bias_level(struct snd_soc_component *component,
switch (level) {
case SND_SOC_BIAS_ON:
- ret = clk_prepare_enable(wm8904->mclk);
- if (ret)
- return ret;
break;
case SND_SOC_BIAS_PREPARE:
@@ -1864,6 +1861,15 @@ static int wm8904_set_bias_level(struct snd_soc_component *component,
return ret;
}
+ ret = clk_prepare_enable(wm8904->mclk);
+ if (ret) {
+ dev_err(component->dev,
+ "Failed to enable MCLK: %d\n", ret);
+ regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies),
+ wm8904->supplies);
+ return ret;
+ }
+
regcache_cache_only(wm8904->regmap, false);
regcache_sync(wm8904->regmap);
@@ -2108,16 +2114,13 @@ static const struct regmap_config wm8904_regmap = {
};
#ifdef CONFIG_OF
-static enum wm8904_type wm8904_data = WM8904;
-static enum wm8904_type wm8912_data = WM8912;
-
static const struct of_device_id wm8904_of_match[] = {
{
.compatible = "wlf,wm8904",
- .data = &wm8904_data,
+ .data = (void *)WM8904,
}, {
.compatible = "wlf,wm8912",
- .data = &wm8912_data,
+ .data = (void *)WM8912,
}, {
/* sentinel */
}
@@ -2158,7 +2161,7 @@ static int wm8904_i2c_probe(struct i2c_client *i2c,
match = of_match_node(wm8904_of_match, i2c->dev.of_node);
if (match == NULL)
return -EINVAL;
- wm8904->devtype = *((enum wm8904_type *)match->data);
+ wm8904->devtype = (enum wm8904_type)match->data;
} else {
wm8904->devtype = id->driver_data;
}
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index efd8910b1ff7..467ed78dd2df 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3424,8 +3424,9 @@ static int wm8962_probe(struct snd_soc_component *component)
/* This should really be moved into the regulator core */
for (i = 0; i < ARRAY_SIZE(wm8962->supplies); i++) {
- ret = regulator_register_notifier(wm8962->supplies[i].consumer,
- &wm8962->disable_nb[i]);
+ ret = devm_regulator_register_notifier(
+ wm8962->supplies[i].consumer,
+ &wm8962->disable_nb[i]);
if (ret != 0) {
dev_err(component->dev,
"Failed to register regulator notifier: %d\n",
@@ -3467,15 +3468,11 @@ static int wm8962_probe(struct snd_soc_component *component)
static void wm8962_remove(struct snd_soc_component *component)
{
struct wm8962_priv *wm8962 = snd_soc_component_get_drvdata(component);
- int i;
cancel_delayed_work_sync(&wm8962->mic_work);
wm8962_free_gpio(component);
wm8962_free_beep(component);
- for (i = 0; i < ARRAY_SIZE(wm8962->supplies); i++)
- regulator_unregister_notifier(wm8962->supplies[i].consumer,
- &wm8962->disable_nb[i]);
}
static const struct snd_soc_component_driver soc_component_dev_wm8962 = {
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 68c99fe37097..79ee91906bb9 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -1995,20 +1995,6 @@ static int wm8995_set_bias_level(struct snd_soc_component *component,
return 0;
}
-static void wm8995_remove(struct snd_soc_component *component)
-{
- struct wm8995_priv *wm8995;
- int i;
-
- wm8995 = snd_soc_component_get_drvdata(component);
-
- for (i = 0; i < ARRAY_SIZE(wm8995->supplies); ++i)
- regulator_unregister_notifier(wm8995->supplies[i].consumer,
- &wm8995->disable_nb[i]);
-
- regulator_bulk_free(ARRAY_SIZE(wm8995->supplies), wm8995->supplies);
-}
-
static int wm8995_probe(struct snd_soc_component *component)
{
struct wm8995_priv *wm8995;
@@ -2021,8 +2007,9 @@ static int wm8995_probe(struct snd_soc_component *component)
for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++)
wm8995->supplies[i].supply = wm8995_supply_names[i];
- ret = regulator_bulk_get(component->dev, ARRAY_SIZE(wm8995->supplies),
- wm8995->supplies);
+ ret = devm_regulator_bulk_get(component->dev,
+ ARRAY_SIZE(wm8995->supplies),
+ wm8995->supplies);
if (ret) {
dev_err(component->dev, "Failed to request supplies: %d\n", ret);
return ret;
@@ -2039,8 +2026,9 @@ static int wm8995_probe(struct snd_soc_component *component)
/* This should really be moved into the regulator core */
for (i = 0; i < ARRAY_SIZE(wm8995->supplies); i++) {
- ret = regulator_register_notifier(wm8995->supplies[i].consumer,
- &wm8995->disable_nb[i]);
+ ret = devm_regulator_register_notifier(
+ wm8995->supplies[i].consumer,
+ &wm8995->disable_nb[i]);
if (ret) {
dev_err(component->dev,
"Failed to register regulator notifier: %d\n",
@@ -2052,7 +2040,7 @@ static int wm8995_probe(struct snd_soc_component *component)
wm8995->supplies);
if (ret) {
dev_err(component->dev, "Failed to enable supplies: %d\n", ret);
- goto err_reg_get;
+ return ret;
}
ret = snd_soc_component_read32(component, WM8995_SOFTWARE_RESET);
@@ -2099,8 +2087,6 @@ static int wm8995_probe(struct snd_soc_component *component)
err_reg_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8995->supplies), wm8995->supplies);
-err_reg_get:
- regulator_bulk_free(ARRAY_SIZE(wm8995->supplies), wm8995->supplies);
return ret;
}
@@ -2188,7 +2174,6 @@ static struct snd_soc_dai_driver wm8995_dai[] = {
static const struct snd_soc_component_driver soc_component_dev_wm8995 = {
.probe = wm8995_probe,
- .remove = wm8995_remove,
.set_bias_level = wm8995_set_bias_level,
.controls = wm8995_snd_controls,
.num_controls = ARRAY_SIZE(wm8995_snd_controls),
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 91711f8958c5..ab04ea18c312 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -2801,8 +2801,9 @@ static int wm8996_i2c_probe(struct i2c_client *i2c,
/* This should really be moved into the regulator core */
for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) {
- ret = regulator_register_notifier(wm8996->supplies[i].consumer,
- &wm8996->disable_nb[i]);
+ ret = devm_regulator_register_notifier(
+ wm8996->supplies[i].consumer,
+ &wm8996->disable_nb[i]);
if (ret != 0) {
dev_err(&i2c->dev,
"Failed to register regulator notifier: %d\n",
@@ -3071,16 +3072,12 @@ err:
static int wm8996_i2c_remove(struct i2c_client *client)
{
struct wm8996_priv *wm8996 = i2c_get_clientdata(client);
- int i;
wm8996_free_gpio(wm8996);
if (wm8996->pdata.ldo_ena > 0) {
gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0);
gpio_free(wm8996->pdata.ldo_ena);
}
- for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
- regulator_unregister_notifier(wm8996->supplies[i].consumer,
- &wm8996->disable_nb[i]);
return 0;
}
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index df5b36b8fc5a..33e3dc1a1367 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -516,95 +516,95 @@ SND_SOC_DAPM_PGA("ISRC2DEC2", ARIZONA_ISRC_2_CTRL_3,
SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 1,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 2,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 3,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 4,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 5,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 6,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 7,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 1,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 2,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 3,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 4,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 5,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 6,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 7,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 1,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 1,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX1", NULL, 0,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 1,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 2,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 3,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 4,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 5,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 6,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 7,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX1", NULL, 0,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 1,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 2,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 3,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 4,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 5,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX6_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 6,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX7_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 7,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX8_ENA_SHIFT, 0),
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 409bed30a4e4..125fc32ad92a 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -626,96 +626,96 @@ SND_SOC_DAPM_MUX("AEC2 Loopback", ARIZONA_DAC_AEC_CONTROL_2,
SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 1,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 2,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 3,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 4,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 5,
ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 1,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 2,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 3,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 4,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 5,
ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 1,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX3", NULL, 2,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX4", NULL, 3,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX5", NULL, 4,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF2TX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF2TX6", NULL, 5,
ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 1,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX3", NULL, 2,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX4", NULL, 3,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX5", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX5", NULL, 4,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF2RX6", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF2RX6", NULL, 5,
ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("SLIMRX1", NULL, 0,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 1,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 2,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 0,
+SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 3,
ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
ARIZONA_SLIMRX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("SLIMTX1", NULL, 0,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 1,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX2_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 2,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX3_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 3,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX4_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 4,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX5_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 5,
ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
ARIZONA_SLIMTX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF3TX1", NULL, 0,
ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 0,
+SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 1,
ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX1_ENA_SHIFT, 0),
-SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
+SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 1,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 1dd291cebe67..b93fdc8d2d6f 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -46,6 +46,13 @@
#define adsp_dbg(_dsp, fmt, ...) \
dev_dbg(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define compr_err(_obj, fmt, ...) \
+ adsp_err(_obj->dsp, "%s: " fmt, _obj->name ? _obj->name : "legacy", \
+ ##__VA_ARGS__)
+#define compr_dbg(_obj, fmt, ...) \
+ adsp_dbg(_obj->dsp, "%s: " fmt, _obj->name ? _obj->name : "legacy", \
+ ##__VA_ARGS__)
+
#define ADSP1_CONTROL_1 0x00
#define ADSP1_CONTROL_2 0x02
#define ADSP1_CONTROL_3 0x03
@@ -310,6 +317,12 @@ struct wm_adsp_alg_xm_struct {
__be64 smoothed_power;
};
+struct wm_adsp_host_buf_coeff_v1 {
+ __be32 host_buf_ptr; /* Host buffer pointer */
+ __be32 versions; /* Version numbers */
+ __be32 name[4]; /* The buffer name */
+};
+
struct wm_adsp_buffer {
__be32 buf1_base; /* Base addr of first buffer area */
__be32 buf1_size; /* Size of buf1 area in DSP words */
@@ -334,6 +347,7 @@ struct wm_adsp_buffer {
struct wm_adsp_compr;
struct wm_adsp_compr_buf {
+ struct list_head list;
struct wm_adsp *dsp;
struct wm_adsp_compr *compr;
@@ -344,9 +358,13 @@ struct wm_adsp_compr_buf {
u32 irq_count;
int read_index;
int avail;
+ int host_buf_mem_type;
+
+ char *name;
};
struct wm_adsp_compr {
+ struct list_head list;
struct wm_adsp *dsp;
struct wm_adsp_compr_buf *buf;
@@ -357,6 +375,8 @@ struct wm_adsp_compr {
unsigned int copied_total;
unsigned int sample_rate;
+
+ const char *name;
};
#define WM_ADSP_DATA_WORD_SIZE 3
@@ -374,6 +394,11 @@ struct wm_adsp_compr {
#define ALG_XM_FIELD(field) \
(offsetof(struct wm_adsp_alg_xm_struct, field) / sizeof(__be32))
+#define HOST_BUF_COEFF_SUPPORTED_COMPAT_VER 1
+
+#define HOST_BUF_COEFF_COMPAT_VER_MASK 0xFF00
+#define HOST_BUF_COEFF_COMPAT_VER_SHIFT 8
+
static int wm_adsp_buffer_init(struct wm_adsp *dsp);
static int wm_adsp_buffer_free(struct wm_adsp *dsp);
@@ -707,7 +732,7 @@ int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
mutex_lock(&dsp[e->shift_l].pwr_lock);
- if (dsp[e->shift_l].booted || dsp[e->shift_l].compr)
+ if (dsp[e->shift_l].booted || !list_empty(&dsp[e->shift_l].compr_list))
ret = -EBUSY;
else
dsp[e->shift_l].fw = ucontrol->value.enumerated.item[0];
@@ -2429,6 +2454,8 @@ static int wm_adsp_common_init(struct wm_adsp *dsp)
INIT_LIST_HEAD(&dsp->alg_regions);
INIT_LIST_HEAD(&dsp->ctl_list);
+ INIT_LIST_HEAD(&dsp->compr_list);
+ INIT_LIST_HEAD(&dsp->buffer_list);
mutex_init(&dsp->pwr_lock);
@@ -2971,14 +2998,19 @@ static inline int wm_adsp_compr_attached(struct wm_adsp_compr *compr)
static int wm_adsp_compr_attach(struct wm_adsp_compr *compr)
{
- /*
- * Note this will be more complex once each DSP can support multiple
- * streams
- */
- if (!compr->dsp->buffer)
+ struct wm_adsp_compr_buf *buf = NULL, *tmp;
+
+ list_for_each_entry(tmp, &compr->dsp->buffer_list, list) {
+ if (!tmp->name || !strcmp(compr->name, tmp->name)) {
+ buf = tmp;
+ break;
+ }
+ }
+
+ if (!buf)
return -EINVAL;
- compr->buf = compr->dsp->buffer;
+ compr->buf = buf;
compr->buf->compr = compr;
return 0;
@@ -3001,28 +3033,33 @@ static void wm_adsp_compr_detach(struct wm_adsp_compr *compr)
int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream)
{
- struct wm_adsp_compr *compr;
+ struct wm_adsp_compr *compr, *tmp;
+ struct snd_soc_pcm_runtime *rtd = stream->private_data;
int ret = 0;
mutex_lock(&dsp->pwr_lock);
if (wm_adsp_fw[dsp->fw].num_caps == 0) {
- adsp_err(dsp, "Firmware does not support compressed API\n");
+ adsp_err(dsp, "%s: Firmware does not support compressed API\n",
+ rtd->codec_dai->name);
ret = -ENXIO;
goto out;
}
if (wm_adsp_fw[dsp->fw].compr_direction != stream->direction) {
- adsp_err(dsp, "Firmware does not support stream direction\n");
+ adsp_err(dsp, "%s: Firmware does not support stream direction\n",
+ rtd->codec_dai->name);
ret = -EINVAL;
goto out;
}
- if (dsp->compr) {
- /* It is expect this limitation will be removed in future */
- adsp_err(dsp, "Only a single stream supported per DSP\n");
- ret = -EBUSY;
- goto out;
+ list_for_each_entry(tmp, &dsp->compr_list, list) {
+ if (!strcmp(tmp->name, rtd->codec_dai->name)) {
+ adsp_err(dsp, "%s: Only a single stream supported per dai\n",
+ rtd->codec_dai->name);
+ ret = -EBUSY;
+ goto out;
+ }
}
compr = kzalloc(sizeof(*compr), GFP_KERNEL);
@@ -3033,8 +3070,9 @@ int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream)
compr->dsp = dsp;
compr->stream = stream;
+ compr->name = rtd->codec_dai->name;
- dsp->compr = compr;
+ list_add_tail(&compr->list, &dsp->compr_list);
stream->runtime->private_data = compr;
@@ -3053,7 +3091,7 @@ int wm_adsp_compr_free(struct snd_compr_stream *stream)
mutex_lock(&dsp->pwr_lock);
wm_adsp_compr_detach(compr);
- dsp->compr = NULL;
+ list_del(&compr->list);
kfree(compr->raw_buf);
kfree(compr);
@@ -3078,9 +3116,9 @@ static int wm_adsp_compr_check_params(struct snd_compr_stream *stream,
params->buffer.fragments < WM_ADSP_MIN_FRAGMENTS ||
params->buffer.fragments > WM_ADSP_MAX_FRAGMENTS ||
params->buffer.fragment_size % WM_ADSP_DATA_WORD_SIZE) {
- adsp_err(dsp, "Invalid buffer fragsize=%d fragments=%d\n",
- params->buffer.fragment_size,
- params->buffer.fragments);
+ compr_err(compr, "Invalid buffer fragsize=%d fragments=%d\n",
+ params->buffer.fragment_size,
+ params->buffer.fragments);
return -EINVAL;
}
@@ -3108,9 +3146,9 @@ static int wm_adsp_compr_check_params(struct snd_compr_stream *stream,
return 0;
}
- adsp_err(dsp, "Invalid params id=%u ch=%u,%u rate=%u fmt=%u\n",
- params->codec.id, params->codec.ch_in, params->codec.ch_out,
- params->codec.sample_rate, params->codec.format);
+ compr_err(compr, "Invalid params id=%u ch=%u,%u rate=%u fmt=%u\n",
+ params->codec.id, params->codec.ch_in, params->codec.ch_out,
+ params->codec.sample_rate, params->codec.format);
return -EINVAL;
}
@@ -3132,8 +3170,8 @@ int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
compr->size = params->buffer;
- adsp_dbg(compr->dsp, "fragment_size=%d fragments=%d\n",
- compr->size.fragment_size, compr->size.fragments);
+ compr_dbg(compr, "fragment_size=%d fragments=%d\n",
+ compr->size.fragment_size, compr->size.fragments);
size = wm_adsp_compr_frag_words(compr) * sizeof(*compr->raw_buf);
compr->raw_buf = kmalloc(size, GFP_DMA | GFP_KERNEL);
@@ -3219,24 +3257,106 @@ static int wm_adsp_write_data_word(struct wm_adsp *dsp, int mem_type,
static inline int wm_adsp_buffer_read(struct wm_adsp_compr_buf *buf,
unsigned int field_offset, u32 *data)
{
- return wm_adsp_read_data_word(buf->dsp, WMFW_ADSP2_XM,
+ return wm_adsp_read_data_word(buf->dsp, buf->host_buf_mem_type,
buf->host_buf_ptr + field_offset, data);
}
static inline int wm_adsp_buffer_write(struct wm_adsp_compr_buf *buf,
unsigned int field_offset, u32 data)
{
- return wm_adsp_write_data_word(buf->dsp, WMFW_ADSP2_XM,
+ return wm_adsp_write_data_word(buf->dsp, buf->host_buf_mem_type,
buf->host_buf_ptr + field_offset, data);
}
-static int wm_adsp_legacy_host_buf_addr(struct wm_adsp_compr_buf *buf)
+static void wm_adsp_remove_padding(u32 *buf, int nwords, int data_word_size)
+{
+ u8 *pack_in = (u8 *)buf;
+ u8 *pack_out = (u8 *)buf;
+ int i, j;
+
+ /* Remove the padding bytes from the data read from the DSP */
+ for (i = 0; i < nwords; i++) {
+ for (j = 0; j < data_word_size; j++)
+ *pack_out++ = *pack_in++;
+
+ pack_in += sizeof(*buf) - data_word_size;
+ }
+}
+
+static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+{
+ const struct wm_adsp_fw_caps *caps = wm_adsp_fw[buf->dsp->fw].caps;
+ struct wm_adsp_buffer_region *region;
+ u32 offset = 0;
+ int i, ret;
+
+ buf->regions = kcalloc(caps->num_regions, sizeof(*buf->regions),
+ GFP_KERNEL);
+ if (!buf->regions)
+ return -ENOMEM;
+
+ for (i = 0; i < caps->num_regions; ++i) {
+ region = &buf->regions[i];
+
+ region->offset = offset;
+ region->mem_type = caps->region_defs[i].mem_type;
+
+ ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
+ &region->base_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
+ &offset);
+ if (ret < 0)
+ return ret;
+
+ region->cumulative_size = offset;
+
+ compr_dbg(buf,
+ "region=%d type=%d base=%08x off=%08x size=%08x\n",
+ i, region->mem_type, region->base_addr,
+ region->offset, region->cumulative_size);
+ }
+
+ return 0;
+}
+
+static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
+{
+ buf->irq_count = 0xFFFFFFFF;
+ buf->read_index = -1;
+ buf->avail = 0;
+}
+
+static struct wm_adsp_compr_buf *wm_adsp_buffer_alloc(struct wm_adsp *dsp)
+{
+ struct wm_adsp_compr_buf *buf;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->dsp = dsp;
+
+ wm_adsp_buffer_clear(buf);
+
+ list_add_tail(&buf->list, &dsp->buffer_list);
+
+ return buf;
+}
+
+static int wm_adsp_buffer_parse_legacy(struct wm_adsp *dsp)
{
struct wm_adsp_alg_region *alg_region;
- struct wm_adsp *dsp = buf->dsp;
+ struct wm_adsp_compr_buf *buf;
u32 xmalg, addr, magic;
int i, ret;
+ buf = wm_adsp_buffer_alloc(dsp);
+ if (!buf)
+ return -ENOMEM;
+
alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
xmalg = sizeof(struct wm_adsp_system_config_xm_hdr) / sizeof(__be32);
@@ -3246,7 +3366,7 @@ static int wm_adsp_legacy_host_buf_addr(struct wm_adsp_compr_buf *buf)
return ret;
if (magic != WM_ADSP_ALG_XM_STRUCT_MAGIC)
- return -EINVAL;
+ return -ENODEV;
addr = alg_region->base + xmalg + ALG_XM_FIELD(host_buf_ptr);
for (i = 0; i < 5; ++i) {
@@ -3264,48 +3384,30 @@ static int wm_adsp_legacy_host_buf_addr(struct wm_adsp_compr_buf *buf)
if (!buf->host_buf_ptr)
return -EIO;
- adsp_dbg(dsp, "host_buf_ptr=%x\n", buf->host_buf_ptr);
-
- return 0;
-}
-
-static struct wm_coeff_ctl *
-wm_adsp_find_host_buffer_ctrl(struct wm_adsp_compr_buf *buf)
-{
- struct wm_adsp *dsp = buf->dsp;
- struct wm_coeff_ctl *ctl;
-
- list_for_each_entry(ctl, &dsp->ctl_list, list) {
- if (ctl->type != WMFW_CTL_TYPE_HOST_BUFFER)
- continue;
+ buf->host_buf_mem_type = WMFW_ADSP2_XM;
- if (!ctl->enabled)
- continue;
+ ret = wm_adsp_buffer_populate(buf);
+ if (ret < 0)
+ return ret;
- return ctl;
- }
+ compr_dbg(buf, "legacy host_buf_ptr=%x\n", buf->host_buf_ptr);
- return NULL;
+ return 0;
}
-static int wm_adsp_buffer_locate(struct wm_adsp_compr_buf *buf)
+static int wm_adsp_buffer_parse_coeff(struct wm_coeff_ctl *ctl)
{
- struct wm_adsp *dsp = buf->dsp;
- struct wm_coeff_ctl *ctl;
- unsigned int reg;
- u32 val;
- int i, ret;
-
- ctl = wm_adsp_find_host_buffer_ctrl(buf);
- if (!ctl)
- return wm_adsp_legacy_host_buf_addr(buf);
+ struct wm_adsp_host_buf_coeff_v1 coeff_v1;
+ struct wm_adsp_compr_buf *buf;
+ unsigned int val, reg;
+ int ret, i;
ret = wm_coeff_base_reg(ctl, &reg);
if (ret)
return ret;
for (i = 0; i < 5; ++i) {
- ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ ret = regmap_raw_read(ctl->dsp->regmap, reg, &val, sizeof(val));
if (ret < 0)
return ret;
@@ -3315,108 +3417,130 @@ static int wm_adsp_buffer_locate(struct wm_adsp_compr_buf *buf)
usleep_range(1000, 2000);
}
- if (!val)
+ if (!val) {
+ adsp_err(ctl->dsp, "Failed to acquire host buffer\n");
return -EIO;
+ }
+
+ buf = wm_adsp_buffer_alloc(ctl->dsp);
+ if (!buf)
+ return -ENOMEM;
+ buf->host_buf_mem_type = ctl->alg_region.type;
buf->host_buf_ptr = be32_to_cpu(val);
- adsp_dbg(dsp, "host_buf_ptr=%x\n", buf->host_buf_ptr);
- return 0;
-}
+ ret = wm_adsp_buffer_populate(buf);
+ if (ret < 0)
+ return ret;
-static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
-{
- const struct wm_adsp_fw_caps *caps = wm_adsp_fw[buf->dsp->fw].caps;
- struct wm_adsp_buffer_region *region;
- u32 offset = 0;
- int i, ret;
+ /*
+ * v0 host_buffer coefficients didn't have versioning, so if the
+ * control is one word, assume version 0.
+ */
+ if (ctl->len == 4) {
+ compr_dbg(buf, "host_buf_ptr=%x\n", buf->host_buf_ptr);
+ return 0;
+ }
- for (i = 0; i < caps->num_regions; ++i) {
- region = &buf->regions[i];
+ ret = regmap_raw_read(ctl->dsp->regmap, reg, &coeff_v1,
+ sizeof(coeff_v1));
+ if (ret < 0)
+ return ret;
- region->offset = offset;
- region->mem_type = caps->region_defs[i].mem_type;
+ coeff_v1.versions = be32_to_cpu(coeff_v1.versions);
+ val = coeff_v1.versions & HOST_BUF_COEFF_COMPAT_VER_MASK;
+ val >>= HOST_BUF_COEFF_COMPAT_VER_SHIFT;
- ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
- &region->base_addr);
- if (ret < 0)
- return ret;
+ if (val > HOST_BUF_COEFF_SUPPORTED_COMPAT_VER) {
+ adsp_err(ctl->dsp,
+ "Host buffer coeff ver %u > supported version %u\n",
+ val, HOST_BUF_COEFF_SUPPORTED_COMPAT_VER);
+ return -EINVAL;
+ }
- ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
- &offset);
- if (ret < 0)
- return ret;
+ for (i = 0; i < ARRAY_SIZE(coeff_v1.name); i++)
+ coeff_v1.name[i] = be32_to_cpu(coeff_v1.name[i]);
- region->cumulative_size = offset;
+ wm_adsp_remove_padding((u32 *)&coeff_v1.name,
+ ARRAY_SIZE(coeff_v1.name),
+ WM_ADSP_DATA_WORD_SIZE);
- adsp_dbg(buf->dsp,
- "region=%d type=%d base=%08x off=%08x size=%08x\n",
- i, region->mem_type, region->base_addr,
- region->offset, region->cumulative_size);
- }
+ buf->name = kasprintf(GFP_KERNEL, "%s-dsp-%s", ctl->dsp->part,
+ (char *)&coeff_v1.name);
- return 0;
-}
+ compr_dbg(buf, "host_buf_ptr=%x coeff version %u\n",
+ buf->host_buf_ptr, val);
-static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
-{
- buf->irq_count = 0xFFFFFFFF;
- buf->read_index = -1;
- buf->avail = 0;
+ return val;
}
static int wm_adsp_buffer_init(struct wm_adsp *dsp)
{
- struct wm_adsp_compr_buf *buf;
+ struct wm_coeff_ctl *ctl;
int ret;
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- buf->dsp = dsp;
-
- wm_adsp_buffer_clear(buf);
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->type != WMFW_CTL_TYPE_HOST_BUFFER)
+ continue;
- ret = wm_adsp_buffer_locate(buf);
- if (ret < 0) {
- adsp_err(dsp, "Failed to acquire host buffer: %d\n", ret);
- goto err_buffer;
- }
+ if (!ctl->enabled)
+ continue;
- buf->regions = kcalloc(wm_adsp_fw[dsp->fw].caps->num_regions,
- sizeof(*buf->regions), GFP_KERNEL);
- if (!buf->regions) {
- ret = -ENOMEM;
- goto err_buffer;
+ ret = wm_adsp_buffer_parse_coeff(ctl);
+ if (ret < 0) {
+ adsp_err(dsp, "Failed to parse coeff: %d\n", ret);
+ goto error;
+ } else if (ret == 0) {
+ /* Only one buffer supported for version 0 */
+ return 0;
+ }
}
- ret = wm_adsp_buffer_populate(buf);
- if (ret < 0) {
- adsp_err(dsp, "Failed to populate host buffer: %d\n", ret);
- goto err_regions;
+ if (list_empty(&dsp->buffer_list)) {
+ /* Fall back to legacy support */
+ ret = wm_adsp_buffer_parse_legacy(dsp);
+ if (ret) {
+ adsp_err(dsp, "Failed to parse legacy: %d\n", ret);
+ goto error;
+ }
}
- dsp->buffer = buf;
-
return 0;
-err_regions:
- kfree(buf->regions);
-err_buffer:
- kfree(buf);
+error:
+ wm_adsp_buffer_free(dsp);
return ret;
}
static int wm_adsp_buffer_free(struct wm_adsp *dsp)
{
- if (dsp->buffer) {
- wm_adsp_compr_detach(dsp->buffer->compr);
+ struct wm_adsp_compr_buf *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dsp->buffer_list, list) {
+ if (buf->compr)
+ wm_adsp_compr_detach(buf->compr);
+
+ kfree(buf->name);
+ kfree(buf->regions);
+ list_del(&buf->list);
+ kfree(buf);
+ }
- kfree(dsp->buffer->regions);
- kfree(dsp->buffer);
+ return 0;
+}
+
+static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf)
+{
+ int ret;
- dsp->buffer = NULL;
+ ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error);
+ if (ret < 0) {
+ adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret);
+ return ret;
+ }
+ if (buf->error != 0) {
+ adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error);
+ return -EIO;
}
return 0;
@@ -3428,7 +3552,7 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
struct wm_adsp *dsp = compr->dsp;
int ret = 0;
- adsp_dbg(dsp, "Trigger: %d\n", cmd);
+ compr_dbg(compr, "Trigger: %d\n", cmd);
mutex_lock(&dsp->pwr_lock);
@@ -3437,12 +3561,16 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
if (!wm_adsp_compr_attached(compr)) {
ret = wm_adsp_compr_attach(compr);
if (ret < 0) {
- adsp_err(dsp, "Failed to link buffer and stream: %d\n",
- ret);
+ compr_err(compr, "Failed to link buffer and stream: %d\n",
+ ret);
break;
}
}
+ ret = wm_adsp_buffer_get_error(compr->buf);
+ if (ret < 0)
+ break;
+
wm_adsp_buffer_clear(compr->buf);
/* Trigger the IRQ at one fragment of data */
@@ -3450,8 +3578,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
HOST_BUFFER_FIELD(high_water_mark),
wm_adsp_compr_frag_words(compr));
if (ret < 0) {
- adsp_err(dsp, "Failed to set high water mark: %d\n",
- ret);
+ compr_err(compr, "Failed to set high water mark: %d\n",
+ ret);
break;
}
break;
@@ -3492,7 +3620,7 @@ static int wm_adsp_buffer_update_avail(struct wm_adsp_compr_buf *buf)
read_index = sign_extend32(next_read_index, 23);
if (read_index < 0) {
- adsp_dbg(buf->dsp, "Avail check on unstarted stream\n");
+ compr_dbg(buf, "Avail check on unstarted stream\n");
return 0;
}
@@ -3510,31 +3638,14 @@ static int wm_adsp_buffer_update_avail(struct wm_adsp_compr_buf *buf)
if (avail < 0)
avail += wm_adsp_buffer_size(buf);
- adsp_dbg(buf->dsp, "readindex=0x%x, writeindex=0x%x, avail=%d\n",
- buf->read_index, write_index, avail * WM_ADSP_DATA_WORD_SIZE);
+ compr_dbg(buf, "readindex=0x%x, writeindex=0x%x, avail=%d\n",
+ buf->read_index, write_index, avail * WM_ADSP_DATA_WORD_SIZE);
buf->avail = avail;
return 0;
}
-static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf)
-{
- int ret;
-
- ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error);
- if (ret < 0) {
- adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret);
- return ret;
- }
- if (buf->error != 0) {
- adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error);
- return -EIO;
- }
-
- return 0;
-}
-
int wm_adsp_compr_handle_irq(struct wm_adsp *dsp)
{
struct wm_adsp_compr_buf *buf;
@@ -3543,39 +3654,40 @@ int wm_adsp_compr_handle_irq(struct wm_adsp *dsp)
mutex_lock(&dsp->pwr_lock);
- buf = dsp->buffer;
- compr = dsp->compr;
-
- if (!buf) {
+ if (list_empty(&dsp->buffer_list)) {
ret = -ENODEV;
goto out;
}
adsp_dbg(dsp, "Handling buffer IRQ\n");
- ret = wm_adsp_buffer_get_error(buf);
- if (ret < 0)
- goto out_notify; /* Wake poll to report error */
+ list_for_each_entry(buf, &dsp->buffer_list, list) {
+ compr = buf->compr;
- ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(irq_count),
- &buf->irq_count);
- if (ret < 0) {
- adsp_err(dsp, "Failed to get irq_count: %d\n", ret);
- goto out;
- }
+ ret = wm_adsp_buffer_get_error(buf);
+ if (ret < 0)
+ goto out_notify; /* Wake poll to report error */
- ret = wm_adsp_buffer_update_avail(buf);
- if (ret < 0) {
- adsp_err(dsp, "Error reading avail: %d\n", ret);
- goto out;
- }
+ ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(irq_count),
+ &buf->irq_count);
+ if (ret < 0) {
+ compr_err(buf, "Failed to get irq_count: %d\n", ret);
+ goto out;
+ }
- if (wm_adsp_fw[dsp->fw].voice_trigger && buf->irq_count == 2)
- ret = WM_ADSP_COMPR_VOICE_TRIGGER;
+ ret = wm_adsp_buffer_update_avail(buf);
+ if (ret < 0) {
+ compr_err(buf, "Error reading avail: %d\n", ret);
+ goto out;
+ }
+
+ if (wm_adsp_fw[dsp->fw].voice_trigger && buf->irq_count == 2)
+ ret = WM_ADSP_COMPR_VOICE_TRIGGER;
out_notify:
- if (compr && compr->stream)
- snd_compr_fragment_elapsed(compr->stream);
+ if (compr && compr->stream)
+ snd_compr_fragment_elapsed(compr->stream);
+ }
out:
mutex_unlock(&dsp->pwr_lock);
@@ -3589,8 +3701,7 @@ static int wm_adsp_buffer_reenable_irq(struct wm_adsp_compr_buf *buf)
if (buf->irq_count & 0x01)
return 0;
- adsp_dbg(buf->dsp, "Enable IRQ(0x%x) for next fragment\n",
- buf->irq_count);
+ compr_dbg(buf, "Enable IRQ(0x%x) for next fragment\n", buf->irq_count);
buf->irq_count |= 0x01;
@@ -3606,7 +3717,7 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
struct wm_adsp_compr_buf *buf;
int ret = 0;
- adsp_dbg(dsp, "Pointer request\n");
+ compr_dbg(compr, "Pointer request\n");
mutex_lock(&dsp->pwr_lock);
@@ -3621,7 +3732,7 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
if (buf->avail < wm_adsp_compr_frag_words(compr)) {
ret = wm_adsp_buffer_update_avail(buf);
if (ret < 0) {
- adsp_err(dsp, "Error reading avail: %d\n", ret);
+ compr_err(compr, "Error reading avail: %d\n", ret);
goto out;
}
@@ -3640,9 +3751,8 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
ret = wm_adsp_buffer_reenable_irq(buf);
if (ret < 0) {
- adsp_err(dsp,
- "Failed to re-enable buffer IRQ: %d\n",
- ret);
+ compr_err(compr, "Failed to re-enable buffer IRQ: %d\n",
+ ret);
goto out;
}
}
@@ -3662,11 +3772,9 @@ EXPORT_SYMBOL_GPL(wm_adsp_compr_pointer);
static int wm_adsp_buffer_capture_block(struct wm_adsp_compr *compr, int target)
{
struct wm_adsp_compr_buf *buf = compr->buf;
- u8 *pack_in = (u8 *)compr->raw_buf;
- u8 *pack_out = (u8 *)compr->raw_buf;
unsigned int adsp_addr;
int mem_type, nwords, max_read;
- int i, j, ret;
+ int i, ret;
/* Calculate read parameters */
for (i = 0; i < wm_adsp_fw[buf->dsp->fw].caps->num_regions; ++i)
@@ -3698,13 +3806,7 @@ static int wm_adsp_buffer_capture_block(struct wm_adsp_compr *compr, int target)
if (ret < 0)
return ret;
- /* Remove the padding bytes from the data read from the DSP */
- for (i = 0; i < nwords; i++) {
- for (j = 0; j < WM_ADSP_DATA_WORD_SIZE; j++)
- *pack_out++ = *pack_in++;
-
- pack_in += sizeof(*(compr->raw_buf)) - WM_ADSP_DATA_WORD_SIZE;
- }
+ wm_adsp_remove_padding(compr->raw_buf, nwords, WM_ADSP_DATA_WORD_SIZE);
/* update read index to account for words read */
buf->read_index += nwords;
@@ -3725,11 +3827,10 @@ static int wm_adsp_buffer_capture_block(struct wm_adsp_compr *compr, int target)
static int wm_adsp_compr_read(struct wm_adsp_compr *compr,
char __user *buf, size_t count)
{
- struct wm_adsp *dsp = compr->dsp;
int ntotal = 0;
int nwords, nbytes;
- adsp_dbg(dsp, "Requested read of %zu bytes\n", count);
+ compr_dbg(compr, "Requested read of %zu bytes\n", count);
if (!compr->buf || compr->buf->error) {
snd_compr_stop_error(compr->stream, SNDRV_PCM_STATE_XRUN);
@@ -3741,17 +3842,18 @@ static int wm_adsp_compr_read(struct wm_adsp_compr *compr,
do {
nwords = wm_adsp_buffer_capture_block(compr, count);
if (nwords < 0) {
- adsp_err(dsp, "Failed to capture block: %d\n", nwords);
+ compr_err(compr, "Failed to capture block: %d\n",
+ nwords);
return nwords;
}
nbytes = nwords * WM_ADSP_DATA_WORD_SIZE;
- adsp_dbg(dsp, "Read %d bytes\n", nbytes);
+ compr_dbg(compr, "Read %d bytes\n", nbytes);
if (copy_to_user(buf + ntotal, compr->raw_buf, nbytes)) {
- adsp_err(dsp, "Failed to copy data to user: %d, %d\n",
- ntotal, nbytes);
+ compr_err(compr, "Failed to copy data to user: %d, %d\n",
+ ntotal, nbytes);
return -EFAULT;
}
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index 4b8778b0b06c..59e07ad16329 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -90,8 +90,8 @@ struct wm_adsp {
struct work_struct boot_work;
- struct wm_adsp_compr *compr;
- struct wm_adsp_compr_buf *buffer;
+ struct list_head compr_list;
+ struct list_head buffer_list;
struct mutex pwr_lock;
diff --git a/sound/soc/dwc/dwc-pcm.c b/sound/soc/dwc/dwc-pcm.c
index 2cc9632024fc..a9ae91c4597f 100644
--- a/sound/soc/dwc/dwc-pcm.c
+++ b/sound/soc/dwc/dwc-pcm.c
@@ -249,9 +249,10 @@ static int dw_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
size_t size = dw_pcm_hardware.buffer_bytes_max;
- return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL), size, size);
+ return 0;
}
static void dw_pcm_free(struct snd_pcm *pcm)
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 2e75b5bc5f1d..7b1d9970be8b 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -57,6 +57,15 @@ config SND_SOC_FSL_ESAI
This option is only useful for out-of-tree drivers since
in-tree drivers select it automatically.
+config SND_SOC_FSL_MICFIL
+ tristate "Pulse Density Modulation Microphone Interface (MICFIL) module support"
+ select REGMAP_MMIO
+ select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Say Y if you want to add Pulse Density Modulation microphone
+ interface (MICFIL) support for NXP.
+
config SND_SOC_FSL_UTILS
tristate
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index de94fa057e24..3c0ff315b971 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -19,6 +19,7 @@ snd-soc-fsl-ssi-y := fsl_ssi.o
snd-soc-fsl-ssi-$(CONFIG_DEBUG_FS) += fsl_ssi_dbg.o
snd-soc-fsl-spdif-objs := fsl_spdif.o
snd-soc-fsl-esai-objs := fsl_esai.o
+snd-soc-fsl-micfil-objs := fsl_micfil.o
snd-soc-fsl-utils-objs := fsl_utils.o
snd-soc-fsl-dma-objs := fsl_dma.o
obj-$(CONFIG_SND_SOC_FSL_ASOC_CARD) += snd-soc-fsl-asoc-card.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_SND_SOC_FSL_SAI) += snd-soc-fsl-sai.o
obj-$(CONFIG_SND_SOC_FSL_SSI) += snd-soc-fsl-ssi.o
obj-$(CONFIG_SND_SOC_FSL_SPDIF) += snd-soc-fsl-spdif.o
obj-$(CONFIG_SND_SOC_FSL_ESAI) += snd-soc-fsl-esai.o
+obj-$(CONFIG_SND_SOC_FSL_MICFIL) += snd-soc-fsl-micfil.o
obj-$(CONFIG_SND_SOC_FSL_UTILS) += snd-soc-fsl-utils.o
obj-$(CONFIG_SND_SOC_POWERPC_DMA) += snd-soc-fsl-dma.o
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 81f2fe2c6d23..60f87a0d99f4 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -689,6 +689,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
asrc_fail:
of_node_put(asrc_np);
of_node_put(codec_np);
+ put_device(&cpu_pdev->dev);
fail:
of_node_put(cpu_np);
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 57b484768a58..afe67c865330 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -398,7 +398,8 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
case SND_SOC_DAIFMT_RIGHT_J:
/* Data on rising edge of bclk, frame high, right aligned */
- xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA;
+ xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP;
+ xcr |= ESAI_xCR_xWA;
break;
case SND_SOC_DAIFMT_DSP_A:
/* Data on rising edge of bclk, frame high, 1clk before data */
@@ -455,12 +456,12 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR;
+ mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA;
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr);
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr);
mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP |
- ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA;
+ ESAI_xCCR_xFSD | ESAI_xCCR_xCKD;
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr);
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr);
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
new file mode 100644
index 000000000000..40c07e756481
--- /dev/null
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2018 NXP
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <sound/core.h>
+
+#include "fsl_micfil.h"
+#include "imx-pcm.h"
+
+#define FSL_MICFIL_RATES SNDRV_PCM_RATE_8000_48000
+#define FSL_MICFIL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+
+struct fsl_micfil {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ const struct fsl_micfil_soc_data *soc;
+ struct clk *mclk;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+ unsigned int dataline;
+ char name[32];
+ int irq[MICFIL_IRQ_LINES];
+ unsigned int mclk_streams;
+ int quality; /*QUALITY 2-0 bits */
+ bool slave_mode;
+ int channel_gain[8];
+};
+
+struct fsl_micfil_soc_data {
+ unsigned int fifos;
+ unsigned int fifo_depth;
+ unsigned int dataline;
+ bool imx;
+};
+
+static struct fsl_micfil_soc_data fsl_micfil_imx8mm = {
+ .imx = true,
+ .fifos = 8,
+ .fifo_depth = 8,
+ .dataline = 0xf,
+};
+
+static const struct of_device_id fsl_micfil_dt_ids[] = {
+ { .compatible = "fsl,imx8mm-micfil", .data = &fsl_micfil_imx8mm },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_micfil_dt_ids);
+
+/* Table 5. Quality Modes
+ * Medium 0 0 0
+ * High 0 0 1
+ * Very Low 2 1 0 0
+ * Very Low 1 1 0 1
+ * Very Low 0 1 1 0
+ * Low 1 1 1
+ */
+static const char * const micfil_quality_select_texts[] = {
+ "Medium", "High",
+ "N/A", "N/A",
+ "VLow2", "VLow1",
+ "VLow0", "Low",
+};
+
+static const struct soc_enum fsl_micfil_quality_enum =
+ SOC_ENUM_SINGLE(REG_MICFIL_CTRL2,
+ MICFIL_CTRL2_QSEL_SHIFT,
+ ARRAY_SIZE(micfil_quality_select_texts),
+ micfil_quality_select_texts);
+
+static DECLARE_TLV_DB_SCALE(gain_tlv, 0, 100, 0);
+
+static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
+ SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
+ SOC_ENUM_EXT("MICFIL Quality Select",
+ fsl_micfil_quality_enum,
+ snd_soc_get_enum_double, snd_soc_put_enum_double),
+};
+
+static inline int get_pdm_clk(struct fsl_micfil *micfil,
+ unsigned int rate)
+{
+ u32 ctrl2_reg;
+ int qsel, osr;
+ int bclk;
+
+ regmap_read(micfil->regmap, REG_MICFIL_CTRL2, &ctrl2_reg);
+ osr = 16 - ((ctrl2_reg & MICFIL_CTRL2_CICOSR_MASK)
+ >> MICFIL_CTRL2_CICOSR_SHIFT);
+
+ regmap_read(micfil->regmap, REG_MICFIL_CTRL2, &ctrl2_reg);
+ qsel = ctrl2_reg & MICFIL_CTRL2_QSEL_MASK;
+
+ switch (qsel) {
+ case MICFIL_HIGH_QUALITY:
+ bclk = rate * 8 * osr / 2; /* kfactor = 0.5 */
+ break;
+ case MICFIL_MEDIUM_QUALITY:
+ case MICFIL_VLOW0_QUALITY:
+ bclk = rate * 4 * osr * 1; /* kfactor = 1 */
+ break;
+ case MICFIL_LOW_QUALITY:
+ case MICFIL_VLOW1_QUALITY:
+ bclk = rate * 2 * osr * 2; /* kfactor = 2 */
+ break;
+ case MICFIL_VLOW2_QUALITY:
+ bclk = rate * osr * 4; /* kfactor = 4 */
+ break;
+ default:
+ dev_err(&micfil->pdev->dev,
+ "Please make sure you select a valid quality.\n");
+ bclk = -1;
+ break;
+ }
+
+ return bclk;
+}
+
+static inline int get_clk_div(struct fsl_micfil *micfil,
+ unsigned int rate)
+{
+ u32 ctrl2_reg;
+ long mclk_rate;
+ int osr;
+ int clk_div;
+
+ regmap_read(micfil->regmap, REG_MICFIL_CTRL2, &ctrl2_reg);
+ osr = 16 - ((ctrl2_reg & MICFIL_CTRL2_CICOSR_MASK)
+ >> MICFIL_CTRL2_CICOSR_SHIFT);
+
+ mclk_rate = clk_get_rate(micfil->mclk);
+
+ clk_div = mclk_rate / (get_pdm_clk(micfil, rate) * 2);
+
+ return clk_div;
+}
+
+/* The SRES is a self-negated bit which provides the CPU with the
+ * capability to initialize the PDM Interface module through the
+ * slave-bus interface. This bit always reads as zero, and it
+ * is only effective when MDIS is cleared.
+ */
+static int fsl_micfil_reset(struct device *dev)
+{
+ struct fsl_micfil *micfil = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(micfil->regmap,
+ REG_MICFIL_CTRL1,
+ MICFIL_CTRL1_MDIS_MASK,
+ 0);
+ if (ret) {
+ dev_err(dev, "failed to clear MDIS bit %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(micfil->regmap,
+ REG_MICFIL_CTRL1,
+ MICFIL_CTRL1_SRES_MASK,
+ MICFIL_CTRL1_SRES);
+ if (ret) {
+ dev_err(dev, "failed to reset MICFIL: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_micfil_set_mclk_rate(struct fsl_micfil *micfil,
+ unsigned int freq)
+{
+ struct device *dev = &micfil->pdev->dev;
+ int ret;
+
+ clk_disable_unprepare(micfil->mclk);
+
+ ret = clk_set_rate(micfil->mclk, freq * 1024);
+ if (ret)
+ dev_warn(dev, "failed to set rate (%u): %d\n",
+ freq * 1024, ret);
+
+ clk_prepare_enable(micfil->mclk);
+
+ return ret;
+}
+
+static int fsl_micfil_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai);
+
+ if (!micfil) {
+ dev_err(dai->dev,
+ "micfil dai priv_data not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fsl_micfil_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai);
+ struct device *dev = &micfil->pdev->dev;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = fsl_micfil_reset(dev);
+ if (ret) {
+ dev_err(dev, "failed to soft reset\n");
+ return ret;
+ }
+
+ /* DMA Interrupt Selection - DISEL bits
+ * 00 - DMA and IRQ disabled
+ * 01 - DMA req enabled
+ * 10 - IRQ enabled
+ * 11 - reserved
+ */
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL1,
+ MICFIL_CTRL1_DISEL_MASK,
+ (1 << MICFIL_CTRL1_DISEL_SHIFT));
+ if (ret) {
+ dev_err(dev, "failed to update DISEL bits\n");
+ return ret;
+ }
+
+ /* Enable the module */
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL1,
+ MICFIL_CTRL1_PDMIEN_MASK,
+ MICFIL_CTRL1_PDMIEN);
+ if (ret) {
+ dev_err(dev, "failed to enable the module\n");
+ return ret;
+ }
+
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ /* Disable the module */
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL1,
+ MICFIL_CTRL1_PDMIEN_MASK,
+ 0);
+ if (ret) {
+ dev_err(dev, "failed to enable the module\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL1,
+ MICFIL_CTRL1_DISEL_MASK,
+ (0 << MICFIL_CTRL1_DISEL_SHIFT));
+ if (ret) {
+ dev_err(dev, "failed to update DISEL bits\n");
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int fsl_set_clock_params(struct device *dev, unsigned int rate)
+{
+ struct fsl_micfil *micfil = dev_get_drvdata(dev);
+ int clk_div;
+ int ret = 0;
+
+ ret = fsl_micfil_set_mclk_rate(micfil, rate);
+ if (ret < 0)
+ dev_err(dev, "failed to set mclk[%lu] to rate %u\n",
+ clk_get_rate(micfil->mclk), rate);
+
+ /* set CICOSR */
+ ret |= regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2,
+ MICFIL_CTRL2_CICOSR_MASK,
+ MICFIL_CTRL2_OSR_DEFAULT);
+ if (ret)
+ dev_err(dev, "failed to set CICOSR in reg 0x%X\n",
+ REG_MICFIL_CTRL2);
+
+ /* set CLK_DIV */
+ clk_div = get_clk_div(micfil, rate);
+ if (clk_div < 0)
+ ret = -EINVAL;
+
+ ret |= regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2,
+ MICFIL_CTRL2_CLKDIV_MASK, clk_div);
+ if (ret)
+ dev_err(dev, "failed to set CLKDIV in reg 0x%X\n",
+ REG_MICFIL_CTRL2);
+
+ return ret;
+}
+
+static int fsl_micfil_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai);
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+ struct device *dev = &micfil->pdev->dev;
+ int ret;
+
+ /* 1. Disable the module */
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL1,
+ MICFIL_CTRL1_PDMIEN_MASK, 0);
+ if (ret) {
+ dev_err(dev, "failed to disable the module\n");
+ return ret;
+ }
+
+ /* enable channels */
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL1,
+ 0xFF, ((1 << channels) - 1));
+ if (ret) {
+ dev_err(dev, "failed to enable channels %d, reg 0x%X\n", ret,
+ REG_MICFIL_CTRL1);
+ return ret;
+ }
+
+ ret = fsl_set_clock_params(dev, rate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set clock parameters [%d]\n", ret);
+ return ret;
+ }
+
+ micfil->dma_params_rx.maxburst = channels * MICFIL_DMA_MAXBURST_RX;
+
+ return 0;
+}
+
+static int fsl_micfil_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai);
+ struct device *dev = &micfil->pdev->dev;
+ int ret;
+
+ if (!freq)
+ return 0;
+
+ ret = fsl_micfil_set_mclk_rate(micfil, freq);
+ if (ret < 0)
+ dev_err(dev, "failed to set mclk[%lu] to rate %u\n",
+ clk_get_rate(micfil->mclk), freq);
+
+ return ret;
+}
+
+static struct snd_soc_dai_ops fsl_micfil_dai_ops = {
+ .startup = fsl_micfil_startup,
+ .trigger = fsl_micfil_trigger,
+ .hw_params = fsl_micfil_hw_params,
+ .set_sysclk = fsl_micfil_set_dai_sysclk,
+};
+
+static int fsl_micfil_dai_probe(struct snd_soc_dai *cpu_dai)
+{
+ struct fsl_micfil *micfil = dev_get_drvdata(cpu_dai->dev);
+ struct device *dev = cpu_dai->dev;
+ unsigned int val;
+ int ret;
+ int i;
+
+ /* set qsel to medium */
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2,
+ MICFIL_CTRL2_QSEL_MASK, MICFIL_MEDIUM_QUALITY);
+ if (ret) {
+ dev_err(dev, "failed to set quality mode bits, reg 0x%X\n",
+ REG_MICFIL_CTRL2);
+ return ret;
+ }
+
+ /* set default gain to max_gain */
+ regmap_write(micfil->regmap, REG_MICFIL_OUT_CTRL, 0x77777777);
+ for (i = 0; i < 8; i++)
+ micfil->channel_gain[i] = 0xF;
+
+ snd_soc_dai_init_dma_data(cpu_dai, NULL,
+ &micfil->dma_params_rx);
+
+ /* FIFO Watermark Control - FIFOWMK */
+ val = MICFIL_FIFO_CTRL_FIFOWMK(micfil->soc->fifo_depth) - 1;
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_FIFO_CTRL,
+ MICFIL_FIFO_CTRL_FIFOWMK_MASK,
+ val);
+ if (ret) {
+ dev_err(dev, "failed to set FIFOWMK\n");
+ return ret;
+ }
+
+ snd_soc_dai_set_drvdata(cpu_dai, micfil);
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver fsl_micfil_dai = {
+ .probe = fsl_micfil_dai_probe,
+ .capture = {
+ .stream_name = "CPU-Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = FSL_MICFIL_RATES,
+ .formats = FSL_MICFIL_FORMATS,
+ },
+ .ops = &fsl_micfil_dai_ops,
+};
+
+static const struct snd_soc_component_driver fsl_micfil_component = {
+ .name = "fsl-micfil-dai",
+ .controls = fsl_micfil_snd_controls,
+ .num_controls = ARRAY_SIZE(fsl_micfil_snd_controls),
+
+};
+
+/* REGMAP */
+static const struct reg_default fsl_micfil_reg_defaults[] = {
+ {REG_MICFIL_CTRL1, 0x00000000},
+ {REG_MICFIL_CTRL2, 0x00000000},
+ {REG_MICFIL_STAT, 0x00000000},
+ {REG_MICFIL_FIFO_CTRL, 0x00000007},
+ {REG_MICFIL_FIFO_STAT, 0x00000000},
+ {REG_MICFIL_DATACH0, 0x00000000},
+ {REG_MICFIL_DATACH1, 0x00000000},
+ {REG_MICFIL_DATACH2, 0x00000000},
+ {REG_MICFIL_DATACH3, 0x00000000},
+ {REG_MICFIL_DATACH4, 0x00000000},
+ {REG_MICFIL_DATACH5, 0x00000000},
+ {REG_MICFIL_DATACH6, 0x00000000},
+ {REG_MICFIL_DATACH7, 0x00000000},
+ {REG_MICFIL_DC_CTRL, 0x00000000},
+ {REG_MICFIL_OUT_CTRL, 0x00000000},
+ {REG_MICFIL_OUT_STAT, 0x00000000},
+ {REG_MICFIL_VAD0_CTRL1, 0x00000000},
+ {REG_MICFIL_VAD0_CTRL2, 0x000A0000},
+ {REG_MICFIL_VAD0_STAT, 0x00000000},
+ {REG_MICFIL_VAD0_SCONFIG, 0x00000000},
+ {REG_MICFIL_VAD0_NCONFIG, 0x80000000},
+ {REG_MICFIL_VAD0_NDATA, 0x00000000},
+ {REG_MICFIL_VAD0_ZCD, 0x00000004},
+};
+
+static bool fsl_micfil_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case REG_MICFIL_CTRL1:
+ case REG_MICFIL_CTRL2:
+ case REG_MICFIL_STAT:
+ case REG_MICFIL_FIFO_CTRL:
+ case REG_MICFIL_FIFO_STAT:
+ case REG_MICFIL_DATACH0:
+ case REG_MICFIL_DATACH1:
+ case REG_MICFIL_DATACH2:
+ case REG_MICFIL_DATACH3:
+ case REG_MICFIL_DATACH4:
+ case REG_MICFIL_DATACH5:
+ case REG_MICFIL_DATACH6:
+ case REG_MICFIL_DATACH7:
+ case REG_MICFIL_DC_CTRL:
+ case REG_MICFIL_OUT_CTRL:
+ case REG_MICFIL_OUT_STAT:
+ case REG_MICFIL_VAD0_CTRL1:
+ case REG_MICFIL_VAD0_CTRL2:
+ case REG_MICFIL_VAD0_STAT:
+ case REG_MICFIL_VAD0_SCONFIG:
+ case REG_MICFIL_VAD0_NCONFIG:
+ case REG_MICFIL_VAD0_NDATA:
+ case REG_MICFIL_VAD0_ZCD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool fsl_micfil_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case REG_MICFIL_CTRL1:
+ case REG_MICFIL_CTRL2:
+ case REG_MICFIL_STAT: /* Write 1 to Clear */
+ case REG_MICFIL_FIFO_CTRL:
+ case REG_MICFIL_FIFO_STAT: /* Write 1 to Clear */
+ case REG_MICFIL_DC_CTRL:
+ case REG_MICFIL_OUT_CTRL:
+ case REG_MICFIL_OUT_STAT: /* Write 1 to Clear */
+ case REG_MICFIL_VAD0_CTRL1:
+ case REG_MICFIL_VAD0_CTRL2:
+ case REG_MICFIL_VAD0_STAT: /* Write 1 to Clear */
+ case REG_MICFIL_VAD0_SCONFIG:
+ case REG_MICFIL_VAD0_NCONFIG:
+ case REG_MICFIL_VAD0_ZCD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool fsl_micfil_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case REG_MICFIL_STAT:
+ case REG_MICFIL_DATACH0:
+ case REG_MICFIL_DATACH1:
+ case REG_MICFIL_DATACH2:
+ case REG_MICFIL_DATACH3:
+ case REG_MICFIL_DATACH4:
+ case REG_MICFIL_DATACH5:
+ case REG_MICFIL_DATACH6:
+ case REG_MICFIL_DATACH7:
+ case REG_MICFIL_VAD0_STAT:
+ case REG_MICFIL_VAD0_NDATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config fsl_micfil_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+
+ .max_register = REG_MICFIL_VAD0_ZCD,
+ .reg_defaults = fsl_micfil_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(fsl_micfil_reg_defaults),
+ .readable_reg = fsl_micfil_readable_reg,
+ .volatile_reg = fsl_micfil_volatile_reg,
+ .writeable_reg = fsl_micfil_writeable_reg,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/* END OF REGMAP */
+
+static irqreturn_t micfil_isr(int irq, void *devid)
+{
+ struct fsl_micfil *micfil = (struct fsl_micfil *)devid;
+ struct platform_device *pdev = micfil->pdev;
+ u32 stat_reg;
+ u32 fifo_stat_reg;
+ u32 ctrl1_reg;
+ bool dma_enabled;
+ int i;
+
+ regmap_read(micfil->regmap, REG_MICFIL_STAT, &stat_reg);
+ regmap_read(micfil->regmap, REG_MICFIL_CTRL1, &ctrl1_reg);
+ regmap_read(micfil->regmap, REG_MICFIL_FIFO_STAT, &fifo_stat_reg);
+
+ dma_enabled = MICFIL_DMA_ENABLED(ctrl1_reg);
+
+ /* Channel 0-7 Output Data Flags */
+ for (i = 0; i < MICFIL_OUTPUT_CHANNELS; i++) {
+ if (stat_reg & MICFIL_STAT_CHXF_MASK(i))
+ dev_dbg(&pdev->dev,
+ "Data available in Data Channel %d\n", i);
+ /* if DMA is not enabled, the field must be written
+ * with 1 to clear it
+ */
+ if (!dma_enabled)
+ regmap_write_bits(micfil->regmap,
+ REG_MICFIL_STAT,
+ MICFIL_STAT_CHXF_MASK(i),
+ MICFIL_STAT_CHXF(i));
+ }
+
+ for (i = 0; i < MICFIL_FIFO_NUM; i++) {
+ if (fifo_stat_reg & MICFIL_FIFO_STAT_FIFOX_OVER_MASK(i))
+ dev_dbg(&pdev->dev,
+ "FIFO Overflow Exception flag for channel %d\n",
+ i);
+
+ if (fifo_stat_reg & MICFIL_FIFO_STAT_FIFOX_UNDER_MASK(i))
+ dev_dbg(&pdev->dev,
+ "FIFO Underflow Exception flag for channel %d\n",
+ i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t micfil_err_isr(int irq, void *devid)
+{
+ struct fsl_micfil *micfil = (struct fsl_micfil *)devid;
+ struct platform_device *pdev = micfil->pdev;
+ u32 stat_reg;
+
+ regmap_read(micfil->regmap, REG_MICFIL_STAT, &stat_reg);
+
+ if (stat_reg & MICFIL_STAT_BSY_FIL_MASK)
+ dev_dbg(&pdev->dev, "isr: Decimation Filter is running\n");
+
+ if (stat_reg & MICFIL_STAT_FIR_RDY_MASK)
+ dev_dbg(&pdev->dev, "isr: FIR Filter Data ready\n");
+
+ if (stat_reg & MICFIL_STAT_LOWFREQF_MASK) {
+ dev_dbg(&pdev->dev, "isr: ipg_clk_app is too low\n");
+ regmap_write_bits(micfil->regmap, REG_MICFIL_STAT,
+ MICFIL_STAT_LOWFREQF_MASK, MICFIL_STAT_LOWFREQF);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int fsl_micfil_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+ struct fsl_micfil *micfil;
+ struct resource *res;
+ void __iomem *regs;
+ int ret, i;
+ unsigned long irqflag = 0;
+
+ micfil = devm_kzalloc(&pdev->dev, sizeof(*micfil), GFP_KERNEL);
+ if (!micfil)
+ return -ENOMEM;
+
+ micfil->pdev = pdev;
+ strncpy(micfil->name, np->name, sizeof(micfil->name) - 1);
+
+ of_id = of_match_device(fsl_micfil_dt_ids, &pdev->dev);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ micfil->soc = of_id->data;
+
+ /* ipg_clk is used to control the registers;
+ * ipg_clk_app is used to operate the filter
+ */
+ micfil->mclk = devm_clk_get(&pdev->dev, "ipg_clk_app");
+ if (IS_ERR(micfil->mclk)) {
+ dev_err(&pdev->dev, "failed to get core clock: %ld\n",
+ PTR_ERR(micfil->mclk));
+ return PTR_ERR(micfil->mclk);
+ }
+
+ /* init regmap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ micfil->regmap = devm_regmap_init_mmio_clk(&pdev->dev,
+ "ipg_clk",
+ regs,
+ &fsl_micfil_regmap_config);
+ if (IS_ERR(micfil->regmap)) {
+ dev_err(&pdev->dev, "failed to init MICFIL regmap: %ld\n",
+ PTR_ERR(micfil->regmap));
+ return PTR_ERR(micfil->regmap);
+ }
+
+ /* dataline mask for RX */
+ ret = of_property_read_u32_index(np,
+ "fsl,dataline",
+ 0,
+ &micfil->dataline);
+ if (ret)
+ micfil->dataline = 1;
+
+ if (micfil->dataline & ~micfil->soc->dataline) {
+ dev_err(&pdev->dev, "dataline setting error, Mask is 0x%X\n",
+ micfil->soc->dataline);
+ return -EINVAL;
+ }
+
+ /* get IRQs */
+ for (i = 0; i < MICFIL_IRQ_LINES; i++) {
+ micfil->irq[i] = platform_get_irq(pdev, i);
+ dev_err(&pdev->dev, "GET IRQ: %d\n", micfil->irq[i]);
+ if (micfil->irq[i] < 0) {
+ dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
+ return micfil->irq[i];
+ }
+ }
+
+ if (of_property_read_bool(np, "fsl,shared-interrupt"))
+ irqflag = IRQF_SHARED;
+
+ /* Digital Microphone interface interrupt - IRQ 109 */
+ ret = devm_request_irq(&pdev->dev, micfil->irq[0],
+ micfil_isr, irqflag,
+ micfil->name, micfil);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to claim mic interface irq %u\n",
+ micfil->irq[0]);
+ return ret;
+ }
+
+ /* Digital Microphone interface error interrupt - IRQ 110 */
+ ret = devm_request_irq(&pdev->dev, micfil->irq[1],
+ micfil_err_isr, irqflag,
+ micfil->name, micfil);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to claim mic interface error irq %u\n",
+ micfil->irq[1]);
+ return ret;
+ }
+
+ micfil->dma_params_rx.chan_name = "rx";
+ micfil->dma_params_rx.addr = res->start + REG_MICFIL_DATACH0;
+ micfil->dma_params_rx.maxburst = MICFIL_DMA_MAXBURST_RX;
+
+ platform_set_drvdata(pdev, micfil);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
+ &fsl_micfil_dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register component %s\n",
+ fsl_micfil_component.name);
+ return ret;
+ }
+
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret)
+ dev_err(&pdev->dev, "failed to pcm register\n");
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int __maybe_unused fsl_micfil_runtime_suspend(struct device *dev)
+{
+ struct fsl_micfil *micfil = dev_get_drvdata(dev);
+
+ regcache_cache_only(micfil->regmap, true);
+
+ clk_disable_unprepare(micfil->mclk);
+
+ return 0;
+}
+
+static int __maybe_unused fsl_micfil_runtime_resume(struct device *dev)
+{
+ struct fsl_micfil *micfil = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(micfil->mclk);
+ if (ret < 0)
+ return ret;
+
+ regcache_cache_only(micfil->regmap, false);
+ regcache_mark_dirty(micfil->regmap);
+ regcache_sync(micfil->regmap);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int __maybe_unused fsl_micfil_suspend(struct device *dev)
+{
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused fsl_micfil_resume(struct device *dev)
+{
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops fsl_micfil_pm_ops = {
+ SET_RUNTIME_PM_OPS(fsl_micfil_runtime_suspend,
+ fsl_micfil_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(fsl_micfil_suspend,
+ fsl_micfil_resume)
+};
+
+static struct platform_driver fsl_micfil_driver = {
+ .probe = fsl_micfil_probe,
+ .driver = {
+ .name = "fsl-micfil-dai",
+ .pm = &fsl_micfil_pm_ops,
+ .of_match_table = fsl_micfil_dt_ids,
+ },
+};
+module_platform_driver(fsl_micfil_driver);
+
+MODULE_AUTHOR("Cosmin-Gabriel Samoila <cosmin.samoila@nxp.com>");
+MODULE_DESCRIPTION("NXP PDM Microphone Interface (MICFIL) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/fsl/fsl_micfil.h b/sound/soc/fsl/fsl_micfil.h
new file mode 100644
index 000000000000..bac825c3135a
--- /dev/null
+++ b/sound/soc/fsl/fsl_micfil.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PDM Microphone Interface for the NXP i.MX SoC
+ * Copyright 2018 NXP
+ */
+
+#ifndef _FSL_MICFIL_H
+#define _FSL_MICFIL_H
+
+/* MICFIL Register Map */
+#define REG_MICFIL_CTRL1 0x00
+#define REG_MICFIL_CTRL2 0x04
+#define REG_MICFIL_STAT 0x08
+#define REG_MICFIL_FIFO_CTRL 0x10
+#define REG_MICFIL_FIFO_STAT 0x14
+#define REG_MICFIL_DATACH0 0x24
+#define REG_MICFIL_DATACH1 0x28
+#define REG_MICFIL_DATACH2 0x2C
+#define REG_MICFIL_DATACH3 0x30
+#define REG_MICFIL_DATACH4 0x34
+#define REG_MICFIL_DATACH5 0x38
+#define REG_MICFIL_DATACH6 0x3C
+#define REG_MICFIL_DATACH7 0x40
+#define REG_MICFIL_DC_CTRL 0x64
+#define REG_MICFIL_OUT_CTRL 0x74
+#define REG_MICFIL_OUT_STAT 0x7C
+#define REG_MICFIL_VAD0_CTRL1 0x90
+#define REG_MICFIL_VAD0_CTRL2 0x94
+#define REG_MICFIL_VAD0_STAT 0x98
+#define REG_MICFIL_VAD0_SCONFIG 0x9C
+#define REG_MICFIL_VAD0_NCONFIG 0xA0
+#define REG_MICFIL_VAD0_NDATA 0xA4
+#define REG_MICFIL_VAD0_ZCD 0xA8
+
+/* MICFIL Control Register 1 -- REG_MICFIL_CTRL1 0x00 */
+#define MICFIL_CTRL1_MDIS_SHIFT 31
+#define MICFIL_CTRL1_MDIS_MASK BIT(MICFIL_CTRL1_MDIS_SHIFT)
+#define MICFIL_CTRL1_MDIS BIT(MICFIL_CTRL1_MDIS_SHIFT)
+#define MICFIL_CTRL1_DOZEN_SHIFT 30
+#define MICFIL_CTRL1_DOZEN_MASK BIT(MICFIL_CTRL1_DOZEN_SHIFT)
+#define MICFIL_CTRL1_DOZEN BIT(MICFIL_CTRL1_DOZEN_SHIFT)
+#define MICFIL_CTRL1_PDMIEN_SHIFT 29
+#define MICFIL_CTRL1_PDMIEN_MASK BIT(MICFIL_CTRL1_PDMIEN_SHIFT)
+#define MICFIL_CTRL1_PDMIEN BIT(MICFIL_CTRL1_PDMIEN_SHIFT)
+#define MICFIL_CTRL1_DBG_SHIFT 28
+#define MICFIL_CTRL1_DBG_MASK BIT(MICFIL_CTRL1_DBG_SHIFT)
+#define MICFIL_CTRL1_DBG BIT(MICFIL_CTRL1_DBG_SHIFT)
+#define MICFIL_CTRL1_SRES_SHIFT 27
+#define MICFIL_CTRL1_SRES_MASK BIT(MICFIL_CTRL1_SRES_SHIFT)
+#define MICFIL_CTRL1_SRES BIT(MICFIL_CTRL1_SRES_SHIFT)
+#define MICFIL_CTRL1_DBGE_SHIFT 26
+#define MICFIL_CTRL1_DBGE_MASK BIT(MICFIL_CTRL1_DBGE_SHIFT)
+#define MICFIL_CTRL1_DBGE BIT(MICFIL_CTRL1_DBGE_SHIFT)
+#define MICFIL_CTRL1_DISEL_SHIFT 24
+#define MICFIL_CTRL1_DISEL_WIDTH 2
+#define MICFIL_CTRL1_DISEL_MASK ((BIT(MICFIL_CTRL1_DISEL_WIDTH) - 1) \
+ << MICFIL_CTRL1_DISEL_SHIFT)
+#define MICFIL_CTRL1_DISEL(v) (((v) << MICFIL_CTRL1_DISEL_SHIFT) \
+ & MICFIL_CTRL1_DISEL_MASK)
+#define MICFIL_CTRL1_ERREN_SHIFT 23
+#define MICFIL_CTRL1_ERREN_MASK BIT(MICFIL_CTRL1_ERREN_SHIFT)
+#define MICFIL_CTRL1_ERREN BIT(MICFIL_CTRL1_ERREN_SHIFT)
+#define MICFIL_CTRL1_CHEN_SHIFT 0
+#define MICFIL_CTRL1_CHEN_WIDTH 8
+#define MICFIL_CTRL1_CHEN_MASK(x) (BIT(x) << MICFIL_CTRL1_CHEN_SHIFT)
+#define MICFIL_CTRL1_CHEN(x) (MICFIL_CTRL1_CHEN_MASK(x))
+
+/* MICFIL Control Register 2 -- REG_MICFIL_CTRL2 0x04 */
+#define MICFIL_CTRL2_QSEL_SHIFT 25
+#define MICFIL_CTRL2_QSEL_WIDTH 3
+#define MICFIL_CTRL2_QSEL_MASK ((BIT(MICFIL_CTRL2_QSEL_WIDTH) - 1) \
+ << MICFIL_CTRL2_QSEL_SHIFT)
+#define MICFIL_HIGH_QUALITY BIT(MICFIL_CTRL2_QSEL_SHIFT)
+#define MICFIL_MEDIUM_QUALITY (0 << MICFIL_CTRL2_QSEL_SHIFT)
+#define MICFIL_LOW_QUALITY (7 << MICFIL_CTRL2_QSEL_SHIFT)
+#define MICFIL_VLOW0_QUALITY (6 << MICFIL_CTRL2_QSEL_SHIFT)
+#define MICFIL_VLOW1_QUALITY (5 << MICFIL_CTRL2_QSEL_SHIFT)
+#define MICFIL_VLOW2_QUALITY (4 << MICFIL_CTRL2_QSEL_SHIFT)
+
+#define MICFIL_CTRL2_CICOSR_SHIFT 16
+#define MICFIL_CTRL2_CICOSR_WIDTH 4
+#define MICFIL_CTRL2_CICOSR_MASK ((BIT(MICFIL_CTRL2_CICOSR_WIDTH) - 1) \
+ << MICFIL_CTRL2_CICOSR_SHIFT)
+#define MICFIL_CTRL2_CICOSR(v) (((v) << MICFIL_CTRL2_CICOSR_SHIFT) \
+ & MICFIL_CTRL2_CICOSR_MASK)
+#define MICFIL_CTRL2_CLKDIV_SHIFT 0
+#define MICFIL_CTRL2_CLKDIV_WIDTH 8
+#define MICFIL_CTRL2_CLKDIV_MASK ((BIT(MICFIL_CTRL2_CLKDIV_WIDTH) - 1) \
+ << MICFIL_CTRL2_CLKDIV_SHIFT)
+#define MICFIL_CTRL2_CLKDIV(v) (((v) << MICFIL_CTRL2_CLKDIV_SHIFT) \
+ & MICFIL_CTRL2_CLKDIV_MASK)
+
+/* MICFIL Status Register -- REG_MICFIL_STAT 0x08 */
+#define MICFIL_STAT_BSY_FIL_SHIFT 31
+#define MICFIL_STAT_BSY_FIL_MASK BIT(MICFIL_STAT_BSY_FIL_SHIFT)
+#define MICFIL_STAT_BSY_FIL BIT(MICFIL_STAT_BSY_FIL_SHIFT)
+#define MICFIL_STAT_FIR_RDY_SHIFT 30
+#define MICFIL_STAT_FIR_RDY_MASK BIT(MICFIL_STAT_FIR_RDY_SHIFT)
+#define MICFIL_STAT_FIR_RDY BIT(MICFIL_STAT_FIR_RDY_SHIFT)
+#define MICFIL_STAT_LOWFREQF_SHIFT 29
+#define MICFIL_STAT_LOWFREQF_MASK BIT(MICFIL_STAT_LOWFREQF_SHIFT)
+#define MICFIL_STAT_LOWFREQF BIT(MICFIL_STAT_LOWFREQF_SHIFT)
+#define MICFIL_STAT_CHXF_SHIFT(v) (v)
+#define MICFIL_STAT_CHXF_MASK(v) BIT(MICFIL_STAT_CHXF_SHIFT(v))
+#define MICFIL_STAT_CHXF(v) BIT(MICFIL_STAT_CHXF_SHIFT(v))
+
+/* MICFIL FIFO Control Register -- REG_MICFIL_FIFO_CTRL 0x10 */
+#define MICFIL_FIFO_CTRL_FIFOWMK_SHIFT 0
+#define MICFIL_FIFO_CTRL_FIFOWMK_WIDTH 3
+#define MICFIL_FIFO_CTRL_FIFOWMK_MASK ((BIT(MICFIL_FIFO_CTRL_FIFOWMK_WIDTH) - 1) \
+ << MICFIL_FIFO_CTRL_FIFOWMK_SHIFT)
+#define MICFIL_FIFO_CTRL_FIFOWMK(v) (((v) << MICFIL_FIFO_CTRL_FIFOWMK_SHIFT) \
+ & MICFIL_FIFO_CTRL_FIFOWMK_MASK)
+
+/* MICFIL FIFO Status Register -- REG_MICFIL_FIFO_STAT 0x14 */
+#define MICFIL_FIFO_STAT_FIFOX_OVER_SHIFT(v) (v)
+#define MICFIL_FIFO_STAT_FIFOX_OVER_MASK(v) BIT(MICFIL_FIFO_STAT_FIFOX_OVER_SHIFT(v))
+#define MICFIL_FIFO_STAT_FIFOX_UNDER_SHIFT(v) ((v) + 8)
+#define MICFIL_FIFO_STAT_FIFOX_UNDER_MASK(v) BIT(MICFIL_FIFO_STAT_FIFOX_UNDER_SHIFT(v))
+
+/* MICFIL HWVAD0 Control 1 Register -- REG_MICFIL_VAD0_CTRL1 */
+#define MICFIL_VAD0_CTRL1_CHSEL_SHIFT 24
+#define MICFIL_VAD0_CTRL1_CHSEL_WIDTH 3
+#define MICFIL_VAD0_CTRL1_CHSEL_MASK ((BIT(MICFIL_VAD0_CTRL1_CHSEL_WIDTH) - 1) \
+ << MICFIL_VAD0_CTRL1_CHSEL_SHIFT)
+#define MICFIL_VAD0_CTRL1_CHSEL(v) (((v) << MICFIL_VAD0_CTRL1_CHSEL_SHIFT) \
+ & MICFIL_VAD0_CTRL1_CHSEL_MASK)
+#define MICFIL_VAD0_CTRL1_CICOSR_SHIFT 16
+#define MICFIL_VAD0_CTRL1_CICOSR_WIDTH 4
+#define MICFIL_VAD0_CTRL1_CICOSR_MASK ((BIT(MICFIL_VAD0_CTRL1_CICOSR_WIDTH) - 1) \
+ << MICFIL_VAD0_CTRL1_CICOSR_SHIFT)
+#define MICFIL_VAD0_CTRL1_CICOSR(v) (((v) << MICFIL_VAD0_CTRL1_CICOSR_SHIFT) \
+ & MICFIL_VAD0_CTRL1_CICOSR_MASK)
+#define MICFIL_VAD0_CTRL1_INITT_SHIFT 8
+#define MICFIL_VAD0_CTRL1_INITT_WIDTH 5
+#define MICFIL_VAD0_CTRL1_INITT_MASK ((BIT(MICFIL_VAD0_CTRL1_INITT_WIDTH) - 1) \
+ << MICFIL_VAD0_CTRL1_INITT_SHIFT)
+#define MICFIL_VAD0_CTRL1_INITT(v) (((v) << MICFIL_VAD0_CTRL1_INITT_SHIFT) \
+ & MICFIL_VAD0_CTRL1_INITT_MASK)
+#define MICFIL_VAD0_CTRL1_ST10_SHIFT 4
+#define MICFIL_VAD0_CTRL1_ST10_MASK BIT(MICFIL_VAD0_CTRL1_ST10_SHIFT)
+#define MICFIL_VAD0_CTRL1_ST10 BIT(MICFIL_VAD0_CTRL1_ST10_SHIFT)
+#define MICFIL_VAD0_CTRL1_ERIE_SHIFT 3
+#define MICFIL_VAD0_CTRL1_ERIE_MASK BIT(MICFIL_VAD0_CTRL1_ERIE_SHIFT)
+#define MICFIL_VAD0_CTRL1_ERIE BIT(MICFIL_VAD0_CTRL1_ERIE_SHIFT)
+#define MICFIL_VAD0_CTRL1_IE_SHIFT 2
+#define MICFIL_VAD0_CTRL1_IE_MASK BIT(MICFIL_VAD0_CTRL1_IE_SHIFT)
+#define MICFIL_VAD0_CTRL1_IE BIT(MICFIL_VAD0_CTRL1_IE_SHIFT)
+#define MICFIL_VAD0_CTRL1_RST_SHIFT 1
+#define MICFIL_VAD0_CTRL1_RST_MASK BIT(MICFIL_VAD0_CTRL1_RST_SHIFT)
+#define MICFIL_VAD0_CTRL1_RST BIT(MICFIL_VAD0_CTRL1_RST_SHIFT)
+#define MICFIL_VAD0_CTRL1_EN_SHIFT 0
+#define MICFIL_VAD0_CTRL1_EN_MASK BIT(MICFIL_VAD0_CTRL1_EN_SHIFT)
+#define MICFIL_VAD0_CTRL1_EN BIT(MICFIL_VAD0_CTRL1_EN_SHIFT)
+
+/* MICFIL HWVAD0 Control 2 Register -- REG_MICFIL_VAD0_CTRL2 */
+#define MICFIL_VAD0_CTRL2_FRENDIS_SHIFT 31
+#define MICFIL_VAD0_CTRL2_FRENDIS_MASK BIT(MICFIL_VAD0_CTRL2_FRENDIS_SHIFT)
+#define MICFIL_VAD0_CTRL2_FRENDIS BIT(MICFIL_VAD0_CTRL2_FRENDIS_SHIFT)
+#define MICFIL_VAD0_CTRL2_PREFEN_SHIFT 30
+#define MICFIL_VAD0_CTRL2_PREFEN_MASK BIT(MICFIL_VAD0_CTRL2_PREFEN_SHIFT)
+#define MICFIL_VAD0_CTRL2_PREFEN BIT(MICFIL_VAD0_CTRL2_PREFEN_SHIFT)
+#define MICFIL_VAD0_CTRL2_FOUTDIS_SHIFT 28
+#define MICFIL_VAD0_CTRL2_FOUTDIS_MASK BIT(MICFIL_VAD0_CTRL2_FOUTDIS_SHIFT)
+#define MICFIL_VAD0_CTRL2_FOUTDIS BIT(MICFIL_VAD0_CTRL2_FOUTDIS_SHIFT)
+#define MICFIL_VAD0_CTRL2_FRAMET_SHIFT 16
+#define MICFIL_VAD0_CTRL2_FRAMET_WIDTH 6
+#define MICFIL_VAD0_CTRL2_FRAMET_MASK ((BIT(MICFIL_VAD0_CTRL2_FRAMET_WIDTH) - 1) \
+ << MICFIL_VAD0_CTRL2_FRAMET_SHIFT)
+#define MICFIL_VAD0_CTRL2_FRAMET(v) (((v) << MICFIL_VAD0_CTRL2_FRAMET_SHIFT) \
+ & MICFIL_VAD0_CTRL2_FRAMET_MASK)
+#define MICFIL_VAD0_CTRL2_INPGAIN_SHIFT 8
+#define MICFIL_VAD0_CTRL2_INPGAIN_WIDTH 4
+#define MICFIL_VAD0_CTRL2_INPGAIN_MASK ((BIT(MICFIL_VAD0_CTRL2_INPGAIN_WIDTH) - 1) \
+ << MICFIL_VAD0_CTRL2_INPGAIN_SHIFT)
+#define MICFIL_VAD0_CTRL2_INPGAIN(v) (((v) << MICFIL_VAD0_CTRL2_INPGAIN_SHIFT) \
+ & MICFIL_VAD0_CTRL2_INPGAIN_MASK)
+#define MICFIL_VAD0_CTRL2_HPF_SHIFT 0
+#define MICFIL_VAD0_CTRL2_HPF_WIDTH 2
+#define MICFIL_VAD0_CTRL2_HPF_MASK ((BIT(MICFIL_VAD0_CTRL2_HPF_WIDTH) - 1) \
+ << MICFIL_VAD0_CTRL2_HPF_SHIFT)
+#define MICFIL_VAD0_CTRL2_HPF(v) (((v) << MICFIL_VAD0_CTRL2_HPF_SHIFT) \
+ & MICFIL_VAD0_CTRL2_HPF_MASK)
+
+/* MICFIL HWVAD0 Signal CONFIG Register -- REG_MICFIL_VAD0_SCONFIG */
+#define MICFIL_VAD0_SCONFIG_SFILEN_SHIFT 31
+#define MICFIL_VAD0_SCONFIG_SFILEN_MASK BIT(MICFIL_VAD0_SCONFIG_SFILEN_SHIFT)
+#define MICFIL_VAD0_SCONFIG_SFILEN BIT(MICFIL_VAD0_SCONFIG_SFILEN_SHIFT)
+#define MICFIL_VAD0_SCONFIG_SMAXEN_SHIFT 30
+#define MICFIL_VAD0_SCONFIG_SMAXEN_MASK BIT(MICFIL_VAD0_SCONFIG_SMAXEN_SHIFT)
+#define MICFIL_VAD0_SCONFIG_SMAXEN BIT(MICFIL_VAD0_SCONFIG_SMAXEN_SHIFT)
+#define MICFIL_VAD0_SCONFIG_SGAIN_SHIFT 0
+#define MICFIL_VAD0_SCONFIG_SGAIN_WIDTH 4
+#define MICFIL_VAD0_SCONFIG_SGAIN_MASK ((BIT(MICFIL_VAD0_SCONFIG_SGAIN_WIDTH) - 1) \
+ << MICFIL_VAD0_SCONFIG_SGAIN_SHIFT)
+#define MICFIL_VAD0_SCONFIG_SGAIN(v) (((v) << MICFIL_VAD0_SCONFIG_SGAIN_SHIFT) \
+ & MICFIL_VAD0_SCONFIG_SGAIN_MASK)
+
+/* MICFIL HWVAD0 Noise CONFIG Register -- REG_MICFIL_VAD0_NCONFIG */
+#define MICFIL_VAD0_NCONFIG_NFILAUT_SHIFT 31
+#define MICFIL_VAD0_NCONFIG_NFILAUT_MASK BIT(MICFIL_VAD0_NCONFIG_NFILAUT_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NFILAUT BIT(MICFIL_VAD0_NCONFIG_NFILAUT_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NMINEN_SHIFT 30
+#define MICFIL_VAD0_NCONFIG_NMINEN_MASK BIT(MICFIL_VAD0_NCONFIG_NMINEN_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NMINEN BIT(MICFIL_VAD0_NCONFIG_NMINEN_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NDECEN_SHIFT 29
+#define MICFIL_VAD0_NCONFIG_NDECEN_MASK BIT(MICFIL_VAD0_NCONFIG_NDECEN_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NDECEN BIT(MICFIL_VAD0_NCONFIG_NDECEN_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NOREN_SHIFT 28
+#define MICFIL_VAD0_NCONFIG_NOREN BIT(MICFIL_VAD0_NCONFIG_NOREN_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NFILADJ_SHIFT 8
+#define MICFIL_VAD0_NCONFIG_NFILADJ_WIDTH 5
+#define MICFIL_VAD0_NCONFIG_NFILADJ_MASK ((BIT(MICFIL_VAD0_NCONFIG_NFILADJ_WIDTH) - 1) \
+ << MICFIL_VAD0_NCONFIG_NFILADJ_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NFILADJ(v) (((v) << MICFIL_VAD0_NCONFIG_NFILADJ_SHIFT) \
+ & MICFIL_VAD0_NCONFIG_NFILADJ_MASK)
+#define MICFIL_VAD0_NCONFIG_NGAIN_SHIFT 0
+#define MICFIL_VAD0_NCONFIG_NGAIN_WIDTH 4
+#define MICFIL_VAD0_NCONFIG_NGAIN_MASK ((BIT(MICFIL_VAD0_NCONFIG_NGAIN_WIDTH) - 1) \
+ << MICFIL_VAD0_NCONFIG_NGAIN_SHIFT)
+#define MICFIL_VAD0_NCONFIG_NGAIN(v) (((v) << MICFIL_VAD0_NCONFIG_NGAIN_SHIFT) \
+ & MICFIL_VAD0_NCONFIG_NGAIN_MASK)
+
+/* MICFIL HWVAD0 Zero-Crossing Detector - REG_MICFIL_VAD0_ZCD */
+#define MICFIL_VAD0_ZCD_ZCDTH_SHIFT 16
+#define MICFIL_VAD0_ZCD_ZCDTH_WIDTH 10
+#define MICFIL_VAD0_ZCD_ZCDTH_MASK ((BIT(MICFIL_VAD0_ZCD_ZCDTH_WIDTH) - 1) \
+ << MICFIL_VAD0_ZCD_ZCDTH_SHIFT)
+#define MICFIL_VAD0_ZCD_ZCDTH(v) (((v) << MICFIL_VAD0_ZCD_ZCDTH_SHIFT)\
+ & MICFIL_VAD0_ZCD_ZCDTH_MASK)
+#define MICFIL_VAD0_ZCD_ZCDADJ_SHIFT 8
+#define MICFIL_VAD0_ZCD_ZCDADJ_WIDTH 4
+#define MICFIL_VAD0_ZCD_ZCDADJ_MASK ((BIT(MICFIL_VAD0_ZCD_ZCDADJ_WIDTH) - 1)\
+ << MICFIL_VAD0_ZCD_ZCDADJ_SHIFT)
+#define MICFIL_VAD0_ZCD_ZCDADJ(v) (((v) << MICFIL_VAD0_ZCD_ZCDADJ_SHIFT)\
+ & MICFIL_VAD0_ZCD_ZCDADJ_MASK)
+#define MICFIL_VAD0_ZCD_ZCDAND_SHIFT 4
+#define MICFIL_VAD0_ZCD_ZCDAND_MASK BIT(MICFIL_VAD0_ZCD_ZCDAND_SHIFT)
+#define MICFIL_VAD0_ZCD_ZCDAND BIT(MICFIL_VAD0_ZCD_ZCDAND_SHIFT)
+#define MICFIL_VAD0_ZCD_ZCDAUT_SHIFT 2
+#define MICFIL_VAD0_ZCD_ZCDAUT_MASK BIT(MICFIL_VAD0_ZCD_ZCDAUT_SHIFT)
+#define MICFIL_VAD0_ZCD_ZCDAUT BIT(MICFIL_VAD0_ZCD_ZCDAUT_SHIFT)
+#define MICFIL_VAD0_ZCD_ZCDEN_SHIFT 0
+#define MICFIL_VAD0_ZCD_ZCDEN_MASK BIT(MICFIL_VAD0_ZCD_ZCDEN_SHIFT)
+#define MICFIL_VAD0_ZCD_ZCDEN BIT(MICFIL_VAD0_ZCD_ZCDEN_SHIFT)
+
+/* MICFIL HWVAD0 Status Register - REG_MICFIL_VAD0_STAT */
+#define MICFIL_VAD0_STAT_INITF_SHIFT 31
+#define MICFIL_VAD0_STAT_INITF_MASK BIT(MICFIL_VAD0_STAT_INITF_SHIFT)
+#define MICFIL_VAD0_STAT_INITF BIT(MICFIL_VAD0_STAT_INITF_SHIFT)
+#define MICFIL_VAD0_STAT_INSATF_SHIFT 16
+#define MICFIL_VAD0_STAT_INSATF_MASK BIT(MICFIL_VAD0_STAT_INSATF_SHIFT)
+#define MICFIL_VAD0_STAT_INSATF BIT(MICFIL_VAD0_STAT_INSATF_SHIFT)
+#define MICFIL_VAD0_STAT_EF_SHIFT 15
+#define MICFIL_VAD0_STAT_EF_MASK BIT(MICFIL_VAD0_STAT_EF_SHIFT)
+#define MICFIL_VAD0_STAT_EF BIT(MICFIL_VAD0_STAT_EF_SHIFT)
+#define MICFIL_VAD0_STAT_IF_SHIFT 0
+#define MICFIL_VAD0_STAT_IF_MASK BIT(MICFIL_VAD0_STAT_IF_SHIFT)
+#define MICFIL_VAD0_STAT_IF BIT(MICFIL_VAD0_STAT_IF_SHIFT)
+
+/* MICFIL Output Control Register */
+#define MICFIL_OUTGAIN_CHX_SHIFT(v) (4 * (v))
+
+/* Constants */
+#define MICFIL_DMA_IRQ_DISABLED(v) ((v) & MICFIL_CTRL1_DISEL_MASK)
+#define MICFIL_DMA_ENABLED(v) ((0x1 << MICFIL_CTRL1_DISEL_SHIFT) \
+ == ((v) & MICFIL_CTRL1_DISEL_MASK))
+#define MICFIL_IRQ_ENABLED(v) ((0x2 << MICFIL_CTRL1_DISEL_SHIFT) \
+ == ((v) & MICFIL_CTRL1_DISEL_MASK))
+#define MICFIL_OUTPUT_CHANNELS 8
+#define MICFIL_FIFO_NUM 8
+
+#define FIFO_PTRWID 3
+#define FIFO_LEN BIT(FIFO_PTRWID)
+
+#define MICFIL_IRQ_LINES 2
+#define MICFIL_MAX_RETRY 25
+#define MICFIL_SLEEP_MIN 90000 /* in us */
+#define MICFIL_SLEEP_MAX 100000 /* in us */
+#define MICFIL_DMA_MAXBURST_RX 6
+#define MICFIL_CTRL2_OSR_DEFAULT (0 << MICFIL_CTRL2_CICOSR_SHIFT)
+
+#endif /* _FSL_MICFIL_H */
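Every field in this header follows the same SHIFT/WIDTH/MASK/value-macro pattern. A small standalone sketch, with the shift and width constants copied from the CTRL2 definitions above and everything else purely illustrative, shows how such macros compose and decode a register word:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))

/* same pattern as the CTRL2 field macros above */
#define CICOSR_SHIFT	16
#define CICOSR_WIDTH	4
#define CICOSR_MASK	((BIT(CICOSR_WIDTH) - 1) << CICOSR_SHIFT)
#define CICOSR(v)	(((v) << CICOSR_SHIFT) & CICOSR_MASK)

#define CLKDIV_SHIFT	0
#define CLKDIV_WIDTH	8
#define CLKDIV_MASK	((BIT(CLKDIV_WIDTH) - 1) << CLKDIV_SHIFT)
#define CLKDIV(v)	(((v) << CLKDIV_SHIFT) & CLKDIV_MASK)

int main(void)
{
	/* compose a CTRL2 value: CICOSR field of 0 (OSR = 16), CLKDIV of 4 */
	uint32_t ctrl2 = CICOSR(0) | CLKDIV(4);

	/* decode it again, as get_pdm_clk()/get_clk_div() do after regmap_read() */
	unsigned int osr = 16 - ((ctrl2 & CICOSR_MASK) >> CICOSR_SHIFT);
	unsigned int div = (ctrl2 & CLKDIV_MASK) >> CLKDIV_SHIFT;

	printf("CTRL2 = 0x%08x, OSR = %u, CLKDIV = %u\n", ctrl2, osr, div);
	return 0;
}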
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 4163f2cfc06f..db9e0872f73d 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -62,10 +62,10 @@ static irqreturn_t fsl_sai_isr(int irq, void *devid)
dev_dbg(dev, "isr: Start of Tx word detected\n");
if (flags & FSL_SAI_CSR_SEF)
- dev_warn(dev, "isr: Tx Frame sync error detected\n");
+ dev_dbg(dev, "isr: Tx Frame sync error detected\n");
if (flags & FSL_SAI_CSR_FEF) {
- dev_warn(dev, "isr: Transmit underrun detected\n");
+ dev_dbg(dev, "isr: Transmit underrun detected\n");
/* FIFO reset for safety */
xcsr |= FSL_SAI_CSR_FR;
}
@@ -96,10 +96,10 @@ irq_rx:
dev_dbg(dev, "isr: Start of Rx word detected\n");
if (flags & FSL_SAI_CSR_SEF)
- dev_warn(dev, "isr: Rx Frame sync error detected\n");
+ dev_dbg(dev, "isr: Rx Frame sync error detected\n");
if (flags & FSL_SAI_CSR_FEF) {
- dev_warn(dev, "isr: Receive overflow detected\n");
+ dev_dbg(dev, "isr: Receive overflow detected\n");
/* FIFO reset for safety */
xcsr |= FSL_SAI_CSR_FR;
}
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 740b90df44bb..4842e6df9a2d 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -96,7 +96,7 @@ struct fsl_spdif_priv {
bool dpll_locked;
u32 txrate[SPDIF_TXRATE_MAX];
u8 txclk_df[SPDIF_TXRATE_MAX];
- u8 sysclk_df[SPDIF_TXRATE_MAX];
+ u16 sysclk_df[SPDIF_TXRATE_MAX];
u8 txclk_src[SPDIF_TXRATE_MAX];
u8 rxclk_src;
struct clk *txclk[SPDIF_TXRATE_MAX];
@@ -376,7 +376,8 @@ static int spdif_set_sample_rate(struct snd_pcm_substream *substream,
struct platform_device *pdev = spdif_priv->pdev;
unsigned long csfs = 0;
u32 stc, mask, rate;
- u8 clk, txclk_df, sysclk_df;
+ u16 sysclk_df;
+ u8 clk, txclk_df;
int ret;
switch (sample_rate) {
@@ -1109,8 +1110,9 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
static const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk);
u64 rate_ideal, rate_actual, sub;
- u32 sysclk_dfmin, sysclk_dfmax;
- u32 txclk_df, sysclk_df, arate;
+ u32 arate;
+ u16 sysclk_dfmin, sysclk_dfmax, sysclk_df;
+ u8 txclk_df;
/* The sysclk has an extra divisor [2, 512] */
sysclk_dfmin = is_sysclk ? 2 : 1;
@@ -1320,7 +1322,7 @@ static int fsl_spdif_probe(struct platform_device *pdev)
}
ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
return ret;
diff --git a/sound/soc/fsl/fsl_spdif.h b/sound/soc/fsl/fsl_spdif.h
index 7666dabaccfd..e6c61e07bc1a 100644
--- a/sound/soc/fsl/fsl_spdif.h
+++ b/sound/soc/fsl/fsl_spdif.h
@@ -152,7 +152,7 @@ enum spdif_gainsel {
#define STC_TXCLK_ALL_EN_MASK (1 << STC_TXCLK_ALL_EN_OFFSET)
#define STC_TXCLK_ALL_EN (1 << STC_TXCLK_ALL_EN_OFFSET)
#define STC_TXCLK_DF_OFFSET 0
-#define STC_TXCLK_DF_MASK (0x7ff << STC_TXCLK_DF_OFFSET)
+#define STC_TXCLK_DF_MASK (0x7f << STC_TXCLK_DF_OFFSET)
#define STC_TXCLK_DF(x) ((((x) - 1) << STC_TXCLK_DF_OFFSET) & STC_TXCLK_DF_MASK)
#define STC_TXCLK_SRC_MAX 8
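The fsl_spdif changes above widen the sysclk_df fields from u8 to u16 because, as the code notes, the sysclk divisor ranges over [2, 512]. A trivial standalone illustration of the truncation the wider type avoids (the values are only for demonstration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int div = 512;	/* upper end of the sysclk divisor range */
	uint8_t  df8  = div;	/* wraps to 0: 512 does not fit in 8 bits */
	uint16_t df16 = div;	/* holds the full range, as in the patch */

	printf("u8: %u, u16: %u\n", df8, df16);
	return 0;
}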
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0a648229e643..09b2967befd9 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1439,8 +1439,10 @@ static int fsl_ssi_probe_from_dt(struct fsl_ssi *ssi)
* different name to register the device.
*/
if (!ssi->card_name[0] && of_get_property(np, "codec-handle", NULL)) {
- sprop = of_get_property(of_find_node_by_path("/"),
- "compatible", NULL);
+ struct device_node *root = of_find_node_by_path("/");
+
+ sprop = of_get_property(root, "compatible", NULL);
+ of_node_put(root);
/* Strip "fsl," in the compatible name if applicable */
p = strrchr(sprop, ',');
if (p)
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 392d5eef356d..99e07b01a2ce 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+ ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
pdcr, ptcr);
if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxFS output from %s, ",
audmux_port_string((ptcr >> 27) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxFS input, ");
if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxClk output from %s",
audmux_port_string((ptcr >> 22) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxClk input");
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"Port is symmetric");
} else {
if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxFS output from %s, ",
audmux_port_string((ptcr >> 17) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxFS input, ");
if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxClk output from %s",
audmux_port_string((ptcr >> 12) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxClk input");
}
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"\nData received from %s\n",
audmux_port_string((pdcr >> 13) & 0x7));
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index c29200cf755a..bf8597f57dce 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -104,14 +104,16 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
ssi_pdev = of_find_device_by_node(ssi_np);
if (!ssi_pdev) {
- dev_err(&pdev->dev, "failed to find SSI platform device\n");
+ dev_dbg(&pdev->dev, "failed to find SSI platform device\n");
ret = -EPROBE_DEFER;
goto fail;
}
+ put_device(&ssi_pdev->dev);
codec_dev = of_find_i2c_device_by_node(codec_np);
if (!codec_dev) {
- dev_err(&pdev->dev, "failed to find codec platform device\n");
- return -EPROBE_DEFER;
+ dev_dbg(&pdev->dev, "failed to find codec platform device\n");
+ ret = -EPROBE_DEFER;
+ goto fail;
}
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
@@ -156,7 +158,9 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
goto fail;
}
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
index fb896b2c9ba3..797d66e43d49 100644
--- a/sound/soc/fsl/imx-spdif.c
+++ b/sound/soc/fsl/imx-spdif.c
@@ -67,10 +67,8 @@ static int imx_spdif_audio_probe(struct platform_device *pdev)
goto end;
ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
- if (ret) {
+ if (ret && ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "snd_soc_register_card failed: %d\n", ret);
- goto end;
- }
end:
of_node_put(spdif_np);
diff --git a/sound/soc/generic/Kconfig b/sound/soc/generic/Kconfig
index 92c2cf06f40a..83f1243145b0 100644
--- a/sound/soc/generic/Kconfig
+++ b/sound/soc/generic/Kconfig
@@ -8,14 +8,6 @@ config SND_SIMPLE_CARD
This option enables generic simple sound card support.
It also supports DPCM of multi CPU single Codec systems.
-config SND_SIMPLE_SCU_CARD
- tristate "ASoC Simple SCU sound card support"
- depends on OF
- select SND_SIMPLE_CARD_UTILS
- help
- This option enables generic simple SCU sound card support.
- It supports DPCM of multi CPU single Codec system.
-
config SND_AUDIO_GRAPH_CARD
tristate "ASoC Audio Graph sound card support"
depends on OF
@@ -24,12 +16,3 @@ config SND_AUDIO_GRAPH_CARD
This option enables generic simple sound card support
with OF-graph DT bindings.
It also supports DPCM of multi CPU single Codec systems.
-
-config SND_AUDIO_GRAPH_SCU_CARD
- tristate "ASoC Audio Graph SCU sound card support"
- depends on OF
- select SND_SIMPLE_CARD_UTILS
- help
- This option enables generic simple SCU sound card support
- with OF-graph DT bindings.
- It supports DPCM of multi CPU single Codec ststem.
diff --git a/sound/soc/generic/Makefile b/sound/soc/generic/Makefile
index 9dec293a4c4d..21c29e5e0671 100644
--- a/sound/soc/generic/Makefile
+++ b/sound/soc/generic/Makefile
@@ -1,12 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
snd-soc-simple-card-utils-objs := simple-card-utils.o
snd-soc-simple-card-objs := simple-card.o
-snd-soc-simple-scu-card-objs := simple-scu-card.o
snd-soc-audio-graph-card-objs := audio-graph-card.o
-snd-soc-audio-graph-scu-card-objs := audio-graph-scu-card.o
obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) += snd-soc-simple-card-utils.o
obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o
-obj-$(CONFIG_SND_SIMPLE_SCU_CARD) += snd-soc-simple-scu-card.o
obj-$(CONFIG_SND_AUDIO_GRAPH_CARD) += snd-soc-audio-graph-card.o
-obj-$(CONFIG_SND_AUDIO_GRAPH_SCU_CARD) += snd-soc-audio-graph-scu-card.o
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 0d6144560a1e..bb12351330e8 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -20,13 +20,13 @@
#include <linux/string.h>
#include <sound/simple_card_utils.h>
-struct graph_card_data {
+struct graph_priv {
struct snd_soc_card snd_card;
struct graph_dai_props {
struct asoc_simple_dai *cpu_dai;
struct asoc_simple_dai *codec_dai;
struct snd_soc_dai_link_component codecs; /* single codec */
- struct snd_soc_dai_link_component platform;
+ struct snd_soc_dai_link_component platforms;
struct asoc_simple_card_data adata;
struct snd_soc_codec_conf *codec_conf;
unsigned int mclk_fs;
@@ -39,6 +39,13 @@ struct graph_card_data {
struct gpio_desc *pa_gpio;
};
+struct link_info {
+ int dais; /* number of dai */
+ int link; /* number of link */
+ int conf; /* number of codec_conf */
+ int cpu; /* turn for CPU / Codec */
+};
+
#define graph_priv_to_card(priv) (&(priv)->snd_card)
#define graph_priv_to_props(priv, i) ((priv)->dai_props + (i))
#define graph_priv_to_dev(priv) (graph_priv_to_card(priv)->dev)
@@ -46,12 +53,12 @@ struct graph_card_data {
#define PREFIX "audio-graph-card,"
-static int asoc_graph_card_outdrv_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event)
+static int graph_outdrv_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
{
struct snd_soc_dapm_context *dapm = w->dapm;
- struct graph_card_data *priv = snd_soc_card_get_drvdata(dapm->card);
+ struct graph_priv *priv = snd_soc_card_get_drvdata(dapm->card);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -67,16 +74,16 @@ static int asoc_graph_card_outdrv_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static const struct snd_soc_dapm_widget asoc_graph_card_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget graph_dapm_widgets[] = {
SND_SOC_DAPM_OUT_DRV_E("Amplifier", SND_SOC_NOPM,
- 0, 0, NULL, 0, asoc_graph_card_outdrv_event,
+ 0, 0, NULL, 0, graph_outdrv_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
};
-static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
+static int graph_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
int ret;
@@ -91,10 +98,10 @@ static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
return ret;
}
-static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
+static void graph_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
asoc_simple_card_clk_disable(dai_props->cpu_dai);
@@ -102,13 +109,13 @@ static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
asoc_simple_card_clk_disable(dai_props->codec_dai);
}
-static int asoc_graph_card_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int graph_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
unsigned int mclk, mclk_fs = 0;
int ret = 0;
@@ -133,15 +140,15 @@ err:
return ret;
}
-static const struct snd_soc_ops asoc_graph_card_ops = {
- .startup = asoc_graph_card_startup,
- .shutdown = asoc_graph_card_shutdown,
- .hw_params = asoc_graph_card_hw_params,
+static const struct snd_soc_ops graph_ops = {
+ .startup = graph_startup,
+ .shutdown = graph_shutdown,
+ .hw_params = graph_hw_params,
};
-static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
+static int graph_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
int ret = 0;
@@ -158,10 +165,10 @@ static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static int asoc_graph_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
+static int graph_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
{
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
asoc_simple_card_convert_fixup(&dai_props->adata, params);
@@ -169,41 +176,64 @@ static int asoc_graph_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static int asoc_graph_card_dai_link_of_dpcm(struct device_node *top,
- struct device_node *cpu_ep,
- struct device_node *codec_ep,
- struct graph_card_data *priv,
- int *dai_idx, int link_idx,
- int *conf_idx, int is_cpu)
+static void graph_get_conversion(struct device *dev,
+ struct device_node *ep,
+ struct asoc_simple_card_data *adata)
{
- struct device *dev = graph_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, link_idx);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, link_idx);
- struct device_node *ep = is_cpu ? cpu_ep : codec_ep;
+ struct device_node *top = dev->of_node;
struct device_node *port = of_get_parent(ep);
struct device_node *ports = of_get_parent(port);
struct device_node *node = of_graph_get_port_parent(ep);
+
+ asoc_simple_card_parse_convert(dev, top, NULL, adata);
+ asoc_simple_card_parse_convert(dev, node, PREFIX, adata);
+ asoc_simple_card_parse_convert(dev, ports, NULL, adata);
+ asoc_simple_card_parse_convert(dev, port, NULL, adata);
+ asoc_simple_card_parse_convert(dev, ep, NULL, adata);
+}
+
+static int graph_dai_link_of_dpcm(struct graph_priv *priv,
+ struct device_node *cpu_ep,
+ struct device_node *codec_ep,
+ struct link_info *li,
+ int dup_codec)
+{
+ struct device *dev = graph_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, li->link);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, li->link);
+ struct device_node *top = dev->of_node;
+ struct device_node *ep = li->cpu ? cpu_ep : codec_ep;
+ struct device_node *port;
+ struct device_node *ports;
+ struct device_node *node;
struct asoc_simple_dai *dai;
struct snd_soc_dai_link_component *codecs = dai_link->codecs;
int ret;
- dev_dbg(dev, "link_of DPCM (for %s)\n", is_cpu ? "CPU" : "Codec");
+ /* Handle all CPU endpoints, and only the 1st Codec endpoint */
+ if (!li->cpu && dup_codec)
+ return 0;
+
+ port = of_get_parent(ep);
+ ports = of_get_parent(port);
+ node = of_graph_get_port_parent(ep);
+
+ li->link++;
+
+ dev_dbg(dev, "link_of DPCM (%pOF)\n", ep);
of_property_read_u32(top, "mclk-fs", &dai_props->mclk_fs);
of_property_read_u32(ports, "mclk-fs", &dai_props->mclk_fs);
of_property_read_u32(port, "mclk-fs", &dai_props->mclk_fs);
of_property_read_u32(ep, "mclk-fs", &dai_props->mclk_fs);
- asoc_simple_card_parse_convert(dev, top, NULL, &dai_props->adata);
- asoc_simple_card_parse_convert(dev, node, PREFIX, &dai_props->adata);
- asoc_simple_card_parse_convert(dev, ports, NULL, &dai_props->adata);
- asoc_simple_card_parse_convert(dev, port, NULL, &dai_props->adata);
- asoc_simple_card_parse_convert(dev, ep, NULL, &dai_props->adata);
+ graph_get_conversion(dev, ep, &dai_props->adata);
of_node_put(ports);
of_node_put(port);
+ of_node_put(node);
- if (is_cpu) {
+ if (li->cpu) {
/* BE is dummy */
codecs->of_node = NULL;
@@ -215,7 +245,7 @@ static int asoc_graph_card_dai_link_of_dpcm(struct device_node *top,
dai_link->dpcm_merged_format = 1;
dai =
- dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+ dai_props->cpu_dai = &priv->dais[li->dais++];
ret = asoc_simple_card_parse_graph_cpu(ep, dai_link);
if (ret)
@@ -244,13 +274,13 @@ static int asoc_graph_card_dai_link_of_dpcm(struct device_node *top,
/* BE settings */
dai_link->no_pcm = 1;
- dai_link->be_hw_params_fixup = asoc_graph_card_be_hw_params_fixup;
+ dai_link->be_hw_params_fixup = graph_be_hw_params_fixup;
dai =
- dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+ dai_props->codec_dai = &priv->dais[li->dais++];
cconf =
- dai_props->codec_conf = &priv->codec_conf[(*conf_idx)++];
+ dai_props->codec_conf = &priv->codec_conf[li->conf++];
ret = asoc_simple_card_parse_graph_codec(ep, dai_link);
if (ret < 0)
@@ -277,14 +307,12 @@ static int asoc_graph_card_dai_link_of_dpcm(struct device_node *top,
"prefix");
}
+ asoc_simple_card_canonicalize_platform(dai_link);
+
ret = asoc_simple_card_of_parse_tdm(ep, dai);
if (ret)
return ret;
- ret = asoc_simple_card_canonicalize_dailink(dai_link);
- if (ret < 0)
- return ret;
-
ret = asoc_simple_card_parse_daifmt(dev, cpu_ep, codec_ep,
NULL, &dai_link->dai_fmt);
if (ret < 0)
@@ -292,35 +320,46 @@ static int asoc_graph_card_dai_link_of_dpcm(struct device_node *top,
dai_link->dpcm_playback = 1;
dai_link->dpcm_capture = 1;
- dai_link->ops = &asoc_graph_card_ops;
- dai_link->init = asoc_graph_card_dai_init;
+ dai_link->ops = &graph_ops;
+ dai_link->init = graph_dai_init;
return 0;
}
-static int asoc_graph_card_dai_link_of(struct device_node *top,
- struct device_node *cpu_ep,
- struct device_node *codec_ep,
- struct graph_card_data *priv,
- int *dai_idx, int link_idx)
+static int graph_dai_link_of(struct graph_priv *priv,
+ struct device_node *cpu_ep,
+ struct device_node *codec_ep,
+ struct link_info *li)
{
struct device *dev = graph_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, link_idx);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, link_idx);
- struct device_node *cpu_port = of_get_parent(cpu_ep);
- struct device_node *codec_port = of_get_parent(codec_ep);
- struct device_node *cpu_ports = of_get_parent(cpu_port);
- struct device_node *codec_ports = of_get_parent(codec_port);
+ struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, li->link);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, li->link);
+ struct device_node *top = dev->of_node;
+ struct device_node *cpu_port;
+ struct device_node *cpu_ports;
+ struct device_node *codec_port;
+ struct device_node *codec_ports;
struct asoc_simple_dai *cpu_dai;
struct asoc_simple_dai *codec_dai;
int ret;
- dev_dbg(dev, "link_of\n");
+ /* Handle this only on the CPU turn */
+ if (!li->cpu)
+ return 0;
+
+ cpu_port = of_get_parent(cpu_ep);
+ cpu_ports = of_get_parent(cpu_port);
+ codec_port = of_get_parent(codec_ep);
+ codec_ports = of_get_parent(codec_port);
+
+ dev_dbg(dev, "link_of (%pOF)\n", cpu_ep);
+
+ li->link++;
cpu_dai =
- dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+ dai_props->cpu_dai = &priv->dais[li->dais++];
codec_dai =
- dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+ dai_props->codec_dai = &priv->dais[li->dais++];
/* Factor to mclk, used in hw_params() */
of_property_read_u32(top, "mclk-fs", &dai_props->mclk_fs);
@@ -364,10 +403,6 @@ static int asoc_graph_card_dai_link_of(struct device_node *top,
if (ret < 0)
return ret;
- ret = asoc_simple_card_canonicalize_dailink(dai_link);
- if (ret < 0)
- return ret;
-
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"%s-%s",
dai_link->cpu_dai_name,
@@ -375,30 +410,89 @@ static int asoc_graph_card_dai_link_of(struct device_node *top,
if (ret < 0)
return ret;
- dai_link->ops = &asoc_graph_card_ops;
- dai_link->init = asoc_graph_card_dai_init;
+ dai_link->ops = &graph_ops;
+ dai_link->init = graph_dai_init;
+ asoc_simple_card_canonicalize_platform(dai_link);
asoc_simple_card_canonicalize_cpu(dai_link,
of_graph_get_endpoint_count(dai_link->cpu_of_node) == 1);
return 0;
}
-static int asoc_graph_card_parse_of(struct graph_card_data *priv)
+static int graph_for_each_link(struct graph_priv *priv,
+ struct link_info *li,
+ int (*func_noml)(struct graph_priv *priv,
+ struct device_node *cpu_ep,
+ struct device_node *codec_ep,
+ struct link_info *li),
+ int (*func_dpcm)(struct graph_priv *priv,
+ struct device_node *cpu_ep,
+ struct device_node *codec_ep,
+ struct link_info *li, int dup_codec))
{
struct of_phandle_iterator it;
struct device *dev = graph_priv_to_dev(priv);
- struct snd_soc_card *card = graph_priv_to_card(priv);
- struct device_node *top = dev->of_node;
- struct device_node *node = top;
+ struct device_node *node = dev->of_node;
struct device_node *cpu_port;
- struct device_node *cpu_ep = NULL;
- struct device_node *codec_ep = NULL;
- struct device_node *codec_port = NULL;
- struct device_node *codec_port_old = NULL;
+ struct device_node *cpu_ep;
+ struct device_node *codec_ep;
+ struct device_node *codec_port;
+ struct device_node *codec_port_old = NULL;
+ struct asoc_simple_card_data adata;
int rc, ret;
- int link_idx, dai_idx, conf_idx;
- int cpu;
+
+ /* loop over all listed CPU ports */
+ of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
+ cpu_port = it.node;
+ cpu_ep = NULL;
+
+ /* loop over all CPU endpoints */
+ while (1) {
+ cpu_ep = of_get_next_child(cpu_port, cpu_ep);
+ if (!cpu_ep)
+ break;
+
+ /* get codec */
+ codec_ep = of_graph_get_remote_endpoint(cpu_ep);
+ codec_port = of_get_parent(codec_ep);
+
+ of_node_put(codec_ep);
+ of_node_put(codec_port);
+
+ /* get convert-xxx property */
+ memset(&adata, 0, sizeof(adata));
+ graph_get_conversion(dev, codec_ep, &adata);
+ graph_get_conversion(dev, cpu_ep, &adata);
+
+ /*
+ * It is DPCM
+ * if the Codec port has multiple endpoints,
+ * or has a convert-xxx property
+ */
+ if ((of_get_child_count(codec_port) > 1) ||
+ adata.convert_rate || adata.convert_channels)
+ ret = func_dpcm(priv, cpu_ep, codec_ep, li,
+ (codec_port_old == codec_port));
+ /* else normal sound */
+ else
+ ret = func_noml(priv, cpu_ep, codec_ep, li);
+
+ if (ret < 0)
+ return ret;
+
+ codec_port_old = codec_port;
+ }
+ }
+
+ return 0;
+}
+
+static int graph_parse_of(struct graph_priv *priv)
+{
+ struct snd_soc_card *card = graph_priv_to_card(priv);
+ struct link_info li;
+ int ret;
ret = asoc_simple_card_of_parse_widgets(card, NULL);
if (ret < 0)
@@ -408,11 +502,8 @@ static int asoc_graph_card_parse_of(struct graph_card_data *priv)
if (ret < 0)
return ret;
- link_idx = 0;
- dai_idx = 0;
- conf_idx = 0;
- codec_port_old = NULL;
- for (cpu = 1; cpu >= 0; cpu--) {
+ memset(&li, 0, sizeof(li));
+ for (li.cpu = 1; li.cpu >= 0; li.cpu--) {
/*
* Detect all CPUs first, and detect all Codecs second.
*
@@ -425,66 +516,57 @@ static int asoc_graph_card_parse_of(struct graph_card_data *priv)
* To avoid random sub-device numbering,
* detect "dummy-Codec" in last;
*/
- of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
- cpu_port = it.node;
- cpu_ep = NULL;
- while (1) {
- cpu_ep = of_get_next_child(cpu_port, cpu_ep);
- if (!cpu_ep)
- break;
-
- codec_ep = of_graph_get_remote_endpoint(cpu_ep);
- codec_port = of_get_parent(codec_ep);
-
- of_node_put(codec_ep);
- of_node_put(codec_port);
-
- dev_dbg(dev, "%pOFf <-> %pOFf\n", cpu_ep, codec_ep);
-
- if (of_get_child_count(codec_port) > 1) {
- /*
- * for DPCM sound
- */
- if (!cpu) {
- if (codec_port_old == codec_port)
- continue;
- codec_port_old = codec_port;
- }
- ret = asoc_graph_card_dai_link_of_dpcm(
- top, cpu_ep, codec_ep, priv,
- &dai_idx, link_idx++,
- &conf_idx, cpu);
- } else if (cpu) {
- /*
- * for Normal sound
- */
- ret = asoc_graph_card_dai_link_of(
- top, cpu_ep, codec_ep, priv,
- &dai_idx, link_idx++);
- }
- if (ret < 0)
- return ret;
- }
- }
+ ret = graph_for_each_link(priv, &li,
+ graph_dai_link_of,
+ graph_dai_link_of_dpcm);
+ if (ret < 0)
+ return ret;
}
return asoc_simple_card_parse_card_name(card, NULL);
}
-static void asoc_graph_get_dais_count(struct device *dev,
- int *link_num,
- int *dais_num,
- int *ccnf_num)
+static int graph_count_noml(struct graph_priv *priv,
+ struct device_node *cpu_ep,
+ struct device_node *codec_ep,
+ struct link_info *li)
{
- struct of_phandle_iterator it;
- struct device_node *node = dev->of_node;
- struct device_node *cpu_port;
- struct device_node *cpu_ep;
- struct device_node *codec_ep;
- struct device_node *codec_port;
- struct device_node *codec_port_old;
- struct device_node *codec_port_old2;
- int rc;
+ struct device *dev = graph_priv_to_dev(priv);
+
+ li->link += 1; /* 1xCPU-Codec */
+ li->dais += 2; /* 1xCPU + 1xCodec */
+
+ dev_dbg(dev, "Count As Normal\n");
+
+ return 0;
+}
+
+static int graph_count_dpcm(struct graph_priv *priv,
+ struct device_node *cpu_ep,
+ struct device_node *codec_ep,
+ struct link_info *li,
+ int dup_codec)
+{
+ struct device *dev = graph_priv_to_dev(priv);
+
+ li->link++; /* 1xCPU-dummy */
+ li->dais++; /* 1xCPU */
+
+ if (!dup_codec) {
+ li->link++; /* 1xdummy-Codec */
+ li->conf++; /* 1xdummy-Codec */
+ li->dais++; /* 1xCodec */
+ }
+
+ dev_dbg(dev, "Count As DPCM\n");
+
+ return 0;
+}
+
+static void graph_get_dais_count(struct graph_priv *priv,
+ struct link_info *li)
+{
+ struct device *dev = graph_priv_to_dev(priv);
/*
* link_num : number of links.
@@ -522,45 +604,26 @@ static void asoc_graph_get_dais_count(struct device *dev,
* => 6 links = 0xCPU-Codec + 4xCPU-dummy + 2xdummy-Codec
* => 6 DAIs = 4xCPU + 2xCodec
* => 2 ccnf = 2xdummy-Codec
+ *
+ * ex4)
+ * CPU0 --- Codec0 (convert-rate) link : 3
+ * CPU1 --- Codec1 dais : 4
+ * ccnf : 1
+ *
+ * => 3 links = 1xCPU-Codec + 1xCPU-dummy + 1xdummy-Codec
+ * => 4 DAIs = 2xCPU + 2xCodec
+ * => 1 ccnf = 1xdummy-Codec
*/
- codec_port_old = NULL;
- codec_port_old2 = NULL;
- of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
- cpu_port = it.node;
- cpu_ep = NULL;
- while (1) {
- cpu_ep = of_get_next_child(cpu_port, cpu_ep);
- if (!cpu_ep)
- break;
-
- codec_ep = of_graph_get_remote_endpoint(cpu_ep);
- codec_port = of_get_parent(codec_ep);
-
- of_node_put(codec_ep);
- of_node_put(codec_port);
-
- (*link_num)++;
- (*dais_num)++;
-
- if (codec_port_old == codec_port) {
- if (codec_port_old2 != codec_port_old) {
- (*link_num)++;
- (*ccnf_num)++;
- }
-
- codec_port_old2 = codec_port_old;
- continue;
- }
-
- (*dais_num)++;
- codec_port_old = codec_port;
- }
- }
+ graph_for_each_link(priv, li,
+ graph_count_noml,
+ graph_count_dpcm);
+ dev_dbg(dev, "link %d, dais %d, ccnf %d\n",
+ li->link, li->dais, li->conf);
}
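
As a worked example, ex4 above traces through the new counting callbacks as follows (illustrative walk-through, not part of the patch):

    /*
     * CPU0 -- Codec0 (convert-rate) : DPCM, first visit of Codec0 (dup_codec = 0)
     *     graph_count_dpcm(): li.link += 2 (CPU0-dummy + dummy-Codec0),
     *                         li.dais += 2, li.conf += 1
     * CPU1 -- Codec1                : normal link
     *     graph_count_noml(): li.link += 1 (CPU1-Codec1), li.dais += 2
     *
     * Total: link = 3, dais = 4, ccnf = 1, matching ex4.
     */
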
-static int asoc_graph_soc_card_probe(struct snd_soc_card *card)
+static int graph_card_probe(struct snd_soc_card *card)
{
- struct graph_card_data *priv = snd_soc_card_get_drvdata(card);
+ struct graph_priv *priv = snd_soc_card_get_drvdata(card);
int ret;
ret = asoc_simple_card_init_hp(card, &priv->hp_jack, NULL);
@@ -574,16 +637,16 @@ static int asoc_graph_soc_card_probe(struct snd_soc_card *card)
return 0;
}
-static int asoc_graph_card_probe(struct platform_device *pdev)
+static int graph_probe(struct platform_device *pdev)
{
- struct graph_card_data *priv;
+ struct graph_priv *priv;
struct snd_soc_dai_link *dai_link;
struct graph_dai_props *dai_props;
struct asoc_simple_dai *dais;
struct device *dev = &pdev->dev;
struct snd_soc_card *card;
struct snd_soc_codec_conf *cconf;
- int lnum = 0, dnum = 0, cnum = 0;
+ struct link_info li;
int ret, i;
/* Allocate the private data and the DAI link array */
@@ -591,14 +654,22 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- asoc_graph_get_dais_count(dev, &lnum, &dnum, &cnum);
- if (!lnum || !dnum)
+ card = graph_priv_to_card(priv);
+ card->owner = THIS_MODULE;
+ card->dev = dev;
+ card->dapm_widgets = graph_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(graph_dapm_widgets);
+ card->probe = graph_card_probe;
+
+ memset(&li, 0, sizeof(li));
+ graph_get_dais_count(priv, &li);
+ if (!li.link || !li.dais)
return -EINVAL;
- dai_props = devm_kcalloc(dev, lnum, sizeof(*dai_props), GFP_KERNEL);
- dai_link = devm_kcalloc(dev, lnum, sizeof(*dai_link), GFP_KERNEL);
- dais = devm_kcalloc(dev, dnum, sizeof(*dais), GFP_KERNEL);
- cconf = devm_kcalloc(dev, cnum, sizeof(*cconf), GFP_KERNEL);
+ dai_props = devm_kcalloc(dev, li.link, sizeof(*dai_props), GFP_KERNEL);
+ dai_link = devm_kcalloc(dev, li.link, sizeof(*dai_link), GFP_KERNEL);
+ dais = devm_kcalloc(dev, li.dais, sizeof(*dais), GFP_KERNEL);
+ cconf = devm_kcalloc(dev, li.conf, sizeof(*cconf), GFP_KERNEL);
if (!dai_props || !dai_link || !dais)
return -ENOMEM;
@@ -608,10 +679,11 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
* see
* soc-core.c :: snd_soc_init_multicodec()
*/
- for (i = 0; i < lnum; i++) {
+ for (i = 0; i < li.link; i++) {
dai_link[i].codecs = &dai_props[i].codecs;
dai_link[i].num_codecs = 1;
- dai_link[i].platform = &dai_props[i].platform;
+ dai_link[i].platforms = &dai_props[i].platforms;
+ dai_link[i].num_platforms = 1;
}
priv->pa_gpio = devm_gpiod_get_optional(dev, "pa", GPIOD_OUT_LOW);
@@ -621,24 +693,17 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
return ret;
}
- priv->dai_props = dai_props;
- priv->dai_link = dai_link;
- priv->dais = dais;
- priv->codec_conf = cconf;
+ priv->dai_props = dai_props;
+ priv->dai_link = dai_link;
+ priv->dais = dais;
+ priv->codec_conf = cconf;
- /* Init snd_soc_card */
- card = graph_priv_to_card(priv);
- card->owner = THIS_MODULE;
- card->dev = dev;
card->dai_link = dai_link;
- card->num_links = lnum;
- card->dapm_widgets = asoc_graph_card_dapm_widgets;
- card->num_dapm_widgets = ARRAY_SIZE(asoc_graph_card_dapm_widgets);
- card->probe = asoc_graph_soc_card_probe;
+ card->num_links = li.link;
card->codec_conf = cconf;
- card->num_configs = cnum;
+ card->num_configs = li.conf;
- ret = asoc_graph_card_parse_of(priv);
+ ret = graph_parse_of(priv);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "parse error %d\n", ret);
@@ -658,30 +723,30 @@ err:
return ret;
}
-static int asoc_graph_card_remove(struct platform_device *pdev)
+static int graph_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
return asoc_simple_card_clean_reference(card);
}
-static const struct of_device_id asoc_graph_of_match[] = {
+static const struct of_device_id graph_of_match[] = {
{ .compatible = "audio-graph-card", },
{ .compatible = "audio-graph-scu-card", },
{},
};
-MODULE_DEVICE_TABLE(of, asoc_graph_of_match);
+MODULE_DEVICE_TABLE(of, graph_of_match);
-static struct platform_driver asoc_graph_card = {
+static struct platform_driver graph_card = {
.driver = {
.name = "asoc-audio-graph-card",
.pm = &snd_soc_pm_ops,
- .of_match_table = asoc_graph_of_match,
+ .of_match_table = graph_of_match,
},
- .probe = asoc_graph_card_probe,
- .remove = asoc_graph_card_remove,
+ .probe = graph_probe,
+ .remove = graph_remove,
};
-module_platform_driver(asoc_graph_card);
+module_platform_driver(graph_card);
MODULE_ALIAS("platform:asoc-audio-graph-card");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/generic/audio-graph-scu-card.c b/sound/soc/generic/audio-graph-scu-card.c
deleted file mode 100644
index e1b192ea147b..000000000000
--- a/sound/soc/generic/audio-graph-scu-card.c
+++ /dev/null
@@ -1,501 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// ASoC audio graph SCU sound card support
-//
-// Copyright (C) 2017 Renesas Solutions Corp.
-// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
-//
-// based on
-// ${LINUX}/sound/soc/generic/simple-scu-card.c
-// ${LINUX}/sound/soc/generic/audio-graph-card.c
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
-#include <linux/of_graph.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <sound/jack.h>
-#include <sound/simple_card_utils.h>
-
-struct graph_card_data {
- struct snd_soc_card snd_card;
- struct graph_dai_props {
- struct asoc_simple_dai *cpu_dai;
- struct asoc_simple_dai *codec_dai;
- struct snd_soc_dai_link_component codecs;
- struct snd_soc_dai_link_component platform;
- struct asoc_simple_card_data adata;
- struct snd_soc_codec_conf *codec_conf;
- } *dai_props;
- struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dais;
- struct asoc_simple_card_data adata;
- struct snd_soc_codec_conf *codec_conf;
-};
-
-#define graph_priv_to_card(priv) (&(priv)->snd_card)
-#define graph_priv_to_props(priv, i) ((priv)->dai_props + (i))
-#define graph_priv_to_dev(priv) (graph_priv_to_card(priv)->dev)
-#define graph_priv_to_link(priv, i) (graph_priv_to_card(priv)->dai_link + (i))
-
-#define PREFIX "audio-graph-card,"
-
-static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
- int ret = 0;
-
- ret = asoc_simple_card_clk_enable(dai_props->cpu_dai);
- if (ret)
- return ret;
-
- ret = asoc_simple_card_clk_enable(dai_props->codec_dai);
- if (ret)
- asoc_simple_card_clk_disable(dai_props->cpu_dai);
-
- return ret;
-}
-
-static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
-
- asoc_simple_card_clk_disable(dai_props->cpu_dai);
-
- asoc_simple_card_clk_disable(dai_props->codec_dai);
-}
-
-static const struct snd_soc_ops asoc_graph_card_ops = {
- .startup = asoc_graph_card_startup,
- .shutdown = asoc_graph_card_shutdown,
-};
-
-static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
- int ret = 0;
-
- ret = asoc_simple_card_init_dai(rtd->codec_dai,
- dai_props->codec_dai);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_init_dai(rtd->cpu_dai,
- dai_props->cpu_dai);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int asoc_graph_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
-{
- struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
-
- asoc_simple_card_convert_fixup(&dai_props->adata, params);
-
- /* overwrite by top level adata if exist */
- asoc_simple_card_convert_fixup(&priv->adata, params);
-
- return 0;
-}
-
-static int asoc_graph_card_dai_link_of(struct device_node *cpu_ep,
- struct device_node *codec_ep,
- struct graph_card_data *priv,
- int *dai_idx, int link_idx,
- int *conf_idx, int is_fe)
-{
- struct device *dev = graph_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, link_idx);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, link_idx);
- struct snd_soc_card *card = graph_priv_to_card(priv);
- struct device_node *ep = is_fe ? cpu_ep : codec_ep;
- struct device_node *node = of_graph_get_port_parent(ep);
- struct asoc_simple_dai *dai;
- int ret;
-
- if (is_fe) {
- struct snd_soc_dai_link_component *codecs;
-
- /* BE is dummy */
- codecs = dai_link->codecs;
- codecs->of_node = NULL;
- codecs->dai_name = "snd-soc-dummy-dai";
- codecs->name = "snd-soc-dummy";
-
- /* FE settings */
- dai_link->dynamic = 1;
- dai_link->dpcm_merged_format = 1;
-
- dai =
- dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
-
- ret = asoc_simple_card_parse_graph_cpu(ep, dai_link);
- if (ret)
- return ret;
-
- ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, dai);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_set_dailink_name(dev, dai_link,
- "fe.%s",
- dai_link->cpu_dai_name);
- if (ret < 0)
- return ret;
-
- /* card->num_links includes Codec */
- asoc_simple_card_canonicalize_cpu(dai_link,
- of_graph_get_endpoint_count(dai_link->cpu_of_node) == 1);
- } else {
- struct snd_soc_codec_conf *cconf;
-
- /* FE is dummy */
- dai_link->cpu_of_node = NULL;
- dai_link->cpu_dai_name = "snd-soc-dummy-dai";
- dai_link->cpu_name = "snd-soc-dummy";
-
- /* BE settings */
- dai_link->no_pcm = 1;
- dai_link->be_hw_params_fixup = asoc_graph_card_be_hw_params_fixup;
-
- dai =
- dai_props->codec_dai = &priv->dais[(*dai_idx)++];
-
- cconf =
- dai_props->codec_conf = &priv->codec_conf[(*conf_idx)++];
-
- ret = asoc_simple_card_parse_graph_codec(ep, dai_link);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, dai);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_set_dailink_name(dev, dai_link,
- "be.%s",
- dai_link->codecs->dai_name);
- if (ret < 0)
- return ret;
-
- /* check "prefix" from top node */
- snd_soc_of_parse_audio_prefix(card, cconf,
- dai_link->codecs->of_node,
- "prefix");
- /* check "prefix" from each node if top doesn't have */
- if (!cconf->of_node)
- snd_soc_of_parse_node_prefix(node, cconf,
- dai_link->codecs->of_node,
- PREFIX "prefix");
- }
-
- asoc_simple_card_parse_convert(dev, node, PREFIX, &dai_props->adata);
-
- ret = asoc_simple_card_of_parse_tdm(ep, dai);
- if (ret)
- return ret;
-
- ret = asoc_simple_card_canonicalize_dailink(dai_link);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_parse_daifmt(dev, cpu_ep, codec_ep,
- NULL, &dai_link->dai_fmt);
- if (ret < 0)
- return ret;
-
- dai_link->dpcm_playback = 1;
- dai_link->dpcm_capture = 1;
- dai_link->ops = &asoc_graph_card_ops;
- dai_link->init = asoc_graph_card_dai_init;
-
- return 0;
-}
-
-static int asoc_graph_card_parse_of(struct graph_card_data *priv)
-{
- struct of_phandle_iterator it;
- struct device *dev = graph_priv_to_dev(priv);
- struct snd_soc_card *card = graph_priv_to_card(priv);
- struct device_node *node = dev->of_node;
- struct device_node *cpu_port;
- struct device_node *cpu_ep;
- struct device_node *codec_ep;
- struct device_node *codec_port;
- struct device_node *codec_port_old;
- int dai_idx, link_idx, conf_idx, ret;
- int rc, codec;
-
- if (!node)
- return -EINVAL;
-
- /*
- * we need to consider "widgets", "mclk-fs" around here
- * see simple-card
- */
-
- ret = asoc_simple_card_of_parse_routing(card, NULL);
- if (ret < 0)
- return ret;
-
- asoc_simple_card_parse_convert(dev, node, NULL, &priv->adata);
-
- /*
- * it supports multi CPU, single CODEC only here
- * see asoc_graph_get_dais_count
- */
-
- link_idx = 0;
- dai_idx = 0;
- conf_idx = 0;
- codec_port_old = NULL;
- for (codec = 0; codec < 2; codec++) {
- /*
- * To listup valid sounds continuously,
- * detect all CPU-dummy first, and
- * detect all dummy-Codec second
- */
- of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
- cpu_port = it.node;
- cpu_ep = of_get_next_child(cpu_port, NULL);
- codec_ep = of_graph_get_remote_endpoint(cpu_ep);
- codec_port = of_graph_get_port_parent(codec_ep);
-
- of_node_put(cpu_ep);
- of_node_put(codec_ep);
- of_node_put(cpu_port);
- of_node_put(codec_port);
- it.node = NULL;
-
- if (codec) {
- if (codec_port_old == codec_port)
- continue;
-
- codec_port_old = codec_port;
- }
-
- ret = asoc_graph_card_dai_link_of(cpu_ep, codec_ep,
- priv, &dai_idx,
- link_idx++, &conf_idx,
- !codec);
- if (ret < 0)
- goto parse_of_err;
- }
- }
-
- ret = asoc_simple_card_parse_card_name(card, NULL);
- if (ret)
- goto parse_of_err;
-
- if ((card->num_links != link_idx) ||
- (card->num_configs != conf_idx)) {
- dev_err(dev, "dai_link or codec_config wrong (%d/%d, %d/%d)\n",
- card->num_links, link_idx, card->num_configs, conf_idx);
- ret = -EINVAL;
- goto parse_of_err;
- }
-
- ret = 0;
-
-parse_of_err:
- return ret;
-}
-
-static void asoc_graph_get_dais_count(struct device *dev,
- int *link_num,
- int *dais_num,
- int *ccnf_num)
-{
- struct of_phandle_iterator it;
- struct device_node *node = dev->of_node;
- struct device_node *cpu_port;
- struct device_node *cpu_ep;
- struct device_node *codec_ep;
- struct device_node *codec_port;
- struct device_node *codec_port_old;
- struct device_node *codec_port_old2;
- int rc;
-
- /*
- * link_num : number of links.
- * CPU-Codec / CPU-dummy / dummy-Codec
- * dais_num : number of DAIs
- * ccnf_num : number of codec_conf
- * same number for dummy-Codec
- *
- * ex1)
- * CPU0 --- Codec0 link : 5
- * CPU1 --- Codec1 dais : 7
- * CPU2 -/ ccnf : 1
- * CPU3 --- Codec2
- *
- * => 5 links = 2xCPU-Codec + 2xCPU-dummy + 1xdummy-Codec
- * => 7 DAIs = 4xCPU + 3xCodec
- * => 1 ccnf = 1xdummy-Codec
- *
- * ex2)
- * CPU0 --- Codec0 link : 5
- * CPU1 --- Codec1 dais : 6
- * CPU2 -/ ccnf : 1
- * CPU3 -/
- *
- * => 5 links = 1xCPU-Codec + 3xCPU-dummy + 1xdummy-Codec
- * => 6 DAIs = 4xCPU + 2xCodec
- * => 1 ccnf = 1xdummy-Codec
- *
- * ex3)
- * CPU0 --- Codec0 link : 6
- * CPU1 -/ dais : 6
- * CPU2 --- Codec1 ccnf : 2
- * CPU3 -/
- *
- * => 6 links = 0xCPU-Codec + 4xCPU-dummy + 2xdummy-Codec
- * => 6 DAIs = 4xCPU + 2xCodec
- * => 2 ccnf = 2xdummy-Codec
- */
- codec_port_old = NULL;
- codec_port_old2 = NULL;
- of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
- cpu_port = it.node;
- cpu_ep = of_get_next_child(cpu_port, NULL);
- codec_ep = of_graph_get_remote_endpoint(cpu_ep);
- codec_port = of_graph_get_port_parent(codec_ep);
-
- of_node_put(cpu_ep);
- of_node_put(codec_ep);
- of_node_put(codec_port);
-
- (*link_num)++;
- (*dais_num)++;
-
- if (codec_port_old == codec_port) {
- if (codec_port_old2 != codec_port_old) {
- (*link_num)++;
- (*ccnf_num)++;
- }
-
- codec_port_old2 = codec_port_old;
- continue;
- }
-
- (*dais_num)++;
- codec_port_old = codec_port;
- }
-}
-
-static int asoc_graph_card_probe(struct platform_device *pdev)
-{
- struct graph_card_data *priv;
- struct snd_soc_dai_link *dai_link;
- struct graph_dai_props *dai_props;
- struct asoc_simple_dai *dais;
- struct device *dev = &pdev->dev;
- struct snd_soc_card *card;
- struct snd_soc_codec_conf *cconf;
- int lnum = 0, dnum = 0, cnum = 0;
- int ret, i;
-
- /* Allocate the private data and the DAI link array */
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- asoc_graph_get_dais_count(dev, &lnum, &dnum, &cnum);
- if (!lnum || !dnum)
- return -EINVAL;
-
- dai_props = devm_kcalloc(dev, lnum, sizeof(*dai_props), GFP_KERNEL);
- dai_link = devm_kcalloc(dev, lnum, sizeof(*dai_link), GFP_KERNEL);
- dais = devm_kcalloc(dev, dnum, sizeof(*dais), GFP_KERNEL);
- cconf = devm_kcalloc(dev, cnum, sizeof(*cconf), GFP_KERNEL);
- if (!dai_props || !dai_link || !dais)
- return -ENOMEM;
-
- /*
- * Use snd_soc_dai_link_component instead of legacy style
- * It is codec only. but cpu/platform will be supported in the future.
- * see
- * soc-core.c :: snd_soc_init_multicodec()
- */
- for (i = 0; i < lnum; i++) {
- dai_link[i].codecs = &dai_props[i].codecs;
- dai_link[i].num_codecs = 1;
- dai_link[i].platform = &dai_props[i].platform;
- }
-
- priv->dai_props = dai_props;
- priv->dai_link = dai_link;
- priv->dais = dais;
- priv->codec_conf = cconf;
-
- /* Init snd_soc_card */
- card = graph_priv_to_card(priv);
- card->owner = THIS_MODULE;
- card->dev = dev;
- card->dai_link = priv->dai_link;
- card->num_links = lnum;
- card->codec_conf = cconf;
- card->num_configs = cnum;
-
- ret = asoc_graph_card_parse_of(priv);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "parse error %d\n", ret);
- goto err;
- }
-
- snd_soc_card_set_drvdata(card, priv);
-
- ret = devm_snd_soc_register_card(dev, card);
- if (ret < 0)
- goto err;
-
- return 0;
-err:
- asoc_simple_card_clean_reference(card);
-
- return ret;
-}
-
-static int asoc_graph_card_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- return asoc_simple_card_clean_reference(card);
-}
-
-static const struct of_device_id asoc_graph_of_match[] = {
- { .compatible = "audio-graph-scu-card", },
- {},
-};
-MODULE_DEVICE_TABLE(of, asoc_graph_of_match);
-
-static struct platform_driver asoc_graph_card = {
- .driver = {
- .name = "asoc-audio-graph-scu-card",
- .pm = &snd_soc_pm_ops,
- .of_match_table = asoc_graph_of_match,
- },
- .probe = asoc_graph_card_probe,
- .remove = asoc_graph_card_remove,
-};
-module_platform_driver(asoc_graph_card);
-
-MODULE_ALIAS("platform:asoc-audio-graph-scu-card");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("ASoC Audio Graph SCU Sound Card");
-MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index b807a47515eb..5c1424f03620 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -283,12 +283,20 @@ static int asoc_simple_card_get_dai_id(struct device_node *ep)
/* use endpoint/port reg if exist */
ret = of_graph_parse_endpoint(ep, &info);
if (ret == 0) {
- if (info.id)
+ /*
+ * of_graph_parse_endpoint() returns 0 both when "reg" is missing
+ * and when "reg = <0>", so info.id / info.port alone can't tell
+ * the two cases apart.
+ * Trust them only after checking that the "reg" property exists.
+ */
+ if (of_get_property(ep, "reg", NULL))
return info.id;
- if (info.port)
+
+ node = of_get_parent(ep);
+ of_node_put(node);
+ if (of_get_property(node, "reg", NULL))
return info.port;
}
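
A minimal illustration of why the explicit "reg" check is needed; the DT fragments are assumed examples, not taken from the patch:

    /*
     * port { endpoint { }; };             -> of_graph_parse_endpoint()
     *                                        leaves info.id == 0
     * port { endpoint { reg = <0>; }; };  -> also gives info.id == 0
     *
     * Only of_get_property(ep, "reg", NULL) can tell these apart, so the
     * parsed id/port is trusted only when a "reg" property really exists.
     */
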
-
node = of_graph_get_port_parent(ep);
/*
@@ -386,16 +394,13 @@ int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
}
EXPORT_SYMBOL_GPL(asoc_simple_card_init_dai);
-int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link)
+void asoc_simple_card_canonicalize_platform(struct snd_soc_dai_link *dai_link)
{
/* Assumes platform == cpu */
- if (!dai_link->platform->of_node)
- dai_link->platform->of_node = dai_link->cpu_of_node;
-
- return 0;
-
+ if (!dai_link->platforms->of_node)
+ dai_link->platforms->of_node = dai_link->cpu_of_node;
}
-EXPORT_SYMBOL_GPL(asoc_simple_card_canonicalize_dailink);
+EXPORT_SYMBOL_GPL(asoc_simple_card_canonicalize_platform);
void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
int is_single_links)
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 37e001cf9cd1..7147bba45a2a 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -15,13 +15,13 @@
#include <sound/soc-dai.h>
#include <sound/soc.h>
-struct simple_card_data {
+struct simple_priv {
struct snd_soc_card snd_card;
struct simple_dai_props {
struct asoc_simple_dai *cpu_dai;
struct asoc_simple_dai *codec_dai;
struct snd_soc_dai_link_component codecs; /* single codec */
- struct snd_soc_dai_link_component platform;
+ struct snd_soc_dai_link_component platforms;
struct asoc_simple_card_data adata;
struct snd_soc_codec_conf *codec_conf;
unsigned int mclk_fs;
@@ -33,6 +33,13 @@ struct simple_card_data {
struct snd_soc_codec_conf *codec_conf;
};
+struct link_info {
+ int dais; /* number of dai */
+ int link; /* number of link */
+ int conf; /* number of codec_conf */
+ int cpu; /* turn for CPU / Codec */
+};
+
#define simple_priv_to_card(priv) (&(priv)->snd_card)
#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
#define simple_priv_to_dev(priv) (simple_priv_to_card(priv)->dev)
@@ -42,10 +49,10 @@ struct simple_card_data {
#define CELL "#sound-dai-cells"
#define PREFIX "simple-audio-card,"
-static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
+static int simple_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
int ret;
@@ -61,10 +68,10 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
return ret;
}
-static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
+static void simple_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
@@ -73,8 +80,8 @@ static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
asoc_simple_card_clk_disable(dai_props->codec_dai);
}
-static int asoc_simple_set_clk_rate(struct asoc_simple_dai *simple_dai,
- unsigned long rate)
+static int simple_set_clk_rate(struct asoc_simple_dai *simple_dai,
+ unsigned long rate)
{
if (!simple_dai)
return 0;
@@ -88,13 +95,13 @@ static int asoc_simple_set_clk_rate(struct asoc_simple_dai *simple_dai,
return clk_set_rate(simple_dai->clk, rate);
}
-static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int simple_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
unsigned int mclk, mclk_fs = 0;
@@ -106,11 +113,11 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
if (mclk_fs) {
mclk = params_rate(params) * mclk_fs;
- ret = asoc_simple_set_clk_rate(dai_props->codec_dai, mclk);
+ ret = simple_set_clk_rate(dai_props->codec_dai, mclk);
if (ret < 0)
return ret;
- ret = asoc_simple_set_clk_rate(dai_props->cpu_dai, mclk);
+ ret = simple_set_clk_rate(dai_props->cpu_dai, mclk);
if (ret < 0)
return ret;
@@ -129,15 +136,15 @@ err:
return ret;
}
-static const struct snd_soc_ops asoc_simple_card_ops = {
- .startup = asoc_simple_card_startup,
- .shutdown = asoc_simple_card_shutdown,
- .hw_params = asoc_simple_card_hw_params,
+static const struct snd_soc_ops simple_ops = {
+ .startup = simple_startup,
+ .shutdown = simple_shutdown,
+ .hw_params = simple_hw_params,
};
-static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
+static int simple_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
int ret;
@@ -154,10 +161,10 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
+static int simple_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
{
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
asoc_simple_card_convert_fixup(&dai_props->adata, params);
@@ -165,30 +172,58 @@ static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static int asoc_simple_card_dai_link_of_dpcm(struct device_node *top,
- struct device_node *node,
- struct device_node *np,
- struct device_node *codec,
- struct simple_card_data *priv,
- int *dai_idx, int link_idx,
- int *conf_idx, int is_fe,
- bool is_top_level_node)
+static void simple_get_conversion(struct device *dev,
+ struct device_node *np,
+ struct asoc_simple_card_data *adata)
+{
+ struct device_node *top = dev->of_node;
+ struct device_node *node = of_get_parent(np);
+
+ asoc_simple_card_parse_convert(dev, top, PREFIX, adata);
+ asoc_simple_card_parse_convert(dev, node, PREFIX, adata);
+ asoc_simple_card_parse_convert(dev, node, NULL, adata);
+ asoc_simple_card_parse_convert(dev, np, NULL, adata);
+
+ of_node_put(node);
+}
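
For reference, simple_get_conversion() checks the convert-xxx properties in the order below; assuming asoc_simple_card_parse_convert() only overwrites a field when the property is actually present, the most specific node wins (sketch, not part of the patch):

    /*
     * 1. top node       : "simple-audio-card,convert-xxx"
     * 2. dai-link node  : "simple-audio-card,convert-xxx"
     * 3. dai-link node  : "convert-xxx"
     * 4. cpu/codec node : "convert-xxx"
     */
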
+
+static int simple_dai_link_of_dpcm(struct simple_priv *priv,
+ struct device_node *np,
+ struct device_node *codec,
+ struct link_info *li,
+ bool is_top)
{
struct device *dev = simple_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, link_idx);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, link_idx);
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct asoc_simple_dai *dai;
struct snd_soc_dai_link_component *codecs = dai_link->codecs;
-
+ struct device_node *top = dev->of_node;
+ struct device_node *node = of_get_parent(np);
char prop[128];
char *prefix = "";
int ret;
+ /*
+ * np \ turn |CPU   |Codec
+ * ----------+------+------
+ * CPU       |Pass  |return
+ * Codec     |return|Pass
+ */
+ if (li->cpu == (np == codec))
+ return 0;
+
+ dev_dbg(dev, "link_of DPCM (%pOF)\n", np);
+
+ li->link++;
+
+ of_node_put(node);
+
/* For single DAI link & old style of DT node */
- if (is_top_level_node)
+ if (is_top)
prefix = PREFIX;
- if (is_fe) {
+ if (li->cpu) {
int is_single_links = 0;
/* BE is dummy */
@@ -201,7 +236,7 @@ static int asoc_simple_card_dai_link_of_dpcm(struct device_node *top,
dai_link->dpcm_merged_format = 1;
dai =
- dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+ dai_props->cpu_dai = &priv->dais[li->dais++];
ret = asoc_simple_card_parse_cpu(np, dai_link, DAI, CELL,
&is_single_links);
@@ -229,13 +264,13 @@ static int asoc_simple_card_dai_link_of_dpcm(struct device_node *top,
/* BE settings */
dai_link->no_pcm = 1;
- dai_link->be_hw_params_fixup = asoc_simple_card_be_hw_params_fixup;
+ dai_link->be_hw_params_fixup = simple_be_hw_params_fixup;
dai =
- dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+ dai_props->codec_dai = &priv->dais[li->dais++];
cconf =
- dai_props->codec_conf = &priv->codec_conf[(*conf_idx)++];
+ dai_props->codec_conf = &priv->codec_conf[li->conf++];
ret = asoc_simple_card_parse_codec(np, dai_link, DAI, CELL);
if (ret < 0)
@@ -260,18 +295,14 @@ static int asoc_simple_card_dai_link_of_dpcm(struct device_node *top,
"prefix");
}
- asoc_simple_card_parse_convert(dev, top, PREFIX, &dai_props->adata);
- asoc_simple_card_parse_convert(dev, node, prefix, &dai_props->adata);
- asoc_simple_card_parse_convert(dev, np, NULL, &dai_props->adata);
+ simple_get_conversion(dev, np, &dai_props->adata);
+
+ asoc_simple_card_canonicalize_platform(dai_link);
ret = asoc_simple_card_of_parse_tdm(np, dai);
if (ret)
return ret;
- ret = asoc_simple_card_canonicalize_dailink(dai_link);
- if (ret < 0)
- return ret;
-
snprintf(prop, sizeof(prop), "%smclk-fs", prefix);
of_property_read_u32(top, PREFIX "mclk-fs", &dai_props->mclk_fs);
of_property_read_u32(node, prop, &dai_props->mclk_fs);
@@ -284,59 +315,57 @@ static int asoc_simple_card_dai_link_of_dpcm(struct device_node *top,
dai_link->dpcm_playback = 1;
dai_link->dpcm_capture = 1;
- dai_link->ops = &asoc_simple_card_ops;
- dai_link->init = asoc_simple_card_dai_init;
+ dai_link->ops = &simple_ops;
+ dai_link->init = simple_dai_init;
return 0;
}
-static int asoc_simple_card_dai_link_of(struct device_node *top,
- struct device_node *node,
- struct simple_card_data *priv,
- int *dai_idx, int link_idx,
- bool is_top_level_node)
+static int simple_dai_link_of(struct simple_priv *priv,
+ struct device_node *np,
+ struct device_node *codec,
+ struct link_info *li,
+ bool is_top)
{
struct device *dev = simple_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, link_idx);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, link_idx);
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct asoc_simple_dai *cpu_dai;
struct asoc_simple_dai *codec_dai;
+ struct device_node *top = dev->of_node;
struct device_node *cpu = NULL;
+ struct device_node *node = NULL;
struct device_node *plat = NULL;
- struct device_node *codec = NULL;
char prop[128];
char *prefix = "";
int ret, single_cpu;
- /* For single DAI link & old style of DT node */
- if (is_top_level_node)
- prefix = PREFIX;
+ /*
+ * np \ turn |CPU   |Codec
+ * ----------+------+------
+ * CPU       |Pass  |return
+ * Codec     |return|return
+ */
+ if (!li->cpu || np == codec)
+ return 0;
- snprintf(prop, sizeof(prop), "%scpu", prefix);
- cpu = of_get_child_by_name(node, prop);
+ cpu = np;
+ node = of_get_parent(np);
+ li->link++;
- if (!cpu) {
- ret = -EINVAL;
- dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop);
- goto dai_link_of_err;
- }
+ dev_dbg(dev, "link_of (%pOF)\n", node);
+
+ /* For single DAI link & old style of DT node */
+ if (is_top)
+ prefix = PREFIX;
snprintf(prop, sizeof(prop), "%splat", prefix);
plat = of_get_child_by_name(node, prop);
- snprintf(prop, sizeof(prop), "%scodec", prefix);
- codec = of_get_child_by_name(node, prop);
-
- if (!codec) {
- ret = -EINVAL;
- dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop);
- goto dai_link_of_err;
- }
-
cpu_dai =
- dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+ dai_props->cpu_dai = &priv->dais[li->dais++];
codec_dai =
- dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+ dai_props->codec_dai = &priv->dais[li->dais++];
ret = asoc_simple_card_parse_daifmt(dev, node, codec,
prefix, &dai_link->dai_fmt);
@@ -378,10 +407,6 @@ static int asoc_simple_card_dai_link_of(struct device_node *top,
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_card_canonicalize_dailink(dai_link);
- if (ret < 0)
- goto dai_link_of_err;
-
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"%s-%s",
dai_link->cpu_dai_name,
@@ -389,20 +414,95 @@ static int asoc_simple_card_dai_link_of(struct device_node *top,
if (ret < 0)
goto dai_link_of_err;
- dai_link->ops = &asoc_simple_card_ops;
- dai_link->init = asoc_simple_card_dai_init;
+ dai_link->ops = &simple_ops;
+ dai_link->init = simple_dai_init;
asoc_simple_card_canonicalize_cpu(dai_link, single_cpu);
+ asoc_simple_card_canonicalize_platform(dai_link);
dai_link_of_err:
- of_node_put(cpu);
- of_node_put(codec);
+ of_node_put(plat);
+ of_node_put(node);
+
+ return ret;
+}
+
+static int simple_for_each_link(struct simple_priv *priv,
+ struct link_info *li,
+ int (*func_noml)(struct simple_priv *priv,
+ struct device_node *np,
+ struct device_node *codec,
+ struct link_info *li, bool is_top),
+ int (*func_dpcm)(struct simple_priv *priv,
+ struct device_node *np,
+ struct device_node *codec,
+ struct link_info *li, bool is_top))
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct device_node *top = dev->of_node;
+ struct device_node *node;
+ bool is_top = 0;
+ int ret = 0;
+
+ /* Check if it has dai-link */
+ node = of_get_child_by_name(top, PREFIX "dai-link");
+ if (!node) {
+ node = of_node_get(top);
+ is_top = 1;
+ }
+ /* loop over every dai-link node */
+ do {
+ struct asoc_simple_card_data adata;
+ struct device_node *codec;
+ struct device_node *np;
+ int num = of_get_child_count(node);
+
+ /* get codec */
+ codec = of_get_child_by_name(node, is_top ?
+ PREFIX "codec" : "codec");
+ if (!codec) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ of_node_put(codec);
+
+ /* get convert-xxx property */
+ memset(&adata, 0, sizeof(adata));
+ for_each_child_of_node(node, np)
+ simple_get_conversion(dev, np, &adata);
+
+ /* loop over every CPU/Codec node */
+ for_each_child_of_node(node, np) {
+ /*
+ * It is a DPCM link
+ * if the dai-link has more than one CPU,
+ * or if it has a convert-xxx property.
+ */
+ if (num > 2 ||
+ adata.convert_rate || adata.convert_channels)
+ ret = func_dpcm(priv, np, codec, li, is_top);
+ /* else normal sound */
+ else
+ ret = func_noml(priv, np, codec, li, is_top);
+
+ if (ret < 0) {
+ of_node_put(np);
+ goto error;
+ }
+ }
+
+ node = of_get_next_child(top, node);
+ } while (!is_top && node);
+
+ error:
+ of_node_put(node);
return ret;
}
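
Analogously to the audio-graph case, the per-link decision made by simple_for_each_link() can be summarized by a small predicate (illustrative only; the helper name simple_link_is_dpcm is invented):

    /* Sketch: "num" is of_get_child_count() of the dai-link (or top) node. */
    static bool simple_link_is_dpcm(int num,
                                    const struct asoc_simple_card_data *adata)
    {
            /* more than two children (i.e. several CPUs beside the codec),
             * or an explicit convert-rate / convert-channels request
             */
            return num > 2 ||
                   adata->convert_rate || adata->convert_channels;
    }
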
-static int asoc_simple_card_parse_aux_devs(struct device_node *node,
- struct simple_card_data *priv)
+static int simple_parse_aux_devs(struct device_node *node,
+ struct simple_priv *priv)
{
struct device *dev = simple_priv_to_dev(priv);
struct device_node *aux_node;
@@ -432,17 +532,13 @@ static int asoc_simple_card_parse_aux_devs(struct device_node *node,
return 0;
}
-static int asoc_simple_card_parse_of(struct simple_card_data *priv)
+static int simple_parse_of(struct simple_priv *priv)
{
struct device *dev = simple_priv_to_dev(priv);
struct device_node *top = dev->of_node;
struct snd_soc_card *card = simple_priv_to_card(priv);
- struct device_node *node;
- struct device_node *np;
- struct device_node *codec;
- bool is_fe;
- int ret, loop;
- int dai_idx, link_idx, conf_idx;
+ struct link_info li;
+ int ret;
if (!top)
return -EINVAL;
@@ -456,62 +552,66 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv)
return ret;
/* Single/Multi DAI link(s) & New style of DT node */
- loop = 1;
- link_idx = 0;
- dai_idx = 0;
- conf_idx = 0;
- node = of_get_child_by_name(top, PREFIX "dai-link");
- if (!node) {
- node = dev->of_node;
- loop = 0;
- }
-
- do {
- /* DPCM */
- if (of_get_child_count(node) > 2) {
- for_each_child_of_node(node, np) {
- codec = of_get_child_by_name(node,
- loop ? "codec" :
- PREFIX "codec");
- if (!codec)
- return -ENODEV;
-
- is_fe = (np != codec);
-
- ret = asoc_simple_card_dai_link_of_dpcm(
- top, node, np, codec, priv,
- &dai_idx, link_idx++, &conf_idx,
- is_fe, !loop);
- }
- } else {
- ret = asoc_simple_card_dai_link_of(
- top, node, priv,
- &dai_idx, link_idx++, !loop);
- }
+ memset(&li, 0, sizeof(li));
+ for (li.cpu = 1; li.cpu >= 0; li.cpu--) {
+ /*
+ * Detect all CPUs first, then all Codecs.
+ *
+ * In the normal sound case, every DAI link is
+ * detected as "CPU-Codec".
+ *
+ * In the DPCM sound case,
+ * all CPUs are detected as "CPU-dummy" links, and
+ * all Codecs are detected as "dummy-Codec" links.
+ * To avoid random sub-device numbering,
+ * detect the "dummy-Codec" links last.
+ */
+ ret = simple_for_each_link(priv, &li,
+ simple_dai_link_of,
+ simple_dai_link_of_dpcm);
if (ret < 0)
return ret;
-
- node = of_get_next_child(top, node);
- } while (loop && node);
+ }
ret = asoc_simple_card_parse_card_name(card, PREFIX);
if (ret < 0)
return ret;
- ret = asoc_simple_card_parse_aux_devs(top, priv);
+ ret = simple_parse_aux_devs(top, priv);
return ret;
}
-static void asoc_simple_card_get_dais_count(struct device *dev,
- int *link_num,
- int *dais_num,
- int *ccnf_num)
+static int simple_count_noml(struct simple_priv *priv,
+ struct device_node *np,
+ struct device_node *codec,
+ struct link_info *li, bool is_top)
+{
+ li->dais++; /* CPU or Codec */
+ if (np != codec)
+ li->link++; /* CPU-Codec */
+
+ return 0;
+}
+
+static int simple_count_dpcm(struct simple_priv *priv,
+ struct device_node *np,
+ struct device_node *codec,
+ struct link_info *li, bool is_top)
+{
+ li->dais++; /* CPU or Codec */
+ li->link++; /* CPU-dummy or dummy-Codec */
+ if (np == codec)
+ li->conf++;
+
+ return 0;
+}
+
+static void simple_get_dais_count(struct simple_priv *priv,
+ struct link_info *li)
{
+ struct device *dev = simple_priv_to_dev(priv);
struct device_node *top = dev->of_node;
- struct device_node *node;
- int loop;
- int num;
/*
* link_num : number of links.
@@ -549,37 +649,34 @@ static void asoc_simple_card_get_dais_count(struct device *dev,
* => 6 links = 0xCPU-Codec + 4xCPU-dummy + 2xdummy-Codec
* => 6 DAIs = 4xCPU + 2xCodec
* => 2 ccnf = 2xdummy-Codec
+ *
+ * ex4)
+ * CPU0 --- Codec0 (convert-rate) link : 3
+ * CPU1 --- Codec1 dais : 4
+ * ccnf : 1
+ *
+ * => 3 links = 1xCPU-Codec + 1xCPU-dummy + 1xdummy-Codec
+ * => 4 DAIs = 2xCPU + 2xCodec
+ * => 1 ccnf = 1xdummy-Codec
*/
if (!top) {
- (*link_num) = 1;
- (*dais_num) = 2;
- (*ccnf_num) = 0;
+ li->link = 1;
+ li->dais = 2;
+ li->conf = 0;
return;
}
- loop = 1;
- node = of_get_child_by_name(top, PREFIX "dai-link");
- if (!node) {
- node = top;
- loop = 0;
- }
+ simple_for_each_link(priv, li,
+ simple_count_noml,
+ simple_count_dpcm);
- do {
- num = of_get_child_count(node);
- (*dais_num) += num;
- if (num > 2) {
- (*link_num) += num;
- (*ccnf_num)++;
- } else {
- (*link_num)++;
- }
- node = of_get_next_child(top, node);
- } while (loop && node);
+ dev_dbg(dev, "link %d, dais %d, ccnf %d\n",
+ li->link, li->dais, li->conf);
}
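
ex4 above, traced through the two simple-card counting callbacks (illustrative walk-through, not part of the patch):

    /*
     * dai-link0: CPU0 + Codec0, convert-rate present -> simple_count_dpcm()
     *     CPU0   : li.dais = 1, li.link = 1              (CPU0-dummy)
     *     Codec0 : li.dais = 2, li.link = 2, li.conf = 1 (dummy-Codec0)
     * dai-link1: CPU1 + Codec1, normal link             -> simple_count_noml()
     *     CPU1   : li.dais = 3, li.link = 3              (CPU1-Codec1)
     *     Codec1 : li.dais = 4                           (link already counted)
     *
     * Total: link = 3, dais = 4, ccnf = 1, matching ex4.
     */
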
-static int asoc_simple_soc_card_probe(struct snd_soc_card *card)
+static int simple_soc_probe(struct snd_soc_card *card)
{
- struct simple_card_data *priv = snd_soc_card_get_drvdata(card);
+ struct simple_priv *priv = snd_soc_card_get_drvdata(card);
int ret;
ret = asoc_simple_card_init_hp(card, &priv->hp_jack, PREFIX);
@@ -593,9 +690,9 @@ static int asoc_simple_soc_card_probe(struct snd_soc_card *card)
return 0;
}
-static int asoc_simple_card_probe(struct platform_device *pdev)
+static int simple_probe(struct platform_device *pdev)
{
- struct simple_card_data *priv;
+ struct simple_priv *priv;
struct snd_soc_dai_link *dai_link;
struct simple_dai_props *dai_props;
struct asoc_simple_dai *dais;
@@ -603,7 +700,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct snd_soc_card *card;
struct snd_soc_codec_conf *cconf;
- int lnum = 0, dnum = 0, cnum = 0;
+ struct link_info li;
int ret, i;
/* Allocate the private data and the DAI link array */
@@ -611,14 +708,20 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- asoc_simple_card_get_dais_count(dev, &lnum, &dnum, &cnum);
- if (!lnum || !dnum)
+ card = simple_priv_to_card(priv);
+ card->owner = THIS_MODULE;
+ card->dev = dev;
+ card->probe = simple_soc_probe;
+
+ memset(&li, 0, sizeof(li));
+ simple_get_dais_count(priv, &li);
+ if (!li.link || !li.dais)
return -EINVAL;
- dai_props = devm_kcalloc(dev, lnum, sizeof(*dai_props), GFP_KERNEL);
- dai_link = devm_kcalloc(dev, lnum, sizeof(*dai_link), GFP_KERNEL);
- dais = devm_kcalloc(dev, dnum, sizeof(*dais), GFP_KERNEL);
- cconf = devm_kcalloc(dev, cnum, sizeof(*cconf), GFP_KERNEL);
+ dai_props = devm_kcalloc(dev, li.link, sizeof(*dai_props), GFP_KERNEL);
+ dai_link = devm_kcalloc(dev, li.link, sizeof(*dai_link), GFP_KERNEL);
+ dais = devm_kcalloc(dev, li.dais, sizeof(*dais), GFP_KERNEL);
+ cconf = devm_kcalloc(dev, li.conf, sizeof(*cconf), GFP_KERNEL);
if (!dai_props || !dai_link || !dais)
return -ENOMEM;
@@ -628,30 +731,26 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
* see
* soc-core.c :: snd_soc_init_multicodec()
*/
- for (i = 0; i < lnum; i++) {
+ for (i = 0; i < li.link; i++) {
dai_link[i].codecs = &dai_props[i].codecs;
dai_link[i].num_codecs = 1;
- dai_link[i].platform = &dai_props[i].platform;
+ dai_link[i].platforms = &dai_props[i].platforms;
+ dai_link[i].num_platforms = 1;
}
- priv->dai_props = dai_props;
- priv->dai_link = dai_link;
- priv->dais = dais;
- priv->codec_conf = cconf;
+ priv->dai_props = dai_props;
+ priv->dai_link = dai_link;
+ priv->dais = dais;
+ priv->codec_conf = cconf;
- /* Init snd_soc_card */
- card = simple_priv_to_card(priv);
- card->owner = THIS_MODULE;
- card->dev = dev;
card->dai_link = priv->dai_link;
- card->num_links = lnum;
+ card->num_links = li.link;
card->codec_conf = cconf;
- card->num_configs = cnum;
- card->probe = asoc_simple_soc_card_probe;
+ card->num_configs = li.conf;
if (np && of_device_is_available(np)) {
- ret = asoc_simple_card_parse_of(priv);
+ ret = simple_parse_of(priv);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "parse error %d\n", ret);
@@ -686,7 +785,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
codecs->name = cinfo->codec;
codecs->dai_name = cinfo->codec_dai.name;
- platform = dai_link->platform;
+ platform = dai_link->platforms;
platform->name = cinfo->platform;
card->name = (cinfo->card) ? cinfo->card : cinfo->name;
@@ -694,7 +793,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
dai_link->stream_name = cinfo->name;
dai_link->cpu_dai_name = cinfo->cpu_dai.name;
dai_link->dai_fmt = cinfo->daifmt;
- dai_link->init = asoc_simple_card_dai_init;
+ dai_link->init = simple_dai_init;
memcpy(priv->dai_props->cpu_dai, &cinfo->cpu_dai,
sizeof(*priv->dai_props->cpu_dai));
memcpy(priv->dai_props->codec_dai, &cinfo->codec_dai,
@@ -714,28 +813,28 @@ err:
return ret;
}
-static int asoc_simple_card_remove(struct platform_device *pdev)
+static int simple_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
return asoc_simple_card_clean_reference(card);
}
-static const struct of_device_id asoc_simple_of_match[] = {
+static const struct of_device_id simple_of_match[] = {
{ .compatible = "simple-audio-card", },
{ .compatible = "simple-scu-audio-card", },
{},
};
-MODULE_DEVICE_TABLE(of, asoc_simple_of_match);
+MODULE_DEVICE_TABLE(of, simple_of_match);
static struct platform_driver asoc_simple_card = {
.driver = {
.name = "asoc-simple-card",
.pm = &snd_soc_pm_ops,
- .of_match_table = asoc_simple_of_match,
+ .of_match_table = simple_of_match,
},
- .probe = asoc_simple_card_probe,
- .remove = asoc_simple_card_remove,
+ .probe = simple_probe,
+ .remove = simple_remove,
};
module_platform_driver(asoc_simple_card);
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
deleted file mode 100644
index 9d7299d536a8..000000000000
--- a/sound/soc/generic/simple-scu-card.c
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// ASoC simple SCU sound card support
-//
-// Copyright (C) 2015 Renesas Solutions Corp.
-// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
-//
-// based on ${LINUX}/sound/soc/generic/simple-card.c
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <sound/jack.h>
-#include <sound/soc.h>
-#include <sound/soc-dai.h>
-#include <sound/simple_card_utils.h>
-
-struct simple_card_data {
- struct snd_soc_card snd_card;
- struct simple_dai_props {
- struct asoc_simple_dai *cpu_dai;
- struct asoc_simple_dai *codec_dai;
- struct snd_soc_dai_link_component codecs;
- struct snd_soc_dai_link_component platform;
- struct asoc_simple_card_data adata;
- struct snd_soc_codec_conf *codec_conf;
- } *dai_props;
- struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dais;
- struct asoc_simple_card_data adata;
- struct snd_soc_codec_conf *codec_conf;
-};
-
-#define simple_priv_to_card(priv) (&(priv)->snd_card)
-#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
-#define simple_priv_to_dev(priv) (simple_priv_to_card(priv)->dev)
-#define simple_priv_to_link(priv, i) (simple_priv_to_card(priv)->dai_link + (i))
-
-#define DAI "sound-dai"
-#define CELL "#sound-dai-cells"
-#define PREFIX "simple-audio-card,"
-
-static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *dai_props =
- simple_priv_to_props(priv, rtd->num);
- int ret;
-
- ret = asoc_simple_card_clk_enable(dai_props->cpu_dai);
- if (ret)
- return ret;
-
- ret = asoc_simple_card_clk_enable(dai_props->codec_dai);
- if (ret)
- asoc_simple_card_clk_disable(dai_props->cpu_dai);
-
- return ret;
-}
-
-static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *dai_props =
- simple_priv_to_props(priv, rtd->num);
-
- asoc_simple_card_clk_disable(dai_props->cpu_dai);
-
- asoc_simple_card_clk_disable(dai_props->codec_dai);
-}
-
-static const struct snd_soc_ops asoc_simple_card_ops = {
- .startup = asoc_simple_card_startup,
- .shutdown = asoc_simple_card_shutdown,
-};
-
-static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
- int ret;
-
- ret = asoc_simple_card_init_dai(rtd->codec_dai,
- dai_props->codec_dai);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_init_dai(rtd->cpu_dai,
- dai_props->cpu_dai);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
-{
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
-
- asoc_simple_card_convert_fixup(&dai_props->adata, params);
-
- /* overwrite by top level adata if exist */
- asoc_simple_card_convert_fixup(&priv->adata, params);
-
- return 0;
-}
-
-static int asoc_simple_card_dai_link_of(struct device_node *link,
- struct device_node *np,
- struct device_node *codec,
- struct simple_card_data *priv,
- int *dai_idx, int link_idx,
- int *conf_idx, int is_fe,
- bool is_top_level_node)
-{
- struct device *dev = simple_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, link_idx);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, link_idx);
- struct snd_soc_card *card = simple_priv_to_card(priv);
- struct asoc_simple_dai *dai;
- char *prefix = "";
- int ret;
-
- /* For single DAI link & old style of DT node */
- if (is_top_level_node)
- prefix = PREFIX;
-
- if (is_fe) {
- int is_single_links = 0;
- struct snd_soc_dai_link_component *codecs;
-
- /* BE is dummy */
- codecs = dai_link->codecs;
- codecs->of_node = NULL;
- codecs->dai_name = "snd-soc-dummy-dai";
- codecs->name = "snd-soc-dummy";
-
- /* FE settings */
- dai_link->dynamic = 1;
- dai_link->dpcm_merged_format = 1;
-
- dai =
- dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
-
- ret = asoc_simple_card_parse_cpu(np, dai_link, DAI, CELL,
- &is_single_links);
- if (ret)
- return ret;
-
- ret = asoc_simple_card_parse_clk_cpu(dev, np, dai_link, dai);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_set_dailink_name(dev, dai_link,
- "fe.%s",
- dai_link->cpu_dai_name);
- if (ret < 0)
- return ret;
-
- asoc_simple_card_canonicalize_cpu(dai_link, is_single_links);
- } else {
- struct snd_soc_codec_conf *cconf;
-
- /* FE is dummy */
- dai_link->cpu_of_node = NULL;
- dai_link->cpu_dai_name = "snd-soc-dummy-dai";
- dai_link->cpu_name = "snd-soc-dummy";
-
- /* BE settings */
- dai_link->no_pcm = 1;
- dai_link->be_hw_params_fixup = asoc_simple_card_be_hw_params_fixup;
-
- dai =
- dai_props->codec_dai = &priv->dais[(*dai_idx)++];
-
- cconf =
- dai_props->codec_conf = &priv->codec_conf[(*conf_idx)++];
-
- ret = asoc_simple_card_parse_codec(np, dai_link, DAI, CELL);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_parse_clk_codec(dev, np, dai_link, dai);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_set_dailink_name(dev, dai_link,
- "be.%s",
- dai_link->codecs->dai_name);
- if (ret < 0)
- return ret;
-
- /* check "prefix" from top node */
- snd_soc_of_parse_audio_prefix(card, cconf,
- dai_link->codecs->of_node,
- PREFIX "prefix");
- /* check "prefix" from each node if top doesn't have */
- if (!cconf->of_node)
- snd_soc_of_parse_node_prefix(np, cconf,
- dai_link->codecs->of_node,
- "prefix");
- }
-
- asoc_simple_card_parse_convert(dev, link, prefix, &dai_props->adata);
-
- ret = asoc_simple_card_of_parse_tdm(np, dai);
- if (ret)
- return ret;
-
- ret = asoc_simple_card_canonicalize_dailink(dai_link);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_parse_daifmt(dev, link, codec,
- prefix, &dai_link->dai_fmt);
- if (ret < 0)
- return ret;
-
- dai_link->dpcm_playback = 1;
- dai_link->dpcm_capture = 1;
- dai_link->ops = &asoc_simple_card_ops;
- dai_link->init = asoc_simple_card_dai_init;
-
- return 0;
-}
-
-static int asoc_simple_card_parse_of(struct simple_card_data *priv)
-
-{
- struct device *dev = simple_priv_to_dev(priv);
- struct device_node *top = dev->of_node;
- struct device_node *node;
- struct device_node *np;
- struct device_node *codec;
- struct snd_soc_card *card = simple_priv_to_card(priv);
- bool is_fe;
- int ret, loop;
- int dai_idx, link_idx, conf_idx;
-
- if (!top)
- return -EINVAL;
-
- ret = asoc_simple_card_of_parse_widgets(card, PREFIX);
- if (ret < 0)
- return ret;
-
- ret = asoc_simple_card_of_parse_routing(card, PREFIX);
- if (ret < 0)
- return ret;
-
- asoc_simple_card_parse_convert(dev, top, PREFIX, &priv->adata);
-
- loop = 1;
- link_idx = 0;
- dai_idx = 0;
- conf_idx = 0;
- node = of_get_child_by_name(top, PREFIX "dai-link");
- if (!node) {
- node = dev->of_node;
- loop = 0;
- }
-
- do {
- codec = of_get_child_by_name(node,
- loop ? "codec" : PREFIX "codec");
- if (!codec)
- return -ENODEV;
-
- for_each_child_of_node(node, np) {
- is_fe = (np != codec);
-
- ret = asoc_simple_card_dai_link_of(node, np, codec, priv,
- &dai_idx, link_idx++,
- &conf_idx,
- is_fe, !loop);
- if (ret < 0)
- return ret;
- }
- node = of_get_next_child(top, node);
- } while (loop && node);
-
- ret = asoc_simple_card_parse_card_name(card, PREFIX);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static void asoc_simple_card_get_dais_count(struct device *dev,
- int *link_num,
- int *dais_num,
- int *ccnf_num)
-{
- struct device_node *top = dev->of_node;
- struct device_node *node;
- int loop;
- int num;
-
- /*
- * link_num : number of links.
- * CPU-Codec / CPU-dummy / dummy-Codec
- * dais_num : number of DAIs
- * ccnf_num : number of codec_conf
- * same number for "dummy-Codec"
- *
- * ex1)
- * CPU0 --- Codec0 link : 5
- * CPU1 --- Codec1 dais : 7
- * CPU2 -/ ccnf : 1
- * CPU3 --- Codec2
- *
- * => 5 links = 2xCPU-Codec + 2xCPU-dummy + 1xdummy-Codec
- * => 7 DAIs = 4xCPU + 3xCodec
- * => 1 ccnf = 1xdummy-Codec
- *
- * ex2)
- * CPU0 --- Codec0 link : 5
- * CPU1 --- Codec1 dais : 6
- * CPU2 -/ ccnf : 1
- * CPU3 -/
- *
- * => 5 links = 1xCPU-Codec + 3xCPU-dummy + 1xdummy-Codec
- * => 6 DAIs = 4xCPU + 2xCodec
- * => 1 ccnf = 1xdummy-Codec
- *
- * ex3)
- * CPU0 --- Codec0 link : 6
- * CPU1 -/ dais : 6
- * CPU2 --- Codec1 ccnf : 2
- * CPU3 -/
- *
- * => 6 links = 0xCPU-Codec + 4xCPU-dummy + 2xdummy-Codec
- * => 6 DAIs = 4xCPU + 2xCodec
- * => 2 ccnf = 2xdummy-Codec
- */
- if (!top) {
- (*link_num) = 1;
- (*dais_num) = 2;
- (*ccnf_num) = 0;
- return;
- }
-
- loop = 1;
- node = of_get_child_by_name(top, PREFIX "dai-link");
- if (!node) {
- node = top;
- loop = 0;
- }
-
- do {
- num = of_get_child_count(node);
- (*dais_num) += num;
- if (num > 2) {
- (*link_num) += num;
- (*ccnf_num)++;
- } else {
- (*link_num)++;
- }
- node = of_get_next_child(top, node);
- } while (loop && node);
-}
-
-static int asoc_simple_card_probe(struct platform_device *pdev)
-{
- struct simple_card_data *priv;
- struct snd_soc_dai_link *dai_link;
- struct simple_dai_props *dai_props;
- struct asoc_simple_dai *dais;
- struct snd_soc_card *card;
- struct snd_soc_codec_conf *cconf;
- struct device *dev = &pdev->dev;
- int ret, i;
- int lnum = 0, dnum = 0, cnum = 0;
-
- /* Allocate the private data */
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- asoc_simple_card_get_dais_count(dev, &lnum, &dnum, &cnum);
- if (!lnum || !dnum)
- return -EINVAL;
-
- dai_props = devm_kcalloc(dev, lnum, sizeof(*dai_props), GFP_KERNEL);
- dai_link = devm_kcalloc(dev, lnum, sizeof(*dai_link), GFP_KERNEL);
- dais = devm_kcalloc(dev, dnum, sizeof(*dais), GFP_KERNEL);
- cconf = devm_kcalloc(dev, cnum, sizeof(*cconf), GFP_KERNEL);
- if (!dai_props || !dai_link || !dais)
- return -ENOMEM;
-
- /*
- * Use snd_soc_dai_link_component instead of legacy style
- * It is codec only. but cpu/platform will be supported in the future.
- * see
- * soc-core.c :: snd_soc_init_multicodec()
- */
- for (i = 0; i < lnum; i++) {
- dai_link[i].codecs = &dai_props[i].codecs;
- dai_link[i].num_codecs = 1;
- dai_link[i].platform = &dai_props[i].platform;
- }
-
- priv->dai_props = dai_props;
- priv->dai_link = dai_link;
- priv->dais = dais;
- priv->codec_conf = cconf;
-
- /* Init snd_soc_card */
- card = simple_priv_to_card(priv);
- card->owner = THIS_MODULE;
- card->dev = dev;
- card->dai_link = priv->dai_link;
- card->num_links = lnum;
- card->codec_conf = cconf;
- card->num_configs = cnum;
-
- ret = asoc_simple_card_parse_of(priv);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "parse error %d\n", ret);
- goto err;
- }
-
- snd_soc_card_set_drvdata(card, priv);
-
- ret = devm_snd_soc_register_card(dev, card);
- if (ret < 0)
- goto err;
-
- return 0;
-err:
- asoc_simple_card_clean_reference(card);
-
- return ret;
-}
-
-static int asoc_simple_card_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- return asoc_simple_card_clean_reference(card);
-}
-
-static const struct of_device_id asoc_simple_of_match[] = {
- { .compatible = "renesas,rsrc-card", },
- { .compatible = "simple-scu-audio-card", },
- {},
-};
-MODULE_DEVICE_TABLE(of, asoc_simple_of_match);
-
-static struct platform_driver asoc_simple_card = {
- .driver = {
- .name = "simple-scu-audio-card",
- .pm = &snd_soc_pm_ops,
- .of_match_table = asoc_simple_of_match,
- },
- .probe = asoc_simple_card_probe,
- .remove = asoc_simple_card_remove,
-};
-
-module_platform_driver(asoc_simple_card);
-
-MODULE_ALIAS("platform:asoc-simple-scu-card");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("ASoC Simple SCU Sound Card");
-MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 99a62ba409df..bd9fd2035c55 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -91,7 +91,7 @@ config SND_SST_ATOM_HIFI2_PLATFORM_PCI
config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
default ACPI
- depends on X86 && ACPI
+ depends on X86 && ACPI && PCI
select SND_SST_IPC_ACPI
select SND_SST_ATOM_HIFI2_PLATFORM
select SND_SOC_ACPI_INTEL_MATCH
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 3672d36b4b66..d1207ea53523 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -647,7 +647,7 @@ static int sst_swm_mixer_event(struct snd_soc_dapm_widget *w,
set_mixer = false;
}
- if (set_mixer == false)
+ if (!set_mixer)
return 0;
if (SND_SOC_DAPM_EVENT_ON(event) ||
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index afc559866095..08cea5b5cda9 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -190,7 +190,7 @@ int sst_fill_stream_params(void *substream,
map = ctx->pdata->pdev_strm_map;
map_size = ctx->pdata->strm_map_size;
- if (is_compress == true)
+ if (is_compress)
cstream = (struct snd_compr_stream *)substream;
else
pstream = (struct snd_pcm_substream *)substream;
@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ int ret;
+
+ ret =
+ snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(params));
+ if (ret)
+ return ret;
memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
return 0;
}
@@ -681,20 +687,15 @@ static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
- int retval = 0;
if (dai->driver->playback.channels_min ||
dai->driver->capture.channels_min) {
- retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_DMA),
SST_MIN_BUFFER, SST_MAX_BUFFER);
- if (retval) {
- dev_err(rtd->dev, "dma buffer allocation failure\n");
- return retval;
- }
}
- return retval;
+ return 0;
}
static int sst_soc_probe(struct snd_soc_component *component)
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index ac542535b9d5..ae17ce4677a5 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -255,18 +255,16 @@ static int is_byt(void)
return status;
}
-static int is_byt_cr(struct device *dev, bool *bytcr)
+static bool is_byt_cr(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int status = 0;
- if (IS_ENABLED(CONFIG_IOSF_MBI)) {
- u32 bios_status;
-
- if (!is_byt() || !iosf_mbi_available()) {
- /* bail silently */
- return status;
- }
+ if (!is_byt())
+ return false;
+ if (iosf_mbi_available()) {
+ u32 bios_status;
status = iosf_mbi_read(BT_MBI_UNIT_PMC, /* 0x04 PUNIT */
MBI_REG_READ, /* 0x10 */
0x006, /* BIOS_CONFIG */
@@ -278,15 +276,28 @@ static int is_byt_cr(struct device *dev, bool *bytcr)
/* bits 26:27 mirror PMIC options */
bios_status = (bios_status >> 26) & 3;
- if ((bios_status == 1) || (bios_status == 3))
- *bytcr = true;
- else
- dev_info(dev, "BYT-CR not detected\n");
+ if (bios_status == 1 || bios_status == 3) {
+ dev_info(dev, "Detected Baytrail-CR platform\n");
+ return true;
+ }
+
+ dev_info(dev, "BYT-CR not detected\n");
}
} else {
- dev_info(dev, "IOSF_MBI not enabled, no BYT-CR detection\n");
+ dev_info(dev, "IOSF_MBI not available, no BYT-CR detection\n");
}
- return status;
+
+ if (platform_get_resource(pdev, IORESOURCE_IRQ, 5) == NULL) {
+ /*
+ * Some devices detected as BYT-T have only a single IRQ listed,
+ * causing platform_get_irq with index 5 to return -ENXIO.
+ * The correct IRQ in this case is at index 0, as on BYT-CR.
+ */
+ dev_info(dev, "Falling back to Baytrail-CR platform\n");
+ return true;
+ }
+
+ return false;
}
@@ -301,7 +312,6 @@ static int sst_acpi_probe(struct platform_device *pdev)
struct platform_device *plat_dev;
struct sst_platform_info *pdata;
unsigned int dev_id;
- bool bytcr = false;
id = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!id)
@@ -333,10 +343,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = is_byt_cr(dev, &bytcr);
- if (!((ret < 0) || (bytcr == false))) {
- dev_info(dev, "Detected Baytrail-CR platform\n");
-
+ if (is_byt_cr(pdev)) {
/* override resource info */
byt_rvp_platform_data.res_info = &bytcr_res_info;
}
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 5455d6e0ab53..a592df06aa58 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -146,7 +146,7 @@ static int sst_power_control(struct device *dev, bool state)
int ret = 0;
int usage_count = 0;
- if (state == true) {
+ if (state) {
ret = pm_runtime_get_sync(dev);
usage_count = GET_USAGE_COUNT(dev);
dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
index b8c456753f01..321c783cf833 100644
--- a/sound/soc/intel/atom/sst/sst_loader.c
+++ b/sound/soc/intel/atom/sst/sst_loader.c
@@ -269,7 +269,7 @@ static void sst_do_memcpy(struct list_head *memcpy_list)
struct sst_memcpy_list *listnode;
list_for_each_entry(listnode, memcpy_list, memcpylist) {
- if (listnode->is_io == true)
+ if (listnode->is_io)
memcpy32_toio((void __iomem *)listnode->dstn,
listnode->src, listnode->size);
else
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 260447da32b8..2cd8f9668b50 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -278,7 +278,6 @@ static int sst_byt_process_notification(struct sst_byt *byt,
struct sst_byt_stream *stream;
u64 header;
u8 msg_id, stream_id;
- int handled = 1;
header = sst_dsp_shim_read64_unlocked(sst, SST_IPCD);
msg_id = sst_byt_header_msg_id(header);
@@ -298,7 +297,7 @@ static int sst_byt_process_notification(struct sst_byt *byt,
break;
}
- return handled;
+ return 1;
}
static irqreturn_t sst_byt_irq_thread(int irq, void *context)
diff --git a/sound/soc/intel/baytrail/sst-baytrail-pcm.c b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
index aabb35bf6b96..5373605de0af 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-pcm.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
@@ -188,7 +188,7 @@ static int sst_byt_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
sst_byt_stream_start(byt, pcm_data->stream, 0);
break;
case SNDRV_PCM_TRIGGER_RESUME:
- if (pdata->restore_stream == true)
+ if (pdata->restore_stream)
schedule_work(&pcm_data->work);
else
sst_byt_stream_resume(byt, pcm_data->stream);
@@ -327,23 +327,16 @@ static int sst_byt_pcm_new(struct snd_soc_pcm_runtime *rtd)
size_t size;
struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_pdata *pdata = dev_get_platdata(component->dev);
- int ret = 0;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
size = sst_byt_pcm_hardware.buffer_bytes_max;
- ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV,
- pdata->dma_dev,
- size, size);
- if (ret) {
- dev_err(rtd->dev, "dma buffer allocation failed %d\n",
- ret);
- return ret;
- }
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ pdata->dma_dev,
+ size, size);
}
- return ret;
+ return 0;
}
static struct snd_soc_dai_driver byt_dais[] = {
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 0a7e40d06395..12d6b73e9531 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -293,6 +293,7 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_DA7219
select SND_SOC_MAX98927
+ select SND_SOC_MAX98373
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
help
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index efcfd906c856..1844c88ea4e2 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -26,6 +26,7 @@
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <sound/jack.h>
+#include <sound/soc-acpi.h>
#include "../common/sst-dsp.h"
#include "../haswell/sst-haswell-ipc.h"
@@ -339,6 +340,9 @@ static struct snd_soc_card bdw_rt5677_card = {
static int bdw_rt5677_probe(struct platform_device *pdev)
{
struct bdw_rt5677_priv *bdw_rt5677;
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name = NULL;
+ int ret;
bdw_rt5677_card.dev = &pdev->dev;
@@ -350,6 +354,16 @@ static int bdw_rt5677_probe(struct platform_device *pdev)
return -ENOMEM;
}
+	/* override platform name, if required */
+ mach = (&pdev->dev)->platform_data;
+ if (mach) /* extra check since legacy does not pass parameters */
+ platform_name = mach->mach_params.platform;
+
+ ret = snd_soc_fixup_dai_links_platform_name(&bdw_rt5677_card,
+ platform_name);
+ if (ret)
+ return ret;
+
snd_soc_card_set_drvdata(&bdw_rt5677_card, bdw_rt5677);
return devm_snd_soc_register_card(&pdev->dev, &bdw_rt5677_card);
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 68e6543e6cb0..b86c746d9b7a 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -21,6 +21,7 @@
#include <sound/soc.h>
#include <sound/jack.h>
#include <sound/pcm_params.h>
+#include <sound/soc-acpi.h>
#include "../common/sst-dsp.h"
#include "../haswell/sst-haswell-ipc.h"
@@ -192,7 +193,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.stream_name = "Loopback",
.cpu_dai_name = "Loopback Pin",
.platform_name = "haswell-pcm-audio",
- .dynamic = 0,
+ .dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -267,7 +268,22 @@ static struct snd_soc_card broadwell_rt286 = {
static int broadwell_audio_probe(struct platform_device *pdev)
{
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name = NULL;
+ int ret;
+
broadwell_rt286.dev = &pdev->dev;
+
+	/* override platform name, if required */
+ mach = (&pdev->dev)->platform_data;
+ if (mach) /* extra check since legacy does not pass parameters */
+ platform_name = mach->mach_params.platform;
+
+ ret = snd_soc_fixup_dai_links_platform_name(&broadwell_rt286,
+ platform_name);
+ if (ret)
+ return ret;
+
return devm_snd_soc_register_card(&pdev->dev, &broadwell_rt286);
}
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 6f052fc8d1e2..5cadb7f654f3 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -16,6 +16,8 @@
* GNU General Public License for more details.
*/
+#include <asm/cpu_device_id.h>
+#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/core.h>
@@ -23,6 +25,7 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/hdac_hdmi.h"
#include "../../codecs/da7219.h"
#include "../../codecs/da7219-aad.h"
@@ -48,6 +51,7 @@ struct bxt_card_private {
enum {
BXT_DPCM_AUDIO_PB = 0,
BXT_DPCM_AUDIO_CP,
+ BXT_DPCM_AUDIO_HS_PB,
BXT_DPCM_AUDIO_REF_CP,
BXT_DPCM_AUDIO_DMIC_CP,
BXT_DPCM_AUDIO_HDMI1_PB,
@@ -102,7 +106,7 @@ static const struct snd_soc_dapm_widget broxton_widgets[] = {
platform_clock_control, SND_SOC_DAPM_POST_PMD|SND_SOC_DAPM_PRE_PMU),
};
-static const struct snd_soc_dapm_route broxton_map[] = {
+static const struct snd_soc_dapm_route audio_map[] = {
/* HP jack connectors - unknown if we have jack detection */
{"Headphone Jack", NULL, "HPL"},
{"Headphone Jack", NULL, "HPR"},
@@ -117,15 +121,6 @@ static const struct snd_soc_dapm_route broxton_map[] = {
{"DMic", NULL, "SoC DMIC"},
/* CODEC BE connections */
- {"HiFi Playback", NULL, "ssp5 Tx"},
- {"ssp5 Tx", NULL, "codec0_out"},
-
- {"Playback", NULL, "ssp1 Tx"},
- {"ssp1 Tx", NULL, "codec1_out"},
-
- {"codec0_in", NULL, "ssp1 Rx"},
- {"ssp1 Rx", NULL, "Capture"},
-
{"HDMI1", NULL, "hif5-0 Output"},
{"HDMI2", NULL, "hif6-0 Output"},
{"HDMI2", NULL, "hif7-0 Output"},
@@ -145,6 +140,28 @@ static const struct snd_soc_dapm_route broxton_map[] = {
{ "Headset Mic", NULL, "Platform Clock" },
};
+static const struct snd_soc_dapm_route broxton_map[] = {
+ {"HiFi Playback", NULL, "ssp5 Tx"},
+ {"ssp5 Tx", NULL, "codec0_out"},
+
+ {"Playback", NULL, "ssp1 Tx"},
+ {"ssp1 Tx", NULL, "codec1_out"},
+
+ {"codec0_in", NULL, "ssp1 Rx"},
+ {"ssp1 Rx", NULL, "Capture"},
+};
+
+static const struct snd_soc_dapm_route gemini_map[] = {
+ {"HiFi Playback", NULL, "ssp1 Tx"},
+ {"ssp1 Tx", NULL, "codec0_out"},
+
+ {"Playback", NULL, "ssp2 Tx"},
+ {"ssp2 Tx", NULL, "codec1_out"},
+
+ {"codec0_in", NULL, "ssp2 Rx"},
+ {"ssp2 Rx", NULL, "Capture"},
+};
+
static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
@@ -192,6 +209,12 @@ static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
+ snd_jack_set_key(broxton_headset.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(broxton_headset.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(broxton_headset.jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(broxton_headset.jack, SND_JACK_BTN_3,
+ KEY_VOICECOMMAND);
+
da7219_aad_jack_det(component, &broxton_headset);
snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
@@ -383,6 +406,20 @@ static struct snd_soc_dai_link broxton_dais[] = {
.dpcm_capture = 1,
.ops = &broxton_da7219_fe_ops,
},
+ [BXT_DPCM_AUDIO_HS_PB] = {
+ .name = "Bxt Audio Headset Playback",
+ .stream_name = "Headset Playback",
+ .cpu_dai_name = "System Pin2",
+ .platform_name = "0000:00:0e.0",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &broxton_da7219_fe_ops,
+ },
[BXT_DPCM_AUDIO_REF_CP] =
{
.name = "Bxt Audio Reference cap",
@@ -531,6 +568,11 @@ static struct snd_soc_dai_link broxton_dais[] = {
},
};
+static const struct x86_cpu_id glk_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x7A }, /* Geminilake CPU_ID */
+ {}
+};
+
#define NAME_SIZE 32
static int bxt_card_late_probe(struct snd_soc_card *card)
{
@@ -540,6 +582,13 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
int err, i = 0;
char jack_name[NAME_SIZE];
+ if (x86_match_cpu(glk_ids))
+ snd_soc_dapm_add_routes(&card->dapm, gemini_map,
+ ARRAY_SIZE(gemini_map));
+ else
+ snd_soc_dapm_add_routes(&card->dapm, broxton_map,
+ ARRAY_SIZE(broxton_map));
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -575,8 +624,8 @@ static struct snd_soc_card broxton_audio_card = {
.num_controls = ARRAY_SIZE(broxton_controls),
.dapm_widgets = broxton_widgets,
.num_dapm_widgets = ARRAY_SIZE(broxton_widgets),
- .dapm_routes = broxton_map,
- .num_dapm_routes = ARRAY_SIZE(broxton_map),
+ .dapm_routes = audio_map,
+ .num_dapm_routes = ARRAY_SIZE(audio_map),
.fully_routed = true,
.late_probe = bxt_card_late_probe,
};
@@ -584,6 +633,9 @@ static struct snd_soc_card broxton_audio_card = {
static int broxton_audio_probe(struct platform_device *pdev)
{
struct bxt_card_private *ctx;
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
+ int ret;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -593,16 +645,52 @@ static int broxton_audio_probe(struct platform_device *pdev)
broxton_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&broxton_audio_card, ctx);
+ if (x86_match_cpu(glk_ids)) {
+ unsigned int i;
+
+ broxton_audio_card.name = "glkda7219max";
+ /* Fixup the SSP entries for geminilake */
+ for (i = 0; i < ARRAY_SIZE(broxton_dais); i++) {
+ /* MAXIM_CODEC is connected to SSP1. */
+ if (!strcmp(broxton_dais[i].codec_dai_name,
+ BXT_MAXIM_CODEC_DAI)) {
+ broxton_dais[i].name = "SSP1-Codec";
+ broxton_dais[i].cpu_dai_name = "SSP1 Pin";
+ }
+			/* DIALOG_CODEC is connected to SSP2. */
+ else if (!strcmp(broxton_dais[i].codec_dai_name,
+ BXT_DIALOG_CODEC_DAI)) {
+ broxton_dais[i].name = "SSP2-Codec";
+ broxton_dais[i].cpu_dai_name = "SSP2 Pin";
+ }
+ }
+ }
+
+	/* override platform name, if required */
+ mach = (&pdev->dev)->platform_data;
+ platform_name = mach->mach_params.platform;
+
+ ret = snd_soc_fixup_dai_links_platform_name(&broxton_audio_card,
+ platform_name);
+ if (ret)
+ return ret;
return devm_snd_soc_register_card(&pdev->dev, &broxton_audio_card);
}
+static const struct platform_device_id bxt_board_ids[] = {
+ { .name = "bxt_da7219_max98357a" },
+ { .name = "glk_da7219_max98357a" },
+ { }
+};
+
static struct platform_driver broxton_audio = {
.probe = broxton_audio_probe,
.driver = {
.name = "bxt_da7219_max98357a",
.pm = &snd_soc_pm_ops,
},
+ .id_table = bxt_board_ids,
};
module_platform_driver(broxton_audio)
@@ -612,5 +700,7 @@ MODULE_AUTHOR("Sathyanarayana Nujella <sathyanarayana.nujella@intel.com>");
MODULE_AUTHOR("Rohit Ainapure <rohit.m.ainapure@intel.com>");
MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
MODULE_AUTHOR("Conrad Cooke <conrad.cooke@intel.com>");
+MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:bxt_da7219_max98357a");
+MODULE_ALIAS("platform:glk_da7219_max98357a");
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 27308337ab12..e91057f83d20 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -21,6 +21,7 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include <sound/jack.h>
#include <sound/pcm_params.h>
#include "../../codecs/hdac_hdmi.h"
@@ -576,6 +577,9 @@ static int broxton_audio_probe(struct platform_device *pdev)
struct bxt_rt286_private *ctx;
struct snd_soc_card *card =
(struct snd_soc_card *)pdev->id_entry->driver_data;
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
+ int ret;
int i;
for (i = 0; i < ARRAY_SIZE(broxton_rt298_dais); i++) {
@@ -602,6 +606,15 @@ static int broxton_audio_probe(struct platform_device *pdev)
card->dev = &pdev->dev;
snd_soc_card_set_drvdata(card, ctx);
+	/* override platform name, if required */
+ mach = (&pdev->dev)->platform_data;
+ platform_name = mach->mach_params.platform;
+
+ ret = snd_soc_fixup_dai_links_platform_name(card,
+ platform_name);
+ if (ret)
+ return ret;
+
return devm_snd_soc_register_card(&pdev->dev, card);
}
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index 2179dedb28ad..b8e884803777 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -225,6 +225,7 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
{
struct snd_soc_card *card;
struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
const char *i2c_name = NULL;
int dai_index = 0;
int ret_val = 0;
@@ -250,6 +251,13 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
dailink[dai_index].codec_name = codec_name;
}
+	/* override platform name, if required */
+ platform_name = mach->mach_params.platform;
+
+ ret_val = snd_soc_fixup_dai_links_platform_name(card, platform_name);
+ if (ret_val)
+ return ret_val;
+
ret_val = devm_snd_soc_register_card(&pdev->dev, card);
if (ret_val) {
dev_err(&pdev->dev,
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index adc26dfc7d65..d2a7e6ba11ae 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -19,13 +19,20 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/device.h>
#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/platform_sst_audio.h>
-#include <linux/clk.h>
+#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -35,27 +42,96 @@
struct byt_cht_es8316_private {
struct clk *mclk;
+ struct snd_soc_jack jack;
+ struct gpio_desc *speaker_en_gpio;
+ bool speaker_en;
};
+enum {
+ BYT_CHT_ES8316_INTMIC_IN1_MAP,
+ BYT_CHT_ES8316_INTMIC_IN2_MAP,
+};
+
+#define BYT_CHT_ES8316_MAP(quirk) ((quirk) & GENMASK(3, 0))
+#define BYT_CHT_ES8316_SSP0 BIT(16)
+#define BYT_CHT_ES8316_MONO_SPEAKER BIT(17)
+
+static int quirk;
+
+static int quirk_override = -1;
+module_param_named(quirk, quirk_override, int, 0444);
+MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+
+static void log_quirks(struct device *dev)
+{
+ if (BYT_CHT_ES8316_MAP(quirk) == BYT_CHT_ES8316_INTMIC_IN1_MAP)
+ dev_info(dev, "quirk IN1_MAP enabled");
+ if (BYT_CHT_ES8316_MAP(quirk) == BYT_CHT_ES8316_INTMIC_IN2_MAP)
+ dev_info(dev, "quirk IN2_MAP enabled");
+ if (quirk & BYT_CHT_ES8316_SSP0)
+ dev_info(dev, "quirk SSP0 enabled");
+ if (quirk & BYT_CHT_ES8316_MONO_SPEAKER)
+ dev_info(dev, "quirk MONO_SPEAKER enabled\n");
+}
+
+static int byt_cht_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_card *card = w->dapm->card;
+ struct byt_cht_es8316_private *priv = snd_soc_card_get_drvdata(card);
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ priv->speaker_en = true;
+ else
+ priv->speaker_en = false;
+
+ gpiod_set_value_cansleep(priv->speaker_en_gpio, priv->speaker_en);
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget byt_cht_es8316_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Internal Mic", NULL),
- /*
- * The codec supports two analog microphone inputs. I have only
- * tested MIC1. A DMIC route could also potentially be added
- * if such functionality is found on another platform.
- */
- SND_SOC_DAPM_MIC("Microphone 1", NULL),
- SND_SOC_DAPM_MIC("Microphone 2", NULL),
+ SND_SOC_DAPM_SUPPLY("Speaker Power", SND_SOC_NOPM, 0, 0,
+ byt_cht_es8316_speaker_power_event,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
};
static const struct snd_soc_dapm_route byt_cht_es8316_audio_map[] = {
- {"MIC1", NULL, "Microphone 1"},
- {"MIC2", NULL, "Microphone 2"},
-
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
+ /*
+	 * There is no separate speaker output; instead the speakers are muxed to
+ * the HP outputs. The mux is controlled by the "Speaker Power" supply.
+ */
+ {"Speaker", NULL, "HPOL"},
+ {"Speaker", NULL, "HPOR"},
+ {"Speaker", NULL, "Speaker Power"},
+};
+
+static const struct snd_soc_dapm_route byt_cht_es8316_intmic_in1_map[] = {
+ {"MIC1", NULL, "Internal Mic"},
+ {"MIC2", NULL, "Headset Mic"},
+};
+
+static const struct snd_soc_dapm_route byt_cht_es8316_intmic_in2_map[] = {
+ {"MIC2", NULL, "Internal Mic"},
+ {"MIC1", NULL, "Headset Mic"},
+};
+
+static const struct snd_soc_dapm_route byt_cht_es8316_ssp0_map[] = {
+ {"Playback", NULL, "ssp0 Tx"},
+ {"ssp0 Tx", NULL, "modem_out"},
+ {"modem_in", NULL, "ssp0 Rx"},
+ {"ssp0 Rx", NULL, "Capture"},
+};
+
+static const struct snd_soc_dapm_route byt_cht_es8316_ssp2_map[] = {
{"Playback", NULL, "ssp2 Tx"},
{"ssp2 Tx", NULL, "codec_out0"},
{"ssp2 Tx", NULL, "codec_out1"},
@@ -65,19 +141,60 @@ static const struct snd_soc_dapm_route byt_cht_es8316_audio_map[] = {
};
static const struct snd_kcontrol_new byt_cht_es8316_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Microphone 1"),
- SOC_DAPM_PIN_SWITCH("Microphone 2"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Internal Mic"),
+};
+
+static struct snd_soc_jack_pin byt_cht_es8316_jack_pins[] = {
+ {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
};
static int byt_cht_es8316_init(struct snd_soc_pcm_runtime *runtime)
{
+ struct snd_soc_component *codec = runtime->codec_dai->component;
struct snd_soc_card *card = runtime->card;
struct byt_cht_es8316_private *priv = snd_soc_card_get_drvdata(card);
+ const struct snd_soc_dapm_route *custom_map;
+ int num_routes;
int ret;
card->dapm.idle_bias_off = true;
+ switch (BYT_CHT_ES8316_MAP(quirk)) {
+ case BYT_CHT_ES8316_INTMIC_IN1_MAP:
+ default:
+ custom_map = byt_cht_es8316_intmic_in1_map;
+ num_routes = ARRAY_SIZE(byt_cht_es8316_intmic_in1_map);
+ break;
+ case BYT_CHT_ES8316_INTMIC_IN2_MAP:
+ custom_map = byt_cht_es8316_intmic_in2_map;
+ num_routes = ARRAY_SIZE(byt_cht_es8316_intmic_in2_map);
+ break;
+ }
+ ret = snd_soc_dapm_add_routes(&card->dapm, custom_map, num_routes);
+ if (ret)
+ return ret;
+
+ if (quirk & BYT_CHT_ES8316_SSP0) {
+ custom_map = byt_cht_es8316_ssp0_map;
+ num_routes = ARRAY_SIZE(byt_cht_es8316_ssp0_map);
+ } else {
+ custom_map = byt_cht_es8316_ssp2_map;
+ num_routes = ARRAY_SIZE(byt_cht_es8316_ssp2_map);
+ }
+ ret = snd_soc_dapm_add_routes(&card->dapm, custom_map, num_routes);
+ if (ret)
+ return ret;
+
/*
* The firmware might enable the clock at boot (this information
* may or may not be reflected in the enable clock register).
@@ -105,6 +222,18 @@ static int byt_cht_es8316_init(struct snd_soc_pcm_runtime *runtime)
return ret;
}
+ ret = snd_soc_card_jack_new(card, "Headset",
+ SND_JACK_HEADSET | SND_JACK_BTN_0,
+ &priv->jack, byt_cht_es8316_jack_pins,
+ ARRAY_SIZE(byt_cht_es8316_jack_pins));
+ if (ret) {
+ dev_err(card->dev, "jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(priv->jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_soc_component_set_jack(codec, &priv->jack, NULL);
+
return 0;
}
@@ -123,14 +252,21 @@ static int byt_cht_es8316_codec_fixup(struct snd_soc_pcm_runtime *rtd,
SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- int ret;
+ int ret, bits;
	/* The DSP will convert the FE rate to 48k, stereo */
rate->min = rate->max = 48000;
channels->min = channels->max = 2;
- /* set SSP2 to 24-bit */
- params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
+ if (quirk & BYT_CHT_ES8316_SSP0) {
+ /* set SSP0 to 16-bit */
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
+ bits = 16;
+ } else {
+ /* set SSP2 to 24-bit */
+ params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
+ bits = 24;
+ }
/*
* Default mode for SSP configuration is TDM 4 slot, override config
@@ -147,7 +283,7 @@ static int byt_cht_es8316_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
+ ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, bits);
if (ret < 0) {
dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
return ret;
@@ -218,6 +354,59 @@ static struct snd_soc_dai_link byt_cht_es8316_dais[] = {
/* SoC card */
+static char codec_name[SND_ACPI_I2C_ID_LEN];
+static char long_name[50]; /* = "bytcht-es8316-*-spk-*-mic" */
+
+static int byt_cht_es8316_suspend(struct snd_soc_card *card)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component) {
+ if (!strcmp(component->name, codec_name)) {
+ dev_dbg(component->dev, "disabling jack detect before suspend\n");
+ snd_soc_component_set_jack(component, NULL, NULL);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int byt_cht_es8316_resume(struct snd_soc_card *card)
+{
+ struct byt_cht_es8316_private *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component) {
+ if (!strcmp(component->name, codec_name)) {
+ dev_dbg(component->dev, "re-enabling jack detect after resume\n");
+ snd_soc_component_set_jack(component, &priv->jack, NULL);
+ break;
+ }
+ }
+
+ /*
+ * Some Cherry Trail boards with an ES8316 codec have a bug in their
+ * ACPI tables where the MSSL1680 touchscreen's _PS0 and _PS3 methods
+ * wrongly also set the speaker-enable GPIO to 1/0. Testing has shown
+ * that this really is a bug and the GPIO has no influence on the
+ * touchscreen at all.
+ *
+ * The silead.c touchscreen driver does not support runtime suspend, so
+ * the GPIO can only be changed underneath us during a system suspend.
+ * This resume() function runs from a pm complete() callback, and thus
+ * is guaranteed to run after the touchscreen driver/ACPI-subsys has
+ * brought the touchscreen back up again (and thus changed the GPIO).
+ *
+ * So to work around this we pass GPIOD_FLAGS_BIT_NONEXCLUSIVE when
+ * requesting the GPIO and we set its value here to undo any changes
+ * done by the touchscreen's broken _PS0 ACPI method.
+ */
+ gpiod_set_value_cansleep(priv->speaker_en_gpio, priv->speaker_en);
+
+ return 0;
+}
+
static struct snd_soc_card byt_cht_es8316_card = {
.name = "bytcht-es8316",
.owner = THIS_MODULE,
@@ -230,24 +419,40 @@ static struct snd_soc_card byt_cht_es8316_card = {
.controls = byt_cht_es8316_controls,
.num_controls = ARRAY_SIZE(byt_cht_es8316_controls),
.fully_routed = true,
+ .suspend_pre = byt_cht_es8316_suspend,
+ .resume_post = byt_cht_es8316_resume,
};
-static char codec_name[SND_ACPI_I2C_ID_LEN];
+static const struct x86_cpu_id baytrail_cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, /* Valleyview */
+ {}
+};
+
+static const struct acpi_gpio_params first_gpio = { 0, 0, false };
+
+static const struct acpi_gpio_mapping byt_cht_es8316_gpios[] = {
+ { "speaker-enable-gpios", &first_gpio, 1 },
+ { },
+};
static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
{
+ static const char * const mic_name[] = { "in1", "in2" };
struct byt_cht_es8316_private *priv;
+ struct device *dev = &pdev->dev;
struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
const char *i2c_name = NULL;
+ struct device *codec_dev;
int dai_index = 0;
int i;
int ret = 0;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- mach = (&pdev->dev)->platform_data;
+ mach = dev->platform_data;
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
if (!strcmp(byt_cht_es8316_dais[i].codec_name,
@@ -265,26 +470,94 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
byt_cht_es8316_dais[dai_index].codec_name = codec_name;
}
- /* register the soc card */
- byt_cht_es8316_card.dev = &pdev->dev;
- snd_soc_card_set_drvdata(&byt_cht_es8316_card, priv);
+	/* override platform name, if required */
+ platform_name = mach->mach_params.platform;
- priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ ret = snd_soc_fixup_dai_links_platform_name(&byt_cht_es8316_card,
+ platform_name);
+ if (ret)
+ return ret;
+
+ /* Check for BYTCR or other platform and setup quirks */
+ if (x86_match_cpu(baytrail_cpu_ids) &&
+ mach->mach_params.acpi_ipc_irq_index == 0) {
+ /* On BYTCR default to SSP0, internal-mic-in2-map, mono-spk */
+ quirk = BYT_CHT_ES8316_SSP0 | BYT_CHT_ES8316_INTMIC_IN2_MAP |
+ BYT_CHT_ES8316_MONO_SPEAKER;
+ } else {
+ /* Others default to internal-mic-in1-map, mono-speaker */
+ quirk = BYT_CHT_ES8316_INTMIC_IN1_MAP |
+ BYT_CHT_ES8316_MONO_SPEAKER;
+ }
+ if (quirk_override != -1) {
+ dev_info(dev, "Overriding quirk 0x%x => 0x%x\n", quirk,
+ quirk_override);
+ quirk = quirk_override;
+ }
+ log_quirks(dev);
+
+ if (quirk & BYT_CHT_ES8316_SSP0)
+ byt_cht_es8316_dais[dai_index].cpu_dai_name = "ssp0-port";
+
+ /* get the clock */
+ priv->mclk = devm_clk_get(dev, "pmc_plt_clk_3");
if (IS_ERR(priv->mclk)) {
ret = PTR_ERR(priv->mclk);
- dev_err(&pdev->dev,
- "Failed to get MCLK from pmc_plt_clk_3: %d\n",
- ret);
+ dev_err(dev, "clk_get pmc_plt_clk_3 failed: %d\n", ret);
return ret;
}
- ret = devm_snd_soc_register_card(&pdev->dev, &byt_cht_es8316_card);
+ /* get speaker enable GPIO */
+ codec_dev = bus_find_device_by_name(&i2c_bus_type, NULL, codec_name);
+ if (!codec_dev)
+ return -EPROBE_DEFER;
+
+ devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios);
+ priv->speaker_en_gpio =
+ gpiod_get_index(codec_dev, "speaker-enable", 0,
+ /* see comment in byt_cht_es8316_resume */
+ GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ put_device(codec_dev);
+
+ if (IS_ERR(priv->speaker_en_gpio)) {
+ ret = PTR_ERR(priv->speaker_en_gpio);
+ switch (ret) {
+ case -ENOENT:
+ priv->speaker_en_gpio = NULL;
+ break;
+ default:
+ dev_err(dev, "get speaker GPIO failed: %d\n", ret);
+ /* fall through */
+ case -EPROBE_DEFER:
+ return ret;
+ }
+ }
+
+ /* register the soc card */
+ snprintf(long_name, sizeof(long_name), "bytcht-es8316-%s-spk-%s-mic",
+ (quirk & BYT_CHT_ES8316_MONO_SPEAKER) ? "mono" : "stereo",
+ mic_name[BYT_CHT_ES8316_MAP(quirk)]);
+ byt_cht_es8316_card.long_name = long_name;
+ byt_cht_es8316_card.dev = dev;
+ snd_soc_card_set_drvdata(&byt_cht_es8316_card, priv);
+
+ ret = devm_snd_soc_register_card(dev, &byt_cht_es8316_card);
if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed %d\n", ret);
+ gpiod_put(priv->speaker_en_gpio);
+ dev_err(dev, "snd_soc_register_card failed: %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, &byt_cht_es8316_card);
- return ret;
+ return 0;
+}
+
+static int snd_byt_cht_es8316_mc_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct byt_cht_es8316_private *priv = snd_soc_card_get_drvdata(card);
+
+ gpiod_put(priv->speaker_en_gpio);
+ return 0;
}
static struct platform_driver snd_byt_cht_es8316_mc_driver = {
@@ -292,6 +565,7 @@ static struct platform_driver snd_byt_cht_es8316_mc_driver = {
.name = "bytcht_es8316",
},
.probe = snd_byt_cht_es8316_mc_probe,
+ .remove = snd_byt_cht_es8316_mc_remove,
};
module_platform_driver(snd_byt_cht_es8316_mc_driver);
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index a22366ce33c4..940eb27158da 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -431,6 +431,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
},
.driver_data = (void *)(BYT_RT5640_IN1_MAP |
@@ -1137,10 +1149,11 @@ struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
{
- const char * const map_name[] = { "dmic1", "dmic2", "in1", "in3" };
+ static const char * const map_name[] = { "dmic1", "dmic2", "in1", "in3" };
const struct dmi_system_id *dmi_id;
struct byt_rt5640_private *priv;
struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
const char *i2c_name = NULL;
int ret_val = 0;
int dai_index = 0;
@@ -1305,6 +1318,14 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
map_name[BYT_RT5640_MAP(byt_rt5640_quirk)]);
byt_rt5640_card.long_name = byt_rt5640_long_name;
+	/* override platform name, if required */
+ platform_name = mach->mach_params.platform;
+
+ ret_val = snd_soc_fixup_dai_links_platform_name(&byt_rt5640_card,
+ platform_name);
+ if (ret_val)
+ return ret_val;
+
ret_val = devm_snd_soc_register_card(&pdev->dev, &byt_rt5640_card);
if (ret_val) {
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index e528995668b7..b0a4d297176e 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -91,13 +91,20 @@ enum {
struct byt_rt5651_private {
struct clk *mclk;
struct gpio_desc *ext_amp_gpio;
+ struct gpio_desc *hp_detect;
struct snd_soc_jack jack;
};
+static const struct acpi_gpio_mapping *byt_rt5651_gpios;
+
/* Default: jack-detect on JD1_1, internal mic on in2, headsetmic on in3 */
static unsigned long byt_rt5651_quirk = BYT_RT5651_DEFAULT_QUIRKS |
BYT_RT5651_IN2_MAP;
+static unsigned int quirk_override;
+module_param_named(quirk, quirk_override, uint, 0444);
+MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+
static void log_quirks(struct device *dev)
{
if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_DMIC_MAP)
@@ -266,7 +273,7 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic_map[] = {
{"DMIC L1", NULL, "Internal Mic"},
{"DMIC R1", NULL, "Internal Mic"},
- {"IN3P", NULL, "Headset Mic"},
+ {"IN2P", NULL, "Headset Mic"},
};
static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_map[] = {
@@ -360,6 +367,22 @@ static int byt_rt5651_aif1_hw_params(struct snd_pcm_substream *substream,
return byt_rt5651_prepare_and_enable_pll1(codec_dai, rate, bclk_ratio);
}
+static const struct acpi_gpio_params pov_p1006w_hp_detect = { 1, 0, false };
+static const struct acpi_gpio_params pov_p1006w_ext_amp_en = { 2, 0, true };
+
+static const struct acpi_gpio_mapping byt_rt5651_pov_p1006w_gpios[] = {
+ { "hp-detect-gpios", &pov_p1006w_hp_detect, 1, },
+ { "ext-amp-enable-gpios", &pov_p1006w_ext_amp_en, 1, },
+ { },
+};
+
+static int byt_rt5651_pov_p1006w_quirk_cb(const struct dmi_system_id *id)
+{
+ byt_rt5651_quirk = (unsigned long)id->driver_data;
+ byt_rt5651_gpios = byt_rt5651_pov_p1006w_gpios;
+ return 1;
+}
+
static int byt_rt5651_quirk_cb(const struct dmi_system_id *id)
{
byt_rt5651_quirk = (unsigned long)id->driver_data;
@@ -436,6 +459,23 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
BYT_RT5651_IN1_MAP),
},
{
+ /* Point of View mobii wintab p1006w (v1.0) */
+ .callback = byt_rt5651_pov_p1006w_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+			/* Note 105b is Foxconn's USB/PCI vendor id */
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
+ },
+ .driver_data = (void *)(BYT_RT5651_DMIC_MAP |
+ BYT_RT5651_OVCD_TH_2000UA |
+ BYT_RT5651_OVCD_SF_0P75 |
+ BYT_RT5651_DMIC_EN |
+ BYT_RT5651_MCLK_EN |
+ BYT_RT5651_SSP0_AIF1),
+ },
+ {
/* VIOS LTH17 */
.callback = byt_rt5651_quirk_cb,
.matches = {
@@ -495,6 +535,7 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
struct byt_rt5651_private *priv = snd_soc_card_get_drvdata(card);
const struct snd_soc_dapm_route *custom_map;
int num_routes;
+ int report;
int ret;
card->dapm.idle_bias_off = true;
@@ -578,20 +619,27 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
dev_err(card->dev, "unable to set MCLK rate\n");
}
- if (BYT_RT5651_JDSRC(byt_rt5651_quirk)) {
+ report = 0;
+ if (BYT_RT5651_JDSRC(byt_rt5651_quirk))
+ report = SND_JACK_HEADSET | SND_JACK_BTN_0;
+ else if (priv->hp_detect)
+ report = SND_JACK_HEADSET;
+
+ if (report) {
ret = snd_soc_card_jack_new(runtime->card, "Headset",
- SND_JACK_HEADSET | SND_JACK_BTN_0,
- &priv->jack, bytcr_jack_pins,
+ report, &priv->jack, bytcr_jack_pins,
ARRAY_SIZE(bytcr_jack_pins));
if (ret) {
dev_err(runtime->dev, "jack creation failed %d\n", ret);
return ret;
}
- snd_jack_set_key(priv->jack.jack, SND_JACK_BTN_0,
- KEY_PLAYPAUSE);
+ if (report & SND_JACK_BTN_0)
+ snd_jack_set_key(priv->jack.jack, SND_JACK_BTN_0,
+ KEY_PLAYPAUSE);
- ret = snd_soc_component_set_jack(codec, &priv->jack, NULL);
+ ret = snd_soc_component_set_jack(codec, &priv->jack,
+ priv->hp_detect);
if (ret)
return ret;
}
@@ -763,7 +811,8 @@ static int byt_rt5651_resume(struct snd_soc_card *card)
for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5651_codec_name)) {
dev_dbg(component->dev, "re-enabling jack detect after resume\n");
- snd_soc_component_set_jack(component, &priv->jack, NULL);
+ snd_soc_component_set_jack(component, &priv->jack,
+ priv->hp_detect);
break;
}
}
@@ -795,74 +844,18 @@ static const struct x86_cpu_id cherrytrail_cpu_ids[] = {
{}
};
-static const struct acpi_gpio_params first_gpio = { 0, 0, false };
-static const struct acpi_gpio_params second_gpio = { 1, 0, false };
-
-static const struct acpi_gpio_mapping byt_rt5651_amp_en_first[] = {
- { "ext-amp-enable-gpios", &first_gpio, 1 },
- { },
-};
+static const struct acpi_gpio_params ext_amp_enable_gpios = { 0, 0, false };
-static const struct acpi_gpio_mapping byt_rt5651_amp_en_second[] = {
- { "ext-amp-enable-gpios", &second_gpio, 1 },
+static const struct acpi_gpio_mapping cht_rt5651_gpios[] = {
+ /*
+ * Some boards have I2cSerialBusV2, GpioIo, GpioInt as ACPI resources,
+ * other boards may have I2cSerialBusV2, GpioInt, GpioIo instead.
+ * We want the GpioIo one for the ext-amp-enable-gpio.
+ */
+ { "ext-amp-enable-gpios", &ext_amp_enable_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
{ },
};
-/*
- * Some boards have I2cSerialBusV2, GpioIo, GpioInt as ACPI resources, other
- * boards may have I2cSerialBusV2, GpioInt, GpioIo instead. We want the
- * GpioIo one for the ext-amp-enable-gpio and both count for the index in
- * acpi_gpio_params index. So we have 2 different mappings and the code
- * below figures out which one to use.
- */
-struct byt_rt5651_acpi_resource_data {
- int gpio_count;
- int gpio_int_idx;
-};
-
-static int snd_byt_rt5651_acpi_resource(struct acpi_resource *ares, void *arg)
-{
- struct byt_rt5651_acpi_resource_data *data = arg;
-
- if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
- return 0;
-
- if (ares->data.gpio.connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
- data->gpio_int_idx = data->gpio_count;
-
- data->gpio_count++;
- return 0;
-}
-
-static void snd_byt_rt5651_mc_add_amp_en_gpio_mapping(struct device *codec)
-{
- struct byt_rt5651_acpi_resource_data data = { 0, -1 };
- LIST_HEAD(resources);
- int ret;
-
- ret = acpi_dev_get_resources(ACPI_COMPANION(codec), &resources,
- snd_byt_rt5651_acpi_resource, &data);
- if (ret < 0) {
- dev_warn(codec, "Failed to get ACPI resources, not adding external amplifier GPIO mapping\n");
- return;
- }
-
- /* All info we need is gathered during the walk */
- acpi_dev_free_resource_list(&resources);
-
- switch (data.gpio_int_idx) {
- case 0:
- devm_acpi_dev_add_driver_gpios(codec, byt_rt5651_amp_en_second);
- break;
- case 1:
- devm_acpi_dev_add_driver_gpios(codec, byt_rt5651_amp_en_first);
- break;
- default:
- dev_warn(codec, "Unknown GpioInt index %d, not adding external amplifier GPIO mapping\n",
- data.gpio_int_idx);
- }
-}
-
struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
u64 aif_value; /* 1: AIF1, 2: AIF2 */
u64 mclock_value; /* usually 25MHz (0x17d7940), ignored */
@@ -870,9 +863,10 @@ struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
{
- const char * const mic_name[] = { "dmic", "in1", "in2", "in12" };
+ static const char * const mic_name[] = { "dmic", "in1", "in2", "in12" };
struct byt_rt5651_private *priv;
struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
struct device *codec_dev;
const char *i2c_name = NULL;
const char *hp_swapped;
@@ -973,6 +967,12 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
/* check quirks before creating card */
dmi_check_system(byt_rt5651_quirk_table);
+ if (quirk_override) {
+ dev_info(&pdev->dev, "Overriding quirk 0x%x => 0x%x\n",
+ (unsigned int)byt_rt5651_quirk, quirk_override);
+ byt_rt5651_quirk = quirk_override;
+ }
+
/* Must be called before register_card, also see declaration comment. */
ret_val = byt_rt5651_add_codec_device_props(codec_dev);
if (ret_val) {
@@ -981,8 +981,11 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
}
/* Cherry Trail devices use an external amplifier enable gpio */
- if (x86_match_cpu(cherrytrail_cpu_ids)) {
- snd_byt_rt5651_mc_add_amp_en_gpio_mapping(codec_dev);
+ if (x86_match_cpu(cherrytrail_cpu_ids) && !byt_rt5651_gpios)
+ byt_rt5651_gpios = cht_rt5651_gpios;
+
+ if (byt_rt5651_gpios) {
+ devm_acpi_dev_add_driver_gpios(codec_dev, byt_rt5651_gpios);
priv->ext_amp_gpio = devm_fwnode_get_index_gpiod_from_child(
&pdev->dev, "ext-amp-enable", 0,
codec_dev->fwnode,
@@ -1002,6 +1005,25 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
return ret_val;
}
}
+ priv->hp_detect = devm_fwnode_get_index_gpiod_from_child(
+ &pdev->dev, "hp-detect", 0,
+ codec_dev->fwnode,
+ GPIOD_IN, "hp-detect");
+ if (IS_ERR(priv->hp_detect)) {
+ ret_val = PTR_ERR(priv->hp_detect);
+ switch (ret_val) {
+ case -ENOENT:
+ priv->hp_detect = NULL;
+ break;
+ default:
+ dev_err(&pdev->dev, "Failed to get hp-detect GPIO: %d\n",
+ ret_val);
+ /* fall through */
+ case -EPROBE_DEFER:
+ put_device(codec_dev);
+ return ret_val;
+ }
+ }
}
put_device(codec_dev);
@@ -1060,6 +1082,14 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
mic_name[BYT_RT5651_MAP(byt_rt5651_quirk)], hp_swapped);
byt_rt5651_card.long_name = byt_rt5651_long_name;
+	/* override platform name, if required */
+ platform_name = mach->mach_params.platform;
+
+ ret_val = snd_soc_fixup_dai_links_platform_name(&byt_rt5651_card,
+ platform_name);
+ if (ret_val)
+ return ret_val;
+
ret_val = devm_snd_soc_register_card(&pdev->dev, &byt_rt5651_card);
if (ret_val) {
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 08a5152e635a..3263b0495853 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -28,6 +28,7 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include <sound/jack.h>
#include "../../codecs/max98090.h"
#include "../atom/sst-atom-controls.h"
@@ -420,6 +421,8 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
int ret_val = 0;
struct cht_mc_private *drv;
const char *mclk_name;
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
int quirks = 0;
dmi_id = dmi_first_match(cht_max98090_quirk_table);
@@ -442,6 +445,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
dev_dbg(dev, "Unable to add GPIO mapping table\n");
}
+	/* override platform name, if required */
+ mach = (&pdev->dev)->platform_data;
+ platform_name = mach->mach_params.platform;
+
+ ret_val = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cht,
+ platform_name);
+ if (ret_val)
+ return ret_val;
+
/* register the soc card */
snd_soc_card_cht.dev = &pdev->dev;
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
diff --git a/sound/soc/intel/boards/cht_bsw_nau8824.c b/sound/soc/intel/boards/cht_bsw_nau8824.c
index 30c46977d53c..02c2fa239331 100644
--- a/sound/soc/intel/boards/cht_bsw_nau8824.c
+++ b/sound/soc/intel/boards/cht_bsw_nau8824.c
@@ -25,6 +25,7 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include <sound/jack.h>
#include <linux/input.h>
#include "../atom/sst-atom-controls.h"
@@ -246,6 +247,8 @@ static struct snd_soc_card snd_soc_card_cht = {
static int snd_cht_mc_probe(struct platform_device *pdev)
{
struct cht_mc_private *drv;
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
int ret_val;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
@@ -253,6 +256,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
return -ENOMEM;
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
+	/* override platform name, if required */
+ mach = (&pdev->dev)->platform_data;
+ platform_name = mach->mach_params.platform;
+
+ ret_val = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cht,
+ platform_name);
+ if (ret_val)
+ return ret_val;
+
/* register the soc card */
snd_soc_card_cht.dev = &pdev->dev;
ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 250a356a0cbf..cbc2d458483f 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -530,6 +530,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = snd_soc_cards[0].soc_card;
struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
struct cht_mc_private *drv;
const char *i2c_name = NULL;
bool found = false;
@@ -663,6 +664,14 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
cht_rt5645_cpu_dai_name;
}
+	/* override platform name, if required */
+ platform_name = mach->mach_params.platform;
+
+ ret_val = snd_soc_fixup_dai_links_platform_name(card,
+ platform_name);
+ if (ret_val)
+ return ret_val;
+
drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
if (IS_ERR(drv->mclk)) {
dev_err(&pdev->dev,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 9de64f447e7b..3d5a2b3a06f0 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -400,6 +400,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
int ret_val = 0;
struct cht_mc_private *drv;
struct snd_soc_acpi_mach *mach = pdev->dev.platform_data;
+ const char *platform_name;
const char *i2c_name;
int i;
@@ -410,22 +411,27 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
strcpy(drv->codec_name, RT5672_I2C_DEFAULT);
/* fixup codec name based on HID */
- if (mach) {
- i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
- if (i2c_name) {
- snprintf(drv->codec_name, sizeof(drv->codec_name),
- "i2c-%s", i2c_name);
- for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
- if (!strcmp(cht_dailink[i].codec_name,
- RT5672_I2C_DEFAULT)) {
- cht_dailink[i].codec_name =
- drv->codec_name;
- break;
- }
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
+ if (i2c_name) {
+ snprintf(drv->codec_name, sizeof(drv->codec_name),
+ "i2c-%s", i2c_name);
+ for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
+ if (!strcmp(cht_dailink[i].codec_name,
+ RT5672_I2C_DEFAULT)) {
+ cht_dailink[i].codec_name = drv->codec_name;
+ break;
}
}
}
+	/* override platform name, if required */
+ platform_name = mach->mach_params.platform;
+
+ ret_val = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cht,
+ platform_name);
+ if (ret_val)
+ return ret_val;
+
drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
if (IS_ERR(drv->mclk)) {
dev_err(&pdev->dev,
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index c74c4f17316f..d17126f7757c 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -16,6 +16,7 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include "../skylake/skl.h"
#include "../../codecs/rt5682.h"
#include "../../codecs/hdac_hdmi.h"
@@ -55,39 +56,6 @@ enum {
GLK_DPCM_AUDIO_HDMI3_PB,
};
-static int platform_clock_control(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_dapm_context *dapm = w->dapm;
- struct snd_soc_card *card = dapm->card;
- struct snd_soc_dai *codec_dai;
- int ret = 0;
-
- codec_dai = snd_soc_card_get_codec_dai(card, GLK_REALTEK_CODEC_DAI);
- if (!codec_dai) {
- dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
- return -EIO;
- }
-
- if (SND_SOC_DAPM_EVENT_OFF(event)) {
- ret = snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0);
- if (ret)
- dev_err(card->dev, "failed to stop sysclk: %d\n", ret);
- } else if (SND_SOC_DAPM_EVENT_ON(event)) {
- ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
- GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
- if (ret < 0) {
- dev_err(card->dev, "can't set codec pll: %d\n", ret);
- return ret;
- }
- }
-
- if (ret)
- dev_err(card->dev, "failed to start internal clk: %d\n", ret);
-
- return ret;
-}
-
static const struct snd_kcontrol_new geminilake_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone Jack"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -102,14 +70,10 @@ static const struct snd_soc_dapm_widget geminilake_widgets[] = {
SND_SOC_DAPM_SPK("HDMI1", NULL),
SND_SOC_DAPM_SPK("HDMI2", NULL),
SND_SOC_DAPM_SPK("HDMI3", NULL),
- SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
- platform_clock_control, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route geminilake_map[] = {
/* HP jack connectors - unknown if we have jack detection */
- { "Headphone Jack", NULL, "Platform Clock" },
{ "Headphone Jack", NULL, "HPOL" },
{ "Headphone Jack", NULL, "HPOR" },
@@ -117,7 +81,6 @@ static const struct snd_soc_dapm_route geminilake_map[] = {
{ "Spk", NULL, "Speaker" },
/* other jacks */
- { "Headset Mic", NULL, "Platform Clock" },
{ "IN1P", NULL, "Headset Mic" },
/* digital mics */
@@ -164,7 +127,7 @@ static int geminilake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
/* set SSP to 24 bit */
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
return 0;
}
@@ -177,6 +140,13 @@ static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_jack *jack;
int ret;
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
+ GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
+ return ret;
+ }
+
/* Configure sysclk for codec */
ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1,
RT5682_PLL_FREQ, SND_SOC_CLOCK_IN);
@@ -602,6 +572,10 @@ static struct snd_soc_card glk_audio_card_rt5682_m98357a = {
static int geminilake_audio_probe(struct platform_device *pdev)
{
struct glk_card_private *ctx;
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
+ struct snd_soc_card *card;
+ int ret;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -609,11 +583,19 @@ static int geminilake_audio_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
- glk_audio_card_rt5682_m98357a.dev = &pdev->dev;
- snd_soc_card_set_drvdata(&glk_audio_card_rt5682_m98357a, ctx);
+ card = &glk_audio_card_rt5682_m98357a;
+ card->dev = &pdev->dev;
+ snd_soc_card_set_drvdata(card, ctx);
+
+	/* override platform name, if required */
+ mach = (&pdev->dev)->platform_data;
+ platform_name = mach->mach_params.platform;
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, platform_name);
+ if (ret)
+ return ret;
- return devm_snd_soc_register_card(&pdev->dev,
- &glk_audio_card_rt5682_m98357a);
+ return devm_snd_soc_register_card(&pdev->dev, card);
}
static const struct platform_device_id glk_board_ids[] = {
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index eab1f439dd3f..971226d42042 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -19,6 +19,7 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include <sound/pcm_params.h>
#include "../common/sst-dsp.h"
@@ -146,7 +147,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
.stream_name = "Loopback",
.cpu_dai_name = "Loopback Pin",
.platform_name = "haswell-pcm-audio",
- .dynamic = 0,
+ .dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -189,8 +190,22 @@ static struct snd_soc_card haswell_rt5640 = {
static int haswell_audio_probe(struct platform_device *pdev)
{
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name = NULL;
+ int ret;
+
haswell_rt5640.dev = &pdev->dev;
+ /* override platform name, if required */
+ mach = (&pdev->dev)->platform_data;
+ if (mach) /* extra check since legacy does not pass parameters */
+ platform_name = mach->mach_params.platform;
+
+ ret = snd_soc_fixup_dai_links_platform_name(&haswell_rt5640,
+ platform_name);
+ if (ret)
+ return ret;
+
return devm_snd_soc_register_card(&pdev->dev, &haswell_rt5640);
}
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
index 723a4935ed76..2768a572d065 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -2,7 +2,7 @@
// Copyright(c) 2018 Intel Corporation.
/*
- * Intel Kabylake I2S Machine Driver with MAX98927 & DA7219 Codecs
+ * Intel Kabylake I2S Machine Driver with MAX98927, MAX98373 & DA7219 Codecs
*
* Modified from:
* Intel Kabylake I2S Machine driver supporting MAX98927 and
@@ -24,8 +24,14 @@
#define KBL_DIALOG_CODEC_DAI "da7219-hifi"
#define MAX98927_CODEC_DAI "max98927-aif1"
-#define MAXIM_DEV0_NAME "i2c-MX98927:00"
-#define MAXIM_DEV1_NAME "i2c-MX98927:01"
+#define MAX98927_DEV0_NAME "i2c-MX98927:00"
+#define MAX98927_DEV1_NAME "i2c-MX98927:01"
+
+#define MAX98373_CODEC_DAI "max98373-aif1"
+#define MAX98373_DEV0_NAME "i2c-MX98373:00"
+#define MAX98373_DEV1_NAME "i2c-MX98373:01"
+
+
#define DUAL_CHANNEL 2
#define QUAD_CHANNEL 4
#define NAME_SIZE 32
@@ -176,20 +182,38 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
for (j = 0; j < runtime->num_codecs; j++) {
struct snd_soc_dai *codec_dai = runtime->codec_dais[j];
- if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
+ if (!strcmp(codec_dai->component->name, MAX98927_DEV0_NAME)) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
if (ret < 0) {
dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret);
return ret;
}
}
- if (!strcmp(codec_dai->component->name, MAXIM_DEV1_NAME)) {
+ if (!strcmp(codec_dai->component->name, MAX98927_DEV1_NAME)) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16);
if (ret < 0) {
dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret);
return ret;
}
}
+ if (!strcmp(codec_dai->component->name, MAX98373_DEV0_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x03, 3, 8, 24);
+ if (ret < 0) {
+ dev_err(runtime->dev,
+ "DEV0 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ if (!strcmp(codec_dai->component->name, MAX98373_DEV1_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x0C, 3, 8, 24);
+ if (ret < 0) {
+ dev_err(runtime->dev,
+ "DEV0 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
}
return 0;
@@ -213,6 +237,25 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
/*
+ * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
+ * whereas kblda7219m98927 & kblmax98927 support S16_LE by default.
+ * Skip the port-wise FE and BE configuration for kblda7219m98373 &
+ * kblmax98373 as the topology (FE & BE) supports S24_LE only.
+ */
+
+ if (!strcmp(rtd->card->name, "kblda7219m98373") ||
+ !strcmp(rtd->card->name, "kblmax98373")) {
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = DUAL_CHANNEL;
+
+ /* set SSP to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ return 0;
+ }
+
+ /*
* The ADSP will convert the FE rate to 48k, stereo, 24 bit
*/
if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
@@ -221,7 +264,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = 48000;
channels->min = channels->max = 2;
snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
}
/*
@@ -229,7 +272,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
* thus changing the mask here
*/
if (!strcmp(be_dai_link->name, "SSP0-Codec"))
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
@@ -352,20 +395,31 @@ static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
static int kbl_fe_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_rt = substream->private_data;
/*
* On this platform for PCM device we support,
* 48Khz
* stereo
- * 16 bit audio
*/
runtime->hw.channels_max = DUAL_CHANNEL;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
&constraints_channels);
-
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+ /*
+ * Set up S24_LE (32 bit container and 24 bit valid data) for
+ * kblda7219m98373 & kblmax98373. For kblda7219m98927 &
+ * kblmax98927, keep it at 16/16 due to the topology FW dependency.
+ */
+ if (!strcmp(soc_rt->card->name, "kblda7219m98373") ||
+ !strcmp(soc_rt->card->name, "kblmax98373")) {
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
+
+ } else {
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+ }
snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
@@ -398,11 +452,23 @@ static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_rt = substream->private_data;
runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
&constraints_channels_quad);
+ /*
+ * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE.
+ * The DMIC is also configured for S24_LE. Force the DMIC format to
+ * S24_LE due to the topology FW dependency.
+ */
+ if (!strcmp(soc_rt->card->name, "kblda7219m98373") ||
+ !strcmp(soc_rt->card->name, "kblmax98373")) {
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
+ }
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
}
@@ -448,29 +514,55 @@ static struct snd_soc_ops skylake_refcap_ops = {
static struct snd_soc_codec_conf max98927_codec_conf[] = {
{
- .dev_name = MAXIM_DEV0_NAME,
+ .dev_name = MAX98927_DEV0_NAME,
+ .name_prefix = "Right",
+ },
+
+ {
+ .dev_name = MAX98927_DEV1_NAME,
+ .name_prefix = "Left",
+ },
+};
+
+static struct snd_soc_codec_conf max98373_codec_conf[] = {
+
+ {
+ .dev_name = MAX98373_DEV0_NAME,
.name_prefix = "Right",
},
{
- .dev_name = MAXIM_DEV1_NAME,
+ .dev_name = MAX98373_DEV1_NAME,
.name_prefix = "Left",
},
};
-static struct snd_soc_dai_link_component ssp0_codec_components[] = {
+static struct snd_soc_dai_link_component max98927_ssp0_codec_components[] = {
{ /* Left */
- .name = MAXIM_DEV0_NAME,
+ .name = MAX98927_DEV0_NAME,
.dai_name = MAX98927_CODEC_DAI,
},
{ /* For Right */
- .name = MAXIM_DEV1_NAME,
+ .name = MAX98927_DEV1_NAME,
.dai_name = MAX98927_CODEC_DAI,
},
};
+static struct snd_soc_dai_link_component max98373_ssp0_codec_components[] = {
+ { /* Left */
+ .name = MAX98373_DEV0_NAME,
+ .dai_name = MAX98373_CODEC_DAI,
+ },
+
+ { /* For Right */
+ .name = MAX98373_DEV1_NAME,
+ .dai_name = MAX98373_CODEC_DAI,
+ },
+
+};
+
/* kabylake digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link kabylake_dais[] = {
/* Front End DAI links */
@@ -607,8 +699,8 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.cpu_dai_name = "SSP0 Pin",
.platform_name = "0000:00:1f.3",
.no_pcm = 1,
- .codecs = ssp0_codec_components,
- .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .codecs = max98927_ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(max98927_ssp0_codec_components),
.dai_fmt = SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
@@ -683,7 +775,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
};
/* kabylake digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link kabylake_max98927_dais[] = {
+static struct snd_soc_dai_link kabylake_max98_927_373_dais[] = {
/* Front End DAI links */
[KBL_DPCM_AUDIO_PB] = {
.name = "Kbl Audio Port",
@@ -802,8 +894,8 @@ static struct snd_soc_dai_link kabylake_max98927_dais[] = {
.cpu_dai_name = "SSP0 Pin",
.platform_name = "0000:00:1f.3",
.no_pcm = 1,
- .codecs = ssp0_codec_components,
- .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .codecs = max98927_ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(max98927_ssp0_codec_components),
.dai_fmt = SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
@@ -917,8 +1009,8 @@ static struct snd_soc_card kbl_audio_card_da7219_m98927 = {
static struct snd_soc_card kbl_audio_card_max98927 = {
.name = "kblmax98927",
.owner = THIS_MODULE,
- .dai_link = kabylake_max98927_dais,
- .num_links = ARRAY_SIZE(kabylake_max98927_dais),
+ .dai_link = kabylake_max98_927_373_dais,
+ .num_links = ARRAY_SIZE(kabylake_max98_927_373_dais),
.controls = kabylake_controls,
.num_controls = ARRAY_SIZE(kabylake_controls),
.dapm_widgets = kabylake_widgets,
@@ -931,9 +1023,46 @@ static struct snd_soc_card kbl_audio_card_max98927 = {
.late_probe = kabylake_card_late_probe,
};
+static struct snd_soc_card kbl_audio_card_da7219_m98373 = {
+ .name = "kblda7219m98373",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_dais,
+ .num_links = ARRAY_SIZE(kabylake_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98373_codec_conf,
+ .num_configs = ARRAY_SIZE(max98373_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+static struct snd_soc_card kbl_audio_card_max98373 = {
+ .name = "kblmax98373",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_max98_927_373_dais,
+ .num_links = ARRAY_SIZE(kabylake_max98_927_373_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98373_codec_conf,
+ .num_configs = ARRAY_SIZE(max98373_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
static int kabylake_audio_probe(struct platform_device *pdev)
{
struct kbl_codec_private *ctx;
+ struct snd_soc_dai_link *kbl_dai_link;
+ struct snd_soc_dai_link_component **codecs;
+ int i = 0;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -944,6 +1073,22 @@ static int kabylake_audio_probe(struct platform_device *pdev)
kabylake_audio_card =
(struct snd_soc_card *)pdev->id_entry->driver_data;
+ kbl_dai_link = kabylake_audio_card->dai_link;
+
+ /* Update codecs for SSP0 with max98373 codec info */
+ if (!strcmp(pdev->name, "kbl_da7219_max98373") ||
+ (!strcmp(pdev->name, "kbl_max98373"))) {
+ for (i = 0; i < kabylake_audio_card->num_links; ++i) {
+ if (strcmp(kbl_dai_link[i].name, "SSP0-Codec"))
+ continue;
+
+ codecs = &(kbl_dai_link[i].codecs);
+ *codecs = max98373_ssp0_codec_components;
+ kbl_dai_link[i].num_codecs =
+ ARRAY_SIZE(max98373_ssp0_codec_components);
+ break;
+ }
+ }
kabylake_audio_card->dev = &pdev->dev;
snd_soc_card_set_drvdata(kabylake_audio_card, ctx);
@@ -961,13 +1106,23 @@ static const struct platform_device_id kbl_board_ids[] = {
.driver_data =
(kernel_ulong_t)&kbl_audio_card_max98927,
},
+ {
+ .name = "kbl_da7219_max98373",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_da7219_m98373,
+ },
+ {
+ .name = "kbl_max98373",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_max98373,
+ },
{ }
};
static struct platform_driver kabylake_audio = {
.probe = kabylake_audio_probe,
.driver = {
- .name = "kbl_da7219_max98927",
+ .name = "kbl_da7219_max98_927_373",
.pm = &snd_soc_pm_ops,
},
.id_table = kbl_board_ids,
@@ -976,8 +1131,10 @@ static struct platform_driver kabylake_audio = {
module_platform_driver(kabylake_audio)
/* Module information */
-MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98927 & DA7219");
+MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98927/MAX98373 & DA7219");
MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:kbl_da7219_max98927");
MODULE_ALIAS("platform:kbl_max98927");
+MODULE_ALIAS("platform:kbl_da7219_max98373");
+MODULE_ALIAS("platform:kbl_max98373");
diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
index 61dedc103b19..229e39586868 100644
--- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
@@ -51,9 +51,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
.id = "INT343A",
.drv_name = "bxt_alc298s_i2s",
.fw_filename = "intel/dsp_fw_bxtn.bin",
- .sof_fw_filename = "intel/sof-apl.ri",
- .sof_tplg_filename = "intel/sof-apl-rt298.tplg",
- .asoc_plat_name = "0000:00:0e.0",
+ .sof_fw_filename = "sof-apl.ri",
+ .sof_tplg_filename = "sof-apl-rt298.tplg",
},
{
.id = "DLGS7219",
@@ -61,31 +60,27 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
.fw_filename = "intel/dsp_fw_bxtn.bin",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &bxt_codecs,
- .sof_fw_filename = "intel/sof-apl.ri",
- .sof_tplg_filename = "intel/sof-apl-da7219.tplg",
- .asoc_plat_name = "0000:00:0e.0",
+ .sof_fw_filename = "sof-apl.ri",
+ .sof_tplg_filename = "sof-apl-da7219.tplg",
},
{
.id = "104C5122",
.drv_name = "bxt-pcm512x",
- .sof_fw_filename = "intel/sof-apl.ri",
- .sof_tplg_filename = "intel/sof-apl-pcm512x.tplg",
- .asoc_plat_name = "0000:00:0e.0",
+ .sof_fw_filename = "sof-apl.ri",
+ .sof_tplg_filename = "sof-apl-pcm512x.tplg",
},
{
.id = "1AEC8804",
.drv_name = "bxt-wm8804",
- .sof_fw_filename = "intel/sof-apl.ri",
- .sof_tplg_filename = "intel/sof-apl-wm8804.tplg",
- .asoc_plat_name = "0000:00:0e.0",
+ .sof_fw_filename = "sof-apl.ri",
+ .sof_tplg_filename = "sof-apl-wm8804.tplg",
},
{
.id = "INT34C3",
.drv_name = "bxt_tdf8532",
.machine_quirk = apl_quirk,
- .sof_fw_filename = "intel/sof-apl.ri",
- .sof_tplg_filename = "intel/sof-apl-tdf8532.tplg",
- .asoc_plat_name = "0000:00:0e.0",
+ .sof_fw_filename = "sof-apl.ri",
+ .sof_tplg_filename = "sof-apl-tdf8532.tplg",
},
{},
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
index 097dc06377ba..fe812a909db4 100644
--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
@@ -21,6 +21,7 @@
static unsigned long byt_machine_id;
#define BYT_THINKPAD_10 1
+#define BYT_POV_P1006W 2
static int byt_thinkpad10_quirk_cb(const struct dmi_system_id *id)
{
@@ -28,6 +29,11 @@ static int byt_thinkpad10_quirk_cb(const struct dmi_system_id *id)
return 1;
}
+static int byt_pov_p1006w_quirk_cb(const struct dmi_system_id *id)
+{
+ byt_machine_id = BYT_POV_P1006W;
+ return 1;
+}
static const struct dmi_system_id byt_table[] = {
{
@@ -58,6 +64,17 @@ static const struct dmi_system_id byt_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
},
},
+ {
+ /* Point of View mobii wintab p1006w (v1.0) */
+ .callback = byt_pov_p1006w_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ /* Note 105b is Foxconn's USB/PCI vendor id */
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
+ },
+ },
{ }
};
@@ -66,9 +83,17 @@ static struct snd_soc_acpi_mach byt_thinkpad_10 = {
.drv_name = "cht-bsw-rt5672",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-rt5670.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-rt5670.tplg",
+};
+
+static struct snd_soc_acpi_mach byt_pov_p1006w = {
+ .id = "10EC5640",
+ .drv_name = "bytcr_rt5651",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5651",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-rt5651.tplg",
};
static struct snd_soc_acpi_mach *byt_quirk(void *arg)
@@ -77,10 +102,14 @@ static struct snd_soc_acpi_mach *byt_quirk(void *arg)
dmi_check_system(byt_table);
- if (byt_machine_id == BYT_THINKPAD_10)
+ switch (byt_machine_id) {
+ case BYT_THINKPAD_10:
return &byt_thinkpad_10;
- else
+ case BYT_POV_P1006W:
+ return &byt_pov_p1006w;
+ default:
return mach;
+ }
}
struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[] = {
@@ -105,54 +134,56 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcr_rt5640",
.machine_quirk = byt_quirk,
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-rt5640.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-rt5640.tplg",
},
{
.id = "10EC5642",
.drv_name = "bytcr_rt5640",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcr_rt5640",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-rt5640.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-rt5640.tplg",
},
{
.id = "INTCCFFD",
.drv_name = "bytcr_rt5640",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcr_rt5640",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-rt5640.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-rt5640.tplg",
},
{
.id = "10EC5651",
.drv_name = "bytcr_rt5651",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcr_rt5651",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-rt5651.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-rt5651.tplg",
},
{
.id = "DLGS7212",
.drv_name = "bytcht_da7213",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcht_da7213",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-da7213.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-da7213.tplg",
},
{
.id = "DLGS7213",
.drv_name = "bytcht_da7213",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "bytcht_da7213",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-da7213.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-da7213.tplg",
+ },
+ {
+ .id = "ESSX8316",
+ .drv_name = "bytcht_es8316",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcht_es8316",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-es8316.tplg",
},
/* some Baytrail platforms rely on RT5645, use CHT machine driver */
{
@@ -160,18 +191,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-rt5645.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-rt5645.tplg",
},
{
.id = "10EC5648",
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-rt5645.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-rt5645.tplg",
},
/* use CHT driver for Baytrail Chromebooks */
{
@@ -179,9 +208,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
.drv_name = "cht-bsw-max98090",
.fw_filename = "intel/fw_sst_0f28.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-byt.ri",
- .sof_tplg_filename = "intel/sof-byt-max98090.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-byt.ri",
+ .sof_tplg_filename = "sof-byt-max98090.tplg",
},
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
/*
diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
index 91bb99b69601..deafd87cc764 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
@@ -44,9 +44,8 @@ static struct snd_soc_acpi_mach cht_surface_mach = {
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5645.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5645.tplg",
};
static struct snd_soc_acpi_mach *cht_quirk(void *arg)
@@ -68,90 +67,80 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "cht-bsw-rt5672",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5670.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5670.tplg",
},
{
.id = "10EC5672",
.drv_name = "cht-bsw-rt5672",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5670.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5670.tplg",
},
{
.id = "10EC5645",
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5645.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5645.tplg",
},
{
.id = "10EC5650",
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5645.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5645.tplg",
},
{
.id = "10EC3270",
.drv_name = "cht-bsw-rt5645",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5645.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5645.tplg",
},
{
.id = "193C9890",
.drv_name = "cht-bsw-max98090",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-max98090.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-max98090.tplg",
},
{
.id = "10508824",
.drv_name = "cht-bsw-nau8824",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "cht-bsw",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-nau8824.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-nau8824.tplg",
},
{
.id = "DLGS7212",
.drv_name = "bytcht_da7213",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcht_da7213",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-da7213.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-da7213.tplg",
},
{
.id = "DLGS7213",
.drv_name = "bytcht_da7213",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcht_da7213",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-da7213.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-da7213.tplg",
},
{
.id = "ESSX8316",
.drv_name = "bytcht_es8316",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcht_es8316",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-es8316.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-es8316.tplg",
},
/* some CHT-T platforms rely on RT5640, use Baytrail machine driver */
{
@@ -160,18 +149,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcr_rt5640",
.machine_quirk = cht_quirk,
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5640.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5640.tplg",
},
{
.id = "10EC3276",
.drv_name = "bytcr_rt5640",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcr_rt5640",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5640.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5640.tplg",
},
/* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
{
@@ -179,9 +166,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.drv_name = "bytcr_rt5651",
.fw_filename = "intel/fw_sst_22a8.bin",
.board = "bytcr_rt5651",
- .sof_fw_filename = "intel/sof-cht.ri",
- .sof_tplg_filename = "intel/sof-cht-rt5651.tplg",
- .asoc_plat_name = "sst-mfld-platform",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-rt5651.tplg",
},
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
/*
diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
index ec8e28e7b937..a914dd238d0a 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
@@ -20,9 +20,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = {
.drv_name = "cnl_rt274",
.fw_filename = "intel/dsp_fw_cnl.bin",
.pdata = &cnl_pdata,
- .sof_fw_filename = "intel/sof-cnl.ri",
- .sof_tplg_filename = "intel/sof-cnl-rt274.tplg",
- .asoc_plat_name = "0000:00:1f.3",
+ .sof_fw_filename = "sof-cnl.ri",
+ .sof_tplg_filename = "sof-cnl-rt274.tplg",
},
{},
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
index 305875af71ca..3f2061475ae4 100644
--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
@@ -19,9 +19,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
.id = "INT343A",
.drv_name = "glk_alc298s_i2s",
.fw_filename = "intel/dsp_fw_glk.bin",
- .sof_fw_filename = "intel/sof-glk.ri",
- .sof_tplg_filename = "intel/sof-glk-alc298.tplg",
- .asoc_plat_name = "0000:00:0e.0",
+ .sof_fw_filename = "sof-glk.ri",
+ .sof_tplg_filename = "sof-glk-alc298.tplg",
},
{
.id = "DLGS7219",
@@ -29,9 +28,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
.fw_filename = "intel/dsp_fw_glk.bin",
.machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &glk_codecs,
- .sof_fw_filename = "intel/sof-glk.ri",
- .sof_tplg_filename = "intel/sof-glk-da7219.tplg",
- .asoc_plat_name = "0000:00:0e.0",
+ .sof_fw_filename = "sof-glk.ri",
+ .sof_tplg_filename = "sof-glk-da7219.tplg",
},
{},
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-hda-match.c b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
index 533c1064f84b..68ae43f7b4b2 100644
--- a/sound/soc/intel/common/soc-acpi-intel-hda-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
@@ -23,7 +23,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_hda_machines[] = {
/* .sof_fw_filename is dynamically set in sof/intel driver */
- .sof_tplg_filename = "intel/sof-hda-generic.tplg",
+ .sof_tplg_filename = "sof-hda-generic.tplg",
/*
* .machine_quirk and .quirk_data are not used here but
diff --git a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
index 494a0ea9b029..690b305a255b 100644
--- a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
@@ -23,9 +23,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[] = {
.id = "INT33CA",
.drv_name = "haswell-audio",
.fw_filename = "intel/IntcSST1.bin",
- .sof_fw_filename = "intel/sof-hsw.ri",
- .sof_tplg_filename = "intel/sof-hsw.tplg",
- .asoc_plat_name = "haswell-pcm-audio",
+ .sof_fw_filename = "sof-hsw.ri",
+ .sof_tplg_filename = "sof-hsw.tplg",
},
{}
};
@@ -36,25 +35,22 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[] = {
.id = "INT343A",
.drv_name = "broadwell-audio",
.fw_filename = "intel/IntcSST2.bin",
- .sof_fw_filename = "intel/sof-bdw.ri",
- .sof_tplg_filename = "intel/sof-bdw-rt286.tplg",
- .asoc_plat_name = "haswell-pcm-audio",
+ .sof_fw_filename = "sof-bdw.ri",
+ .sof_tplg_filename = "sof-bdw-rt286.tplg",
},
{
.id = "RT5677CE",
.drv_name = "bdw-rt5677",
.fw_filename = "intel/IntcSST2.bin",
- .sof_fw_filename = "intel/sof-bdw.ri",
- .sof_tplg_filename = "intel/sof-bdw-rt5677.tplg",
- .asoc_plat_name = "haswell-pcm-audio",
+ .sof_fw_filename = "sof-bdw.ri",
+ .sof_tplg_filename = "sof-bdw-rt5677.tplg",
},
{
.id = "INT33CA",
.drv_name = "haswell-audio",
.fw_filename = "intel/IntcSST2.bin",
- .sof_fw_filename = "intel/sof-bdw.ri",
- .sof_tplg_filename = "intel/sof-bdw-rt5640.tplg",
- .asoc_plat_name = "haswell-pcm-audio",
+ .sof_fw_filename = "sof-bdw.ri",
+ .sof_tplg_filename = "sof-bdw-rt5640.tplg",
},
{}
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
index 33b441dca4d3..e5a6be5bc0ee 100644
--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -20,9 +20,8 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[] = {
.drv_name = "icl_rt274",
.fw_filename = "intel/dsp_fw_icl.bin",
.pdata = &icl_pdata,
- .sof_fw_filename = "intel/sof-icl.ri",
- .sof_tplg_filename = "intel/sof-icl-rt274.tplg",
- .asoc_plat_name = "0000:00:1f.3",
+ .sof_fw_filename = "sof-icl.ri",
+ .sof_tplg_filename = "sof-icl-rt274.tplg",
},
{},
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
index e6fa6f470526..4b331058e807 100644
--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
@@ -37,6 +37,11 @@ static struct snd_soc_acpi_codecs kbl_7219_98927_codecs = {
.codecs = {"MX98927"}
};
+static struct snd_soc_acpi_codecs kbl_7219_98373_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98373"}
+};
+
struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
{
.id = "INT343A",
@@ -106,6 +111,20 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
.drv_name = "kbl_rt5660",
.fw_filename = "intel/dsp_fw_kbl.bin",
},
+ {
+ .id = "DLGS7219",
+ .drv_name = "kbl_da7219_max98373",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &kbl_7219_98373_codecs,
+ .pdata = &skl_dmic_data
+ },
+ {
+ .id = "MX98373",
+ .drv_name = "kbl_max98373",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .pdata = &skl_dmic_data
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_kbl_machines);
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index d33bdaf92c57..31fcdf12c67d 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -1216,7 +1216,7 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
return ret;
}
- stream->commited = 1;
+ stream->commited = true;
trace_hsw_stream_alloc_reply(stream);
return 0;
diff --git a/sound/soc/intel/haswell/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index fe2c826e710c..e023c4c3e5a9 100644
--- a/sound/soc/intel/haswell/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -544,7 +544,7 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
dev_err(rtd->dev, "error: invalid DAI ID %d\n",
rtd->cpu_dai->id);
return -EINVAL;
- };
+ }
ret = sst_hsw_stream_format(hsw, pcm_data->stream,
path_id, stream_type, SST_HSW_STREAM_FORMAT_PCM_FORMAT);
@@ -861,7 +861,7 @@ static int hsw_pcm_close(struct snd_pcm_substream *substream)
dev_dbg(rtd->dev, "error: free stream failed %d\n", ret);
goto out;
}
- pcm_data->allocated = 0;
+ pcm_data->allocated = false;
pcm_data->stream = NULL;
out:
@@ -946,27 +946,21 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct sst_pdata *pdata = dev_get_platdata(component->dev);
struct hsw_priv_data *priv_data = dev_get_drvdata(component->dev);
struct device *dev = pdata->dma_dev;
- int ret = 0;
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
- ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV_SG,
dev,
hsw_pcm_hardware.buffer_bytes_max,
hsw_pcm_hardware.buffer_bytes_max);
- if (ret) {
- dev_err(rtd->dev, "dma buffer allocation failed %d\n",
- ret);
- return ret;
- }
}
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream)
priv_data->pcm[rtd->cpu_dai->id][SNDRV_PCM_STREAM_PLAYBACK].hsw_pcm = pcm;
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream)
priv_data->pcm[rtd->cpu_dai->id][SNDRV_PCM_STREAM_CAPTURE].hsw_pcm = pcm;
- return ret;
+ return 0;
}
#define HSW_FORMATS \
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index b0e6fb93eaf8..28c4806b196a 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -416,7 +416,7 @@ int skl_resume_dsp(struct skl *skl)
snd_hdac_ext_bus_ppcap_int_enable(bus, true);
/* check if DSP 1st boot is done */
- if (skl->skl_sst->is_first_boot == true)
+ if (skl->skl_sst->is_first_boot)
return 0;
/*
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 557f80c0bfe5..1ae83f4ccc36 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -1289,7 +1289,6 @@ static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct snd_pcm *pcm = rtd->pcm;
unsigned int size;
- int retval = 0;
struct skl *skl = bus_to_skl(bus);
if (dai->driver->playback.channels_min ||
@@ -1298,17 +1297,13 @@ static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
if (size > MAX_PREALLOC_SIZE)
size = MAX_PREALLOC_SIZE;
- retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(skl->pci),
size, MAX_PREALLOC_SIZE);
- if (retval) {
- dev_err(dai->dev, "dma buffer allocation fail\n");
- return retval;
- }
}
- return retval;
+ return 0;
}
static int skl_get_module_info(struct skl *skl, struct skl_module_cfg *mconfig)
@@ -1423,7 +1418,7 @@ static int skl_platform_soc_probe(struct snd_soc_component *component)
if (!ops)
return -EIO;
- if (skl->skl_sst->is_first_boot == false) {
+ if (!skl->skl_sst->is_first_boot) {
dev_err(component->dev, "DSP reports first boot done!!!\n");
return -EIO;
}
@@ -1464,6 +1459,7 @@ static const struct snd_soc_component_driver skl_component = {
.ops = &skl_platform_ops,
.pcm_new = skl_pcm_new,
.pcm_free = skl_pcm_free,
+ .ignore_module_refcount = 1, /* do not increase the refcount in core */
};
int skl_platform_register(struct device *dev)
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index cf8848b779dc..389f1862bc43 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -3103,7 +3103,7 @@ static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
ac->size = dfw_ac->max;
if (ac->max) {
- ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
+ ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
if (!ac->params)
return -ENOMEM;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 60c94836bf5b..4ed5b7e17d44 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -336,9 +336,6 @@ static int skl_suspend(struct device *dev)
skl->skl_sst->fw_loaded = false;
}
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
- snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
-
return 0;
}
@@ -350,10 +347,6 @@ static int skl_resume(struct device *dev)
struct hdac_ext_link *hlink = NULL;
int ret;
- /* Turned OFF in HDMI codec driver after codec reconfiguration */
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
- snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
-
/*
* resume only when we are not in suspend active, otherwise need to
* restore the device
@@ -446,8 +439,10 @@ static int skl_free(struct hdac_bus *bus)
snd_hdac_ext_bus_exit(bus);
cancel_work_sync(&skl->probe_work);
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
snd_hdac_i915_exit(bus);
+ }
return 0;
}
@@ -814,7 +809,7 @@ static void skl_probe_work(struct work_struct *work)
err = skl_platform_register(bus->dev);
if (err < 0) {
dev_err(bus->dev, "platform register failed: %d\n", err);
- return;
+ goto out_err;
}
err = skl_machine_device_register(skl);
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index e731d40afcce..b35410e4020e 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -105,3 +105,22 @@ config SND_SOC_MT8173_RT5650_RT5676
with the RT5650 and RT5676 codecs.
Select Y if you have such device.
If unsure select "N".
+
+config SND_SOC_MT8183
+ tristate "ASoC support for Mediatek MT8183 chip"
+ depends on ARCH_MEDIATEK
+ select SND_SOC_MEDIATEK
+ help
+ This adds ASoC platform driver support for Mediatek MT8183 chip
+ that can be used with other codecs.
+ Select Y if you have such device.
+ If unsure select "N".
+
+config SND_SOC_MTK_BTCVSD
+ tristate "ALSA BT SCO CVSD/MSBC Driver"
+ help
+ This is for software BTCVSD. This enables
+ the function for transferring/receiving
+ BT encoded data to/from BT firmware.
+ Select Y if you have such device.
+ If unsure select "N".
diff --git a/sound/soc/mediatek/Makefile b/sound/soc/mediatek/Makefile
index 3bb2c47532f4..76032cae6d51 100644
--- a/sound/soc/mediatek/Makefile
+++ b/sound/soc/mediatek/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_SND_SOC_MEDIATEK) += common/
obj-$(CONFIG_SND_SOC_MT2701) += mt2701/
obj-$(CONFIG_SND_SOC_MT6797) += mt6797/
obj-$(CONFIG_SND_SOC_MT8173) += mt8173/
+obj-$(CONFIG_SND_SOC_MT8183) += mt8183/
diff --git a/sound/soc/mediatek/common/Makefile b/sound/soc/mediatek/common/Makefile
index cdadabc5fd16..9ab90433a8d7 100644
--- a/sound/soc/mediatek/common/Makefile
+++ b/sound/soc/mediatek/common/Makefile
@@ -2,3 +2,5 @@
# platform driver
snd-soc-mtk-common-objs := mtk-afe-platform-driver.o mtk-afe-fe-dai.o
obj-$(CONFIG_SND_SOC_MEDIATEK) += snd-soc-mtk-common.o
+
+obj-$(CONFIG_SND_SOC_MTK_BTCVSD) += mtk-btcvsd.o
\ No newline at end of file
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
index 697aa50aff9a..3ce527ce30ce 100644
--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
@@ -126,9 +126,9 @@ int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
size = afe->mtk_afe_hardware->buffer_bytes_max;
- return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- afe->dev,
- size, size);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ afe->dev, size, size);
+ return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_pcm_new);
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
new file mode 100644
index 000000000000..1b8bcdaf02d1
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-btcvsd.c
@@ -0,0 +1,1364 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Mediatek ALSA BT SCO CVSD/MSBC Driver
+//
+// Copyright (c) 2019 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/sched/clock.h>
+
+#include <sound/soc.h>
+
+#define BTCVSD_SND_NAME "mtk-btcvsd-snd"
+
+#define BT_CVSD_TX_NREADY BIT(21)
+#define BT_CVSD_RX_READY BIT(22)
+#define BT_CVSD_TX_UNDERFLOW BIT(23)
+#define BT_CVSD_RX_OVERFLOW BIT(24)
+#define BT_CVSD_INTERRUPT BIT(31)
+
+#define BT_CVSD_CLEAR \
+ (BT_CVSD_TX_NREADY | BT_CVSD_RX_READY | BT_CVSD_TX_UNDERFLOW |\
+ BT_CVSD_RX_OVERFLOW | BT_CVSD_INTERRUPT)
+
+/* TX */
+#define SCO_TX_ENCODE_SIZE (60)
+/* 18 = 6 * 180 / SCO_TX_ENCODE_SIZE */
+#define SCO_TX_PACKER_BUF_NUM (18)
+
+/* RX */
+#define SCO_RX_PLC_SIZE (30)
+#define SCO_RX_PACKER_BUF_NUM (64)
+#define SCO_RX_PACKET_MASK (0x3F)
+
+#define SCO_CVSD_PACKET_VALID_SIZE 2
+
+#define SCO_PACKET_120 120
+#define SCO_PACKET_180 180
+
+#define BTCVSD_RX_PACKET_SIZE (SCO_RX_PLC_SIZE + SCO_CVSD_PACKET_VALID_SIZE)
+#define BTCVSD_TX_PACKET_SIZE (SCO_TX_ENCODE_SIZE)
+
+#define BTCVSD_RX_BUF_SIZE (BTCVSD_RX_PACKET_SIZE * SCO_RX_PACKER_BUF_NUM)
+#define BTCVSD_TX_BUF_SIZE (BTCVSD_TX_PACKET_SIZE * SCO_TX_PACKER_BUF_NUM)
+
+enum bt_sco_state {
+ BT_SCO_STATE_IDLE,
+ BT_SCO_STATE_RUNNING,
+ BT_SCO_STATE_ENDING,
+};
+
+enum bt_sco_direct {
+ BT_SCO_DIRECT_BT2ARM,
+ BT_SCO_DIRECT_ARM2BT,
+};
+
+enum bt_sco_packet_len {
+ BT_SCO_CVSD_30 = 0,
+ BT_SCO_CVSD_60,
+ BT_SCO_CVSD_90,
+ BT_SCO_CVSD_120,
+ BT_SCO_CVSD_10,
+ BT_SCO_CVSD_20,
+ BT_SCO_CVSD_MAX,
+};
+
+enum BT_SCO_BAND {
+ BT_SCO_NB,
+ BT_SCO_WB,
+};
+
+struct mtk_btcvsd_snd_hw_info {
+ unsigned int num_valid_addr;
+ unsigned long bt_sram_addr[20];
+ unsigned int packet_length;
+ unsigned int packet_num;
+};
+
+struct mtk_btcvsd_snd_stream {
+ struct snd_pcm_substream *substream;
+ int stream;
+
+ enum bt_sco_state state;
+
+ unsigned int packet_size;
+ unsigned int buf_size;
+ u8 temp_packet_buf[SCO_PACKET_180];
+
+ int packet_w;
+ int packet_r;
+ snd_pcm_uframes_t prev_frame;
+ int prev_packet_idx;
+
+ unsigned int xrun:1;
+ unsigned int timeout:1;
+ unsigned int mute:1;
+ unsigned int trigger_start:1;
+ unsigned int wait_flag:1;
+ unsigned int rw_cnt;
+
+ unsigned long long time_stamp;
+ unsigned long long buf_data_equivalent_time;
+
+ struct mtk_btcvsd_snd_hw_info buffer_info;
+};
+
+struct mtk_btcvsd_snd {
+ struct device *dev;
+ int irq_id;
+
+ struct regmap *infra;
+ void __iomem *bt_pkv_base;
+ void __iomem *bt_sram_bank2_base;
+
+ unsigned int infra_misc_offset;
+ unsigned int conn_bt_cvsd_mask;
+ unsigned int cvsd_mcu_read_offset;
+ unsigned int cvsd_mcu_write_offset;
+ unsigned int cvsd_packet_indicator;
+
+ u32 *bt_reg_pkt_r;
+ u32 *bt_reg_pkt_w;
+ u32 *bt_reg_ctl;
+
+ unsigned int irq_disabled:1;
+
+ spinlock_t tx_lock; /* spinlock for bt tx stream control */
+ spinlock_t rx_lock; /* spinlock for bt rx stream control */
+ wait_queue_head_t tx_wait;
+ wait_queue_head_t rx_wait;
+
+ struct mtk_btcvsd_snd_stream *tx;
+ struct mtk_btcvsd_snd_stream *rx;
+ u8 tx_packet_buf[BTCVSD_TX_BUF_SIZE];
+ u8 rx_packet_buf[BTCVSD_RX_BUF_SIZE];
+
+ enum BT_SCO_BAND band;
+};
+
+struct mtk_btcvsd_snd_time_buffer_info {
+ unsigned long long data_count_equi_time;
+ unsigned long long time_stamp_us;
+};
+
+static const unsigned int btsco_packet_valid_mask[BT_SCO_CVSD_MAX][6] = {
+ {0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3, 0x1 << 4, 0x1 << 5},
+ {0x1, 0x1, 0x2, 0x2, 0x4, 0x4},
+ {0x1, 0x1, 0x1, 0x2, 0x2, 0x2},
+ {0x1, 0x1, 0x1, 0x1, 0x0, 0x0},
+ {0x7, 0x7 << 3, 0x7 << 6, 0x7 << 9, 0x7 << 12, 0x7 << 15},
+ {0x3, 0x3 << 1, 0x3 << 3, 0x3 << 4, 0x3 << 6, 0x3 << 7},
+};
+
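+/*
+ * Per packet type: {packet_length, packet_num, buf_cnt_tx, buf_cnt_rx},
+ * indexed by enum bt_sco_packet_len; see the irq handler for how each
+ * column is consumed.
+ */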
+static const unsigned int btsco_packet_info[BT_SCO_CVSD_MAX][4] = {
+ {30, 6, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+ SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+ {60, 3, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+ SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+ {90, 2, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+ SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+ {120, 1, SCO_PACKET_120 / SCO_TX_ENCODE_SIZE,
+ SCO_PACKET_120 / SCO_RX_PLC_SIZE},
+ {10, 18, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+ SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+ {20, 9, SCO_PACKET_180 / SCO_TX_ENCODE_SIZE,
+ SCO_PACKET_180 / SCO_RX_PLC_SIZE},
+};
+
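+/*
+ * Pre-encoded mSBC silence frames; btcvsd_tx_clean_buffer() writes this
+ * pattern as mute data when the stream runs in wideband (BT_SCO_WB) mode.
+ */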
+static const u8 table_msbc_silence[SCO_PACKET_180] = {
+ 0x01, 0x38, 0xad, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, 0x00,
+ 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d,
+ 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7,
+ 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd,
+ 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77,
+ 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6c, 0x00,
+ 0x01, 0xc8, 0xad, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, 0x00,
+ 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d,
+ 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7,
+ 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd,
+ 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77,
+ 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6c, 0x00,
+ 0x01, 0xf8, 0xad, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, 0x00,
+ 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d,
+ 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7,
+ 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77, 0x6d, 0xb6, 0xdd,
+ 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6d, 0xdd, 0xb6, 0xdb, 0x77,
+ 0x6d, 0xb6, 0xdd, 0xdb, 0x6d, 0xb7, 0x76, 0xdb, 0x6c, 0x00
+};
+
+static void mtk_btcvsd_snd_irq_enable(struct mtk_btcvsd_snd *bt)
+{
+ regmap_update_bits(bt->infra, bt->infra_misc_offset,
+ bt->conn_bt_cvsd_mask, bt->conn_bt_cvsd_mask);
+}
+
+static void mtk_btcvsd_snd_irq_disable(struct mtk_btcvsd_snd *bt)
+{
+ regmap_update_bits(bt->infra, bt->infra_misc_offset,
+ bt->conn_bt_cvsd_mask, 0);
+}
+
+static void mtk_btcvsd_snd_set_state(struct mtk_btcvsd_snd *bt,
+ struct mtk_btcvsd_snd_stream *bt_stream,
+ int state)
+{
+ dev_dbg(bt->dev, "%s(), stream %d, state %d, tx->state %d, rx->state %d, irq_disabled %d\n",
+ __func__,
+ bt_stream->stream, state,
+ bt->tx->state, bt->rx->state, bt->irq_disabled);
+
+ bt_stream->state = state;
+
+ if (bt->tx->state == BT_SCO_STATE_IDLE &&
+ bt->rx->state == BT_SCO_STATE_IDLE) {
+ if (!bt->irq_disabled) {
+ disable_irq(bt->irq_id);
+ mtk_btcvsd_snd_irq_disable(bt);
+ bt->irq_disabled = 1;
+ }
+ } else {
+ if (bt->irq_disabled) {
+ enable_irq(bt->irq_id);
+ mtk_btcvsd_snd_irq_enable(bt);
+ bt->irq_disabled = 0;
+ }
+ }
+}
+
+static int mtk_btcvsd_snd_tx_init(struct mtk_btcvsd_snd *bt)
+{
+ memset(bt->tx, 0, sizeof(*bt->tx));
+ memset(bt->tx_packet_buf, 0, sizeof(bt->tx_packet_buf));
+
+ bt->tx->packet_size = BTCVSD_TX_PACKET_SIZE;
+ bt->tx->buf_size = BTCVSD_TX_BUF_SIZE;
+ bt->tx->timeout = 0;
+ bt->tx->rw_cnt = 0;
+ bt->tx->stream = SNDRV_PCM_STREAM_PLAYBACK;
+ return 0;
+}
+
+static int mtk_btcvsd_snd_rx_init(struct mtk_btcvsd_snd *bt)
+{
+ memset(bt->rx, 0, sizeof(*bt->rx));
+ memset(bt->rx_packet_buf, 0, sizeof(bt->rx_packet_buf));
+
+ bt->rx->packet_size = BTCVSD_RX_PACKET_SIZE;
+ bt->rx->buf_size = BTCVSD_RX_BUF_SIZE;
+ bt->rx->timeout = 0;
+ bt->rx->rw_cnt = 0;
+ bt->rx->stream = SNDRV_PCM_STREAM_CAPTURE;
+ return 0;
+}
+
+static void get_tx_time_stamp(struct mtk_btcvsd_snd *bt,
+ struct mtk_btcvsd_snd_time_buffer_info *ts)
+{
+ ts->time_stamp_us = bt->tx->time_stamp;
+ ts->data_count_equi_time = bt->tx->buf_data_equivalent_time;
+}
+
+static void get_rx_time_stamp(struct mtk_btcvsd_snd *bt,
+ struct mtk_btcvsd_snd_time_buffer_info *ts)
+{
+ ts->time_stamp_us = bt->rx->time_stamp;
+ ts->data_count_equi_time = bt->rx->buf_data_equivalent_time;
+}
+
+static int btcvsd_bytes_to_frame(struct snd_pcm_substream *substream,
+ int bytes)
+{
+ int count = bytes;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ if (runtime->format == SNDRV_PCM_FORMAT_S32_LE ||
+ runtime->format == SNDRV_PCM_FORMAT_U32_LE)
+ count = count >> 2;
+ else
+ count = count >> 1;
+
+ count = count / runtime->channels;
+ return count;
+}
+
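+/*
+ * Copy blk_num blocks of blk_size bytes between BT SRAM and the driver
+ * buffers. Block sizes divisible by 4 are copied as u32; otherwise the
+ * BT-side buffer appears to carry one extra u16 per block, which is
+ * skipped in the source (BT2ARM) or the destination (ARM2BT).
+ */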
+static void mtk_btcvsd_snd_data_transfer(enum bt_sco_direct dir,
+ u8 *src, u8 *dst,
+ unsigned int blk_size,
+ unsigned int blk_num)
+{
+ unsigned int i, j;
+
+ if (blk_size == 60 || blk_size == 120 || blk_size == 20) {
+ u32 *src_32 = (u32 *)src;
+ u32 *dst_32 = (u32 *)dst;
+
+ for (i = 0; i < (blk_size * blk_num / 4); i++)
+ *dst_32++ = *src_32++;
+ } else {
+ u16 *src_16 = (u16 *)src;
+ u16 *dst_16 = (u16 *)dst;
+
+ for (j = 0; j < blk_num; j++) {
+ for (i = 0; i < (blk_size / 2); i++)
+ *dst_16++ = *src_16++;
+
+ if (dir == BT_SCO_DIRECT_BT2ARM)
+ src_16++;
+ else
+ dst_16++;
+ }
+ }
+}
+
+/* write encoded mute data to bt sram */
+static int btcvsd_tx_clean_buffer(struct mtk_btcvsd_snd *bt)
+{
+ unsigned int i;
+ unsigned int num_valid_addr;
+ unsigned long flags;
+ enum BT_SCO_BAND band = bt->band;
+
+ /* prepare encoded mute data */
+ if (band == BT_SCO_NB)
+ memset(bt->tx->temp_packet_buf, 170, SCO_PACKET_180);
+ else
+ memcpy(bt->tx->temp_packet_buf,
+ table_msbc_silence, SCO_PACKET_180);
+
+ /* write mute data to bt tx sram buffer */
+ spin_lock_irqsave(&bt->tx_lock, flags);
+ num_valid_addr = bt->tx->buffer_info.num_valid_addr;
+
+ dev_info(bt->dev, "%s(), band %d, num_valid_addr %u\n",
+ __func__, band, num_valid_addr);
+
+ for (i = 0; i < num_valid_addr; i++) {
+ void *dst;
+
+ dev_info(bt->dev, "%s(), clean addr 0x%lx\n", __func__,
+ bt->tx->buffer_info.bt_sram_addr[i]);
+
+ dst = (void *)bt->tx->buffer_info.bt_sram_addr[i];
+
+ mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
+ bt->tx->temp_packet_buf, dst,
+ bt->tx->buffer_info.packet_length,
+ bt->tx->buffer_info.packet_num);
+ }
+ spin_unlock_irqrestore(&bt->tx_lock, flags);
+
+ return 0;
+}
+
+static int mtk_btcvsd_read_from_bt(struct mtk_btcvsd_snd *bt,
+ enum bt_sco_packet_len packet_type,
+ unsigned int packet_length,
+ unsigned int packet_num,
+ unsigned int blk_size,
+ unsigned int control)
+{
+ unsigned int i;
+ int pv;
+ u8 *src;
+ unsigned int packet_buf_ofs;
+ unsigned long flags;
+ unsigned long connsys_addr_rx, ap_addr_rx;
+
+ connsys_addr_rx = *bt->bt_reg_pkt_r;
+ ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base +
+ (connsys_addr_rx & 0xFFFF);
+
+ if (connsys_addr_rx == 0xdeadfeed) {
+ /* bt return 0xdeadfeed if read register during bt sleep */
+ dev_warn(bt->dev, "%s(), connsys_addr_rx == 0xdeadfeed",
+ __func__);
+ return -EIO;
+ }
+
+ src = (u8 *)ap_addr_rx;
+
+ mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_BT2ARM, src,
+ bt->rx->temp_packet_buf, packet_length,
+ packet_num);
+
+ spin_lock_irqsave(&bt->rx_lock, flags);
+ for (i = 0; i < blk_size; i++) {
+ packet_buf_ofs = (bt->rx->packet_w & SCO_RX_PACKET_MASK) *
+ bt->rx->packet_size;
+ memcpy(bt->rx_packet_buf + packet_buf_ofs,
+ bt->rx->temp_packet_buf + (SCO_RX_PLC_SIZE * i),
+ SCO_RX_PLC_SIZE);
+ if ((control & btsco_packet_valid_mask[packet_type][i]) ==
+ btsco_packet_valid_mask[packet_type][i])
+ pv = 1;
+ else
+ pv = 0;
+
+ packet_buf_ofs += SCO_RX_PLC_SIZE;
+ memcpy(bt->rx_packet_buf + packet_buf_ofs, (void *)&pv,
+ SCO_CVSD_PACKET_VALID_SIZE);
+ bt->rx->packet_w++;
+ }
+ spin_unlock_irqrestore(&bt->rx_lock, flags);
+ return 0;
+}
+
+int mtk_btcvsd_write_to_bt(struct mtk_btcvsd_snd *bt,
+ enum bt_sco_packet_len packet_type,
+ unsigned int packet_length,
+ unsigned int packet_num,
+ unsigned int blk_size)
+{
+ unsigned int i;
+ unsigned long flags;
+ u8 *dst;
+ unsigned long connsys_addr_tx, ap_addr_tx;
+ bool new_ap_addr_tx = true;
+
+ connsys_addr_tx = *bt->bt_reg_pkt_w;
+ ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base +
+ (connsys_addr_tx & 0xFFFF);
+
+ if (connsys_addr_tx == 0xdeadfeed) {
+ /* bt return 0xdeadfeed if read register during bt sleep */
+ dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n",
+ __func__);
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&bt->tx_lock, flags);
+ for (i = 0; i < blk_size; i++) {
+ memcpy(bt->tx->temp_packet_buf + (bt->tx->packet_size * i),
+ (bt->tx_packet_buf +
+ (bt->tx->packet_r % SCO_TX_PACKER_BUF_NUM) *
+ bt->tx->packet_size),
+ bt->tx->packet_size);
+
+ bt->tx->packet_r++;
+ }
+ spin_unlock_irqrestore(&bt->tx_lock, flags);
+
+ dst = (u8 *)ap_addr_tx;
+
+ if (!bt->tx->mute) {
+ mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
+ bt->tx->temp_packet_buf, dst,
+ packet_length, packet_num);
+ }
+
+ /* store bt tx buffer sram info */
+ bt->tx->buffer_info.packet_length = packet_length;
+ bt->tx->buffer_info.packet_num = packet_num;
+ for (i = 0; i < bt->tx->buffer_info.num_valid_addr; i++) {
+ if (bt->tx->buffer_info.bt_sram_addr[i] == ap_addr_tx) {
+ new_ap_addr_tx = false;
+ break;
+ }
+ }
+ if (new_ap_addr_tx) {
+ unsigned int next_idx;
+
+ spin_lock_irqsave(&bt->tx_lock, flags);
+ bt->tx->buffer_info.num_valid_addr++;
+ next_idx = bt->tx->buffer_info.num_valid_addr - 1;
+ bt->tx->buffer_info.bt_sram_addr[next_idx] = ap_addr_tx;
+ spin_unlock_irqrestore(&bt->tx_lock, flags);
+ dev_info(bt->dev, "%s(), new ap_addr_tx = 0x%lx, num_valid_addr %d\n",
+ __func__, ap_addr_tx,
+ bt->tx->buffer_info.num_valid_addr);
+ }
+
+ if (bt->tx->mute)
+ btcvsd_tx_clean_buffer(bt);
+
+ return 0;
+}
+
+static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
+{
+ struct mtk_btcvsd_snd *bt = dev;
+ unsigned int packet_type, packet_num, packet_length;
+ unsigned int buf_cnt_tx, buf_cnt_rx, control;
+
+ if (bt->rx->state != BT_SCO_STATE_RUNNING &&
+ bt->rx->state != BT_SCO_STATE_ENDING &&
+ bt->tx->state != BT_SCO_STATE_RUNNING &&
+ bt->tx->state != BT_SCO_STATE_ENDING) {
+ dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n",
+ __func__, bt->rx->state, bt->tx->state);
+ goto irq_handler_exit;
+ }
+
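+ /*
+ * Control word layout as used below: bits [20:18] select the CVSD
+ * packet type, bit 31 (BT_CVSD_INTERRUPT) flags a valid interrupt.
+ */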
+ control = *bt->bt_reg_ctl;
+ packet_type = (control >> 18) & 0x7;
+
+ if (((control >> 31) & 1) == 0) {
+ dev_warn(bt->dev, "%s(), ((control >> 31) & 1) == 0, control 0x%x\n",
+ __func__, control);
+ goto irq_handler_exit;
+ }
+
+ if (packet_type >= BT_SCO_CVSD_MAX) {
+ dev_warn(bt->dev, "%s(), invalid packet_type %u, exit\n",
+ __func__, packet_type);
+ goto irq_handler_exit;
+ }
+
+ packet_length = btsco_packet_info[packet_type][0];
+ packet_num = btsco_packet_info[packet_type][1];
+ buf_cnt_tx = btsco_packet_info[packet_type][2];
+ buf_cnt_rx = btsco_packet_info[packet_type][3];
+
+ if (bt->rx->state == BT_SCO_STATE_RUNNING ||
+ bt->rx->state == BT_SCO_STATE_ENDING) {
+ if (bt->rx->xrun) {
+ if (bt->rx->packet_w - bt->rx->packet_r <=
+ SCO_RX_PACKER_BUF_NUM - 2 * buf_cnt_rx) {
+ /*
+ * free space is larger than
+ * twice interrupt rx data size
+ */
+ bt->rx->xrun = 0;
+ dev_warn(bt->dev, "%s(), rx->xrun 0!\n",
+ __func__);
+ }
+ }
+
+ if (!bt->rx->xrun &&
+ (bt->rx->packet_w - bt->rx->packet_r <=
+ SCO_RX_PACKER_BUF_NUM - buf_cnt_rx)) {
+ mtk_btcvsd_read_from_bt(bt,
+ packet_type,
+ packet_length,
+ packet_num,
+ buf_cnt_rx,
+ control);
+ bt->rx->rw_cnt++;
+ } else {
+ bt->rx->xrun = 1;
+ dev_warn(bt->dev, "%s(), rx->xrun 1\n", __func__);
+ }
+ }
+
+ /* tx */
+ bt->tx->timeout = 0;
+ if ((bt->tx->state == BT_SCO_STATE_RUNNING ||
+ bt->tx->state == BT_SCO_STATE_ENDING) &&
+ bt->tx->trigger_start) {
+ if (bt->tx->xrun) {
+ /* prepared data is larger than twice
+ * interrupt tx data size
+ */
+ if (bt->tx->packet_w - bt->tx->packet_r >=
+ 2 * buf_cnt_tx) {
+ bt->tx->xrun = 0;
+ dev_warn(bt->dev, "%s(), tx->xrun 0\n",
+ __func__);
+ }
+ }
+
+ if ((!bt->tx->xrun &&
+ (bt->tx->packet_w - bt->tx->packet_r >= buf_cnt_tx)) ||
+ bt->tx->state == BT_SCO_STATE_ENDING) {
+ mtk_btcvsd_write_to_bt(bt,
+ packet_type,
+ packet_length,
+ packet_num,
+ buf_cnt_tx);
+ bt->tx->rw_cnt++;
+ } else {
+ bt->tx->xrun = 1;
+ dev_warn(bt->dev, "%s(), tx->xrun 1\n", __func__);
+ }
+ }
+
+ *bt->bt_reg_ctl &= ~BT_CVSD_CLEAR;
+
+ if (bt->rx->state == BT_SCO_STATE_RUNNING ||
+ bt->rx->state == BT_SCO_STATE_ENDING) {
+ bt->rx->wait_flag = 1;
+ wake_up_interruptible(&bt->rx_wait);
+ snd_pcm_period_elapsed(bt->rx->substream);
+ }
+ if (bt->tx->state == BT_SCO_STATE_RUNNING ||
+ bt->tx->state == BT_SCO_STATE_ENDING) {
+ bt->tx->wait_flag = 1;
+ wake_up_interruptible(&bt->tx_wait);
+ snd_pcm_period_elapsed(bt->tx->substream);
+ }
+
+ return IRQ_HANDLED;
+irq_handler_exit:
+ *bt->bt_reg_ctl &= ~BT_CVSD_CLEAR;
+ return IRQ_HANDLED;
+}
+
+static int wait_for_bt_irq(struct mtk_btcvsd_snd *bt,
+ struct mtk_btcvsd_snd_stream *bt_stream)
+{
+ unsigned long long t1, t2;
+ /* one interrupt period = 22.5ms */
+ unsigned long long timeout_limit = 22500000;
+ int max_timeout_trial = 2;
+ int ret;
+
+ bt_stream->wait_flag = 0;
+
+ while (max_timeout_trial && !bt_stream->wait_flag) {
+ t1 = sched_clock();
+ if (bt_stream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = wait_event_interruptible_timeout(bt->tx_wait,
+ bt_stream->wait_flag,
+ nsecs_to_jiffies(timeout_limit));
+ } else {
+ ret = wait_event_interruptible_timeout(bt->rx_wait,
+ bt_stream->wait_flag,
+ nsecs_to_jiffies(timeout_limit));
+ }
+
+ t2 = sched_clock();
+ t2 = t2 - t1; /* in ns (10^9) */
+
+ if (t2 > timeout_limit) {
+ dev_warn(bt->dev, "%s(), stream %d, timeout %llu, limit %llu, ret %d, flag %d\n",
+ __func__, bt_stream->stream,
+ t2, timeout_limit, ret,
+ bt_stream->wait_flag);
+ }
+
+ if (ret < 0) {
+ /*
+ * error, -ERESTARTSYS if it was interrupted by
+ * a signal
+ */
+ dev_warn(bt->dev, "%s(), stream %d, error, trial left %d\n",
+ __func__,
+ bt_stream->stream, max_timeout_trial);
+
+ bt_stream->timeout = 1;
+ return ret;
+ } else if (ret == 0) {
+			/* condition is false after timeout */
+ max_timeout_trial--;
+ dev_warn(bt->dev, "%s(), stream %d, error, timeout, condition is false, trial left %d\n",
+ __func__,
+ bt_stream->stream, max_timeout_trial);
+
+ if (max_timeout_trial <= 0) {
+ bt_stream->timeout = 1;
+ return -ETIME;
+ }
+ }
+ }
+
+ return 0;
+}
+
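+/*
+ * Copy received SCO packets from the RX ring buffer to user space,
+ * blocking in wait_for_bt_irq() while the ring buffer is empty.
+ */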
+ssize_t mtk_btcvsd_snd_read(struct mtk_btcvsd_snd *bt,
+ char __user *buf,
+ size_t count)
+{
+ ssize_t read_size = 0, read_count = 0, cur_read_idx, cont;
+ unsigned int cur_buf_ofs = 0;
+ unsigned long avail;
+ unsigned long flags;
+ unsigned int packet_size = bt->rx->packet_size;
+
+ while (count) {
+ spin_lock_irqsave(&bt->rx_lock, flags);
+ /* available data in RX packet buffer */
+ avail = (bt->rx->packet_w - bt->rx->packet_r) * packet_size;
+
+ cur_read_idx = (bt->rx->packet_r & SCO_RX_PACKET_MASK) *
+ packet_size;
+ spin_unlock_irqrestore(&bt->rx_lock, flags);
+
+ if (!avail) {
+ int ret = wait_for_bt_irq(bt, bt->rx);
+
+ if (ret)
+ return read_count;
+
+ continue;
+ }
+
+		/* count must be a multiple of packet_size */
+		if (count % packet_size != 0 ||
+		    avail % packet_size != 0) {
+			dev_warn(bt->dev, "%s(), count %zu or avail %lu is not a multiple of packet_size %d\n",
+				 __func__, count, avail, packet_size);
+
+ count -= count % packet_size;
+ avail -= avail % packet_size;
+ }
+
+ if (count > avail)
+ read_size = avail;
+ else
+ read_size = count;
+
+ /* calculate continue space */
+ cont = bt->rx->buf_size - cur_read_idx;
+ if (read_size > cont)
+ read_size = cont;
+
+ if (copy_to_user(buf + cur_buf_ofs,
+ bt->rx_packet_buf + cur_read_idx,
+ read_size)) {
+ dev_warn(bt->dev, "%s(), copy_to_user fail\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&bt->rx_lock, flags);
+ bt->rx->packet_r += read_size / packet_size;
+ spin_unlock_irqrestore(&bt->rx_lock, flags);
+
+ read_count += read_size;
+ cur_buf_ofs += read_size;
+ count -= read_size;
+ }
+
+ /*
+	 * save current timestamp & buffer time in time_stamp and
+ * buf_data_equivalent_time
+ */
+ bt->rx->time_stamp = sched_clock();
+ bt->rx->buf_data_equivalent_time =
+ (unsigned long long)(bt->rx->packet_w - bt->rx->packet_r) *
+ SCO_RX_PLC_SIZE * 16 * 1000 / 2 / 64;
+ bt->rx->buf_data_equivalent_time += read_count * SCO_RX_PLC_SIZE *
+ 16 * 1000 / packet_size / 2 / 64;
+	/* report the time (us) equivalent to the buffered data count */
+ bt->rx->buf_data_equivalent_time *= 1000;
+
+ return read_count;
+}
+
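+/*
+ * Copy SCO packets from user space into the TX ring buffer, blocking in
+ * wait_for_bt_irq() while the ring buffer is full.
+ */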
+ssize_t mtk_btcvsd_snd_write(struct mtk_btcvsd_snd *bt,
+ char __user *buf,
+ size_t count)
+{
+ int written_size = count, avail = 0, cur_write_idx, write_size, cont;
+ unsigned int cur_buf_ofs = 0;
+ unsigned long flags;
+ unsigned int packet_size = bt->tx->packet_size;
+
+ /*
+ * save current timestamp & buffer time in time_stamp and
+ * buf_data_equivalent_time
+ */
+ bt->tx->time_stamp = sched_clock();
+ bt->tx->buf_data_equivalent_time =
+ (unsigned long long)(bt->tx->packet_w - bt->tx->packet_r) *
+ packet_size * 16 * 1000 / 2 / 64;
+
+	/* report the time (us) equivalent to the buffered data count */
+ bt->tx->buf_data_equivalent_time *= 1000;
+
+ while (count) {
+ spin_lock_irqsave(&bt->tx_lock, flags);
+ /* free space of TX packet buffer */
+ avail = bt->tx->buf_size -
+ (bt->tx->packet_w - bt->tx->packet_r) * packet_size;
+
+ cur_write_idx = (bt->tx->packet_w % SCO_TX_PACKER_BUF_NUM) *
+ packet_size;
+ spin_unlock_irqrestore(&bt->tx_lock, flags);
+
+ if (!avail) {
+			int ret = wait_for_bt_irq(bt, bt->tx);
+
+ if (ret)
+ return written_size;
+
+ continue;
+ }
+
+		/* count must be a multiple of bt->tx->packet_size */
+		if (count % packet_size != 0 ||
+		    avail % packet_size != 0) {
+			dev_warn(bt->dev, "%s(), count %zu or avail %d is not a multiple of packet_size %d\n",
+				 __func__, count, avail, packet_size);
+ count -= count % packet_size;
+ avail -= avail % packet_size;
+ }
+
+ if (count > avail)
+ write_size = avail;
+ else
+ write_size = count;
+
+ /* calculate continue space */
+ cont = bt->tx->buf_size - cur_write_idx;
+ if (write_size > cont)
+ write_size = cont;
+
+ if (copy_from_user(bt->tx_packet_buf +
+ cur_write_idx,
+ buf + cur_buf_ofs,
+ write_size)) {
+ dev_warn(bt->dev, "%s(), copy_from_user fail\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&bt->tx_lock, flags);
+ bt->tx->packet_w += write_size / packet_size;
+ spin_unlock_irqrestore(&bt->tx_lock, flags);
+ cur_buf_ofs += write_size;
+ count -= write_size;
+ }
+
+ return written_size;
+}
+
+static struct mtk_btcvsd_snd_stream *get_bt_stream
+ (struct mtk_btcvsd_snd *bt, struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return bt->tx;
+ else
+ return bt->rx;
+}
+
+/* pcm ops */
+static const struct snd_pcm_hardware mtk_btcvsd_hardware = {
+ .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_RESUME),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .buffer_bytes_max = 24 * 1024,
+ .period_bytes_max = 24 * 1024,
+ .periods_min = 2,
+ .periods_max = 16,
+ .fifo_size = 0,
+};
+
+static int mtk_pcm_btcvsd_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+ int ret;
+
+ dev_dbg(bt->dev, "%s(), stream %d, substream %p\n",
+ __func__, substream->stream, substream);
+
+ snd_soc_set_runtime_hwparams(substream, &mtk_btcvsd_hardware);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = mtk_btcvsd_snd_tx_init(bt);
+ bt->tx->substream = substream;
+ } else {
+ ret = mtk_btcvsd_snd_rx_init(bt);
+ bt->rx->substream = substream;
+ }
+
+ return ret;
+}
+
+static int mtk_pcm_btcvsd_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+ struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
+
+ dev_dbg(bt->dev, "%s(), stream %d\n", __func__, substream->stream);
+
+ mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_IDLE);
+ bt_stream->substream = NULL;
+ return 0;
+}
+
+static int mtk_pcm_btcvsd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ params_buffer_bytes(hw_params) % bt->tx->packet_size != 0) {
+ dev_warn(bt->dev, "%s(), error, buffer size %d not valid\n",
+ __func__,
+ params_buffer_bytes(hw_params));
+ return -EINVAL;
+ }
+
+ substream->runtime->dma_bytes = params_buffer_bytes(hw_params);
+ return 0;
+}
+
+static int mtk_pcm_btcvsd_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ btcvsd_tx_clean_buffer(bt);
+
+ return 0;
+}
+
+static int mtk_pcm_btcvsd_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+ struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
+
+ dev_dbg(bt->dev, "%s(), stream %d\n", __func__, substream->stream);
+
+ mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_RUNNING);
+ return 0;
+}
+
+static int mtk_pcm_btcvsd_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+ struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
+ int stream = substream->stream;
+ int hw_packet_ptr;
+
+ dev_dbg(bt->dev, "%s(), stream %d, cmd %d\n",
+ __func__, substream->stream, cmd);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ hw_packet_ptr = stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ bt_stream->packet_r : bt_stream->packet_w;
+ bt_stream->prev_packet_idx = hw_packet_ptr;
+ bt_stream->prev_frame = 0;
+ bt_stream->trigger_start = 1;
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ bt_stream->trigger_start = 0;
+ mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_ENDING);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
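+/*
+ * Derive the DMA pointer from the number of packets the interrupt handler
+ * has consumed (TX) or produced (RX) since the last call, converted to
+ * frames and wrapped to the runtime buffer size.
+ */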
+static snd_pcm_uframes_t mtk_pcm_btcvsd_pointer
+ (struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+ struct mtk_btcvsd_snd_stream *bt_stream;
+ snd_pcm_uframes_t frame = 0;
+ int byte = 0;
+ int hw_packet_ptr;
+ int packet_diff;
+ spinlock_t *lock; /* spinlock for bt stream control */
+ unsigned long flags;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ lock = &bt->tx_lock;
+ bt_stream = bt->tx;
+ } else {
+ lock = &bt->rx_lock;
+ bt_stream = bt->rx;
+ }
+
+ spin_lock_irqsave(lock, flags);
+ hw_packet_ptr = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ bt->tx->packet_r : bt->rx->packet_w;
+
+ /* get packet diff from last time */
+ if (hw_packet_ptr >= bt_stream->prev_packet_idx) {
+ packet_diff = hw_packet_ptr - bt_stream->prev_packet_idx;
+ } else {
+		/* the packet counter wrapped around past INT_MAX */
+ packet_diff = (INT_MAX - bt_stream->prev_packet_idx) +
+ (hw_packet_ptr - INT_MIN) + 1;
+ }
+ bt_stream->prev_packet_idx = hw_packet_ptr;
+
+ /* increased bytes */
+ byte = packet_diff * bt_stream->packet_size;
+
+ frame = btcvsd_bytes_to_frame(substream, byte);
+ frame += bt_stream->prev_frame;
+ frame %= substream->runtime->buffer_size;
+
+ bt_stream->prev_frame = frame;
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return frame;
+}
+
+static int mtk_pcm_btcvsd_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *buf, unsigned long count)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ mtk_btcvsd_snd_write(bt, buf, count);
+ else
+ mtk_btcvsd_snd_read(bt, buf, count);
+
+ return 0;
+}
+
+static const struct snd_pcm_ops mtk_btcvsd_ops = {
+ .open = mtk_pcm_btcvsd_open,
+ .close = mtk_pcm_btcvsd_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = mtk_pcm_btcvsd_hw_params,
+ .hw_free = mtk_pcm_btcvsd_hw_free,
+ .prepare = mtk_pcm_btcvsd_prepare,
+ .trigger = mtk_pcm_btcvsd_trigger,
+ .pointer = mtk_pcm_btcvsd_pointer,
+ .copy_user = mtk_pcm_btcvsd_copy,
+};
+
+/* kcontrol */
+static const char *const btsco_band_str[] = {"NB", "WB"};
+
+static const struct soc_enum btcvsd_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(btsco_band_str), btsco_band_str),
+};
+
+static int btcvsd_band_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.integer.value[0] = bt->band;
+ return 0;
+}
+
+static int btcvsd_band_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ bt->band = ucontrol->value.integer.value[0];
+ dev_dbg(bt->dev, "%s(), band %d\n", __func__, bt->band);
+ return 0;
+}
+
+static int btcvsd_tx_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+ if (!bt->tx) {
+ ucontrol->value.integer.value[0] = 0;
+ return 0;
+ }
+
+ ucontrol->value.integer.value[0] = bt->tx->mute;
+ return 0;
+}
+
+static int btcvsd_tx_mute_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+ if (!bt->tx)
+ return 0;
+
+ bt->tx->mute = ucontrol->value.integer.value[0];
+ return 0;
+}
+
+static int btcvsd_rx_irq_received_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+ if (!bt->rx)
+ return 0;
+
+ ucontrol->value.integer.value[0] = bt->rx->rw_cnt ? 1 : 0;
+ return 0;
+}
+
+static int btcvsd_rx_timeout_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+ if (!bt->rx)
+ return 0;
+
+ ucontrol->value.integer.value[0] = bt->rx->timeout;
+ bt->rx->timeout = 0;
+ return 0;
+}
+
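+/* bytes TLV control: copy the RX timestamp and buffered-data time to user space */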
+static int btcvsd_rx_timestamp_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *data, unsigned int size)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+ int ret = 0;
+ struct mtk_btcvsd_snd_time_buffer_info time_buffer_info_rx;
+
+ if (size > sizeof(struct mtk_btcvsd_snd_time_buffer_info))
+ return -EINVAL;
+
+ get_rx_time_stamp(bt, &time_buffer_info_rx);
+
+ dev_dbg(bt->dev, "%s(), time_stamp_us %llu, data_count_equi_time %llu",
+ __func__,
+ time_buffer_info_rx.time_stamp_us,
+ time_buffer_info_rx.data_count_equi_time);
+
+ if (copy_to_user(data, &time_buffer_info_rx,
+ sizeof(struct mtk_btcvsd_snd_time_buffer_info))) {
+ dev_warn(bt->dev, "%s(), copy_to_user fail", __func__);
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static int btcvsd_tx_irq_received_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+ if (!bt->tx)
+ return 0;
+
+ ucontrol->value.integer.value[0] = bt->tx->rw_cnt ? 1 : 0;
+ return 0;
+}
+
+static int btcvsd_tx_timeout_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.integer.value[0] = bt->tx->timeout;
+ return 0;
+}
+
+static int btcvsd_tx_timestamp_get(struct snd_kcontrol *kcontrol,
+ unsigned int __user *data, unsigned int size)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+ int ret = 0;
+ struct mtk_btcvsd_snd_time_buffer_info time_buffer_info_tx;
+
+ if (size > sizeof(struct mtk_btcvsd_snd_time_buffer_info))
+ return -EINVAL;
+
+ get_tx_time_stamp(bt, &time_buffer_info_tx);
+
+ dev_dbg(bt->dev, "%s(), time_stamp_us %llu, data_count_equi_time %llu",
+ __func__,
+ time_buffer_info_tx.time_stamp_us,
+ time_buffer_info_tx.data_count_equi_time);
+
+ if (copy_to_user(data, &time_buffer_info_tx,
+ sizeof(struct mtk_btcvsd_snd_time_buffer_info))) {
+ dev_warn(bt->dev, "%s(), copy_to_user fail", __func__);
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static const struct snd_kcontrol_new mtk_btcvsd_snd_controls[] = {
+ SOC_ENUM_EXT("BTCVSD Band", btcvsd_enum[0],
+ btcvsd_band_get, btcvsd_band_set),
+ SOC_SINGLE_BOOL_EXT("BTCVSD Tx Mute Switch", 0,
+ btcvsd_tx_mute_get, btcvsd_tx_mute_set),
+ SOC_SINGLE_BOOL_EXT("BTCVSD Tx Irq Received Switch", 0,
+ btcvsd_tx_irq_received_get, NULL),
+ SOC_SINGLE_BOOL_EXT("BTCVSD Tx Timeout Switch", 0,
+ btcvsd_tx_timeout_get, NULL),
+ SOC_SINGLE_BOOL_EXT("BTCVSD Rx Irq Received Switch", 0,
+ btcvsd_rx_irq_received_get, NULL),
+ SOC_SINGLE_BOOL_EXT("BTCVSD Rx Timeout Switch", 0,
+ btcvsd_rx_timeout_get, NULL),
+ SND_SOC_BYTES_TLV("BTCVSD Rx Timestamp",
+ sizeof(struct mtk_btcvsd_snd_time_buffer_info),
+ btcvsd_rx_timestamp_get, NULL),
+ SND_SOC_BYTES_TLV("BTCVSD Tx Timestamp",
+ sizeof(struct mtk_btcvsd_snd_time_buffer_info),
+ btcvsd_tx_timestamp_get, NULL),
+};
+
+static int mtk_btcvsd_snd_component_probe(struct snd_soc_component *component)
+{
+ return snd_soc_add_component_controls(component,
+ mtk_btcvsd_snd_controls,
+ ARRAY_SIZE(mtk_btcvsd_snd_controls));
+}
+
+static const struct snd_soc_component_driver mtk_btcvsd_snd_platform = {
+ .name = BTCVSD_SND_NAME,
+ .ops = &mtk_btcvsd_ops,
+ .probe = mtk_btcvsd_snd_component_probe,
+};
+
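+/*
+ * Probe: allocate the driver data and TX/RX streams, request the BT SCO
+ * interrupt, map the packet SRAM regions, look up the infracfg syscon and
+ * register offsets from the device tree, then register the component.
+ */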
+static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int irq_id;
+ u32 offset[5] = {0, 0, 0, 0, 0};
+ struct mtk_btcvsd_snd *btcvsd;
+ struct device *dev = &pdev->dev;
+
+ /* init btcvsd private data */
+ btcvsd = devm_kzalloc(dev, sizeof(*btcvsd), GFP_KERNEL);
+ if (!btcvsd)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, btcvsd);
+ btcvsd->dev = dev;
+
+ /* init tx/rx */
+ btcvsd->rx = devm_kzalloc(btcvsd->dev, sizeof(*btcvsd->rx), GFP_KERNEL);
+ if (!btcvsd->rx)
+ return -ENOMEM;
+
+ btcvsd->tx = devm_kzalloc(btcvsd->dev, sizeof(*btcvsd->tx), GFP_KERNEL);
+ if (!btcvsd->tx)
+ return -ENOMEM;
+
+ spin_lock_init(&btcvsd->tx_lock);
+ spin_lock_init(&btcvsd->rx_lock);
+
+ init_waitqueue_head(&btcvsd->tx_wait);
+ init_waitqueue_head(&btcvsd->rx_wait);
+
+ mtk_btcvsd_snd_tx_init(btcvsd);
+ mtk_btcvsd_snd_rx_init(btcvsd);
+
+ /* irq */
+ irq_id = platform_get_irq(pdev, 0);
+ if (irq_id <= 0) {
+ dev_err(dev, "%pOFn no irq found\n", dev->of_node);
+ return irq_id < 0 ? irq_id : -ENXIO;
+ }
+
+ ret = devm_request_irq(dev, irq_id, mtk_btcvsd_snd_irq_handler,
+ IRQF_TRIGGER_LOW, "BTCVSD_ISR_Handle",
+ (void *)btcvsd);
+ if (ret) {
+ dev_err(dev, "could not request_irq for BTCVSD_ISR_Handle\n");
+ return ret;
+ }
+
+ btcvsd->irq_id = irq_id;
+
+ /* iomap */
+ btcvsd->bt_pkv_base = of_iomap(dev->of_node, 0);
+ if (!btcvsd->bt_pkv_base) {
+ dev_err(dev, "iomap bt_pkv_base fail\n");
+ return -EIO;
+ }
+
+ btcvsd->bt_sram_bank2_base = of_iomap(dev->of_node, 1);
+ if (!btcvsd->bt_sram_bank2_base) {
+ dev_err(dev, "iomap bt_sram_bank2_base fail\n");
+ return -EIO;
+ }
+
+ btcvsd->infra = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "mediatek,infracfg");
+ if (IS_ERR(btcvsd->infra)) {
+ dev_err(dev, "cannot find infra controller: %ld\n",
+ PTR_ERR(btcvsd->infra));
+ return PTR_ERR(btcvsd->infra);
+ }
+
+ /* get offset */
+ ret = of_property_read_u32_array(dev->of_node, "mediatek,offset",
+ offset,
+ ARRAY_SIZE(offset));
+ if (ret) {
+ dev_warn(dev, "%s(), get offset fail, ret %d\n", __func__, ret);
+ return ret;
+ }
+ btcvsd->infra_misc_offset = offset[0];
+ btcvsd->conn_bt_cvsd_mask = offset[1];
+ btcvsd->cvsd_mcu_read_offset = offset[2];
+ btcvsd->cvsd_mcu_write_offset = offset[3];
+ btcvsd->cvsd_packet_indicator = offset[4];
+
+ btcvsd->bt_reg_pkt_r = btcvsd->bt_pkv_base +
+ btcvsd->cvsd_mcu_read_offset;
+ btcvsd->bt_reg_pkt_w = btcvsd->bt_pkv_base +
+ btcvsd->cvsd_mcu_write_offset;
+ btcvsd->bt_reg_ctl = btcvsd->bt_pkv_base +
+ btcvsd->cvsd_packet_indicator;
+
+ /* init state */
+ mtk_btcvsd_snd_set_state(btcvsd, btcvsd->tx, BT_SCO_STATE_IDLE);
+ mtk_btcvsd_snd_set_state(btcvsd, btcvsd->rx, BT_SCO_STATE_IDLE);
+
+ return devm_snd_soc_register_component(dev, &mtk_btcvsd_snd_platform,
+ NULL, 0);
+}
+
+static int mtk_btcvsd_snd_remove(struct platform_device *pdev)
+{
+ struct mtk_btcvsd_snd *btcvsd = dev_get_drvdata(&pdev->dev);
+
+ iounmap(btcvsd->bt_pkv_base);
+ iounmap(btcvsd->bt_sram_bank2_base);
+ return 0;
+}
+
+static const struct of_device_id mtk_btcvsd_snd_dt_match[] = {
+ { .compatible = "mediatek,mtk-btcvsd-snd", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_btcvsd_snd_dt_match);
+
+static struct platform_driver mtk_btcvsd_snd_driver = {
+ .driver = {
+ .name = "mtk-btcvsd-snd",
+ .of_match_table = mtk_btcvsd_snd_dt_match,
+ },
+ .probe = mtk_btcvsd_snd_probe,
+ .remove = mtk_btcvsd_snd_remove,
+};
+
+module_platform_driver(mtk_btcvsd_snd_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA BT SCO CVSD/MSBC Driver");
+MODULE_AUTHOR("KaiChieh Chuang <kaichieh.chuang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt8183/Makefile b/sound/soc/mediatek/mt8183/Makefile
new file mode 100644
index 000000000000..f3ee6ac98fe8
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# platform driver
+snd-soc-mt8183-afe-objs := \
+ mt8183-afe-pcm.o \
+ mt8183-afe-clk.o \
+ mt8183-dai-i2s.o \
+ mt8183-dai-tdm.o \
+ mt8183-dai-pcm.o \
+ mt8183-dai-hostless.o \
+ mt8183-dai-adda.o
+
+obj-$(CONFIG_SND_SOC_MT8183) += snd-soc-mt8183-afe.o
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-clk.c b/sound/soc/mediatek/mt8183/mt8183-afe-clk.c
new file mode 100644
index 000000000000..f523ad103acc
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-clk.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8183-afe-clk.c -- Mediatek 8183 afe clock ctrl
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/clk.h>
+
+#include "mt8183-afe-common.h"
+#include "mt8183-afe-clk.h"
+#include "mt8183-reg.h"
+
+enum {
+ CLK_AFE = 0,
+ CLK_TML,
+ CLK_APLL22M,
+ CLK_APLL24M,
+ CLK_APLL1_TUNER,
+ CLK_APLL2_TUNER,
+ CLK_I2S1_BCLK_SW,
+ CLK_I2S2_BCLK_SW,
+ CLK_I2S3_BCLK_SW,
+ CLK_I2S4_BCLK_SW,
+ CLK_INFRA_SYS_AUDIO,
+ CLK_MUX_AUDIO,
+ CLK_MUX_AUDIOINTBUS,
+ CLK_TOP_SYSPLL_D2_D4,
+ /* apll related mux */
+ CLK_TOP_MUX_AUD_1,
+ CLK_TOP_APLL1_CK,
+ CLK_TOP_MUX_AUD_2,
+ CLK_TOP_APLL2_CK,
+ CLK_TOP_MUX_AUD_ENG1,
+ CLK_TOP_APLL1_D8,
+ CLK_TOP_MUX_AUD_ENG2,
+ CLK_TOP_APLL2_D8,
+ CLK_TOP_I2S0_M_SEL,
+ CLK_TOP_I2S1_M_SEL,
+ CLK_TOP_I2S2_M_SEL,
+ CLK_TOP_I2S3_M_SEL,
+ CLK_TOP_I2S4_M_SEL,
+ CLK_TOP_I2S5_M_SEL,
+ CLK_TOP_APLL12_DIV0,
+ CLK_TOP_APLL12_DIV1,
+ CLK_TOP_APLL12_DIV2,
+ CLK_TOP_APLL12_DIV3,
+ CLK_TOP_APLL12_DIV4,
+ CLK_TOP_APLL12_DIVB,
+ CLK_CLK26M,
+ CLK_NUM
+};
+
+static const char *aud_clks[CLK_NUM] = {
+ [CLK_AFE] = "aud_afe_clk",
+ [CLK_TML] = "aud_tml_clk",
+ [CLK_APLL22M] = "aud_apll22m_clk",
+ [CLK_APLL24M] = "aud_apll24m_clk",
+ [CLK_APLL1_TUNER] = "aud_apll1_tuner_clk",
+ [CLK_APLL2_TUNER] = "aud_apll2_tuner_clk",
+ [CLK_I2S1_BCLK_SW] = "aud_i2s1_bclk_sw",
+ [CLK_I2S2_BCLK_SW] = "aud_i2s2_bclk_sw",
+ [CLK_I2S3_BCLK_SW] = "aud_i2s3_bclk_sw",
+ [CLK_I2S4_BCLK_SW] = "aud_i2s4_bclk_sw",
+ [CLK_INFRA_SYS_AUDIO] = "aud_infra_clk",
+ [CLK_MUX_AUDIO] = "top_mux_audio",
+ [CLK_MUX_AUDIOINTBUS] = "top_mux_aud_intbus",
+ [CLK_TOP_SYSPLL_D2_D4] = "top_syspll_d2_d4",
+ [CLK_TOP_MUX_AUD_1] = "top_mux_aud_1",
+ [CLK_TOP_APLL1_CK] = "top_apll1_ck",
+ [CLK_TOP_MUX_AUD_2] = "top_mux_aud_2",
+ [CLK_TOP_APLL2_CK] = "top_apll2_ck",
+ [CLK_TOP_MUX_AUD_ENG1] = "top_mux_aud_eng1",
+ [CLK_TOP_APLL1_D8] = "top_apll1_d8",
+ [CLK_TOP_MUX_AUD_ENG2] = "top_mux_aud_eng2",
+ [CLK_TOP_APLL2_D8] = "top_apll2_d8",
+ [CLK_TOP_I2S0_M_SEL] = "top_i2s0_m_sel",
+ [CLK_TOP_I2S1_M_SEL] = "top_i2s1_m_sel",
+ [CLK_TOP_I2S2_M_SEL] = "top_i2s2_m_sel",
+ [CLK_TOP_I2S3_M_SEL] = "top_i2s3_m_sel",
+ [CLK_TOP_I2S4_M_SEL] = "top_i2s4_m_sel",
+ [CLK_TOP_I2S5_M_SEL] = "top_i2s5_m_sel",
+ [CLK_TOP_APLL12_DIV0] = "top_apll12_div0",
+ [CLK_TOP_APLL12_DIV1] = "top_apll12_div1",
+ [CLK_TOP_APLL12_DIV2] = "top_apll12_div2",
+ [CLK_TOP_APLL12_DIV3] = "top_apll12_div3",
+ [CLK_TOP_APLL12_DIV4] = "top_apll12_div4",
+ [CLK_TOP_APLL12_DIVB] = "top_apll12_divb",
+ [CLK_CLK26M] = "top_clk26m_clk",
+};
+
+int mt8183_init_clock(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int i;
+
+ afe_priv->clk = devm_kcalloc(afe->dev, CLK_NUM, sizeof(*afe_priv->clk),
+ GFP_KERNEL);
+ if (!afe_priv->clk)
+ return -ENOMEM;
+
+ for (i = 0; i < CLK_NUM; i++) {
+ afe_priv->clk[i] = devm_clk_get(afe->dev, aud_clks[i]);
+ if (IS_ERR(afe_priv->clk[i])) {
+ dev_err(afe->dev, "%s(), devm_clk_get %s fail, ret %ld\n",
+ __func__, aud_clks[i],
+ PTR_ERR(afe_priv->clk[i]));
+ return PTR_ERR(afe_priv->clk[i]);
+ }
+ }
+
+ return 0;
+}
+
+int mt8183_afe_enable_clock(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_INFRA_SYS_AUDIO], ret);
+ goto CLK_INFRA_SYS_AUDIO_ERR;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_MUX_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIO], ret);
+ goto CLK_MUX_AUDIO_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clk[CLK_MUX_AUDIO],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIO],
+ aud_clks[CLK_CLK26M], ret);
+ goto CLK_MUX_AUDIO_ERR;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIOINTBUS], ret);
+ goto CLK_MUX_AUDIO_INTBUS_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clk[CLK_MUX_AUDIOINTBUS],
+ afe_priv->clk[CLK_TOP_SYSPLL_D2_D4]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIOINTBUS],
+ aud_clks[CLK_TOP_SYSPLL_D2_D4], ret);
+ goto CLK_MUX_AUDIO_INTBUS_ERR;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_AFE]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_AFE], ret);
+ goto CLK_AFE_ERR;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_I2S1_BCLK_SW]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_I2S1_BCLK_SW], ret);
+ goto CLK_I2S1_BCLK_SW_ERR;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_I2S2_BCLK_SW]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_I2S2_BCLK_SW], ret);
+ goto CLK_I2S2_BCLK_SW_ERR;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_I2S3_BCLK_SW]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_I2S3_BCLK_SW], ret);
+ goto CLK_I2S3_BCLK_SW_ERR;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_I2S4_BCLK_SW]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_I2S4_BCLK_SW], ret);
+ goto CLK_I2S4_BCLK_SW_ERR;
+ }
+
+ return 0;
+
+CLK_I2S4_BCLK_SW_ERR:
+ clk_disable_unprepare(afe_priv->clk[CLK_I2S3_BCLK_SW]);
+CLK_I2S3_BCLK_SW_ERR:
+ clk_disable_unprepare(afe_priv->clk[CLK_I2S2_BCLK_SW]);
+CLK_I2S2_BCLK_SW_ERR:
+ clk_disable_unprepare(afe_priv->clk[CLK_I2S1_BCLK_SW]);
+CLK_I2S1_BCLK_SW_ERR:
+ clk_disable_unprepare(afe_priv->clk[CLK_AFE]);
+CLK_AFE_ERR:
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+CLK_MUX_AUDIO_INTBUS_ERR:
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIO]);
+CLK_MUX_AUDIO_ERR:
+ clk_disable_unprepare(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+CLK_INFRA_SYS_AUDIO_ERR:
+ return ret;
+}
+
+int mt8183_afe_disable_clock(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+ clk_disable_unprepare(afe_priv->clk[CLK_I2S4_BCLK_SW]);
+ clk_disable_unprepare(afe_priv->clk[CLK_I2S3_BCLK_SW]);
+ clk_disable_unprepare(afe_priv->clk[CLK_I2S2_BCLK_SW]);
+ clk_disable_unprepare(afe_priv->clk[CLK_I2S1_BCLK_SW]);
+ clk_disable_unprepare(afe_priv->clk[CLK_AFE]);
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIO]);
+ clk_disable_unprepare(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+
+ return 0;
+}
+
+/* apll */
+static int apll1_mux_setting(struct mtk_base_afe *afe, bool enable)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_1]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_1], ret);
+ goto ERR_ENABLE_CLK_TOP_MUX_AUD_1;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_1],
+ afe_priv->clk[CLK_TOP_APLL1_CK]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_1],
+ aud_clks[CLK_TOP_APLL1_CK], ret);
+ goto ERR_SELECT_CLK_TOP_MUX_AUD_1;
+ }
+
+		/* 180.6336MHz / 8 = 22.5792MHz */
+ ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG1], ret);
+ goto ERR_ENABLE_CLK_TOP_MUX_AUD_ENG1;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1],
+ afe_priv->clk[CLK_TOP_APLL1_D8]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG1],
+ aud_clks[CLK_TOP_APLL1_D8], ret);
+ goto ERR_SELECT_CLK_TOP_MUX_AUD_ENG1;
+ }
+ } else {
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG1],
+ aud_clks[CLK_CLK26M], ret);
+ goto EXIT;
+ }
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1]);
+
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_1],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_1],
+ aud_clks[CLK_CLK26M], ret);
+ goto EXIT;
+ }
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_1]);
+ }
+
+ return 0;
+
+ERR_SELECT_CLK_TOP_MUX_AUD_ENG1:
+ clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1],
+ afe_priv->clk[CLK_CLK26M]);
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1]);
+ERR_ENABLE_CLK_TOP_MUX_AUD_ENG1:
+ERR_SELECT_CLK_TOP_MUX_AUD_1:
+ clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_1],
+ afe_priv->clk[CLK_CLK26M]);
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_1]);
+ERR_ENABLE_CLK_TOP_MUX_AUD_1:
+EXIT:
+ return ret;
+}
+
+static int apll2_mux_setting(struct mtk_base_afe *afe, bool enable)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_2]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_2], ret);
+ goto ERR_ENABLE_CLK_TOP_MUX_AUD_2;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_2],
+ afe_priv->clk[CLK_TOP_APLL2_CK]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_2],
+ aud_clks[CLK_TOP_APLL2_CK], ret);
+ goto ERR_SELECT_CLK_TOP_MUX_AUD_2;
+ }
+
+		/* 196.608MHz / 8 = 24.576MHz */
+ ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG2], ret);
+ goto ERR_ENABLE_CLK_TOP_MUX_AUD_ENG2;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2],
+ afe_priv->clk[CLK_TOP_APLL2_D8]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG2],
+ aud_clks[CLK_TOP_APLL2_D8], ret);
+ goto ERR_SELECT_CLK_TOP_MUX_AUD_ENG2;
+ }
+ } else {
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG2],
+ aud_clks[CLK_CLK26M], ret);
+ goto EXIT;
+ }
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2]);
+
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_2],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_2],
+ aud_clks[CLK_CLK26M], ret);
+ goto EXIT;
+ }
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_2]);
+ }
+
+ return 0;
+
+ERR_SELECT_CLK_TOP_MUX_AUD_ENG2:
+ clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2],
+ afe_priv->clk[CLK_CLK26M]);
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2]);
+ERR_ENABLE_CLK_TOP_MUX_AUD_ENG2:
+ERR_SELECT_CLK_TOP_MUX_AUD_2:
+ clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_2],
+ afe_priv->clk[CLK_CLK26M]);
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_2]);
+ERR_ENABLE_CLK_TOP_MUX_AUD_2:
+EXIT:
+ return ret;
+}
+
+int mt8183_apll1_enable(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ /* setting for APLL */
+ apll1_mux_setting(afe, true);
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_APLL22M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_APLL22M], ret);
+ goto ERR_CLK_APLL22M;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_APLL1_TUNER]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_APLL1_TUNER], ret);
+ goto ERR_CLK_APLL1_TUNER;
+ }
+
+ regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG,
+ 0x0000FFF7, 0x00000832);
+ regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, 0x1, 0x1);
+
+ regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+ AFE_22M_ON_MASK_SFT,
+ 0x1 << AFE_22M_ON_SFT);
+
+ return 0;
+
+ERR_CLK_APLL1_TUNER:
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL22M]);
+ERR_CLK_APLL22M:
+ return ret;
+}
+
+void mt8183_apll1_disable(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+ regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+ AFE_22M_ON_MASK_SFT,
+ 0x0 << AFE_22M_ON_SFT);
+
+ regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, 0x1, 0x0);
+
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL1_TUNER]);
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL22M]);
+
+ apll1_mux_setting(afe, false);
+}
+
+int mt8183_apll2_enable(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ /* setting for APLL */
+ apll2_mux_setting(afe, true);
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_APLL24M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_APLL24M], ret);
+ goto ERR_CLK_APLL24M;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_APLL2_TUNER]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_APLL2_TUNER], ret);
+ goto ERR_CLK_APLL2_TUNER;
+ }
+
+ regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG,
+ 0x0000FFF7, 0x00000634);
+ regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, 0x1, 0x1);
+
+ regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+ AFE_24M_ON_MASK_SFT,
+ 0x1 << AFE_24M_ON_SFT);
+
+ return 0;
+
+ERR_CLK_APLL2_TUNER:
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL24M]);
+ERR_CLK_APLL24M:
+ return ret;
+}
+
+void mt8183_apll2_disable(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+ regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+ AFE_24M_ON_MASK_SFT,
+ 0x0 << AFE_24M_ON_SFT);
+
+ regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, 0x1, 0x0);
+
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL2_TUNER]);
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL24M]);
+
+ apll2_mux_setting(afe, false);
+}
+
+int mt8183_get_apll_rate(struct mtk_base_afe *afe, int apll)
+{
+ return (apll == MT8183_APLL1) ? 180633600 : 196608000;
+}
+
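+/*
+ * Rates in the 8kHz family (48k etc.) use APLL2 (196.608MHz); the 44.1kHz
+ * family uses APLL1 (180.6336MHz).
+ */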
+int mt8183_get_apll_by_rate(struct mtk_base_afe *afe, int rate)
+{
+ return ((rate % 8000) == 0) ? MT8183_APLL2 : MT8183_APLL1;
+}
+
+int mt8183_get_apll_by_name(struct mtk_base_afe *afe, const char *name)
+{
+ if (strcmp(name, APLL1_W_NAME) == 0)
+ return MT8183_APLL1;
+ else
+ return MT8183_APLL2;
+}
+
+/* mck */
+struct mt8183_mck_div {
+ int m_sel_id;
+ int div_clk_id;
+};
+
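+/* an id of -1 means the MCK has no dedicated mux or divider clock */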
+static const struct mt8183_mck_div mck_div[MT8183_MCK_NUM] = {
+ [MT8183_I2S0_MCK] = {
+ .m_sel_id = CLK_TOP_I2S0_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV0,
+ },
+ [MT8183_I2S1_MCK] = {
+ .m_sel_id = CLK_TOP_I2S1_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV1,
+ },
+ [MT8183_I2S2_MCK] = {
+ .m_sel_id = CLK_TOP_I2S2_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV2,
+ },
+ [MT8183_I2S3_MCK] = {
+ .m_sel_id = CLK_TOP_I2S3_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV3,
+ },
+ [MT8183_I2S4_MCK] = {
+ .m_sel_id = CLK_TOP_I2S4_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV4,
+ },
+ [MT8183_I2S4_BCK] = {
+ .m_sel_id = -1,
+ .div_clk_id = CLK_TOP_APLL12_DIVB,
+ },
+ [MT8183_I2S5_MCK] = {
+ .m_sel_id = -1,
+ .div_clk_id = -1,
+ },
+};
+
+int mt8183_mck_enable(struct mtk_base_afe *afe, int mck_id, int rate)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int apll = mt8183_get_apll_by_rate(afe, rate);
+ int apll_clk_id = apll == MT8183_APLL1 ?
+ CLK_TOP_MUX_AUD_1 : CLK_TOP_MUX_AUD_2;
+ int m_sel_id = mck_div[mck_id].m_sel_id;
+ int div_clk_id = mck_div[mck_id].div_clk_id;
+ int ret;
+
+	/* i2s5 mck is not supported */
+ if (mck_id == MT8183_I2S5_MCK)
+ return 0;
+
+ /* select apll */
+ if (m_sel_id >= 0) {
+ ret = clk_prepare_enable(afe_priv->clk[m_sel_id]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[m_sel_id], ret);
+ goto ERR_ENABLE_MCLK;
+ }
+ ret = clk_set_parent(afe_priv->clk[m_sel_id],
+ afe_priv->clk[apll_clk_id]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[m_sel_id],
+ aud_clks[apll_clk_id], ret);
+ goto ERR_SELECT_MCLK;
+ }
+ }
+
+ /* enable div, set rate */
+ ret = clk_prepare_enable(afe_priv->clk[div_clk_id]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[div_clk_id], ret);
+ goto ERR_ENABLE_MCLK_DIV;
+ }
+ ret = clk_set_rate(afe_priv->clk[div_clk_id], rate);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_set_rate %s, rate %d, fail %d\n",
+ __func__, aud_clks[div_clk_id],
+ rate, ret);
+ goto ERR_SET_MCLK_RATE;
+ }
+
+ return 0;
+
+ERR_SET_MCLK_RATE:
+ clk_disable_unprepare(afe_priv->clk[div_clk_id]);
+ERR_ENABLE_MCLK_DIV:
+ERR_SELECT_MCLK:
+ if (m_sel_id >= 0)
+ clk_disable_unprepare(afe_priv->clk[m_sel_id]);
+ERR_ENABLE_MCLK:
+ return ret;
+}
+
+void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int m_sel_id = mck_div[mck_id].m_sel_id;
+ int div_clk_id = mck_div[mck_id].div_clk_id;
+
+ clk_disable_unprepare(afe_priv->clk[div_clk_id]);
+ if (m_sel_id >= 0)
+ clk_disable_unprepare(afe_priv->clk[m_sel_id]);
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-clk.h b/sound/soc/mediatek/mt8183/mt8183-afe-clk.h
new file mode 100644
index 000000000000..2c510aa80fc7
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-clk.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt8183-afe-clk.h -- Mediatek 8183 afe clock ctrl definition
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef _MT8183_AFE_CLK_H_
+#define _MT8183_AFE_CLK_H_
+
+/* APLL */
+#define APLL1_W_NAME "APLL1"
+#define APLL2_W_NAME "APLL2"
+enum {
+ MT8183_APLL1 = 0,
+ MT8183_APLL2,
+};
+
+struct mtk_base_afe;
+
+int mt8183_init_clock(struct mtk_base_afe *afe);
+int mt8183_afe_enable_clock(struct mtk_base_afe *afe);
+int mt8183_afe_disable_clock(struct mtk_base_afe *afe);
+
+int mt8183_apll1_enable(struct mtk_base_afe *afe);
+void mt8183_apll1_disable(struct mtk_base_afe *afe);
+
+int mt8183_apll2_enable(struct mtk_base_afe *afe);
+void mt8183_apll2_disable(struct mtk_base_afe *afe);
+
+int mt8183_get_apll_rate(struct mtk_base_afe *afe, int apll);
+int mt8183_get_apll_by_rate(struct mtk_base_afe *afe, int rate);
+int mt8183_get_apll_by_name(struct mtk_base_afe *afe, const char *name);
+
+int mt8183_mck_enable(struct mtk_base_afe *afe, int mck_id, int rate);
+void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id);
+#endif
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-common.h b/sound/soc/mediatek/mt8183/mt8183-afe-common.h
new file mode 100644
index 000000000000..b220e7a7db7e
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-common.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt8183-afe-common.h -- Mediatek 8183 audio driver definitions
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef _MT_8183_AFE_COMMON_H_
+#define _MT_8183_AFE_COMMON_H_
+
+#include <sound/soc.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+#include "../common/mtk-base-afe.h"
+
+enum {
+ MT8183_MEMIF_DL1,
+ MT8183_MEMIF_DL2,
+ MT8183_MEMIF_DL3,
+ MT8183_MEMIF_VUL12,
+ MT8183_MEMIF_VUL2,
+ MT8183_MEMIF_AWB,
+ MT8183_MEMIF_AWB2,
+ MT8183_MEMIF_MOD_DAI,
+ MT8183_MEMIF_HDMI,
+ MT8183_MEMIF_NUM,
+ MT8183_DAI_ADDA = MT8183_MEMIF_NUM,
+ MT8183_DAI_PCM_1,
+ MT8183_DAI_PCM_2,
+ MT8183_DAI_I2S_0,
+ MT8183_DAI_I2S_1,
+ MT8183_DAI_I2S_2,
+ MT8183_DAI_I2S_3,
+ MT8183_DAI_I2S_5,
+ MT8183_DAI_TDM,
+ MT8183_DAI_HOSTLESS_LPBK,
+ MT8183_DAI_HOSTLESS_SPEECH,
+ MT8183_DAI_NUM,
+};
+
+enum {
+ MT8183_IRQ_0,
+ MT8183_IRQ_1,
+ MT8183_IRQ_2,
+ MT8183_IRQ_3,
+ MT8183_IRQ_4,
+ MT8183_IRQ_5,
+ MT8183_IRQ_6,
+ MT8183_IRQ_7,
+	MT8183_IRQ_8,	/* hw bundled to TDM */
+ MT8183_IRQ_11,
+ MT8183_IRQ_12,
+ MT8183_IRQ_NUM,
+};
+
+enum {
+ MT8183_MTKAIF_PROTOCOL_1 = 0,
+ MT8183_MTKAIF_PROTOCOL_2,
+ MT8183_MTKAIF_PROTOCOL_2_CLK_P2,
+};
+
+/* MCLK */
+enum {
+ MT8183_I2S0_MCK = 0,
+ MT8183_I2S1_MCK,
+ MT8183_I2S2_MCK,
+ MT8183_I2S3_MCK,
+ MT8183_I2S4_MCK,
+ MT8183_I2S4_BCK,
+ MT8183_I2S5_MCK,
+ MT8183_MCK_NUM,
+};
+
+struct clk;
+
+struct mt8183_afe_private {
+ struct clk **clk;
+
+ int pm_runtime_bypass_reg_ctl;
+
+ /* dai */
+ void *dai_priv[MT8183_DAI_NUM];
+
+ /* adda */
+ int mtkaif_protocol;
+ int mtkaif_calibration_ok;
+ int mtkaif_chosen_phase[4];
+ int mtkaif_phase_cycle[4];
+ int mtkaif_calibration_num_phase;
+ int mtkaif_dmic;
+
+ /* mck */
+ int mck_rate[MT8183_MCK_NUM];
+};
+
+unsigned int mt8183_general_rate_transform(struct device *dev,
+ unsigned int rate);
+unsigned int mt8183_rate_transform(struct device *dev,
+ unsigned int rate, int aud_blk);
+
+/* dai register */
+int mt8183_dai_adda_register(struct mtk_base_afe *afe);
+int mt8183_dai_pcm_register(struct mtk_base_afe *afe);
+int mt8183_dai_i2s_register(struct mtk_base_afe *afe);
+int mt8183_dai_tdm_register(struct mtk_base_afe *afe);
+int mt8183_dai_hostless_register(struct mtk_base_afe *afe);
+#endif
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
new file mode 100644
index 000000000000..4e045dd305a7
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
@@ -0,0 +1,1237 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Mediatek ALSA SoC AFE platform driver for 8183
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+
+#include "mt8183-afe-common.h"
+#include "mt8183-afe-clk.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-afe-fe-dai.h"
+
+enum {
+ MTK_AFE_RATE_8K = 0,
+ MTK_AFE_RATE_11K = 1,
+ MTK_AFE_RATE_12K = 2,
+ MTK_AFE_RATE_384K = 3,
+ MTK_AFE_RATE_16K = 4,
+ MTK_AFE_RATE_22K = 5,
+ MTK_AFE_RATE_24K = 6,
+ MTK_AFE_RATE_130K = 7,
+ MTK_AFE_RATE_32K = 8,
+ MTK_AFE_RATE_44K = 9,
+ MTK_AFE_RATE_48K = 10,
+ MTK_AFE_RATE_88K = 11,
+ MTK_AFE_RATE_96K = 12,
+ MTK_AFE_RATE_176K = 13,
+ MTK_AFE_RATE_192K = 14,
+ MTK_AFE_RATE_260K = 15,
+};
+
+enum {
+ MTK_AFE_DAI_MEMIF_RATE_8K = 0,
+ MTK_AFE_DAI_MEMIF_RATE_16K = 1,
+ MTK_AFE_DAI_MEMIF_RATE_32K = 2,
+ MTK_AFE_DAI_MEMIF_RATE_48K = 3,
+};
+
+enum {
+ MTK_AFE_PCM_RATE_8K = 0,
+ MTK_AFE_PCM_RATE_16K = 1,
+ MTK_AFE_PCM_RATE_32K = 2,
+ MTK_AFE_PCM_RATE_48K = 3,
+};
+
+unsigned int mt8183_general_rate_transform(struct device *dev,
+ unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_RATE_8K;
+ case 11025:
+ return MTK_AFE_RATE_11K;
+ case 12000:
+ return MTK_AFE_RATE_12K;
+ case 16000:
+ return MTK_AFE_RATE_16K;
+ case 22050:
+ return MTK_AFE_RATE_22K;
+ case 24000:
+ return MTK_AFE_RATE_24K;
+ case 32000:
+ return MTK_AFE_RATE_32K;
+ case 44100:
+ return MTK_AFE_RATE_44K;
+ case 48000:
+ return MTK_AFE_RATE_48K;
+ case 88200:
+ return MTK_AFE_RATE_88K;
+ case 96000:
+ return MTK_AFE_RATE_96K;
+ case 130000:
+ return MTK_AFE_RATE_130K;
+ case 176400:
+ return MTK_AFE_RATE_176K;
+ case 192000:
+ return MTK_AFE_RATE_192K;
+ case 260000:
+ return MTK_AFE_RATE_260K;
+ default:
+ dev_warn(dev, "%s(), rate %u invalid, use %d!!!\n",
+ __func__, rate, MTK_AFE_RATE_48K);
+ return MTK_AFE_RATE_48K;
+ }
+}
+
+static unsigned int dai_memif_rate_transform(struct device *dev,
+ unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_DAI_MEMIF_RATE_8K;
+ case 16000:
+ return MTK_AFE_DAI_MEMIF_RATE_16K;
+ case 32000:
+ return MTK_AFE_DAI_MEMIF_RATE_32K;
+ case 48000:
+ return MTK_AFE_DAI_MEMIF_RATE_48K;
+ default:
+ dev_warn(dev, "%s(), rate %u invalid, use %d!!!\n",
+ __func__, rate, MTK_AFE_DAI_MEMIF_RATE_16K);
+ return MTK_AFE_DAI_MEMIF_RATE_16K;
+ }
+}
+
+unsigned int mt8183_rate_transform(struct device *dev,
+ unsigned int rate, int aud_blk)
+{
+ switch (aud_blk) {
+ case MT8183_MEMIF_MOD_DAI:
+ return dai_memif_rate_transform(dev, rate);
+ default:
+ return mt8183_general_rate_transform(dev, rate);
+ }
+}
+
+static const struct snd_pcm_hardware mt8183_afe_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .period_bytes_min = 256,
+ .period_bytes_max = 4 * 48 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+ .buffer_bytes_max = 8 * 48 * 1024,
+ .fifo_size = 0,
+};
+
+static int mt8183_memif_fs(struct snd_pcm_substream *substream,
+ unsigned int rate)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+ int id = rtd->cpu_dai->id;
+
+ return mt8183_rate_transform(afe->dev, rate, id);
+}
+
+static int mt8183_irq_fs(struct snd_pcm_substream *substream, unsigned int rate)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+
+ return mt8183_general_rate_transform(afe->dev, rate);
+}
+
+#define MTK_PCM_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_PCM_DAI_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_48000)
+
+#define MTK_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mt8183_memif_dai_driver[] = {
+	/* FE DAIs: memory interfaces to CPU */
+ {
+ .name = "DL1",
+ .id = MT8183_MEMIF_DL1,
+ .playback = {
+ .stream_name = "DL1",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+ {
+ .name = "DL2",
+ .id = MT8183_MEMIF_DL2,
+ .playback = {
+ .stream_name = "DL2",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+ {
+ .name = "DL3",
+ .id = MT8183_MEMIF_DL3,
+ .playback = {
+ .stream_name = "DL3",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+ {
+ .name = "UL1",
+ .id = MT8183_MEMIF_VUL12,
+ .capture = {
+ .stream_name = "UL1",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+ {
+ .name = "UL2",
+ .id = MT8183_MEMIF_AWB,
+ .capture = {
+ .stream_name = "UL2",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+ {
+ .name = "UL3",
+ .id = MT8183_MEMIF_VUL2,
+ .capture = {
+ .stream_name = "UL3",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+ {
+ .name = "UL4",
+ .id = MT8183_MEMIF_AWB2,
+ .capture = {
+ .stream_name = "UL4",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+ {
+ .name = "UL_MONO_1",
+ .id = MT8183_MEMIF_MOD_DAI,
+ .capture = {
+ .stream_name = "UL_MONO_1",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = MTK_PCM_DAI_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+ {
+ .name = "HDMI",
+ .id = MT8183_MEMIF_HDMI,
+ .playback = {
+ .stream_name = "HDMI",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_afe_fe_ops,
+ },
+};
+
+/* dma widget & routes */
+static const struct snd_kcontrol_new memif_ul1_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN21,
+ I_ADDA_UL_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul1_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN22,
+ I_ADDA_UL_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul2_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN5,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN5,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN5,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN5,
+ I_DL3_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul2_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN6,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN6,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN6,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN6,
+ I_DL3_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul3_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN32,
+ I_ADDA_UL_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul3_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN33,
+ I_ADDA_UL_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul4_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN38,
+ I_ADDA_UL_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul4_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN39,
+ I_ADDA_UL_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul_mono_1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN12,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN12,
+ I_ADDA_UL_CH2, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget mt8183_memif_widgets[] = {
+ /* memif */
+ SND_SOC_DAPM_MIXER("UL1_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul1_ch1_mix, ARRAY_SIZE(memif_ul1_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL1_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul1_ch2_mix, ARRAY_SIZE(memif_ul1_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL2_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul2_ch1_mix, ARRAY_SIZE(memif_ul2_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL2_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul2_ch2_mix, ARRAY_SIZE(memif_ul2_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL3_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul3_ch1_mix, ARRAY_SIZE(memif_ul3_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL3_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul3_ch2_mix, ARRAY_SIZE(memif_ul3_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL4_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul4_ch1_mix, ARRAY_SIZE(memif_ul4_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL4_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul4_ch2_mix, ARRAY_SIZE(memif_ul4_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL_MONO_1_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul_mono_1_mix,
+ ARRAY_SIZE(memif_ul_mono_1_mix)),
+};
+
+static const struct snd_soc_dapm_route mt8183_memif_routes[] = {
+ /* capture */
+ {"UL1", NULL, "UL1_CH1"},
+ {"UL1", NULL, "UL1_CH2"},
+ {"UL1_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+ {"UL1_CH2", "ADDA_UL_CH2", "ADDA Capture"},
+
+ {"UL2", NULL, "UL2_CH1"},
+ {"UL2", NULL, "UL2_CH2"},
+ {"UL2_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+ {"UL2_CH2", "ADDA_UL_CH2", "ADDA Capture"},
+
+ {"UL3", NULL, "UL3_CH1"},
+ {"UL3", NULL, "UL3_CH2"},
+ {"UL3_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+ {"UL3_CH2", "ADDA_UL_CH2", "ADDA Capture"},
+
+ {"UL4", NULL, "UL4_CH1"},
+ {"UL4", NULL, "UL4_CH2"},
+ {"UL4_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+ {"UL4_CH2", "ADDA_UL_CH2", "ADDA Capture"},
+
+ {"UL_MONO_1", NULL, "UL_MONO_1_CH1"},
+ {"UL_MONO_1_CH1", "ADDA_UL_CH1", "ADDA Capture"},
+ {"UL_MONO_1_CH1", "ADDA_UL_CH2", "ADDA Capture"},
+};
+
+static const struct snd_soc_component_driver mt8183_afe_pcm_dai_component = {
+ .name = "mt8183-afe-pcm-dai",
+};
+
+static const struct mtk_base_memif_data memif_data[MT8183_MEMIF_NUM] = {
+ [MT8183_MEMIF_DL1] = {
+ .name = "DL1",
+ .id = MT8183_MEMIF_DL1,
+ .reg_ofs_base = AFE_DL1_BASE,
+ .reg_ofs_cur = AFE_DL1_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = DL1_MODE_SFT,
+ .fs_maskbit = DL1_MODE_MASK,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = DL1_DATA_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL1_ON_SFT,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = DL1_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8183_MEMIF_DL2] = {
+ .name = "DL2",
+ .id = MT8183_MEMIF_DL2,
+ .reg_ofs_base = AFE_DL2_BASE,
+ .reg_ofs_cur = AFE_DL2_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = DL2_MODE_SFT,
+ .fs_maskbit = DL2_MODE_MASK,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = DL2_DATA_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL2_ON_SFT,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = DL2_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8183_MEMIF_DL3] = {
+ .name = "DL3",
+ .id = MT8183_MEMIF_DL3,
+ .reg_ofs_base = AFE_DL3_BASE,
+ .reg_ofs_cur = AFE_DL3_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = DL3_MODE_SFT,
+ .fs_maskbit = DL3_MODE_MASK,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = DL3_DATA_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL3_ON_SFT,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = DL3_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8183_MEMIF_VUL2] = {
+ .name = "VUL2",
+ .id = MT8183_MEMIF_VUL2,
+ .reg_ofs_base = AFE_VUL2_BASE,
+ .reg_ofs_cur = AFE_VUL2_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = VUL2_MODE_SFT,
+ .fs_maskbit = VUL2_MODE_MASK,
+ .mono_reg = AFE_DAC_CON2,
+ .mono_shift = VUL2_DATA_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = VUL2_ON_SFT,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = VUL2_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8183_MEMIF_AWB] = {
+ .name = "AWB",
+ .id = MT8183_MEMIF_AWB,
+ .reg_ofs_base = AFE_AWB_BASE,
+ .reg_ofs_cur = AFE_AWB_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = AWB_MODE_SFT,
+ .fs_maskbit = AWB_MODE_MASK,
+ .mono_reg = AFE_DAC_CON1,
+ .mono_shift = AWB_DATA_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = AWB_ON_SFT,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = AWB_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8183_MEMIF_AWB2] = {
+ .name = "AWB2",
+ .id = MT8183_MEMIF_AWB2,
+ .reg_ofs_base = AFE_AWB2_BASE,
+ .reg_ofs_cur = AFE_AWB2_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = AWB2_MODE_SFT,
+ .fs_maskbit = AWB2_MODE_MASK,
+ .mono_reg = AFE_DAC_CON2,
+ .mono_shift = AWB2_DATA_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = AWB2_ON_SFT,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = AWB2_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8183_MEMIF_VUL12] = {
+ .name = "VUL12",
+ .id = MT8183_MEMIF_VUL12,
+ .reg_ofs_base = AFE_VUL_D2_BASE,
+ .reg_ofs_cur = AFE_VUL_D2_CUR,
+ .fs_reg = AFE_DAC_CON0,
+ .fs_shift = VUL12_MODE_SFT,
+ .fs_maskbit = VUL12_MODE_MASK,
+ .mono_reg = AFE_DAC_CON0,
+ .mono_shift = VUL12_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = VUL12_ON_SFT,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = VUL12_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8183_MEMIF_MOD_DAI] = {
+ .name = "MOD_DAI",
+ .id = MT8183_MEMIF_MOD_DAI,
+ .reg_ofs_base = AFE_MOD_DAI_BASE,
+ .reg_ofs_cur = AFE_MOD_DAI_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = MOD_DAI_MODE_SFT,
+ .fs_maskbit = MOD_DAI_MODE_MASK,
+ .mono_reg = -1,
+ .mono_shift = 0,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = MOD_DAI_ON_SFT,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = MOD_DAI_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8183_MEMIF_HDMI] = {
+ .name = "HDMI",
+ .id = MT8183_MEMIF_HDMI,
+ .reg_ofs_base = AFE_HDMI_OUT_BASE,
+ .reg_ofs_cur = AFE_HDMI_OUT_CUR,
+ .fs_reg = -1,
+ .fs_shift = -1,
+ .fs_maskbit = -1,
+ .mono_reg = -1,
+ .mono_shift = -1,
+ .enable_reg = -1, /* controlled by TDM for synchronized start */
+ .enable_shift = -1,
+ .hd_reg = AFE_MEMIF_HD_MODE,
+ .hd_shift = HDMI_HD_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+};
+
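+/*
+ * Per-IRQ register layout: counter, sample-rate mode, enable and clear
+ * bit fields for each AFE-to-MCU interrupt line.
+ */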
+static const struct mtk_base_irq_data irq_data[MT8183_IRQ_NUM] = {
+ [MT8183_IRQ_0] = {
+ .id = MT8183_IRQ_0,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT0,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ0_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ0_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ0_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ0_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_1] = {
+ .id = MT8183_IRQ_1,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT1,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ1_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ1_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ1_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ1_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_2] = {
+ .id = MT8183_IRQ_2,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT2,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ2_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ2_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ2_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ2_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_3] = {
+ .id = MT8183_IRQ_3,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT3,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ3_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ3_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ3_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ3_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_4] = {
+ .id = MT8183_IRQ_4,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT4,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ4_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ4_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ4_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ4_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_5] = {
+ .id = MT8183_IRQ_5,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT5,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ5_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ5_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ5_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ5_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_6] = {
+ .id = MT8183_IRQ_6,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT6,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ6_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ6_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ6_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ6_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_7] = {
+ .id = MT8183_IRQ_7,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT7,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ7_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ7_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ7_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ7_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_8] = {
+ .id = MT8183_IRQ_8,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT8,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = -1,
+ .irq_fs_shift = -1,
+ .irq_fs_maskbit = -1,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ8_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ8_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_11] = {
+ .id = MT8183_IRQ_11,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT11,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ11_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ11_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ11_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ11_MCU_CLR_SFT,
+ },
+ [MT8183_IRQ_12] = {
+ .id = MT8183_IRQ_12,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT12,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ12_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ12_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ12_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ12_MCU_CLR_SFT,
+ },
+};
+
+static bool mt8183_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* these auto-generated registers have read-only bits, so mark them volatile */
+ /* volatile registers cannot be cached, so they cannot be restored after power off */
+ switch (reg) {
+ case AUDIO_TOP_CON0: /* reg bit controlled by CCF */
+ case AUDIO_TOP_CON1: /* reg bit controlled by CCF */
+ case AUDIO_TOP_CON3:
+ case AFE_DL1_CUR:
+ case AFE_DL1_END:
+ case AFE_DL2_CUR:
+ case AFE_DL2_END:
+ case AFE_AWB_END:
+ case AFE_AWB_CUR:
+ case AFE_VUL_END:
+ case AFE_VUL_CUR:
+ case AFE_MEMIF_MON0:
+ case AFE_MEMIF_MON1:
+ case AFE_MEMIF_MON2:
+ case AFE_MEMIF_MON3:
+ case AFE_MEMIF_MON4:
+ case AFE_MEMIF_MON5:
+ case AFE_MEMIF_MON6:
+ case AFE_MEMIF_MON7:
+ case AFE_MEMIF_MON8:
+ case AFE_MEMIF_MON9:
+ case AFE_ADDA_SRC_DEBUG_MON0:
+ case AFE_ADDA_SRC_DEBUG_MON1:
+ case AFE_ADDA_UL_SRC_MON0:
+ case AFE_ADDA_UL_SRC_MON1:
+ case AFE_SIDETONE_MON:
+ case AFE_SIDETONE_CON0:
+ case AFE_SIDETONE_COEFF:
+ case AFE_BUS_MON0:
+ case AFE_MRGIF_MON0:
+ case AFE_MRGIF_MON1:
+ case AFE_MRGIF_MON2:
+ case AFE_I2S_MON:
+ case AFE_DAC_MON:
+ case AFE_VUL2_END:
+ case AFE_VUL2_CUR:
+ case AFE_IRQ0_MCU_CNT_MON:
+ case AFE_IRQ6_MCU_CNT_MON:
+ case AFE_MOD_DAI_END:
+ case AFE_MOD_DAI_CUR:
+ case AFE_VUL_D2_END:
+ case AFE_VUL_D2_CUR:
+ case AFE_DL3_CUR:
+ case AFE_DL3_END:
+ case AFE_HDMI_OUT_CON0:
+ case AFE_HDMI_OUT_CUR:
+ case AFE_HDMI_OUT_END:
+ case AFE_IRQ3_MCU_CNT_MON:
+ case AFE_IRQ4_MCU_CNT_MON:
+ case AFE_IRQ_MCU_STATUS:
+ case AFE_IRQ_MCU_CLR:
+ case AFE_IRQ_MCU_MON2:
+ case AFE_IRQ1_MCU_CNT_MON:
+ case AFE_IRQ2_MCU_CNT_MON:
+ case AFE_IRQ1_MCU_EN_CNT_MON:
+ case AFE_IRQ5_MCU_CNT_MON:
+ case AFE_IRQ7_MCU_CNT_MON:
+ case AFE_GAIN1_CUR:
+ case AFE_GAIN2_CUR:
+ case AFE_SRAM_DELSEL_CON0:
+ case AFE_SRAM_DELSEL_CON2:
+ case AFE_SRAM_DELSEL_CON3:
+ case AFE_ASRC_2CH_CON12:
+ case AFE_ASRC_2CH_CON13:
+ case PCM_INTF_CON2:
+ case FPGA_CFG0:
+ case FPGA_CFG1:
+ case FPGA_CFG2:
+ case FPGA_CFG3:
+ case AUDIO_TOP_DBG_MON0:
+ case AUDIO_TOP_DBG_MON1:
+ case AFE_IRQ8_MCU_CNT_MON:
+ case AFE_IRQ11_MCU_CNT_MON:
+ case AFE_IRQ12_MCU_CNT_MON:
+ case AFE_CBIP_MON0:
+ case AFE_CBIP_SLV_MUX_MON0:
+ case AFE_CBIP_SLV_DECODER_MON0:
+ case AFE_ADDA6_SRC_DEBUG_MON0:
+ case AFE_ADD6A_UL_SRC_MON0:
+ case AFE_ADDA6_UL_SRC_MON1:
+ case AFE_DL1_CUR_MSB:
+ case AFE_DL2_CUR_MSB:
+ case AFE_AWB_CUR_MSB:
+ case AFE_VUL_CUR_MSB:
+ case AFE_VUL2_CUR_MSB:
+ case AFE_MOD_DAI_CUR_MSB:
+ case AFE_VUL_D2_CUR_MSB:
+ case AFE_DL3_CUR_MSB:
+ case AFE_HDMI_OUT_CUR_MSB:
+ case AFE_AWB2_END:
+ case AFE_AWB2_CUR:
+ case AFE_AWB2_CUR_MSB:
+ case AFE_ADDA_DL_SDM_FIFO_MON:
+ case AFE_ADDA_DL_SRC_LCH_MON:
+ case AFE_ADDA_DL_SRC_RCH_MON:
+ case AFE_ADDA_DL_SDM_OUT_MON:
+ case AFE_CONNSYS_I2S_MON:
+ case AFE_ASRC_2CH_CON0:
+ case AFE_ASRC_2CH_CON2:
+ case AFE_ASRC_2CH_CON3:
+ case AFE_ASRC_2CH_CON4:
+ case AFE_ASRC_2CH_CON5:
+ case AFE_ASRC_2CH_CON7:
+ case AFE_ASRC_2CH_CON8:
+ case AFE_MEMIF_MON12:
+ case AFE_MEMIF_MON13:
+ case AFE_MEMIF_MON14:
+ case AFE_MEMIF_MON15:
+ case AFE_MEMIF_MON16:
+ case AFE_MEMIF_MON17:
+ case AFE_MEMIF_MON18:
+ case AFE_MEMIF_MON19:
+ case AFE_MEMIF_MON20:
+ case AFE_MEMIF_MON21:
+ case AFE_MEMIF_MON22:
+ case AFE_MEMIF_MON23:
+ case AFE_MEMIF_MON24:
+ case AFE_ADDA_MTKAIF_MON0:
+ case AFE_ADDA_MTKAIF_MON1:
+ case AFE_AUD_PAD_TOP:
+ case AFE_GENERAL1_ASRC_2CH_CON0:
+ case AFE_GENERAL1_ASRC_2CH_CON2:
+ case AFE_GENERAL1_ASRC_2CH_CON3:
+ case AFE_GENERAL1_ASRC_2CH_CON4:
+ case AFE_GENERAL1_ASRC_2CH_CON5:
+ case AFE_GENERAL1_ASRC_2CH_CON7:
+ case AFE_GENERAL1_ASRC_2CH_CON8:
+ case AFE_GENERAL1_ASRC_2CH_CON12:
+ case AFE_GENERAL1_ASRC_2CH_CON13:
+ case AFE_GENERAL2_ASRC_2CH_CON0:
+ case AFE_GENERAL2_ASRC_2CH_CON2:
+ case AFE_GENERAL2_ASRC_2CH_CON3:
+ case AFE_GENERAL2_ASRC_2CH_CON4:
+ case AFE_GENERAL2_ASRC_2CH_CON5:
+ case AFE_GENERAL2_ASRC_2CH_CON7:
+ case AFE_GENERAL2_ASRC_2CH_CON8:
+ case AFE_GENERAL2_ASRC_2CH_CON12:
+ case AFE_GENERAL2_ASRC_2CH_CON13:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config mt8183_afe_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+
+ .volatile_reg = mt8183_is_volatile_reg,
+
+ .max_register = AFE_MAX_REGISTER,
+ .num_reg_defaults_raw = AFE_MAX_REGISTER,
+
+ .cache_type = REGCACHE_FLAT,
+};
+
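+/*
+ * AFE ISR: read the IRQ status bits routed to the MCU, signal a period
+ * elapsed on each active memif that owns a raised IRQ, then clear the
+ * handled bits.
+ */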
+static irqreturn_t mt8183_afe_irq_handler(int irq_id, void *dev)
+{
+ struct mtk_base_afe *afe = dev;
+ struct mtk_base_afe_irq *irq;
+ unsigned int status;
+ unsigned int status_mcu;
+ unsigned int mcu_en;
+ int ret;
+ int i;
+ irqreturn_t irq_ret = IRQ_HANDLED;
+
+ /* get the set of IRQs routed to the MCU */
+ regmap_read(afe->regmap, AFE_IRQ_MCU_EN, &mcu_en);
+
+ ret = regmap_read(afe->regmap, AFE_IRQ_MCU_STATUS, &status);
+ /* only handle IRQs that are routed to the MCU */
+ status_mcu = status & mcu_en & AFE_IRQ_STATUS_BITS;
+
+ if (ret || status_mcu == 0) {
+ dev_err(afe->dev, "%s(), irq status err, ret %d, status 0x%x, mcu_en 0x%x\n",
+ __func__, ret, status, mcu_en);
+
+ irq_ret = IRQ_NONE;
+ goto err_irq;
+ }
+
+ for (i = 0; i < MT8183_MEMIF_NUM; i++) {
+ struct mtk_base_afe_memif *memif = &afe->memif[i];
+
+ if (!memif->substream)
+ continue;
+
+ if (memif->irq_usage < 0)
+ continue;
+
+ irq = &afe->irqs[memif->irq_usage];
+
+ if (status_mcu & (1 << irq->irq_data->irq_en_shift))
+ snd_pcm_period_elapsed(memif->substream);
+ }
+
+err_irq:
+ /* clear irq */
+ regmap_write(afe->regmap,
+ AFE_IRQ_MCU_CLR,
+ status_mcu);
+
+ return irq_ret;
+}
+
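+/*
+ * Runtime suspend: turn the AFE off, wait for it to become idle, clear
+ * pending IRQ status, switch the regmap to cache-only and gate the clocks.
+ */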
+static int mt8183_afe_runtime_suspend(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ unsigned int value;
+ int ret;
+
+ if (!afe->regmap || afe_priv->pm_runtime_bypass_reg_ctl)
+ goto skip_regmap;
+
+ /* disable AFE */
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0, AFE_ON_MASK_SFT, 0x0);
+
+ ret = regmap_read_poll_timeout(afe->regmap,
+ AFE_DAC_MON,
+ value,
+ (value & AFE_ON_RETM_MASK_SFT) == 0,
+ 20,
+ 1 * 1000 * 1000);
+ if (ret)
+ dev_warn(afe->dev, "%s(), ret %d\n", __func__, ret);
+
+ /* make sure all IRQ statuses are cleared; writing twice is intentional */
+ regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CLR, 0xffff, 0xffff);
+ regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CLR, 0xffff, 0xffff);
+
+ /* cache only */
+ regcache_cache_only(afe->regmap, true);
+ regcache_mark_dirty(afe->regmap);
+
+skip_regmap:
+ return mt8183_afe_disable_clock(afe);
+}
+
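+/*
+ * Runtime resume: ungate the clocks, sync the register cache back to
+ * hardware and re-enable the AFE with its default bus settings.
+ */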
+static int mt8183_afe_runtime_resume(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ ret = mt8183_afe_enable_clock(afe);
+ if (ret)
+ return ret;
+
+ if (!afe->regmap || afe_priv->pm_runtime_bypass_reg_ctl)
+ goto skip_regmap;
+
+ regcache_cache_only(afe->regmap, false);
+ regcache_sync(afe->regmap);
+
+ /* enable audio sys DCM for power saving */
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, 0x1 << 29, 0x1 << 29);
+
+ /* force the CPU to use 8_24 format when writing 32-bit data */
+ regmap_update_bits(afe->regmap, AFE_MEMIF_MSB,
+ CPU_HD_ALIGN_MASK_SFT, 0 << CPU_HD_ALIGN_SFT);
+
+ /* set all output ports to 24-bit */
+ regmap_write(afe->regmap, AFE_CONN_24BIT, 0xffffffff);
+ regmap_write(afe->regmap, AFE_CONN_24BIT_1, 0xffffffff);
+
+ /* enable AFE */
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x1);
+
+skip_regmap:
+ return 0;
+}
+
+static int mt8183_afe_component_probe(struct snd_soc_component *component)
+{
+ return mtk_afe_add_sub_dai_control(component);
+}
+
+static const struct snd_soc_component_driver mt8183_afe_component = {
+ .name = AFE_PCM_NAME,
+ .ops = &mtk_afe_pcm_ops,
+ .pcm_new = mtk_afe_pcm_new,
+ .pcm_free = mtk_afe_pcm_free,
+ .probe = mt8183_afe_component_probe,
+};
+
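+/* Register the memif front-end DAIs along with their DAPM widgets and routes */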
+static int mt8183_dai_memif_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mt8183_memif_dai_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mt8183_memif_dai_driver);
+
+ dai->dapm_widgets = mt8183_memif_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mt8183_memif_widgets);
+ dai->dapm_routes = mt8183_memif_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mt8183_memif_routes);
+ return 0;
+}
+
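+/* Sub-DAI registration hooks, invoked in order during probe */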
+typedef int (*dai_register_cb)(struct mtk_base_afe *);
+static const dai_register_cb dai_register_cbs[] = {
+ mt8183_dai_adda_register,
+ mt8183_dai_i2s_register,
+ mt8183_dai_pcm_register,
+ mt8183_dai_tdm_register,
+ mt8183_dai_hostless_register,
+ mt8183_dai_memif_register,
+};
+
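+/*
+ * Probe: set up clocks and the shared syscon regmap, describe the memif
+ * and IRQ resources, register the sub-DAIs and expose the ASoC components.
+ */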
+static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
+{
+ struct mtk_base_afe *afe;
+ struct mt8183_afe_private *afe_priv;
+ struct device *dev;
+ int i, irq_id, ret;
+
+ afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+ if (!afe)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, afe);
+
+ afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
+ GFP_KERNEL);
+ if (!afe->platform_priv)
+ return -ENOMEM;
+
+ afe_priv = afe->platform_priv;
+ afe->dev = &pdev->dev;
+ dev = afe->dev;
+
+ /* initialize audio-related clocks */
+ ret = mt8183_init_clock(afe);
+ if (ret) {
+ dev_err(dev, "init clock error\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ /* regmap init */
+ afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(afe->regmap)) {
+ dev_err(dev, "could not get regmap from parent\n");
+ return PTR_ERR(afe->regmap);
+ }
+ ret = regmap_attach_dev(dev, afe->regmap, &mt8183_afe_regmap_config);
+ if (ret) {
+ dev_warn(dev, "regmap_attach_dev fail, ret %d\n", ret);
+ return ret;
+ }
+
+ /* enable clocks so regcache can read default values from hardware */
+ afe_priv->pm_runtime_bypass_reg_ctl = true;
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = regmap_reinit_cache(afe->regmap, &mt8183_afe_regmap_config);
+ if (ret) {
+ dev_err(dev, "regmap_reinit_cache fail, ret %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ afe_priv->pm_runtime_bypass_reg_ctl = false;
+
+ regcache_cache_only(afe->regmap, true);
+ regcache_mark_dirty(afe->regmap);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* init memif */
+ afe->memif_size = MT8183_MEMIF_NUM;
+ afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
+ GFP_KERNEL);
+ if (!afe->memif)
+ return -ENOMEM;
+
+ for (i = 0; i < afe->memif_size; i++) {
+ afe->memif[i].data = &memif_data[i];
+ afe->memif[i].irq_usage = -1;
+ }
+
+ afe->memif[MT8183_MEMIF_HDMI].irq_usage = MT8183_IRQ_8;
+ afe->memif[MT8183_MEMIF_HDMI].const_irq = 1;
+
+ mutex_init(&afe->irq_alloc_lock);
+
+ /* init irq */
+ afe->irqs_size = MT8183_IRQ_NUM;
+ afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
+ GFP_KERNEL);
+ if (!afe->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < afe->irqs_size; i++)
+ afe->irqs[i].irq_data = &irq_data[i];
+
+ /* request irq */
+ irq_id = platform_get_irq(pdev, 0);
+ if (irq_id < 0) {
+ dev_err(dev, "%pOFn no irq found\n", dev->of_node);
+ return irq_id;
+ }
+ ret = devm_request_irq(dev, irq_id, mt8183_afe_irq_handler,
+ IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
+ if (ret) {
+ dev_err(dev, "could not request_irq for asys-isr\n");
+ return ret;
+ }
+
+ /* init sub_dais */
+ INIT_LIST_HEAD(&afe->sub_dais);
+
+ for (i = 0; i < ARRAY_SIZE(dai_register_cbs); i++) {
+ ret = dai_register_cbs[i](afe);
+ if (ret) {
+ dev_warn(afe->dev, "dai register i %d fail, ret %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ /* init dai_driver and component_driver */
+ ret = mtk_afe_combine_sub_dai(afe);
+ if (ret) {
+ dev_warn(afe->dev, "mtk_afe_combine_sub_dai fail, ret %d\n",
+ ret);
+ return ret;
+ }
+
+ afe->mtk_afe_hardware = &mt8183_afe_hardware;
+ afe->memif_fs = mt8183_memif_fs;
+ afe->irq_fs = mt8183_irq_fs;
+
+ afe->runtime_resume = mt8183_afe_runtime_resume;
+ afe->runtime_suspend = mt8183_afe_runtime_suspend;
+
+ /* register component */
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &mt8183_afe_component,
+ NULL, 0);
+ if (ret) {
+ dev_warn(dev, "err_platform\n");
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_component(afe->dev,
+ &mt8183_afe_pcm_dai_component,
+ afe->dai_drivers,
+ afe->num_dai_drivers);
+ if (ret) {
+ dev_warn(dev, "err_dai_component\n");
+ return ret;
+ }
+
+ return ret;
+}
+
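+/* Balance the probe-time pm_runtime_get_sync() and make sure the AFE ends up suspended */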
+static int mt8183_afe_pcm_dev_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mt8183_afe_runtime_suspend(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id mt8183_afe_pcm_dt_match[] = {
+ { .compatible = "mediatek,mt8183-audio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt8183_afe_pcm_dt_match);
+
+static const struct dev_pm_ops mt8183_afe_pm_ops = {
+ SET_RUNTIME_PM_OPS(mt8183_afe_runtime_suspend,
+ mt8183_afe_runtime_resume, NULL)
+};
+
+static struct platform_driver mt8183_afe_pcm_driver = {
+ .driver = {
+ .name = "mt8183-audio",
+ .of_match_table = mt8183_afe_pcm_dt_match,
+#ifdef CONFIG_PM
+ .pm = &mt8183_afe_pm_ops,
+#endif
+ },
+ .probe = mt8183_afe_pcm_dev_probe,
+ .remove = mt8183_afe_pcm_dev_remove,
+};
+
+module_platform_driver(mt8183_afe_pcm_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 8183");
+MODULE_AUTHOR("KaiChieh Chuang <kaichieh.chuang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-adda.c b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
new file mode 100644
index 000000000000..017d7d1d9148
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-adda.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI ADDA Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include "mt8183-afe-common.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+
+enum {
+ AUDIO_SDM_LEVEL_MUTE = 0,
+ AUDIO_SDM_LEVEL_NORMAL = 0x1d,
+ /* if you change the normal level, you also need to update */
+ /* the formulas for HP impedance and DC trim */
+};
+
+enum {
+ DELAY_DATA_MISO1 = 0,
+ DELAY_DATA_MISO2,
+};
+
+enum {
+ MTK_AFE_ADDA_DL_RATE_8K = 0,
+ MTK_AFE_ADDA_DL_RATE_11K = 1,
+ MTK_AFE_ADDA_DL_RATE_12K = 2,
+ MTK_AFE_ADDA_DL_RATE_16K = 3,
+ MTK_AFE_ADDA_DL_RATE_22K = 4,
+ MTK_AFE_ADDA_DL_RATE_24K = 5,
+ MTK_AFE_ADDA_DL_RATE_32K = 6,
+ MTK_AFE_ADDA_DL_RATE_44K = 7,
+ MTK_AFE_ADDA_DL_RATE_48K = 8,
+ MTK_AFE_ADDA_DL_RATE_96K = 9,
+ MTK_AFE_ADDA_DL_RATE_192K = 10,
+};
+
+enum {
+ MTK_AFE_ADDA_UL_RATE_8K = 0,
+ MTK_AFE_ADDA_UL_RATE_16K = 1,
+ MTK_AFE_ADDA_UL_RATE_32K = 2,
+ MTK_AFE_ADDA_UL_RATE_48K = 3,
+ MTK_AFE_ADDA_UL_RATE_96K = 4,
+ MTK_AFE_ADDA_UL_RATE_192K = 5,
+ MTK_AFE_ADDA_UL_RATE_48K_HD = 6,
+};
+
+static unsigned int adda_dl_rate_transform(struct mtk_base_afe *afe,
+ unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_ADDA_DL_RATE_8K;
+ case 11025:
+ return MTK_AFE_ADDA_DL_RATE_11K;
+ case 12000:
+ return MTK_AFE_ADDA_DL_RATE_12K;
+ case 16000:
+ return MTK_AFE_ADDA_DL_RATE_16K;
+ case 22050:
+ return MTK_AFE_ADDA_DL_RATE_22K;
+ case 24000:
+ return MTK_AFE_ADDA_DL_RATE_24K;
+ case 32000:
+ return MTK_AFE_ADDA_DL_RATE_32K;
+ case 44100:
+ return MTK_AFE_ADDA_DL_RATE_44K;
+ case 48000:
+ return MTK_AFE_ADDA_DL_RATE_48K;
+ case 96000:
+ return MTK_AFE_ADDA_DL_RATE_96K;
+ case 192000:
+ return MTK_AFE_ADDA_DL_RATE_192K;
+ default:
+ dev_warn(afe->dev, "%s(), rate %d invalid, use 48kHz!!!\n",
+ __func__, rate);
+ return MTK_AFE_ADDA_DL_RATE_48K;
+ }
+}
+
+static unsigned int adda_ul_rate_transform(struct mtk_base_afe *afe,
+ unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_ADDA_UL_RATE_8K;
+ case 16000:
+ return MTK_AFE_ADDA_UL_RATE_16K;
+ case 32000:
+ return MTK_AFE_ADDA_UL_RATE_32K;
+ case 48000:
+ return MTK_AFE_ADDA_UL_RATE_48K;
+ case 96000:
+ return MTK_AFE_ADDA_UL_RATE_96K;
+ case 192000:
+ return MTK_AFE_ADDA_UL_RATE_192K;
+ default:
+ dev_warn(afe->dev, "%s(), rate %d invalid, use 48kHz!!!\n",
+ __func__, rate);
+ return MTK_AFE_ADDA_UL_RATE_48K;
+ }
+}
+
+/* dai component */
+static const struct snd_kcontrol_new mtk_adda_dl_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN3, I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN3, I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN3, I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN3,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN3,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN3,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN3,
+ I_PCM_2_CAP_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_adda_dl_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN4, I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN4, I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN4, I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN4, I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN4, I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN4, I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN4,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN4,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN4,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN4,
+ I_PCM_2_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN4,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2", AFE_CONN4,
+ I_PCM_2_CAP_CH2, 1, 0),
+};
+
+static int mtk_adda_ul_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+ dev_dbg(afe->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* update setting to dmic */
+ if (afe_priv->mtkaif_dmic) {
+ /* mtkaif_rxif_data_mode = 1, dmic */
+ regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIF_RX_CFG0,
+ 0x1, 0x1);
+
+ /* dmic mode, 3.25MHz */
+ regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIF_RX_CFG0,
+ 0x0, 0xf << 20);
+ regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+ 0x0, 0x1 << 5);
+ regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+ 0x0, 0x3 << 14);
+
+ /* turn on dmic, ch1, ch2 */
+ regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+ 0x1 << 1, 0x1 << 1);
+ regmap_update_bits(afe->regmap, AFE_ADDA_UL_SRC_CON0,
+ 0x3 << 21, 0x3 << 21);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* delay 1/fs (smallest fs is 8kHz, i.e. 125us) before turning the AFE off */
+ usleep_range(125, 135);
+
+ /* reset dmic */
+ afe_priv->mtkaif_dmic = 0;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* mtkaif dmic */
+static const char * const mt8183_adda_off_on_str[] = {
+ "Off", "On"
+};
+
+static const struct soc_enum mt8183_adda_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8183_adda_off_on_str),
+ mt8183_adda_off_on_str),
+};
+
+static int mt8183_adda_dmic_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+
+ ucontrol->value.integer.value[0] = afe_priv->mtkaif_dmic;
+
+ return 0;
+}
+
+static int mt8183_adda_dmic_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ afe_priv->mtkaif_dmic = ucontrol->value.integer.value[0];
+
+ dev_info(afe->dev, "%s(), kcontrol name %s, mtkaif_dmic %d\n",
+ __func__, kcontrol->id.name, afe_priv->mtkaif_dmic);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new mtk_adda_controls[] = {
+ SOC_ENUM_EXT("MTKAIF_DMIC", mt8183_adda_enum[0],
+ mt8183_adda_dmic_get, mt8183_adda_dmic_set),
+};
+
+enum {
+ SUPPLY_SEQ_ADDA_AFE_ON,
+ SUPPLY_SEQ_ADDA_DL_ON,
+ SUPPLY_SEQ_ADDA_UL_ON,
+};
+
+static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = {
+ /* adda */
+ SND_SOC_DAPM_MIXER("ADDA_DL_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_adda_dl_ch1_mix,
+ ARRAY_SIZE(mtk_adda_dl_ch1_mix)),
+ SND_SOC_DAPM_MIXER("ADDA_DL_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_adda_dl_ch2_mix,
+ ARRAY_SIZE(mtk_adda_dl_ch2_mix)),
+
+ SND_SOC_DAPM_SUPPLY_S("ADDA Enable", SUPPLY_SEQ_ADDA_AFE_ON,
+ AFE_ADDA_UL_DL_CON0, ADDA_AFE_ON_SFT, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY_S("ADDA Playback Enable", SUPPLY_SEQ_ADDA_DL_ON,
+ AFE_ADDA_DL_SRC2_CON0,
+ DL_2_SRC_ON_TMP_CTL_PRE_SFT, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY_S("ADDA Capture Enable", SUPPLY_SEQ_ADDA_UL_ON,
+ AFE_ADDA_UL_SRC_CON0,
+ UL_SRC_ON_TMP_CTL_SFT, 0,
+ mtk_adda_ul_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_clk"),
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_predis_clk"),
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_adc_clk"),
+ SND_SOC_DAPM_CLOCK_SUPPLY("mtkaif_26m_clk"),
+};
+
+static const struct snd_soc_dapm_route mtk_dai_adda_routes[] = {
+ /* playback */
+ {"ADDA_DL_CH1", "DL1_CH1", "DL1"},
+ {"ADDA_DL_CH2", "DL1_CH1", "DL1"},
+ {"ADDA_DL_CH2", "DL1_CH2", "DL1"},
+
+ {"ADDA_DL_CH1", "DL2_CH1", "DL2"},
+ {"ADDA_DL_CH2", "DL2_CH1", "DL2"},
+ {"ADDA_DL_CH2", "DL2_CH2", "DL2"},
+
+ {"ADDA_DL_CH1", "DL3_CH1", "DL3"},
+ {"ADDA_DL_CH2", "DL3_CH1", "DL3"},
+ {"ADDA_DL_CH2", "DL3_CH2", "DL3"},
+
+ {"ADDA Playback", NULL, "ADDA_DL_CH1"},
+ {"ADDA Playback", NULL, "ADDA_DL_CH2"},
+
+ /* adda enable */
+ {"ADDA Playback", NULL, "ADDA Enable"},
+ {"ADDA Playback", NULL, "ADDA Playback Enable"},
+ {"ADDA Capture", NULL, "ADDA Enable"},
+ {"ADDA Capture", NULL, "ADDA Capture Enable"},
+
+ /* clk */
+ {"ADDA Playback", NULL, "mtkaif_26m_clk"},
+ {"ADDA Playback", NULL, "aud_dac_clk"},
+ {"ADDA Playback", NULL, "aud_dac_predis_clk"},
+
+ {"ADDA Capture", NULL, "mtkaif_26m_clk"},
+ {"ADDA Capture", NULL, "aud_adc_clk"},
+};
+
+static int set_mtkaif_rx(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int delay_data;
+ int delay_cycle;
+
+ switch (afe_priv->mtkaif_protocol) {
+ case MT8183_MTKAIF_PROTOCOL_2_CLK_P2:
+ regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x38);
+ regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x39);
+ /* mtkaif_rxif_clkinv_adc inverse for calibration */
+ regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0,
+ 0x80010000);
+
+ if (afe_priv->mtkaif_phase_cycle[0] >=
+ afe_priv->mtkaif_phase_cycle[1]) {
+ delay_data = DELAY_DATA_MISO1;
+ delay_cycle = afe_priv->mtkaif_phase_cycle[0] -
+ afe_priv->mtkaif_phase_cycle[1];
+ } else {
+ delay_data = DELAY_DATA_MISO2;
+ delay_cycle = afe_priv->mtkaif_phase_cycle[1] -
+ afe_priv->mtkaif_phase_cycle[0];
+ }
+
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_MTKAIF_RX_CFG2,
+ MTKAIF_RXIF_DELAY_DATA_MASK_SFT,
+ delay_data << MTKAIF_RXIF_DELAY_DATA_SFT);
+
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_MTKAIF_RX_CFG2,
+ MTKAIF_RXIF_DELAY_CYCLE_MASK_SFT,
+ delay_cycle << MTKAIF_RXIF_DELAY_CYCLE_SFT);
+ break;
+ case MT8183_MTKAIF_PROTOCOL_2:
+ regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31);
+ regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0,
+ 0x00010000);
+ break;
+ case MT8183_MTKAIF_PROTOCOL_1:
+ regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31);
+ regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0x0);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* dai ops */
+static int mtk_dai_adda_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ unsigned int rate = params_rate(params);
+
+ dev_dbg(afe->dev, "%s(), id %d, stream %d, rate %d\n",
+ __func__, dai->id, substream->stream, rate);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ unsigned int dl_src2_con0 = 0;
+ unsigned int dl_src2_con1 = 0;
+
+ /* clean predistortion */
+ regmap_write(afe->regmap, AFE_ADDA_PREDIS_CON0, 0);
+ regmap_write(afe->regmap, AFE_ADDA_PREDIS_CON1, 0);
+
+ /* set sampling rate */
+ dl_src2_con0 = adda_dl_rate_transform(afe, rate) << 28;
+
+ /* set output mode */
+ switch (rate) {
+ case 192000:
+ dl_src2_con0 |= (0x1 << 24); /* UP_SAMPLING_RATE_X2 */
+ dl_src2_con0 |= 1 << 14;
+ break;
+ case 96000:
+ dl_src2_con0 |= (0x2 << 24); /* UP_SAMPLING_RATE_X4 */
+ dl_src2_con0 |= 1 << 14;
+ break;
+ default:
+ dl_src2_con0 |= (0x3 << 24); /* UP_SAMPLING_RATE_X8 */
+ break;
+ }
+
+ /* turn off mute function */
+ dl_src2_con0 |= (0x03 << 11);
+
+ /* set voice input data if input sample rate is 8k or 16k */
+ if (rate == 8000 || rate == 16000)
+ dl_src2_con0 |= 0x01 << 5;
+
+ /* SA suggests applying -0.3dB to the audio/speech path */
+ dl_src2_con1 = 0xf74f0000;
+
+ /* turn on down-link gain */
+ dl_src2_con0 = dl_src2_con0 | (0x01 << 1);
+
+ regmap_write(afe->regmap, AFE_ADDA_DL_SRC2_CON0, dl_src2_con0);
+ regmap_write(afe->regmap, AFE_ADDA_DL_SRC2_CON1, dl_src2_con1);
+
+ /* set sdm gain */
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_DL_SDM_DCCOMP_CON,
+ ATTGAIN_CTL_MASK_SFT,
+ AUDIO_SDM_LEVEL_NORMAL << ATTGAIN_CTL_SFT);
+ } else {
+ unsigned int voice_mode = 0;
+ unsigned int ul_src_con0 = 0; /* default value */
+
+ /* set mtkaif protocol */
+ set_mtkaif_rx(afe);
+
+ /* Using Internal ADC */
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_TOP_CON0,
+ 0x1 << 0,
+ 0x0 << 0);
+
+ voice_mode = adda_ul_rate_transform(afe, rate);
+
+ ul_src_con0 |= (voice_mode << 17) & (0x7 << 17);
+
+ regmap_write(afe->regmap, AFE_ADDA_UL_SRC_CON0, ul_src_con0);
+
+ /* mtkaif_rxif_data_mode = 0, amic */
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_MTKAIF_RX_CFG0,
+ 0x1 << 0,
+ 0x0 << 0);
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_adda_ops = {
+ .hw_params = mtk_dai_adda_hw_params,
+};
+
+/* dai driver */
+#define MTK_ADDA_PLAYBACK_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_ADDA_CAPTURE_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_48000)
+
+#define MTK_ADDA_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_adda_driver[] = {
+ {
+ .name = "ADDA",
+ .id = MT8183_DAI_ADDA,
+ .playback = {
+ .stream_name = "ADDA Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_ADDA_PLAYBACK_RATES,
+ .formats = MTK_ADDA_FORMATS,
+ },
+ .capture = {
+ .stream_name = "ADDA Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_ADDA_CAPTURE_RATES,
+ .formats = MTK_ADDA_FORMATS,
+ },
+ .ops = &mtk_dai_adda_ops,
+ },
+};
+
+int mt8183_dai_adda_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_adda_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_adda_driver);
+
+ dai->controls = mtk_adda_controls;
+ dai->num_controls = ARRAY_SIZE(mtk_adda_controls);
+ dai->dapm_widgets = mtk_dai_adda_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_adda_widgets);
+ dai->dapm_routes = mtk_dai_adda_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_adda_routes);
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-hostless.c b/sound/soc/mediatek/mt8183/mt8183-dai-hostless.c
new file mode 100644
index 000000000000..1667ad352d34
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-hostless.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI Hostless Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include "mt8183-afe-common.h"
+
+/* dai component */
+static const struct snd_soc_dapm_route mtk_dai_hostless_routes[] = {
+ /* Hostless ADDA Loopback */
+ {"ADDA_DL_CH1", "ADDA_UL_CH1", "Hostless LPBK DL"},
+ {"ADDA_DL_CH1", "ADDA_UL_CH2", "Hostless LPBK DL"},
+ {"ADDA_DL_CH2", "ADDA_UL_CH1", "Hostless LPBK DL"},
+ {"ADDA_DL_CH2", "ADDA_UL_CH2", "Hostless LPBK DL"},
+ {"Hostless LPBK UL", NULL, "ADDA Capture"},
+
+ /* Hostless Speech */
+ {"ADDA_DL_CH1", "PCM_1_CAP_CH1", "Hostless Speech DL"},
+ {"ADDA_DL_CH2", "PCM_1_CAP_CH1", "Hostless Speech DL"},
+ {"ADDA_DL_CH2", "PCM_1_CAP_CH2", "Hostless Speech DL"},
+ {"ADDA_DL_CH1", "PCM_2_CAP_CH1", "Hostless Speech DL"},
+ {"ADDA_DL_CH2", "PCM_2_CAP_CH1", "Hostless Speech DL"},
+ {"ADDA_DL_CH2", "PCM_2_CAP_CH2", "Hostless Speech DL"},
+ {"PCM_1_PB_CH1", "ADDA_UL_CH1", "Hostless Speech DL"},
+ {"PCM_1_PB_CH2", "ADDA_UL_CH2", "Hostless Speech DL"},
+ {"PCM_2_PB_CH1", "ADDA_UL_CH1", "Hostless Speech DL"},
+ {"PCM_2_PB_CH2", "ADDA_UL_CH2", "Hostless Speech DL"},
+
+ {"Hostless Speech UL", NULL, "PCM 1 Capture"},
+ {"Hostless Speech UL", NULL, "PCM 2 Capture"},
+ {"Hostless Speech UL", NULL, "ADDA Capture"},
+};
+
+/* dai ops */
+static int mtk_dai_hostless_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+ return snd_soc_set_runtime_hwparams(substream, afe->mtk_afe_hardware);
+}
+
+static const struct snd_soc_dai_ops mtk_dai_hostless_ops = {
+ .startup = mtk_dai_hostless_startup,
+};
+
+/* dai driver */
+#define MTK_HOSTLESS_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_HOSTLESS_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_hostless_driver[] = {
+ {
+ .name = "Hostless LPBK DAI",
+ .id = MT8183_DAI_HOSTLESS_LPBK,
+ .playback = {
+ .stream_name = "Hostless LPBK DL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Hostless LPBK UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless Speech DAI",
+ .id = MT8183_DAI_HOSTLESS_SPEECH,
+ .playback = {
+ .stream_name = "Hostless Speech DL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Hostless Speech UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+};
+
+int mt8183_dai_hostless_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_hostless_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_hostless_driver);
+
+ dai->dapm_routes = mtk_dai_hostless_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_hostless_routes);
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c b/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c
new file mode 100644
index 000000000000..777e93d70bea
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-i2s.c
@@ -0,0 +1,1040 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI I2S Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include "mt8183-afe-clk.h"
+#include "mt8183-afe-common.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+
+enum {
+ I2S_FMT_EIAJ = 0,
+ I2S_FMT_I2S = 1,
+};
+
+enum {
+ I2S_WLEN_16_BIT = 0,
+ I2S_WLEN_32_BIT = 1,
+};
+
+enum {
+ I2S_HD_NORMAL = 0,
+ I2S_HD_LOW_JITTER = 1,
+};
+
+enum {
+ I2S1_SEL_O28_O29 = 0,
+ I2S1_SEL_O03_O04 = 1,
+};
+
+enum {
+ I2S_IN_PAD_CONNSYS = 0,
+ I2S_IN_PAD_IO_MUX = 1,
+};
+
+struct mtk_afe_i2s_priv {
+ int id;
+ int rate; /* used to determine which APLL to use */
+ int low_jitter_en;
+
+ const char *share_property_name;
+ int share_i2s_id;
+
+ int mclk_id;
+ int mclk_rate;
+ int mclk_apll;
+};
+
+static unsigned int get_i2s_wlen(snd_pcm_format_t format)
+{
+ return snd_pcm_format_physical_width(format) <= 16 ?
+ I2S_WLEN_16_BIT : I2S_WLEN_32_BIT;
+}
+
+#define MTK_AFE_I2S0_KCONTROL_NAME "I2S0_HD_Mux"
+#define MTK_AFE_I2S1_KCONTROL_NAME "I2S1_HD_Mux"
+#define MTK_AFE_I2S2_KCONTROL_NAME "I2S2_HD_Mux"
+#define MTK_AFE_I2S3_KCONTROL_NAME "I2S3_HD_Mux"
+#define MTK_AFE_I2S5_KCONTROL_NAME "I2S5_HD_Mux"
+
+#define I2S0_HD_EN_W_NAME "I2S0_HD_EN"
+#define I2S1_HD_EN_W_NAME "I2S1_HD_EN"
+#define I2S2_HD_EN_W_NAME "I2S2_HD_EN"
+#define I2S3_HD_EN_W_NAME "I2S3_HD_EN"
+#define I2S5_HD_EN_W_NAME "I2S5_HD_EN"
+
+#define I2S0_MCLK_EN_W_NAME "I2S0_MCLK_EN"
+#define I2S1_MCLK_EN_W_NAME "I2S1_MCLK_EN"
+#define I2S2_MCLK_EN_W_NAME "I2S2_MCLK_EN"
+#define I2S3_MCLK_EN_W_NAME "I2S3_MCLK_EN"
+#define I2S5_MCLK_EN_W_NAME "I2S5_MCLK_EN"
+
+static int get_i2s_id_by_name(struct mtk_base_afe *afe,
+ const char *name)
+{
+ if (strncmp(name, "I2S0", 4) == 0)
+ return MT8183_DAI_I2S_0;
+ else if (strncmp(name, "I2S1", 4) == 0)
+ return MT8183_DAI_I2S_1;
+ else if (strncmp(name, "I2S2", 4) == 0)
+ return MT8183_DAI_I2S_2;
+ else if (strncmp(name, "I2S3", 4) == 0)
+ return MT8183_DAI_I2S_3;
+ else if (strncmp(name, "I2S5", 4) == 0)
+ return MT8183_DAI_I2S_5;
+ else
+ return -EINVAL;
+}
+
+static struct mtk_afe_i2s_priv *get_i2s_priv_by_name(struct mtk_base_afe *afe,
+ const char *name)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_i2s_id_by_name(afe, name);
+
+ if (dai_id < 0)
+ return NULL;
+
+ return afe_priv->dai_priv[dai_id];
+}
+
+/* low jitter control */
+static const char * const mt8183_i2s_hd_str[] = {
+ "Normal", "Low_Jitter"
+};
+
+static const struct soc_enum mt8183_i2s_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8183_i2s_hd_str),
+ mt8183_i2s_hd_str),
+};
+
+static int mt8183_i2s_hd_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, kcontrol->id.name);
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return -EINVAL;
+ }
+
+ ucontrol->value.integer.value[0] = i2s_priv->low_jitter_en;
+
+ return 0;
+}
+
+static int mt8183_i2s_hd_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int hd_en;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ hd_en = ucontrol->value.integer.value[0];
+
+ dev_info(afe->dev, "%s(), kcontrol name %s, hd_en %d\n",
+ __func__, kcontrol->id.name, hd_en);
+
+ i2s_priv = get_i2s_priv_by_name(afe, kcontrol->id.name);
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return -EINVAL;
+ }
+
+ i2s_priv->low_jitter_en = hd_en;
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new mtk_dai_i2s_controls[] = {
+ SOC_ENUM_EXT(MTK_AFE_I2S0_KCONTROL_NAME, mt8183_i2s_enum[0],
+ mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+ SOC_ENUM_EXT(MTK_AFE_I2S1_KCONTROL_NAME, mt8183_i2s_enum[0],
+ mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+ SOC_ENUM_EXT(MTK_AFE_I2S2_KCONTROL_NAME, mt8183_i2s_enum[0],
+ mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+ SOC_ENUM_EXT(MTK_AFE_I2S3_KCONTROL_NAME, mt8183_i2s_enum[0],
+ mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+ SOC_ENUM_EXT(MTK_AFE_I2S5_KCONTROL_NAME, mt8183_i2s_enum[0],
+ mt8183_i2s_hd_get, mt8183_i2s_hd_set),
+};
+
+/* dai component */
+/* interconnection */
+static const struct snd_kcontrol_new mtk_i2s3_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN0, I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN0, I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN0, I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN0,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN0,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN0,
+ I_PCM_2_CAP_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s3_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN1, I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN1, I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN1, I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN1,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN1,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN1,
+ I_PCM_2_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN1,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2", AFE_CONN1,
+ I_PCM_2_CAP_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s1_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN28, I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN28, I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN28, I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN28,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN28,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN28,
+ I_PCM_2_CAP_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s1_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN29, I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN29, I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN29, I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN29,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN29,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN29,
+ I_PCM_2_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN29,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2", AFE_CONN29,
+ I_PCM_2_CAP_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s5_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN30, I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN30, I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1", AFE_CONN30, I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN30,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN30,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN30,
+ I_PCM_2_CAP_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s5_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2", AFE_CONN31, I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN31, I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2", AFE_CONN31, I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN31,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1", AFE_CONN31,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1", AFE_CONN31,
+ I_PCM_2_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2", AFE_CONN31,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2", AFE_CONN31,
+ I_PCM_2_CAP_CH2, 1, 0),
+};
+
+enum {
+ SUPPLY_SEQ_APLL,
+ SUPPLY_SEQ_I2S_MCLK_EN,
+ SUPPLY_SEQ_I2S_HD_EN,
+ SUPPLY_SEQ_I2S_EN,
+};
+
+static int mtk_apll_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_info(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (strcmp(w->name, APLL1_W_NAME) == 0)
+ mt8183_apll1_enable(afe);
+ else
+ mt8183_apll2_enable(afe);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (strcmp(w->name, APLL1_W_NAME) == 0)
+ mt8183_apll1_disable(afe);
+ else
+ mt8183_apll2_disable(afe);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mtk_mclk_en_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ dev_info(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ i2s_priv = get_i2s_priv_by_name(afe, w->name);
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8183_mck_enable(afe, i2s_priv->mclk_id, i2s_priv->mclk_rate);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ i2s_priv->mclk_rate = 0;
+ mt8183_mck_disable(afe, i2s_priv->mclk_id);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget mtk_dai_i2s_widgets[] = {
+ SND_SOC_DAPM_MIXER("I2S1_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_i2s1_ch1_mix,
+ ARRAY_SIZE(mtk_i2s1_ch1_mix)),
+ SND_SOC_DAPM_MIXER("I2S1_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_i2s1_ch2_mix,
+ ARRAY_SIZE(mtk_i2s1_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("I2S3_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_i2s3_ch1_mix,
+ ARRAY_SIZE(mtk_i2s3_ch1_mix)),
+ SND_SOC_DAPM_MIXER("I2S3_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_i2s3_ch2_mix,
+ ARRAY_SIZE(mtk_i2s3_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("I2S5_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_i2s5_ch1_mix,
+ ARRAY_SIZE(mtk_i2s5_ch1_mix)),
+ SND_SOC_DAPM_MIXER("I2S5_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_i2s5_ch2_mix,
+ ARRAY_SIZE(mtk_i2s5_ch2_mix)),
+
+ /* i2s en */
+ SND_SOC_DAPM_SUPPLY_S("I2S0_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON, I2S_EN_SFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S1_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON1, I2S_EN_SFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S2_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON2, I2S_EN_SFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S3_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON3, I2S_EN_SFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("I2S5_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON4, I2S5_EN_SFT, 0,
+ NULL, 0),
+ /* i2s hd en */
+ SND_SOC_DAPM_SUPPLY_S(I2S0_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON, I2S1_HD_EN_SFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S(I2S1_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON1, I2S2_HD_EN_SFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S(I2S2_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON2, I2S3_HD_EN_SFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S(I2S3_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON3, I2S4_HD_EN_SFT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S(I2S5_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON4, I2S5_HD_EN_SFT, 0,
+ NULL, 0),
+
+ /* i2s mclk en */
+ SND_SOC_DAPM_SUPPLY_S(I2S0_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S1_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S2_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S3_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S5_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* apll */
+ SND_SOC_DAPM_SUPPLY_S(APLL1_W_NAME, SUPPLY_SEQ_APLL,
+ SND_SOC_NOPM, 0, 0,
+ mtk_apll_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(APLL2_W_NAME, SUPPLY_SEQ_APLL,
+ SND_SOC_NOPM, 0, 0,
+ mtk_apll_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static int mtk_afe_i2s_share_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return 0;
+ }
+
+ if (i2s_priv->share_i2s_id < 0)
+ return 0;
+
+ return i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name);
+}
+
+static int mtk_afe_i2s_hd_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return 0;
+ }
+
+ if (get_i2s_id_by_name(afe, sink->name) ==
+ get_i2s_id_by_name(afe, source->name))
+ return i2s_priv->low_jitter_en;
+
+ /* check if the shared i2s needs hd en */
+ if (i2s_priv->share_i2s_id < 0)
+ return 0;
+
+ if (i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name))
+ return i2s_priv->low_jitter_en;
+
+ return 0;
+}
+
+static int mtk_afe_i2s_apll_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+ int cur_apll;
+ int i2s_need_apll;
+
+ i2s_priv = get_i2s_priv_by_name(afe, w->name);
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return 0;
+ }
+
+ /* which apll */
+ cur_apll = mt8183_get_apll_by_name(afe, source->name);
+
+ /* choose APLL from i2s rate */
+ i2s_need_apll = mt8183_get_apll_by_rate(afe, i2s_priv->rate);
+
+ return (i2s_need_apll == cur_apll) ? 1 : 0;
+}
+
+static int mtk_afe_i2s_mclk_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return 0;
+ }
+
+ if (get_i2s_id_by_name(afe, sink->name) ==
+ get_i2s_id_by_name(afe, source->name))
+ return (i2s_priv->mclk_rate > 0) ? 1 : 0;
+
+ /* check if the shared i2s needs mclk */
+ if (i2s_priv->share_i2s_id < 0)
+ return 0;
+
+ if (i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name))
+ return (i2s_priv->mclk_rate > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int mtk_afe_mclk_apll_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+ int cur_apll;
+
+ i2s_priv = get_i2s_priv_by_name(afe, w->name);
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return 0;
+ }
+
+ /* which apll */
+ cur_apll = mt8183_get_apll_by_name(afe, source->name);
+
+ return (i2s_priv->mclk_apll == cur_apll) ? 1 : 0;
+}
+
+static const struct snd_soc_dapm_route mtk_dai_i2s_routes[] = {
+ /* i2s0 */
+ {"I2S0", NULL, "I2S0_EN"},
+ {"I2S0", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+ {"I2S0", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+ {"I2S0", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+ {"I2S0", NULL, "I2S5_EN", mtk_afe_i2s_share_connect},
+
+ {"I2S0", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S0", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S0", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S0", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S0", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S0_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S0_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S0", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S0", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S0", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S0", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S0", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S0_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S0_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+ /* i2s1 */
+ {"I2S1_CH1", "DL1_CH1", "DL1"},
+ {"I2S1_CH2", "DL1_CH2", "DL1"},
+
+ {"I2S1_CH1", "DL2_CH1", "DL2"},
+ {"I2S1_CH2", "DL2_CH2", "DL2"},
+
+ {"I2S1_CH1", "DL3_CH1", "DL3"},
+ {"I2S1_CH2", "DL3_CH2", "DL3"},
+
+ {"I2S1", NULL, "I2S1_CH1"},
+ {"I2S1", NULL, "I2S1_CH2"},
+
+ {"I2S1", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+ {"I2S1", NULL, "I2S1_EN"},
+ {"I2S1", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+ {"I2S1", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+ {"I2S1", NULL, "I2S5_EN", mtk_afe_i2s_share_connect},
+
+ {"I2S1", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S1", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S1", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S1", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S1", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S1_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S1_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S1", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S1", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S1", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S1", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S1", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S1_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S1_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+ /* i2s2 */
+ {"I2S2", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+ {"I2S2", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+ {"I2S2", NULL, "I2S2_EN"},
+ {"I2S2", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+ {"I2S2", NULL, "I2S5_EN", mtk_afe_i2s_share_connect},
+
+ {"I2S2", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S2", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S2", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S2", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S2", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S2_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S2_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S2", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S2", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S2", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S2", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S2", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S2_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S2_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+ /* i2s3 */
+ {"I2S3_CH1", "DL1_CH1", "DL1"},
+ {"I2S3_CH2", "DL1_CH2", "DL1"},
+
+ {"I2S3_CH1", "DL2_CH1", "DL2"},
+ {"I2S3_CH2", "DL2_CH2", "DL2"},
+
+ {"I2S3_CH1", "DL3_CH1", "DL3"},
+ {"I2S3_CH2", "DL3_CH2", "DL3"},
+
+ {"I2S3", NULL, "I2S3_CH1"},
+ {"I2S3", NULL, "I2S3_CH2"},
+
+ {"I2S3", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+ {"I2S3", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+ {"I2S3", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+ {"I2S3", NULL, "I2S3_EN"},
+ {"I2S3", NULL, "I2S5_EN", mtk_afe_i2s_share_connect},
+
+ {"I2S3", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S3", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S3", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S3", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S3", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S3_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S3_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S3", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S3", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S3", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S3", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S3", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S3_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S3_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+ /* i2s5 */
+ {"I2S5_CH1", "DL1_CH1", "DL1"},
+ {"I2S5_CH2", "DL1_CH2", "DL1"},
+
+ {"I2S5_CH1", "DL2_CH1", "DL2"},
+ {"I2S5_CH2", "DL2_CH2", "DL2"},
+
+ {"I2S5_CH1", "DL3_CH1", "DL3"},
+ {"I2S5_CH2", "DL3_CH2", "DL3"},
+
+ {"I2S5", NULL, "I2S5_CH1"},
+ {"I2S5", NULL, "I2S5_CH2"},
+
+ {"I2S5", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+ {"I2S5", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+ {"I2S5", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+ {"I2S5", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+ {"I2S5", NULL, "I2S5_EN"},
+
+ {"I2S5", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S5", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S5", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S5", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S5", NULL, I2S5_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S5_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S5_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S5", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S5", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S5", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S5", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S5", NULL, I2S5_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S5_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S5_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+};
+
+/* dai ops */
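+/*
+ * Program the I2S control register of the given port for the requested
+ * rate and format.  When the port shares its enable/clock with another
+ * I2S (see the "i2sN-share" device tree parsing below), recurse so the
+ * shared port is configured with the same parameters.
+ */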
+static int mtk_dai_i2s_config(struct mtk_base_afe *afe,
+ struct snd_pcm_hw_params *params,
+ int i2s_id)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_i2s_priv *i2s_priv = afe_priv->dai_priv[i2s_id];
+
+ unsigned int rate = params_rate(params);
+ unsigned int rate_reg = mt8183_rate_transform(afe->dev,
+ rate, i2s_id);
+ snd_pcm_format_t format = params_format(params);
+ unsigned int i2s_con = 0;
+ int ret = 0;
+
+ dev_info(afe->dev, "%s(), id %d, rate %d, format %d\n",
+ __func__,
+ i2s_id,
+ rate, format);
+
+ if (i2s_priv)
+ i2s_priv->rate = rate;
+ else
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+
+ switch (i2s_id) {
+ case MT8183_DAI_I2S_0:
+ regmap_update_bits(afe->regmap, AFE_DAC_CON1,
+ I2S_MODE_MASK_SFT, rate_reg << I2S_MODE_SFT);
+ i2s_con = I2S_IN_PAD_IO_MUX << I2SIN_PAD_SEL_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON,
+ 0xffffeffe, i2s_con);
+ break;
+ case MT8183_DAI_I2S_1:
+ i2s_con = I2S1_SEL_O28_O29 << I2S2_SEL_O03_O04_SFT;
+ i2s_con |= rate_reg << I2S2_OUT_MODE_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S2_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S2_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON1,
+ 0xffffeffe, i2s_con);
+ break;
+ case MT8183_DAI_I2S_2:
+ i2s_con = 8 << I2S3_UPDATE_WORD_SFT;
+ i2s_con |= rate_reg << I2S3_OUT_MODE_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S3_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S3_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON2,
+ 0xffffeffe, i2s_con);
+ break;
+ case MT8183_DAI_I2S_3:
+ i2s_con = rate_reg << I2S4_OUT_MODE_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S4_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S4_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON3,
+ 0xffffeffe, i2s_con);
+ break;
+ case MT8183_DAI_I2S_5:
+ i2s_con = rate_reg << I2S5_OUT_MODE_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S5_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S5_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON4,
+ 0xffffeffe, i2s_con);
+ break;
+ default:
+ dev_warn(afe->dev, "%s(), id %d not supported\n",
+ __func__, i2s_id);
+ return -EINVAL;
+ }
+
+ /* set share i2s */
+ if (i2s_priv && i2s_priv->share_i2s_id >= 0)
+ ret = mtk_dai_i2s_config(afe, params, i2s_priv->share_i2s_id);
+
+ return ret;
+}
+
+static int mtk_dai_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+ return mtk_dai_i2s_config(afe, params, dai->id);
+}
+
+static int mtk_dai_i2s_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_i2s_priv *i2s_priv = afe_priv->dai_priv[dai->id];
+ int apll;
+ int apll_rate;
+
+ if (!i2s_priv) {
+ dev_warn(afe->dev, "%s(), i2s_priv == NULL", __func__);
+ return -EINVAL;
+ }
+
+ if (dir != SND_SOC_CLOCK_OUT) {
+ dev_warn(afe->dev, "%s(), dir != SND_SOC_CLOCK_OUT", __func__);
+ return -EINVAL;
+ }
+
+ dev_info(afe->dev, "%s(), freq %d\n", __func__, freq);
+
+ apll = mt8183_get_apll_by_rate(afe, freq);
+ apll_rate = mt8183_get_apll_rate(afe, apll);
+
+ if (freq > apll_rate) {
+ dev_warn(afe->dev, "%s(), freq > apll rate", __func__);
+ return -EINVAL;
+ }
+
+ if (apll_rate % freq != 0) {
+ dev_warn(afe->dev, "%s(), APLL cannot generate %d Hz",
+ __func__, freq);
+ return -EINVAL;
+ }
+
+ i2s_priv->mclk_rate = freq;
+ i2s_priv->mclk_apll = apll;
+
+ if (i2s_priv->share_i2s_id > 0) {
+ struct mtk_afe_i2s_priv *share_i2s_priv;
+
+ share_i2s_priv = afe_priv->dai_priv[i2s_priv->share_i2s_id];
+ if (!share_i2s_priv) {
+ dev_warn(afe->dev, "%s(), share_i2s_priv == NULL",
+ __func__);
+ return -EINVAL;
+ }
+
+ share_i2s_priv->mclk_rate = i2s_priv->mclk_rate;
+ share_i2s_priv->mclk_apll = i2s_priv->mclk_apll;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_i2s_ops = {
+ .hw_params = mtk_dai_i2s_hw_params,
+ .set_sysclk = mtk_dai_i2s_set_sysclk,
+};
+
+/* dai driver */
+#define MTK_I2S_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_i2s_driver[] = {
+ {
+ .name = "I2S0",
+ .id = MT8183_DAI_I2S_0,
+ .capture = {
+ .stream_name = "I2S0",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ },
+ {
+ .name = "I2S1",
+ .id = MT8183_DAI_I2S_1,
+ .playback = {
+ .stream_name = "I2S1",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ },
+ {
+ .name = "I2S2",
+ .id = MT8183_DAI_I2S_2,
+ .capture = {
+ .stream_name = "I2S2",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ },
+ {
+ .name = "I2S3",
+ .id = MT8183_DAI_I2S_3,
+ .playback = {
+ .stream_name = "I2S3",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ },
+ {
+ .name = "I2S5",
+ .id = MT8183_DAI_I2S_5,
+ .playback = {
+ .stream_name = "I2S5",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ },
+};
+
+/* this enum is used only to index the mt8183_i2s_priv table below */
+enum {
+ DAI_I2S0 = 0,
+ DAI_I2S1,
+ DAI_I2S2,
+ DAI_I2S3,
+ DAI_I2S5,
+ DAI_I2S_NUM,
+};
+
+static const struct mtk_afe_i2s_priv mt8183_i2s_priv[DAI_I2S_NUM] = {
+ [DAI_I2S0] = {
+ .id = MT8183_DAI_I2S_0,
+ .mclk_id = MT8183_I2S0_MCK,
+ .share_property_name = "i2s0-share",
+ .share_i2s_id = -1,
+ },
+ [DAI_I2S1] = {
+ .id = MT8183_DAI_I2S_1,
+ .mclk_id = MT8183_I2S1_MCK,
+ .share_property_name = "i2s1-share",
+ .share_i2s_id = -1,
+ },
+ [DAI_I2S2] = {
+ .id = MT8183_DAI_I2S_2,
+ .mclk_id = MT8183_I2S2_MCK,
+ .share_property_name = "i2s2-share",
+ .share_i2s_id = -1,
+ },
+ [DAI_I2S3] = {
+ .id = MT8183_DAI_I2S_3,
+ .mclk_id = MT8183_I2S3_MCK,
+ .share_property_name = "i2s3-share",
+ .share_i2s_id = -1,
+ },
+ [DAI_I2S5] = {
+ .id = MT8183_DAI_I2S_5,
+ .mclk_id = MT8183_I2S5_MCK,
+ .share_property_name = "i2s5-share",
+ .share_i2s_id = -1,
+ },
+};
+
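+/*
+ * Parse the optional "i2sN-share" device tree properties.  The property
+ * value names another I2S port; its id is stored in share_i2s_id so the
+ * two ports are configured and clocked together.
+ */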
+static int mt8183_dai_i2s_get_share(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ const struct device_node *of_node = afe->dev->of_node;
+ const char *of_str;
+ const char *property_name;
+ struct mtk_afe_i2s_priv *i2s_priv;
+ int i;
+
+ for (i = 0; i < DAI_I2S_NUM; i++) {
+ i2s_priv = afe_priv->dai_priv[mt8183_i2s_priv[i].id];
+ property_name = mt8183_i2s_priv[i].share_property_name;
+ if (of_property_read_string(of_node, property_name, &of_str))
+ continue;
+ i2s_priv->share_i2s_id = get_i2s_id_by_name(afe, of_str);
+ }
+
+ return 0;
+}
+
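+/* Allocate per-port private data and seed it from the mt8183_i2s_priv table. */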
+static int mt8183_dai_i2s_set_priv(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_i2s_priv *i2s_priv;
+ int i;
+
+ for (i = 0; i < DAI_I2S_NUM; i++) {
+ i2s_priv = devm_kzalloc(afe->dev,
+ sizeof(struct mtk_afe_i2s_priv),
+ GFP_KERNEL);
+ if (!i2s_priv)
+ return -ENOMEM;
+
+ memcpy(i2s_priv, &mt8183_i2s_priv[i],
+ sizeof(struct mtk_afe_i2s_priv));
+
+ afe_priv->dai_priv[mt8183_i2s_priv[i].id] = i2s_priv;
+ }
+
+ return 0;
+}
+
+int mt8183_dai_i2s_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+ int ret;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_i2s_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_i2s_driver);
+
+ dai->controls = mtk_dai_i2s_controls;
+ dai->num_controls = ARRAY_SIZE(mtk_dai_i2s_controls);
+ dai->dapm_widgets = mtk_dai_i2s_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_i2s_widgets);
+ dai->dapm_routes = mtk_dai_i2s_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_i2s_routes);
+
+ /* set all dai i2s private data */
+ ret = mt8183_dai_i2s_set_priv(afe);
+ if (ret)
+ return ret;
+
+ /* parse share i2s */
+ ret = mt8183_dai_i2s_get_share(afe);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-pcm.c b/sound/soc/mediatek/mt8183/mt8183-dai-pcm.c
new file mode 100644
index 000000000000..bc3ba3228f08
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-pcm.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI PCM Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include "mt8183-afe-common.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+
+enum AUD_TX_LCH_RPT {
+ AUD_TX_LCH_RPT_NO_REPEAT = 0,
+ AUD_TX_LCH_RPT_REPEAT = 1
+};
+
+enum AUD_VBT_16K_MODE {
+ AUD_VBT_16K_MODE_DISABLE = 0,
+ AUD_VBT_16K_MODE_ENABLE = 1
+};
+
+enum AUD_EXT_MODEM {
+ AUD_EXT_MODEM_SELECT_INTERNAL = 0,
+ AUD_EXT_MODEM_SELECT_EXTERNAL = 1
+};
+
+enum AUD_PCM_SYNC_TYPE {
+ /* bck sync length = 1 */
+ AUD_PCM_ONE_BCK_CYCLE_SYNC = 0,
+ /* bck sync length = PCM_INTF_CON1[9:13] */
+ AUD_PCM_EXTENDED_BCK_CYCLE_SYNC = 1
+};
+
+enum AUD_BT_MODE {
+ AUD_BT_MODE_DUAL_MIC_ON_TX = 0,
+ AUD_BT_MODE_SINGLE_MIC_ON_TX = 1
+};
+
+enum AUD_PCM_AFIFO_SRC {
+ /* slave mode & external modem use different crystals */
+ AUD_PCM_AFIFO_ASRC = 0,
+ /* slave mode & external modem use the same crystal */
+ AUD_PCM_AFIFO_AFIFO = 1
+};
+
+enum AUD_PCM_CLOCK_SOURCE {
+ AUD_PCM_CLOCK_MASTER_MODE = 0,
+ AUD_PCM_CLOCK_SLAVE_MODE = 1
+};
+
+enum AUD_PCM_WLEN {
+ AUD_PCM_WLEN_PCM_32_BCK_CYCLES = 0,
+ AUD_PCM_WLEN_PCM_64_BCK_CYCLES = 1
+};
+
+enum AUD_PCM_MODE {
+ AUD_PCM_MODE_PCM_MODE_8K = 0,
+ AUD_PCM_MODE_PCM_MODE_16K = 1,
+ AUD_PCM_MODE_PCM_MODE_32K = 2,
+ AUD_PCM_MODE_PCM_MODE_48K = 3,
+};
+
+enum AUD_PCM_FMT {
+ AUD_PCM_FMT_I2S = 0,
+ AUD_PCM_FMT_EIAJ = 1,
+ AUD_PCM_FMT_PCM_MODE_A = 2,
+ AUD_PCM_FMT_PCM_MODE_B = 3
+};
+
+enum AUD_BCLK_OUT_INV {
+ AUD_BCLK_OUT_INV_NO_INVERSE = 0,
+ AUD_BCLK_OUT_INV_INVERSE = 1
+};
+
+enum AUD_PCM_EN {
+ AUD_PCM_EN_DISABLE = 0,
+ AUD_PCM_EN_ENABLE = 1
+};
+
+/* dai component */
+static const struct snd_kcontrol_new mtk_pcm_1_playback_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN7,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN7,
+ I_DL2_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_1_playback_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN8,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN8,
+ I_DL2_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_1_playback_ch4_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN27,
+ I_DL1_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_2_playback_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1", AFE_CONN17,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1", AFE_CONN17,
+ I_DL2_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_2_playback_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2", AFE_CONN18,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2", AFE_CONN18,
+ I_DL2_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_2_playback_ch4_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1", AFE_CONN24,
+ I_DL1_CH1, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget mtk_dai_pcm_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("PCM_1_PB_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_pcm_1_playback_ch1_mix,
+ ARRAY_SIZE(mtk_pcm_1_playback_ch1_mix)),
+ SND_SOC_DAPM_MIXER("PCM_1_PB_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_pcm_1_playback_ch2_mix,
+ ARRAY_SIZE(mtk_pcm_1_playback_ch2_mix)),
+ SND_SOC_DAPM_MIXER("PCM_1_PB_CH4", SND_SOC_NOPM, 0, 0,
+ mtk_pcm_1_playback_ch4_mix,
+ ARRAY_SIZE(mtk_pcm_1_playback_ch4_mix)),
+ SND_SOC_DAPM_MIXER("PCM_2_PB_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_pcm_2_playback_ch1_mix,
+ ARRAY_SIZE(mtk_pcm_2_playback_ch1_mix)),
+ SND_SOC_DAPM_MIXER("PCM_2_PB_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_pcm_2_playback_ch2_mix,
+ ARRAY_SIZE(mtk_pcm_2_playback_ch2_mix)),
+ SND_SOC_DAPM_MIXER("PCM_2_PB_CH4", SND_SOC_NOPM, 0, 0,
+ mtk_pcm_2_playback_ch4_mix,
+ ARRAY_SIZE(mtk_pcm_2_playback_ch4_mix)),
+
+ SND_SOC_DAPM_SUPPLY("PCM_1_EN", PCM_INTF_CON1, PCM_EN_SFT, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("PCM_2_EN", PCM2_INTF_CON, PCM2_EN_SFT, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_INPUT("MD1_TO_AFE"),
+ SND_SOC_DAPM_INPUT("MD2_TO_AFE"),
+ SND_SOC_DAPM_OUTPUT("AFE_TO_MD1"),
+ SND_SOC_DAPM_OUTPUT("AFE_TO_MD2"),
+};
+
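+/*
+ * Note the crossed mapping below: PCM 2 carries the MD1 (AFE_TO_MD1 /
+ * MD1_TO_AFE) path while PCM 1 carries the MD2 path.
+ */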
+static const struct snd_soc_dapm_route mtk_dai_pcm_routes[] = {
+ {"PCM 1 Playback", NULL, "PCM_1_PB_CH1"},
+ {"PCM 1 Playback", NULL, "PCM_1_PB_CH2"},
+ {"PCM 1 Playback", NULL, "PCM_1_PB_CH4"},
+ {"PCM 2 Playback", NULL, "PCM_2_PB_CH1"},
+ {"PCM 2 Playback", NULL, "PCM_2_PB_CH2"},
+ {"PCM 2 Playback", NULL, "PCM_2_PB_CH4"},
+
+ {"PCM 1 Playback", NULL, "PCM_1_EN"},
+ {"PCM 2 Playback", NULL, "PCM_2_EN"},
+ {"PCM 1 Capture", NULL, "PCM_1_EN"},
+ {"PCM 2 Capture", NULL, "PCM_2_EN"},
+
+ {"AFE_TO_MD1", NULL, "PCM 2 Playback"},
+ {"AFE_TO_MD2", NULL, "PCM 1 Playback"},
+ {"PCM 2 Capture", NULL, "MD1_TO_AFE"},
+ {"PCM 1 Capture", NULL, "MD2_TO_AFE"},
+
+ {"PCM_1_PB_CH1", "DL2_CH1", "DL2"},
+ {"PCM_1_PB_CH2", "DL2_CH2", "DL2"},
+ {"PCM_1_PB_CH4", "DL1_CH1", "DL1"},
+ {"PCM_2_PB_CH1", "DL2_CH1", "DL2"},
+ {"PCM_2_PB_CH2", "DL2_CH2", "DL2"},
+ {"PCM_2_PB_CH4", "DL1_CH1", "DL1"},
+};
+
+/* dai ops */
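+/*
+ * The PCM interfaces connect to the modem, so playback and capture share
+ * one interface configuration: hw_params programs the interface only when
+ * neither direction's widget is active yet.
+ */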
+static int mtk_dai_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ unsigned int rate = params_rate(params);
+ unsigned int rate_reg = mt8183_rate_transform(afe->dev, rate, dai->id);
+ unsigned int pcm_con = 0;
+
+ dev_dbg(afe->dev, "%s(), id %d, stream %d, rate %d, rate_reg %d, widget active p %d, c %d\n",
+ __func__,
+ dai->id,
+ substream->stream,
+ rate,
+ rate_reg,
+ dai->playback_widget->active,
+ dai->capture_widget->active);
+
+ if (dai->playback_widget->active || dai->capture_widget->active)
+ return 0;
+
+ switch (dai->id) {
+ case MT8183_DAI_PCM_1:
+ pcm_con |= AUD_BCLK_OUT_INV_NO_INVERSE << PCM_BCLK_OUT_INV_SFT;
+ pcm_con |= AUD_TX_LCH_RPT_NO_REPEAT << PCM_TX_LCH_RPT_SFT;
+ pcm_con |= AUD_VBT_16K_MODE_DISABLE << PCM_VBT_16K_MODE_SFT;
+ pcm_con |= AUD_EXT_MODEM_SELECT_INTERNAL << PCM_EXT_MODEM_SFT;
+ pcm_con |= 0 << PCM_SYNC_LENGTH_SFT;
+ pcm_con |= AUD_PCM_ONE_BCK_CYCLE_SYNC << PCM_SYNC_TYPE_SFT;
+ pcm_con |= AUD_BT_MODE_DUAL_MIC_ON_TX << PCM_BT_MODE_SFT;
+ pcm_con |= AUD_PCM_AFIFO_AFIFO << PCM_BYP_ASRC_SFT;
+ pcm_con |= AUD_PCM_CLOCK_SLAVE_MODE << PCM_SLAVE_SFT;
+ pcm_con |= rate_reg << PCM_MODE_SFT;
+ pcm_con |= AUD_PCM_FMT_PCM_MODE_B << PCM_FMT_SFT;
+
+ regmap_update_bits(afe->regmap, PCM_INTF_CON1,
+ 0xfffffffe, pcm_con);
+ break;
+ case MT8183_DAI_PCM_2:
+ pcm_con |= AUD_TX_LCH_RPT_NO_REPEAT << PCM2_TX_LCH_RPT_SFT;
+ pcm_con |= AUD_VBT_16K_MODE_DISABLE << PCM2_VBT_16K_MODE_SFT;
+ pcm_con |= AUD_BT_MODE_DUAL_MIC_ON_TX << PCM2_BT_MODE_SFT;
+ pcm_con |= AUD_PCM_AFIFO_AFIFO << PCM2_AFIFO_SFT;
+ pcm_con |= AUD_PCM_WLEN_PCM_32_BCK_CYCLES << PCM2_WLEN_SFT;
+ pcm_con |= rate_reg << PCM2_MODE_SFT;
+ pcm_con |= AUD_PCM_FMT_PCM_MODE_B << PCM2_FMT_SFT;
+
+ regmap_update_bits(afe->regmap, PCM2_INTF_CON,
+ 0xfffffffe, pcm_con);
+ break;
+ default:
+ dev_warn(afe->dev, "%s(), id %d not supported\n",
+ __func__, dai->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_pcm_ops = {
+ .hw_params = mtk_dai_pcm_hw_params,
+};
+
+/* dai driver */
+#define MTK_PCM_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_48000)
+
+#define MTK_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_pcm_driver[] = {
+ {
+ .name = "PCM 1",
+ .id = MT8183_DAI_PCM_1,
+ .playback = {
+ .stream_name = "PCM 1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .capture = {
+ .stream_name = "PCM 1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_dai_pcm_ops,
+ .symmetric_rates = 1,
+ .symmetric_samplebits = 1,
+ },
+ {
+ .name = "PCM 2",
+ .id = MT8183_DAI_PCM_2,
+ .playback = {
+ .stream_name = "PCM 2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .capture = {
+ .stream_name = "PCM 2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_dai_pcm_ops,
+ .symmetric_rates = 1,
+ .symmetric_samplebits = 1,
+ },
+};
+
+int mt8183_dai_pcm_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_pcm_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_pcm_driver);
+
+ dai->dapm_widgets = mtk_dai_pcm_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_pcm_widgets);
+ dai->dapm_routes = mtk_dai_pcm_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_pcm_routes);
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-dai-tdm.c b/sound/soc/mediatek/mt8183/mt8183-dai-tdm.c
new file mode 100644
index 000000000000..8983d54a9b67
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-dai-tdm.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI TDM Control
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include "mt8183-afe-clk.h"
+#include "mt8183-afe-common.h"
+#include "mt8183-interconnection.h"
+#include "mt8183-reg.h"
+
+struct mtk_afe_tdm_priv {
+ int bck_id;
+ int bck_rate;
+
+ int mclk_id;
+ int mclk_multiple; /* mclk = sample rate * mclk_multiple */
+ int mclk_rate;
+ int mclk_apll;
+};
+
+enum {
+ TDM_WLEN_16_BIT = 1,
+ TDM_WLEN_32_BIT = 2,
+};
+
+enum {
+ TDM_CHANNEL_BCK_16 = 0,
+ TDM_CHANNEL_BCK_24 = 1,
+ TDM_CHANNEL_BCK_32 = 2,
+};
+
+enum {
+ TDM_CHANNEL_NUM_2 = 0,
+ TDM_CHANNEL_NUM_4 = 1,
+ TDM_CHANNEL_NUM_8 = 2,
+};
+
+enum {
+ TDM_CH_START_O30_O31 = 0,
+ TDM_CH_START_O32_O33,
+ TDM_CH_START_O34_O35,
+ TDM_CH_START_O36_O37,
+ TDM_CH_ZERO,
+};
+
+enum {
+ HDMI_BIT_WIDTH_16_BIT = 0,
+ HDMI_BIT_WIDTH_32_BIT = 1,
+};
+
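+/*
+ * Helpers translating the ALSA format and channel count into the raw
+ * field values used in AFE_TDM_CON1 and AFE_HDMI_OUT_CON0 below.
+ */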
+static unsigned int get_hdmi_wlen(snd_pcm_format_t format)
+{
+ return snd_pcm_format_physical_width(format) <= 16 ?
+ HDMI_BIT_WIDTH_16_BIT : HDMI_BIT_WIDTH_32_BIT;
+}
+
+static unsigned int get_tdm_wlen(snd_pcm_format_t format)
+{
+ return snd_pcm_format_physical_width(format) <= 16 ?
+ TDM_WLEN_16_BIT : TDM_WLEN_32_BIT;
+}
+
+static unsigned int get_tdm_channel_bck(snd_pcm_format_t format)
+{
+ return snd_pcm_format_physical_width(format) <= 16 ?
+ TDM_CHANNEL_BCK_16 : TDM_CHANNEL_BCK_32;
+}
+
+static unsigned int get_tdm_lrck_width(snd_pcm_format_t format)
+{
+ return snd_pcm_format_physical_width(format) - 1;
+}
+
+static unsigned int get_tdm_ch(unsigned int ch)
+{
+ switch (ch) {
+ case 1:
+ case 2:
+ return TDM_CHANNEL_NUM_2;
+ case 3:
+ case 4:
+ return TDM_CHANNEL_NUM_4;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ default:
+ return TDM_CHANNEL_NUM_8;
+ }
+}
+
+/* interconnection */
+enum {
+ HDMI_CONN_CH0 = 0,
+ HDMI_CONN_CH1,
+ HDMI_CONN_CH2,
+ HDMI_CONN_CH3,
+ HDMI_CONN_CH4,
+ HDMI_CONN_CH5,
+ HDMI_CONN_CH6,
+ HDMI_CONN_CH7,
+};
+
+static const char *const hdmi_conn_mux_map[] = {
+ "CH0", "CH1", "CH2", "CH3",
+ "CH4", "CH5", "CH6", "CH7",
+};
+
+static int hdmi_conn_mux_map_value[] = {
+ HDMI_CONN_CH0,
+ HDMI_CONN_CH1,
+ HDMI_CONN_CH2,
+ HDMI_CONN_CH3,
+ HDMI_CONN_CH4,
+ HDMI_CONN_CH5,
+ HDMI_CONN_CH6,
+ HDMI_CONN_CH7,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch0_mux_map_enum,
+ AFE_HDMI_CONN0,
+ HDMI_O_0_SFT,
+ HDMI_O_0_MASK,
+ hdmi_conn_mux_map,
+ hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch0_mux_control =
+ SOC_DAPM_ENUM("HDMI_CH0_MUX", hdmi_ch0_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch1_mux_map_enum,
+ AFE_HDMI_CONN0,
+ HDMI_O_1_SFT,
+ HDMI_O_1_MASK,
+ hdmi_conn_mux_map,
+ hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch1_mux_control =
+ SOC_DAPM_ENUM("HDMI_CH1_MUX", hdmi_ch1_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch2_mux_map_enum,
+ AFE_HDMI_CONN0,
+ HDMI_O_2_SFT,
+ HDMI_O_2_MASK,
+ hdmi_conn_mux_map,
+ hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch2_mux_control =
+ SOC_DAPM_ENUM("HDMI_CH2_MUX", hdmi_ch2_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch3_mux_map_enum,
+ AFE_HDMI_CONN0,
+ HDMI_O_3_SFT,
+ HDMI_O_3_MASK,
+ hdmi_conn_mux_map,
+ hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch3_mux_control =
+ SOC_DAPM_ENUM("HDMI_CH3_MUX", hdmi_ch3_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch4_mux_map_enum,
+ AFE_HDMI_CONN0,
+ HDMI_O_4_SFT,
+ HDMI_O_4_MASK,
+ hdmi_conn_mux_map,
+ hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch4_mux_control =
+ SOC_DAPM_ENUM("HDMI_CH4_MUX", hdmi_ch4_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch5_mux_map_enum,
+ AFE_HDMI_CONN0,
+ HDMI_O_5_SFT,
+ HDMI_O_5_MASK,
+ hdmi_conn_mux_map,
+ hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch5_mux_control =
+ SOC_DAPM_ENUM("HDMI_CH5_MUX", hdmi_ch5_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch6_mux_map_enum,
+ AFE_HDMI_CONN0,
+ HDMI_O_6_SFT,
+ HDMI_O_6_MASK,
+ hdmi_conn_mux_map,
+ hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch6_mux_control =
+ SOC_DAPM_ENUM("HDMI_CH6_MUX", hdmi_ch6_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(hdmi_ch7_mux_map_enum,
+ AFE_HDMI_CONN0,
+ HDMI_O_7_SFT,
+ HDMI_O_7_MASK,
+ hdmi_conn_mux_map,
+ hdmi_conn_mux_map_value);
+
+static const struct snd_kcontrol_new hdmi_ch7_mux_control =
+ SOC_DAPM_ENUM("HDMI_CH7_MUX", hdmi_ch7_mux_map_enum);
+
+enum {
+ SUPPLY_SEQ_APLL,
+ SUPPLY_SEQ_TDM_MCK_EN,
+ SUPPLY_SEQ_TDM_BCK_EN,
+};
+
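+/*
+ * DAPM supply events: enable the TDM bit clock / master clock just before
+ * the path powers up and release them after power down, using the rates
+ * computed in hw_params() and set_sysclk().
+ */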
+static int mtk_tdm_bck_en_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[MT8183_DAI_TDM];
+
+ dev_info(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8183_mck_enable(afe, tdm_priv->bck_id, tdm_priv->bck_rate);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ mt8183_mck_disable(afe, tdm_priv->bck_id);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mtk_tdm_mck_en_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[MT8183_DAI_TDM];
+
+ dev_info(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8183_mck_enable(afe, tdm_priv->mclk_id, tdm_priv->mclk_rate);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ tdm_priv->mclk_rate = 0;
+ mt8183_mck_disable(afe, tdm_priv->mclk_id);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget mtk_dai_tdm_widgets[] = {
+ SND_SOC_DAPM_MUX("HDMI_CH0_MUX", SND_SOC_NOPM, 0, 0,
+ &hdmi_ch0_mux_control),
+ SND_SOC_DAPM_MUX("HDMI_CH1_MUX", SND_SOC_NOPM, 0, 0,
+ &hdmi_ch1_mux_control),
+ SND_SOC_DAPM_MUX("HDMI_CH2_MUX", SND_SOC_NOPM, 0, 0,
+ &hdmi_ch2_mux_control),
+ SND_SOC_DAPM_MUX("HDMI_CH3_MUX", SND_SOC_NOPM, 0, 0,
+ &hdmi_ch3_mux_control),
+ SND_SOC_DAPM_MUX("HDMI_CH4_MUX", SND_SOC_NOPM, 0, 0,
+ &hdmi_ch4_mux_control),
+ SND_SOC_DAPM_MUX("HDMI_CH5_MUX", SND_SOC_NOPM, 0, 0,
+ &hdmi_ch5_mux_control),
+ SND_SOC_DAPM_MUX("HDMI_CH6_MUX", SND_SOC_NOPM, 0, 0,
+ &hdmi_ch6_mux_control),
+ SND_SOC_DAPM_MUX("HDMI_CH7_MUX", SND_SOC_NOPM, 0, 0,
+ &hdmi_ch7_mux_control),
+
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_tdm_clk"),
+
+ SND_SOC_DAPM_SUPPLY_S("TDM_BCK", SUPPLY_SEQ_TDM_BCK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_tdm_bck_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY_S("TDM_MCK", SUPPLY_SEQ_TDM_MCK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_tdm_mck_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static int mtk_afe_tdm_apll_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[MT8183_DAI_TDM];
+ int cur_apll;
+
+ /* which apll */
+ cur_apll = mt8183_get_apll_by_name(afe, source->name);
+
+ return (tdm_priv->mclk_apll == cur_apll) ? 1 : 0;
+}
+
+static const struct snd_soc_dapm_route mtk_dai_tdm_routes[] = {
+ {"HDMI_CH0_MUX", "CH0", "HDMI"},
+ {"HDMI_CH0_MUX", "CH1", "HDMI"},
+ {"HDMI_CH0_MUX", "CH2", "HDMI"},
+ {"HDMI_CH0_MUX", "CH3", "HDMI"},
+ {"HDMI_CH0_MUX", "CH4", "HDMI"},
+ {"HDMI_CH0_MUX", "CH5", "HDMI"},
+ {"HDMI_CH0_MUX", "CH6", "HDMI"},
+ {"HDMI_CH0_MUX", "CH7", "HDMI"},
+
+ {"HDMI_CH1_MUX", "CH0", "HDMI"},
+ {"HDMI_CH1_MUX", "CH1", "HDMI"},
+ {"HDMI_CH1_MUX", "CH2", "HDMI"},
+ {"HDMI_CH1_MUX", "CH3", "HDMI"},
+ {"HDMI_CH1_MUX", "CH4", "HDMI"},
+ {"HDMI_CH1_MUX", "CH5", "HDMI"},
+ {"HDMI_CH1_MUX", "CH6", "HDMI"},
+ {"HDMI_CH1_MUX", "CH7", "HDMI"},
+
+ {"HDMI_CH2_MUX", "CH0", "HDMI"},
+ {"HDMI_CH2_MUX", "CH1", "HDMI"},
+ {"HDMI_CH2_MUX", "CH2", "HDMI"},
+ {"HDMI_CH2_MUX", "CH3", "HDMI"},
+ {"HDMI_CH2_MUX", "CH4", "HDMI"},
+ {"HDMI_CH2_MUX", "CH5", "HDMI"},
+ {"HDMI_CH2_MUX", "CH6", "HDMI"},
+ {"HDMI_CH2_MUX", "CH7", "HDMI"},
+
+ {"HDMI_CH3_MUX", "CH0", "HDMI"},
+ {"HDMI_CH3_MUX", "CH1", "HDMI"},
+ {"HDMI_CH3_MUX", "CH2", "HDMI"},
+ {"HDMI_CH3_MUX", "CH3", "HDMI"},
+ {"HDMI_CH3_MUX", "CH4", "HDMI"},
+ {"HDMI_CH3_MUX", "CH5", "HDMI"},
+ {"HDMI_CH3_MUX", "CH6", "HDMI"},
+ {"HDMI_CH3_MUX", "CH7", "HDMI"},
+
+ {"HDMI_CH4_MUX", "CH0", "HDMI"},
+ {"HDMI_CH4_MUX", "CH1", "HDMI"},
+ {"HDMI_CH4_MUX", "CH2", "HDMI"},
+ {"HDMI_CH4_MUX", "CH3", "HDMI"},
+ {"HDMI_CH4_MUX", "CH4", "HDMI"},
+ {"HDMI_CH4_MUX", "CH5", "HDMI"},
+ {"HDMI_CH4_MUX", "CH6", "HDMI"},
+ {"HDMI_CH4_MUX", "CH7", "HDMI"},
+
+ {"HDMI_CH5_MUX", "CH0", "HDMI"},
+ {"HDMI_CH5_MUX", "CH1", "HDMI"},
+ {"HDMI_CH5_MUX", "CH2", "HDMI"},
+ {"HDMI_CH5_MUX", "CH3", "HDMI"},
+ {"HDMI_CH5_MUX", "CH4", "HDMI"},
+ {"HDMI_CH5_MUX", "CH5", "HDMI"},
+ {"HDMI_CH5_MUX", "CH6", "HDMI"},
+ {"HDMI_CH5_MUX", "CH7", "HDMI"},
+
+ {"HDMI_CH6_MUX", "CH0", "HDMI"},
+ {"HDMI_CH6_MUX", "CH1", "HDMI"},
+ {"HDMI_CH6_MUX", "CH2", "HDMI"},
+ {"HDMI_CH6_MUX", "CH3", "HDMI"},
+ {"HDMI_CH6_MUX", "CH4", "HDMI"},
+ {"HDMI_CH6_MUX", "CH5", "HDMI"},
+ {"HDMI_CH6_MUX", "CH6", "HDMI"},
+ {"HDMI_CH6_MUX", "CH7", "HDMI"},
+
+ {"HDMI_CH7_MUX", "CH0", "HDMI"},
+ {"HDMI_CH7_MUX", "CH1", "HDMI"},
+ {"HDMI_CH7_MUX", "CH2", "HDMI"},
+ {"HDMI_CH7_MUX", "CH3", "HDMI"},
+ {"HDMI_CH7_MUX", "CH4", "HDMI"},
+ {"HDMI_CH7_MUX", "CH5", "HDMI"},
+ {"HDMI_CH7_MUX", "CH6", "HDMI"},
+ {"HDMI_CH7_MUX", "CH7", "HDMI"},
+
+ {"TDM", NULL, "HDMI_CH0_MUX"},
+ {"TDM", NULL, "HDMI_CH1_MUX"},
+ {"TDM", NULL, "HDMI_CH2_MUX"},
+ {"TDM", NULL, "HDMI_CH3_MUX"},
+ {"TDM", NULL, "HDMI_CH4_MUX"},
+ {"TDM", NULL, "HDMI_CH5_MUX"},
+ {"TDM", NULL, "HDMI_CH6_MUX"},
+ {"TDM", NULL, "HDMI_CH7_MUX"},
+
+ {"TDM", NULL, "aud_tdm_clk"},
+ {"TDM", NULL, "TDM_BCK"},
+ {"TDM_BCK", NULL, "TDM_MCK"},
+ {"TDM_MCK", NULL, APLL1_W_NAME, mtk_afe_tdm_apll_connect},
+ {"TDM_MCK", NULL, APLL2_W_NAME, mtk_afe_tdm_apll_connect},
+};
+
+/* dai ops */
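+/*
+ * Pick the APLL whose rate is an integer multiple of the requested MCLK
+ * and remember both; the clock itself is switched on later by the
+ * TDM_MCK supply widget event.
+ */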
+static int mtk_dai_tdm_cal_mclk(struct mtk_base_afe *afe,
+ struct mtk_afe_tdm_priv *tdm_priv,
+ int freq)
+{
+ int apll;
+ int apll_rate;
+
+ apll = mt8183_get_apll_by_rate(afe, freq);
+ apll_rate = mt8183_get_apll_rate(afe, apll);
+
+ if (!freq || freq > apll_rate) {
+ dev_warn(afe->dev,
+ "%s(), freq(%d Hz) invalid\n", __func__, freq);
+ return -EINVAL;
+ }
+
+ if (apll_rate % freq != 0) {
+ dev_warn(afe->dev,
+ "%s(), APLL cannot generate %d Hz", __func__, freq);
+ return -EINVAL;
+ }
+
+ tdm_priv->mclk_rate = freq;
+ tdm_priv->mclk_apll = apll;
+
+ return 0;
+}
+
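+/*
+ * hw_params: default MCLK to rate * mclk_multiple when set_sysclk() was
+ * not used, derive BCK as rate * channels * physical sample width, then
+ * program AFE_TDM_CON1/CON2 and the HDMI out channel/width fields.
+ */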
+static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ int tdm_id = dai->id;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[tdm_id];
+ unsigned int rate = params_rate(params);
+ unsigned int channels = params_channels(params);
+ snd_pcm_format_t format = params_format(params);
+ unsigned int tdm_con = 0;
+
+ /* calculate mclk_rate, if not set explicitly */
+ if (!tdm_priv->mclk_rate) {
+ tdm_priv->mclk_rate = rate * tdm_priv->mclk_multiple;
+ mtk_dai_tdm_cal_mclk(afe,
+ tdm_priv,
+ tdm_priv->mclk_rate);
+ }
+
+ /* calculate bck */
+ tdm_priv->bck_rate = rate *
+ channels *
+ snd_pcm_format_physical_width(format);
+
+ if (tdm_priv->bck_rate > tdm_priv->mclk_rate)
+ dev_warn(afe->dev, "%s(), bck_rate > mclk_rate", __func__);
+
+ if (tdm_priv->mclk_rate % tdm_priv->bck_rate != 0)
+ dev_warn(afe->dev, "%s(), mclk cannot generate bck", __func__);
+
+ dev_info(afe->dev, "%s(), id %d, rate %d, channels %d, format %d, mclk_rate %d, bck_rate %d\n",
+ __func__,
+ tdm_id, rate, channels, format,
+ tdm_priv->mclk_rate, tdm_priv->bck_rate);
+
+ /* set tdm */
+ tdm_con = 1 << BCK_INVERSE_SFT;
+ tdm_con |= 1 << LRCK_INVERSE_SFT;
+ tdm_con |= 1 << DELAY_DATA_SFT;
+ tdm_con |= 1 << LEFT_ALIGN_SFT;
+ tdm_con |= get_tdm_wlen(format) << WLEN_SFT;
+ tdm_con |= get_tdm_ch(channels) << CHANNEL_NUM_SFT;
+ tdm_con |= get_tdm_channel_bck(format) << CHANNEL_BCK_CYCLES_SFT;
+ tdm_con |= get_tdm_lrck_width(format) << LRCK_TDM_WIDTH_SFT;
+ regmap_write(afe->regmap, AFE_TDM_CON1, tdm_con);
+
+ switch (channels) {
+ case 1:
+ case 2:
+ tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT;
+ tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT1_SFT;
+ tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT2_SFT;
+ tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT;
+ break;
+ case 3:
+ case 4:
+ tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT;
+ tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT;
+ tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT2_SFT;
+ tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT;
+ break;
+ case 5:
+ case 6:
+ tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT;
+ tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT;
+ tdm_con |= TDM_CH_START_O34_O35 << ST_CH_PAIR_SOUT2_SFT;
+ tdm_con |= TDM_CH_ZERO << ST_CH_PAIR_SOUT3_SFT;
+ break;
+ case 7:
+ case 8:
+ tdm_con = TDM_CH_START_O30_O31 << ST_CH_PAIR_SOUT0_SFT;
+ tdm_con |= TDM_CH_START_O32_O33 << ST_CH_PAIR_SOUT1_SFT;
+ tdm_con |= TDM_CH_START_O34_O35 << ST_CH_PAIR_SOUT2_SFT;
+ tdm_con |= TDM_CH_START_O36_O37 << ST_CH_PAIR_SOUT3_SFT;
+ break;
+ default:
+ tdm_con = 0;
+ }
+ regmap_write(afe->regmap, AFE_TDM_CON2, tdm_con);
+
+ regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+ AFE_HDMI_OUT_CH_NUM_MASK_SFT,
+ channels << AFE_HDMI_OUT_CH_NUM_SFT);
+
+ regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+ AFE_HDMI_OUT_BIT_WIDTH_MASK_SFT,
+ get_hdmi_wlen(format) << AFE_HDMI_OUT_BIT_WIDTH_SFT);
+ return 0;
+}
+
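+/* Start/stop: toggle the HDMI out enable and the TDM enable together. */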
+static int mtk_dai_tdm_trigger(struct snd_pcm_substream *substream,
+ int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /* enable Out control */
+ regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+ AFE_HDMI_OUT_ON_MASK_SFT,
+ 0x1 << AFE_HDMI_OUT_ON_SFT);
+ /* enable tdm */
+ regmap_update_bits(afe->regmap, AFE_TDM_CON1,
+ TDM_EN_MASK_SFT, 0x1 << TDM_EN_SFT);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ /* disable tdm */
+ regmap_update_bits(afe->regmap, AFE_TDM_CON1,
+ TDM_EN_MASK_SFT, 0);
+ /* disable Out control */
+ regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0,
+ AFE_HDMI_OUT_ON_MASK_SFT,
+ 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mtk_dai_tdm_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai->id];
+
+ if (!tdm_priv) {
+ dev_warn(afe->dev, "%s(), tdm_priv == NULL", __func__);
+ return -EINVAL;
+ }
+
+ if (dir != SND_SOC_CLOCK_OUT) {
+ dev_warn(afe->dev, "%s(), dir != SND_SOC_CLOCK_OUT", __func__);
+ return -EINVAL;
+ }
+
+ dev_info(afe->dev, "%s(), freq %d\n", __func__, freq);
+
+ return mtk_dai_tdm_cal_mclk(afe, tdm_priv, freq);
+}
+
+static const struct snd_soc_dai_ops mtk_dai_tdm_ops = {
+ .hw_params = mtk_dai_tdm_hw_params,
+ .trigger = mtk_dai_tdm_trigger,
+ .set_sysclk = mtk_dai_tdm_set_sysclk,
+};
+
+/* dai driver */
+#define MTK_TDM_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_TDM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_tdm_driver[] = {
+ {
+ .name = "TDM",
+ .id = MT8183_DAI_TDM,
+ .playback = {
+ .stream_name = "TDM",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = MTK_TDM_RATES,
+ .formats = MTK_TDM_FORMATS,
+ },
+ .ops = &mtk_dai_tdm_ops,
+ },
+};
+
+int mt8183_dai_tdm_register(struct mtk_base_afe *afe)
+{
+ struct mt8183_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv;
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_tdm_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_tdm_driver);
+
+ dai->dapm_widgets = mtk_dai_tdm_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_tdm_widgets);
+ dai->dapm_routes = mtk_dai_tdm_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_tdm_routes);
+
+ tdm_priv = devm_kzalloc(afe->dev, sizeof(struct mtk_afe_tdm_priv),
+ GFP_KERNEL);
+ if (!tdm_priv)
+ return -ENOMEM;
+
+ tdm_priv->mclk_multiple = 128;
+ tdm_priv->bck_id = MT8183_I2S4_BCK;
+ tdm_priv->mclk_id = MT8183_I2S4_MCK;
+
+ afe_priv->dai_priv[MT8183_DAI_TDM] = tdm_priv;
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8183/mt8183-interconnection.h b/sound/soc/mediatek/mt8183/mt8183-interconnection.h
new file mode 100644
index 000000000000..6332f5f3e987
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-interconnection.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MediaTek MT8183 audio driver interconnection definition
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef _MT8183_INTERCONNECTION_H_
+#define _MT8183_INTERCONNECTION_H_
+
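+/*
+ * Interconnection input indexes; in this driver they are used as the bit
+ * shift within the AFE_CONNx registers when declaring mixer controls.
+ */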
+#define I_I2S0_CH1 0
+#define I_I2S0_CH2 1
+#define I_ADDA_UL_CH1 3
+#define I_ADDA_UL_CH2 4
+#define I_DL1_CH1 5
+#define I_DL1_CH2 6
+#define I_DL2_CH1 7
+#define I_DL2_CH2 8
+#define I_PCM_1_CAP_CH1 9
+#define I_GAIN1_OUT_CH1 10
+#define I_GAIN1_OUT_CH2 11
+#define I_GAIN2_OUT_CH1 12
+#define I_GAIN2_OUT_CH2 13
+#define I_PCM_2_CAP_CH1 14
+#define I_PCM_2_CAP_CH2 21
+#define I_PCM_1_CAP_CH2 22
+#define I_DL3_CH1 23
+#define I_DL3_CH2 24
+#define I_I2S2_CH1 25
+#define I_I2S2_CH2 26
+
+#endif
diff --git a/sound/soc/mediatek/mt8183/mt8183-reg.h b/sound/soc/mediatek/mt8183/mt8183-reg.h
new file mode 100644
index 000000000000..e0482f2826da
--- /dev/null
+++ b/sound/soc/mediatek/mt8183/mt8183-reg.h
@@ -0,0 +1,1666 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt8183-reg.h -- MediaTek MT8183 audio driver reg definition
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: KaiChieh Chuang <kaichieh.chuang@mediatek.com>
+ */
+
+#ifndef _MT8183_REG_H_
+#define _MT8183_REG_H_
+
+#define AUDIO_TOP_CON0 0x0000
+#define AUDIO_TOP_CON1 0x0004
+#define AUDIO_TOP_CON3 0x000c
+#define AFE_DAC_CON0 0x0010
+#define AFE_DAC_CON1 0x0014
+#define AFE_I2S_CON 0x0018
+#define AFE_DAIBT_CON0 0x001c
+#define AFE_CONN0 0x0020
+#define AFE_CONN1 0x0024
+#define AFE_CONN2 0x0028
+#define AFE_CONN3 0x002c
+#define AFE_CONN4 0x0030
+#define AFE_I2S_CON1 0x0034
+#define AFE_I2S_CON2 0x0038
+#define AFE_MRGIF_CON 0x003c
+#define AFE_DL1_BASE 0x0040
+#define AFE_DL1_CUR 0x0044
+#define AFE_DL1_END 0x0048
+#define AFE_I2S_CON3 0x004c
+#define AFE_DL2_BASE 0x0050
+#define AFE_DL2_CUR 0x0054
+#define AFE_DL2_END 0x0058
+#define AFE_CONN5 0x005c
+#define AFE_CONN_24BIT 0x006c
+#define AFE_AWB_BASE 0x0070
+#define AFE_AWB_END 0x0078
+#define AFE_AWB_CUR 0x007c
+#define AFE_VUL_BASE 0x0080
+#define AFE_VUL_END 0x0088
+#define AFE_VUL_CUR 0x008c
+#define AFE_CONN6 0x00bc
+#define AFE_MEMIF_MSB 0x00cc
+#define AFE_MEMIF_MON0 0x00d0
+#define AFE_MEMIF_MON1 0x00d4
+#define AFE_MEMIF_MON2 0x00d8
+#define AFE_MEMIF_MON3 0x00dc
+#define AFE_MEMIF_MON4 0x00e0
+#define AFE_MEMIF_MON5 0x00e4
+#define AFE_MEMIF_MON6 0x00e8
+#define AFE_MEMIF_MON7 0x00ec
+#define AFE_MEMIF_MON8 0x00f0
+#define AFE_MEMIF_MON9 0x00f4
+#define AFE_ADDA_DL_SRC2_CON0 0x0108
+#define AFE_ADDA_DL_SRC2_CON1 0x010c
+#define AFE_ADDA_UL_SRC_CON0 0x0114
+#define AFE_ADDA_UL_SRC_CON1 0x0118
+#define AFE_ADDA_TOP_CON0 0x0120
+#define AFE_ADDA_UL_DL_CON0 0x0124
+#define AFE_ADDA_SRC_DEBUG 0x012c
+#define AFE_ADDA_SRC_DEBUG_MON0 0x0130
+#define AFE_ADDA_SRC_DEBUG_MON1 0x0134
+#define AFE_ADDA_UL_SRC_MON0 0x0148
+#define AFE_ADDA_UL_SRC_MON1 0x014c
+#define AFE_SIDETONE_DEBUG 0x01d0
+#define AFE_SIDETONE_MON 0x01d4
+#define AFE_SINEGEN_CON2 0x01dc
+#define AFE_SIDETONE_CON0 0x01e0
+#define AFE_SIDETONE_COEFF 0x01e4
+#define AFE_SIDETONE_CON1 0x01e8
+#define AFE_SIDETONE_GAIN 0x01ec
+#define AFE_SINEGEN_CON0 0x01f0
+#define AFE_TOP_CON0 0x0200
+#define AFE_BUS_CFG 0x0240
+#define AFE_BUS_MON0 0x0244
+#define AFE_ADDA_PREDIS_CON0 0x0260
+#define AFE_ADDA_PREDIS_CON1 0x0264
+#define AFE_MRGIF_MON0 0x0270
+#define AFE_MRGIF_MON1 0x0274
+#define AFE_MRGIF_MON2 0x0278
+#define AFE_I2S_MON 0x027c
+#define AFE_ADDA_IIR_COEF_02_01 0x0290
+#define AFE_ADDA_IIR_COEF_04_03 0x0294
+#define AFE_ADDA_IIR_COEF_06_05 0x0298
+#define AFE_ADDA_IIR_COEF_08_07 0x029c
+#define AFE_ADDA_IIR_COEF_10_09 0x02a0
+#define AFE_DAC_CON2 0x02e0
+#define AFE_IRQ_MCU_CON1 0x02e4
+#define AFE_IRQ_MCU_CON2 0x02e8
+#define AFE_DAC_MON 0x02ec
+#define AFE_VUL2_BASE 0x02f0
+#define AFE_VUL2_END 0x02f8
+#define AFE_VUL2_CUR 0x02fc
+#define AFE_IRQ_MCU_CNT0 0x0300
+#define AFE_IRQ_MCU_CNT6 0x0304
+#define AFE_IRQ_MCU_CNT8 0x0308
+#define AFE_IRQ_MCU_EN1 0x030c
+#define AFE_IRQ0_MCU_CNT_MON 0x0310
+#define AFE_IRQ6_MCU_CNT_MON 0x0314
+#define AFE_MOD_DAI_BASE 0x0330
+#define AFE_MOD_DAI_END 0x0338
+#define AFE_MOD_DAI_CUR 0x033c
+#define AFE_VUL_D2_BASE 0x0350
+#define AFE_VUL_D2_END 0x0358
+#define AFE_VUL_D2_CUR 0x035c
+#define AFE_DL3_BASE 0x0360
+#define AFE_DL3_CUR 0x0364
+#define AFE_DL3_END 0x0368
+#define AFE_HDMI_OUT_CON0 0x0370
+#define AFE_HDMI_OUT_BASE 0x0374
+#define AFE_HDMI_OUT_CUR 0x0378
+#define AFE_HDMI_OUT_END 0x037c
+#define AFE_HDMI_CONN0 0x0390
+#define AFE_IRQ3_MCU_CNT_MON 0x0398
+#define AFE_IRQ4_MCU_CNT_MON 0x039c
+#define AFE_IRQ_MCU_CON0 0x03a0
+#define AFE_IRQ_MCU_STATUS 0x03a4
+#define AFE_IRQ_MCU_CLR 0x03a8
+#define AFE_IRQ_MCU_CNT1 0x03ac
+#define AFE_IRQ_MCU_CNT2 0x03b0
+#define AFE_IRQ_MCU_EN 0x03b4
+#define AFE_IRQ_MCU_MON2 0x03b8
+#define AFE_IRQ_MCU_CNT5 0x03bc
+#define AFE_IRQ1_MCU_CNT_MON 0x03c0
+#define AFE_IRQ2_MCU_CNT_MON 0x03c4
+#define AFE_IRQ1_MCU_EN_CNT_MON 0x03c8
+#define AFE_IRQ5_MCU_CNT_MON 0x03cc
+#define AFE_MEMIF_MINLEN 0x03d0
+#define AFE_MEMIF_MAXLEN 0x03d4
+#define AFE_MEMIF_PBUF_SIZE 0x03d8
+#define AFE_IRQ_MCU_CNT7 0x03dc
+#define AFE_IRQ7_MCU_CNT_MON 0x03e0
+#define AFE_IRQ_MCU_CNT3 0x03e4
+#define AFE_IRQ_MCU_CNT4 0x03e8
+#define AFE_IRQ_MCU_CNT11 0x03ec
+#define AFE_APLL1_TUNER_CFG 0x03f0
+#define AFE_APLL2_TUNER_CFG 0x03f4
+#define AFE_MEMIF_HD_MODE 0x03f8
+#define AFE_MEMIF_HDALIGN 0x03fc
+#define AFE_CONN33 0x0408
+#define AFE_IRQ_MCU_CNT12 0x040c
+#define AFE_GAIN1_CON0 0x0410
+#define AFE_GAIN1_CON1 0x0414
+#define AFE_GAIN1_CON2 0x0418
+#define AFE_GAIN1_CON3 0x041c
+#define AFE_CONN7 0x0420
+#define AFE_GAIN1_CUR 0x0424
+#define AFE_GAIN2_CON0 0x0428
+#define AFE_GAIN2_CON1 0x042c
+#define AFE_GAIN2_CON2 0x0430
+#define AFE_GAIN2_CON3 0x0434
+#define AFE_CONN8 0x0438
+#define AFE_GAIN2_CUR 0x043c
+#define AFE_CONN9 0x0440
+#define AFE_CONN10 0x0444
+#define AFE_CONN11 0x0448
+#define AFE_CONN12 0x044c
+#define AFE_CONN13 0x0450
+#define AFE_CONN14 0x0454
+#define AFE_CONN15 0x0458
+#define AFE_CONN16 0x045c
+#define AFE_CONN17 0x0460
+#define AFE_CONN18 0x0464
+#define AFE_CONN19 0x0468
+#define AFE_CONN20 0x046c
+#define AFE_CONN21 0x0470
+#define AFE_CONN22 0x0474
+#define AFE_CONN23 0x0478
+#define AFE_CONN24 0x047c
+#define AFE_CONN_RS 0x0494
+#define AFE_CONN_DI 0x0498
+#define AFE_CONN25 0x04b0
+#define AFE_CONN26 0x04b4
+#define AFE_CONN27 0x04b8
+#define AFE_CONN28 0x04bc
+#define AFE_CONN29 0x04c0
+#define AFE_CONN30 0x04c4
+#define AFE_CONN31 0x04c8
+#define AFE_CONN32 0x04cc
+#define AFE_SRAM_DELSEL_CON0 0x04f0
+#define AFE_SRAM_DELSEL_CON2 0x04f8
+#define AFE_SRAM_DELSEL_CON3 0x04fc
+#define AFE_ASRC_2CH_CON12 0x0528
+#define AFE_ASRC_2CH_CON13 0x052c
+#define PCM_INTF_CON1 0x0530
+#define PCM_INTF_CON2 0x0538
+#define PCM2_INTF_CON 0x053c
+#define AFE_TDM_CON1 0x0548
+#define AFE_TDM_CON2 0x054c
+#define AFE_CONN34 0x0580
+#define FPGA_CFG0 0x05b0
+#define FPGA_CFG1 0x05b4
+#define FPGA_CFG2 0x05c0
+#define FPGA_CFG3 0x05c4
+#define AUDIO_TOP_DBG_CON 0x05c8
+#define AUDIO_TOP_DBG_MON0 0x05cc
+#define AUDIO_TOP_DBG_MON1 0x05d0
+#define AFE_IRQ8_MCU_CNT_MON 0x05e4
+#define AFE_IRQ11_MCU_CNT_MON 0x05e8
+#define AFE_IRQ12_MCU_CNT_MON 0x05ec
+#define AFE_GENERAL_REG0 0x0800
+#define AFE_GENERAL_REG1 0x0804
+#define AFE_GENERAL_REG2 0x0808
+#define AFE_GENERAL_REG3 0x080c
+#define AFE_GENERAL_REG4 0x0810
+#define AFE_GENERAL_REG5 0x0814
+#define AFE_GENERAL_REG6 0x0818
+#define AFE_GENERAL_REG7 0x081c
+#define AFE_GENERAL_REG8 0x0820
+#define AFE_GENERAL_REG9 0x0824
+#define AFE_GENERAL_REG10 0x0828
+#define AFE_GENERAL_REG11 0x082c
+#define AFE_GENERAL_REG12 0x0830
+#define AFE_GENERAL_REG13 0x0834
+#define AFE_GENERAL_REG14 0x0838
+#define AFE_GENERAL_REG15 0x083c
+#define AFE_CBIP_CFG0 0x0840
+#define AFE_CBIP_MON0 0x0844
+#define AFE_CBIP_SLV_MUX_MON0 0x0848
+#define AFE_CBIP_SLV_DECODER_MON0 0x084c
+#define AFE_CONN0_1 0x0900
+#define AFE_CONN1_1 0x0904
+#define AFE_CONN2_1 0x0908
+#define AFE_CONN3_1 0x090c
+#define AFE_CONN4_1 0x0910
+#define AFE_CONN5_1 0x0914
+#define AFE_CONN6_1 0x0918
+#define AFE_CONN7_1 0x091c
+#define AFE_CONN8_1 0x0920
+#define AFE_CONN9_1 0x0924
+#define AFE_CONN10_1 0x0928
+#define AFE_CONN11_1 0x092c
+#define AFE_CONN12_1 0x0930
+#define AFE_CONN13_1 0x0934
+#define AFE_CONN14_1 0x0938
+#define AFE_CONN15_1 0x093c
+#define AFE_CONN16_1 0x0940
+#define AFE_CONN17_1 0x0944
+#define AFE_CONN18_1 0x0948
+#define AFE_CONN19_1 0x094c
+#define AFE_CONN20_1 0x0950
+#define AFE_CONN21_1 0x0954
+#define AFE_CONN22_1 0x0958
+#define AFE_CONN23_1 0x095c
+#define AFE_CONN24_1 0x0960
+#define AFE_CONN25_1 0x0964
+#define AFE_CONN26_1 0x0968
+#define AFE_CONN27_1 0x096c
+#define AFE_CONN28_1 0x0970
+#define AFE_CONN29_1 0x0974
+#define AFE_CONN30_1 0x0978
+#define AFE_CONN31_1 0x097c
+#define AFE_CONN32_1 0x0980
+#define AFE_CONN33_1 0x0984
+#define AFE_CONN34_1 0x0988
+#define AFE_CONN_RS_1 0x098c
+#define AFE_CONN_DI_1 0x0990
+#define AFE_CONN_24BIT_1 0x0994
+#define AFE_CONN_REG 0x0998
+#define AFE_CONN35 0x09a0
+#define AFE_CONN36 0x09a4
+#define AFE_CONN37 0x09a8
+#define AFE_CONN38 0x09ac
+#define AFE_CONN35_1 0x09b0
+#define AFE_CONN36_1 0x09b4
+#define AFE_CONN37_1 0x09b8
+#define AFE_CONN38_1 0x09bc
+#define AFE_CONN39 0x09c0
+#define AFE_CONN40 0x09c4
+#define AFE_CONN41 0x09c8
+#define AFE_CONN42 0x09cc
+#define AFE_CONN39_1 0x09e0
+#define AFE_CONN40_1 0x09e4
+#define AFE_CONN41_1 0x09e8
+#define AFE_CONN42_1 0x09ec
+#define AFE_I2S_CON4 0x09f8
+#define AFE_ADDA6_TOP_CON0 0x0a80
+#define AFE_ADDA6_UL_SRC_CON0 0x0a84
+#define AFE_ADD6_UL_SRC_CON1 0x0a88
+#define AFE_ADDA6_SRC_DEBUG 0x0a8c
+#define AFE_ADDA6_SRC_DEBUG_MON0 0x0a90
+#define AFE_ADDA6_ULCF_CFG_02_01 0x0aa0
+#define AFE_ADDA6_ULCF_CFG_04_03 0x0aa4
+#define AFE_ADDA6_ULCF_CFG_06_05 0x0aa8
+#define AFE_ADDA6_ULCF_CFG_08_07 0x0aac
+#define AFE_ADDA6_ULCF_CFG_10_09 0x0ab0
+#define AFE_ADDA6_ULCF_CFG_12_11 0x0ab4
+#define AFE_ADDA6_ULCF_CFG_14_13 0x0ab8
+#define AFE_ADDA6_ULCF_CFG_16_15 0x0abc
+#define AFE_ADDA6_ULCF_CFG_18_17 0x0ac0
+#define AFE_ADDA6_ULCF_CFG_20_19 0x0ac4
+#define AFE_ADDA6_ULCF_CFG_22_21 0x0ac8
+#define AFE_ADDA6_ULCF_CFG_24_23 0x0acc
+#define AFE_ADDA6_ULCF_CFG_26_25 0x0ad0
+#define AFE_ADDA6_ULCF_CFG_28_27 0x0ad4
+#define AFE_ADDA6_ULCF_CFG_30_29 0x0ad8
+#define AFE_ADD6A_UL_SRC_MON0 0x0ae4
+#define AFE_ADDA6_UL_SRC_MON1 0x0ae8
+#define AFE_CONN43 0x0af8
+#define AFE_CONN43_1 0x0afc
+#define AFE_DL1_BASE_MSB 0x0b00
+#define AFE_DL1_CUR_MSB 0x0b04
+#define AFE_DL1_END_MSB 0x0b08
+#define AFE_DL2_BASE_MSB 0x0b10
+#define AFE_DL2_CUR_MSB 0x0b14
+#define AFE_DL2_END_MSB 0x0b18
+#define AFE_AWB_BASE_MSB 0x0b20
+#define AFE_AWB_END_MSB 0x0b28
+#define AFE_AWB_CUR_MSB 0x0b2c
+#define AFE_VUL_BASE_MSB 0x0b30
+#define AFE_VUL_END_MSB 0x0b38
+#define AFE_VUL_CUR_MSB 0x0b3c
+#define AFE_VUL2_BASE_MSB 0x0b50
+#define AFE_VUL2_END_MSB 0x0b58
+#define AFE_VUL2_CUR_MSB 0x0b5c
+#define AFE_MOD_DAI_BASE_MSB 0x0b60
+#define AFE_MOD_DAI_END_MSB 0x0b68
+#define AFE_MOD_DAI_CUR_MSB 0x0b6c
+#define AFE_VUL_D2_BASE_MSB 0x0b80
+#define AFE_VUL_D2_END_MSB 0x0b88
+#define AFE_VUL_D2_CUR_MSB 0x0b8c
+#define AFE_DL3_BASE_MSB 0x0b90
+#define AFE_DL3_CUR_MSB 0x0b94
+#define AFE_DL3_END_MSB 0x0b98
+#define AFE_HDMI_OUT_BASE_MSB 0x0ba4
+#define AFE_HDMI_OUT_CUR_MSB 0x0ba8
+#define AFE_HDMI_OUT_END_MSB 0x0bac
+#define AFE_AWB2_BASE 0x0bd0
+#define AFE_AWB2_END 0x0bd8
+#define AFE_AWB2_CUR 0x0bdc
+#define AFE_AWB2_BASE_MSB 0x0be0
+#define AFE_AWB2_END_MSB 0x0be8
+#define AFE_AWB2_CUR_MSB 0x0bec
+#define AFE_ADDA_DL_SDM_DCCOMP_CON 0x0c50
+#define AFE_ADDA_DL_SDM_TEST 0x0c54
+#define AFE_ADDA_DL_DC_COMP_CFG0 0x0c58
+#define AFE_ADDA_DL_DC_COMP_CFG1 0x0c5c
+#define AFE_ADDA_DL_SDM_FIFO_MON 0x0c60
+#define AFE_ADDA_DL_SRC_LCH_MON 0x0c64
+#define AFE_ADDA_DL_SRC_RCH_MON 0x0c68
+#define AFE_ADDA_DL_SDM_OUT_MON 0x0c6c
+#define AFE_CONNSYS_I2S_CON 0x0c78
+#define AFE_CONNSYS_I2S_MON 0x0c7c
+#define AFE_ASRC_2CH_CON0 0x0c80
+#define AFE_ASRC_2CH_CON1 0x0c84
+#define AFE_ASRC_2CH_CON2 0x0c88
+#define AFE_ASRC_2CH_CON3 0x0c8c
+#define AFE_ASRC_2CH_CON4 0x0c90
+#define AFE_ASRC_2CH_CON5 0x0c94
+#define AFE_ASRC_2CH_CON6 0x0c98
+#define AFE_ASRC_2CH_CON7 0x0c9c
+#define AFE_ASRC_2CH_CON8 0x0ca0
+#define AFE_ASRC_2CH_CON9 0x0ca4
+#define AFE_ASRC_2CH_CON10 0x0ca8
+#define AFE_ADDA6_IIR_COEF_02_01 0x0ce0
+#define AFE_ADDA6_IIR_COEF_04_03 0x0ce4
+#define AFE_ADDA6_IIR_COEF_06_05 0x0ce8
+#define AFE_ADDA6_IIR_COEF_08_07 0x0cec
+#define AFE_ADDA6_IIR_COEF_10_09 0x0cf0
+#define AFE_ADDA_PREDIS_CON2 0x0d40
+#define AFE_ADDA_PREDIS_CON3 0x0d44
+#define AFE_MEMIF_MON12 0x0d70
+#define AFE_MEMIF_MON13 0x0d74
+#define AFE_MEMIF_MON14 0x0d78
+#define AFE_MEMIF_MON15 0x0d7c
+#define AFE_MEMIF_MON16 0x0d80
+#define AFE_MEMIF_MON17 0x0d84
+#define AFE_MEMIF_MON18 0x0d88
+#define AFE_MEMIF_MON19 0x0d8c
+#define AFE_MEMIF_MON20 0x0d90
+#define AFE_MEMIF_MON21 0x0d94
+#define AFE_MEMIF_MON22 0x0d98
+#define AFE_MEMIF_MON23 0x0d9c
+#define AFE_MEMIF_MON24 0x0da0
+#define AFE_HD_ENGEN_ENABLE 0x0dd0
+#define AFE_ADDA_MTKAIF_CFG0 0x0e00
+#define AFE_ADDA_MTKAIF_TX_CFG1 0x0e14
+#define AFE_ADDA_MTKAIF_RX_CFG0 0x0e20
+#define AFE_ADDA_MTKAIF_RX_CFG1 0x0e24
+#define AFE_ADDA_MTKAIF_RX_CFG2 0x0e28
+#define AFE_ADDA_MTKAIF_MON0 0x0e34
+#define AFE_ADDA_MTKAIF_MON1 0x0e38
+#define AFE_AUD_PAD_TOP 0x0e40
+#define AFE_GENERAL1_ASRC_2CH_CON0 0x0e80
+#define AFE_GENERAL1_ASRC_2CH_CON1 0x0e84
+#define AFE_GENERAL1_ASRC_2CH_CON2 0x0e88
+#define AFE_GENERAL1_ASRC_2CH_CON3 0x0e8c
+#define AFE_GENERAL1_ASRC_2CH_CON4 0x0e90
+#define AFE_GENERAL1_ASRC_2CH_CON5 0x0e94
+#define AFE_GENERAL1_ASRC_2CH_CON6 0x0e98
+#define AFE_GENERAL1_ASRC_2CH_CON7 0x0e9c
+#define AFE_GENERAL1_ASRC_2CH_CON8 0x0ea0
+#define AFE_GENERAL1_ASRC_2CH_CON9 0x0ea4
+#define AFE_GENERAL1_ASRC_2CH_CON10 0x0ea8
+#define AFE_GENERAL1_ASRC_2CH_CON12 0x0eb0
+#define AFE_GENERAL1_ASRC_2CH_CON13 0x0eb4
+#define GENERAL_ASRC_MODE 0x0eb8
+#define GENERAL_ASRC_EN_ON 0x0ebc
+#define AFE_GENERAL2_ASRC_2CH_CON0 0x0f00
+#define AFE_GENERAL2_ASRC_2CH_CON1 0x0f04
+#define AFE_GENERAL2_ASRC_2CH_CON2 0x0f08
+#define AFE_GENERAL2_ASRC_2CH_CON3 0x0f0c
+#define AFE_GENERAL2_ASRC_2CH_CON4 0x0f10
+#define AFE_GENERAL2_ASRC_2CH_CON5 0x0f14
+#define AFE_GENERAL2_ASRC_2CH_CON6 0x0f18
+#define AFE_GENERAL2_ASRC_2CH_CON7 0x0f1c
+#define AFE_GENERAL2_ASRC_2CH_CON8 0x0f20
+#define AFE_GENERAL2_ASRC_2CH_CON9 0x0f24
+#define AFE_GENERAL2_ASRC_2CH_CON10 0x0f28
+#define AFE_GENERAL2_ASRC_2CH_CON12 0x0f30
+#define AFE_GENERAL2_ASRC_2CH_CON13 0x0f34
+
+#define AFE_MAX_REGISTER AFE_GENERAL2_ASRC_2CH_CON13
+#define AFE_IRQ_STATUS_BITS 0x1fff
+
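+/*
+ * The per-register field macros below come in triplets: FIELD_SFT is the
+ * bit shift, FIELD_MASK the unshifted mask and FIELD_MASK_SFT the mask
+ * shifted into place, as passed to regmap_update_bits().
+ */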
+/* AFE_DAC_CON0 */
+#define AWB2_ON_SFT 29
+#define AWB2_ON_MASK 0x1
+#define AWB2_ON_MASK_SFT (0x1 << 29)
+#define VUL2_ON_SFT 27
+#define VUL2_ON_MASK 0x1
+#define VUL2_ON_MASK_SFT (0x1 << 27)
+#define MOD_DAI_DUP_WR_SFT 26
+#define MOD_DAI_DUP_WR_MASK 0x1
+#define MOD_DAI_DUP_WR_MASK_SFT (0x1 << 26)
+#define VUL12_MODE_SFT 20
+#define VUL12_MODE_MASK 0xf
+#define VUL12_MODE_MASK_SFT (0xf << 20)
+#define VUL12_R_MONO_SFT 11
+#define VUL12_R_MONO_MASK 0x1
+#define VUL12_R_MONO_MASK_SFT (0x1 << 11)
+#define VUL12_MONO_SFT 10
+#define VUL12_MONO_MASK 0x1
+#define VUL12_MONO_MASK_SFT (0x1 << 10)
+#define VUL12_ON_SFT 9
+#define VUL12_ON_MASK 0x1
+#define VUL12_ON_MASK_SFT (0x1 << 9)
+#define MOD_DAI_ON_SFT 7
+#define MOD_DAI_ON_MASK 0x1
+#define MOD_DAI_ON_MASK_SFT (0x1 << 7)
+#define AWB_ON_SFT 6
+#define AWB_ON_MASK 0x1
+#define AWB_ON_MASK_SFT (0x1 << 6)
+#define DL3_ON_SFT 5
+#define DL3_ON_MASK 0x1
+#define DL3_ON_MASK_SFT (0x1 << 5)
+#define VUL_ON_SFT 3
+#define VUL_ON_MASK 0x1
+#define VUL_ON_MASK_SFT (0x1 << 3)
+#define DL2_ON_SFT 2
+#define DL2_ON_MASK 0x1
+#define DL2_ON_MASK_SFT (0x1 << 2)
+#define DL1_ON_SFT 1
+#define DL1_ON_MASK 0x1
+#define DL1_ON_MASK_SFT (0x1 << 1)
+#define AFE_ON_SFT 0
+#define AFE_ON_MASK 0x1
+#define AFE_ON_MASK_SFT (0x1 << 0)
+
+/* AFE_DAC_CON1 */
+#define MOD_DAI_MODE_SFT 30
+#define MOD_DAI_MODE_MASK 0x3
+#define MOD_DAI_MODE_MASK_SFT (0x3 << 30)
+#define VUL_R_MONO_SFT 28
+#define VUL_R_MONO_MASK 0x1
+#define VUL_R_MONO_MASK_SFT (0x1 << 28)
+#define VUL_DATA_SFT 27
+#define VUL_DATA_MASK 0x1
+#define VUL_DATA_MASK_SFT (0x1 << 27)
+#define AWB_R_MONO_SFT 25
+#define AWB_R_MONO_MASK 0x1
+#define AWB_R_MONO_MASK_SFT (0x1 << 25)
+#define AWB_DATA_SFT 24
+#define AWB_DATA_MASK 0x1
+#define AWB_DATA_MASK_SFT (0x1 << 24)
+#define DL3_DATA_SFT 23
+#define DL3_DATA_MASK 0x1
+#define DL3_DATA_MASK_SFT (0x1 << 23)
+#define DL2_DATA_SFT 22
+#define DL2_DATA_MASK 0x1
+#define DL2_DATA_MASK_SFT (0x1 << 22)
+#define DL1_DATA_SFT 21
+#define DL1_DATA_MASK 0x1
+#define DL1_DATA_MASK_SFT (0x1 << 21)
+#define VUL_MODE_SFT 16
+#define VUL_MODE_MASK 0xf
+#define VUL_MODE_MASK_SFT (0xf << 16)
+#define AWB_MODE_SFT 12
+#define AWB_MODE_MASK 0xf
+#define AWB_MODE_MASK_SFT (0xf << 12)
+#define I2S_MODE_SFT 8
+#define I2S_MODE_MASK 0xf
+#define I2S_MODE_MASK_SFT (0xf << 8)
+#define DL2_MODE_SFT 4
+#define DL2_MODE_MASK 0xf
+#define DL2_MODE_MASK_SFT (0xf << 4)
+#define DL1_MODE_SFT 0
+#define DL1_MODE_MASK 0xf
+#define DL1_MODE_MASK_SFT (0xf << 0)
+
+/* AFE_DAC_CON2 */
+#define AWB2_R_MONO_SFT 21
+#define AWB2_R_MONO_MASK 0x1
+#define AWB2_R_MONO_MASK_SFT (0x1 << 21)
+#define AWB2_DATA_SFT 20
+#define AWB2_DATA_MASK 0x1
+#define AWB2_DATA_MASK_SFT (0x1 << 20)
+#define AWB2_MODE_SFT 16
+#define AWB2_MODE_MASK 0xf
+#define AWB2_MODE_MASK_SFT (0xf << 16)
+#define DL3_MODE_SFT 8
+#define DL3_MODE_MASK 0xf
+#define DL3_MODE_MASK_SFT (0xf << 8)
+#define VUL2_MODE_SFT 4
+#define VUL2_MODE_MASK 0xf
+#define VUL2_MODE_MASK_SFT (0xf << 4)
+#define VUL2_R_MONO_SFT 1
+#define VUL2_R_MONO_MASK 0x1
+#define VUL2_R_MONO_MASK_SFT (0x1 << 1)
+#define VUL2_DATA_SFT 0
+#define VUL2_DATA_MASK 0x1
+#define VUL2_DATA_MASK_SFT (0x1 << 0)
+
+/* AFE_DAC_MON */
+#define AFE_ON_RETM_SFT 0
+#define AFE_ON_RETM_MASK 0x1
+#define AFE_ON_RETM_MASK_SFT (0x1 << 0)
+
+/* AFE_I2S_CON */
+#define BCK_NEG_EG_LATCH_SFT 30
+#define BCK_NEG_EG_LATCH_MASK 0x1
+#define BCK_NEG_EG_LATCH_MASK_SFT (0x1 << 30)
+#define BCK_INV_SFT 29
+#define BCK_INV_MASK 0x1
+#define BCK_INV_MASK_SFT (0x1 << 29)
+#define I2SIN_PAD_SEL_SFT 28
+#define I2SIN_PAD_SEL_MASK 0x1
+#define I2SIN_PAD_SEL_MASK_SFT (0x1 << 28)
+#define I2S_LOOPBACK_SFT 20
+#define I2S_LOOPBACK_MASK 0x1
+#define I2S_LOOPBACK_MASK_SFT (0x1 << 20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK 0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT (0x1 << 17)
+#define I2S1_HD_EN_SFT 12
+#define I2S1_HD_EN_MASK 0x1
+#define I2S1_HD_EN_MASK_SFT (0x1 << 12)
+#define INV_PAD_CTRL_SFT 7
+#define INV_PAD_CTRL_MASK 0x1
+#define INV_PAD_CTRL_MASK_SFT (0x1 << 7)
+#define I2S_BYPSRC_SFT 6
+#define I2S_BYPSRC_MASK 0x1
+#define I2S_BYPSRC_MASK_SFT (0x1 << 6)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK 0x1
+#define INV_LRCK_MASK_SFT (0x1 << 5)
+#define I2S_FMT_SFT 3
+#define I2S_FMT_MASK 0x1
+#define I2S_FMT_MASK_SFT (0x1 << 3)
+#define I2S_SRC_SFT 2
+#define I2S_SRC_MASK 0x1
+#define I2S_SRC_MASK_SFT (0x1 << 2)
+#define I2S_WLEN_SFT 1
+#define I2S_WLEN_MASK 0x1
+#define I2S_WLEN_MASK_SFT (0x1 << 1)
+#define I2S_EN_SFT 0
+#define I2S_EN_MASK 0x1
+#define I2S_EN_MASK_SFT (0x1 << 0)
+
+/* AFE_I2S_CON1 */
+#define I2S2_LR_SWAP_SFT 31
+#define I2S2_LR_SWAP_MASK 0x1
+#define I2S2_LR_SWAP_MASK_SFT (0x1 << 31)
+#define I2S2_SEL_O19_O20_SFT 18
+#define I2S2_SEL_O19_O20_MASK 0x1
+#define I2S2_SEL_O19_O20_MASK_SFT (0x1 << 18)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK 0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT (0x1 << 17)
+#define I2S2_SEL_O03_O04_SFT 16
+#define I2S2_SEL_O03_O04_MASK 0x1
+#define I2S2_SEL_O03_O04_MASK_SFT (0x1 << 16)
+#define I2S2_32BIT_EN_SFT 13
+#define I2S2_32BIT_EN_MASK 0x1
+#define I2S2_32BIT_EN_MASK_SFT (0x1 << 13)
+#define I2S2_HD_EN_SFT 12
+#define I2S2_HD_EN_MASK 0x1
+#define I2S2_HD_EN_MASK_SFT (0x1 << 12)
+#define I2S2_OUT_MODE_SFT 8
+#define I2S2_OUT_MODE_MASK 0xf
+#define I2S2_OUT_MODE_MASK_SFT (0xf << 8)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK 0x1
+#define INV_LRCK_MASK_SFT (0x1 << 5)
+#define I2S2_FMT_SFT 3
+#define I2S2_FMT_MASK 0x1
+#define I2S2_FMT_MASK_SFT (0x1 << 3)
+#define I2S2_WLEN_SFT 1
+#define I2S2_WLEN_MASK 0x1
+#define I2S2_WLEN_MASK_SFT (0x1 << 1)
+#define I2S2_EN_SFT 0
+#define I2S2_EN_MASK 0x1
+#define I2S2_EN_MASK_SFT (0x1 << 0)
+
+/* AFE_I2S_CON2 */
+#define I2S3_LR_SWAP_SFT 31
+#define I2S3_LR_SWAP_MASK 0x1
+#define I2S3_LR_SWAP_MASK_SFT (0x1 << 31)
+#define I2S3_UPDATE_WORD_SFT 24
+#define I2S3_UPDATE_WORD_MASK 0x1f
+#define I2S3_UPDATE_WORD_MASK_SFT (0x1f << 24)
+#define I2S3_BCK_INV_SFT 23
+#define I2S3_BCK_INV_MASK 0x1
+#define I2S3_BCK_INV_MASK_SFT (0x1 << 23)
+#define I2S3_FPGA_BIT_TEST_SFT 22
+#define I2S3_FPGA_BIT_TEST_MASK 0x1
+#define I2S3_FPGA_BIT_TEST_MASK_SFT (0x1 << 22)
+#define I2S3_FPGA_BIT_SFT 21
+#define I2S3_FPGA_BIT_MASK 0x1
+#define I2S3_FPGA_BIT_MASK_SFT (0x1 << 21)
+#define I2S3_LOOPBACK_SFT 20
+#define I2S3_LOOPBACK_MASK 0x1
+#define I2S3_LOOPBACK_MASK_SFT (0x1 << 20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK 0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT (0x1 << 17)
+#define I2S3_HD_EN_SFT 12
+#define I2S3_HD_EN_MASK 0x1
+#define I2S3_HD_EN_MASK_SFT (0x1 << 12)
+#define I2S3_OUT_MODE_SFT 8
+#define I2S3_OUT_MODE_MASK 0xf
+#define I2S3_OUT_MODE_MASK_SFT (0xf << 8)
+#define I2S3_FMT_SFT 3
+#define I2S3_FMT_MASK 0x1
+#define I2S3_FMT_MASK_SFT (0x1 << 3)
+#define I2S3_WLEN_SFT 1
+#define I2S3_WLEN_MASK 0x1
+#define I2S3_WLEN_MASK_SFT (0x1 << 1)
+#define I2S3_EN_SFT 0
+#define I2S3_EN_MASK 0x1
+#define I2S3_EN_MASK_SFT (0x1 << 0)
+
+/* AFE_I2S_CON3 */
+#define I2S4_LR_SWAP_SFT 31
+#define I2S4_LR_SWAP_MASK 0x1
+#define I2S4_LR_SWAP_MASK_SFT (0x1 << 31)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK 0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT (0x1 << 17)
+#define I2S4_32BIT_EN_SFT 13
+#define I2S4_32BIT_EN_MASK 0x1
+#define I2S4_32BIT_EN_MASK_SFT (0x1 << 13)
+#define I2S4_HD_EN_SFT 12
+#define I2S4_HD_EN_MASK 0x1
+#define I2S4_HD_EN_MASK_SFT (0x1 << 12)
+#define I2S4_OUT_MODE_SFT 8
+#define I2S4_OUT_MODE_MASK 0xf
+#define I2S4_OUT_MODE_MASK_SFT (0xf << 8)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK 0x1
+#define INV_LRCK_MASK_SFT (0x1 << 5)
+#define I2S4_FMT_SFT 3
+#define I2S4_FMT_MASK 0x1
+#define I2S4_FMT_MASK_SFT (0x1 << 3)
+#define I2S4_WLEN_SFT 1
+#define I2S4_WLEN_MASK 0x1
+#define I2S4_WLEN_MASK_SFT (0x1 << 1)
+#define I2S4_EN_SFT 0
+#define I2S4_EN_MASK 0x1
+#define I2S4_EN_MASK_SFT (0x1 << 0)
+
+/* AFE_I2S_CON4 */
+#define I2S5_LR_SWAP_SFT 31
+#define I2S5_LR_SWAP_MASK 0x1
+#define I2S5_LR_SWAP_MASK_SFT (0x1 << 31)
+#define I2S_LOOPBACK_SFT 20
+#define I2S_LOOPBACK_MASK 0x1
+#define I2S_LOOPBACK_MASK_SFT (0x1 << 20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK 0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT (0x1 << 17)
+#define I2S5_32BIT_EN_SFT 13
+#define I2S5_32BIT_EN_MASK 0x1
+#define I2S5_32BIT_EN_MASK_SFT (0x1 << 13)
+#define I2S5_HD_EN_SFT 12
+#define I2S5_HD_EN_MASK 0x1
+#define I2S5_HD_EN_MASK_SFT (0x1 << 12)
+#define I2S5_OUT_MODE_SFT 8
+#define I2S5_OUT_MODE_MASK 0xf
+#define I2S5_OUT_MODE_MASK_SFT (0xf << 8)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK 0x1
+#define INV_LRCK_MASK_SFT (0x1 << 5)
+#define I2S5_FMT_SFT 3
+#define I2S5_FMT_MASK 0x1
+#define I2S5_FMT_MASK_SFT (0x1 << 3)
+#define I2S5_WLEN_SFT 1
+#define I2S5_WLEN_MASK 0x1
+#define I2S5_WLEN_MASK_SFT (0x1 << 1)
+#define I2S5_EN_SFT 0
+#define I2S5_EN_MASK 0x1
+#define I2S5_EN_MASK_SFT (0x1 << 0)
+
+/* AFE_GAIN1_CON0 */
+#define GAIN1_SAMPLE_PER_STEP_SFT 8
+#define GAIN1_SAMPLE_PER_STEP_MASK 0xff
+#define GAIN1_SAMPLE_PER_STEP_MASK_SFT (0xff << 8)
+#define GAIN1_MODE_SFT 4
+#define GAIN1_MODE_MASK 0xf
+#define GAIN1_MODE_MASK_SFT (0xf << 4)
+#define GAIN1_ON_SFT 0
+#define GAIN1_ON_MASK 0x1
+#define GAIN1_ON_MASK_SFT (0x1 << 0)
+
+/* AFE_GAIN1_CON1 */
+#define GAIN1_TARGET_SFT 0
+#define GAIN1_TARGET_MASK 0xfffff
+#define GAIN1_TARGET_MASK_SFT (0xfffff << 0)
+
+/* AFE_GAIN2_CON0 */
+#define GAIN2_SAMPLE_PER_STEP_SFT 8
+#define GAIN2_SAMPLE_PER_STEP_MASK 0xff
+#define GAIN2_SAMPLE_PER_STEP_MASK_SFT (0xff << 8)
+#define GAIN2_MODE_SFT 4
+#define GAIN2_MODE_MASK 0xf
+#define GAIN2_MODE_MASK_SFT (0xf << 4)
+#define GAIN2_ON_SFT 0
+#define GAIN2_ON_MASK 0x1
+#define GAIN2_ON_MASK_SFT (0x1 << 0)
+
+/* AFE_GAIN2_CON1 */
+#define GAIN2_TARGET_SFT 0
+#define GAIN2_TARGET_MASK 0xfffff
+#define GAIN2_TARGET_MASK_SFT (0xfffff << 0)
+
+/* AFE_GAIN1_CUR */
+#define AFE_GAIN1_CUR_SFT 0
+#define AFE_GAIN1_CUR_MASK 0xfffff
+#define AFE_GAIN1_CUR_MASK_SFT (0xfffff << 0)
+
+/* AFE_GAIN2_CUR */
+#define AFE_GAIN2_CUR_SFT 0
+#define AFE_GAIN2_CUR_MASK 0xfffff
+#define AFE_GAIN2_CUR_MASK_SFT (0xfffff << 0)
+
+/* AFE_MEMIF_HD_MODE */
+#define AWB2_HD_SFT 28
+#define AWB2_HD_MASK 0x3
+#define AWB2_HD_MASK_SFT (0x3 << 28)
+#define HDMI_HD_SFT 20
+#define HDMI_HD_MASK 0x3
+#define HDMI_HD_MASK_SFT (0x3 << 20)
+#define MOD_DAI_HD_SFT 18
+#define MOD_DAI_HD_MASK 0x3
+#define MOD_DAI_HD_MASK_SFT (0x3 << 18)
+#define DAI_HD_SFT 16
+#define DAI_HD_MASK 0x3
+#define DAI_HD_MASK_SFT (0x3 << 16)
+#define VUL2_HD_SFT 14
+#define VUL2_HD_MASK 0x3
+#define VUL2_HD_MASK_SFT (0x3 << 14)
+#define VUL12_HD_SFT 12
+#define VUL12_HD_MASK 0x3
+#define VUL12_HD_MASK_SFT (0x3 << 12)
+#define VUL_HD_SFT 10
+#define VUL_HD_MASK 0x3
+#define VUL_HD_MASK_SFT (0x3 << 10)
+#define AWB_HD_SFT 8
+#define AWB_HD_MASK 0x3
+#define AWB_HD_MASK_SFT (0x3 << 8)
+#define DL3_HD_SFT 6
+#define DL3_HD_MASK 0x3
+#define DL3_HD_MASK_SFT (0x3 << 6)
+#define DL2_HD_SFT 4
+#define DL2_HD_MASK 0x3
+#define DL2_HD_MASK_SFT (0x3 << 4)
+#define DL1_HD_SFT 0
+#define DL1_HD_MASK 0x3
+#define DL1_HD_MASK_SFT (0x3 << 0)
+
+/* AFE_MEMIF_HDALIGN */
+#define AWB2_NORMAL_MODE_SFT 30
+#define AWB2_NORMAL_MODE_MASK 0x1
+#define AWB2_NORMAL_MODE_MASK_SFT (0x1 << 30)
+#define HDMI_NORMAL_MODE_SFT 26
+#define HDMI_NORMAL_MODE_MASK 0x1
+#define HDMI_NORMAL_MODE_MASK_SFT (0x1 << 26)
+#define MOD_DAI_NORMAL_MODE_SFT 25
+#define MOD_DAI_NORMAL_MODE_MASK 0x1
+#define MOD_DAI_NORMAL_MODE_MASK_SFT (0x1 << 25)
+#define DAI_NORMAL_MODE_SFT 24
+#define DAI_NORMAL_MODE_MASK 0x1
+#define DAI_NORMAL_MODE_MASK_SFT (0x1 << 24)
+#define VUL2_NORMAL_MODE_SFT 23
+#define VUL2_NORMAL_MODE_MASK 0x1
+#define VUL2_NORMAL_MODE_MASK_SFT (0x1 << 23)
+#define VUL12_NORMAL_MODE_SFT 22
+#define VUL12_NORMAL_MODE_MASK 0x1
+#define VUL12_NORMAL_MODE_MASK_SFT (0x1 << 22)
+#define VUL_NORMAL_MODE_SFT 21
+#define VUL_NORMAL_MODE_MASK 0x1
+#define VUL_NORMAL_MODE_MASK_SFT (0x1 << 21)
+#define AWB_NORMAL_MODE_SFT 20
+#define AWB_NORMAL_MODE_MASK 0x1
+#define AWB_NORMAL_MODE_MASK_SFT (0x1 << 20)
+#define DL3_NORMAL_MODE_SFT 19
+#define DL3_NORMAL_MODE_MASK 0x1
+#define DL3_NORMAL_MODE_MASK_SFT (0x1 << 19)
+#define DL2_NORMAL_MODE_SFT 18
+#define DL2_NORMAL_MODE_MASK 0x1
+#define DL2_NORMAL_MODE_MASK_SFT (0x1 << 18)
+#define DL1_NORMAL_MODE_SFT 16
+#define DL1_NORMAL_MODE_MASK 0x1
+#define DL1_NORMAL_MODE_MASK_SFT (0x1 << 16)
+#define RESERVED1_SFT 15
+#define RESERVED1_MASK 0x1
+#define RESERVED1_MASK_SFT (0x1 << 15)
+#define AWB2_ALIGN_SFT 14
+#define AWB2_ALIGN_MASK 0x1
+#define AWB2_ALIGN_MASK_SFT (0x1 << 14)
+#define HDMI_HD_ALIGN_SFT 10
+#define HDMI_HD_ALIGN_MASK 0x1
+#define HDMI_HD_ALIGN_MASK_SFT (0x1 << 10)
+#define MOD_DAI_HD_ALIGN_SFT 9
+#define MOD_DAI_HD_ALIGN_MASK 0x1
+#define MOD_DAI_HD_ALIGN_MASK_SFT (0x1 << 9)
+#define VUL2_HD_ALIGN_SFT 7
+#define VUL2_HD_ALIGN_MASK 0x1
+#define VUL2_HD_ALIGN_MASK_SFT (0x1 << 7)
+#define VUL12_HD_ALIGN_SFT 6
+#define VUL12_HD_ALIGN_MASK 0x1
+#define VUL12_HD_ALIGN_MASK_SFT (0x1 << 6)
+#define VUL_HD_ALIGN_SFT 5
+#define VUL_HD_ALIGN_MASK 0x1
+#define VUL_HD_ALIGN_MASK_SFT (0x1 << 5)
+#define AWB_HD_ALIGN_SFT 4
+#define AWB_HD_ALIGN_MASK 0x1
+#define AWB_HD_ALIGN_MASK_SFT (0x1 << 4)
+#define DL3_HD_ALIGN_SFT 3
+#define DL3_HD_ALIGN_MASK 0x1
+#define DL3_HD_ALIGN_MASK_SFT (0x1 << 3)
+#define DL2_HD_ALIGN_SFT 2
+#define DL2_HD_ALIGN_MASK 0x1
+#define DL2_HD_ALIGN_MASK_SFT (0x1 << 2)
+#define DL1_HD_ALIGN_SFT 0
+#define DL1_HD_ALIGN_MASK 0x1
+#define DL1_HD_ALIGN_MASK_SFT (0x1 << 0)
+
+/* PCM_INTF_CON1 */
+#define PCM_FIX_VALUE_SEL_SFT 31
+#define PCM_FIX_VALUE_SEL_MASK 0x1
+#define PCM_FIX_VALUE_SEL_MASK_SFT (0x1 << 31)
+#define PCM_BUFFER_LOOPBACK_SFT 30
+#define PCM_BUFFER_LOOPBACK_MASK 0x1
+#define PCM_BUFFER_LOOPBACK_MASK_SFT (0x1 << 30)
+#define PCM_PARALLEL_LOOPBACK_SFT 29
+#define PCM_PARALLEL_LOOPBACK_MASK 0x1
+#define PCM_PARALLEL_LOOPBACK_MASK_SFT (0x1 << 29)
+#define PCM_SERIAL_LOOPBACK_SFT 28
+#define PCM_SERIAL_LOOPBACK_MASK 0x1
+#define PCM_SERIAL_LOOPBACK_MASK_SFT (0x1 << 28)
+#define PCM_DAI_PCM_LOOPBACK_SFT 27
+#define PCM_DAI_PCM_LOOPBACK_MASK 0x1
+#define PCM_DAI_PCM_LOOPBACK_MASK_SFT (0x1 << 27)
+#define PCM_I2S_PCM_LOOPBACK_SFT 26
+#define PCM_I2S_PCM_LOOPBACK_MASK 0x1
+#define PCM_I2S_PCM_LOOPBACK_MASK_SFT (0x1 << 26)
+#define PCM_SYNC_DELSEL_SFT 25
+#define PCM_SYNC_DELSEL_MASK 0x1
+#define PCM_SYNC_DELSEL_MASK_SFT (0x1 << 25)
+#define PCM_TX_LR_SWAP_SFT 24
+#define PCM_TX_LR_SWAP_MASK 0x1
+#define PCM_TX_LR_SWAP_MASK_SFT (0x1 << 24)
+#define PCM_SYNC_OUT_INV_SFT 23
+#define PCM_SYNC_OUT_INV_MASK 0x1
+#define PCM_SYNC_OUT_INV_MASK_SFT (0x1 << 23)
+#define PCM_BCLK_OUT_INV_SFT 22
+#define PCM_BCLK_OUT_INV_MASK 0x1
+#define PCM_BCLK_OUT_INV_MASK_SFT (0x1 << 22)
+#define PCM_SYNC_IN_INV_SFT 21
+#define PCM_SYNC_IN_INV_MASK 0x1
+#define PCM_SYNC_IN_INV_MASK_SFT (0x1 << 21)
+#define PCM_BCLK_IN_INV_SFT 20
+#define PCM_BCLK_IN_INV_MASK 0x1
+#define PCM_BCLK_IN_INV_MASK_SFT (0x1 << 20)
+#define PCM_TX_LCH_RPT_SFT 19
+#define PCM_TX_LCH_RPT_MASK 0x1
+#define PCM_TX_LCH_RPT_MASK_SFT (0x1 << 19)
+#define PCM_VBT_16K_MODE_SFT 18
+#define PCM_VBT_16K_MODE_MASK 0x1
+#define PCM_VBT_16K_MODE_MASK_SFT (0x1 << 18)
+#define PCM_EXT_MODEM_SFT 17
+#define PCM_EXT_MODEM_MASK 0x1
+#define PCM_EXT_MODEM_MASK_SFT (0x1 << 17)
+#define PCM_24BIT_SFT 16
+#define PCM_24BIT_MASK 0x1
+#define PCM_24BIT_MASK_SFT (0x1 << 16)
+#define PCM_WLEN_SFT 14
+#define PCM_WLEN_MASK 0x3
+#define PCM_WLEN_MASK_SFT (0x3 << 14)
+#define PCM_SYNC_LENGTH_SFT 9
+#define PCM_SYNC_LENGTH_MASK 0x1f
+#define PCM_SYNC_LENGTH_MASK_SFT (0x1f << 9)
+#define PCM_SYNC_TYPE_SFT 8
+#define PCM_SYNC_TYPE_MASK 0x1
+#define PCM_SYNC_TYPE_MASK_SFT (0x1 << 8)
+#define PCM_BT_MODE_SFT 7
+#define PCM_BT_MODE_MASK 0x1
+#define PCM_BT_MODE_MASK_SFT (0x1 << 7)
+#define PCM_BYP_ASRC_SFT 6
+#define PCM_BYP_ASRC_MASK 0x1
+#define PCM_BYP_ASRC_MASK_SFT (0x1 << 6)
+#define PCM_SLAVE_SFT 5
+#define PCM_SLAVE_MASK 0x1
+#define PCM_SLAVE_MASK_SFT (0x1 << 5)
+#define PCM_MODE_SFT 3
+#define PCM_MODE_MASK 0x3
+#define PCM_MODE_MASK_SFT (0x3 << 3)
+#define PCM_FMT_SFT 1
+#define PCM_FMT_MASK 0x3
+#define PCM_FMT_MASK_SFT (0x3 << 1)
+#define PCM_EN_SFT 0
+#define PCM_EN_MASK 0x1
+#define PCM_EN_MASK_SFT (0x1 << 0)
+
+/* PCM_INTF_CON2 */
+#define PCM1_TX_FIFO_OV_SFT 31
+#define PCM1_TX_FIFO_OV_MASK 0x1
+#define PCM1_TX_FIFO_OV_MASK_SFT (0x1 << 31)
+#define PCM1_RX_FIFO_OV_SFT 30
+#define PCM1_RX_FIFO_OV_MASK 0x1
+#define PCM1_RX_FIFO_OV_MASK_SFT (0x1 << 30)
+#define PCM2_TX_FIFO_OV_SFT 29
+#define PCM2_TX_FIFO_OV_MASK 0x1
+#define PCM2_TX_FIFO_OV_MASK_SFT (0x1 << 29)
+#define PCM2_RX_FIFO_OV_SFT 28
+#define PCM2_RX_FIFO_OV_MASK 0x1
+#define PCM2_RX_FIFO_OV_MASK_SFT (0x1 << 28)
+#define PCM1_SYNC_GLITCH_SFT 27
+#define PCM1_SYNC_GLITCH_MASK 0x1
+#define PCM1_SYNC_GLITCH_MASK_SFT (0x1 << 27)
+#define PCM2_SYNC_GLITCH_SFT 26
+#define PCM2_SYNC_GLITCH_MASK 0x1
+#define PCM2_SYNC_GLITCH_MASK_SFT (0x1 << 26)
+#define TX3_RCH_DBG_MODE_SFT 17
+#define TX3_RCH_DBG_MODE_MASK 0x1
+#define TX3_RCH_DBG_MODE_MASK_SFT (0x1 << 17)
+#define PCM1_PCM2_LOOPBACK_SFT 16
+#define PCM1_PCM2_LOOPBACK_MASK 0x1
+#define PCM1_PCM2_LOOPBACK_MASK_SFT (0x1 << 16)
+#define DAI_PCM_LOOPBACK_CH_SFT 14
+#define DAI_PCM_LOOPBACK_CH_MASK 0x3
+#define DAI_PCM_LOOPBACK_CH_MASK_SFT (0x3 << 14)
+#define I2S_PCM_LOOPBACK_CH_SFT 12
+#define I2S_PCM_LOOPBACK_CH_MASK 0x3
+#define I2S_PCM_LOOPBACK_CH_MASK_SFT (0x3 << 12)
+#define TX_FIX_VALUE_SFT 0
+#define TX_FIX_VALUE_MASK 0xff
+#define TX_FIX_VALUE_MASK_SFT (0xff << 0)
+
+/* PCM2_INTF_CON */
+#define PCM2_TX_FIX_VALUE_SFT 24
+#define PCM2_TX_FIX_VALUE_MASK 0xff
+#define PCM2_TX_FIX_VALUE_MASK_SFT (0xff << 24)
+#define PCM2_FIX_VALUE_SEL_SFT 23
+#define PCM2_FIX_VALUE_SEL_MASK 0x1
+#define PCM2_FIX_VALUE_SEL_MASK_SFT (0x1 << 23)
+#define PCM2_BUFFER_LOOPBACK_SFT 22
+#define PCM2_BUFFER_LOOPBACK_MASK 0x1
+#define PCM2_BUFFER_LOOPBACK_MASK_SFT (0x1 << 22)
+#define PCM2_PARALLEL_LOOPBACK_SFT 21
+#define PCM2_PARALLEL_LOOPBACK_MASK 0x1
+#define PCM2_PARALLEL_LOOPBACK_MASK_SFT (0x1 << 21)
+#define PCM2_SERIAL_LOOPBACK_SFT 20
+#define PCM2_SERIAL_LOOPBACK_MASK 0x1
+#define PCM2_SERIAL_LOOPBACK_MASK_SFT (0x1 << 20)
+#define PCM2_DAI_PCM_LOOPBACK_SFT 19
+#define PCM2_DAI_PCM_LOOPBACK_MASK 0x1
+#define PCM2_DAI_PCM_LOOPBACK_MASK_SFT (0x1 << 19)
+#define PCM2_I2S_PCM_LOOPBACK_SFT 18
+#define PCM2_I2S_PCM_LOOPBACK_MASK 0x1
+#define PCM2_I2S_PCM_LOOPBACK_MASK_SFT (0x1 << 18)
+#define PCM2_SYNC_DELSEL_SFT 17
+#define PCM2_SYNC_DELSEL_MASK 0x1
+#define PCM2_SYNC_DELSEL_MASK_SFT (0x1 << 17)
+#define PCM2_TX_LR_SWAP_SFT 16
+#define PCM2_TX_LR_SWAP_MASK 0x1
+#define PCM2_TX_LR_SWAP_MASK_SFT (0x1 << 16)
+#define PCM2_SYNC_IN_INV_SFT 15
+#define PCM2_SYNC_IN_INV_MASK 0x1
+#define PCM2_SYNC_IN_INV_MASK_SFT (0x1 << 15)
+#define PCM2_BCLK_IN_INV_SFT 14
+#define PCM2_BCLK_IN_INV_MASK 0x1
+#define PCM2_BCLK_IN_INV_MASK_SFT (0x1 << 14)
+#define PCM2_TX_LCH_RPT_SFT 13
+#define PCM2_TX_LCH_RPT_MASK 0x1
+#define PCM2_TX_LCH_RPT_MASK_SFT (0x1 << 13)
+#define PCM2_VBT_16K_MODE_SFT 12
+#define PCM2_VBT_16K_MODE_MASK 0x1
+#define PCM2_VBT_16K_MODE_MASK_SFT (0x1 << 12)
+#define PCM2_LOOPBACK_CH_SEL_SFT 10
+#define PCM2_LOOPBACK_CH_SEL_MASK 0x3
+#define PCM2_LOOPBACK_CH_SEL_MASK_SFT (0x3 << 10)
+#define PCM2_TX2_BT_MODE_SFT 8
+#define PCM2_TX2_BT_MODE_MASK 0x1
+#define PCM2_TX2_BT_MODE_MASK_SFT (0x1 << 8)
+#define PCM2_BT_MODE_SFT 7
+#define PCM2_BT_MODE_MASK 0x1
+#define PCM2_BT_MODE_MASK_SFT (0x1 << 7)
+#define PCM2_AFIFO_SFT 6
+#define PCM2_AFIFO_MASK 0x1
+#define PCM2_AFIFO_MASK_SFT (0x1 << 6)
+#define PCM2_WLEN_SFT 5
+#define PCM2_WLEN_MASK 0x1
+#define PCM2_WLEN_MASK_SFT (0x1 << 5)
+#define PCM2_MODE_SFT 3
+#define PCM2_MODE_MASK 0x3
+#define PCM2_MODE_MASK_SFT (0x3 << 3)
+#define PCM2_FMT_SFT 1
+#define PCM2_FMT_MASK 0x3
+#define PCM2_FMT_MASK_SFT (0x3 << 1)
+#define PCM2_EN_SFT 0
+#define PCM2_EN_MASK 0x1
+#define PCM2_EN_MASK_SFT (0x1 << 0)
+
+/* AFE_ADDA_MTKAIF_CFG0 */
+#define MTKAIF_RXIF_CLKINV_ADC_SFT 31
+#define MTKAIF_RXIF_CLKINV_ADC_MASK 0x1
+#define MTKAIF_RXIF_CLKINV_ADC_MASK_SFT (0x1 << 31)
+#define MTKAIF_RXIF_BYPASS_SRC_SFT 17
+#define MTKAIF_RXIF_BYPASS_SRC_MASK 0x1
+#define MTKAIF_RXIF_BYPASS_SRC_MASK_SFT (0x1 << 17)
+#define MTKAIF_RXIF_PROTOCOL2_SFT 16
+#define MTKAIF_RXIF_PROTOCOL2_MASK 0x1
+#define MTKAIF_RXIF_PROTOCOL2_MASK_SFT (0x1 << 16)
+#define MTKAIF_TXIF_BYPASS_SRC_SFT 5
+#define MTKAIF_TXIF_BYPASS_SRC_MASK 0x1
+#define MTKAIF_TXIF_BYPASS_SRC_MASK_SFT (0x1 << 5)
+#define MTKAIF_TXIF_PROTOCOL2_SFT 4
+#define MTKAIF_TXIF_PROTOCOL2_MASK 0x1
+#define MTKAIF_TXIF_PROTOCOL2_MASK_SFT (0x1 << 4)
+#define MTKAIF_TXIF_8TO5_SFT 2
+#define MTKAIF_TXIF_8TO5_MASK 0x1
+#define MTKAIF_TXIF_8TO5_MASK_SFT (0x1 << 2)
+#define MTKAIF_RXIF_8TO5_SFT 1
+#define MTKAIF_RXIF_8TO5_MASK 0x1
+#define MTKAIF_RXIF_8TO5_MASK_SFT (0x1 << 1)
+#define MTKAIF_IF_LOOPBACK1_SFT 0
+#define MTKAIF_IF_LOOPBACK1_MASK 0x1
+#define MTKAIF_IF_LOOPBACK1_MASK_SFT (0x1 << 0)
+
+/* AFE_ADDA_MTKAIF_RX_CFG2 */
+#define MTKAIF_RXIF_DETECT_ON_PROTOCOL2_SFT 16
+#define MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK 0x1
+#define MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK_SFT (0x1 << 16)
+#define MTKAIF_RXIF_DELAY_CYCLE_SFT 12
+#define MTKAIF_RXIF_DELAY_CYCLE_MASK 0xf
+#define MTKAIF_RXIF_DELAY_CYCLE_MASK_SFT (0xf << 12)
+#define MTKAIF_RXIF_DELAY_DATA_SFT 8
+#define MTKAIF_RXIF_DELAY_DATA_MASK 0x1
+#define MTKAIF_RXIF_DELAY_DATA_MASK_SFT (0x1 << 8)
+#define MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_SFT 4
+#define MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK 0x7
+#define MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK_SFT (0x7 << 4)
+
+/* AFE_ADDA_DL_SRC2_CON0 */
+#define DL_2_INPUT_MODE_CTL_SFT 28
+#define DL_2_INPUT_MODE_CTL_MASK 0xf
+#define DL_2_INPUT_MODE_CTL_MASK_SFT (0xf << 28)
+#define DL_2_CH1_SATURATION_EN_CTL_SFT 27
+#define DL_2_CH1_SATURATION_EN_CTL_MASK 0x1
+#define DL_2_CH1_SATURATION_EN_CTL_MASK_SFT (0x1 << 27)
+#define DL_2_CH2_SATURATION_EN_CTL_SFT 26
+#define DL_2_CH2_SATURATION_EN_CTL_MASK 0x1
+#define DL_2_CH2_SATURATION_EN_CTL_MASK_SFT (0x1 << 26)
+#define DL_2_OUTPUT_SEL_CTL_SFT 24
+#define DL_2_OUTPUT_SEL_CTL_MASK 0x3
+#define DL_2_OUTPUT_SEL_CTL_MASK_SFT (0x3 << 24)
+#define DL_2_FADEIN_0START_EN_SFT 16
+#define DL_2_FADEIN_0START_EN_MASK 0x3
+#define DL_2_FADEIN_0START_EN_MASK_SFT (0x3 << 16)
+#define DL_DISABLE_HW_CG_CTL_SFT 15
+#define DL_DISABLE_HW_CG_CTL_MASK 0x1
+#define DL_DISABLE_HW_CG_CTL_MASK_SFT (0x1 << 15)
+#define C_DATA_EN_SEL_CTL_PRE_SFT 14
+#define C_DATA_EN_SEL_CTL_PRE_MASK 0x1
+#define C_DATA_EN_SEL_CTL_PRE_MASK_SFT (0x1 << 14)
+#define DL_2_SIDE_TONE_ON_CTL_PRE_SFT 13
+#define DL_2_SIDE_TONE_ON_CTL_PRE_MASK 0x1
+#define DL_2_SIDE_TONE_ON_CTL_PRE_MASK_SFT (0x1 << 13)
+#define DL_2_MUTE_CH1_OFF_CTL_PRE_SFT 12
+#define DL_2_MUTE_CH1_OFF_CTL_PRE_MASK 0x1
+#define DL_2_MUTE_CH1_OFF_CTL_PRE_MASK_SFT (0x1 << 12)
+#define DL_2_MUTE_CH2_OFF_CTL_PRE_SFT 11
+#define DL_2_MUTE_CH2_OFF_CTL_PRE_MASK 0x1
+#define DL_2_MUTE_CH2_OFF_CTL_PRE_MASK_SFT (0x1 << 11)
+#define DL2_ARAMPSP_CTL_PRE_SFT 9
+#define DL2_ARAMPSP_CTL_PRE_MASK 0x3
+#define DL2_ARAMPSP_CTL_PRE_MASK_SFT (0x3 << 9)
+#define DL_2_IIRMODE_CTL_PRE_SFT 6
+#define DL_2_IIRMODE_CTL_PRE_MASK 0x7
+#define DL_2_IIRMODE_CTL_PRE_MASK_SFT (0x7 << 6)
+#define DL_2_VOICE_MODE_CTL_PRE_SFT 5
+#define DL_2_VOICE_MODE_CTL_PRE_MASK 0x1
+#define DL_2_VOICE_MODE_CTL_PRE_MASK_SFT (0x1 << 5)
+#define D2_2_MUTE_CH1_ON_CTL_PRE_SFT 4
+#define D2_2_MUTE_CH1_ON_CTL_PRE_MASK 0x1
+#define D2_2_MUTE_CH1_ON_CTL_PRE_MASK_SFT (0x1 << 4)
+#define D2_2_MUTE_CH2_ON_CTL_PRE_SFT 3
+#define D2_2_MUTE_CH2_ON_CTL_PRE_MASK 0x1
+#define D2_2_MUTE_CH2_ON_CTL_PRE_MASK_SFT (0x1 << 3)
+#define DL_2_IIR_ON_CTL_PRE_SFT 2
+#define DL_2_IIR_ON_CTL_PRE_MASK 0x1
+#define DL_2_IIR_ON_CTL_PRE_MASK_SFT (0x1 << 2)
+#define DL_2_GAIN_ON_CTL_PRE_SFT 1
+#define DL_2_GAIN_ON_CTL_PRE_MASK 0x1
+#define DL_2_GAIN_ON_CTL_PRE_MASK_SFT (0x1 << 1)
+#define DL_2_SRC_ON_TMP_CTL_PRE_SFT 0
+#define DL_2_SRC_ON_TMP_CTL_PRE_MASK 0x1
+#define DL_2_SRC_ON_TMP_CTL_PRE_MASK_SFT (0x1 << 0)
+
+/* AFE_ADDA_DL_SRC2_CON1 */
+#define DL_2_GAIN_CTL_PRE_SFT 16
+#define DL_2_GAIN_CTL_PRE_MASK 0xffff
+#define DL_2_GAIN_CTL_PRE_MASK_SFT (0xffff << 16)
+#define DL_2_GAIN_MODE_CTL_SFT 0
+#define DL_2_GAIN_MODE_CTL_MASK 0x1
+#define DL_2_GAIN_MODE_CTL_MASK_SFT (0x1 << 0)
+
+/* AFE_ADDA_UL_SRC_CON0 */
+#define ULCF_CFG_EN_CTL_SFT 31
+#define ULCF_CFG_EN_CTL_MASK 0x1
+#define ULCF_CFG_EN_CTL_MASK_SFT (0x1 << 31)
+#define UL_MODE_3P25M_CH2_CTL_SFT 22
+#define UL_MODE_3P25M_CH2_CTL_MASK 0x1
+#define UL_MODE_3P25M_CH2_CTL_MASK_SFT (0x1 << 22)
+#define UL_MODE_3P25M_CH1_CTL_SFT 21
+#define UL_MODE_3P25M_CH1_CTL_MASK 0x1
+#define UL_MODE_3P25M_CH1_CTL_MASK_SFT (0x1 << 21)
+#define UL_VOICE_MODE_CH1_CH2_CTL_SFT 17
+#define UL_VOICE_MODE_CH1_CH2_CTL_MASK 0x7
+#define UL_VOICE_MODE_CH1_CH2_CTL_MASK_SFT (0x7 << 17)
+#define DMIC_LOW_POWER_MODE_CTL_SFT 14
+#define DMIC_LOW_POWER_MODE_CTL_MASK 0x3
+#define DMIC_LOW_POWER_MODE_CTL_MASK_SFT (0x3 << 14)
+#define UL_DISABLE_HW_CG_CTL_SFT 12
+#define UL_DISABLE_HW_CG_CTL_MASK 0x1
+#define UL_DISABLE_HW_CG_CTL_MASK_SFT (0x1 << 12)
+#define UL_IIR_ON_TMP_CTL_SFT 10
+#define UL_IIR_ON_TMP_CTL_MASK 0x1
+#define UL_IIR_ON_TMP_CTL_MASK_SFT (0x1 << 10)
+#define UL_IIRMODE_CTL_SFT 7
+#define UL_IIRMODE_CTL_MASK 0x7
+#define UL_IIRMODE_CTL_MASK_SFT (0x7 << 7)
+#define DIGMIC_3P25M_1P625M_SEL_CTL_SFT 5
+#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK 0x1
+#define DIGMIC_3P25M_1P625M_SEL_CTL_MASK_SFT (0x1 << 5)
+#define UL_LOOP_BACK_MODE_CTL_SFT 2
+#define UL_LOOP_BACK_MODE_CTL_MASK 0x1
+#define UL_LOOP_BACK_MODE_CTL_MASK_SFT (0x1 << 2)
+#define UL_SDM_3_LEVEL_CTL_SFT 1
+#define UL_SDM_3_LEVEL_CTL_MASK 0x1
+#define UL_SDM_3_LEVEL_CTL_MASK_SFT (0x1 << 1)
+#define UL_SRC_ON_TMP_CTL_SFT 0
+#define UL_SRC_ON_TMP_CTL_MASK 0x1
+#define UL_SRC_ON_TMP_CTL_MASK_SFT (0x1 << 0)
+
+/* AFE_ADDA_UL_SRC_CON1 */
+#define C_DAC_EN_CTL_SFT 27
+#define C_DAC_EN_CTL_MASK 0x1
+#define C_DAC_EN_CTL_MASK_SFT (0x1 << 27)
+#define C_MUTE_SW_CTL_SFT 26
+#define C_MUTE_SW_CTL_MASK 0x1
+#define C_MUTE_SW_CTL_MASK_SFT (0x1 << 26)
+#define ASDM_SRC_SEL_CTL_SFT 25
+#define ASDM_SRC_SEL_CTL_MASK 0x1
+#define ASDM_SRC_SEL_CTL_MASK_SFT (0x1 << 25)
+#define C_AMP_DIV_CH2_CTL_SFT 21
+#define C_AMP_DIV_CH2_CTL_MASK 0x7
+#define C_AMP_DIV_CH2_CTL_MASK_SFT (0x7 << 21)
+#define C_FREQ_DIV_CH2_CTL_SFT 16
+#define C_FREQ_DIV_CH2_CTL_MASK 0x1f
+#define C_FREQ_DIV_CH2_CTL_MASK_SFT (0x1f << 16)
+#define C_SINE_MODE_CH2_CTL_SFT 12
+#define C_SINE_MODE_CH2_CTL_MASK 0xf
+#define C_SINE_MODE_CH2_CTL_MASK_SFT (0xf << 12)
+#define C_AMP_DIV_CH1_CTL_SFT 9
+#define C_AMP_DIV_CH1_CTL_MASK 0x7
+#define C_AMP_DIV_CH1_CTL_MASK_SFT (0x7 << 9)
+#define C_FREQ_DIV_CH1_CTL_SFT 4
+#define C_FREQ_DIV_CH1_CTL_MASK 0x1f
+#define C_FREQ_DIV_CH1_CTL_MASK_SFT (0x1f << 4)
+#define C_SINE_MODE_CH1_CTL_SFT 0
+#define C_SINE_MODE_CH1_CTL_MASK 0xf
+#define C_SINE_MODE_CH1_CTL_MASK_SFT (0xf << 0)
+
+/* AFE_ADDA_TOP_CON0 */
+#define C_LOOP_BACK_MODE_CTL_SFT 12
+#define C_LOOP_BACK_MODE_CTL_MASK 0xf
+#define C_LOOP_BACK_MODE_CTL_MASK_SFT (0xf << 12)
+#define C_EXT_ADC_CTL_SFT 0
+#define C_EXT_ADC_CTL_MASK 0x1
+#define C_EXT_ADC_CTL_MASK_SFT (0x1 << 0)
+
+/* AFE_ADDA_UL_DL_CON0 */
+#define AFE_ADDA6_UL_LR_SWAP_SFT 15
+#define AFE_ADDA6_UL_LR_SWAP_MASK 0x1
+#define AFE_ADDA6_UL_LR_SWAP_MASK_SFT (0x1 << 15)
+#define AFE_ADDA6_CKDIV_RST_SFT 14
+#define AFE_ADDA6_CKDIV_RST_MASK 0x1
+#define AFE_ADDA6_CKDIV_RST_MASK_SFT (0x1 << 14)
+#define AFE_ADDA6_FIFO_AUTO_RST_SFT 13
+#define AFE_ADDA6_FIFO_AUTO_RST_MASK 0x1
+#define AFE_ADDA6_FIFO_AUTO_RST_MASK_SFT (0x1 << 13)
+#define UL_FIFO_DIGMIC_TESTIN_SFT 5
+#define UL_FIFO_DIGMIC_TESTIN_MASK 0x3
+#define UL_FIFO_DIGMIC_TESTIN_MASK_SFT (0x3 << 5)
+#define UL_FIFO_DIGMIC_WDATA_TESTEN_SFT 4
+#define UL_FIFO_DIGMIC_WDATA_TESTEN_MASK 0x1
+#define UL_FIFO_DIGMIC_WDATA_TESTEN_MASK_SFT (0x1 << 4)
+#define ADDA_AFE_ON_SFT 0
+#define ADDA_AFE_ON_MASK 0x1
+#define ADDA_AFE_ON_MASK_SFT (0x1 << 0)
+
+/* AFE_SIDETONE_CON0 */
+#define R_RDY_SFT 30
+#define R_RDY_MASK 0x1
+#define R_RDY_MASK_SFT (0x1 << 30)
+#define W_RDY_SFT 29
+#define W_RDY_MASK 0x1
+#define W_RDY_MASK_SFT (0x1 << 29)
+#define R_W_EN_SFT 25
+#define R_W_EN_MASK 0x1
+#define R_W_EN_MASK_SFT (0x1 << 25)
+#define R_W_SEL_SFT 24
+#define R_W_SEL_MASK 0x1
+#define R_W_SEL_MASK_SFT (0x1 << 24)
+#define SEL_CH2_SFT 23
+#define SEL_CH2_MASK 0x1
+#define SEL_CH2_MASK_SFT (0x1 << 23)
+#define SIDE_TONE_COEFFICIENT_ADDR_SFT 16
+#define SIDE_TONE_COEFFICIENT_ADDR_MASK 0x1f
+#define SIDE_TONE_COEFFICIENT_ADDR_MASK_SFT (0x1f << 16)
+#define SIDE_TONE_COEFFICIENT_SFT 0
+#define SIDE_TONE_COEFFICIENT_MASK 0xffff
+#define SIDE_TONE_COEFFICIENT_MASK_SFT (0xffff << 0)
+
+/* AFE_SIDETONE_COEFF */
+#define SIDE_TONE_COEFF_SFT 0
+#define SIDE_TONE_COEFF_MASK 0xffff
+#define SIDE_TONE_COEFF_MASK_SFT (0xffff << 0)
+
+/* AFE_SIDETONE_CON1 */
+#define STF_BYPASS_MODE_SFT 31
+#define STF_BYPASS_MODE_MASK 0x1
+#define STF_BYPASS_MODE_MASK_SFT (0x1 << 31)
+#define STF_BYPASS_MODE_O28_O29_SFT 30
+#define STF_BYPASS_MODE_O28_O29_MASK 0x1
+#define STF_BYPASS_MODE_O28_O29_MASK_SFT (0x1 << 30)
+#define STF_BYPASS_MODE_I2S4_SFT 29
+#define STF_BYPASS_MODE_I2S4_MASK 0x1
+#define STF_BYPASS_MODE_I2S4_MASK_SFT (0x1 << 29)
+#define STF_BYPASS_MODE_I2S5_SFT 28
+#define STF_BYPASS_MODE_I2S5_MASK 0x1
+#define STF_BYPASS_MODE_I2S5_MASK_SFT (0x1 << 28)
+#define STF_INPUT_EN_SEL_SFT 13
+#define STF_INPUT_EN_SEL_MASK 0x1
+#define STF_INPUT_EN_SEL_MASK_SFT (0x1 << 13)
+#define STF_SOURCE_FROM_O19O20_SFT 12
+#define STF_SOURCE_FROM_O19O20_MASK 0x1
+#define STF_SOURCE_FROM_O19O20_MASK_SFT (0x1 << 12)
+#define SIDE_TONE_ON_SFT 8
+#define SIDE_TONE_ON_MASK 0x1
+#define SIDE_TONE_ON_MASK_SFT (0x1 << 8)
+#define SIDE_TONE_HALF_TAP_NUM_SFT 0
+#define SIDE_TONE_HALF_TAP_NUM_MASK 0x3f
+#define SIDE_TONE_HALF_TAP_NUM_MASK_SFT (0x3f << 0)
+
+/* AFE_SIDETONE_GAIN */
+#define POSITIVE_GAIN_SFT 16
+#define POSITIVE_GAIN_MASK 0x7
+#define POSITIVE_GAIN_MASK_SFT (0x7 << 16)
+#define SIDE_TONE_GAIN_SFT 0
+#define SIDE_TONE_GAIN_MASK 0xffff
+#define SIDE_TONE_GAIN_MASK_SFT (0xffff << 0)
+
+/* AFE_ADDA_DL_SDM_DCCOMP_CON */
+#define AUD_DC_COMP_EN_SFT 8
+#define AUD_DC_COMP_EN_MASK 0x1
+#define AUD_DC_COMP_EN_MASK_SFT (0x1 << 8)
+#define ATTGAIN_CTL_SFT 0
+#define ATTGAIN_CTL_MASK 0x3f
+#define ATTGAIN_CTL_MASK_SFT (0x3f << 0)
+
+/* AFE_SINEGEN_CON0 */
+#define DAC_EN_SFT 26
+#define DAC_EN_MASK 0x1
+#define DAC_EN_MASK_SFT (0x1 << 26)
+#define MUTE_SW_CH2_SFT 25
+#define MUTE_SW_CH2_MASK 0x1
+#define MUTE_SW_CH2_MASK_SFT (0x1 << 25)
+#define MUTE_SW_CH1_SFT 24
+#define MUTE_SW_CH1_MASK 0x1
+#define MUTE_SW_CH1_MASK_SFT (0x1 << 24)
+#define SINE_MODE_CH2_SFT 20
+#define SINE_MODE_CH2_MASK 0xf
+#define SINE_MODE_CH2_MASK_SFT (0xf << 20)
+#define AMP_DIV_CH2_SFT 17
+#define AMP_DIV_CH2_MASK 0x7
+#define AMP_DIV_CH2_MASK_SFT (0x7 << 17)
+#define FREQ_DIV_CH2_SFT 12
+#define FREQ_DIV_CH2_MASK 0x1f
+#define FREQ_DIV_CH2_MASK_SFT (0x1f << 12)
+#define SINE_MODE_CH1_SFT 8
+#define SINE_MODE_CH1_MASK 0xf
+#define SINE_MODE_CH1_MASK_SFT (0xf << 8)
+#define AMP_DIV_CH1_SFT 5
+#define AMP_DIV_CH1_MASK 0x7
+#define AMP_DIV_CH1_MASK_SFT (0x7 << 5)
+#define FREQ_DIV_CH1_SFT 0
+#define FREQ_DIV_CH1_MASK 0x1f
+#define FREQ_DIV_CH1_MASK_SFT (0x1f << 0)
+
+/* AFE_SINEGEN_CON2 */
+#define INNER_LOOP_BACK_MODE_SFT 0
+#define INNER_LOOP_BACK_MODE_MASK 0x3f
+#define INNER_LOOP_BACK_MODE_MASK_SFT (0x3f << 0)
+
+/* AFE_MEMIF_MINLEN */
+#define HDMI_MINLEN_SFT 24
+#define HDMI_MINLEN_MASK 0xf
+#define HDMI_MINLEN_MASK_SFT (0xf << 24)
+#define DL3_MINLEN_SFT 12
+#define DL3_MINLEN_MASK 0xf
+#define DL3_MINLEN_MASK_SFT (0xf << 12)
+#define DL2_MINLEN_SFT 8
+#define DL2_MINLEN_MASK 0xf
+#define DL2_MINLEN_MASK_SFT (0xf << 8)
+#define DL1_DATA2_MINLEN_SFT 4
+#define DL1_DATA2_MINLEN_MASK 0xf
+#define DL1_DATA2_MINLEN_MASK_SFT (0xf << 4)
+#define DL1_MINLEN_SFT 0
+#define DL1_MINLEN_MASK 0xf
+#define DL1_MINLEN_MASK_SFT (0xf << 0)
+
+/* AFE_MEMIF_MAXLEN */
+#define HDMI_MAXLEN_SFT 24
+#define HDMI_MAXLEN_MASK 0xf
+#define HDMI_MAXLEN_MASK_SFT (0xf << 24)
+#define DL3_MAXLEN_SFT 8
+#define DL3_MAXLEN_MASK 0xf
+#define DL3_MAXLEN_MASK_SFT (0xf << 8)
+#define DL2_MAXLEN_SFT 4
+#define DL2_MAXLEN_MASK 0xf
+#define DL2_MAXLEN_MASK_SFT (0xf << 4)
+#define DL1_MAXLEN_SFT 0
+#define DL1_MAXLEN_MASK 0x3
+#define DL1_MAXLEN_MASK_SFT (0x3 << 0)
+
+/* AFE_MEMIF_PBUF_SIZE */
+#define VUL12_4CH_SFT 17
+#define VUL12_4CH_MASK 0x1
+#define VUL12_4CH_MASK_SFT (0x1 << 17)
+#define DL3_PBUF_SIZE_SFT 10
+#define DL3_PBUF_SIZE_MASK 0x3
+#define DL3_PBUF_SIZE_MASK_SFT (0x3 << 10)
+#define HDMI_PBUF_SIZE_SFT 4
+#define HDMI_PBUF_SIZE_MASK 0x3
+#define HDMI_PBUF_SIZE_MASK_SFT (0x3 << 4)
+#define DL2_PBUF_SIZE_SFT 2
+#define DL2_PBUF_SIZE_MASK 0x3
+#define DL2_PBUF_SIZE_MASK_SFT (0x3 << 2)
+#define DL1_PBUF_SIZE_SFT 0
+#define DL1_PBUF_SIZE_MASK 0x3
+#define DL1_PBUF_SIZE_MASK_SFT (0x3 << 0)
+
+/* AFE_HD_ENGEN_ENABLE */
+#define AFE_24M_ON_SFT 1
+#define AFE_24M_ON_MASK 0x1
+#define AFE_24M_ON_MASK_SFT (0x1 << 1)
+#define AFE_22M_ON_SFT 0
+#define AFE_22M_ON_MASK 0x1
+#define AFE_22M_ON_MASK_SFT (0x1 << 0)
+
+/* AFE_IRQ_MCU_CON0 */
+#define IRQ12_MCU_ON_SFT 12
+#define IRQ12_MCU_ON_MASK 0x1
+#define IRQ12_MCU_ON_MASK_SFT (0x1 << 12)
+#define IRQ11_MCU_ON_SFT 11
+#define IRQ11_MCU_ON_MASK 0x1
+#define IRQ11_MCU_ON_MASK_SFT (0x1 << 11)
+#define IRQ10_MCU_ON_SFT 10
+#define IRQ10_MCU_ON_MASK 0x1
+#define IRQ10_MCU_ON_MASK_SFT (0x1 << 10)
+#define IRQ9_MCU_ON_SFT 9
+#define IRQ9_MCU_ON_MASK 0x1
+#define IRQ9_MCU_ON_MASK_SFT (0x1 << 9)
+#define IRQ8_MCU_ON_SFT 8
+#define IRQ8_MCU_ON_MASK 0x1
+#define IRQ8_MCU_ON_MASK_SFT (0x1 << 8)
+#define IRQ7_MCU_ON_SFT 7
+#define IRQ7_MCU_ON_MASK 0x1
+#define IRQ7_MCU_ON_MASK_SFT (0x1 << 7)
+#define IRQ6_MCU_ON_SFT 6
+#define IRQ6_MCU_ON_MASK 0x1
+#define IRQ6_MCU_ON_MASK_SFT (0x1 << 6)
+#define IRQ5_MCU_ON_SFT 5
+#define IRQ5_MCU_ON_MASK 0x1
+#define IRQ5_MCU_ON_MASK_SFT (0x1 << 5)
+#define IRQ4_MCU_ON_SFT 4
+#define IRQ4_MCU_ON_MASK 0x1
+#define IRQ4_MCU_ON_MASK_SFT (0x1 << 4)
+#define IRQ3_MCU_ON_SFT 3
+#define IRQ3_MCU_ON_MASK 0x1
+#define IRQ3_MCU_ON_MASK_SFT (0x1 << 3)
+#define IRQ2_MCU_ON_SFT 2
+#define IRQ2_MCU_ON_MASK 0x1
+#define IRQ2_MCU_ON_MASK_SFT (0x1 << 2)
+#define IRQ1_MCU_ON_SFT 1
+#define IRQ1_MCU_ON_MASK 0x1
+#define IRQ1_MCU_ON_MASK_SFT (0x1 << 1)
+#define IRQ0_MCU_ON_SFT 0
+#define IRQ0_MCU_ON_MASK 0x1
+#define IRQ0_MCU_ON_MASK_SFT (0x1 << 0)
+
+/* AFE_IRQ_MCU_CON1 */
+#define IRQ7_MCU_MODE_SFT 28
+#define IRQ7_MCU_MODE_MASK 0xf
+#define IRQ7_MCU_MODE_MASK_SFT (0xf << 28)
+#define IRQ6_MCU_MODE_SFT 24
+#define IRQ6_MCU_MODE_MASK 0xf
+#define IRQ6_MCU_MODE_MASK_SFT (0xf << 24)
+#define IRQ5_MCU_MODE_SFT 20
+#define IRQ5_MCU_MODE_MASK 0xf
+#define IRQ5_MCU_MODE_MASK_SFT (0xf << 20)
+#define IRQ4_MCU_MODE_SFT 16
+#define IRQ4_MCU_MODE_MASK 0xf
+#define IRQ4_MCU_MODE_MASK_SFT (0xf << 16)
+#define IRQ3_MCU_MODE_SFT 12
+#define IRQ3_MCU_MODE_MASK 0xf
+#define IRQ3_MCU_MODE_MASK_SFT (0xf << 12)
+#define IRQ2_MCU_MODE_SFT 8
+#define IRQ2_MCU_MODE_MASK 0xf
+#define IRQ2_MCU_MODE_MASK_SFT (0xf << 8)
+#define IRQ1_MCU_MODE_SFT 4
+#define IRQ1_MCU_MODE_MASK 0xf
+#define IRQ1_MCU_MODE_MASK_SFT (0xf << 4)
+#define IRQ0_MCU_MODE_SFT 0
+#define IRQ0_MCU_MODE_MASK 0xf
+#define IRQ0_MCU_MODE_MASK_SFT (0xf << 0)
+
+/* AFE_IRQ_MCU_CON2 */
+#define IRQ12_MCU_MODE_SFT 4
+#define IRQ12_MCU_MODE_MASK 0xf
+#define IRQ12_MCU_MODE_MASK_SFT (0xf << 4)
+#define IRQ11_MCU_MODE_SFT 0
+#define IRQ11_MCU_MODE_MASK 0xf
+#define IRQ11_MCU_MODE_MASK_SFT (0xf << 0)
+
+/* AFE_IRQ_MCU_CLR */
+#define IRQ12_MCU_MISS_CNT_CLR_SFT 28
+#define IRQ12_MCU_MISS_CNT_CLR_MASK 0x1
+#define IRQ12_MCU_MISS_CNT_CLR_MASK_SFT (0x1 << 28)
+#define IRQ11_MCU_MISS_CNT_CLR_SFT 27
+#define IRQ11_MCU_MISS_CNT_CLR_MASK 0x1
+#define IRQ11_MCU_MISS_CNT_CLR_MASK_SFT (0x1 << 27)
+#define IRQ10_MCU_MISS_CLR_SFT 26
+#define IRQ10_MCU_MISS_CLR_MASK 0x1
+#define IRQ10_MCU_MISS_CLR_MASK_SFT (0x1 << 26)
+#define IRQ9_MCU_MISS_CLR_SFT 25
+#define IRQ9_MCU_MISS_CLR_MASK 0x1
+#define IRQ9_MCU_MISS_CLR_MASK_SFT (0x1 << 25)
+#define IRQ8_MCU_MISS_CLR_SFT 24
+#define IRQ8_MCU_MISS_CLR_MASK 0x1
+#define IRQ8_MCU_MISS_CLR_MASK_SFT (0x1 << 24)
+#define IRQ7_MCU_MISS_CLR_SFT 23
+#define IRQ7_MCU_MISS_CLR_MASK 0x1
+#define IRQ7_MCU_MISS_CLR_MASK_SFT (0x1 << 23)
+#define IRQ6_MCU_MISS_CLR_SFT 22
+#define IRQ6_MCU_MISS_CLR_MASK 0x1
+#define IRQ6_MCU_MISS_CLR_MASK_SFT (0x1 << 22)
+#define IRQ5_MCU_MISS_CLR_SFT 21
+#define IRQ5_MCU_MISS_CLR_MASK 0x1
+#define IRQ5_MCU_MISS_CLR_MASK_SFT (0x1 << 21)
+#define IRQ4_MCU_MISS_CLR_SFT 20
+#define IRQ4_MCU_MISS_CLR_MASK 0x1
+#define IRQ4_MCU_MISS_CLR_MASK_SFT (0x1 << 20)
+#define IRQ3_MCU_MISS_CLR_SFT 19
+#define IRQ3_MCU_MISS_CLR_MASK 0x1
+#define IRQ3_MCU_MISS_CLR_MASK_SFT (0x1 << 19)
+#define IRQ2_MCU_MISS_CLR_SFT 18
+#define IRQ2_MCU_MISS_CLR_MASK 0x1
+#define IRQ2_MCU_MISS_CLR_MASK_SFT (0x1 << 18)
+#define IRQ1_MCU_MISS_CLR_SFT 17
+#define IRQ1_MCU_MISS_CLR_MASK 0x1
+#define IRQ1_MCU_MISS_CLR_MASK_SFT (0x1 << 17)
+#define IRQ0_MCU_MISS_CLR_SFT 16
+#define IRQ0_MCU_MISS_CLR_MASK 0x1
+#define IRQ0_MCU_MISS_CLR_MASK_SFT (0x1 << 16)
+#define IRQ12_MCU_CLR_SFT 12
+#define IRQ12_MCU_CLR_MASK 0x1
+#define IRQ12_MCU_CLR_MASK_SFT (0x1 << 12)
+#define IRQ11_MCU_CLR_SFT 11
+#define IRQ11_MCU_CLR_MASK 0x1
+#define IRQ11_MCU_CLR_MASK_SFT (0x1 << 11)
+#define IRQ10_MCU_CLR_SFT 10
+#define IRQ10_MCU_CLR_MASK 0x1
+#define IRQ10_MCU_CLR_MASK_SFT (0x1 << 10)
+#define IRQ9_MCU_CLR_SFT 9
+#define IRQ9_MCU_CLR_MASK 0x1
+#define IRQ9_MCU_CLR_MASK_SFT (0x1 << 9)
+#define IRQ8_MCU_CLR_SFT 8
+#define IRQ8_MCU_CLR_MASK 0x1
+#define IRQ8_MCU_CLR_MASK_SFT (0x1 << 8)
+#define IRQ7_MCU_CLR_SFT 7
+#define IRQ7_MCU_CLR_MASK 0x1
+#define IRQ7_MCU_CLR_MASK_SFT (0x1 << 7)
+#define IRQ6_MCU_CLR_SFT 6
+#define IRQ6_MCU_CLR_MASK 0x1
+#define IRQ6_MCU_CLR_MASK_SFT (0x1 << 6)
+#define IRQ5_MCU_CLR_SFT 5
+#define IRQ5_MCU_CLR_MASK 0x1
+#define IRQ5_MCU_CLR_MASK_SFT (0x1 << 5)
+#define IRQ4_MCU_CLR_SFT 4
+#define IRQ4_MCU_CLR_MASK 0x1
+#define IRQ4_MCU_CLR_MASK_SFT (0x1 << 4)
+#define IRQ3_MCU_CLR_SFT 3
+#define IRQ3_MCU_CLR_MASK 0x1
+#define IRQ3_MCU_CLR_MASK_SFT (0x1 << 3)
+#define IRQ2_MCU_CLR_SFT 2
+#define IRQ2_MCU_CLR_MASK 0x1
+#define IRQ2_MCU_CLR_MASK_SFT (0x1 << 2)
+#define IRQ1_MCU_CLR_SFT 1
+#define IRQ1_MCU_CLR_MASK 0x1
+#define IRQ1_MCU_CLR_MASK_SFT (0x1 << 1)
+#define IRQ0_MCU_CLR_SFT 0
+#define IRQ0_MCU_CLR_MASK 0x1
+#define IRQ0_MCU_CLR_MASK_SFT (0x1 << 0)
+
+/* AFE_MEMIF_MSB */
+#define CPU_COMPACT_MODE_SFT 29
+#define CPU_COMPACT_MODE_MASK 0x1
+#define CPU_COMPACT_MODE_MASK_SFT (0x1 << 29)
+#define CPU_HD_ALIGN_SFT 28
+#define CPU_HD_ALIGN_MASK 0x1
+#define CPU_HD_ALIGN_MASK_SFT (0x1 << 28)
+#define AWB2_AXI_WR_SIGN_SFT 24
+#define AWB2_AXI_WR_SIGN_MASK 0x1
+#define AWB2_AXI_WR_SIGN_MASK_SFT (0x1 << 24)
+#define VUL2_AXI_WR_SIGN_SFT 22
+#define VUL2_AXI_WR_SIGN_MASK 0x1
+#define VUL2_AXI_WR_SIGN_MASK_SFT (0x1 << 22)
+#define VUL12_AXI_WR_SIGN_SFT 21
+#define VUL12_AXI_WR_SIGN_MASK 0x1
+#define VUL12_AXI_WR_SIGN_MASK_SFT (0x1 << 21)
+#define VUL_AXI_WR_SIGN_SFT 20
+#define VUL_AXI_WR_SIGN_MASK 0x1
+#define VUL_AXI_WR_SIGN_MASK_SFT (0x1 << 20)
+#define MOD_DAI_AXI_WR_SIGN_SFT 18
+#define MOD_DAI_AXI_WR_SIGN_MASK 0x1
+#define MOD_DAI_AXI_WR_SIGN_MASK_SFT (0x1 << 18)
+#define AWB_MSTR_SIGN_SFT 17
+#define AWB_MSTR_SIGN_MASK 0x1
+#define AWB_MSTR_SIGN_MASK_SFT (0x1 << 17)
+#define SYSRAM_SIGN_SFT 16
+#define SYSRAM_SIGN_MASK 0x1
+#define SYSRAM_SIGN_MASK_SFT (0x1 << 16)
+
+/* AFE_HDMI_CONN0 */
+#define HDMI_O_7_SFT 21
+#define HDMI_O_7_MASK 0x7
+#define HDMI_O_7_MASK_SFT (0x7 << 21)
+#define HDMI_O_6_SFT 18
+#define HDMI_O_6_MASK 0x7
+#define HDMI_O_6_MASK_SFT (0x7 << 18)
+#define HDMI_O_5_SFT 15
+#define HDMI_O_5_MASK 0x7
+#define HDMI_O_5_MASK_SFT (0x7 << 15)
+#define HDMI_O_4_SFT 12
+#define HDMI_O_4_MASK 0x7
+#define HDMI_O_4_MASK_SFT (0x7 << 12)
+#define HDMI_O_3_SFT 9
+#define HDMI_O_3_MASK 0x7
+#define HDMI_O_3_MASK_SFT (0x7 << 9)
+#define HDMI_O_2_SFT 6
+#define HDMI_O_2_MASK 0x7
+#define HDMI_O_2_MASK_SFT (0x7 << 6)
+#define HDMI_O_1_SFT 3
+#define HDMI_O_1_MASK 0x7
+#define HDMI_O_1_MASK_SFT (0x7 << 3)
+#define HDMI_O_0_SFT 0
+#define HDMI_O_0_MASK 0x7
+#define HDMI_O_0_MASK_SFT (0x7 << 0)
+
+/* AFE_TDM_CON1 */
+#define TDM_EN_SFT 0
+#define TDM_EN_MASK 0x1
+#define TDM_EN_MASK_SFT (0x1 << 0)
+#define BCK_INVERSE_SFT 1
+#define BCK_INVERSE_MASK 0x1
+#define BCK_INVERSE_MASK_SFT (0x1 << 1)
+#define LRCK_INVERSE_SFT 2
+#define LRCK_INVERSE_MASK 0x1
+#define LRCK_INVERSE_MASK_SFT (0x1 << 2)
+#define DELAY_DATA_SFT 3
+#define DELAY_DATA_MASK 0x1
+#define DELAY_DATA_MASK_SFT (0x1 << 3)
+#define LEFT_ALIGN_SFT 4
+#define LEFT_ALIGN_MASK 0x1
+#define LEFT_ALIGN_MASK_SFT (0x1 << 4)
+#define WLEN_SFT 8
+#define WLEN_MASK 0x3
+#define WLEN_MASK_SFT (0x3 << 8)
+#define CHANNEL_NUM_SFT 10
+#define CHANNEL_NUM_MASK 0x3
+#define CHANNEL_NUM_MASK_SFT (0x3 << 10)
+#define CHANNEL_BCK_CYCLES_SFT 12
+#define CHANNEL_BCK_CYCLES_MASK 0x3
+#define CHANNEL_BCK_CYCLES_MASK_SFT (0x3 << 12)
+#define DAC_BIT_NUM_SFT 16
+#define DAC_BIT_NUM_MASK 0x1f
+#define DAC_BIT_NUM_MASK_SFT (0x1f << 16)
+#define LRCK_TDM_WIDTH_SFT 24
+#define LRCK_TDM_WIDTH_MASK 0xff
+#define LRCK_TDM_WIDTH_MASK_SFT (0xff << 24)
+
+/* AFE_TDM_CON2 */
+#define ST_CH_PAIR_SOUT0_SFT 0
+#define ST_CH_PAIR_SOUT0_MASK 0x7
+#define ST_CH_PAIR_SOUT0_MASK_SFT (0x7 << 0)
+#define ST_CH_PAIR_SOUT1_SFT 4
+#define ST_CH_PAIR_SOUT1_MASK 0x7
+#define ST_CH_PAIR_SOUT1_MASK_SFT (0x7 << 4)
+#define ST_CH_PAIR_SOUT2_SFT 8
+#define ST_CH_PAIR_SOUT2_MASK 0x7
+#define ST_CH_PAIR_SOUT2_MASK_SFT (0x7 << 8)
+#define ST_CH_PAIR_SOUT3_SFT 12
+#define ST_CH_PAIR_SOUT3_MASK 0x7
+#define ST_CH_PAIR_SOUT3_MASK_SFT (0x7 << 12)
+#define TDM_FIX_VALUE_SEL_SFT 16
+#define TDM_FIX_VALUE_SEL_MASK 0x1
+#define TDM_FIX_VALUE_SEL_MASK_SFT (0x1 << 16)
+#define TDM_I2S_LOOPBACK_SFT 20
+#define TDM_I2S_LOOPBACK_MASK 0x1
+#define TDM_I2S_LOOPBACK_MASK_SFT (0x1 << 20)
+#define TDM_I2S_LOOPBACK_CH_SFT 21
+#define TDM_I2S_LOOPBACK_CH_MASK 0x3
+#define TDM_I2S_LOOPBACK_CH_MASK_SFT (0x3 << 21)
+#define TDM_FIX_VALUE_SFT 24
+#define TDM_FIX_VALUE_MASK 0xff
+#define TDM_FIX_VALUE_MASK_SFT (0xff << 24)
+
+/* AFE_HDMI_OUT_CON0 */
+#define AFE_HDMI_OUT_ON_RETM_SFT 8
+#define AFE_HDMI_OUT_ON_RETM_MASK 0x1
+#define AFE_HDMI_OUT_ON_RETM_MASK_SFT (0x1 << 8)
+#define AFE_HDMI_OUT_CH_NUM_SFT 4
+#define AFE_HDMI_OUT_CH_NUM_MASK 0xf
+#define AFE_HDMI_OUT_CH_NUM_MASK_SFT (0xf << 4)
+#define AFE_HDMI_OUT_BIT_WIDTH_SFT 1
+#define AFE_HDMI_OUT_BIT_WIDTH_MASK 0x1
+#define AFE_HDMI_OUT_BIT_WIDTH_MASK_SFT (0x1 << 1)
+#define AFE_HDMI_OUT_ON_SFT 0
+#define AFE_HDMI_OUT_ON_MASK 0x1
+#define AFE_HDMI_OUT_ON_MASK_SFT (0x1 << 0)
+#endif
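
The *_SFT / *_MASK / *_MASK_SFT triplets added above follow the usual MediaTek AFE convention: bit shift, field-width mask, and pre-shifted mask. As a hedged illustration (not part of this patch), such definitions are normally consumed through regmap field updates; the regmap handle and the choice of AFE_DAC_CON0 (whose offset is defined earlier in this header) are assumptions for the sketch.

/* Hedged sketch, not part of the patch: typical use of the SFT/MASK_SFT
 * triplets with regmap. The regmap handle is an assumed driver field.
 */
#include <linux/regmap.h>

static int example_enable_afe(struct regmap *regmap)
{
	/* Set AFE_ON (bit 0 of AFE_DAC_CON0) without touching other fields. */
	return regmap_update_bits(regmap, AFE_DAC_CON0,
				  AFE_ON_MASK_SFT,
				  0x1 << AFE_ON_SFT);
}

The pre-shifted *_MASK_SFT form is what regmap_update_bits() expects for its mask argument, while the bare *_SFT value shifts the new field value into place.
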
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
index 0e4f65e654c4..75e5e480fda2 100644
--- a/sound/soc/meson/axg-fifo.c
+++ b/sound/soc/meson/axg-fifo.c
@@ -267,9 +267,10 @@ int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
struct snd_card *card = rtd->card->snd_card;
size_t size = axg_fifo_hw.buffer_bytes_max;
- return snd_pcm_lib_preallocate_pages(rtd->pcm->streams[type].substream,
- SNDRV_DMA_TYPE_DEV, card->dev,
- size, size);
+ snd_pcm_lib_preallocate_pages(rtd->pcm->streams[type].substream,
+ SNDRV_DMA_TYPE_DEV, card->dev,
+ size, size);
+ return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);
diff --git a/sound/soc/pxa/Makefile b/sound/soc/pxa/Makefile
index 0ab2a9dcb720..ea4929d73318 100644
--- a/sound/soc/pxa/Makefile
+++ b/sound/soc/pxa/Makefile
@@ -30,7 +30,6 @@ snd-soc-magician-objs := magician.o
snd-soc-mioa701-objs := mioa701_wm9713.o
snd-soc-z2-objs := z2.o
snd-soc-imote2-objs := imote2.o
-snd-soc-raumfeld-objs := raumfeld.o
snd-soc-brownstone-objs := brownstone.o
snd-soc-ttc-dkb-objs := ttc-dkb.o
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 804ae0d93058..75ceb04d8bf0 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -98,10 +98,12 @@ config SND_SOC_MSM8996
config SND_SOC_SDM845
tristate "SoC Machine driver for SDM845 boards"
- depends on QCOM_APR
+ depends on QCOM_APR && MFD_CROS_EC
select SND_SOC_QDSP6
select SND_SOC_QCOM_COMMON
select SND_SOC_RT5663
+ select SND_SOC_MAX98927
+ select SND_SOC_CROS_EC_CODEC
help
To add support for audio on Qualcomm Technologies Inc.
SDM845 SoC-based systems.
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 1dd23bba1bed..4b559932adc3 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -164,41 +164,52 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
if (!cpu || !codec) {
dev_err(dev, "Can't find cpu/codec DT node\n");
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto error;
}
link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
if (!link->cpu_of_node) {
dev_err(card->dev, "error getting cpu phandle\n");
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto error;
}
ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
if (ret) {
dev_err(card->dev, "error getting cpu dai name\n");
- return ERR_PTR(ret);
+ goto error;
}
ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
if (ret < 0) {
dev_err(card->dev, "error getting codec dai name\n");
- return ERR_PTR(ret);
+ goto error;
}
link->platform_of_node = link->cpu_of_node;
ret = of_property_read_string(np, "link-name", &link->name);
if (ret) {
dev_err(card->dev, "error getting codec dai_link name\n");
- return ERR_PTR(ret);
+ goto error;
}
link->stream_name = link->name;
link->init = apq8016_sbc_dai_init;
link++;
+
+ of_node_put(cpu);
+ of_node_put(codec);
}
return data;
+
+ error:
+ of_node_put(np);
+ of_node_put(cpu);
+ of_node_put(codec);
+ return ERR_PTR(ret);
}
static const struct snd_soc_dapm_widget apq8016_sbc_dapm_widgets[] = {
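
The apq8016_sbc change above illustrates the device-tree reference-counting rule this series enforces: nodes returned by of_get_child_by_name(), and the iterator's current node when leaving for_each_child_of_node() early, must be released with of_node_put(). A minimal stand-alone sketch of the same pattern, with hypothetical child names, follows; it is not code from this driver.

/* Hedged sketch of the of_node_put() pattern applied in this patch.
 * The "cpu" child lookup mirrors the driver; error handling is simplified.
 */
#include <linux/of.h>

static int example_parse(struct device_node *parent)
{
	struct device_node *np, *cpu;

	for_each_child_of_node(parent, np) {
		cpu = of_get_child_by_name(np, "cpu");
		if (!cpu) {
			of_node_put(np);	/* drop the iterator's reference */
			return -EINVAL;
		}
		/* ... use "cpu" ... */
		of_node_put(cpu);		/* balance of_get_child_by_name() */
	}
	return 0;
}
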
diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
index fb45f396ab4a..94363fd6846a 100644
--- a/sound/soc/qcom/apq8096.c
+++ b/sound/soc/qcom/apq8096.c
@@ -9,6 +9,10 @@
#include <sound/pcm.h>
#include "common.h"
+#define SLIM_MAX_TX_PORTS 16
+#define SLIM_MAX_RX_PORTS 16
+#define WCD9335_DEFAULT_MCLK_RATE 9600000
+
static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
@@ -23,14 +27,79 @@ static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
+static int msm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ u32 rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
+ u32 rx_ch_cnt = 0, tx_ch_cnt = 0;
+ int ret = 0;
+
+ ret = snd_soc_dai_get_channel_map(codec_dai,
+ &tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
+ if (ret != 0 && ret != -ENOTSUPP) {
+ pr_err("failed to get codec chan map, err:%d\n", ret);
+ goto end;
+ } else if (ret == -ENOTSUPP) {
+ return 0;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = snd_soc_dai_set_channel_map(cpu_dai, 0, NULL,
+ rx_ch_cnt, rx_ch);
+ else
+ ret = snd_soc_dai_set_channel_map(cpu_dai, tx_ch_cnt, tx_ch,
+ 0, NULL);
+ if (ret != 0 && ret != -ENOTSUPP)
+ pr_err("Failed to set cpu chan map, err:%d\n", ret);
+ else if (ret == -ENOTSUPP)
+ ret = 0;
+end:
+ return ret;
+}
+
+static struct snd_soc_ops apq8096_ops = {
+ .hw_params = msm_snd_hw_params,
+};
+
+static int apq8096_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+ /*
+ * Codec SLIMBUS configuration
+ * RX1, RX2, RX3, RX4, RX5, RX6, RX7, RX8, RX9, RX10, RX11, RX12, RX13
+ * TX1, TX2, TX3, TX4, TX5, TX6, TX7, TX8, TX9, TX10, TX11, TX12, TX13
+ * TX14, TX15, TX16
+ */
+ unsigned int rx_ch[SLIM_MAX_RX_PORTS] = {144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156};
+ unsigned int tx_ch[SLIM_MAX_TX_PORTS] = {128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143};
+
+ snd_soc_dai_set_channel_map(codec_dai, ARRAY_SIZE(tx_ch),
+ tx_ch, ARRAY_SIZE(rx_ch), rx_ch);
+
+ snd_soc_dai_set_sysclk(codec_dai, 0, WCD9335_DEFAULT_MCLK_RATE,
+ SNDRV_PCM_STREAM_PLAYBACK);
+
+ return 0;
+}
+
static void apq8096_add_be_ops(struct snd_soc_card *card)
{
struct snd_soc_dai_link *link;
int i;
for_each_card_prelinks(card, i, link) {
- if (link->no_pcm == 1)
+ if (link->no_pcm == 1) {
link->be_hw_params_fixup = apq8096_be_hw_params_fixup;
+ link->init = apq8096_init;
+ link->ops = &apq8096_ops;
+ }
}
}
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index 4715527054e5..5661025e8cec 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -42,6 +42,9 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
link = card->dai_link;
for_each_child_of_node(dev->of_node, np) {
cpu = of_get_child_by_name(np, "cpu");
+ platform = of_get_child_by_name(np, "platform");
+ codec = of_get_child_by_name(np, "codec");
+
if (!cpu) {
dev_err(dev, "Can't find cpu DT node\n");
ret = -EINVAL;
@@ -63,8 +66,6 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
goto err;
}
- platform = of_get_child_by_name(np, "platform");
- codec = of_get_child_by_name(np, "codec");
if (codec && platform) {
link->platform_of_node = of_parse_phandle(platform,
"sound-dai",
@@ -100,10 +101,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
link->dpcm_capture = 1;
link->stream_name = link->name;
link++;
+
+ of_node_put(cpu);
+ of_node_put(codec);
+ of_node_put(platform);
}
return 0;
err:
+ of_node_put(np);
of_node_put(cpu);
of_node_put(codec);
of_node_put(platform);
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 5b986b74dd36..548eb4fa2da6 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -570,10 +570,10 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
prtd->audio_client = q6asm_audio_client_alloc(dev,
(q6asm_cb)compress_event_handler,
prtd, stream_id, LEGACY_PCM_MODE);
- if (!prtd->audio_client) {
+ if (IS_ERR(prtd->audio_client)) {
dev_err(dev, "Could not allocate memory\n");
- kfree(prtd);
- return -ENOMEM;
+ ret = PTR_ERR(prtd->audio_client);
+ goto free_prtd;
}
size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE *
@@ -582,7 +582,7 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
&prtd->dma_buffer);
if (ret) {
dev_err(dev, "Cannot allocate buffer(s)\n");
- return ret;
+ goto free_client;
}
if (pdata->sid < 0)
@@ -595,6 +595,13 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
runtime->private_data = prtd;
return 0;
+
+free_client:
+ q6asm_audio_client_free(prtd->audio_client);
+free_prtd:
+ kfree(prtd);
+
+ return ret;
}
static int q6asm_dai_compr_free(struct snd_compr_stream *stream)
@@ -874,7 +881,7 @@ static int of_q6asm_parse_dai_data(struct device *dev,
for_each_child_of_node(dev->of_node, node) {
ret = of_property_read_u32(node, "reg", &id);
- if (ret || id > MAX_SESSIONS || id < 0) {
+ if (ret || id >= MAX_SESSIONS || id < 0) {
dev_err(dev, "valid dai id not found:%d\n", ret);
continue;
}
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 1db8ef668223..882f52ed8231 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -158,17 +158,24 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+static void sdm845_jack_free(struct snd_jack *jack)
+{
+ struct snd_soc_component *component = jack->private_data;
+
+ snd_soc_component_set_jack(component, NULL, NULL);
+}
+
static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_component *component;
- struct snd_soc_dai_link *dai_link = rtd->dai_link;
struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card);
- int i, rval;
+ struct snd_jack *jack;
+ int rval;
if (!pdata->jack_setup) {
- struct snd_jack *jack;
-
rval = snd_soc_card_jack_new(card, "Headset Jack",
SND_JACK_HEADSET |
SND_JACK_HEADPHONE |
@@ -190,16 +197,22 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
pdata->jack_setup = true;
}
- for (i = 0 ; i < dai_link->num_codecs; i++) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ switch (cpu_dai->id) {
+ case PRIMARY_MI2S_RX:
+ jack = pdata->jack.jack;
+ component = codec_dai->component;
- component = dai->component;
- rval = snd_soc_component_set_jack(
- component, &pdata->jack, NULL);
+ jack->private_data = component;
+ jack->private_free = sdm845_jack_free;
+ rval = snd_soc_component_set_jack(component,
+ &pdata->jack, NULL);
if (rval != 0 && rval != -ENOTSUPP) {
dev_warn(card->dev, "Failed to set jack: %d\n", rval);
return rval;
}
+ break;
+ default:
+ break;
}
return 0;
@@ -235,12 +248,14 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
break;
case SECONDARY_MI2S_TX:
+ codec_dai_fmt |= SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_I2S;
if (++(data->sec_mi2s_clk_count) == 1) {
snd_soc_dai_set_sysclk(cpu_dai,
Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT,
MI2S_BCLK_RATE, SNDRV_PCM_STREAM_CAPTURE);
}
snd_soc_dai_set_fmt(cpu_dai, fmt);
+ snd_soc_dai_set_fmt(codec_dai, codec_dai_fmt);
break;
case QUATERNARY_TDM_RX_0:
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index 7ae580d677c8..0ae15d01a3f6 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -17,5 +17,6 @@
* otherwise actual DMA channel names must be passed to this function.
*/
int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
- const char *tx, const char *rx);
+ const char *tx, const char *rx,
+ struct device *dma_dev);
#endif /* _SAMSUNG_DMA_H */
diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
index 9104c98deeb7..302871974cb3 100644
--- a/sound/soc/samsung/dmaengine.c
+++ b/sound/soc/samsung/dmaengine.c
@@ -25,9 +25,9 @@
#include "dma.h"
int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
- const char *tx, const char *rx)
+ const char *tx, const char *rx,
+ struct device *dma_dev)
{
- unsigned int flags = SND_DMAENGINE_PCM_FLAG_COMPAT;
struct snd_dmaengine_pcm_config *pcm_conf;
pcm_conf = devm_kzalloc(dev, sizeof(*pcm_conf), GFP_KERNEL);
@@ -36,15 +36,13 @@ int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
pcm_conf->prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
pcm_conf->compat_filter_fn = filter;
+ pcm_conf->dma_dev = dma_dev;
- if (dev->of_node) {
- pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx;
- pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx;
- } else {
- flags |= SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME;
- }
+ pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx;
+ pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx;
- return devm_snd_dmaengine_pcm_register(dev, pcm_conf, flags);
+ return devm_snd_dmaengine_pcm_register(dev, pcm_conf,
+ SND_DMAENGINE_PCM_FLAG_COMPAT);
}
EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_register);
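
With the signature change above, callers of samsung_asoc_dma_platform_register() now supply the device whose DT node carries the DMA channel names. A hedged example of a caller after this change; the wrapper, its arguments, and the NULL compat filter are hypothetical, not taken from this series.

/* Hedged sketch, not part of this patch: registering the PCM with an
 * explicit DMA device. Everything except the exported helper is hypothetical.
 */
#include <linux/platform_device.h>
#include "dma.h"

static int example_platform_register(struct platform_device *pdev,
				     struct device *dma_dev)
{
	/* NULL filter: rely on the DT "tx"/"rx" dma-names lookup only. */
	return samsung_asoc_dma_platform_register(&pdev->dev, NULL,
						  "tx", "rx", dma_dev);
}
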
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index d6c62aa13041..4231001226f4 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1,14 +1,9 @@
-/* sound/soc/samsung/i2s.c
- *
- * ALSA SoC Audio Layer - Samsung I2S Controller driver
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassisinghbrar@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// ALSA SoC Audio Layer - Samsung I2S Controller driver
+//
+// Copyright (c) 2010 Samsung Electronics Co. Ltd.
+// Jaswinder Singh <jassisinghbrar@gmail.com>
#include <dt-bindings/sound/samsung-i2s.h>
#include <linux/delay.h>
@@ -34,6 +29,9 @@
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+#define SAMSUNG_I2S_ID_PRIMARY 1
+#define SAMSUNG_I2S_ID_SECONDARY 2
+
struct samsung_i2s_variant_regs {
unsigned int bfs_off;
unsigned int rfs_off;
@@ -57,64 +55,88 @@ struct samsung_i2s_dai_data {
struct i2s_dai {
/* Platform device for this DAI */
struct platform_device *pdev;
- /* Memory mapped SFR region */
- void __iomem *addr;
- /* Rate of RCLK source clock */
- unsigned long rclk_srcrate;
- /* Frame Clock */
+
+ /* Frame clock */
unsigned frmclk;
/*
- * Specifically requested RCLK,BCLK by MACHINE Driver.
+ * Specifically requested RCLK, BCLK by machine driver.
* 0 indicates CPU driver is free to choose any value.
*/
unsigned rfs, bfs;
- /* I2S Controller's core clock */
- struct clk *clk;
- /* Clock for generating I2S signals */
- struct clk *op_clk;
/* Pointer to the Primary_Fifo if this is Sec_Fifo, NULL otherwise */
struct i2s_dai *pri_dai;
/* Pointer to the Secondary_Fifo if it has one, NULL otherwise */
struct i2s_dai *sec_dai;
-#define DAI_OPENED (1 << 0) /* Dai is opened */
-#define DAI_MANAGER (1 << 1) /* Dai is the manager */
+
+#define DAI_OPENED (1 << 0) /* DAI is opened */
+#define DAI_MANAGER (1 << 1) /* DAI is the manager */
unsigned mode;
+
/* Driver for this DAI */
- struct snd_soc_dai_driver i2s_dai_drv;
+ struct snd_soc_dai_driver *drv;
+
/* DMA parameters */
struct snd_dmaengine_dai_dma_data dma_playback;
struct snd_dmaengine_dai_dma_data dma_capture;
struct snd_dmaengine_dai_dma_data idma_playback;
dma_filter_fn filter;
- u32 quirks;
- u32 suspend_i2smod;
- u32 suspend_i2scon;
- u32 suspend_i2spsr;
- const struct samsung_i2s_variant_regs *variant_regs;
+
+ struct samsung_i2s_priv *priv;
+};
+
+struct samsung_i2s_priv {
+ struct platform_device *pdev;
+ struct platform_device *pdev_sec;
+
+ /* Memory mapped SFR region */
+ void __iomem *addr;
/* Spinlock protecting access to the device's registers */
- spinlock_t spinlock;
- spinlock_t *lock;
+ spinlock_t lock;
+
+ /* Lock for cross interface checks */
+ spinlock_t pcm_lock;
+
+ /* CPU DAIs and their corresponding drivers */
+ struct i2s_dai *dai;
+ struct snd_soc_dai_driver *dai_drv;
+ int num_dais;
+
+ /* The I2S controller's core clock */
+ struct clk *clk;
+
+ /* Clock for generating I2S signals */
+ struct clk *op_clk;
+
+ /* Rate of RCLK source clock */
+ unsigned long rclk_srcrate;
+
+ /* Cache of selected I2S registers for system suspend */
+ u32 suspend_i2smod;
+ u32 suspend_i2scon;
+ u32 suspend_i2spsr;
+
+ const struct samsung_i2s_variant_regs *variant_regs;
+ u32 quirks;
- /* Below fields are only valid if this is the primary FIFO */
+ /* The clock provider's data */
struct clk *clk_table[3];
struct clk_onecell_data clk_data;
};
-/* Lock for cross i/f checks */
-static DEFINE_SPINLOCK(lock);
-
-/* If this is the 'overlay' stereo DAI */
+/* Returns true if this is the 'overlay' stereo DAI */
static inline bool is_secondary(struct i2s_dai *i2s)
{
- return i2s->pri_dai ? true : false;
+ return i2s->drv->id == SAMSUNG_I2S_ID_SECONDARY;
}
/* If operating in SoC-Slave mode */
static inline bool is_slave(struct i2s_dai *i2s)
{
- u32 mod = readl(i2s->addr + I2SMOD);
- return (mod & (1 << i2s->variant_regs->mss_off)) ? true : false;
+ struct samsung_i2s_priv *priv = i2s->priv;
+
+ u32 mod = readl(priv->addr + I2SMOD);
+ return (mod & (1 << priv->variant_regs->mss_off)) ? true : false;
}
/* If this interface of the controller is transmitting data */
@@ -125,7 +147,7 @@ static inline bool tx_active(struct i2s_dai *i2s)
if (!i2s)
return false;
- active = readl(i2s->addr + I2SCON);
+ active = readl(i2s->priv->addr + I2SCON);
if (is_secondary(i2s))
active &= CON_TXSDMA_ACTIVE;
@@ -163,7 +185,7 @@ static inline bool rx_active(struct i2s_dai *i2s)
if (!i2s)
return false;
- active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE;
+ active = readl(i2s->priv->addr + I2SCON) & CON_RXDMA_ACTIVE;
return active ? true : false;
}
@@ -202,7 +224,9 @@ static inline bool any_active(struct i2s_dai *i2s)
static inline struct i2s_dai *to_info(struct snd_soc_dai *dai)
{
- return snd_soc_dai_get_drvdata(dai);
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ return &priv->dai[dai->id - 1];
}
static inline bool is_opened(struct i2s_dai *i2s)
@@ -224,9 +248,11 @@ static inline bool is_manager(struct i2s_dai *i2s)
/* Read RCLK of I2S (in multiples of LRCLK) */
static inline unsigned get_rfs(struct i2s_dai *i2s)
{
+ struct samsung_i2s_priv *priv = i2s->priv;
u32 rfs;
- rfs = readl(i2s->addr + I2SMOD) >> i2s->variant_regs->rfs_off;
- rfs &= i2s->variant_regs->rfs_mask;
+
+ rfs = readl(priv->addr + I2SMOD) >> priv->variant_regs->rfs_off;
+ rfs &= priv->variant_regs->rfs_mask;
switch (rfs) {
case 7: return 192;
@@ -243,10 +269,11 @@ static inline unsigned get_rfs(struct i2s_dai *i2s)
/* Write RCLK of I2S (in multiples of LRCLK) */
static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
{
- u32 mod = readl(i2s->addr + I2SMOD);
- int rfs_shift = i2s->variant_regs->rfs_off;
+ struct samsung_i2s_priv *priv = i2s->priv;
+ u32 mod = readl(priv->addr + I2SMOD);
+ int rfs_shift = priv->variant_regs->rfs_off;
- mod &= ~(i2s->variant_regs->rfs_mask << rfs_shift);
+ mod &= ~(priv->variant_regs->rfs_mask << rfs_shift);
switch (rfs) {
case 192:
@@ -275,15 +302,17 @@ static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
break;
}
- writel(mod, i2s->addr + I2SMOD);
+ writel(mod, priv->addr + I2SMOD);
}
-/* Read Bit-Clock of I2S (in multiples of LRCLK) */
+/* Read bit-clock of I2S (in multiples of LRCLK) */
static inline unsigned get_bfs(struct i2s_dai *i2s)
{
+ struct samsung_i2s_priv *priv = i2s->priv;
u32 bfs;
- bfs = readl(i2s->addr + I2SMOD) >> i2s->variant_regs->bfs_off;
- bfs &= i2s->variant_regs->bfs_mask;
+
+ bfs = readl(priv->addr + I2SMOD) >> priv->variant_regs->bfs_off;
+ bfs &= priv->variant_regs->bfs_mask;
switch (bfs) {
case 8: return 256;
@@ -298,12 +327,13 @@ static inline unsigned get_bfs(struct i2s_dai *i2s)
}
}
-/* Write Bit-Clock of I2S (in multiples of LRCLK) */
+/* Write bit-clock of I2S (in multiples of LRCLK) */
static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
{
- u32 mod = readl(i2s->addr + I2SMOD);
- int tdm = i2s->quirks & QUIRK_SUPPORTS_TDM;
- int bfs_shift = i2s->variant_regs->bfs_off;
+ struct samsung_i2s_priv *priv = i2s->priv;
+ u32 mod = readl(priv->addr + I2SMOD);
+ int tdm = priv->quirks & QUIRK_SUPPORTS_TDM;
+ int bfs_shift = priv->variant_regs->bfs_off;
/* Non-TDM I2S controllers do not support BCLK > 48 * FS */
if (!tdm && bfs > 48) {
@@ -311,7 +341,7 @@ static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
return;
}
- mod &= ~(i2s->variant_regs->bfs_mask << bfs_shift);
+ mod &= ~(priv->variant_regs->bfs_mask << bfs_shift);
switch (bfs) {
case 48:
@@ -346,13 +376,13 @@ static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
return;
}
- writel(mod, i2s->addr + I2SMOD);
+ writel(mod, priv->addr + I2SMOD);
}
-/* Sample-Size */
+/* Sample size */
static inline int get_blc(struct i2s_dai *i2s)
{
- int blc = readl(i2s->addr + I2SMOD);
+ int blc = readl(i2s->priv->addr + I2SMOD);
blc = (blc >> 13) & 0x3;
@@ -363,11 +393,12 @@ static inline int get_blc(struct i2s_dai *i2s)
}
}
-/* TX Channel Control */
+/* TX channel control */
static void i2s_txctrl(struct i2s_dai *i2s, int on)
{
- void __iomem *addr = i2s->addr;
- int txr_off = i2s->variant_regs->txr_off;
+ struct samsung_i2s_priv *priv = i2s->priv;
+ void __iomem *addr = priv->addr;
+ int txr_off = priv->variant_regs->txr_off;
u32 con = readl(addr + I2SCON);
u32 mod = readl(addr + I2SMOD) & ~(3 << txr_off);
@@ -416,8 +447,9 @@ static void i2s_txctrl(struct i2s_dai *i2s, int on)
/* RX channel control */
static void i2s_rxctrl(struct i2s_dai *i2s, int on)
{
- void __iomem *addr = i2s->addr;
- int txr_off = i2s->variant_regs->txr_off;
+ struct samsung_i2s_priv *priv = i2s->priv;
+ void __iomem *addr = priv->addr;
+ int txr_off = priv->variant_regs->txr_off;
u32 con = readl(addr + I2SCON);
u32 mod = readl(addr + I2SMOD) & ~(3 << txr_off);
@@ -453,9 +485,9 @@ static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush)
return;
if (is_secondary(i2s))
- fic = i2s->addr + I2SFICS;
+ fic = i2s->priv->addr + I2SFICS;
else
- fic = i2s->addr + I2SFIC;
+ fic = i2s->priv->addr + I2SFIC;
/* Flush the FIFO */
writel(readl(fic) | flush, fic);
@@ -468,12 +500,13 @@ static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush)
writel(readl(fic) & ~flush, fic);
}
-static int i2s_set_sysclk(struct snd_soc_dai *dai,
- int clk_id, unsigned int rfs, int dir)
+static int i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int rfs,
+ int dir)
{
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
struct i2s_dai *i2s = to_info(dai);
struct i2s_dai *other = get_other_dai(i2s);
- const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs;
+ const struct samsung_i2s_variant_regs *i2s_regs = priv->variant_regs;
unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
u32 mod, mask, val = 0;
@@ -482,9 +515,9 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
pm_runtime_get_sync(dai->dev);
- spin_lock_irqsave(i2s->lock, flags);
- mod = readl(i2s->addr + I2SMOD);
- spin_unlock_irqrestore(i2s->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
+ mod = readl(priv->addr + I2SMOD);
+ spin_unlock_irqrestore(&priv->lock, flags);
switch (clk_id) {
case SAMSUNG_I2S_OPCLK:
@@ -519,51 +552,46 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
	case SAMSUNG_I2S_RCLKSRC_1: /* clock corresponding to IISMOD[10] := 1 */
mask = 1 << i2s_regs->rclksrc_off;
- if ((i2s->quirks & QUIRK_NO_MUXPSR)
+ if ((priv->quirks & QUIRK_NO_MUXPSR)
|| (clk_id == SAMSUNG_I2S_RCLKSRC_0))
clk_id = 0;
else
clk_id = 1;
if (!any_active(i2s)) {
- if (i2s->op_clk && !IS_ERR(i2s->op_clk)) {
+ if (priv->op_clk && !IS_ERR(priv->op_clk)) {
if ((clk_id && !(mod & rsrc_mask)) ||
(!clk_id && (mod & rsrc_mask))) {
- clk_disable_unprepare(i2s->op_clk);
- clk_put(i2s->op_clk);
+ clk_disable_unprepare(priv->op_clk);
+ clk_put(priv->op_clk);
} else {
- i2s->rclk_srcrate =
- clk_get_rate(i2s->op_clk);
+ priv->rclk_srcrate =
+ clk_get_rate(priv->op_clk);
goto done;
}
}
if (clk_id)
- i2s->op_clk = clk_get(&i2s->pdev->dev,
+ priv->op_clk = clk_get(&i2s->pdev->dev,
"i2s_opclk1");
else
- i2s->op_clk = clk_get(&i2s->pdev->dev,
+ priv->op_clk = clk_get(&i2s->pdev->dev,
"i2s_opclk0");
- if (WARN_ON(IS_ERR(i2s->op_clk))) {
- ret = PTR_ERR(i2s->op_clk);
- i2s->op_clk = NULL;
+ if (WARN_ON(IS_ERR(priv->op_clk))) {
+ ret = PTR_ERR(priv->op_clk);
+ priv->op_clk = NULL;
goto err;
}
- ret = clk_prepare_enable(i2s->op_clk);
+ ret = clk_prepare_enable(priv->op_clk);
if (ret) {
- clk_put(i2s->op_clk);
- i2s->op_clk = NULL;
+ clk_put(priv->op_clk);
+ priv->op_clk = NULL;
goto err;
}
- i2s->rclk_srcrate = clk_get_rate(i2s->op_clk);
+ priv->rclk_srcrate = clk_get_rate(priv->op_clk);
- /* Over-ride the other's */
- if (other) {
- other->op_clk = i2s->op_clk;
- other->rclk_srcrate = i2s->rclk_srcrate;
- }
} else if ((!clk_id && (mod & rsrc_mask))
|| (clk_id && !(mod & rsrc_mask))) {
dev_err(&i2s->pdev->dev,
@@ -572,8 +600,6 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
goto err;
} else {
/* Call can't be on the active DAI */
- i2s->op_clk = other->op_clk;
- i2s->rclk_srcrate = other->rclk_srcrate;
goto done;
}
@@ -586,11 +612,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
goto err;
}
- spin_lock_irqsave(i2s->lock, flags);
- mod = readl(i2s->addr + I2SMOD);
+ spin_lock_irqsave(&priv->lock, flags);
+ mod = readl(priv->addr + I2SMOD);
mod = (mod & ~mask) | val;
- writel(mod, i2s->addr + I2SMOD);
- spin_unlock_irqrestore(i2s->lock, flags);
+ writel(mod, priv->addr + I2SMOD);
+ spin_unlock_irqrestore(&priv->lock, flags);
done:
pm_runtime_put(dai->dev);
@@ -600,17 +626,17 @@ err:
return ret;
}
-static int i2s_set_fmt(struct snd_soc_dai *dai,
- unsigned int fmt)
+static int i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
struct i2s_dai *i2s = to_info(dai);
int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
u32 mod, tmp = 0;
unsigned long flags;
- lrp_shift = i2s->variant_regs->lrp_off;
- sdf_shift = i2s->variant_regs->sdf_off;
- mod_slave = 1 << i2s->variant_regs->mss_off;
+ lrp_shift = priv->variant_regs->lrp_off;
+ sdf_shift = priv->variant_regs->sdf_off;
+ mod_slave = 1 << priv->variant_regs->mss_off;
sdf_mask = MOD_SDF_MASK << sdf_shift;
lrp_rlow = MOD_LR_RLOW << lrp_shift;
@@ -661,7 +687,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
* CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
* clock configuration assigned in DT is not overwritten.
*/
- if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
+ if (priv->rclk_srcrate == 0 && priv->clk_data.clks == NULL)
i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
0, SND_SOC_CLOCK_IN);
break;
@@ -671,15 +697,15 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
}
pm_runtime_get_sync(dai->dev);
- spin_lock_irqsave(i2s->lock, flags);
- mod = readl(i2s->addr + I2SMOD);
+ spin_lock_irqsave(&priv->lock, flags);
+ mod = readl(priv->addr + I2SMOD);
/*
* Don't change the I2S mode if any controller is active on this
* channel.
*/
if (any_active(i2s) &&
((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
- spin_unlock_irqrestore(i2s->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
pm_runtime_put(dai->dev);
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
@@ -688,8 +714,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
mod &= ~(sdf_mask | lrp_rlow | mod_slave);
mod |= tmp;
- writel(mod, i2s->addr + I2SMOD);
- spin_unlock_irqrestore(i2s->lock, flags);
+ writel(mod, priv->addr + I2SMOD);
+ spin_unlock_irqrestore(&priv->lock, flags);
pm_runtime_put(dai->dev);
return 0;
@@ -698,8 +724,10 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
static int i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
struct i2s_dai *i2s = to_info(dai);
u32 mod, mask = 0, val = 0;
+ struct clk *rclksrc;
unsigned long flags;
WARN_ON(!pm_runtime_active(dai->dev));
@@ -710,7 +738,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
switch (params_channels(params)) {
case 6:
val |= MOD_DC2_EN;
- /* fall through */
+ /* Fall through */
case 4:
val |= MOD_DC1_EN;
break;
@@ -772,30 +800,35 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- spin_lock_irqsave(i2s->lock, flags);
- mod = readl(i2s->addr + I2SMOD);
+ spin_lock_irqsave(&priv->lock, flags);
+ mod = readl(priv->addr + I2SMOD);
mod = (mod & ~mask) | val;
- writel(mod, i2s->addr + I2SMOD);
- spin_unlock_irqrestore(i2s->lock, flags);
+ writel(mod, priv->addr + I2SMOD);
+ spin_unlock_irqrestore(&priv->lock, flags);
snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
i2s->frmclk = params_rate(params);
+ rclksrc = priv->clk_table[CLK_I2S_RCLK_SRC];
+ if (rclksrc && !IS_ERR(rclksrc))
+ priv->rclk_srcrate = clk_get_rate(rclksrc);
+
return 0;
}
-/* We set constraints on the substream acc to the version of I2S */
+/* We set constraints on the substream according to the version of I2S */
static int i2s_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
struct i2s_dai *i2s = to_info(dai);
struct i2s_dai *other = get_other_dai(i2s);
unsigned long flags;
pm_runtime_get_sync(dai->dev);
- spin_lock_irqsave(&lock, flags);
+ spin_lock_irqsave(&priv->pcm_lock, flags);
i2s->mode |= DAI_OPENED;
@@ -804,10 +837,10 @@ static int i2s_startup(struct snd_pcm_substream *substream,
else
i2s->mode |= DAI_MANAGER;
- if (!any_active(i2s) && (i2s->quirks & QUIRK_NEED_RSTCLR))
- writel(CON_RSTCLR, i2s->addr + I2SCON);
+ if (!any_active(i2s) && (priv->quirks & QUIRK_NEED_RSTCLR))
+ writel(CON_RSTCLR, i2s->priv->addr + I2SCON);
- spin_unlock_irqrestore(&lock, flags);
+ spin_unlock_irqrestore(&priv->pcm_lock, flags);
return 0;
}
@@ -815,11 +848,12 @@ static int i2s_startup(struct snd_pcm_substream *substream,
static void i2s_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
struct i2s_dai *i2s = to_info(dai);
struct i2s_dai *other = get_other_dai(i2s);
unsigned long flags;
- spin_lock_irqsave(&lock, flags);
+ spin_lock_irqsave(&priv->pcm_lock, flags);
i2s->mode &= ~DAI_OPENED;
i2s->mode &= ~DAI_MANAGER;
@@ -831,13 +865,14 @@ static void i2s_shutdown(struct snd_pcm_substream *substream,
i2s->rfs = 0;
i2s->bfs = 0;
- spin_unlock_irqrestore(&lock, flags);
+ spin_unlock_irqrestore(&priv->pcm_lock, flags);
pm_runtime_put(dai->dev);
}
static int config_setup(struct i2s_dai *i2s)
{
+ struct samsung_i2s_priv *priv = i2s->priv;
struct i2s_dai *other = get_other_dai(i2s);
unsigned rfs, bfs, blc;
u32 psr;
@@ -885,17 +920,12 @@ static int config_setup(struct i2s_dai *i2s)
if (is_slave(i2s))
return 0;
- if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
- struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
-
- if (rclksrc && !IS_ERR(rclksrc))
- i2s->rclk_srcrate = clk_get_rate(rclksrc);
-
- psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
- writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
+ if (!(priv->quirks & QUIRK_NO_MUXPSR)) {
+ psr = priv->rclk_srcrate / i2s->frmclk / rfs;
+ writel(((psr - 1) << 8) | PSR_PSREN, priv->addr + I2SPSR);
dev_dbg(&i2s->pdev->dev,
"RCLK_SRC=%luHz PSR=%u, RCLK=%dfs, BCLK=%dfs\n",
- i2s->rclk_srcrate, psr, rfs, bfs);
+ priv->rclk_srcrate, psr, rfs, bfs);
}
return 0;
@@ -904,6 +934,7 @@ static int config_setup(struct i2s_dai *i2s)
static int i2s_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct i2s_dai *i2s = to_info(rtd->cpu_dai);
@@ -914,10 +945,10 @@ static int i2s_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
pm_runtime_get_sync(dai->dev);
- spin_lock_irqsave(i2s->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
if (config_setup(i2s)) {
- spin_unlock_irqrestore(i2s->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
return -EINVAL;
}
@@ -926,12 +957,12 @@ static int i2s_trigger(struct snd_pcm_substream *substream,
else
i2s_txctrl(i2s, 1);
- spin_unlock_irqrestore(i2s->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- spin_lock_irqsave(i2s->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
if (capture) {
i2s_rxctrl(i2s, 0);
@@ -941,7 +972,7 @@ static int i2s_trigger(struct snd_pcm_substream *substream,
i2s_fifo(i2s, FIC_TXFLUSH);
}
- spin_unlock_irqrestore(i2s->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
pm_runtime_put(dai->dev);
break;
}
@@ -980,19 +1011,19 @@ static int i2s_set_clkdiv(struct snd_soc_dai *dai,
static snd_pcm_sframes_t
i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
struct i2s_dai *i2s = to_info(dai);
- u32 reg = readl(i2s->addr + I2SFIC);
+ u32 reg = readl(priv->addr + I2SFIC);
snd_pcm_sframes_t delay;
- const struct samsung_i2s_variant_regs *i2s_regs = i2s->variant_regs;
WARN_ON(!pm_runtime_active(dai->dev));
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
delay = FIC_RXCOUNT(reg);
else if (is_secondary(i2s))
- delay = FICS_TXCOUNT(readl(i2s->addr + I2SFICS));
+ delay = FICS_TXCOUNT(readl(priv->addr + I2SFICS));
else
- delay = (reg >> i2s_regs->ftx0cnt_off) & 0x7f;
+ delay = (reg >> priv->variant_regs->ftx0cnt_off) & 0x7f;
return delay;
}
@@ -1014,39 +1045,39 @@ static int i2s_resume(struct snd_soc_dai *dai)
static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
{
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
struct i2s_dai *i2s = to_info(dai);
struct i2s_dai *other = get_other_dai(i2s);
unsigned long flags;
pm_runtime_get_sync(dai->dev);
- if (is_secondary(i2s)) { /* If this is probe on the secondary DAI */
- snd_soc_dai_init_dma_data(dai, &other->sec_dai->dma_playback,
- NULL);
+ if (is_secondary(i2s)) {
+ /* If this is probe on the secondary DAI */
+ snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, NULL);
} else {
snd_soc_dai_init_dma_data(dai, &i2s->dma_playback,
- &i2s->dma_capture);
+ &i2s->dma_capture);
- if (i2s->quirks & QUIRK_NEED_RSTCLR)
- writel(CON_RSTCLR, i2s->addr + I2SCON);
+ if (priv->quirks & QUIRK_NEED_RSTCLR)
+ writel(CON_RSTCLR, priv->addr + I2SCON);
- if (i2s->quirks & QUIRK_SUPPORTS_IDMA)
- idma_reg_addr_init(i2s->addr,
- i2s->sec_dai->idma_playback.addr);
+ if (priv->quirks & QUIRK_SUPPORTS_IDMA)
+ idma_reg_addr_init(priv->addr,
+ other->idma_playback.addr);
}
/* Reset any constraint on RFS and BFS */
i2s->rfs = 0;
i2s->bfs = 0;
- i2s->rclk_srcrate = 0;
- spin_lock_irqsave(i2s->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
i2s_txctrl(i2s, 0);
i2s_rxctrl(i2s, 0);
i2s_fifo(i2s, FIC_TXFLUSH);
i2s_fifo(other, FIC_TXFLUSH);
i2s_fifo(i2s, FIC_RXFLUSH);
- spin_unlock_irqrestore(i2s->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
/* Gate CDCLK by default */
if (!is_opened(other))
@@ -1059,16 +1090,17 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
static int samsung_i2s_dai_remove(struct snd_soc_dai *dai)
{
- struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai);
+ struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
+ struct i2s_dai *i2s = to_info(dai);
unsigned long flags;
pm_runtime_get_sync(dai->dev);
if (!is_secondary(i2s)) {
- if (i2s->quirks & QUIRK_NEED_RSTCLR) {
- spin_lock_irqsave(i2s->lock, flags);
- writel(0, i2s->addr + I2SCON);
- spin_unlock_irqrestore(i2s->lock, flags);
+ if (priv->quirks & QUIRK_NEED_RSTCLR) {
+ spin_lock_irqsave(&priv->lock, flags);
+ writel(0, priv->addr + I2SCON);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
}
@@ -1088,118 +1120,157 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = {
.delay = i2s_delay,
};
+static const struct snd_soc_dapm_widget samsung_i2s_widgets[] = {
+ /* Backend DAI */
+ SND_SOC_DAPM_AIF_OUT("Mixer DAI TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("Mixer DAI RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+ /* Playback Mixer */
+ SND_SOC_DAPM_MIXER("Playback Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_route samsung_i2s_dapm_routes[] = {
+ { "Playback Mixer", NULL, "Primary" },
+ { "Playback Mixer", NULL, "Secondary" },
+
+ { "Mixer DAI TX", NULL, "Playback Mixer" },
+ { "Playback Mixer", NULL, "Mixer DAI RX" },
+};
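+
+/*
+ * The "Primary" and "Secondary" playback streams exposed by the two CPU
+ * DAIs are merged through the "Playback Mixer" widget into the single
+ * "Mixer DAI TX" backend stream, which a DPCM machine driver then routes
+ * on to the CODEC.
+ */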
+
static const struct snd_soc_component_driver samsung_i2s_component = {
- .name = "samsung-i2s",
+ .name = "samsung-i2s",
+
+ .dapm_widgets = samsung_i2s_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(samsung_i2s_widgets),
+
+ .dapm_routes = samsung_i2s_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(samsung_i2s_dapm_routes),
};
-#define SAMSUNG_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \
- SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FMTBIT_S24_LE)
+#define SAMSUNG_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
-static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev,
- const struct samsung_i2s_dai_data *i2s_dai_data,
- bool sec)
+static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
+ const struct samsung_i2s_dai_data *i2s_dai_data,
+ int num_dais)
{
- struct i2s_dai *i2s;
-
- i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL);
- if (i2s == NULL)
- return NULL;
-
- i2s->pdev = pdev;
- i2s->pri_dai = NULL;
- i2s->sec_dai = NULL;
- i2s->i2s_dai_drv.id = 1;
- i2s->i2s_dai_drv.symmetric_rates = 1;
- i2s->i2s_dai_drv.probe = samsung_i2s_dai_probe;
- i2s->i2s_dai_drv.remove = samsung_i2s_dai_remove;
- i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops;
- i2s->i2s_dai_drv.suspend = i2s_suspend;
- i2s->i2s_dai_drv.resume = i2s_resume;
- i2s->i2s_dai_drv.playback.channels_min = 1;
- i2s->i2s_dai_drv.playback.channels_max = 2;
- i2s->i2s_dai_drv.playback.rates = i2s_dai_data->pcm_rates;
- i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS;
-
- if (!sec) {
- i2s->i2s_dai_drv.name = SAMSUNG_I2S_DAI;
- i2s->i2s_dai_drv.capture.channels_min = 1;
- i2s->i2s_dai_drv.capture.channels_max = 2;
- i2s->i2s_dai_drv.capture.rates = i2s_dai_data->pcm_rates;
- i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
- } else {
- i2s->i2s_dai_drv.name = SAMSUNG_I2S_DAI_SEC;
+ static const char *dai_names[] = { "samsung-i2s", "samsung-i2s-sec" };
+ static const char *stream_names[] = { "Primary", "Secondary" };
+ struct snd_soc_dai_driver *dai_drv;
+ struct i2s_dai *dai;
+ int i;
+
+ priv->dai = devm_kcalloc(&priv->pdev->dev, num_dais,
+ sizeof(*dai), GFP_KERNEL);
+ if (!priv->dai)
+ return -ENOMEM;
+
+ priv->dai_drv = devm_kcalloc(&priv->pdev->dev, num_dais,
+ sizeof(*dai_drv), GFP_KERNEL);
+ if (!priv->dai_drv)
+ return -ENOMEM;
+
+ for (i = 0; i < num_dais; i++) {
+ dai_drv = &priv->dai_drv[i];
+
+ dai_drv->probe = samsung_i2s_dai_probe;
+ dai_drv->remove = samsung_i2s_dai_remove;
+ dai_drv->suspend = i2s_suspend;
+ dai_drv->resume = i2s_resume;
+
+ dai_drv->symmetric_rates = 1;
+ dai_drv->ops = &samsung_i2s_dai_ops;
+
+ dai_drv->playback.channels_min = 1;
+ dai_drv->playback.channels_max = 2;
+ dai_drv->playback.rates = i2s_dai_data->pcm_rates;
+ dai_drv->playback.formats = SAMSUNG_I2S_FMTS;
+ dai_drv->playback.stream_name = stream_names[i];
+
+ dai_drv->id = i + 1;
+ dai_drv->name = dai_names[i];
+
+ priv->dai[i].drv = &priv->dai_drv[i];
+ priv->dai[i].pdev = priv->pdev;
}
- return i2s;
+
+ /* Initialize capture only for the primary DAI */
+ dai_drv = &priv->dai_drv[SAMSUNG_I2S_ID_PRIMARY - 1];
+
+ dai_drv->capture.channels_min = 1;
+ dai_drv->capture.channels_max = 2;
+ dai_drv->capture.rates = i2s_dai_data->pcm_rates;
+ dai_drv->capture.formats = SAMSUNG_I2S_FMTS;
+
+ return 0;
}
#ifdef CONFIG_PM
static int i2s_runtime_suspend(struct device *dev)
{
- struct i2s_dai *i2s = dev_get_drvdata(dev);
+ struct samsung_i2s_priv *priv = dev_get_drvdata(dev);
- i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
- i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
- i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+ priv->suspend_i2smod = readl(priv->addr + I2SMOD);
+ priv->suspend_i2scon = readl(priv->addr + I2SCON);
+ priv->suspend_i2spsr = readl(priv->addr + I2SPSR);
- if (i2s->op_clk)
- clk_disable_unprepare(i2s->op_clk);
- clk_disable_unprepare(i2s->clk);
+ if (priv->op_clk)
+ clk_disable_unprepare(priv->op_clk);
+ clk_disable_unprepare(priv->clk);
return 0;
}
static int i2s_runtime_resume(struct device *dev)
{
- struct i2s_dai *i2s = dev_get_drvdata(dev);
+ struct samsung_i2s_priv *priv = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare_enable(i2s->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
- if (i2s->op_clk) {
- ret = clk_prepare_enable(i2s->op_clk);
+ if (priv->op_clk) {
+ ret = clk_prepare_enable(priv->op_clk);
if (ret) {
- clk_disable_unprepare(i2s->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
}
- writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
- writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
- writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+ writel(priv->suspend_i2scon, priv->addr + I2SCON);
+ writel(priv->suspend_i2smod, priv->addr + I2SMOD);
+ writel(priv->suspend_i2spsr, priv->addr + I2SPSR);
return 0;
}
#endif /* CONFIG_PM */
-static void i2s_unregister_clocks(struct i2s_dai *i2s)
+static void i2s_unregister_clocks(struct samsung_i2s_priv *priv)
{
int i;
- for (i = 0; i < i2s->clk_data.clk_num; i++) {
- if (!IS_ERR(i2s->clk_table[i]))
- clk_unregister(i2s->clk_table[i]);
+ for (i = 0; i < priv->clk_data.clk_num; i++) {
+ if (!IS_ERR(priv->clk_table[i]))
+ clk_unregister(priv->clk_table[i]);
}
}
-static void i2s_unregister_clock_provider(struct platform_device *pdev)
+static void i2s_unregister_clock_provider(struct samsung_i2s_priv *priv)
{
- struct i2s_dai *i2s = dev_get_drvdata(&pdev->dev);
-
- of_clk_del_provider(pdev->dev.of_node);
- i2s_unregister_clocks(i2s);
+ of_clk_del_provider(priv->pdev->dev.of_node);
+ i2s_unregister_clocks(priv);
}
-static int i2s_register_clock_provider(struct platform_device *pdev)
+
+static int i2s_register_clock_provider(struct samsung_i2s_priv *priv)
{
const char * const i2s_clk_desc[] = { "cdclk", "rclk_src", "prescaler" };
const char *clk_name[2] = { "i2s_opclk0", "i2s_opclk1" };
const char *p_names[2] = { NULL };
- struct device *dev = &pdev->dev;
- struct i2s_dai *i2s = dev_get_drvdata(dev);
- const struct samsung_i2s_variant_regs *reg_info = i2s->variant_regs;
+ struct device *dev = &priv->pdev->dev;
+ const struct samsung_i2s_variant_regs *reg_info = priv->variant_regs;
const char *i2s_clk_name[ARRAY_SIZE(i2s_clk_desc)];
struct clk *rclksrc;
int ret, i;
@@ -1224,110 +1295,170 @@ static int i2s_register_clock_provider(struct platform_device *pdev)
return -ENOMEM;
}
- if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
+ if (!(priv->quirks & QUIRK_NO_MUXPSR)) {
/* Activate the prescaler */
- u32 val = readl(i2s->addr + I2SPSR);
- writel(val | PSR_PSREN, i2s->addr + I2SPSR);
+ u32 val = readl(priv->addr + I2SPSR);
+ writel(val | PSR_PSREN, priv->addr + I2SPSR);
- i2s->clk_table[CLK_I2S_RCLK_SRC] = clk_register_mux(dev,
+ priv->clk_table[CLK_I2S_RCLK_SRC] = clk_register_mux(dev,
i2s_clk_name[CLK_I2S_RCLK_SRC], p_names,
ARRAY_SIZE(p_names),
CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
- i2s->addr + I2SMOD, reg_info->rclksrc_off,
- 1, 0, i2s->lock);
+ priv->addr + I2SMOD, reg_info->rclksrc_off,
+ 1, 0, &priv->lock);
- i2s->clk_table[CLK_I2S_RCLK_PSR] = clk_register_divider(dev,
+ priv->clk_table[CLK_I2S_RCLK_PSR] = clk_register_divider(dev,
i2s_clk_name[CLK_I2S_RCLK_PSR],
i2s_clk_name[CLK_I2S_RCLK_SRC],
CLK_SET_RATE_PARENT,
- i2s->addr + I2SPSR, 8, 6, 0, i2s->lock);
+ priv->addr + I2SPSR, 8, 6, 0, &priv->lock);
p_names[0] = i2s_clk_name[CLK_I2S_RCLK_PSR];
- i2s->clk_data.clk_num = 2;
+ priv->clk_data.clk_num = 2;
}
- i2s->clk_table[CLK_I2S_CDCLK] = clk_register_gate(dev,
+ priv->clk_table[CLK_I2S_CDCLK] = clk_register_gate(dev,
i2s_clk_name[CLK_I2S_CDCLK], p_names[0],
CLK_SET_RATE_PARENT,
- i2s->addr + I2SMOD, reg_info->cdclkcon_off,
- CLK_GATE_SET_TO_DISABLE, i2s->lock);
+ priv->addr + I2SMOD, reg_info->cdclkcon_off,
+ CLK_GATE_SET_TO_DISABLE, &priv->lock);
- i2s->clk_data.clk_num += 1;
- i2s->clk_data.clks = i2s->clk_table;
+ priv->clk_data.clk_num += 1;
+ priv->clk_data.clks = priv->clk_table;
ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
- &i2s->clk_data);
+ &priv->clk_data);
if (ret < 0) {
dev_err(dev, "failed to add clock provider: %d\n", ret);
- i2s_unregister_clocks(i2s);
+ i2s_unregister_clocks(priv);
}
return ret;
}
+/* Create platform device for the secondary PCM */
+static int i2s_create_secondary_device(struct samsung_i2s_priv *priv)
+{
+ struct platform_device *pdev_sec;
+ const char *devname;
+ int ret;
+
+ devname = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s-sec",
+ dev_name(&priv->pdev->dev));
+ if (!devname)
+ return -ENOMEM;
+
+ pdev_sec = platform_device_alloc(devname, -1);
+ if (!pdev_sec)
+ return -ENOMEM;
+
+ pdev_sec->driver_override = kstrdup("samsung-i2s", GFP_KERNEL);
+
+ ret = platform_device_add(pdev_sec);
+ if (ret < 0) {
+ platform_device_put(pdev_sec);
+ return ret;
+ }
+
+ ret = device_attach(&pdev_sec->dev);
+ if (ret <= 0) {
+		platform_device_unregister(pdev_sec);
+ dev_info(&pdev_sec->dev, "device_attach() failed\n");
+ return ret;
+ }
+
+ priv->pdev_sec = pdev_sec;
+
+ return 0;
+}
+
+static void i2s_delete_secondary_device(struct samsung_i2s_priv *priv)
+{
+ platform_device_unregister(priv->pdev_sec);
+ priv->pdev_sec = NULL;
+}
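+
+/*
+ * The secondary PCM gets its own platform device so that a second
+ * dmaengine PCM platform (for the "tx-sec" channel) can be registered on
+ * a separate struct device. driver_override binds that device to this
+ * driver; its probe is then a no-op (no OF node and no id_entry match)
+ * and it gets no driver data, which is how samsung_i2s_remove() tells
+ * the two devices apart.
+ */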
+
static int samsung_i2s_probe(struct platform_device *pdev)
{
struct i2s_dai *pri_dai, *sec_dai = NULL;
struct s3c_audio_pdata *i2s_pdata = pdev->dev.platform_data;
- struct resource *res;
- u32 regs_base, quirks = 0, idma_addr = 0;
+ u32 regs_base, idma_addr = 0;
struct device_node *np = pdev->dev.of_node;
const struct samsung_i2s_dai_data *i2s_dai_data;
- int ret;
+ const struct platform_device_id *id;
+ struct samsung_i2s_priv *priv;
+ struct resource *res;
+ int num_dais, ret;
- if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
i2s_dai_data = of_device_get_match_data(&pdev->dev);
- else
- i2s_dai_data = (struct samsung_i2s_dai_data *)
- platform_get_device_id(pdev)->driver_data;
+ } else {
+ id = platform_get_device_id(pdev);
- pri_dai = i2s_alloc_dai(pdev, i2s_dai_data, false);
- if (!pri_dai) {
- dev_err(&pdev->dev, "Unable to alloc I2S_pri\n");
- return -ENOMEM;
+ /* Nothing to do if it is the secondary device probe */
+ if (!id)
+ return 0;
+
+ i2s_dai_data = (struct samsung_i2s_dai_data *)id->driver_data;
}
- spin_lock_init(&pri_dai->spinlock);
- pri_dai->lock = &pri_dai->spinlock;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- if (!np) {
- if (i2s_pdata == NULL) {
- dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n");
+ if (np) {
+ priv->quirks = i2s_dai_data->quirks;
+ } else {
+ if (!i2s_pdata) {
+ dev_err(&pdev->dev, "Missing platform data\n");
return -EINVAL;
}
+ priv->quirks = i2s_pdata->type.quirks;
+ }
+
+ num_dais = (priv->quirks & QUIRK_SEC_DAI) ? 2 : 1;
+ priv->pdev = pdev;
+ priv->variant_regs = i2s_dai_data->i2s_variant_regs;
+ ret = i2s_alloc_dais(priv, i2s_dai_data, num_dais);
+ if (ret < 0)
+ return ret;
+
+ pri_dai = &priv->dai[SAMSUNG_I2S_ID_PRIMARY - 1];
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->pcm_lock);
+
+ if (!np) {
pri_dai->dma_playback.filter_data = i2s_pdata->dma_playback;
pri_dai->dma_capture.filter_data = i2s_pdata->dma_capture;
pri_dai->filter = i2s_pdata->dma_filter;
- quirks = i2s_pdata->type.quirks;
idma_addr = i2s_pdata->type.idma_addr;
} else {
- quirks = i2s_dai_data->quirks;
if (of_property_read_u32(np, "samsung,idma-addr",
&idma_addr)) {
- if (quirks & QUIRK_SUPPORTS_IDMA) {
+ if (priv->quirks & QUIRK_SUPPORTS_IDMA) {
dev_info(&pdev->dev, "idma address is not"\
"specified");
}
}
}
- quirks &= ~(QUIRK_SEC_DAI | QUIRK_SUPPORTS_IDMA);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pri_dai->addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pri_dai->addr))
- return PTR_ERR(pri_dai->addr);
+ priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->addr))
+ return PTR_ERR(priv->addr);
regs_base = res->start;
- pri_dai->clk = devm_clk_get(&pdev->dev, "iis");
- if (IS_ERR(pri_dai->clk)) {
+ priv->clk = devm_clk_get(&pdev->dev, "iis");
+ if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "Failed to get iis clock\n");
- return PTR_ERR(pri_dai->clk);
+ return PTR_ERR(priv->clk);
}
- ret = clk_prepare_enable(pri_dai->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret != 0) {
dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
return ret;
@@ -1338,33 +1469,19 @@ static int samsung_i2s_probe(struct platform_device *pdev)
pri_dai->dma_capture.chan_name = "rx";
pri_dai->dma_playback.addr_width = 4;
pri_dai->dma_capture.addr_width = 4;
- pri_dai->quirks = quirks;
- pri_dai->variant_regs = i2s_dai_data->i2s_variant_regs;
+ pri_dai->priv = priv;
- if (quirks & QUIRK_PRI_6CHAN)
- pri_dai->i2s_dai_drv.playback.channels_max = 6;
+ if (priv->quirks & QUIRK_PRI_6CHAN)
+ pri_dai->drv->playback.channels_max = 6;
ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
- NULL, NULL);
+ "tx", "rx", NULL);
if (ret < 0)
goto err_disable_clk;
- ret = devm_snd_soc_register_component(&pdev->dev,
- &samsung_i2s_component,
- &pri_dai->i2s_dai_drv, 1);
- if (ret < 0)
- goto err_disable_clk;
-
- if (quirks & QUIRK_SEC_DAI) {
- sec_dai = i2s_alloc_dai(pdev, i2s_dai_data, true);
- if (!sec_dai) {
- dev_err(&pdev->dev, "Unable to alloc I2S_sec\n");
- ret = -ENOMEM;
- goto err_disable_clk;
- }
+ if (priv->quirks & QUIRK_SEC_DAI) {
+ sec_dai = &priv->dai[SAMSUNG_I2S_ID_SECONDARY - 1];
- sec_dai->lock = &pri_dai->spinlock;
- sec_dai->variant_regs = pri_dai->variant_regs;
sec_dai->dma_playback.addr = regs_base + I2STXDS;
sec_dai->dma_playback.chan_name = "tx-sec";
@@ -1374,62 +1491,72 @@ static int samsung_i2s_probe(struct platform_device *pdev)
}
sec_dai->dma_playback.addr_width = 4;
- sec_dai->addr = pri_dai->addr;
- sec_dai->clk = pri_dai->clk;
- sec_dai->quirks = quirks;
sec_dai->idma_playback.addr = idma_addr;
sec_dai->pri_dai = pri_dai;
+ sec_dai->priv = priv;
pri_dai->sec_dai = sec_dai;
- ret = samsung_asoc_dma_platform_register(&pdev->dev,
- sec_dai->filter, "tx-sec", NULL);
+ ret = i2s_create_secondary_device(priv);
if (ret < 0)
goto err_disable_clk;
- ret = devm_snd_soc_register_component(&pdev->dev,
- &samsung_i2s_component,
- &sec_dai->i2s_dai_drv, 1);
+ ret = samsung_asoc_dma_platform_register(&priv->pdev_sec->dev,
+ sec_dai->filter, "tx-sec", NULL,
+ &pdev->dev);
if (ret < 0)
- goto err_disable_clk;
+ goto err_del_sec;
+
}
if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
dev_err(&pdev->dev, "Unable to configure gpio\n");
ret = -EINVAL;
- goto err_disable_clk;
+ goto err_del_sec;
}
- dev_set_drvdata(&pdev->dev, pri_dai);
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &samsung_i2s_component,
+ priv->dai_drv, num_dais);
+ if (ret < 0)
+ goto err_del_sec;
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = i2s_register_clock_provider(pdev);
+ ret = i2s_register_clock_provider(priv);
if (ret < 0)
goto err_disable_pm;
- pri_dai->op_clk = clk_get_parent(pri_dai->clk_table[CLK_I2S_RCLK_SRC]);
+ priv->op_clk = clk_get_parent(priv->clk_table[CLK_I2S_RCLK_SRC]);
return 0;
err_disable_pm:
pm_runtime_disable(&pdev->dev);
+err_del_sec:
+ i2s_delete_secondary_device(priv);
err_disable_clk:
- clk_disable_unprepare(pri_dai->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
static int samsung_i2s_remove(struct platform_device *pdev)
{
- struct i2s_dai *pri_dai;
+ struct samsung_i2s_priv *priv = dev_get_drvdata(&pdev->dev);
- pri_dai = dev_get_drvdata(&pdev->dev);
+ /* The secondary device has no driver data assigned */
+ if (!priv)
+ return 0;
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- i2s_unregister_clock_provider(pdev);
- clk_disable_unprepare(pri_dai->clk);
+ i2s_unregister_clock_provider(priv);
+ i2s_delete_secondary_device(priv);
+ clk_disable_unprepare(priv->clk);
+
pm_runtime_put_noidle(&pdev->dev);
return 0;
diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
index e7b371b07230..694512f980fd 100644
--- a/sound/soc/samsung/odroid.c
+++ b/sound/soc/samsung/odroid.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/module.h>
@@ -17,26 +18,52 @@
struct odroid_priv {
struct snd_soc_card card;
- struct snd_soc_dai_link dai_link;
-
struct clk *clk_i2s_bus;
struct clk *sclk_i2s;
+
+ /* Spinlock protecting fields below */
+ spinlock_t lock;
+ unsigned int be_sample_rate;
+ bool be_active;
};
-static int odroid_card_startup(struct snd_pcm_substream *substream)
+static int odroid_card_fe_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_hw_constraint_single(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+
return 0;
}
-static int odroid_card_hw_params(struct snd_pcm_substream *substream,
+static int odroid_card_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->be_active && priv->be_sample_rate != params_rate(params))
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static const struct snd_soc_ops odroid_card_fe_ops = {
+ .startup = odroid_card_fe_startup,
+ .hw_params = odroid_card_fe_hw_params,
+};
+
+static int odroid_card_be_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card);
unsigned int pll_freq, rclk_freq, rfs;
+ unsigned long flags;
int ret;
switch (params_rate(params)) {
@@ -83,22 +110,97 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->be_sample_rate = params_rate(params);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int odroid_card_be_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ priv->be_active = true;
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ priv->be_active = false;
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
return 0;
}
-static const struct snd_soc_ops odroid_card_ops = {
- .startup = odroid_card_startup,
- .hw_params = odroid_card_hw_params,
+static const struct snd_soc_ops odroid_card_be_ops = {
+ .hw_params = odroid_card_be_hw_params,
+ .trigger = odroid_card_be_trigger,
+};
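+
+/*
+ * The BE ops above record the active sample rate and running state under
+ * priv->lock; odroid_card_fe_hw_params() takes the same lock and rejects
+ * a front-end rate that differs from an already running back-end, since
+ * both front ends feed the single I2S back end.
+ */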
+
+/* DAPM routes for backward compatibility with old DTS */
+static const struct snd_soc_dapm_route odroid_dapm_routes[] = {
+ { "I2S Playback", NULL, "Mixer DAI TX" },
+ { "HiFi Playback", NULL, "Mixer DAI TX" },
+};
+
+static struct snd_soc_dai_link odroid_card_dais[] = {
+ {
+ /* Primary FE <-> BE link */
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .ops = &odroid_card_fe_ops,
+ .name = "Primary",
+ .stream_name = "Primary",
+ .platform_name = "3830000.i2s",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ }, {
+ /* BE <-> CODECs link */
+ .name = "I2S Mixer",
+ .cpu_name = "snd-soc-dummy",
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "snd-soc-dummy",
+ .ops = &odroid_card_be_ops,
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ }, {
+ /* Secondary FE <-> BE link */
+ .playback_only = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .ops = &odroid_card_fe_ops,
+ .name = "Secondary",
+ .stream_name = "Secondary",
+ .platform_name = "3830000.i2s-sec",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ }
};
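+
+/*
+ * DPCM layout: the "Primary" and "Secondary" front ends reference the
+ * primary ("3830000.i2s") and secondary ("3830000.i2s-sec") PCM devices,
+ * and both are routed into the one "I2S Mixer" back end, which carries
+ * the CODEC links and the DAI format.
+ */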
static int odroid_audio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *cpu_dai = NULL;
struct device_node *cpu, *codec;
struct odroid_priv *priv;
- struct snd_soc_dai_link *link;
struct snd_soc_card *card;
- int ret;
+ struct snd_soc_dai_link *link, *codec_link;
+ int num_pcms, ret, i;
+ struct of_phandle_args args = {};
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -110,6 +212,7 @@ static int odroid_audio_probe(struct platform_device *pdev)
card->owner = THIS_MODULE;
card->fully_routed = true;
+ spin_lock_init(&priv->lock);
snd_soc_card_set_drvdata(card, priv);
ret = snd_soc_of_parse_card_name(card, "model");
@@ -130,45 +233,78 @@ static int odroid_audio_probe(struct platform_device *pdev)
return ret;
}
- link = &priv->dai_link;
-
- link->ops = &odroid_card_ops;
- link->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS;
-
- card->dai_link = &priv->dai_link;
- card->num_links = 1;
+ card->dai_link = odroid_card_dais;
+ card->num_links = ARRAY_SIZE(odroid_card_dais);
cpu = of_get_child_by_name(dev->of_node, "cpu");
codec = of_get_child_by_name(dev->of_node, "codec");
+ link = card->dai_link;
+ codec_link = &card->dai_link[1];
- link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
- if (!link->cpu_of_node) {
- dev_err(dev, "Failed parsing cpu/sound-dai property\n");
- return -EINVAL;
+ /*
+ * For backwards compatibility create the secondary CPU DAI link only
+ * if there are 2 CPU DAI entries in the cpu sound-dai property in DT.
+ * Also add required DAPM routes not available in old DTS.
+ */
+ num_pcms = of_count_phandle_with_args(cpu, "sound-dai",
+ "#sound-dai-cells");
+ if (num_pcms == 1) {
+ card->dapm_routes = odroid_dapm_routes;
+ card->num_dapm_routes = ARRAY_SIZE(odroid_dapm_routes);
+ card->num_links--;
}
- ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
- if (ret < 0)
- goto err_put_codec_n;
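+	/*
+	 * Fill in the FE CPU DAI names from the "sound-dai" phandles; the
+	 * front-end links sit at dai_link[0] and dai_link[2] with the back
+	 * end at dai_link[1], hence the step of two.
+	 */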
+ for (i = 0; i < num_pcms; i++, link += 2) {
+ ret = of_parse_phandle_with_args(cpu, "sound-dai",
+ "#sound-dai-cells", i, &args);
+ if (ret < 0)
+ break;
+
+ if (!args.np) {
+ dev_err(dev, "sound-dai property parse error: %d\n", ret);
+ ret = -EINVAL;
+ break;
+ }
- link->platform_of_node = link->cpu_of_node;
+ ret = snd_soc_get_dai_name(&args, &link->cpu_dai_name);
+ of_node_put(args.np);
- link->name = "Primary";
- link->stream_name = link->name;
+ if (ret < 0)
+ break;
+ }
+ if (ret == 0) {
+ cpu_dai = of_parse_phandle(cpu, "sound-dai", 0);
+ if (!cpu_dai)
+ ret = -EINVAL;
+ }
+ of_node_put(cpu);
+ of_node_put(codec);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, codec_link);
+ if (ret < 0)
+ goto err_put_cpu_dai;
+
+ /* Set capture capability only for boards with the MAX98090 CODEC */
+ if (codec_link->num_codecs > 1) {
+ card->dai_link[0].dpcm_capture = 1;
+ card->dai_link[1].dpcm_capture = 1;
+ }
- priv->sclk_i2s = of_clk_get_by_name(link->cpu_of_node, "i2s_opclk1");
+ priv->sclk_i2s = of_clk_get_by_name(cpu_dai, "i2s_opclk1");
if (IS_ERR(priv->sclk_i2s)) {
ret = PTR_ERR(priv->sclk_i2s);
- goto err_put_i2s_n;
+ goto err_put_cpu_dai;
}
- priv->clk_i2s_bus = of_clk_get_by_name(link->cpu_of_node, "iis");
+ priv->clk_i2s_bus = of_clk_get_by_name(cpu_dai, "iis");
if (IS_ERR(priv->clk_i2s_bus)) {
ret = PTR_ERR(priv->clk_i2s_bus);
goto err_put_sclk;
}
+ of_node_put(cpu_dai);
ret = devm_snd_soc_register_card(dev, card);
if (ret < 0) {
@@ -182,10 +318,9 @@ err_put_clk_i2s:
clk_put(priv->clk_i2s_bus);
err_put_sclk:
clk_put(priv->sclk_i2s);
-err_put_i2s_n:
- of_node_put(link->cpu_of_node);
-err_put_codec_n:
- snd_soc_of_put_dai_link_codecs(link);
+err_put_cpu_dai:
+ of_node_put(cpu_dai);
+ snd_soc_of_put_dai_link_codecs(codec_link);
return ret;
}
@@ -193,8 +328,7 @@ static int odroid_audio_remove(struct platform_device *pdev)
{
struct odroid_priv *priv = platform_get_drvdata(pdev);
- of_node_put(priv->dai_link.cpu_of_node);
- snd_soc_of_put_dai_link_codecs(&priv->dai_link);
+ snd_soc_of_put_dai_link_codecs(&priv->card.dai_link[1]);
clk_put(priv->sclk_i2s);
clk_put(priv->clk_i2s_bus);
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 37f95eee1558..3c7baa561084 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -553,7 +553,7 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
- NULL, NULL);
+ NULL, NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
goto err_dis_pclk;
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index cc0840fff5aa..c08638b0e458 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -177,7 +177,7 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
ret = samsung_asoc_dma_platform_register(&pdev->dev,
pdata->dma_filter,
- NULL, NULL);
+ "tx", "rx", NULL);
if (ret) {
pr_err("failed to register the DMA: %d\n", ret);
return ret;
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 8d58d02183bf..a8026b640c95 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -446,7 +446,7 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO;
ret = samsung_asoc_dma_platform_register(&pdev->dev, NULL,
- NULL, NULL);
+ "tx", "rx", NULL);
if (ret) {
dev_err(&pdev->dev, "Failed to register the DMA: %d\n", ret);
return ret;
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index cb59911e65c0..5e4afb330416 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -430,7 +430,7 @@ static int spdif_probe(struct platform_device *pdev)
spdif->dma_playback = &spdif_stereo_out;
ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
- NULL, NULL);
+ NULL, NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register DMA: %d\n", ret);
goto err4;
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 922fb6aa3ed1..5aee11c94f2a 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -202,7 +202,7 @@ static int camelot_prepare(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
- pr_debug("PCM data: addr 0x%08ulx len %d\n",
+ pr_debug("PCM data: addr 0x%08lx len %d\n",
(u32)runtime->dma_addr, runtime->dma_bytes);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index aa7e902f0c02..3447dbdba1f1 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -780,7 +780,7 @@ static int fsi_clk_init(struct device *dev,
return -EINVAL;
}
if (clock->div == clock->own) {
- dev_err(dev, "cpu doens't support div clock\n");
+ dev_err(dev, "cpu doesn't support div clock\n");
return -EINVAL;
}
}
@@ -1768,11 +1768,12 @@ static const struct snd_pcm_ops fsi_pcm_ops = {
static int fsi_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
- return snd_pcm_lib_preallocate_pages_for_all(
+ snd_pcm_lib_preallocate_pages_for_all(
rtd->pcm,
SNDRV_DMA_TYPE_DEV,
rtd->card->snd_card->dev,
PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
+ return 0;
}
/*
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 59e250cc2e9d..022996d2db13 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1031,25 +1031,19 @@ static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
.prepare = rsnd_soc_dai_prepare,
};
-static void rsnd_parse_connect_simple(struct rsnd_priv *priv,
- struct device_node *dai_np,
- int dai_i, int is_play)
+static void rsnd_parse_tdm_split_mode(struct rsnd_priv *priv,
+ struct rsnd_dai_stream *io,
+ struct device_node *dai_np)
{
struct device *dev = rsnd_priv_to_dev(priv);
- struct rsnd_dai *rdai = rsnd_rdai_get(priv, dai_i);
- struct rsnd_dai_stream *io = is_play ?
- &rdai->playback :
- &rdai->capture;
struct device_node *ssiu_np = rsnd_ssiu_of_node(priv);
struct device_node *np;
+ int is_play = rsnd_io_is_play(io);
int i, j;
if (!ssiu_np)
return;
- if (!rsnd_io_to_mod_ssi(io))
- return;
-
/*
* This driver assumes that it is TDM Split mode
* if it includes ssiu node
@@ -1074,12 +1068,21 @@ static void rsnd_parse_connect_simple(struct rsnd_priv *priv,
}
}
+static void rsnd_parse_connect_simple(struct rsnd_priv *priv,
+ struct rsnd_dai_stream *io,
+ struct device_node *dai_np)
+{
+ if (!rsnd_io_to_mod_ssi(io))
+ return;
+
+ rsnd_parse_tdm_split_mode(priv, io, dai_np);
+}
+
static void rsnd_parse_connect_graph(struct rsnd_priv *priv,
struct rsnd_dai_stream *io,
struct device_node *endpoint)
{
struct device *dev = rsnd_priv_to_dev(priv);
- struct device_node *remote_port = of_graph_get_remote_port(endpoint);
struct device_node *remote_node = of_graph_get_remote_port_parent(endpoint);
if (!rsnd_io_to_mod_ssi(io))
@@ -1097,14 +1100,7 @@ static void rsnd_parse_connect_graph(struct rsnd_priv *priv,
dev_dbg(dev, "%s connected to HDMI1\n", io->name);
}
- /*
- * This driver assumes that it is TDM Split mode
- * if remote node has multi endpoint
- */
- if (of_get_child_count(remote_port) > 1) {
- rsnd_flags_set(io, RSND_STREAM_TDM_SPLIT);
- dev_dbg(dev, "%s is part of TDM Split\n", io->name);
- }
+ rsnd_parse_tdm_split_mode(priv, io, endpoint);
}
void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1292,8 +1288,10 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
for_each_child_of_node(dai_node, dai_np) {
__rsnd_dai_probe(priv, dai_np, dai_i);
if (rsnd_is_gen3(priv)) {
- rsnd_parse_connect_simple(priv, dai_np, dai_i, 1);
- rsnd_parse_connect_simple(priv, dai_np, dai_i, 0);
+ struct rsnd_dai *rdai = rsnd_rdai_get(priv, dai_i);
+
+ rsnd_parse_connect_simple(priv, &rdai->playback, dai_np);
+ rsnd_parse_connect_simple(priv, &rdai->capture, dai_np);
}
dai_i++;
}
@@ -1526,14 +1524,14 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
int ret;
/*
- * 1) Avoid duplicate register (ex. MIXer case)
- * 2) re-register if card was rebinded
+	 * 1) Avoid duplicate registration for DVC in the MIX case
+	 * 2) Allow duplicate registration for MIX
+	 * 3) Re-register if the card was rebound
*/
list_for_each_entry(kctrl, &card->controls, list) {
struct rsnd_kctrl_cfg *c = kctrl->private_data;
- if (strcmp(kctrl->id.name, name) == 0 &&
- c->mod == mod)
+ if (c == cfg)
return 0;
}
@@ -1575,7 +1573,6 @@ static int rsnd_preallocate_pages(struct snd_soc_pcm_runtime *rtd,
struct rsnd_priv *priv = rsnd_io_to_priv(io);
struct device *dev = rsnd_priv_to_dev(priv);
struct snd_pcm_substream *substream;
- int err;
/*
* use Audio-DMAC dev if we can use IPMMU
@@ -1588,12 +1585,10 @@ static int rsnd_preallocate_pages(struct snd_soc_pcm_runtime *rtd,
for (substream = rtd->pcm->streams[stream].substream;
substream;
substream = substream->next) {
- err = snd_pcm_lib_preallocate_pages(substream,
+ snd_pcm_lib_preallocate_pages(substream,
SNDRV_DMA_TYPE_DEV,
dev,
PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
- if (err < 0)
- return err;
}
return 0;
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 7cda60188f41..af19010b9d88 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -255,6 +255,30 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_M_REG(SSI_MODE, 0xc, 0x80),
RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80),
RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
+ RSND_GEN_S_REG(SSI9_BUSIF0_MODE, 0x48c),
+ RSND_GEN_S_REG(SSI9_BUSIF0_ADINR, 0x484),
+ RSND_GEN_S_REG(SSI9_BUSIF0_DALIGN, 0x488),
+ RSND_GEN_S_REG(SSI9_BUSIF1_MODE, 0x4a0),
+ RSND_GEN_S_REG(SSI9_BUSIF1_ADINR, 0x4a4),
+ RSND_GEN_S_REG(SSI9_BUSIF1_DALIGN, 0x4a8),
+ RSND_GEN_S_REG(SSI9_BUSIF2_MODE, 0x4c0),
+ RSND_GEN_S_REG(SSI9_BUSIF2_ADINR, 0x4c4),
+ RSND_GEN_S_REG(SSI9_BUSIF2_DALIGN, 0x4c8),
+ RSND_GEN_S_REG(SSI9_BUSIF3_MODE, 0x4e0),
+ RSND_GEN_S_REG(SSI9_BUSIF3_ADINR, 0x4e4),
+ RSND_GEN_S_REG(SSI9_BUSIF3_DALIGN, 0x4e8),
+ RSND_GEN_S_REG(SSI9_BUSIF4_MODE, 0xd80),
+ RSND_GEN_S_REG(SSI9_BUSIF4_ADINR, 0xd84),
+ RSND_GEN_S_REG(SSI9_BUSIF4_DALIGN, 0xd88),
+ RSND_GEN_S_REG(SSI9_BUSIF5_MODE, 0xda0),
+ RSND_GEN_S_REG(SSI9_BUSIF5_ADINR, 0xda4),
+ RSND_GEN_S_REG(SSI9_BUSIF5_DALIGN, 0xda8),
+ RSND_GEN_S_REG(SSI9_BUSIF6_MODE, 0xdc0),
+ RSND_GEN_S_REG(SSI9_BUSIF6_ADINR, 0xdc4),
+ RSND_GEN_S_REG(SSI9_BUSIF6_DALIGN, 0xdc8),
+ RSND_GEN_S_REG(SSI9_BUSIF7_MODE, 0xde0),
+ RSND_GEN_S_REG(SSI9_BUSIF7_ADINR, 0xde4),
+ RSND_GEN_S_REG(SSI9_BUSIF7_DALIGN, 0xde8),
};
static const struct rsnd_regmap_field_conf conf_scu[] = {
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 605e4b934982..90625c57847b 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -191,6 +191,30 @@ enum rsnd_reg {
SSI_SYS_STATUS7,
HDMI0_SEL,
HDMI1_SEL,
+ SSI9_BUSIF0_MODE,
+ SSI9_BUSIF1_MODE,
+ SSI9_BUSIF2_MODE,
+ SSI9_BUSIF3_MODE,
+ SSI9_BUSIF4_MODE,
+ SSI9_BUSIF5_MODE,
+ SSI9_BUSIF6_MODE,
+ SSI9_BUSIF7_MODE,
+ SSI9_BUSIF0_ADINR,
+ SSI9_BUSIF1_ADINR,
+ SSI9_BUSIF2_ADINR,
+ SSI9_BUSIF3_ADINR,
+ SSI9_BUSIF4_ADINR,
+ SSI9_BUSIF5_ADINR,
+ SSI9_BUSIF6_ADINR,
+ SSI9_BUSIF7_ADINR,
+ SSI9_BUSIF0_DALIGN,
+ SSI9_BUSIF1_DALIGN,
+ SSI9_BUSIF2_DALIGN,
+ SSI9_BUSIF3_DALIGN,
+ SSI9_BUSIF4_DALIGN,
+ SSI9_BUSIF5_DALIGN,
+ SSI9_BUSIF6_DALIGN,
+ SSI9_BUSIF7_DALIGN,
/* SSI */
SSICR,
@@ -209,6 +233,9 @@ enum rsnd_reg {
#define SSI_BUSIF_MODE(i) (SSI_BUSIF0_MODE + (i))
#define SSI_BUSIF_ADINR(i) (SSI_BUSIF0_ADINR + (i))
#define SSI_BUSIF_DALIGN(i) (SSI_BUSIF0_DALIGN + (i))
+#define SSI9_BUSIF_MODE(i) (SSI9_BUSIF0_MODE + (i))
+#define SSI9_BUSIF_ADINR(i) (SSI9_BUSIF0_ADINR + (i))
+#define SSI9_BUSIF_DALIGN(i) (SSI9_BUSIF0_DALIGN + (i))
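+
+/*
+ * SSI9's BUSIF4..7 registers fall outside the regular address calculation
+ * used for the other SSIs, so they get dedicated definitions here and are
+ * selected explicitly in rsnd_ssiu_init_gen2().
+ */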
#define SSI_SYS_STATUS(i) (SSI_SYS_STATUS0 + (i))
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 50348a2c9203..db81e066b92e 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -14,6 +14,7 @@
*/
#include "rsnd.h"
+#include <linux/sys_soc.h>
#define SRC_NAME "src"
@@ -134,20 +135,83 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
return rate;
}
+static const u32 bsdsr_table_pattern1[] = {
+ 0x01800000, /* 6 - 1/6 */
+ 0x01000000, /* 6 - 1/4 */
+ 0x00c00000, /* 6 - 1/3 */
+ 0x00800000, /* 6 - 1/2 */
+ 0x00600000, /* 6 - 2/3 */
+ 0x00400000, /* 6 - 1 */
+};
+
+static const u32 bsdsr_table_pattern2[] = {
+ 0x02400000, /* 6 - 1/6 */
+ 0x01800000, /* 6 - 1/4 */
+ 0x01200000, /* 6 - 1/3 */
+ 0x00c00000, /* 6 - 1/2 */
+ 0x00900000, /* 6 - 2/3 */
+ 0x00600000, /* 6 - 1 */
+};
+
+static const u32 bsisr_table[] = {
+ 0x00100060, /* 6 - 1/6 */
+ 0x00100040, /* 6 - 1/4 */
+ 0x00100030, /* 6 - 1/3 */
+ 0x00100020, /* 6 - 1/2 */
+ 0x00100020, /* 6 - 2/3 */
+ 0x00100020, /* 6 - 1 */
+};
+
+static const u32 chan288888[] = {
+ 0x00000006, /* 1 to 2 */
+ 0x000001fe, /* 1 to 8 */
+ 0x000001fe, /* 1 to 8 */
+ 0x000001fe, /* 1 to 8 */
+ 0x000001fe, /* 1 to 8 */
+ 0x000001fe, /* 1 to 8 */
+};
+
+static const u32 chan244888[] = {
+ 0x00000006, /* 1 to 2 */
+ 0x0000001e, /* 1 to 4 */
+ 0x0000001e, /* 1 to 4 */
+ 0x000001fe, /* 1 to 8 */
+ 0x000001fe, /* 1 to 8 */
+ 0x000001fe, /* 1 to 8 */
+};
+
+static const u32 chan222222[] = {
+ 0x00000006, /* 1 to 2 */
+ 0x00000006, /* 1 to 2 */
+ 0x00000006, /* 1 to 2 */
+ 0x00000006, /* 1 to 2 */
+ 0x00000006, /* 1 to 2 */
+ 0x00000006, /* 1 to 2 */
+};
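+
+/*
+ * Each chan* entry is a bitmask of channel counts allowed for the
+ * corresponding row of the BSDSR/BSISR tables (bit n set means n channels
+ * is permitted, e.g. 0x000001fe covers 1..8 channels, 0x00000006 only
+ * 1..2). rsnd_src_set_convert_rate() picks the first row whose mask
+ * includes the stream's channel count.
+ */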
+
+static const struct soc_device_attribute ov_soc[] = {
+ { .soc_id = "r8a77990" }, /* E3 */
+ { /* sentinel */ }
+};
+
static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
struct rsnd_mod *mod)
{
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ const struct soc_device_attribute *soc = soc_device_match(ov_soc);
int is_play = rsnd_io_is_play(io);
int use_src = 0;
u32 fin, fout;
u32 ifscr, fsrate, adinr;
u32 cr, route;
- u32 bsdsr, bsisr;
u32 i_busif, o_busif, tmp;
+ const u32 *bsdsr_table;
+ const u32 *chptn;
uint ratio;
+ int chan;
+ int idx;
if (!runtime)
return;
@@ -155,6 +219,8 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
fin = rsnd_src_get_in_rate(priv, io);
fout = rsnd_src_get_out_rate(priv, io);
+ chan = rsnd_runtime_channel_original(io);
+
	/* A ratio range of 6 down to 1/6 is more than enough for SRC_BSDSR */
if (fin == fout)
ratio = 0;
@@ -173,8 +239,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
/*
* SRC_ADINR
*/
- adinr = rsnd_get_adinr_bit(mod, io) |
- rsnd_runtime_channel_original(io);
+ adinr = rsnd_get_adinr_bit(mod, io) | chan;
/*
* SRC_IFSCR / SRC_IFSVR
@@ -207,21 +272,56 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
/*
* SRC_BSDSR / SRC_BSISR
+ *
+ * see
+ * Combination of Register Setting Related to
+ * FSO/FSI Ratio and Channel, Latency
*/
switch (rsnd_mod_id(mod)) {
+ case 0:
+ chptn = chan288888;
+ bsdsr_table = bsdsr_table_pattern1;
+ break;
+ case 1:
+ case 3:
+ case 4:
+ chptn = chan244888;
+ bsdsr_table = bsdsr_table_pattern1;
+ break;
+ case 2:
+ case 9:
+ chptn = chan222222;
+ bsdsr_table = bsdsr_table_pattern1;
+ break;
case 5:
case 6:
case 7:
case 8:
- bsdsr = 0x02400000; /* 6 - 1/6 */
- bsisr = 0x00100060; /* 6 - 1/6 */
+ chptn = chan222222;
+ bsdsr_table = bsdsr_table_pattern2;
break;
default:
- bsdsr = 0x01800000; /* 6 - 1/6 */
- bsisr = 0x00100060 ;/* 6 - 1/6 */
- break;
+ goto convert_rate_err;
}
+	/*
+	 * E3 needs to override the channel pattern for SRC0 and SRC4
+	 */
+ if (soc)
+ switch (rsnd_mod_id(mod)) {
+ case 0:
+ case 4:
+ chptn = chan222222;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(chan222222); idx++)
+ if (chptn[idx] & (1 << chan))
+ break;
+
+ if (chan > 8 ||
+ idx >= ARRAY_SIZE(chan222222))
+ goto convert_rate_err;
+
/* BUSIF_MODE */
tmp = rsnd_get_busif_shift(io, mod);
i_busif = ( is_play ? tmp : 0) | 1;
@@ -234,8 +334,8 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
rsnd_mod_write(mod, SRC_IFSCR, ifscr);
rsnd_mod_write(mod, SRC_IFSVR, fsrate);
rsnd_mod_write(mod, SRC_SRCCR, cr);
- rsnd_mod_write(mod, SRC_BSDSR, bsdsr);
- rsnd_mod_write(mod, SRC_BSISR, bsisr);
+ rsnd_mod_write(mod, SRC_BSDSR, bsdsr_table[idx]);
+ rsnd_mod_write(mod, SRC_BSISR, bsisr_table[idx]);
rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */
rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
@@ -244,6 +344,11 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
+
+ return;
+
+convert_rate_err:
+	dev_err(dev, "unknown BSDSR/BSISR settings\n");
}
static int rsnd_src_irq(struct rsnd_mod *mod,
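A minimal standalone sketch of the table lookup the src.c change above introduces: each row of a channel-pattern table is a bitmask of supported channel counts, and the first matching row index selects the BSDSR/BSISR values from parallel tables. The table contents and the names chan_pattern, pick_row, bsdsr_val and bsisr_val are illustrative only, not the driver's.

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Illustrative stand-ins for the per-ratio channel and register tables. */
static const unsigned int chan_pattern[] = {
	0x00000006,	/* row 0: channel counts 1-2 allowed */
	0x000001fe,	/* row 1: channel counts 1-8 allowed */
};
static const unsigned int bsdsr_val[] = { 0x01800000, 0x01000000 };
static const unsigned int bsisr_val[] = { 0x00100060, 0x00100040 };

/* Return the first row whose bitmask covers 'chan', or -1 if none does. */
static int pick_row(unsigned int chan)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(chan_pattern); idx++)
		if (chan_pattern[idx] & (1U << chan))
			return (int)idx;
	return -1;
}

int main(void)
{
	unsigned int chan = 2;
	int idx = pick_row(chan);

	if (idx < 0) {
		fprintf(stderr, "unsupported channel count %u\n", chan);
		return 1;
	}
	printf("chan=%u -> row %d: BSDSR=0x%08x BSISR=0x%08x\n",
	       chan, idx, bsdsr_val[idx], bsisr_val[idx]);
	return 0;
}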
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 45ef295743ec..f5afab631abb 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -286,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
if (rsnd_ssi_is_multi_slave(mod, io))
return 0;
- if (ssi->usrcnt > 1) {
+ if (ssi->usrcnt > 0) {
if (ssi->rate != rate) {
dev_err(dev, "SSI parent/child should use same rate\n");
return -EINVAL;
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index c5934adcfd01..2347f3404c06 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -79,7 +79,7 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
break;
case 9:
for (i = 0; i < 4; i++)
- rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << (id * 4));
+ rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << 4);
break;
}
@@ -181,28 +181,26 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
if (rsnd_ssi_use_busif(io)) {
int id = rsnd_mod_id(mod);
int busif = rsnd_mod_id_sub(mod);
+ enum rsnd_reg adinr_reg, mode_reg, dalign_reg;
- /*
- * FIXME
- *
- * We can't support SSI9-4/5/6/7, because its address is
- * out of calculation rule
- */
if ((id == 9) && (busif >= 4)) {
- struct device *dev = rsnd_priv_to_dev(priv);
-
- dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
- id, busif);
+ adinr_reg = SSI9_BUSIF_ADINR(busif);
+ mode_reg = SSI9_BUSIF_MODE(busif);
+ dalign_reg = SSI9_BUSIF_DALIGN(busif);
+ } else {
+ adinr_reg = SSI_BUSIF_ADINR(busif);
+ mode_reg = SSI_BUSIF_MODE(busif);
+ dalign_reg = SSI_BUSIF_DALIGN(busif);
}
- rsnd_mod_write(mod, SSI_BUSIF_ADINR(busif),
+ rsnd_mod_write(mod, adinr_reg,
rsnd_get_adinr_bit(mod, io) |
(rsnd_io_is_play(io) ?
rsnd_runtime_channel_after_ctu(io) :
rsnd_runtime_channel_original(io)));
- rsnd_mod_write(mod, SSI_BUSIF_MODE(busif),
+ rsnd_mod_write(mod, mode_reg,
rsnd_get_busif_shift(io, mod) | 1);
- rsnd_mod_write(mod, SSI_BUSIF_DALIGN(busif),
+ rsnd_mod_write(mod, dalign_reg,
rsnd_get_dalign(mod, io));
}
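The ssiu.c hunk above drops the SSI9 BUSIF4-7 error path and instead chooses the register identifiers up front so a single write sequence serves both register blocks. A small standalone sketch of that select-then-share pattern follows; the register names and the stub reg_write() are made up for illustration.

#include <stdio.h>

enum reg { REG_ADINR, REG_MODE, REG_ADINR_ALT, REG_MODE_ALT };

static void reg_write(enum reg r, unsigned int val)
{
	printf("write reg %d = 0x%x\n", r, val);
}

static void setup_busif(int id, int busif, unsigned int adinr, unsigned int mode)
{
	enum reg adinr_reg, mode_reg;

	/* the special-case block gets its own register identifiers */
	if (id == 9 && busif >= 4) {
		adinr_reg = REG_ADINR_ALT;
		mode_reg = REG_MODE_ALT;
	} else {
		adinr_reg = REG_ADINR;
		mode_reg = REG_MODE;
	}

	/* one shared write path regardless of which block was picked */
	reg_write(adinr_reg, adinr);
	reg_write(mode_reg, mode);
}

int main(void)
{
	setup_busif(9, 5, 0x2, 0x1);	/* alternate block */
	setup_busif(0, 1, 0x2, 0x1);	/* normal block */
	return 0;
}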
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index e263757e4a69..78c3145b4109 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -541,15 +541,9 @@ static int siu_pcm_new(struct snd_soc_pcm_runtime *rtd)
if (ret < 0)
return ret;
- ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV, NULL,
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_DEV, card->dev,
SIU_BUFFER_BYTES_MAX, SIU_BUFFER_BYTES_MAX);
- if (ret < 0) {
- dev_err(card->dev,
- "snd_pcm_lib_preallocate_pages_for_all() err=%d",
- ret);
- goto fail;
- }
(*port_info)->pcm = pcm;
@@ -562,11 +556,6 @@ static int siu_pcm_new(struct snd_soc_pcm_runtime *rtd)
dev_info(card->dev, "SuperH SIU driver initialized.\n");
return 0;
-
-fail:
- siu_free_port(siu_ports[pdev->id]);
- dev_err(card->dev, "SIU: failed to initialize.\n");
- return ret;
}
static void siu_pcm_free(struct snd_pcm *pcm)
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 699397a09167..03d5b9ccd3fc 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -345,17 +345,13 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
return 0;
}
-static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
+static int soc_compr_components_trigger(struct snd_compr_stream *cstream,
+ int cmd)
{
-
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0, __ret;
-
- mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+ int ret;
for_each_rtdcom(rtd, rtdcom) {
component = rtdcom->component;
@@ -364,10 +360,24 @@ static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
!component->driver->compr_ops->trigger)
continue;
- __ret = component->driver->compr_ops->trigger(cstream, cmd);
- if (__ret < 0)
- ret = __ret;
+ ret = component->driver->compr_ops->trigger(cstream, cmd);
+ if (ret < 0)
+ return ret;
}
+
+ return 0;
+}
+
+static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret;
+
+ mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
+
+ ret = soc_compr_components_trigger(cstream, cmd);
if (ret < 0)
goto out;
@@ -391,27 +401,12 @@ out:
static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
- struct snd_soc_component *component;
- struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
- int ret = 0, __ret, stream;
+ int ret, stream;
if (cmd == SND_COMPR_TRIGGER_PARTIAL_DRAIN ||
- cmd == SND_COMPR_TRIGGER_DRAIN) {
-
- for_each_rtdcom(fe, rtdcom) {
- component = rtdcom->component;
-
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->trigger)
- continue;
-
- __ret = component->driver->compr_ops->trigger(cstream, cmd);
- if (__ret < 0)
- ret = __ret;
- }
- return ret;
- }
+ cmd == SND_COMPR_TRIGGER_DRAIN)
+ return soc_compr_components_trigger(cstream, cmd);
if (cstream->direction == SND_COMPRESS_PLAYBACK)
stream = SNDRV_PCM_STREAM_PLAYBACK;
@@ -426,17 +421,7 @@ static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
goto out;
}
- for_each_rtdcom(fe, rtdcom) {
- component = rtdcom->component;
-
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->trigger)
- continue;
-
- __ret = component->driver->compr_ops->trigger(cstream, cmd);
- if (__ret < 0)
- ret = __ret;
- }
+ ret = soc_compr_components_trigger(cstream, cmd);
if (ret < 0)
goto out;
@@ -465,14 +450,35 @@ out:
return ret;
}
-static int soc_compr_set_params(struct snd_compr_stream *cstream,
- struct snd_compr_params *params)
+static int soc_compr_components_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
+ int ret;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->set_params)
+ continue;
+
+ ret = component->driver->compr_ops->set_params(cstream, params);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int soc_compr_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0, __ret;
+ int ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -489,17 +495,7 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
goto err;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->set_params)
- continue;
-
- __ret = component->driver->compr_ops->set_params(cstream, params);
- if (__ret < 0)
- ret = __ret;
- }
+ ret = soc_compr_components_set_params(cstream, params);
if (ret < 0)
goto err;
@@ -522,7 +518,7 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
cancel_delayed_work_sync(&rtd->delayed_work);
- return ret;
+ return 0;
err:
mutex_unlock(&rtd->pcm_mutex);
@@ -535,10 +531,8 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *fe = cstream->private_data;
struct snd_pcm_substream *fe_substream =
fe->pcm->streams[cstream->direction].substream;
- struct snd_soc_component *component;
- struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
- int ret = 0, __ret, stream;
+ int ret, stream;
if (cstream->direction == SND_COMPRESS_PLAYBACK)
stream = SNDRV_PCM_STREAM_PLAYBACK;
@@ -571,17 +565,7 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
goto out;
}
- for_each_rtdcom(fe, rtdcom) {
- component = rtdcom->component;
-
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->set_params)
- continue;
-
- __ret = component->driver->compr_ops->set_params(cstream, params);
- if (__ret < 0)
- ret = __ret;
- }
+ ret = soc_compr_components_set_params(cstream, params);
if (ret < 0)
goto out;
@@ -607,7 +591,7 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0, __ret;
+ int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -624,9 +608,8 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
!component->driver->compr_ops->get_params)
continue;
- __ret = component->driver->compr_ops->get_params(cstream, params);
- if (__ret < 0)
- ret = __ret;
+ ret = component->driver->compr_ops->get_params(cstream, params);
+ break;
}
err:
@@ -640,7 +623,7 @@ static int soc_compr_get_caps(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- int ret = 0, __ret;
+ int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -651,9 +634,8 @@ static int soc_compr_get_caps(struct snd_compr_stream *cstream,
!component->driver->compr_ops->get_caps)
continue;
- __ret = component->driver->compr_ops->get_caps(cstream, caps);
- if (__ret < 0)
- ret = __ret;
+ ret = component->driver->compr_ops->get_caps(cstream, caps);
+ break;
}
mutex_unlock(&rtd->pcm_mutex);
@@ -666,7 +648,7 @@ static int soc_compr_get_codec_caps(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- int ret = 0, __ret;
+ int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -677,9 +659,9 @@ static int soc_compr_get_codec_caps(struct snd_compr_stream *cstream,
!component->driver->compr_ops->get_codec_caps)
continue;
- __ret = component->driver->compr_ops->get_codec_caps(cstream, codec);
- if (__ret < 0)
- ret = __ret;
+ ret = component->driver->compr_ops->get_codec_caps(cstream,
+ codec);
+ break;
}
mutex_unlock(&rtd->pcm_mutex);
@@ -692,7 +674,7 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0, __ret;
+ int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -709,9 +691,9 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
!component->driver->compr_ops->ack)
continue;
- __ret = component->driver->compr_ops->ack(cstream, bytes);
- if (__ret < 0)
- ret = __ret;
+ ret = component->driver->compr_ops->ack(cstream, bytes);
+ if (ret < 0)
+ goto err;
}
err:
@@ -725,7 +707,7 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- int ret = 0, __ret;
+ int ret = 0;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -740,9 +722,8 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
!component->driver->compr_ops->pointer)
continue;
- __ret = component->driver->compr_ops->pointer(cstream, tstamp);
- if (__ret < 0)
- ret = __ret;
+ ret = component->driver->compr_ops->pointer(cstream, tstamp);
+ break;
}
mutex_unlock(&rtd->pcm_mutex);
@@ -781,7 +762,7 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0, __ret;
+ int ret;
if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_metadata) {
ret = cpu_dai->driver->cops->set_metadata(cstream, metadata, cpu_dai);
@@ -796,12 +777,13 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
!component->driver->compr_ops->set_metadata)
continue;
- __ret = component->driver->compr_ops->set_metadata(cstream, metadata);
- if (__ret < 0)
- ret = __ret;
+ ret = component->driver->compr_ops->set_metadata(cstream,
+ metadata);
+ if (ret < 0)
+ return ret;
}
- return ret;
+ return 0;
}
static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
@@ -811,7 +793,7 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0, __ret;
+ int ret;
if (cpu_dai->driver->cops && cpu_dai->driver->cops->get_metadata) {
ret = cpu_dai->driver->cops->get_metadata(cstream, metadata, cpu_dai);
@@ -826,12 +808,11 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
!component->driver->compr_ops->get_metadata)
continue;
- __ret = component->driver->compr_ops->get_metadata(cstream, metadata);
- if (__ret < 0)
- ret = __ret;
+ return component->driver->compr_ops->get_metadata(cstream,
+ metadata);
}
- return ret;
+ return 0;
}
/* ASoC Compress operations */
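The soc-compress.c changes factor the per-component loops into helpers such as soc_compr_components_trigger() and switch the error handling from "remember the last error" to "stop at the first error". A standalone sketch of that pattern is below; the component table and its ops are invented for illustration.

#include <stdio.h>

struct comp_ops {
	int (*trigger)(int cmd);
};

static int ok_trigger(int cmd)   { (void)cmd; return 0; }
static int fail_trigger(int cmd) { (void)cmd; return -5; /* e.g. -EIO */ }

static const struct comp_ops comps[] = {
	{ .trigger = ok_trigger },
	{ .trigger = NULL },		/* component without compress ops */
	{ .trigger = fail_trigger },
};

/* Run the op on every component, returning the first error. */
static int components_trigger(int cmd)
{
	unsigned int i;
	int ret;

	for (i = 0; i < sizeof(comps) / sizeof(comps[0]); i++) {
		if (!comps[i].trigger)
			continue;
		ret = comps[i].trigger(cmd);
		if (ret < 0)
			return ret;
	}
	return 0;
}

int main(void)
{
	printf("trigger -> %d\n", components_trigger(1));
	return 0;
}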
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0462b3ec977a..93d316d5bf8e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -425,6 +425,14 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
+static void snd_soc_flush_all_delayed_work(struct snd_soc_card *card)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ for_each_card_rtds(card, rtd)
+ flush_delayed_work(&rtd->delayed_work);
+}
+
static void codec2codec_close_delayed_work(struct work_struct *work)
{
/*
@@ -494,8 +502,7 @@ int snd_soc_suspend(struct device *dev)
}
/* close any waiting streams */
- for_each_card_rtds(card, rtd)
- flush_delayed_work(&rtd->delayed_work);
+ snd_soc_flush_all_delayed_work(card);
for_each_card_rtds(card, rtd) {
@@ -735,14 +742,19 @@ static struct snd_soc_component *soc_find_component(
const struct device_node *of_node, const char *name)
{
struct snd_soc_component *component;
+ struct device_node *component_of_node;
lockdep_assert_held(&client_mutex);
for_each_component(component) {
if (of_node) {
- if (component->dev->of_node == of_node)
+ component_of_node = component->dev->of_node;
+ if (!component_of_node && component->dev->parent)
+ component_of_node = component->dev->parent->of_node;
+
+ if (component_of_node == of_node)
return component;
- } else if (strcmp(component->name, name) == 0) {
+ } else if (name && strcmp(component->name, name) == 0) {
return component;
}
}
@@ -863,7 +875,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link)
{
struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_dai_link_component *codecs = dai_link->codecs;
+ struct snd_soc_dai_link_component *codecs;
struct snd_soc_dai_link_component cpu_dai_component;
struct snd_soc_component *component;
struct snd_soc_dai **codec_dais;
@@ -898,13 +910,12 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
rtd->num_codecs = dai_link->num_codecs;
/* Find CODEC from registered CODECs */
- /* we can use for_each_rtd_codec_dai() after this */
codec_dais = rtd->codec_dais;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dais[i] = snd_soc_find_dai(&codecs[i]);
+ for_each_link_codecs(dai_link, i, codecs) {
+ codec_dais[i] = snd_soc_find_dai(codecs);
if (!codec_dais[i]) {
- dev_err(card->dev, "ASoC: CODEC DAI %s not registered\n",
- codecs[i].dai_name);
+ dev_info(card->dev, "ASoC: CODEC DAI %s not registered\n",
+ codecs->dai_name);
goto _err_defer;
}
snd_soc_rtdcom_add(rtd, codec_dais[i]->component);
@@ -915,7 +926,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
/* find one from the set of registered platforms */
for_each_component(component) {
- if (!snd_soc_is_matching_component(dai_link->platform,
+ if (!snd_soc_is_matching_component(dai_link->platforms,
component))
continue;
@@ -930,28 +941,32 @@ _err_defer:
return -EPROBE_DEFER;
}
+static void soc_cleanup_component(struct snd_soc_component *component)
+{
+ list_del(&component->card_list);
+ snd_soc_dapm_free(snd_soc_component_get_dapm(component));
+ soc_cleanup_component_debugfs(component);
+ component->card = NULL;
+ if (!component->driver->ignore_module_refcount)
+ module_put(component->dev->driver->owner);
+}
+
static void soc_remove_component(struct snd_soc_component *component)
{
if (!component->card)
return;
- list_del(&component->card_list);
-
if (component->driver->remove)
component->driver->remove(component);
- snd_soc_dapm_free(snd_soc_component_get_dapm(component));
-
- soc_cleanup_component_debugfs(component);
- component->card = NULL;
- module_put(component->dev->driver->owner);
+ soc_cleanup_component(component);
}
static void soc_remove_dai(struct snd_soc_dai *dai, int order)
{
int err;
- if (!dai || !dai->probed ||
+ if (!dai || !dai->probed || !dai->driver ||
dai->driver->remove_order != order)
return;
@@ -1026,12 +1041,17 @@ static void soc_remove_dai_links(struct snd_soc_card *card)
static int snd_soc_init_platform(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link)
{
- struct snd_soc_dai_link_component *platform = dai_link->platform;
+ struct snd_soc_dai_link_component *platform = dai_link->platforms;
/*
- * FIXME
+ * REMOVE ME
*
- * this function should be removed in the future
+ * This is glue code for Legacy vs Modern dai_link.
+	 * This function will be removed once all drivers are switched to
+ * modern style dai_link.
+	 * Drivers shouldn't use both legacy and modern styles at the same time.
+ * see
+ * soc.h :: struct snd_soc_dai_link
*/
/* convert Legacy platform link */
if (!platform) {
@@ -1041,10 +1061,12 @@ static int snd_soc_init_platform(struct snd_soc_card *card,
if (!platform)
return -ENOMEM;
- dai_link->platform = platform;
- platform->name = dai_link->platform_name;
- platform->of_node = dai_link->platform_of_node;
- platform->dai_name = NULL;
+ dai_link->platforms = platform;
+ dai_link->num_platforms = 1;
+ dai_link->legacy_platform = 1;
+ platform->name = dai_link->platform_name;
+ platform->of_node = dai_link->platform_of_node;
+ platform->dai_name = NULL;
}
/* if there's no platform we match on the empty platform */
@@ -1055,9 +1077,38 @@ static int snd_soc_init_platform(struct snd_soc_card *card,
return 0;
}
+static void soc_cleanup_platform(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *link;
+ int i;
+ /*
+ * FIXME
+ *
+ * this function should be removed with snd_soc_init_platform
+ */
+
+ for_each_card_prelinks(card, i, link) {
+ if (link->legacy_platform) {
+ link->legacy_platform = 0;
+ link->platforms = NULL;
+ }
+ }
+}
+
static int snd_soc_init_multicodec(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link)
{
+ /*
+ * REMOVE ME
+ *
+ * This is glue code for Legacy vs Modern dai_link.
+	 * This function will be removed once all drivers are switched to
+ * modern style dai_link.
+	 * Drivers shouldn't use both legacy and modern styles at the same time.
+ * see
+ * soc.h :: struct snd_soc_dai_link
+ */
+
/* Legacy codec/codec_dai link is a single entry in multicodec */
if (dai_link->codec_name || dai_link->codec_of_node ||
dai_link->codec_dai_name) {
@@ -1119,16 +1170,33 @@ static int soc_init_dai_link(struct snd_soc_card *card,
}
}
+ /* FIXME */
+ if (link->num_platforms > 1) {
+ dev_err(card->dev,
+ "ASoC: multi platform is not yet supported %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
/*
* Platform may be specified by either name or OF node, but
* can be left unspecified, and a dummy platform will be used.
*/
- if (link->platform->name && link->platform->of_node) {
+ if (link->platforms->name && link->platforms->of_node) {
dev_err(card->dev,
"ASoC: Both platform name/of_node are set for %s\n",
link->name);
return -EINVAL;
}
+
+ /*
+	 * Defer card registration if platform dai component is not added to
+ * component list.
+ */
+ if ((link->platforms->of_node || link->platforms->name) &&
+ !soc_find_component(link->platforms->of_node, link->platforms->name))
+ return -EPROBE_DEFER;
+
/*
* CPU device may be specified by either name or OF node, but
* can be left unspecified, and will be matched based on DAI
@@ -1140,6 +1208,15 @@ static int soc_init_dai_link(struct snd_soc_card *card,
link->name);
return -EINVAL;
}
+
+ /*
+	 * Defer card registration if cpu dai component is not added to
+ * component list.
+ */
+ if ((link->cpu_of_node || link->cpu_name) &&
+ !soc_find_component(link->cpu_of_node, link->cpu_name))
+ return -EPROBE_DEFER;
+
/*
* At least one of CPU DAI name or CPU device name/node must be
* specified
@@ -1304,11 +1381,14 @@ static int soc_probe_component(struct snd_soc_card *card,
return 0;
}
- if (!try_module_get(component->dev->driver->owner))
+ if (!component->driver->ignore_module_refcount &&
+ !try_module_get(component->dev->driver->owner))
return -ENODEV;
component->card = card;
dapm->card = card;
+ INIT_LIST_HEAD(&component->card_list);
+ INIT_LIST_HEAD(&dapm->list);
soc_set_name_prefix(card, component);
soc_init_component_debugfs(component);
@@ -1371,12 +1451,9 @@ static int soc_probe_component(struct snd_soc_card *card,
/* see for_each_card_components */
list_add(&component->card_list, &card->component_dev_list);
- return 0;
-
err_probe:
- soc_cleanup_component_debugfs(component);
- component->card = NULL;
- module_put(component->dev->driver->owner);
+ if (ret < 0)
+ soc_cleanup_component(component);
return ret;
}
@@ -1561,27 +1638,24 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
dai_link->stream_name);
return ret;
}
- } else {
-
- if (!dai_link->params) {
- /* create the pcm */
- ret = soc_new_pcm(rtd, num);
- if (ret < 0) {
- dev_err(card->dev, "ASoC: can't create pcm %s :%d\n",
- dai_link->stream_name, ret);
- return ret;
- }
- ret = soc_link_dai_pcm_new(&cpu_dai, 1, rtd);
- if (ret < 0)
- return ret;
- ret = soc_link_dai_pcm_new(rtd->codec_dais,
- rtd->num_codecs, rtd);
- if (ret < 0)
- return ret;
- } else {
- INIT_DELAYED_WORK(&rtd->delayed_work,
- codec2codec_close_delayed_work);
+ } else if (!dai_link->params) {
+ /* create the pcm */
+ ret = soc_new_pcm(rtd, num);
+ if (ret < 0) {
+ dev_err(card->dev, "ASoC: can't create pcm %s :%d\n",
+ dai_link->stream_name, ret);
+ return ret;
}
+ ret = soc_link_dai_pcm_new(&cpu_dai, 1, rtd);
+ if (ret < 0)
+ return ret;
+ ret = soc_link_dai_pcm_new(rtd->codec_dais,
+ rtd->num_codecs, rtd);
+ if (ret < 0)
+ return ret;
+ } else {
+ INIT_DELAYED_WORK(&rtd->delayed_work,
+ codec2codec_close_delayed_work);
}
return 0;
@@ -1921,7 +1995,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
dev_err(card->dev, "init platform error");
continue;
}
- dai_link->platform->name = component->name;
+ dai_link->platforms->name = component->name;
/* convert non BE into BE */
dai_link->no_pcm = 1;
@@ -1957,6 +2031,30 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
}
}
+static int soc_cleanup_card_resources(struct snd_soc_card *card)
+{
+ /* free the ALSA card at first; this syncs with pending operations */
+ if (card->snd_card)
+ snd_card_free(card->snd_card);
+
+ /* remove and free each DAI */
+ soc_remove_dai_links(card);
+ soc_remove_pcm_runtimes(card);
+ soc_cleanup_platform(card);
+
+ /* remove auxiliary devices */
+ soc_remove_aux_devices(card);
+
+ snd_soc_dapm_free(&card->dapm);
+ soc_cleanup_card_debugfs(card);
+
+ /* remove the card */
+ if (card->remove)
+ card->remove(card);
+
+ return 0;
+}
+
static int snd_soc_instantiate_card(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *rtd;
@@ -1966,6 +2064,11 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
mutex_lock(&client_mutex);
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
+ card->dapm.bias_level = SND_SOC_BIAS_OFF;
+ card->dapm.dev = card->dev;
+ card->dapm.card = card;
+ list_add(&card->dapm.list, &card->dapm_list);
+
/* check whether any platform is ignore machine FE and using topology */
soc_check_tplg_fes(card);
@@ -1973,14 +2076,14 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
for_each_card_prelinks(card, i, dai_link) {
ret = soc_bind_dai_link(card, dai_link);
if (ret != 0)
- goto base_error;
+ goto probe_end;
}
/* bind aux_devs too */
for (i = 0; i < card->num_aux_devs; i++) {
ret = soc_bind_aux_dev(card, i);
if (ret != 0)
- goto base_error;
+ goto probe_end;
}
/* add predefined DAI links to the list */
@@ -1994,16 +2097,11 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
dev_err(card->dev,
"ASoC: can't create sound card for card %s: %d\n",
card->name, ret);
- goto base_error;
+ goto probe_end;
}
soc_init_card_debugfs(card);
- card->dapm.bias_level = SND_SOC_BIAS_OFF;
- card->dapm.dev = card->dev;
- card->dapm.card = card;
- list_add(&card->dapm.list, &card->dapm_list);
-
#ifdef CONFIG_DEBUG_FS
snd_soc_dapm_debugfs_init(&card->dapm, card->debugfs_card_root);
#endif
@@ -2025,7 +2123,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
if (card->probe) {
ret = card->probe(card);
if (ret < 0)
- goto card_probe_error;
+ goto probe_end;
}
/* probe all components used by DAI links on this card */
@@ -2036,7 +2134,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
dev_err(card->dev,
"ASoC: failed to instantiate card %d\n",
ret);
- goto probe_dai_err;
+ goto probe_end;
}
}
}
@@ -2044,7 +2142,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
/* probe auxiliary components */
ret = soc_probe_aux_devices(card);
if (ret < 0)
- goto probe_dai_err;
+ goto probe_end;
/*
* Find new DAI links added during probing components and bind them.
@@ -2056,10 +2154,10 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
ret = soc_init_dai_link(card, dai_link);
if (ret)
- goto probe_dai_err;
+ goto probe_end;
ret = soc_bind_dai_link(card, dai_link);
if (ret)
- goto probe_dai_err;
+ goto probe_end;
}
/* probe all DAI links on this card */
@@ -2070,7 +2168,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
dev_err(card->dev,
"ASoC: failed to instantiate card %d\n",
ret);
- goto probe_dai_err;
+ goto probe_end;
}
}
}
@@ -2117,7 +2215,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
if (ret < 0) {
dev_err(card->dev, "ASoC: %s late_probe() failed: %d\n",
card->name, ret);
- goto probe_aux_dev_err;
+ goto probe_end;
}
}
@@ -2127,33 +2225,17 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
if (ret < 0) {
dev_err(card->dev, "ASoC: failed to register soundcard %d\n",
ret);
- goto probe_aux_dev_err;
+ goto probe_end;
}
card->instantiated = 1;
dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
- mutex_unlock(&card->mutex);
- mutex_unlock(&client_mutex);
-
- return 0;
-
-probe_aux_dev_err:
- soc_remove_aux_devices(card);
-
-probe_dai_err:
- soc_remove_dai_links(card);
-card_probe_error:
- if (card->remove)
- card->remove(card);
-
- snd_soc_dapm_free(&card->dapm);
- soc_cleanup_card_debugfs(card);
- snd_card_free(card->snd_card);
+probe_end:
+ if (ret < 0)
+ soc_cleanup_card_resources(card);
-base_error:
- soc_remove_pcm_runtimes(card);
mutex_unlock(&card->mutex);
mutex_unlock(&client_mutex);
@@ -2182,34 +2264,6 @@ static int soc_probe(struct platform_device *pdev)
return snd_soc_register_card(card);
}
-static int soc_cleanup_card_resources(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- /* make sure any delayed work runs */
- for_each_card_rtds(card, rtd)
- flush_delayed_work(&rtd->delayed_work);
-
- /* free the ALSA card at first; this syncs with pending operations */
- snd_card_free(card->snd_card);
-
- /* remove and free each DAI */
- soc_remove_dai_links(card);
- soc_remove_pcm_runtimes(card);
-
- /* remove auxiliary devices */
- soc_remove_aux_devices(card);
-
- snd_soc_dapm_free(&card->dapm);
- soc_cleanup_card_debugfs(card);
-
- /* remove the card */
- if (card->remove)
- card->remove(card);
-
- return 0;
-}
-
/* removes a socdev */
static int soc_remove(struct platform_device *pdev)
{
@@ -2231,8 +2285,7 @@ int snd_soc_poweroff(struct device *dev)
* Flush out pmdown_time work - we actually do want to run it
* now, we're shutting down so no imminent restart.
*/
- for_each_card_rtds(card, rtd)
- flush_delayed_work(&rtd->delayed_work);
+ snd_soc_flush_all_delayed_work(card);
snd_soc_dapm_shutdown(card);
@@ -2739,15 +2792,18 @@ int snd_soc_register_card(struct snd_soc_card *card)
if (!card->name || !card->dev)
return -EINVAL;
+ mutex_lock(&client_mutex);
for_each_card_prelinks(card, i, link) {
ret = soc_init_dai_link(card, link);
if (ret) {
dev_err(card->dev, "ASoC: failed to init link %s\n",
link->name);
+ mutex_unlock(&client_mutex);
return ret;
}
}
+ mutex_unlock(&client_mutex);
dev_set_drvdata(card->dev, card);
@@ -2773,6 +2829,7 @@ static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
if (card->instantiated) {
card->instantiated = false;
snd_soc_dapm_shutdown(card);
+ snd_soc_flush_all_delayed_work(card);
soc_cleanup_card_resources(card);
if (!unregister)
list_add(&card->list, &unbind_card_list);
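The soc-core.c rework above collapses the ladder of error labels in snd_soc_instantiate_card() into a single probe_end label that calls one cleanup routine able to cope with partially initialised state. A standalone sketch of that single-exit pattern follows; the struct card fields and the failure injection are purely illustrative.

#include <stdio.h>
#include <stdlib.h>

struct card {
	void *snd_card;
	void *links;
};

static void cleanup_card_resources(struct card *c)
{
	/* every step checks for "not set up yet", so early failures are fine */
	if (c->snd_card) {
		free(c->snd_card);
		c->snd_card = NULL;
	}
	if (c->links) {
		free(c->links);
		c->links = NULL;
	}
}

static int instantiate_card(struct card *c, int fail_step)
{
	int ret = 0;

	c->snd_card = malloc(16);
	if (!c->snd_card || fail_step == 1) {
		ret = -12;	/* e.g. -ENOMEM */
		goto probe_end;
	}

	c->links = malloc(16);
	if (!c->links || fail_step == 2) {
		ret = -12;
		goto probe_end;
	}

probe_end:
	if (ret < 0)
		cleanup_card_resources(c);
	return ret;
}

int main(void)
{
	struct card c = { 0 };

	printf("fail at step 2 -> %d\n", instantiate_card(&c, 2));
	printf("success -> %d\n", instantiate_card(&c, 0));
	cleanup_card_resources(&c);
	return 0;
}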
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index a5178845065b..1ec06ef6d161 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -64,61 +64,85 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
/* dapm power sequences - make this per codec in the future */
static int dapm_up_seq[] = {
- [snd_soc_dapm_pre] = 0,
- [snd_soc_dapm_regulator_supply] = 1,
- [snd_soc_dapm_pinctrl] = 1,
- [snd_soc_dapm_clock_supply] = 1,
- [snd_soc_dapm_supply] = 2,
- [snd_soc_dapm_micbias] = 3,
- [snd_soc_dapm_dai_link] = 2,
- [snd_soc_dapm_dai_in] = 4,
- [snd_soc_dapm_dai_out] = 4,
- [snd_soc_dapm_aif_in] = 4,
- [snd_soc_dapm_aif_out] = 4,
- [snd_soc_dapm_mic] = 5,
- [snd_soc_dapm_mux] = 6,
- [snd_soc_dapm_demux] = 6,
- [snd_soc_dapm_dac] = 7,
- [snd_soc_dapm_switch] = 8,
- [snd_soc_dapm_mixer] = 8,
- [snd_soc_dapm_mixer_named_ctl] = 8,
- [snd_soc_dapm_pga] = 9,
- [snd_soc_dapm_adc] = 10,
- [snd_soc_dapm_out_drv] = 11,
- [snd_soc_dapm_hp] = 11,
- [snd_soc_dapm_spk] = 11,
- [snd_soc_dapm_line] = 11,
- [snd_soc_dapm_kcontrol] = 12,
- [snd_soc_dapm_post] = 13,
+ [snd_soc_dapm_pre] = 1,
+ [snd_soc_dapm_regulator_supply] = 2,
+ [snd_soc_dapm_pinctrl] = 2,
+ [snd_soc_dapm_clock_supply] = 2,
+ [snd_soc_dapm_supply] = 3,
+ [snd_soc_dapm_micbias] = 4,
+ [snd_soc_dapm_vmid] = 4,
+ [snd_soc_dapm_dai_link] = 3,
+ [snd_soc_dapm_dai_in] = 5,
+ [snd_soc_dapm_dai_out] = 5,
+ [snd_soc_dapm_aif_in] = 5,
+ [snd_soc_dapm_aif_out] = 5,
+ [snd_soc_dapm_mic] = 6,
+ [snd_soc_dapm_siggen] = 6,
+ [snd_soc_dapm_input] = 6,
+ [snd_soc_dapm_output] = 6,
+ [snd_soc_dapm_mux] = 7,
+ [snd_soc_dapm_demux] = 7,
+ [snd_soc_dapm_dac] = 8,
+ [snd_soc_dapm_switch] = 9,
+ [snd_soc_dapm_mixer] = 9,
+ [snd_soc_dapm_mixer_named_ctl] = 9,
+ [snd_soc_dapm_pga] = 10,
+ [snd_soc_dapm_buffer] = 10,
+ [snd_soc_dapm_scheduler] = 10,
+ [snd_soc_dapm_effect] = 10,
+ [snd_soc_dapm_src] = 10,
+ [snd_soc_dapm_asrc] = 10,
+ [snd_soc_dapm_encoder] = 10,
+ [snd_soc_dapm_decoder] = 10,
+ [snd_soc_dapm_adc] = 11,
+ [snd_soc_dapm_out_drv] = 12,
+ [snd_soc_dapm_hp] = 12,
+ [snd_soc_dapm_spk] = 12,
+ [snd_soc_dapm_line] = 12,
+ [snd_soc_dapm_sink] = 12,
+ [snd_soc_dapm_kcontrol] = 13,
+ [snd_soc_dapm_post] = 14,
};
static int dapm_down_seq[] = {
- [snd_soc_dapm_pre] = 0,
- [snd_soc_dapm_kcontrol] = 1,
- [snd_soc_dapm_adc] = 2,
- [snd_soc_dapm_hp] = 3,
- [snd_soc_dapm_spk] = 3,
- [snd_soc_dapm_line] = 3,
- [snd_soc_dapm_out_drv] = 3,
- [snd_soc_dapm_pga] = 4,
- [snd_soc_dapm_switch] = 5,
- [snd_soc_dapm_mixer_named_ctl] = 5,
- [snd_soc_dapm_mixer] = 5,
- [snd_soc_dapm_dac] = 6,
- [snd_soc_dapm_mic] = 7,
- [snd_soc_dapm_micbias] = 8,
- [snd_soc_dapm_mux] = 9,
- [snd_soc_dapm_demux] = 9,
- [snd_soc_dapm_aif_in] = 10,
- [snd_soc_dapm_aif_out] = 10,
- [snd_soc_dapm_dai_in] = 10,
- [snd_soc_dapm_dai_out] = 10,
- [snd_soc_dapm_dai_link] = 11,
- [snd_soc_dapm_supply] = 12,
- [snd_soc_dapm_clock_supply] = 13,
- [snd_soc_dapm_pinctrl] = 13,
- [snd_soc_dapm_regulator_supply] = 13,
- [snd_soc_dapm_post] = 14,
+ [snd_soc_dapm_pre] = 1,
+ [snd_soc_dapm_kcontrol] = 2,
+ [snd_soc_dapm_adc] = 3,
+ [snd_soc_dapm_hp] = 4,
+ [snd_soc_dapm_spk] = 4,
+ [snd_soc_dapm_line] = 4,
+ [snd_soc_dapm_out_drv] = 4,
+ [snd_soc_dapm_sink] = 4,
+ [snd_soc_dapm_pga] = 5,
+ [snd_soc_dapm_buffer] = 5,
+ [snd_soc_dapm_scheduler] = 5,
+ [snd_soc_dapm_effect] = 5,
+ [snd_soc_dapm_src] = 5,
+ [snd_soc_dapm_asrc] = 5,
+ [snd_soc_dapm_encoder] = 5,
+ [snd_soc_dapm_decoder] = 5,
+ [snd_soc_dapm_switch] = 6,
+ [snd_soc_dapm_mixer_named_ctl] = 6,
+ [snd_soc_dapm_mixer] = 6,
+ [snd_soc_dapm_dac] = 7,
+ [snd_soc_dapm_mic] = 8,
+ [snd_soc_dapm_siggen] = 8,
+ [snd_soc_dapm_input] = 8,
+ [snd_soc_dapm_output] = 8,
+ [snd_soc_dapm_micbias] = 9,
+ [snd_soc_dapm_vmid] = 9,
+ [snd_soc_dapm_mux] = 10,
+ [snd_soc_dapm_demux] = 10,
+ [snd_soc_dapm_aif_in] = 11,
+ [snd_soc_dapm_aif_out] = 11,
+ [snd_soc_dapm_dai_in] = 11,
+ [snd_soc_dapm_dai_out] = 11,
+ [snd_soc_dapm_dai_link] = 12,
+ [snd_soc_dapm_supply] = 13,
+ [snd_soc_dapm_clock_supply] = 14,
+ [snd_soc_dapm_pinctrl] = 14,
+ [snd_soc_dapm_regulator_supply] = 14,
+ [snd_soc_dapm_post] = 15,
};
static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
@@ -133,6 +157,7 @@ static void pop_wait(u32 pop_time)
schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
}
+__printf(3, 4)
static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
{
va_list args;
@@ -295,7 +320,24 @@ EXPORT_SYMBOL_GPL(dapm_mark_endpoints_dirty);
static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
const struct snd_soc_dapm_widget *_widget)
{
- return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
+ struct snd_soc_dapm_widget *w;
+
+ w = kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
+ if (!w)
+ return NULL;
+
+ /*
+ * w->name is duplicated in caller, but w->sname isn't.
+ * Duplicate it here if defined
+ */
+ if (_widget->sname) {
+ w->sname = kstrdup_const(_widget->sname, GFP_KERNEL);
+ if (!w->sname) {
+ kfree(w);
+ return NULL;
+ }
+ }
+ return w;
}
struct dapm_kcontrol_data {
@@ -1386,11 +1428,17 @@ static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
{
int *sort;
+ BUILD_BUG_ON(ARRAY_SIZE(dapm_up_seq) != SND_SOC_DAPM_TYPE_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(dapm_down_seq) != SND_SOC_DAPM_TYPE_COUNT);
+
if (power_up)
sort = dapm_up_seq;
else
sort = dapm_down_seq;
+ WARN_ONCE(sort[a->id] == 0, "offset a->id %d not initialized\n", a->id);
+ WARN_ONCE(sort[b->id] == 0, "offset b->id %d not initialized\n", b->id);
+
if (sort[a->id] != sort[b->id])
return sort[a->id] - sort[b->id];
if (a->subseq != b->subseq) {
@@ -2019,19 +2067,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
out = is_connected_output_ep(w, NULL, NULL);
}
- ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
+ ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
w->name, w->power ? "On" : "Off",
w->force ? " (forced)" : "", in, out);
if (w->reg >= 0)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
" - R%d(0x%x) mask 0x%x",
w->reg, w->reg, w->mask << w->shift);
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
if (w->sname)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
w->sname,
w->active ? "active" : "inactive");
@@ -2044,7 +2092,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (!p->connect)
continue;
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
" %s \"%s\" \"%s\"\n",
(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
p->name ? p->name : "static",
@@ -2412,6 +2460,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
kfree(w->kcontrols);
kfree_const(w->name);
+ kfree_const(w->sname);
kfree(w);
}
@@ -2525,6 +2574,81 @@ int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
+static int dapm_update_dai_chan(struct snd_soc_dapm_path *p,
+ struct snd_soc_dapm_widget *w,
+ int channels)
+{
+ switch (w->id) {
+ case snd_soc_dapm_aif_out:
+ case snd_soc_dapm_aif_in:
+ break;
+ default:
+ return 0;
+ }
+
+ dev_dbg(w->dapm->dev, "%s DAI route %s -> %s\n",
+ w->channel < channels ? "Connecting" : "Disconnecting",
+ p->source->name, p->sink->name);
+
+ if (w->channel < channels)
+ soc_dapm_connect_path(p, true, "dai update");
+ else
+ soc_dapm_connect_path(p, false, "dai update");
+
+ return 0;
+}
+
+static int dapm_update_dai_unlocked(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int dir = substream->stream;
+ int channels = params_channels(params);
+ struct snd_soc_dapm_path *p;
+ struct snd_soc_dapm_widget *w;
+ int ret;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ w = dai->playback_widget;
+ else
+ w = dai->capture_widget;
+
+ if (!w)
+ return 0;
+
+ dev_dbg(dai->dev, "Update DAI routes for %s %s\n", dai->name,
+ dir == SNDRV_PCM_STREAM_PLAYBACK ? "playback" : "capture");
+
+ snd_soc_dapm_widget_for_each_sink_path(w, p) {
+ ret = dapm_update_dai_chan(p, p->sink, channels);
+ if (ret < 0)
+ return ret;
+ }
+
+ snd_soc_dapm_widget_for_each_source_path(w, p) {
+ ret = dapm_update_dai_chan(p, p->source, channels);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret;
+
+ mutex_lock_nested(&rtd->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+ ret = dapm_update_dai_unlocked(substream, params, dai);
+ mutex_unlock(&rtd->card->dapm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_update_dai);
+
/*
* dapm_update_widget_flags() - Re-compute widget sink and source flags
* @w: The widget for which to update the flags
@@ -2741,6 +2865,8 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
char prefixed_sink[80];
char prefixed_source[80];
const char *prefix;
+ unsigned int sink_ref = 0;
+ unsigned int source_ref = 0;
int ret;
prefix = soc_dapm_prefix(dapm);
@@ -2774,6 +2900,11 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
if (wsource)
break;
}
+ sink_ref++;
+ if (sink_ref > 1)
+ dev_warn(dapm->dev,
+ "ASoC: sink widget %s overwritten\n",
+ w->name);
continue;
}
if (!wsource && !(strcmp(w->name, source))) {
@@ -2783,6 +2914,11 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
if (wsink)
break;
}
+ source_ref++;
+ if (source_ref > 1)
+ dev_warn(dapm->dev,
+ "ASoC: source widget %s overwritten\n",
+ w->name);
}
}
/* use widget from another DAPM context if not found from this */
@@ -3469,6 +3605,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
else
w->name = kstrdup_const(widget->name, GFP_KERNEL);
if (w->name == NULL) {
+ kfree_const(w->sname);
kfree(w);
return ERR_PTR(-ENOMEM);
}
@@ -3689,6 +3826,8 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
ret = soc_dai_hw_params(&substream, params, source);
if (ret < 0)
goto out;
+
+ dapm_update_dai_unlocked(&substream, params, source);
}
substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
@@ -3709,6 +3848,8 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
ret = soc_dai_hw_params(&substream, params, sink);
if (ret < 0)
goto out;
+
+ dapm_update_dai_unlocked(&substream, params, sink);
}
break;
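Among the soc-dapm.c changes, dapm_cnew_widget() now duplicates the widget's stream name and snd_soc_dapm_free_widget() releases it, so the copy fully owns its strings. A userspace sketch of that duplicate-plus-owned-string pattern follows, with strdup()/free() standing in for the kernel's kstrdup_const()/kfree_const().

#include <stdlib.h>
#include <string.h>

struct widget {
	const char *sname;	/* owned by the duplicated widget */
	int id;
};

static struct widget *widget_dup(const struct widget *src)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		return NULL;
	memcpy(w, src, sizeof(*w));

	/* duplicate the owned string too, and unwind on failure */
	if (src->sname) {
		w->sname = strdup(src->sname);
		if (!w->sname) {
			free(w);
			return NULL;
		}
	}
	return w;
}

static void widget_free(struct widget *w)
{
	if (!w)
		return;
	free((void *)w->sname);	/* mirrors kfree_const(w->sname) */
	free(w);
}

int main(void)
{
	struct widget tmpl = { .sname = "Playback", .id = 1 };
	struct widget *w = widget_dup(&tmpl);

	widget_free(w);
	return 0;
}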
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 30e791a53352..748f5f641002 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -265,12 +265,10 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
const struct snd_dmaengine_pcm_config *config = pcm->config;
struct device *dev = component->dev;
- struct snd_dmaengine_dai_dma_data *dma_data;
struct snd_pcm_substream *substream;
size_t prealloc_buffer_size;
size_t max_buffer_size;
unsigned int i;
- int ret;
if (config && config->prealloc_buffer_size) {
prealloc_buffer_size = config->prealloc_buffer_size;
@@ -285,12 +283,9 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
if (!substream)
continue;
- dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
- if (!pcm->chan[i] &&
- (pcm->flags & SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME))
+ if (!pcm->chan[i] && config && config->chan_names[i])
pcm->chan[i] = dma_request_slave_channel(dev,
- dma_data->chan_name);
+ config->chan_names[i]);
if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
@@ -303,13 +298,11 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
return -EINVAL;
}
- ret = snd_pcm_lib_preallocate_pages(substream,
+ snd_pcm_lib_preallocate_pages(substream,
SNDRV_DMA_TYPE_DEV_IRAM,
dmaengine_dma_dev(pcm, substream),
prealloc_buffer_size,
max_buffer_size);
- if (ret)
- return ret;
if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
@@ -413,9 +406,8 @@ static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
const char *name;
struct dma_chan *chan;
- if ((pcm->flags & (SND_DMAENGINE_PCM_FLAG_NO_DT |
- SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME)) ||
- !dev->of_node)
+ if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node &&
+ !(config && config->dma_dev && config->dma_dev->of_node)))
return 0;
if (config && config->dma_dev) {
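The dmaengine PCM hunks above request a DMA channel by the per-stream name from the optional config before falling back to the compat lookup, guarding every dereference of config. The standalone sketch below mirrors that fallback chain; request_by_name() and request_compat() are invented stand-ins for the dmaengine calls.

#include <stdio.h>
#include <string.h>

struct pcm_config {
	const char *chan_names[2];	/* [0] = playback, [1] = capture */
};

static void *request_by_name(const char *name)
{
	static int tx_chan;

	/* pretend only a channel named "tx" exists */
	return strcmp(name, "tx") == 0 ? (void *)&tx_chan : NULL;
}

static void *request_compat(int stream)
{
	static int compat_chan;

	(void)stream;
	return &compat_chan;
}

static void *get_channel(const struct pcm_config *config, int stream)
{
	void *chan = NULL;

	/* explicit per-stream name first, if a config provides one */
	if (config && config->chan_names[stream])
		chan = request_by_name(config->chan_names[stream]);

	/* otherwise fall back to the compat lookup */
	if (!chan)
		chan = request_compat(stream);

	return chan;
}

int main(void)
{
	struct pcm_config cfg = { .chan_names = { "tx", NULL } };

	printf("playback chan %p\n", get_channel(&cfg, 0));
	printf("capture  chan %p\n", get_channel(&cfg, 1));
	printf("no config chan %p\n", get_channel(NULL, 0));
	return 0;
}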
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 03f36e534050..0d5ec68a1e50 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -969,6 +969,8 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
codec_dai->channels = params_channels(&codec_params);
codec_dai->sample_bits = snd_pcm_format_physical_width(
params_format(&codec_params));
+
+ snd_soc_dapm_update_dai(substream, &codec_params, codec_dai);
}
ret = soc_dai_hw_params(substream, params, cpu_dai);
@@ -998,6 +1000,8 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
cpu_dai->sample_bits =
snd_pcm_format_physical_width(params_format(params));
+ snd_soc_dapm_update_dai(substream, params, cpu_dai);
+
ret = soc_pcm_params_symmetry(substream, params);
if (ret)
goto component_err;
@@ -3155,6 +3159,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
}
pcm->private_free = soc_pcm_private_free;
+ pcm->no_device_suspend = true;
out:
dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
(rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 045ef136903d..25fca7055464 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -382,10 +382,10 @@ static void remove_mixer(struct snd_soc_component *comp,
if (dobj->ops && dobj->ops->control_unload)
dobj->ops->control_unload(comp, dobj);
- if (sm->dobj.control.kcontrol->tlv.p)
- p = sm->dobj.control.kcontrol->tlv.p;
- snd_ctl_remove(card, sm->dobj.control.kcontrol);
- list_del(&sm->dobj.list);
+ if (dobj->control.kcontrol->tlv.p)
+ p = dobj->control.kcontrol->tlv.p;
+ snd_ctl_remove(card, dobj->control.kcontrol);
+ list_del(&dobj->list);
kfree(sm);
kfree(p);
}
@@ -404,12 +404,13 @@ static void remove_enum(struct snd_soc_component *comp,
if (dobj->ops && dobj->ops->control_unload)
dobj->ops->control_unload(comp, dobj);
- snd_ctl_remove(card, se->dobj.control.kcontrol);
- list_del(&se->dobj.list);
+ snd_ctl_remove(card, dobj->control.kcontrol);
+ list_del(&dobj->list);
- kfree(se->dobj.control.dvalues);
+ kfree(dobj->control.dvalues);
for (i = 0; i < se->items; i++)
- kfree(se->dobj.control.dtexts[i]);
+ kfree(dobj->control.dtexts[i]);
+ kfree(dobj->control.dtexts);
kfree(se);
}
@@ -427,11 +428,28 @@ static void remove_bytes(struct snd_soc_component *comp,
if (dobj->ops && dobj->ops->control_unload)
dobj->ops->control_unload(comp, dobj);
- snd_ctl_remove(card, sb->dobj.control.kcontrol);
- list_del(&sb->dobj.list);
+ snd_ctl_remove(card, dobj->control.kcontrol);
+ list_del(&dobj->list);
kfree(sb);
}
+/* remove a route */
+static void remove_route(struct snd_soc_component *comp,
+ struct snd_soc_dobj *dobj, int pass)
+{
+ struct snd_soc_dapm_route *route =
+ container_of(dobj, struct snd_soc_dapm_route, dobj);
+
+ if (pass != SOC_TPLG_PASS_GRAPH)
+ return;
+
+ if (dobj->ops && dobj->ops->dapm_route_unload)
+ dobj->ops->dapm_route_unload(comp, dobj);
+
+ list_del(&dobj->list);
+ kfree(route);
+}
+
/* remove a widget and it's kcontrols - routes must be removed first */
static void remove_widget(struct snd_soc_component *comp,
struct snd_soc_dobj *dobj, int pass)
@@ -464,9 +482,10 @@ static void remove_widget(struct snd_soc_component *comp,
snd_ctl_remove(card, kcontrol);
- kfree(se->dobj.control.dvalues);
+ kfree(dobj->control.dvalues);
for (j = 0; j < se->items; j++)
- kfree(se->dobj.control.dtexts[j]);
+ kfree(dobj->control.dtexts[j]);
+ kfree(dobj->control.dtexts);
kfree(se);
kfree(w->kcontrol_news[i].name);
@@ -493,6 +512,8 @@ static void remove_widget(struct snd_soc_component *comp,
free_news:
kfree(w->kcontrol_news);
+ list_del(&dobj->list);
+
/* widget w is freed by soc-dapm.c */
}
@@ -502,6 +523,7 @@ static void remove_dai(struct snd_soc_component *comp,
{
struct snd_soc_dai_driver *dai_drv =
container_of(dobj, struct snd_soc_dai_driver, dobj);
+ struct snd_soc_dai *dai;
if (pass != SOC_TPLG_PASS_PCM_DAI)
return;
@@ -509,6 +531,10 @@ static void remove_dai(struct snd_soc_component *comp,
if (dobj->ops && dobj->ops->dai_unload)
dobj->ops->dai_unload(comp, dobj);
+ list_for_each_entry(dai, &comp->dai_list, list)
+ if (dai->driver == dai_drv)
+ dai->driver = NULL;
+
kfree(dai_drv->name);
list_del(&dobj->list);
kfree(dai_drv);
@@ -536,6 +562,25 @@ static void remove_link(struct snd_soc_component *comp,
kfree(link);
}
+/* unload dai link */
+static void remove_backend_link(struct snd_soc_component *comp,
+ struct snd_soc_dobj *dobj, int pass)
+{
+ if (pass != SOC_TPLG_PASS_LINK)
+ return;
+
+ if (dobj->ops && dobj->ops->link_unload)
+ dobj->ops->link_unload(comp, dobj);
+
+ /*
+	 * We don't free the link here the way remove_link() does, since BE
+ * links are not allocated by topology.
+	 * We do, however, need to reset the dobj type to its initial value
+ */
+ dobj->type = SND_SOC_DOBJ_NONE;
+ list_del(&dobj->list);
+}
+
/* bind a kcontrol to it's IO handlers */
static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
struct snd_kcontrol_new *k,
@@ -1115,9 +1160,10 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
struct snd_soc_tplg_hdr *hdr)
{
struct snd_soc_dapm_context *dapm = &tplg->comp->dapm;
- struct snd_soc_dapm_route route;
struct snd_soc_tplg_dapm_graph_elem *elem;
- int count = hdr->count, i;
+ struct snd_soc_dapm_route **routes;
+ int count = hdr->count, i, j;
+ int ret = 0;
if (tplg->pass != SOC_TPLG_PASS_GRAPH) {
tplg->pos += hdr->size + hdr->payload_size;
@@ -1136,36 +1182,85 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
dev_dbg(tplg->dev, "ASoC: adding %d DAPM routes for index %d\n", count,
hdr->index);
+ /* allocate memory for pointer to array of dapm routes */
+ routes = kcalloc(count, sizeof(struct snd_soc_dapm_route *),
+ GFP_KERNEL);
+ if (!routes)
+ return -ENOMEM;
+
+ /*
+ * allocate memory for each dapm route in the array.
+ * This needs to be done individually so that
+ * each route can be freed when it is removed in remove_route().
+ */
+ for (i = 0; i < count; i++) {
+ routes[i] = kzalloc(sizeof(*routes[i]), GFP_KERNEL);
+ if (!routes[i]) {
+ /* free previously allocated memory */
+ for (j = 0; j < i; j++)
+ kfree(routes[j]);
+
+ kfree(routes);
+ return -ENOMEM;
+ }
+ }
+
for (i = 0; i < count; i++) {
elem = (struct snd_soc_tplg_dapm_graph_elem *)tplg->pos;
tplg->pos += sizeof(struct snd_soc_tplg_dapm_graph_elem);
/* validate routes */
if (strnlen(elem->source, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
- return -EINVAL;
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+ ret = -EINVAL;
+ break;
+ }
if (strnlen(elem->sink, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
- return -EINVAL;
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+ ret = -EINVAL;
+ break;
+ }
if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
- SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
- return -EINVAL;
+ SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+ ret = -EINVAL;
+ break;
+ }
+
+ routes[i]->source = elem->source;
+ routes[i]->sink = elem->sink;
- route.source = elem->source;
- route.sink = elem->sink;
- route.connected = NULL; /* set to NULL atm for tplg users */
+ /* set to NULL atm for tplg users */
+ routes[i]->connected = NULL;
if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0)
- route.control = NULL;
+ routes[i]->control = NULL;
else
- route.control = elem->control;
+ routes[i]->control = elem->control;
- soc_tplg_add_route(tplg, &route);
+ /* add route dobj to dobj_list */
+ routes[i]->dobj.type = SND_SOC_DOBJ_GRAPH;
+ routes[i]->dobj.ops = tplg->ops;
+ routes[i]->dobj.index = tplg->index;
+ list_add(&routes[i]->dobj.list, &tplg->comp->dobj_list);
+
+ soc_tplg_add_route(tplg, routes[i]);
/* add route, but keep going if some fail */
- snd_soc_dapm_add_routes(dapm, &route, 1);
+ snd_soc_dapm_add_routes(dapm, routes[i], 1);
}
- return 0;
+ /* free memory allocated for all dapm routes in case of error */
+ if (ret < 0)
+ for (i = 0; i < count ; i++)
+ kfree(routes[i]);
+
+ /*
+ * free pointer to array of dapm routes as this is no longer needed.
+ * The memory allocated for each dapm route will be freed
+ * when it is removed in remove_route().
+ */
+ kfree(routes);
+
+ return ret;
}
static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
@@ -1359,6 +1454,7 @@ err_se:
kfree(se->dobj.control.dvalues);
for (j = 0; j < ec->items; j++)
kfree(se->dobj.control.dtexts[j]);
+ kfree(se->dobj.control.dtexts);
kfree(se);
kfree(kc[i].name);
@@ -1578,6 +1674,9 @@ widget:
if (ret < 0)
goto ready_err;
+ kfree(template.sname);
+ kfree(template.name);
+
return 0;
ready_err:
@@ -2088,6 +2187,12 @@ static int soc_tplg_link_config(struct soc_tplg *tplg,
return ret;
}
+ /* for unloading it in snd_soc_tplg_component_remove */
+ link->dobj.index = tplg->index;
+ link->dobj.ops = tplg->ops;
+ link->dobj.type = SND_SOC_DOBJ_BACKEND_LINK;
+ list_add(&link->dobj.list, &tplg->comp->dobj_list);
+
return 0;
}
@@ -2482,6 +2587,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
{
struct soc_tplg tplg;
+ int ret;
/* setup parsing context */
memset(&tplg, 0, sizeof(tplg));
@@ -2495,7 +2601,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
tplg.bytes_ext_ops = ops->bytes_ext_ops;
tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
- return soc_tplg_load(&tplg);
+ ret = soc_tplg_load(&tplg);
+ /* free the created components if fail to load topology */
+ if (ret)
+ snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
@@ -2562,6 +2673,9 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
case SND_SOC_DOBJ_BYTES:
remove_bytes(comp, dobj, pass);
break;
+ case SND_SOC_DOBJ_GRAPH:
+ remove_route(comp, dobj, pass);
+ break;
case SND_SOC_DOBJ_WIDGET:
remove_widget(comp, dobj, pass);
break;
@@ -2571,6 +2685,13 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
case SND_SOC_DOBJ_DAI_LINK:
remove_link(comp, dobj, pass);
break;
+ case SND_SOC_DOBJ_BACKEND_LINK:
+ /*
+ * call link_unload ops if extra
+ * deinitialization is needed.
+ */
+ remove_backend_link(comp, dobj, pass);
+ break;
default:
dev_err(comp->dev, "ASoC: invalid component type %d for removal\n",
dobj->type);
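The soc-topology.c graph loader above now allocates each DAPM route individually behind a temporary pointer array, so every route can later be freed on its own in remove_route(), and it rolls the allocations back if any single one fails. A standalone sketch of that allocation pattern follows; struct route and alloc_routes() are illustrative names, not the topology API.

#include <stdlib.h>

struct route {
	const char *source;
	const char *sink;
};

static struct route **alloc_routes(int count)
{
	struct route **routes;
	int i, j;

	/* temporary array of pointers to individually allocated routes */
	routes = calloc(count, sizeof(*routes));
	if (!routes)
		return NULL;

	for (i = 0; i < count; i++) {
		routes[i] = calloc(1, sizeof(*routes[i]));
		if (!routes[i]) {
			/* roll back everything allocated so far */
			for (j = 0; j < i; j++)
				free(routes[j]);
			free(routes);
			return NULL;
		}
	}
	return routes;
}

int main(void)
{
	struct route **routes = alloc_routes(4);
	int i;

	if (!routes)
		return 1;

	/* ... hand each routes[i] to whoever will free it later ... */

	for (i = 0; i < 4; i++)
		free(routes[i]);
	free(routes);	/* the pointer array itself is only scratch space */
	return 0;
}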
diff --git a/sound/soc/sprd/Kconfig b/sound/soc/sprd/Kconfig
new file mode 100644
index 000000000000..43ece7daf0e9
--- /dev/null
+++ b/sound/soc/sprd/Kconfig
@@ -0,0 +1,6 @@
+config SND_SOC_SPRD
+ tristate "SoC Audio for the Spreadtrum SoC chips"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+ Say Y or M if you want to add support for codecs attached to
+ the Spreadtrum SoCs' Audio interfaces.
diff --git a/sound/soc/sprd/Makefile b/sound/soc/sprd/Makefile
new file mode 100644
index 000000000000..47620e57a9f2
--- /dev/null
+++ b/sound/soc/sprd/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+# Spreadtrum Audio Support
+
+obj-$(CONFIG_SND_SOC_SPRD) += sprd-pcm-dma.o
diff --git a/sound/soc/sprd/sprd-pcm-dma.c b/sound/soc/sprd/sprd-pcm-dma.c
new file mode 100644
index 000000000000..cbb27c4abeba
--- /dev/null
+++ b/sound/soc/sprd/sprd-pcm-dma.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/sprd-dma.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "sprd-pcm-dma.h"
+
+#define DRV_NAME "sprd_pcm_dma"
+#define SPRD_PCM_DMA_LINKLIST_SIZE 64
+#define SPRD_PCM_DMA_BRUST_LEN 640
+
+struct sprd_pcm_dma_data {
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ dma_addr_t phys;
+ void *virt;
+ int pre_pointer;
+};
+
+struct sprd_pcm_dma_private {
+ struct snd_pcm_substream *substream;
+ struct sprd_pcm_dma_params *params;
+ struct sprd_pcm_dma_data data[SPRD_PCM_CHANNEL_MAX];
+ int hw_chan;
+ int dma_addr_offset;
+};
+
+static const struct snd_pcm_hardware sprd_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .period_bytes_min = 1,
+ .period_bytes_max = 64 * 1024,
+ .periods_min = 1,
+ .periods_max = PAGE_SIZE / SPRD_PCM_DMA_LINKLIST_SIZE,
+ .buffer_bytes_max = 64 * 1024,
+};
+
+static int sprd_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct device *dev = component->dev;
+ struct sprd_pcm_dma_private *dma_private;
+ int hw_chan = SPRD_PCM_CHANNEL_MAX;
+ int size, ret, i;
+
+ snd_soc_set_runtime_hwparams(substream, &sprd_pcm_hardware);
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ SPRD_PCM_DMA_BRUST_LEN);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ SPRD_PCM_DMA_BRUST_LEN);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ dma_private = devm_kzalloc(dev, sizeof(*dma_private), GFP_KERNEL);
+ if (!dma_private)
+ return -ENOMEM;
+
+ size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
+
+ for (i = 0; i < hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ data->virt = dmam_alloc_coherent(dev, size, &data->phys,
+ GFP_KERNEL);
+ if (!data->virt) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+
+ dma_private->hw_chan = hw_chan;
+ runtime->private_data = dma_private;
+ dma_private->substream = substream;
+
+ return 0;
+
+error:
+ for (i = 0; i < hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->virt)
+ dmam_free_coherent(dev, size, data->virt, data->phys);
+ }
+
+ devm_kfree(dev, dma_private);
+ return ret;
+}
+
+static int sprd_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct device *dev = component->dev;
+ int size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
+ int i;
+
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ dmam_free_coherent(dev, size, data->virt, data->phys);
+ }
+
+ devm_kfree(dev, dma_private);
+
+ return 0;
+}
+
+static void sprd_pcm_dma_complete(void *data)
+{
+ struct sprd_pcm_dma_private *dma_private = data;
+ struct snd_pcm_substream *substream = dma_private->substream;
+
+ snd_pcm_period_elapsed(substream);
+}
+
+static void sprd_pcm_release_dma_channel(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ int i;
+
+ for (i = 0; i < SPRD_PCM_CHANNEL_MAX; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->chan) {
+ dma_release_channel(data->chan);
+ data->chan = NULL;
+ }
+ }
+}
+
+static int sprd_pcm_request_dma_channel(struct snd_pcm_substream *substream,
+ int channels)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct device *dev = component->dev;
+ struct sprd_pcm_dma_params *dma_params = dma_private->params;
+ int i;
+
+ if (channels > SPRD_PCM_CHANNEL_MAX) {
+ dev_err(dev, "invalid dma channel number:%d\n", channels);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < channels; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ data->chan = dma_request_slave_channel(dev,
+ dma_params->chan_name[i]);
+ if (!data->chan) {
+ dev_err(dev, "failed to request dma channel:%s\n",
+ dma_params->chan_name[i]);
+ sprd_pcm_release_dma_channel(substream);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int sprd_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct sprd_pcm_dma_params *dma_params;
+ size_t totsize = params_buffer_bytes(params);
+ size_t period = params_period_bytes(params);
+ int channels = params_channels(params);
+ int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct scatterlist *sg;
+ unsigned long flags;
+ int ret, i, j, sg_num;
+
+ dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ if (!dma_params) {
+ dev_warn(component->dev, "no dma parameters setting\n");
+ dma_private->params = NULL;
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+ runtime->dma_bytes = totsize;
+ return 0;
+ }
+
+ if (!dma_private->params) {
+ dma_private->params = dma_params;
+ ret = sprd_pcm_request_dma_channel(substream, channels);
+ if (ret)
+ return ret;
+ }
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
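+ /*
+ * The interleaved buffer is split between the hardware channels:
+ * each channel owns totsize / channels bytes, described by one
+ * scatterlist entry per period.
+ */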
+ runtime->dma_bytes = totsize;
+ sg_num = totsize / period;
+ dma_private->dma_addr_offset = totsize / channels;
+
+ sg = devm_kcalloc(component->dev, sg_num, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto sg_err;
+ }
+
+ for (i = 0; i < channels; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+ struct dma_chan *chan = data->chan;
+ struct dma_slave_config config = { };
+ struct sprd_dma_linklist link = { };
+ enum dma_transfer_direction dir;
+ struct scatterlist *sgt = sg;
+
+ config.src_maxburst = dma_params->fragment_len[i];
+ config.src_addr_width = dma_params->datawidth[i];
+ config.dst_addr_width = dma_params->datawidth[i];
+ if (is_playback) {
+ config.src_addr = runtime->dma_addr +
+ i * dma_private->dma_addr_offset;
+ config.dst_addr = dma_params->dev_phys[i];
+ dir = DMA_MEM_TO_DEV;
+ } else {
+ config.src_addr = dma_params->dev_phys[i];
+ config.dst_addr = runtime->dma_addr +
+ i * dma_private->dma_addr_offset;
+ dir = DMA_DEV_TO_MEM;
+ }
+
+ sg_init_table(sgt, sg_num);
+ for (j = 0; j < sg_num; j++, sgt++) {
+ u32 sg_len = period / channels;
+
+ sg_dma_len(sgt) = sg_len;
+ sg_dma_address(sgt) = runtime->dma_addr +
+ i * dma_private->dma_addr_offset + sg_len * j;
+ }
+
+ /*
+ * Configure the link-list address for the DMA engine link-list
+ * mode.
+ */
+ link.virt_addr = (unsigned long)data->virt;
+ link.phy_addr = data->phys;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(component->dev,
+ "failed to set slave configuration: %d\n", ret);
+ goto config_err;
+ }
+
+ /*
+ * The flags encode the DMA request mode, interrupt mode, channel
+ * mode and channel trigger mode for this transfer.
+ */
+ flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
+ data->desc = chan->device->device_prep_slave_sg(chan, sg,
+ sg_num, dir,
+ flags, &link);
+ if (!data->desc) {
+ dev_err(component->dev, "failed to prepare slave sg\n");
+ ret = -ENOMEM;
+ goto config_err;
+ }
+
+ if (!runtime->no_period_wakeup) {
+ data->desc->callback = sprd_pcm_dma_complete;
+ data->desc->callback_param = dma_private;
+ }
+ }
+
+ devm_kfree(component->dev, sg);
+
+ return 0;
+
+config_err:
+ devm_kfree(component->dev, sg);
+sg_err:
+ sprd_pcm_release_dma_channel(substream);
+ return ret;
+}
+
+static int sprd_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_set_runtime_buffer(substream, NULL);
+ sprd_pcm_release_dma_channel(substream);
+
+ return 0;
+}
+
+static int sprd_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct sprd_pcm_dma_private *dma_private =
+ substream->runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ int ret = 0, i;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (!data->desc)
+ continue;
+
+ data->cookie = dmaengine_submit(data->desc);
+ ret = dma_submit_error(data->cookie);
+ if (ret) {
+ dev_err(component->dev,
+ "failed to submit dma request: %d\n",
+ ret);
+ return ret;
+ }
+
+ dma_async_issue_pending(data->chan);
+ }
+
+ break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->chan)
+ dmaengine_resume(data->chan);
+ }
+
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->chan)
+ dmaengine_terminate_async(data->chan);
+ }
+
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (data->chan)
+ dmaengine_pause(data->chan);
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sprd_pcm_dma_private *dma_private = runtime->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ int pointer[SPRD_PCM_CHANNEL_MAX];
+ int bytes_of_pointer = 0, sel_max = 0, i;
+ snd_pcm_uframes_t x;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ for (i = 0; i < dma_private->hw_chan; i++) {
+ struct sprd_pcm_dma_data *data = &dma_private->data[i];
+
+ if (!data->chan)
+ continue;
+
+ status = dmaengine_tx_status(data->chan, data->cookie, &state);
+ if (status == DMA_ERROR) {
+ dev_err(component->dev,
+ "failed to get dma channel %d status\n", i);
+ return 0;
+ }
+
+ /*
+ * The DMA engine only reports the current transfer address, so
+ * convert it into an offset within this channel's part of the
+ * buffer.
+ */
+ pointer[i] = state.residue - runtime->dma_addr -
+ i * dma_private->dma_addr_offset;
+
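+ /*
+ * Combine the per-channel offsets into one stream position: with
+ * both channels active the position is twice a per-channel offset,
+ * and when the channels disagree on whether they wrapped since the
+ * last query the larger offset is used, otherwise the smaller one.
+ */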
+ if (i == 0) {
+ bytes_of_pointer = pointer[i];
+ sel_max = pointer[i] < data->pre_pointer ? 1 : 0;
+ } else {
+ sel_max ^= pointer[i] < data->pre_pointer ? 1 : 0;
+
+ if (sel_max)
+ bytes_of_pointer =
+ max(pointer[i], pointer[i - 1]) << 1;
+ else
+ bytes_of_pointer =
+ min(pointer[i], pointer[i - 1]) << 1;
+ }
+
+ data->pre_pointer = pointer[i];
+ }
+
+ x = bytes_to_frames(runtime, bytes_of_pointer);
+ if (x == runtime->buffer_size)
+ x = 0;
+
+ return x;
+}
+
+static int sprd_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ return remap_pfn_range(vma, vma->vm_start,
+ runtime->dma_addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static struct snd_pcm_ops sprd_pcm_ops = {
+ .open = sprd_pcm_open,
+ .close = sprd_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = sprd_pcm_hw_params,
+ .hw_free = sprd_pcm_hw_free,
+ .trigger = sprd_pcm_trigger,
+ .pointer = sprd_pcm_pointer,
+ .mmap = sprd_pcm_mmap,
+};
+
+static int sprd_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ struct snd_pcm_substream *substream;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (substream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
+ sprd_pcm_hardware.buffer_bytes_max,
+ &substream->dma_buffer);
+ if (ret) {
+ dev_err(card->dev,
+ "can't alloc playback dma buffer: %d\n", ret);
+ return ret;
+ }
+ }
+
+ substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (substream) {
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
+ sprd_pcm_hardware.buffer_bytes_max,
+ &substream->dma_buffer);
+ if (ret) {
+ dev_err(card->dev,
+ "can't alloc capture dma buffer: %d\n", ret);
+ snd_dma_free_pages(&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void sprd_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
+ substream = pcm->streams[i].substream;
+ if (substream) {
+ snd_dma_free_pages(&substream->dma_buffer);
+ substream->dma_buffer.area = NULL;
+ substream->dma_buffer.addr = 0;
+ }
+ }
+}
+
+static const struct snd_soc_component_driver sprd_soc_component = {
+ .name = DRV_NAME,
+ .ops = &sprd_pcm_ops,
+ .pcm_new = sprd_pcm_new,
+ .pcm_free = sprd_pcm_free,
+};
+
+static int sprd_soc_platform_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &sprd_soc_component,
+ NULL, 0);
+ if (ret)
+ dev_err(&pdev->dev, "could not register platform:%d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id sprd_pcm_of_match[] = {
+ { .compatible = "sprd,pcm-platform", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_pcm_of_match);
+
+static struct platform_driver sprd_pcm_driver = {
+ .driver = {
+ .name = "sprd-pcm-audio",
+ .of_match_table = sprd_pcm_of_match,
+ },
+
+ .probe = sprd_soc_platform_probe,
+};
+
+module_platform_driver(sprd_pcm_driver);
+
+MODULE_DESCRIPTION("Spreadtrum ASoC PCM DMA");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sprd-audio");
diff --git a/sound/soc/sprd/sprd-pcm-dma.h b/sound/soc/sprd/sprd-pcm-dma.h
new file mode 100644
index 000000000000..d85a34f1461d
--- /dev/null
+++ b/sound/soc/sprd/sprd-pcm-dma.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __SPRD_PCM_DMA_H
+#define __SPRD_PCM_DMA_H
+
+#define SPRD_PCM_CHANNEL_MAX 2
+
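+/*
+ * DMA parameters provided by the CPU DAI and retrieved by the PCM driver
+ * via snd_soc_dai_get_dma_data(): per-channel device FIFO addresses, data
+ * widths, fragment lengths and DMA channel names.
+ */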
+struct sprd_pcm_dma_params {
+ dma_addr_t dev_phys[SPRD_PCM_CHANNEL_MAX];
+ u32 datawidth[SPRD_PCM_CHANNEL_MAX];
+ u32 fragment_len[SPRD_PCM_CHANNEL_MAX];
+ const char *chan_name[SPRD_PCM_CHANNEL_MAX];
+};
+
+#endif /* __SPRD_PCM_DMA_H */
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 706ff005234f..47901983a6ff 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -262,8 +262,9 @@ static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
snd_soc_dai_get_drvdata(rtd->cpu_dai);
unsigned int size = DFSDM_MAX_PERIODS * DFSDM_MAX_PERIOD_SIZE;
- return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- priv->dev, size, size);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ priv->dev, size, size);
+ return 0;
}
static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
index 6d0bf78d114d..47c334de6b09 100644
--- a/sound/soc/stm/stm32_i2s.c
+++ b/sound/soc/stm/stm32_i2s.c
@@ -186,8 +186,9 @@ enum i2s_datlen {
#define STM32_I2S_IS_SLAVE(x) ((x)->ms_flg == I2S_MS_SLAVE)
/**
+ * struct stm32_i2s_data - private data of I2S
* @regmap_conf: I2S register map configuration pointer
- * @egmap: I2S register map pointer
+ * @regmap: I2S register map pointer
* @pdev: device data pointer
* @dai_drv: DAI driver pointer
* @dma_data_tx: dma configuration data for tx channel
@@ -200,6 +201,7 @@ enum i2s_datlen {
* @base: mmio register base virtual address
* @phys_addr: I2S registers physical base address
* @lock_fd: lock to manage race conditions in full duplex mode
+ * @irq_lock: prevent race condition with IRQ
* @dais_name: DAI name
* @mclk_rate: master clock frequency (Hz)
* @fmt: DAI protocol
@@ -221,6 +223,7 @@ struct stm32_i2s_data {
void __iomem *base;
dma_addr_t phys_addr;
spinlock_t lock_fd; /* Manage race conditions for full duplex */
+ spinlock_t irq_lock; /* used to prevent race condition with IRQ */
char dais_name[STM32_I2S_DAI_NAME_SIZE];
unsigned int mclk_rate;
unsigned int fmt;
@@ -246,8 +249,8 @@ static irqreturn_t stm32_i2s_isr(int irq, void *devid)
return IRQ_NONE;
}
- regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
- I2S_IFCR_MASK, flags);
+ regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG,
+ I2S_IFCR_MASK, flags);
if (flags & I2S_SR_OVR) {
dev_dbg(&pdev->dev, "Overrun\n");
@@ -262,8 +265,10 @@ static irqreturn_t stm32_i2s_isr(int irq, void *devid)
if (flags & I2S_SR_TIFRE)
dev_dbg(&pdev->dev, "Frame error\n");
- if (err)
+ spin_lock(&i2s->irq_lock);
+ if (err && i2s->substream)
snd_pcm_stop_xrun(i2s->substream);
+ spin_unlock(&i2s->irq_lock);
return IRQ_HANDLED;
}
@@ -276,7 +281,6 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
case STM32_I2S_CFG2_REG:
case STM32_I2S_IER_REG:
case STM32_I2S_SR_REG:
- case STM32_I2S_IFCR_REG:
case STM32_I2S_TXDR_REG:
case STM32_I2S_RXDR_REG:
case STM32_I2S_CGFR_REG:
@@ -488,7 +492,7 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai,
{
struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
int format = params_width(params);
- u32 cfgr, cfgr_mask, cfg1, cfg1_mask;
+ u32 cfgr, cfgr_mask, cfg1;
unsigned int fthlv;
int ret;
@@ -501,7 +505,7 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai,
switch (format) {
case 16:
cfgr = I2S_CGFR_DATLEN_SET(I2S_I2SMOD_DATLEN_16);
- cfgr_mask = I2S_CGFR_DATLEN_MASK;
+ cfgr_mask = I2S_CGFR_DATLEN_MASK | I2S_CGFR_CHLEN;
break;
case 32:
cfgr = I2S_CGFR_DATLEN_SET(I2S_I2SMOD_DATLEN_32) |
@@ -529,30 +533,32 @@ static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai,
if (ret < 0)
return ret;
- cfg1 = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN;
- cfg1_mask = cfg1;
-
fthlv = STM32_I2S_FIFO_SIZE * I2S_FIFO_TH_ONE_QUARTER / 4;
- cfg1 |= I2S_CFG1_FTHVL_SET(fthlv - 1);
- cfg1_mask |= I2S_CFG1_FTHVL_MASK;
+ cfg1 = I2S_CFG1_FTHVL_SET(fthlv - 1);
return regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG,
- cfg1_mask, cfg1);
+ I2S_CFG1_FTHVL_MASK, cfg1);
}
static int stm32_i2s_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ unsigned long flags;
+ int ret;
+ spin_lock_irqsave(&i2s->irq_lock, flags);
i2s->substream = substream;
+ spin_unlock_irqrestore(&i2s->irq_lock, flags);
- spin_lock(&i2s->lock_fd);
- i2s->refcount++;
- spin_unlock(&i2s->lock_fd);
+ ret = clk_prepare_enable(i2s->i2sclk);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
- return regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
- I2S_IFCR_MASK, I2S_IFCR_MASK);
+ return regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG,
+ I2S_IFCR_MASK, I2S_IFCR_MASK);
}
static int stm32_i2s_hw_params(struct snd_pcm_substream *substream,
@@ -589,6 +595,10 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
/* Enable i2s */
dev_dbg(cpu_dai->dev, "start I2S\n");
+ cfg1_mask = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN;
+ regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG,
+ cfg1_mask, cfg1_mask);
+
ret = regmap_update_bits(i2s->regmap, STM32_I2S_CR1_REG,
I2S_CR1_SPE, I2S_CR1_SPE);
if (ret < 0) {
@@ -596,28 +606,29 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
return ret;
}
- ret = regmap_update_bits(i2s->regmap, STM32_I2S_CR1_REG,
- I2S_CR1_CSTART, I2S_CR1_CSTART);
+ ret = regmap_write_bits(i2s->regmap, STM32_I2S_CR1_REG,
+ I2S_CR1_CSTART, I2S_CR1_CSTART);
if (ret < 0) {
dev_err(cpu_dai->dev, "Error %d starting I2S\n", ret);
return ret;
}
- regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
- I2S_IFCR_MASK, I2S_IFCR_MASK);
+ regmap_write_bits(i2s->regmap, STM32_I2S_IFCR_REG,
+ I2S_IFCR_MASK, I2S_IFCR_MASK);
+ spin_lock(&i2s->lock_fd);
+ i2s->refcount++;
if (playback_flg) {
ier = I2S_IER_UDRIE;
} else {
ier = I2S_IER_OVRIE;
- spin_lock(&i2s->lock_fd);
- if (i2s->refcount == 1)
- /* dummy write to trigger capture */
+ if (STM32_I2S_IS_MASTER(i2s) && i2s->refcount == 1)
+ /* dummy write to gate bus clocks */
regmap_write(i2s->regmap,
STM32_I2S_TXDR_REG, 0);
- spin_unlock(&i2s->lock_fd);
}
+ spin_unlock(&i2s->lock_fd);
if (STM32_I2S_IS_SLAVE(i2s))
ier |= I2S_IER_TIFREIE;
@@ -642,7 +653,6 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
spin_unlock(&i2s->lock_fd);
break;
}
- spin_unlock(&i2s->lock_fd);
dev_dbg(cpu_dai->dev, "stop I2S\n");
@@ -650,8 +660,10 @@ static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
I2S_CR1_SPE, 0);
if (ret < 0) {
dev_err(cpu_dai->dev, "Error %d disabling I2S\n", ret);
+ spin_unlock(&i2s->lock_fd);
return ret;
}
+ spin_unlock(&i2s->lock_fd);
cfg1_mask = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN;
regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG,
@@ -668,11 +680,16 @@ static void stm32_i2s_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
-
- i2s->substream = NULL;
+ unsigned long flags;
regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
I2S_CGFR_MCKOE, (unsigned int)~I2S_CGFR_MCKOE);
+
+ clk_disable_unprepare(i2s->i2sclk);
+
+ spin_lock_irqsave(&i2s->irq_lock, flags);
+ i2s->substream = NULL;
+ spin_unlock_irqrestore(&i2s->irq_lock, flags);
}
static int stm32_i2s_dai_probe(struct snd_soc_dai *cpu_dai)
@@ -703,6 +720,7 @@ static const struct regmap_config stm32_h7_i2s_regmap_conf = {
.volatile_reg = stm32_i2s_volatile_reg,
.writeable_reg = stm32_i2s_writeable_reg,
.fast_io = true,
+ .cache_type = REGCACHE_FLAT,
};
static const struct snd_soc_dai_ops stm32_i2s_pcm_dai_ops = {
@@ -866,76 +884,68 @@ static int stm32_i2s_probe(struct platform_device *pdev)
i2s->pdev = pdev;
i2s->ms_flg = I2S_MS_NOT_SET;
spin_lock_init(&i2s->lock_fd);
+ spin_lock_init(&i2s->irq_lock);
platform_set_drvdata(pdev, i2s);
ret = stm32_i2s_dais_init(pdev, i2s);
if (ret)
return ret;
- i2s->regmap = devm_regmap_init_mmio(&pdev->dev, i2s->base,
- i2s->regmap_conf);
+ i2s->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "pclk",
+ i2s->base, i2s->regmap_conf);
if (IS_ERR(i2s->regmap)) {
dev_err(&pdev->dev, "regmap init failed\n");
return PTR_ERR(i2s->regmap);
}
- ret = clk_prepare_enable(i2s->pclk);
- if (ret) {
- dev_err(&pdev->dev, "Enable pclk failed: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(i2s->i2sclk);
- if (ret) {
- dev_err(&pdev->dev, "Enable i2sclk failed: %d\n", ret);
- goto err_pclk_disable;
- }
-
ret = devm_snd_soc_register_component(&pdev->dev, &stm32_i2s_component,
i2s->dai_drv, 1);
if (ret)
- goto err_clocks_disable;
+ return ret;
ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
&stm32_i2s_pcm_config, 0);
if (ret)
- goto err_clocks_disable;
+ return ret;
/* Set SPI/I2S in i2s mode */
- ret = regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
- I2S_CGFR_I2SMOD, I2S_CGFR_I2SMOD);
- if (ret)
- goto err_clocks_disable;
+ return regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
+ I2S_CGFR_I2SMOD, I2S_CGFR_I2SMOD);
+}
- return ret;
+MODULE_DEVICE_TABLE(of, stm32_i2s_ids);
-err_clocks_disable:
- clk_disable_unprepare(i2s->i2sclk);
-err_pclk_disable:
- clk_disable_unprepare(i2s->pclk);
+#ifdef CONFIG_PM_SLEEP
+static int stm32_i2s_suspend(struct device *dev)
+{
+ struct stm32_i2s_data *i2s = dev_get_drvdata(dev);
- return ret;
+ regcache_cache_only(i2s->regmap, true);
+ regcache_mark_dirty(i2s->regmap);
+
+ return 0;
}
-static int stm32_i2s_remove(struct platform_device *pdev)
+static int stm32_i2s_resume(struct device *dev)
{
- struct stm32_i2s_data *i2s = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(i2s->i2sclk);
- clk_disable_unprepare(i2s->pclk);
+ struct stm32_i2s_data *i2s = dev_get_drvdata(dev);
- return 0;
+ regcache_cache_only(i2s->regmap, false);
+ return regcache_sync(i2s->regmap);
}
+#endif /* CONFIG_PM_SLEEP */
-MODULE_DEVICE_TABLE(of, stm32_i2s_ids);
+static const struct dev_pm_ops stm32_i2s_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_i2s_suspend, stm32_i2s_resume)
+};
static struct platform_driver stm32_i2s_driver = {
.driver = {
.name = "st,stm32-i2s",
.of_match_table = stm32_i2s_ids,
+ .pm = &stm32_i2s_pm_ops,
},
.probe = stm32_i2s_probe,
- .remove = stm32_i2s_remove,
};
module_platform_driver(stm32_i2s_driver);
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index bcb35cae2a2c..14c9591aae42 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -112,16 +112,21 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
if (!sai_provider) {
dev_err(&sai_client->pdev->dev,
"SAI sync provider data not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_dev;
}
/* Configure sync client */
ret = stm32_sai_sync_conf_client(sai_client, synci);
if (ret < 0)
- return ret;
+ goto out_put_dev;
/* Configure sync provider */
- return stm32_sai_sync_conf_provider(sai_provider, synco);
+ ret = stm32_sai_sync_conf_provider(sai_provider, synco);
+
+out_put_dev:
+ put_device(&pdev->dev);
+ return ret;
}
static int stm32_sai_probe(struct platform_device *pdev)
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index d4825700b63f..f9297228c41c 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -898,7 +898,7 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
struct snd_pcm_hw_params *params)
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
- int cr1, mask, div = 0;
+ int div = 0;
int sai_clk_rate, mclk_ratio, den;
unsigned int rate = params_rate(params);
@@ -943,10 +943,8 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
} else {
if (sai->mclk_rate) {
mclk_ratio = sai->mclk_rate / rate;
- if (mclk_ratio == 512) {
- mask = SAI_XCR1_OSR;
- cr1 = SAI_XCR1_OSR;
- } else if (mclk_ratio != 256) {
+ if ((mclk_ratio != 512) &&
+ (mclk_ratio != 256)) {
dev_err(cpu_dai->dev,
"Wrong mclk ratio %d\n",
mclk_ratio);
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 9a3cb7704810..15d08e343b47 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -64,9 +64,20 @@
#define SUN4I_CODEC_DAC_ACTL_DACAENR (31)
#define SUN4I_CODEC_DAC_ACTL_DACAENL (30)
#define SUN4I_CODEC_DAC_ACTL_MIXEN (29)
+#define SUN4I_CODEC_DAC_ACTL_LNG (26)
+#define SUN4I_CODEC_DAC_ACTL_FMG (23)
+#define SUN4I_CODEC_DAC_ACTL_MICG (20)
+#define SUN4I_CODEC_DAC_ACTL_LLNS (19)
+#define SUN4I_CODEC_DAC_ACTL_RLNS (18)
+#define SUN4I_CODEC_DAC_ACTL_LFMS (17)
+#define SUN4I_CODEC_DAC_ACTL_RFMS (16)
#define SUN4I_CODEC_DAC_ACTL_LDACLMIXS (15)
#define SUN4I_CODEC_DAC_ACTL_RDACRMIXS (14)
#define SUN4I_CODEC_DAC_ACTL_LDACRMIXS (13)
+#define SUN4I_CODEC_DAC_ACTL_MIC1LS (12)
+#define SUN4I_CODEC_DAC_ACTL_MIC1RS (11)
+#define SUN4I_CODEC_DAC_ACTL_MIC2LS (10)
+#define SUN4I_CODEC_DAC_ACTL_MIC2RS (9)
#define SUN4I_CODEC_DAC_ACTL_DACPAS (8)
#define SUN4I_CODEC_DAC_ACTL_MIXPAS (7)
#define SUN4I_CODEC_DAC_ACTL_PA_MUTE (6)
@@ -94,8 +105,11 @@
#define SUN4I_CODEC_ADC_ACTL_PREG1EN (29)
#define SUN4I_CODEC_ADC_ACTL_PREG2EN (28)
#define SUN4I_CODEC_ADC_ACTL_VMICEN (27)
+#define SUN4I_CODEC_ADC_ACTL_PREG1 (25)
+#define SUN4I_CODEC_ADC_ACTL_PREG2 (23)
#define SUN4I_CODEC_ADC_ACTL_VADCG (20)
#define SUN4I_CODEC_ADC_ACTL_ADCIS (17)
+#define SUN4I_CODEC_ADC_ACTL_LNPREG (13)
#define SUN4I_CODEC_ADC_ACTL_PA_EN (4)
#define SUN4I_CODEC_ADC_ACTL_DDE (3)
#define SUN4I_CODEC_ADC_DEBUG (0x2c)
@@ -110,6 +124,9 @@
/* Microphone controls (sun7i only) */
#define SUN7I_CODEC_AC_MIC_PHONE_CAL (0x3c)
+#define SUN7I_CODEC_AC_MIC_PHONE_CAL_PREG1 (29)
+#define SUN7I_CODEC_AC_MIC_PHONE_CAL_PREG2 (26)
+
/*
* sun6i specific registers
*
@@ -673,23 +690,91 @@ static const struct snd_kcontrol_new sun4i_codec_pa_mute =
SUN4I_CODEC_DAC_ACTL_PA_MUTE, 1, 0);
static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1);
+static DECLARE_TLV_DB_SCALE(sun4i_codec_linein_loopback_gain_scale, -150, 150,
+ 0);
+static DECLARE_TLV_DB_SCALE(sun4i_codec_linein_preamp_gain_scale, -1200, 300,
+ 0);
+static DECLARE_TLV_DB_SCALE(sun4i_codec_fmin_loopback_gain_scale, -450, 150,
+ 0);
+static DECLARE_TLV_DB_SCALE(sun4i_codec_micin_loopback_gain_scale, -450, 150,
+ 0);
+static DECLARE_TLV_DB_RANGE(sun4i_codec_micin_preamp_gain_scale,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 7, TLV_DB_SCALE_ITEM(3500, 300, 0));
+static DECLARE_TLV_DB_RANGE(sun7i_codec_micin_preamp_gain_scale,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0));
static const struct snd_kcontrol_new sun4i_codec_controls[] = {
SOC_SINGLE_TLV("Power Amplifier Volume", SUN4I_CODEC_DAC_ACTL,
SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0,
sun4i_codec_pa_volume_scale),
+ SOC_SINGLE_TLV("Line Playback Volume", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_LNG, 1, 0,
+ sun4i_codec_linein_loopback_gain_scale),
+ SOC_SINGLE_TLV("Line Boost Volume", SUN4I_CODEC_ADC_ACTL,
+ SUN4I_CODEC_ADC_ACTL_LNPREG, 7, 0,
+ sun4i_codec_linein_preamp_gain_scale),
+ SOC_SINGLE_TLV("FM Playback Volume", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_FMG, 3, 0,
+ sun4i_codec_fmin_loopback_gain_scale),
+ SOC_SINGLE_TLV("Mic Playback Volume", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_MICG, 7, 0,
+ sun4i_codec_micin_loopback_gain_scale),
+ SOC_SINGLE_TLV("Mic1 Boost Volume", SUN4I_CODEC_ADC_ACTL,
+ SUN4I_CODEC_ADC_ACTL_PREG1, 3, 0,
+ sun4i_codec_micin_preamp_gain_scale),
+ SOC_SINGLE_TLV("Mic2 Boost Volume", SUN4I_CODEC_ADC_ACTL,
+ SUN4I_CODEC_ADC_ACTL_PREG2, 3, 0,
+ sun4i_codec_micin_preamp_gain_scale),
};
-static const struct snd_kcontrol_new sun4i_codec_left_mixer_controls[] = {
- SOC_DAPM_SINGLE("Left DAC Playback Switch", SUN4I_CODEC_DAC_ACTL,
- SUN4I_CODEC_DAC_ACTL_LDACLMIXS, 1, 0),
+static const struct snd_kcontrol_new sun7i_codec_controls[] = {
+ SOC_SINGLE_TLV("Power Amplifier Volume", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0,
+ sun4i_codec_pa_volume_scale),
+ SOC_SINGLE_TLV("Line Playback Volume", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_LNG, 1, 0,
+ sun4i_codec_linein_loopback_gain_scale),
+ SOC_SINGLE_TLV("Line Boost Volume", SUN4I_CODEC_ADC_ACTL,
+ SUN4I_CODEC_ADC_ACTL_LNPREG, 7, 0,
+ sun4i_codec_linein_preamp_gain_scale),
+ SOC_SINGLE_TLV("FM Playback Volume", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_FMG, 3, 0,
+ sun4i_codec_fmin_loopback_gain_scale),
+ SOC_SINGLE_TLV("Mic Playback Volume", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_MICG, 7, 0,
+ sun4i_codec_micin_loopback_gain_scale),
+ SOC_SINGLE_TLV("Mic1 Boost Volume", SUN7I_CODEC_AC_MIC_PHONE_CAL,
+ SUN7I_CODEC_AC_MIC_PHONE_CAL_PREG1, 7, 0,
+ sun7i_codec_micin_preamp_gain_scale),
+ SOC_SINGLE_TLV("Mic2 Boost Volume", SUN7I_CODEC_AC_MIC_PHONE_CAL,
+ SUN7I_CODEC_AC_MIC_PHONE_CAL_PREG2, 7, 0,
+ sun7i_codec_micin_preamp_gain_scale),
};
-static const struct snd_kcontrol_new sun4i_codec_right_mixer_controls[] = {
- SOC_DAPM_SINGLE("Right DAC Playback Switch", SUN4I_CODEC_DAC_ACTL,
- SUN4I_CODEC_DAC_ACTL_RDACRMIXS, 1, 0),
- SOC_DAPM_SINGLE("Left DAC Playback Switch", SUN4I_CODEC_DAC_ACTL,
+static const struct snd_kcontrol_new sun4i_codec_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Left Mixer Left DAC Playback Switch",
+ SUN4I_CODEC_DAC_ACTL, SUN4I_CODEC_DAC_ACTL_LDACLMIXS,
+ 1, 0),
+ SOC_DAPM_SINGLE("Right Mixer Right DAC Playback Switch",
+ SUN4I_CODEC_DAC_ACTL, SUN4I_CODEC_DAC_ACTL_RDACRMIXS,
+ 1, 0),
+ SOC_DAPM_SINGLE("Right Mixer Left DAC Playback Switch",
+ SUN4I_CODEC_DAC_ACTL,
SUN4I_CODEC_DAC_ACTL_LDACRMIXS, 1, 0),
+ SOC_DAPM_DOUBLE("Line Playback Switch", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_LLNS,
+ SUN4I_CODEC_DAC_ACTL_RLNS, 1, 0),
+ SOC_DAPM_DOUBLE("FM Playback Switch", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_LFMS,
+ SUN4I_CODEC_DAC_ACTL_RFMS, 1, 0),
+ SOC_DAPM_DOUBLE("Mic1 Playback Switch", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_MIC1LS,
+ SUN4I_CODEC_DAC_ACTL_MIC1RS, 1, 0),
+ SOC_DAPM_DOUBLE("Mic2 Playback Switch", SUN4I_CODEC_DAC_ACTL,
+ SUN4I_CODEC_DAC_ACTL_MIC2LS,
+ SUN4I_CODEC_DAC_ACTL_MIC2RS, 1, 0),
};
static const struct snd_kcontrol_new sun4i_codec_pa_mixer_controls[] = {
@@ -724,11 +809,11 @@ static const struct snd_soc_dapm_widget sun4i_codec_codec_dapm_widgets[] = {
/* Mixers */
SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
- sun4i_codec_left_mixer_controls,
- ARRAY_SIZE(sun4i_codec_left_mixer_controls)),
+ sun4i_codec_mixer_controls,
+ ARRAY_SIZE(sun4i_codec_mixer_controls)),
SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
- sun4i_codec_right_mixer_controls,
- ARRAY_SIZE(sun4i_codec_right_mixer_controls)),
+ sun4i_codec_mixer_controls,
+ ARRAY_SIZE(sun4i_codec_mixer_controls)),
/* Global Mixer Enable */
SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL,
@@ -741,6 +826,8 @@ static const struct snd_soc_dapm_widget sun4i_codec_codec_dapm_widgets[] = {
/* Mic Pre-Amplifiers */
SND_SOC_DAPM_PGA("MIC1 Pre-Amplifier", SUN4I_CODEC_ADC_ACTL,
SUN4I_CODEC_ADC_ACTL_PREG1EN, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MIC2 Pre-Amplifier", SUN4I_CODEC_ADC_ACTL,
+ SUN4I_CODEC_ADC_ACTL_PREG2EN, 0, NULL, 0),
/* Power Amplifier */
SND_SOC_DAPM_MIXER("Power Amplifier", SUN4I_CODEC_ADC_ACTL,
@@ -750,7 +837,12 @@ static const struct snd_soc_dapm_widget sun4i_codec_codec_dapm_widgets[] = {
SND_SOC_DAPM_SWITCH("Power Amplifier Mute", SND_SOC_NOPM, 0, 0,
&sun4i_codec_pa_mute),
+ SND_SOC_DAPM_INPUT("Line Right"),
+ SND_SOC_DAPM_INPUT("Line Left"),
+ SND_SOC_DAPM_INPUT("FM Right"),
+ SND_SOC_DAPM_INPUT("FM Left"),
SND_SOC_DAPM_INPUT("Mic1"),
+ SND_SOC_DAPM_INPUT("Mic2"),
SND_SOC_DAPM_OUTPUT("HP Right"),
SND_SOC_DAPM_OUTPUT("HP Left"),
@@ -767,12 +859,20 @@ static const struct snd_soc_dapm_route sun4i_codec_codec_dapm_routes[] = {
/* Right Mixer Routes */
{ "Right Mixer", NULL, "Mixer Enable" },
- { "Right Mixer", "Left DAC Playback Switch", "Left DAC" },
- { "Right Mixer", "Right DAC Playback Switch", "Right DAC" },
+ { "Right Mixer", "Right Mixer Left DAC Playback Switch", "Left DAC" },
+ { "Right Mixer", "Right Mixer Right DAC Playback Switch", "Right DAC" },
+ { "Right Mixer", "Line Playback Switch", "Line Right" },
+ { "Right Mixer", "FM Playback Switch", "FM Right" },
+ { "Right Mixer", "Mic1 Playback Switch", "MIC1 Pre-Amplifier" },
+ { "Right Mixer", "Mic2 Playback Switch", "MIC2 Pre-Amplifier" },
/* Left Mixer Routes */
{ "Left Mixer", NULL, "Mixer Enable" },
- { "Left Mixer", "Left DAC Playback Switch", "Left DAC" },
+ { "Left Mixer", "Left Mixer Left DAC Playback Switch", "Left DAC" },
+ { "Left Mixer", "Line Playback Switch", "Line Left" },
+ { "Left Mixer", "FM Playback Switch", "FM Left" },
+ { "Left Mixer", "Mic1 Playback Switch", "MIC1 Pre-Amplifier" },
+ { "Left Mixer", "Mic2 Playback Switch", "MIC2 Pre-Amplifier" },
/* Power Amplifier Routes */
{ "Power Amplifier", "Mixer Playback Switch", "Left Mixer" },
@@ -790,6 +890,12 @@ static const struct snd_soc_dapm_route sun4i_codec_codec_dapm_routes[] = {
{ "Right ADC", NULL, "MIC1 Pre-Amplifier" },
{ "MIC1 Pre-Amplifier", NULL, "Mic1"},
{ "Mic1", NULL, "VMIC" },
+
+ /* Mic2 Routes */
+ { "Left ADC", NULL, "MIC2 Pre-Amplifier" },
+ { "Right ADC", NULL, "MIC2 Pre-Amplifier" },
+ { "MIC2 Pre-Amplifier", NULL, "Mic2"},
+ { "Mic2", NULL, "VMIC" },
};
static const struct snd_soc_component_driver sun4i_codec_codec = {
@@ -805,6 +911,19 @@ static const struct snd_soc_component_driver sun4i_codec_codec = {
.non_legacy_dai_naming = 1,
};
+static const struct snd_soc_component_driver sun7i_codec_codec = {
+ .controls = sun7i_codec_controls,
+ .num_controls = ARRAY_SIZE(sun7i_codec_controls),
+ .dapm_widgets = sun4i_codec_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sun4i_codec_codec_dapm_widgets),
+ .dapm_routes = sun4i_codec_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(sun4i_codec_codec_dapm_routes),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
/*** sun6i Codec ***/
/* mixer controls */
@@ -1485,7 +1604,7 @@ static const struct sun4i_codec_quirks sun6i_a31_codec_quirks = {
static const struct sun4i_codec_quirks sun7i_codec_quirks = {
.regmap_config = &sun7i_codec_regmap_config,
- .codec = &sun4i_codec_codec,
+ .codec = &sun7i_codec_codec,
.create_card = sun4i_codec_create_card,
.reg_adc_fifoc = REG_FIELD(SUN4I_CODEC_ADC_FIFOC, 0, 31),
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
diff --git a/sound/soc/sunxi/sun50i-codec-analog.c b/sound/soc/sunxi/sun50i-codec-analog.c
index df1fed0aa001..d105c90c3706 100644
--- a/sound/soc/sunxi/sun50i-codec-analog.c
+++ b/sound/soc/sunxi/sun50i-codec-analog.c
@@ -274,7 +274,7 @@ static const struct snd_soc_dapm_widget sun50i_a64_codec_widgets[] = {
* stream widgets at the card level.
*/
- SND_SOC_DAPM_REGULATOR_SUPPLY("hpvcc", 0, 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("cpvdd", 0, 0),
SND_SOC_DAPM_MUX("Headphone Source Playback Route",
SND_SOC_NOPM, 0, 0, sun50i_codec_hp_src),
SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN50I_ADDA_HP_CTRL,
@@ -362,7 +362,7 @@ static const struct snd_soc_dapm_route sun50i_a64_codec_routes[] = {
{ "Headphone Source Playback Route", "Mixer", "Left Mixer" },
{ "Headphone Source Playback Route", "Mixer", "Right Mixer" },
{ "Headphone Amp", NULL, "Headphone Source Playback Route" },
- { "Headphone Amp", NULL, "hpvcc" },
+ { "Headphone Amp", NULL, "cpvdd" },
{ "HP", NULL, "Headphone Amp" },
/* Microphone Routes */
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index eeda6d5565bc..a3a67a8f0f54 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -29,6 +29,7 @@
#include <linux/platform_data/davinci_asp.h>
#include <linux/math64.h>
#include <linux/bitmap.h>
+#include <linux/gpio/driver.h>
#include <sound/asoundef.h>
#include <sound/core.h>
@@ -54,6 +55,7 @@ static u32 context_regs[] = {
DAVINCI_MCASP_AHCLKXCTL_REG,
DAVINCI_MCASP_AHCLKRCTL_REG,
DAVINCI_MCASP_PDIR_REG,
+ DAVINCI_MCASP_PFUNC_REG,
DAVINCI_MCASP_RXMASK_REG,
DAVINCI_MCASP_TXMASK_REG,
DAVINCI_MCASP_RXTDM_REG,
@@ -108,7 +110,11 @@ struct davinci_mcasp {
/* Used for constraint setting on the second stream */
u32 channels;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio_chip;
+#endif
+
+#ifdef CONFIG_PM
struct davinci_mcasp_context context;
#endif
@@ -818,9 +824,6 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
if (mcasp->version < MCASP_VERSION_3)
mcasp_set_bits(mcasp, DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
- /* All PINS as McASP */
- mcasp_set_reg(mcasp, DAVINCI_MCASP_PFUNC_REG, 0x00000000);
-
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
@@ -1486,74 +1489,6 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
-{
- struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
- struct davinci_mcasp_context *context = &mcasp->context;
- u32 reg;
- int i;
-
- context->pm_state = pm_runtime_active(mcasp->dev);
- if (!context->pm_state)
- pm_runtime_get_sync(mcasp->dev);
-
- for (i = 0; i < ARRAY_SIZE(context_regs); i++)
- context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
-
- if (mcasp->txnumevt) {
- reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
- context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
- }
- if (mcasp->rxnumevt) {
- reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
- context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
- }
-
- for (i = 0; i < mcasp->num_serializer; i++)
- context->xrsr_regs[i] = mcasp_get_reg(mcasp,
- DAVINCI_MCASP_XRSRCTL_REG(i));
-
- pm_runtime_put_sync(mcasp->dev);
-
- return 0;
-}
-
-static int davinci_mcasp_resume(struct snd_soc_dai *dai)
-{
- struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
- struct davinci_mcasp_context *context = &mcasp->context;
- u32 reg;
- int i;
-
- pm_runtime_get_sync(mcasp->dev);
-
- for (i = 0; i < ARRAY_SIZE(context_regs); i++)
- mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
-
- if (mcasp->txnumevt) {
- reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
- mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
- }
- if (mcasp->rxnumevt) {
- reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
- mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
- }
-
- for (i = 0; i < mcasp->num_serializer; i++)
- mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
- context->xrsr_regs[i]);
-
- if (!context->pm_state)
- pm_runtime_put_sync(mcasp->dev);
-
- return 0;
-}
-#else
-#define davinci_mcasp_suspend NULL
-#define davinci_mcasp_resume NULL
-#endif
-
#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000
#define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \
@@ -1571,8 +1506,6 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
{
.name = "davinci-mcasp.0",
.probe = davinci_mcasp_dai_probe,
- .suspend = davinci_mcasp_suspend,
- .resume = davinci_mcasp_resume,
.playback = {
.channels_min = 1,
.channels_max = 32 * 16,
@@ -1915,6 +1848,147 @@ static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
return offset;
}
+#ifdef CONFIG_GPIOLIB
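+/*
+ * Optional GPIO controller on top of the McASP pins: pins routed to an
+ * active audio serializer are refused in the request callback, while the
+ * remaining pins can be muxed to GPIO through the PFUNC register.
+ */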
+static int davinci_mcasp_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
+
+ if (mcasp->num_serializer && offset < mcasp->num_serializer &&
+ mcasp->serial_dir[offset] != INACTIVE_MODE) {
+ dev_err(mcasp->dev, "AXR%u pin is used for audio\n", offset);
+ return -EBUSY;
+ }
+
+ /* Do not reconfigure the pin yet; that happens in the direction callbacks */
+
+ return pm_runtime_get_sync(mcasp->dev);
+}
+
+static void davinci_mcasp_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
+
+ /* Set the direction to input */
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(offset));
+
+ /* Set the pin as McASP pin */
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_PFUNC_REG, BIT(offset));
+
+ pm_runtime_put_sync(mcasp->dev);
+}
+
+static int davinci_mcasp_gpio_direction_out(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
+ u32 val;
+
+ if (value)
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PDOUT_REG, BIT(offset));
+ else
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDOUT_REG, BIT(offset));
+
+ val = mcasp_get_reg(mcasp, DAVINCI_MCASP_PFUNC_REG);
+ if (!(val & BIT(offset))) {
+ /* Set the pin as GPIO pin */
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PFUNC_REG, BIT(offset));
+
+ /* Set the direction to output */
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(offset));
+ }
+
+ return 0;
+}
+
+static void davinci_mcasp_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
+
+ if (value)
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PDOUT_REG, BIT(offset));
+ else
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDOUT_REG, BIT(offset));
+}
+
+static int davinci_mcasp_gpio_direction_in(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
+ u32 val;
+
+ val = mcasp_get_reg(mcasp, DAVINCI_MCASP_PFUNC_REG);
+ if (!(val & BIT(offset))) {
+ /* Set the direction to input */
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(offset));
+
+ /* Set the pin as GPIO pin */
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PFUNC_REG, BIT(offset));
+ }
+
+ return 0;
+}
+
+static int davinci_mcasp_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
+ u32 val;
+
+ val = mcasp_get_reg(mcasp, DAVINCI_MCASP_PDSET_REG);
+ if (val & BIT(offset))
+ return 1;
+
+ return 0;
+}
+
+static int davinci_mcasp_gpio_get_direction(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
+ u32 val;
+
+ val = mcasp_get_reg(mcasp, DAVINCI_MCASP_PDIR_REG);
+ if (val & BIT(offset))
+ return 0;
+
+ return 1;
+}
+
+static const struct gpio_chip davinci_mcasp_template_chip = {
+ .owner = THIS_MODULE,
+ .request = davinci_mcasp_gpio_request,
+ .free = davinci_mcasp_gpio_free,
+ .direction_output = davinci_mcasp_gpio_direction_out,
+ .set = davinci_mcasp_gpio_set,
+ .direction_input = davinci_mcasp_gpio_direction_in,
+ .get = davinci_mcasp_gpio_get,
+ .get_direction = davinci_mcasp_gpio_get_direction,
+ .base = -1,
+ .ngpio = 32,
+};
+
+static int davinci_mcasp_init_gpiochip(struct davinci_mcasp *mcasp)
+{
+ if (!of_property_read_bool(mcasp->dev->of_node, "gpio-controller"))
+ return 0;
+
+ mcasp->gpio_chip = davinci_mcasp_template_chip;
+ mcasp->gpio_chip.label = dev_name(mcasp->dev);
+ mcasp->gpio_chip.parent = mcasp->dev;
+#ifdef CONFIG_OF_GPIO
+ mcasp->gpio_chip.of_node = mcasp->dev->of_node;
+#endif
+
+ return devm_gpiochip_add_data(mcasp->dev, &mcasp->gpio_chip, mcasp);
+}
+
+#else /* CONFIG_GPIOLIB */
+static inline int davinci_mcasp_init_gpiochip(struct davinci_mcasp *mcasp)
+{
+ return 0;
+}
+#endif /* CONFIG_GPIOLIB */
+
static int davinci_mcasp_probe(struct platform_device *pdev)
{
struct snd_dmaengine_dai_dma_data *dma_data;
@@ -1976,7 +2050,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
}
mcasp->num_serializer = pdata->num_serializer;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev,
mcasp->num_serializer, sizeof(u32),
GFP_KERNEL);
@@ -2139,6 +2213,15 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
mcasp_reparent_fck(pdev);
+ /* All PINS as McASP */
+ pm_runtime_get_sync(mcasp->dev);
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_PFUNC_REG, 0x00000000);
+ pm_runtime_put(mcasp->dev);
+
+ ret = davinci_mcasp_init_gpiochip(mcasp);
+ if (ret)
+ goto err;
+
ret = devm_snd_soc_register_component(&pdev->dev,
&davinci_mcasp_component,
&davinci_mcasp_dai[pdata->op_mode], 1);
@@ -2149,26 +2232,10 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
ret = davinci_mcasp_get_dma_type(mcasp);
switch (ret) {
case PCM_EDMA:
-#if IS_BUILTIN(CONFIG_SND_SOC_TI_EDMA_PCM) || \
- (IS_MODULE(CONFIG_SND_SOC_DAVINCI_MCASP) && \
- IS_MODULE(CONFIG_SND_SOC_TI_EDMA_PCM))
ret = edma_pcm_platform_register(&pdev->dev);
-#else
- dev_err(&pdev->dev, "Missing SND_EDMA_SOC\n");
- ret = -EINVAL;
- goto err;
-#endif
break;
case PCM_SDMA:
-#if IS_BUILTIN(CONFIG_SND_SOC_TI_SDMA_PCM) || \
- (IS_MODULE(CONFIG_SND_SOC_DAVINCI_MCASP) && \
- IS_MODULE(CONFIG_SND_SOC_TI_SDMA_PCM))
ret = sdma_pcm_platform_register(&pdev->dev, NULL, NULL);
-#else
- dev_err(&pdev->dev, "Missing SND_SDMA_SOC\n");
- ret = -EINVAL;
- goto err;
-#endif
break;
default:
dev_err(&pdev->dev, "No DMA controller found (%d)\n", ret);
@@ -2196,11 +2263,73 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
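+/*
+ * Save and restore the McASP register context across runtime PM
+ * transitions; this replaces the former DAI suspend/resume callbacks.
+ */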
+static int davinci_mcasp_runtime_suspend(struct device *dev)
+{
+ struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
+ struct davinci_mcasp_context *context = &mcasp->context;
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(context_regs); i++)
+ context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
+
+ if (mcasp->txnumevt) {
+ reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+ context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
+ }
+ if (mcasp->rxnumevt) {
+ reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+ context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
+ }
+
+ for (i = 0; i < mcasp->num_serializer; i++)
+ context->xrsr_regs[i] = mcasp_get_reg(mcasp,
+ DAVINCI_MCASP_XRSRCTL_REG(i));
+
+ return 0;
+}
+
+static int davinci_mcasp_runtime_resume(struct device *dev)
+{
+ struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
+ struct davinci_mcasp_context *context = &mcasp->context;
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(context_regs); i++)
+ mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
+
+ if (mcasp->txnumevt) {
+ reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+ mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
+ }
+ if (mcasp->rxnumevt) {
+ reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+ mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
+ }
+
+ for (i = 0; i < mcasp->num_serializer; i++)
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
+ context->xrsr_regs[i]);
+
+ return 0;
+}
+
+#endif
+
+static const struct dev_pm_ops davinci_mcasp_pm_ops = {
+ SET_RUNTIME_PM_OPS(davinci_mcasp_runtime_suspend,
+ davinci_mcasp_runtime_resume,
+ NULL)
+};
+
static struct platform_driver davinci_mcasp_driver = {
.probe = davinci_mcasp_probe,
.remove = davinci_mcasp_remove,
.driver = {
.name = "davinci-mcasp",
+ .pm = &davinci_mcasp_pm_ops,
.of_match_table = mcasp_dt_ids,
},
};
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 8d31fe628e2f..089bd7518606 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -313,8 +313,10 @@ static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
if (ret)
goto exit;
}
- return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
card->dev, 64 * 1024, 4 * 1024 * 1024);
+ return 0;
exit:
for (i = 0; i < 2; i++) {
diff --git a/sound/soc/uniphier/aio-dma.c b/sound/soc/uniphier/aio-dma.c
index 4ec6b65bfb44..fa001d3c1a88 100644
--- a/sound/soc/uniphier/aio-dma.c
+++ b/sound/soc/uniphier/aio-dma.c
@@ -235,10 +235,11 @@ static int uniphier_aiodma_new(struct snd_soc_pcm_runtime *rtd)
if (ret)
return ret;
- return snd_pcm_lib_preallocate_pages_for_all(pcm,
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV, dev,
uniphier_aiodma_hw.buffer_bytes_max,
uniphier_aiodma_hw.buffer_bytes_max);
+ return 0;
}
static void uniphier_aiodma_free(struct snd_pcm *pcm)
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
index 25e287feb58c..47f606b924e4 100644
--- a/sound/soc/xilinx/Kconfig
+++ b/sound/soc/xilinx/Kconfig
@@ -1,8 +1,22 @@
config SND_SOC_XILINX_I2S
- tristate "Audio support for the the Xilinx I2S"
+ tristate "Audio support for the Xilinx I2S"
help
Select this option to enable Xilinx I2S Audio. This enables
I2S playback and capture using xilinx soft IP. In transmitter
mode, IP receives audio in AES format, extracts PCM and sends
PCM data. In receiver mode, IP receives PCM audio and
encapsulates PCM in AES format and sends AES data.
+
+config SND_SOC_XILINX_AUDIO_FORMATTER
+ tristate "Audio support for the Xilinx audio formatter"
+ help
+ Select this option to enable Xilinx audio formatter
+ support. This provides DMA platform device support for
+ audio functionality.
+
+config SND_SOC_XILINX_SPDIF
+ tristate "Audio support for the Xilinx SPDIF"
+ help
+ Select this option to enable Xilinx SPDIF Audio.
+ This provides playback and capture of SPDIF audio in
+ AES format.
diff --git a/sound/soc/xilinx/Makefile b/sound/soc/xilinx/Makefile
index 6c1209b9ee75..d79fd38b094b 100644
--- a/sound/soc/xilinx/Makefile
+++ b/sound/soc/xilinx/Makefile
@@ -1,2 +1,6 @@
snd-soc-xlnx-i2s-objs := xlnx_i2s.o
obj-$(CONFIG_SND_SOC_XILINX_I2S) += snd-soc-xlnx-i2s.o
+snd-soc-xlnx-formatter-pcm-objs := xlnx_formatter_pcm.o
+obj-$(CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER) += snd-soc-xlnx-formatter-pcm.o
+snd-soc-xlnx-spdif-objs := xlnx_spdif.o
+obj-$(CONFIG_SND_SOC_XILINX_SPDIF) += snd-soc-xlnx-spdif.o
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
new file mode 100644
index 000000000000..dc8721f4f56b
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Xilinx ASoC audio formatter support
+//
+// Copyright (C) 2018 Xilinx, Inc.
+//
+// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sizes.h>
+
+#include <sound/asoundef.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#define DRV_NAME "xlnx_formatter_pcm"
+
+#define XLNX_S2MM_OFFSET 0
+#define XLNX_MM2S_OFFSET 0x100
+
+#define XLNX_AUD_CORE_CONFIG 0x4
+#define XLNX_AUD_CTRL 0x10
+#define XLNX_AUD_STS 0x14
+
+#define AUD_CTRL_RESET_MASK BIT(1)
+#define AUD_CFG_MM2S_MASK BIT(15)
+#define AUD_CFG_S2MM_MASK BIT(31)
+
+#define XLNX_AUD_FS_MULTIPLIER 0x18
+#define XLNX_AUD_PERIOD_CONFIG 0x1C
+#define XLNX_AUD_BUFF_ADDR_LSB 0x20
+#define XLNX_AUD_BUFF_ADDR_MSB 0x24
+#define XLNX_AUD_XFER_COUNT 0x28
+#define XLNX_AUD_CH_STS_START 0x2C
+#define XLNX_BYTES_PER_CH 0x44
+
+#define AUD_STS_IOC_IRQ_MASK BIT(31)
+#define AUD_STS_CH_STS_MASK BIT(29)
+#define AUD_CTRL_IOC_IRQ_MASK BIT(13)
+#define AUD_CTRL_TOUT_IRQ_MASK BIT(14)
+#define AUD_CTRL_DMA_EN_MASK BIT(0)
+
+#define CFG_MM2S_CH_MASK GENMASK(11, 8)
+#define CFG_MM2S_CH_SHIFT 8
+#define CFG_MM2S_XFER_MASK GENMASK(14, 13)
+#define CFG_MM2S_XFER_SHIFT 13
+#define CFG_MM2S_PKG_MASK BIT(12)
+
+#define CFG_S2MM_CH_MASK GENMASK(27, 24)
+#define CFG_S2MM_CH_SHIFT 24
+#define CFG_S2MM_XFER_MASK GENMASK(30, 29)
+#define CFG_S2MM_XFER_SHIFT 29
+#define CFG_S2MM_PKG_MASK BIT(28)
+
+#define AUD_CTRL_DATA_WIDTH_SHIFT 16
+#define AUD_CTRL_ACTIVE_CH_SHIFT 19
+#define PERIOD_CFG_PERIODS_SHIFT 16
+
+#define PERIODS_MIN 2
+#define PERIODS_MAX 6
+#define PERIOD_BYTES_MIN 192
+#define PERIOD_BYTES_MAX (50 * 1024)
+#define XLNX_PARAM_UNKNOWN 0
+
+enum bit_depth {
+ BIT_DEPTH_8,
+ BIT_DEPTH_16,
+ BIT_DEPTH_20,
+ BIT_DEPTH_24,
+ BIT_DEPTH_32,
+};
+
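+/*
+ * Driver private data: formatter MMIO base, which DMA directions the IP
+ * was built with, their interrupts, the active substreams and the AXI
+ * clock.
+ */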
+struct xlnx_pcm_drv_data {
+ void __iomem *mmio;
+ bool s2mm_presence;
+ bool mm2s_presence;
+ int s2mm_irq;
+ int mm2s_irq;
+ struct snd_pcm_substream *play_stream;
+ struct snd_pcm_substream *capture_stream;
+ struct clk *axi_clk;
+};
+
+/**
+ * struct xlnx_pcm_stream_param - stream configuration
+ * @mmio: register base for this stream direction
+ * @interleaved: whether audio channels are interleaved in the buffer
+ * @xfer_mode: data formatting mode during transfer
+ * @ch_limit: maximum number of channels supported
+ * @buffer_size: stream ring buffer size
+ */
+struct xlnx_pcm_stream_param {
+ void __iomem *mmio;
+ bool interleaved;
+ u32 xfer_mode;
+ u32 ch_limit;
+ u64 buffer_size;
+};
+
+static const struct snd_pcm_hardware xlnx_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .buffer_bytes_max = PERIODS_MAX * PERIOD_BYTES_MAX,
+ .period_bytes_min = PERIOD_BYTES_MIN,
+ .period_bytes_max = PERIOD_BYTES_MAX,
+ .periods_min = PERIODS_MIN,
+ .periods_max = PERIODS_MAX,
+};
+
+enum {
+ AES_TO_AES,
+ AES_TO_PCM,
+ PCM_TO_PCM,
+ PCM_TO_AES
+};
+
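+/*
+ * Decode the sample rate and word length advertised in the IEC958 (AES)
+ * channel status bytes and log them; the values are informational only.
+ */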
+static void xlnx_parse_aes_params(u32 chsts_reg1_val, u32 chsts_reg2_val,
+ struct device *dev)
+{
+ u32 padded, srate, bit_depth, status[2];
+
+ if (chsts_reg1_val & IEC958_AES0_PROFESSIONAL) {
+ status[0] = chsts_reg1_val & 0xff;
+ status[1] = (chsts_reg1_val >> 16) & 0xff;
+
+ switch (status[0] & IEC958_AES0_PRO_FS) {
+ case IEC958_AES0_PRO_FS_44100:
+ srate = 44100;
+ break;
+ case IEC958_AES0_PRO_FS_48000:
+ srate = 48000;
+ break;
+ case IEC958_AES0_PRO_FS_32000:
+ srate = 32000;
+ break;
+ case IEC958_AES0_PRO_FS_NOTID:
+ default:
+ srate = XLNX_PARAM_UNKNOWN;
+ break;
+ }
+
+ switch (status[1] & IEC958_AES2_PRO_SBITS) {
+ case IEC958_AES2_PRO_WORDLEN_NOTID:
+ case IEC958_AES2_PRO_SBITS_20:
+ padded = 0;
+ break;
+ case IEC958_AES2_PRO_SBITS_24:
+ padded = 4;
+ break;
+ default:
+ bit_depth = XLNX_PARAM_UNKNOWN;
+ goto log_params;
+ }
+
+ switch (status[1] & IEC958_AES2_PRO_WORDLEN) {
+ case IEC958_AES2_PRO_WORDLEN_20_16:
+ bit_depth = 16 + padded;
+ break;
+ case IEC958_AES2_PRO_WORDLEN_22_18:
+ bit_depth = 18 + padded;
+ break;
+ case IEC958_AES2_PRO_WORDLEN_23_19:
+ bit_depth = 19 + padded;
+ break;
+ case IEC958_AES2_PRO_WORDLEN_24_20:
+ bit_depth = 20 + padded;
+ break;
+ case IEC958_AES2_PRO_WORDLEN_NOTID:
+ default:
+ bit_depth = XLNX_PARAM_UNKNOWN;
+ break;
+ }
+
+ } else {
+ status[0] = (chsts_reg1_val >> 24) & 0xff;
+ status[1] = chsts_reg2_val & 0xff;
+
+ switch (status[0] & IEC958_AES3_CON_FS) {
+ case IEC958_AES3_CON_FS_44100:
+ srate = 44100;
+ break;
+ case IEC958_AES3_CON_FS_48000:
+ srate = 48000;
+ break;
+ case IEC958_AES3_CON_FS_32000:
+ srate = 32000;
+ break;
+ default:
+ srate = XLNX_PARAM_UNKNOWN;
+ break;
+ }
+
+ if (status[1] & IEC958_AES4_CON_MAX_WORDLEN_24)
+ padded = 4;
+ else
+ padded = 0;
+
+ switch (status[1] & IEC958_AES4_CON_WORDLEN) {
+ case IEC958_AES4_CON_WORDLEN_20_16:
+ bit_depth = 16 + padded;
+ break;
+ case IEC958_AES4_CON_WORDLEN_22_18:
+ bit_depth = 18 + padded;
+ break;
+ case IEC958_AES4_CON_WORDLEN_23_19:
+ bit_depth = 19 + padded;
+ break;
+ case IEC958_AES4_CON_WORDLEN_24_20:
+ bit_depth = 20 + padded;
+ break;
+ case IEC958_AES4_CON_WORDLEN_21_17:
+ bit_depth = 17 + padded;
+ break;
+ case IEC958_AES4_CON_WORDLEN_NOTID:
+ default:
+ bit_depth = XLNX_PARAM_UNKNOWN;
+ break;
+ }
+ }
+
+log_params:
+ if (srate != XLNX_PARAM_UNKNOWN)
+ dev_info(dev, "sample rate = %d\n", srate);
+ else
+ dev_info(dev, "sample rate = unknown\n");
+
+ if (bit_depth != XLNX_PARAM_UNKNOWN)
+ dev_info(dev, "bit_depth = %d\n", bit_depth);
+ else
+ dev_info(dev, "bit_depth = unknown\n");
+}
+
+static int xlnx_formatter_pcm_reset(void __iomem *mmio_base)
+{
+ u32 val, retries = 0;
+
+ val = readl(mmio_base + XLNX_AUD_CTRL);
+ val |= AUD_CTRL_RESET_MASK;
+ writel(val, mmio_base + XLNX_AUD_CTRL);
+
+ val = readl(mmio_base + XLNX_AUD_CTRL);
+ /* Poll for up to ~100ms (100 iterations of 1ms) for the reset to complete */
+ while ((val & AUD_CTRL_RESET_MASK) && (retries < 100)) {
+ mdelay(1);
+ retries++;
+ val = readl(mmio_base + XLNX_AUD_CTRL);
+ }
+ if (val & AUD_CTRL_RESET_MASK)
+ return -ENODEV;
+
+ return 0;
+}
+
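+/*
+ * Mask the completion interrupt for this stream; the capture path
+ * additionally masks the timeout interrupt.
+ */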
+static void xlnx_formatter_disable_irqs(void __iomem *mmio_base, int stream)
+{
+ u32 val;
+
+ val = readl(mmio_base + XLNX_AUD_CTRL);
+ val &= ~AUD_CTRL_IOC_IRQ_MASK;
+ if (stream == SNDRV_PCM_STREAM_CAPTURE)
+ val &= ~AUD_CTRL_TOUT_IRQ_MASK;
+
+ writel(val, mmio_base + XLNX_AUD_CTRL);
+}
+
+static irqreturn_t xlnx_mm2s_irq_handler(int irq, void *arg)
+{
+ u32 val;
+ void __iomem *reg;
+ struct device *dev = arg;
+ struct xlnx_pcm_drv_data *adata = dev_get_drvdata(dev);
+
+ reg = adata->mmio + XLNX_MM2S_OFFSET + XLNX_AUD_STS;
+ val = readl(reg);
+ if (val & AUD_STS_IOC_IRQ_MASK) {
+ writel(val & AUD_STS_IOC_IRQ_MASK, reg);
+ if (adata->play_stream)
+ snd_pcm_period_elapsed(adata->play_stream);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t xlnx_s2mm_irq_handler(int irq, void *arg)
+{
+ u32 val;
+ void __iomem *reg;
+ struct device *dev = arg;
+ struct xlnx_pcm_drv_data *adata = dev_get_drvdata(dev);
+
+ reg = adata->mmio + XLNX_S2MM_OFFSET + XLNX_AUD_STS;
+ val = readl(reg);
+ if (val & AUD_STS_IOC_IRQ_MASK) {
+ writel(val & AUD_STS_IOC_IRQ_MASK, reg);
+ if (adata->capture_stream)
+ snd_pcm_period_elapsed(adata->capture_stream);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int xlnx_formatter_pcm_open(struct snd_pcm_substream *substream)
+{
+ int err;
+ u32 val, data_format_mode;
+ u32 ch_count_mask, ch_count_shift, data_xfer_mode, data_xfer_shift;
+ struct xlnx_pcm_stream_param *stream_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *prtd = substream->private_data;
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
+ DRV_NAME);
+ struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ !adata->mm2s_presence)
+ return -ENODEV;
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+ !adata->s2mm_presence)
+ return -ENODEV;
+
+ stream_data = kzalloc(sizeof(*stream_data), GFP_KERNEL);
+ if (!stream_data)
+ return -ENOMEM;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ch_count_mask = CFG_MM2S_CH_MASK;
+ ch_count_shift = CFG_MM2S_CH_SHIFT;
+ data_xfer_mode = CFG_MM2S_XFER_MASK;
+ data_xfer_shift = CFG_MM2S_XFER_SHIFT;
+ data_format_mode = CFG_MM2S_PKG_MASK;
+ stream_data->mmio = adata->mmio + XLNX_MM2S_OFFSET;
+ adata->play_stream = substream;
+
+ } else {
+ ch_count_mask = CFG_S2MM_CH_MASK;
+ ch_count_shift = CFG_S2MM_CH_SHIFT;
+ data_xfer_mode = CFG_S2MM_XFER_MASK;
+ data_xfer_shift = CFG_S2MM_XFER_SHIFT;
+ data_format_mode = CFG_S2MM_PKG_MASK;
+ stream_data->mmio = adata->mmio + XLNX_S2MM_OFFSET;
+ adata->capture_stream = substream;
+ }
+
+ val = readl(adata->mmio + XLNX_AUD_CORE_CONFIG);
+
+ if (!(val & data_format_mode))
+ stream_data->interleaved = true;
+
+ stream_data->xfer_mode = (val & data_xfer_mode) >> data_xfer_shift;
+ stream_data->ch_limit = (val & ch_count_mask) >> ch_count_shift;
+ dev_info(component->dev,
+ "stream %d : format = %d mode = %d ch_limit = %d\n",
+ substream->stream, stream_data->interleaved,
+ stream_data->xfer_mode, stream_data->ch_limit);
+
+ snd_soc_set_runtime_hwparams(substream, &xlnx_pcm_hardware);
+ runtime->private_data = stream_data;
+
+ /* Constrain the period size to be a multiple of 64 bytes */
+ err = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
+ if (err) {
+ dev_err(component->dev,
+ "unable to set constraint on period bytes\n");
+ kfree(stream_data);
+ return err;
+ }
+
+ /* enable DMA IOC irq */
+ val = readl(stream_data->mmio + XLNX_AUD_CTRL);
+ val |= AUD_CTRL_IOC_IRQ_MASK;
+ writel(val, stream_data->mmio + XLNX_AUD_CTRL);
+
+ return 0;
+}
+
+static int xlnx_formatter_pcm_close(struct snd_pcm_substream *substream)
+{
+ int ret;
+ struct xlnx_pcm_stream_param *stream_data =
+ substream->runtime->private_data;
+ struct snd_soc_pcm_runtime *prtd = substream->private_data;
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
+ DRV_NAME);
+
+ ret = xlnx_formatter_pcm_reset(stream_data->mmio);
+ if (ret) {
+ dev_err(component->dev, "audio formatter reset failed\n");
+ goto err_reset;
+ }
+ xlnx_formatter_disable_irqs(stream_data->mmio, substream->stream);
+
+err_reset:
+ kfree(stream_data);
+ return 0;
+}
+
+static snd_pcm_uframes_t
+xlnx_formatter_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ u32 pos;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct xlnx_pcm_stream_param *stream_data = runtime->private_data;
+
+ pos = readl(stream_data->mmio + XLNX_AUD_XFER_COUNT);
+
+ if (pos >= stream_data->buffer_size)
+ pos = 0;
+
+ return bytes_to_frames(runtime, pos);
+}
+
+static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ u32 low, high, active_ch, val, bytes_per_ch, bits_per_sample;
+ u32 aes_reg1_val, aes_reg2_val;
+ int status;
+ u64 size;
+ struct snd_soc_pcm_runtime *prtd = substream->private_data;
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
+ DRV_NAME);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct xlnx_pcm_stream_param *stream_data = runtime->private_data;
+
+ active_ch = params_channels(params);
+ if (active_ch > stream_data->ch_limit)
+ return -EINVAL;
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+ stream_data->xfer_mode == AES_TO_PCM) {
+ val = readl(stream_data->mmio + XLNX_AUD_STS);
+ if (val & AUD_STS_CH_STS_MASK) {
+ aes_reg1_val = readl(stream_data->mmio +
+ XLNX_AUD_CH_STS_START);
+ aes_reg2_val = readl(stream_data->mmio +
+ XLNX_AUD_CH_STS_START + 0x4);
+
+ xlnx_parse_aes_params(aes_reg1_val, aes_reg2_val,
+ component->dev);
+ }
+ }
+
+ size = params_buffer_bytes(params);
+ status = snd_pcm_lib_malloc_pages(substream, size);
+ if (status < 0)
+ return status;
+
+ stream_data->buffer_size = size;
+
+ low = lower_32_bits(substream->dma_buffer.addr);
+ high = upper_32_bits(substream->dma_buffer.addr);
+ writel(low, stream_data->mmio + XLNX_AUD_BUFF_ADDR_LSB);
+ writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB);
+
+ val = readl(stream_data->mmio + XLNX_AUD_CTRL);
+ bits_per_sample = params_width(params);
+ switch (bits_per_sample) {
+ case 8:
+ val |= (BIT_DEPTH_8 << AUD_CTRL_DATA_WIDTH_SHIFT);
+ break;
+ case 16:
+ val |= (BIT_DEPTH_16 << AUD_CTRL_DATA_WIDTH_SHIFT);
+ break;
+ case 20:
+ val |= (BIT_DEPTH_20 << AUD_CTRL_DATA_WIDTH_SHIFT);
+ break;
+ case 24:
+ val |= (BIT_DEPTH_24 << AUD_CTRL_DATA_WIDTH_SHIFT);
+ break;
+ case 32:
+ val |= (BIT_DEPTH_32 << AUD_CTRL_DATA_WIDTH_SHIFT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val |= active_ch << AUD_CTRL_ACTIVE_CH_SHIFT;
+ writel(val, stream_data->mmio + XLNX_AUD_CTRL);
+
+ val = (params_periods(params) << PERIOD_CFG_PERIODS_SHIFT)
+ | params_period_bytes(params);
+ writel(val, stream_data->mmio + XLNX_AUD_PERIOD_CONFIG);
+ bytes_per_ch = DIV_ROUND_UP(params_period_bytes(params), active_ch);
+ writel(bytes_per_ch, stream_data->mmio + XLNX_BYTES_PER_CH);
+
+ return 0;
+}
+
+static int xlnx_formatter_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int xlnx_formatter_pcm_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ u32 val;
+ struct xlnx_pcm_stream_param *stream_data =
+ substream->runtime->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ val = readl(stream_data->mmio + XLNX_AUD_CTRL);
+ val |= AUD_CTRL_DMA_EN_MASK;
+ writel(val, stream_data->mmio + XLNX_AUD_CTRL);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ val = readl(stream_data->mmio + XLNX_AUD_CTRL);
+ val &= ~AUD_CTRL_DMA_EN_MASK;
+ writel(val, stream_data->mmio + XLNX_AUD_CTRL);
+ break;
+ }
+
+ return 0;
+}
+
+static int xlnx_formatter_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
+ DRV_NAME);
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_DEV, component->dev,
+ xlnx_pcm_hardware.buffer_bytes_max,
+ xlnx_pcm_hardware.buffer_bytes_max);
+ return 0;
+}
+
+static const struct snd_pcm_ops xlnx_formatter_pcm_ops = {
+ .open = xlnx_formatter_pcm_open,
+ .close = xlnx_formatter_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = xlnx_formatter_pcm_hw_params,
+ .hw_free = xlnx_formatter_pcm_hw_free,
+ .trigger = xlnx_formatter_pcm_trigger,
+ .pointer = xlnx_formatter_pcm_pointer,
+};
+
+static const struct snd_soc_component_driver xlnx_asoc_component = {
+ .name = DRV_NAME,
+ .ops = &xlnx_formatter_pcm_ops,
+ .pcm_new = xlnx_formatter_pcm_new,
+};
+
+static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 val;
+ struct xlnx_pcm_drv_data *aud_drv_data;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ aud_drv_data = devm_kzalloc(dev, sizeof(*aud_drv_data), GFP_KERNEL);
+ if (!aud_drv_data)
+ return -ENOMEM;
+
+ aud_drv_data->axi_clk = devm_clk_get(dev, "s_axi_lite_aclk");
+ if (IS_ERR(aud_drv_data->axi_clk)) {
+ ret = PTR_ERR(aud_drv_data->axi_clk);
+ dev_err(dev, "failed to get s_axi_lite_aclk(%d)\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(aud_drv_data->axi_clk);
+ if (ret) {
+ dev_err(dev,
+ "failed to enable s_axi_lite_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "audio formatter node:addr to resource failed\n");
+ ret = -ENXIO;
+ goto clk_err;
+ }
+ aud_drv_data->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(aud_drv_data->mmio)) {
+ dev_err(dev, "audio formatter ioremap failed\n");
+ ret = PTR_ERR(aud_drv_data->mmio);
+ goto clk_err;
+ }
+
+ val = readl(aud_drv_data->mmio + XLNX_AUD_CORE_CONFIG);
+ if (val & AUD_CFG_MM2S_MASK) {
+ aud_drv_data->mm2s_presence = true;
+ ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
+ XLNX_MM2S_OFFSET);
+ if (ret) {
+ dev_err(dev, "audio formatter reset failed\n");
+ goto clk_err;
+ }
+ xlnx_formatter_disable_irqs(aud_drv_data->mmio +
+ XLNX_MM2S_OFFSET,
+ SNDRV_PCM_STREAM_PLAYBACK);
+
+ aud_drv_data->mm2s_irq = platform_get_irq_byname(pdev,
+ "irq_mm2s");
+ if (aud_drv_data->mm2s_irq < 0) {
+ dev_err(dev, "xlnx audio mm2s irq resource failed\n");
+ ret = aud_drv_data->mm2s_irq;
+ goto clk_err;
+ }
+ ret = devm_request_irq(dev, aud_drv_data->mm2s_irq,
+ xlnx_mm2s_irq_handler, 0,
+ "xlnx_formatter_pcm_mm2s_irq", dev);
+ if (ret) {
+ dev_err(dev, "xlnx audio mm2s irq request failed\n");
+ goto clk_err;
+ }
+ }
+ if (val & AUD_CFG_S2MM_MASK) {
+ aud_drv_data->s2mm_presence = true;
+ ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
+ XLNX_S2MM_OFFSET);
+ if (ret) {
+ dev_err(dev, "audio formatter reset failed\n");
+ goto clk_err;
+ }
+ xlnx_formatter_disable_irqs(aud_drv_data->mmio +
+ XLNX_S2MM_OFFSET,
+ SNDRV_PCM_STREAM_CAPTURE);
+
+ aud_drv_data->s2mm_irq = platform_get_irq_byname(pdev,
+ "irq_s2mm");
+ if (aud_drv_data->s2mm_irq < 0) {
+ dev_err(dev, "xlnx audio s2mm irq resource failed\n");
+ ret = aud_drv_data->s2mm_irq;
+ goto clk_err;
+ }
+ ret = devm_request_irq(dev, aud_drv_data->s2mm_irq,
+ xlnx_s2mm_irq_handler, 0,
+ "xlnx_formatter_pcm_s2mm_irq",
+ dev);
+ if (ret) {
+ dev_err(dev, "xlnx audio s2mm irq request failed\n");
+ goto clk_err;
+ }
+ }
+
+ dev_set_drvdata(dev, aud_drv_data);
+
+ ret = devm_snd_soc_register_component(dev, &xlnx_asoc_component,
+ NULL, 0);
+ if (ret) {
+ dev_err(dev, "pcm platform device register failed\n");
+ goto clk_err;
+ }
+
+ return 0;
+
+clk_err:
+ clk_disable_unprepare(aud_drv_data->axi_clk);
+ return ret;
+}
+
+static int xlnx_formatter_pcm_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct xlnx_pcm_drv_data *adata = dev_get_drvdata(&pdev->dev);
+
+ if (adata->s2mm_presence)
+ ret = xlnx_formatter_pcm_reset(adata->mmio + XLNX_S2MM_OFFSET);
+
+ /* Try MM2S reset, even if S2MM reset fails */
+ if (adata->mm2s_presence)
+ ret = xlnx_formatter_pcm_reset(adata->mmio + XLNX_MM2S_OFFSET);
+
+ if (ret)
+ dev_err(&pdev->dev, "audio formatter reset failed\n");
+
+ clk_disable_unprepare(adata->axi_clk);
+ return ret;
+}
+
+static const struct of_device_id xlnx_formatter_pcm_of_match[] = {
+ { .compatible = "xlnx,audio-formatter-1.0"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, xlnx_formatter_pcm_of_match);
+
+static struct platform_driver xlnx_formatter_pcm_driver = {
+ .probe = xlnx_formatter_pcm_probe,
+ .remove = xlnx_formatter_pcm_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = xlnx_formatter_pcm_of_match,
+ },
+};
+
+module_platform_driver(xlnx_formatter_pcm_driver);
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>");
+MODULE_LICENSE("GPL v2");
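Note on the capture path above: xlnx_parse_aes_params() splits the two 32-bit channel-status registers into IEC958/AES status bytes and decodes the sample rate and word length from them. The following is a minimal user-space sketch of the consumer-mode branch only; it reuses the IEC958_* masks from the UAPI <sound/asound.h> header, the register values in main() are hypothetical, and only two of the word-length codes are spelled out (the remaining codes follow the same pattern).

#include <stdio.h>
#include <sound/asound.h>	/* IEC958_* channel-status bit definitions */

/*
 * Sketch of the consumer-mode decode: byte 3 (bits 31:24 of the first
 * status register) carries the sample-rate code, byte 4 (bits 7:0 of the
 * second register) carries the word-length code.
 */
static void decode_consumer_status(unsigned int reg1, unsigned int reg2)
{
	unsigned char aes3 = (reg1 >> 24) & 0xff;
	unsigned char aes4 = reg2 & 0xff;
	int rate, bits, padded;

	switch (aes3 & IEC958_AES3_CON_FS) {
	case IEC958_AES3_CON_FS_44100: rate = 44100; break;
	case IEC958_AES3_CON_FS_48000: rate = 48000; break;
	case IEC958_AES3_CON_FS_32000: rate = 32000; break;
	default:                       rate = -1;    break;	/* unknown */
	}

	/* "max word length 24" adds 4 bits on top of the 20-bit base codes */
	padded = (aes4 & IEC958_AES4_CON_MAX_WORDLEN_24) ? 4 : 0;
	switch (aes4 & IEC958_AES4_CON_WORDLEN) {
	case IEC958_AES4_CON_WORDLEN_20_16: bits = 16 + padded; break;
	case IEC958_AES4_CON_WORDLEN_24_20: bits = 20 + padded; break;
	/* ..._22_18, ..._23_19 and ..._21_17 decode analogously */
	default:                            bits = -1;           break;
	}

	printf("rate=%d bits=%d\n", rate, bits);
}

int main(void)
{
	/* hypothetical register contents (should decode as 48 kHz / 24-bit) */
	decode_consumer_status(0x02000000, 0x0000000b);
	return 0;
}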
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c
index d4ae9eff41ce..8b353166ad44 100644
--- a/sound/soc/xilinx/xlnx_i2s.c
+++ b/sound/soc/xilinx/xlnx_i2s.c
@@ -1,12 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Xilinx ASoC I2S audio support
- *
- * Copyright (C) 2018 Xilinx, Inc.
- *
- * Author: Praveen Vuppala <praveenv@xilinx.com>
- * Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
- */
+//
+// Xilinx ASoC I2S audio support
+//
+// Copyright (C) 2018 Xilinx, Inc.
+//
+// Author: Praveen Vuppala <praveenv@xilinx.com>
+// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
#include <linux/io.h>
#include <linux/module.h>
diff --git a/sound/soc/xilinx/xlnx_spdif.c b/sound/soc/xilinx/xlnx_spdif.c
new file mode 100644
index 000000000000..3b9000fd8c49
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_spdif.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Xilinx ASoC SPDIF audio support
+//
+// Copyright (C) 2018 Xilinx, Inc.
+//
+// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
+//
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define XLNX_SPDIF_RATES \
+ (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
+ SNDRV_PCM_RATE_192000)
+
+#define XLNX_SPDIF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+#define XSPDIF_IRQ_STS_REG 0x20
+#define XSPDIF_IRQ_ENABLE_REG 0x28
+#define XSPDIF_SOFT_RESET_REG 0x40
+#define XSPDIF_CONTROL_REG 0x44
+#define XSPDIF_CHAN_0_STS_REG 0x4C
+#define XSPDIF_GLOBAL_IRQ_ENABLE_REG 0x1C
+#define XSPDIF_CH_A_USER_DATA_REG_0 0x64
+
+#define XSPDIF_CORE_ENABLE_MASK BIT(0)
+#define XSPDIF_FIFO_FLUSH_MASK BIT(1)
+#define XSPDIF_CH_STS_MASK BIT(5)
+#define XSPDIF_GLOBAL_IRQ_ENABLE BIT(31)
+#define XSPDIF_CLOCK_CONFIG_BITS_MASK GENMASK(5, 2)
+#define XSPDIF_CLOCK_CONFIG_BITS_SHIFT 2
+#define XSPDIF_SOFT_RESET_VALUE 0xA
+
+#define MAX_CHANNELS 2
+#define AES_SAMPLE_WIDTH 32
+#define CH_STATUS_UPDATE_TIMEOUT 40
+
+struct spdif_dev_data {
+ u32 mode;
+ u32 aclk;
+ bool rx_chsts_updated;
+ void __iomem *base;
+ struct clk *axi_clk;
+ wait_queue_head_t chsts_q;
+};
+
+static irqreturn_t xlnx_spdifrx_irq_handler(int irq, void *arg)
+{
+ u32 val;
+ struct spdif_dev_data *ctx = arg;
+
+ val = readl(ctx->base + XSPDIF_IRQ_STS_REG);
+ if (val & XSPDIF_CH_STS_MASK) {
+ writel(val & XSPDIF_CH_STS_MASK,
+ ctx->base + XSPDIF_IRQ_STS_REG);
+ val = readl(ctx->base +
+ XSPDIF_IRQ_ENABLE_REG);
+ writel(val & ~XSPDIF_CH_STS_MASK,
+ ctx->base + XSPDIF_IRQ_ENABLE_REG);
+
+ ctx->rx_chsts_updated = true;
+ wake_up_interruptible(&ctx->chsts_q);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int xlnx_spdif_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ u32 val;
+ struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
+
+ val = readl(ctx->base + XSPDIF_CONTROL_REG);
+ val |= XSPDIF_FIFO_FLUSH_MASK;
+ writel(val, ctx->base + XSPDIF_CONTROL_REG);
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ writel(XSPDIF_CH_STS_MASK,
+ ctx->base + XSPDIF_IRQ_ENABLE_REG);
+ writel(XSPDIF_GLOBAL_IRQ_ENABLE,
+ ctx->base + XSPDIF_GLOBAL_IRQ_ENABLE_REG);
+ }
+
+ return 0;
+}
+
+static void xlnx_spdif_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
+
+ writel(XSPDIF_SOFT_RESET_VALUE, ctx->base + XSPDIF_SOFT_RESET_REG);
+}
+
+static int xlnx_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u32 val, clk_div, clk_cfg;
+ struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
+
+ clk_div = DIV_ROUND_CLOSEST(ctx->aclk, MAX_CHANNELS * AES_SAMPLE_WIDTH *
+ params_rate(params));
+
+ switch (clk_div) {
+ case 4:
+ clk_cfg = 0;
+ break;
+ case 8:
+ clk_cfg = 1;
+ break;
+ case 16:
+ clk_cfg = 2;
+ break;
+ case 24:
+ clk_cfg = 3;
+ break;
+ case 32:
+ clk_cfg = 4;
+ break;
+ case 48:
+ clk_cfg = 5;
+ break;
+ case 64:
+ clk_cfg = 6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = readl(ctx->base + XSPDIF_CONTROL_REG);
+ val &= ~XSPDIF_CLOCK_CONFIG_BITS_MASK;
+ val |= clk_cfg << XSPDIF_CLOCK_CONFIG_BITS_SHIFT;
+ writel(val, ctx->base + XSPDIF_CONTROL_REG);
+
+ return 0;
+}
+
+static int rx_stream_detect(struct snd_soc_dai *dai)
+{
+ int err;
+ struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
+ unsigned long jiffies = msecs_to_jiffies(CH_STATUS_UPDATE_TIMEOUT);
+
+ /* start capture only if a stream is detected within the 40 ms timeout */
+ err = wait_event_interruptible_timeout(ctx->chsts_q,
+ ctx->rx_chsts_updated,
+ jiffies);
+ if (!err) {
+ dev_err(dai->dev, "No streaming audio detected!\n");
+ return -EINVAL;
+ }
+ ctx->rx_chsts_updated = false;
+
+ return 0;
+}
+
+static int xlnx_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ u32 val;
+ int ret = 0;
+ struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
+
+ val = readl(ctx->base + XSPDIF_CONTROL_REG);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ val |= XSPDIF_CORE_ENABLE_MASK;
+ writel(val, ctx->base + XSPDIF_CONTROL_REG);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ ret = rx_stream_detect(dai);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ val &= ~XSPDIF_CORE_ENABLE_MASK;
+ writel(val, ctx->base + XSPDIF_CONTROL_REG);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dai_ops xlnx_spdif_dai_ops = {
+ .startup = xlnx_spdif_startup,
+ .shutdown = xlnx_spdif_shutdown,
+ .trigger = xlnx_spdif_trigger,
+ .hw_params = xlnx_spdif_hw_params,
+};
+
+static struct snd_soc_dai_driver xlnx_spdif_tx_dai = {
+ .name = "xlnx_spdif_tx",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = XLNX_SPDIF_RATES,
+ .formats = XLNX_SPDIF_FORMATS,
+ },
+ .ops = &xlnx_spdif_dai_ops,
+};
+
+static struct snd_soc_dai_driver xlnx_spdif_rx_dai = {
+ .name = "xlnx_spdif_rx",
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = XLNX_SPDIF_RATES,
+ .formats = XLNX_SPDIF_FORMATS,
+ },
+ .ops = &xlnx_spdif_dai_ops,
+};
+
+static const struct snd_soc_component_driver xlnx_spdif_component = {
+ .name = "xlnx-spdif",
+};
+
+static const struct of_device_id xlnx_spdif_of_match[] = {
+ { .compatible = "xlnx,spdif-2.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xlnx_spdif_of_match);
+
+static int xlnx_spdif_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+ struct snd_soc_dai_driver *dai_drv;
+ struct spdif_dev_data *ctx;
+
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->axi_clk = devm_clk_get(dev, "s_axi_aclk");
+ if (IS_ERR(ctx->axi_clk)) {
+ ret = PTR_ERR(ctx->axi_clk);
+ dev_err(dev, "failed to get s_axi_aclk(%d)\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(ctx->axi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable s_axi_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->base)) {
+ ret = PTR_ERR(ctx->base);
+ goto clk_err;
+ }
+ ret = of_property_read_u32(node, "xlnx,spdif-mode", &ctx->mode);
+ if (ret < 0) {
+ dev_err(dev, "cannot get SPDIF mode\n");
+ goto clk_err;
+ }
+ if (ctx->mode) {
+ dai_drv = &xlnx_spdif_tx_dai;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "No IRQ resource found\n");
+ ret = -ENODEV;
+ goto clk_err;
+ }
+ ret = devm_request_irq(dev, res->start,
+ xlnx_spdifrx_irq_handler,
+ 0, "XLNX_SPDIF_RX", ctx);
+ if (ret) {
+ dev_err(dev, "spdif rx irq request failed\n");
+ ret = -ENODEV;
+ goto clk_err;
+ }
+
+ init_waitqueue_head(&ctx->chsts_q);
+ dai_drv = &xlnx_spdif_rx_dai;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,aud_clk_i", &ctx->aclk);
+ if (ret < 0) {
+ dev_err(dev, "cannot get aud_clk_i value\n");
+ goto clk_err;
+ }
+
+ dev_set_drvdata(dev, ctx);
+
+ ret = devm_snd_soc_register_component(dev, &xlnx_spdif_component,
+ dai_drv, 1);
+ if (ret) {
+ dev_err(dev, "SPDIF component registration failed\n");
+ goto clk_err;
+ }
+
+ writel(XSPDIF_SOFT_RESET_VALUE, ctx->base + XSPDIF_SOFT_RESET_REG);
+ dev_info(dev, "%s DAI registered\n", dai_drv->name);
+
+ return 0;
+
+clk_err:
+ clk_disable_unprepare(ctx->axi_clk);
+ return ret;
+}
+
+static int xlnx_spdif_remove(struct platform_device *pdev)
+{
+ struct spdif_dev_data *ctx = dev_get_drvdata(&pdev->dev);
+
+ clk_disable_unprepare(ctx->axi_clk);
+ return 0;
+}
+
+static struct platform_driver xlnx_spdif_driver = {
+ .driver = {
+ .name = "xlnx-spdif",
+ .of_match_table = xlnx_spdif_of_match,
+ },
+ .probe = xlnx_spdif_probe,
+ .remove = xlnx_spdif_remove,
+};
+module_platform_driver(xlnx_spdif_driver);
+
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>");
+MODULE_DESCRIPTION("XILINX SPDIF driver");
+MODULE_LICENSE("GPL v2");
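A note on xlnx_spdif_hw_params() above: the clock-configuration field written to XSPDIF_CONTROL_REG is selected from the ratio of the audio clock to the AES bit rate (2 channels * 32 bits * sample rate). The sketch below restates that mapping as a standalone helper; the function name is illustrative, and the 49.152 MHz aud_clk_i in the worked example is an assumption rather than a value taken from the driver.

/*
 * Map clk_div = round(aclk / (2 ch * 32 bit * rate)) onto the clock-config
 * field (bits 5:2 of the control register). The divider table mirrors the
 * switch statement in xlnx_spdif_hw_params().
 */
static int spdif_clk_cfg(unsigned int aclk, unsigned int rate)
{
	static const unsigned int div_table[] = { 4, 8, 16, 24, 32, 48, 64 };
	unsigned int bitrate = 2 * 32 * rate;
	unsigned int div = (aclk + bitrate / 2) / bitrate; /* round to nearest */
	unsigned int i;

	for (i = 0; i < sizeof(div_table) / sizeof(div_table[0]); i++)
		if (div == div_table[i])
			return i;

	return -1;	/* this aclk/rate combination is not supported */
}

/* Example: spdif_clk_cfg(49152000, 48000) -> div 16 -> config value 2 */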
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index 503560916620..2f20a02c8d46 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -469,9 +469,9 @@ static int xtfpga_pcm_new(struct snd_soc_pcm_runtime *rtd)
struct snd_card *card = rtd->card->snd_card;
size_t size = xtfpga_pcm_hardware.buffer_bytes_max;
- return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
- SNDRV_DMA_TYPE_DEV,
- card->dev, size, size);
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
+ card->dev, size, size);
+ return 0;
}
static const struct snd_pcm_ops xtfpga_pcm_ops = {
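The xtfpga change above is the first of several hunks in this series (dbri, at73c213, usx2y, intel_hdmi_audio) that drop error handling around PCM buffer preallocation: snd_pcm_lib_preallocate_pages_for_all() and snd_pcm_lib_preallocate_pages() no longer return an error, so callers simply issue the call and return 0. A minimal sketch of the resulting pattern, with an illustrative callback name and buffer size:

#include <sound/pcm.h>
#include <sound/soc.h>

/* Hypothetical .pcm_new callback: preallocation cannot fail anymore,
 * so there is no error path around the helper call.
 */
static int example_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = 64 * 1024;	/* illustrative buffer size */

	snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
					      card->dev, size, size);
	return 0;
}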
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 7609eceba1a2..2b8ef5fe6688 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2243,12 +2243,9 @@ static int snd_dbri_pcm(struct snd_card *card)
pcm->info_flags = 0;
strcpy(pcm->name, card->shortname);
- if ((err = snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- 64 * 1024, 64 * 1024)) < 0)
- return err;
-
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ 64 * 1024, 64 * 1024);
return 0;
}
@@ -2510,16 +2507,10 @@ static void dbri_debug_read(struct snd_info_entry *entry,
static void snd_dbri_proc(struct snd_card *card)
{
struct snd_dbri *dbri = card->private_data;
- struct snd_info_entry *entry;
-
- if (!snd_card_proc_new(card, "regs", &entry))
- snd_info_set_text_ops(entry, dbri, dbri_regs_read);
+ snd_card_ro_proc_new(card, "regs", dbri, dbri_regs_read);
#ifdef DBRI_DEBUG
- if (!snd_card_proc_new(card, "debug", &entry)) {
- snd_info_set_text_ops(entry, dbri, dbri_debug_read);
- entry->mode = S_IFREG | 0444; /* Readable only. */
- }
+ snd_card_ro_proc_new(card, "debug", dbri, dbri_debug_read);
#endif
}
@@ -2541,8 +2532,8 @@ static int snd_dbri_create(struct snd_card *card,
dbri->op = op;
dbri->irq = irq;
- dbri->dma = dma_zalloc_coherent(&op->dev, sizeof(struct dbri_dma),
- &dbri->dma_dvma, GFP_KERNEL);
+ dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma),
+ &dbri->dma_dvma, GFP_KERNEL);
if (!dbri->dma)
return -ENOMEM;
diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c
index 1ef52edeb538..8707e0108471 100644
--- a/sound/spi/at73c213.c
+++ b/sound/spi/at73c213.c
@@ -350,7 +350,7 @@ static int snd_at73c213_pcm_new(struct snd_at73c213 *chip, int device)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &at73c213_playback_ops);
- retval = snd_pcm_lib_preallocate_pages_for_all(chip->pcm,
+ snd_pcm_lib_preallocate_pages_for_all(chip->pcm,
SNDRV_DMA_TYPE_DEV, &chip->ssc->pdev->dev,
64 * 1024, 64 * 1024);
out:
diff --git a/sound/synth/emux/emux_proc.c b/sound/synth/emux/emux_proc.c
index a82b4053bee8..c14781ac7941 100644
--- a/sound/synth/emux/emux_proc.c
+++ b/sound/synth/emux/emux_proc.c
@@ -115,10 +115,6 @@ void snd_emux_proc_init(struct snd_emux *emu, struct snd_card *card, int device)
entry->content = SNDRV_INFO_CONTENT_TEXT;
entry->private_data = emu;
entry->c.text.read = snd_emux_proc_info_read;
- if (snd_info_register(entry) < 0)
- snd_info_free_entry(entry);
- else
- emu->proc = entry;
}
void snd_emux_proc_free(struct snd_emux *emu)
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a105947eaf55..719e10034553 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
h1 = snd_usb_find_csint_desc(host_iface->extra,
host_iface->extralen,
NULL, UAC_HEADER);
- if (!h1) {
+ if (!h1 || h1->bLength < sizeof(*h1)) {
dev_err(&dev->dev, "cannot find UAC_HEADER\n");
return -EINVAL;
}
@@ -811,7 +811,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
if (!chip->num_suspended_intf++) {
list_for_each_entry(as, &chip->pcm_list, list) {
- snd_pcm_suspend_all(as->pcm);
snd_usb_pcm_suspend(as);
as->substream[0].need_setup_ep =
as->substream[1].need_setup_ep = true;
diff --git a/sound/usb/card.h b/sound/usb/card.h
index ac785d15ced4..79fa2a19fb7b 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -14,6 +14,7 @@ struct audioformat {
u64 formats; /* ALSA format bits */
unsigned int channels; /* # channels */
unsigned int fmt_type; /* USB audio format type (1-3) */
+ unsigned int fmt_bits; /* number of significant bits */
unsigned int frame_size; /* samples per frame for non-audio */
int iface; /* interface number */
unsigned char altsetting; /* corresponding alternate setting */
diff --git a/sound/usb/format.c b/sound/usb/format.c
index fd13ac11b136..3ee7d6f853b7 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -87,6 +87,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
}
}
+ fp->fmt_bits = sample_width;
+
if ((pcm_formats == 0) &&
(format == 0 || format == (1 << UAC_FORMAT_TYPE_I_UNDEFINED))) {
/* some devices don't define this correctly... */
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index c1376bfdc90b..7afe8fae4939 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -849,10 +849,8 @@ int line6_suspend(struct usb_interface *interface, pm_message_t message)
if (line6->properties->capabilities & LINE6_CAP_CONTROL)
line6_stop_listen(line6);
- if (line6pcm != NULL) {
- snd_pcm_suspend_all(line6pcm->pcm);
+ if (line6pcm != NULL)
line6pcm->flags = 0;
- }
return 0;
}
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index 020c81818951..ce45b6dab651 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -320,7 +320,8 @@ static void pod_startup4(struct work_struct *work)
line6_read_serial_number(&pod->line6, &pod->serial_number);
/* ALSA audio interface: */
- snd_card_register(line6->card);
+ if (snd_card_register(line6->card))
+ dev_err(line6->ifcdev, "Failed to register POD card.\n");
}
/* POD special files: */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c63c84b54969..73d7dff425c1 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
struct uac_mixer_unit_descriptor *desc)
{
int mu_channels;
+ void *c;
- if (desc->bLength < 11)
+ if (desc->bLength < sizeof(*desc))
return -EINVAL;
if (!desc->bNrInPins)
return -EINVAL;
@@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
case UAC_VERSION_1:
case UAC_VERSION_2:
default:
+ if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
+ return 0; /* no bmControls -> skip */
mu_channels = uac_mixer_unit_bNrChannels(desc);
break;
case UAC_VERSION_3:
@@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
}
if (!mu_channels)
- return -EINVAL;
+ return 0;
+
+ c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
+ if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
+ return 0; /* no bmControls -> skip */
return mu_channels;
}
@@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id,
struct uac_mixer_unit_descriptor *d = p1;
err = uac_mixer_unit_get_channels(state, d);
- if (err < 0)
+ if (err <= 0)
return err;
term->channels = err;
@@ -1828,7 +1835,7 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
{
int channels, i, j;
struct usb_audio_term iterm;
- unsigned int master_bits, first_ch_bits;
+ unsigned int master_bits;
int err, csize;
struct uac_feature_unit_descriptor *hdr = _ftr;
__u8 *bmaControls;
@@ -1919,10 +1926,6 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
break;
}
- if (channels > 0)
- first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
- else
- first_ch_bits = 0;
if (state->mixer->protocol == UAC_VERSION_1) {
/* check all control types */
@@ -2068,11 +2071,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
if (state->mixer->protocol == UAC_VERSION_2) {
struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
+ if (d_v2->bLength < sizeof(*d_v2))
+ return -EINVAL;
control = UAC2_TE_CONNECTOR;
term_id = d_v2->bTerminalID;
bmctls = le16_to_cpu(d_v2->bmControls);
} else if (state->mixer->protocol == UAC_VERSION_3) {
struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
+ if (d_v3->bLength < sizeof(*d_v3))
+ return -EINVAL;
control = UAC3_TE_INSERTION;
term_id = d_v3->bTerminalID;
bmctls = le32_to_cpu(d_v3->bmControls);
@@ -2118,7 +2125,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
if (err < 0)
continue;
/* no bmControls field (e.g. Maya44) -> ignore */
- if (desc->bLength <= 10 + input_pins)
+ if (!num_outs)
continue;
err = check_input_term(state, desc->baSourceID[pin], &iterm);
if (err < 0)
@@ -2314,7 +2321,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
char *name)
{
struct uac_processing_unit_descriptor *desc = raw_desc;
- int num_ins = desc->bNrInPins;
+ int num_ins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
int i, err, nameid, type, len;
@@ -2329,7 +2336,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
0, NULL, default_value_info
};
- if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
+ if (desc->bLength < 13) {
+ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+ return -EINVAL;
+ }
+
+ num_ins = desc->bNrInPins;
+ if (desc->bLength < 13 + num_ins ||
desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
return -EINVAL;
@@ -3428,7 +3441,6 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
.dev_free = snd_usb_mixer_dev_free
};
struct usb_mixer_interface *mixer;
- struct snd_info_entry *entry;
int err;
strcpy(chip->card->mixername, "USB Mixer");
@@ -3484,9 +3496,9 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
if (err < 0)
goto _error;
- if (list_empty(&chip->mixer_list) &&
- !snd_card_proc_new(chip->card, "usbmixer", &entry))
- snd_info_set_text_ops(entry, chip, snd_usb_mixer_proc_read);
+ if (list_empty(&chip->mixer_list))
+ snd_card_ro_proc_new(chip->card, "usbmixer", chip,
+ snd_usb_mixer_proc_read);
list_add(&mixer->list, &chip->mixer_list);
return 0;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 85ae0ff2382a..a751a18ca4c2 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -2195,7 +2195,6 @@ static int snd_rme_controls_create(struct usb_mixer_interface *mixer)
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
int err = 0;
- struct snd_info_entry *entry;
err = snd_usb_soundblaster_remote_init(mixer);
if (err < 0)
@@ -2214,9 +2213,8 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
err = snd_audigy2nx_controls_create(mixer);
if (err < 0)
break;
- if (!snd_card_proc_new(mixer->chip->card, "audigy2nx", &entry))
- snd_info_set_text_ops(entry, mixer,
- snd_audigy2nx_proc_read);
+ snd_card_ro_proc_new(mixer->chip->card, "audigy2nx",
+ mixer, snd_audigy2nx_proc_read);
break;
/* EMU0204 */
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 382847154227..056af0a57b22 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
return 0;
}
+/* Set up an implicit feedback endpoint from a quirk. Returns 0 if no quirk
+ * applies, or 1 if a quirk was found and the sync endpoint was set up.
+ */
static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
struct usb_device *dev,
struct usb_interface_descriptor *altsd,
@@ -351,6 +354,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ep = 0x81;
ifnum = 1;
goto add_sync_ep_from_ifnum;
+ case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */
+ ep = 0x84;
+ ifnum = 0;
+ goto add_sync_ep_from_ifnum;
}
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
@@ -384,7 +391,7 @@ add_sync_ep:
subs->data_endpoint->sync_master = subs->sync_endpoint;
- return 0;
+ return 1;
}
static int set_sync_endpoint(struct snd_usb_substream *subs,
@@ -423,6 +430,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
if (err < 0)
return err;
+ /* endpoint set by quirk */
+ if (err > 0)
+ return 0;
+
if (altsd->bNumEndpoints < 2)
return 0;
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 0ac89e294d31..ef9190530fd2 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -61,11 +61,10 @@ static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_
void snd_usb_audio_create_proc(struct snd_usb_audio *chip)
{
- struct snd_info_entry *entry;
- if (!snd_card_proc_new(chip->card, "usbbus", &entry))
- snd_info_set_text_ops(entry, chip, proc_audio_usbbus_read);
- if (!snd_card_proc_new(chip->card, "usbid", &entry))
- snd_info_set_text_ops(entry, chip, proc_audio_usbid_read);
+ snd_card_ro_proc_new(chip->card, "usbbus", chip,
+ proc_audio_usbbus_read);
+ snd_card_ro_proc_new(chip->card, "usbid", chip,
+ proc_audio_usbid_read);
}
/*
@@ -110,6 +109,7 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
if (subs->speed != USB_SPEED_FULL)
snd_iprintf(buffer, " Data packet interval: %d us\n",
125 * (1 << fp->datainterval));
+ snd_iprintf(buffer, " Bits: %d\n", fp->fmt_bits);
// snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize);
// snd_iprintf(buffer, " EP Attribute = %#x\n", fp->attributes);
}
@@ -167,12 +167,10 @@ static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_b
void snd_usb_proc_pcm_format_add(struct snd_usb_stream *stream)
{
- struct snd_info_entry *entry;
char name[32];
struct snd_card *card = stream->chip->card;
sprintf(name, "stream%d", stream->pcm_index);
- if (!snd_card_proc_new(card, name, &entry))
- snd_info_set_text_ops(entry, stream, proc_pcm_format_read);
+ snd_card_ro_proc_new(card, name, stream, proc_pcm_format_read);
}
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 37fc0447c071..86e80916a029 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
}
},
+ {
+ .ifnum = -1
+ },
}
}
},
@@ -3369,6 +3372,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
}
},
+ {
+ .ifnum = -1
+ },
}
}
},
@@ -3392,5 +3398,70 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.ifnum = QUIRK_NO_INTERFACE
}
},
+/* MOTU Microbook II */
+{
+ USB_DEVICE(0x07fd, 0x0004),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "MOTU",
+ .product_name = "MicroBookII",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 6,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x84,
+ .rates = SNDRV_PCM_RATE_96000,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rate_min = 96000,
+ .rate_max = 96000,
+ .nr_rates = 1,
+ .maxpacksize = 0x00d8,
+ .rate_table = (unsigned int[]) {
+ 96000
+ }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 8,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x03,
+ .rates = SNDRV_PCM_RATE_96000,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rate_min = 96000,
+ .rate_max = 96000,
+ .nr_rates = 1,
+ .maxpacksize = 0x0120,
+ .rate_table = (unsigned int[]) {
+ 96000
+ }
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 96340f23f86d..e6ce1bbe6ca6 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -768,7 +768,7 @@ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
* REG1: PLL binary search enable, soft mute enable.
*/
CM6206_REG1_PLLBIN_EN |
- CM6206_REG1_SOFT_MUTE_EN |
+ CM6206_REG1_SOFT_MUTE_EN,
/*
* REG2: enable output drivers,
* select front channels to the headphone output,
@@ -1000,6 +1000,105 @@ static int snd_usb_axefx3_boot_quirk(struct usb_device *dev)
return 0;
}
+
+#define MICROBOOK_BUF_SIZE 128
+
+static int snd_usb_motu_microbookii_communicate(struct usb_device *dev, u8 *buf,
+ int buf_size, int *length)
+{
+ int err, actual_length;
+
+ err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x01), buf, *length,
+ &actual_length, 1000);
+ if (err < 0)
+ return err;
+
+ print_hex_dump(KERN_DEBUG, "MicroBookII snd: ", DUMP_PREFIX_NONE, 16, 1,
+ buf, actual_length, false);
+
+ memset(buf, 0, buf_size);
+
+ err = usb_interrupt_msg(dev, usb_rcvintpipe(dev, 0x82), buf, buf_size,
+ &actual_length, 1000);
+ if (err < 0)
+ return err;
+
+ print_hex_dump(KERN_DEBUG, "MicroBookII rcv: ", DUMP_PREFIX_NONE, 16, 1,
+ buf, actual_length, false);
+
+ *length = actual_length;
+ return 0;
+}
+
+static int snd_usb_motu_microbookii_boot_quirk(struct usb_device *dev)
+{
+ int err, actual_length, poll_attempts = 0;
+ static const u8 set_samplerate_seq[] = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0b, 0x14,
+ 0x00, 0x00, 0x00, 0x01 };
+ static const u8 poll_ready_seq[] = { 0x00, 0x04, 0x00, 0x00,
+ 0x00, 0x00, 0x0b, 0x18 };
+ u8 *buf = kzalloc(MICROBOOK_BUF_SIZE, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ dev_info(&dev->dev, "Waiting for MOTU Microbook II to boot up...\n");
+
+ /* First we tell the device which sample rate to use. */
+ memcpy(buf, set_samplerate_seq, sizeof(set_samplerate_seq));
+ actual_length = sizeof(set_samplerate_seq);
+ err = snd_usb_motu_microbookii_communicate(dev, buf, MICROBOOK_BUF_SIZE,
+ &actual_length);
+
+ if (err < 0) {
+ dev_err(&dev->dev,
+ "failed setting the sample rate for Motu MicroBook II: %d\n",
+ err);
+ goto free_buf;
+ }
+
+ /* Then we poll every 100 ms until the device reports that it is ready. */
+ while (true) {
+ if (++poll_attempts > 100) {
+ dev_err(&dev->dev,
+ "failed booting Motu MicroBook II: timeout\n");
+ err = -ENODEV;
+ goto free_buf;
+ }
+
+ memset(buf, 0, MICROBOOK_BUF_SIZE);
+ memcpy(buf, poll_ready_seq, sizeof(poll_ready_seq));
+
+ actual_length = sizeof(poll_ready_seq);
+ err = snd_usb_motu_microbookii_communicate(
+ dev, buf, MICROBOOK_BUF_SIZE, &actual_length);
+ if (err < 0) {
+ dev_err(&dev->dev,
+ "failed booting Motu MicroBook II: communication error %d\n",
+ err);
+ goto free_buf;
+ }
+
+ /* the device signals its readiness through a message of the
+ * form
+ * XX 06 00 00 00 00 0b 18 00 00 00 01
+ * If the device is not yet ready to accept audio data, the
+ * last byte of that sequence is 00.
+ */
+ if (actual_length == 12 && buf[actual_length - 1] == 1)
+ break;
+
+ msleep(100);
+ }
+
+ dev_info(&dev->dev, "MOTU MicroBook II ready\n");
+
+free_buf:
+ kfree(buf);
+ return err;
+}
+
/*
* Setup quirks
*/
@@ -1177,6 +1276,8 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
return snd_usb_gamecon780_boot_quirk(dev);
case USB_ID(0x2466, 0x8010): /* Fractal Audio Axe-Fx 3 */
return snd_usb_axefx3_boot_quirk(dev);
+ case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */
+ return snd_usb_motu_microbookii_boot_quirk(dev);
}
return 0;
@@ -1479,10 +1580,6 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
/* XMOS based USB DACs */
switch (chip->usb_id) {
case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
- case USB_ID(0x20b1, 0x0002): /* Wyred 4 Sound DAC-2 DSD */
- case USB_ID(0x20b1, 0x2004): /* Matrix Audio X-SPDIF 2 */
- case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
- case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
case USB_ID(0x22d9, 0x0436): /* OPPO Sonica */
case USB_ID(0x22d9, 0x0461): /* OPPO UDP-205 */
@@ -1492,22 +1589,13 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
- case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
- case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
+ case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
+ case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
- case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
- case USB_ID(0x20b1, 0x2005): /* Denafrips Ares DAC */
- case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
- case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
- case USB_ID(0x20b1, 0x3021): /* Eastern El. MiniMax Tube DAC Supreme */
- case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
- case USB_ID(0x20b1, 0x302d): /* Unison Research Unico CD Due */
- case USB_ID(0x20b1, 0x307b): /* CH Precision C1 DAC */
- case USB_ID(0x20b1, 0x3086): /* Singxer F-1 converter board */
case USB_ID(0x22d9, 0x0426): /* OPPO HA-2 */
case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
@@ -1566,6 +1654,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case 0x20b1: /* XMOS based devices */
case 0x152a: /* Thesycon devices */
case 0x25ce: /* Mytek devices */
+ case 0x2ab6: /* T+A devices */
if (fp->dsd_raw)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
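For reference, the readiness condition polled by snd_usb_motu_microbookii_boot_quirk() above boils down to a single check on the 12-byte interrupt response; a standalone sketch of that predicate (the helper name is illustrative):

#include <stdbool.h>

/*
 * The device answers the poll with a 12-byte message of the form
 *   XX 06 00 00 00 00 0b 18 00 00 00 01
 * and the final byte flips from 0x00 to 0x01 once audio can be accepted.
 */
static bool microbookii_ready(const unsigned char *buf, int len)
{
	return len == 12 && buf[len - 1] == 0x01;
}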
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 67cf849aa16b..d9e3de495c16 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
if (!csep || csep->bLength < 7 ||
- csep->bDescriptorSubtype != UAC_EP_GENERAL) {
- usb_audio_warn(chip,
- "%u:%d : no or invalid class specific endpoint descriptor\n",
- iface_no, altsd->bAlternateSetting);
- return 0;
- }
+ csep->bDescriptorSubtype != UAC_EP_GENERAL)
+ goto error;
if (protocol == UAC_VERSION_1) {
attributes = csep->bmAttributes;
@@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
struct uac2_iso_endpoint_descriptor *csep2 =
(struct uac2_iso_endpoint_descriptor *) csep;
+ if (csep2->bLength < sizeof(*csep2))
+ goto error;
attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
/* emulate the endpoint attributes of a v1 device */
@@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
struct uac3_iso_endpoint_descriptor *csep3 =
(struct uac3_iso_endpoint_descriptor *) csep;
+ if (csep3->bLength < sizeof(*csep3))
+ goto error;
/* emulate the endpoint attributes of a v1 device */
if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
}
return attributes;
+
+ error:
+ usb_audio_warn(chip,
+ "%u:%d : no or invalid class specific endpoint descriptor\n",
+ iface_no, altsd->bAlternateSetting);
+ return 0;
}
/* find an input terminal descriptor (either UAC1 or UAC2) with the given
@@ -631,13 +637,17 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
*/
static void *
snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
- int terminal_id)
+ int terminal_id, bool uac23)
{
struct uac2_input_terminal_descriptor *term = NULL;
+ size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
+ sizeof(struct uac_input_terminal_descriptor);
while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
term, UAC_INPUT_TERMINAL))) {
+ if (term->bLength < minlen)
+ continue;
if (term->bTerminalID == terminal_id)
return term;
}
@@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface,
while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
term, UAC_OUTPUT_TERMINAL))) {
- if (term->bTerminalID == terminal_id)
+ if (term->bLength >= sizeof(*term) &&
+ term->bTerminalID == terminal_id)
return term;
}
@@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
format = le16_to_cpu(as->wFormatTag); /* remember the format value */
iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
- as->bTerminalLink);
+ as->bTerminalLink,
+ false);
if (iterm) {
num_channels = iterm->bNrChannels;
chconfig = le16_to_cpu(iterm->wChannelConfig);
@@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
* to extract the clock
*/
input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
- as->bTerminalLink);
+ as->bTerminalLink,
+ true);
if (input_term) {
clock = input_term->bCSourceID;
if (!chconfig && (num_channels == input_term->bNrChannels))
@@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
* to extract the clock
*/
input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
- as->bTerminalLink);
+ as->bTerminalLink,
+ true);
if (input_term) {
clock = input_term->bCSourceID;
goto found_clock;
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 2b833054e3b0..58974d094b27 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -981,18 +981,17 @@ static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint,
sprintf(pcm->name, NAME_ALLCAPS" Audio #%d", usX2Y(card)->pcm_devs);
- if ((playback_endpoint &&
- 0 > (err = snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- 64*1024, 128*1024))) ||
- 0 > (err = snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- 64*1024, 128*1024))) {
- snd_usX2Y_pcm_private_free(pcm);
- return err;
+ if (playback_endpoint) {
+ snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ 64*1024, 128*1024);
}
+
+ snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ 64*1024, 128*1024);
usX2Y(card)->pcm_devs++;
return 0;
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 4fd9276b8e50..714cf50d4a4c 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -736,17 +736,14 @@ int usX2Y_hwdep_pcm_new(struct snd_card *card)
pcm->info_flags = 0;
sprintf(pcm->name, NAME_ALLCAPS" hwdep Audio");
- if (0 > (err = snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- 64*1024, 128*1024)) ||
- 0 > (err = snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- 64*1024, 128*1024))) {
- return err;
- }
-
+ snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ 64*1024, 128*1024);
+ snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ 64*1024, 128*1024);
return 0;
}
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 00c92eb854ce..80f79ecffc71 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1651,18 +1651,6 @@ static int had_create_jack(struct snd_intelhad *ctx,
static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
{
struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
- int port;
-
- for_each_port(card_ctx, port) {
- struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
- struct snd_pcm_substream *substream;
-
- substream = had_substream_get(ctx);
- if (substream) {
- snd_pcm_suspend(substream);
- had_substream_put(ctx);
- }
- }
snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
@@ -1824,7 +1812,8 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
* try to allocate 600k buffer as default which is large enough
*/
snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV_UC, NULL,
+ SNDRV_DMA_TYPE_DEV_UC,
+ card->dev,
HAD_DEFAULT_BUFFER, HAD_MAX_BUFFER);
/* create controls */
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
+ PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h
deleted file mode 100644
index 985534d0b448..000000000000
--- a/tools/arch/powerpc/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * This file contains the system call numbers.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _UAPI_ASM_POWERPC_UNISTD_H_
-#define _UAPI_ASM_POWERPC_UNISTD_H_
-
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
-#define __NR_break 17
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_ftime 35
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_prof 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_lock 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_mpx 56
-#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_profil 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_ioperm 101
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
-#define __NR_iopl 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86 113
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_modify_ldt 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_query_module 166
-#define __NR_poll 167
-#define __NR_nfsservctl 168
-#define __NR_setresgid 169
-#define __NR_getresgid 170
-#define __NR_prctl 171
-#define __NR_rt_sigreturn 172
-#define __NR_rt_sigaction 173
-#define __NR_rt_sigprocmask 174
-#define __NR_rt_sigpending 175
-#define __NR_rt_sigtimedwait 176
-#define __NR_rt_sigqueueinfo 177
-#define __NR_rt_sigsuspend 178
-#define __NR_pread64 179
-#define __NR_pwrite64 180
-#define __NR_chown 181
-#define __NR_getcwd 182
-#define __NR_capget 183
-#define __NR_capset 184
-#define __NR_sigaltstack 185
-#define __NR_sendfile 186
-#define __NR_getpmsg 187 /* some people actually want streams */
-#define __NR_putpmsg 188 /* some people actually want streams */
-#define __NR_vfork 189
-#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
-#define __NR_readahead 191
-#ifndef __powerpc64__ /* these are 32-bit only */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#endif
-#define __NR_pciconfig_read 198
-#define __NR_pciconfig_write 199
-#define __NR_pciconfig_iobase 200
-#define __NR_multiplexer 201
-#define __NR_getdents64 202
-#define __NR_pivot_root 203
-#ifndef __powerpc64__
-#define __NR_fcntl64 204
-#endif
-#define __NR_madvise 205
-#define __NR_mincore 206
-#define __NR_gettid 207
-#define __NR_tkill 208
-#define __NR_setxattr 209
-#define __NR_lsetxattr 210
-#define __NR_fsetxattr 211
-#define __NR_getxattr 212
-#define __NR_lgetxattr 213
-#define __NR_fgetxattr 214
-#define __NR_listxattr 215
-#define __NR_llistxattr 216
-#define __NR_flistxattr 217
-#define __NR_removexattr 218
-#define __NR_lremovexattr 219
-#define __NR_fremovexattr 220
-#define __NR_futex 221
-#define __NR_sched_setaffinity 222
-#define __NR_sched_getaffinity 223
-/* 224 currently unused */
-#define __NR_tuxcall 225
-#ifndef __powerpc64__
-#define __NR_sendfile64 226
-#endif
-#define __NR_io_setup 227
-#define __NR_io_destroy 228
-#define __NR_io_getevents 229
-#define __NR_io_submit 230
-#define __NR_io_cancel 231
-#define __NR_set_tid_address 232
-#define __NR_fadvise64 233
-#define __NR_exit_group 234
-#define __NR_lookup_dcookie 235
-#define __NR_epoll_create 236
-#define __NR_epoll_ctl 237
-#define __NR_epoll_wait 238
-#define __NR_remap_file_pages 239
-#define __NR_timer_create 240
-#define __NR_timer_settime 241
-#define __NR_timer_gettime 242
-#define __NR_timer_getoverrun 243
-#define __NR_timer_delete 244
-#define __NR_clock_settime 245
-#define __NR_clock_gettime 246
-#define __NR_clock_getres 247
-#define __NR_clock_nanosleep 248
-#define __NR_swapcontext 249
-#define __NR_tgkill 250
-#define __NR_utimes 251
-#define __NR_statfs64 252
-#define __NR_fstatfs64 253
-#ifndef __powerpc64__
-#define __NR_fadvise64_64 254
-#endif
-#define __NR_rtas 255
-#define __NR_sys_debug_setcontext 256
-/* Number 257 is reserved for vserver */
-#define __NR_migrate_pages 258
-#define __NR_mbind 259
-#define __NR_get_mempolicy 260
-#define __NR_set_mempolicy 261
-#define __NR_mq_open 262
-#define __NR_mq_unlink 263
-#define __NR_mq_timedsend 264
-#define __NR_mq_timedreceive 265
-#define __NR_mq_notify 266
-#define __NR_mq_getsetattr 267
-#define __NR_kexec_load 268
-#define __NR_add_key 269
-#define __NR_request_key 270
-#define __NR_keyctl 271
-#define __NR_waitid 272
-#define __NR_ioprio_set 273
-#define __NR_ioprio_get 274
-#define __NR_inotify_init 275
-#define __NR_inotify_add_watch 276
-#define __NR_inotify_rm_watch 277
-#define __NR_spu_run 278
-#define __NR_spu_create 279
-#define __NR_pselect6 280
-#define __NR_ppoll 281
-#define __NR_unshare 282
-#define __NR_splice 283
-#define __NR_tee 284
-#define __NR_vmsplice 285
-#define __NR_openat 286
-#define __NR_mkdirat 287
-#define __NR_mknodat 288
-#define __NR_fchownat 289
-#define __NR_futimesat 290
-#ifdef __powerpc64__
-#define __NR_newfstatat 291
-#else
-#define __NR_fstatat64 291
-#endif
-#define __NR_unlinkat 292
-#define __NR_renameat 293
-#define __NR_linkat 294
-#define __NR_symlinkat 295
-#define __NR_readlinkat 296
-#define __NR_fchmodat 297
-#define __NR_faccessat 298
-#define __NR_get_robust_list 299
-#define __NR_set_robust_list 300
-#define __NR_move_pages 301
-#define __NR_getcpu 302
-#define __NR_epoll_pwait 303
-#define __NR_utimensat 304
-#define __NR_signalfd 305
-#define __NR_timerfd_create 306
-#define __NR_eventfd 307
-#define __NR_sync_file_range2 308
-#define __NR_fallocate 309
-#define __NR_subpage_prot 310
-#define __NR_timerfd_settime 311
-#define __NR_timerfd_gettime 312
-#define __NR_signalfd4 313
-#define __NR_eventfd2 314
-#define __NR_epoll_create1 315
-#define __NR_dup3 316
-#define __NR_pipe2 317
-#define __NR_inotify_init1 318
-#define __NR_perf_event_open 319
-#define __NR_preadv 320
-#define __NR_pwritev 321
-#define __NR_rt_tgsigqueueinfo 322
-#define __NR_fanotify_init 323
-#define __NR_fanotify_mark 324
-#define __NR_prlimit64 325
-#define __NR_socket 326
-#define __NR_bind 327
-#define __NR_connect 328
-#define __NR_listen 329
-#define __NR_accept 330
-#define __NR_getsockname 331
-#define __NR_getpeername 332
-#define __NR_socketpair 333
-#define __NR_send 334
-#define __NR_sendto 335
-#define __NR_recv 336
-#define __NR_recvfrom 337
-#define __NR_shutdown 338
-#define __NR_setsockopt 339
-#define __NR_getsockopt 340
-#define __NR_sendmsg 341
-#define __NR_recvmsg 342
-#define __NR_recvmmsg 343
-#define __NR_accept4 344
-#define __NR_name_to_handle_at 345
-#define __NR_open_by_handle_at 346
-#define __NR_clock_adjtime 347
-#define __NR_syncfs 348
-#define __NR_sendmmsg 349
-#define __NR_setns 350
-#define __NR_process_vm_readv 351
-#define __NR_process_vm_writev 352
-#define __NR_finit_module 353
-#define __NR_kcmp 354
-#define __NR_sched_setattr 355
-#define __NR_sched_getattr 356
-#define __NR_renameat2 357
-#define __NR_seccomp 358
-#define __NR_getrandom 359
-#define __NR_memfd_create 360
-#define __NR_bpf 361
-#define __NR_execveat 362
-#define __NR_switch_endian 363
-#define __NR_userfaultfd 364
-#define __NR_membarrier 365
-#define __NR_mlock2 378
-#define __NR_copy_file_range 379
-#define __NR_preadv2 380
-#define __NR_pwritev2 381
-#define __NR_kexec_file_load 382
-#define __NR_statx 383
-#define __NR_pkey_alloc 384
-#define __NR_pkey_free 385
-#define __NR_pkey_mprotect 386
-#define __NR_rseq 387
-#define __NR_io_pgetevents 388
-
-#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..0b3cb52fd29d
--- /dev/null
+++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
+#define _UAPI_ASM_RISCV_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
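
The new header derives __BITS_PER_LONG from the compiler's pointer width, so the same file serves rv32 and rv64. A minimal standalone sketch of that identity (ordinary userspace C, not kernel code; it assumes an LP64 or ILP32 ABI where long and pointers have the same width):

	/* Sketch: __SIZEOF_POINTER__ * 8 matches the native word size on the
	 * Linux RISC-V ABIs. Build for a 32-bit or 64-bit target to see the
	 * value change.
	 */
	#include <stdio.h>

	#define MY_BITS_PER_LONG (__SIZEOF_POINTER__ * 8)

	_Static_assert(MY_BITS_PER_LONG == sizeof(long) * 8,
		       "pointer width and long width differ on this ABI");

	int main(void)
	{
		printf("__BITS_PER_LONG would be %d here\n", MY_BITS_PER_LONG);
		return 0;
	}
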
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 28c4a502b419..6d6122524711 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -281,9 +281,11 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 33833d1909af..a5ea841cc6d2 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
#endif
+#ifdef CONFIG_X86_SMAP
+# define DISABLE_SMAP 0
+#else
+# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
+#endif
+
#ifdef CONFIG_X86_INTEL_UMIP
# define DISABLE_UMIP 0
#else
@@ -68,7 +74,7 @@
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
-#define DISABLED_MASK9 (DISABLE_MPX)
+#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
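
DISABLE_SMAP lands in DISABLED_MASK9 because x86 feature bits are grouped 32 per word and X86_FEATURE_SMAP sits in word 9. A small sketch of that arithmetic, assuming the (9*32 + 20) definition from cpufeatures.h; illustration only, not kernel code:

	/* Word/bit split behind the DISABLED_MASK9 change, assuming
	 * X86_FEATURE_SMAP is (9*32 + 20) as defined in cpufeatures.h.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define X86_FEATURE_SMAP	(9*32 + 20)
	#define DISABLE_SMAP		(1u << (X86_FEATURE_SMAP & 31))

	int main(void)
	{
		assert(X86_FEATURE_SMAP / 32 == 9);	/* feature word -> DISABLED_MASK9 */
		assert(X86_FEATURE_SMAP % 32 == 20);	/* bit within that word */
		printf("DISABLE_SMAP = 0x%08x\n", DISABLE_SMAP);
		return 0;
	}
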
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index d07ccf8a23f7..9bb9ace54ba8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -17,8 +17,8 @@ SYNOPSIS
*COMMANDS* :=
{ **show** | **list** | **tree** | **attach** | **detach** | **help** }
-MAP COMMANDS
-=============
+CGROUP COMMANDS
+===============
| **bpftool** **cgroup { show | list }** *CGROUP*
| **bpftool** **cgroup tree** [*CGROUP_ROOT*]
@@ -142,5 +142,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
new file mode 100644
index 000000000000..82de03dd8f52
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -0,0 +1,85 @@
+===============
+bpftool-feature
+===============
+-------------------------------------------------------------------------------
+tool for inspection of eBPF-related parameters for Linux kernel or net device
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **feature** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+ *COMMANDS* := { **probe** | **help** }
+
+FEATURE COMMANDS
+================
+
+| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature help**
+|
+| *COMPONENT* := { **kernel** | **dev** *NAME* }
+
+DESCRIPTION
+===========
+ **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]]
+ Probe the running kernel and dump a number of eBPF-related
+ parameters, such as availability of the **bpf()** system call,
+ JIT status, eBPF program types availability, eBPF helper
+ functions availability, and more.
+
+ If the **macros** keyword (but not the **-j** option) is
+ passed, a subset of the output is dumped as a list of
+ **#define** macros that are ready to be included in a C
+ header file, for example. If, additionally, **prefix** is
+ used to define a *PREFIX*, the provided string will be used
+ as a prefix to the names of the macros: this can be used to
+ avoid conflicts on macro names when including the output of
+ this command as a header file.
+
+ Keyword **kernel** can be omitted. If no probe target is
+ specified, probing the kernel is the default behaviour.
+
+ Note that when probed, some eBPF helpers (e.g.
+ **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may
+ print warnings to kernel logs.
+
+ **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]]
+ Probe network device for supported eBPF features and dump
+ results to the console.
+
+ The two keywords **macros** and **prefix** have the same
+ role as when probing the kernel.
+
+ **bpftool feature help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -v, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+SEE ALSO
+========
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8)
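
The macros output described above is meant to be captured into a header and tested with the preprocessor. A hedged sketch of a consumer, assuming the probe output was saved as bpf_features.h with prefix MYPFX_ (both names are made up here, and the exact set of defined macros depends on the running kernel):

	/* Sketch only: consumes the output of
	 *   bpftool feature probe kernel macros prefix MYPFX_ > bpf_features.h
	 * The names follow the "#define <prefix>HAVE_*" and
	 * <prefix>HAVE_PROG_TYPE_HELPER() patterns emitted by feature.c later
	 * in this patch.
	 */
	#include <stdio.h>

	#include "bpf_features.h"

	int main(void)
	{
	#ifdef MYPFX_HAVE_BPF_SYSCALL
		puts("bpf() syscall available");
	#if MYPFX_HAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)
		puts("bpf_redirect() usable from XDP programs");
	#endif
	#else
		puts("bpf() syscall not available");
	#endif
		return 0;
	}
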
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 64b001b4f777..5c984ffc9f01 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -25,12 +25,17 @@ MAP COMMANDS
| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
| **bpftool** **map dump** *MAP*
-| **bpftool** **map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
-| **bpftool** **map lookup** *MAP* **key** *DATA*
+| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
+| **bpftool** **map lookup** *MAP* [**key** *DATA*]
| **bpftool** **map getnext** *MAP* [**key** *DATA*]
| **bpftool** **map delete** *MAP* **key** *DATA*
| **bpftool** **map pin** *MAP* *FILE*
| **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
+| **bpftool** **map peek** *MAP*
+| **bpftool** **map push** *MAP* **value** *VALUE*
+| **bpftool** **map pop** *MAP*
+| **bpftool** **map enqueue** *MAP* **value** *VALUE*
+| **bpftool** **map dequeue** *MAP*
| **bpftool** **map help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -62,7 +67,7 @@ DESCRIPTION
**bpftool map dump** *MAP*
Dump all entries in a given *MAP*.
- **bpftool map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
+ **bpftool map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
Update map entry for a given *KEY*.
*UPDATE_FLAGS* can be one of: **any** update existing entry
@@ -75,7 +80,7 @@ DESCRIPTION
the bytes are parsed as decimal values, unless a "0x" prefix
(for hexadecimal) or a "0" prefix (for octal) is provided.
- **bpftool map lookup** *MAP* **key** *DATA*
+ **bpftool map lookup** *MAP* [**key** *DATA*]
Lookup **key** in the map.
**bpftool map getnext** *MAP* [**key** *DATA*]
@@ -107,6 +112,21 @@ DESCRIPTION
replace any existing ring. Any other application will stop
receiving events if it installed its rings earlier.
+ **bpftool map peek** *MAP*
+ Peek next **value** in the queue or stack.
+
+ **bpftool map push** *MAP* **value** *VALUE*
+ Push **value** onto the stack.
+
+ **bpftool map pop** *MAP*
+ Pop and print **value** from the stack.
+
+ **bpftool map enqueue** *MAP* **value** *VALUE*
+ Enqueue **value** into the queue.
+
+ **bpftool map dequeue** *MAP*
+ Dequeue and print **value** from the queue.
+
**bpftool map help**
Print short help message.
@@ -236,5 +256,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-prog**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
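
The new peek, push, pop, enqueue and dequeue subcommands are thin wrappers over the queue/stack map operations: push and enqueue go through BPF_MAP_UPDATE_ELEM with a NULL key, pop and dequeue through BPF_MAP_LOOKUP_AND_DELETE_ELEM, as the map.c changes further down show. A sketch of the same operations through libbpf, against a hypothetical pinned BPF_MAP_TYPE_QUEUE map holding 4-byte values (the pin path is made up; error handling trimmed):

	/* Sketch, not bpftool code: push/pop on a queue map via libbpf.
	 * The header path assumes an installed libbpf.
	 */
	#include <stdio.h>

	#include <bpf/bpf.h>

	int main(void)
	{
		__u32 val = 42;
		int fd = bpf_obj_get("/sys/fs/bpf/my_queue");

		if (fd < 0)
			return 1;

		/* bpftool map push/enqueue: update with a NULL key */
		if (bpf_map_update_elem(fd, NULL, &val, 0))
			perror("push");

		/* bpftool map pop/dequeue: lookup-and-delete with a NULL key */
		val = 0;
		if (bpf_map_lookup_and_delete_elem(fd, NULL, &val))
			perror("pop");
		else
			printf("popped %u\n", val);

		return 0;
	}
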
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index ed87c9b619ad..779dab3650ee 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -142,4 +142,5 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index f4c5e5538bb8..bca5590a80d0 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -84,4 +84,5 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 58c8369b77dd..9386bd6e0396 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -18,7 +18,7 @@ SYNOPSIS
{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load**
| **loadall** | **help** }
-MAP COMMANDS
+PROG COMMANDS
=============
| **bpftool** **prog { show | list }** [*PROG*]
@@ -42,7 +42,7 @@ MAP COMMANDS
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6**
| }
| *ATTACH_TYPE* := {
-| **msg_verdict** | **skb_verdict** | **skb_parse** | **flow_dissector**
+| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
| }
@@ -171,7 +171,7 @@ EXAMPLES
::
- 10: xdp name some_prog tag 005a3d2123620c8b gpl
+ 10: xdp name some_prog tag 005a3d2123620c8b gpl run_time_ns 81632 run_cnt 10
loaded_at 2017-09-29T20:11:00+0000 uid 0
xlated 528B jited 370B memlock 4096B map_ids 10
@@ -184,6 +184,8 @@ EXAMPLES
"type": "xdp",
"tag": "005a3d2123620c8b",
"gpl_compatible": true,
+ "run_time_ns": 81632,
+ "run_cnt": 10,
"loaded_at": 1506715860,
"uid": 0,
"bytes_xlated": 528,
@@ -258,5 +260,6 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index e1677e81ed59..4f2188845dd8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,7 +16,7 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** }
+ *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }
*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
@@ -34,6 +34,8 @@ SYNOPSIS
*NET-COMMANDS* := { **show** | **list** | **help** }
+ *FEATURE-COMMANDS* := { **probe** | **help** }
+
DESCRIPTION
===========
*bpftool* allows for inspection and simple modification of BPF objects
@@ -72,5 +74,6 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
**bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 492f0f24e2d3..4ad1f0894d53 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c
SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
ifeq ($(feature-libbfd),1)
+ LIBS += -lbfd -ldl -lopcodes
+else ifeq ($(feature-libbfd-liberty),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty
+else ifeq ($(feature-libbfd-liberty-z),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty -lz
+endif
+
+ifneq ($(filter -lbfd,$(LIBS)),)
CFLAGS += -DHAVE_LIBBFD_SUPPORT
SRCS += $(BFD_SRCS)
-LIBS += -lbfd -lopcodes
endif
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index e4e4fab1b8c7..b803827d01e8 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -50,14 +50,15 @@ _bpftool_get_map_ids()
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
-_bpftool_get_perf_map_ids()
+# Takes map type and adds matching map ids to the list of suggestions.
+_bpftool_get_map_ids_for_type()
{
+ local type="$1"
COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
- command grep -C2 perf_event_array | \
+ command grep -C2 "$type" | \
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
-
_bpftool_get_prog_ids()
{
COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
@@ -99,15 +100,25 @@ _sysfs_get_netdevs()
"$cur" ) )
}
-# For bpftool map update: retrieve type of the map to update.
-_bpftool_map_update_map_type()
+# Retrieve type of the map that we are operating on.
+_bpftool_map_guess_map_type()
{
local keyword ref
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
- if [[ ${words[$((idx-2))]} == "update" ]]; then
- keyword=${words[$((idx-1))]}
- ref=${words[$((idx))]}
- fi
+ case "${words[$((idx-2))]}" in
+ lookup|update)
+ keyword=${words[$((idx-1))]}
+ ref=${words[$((idx))]}
+ ;;
+ push)
+ printf "stack"
+ return 0
+ ;;
+ enqueue)
+ printf "queue"
+ return 0
+ ;;
+ esac
done
[[ -z $ref ]] && return 0
@@ -119,6 +130,8 @@ _bpftool_map_update_map_type()
_bpftool_map_update_get_id()
{
+ local command="$1"
+
# Is it the map to update, or a map to insert into the map to update?
# Search for "value" keyword.
local idx value
@@ -128,11 +141,24 @@ _bpftool_map_update_get_id()
break
fi
done
- [[ $value -eq 0 ]] && _bpftool_get_map_ids && return 0
+ if [[ $value -eq 0 ]]; then
+ case "$command" in
+ push)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ enqueue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
+ return 0
+ fi
# Id to complete is for a value. It can be either prog id or map id. This
# depends on the type of the map to update.
- local type=$(_bpftool_map_update_map_type)
+ local type=$(_bpftool_map_guess_map_type)
case $type in
array_of_maps|hash_of_maps)
_bpftool_get_map_ids
@@ -285,8 +311,8 @@ _bpftool()
return 0
;;
5)
- COMPREPLY=( $( compgen -W 'msg_verdict skb_verdict \
- skb_parse flow_dissector' -- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'msg_verdict stream_verdict \
+ stream_parser flow_dissector' -- "$cur" ) )
return 0
;;
6)
@@ -382,14 +408,28 @@ _bpftool()
map)
local MAP_TYPE='id pinned'
case $command in
- show|list|dump)
+ show|list|dump|peek|pop|dequeue)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
id)
- _bpftool_get_map_ids
+ case "$command" in
+ peek)
+ _bpftool_get_map_ids_for_type stack
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ pop)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ dequeue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
return 0
;;
*)
@@ -447,19 +487,25 @@ _bpftool()
COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) )
;;
*)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ return 0
+ ;;
+ esac
+
_bpftool_once_attr 'key'
return 0
;;
esac
;;
- update)
+ update|push|enqueue)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
id)
- _bpftool_map_update_get_id
+ _bpftool_map_update_get_id $command
return 0
;;
key)
@@ -468,7 +514,7 @@ _bpftool()
value)
# We can have bytes, or references to a prog or a
# map, depending on the type of the map to update.
- case $(_bpftool_map_update_map_type) in
+ case "$(_bpftool_map_guess_map_type)" in
array_of_maps|hash_of_maps)
local MAP_TYPE='id pinned'
COMPREPLY+=( $( compgen -W "$MAP_TYPE" \
@@ -490,6 +536,13 @@ _bpftool()
return 0
;;
*)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ _bpftool_once_attr 'value'
+ return 0;
+ ;;
+ esac
+
_bpftool_once_attr 'key'
local UPDATE_FLAGS='any exist noexist'
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
@@ -508,6 +561,7 @@ _bpftool()
return 0
fi
done
+
return 0
;;
esac
@@ -527,7 +581,7 @@ _bpftool()
return 0
;;
id)
- _bpftool_get_perf_map_ids
+ _bpftool_get_map_ids_for_type perf_event_array
return 0
;;
cpu)
@@ -546,7 +600,8 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'delete dump getnext help \
- lookup pin event_pipe show list update create' -- \
+ lookup pin event_pipe show list update create \
+ peek push enqueue pop dequeue' -- \
"$cur" ) )
;;
esac
@@ -624,6 +679,25 @@ _bpftool()
;;
esac
;;
+ feature)
+ case $command in
+ probe)
+ [[ $prev == "dev" ]] && _sysfs_get_netdevs && return 0
+ [[ $prev == "prefix" ]] && return 0
+ if _bpftool_search_list 'macros'; then
+ COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) )
+ else
+ COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
+ fi
+ _bpftool_one_of_list 'kernel dev'
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help probe' -- "$cur" ) )
+ ;;
+ esac
+ ;;
esac
} &&
complete -F _bpftool bpftool
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 3f0629edbca5..e63bce0755eb 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -73,37 +73,104 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
return ret;
}
+static void btf_int128_print(json_writer_t *jw, const void *data,
+ bool is_plain_text)
+{
+ /* data points to a __int128 number.
+ * Suppose
+ * int128_num = *(__int128 *)data;
+	 * The formulas below show what upper_num and lower_num represent:
+ * upper_num = int128_num >> 64;
+ * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
+ */
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = *(__u64 *)data;
+ lower_num = *(__u64 *)(data + 8);
+#else
+ upper_num = *(__u64 *)(data + 8);
+ lower_num = *(__u64 *)data;
+#endif
+
+ if (is_plain_text) {
+ if (upper_num == 0)
+ jsonw_printf(jw, "0x%llx", lower_num);
+ else
+ jsonw_printf(jw, "0x%llx%016llx", upper_num, lower_num);
+ } else {
+ if (upper_num == 0)
+ jsonw_printf(jw, "\"0x%llx\"", lower_num);
+ else
+ jsonw_printf(jw, "\"0x%llx%016llx\"", upper_num, lower_num);
+ }
+}
+
+static void btf_int128_shift(__u64 *print_num, u16 left_shift_bits,
+ u16 right_shift_bits)
+{
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = print_num[0];
+ lower_num = print_num[1];
+#else
+ upper_num = print_num[1];
+ lower_num = print_num[0];
+#endif
+
+	/* shake out unneeded bits by shift/or operations */
+ if (left_shift_bits >= 64) {
+ upper_num = lower_num << (left_shift_bits - 64);
+ lower_num = 0;
+ } else {
+ upper_num = (upper_num << left_shift_bits) |
+ (lower_num >> (64 - left_shift_bits));
+ lower_num = lower_num << left_shift_bits;
+ }
+
+ if (right_shift_bits >= 64) {
+ lower_num = upper_num >> (right_shift_bits - 64);
+ upper_num = 0;
+ } else {
+ lower_num = (lower_num >> right_shift_bits) |
+ (upper_num << (64 - right_shift_bits));
+ upper_num = upper_num >> right_shift_bits;
+ }
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ print_num[0] = upper_num;
+ print_num[1] = lower_num;
+#else
+ print_num[0] = lower_num;
+ print_num[1] = upper_num;
+#endif
+}
+
static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int left_shift_bits, right_shift_bits;
+ __u64 print_num[2] = {};
int bytes_to_copy;
int bits_to_copy;
- __u64 print_num;
- data += BITS_ROUNDDOWN_BYTES(bit_offset);
- bit_offset = BITS_PER_BYTE_MASKED(bit_offset);
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
- print_num = 0;
- memcpy(&print_num, data, bytes_to_copy);
+ memcpy(print_num, data, bytes_to_copy);
#if defined(__BIG_ENDIAN_BITFIELD)
left_shift_bits = bit_offset;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- left_shift_bits = 64 - bits_to_copy;
+ left_shift_bits = 128 - bits_to_copy;
#else
#error neither big nor little endian
#endif
- right_shift_bits = 64 - nr_bits;
+ right_shift_bits = 128 - nr_bits;
- print_num <<= left_shift_bits;
- print_num >>= right_shift_bits;
- if (is_plain_text)
- jsonw_printf(jw, "0x%llx", print_num);
- else
- jsonw_printf(jw, "%llu", print_num);
+ btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
+ btf_int128_print(jw, print_num, is_plain_text);
}
@@ -115,10 +182,12 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
int total_bits_offset;
/* bits_offset is at most 7.
- * BTF_INT_OFFSET() cannot exceed 64 bits.
+ * BTF_INT_OFFSET() cannot exceed 128 bits.
*/
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
- btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw,
+ data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+ bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ btf_dumper_bitfield(nr_bits, bit_offset, data, jw,
is_plain_text);
}
@@ -139,6 +208,11 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
return 0;
}
+ if (nr_bits == 128) {
+ btf_int128_print(jw, data, is_plain_text);
+ return 0;
+ }
+
switch (BTF_INT_ENCODING(*int_type)) {
case 0:
if (BTF_INT_BITS(*int_type) == 64)
@@ -216,11 +290,12 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
}
jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
+ data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
if (bitfield_size) {
- btf_dumper_bitfield(bitfield_size, bit_offset,
- data, d->jw, d->is_plain_text);
+ btf_dumper_bitfield(bitfield_size,
+ BITS_PER_BYTE_MASKED(bit_offset),
+ data_off, d->jw, d->is_plain_text);
} else {
- data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
ret = btf_dumper_do_type(d, m[i].type,
BITS_PER_BYTE_MASKED(bit_offset),
data_off);
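
The btf_dumper changes above widen bitfield handling from 64 to 128 bits by keeping the number as two __u64 halves and shifting across the boundary. A standalone sketch that checks the two-halves scheme against the compiler's unsigned __int128 (a GCC/Clang extension; a little-endian host is assumed for the byte layout):

	/* Extract an arbitrary bitfield from a 128-bit little-endian value,
	 * once with unsigned __int128 and once with the same two-__u64 shift
	 * scheme as btf_int128_shift(), and check that they agree.
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void shift128(uint64_t n[2], unsigned int left, unsigned int right)
	{
		/* little-endian layout: n[0] = low 64 bits, n[1] = high 64 bits */
		if (left >= 64) {
			n[1] = n[0] << (left - 64);
			n[0] = 0;
		} else if (left) {
			n[1] = (n[1] << left) | (n[0] >> (64 - left));
			n[0] <<= left;
		}
		if (right >= 64) {
			n[0] = n[1] >> (right - 64);
			n[1] = 0;
		} else if (right) {
			n[0] = (n[0] >> right) | (n[1] << (64 - right));
			n[1] >>= right;
		}
	}

	int main(void)
	{
		unsigned __int128 v = ((unsigned __int128)0x0123456789abcdefULL << 64) |
				      0xfedcba9876543210ULL;
		unsigned int bit_offset = 60, nr_bits = 20;
		uint64_t n[2], want;

		memcpy(n, &v, sizeof(n));
		shift128(n, 128 - (bit_offset + nr_bits), 128 - nr_bits);

		want = (uint64_t)((v >> bit_offset) &
				  (((unsigned __int128)1 << nr_bits) - 1));
		assert(n[0] == want && n[1] == 0);
		printf("bits [%u, %u): 0x%llx\n", bit_offset,
		       bit_offset + nr_bits, (unsigned long long)n[0]);
		return 0;
	}
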
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index 31f0db41513f..3e21f994f262 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -157,6 +157,11 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
return false;
}
+static bool is_jmp_insn(u8 code)
+{
+ return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
+}
+
static bool func_partition_bb_head(struct func_node *func)
{
struct bpf_insn *cur, *end;
@@ -170,7 +175,7 @@ static bool func_partition_bb_head(struct func_node *func)
return true;
for (; cur <= end; cur++) {
- if (BPF_CLASS(cur->code) == BPF_JMP) {
+ if (is_jmp_insn(cur->code)) {
u8 opcode = BPF_OP(cur->code);
if (opcode == BPF_EXIT || opcode == BPF_CALL)
@@ -296,7 +301,7 @@ static bool func_add_bb_edges(struct func_node *func)
e->src = bb;
insn = bb->tail;
- if (BPF_CLASS(insn->code) != BPF_JMP ||
+ if (!is_jmp_insn(insn->code) ||
BPF_OP(insn->code) == BPF_EXIT) {
e->dst = bb_next(bb);
e->flags |= EDGE_FLAG_FALLTHROUGH;
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 897483457bf0..f7261fad45c1 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key)
snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
fdi = fopen(path, "r");
- if (!fdi) {
- p_err("can't open fdinfo: %s", strerror(errno));
+ if (!fdi)
return NULL;
- }
while ((n = getline(&line, &line_n, fdi)) > 0) {
char *value;
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key)
value = strchr(line, '\t');
if (!value || !value[1]) {
- p_err("malformed fdinfo!?");
free(line);
return NULL;
}
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key)
return line;
}
- p_err("key '%s' not found in fdinfo", key);
free(line);
fclose(fdi);
return NULL;
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
new file mode 100644
index 000000000000..d672d9086fff
--- /dev/null
+++ b/tools/bpf/bpftool/feature.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/utsname.h>
+#include <sys/vfs.h>
+
+#include <linux/filter.h>
+#include <linux/limits.h>
+
+#include <bpf.h>
+#include <libbpf.h>
+
+#include "main.h"
+
+#ifndef PROC_SUPER_MAGIC
+# define PROC_SUPER_MAGIC 0x9fa0
+#endif
+
+enum probe_component {
+ COMPONENT_UNSPEC,
+ COMPONENT_KERNEL,
+ COMPONENT_DEVICE,
+};
+
+#define BPF_HELPER_MAKE_ENTRY(name) [BPF_FUNC_ ## name] = "bpf_" # name
+static const char * const helper_name[] = {
+ __BPF_FUNC_MAPPER(BPF_HELPER_MAKE_ENTRY)
+};
+
+#undef BPF_HELPER_MAKE_ENTRY
+
+/* Miscellaneous utility functions */
+
+static bool check_procfs(void)
+{
+ struct statfs st_fs;
+
+ if (statfs("/proc", &st_fs) < 0)
+ return false;
+ if ((unsigned long)st_fs.f_type != PROC_SUPER_MAGIC)
+ return false;
+
+ return true;
+}
+
+static void uppercase(char *str, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len && str[i] != '\0'; i++)
+ str[i] = toupper(str[i]);
+}
+
+/* Printing utility functions */
+
+static void
+print_bool_feature(const char *feat_name, const char *plain_name,
+ const char *define_name, bool res, const char *define_prefix)
+{
+ if (json_output)
+ jsonw_bool_field(json_wtr, feat_name, res);
+ else if (define_prefix)
+ printf("#define %s%sHAVE_%s\n", define_prefix,
+ res ? "" : "NO_", define_name);
+ else
+ printf("%s is %savailable\n", plain_name, res ? "" : "NOT ");
+}
+
+static void print_kernel_option(const char *name, const char *value)
+{
+ char *endptr;
+ int res;
+
+	/* No support for C-style output */
+
+ if (json_output) {
+ if (!value) {
+ jsonw_null_field(json_wtr, name);
+ return;
+ }
+ errno = 0;
+ res = strtol(value, &endptr, 0);
+ if (!errno && *endptr == '\n')
+ jsonw_int_field(json_wtr, name, res);
+ else
+ jsonw_string_field(json_wtr, name, value);
+ } else {
+ if (value)
+ printf("%s is set to %s\n", name, value);
+ else
+ printf("%s is not set\n", name);
+ }
+}
+
+static void
+print_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment, const char *define_prefix)
+{
+ if (json_output) {
+ jsonw_name(json_wtr, json_title);
+ jsonw_start_object(json_wtr);
+ } else if (define_prefix) {
+ printf("%s\n", define_comment);
+ } else {
+ printf("%s\n", plain_title);
+ }
+}
+
+static void
+print_end_then_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment,
+ const char *define_prefix)
+{
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+
+ print_start_section(json_title, plain_title, define_comment,
+ define_prefix);
+}
+
+/* Probing functions */
+
+static int read_procfs(const char *path)
+{
+ char *endptr, *line = NULL;
+ size_t len = 0;
+ FILE *fd;
+ int res;
+
+ fd = fopen(path, "r");
+ if (!fd)
+ return -1;
+
+ res = getline(&line, &len, fd);
+ fclose(fd);
+ if (res < 0)
+ return -1;
+
+ errno = 0;
+ res = strtol(line, &endptr, 10);
+ if (errno || *line == '\0' || *endptr != '\n')
+ res = -1;
+ free(line);
+
+ return res;
+}
+
+static void probe_unprivileged_disabled(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "unprivileged_bpf_disabled", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("bpf() syscall for unprivileged users is enabled\n");
+ break;
+ case 1:
+ printf("bpf() syscall restricted to privileged users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve required privileges for bpf() syscall\n");
+ break;
+ default:
+ printf("bpf() syscall restriction has unknown value %d\n", res);
+ }
+ }
+}
+
+static void probe_jit_enable(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_enable");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_enable", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler is enabled\n");
+ break;
+ case 2:
+ printf("JIT compiler is enabled with debugging traces in kernel logs\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT-compiler status\n");
+ break;
+ default:
+ printf("JIT-compiler status has unknown value %d\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_harden(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_harden");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_harden", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler hardening is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler hardening is enabled for unprivileged users\n");
+ break;
+ case 2:
+ printf("JIT compiler hardening is enabled for all users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT hardening status\n");
+ break;
+ default:
+ printf("JIT hardening status has unknown value %d\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_kallsyms(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_kallsyms", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler kallsyms exports are disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler kallsyms exports are enabled for root\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT kallsyms export status\n");
+ break;
+ default:
+ printf("JIT kallsyms exports status has unknown value %d\n", res);
+ }
+ }
+}
+
+static void probe_jit_limit(void)
+{
+ int res;
+
+	/* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_limit");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_limit", res);
+ } else {
+ switch (res) {
+ case -1:
+ printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
+ break;
+ default:
+ printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
+ }
+ }
+}
+
+static char *get_kernel_config_option(FILE *fd, const char *option)
+{
+ size_t line_n = 0, optlen = strlen(option);
+ char *res, *strval, *line = NULL;
+ ssize_t n;
+
+ rewind(fd);
+ while ((n = getline(&line, &line_n, fd)) > 0) {
+ if (strncmp(line, option, optlen))
+ continue;
+ /* Check we have at least '=', value, and '\n' */
+ if (strlen(line) < optlen + 3)
+ continue;
+ if (*(line + optlen) != '=')
+ continue;
+
+ /* Trim ending '\n' */
+ line[strlen(line) - 1] = '\0';
+
+ /* Copy and return config option value */
+ strval = line + optlen + 1;
+ res = strdup(strval);
+ free(line);
+ return res;
+ }
+ free(line);
+
+ return NULL;
+}
+
+static void probe_kernel_image_config(void)
+{
+ static const char * const options[] = {
+ /* Enable BPF */
+ "CONFIG_BPF",
+ /* Enable bpf() syscall */
+ "CONFIG_BPF_SYSCALL",
+ /* Does selected architecture support eBPF JIT compiler */
+ "CONFIG_HAVE_EBPF_JIT",
+ /* Compile eBPF JIT compiler */
+ "CONFIG_BPF_JIT",
+ /* Avoid compiling eBPF interpreter (use JIT only) */
+ "CONFIG_BPF_JIT_ALWAYS_ON",
+
+ /* cgroups */
+ "CONFIG_CGROUPS",
+ /* BPF programs attached to cgroups */
+ "CONFIG_CGROUP_BPF",
+ /* bpf_get_cgroup_classid() helper */
+ "CONFIG_CGROUP_NET_CLASSID",
+ /* bpf_skb_{,ancestor_}cgroup_id() helpers */
+ "CONFIG_SOCK_CGROUP_DATA",
+
+ /* Tracing: attach BPF to kprobes, tracepoints, etc. */
+ "CONFIG_BPF_EVENTS",
+ /* Kprobes */
+ "CONFIG_KPROBE_EVENTS",
+ /* Uprobes */
+ "CONFIG_UPROBE_EVENTS",
+ /* Tracepoints */
+ "CONFIG_TRACING",
+ /* Syscall tracepoints */
+ "CONFIG_FTRACE_SYSCALLS",
+ /* bpf_override_return() helper support for selected arch */
+ "CONFIG_FUNCTION_ERROR_INJECTION",
+ /* bpf_override_return() helper */
+ "CONFIG_BPF_KPROBE_OVERRIDE",
+
+ /* Network */
+ "CONFIG_NET",
+ /* AF_XDP sockets */
+ "CONFIG_XDP_SOCKETS",
+ /* BPF_PROG_TYPE_LWT_* and related helpers */
+ "CONFIG_LWTUNNEL_BPF",
+ /* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */
+ "CONFIG_NET_ACT_BPF",
+ /* BPF_PROG_TYPE_SCHED_CLS, TC filters */
+ "CONFIG_NET_CLS_BPF",
+ /* TC clsact qdisc */
+ "CONFIG_NET_CLS_ACT",
+ /* Ingress filtering with TC */
+ "CONFIG_NET_SCH_INGRESS",
+ /* bpf_skb_get_xfrm_state() helper */
+ "CONFIG_XFRM",
+ /* bpf_get_route_realm() helper */
+ "CONFIG_IP_ROUTE_CLASSID",
+ /* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */
+ "CONFIG_IPV6_SEG6_BPF",
+ /* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */
+ "CONFIG_BPF_LIRC_MODE2",
+ /* BPF stream parser and BPF socket maps */
+ "CONFIG_BPF_STREAM_PARSER",
+ /* xt_bpf module for passing BPF programs to netfilter */
+ "CONFIG_NETFILTER_XT_MATCH_BPF",
+ /* bpfilter back-end for iptables */
+ "CONFIG_BPFILTER",
+		/* bpfilter module with "user mode helper" */
+ "CONFIG_BPFILTER_UMH",
+
+ /* test_bpf module for BPF tests */
+ "CONFIG_TEST_BPF",
+ };
+ char *value, *buf = NULL;
+ struct utsname utsn;
+ char path[PATH_MAX];
+ size_t i, n;
+ ssize_t ret;
+ FILE *fd;
+
+ if (uname(&utsn))
+ goto no_config;
+
+ snprintf(path, sizeof(path), "/boot/config-%s", utsn.release);
+
+ fd = fopen(path, "r");
+ if (!fd && errno == ENOENT) {
+ /* Some distributions put the config file at /proc/config, give
+ * it a try.
+ * Sometimes it is also at /proc/config.gz but we do not try
+ * this one for now, it would require linking against libz.
+ */
+ fd = fopen("/proc/config", "r");
+ }
+ if (!fd) {
+ p_info("skipping kernel config, can't open file: %s",
+ strerror(errno));
+ goto no_config;
+ }
+ /* Sanity checks */
+ ret = getline(&buf, &n, fd);
+ ret = getline(&buf, &n, fd);
+ if (!buf || !ret) {
+ p_info("skipping kernel config, can't read from file: %s",
+ strerror(errno));
+ free(buf);
+ goto no_config;
+ }
+ if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) {
+ p_info("skipping kernel config, can't find correct file");
+ free(buf);
+ goto no_config;
+ }
+ free(buf);
+
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+ value = get_kernel_config_option(fd, options[i]);
+ print_kernel_option(options[i], value);
+ free(value);
+ }
+ fclose(fd);
+ return;
+
+no_config:
+ for (i = 0; i < ARRAY_SIZE(options); i++)
+ print_kernel_option(options[i], NULL);
+}
+
+static bool probe_bpf_syscall(const char *define_prefix)
+{
+ bool res;
+
+ bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
+ res = (errno != ENOSYS);
+
+ print_bool_feature("have_bpf_syscall",
+ "bpf() syscall",
+ "BPF_SYSCALL",
+ res, define_prefix);
+
+ return res;
+}
+
+static void
+probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
+ const char *define_prefix, __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF program_type ";
+ size_t maxlen;
+ bool res;
+
+ if (ifindex)
+ /* Only test offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ res = bpf_probe_prog_type(prog_type, ifindex);
+
+ supported_types[prog_type] |= res;
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(prog_type_name[prog_type]) > maxlen) {
+ p_info("program type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]);
+ sprintf(define_name, "%s_prog_type", prog_type_name[prog_type]);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static void
+probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
+ __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF map_type ";
+ size_t maxlen;
+ bool res;
+
+ res = bpf_probe_map_type(map_type, ifindex);
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(map_type_name[map_type]) > maxlen) {
+ p_info("map type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]);
+ sprintf(define_name, "%s_map_type", map_type_name[map_type]);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static void
+probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+ const char *define_prefix, __u32 ifindex)
+{
+ const char *ptype_name = prog_type_name[prog_type];
+ char feat_name[128];
+ unsigned int id;
+ bool res;
+
+ if (ifindex)
+ /* Only test helpers for offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ if (json_output) {
+ sprintf(feat_name, "%s_available_helpers", ptype_name);
+ jsonw_name(json_wtr, feat_name);
+ jsonw_start_array(json_wtr);
+ } else if (!define_prefix) {
+ printf("eBPF helpers supported for program type %s:",
+ ptype_name);
+ }
+
+ for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
+ if (!supported_type)
+ res = false;
+ else
+ res = bpf_probe_helper(id, prog_type, ifindex);
+
+ if (json_output) {
+ if (res)
+ jsonw_string(json_wtr, helper_name[id]);
+ } else if (define_prefix) {
+ printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
+ define_prefix, ptype_name, helper_name[id],
+ res ? "1" : "0");
+ } else {
+ if (res)
+ printf("\n\t- %s", helper_name[id]);
+ }
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+ else if (!define_prefix)
+ printf("\n");
+}
+
+static int do_probe(int argc, char **argv)
+{
+ enum probe_component target = COMPONENT_UNSPEC;
+ const char *define_prefix = NULL;
+ bool supported_types[128] = {};
+ __u32 ifindex = 0;
+ unsigned int i;
+ char *ifname;
+
+ /* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN).
+ * Let's approximate, and restrict usage to root user only.
+ */
+ if (geteuid()) {
+ p_err("please run this command as root user");
+ return -1;
+ }
+
+ set_max_rlimit();
+
+ while (argc) {
+ if (is_prefix(*argv, "kernel")) {
+ if (target != COMPONENT_UNSPEC) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ target = COMPONENT_KERNEL;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "dev")) {
+ NEXT_ARG();
+
+ if (target != COMPONENT_UNSPEC || ifindex) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ if (!REQ_ARGS(1))
+ return -1;
+
+ target = COMPONENT_DEVICE;
+ ifname = GET_ARG();
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex) {
+ p_err("unrecognized netdevice '%s': %s", ifname,
+ strerror(errno));
+ return -1;
+ }
+ } else if (is_prefix(*argv, "macros") && !define_prefix) {
+ define_prefix = "";
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "prefix")) {
+ if (!define_prefix) {
+				p_err("'prefix' argument can only be used after 'macros'");
+ return -1;
+ }
+ if (strcmp(define_prefix, "")) {
+ p_err("'prefix' already defined");
+ return -1;
+ }
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ return -1;
+ define_prefix = GET_ARG();
+ } else {
+ p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?",
+ *argv);
+ return -1;
+ }
+ }
+
+ if (json_output) {
+ define_prefix = NULL;
+ jsonw_start_object(json_wtr);
+ }
+
+ switch (target) {
+ case COMPONENT_KERNEL:
+ case COMPONENT_UNSPEC:
+ if (define_prefix)
+ break;
+
+ print_start_section("system_config",
+ "Scanning system configuration...",
+ NULL, /* define_comment never used here */
+ NULL); /* define_prefix always NULL here */
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
+ probe_kernel_image_config();
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+ break;
+ default:
+ break;
+ }
+
+ print_start_section("syscall_config",
+ "Scanning system call availability...",
+ "/*** System call availability ***/",
+ define_prefix);
+
+ if (!probe_bpf_syscall(define_prefix))
+ /* bpf() syscall unavailable, don't probe other BPF features */
+ goto exit_close_json;
+
+ print_end_then_start_section("program_types",
+ "Scanning eBPF program types...",
+ "/*** eBPF program types ***/",
+ define_prefix);
+
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_prog_type(i, supported_types, define_prefix, ifindex);
+
+ print_end_then_start_section("map_types",
+ "Scanning eBPF map types...",
+ "/*** eBPF map types ***/",
+ define_prefix);
+
+ for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
+ probe_map_type(i, define_prefix, ifindex);
+
+ print_end_then_start_section("helpers",
+ "Scanning eBPF helper functions...",
+ "/*** eBPF helper functions ***/",
+ define_prefix);
+
+ if (define_prefix)
+ printf("/*\n"
+ " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
+ " * to determine if <helper_name> is available for <prog_type_name>,\n"
+ " * e.g.\n"
+ " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
+ " * // do stuff with this helper\n"
+ " * #elif\n"
+ " * // use a workaround\n"
+ " * #endif\n"
+ " */\n"
+ "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
+ " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
+ define_prefix, define_prefix, define_prefix,
+ define_prefix);
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_helpers_for_progtype(i, supported_types[i],
+ define_prefix, ifindex);
+
+exit_close_json:
+ if (json_output) {
+ /* End current "section" of probes */
+ jsonw_end_object(json_wtr);
+ /* End root object */
+ jsonw_end_object(json_wtr);
+ }
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n"
+ " %s %s help\n"
+ "\n"
+ " COMPONENT := { kernel | dev NAME }\n"
+ "",
+ bin_name, argv[-2], bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "probe", do_probe },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_feature(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
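
probe_bpf_syscall() infers availability from errno: it issues a bpf() call that is guaranteed to fail and treats ENOSYS as "no syscall", anything else as "syscall present". A minimal sketch of the same trick with a raw syscall and no libbpf dependency:

	/* Detect bpf() availability the way probe_bpf_syscall() does: make a
	 * call that cannot succeed and look only at errno afterwards.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/bpf.h>

	int main(void)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		errno = 0;
		/* an all-zero BPF_PROG_LOAD can never succeed */
		syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));

		if (errno == ENOSYS)
			puts("bpf() syscall is NOT available");
		else
			puts("bpf() syscall is available");
		return 0;
	}
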
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index bff7ee026680..6046dcab51cc 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -1,15 +1,10 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
/*
* Simple streaming JSON writer
*
* This takes care of the annoying bits of JSON syntax like the commas
* after elements
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Stephen Hemminger <stephen@networkplumber.org>
*/
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index c1ab51aed99c..cb9a1993681c 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -5,11 +5,6 @@
* This takes care of the annoying bits of JSON syntax like the commas
* after elements
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Stephen Hemminger <stephen@networkplumber.org>
*/
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index f44a1c2c4ea0..a9d5e9e6a732 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -56,7 +56,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | cgroup | perf | net }\n"
+ " OBJECT := { prog | map | cgroup | perf | net | feature }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -187,6 +187,7 @@ static const struct cmd cmds[] = {
{ "cgroup", do_cgroup },
{ "perf", do_perf },
{ "net", do_net },
+ { "feature", do_feature },
{ "version", do_version },
{ 0 }
};
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 052c91d4dc55..d7dd84d3c660 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -75,6 +75,9 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
};
+extern const char * const map_type_name[];
+extern const size_t map_type_name_size;
+
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
@@ -145,6 +148,7 @@ int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
int do_net(int argc, char **arg);
int do_tracelog(int argc, char **arg);
+int do_feature(int argc, char **argv);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 2037e3dc864b..e0c650d91784 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -21,7 +21,7 @@
#include "json_writer.h"
#include "main.h"
-static const char * const map_type_name[] = {
+const char * const map_type_name[] = {
[BPF_MAP_TYPE_UNSPEC] = "unspec",
[BPF_MAP_TYPE_HASH] = "hash",
[BPF_MAP_TYPE_ARRAY] = "array",
@@ -48,6 +48,8 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_STACK] = "stack",
};
+const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
+
static bool map_is_per_cpu(__u32 type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
@@ -285,16 +287,21 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
single_line = info->key_size + info->value_size <= 24 &&
!break_names;
- printf("key:%c", break_names ? '\n' : ' ');
- fprint_hex(stdout, key, info->key_size, " ");
+ if (info->key_size) {
+ printf("key:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, key, info->key_size, " ");
- printf(single_line ? " " : "\n");
+ printf(single_line ? " " : "\n");
+ }
- printf("value:%c", break_names ? '\n' : ' ');
- if (value)
- fprint_hex(stdout, value, info->value_size, " ");
- else
- printf("<no entry>");
+ if (info->value_size) {
+ printf("value:%c", break_names ? '\n' : ' ');
+ if (value)
+ fprint_hex(stdout, value, info->value_size,
+ " ");
+ else
+ printf("<no entry>");
+ }
printf("\n");
} else {
@@ -303,19 +310,23 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
n = get_possible_cpus();
step = round_up(info->value_size, 8);
- printf("key:\n");
- fprint_hex(stdout, key, info->key_size, " ");
- printf("\n");
- for (i = 0; i < n; i++) {
- printf("value (CPU %02d):%c",
- i, info->value_size > 16 ? '\n' : ' ');
- if (value)
- fprint_hex(stdout, value + i * step,
- info->value_size, " ");
- else
- printf("<no entry>");
+ if (info->key_size) {
+ printf("key:\n");
+ fprint_hex(stdout, key, info->key_size, " ");
printf("\n");
}
+ if (info->value_size) {
+ for (i = 0; i < n; i++) {
+ printf("value (CPU %02d):%c",
+ i, info->value_size > 16 ? '\n' : ' ');
+ if (value)
+ fprint_hex(stdout, value + i * step,
+ info->value_size, " ");
+ else
+ printf("<no entry>");
+ printf("\n");
+ }
+ }
}
}
@@ -347,6 +358,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
return argv + i;
}
+/* on per-CPU maps we must copy the provided value to all value instances */
+static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
+{
+ unsigned int i, n, step;
+
+ if (!map_is_per_cpu(info->type))
+ return;
+
+ n = get_possible_cpus();
+ step = round_up(info->value_size, 8);
+ for (i = 1; i < n; i++)
+ memcpy(value + i * step, value, info->value_size);
+}
+
static int parse_elem(char **argv, struct bpf_map_info *info,
void *key, void *value, __u32 key_size, __u32 value_size,
__u32 *flags, __u32 **value_fd)
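
fill_per_cpu_value() above reflects the syscall ABI for per-CPU maps: user space passes one buffer containing a value slot per possible CPU, each slot rounded up to 8 bytes. A sketch of building such a buffer, taking the possible-CPU count as a parameter instead of reading it from sysfs the way bpftool's get_possible_cpus() does:

	/* Per-CPU value layout consumed by BPF_MAP_UPDATE_ELEM: n slots of
	 * roundup(value_size, 8) bytes, one slot per possible CPU.
	 */
	#include <stdlib.h>
	#include <string.h>

	#define ROUND_UP_8(x)	(((x) + 7) & ~7u)

	void *build_percpu_value(const void *one_value, unsigned int value_size,
				 unsigned int nr_possible_cpus)
	{
		unsigned int step = ROUND_UP_8(value_size);
		unsigned char *buf = calloc(nr_possible_cpus, step);
		unsigned int i;

		if (!buf)
			return NULL;
		/* replicate the single user-supplied value into every CPU slot */
		for (i = 0; i < nr_possible_cpus; i++)
			memcpy(buf + i * step, one_value, value_size);
		return buf;
	}
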
@@ -415,6 +440,9 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
p_err("not enough value arguments for map of progs");
return -1;
}
+ if (is_prefix(*argv, "id"))
+ p_info("Warning: updating program array via MAP_ID, make sure this map is kept open\n"
+			       "       by some process or pinned, otherwise the update will be lost");
fd = prog_parse_fd(&argc, &argv);
if (fd < 0)
@@ -426,6 +454,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
argv = parse_bytes(argv, "value", value, value_size);
if (!argv)
return -1;
+
+ fill_per_cpu_value(info, value);
}
return parse_elem(argv, info, key, NULL, key_size, value_size,
@@ -497,10 +527,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_uint_field(json_wtr, "owner_prog_type",
prog_type);
}
- if (atoi(owner_jited))
- jsonw_bool_field(json_wtr, "owner_jited", true);
- else
- jsonw_bool_field(json_wtr, "owner_jited", false);
+ if (owner_jited)
+ jsonw_bool_field(json_wtr, "owner_jited",
+ !!atoi(owner_jited));
free(owner_prog_type);
free(owner_jited);
@@ -553,7 +582,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
char *owner_jited = get_fdinfo(fd, "owner_jited");
- printf("\n\t");
+ if (owner_prog_type || owner_jited)
+ printf("\n\t");
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
@@ -563,10 +593,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
else
printf("owner_prog_type %d ", prog_type);
}
- if (atoi(owner_jited))
- printf("owner jited");
- else
- printf("owner not jited");
+ if (owner_jited)
+ printf("owner%s jited",
+ atoi(owner_jited) ? "" : " not");
free(owner_prog_type);
free(owner_jited);
@@ -779,6 +808,32 @@ exit_free:
return err;
}
+static int alloc_key_value(struct bpf_map_info *info, void **key, void **value)
+{
+ *key = NULL;
+ *value = NULL;
+
+ if (info->key_size) {
+ *key = malloc(info->key_size);
+ if (!*key) {
+ p_err("key mem alloc failed");
+ return -1;
+ }
+ }
+
+ if (info->value_size) {
+ *value = alloc_value(info);
+ if (!*value) {
+ p_err("value mem alloc failed");
+ free(*key);
+ *key = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int do_update(int argc, char **argv)
{
struct bpf_map_info info = {};
@@ -795,13 +850,9 @@ static int do_update(int argc, char **argv)
if (fd < 0)
return -1;
- key = malloc(info.key_size);
- value = alloc_value(&info);
- if (!key || !value) {
- p_err("mem alloc failed");
- err = -1;
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
goto exit_free;
- }
err = parse_elem(argv, &info, key, value, info.key_size,
info.value_size, &flags, &value_fd);
@@ -826,12 +877,51 @@ exit_free:
return err;
}
+static void print_key_value(struct bpf_map_info *info, void *key,
+ void *value)
+{
+ json_writer_t *btf_wtr;
+ struct btf *btf = NULL;
+ int err;
+
+ err = btf__get_from_id(info->btf_id, &btf);
+ if (err) {
+ p_err("failed to get btf");
+ return;
+ }
+
+ if (json_output) {
+ print_entry_json(info, key, value, btf);
+ } else if (btf) {
+ /* if here json_wtr wouldn't have been initialised,
+ * so let's create separate writer for btf
+ */
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
+ print_entry_plain(info, key, value);
+ } else {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, info, key, value);
+ jsonw_destroy(&btf_wtr);
+ }
+ } else {
+ print_entry_plain(info, key, value);
+ }
+ btf__free(btf);
+}
+
static int do_lookup(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
- json_writer_t *btf_wtr;
- struct btf *btf = NULL;
void *key, *value;
int err;
int fd;
@@ -843,13 +933,9 @@ static int do_lookup(int argc, char **argv)
if (fd < 0)
return -1;
- key = malloc(info.key_size);
- value = alloc_value(&info);
- if (!key || !value) {
- p_err("mem alloc failed");
- err = -1;
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
goto exit_free;
- }
err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
if (err)
@@ -873,43 +959,12 @@ static int do_lookup(int argc, char **argv)
}
/* here means bpf_map_lookup_elem() succeeded */
- err = btf__get_from_id(info.btf_id, &btf);
- if (err) {
- p_err("failed to get btf");
- goto exit_free;
- }
-
- if (json_output) {
- print_entry_json(&info, key, value, btf);
- } else if (btf) {
- /* if here json_wtr wouldn't have been initialised,
- * so let's create separate writer for btf
- */
- btf_wtr = get_btf_writer();
- if (!btf_wtr) {
- p_info("failed to create json writer for btf. falling back to plain output");
- btf__free(btf);
- btf = NULL;
- print_entry_plain(&info, key, value);
- } else {
- struct btf_dumper d = {
- .btf = btf,
- .jw = btf_wtr,
- .is_plain_text = true,
- };
-
- do_dump_btf(&d, &info, key, value);
- jsonw_destroy(&btf_wtr);
- }
- } else {
- print_entry_plain(&info, key, value);
- }
+ print_key_value(&info, key, value);
exit_free:
free(key);
free(value);
close(fd);
- btf__free(btf);
return err;
}
@@ -1122,6 +1177,49 @@ static int do_create(int argc, char **argv)
return 0;
}
+static int do_pop_dequeue(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ void *key, *value;
+ int err;
+ int fd;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
+ goto exit_free;
+
+ err = bpf_map_lookup_and_delete_elem(fd, key, value);
+ if (err) {
+ if (errno == ENOENT) {
+ if (json_output)
+ jsonw_null(json_wtr);
+ else
+ printf("Error: empty map\n");
+ } else {
+ p_err("pop failed: %s", strerror(errno));
+ }
+
+ goto exit_free;
+ }
+
+ print_key_value(&info, key, value);
+
+exit_free:
+ free(key);
+ free(value);
+ close(fd);
+
+ return err;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1135,12 +1233,17 @@ static int do_help(int argc, char **argv)
" entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
" [dev NAME]\n"
" %s %s dump MAP\n"
- " %s %s update MAP key DATA value VALUE [UPDATE_FLAGS]\n"
- " %s %s lookup MAP key DATA\n"
+ " %s %s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
+ " %s %s lookup MAP [key DATA]\n"
" %s %s getnext MAP [key DATA]\n"
" %s %s delete MAP key DATA\n"
" %s %s pin MAP FILE\n"
" %s %s event_pipe MAP [cpu N index M]\n"
+ " %s %s peek MAP\n"
+ " %s %s push MAP value VALUE\n"
+ " %s %s pop MAP\n"
+ " %s %s enqueue MAP value VALUE\n"
+ " %s %s dequeue MAP\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
@@ -1158,7 +1261,8 @@ static int do_help(int argc, char **argv)
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -1175,6 +1279,11 @@ static const struct cmd cmds[] = {
{ "pin", do_pin },
{ "event_pipe", do_event_pipe },
{ "create", do_create },
+ { "peek", do_lookup },
+ { "push", do_update },
+ { "enqueue", do_update },
+ { "pop", do_pop_dequeue },
+ { "dequeue", do_pop_dequeue },
{ 0 }
};
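
The new peek/push/pop/enqueue/dequeue subcommands are thin wrappers around the queue/stack map operations issued by do_update() and do_pop_dequeue(). As a rough illustration only (not part of the patch), the same semantics can be exercised directly through libbpf's then-current syscall wrappers; the map size and value below are made-up example parameters:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int main(void)
{
	__u32 val = 42, out = 0;
	int fd;

	/* queue/stack maps take no key, so key_size is 0 */
	fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0, sizeof(val), 16, 0);
	if (fd < 0)
		return 1;

	/* "bpftool map push MAP value 42" boils down to this update */
	if (bpf_map_update_elem(fd, NULL, &val, BPF_ANY))
		return 1;

	/* "bpftool map pop MAP" is a lookup-and-delete with a NULL key */
	if (bpf_map_lookup_and_delete_elem(fd, NULL, &out))
		return 1;

	printf("popped %u\n", out);
	return 0;
}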
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 2d1bb7d6ff51..8ef80d65a474 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
static int prog_fd_by_tag(unsigned char *tag)
{
- struct bpf_prog_info info = {};
- __u32 len = sizeof(info);
unsigned int id = 0;
int err;
int fd;
while (true) {
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+
err = bpf_prog_get_next_id(id, &id);
if (err) {
p_err("%s", strerror(errno));
@@ -213,6 +214,10 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
+ if (info->run_time_ns) {
+ jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
+ jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
+ }
print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
@@ -276,6 +281,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
printf("%s", info->gpl_compatible ? " gpl" : "");
+ if (info->run_time_ns)
+ printf(" run_time_ns %lld run_cnt %lld",
+ info->run_time_ns, info->run_cnt);
printf("\n");
if (info->load_time) {
@@ -930,10 +938,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = libbpf_prog_type_by_name(type, &attr.prog_type,
&expected_attach_type);
free(type);
- if (err < 0) {
- p_err("unknown program type '%s'", *argv);
+ if (err < 0)
goto err_free_reuse_maps;
- }
+
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
void *new_map_replace;
@@ -1028,11 +1035,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = libbpf_prog_type_by_name(sec_name, &prog_type,
&expected_attach_type);
- if (err < 0) {
- p_err("failed to guess program type based on section name %s\n",
- sec_name);
+ if (err < 0)
goto err_close_obj;
- }
}
bpf_program__set_ifindex(pos, ifindex);
@@ -1049,7 +1053,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
j = 0;
while (j < old_map_fds && map_replace[j].name) {
i = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
map_replace[j].idx = i;
break;
@@ -1070,7 +1074,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
/* Set ifindex and name reuse */
j = 0;
idx = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!bpf_map__is_offload_neutral(map))
bpf_map__set_ifindex(map, ifindex);
@@ -1202,7 +1206,7 @@ static int do_help(int argc, char **argv)
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
- " ATTACH_TYPE := { msg_verdict | skb_verdict | skb_parse |\n"
+ " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 5467c6bf9ceb..61e46d54a67c 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -53,10 +53,6 @@ FEATURE_TESTS_BASIC := \
libslang \
libcrypto \
libunwind \
- libunwind-x86 \
- libunwind-x86_64 \
- libunwind-arm \
- libunwind-aarch64 \
pthread-attr-setaffinity-np \
pthread-barrier \
reallocarray \
@@ -70,7 +66,6 @@ FEATURE_TESTS_BASIC := \
sched_getcpu \
sdt \
setns \
- libopencsd \
libaio
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
@@ -84,6 +79,11 @@ FEATURE_TESTS_EXTRA := \
libbabeltrace \
libbfd-liberty \
libbfd-liberty-z \
+ libopencsd \
+ libunwind-x86 \
+ libunwind-x86_64 \
+ libunwind-arm \
+ libunwind-aarch64 \
libunwind-debug-frame \
libunwind-debug-frame-arm \
libunwind-debug-frame-aarch64 \
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 20cdaa4fc112..e903b86b742f 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -170,14 +170,14 @@
# include "test-setns.c"
#undef main
-#define main main_test_libopencsd
-# include "test-libopencsd.c"
-#undef main
-
#define main main_test_libaio
# include "test-libaio.c"
#undef main
+#define main main_test_reallocarray
+# include "test-reallocarray.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -217,8 +217,8 @@ int main(int argc, char *argv[])
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
- main_test_libopencsd();
main_test_libaio();
+ main_test_reallocarray();
return 0;
}
diff --git a/tools/build/feature/test-get_current_dir_name.c b/tools/build/feature/test-get_current_dir_name.c
index 573000f93212..c3c201691b4f 100644
--- a/tools/build/feature/test-get_current_dir_name.c
+++ b/tools/build/feature/test-get_current_dir_name.c
@@ -8,3 +8,4 @@ int main(void)
free(get_current_dir_name());
return 0;
}
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-libpython.c b/tools/build/feature/test-libpython.c
index 0c1641b0d9a7..371c9113e49d 100644
--- a/tools/build/feature/test-libpython.c
+++ b/tools/build/feature/test-libpython.c
@@ -7,3 +7,4 @@ int main(void)
return 0;
}
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c
index 8170de35150d..8f6743e31da7 100644
--- a/tools/build/feature/test-reallocarray.c
+++ b/tools/build/feature/test-reallocarray.c
@@ -6,3 +6,5 @@ int main(void)
{
return !!reallocarray(NULL, 1, 1);
}
+
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-sched_getcpu.c b/tools/build/feature/test-sched_getcpu.c
index e448deb4124c..48995ac7911e 100644
--- a/tools/build/feature/test-sched_getcpu.c
+++ b/tools/build/feature/test-sched_getcpu.c
@@ -8,3 +8,5 @@ int main(void)
{
return sched_getcpu();
}
+
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-setns.c b/tools/build/feature/test-setns.c
index 1f714d2a658b..4a1581ae7a55 100644
--- a/tools/build/feature/test-setns.c
+++ b/tools/build/feature/test-setns.c
@@ -5,3 +5,4 @@ int main(void)
{
return setns(0, 0);
}
+#undef _GNU_SOURCE
diff --git a/tools/firmware/ihex2fw.c b/tools/firmware/ihex2fw.c
index b58dd061e978..8925b60e51f5 100644
--- a/tools/firmware/ihex2fw.c
+++ b/tools/firmware/ihex2fw.c
@@ -24,6 +24,10 @@
#include <getopt.h>
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+
struct ihex_binrec {
struct ihex_binrec *next; /* not part of the real data structure */
uint32_t addr;
@@ -131,6 +135,7 @@ int main(int argc, char **argv)
static int process_ihex(uint8_t *data, ssize_t size)
{
struct ihex_binrec *record;
+ size_t record_size;
uint32_t offset = 0;
uint32_t data32;
uint8_t type, crc = 0, crcbyte = 0;
@@ -157,12 +162,13 @@ next_record:
len <<= 8;
len += hex(data + i, &crc); i += 2;
}
- record = malloc((sizeof (*record) + len + 3) & ~3);
+ record_size = ALIGN(sizeof(*record) + len, 4);
+ record = malloc(record_size);
if (!record) {
fprintf(stderr, "out of memory for records\n");
return -ENOMEM;
}
- memset(record, 0, (sizeof(*record) + len + 3) & ~3);
+ memset(record, 0, record_size);
record->len = len;
/* now check if we have enough data to read everything */
@@ -259,13 +265,18 @@ static void file_record(struct ihex_binrec *record)
*p = record;
}
+static uint16_t ihex_binrec_size(struct ihex_binrec *p)
+{
+ return p->len + sizeof(p->addr) + sizeof(p->len);
+}
+
static int output_records(int outfd)
{
unsigned char zeroes[6] = {0, 0, 0, 0, 0, 0};
struct ihex_binrec *p = records;
while (p) {
- uint16_t writelen = (p->len + 9) & ~3;
+ uint16_t writelen = ALIGN(ihex_binrec_size(p), 4);
p->addr = htonl(p->addr);
p->len = htons(p->len);
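
For reference, the ALIGN() helpers introduced above simply round a size up to the next multiple of a power-of-two alignment; a tiny stand-alone check (not part of the patch) makes the arithmetic explicit:

#include <assert.h>
#include <stddef.h>

#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define ALIGN(x, a)			__ALIGN_KERNEL((x), (a))

int main(void)
{
	/* 9 + 3 = 12, then 12 & ~3 = 12 */
	assert(ALIGN((size_t)9, 4) == 12);
	/* already-aligned values are left untouched */
	assert(ALIGN((size_t)12, 4) == 12);
	assert(ALIGN((size_t)13, 8) == 16);
	return 0;
}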
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index ac2de6b7e89f..7bf9bde28bcc 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -60,6 +60,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_GRAVITY] = "gravity",
[IIO_POSITIONRELATIVE] = "positionrelative",
[IIO_PHASE] = "phase",
+ [IIO_MASSCONCENTRATION] = "massconcentration",
};
static const char * const iio_ev_type_text[] = {
@@ -114,7 +115,13 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_I] = "i",
[IIO_MOD_Q] = "q",
[IIO_MOD_CO2] = "co2",
+ [IIO_MOD_ETHANOL] = "ethanol",
+ [IIO_MOD_H2] = "h2",
[IIO_MOD_VOC] = "voc",
+ [IIO_MOD_PM1] = "pm1",
+ [IIO_MOD_PM2P5] = "pm2p5",
+ [IIO_MOD_PM4] = "pm4",
+ [IIO_MOD_PM10] = "pm10",
};
static bool event_is_known(struct iio_event_data *event)
@@ -156,6 +163,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_GRAVITY:
case IIO_POSITIONRELATIVE:
case IIO_PHASE:
+ case IIO_MASSCONCENTRATION:
break;
default:
return false;
@@ -199,7 +207,13 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_I:
case IIO_MOD_Q:
case IIO_MOD_CO2:
+ case IIO_MOD_ETHANOL:
+ case IIO_MOD_H2:
case IIO_MOD_VOC:
+ case IIO_MOD_PM1:
+ case IIO_MOD_PM2P5:
+ case IIO_MOD_PM4:
+ case IIO_MOD_PM10:
break;
default:
return false;
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 3040830d7797..84545666a09c 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -330,7 +330,7 @@ static const struct option longopts[] = {
int main(int argc, char **argv)
{
- unsigned long long num_loops = 2;
+ long long num_loops = 2;
unsigned long timedelay = 1000000;
unsigned long buf_len = 128;
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index af55acf73e75..cce0b02c0e28 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -199,6 +199,16 @@
.off = OFF, \
.imm = 0 })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
@@ -209,6 +219,16 @@
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
/* Unconditional jumps, goto pc + off16 */
#define BPF_JMP_A(OFF) \
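
As a sketch of how the new 32-bit jump macros compose with the existing ones (illustrative only, not taken from the patch, and assuming the tools/include copy of linux/filter.h shown above), a small instruction sequence could set r0 based on the low 32 bits of r1:

#include <linux/bpf.h>
#include <linux/filter.h>

/* r0 = 1 if the lower 32 bits of r1 equal 7, else r0 = 0 */
static const struct bpf_insn insns[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 7, 1),	/* upper 32 bits ignored */
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
};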
diff --git a/tools/include/linux/numa.h b/tools/include/linux/numa.h
new file mode 100644
index 000000000000..110b0e5d0fb0
--- /dev/null
+++ b/tools/include/linux/numa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NUMA_H
+#define _LINUX_NUMA_H
+
+
+#ifdef CONFIG_NODES_SHIFT
+#define NODES_SHIFT CONFIG_NODES_SHIFT
+#else
+#define NODES_SHIFT 0
+#endif
+
+#define MAX_NUMNODES (1 << NODES_SHIFT)
+
+#define NUMA_NO_NODE (-1)
+
+#endif /* _LINUX_NUMA_H */
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
index 112582253dd0..8e9ed4786269 100644
--- a/tools/include/linux/rbtree.h
+++ b/tools/include/linux/rbtree.h
@@ -43,13 +43,28 @@ struct rb_root {
struct rb_node *rb_node;
};
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
-#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
@@ -68,6 +83,12 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
+extern void rb_insert_color_cached(struct rb_node *,
+ struct rb_root_cached *, bool);
+extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -75,6 +96,8 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
+extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+ struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
@@ -90,12 +113,29 @@ static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
-
-/*
- * Handy for checking that we are not deleting an entry that is
- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
- * probably should be moved to lib/rbtree.c...
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
+ * given type allowing the backing memory of @pos to be invalidated
+ *
+ * @pos: the 'type *' to use as a loop cursor.
+ * @n: another 'type *' to use as temporary storage
+ * @root: 'rb_root *' of the rbtree.
+ * @field: the name of the rb_node field within 'type'.
+ *
+ * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
+ * list_for_each_entry_safe() and allows the iteration to continue independent
+ * of changes to @pos by the body of the loop.
+ *
+ * Note, however, that it cannot handle other modifications that re-order the
+ * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
+ * rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
+ pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
+ typeof(*pos), field); 1; }); \
+ pos = n)
+
static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
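
A minimal usage sketch of the leftmost-cached flavour (example code only, not part of the patch, assuming the tools/include copy of linux/rbtree.h above): insertion tracks whether the new node became the leftmost, and rb_first_cached() then returns it without walking the tree.

#include <stdbool.h>
#include <stddef.h>
#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	int key;
};

static struct rb_root_cached tree = RB_ROOT_CACHED;

static void item_insert(struct item *new)
{
	struct rb_node **link = &tree.rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}

	rb_link_node(&new->node, parent, link);
	/* tell the helper whether the new node is the new leftmost */
	rb_insert_color_cached(&new->node, &tree, leftmost);
}

static struct item *item_first(void)
{
	struct rb_node *n = rb_first_cached(&tree);	/* O(1) */

	return n ? rb_entry(n, struct item, node) : NULL;
}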
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
index 43be941db695..d008e1404580 100644
--- a/tools/include/linux/rbtree_augmented.h
+++ b/tools/include/linux/rbtree_augmented.h
@@ -44,7 +44,9 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
-extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+extern void __rb_insert_augmented(struct rb_node *node,
+ struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
/*
* Fixup the rbtree and update the augmented information when rebalancing.
@@ -60,7 +62,16 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, root, augment->rotate);
+ __rb_insert_augmented(node, root, false, NULL, augment->rotate);
+}
+
+static inline void
+rb_insert_augmented_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool newleft,
+ const struct rb_augment_callbacks *augment)
+{
+ __rb_insert_augmented(node, &root->rb_root,
+ newleft, &root->rb_leftmost, augment->rotate);
}
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
@@ -93,7 +104,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
old->rbaugmented = rbcompute(old); \
} \
rbstatic const struct rb_augment_callbacks rbname = { \
- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
+ .propagate = rbname ## _propagate, \
+ .copy = rbname ## _copy, \
+ .rotate = rbname ## _rotate \
};
@@ -126,11 +139,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
{
if (parent) {
if (parent->rb_left == old)
- parent->rb_left = new;
+ WRITE_ONCE(parent->rb_left, new);
else
- parent->rb_right = new;
+ WRITE_ONCE(parent->rb_right, new);
} else
- root->rb_node = new;
+ WRITE_ONCE(root->rb_node, new);
}
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -138,12 +151,17 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+ struct rb_node *child = node->rb_right;
+ struct rb_node *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
+ if (leftmost && node == *leftmost)
+ *leftmost = rb_next(node);
+
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@@ -170,6 +188,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
+
tmp = child->rb_left;
if (!tmp) {
/*
@@ -183,6 +202,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
*/
parent = successor;
child2 = successor->rb_right;
+
augment->copy(node, successor);
} else {
/*
@@ -204,19 +224,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
- parent->rb_left = child2 = successor->rb_right;
- successor->rb_right = child;
+ child2 = successor->rb_right;
+ WRITE_ONCE(parent->rb_left, child2);
+ WRITE_ONCE(successor->rb_right, child);
rb_set_parent(child, successor);
+
augment->copy(node, successor);
augment->propagate(parent, successor);
}
- successor->rb_left = tmp = node->rb_left;
+ tmp = node->rb_left;
+ WRITE_ONCE(successor->rb_left, tmp);
rb_set_parent(tmp, successor);
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
+
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
@@ -237,9 +261,21 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+ struct rb_node *rebalance = __rb_erase_augmented(node, root,
+ NULL, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
-#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
+static __always_inline void
+rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
+ &root->rb_leftmost,
+ augment);
+ if (rebalance)
+ __rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+}
+
+#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/include/nolibc/nolibc.h
index f98f5b92d3eb..1708e9f9f8aa 100644
--- a/tools/testing/selftests/rcutorture/bin/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -3,7 +3,85 @@
* Copyright (C) 2017-2018 Willy Tarreau <w@1wt.eu>
*/
-/* some archs (at least aarch64) don't expose the regular syscalls anymore by
+/*
+ * This file is designed to be used as a libc alternative for minimal programs
+ * with very limited requirements. It consists of a small number of syscall and
+ * type definitions, and the minimal startup code needed to call main().
+ * All syscalls are declared as static functions so that they can be optimized
+ * away by the compiler when not used.
+ *
+ * Syscalls are split into 3 levels:
+ * - The lower level is the arch-specific syscall() definition, consisting of
+ * assembly code in compound expressions. These are called my_syscall0() to
+ * my_syscall6() depending on the number of arguments. The MIPS
+ * implementation is limited to 5 arguments. All input arguments are cast
+ * to a long stored in a register. These expressions always return the
+ * syscall's return value as a signed long value which is often either a
+ * pointer or the negated errno value.
+ *
+ * - The second level is mostly architecture-independent. It is made of
+ * static functions called sys_<name>() which rely on my_syscallN()
+ * depending on the syscall definition. These functions are responsible
+ * for exposing the appropriate types for the syscall arguments (int,
+ * pointers, etc) and for setting the appropriate return type (often int).
+ * A few of them are architecture-specific because the syscalls are not all
+ * mapped exactly the same among architectures. For example, some archs do
+ * not implement select() and need pselect6() instead, so the sys_select()
+ * function will have to abstract this.
+ *
+ * - The third level is the libc call definition. It exposes the lower raw
+ * sys_<name>() calls in a way that looks like what a libc usually does,
+ * takes care of specific input values, and of setting errno upon error.
+ * There can be minor variations compared to standard libc calls. For
+ * example the open() call always takes 3 args here.
+ *
+ * The errno variable is declared static and unused. This way it can be
+ * optimized away if not used. However this means that a program made of
+ * multiple C files may observe different errno values (one per C file). For
+ * the type of programs this project targets it usually is not a problem. The
+ * resulting program may even be reduced by defining the NOLIBC_IGNORE_ERRNO
+ * macro, in which case the errno value will never be assigned.
+ *
+ * Some stdint-like integer types are defined. These are valid on all currently
+ * supported architectures, because signs are enforced, ints are assumed to be
+ * 32 bits, longs the size of a pointer and long long 64 bits. If more
+ * architectures have to be supported, this may need to be adapted.
+ *
+ * Some macro definitions like the O_* values passed to open(), and some
+ * structures like the sys_stat struct depend on the architecture.
+ *
+ * The definitions start with the architecture-specific parts, which are picked
+ * based on what the compiler knows about the target architecture, and are
+ * completed with the generic code. Since it is the compiler which sets the
+ * target architecture, cross-compiling normally works out of the box without
+ * having to specify anything.
+ *
+ * Finally some very common libc-level functions are provided. It is the case
+ * for a few functions usually found in string.h, ctype.h, or stdlib.h. Nothing
+ * is currently provided regarding stdio emulation.
+ *
+ * The macro NOLIBC is always defined, so that it is possible for a program to
+ * check this macro to know if it is being built against nolibc and decide to
+ * disable some features or simply not to include some standard libc files.
+ *
+ * Ideally this file should be split in multiple files for easier long term
+ * maintenance, but provided as a single file as it is now, it's quite
+ * convenient to use. Maybe some variations involving a set of includes at the
+ * top could work.
+ *
+ * A simple static executable may be built this way:
+ * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
+ * -static -include nolibc.h -lgcc -o hello hello.c
+ *
+ * A very useful calling convention table may be found here:
+ * http://man7.org/linux/man-pages/man2/syscall.2.html
+ *
+ * This doc is quite convenient though not necessarily up to date:
+ * https://w3challs.com/syscalls/
+ *
+ */
+
+/* Some archs (at least aarch64) don't expose the regular syscalls anymore by
* default, either because they have an "_at" replacement, or because there are
* more modern alternatives. For now we'd rather still use them.
*/
@@ -19,18 +97,6 @@
#define NOLIBC
-/* Build a static executable this way :
- * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
- * -static -include nolibc.h -lgcc -o hello hello.c
- *
- * Useful calling convention table found here :
- * http://man7.org/linux/man-pages/man2/syscall.2.html
- *
- * This doc is even better :
- * https://w3challs.com/syscalls/
- */
-
-
/* this way it will be removed if unused */
static int errno;
@@ -81,9 +147,9 @@ typedef signed long time_t;
/* for poll() */
struct pollfd {
- int fd;
- short int events;
- short int revents;
+ int fd;
+ short int events;
+ short int revents;
};
/* for select() */
@@ -239,7 +305,7 @@ struct stat {
"syscall\n" \
: "=a" (_ret) \
: "0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -255,7 +321,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -272,7 +338,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -290,7 +356,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -1006,7 +1072,7 @@ struct sys_stat_struct {
: "=r"(_num), "=r"(_arg4) \
: "r"(_num) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1025,7 +1091,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1045,7 +1111,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1066,7 +1132,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1087,7 +1153,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1110,7 +1176,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
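
To picture the target use case, a hello-world against nolibc could look roughly like the sketch below (not part of the patch; the build line is the one quoted in the header's own comment, and write() is one of the libc-level wrappers the header provides):

/* build roughly as suggested by the header comment:
 *   gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
 *       -static -include nolibc.h -lgcc -o hello hello.c
 */
int main(void)
{
	static const char msg[] = "hello from nolibc\n";

	write(1, msg, sizeof(msg) - 1);
	return 0;
}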
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index c7f3321fbe43..d90127298f12 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -738,9 +738,11 @@ __SYSCALL(__NR_statx, sys_statx)
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_kexec_file_load 294
+__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
#undef __NR_syscalls
-#define __NR_syscalls 294
+#define __NR_syscalls 295
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index 8dd6aefdafa4..57aaeaf8e192 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -13,6 +13,10 @@
#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
#elif defined(__ia64__)
#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
+#elif defined(__riscv)
+#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
+#elif defined(__alpha__)
+#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
#else
#include <asm-generic/bitsperlong.h>
#endif
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index a4446f452040..298b2e197744 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE 0
+#define I915_GEM_PPGTT_ALIASING 1
+#define I915_GEM_PPGTT_FULL 2
+
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 91c43884f295..3c38ac9a92a7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -14,6 +14,7 @@
/* Extended instruction set based on top of classic BPF */
/* instruction classes */
+#define BPF_JMP32 0x06 /* jmp mode in word width */
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
@@ -266,6 +267,7 @@ enum bpf_attach_type {
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
+#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC (1U << 0)
@@ -2014,6 +2016,19 @@ union bpf_attr {
* Only works if *skb* contains an IPv6 packet. Insert a
* Segment Routing Header (**struct ipv6_sr_hdr**) inside
* the IPv6 header.
+ * **BPF_LWT_ENCAP_IP**
+ * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
+ * must be IPv4 or IPv6, followed by zero or more
+ * additional headers, up to LWT_BPF_MAX_HEADROOM total
+ * bytes in all prepended headers. Please note that
+ * if skb_is_gso(skb) is true, no more than two headers
+ * can be prepended, and the inner header, if present,
+ * should be either GRE or UDP/GUE.
+ *
+ * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of
+ * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called
+ * by bpf programs of types BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT.
*
* A call to this helper is susceptible to change the underlaying
* packet buffer. Therefore, at load time, all checks on pointers
@@ -2327,6 +2342,30 @@ union bpf_attr {
* "**y**".
* Return
* 0
+ *
+ * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_sock** pointer such
+ * that all the fields in bpf_sock can be accessed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or NULL in
+ * case of failure.
+ *
+ * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_tcp_sock** pointer from a
+ * **struct bpf_sock** pointer.
+ *
+ * Return
+ * A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * case of failure.
+ *
+ * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * Description
+ * Sets ECN of IP header to ce (congestion encountered) if
+ * current value is ect (ECN capable). Works with IPv6 and IPv4.
+ * Return
+ * 1 if set, 0 if not set.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2421,7 +2460,12 @@ union bpf_attr {
FN(map_peek_elem), \
FN(msg_push_data), \
FN(msg_pop_data), \
- FN(rc_pointer_rel),
+ FN(rc_pointer_rel), \
+ FN(spin_lock), \
+ FN(spin_unlock), \
+ FN(sk_fullsock), \
+ FN(tcp_sock), \
+ FN(skb_ecn_set_ce),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2494,7 +2538,8 @@ enum bpf_hdr_start_off {
/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6,
- BPF_LWT_ENCAP_SEG6_INLINE
+ BPF_LWT_ENCAP_SEG6_INLINE,
+ BPF_LWT_ENCAP_IP,
};
#define __bpf_md_ptr(type, name) \
@@ -2540,6 +2585,8 @@ struct __sk_buff {
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
__u64 tstamp;
__u32 wire_len;
+ __u32 gso_segs;
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
struct bpf_tunnel_key {
@@ -2581,7 +2628,15 @@ enum bpf_ret_code {
BPF_DROP = 2,
/* 3-6 reserved */
BPF_REDIRECT = 7,
- /* >127 are reserved for prog type specific return codes */
+ /* >127 are reserved for prog type specific return codes.
+ *
+ * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
+ * changed and should be routed based on its new L3 header.
+ * (This is an L3 redirect, as opposed to L2 redirect
+ * represented by BPF_REDIRECT above).
+ */
+ BPF_LWT_REROUTE = 128,
};
struct bpf_sock {
@@ -2591,14 +2646,52 @@ struct bpf_sock {
__u32 protocol;
__u32 mark;
__u32 priority;
- __u32 src_ip4; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ /* IP address also allows 1 and 2 bytes access */
+ __u32 src_ip4;
+ __u32 src_ip6[4];
+ __u32 src_port; /* host byte order */
+ __u32 dst_port; /* network byte order */
+ __u32 dst_ip4;
+ __u32 dst_ip6[4];
+ __u32 state;
+};
+
+struct bpf_tcp_sock {
+ __u32 snd_cwnd; /* Sending congestion window */
+ __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ __u32 rtt_min;
+ __u32 snd_ssthresh; /* Slow start size threshold */
+ __u32 rcv_nxt; /* What we want to receive next */
+ __u32 snd_nxt; /* Next sequence we send */
+ __u32 snd_una; /* First byte we want an ack for */
+ __u32 mss_cache; /* Cached effective mss, not including SACKS */
+ __u32 ecn_flags; /* ECN status bits. */
+ __u32 rate_delivered; /* saved rate sample: packets delivered */
+ __u32 rate_interval_us; /* saved rate sample: time elapsed */
+ __u32 packets_out; /* Packets which are "in flight" */
+ __u32 retrans_out; /* Retransmitted packets out */
+ __u32 total_retrans; /* Total retransmits for entire connection */
+ __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
*/
- __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
+ * total number of data segments in.
*/
- __u32 src_port; /* Allows 4-byte read.
- * Stored in host byte order
+ __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
+ __u32 lost_out; /* Lost packets */
+ __u32 sacked_out; /* SACK'd packets */
+ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+ * were acked.
+ */
+ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
*/
};
@@ -2728,6 +2821,8 @@ struct bpf_prog_info {
__u32 jited_line_info_rec_size;
__u32 nr_prog_tags;
__aligned_u64 prog_tags;
+ __u64 run_time_ns;
+ __u64 run_cnt;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -3054,4 +3149,7 @@ struct bpf_line_info {
__u32 line_col;
};
+struct bpf_spin_lock {
+ __u32 val;
+};
#endif /* _UAPI__LINUX_BPF_H__ */
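
To make the helper documentation above more concrete, a cgroup skb program using the new sk_fullsock/tcp_sock helpers might look roughly like this sketch; SEC() and the helper prototypes are assumed to come from libbpf's bpf_helpers.h, and none of it is part of the patch itself:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("cgroup_skb/egress")
int snoop_cwnd(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;	/* new __sk_buff field */
	struct bpf_tcp_sock *tp;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);	/* upgrade to a fully readable bpf_sock */
	if (!sk)
		return 1;
	tp = bpf_tcp_sock(sk);
	if (!tp)
		return 1;
	/* tp->snd_cwnd, tp->bytes_acked, ... are now readable */
	return 1;
}

char _license[] SEC("license") = "GPL";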
diff --git a/tools/include/uapi/linux/ethtool.h b/tools/include/uapi/linux/ethtool.h
new file mode 100644
index 000000000000..c86c3e942df9
--- /dev/null
+++ b/tools/include/uapi/linux/ethtool.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * ethtool.h: Defines for Linux ethtool.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ * Copyright 2001 Jeff Garzik <jgarzik@pobox.com>
+ * Portions Copyright 2001 Sun Microsystems (thockin@sun.com)
+ * Portions Copyright 2002 Intel (eli.kupermann@intel.com,
+ * christopher.leech@intel.com,
+ * scott.feldman@intel.com)
+ * Portions Copyright (C) Sun Microsystems 2008
+ */
+
+#ifndef _UAPI_LINUX_ETHTOOL_H
+#define _UAPI_LINUX_ETHTOOL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */
+
+/**
+ * struct ethtool_channels - configuring number of network channels
+ * @cmd: ETHTOOL_{G,S}CHANNELS
+ * @max_rx: Read only. Maximum number of receive channels the driver supports.
+ * @max_tx: Read only. Maximum number of transmit channels the driver supports.
+ * @max_other: Read only. Maximum number of other channels the driver supports.
+ * @max_combined: Read only. Maximum number of combined channels the driver
+ * supports. Set of RX, TX or other queues.
+ * @rx_count: Valid values are in the range 1 to the max_rx.
+ * @tx_count: Valid values are in the range 1 to the max_tx.
+ * @other_count: Valid values are in the range 1 to the max_other.
+ * @combined_count: Valid values are in the range 1 to the max_combined.
+ *
+ * This can be used to configure RX, TX and other channels.
+ */
+
+struct ethtool_channels {
+ __u32 cmd;
+ __u32 max_rx;
+ __u32 max_tx;
+ __u32 max_other;
+ __u32 max_combined;
+ __u32 rx_count;
+ __u32 tx_count;
+ __u32 other_count;
+ __u32 combined_count;
+};
+
+#endif /* _UAPI_LINUX_ETHTOOL_H */
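
The intended consumer here only needs to read the channel count, which goes through the usual SIOCETHTOOL ioctl; a rough sketch (not part of the patch; the ifreq plumbing comes from the regular system headers):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

static int get_channels(const char *ifname, struct ethtool_channels *ch)
{
	struct ifreq ifr;
	int fd, err;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	memset(ch, 0, sizeof(*ch));
	ch->cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)ch;

	err = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return err;
}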
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index a441ea1bfe6d..121e82ce296b 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -14,6 +14,11 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
+#if !defined(__KERNEL__)
+#include <linux/mount.h>
+#endif
+
/*
* It's silly to have NR_OPEN bigger than NR_FILE, but you can change
* the file limit at runtime and only root can increase the per-process
@@ -101,57 +106,6 @@ struct inodes_stat_t {
#define NR_FILE 8192 /* this can well be larger on a larger system */
-
-/*
- * These are the fs-independent mount-flags: up to 32 flags are supported
- */
-#define MS_RDONLY 1 /* Mount read-only */
-#define MS_NOSUID 2 /* Ignore suid and sgid bits */
-#define MS_NODEV 4 /* Disallow access to device special files */
-#define MS_NOEXEC 8 /* Disallow program execution */
-#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
-#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
-#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
-#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
-#define MS_NOATIME 1024 /* Do not update access times. */
-#define MS_NODIRATIME 2048 /* Do not update directory access times */
-#define MS_BIND 4096
-#define MS_MOVE 8192
-#define MS_REC 16384
-#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
- MS_VERBOSE is deprecated. */
-#define MS_SILENT 32768
-#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
-#define MS_UNBINDABLE (1<<17) /* change to unbindable */
-#define MS_PRIVATE (1<<18) /* change to private */
-#define MS_SLAVE (1<<19) /* change to slave */
-#define MS_SHARED (1<<20) /* change to shared */
-#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
-#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
-#define MS_I_VERSION (1<<23) /* Update inode I_version field */
-#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
-#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
-
-/* These sb flags are internal to the kernel */
-#define MS_SUBMOUNT (1<<26)
-#define MS_NOREMOTELOCK (1<<27)
-#define MS_NOSEC (1<<28)
-#define MS_BORN (1<<29)
-#define MS_ACTIVE (1<<30)
-#define MS_NOUSER (1<<31)
-
-/*
- * Superblock flags that can be altered by MS_REMOUNT
- */
-#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
- MS_LAZYTIME)
-
-/*
- * Old magic mount flag and mask
- */
-#define MS_MGC_VAL 0xC0ED0000
-#define MS_MGC_MSK 0xffff0000
-
/*
* Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR.
*/
@@ -269,7 +223,8 @@ struct fsxattr {
#define FS_POLICY_FLAGS_PAD_16 0x02
#define FS_POLICY_FLAGS_PAD_32 0x03
#define FS_POLICY_FLAGS_PAD_MASK 0x03
-#define FS_POLICY_FLAGS_VALID 0x03
+#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */
+#define FS_POLICY_FLAGS_VALID 0x07
/* Encryption algorithms */
#define FS_ENCRYPTION_MODE_INVALID 0
@@ -281,6 +236,7 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_AES_128_CTS 6
#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_ADIANTUM 9
struct fscrypt_policy {
__u8 version;
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 1debfa42cba1..5b225ff63b48 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -288,6 +288,7 @@ enum {
IFLA_BR_MCAST_IGMP_VERSION,
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
+ IFLA_BR_MULTI_BOOLOPT,
__IFLA_BR_MAX,
};
@@ -533,6 +534,7 @@ enum {
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
+ IFLA_VXLAN_DF,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -542,6 +544,14 @@ struct ifla_vxlan_port_range {
__be16 high;
};
+enum ifla_vxlan_df {
+ VXLAN_DF_UNSET = 0,
+ VXLAN_DF_SET,
+ VXLAN_DF_INHERIT,
+ __VXLAN_DF_END,
+ VXLAN_DF_MAX = __VXLAN_DF_END - 1,
+};
+
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
@@ -557,10 +567,19 @@ enum {
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
IFLA_GENEVE_LABEL,
IFLA_GENEVE_TTL_INHERIT,
+ IFLA_GENEVE_DF,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+enum ifla_geneve_df {
+ GENEVE_DF_UNSET = 0,
+ GENEVE_DF_SET,
+ GENEVE_DF_INHERIT,
+ __GENEVE_DF_END,
+ GENEVE_DF_MAX = __GENEVE_DF_END - 1,
+};
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
@@ -906,6 +925,7 @@ enum {
enum {
LINK_XSTATS_TYPE_UNSPEC,
LINK_XSTATS_TYPE_BRIDGE,
+ LINK_XSTATS_TYPE_BOND,
__LINK_XSTATS_TYPE_MAX
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
new file mode 100644
index 000000000000..caed8b1614ff
--- /dev/null
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * if_xdp: XDP socket user-space interface
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * Author(s): Björn Töpel <bjorn.topel@intel.com>
+ * Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#ifndef _LINUX_IF_XDP_H
+#define _LINUX_IF_XDP_H
+
+#include <linux/types.h>
+
+/* Options for the sxdp_flags field */
+#define XDP_SHARED_UMEM (1 << 0)
+#define XDP_COPY (1 << 1) /* Force copy-mode */
+#define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */
+
+struct sockaddr_xdp {
+ __u16 sxdp_family;
+ __u16 sxdp_flags;
+ __u32 sxdp_ifindex;
+ __u32 sxdp_queue_id;
+ __u32 sxdp_shared_umem_fd;
+};
+
+struct xdp_ring_offset {
+ __u64 producer;
+ __u64 consumer;
+ __u64 desc;
+};
+
+struct xdp_mmap_offsets {
+ struct xdp_ring_offset rx;
+ struct xdp_ring_offset tx;
+ struct xdp_ring_offset fr; /* Fill */
+ struct xdp_ring_offset cr; /* Completion */
+};
+
+/* XDP socket options */
+#define XDP_MMAP_OFFSETS 1
+#define XDP_RX_RING 2
+#define XDP_TX_RING 3
+#define XDP_UMEM_REG 4
+#define XDP_UMEM_FILL_RING 5
+#define XDP_UMEM_COMPLETION_RING 6
+#define XDP_STATISTICS 7
+
+struct xdp_umem_reg {
+ __u64 addr; /* Start of packet data area */
+ __u64 len; /* Length of packet data area */
+ __u32 chunk_size;
+ __u32 headroom;
+};
+
+struct xdp_statistics {
+ __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+};
+
+/* Pgoff for mmaping the rings */
+#define XDP_PGOFF_RX_RING 0
+#define XDP_PGOFF_TX_RING 0x80000000
+#define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL
+#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
+
+/* Rx/Tx descriptor */
+struct xdp_desc {
+ __u64 addr;
+ __u32 len;
+ __u32 options;
+};
+
+/* UMEM descriptor is __u64 */
+
+#endif /* _LINUX_IF_XDP_H */
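
For orientation, creating and binding an AF_XDP socket with these definitions looks roughly as follows (sketch only; AF_XDP normally comes from the system socket headers, with a fallback value given here as an assumption, and the UMEM registration and ring mmap steps are deliberately omitted):

#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef AF_XDP
#define AF_XDP 44	/* PF_XDP */
#endif

static int xsk_open_and_bind(int ifindex, __u32 queue_id)
{
	struct sockaddr_xdp sxdp = {
		.sxdp_family   = AF_XDP,
		.sxdp_ifindex  = ifindex,
		.sxdp_queue_id = queue_id,
		.sxdp_flags    = XDP_COPY,	/* force copy mode for simplicity */
	};
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0)
		return -1;

	/* a real user registers a UMEM (XDP_UMEM_REG) and maps the rings
	 * first; without that the kernel will reject this bind
	 */
	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
		return -1;

	return fd;
}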
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 48e8a225b985..a55cb8b10165 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -266,10 +266,14 @@ struct sockaddr_in {
#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
#define IN_MULTICAST(a) IN_CLASSD(a)
-#define IN_MULTICAST_NET 0xF0000000
+#define IN_MULTICAST_NET 0xe0000000
-#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
-#define IN_BADCLASS(a) IN_EXPERIMENTAL((a))
+#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
+#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
+
+#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define IN_CLASSE_NET 0xffffffff
+#define IN_CLASSE_NSHIFT 0
/* Address to accept any incoming messages. */
#define INADDR_ANY ((unsigned long int) 0x00000000)
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 2b7a652c9fa4..6d4ea4b6c922 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -492,6 +492,17 @@ struct kvm_dirty_log {
};
};
+/* for KVM_CLEAR_DIRTY_LOG */
+struct kvm_clear_dirty_log {
+ __u32 slot;
+ __u32 num_pages;
+ __u64 first_page;
+ union {
+ void __user *dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+};
+
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
@@ -975,6 +986,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
#define KVM_CAP_EXCEPTION_PAYLOAD 164
#define KVM_CAP_ARM_VM_IPA_SIZE 165
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
+#define KVM_CAP_HYPERV_CPUID 167
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1421,6 +1434,12 @@ struct kvm_enc_region {
#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
+/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
+#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
+
+/* Available with KVM_CAP_HYPERV_CPUID */
+#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
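
As a usage sketch for the new ioctl (not part of the patch; vm_fd and the bitmap management belong to the caller, and KVM_CAP_MANUAL_DIRTY_LOG_PROTECT must have been enabled on the VM first):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* re-protect the pages whose bits are set in @bitmap for @slot */
static int clear_dirty(int vm_fd, __u32 slot, __u64 first_page,
		       __u32 num_pages, void *bitmap)
{
	struct kvm_clear_dirty_log log;

	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.first_page = first_page;
	log.num_pages = num_pages;
	log.dirty_bitmap = bitmap;

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
}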
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
new file mode 100644
index 000000000000..3f9ec42510b0
--- /dev/null
+++ b/tools/include/uapi/linux/mount.h
@@ -0,0 +1,58 @@
+#ifndef _UAPI_LINUX_MOUNT_H
+#define _UAPI_LINUX_MOUNT_H
+
+/*
+ * These are the fs-independent mount-flags: up to 32 flags are supported
+ *
+ * Usage of these is restricted within the kernel to core mount(2) code and
+ * callers of sys_mount() only. Filesystems should be using the SB_*
+ * equivalent instead.
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define MS_NODIRATIME 2048 /* Do not update directory access times */
+#define MS_BIND 4096
+#define MS_MOVE 8192
+#define MS_REC 16384
+#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
+ MS_VERBOSE is deprecated. */
+#define MS_SILENT 32768
+#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define MS_UNBINDABLE (1<<17) /* change to unbindable */
+#define MS_PRIVATE (1<<18) /* change to private */
+#define MS_SLAVE (1<<19) /* change to slave */
+#define MS_SHARED (1<<20) /* change to shared */
+#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
+#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
+#define MS_I_VERSION (1<<23) /* Update inode I_version field */
+#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
+#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define MS_SUBMOUNT (1<<26)
+#define MS_NOREMOTELOCK (1<<27)
+#define MS_NOSEC (1<<28)
+#define MS_BORN (1<<29)
+#define MS_ACTIVE (1<<30)
+#define MS_NOUSER (1<<31)
+
+/*
+ * Superblock flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
+ MS_LAZYTIME)
+
+/*
+ * Old magic mount flag and mask
+ */
+#define MS_MGC_VAL 0xC0ED0000
+#define MS_MGC_MSK 0xffff0000
+
+#endif /* _UAPI_LINUX_MOUNT_H */
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 9de8780ac8d9..7198ddd0c6b1 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -372,7 +372,9 @@ struct perf_event_attr {
context_switch : 1, /* context switch data */
write_backward : 1, /* Write ring buffer from end to beginning */
namespaces : 1, /* include namespaces data */
- __reserved_1 : 35;
+ ksymbol : 1, /* include ksymbol events */
+ bpf_event : 1, /* include bpf events */
+ __reserved_1 : 33;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -445,8 +447,6 @@ struct perf_event_query_bpf {
__u32 ids[0];
};
-#define perf_flags(attr) (*(&(attr)->read_format + 1))
-
/*
* Ioctls that can be done on a perf event fd:
*/
@@ -965,9 +965,58 @@ enum perf_event_type {
*/
PERF_RECORD_NAMESPACES = 16,
+ /*
+ * Record ksymbol register/unregister events:
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 addr;
+ * u32 len;
+ * u16 ksym_type;
+ * u16 flags;
+ * char name[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_KSYMBOL = 17,
+
+ /*
+ * Record bpf events:
+ * enum perf_bpf_event_type {
+ * PERF_BPF_EVENT_UNKNOWN = 0,
+ * PERF_BPF_EVENT_PROG_LOAD = 1,
+ * PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ * };
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u16 type;
+ * u16 flags;
+ * u32 id;
+ * u8 tag[BPF_TAG_SIZE];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_BPF_EVENT = 18,
+
PERF_RECORD_MAX, /* non-ABI */
};
+enum perf_record_ksymbol_type {
+ PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
+ PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
+ PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */
+};
+
+#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0)
+
+enum perf_bpf_event_type {
+ PERF_BPF_EVENT_UNKNOWN = 0,
+ PERF_BPF_EVENT_PROG_LOAD = 1,
+ PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ PERF_BPF_EVENT_MAX, /* non-ABI */
+};
+
#define PERF_MAX_STACK_DEPTH 127
#define PERF_MAX_CONTEXTS_PER_STACK 8
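
A consumer opts into the new sideband records by setting the freshly added attribute bits; a bare-bones sketch (not from the patch, using the raw syscall since glibc provides no wrapper):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_dummy_with_bpf_sideband(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.ksymbol = 1;	/* deliver PERF_RECORD_KSYMBOL */
	attr.bpf_event = 1;	/* deliver PERF_RECORD_BPF_EVENT */

	/* current process, any CPU, no group fd, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}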
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
new file mode 100644
index 000000000000..0d18b1d1fbbc
--- /dev/null
+++ b/tools/include/uapi/linux/pkt_sched.h
@@ -0,0 +1,1163 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LINUX_PKT_SCHED_H
+#define __LINUX_PKT_SCHED_H
+
+#include <linux/types.h>
+
+/* Logical priority bands not depending on specific packet scheduler.
+ Every scheduler will map them to real traffic classes, if it has
+ no more precise mechanism to classify packets.
+
+ These numbers have no special meaning, though their coincidence
+ with obsolete IPv6 values is not occasional :-). New IPv6 drafts
+ preferred full anarchy inspired by diffserv group.
+
+ Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
+ class; actually, as a rule it will be handled with more care than
+ filler or even bulk.
+ */
+
+#define TC_PRIO_BESTEFFORT 0
+#define TC_PRIO_FILLER 1
+#define TC_PRIO_BULK 2
+#define TC_PRIO_INTERACTIVE_BULK 4
+#define TC_PRIO_INTERACTIVE 6
+#define TC_PRIO_CONTROL 7
+
+#define TC_PRIO_MAX 15
+
+/* Generic queue statistics, available for all the elements.
+ Particular schedulers may have also their private records.
+ */
+
+struct tc_stats {
+ __u64 bytes; /* Number of enqueued bytes */
+ __u32 packets; /* Number of enqueued packets */
+ __u32 drops; /* Packets dropped because of lack of resources */
+ __u32 overlimits; /* Number of throttle events when this
+ * flow goes out of allocated bandwidth */
+ __u32 bps; /* Current flow byte rate */
+ __u32 pps; /* Current flow packet rate */
+ __u32 qlen;
+ __u32 backlog;
+};
+
+struct tc_estimator {
+ signed char interval;
+ unsigned char ewma_log;
+};
+
+/* "Handles"
+ ---------
+
+ All the traffic control objects have 32bit identifiers, or "handles".
+
+ They can be considered as opaque numbers from user API viewpoint,
+ but actually they always consist of two fields: major and
+ minor numbers, which are interpreted specially by the kernel and
+ may be used by applications, though this is not recommended.
+
+ F.e. qdisc handles always have minor number equal to zero,
+ classes (or flows) have major equal to parent qdisc major, and
+ minor uniquely identifying class inside qdisc.
+
+ Macros to manipulate handles:
+ */
+
+#define TC_H_MAJ_MASK (0xFFFF0000U)
+#define TC_H_MIN_MASK (0x0000FFFFU)
+#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
+#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
+#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
+
+#define TC_H_UNSPEC (0U)
+#define TC_H_ROOT (0xFFFFFFFFU)
+#define TC_H_INGRESS (0xFFFFFFF1U)
+#define TC_H_CLSACT TC_H_INGRESS
+
+#define TC_H_MIN_PRIORITY 0xFFE0U
+#define TC_H_MIN_INGRESS 0xFFF2U
+#define TC_H_MIN_EGRESS 0xFFF3U
+
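As a minimal sketch (not part of the patch) of the major/minor layout described above, the macros compose and split a handle like this:

    #include <assert.h>
    #include <linux/pkt_sched.h>

    /* Illustrative only: build the class handle "1:10" and take it apart. */
    static void tc_handle_demo(void)
    {
            /* qdisc handles keep minor == 0; classes reuse the parent's major. */
            __u32 qdisc = TC_H_MAKE(0x1U << 16, 0);   /* "1:"   */
            __u32 cls   = TC_H_MAKE(qdisc, 0x10);     /* "1:10" */

            assert(TC_H_MAJ(cls) == 0x00010000U);
            assert(TC_H_MIN(cls) == 0x10U);
    }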
+/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+ TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+ TC_LINKLAYER_ETHERNET,
+ TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
+struct tc_ratespec {
+ unsigned char cell_log;
+ __u8 linklayer; /* lower 4 bits */
+ unsigned short overhead;
+ short cell_align;
+ unsigned short mpu;
+ __u32 rate;
+};
+
+#define TC_RTAB_SIZE 1024
+
+struct tc_sizespec {
+ unsigned char cell_log;
+ unsigned char size_log;
+ short cell_align;
+ int overhead;
+ unsigned int linklayer;
+ unsigned int mpu;
+ unsigned int mtu;
+ unsigned int tsize;
+};
+
+enum {
+ TCA_STAB_UNSPEC,
+ TCA_STAB_BASE,
+ TCA_STAB_DATA,
+ __TCA_STAB_MAX
+};
+
+#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
+
+/* FIFO section */
+
+struct tc_fifo_qopt {
+ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
+};
+
+/* SKBPRIO section */
+
+/*
+ * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
+ * SKBPRIO_MAX_PRIORITY should be at least 64 so that skbprio can map the
+ * DS field of IPv4 and IPv6 headers one to one.
+ * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
+ */
+
+#define SKBPRIO_MAX_PRIORITY 64
+
+struct tc_skbprio_qopt {
+ __u32 limit; /* Queue length in packets. */
+};
+
+/* PRIO section */
+
+#define TCQ_PRIO_BANDS 16
+#define TCQ_MIN_PRIO_BANDS 2
+
+struct tc_prio_qopt {
+ int bands; /* Number of bands */
+ __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
+};
+
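For example (an illustrative sketch, not part of the patch), a three-band PRIO configuration fills the priomap with one band index per logical TC_PRIO_* value; the mapping below roughly mirrors the familiar pfifo_fast default and is only an assumption for the example:

    #include <linux/pkt_sched.h>

    /* Band 0 = interactive, 1 = best effort, 2 = bulk; indexed by TC_PRIO_*. */
    static const struct tc_prio_qopt prio_opt = {
            .bands   = 3,
            .priomap = { 1, 2, 2, 2, 1, 2, 0, 0,
                         1, 1, 1, 1, 1, 1, 1, 1 },
    };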
+/* MULTIQ section */
+
+struct tc_multiq_qopt {
+ __u16 bands; /* Number of bands */
+ __u16 max_bands; /* Maximum number of queues */
+};
+
+/* PLUG section */
+
+#define TCQ_PLUG_BUFFER 0
+#define TCQ_PLUG_RELEASE_ONE 1
+#define TCQ_PLUG_RELEASE_INDEFINITE 2
+#define TCQ_PLUG_LIMIT 3
+
+struct tc_plug_qopt {
+ /* TCQ_PLUG_BUFFER: Insert a plug into the queue and
+ * buffer any incoming packets
+ * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
+ * to beginning of the next plug.
+ * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
+ * Stop buffering packets until the next TCQ_PLUG_BUFFER
+ * command is received (just act as a pass-thru queue).
+ * TCQ_PLUG_LIMIT: Increase/decrease queue size
+ */
+ int action;
+ __u32 limit;
+};
+
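A hedged usage sketch (not part of the patch): a checkpoint/migration style user alternates buffer and release commands by resubmitting the option struct on a qdisc change request; building and sending the netlink message itself is omitted here:

    #include <linux/pkt_sched.h>

    /* Illustrative only: start buffering, then later release everything
     * that was queued before the plug. */
    static const struct tc_plug_qopt start_plug = {
            .action = TCQ_PLUG_BUFFER,
    };

    static const struct tc_plug_qopt release_to_plug = {
            .action = TCQ_PLUG_RELEASE_ONE, /* dequeue up to the next plug */
    };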
+/* TBF section */
+
+struct tc_tbf_qopt {
+ struct tc_ratespec rate;
+ struct tc_ratespec peakrate;
+ __u32 limit;
+ __u32 buffer;
+ __u32 mtu;
+};
+
+enum {
+ TCA_TBF_UNSPEC,
+ TCA_TBF_PARMS,
+ TCA_TBF_RTAB,
+ TCA_TBF_PTAB,
+ TCA_TBF_RATE64,
+ TCA_TBF_PRATE64,
+ TCA_TBF_BURST,
+ TCA_TBF_PBURST,
+ TCA_TBF_PAD,
+ __TCA_TBF_MAX,
+};
+
+#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
+
+
+/* TEQL section */
+
+/* TEQL does not require any parameters */
+
+/* SFQ section */
+
+struct tc_sfq_qopt {
+ unsigned quantum; /* Bytes per round allocated to flow */
+ int perturb_period; /* Period of hash perturbation */
+ __u32 limit; /* Maximal packets in queue */
+ unsigned divisor; /* Hash divisor */
+ unsigned flows; /* Maximal number of flows */
+};
+
+struct tc_sfqred_stats {
+ __u32 prob_drop; /* Early drops, below max threshold */
+ __u32 forced_drop; /* Early drops, after max threshold */
+ __u32 prob_mark; /* Marked packets, below max threshold */
+ __u32 forced_mark; /* Marked packets, after max threshold */
+ __u32 prob_mark_head; /* Marked packets, below max threshold */
+ __u32 forced_mark_head;/* Marked packets, after max threshold */
+};
+
+struct tc_sfq_qopt_v1 {
+ struct tc_sfq_qopt v0;
+ unsigned int depth; /* max number of packets per flow */
+ unsigned int headdrop;
+/* SFQRED parameters */
+ __u32 limit; /* HARD maximal flow queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags;
+ __u32 max_P; /* probability, high resolution */
+/* SFQRED stats */
+ struct tc_sfqred_stats stats;
+};
+
+
+struct tc_sfq_xstats {
+ __s32 allot;
+};
+
+/* RED section */
+
+enum {
+ TCA_RED_UNSPEC,
+ TCA_RED_PARMS,
+ TCA_RED_STAB,
+ TCA_RED_MAX_P,
+ __TCA_RED_MAX,
+};
+
+#define TCA_RED_MAX (__TCA_RED_MAX - 1)
+
+struct tc_red_qopt {
+ __u32 limit; /* HARD maximal queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags;
+#define TC_RED_ECN 1
+#define TC_RED_HARDDROP 2
+#define TC_RED_ADAPTATIVE 4
+};
+
+struct tc_red_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+};
+
+/* GRED section */
+
+#define MAX_DPs 16
+
+enum {
+ TCA_GRED_UNSPEC,
+ TCA_GRED_PARMS,
+ TCA_GRED_STAB,
+ TCA_GRED_DPS,
+ TCA_GRED_MAX_P,
+ TCA_GRED_LIMIT,
+ TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
+ __TCA_GRED_MAX,
+};
+
+#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_ENTRY_UNSPEC,
+ TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
+ __TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_UNSPEC,
+ TCA_GRED_VQ_PAD,
+ TCA_GRED_VQ_DP, /* u32 */
+ TCA_GRED_VQ_STAT_BYTES, /* u64 */
+ TCA_GRED_VQ_STAT_PACKETS, /* u32 */
+ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_PDROP, /* u32 */
+ TCA_GRED_VQ_STAT_OTHER, /* u32 */
+ TCA_GRED_VQ_FLAGS, /* u32 */
+ __TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
+
+struct tc_gred_qopt {
+ __u32 limit; /* HARD maximal queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ __u32 DP; /* up to 2^32 DPs */
+ __u32 backlog;
+ __u32 qave;
+ __u32 forced;
+ __u32 early;
+ __u32 other;
+ __u32 pdrop;
+ __u8 Wlog; /* log(W) */
+ __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
+ __u8 Scell_log; /* cell size for idle damping */
+ __u8 prio; /* prio of this VQ */
+ __u32 packets;
+ __u32 bytesin;
+};
+
+/* gred setup */
+struct tc_gred_sopt {
+ __u32 DPs;
+ __u32 def_DP;
+ __u8 grio;
+ __u8 flags;
+ __u16 pad1;
+};
+
+/* CHOKe section */
+
+enum {
+ TCA_CHOKE_UNSPEC,
+ TCA_CHOKE_PARMS,
+ TCA_CHOKE_STAB,
+ TCA_CHOKE_MAX_P,
+ __TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+ __u32 limit; /* Hard queue length (packets) */
+ __u32 qth_min; /* Min average threshold (packets) */
+ __u32 qth_max; /* Max average threshold (packets) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags; /* see RED flags */
+};
+
+struct tc_choke_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+ __u32 matched; /* Drops due to flow match */
+};
+
+/* HTB section */
+#define TC_HTB_NUMPRIO 8
+#define TC_HTB_MAXDEPTH 8
+#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */
+
+struct tc_htb_opt {
+ struct tc_ratespec rate;
+ struct tc_ratespec ceil;
+ __u32 buffer;
+ __u32 cbuffer;
+ __u32 quantum;
+ __u32 level; /* out only */
+ __u32 prio;
+};
+struct tc_htb_glob {
+ __u32 version; /* to match HTB/TC */
+ __u32 rate2quantum; /* bps->quantum divisor */
+ __u32 defcls; /* default class number */
+ __u32 debug; /* debug flags */
+
+ /* stats */
+ __u32 direct_pkts; /* count of non shaped packets */
+};
+enum {
+ TCA_HTB_UNSPEC,
+ TCA_HTB_PARMS,
+ TCA_HTB_INIT,
+ TCA_HTB_CTAB,
+ TCA_HTB_RTAB,
+ TCA_HTB_DIRECT_QLEN,
+ TCA_HTB_RATE64,
+ TCA_HTB_CEIL64,
+ TCA_HTB_PAD,
+ __TCA_HTB_MAX,
+};
+
+#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
+
+struct tc_htb_xstats {
+ __u32 lends;
+ __u32 borrows;
+ __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
+ __s32 tokens;
+ __s32 ctokens;
+};
+
+/* HFSC section */
+
+struct tc_hfsc_qopt {
+ __u16 defcls; /* default class */
+};
+
+struct tc_service_curve {
+ __u32 m1; /* slope of the first segment in bps */
+ __u32 d; /* x-projection of the first segment in us */
+ __u32 m2; /* slope of the second segment in bps */
+};
+
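The three fields describe a two-segment service curve (slope m1 for the first d microseconds, slope m2 afterwards); a small illustrative helper, not part of the patch, for the accumulated service in bytes would be:

    #include <linux/pkt_sched.h>

    /* Illustrative: bytes of service granted after t_us microseconds.
     * m1/m2 are in bits per second, d is in microseconds. */
    static __u64 hfsc_service_bytes(const struct tc_service_curve *sc, __u64 t_us)
    {
            if (t_us <= sc->d)
                    return (__u64)sc->m1 * t_us / 8 / 1000000;

            return ((__u64)sc->m1 * sc->d +
                    (__u64)sc->m2 * (t_us - sc->d)) / 8 / 1000000;
    }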
+struct tc_hfsc_stats {
+ __u64 work; /* total work done */
+ __u64 rtwork; /* work done by real-time criteria */
+ __u32 period; /* current period */
+ __u32 level; /* class level in hierarchy */
+};
+
+enum {
+ TCA_HFSC_UNSPEC,
+ TCA_HFSC_RSC,
+ TCA_HFSC_FSC,
+ TCA_HFSC_USC,
+ __TCA_HFSC_MAX,
+};
+
+#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
+
+
+/* CBQ section */
+
+#define TC_CBQ_MAXPRIO 8
+#define TC_CBQ_MAXLEVEL 8
+#define TC_CBQ_DEF_EWMA 5
+
+struct tc_cbq_lssopt {
+ unsigned char change;
+ unsigned char flags;
+#define TCF_CBQ_LSS_BOUNDED 1
+#define TCF_CBQ_LSS_ISOLATED 2
+ unsigned char ewma_log;
+ unsigned char level;
+#define TCF_CBQ_LSS_FLAGS 1
+#define TCF_CBQ_LSS_EWMA 2
+#define TCF_CBQ_LSS_MAXIDLE 4
+#define TCF_CBQ_LSS_MINIDLE 8
+#define TCF_CBQ_LSS_OFFTIME 0x10
+#define TCF_CBQ_LSS_AVPKT 0x20
+ __u32 maxidle;
+ __u32 minidle;
+ __u32 offtime;
+ __u32 avpkt;
+};
+
+struct tc_cbq_wrropt {
+ unsigned char flags;
+ unsigned char priority;
+ unsigned char cpriority;
+ unsigned char __reserved;
+ __u32 allot;
+ __u32 weight;
+};
+
+struct tc_cbq_ovl {
+ unsigned char strategy;
+#define TC_CBQ_OVL_CLASSIC 0
+#define TC_CBQ_OVL_DELAY 1
+#define TC_CBQ_OVL_LOWPRIO 2
+#define TC_CBQ_OVL_DROP 3
+#define TC_CBQ_OVL_RCLASSIC 4
+ unsigned char priority2;
+ __u16 pad;
+ __u32 penalty;
+};
+
+struct tc_cbq_police {
+ unsigned char police;
+ unsigned char __res1;
+ unsigned short __res2;
+};
+
+struct tc_cbq_fopt {
+ __u32 split;
+ __u32 defmap;
+ __u32 defchange;
+};
+
+struct tc_cbq_xstats {
+ __u32 borrows;
+ __u32 overactions;
+ __s32 avgidle;
+ __s32 undertime;
+};
+
+enum {
+ TCA_CBQ_UNSPEC,
+ TCA_CBQ_LSSOPT,
+ TCA_CBQ_WRROPT,
+ TCA_CBQ_FOPT,
+ TCA_CBQ_OVL_STRATEGY,
+ TCA_CBQ_RATE,
+ TCA_CBQ_RTAB,
+ TCA_CBQ_POLICE,
+ __TCA_CBQ_MAX,
+};
+
+#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
+
+/* dsmark section */
+
+enum {
+ TCA_DSMARK_UNSPEC,
+ TCA_DSMARK_INDICES,
+ TCA_DSMARK_DEFAULT_INDEX,
+ TCA_DSMARK_SET_TC_INDEX,
+ TCA_DSMARK_MASK,
+ TCA_DSMARK_VALUE,
+ __TCA_DSMARK_MAX,
+};
+
+#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
+
+/* ATM section */
+
+enum {
+ TCA_ATM_UNSPEC,
+ TCA_ATM_FD, /* file/socket descriptor */
+ TCA_ATM_PTR, /* pointer to descriptor - later */
+ TCA_ATM_HDR, /* LL header */
+ TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
+ TCA_ATM_ADDR, /* PVC address (for output only) */
+ TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
+ __TCA_ATM_MAX,
+};
+
+#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
+
+/* Network emulator */
+
+enum {
+ TCA_NETEM_UNSPEC,
+ TCA_NETEM_CORR,
+ TCA_NETEM_DELAY_DIST,
+ TCA_NETEM_REORDER,
+ TCA_NETEM_CORRUPT,
+ TCA_NETEM_LOSS,
+ TCA_NETEM_RATE,
+ TCA_NETEM_ECN,
+ TCA_NETEM_RATE64,
+ TCA_NETEM_PAD,
+ TCA_NETEM_LATENCY64,
+ TCA_NETEM_JITTER64,
+ TCA_NETEM_SLOT,
+ TCA_NETEM_SLOT_DIST,
+ __TCA_NETEM_MAX,
+};
+
+#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
+
+struct tc_netem_qopt {
+ __u32 latency; /* added delay (us) */
+ __u32 limit; /* fifo limit (packets) */
+ __u32 loss; /* random packet loss (0=none ~0=100%) */
+ __u32 gap; /* re-ordering gap (0 for none) */
+ __u32 duplicate; /* random packet dup (0=none ~0=100%) */
+ __u32 jitter; /* random jitter in latency (us) */
+};
+
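The loss and duplicate fields are linearly scaled probabilities, 0 meaning never and ~0 (UINT32_MAX) meaning always, per the comments above; a hedged conversion helper (an assumption for illustration, not part of the patch) would look like:

    #include <stdint.h>

    /* Illustrative: convert a percentage (0..100) to the __u32 scale used by
     * the loss/duplicate fields, where UINT32_MAX represents 100%. */
    static uint32_t netem_percent(double pct)
    {
            if (pct <= 0.0)
                    return 0;
            if (pct >= 100.0)
                    return UINT32_MAX;
            return (uint32_t)(pct / 100.0 * UINT32_MAX);
    }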
+struct tc_netem_corr {
+ __u32 delay_corr; /* delay correlation */
+ __u32 loss_corr; /* packet loss correlation */
+ __u32 dup_corr; /* duplicate correlation */
+};
+
+struct tc_netem_reorder {
+ __u32 probability;
+ __u32 correlation;
+};
+
+struct tc_netem_corrupt {
+ __u32 probability;
+ __u32 correlation;
+};
+
+struct tc_netem_rate {
+ __u32 rate; /* byte/s */
+ __s32 packet_overhead;
+ __u32 cell_size;
+ __s32 cell_overhead;
+};
+
+struct tc_netem_slot {
+ __s64 min_delay; /* nsec */
+ __s64 max_delay;
+ __s32 max_packets;
+ __s32 max_bytes;
+ __s64 dist_delay; /* nsec */
+ __s64 dist_jitter; /* nsec */
+};
+
+enum {
+ NETEM_LOSS_UNSPEC,
+ NETEM_LOSS_GI, /* General Intuitive - 4 state model */
+ NETEM_LOSS_GE, /* Gilbert Elliot models */
+ __NETEM_LOSS_MAX
+};
+#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
+
+/* State transition probabilities for 4 state model */
+struct tc_netem_gimodel {
+ __u32 p13;
+ __u32 p31;
+ __u32 p32;
+ __u32 p14;
+ __u32 p23;
+};
+
+/* Gilbert-Elliot models */
+struct tc_netem_gemodel {
+ __u32 p;
+ __u32 r;
+ __u32 h;
+ __u32 k1;
+};
+
+#define NETEM_DIST_SCALE 8192
+#define NETEM_DIST_MAX 16384
+
+/* DRR */
+
+enum {
+ TCA_DRR_UNSPEC,
+ TCA_DRR_QUANTUM,
+ __TCA_DRR_MAX
+};
+
+#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
+
+struct tc_drr_stats {
+ __u32 deficit;
+};
+
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+enum {
+ TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
+ TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
+ __TC_MQPRIO_HW_OFFLOAD_MAX
+};
+
+#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
+
+enum {
+ TC_MQPRIO_MODE_DCB,
+ TC_MQPRIO_MODE_CHANNEL,
+ __TC_MQPRIO_MODE_MAX
+};
+
+#define TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
+
+enum {
+ TC_MQPRIO_SHAPER_DCB,
+ TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */
+ __TC_MQPRIO_SHAPER_MAX
+};
+
+#define TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
+
+struct tc_mqprio_qopt {
+ __u8 num_tc;
+ __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
+ __u8 hw;
+ __u16 count[TC_QOPT_MAX_QUEUE];
+ __u16 offset[TC_QOPT_MAX_QUEUE];
+};
+
+#define TC_MQPRIO_F_MODE 0x1
+#define TC_MQPRIO_F_SHAPER 0x2
+#define TC_MQPRIO_F_MIN_RATE 0x4
+#define TC_MQPRIO_F_MAX_RATE 0x8
+
+enum {
+ TCA_MQPRIO_UNSPEC,
+ TCA_MQPRIO_MODE,
+ TCA_MQPRIO_SHAPER,
+ TCA_MQPRIO_MIN_RATE64,
+ TCA_MQPRIO_MAX_RATE64,
+ __TCA_MQPRIO_MAX,
+};
+
+#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
+
+/* SFB */
+
+enum {
+ TCA_SFB_UNSPEC,
+ TCA_SFB_PARMS,
+ __TCA_SFB_MAX,
+};
+
+#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
+
+/*
+ * Note: increment, decrement are Q0.16 fixed-point values.
+ */
+struct tc_sfb_qopt {
+ __u32 rehash_interval; /* delay between hash move, in ms */
+ __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */
+ __u32 max; /* max len of qlen_min */
+ __u32 bin_size; /* maximum queue length per bin */
+ __u32 increment; /* probability increment, (d1 in Blue) */
+ __u32 decrement; /* probability decrement, (d2 in Blue) */
+ __u32 limit; /* max SFB queue length */
+ __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
+ __u32 penalty_burst;
+};
+
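Since increment and decrement are Q0.16 fixed-point values (see the note above), a probability step of 0.005 encodes as round(0.005 * 65536) = 328; a small hedged helper, not part of the patch:

    #include <linux/types.h>
    #include <math.h>

    /* Illustrative: encode a probability in [0, 1] as Q0.16 fixed point,
     * the format used by the increment/decrement fields above. */
    static __u32 sfb_q0_16(double p)
    {
            long v = lround(p * 65536.0);

            if (v < 0)
                    v = 0;
            if (v > 0xFFFF)
                    v = 0xFFFF;
            return (__u32)v;
    }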
+struct tc_sfb_xstats {
+ __u32 earlydrop;
+ __u32 penaltydrop;
+ __u32 bucketdrop;
+ __u32 queuedrop;
+ __u32 childdrop; /* drops in child qdisc */
+ __u32 marked;
+ __u32 maxqlen;
+ __u32 maxprob;
+ __u32 avgprob;
+};
+
+#define SFB_MAX_PROB 0xFFFF
+
+/* QFQ */
+enum {
+ TCA_QFQ_UNSPEC,
+ TCA_QFQ_WEIGHT,
+ TCA_QFQ_LMAX,
+ __TCA_QFQ_MAX
+};
+
+#define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1)
+
+struct tc_qfq_stats {
+ __u32 weight;
+ __u32 lmax;
+};
+
+/* CODEL */
+
+enum {
+ TCA_CODEL_UNSPEC,
+ TCA_CODEL_TARGET,
+ TCA_CODEL_LIMIT,
+ TCA_CODEL_INTERVAL,
+ TCA_CODEL_ECN,
+ TCA_CODEL_CE_THRESHOLD,
+ __TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 count; /* how many drops we've done since the last time we
+ * entered dropping state
+ */
+ __u32 lastcount; /* count at entry to dropping state */
+ __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */
+ __s32 drop_next; /* time to drop next packet */
+ __u32 drop_overlimit; /* number of times max qdisc packet limit was hit */
+ __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
+ __u32 dropping; /* are we in dropping state ? */
+ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
+};
+
+/* FQ_CODEL */
+
+enum {
+ TCA_FQ_CODEL_UNSPEC,
+ TCA_FQ_CODEL_TARGET,
+ TCA_FQ_CODEL_LIMIT,
+ TCA_FQ_CODEL_INTERVAL,
+ TCA_FQ_CODEL_ECN,
+ TCA_FQ_CODEL_FLOWS,
+ TCA_FQ_CODEL_QUANTUM,
+ TCA_FQ_CODEL_CE_THRESHOLD,
+ TCA_FQ_CODEL_DROP_BATCH_SIZE,
+ TCA_FQ_CODEL_MEMORY_LIMIT,
+ __TCA_FQ_CODEL_MAX
+};
+
+#define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1)
+
+enum {
+ TCA_FQ_CODEL_XSTATS_QDISC,
+ TCA_FQ_CODEL_XSTATS_CLASS,
+};
+
+struct tc_fq_codel_qd_stats {
+ __u32 maxpacket; /* largest packet we've seen so far */
+ __u32 drop_overlimit; /* number of times max qdisc
+ * packet limit was hit
+ */
+ __u32 ecn_mark; /* number of packets we ECN marked
+ * instead of being dropped
+ */
+ __u32 new_flow_count; /* number of times packets
+ * created a 'new flow'
+ */
+ __u32 new_flows_len; /* count of flows in new list */
+ __u32 old_flows_len; /* count of flows in old list */
+ __u32 ce_mark; /* packets above ce_threshold */
+ __u32 memory_usage; /* in bytes */
+ __u32 drop_overmemory;
+};
+
+struct tc_fq_codel_cl_stats {
+ __s32 deficit;
+ __u32 ldelay; /* in-queue delay seen by most recently
+ * dequeued packet
+ */
+ __u32 count;
+ __u32 lastcount;
+ __u32 dropping;
+ __s32 drop_next;
+};
+
+struct tc_fq_codel_xstats {
+ __u32 type;
+ union {
+ struct tc_fq_codel_qd_stats qdisc_stats;
+ struct tc_fq_codel_cl_stats class_stats;
+ };
+};
+
+/* FQ */
+
+enum {
+ TCA_FQ_UNSPEC,
+
+ TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
+
+ TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
+
+ TCA_FQ_QUANTUM, /* RR quantum */
+
+ TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
+
+ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
+
+ TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
+
+ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
+
+ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
+
+ TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
+
+ TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+
+ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
+
+ __TCA_FQ_MAX
+};
+
+#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
+
+struct tc_fq_qd_stats {
+ __u64 gc_flows;
+ __u64 highprio_packets;
+ __u64 tcp_retrans;
+ __u64 throttled;
+ __u64 flows_plimit;
+ __u64 pkts_too_long;
+ __u64 allocation_errors;
+ __s64 time_next_delayed_flow;
+ __u32 flows;
+ __u32 inactive_flows;
+ __u32 throttled_flows;
+ __u32 unthrottle_latency_ns;
+ __u64 ce_mark; /* packets above ce_threshold */
+};
+
+/* Heavy-Hitter Filter */
+
+enum {
+ TCA_HHF_UNSPEC,
+ TCA_HHF_BACKLOG_LIMIT,
+ TCA_HHF_QUANTUM,
+ TCA_HHF_HH_FLOWS_LIMIT,
+ TCA_HHF_RESET_TIMEOUT,
+ TCA_HHF_ADMIT_BYTES,
+ TCA_HHF_EVICT_TIMEOUT,
+ TCA_HHF_NON_HH_WEIGHT,
+ __TCA_HHF_MAX
+};
+
+#define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
+
+struct tc_hhf_xstats {
+ __u32 drop_overlimit; /* number of times max qdisc packet limit
+ * was hit
+ */
+ __u32 hh_overlimit; /* number of times the heavy-hitters limit was hit */
+ __u32 hh_tot_count; /* number of captured heavy-hitters so far */
+ __u32 hh_cur_count; /* number of current heavy-hitters */
+};
+
+/* PIE */
+enum {
+ TCA_PIE_UNSPEC,
+ TCA_PIE_TARGET,
+ TCA_PIE_LIMIT,
+ TCA_PIE_TUPDATE,
+ TCA_PIE_ALPHA,
+ TCA_PIE_BETA,
+ TCA_PIE_ECN,
+ TCA_PIE_BYTEMODE,
+ __TCA_PIE_MAX
+};
+#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
+
+struct tc_pie_xstats {
+ __u32 prob; /* current probability */
+ __u32 delay; /* current delay in ms */
+ __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to pie_action */
+ __u32 overlimit; /* dropped due to lack of space in queue */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ecn */
+};
+
+/* CBS */
+struct tc_cbs_qopt {
+ __u8 offload;
+ __u8 _pad[3];
+ __s32 hicredit;
+ __s32 locredit;
+ __s32 idleslope;
+ __s32 sendslope;
+};
+
+enum {
+ TCA_CBS_UNSPEC,
+ TCA_CBS_PARMS,
+ __TCA_CBS_MAX,
+};
+
+#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
+
+
+/* ETF */
+struct tc_etf_qopt {
+ __s32 delta;
+ __s32 clockid;
+ __u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON BIT(0)
+#define TC_ETF_OFFLOAD_ON BIT(1)
+};
+
+enum {
+ TCA_ETF_UNSPEC,
+ TCA_ETF_PARMS,
+ __TCA_ETF_MAX,
+};
+
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
+
+
+/* CAKE */
+enum {
+ TCA_CAKE_UNSPEC,
+ TCA_CAKE_PAD,
+ TCA_CAKE_BASE_RATE64,
+ TCA_CAKE_DIFFSERV_MODE,
+ TCA_CAKE_ATM,
+ TCA_CAKE_FLOW_MODE,
+ TCA_CAKE_OVERHEAD,
+ TCA_CAKE_RTT,
+ TCA_CAKE_TARGET,
+ TCA_CAKE_AUTORATE,
+ TCA_CAKE_MEMORY,
+ TCA_CAKE_NAT,
+ TCA_CAKE_RAW,
+ TCA_CAKE_WASH,
+ TCA_CAKE_MPU,
+ TCA_CAKE_INGRESS,
+ TCA_CAKE_ACK_FILTER,
+ TCA_CAKE_SPLIT_GSO,
+ __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
+
+enum {
+ __TCA_CAKE_STATS_INVALID,
+ TCA_CAKE_STATS_PAD,
+ TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+ TCA_CAKE_STATS_MEMORY_LIMIT,
+ TCA_CAKE_STATS_MEMORY_USED,
+ TCA_CAKE_STATS_AVG_NETOFF,
+ TCA_CAKE_STATS_MIN_NETLEN,
+ TCA_CAKE_STATS_MAX_NETLEN,
+ TCA_CAKE_STATS_MIN_ADJLEN,
+ TCA_CAKE_STATS_MAX_ADJLEN,
+ TCA_CAKE_STATS_TIN_STATS,
+ TCA_CAKE_STATS_DEFICIT,
+ TCA_CAKE_STATS_COBALT_COUNT,
+ TCA_CAKE_STATS_DROPPING,
+ TCA_CAKE_STATS_DROP_NEXT_US,
+ TCA_CAKE_STATS_P_DROP,
+ TCA_CAKE_STATS_BLUE_TIMER_US,
+ __TCA_CAKE_STATS_MAX
+};
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+
+enum {
+ __TCA_CAKE_TIN_STATS_INVALID,
+ TCA_CAKE_TIN_STATS_PAD,
+ TCA_CAKE_TIN_STATS_SENT_PACKETS,
+ TCA_CAKE_TIN_STATS_SENT_BYTES64,
+ TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+ TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+ TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+ TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+ TCA_CAKE_TIN_STATS_TARGET_US,
+ TCA_CAKE_TIN_STATS_INTERVAL_US,
+ TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+ TCA_CAKE_TIN_STATS_WAY_MISSES,
+ TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+ TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+ TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+ TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+ TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+ TCA_CAKE_TIN_STATS_BULK_FLOWS,
+ TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+ TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+ TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+ __TCA_CAKE_TIN_STATS_MAX
+};
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
+#define TC_CAKE_MAX_TINS (8)
+
+enum {
+ CAKE_FLOW_NONE = 0,
+ CAKE_FLOW_SRC_IP,
+ CAKE_FLOW_DST_IP,
+ CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+ CAKE_FLOW_FLOWS,
+ CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_MAX,
+};
+
+enum {
+ CAKE_DIFFSERV_DIFFSERV3 = 0,
+ CAKE_DIFFSERV_DIFFSERV4,
+ CAKE_DIFFSERV_DIFFSERV8,
+ CAKE_DIFFSERV_BESTEFFORT,
+ CAKE_DIFFSERV_PRECEDENCE,
+ CAKE_DIFFSERV_MAX
+};
+
+enum {
+ CAKE_ACK_NONE = 0,
+ CAKE_ACK_FILTER,
+ CAKE_ACK_AGGRESSIVE,
+ CAKE_ACK_MAX
+};
+
+enum {
+ CAKE_ATM_NONE = 0,
+ CAKE_ATM_ATM,
+ CAKE_ATM_PTM,
+ CAKE_ATM_MAX
+};
+
+
+/* TAPRIO */
+enum {
+ TC_TAPRIO_CMD_SET_GATES = 0x00,
+ TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+ TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+};
+
+enum {
+ TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
+ TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
+ __TCA_TAPRIO_SCHED_ENTRY_MAX,
+};
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
+
+/* The format for schedule entry list is:
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
+ * [TCA_TAPRIO_SCHED_ENTRY]
+ * [TCA_TAPRIO_SCHED_ENTRY_CMD]
+ * [TCA_TAPRIO_SCHED_ENTRY_GATES]
+ * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
+ */
+enum {
+ TCA_TAPRIO_SCHED_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY,
+ __TCA_TAPRIO_SCHED_MAX,
+};
+
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+
+enum {
+ TCA_TAPRIO_ATTR_UNSPEC,
+ TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
+ TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
+ TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
+ TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
+ TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
+ TCA_TAPRIO_PAD,
+ __TCA_TAPRIO_ATTR_MAX,
+};
+
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+
+#endif
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index b17201edfa09..094bb03b9cc2 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -219,5 +219,14 @@ struct prctl_mm_map {
# define PR_SPEC_ENABLE (1UL << 1)
# define PR_SPEC_DISABLE (1UL << 2)
# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+# define PR_SPEC_DISABLE_NOEXEC (1UL << 4)
+
+/* Reset arm64 pointer authentication keys */
+#define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1UL << 0)
+# define PR_PAC_APIBKEY (1UL << 1)
+# define PR_PAC_APDAKEY (1UL << 2)
+# define PR_PAC_APDBKEY (1UL << 3)
+# define PR_PAC_APGAKEY (1UL << 4)
#endif /* _LINUX_PRCTL_H */
diff --git a/tools/include/uapi/linux/tc_act/tc_bpf.h b/tools/include/uapi/linux/tc_act/tc_bpf.h
index 6e89a5df49a4..653c4f94f76e 100644
--- a/tools/include/uapi/linux/tc_act/tc_bpf.h
+++ b/tools/include/uapi/linux/tc_act/tc_bpf.h
@@ -13,8 +13,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_BPF 13
-
struct tc_act_bpf {
tc_gen;
};
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index 84c3de89696a..40d028eed645 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -11,94 +11,9 @@
* device configuration.
*/
+#include <linux/vhost_types.h>
#include <linux/types.h>
-#include <linux/compiler.h>
#include <linux/ioctl.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio_ring.h>
-
-struct vhost_vring_state {
- unsigned int index;
- unsigned int num;
-};
-
-struct vhost_vring_file {
- unsigned int index;
- int fd; /* Pass -1 to unbind from file. */
-
-};
-
-struct vhost_vring_addr {
- unsigned int index;
- /* Option flags. */
- unsigned int flags;
- /* Flag values: */
- /* Whether log address is valid. If set enables logging. */
-#define VHOST_VRING_F_LOG 0
-
- /* Start of array of descriptors (virtually contiguous) */
- __u64 desc_user_addr;
- /* Used structure address. Must be 32 bit aligned */
- __u64 used_user_addr;
- /* Available structure address. Must be 16 bit aligned */
- __u64 avail_user_addr;
- /* Logging support. */
- /* Log writes to used structure, at offset calculated from specified
- * address. Address must be 32 bit aligned. */
- __u64 log_guest_addr;
-};
-
-/* no alignment requirement */
-struct vhost_iotlb_msg {
- __u64 iova;
- __u64 size;
- __u64 uaddr;
-#define VHOST_ACCESS_RO 0x1
-#define VHOST_ACCESS_WO 0x2
-#define VHOST_ACCESS_RW 0x3
- __u8 perm;
-#define VHOST_IOTLB_MISS 1
-#define VHOST_IOTLB_UPDATE 2
-#define VHOST_IOTLB_INVALIDATE 3
-#define VHOST_IOTLB_ACCESS_FAIL 4
- __u8 type;
-};
-
-#define VHOST_IOTLB_MSG 0x1
-#define VHOST_IOTLB_MSG_V2 0x2
-
-struct vhost_msg {
- int type;
- union {
- struct vhost_iotlb_msg iotlb;
- __u8 padding[64];
- };
-};
-
-struct vhost_msg_v2 {
- __u32 type;
- __u32 reserved;
- union {
- struct vhost_iotlb_msg iotlb;
- __u8 padding[64];
- };
-};
-
-struct vhost_memory_region {
- __u64 guest_phys_addr;
- __u64 memory_size; /* bytes */
- __u64 userspace_addr;
- __u64 flags_padding; /* No flags are currently specified. */
-};
-
-/* All region addresses and sizes must be 4K aligned. */
-#define VHOST_PAGE_SIZE 0x1000
-
-struct vhost_memory {
- __u32 nregions;
- __u32 padding;
- struct vhost_memory_region regions[0];
-};
/* ioctls */
@@ -186,31 +101,7 @@ struct vhost_memory {
* device. This can be used to stop the ring (e.g. for migration). */
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
-/* Feature bits */
-/* Log all write descriptors. Can be changed while device is active. */
-#define VHOST_F_LOG_ALL 26
-/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
-#define VHOST_NET_F_VIRTIO_NET_HDR 27
-
-/* VHOST_SCSI specific definitions */
-
-/*
- * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
- *
- * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
- * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
- * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target.
- * All the targets under vhost_wwpn can be seen and used by guset.
- */
-
-#define VHOST_SCSI_ABI_VERSION 1
-
-struct vhost_scsi_target {
- int abi_version;
- char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */
- unsigned short vhost_tpgt;
- unsigned short reserved;
-};
+/* VHOST_SCSI specific defines */
#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index f81e549ddfdb..4db74758c674 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -1,2 +1,3 @@
libbpf_version.h
FEATURE-DUMP.libbpf
+test_libbpf
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 197b40f5b5c6..ee9d5362f35b 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 34d9c3619c96..a05c43468bd0 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -14,21 +14,6 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
#$(info Determined 'srctree' to be $(srctree))
endif
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-
INSTALL = install
# Use DESTDIR for installing into a different root directory.
@@ -54,7 +39,7 @@ man_dir_SQ = '$(subst ','\'',$(man_dir))'
export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
-include ../../scripts/Makefile.include
+include $(srctree)/tools/scripts/Makefile.include
# copy a bit from Linux kbuild
@@ -147,9 +132,9 @@ BPF_IN := $(OUTPUT)libbpf-in.o
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
VERSION_SCRIPT := libbpf.map
-GLOBAL_SYM_COUNT = $(shell readelf -s $(BPF_IN) | \
+GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
-VERSIONED_SYM_COUNT = $(shell readelf -s $(OUTPUT)libbpf.so | \
+VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
CMD_TARGETS = $(LIB_FILE)
@@ -179,6 +164,9 @@ $(BPF_IN): force elfdep bpfdep
@(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \
(diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true
+ @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
+ (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
+ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf
$(OUTPUT)libbpf.so: $(BPF_IN)
@@ -189,7 +177,7 @@ $(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
- $(QUIET_LINK)$(CXX) $^ -lelf -o $@
+ $(QUIET_LINK)$(CXX) $(INCLUDES) $^ -lelf -o $@
check: check_abi
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index 056f38310722..5788479384ca 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -9,7 +9,7 @@ described here. It's recommended to follow these conventions whenever a
new function or type is added to keep libbpf API clean and consistent.
All types and functions provided by libbpf API should have one of the
-following prefixes: ``bpf_``, ``btf_``, ``libbpf_``.
+following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``.
System call wrappers
--------------------
@@ -62,6 +62,19 @@ Auxiliary functions and types that don't fit well in any of categories
described above should have ``libbpf_`` prefix, e.g.
``libbpf_get_error`` or ``libbpf_prog_type_by_name``.
+AF_XDP functions
+-------------------
+
+AF_XDP functions should have an ``xsk_`` prefix, e.g.
+``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists
+of both low-level ring access functions and high-level configuration
+functions. These can be mixed and matched. Note that these functions
+are not reentrant for performance reasons.
+
+Please take a look at Documentation/networking/af_xdp.rst in the Linux
+kernel source tree for details on how to use XDP sockets and for some
+common mistakes to check if you do not see any traffic reaching user space.
+
libbpf ABI
==========
@@ -132,6 +145,20 @@ For example, if current state of ``libbpf.map`` is:
Format of version script and ways to handle ABI changes, including
incompatible ones, described in details in [1].
+Stand-alone build
+=================
+
+Under https://github.com/libbpf/libbpf there is a (semi-)automated
+mirror of the mainline's version of libbpf for a stand-alone build.
+
+However, all changes to libbpf's code base must be upstreamed through
+the mainline kernel tree.
+
+License
+=======
+
+libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause.
+
Links
=====
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 3caaa3428774..9cd015574e83 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -22,6 +22,7 @@
*/
#include <stdlib.h>
+#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
@@ -65,6 +66,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
return syscall(__NR_bpf, cmd, attr, size);
}
+static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+{
+ int fd;
+
+ do {
+ fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+ } while (fd < 0 && errno == EAGAIN);
+
+ return fd;
+}
+
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
@@ -203,23 +215,35 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
{
void *finfo = NULL, *linfo = NULL;
union bpf_attr attr;
+ __u32 log_level;
__u32 name_len;
int fd;
- if (!load_attr)
+ if (!load_attr || !log_buf != !log_buf_sz)
+ return -EINVAL;
+
+ log_level = load_attr->log_level;
+ if (log_level > 2 || (log_level && !log_buf))
return -EINVAL;
name_len = load_attr->name ? strlen(load_attr->name) : 0;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.insn_cnt = (__u32)load_attr->insns_cnt;
attr.insns = ptr_to_u64(load_attr->insns);
attr.license = ptr_to_u64(load_attr->license);
- attr.log_buf = ptr_to_u64(NULL);
- attr.log_size = 0;
- attr.log_level = 0;
+
+ attr.log_level = log_level;
+ if (log_level) {
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_buf_sz;
+ } else {
+ attr.log_buf = ptr_to_u64(NULL);
+ attr.log_size = 0;
+ }
+
attr.kern_version = load_attr->kern_version;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.prog_btf_fd = load_attr->prog_btf_fd;
@@ -232,7 +256,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
memcpy(attr.prog_name, load_attr->name,
min(name_len, BPF_OBJ_NAME_LEN - 1));
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
return fd;
@@ -269,13 +293,13 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
break;
}
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
goto done;
}
- if (!log_buf || !log_buf_sz)
+ if (log_level || !log_buf)
goto done;
/* Try again with log */
@@ -283,7 +307,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
attr.log_size = log_buf_sz;
attr.log_level = 1;
log_buf[0] = 0;
- fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr));
done:
free(finfo);
free(linfo);
@@ -316,7 +340,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_type = type;
attr.insn_cnt = (__u32)insns_cnt;
attr.insns = ptr_to_u64(insns);
@@ -328,7 +352,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.kern_version = kern_version;
attr.prog_flags = prog_flags;
- return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ return sys_bpf_prog_load(&attr, sizeof(attr));
}
int bpf_map_update_elem(int fd, const void *key, const void *value,
@@ -336,7 +360,7 @@ int bpf_map_update_elem(int fd, const void *key, const void *value,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -349,10 +373,23 @@ int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+
+ return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+}
+
+int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
+{
+ union bpf_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
+ attr.flags = flags;
return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
@@ -361,7 +398,7 @@ int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -373,7 +410,7 @@ int bpf_map_delete_elem(int fd, const void *key)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
@@ -384,7 +421,7 @@ int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.next_key = ptr_to_u64(next_key);
@@ -396,7 +433,7 @@ int bpf_obj_pin(int fd, const char *pathname)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
attr.bpf_fd = fd;
@@ -407,7 +444,7 @@ int bpf_obj_get(const char *pathname)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
@@ -418,7 +455,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
@@ -431,7 +468,7 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_type = type;
@@ -442,7 +479,7 @@ int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
@@ -456,7 +493,7 @@ int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
union bpf_attr attr;
int ret;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.query.target_fd = target_fd;
attr.query.attach_type = type;
attr.query.query_flags = query_flags;
@@ -477,7 +514,7 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
union bpf_attr attr;
int ret;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = prog_fd;
attr.test.data_in = ptr_to_u64(data);
attr.test.data_out = ptr_to_u64(data_out);
@@ -502,7 +539,7 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
if (!test_attr->data_out && test_attr->data_size_out > 0)
return -EINVAL;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = test_attr->prog_fd;
attr.test.data_in = ptr_to_u64(test_attr->data_in);
attr.test.data_out = ptr_to_u64(test_attr->data_out);
@@ -522,7 +559,7 @@ int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
@@ -537,7 +574,7 @@ int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
@@ -551,7 +588,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_id = id;
return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -561,7 +598,7 @@ int bpf_map_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_id = id;
return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -571,7 +608,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.btf_id = id;
return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -582,7 +619,7 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.info.bpf_fd = prog_fd;
attr.info.info_len = *info_len;
attr.info.info = ptr_to_u64(info);
@@ -598,7 +635,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 8f09de482839..6ffdd79bea89 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -85,6 +85,7 @@ struct bpf_load_program_attr {
__u32 line_info_rec_size;
const void *line_info;
__u32 line_info_cnt;
+ __u32 log_level;
};
/* Flags to direct loading requirements */
@@ -110,6 +111,8 @@ LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value);
+LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value,
+ __u64 flags);
LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
void *value);
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d682d3b8f7b9..1b8d8cdd3575 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
@@ -9,12 +10,14 @@
#include <linux/btf.h>
#include "btf.h"
#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_util.h"
-#define elog(fmt, ...) { if (err_log) err_log(fmt, ##__VA_ARGS__); }
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
-#define BTF_MAX_NR_TYPES 65535
+#define BTF_MAX_NR_TYPES 0x7fffffff
+#define BTF_MAX_STR_OFFSET 0x7fffffff
#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
((k) == BTF_KIND_VOLATILE) || \
@@ -39,9 +42,8 @@ struct btf {
struct btf_ext_info {
/*
- * info points to a deep copy of the individual info section
- * (e.g. func_info and line_info) from the .BTF.ext.
- * It does not include the __u32 rec_size.
+ * info points to the individual info section (e.g. func_info and
+ * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
*/
void *info;
__u32 rec_size;
@@ -49,8 +51,13 @@ struct btf_ext_info {
};
struct btf_ext {
+ union {
+ struct btf_ext_header *hdr;
+ void *data;
+ };
struct btf_ext_info func_info;
struct btf_ext_info line_info;
+ __u32 data_size;
};
struct btf_ext_info_sec {
@@ -107,54 +114,54 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
return 0;
}
-static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
+static int btf_parse_hdr(struct btf *btf)
{
const struct btf_header *hdr = btf->hdr;
__u32 meta_left;
if (btf->data_size < sizeof(struct btf_header)) {
- elog("BTF header not found\n");
+ pr_debug("BTF header not found\n");
return -EINVAL;
}
if (hdr->magic != BTF_MAGIC) {
- elog("Invalid BTF magic:%x\n", hdr->magic);
+ pr_debug("Invalid BTF magic:%x\n", hdr->magic);
return -EINVAL;
}
if (hdr->version != BTF_VERSION) {
- elog("Unsupported BTF version:%u\n", hdr->version);
+ pr_debug("Unsupported BTF version:%u\n", hdr->version);
return -ENOTSUP;
}
if (hdr->flags) {
- elog("Unsupported BTF flags:%x\n", hdr->flags);
+ pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
return -ENOTSUP;
}
meta_left = btf->data_size - sizeof(*hdr);
if (!meta_left) {
- elog("BTF has no data\n");
+ pr_debug("BTF has no data\n");
return -EINVAL;
}
if (meta_left < hdr->type_off) {
- elog("Invalid BTF type section offset:%u\n", hdr->type_off);
+ pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
return -EINVAL;
}
if (meta_left < hdr->str_off) {
- elog("Invalid BTF string section offset:%u\n", hdr->str_off);
+ pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
return -EINVAL;
}
if (hdr->type_off >= hdr->str_off) {
- elog("BTF type section offset >= string section offset. No type?\n");
+ pr_debug("BTF type section offset >= string section offset. No type?\n");
return -EINVAL;
}
if (hdr->type_off & 0x02) {
- elog("BTF type section is not aligned to 4 bytes\n");
+ pr_debug("BTF type section is not aligned to 4 bytes\n");
return -EINVAL;
}
@@ -163,15 +170,15 @@ static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
-static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
+static int btf_parse_str_sec(struct btf *btf)
{
const struct btf_header *hdr = btf->hdr;
const char *start = btf->nohdr_data + hdr->str_off;
const char *end = start + btf->hdr->str_len;
- if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
+ if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
start[0] || end[-1]) {
- elog("Invalid BTF string section\n");
+ pr_debug("Invalid BTF string section\n");
return -EINVAL;
}
@@ -180,7 +187,38 @@ static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
-static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
+static int btf_type_size(struct btf_type *t)
+{
+ int base_size = sizeof(struct btf_type);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return base_size;
+ case BTF_KIND_INT:
+ return base_size + sizeof(__u32);
+ case BTF_KIND_ENUM:
+ return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ARRAY:
+ return base_size + sizeof(struct btf_array);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return base_size + vlen * sizeof(struct btf_member);
+ case BTF_KIND_FUNC_PROTO:
+ return base_size + vlen * sizeof(struct btf_param);
+ default:
+ pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info));
+ return -EINVAL;
+ }
+}
+
+static int btf_parse_type_sec(struct btf *btf)
{
struct btf_header *hdr = btf->hdr;
void *nohdr_data = btf->nohdr_data;
@@ -189,41 +227,13 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
while (next_type < end_type) {
struct btf_type *t = next_type;
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ int type_size;
int err;
- next_type += sizeof(*t);
- switch (BTF_INFO_KIND(t->info)) {
- case BTF_KIND_INT:
- next_type += sizeof(int);
- break;
- case BTF_KIND_ARRAY:
- next_type += sizeof(struct btf_array);
- break;
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION:
- next_type += vlen * sizeof(struct btf_member);
- break;
- case BTF_KIND_ENUM:
- next_type += vlen * sizeof(struct btf_enum);
- break;
- case BTF_KIND_FUNC_PROTO:
- next_type += vlen * sizeof(struct btf_param);
- break;
- case BTF_KIND_FUNC:
- case BTF_KIND_TYPEDEF:
- case BTF_KIND_PTR:
- case BTF_KIND_FWD:
- case BTF_KIND_VOLATILE:
- case BTF_KIND_CONST:
- case BTF_KIND_RESTRICT:
- break;
- default:
- elog("Unsupported BTF_KIND:%u\n",
- BTF_INFO_KIND(t->info));
- return -EINVAL;
- }
-
+ type_size = btf_type_size(t);
+ if (type_size < 0)
+ return type_size;
+ next_type += type_size;
err = btf_add_type(btf, t);
if (err)
return err;
@@ -232,6 +242,11 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
+__u32 btf__get_nr_types(const struct btf *btf)
+{
+ return btf->nr_types;
+}
+
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
if (type_id > btf->nr_types)
@@ -250,21 +265,6 @@ static bool btf_type_is_void_or_null(const struct btf_type *t)
return !t || btf_type_is_void(t);
}
-static __s64 btf_type_size(const struct btf_type *t)
-{
- switch (BTF_INFO_KIND(t->info)) {
- case BTF_KIND_INT:
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION:
- case BTF_KIND_ENUM:
- return t->size;
- case BTF_KIND_PTR:
- return sizeof(void *);
- default:
- return -EINVAL;
- }
-}
-
#define MAX_RESOLVE_DEPTH 32
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
@@ -278,11 +278,16 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
i++) {
- size = btf_type_size(t);
- if (size >= 0)
- break;
-
switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ size = t->size;
+ goto done;
+ case BTF_KIND_PTR:
+ size = sizeof(void *);
+ goto done;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
@@ -306,6 +311,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
if (size < 0)
return -EINVAL;
+done:
if (nelems && size > UINT32_MAX / nelems)
return -E2BIG;
@@ -363,10 +369,8 @@ void btf__free(struct btf *btf)
free(btf);
}
-struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
+struct btf *btf__new(__u8 *data, __u32 size)
{
- __u32 log_buf_size = 0;
- char *log_buf = NULL;
struct btf *btf;
int err;
@@ -376,16 +380,6 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
btf->fd = -1;
- if (err_log) {
- log_buf = malloc(BPF_LOG_BUF_SIZE);
- if (!log_buf) {
- err = -ENOMEM;
- goto done;
- }
- *log_buf = 0;
- log_buf_size = BPF_LOG_BUF_SIZE;
- }
-
btf->data = malloc(size);
if (!btf->data) {
err = -ENOMEM;
@@ -395,30 +389,17 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
memcpy(btf->data, data, size);
btf->data_size = size;
- btf->fd = bpf_load_btf(btf->data, btf->data_size,
- log_buf, log_buf_size, false);
-
- if (btf->fd == -1) {
- err = -errno;
- elog("Error loading BTF: %s(%d)\n", strerror(errno), errno);
- if (log_buf && *log_buf)
- elog("%s\n", log_buf);
- goto done;
- }
-
- err = btf_parse_hdr(btf, err_log);
+ err = btf_parse_hdr(btf);
if (err)
goto done;
- err = btf_parse_str_sec(btf, err_log);
+ err = btf_parse_str_sec(btf);
if (err)
goto done;
- err = btf_parse_type_sec(btf, err_log);
+ err = btf_parse_type_sec(btf);
done:
- free(log_buf);
-
if (err) {
btf__free(btf);
return ERR_PTR(err);
@@ -427,11 +408,47 @@ done:
return btf;
}
+int btf__load(struct btf *btf)
+{
+ __u32 log_buf_size = BPF_LOG_BUF_SIZE;
+ char *log_buf = NULL;
+ int err = 0;
+
+ if (btf->fd >= 0)
+ return -EEXIST;
+
+ log_buf = malloc(log_buf_size);
+ if (!log_buf)
+ return -ENOMEM;
+
+ *log_buf = 0;
+
+ btf->fd = bpf_load_btf(btf->data, btf->data_size,
+ log_buf, log_buf_size, false);
+ if (btf->fd < 0) {
+ err = -errno;
+ pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
+ if (*log_buf)
+ pr_warning("%s\n", log_buf);
+ goto done;
+ }
+
+done:
+ free(log_buf);
+ return err;
+}
+
int btf__fd(const struct btf *btf)
{
return btf->fd;
}
+const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
+{
+ *size = btf->data_size;
+ return btf->data;
+}
+
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
if (offset < btf->hdr->str_len)
@@ -467,7 +484,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
- bzero(ptr, last_size);
+ memset(ptr, 0, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
@@ -481,7 +498,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
ptr = temp_ptr;
- bzero(ptr, last_size);
+ memset(ptr, 0, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
}
@@ -491,7 +508,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
- *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size, NULL);
+ *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
if (IS_ERR(*btf)) {
err = PTR_ERR(*btf);
*btf = NULL;
@@ -504,7 +521,79 @@ exit_free:
return err;
}
-struct btf_ext_sec_copy_param {
+int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
+ __u32 expected_key_size, __u32 expected_value_size,
+ __u32 *key_type_id, __u32 *value_type_id)
+{
+ const struct btf_type *container_type;
+ const struct btf_member *key, *value;
+ const size_t max_name = 256;
+ char container_name[max_name];
+ __s64 key_size, value_size;
+ __s32 container_id;
+
+ if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
+ max_name) {
+ pr_warning("map:%s length of '____btf_map_%s' is too long\n",
+ map_name, map_name);
+ return -EINVAL;
+ }
+
+ container_id = btf__find_by_name(btf, container_name);
+ if (container_id < 0) {
+ pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
+ map_name, container_name);
+ return container_id;
+ }
+
+ container_type = btf__type_by_id(btf, container_id);
+ if (!container_type) {
+ pr_warning("map:%s cannot find BTF type for container_id:%u\n",
+ map_name, container_id);
+ return -EINVAL;
+ }
+
+ if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
+ BTF_INFO_VLEN(container_type->info) < 2) {
+ pr_warning("map:%s container_name:%s is an invalid container struct\n",
+ map_name, container_name);
+ return -EINVAL;
+ }
+
+ key = (struct btf_member *)(container_type + 1);
+ value = key + 1;
+
+ key_size = btf__resolve_size(btf, key->type);
+ if (key_size < 0) {
+ pr_warning("map:%s invalid BTF key_type_size\n", map_name);
+ return key_size;
+ }
+
+ if (expected_key_size != key_size) {
+ pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
+ map_name, (__u32)key_size, expected_key_size);
+ return -EINVAL;
+ }
+
+ value_size = btf__resolve_size(btf, value->type);
+ if (value_size < 0) {
+ pr_warning("map:%s invalid BTF value_type_size\n", map_name);
+ return value_size;
+ }
+
+ if (expected_value_size != value_size) {
+ pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
+ map_name, (__u32)value_size, expected_value_size);
+ return -EINVAL;
+ }
+
+ *key_type_id = key->type;
+ *value_type_id = value->type;
+
+ return 0;
+}
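+
+/*
+ * A minimal usage sketch (the map name "my_map" and the key/value types are
+ * hypothetical). With BPF_ANNOTATE_KV_PAIR(my_map, struct my_key, __u64) the
+ * compiler emits a container struct named "____btf_map_my_map" whose first
+ * two members carry the key and value types, which is what this helper
+ * looks up:
+ *
+ *   __u32 key_id, value_id;
+ *   int err;
+ *
+ *   err = btf__get_map_kv_tids(btf, "my_map",
+ *                              sizeof(struct my_key), sizeof(__u64),
+ *                              &key_id, &value_id);
+ *   if (err)
+ *           return err;
+ */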
+
+struct btf_ext_sec_setup_param {
__u32 off;
__u32 len;
__u32 min_rec_size;
@@ -512,41 +601,33 @@ struct btf_ext_sec_copy_param {
const char *desc;
};
-static int btf_ext_copy_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- struct btf_ext_sec_copy_param *ext_sec,
- btf_print_fn_t err_log)
+static int btf_ext_setup_info(struct btf_ext *btf_ext,
+ struct btf_ext_sec_setup_param *ext_sec)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
/* The start of the info sec (including the __u32 record_size). */
- const void *info;
-
- /* data and data_size do not include btf_ext_header from now on */
- data = data + hdr->hdr_len;
- data_size -= hdr->hdr_len;
+ void *info;
if (ext_sec->off & 0x03) {
- elog(".BTF.ext %s section is not aligned to 4 bytes\n",
+ pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
ext_sec->desc);
return -EINVAL;
}
- if (data_size < ext_sec->off ||
- ext_sec->len > data_size - ext_sec->off) {
- elog("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
- ext_sec->desc, ext_sec->off, ext_sec->len);
+ info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
+ info_left = ext_sec->len;
+
+ if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
+ pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
+ ext_sec->desc, ext_sec->off, ext_sec->len);
return -EINVAL;
}
- info = data + ext_sec->off;
- info_left = ext_sec->len;
-
/* At least a record size */
if (info_left < sizeof(__u32)) {
- elog(".BTF.ext %s record size not found\n", ext_sec->desc);
+ pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
return -EINVAL;
}
@@ -554,8 +635,8 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
record_size = *(__u32 *)info;
if (record_size < ext_sec->min_rec_size ||
record_size & 0x03) {
- elog("%s section in .BTF.ext has invalid record size %u\n",
- ext_sec->desc, record_size);
+ pr_debug("%s section in .BTF.ext has invalid record size %u\n",
+ ext_sec->desc, record_size);
return -EINVAL;
}
@@ -564,7 +645,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
/* If no records, return failure now so .BTF.ext won't be used. */
if (!info_left) {
- elog("%s section in .BTF.ext has no records", ext_sec->desc);
+ pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
return -EINVAL;
}
@@ -574,14 +655,14 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
__u32 num_records;
if (info_left < sec_hdrlen) {
- elog("%s section header is not found in .BTF.ext\n",
+ pr_debug("%s section header is not found in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
num_records = sinfo->num_info;
if (num_records == 0) {
- elog("%s section has incorrect num_records in .BTF.ext\n",
+ pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
@@ -589,7 +670,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
total_record_size = sec_hdrlen +
(__u64)num_records * record_size;
if (info_left < total_record_size) {
- elog("%s section has incorrect num_records in .BTF.ext\n",
+ pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
@@ -601,74 +682,64 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
ext_info = ext_sec->ext_info;
ext_info->len = ext_sec->len - sizeof(__u32);
ext_info->rec_size = record_size;
- ext_info->info = malloc(ext_info->len);
- if (!ext_info->info)
- return -ENOMEM;
- memcpy(ext_info->info, info + sizeof(__u32), ext_info->len);
+ ext_info->info = info + sizeof(__u32);
return 0;
}
-static int btf_ext_copy_func_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- struct btf_ext_sec_copy_param param = {
- .off = hdr->func_info_off,
- .len = hdr->func_info_len,
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->func_info_off,
+ .len = btf_ext->hdr->func_info_len,
.min_rec_size = sizeof(struct bpf_func_info_min),
.ext_info = &btf_ext->func_info,
.desc = "func_info"
};
- return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log);
+ return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_copy_line_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- struct btf_ext_sec_copy_param param = {
- .off = hdr->line_info_off,
- .len = hdr->line_info_len,
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->line_info_off,
+ .len = btf_ext->hdr->line_info_len,
.min_rec_size = sizeof(struct bpf_line_info_min),
.ext_info = &btf_ext->line_info,
.desc = "line_info",
};
- return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log);
+ return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_parse_hdr(__u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
data_size < hdr->hdr_len) {
- elog("BTF.ext header not found");
+ pr_debug("BTF.ext header not found");
return -EINVAL;
}
if (hdr->magic != BTF_MAGIC) {
- elog("Invalid BTF.ext magic:%x\n", hdr->magic);
+ pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
return -EINVAL;
}
if (hdr->version != BTF_VERSION) {
- elog("Unsupported BTF.ext version:%u\n", hdr->version);
+ pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
return -ENOTSUP;
}
if (hdr->flags) {
- elog("Unsupported BTF.ext flags:%x\n", hdr->flags);
+ pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
return -ENOTSUP;
}
if (data_size == hdr->hdr_len) {
- elog("BTF.ext has no data\n");
+ pr_debug("BTF.ext has no data\n");
return -EINVAL;
}
@@ -679,18 +750,16 @@ void btf_ext__free(struct btf_ext *btf_ext)
{
if (!btf_ext)
return;
-
- free(btf_ext->func_info.info);
- free(btf_ext->line_info.info);
+ free(btf_ext->data);
free(btf_ext);
}
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
+struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
{
struct btf_ext *btf_ext;
int err;
- err = btf_ext_parse_hdr(data, size, err_log);
+ err = btf_ext_parse_hdr(data, size);
if (err)
return ERR_PTR(err);
@@ -698,13 +767,23 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
if (!btf_ext)
return ERR_PTR(-ENOMEM);
- err = btf_ext_copy_func_info(btf_ext, data, size, err_log);
- if (err) {
- btf_ext__free(btf_ext);
- return ERR_PTR(err);
+ btf_ext->data_size = size;
+ btf_ext->data = malloc(size);
+ if (!btf_ext->data) {
+ err = -ENOMEM;
+ goto done;
}
+ memcpy(btf_ext->data, data, size);
+
+ err = btf_ext_setup_func_info(btf_ext);
+ if (err)
+ goto done;
+
+ err = btf_ext_setup_line_info(btf_ext);
+ if (err)
+ goto done;
- err = btf_ext_copy_line_info(btf_ext, data, size, err_log);
+done:
if (err) {
btf_ext__free(btf_ext);
return ERR_PTR(err);
@@ -713,6 +792,12 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
return btf_ext;
}
+const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
+{
+ *size = btf_ext->data_size;
+ return btf_ext->data;
+}
+
static int btf_ext_reloc_info(const struct btf *btf,
const struct btf_ext_info *ext_info,
const char *sec_name, __u32 insns_cnt,
@@ -761,7 +846,8 @@ static int btf_ext_reloc_info(const struct btf *btf,
return -ENOENT;
}
-int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext,
+int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **func_info, __u32 *cnt)
{
@@ -769,7 +855,8 @@ int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ex
insns_cnt, func_info, cnt);
}
-int btf_ext__reloc_line_info(const struct btf *btf, const struct btf_ext *btf_ext,
+int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **line_info, __u32 *cnt)
{
@@ -786,3 +873,1778 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
{
return btf_ext->line_info.rec_size;
}
+
+struct btf_dedup;
+
+static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts);
+static void btf_dedup_free(struct btf_dedup *d);
+static int btf_dedup_strings(struct btf_dedup *d);
+static int btf_dedup_prim_types(struct btf_dedup *d);
+static int btf_dedup_struct_types(struct btf_dedup *d);
+static int btf_dedup_ref_types(struct btf_dedup *d);
+static int btf_dedup_compact_types(struct btf_dedup *d);
+static int btf_dedup_remap_types(struct btf_dedup *d);
+
+/*
+ * Deduplicate BTF types and strings.
+ *
+ * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
+ * section with all BTF type descriptors and string data. It overwrites that
+ * memory in-place with deduplicated types and strings without any loss of
+ * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
+ * is provided, all the strings referenced from .BTF.ext section are honored
+ * and updated to point to the right offsets after deduplication.
+ *
+ * If the function returns an error, type/string data might be garbled and
+ * should be discarded.
+ *
+ * A more verbose and detailed description of both the problem btf_dedup is
+ * solving and the solution can be found at:
+ * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
+ *
+ * Problem description and justification
+ * =====================================
+ *
+ * BTF type information is typically emitted either as a result of conversion
+ * from DWARF to BTF or directly by the compiler. In both cases, each
+ * compilation unit contains information about a subset of all the types that
+ * are used in an application. These subsets frequently overlap and contain a
+ * lot of duplicated information when later concatenated together into a
+ * single binary. This algorithm ensures that each unique type is represented
+ * by a single BTF type descriptor, greatly reducing the resulting size of the
+ * BTF data.
+ *
+ * Compilation unit isolation and the subsequent duplication of data are not
+ * the only problem. The same type hierarchy (e.g., a struct and all the types
+ * that struct references) in different compilation units can be represented
+ * in BTF to various degrees of completeness (or, rather, incompleteness) due
+ * to struct/union forward declarations.
+ *
+ * Let's take a look at an example that we'll use to better understand the
+ * problem (and solution). Suppose we have two compilation units, each using
+ * the same `struct S`, but each of them having incomplete type information
+ * about the struct's fields:
+ *
+ * // CU #1:
+ * struct S;
+ * struct A {
+ * int a;
+ * struct A* self;
+ * struct S* parent;
+ * };
+ * struct B;
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * // CU #2:
+ * struct S;
+ * struct A;
+ * struct B {
+ * int b;
+ * struct B* self;
+ * struct S* parent;
+ * };
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * In case of CU #1, BTF data will know only that `struct B` exists (but no
+ * more), but will know the complete type information about `struct A`. For
+ * CU #2, it will know the full type information about `struct B`, but will
+ * only know about the forward declaration of `struct A` (in BTF terms, it
+ * will have a `BTF_KIND_FWD` type descriptor with name `A`).
+ *
+ * This compilation unit isolation means that it's possible that there is no
+ * single CU with complete type information describing structs `S`, `A`, and
+ * `B`. Also, we might get tons of duplicated and redundant type information.
+ *
+ * An additional complication we need to keep in mind comes from the fact that
+ * types, in general, can form graphs containing cycles, not just DAGs.
+ *
+ * While the algorithm does deduplication, it also merges and resolves type
+ * information (unless disabled through `struct btf_dedup_opts`), whenever
+ * possible.
+ * E.g., in the example above with two compilation units having partial type
+ * information for structs `A` and `B`, the output of algorithm will emit
+ * a single copy of each BTF type that describes structs `A`, `B`, and `S`
+ * (as well as type information for `int` and pointers), as if they were defined
+ * in a single compilation unit as:
+ *
+ * struct A {
+ * int a;
+ * struct A* self;
+ * struct S* parent;
+ * };
+ * struct B {
+ * int b;
+ * struct B* self;
+ * struct S* parent;
+ * };
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * Algorithm summary
+ * =================
+ *
+ * Algorithm completes its work in 6 separate passes:
+ *
+ * 1. Strings deduplication.
+ * 2. Primitive types deduplication (int, enum, fwd).
+ * 3. Struct/union types deduplication.
+ * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
+ * protos, and const/volatile/restrict modifiers).
+ * 5. Types compaction.
+ * 6. Types remapping.
+ *
+ * The algorithm determines a canonical type descriptor, which is a single
+ * representative type for each truly unique type. This canonical type is the
+ * one that will go into the final deduplicated BTF type information. For
+ * structs/unions, it is also the type that the algorithm will merge additional
+ * type information into (while resolving FWDs), as it discovers it from data in
+ * other CUs. Each input BTF type eventually gets either mapped to itself, if
+ * that type is canonical, or to some other type, if that type is equivalent
+ * and was chosen as canonical representative. This mapping is stored in
+ * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
+ * FWD type got resolved to.
+ *
+ * To facilitate fast discovery of canonical types, we also maintain a
+ * canonical index (`btf_dedup->dedup_table`), which maps a type descriptor's
+ * signature hash (i.e., hashed kind, name, size, fields, etc) into a list of
+ * canonical types that match that signature. With a sufficiently good choice
+ * of type signature hashing function, we can limit the number of canonical
+ * types for each unique type signature to a very small number, allowing us to
+ * find the canonical type for any duplicated type very quickly.
+ *
+ * Struct/union deduplication is the most critical part, and the algorithm for
+ * deduplicating structs/unions is described in greater detail in the comments
+ * for the `btf_dedup_is_equiv` function.
+ */
+int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts)
+{
+ struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
+ int err;
+
+ if (IS_ERR(d)) {
+ pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
+ return -EINVAL;
+ }
+
+ err = btf_dedup_strings(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_strings failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_prim_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_prim_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_struct_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_struct_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_ref_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_ref_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_compact_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_compact_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_remap_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_remap_types failed:%d\n", err);
+ goto done;
+ }
+
+done:
+ btf_dedup_free(d);
+ return err;
+}
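+
+/*
+ * A minimal usage sketch, assuming `btf` (and, optionally, `btf_ext`) were
+ * already loaded, e.g. via btf__new() and btf_ext__new(). Passing NULL opts
+ * selects the defaults; on error the BTF data may be garbled and should be
+ * discarded:
+ *
+ *   struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
+ *   int err;
+ *
+ *   err = btf__dedup(btf, btf_ext, &opts);
+ *   if (err)
+ *           goto err_out;
+ */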
+
+#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
+#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
+#define BTF_UNPROCESSED_ID ((__u32)-1)
+#define BTF_IN_PROGRESS_ID ((__u32)-2)
+
+struct btf_dedup_node {
+ struct btf_dedup_node *next;
+ __u32 type_id;
+};
+
+struct btf_dedup {
+ /* .BTF section to be deduped in-place */
+ struct btf *btf;
+ /*
+ * Optional .BTF.ext section. When provided, any strings referenced
+ * from it will be taken into account when deduping strings
+ */
+ struct btf_ext *btf_ext;
+ /*
+ * This is a map from any type's signature hash to a list of possible
+ * canonical representative type candidates. Hash collisions are
+ * ignored, so even types of various kinds can share the same list of
+ * candidates, which is fine because we rely on subsequent
+ * btf_xxx_equal() checks to authoritatively verify type equality.
+ */
+ struct btf_dedup_node **dedup_table;
+ /* Canonical types map */
+ __u32 *map;
+ /* Hypothetical mapping, used during type graph equivalence checks */
+ __u32 *hypot_map;
+ __u32 *hypot_list;
+ size_t hypot_cnt;
+ size_t hypot_cap;
+ /* Various options modifying the behavior of the algorithm */
+ struct btf_dedup_opts opts;
+};
+
+struct btf_str_ptr {
+ const char *str;
+ __u32 new_off;
+ bool used;
+};
+
+struct btf_str_ptrs {
+ struct btf_str_ptr *ptrs;
+ const char *data;
+ __u32 cnt;
+ __u32 cap;
+};
+
+static inline __u32 hash_combine(__u32 h, __u32 value)
+{
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+ return h * 37 + value * GOLDEN_RATIO_PRIME;
+#undef GOLDEN_RATIO_PRIME
+}
+
+#define for_each_dedup_cand(d, hash, node) \
+ for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
+ node; \
+ node = node->next)
+
+static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
+{
+ struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
+ int bucket = hash & (d->opts.dedup_table_size - 1);
+
+ if (!node)
+ return -ENOMEM;
+ node->type_id = type_id;
+ node->next = d->dedup_table[bucket];
+ d->dedup_table[bucket] = node;
+ return 0;
+}
+
+static int btf_dedup_hypot_map_add(struct btf_dedup *d,
+ __u32 from_id, __u32 to_id)
+{
+ if (d->hypot_cnt == d->hypot_cap) {
+ __u32 *new_list;
+
+ d->hypot_cap += max(16, d->hypot_cap / 2);
+ new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
+ if (!new_list)
+ return -ENOMEM;
+ d->hypot_list = new_list;
+ }
+ d->hypot_list[d->hypot_cnt++] = from_id;
+ d->hypot_map[from_id] = to_id;
+ return 0;
+}
+
+static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
+{
+ int i;
+
+ for (i = 0; i < d->hypot_cnt; i++)
+ d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
+ d->hypot_cnt = 0;
+}
+
+static void btf_dedup_table_free(struct btf_dedup *d)
+{
+ struct btf_dedup_node *head, *tmp;
+ int i;
+
+ if (!d->dedup_table)
+ return;
+
+ for (i = 0; i < d->opts.dedup_table_size; i++) {
+ head = d->dedup_table[i];
+ while (head) {
+ tmp = head;
+ head = head->next;
+ free(tmp);
+ }
+ }
+
+ free(d->dedup_table);
+ d->dedup_table = NULL;
+}
+
+static void btf_dedup_free(struct btf_dedup *d)
+{
+ btf_dedup_table_free(d);
+
+ free(d->map);
+ d->map = NULL;
+
+ free(d->hypot_map);
+ d->hypot_map = NULL;
+
+ free(d->hypot_list);
+ d->hypot_list = NULL;
+
+ free(d);
+}
+
+/* Find the closest power of two >= size, capped at 2^max_size_log */
+static __u32 roundup_pow2_max(__u32 size, int max_size_log)
+{
+ int i;
+
+ for (i = 0; i < max_size_log && (1U << i) < size; i++)
+ ;
+ return 1U << i;
+}
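+
+/*
+ * For illustration: roundup_pow2_max(300, 31) returns 512, while
+ * roundup_pow2_max(1 << 20, 8) is capped and returns 256.
+ */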
+
+static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts)
+{
+ struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
+ int i, err = 0;
+ __u32 sz;
+
+ if (!d)
+ return ERR_PTR(-ENOMEM);
+
+ d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
+ sz = opts && opts->dedup_table_size ? opts->dedup_table_size
+ : BTF_DEDUP_TABLE_DEFAULT_SIZE;
+ sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
+ d->opts.dedup_table_size = sz;
+
+ d->btf = btf;
+ d->btf_ext = btf_ext;
+
+ d->dedup_table = calloc(d->opts.dedup_table_size,
+ sizeof(struct btf_dedup_node *));
+ if (!d->dedup_table) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
+ if (!d->map) {
+ err = -ENOMEM;
+ goto done;
+ }
+ /* special BTF "void" type is made canonical immediately */
+ d->map[0] = 0;
+ for (i = 1; i <= btf->nr_types; i++)
+ d->map[i] = BTF_UNPROCESSED_ID;
+
+ d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
+ if (!d->hypot_map) {
+ err = -ENOMEM;
+ goto done;
+ }
+ for (i = 0; i <= btf->nr_types; i++)
+ d->hypot_map[i] = BTF_UNPROCESSED_ID;
+
+done:
+ if (err) {
+ btf_dedup_free(d);
+ return ERR_PTR(err);
+ }
+
+ return d;
+}
+
+typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
+
+/*
+ * Iterate over all possible places in .BTF and .BTF.ext that can reference
+ * a string and pass a pointer to it to the provided callback `fn`.
+ */
+static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
+{
+ void *line_data_cur, *line_data_end;
+ int i, j, r, rec_size;
+ struct btf_type *t;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ t = d->btf->types[i];
+ r = fn(&t->name_off, ctx);
+ if (r)
+ return r;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = (struct btf_member *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ case BTF_KIND_ENUM: {
+ struct btf_enum *m = (struct btf_enum *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *m = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (!d->btf_ext)
+ return 0;
+
+ line_data_cur = d->btf_ext->line_info.info;
+ line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
+ rec_size = d->btf_ext->line_info.rec_size;
+
+ while (line_data_cur < line_data_end) {
+ struct btf_ext_info_sec *sec = line_data_cur;
+ struct bpf_line_info_min *line_info;
+ __u32 num_info = sec->num_info;
+
+ r = fn(&sec->sec_name_off, ctx);
+ if (r)
+ return r;
+
+ line_data_cur += sizeof(struct btf_ext_info_sec);
+ for (i = 0; i < num_info; i++) {
+ line_info = line_data_cur;
+ r = fn(&line_info->file_name_off, ctx);
+ if (r)
+ return r;
+ r = fn(&line_info->line_off, ctx);
+ if (r)
+ return r;
+ line_data_cur += rec_size;
+ }
+ }
+
+ return 0;
+}
+
+static int str_sort_by_content(const void *a1, const void *a2)
+{
+ const struct btf_str_ptr *p1 = a1;
+ const struct btf_str_ptr *p2 = a2;
+
+ return strcmp(p1->str, p2->str);
+}
+
+static int str_sort_by_offset(const void *a1, const void *a2)
+{
+ const struct btf_str_ptr *p1 = a1;
+ const struct btf_str_ptr *p2 = a2;
+
+ if (p1->str != p2->str)
+ return p1->str < p2->str ? -1 : 1;
+ return 0;
+}
+
+static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
+{
+ const struct btf_str_ptr *p = pelem;
+
+ if (str_ptr != p->str)
+ return (const char *)str_ptr < p->str ? -1 : 1;
+ return 0;
+}
+
+static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
+{
+ struct btf_str_ptrs *strs;
+ struct btf_str_ptr *s;
+
+ if (*str_off_ptr == 0)
+ return 0;
+
+ strs = ctx;
+ s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
+ sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
+ if (!s)
+ return -EINVAL;
+ s->used = true;
+ return 0;
+}
+
+static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
+{
+ struct btf_str_ptrs *strs;
+ struct btf_str_ptr *s;
+
+ if (*str_off_ptr == 0)
+ return 0;
+
+ strs = ctx;
+ s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
+ sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
+ if (!s)
+ return -EINVAL;
+ *str_off_ptr = s->new_off;
+ return 0;
+}
+
+/*
+ * Dedup strings and filter out those that are not referenced from either .BTF
+ * or .BTF.ext (if provided) sections.
+ *
+ * This is done by building an index of all strings in BTF's string section,
+ * then iterating over all entities that can reference strings (e.g., type
+ * names, struct field names, .BTF.ext line info, etc) and marking the
+ * corresponding strings as used. After that, all used strings are deduped and
+ * compacted into a sequential blob of memory and new offsets are calculated.
+ * Then all the string references are iterated again and rewritten using new
+ * offsets.
+ */
+static int btf_dedup_strings(struct btf_dedup *d)
+{
+ const struct btf_header *hdr = d->btf->hdr;
+ char *start = (char *)d->btf->nohdr_data + hdr->str_off;
+ char *end = start + d->btf->hdr->str_len;
+ char *p = start, *tmp_strs = NULL;
+ struct btf_str_ptrs strs = {
+ .cnt = 0,
+ .cap = 0,
+ .ptrs = NULL,
+ .data = start,
+ };
+ int i, j, err = 0, grp_idx;
+ bool grp_used;
+
+ /* build index of all strings */
+ while (p < end) {
+ if (strs.cnt + 1 > strs.cap) {
+ struct btf_str_ptr *new_ptrs;
+
+ strs.cap += max(strs.cnt / 2, 16);
+ new_ptrs = realloc(strs.ptrs,
+ sizeof(strs.ptrs[0]) * strs.cap);
+ if (!new_ptrs) {
+ err = -ENOMEM;
+ goto done;
+ }
+ strs.ptrs = new_ptrs;
+ }
+
+ strs.ptrs[strs.cnt].str = p;
+ strs.ptrs[strs.cnt].used = false;
+
+ p += strlen(p) + 1;
+ strs.cnt++;
+ }
+
+ /* temporary storage for deduplicated strings */
+ tmp_strs = malloc(d->btf->hdr->str_len);
+ if (!tmp_strs) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* mark all used strings */
+ strs.ptrs[0].used = true;
+ err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
+ if (err)
+ goto done;
+
+ /* sort strings by content, so that we can identify duplicates */
+ qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
+
+ /*
+ * iterate groups of equal strings and if any instance in a group was
+ * referenced, emit single instance and remember new offset
+ */
+ p = tmp_strs;
+ grp_idx = 0;
+ grp_used = strs.ptrs[0].used;
+ /* iterate past end to avoid code duplication after loop */
+ for (i = 1; i <= strs.cnt; i++) {
+ /*
+ * when i == strs.cnt, we want to skip string comparison and go
+ * straight to handling last group of strings (otherwise we'd
+ * need to handle last group after the loop w/ duplicated code)
+ */
+ if (i < strs.cnt &&
+ !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
+ grp_used = grp_used || strs.ptrs[i].used;
+ continue;
+ }
+
+ /*
+ * this check would have been required after the loop to handle
+ * last group of strings, but due to <= condition in a loop
+ * we avoid that duplication
+ */
+ if (grp_used) {
+ int new_off = p - tmp_strs;
+ __u32 len = strlen(strs.ptrs[grp_idx].str);
+
+ memmove(p, strs.ptrs[grp_idx].str, len + 1);
+ for (j = grp_idx; j < i; j++)
+ strs.ptrs[j].new_off = new_off;
+ p += len + 1;
+ }
+
+ if (i < strs.cnt) {
+ grp_idx = i;
+ grp_used = strs.ptrs[i].used;
+ }
+ }
+
+ /* replace original strings with deduped ones */
+ d->btf->hdr->str_len = p - tmp_strs;
+ memmove(start, tmp_strs, d->btf->hdr->str_len);
+ end = start + d->btf->hdr->str_len;
+
+ /* restore original order for further binary search lookups */
+ qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
+
+ /* remap string offsets */
+ err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
+ if (err)
+ goto done;
+
+ d->btf->hdr->str_len = end - start;
+
+done:
+ free(tmp_strs);
+ free(strs.ptrs);
+ return err;
+}
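+
+/*
+ * For illustration (hypothetical string section contents), a .BTF string
+ * section like
+ *
+ *   "\0int\0foo\0int\0unused\0"
+ *
+ * where both copies of "int" and "foo" are referenced but "unused" is not,
+ * would be rewritten by btf_dedup_strings() as
+ *
+ *   "\0int\0foo\0"
+ *
+ * and every name_off/str_off that referenced the second "int" is remapped to
+ * the offset of the first one.
+ */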
+
+static __u32 btf_hash_common(struct btf_type *t)
+{
+ __u32 h;
+
+ h = hash_combine(0, t->name_off);
+ h = hash_combine(h, t->info);
+ h = hash_combine(h, t->size);
+ return h;
+}
+
+static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
+{
+ return t1->name_off == t2->name_off &&
+ t1->info == t2->info &&
+ t1->size == t2->size;
+}
+
+/* Calculate type signature hash of INT. */
+static __u32 btf_hash_int(struct btf_type *t)
+{
+ __u32 info = *(__u32 *)(t + 1);
+ __u32 h;
+
+ h = btf_hash_common(t);
+ h = hash_combine(h, info);
+ return h;
+}
+
+/* Check structural equality of two INTs. */
+static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
+{
+ __u32 info1, info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+ info1 = *(__u32 *)(t1 + 1);
+ info2 = *(__u32 *)(t2 + 1);
+ return info1 == info2;
+}
+
+/* Calculate type signature hash of ENUM. */
+static __u32 btf_hash_enum(struct btf_type *t)
+{
+ struct btf_enum *member = (struct btf_enum *)(t + 1);
+ __u32 vlen = BTF_INFO_VLEN(t->info);
+ __u32 h = btf_hash_common(t);
+ int i;
+
+ for (i = 0; i < vlen; i++) {
+ h = hash_combine(h, member->name_off);
+ h = hash_combine(h, member->val);
+ member++;
+ }
+ return h;
+}
+
+/* Check structural equality of two ENUMs. */
+static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_enum *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_enum *)(t1 + 1);
+ m2 = (struct btf_enum *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->val != m2->val)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
+ * as referenced type IDs equivalence is established separately during type
+ * graph equivalence check algorithm.
+ */
+static __u32 btf_hash_struct(struct btf_type *t)
+{
+ struct btf_member *member = (struct btf_member *)(t + 1);
+ __u32 vlen = BTF_INFO_VLEN(t->info);
+ __u32 h = btf_hash_common(t);
+ int i;
+
+ for (i = 0; i < vlen; i++) {
+ h = hash_combine(h, member->name_off);
+ h = hash_combine(h, member->offset);
+ /* no hashing of referenced type ID, it can be unresolved yet */
+ member++;
+ }
+ return h;
+}
+
+/*
+ * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_member *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_member *)(t1 + 1);
+ m2 = (struct btf_member *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->offset != m2->offset)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Calculate type signature hash of ARRAY, including referenced type IDs,
+ * under assumption that they were already resolved to canonical type IDs and
+ * are not going to change.
+ */
+static __u32 btf_hash_array(struct btf_type *t)
+{
+ struct btf_array *info = (struct btf_array *)(t + 1);
+ __u32 h = btf_hash_common(t);
+
+ h = hash_combine(h, info->type);
+ h = hash_combine(h, info->index_type);
+ h = hash_combine(h, info->nelems);
+ return h;
+}
+
+/*
+ * Check exact equality of two ARRAYs, taking into account referenced
+ * type IDs, under assumption that they were already resolved to canonical
+ * type IDs and are not going to change.
+ * This function is called during reference types deduplication to compare
+ * ARRAY to potential canonical representative.
+ */
+static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_array *info1, *info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ info1 = (struct btf_array *)(t1 + 1);
+ info2 = (struct btf_array *)(t2 + 1);
+ return info1->type == info2->type &&
+ info1->index_type == info2->index_type &&
+ info1->nelems == info2->nelems;
+}
+
+/*
+ * Check structural compatibility of two ARRAYs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_array *info1, *info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ info1 = (struct btf_array *)(t1 + 1);
+ info2 = (struct btf_array *)(t2 + 1);
+ return info1->nelems == info2->nelems;
+}
+
+/*
+ * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
+ * under assumption that they were already resolved to canonical type IDs and
+ * are not going to change.
+ */
+static inline __u32 btf_hash_fnproto(struct btf_type *t)
+{
+ struct btf_param *member = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ __u32 h = btf_hash_common(t);
+ int i;
+
+ for (i = 0; i < vlen; i++) {
+ h = hash_combine(h, member->name_off);
+ h = hash_combine(h, member->type);
+ member++;
+ }
+ return h;
+}
+
+/*
+ * Check exact equality of two FUNC_PROTOs, taking into account referenced
+ * type IDs, under assumption that they were already resolved to canonical
+ * type IDs and are not going to change.
+ * This function is called during reference types deduplication to compare
+ * FUNC_PROTO to potential canonical representative.
+ */
+static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_param *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_param *)(t1 + 1);
+ m2 = (struct btf_param *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->type != m2->type)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_param *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ /* skip return type ID */
+ if (t1->name_off != t2->name_off || t1->info != t2->info)
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_param *)(t1 + 1);
+ m2 = (struct btf_param *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Deduplicate primitive types, which can't reference other types, by
+ * calculating their type signature hash and comparing it with any possible
+ * canonical candidate. If no canonical candidate matches, the type itself is
+ * marked as canonical and is added into `btf_dedup->dedup_table` as another
+ * candidate.
+ */
+static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_type *t = d->btf->types[type_id];
+ struct btf_type *cand;
+ struct btf_dedup_node *cand_node;
+ /* if we don't find equivalent type, then we are canonical */
+ __u32 new_id = type_id;
+ __u32 h;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_FUNC_PROTO:
+ return 0;
+
+ case BTF_KIND_INT:
+ h = btf_hash_int(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_int(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ case BTF_KIND_ENUM:
+ h = btf_hash_enum(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_enum(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ case BTF_KIND_FWD:
+ h = btf_hash_common(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_common(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ d->map[type_id] = new_id;
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int btf_dedup_prim_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_prim_type(d, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
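+
+/*
+ * For illustration: if two CUs each contribute an identical `int` descriptor
+ * (same name_off after string dedup, same info and size, same extra encoding
+ * word), both hash to the same dedup_table bucket via btf_hash_int(). The
+ * first one seen becomes canonical (d->map[id1] = id1) and is added to the
+ * table; the second one matches it through btf_equal_int() and is simply
+ * mapped to it (d->map[id2] = id1) without being added again.
+ */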
+
+/*
+ * Check whether type is already mapped into canonical one (could be to itself).
+ */
+static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
+{
+ return d->map[type_id] <= BTF_MAX_NR_TYPES;
+}
+
+/*
+ * Resolve type ID into its canonical type ID, if any; otherwise return original
+ * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
+ * STRUCT/UNION link and resolve it into canonical type ID as well.
+ */
+static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
+{
+ while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
+ type_id = d->map[type_id];
+ return type_id;
+}
+
+/*
+ * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
+ * type ID.
+ */
+static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
+{
+ __u32 orig_type_id = type_id;
+
+ if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ return type_id;
+
+ while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
+ type_id = d->map[type_id];
+
+ if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ return type_id;
+
+ return orig_type_id;
+}
+
+
+static inline __u16 btf_fwd_kind(struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
+}
+
+/*
+ * Check equivalence of BTF type graph formed by candidate struct/union (we'll
+ * call it "candidate graph" in this description for brevity) to a type graph
+ * formed by (potential) canonical struct/union ("canonical graph" for brevity
+ * here, though keep in mind that not all types in canonical graph are
+ * necessarily canonical representatives themselves, some of them might be
+ * duplicates or its uniqueness might not have been established yet).
+ * Returns:
+ * - >0, if type graphs are equivalent;
+ * - 0, if not equivalent;
+ * - <0, on error.
+ *
+ * The algorithm performs a side-by-side DFS traversal of both type graphs and
+ * checks equivalence of BTF types at each step. If at any point BTF types in
+ * the candidate and canonical graphs are not structurally compatible, the
+ * whole graphs are incompatible. If types are structurally equivalent (i.e.,
+ * all information except referenced type IDs is exactly the same), a mapping
+ * from `canon_id` to a `cand_id` is recorded in the hypothetical mapping
+ * (`btf_dedup->hypot_map`).
+ * If a type references other types, then those referenced types are checked
+ * for equivalence recursively.
+ *
+ * During DFS traversal, if we find that for current `canon_id` type we
+ * already have some mapping in hypothetical map, we check for two possible
+ * situations:
+ * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
+ * happen when type graphs have cycles. In this case we assume those two
+ * types are equivalent.
+ * - `canon_id` is mapped to a different type. This is a contradiction in our
+ * hypothetical mapping, because the same graph in the canonical graph corresponds
+ * to two different types in candidate graph, which for equivalent type
+ * graphs shouldn't happen. This condition terminates equivalence check
+ * with negative result.
+ *
+ * If the type graph traversal exhausts types to check and finds no
+ * contradiction, then the type graphs are equivalent.
+ *
+ * When checking types for equivalence, there is one special case: FWD types.
+ * If FWD type resolution is allowed and one of the types (either from canonical
+ * or candidate graph) is FWD and the other is STRUCT/UNION (depending on FWD's
+ * kind flag) and their names match, the hypothetical mapping is updated to
+ * point from FWD to STRUCT/UNION. If the graphs are then successfully
+ * determined to be equivalent, this mapping is used to record the
+ * FWD -> STRUCT/UNION mapping permanently.
+ *
+ * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
+ * if there are two identically named (or anonymous) structs/unions that are
+ * compatible structurally, one of which has a FWD field, while the other is a
+ * concrete STRUCT/UNION, but according to C sources they are different
+ * structs/unions that are referencing different types with the same name.
+ * This is extremely unlikely to happen, but the btf_dedup API allows FWD
+ * resolution to be disabled if this logic is causing problems.
+ *
+ * Doing FWD resolution means that both candidate and/or canonical graphs can
+ * consist of portions of the graph that come from multiple compilation units.
+ * This is due to the fact that types within a single compilation unit are
+ * always deduplicated and FWDs are already resolved, if the referenced
+ * struct/union definition is available. So, if we had an unresolved FWD and found a corresponding
+ * STRUCT/UNION, they will be from different compilation units. This
+ * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
+ * type graph will likely have at least two different BTF types that describe
+ * same type (e.g., most probably there will be two different BTF types for the
+ * same 'int' primitive type) and could even have "overlapping" parts of type
+ * graph that describe same subset of types.
+ *
+ * This in turn means that our assumption that each type in canonical graph
+ * must correspond to exactly one type in candidate graph might not hold
+ * anymore and will make it harder to detect contradictions using hypothetical
+ * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
+ * resolution only in canonical graph. FWDs in candidate graphs are never
+ * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
+ * that can occur:
+ * - Both types in canonical and candidate graphs are FWDs. If they are
+ * structurally equivalent, then they can either be both resolved to the
+ * same STRUCT/UNION or not resolved at all. In both cases they are
+ * equivalent and there is no need to resolve FWD on candidate side.
+ * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
+ * so nothing to resolve as well, algorithm will check equivalence anyway.
+ * - Type in canonical graph is FWD, while type in candidate is concrete
+ * STRUCT/UNION. In this case candidate graph comes from single compilation
+ * unit, so there is exactly one BTF type for each unique C type. After
+ * resolving FWD into STRUCT/UNION, there might be more than one BTF type
+ * in canonical graph mapping to single BTF type in candidate graph, but
+ * because hypothetical mapping maps from canonical to candidate types, it's
+ * alright, and we still maintain the property of having single `canon_id`
+ * mapping to single `cand_id` (there could be two different `canon_id`
+ * mapped to the same `cand_id`, but it's not contradictory).
+ * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
+ * graph is FWD. In this case we are just going to check compatibility of
+ * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
+ * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
+ * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
+ * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
+ * canonical graph.
+ */
+static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
+ __u32 canon_id)
+{
+ struct btf_type *cand_type;
+ struct btf_type *canon_type;
+ __u32 hypot_type_id;
+ __u16 cand_kind;
+ __u16 canon_kind;
+ int i, eq;
+
+ /* if both resolve to the same canonical, they must be equivalent */
+ if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
+ return 1;
+
+ canon_id = resolve_fwd_id(d, canon_id);
+
+ hypot_type_id = d->hypot_map[canon_id];
+ if (hypot_type_id <= BTF_MAX_NR_TYPES)
+ return hypot_type_id == cand_id;
+
+ if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
+ return -ENOMEM;
+
+ cand_type = d->btf->types[cand_id];
+ canon_type = d->btf->types[canon_id];
+ cand_kind = BTF_INFO_KIND(cand_type->info);
+ canon_kind = BTF_INFO_KIND(canon_type->info);
+
+ if (cand_type->name_off != canon_type->name_off)
+ return 0;
+
+ /* FWD <--> STRUCT/UNION equivalence check, if enabled */
+ if (!d->opts.dont_resolve_fwds
+ && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
+ && cand_kind != canon_kind) {
+ __u16 real_kind;
+ __u16 fwd_kind;
+
+ if (cand_kind == BTF_KIND_FWD) {
+ real_kind = canon_kind;
+ fwd_kind = btf_fwd_kind(cand_type);
+ } else {
+ real_kind = cand_kind;
+ fwd_kind = btf_fwd_kind(canon_type);
+ }
+ return fwd_kind == real_kind;
+ }
+
+ if (cand_type->info != canon_type->info)
+ return 0;
+
+ switch (cand_kind) {
+ case BTF_KIND_INT:
+ return btf_equal_int(cand_type, canon_type);
+
+ case BTF_KIND_ENUM:
+ return btf_equal_enum(cand_type, canon_type);
+
+ case BTF_KIND_FWD:
+ return btf_equal_common(cand_type, canon_type);
+
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *cand_arr, *canon_arr;
+
+ if (!btf_compat_array(cand_type, canon_type))
+ return 0;
+ cand_arr = (struct btf_array *)(cand_type + 1);
+ canon_arr = (struct btf_array *)(canon_type + 1);
+ eq = btf_dedup_is_equiv(d,
+ cand_arr->index_type, canon_arr->index_type);
+ if (eq <= 0)
+ return eq;
+ return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
+ }
+
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *cand_m, *canon_m;
+ __u16 vlen;
+
+ if (!btf_shallow_equal_struct(cand_type, canon_type))
+ return 0;
+ vlen = BTF_INFO_VLEN(cand_type->info);
+ cand_m = (struct btf_member *)(cand_type + 1);
+ canon_m = (struct btf_member *)(canon_type + 1);
+ for (i = 0; i < vlen; i++) {
+ eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
+ if (eq <= 0)
+ return eq;
+ cand_m++;
+ canon_m++;
+ }
+
+ return 1;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *cand_p, *canon_p;
+ __u16 vlen;
+
+ if (!btf_compat_fnproto(cand_type, canon_type))
+ return 0;
+ eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
+ if (eq <= 0)
+ return eq;
+ vlen = BTF_INFO_VLEN(cand_type->info);
+ cand_p = (struct btf_param *)(cand_type + 1);
+ canon_p = (struct btf_param *)(canon_type + 1);
+ for (i = 0; i < vlen; i++) {
+ eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
+ if (eq <= 0)
+ return eq;
+ cand_p++;
+ canon_p++;
+ }
+ return 1;
+ }
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
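+
+/*
+ * For illustration of how the hypothetical map handles cycles: for a
+ * self-referential type such as
+ *
+ *   struct node { struct node *next; };
+ *
+ * the traversal reaches `struct node` again through the `next` pointer chain.
+ * At that point hypot_map[canon_id] is already set, so the early
+ * `hypot_type_id == cand_id` check returns 1 instead of recursing forever.
+ */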
+
+/*
+ * Use hypothetical mapping, produced by successful type graph equivalence
+ * check, to augment existing struct/union canonical mapping, where possible.
+ *
+ * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
+ * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
+ * it doesn't matter if FWD type was part of canonical graph or candidate one,
+ * we are recording the mapping anyway. As opposed to the carefulness required
+ * for struct/union correspondence mapping (described below), for FWD resolution
+ * it's not important, as by the time that FWD type (reference type) is
+ * deduplicated all structs/unions will have been deduped already anyway.
+ *
+ * Recording STRUCT/UNION mapping is purely a performance optimization and is
+ * not required for correctness. It needs to be done carefully to ensure that
+ * struct/union from candidate's type graph is not mapped into corresponding
+ * struct/union from canonical type graph that itself hasn't been resolved into
+ * canonical representative. The only guarantee we have is that canonical
+ * struct/union was determined as canonical and that won't change. But any
+ * types referenced through that struct/union fields could have been not yet
+ * resolved, so in case like that it's too early to establish any kind of
+ * correspondence between structs/unions.
+ *
+ * No canonical correspondence is derived for primitive types (they are
+ * already completely deduplicated anyway) or reference types (they rely on
+ * stability of struct/union canonical relationship for equivalence checks).
+ */
+static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
+{
+ __u32 cand_type_id, targ_type_id;
+ __u16 t_kind, c_kind;
+ __u32 t_id, c_id;
+ int i;
+
+ for (i = 0; i < d->hypot_cnt; i++) {
+ cand_type_id = d->hypot_list[i];
+ targ_type_id = d->hypot_map[cand_type_id];
+ t_id = resolve_type_id(d, targ_type_id);
+ c_id = resolve_type_id(d, cand_type_id);
+ t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info);
+ c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info);
+ /*
+ * Resolve FWD into STRUCT/UNION.
+ * It's ok to resolve FWD into STRUCT/UNION that's not yet
+ * mapped to canonical representative (as opposed to
+ * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
+ * eventually that struct is going to be mapped and all resolved
+ * FWDs will automatically resolve to correct canonical
+ * representative. This will happen before ref type deduping,
+ * which critically depends on the stability of these mappings. This
+ * stability is not a requirement for STRUCT/UNION equivalence
+ * checks, though.
+ */
+ if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
+ d->map[c_id] = t_id;
+ else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
+ d->map[t_id] = c_id;
+
+ if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
+ c_kind != BTF_KIND_FWD &&
+ is_type_mapped(d, c_id) &&
+ !is_type_mapped(d, t_id)) {
+ /*
+ * as a perf optimization, we can map struct/union
+ * that's part of type graph we just verified for
+ * equivalence. We can do that for struct/union that has
+ * canonical representative only, though.
+ */
+ d->map[t_id] = c_id;
+ }
+ }
+}
+
+/*
+ * Deduplicate struct/union types.
+ *
+ * For each struct/union type its type signature hash is calculated, taking
+ * into account the type's name, size, and the number, order, and names of its
+ * fields, but ignoring the type IDs referenced from fields, because they might
+ * not be deduped completely until after the reference types deduplication
+ * phase. This type hash is used to iterate over all potential canonical types
+ * sharing the same hash.
+ * For each canonical candidate we check whether type graphs that they form
+ * (through referenced types in fields and so on) are equivalent using algorithm
+ * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
+ * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
+ * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
+ * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
+ * potentially map other structs/unions to their canonical representatives,
+ * if such relationship hasn't yet been established. This speeds up algorithm
+ * by eliminating some of the duplicate work.
+ *
+ * If no matching canonical representative was found, struct/union is marked
+ * as canonical for itself and is added into btf_dedup->dedup_table hash map
+ * for further look ups.
+ */
+static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_dedup_node *cand_node;
+ struct btf_type *cand_type, *t;
+ /* if we don't find equivalent type, then we are canonical */
+ __u32 new_id = type_id;
+ __u16 kind;
+ __u32 h;
+
+ /* already deduped or is in process of deduping (loop detected) */
+ if (d->map[type_id] <= BTF_MAX_NR_TYPES)
+ return 0;
+
+ t = d->btf->types[type_id];
+ kind = BTF_INFO_KIND(t->info);
+
+ if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
+ return 0;
+
+ h = btf_hash_struct(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ int eq;
+
+ /*
+ * Even though btf_dedup_is_equiv() checks for
+ * btf_shallow_equal_struct() internally when checking two
+ * structs (unions) for equivalence, we need to guard here
+ * from picking matching FWD type as a dedup candidate.
+ * This can happen due to hash collision. In such case just
+ * relying on btf_dedup_is_equiv() would lead to potentially
+ * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
+ * FWD and compatible STRUCT/UNION are considered equivalent.
+ */
+ cand_type = d->btf->types[cand_node->type_id];
+ if (!btf_shallow_equal_struct(t, cand_type))
+ continue;
+
+ btf_dedup_clear_hypot_map(d);
+ eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
+ if (eq < 0)
+ return eq;
+ if (!eq)
+ continue;
+ new_id = cand_node->type_id;
+ btf_dedup_merge_hypot_map(d);
+ break;
+ }
+
+ d->map[type_id] = new_id;
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int btf_dedup_struct_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_struct_type(d, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Deduplicate reference type.
+ *
+ * Once all primitive and struct/union types got deduplicated, we can easily
+ * deduplicate all other (reference) BTF types. This is done in two steps:
+ *
+ * 1. Resolve all referenced type IDs into their canonical type IDs. This
+ * resolution can be done either immediately for primitive or struct/union types
+ * (because they were deduped in previous two phases) or recursively for
+ * reference types. Recursion will always terminate at either primitive or
+ * struct/union type, at which point we can "unwind" chain of reference types
+ * one by one. There is no danger of encountering cycles because in C type
+ * system the only way to form type cycle is through struct/union, so any chain
+ * of reference types, even those taking part in a type cycle, will inevitably
+ * reach struct/union at some point.
+ *
+ * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
+ * becomes "stable", in the sense that no further deduplication will cause
+ * any changes to it. With that, it's now possible to calculate type's signature
+ * hash (this time taking into account referenced type IDs) and loop over all
+ * potential canonical representatives. If no match was found, current type
+ * will become canonical representative of itself and will be added into
+ * btf_dedup->dedup_table as another possible canonical representative.
+ */
+static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_dedup_node *cand_node;
+ struct btf_type *t, *cand;
+ /* if we don't find equivalent type, then we are representative type */
+ __u32 new_id = type_id;
+ int ref_type_id;
+ __u32 h;
+
+ if (d->map[type_id] == BTF_IN_PROGRESS_ID)
+ return -ELOOP;
+ if (d->map[type_id] <= BTF_MAX_NR_TYPES)
+ return resolve_type_id(d, type_id);
+
+ t = d->btf->types[type_id];
+ d->map[type_id] = BTF_IN_PROGRESS_ID;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ ref_type_id = btf_dedup_ref_type(d, t->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ t->type = ref_type_id;
+
+ h = btf_hash_common(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_common(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *info = (struct btf_array *)(t + 1);
+
+ ref_type_id = btf_dedup_ref_type(d, info->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ info->type = ref_type_id;
+
+ ref_type_id = btf_dedup_ref_type(d, info->index_type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ info->index_type = ref_type_id;
+
+ h = btf_hash_array(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_array(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *param;
+ __u16 vlen;
+ int i;
+
+ ref_type_id = btf_dedup_ref_type(d, t->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ t->type = ref_type_id;
+
+ vlen = BTF_INFO_VLEN(t->info);
+ param = (struct btf_param *)(t + 1);
+ for (i = 0; i < vlen; i++) {
+ ref_type_id = btf_dedup_ref_type(d, param->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ param->type = ref_type_id;
+ param++;
+ }
+
+ h = btf_hash_fnproto(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_fnproto(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ d->map[type_id] = new_id;
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return new_id;
+}
+
+static int btf_dedup_ref_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_ref_type(d, i);
+ if (err < 0)
+ return err;
+ }
+ btf_dedup_table_free(d);
+ return 0;
+}
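+
+/*
+ * For illustration: deduping a `const int *` candidate works inside out.
+ * btf_dedup_ref_type() on the PTR recurses into the CONST, which recurses
+ * into the INT; the INT is already mapped from the primitive pass, so the
+ * CONST's referenced type ID is rewritten to the canonical INT, the CONST is
+ * then hashed and matched (or made canonical itself), and finally the PTR is
+ * resolved the same way using its now-stable referenced type ID.
+ */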
+
+/*
+ * Compact types.
+ *
+ * After we have established for each type its corresponding canonical
+ * representative type, we can now eliminate types that are not canonical and
+ * leave only the canonical ones laid out sequentially in memory by copying
+ * them over duplicates. During compaction the btf_dedup->hypot_map array is
+ * reused to store
+ * a map from original type ID to a new compacted type ID, which will be used
+ * during next phase to "fix up" type IDs, referenced from struct/union and
+ * reference types.
+ */
+static int btf_dedup_compact_types(struct btf_dedup *d)
+{
+ struct btf_type **new_types;
+ __u32 next_type_id = 1;
+ char *types_start, *p;
+ int i, len;
+
+ /* we are going to reuse hypot_map to store compaction remapping */
+ d->hypot_map[0] = 0;
+ for (i = 1; i <= d->btf->nr_types; i++)
+ d->hypot_map[i] = BTF_UNPROCESSED_ID;
+
+ types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
+ p = types_start;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ if (d->map[i] != i)
+ continue;
+
+ len = btf_type_size(d->btf->types[i]);
+ if (len < 0)
+ return len;
+
+ memmove(p, d->btf->types[i], len);
+ d->hypot_map[i] = next_type_id;
+ d->btf->types[next_type_id] = (struct btf_type *)p;
+ p += len;
+ next_type_id++;
+ }
+
+ /* shrink struct btf's internal types index and update btf_header */
+ d->btf->nr_types = next_type_id - 1;
+ d->btf->types_size = d->btf->nr_types;
+ d->btf->hdr->type_len = p - types_start;
+ new_types = realloc(d->btf->types,
+ (1 + d->btf->nr_types) * sizeof(struct btf_type *));
+ if (!new_types)
+ return -ENOMEM;
+ d->btf->types = new_types;
+
+ /* make sure string section follows type information without gaps */
+ d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
+ memmove(p, d->btf->strings, d->btf->hdr->str_len);
+ d->btf->strings = p;
+ p += d->btf->hdr->str_len;
+
+ d->btf->data_size = p - (char *)d->btf->data;
+ return 0;
+}
+
+/*
+ * Figure out the final (deduplicated and compacted) type ID for the provided
+ * original `type_id` by first resolving it into the corresponding canonical
+ * type ID and then mapping it to a deduplicated type ID, stored in
+ * btf_dedup->hypot_map, which is populated during the compaction phase.
+ */
+static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
+{
+ __u32 resolved_type_id, new_type_id;
+
+ resolved_type_id = resolve_type_id(d, type_id);
+ new_type_id = d->hypot_map[resolved_type_id];
+ if (new_type_id > BTF_MAX_NR_TYPES)
+ return -EINVAL;
+ return new_type_id;
+}
+
+/*
+ * Remap referenced type IDs into deduped type IDs.
+ *
+ * After BTF types are deduplicated and compacted, their final type IDs may
+ * differ from the original ones. The map from an original to the corresponding
+ * deduped type ID is stored in btf_dedup->hypot_map and is populated during
+ * the compaction phase. During the remapping phase we rewrite all type IDs
+ * referenced from any BTF type (e.g., struct fields, func proto args, etc.)
+ * to their final deduped type IDs.
+ */
+static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_type *t = d->btf->types[type_id];
+ int i, r;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_ENUM:
+ break;
+
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ r = btf_dedup_remap_type_id(d, t->type);
+ if (r < 0)
+ return r;
+ t->type = r;
+ break;
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *arr_info = (struct btf_array *)(t + 1);
+
+ r = btf_dedup_remap_type_id(d, arr_info->type);
+ if (r < 0)
+ return r;
+ arr_info->type = r;
+ r = btf_dedup_remap_type_id(d, arr_info->index_type);
+ if (r < 0)
+ return r;
+ arr_info->index_type = r;
+ break;
+ }
+
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *member = (struct btf_member *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (i = 0; i < vlen; i++) {
+ r = btf_dedup_remap_type_id(d, member->type);
+ if (r < 0)
+ return r;
+ member->type = r;
+ member++;
+ }
+ break;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *param = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ r = btf_dedup_remap_type_id(d, t->type);
+ if (r < 0)
+ return r;
+ t->type = r;
+
+ for (i = 0; i < vlen; i++) {
+ r = btf_dedup_remap_type_id(d, param->type);
+ if (r < 0)
+ return r;
+ param->type = r;
+ param++;
+ }
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btf_dedup_remap_types(struct btf_dedup *d)
+{
+ int i, r;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ r = btf_dedup_remap_type(d, i);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b0610dcdae6b..28a1e1e59861 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -55,33 +55,47 @@ struct btf_ext_header {
__u32 line_info_len;
};
-typedef int (*btf_print_fn_t)(const char *, ...)
- __attribute__((format(printf, 1, 2)));
-
LIBBPF_API void btf__free(struct btf *btf);
-LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
+LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
const char *type_name);
+LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__fd(const struct btf *btf);
+LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
+LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
+ __u32 expected_key_size,
+ __u32 expected_value_size,
+ __u32 *key_type_id, __u32 *value_type_id);
+
+LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
+LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
+LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
+ __u32 *size);
+LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **func_info, __u32 *cnt);
+LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **line_info, __u32 *cnt);
+LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+
+struct btf_dedup_opts {
+ unsigned int dedup_table_size;
+ bool dont_resolve_fwds;
+};
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
-void btf_ext__free(struct btf_ext *btf_ext);
-int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *func_info_len);
-int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt);
-__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
-__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts);
#ifdef __cplusplus
} /* extern "C" */
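
The split of btf__new() into btf__new() plus btf__load(), together with the new
btf__dedup() entry point, changes the typical caller flow. Below is a minimal
usage sketch under the new API; the `data`/`size` inputs and the use of
libbpf_get_error() for ERR_PTR-style returns are assumptions for illustration,
not part of this patch.

#include <stdio.h>
#include "btf.h"
#include "libbpf.h"

/* Sketch: parse raw .BTF bytes, dedup in place, then load into the kernel. */
static int load_btf_example(__u8 *data, __u32 size)
{
	struct btf *btf = btf__new(data, size);
	int err;

	err = libbpf_get_error(btf);	/* btf__new() returns ERR_PTR on failure */
	if (err)
		return err;

	/* NULL opts selects default dedup settings; .BTF.ext info is optional. */
	err = btf__dedup(btf, NULL, NULL);
	if (!err)
		err = btf__load(btf);	/* creates the in-kernel BTF object */
	if (!err)
		printf("loaded %u types, fd=%d\n",
		       btf__get_nr_types(btf), btf__fd(btf));

	btf__free(btf);
	return err;
}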
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 169e347c76f6..f5eb60379c8d 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -42,6 +42,7 @@
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
+#include "libbpf_util.h"
#ifndef EM_BPF
#define EM_BPF 247
@@ -53,39 +54,33 @@
#define __printf(a, b) __attribute__((format(printf, a, b)))
-__printf(1, 2)
-static int __base_pr(const char *format, ...)
+static int __base_pr(enum libbpf_print_level level, const char *format,
+ va_list args)
{
- va_list args;
- int err;
+ if (level == LIBBPF_DEBUG)
+ return 0;
- va_start(args, format);
- err = vfprintf(stderr, format, args);
- va_end(args);
- return err;
+ return vfprintf(stderr, format, args);
}
-static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
-static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
-static __printf(1, 2) libbpf_print_fn_t __pr_debug;
-
-#define __pr(func, fmt, ...) \
-do { \
- if ((func)) \
- (func)("libbpf: " fmt, ##__VA_ARGS__); \
-} while (0)
+static libbpf_print_fn_t __libbpf_pr = __base_pr;
-#define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
-#define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
-#define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
+void libbpf_set_print(libbpf_print_fn_t fn)
+{
+ __libbpf_pr = fn;
+}
-void libbpf_set_print(libbpf_print_fn_t warn,
- libbpf_print_fn_t info,
- libbpf_print_fn_t debug)
+__printf(2, 3)
+void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
- __pr_warning = warn;
- __pr_info = info;
- __pr_debug = debug;
+ va_list args;
+
+ if (!__libbpf_pr)
+ return;
+
+ va_start(args, format);
+ __libbpf_pr(level, format, args);
+ va_end(args);
}
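
On the caller side, the old three-callback libbpf_set_print(warn, info, debug)
collapses into a single level-aware hook. A small sketch of a user-supplied
callback under the new signature follows; the filtering policy is purely an
example.

#include <stdio.h>
#include <stdarg.h>
#include "libbpf.h"

/* Example print hook: forward warnings and info to stderr, drop debug output. */
static int example_print(enum libbpf_print_level level,
			 const char *format, va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;
	return vfprintf(stderr, format, args);
}

static void setup_logging(void)
{
	libbpf_set_print(example_print);
}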
#define STRERR_BUFSIZE 128
@@ -312,7 +307,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
return -EINVAL;
}
- bzero(prog, sizeof(*prog));
+ memset(prog, 0, sizeof(*prog));
prog->section_name = strdup(section_name);
if (!prog->section_name) {
@@ -839,9 +834,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
else if (strcmp(name, "maps") == 0)
obj->efile.maps_shndx = idx;
else if (strcmp(name, BTF_ELF_SEC) == 0) {
- obj->btf = btf__new(data->d_buf, data->d_size,
- __pr_debug);
- if (IS_ERR(obj->btf)) {
+ obj->btf = btf__new(data->d_buf, data->d_size);
+ if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_ELF_SEC, PTR_ERR(obj->btf));
obj->btf = NULL;
@@ -915,8 +909,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
} else {
obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
- btf_ext_data->d_size,
- __pr_debug);
+ btf_ext_data->d_size);
if (IS_ERR(obj->btf_ext)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_EXT_ELF_SEC,
@@ -1057,72 +1050,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
{
- const struct btf_type *container_type;
- const struct btf_member *key, *value;
struct bpf_map_def *def = &map->def;
- const size_t max_name = 256;
- char container_name[max_name];
- __s64 key_size, value_size;
- __s32 container_id;
-
- if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
- max_name) {
- pr_warning("map:%s length of '____btf_map_%s' is too long\n",
- map->name, map->name);
- return -EINVAL;
- }
-
- container_id = btf__find_by_name(btf, container_name);
- if (container_id < 0) {
- pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
- map->name, container_name);
- return container_id;
- }
-
- container_type = btf__type_by_id(btf, container_id);
- if (!container_type) {
- pr_warning("map:%s cannot find BTF type for container_id:%u\n",
- map->name, container_id);
- return -EINVAL;
- }
-
- if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
- BTF_INFO_VLEN(container_type->info) < 2) {
- pr_warning("map:%s container_name:%s is an invalid container struct\n",
- map->name, container_name);
- return -EINVAL;
- }
-
- key = (struct btf_member *)(container_type + 1);
- value = key + 1;
-
- key_size = btf__resolve_size(btf, key->type);
- if (key_size < 0) {
- pr_warning("map:%s invalid BTF key_type_size\n",
- map->name);
- return key_size;
- }
-
- if (def->key_size != key_size) {
- pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
- map->name, (__u32)key_size, def->key_size);
- return -EINVAL;
- }
-
- value_size = btf__resolve_size(btf, value->type);
- if (value_size < 0) {
- pr_warning("map:%s invalid BTF value_type_size\n", map->name);
- return value_size;
- }
+ __u32 key_type_id, value_type_id;
+ int ret;
- if (def->value_size != value_size) {
- pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
- map->name, (__u32)value_size, def->value_size);
- return -EINVAL;
- }
+ ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
+ def->value_size, &key_type_id,
+ &value_type_id);
+ if (ret)
+ return ret;
- map->btf_key_type_id = key->type;
- map->btf_value_type_id = value->type;
+ map->btf_key_type_id = key_type_id;
+ map->btf_value_type_id = value_type_id;
return 0;
}
@@ -1174,6 +1113,20 @@ err_free_new_name:
return -errno;
}
+int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+{
+ if (!map || !max_entries)
+ return -EINVAL;
+
+ /* If map already created, its attributes can't be changed. */
+ if (map->fd >= 0)
+ return -EBUSY;
+
+ map->def.max_entries = max_entries;
+
+ return 0;
+}
+
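
The new bpf_map__resize() only succeeds before the map has been created in the
kernel, so it belongs between bpf_object__open() and bpf_object__load(). A
brief sketch of that call order; the object handle and the map name are
illustrative.

#include <errno.h>
#include <stdio.h>
#include "libbpf.h"

/* Sketch: bump max_entries after open but before load; -EBUSY afterwards. */
static int resize_example(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");
	int err;

	if (!map)
		return -ENOENT;

	err = bpf_map__resize(map, 16384);
	if (err) {
		fprintf(stderr, "resize failed: %d\n", err);
		return err;
	}
	return bpf_object__load(obj);
}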
static int
bpf_object__probe_name(struct bpf_object *obj)
{
@@ -1637,7 +1590,7 @@ bpf_program__load(struct bpf_program *prog,
struct bpf_prog_prep_result result;
bpf_program_prep_t preprocessor = prog->preprocessor;
- bzero(&result, sizeof(result));
+ memset(&result, 0, sizeof(result));
err = preprocessor(prog, i, prog->insns,
prog->insns_cnt, &result);
if (err) {
@@ -2147,7 +2100,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
if (err)
return err;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
char buf[PATH_MAX];
int len;
@@ -2194,7 +2147,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
if (!obj)
return -ENOENT;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
char buf[PATH_MAX];
int len;
@@ -2378,6 +2331,11 @@ unsigned int bpf_object__kversion(struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}
+struct btf *bpf_object__btf(struct bpf_object *obj)
+{
+ return obj ? obj->btf : NULL;
+}
+
int bpf_object__btf_fd(const struct bpf_object *obj)
{
return obj->btf ? btf__fd(obj->btf) : -1;
@@ -2667,9 +2625,38 @@ static const struct {
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
+#define MAX_TYPE_NAME_SIZE 32
+
+static char *libbpf_get_type_names(bool attach_type)
+{
+ int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
+ char *buf;
+
+ buf = malloc(len);
+ if (!buf)
+ return NULL;
+
+ buf[0] = '\0';
+ /* Build a string buf with all available names */
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (attach_type && !section_names[i].is_attachable)
+ continue;
+
+ if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
+ free(buf);
+ return NULL;
+ }
+ strcat(buf, " ");
+ strcat(buf, section_names[i].sec);
+ }
+
+ return buf;
+}
+
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type)
{
+ char *type_names;
int i;
if (!name)
@@ -2682,12 +2669,20 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
*expected_attach_type = section_names[i].expected_attach_type;
return 0;
}
+ pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(false);
+ if (type_names != NULL) {
+ pr_info("supported section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
return -EINVAL;
}
int libbpf_attach_type_by_name(const char *name,
enum bpf_attach_type *attach_type)
{
+ char *type_names;
int i;
if (!name)
@@ -2701,6 +2696,13 @@ int libbpf_attach_type_by_name(const char *name,
*attach_type = section_names[i].attach_type;
return 0;
}
+ pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(true);
+ if (type_names != NULL) {
+ pr_info("attachable section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
return -EINVAL;
}
@@ -2833,13 +2835,19 @@ bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
struct bpf_map *pos;
- bpf_map__for_each(pos, obj) {
+ bpf_object__for_each_map(pos, obj) {
if (pos->name && !strcmp(pos->name, name))
return pos;
}
return NULL;
}
+int
+bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
+{
+ return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
+}
+
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
@@ -2907,8 +2915,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
err = bpf_program__identify_section(prog, &prog_type,
&expected_attach_type);
if (err < 0) {
- pr_warning("failed to guess program type based on section name %s\n",
- prog->section_name);
bpf_object__close(obj);
return -EINVAL;
}
@@ -2922,7 +2928,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
first_prog = prog;
}
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!bpf_map__is_offload_neutral(map))
map->map_ifindex = attr->ifindex;
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5f68d7b75215..b4652aa1a58a 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -47,17 +47,16 @@ enum libbpf_errno {
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
-/*
- * __printf is defined in include/linux/compiler-gcc.h. However,
- * it would be better if libbpf.h didn't depend on Linux header files.
- * So instead of __printf, here we use gcc attribute directly.
- */
-typedef int (*libbpf_print_fn_t)(const char *, ...)
- __attribute__((format(printf, 1, 2)));
+enum libbpf_print_level {
+ LIBBPF_WARN,
+ LIBBPF_INFO,
+ LIBBPF_DEBUG,
+};
-LIBBPF_API void libbpf_set_print(libbpf_print_fn_t warn,
- libbpf_print_fn_t info,
- libbpf_print_fn_t debug);
+typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
+ const char *, va_list ap);
+
+LIBBPF_API void libbpf_set_print(libbpf_print_fn_t fn);
/* Hide internal to user */
struct bpf_object;
@@ -90,6 +89,9 @@ LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
+
+struct btf;
+LIBBPF_API struct btf *bpf_object__btf(struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
@@ -264,6 +266,9 @@ struct bpf_map;
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);
+LIBBPF_API int
+bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name);
+
/*
* Get bpf_map through the offset of corresponding struct bpf_map_def
* in the BPF object file.
@@ -273,10 +278,11 @@ bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
LIBBPF_API struct bpf_map *
bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
-#define bpf_map__for_each(pos, obj) \
+#define bpf_object__for_each_map(pos, obj) \
for ((pos) = bpf_map__next(NULL, (obj)); \
(pos) != NULL; \
(pos) = bpf_map__next((pos), (obj)))
+#define bpf_map__for_each bpf_object__for_each_map
LIBBPF_API struct bpf_map *
bpf_map__prev(struct bpf_map *map, struct bpf_object *obj);
@@ -292,6 +298,7 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
@@ -314,6 +321,7 @@ LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
+LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_DONE = 0,
@@ -355,6 +363,20 @@ LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
__u32 insn_off, __u32 nr_skip);
+/*
+ * Probe for supported system features
+ *
+ * Note that running many of these probes in a short amount of time can cause
+ * the kernel to hit the limit on lockable memory allowed for the user
+ * (RLIMIT_MEMLOCK), causing subsequent probes to fail. In that case, the
+ * caller may want to raise that limit with setrlimit().
+ */
+LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
+ __u32 ifindex);
+LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
+LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
+ enum bpf_prog_type prog_type, __u32 ifindex);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
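
As the probe comment above suggests, callers issuing many probes back to back
may want to raise RLIMIT_MEMLOCK first. A hedged sketch of host-side feature
probing (ifindex 0 means no offload device; the chosen program, map and helper
types are arbitrary examples):

#include <stdio.h>
#include <sys/resource.h>
#include <linux/bpf.h>
#include "libbpf.h"

static void probe_example(void)
{
	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

	/* Avoid hitting the lockable-memory limit during a burst of probes. */
	if (setrlimit(RLIMIT_MEMLOCK, &r))
		perror("setrlimit");

	printf("XDP programs:  %s\n",
	       bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0) ? "yes" : "no");
	printf("hash maps:     %s\n",
	       bpf_probe_map_type(BPF_MAP_TYPE_HASH, 0) ? "yes" : "no");
	printf("redirect_map:  %s\n",
	       bpf_probe_helper(BPF_FUNC_redirect_map, BPF_PROG_TYPE_XDP, 0) ?
	       "yes" : "no");
}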
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index cd02cd4e2cc3..778a26702a70 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -124,3 +124,33 @@ LIBBPF_0.0.1 {
local:
*;
};
+
+LIBBPF_0.0.2 {
+ global:
+ bpf_probe_helper;
+ bpf_probe_map_type;
+ bpf_probe_prog_type;
+ bpf_map__resize;
+ bpf_map_lookup_elem_flags;
+ bpf_object__btf;
+ bpf_object__find_map_fd_by_name;
+ bpf_get_link_xdp_id;
+ btf__dedup;
+ btf__get_map_kv_tids;
+ btf__get_nr_types;
+ btf__get_raw_data;
+ btf__load;
+ btf_ext__free;
+ btf_ext__func_info_rec_size;
+ btf_ext__get_raw_data;
+ btf_ext__line_info_rec_size;
+ btf_ext__new;
+ btf_ext__reloc_func_info;
+ btf_ext__reloc_line_info;
+ xsk_umem__create;
+ xsk_socket__create;
+ xsk_umem__delete;
+ xsk_socket__delete;
+ xsk_umem__fd;
+ xsk_socket__fd;
+} LIBBPF_0.0.1;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
new file mode 100644
index 000000000000..8c3a1c04dcb2
--- /dev/null
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/utsname.h>
+
+#include <linux/filter.h>
+#include <linux/kernel.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+
+static bool grep(const char *buffer, const char *pattern)
+{
+ return !!strstr(buffer, pattern);
+}
+
+static int get_vendor_id(int ifindex)
+{
+ char ifname[IF_NAMESIZE], path[64], buf[8];
+ ssize_t len;
+ int fd;
+
+ if (!if_indextoname(ifindex, ifname))
+ return -1;
+
+ snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len < 0)
+ return -1;
+ if (len >= (ssize_t)sizeof(buf))
+ return -1;
+ buf[len] = '\0';
+
+ return strtol(buf, NULL, 0);
+}
+
+static int get_kernel_version(void)
+{
+ int version, subversion, patchlevel;
+ struct utsname utsn;
+
+ /* Return 0 on failure, and attempt to probe with empty kversion */
+ if (uname(&utsn))
+ return 0;
+
+ if (sscanf(utsn.release, "%d.%d.%d",
+ &version, &subversion, &patchlevel) != 3)
+ return 0;
+
+ return (version << 16) + (subversion << 8) + patchlevel;
+}
+
+static void
+probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
+ size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
+{
+ struct bpf_load_program_attr xattr = {};
+ int fd;
+
+ switch (prog_type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
+ break;
+ case BPF_PROG_TYPE_KPROBE:
+ xattr.kern_version = get_kernel_version();
+ break;
+ case BPF_PROG_TYPE_UNSPEC:
+ case BPF_PROG_TYPE_SOCKET_FILTER:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ case BPF_PROG_TYPE_XDP:
+ case BPF_PROG_TYPE_PERF_EVENT:
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_LWT_IN:
+ case BPF_PROG_TYPE_LWT_OUT:
+ case BPF_PROG_TYPE_LWT_XMIT:
+ case BPF_PROG_TYPE_SOCK_OPS:
+ case BPF_PROG_TYPE_SK_SKB:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
+ case BPF_PROG_TYPE_SK_MSG:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+ case BPF_PROG_TYPE_LIRC_MODE2:
+ case BPF_PROG_TYPE_SK_REUSEPORT:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ default:
+ break;
+ }
+
+ xattr.prog_type = prog_type;
+ xattr.insns = insns;
+ xattr.insns_cnt = insns_cnt;
+ xattr.license = "GPL";
+ xattr.prog_ifindex = ifindex;
+
+ fd = bpf_load_program_xattr(&xattr, buf, buf_len);
+ if (fd >= 0)
+ close(fd);
+}
+
+bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN()
+ };
+
+ if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
+ /* nfp returns -EINVAL on exit(0) with TC offload */
+ insns[0].imm = 2;
+
+ errno = 0;
+ probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
+
+ return errno != EINVAL && errno != EOPNOTSUPP;
+}
+
+bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
+{
+ int key_size, value_size, max_entries, map_flags;
+ struct bpf_create_map_attr attr = {};
+ int fd = -1, fd_inner;
+
+ key_size = sizeof(__u32);
+ value_size = sizeof(__u32);
+ max_entries = 1;
+ map_flags = 0;
+
+ switch (map_type) {
+ case BPF_MAP_TYPE_STACK_TRACE:
+ value_size = sizeof(__u64);
+ break;
+ case BPF_MAP_TYPE_LPM_TRIE:
+ key_size = sizeof(__u64);
+ value_size = sizeof(__u64);
+ map_flags = BPF_F_NO_PREALLOC;
+ break;
+ case BPF_MAP_TYPE_CGROUP_STORAGE:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+ key_size = sizeof(struct bpf_cgroup_storage_key);
+ value_size = sizeof(__u64);
+ max_entries = 0;
+ break;
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ key_size = 0;
+ break;
+ case BPF_MAP_TYPE_UNSPEC:
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ case BPF_MAP_TYPE_PROG_ARRAY:
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_LRU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ default:
+ break;
+ }
+
+ if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+ map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ /* TODO: probe for device, once libbpf has a function to create
+ * map-in-map for offload
+ */
+ if (ifindex)
+ return false;
+
+ fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
+ sizeof(__u32), sizeof(__u32), 1, 0);
+ if (fd_inner < 0)
+ return false;
+ fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
+ fd_inner, 1, 0);
+ close(fd_inner);
+ } else {
+ /* Note: No other restriction on map type probes for offload */
+ attr.map_type = map_type;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+ attr.map_flags = map_flags;
+ attr.map_ifindex = ifindex;
+
+ fd = bpf_create_map_xattr(&attr);
+ }
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0;
+}
+
+bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
+ __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_EMIT_CALL(id),
+ BPF_EXIT_INSN()
+ };
+ char buf[4096] = {};
+ bool res;
+
+ probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
+ ifindex);
+ res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
+
+ if (ifindex) {
+ switch (get_vendor_id(ifindex)) {
+ case 0x19ee: /* Netronome specific */
+ res = res && !grep(buf, "not supported by FW") &&
+ !grep(buf, "unsupported function id");
+ break;
+ default:
+ break;
+ }
+ }
+
+ return res;
+}
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
new file mode 100644
index 000000000000..81ecda0cb9c9
--- /dev/null
+++ b/tools/lib/bpf/libbpf_util.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2019 Facebook */
+
+#ifndef __LIBBPF_LIBBPF_UTIL_H
+#define __LIBBPF_LIBBPF_UTIL_H
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void libbpf_print(enum libbpf_print_level level,
+ const char *format, ...)
+ __attribute__((format(printf, 2, 3)));
+
+#define __pr(level, fmt, ...) \
+do { \
+ libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
+#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 0ce67aea8f3b..ce3ec81b71c0 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -21,6 +21,12 @@
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
void *cookie);
+struct xdp_id_md {
+ int ifindex;
+ __u32 flags;
+ __u32 id;
+};
+
int libbpf_netlink_open(__u32 *nl_pid)
{
struct sockaddr_nl sa;
@@ -196,6 +202,85 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
return dump_link_nlmsg(cookie, ifi, tb);
}
+static unsigned char get_xdp_id_attr(unsigned char mode, __u32 flags)
+{
+ if (mode != XDP_ATTACHED_MULTI)
+ return IFLA_XDP_PROG_ID;
+ if (flags & XDP_FLAGS_DRV_MODE)
+ return IFLA_XDP_DRV_PROG_ID;
+ if (flags & XDP_FLAGS_HW_MODE)
+ return IFLA_XDP_HW_PROG_ID;
+ if (flags & XDP_FLAGS_SKB_MODE)
+ return IFLA_XDP_SKB_PROG_ID;
+
+ return IFLA_XDP_UNSPEC;
+}
+
+static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb)
+{
+ struct nlattr *xdp_tb[IFLA_XDP_MAX + 1];
+ struct xdp_id_md *xdp_id = cookie;
+ struct ifinfomsg *ifinfo = msg;
+ unsigned char mode, xdp_attr;
+ int ret;
+
+ if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index)
+ return 0;
+
+ if (!tb[IFLA_XDP])
+ return 0;
+
+ ret = libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX, tb[IFLA_XDP], NULL);
+ if (ret)
+ return ret;
+
+ if (!xdp_tb[IFLA_XDP_ATTACHED])
+ return 0;
+
+ mode = libbpf_nla_getattr_u8(xdp_tb[IFLA_XDP_ATTACHED]);
+ if (mode == XDP_ATTACHED_NONE)
+ return 0;
+
+ xdp_attr = get_xdp_id_attr(mode, xdp_id->flags);
+ if (!xdp_attr || !xdp_tb[xdp_attr])
+ return 0;
+
+ xdp_id->id = libbpf_nla_getattr_u32(xdp_tb[xdp_attr]);
+
+ return 0;
+}
+
+int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
+{
+ struct xdp_id_md xdp_id = {};
+ int sock, ret;
+ __u32 nl_pid;
+ __u32 mask;
+
+ if (flags & ~XDP_FLAGS_MASK)
+ return -EINVAL;
+
+ /* Check whether the single {HW,DRV,SKB} mode is set */
+ flags &= (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE);
+ mask = flags - 1;
+ if (flags && flags & mask)
+ return -EINVAL;
+
+ sock = libbpf_netlink_open(&nl_pid);
+ if (sock < 0)
+ return sock;
+
+ xdp_id.ifindex = ifindex;
+ xdp_id.flags = flags;
+
+ ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_id, &xdp_id);
+ if (!ret)
+ *prog_id = xdp_id.id;
+
+ close(sock);
+ return ret;
+}
+
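
A short usage sketch for the new query helper, mirroring how
xsk_setup_xdp_prog() uses it further below; the interface name and the mode
flag are illustrative.

#include <errno.h>
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include "libbpf.h"

/* Sketch: report which XDP program, if any, is attached in driver mode. */
static int xdp_id_example(const char *ifname)
{
	__u32 prog_id = 0;
	int ifindex = if_nametoindex(ifname);
	int err;

	if (!ifindex)
		return -errno;

	err = bpf_get_link_xdp_id(ifindex, &prog_id, XDP_FLAGS_DRV_MODE);
	if (err)
		fprintf(stderr, "query failed: %d\n", err);
	else
		printf("%s: %s (id %u)\n", ifname,
		       prog_id ? "XDP attached" : "no XDP program", prog_id);
	return err;
}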
int libbpf_nl_get_link(int sock, unsigned int nl_pid,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
diff --git a/tools/lib/bpf/test_libbpf.cpp b/tools/lib/bpf/test_libbpf.cpp
index abf3fc25c9fa..fc134873bb6d 100644
--- a/tools/lib/bpf/test_libbpf.cpp
+++ b/tools/lib/bpf/test_libbpf.cpp
@@ -8,11 +8,11 @@
int main(int argc, char *argv[])
{
/* libbpf.h */
- libbpf_set_print(NULL, NULL, NULL);
+ libbpf_set_print(NULL);
/* bpf.h */
bpf_prog_get_fd_by_id(0);
/* btf.h */
- btf__new(NULL, 0, NULL);
+ btf__new(NULL, 0);
}
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
new file mode 100644
index 000000000000..f98ac82c9aea
--- /dev/null
+++ b/tools/lib/bpf/xsk.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * AF_XDP user-space access library.
+ *
+ * Copyright(c) 2018 - 2019 Intel Corporation.
+ *
+ * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <asm/barrier.h>
+#include <linux/compiler.h>
+#include <linux/ethtool.h>
+#include <linux/filter.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_xdp.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_util.h"
+#include "xsk.h"
+
+#ifndef SOL_XDP
+ #define SOL_XDP 283
+#endif
+
+#ifndef AF_XDP
+ #define AF_XDP 44
+#endif
+
+#ifndef PF_XDP
+ #define PF_XDP AF_XDP
+#endif
+
+struct xsk_umem {
+ struct xsk_ring_prod *fill;
+ struct xsk_ring_cons *comp;
+ char *umem_area;
+ struct xsk_umem_config config;
+ int fd;
+ int refcount;
+};
+
+struct xsk_socket {
+ struct xsk_ring_cons *rx;
+ struct xsk_ring_prod *tx;
+ __u64 outstanding_tx;
+ struct xsk_umem *umem;
+ struct xsk_socket_config config;
+ int fd;
+ int xsks_map;
+ int ifindex;
+ int prog_fd;
+ int qidconf_map_fd;
+ int xsks_map_fd;
+ __u32 queue_id;
+ char ifname[IFNAMSIZ];
+};
+
+struct xsk_nl_info {
+ bool xdp_prog_attached;
+ int ifindex;
+ int fd;
+};
+
+/* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit.
+ * Unfortunately, it is not part of glibc.
+ */
+static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, __u64 offset)
+{
+#ifdef __NR_mmap2
+ unsigned int page_shift = __builtin_ffs(getpagesize()) - 1;
+ long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd,
+ (off_t)(offset >> page_shift));
+
+ return (void *)ret;
+#else
+ return mmap(addr, length, prot, flags, fd, offset);
+#endif
+}
+
+int xsk_umem__fd(const struct xsk_umem *umem)
+{
+ return umem ? umem->fd : -EINVAL;
+}
+
+int xsk_socket__fd(const struct xsk_socket *xsk)
+{
+ return xsk ? xsk->fd : -EINVAL;
+}
+
+static bool xsk_page_aligned(void *buffer)
+{
+ unsigned long addr = (unsigned long)buffer;
+
+ return !(addr & (getpagesize() - 1));
+}
+
+static void xsk_set_umem_config(struct xsk_umem_config *cfg,
+ const struct xsk_umem_config *usr_cfg)
+{
+ if (!usr_cfg) {
+ cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
+ cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
+ return;
+ }
+
+ cfg->fill_size = usr_cfg->fill_size;
+ cfg->comp_size = usr_cfg->comp_size;
+ cfg->frame_size = usr_cfg->frame_size;
+ cfg->frame_headroom = usr_cfg->frame_headroom;
+}
+
+static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
+ const struct xsk_socket_config *usr_cfg)
+{
+ if (!usr_cfg) {
+ cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg->libbpf_flags = 0;
+ cfg->xdp_flags = 0;
+ cfg->bind_flags = 0;
+ return;
+ }
+
+ cfg->rx_size = usr_cfg->rx_size;
+ cfg->tx_size = usr_cfg->tx_size;
+ cfg->libbpf_flags = usr_cfg->libbpf_flags;
+ cfg->xdp_flags = usr_cfg->xdp_flags;
+ cfg->bind_flags = usr_cfg->bind_flags;
+}
+
+int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *usr_config)
+{
+ struct xdp_mmap_offsets off;
+ struct xdp_umem_reg mr;
+ struct xsk_umem *umem;
+ socklen_t optlen;
+ void *map;
+ int err;
+
+ if (!umem_area || !umem_ptr || !fill || !comp)
+ return -EFAULT;
+ if (!size && !xsk_page_aligned(umem_area))
+ return -EINVAL;
+
+ umem = calloc(1, sizeof(*umem));
+ if (!umem)
+ return -ENOMEM;
+
+ umem->fd = socket(AF_XDP, SOCK_RAW, 0);
+ if (umem->fd < 0) {
+ err = -errno;
+ goto out_umem_alloc;
+ }
+
+ umem->umem_area = umem_area;
+ xsk_set_umem_config(&umem->config, usr_config);
+
+ mr.addr = (uintptr_t)umem_area;
+ mr.len = size;
+ mr.chunk_size = umem->config.frame_size;
+ mr.headroom = umem->config.frame_headroom;
+
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
+ &umem->config.fill_size,
+ sizeof(umem->config.fill_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
+ &umem->config.comp_size,
+ sizeof(umem->config.comp_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ optlen = sizeof(off);
+ err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ map = xsk_mmap(NULL, off.fr.desc +
+ umem->config.fill_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ umem->fd, XDP_UMEM_PGOFF_FILL_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ umem->fill = fill;
+ fill->mask = umem->config.fill_size - 1;
+ fill->size = umem->config.fill_size;
+ fill->producer = map + off.fr.producer;
+ fill->consumer = map + off.fr.consumer;
+ fill->ring = map + off.fr.desc;
+ fill->cached_cons = umem->config.fill_size;
+
+ map = xsk_mmap(NULL,
+ off.cr.desc + umem->config.comp_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_mmap;
+ }
+
+ umem->comp = comp;
+ comp->mask = umem->config.comp_size - 1;
+ comp->size = umem->config.comp_size;
+ comp->producer = map + off.cr.producer;
+ comp->consumer = map + off.cr.consumer;
+ comp->ring = map + off.cr.desc;
+
+ *umem_ptr = umem;
+ return 0;
+
+out_mmap:
+ munmap(umem->fill,
+ off.fr.desc + umem->config.fill_size * sizeof(__u64));
+out_socket:
+ close(umem->fd);
+out_umem_alloc:
+ free(umem);
+ return err;
+}
+
+static int xsk_load_xdp_prog(struct xsk_socket *xsk)
+{
+ char bpf_log_buf[BPF_LOG_BUF_SIZE];
+ int err, prog_fd;
+
+ /* This is the C-program:
+ * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
+ * {
+ * int *qidconf, index = ctx->rx_queue_index;
+ *
+ * // A set entry here means that the corresponding queue_id
+ * // has an active AF_XDP socket bound to it.
+ * qidconf = bpf_map_lookup_elem(&qidconf_map, &index);
+ * if (!qidconf)
+ * return XDP_ABORTED;
+ *
+ * if (*qidconf)
+ * return bpf_redirect_map(&xsks_map, index, 0);
+ *
+ * return XDP_PASS;
+ * }
+ */
+ struct bpf_insn prog[] = {
+ /* r1 = *(u32 *)(r1 + 16) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
+ /* *(u32 *)(r10 - 4) = r1 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ /* if r1 == 0 goto +8 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ /* r1 = *(u32 *)(r1 + 0) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
+ /* if r1 == 0 goto +5 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
+ /* r2 = *(u32 *)(r10 - 4) */
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
+ BPF_MOV32_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_redirect_map),
+ /* The jumps are to this instruction */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+
+ prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
+ "LGPL-2.1 or BSD-2-Clause", 0, bpf_log_buf,
+ BPF_LOG_BUF_SIZE);
+ if (prog_fd < 0) {
+ pr_warning("BPF log buffer:\n%s", bpf_log_buf);
+ return prog_fd;
+ }
+
+ err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
+ if (err) {
+ close(prog_fd);
+ return err;
+ }
+
+ xsk->prog_fd = prog_fd;
+ return 0;
+}
+
+static int xsk_get_max_queues(struct xsk_socket *xsk)
+{
+ struct ethtool_channels channels;
+ struct ifreq ifr;
+ int fd, err, ret;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return -errno;
+
+ channels.cmd = ETHTOOL_GCHANNELS;
+ ifr.ifr_data = (void *)&channels;
+ strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ);
+ err = ioctl(fd, SIOCETHTOOL, &ifr);
+ if (err && errno != EOPNOTSUPP) {
+ ret = -errno;
+ goto out;
+ }
+
+ if (channels.max_combined == 0 || errno == EOPNOTSUPP)
+ /* If the device says it has no channels, then all traffic
+ * is sent to a single stream, so max queues = 1.
+ */
+ ret = 1;
+ else
+ ret = channels.max_combined;
+
+out:
+ close(fd);
+ return ret;
+}
+
+static int xsk_create_bpf_maps(struct xsk_socket *xsk)
+{
+ int max_queues;
+ int fd;
+
+ max_queues = xsk_get_max_queues(xsk);
+ if (max_queues < 0)
+ return max_queues;
+
+ fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map",
+ sizeof(int), sizeof(int), max_queues, 0);
+ if (fd < 0)
+ return fd;
+ xsk->qidconf_map_fd = fd;
+
+ fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
+ sizeof(int), sizeof(int), max_queues, 0);
+ if (fd < 0) {
+ close(xsk->qidconf_map_fd);
+ return fd;
+ }
+ xsk->xsks_map_fd = fd;
+
+ return 0;
+}
+
+static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
+{
+ close(xsk->qidconf_map_fd);
+ close(xsk->xsks_map_fd);
+}
+
+static int xsk_update_bpf_maps(struct xsk_socket *xsk, int qidconf_value,
+ int xsks_value)
+{
+ bool qidconf_map_updated = false, xsks_map_updated = false;
+ struct bpf_prog_info prog_info = {};
+ __u32 prog_len = sizeof(prog_info);
+ struct bpf_map_info map_info;
+ __u32 map_len = sizeof(map_info);
+ __u32 *map_ids;
+ int reset_value = 0;
+ __u32 num_maps;
+ unsigned int i;
+ int err;
+
+ err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
+ if (err)
+ return err;
+
+ num_maps = prog_info.nr_map_ids;
+
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
+ if (!map_ids)
+ return -ENOMEM;
+
+ memset(&prog_info, 0, prog_len);
+ prog_info.nr_map_ids = num_maps;
+ prog_info.map_ids = (__u64)(unsigned long)map_ids;
+
+ err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
+ if (err)
+ goto out_map_ids;
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ int fd;
+
+ fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (fd < 0) {
+ err = -errno;
+ goto out_maps;
+ }
+
+ err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
+ if (err)
+ goto out_maps;
+
+ if (!strcmp(map_info.name, "qidconf_map")) {
+ err = bpf_map_update_elem(fd, &xsk->queue_id,
+ &qidconf_value, 0);
+ if (err)
+ goto out_maps;
+ qidconf_map_updated = true;
+ xsk->qidconf_map_fd = fd;
+ } else if (!strcmp(map_info.name, "xsks_map")) {
+ err = bpf_map_update_elem(fd, &xsk->queue_id,
+ &xsks_value, 0);
+ if (err)
+ goto out_maps;
+ xsks_map_updated = true;
+ xsk->xsks_map_fd = fd;
+ }
+
+ if (qidconf_map_updated && xsks_map_updated)
+ break;
+ }
+
+ if (!(qidconf_map_updated && xsks_map_updated)) {
+ err = -ENOENT;
+ goto out_maps;
+ }
+
+ err = 0;
+ goto out_success;
+
+out_maps:
+ if (qidconf_map_updated)
+ (void)bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id,
+ &reset_value, 0);
+ if (xsks_map_updated)
+ (void)bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
+ &reset_value, 0);
+out_success:
+ if (qidconf_map_updated)
+ close(xsk->qidconf_map_fd);
+ if (xsks_map_updated)
+ close(xsk->xsks_map_fd);
+out_map_ids:
+ free(map_ids);
+ return err;
+}
+
+static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
+{
+ bool prog_attached = false;
+ __u32 prog_id = 0;
+ int err;
+
+ err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
+ xsk->config.xdp_flags);
+ if (err)
+ return err;
+
+ if (!prog_id) {
+ prog_attached = true;
+ err = xsk_create_bpf_maps(xsk);
+ if (err)
+ return err;
+
+ err = xsk_load_xdp_prog(xsk);
+ if (err)
+ goto out_maps;
+ } else {
+ xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ }
+
+ err = xsk_update_bpf_maps(xsk, true, xsk->fd);
+ if (err)
+ goto out_load;
+
+ return 0;
+
+out_load:
+ if (prog_attached)
+ close(xsk->prog_fd);
+out_maps:
+ if (prog_attached)
+ xsk_delete_bpf_maps(xsk);
+ return err;
+}
+
+int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ __u32 queue_id, struct xsk_umem *umem,
+ struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *usr_config)
+{
+ struct sockaddr_xdp sxdp = {};
+ struct xdp_mmap_offsets off;
+ struct xsk_socket *xsk;
+ socklen_t optlen;
+ void *map;
+ int err;
+
+ if (!umem || !xsk_ptr || !rx || !tx)
+ return -EFAULT;
+
+ if (umem->refcount) {
+ pr_warning("Error: shared umems not supported by libbpf.\n");
+ return -EBUSY;
+ }
+
+ xsk = calloc(1, sizeof(*xsk));
+ if (!xsk)
+ return -ENOMEM;
+
+ if (umem->refcount++ > 0) {
+ xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
+ if (xsk->fd < 0) {
+ err = -errno;
+ goto out_xsk_alloc;
+ }
+ } else {
+ xsk->fd = umem->fd;
+ }
+
+ xsk->outstanding_tx = 0;
+ xsk->queue_id = queue_id;
+ xsk->umem = umem;
+ xsk->ifindex = if_nametoindex(ifname);
+ if (!xsk->ifindex) {
+ err = -errno;
+ goto out_socket;
+ }
+ strncpy(xsk->ifname, ifname, IFNAMSIZ);
+
+ xsk_set_xdp_socket_config(&xsk->config, usr_config);
+
+ if (rx) {
+ err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
+ &xsk->config.rx_size,
+ sizeof(xsk->config.rx_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ }
+ if (tx) {
+ err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
+ &xsk->config.tx_size,
+ sizeof(xsk->config.tx_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ }
+
+ optlen = sizeof(off);
+ err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ if (rx) {
+ map = xsk_mmap(NULL, off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_RX_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ rx->mask = xsk->config.rx_size - 1;
+ rx->size = xsk->config.rx_size;
+ rx->producer = map + off.rx.producer;
+ rx->consumer = map + off.rx.consumer;
+ rx->ring = map + off.rx.desc;
+ }
+ xsk->rx = rx;
+
+ if (tx) {
+ map = xsk_mmap(NULL, off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_TX_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_mmap_rx;
+ }
+
+ tx->mask = xsk->config.tx_size - 1;
+ tx->size = xsk->config.tx_size;
+ tx->producer = map + off.tx.producer;
+ tx->consumer = map + off.tx.consumer;
+ tx->ring = map + off.tx.desc;
+ tx->cached_cons = xsk->config.tx_size;
+ }
+ xsk->tx = tx;
+
+ sxdp.sxdp_family = PF_XDP;
+ sxdp.sxdp_ifindex = xsk->ifindex;
+ sxdp.sxdp_queue_id = xsk->queue_id;
+ sxdp.sxdp_flags = xsk->config.bind_flags;
+
+ err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
+ if (err) {
+ err = -errno;
+ goto out_mmap_tx;
+ }
+
+ if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
+ err = xsk_setup_xdp_prog(xsk);
+ if (err)
+ goto out_mmap_tx;
+ }
+
+ *xsk_ptr = xsk;
+ return 0;
+
+out_mmap_tx:
+ if (tx)
+ munmap(xsk->tx,
+ off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc));
+out_mmap_rx:
+ if (rx)
+ munmap(xsk->rx,
+ off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc));
+out_socket:
+ if (--umem->refcount)
+ close(xsk->fd);
+out_xsk_alloc:
+ free(xsk);
+ return err;
+}
+
+int xsk_umem__delete(struct xsk_umem *umem)
+{
+ struct xdp_mmap_offsets off;
+ socklen_t optlen;
+ int err;
+
+ if (!umem)
+ return 0;
+
+ if (umem->refcount)
+ return -EBUSY;
+
+ optlen = sizeof(off);
+ err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (!err) {
+ munmap(umem->fill->ring,
+ off.fr.desc + umem->config.fill_size * sizeof(__u64));
+ munmap(umem->comp->ring,
+ off.cr.desc + umem->config.comp_size * sizeof(__u64));
+ }
+
+ close(umem->fd);
+ free(umem);
+
+ return 0;
+}
+
+void xsk_socket__delete(struct xsk_socket *xsk)
+{
+ struct xdp_mmap_offsets off;
+ socklen_t optlen;
+ int err;
+
+ if (!xsk)
+ return;
+
+ (void)xsk_update_bpf_maps(xsk, 0, 0);
+
+ optlen = sizeof(off);
+ err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (!err) {
+ if (xsk->rx)
+ munmap(xsk->rx->ring,
+ off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc));
+ if (xsk->tx)
+ munmap(xsk->tx->ring,
+ off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc));
+ }
+
+ xsk->umem->refcount--;
+ /* Do not close an fd that also has an associated umem connected
+ * to it.
+ */
+ if (xsk->fd != xsk->umem->fd)
+ close(xsk->fd);
+ free(xsk);
+}
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
new file mode 100644
index 000000000000..a497f00e2962
--- /dev/null
+++ b/tools/lib/bpf/xsk.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * AF_XDP user-space access library.
+ *
+ * Copyright(c) 2018 - 2019 Intel Corporation.
+ *
+ * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#ifndef __LIBBPF_XSK_H
+#define __LIBBPF_XSK_H
+
+#include <stdio.h>
+#include <stdint.h>
+#include <linux/if_xdp.h>
+
+#include "libbpf.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Do not access these members directly. Use the functions below. */
+#define DEFINE_XSK_RING(name) \
+struct name { \
+ __u32 cached_prod; \
+ __u32 cached_cons; \
+ __u32 mask; \
+ __u32 size; \
+ __u32 *producer; \
+ __u32 *consumer; \
+ void *ring; \
+}
+
+DEFINE_XSK_RING(xsk_ring_prod);
+DEFINE_XSK_RING(xsk_ring_cons);
+
+struct xsk_umem;
+struct xsk_socket;
+
+static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
+ __u32 idx)
+{
+ __u64 *addrs = (__u64 *)fill->ring;
+
+ return &addrs[idx & fill->mask];
+}
+
+static inline const __u64 *
+xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
+{
+ const __u64 *addrs = (const __u64 *)comp->ring;
+
+ return &addrs[idx & comp->mask];
+}
+
+static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
+ __u32 idx)
+{
+ struct xdp_desc *descs = (struct xdp_desc *)tx->ring;
+
+ return &descs[idx & tx->mask];
+}
+
+static inline const struct xdp_desc *
+xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
+{
+ const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;
+
+ return &descs[idx & rx->mask];
+}
+
+static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
+{
+ __u32 free_entries = r->cached_cons - r->cached_prod;
+
+ if (free_entries >= nb)
+ return free_entries;
+
+ /* Refresh the local tail pointer.
+ * cached_cons is r->size bigger than the real consumer pointer so
+ * that this addition can be avoided in the more frequently
+ * executed code that computes free_entries at the beginning of
+ * this function. Without this optimization it would have been
+ * free_entries = r->cached_cons - r->cached_prod + r->size.
+ */
+ r->cached_cons = *r->consumer + r->size;
+
+ return r->cached_cons - r->cached_prod;
+}
+
+static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
+{
+ __u32 entries = r->cached_prod - r->cached_cons;
+
+ if (entries == 0) {
+ r->cached_prod = *r->producer;
+ entries = r->cached_prod - r->cached_cons;
+ }
+
+ return (entries > nb) ? nb : entries;
+}
+
+static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
+ size_t nb, __u32 *idx)
+{
+ if (unlikely(xsk_prod_nb_free(prod, nb) < nb))
+ return 0;
+
+ *idx = prod->cached_prod;
+ prod->cached_prod += nb;
+
+ return nb;
+}
+
+static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
+{
+ /* Make sure everything has been written to the ring before signalling
+ * this to the kernel.
+ */
+ smp_wmb();
+
+ *prod->producer += nb;
+}
+
+static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
+ size_t nb, __u32 *idx)
+{
+ size_t entries = xsk_cons_nb_avail(cons, nb);
+
+ if (likely(entries > 0)) {
+ /* Make sure we do not speculatively read the data before
+ * we have received the packet buffers from the ring.
+ */
+ smp_rmb();
+
+ *idx = cons->cached_cons;
+ cons->cached_cons += entries;
+ }
+
+ return entries;
+}
+
+static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
+{
+ *cons->consumer += nb;
+}
+
+static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
+{
+ return &((char *)umem_area)[addr];
+}
+
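
The reserve/submit and peek/release helpers above encode the producer/consumer
protocol for these rings. A hedged sketch of the TX-side pattern follows; the
caller-provided `frames` array of descriptors is an assumption for
illustration.

/* Sketch: enqueue up to n frames on a TX ring and publish them in one go. */
static inline size_t tx_batch_example(struct xsk_ring_prod *tx,
				      const struct xdp_desc *frames, size_t n)
{
	size_t got, i;
	__u32 idx;

	got = xsk_ring_prod__reserve(tx, n, &idx);
	for (i = 0; i < got; i++)
		*xsk_ring_prod__tx_desc(tx, idx + i) = frames[i];

	if (got)
		xsk_ring_prod__submit(tx, got);	/* smp_wmb(), then bump *producer */
	return got;
}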
+LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
+LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);
+
+#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
+#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
+#define XSK_UMEM__DEFAULT_FRAME_SHIFT 11 /* 2048 bytes */
+#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
+#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
+
+struct xsk_umem_config {
+ __u32 fill_size;
+ __u32 comp_size;
+ __u32 frame_size;
+ __u32 frame_headroom;
+};
+
+/* Flags for the libbpf_flags field. */
+#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)
+
+struct xsk_socket_config {
+ __u32 rx_size;
+ __u32 tx_size;
+ __u32 libbpf_flags;
+ __u32 xdp_flags;
+ __u16 bind_flags;
+};
+
+/* Set config to NULL to get the default configuration. */
+LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
+LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
+ const char *ifname, __u32 queue_id,
+ struct xsk_umem *umem,
+ struct xsk_ring_cons *rx,
+ struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *config);
+
+/* Returns 0 for success and -EBUSY if the umem is still in use. */
+LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
+LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __LIBBPF_XSK_H */
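
Putting the pieces together, a minimal setup sketch for the new AF_XDP API;
the frame count, the anonymous mmap() backing buffer and the queue number are
illustrative choices, and error unwinding is omitted for brevity.

#include <stdio.h>
#include <sys/mman.h>
#include "xsk.h"

#define NUM_FRAMES 4096

static int xsk_setup_example(const char *ifname)
{
	__u64 size = (__u64)NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_umem *umem;
	struct xsk_socket *xsk;
	void *bufs;
	int err;

	/* The umem backing store must be page aligned; mmap() guarantees that. */
	bufs = mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (bufs == MAP_FAILED)
		return -1;

	/* NULL configs select the defaults defined in xsk.h. */
	err = xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL);
	if (!err)
		err = xsk_socket__create(&xsk, ifname, 0 /* queue_id */, umem,
					 &rx, &tx, NULL);
	if (err)
		fprintf(stderr, "AF_XDP setup failed: %d\n", err);
	return err;
}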
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
index d640a9761f09..a81d91d4fc78 100644
--- a/tools/lib/lockdep/include/liblockdep/common.h
+++ b/tools/lib/lockdep/include/liblockdep/common.h
@@ -45,6 +45,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
void lockdep_reset_lock(struct lockdep_map *lock);
+void lockdep_register_key(struct lock_class_key *key);
+void lockdep_unregister_key(struct lock_class_key *key);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index 2073d4e1f2f0..783dd0df06f9 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -7,6 +7,7 @@
struct liblockdep_pthread_mutex {
pthread_mutex_t mutex;
+ struct lock_class_key key;
struct lockdep_map dep_map;
};
@@ -27,11 +28,10 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
return pthread_mutex_init(&lock->mutex, __mutexattr);
}
-#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
-({ \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key, (mutexattr)); \
+#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
+({ \
+ lockdep_register_key(&(mutex)->key); \
+ __mutex_init((mutex), #mutex, &(mutex)->key, (mutexattr)); \
})
static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
@@ -55,6 +55,7 @@ static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *l
static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
{
lockdep_reset_lock(&lock->dep_map);
+ lockdep_unregister_key(&lock->key);
return pthread_mutex_destroy(&lock->mutex);
}
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index c8fbd0306960..11f425662b43 100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
@@ -11,7 +11,7 @@ find tests -name '*.c' | sort | while read -r i; do
testname=$(basename "$i" .c)
echo -ne "$testname... "
if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &&
- timeout 1 "tests/$testname" 2>&1 | "tests/${testname}.sh"; then
+ timeout 1 "tests/$testname" 2>&1 | /bin/bash "tests/${testname}.sh"; then
echo "PASSED!"
else
echo "FAILED!"
@@ -24,7 +24,7 @@ find tests -name '*.c' | sort | while read -r i; do
echo -ne "(PRELOAD) $testname... "
if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
timeout 1 ./lockdep "tests/$testname" 2>&1 |
- "tests/${testname}.sh"; then
+ /bin/bash "tests/${testname}.sh"; then
echo "PASSED!"
else
echo "FAILED!"
@@ -37,7 +37,7 @@ find tests -name '*.c' | sort | while read -r i; do
echo -ne "(PRELOAD + Valgrind) $testname... "
if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
{ timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } &&
- "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
+ /bin/bash "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
! grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then
echo "PASSED!"
else
diff --git a/tools/lib/lockdep/tests/ABBA.c b/tools/lib/lockdep/tests/ABBA.c
index 623313f54720..543789bc3e37 100644
--- a/tools/lib/lockdep/tests/ABBA.c
+++ b/tools/lib/lockdep/tests/ABBA.c
@@ -14,4 +14,13 @@ void main(void)
pthread_mutex_destroy(&b);
pthread_mutex_destroy(&a);
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(b, a);
+
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 17c2b596f043..904adb70a4f0 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -22,6 +22,7 @@
*/
#include <linux/rbtree_augmented.h>
+#include <linux/export.h>
/*
* red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
@@ -43,6 +44,30 @@
* parentheses and have some accompanying text comment.
*/
+/*
+ * Notes on lockless lookups:
+ *
+ * All stores to the tree structure (rb_left and rb_right) must be done using
+ * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
+ * tree structure as seen in program order.
+ *
+ * These two requirements will allow lockless iteration of the tree -- not
+ * correct iteration mind you, tree rotations are not atomic so a lookup might
+ * miss entire subtrees.
+ *
+ * But they do guarantee that any such traversal will only see valid elements
+ * and that it will indeed complete -- does not get stuck in a loop.
+ *
+ * It also guarantees that if the lookup returns an element it is the 'correct'
+ * one. But not returning an element does _NOT_ mean it's not present.
+ *
+ * NOTE:
+ *
+ * Stores to __rb_parent_color are not important for simple lookups so those
+ * are left undone as of now. Nor did I check for loops involving parent
+ * pointers.
+ */
+
static inline void rb_set_black(struct rb_node *rb)
{
rb->__rb_parent_color |= RB_BLACK;
@@ -70,22 +95,35 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+ if (newleft)
+ *leftmost = node;
+
while (true) {
/*
- * Loop invariant: node is red
- *
- * If there is a black parent, we are done.
- * Otherwise, take some corrective action as we don't
- * want a red root or two consecutive red nodes.
+ * Loop invariant: node is red.
*/
- if (!parent) {
+ if (unlikely(!parent)) {
+ /*
+ * The inserted node is root. Either this is the
+ * first node, or we recursed at Case 1 below and
+ * are no longer violating 4).
+ */
rb_set_parent_color(node, NULL, RB_BLACK);
break;
- } else if (rb_is_black(parent))
+ }
+
+ /*
+ * If there is a black parent, we are done.
+ * Otherwise, take some corrective action as,
+ * per 4), we don't want a red root or two
+ * consecutive red nodes.
+ */
+ if (rb_is_black(parent))
break;
gparent = rb_red_parent(parent);
@@ -94,7 +132,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
if (parent != tmp) { /* parent == gparent->rb_left */
if (tmp && rb_is_red(tmp)) {
/*
- * Case 1 - color flips
+ * Case 1 - node's uncle is red (color flips).
*
* G g
* / \ / \
@@ -117,7 +155,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
tmp = parent->rb_right;
if (node == tmp) {
/*
- * Case 2 - left rotate at parent
+ * Case 2 - node's uncle is black and node is
+ * the parent's right child (left rotate at parent).
*
* G G
* / \ / \
@@ -128,8 +167,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* This still leaves us in violation of 4), the
* continuation into Case 3 will fix that.
*/
- parent->rb_right = tmp = node->rb_left;
- node->rb_left = parent;
+ tmp = node->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp);
+ WRITE_ONCE(node->rb_left, parent);
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
@@ -140,7 +180,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
/*
- * Case 3 - right rotate at gparent
+ * Case 3 - node's uncle is black and node is
+ * the parent's left child (right rotate at gparent).
*
* G P
* / \ / \
@@ -148,8 +189,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* / \
* n U
*/
- gparent->rb_left = tmp; /* == parent->rb_right */
- parent->rb_right = gparent;
+ WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
+ WRITE_ONCE(parent->rb_right, gparent);
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -170,8 +211,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
tmp = parent->rb_left;
if (node == tmp) {
/* Case 2 - right rotate at parent */
- parent->rb_left = tmp = node->rb_right;
- node->rb_right = parent;
+ tmp = node->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp);
+ WRITE_ONCE(node->rb_right, parent);
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
@@ -182,8 +224,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
/* Case 3 - left rotate at gparent */
- gparent->rb_right = tmp; /* == parent->rb_left */
- parent->rb_left = gparent;
+ WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
+ WRITE_ONCE(parent->rb_left, gparent);
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -223,8 +265,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* Sl Sr N Sl
*/
- parent->rb_right = tmp1 = sibling->rb_left;
- sibling->rb_left = parent;
+ tmp1 = sibling->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp1);
+ WRITE_ONCE(sibling->rb_left, parent);
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
@@ -268,15 +311,31 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
*
* (p) (p)
* / \ / \
- * N S --> N Sl
+ * N S --> N sl
* / \ \
- * sl Sr s
+ * sl Sr S
* \
* Sr
+ *
+ * Note: p might be red, and then both
+ * p and sl are red after rotation (which
+ * breaks property 4). This is fixed in
+ * Case 4 (in __rb_rotate_set_parents(),
+ * which sets sl to the color of p
+ * and sets p to RB_BLACK).
+ *
+ * (p) (sl)
+ * / \ / \
+ * N sl --> P S
+ * \ / \
+ * S N Sr
+ * \
+ * Sr
*/
- sibling->rb_left = tmp1 = tmp2->rb_right;
- tmp2->rb_right = sibling;
- parent->rb_right = tmp2;
+ tmp1 = tmp2->rb_right;
+ WRITE_ONCE(sibling->rb_left, tmp1);
+ WRITE_ONCE(tmp2->rb_right, sibling);
+ WRITE_ONCE(parent->rb_right, tmp2);
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
@@ -296,8 +355,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* (sl) sr N (sl)
*/
- parent->rb_right = tmp2 = sibling->rb_left;
- sibling->rb_left = parent;
+ tmp2 = sibling->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp2);
+ WRITE_ONCE(sibling->rb_left, parent);
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
@@ -309,8 +369,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
sibling = parent->rb_left;
if (rb_is_red(sibling)) {
/* Case 1 - right rotate at parent */
- parent->rb_left = tmp1 = sibling->rb_right;
- sibling->rb_right = parent;
+ tmp1 = sibling->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp1);
+ WRITE_ONCE(sibling->rb_right, parent);
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
@@ -334,10 +395,11 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
}
break;
}
- /* Case 3 - right rotate at sibling */
- sibling->rb_right = tmp1 = tmp2->rb_left;
- tmp2->rb_left = sibling;
- parent->rb_left = tmp2;
+ /* Case 3 - left rotate at sibling */
+ tmp1 = tmp2->rb_left;
+ WRITE_ONCE(sibling->rb_right, tmp1);
+ WRITE_ONCE(tmp2->rb_left, sibling);
+ WRITE_ONCE(parent->rb_left, tmp2);
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
@@ -345,9 +407,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
tmp1 = sibling;
sibling = tmp2;
}
- /* Case 4 - left rotate at parent + color flips */
- parent->rb_left = tmp2 = sibling->rb_right;
- sibling->rb_right = parent;
+ /* Case 4 - right rotate at parent + color flips */
+ tmp2 = sibling->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp2);
+ WRITE_ONCE(sibling->rb_right, parent);
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
@@ -378,22 +441,41 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
static const struct rb_augment_callbacks dummy_callbacks = {
- dummy_propagate, dummy_copy, dummy_rotate
+ .propagate = dummy_propagate,
+ .copy = dummy_copy,
+ .rotate = dummy_rotate
};
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, dummy_rotate);
+ __rb_insert(node, root, false, NULL, dummy_rotate);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root,
+ NULL, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
+void rb_insert_color_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool leftmost)
+{
+ __rb_insert(node, &root->rb_root, leftmost,
+ &root->rb_leftmost, dummy_rotate);
+}
+
+void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
+{
+ struct rb_node *rebalance;
+ rebalance = __rb_erase_augmented(node, &root->rb_root,
+ &root->rb_leftmost, &dummy_callbacks);
+ if (rebalance)
+ ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
+}
+
/*
* Augmented rbtree manipulation functions.
*
@@ -402,9 +484,10 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, augment_rotate);
+ __rb_insert(node, root, newleft, leftmost, augment_rotate);
}
/*
@@ -498,15 +581,24 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
{
struct rb_node *parent = rb_parent(victim);
+ /* Copy the pointers/colour from the victim to the replacement */
+ *new = *victim;
+
/* Set the surrounding nodes to point to the replacement */
- __rb_change_child(victim, new, parent, root);
if (victim->rb_left)
rb_set_parent(victim->rb_left, new);
if (victim->rb_right)
rb_set_parent(victim->rb_right, new);
+ __rb_change_child(victim, new, parent, root);
+}
- /* Copy the pointers/colour from the victim to the replacement */
- *new = *victim;
+void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+ struct rb_root_cached *root)
+{
+ rb_replace_node(victim, new, &root->rb_root);
+
+ if (root->rb_leftmost == victim)
+ root->rb_leftmost = new;
}
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
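
As a rough illustration of the rb_root_cached API that this sync brings into tools/lib (a sketch, not taken from the patch), a caller tracks whether the new node became the leftmost while walking down the tree and passes that flag to rb_insert_color_cached(); rb_erase_cached() and rb_replace_node_cached() then keep rb_leftmost current. The struct item and insert_item() names are made up for the example.

#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	unsigned long key;
};

static void insert_item(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* new node is not the minimum */
		}
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, root, leftmost);
}
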
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
index 8b31c0e00ba3..d463761a58f4 100644
--- a/tools/lib/traceevent/event-parse-api.c
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -194,13 +194,13 @@ void tep_set_page_size(struct tep_handle *pevent, int _page_size)
}
/**
- * tep_is_file_bigendian - get if the file is in big endian order
+ * tep_file_bigendian - get if the file is in big endian order
* @pevent: a handle to the tep_handle
*
* This returns if the file is in big endian order
* If @pevent is NULL, 0 is returned.
*/
-int tep_is_file_bigendian(struct tep_handle *pevent)
+int tep_file_bigendian(struct tep_handle *pevent)
{
if(pevent)
return pevent->file_bigendian;
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
index 9a092dd4a86d..35833ee32d6c 100644
--- a/tools/lib/traceevent/event-parse-local.h
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -7,7 +7,7 @@
#ifndef _PARSE_EVENTS_INT_H
#define _PARSE_EVENTS_INT_H
-struct cmdline;
+struct tep_cmdline;
struct cmdline_list;
struct func_map;
struct func_list;
@@ -36,7 +36,7 @@ struct tep_handle {
int long_size;
int page_size;
- struct cmdline *cmdlines;
+ struct tep_cmdline *cmdlines;
struct cmdline_list *cmdlist;
int cmdline_count;
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 69a96e39f0ab..abd4fa5d3088 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -124,15 +124,15 @@ struct tep_print_arg *alloc_arg(void)
return calloc(1, sizeof(struct tep_print_arg));
}
-struct cmdline {
+struct tep_cmdline {
char *comm;
int pid;
};
static int cmdline_cmp(const void *a, const void *b)
{
- const struct cmdline *ca = a;
- const struct cmdline *cb = b;
+ const struct tep_cmdline *ca = a;
+ const struct tep_cmdline *cb = b;
if (ca->pid < cb->pid)
return -1;
@@ -152,7 +152,7 @@ static int cmdline_init(struct tep_handle *pevent)
{
struct cmdline_list *cmdlist = pevent->cmdlist;
struct cmdline_list *item;
- struct cmdline *cmdlines;
+ struct tep_cmdline *cmdlines;
int i;
cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count);
@@ -179,8 +179,8 @@ static int cmdline_init(struct tep_handle *pevent)
static const char *find_cmdline(struct tep_handle *pevent, int pid)
{
- const struct cmdline *comm;
- struct cmdline key;
+ const struct tep_cmdline *comm;
+ struct tep_cmdline key;
if (!pid)
return "<idle>";
@@ -208,8 +208,8 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
*/
int tep_pid_is_registered(struct tep_handle *pevent, int pid)
{
- const struct cmdline *comm;
- struct cmdline key;
+ const struct tep_cmdline *comm;
+ struct tep_cmdline key;
if (!pid)
return 1;
@@ -232,11 +232,13 @@ int tep_pid_is_registered(struct tep_handle *pevent, int pid)
* we must add this pid. This is much slower than when cmdlines
* are added before the array is initialized.
*/
-static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
+static int add_new_comm(struct tep_handle *pevent,
+ const char *comm, int pid, bool override)
{
- struct cmdline *cmdlines = pevent->cmdlines;
- const struct cmdline *cmdline;
- struct cmdline key;
+ struct tep_cmdline *cmdlines = pevent->cmdlines;
+ struct tep_cmdline *cmdline;
+ struct tep_cmdline key;
+ char *new_comm;
if (!pid)
return 0;
@@ -247,8 +249,19 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
sizeof(*pevent->cmdlines), cmdline_cmp);
if (cmdline) {
- errno = EEXIST;
- return -1;
+ if (!override) {
+ errno = EEXIST;
+ return -1;
+ }
+ new_comm = strdup(comm);
+ if (!new_comm) {
+ errno = ENOMEM;
+ return -1;
+ }
+ free(cmdline->comm);
+ cmdline->comm = new_comm;
+
+ return 0;
}
cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1));
@@ -275,21 +288,13 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
return 0;
}
-/**
- * tep_register_comm - register a pid / comm mapping
- * @pevent: handle for the pevent
- * @comm: the command line to register
- * @pid: the pid to map the command line to
- *
- * This adds a mapping to search for command line names with
- * a given pid. The comm is duplicated.
- */
-int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
+static int _tep_register_comm(struct tep_handle *pevent,
+ const char *comm, int pid, bool override)
{
struct cmdline_list *item;
if (pevent->cmdlines)
- return add_new_comm(pevent, comm, pid);
+ return add_new_comm(pevent, comm, pid, override);
item = malloc(sizeof(*item));
if (!item)
@@ -312,6 +317,40 @@ int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
return 0;
}
+/**
+ * tep_register_comm - register a pid / comm mapping
+ * @pevent: handle for the pevent
+ * @comm: the command line to register
+ * @pid: the pid to map the command line to
+ *
+ * This adds a mapping to search for command line names with
+ * a given pid. The comm is duplicated. If a command with the same pid
+ * a given pid. The comm is duplicated. If a command with the same pid
+ */
+int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
+{
+ return _tep_register_comm(pevent, comm, pid, false);
+}
+
+/**
+ * tep_override_comm - register a pid / comm mapping
+ * @pevent: handle for the pevent
+ * @comm: the command line to register
+ * @pid: the pid to map the command line to
+ *
+ * This adds a mapping to search for command line names with
+ * a given pid. The comm is duplicated. If a command with the same pid
+ * already exists, the command string is updated with the new one.
+ */
+int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid)
+{
+ if (!pevent->cmdlines && cmdline_init(pevent)) {
+ errno = ENOMEM;
+ return -1;
+ }
+ return _tep_register_comm(pevent, comm, pid, true);
+}
+
int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock)
{
pevent->trace_clock = strdup(trace_clock);
@@ -5227,18 +5266,6 @@ int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
}
/**
- * tep_data_event_from_type - find the event by a given type
- * @pevent: a handle to the pevent
- * @type: the type of the event.
- *
- * This returns the event form a given @type;
- */
-struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type)
-{
- return tep_find_event(pevent, type);
-}
-
-/**
* tep_data_pid - parse the PID from record
* @pevent: a handle to the pevent
* @rec: the record to parse
@@ -5292,8 +5319,8 @@ const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid)
return comm;
}
-static struct cmdline *
-pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *next)
+static struct tep_cmdline *
+pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)next;
@@ -5305,7 +5332,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne
while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
cmdlist = cmdlist->next;
- return (struct cmdline *)cmdlist;
+ return (struct tep_cmdline *)cmdlist;
}
/**
@@ -5321,10 +5348,10 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne
* next pid.
* Also, it does a linear search, so it may be slow.
*/
-struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
- struct cmdline *next)
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+ struct tep_cmdline *next)
{
- struct cmdline *cmdline;
+ struct tep_cmdline *cmdline;
/*
* If the cmdlines have not been converted yet, then use
@@ -5363,7 +5390,7 @@ struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *co
* Returns the pid for a give cmdline. If @cmdline is NULL, then
* -1 is returned.
*/
-int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline)
+int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
@@ -6593,6 +6620,12 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
*
* If @id is >= 0, then it is used to find the event.
* else @sys_name and @event_name are used.
+ *
+ * Returns:
+ * TEP_REGISTER_SUCCESS_OVERWRITE if an existing handler is overwritten
+ * TEP_REGISTER_SUCCESS if a new handler is registered successfully
+ * negative TEP_ERRNO_... in case of an error
+ *
*/
int tep_register_event_handler(struct tep_handle *pevent, int id,
const char *sys_name, const char *event_name,
@@ -6610,7 +6643,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
event->handler = func;
event->context = context;
- return 0;
+ return TEP_REGISTER_SUCCESS_OVERWRITE;
not_found:
/* Save for later use. */
@@ -6640,7 +6673,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
pevent->handlers = handle;
handle->context = context;
- return -1;
+ return TEP_REGISTER_SUCCESS;
}
static int handle_matches(struct event_handler *handler, int id,
@@ -6723,8 +6756,10 @@ struct tep_handle *tep_alloc(void)
{
struct tep_handle *pevent = calloc(1, sizeof(*pevent));
- if (pevent)
+ if (pevent) {
pevent->ref_count = 1;
+ pevent->host_bigendian = tep_host_bigendian();
+ }
return pevent;
}
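
A small, hypothetical usage sketch for the comm-registration changes above (not taken from perf or trace-cmd): tep_register_comm() still refuses a duplicate pid with EEXIST, while the new tep_override_comm() replaces the stored command string for that pid.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "event-parse.h"

static void map_pid(struct tep_handle *tep)
{
	if (tep_register_comm(tep, "old-name", 1234) < 0 && errno == EEXIST)
		printf("pid 1234 already registered, keeping old comm\n");

	/* Unlike tep_register_comm(), this replaces an existing mapping. */
	if (tep_override_comm(tep, "new-name", 1234) < 0)
		printf("override failed: %s\n", strerror(errno));
}
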
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 35d37087d3c5..aec48f2aea8a 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -432,6 +432,7 @@ int tep_set_function_resolver(struct tep_handle *pevent,
tep_func_resolver_t *func, void *priv);
void tep_reset_function_resolver(struct tep_handle *pevent);
int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid);
+int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid);
int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock);
int tep_register_function(struct tep_handle *pevent, char *name,
unsigned long long addr, char *mod);
@@ -484,6 +485,11 @@ int tep_print_func_field(struct trace_seq *s, const char *fmt,
struct tep_event *event, const char *name,
struct tep_record *record, int err);
+enum tep_reg_handler {
+ TEP_REGISTER_SUCCESS = 0,
+ TEP_REGISTER_SUCCESS_OVERWRITE,
+};
+
int tep_register_event_handler(struct tep_handle *pevent, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context);
@@ -520,15 +526,14 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
void tep_data_lat_fmt(struct tep_handle *pevent,
struct trace_seq *s, struct tep_record *record);
int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
-struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type);
int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid);
-struct cmdline;
-struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
- struct cmdline *next);
-int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline);
+struct tep_cmdline;
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+ struct tep_cmdline *next);
+int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline);
void tep_print_field(struct trace_seq *s, void *data,
struct tep_format_field *field);
@@ -553,7 +558,7 @@ int tep_get_long_size(struct tep_handle *pevent);
void tep_set_long_size(struct tep_handle *pevent, int long_size);
int tep_get_page_size(struct tep_handle *pevent);
void tep_set_page_size(struct tep_handle *pevent, int _page_size);
-int tep_is_file_bigendian(struct tep_handle *pevent);
+int tep_file_bigendian(struct tep_handle *pevent);
void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian);
int tep_is_host_bigendian(struct tep_handle *pevent);
void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
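
A hedged sketch of how a caller might use the new tep_reg_handler return values added above; the dummy handler and its body are placeholders, not from this patch.

#include <stdio.h>
#include "event-parse.h"

static int dummy_handler(struct trace_seq *s, struct tep_record *record,
			 struct tep_event *event, void *context)
{
	return 0;
}

static void install_handler(struct tep_handle *tep)
{
	int ret = tep_register_event_handler(tep, -1, "sched", "sched_switch",
					     dummy_handler, NULL);

	if (ret == TEP_REGISTER_SUCCESS_OVERWRITE)
		fprintf(stderr, "replaced an existing sched_switch handler\n");
	else if (ret < 0)
		fprintf(stderr, "registration failed: %d\n", ret);
}
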
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index 754050eea467..64b9c25a1fd3 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -389,7 +389,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
* We can only use the structure if file is of the same
* endianness.
*/
- if (tep_is_file_bigendian(event->pevent) ==
+ if (tep_file_bigendian(event->pevent) ==
tep_is_host_bigendian(event->pevent)) {
trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
index 8ff1d55954d1..8d5ecd2bf877 100644
--- a/tools/lib/traceevent/trace-seq.c
+++ b/tools/lib/traceevent/trace-seq.c
@@ -100,7 +100,8 @@ static void expand_buffer(struct trace_seq *s)
* @fmt: printf format string
*
* It returns 0 if the trace oversizes the buffer's free
- * space, 1 otherwise.
+ * space, the number of characters printed, or a negative
+ * value in case of an error.
*
* The tracer may use either sequence operations or its own
* copy to user routines. To simplify formating of a trace
@@ -129,9 +130,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
goto try_again;
}
- s->len += ret;
+ if (ret > 0)
+ s->len += ret;
- return 1;
+ return ret;
}
/**
@@ -139,6 +141,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
* @s: trace sequence descriptor
* @fmt: printf format string
*
+ * It returns 0 if the trace oversizes the buffer's free
+ * space, the number of characters printed, or a negative
+ * value in case of an error.
+ *
* The tracer may use either sequence operations or its own
* copy to user routines. To simplify formating of a trace
* trace_seq_printf is used to store strings into a special
@@ -163,9 +169,10 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
goto try_again;
}
- s->len += ret;
+ if (ret > 0)
+ s->len += ret;
- return len;
+ return ret;
}
/**
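
For the changed return convention above, a minimal caller pattern (assumed, not from the patch) that distinguishes a full sequence buffer from a formatting error:

#include "trace-seq.h"

static int emit_cpu(struct trace_seq *s, int cpu)
{
	int ret = trace_seq_printf(s, "cpu=%d\n", cpu);

	if (ret < 0)
		return ret;	/* vsnprintf() reported an error */
	if (ret == 0)
		return -1;	/* sequence buffer is full */
	return 0;		/* ret characters were appended */
}
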
diff --git a/tools/memory-model/.gitignore b/tools/memory-model/.gitignore
new file mode 100644
index 000000000000..b1d34c52f3c3
--- /dev/null
+++ b/tools/memory-model/.gitignore
@@ -0,0 +1 @@
+litmus
diff --git a/tools/memory-model/README b/tools/memory-model/README
index acf9077cffaa..0f2c366518c6 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -156,6 +156,8 @@ lock.cat
README
This file.
+scripts Various scripts, see scripts/README.
+
===========
LIMITATIONS
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index b84fb2f67109..796513362c05 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -29,7 +29,8 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'sync-rcu (*synchronize_rcu*) ||
'before-atomic (*smp_mb__before_atomic*) ||
'after-atomic (*smp_mb__after_atomic*) ||
- 'after-spinlock (*smp_mb__after_spinlock*)
+ 'after-spinlock (*smp_mb__after_spinlock*) ||
+ 'after-unlock-lock (*smp_mb__after_unlock_lock*)
instructions F[Barriers]
(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index 882fc33274ac..8f23c74a96fd 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -30,7 +30,9 @@ let wmb = [W] ; fencerel(Wmb) ; [W]
let mb = ([M] ; fencerel(Mb) ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
- ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M])
+ ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
+ ([M] ; po ; [UL] ; (co | po) ; [LKW] ;
+ fencerel(After-unlock-lock) ; [M])
let gp = po ; [Sync-rcu] ; po?
let strong-fence = mb | gp
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index 6fa3eb28d40b..b27911cc087d 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -23,6 +23,7 @@ smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
+smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
// Exchange
xchg(X,V) __xchg{mb}(X,V)
diff --git a/tools/memory-model/scripts/README b/tools/memory-model/scripts/README
new file mode 100644
index 000000000000..29375a1fbbfa
--- /dev/null
+++ b/tools/memory-model/scripts/README
@@ -0,0 +1,70 @@
+ ============
+ LKMM SCRIPTS
+ ============
+
+
+These scripts are run from the tools/memory-model directory.
+
+checkalllitmus.sh
+
+ Run all litmus tests in the litmus-tests directory, checking
+ the results against the expected results recorded in the
+ "Result:" comment lines.
+
+checkghlitmus.sh
+
+ Run all litmus tests in the https://github.com/paulmckrcu/litmus
+ archive that are C-language and that have "Result:" comment lines
+ documenting expected results, comparing the actual results to
+ those expected.
+
+checklitmushist.sh
+
+ Run all litmus tests having .litmus.out files from previous
+ initlitmushist.sh or newlitmushist.sh runs, comparing the
+ herd output to that of the original runs.
+
+checklitmus.sh
+
+ Check a single litmus test against its "Result:" expected result.
+
+cmplitmushist.sh
+
+ Compare output from two different runs of the same litmus tests,
+ with the absolute pathnames of the tests to run provided one
+ name per line on standard input. Not normally run manually,
+ provided instead for use by other scripts.
+
+initlitmushist.sh
+
+ Run all litmus tests having no more than the specified number
+ of processes given a specified timeout, recording the results
+ in .litmus.out files.
+
+judgelitmus.sh
+
+ Given a .litmus file and its .litmus.out herd output, check the
+ .litmus.out file against the .litmus file's "Result:" comment to
+ judge whether the test ran correctly. Not normally run manually,
+ provided instead for use by other scripts.
+
+newlitmushist.sh
+
+ For all new or updated litmus tests having no more than the
+ specified number of processes given a specified timeout, run
+ and record the results in .litmus.out files.
+
+parseargs.sh
+
+ Parse command-line arguments. Not normally run manually,
+ provided instead for use by other scripts.
+
+runlitmushist.sh
+
+ Run the litmus tests whose absolute pathnames are provided one
+ name per line on standard input. Not normally run manually,
+ provided instead for use by other scripts.
+
+README
+
+	This file.
diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh
index ca528f9a24d4..b35fcd61ecf6 100755
--- a/tools/memory-model/scripts/checkalllitmus.sh
+++ b/tools/memory-model/scripts/checkalllitmus.sh
@@ -1,42 +1,27 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
#
-# Run herd tests on all .litmus files in the specified directory (which
-# defaults to litmus-tests) and check each file's result against a "Result:"
-# comment within that litmus test. If the verification result does not
-# match that specified in the litmus test, this script prints an error
-# message prefixed with "^^^". It also outputs verification results to
-# a file whose name is that of the specified litmus test, but with ".out"
-# appended.
+# Run herd tests on all .litmus files in the litmus-tests directory
+# and check each file's result against a "Result:" comment within that
+# litmus test. If the verification result does not match that specified
+# in the litmus test, this script prints an error message prefixed with
+# "^^^". It also outputs verification results to a file whose name is
+# that of the specified litmus test, but with ".out" appended.
#
# Usage:
-# checkalllitmus.sh [ directory ]
+# checkalllitmus.sh
#
-# The LINUX_HERD_OPTIONS environment variable may be used to specify
-# arguments to herd, whose default is defined by the checklitmus.sh script.
-# Thus, one would normally run this in the directory containing the memory
-# model, specifying the pathname of the litmus test to check.
+# Run this in the directory containing the memory model.
#
# This script makes no attempt to run the litmus tests concurrently.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-litmusdir=${1-litmus-tests}
+. scripts/parseargs.sh
+
+litmusdir=litmus-tests
if test -d "$litmusdir" -a -r "$litmusdir" -a -x "$litmusdir"
then
:
@@ -45,6 +30,14 @@ else
exit 255
fi
+# Create any new directories that have appeared in the litmus-tests
+# directory since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find $litmusdir -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
# Find the checklitmus script. If it is not where we expect it, then
# assume that the caller has the PATH environment variable set
# appropriately.
@@ -57,7 +50,7 @@ fi
# Run the script on all the litmus tests in the specified directory
ret=0
-for i in litmus-tests/*.litmus
+for i in $litmusdir/*.litmus
do
if ! $clscript $i
then
@@ -66,8 +59,8 @@ do
done
if test "$ret" -ne 0
then
- echo " ^^^ VERIFICATION MISMATCHES"
+ echo " ^^^ VERIFICATION MISMATCHES" 1>&2
else
- echo All litmus tests verified as was expected.
+ echo All litmus tests verified as was expected. 1>&2
fi
exit $ret
diff --git a/tools/memory-model/scripts/checkghlitmus.sh b/tools/memory-model/scripts/checkghlitmus.sh
new file mode 100644
index 000000000000..6589fbb6f653
--- /dev/null
+++ b/tools/memory-model/scripts/checkghlitmus.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests having no more than the specified
+# number of processes (see scripts/parseargs.sh for the default).
+#
+# sh checkghlitmus.sh
+#
+# Run from the Linux kernel tools/memory-model directory. See the
+# parseargs.sh scripts for arguments.
+
+. scripts/parseargs.sh
+
+T=/tmp/checkghlitmus.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# Clone the repository if it is not already present.
+if test -d litmus
+then
+ :
+else
+ git clone https://github.com/paulmckrcu/litmus
+ ( cd litmus; git checkout origin/master )
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+ xargs -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' |
+ xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+
+# Create a list of C-language litmus tests with "Result:" commands and
+# no more than the specified number of processes.
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+xargs < $T/list-C -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result
+xargs < $T/list-C-result -r grep -L "^P${LKMM_PROCS}" > $T/list-C-result-short
+
+# Form list of tests without corresponding .litmus.out files
+sort $T/list-C-already $T/list-C-result-short | uniq -u > $T/list-C-needed
+
+# Run any needed tests.
+if scripts/runlitmushist.sh < $T/list-C-needed > $T/run.stdout 2> $T/run.stderr
+then
+ errs=
+else
+ errs=1
+fi
+
+sed < $T/list-C-result-short -e 's,^,scripts/judgelitmus.sh ,' |
+ sh > $T/judge.stdout 2> $T/judge.stderr
+
+if test -n "$errs"
+then
+ cat $T/run.stderr 1>&2
+fi
+grep '!!!' $T/judge.stdout
diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh
index bf12a75c0719..dd08801a30b0 100755
--- a/tools/memory-model/scripts/checklitmus.sh
+++ b/tools/memory-model/scripts/checklitmus.sh
@@ -1,40 +1,24 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
#
-# Run a herd test and check the result against a "Result:" comment within
-# the litmus test. If the verification result does not match that specified
-# in the litmus test, this script prints an error message prefixed with
-# "^^^" and exits with a non-zero status. It also outputs verification
+# Run a herd test and invokes judgelitmus.sh to check the result against
+# a "Result:" comment within the litmus test. It also outputs verification
# results to a file whose name is that of the specified litmus test, but
# with ".out" appended.
#
# Usage:
# checklitmus.sh file.litmus
#
-# The LINUX_HERD_OPTIONS environment variable may be used to specify
-# arguments to herd, which default to "-conf linux-kernel.cfg". Thus,
-# one would normally run this in the directory containing the memory model,
-# specifying the pathname of the litmus test to check.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check. The caller is expected to have
+# properly set up the LKMM environment variables.
#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
litmus=$1
-herdoptions=${LINUX_HERD_OPTIONS--conf linux-kernel.cfg}
+herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg}
if test -f "$litmus" -a -r "$litmus"
then
@@ -43,44 +27,8 @@ else
echo ' --- ' error: \"$litmus\" is not a readable file
exit 255
fi
-if grep -q '^ \* Result: ' $litmus
-then
- outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
-else
- outcome=specified
-fi
-echo Herd options: $herdoptions > $litmus.out
-/usr/bin/time herd7 -o ~/tmp $herdoptions $litmus >> $litmus.out 2>&1
-grep "Herd options:" $litmus.out
-grep '^Observation' $litmus.out
-if grep -q '^Observation' $litmus.out
-then
- :
-else
- cat $litmus.out
- echo ' ^^^ Verification error'
- echo ' ^^^ Verification error' >> $litmus.out 2>&1
- exit 255
-fi
-if test "$outcome" = DEADLOCK
-then
- echo grep 3 and 4
- if grep '^Observation' $litmus.out | grep -q 'Never 0 0$'
- then
- ret=0
- else
- echo " ^^^ Unexpected non-$outcome verification"
- echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1
- ret=1
- fi
-elif grep '^Observation' $litmus.out | grep -q $outcome || test "$outcome" = Maybe
-then
- ret=0
-else
- echo " ^^^ Unexpected non-$outcome verification"
- echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1
- ret=1
-fi
-tail -2 $litmus.out | head -1
-exit $ret
+echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out
+/usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1
+
+scripts/judgelitmus.sh $litmus
diff --git a/tools/memory-model/scripts/checklitmushist.sh b/tools/memory-model/scripts/checklitmushist.sh
new file mode 100644
index 000000000000..1d210ffb7c8a
--- /dev/null
+++ b/tools/memory-model/scripts/checklitmushist.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Reruns the C-language litmus tests previously run that match the
+# specified criteria, and compares the result to that of the previous
+# runs from initlitmushist.sh and/or newlitmushist.sh.
+#
+# sh checklitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+T=/tmp/checklitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ echo Run scripts/initlitmushist.sh first, need litmus repo.
+ exit 1
+fi
+
+# Create the results directory and populate it with subdirectories.
+# The initial output is created here to avoid clobbering the output
+# generated earlier.
+mkdir $T/results
+find litmus -type d -print | ( cd $T/results; sed -e 's/^/mkdir -p /' | sh )
+
+# Create the list of litmus tests already run, then remove those that
+# are excluded by this run's --procs argument.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+ xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+xargs < $T/list-C-already -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+# Redirect output, run tests, then restore destination directory.
+destdir="$LKMM_DESTDIR"
+LKMM_DESTDIR=$T/results; export LKMM_DESTDIR
+scripts/runlitmushist.sh < $T/list-C-short > $T/runlitmushist.sh.out 2>&1
+LKMM_DESTDIR="$destdir"; export LKMM_DESTDIR
+
+# Move the newly generated .litmus.out files to .litmus.out.new files
+# in the destination directory.
+cdir=`pwd`
+ddir=`awk -v c="$cdir" -v d="$LKMM_DESTDIR" \
+ 'END { if (d ~ /^\//) print d; else print c "/" d; }' < /dev/null`
+( cd $T/results; find litmus -type f -name '*.litmus.out' -print |
+ sed -e 's,^.*$,cp & '"$ddir"'/&.new,' | sh )
+
+sed < $T/list-C-short -e 's,^,'"$LKMM_DESTDIR/"',' |
+ sh scripts/cmplitmushist.sh
+exit $?
diff --git a/tools/memory-model/scripts/cmplitmushist.sh b/tools/memory-model/scripts/cmplitmushist.sh
new file mode 100644
index 000000000000..0f498aeeccf5
--- /dev/null
+++ b/tools/memory-model/scripts/cmplitmushist.sh
@@ -0,0 +1,87 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Compares .out and .out.new files for each name on standard input,
+# one full pathname per line. Outputs comparison results followed by
+# a summary.
+#
+# sh cmplitmushist.sh
+
+T=/tmp/cmplitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# comparetest oldpath newpath
+perfect=0
+obsline=0
+noobsline=0
+obsresult=0
+badcompare=0
+comparetest () {
+ grep -v 'maxresident)k\|minor)pagefaults\|^Time' $1 > $T/oldout
+ grep -v 'maxresident)k\|minor)pagefaults\|^Time' $2 > $T/newout
+ if cmp -s $T/oldout $T/newout && grep -q '^Observation' $1
+ then
+ echo Exact output match: $2
+ perfect=`expr "$perfect" + 1`
+ return 0
+ fi
+
+ grep '^Observation' $1 > $T/oldout
+ grep '^Observation' $2 > $T/newout
+ if test -s $T/oldout -o -s $T/newout
+ then
+ if cmp -s $T/oldout $T/newout
+ then
+ echo Matching Observation result and counts: $2
+ obsline=`expr "$obsline" + 1`
+ return 0
+ fi
+ else
+ echo Missing Observation line "(e.g., herd7 timeout)": $2
+ noobsline=`expr "$noobsline" + 1`
+ return 0
+ fi
+
+ grep '^Observation' $1 | awk '{ print $3 }' > $T/oldout
+ grep '^Observation' $2 | awk '{ print $3 }' > $T/newout
+ if cmp -s $T/oldout $T/newout
+ then
+ echo Matching Observation Always/Sometimes/Never result: $2
+ obsresult=`expr "$obsresult" + 1`
+ return 0
+ fi
+ echo ' !!!' Result changed: $2
+ badcompare=`expr "$badcompare" + 1`
+ return 1
+}
+
+sed -e 's/^.*$/comparetest &.out &.out.new/' > $T/cmpscript
+. $T/cmpscript > $T/cmpscript.out
+cat $T/cmpscript.out
+
+echo ' ---' Summary: 1>&2
+grep '!!!' $T/cmpscript.out 1>&2
+if test "$perfect" -ne 0
+then
+ echo Exact output matches: $perfect 1>&2
+fi
+if test "$obsline" -ne 0
+then
+ echo Matching Observation result and counts: $obsline 1>&2
+fi
+if test "$noobsline" -ne 0
+then
+ echo Missing Observation line "(e.g., herd7 timeout)": $noobsline 1>&2
+fi
+if test "$obsresult" -ne 0
+then
+ echo Matching Observation Always/Sometimes/Never result: $obsresult 1>&2
+fi
+if test "$badcompare" -ne 0
+then
+ echo "!!!" Result changed: $badcompare 1>&2
+ exit 1
+fi
+
+exit 0
diff --git a/tools/memory-model/scripts/initlitmushist.sh b/tools/memory-model/scripts/initlitmushist.sh
new file mode 100644
index 000000000000..956b6957484d
--- /dev/null
+++ b/tools/memory-model/scripts/initlitmushist.sh
@@ -0,0 +1,68 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests matching the specified criteria.
+# Generates the output for each .litmus file into a corresponding
+# .litmus.out file, and does not judge the result.
+#
+# sh initlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# This script can consume significant wallclock time and CPU, especially as
+# the value of --procs rises. On a four-core (eight hardware threads)
+# 2.5GHz x86 with a one-minute per-run timeout:
+#
+# --procs wallclock CPU timeouts tests
+# 1 0m11.241s 0m1.086s 0 19
+# 2 1m12.598s 2m8.459s 2 393
+# 3 1m30.007s 6m2.479s 4 2291
+# 4 3m26.042s 18m5.139s 9 3217
+# 5 4m26.661s 23m54.128s 13 3784
+# 6 4m41.900s 26m4.721s 13 4352
+# 7 5m51.463s 35m50.868s 13 4626
+# 8 10m5.235s 68m43.672s 34 5117
+# 9 15m57.80s 105m58.101s 69 5156
+# 10 16m14.13s 103m35.009s 69 5165
+# 20 27m48.55s 198m3.286s 156 5269
+#
+# Increasing the timeout on the 20-process run to five minutes increases
+# the runtime to about 90 minutes with the CPU time rising to about
+# 10 hours. On the other hand, it decreases the number of timeouts to 101.
+#
+# Note that there are historical tests for which herd7 will fail
+# completely, for example, litmus/manual/atomic/C-unlock-wait-00.litmus
+# contains a call to spin_unlock_wait(), which no longer exists in either
+# the kernel or LKMM.
+
+. scripts/parseargs.sh
+
+T=/tmp/initlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ git clone https://github.com/paulmckrcu/litmus
+ ( cd litmus; git checkout origin/master )
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests with no more than the
+# specified number of processes (per the --procs argument).
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+xargs < $T/list-C -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+scripts/runlitmushist.sh < $T/list-C-short
+
+exit 0
diff --git a/tools/memory-model/scripts/judgelitmus.sh b/tools/memory-model/scripts/judgelitmus.sh
new file mode 100644
index 000000000000..0cc63875e395
--- /dev/null
+++ b/tools/memory-model/scripts/judgelitmus.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Given a .litmus test and the corresponding .litmus.out file, check
+# the .litmus.out file against the "Result:" comment to judge whether
+# the test ran correctly.
+#
+# Usage:
+# judgelitmus.sh file.litmus
+#
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+litmus=$1
+
+if test -f "$litmus" -a -r "$litmus"
+then
+ :
+else
+ echo ' --- ' error: \"$litmus\" is not a readable file
+ exit 255
+fi
+if test -f "$LKMM_DESTDIR/$litmus".out -a -r "$LKMM_DESTDIR/$litmus".out
+then
+ :
+else
+ echo ' --- ' error: \"$LKMM_DESTDIR/$litmus\".out is not a readable file
+ exit 255
+fi
+if grep -q '^ \* Result: ' $litmus
+then
+ outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
+else
+ outcome=specified
+fi
+
+grep '^Observation' $LKMM_DESTDIR/$litmus.out
+if grep -q '^Observation' $LKMM_DESTDIR/$litmus.out
+then
+ :
+else
+ echo ' !!! Verification error' $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo ' !!! Verification error' >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ exit 255
+fi
+if test "$outcome" = DEADLOCK
+then
+ if grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q 'Never 0 0$'
+ then
+ ret=0
+ else
+ echo " !!! Unexpected non-$outcome verification" $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ ret=1
+ fi
+elif grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q $outcome || test "$outcome" = Maybe
+then
+ ret=0
+else
+ echo " !!! Unexpected non-$outcome verification" $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ ret=1
+fi
+tail -2 $LKMM_DESTDIR/$litmus.out | head -1
+exit $ret
diff --git a/tools/memory-model/scripts/newlitmushist.sh b/tools/memory-model/scripts/newlitmushist.sh
new file mode 100644
index 000000000000..991f8f814881
--- /dev/null
+++ b/tools/memory-model/scripts/newlitmushist.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests matching the specified criteria
+# that do not already have a corresponding .litmus.out file, and does
+# not judge the result.
+#
+# sh newlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+T=/tmp/newlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ echo Run scripts/initlitmushist.sh first, need litmus repo.
+ exit 1
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+ xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+
+# Form full list of litmus tests with no more than the specified
+# number of processes (per the --procs argument).
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C-all
+xargs < $T/list-C-all -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+# Form list of new tests. Note: This does not handle litmus-test deletion!
+sort $T/list-C-already $T/list-C-short | uniq -u > $T/list-C-new
+
+# Form list of litmus tests that have changed since the last run.
+sed < $T/list-C-short -e 's,^.*$,if test & -nt '"$LKMM_DESTDIR"'/&.out; then echo &; fi,' > $T/list-C-script
+sh $T/list-C-script > $T/list-C-newer
+
+# Merge the list of new and of updated litmus tests: These must be (re)run.
+sort -u $T/list-C-new $T/list-C-newer > $T/list-C-needed
+
+scripts/runlitmushist.sh < $T/list-C-needed
+
+exit 0
diff --git a/tools/memory-model/scripts/parseargs.sh b/tools/memory-model/scripts/parseargs.sh
new file mode 100644
index 000000000000..859e1d581e05
--- /dev/null
+++ b/tools/memory-model/scripts/parseargs.sh
@@ -0,0 +1,136 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Parse command-line arguments for the other scripts in this directory.
+#
+# . scripts/parseargs.sh
+#
+# Include into other Linux kernel tools/memory-model scripts.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/parseargs.sh.$$
+mkdir $T
+
+# Initialize one parameter: initparam name default
+initparam () {
+ echo if test -z '"$'$1'"' > $T/s
+ echo then >> $T/s
+ echo $1='"'$2'"' >> $T/s
+ echo export $1 >> $T/s
+ echo fi >> $T/s
+ echo $1_DEF='$'$1 >> $T/s
+ . $T/s
+}
+
+initparam LKMM_DESTDIR "."
+initparam LKMM_HERD_OPTIONS "-conf linux-kernel.cfg"
+initparam LKMM_JOBS `getconf _NPROCESSORS_ONLN`
+initparam LKMM_PROCS "3"
+initparam LKMM_TIMEOUT "1m"
+
+scriptname=$0
+
+usagehelp () {
+ echo "Usage $scriptname [ arguments ]"
+ echo " --destdir path (place for .litmus.out, default next to the .litmus file)"
+ echo " --herdopts -conf linux-kernel.cfg ..."
+ echo " --jobs N (number of jobs, default one per CPU)"
+ echo " --procs N (litmus tests with at most this many processes)"
+ echo " --timeout N (herd7 timeout, e.g., 10s, 1m, 2hr, 1d, or '')"
+ echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'"
+ exit 1
+}
+
+usage () {
+ usagehelp 1>&2
+}
+
+# checkarg --argname argtype $# arg mustmatch cannotmatch
+checkarg () {
+ if test $3 -le 1
+ then
+ echo $1 needs argument $2 matching \"$5\"
+ usage
+ fi
+ if echo "$4" | grep -q -e "$5"
+ then
+ :
+ else
+ echo $1 $2 \"$4\" must match \"$5\"
+ usage
+ fi
+ if echo "$4" | grep -q -e "$6"
+ then
+ echo $1 $2 \"$4\" must not match \"$6\"
+ usage
+ fi
+}
+
+while test $# -gt 0
+do
+ case "$1" in
+ --destdir)
+ checkarg --destdir "(path to directory)" "$#" "$2" '.\+' '^--'
+ LKMM_DESTDIR="$2"
+ mkdir $LKMM_DESTDIR > /dev/null 2>&1
+ if ! test -e "$LKMM_DESTDIR"
+ then
+ echo "Cannot create directory --destdir '$LKMM_DESTDIR'"
+ usage
+ fi
+ if test -d "$LKMM_DESTDIR" -a -w "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR"
+ then
+ :
+ else
+ echo "Directory --destdir '$LKMM_DESTDIR' insufficient permissions to create files"
+ usage
+ fi
+ shift
+ ;;
+ --herdopts|--herdopt)
+ checkarg --herdopts "(herd options)" "$#" "$2" '.*' '^--'
+ LKMM_HERD_OPTIONS="$2"
+ shift
+ ;;
+ -j[1-9]*)
+ njobs="`echo $1 | sed -e 's/^-j//'`"
+ trailchars="`echo $njobs | sed -e 's/[0-9]\+\(.*\)$/\1/'`"
+ if test -n "$trailchars"
+ then
+ echo $1 trailing characters "'$trailchars'"
+ usagehelp
+ fi
+ LKMM_JOBS="`echo $njobs | sed -e 's/^\([0-9]\+\).*$/\1/'`"
+ ;;
+ --jobs|--job|-j)
+ checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]*$' '^--'
+ LKMM_JOBS="$2"
+ shift
+ ;;
+ --procs|--proc)
+ checkarg --procs "(number)" "$#" "$2" '^[0-9]\+$' '^--'
+ LKMM_PROCS="$2"
+ shift
+ ;;
+ --timeout)
+ checkarg --timeout "(timeout spec)" "$#" "$2" '^\([0-9]\+[smhd]\?\|\)$' '^--'
+ LKMM_TIMEOUT="$2"
+ shift
+ ;;
+ *)
+ echo Unknown argument $1
+ usage
+ ;;
+ esac
+ shift
+done
+if test -z "$LKMM_TIMEOUT"
+then
+ LKMM_TIMEOUT_CMD=""; export LKMM_TIMEOUT_CMD
+else
+ LKMM_TIMEOUT_CMD="timeout $LKMM_TIMEOUT"; export LKMM_TIMEOUT_CMD
+fi
+rm -rf $T
diff --git a/tools/memory-model/scripts/runlitmushist.sh b/tools/memory-model/scripts/runlitmushist.sh
new file mode 100644
index 000000000000..e507f5f933d5
--- /dev/null
+++ b/tools/memory-model/scripts/runlitmushist.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests specified on standard input, using up
+# to the specified number of CPUs (defaulting to all of them) and placing
+# the results in the specified directory (defaulting to the same place
+# the litmus test came from).
+#
+# sh runlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# This script uses environment variables produced by parseargs.sh.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/runlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ echo Directory \"litmus\" missing, aborting run.
+ exit 1
+fi
+
+# Prefixes for per-CPU scripts
+for ((i=0;i<$LKMM_JOBS;i++))
+do
+ echo dir="$LKMM_DESTDIR" > $T/$i.sh
+ echo T=$T >> $T/$i.sh
+ echo herdoptions=\"$LKMM_HERD_OPTIONS\" >> $T/$i.sh
+ cat << '___EOF___' >> $T/$i.sh
+ runtest () {
+ echo ' ... ' /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 '>' $dir/$1.out '2>&1'
+ if /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 > $dir/$1.out 2>&1
+ then
+ if ! grep -q '^Observation ' $dir/$1.out
+ then
+ echo ' !!! Herd failed, no Observation:' $1
+ fi
+ else
+ exitcode=$?
+ if test "$exitcode" -eq 124
+ then
+ exitmsg="timed out"
+ else
+ exitmsg="failed, exit code $exitcode"
+ fi
+ echo ' !!! Herd' ${exitmsg}: $1
+ fi
+ }
+___EOF___
+done
+
+awk -v q="'" -v b='\\' '
+{
+ print "echo `grep " q "^P[0-9]" b "+(" q " " $0 " | tail -1 | sed -e " q "s/^P" b "([0-9]" b "+" b ")(.*$/" b "1/" q "` " $0
+}' | bash |
+sort -k1n |
+awk -v ncpu=$LKMM_JOBS -v t=$T '
+{
+ print "runtest " $2 >> t "/" NR % ncpu ".sh";
+}
+
+END {
+ for (i = 0; i < ncpu; i++) {
+ print "sh " t "/" i ".sh > " t "/" i ".sh.out 2>&1 &";
+ close(t "/" i ".sh");
+ }
+ print "wait";
+}' | sh
+cat $T/*.sh.out
+if grep -q '!!!' $T/*.sh.out
+then
+ echo ' ---' Summary: 1>&2
+ grep '!!!' $T/*.sh.out 1>&2
+ nfail="`grep '!!!' $T/*.sh.out | wc -l`"
+ echo 'Number of failed herd runs (e.g., timeout): ' $nfail 1>&2
+ exit 1
+else
+ echo All runs completed successfully. 1>&2
+ exit 0
+fi
diff --git a/tools/perf/Build b/tools/perf/Build
index e5232d567611..5f392dbb88fc 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -46,10 +46,10 @@ CFLAGS_builtin-trace.o += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_
CFLAGS_builtin-report.o += -DTIPDIR="BUILD_STR($(tipdir_SQ))"
CFLAGS_builtin-report.o += -DDOCDIR="BUILD_STR($(srcdir_SQ)/Documentation)"
-libperf-y += util/
-libperf-y += arch/
-libperf-y += ui/
-libperf-y += scripts/
-libperf-$(CONFIG_TRACE) += trace/beauty/
+perf-y += util/
+perf-y += arch/
+perf-y += ui/
+perf-y += scripts/
+perf-$(CONFIG_TRACE) += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 095aebdc5bb7..e6150f21267d 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -19,8 +19,11 @@ C2C stands for Cache To Cache.
The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows
you to track down the cacheline contentions.
-The tool is based on x86's load latency and precise store facility events
-provided by Intel CPUs. These events provide:
+On x86, the tool is based on load latency and precise store facility events
+provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling
+with a thresholding feature.
+
+These events provide:
- memory address of the access
- type of the access (load and store details)
- latency (in cycles) of the load access
@@ -46,7 +49,7 @@ RECORD OPTIONS
-l::
--ldlat::
- Configure mem-loads latency.
+ Configure mem-loads latency. (x86 only)
-k::
--all-kernel::
@@ -119,11 +122,16 @@ Following perf record options are configured by default:
-W,-d,--phys-data,--sample-cpu
Unless specified otherwise with '-e' option, following events are monitored by
-default:
+default on x86:
cpu/mem-loads,ldlat=30/P
cpu/mem-stores/P
+and the following on PowerPC:
+
+ cpu/mem-loads/
+ cpu/mem-stores/
+
User can pass any 'perf record' option behind '--' mark, like (to enable
callchains and system wide monitoring):
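
A hypothetical session reflecting the defaults listed above; on PowerPC the record step simply uses the non-ldlat events ('sleep 10' stands in for a real workload):

    perf c2c record -- sleep 10
    perf c2c report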
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 4ac7775fbc11..86f3dcc15f83 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -120,6 +120,10 @@ Given a $HOME/.perfconfig like this:
children = true
group = true
+ [llvm]
+ dump-obj = true
+ clang-opt = -g
+
You can hide source code of annotate feature setting the config to false with
% perf config annotate.hide_src_code=true
@@ -553,6 +557,33 @@ trace.*::
trace.show_zeros::
Do not suppress syscall arguments that are equal to zero.
+llvm.*::
+ llvm.clang-path::
+ Path to clang. If omitted, it is searched for in $PATH.
+
+ llvm.clang-bpf-cmd-template::
+ Cmdline template. The lines below show its default value. Environment
+ variables are used to pass options.
+ "$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS $KERNEL_INC_OPTIONS \
+ -Wno-unused-value -Wno-pointer-sign -working-directory \
+ $WORKING_DIR -c $CLANG_SOURCE -target bpf -O2 -o -"
+
+ llvm.clang-opt::
+ Options passed to clang.
+
+ llvm.kbuild-dir::
+ kbuild directory. If not set, use /lib/modules/`uname -r`/build.
+ If set to "" deliberately, skip kernel header auto-detector.
+
+ llvm.kbuild-opts::
+ Options passed to 'make' when detecting kernel header options.
+
+ llvm.dump-obj::
+ Enable dumping of BPF object files compiled by LLVM.
+
+ llvm.opts::
+ Options passed to llc.
+
SEE ALSO
--------
linkperf:perf[1]
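
A sketch of how the new [llvm] keys might be set from the command line rather than by editing $HOME/.perfconfig directly (the values are illustrative):

    perf config llvm.dump-obj=true
    perf config llvm.clang-opt=-g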
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index f8d2167cf3e7..199ea0f0a6c0 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -82,7 +82,7 @@ RECORD OPTIONS
Be more verbose (show counter open errors, etc)
--ldlat <n>::
- Specify desired latency for loads event.
+ Specify desired latency for loads event. (x86 only)
In addition, for report all perf report options are valid, and for record
all perf record options.
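
To illustrate the note above, a hypothetical x86 run next to its PowerPC equivalent ('./workload' is a placeholder):

    perf mem record --ldlat 50 -- ./workload    # x86 only
    perf mem record -- ./workload               # PowerPC: no ldlat parameter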
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index d232b13ea713..8f0c2be34848 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -88,6 +88,20 @@ OPTIONS
If you want to profile write accesses in [0x1000~1008), just set
'mem:0x1000/8:w'.
+ - a BPF source file (ending in .c) or a precompiled object file (ending
+ in .o) selects one or more BPF events.
+ The BPF program can attach to various perf events based on the ELF section
+ names.
+
+ When processing a '.c' file, perf searches an installed LLVM to compile it
+ into an object file first. Optional clang options can be passed via the
+ '--clang-opt' command line option, e.g.:
+
+ perf record --clang-opt "-DLINUX_VERSION_CODE=0x50000" \
+ -e tests/bpf-script-example.c
+
+ Note: '--clang-opt' must be placed before '--event/-e'.
+
- a group of events surrounded by a pair of brace ("{event1,event2,...}").
Each event is separated by commas and the group should be quoted to
prevent the shell interpretation. You also need to use --group on
@@ -440,6 +454,11 @@ Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default:
Asynchronous mode is supported only when linking Perf tool with libc library
providing implementation for Posix AIO API.
+--affinity=mode::
+Set the affinity mask of the trace reading thread according to the policy defined by the 'mode' value:
+ node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
+ cpu - thread affinity mask is set to cpu of the processed mmap buffer
+
--all-kernel::
Configure all used events to run in kernel space.
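
A hypothetical invocation of the new option documented above, pinning the trace reading thread to the NUMA node of the mmap buffer being processed ('sleep 5' is a placeholder workload):

    perf record --affinity=node -a -- sleep 5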
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 9e4def08d569..2e19fd7ffe35 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -159,6 +159,12 @@ OPTIONS
the override, and the result of the above is that only S/W and H/W
events are displayed with the given fields.
+ It's possible to add/remove fields only for a specific event type:
+
+ -Fsw:-cpu,-period
+
+ removes cpu and period from software events.
+
For the 'wildcard' option if a user selected field is invalid for an
event type, a message is displayed to the user that the option is
ignored for that type. For example:
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 631e687be4eb..fc6e43262c41 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -210,6 +210,14 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
may happen, for instance, when a thread gets migrated to a different CPU
while processing a syscall.
+--map-dump::
+ Dump BPF maps set up by events passed via -e, for instance the augmented_raw_syscalls
+ map living in tools/perf/examples/bpf/augmented_raw_syscalls.c. For now this
+ dumps just boolean map values and integer keys; in time it will print in hex
+ by default, use BTF when available, and use the existing 'perf trace' syscall
+ arg beautifiers for pretty printing, mapping integer arguments to strings
+ (pid to comm, syscall id to syscall name, etc).
+
PAGEFAULTS
----------
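
A hypothetical invocation consistent with the --map-dump text above, dumping the maps set up by the example BPF program it mentions:

    perf trace --map-dump -e tools/perf/examples/bpf/augmented_raw_syscalls.c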
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index dfb218feaad9..593ef49b273c 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -43,11 +43,10 @@ struct perf_file_section {
Flags section:
-The header is followed by different optional headers, described by the bits set
-in flags. Only headers for which the bit is set are included. Each header
-consists of a perf_file_section located after the initial header.
-The respective perf_file_section points to the data of the additional
-header and defines its size.
+For each of the optional features a perf_file_section is placed after the data
+section if the feature bit is set in the perf_header flags bitset. The
+respective perf_file_section points to the data of the additional header and
+defines its size.
Some headers consist of strings, which are defined like this:
@@ -131,7 +130,7 @@ An uint64_t with the total memory in bytes.
HEADER_CMDLINE = 11,
-A perf_header_string with the perf command line used to collect the data.
+A perf_header_string_list with the perf arg-vector used to collect the data.
HEADER_EVENT_DESC = 12,
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b441c88cafa1..0f11d5891301 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -109,6 +109,13 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
+FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
+FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
+FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
+
+FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
+
ifdef CSINCLUDES
LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
endif
@@ -218,6 +225,8 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
+FEATURE_CHECK_LDFLAGS-libaio = -lrt
+
CFLAGS += -fno-omit-frame-pointer
CFLAGS += -ggdb3
CFLAGS += -funwind-tables
@@ -386,7 +395,8 @@ ifeq ($(feature-setns), 1)
$(call detected,CONFIG_SETNS)
endif
-ifndef NO_CORESIGHT
+ifdef CORESIGHT
+ $(call feature_check,libopencsd)
ifeq ($(feature-libopencsd), 1)
CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
LDFLAGS += $(LIBOPENCSD_LDFLAGS)
@@ -482,6 +492,7 @@ endif
ifndef NO_LIBUNWIND
have_libunwind :=
+ $(call feature_check,libunwind-x86)
ifeq ($(feature-libunwind-x86), 1)
$(call detected,CONFIG_LIBUNWIND_X86)
CFLAGS += -DHAVE_LIBUNWIND_X86_SUPPORT
@@ -490,6 +501,7 @@ ifndef NO_LIBUNWIND
have_libunwind = 1
endif
+ $(call feature_check,libunwind-aarch64)
ifeq ($(feature-libunwind-aarch64), 1)
$(call detected,CONFIG_LIBUNWIND_AARCH64)
CFLAGS += -DHAVE_LIBUNWIND_AARCH64_SUPPORT
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index ff29c3372ec3..01f7555fd933 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -102,7 +102,7 @@ include ../scripts/utilities.mak
# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
# llvm-config is not in $PATH.
#
-# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
+# Define CORESIGHT if you DO WANT support for CoreSight trace decoding.
#
# Define NO_AIO if you do not want support of Posix AIO based trace
# streaming for record mode. Currently Posix AIO trace streaming is
@@ -344,9 +344,9 @@ endif
export PERL_PATH
-LIB_FILE=$(OUTPUT)libperf.a
+LIBPERF_A=$(OUTPUT)libperf.a
-PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
+PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
ifndef NO_LIBBPF
PERFLIBS += $(LIBBPF)
endif
@@ -524,12 +524,14 @@ $(arch_errno_name_array): $(arch_errno_tbl)
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
+# Create python binding output directory if not already present
+_dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
+
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
$(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
$(PYTHON_WORD) util/setup.py \
--quiet build_ext; \
- mkdir -p $(OUTPUT)python && \
cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
please_set_SHELL_PATH_to_a_more_modern_shell:
@@ -547,6 +549,8 @@ JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o
PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
+LIBPERF_IN := $(OUTPUT)libperf-in.o
+
export JEVENTS
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
@@ -563,9 +567,12 @@ $(JEVENTS): $(JEVENTS_IN)
$(PMU_EVENTS_IN): $(JEVENTS) FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
-$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
+$(LIBPERF_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=libperf
+
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
- $(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
+ $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBS) -o $@
$(GTK_IN): FORCE
$(Q)$(MAKE) $(build)=gtk
@@ -660,12 +667,12 @@ $(OUTPUT)perf-%: %.o $(PERFLIBS)
$(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
ifndef NO_PERF_READ_VDSO32
-$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-vdso-map.c
+$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-map.c
$(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
endif
ifndef NO_PERF_READ_VDSOX32
-$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c
+$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-map.c
$(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
endif
@@ -681,12 +688,7 @@ endif
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
-LIBPERF_IN := $(OUTPUT)libperf-in.o
-
-$(LIBPERF_IN): prepare FORCE
- $(Q)$(MAKE) $(build)=libperf
-
-$(LIB_FILE): $(LIBPERF_IN)
+$(LIBPERF_A): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
@@ -861,8 +863,8 @@ ifndef NO_LIBPYTHON
$(call QUIET_INSTALL, python-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'; \
- $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
- $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
+ $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/python/*.py -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
endif
$(call QUIET_INSTALL, perf_completion-script) \
@@ -908,7 +910,7 @@ python-clean:
$(python-clean)
clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean python-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
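
Given the inverted default described in the Makefile comment change above, a build that wants CoreSight trace decoding now opts in explicitly, for example:

    make -C tools/perf CORESIGHT=1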
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index d9b6af837c7d..688818844c11 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
-libperf-y += common.o
-libperf-y += $(SRCARCH)/
+perf-y += common.o
+perf-y += $(SRCARCH)/
diff --git a/tools/perf/arch/arm/Build b/tools/perf/arch/arm/Build
index 41bf61da476a..36222e64bbf7 100644
--- a/tools/perf/arch/arm/Build
+++ b/tools/perf/arch/arm/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
index 883c57ff0c08..bc8e97380c82 100644
--- a/tools/perf/arch/arm/tests/Build
+++ b/tools/perf/arch/arm/tests/Build
@@ -1,4 +1,5 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
+perf-y += regs_load.o
+perf-y += dwarf-unwind.o
+perf-y += vectors-page.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c
index 5b1543c98022..6848101a855f 100644
--- a/tools/perf/arch/arm/tests/arch-tests.c
+++ b/tools/perf/arch/arm/tests/arch-tests.c
@@ -11,6 +11,10 @@ struct test arch_tests[] = {
},
#endif
{
+ .desc = "Vectors page",
+ .func = test__vectors_page,
+ },
+ {
.func = NULL,
},
};
diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c
index 9a0242e74cfc..2c35e532bc9a 100644
--- a/tools/perf/arch/arm/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/arm/tests/vectors-page.c b/tools/perf/arch/arm/tests/vectors-page.c
new file mode 100644
index 000000000000..7ffdd79971c8
--- /dev/null
+++ b/tools/perf/arch/arm/tests/vectors-page.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+#include <linux/compiler.h>
+
+#include "debug.h"
+#include "tests/tests.h"
+#include "util/find-map.c"
+
+#define VECTORS__MAP_NAME "[vectors]"
+
+int test__vectors_page(struct test *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ void *start, *end;
+
+ if (find_map(&start, &end, VECTORS__MAP_NAME)) {
+ pr_err("%s not found, is CONFIG_KUSER_HELPERS enabled?\n",
+ VECTORS__MAP_NAME);
+ return TEST_FAIL;
+ }
+
+ return TEST_OK;
+}
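
On a 32-bit ARM kernel built with CONFIG_KUSER_HELPERS, the new test should appear under 'perf test'; a hypothetical way to run just this one by name:

    perf test -v "Vectors page"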
diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build
index e64c5f216448..296f0eac5e18 100644
--- a/tools/perf/arch/arm/util/Build
+++ b/tools/perf/arch/arm/util/Build
@@ -1,6 +1,6 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
+perf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 2f595cd73da6..911426721170 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -5,6 +5,7 @@
*/
#include <api/fs/fs.h>
+#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
@@ -22,12 +23,10 @@
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"
+#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>
-#define ENABLE_SINK_MAX 128
-#define CS_BUS_DEVICE_PATH "/bus/coresight/devices/"
-
struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
@@ -60,10 +59,48 @@ static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
return 0;
}
+static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
+ struct perf_evsel *evsel)
+{
+ char msg[BUFSIZ], path[PATH_MAX], *sink;
+ struct perf_evsel_config_term *term;
+ int ret = -EINVAL;
+ u32 hash;
+
+ if (evsel->attr.config2 & GENMASK(31, 0))
+ return 0;
+
+ list_for_each_entry(term, &evsel->config_terms, list) {
+ if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
+ continue;
+
+ sink = term->val.drv_cfg;
+ snprintf(path, PATH_MAX, "sinks/%s", sink);
+
+ ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
+ if (ret != 1) {
+ pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
+ sink, perf_evsel__name(evsel), errno,
+ str_error_r(errno, msg, sizeof(msg)));
+ return ret;
+ }
+
+ evsel->attr.config2 |= hash;
+ return 0;
+ }
+
+ /*
+ * No sink was provided on the command line - for _now_ treat
+ * this as an error.
+ */
+ return ret;
+}
+
static int cs_etm_recording_options(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts)
{
+ int ret;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -92,6 +129,10 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
if (!cs_etm_evsel)
return 0;
+ ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
+ if (ret)
+ return ret;
+
if (opts->use_clockid) {
pr_err("Cannot use clockid (-k option) with %s\n",
CORESIGHT_ETM_PMU_NAME);
@@ -598,54 +639,3 @@ struct auxtrace_record *cs_etm_record_init(int *err)
out:
return NULL;
}
-
-static FILE *cs_device__open_file(const char *name)
-{
- struct stat st;
- char path[PATH_MAX];
- const char *sysfs;
-
- sysfs = sysfs__mountpoint();
- if (!sysfs)
- return NULL;
-
- snprintf(path, PATH_MAX,
- "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);
-
- if (stat(path, &st) < 0)
- return NULL;
-
- return fopen(path, "w");
-
-}
-
-static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
-{
- va_list args;
- FILE *file;
- int ret = -EINVAL;
-
- va_start(args, fmt);
- file = cs_device__open_file(name);
- if (file) {
- ret = vfprintf(file, fmt, args);
- fclose(file);
- }
- va_end(args);
- return ret;
-}
-
-int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
-{
- int ret;
- char enable_sink[ENABLE_SINK_MAX];
-
- snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
- term->val.drv_cfg, "enable_sink");
-
- ret = cs_device__print_file(enable_sink, "%d", 1);
- if (ret < 0)
- return ret;
-
- return 0;
-}
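
With the enable_sink sysfs path removed above, the sink is now expected as a config term on the event itself, which cs_etm_set_sink_attr() resolves through the PMU's sinks/<name> file into attr.config2. A hypothetical record command (the sink name 'tmc_etr0' is a placeholder for whatever sink the ETM PMU exposes):

    perf record -e cs_etm/@tmc_etr0/ -a -- sleep 2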
diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h
index 1a12e64f5127..a3354bda4fe8 100644
--- a/tools/perf/arch/arm/util/cs-etm.h
+++ b/tools/perf/arch/arm/util/cs-etm.h
@@ -7,9 +7,6 @@
#ifndef INCLUDE__PERF_CS_ETM_H__
#define INCLUDE__PERF_CS_ETM_H__
-#include "../../util/evsel.h"
-
struct auxtrace_record *cs_etm_record_init(int *err);
-int cs_etm_set_drv_config(struct perf_evsel_config_term *term);
#endif
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index e047571e6080..bbc297a7e2e3 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -7,8 +7,8 @@
#include <string.h>
#include <linux/coresight-pmu.h>
#include <linux/perf_event.h>
+#include <linux/string.h>
-#include "cs-etm.h"
#include "arm-spe.h"
#include "../../util/pmu.h"
@@ -19,7 +19,6 @@ struct perf_event_attr
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
pmu->selectable = true;
- pmu->set_drv_config = cs_etm_set_drv_config;
#if defined(__aarch64__)
} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
return arm_spe_pmu_default_config(pmu);
diff --git a/tools/perf/arch/arm64/Build b/tools/perf/arch/arm64/Build
index 41bf61da476a..36222e64bbf7 100644
--- a/tools/perf/arch/arm64/Build
+++ b/tools/perf/arch/arm64/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm64/tests/Build b/tools/perf/arch/arm64/tests/Build
index 883c57ff0c08..41707fea74b3 100644
--- a/tools/perf/arch/arm64/tests/Build
+++ b/tools/perf/arch/arm64/tests/Build
@@ -1,4 +1,4 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
+perf-y += regs_load.o
+perf-y += dwarf-unwind.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c
index 5522ce384723..a6a407fa1b8b 100644
--- a/tools/perf/arch/arm64/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm64/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 68f8a8eb3ad0..3cde540d2fcf 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,10 +1,10 @@
-libperf-y += header.o
-libperf-y += sym-handling.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-y += header.o
+perf-y += sym-handling.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
+perf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
../../arm/util/cs-etm.o \
arm-spe.o
diff --git a/tools/perf/arch/nds32/Build b/tools/perf/arch/nds32/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/nds32/Build
+++ b/tools/perf/arch/nds32/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/nds32/util/Build b/tools/perf/arch/nds32/util/Build
index ca623bbf993c..d0bc205fe49a 100644
--- a/tools/perf/arch/nds32/util/Build
+++ b/tools/perf/arch/nds32/util/Build
@@ -1 +1 @@
-libperf-y += header.o
+perf-y += header.o
diff --git a/tools/perf/arch/powerpc/Build b/tools/perf/arch/powerpc/Build
index db52fa22d3a1..a7dd46a5b678 100644
--- a/tools/perf/arch/powerpc/Build
+++ b/tools/perf/arch/powerpc/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-y += tests/
+perf-y += util/
+perf-y += tests/
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index a111239df182..e58d00d62f02 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -14,18 +14,25 @@ PERF_HAVE_JITDUMP := 1
out := $(OUTPUT)arch/powerpc/include/generated/asm
header32 := $(out)/syscalls_32.c
header64 := $(out)/syscalls_64.c
-sysdef := $(srctree)/tools/arch/powerpc/include/uapi/asm/unistd.h
-sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls/
+syskrn := $(srctree)/arch/powerpc/kernel/syscalls/syscall.tbl
+sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls
+sysdef := $(sysprf)/syscall.tbl
systbl := $(sysprf)/mksyscalltbl
# Create output directory if not already present
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
$(header64): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '64' '$(CC)' $(sysdef) > $@
+ @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ (diff -B $(sysdef) $(syskrn) >/dev/null) \
+ || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
+ $(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@
$(header32): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '32' '$(CC)' $(sysdef) > $@
+ @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ (diff -B $(sysdef) $(syskrn) >/dev/null) \
+ || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
+ $(Q)$(SHELL) '$(systbl)' '32' $(sysdef) > $@
clean::
$(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64)
diff --git a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
index ef52e1dd694b..6c58060aa03b 100755
--- a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
@@ -9,10 +9,9 @@
# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
wordsize=$1
-gcc=$2
-input=$3
+SYSCALL_TBL=$2
-if ! test -r $input; then
+if ! test -r $SYSCALL_TBL; then
echo "Could not read input file" >&2
exit 1
fi
@@ -20,18 +19,21 @@ fi
create_table()
{
local wordsize=$1
- local max_nr
+ local max_nr nr abi sc discard
+ max_nr=-1
+ nr=0
echo "static const char *syscalltbl_powerpc_${wordsize}[] = {"
- while read sc nr; do
- printf '\t[%d] = "%s",\n' $nr $sc
- max_nr=$nr
+ while read nr abi sc discard; do
+ if [ "$max_nr" -lt "$nr" ]; then
+ printf '\t[%d] = "%s",\n' $nr $sc
+ max_nr=$nr
+ fi
done
echo '};'
echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr"
}
-$gcc -m${wordsize} -E -dM -x c $input \
- |sed -ne 's/^#define __NR_//p' \
- |sort -t' ' -k2 -nu \
+grep -E "^[[:digit:]]+[[:space:]]+(common|spu|nospu|${wordsize})" $SYSCALL_TBL \
+ |sort -k1 -n \
|create_table ${wordsize}
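
To see what the rewritten filter feeds into create_table, the grep/sort stage can be run by hand against the table added below, e.g. for the 64-bit word size:

    grep -E '^[[:digit:]]+[[:space:]]+(common|spu|nospu|64)' syscall.tbl | sort -k1 -n | head -3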
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..db3bbb8744af
--- /dev/null
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -0,0 +1,427 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for powerpc
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> can be common, spu, nospu, 64, or 32 for this file.
+#
+0 nospu restart_syscall sys_restart_syscall
+1 nospu exit sys_exit
+2 nospu fork ppc_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 nospu execve sys_execve compat_sys_execve
+12 common chdir sys_chdir
+13 common time sys_time compat_sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown
+17 common break sys_ni_syscall
+18 32 oldstat sys_stat sys_ni_syscall
+18 64 oldstat sys_ni_syscall
+18 spu oldstat sys_ni_syscall
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 nospu mount sys_mount compat_sys_mount
+22 32 umount sys_oldumount
+22 64 umount sys_ni_syscall
+22 spu umount sys_ni_syscall
+23 common setuid sys_setuid
+24 common getuid sys_getuid
+25 common stime sys_stime compat_sys_stime
+26 nospu ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm
+28 32 oldfstat sys_fstat sys_ni_syscall
+28 64 oldfstat sys_ni_syscall
+28 spu oldfstat sys_ni_syscall
+29 nospu pause sys_pause
+30 nospu utime sys_utime compat_sys_utime
+31 common stty sys_ni_syscall
+32 common gtty sys_ni_syscall
+33 common access sys_access
+34 common nice sys_nice
+35 common ftime sys_ni_syscall
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times compat_sys_times
+44 common prof sys_ni_syscall
+45 common brk sys_brk
+46 common setgid sys_setgid
+47 common getgid sys_getgid
+48 nospu signal sys_signal
+49 common geteuid sys_geteuid
+50 common getegid sys_getegid
+51 nospu acct sys_acct
+52 nospu umount2 sys_umount
+53 common lock sys_ni_syscall
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+56 common mpx sys_ni_syscall
+57 common setpgid sys_setpgid
+58 common ulimit sys_ni_syscall
+59 32 oldolduname sys_olduname
+59 64 oldolduname sys_ni_syscall
+59 spu oldolduname sys_ni_syscall
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 nospu ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 32 sigaction sys_sigaction compat_sys_sigaction
+67 64 sigaction sys_ni_syscall
+67 spu sigaction sys_ni_syscall
+68 common sgetmask sys_sgetmask
+69 common ssetmask sys_ssetmask
+70 common setreuid sys_setreuid
+71 common setregid sys_setregid
+72 32 sigsuspend sys_sigsuspend
+72 64 sigsuspend sys_ni_syscall
+72 spu sigsuspend sys_ni_syscall
+73 32 sigpending sys_sigpending compat_sys_sigpending
+73 64 sigpending sys_ni_syscall
+73 spu sigpending sys_ni_syscall
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+76 32 getrlimit sys_old_getrlimit compat_sys_old_getrlimit
+76 64 getrlimit sys_ni_syscall
+76 spu getrlimit sys_ni_syscall
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 common getgroups sys_getgroups
+81 common setgroups sys_setgroups
+82 32 select ppc_select sys_ni_syscall
+82 64 select sys_ni_syscall
+82 spu select sys_ni_syscall
+83 common symlink sys_symlink
+84 32 oldlstat sys_lstat sys_ni_syscall
+84 64 oldlstat sys_ni_syscall
+84 spu oldlstat sys_ni_syscall
+85 common readlink sys_readlink
+86 nospu uselib sys_uselib
+87 nospu swapon sys_swapon
+88 nospu reboot sys_reboot
+89 32 readdir sys_old_readdir compat_sys_old_readdir
+89 64 readdir sys_ni_syscall
+89 spu readdir sys_ni_syscall
+90 common mmap sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+98 common profil sys_ni_syscall
+99 nospu statfs sys_statfs compat_sys_statfs
+100 nospu fstatfs sys_fstatfs compat_sys_fstatfs
+101 common ioperm sys_ni_syscall
+102 common socketcall sys_socketcall compat_sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common stat sys_newstat compat_sys_newstat
+107 common lstat sys_newlstat compat_sys_newlstat
+108 common fstat sys_newfstat compat_sys_newfstat
+109 32 olduname sys_uname
+109 64 olduname sys_ni_syscall
+109 spu olduname sys_ni_syscall
+110 common iopl sys_ni_syscall
+111 common vhangup sys_vhangup
+112 common idle sys_ni_syscall
+113 common vm86 sys_ni_syscall
+114 common wait4 sys_wait4 compat_sys_wait4
+115 nospu swapoff sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+117 nospu ipc sys_ipc compat_sys_ipc
+118 common fsync sys_fsync
+119 32 sigreturn sys_sigreturn compat_sys_sigreturn
+119 64 sigreturn sys_ni_syscall
+119 spu sigreturn sys_ni_syscall
+120 nospu clone ppc_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+123 common modify_ldt sys_ni_syscall
+124 common adjtimex sys_adjtimex compat_sys_adjtimex
+125 common mprotect sys_mprotect
+126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+126 64 sigprocmask sys_ni_syscall
+126 spu sigprocmask sys_ni_syscall
+127 common create_module sys_ni_syscall
+128 nospu init_module sys_init_module
+129 nospu delete_module sys_delete_module
+130 common get_kernel_syms sys_ni_syscall
+131 nospu quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_bdflush
+135 common sysfs sys_sysfs
+136 32 personality sys_personality ppc64_personality
+136 64 personality ppc64_personality
+136 spu personality ppc64_personality
+137 common afs_syscall sys_ni_syscall
+138 common setfsuid sys_setfsuid
+139 common setfsgid sys_setfsgid
+140 common _llseek sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 common _newselect sys_select compat_sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv compat_sys_readv
+146 common writev sys_writev compat_sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 nospu _sysctl sys_sysctl compat_sys_sysctl
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep compat_sys_nanosleep
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid
+165 common getresuid sys_getresuid
+166 common query_module sys_ni_syscall
+167 common poll sys_poll
+168 common nfsservctl sys_ni_syscall
+169 common setresgid sys_setresgid
+170 common getresgid sys_getresgid
+171 common prctl sys_prctl
+172 nospu rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+179 common pread64 sys_pread64 compat_sys_pread64
+180 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+181 common chown sys_chown
+182 common getcwd sys_getcwd
+183 common capget sys_capget
+184 common capset sys_capset
+185 nospu sigaltstack sys_sigaltstack compat_sys_sigaltstack
+186 32 sendfile sys_sendfile compat_sys_sendfile
+186 64 sendfile sys_sendfile64
+186 spu sendfile sys_sendfile64
+187 common getpmsg sys_ni_syscall
+188 common putpmsg sys_ni_syscall
+189 nospu vfork ppc_vfork
+190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
+191 common readahead sys_readahead compat_sys_readahead
+192 32 mmap2 sys_mmap2 compat_sys_mmap2
+193 32 truncate64 sys_truncate64 compat_sys_truncate64
+194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+195 32 stat64 sys_stat64
+196 32 lstat64 sys_lstat64
+197 32 fstat64 sys_fstat64
+198 nospu pciconfig_read sys_pciconfig_read
+199 nospu pciconfig_write sys_pciconfig_write
+200 nospu pciconfig_iobase sys_pciconfig_iobase
+201 common multiplexer sys_ni_syscall
+202 common getdents64 sys_getdents64
+203 common pivot_root sys_pivot_root
+204 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+205 common madvise sys_madvise
+206 common mincore sys_mincore
+207 common gettid sys_gettid
+208 common tkill sys_tkill
+209 common setxattr sys_setxattr
+210 common lsetxattr sys_lsetxattr
+211 common fsetxattr sys_fsetxattr
+212 common getxattr sys_getxattr
+213 common lgetxattr sys_lgetxattr
+214 common fgetxattr sys_fgetxattr
+215 common listxattr sys_listxattr
+216 common llistxattr sys_llistxattr
+217 common flistxattr sys_flistxattr
+218 common removexattr sys_removexattr
+219 common lremovexattr sys_lremovexattr
+220 common fremovexattr sys_fremovexattr
+221 common futex sys_futex compat_sys_futex
+222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+# 224 unused
+225 common tuxcall sys_ni_syscall
+226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64
+227 common io_setup sys_io_setup compat_sys_io_setup
+228 common io_destroy sys_io_destroy
+229 common io_getevents sys_io_getevents compat_sys_io_getevents
+230 common io_submit sys_io_submit compat_sys_io_submit
+231 common io_cancel sys_io_cancel
+232 nospu set_tid_address sys_set_tid_address
+233 common fadvise64 sys_fadvise64 ppc32_fadvise64
+234 nospu exit_group sys_exit_group
+235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+236 common epoll_create sys_epoll_create
+237 common epoll_ctl sys_epoll_ctl
+238 common epoll_wait sys_epoll_wait
+239 common remap_file_pages sys_remap_file_pages
+240 common timer_create sys_timer_create compat_sys_timer_create
+241 common timer_settime sys_timer_settime compat_sys_timer_settime
+242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+243 common timer_getoverrun sys_timer_getoverrun
+244 common timer_delete sys_timer_delete
+245 common clock_settime sys_clock_settime compat_sys_clock_settime
+246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
+247 common clock_getres sys_clock_getres compat_sys_clock_getres
+248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+249 32 swapcontext ppc_swapcontext ppc32_swapcontext
+249 64 swapcontext ppc64_swapcontext
+249 spu swapcontext sys_ni_syscall
+250 common tgkill sys_tgkill
+251 common utimes sys_utimes compat_sys_utimes
+252 common statfs64 sys_statfs64 compat_sys_statfs64
+253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+254 32 fadvise64_64 ppc_fadvise64_64
+254 spu fadvise64_64 sys_ni_syscall
+255 common rtas sys_rtas
+256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall
+256 64 sys_debug_setcontext sys_ni_syscall
+256 spu sys_debug_setcontext sys_ni_syscall
+# 257 reserved for vserver
+258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
+259 nospu mbind sys_mbind compat_sys_mbind
+260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+262 nospu mq_open sys_mq_open compat_sys_mq_open
+263 nospu mq_unlink sys_mq_unlink
+264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
+265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+266 nospu mq_notify sys_mq_notify compat_sys_mq_notify
+267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+268 nospu kexec_load sys_kexec_load compat_sys_kexec_load
+269 nospu add_key sys_add_key
+270 nospu request_key sys_request_key
+271 nospu keyctl sys_keyctl compat_sys_keyctl
+272 nospu waitid sys_waitid compat_sys_waitid
+273 nospu ioprio_set sys_ioprio_set
+274 nospu ioprio_get sys_ioprio_get
+275 nospu inotify_init sys_inotify_init
+276 nospu inotify_add_watch sys_inotify_add_watch
+277 nospu inotify_rm_watch sys_inotify_rm_watch
+278 nospu spu_run sys_spu_run
+279 nospu spu_create sys_spu_create
+280 nospu pselect6 sys_pselect6 compat_sys_pselect6
+281 nospu ppoll sys_ppoll compat_sys_ppoll
+282 common unshare sys_unshare
+283 common splice sys_splice
+284 common tee sys_tee
+285 common vmsplice sys_vmsplice compat_sys_vmsplice
+286 common openat sys_openat compat_sys_openat
+287 common mkdirat sys_mkdirat
+288 common mknodat sys_mknodat
+289 common fchownat sys_fchownat
+290 common futimesat sys_futimesat compat_sys_futimesat
+291 32 fstatat64 sys_fstatat64
+291 64 newfstatat sys_newfstatat
+291 spu newfstatat sys_newfstatat
+292 common unlinkat sys_unlinkat
+293 common renameat sys_renameat
+294 common linkat sys_linkat
+295 common symlinkat sys_symlinkat
+296 common readlinkat sys_readlinkat
+297 common fchmodat sys_fchmodat
+298 common faccessat sys_faccessat
+299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+301 common move_pages sys_move_pages compat_sys_move_pages
+302 common getcpu sys_getcpu
+303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+304 common utimensat sys_utimensat compat_sys_utimensat
+305 common signalfd sys_signalfd compat_sys_signalfd
+306 common timerfd_create sys_timerfd_create
+307 common eventfd sys_eventfd
+308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
+309 nospu fallocate sys_fallocate compat_sys_fallocate
+310 nospu subpage_prot sys_subpage_prot
+311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+313 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+314 common eventfd2 sys_eventfd2
+315 common epoll_create1 sys_epoll_create1
+316 common dup3 sys_dup3
+317 common pipe2 sys_pipe2
+318 nospu inotify_init1 sys_inotify_init1
+319 common perf_event_open sys_perf_event_open
+320 common preadv sys_preadv compat_sys_preadv
+321 common pwritev sys_pwritev compat_sys_pwritev
+322 nospu rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+323 nospu fanotify_init sys_fanotify_init
+324 nospu fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+325 common prlimit64 sys_prlimit64
+326 common socket sys_socket
+327 common bind sys_bind
+328 common connect sys_connect
+329 common listen sys_listen
+330 common accept sys_accept
+331 common getsockname sys_getsockname
+332 common getpeername sys_getpeername
+333 common socketpair sys_socketpair
+334 common send sys_send
+335 common sendto sys_sendto
+336 common recv sys_recv compat_sys_recv
+337 common recvfrom sys_recvfrom compat_sys_recvfrom
+338 common shutdown sys_shutdown
+339 common setsockopt sys_setsockopt compat_sys_setsockopt
+340 common getsockopt sys_getsockopt compat_sys_getsockopt
+341 common sendmsg sys_sendmsg compat_sys_sendmsg
+342 common recvmsg sys_recvmsg compat_sys_recvmsg
+343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+344 common accept4 sys_accept4
+345 common name_to_handle_at sys_name_to_handle_at
+346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+348 common syncfs sys_syncfs
+349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+350 common setns sys_setns
+351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
+352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+353 nospu finit_module sys_finit_module
+354 nospu kcmp sys_kcmp
+355 common sched_setattr sys_sched_setattr
+356 common sched_getattr sys_sched_getattr
+357 common renameat2 sys_renameat2
+358 common seccomp sys_seccomp
+359 common getrandom sys_getrandom
+360 common memfd_create sys_memfd_create
+361 common bpf sys_bpf
+362 nospu execveat sys_execveat compat_sys_execveat
+363 32 switch_endian sys_ni_syscall
+363 64 switch_endian ppc_switch_endian
+363 spu switch_endian sys_ni_syscall
+364 common userfaultfd sys_userfaultfd
+365 common membarrier sys_membarrier
+378 nospu mlock2 sys_mlock2
+379 nospu copy_file_range sys_copy_file_range
+380 common preadv2 sys_preadv2 compat_sys_preadv2
+381 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+382 nospu kexec_file_load sys_kexec_file_load
+383 nospu statx sys_statx
+384 nospu pkey_alloc sys_pkey_alloc
+385 nospu pkey_free sys_pkey_free
+386 nospu pkey_mprotect sys_pkey_mprotect
+387 nospu rseq sys_rseq
+388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
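
Given the '<number> <abi> <name> <entry point>' layout described in the table's header, a quick lookup is possible with awk; for instance, to find the powerpc number for perf_event_open:

    awk '$3 == "perf_event_open" { print $1, $2 }' tools/perf/arch/powerpc/entry/syscalls/syscall.tbl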
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index 1076393e6f43..e18a3556f5e3 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -63,7 +63,8 @@ static const char *reg_names[] = {
[PERF_REG_POWERPC_TRAP] = "trap",
[PERF_REG_POWERPC_DAR] = "dar",
[PERF_REG_POWERPC_DSISR] = "dsisr",
- [PERF_REG_POWERPC_SIER] = "sier"
+ [PERF_REG_POWERPC_SIER] = "sier",
+ [PERF_REG_POWERPC_MMCRA] = "mmcra"
};
static inline const char *perf_reg_name(int id)
diff --git a/tools/perf/arch/powerpc/tests/Build b/tools/perf/arch/powerpc/tests/Build
index d827ef384b33..3526ab0af9f9 100644
--- a/tools/perf/arch/powerpc/tests/Build
+++ b/tools/perf/arch/powerpc/tests/Build
@@ -1,4 +1,4 @@
-libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 5f39efef0856..5c178e4a1995 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 2e6595310420..7cf0b8803097 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,10 +1,11 @@
-libperf-y += header.o
-libperf-y += sym-handling.o
-libperf-y += kvm-stat.o
-libperf-y += perf_regs.o
+perf-y += header.o
+perf-y += sym-handling.o
+perf-y += kvm-stat.o
+perf-y += perf_regs.o
+perf-y += mem-events.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += skip-callchain-idx.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
index 596ad6aedaac..f9db341c47b6 100644
--- a/tools/perf/arch/powerpc/util/kvm-stat.c
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -3,6 +3,8 @@
#include "util/kvm-stat.h"
#include "util/parse-events.h"
#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
#include "book3s_hv_exits.h"
#include "book3s_hcalls.h"
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
new file mode 100644
index 000000000000..d08311f04e95
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/mem-events.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "mem-events.h"
+
+/* PowerPC does not support 'ldlat' parameter. */
+char *perf_mem_events__name(int i)
+{
+ if (i == PERF_MEM_EVENTS__LOAD)
+ return (char *) "cpu/mem-loads/";
+
+ return (char *) "cpu/mem-stores/";
+}
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index 07fcd977d93e..34d5134681d9 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(dar, PERF_REG_POWERPC_DAR),
SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
SMPL_REG(sier, PERF_REG_POWERPC_SIER),
+ SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA),
SMPL_REG_END
};
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 7c6eeb4633fe..2918bb16c892 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -16,6 +16,9 @@
#include "util/thread.h"
#include "util/callchain.h"
#include "util/debug.h"
+#include "util/dso.h"
+#include "util/map.h"
+#include "util/symbol.h"
/*
* When saving the callchain on Power, the kernel conservatively saves
diff --git a/tools/perf/arch/s390/Build b/tools/perf/arch/s390/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/s390/Build
+++ b/tools/perf/arch/s390/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index 4a233683c684..22797f043b84 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -1,9 +1,9 @@
-libperf-y += header.o
-libperf-y += kvm-stat.o
+perf-y += header.o
+perf-y += kvm-stat.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-y += machine.o
+perf-y += machine.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
index aaabab5e2830..7e3961a4b292 100644
--- a/tools/perf/arch/s390/util/kvm-stat.c
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -11,6 +11,7 @@
#include <errno.h>
#include "../../util/kvm-stat.h"
+#include "../../util/evsel.h"
#include <asm/sie.h>
define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
diff --git a/tools/perf/arch/sh/Build b/tools/perf/arch/sh/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/sh/Build
+++ b/tools/perf/arch/sh/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/sh/util/Build b/tools/perf/arch/sh/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/sh/util/Build
+++ b/tools/perf/arch/sh/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/sparc/Build b/tools/perf/arch/sparc/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/sparc/Build
+++ b/tools/perf/arch/sparc/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/sparc/util/Build b/tools/perf/arch/sparc/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/sparc/util/Build
+++ b/tools/perf/arch/sparc/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index db52fa22d3a1..a7dd46a5b678 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-y += tests/
+perf-y += util/
+perf-y += tests/
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 586849ff83a0..3d83d0c6982d 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -1,8 +1,8 @@
-libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-libperf-y += arch-tests.o
-libperf-y += rdpmc.o
-libperf-y += perf-time-to-tsc.o
-libperf-$(CONFIG_AUXTRACE) += insn-x86.o
-libperf-$(CONFIG_X86_64) += bp-modify.o
+perf-y += arch-tests.o
+perf-y += rdpmc.o
+perf-y += perf-time-to-tsc.o
+perf-$(CONFIG_AUXTRACE) += insn-x86.o
+perf-$(CONFIG_X86_64) += bp-modify.o
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 7879df34569a..6ad0a1cedb13 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 844b8f335532..7aab0be5fc5f 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -1,18 +1,18 @@
-libperf-y += header.o
-libperf-y += tsc.o
-libperf-y += pmu.o
-libperf-y += kvm-stat.o
-libperf-y += perf_regs.o
-libperf-y += group.o
-libperf-y += machine.o
-libperf-y += event.o
+perf-y += header.o
+perf-y += tsc.o
+perf-y += pmu.o
+perf-y += kvm-stat.o
+perf-y += perf_regs.o
+perf-y += group.o
+perf-y += machine.o
+perf-y += event.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
-libperf-$(CONFIG_AUXTRACE) += intel-pt.o
-libperf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 081353d7b095..865a9762f22e 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include "../../util/kvm-stat.h"
+#include "../../util/evsel.h"
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>
diff --git a/tools/perf/arch/xtensa/Build b/tools/perf/arch/xtensa/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/xtensa/Build
+++ b/tools/perf/arch/xtensa/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/xtensa/util/Build b/tools/perf/arch/xtensa/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/xtensa/util/Build
+++ b/tools/perf/arch/xtensa/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 44195514b19e..98ad783efc69 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -34,6 +34,7 @@
#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/time64.h>
+#include <linux/numa.h>
#include <numa.h>
#include <numaif.h>
@@ -298,7 +299,7 @@ static cpu_set_t bind_to_node(int target_node)
CPU_ZERO(&mask);
- if (target_node == -1) {
+ if (target_node == NUMA_NO_NODE) {
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
CPU_SET(cpu, &mask);
} else {
@@ -339,7 +340,7 @@ static void bind_to_memnode(int node)
unsigned long nodemask;
int ret;
- if (node == -1)
+ if (node == NUMA_NO_NODE)
return;
BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
@@ -1363,7 +1364,7 @@ static void init_thread_data(void)
int cpu;
/* Allow all nodes by default: */
- td->bind_node = -1;
+ td->bind_node = NUMA_NO_NODE;
/* Allow all CPUs by default: */
CPU_ZERO(&td->bind_cpumask);
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 93d679eaf1f4..67f9d9ffacfb 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -27,6 +27,7 @@
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
@@ -227,7 +228,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
* the DSO?
*/
if (al->sym != NULL) {
- rb_erase(&al->sym->rb_node,
+ rb_erase_cached(&al->sym->rb_node,
&al->map->dso->symbols);
symbol__delete(al->sym);
dso__reset_find_symbol_cache(al->map->dso);
@@ -305,7 +306,7 @@ static void hists__find_annotations(struct hists *hists,
struct perf_evsel *evsel,
struct perf_annotate *ann)
{
- struct rb_node *nd = rb_first(&hists->entries), *next;
+ struct rb_node *nd = rb_first_cached(&hists->entries), *next;
int key = K_RIGHT;
while (nd) {
@@ -440,7 +441,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (total_nr_samples == 0) {
- ui__error("The %s file has no samples!\n", session->data->file.path);
+ ui__error("The %s data has no samples!\n", session->data->path);
goto out;
}
@@ -577,7 +578,7 @@ int cmd_annotate(int argc, const char **argv)
if (quiet)
perf_quiet_option();
- data.file.path = input_name;
+ data.path = input_name;
annotate.session = perf_session__new(&data, false, &annotate.tool);
if (annotate.session == NULL)
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 115110a4796a..10457b10e568 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -416,8 +416,8 @@ int cmd_buildid_cache(int argc, const char **argv)
nsi = nsinfo__new(ns_id);
if (missing_filename) {
- data.file.path = missing_filename;
- data.force = force;
+ data.path = missing_filename;
+ data.force = force;
session = perf_session__new(&data, false, NULL);
if (session == NULL)
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 78abbe8d9d5f..f403e19488b5 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -52,11 +52,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
symbol__elf_init();
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index d340d2e42776..4272763a5e96 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -33,6 +33,7 @@
#include "ui/browsers/hists.h"
#include "thread.h"
#include "mem2node.h"
+#include "symbol.h"
struct c2c_hists {
struct hists hists;
@@ -1969,7 +1970,7 @@ static void calc_width(struct c2c_hist_entry *c2c_he)
set_nodestr(c2c_he);
}
-static int filter_cb(struct hist_entry *he)
+static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
@@ -1986,7 +1987,7 @@ static int filter_cb(struct hist_entry *he)
return 0;
}
-static int resort_cl_cb(struct hist_entry *he)
+static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *c2c_hists;
@@ -2073,7 +2074,7 @@ static int setup_nodes(struct perf_session *session)
#define HAS_HITMS(__h) ((__h)->stats.lcl_hitm || (__h)->stats.rmt_hitm)
-static int resort_hitm_cb(struct hist_entry *he)
+static int resort_hitm_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
@@ -2088,14 +2089,14 @@ static int resort_hitm_cb(struct hist_entry *he)
static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
int ret = 0;
while (next) {
struct hist_entry *he;
he = rb_entry(next, struct hist_entry, rb_node);
- ret = cb(he);
+ ret = cb(he, NULL);
if (ret)
break;
next = rb_next(&he->rb_node);
@@ -2215,7 +2216,7 @@ static void print_pareto(FILE *out)
if (WARN_ONCE(ret, "failed to setup sort entries\n"))
return;
- nd = rb_first(&c2c.hists.hists.entries);
+ nd = rb_first_cached(&c2c.hists.hists.entries);
for (; nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -2283,7 +2284,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
static void c2c_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -2343,7 +2344,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
struct c2c_cacheline_browser *cl_browser;
struct hist_browser *browser;
int key = -1;
- const char help[] =
+ static const char help[] =
" ENTER Toggle callchains (if present) \n"
" n Toggle Node details info \n"
" s Toggle full length of symbol and source line columns \n"
@@ -2424,7 +2425,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
{
struct hist_browser *browser;
int key = -1;
- const char help[] =
+ static const char help[] =
" d Display cacheline details \n"
" ENTER Toggle callchains (if present) \n"
" q Quit \n";
@@ -2749,8 +2750,8 @@ static int perf_c2c__report(int argc, const char **argv)
if (!input_name || !strlen(input_name))
input_name = "perf.data";
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
err = setup_display(display);
if (err)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 39db2ee32d48..58fe0e88215c 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -429,7 +429,7 @@ get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
static void hists__baseline_only(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@@ -437,13 +437,13 @@ static void hists__baseline_only(struct hists *hists)
else
root = hists->entries_in;
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
if (!hist_entry__next_pair(he)) {
- rb_erase(&he->rb_node_in, root);
+ rb_erase_cached(&he->rb_node_in, root);
hist_entry__delete(he);
}
}
@@ -451,7 +451,7 @@ static void hists__baseline_only(struct hists *hists)
static void hists__precompute(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@@ -459,7 +459,7 @@ static void hists__precompute(struct hists *hists)
else
root = hists->entries_in;
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he, *pair;
struct data__file *d;
@@ -708,7 +708,7 @@ static void data__fprintf(void)
data__for_each_file(i, d)
fprintf(stdout, "# [%d] %s %s\n",
- d->idx, d->data.file.path,
+ d->idx, d->data.path,
!d->idx ? "(Baseline)" : "");
fprintf(stdout, "#\n");
@@ -779,14 +779,14 @@ static int __cmd_diff(void)
data__for_each_file(i, d) {
d->session = perf_session__new(&d->data, false, &tool);
if (!d->session) {
- pr_err("Failed to open %s\n", d->data.file.path);
+ pr_err("Failed to open %s\n", d->data.path);
ret = -1;
goto out_delete;
}
ret = perf_session__process_events(d->session);
if (ret) {
- pr_err("Failed to process %s\n", d->data.file.path);
+ pr_err("Failed to process %s\n", d->data.path);
goto out_delete;
}
@@ -1289,9 +1289,9 @@ static int data_init(int argc, const char **argv)
data__for_each_file(i, d) {
struct perf_data *data = &d->data;
- data->file.path = use_default ? defaults[i] : argv[i];
- data->mode = PERF_DATA_MODE_READ,
- data->force = force,
+ data->path = use_default ? defaults[i] : argv[i];
+ data->mode = PERF_DATA_MODE_READ,
+ data->force = force,
d->idx = i;
}
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index e06e822ce634..6e4f63b0da4a 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -23,9 +23,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
struct perf_session *session;
struct perf_evsel *pos;
struct perf_data data = {
- .file = {
- .path = file_name,
- },
+ .path = file_name,
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index eda41673c4f3..24086b7f1b14 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -12,6 +12,7 @@
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
@@ -19,6 +20,7 @@
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
+#include "util/symbol.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
@@ -768,10 +770,8 @@ int cmd_inject(int argc, const char **argv)
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
.output = {
- .file = {
- .path = "-",
- },
- .mode = PERF_DATA_MODE_WRITE,
+ .path = "-",
+ .mode = PERF_DATA_MODE_WRITE,
},
};
struct perf_data data = {
@@ -784,7 +784,7 @@ int cmd_inject(int argc, const char **argv)
"Inject build-ids into the output stream"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
- OPT_STRING('o', "output", &inject.output.file.path, "file",
+ OPT_STRING('o', "output", &inject.output.path, "file",
"output file name"),
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
@@ -832,7 +832,7 @@ int cmd_inject(int argc, const char **argv)
inject.tool.ordered_events = inject.sched_stat;
- data.file.path = inject.input_name;
+ data.path = inject.input_name;
inject.session = perf_session__new(&data, true, &inject.tool);
if (inject.session == NULL)
return -1;
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index 90d1a2305b72..bc7a2bc7aed7 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -13,6 +13,7 @@
#include <subcmd/parse-options.h>
#include "debug.h"
#include "machine.h"
+#include "map.h"
#include "symbol.h"
static int __cmd_kallsyms(int argc, const char **argv)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index b63bca4b0c2a..fa520f4b8095 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -6,6 +6,7 @@
#include "util/evsel.h"
#include "util/util.h"
#include "util/config.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
@@ -334,7 +335,7 @@ static int build_alloc_func_list(void)
struct alloc_func *func;
struct machine *machine = &kmem_session->machines.host;
regex_t alloc_func_regex;
- const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
+ static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
if (ret) {
@@ -1924,7 +1925,7 @@ int cmd_kmem(int argc, const char **argv)
NULL
};
struct perf_session *session;
- const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
+ static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
int ret = perf_config(kmem_config, NULL);
if (ret)
@@ -1948,7 +1949,7 @@ int cmd_kmem(int argc, const char **argv)
return __cmd_record(argc, argv);
}
- data.file.path = input_name;
+ data.path = input_name;
kmem_session = session = perf_session__new(&data, false, &perf_kmem);
if (session == NULL)
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 3d4cbc4e87c7..dbb6f737a3e2 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1080,11 +1080,9 @@ static int read_events(struct perf_kvm_stat *kvm)
.ordered_events = true,
};
struct perf_data file = {
- .file = {
- .path = kvm->file_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = kvm->force,
+ .path = kvm->file_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = kvm->force,
};
kvm->tool = eops;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index ead221e49f00..c9f98d00c0e9 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -82,9 +82,9 @@ int cmd_list(int argc, const char **argv)
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(NULL, NULL, raw_dump);
else if (strcmp(argv[i], "metric") == 0)
- metricgroup__print(true, false, NULL, raw_dump);
+ metricgroup__print(true, false, NULL, raw_dump, details_flag);
else if (strcmp(argv[i], "metricgroup") == 0)
- metricgroup__print(false, true, NULL, raw_dump);
+ metricgroup__print(false, true, NULL, raw_dump, details_flag);
else if ((sep = strchr(argv[i], ':')) != NULL) {
int sep_idx;
@@ -102,7 +102,7 @@ int cmd_list(int argc, const char **argv)
s[sep_idx] = '\0';
print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
print_sdt_events(s, s + sep_idx + 1, raw_dump);
- metricgroup__print(true, true, s, raw_dump);
+ metricgroup__print(true, true, s, raw_dump, details_flag);
free(s);
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
@@ -119,7 +119,7 @@ int cmd_list(int argc, const char **argv)
details_flag);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
- metricgroup__print(true, true, NULL, raw_dump);
+ metricgroup__print(true, true, NULL, raw_dump, details_flag);
free(s);
}
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 6e0189df2b3b..b9810a8d350a 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -866,11 +866,9 @@ static int __cmd_report(bool display_info)
.ordered_events = true,
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
session = perf_session__new(&data, false, &eops);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 57393e94d156..f45c8b502f63 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -13,6 +13,7 @@
#include "util/data.h"
#include "util/mem-events.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#define MEM_OPERATION_LOAD 0x1
@@ -238,11 +239,9 @@ static int process_sample_event(struct perf_tool *tool,
static int report_raw_events(struct perf_mem *mem)
{
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = mem->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = mem->force,
};
int ret;
struct perf_session *session = perf_session__new(&data, false,
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 99de91698de1..46d3c2deeb40 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -32,6 +32,7 @@
#include "perf.h"
#include "builtin.h"
+#include "namespaces.h"
#include "util/util.h"
#include "util/strlist.h"
#include "util/strfilter.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 882285fb9f64..f3f7f3100336 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -23,7 +23,6 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
@@ -39,8 +38,10 @@
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
+#include "util/cpu-set-sched.h"
#include "util/time-utils.h"
#include "util/units.h"
+#include "util/bpf-event.h"
#include "asm/bug.h"
#include <errno.h>
@@ -81,12 +82,17 @@ struct record {
bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
+ cpu_set_t affinity_mask;
};
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
+static const char *affinity_tags[PERF_AFFINITY_MAX] = {
+ "SYS", "NODE", "CPU"
+};
+
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
@@ -531,9 +537,13 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
+ if (opts->affinity != PERF_AFFINITY_SYS)
+ cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
+ opts->auxtrace_snapshot_mode,
+ opts->nr_cblocks, opts->affinity) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -566,7 +576,6 @@ static int record__open(struct record *rec)
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
- struct perf_evsel_config_term *err_term;
int rc = 0;
/*
@@ -619,14 +628,6 @@ try_again:
goto out;
}
- if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- rc = -1;
- goto out;
- }
-
rc = record__mmap(rec);
if (rc)
goto out;
@@ -659,10 +660,9 @@ static int process_sample_event(struct perf_tool *tool,
static int process_buildids(struct record *rec)
{
- struct perf_data *data = &rec->data;
struct perf_session *session = rec->session;
- if (data->size == 0)
+ if (perf_data__size(&rec->data) == 0)
return 0;
/*
@@ -722,6 +722,16 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
+static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
+{
+ if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+ !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
+ CPU_ZERO(&rec->affinity_mask);
+ CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
+ sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+ }
+}
+
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
bool overwrite)
{
@@ -749,6 +759,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
struct perf_mmap *map = &maps[i];
if (map->base) {
+ record__adjust_affinity(rec, map);
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) != 0) {
rc = -1;
@@ -839,7 +850,7 @@ record__finish_output(struct record *rec)
return;
rec->session->header.data_size += rec->bytes_written;
- data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
+ data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
if (!rec->no_buildid) {
process_buildids(rec);
@@ -907,7 +918,7 @@ record__switch_output(struct record *rec, bool at_exit)
if (!quiet)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
- data->file.path, timestamp);
+ data->path, timestamp);
/* Output tracking events */
if (!at_exit) {
@@ -1082,6 +1093,11 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
+ err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
+ machine, opts);
+ if (err < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
1);
@@ -1445,7 +1461,7 @@ out_child:
fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
perf_data__size(data) / 1024.0 / 1024.0,
- data->file.path, postfix, samples);
+ data->path, postfix, samples);
}
out_delete_session:
@@ -1639,6 +1655,21 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
return -1;
}
+static int record__parse_affinity(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (unset || !str)
+ return 0;
+
+ if (!strcasecmp(str, "node"))
+ opts->affinity = PERF_AFFINITY_NODE;
+ else if (!strcasecmp(str, "cpu"))
+ opts->affinity = PERF_AFFINITY_CPU;
+
+ return 0;
+}
+
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
@@ -1831,7 +1862,7 @@ static struct option __record_options[] = {
OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
- OPT_STRING('o', "output", &record.data.file.path, "file",
+ OPT_STRING('o', "output", &record.data.path, "file",
"output file name"),
OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
&record.opts.no_inherit_set,
@@ -1839,6 +1870,7 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
+ OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1946,6 +1978,9 @@ static struct option __record_options[] = {
&nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
record__aio_parse),
#endif
+ OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
+ "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
+ record__parse_affinity),
OPT_END()
};
@@ -1980,6 +2015,9 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+ CPU_ZERO(&rec->affinity_mask);
+ rec->opts.affinity = PERF_AFFINITY_SYS;
+
rec->evlist = perf_evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
@@ -2143,6 +2181,8 @@ int cmd_record(int argc, const char **argv)
if (verbose > 0)
pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+ pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+
err = __cmd_record(&record, argc, argv);
out:
perf_evlist__delete(rec->evlist);
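
The new --affinity=node|cpu mode above makes the trace reading thread follow the NUMA node or CPU of the mmap buffer it is currently draining, via record__adjust_affinity(). A minimal standalone sketch of that sched_setaffinity() pattern (the helper name pin_to_mask is illustrative, not part of the patch):

    #define _GNU_SOURCE
    #include <sched.h>

    /* Re-pin the calling thread only when the target mask differs from
     * the one currently applied, as record__adjust_affinity() does. */
    static void pin_to_mask(cpu_set_t *current, cpu_set_t *target)
    {
            if (!CPU_EQUAL(current, target)) {
                    CPU_ZERO(current);
                    CPU_OR(current, current, target);
                    sched_setaffinity(0, sizeof(*current), current);
            }
    }

The default stays PERF_AFFINITY_SYS, in which case no re-pinning is done at all.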
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4958095be4fc..1532ebde6c4b 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
+#include "util/map.h"
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/values.h"
@@ -615,6 +616,21 @@ static int report__collapse_hists(struct report *rep)
return ret;
}
+static int hists__resort_cb(struct hist_entry *he, void *arg)
+{
+ struct report *rep = arg;
+ struct symbol *sym = he->ms.sym;
+
+ if (rep->symbol_ipc && sym && !sym->annotate2) {
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
+
+ symbol__annotate2(sym, he->ms.map, evsel,
+ &annotation__default_options, NULL);
+ }
+
+ return 0;
+}
+
static void report__output_resort(struct report *rep)
{
struct ui_progress prog;
@@ -622,8 +638,10 @@ static void report__output_resort(struct report *rep)
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
- evlist__for_each_entry(rep->session->evlist, pos)
- perf_evsel__output_resort(pos, &prog);
+ evlist__for_each_entry(rep->session->evlist, pos) {
+ perf_evsel__output_resort_cb(pos, &prog,
+ hists__resort_cb, rep);
+ }
ui_progress__finish();
}
@@ -753,7 +771,8 @@ static int tasks_print(struct report *rep, FILE *fp)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
task = tasks + itask++;
task->thread = rb_entry(nd, struct thread, rb_node);
@@ -880,7 +899,7 @@ static int __cmd_report(struct report *rep)
rep->nr_entries += evsel__hists(pos)->nr_entries;
if (rep->nr_entries == 0) {
- ui__error("The %s file has no samples!\n", data->file.path);
+ ui__error("The %s data has no samples!\n", data->path);
return 0;
}
@@ -956,9 +975,9 @@ int cmd_report(int argc, const char **argv)
int branch_mode = -1;
bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
- const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
- CALLCHAIN_REPORT_HELP
- "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
+ static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
+ CALLCHAIN_REPORT_HELP
+ "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
const char * const report_usage[] = {
"perf report [<options>]",
@@ -1188,8 +1207,8 @@ int cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
repeat:
session = perf_session__new(&data, false, &report.tool);
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index cbf39dab19c1..275f2d92a7bf 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -213,7 +213,7 @@ struct perf_sched {
u64 all_runtime;
u64 all_count;
u64 cpu_last_switched[MAX_CPUS];
- struct rb_root atom_root, sorted_atom_root, merged_atom_root;
+ struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
struct list_head sort_list, cmp_pid;
bool force;
bool skip_merge;
@@ -271,7 +271,7 @@ struct evsel_runtime {
struct idle_thread_runtime {
struct thread_runtime tr;
struct thread *last_thread;
- struct rb_root sorted_root;
+ struct rb_root_cached sorted_root;
struct callchain_root callchain;
struct callchain_cursor cursor;
};
@@ -950,10 +950,10 @@ thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *
}
static struct work_atoms *
-thread_atoms_search(struct rb_root *root, struct thread *thread,
+thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
struct list_head *sort_list)
{
- struct rb_node *node = root->rb_node;
+ struct rb_node *node = root->rb_root.rb_node;
struct work_atoms key = { .thread = thread };
while (node) {
@@ -976,10 +976,11 @@ thread_atoms_search(struct rb_root *root, struct thread *thread,
}
static void
-__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
+__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
struct list_head *sort_list)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+ bool leftmost = true;
while (*new) {
struct work_atoms *this;
@@ -992,12 +993,14 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
if (cmp > 0)
new = &((*new)->rb_left);
- else
+ else {
new = &((*new)->rb_right);
+ leftmost = false;
+ }
}
rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
+ rb_insert_color_cached(&data->node, root, leftmost);
}
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
@@ -1447,15 +1450,15 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
- struct rb_root *root = &sched->atom_root;
+ struct rb_root_cached *root = &sched->atom_root;
again:
for (;;) {
struct work_atoms *data;
- node = rb_first(root);
+ node = rb_first_cached(root);
if (!node)
break;
- rb_erase(node, root);
+ rb_erase_cached(node, root);
data = rb_entry(node, struct work_atoms, node);
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
@@ -1782,11 +1785,9 @@ static int perf_sched__read_events(struct perf_sched *sched)
};
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
int rc = -1;
@@ -2762,12 +2763,12 @@ static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
return ret;
}
-static size_t timehist_print_idlehist_callchain(struct rb_root *root)
+static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
size_t ret = 0;
FILE *fp = stdout;
struct callchain_node *chain;
- struct rb_node *rb_node = rb_first(root);
+ struct rb_node *rb_node = rb_first_cached(root);
printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
@@ -2868,7 +2869,7 @@ static void timehist_print_summary(struct perf_sched *sched,
if (itr == NULL)
continue;
- callchain_param.sort(&itr->sorted_root, &itr->callchain,
+ callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
0, &callchain_param);
printf(" CPU %2d:", i);
@@ -2955,11 +2956,9 @@ static int perf_sched__timehist(struct perf_sched *sched)
{ "sched:sched_migrate_task", timehist_migrate_task_event, },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
struct perf_session *session;
@@ -3074,11 +3073,12 @@ static void print_bad_events(struct perf_sched *sched)
}
}
-static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
+static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
struct work_atoms *this;
const char *comm = thread__comm_str(data->thread), *this_comm;
+ bool leftmost = true;
while (*new) {
int cmp;
@@ -3092,6 +3092,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
new = &((*new)->rb_left);
} else if (cmp < 0) {
new = &((*new)->rb_right);
+ leftmost = false;
} else {
this->num_merged++;
this->total_runtime += data->total_runtime;
@@ -3109,7 +3110,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
data->num_merged++;
rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
+ rb_insert_color_cached(&data->node, root, leftmost);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
@@ -3120,8 +3121,8 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
if (sched->skip_merge)
return;
- while ((node = rb_first(&sched->atom_root))) {
- rb_erase(node, &sched->atom_root);
+ while ((node = rb_first_cached(&sched->atom_root))) {
+ rb_erase_cached(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
__merge_work_atoms(&sched->merged_atom_root, data);
}
@@ -3143,7 +3144,7 @@ static int perf_sched__lat(struct perf_sched *sched)
printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
printf(" -----------------------------------------------------------------------------------------------------------------\n");
- next = rb_first(&sched->sorted_atom_root);
+ next = rb_first_cached(&sched->sorted_atom_root);
while (next) {
struct work_atoms *work_list;
@@ -3336,7 +3337,7 @@ static int __cmd_record(int argc, const char **argv)
int cmd_sched(int argc, const char **argv)
{
- const char default_sort_order[] = "avg, max, switch, runtime";
+ static const char default_sort_order[] = "avg, max, switch, runtime";
struct perf_sched sched = {
.tool = {
.sample = perf_sched__process_tracepoint_sample,
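
The switch from rb_root to rb_root_cached above follows the same pattern everywhere: insertion tracks a leftmost flag so rb_first_cached() stays O(1), and first/erase go through the *_cached helpers. A condensed sketch of that insert pattern, assuming the tools/ copy of linux/rbtree.h (the helper and comparator names are illustrative):

    #include <stdbool.h>
    #include <linux/rbtree.h>

    static void insert_cached(struct rb_root_cached *root, struct rb_node *node,
                              int (*cmp)(struct rb_node *a, struct rb_node *b))
    {
            struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
            bool leftmost = true;

            while (*p) {
                    parent = *p;
                    if (cmp(node, *p) < 0) {
                            p = &(*p)->rb_left;
                    } else {
                            p = &(*p)->rb_right;
                            leftmost = false;   /* went right at least once */
                    }
            }
            rb_link_node(node, parent, p);
            rb_insert_color_cached(node, root, leftmost);
    }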
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d079f36d342d..2d8cb1d1682c 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -10,6 +10,7 @@
#include "util/perf_regs.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
@@ -148,6 +149,7 @@ static struct {
unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
+ u64 user_set_fields;
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
@@ -344,7 +346,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
if (attr->sample_type & sample_type)
return 0;
- if (output[type].user_set) {
+ if (output[type].user_set_fields & field) {
if (allow_user_set)
return 0;
evname = perf_evsel__name(evsel);
@@ -1681,13 +1683,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
.force_header = false,
};
struct perf_evsel *ev2;
- static bool init;
u64 val;
- if (!init) {
- perf_stat__init_shadow_stats();
- init = true;
- }
if (!evsel->stats)
perf_evlist__alloc_stats(script->session->evlist, false);
if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1794,7 +1791,7 @@ static void process_event(struct perf_script *script,
return;
}
- if (PRINT_FIELD(TRACE)) {
+ if (PRINT_FIELD(TRACE) && sample->raw_data) {
event_format__fprintf(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size, fp);
}
@@ -2359,6 +2356,8 @@ static int __cmd_script(struct perf_script *script)
signal(SIGINT, sig_handler);
+ perf_stat__init_shadow_stats();
+
/* override event processing functions */
if (script->show_task_events) {
script->tool.comm = process_comm_event;
@@ -2562,6 +2561,10 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
pr_warning("Overriding previous field request for %s events.\n",
event_type(type));
+ /* Don't override defaults for +- */
+ if (strchr(tok, '+') || strchr(tok, '-'))
+ goto parse;
+
output[type].fields = 0;
output[type].user_set = true;
output[type].wildcard_set = false;
@@ -2630,10 +2633,13 @@ parse:
pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
all_output_options[i].str, event_type(j));
} else {
- if (change == REMOVE)
+ if (change == REMOVE) {
output[j].fields &= ~all_output_options[i].field;
- else
+ output[j].user_set_fields &= ~all_output_options[i].field;
+ } else {
output[j].fields |= all_output_options[i].field;
+ output[j].user_set_fields |= all_output_options[i].field;
+ }
output[j].user_set = true;
output[j].wildcard_set = true;
}
@@ -2646,6 +2652,10 @@ parse:
rc = -EINVAL;
goto out;
}
+ if (change == REMOVE)
+ output[type].fields &= ~all_output_options[i].field;
+ else
+ output[type].fields |= all_output_options[i].field;
output[type].user_set = true;
output[type].wildcard_set = true;
}
@@ -2945,10 +2955,8 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
};
char *temp;
int i = 0;
@@ -3421,8 +3429,8 @@ int cmd_script(int argc, const char **argv)
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
@@ -3648,7 +3656,7 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
}
- input = open(data.file.path, O_RDONLY); /* input_name */
+ input = open(data.path, O_RDONLY); /* input_name */
if (input < 0) {
err = -errno;
perror("failed to open file");
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 1410d66192f7..7b8f09b0b8bf 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -52,7 +52,6 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
@@ -83,7 +82,6 @@
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
-#include <sys/wait.h>
#include "sane_ctype.h"
@@ -418,7 +416,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
int status = 0;
const bool forks = (argc > 0);
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
- struct perf_evsel_config_term *err_term;
if (interval) {
ts.tv_sec = interval / USEC_PER_MSEC;
@@ -515,13 +512,6 @@ try_again:
return -1;
}
- if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(counter), errno,
- str_error_r(errno, msg, sizeof(msg)));
- return -1;
- }
-
if (STAT_RECORD) {
int err, fd = perf_data__fd(&perf_stat.data);
@@ -561,7 +551,8 @@ try_again:
break;
}
}
- wait4(child_pid, &status, 0, &stat_config.ru_data);
+ if (child_pid != -1)
+ wait4(child_pid, &status, 0, &stat_config.ru_data);
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
@@ -1331,7 +1322,7 @@ static int __cmd_record(int argc, const char **argv)
PARSE_OPT_STOP_AT_NON_OPTION);
if (output_name)
- data->file.path = output_name;
+ data->path = output_name;
if (stat_config.run_count != 1 || forever) {
pr_err("Cannot use -r option with perf stat record.\n");
@@ -1532,8 +1523,8 @@ static int __cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- perf_stat.data.file.path = input_name;
- perf_stat.data.mode = PERF_DATA_MODE_READ;
+ perf_stat.data.path = input_name;
+ perf_stat.data.mode = PERF_DATA_MODE_READ;
session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
if (session == NULL)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 775b99833e51..9b98687a27b9 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1602,11 +1602,9 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{ "syscalls:sys_exit_select", process_exit_poll },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = tchart->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = tchart->force,
};
struct perf_session *session = perf_session__new(&data, false,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fe3ecfb2e64b..231a90daa958 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -22,13 +22,14 @@
#include "perf.h"
#include "util/annotate.h"
+#include "util/bpf-event.h"
#include "util/config.h"
#include "util/color.h"
-#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
@@ -366,7 +367,7 @@ static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
if (p)
*p = 0;
- next = rb_first(&hists->entries);
+ next = rb_first_cached(&hists->entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
@@ -1028,12 +1029,7 @@ out_err:
static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
- if (!perf_hpp_list.sym) {
- if (callchain->enabled) {
- ui__error("Selected -g but \"sym\" not present in --sort/-s.");
- return -EINVAL;
- }
- } else if (callchain->mode != CHAIN_NONE) {
+ if (callchain->mode != CHAIN_NONE) {
if (callchain_register_param(callchain) < 0) {
ui__error("Can't register callchain params.\n");
return -EINVAL;
@@ -1189,10 +1185,6 @@ static void init_process_thread(struct perf_top *top)
static int __cmd_top(struct perf_top *top)
{
- char msg[512];
- struct perf_evsel *pos;
- struct perf_evsel_config_term *err_term;
- struct perf_evlist *evlist = top->evlist;
struct record_opts *opts = &top->record_opts;
pthread_t thread, thread_process;
int ret;
@@ -1220,6 +1212,12 @@ static int __cmd_top(struct perf_top *top)
init_process_thread(top);
+ ret = perf_event__synthesize_bpf_events(&top->tool, perf_event__process,
+ &top->session->machines.host,
+ &top->record_opts);
+ if (ret < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
machine__synthesize_threads(&top->session->machines.host, &opts->target,
top->evlist->threads, false,
top->nr_threads_synthesize);
@@ -1237,14 +1235,6 @@ static int __cmd_top(struct perf_top *top)
if (ret)
goto out_delete;
- ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
- if (ret) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- goto out_delete;
- }
-
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index adbf28183560..f5b3a1e9c1dd 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -19,6 +19,7 @@
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
+#include "util/bpf_map.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
@@ -29,6 +30,8 @@
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
+#include "util/map.h"
+#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
@@ -85,6 +88,9 @@ struct trace {
*augmented;
} events;
} syscalls;
+ struct {
+ struct bpf_map *map;
+ } dump;
struct record_opts opts;
struct perf_evlist *evlist;
struct machine *host;
@@ -1039,6 +1045,9 @@ static const size_t trace__entry_str_size = 2048;
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
+ if (fd < 0)
+ return NULL;
+
if (fd > ttrace->files.max) {
struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
@@ -1758,6 +1767,7 @@ static int trace__printf_interrupted_entry(struct trace *trace)
{
struct thread_trace *ttrace;
size_t printed;
+ int len;
if (trace->failure_only || trace->current == NULL)
return 0;
@@ -1768,9 +1778,14 @@ static int trace__printf_interrupted_entry(struct trace *trace)
return 0;
printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
- printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str);
- ttrace->entry_pending = false;
+ printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
+
+ if (len < trace->args_alignment - 4)
+ printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
+ printed += fprintf(trace->output, " ...\n");
+
+ ttrace->entry_pending = false;
++trace->nr_events_printed;
return printed;
@@ -2026,9 +2041,10 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (ttrace->entry_pending) {
printed = fprintf(trace->output, "%s", ttrace->entry_str);
} else {
- fprintf(trace->output, " ... [");
+ printed += fprintf(trace->output, " ... [");
color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
- fprintf(trace->output, "]: %s()", sc->name);
+ printed += 9;
+ printed += fprintf(trace->output, "]: %s()", sc->name);
}
printed++; /* the closing ')' */
@@ -2507,19 +2523,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
+ bool found = false;
+ struct perf_evsel *evsel, *tmp;
+ struct parse_events_error err = { .idx = 0, };
+ int ret = parse_events(evlist, "probe:vfs_getname*", &err);
- if (IS_ERR(evsel))
+ if (ret)
return false;
- if (perf_evsel__field(evsel, "pathname") == NULL) {
+ evlist__for_each_entry_safe(evlist, evsel, tmp) {
+ if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
+ continue;
+
+ if (perf_evsel__field(evsel, "pathname")) {
+ evsel->handler = trace__vfs_getname;
+ found = true;
+ continue;
+ }
+
+ list_del_init(&evsel->node);
+ evsel->evlist = NULL;
perf_evsel__delete(evsel);
- return false;
}
- evsel->handler = trace__vfs_getname;
- perf_evlist__add(evlist, evsel);
- return true;
+ return found;
}
static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
@@ -2748,7 +2775,8 @@ static int trace__set_filter_loop_pids(struct trace *trace)
if (parent == NULL)
break;
- if (!strcmp(thread__comm_str(parent), "sshd")) {
+ if (!strcmp(thread__comm_str(parent), "sshd") ||
+ strstarts(thread__comm_str(parent), "gnome-terminal")) {
pids[nr++] = parent->tid;
break;
}
@@ -2973,6 +3001,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
+ if (trace->dump.map)
+ bpf_map__fprintf(trace->dump.map, trace->output);
+
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -3123,11 +3154,9 @@ static int trace__replay(struct trace *trace)
{ "probe:vfs_getname", trace__vfs_getname, },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = trace->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = trace->force,
};
struct perf_session *session;
struct perf_evsel *evsel;
@@ -3662,6 +3691,7 @@ int cmd_trace(int argc, const char **argv)
.max_stack = UINT_MAX,
.max_events = ULONG_MAX,
};
+ const char *map_dump_str = NULL;
const char *output_name = NULL;
const struct option trace_options[] = {
OPT_CALLBACK('e', "event", &trace, "event",
@@ -3694,6 +3724,9 @@ int cmd_trace(int argc, const char **argv)
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
+#ifdef HAVE_LIBBPF_SUPPORT
+ OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
+#endif
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_BOOLEAN('T', "time", &trace.full_time,
@@ -3788,6 +3821,14 @@ int cmd_trace(int argc, const char **argv)
err = -1;
+ if (map_dump_str) {
+ trace.dump.map = bpf__find_map_by_name(map_dump_str);
+ if (trace.dump.map == NULL) {
+ pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
+ goto out;
+ }
+ }
+
if (trace.trace_pgfaults) {
trace.opts.sample_address = true;
trace.opts.sample_time = true;
@@ -3847,7 +3888,8 @@ int cmd_trace(int argc, const char **argv)
goto init_augmented_syscall_tp;
}
- if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
+ if (trace.syscalls.events.augmented->priv == NULL &&
+ strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
struct perf_evsel *augmented = trace.syscalls.events.augmented;
if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
perf_evsel__init_augmented_syscall_tp_args(augmented))
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 6cb98f8570a2..7b55613924de 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -10,6 +10,7 @@ include/uapi/linux/fs.h
include/uapi/linux/kcmp.h
include/uapi/linux/kvm.h
include/uapi/linux/in.h
+include/uapi/linux/mount.h
include/uapi/linux/perf_event.h
include/uapi/linux/prctl.h
include/uapi/linux/sched.h
@@ -49,7 +50,6 @@ arch/parisc/include/uapi/asm/errno.h
arch/powerpc/include/uapi/asm/errno.h
arch/sparc/include/uapi/asm/errno.h
arch/x86/include/uapi/asm/errno.h
-arch/powerpc/include/uapi/asm/unistd.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/__fls.h
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index a28dca2582aa..0453ba26cdbd 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -222,6 +222,10 @@ The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a
way to request that counting of events be restricted to times when the
CPU is in user, kernel and/or hypervisor mode.
+Furthermore the 'exclude_host' and 'exclude_guest' bits provide a way
+to request counting of events restricted to guest and host contexts when
+using Linux as the hypervisor.
+
The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap
operations, these can be used to relate userspace IP addresses to actual
code, even after the mapping (or even the whole process) is gone,
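
A minimal sketch of how a counter restricted to guest context might be opened with these bits (the helper name and the choice of the cycles event are assumptions, not part of this document):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Count CPU cycles only while the CPU is running guest code. */
    static int open_guest_cycles(pid_t pid, int cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.exclude_host = 1;   /* drop host-context events  */
            attr.exclude_guest = 0;  /* keep guest-context events */

            /* glibc has no wrapper, so use the raw syscall. */
            return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
    }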
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
index 53c233370fae..f9b2161e1ca4 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -18,23 +18,13 @@
#include <pid_filter.h>
/* bpf-output associated map */
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall {
bool enabled;
};
-struct bpf_map SEC("maps") syscalls = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct syscall),
- .max_entries = 512,
-};
+bpf_map(syscalls, ARRAY, int, struct syscall, 512);
struct syscall_enter_args {
unsigned long long common_tp_fields;
@@ -141,8 +131,8 @@ int sys_enter(struct syscall_enter_args *args)
len = sizeof(augmented_args.args);
}
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
- return 0;
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
}
SEC("raw_syscalls:sys_exit")
diff --git a/tools/perf/examples/bpf/augmented_syscalls.c b/tools/perf/examples/bpf/augmented_syscalls.c
index 2ae44813ef2d..524fdb8534b3 100644
--- a/tools/perf/examples/bpf/augmented_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_syscalls.c
@@ -19,12 +19,8 @@
#include <stdio.h>
#include <linux/socket.h>
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+/* bpf-output associated map */
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall_exit_args {
unsigned long long common_tp_fields;
@@ -55,9 +51,9 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size; \
len &= sizeof(augmented_args.filename.value) - 1; \
} \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, len); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, len); \
} \
int syscall_exit(syscall)(struct syscall_exit_args *args) \
{ \
@@ -125,10 +121,10 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
/* addrlen = augmented_args.args.addrlen; */ \
/* */ \
probe_read(&augmented_args.addr, addrlen, args->addr_ptr); \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen);\
} \
int syscall_exit(syscall)(struct syscall_exit_args *args) \
{ \
diff --git a/tools/perf/examples/bpf/etcsnoop.c b/tools/perf/examples/bpf/etcsnoop.c
index b59e8812ee8c..e81b535346c0 100644
--- a/tools/perf/examples/bpf/etcsnoop.c
+++ b/tools/perf/examples/bpf/etcsnoop.c
@@ -21,12 +21,8 @@
#include <stdio.h>
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+/* bpf-output associated map */
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct augmented_filename {
int size;
@@ -49,11 +45,11 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
args->filename_ptr); \
if (__builtin_memcmp(augmented_args.filename.value, etc, 4) != 0) \
return 0; \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
- augmented_args.filename.size)); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
+ augmented_args.filename.size)); \
}
struct syscall_enter_openat_args {
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index e667577207dc..5df7ed9d9020 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -18,6 +18,14 @@ struct bpf_map {
unsigned int numa_node;
};
+#define bpf_map(name, _type, type_key, type_val, _max_entries) \
+struct bpf_map SEC("maps") name = { \
+ .type = BPF_MAP_TYPE_##_type, \
+ .key_size = sizeof(type_key), \
+ .value_size = sizeof(type_val), \
+ .max_entries = _max_entries, \
+}
+
/*
* FIXME: this should receive .max_entries as a parameter, as careful
* tuning of these limits is needed to avoid hitting limits that
@@ -26,13 +34,7 @@ struct bpf_map {
* For the current need, 'perf trace --filter-pids', 64 should
* be good enough, but this surely needs to be revisited.
*/
-#define pid_map(name, value_type) \
-struct bpf_map SEC("maps") name = { \
- .type = BPF_MAP_TYPE_HASH, \
- .key_size = sizeof(pid_t), \
- .value_size = sizeof(value_type), \
- .max_entries = 64, \
-}
+#define pid_map(name, value_type) bpf_map(name, HASH, pid_t, value_type, 64)
static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
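
To see what the new wrapper buys, this is how a declaration using it expands; the map name my_pids is illustrative:

    /* Using the wrapper (equivalent to pid_map(my_pids, bool)): */
    bpf_map(my_pids, HASH, pid_t, bool, 64);

    /* ...which the preprocessor turns into the old long form: */
    struct bpf_map SEC("maps") my_pids = {
            .type        = BPF_MAP_TYPE_HASH,
            .key_size    = sizeof(pid_t),
            .value_size  = sizeof(bool),
            .max_entries = 64,
    };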
diff --git a/tools/perf/perf-read-vdso.c b/tools/perf/perf-read-vdso.c
index 8c0ca0cc428f..aaa5210ea84a 100644
--- a/tools/perf/perf-read-vdso.c
+++ b/tools/perf/perf-read-vdso.c
@@ -5,17 +5,17 @@
#define VDSO__MAP_NAME "[vdso]"
/*
- * Include definition of find_vdso_map() also used in util/vdso.c for
+ * Include definition of find_map() also used in util/vdso.c for
* building perf.
*/
-#include "util/find-vdso-map.c"
+#include "util/find-map.c"
int main(void)
{
void *start, *end;
size_t size, written;
- if (find_vdso_map(&start, &end))
+ if (find_map(&start, &end, VDSO__MAP_NAME))
return 1;
size = end - start;
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 388c6dd128b8..b120e547ddc7 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -66,6 +66,7 @@ struct record_opts {
bool ignore_missing_thread;
bool strict_freq;
bool sample_id;
+ bool bpf_event;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
@@ -83,6 +84,14 @@ struct record_opts {
clockid_t clockid;
u64 clockid_res_ns;
int nr_cblocks;
+ int affinity;
+};
+
+enum perf_affinity {
+ PERF_AFFINITY_SYS = 0,
+ PERF_AFFINITY_NODE,
+ PERF_AFFINITY_CPU,
+ PERF_AFFINITY_MAX
};
struct option;
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
new file mode 100644
index 000000000000..bffb2d4a6420
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
@@ -0,0 +1,2245 @@
+[
+ {
+ "BriefDescription": "% of finished branches that were treated as BC+8",
+ "MetricExpr": "PM_BR_BC_8_CONV / PM_BRU_FIN * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "bc_8_branch_ratio_percent"
+ },
+ {
+ "BriefDescription": "% of finished branches that were pairable but not treated as BC+8",
+ "MetricExpr": "PM_BR_BC_8 / PM_BRU_FIN * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "bc_8_not_converted_branch_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percent of mispredicted branches out of all predicted (correctly and incorrectly) branches that completed",
+ "MetricExpr": "PM_BR_MPRED_CMPL / (PM_BR_PRED_BR0 + PM_BR_PRED_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "br_misprediction_percent"
+ },
+ {
+ "BriefDescription": "% of Branch miss predictions per instruction",
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "branch_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction per instruction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of count catch mispredictions out of all completed branches that required count cache predictionn",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_misprediction_percent"
+ },
+ {
+ "BriefDescription": "CR MisPredictions per Instruction",
+ "MetricExpr": "PM_BR_MPRED_CR / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "cr_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of link stack mispredictions out of all completed branches that required link stack prediction",
+ "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / (PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_misprediction_percent"
+ },
+ {
+ "BriefDescription": "TA MisPredictions per Instruction",
+ "MetricExpr": "PM_BR_MPRED_TA / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ta_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of target address mispredictions out of all completed branches that required address prediction",
+ "MetricExpr": "PM_BR_MPRED_TA / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1 + PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ta_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Percent of branches completed that were taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BR_CMPL",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "taken_branches_percent"
+ },
+ {
+ "BriefDescription": "Percent of chip+group+sys pumps that were incorrectly predicted",
+ "MetricExpr": "PM_PUMP_MPRED * 100 / (PM_PUMP_CPRED + PM_PUMP_MPRED)",
+ "MetricGroup": "bus_stats",
+ "MetricName": "any_pump_mpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of chip pumps that were correctly predicted as chip pumps the first time",
+ "MetricExpr": "PM_CHIP_PUMP_CPRED * 100 / PM_L2_CHIP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "chip_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of group pumps that were correctly predicted as group pumps the first time",
+ "MetricExpr": "PM_GRP_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "group_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of system pumps that were correctly predicted as group pumps the first time",
+ "MetricExpr": "PM_SYS_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "sys_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to CRU or BRU operations",
+ "MetricExpr": "PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_cru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to ISU Branch Operations",
+ "MetricExpr": "PM_CMPLU_STALL_BRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which a Group Completed",
+ "MetricExpr": "PM_GRP_CMPL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "completion_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by CO queue full",
+ "MetricExpr": "PM_CMPLU_STALL_COQ_FULL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "coq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to CRU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_BRU_CRU - PM_CMPLU_STALL_BRU) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "cru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by flushes",
+ "MetricExpr": "PM_CMPLU_STALL_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "flush_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by FXU Multi-Cycle Instructions",
+ "MetricExpr": "PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_multi_cyc_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by FXU",
+ "MetricExpr": "PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Other cycles stalled by FXU",
+ "MetricExpr": "(PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty due to Branch Mispredicts",
+ "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty due to Branch Mispredicts and Icache Misses",
+ "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_br_mpred_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held",
+      "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to issue queue",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to maps",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_map_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to syncs and other effects",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to SRQ",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_srq_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses",
+ "MetricExpr": "PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve in the local L2 or L3",
+ "MetricExpr": "(PM_GCT_NOSLOT_IC_MISS - PM_GCT_NOSLOT_IC_L3MISS) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_l2l3_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve off-chip",
+ "MetricExpr": "PM_GCT_NOSLOT_IC_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Other GCT empty cycles",
+ "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL) - ((PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL))",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by heavyweight syncs",
+ "MetricExpr": "PM_CMPLU_STALL_HWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "hwsync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU",
+ "MetricExpr": "PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in distant interventions and memory",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_REMOTE) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_distant_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote or distant caches",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31 / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l21l31_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was a conflict",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_conflict_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was no conflict",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_noconflict_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in other core's caches or memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in local memory or local L4",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_lmem_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote interventions and memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_remote_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by ERAT Translation rejects",
+ "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_erat_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU load finishes",
+ "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_ld_fin_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LHS rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_lhs_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LMQ Full rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_lmq_full_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU Rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_reject_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Rejects",
+ "MetricExpr": "(PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_reject_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU store forwarding",
+ "MetricExpr": "PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_st_fwd_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU Stores",
+ "MetricExpr": "PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_store_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by lightweight syncs",
+ "MetricExpr": "PM_CMPLU_STALL_LWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lwsync_stall_cpi"
+ },
+ {
+ "MetricExpr": "PM_CMPLU_STALL_MEM_ECC_DELAY / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "mem_ecc_delay_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by nops (nothing next to finish)",
+ "MetricExpr": "PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "no_ntf_stall_cpi"
+ },
+ {
+ "MetricExpr": "PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntcg_all_fin_cpi"
+ },
+ {
+ "MetricExpr": "PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntcg_flush_cpi"
+ },
+ {
+ "BriefDescription": "Other thread block stall cycles",
+ "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_LWSYNC - PM_CMPLU_STALL_HWSYNC - PM_CMPLU_STALL_MEM_ECC_DELAY - PM_CMPLU_STALL_FLUSH - PM_CMPLU_STALL_COQ_FULL) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles unaccounted for",
+ "MetricExpr": "(PM_RUN_CYC / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL) - (PM_GRP_CMPL / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_cpi"
+ },
+ {
+ "BriefDescription": "Stall cycles unaccounted for",
+ "MetricExpr": "(PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Run cycles per run instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cpi"
+ },
+ {
+ "BriefDescription": "Completion Stall Cycles",
+ "MetricExpr": "PM_CMPLU_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles a thread was blocked",
+ "MetricExpr": "PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "thread_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU",
+ "MetricExpr": "PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by other VSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_VSU - PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_SCALAR) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Scalar Operations",
+ "MetricExpr": "PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Scalar Long Operations",
+ "MetricExpr": "PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_long_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other VSU Scalar Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Vector Operations",
+ "MetricExpr": "PM_CMPLU_STALL_VECTOR / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Vector Long Operations",
+ "MetricExpr": "PM_CMPLU_STALL_VECTOR_LONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_long_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by other VSU Vector Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_VECTOR_LONG) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_other_cpi"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_lhs_rate_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads that missed the L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_no_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_other_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_mod_rate_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Private L3 S state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_conflict_rate_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads that missed the L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the L3 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_no_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_lmem_rate_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 Reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 demand load misses per run instruction",
+ "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "l1_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 misses that result in a cache reload",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_miss_reloads_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_mod_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_shr_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Distant L4",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl4_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Distant Memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_lhs_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with no conflicts",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_no_conflict_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_other_conflict_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 where the load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_conflict_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the line was brought into the L3 by a prefetch operation",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_mepf_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 without conflicts",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_no_conflict_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Local L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_ll4_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Local Memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_lmem_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_mod_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_shr_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Remote L4",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl4_percent"
+ },
+ {
+      "BriefDescription": "% of DL1 reloads from Remote Memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "dL1 miss portion of CPI",
+ "MetricExpr": "( (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)/ (PM_RUN_CYC / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dcache_miss_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_mod_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_shr_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of distant L4 miss rates with measured DL4 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl4_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of distant memory miss rates with measured DMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dmem_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl21 MOD miss rates with measured L21 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L21_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_mod_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl21 SHR miss rates with measured L21 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L21_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_shr_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl2 miss rates with measured L2 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L2 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) ) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l2_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl31 MOD miss rates with measured L31 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L31_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_mod_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl31 SHR miss rates with measured L31 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L31_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L3 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l3_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of Local L4 miss rates with measured LL4 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_LL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "ll4_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of Local memory miss rates with measured LMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_LMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "lmem_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_mod_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_shr_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of remote L4 miss rates with measured RL4 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl4_cpi_percent"
+ },
+ {
+      "BriefDescription": "estimate of remote memory miss rates with measured RMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "Branch Mispredict flushes per instruction",
+ "MetricExpr": "PM_FLUSH_BR_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "general",
+ "MetricName": "br_mpred_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cpi"
+ },
+ {
+      "BriefDescription": "Percentage of cycles in which a group completed",
+ "MetricExpr": "PM_GRP_CMPL / PM_CYC * 100",
+ "MetricGroup": "general",
+ "MetricName": "cyc_grp_completed_percent"
+ },
+ {
+      "BriefDescription": "Percentage of cycles in which a group was dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
+ "MetricGroup": "general",
+ "MetricName": "cyc_grp_dispatched_percent"
+ },
+ {
+ "BriefDescription": "Cycles per group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cyc_per_group"
+ },
+ {
+      "BriefDescription": "Dispatch flushes per run instruction (%)",
+ "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100",
+ "MetricGroup": "general",
+ "MetricName": "disp_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "% DTLB miss rate per inst",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "dtlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Flush rate (%)",
+ "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "flush_rate_percent"
+ },
+ {
+      "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_11_14_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_11to14_slots_percent"
+ },
+ {
+      "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_15_17_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_15to17_slots_percent"
+ },
+ {
+      "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_18_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_18plus_slots_percent"
+ },
+ {
+      "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_1_2_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_1to2_slots_percent"
+ },
+ {
+      "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_3_6_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_3to6_slots_percent"
+ },
+ {
+      "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had at least 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_7_10_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_7to10_slots_percent"
+ },
+ {
+ "BriefDescription": "Avg. group size",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "group_size"
+ },
+ {
+ "BriefDescription": "Instructions per group",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "inst_per_group"
+ },
+ {
+      "BriefDescription": "Instructions per cycle",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "general",
+ "MetricName": "ipc"
+ },
+ {
+ "BriefDescription": "% ITLB miss rate per inst",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "itlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per L1 store ref",
+ "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_inst_miss_rate_percent"
+ },
+ {
+      "BriefDescription": "L2 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_pteg_miss_rate_percent"
+ },
+ {
+      "BriefDescription": "Percentage of cycles that are run cycles",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC*100",
+ "MetricGroup": "general",
+ "MetricName": "run_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT2 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT2_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt2_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT4 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT4_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt4_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT8 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT8_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt8_cycles_percent"
+ },
+ {
+ "BriefDescription": "IPC of all instructions completed by the core while this thread was stalled",
+ "MetricExpr": "PM_CMPLU_STALL_OTHER_CMPL/PM_RUN_CYC",
+ "MetricGroup": "general",
+ "MetricName": "smt_benefit"
+ },
+ {
+ "BriefDescription": "Instruction dispatch-to-completion ratio",
+ "MetricExpr": "PM_INST_DISP / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "speculation"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in Single Thread Mode",
+ "MetricExpr": "(PM_RUN_CYC_ST_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "st_cycles_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2 per Inst",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_mod_rate_percent"
+ },
+ {
+      "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3 per Inst",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)",
+ "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "l1_inst_miss_rate_percent"
+ },
+ {
+      "BriefDescription": "Branches per instruction",
+ "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "branches_per_inst"
+ },
+ {
+      "BriefDescription": "Fixed point operations per instruction",
+ "MetricExpr": "(PM_FXU0_FIN + PM_FXU1_FIN)/PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fixed_per_inst"
+ },
+ {
+ "BriefDescription": "FXU0 balance",
+ "MetricExpr": "PM_FXU0_FIN / (PM_FXU0_FIN + PM_FXU1_FIN)",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_balance"
+ },
+ {
+ "BriefDescription": "Fraction of cycles that FXU0 is in use",
+ "MetricExpr": "PM_FXU0_FIN / PM_RUN_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_fin"
+ },
+ {
+ "BriefDescription": "FXU0 only Busy",
+ "MetricExpr": "PM_FXU0_BUSY_FXU1_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_only_busy"
+ },
+ {
+ "BriefDescription": "Fraction of cycles that FXU1 is in use",
+ "MetricExpr": "PM_FXU1_FIN / PM_RUN_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu1_fin"
+ },
+ {
+ "BriefDescription": "FXU1 only Busy",
+ "MetricExpr": "PM_FXU1_BUSY_FXU0_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu1_only_busy"
+ },
+ {
+ "BriefDescription": "Both FXU Busy",
+ "MetricExpr": "PM_FXU_BUSY / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu_both_busy"
+ },
+ {
+ "BriefDescription": "Both FXU Idle",
+ "MetricExpr": "PM_FXU_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu_both_idle"
+ },
+ {
+      "BriefDescription": "Loads per instruction",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "loads_per_inst"
+ },
+ {
+      "BriefDescription": "Stores per instruction",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "stores_per_inst"
+ },
+ {
+      "BriefDescription": "Icache Fetches per Icache Miss",
+ "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_miss_reload"
+ },
+ {
+ "BriefDescription": "% of ICache reloads due to prefetch",
+ "MetricExpr": "PM_IC_PREF_WRITE * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_pref_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "Average number of stores that gather in the store buffer before being sent to an L2 RC machine",
+ "MetricExpr": "PM_ST_CMPL / (PM_L2_ST / 2)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "avg_stores_gathered"
+ },
+ {
+ "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
+ "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 store misses per drained store. A drained store may contain multiple individual stores if they target the same line",
+ "MetricExpr": "PM_L2_ST_MISS / (PM_L2_ST / 2)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_store_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "average L1 miss latency using marked events",
+ "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1",
+ "MetricGroup": "latency",
+ "MetricName": "average_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Average icache miss latency",
+ "MetricExpr": "(PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ)",
+ "MetricGroup": "latency",
+ "MetricName": "average_il1_miss_latency"
+ },
+ {
+ "BriefDescription": "average service time for SYNC",
+ "MetricExpr": "PM_LSU_SRQ_SYNC_CYC / PM_LSU_SRQ_SYNC",
+ "MetricGroup": "latency",
+ "MetricName": "average_sync_cyc"
+ },
+ {
+      "BriefDescription": "Average number of cycles LMQ slot 0 was active",
+ "MetricExpr": "PM_LSU_LMQ_S0_VALID / PM_LSU_LMQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lmq_life_time"
+ },
+ {
+ "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 0 is VALID ONLY FOR EVEN THREADS",
+ "MetricExpr": "PM_LSU_LRQ_S0_VALID / PM_LSU_LRQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lrq_life_time_even"
+ },
+ {
+ "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 43 is valid ONLY FOR ODD THREADS",
+ "MetricExpr": "PM_LSU_LRQ_S43_VALID / PM_LSU_LRQ_S43_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lrq_life_time_odd"
+ },
+ {
+      "BriefDescription": "Average number of cycles SRQ stays active for one store. Slot 0 is VALID ONLY FOR EVEN THREADS",
+ "MetricExpr": "PM_LSU_SRQ_S0_VALID / PM_LSU_SRQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_srq_life_time_even"
+ },
+ {
+      "BriefDescription": "Average number of cycles SRQ stays active for one store. Slot 39 is valid ONLY FOR ODD THREADS",
+ "MetricExpr": "PM_LSU_SRQ_S39_VALID / PM_LSU_SRQ_S39_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_srq_life_time_odd"
+ },
+ {
+ "BriefDescription": "Marked background kill latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL",
+ "MetricGroup": "latency",
+ "MetricName": "bkill_latency"
+ },
+ {
+ "BriefDescription": "Marked dclaim latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_DCLAIM_CYC / PM_MRK_FAB_RSP_DCLAIM",
+ "MetricGroup": "latency",
+ "MetricName": "dclaim_latency"
+ },
+ {
+      "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Distant L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4",
+ "MetricGroup": "latency",
+ "MetricName": "dl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Dmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM",
+ "MetricGroup": "latency",
+ "MetricName": "dmem_latency"
+ },
+ {
+      "BriefDescription": "Estimated exposed miss latency for dL1 misses, i.e. load misses when we were NTC",
+ "MetricExpr": "PM_MRK_LD_MISS_EXPOSED_CYC / PM_MRK_LD_MISS_EXPOSED",
+ "MetricGroup": "latency",
+ "MetricName": "exposed_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the M state",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l21_mod_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the S state",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l21_shr_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time due to load-hit-store",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
+ "MetricGroup": "latency",
+ "MetricName": "l2_disp_conflict_ldhitst_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time NOT due load-hit-store",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER",
+ "MetricGroup": "latency",
+ "MetricName": "l2_disp_conflict_other_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that were satisfied by lines prefetched into the L3. This information is forwarded from the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_MEPF_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_mepf_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered no conflicts",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_no_conflict_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and beyond",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2MISS_CYC/ PM_MRK_DATA_FROM_L2MISS",
+ "MetricGroup": "latency",
+ "MetricName": "l2miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l31_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l31_shr_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
+ "MetricGroup": "latency",
+ "MetricName": "l3_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and suffered no conflicts",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l3_no_conflict_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that come from beyond the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3MISS_CYC/ PM_MRK_DATA_FROM_L3MISS",
+ "MetricGroup": "latency",
+ "MetricName": "l3miss_latency"
+ },
+ {
+ "BriefDescription": "Average latency for marked reloads that hit in the L3 on the MEPF state. i.e. lines that were prefetched into the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_MEPF_CYC/ PM_MRK_DATA_FROM_L3_MEPF",
+ "MetricGroup": "latency",
+ "MetricName": "l3pref_latency"
+ },
+ {
+ "BriefDescription": "Local L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4",
+ "MetricGroup": "latency",
+ "MetricName": "ll4_latency"
+ },
+ {
+ "BriefDescription": "Marked Lmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM",
+ "MetricGroup": "latency",
+ "MetricName": "lmem_latency"
+ },
+ {
+ "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on a different chip",
+ "MetricExpr": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_OFF_CHIP_CACHE",
+ "MetricGroup": "latency",
+ "MetricName": "off_chip_cache_latency"
+ },
+ {
+ "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on the same chip",
+ "MetricExpr": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_ON_CHIP_CACHE",
+ "MetricGroup": "latency",
+ "MetricName": "on_chip_cache_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Remote L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4",
+ "MetricGroup": "latency",
+ "MetricName": "rl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Rmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM",
+ "MetricGroup": "latency",
+ "MetricName": "rmem_latency"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LMQ full reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LSU reject ratio",
+ "MetricExpr": "PM_LSU_REJECT *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lsu_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LSU reject ratio",
+ "MetricExpr": "PM_LSU_REJECT *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lsu_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_rl4"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote and distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_rmem"
+ },
+ {
+ "BriefDescription": "Number of loads from remote memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_rmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Memory locality",
+ "MetricExpr": "(PM_DATA_FROM_LL4 + PM_DATA_FROM_LMEM) * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4 + PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4 + PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4)",
+ "MetricGroup": "memory",
+ "MetricName": "mem_locality_percent"
+ },
+ {
+ "BriefDescription": "DERAT Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "derat_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT misses that result in an ERAT reload",
+ "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "derat_miss_reload_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 16G page per inst",
+ "MetricExpr": "100 * PM_DERAT_MISS_16G / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16g_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16G page",
+ "MetricExpr": "PM_DERAT_MISS_16G / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16g_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 16M page per inst",
+ "MetricExpr": "PM_DERAT_MISS_16M * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16m_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16M page",
+ "MetricExpr": "PM_DERAT_MISS_16M / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16m_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 4K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 64K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DSLB_Miss_Rate per inst",
+ "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "dslb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% ISLB miss rate per inst",
+ "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "islb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits on any Centaur (local, remote, or distant) on either L4 or DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_MEMORY / PM_LD_REF_L1",
+ "MetricName": "any_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Base Completion Cycles",
+ "MetricExpr": "PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL",
+ "MetricName": "base_completion_cpi"
+ },
+ {
+ "BriefDescription": "Marked background kill latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL",
+ "MetricName": "bkill_ratio_percent"
+ },
+ {
+ "BriefDescription": "cycles",
+ "MetricExpr": "PM_RUN_CYC",
+ "MetricName": "custom_secs"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant chip's Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4) / PM_LD_REF_L1",
+ "MetricName": "distant_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads that came from the L3 and beyond",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l2_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_l31_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 where the lines were brought into the L3 by a prefetch operation",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l3_mepf_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l3_miss_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a distant chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD / PM_LD_REF_L1",
+ "MetricName": "dl2l3_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a distant chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR / PM_LD_REF_L1",
+ "MetricName": "dl2l3_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL4 / PM_LD_REF_L1",
+ "MetricName": "dl4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DMEM / PM_LD_REF_L1",
+ "MetricName": "dmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Overhead of expansion cycles",
+ "MetricExpr": "(PM_GRP_CMPL / PM_RUN_INST_CMPL) - (PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL)",
+ "MetricName": "expansion_overhead_cpi"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations executded in the Load/Store Unit following a load/store operation",
+ "MetricExpr": "PM_LSU_FX_FIN/PM_RUN_INST_CMPL",
+ "MetricName": "fixed_in_lsu_per_inst"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_CYC) * 100",
+ "MetricName": "gct_empty_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L2",
+ "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L3",
+ "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of L1 hits per load ref",
+ "MetricExpr": "(PM_LD_REF_L1 - PM_LD_MISS_L1) / PM_LD_REF_L1",
+ "MetricName": "l1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1",
+ "MetricName": "l1_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L21_MOD + PM_DATA_FROM_L21_SHR) / PM_LD_REF_L1",
+ "MetricName": "l2_1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD / PM_LD_REF_L1",
+ "MetricName": "l2_1_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR / PM_LD_REF_L1",
+ "MetricName": "l2_1_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Average number of Castout machines used. 1 of 16 CO machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_CO_USAGE / PM_RUN_CYC) * 16",
+ "MetricName": "l2_co_usage"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L2 / PM_LD_REF_L1",
+ "MetricName": "l2_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load misses per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_LD_REF_L1",
+ "MetricName": "l2_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST / PM_LD_REF_L1",
+ "MetricName": "l2_lhs_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l2_no_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER / PM_LD_REF_L1",
+ "MetricName": "l2_other_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_RC_USAGE / PM_RUN_CYC) * 16",
+ "MetricName": "l2_rc_usage"
+ },
+ {
+ "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_SN_USAGE / PM_RUN_CYC) * 8",
+ "MetricName": "l2_sn_usage"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)",
+ "MetricName": "l31_latency"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) / PM_LD_REF_L1",
+ "MetricName": "l3_1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD / PM_LD_REF_L1",
+ "MetricName": "l3_1_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR / PM_LD_REF_L1",
+ "MetricName": "l3_1_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per load ref where the demand load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l3_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3 / PM_LD_REF_L1",
+ "MetricName": "l3_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load misses per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_LD_REF_L1",
+ "MetricName": "l3_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per load ref where the L3 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l3_no_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 hits on lines that were not in the MEPF state per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L3 - PM_DATA_FROM_L3_MEPF) / PM_LD_REF_L1",
+ "MetricName": "l3other_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 hits on lines that were recently prefetched into the L3 (MEPF state) per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF / PM_LD_REF_L1",
+ "MetricName": "l3pref_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_LD_REF_L1",
+ "MetricName": "ll4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_LD_REF_L1",
+ "MetricName": "lmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4) / PM_LD_REF_L1",
+ "MetricName": "local_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_REJECT - PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_STORE) / (PM_LD_REF_L1 - PM_LD_MISS_L1)",
+ "MetricName": "lsu_stall_avg_cyc_per_l1hit_stfw"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 or L3 on a different chip (remote or distant) per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_OFF_CHIP_CACHE / PM_LD_REF_L1",
+ "MetricName": "off_chip_cache_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 or L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_ON_CHIP_CACHE / PM_LD_REF_L1",
+ "MetricName": "on_chip_cache_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote chip's Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4) / PM_LD_REF_L1",
+ "MetricName": "remote_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable resources or facilities",
+ "MetricExpr": "PM_ISU_REJECT_RES_NA *100/ PM_RUN_INST_CMPL",
+ "MetricName": "resource_na_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a remote chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD / PM_LD_REF_L1",
+ "MetricName": "rl2l3_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a remote chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR / PM_LD_REF_L1",
+ "MetricName": "rl2l3_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL4 / PM_LD_REF_L1",
+ "MetricName": "rl4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_LD_REF_L1",
+ "MetricName": "rmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected due to SAR Bypass",
+ "MetricExpr": "PM_ISU_REJECT_SAR_BYPASS *100/ PM_RUN_INST_CMPL",
+ "MetricName": "sar_bypass_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable sources",
+ "MetricExpr": "PM_ISU_REJECT_SRC_NA *100/ PM_RUN_INST_CMPL",
+ "MetricName": "source_na_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Store forward rate",
+ "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / PM_RUN_INST_CMPL",
+ "MetricName": "store_forward_rate_percent"
+ },
+ {
+ "BriefDescription": "Store forward rate",
+ "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / (PM_LD_REF_L1 - PM_LD_MISS_L1)",
+ "MetricName": "store_forward_ratio_percent"
+ },
+ {
+ "BriefDescription": "Marked store latency, from core completion to L2 RC machine completion",
+ "MetricExpr": "(PM_MRK_ST_L2DISP_TO_CMPL_CYC + PM_MRK_ST_DRAIN_TO_L2DISP_CYC) / PM_MRK_ST_NEST",
+ "MetricName": "store_latency"
+ },
+ {
+ "BriefDescription": "Cycles stalled by any sync",
+ "MetricExpr": "(PM_CMPLU_STALL_LWSYNC + PM_CMPLU_STALL_HWSYNC) / PM_RUN_INST_CMPL",
+ "MetricName": "sync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Percentage of lines that were prefetched into the L3 and evicted before they were consumed",
+ "MetricExpr": "(PM_L3_CO_MEPF / 2) / PM_L3_PREF_ALL * 100",
+ "MetricName": "wasted_l3_prefetch_percent"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
new file mode 100644
index 000000000000..811c2a8c1c9e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
@@ -0,0 +1,1982 @@
+[
+ {
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_BR_PRED * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "br_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction per instruction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_BR_PRED_CCACHE * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_LSTACK / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_LSTACK/ PM_BR_PRED_LSTACK * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_misprediction_percent"
+ },
+ {
+ "BriefDescription": "% Branches Taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BRU_FIN",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "taken_branches_percent"
+ },
+ {
+ "BriefDescription": "Completion stall due to a Branch Unit",
+ "MetricExpr": "PM_CMPLU_STALL_BRU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was routed to the crypto execution pipe and was waiting to finish",
+ "MetricExpr": "PM_CMPLU_STALL_CRYPTO/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "crypto_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that missed the L1 and was waiting for the data to return from the nest",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dcache_miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a multi-cycle instruction issued to the Decimal Floating Point execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_DFLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dflong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency decimal floating ops.",
+ "MetricExpr": "(PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_DFLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dfu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was issued to the Decimal Floating Point execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_DFU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dfu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved off node memory/cache",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_REMOTE)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_distant_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l21_l31_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 with a conflict",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_conflict_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 without conflict",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_noconflict_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved in L2/L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss resolving missed the L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l3miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in local memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_lmem_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved outside of local memory",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_non_local_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved from remote chip (cache or memory)",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_remote_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency double precision ops.",
+ "MetricExpr": "(PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DPLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dp_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_DP/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dp_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_DPLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dplong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction is an EIEIO waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_EIEIO/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "eieio_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the next to finish instruction suffered an ERAT miss and the EMQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_EMQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "emq_full_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "emq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load or store that suffered a translation miss",
+ "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "erat_miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete",
+ "MetricExpr": "PM_CMPLU_STALL_EXCEPTION/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exception_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to execution units for other reasons.",
+ "MetricExpr": "(PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_PM - PM_CMPLU_STALL_CRYPTO - PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exec_unit_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to execution units (FXU/VSU/CRU)",
+ "MetricExpr": "PM_CMPLU_STALL_EXEC_UNIT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exec_unit_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because any of the 4 threads in the same core suffered a flush, which blocks completion",
+ "MetricExpr": "PM_CMPLU_STALL_FLUSH_ANY_THREAD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "flush_any_thread_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to a long latency scalar fixed point instruction (division, square root)",
+ "MetricExpr": "PM_CMPLU_STALL_FXLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxlong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency integer ops",
+ "MetricExpr": "(PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_FXLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to a scalar fixed point or CR instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes",
+ "MetricExpr": "PM_CMPLU_STALL_FXU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_NTC_ISSUE_HELD_DARQ_FULL + PM_NTC_ISSUE_HELD_ARB + PM_NTC_ISSUE_HELD_OTHER)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "issue_hold_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied",
+ "MetricExpr": "PM_CMPLU_STALL_LARX/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "larx_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that hit on an older store and it was waiting for store data",
+ "MetricExpr": "PM_CMPLU_STALL_LHS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lhs_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that missed in the L1 and the LMQ was unable to accept this load miss request because it was full",
+ "MetricExpr": "PM_CMPLU_STALL_LMQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lmq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load instruction with all its dependencies satisfied just going through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "load_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that was held in LSAQ because the LRQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_LRQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to LRQ miscellaneous reasons, lost arbitration to LMQ slot, bank collisions, set prediction cleanup, set prediction multihit and others",
+ "MetricExpr": "PM_CMPLU_STALL_LRQ_OTHER/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_other_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_LMQ_FULL + PM_CMPLU_STALL_ST_FWD + PM_CMPLU_STALL_LHS + PM_CMPLU_STALL_LSU_MFSPR + PM_CMPLU_STALL_LARX + PM_CMPLU_STALL_LRQ_OTHER)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load or store that was held in LSAQ because an older instruction from SRQ or LRQ won arbitration to the LSU pipe when this instruction tried to launch",
+ "MetricExpr": "PM_CMPLU_STALL_LSAQ_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsaq_arb_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_LRQ_FULL + PM_CMPLU_STALL_SRQ_FULL + PM_CMPLU_STALL_LSAQ_ARB)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsaq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was an LSU op (other than a load or a store) with all its dependencies met and just going through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_fin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall of one cycle because the LSU requested to flush the next iop in the sequence. It takes 1 cycle for the ISU to process this request before the LSU instruction is allowed to complete",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_FLUSH_NEXT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_flush_next_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a mfspr instruction targeting an LSU SPR and it was waiting for the register data to be returned",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_MFSPR/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_mfspr_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion LSU stall for other reasons",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_LSU_FIN - PM_CMPLU_STALL_STORE_FINISH - PM_CMPLU_STALL_STORE_DATA - PM_CMPLU_STALL_EIEIO - PM_CMPLU_STALL_STCX - PM_CMPLU_STALL_SLB - PM_CMPLU_STALL_TEND - PM_CMPLU_STALL_PASTE - PM_CMPLU_STALL_TLBIE - PM_CMPLU_STALL_STORE_PIPE_ARB - PM_CMPLU_STALL_STORE_FIN_ARB - PM_CMPLU_STALL_LOAD_FINISH + PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_LMQ_FULL - PM_CMPLU_STALL_ST_FWD - PM_CMPLU_STALL_LHS - PM_CMPLU_STALL_LSU_MFSPR - PM_CMPLU_STALL_LARX - PM_CMPLU_STALL_LRQ_OTHER + PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL - PM_CMPLU_STALL_LRQ_FULL - PM_CMPLU_STALL_SRQ_FULL - PM_CMPLU_STALL_LSAQ_ARB) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by LSU instruction",
+ "MetricExpr": "PM_CMPLU_STALL_LSU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the register and notifying the Effective Address Table (EAT)",
+ "MetricExpr": "PM_CMPLU_STALL_MTFPSCR/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "mtfpscr_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tbegin. This is a short delay, and it includes ROT",
+ "MetricExpr": "PM_CMPLU_STALL_NESTED_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nested_tbegin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tend and decrement the TEXASR nested level. This is a short delay",
+ "MetricExpr": "PM_CMPLU_STALL_NESTED_TEND/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nested_tend_stall_cpi"
+ },
+ {
+ "BriefDescription": "Number of cycles the ICT has no itags assigned to this thread",
+ "MetricExpr": "PM_ICT_NOSLOT_CYC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nothing_dispatched_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was one that must finish at dispatch.",
+ "MetricExpr": "PM_CMPLU_STALL_NTC_DISP_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_disp_fin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. This event is used to account for cycles in which work is being completed in the CPI stack",
+ "MetricExpr": "PM_NTC_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_fin_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to ntc flush",
+ "MetricExpr": "PM_CMPLU_STALL_NTC_FLUSH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_flush_stall_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch because it lost arbitration onto the issue pipe to another instruction (from the same thread or a different thread)",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_arb_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch because there are no slots in the DARQ for it",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_DARQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_darq_full_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch during regular pipeline cycles, or because the VSU is busy with multi-cycle instructions, or because of a write-back collision with VSU",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_OTHER/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles unaccounted for.",
+ "MetricExpr": "(PM_RUN_CYC - PM_1PLUS_PPC_CMPL - PM_CMPLU_STALL_THRD - PM_CMPLU_STALL - PM_ICT_NOSLOT_CYC)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall for other reasons",
+ "MetricExpr": "PM_CMPLU_STALL - PM_CMPLU_STALL_NTC_DISP_FIN - PM_CMPLU_STALL_NTC_FLUSH - PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_BRU)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a paste waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_PASTE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "paste_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was issued to the Permute execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_PM/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "pm_stall_cpi"
+ },
+ {
+ "BriefDescription": "Run cycles per run instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cpi"
+ },
+ {
+ "BriefDescription": "Run_cycles",
+ "MetricExpr": "PM_RUN_CYC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cyc_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_FXU + PM_CMPLU_STALL_DP + PM_CMPLU_STALL_DFU + PM_CMPLU_STALL_PM + PM_CMPLU_STALL_CRYPTO)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "scalar_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was awaiting L2 response for an SLB",
+ "MetricExpr": "PM_CMPLU_STALL_SLB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "slb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall while waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC",
+ "MetricExpr": "PM_CMPLU_STALL_SPEC_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "spec_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store that was held in LSAQ because the SRQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_SRQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "srq_full_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_STORE_DATA + PM_CMPLU_STALL_EIEIO + PM_CMPLU_STALL_STCX + PM_CMPLU_STALL_SLB + PM_CMPLU_STALL_TEND + PM_CMPLU_STALL_PASTE + PM_CMPLU_STALL_TLBIE + PM_CMPLU_STALL_STORE_PIPE_ARB + PM_CMPLU_STALL_STORE_FIN_ARB)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "srq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to store forward",
+ "MetricExpr": "PM_CMPLU_STALL_ST_FWD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "st_fwd_stall_cpi"
+ },
+ {
+ "BriefDescription": "Nothing completed and ICT not empty",
+ "MetricExpr": "PM_CMPLU_STALL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a stcx waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_STCX/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stcx_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the next to finish instruction was a store waiting on data",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_DATA/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_data_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store waiting for a slot in the store finish pipe. This means the instruction is ready to finish but there are instructions ahead of it, using the finish pipe",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_FIN_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_fin_arb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store with all its dependencies met, just waiting to go through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store waiting for the next relaunch opportunity after an internal reject. This means the instruction is ready to relaunch and tried once but lost arbitration",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_PIPE_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_pipe_arb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a tend instruction awaiting response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_TEND/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "tend_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion Stalled because the thread was blocked",
+ "MetricExpr": "PM_CMPLU_STALL_THRD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "thread_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a tlbie waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_TLBIE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "tlbie_stall_cpi"
+ },
+ {
+ "BriefDescription": "Vector stalls due to small latency double precision ops",
+ "MetricExpr": "(PM_CMPLU_STALL_VDP - PM_CMPLU_STALL_VDPLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdp_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a vector instruction issued to the Double Precision execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_VDP/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdp_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_VDPLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdplong_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_VFXU + PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vector_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to a long latency vector fixed point instruction (division, square root)",
+ "MetricExpr": "PM_CMPLU_STALL_VFXLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxlong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Vector stalls due to small latency integer ops",
+ "MetricExpr": "(PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VFXLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to a vector fixed point instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes",
+ "MetricExpr": "PM_CMPLU_STALL_VFXU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxu_stall_cpi"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 S tate, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads that came from the L3 and were brought into the L3 by a prefetch, per instruction completed",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_mepf_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 demand load misses per run instruction",
+ "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "l1_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 misses that result in a cache reload",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_miss_reloads_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant Memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from sources beyond the local L2",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads that came from L3 and were brought into the L3 by a prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_mepf_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from sources beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local Memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote Memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * PM_MRK_DATA_FROM_DL2L3_MOD_CYC / PM_MRK_DATA_FROM_DL2L3_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * PM_MRK_DATA_FROM_DL2L3_SHR_CYC / PM_MRK_DATA_FROM_DL2L3_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant L4 miss rates with measured DL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL4 * PM_MRK_DATA_FROM_DL4_CYC / PM_MRK_DATA_FROM_DL4 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant memory miss rates with measured DMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DMEM * PM_MRK_DATA_FROM_DMEM_CYC / PM_MRK_DATA_FROM_DMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 MOD miss rates with measured L21 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * PM_MRK_DATA_FROM_L21_MOD_CYC / PM_MRK_DATA_FROM_L21_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 SHR miss rates with measured L21 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * PM_MRK_DATA_FROM_L21_SHR_CYC / PM_MRK_DATA_FROM_L21_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2 miss rates with measured L2 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L2 * PM_MRK_DATA_FROM_L2_CYC / PM_MRK_DATA_FROM_L2 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l2_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 MOD miss rates with measured L31 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * PM_MRK_DATA_FROM_L31_MOD_CYC / PM_MRK_DATA_FROM_L31_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 SHR miss rates with measured L31 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * PM_MRK_DATA_FROM_L31_SHR_CYC / PM_MRK_DATA_FROM_L31_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L3 * PM_MRK_DATA_FROM_L3_CYC / PM_MRK_DATA_FROM_L3 / PM_CMPLU_STALL_DCACHE_MISS * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l3_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of Local memory miss rates with measured LMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_LMEM * PM_MRK_DATA_FROM_LMEM_CYC / PM_MRK_DATA_FROM_LMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "lmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * PM_MRK_DATA_FROM_RL2L3_MOD_CYC / PM_MRK_DATA_FROM_RL2L3_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * PM_MRK_DATA_FROM_RL2L3_SHR_CYC / PM_MRK_DATA_FROM_RL2L3_SHR / PM_CMPLU_STALL_DCACHE_MISS * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote L4 miss rates with measured RL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL4 * PM_MRK_DATA_FROM_RL4_CYC / PM_MRK_DATA_FROM_RL4 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote memory miss rates with measured RMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RMEM * PM_MRK_DATA_FROM_RMEM_CYC / PM_MRK_DATA_FROM_RMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "Branch Mispredict flushes per instruction",
+ "MetricExpr": "PM_FLUSH_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "general",
+ "MetricName": "br_mpred_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cpi"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100",
+ "MetricGroup": "general",
+ "MetricName": "disp_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "% DTLB miss rate per inst",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "dtlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Flush rate (%)",
+ "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Instructions per cycles",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "general",
+ "MetricName": "ipc"
+ },
+ {
+ "BriefDescription": "% ITLB miss rate per inst",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "itlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per L1 store ref",
+ "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 dmand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Run cycles per cycle",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC*100",
+ "MetricGroup": "general",
+ "MetricName": "run_cycles_percent"
+ },
+ {
+ "BriefDescription": "Instruction dispatch-to-completion ratio",
+ "MetricExpr": "PM_INST_DISP / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "speculation"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2 per Inst",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3 other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3 per Inst",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)",
+ "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "l1_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Icache Fetchs per Icache Miss",
+ "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_miss_reload"
+ },
+ {
+ "BriefDescription": "% of ICache reloads due to prefetch",
+ "MetricExpr": "PM_IC_PREF_WRITE * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_pref_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "%L2 Modified CO Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_CASTOUT_MOD/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_co_m_rd_util"
+ },
+ {
+ "BriefDescription": "L2 dcache invalidates per run inst (per core)",
+ "MetricExpr": "(PM_L2_DC_INV / 2) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_dc_inv_rate_percent"
+ },
+ {
+ "BriefDescription": "Demand load misses as a % of L2 LD dispatches (per thread)",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID / (PM_L2_LD / 2) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_dem_ld_disp_percent"
+ },
+ {
+ "BriefDescription": "L2 Icache invalidates per run inst (per core)",
+ "MetricExpr": "(PM_L2_IC_INV / 2) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ic_inv_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 Inst misses as a % of total L2 Inst dispatches (per thread)",
+ "MetricExpr": "PM_L2_INST_MISS / PM_L2_INST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_inst_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L2 Load hits",
+ "MetricExpr": "(PM_L2_LD_HIT / PM_RUN_CYC) / 2",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_hit_frequency"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L2 Load misses",
+ "MetricExpr": "(PM_L2_LD_MISS / PM_RUN_CYC) / 2",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_miss_frequency"
+ },
+ {
+ "BriefDescription": "L2 Load misses as a % of total L2 Load dispatches (per thread)",
+ "MetricExpr": "PM_L2_LD_MISS / PM_L2_LD * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "% L2 load disp attempts Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_rd_util"
+ },
+ {
+ "BriefDescription": "L2 load misses that require a cache write (4 pclks per disp attempt) % of pclks",
+ "MetricExpr": "((( PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ldmiss_wr_util"
+ },
+ {
+ "BriefDescription": "L2 local pump prediction success",
+ "MetricExpr": "PM_L2_LOC_GUESS_CORRECT / (PM_L2_LOC_GUESS_CORRECT + PM_L2_LOC_GUESS_WRONG) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_local_pred_correct_percent"
+ },
+ {
+ "BriefDescription": "L2 COs that were in M,Me,Mu state as a % of all L2 COs",
+ "MetricExpr": "PM_L2_CASTOUT_MOD / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_mod_co_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Load RC dispatch atampts that failed because of address collisions and cclass conflicts",
+ "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR )/ PM_L2_RCLD_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_ld_disp_addr_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Load RC dispatch attempts that failed",
+ "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR + PM_L2_RCLD_DISP_FAIL_OTHER)/ PM_L2_RCLD_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_ld_disp_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Store RC dispatch atampts that failed because of address collisions and cclass conflicts",
+ "MetricExpr": "PM_L2_RCST_DISP_FAIL_ADDR / PM_L2_RCST_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_st_disp_addr_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Store RC dispatch attempts that failed",
+ "MetricExpr": "(PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/ PM_L2_RCST_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_st_disp_fail_percent"
+ },
+ {
+ "BriefDescription": "L2 Cache Read Utilization (per core)",
+ "MetricExpr": "(((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100) + (((PM_L2_RCST_DISP/2)*4)/PM_RUN_CYC * 100) + (((PM_L2_CASTOUT_MOD/2)*4)/PM_RUN_CYC * 100)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rd_util_percent"
+ },
+ {
+ "BriefDescription": "L2 COs that were in T,Te,Si,S state as a % of all L2 COs",
+ "MetricExpr": "PM_L2_CASTOUT_SHR / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_shr_co_percent"
+ },
+ {
+ "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
+ "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "% L2 store disp attempts Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_RCST_DISP/2)*4) / PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_rd_util"
+ },
+ {
+ "BriefDescription": "L2 stores that require a cache write (4 pclks per disp attempt) % of pclks",
+ "MetricExpr": "((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_wr_util"
+ },
+ {
+ "BriefDescription": "L2 Cache Write Utilization (per core)",
+ "MetricExpr": "((((PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4) / PM_RUN_CYC * 100) + (((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_wr_util_percent"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L3 Load hits",
+ "MetricExpr": "(PM_L3_LD_HIT / PM_RUN_CYC) / 2",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_ld_hit_frequency"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L3 Load misses",
+ "MetricExpr": "(PM_L3_LD_MISS / PM_RUN_CYC) / 2",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_ld_miss_frequency"
+ },
+ {
+ "BriefDescription": "Average number of Write-in machines used. 1 of 8 WI machines is sampled every L3 cycle",
+ "MetricExpr": "(PM_L3_WI_USAGE / PM_RUN_CYC) * 8",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_wi_usage"
+ },
+ {
+ "BriefDescription": "Average icache miss latency",
+ "MetricExpr": "PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ",
+ "MetricGroup": "latency",
+ "MetricName": "average_il1_miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Distant L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4",
+ "MetricGroup": "latency",
+ "MetricName": "dl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Dmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM",
+ "MetricGroup": "latency",
+ "MetricName": "dmem_latency"
+ },
+ {
+ "BriefDescription": "average L1 miss latency using marked events",
+ "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1",
+ "MetricGroup": "latency",
+ "MetricName": "estimated_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L21 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l21_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L21 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l21_shr_latency"
+ },
+ {
+ "BriefDescription": "Marked L2 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l31_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l31_shr_latency"
+ },
+ {
+ "BriefDescription": "Marked L3 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
+ "MetricGroup": "latency",
+ "MetricName": "l3_latency"
+ },
+ {
+ "BriefDescription": "Local L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4",
+ "MetricGroup": "latency",
+ "MetricName": "ll4_latency"
+ },
+ {
+ "BriefDescription": "Marked Lmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM",
+ "MetricGroup": "latency",
+ "MetricName": "lmem_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Remote L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4",
+ "MetricGroup": "latency",
+ "MetricName": "rl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Rmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM",
+ "MetricGroup": "latency",
+ "MetricName": "rmem_latency"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "L4 locality(%)",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / (PM_DATA_FROM_LL4 + PM_DATA_FROM_RL4 + PM_DATA_FROM_DL4)",
+ "MetricGroup": "memory",
+ "MetricName": "l4_locality"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_rl4"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote and distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_rmem"
+ },
+ {
+ "BriefDescription": "Number of loads from remote memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_rmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Memory locality",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_RMEM + PM_DATA_FROM_DMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "mem_locality_percent"
+ },
+ {
+ "BriefDescription": "L1 Prefetches issued by the prefetch machine per instruction (per thread)",
+ "MetricExpr": "PM_L1_PREF / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "prefetch",
+ "MetricName": "l1_prefetch_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "derat_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT misses that result in an ERAT reload",
+ "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "derat_miss_reload_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 4K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 64K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_ratio"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio",
+ "MetricExpr": "PM_LSU_DERAT_MISS / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DSLB_Miss_Rate per inst",
+ "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "dslb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% ISLB miss rate per inst",
+ "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "islb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "ANY_SYNC_STALL_CPI",
+ "MetricExpr": "PM_CMPLU_STALL_ANY_SYNC / PM_RUN_INST_CMPL",
+ "MetricName": "any_sync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Avg. more than 1 instructions completed",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricName": "average_completed_instruction_set_size"
+ },
+ {
+ "BriefDescription": "% Branches per instruction",
+ "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "branches_per_inst"
+ },
+ {
+ "BriefDescription": "Cycles in which at least one instruction completes in this thread",
+ "MetricExpr": "PM_1PLUS_PPC_CMPL/PM_RUN_INST_CMPL",
+ "MetricName": "completion_cpi"
+ },
+ {
+ "BriefDescription": "cycles",
+ "MetricExpr": "PM_RUN_CYC",
+ "MetricName": "custom_secs"
+ },
+ {
+ "BriefDescription": "Percentage Cycles atleast one instruction dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
+ "MetricName": "cycles_atleast_one_inst_dispatched_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricName": "cycles_per_completed_instructions_set"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L4",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_l31_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L4",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles in which the oldest instruction is finished and ready to complete for waiting to get through the completion pipe",
+ "MetricExpr": "PM_NTC_ALL_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "finish_to_cmpl_cpi"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations",
+ "MetricExpr": "PM_FXU_FIN/PM_RUN_INST_CMPL",
+ "MetricName": "fixed_per_inst"
+ },
+ {
+ "BriefDescription": "All FXU Busy",
+ "MetricExpr": "PM_FXU_BUSY / PM_CYC",
+ "MetricName": "fxu_all_busy"
+ },
+ {
+ "BriefDescription": "All FXU Idle",
+ "MetricExpr": "PM_FXU_IDLE / PM_CYC",
+ "MetricName": "fxu_all_idle"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to Icache Miss and branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED_ICMISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_br_mpred_icmiss_cpi"
+ },
+ {
+ "BriefDescription": "ICT other stalls",
+ "MetricExpr": "(PM_ICT_NOSLOT_CYC - PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_BR_MPRED_ICMISS - PM_ICT_NOSLOT_BR_MPRED - PM_ICT_NOSLOT_DISP_HELD)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_cyc_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruciton is held at dispatch for any reason",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_hb_full_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_ISSQ/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_DISP_HELD - PM_ICT_NOSLOT_DISP_HELD_HB_FULL - PM_ICT_NOSLOT_DISP_HELD_SYNC - PM_ICT_NOSLOT_DISP_HELD_TBEGIN - PM_ICT_NOSLOT_DISP_HELD_ISSQ)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_SYNC/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_sync_cpi"
+ },
+ {
+ "BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_tbegin_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_IC_L3 - PM_ICT_NOSLOT_IC_L3MISS)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l2_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from the local L3",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l3_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3MISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to Icache Miss",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_MISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L2",
+ "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L3",
+ "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Average number of Castout machines used. 1 of 16 CO machines is sampled every L2 cycle",
+ "MetricExpr": "PM_CO_USAGE / PM_RUN_CYC * 16",
+ "MetricName": "l2_co_usage"
+ },
+ {
+ "BriefDescription": "Percent of instruction reads out of all L2 commands",
+ "MetricExpr": "PM_ISIDE_DISP * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_instr_commands_percent"
+ },
+ {
+ "BriefDescription": "Percent of loads out of all L2 commands",
+ "MetricExpr": "PM_L2_LD * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_ld_commands_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 store dispatches that failed per core",
+ "MetricExpr": "100 * (PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_rc_st_disp_fail_rate_percent"
+ },
+ {
+ "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle",
+ "MetricExpr": "PM_RC_USAGE / PM_RUN_CYC * 16",
+ "MetricName": "l2_rc_usage"
+ },
+ {
+ "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle",
+ "MetricExpr": "PM_SN_USAGE / PM_RUN_CYC * 8",
+ "MetricName": "l2_sn_usage"
+ },
+ {
+ "BriefDescription": "Percent of stores out of all L2 commands",
+ "MetricExpr": "PM_L2_ST * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_st_commands_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 store dispatches that failed per core",
+ "MetricExpr": "100 * (PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_st_disp_fail_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 dispatches per core",
+ "MetricExpr": "100 * PM_L2_RCST_DISP/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_st_disp_rate_percent"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)",
+ "MetricName": "l31_latency"
+ },
+ {
+ "BriefDescription": "PCT instruction loads",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricName": "loads_per_inst"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL",
+ "MetricName": "lsu_stall_dcache_miss_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because a different thread was using the completion pipe",
+ "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_EXCEPTION - PM_CMPLU_STALL_ANY_SYNC - PM_CMPLU_STALL_SYNC_PMU_INT - PM_CMPLU_STALL_SPEC_FINISH - PM_CMPLU_STALL_FLUSH_ANY_THREAD - PM_CMPLU_STALL_LSU_FLUSH_NEXT - PM_CMPLU_STALL_NESTED_TBEGIN - PM_CMPLU_STALL_NESTED_TEND - PM_CMPLU_STALL_MTFPSCR)/PM_RUN_INST_CMPL",
+ "MetricName": "other_thread_cmpl_stall"
+ },
+ {
+ "BriefDescription": "PCT instruction stores",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "stores_per_inst"
+ },
+ {
+ "BriefDescription": "ANY_SYNC_STALL_CPI",
+ "MetricExpr": "PM_CMPLU_STALL_SYNC_PMU_INT / PM_RUN_INST_CMPL",
+ "MetricName": "sync_pmu_int_stall_cpi"
+ }
+]
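
Each entry in the array above is a JSON object whose MetricExpr perf evaluates from the named PMU events, with MetricGroup (where present) tying related metrics together. As a reviewing aid, here is a minimal Python sketch that tallies entries per group in a local copy of a metrics file in this format; the file name below is an assumption for illustration, not a path taken from the patch.

import json
from collections import Counter

PATH = "metrics.json"  # hypothetical local copy of a pmu-events metrics file

with open(PATH) as f:
    metrics = json.load(f)

groups = Counter()
for m in metrics:
    # Every entry carries MetricExpr and MetricName; BriefDescription and
    # MetricGroup are present on most, but not all, of the entries above.
    assert "MetricExpr" in m and "MetricName" in m, m
    groups[m.get("MetricGroup", "<ungrouped>")] += 1

for group, count in sorted(groups.items()):
    print(group, count)
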
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 36c903faed0b..71e9737f4614 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -73,7 +73,7 @@
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
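The corrected expression drops the _PS event aliases and divides the outstanding L1D miss cycles by the number of demand loads that either missed L1 or hit the fill buffer. A minimal sketch with invented counts, just to show the arithmetic behind Load_Miss_Real_Latency:

    # Hypothetical counter values; real ones come from L1D_PEND_MISS.PENDING,
    # MEM_LOAD_RETIRED.L1_MISS and MEM_LOAD_RETIRED.FB_HIT.
    pending_cycles = 480000   # L1D_PEND_MISS.PENDING
    l1_miss = 9000            # MEM_LOAD_RETIRED.L1_MISS
    fb_hit = 3000             # MEM_LOAD_RETIRED.FB_HIT

    load_miss_real_latency = pending_cycles / float(l1_miss + fb_hit)
    print(load_miss_real_latency)  # 40.0 cycles per L1 demand-load miss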
diff --git a/tools/perf/scripts/Build b/tools/perf/scripts/Build
index 41efd7e368b3..68d4b54574ad 100644
--- a/tools/perf/scripts/Build
+++ b/tools/perf/scripts/Build
@@ -1,2 +1,2 @@
-libperf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
-libperf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
+perf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
+perf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index 34faecf774ae..db0036129307 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -1,4 +1,4 @@
-libperf-y += Context.o
+perf-y += Context.o
CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
index aefc15c9444a..7d0e33ce6aba 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -1,3 +1,3 @@
-libperf-y += Context.o
+perf-y += Context.o
CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 0564dd7377f2..30130213da7e 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -478,7 +478,7 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
'parent_call_path_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
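The widened CASE expression decodes the calls.flags column into readable labels, now handling 0 (empty) and 6 ('jump') and falling back to the raw value rather than an empty string. A small standalone sketch of the same decoding in Python, separate from the exporter itself (only the flag-to-label mapping is taken from the query):

    # Mapping copied from the CASE expression above; the helper is illustrative only.
    FLAG_NAMES = {0: "", 1: "no call", 2: "no return", 3: "no call/return", 6: "jump"}

    def decode_flags(flags):
        # Unknown combinations fall back to the numeric value, like the ELSE branch.
        return FLAG_NAMES.get(flags, str(flags))

    print(decode_flags(3))  # no call/return
    print(decode_flags(6))  # jump
    print(decode_flags(5))  # 5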
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index 245caf2643ed..ed237f2ed03f 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -320,7 +320,7 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
'parent_call_path_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index f278ce5ebab7..09ce73b07d35 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
@@ -1398,18 +1398,28 @@ class BranchModel(TreeModel):
def HasMoreRecords(self):
return self.more
+# Report Variables
+
+class ReportVars():
+
+ def __init__(self, name = "", where_clause = "", limit = ""):
+ self.name = name
+ self.where_clause = where_clause
+ self.limit = limit
+
+ def UniqueId(self):
+ return str(self.where_clause + ";" + self.limit)
+
# Branch window
class BranchWindow(QMdiSubWindow):
- def __init__(self, glb, event_id, name, where_clause, parent=None):
+ def __init__(self, glb, event_id, report_vars, parent=None):
super(BranchWindow, self).__init__(parent)
- model_name = "Branch Events " + str(event_id)
- if len(where_clause):
- model_name = where_clause + " " + model_name
+ model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
- self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, where_clause))
+ self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
self.view = QTreeView()
self.view.setUniformRowHeights(True)
@@ -1427,7 +1437,7 @@ class BranchWindow(QMdiSubWindow):
self.setWidget(self.vbox.Widget())
- AddSubWindow(glb.mainwindow.mdi_area, self, name + " Branch Events")
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
def ResizeColumnToContents(self, column, n):
# Using the view's resizeColumnToContents() here is extremely slow
@@ -1472,47 +1482,134 @@ class BranchWindow(QMdiSubWindow):
else:
self.find_bar.NotFound()
-# Dialog data item converted and validated using a SQL table
+# Line edit data item
-class SQLTableDialogDataItem():
+class LineEditDataItem(object):
- def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
self.glb = glb
self.label = label
self.placeholder_text = placeholder_text
- self.table_name = table_name
- self.match_column = match_column
- self.column_name1 = column_name1
- self.column_name2 = column_name2
self.parent = parent
+ self.id = id
- self.value = ""
+ self.value = default
- self.widget = QLineEdit()
+ self.widget = QLineEdit(default)
self.widget.editingFinished.connect(self.Validate)
self.widget.textChanged.connect(self.Invalidate)
self.red = False
self.error = ""
self.validated = True
- self.last_id = 0
- self.first_time = 0
- self.last_time = 2 ** 64
- if self.table_name == "<timeranges>":
- query = QSqlQuery(self.glb.db)
- QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
- if query.next():
- self.last_id = int(query.value(0))
- self.last_time = int(query.value(1))
- QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
- if query.next():
- self.first_time = int(query.value(0))
- if placeholder_text:
- placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
-
if placeholder_text:
self.widget.setPlaceholderText(placeholder_text)
+ def TurnTextRed(self):
+ if not self.red:
+ palette = QPalette()
+ palette.setColor(QPalette.Text,Qt.red)
+ self.widget.setPalette(palette)
+ self.red = True
+
+ def TurnTextNormal(self):
+ if self.red:
+ palette = QPalette()
+ self.widget.setPalette(palette)
+ self.red = False
+
+ def InvalidValue(self, value):
+ self.value = ""
+ self.TurnTextRed()
+ self.error = self.label + " invalid value '" + value + "'"
+ self.parent.ShowMessage(self.error)
+
+ def Invalidate(self):
+ self.validated = False
+
+ def DoValidate(self, input_string):
+ self.value = input_string.strip()
+
+ def Validate(self):
+ self.validated = True
+ self.error = ""
+ self.TurnTextNormal()
+ self.parent.ClearMessage()
+ input_string = self.widget.text()
+ if not len(input_string.strip()):
+ self.value = ""
+ return
+ self.DoValidate(input_string)
+
+ def IsValid(self):
+ if not self.validated:
+ self.Validate()
+ if len(self.error):
+ self.parent.ShowMessage(self.error)
+ return False
+ return True
+
+ def IsNumber(self, value):
+ try:
+ x = int(value)
+ except:
+ x = 0
+ return str(x) == value
+
+# Non-negative integer ranges dialog data item
+
+class NonNegativeIntegerRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.column_name = column_name
+
+ def DoValidate(self, input_string):
+ singles = []
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if "-" in value:
+ vrange = value.split("-")
+ if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return self.InvalidValue(value)
+ ranges.append(vrange)
+ else:
+ if not self.IsNumber(value):
+ return self.InvalidValue(value)
+ singles.append(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ if len(singles):
+ ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
+ self.value = " OR ".join(ranges)
+
+# Positive integer dialog data item
+
+class PositiveIntegerDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
+ super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
+
+ def DoValidate(self, input_string):
+ if not self.IsNumber(input_string.strip()):
+ return self.InvalidValue(input_string)
+ value = int(input_string.strip())
+ if value <= 0:
+ return self.InvalidValue(input_string)
+ self.value = str(value)
+
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.table_name = table_name
+ self.match_column = match_column
+ self.column_name1 = column_name1
+ self.column_name2 = column_name2
+
def ValueToIds(self, value):
ids = []
query = QSqlQuery(self.glb.db)
@@ -1523,6 +1620,42 @@ class SQLTableDialogDataItem():
ids.append(str(query.value(0)))
return ids
+ def DoValidate(self, input_string):
+ all_ids = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ ids = self.ValueToIds(value)
+ if len(ids):
+ all_ids.extend(ids)
+ else:
+ return self.InvalidValue(value)
+ self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+ if self.column_name2:
+ self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+
+# Sample time ranges dialog data item converted and validated using 'samples' SQL table
+
+class SampleTimeRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ self.column_name = column_name
+
+ self.last_id = 0
+ self.first_time = 0
+ self.last_time = 2 ** 64
+
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+ if query.next():
+ self.last_id = int(query.value(0))
+ self.last_time = int(query.value(1))
+ QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+ if query.next():
+ self.first_time = int(query.value(0))
+ if placeholder_text:
+ placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+ super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
def IdBetween(self, query, lower_id, higher_id, order):
QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
if query.next():
@@ -1560,7 +1693,6 @@ class SQLTableDialogDataItem():
return str(lower_id)
def ConvertRelativeTime(self, val):
- print "val ", val
mult = 1
suffix = val[-2:]
if suffix == "ms":
@@ -1582,29 +1714,23 @@ class SQLTableDialogDataItem():
return str(val)
def ConvertTimeRange(self, vrange):
- print "vrange ", vrange
if vrange[0] == "":
vrange[0] = str(self.first_time)
if vrange[1] == "":
vrange[1] = str(self.last_time)
vrange[0] = self.ConvertRelativeTime(vrange[0])
vrange[1] = self.ConvertRelativeTime(vrange[1])
- print "vrange2 ", vrange
if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return False
- print "ok1"
beg_range = max(int(vrange[0]), self.first_time)
end_range = min(int(vrange[1]), self.last_time)
if beg_range > self.last_time or end_range < self.first_time:
return False
- print "ok2"
vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
- print "vrange3 ", vrange
return True
def AddTimeRange(self, value, ranges):
- print "value ", value
n = value.count("-")
if n == 1:
pass
@@ -1622,111 +1748,31 @@ class SQLTableDialogDataItem():
return True
return False
- def InvalidValue(self, value):
- self.value = ""
- palette = QPalette()
- palette.setColor(QPalette.Text,Qt.red)
- self.widget.setPalette(palette)
- self.red = True
- self.error = self.label + " invalid value '" + value + "'"
- self.parent.ShowMessage(self.error)
+ def DoValidate(self, input_string):
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if not self.AddTimeRange(value, ranges):
+ return self.InvalidValue(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ self.value = " OR ".join(ranges)
- def IsNumber(self, value):
- try:
- x = int(value)
- except:
- x = 0
- return str(x) == value
+# Report Dialog Base
- def Invalidate(self):
- self.validated = False
+class ReportDialogBase(QDialog):
- def Validate(self):
- input_string = self.widget.text()
- self.validated = True
- if self.red:
- palette = QPalette()
- self.widget.setPalette(palette)
- self.red = False
- if not len(input_string.strip()):
- self.error = ""
- self.value = ""
- return
- if self.table_name == "<timeranges>":
- ranges = []
- for value in [x.strip() for x in input_string.split(",")]:
- if not self.AddTimeRange(value, ranges):
- return self.InvalidValue(value)
- ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
- self.value = " OR ".join(ranges)
- elif self.table_name == "<ranges>":
- singles = []
- ranges = []
- for value in [x.strip() for x in input_string.split(",")]:
- if "-" in value:
- vrange = value.split("-")
- if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
- return self.InvalidValue(value)
- ranges.append(vrange)
- else:
- if not self.IsNumber(value):
- return self.InvalidValue(value)
- singles.append(value)
- ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
- if len(singles):
- ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
- self.value = " OR ".join(ranges)
- elif self.table_name:
- all_ids = []
- for value in [x.strip() for x in input_string.split(",")]:
- ids = self.ValueToIds(value)
- if len(ids):
- all_ids.extend(ids)
- else:
- return self.InvalidValue(value)
- self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
- if self.column_name2:
- self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
- else:
- self.value = input_string.strip()
- self.error = ""
- self.parent.ClearMessage()
-
- def IsValid(self):
- if not self.validated:
- self.Validate()
- if len(self.error):
- self.parent.ShowMessage(self.error)
- return False
- return True
-
-# Selected branch report creation dialog
-
-class SelectedBranchDialog(QDialog):
-
- def __init__(self, glb, parent=None):
- super(SelectedBranchDialog, self).__init__(parent)
+ def __init__(self, glb, title, items, partial, parent=None):
+ super(ReportDialogBase, self).__init__(parent)
self.glb = glb
- self.name = ""
- self.where_clause = ""
+ self.report_vars = ReportVars()
- self.setWindowTitle("Selected Branches")
+ self.setWindowTitle(title)
self.setMinimumWidth(600)
- items = (
- ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
- ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
- ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
- ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
- ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
- ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
- ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
- ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
- ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
- )
- self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+ self.data_items = [x(glb, self) for x in items]
+
+ self.partial = partial
self.grid = QGridLayout()
@@ -1758,23 +1804,28 @@ class SelectedBranchDialog(QDialog):
self.setLayout(self.vbox);
def Ok(self):
- self.name = self.data_items[0].value
- if not self.name:
+ vars = self.report_vars
+ for d in self.data_items:
+ if d.id == "REPORTNAME":
+ vars.name = d.value
+ if not vars.name:
self.ShowMessage("Report name is required")
return
for d in self.data_items:
if not d.IsValid():
return
for d in self.data_items[1:]:
- if len(d.value):
- if len(self.where_clause):
- self.where_clause += " AND "
- self.where_clause += d.value
- if len(self.where_clause):
- self.where_clause = " AND ( " + self.where_clause + " ) "
- else:
- self.ShowMessage("No selection")
- return
+ if d.id == "LIMIT":
+ vars.limit = d.value
+ elif len(d.value):
+ if len(vars.where_clause):
+ vars.where_clause += " AND "
+ vars.where_clause += d.value
+ if len(vars.where_clause):
+ if self.partial:
+ vars.where_clause = " AND ( " + vars.where_clause + " ) "
+ else:
+ vars.where_clause = " WHERE " + vars.where_clause + " "
self.accept()
def ShowMessage(self, msg):
@@ -1783,6 +1834,23 @@ class SelectedBranchDialog(QDialog):
def ClearMessage(self):
self.status.setText("")
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Selected Branches"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
+ lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
+ super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
+
# Event list
def GetEventList(db):
@@ -1793,6 +1861,16 @@ def GetEventList(db):
events.append(query.value(0))
return events
+# Is a table selectable
+
+def IsSelectable(db, table):
+ query = QSqlQuery(db)
+ try:
+ QueryExec(query, "SELECT * FROM " + table + " LIMIT 1")
+ except:
+ return False
+ return True
+
# SQL data preparation
def SQLTableDataPrep(query, count):
@@ -1818,12 +1896,13 @@ class SQLTableModel(TableModel):
progress = Signal(object)
- def __init__(self, glb, sql, column_count, parent=None):
+ def __init__(self, glb, sql, column_headers, parent=None):
super(SQLTableModel, self).__init__(parent)
self.glb = glb
self.more = True
self.populated = 0
- self.fetcher = SQLFetcher(glb, sql, lambda x, y=column_count: SQLTableDataPrep(x, y), self.AddSample)
+ self.column_headers = column_headers
+ self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
@@ -1861,6 +1940,12 @@ class SQLTableModel(TableModel):
def HasMoreRecords(self):
return self.more
+ def columnCount(self, parent=None):
+ return len(self.column_headers)
+
+ def columnHeader(self, column):
+ return self.column_headers[column]
+
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
@@ -1870,12 +1955,12 @@ class SQLAutoTableModel(SQLTableModel):
if table_name == "comm_threads_view":
# For now, comm_threads_view has no id column
sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
- self.column_headers = []
+ column_headers = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "PRAGMA table_info(" + table_name + ")")
while query.next():
- self.column_headers.append(query.value(1))
+ column_headers.append(query.value(1))
if table_name == "sqlite_master":
sql = "SELECT * FROM " + table_name
else:
@@ -1888,14 +1973,8 @@ class SQLAutoTableModel(SQLTableModel):
schema = "public"
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
- self.column_headers.append(query.value(0))
- super(SQLAutoTableModel, self).__init__(glb, sql, len(self.column_headers), parent)
-
- def columnCount(self, parent=None):
- return len(self.column_headers)
-
- def columnHeader(self, column):
- return self.column_headers[column]
+ column_headers.append(query.value(0))
+ super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
# Base class for custom ResizeColumnsToContents
@@ -1998,6 +2077,103 @@ def GetTableList(glb):
tables.append("information_schema.columns")
return tables
+# Top Calls data model
+
+class TopCallsModel(SQLTableModel):
+
+ def __init__(self, glb, report_vars, parent=None):
+ text = ""
+ if not glb.dbref.is_sqlite3:
+ text = "::text"
+ limit = ""
+ if len(report_vars.limit):
+ limit = " LIMIT " + report_vars.limit
+ sql = ("SELECT comm, pid, tid, name,"
+ " CASE"
+ " WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
+ " ELSE short_name"
+ " END AS dso,"
+ " call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
+ " CASE"
+ " WHEN (calls.flags = 1) THEN 'no call'" + text +
+ " WHEN (calls.flags = 2) THEN 'no return'" + text +
+ " WHEN (calls.flags = 3) THEN 'no call/return'" + text +
+ " ELSE ''" + text +
+ " END AS flags"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " INNER JOIN comms ON calls.comm_id = comms.id"
+ " INNER JOIN threads ON calls.thread_id = threads.id" +
+ report_vars.where_clause +
+ " ORDER BY elapsed_time DESC" +
+ limit
+ )
+ column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
+ self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
+ super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
+
+ def columnAlignment(self, column):
+ return self.alignment[column]
+
+# Top Calls report creation dialog
+
+class TopCallsDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Top Calls by Elapsed Time"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
+ lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
+ super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
+
+# Top Calls window
+
+class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
+
+ def __init__(self, glb, report_vars, parent=None):
+ super(TopCallsWindow, self).__init__(parent)
+
+ self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
+ self.model = self.data_model
+
+ self.view = QTableView()
+ self.view.setModel(self.model)
+ self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+ self.view.verticalHeader().setVisible(False)
+
+ self.ResizeColumnsToContents()
+
+ self.find_bar = FindBar(self, self, True)
+
+ self.finder = ChildDataItemFinder(self.model)
+
+ self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
+
+ def Find(self, value, direction, pattern, context):
+ self.view.setFocus()
+ self.find_bar.Busy()
+ self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+ def FindDone(self, row):
+ self.find_bar.Idle()
+ if row >= 0:
+ self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ else:
+ self.find_bar.NotFound()
+
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
@@ -2101,6 +2277,7 @@ p.c2 {
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
<p class=c2><a href=#allbranches>1.2 All branches</a></p>
<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c2><a href=#topcallsbyelapsedtime>1.4 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
@@ -2176,6 +2353,10 @@ ms, us or ns. Also, negative values are relative to the end of trace. Examples:
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h2 id=topcallsbyelapsedtime>1.4 Top calls by elapsed time</h2>
+The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
+The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=tables>2. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
@@ -2305,10 +2486,14 @@ class MainWindow(QMainWindow):
edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
reports_menu = menu.addMenu("&Reports")
- reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
self.EventMenu(GetEventList(glb.db), reports_menu)
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
+
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
@@ -2364,14 +2549,20 @@ class MainWindow(QMainWindow):
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
+ def NewTopCalls(self):
+ dialog = TopCallsDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ TopCallsWindow(self.glb, dialog.report_vars, self)
+
def NewBranchView(self, event_id):
- BranchWindow(self.glb, event_id, "", "", self)
+ BranchWindow(self.glb, event_id, ReportVars(), self)
def NewSelectedBranchView(self, event_id):
dialog = SelectedBranchDialog(self.glb, self)
ret = dialog.exec_()
if ret:
- BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+ BranchWindow(self.glb, event_id, dialog.report_vars, self)
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
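The refactoring above funnels every report dialog through LineEditDataItem subclasses that turn user input into SQL WHERE fragments collected in a ReportVars object. As a simplified mirror of NonNegativeIntegerRangesDataItem.DoValidate() (without the Qt widget plumbing or the number validation), input such as "0,5-6" for the CPUs field becomes:

    # Simplified sketch of the range-to-SQL conversion used by the dialogs above.
    def ranges_to_sql(column, text):
        singles, ranges = [], []
        for value in [x.strip() for x in text.split(",")]:
            if "-" in value:
                lo, hi = value.split("-")
                ranges.append("(" + column + " >= " + lo + " AND " + column + " <= " + hi + ")")
            else:
                singles.append(value)
        if singles:
            ranges.append(column + " IN (" + ",".join(singles) + ")")
        return " OR ".join(ranges)

    print(ranges_to_sql("cpu", "0,5-6"))
    # (cpu >= 5 AND cpu <= 6) OR cpu IN (0)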
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index cafeff3d74db..3648e8b986ec 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -5,6 +5,8 @@
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os
import sys
@@ -32,7 +34,7 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_error_totals()
@@ -57,22 +59,21 @@ def syscalls__sys_exit(event_name, context, common_cpu,
def print_error_totals():
if for_comm is not None:
- print "\nsyscall errors for %s:\n\n" % (for_comm),
+ print("\nsyscall errors for %s:\n" % (for_comm))
else:
- print "\nsyscall errors:\n\n",
+ print("\nsyscall errors:\n")
- print "%-30s %10s\n" % ("comm [pid]", "count"),
- print "%-30s %10s\n" % ("------------------------------", \
- "----------"),
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
- print "\n%s [%d]\n" % (comm, pid),
+ print("\n%s [%d]" % (comm, pid))
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
- print " syscall: %-16s\n" % syscall_name(id),
+ print(" syscall: %-16s" % syscall_name(id))
ret_keys = syscalls[comm][pid][id].keys()
- for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
- print " err = %-20s %10d\n" % (strerror(ret), val),
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index ebee2c5ae496..fb0bbcbfa0f0 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -4,6 +4,8 @@
# Copyright (c) 2018, Intel Corporation.
from __future__ import division
+from __future__ import print_function
+
import os
import sys
import struct
@@ -31,21 +33,23 @@ def parse_iomem():
for i, j in enumerate(f):
m = re.split('-|:',j,2)
if m[2].strip() == 'System RAM':
- system_ram.append(long(m[0], 16))
- system_ram.append(long(m[1], 16))
+ system_ram.append(int(m[0], 16))
+ system_ram.append(int(m[1], 16))
if m[2].strip() == 'Persistent Memory':
- pmem.append(long(m[0], 16))
- pmem.append(long(m[1], 16))
+ pmem.append(int(m[0], 16))
+ pmem.append(int(m[1], 16))
def print_memory_type():
- print "Event: %s" % (event_name)
- print "%-40s %10s %10s\n" % ("Memory type", "count", "percentage"),
- print "%-40s %10s %10s\n" % ("----------------------------------------", \
+ print("Event: %s" % (event_name))
+ print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
+ print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
+ end='');
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
- key = lambda(k, v): (v, k), reverse = True):
- print "%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
+ end='')
def trace_begin():
parse_iomem()
@@ -80,7 +84,7 @@ def find_memory_type(phys_addr):
f.seek(0, 0)
for j in f:
m = re.split('-|:',j,2)
- if long(m[0], 16) <= phys_addr <= long(m[1], 16):
+ if int(m[0], 16) <= phys_addr <= int(m[1], 16):
return m[2]
return "N/A"
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index a150164b44a3..212557a02c50 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -1,6 +1,8 @@
# Monitor the system for dropped packets and produce a report of drop locations and counts
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
import os
import sys
@@ -50,19 +52,19 @@ def get_sym(sloc):
return (None, 0)
def print_drop_table():
- print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
+ print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
- print "%25s %25s %25s" % (sym, off, drop_log[i])
+ print("%25s %25s %25s" % (sym, off, drop_log[i]))
def trace_begin():
- print "Starting trace (Ctrl-C to dump results)"
+ print("Starting trace (Ctrl-C to dump results)")
def trace_end():
- print "Gathering kallsyms data"
+ print("Gathering kallsyms data")
get_kallsyms_table()
print_drop_table()
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 9b2050f778f1..267bda49325d 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -8,6 +8,8 @@
# dev=: show only things related to the specified device
# debug: run in debug mode. It shows buffer status.
+from __future__ import print_function
+
import os
import sys
@@ -17,6 +19,7 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
from perf_trace_context import *
from Core import *
from Util import *
+from functools import cmp_to_key
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
@@ -61,12 +64,12 @@ def diff_msec(src, dst):
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
- print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
+ print("%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" %
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
- diff_msec(hunk['xmit_t'], hunk['free_t']))
+ diff_msec(hunk['xmit_t'], hunk['free_t'])))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
@@ -98,55 +101,55 @@ def print_receive(hunk):
if show_hunk == 0:
return
- print "%d.%06dsec cpu=%d" % \
- (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
+ print("%d.%06dsec cpu=%d" %
+ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu))
for i in range(len(irq_list)):
- print PF_IRQ_ENTRY % \
+ print(PF_IRQ_ENTRY %
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
- irq_list[i]['irq'], irq_list[i]['name'])
- print PF_JOINT
+ irq_list[i]['irq'], irq_list[i]['name']))
+ print(PF_JOINT)
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
- print PF_NET_RX % \
+ print(PF_NET_RX %
(diff_msec(base_t, irq_event['time']),
- irq_event['skbaddr'])
- print PF_JOINT
- print PF_SOFT_ENTRY % \
- diff_msec(base_t, hunk['sirq_ent_t'])
- print PF_JOINT
+ irq_event['skbaddr']))
+ print(PF_JOINT)
+ print(PF_SOFT_ENTRY %
+ diff_msec(base_t, hunk['sirq_ent_t']))
+ print(PF_JOINT)
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
- print PF_NAPI_POLL % \
- (diff_msec(base_t, event['event_t']), event['dev'])
+ print(PF_NAPI_POLL %
+ (diff_msec(base_t, event['event_t']), event['dev']))
if i == len(event_list) - 1:
- print ""
+ print("")
else:
- print PF_JOINT
+ print(PF_JOINT)
else:
- print PF_NET_RECV % \
+ print(PF_NET_RECV %
(diff_msec(base_t, event['event_t']), event['skbaddr'],
- event['len'])
+ event['len']))
if 'comm' in event.keys():
- print PF_WJOINT
- print PF_CPY_DGRAM % \
+ print(PF_WJOINT)
+ print(PF_CPY_DGRAM %
(diff_msec(base_t, event['comm_t']),
- event['pid'], event['comm'])
+ event['pid'], event['comm']))
elif 'handle' in event.keys():
- print PF_WJOINT
+ print(PF_WJOINT)
if event['handle'] == "kfree_skb":
- print PF_KFREE_SKB % \
+ print(PF_KFREE_SKB %
(diff_msec(base_t,
event['comm_t']),
- event['location'])
+ event['location']))
elif event['handle'] == "consume_skb":
- print PF_CONS_SKB % \
+ print(PF_CONS_SKB %
diff_msec(base_t,
- event['comm_t'])
- print PF_JOINT
+ event['comm_t']))
+ print(PF_JOINT)
def trace_begin():
global show_tx
@@ -172,8 +175,7 @@ def trace_begin():
def trace_end():
# order all events in time
- all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
- b[EINFO_IDX_TIME]))
+ all_event_list.sort(key=cmp_to_key(lambda a,b :a[EINFO_IDX_TIME] - b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
@@ -210,19 +212,19 @@ def trace_end():
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
- print " dev len Qdisc " \
- " netdevice free"
+ print(" dev len Qdisc "
+ " netdevice free")
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
- print "debug buffer status"
- print "----------------------------"
- print "xmit Qdisc:remain:%d overflow:%d" % \
- (len(tx_queue_list), of_count_tx_queue_list)
- print "xmit netdevice:remain:%d overflow:%d" % \
- (len(tx_xmit_list), of_count_tx_xmit_list)
- print "receive:remain:%d overflow:%d" % \
- (len(rx_skb_list), of_count_rx_skb_list)
+ print("debug buffer status")
+ print("----------------------------")
+ print("xmit Qdisc:remain:%d overflow:%d" %
+ (len(tx_queue_list), of_count_tx_queue_list))
+ print("xmit netdevice:remain:%d overflow:%d" %
+ (len(tx_xmit_list), of_count_tx_xmit_list))
+ print("receive:remain:%d overflow:%d" %
+ (len(rx_skb_list), of_count_rx_skb_list))
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
diff --git a/tools/perf/scripts/python/powerpc-hcalls.py b/tools/perf/scripts/python/powerpc-hcalls.py
index 00e0e7476e55..8b78dc790adb 100644
--- a/tools/perf/scripts/python/powerpc-hcalls.py
+++ b/tools/perf/scripts/python/powerpc-hcalls.py
@@ -4,6 +4,8 @@
#
# Hypervisor call statistics
+from __future__ import print_function
+
import os
import sys
@@ -149,7 +151,7 @@ hcall_table = {
}
def hcall_table_lookup(opcode):
- if (hcall_table.has_key(opcode)):
+ if (opcode in hcall_table):
return hcall_table[opcode]
else:
return opcode
@@ -157,8 +159,8 @@ def hcall_table_lookup(opcode):
print_ptrn = '%-28s%10s%10s%10s%10s'
def trace_end():
- print print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)')
- print '-' * 68
+ print(print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)'))
+ print('-' * 68)
for opcode in output:
h_name = hcall_table_lookup(opcode)
time = output[opcode]['time']
@@ -166,14 +168,14 @@ def trace_end():
min_t = output[opcode]['min']
max_t = output[opcode]['max']
- print print_ptrn % (h_name, cnt, min_t, max_t, time/cnt)
+ print(print_ptrn % (h_name, cnt, min_t, max_t, time//cnt))
def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
opcode, retval):
- if (d_enter.has_key(cpu) and d_enter[cpu].has_key(opcode)):
+ if (cpu in d_enter and opcode in d_enter[cpu]):
diff = nsecs(sec, nsec) - d_enter[cpu][opcode]
- if (output.has_key(opcode)):
+ if (opcode in output):
output[opcode]['time'] += diff
output[opcode]['cnt'] += 1
if (output[opcode]['min'] > diff):
@@ -190,11 +192,11 @@ def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
del d_enter[cpu][opcode]
# else:
-# print "Can't find matching hcall_enter event. Ignoring sample"
+# print("Can't find matching hcall_enter event. Ignoring sample")
def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
callchain, opcode):
- if (d_enter.has_key(cpu)):
+ if (cpu in d_enter):
d_enter[cpu][opcode] = nsecs(sec, nsec)
else:
d_enter[cpu] = {opcode: nsecs(sec, nsec)}
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 3473e7f66081..3984bf51f3c5 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 61621b93affb..987ffae7c8ca 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -8,7 +8,14 @@
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
-import os, sys, thread, time
+from __future__ import print_function
+
+import os, sys, time
+
+try:
+ import thread
+except ImportError:
+ import _thread as thread
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -62,18 +69,19 @@ def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
+ print("\nsyscall events for %s:\n" % (for_comm))
else:
- print "\nsyscall events:\n\n",
+ print("\nsyscall events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" %
+ ("----------------------------------------",
+ "----------"))
- for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+ for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
reverse = True):
try:
- print "%-40s %10d\n" % (syscall_name(id), val),
+ print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
pass
syscalls.clear()
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 1697b5e18c96..5e703efaddcc 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -19,6 +19,8 @@
# Written by Paolo Bonzini <pbonzini@redhat.com>
# Based on Brendan Gregg's stackcollapse-perf.pl script.
+from __future__ import print_function
+
import os
import sys
from collections import defaultdict
@@ -120,7 +122,6 @@ def process_event(param_dict):
lines[stack_string] = lines[stack_string] + 1
def trace_end():
- list = lines.keys()
- list.sort()
+ list = sorted(lines)
for stack in list:
- print "%s %d" % (stack, lines[stack])
+ print("%s %d" % (stack, lines[stack]))
diff --git a/tools/perf/scripts/python/stat-cpi.py b/tools/perf/scripts/python/stat-cpi.py
index 8410672efb8b..01fa933ff3cf 100644
--- a/tools/perf/scripts/python/stat-cpi.py
+++ b/tools/perf/scripts/python/stat-cpi.py
@@ -1,6 +1,7 @@
-#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
data = {}
times = []
threads = []
@@ -20,8 +21,8 @@ def store_key(time, cpu, thread):
threads.append(thread)
def store(time, event, cpu, thread, val, ena, run):
- #print "event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % \
- # (event, cpu, thread, time, val, ena, run)
+ #print("event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" %
+ # (event, cpu, thread, time, val, ena, run))
store_key(time, cpu, thread)
key = get_key(time, event, cpu, thread)
@@ -59,7 +60,7 @@ def stat__interval(time):
if ins != 0:
cpi = cyc/float(ins)
- print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
+ print("%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins))
def trace_end():
pass
@@ -75,4 +76,4 @@ def trace_end():
# if ins != 0:
# cpi = cyc/float(ins)
#
-# print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)
+# print("time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi))
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index daf314cc5dd3..42782487b0e9 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -5,6 +5,8 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
@@ -31,7 +33,7 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
@@ -55,20 +57,20 @@ def syscalls__sys_enter(event_name, context, common_cpu,
def print_syscall_totals():
if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
+ print("\nsyscall events for %s:\n" % (for_comm))
else:
- print "\nsyscall events by comm/pid:\n\n",
+ print("\nsyscall events by comm/pid:\n")
- print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "----------"),
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
- print "\n%s [%d]\n" % (comm, pid),
+ print("\n%s [%d]" % (comm, pid))
id_keys = syscalls[comm][pid].keys()
- for id, val in sorted(syscalls[comm][pid].iteritems(), \
- key = lambda(k, v): (v, k), reverse = True):
- print " %-38s %10d\n" % (syscall_name(id), val),
+ for id, val in sorted(syscalls[comm][pid].items(), \
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index e66a7730aeb5..0ebd89cfd42c 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -5,6 +5,8 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os
import sys
@@ -28,7 +30,7 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
@@ -51,14 +53,14 @@ def syscalls__sys_enter(event_name, context, common_cpu,
def print_syscall_totals():
if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
+ print("\nsyscall events for %s:\n" % (for_comm))
else:
- print "\nsyscall events:\n\n",
+ print("\nsyscall events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
- for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+ for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
reverse = True):
- print "%-40s %10d\n" % (syscall_name(id), val),
+ print("%-40s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index 44090a9a19f3..cb39ac46bc73 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -1,6 +1,7 @@
-#! /usr/bin/python
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
import os
import sys
import glob
@@ -8,7 +9,11 @@ import optparse
import tempfile
import logging
import shutil
-import ConfigParser
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
def data_equal(a, b):
# Allow multiple values in assignment separated by '|'
@@ -100,20 +105,20 @@ class Event(dict):
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
- if not self.has_key(t) or not other.has_key(t):
+ if t not in self or t not in other:
return False
if not data_equal(self[t], other[t]):
return False
return True
def optional(self):
- if self.has_key('optional') and self['optional'] == '1':
+ if 'optional' in self and self['optional'] == '1':
return True
return False
def diff(self, other):
for t in Event.terms:
- if not self.has_key(t) or not other.has_key(t):
+ if t not in self or t not in other:
continue
if not data_equal(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
@@ -134,7 +139,7 @@ class Event(dict):
# - expected values assignments
class Test(object):
def __init__(self, path, options):
- parser = ConfigParser.SafeConfigParser()
+ parser = configparser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
@@ -193,7 +198,7 @@ class Test(object):
return True
def load_events(self, path, events):
- parser_event = ConfigParser.SafeConfigParser()
+ parser_event = configparser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains 'event' word,
@@ -207,7 +212,7 @@ class Test(object):
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
- parser_base = ConfigParser.SafeConfigParser()
+ parser_base = configparser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
@@ -322,9 +327,9 @@ def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
- except Unsup, obj:
+ except Unsup as obj:
log.warning("unsupp %s" % obj.getMsg())
- except Notest, obj:
+ except Notest as obj:
log.warning("skipped %s" % obj.getMsg())
def setup_log(verbose):
@@ -363,7 +368,7 @@ def main():
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
- action="count", dest="verbose")
+ default=0, action="count", dest="verbose")
options, args = parser.parse_args()
if args:
@@ -373,7 +378,7 @@ def main():
setup_log(options.verbose)
if not options.test_dir:
- print 'FAILED no -d option specified'
+ print('FAILED no -d option specified')
sys.exit(-1)
if not options.test:
@@ -382,8 +387,8 @@ def main():
try:
run_tests(options)
- except Fail, obj:
- print "FAILED %s" % obj.getMsg();
+ except Fail as obj:
+ print("FAILED %s" % obj.getMsg())
sys.exit(-1)
sys.exit(0)
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index a20cbc445426..57fc544aedb0 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -15,7 +15,6 @@
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
-#include <sys/ioctl.h>
#include "tests.h"
#include "debug.h"
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index dbf2c69944d2..4ebd2681e760 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -15,6 +15,8 @@
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "event.h"
#include "thread.h"
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 7c8d2e422401..077c306c1cae 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -10,6 +10,7 @@
#include "../util/unwind.h"
#include "perf_regs.h"
#include "map.h"
+#include "symbol.h"
#include "thread.h"
#include "callchain.h"
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 5f8501c68da4..ea7acf403727 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
return -1;
}
- is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED);
+ is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED);
if (should_be_signed && !is_signed) {
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
evsel->name, name, is_signed, should_be_signed);
@@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1;
}
- if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
@@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
ret = -1;
- if (perf_evsel__test_field(evsel, "next_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "next_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "next_pid", 4, true))
@@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1;
}
- if (perf_evsel__test_field(evsel, "comm", 16, true))
+ if (perf_evsel__test_field(evsel, "comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "pid", 4, true))
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index b889a28fd80b..469958cd7fe0 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include "perf.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -161,7 +162,7 @@ out:
void print_hists_in(struct hists *hists)
{
int i = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -170,7 +171,7 @@ void print_hists_in(struct hists *hists)
root = hists->entries_in;
pr_info("----- %s --------\n", __func__);
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@@ -191,13 +192,13 @@ void print_hists_in(struct hists *hists)
void print_hists_out(struct hists *hists)
{
int i = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
root = &hists->entries;
pr_info("----- %s --------\n", __func__);
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 65fe02bebbee..7a2eed6c783e 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -2,6 +2,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -125,8 +126,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -136,12 +137,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
- while (!RB_EMPTY_ROOT(root_out)) {
- node = rb_first(root_out);
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
- rb_erase(node, root_out);
- rb_erase(&he->rb_node_in, root_in);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@@ -198,7 +199,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
print_hists_out(hists);
}
- root = &hists->entries;
+ root = &hists->entries.rb_root;
for (node = rb_first(root), i = 0;
node && (he = rb_entry(node, struct hist_entry, rb_node));
node = rb_next(node), i++) {
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 1c5bedab3c2c..975844807fe2 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 9a9d06cb0222..af633db63f4d 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -142,7 +142,7 @@ static int find_sample(struct sample *samples, size_t nr_samples,
static int __validate_match(struct hists *hists)
{
size_t count = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
/*
@@ -153,7 +153,7 @@ static int __validate_match(struct hists *hists)
else
root = hists->entries_in;
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@@ -192,7 +192,7 @@ static int __validate_link(struct hists *hists, int idx)
size_t count = 0;
size_t count_pair = 0;
size_t count_dummy = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
/*
@@ -205,7 +205,7 @@ static int __validate_link(struct hists *hists, int idx)
else
root = hists->entries_in;
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index faacb4f41460..0a510c524a5d 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -2,6 +2,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -91,8 +92,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -102,12 +103,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
- while (!RB_EMPTY_ROOT(root_out)) {
- node = rb_first(root_out);
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
- rb_erase(node, root_out);
- rb_erase(&he->rb_node_in, root_in);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@@ -126,7 +127,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = NULL;
@@ -162,7 +163,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
@@ -228,7 +229,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "overhead,cpu";
@@ -262,7 +263,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);
@@ -284,7 +285,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "comm,overhead,dso";
@@ -316,7 +317,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
@@ -358,7 +359,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "dso,sym,comm,overhead,dso";
@@ -394,7 +395,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
@@ -460,7 +461,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "cpu,pid,comm,dso,sym";
@@ -497,7 +498,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 5ede9b561d32..ba87e6e8d18c 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -11,6 +11,7 @@
#include "tests.h"
#include "machine.h"
#include "thread_map.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "util.h"
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 3b97ac018d5a..4a69c07f4101 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1330,6 +1330,26 @@ static int test__checkevent_complex_name(struct perf_evlist *evlist)
return 0;
}
+static int test__sym_event_slash(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ return 0;
+}
+
+static int test__sym_event_dc(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ return 0;
+}
+
static int count_tracepoints(void)
{
struct dirent *events_ent;
@@ -1670,6 +1690,16 @@ static struct evlist_test test__events[] = {
.name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
.check = test__checkevent_complex_name,
.id = 53
+ },
+ {
+ .name = "cycles//u",
+ .check = test__sym_event_slash,
+ .id = 54,
+ },
+ {
+ .name = "cycles:k",
+ .check = test__sym_event_dc,
+ .id = 55,
}
};
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 7bedf8608fdd..14a78898d79e 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -4,7 +4,9 @@
#include "util.h"
#include "tests.h"
#include <errno.h>
+#include <stdio.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
/* Simulated format definitions. */
static struct test_format {
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 0e2d00d69e6e..236ce0d6c826 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "branch.h"
#include "util.h"
#include "event.h"
#include "evsel.h"
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
index 5059452d27dd..8bfaa630389c 100644
--- a/tools/perf/tests/sdt.c
+++ b/tools/perf/tests/sdt.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <sys/epoll.h>
#include <util/evlist.h>
+#include <util/symbol.h>
#include <linux/filter.h>
#include "tests.h"
#include "debug.h"
diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh
index 6293cc660947..e37787be672b 100644
--- a/tools/perf/tests/shell/lib/probe.sh
+++ b/tools/perf/tests/shell/lib/probe.sh
@@ -4,3 +4,8 @@ skip_if_no_perf_probe() {
perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
return 0
}
+
+skip_if_no_perf_trace() {
+ perf trace -h 2>&1 | grep -q -e 'is not a perf-command' -e 'trace command not available' && return 2
+ return 0
+}
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 1c16e56cd93e..7cb99b433888 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -13,7 +13,8 @@ add_probe_vfs_getname() {
local verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
+ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
fi
}
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 50109f27ca07..147efeb6b195 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -12,6 +12,7 @@
. $(dirname $0)/lib/probe.sh
skip_if_no_perf_probe || exit 2
+skip_if_no_perf_trace || exit 2
. $(dirname $0)/lib/probe_vfs_getname.sh
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index b82f55fcc294..399f18ca71a3 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -119,4 +119,9 @@ int test__arch_unwind_sample(struct perf_sample *sample,
struct thread *thread);
#endif
#endif
+
+#if defined(__arm__)
+int test__vectors_page(struct test *test, int subtest);
+#endif
+
#endif /* TESTS_H */
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 637365099b7d..85f328ddf897 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -1,15 +1,15 @@
-libperf-y += clone.o
-libperf-y += fcntl.o
-libperf-y += flock.o
+perf-y += clone.o
+perf-y += fcntl.o
+perf-y += flock.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
-libperf-y += ioctl.o
+perf-y += ioctl.o
endif
-libperf-y += kcmp.o
-libperf-y += mount_flags.o
-libperf-y += pkey_alloc.o
-libperf-y += arch_prctl.o
-libperf-y += prctl.o
-libperf-y += renameat.o
-libperf-y += sockaddr.o
-libperf-y += socket.o
-libperf-y += statx.o
+perf-y += kcmp.o
+perf-y += mount_flags.o
+perf-y += pkey_alloc.o
+perf-y += arch_prctl.o
+perf-y += prctl.o
+perf-y += renameat.o
+perf-y += sockaddr.o
+perf-y += socket.o
+perf-y += statx.o
diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
index 620350d41209..52242fa4072b 100644
--- a/tools/perf/trace/beauty/ioctl.c
+++ b/tools/perf/trace/beauty/ioctl.c
@@ -175,7 +175,7 @@ static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, boo
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long cmd = arg->val;
- unsigned int fd = syscall_arg__val(arg, 0);
+ int fd = syscall_arg__val(arg, 0);
struct file *file = thread__files_entry(arg->thread, fd);
if (file != NULL) {
diff --git a/tools/perf/trace/beauty/mount_flags.sh b/tools/perf/trace/beauty/mount_flags.sh
index 45547573a1db..847850b2ef6c 100755
--- a/tools/perf/trace/beauty/mount_flags.sh
+++ b/tools/perf/trace/beauty/mount_flags.sh
@@ -5,11 +5,11 @@
printf "static const char *mount_flags[] = {\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
-egrep $regex ${header_dir}/fs.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \
+egrep $regex ${header_dir}/mount.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \
sed -r "s/$regex/\2 \2 \1/g" | sort -n | \
xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n"
regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*'
-egrep $regex ${header_dir}/fs.h | \
+egrep $regex ${header_dir}/mount.h | \
sed -r "s/$regex/\2 \1/g" | \
xargs printf "\t[%s + 1] = \"%s\",\n"
printf "};\n"
diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh
index d32f8f1124af..3109d7b05e11 100755
--- a/tools/perf/trace/beauty/prctl_option.sh
+++ b/tools/perf/trace/beauty/prctl_option.sh
@@ -4,7 +4,7 @@
[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *prctl_options[] = {\n"
-regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*'
+regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*'
egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
sed -r "s/$regex/\2 \1/g" | \
sort -n | xargs printf "\t[%s] = \"%s\",\n"
diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
index 6897fab40dcc..d4d10b33ba0e 100644
--- a/tools/perf/trace/beauty/waitid_options.c
+++ b/tools/perf/trace/beauty/waitid_options.c
@@ -11,7 +11,7 @@ static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
#define P_OPTION(n) \
if (options & W##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
options &= ~W##n; \
}
diff --git a/tools/perf/ui/Build b/tools/perf/ui/Build
index 0a73538c0441..3aff83c3275f 100644
--- a/tools/perf/ui/Build
+++ b/tools/perf/ui/Build
@@ -1,14 +1,14 @@
-libperf-y += setup.o
-libperf-y += helpline.o
-libperf-y += progress.o
-libperf-y += util.o
-libperf-y += hist.o
-libperf-y += stdio/hist.o
+perf-y += setup.o
+perf-y += helpline.o
+perf-y += progress.o
+perf-y += util.o
+perf-y += hist.o
+perf-y += stdio/hist.o
CFLAGS_setup.o += -DLIBDIR="BUILD_STR($(LIBDIR))"
-libperf-$(CONFIG_SLANG) += browser.o
-libperf-$(CONFIG_SLANG) += browsers/
-libperf-$(CONFIG_SLANG) += tui/
+perf-$(CONFIG_SLANG) += browser.o
+perf-$(CONFIG_SLANG) += browsers/
+perf-$(CONFIG_SLANG) += tui/
CFLAGS_browser.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
index de223f5bed58..8fee56b46502 100644
--- a/tools/perf/ui/browsers/Build
+++ b/tools/perf/ui/browsers/Build
@@ -1,8 +1,8 @@
-libperf-y += annotate.o
-libperf-y += hists.o
-libperf-y += map.o
-libperf-y += scripts.o
-libperf-y += header.o
+perf-y += annotate.o
+perf-y += hists.o
+perf-y += map.o
+perf-y += scripts.o
+perf-y += header.o
CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 1d00e5ec7906..35bdfd8b1e71 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -7,6 +7,7 @@
#include "../../util/annotate.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
+#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
@@ -224,20 +225,24 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
return ret;
}
-static int disasm__cmp(struct annotation_line *a, struct annotation_line *b)
+static double disasm__cmp(struct annotation_line *a, struct annotation_line *b,
+ int percent_type)
{
int i;
for (i = 0; i < a->data_nr; i++) {
- if (a->data[i].percent == b->data[i].percent)
+ if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type])
continue;
- return a->data[i].percent < b->data[i].percent;
+ return a->data[i].percent[percent_type] -
+ b->data[i].percent[percent_type];
}
return 0;
}
-static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al)
+static void disasm_rb_tree__insert(struct annotate_browser *browser,
+ struct annotation_line *al)
{
+ struct rb_root *root = &browser->entries;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct annotation_line *l;
@@ -246,7 +251,7 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line
parent = *p;
l = rb_entry(parent, struct annotation_line, rb_node);
- if (disasm__cmp(al, l))
+ if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -329,7 +334,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
- disasm_rb_tree__insert(&browser->entries, &pos->al);
+ disasm_rb_tree__insert(browser, &pos->al);
}
pthread_mutex_unlock(&notes->lock);
diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c
index d75492189acb..5aeb663dd184 100644
--- a/tools/perf/ui/browsers/header.c
+++ b/tools/perf/ui/browsers/header.c
@@ -35,7 +35,7 @@ static int list_menu__run(struct ui_browser *menu)
{
int key;
unsigned long offset;
- const char help[] =
+ static const char help[] =
"h/?/F1 Show this window\n"
"UP/DOWN/PGUP\n"
"PGDN/SPACE\n"
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index ffac1d54a3d4..aef800d97ea1 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -8,9 +8,12 @@
#include <linux/rbtree.h>
#include <sys/ttydefaults.h>
+#include "../../util/callchain.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include "../../util/hist.h"
+#include "../../util/map.h"
+#include "../../util/symbol.h"
#include "../../util/pstack.h"
#include "../../util/sort.h"
#include "../../util/util.h"
@@ -49,7 +52,7 @@ static int hist_browser__get_folding(struct hist_browser *browser)
struct hists *hists = browser->hists;
int unfolded_rows = 0;
- for (nd = rb_first(&hists->entries);
+ for (nd = rb_first_cached(&hists->entries);
(nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
nd = rb_hierarchy_next(nd)) {
struct hist_entry *he =
@@ -267,7 +270,7 @@ static int hierarchy_count_rows(struct hist_browser *hb, struct hist_entry *he,
if (he->has_no_entry)
return 1;
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
while (node) {
float percent;
@@ -372,7 +375,7 @@ static void hist_entry__init_have_children(struct hist_entry *he)
he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
callchain__init_have_children(&he->sorted_chain);
} else {
- he->has_children = !RB_EMPTY_ROOT(&he->hroot_out);
+ he->has_children = !RB_EMPTY_ROOT(&he->hroot_out.rb_root);
}
he->init_have_children = true;
@@ -508,7 +511,7 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
struct hist_entry *child;
int n = 0;
- for (nd = rb_first(&he->hroot_out); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) {
child = rb_entry(nd, struct hist_entry, rb_node);
percent = hist_entry__get_percent_limit(child);
if (!child->filtered && percent >= hb->min_pcnt)
@@ -566,7 +569,7 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
struct rb_node *nd;
struct hist_entry *he;
- nd = rb_first(&browser->hists->entries);
+ nd = rb_first_cached(&browser->hists->entries);
while (nd) {
he = rb_entry(nd, struct hist_entry, rb_node);
@@ -1738,7 +1741,7 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
struct hist_browser *hb;
hb = container_of(browser, struct hist_browser, b);
- browser->top = rb_first(&hb->hists->entries);
+ browser->top = rb_first_cached(&hb->hists->entries);
}
}
@@ -2649,7 +2652,7 @@ add_socket_opt(struct hist_browser *browser, struct popup_action *act,
static void hist_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
if (hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) {
hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
@@ -2669,7 +2672,7 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
double percent)
{
struct hist_entry *he;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
u64 total = hists__total_period(hb->hists);
u64 min_callchain_hits = total * (percent / 100);
@@ -2748,7 +2751,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"S Zoom into current Processor Socket\n" \
/* help messages are sorted by lexical order of the hotkey */
- const char report_help[] = HIST_BROWSER_HELP_COMMON
+ static const char report_help[] = HIST_BROWSER_HELP_COMMON
"i Show header information\n"
"P Print histograms to perf.hist.N\n"
"r Run available scripts\n"
@@ -2756,7 +2759,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
"/ Filter symbol by name";
- const char top_help[] = HIST_BROWSER_HELP_COMMON
+ static const char top_help[] = HIST_BROWSER_HELP_COMMON
"P Print histograms to perf.hist.N\n"
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index 5b8b8c637686..c70d9337405b 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include "../../util/util.h"
#include "../../util/debug.h"
+#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../browser.h"
#include "../helpline.h"
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 48428c9acd89..df49c9ba1785 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
+#include "util/sort.h"
#include "util/debug.h"
#include "util/annotate.h"
#include "util/evsel.h"
+#include "util/map.h"
+#include "util/symbol.h"
#include "ui/helpline.h"
#include <inttypes.h>
#include <signal.h>
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 4ab663ec3e5e..0c08890f006a 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "../evlist.h"
#include "../cache.h"
+#include "../callchain.h"
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
@@ -353,7 +354,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
g_object_unref(GTK_TREE_MODEL(store));
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
GtkTreeIter iter;
u64 total = hists__total_period(h->hists);
@@ -401,7 +402,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
}
static void perf_gtk__add_hierarchy_entries(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
GtkTreeStore *store,
GtkTreeIter *parent,
struct perf_hpp *hpp,
@@ -415,7 +416,7 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
u64 total = hists__total_period(hists);
int size;
- for (node = rb_first(root); node; node = rb_next(node)) {
+ for (node = rb_first_cached(root); node; node = rb_next(node)) {
GtkTreeIter iter;
float percent;
char *bf;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index fe3dfaa64a91..412d6f1626e3 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -3,6 +3,7 @@
#include <math.h>
#include <linux/compiler.h>
+#include "../util/callchain.h"
#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 74c4ae1f0a05..a60f2993d390 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -2,8 +2,12 @@
#include <stdio.h>
#include <linux/string.h>
+#include "../../util/callchain.h"
#include "../../util/util.h"
#include "../../util/hist.h"
+#include "../../util/map.h"
+#include "../../util/map_groups.h"
+#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
@@ -788,7 +792,8 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
indent = hists__overhead_width(hists) + 4;
- for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
+ for (nd = rb_first_cached(&hists->entries); nd;
+ nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;
diff --git a/tools/perf/ui/tui/Build b/tools/perf/ui/tui/Build
index 9e4c6ca41a9f..f916df33a1a7 100644
--- a/tools/perf/ui/tui/Build
+++ b/tools/perf/ui/tui/Build
@@ -1,4 +1,4 @@
-libperf-y += setup.o
-libperf-y += util.o
-libperf-y += helpline.o
-libperf-y += progress.o
+perf-y += setup.o
+perf-y += util.o
+perf-y += helpline.o
+perf-y += progress.o
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index af72be7f5b3b..8dd3102301ea 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -1,158 +1,164 @@
-libperf-y += annotate.o
-libperf-y += block-range.o
-libperf-y += build-id.o
-libperf-y += config.o
-libperf-y += ctype.o
-libperf-y += db-export.o
-libperf-y += env.o
-libperf-y += event.o
-libperf-y += evlist.o
-libperf-y += evsel.o
-libperf-y += evsel_fprintf.o
-libperf-y += find_bit.o
-libperf-y += get_current_dir_name.o
-libperf-y += kallsyms.o
-libperf-y += levenshtein.o
-libperf-y += llvm-utils.o
-libperf-y += mmap.o
-libperf-y += memswap.o
-libperf-y += parse-events.o
-libperf-y += perf_regs.o
-libperf-y += path.o
-libperf-y += print_binary.o
-libperf-y += rbtree.o
-libperf-y += libstring.o
-libperf-y += bitmap.o
-libperf-y += hweight.o
-libperf-y += smt.o
-libperf-y += strbuf.o
-libperf-y += string.o
-libperf-y += strlist.o
-libperf-y += strfilter.o
-libperf-y += top.o
-libperf-y += usage.o
-libperf-y += dso.o
-libperf-y += symbol.o
-libperf-y += symbol_fprintf.o
-libperf-y += color.o
-libperf-y += metricgroup.o
-libperf-y += header.o
-libperf-y += callchain.o
-libperf-y += values.o
-libperf-y += debug.o
-libperf-y += machine.o
-libperf-y += map.o
-libperf-y += pstack.o
-libperf-y += session.o
-libperf-$(CONFIG_TRACE) += syscalltbl.o
-libperf-y += ordered-events.o
-libperf-y += namespaces.o
-libperf-y += comm.o
-libperf-y += thread.o
-libperf-y += thread_map.o
-libperf-y += trace-event-parse.o
-libperf-y += parse-events-flex.o
-libperf-y += parse-events-bison.o
-libperf-y += pmu.o
-libperf-y += pmu-flex.o
-libperf-y += pmu-bison.o
-libperf-y += trace-event-read.o
-libperf-y += trace-event-info.o
-libperf-y += trace-event-scripting.o
-libperf-y += trace-event.o
-libperf-y += svghelper.o
-libperf-y += sort.o
-libperf-y += hist.o
-libperf-y += util.o
-libperf-y += xyarray.o
-libperf-y += cpumap.o
-libperf-y += cgroup.o
-libperf-y += target.o
-libperf-y += rblist.o
-libperf-y += intlist.o
-libperf-y += vdso.o
-libperf-y += counts.o
-libperf-y += stat.o
-libperf-y += stat-shadow.o
-libperf-y += stat-display.o
-libperf-y += record.o
-libperf-y += srcline.o
-libperf-y += srccode.o
-libperf-y += data.o
-libperf-y += tsc.o
-libperf-y += cloexec.o
-libperf-y += call-path.o
-libperf-y += rwsem.o
-libperf-y += thread-stack.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
-libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
-libperf-$(CONFIG_AUXTRACE) += intel-pt.o
-libperf-$(CONFIG_AUXTRACE) += intel-bts.o
-libperf-$(CONFIG_AUXTRACE) += arm-spe.o
-libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
-libperf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
+perf-y += annotate.o
+perf-y += block-range.o
+perf-y += build-id.o
+perf-y += config.o
+perf-y += ctype.o
+perf-y += db-export.o
+perf-y += env.o
+perf-y += event.o
+perf-y += evlist.o
+perf-y += evsel.o
+perf-y += evsel_fprintf.o
+perf-y += find_bit.o
+perf-y += get_current_dir_name.o
+perf-y += kallsyms.o
+perf-y += levenshtein.o
+perf-y += llvm-utils.o
+perf-y += mmap.o
+perf-y += memswap.o
+perf-y += parse-events.o
+perf-y += perf_regs.o
+perf-y += path.o
+perf-y += print_binary.o
+perf-y += rbtree.o
+perf-y += libstring.o
+perf-y += bitmap.o
+perf-y += hweight.o
+perf-y += smt.o
+perf-y += strbuf.o
+perf-y += string.o
+perf-y += strlist.o
+perf-y += strfilter.o
+perf-y += top.o
+perf-y += usage.o
+perf-y += dso.o
+perf-y += symbol.o
+perf-y += symbol_fprintf.o
+perf-y += color.o
+perf-y += color_config.o
+perf-y += metricgroup.o
+perf-y += header.o
+perf-y += callchain.o
+perf-y += values.o
+perf-y += debug.o
+perf-y += machine.o
+perf-y += map.o
+perf-y += pstack.o
+perf-y += session.o
+perf-y += sample-raw.o
+perf-y += s390-sample-raw.o
+perf-$(CONFIG_TRACE) += syscalltbl.o
+perf-y += ordered-events.o
+perf-y += namespaces.o
+perf-y += comm.o
+perf-y += thread.o
+perf-y += thread_map.o
+perf-y += trace-event-parse.o
+perf-y += parse-events-flex.o
+perf-y += parse-events-bison.o
+perf-y += pmu.o
+perf-y += pmu-flex.o
+perf-y += pmu-bison.o
+perf-y += trace-event-read.o
+perf-y += trace-event-info.o
+perf-y += trace-event-scripting.o
+perf-y += trace-event.o
+perf-y += svghelper.o
+perf-y += sort.o
+perf-y += hist.o
+perf-y += util.o
+perf-y += xyarray.o
+perf-y += cpumap.o
+perf-y += cputopo.o
+perf-y += cgroup.o
+perf-y += target.o
+perf-y += rblist.o
+perf-y += intlist.o
+perf-y += vdso.o
+perf-y += counts.o
+perf-y += stat.o
+perf-y += stat-shadow.o
+perf-y += stat-display.o
+perf-y += record.o
+perf-y += srcline.o
+perf-y += srccode.o
+perf-y += data.o
+perf-y += tsc.o
+perf-y += cloexec.o
+perf-y += call-path.o
+perf-y += rwsem.o
+perf-y += thread-stack.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
+perf-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-$(CONFIG_AUXTRACE) += arm-spe.o
+perf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+perf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
ifdef CONFIG_LIBOPENCSD
-libperf-$(CONFIG_AUXTRACE) += cs-etm.o
-libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+perf-$(CONFIG_AUXTRACE) += cs-etm.o
+perf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
endif
-libperf-y += parse-branch-options.o
-libperf-y += dump-insn.o
-libperf-y += parse-regs-options.o
-libperf-y += term.o
-libperf-y += help-unknown-cmd.o
-libperf-y += mem-events.o
-libperf-y += vsprintf.o
-libperf-y += drv_configs.o
-libperf-y += units.o
-libperf-y += time-utils.o
-libperf-y += expr-bison.o
-libperf-y += branch.o
-libperf-y += mem2node.o
-
-libperf-$(CONFIG_LIBBPF) += bpf-loader.o
-libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
-libperf-$(CONFIG_LIBELF) += symbol-elf.o
-libperf-$(CONFIG_LIBELF) += probe-file.o
-libperf-$(CONFIG_LIBELF) += probe-event.o
+perf-y += parse-branch-options.o
+perf-y += dump-insn.o
+perf-y += parse-regs-options.o
+perf-y += term.o
+perf-y += help-unknown-cmd.o
+perf-y += mem-events.o
+perf-y += vsprintf.o
+perf-y += units.o
+perf-y += time-utils.o
+perf-y += expr-bison.o
+perf-y += branch.o
+perf-y += mem2node.o
+
+perf-$(CONFIG_LIBBPF) += bpf-loader.o
+perf-$(CONFIG_LIBBPF) += bpf_map.o
+perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
+perf-$(CONFIG_LIBELF) += symbol-elf.o
+perf-$(CONFIG_LIBELF) += probe-file.o
+perf-$(CONFIG_LIBELF) += probe-event.o
ifndef CONFIG_LIBELF
-libperf-y += symbol-minimal.o
+perf-y += symbol-minimal.o
endif
ifndef CONFIG_SETNS
-libperf-y += setns.o
+perf-y += setns.o
endif
-libperf-$(CONFIG_DWARF) += probe-finder.o
-libperf-$(CONFIG_DWARF) += dwarf-aux.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += probe-finder.o
+perf-$(CONFIG_DWARF) += dwarf-aux.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
-libperf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
+perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
+perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
-libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
-libperf-y += scripting-engines/
+perf-y += scripting-engines/
-libperf-$(CONFIG_ZLIB) += zlib.o
-libperf-$(CONFIG_LZMA) += lzma.o
-libperf-y += demangle-java.o
-libperf-y += demangle-rust.o
+perf-$(CONFIG_ZLIB) += zlib.o
+perf-$(CONFIG_LZMA) += lzma.o
+perf-y += demangle-java.o
+perf-y += demangle-rust.o
ifdef CONFIG_JITDUMP
-libperf-$(CONFIG_LIBELF) += jitdump.o
-libperf-$(CONFIG_LIBELF) += genelf.o
-libperf-$(CONFIG_DWARF) += genelf_debug.o
+perf-$(CONFIG_LIBELF) += jitdump.o
+perf-$(CONFIG_LIBELF) += genelf.o
+perf-$(CONFIG_DWARF) += genelf_debug.o
endif
-libperf-y += perf-hooks.o
+perf-y += perf-hooks.o
-libperf-$(CONFIG_CXX) += c++/
+perf-$(CONFIG_LIBBPF) += bpf-event.o
+
+perf-$(CONFIG_CXX) += c++/
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))"
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ac9805e0bc76..11a8a447a3af 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -9,6 +9,7 @@
#include <errno.h>
#include <inttypes.h>
+#include <libgen.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
@@ -16,6 +17,7 @@
#include "color.h"
#include "config.h"
#include "cache.h"
+#include "map.h"
#include "symbol.h"
#include "units.h"
#include "debug.h"
@@ -1723,15 +1725,14 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
err = asprintf(&command,
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
- " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
+ " -l -d %s %s -C \"$1\" 2>/dev/null|grep -v \"$1:\"|expand",
opts->objdump_path ?: "objdump",
opts->disassembler_style ? "-M " : "",
opts->disassembler_style ?: "",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
opts->show_asm_raw ? "" : "--no-show-raw",
- opts->annotate_src ? "-S" : "",
- symfs_filename, symfs_filename);
+ opts->annotate_src ? "-S" : "");
if (err < 0) {
pr_err("Failure allocating memory for the command to run\n");
@@ -1756,7 +1757,8 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
close(stdout_fd[0]);
dup2(stdout_fd[1], 1);
close(stdout_fd[1]);
- execl("/bin/sh", "sh", "-c", command, NULL);
+ execl("/bin/sh", "sh", "-c", command, "--", symfs_filename,
+ NULL);
perror(command);
exit(-1);
}
@@ -1889,6 +1891,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
struct annotation_options *options,
struct arch **parch)
{
+ struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
.privsize = privsize,
.evsel = evsel,
@@ -1919,6 +1922,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
args.ms.map = map;
args.ms.sym = sym;
+ notes->start = map__rip_2objdump(map, sym->start);
return symbol__disassemble(sym, &args);
}
@@ -2794,8 +2798,6 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
symbol__calc_percent(sym, evsel);
- notes->start = map__rip_2objdump(map, sym->start);
-
annotation__set_offsets(notes, size);
annotation__mark_jump_targets(notes, sym);
annotation__compute_ipc(notes, size);
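
The symbol__disassemble() hunk above stops pasting symfs_filename into the objdump command string and instead references it as the shell positional parameter "$1", passing the real path to sh after "--" on the execl() line, so filenames containing quotes or metacharacters can no longer break or alter the command. A reduced sketch of that pattern, with a made-up path and an ls pipeline standing in for objdump:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path containing characters the shell would mangle */
	const char *path = "/tmp/a \"quoted\" $(name).o";
	char *command;

	/* "$1" is expanded by sh itself, so path needs no quoting here */
	if (asprintf(&command, "ls -l \"$1\" 2>/dev/null | expand") < 0)
		return 1;

	/* after "-c command": "--" becomes $0, path becomes $1 */
	execl("/bin/sh", "sh", "-c", command, "--", path, (char *)NULL);
	perror("execl");
	return 1;
}
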
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index fb6463730ba4..95053cab41fe 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -4,16 +4,24 @@
#include <stdbool.h>
#include <stdint.h>
+#include <stdio.h>
#include <linux/types.h>
-#include "symbol.h"
-#include "hist.h"
-#include "sort.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <pthread.h>
#include <asm/bug.h>
+#include "symbol_conf.h"
+struct hist_browser_timer;
+struct hist_entry;
struct ins_ops;
+struct map;
+struct map_symbol;
+struct addr_map_symbol;
+struct option;
+struct perf_sample;
+struct perf_evsel;
+struct symbol;
struct ins {
const char *name;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index f69961c4a4f3..267e54df511b 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -27,6 +27,7 @@
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
+#include <linux/time64.h>
#include <sys/param.h>
#include <stdlib.h>
@@ -41,6 +42,7 @@
#include "pmu.h"
#include "evsel.h"
#include "cpumap.h"
+#include "symbol.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"
@@ -857,7 +859,7 @@ void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg)
+ const char *msg, u64 timestamp)
{
size_t size;
@@ -869,7 +871,9 @@ void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
auxtrace_error->cpu = cpu;
auxtrace_error->pid = pid;
auxtrace_error->tid = tid;
+ auxtrace_error->fmt = 1;
auxtrace_error->ip = ip;
+ auxtrace_error->time = timestamp;
strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
@@ -1159,12 +1163,27 @@ static const char *auxtrace_error_name(int type)
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
struct auxtrace_error_event *e = &event->auxtrace_error;
+ unsigned long long nsecs = e->time;
+ const char *msg = e->msg;
int ret;
ret = fprintf(fp, " %s error type %u",
auxtrace_error_name(e->type), e->type);
+
+ if (e->fmt && nsecs) {
+ unsigned long secs = nsecs / NSEC_PER_SEC;
+
+ nsecs -= secs * NSEC_PER_SEC;
+ ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
+ } else {
+ ret += fprintf(fp, " time 0");
+ }
+
+ if (!e->fmt)
+ msg = (const char *)&e->time;
+
ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
- e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
+ e->cpu, e->pid, e->tid, e->ip, e->code, msg);
return ret;
}
@@ -1278,9 +1297,9 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
}
/* padding must be written by fn() e.g. record__process_auxtrace() */
- padding = size & 7;
+ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
if (padding)
- padding = 8 - padding;
+ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
memset(&ev, 0, sizeof(ev));
ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
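
The __auxtrace_mmap__read() hunk above only replaces the hard-coded 8 with the new PERF_AUXTRACE_RECORD_ALIGNMENT constant declared in auxtrace.h below; the round-up-to-alignment arithmetic itself is unchanged. A small worked example of that arithmetic, with sizes picked arbitrarily:

#include <stdio.h>

#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

int main(void)
{
	unsigned long long sizes[] = { 13, 16, 4099 };

	for (int i = 0; i < 3; i++) {
		unsigned long long size = sizes[i];
		/* same computation as __auxtrace_mmap__read() */
		unsigned long long padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);

		if (padding)
			padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
		printf("size %llu -> pad %llu -> total %llu\n",
		       size, padding, size + padding);
	}
	return 0;	/* prints 13->3->16, 16->0->16, 4099->5->4104 */
}
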
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 8e50f96d4b23..c69bcd9a3091 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -40,6 +40,9 @@ struct record_opts;
struct auxtrace_info_event;
struct events_stats;
+/* Auxtrace records must have the same alignment as perf event records */
+#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
+
enum auxtrace_type {
PERF_AUXTRACE_UNKNOWN,
PERF_AUXTRACE_INTEL_PT,
@@ -516,7 +519,7 @@ void auxtrace_index__free(struct list_head *head);
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg);
+ const char *msg, u64 timestamp);
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
diff --git a/tools/perf/util/block-range.c b/tools/perf/util/block-range.c
index f1451c987eec..1be432657501 100644
--- a/tools/perf/util/block-range.c
+++ b/tools/perf/util/block-range.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "block-range.h"
#include "annotate.h"
+#include <assert.h>
+#include <stdlib.h>
struct {
struct rb_root root;
diff --git a/tools/perf/util/block-range.h b/tools/perf/util/block-range.h
index a5ba719d69fb..ec0fb534bf56 100644
--- a/tools/perf/util/block-range.h
+++ b/tools/perf/util/block-range.h
@@ -2,7 +2,11 @@
#ifndef __PERF_BLOCK_RANGE_H
#define __PERF_BLOCK_RANGE_H
-#include "symbol.h"
+#include <stdbool.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+struct symbol;
/*
* struct block_range - non-overlapping parts of basic blocks
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
new file mode 100644
index 000000000000..028c8ec1f62a
--- /dev/null
+++ b/tools/perf/util/bpf-event.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <stdlib.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <linux/btf.h>
+#include "bpf-event.h"
+#include "debug.h"
+#include "symbol.h"
+#include "machine.h"
+
+#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
+
+static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
+{
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
+ return ret;
+}
+
+int machine__process_bpf_event(struct machine *machine __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_bpf_event(event, stdout);
+ return 0;
+}
+
+/*
+ * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
+ * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
+ * one PERF_RECORD_KSYMBOL is generated for each sub program.
+ *
+ * Returns:
+ * 0 for success;
+ * -1 for failures;
+ * -2 for lack of kernel support.
+ */
+static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ int fd,
+ union perf_event *event,
+ struct record_opts *opts)
+{
+ struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
+ struct bpf_event *bpf_event = &event->bpf_event;
+ u32 sub_prog_cnt, i, func_info_rec_size = 0;
+ u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
+ struct bpf_prog_info info = { .type = 0, };
+ u32 info_len = sizeof(info);
+ void *func_infos = NULL;
+ u64 *prog_addrs = NULL;
+ struct btf *btf = NULL;
+ u32 *prog_lens = NULL;
+ bool has_btf = false;
+ char errbuf[512];
+ int err = 0;
+
+ /* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
+ err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+
+ if (err) {
+ pr_debug("%s: failed to get BPF program info: %s, aborting\n",
+ __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
+ return -1;
+ }
+ if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+ pr_debug("%s: the kernel is too old, aborting\n", __func__);
+ return -2;
+ }
+
+ /* number of ksyms, func_lengths, and tags should match */
+ sub_prog_cnt = info.nr_jited_ksyms;
+ if (sub_prog_cnt != info.nr_prog_tags ||
+ sub_prog_cnt != info.nr_jited_func_lens)
+ return -1;
+
+ /* check BTF func info support */
+ if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
+ /* btf func info number should be same as sub_prog_cnt */
+ if (sub_prog_cnt != info.nr_func_info) {
+ pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
+ return -1;
+ }
+ if (btf__get_from_id(info.btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
+ return -1;
+ }
+ func_info_rec_size = info.func_info_rec_size;
+ func_infos = calloc(sub_prog_cnt, func_info_rec_size);
+ if (!func_infos) {
+ pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
+ return -1;
+ }
+ has_btf = true;
+ }
+
+ /*
+ * We need address, length, and tag for each sub program.
+ * Allocate memory and call bpf_obj_get_info_by_fd() again
+ */
+ prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
+ if (!prog_addrs) {
+ pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
+ goto out;
+ }
+ prog_lens = calloc(sub_prog_cnt, sizeof(u32));
+ if (!prog_lens) {
+ pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
+ goto out;
+ }
+ prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
+ if (!prog_tags) {
+ pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
+ goto out;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.nr_jited_ksyms = sub_prog_cnt;
+ info.nr_jited_func_lens = sub_prog_cnt;
+ info.nr_prog_tags = sub_prog_cnt;
+ info.jited_ksyms = ptr_to_u64(prog_addrs);
+ info.jited_func_lens = ptr_to_u64(prog_lens);
+ info.prog_tags = ptr_to_u64(prog_tags);
+ info_len = sizeof(info);
+ if (has_btf) {
+ info.nr_func_info = sub_prog_cnt;
+ info.func_info_rec_size = func_info_rec_size;
+ info.func_info = ptr_to_u64(func_infos);
+ }
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (err) {
+ pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
+ goto out;
+ }
+
+ /* Synthesize PERF_RECORD_KSYMBOL */
+ for (i = 0; i < sub_prog_cnt; i++) {
+ const struct bpf_func_info *finfo;
+ const char *short_name = NULL;
+ const struct btf_type *t;
+ int name_len;
+
+ *ksymbol_event = (struct ksymbol_event){
+ .header = {
+ .type = PERF_RECORD_KSYMBOL,
+ .size = offsetof(struct ksymbol_event, name),
+ },
+ .addr = prog_addrs[i],
+ .len = prog_lens[i],
+ .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
+ .flags = 0,
+ };
+ name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
+ "bpf_prog_");
+ name_len += snprintf_hex(ksymbol_event->name + name_len,
+ KSYM_NAME_LEN - name_len,
+ prog_tags[i], BPF_TAG_SIZE);
+ if (has_btf) {
+ finfo = func_infos + i * info.func_info_rec_size;
+ t = btf__type_by_id(btf, finfo->type_id);
+ short_name = btf__name_by_offset(btf, t->name_off);
+ } else if (i == 0 && sub_prog_cnt == 1) {
+ /* no subprog */
+ if (info.name[0])
+ short_name = info.name;
+ } else
+ short_name = "F";
+ if (short_name)
+ name_len += snprintf(ksymbol_event->name + name_len,
+ KSYM_NAME_LEN - name_len,
+ "_%s", short_name);
+
+ ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
+ sizeof(u64));
+
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+ err = perf_tool__process_synth_event(tool, event,
+ machine, process);
+ }
+
+ /* Synthesize PERF_RECORD_BPF_EVENT */
+ if (opts->bpf_event) {
+ *bpf_event = (struct bpf_event){
+ .header = {
+ .type = PERF_RECORD_BPF_EVENT,
+ .size = sizeof(struct bpf_event),
+ },
+ .type = PERF_BPF_EVENT_PROG_LOAD,
+ .flags = 0,
+ .id = info.id,
+ };
+ memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+ err = perf_tool__process_synth_event(tool, event,
+ machine, process);
+ }
+
+out:
+ free(prog_tags);
+ free(prog_lens);
+ free(prog_addrs);
+ free(func_infos);
+ free(btf);
+ return err ? -1 : 0;
+}
+
+int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct record_opts *opts)
+{
+ union perf_event *event;
+ __u32 id = 0;
+ int err;
+ int fd;
+
+ event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
+ if (!event)
+ return -1;
+ while (true) {
+ err = bpf_prog_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT) {
+ err = 0;
+ break;
+ }
+ pr_debug("%s: can't get next program: %s%s\n",
+ __func__, strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ /* don't report error on old kernel or EPERM */
+ err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
+ break;
+ }
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0) {
+ pr_debug("%s: failed to get fd for prog_id %u\n",
+ __func__, id);
+ continue;
+ }
+
+ err = perf_event__synthesize_one_bpf_prog(tool, process,
+ machine, fd,
+ event, opts);
+ close(fd);
+ if (err) {
+ /* do not return error for old kernel */
+ if (err == -2)
+ err = 0;
+ break;
+ }
+ }
+ free(event);
+ return err;
+}
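
The new bpf-event.c above relies on the usual two-pass bpf_obj_get_info_by_fd() idiom: a first call with an empty bpf_prog_info only reports how many JITed ksyms, lengths and tags exist, the caller then allocates arrays of that size, and a second call fills them in. A stripped-down sketch of just that idiom, keeping only the JITed address array and dropping perf's event synthesis and detailed error handling:

#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

/* returns the number of JITed sub-programs, or -1; *addrs is malloc'd on success */
static int prog_jited_addrs(int fd, __u64 **addrs)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	__u32 cnt;

	/* pass 1: sizes only, no output buffers set up yet */
	if (bpf_obj_get_info_by_fd(fd, &info, &len))
		return -1;

	cnt = info.nr_jited_ksyms;
	*addrs = calloc(cnt, sizeof(__u64));
	if (!*addrs)
		return -1;

	/* pass 2: request exactly the array we allocated */
	memset(&info, 0, sizeof(info));
	info.nr_jited_ksyms = cnt;
	info.jited_ksyms = (__u64)(unsigned long)*addrs;
	len = sizeof(info);
	if (bpf_obj_get_info_by_fd(fd, &info, &len)) {
		free(*addrs);
		return -1;
	}
	return cnt;
}
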
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
new file mode 100644
index 000000000000..7890067e1a37
--- /dev/null
+++ b/tools/perf/util/bpf-event.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_BPF_EVENT_H
+#define __PERF_BPF_EVENT_H
+
+#include <linux/compiler.h>
+#include "event.h"
+
+struct machine;
+union perf_event;
+struct perf_sample;
+struct perf_tool;
+struct record_opts;
+
+#ifdef HAVE_LIBBPF_SUPPORT
+int machine__process_bpf_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+
+int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct record_opts *opts);
+#else
+static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused)
+{
+ return 0;
+}
+
+static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct record_opts *opts __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+#endif
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 2f3eb6d293ee..251d9ea6252f 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -15,6 +15,7 @@
#include <errno.h>
#include "perf.h"
#include "debug.h"
+#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
@@ -24,22 +25,12 @@
#include "llvm-utils.h"
#include "c++/clang-c.h"
-#define DEFINE_PRINT_FN(name, level) \
-static int libbpf_##name(const char *fmt, ...) \
-{ \
- va_list args; \
- int ret; \
- \
- va_start(args, fmt); \
- ret = veprintf(level, verbose, pr_fmt(fmt), args);\
- va_end(args); \
- return ret; \
+static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
+ const char *fmt, va_list args)
+{
+ return veprintf(1, verbose, pr_fmt(fmt), args);
}
-DEFINE_PRINT_FN(warning, 1)
-DEFINE_PRINT_FN(info, 1)
-DEFINE_PRINT_FN(debug, 1)
-
struct bpf_prog_priv {
bool is_tp;
char *sys_name;
@@ -59,9 +50,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
struct bpf_object *obj;
if (!libbpf_initialized) {
- libbpf_set_print(libbpf_warning,
- libbpf_info,
- libbpf_debug);
+ libbpf_set_print(libbpf_perf_print);
libbpf_initialized = true;
}
@@ -79,9 +68,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
struct bpf_object *obj;
if (!libbpf_initialized) {
- libbpf_set_print(libbpf_warning,
- libbpf_info,
- libbpf_debug);
+ libbpf_set_print(libbpf_perf_print);
libbpf_initialized = true;
}
@@ -1503,7 +1490,7 @@ apply_obj_config_object(struct bpf_object *obj)
struct bpf_map *map;
int err;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
err = apply_obj_config_map(map);
if (err)
return err;
@@ -1527,7 +1514,7 @@ int bpf__apply_obj_config(void)
#define bpf__for_each_map(pos, obj, objtmp) \
bpf_object__for_each_safe(obj, objtmp) \
- bpf_map__for_each(pos, obj)
+ bpf_object__for_each_map(pos, obj)
#define bpf__for_each_map_named(pos, obj, objtmp, name) \
bpf__for_each_map(pos, obj, objtmp) \
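
The bpf-loader.c hunks above adapt perf to libbpf's move from three separate warning/info/debug print hooks to one vararg callback registered through libbpf_set_print(). A minimal standalone sketch of the new callback API, assuming a libbpf new enough to define enum libbpf_print_level:

#include <stdio.h>
#include <stdarg.h>
#include <bpf/libbpf.h>

static int print_all_levels(enum libbpf_print_level level,
			    const char *fmt, va_list args)
{
	/* drop debug chatter, forward warnings and info to stderr */
	if (level == LIBBPF_DEBUG)
		return 0;
	return vfprintf(stderr, fmt, args);
}

int main(void)
{
	libbpf_set_print(print_all_levels);
	/* subsequent bpf_object__open()/load() calls log through the callback */
	return 0;
}
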
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index 62d245a90e1d..3f46856e3330 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -8,11 +8,7 @@
#include <linux/compiler.h>
#include <linux/err.h>
-#include <string.h>
#include <bpf/libbpf.h>
-#include "probe-event.h"
-#include "evlist.h"
-#include "debug.h"
enum bpf_loader_errno {
__BPF_LOADER_ERRNO__START = __LIBBPF_ERRNO__START - 100,
@@ -44,6 +40,7 @@ enum bpf_loader_errno {
};
struct perf_evsel;
+struct perf_evlist;
struct bpf_object;
struct parse_events_term;
#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
@@ -87,6 +84,8 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha
int bpf__strerror_setup_output_event(struct perf_evlist *evlist, int err, char *buf, size_t size);
#else
#include <errno.h>
+#include <string.h>
+#include "debug.h"
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused,
diff --git a/tools/perf/util/bpf_map.c b/tools/perf/util/bpf_map.c
new file mode 100644
index 000000000000..eb853ca67cf4
--- /dev/null
+++ b/tools/perf/util/bpf_map.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "util/bpf_map.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+static bool bpf_map_def__is_per_cpu(const struct bpf_map_def *def)
+{
+ return def->type == BPF_MAP_TYPE_PERCPU_HASH ||
+ def->type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+ def->type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+ def->type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
+}
+
+static void *bpf_map_def__alloc_value(const struct bpf_map_def *def)
+{
+ if (bpf_map_def__is_per_cpu(def))
+ return malloc(round_up(def->value_size, 8) * sysconf(_SC_NPROCESSORS_CONF));
+
+ return malloc(def->value_size);
+}
+
+int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
+{
+ const struct bpf_map_def *def = bpf_map__def(map);
+ void *prev_key = NULL, *key, *value;
+ int fd = bpf_map__fd(map), err;
+ int printed = 0;
+
+ if (fd < 0)
+ return fd;
+
+ if (IS_ERR(def))
+ return PTR_ERR(def);
+
+ err = -ENOMEM;
+ key = malloc(def->key_size);
+ if (key == NULL)
+ goto out;
+
+ value = bpf_map_def__alloc_value(def);
+ if (value == NULL)
+ goto out_free_key;
+
+ while ((err = bpf_map_get_next_key(fd, prev_key, key) == 0)) {
+ int intkey = *(int *)key;
+
+ if (!bpf_map_lookup_elem(fd, key, value)) {
+ bool boolval = *(bool *)value;
+ if (boolval)
+ printed += fprintf(fp, "[%d] = %d,\n", intkey, boolval);
+ } else {
+ printed += fprintf(fp, "[%d] = ERROR,\n", intkey);
+ }
+
+ prev_key = key;
+ }
+
+ if (err == ENOENT)
+ err = printed;
+
+ free(value);
+out_free_key:
+ free(key);
+out:
+ return err;
+}
diff --git a/tools/perf/util/bpf_map.h b/tools/perf/util/bpf_map.h
new file mode 100644
index 000000000000..d6abd5e47af8
--- /dev/null
+++ b/tools/perf/util/bpf_map.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#ifndef __PERF_BPF_MAP_H
+#define __PERF_BPF_MAP_H 1
+
+#include <stdio.h>
+#include <linux/compiler.h>
+struct bpf_map;
+
+#ifdef HAVE_LIBBPF_SUPPORT
+
+int bpf_map__fprintf(struct bpf_map *map, FILE *fp);
+
+#else
+
+static inline int bpf_map__fprintf(struct bpf_map *map __maybe_unused, FILE *fp __maybe_unused)
+{
+ return 0;
+}
+
+#endif // HAVE_LIBBPF_SUPPORT
+
+#endif // __PERF_BPF_MAP_H
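
bpf_map__fprintf() above walks one map with bpf_map_get_next_key()/bpf_map_lookup_elem() and prints boolean-valued entries. A hypothetical caller would iterate every map of a loaded bpf_object and print each one; a usage sketch only, not taken from this patch:

#include <stdio.h>
#include <bpf/libbpf.h>
#include "util/bpf_map.h"

static void dump_all_maps(struct bpf_object *obj)
{
	struct bpf_map *map;

	/* bpf_object__for_each_map() is the iterator the loader hunks above switch to */
	bpf_object__for_each_map(map, obj)
		bpf_map__fprintf(map, stdout);
}
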
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 1e3c7c5cdc63..64f96b79f1d7 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -1,8 +1,31 @@
#ifndef _PERF_BRANCH_H
#define _PERF_BRANCH_H 1
+#include <stdio.h>
#include <stdint.h>
-#include "../perf.h"
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+struct branch_flags {
+ u64 mispred:1;
+ u64 predicted:1;
+ u64 in_tx:1;
+ u64 abort:1;
+ u64 cycles:16;
+ u64 type:4;
+ u64 reserved:40;
+};
+
+struct branch_entry {
+ u64 from;
+ u64 to;
+ struct branch_flags flags;
+};
+
+struct branch_stack {
+ u64 nr;
+ struct branch_entry entries[0];
+};
struct branch_type_stat {
bool branch_to;
@@ -13,8 +36,6 @@ struct branch_type_stat {
u64 cross_2m;
};
-struct branch_flags;
-
void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags,
u64 from, u64 to);
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 04b1d53e4bf9..bff0d17920ed 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -15,6 +15,8 @@
#include <sys/types.h>
#include "build-id.h"
#include "event.h"
+#include "namespaces.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include <linux/kernel.h>
@@ -363,7 +365,8 @@ int perf_session__write_buildid_table(struct perf_session *session,
if (err)
return err;
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
@@ -396,7 +399,8 @@ int dsos__hit_all(struct perf_session *session)
if (err)
return err;
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__hit_all_dsos(pos);
@@ -849,7 +853,8 @@ int perf_session__cache_build_ids(struct perf_session *session)
ret = machine__cache_build_ids(&session->machines.host);
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__cache_build_ids(pos);
}
@@ -866,7 +871,8 @@ bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
struct rb_node *nd;
bool ret = machine__read_build_ids(&session->machines.host, with_hits);
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index f0c565164a97..93668f38f1ed 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -6,9 +6,10 @@
#define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1)
#include "tool.h"
-#include "namespaces.h"
#include <linux/types.h>
+struct nsinfo;
+
extern struct perf_tool build_id__mark_dso_hit_ops;
struct dso;
struct feat_fd;
diff --git a/tools/perf/util/c++/Build b/tools/perf/util/c++/Build
index 988fef1b11d7..613ecfd76527 100644
--- a/tools/perf/util/c++/Build
+++ b/tools/perf/util/c++/Build
@@ -1,2 +1,2 @@
-libperf-$(CONFIG_CLANGLLVM) += clang.o
-libperf-$(CONFIG_CLANGLLVM) += clang-test.o
+perf-$(CONFIG_CLANGLLVM) += clang.o
+perf-$(CONFIG_CLANGLLVM) += clang-test.o
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index 89512504551b..39c0004f2886 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -160,7 +160,7 @@ getBPFObjectFromModule(llvm::Module *Module)
}
PM.run(*Module);
- return std::move(Buffer);
+ return Buffer;
}
}
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 32ef7bdca1cf..abb608b09269 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -23,8 +23,10 @@
#include "util.h"
#include "sort.h"
#include "machine.h"
+#include "map.h"
#include "callchain.h"
#include "branch.h"
+#include "symbol.h"
#define CALLCHAIN_PARAM_DEFAULT \
.mode = CHAIN_GRAPH_ABS, \
@@ -766,6 +768,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
cnode->cycles_count += node->branch_flags.cycles;
cnode->iter_count += node->nr_loop_iter;
cnode->iter_cycles += node->iter_cycles;
+ cnode->from_count++;
}
}
@@ -1345,10 +1348,10 @@ static int branch_to_str(char *bf, int bfsize,
static int branch_from_str(char *bf, int bfsize,
u64 branch_count,
u64 cycles_count, u64 iter_count,
- u64 iter_cycles)
+ u64 iter_cycles, u64 from_count)
{
int printed = 0, i = 0;
- u64 cycles;
+ u64 cycles, v = 0;
cycles = cycles_count / branch_count;
if (cycles) {
@@ -1357,14 +1360,16 @@ static int branch_from_str(char *bf, int bfsize,
bf + printed, bfsize - printed);
}
- if (iter_count) {
- printed += count_pri64_printf(i++, "iter",
- iter_count,
- bf + printed, bfsize - printed);
+ if (iter_count && from_count) {
+ v = iter_count / from_count;
+ if (v) {
+ printed += count_pri64_printf(i++, "iter",
+ v, bf + printed, bfsize - printed);
- printed += count_pri64_printf(i++, "avg_cycles",
- iter_cycles / iter_count,
- bf + printed, bfsize - printed);
+ printed += count_pri64_printf(i++, "avg_cycles",
+ iter_cycles / iter_count,
+ bf + printed, bfsize - printed);
+ }
}
if (i)
@@ -1377,6 +1382,7 @@ static int counts_str_build(char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
+ u64 from_count,
struct branch_type_stat *brtype_stat)
{
int printed;
@@ -1389,7 +1395,8 @@ static int counts_str_build(char *bf, int bfsize,
predicted_count, abort_count, brtype_stat);
} else {
printed = branch_from_str(bf, bfsize, branch_count,
- cycles_count, iter_count, iter_cycles);
+ cycles_count, iter_count, iter_cycles,
+ from_count);
}
if (!printed)
@@ -1402,13 +1409,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
+ u64 from_count,
struct branch_type_stat *brtype_stat)
{
char str[256];
counts_str_build(str, sizeof(str), branch_count,
predicted_count, abort_count, cycles_count,
- iter_count, iter_cycles, brtype_stat);
+ iter_count, iter_cycles, from_count, brtype_stat);
if (fp)
return fprintf(fp, "%s", str);
@@ -1422,6 +1430,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
u64 branch_count, predicted_count;
u64 abort_count, cycles_count;
u64 iter_count, iter_cycles;
+ u64 from_count;
branch_count = clist->branch_count;
predicted_count = clist->predicted_count;
@@ -1429,11 +1438,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
cycles_count = clist->cycles_count;
iter_count = clist->iter_count;
iter_cycles = clist->iter_cycles;
+ from_count = clist->from_count;
return callchain_counts_printf(fp, bf, bfsize, branch_count,
predicted_count, abort_count,
cycles_count, iter_count, iter_cycles,
- &clist->brtype_stat);
+ from_count, &clist->brtype_stat);
}
static void free_callchain_node(struct callchain_node *node)
@@ -1569,3 +1579,18 @@ int callchain_cursor__copy(struct callchain_cursor *dst,
return rc;
}
+
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+ struct callchain_cursor_node *node;
+
+ cursor->nr = 0;
+ cursor->last = &cursor->first;
+
+ for (node = cursor->first; node != NULL; node = node->next)
+ map__zput(node->map);
+}
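
The new from_count field changes what the "iter" value in the counts string means: branch_from_str() now reports the average number of loop iterations per branch source instead of the raw total. A short sketch of the arithmetic (illustrative only):

#include <linux/types.h>

/* Average loop iterations per source address, as computed in branch_from_str(). */
static u64 avg_iterations(u64 iter_count, u64 from_count)
{
	if (!iter_count || !from_count)
		return 0;

	return iter_count / from_count;
}
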
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 154560b1eb65..80e056a3d882 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -2,14 +2,14 @@
#ifndef __PERF_CALLCHAIN_H
#define __PERF_CALLCHAIN_H
-#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
-#include "map.h"
-#include "symbol.h"
+#include "map_symbol.h"
#include "branch.h"
+struct map;
+
#define HELP_PAD "\t\t\t\t"
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace):\n\n"
@@ -118,6 +118,7 @@ struct callchain_list {
bool has_children;
};
u64 branch_count;
+ u64 from_count;
u64 predicted_count;
u64 abort_count;
u64 cycles_count;
@@ -187,20 +188,7 @@ int callchain_append(struct callchain_root *root,
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
-/*
- * Initialize a cursor before adding entries inside, but keep
- * the previously allocated entries as a cache.
- */
-static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
-{
- struct callchain_cursor_node *node;
-
- cursor->nr = 0;
- cursor->last = &cursor->first;
-
- for (node = cursor->first; node != NULL; node = node->next)
- map__zput(node->map);
-}
+void callchain_cursor_reset(struct callchain_cursor *cursor);
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
struct map *map, struct symbol *sym,
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 39e628b8938e..39b8c4ec4e2e 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include "cache.h"
-#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include "color.h"
@@ -10,44 +9,6 @@
int perf_use_color_default = -1;
-int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
-{
- if (value) {
- if (!strcasecmp(value, "never"))
- return 0;
- if (!strcasecmp(value, "always"))
- return 1;
- if (!strcasecmp(value, "auto"))
- goto auto_color;
- }
-
- /* Missing or explicit false to turn off colorization */
- if (!perf_config_bool(var, value))
- return 0;
-
- /* any normal truth value defaults to 'auto' */
- auto_color:
- if (stdout_is_tty < 0)
- stdout_is_tty = isatty(1);
- if (stdout_is_tty || pager_in_use()) {
- char *term = getenv("TERM");
- if (term && strcmp(term, "dumb"))
- return 1;
- }
- return 0;
-}
-
-int perf_color_default_config(const char *var, const char *value,
- void *cb __maybe_unused)
-{
- if (!strcmp(var, "color.ui")) {
- perf_use_color_default = perf_config_colorbool(var, value, -1);
- return 0;
- }
-
- return 0;
-}
-
static int __color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args, const char *trail)
{
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 22777b1812ee..01f7bed21c9b 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -3,6 +3,7 @@
#define __PERF_COLOR_H
#include <stdio.h>
+#include <stdarg.h>
/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
#define COLOR_MAXLEN 24
diff --git a/tools/perf/util/color_config.c b/tools/perf/util/color_config.c
new file mode 100644
index 000000000000..817dc56e7e95
--- /dev/null
+++ b/tools/perf/util/color_config.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include "cache.h"
+#include "config.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include "color.h"
+#include <math.h>
+#include <unistd.h>
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
+{
+ if (value) {
+ if (!strcasecmp(value, "never"))
+ return 0;
+ if (!strcasecmp(value, "always"))
+ return 1;
+ if (!strcasecmp(value, "auto"))
+ goto auto_color;
+ }
+
+ /* Missing or explicit false to turn off colorization */
+ if (!perf_config_bool(var, value))
+ return 0;
+
+ /* any normal truth value defaults to 'auto' */
+ auto_color:
+ if (stdout_is_tty < 0)
+ stdout_is_tty = isatty(1);
+ if (stdout_is_tty || pager_in_use()) {
+ char *term = getenv("TERM");
+ if (term && strcmp(term, "dumb"))
+ return 1;
+ }
+ return 0;
+}
+
+int perf_color_default_config(const char *var, const char *value,
+ void *cb __maybe_unused)
+{
+ if (!strcmp(var, "color.ui")) {
+ perf_use_color_default = perf_config_colorbool(var, value, -1);
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 31279a7bd919..1066de92af12 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -6,6 +6,7 @@
#include <stdio.h>
#include <string.h>
#include <linux/refcount.h>
+#include <linux/rbtree.h>
#include "rwsem.h"
struct comm_str {
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
index 3e5c438fe85e..f35d8fbfa2dd 100644
--- a/tools/perf/util/comm.h
+++ b/tools/perf/util/comm.h
@@ -2,9 +2,9 @@
#ifndef __PERF_COMM_H
#define __PERF_COMM_H
-#include "../perf.h"
-#include <linux/rbtree.h>
#include <linux/list.h>
+#include <linux/types.h>
+#include <stdbool.h>
struct comm_str;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 1ea8f898f1a1..fa092511c52b 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -13,6 +13,7 @@
#include <sys/param.h>
#include "util.h"
#include "cache.h"
+#include "callchain.h"
#include <subcmd/exec-cmd.h>
#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
diff --git a/tools/perf/util/cpu-set-sched.h b/tools/perf/util/cpu-set-sched.h
new file mode 100644
index 000000000000..8cf4e40d322a
--- /dev/null
+++ b/tools/perf/util/cpu-set-sched.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: LGPL-2.1
+// Definitions taken from glibc for use with older systems, same licensing.
+#ifndef _CPU_SET_SCHED_PERF_H
+#define _CPU_SET_SCHED_PERF_H
+
+#include <features.h>
+#include <sched.h>
+
+#ifndef CPU_EQUAL
+#ifndef __CPU_EQUAL_S
+#if __GNUC_PREREQ (2, 91)
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__builtin_memcmp (cpusetp1, cpusetp2, setsize) == 0)
+#else
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__extension__ \
+ ({ const __cpu_mask *__arr1 = (cpusetp1)->__bits; \
+ const __cpu_mask *__arr2 = (cpusetp2)->__bits; \
+ size_t __imax = (setsize) / sizeof (__cpu_mask); \
+ size_t __i; \
+ for (__i = 0; __i < __imax; ++__i) \
+ if (__arr1[__i] != __arr2[__i]) \
+ break; \
+ __i == __imax; }))
+#endif
+#endif // __CPU_EQUAL_S
+
+#define CPU_EQUAL(cpusetp1, cpusetp2) \
+ __CPU_EQUAL_S (sizeof (cpu_set_t), cpusetp1, cpusetp2)
+#endif // CPU_EQUAL
+
+#ifndef CPU_OR
+#ifndef __CPU_OP_S
+#define __CPU_OP_S(setsize, destset, srcset1, srcset2, op) \
+ (__extension__ \
+ ({ cpu_set_t *__dest = (destset); \
+ const __cpu_mask *__arr1 = (srcset1)->__bits; \
+ const __cpu_mask *__arr2 = (srcset2)->__bits; \
+ size_t __imax = (setsize) / sizeof (__cpu_mask); \
+ size_t __i; \
+ for (__i = 0; __i < __imax; ++__i) \
+ ((__cpu_mask *) __dest->__bits)[__i] = __arr1[__i] op __arr2[__i]; \
+ __dest; }))
+#endif // __CPU_OP_S
+
+#define CPU_OR(destset, srcset1, srcset2) \
+ __CPU_OP_S (sizeof (cpu_set_t), destset, srcset1, srcset2, |)
+#endif // CPU_OR
+
+#endif // _CPU_SET_SCHED_PERF_H
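
A short sketch of how the fallback macros above can be used (the wrapper itself is hypothetical and not part of the patch): merge two affinity masks and check whether the second adds any CPUs to the first.

#include <sched.h>
#include <stdbool.h>
#include "cpu-set-sched.h"

/* Hypothetical: true if 'extra' contributes no CPUs beyond 'base'. */
static bool cpu_set_is_subset(cpu_set_t *extra, cpu_set_t *base)
{
	cpu_set_t merged;

	CPU_OR(&merged, base, extra);
	return CPU_EQUAL(&merged, base);
}
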
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 1ccbd3342069..0b599229bc7e 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
if (!cpu_list)
return cpu_map__read_all_cpu_map();
- if (!isdigit(*cpu_list))
+ /*
+	 * must handle the case of an empty cpumap to cover the
+	 * TOPOLOGY header for NUMA nodes with no CPU
+	 * (e.g., because of CPU hotplug)
+ */
+ if (!isdigit(*cpu_list) && *cpu_list != '\0')
goto out;
while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else
+ else if (*cpu_list != '\0')
cpus = cpu_map__default_new();
+ else
+ cpus = cpu_map__dummy_new();
invalid:
free(tmp_cpus);
out:
@@ -674,7 +681,7 @@ size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
#undef COMMA
- pr_debug("cpumask list: %s\n", buf);
+ pr_debug2("cpumask list: %s\n", buf);
return ret;
}
@@ -723,3 +730,13 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
buf[size - 1] = '\0';
return ptr - buf;
}
+
+const struct cpu_map *cpu_map__online(void) /* thread unsafe */
+{
+ static const struct cpu_map *online = NULL;
+
+ if (!online)
+ online = cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
+
+ return online;
+}
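
One possible consumer of the new cached accessor (sketch only; the wrapper below is not part of the patch): check whether a given CPU appears in the online map, reusing the existing cpu_map__has() helper.

#include <stdbool.h>
#include "cpumap.h"

/* Hypothetical wrapper: true if 'cpu' is listed in the cached online map. */
static bool perf_cpu_is_online(int cpu)
{
	const struct cpu_map *online = cpu_map__online(); /* thread unsafe */

	return online && cpu_map__has((struct cpu_map *)online, cpu);
}
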
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index ed8999d1a640..f00ce624b9f7 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -29,6 +29,7 @@ int cpu_map__get_core_id(int cpu);
int cpu_map__get_core(struct cpu_map *map, int idx, void *data);
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
+const struct cpu_map *cpu_map__online(void); /* thread unsafe */
struct cpu_map *cpu_map__get(struct cpu_map *map);
void cpu_map__put(struct cpu_map *map);
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
new file mode 100644
index 000000000000..ece0710249d4
--- /dev/null
+++ b/tools/perf/util/cputopo.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/param.h>
+#include <inttypes.h>
+#include <api/fs/fs.h>
+
+#include "cputopo.h"
+#include "cpumap.h"
+#include "util.h"
+#include "env.h"
+
+
+#define CORE_SIB_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
+#define THRD_SIB_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
+#define NODE_ONLINE_FMT \
+ "%s/devices/system/node/online"
+#define NODE_MEMINFO_FMT \
+ "%s/devices/system/node/node%d/meminfo"
+#define NODE_CPULIST_FMT \
+ "%s/devices/system/node/node%d/cpulist"
+
+static int build_cpu_topology(struct cpu_topology *tp, int cpu)
+{
+ FILE *fp;
+ char filename[MAXPATHLEN];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ ssize_t sret;
+ u32 i = 0;
+ int ret = -1;
+
+ scnprintf(filename, MAXPATHLEN, CORE_SIB_FMT,
+ sysfs__mountpoint(), cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto try_threads;
+
+ sret = getline(&buf, &len, fp);
+ fclose(fp);
+ if (sret <= 0)
+ goto try_threads;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->core_sib; i++) {
+ if (!strcmp(buf, tp->core_siblings[i]))
+ break;
+ }
+ if (i == tp->core_sib) {
+ tp->core_siblings[i] = buf;
+ tp->core_sib++;
+ buf = NULL;
+ len = 0;
+ }
+ ret = 0;
+
+try_threads:
+ scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
+ sysfs__mountpoint(), cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ if (!strcmp(buf, tp->thread_siblings[i]))
+ break;
+ }
+ if (i == tp->thread_sib) {
+ tp->thread_siblings[i] = buf;
+ tp->thread_sib++;
+ buf = NULL;
+ }
+ ret = 0;
+done:
+ if (fp)
+ fclose(fp);
+ free(buf);
+ return ret;
+}
+
+void cpu_topology__delete(struct cpu_topology *tp)
+{
+ u32 i;
+
+ if (!tp)
+ return;
+
+ for (i = 0 ; i < tp->core_sib; i++)
+ zfree(&tp->core_siblings[i]);
+
+ for (i = 0 ; i < tp->thread_sib; i++)
+ zfree(&tp->thread_siblings[i]);
+
+ free(tp);
+}
+
+struct cpu_topology *cpu_topology__new(void)
+{
+ struct cpu_topology *tp = NULL;
+ void *addr;
+ u32 nr, i;
+ size_t sz;
+ long ncpus;
+ int ret = -1;
+ struct cpu_map *map;
+
+ ncpus = cpu__max_present_cpu();
+
+ /* build online CPU map */
+ map = cpu_map__new(NULL);
+ if (map == NULL) {
+ pr_debug("failed to get system cpumap\n");
+ return NULL;
+ }
+
+ nr = (u32)(ncpus & UINT_MAX);
+
+ sz = nr * sizeof(char *);
+ addr = calloc(1, sizeof(*tp) + 2 * sz);
+ if (!addr)
+ goto out_free;
+
+ tp = addr;
+ addr += sizeof(*tp);
+ tp->core_siblings = addr;
+ addr += sz;
+ tp->thread_siblings = addr;
+
+ for (i = 0; i < nr; i++) {
+ if (!cpu_map__has(map, i))
+ continue;
+
+ ret = build_cpu_topology(tp, i);
+ if (ret < 0)
+ break;
+ }
+
+out_free:
+ cpu_map__put(map);
+ if (ret) {
+ cpu_topology__delete(tp);
+ tp = NULL;
+ }
+ return tp;
+}
+
+static int load_numa_node(struct numa_topology_node *node, int nr)
+{
+ char str[MAXPATHLEN];
+ char field[32];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ int ret = -1;
+ FILE *fp;
+ u64 mem;
+
+ node->node = (u32) nr;
+
+ scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
+ sysfs__mountpoint(), nr);
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ /* skip over invalid lines */
+ if (!strchr(buf, ':'))
+ continue;
+ if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
+ goto err;
+ if (!strcmp(field, "MemTotal:"))
+ node->mem_total = mem;
+ if (!strcmp(field, "MemFree:"))
+ node->mem_free = mem;
+ if (node->mem_total && node->mem_free)
+ break;
+ }
+
+ fclose(fp);
+ fp = NULL;
+
+ scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
+ sysfs__mountpoint(), nr);
+
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto err;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ node->cpus = buf;
+ fclose(fp);
+ return 0;
+
+err:
+ free(buf);
+ if (fp)
+ fclose(fp);
+ return ret;
+}
+
+struct numa_topology *numa_topology__new(void)
+{
+ struct cpu_map *node_map = NULL;
+ struct numa_topology *tp = NULL;
+ char path[MAXPATHLEN];
+ char *buf = NULL;
+ size_t len = 0;
+ u32 nr, i;
+ FILE *fp;
+ char *c;
+
+ scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
+ sysfs__mountpoint());
+
+ fp = fopen(path, "r");
+ if (!fp)
+ return NULL;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto out;
+
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ node_map = cpu_map__new(buf);
+ if (!node_map)
+ goto out;
+
+ nr = (u32) node_map->nr;
+
+ tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
+ if (!tp)
+ goto out;
+
+ tp->nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ if (load_numa_node(&tp->nodes[i], node_map->map[i])) {
+ numa_topology__delete(tp);
+ tp = NULL;
+ break;
+ }
+ }
+
+out:
+ free(buf);
+ fclose(fp);
+ cpu_map__put(node_map);
+ return tp;
+}
+
+void numa_topology__delete(struct numa_topology *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->nr; i++)
+ free(tp->nodes[i].cpus);
+
+ free(tp);
+}
diff --git a/tools/perf/util/cputopo.h b/tools/perf/util/cputopo.h
new file mode 100644
index 000000000000..47a97e71acdf
--- /dev/null
+++ b/tools/perf/util/cputopo.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_CPUTOPO_H
+#define __PERF_CPUTOPO_H
+
+#include <linux/types.h>
+#include "env.h"
+
+struct cpu_topology {
+ u32 core_sib;
+ u32 thread_sib;
+ char **core_siblings;
+ char **thread_siblings;
+};
+
+struct numa_topology_node {
+ char *cpus;
+ u32 node;
+ u64 mem_total;
+ u64 mem_free;
+};
+
+struct numa_topology {
+ u32 nr;
+ struct numa_topology_node nodes[0];
+};
+
+struct cpu_topology *cpu_topology__new(void);
+void cpu_topology__delete(struct cpu_topology *tp);
+
+struct numa_topology *numa_topology__new(void);
+void numa_topology__delete(struct numa_topology *tp);
+
+#endif /* __PERF_CPUTOPO_H */
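
To give a feel for the new interface, here is a hedged sketch (not taken from the patch) that builds the NUMA topology, prints each node, and releases it; the output format is arbitrary and the memory figures are whatever was parsed from the node's meminfo file.

#include <stdio.h>
#include <inttypes.h>
#include "cputopo.h"

/* Hypothetical dump of the NUMA topology gathered from sysfs. */
static void print_numa_topology(void)
{
	struct numa_topology *tp = numa_topology__new();
	u32 i;

	if (!tp)
		return;

	for (i = 0; i < tp->nr; i++)
		printf("node%u: cpus=%s total=%" PRIu64 " free=%" PRIu64 "\n",
		       tp->nodes[i].node, tp->nodes[i].cpus,
		       tp->nodes[i].mem_total, tp->nodes[i].mem_free);

	numa_topology__delete(tp);
}
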
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
index bc22c39c727f..216cb17a3322 100644
--- a/tools/perf/util/cs-etm-decoder/Build
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -1 +1 @@
-libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
+perf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 8c155575c6c5..ba4c623cd8de 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -290,6 +290,12 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
decoder->packet_buffer[i].instr_count = 0;
decoder->packet_buffer[i].last_instr_taken_branch = false;
decoder->packet_buffer[i].last_instr_size = 0;
+ decoder->packet_buffer[i].last_instr_type = 0;
+ decoder->packet_buffer[i].last_instr_subtype = 0;
+ decoder->packet_buffer[i].last_instr_cond = 0;
+ decoder->packet_buffer[i].flags = 0;
+ decoder->packet_buffer[i].exception_number = UINT32_MAX;
+ decoder->packet_buffer[i].trace_chan_id = UINT8_MAX;
decoder->packet_buffer[i].cpu = INT_MIN;
}
}
@@ -300,14 +306,12 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
enum cs_etm_sample_type sample_type)
{
u32 et = 0;
- struct int_node *inode = NULL;
+ int cpu;
if (decoder->packet_count >= MAX_BUFFER - 1)
return OCSD_RESP_FATAL_SYS_ERR;
- /* Search the RB tree for the cpu associated with this traceID */
- inode = intlist__find(traceid_list, trace_chan_id);
- if (!inode)
+ if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
return OCSD_RESP_FATAL_SYS_ERR;
et = decoder->tail;
@@ -317,12 +321,18 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
decoder->packet_buffer[et].sample_type = sample_type;
decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
- decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+ decoder->packet_buffer[et].cpu = cpu;
decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].instr_count = 0;
decoder->packet_buffer[et].last_instr_taken_branch = false;
decoder->packet_buffer[et].last_instr_size = 0;
+ decoder->packet_buffer[et].last_instr_type = 0;
+ decoder->packet_buffer[et].last_instr_subtype = 0;
+ decoder->packet_buffer[et].last_instr_cond = 0;
+ decoder->packet_buffer[et].flags = 0;
+ decoder->packet_buffer[et].exception_number = UINT32_MAX;
+ decoder->packet_buffer[et].trace_chan_id = trace_chan_id;
if (decoder->packet_count == MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
@@ -366,6 +376,9 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
packet->start_addr = elem->st_addr;
packet->end_addr = elem->en_addr;
packet->instr_count = elem->num_instr_range;
+ packet->last_instr_type = elem->last_i_type;
+ packet->last_instr_subtype = elem->last_i_subtype;
+ packet->last_instr_cond = elem->last_instr_cond;
switch (elem->last_i_type) {
case OCSD_INSTR_BR:
@@ -395,10 +408,20 @@ cs_etm_decoder__buffer_discontinuity(struct cs_etm_decoder *decoder,
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_decoder *decoder,
+ const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
-{
- return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
- CS_ETM_EXCEPTION);
+{
+	int ret = 0;
+ struct cs_etm_packet *packet;
+
+ ret = cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+ CS_ETM_EXCEPTION);
+ if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
+ return ret;
+
+ packet = &decoder->packet_buffer[decoder->tail];
+ packet->exception_number = elem->exception_number;
+
+ return ret;
}
static ocsd_datapath_resp_t
@@ -432,7 +455,7 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
- resp = cs_etm_decoder__buffer_exception(decoder,
+ resp = cs_etm_decoder__buffer_exception(decoder, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index a6407d41598f..3ab11dfa92ae 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -15,13 +15,6 @@
struct cs_etm_decoder;
-struct cs_etm_buffer {
- const unsigned char *buf;
- size_t len;
- u64 offset;
- u64 ref_timestamp;
-};
-
enum cs_etm_sample_type {
CS_ETM_EMPTY,
CS_ETM_RANGE,
@@ -43,8 +36,14 @@ struct cs_etm_packet {
u64 start_addr;
u64 end_addr;
u32 instr_count;
+ u32 last_instr_type;
+ u32 last_instr_subtype;
+ u32 flags;
+ u32 exception_number;
+ u8 last_instr_cond;
u8 last_instr_taken_branch;
u8 last_instr_size;
+ u8 trace_chan_id;
int cpu;
};
@@ -99,9 +98,10 @@ enum {
CS_ETM_PROTO_PTM,
};
-enum {
+enum cs_etm_decoder_operation {
CS_ETM_OPERATION_PRINT = 1,
CS_ETM_OPERATION_DECODE,
+ CS_ETM_OPERATION_MAX,
};
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 27a374ddf661..110804936fc3 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -12,6 +12,7 @@
#include <linux/log2.h>
#include <linux/types.h>
+#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>
#include "auxtrace.h"
@@ -24,6 +25,7 @@
#include "machine.h"
#include "map.h"
#include "perf.h"
+#include "symbol.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
@@ -63,13 +65,10 @@ struct cs_etm_queue {
struct thread *thread;
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
- const struct cs_etm_state *state;
union perf_event *event_buf;
unsigned int queue_nr;
pid_t pid, tid;
int cpu;
- u64 time;
- u64 timestamp;
u64 offset;
u64 period_instructions;
struct branch_stack *last_branch;
@@ -77,11 +76,13 @@ struct cs_etm_queue {
size_t last_branch_pos;
struct cs_etm_packet *prev_packet;
struct cs_etm_packet *packet;
+ const unsigned char *buf;
+ size_t buf_len, buf_used;
};
static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
- pid_t tid, u64 time_);
+ pid_t tid);
/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300
@@ -96,6 +97,34 @@ static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
return CS_ETM_PROTO_ETMV3;
}
+static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *magic = metadata[CS_ETM_MAGIC];
+ return 0;
+}
+
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *cpu = (int)metadata[CS_ETM_CPU];
+ return 0;
+}
+
static void cs_etm__packet_dump(const char *pkt_string)
{
const char *color = PERF_COLOR_BLUE;
@@ -109,10 +138,83 @@ static void cs_etm__packet_dump(const char *pkt_string)
fflush(stdout);
}
+static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm, int idx,
+ u32 etmidr)
+{
+ u64 **metadata = etm->metadata;
+
+ t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
+ t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
+ t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
+}
+
+static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm, int idx)
+{
+ u64 **metadata = etm->metadata;
+
+ t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
+ t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
+ t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
+ t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
+ t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
+ t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
+}
+
+static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm)
+{
+ int i;
+ u32 etmidr;
+ u64 architecture;
+
+ for (i = 0; i < etm->num_cpu; i++) {
+ architecture = etm->metadata[i][CS_ETM_MAGIC];
+
+ switch (architecture) {
+ case __perf_cs_etmv3_magic:
+ etmidr = etm->metadata[i][CS_ETM_ETMIDR];
+ cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
+ break;
+ case __perf_cs_etmv4_magic:
+ cs_etm__set_trace_param_etmv4(t_params, etm, i);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_queue *etmq,
+ enum cs_etm_decoder_operation mode)
+{
+ int ret = -EINVAL;
+
+ if (!(mode < CS_ETM_OPERATION_MAX))
+ goto out;
+
+ d_params->packet_printer = cs_etm__packet_dump;
+ d_params->operation = mode;
+ d_params->data = etmq;
+ d_params->formatted = true;
+ d_params->fsyncs = false;
+ d_params->hsyncs = false;
+ d_params->frame_aligned = true;
+
+ ret = 0;
+out:
+ return ret;
+}
+
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
struct auxtrace_buffer *buffer)
{
- int i, ret;
+ int ret;
const char *color = PERF_COLOR_BLUE;
struct cs_etm_decoder_params d_params;
struct cs_etm_trace_params *t_params;
@@ -126,48 +228,22 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
- for (i = 0; i < etm->num_cpu; i++) {
- if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
- u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
-
- t_params[i].protocol =
- cs_etm__get_v7_protocol_version(etmidr);
- t_params[i].etmv3.reg_ctrl =
- etm->metadata[i][CS_ETM_ETMCR];
- t_params[i].etmv3.reg_trc_id =
- etm->metadata[i][CS_ETM_ETMTRACEIDR];
- } else if (etm->metadata[i][CS_ETM_MAGIC] ==
- __perf_cs_etmv4_magic) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 =
- etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 =
- etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 =
- etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 =
- etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
- etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
- etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
- }
- }
+
+ if (!t_params)
+ return;
+
+ if (cs_etm__init_trace_params(t_params, etm))
+ goto out_free;
/* Set decoder parameters to simply print the trace packets */
- d_params.packet_printer = cs_etm__packet_dump;
- d_params.operation = CS_ETM_OPERATION_PRINT;
- d_params.formatted = true;
- d_params.fsyncs = false;
- d_params.hsyncs = false;
- d_params.frame_aligned = true;
+ if (cs_etm__init_decoder_params(&d_params, NULL,
+ CS_ETM_OPERATION_PRINT))
+ goto out_free;
decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
- zfree(&t_params);
-
if (!decoder)
- return;
+ goto out_free;
do {
size_t consumed;
@@ -182,6 +258,9 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
} while (buffer_used < buffer->size);
cs_etm_decoder__free(decoder);
+
+out_free:
+ zfree(&t_params);
}
static int cs_etm__flush_events(struct perf_session *session,
@@ -205,7 +284,7 @@ static int cs_etm__flush_events(struct perf_session *session,
if (ret < 0)
return ret;
- return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
+ return cs_etm__process_timeless_queues(etm, -1);
}
static void cs_etm__free_queue(void *priv)
@@ -251,7 +330,7 @@ static void cs_etm__free(struct perf_session *session)
cs_etm__free_events(session);
session->auxtrace = NULL;
- /* First remove all traceID/CPU# nodes for the RB tree */
+ /* First remove all traceID/metadata nodes for the RB tree */
intlist__for_each_entry_safe(inode, tmp, traceid_list)
intlist__remove(traceid_list, inode);
/* Then the RB tree itself */
@@ -297,7 +376,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
struct addr_location al;
if (!etmq)
- return -1;
+ return 0;
machine = etmq->etm->machine;
cpumode = cs_etm__cpu_mode(etmq, address);
@@ -305,7 +384,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
thread = etmq->thread;
if (!thread) {
if (cpumode != PERF_RECORD_MISC_KERNEL)
- return -EINVAL;
+ return 0;
thread = etmq->etm->unknown_thread;
}
@@ -328,12 +407,10 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
return len;
}
-static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
- unsigned int queue_nr)
+static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
{
- int i;
struct cs_etm_decoder_params d_params;
- struct cs_etm_trace_params *t_params;
+ struct cs_etm_trace_params *t_params = NULL;
struct cs_etm_queue *etmq;
size_t szp = sizeof(struct cs_etm_packet);
@@ -368,59 +445,22 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
if (!etmq->event_buf)
goto out_free;
- etmq->etm = etm;
- etmq->queue_nr = queue_nr;
- etmq->pid = -1;
- etmq->tid = -1;
- etmq->cpu = -1;
-
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
if (!t_params)
goto out_free;
- for (i = 0; i < etm->num_cpu; i++) {
- if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
- u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
-
- t_params[i].protocol =
- cs_etm__get_v7_protocol_version(etmidr);
- t_params[i].etmv3.reg_ctrl =
- etm->metadata[i][CS_ETM_ETMCR];
- t_params[i].etmv3.reg_trc_id =
- etm->metadata[i][CS_ETM_ETMTRACEIDR];
- } else if (etm->metadata[i][CS_ETM_MAGIC] ==
- __perf_cs_etmv4_magic) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 =
- etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 =
- etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 =
- etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 =
- etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
- etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
- etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
- }
- }
+ if (cs_etm__init_trace_params(t_params, etm))
+ goto out_free;
- /* Set decoder parameters to simply print the trace packets */
- d_params.packet_printer = cs_etm__packet_dump;
- d_params.operation = CS_ETM_OPERATION_DECODE;
- d_params.formatted = true;
- d_params.fsyncs = false;
- d_params.hsyncs = false;
- d_params.frame_aligned = true;
- d_params.data = etmq;
+ /* Set decoder parameters to decode trace packets */
+ if (cs_etm__init_decoder_params(&d_params, etmq,
+ CS_ETM_OPERATION_DECODE))
+ goto out_free;
etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
- zfree(&t_params);
-
if (!etmq->decoder)
goto out_free;
@@ -433,14 +473,13 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
cs_etm__mem_access))
goto out_free_decoder;
- etmq->offset = 0;
- etmq->period_instructions = 0;
-
+ zfree(&t_params);
return etmq;
out_free_decoder:
cs_etm_decoder__free(etmq->decoder);
out_free:
+ zfree(&t_params);
zfree(&etmq->event_buf);
zfree(&etmq->last_branch);
zfree(&etmq->last_branch_rb);
@@ -455,24 +494,30 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
+ int ret = 0;
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
- return 0;
+ goto out;
- etmq = cs_etm__alloc_queue(etm, queue_nr);
+ etmq = cs_etm__alloc_queue(etm);
- if (!etmq)
- return -ENOMEM;
+ if (!etmq) {
+ ret = -ENOMEM;
+ goto out;
+ }
queue->priv = etmq;
-
- if (queue->cpu != -1)
- etmq->cpu = queue->cpu;
-
+ etmq->etm = etm;
+ etmq->queue_nr = queue_nr;
+ etmq->cpu = queue->cpu;
etmq->tid = queue->tid;
+ etmq->pid = -1;
+ etmq->offset = 0;
+ etmq->period_instructions = 0;
- return 0;
+out:
+ return ret;
}
static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
@@ -480,6 +525,9 @@ static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
unsigned int i;
int ret;
+ if (!etm->kernel_start)
+ etm->kernel_start = machine__kernel_start(etm->machine);
+
for (i = 0; i < etm->queues.nr_queues; i++) {
ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
if (ret)
@@ -637,7 +685,7 @@ static int cs_etm__inject_event(union perf_event *event,
static int
-cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
+cs_etm__get_trace(struct cs_etm_queue *etmq)
{
struct auxtrace_buffer *aux_buffer = etmq->buffer;
struct auxtrace_buffer *old_buffer = aux_buffer;
@@ -651,7 +699,7 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
if (!aux_buffer) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
- buff->len = 0;
+ etmq->buf_len = 0;
return 0;
}
@@ -671,13 +719,11 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
- buff->offset = aux_buffer->offset;
- buff->len = aux_buffer->size;
- buff->buf = aux_buffer->data;
-
- buff->ref_timestamp = aux_buffer->reference;
+ etmq->buf_used = 0;
+ etmq->buf_len = aux_buffer->size;
+ etmq->buf = aux_buffer->data;
- return buff->len;
+ return etmq->buf_len;
}
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
@@ -719,7 +765,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
sample.stream_id = etmq->etm->instructions_id;
sample.period = period;
sample.cpu = etmq->packet->cpu;
- sample.flags = 0;
+ sample.flags = etmq->prev_packet->flags;
sample.insn_len = 1;
sample.cpumode = event->sample.header.misc;
@@ -778,7 +824,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
sample.stream_id = etmq->etm->branches_id;
sample.period = 1;
sample.cpu = etmq->packet->cpu;
- sample.flags = 0;
+ sample.flags = etmq->prev_packet->flags;
sample.cpumode = event->sample.header.misc;
/*
@@ -1106,95 +1152,489 @@ static int cs_etm__end_block(struct cs_etm_queue *etmq)
return 0;
}
+/*
+ * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
+ * if need be.
+ * Returns: < 0 if error
+ * = 0 if no more auxtrace_buffer to read
+ * > 0 if the current buffer isn't empty yet
+ */
+static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
+{
+ int ret;
+
+ if (!etmq->buf_len) {
+ ret = cs_etm__get_trace(etmq);
+ if (ret <= 0)
+ return ret;
+ /*
+ * We cannot assume consecutive blocks in the data file
+ * are contiguous, reset the decoder to force re-sync.
+ */
+ ret = cs_etm_decoder__reset(etmq->decoder);
+ if (ret)
+ return ret;
+ }
+
+ return etmq->buf_len;
+}
+
+static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq,
+ struct cs_etm_packet *packet,
+ u64 end_addr)
+{
+ u16 instr16;
+ u32 instr32;
+ u64 addr;
+
+ switch (packet->isa) {
+ case CS_ETM_ISA_T32:
+ /*
+ * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
+ *
+ * b'15 b'8
+ * +-----------------+--------+
+ * | 1 1 0 1 1 1 1 1 | imm8 |
+ * +-----------------+--------+
+ *
+	 * According to the specification, SVC is only defined for T32
+	 * as a 16-bit instruction and has no 32-bit encoding; so below
+	 * we only read 2 bytes as the instruction size for T32.
+ */
+ addr = end_addr - 2;
+ cs_etm__mem_access(etmq, addr, sizeof(instr16), (u8 *)&instr16);
+ if ((instr16 & 0xFF00) == 0xDF00)
+ return true;
+
+ break;
+ case CS_ETM_ISA_A32:
+ /*
+ * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
+ *
+ * b'31 b'28 b'27 b'24
+ * +---------+---------+-------------------------+
+ * | !1111 | 1 1 1 1 | imm24 |
+ * +---------+---------+-------------------------+
+ */
+ addr = end_addr - 4;
+ cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
+ if ((instr32 & 0x0F000000) == 0x0F000000 &&
+ (instr32 & 0xF0000000) != 0xF0000000)
+ return true;
+
+ break;
+ case CS_ETM_ISA_A64:
+ /*
+ * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
+ *
+ * b'31 b'21 b'4 b'0
+ * +-----------------------+---------+-----------+
+ * | 1 1 0 1 0 1 0 0 0 0 0 | imm16 | 0 0 0 0 1 |
+ * +-----------------------+---------+-----------+
+ */
+ addr = end_addr - 4;
+ cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
+ if ((instr32 & 0xFFE0001F) == 0xd4000001)
+ return true;
+
+ break;
+ case CS_ETM_ISA_UNKNOWN:
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool cs_etm__is_syscall(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_SVC)
+ return true;
+
+ /*
+ * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
+ * HVC cases; need to check if it's SVC instruction based on
+ * packet address.
+ */
+ if (magic == __perf_cs_etmv4_magic) {
+ if (packet->exception_number == CS_ETMV4_EXC_CALL &&
+ cs_etm__is_svc_instr(etmq, prev_packet,
+ prev_packet->end_addr))
+ return true;
+ }
+
+ return false;
+}
+
+static bool cs_etm__is_async_exception(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
+ packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
+ packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
+ packet->exception_number == CS_ETMV3_EXC_IRQ ||
+ packet->exception_number == CS_ETMV3_EXC_FIQ)
+ return true;
+
+ if (magic == __perf_cs_etmv4_magic)
+ if (packet->exception_number == CS_ETMV4_EXC_RESET ||
+ packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
+ packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
+ packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
+ packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
+ packet->exception_number == CS_ETMV4_EXC_IRQ ||
+ packet->exception_number == CS_ETMV4_EXC_FIQ)
+ return true;
+
+ return false;
+}
+
+static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_SMC ||
+ packet->exception_number == CS_ETMV3_EXC_HYP ||
+ packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
+ packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
+ packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
+ packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
+ packet->exception_number == CS_ETMV3_EXC_GENERIC)
+ return true;
+
+ if (magic == __perf_cs_etmv4_magic) {
+ if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
+ packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
+ packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
+ packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
+ return true;
+
+ /*
+		 * For CS_ETMV4_EXC_CALL, instructions other than SVC
+		 * (i.e. SMC and HVC) are taken as sync exceptions.
+ */
+ if (packet->exception_number == CS_ETMV4_EXC_CALL &&
+ !cs_etm__is_svc_instr(etmq, prev_packet,
+ prev_packet->end_addr))
+ return true;
+
+ /*
+		 * ETMv4 has 5 bits for the exception number; numbers in
+		 * the range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END] are
+		 * implementation defined exceptions.
+		 *
+		 * In that case, simply treat it as a sync exception.
+ */
+ if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
+ packet->exception_number <= CS_ETMV4_EXC_END)
+ return true;
+ }
+
+ return false;
+}
+
+static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+ u64 magic;
+ int ret;
+
+ switch (packet->sample_type) {
+ case CS_ETM_RANGE:
+ /*
+		 * An immediate branch instruction with neither the link
+		 * nor the return flag set is a normal branch within the
+		 * function.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR &&
+ packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
+ packet->flags = PERF_IP_FLAG_BRANCH;
+
+ if (packet->last_instr_cond)
+ packet->flags |= PERF_IP_FLAG_CONDITIONAL;
+ }
+
+ /*
+		 * Immediate branch instruction with link (e.g. BL): a
+		 * branch instruction for a function call.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR &&
+ packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL;
+
+ /*
+		 * Indirect branch instruction with link (e.g. BLR): a
+		 * branch instruction for a function call.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL;
+
+ /*
+		 * An indirect branch instruction with subtype
+		 * OCSD_S_INSTR_V7_IMPLIED_RET is an explicit hint of a
+		 * function return for A32/T32.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /*
+		 * Indirect branch instruction without link (e.g. BR),
+		 * usually used for function return, especially for
+		 * functions in dynamically linked libraries.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_NONE)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /* Return instruction for function return. */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /*
+		 * The decoder might insert a discontinuity in the middle
+		 * of instruction packets; fix up prev_packet with the
+		 * PERF_IP_FLAG_TRACE_BEGIN flag to indicate that the
+		 * trace is restarting.
+ */
+ if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
+ prev_packet->flags |= PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_BEGIN;
+
+ /*
+		 * If the previous packet is an exception return packet
+		 * and the return address immediately follows an SVC
+		 * instruction, calibrate the previous packet's sample
+		 * flags to PERF_IP_FLAG_SYSCALLRET.
+ */
+ if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_INTERRUPT) &&
+ cs_etm__is_svc_instr(etmq, packet, packet->start_addr))
+ prev_packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_SYSCALLRET;
+ break;
+ case CS_ETM_DISCONTINUITY:
+ /*
+		 * The trace is discontinuous; if the previous packet is
+		 * an instruction packet, set the PERF_IP_FLAG_TRACE_END
+		 * flag on it.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags |= PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_END;
+ break;
+ case CS_ETM_EXCEPTION:
+ ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
+ if (ret)
+ return ret;
+
+ /* The exception is for system call. */
+ if (cs_etm__is_syscall(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_SYSCALLRET;
+ /*
+ * The exceptions are triggered by external signals from bus,
+ * interrupt controller, debug module, PE reset or halt.
+ */
+ else if (cs_etm__is_async_exception(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_INTERRUPT;
+ /*
+ * Otherwise, exception is caused by trap, instruction &
+ * data fault, or alignment errors.
+ */
+ else if (cs_etm__is_sync_exception(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_INTERRUPT;
+
+ /*
+		 * When the exception packet is inserted, since the
+		 * exception packet is not used standalone for generating
+		 * samples and is affiliated with the previous instruction
+		 * range packet, set the previous range packet's flags to
+		 * tell perf it is an exception-taken branch.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags = packet->flags;
+ break;
+ case CS_ETM_EXCEPTION_RET:
+ /*
+		 * When the exception return packet is inserted, since the
+		 * exception return packet is not used standalone for
+		 * generating samples and is affiliated with the previous
+		 * instruction range packet, set the previous range
+		 * packet's flags to tell perf it is an exception return
+		 * branch.
+		 *
+		 * The exception return can be for either a system call or
+		 * another exception type; unfortunately the packet does
+		 * not carry exception type information, so we cannot
+		 * decide the type purely from the exception return packet.
+		 * Recording the exception number from the exception packet
+		 * and reusing it for the exception return packet is not
+		 * reliable either, because the trace can be discontinuous
+		 * or the interrupt can be nested; in those two cases the
+		 * recorded exception number cannot be used for the
+		 * exception return packet.
+		 *
+		 * For the exception return packet we only need to
+		 * distinguish whether it is for a system call or for
+		 * another type. The decision can therefore be deferred
+		 * until the next packet, which contains the return
+		 * address; based on that address we can read the previous
+		 * instruction, check whether it is a system call
+		 * instruction, and calibrate the sample flags as needed.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_INTERRUPT;
+ break;
+ case CS_ETM_EMPTY:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
+{
+ int ret = 0;
+ size_t processed = 0;
+
+ /*
+ * Packets are decoded and added to the decoder's packet queue
+ * until the decoder packet processing callback has requested that
+ * processing stops or there is nothing left in the buffer. Normal
+ * operations that stop processing are a timestamp packet or a full
+ * decoder buffer queue.
+ */
+ ret = cs_etm_decoder__process_data_block(etmq->decoder,
+ etmq->offset,
+ &etmq->buf[etmq->buf_used],
+ etmq->buf_len,
+ &processed);
+ if (ret)
+ goto out;
+
+ etmq->offset += processed;
+ etmq->buf_used += processed;
+ etmq->buf_len -= processed;
+
+out:
+ return ret;
+}
+
+static int cs_etm__process_decoder_queue(struct cs_etm_queue *etmq)
+{
+ int ret;
+
+ /* Process each packet in this chunk */
+ while (1) {
+ ret = cs_etm_decoder__get_packet(etmq->decoder,
+ etmq->packet);
+ if (ret <= 0)
+ /*
+ * Stop processing this chunk on
+ * end of data or error
+ */
+ break;
+
+ /*
+		 * Since packet addresses are swapped in the packet
+		 * handling within the switch() statement below, the
+		 * sample flags must be set before the switch() so the
+		 * address information is used before the packets are
+		 * swapped.
+ */
+ ret = cs_etm__set_sample_flags(etmq);
+ if (ret < 0)
+ break;
+
+ switch (etmq->packet->sample_type) {
+ case CS_ETM_RANGE:
+ /*
+ * If the packet contains an instruction
+ * range, generate instruction sequence
+ * events.
+ */
+ cs_etm__sample(etmq);
+ break;
+ case CS_ETM_EXCEPTION:
+ case CS_ETM_EXCEPTION_RET:
+ /*
+ * If the exception packet is coming,
+ * make sure the previous instruction
+ * range packet to be handled properly.
+ */
+ cs_etm__exception(etmq);
+ break;
+ case CS_ETM_DISCONTINUITY:
+ /*
+ * Discontinuity in trace, flush
+ * previous branch stack
+ */
+ cs_etm__flush(etmq);
+ break;
+ case CS_ETM_EMPTY:
+ /*
+ * Should not receive empty packet,
+ * report error.
+ */
+ pr_err("CS ETM Trace: empty packet\n");
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
- struct cs_etm_auxtrace *etm = etmq->etm;
- struct cs_etm_buffer buffer;
- size_t buffer_used, processed;
int err = 0;
- if (!etm->kernel_start)
- etm->kernel_start = machine__kernel_start(etm->machine);
-
/* Go through each buffer in the queue and decode them one by one */
while (1) {
- buffer_used = 0;
- memset(&buffer, 0, sizeof(buffer));
- err = cs_etm__get_trace(&buffer, etmq);
+ err = cs_etm__get_data_block(etmq);
if (err <= 0)
return err;
- /*
- * We cannot assume consecutive blocks in the data file are
- * contiguous, reset the decoder to force re-sync.
- */
- err = cs_etm_decoder__reset(etmq->decoder);
- if (err != 0)
- return err;
/* Run trace decoder until buffer consumed or end of trace */
do {
- processed = 0;
- err = cs_etm_decoder__process_data_block(
- etmq->decoder,
- etmq->offset,
- &buffer.buf[buffer_used],
- buffer.len - buffer_used,
- &processed);
+ err = cs_etm__decode_data_block(etmq);
if (err)
return err;
- etmq->offset += processed;
- buffer_used += processed;
-
- /* Process each packet in this chunk */
- while (1) {
- err = cs_etm_decoder__get_packet(etmq->decoder,
- etmq->packet);
- if (err <= 0)
- /*
- * Stop processing this chunk on
- * end of data or error
- */
- break;
-
- switch (etmq->packet->sample_type) {
- case CS_ETM_RANGE:
- /*
- * If the packet contains an instruction
- * range, generate instruction sequence
- * events.
- */
- cs_etm__sample(etmq);
- break;
- case CS_ETM_EXCEPTION:
- case CS_ETM_EXCEPTION_RET:
- /*
- * If the exception packet is coming,
- * make sure the previous instruction
- * range packet to be handled properly.
- */
- cs_etm__exception(etmq);
- break;
- case CS_ETM_DISCONTINUITY:
- /*
- * Discontinuity in trace, flush
- * previous branch stack
- */
- cs_etm__flush(etmq);
- break;
- case CS_ETM_EMPTY:
- /*
- * Should not receive empty packet,
- * report error.
- */
- pr_err("CS ETM Trace: empty packet\n");
- return -EINVAL;
- default:
- break;
- }
- }
- } while (buffer.len > buffer_used);
+ /*
+			 * Process each packet in this chunk; if an error
+			 * occurs there is nothing to do other than hope
+			 * the next one will be better.
+ */
+ err = cs_etm__process_decoder_queue(etmq);
+
+ } while (etmq->buf_len);
if (err == 0)
/* Flush any remaining branch stack entries */
@@ -1205,7 +1645,7 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
}
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
- pid_t tid, u64 time_)
+ pid_t tid)
{
unsigned int i;
struct auxtrace_queues *queues = &etm->queues;
@@ -1215,7 +1655,6 @@ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
struct cs_etm_queue *etmq = queue->priv;
if (etmq && ((tid == -1) || (etmq->tid == tid))) {
- etmq->time = time_;
cs_etm__set_pid_tid_cpu(etm, queue);
cs_etm__run_decoder(etmq);
}
@@ -1259,8 +1698,7 @@ static int cs_etm__process_event(struct perf_session *session,
if (event->header.type == PERF_RECORD_EXIT)
return cs_etm__process_timeless_queues(etm,
- event->fork.tid,
- sample->time);
+ event->fork.tid);
return 0;
}
@@ -1414,9 +1852,9 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
0xffffffff);
/*
- * Create an RB tree for traceID-CPU# tuple. Since the conversion has
- * to be made for each packet that gets decoded, optimizing access in
- * anything other than a sequential array is worth doing.
+ * Create an RB tree for traceID-metadata tuple. Since the conversion
+ * has to be made for each packet that gets decoded, optimizing access
+ * in anything other than a sequential array is worth doing.
*/
traceid_list = intlist__new(NULL);
if (!traceid_list) {
@@ -1482,8 +1920,8 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
err = -EINVAL;
goto err_free_metadata;
}
- /* All good, associate the traceID with the CPU# */
- inode->priv = &metadata[j][CS_ETM_CPU];
+ /* All good, associate the traceID with the metadata pointer */
+ inode->priv = metadata[j];
}
/*
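
For reference, the A64 arm of cs_etm__is_svc_instr() above reduces to a single mask-and-compare on the 32-bit opcode; a standalone sketch of just that check (assuming the opcode has already been read from memory):

#include <stdbool.h>
#include <linux/types.h>

/* SVC #imm16 on AArch64 is encoded as 0xd4000001 | (imm16 << 5). */
static bool is_a64_svc_opcode(u32 instr)
{
	return (instr & 0xffe0001f) == 0xd4000001;
}
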
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 37f8d48179ca..0e97c196147a 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -53,7 +53,51 @@ enum {
CS_ETMV4_PRIV_MAX,
};
-/* RB tree for quick conversion between traceID and CPUs */
+/*
+ * ETMv3 exception encoding number:
+ * See Embedded Trace Macrocell spcification (ARM IHI 0014Q)
+ * See Embedded Trace Macrocell specification (ARM IHI 0014Q)
+ */
+enum {
+ CS_ETMV3_EXC_NONE = 0,
+ CS_ETMV3_EXC_DEBUG_HALT = 1,
+ CS_ETMV3_EXC_SMC = 2,
+ CS_ETMV3_EXC_HYP = 3,
+ CS_ETMV3_EXC_ASYNC_DATA_ABORT = 4,
+ CS_ETMV3_EXC_JAZELLE_THUMBEE = 5,
+ CS_ETMV3_EXC_PE_RESET = 8,
+ CS_ETMV3_EXC_UNDEFINED_INSTR = 9,
+ CS_ETMV3_EXC_SVC = 10,
+ CS_ETMV3_EXC_PREFETCH_ABORT = 11,
+ CS_ETMV3_EXC_DATA_FAULT = 12,
+ CS_ETMV3_EXC_GENERIC = 13,
+ CS_ETMV3_EXC_IRQ = 14,
+ CS_ETMV3_EXC_FIQ = 15,
+};
+
+/*
+ * ETMv4 exception encoding number:
+ * See ARM Embedded Trace Macrocell Architecture Specification (ARM IHI 0064D)
+ * table 6-12 Possible values for the TYPE field in an Exception instruction
+ * trace packet, for ARMv7-A/R and ARMv8-A/R PEs.
+ */
+enum {
+ CS_ETMV4_EXC_RESET = 0,
+ CS_ETMV4_EXC_DEBUG_HALT = 1,
+ CS_ETMV4_EXC_CALL = 2,
+ CS_ETMV4_EXC_TRAP = 3,
+ CS_ETMV4_EXC_SYSTEM_ERROR = 4,
+ CS_ETMV4_EXC_INST_DEBUG = 6,
+ CS_ETMV4_EXC_DATA_DEBUG = 7,
+ CS_ETMV4_EXC_ALIGNMENT = 10,
+ CS_ETMV4_EXC_INST_FAULT = 11,
+ CS_ETMV4_EXC_DATA_FAULT = 12,
+ CS_ETMV4_EXC_IRQ = 14,
+ CS_ETMV4_EXC_FIQ = 15,
+ CS_ETMV4_EXC_END = 31,
+};
+
+/* RB tree for quick conversion between traceID and metadata pointers */
struct intlist *traceid_list;
#define KiB(x) ((x) * 1024)
@@ -61,14 +105,15 @@ struct intlist *traceid_list;
#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
-static const u64 __perf_cs_etmv3_magic = 0x3030303030303030ULL;
-static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
+#define __perf_cs_etmv3_magic 0x3030303030303030ULL
+#define __perf_cs_etmv4_magic 0x4040404040404040ULL
#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64))
#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64))
#ifdef HAVE_CSTRACE_SUPPORT
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
#else
static inline int
cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
@@ -76,6 +121,12 @@ cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
{
return -1;
}
+
+static inline int cs_etm__get_cpu(u8 trace_chan_id __maybe_unused,
+ int *cpu __maybe_unused)
+{
+ return -1;
+}
#endif
#endif
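
With inode->priv now pointing at the whole metadata block, the cs_etm__get_cpu() prototype added above can resolve a CPU number from a trace channel ID. The sketch below is one plausible body, assuming the intlist__find()/int_node API from util/intlist.h and the CS_ETM_CPU metadata index; the in-tree implementation may differ.

/*
 * Sketch only: a plausible cs_etm__get_cpu() body, assuming
 * intlist__find() from util/intlist.h and the CS_ETM_CPU index.
 */
#include <errno.h>
#include "intlist.h"
#include "cs-etm.h"

static int sketch_get_cpu(u8 trace_chan_id, int *cpu)
{
        struct int_node *inode;
        u64 *metadata;

        /* traceid_list maps trace_chan_id to the metadata block */
        inode = intlist__find(traceid_list, trace_chan_id);
        if (!inode)
                return -EINVAL;

        metadata = inode->priv;
        *cpu = (int)metadata[CS_ETM_CPU];
        return 0;
}
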
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 2a36fab76994..26af43ad9ddd 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1578,7 +1578,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
{
struct perf_session *session;
struct perf_data data = {
- .file = { .path = input, .fd = -1 },
+ .path = input,
.mode = PERF_DATA_MODE_READ,
.force = opts->force,
};
@@ -1650,7 +1650,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
fprintf(stderr,
"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
- data.file.path, path);
+ data.path, path);
fprintf(stderr,
"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index d8cfc19ddb10..7bd5ddeb7a41 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -7,11 +7,117 @@
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
+#include <asm/bug.h>
+#include <sys/types.h>
+#include <dirent.h>
#include "data.h"
#include "util.h"
#include "debug.h"
+static void close_dir(struct perf_data_file *files, int nr)
+{
+ while (--nr >= 1) {
+ close(files[nr].fd);
+ free(files[nr].path);
+ }
+ free(files);
+}
+
+void perf_data__close_dir(struct perf_data *data)
+{
+ close_dir(data->dir.files, data->dir.nr);
+}
+
+int perf_data__create_dir(struct perf_data *data, int nr)
+{
+ struct perf_data_file *files = NULL;
+ int i, ret = -1;
+
+ files = zalloc(nr * sizeof(*files));
+ if (!files)
+ return -ENOMEM;
+
+ data->dir.files = files;
+ data->dir.nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ struct perf_data_file *file = &files[i];
+
+ if (asprintf(&file->path, "%s/data.%d", data->path, i) < 0)
+ goto out_err;
+
+ ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
+ if (ret < 0)
+ goto out_err;
+
+ file->fd = ret;
+ }
+
+ return 0;
+
+out_err:
+ close_dir(files, i);
+ return ret;
+}
+
+int perf_data__open_dir(struct perf_data *data)
+{
+ struct perf_data_file *files = NULL;
+ struct dirent *dent;
+ int ret = -1;
+ DIR *dir;
+ int nr = 0;
+
+ dir = opendir(data->path);
+ if (!dir)
+ return -EINVAL;
+
+ while ((dent = readdir(dir)) != NULL) {
+ struct perf_data_file *file;
+ char path[PATH_MAX];
+ struct stat st;
+
+ snprintf(path, sizeof(path), "%s/%s", data->path, dent->d_name);
+ if (stat(path, &st))
+ continue;
+
+ if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data", 4))
+ continue;
+
+ ret = -ENOMEM;
+
+ file = realloc(files, (nr + 1) * sizeof(*files));
+ if (!file)
+ goto out_err;
+
+ files = file;
+ file = &files[nr++];
+
+ file->path = strdup(path);
+ if (!file->path)
+ goto out_err;
+
+ ret = open(file->path, O_RDONLY);
+ if (ret < 0)
+ goto out_err;
+
+ file->fd = ret;
+ file->size = st.st_size;
+ }
+
+ if (!files)
+ return -EINVAL;
+
+ data->dir.files = files;
+ data->dir.nr = nr;
+ return 0;
+
+out_err:
+ close_dir(files, nr);
+ return ret;
+}
+
static bool check_pipe(struct perf_data *data)
{
struct stat st;
@@ -19,11 +125,11 @@ static bool check_pipe(struct perf_data *data)
int fd = perf_data__is_read(data) ?
STDIN_FILENO : STDOUT_FILENO;
- if (!data->file.path) {
+ if (!data->path) {
if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
is_pipe = true;
} else {
- if (!strcmp(data->file.path, "-"))
+ if (!strcmp(data->path, "-"))
is_pipe = true;
}
@@ -37,13 +143,31 @@ static int check_backup(struct perf_data *data)
{
struct stat st;
- if (!stat(data->file.path, &st) && st.st_size) {
- /* TODO check errors properly */
+ if (perf_data__is_read(data))
+ return 0;
+
+ if (!stat(data->path, &st) && st.st_size) {
char oldname[PATH_MAX];
+ int ret;
+
snprintf(oldname, sizeof(oldname), "%s.old",
- data->file.path);
- unlink(oldname);
- rename(data->file.path, oldname);
+ data->path);
+
+ ret = rm_rf_perf_data(oldname);
+ if (ret) {
+ pr_err("Can't remove old data: %s (%s)\n",
+ ret == -2 ?
+ "Unknown file found" : strerror(errno),
+ oldname);
+ return -1;
+ }
+
+ if (rename(data->path, oldname)) {
+ pr_err("Can't move data: %s (%s to %s)\n",
+ strerror(errno),
+ data->path, oldname);
+ return -1;
+ }
}
return 0;
@@ -82,7 +206,7 @@ static int open_file_read(struct perf_data *data)
goto out_close;
}
- data->size = st.st_size;
+ data->file.size = st.st_size;
return fd;
out_close:
@@ -95,9 +219,6 @@ static int open_file_write(struct perf_data *data)
int fd;
char sbuf[STRERR_BUFSIZE];
- if (check_backup(data))
- return -1;
-
fd = open(data->file.path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC,
S_IRUSR|S_IWUSR);
@@ -115,8 +236,22 @@ static int open_file(struct perf_data *data)
fd = perf_data__is_read(data) ?
open_file_read(data) : open_file_write(data);
+ if (fd < 0) {
+ free(data->file.path);
+ return -1;
+ }
+
data->file.fd = fd;
- return fd < 0 ? -1 : 0;
+ return 0;
+}
+
+static int open_file_dup(struct perf_data *data)
+{
+ data->file.path = strdup(data->path);
+ if (!data->file.path)
+ return -ENOMEM;
+
+ return open_file(data);
}
int perf_data__open(struct perf_data *data)
@@ -124,14 +259,18 @@ int perf_data__open(struct perf_data *data)
if (check_pipe(data))
return 0;
- if (!data->file.path)
- data->file.path = "perf.data";
+ if (!data->path)
+ data->path = "perf.data";
- return open_file(data);
+ if (check_backup(data))
+ return -1;
+
+ return open_file_dup(data);
}
void perf_data__close(struct perf_data *data)
{
+ free(data->file.path);
close(data->file.fd);
}
@@ -159,15 +298,15 @@ int perf_data__switch(struct perf_data *data,
if (perf_data__is_read(data))
return -EINVAL;
- if (asprintf(&new_filepath, "%s.%s", data->file.path, postfix) < 0)
+ if (asprintf(&new_filepath, "%s.%s", data->path, postfix) < 0)
return -ENOMEM;
/*
* Only fire a warning, don't return error, continue fill
* original file.
*/
- if (rename(data->file.path, new_filepath))
- pr_warning("Failed to rename %s to %s\n", data->file.path, new_filepath);
+ if (rename(data->path, new_filepath))
+ pr_warning("Failed to rename %s to %s\n", data->path, new_filepath);
if (!at_exit) {
close(data->file.fd);
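
perf_data__open_dir() above grows its file array with realloc() while walking the directory for entries whose names start with "data". The standalone sketch below shows the same readdir()-plus-realloc() pattern with plain POSIX calls; apart from the "data" prefix and the stat() filtering mirrored from the hunk, the details are illustrative.

/* Standalone sketch of the readdir()+realloc() pattern used above. */
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>

struct sketch_file {
        char    path[4096];
        off_t   size;
};

static int sketch_scan(const char *dirname, struct sketch_file **out)
{
        struct sketch_file *files = NULL, *tmp;
        struct dirent *dent;
        struct stat st;
        DIR *dir;
        int nr = 0;

        dir = opendir(dirname);
        if (!dir)
                return -1;

        while ((dent = readdir(dir)) != NULL) {
                char path[4096];

                snprintf(path, sizeof(path), "%s/%s", dirname, dent->d_name);
                /* only regular files whose name starts with "data" */
                if (stat(path, &st) || !S_ISREG(st.st_mode) ||
                    strncmp(dent->d_name, "data", 4))
                        continue;

                tmp = realloc(files, (nr + 1) * sizeof(*files));
                if (!tmp) {
                        free(files);
                        closedir(dir);
                        return -1;
                }
                files = tmp;
                snprintf(files[nr].path, sizeof(files[nr].path), "%s", path);
                files[nr].size = st.st_size;
                nr++;
        }

        closedir(dir);
        *out = files;
        return nr;
}
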
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 4828f7feea89..14b47be2bd69 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -10,16 +10,22 @@ enum perf_data_mode {
};
struct perf_data_file {
- const char *path;
+ char *path;
int fd;
+ unsigned long size;
};
struct perf_data {
+ const char *path;
struct perf_data_file file;
bool is_pipe;
bool force;
- unsigned long size;
enum perf_data_mode mode;
+
+ struct {
+ struct perf_data_file *files;
+ int nr;
+ } dir;
};
static inline bool perf_data__is_read(struct perf_data *data)
@@ -44,7 +50,7 @@ static inline int perf_data__fd(struct perf_data *data)
static inline unsigned long perf_data__size(struct perf_data *data)
{
- return data->size;
+ return data->file.size;
}
int perf_data__open(struct perf_data *data);
@@ -63,4 +69,8 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
int perf_data__switch(struct perf_data *data,
const char *postfix,
size_t pos, bool at_exit);
+
+int perf_data__create_dir(struct perf_data *data, int nr);
+int perf_data__open_dir(struct perf_data *data);
+void perf_data__close_dir(struct perf_data *data);
#endif /* __PERF_DATA_H */
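
A hypothetical caller of the new directory API might look like the sketch below. Only the prototypes and the struct perf_data fields (path, mode, dir.nr, dir.files[].path and .size) come from the hunks above; the "perf.data" directory name and the pr_debug() reporting are assumptions.

/* Hypothetical consumer of the perf_data directory API added above. */
static int sketch_dump_dir_sizes(void)
{
        struct perf_data data = {
                .path = "perf.data",
                .mode = PERF_DATA_MODE_READ,
        };
        int i;

        if (perf_data__open_dir(&data))
                return -1;

        for (i = 0; i < data.dir.nr; i++)
                pr_debug("%s: %lu bytes\n",
                         data.dir.files[i].path, data.dir.files[i].size);

        perf_data__close_dir(&data);
        return 0;
}
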
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 69fbb0a72d0c..de9b4769d06c 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -20,6 +20,7 @@
#include "thread.h"
#include "comm.h"
#include "symbol.h"
+#include "map.h"
#include "event.h"
#include "util.h"
#include "thread-stack.h"
diff --git a/tools/perf/util/drv_configs.c b/tools/perf/util/drv_configs.c
deleted file mode 100644
index eec754243f4d..000000000000
--- a/tools/perf/util/drv_configs.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * drv_configs.h: Interface to apply PMU specific configuration
- * Copyright (c) 2016-2018, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include "drv_configs.h"
-#include "evlist.h"
-#include "evsel.h"
-#include "pmu.h"
-#include <errno.h>
-
-static int
-perf_evsel__apply_drv_configs(struct perf_evsel *evsel,
- struct perf_evsel_config_term **err_term)
-{
- bool found = false;
- int err = 0;
- struct perf_evsel_config_term *term;
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmu__scan(pmu)) != NULL)
- if (pmu->type == evsel->attr.type) {
- found = true;
- break;
- }
-
- list_for_each_entry(term, &evsel->config_terms, list) {
- if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
- continue;
-
- /*
- * We have a configuration term, report an error if we
- * can't find the PMU or if the PMU driver doesn't support
- * cmd line driver configuration.
- */
- if (!found || !pmu->set_drv_config) {
- err = -EINVAL;
- *err_term = term;
- break;
- }
-
- err = pmu->set_drv_config(term);
- if (err) {
- *err_term = term;
- break;
- }
- }
-
- return err;
-}
-
-int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
- struct perf_evsel **err_evsel,
- struct perf_evsel_config_term **err_term)
-{
- struct perf_evsel *evsel;
- int err = 0;
-
- evlist__for_each_entry(evlist, evsel) {
- err = perf_evsel__apply_drv_configs(evsel, err_term);
- if (err) {
- *err_evsel = evsel;
- break;
- }
- }
-
- return err;
-}
diff --git a/tools/perf/util/drv_configs.h b/tools/perf/util/drv_configs.h
deleted file mode 100644
index 32bc9babc2e0..000000000000
--- a/tools/perf/util/drv_configs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * drv_configs.h: Interface to apply PMU specific configuration
- * Copyright (c) 2016-2018, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef __PERF_DRV_CONFIGS_H
-#define __PERF_DRV_CONFIGS_H
-
-#include "drv_configs.h"
-#include "evlist.h"
-#include "evsel.h"
-
-int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
- struct perf_evsel **err_evsel,
- struct perf_evsel_config_term **term);
-#endif
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 62c8cf622607..ba58ba603b69 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -8,8 +8,11 @@
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include "compress.h"
+#include "namespaces.h"
#include "path.h"
+#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
@@ -1195,10 +1198,10 @@ struct dso *dso__new(const char *name)
strcpy(dso->name, name);
dso__set_long_name(dso, dso->name, false);
dso__set_short_name(dso, dso->name, false);
- dso->symbols = dso->symbol_names = RB_ROOT;
+ dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
dso->data.cache = RB_ROOT;
- dso->inlined_nodes = RB_ROOT;
- dso->srclines = RB_ROOT;
+ dso->inlined_nodes = RB_ROOT_CACHED;
+ dso->srclines = RB_ROOT_CACHED;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1467,7 +1470,7 @@ size_t dso__fprintf(struct dso *dso, FILE *fp)
ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
ret += dso__fprintf_buildid(dso, fp);
ret += fprintf(fp, ")\n");
- for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
ret += symbol__fprintf(pos, fp);
}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 8c8a7abe809d..bb417c54c25a 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -7,13 +7,14 @@
#include <linux/rbtree.h>
#include <sys/types.h>
#include <stdbool.h>
+#include <stdio.h>
#include "rwsem.h"
-#include <linux/types.h>
#include <linux/bitops.h>
-#include "map.h"
-#include "namespaces.h"
#include "build-id.h"
+struct machine;
+struct map;
+
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
@@ -140,10 +141,10 @@ struct dso {
struct list_head node;
struct rb_node rb_node; /* rbtree node sorted by long name */
struct rb_root *root; /* root of rbtree that rb_node is in */
- struct rb_root symbols;
- struct rb_root symbol_names;
- struct rb_root inlined_nodes;
- struct rb_root srclines;
+ struct rb_root_cached symbols;
+ struct rb_root_cached symbol_names;
+ struct rb_root_cached inlined_nodes;
+ struct rb_root_cached srclines;
struct {
u64 addr;
struct symbol *symbol;
@@ -235,7 +236,7 @@ bool dso__loaded(const struct dso *dso);
static inline bool dso__has_symbols(const struct dso *dso)
{
- return !RB_EMPTY_ROOT(&dso->symbols);
+ return !RB_EMPTY_ROOT(&dso->symbols.rb_root);
}
bool dso__sorted_by_name(const struct dso *dso);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 937a5a4f71cc..ba7be74fad6e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -21,9 +21,13 @@
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
+#include "map.h"
+#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
+#include "session.h"
+#include "bpf-event.h"
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
@@ -45,6 +49,8 @@ static const char *perf_event__names[] = {
[PERF_RECORD_SWITCH] = "SWITCH",
[PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
[PERF_RECORD_NAMESPACES] = "NAMESPACES",
+ [PERF_RECORD_KSYMBOL] = "KSYMBOL",
+ [PERF_RECORD_BPF_EVENT] = "BPF_EVENT",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
@@ -1329,6 +1335,22 @@ int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
return machine__process_switch_event(machine, event);
}
+int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_ksymbol(machine, event, sample);
+}
+
+int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_bpf_event(machine, event, sample);
+}
+
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
@@ -1461,6 +1483,21 @@ static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}
+size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " ksymbol event with addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
+ event->ksymbol_event.addr, event->ksymbol_event.len,
+ event->ksymbol_event.ksym_type,
+ event->ksymbol_event.flags, event->ksymbol_event.name);
+}
+
+size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " bpf event with type %u, flags %u, id %u\n",
+ event->bpf_event.type, event->bpf_event.flags,
+ event->bpf_event.id);
+}
+
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -1496,6 +1533,12 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_LOST:
ret += perf_event__fprintf_lost(event, fp);
break;
+ case PERF_RECORD_KSYMBOL:
+ ret += perf_event__fprintf_ksymbol(event, fp);
+ break;
+ case PERF_RECORD_BPF_EVENT:
+ ret += perf_event__fprintf_bpf_event(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index eb95f3384958..36ae7e92dab1 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -5,6 +5,7 @@
#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>
+#include <linux/bpf.h>
#include "../perf.h"
#include "build-id.h"
@@ -84,6 +85,29 @@ struct throttle_event {
u64 stream_id;
};
+#ifndef KSYM_NAME_LEN
+#define KSYM_NAME_LEN 256
+#endif
+
+struct ksymbol_event {
+ struct perf_event_header header;
+ u64 addr;
+ u32 len;
+ u16 ksym_type;
+ u16 flags;
+ char name[KSYM_NAME_LEN];
+};
+
+struct bpf_event {
+ struct perf_event_header header;
+ u16 type;
+ u16 flags;
+ u32 id;
+
+ /* for bpf_prog types */
+ u8 tag[BPF_TAG_SIZE]; // prog tag
+};
+
#define PERF_SAMPLE_MASK \
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
@@ -137,26 +161,7 @@ struct ip_callchain {
u64 ips[0];
};
-struct branch_flags {
- u64 mispred:1;
- u64 predicted:1;
- u64 in_tx:1;
- u64 abort:1;
- u64 cycles:16;
- u64 type:4;
- u64 reserved:40;
-};
-
-struct branch_entry {
- u64 from;
- u64 to;
- struct branch_flags flags;
-};
-
-struct branch_stack {
- u64 nr;
- struct branch_entry entries[0];
-};
+struct branch_stack;
enum {
PERF_IP_FLAG_BRANCH = 1ULL << 0,
@@ -527,8 +532,9 @@ struct auxtrace_error_event {
u32 cpu;
u32 pid;
u32 tid;
- u32 reserved__; /* For alignment */
+ u32 fmt;
u64 ip;
+ u64 time;
char msg[MAX_AUXTRACE_ERROR_MSG];
};
@@ -651,6 +657,8 @@ union perf_event {
struct stat_round_event stat_round;
struct time_conv_event time_conv;
struct feature_event feat;
+ struct ksymbol_event ksymbol_event;
+ struct bpf_event bpf_event;
};
void perf_event__print_totals(void);
@@ -748,6 +756,14 @@ int perf_event__process_exit(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
+int perf_event__process_ksymbol(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_bpf_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
@@ -811,6 +827,8 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
int kallsyms__get_function_start(const char *kallsyms_filename,
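
The new PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT records carry the ksymbol_event and bpf_event payloads defined above. The sketch below is an illustrative consumer that switches on the record header and reads only the fields introduced in this patch; it assumes the usual event.h and <inttypes.h> includes and is not perf's own printer.

/* Illustrative consumer of the two record types added above. */
static size_t sketch_print_new_records(union perf_event *event, FILE *fp)
{
        switch (event->header.type) {
        case PERF_RECORD_KSYMBOL:
                return fprintf(fp, "ksymbol %s @ %#" PRIx64 " len %u type %u\n",
                               event->ksymbol_event.name,
                               event->ksymbol_event.addr,
                               event->ksymbol_event.len,
                               event->ksymbol_event.ksym_type);
        case PERF_RECORD_BPF_EVENT:
                return fprintf(fp, "bpf event type %u flags %u id %u\n",
                               event->bpf_event.type,
                               event->bpf_event.flags,
                               event->bpf_event.id);
        default:
                return 0;
        }
}
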
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8c902276d4b4..08cedb643ea6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1022,7 +1022,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks)
+ bool auxtrace_overwrite, int nr_cblocks, int affinity)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1032,7 +1032,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks };
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1064,7 +1064,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 868294491194..744906dd4887 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -49,6 +49,9 @@ struct perf_evlist {
struct perf_evsel *selected;
struct events_stats stats;
struct perf_env *env;
+ void (*trace_event_sample_raw)(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample);
u64 first_sample_time;
u64 last_sample_time;
};
@@ -162,7 +165,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks);
+ bool auxtrace_overwrite, int nr_cblocks, int affinity);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
@@ -314,5 +317,4 @@ void perf_evlist__force_leader(struct perf_evlist *evlist);
struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
struct perf_evsel *evsel);
-
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index dbc0466db368..dfe2958e6287 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -956,6 +956,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->sample_freq = 0;
attr->sample_period = 0;
attr->write_backward = 0;
+
+ /*
+ * We don't get a sample for slave events, we make them
+ * when delivering the group leader sample. Set the slave
+ * event to follow the master sample_type to ease up
+ * reporting.
+ */
+ attr->sample_type = leader->attr.sample_type;
}
if (opts->no_samples)
@@ -1035,6 +1043,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
+ attr->ksymbol = track && !perf_missing_features.ksymbol;
+ attr->bpf_event = track && opts->bpf_event &&
+ !perf_missing_features.bpf_event;
if (opts->record_namespaces)
attr->namespaces = track;
@@ -1652,6 +1663,8 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
PRINT_ATTRf(namespaces, p_unsigned);
+ PRINT_ATTRf(ksymbol, p_unsigned);
+ PRINT_ATTRf(bpf_event, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -1811,6 +1824,10 @@ fallback_missing_features:
PERF_SAMPLE_BRANCH_NO_CYCLES);
if (perf_missing_features.group_read && evsel->attr.inherit)
evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
+ if (perf_missing_features.ksymbol)
+ evsel->attr.ksymbol = 0;
+ if (perf_missing_features.bpf_event)
+ evsel->attr.bpf_event = 0;
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
@@ -1930,7 +1947,15 @@ try_fallback:
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
+ if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
+ perf_missing_features.bpf_event = true;
+ pr_debug2("switching off bpf_event\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
+ perf_missing_features.ksymbol = true;
+ pr_debug2("switching off ksymbol\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
perf_missing_features.write_backward = true;
pr_debug2("switching off write_backward\n");
goto out_close;
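
The fallback chain above probes perf_event_attr bits newest-first: when the open fails, clear the most recently added feature the attr still asks for and retry. The standalone sketch below shows that technique with made-up feature flags and a fake try_open(); only the ordering idea comes from the hunk.

/*
 * Standalone sketch of the "probe newest feature first, then fall
 * back" pattern; the feature names and try_open() are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_attr {
        bool bpf_event;
        bool ksymbol;
        bool write_backward;
};

/* Pretend the running kernel only understands write_backward. */
static int try_open(const struct sketch_attr *attr)
{
        return (attr->bpf_event || attr->ksymbol) ? -1 : 0;
}

static int sketch_open(struct sketch_attr *attr)
{
        for (;;) {
                if (!try_open(attr))
                        return 0;
                /* Drop features in the reverse order they were added. */
                if (attr->bpf_event) {
                        attr->bpf_event = false;
                        fprintf(stderr, "switching off bpf_event\n");
                } else if (attr->ksymbol) {
                        attr->ksymbol = false;
                        fprintf(stderr, "switching off ksymbol\n");
                } else {
                        return -1;      /* nothing left to drop */
                }
        }
}

int main(void)
{
        struct sketch_attr attr = { true, true, true };

        return sketch_open(&attr);
}
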
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 82a289ce8b0c..cc578e02e08f 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -8,7 +8,7 @@
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
-#include "symbol.h"
+#include "symbol_conf.h"
#include "cpumap.h"
#include "counts.h"
@@ -168,6 +168,8 @@ struct perf_missing_features {
bool lbr_flags;
bool write_backward;
bool group_read;
+ bool ksymbol;
+ bool bpf_event;
};
extern struct perf_missing_features perf_missing_features;
diff --git a/tools/perf/util/find-vdso-map.c b/tools/perf/util/find-map.c
index d7823e3508fc..7b2300588ece 100644
--- a/tools/perf/util/find-vdso-map.c
+++ b/tools/perf/util/find-map.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-static int find_vdso_map(void **start, void **end)
+static int find_map(void **start, void **end, const char *name)
{
FILE *maps;
char line[128];
@@ -7,7 +7,7 @@ static int find_vdso_map(void **start, void **end)
maps = fopen("/proc/self/maps", "r");
if (!maps) {
- fprintf(stderr, "vdso: cannot open maps\n");
+ fprintf(stderr, "cannot open maps\n");
return -1;
}
@@ -21,8 +21,7 @@ static int find_vdso_map(void **start, void **end)
if (m < 0)
continue;
- if (!strncmp(&line[m], VDSO__MAP_NAME,
- sizeof(VDSO__MAP_NAME) - 1))
+ if (!strncmp(&line[m], name, strlen(name)))
found = 1;
}
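
find_map() now matches any mapping name in /proc/self/maps instead of only VDSO__MAP_NAME. The standalone sketch below locates a named mapping the same way, with deliberately simplified parsing; it is not the perf helper.

/* Standalone sketch: find a named mapping in /proc/self/maps. */
#include <stdio.h>
#include <string.h>

static int sketch_find_map(unsigned long *start, unsigned long *end,
                           const char *name)
{
        char line[256];
        FILE *maps;
        int found = 0;

        maps = fopen("/proc/self/maps", "r");
        if (!maps)
                return -1;

        while (!found && fgets(line, sizeof(line), maps)) {
                if (sscanf(line, "%lx-%lx", start, end) != 2)
                        continue;
                /* the mapping name, if any, is the last column */
                if (strstr(line, name))
                        found = 1;
        }

        fclose(maps);
        return found ? 0 : -1;
}

int main(void)
{
        unsigned long start, end;

        if (!sketch_find_map(&start, &end, "[vdso]"))
                printf("[vdso] at %lx-%lx\n", start, end);
        return 0;
}
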
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index dec6d218c31c..01b324c275b9 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -39,6 +39,7 @@
#include "tool.h"
#include "time-utils.h"
#include "units.h"
+#include "cputopo.h"
#include "sane_ctype.h"
@@ -526,17 +527,11 @@ static int write_event_desc(struct feat_fd *ff,
static int write_cmdline(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- char buf[MAXPATHLEN];
- u32 n;
- int i, ret;
+ char pbuf[MAXPATHLEN], *buf;
+ int i, ret, n;
/* actual path to perf binary */
- ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
- if (ret <= 0)
- return -1;
-
- /* readlink() does not add null termination */
- buf[ret] = '\0';
+ buf = perf_exe(pbuf, MAXPATHLEN);
/* account for binary path */
n = perf_env.nr_cmdline + 1;
@@ -557,160 +552,15 @@ static int write_cmdline(struct feat_fd *ff,
return 0;
}
-#define CORE_SIB_FMT \
- "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
-#define THRD_SIB_FMT \
- "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
-
-struct cpu_topo {
- u32 cpu_nr;
- u32 core_sib;
- u32 thread_sib;
- char **core_siblings;
- char **thread_siblings;
-};
-
-static int build_cpu_topo(struct cpu_topo *tp, int cpu)
-{
- FILE *fp;
- char filename[MAXPATHLEN];
- char *buf = NULL, *p;
- size_t len = 0;
- ssize_t sret;
- u32 i = 0;
- int ret = -1;
-
- sprintf(filename, CORE_SIB_FMT, cpu);
- fp = fopen(filename, "r");
- if (!fp)
- goto try_threads;
-
- sret = getline(&buf, &len, fp);
- fclose(fp);
- if (sret <= 0)
- goto try_threads;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- for (i = 0; i < tp->core_sib; i++) {
- if (!strcmp(buf, tp->core_siblings[i]))
- break;
- }
- if (i == tp->core_sib) {
- tp->core_siblings[i] = buf;
- tp->core_sib++;
- buf = NULL;
- len = 0;
- }
- ret = 0;
-
-try_threads:
- sprintf(filename, THRD_SIB_FMT, cpu);
- fp = fopen(filename, "r");
- if (!fp)
- goto done;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- for (i = 0; i < tp->thread_sib; i++) {
- if (!strcmp(buf, tp->thread_siblings[i]))
- break;
- }
- if (i == tp->thread_sib) {
- tp->thread_siblings[i] = buf;
- tp->thread_sib++;
- buf = NULL;
- }
- ret = 0;
-done:
- if(fp)
- fclose(fp);
- free(buf);
- return ret;
-}
-
-static void free_cpu_topo(struct cpu_topo *tp)
-{
- u32 i;
-
- if (!tp)
- return;
-
- for (i = 0 ; i < tp->core_sib; i++)
- zfree(&tp->core_siblings[i]);
-
- for (i = 0 ; i < tp->thread_sib; i++)
- zfree(&tp->thread_siblings[i]);
-
- free(tp);
-}
-
-static struct cpu_topo *build_cpu_topology(void)
-{
- struct cpu_topo *tp = NULL;
- void *addr;
- u32 nr, i;
- size_t sz;
- long ncpus;
- int ret = -1;
- struct cpu_map *map;
-
- ncpus = cpu__max_present_cpu();
-
- /* build online CPU map */
- map = cpu_map__new(NULL);
- if (map == NULL) {
- pr_debug("failed to get system cpumap\n");
- return NULL;
- }
-
- nr = (u32)(ncpus & UINT_MAX);
-
- sz = nr * sizeof(char *);
- addr = calloc(1, sizeof(*tp) + 2 * sz);
- if (!addr)
- goto out_free;
-
- tp = addr;
- tp->cpu_nr = nr;
- addr += sizeof(*tp);
- tp->core_siblings = addr;
- addr += sz;
- tp->thread_siblings = addr;
-
- for (i = 0; i < nr; i++) {
- if (!cpu_map__has(map, i))
- continue;
-
- ret = build_cpu_topo(tp, i);
- if (ret < 0)
- break;
- }
-
-out_free:
- cpu_map__put(map);
- if (ret) {
- free_cpu_topo(tp);
- tp = NULL;
- }
- return tp;
-}
static int write_cpu_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- struct cpu_topo *tp;
+ struct cpu_topology *tp;
u32 i;
int ret, j;
- tp = build_cpu_topology();
+ tp = cpu_topology__new();
if (!tp)
return -1;
@@ -748,7 +598,7 @@ static int write_cpu_topology(struct feat_fd *ff,
return ret;
}
done:
- free_cpu_topo(tp);
+ cpu_topology__delete(tp);
return ret;
}
@@ -783,112 +633,45 @@ static int write_total_mem(struct feat_fd *ff,
return ret;
}
-static int write_topo_node(struct feat_fd *ff, int node)
-{
- char str[MAXPATHLEN];
- char field[32];
- char *buf = NULL, *p;
- size_t len = 0;
- FILE *fp;
- u64 mem_total, mem_free, mem;
- int ret = -1;
-
- sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
- fp = fopen(str, "r");
- if (!fp)
- return -1;
-
- while (getline(&buf, &len, fp) > 0) {
- /* skip over invalid lines */
- if (!strchr(buf, ':'))
- continue;
- if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
- goto done;
- if (!strcmp(field, "MemTotal:"))
- mem_total = mem;
- if (!strcmp(field, "MemFree:"))
- mem_free = mem;
- }
-
- fclose(fp);
- fp = NULL;
-
- ret = do_write(ff, &mem_total, sizeof(u64));
- if (ret)
- goto done;
-
- ret = do_write(ff, &mem_free, sizeof(u64));
- if (ret)
- goto done;
-
- ret = -1;
- sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
-
- fp = fopen(str, "r");
- if (!fp)
- goto done;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- ret = do_write_string(ff, buf);
-done:
- free(buf);
- if (fp)
- fclose(fp);
- return ret;
-}
-
static int write_numa_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- char *buf = NULL;
- size_t len = 0;
- FILE *fp;
- struct cpu_map *node_map = NULL;
- char *c;
- u32 nr, i, j;
+ struct numa_topology *tp;
int ret = -1;
+ u32 i;
- fp = fopen("/sys/devices/system/node/online", "r");
- if (!fp)
- return -1;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
+ tp = numa_topology__new();
+ if (!tp)
+ return -ENOMEM;
- c = strchr(buf, '\n');
- if (c)
- *c = '\0';
+ ret = do_write(ff, &tp->nr, sizeof(u32));
+ if (ret < 0)
+ goto err;
- node_map = cpu_map__new(buf);
- if (!node_map)
- goto done;
+ for (i = 0; i < tp->nr; i++) {
+ struct numa_topology_node *n = &tp->nodes[i];
- nr = (u32)node_map->nr;
+ ret = do_write(ff, &n->node, sizeof(u32));
+ if (ret < 0)
+ goto err;
- ret = do_write(ff, &nr, sizeof(nr));
- if (ret < 0)
- goto done;
+ ret = do_write(ff, &n->mem_total, sizeof(u64));
+ if (ret)
+ goto err;
- for (i = 0; i < nr; i++) {
- j = (u32)node_map->map[i];
- ret = do_write(ff, &j, sizeof(j));
- if (ret < 0)
- break;
+ ret = do_write(ff, &n->mem_free, sizeof(u64));
+ if (ret)
+ goto err;
- ret = write_topo_node(ff, i);
+ ret = do_write_string(ff, n->cpus);
if (ret < 0)
- break;
+ goto err;
}
-done:
- free(buf);
- fclose(fp);
- cpu_map__put(node_map);
+
+ ret = 0;
+
+err:
+ numa_topology__delete(tp);
return ret;
}
@@ -1042,11 +825,9 @@ static int write_cpuid(struct feat_fd *ff,
int ret;
ret = get_cpuid(buffer, sizeof(buffer));
- if (!ret)
- goto write_it;
+ if (ret)
+ return -1;
- return -1;
-write_it:
return do_write_string(ff, buffer);
}
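
write_numa_topology() now emits, via do_write()/do_write_string(), a node count followed by one record per node: node id (u32), mem_total (u64), mem_free (u64) and the cpu list string. The sketch below spells out that layout; the field names come from the hunk, while the struct definition and the dump loop are assumptions about what cputopo.h provides.

/*
 * Sketch of the per-node record written above; field names match the
 * hunk, the struct shape and dump loop are illustrative guesses.
 */
#include <stdio.h>
#include <stdint.h>

struct sketch_numa_node {
        uint32_t        node;           /* node id */
        uint64_t        mem_total;      /* MemTotal, written as u64 */
        uint64_t        mem_free;       /* MemFree, written as u64 */
        char            *cpus;          /* cpulist string */
};

struct sketch_numa_topology {
        uint32_t                 nr;
        struct sketch_numa_node *nodes;
};

/* Print the fields in exactly the order write_numa_topology() emits them. */
static void sketch_dump(const struct sketch_numa_topology *tp)
{
        uint32_t i;

        printf("nr nodes: %u\n", tp->nr);
        for (i = 0; i < tp->nr; i++)
                printf("node %u: total %llu, free %llu, cpus %s\n",
                       tp->nodes[i].node,
                       (unsigned long long)tp->nodes[i].mem_total,
                       (unsigned long long)tp->nodes[i].mem_free,
                       tp->nodes[i].cpus);
}
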
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 8aad8330e392..669f961316f0 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include "callchain.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
@@ -11,6 +12,7 @@
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
+#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
@@ -209,7 +211,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
int row = 0;
@@ -296,7 +298,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
if (!he->leaf) {
struct hist_entry *child;
- struct rb_node *node = rb_first(&he->hroot_out);
+ struct rb_node *node = rb_first_cached(&he->hroot_out);
while (node) {
child = rb_entry(node, struct hist_entry, rb_node);
node = rb_next(node);
@@ -311,8 +313,8 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
if (he->parent_he) {
root_in = &he->parent_he->hroot_in;
@@ -325,8 +327,8 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
root_out = &hists->entries;
}
- rb_erase(&he->rb_node_in, root_in);
- rb_erase(&he->rb_node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
+ rb_erase_cached(&he->rb_node, root_out);
--hists->nr_entries;
if (!he->filtered)
@@ -337,7 +339,7 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@@ -353,7 +355,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
void hists__delete_entries(struct hists *hists)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@@ -435,8 +437,8 @@ static int hist_entry__init(struct hist_entry *he,
}
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
- he->hroot_in = RB_ROOT;
- he->hroot_out = RB_ROOT;
+ he->hroot_in = RB_ROOT_CACHED;
+ he->hroot_out = RB_ROOT_CACHED;
if (!symbol_conf.report_hierarchy)
he->leaf = true;
@@ -513,8 +515,9 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
int64_t cmp;
u64 period = entry->stat.period;
u64 weight = entry->stat.weight;
+ bool leftmost = true;
- p = &hists->entries_in->rb_node;
+ p = &hists->entries_in->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@@ -557,8 +560,10 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(entry, sample_self);
@@ -570,7 +575,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, hists->entries_in);
+ rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
if (sample_self)
he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
@@ -1279,16 +1284,17 @@ static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he,
struct hist_entry *parent_he,
struct perf_hpp_list *hpp_list)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter, *new;
struct perf_hpp_fmt *fmt;
int64_t cmp;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1308,8 +1314,10 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
new = hist_entry__new(he, true);
@@ -1343,12 +1351,12 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
}
rb_link_node(&new->rb_node_in, parent, p);
- rb_insert_color(&new->rb_node_in, root);
+ rb_insert_color_cached(&new->rb_node_in, root, leftmost);
return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he)
{
struct perf_hpp_list_node *node;
@@ -1395,13 +1403,14 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
}
static int hists__collapse_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
+ bool leftmost = true;
if (symbol_conf.report_hierarchy)
return hists__hierarchy_insert_entry(hists, root, he);
@@ -1432,19 +1441,21 @@ static int hists__collapse_insert_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
return 1;
}
-struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
+struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
pthread_mutex_lock(&hists->lock);
@@ -1467,7 +1478,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
int ret;
@@ -1479,7 +1490,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
root = hists__get_rotate_entries_in(hists);
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next) {
if (session_done())
@@ -1487,7 +1498,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
- rb_erase(&n->rb_node_in, root);
+ rb_erase_cached(&n->rb_node_in, root);
ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
if (ret < 0)
return -1;
@@ -1558,7 +1569,7 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
struct rb_node *node;
struct hist_entry *he;
- node = rb_first(&hists->entries);
+ node = rb_first_cached(&hists->entries);
hists->stats.total_period = 0;
hists->stats.total_non_filtered_period = 0;
@@ -1578,13 +1589,14 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
}
}
-static void hierarchy_insert_output_entry(struct rb_root *root,
+static void hierarchy_insert_output_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1592,12 +1604,14 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
if (hist_entry__sort(he, iter) > 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color_cached(&he->rb_node, root, leftmost);
/* update column width of dynamic entry */
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
@@ -1608,16 +1622,16 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
static void hists__hierarchy_output_resort(struct hists *hists,
struct ui_progress *prog,
- struct rb_root *root_in,
- struct rb_root *root_out,
+ struct rb_root_cached *root_in,
+ struct rb_root_cached *root_out,
u64 min_callchain_hits,
bool use_callchain)
{
struct rb_node *node;
struct hist_entry *he;
- *root_out = RB_ROOT;
- node = rb_first(root_in);
+ *root_out = RB_ROOT_CACHED;
+ node = rb_first_cached(root_in);
while (node) {
he = rb_entry(node, struct hist_entry, rb_node_in);
@@ -1660,15 +1674,16 @@ static void hists__hierarchy_output_resort(struct hists *hists,
}
}
-static void __hists__insert_output_entry(struct rb_root *entries,
+static void __hists__insert_output_entry(struct rb_root_cached *entries,
struct hist_entry *he,
u64 min_callchain_hits,
bool use_callchain)
{
- struct rb_node **p = &entries->rb_node;
+ struct rb_node **p = &entries->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
if (use_callchain) {
if (callchain_param.mode == CHAIN_GRAPH_REL) {
@@ -1689,12 +1704,14 @@ static void __hists__insert_output_entry(struct rb_root *entries,
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, entries);
+ rb_insert_color_cached(&he->rb_node, entries, leftmost);
perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
if (perf_hpp__is_dynamic_entry(fmt) &&
@@ -1704,9 +1721,10 @@ static void __hists__insert_output_entry(struct rb_root *entries,
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
- bool use_callchain, hists__resort_cb_t cb)
+ bool use_callchain, hists__resort_cb_t cb,
+ void *cb_arg)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
u64 callchain_total;
@@ -1736,14 +1754,14 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
else
root = hists->entries_in;
- next = rb_first(root);
- hists->entries = RB_ROOT;
+ next = rb_first_cached(root);
+ hists->entries = RB_ROOT_CACHED;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
- if (cb && cb(n))
+ if (cb && cb(n, cb_arg))
continue;
__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
@@ -1757,7 +1775,8 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
}
}
-void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg)
{
bool use_callchain;
@@ -1768,18 +1787,23 @@ void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *pro
use_callchain |= symbol_conf.show_branchflag_count;
- output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
+ output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
+}
+
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+{
+ return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
- output_resort(hists, prog, symbol_conf.use_callchain, NULL);
+ output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
hists__resort_cb_t cb)
{
- output_resort(hists, prog, symbol_conf.use_callchain, cb);
+ output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
@@ -1798,7 +1822,7 @@ struct rb_node *rb_hierarchy_last(struct rb_node *node)
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
while (can_goto_child(he, HMD_NORMAL)) {
- node = rb_last(&he->hroot_out);
+ node = rb_last(&he->hroot_out.rb_root);
he = rb_entry(node, struct hist_entry, rb_node);
}
return node;
@@ -1809,7 +1833,7 @@ struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_di
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
if (can_goto_child(he, hmd))
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
else
node = rb_next(node);
@@ -1847,7 +1871,7 @@ bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
if (he->leaf)
return false;
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
child = rb_entry(node, struct hist_entry, rb_node);
while (node && child->filtered) {
@@ -1965,7 +1989,7 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (filter(hists, h))
@@ -1975,13 +1999,15 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
}
}
-static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
+static void resort_filtered_entry(struct rb_root_cached *root,
+ struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
- struct rb_root new_root = RB_ROOT;
+ struct rb_root_cached new_root = RB_ROOT_CACHED;
struct rb_node *nd;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1989,22 +2015,24 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color_cached(&he->rb_node, root, leftmost);
if (he->leaf || he->filtered)
return;
- nd = rb_first(&he->hroot_out);
+ nd = rb_first_cached(&he->hroot_out);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
- rb_erase(&h->rb_node, &he->hroot_out);
+ rb_erase_cached(&h->rb_node, &he->hroot_out);
resort_filtered_entry(&new_root, h);
}
@@ -2015,14 +2043,14 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
struct rb_node *nd;
- struct rb_root new_root = RB_ROOT;
+ struct rb_root_cached new_root = RB_ROOT_CACHED;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- nd = rb_first(&hists->entries);
+ nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
int ret;
@@ -2066,12 +2094,12 @@ static void hists__filter_hierarchy(struct hists *hists, int type, const void *a
 * resort output after applying a new filter, since a filter in a lower
 * hierarchy can change periods in an upper hierarchy.
*/
- nd = rb_first(&hists->entries);
+ nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
- rb_erase(&h->rb_node, &hists->entries);
+ rb_erase_cached(&h->rb_node, &hists->entries);
resort_filtered_entry(&new_root, h);
}
@@ -2140,18 +2168,19 @@ void hists__inc_nr_samples(struct hists *hists, bool filtered)
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *pair)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int64_t cmp;
+ bool leftmost = true;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@@ -2164,8 +2193,10 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(pair, true);
@@ -2175,7 +2206,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (symbol_conf.cumulate_callchain)
memset(he->stat_acc, 0, sizeof(he->stat));
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
hists__inc_stats(hists, he);
he->dummy = true;
}
@@ -2184,15 +2215,16 @@ out:
}
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *pair)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p != NULL) {
int64_t cmp = 0;
@@ -2209,14 +2241,16 @@ static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(pair, true);
if (he) {
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
he->dummy = true;
he->hists = hists;
@@ -2233,9 +2267,9 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
struct rb_node *n;
if (hists__has(hists, need_collapse))
- n = hists->entries_collapsed.rb_node;
+ n = hists->entries_collapsed.rb_root.rb_node;
else
- n = hists->entries_in->rb_node;
+ n = hists->entries_in->rb_root.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
@@ -2252,10 +2286,10 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
return NULL;
}
-static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
+static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node *n = root->rb_node;
+ struct rb_node *n = root->rb_root.rb_node;
while (n) {
struct hist_entry *iter;
@@ -2280,13 +2314,13 @@ static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
return NULL;
}
-static void hists__match_hierarchy(struct rb_root *leader_root,
- struct rb_root *other_root)
+static void hists__match_hierarchy(struct rb_root_cached *leader_root,
+ struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *pair;
- for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_hierarchy_entry(other_root, pos);
@@ -2302,7 +2336,7 @@ static void hists__match_hierarchy(struct rb_root *leader_root,
*/
void hists__match(struct hists *leader, struct hists *other)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@@ -2317,7 +2351,7 @@ void hists__match(struct hists *leader, struct hists *other)
else
root = leader->entries_in;
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_entry(other, pos);
@@ -2328,13 +2362,13 @@ void hists__match(struct hists *leader, struct hists *other)
static int hists__link_hierarchy(struct hists *leader_hists,
struct hist_entry *parent,
- struct rb_root *leader_root,
- struct rb_root *other_root)
+ struct rb_root_cached *leader_root,
+ struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *leader;
- for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (hist_entry__has_pairs(pos)) {
@@ -2377,7 +2411,7 @@ static int hists__link_hierarchy(struct hists *leader_hists,
*/
int hists__link(struct hists *leader, struct hists *other)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@@ -2393,7 +2427,7 @@ int hists__link(struct hists *leader, struct hists *other)
else
root = other->entries_in;
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (!hist_entry__has_pairs(pos)) {
@@ -2566,10 +2600,10 @@ int perf_hist_config(const char *var, const char *value)
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
memset(hists, 0, sizeof(*hists));
- hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
hists->entries_in = &hists->entries_in_array[0];
- hists->entries_collapsed = RB_ROOT;
- hists->entries = RB_ROOT;
+ hists->entries_collapsed = RB_ROOT_CACHED;
+ hists->entries = RB_ROOT_CACHED;
pthread_mutex_init(&hists->lock, NULL);
hists->socket_filter = -1;
hists->hpp_list = hpp_list;
@@ -2577,14 +2611,14 @@ int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
return 0;
}
-static void hists__delete_remaining_entries(struct rb_root *root)
+static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
struct rb_node *node;
struct hist_entry *he;
- while (!RB_EMPTY_ROOT(root)) {
- node = rb_first(root);
- rb_erase(node, root);
+ while (!RB_EMPTY_ROOT(&root->rb_root)) {
+ node = rb_first_cached(root);
+ rb_erase_cached(node, root);
he = rb_entry(node, struct hist_entry, rb_node_in);
hist_entry__delete(he);
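
Every insertion site converted above follows the same cached-rbtree idiom: walk down recording the parent, clear a leftmost flag the first time the walk goes right, then rb_link_node() plus rb_insert_color_cached(). The condensed sketch below shows that idiom with a trivial integer key instead of the hist_entry sort keys, assuming the rb_*_cached API from tools/include/linux/rbtree.h that the hunks already rely on.

/* Condensed sketch of the rb_root_cached insertion idiom used above. */
#include <stdbool.h>
#include <stddef.h>
#include <linux/rbtree.h>

struct sketch_entry {
        struct rb_node  rb_node;
        int             key;
};

static void sketch_insert(struct rb_root_cached *root, struct sketch_entry *he)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct sketch_entry *iter;
        bool leftmost = true;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct sketch_entry, rb_node);

                if (he->key < iter->key) {
                        p = &(*p)->rb_left;
                } else {
                        p = &(*p)->rb_right;
                        leftmost = false;       /* no longer the smallest */
                }
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color_cached(&he->rb_node, root, leftmost);
}
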
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 664b5eda8d51..4af27fbab24f 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -2,9 +2,9 @@
#ifndef __PERF_HIST_H
#define __PERF_HIST_H
+#include <linux/rbtree.h>
#include <linux/types.h>
#include <pthread.h>
-#include "callchain.h"
#include "evsel.h"
#include "header.h"
#include "color.h"
@@ -13,6 +13,9 @@
struct hist_entry;
struct hist_entry_ops;
struct addr_location;
+struct map_symbol;
+struct mem_info;
+struct branch_info;
struct symbol;
enum hist_filter {
@@ -70,10 +73,10 @@ struct thread;
struct dso;
struct hists {
- struct rb_root entries_in_array[2];
- struct rb_root *entries_in;
- struct rb_root entries;
- struct rb_root entries_collapsed;
+ struct rb_root_cached entries_in_array[2];
+ struct rb_root_cached *entries_in;
+ struct rb_root_cached entries;
+ struct rb_root_cached entries_collapsed;
u64 nr_entries;
u64 nr_non_filtered_entries;
u64 callchain_period;
@@ -160,8 +163,10 @@ int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_fmt *fmt, int printed);
void hist_entry__delete(struct hist_entry *he);
-typedef int (*hists__resort_cb_t)(struct hist_entry *he);
+typedef int (*hists__resort_cb_t)(struct hist_entry *he, void *arg);
+void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg);
void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
@@ -230,7 +235,7 @@ static __pure inline bool hists__has_callchains(struct hists *hists)
int hists__init(void);
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
-struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
+struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists);
struct perf_hpp {
char *buf;
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ee6ca65f81f4..0c0180c67574 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -27,6 +27,8 @@
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "session.h"
#include "util.h"
#include "thread.h"
@@ -142,7 +144,7 @@ static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
- sample->tid, 0, "Lost trace data");
+ sample->tid, 0, "Lost trace data", sample->time);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
@@ -372,7 +374,7 @@ static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
- "Failed to get instruction");
+ "Failed to get instruction", 0);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 1b704fbea9de..23bf788f84b9 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -1,4 +1,4 @@
-libperf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
+perf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
inat_tables_script = util/intel-pt-decoder/gen-insn-attr-x86.awk
inat_tables_maps = util/intel-pt-decoder/x86-opcode-map.txt
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 4503f3ca45ab..6e03db142091 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -26,6 +26,7 @@
#include "../cache.h"
#include "../util.h"
+#include "../auxtrace.h"
#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
@@ -867,7 +868,7 @@ static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
ret = intel_pt_get_packet(decoder->buf, decoder->len,
&decoder->packet);
- if (ret == INTEL_PT_NEED_MORE_BYTES &&
+ if (ret == INTEL_PT_NEED_MORE_BYTES && BITS_PER_LONG == 32 &&
decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
ret = intel_pt_get_split_packet(decoder);
if (ret < 0)
@@ -1394,7 +1395,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
- decoder->cbr = 0;
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
@@ -2575,6 +2575,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
}
}
+#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
+
+/**
+ * adj_for_padding - adjust overlap to account for padding.
+ * @buf_b: second buffer
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ *
+ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
+ * accordingly.
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts
+ */
+static unsigned char *adj_for_padding(unsigned char *buf_b,
+ unsigned char *buf_a, size_t len_a)
+{
+ unsigned char *p = buf_b - MAX_PADDING;
+ unsigned char *q = buf_a + len_a - MAX_PADDING;
+ int i;
+
+ for (i = MAX_PADDING; i; i--, p++, q++) {
+ if (*p != *q)
+ break;
+ }
+
+ return p;
+}
+
/**
* intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
* using TSC.
@@ -2625,8 +2653,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
/* Same TSC, so buffers are consecutive */
if (!cmp && rem_b >= rem_a) {
+ unsigned char *start;
+
*consecutive = true;
- return buf_b + len_b - (rem_b - rem_a);
+ start = buf_b + len_b - (rem_b - rem_a);
+ return adj_for_padding(start, buf_a, len_a);
}
if (cmp < 0)
return buf_b; /* tsc_a < tsc_b => no overlap */
@@ -2689,7 +2720,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
found = memmem(buf_a, len_a, buf_b, len_a);
if (found) {
*consecutive = true;
- return buf_b + len_a;
+ return adj_for_padding(buf_b + len_a, buf_a, len_a);
}
/* Try again at next PSB in buffer 'a' */
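
The new adj_for_padding() helper exists because AUX records are padded for alignment, so the tail of the previous buffer may carry up to 7 bytes that are not real trace data and must not shift the computed overlap. A short hedged sketch of where those 0..7 bytes come from; the value 8 is assumed to match PERF_AUXTRACE_RECORD_ALIGNMENT, which the ../auxtrace.h include added above provides:

#include <stddef.h>

#define AUX_RECORD_ALIGNMENT 8	/* assumed equal to PERF_AUXTRACE_RECORD_ALIGNMENT */

/* Length of an AUX record once it has been padded for alignment. The
 * difference to the raw length, 0..7 bytes, is appended as padding and is
 * what MAX_PADDING in the decoder accounts for.
 */
static size_t aux_record_padded_len(size_t len)
{
	return (len + AUX_RECORD_ALIGNMENT - 1) &
	       ~((size_t)AUX_RECORD_ALIGNMENT - 1);
}

/* e.g. aux_record_padded_len(61) == 64, i.e. 3 padding bytes appended */
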
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 2e72373ec6df..3b497bab4324 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1411,7 +1411,7 @@ static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
union perf_event event;
char msg[MAX_AUXTRACE_ERROR_MSG];
@@ -1420,7 +1420,7 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg);
+ code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(pt->session, &event, NULL);
if (err)
@@ -1430,6 +1430,18 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
return err;
}
+static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
+ const struct intel_pt_state *state)
+{
+ struct intel_pt *pt = ptq->pt;
+ u64 tm = ptq->timestamp;
+
+ tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
+
+ return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
+ ptq->tid, state->from_ip, tm);
+}
+
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
struct auxtrace_queue *queue;
@@ -1676,10 +1688,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
intel_pt_next_tid(pt, ptq);
}
if (pt->synth_opts.errors) {
- err = intel_pt_synth_error(pt, state->err,
- ptq->cpu, ptq->pid,
- ptq->tid,
- state->from_ip);
+ err = intel_ptq_synth_error(ptq, state);
if (err)
return err;
}
@@ -1804,7 +1813,7 @@ static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
- sample->pid, sample->tid, 0);
+ sample->pid, sample->tid, 0, sample->time);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
index 85bab8735fa9..5c19ee001299 100644
--- a/tools/perf/util/intlist.h
+++ b/tools/perf/util/intlist.h
@@ -45,7 +45,7 @@ static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
/* For intlist iteration */
static inline struct int_node *intlist__first(struct intlist *ilist)
{
- struct rb_node *rn = rb_first(&ilist->rblist.entries);
+ struct rb_node *rn = rb_first_cached(&ilist->rblist.entries);
return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
}
static inline struct int_node *intlist__next(struct int_node *in)
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index bf249552a9b0..eda28d3570bc 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -2,6 +2,7 @@
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <errno.h>
+#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 7b1f06567521..1403dec189b4 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -3,12 +3,13 @@
#define __PERF_KVM_STAT_H
#include "../perf.h"
-#include "evsel.h"
-#include "evlist.h"
-#include "session.h"
#include "tool.h"
#include "stat.h"
+struct perf_evsel;
+struct perf_evlist;
+struct perf_session;
+
struct event_key {
#define INVALID_KEY (~0ULL)
u64 key;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6fcb3bce0442..61959aba7e27 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -10,6 +10,7 @@
#include "hist.h"
#include "machine.h"
#include "map.h"
+#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
@@ -21,6 +22,7 @@
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
+#include "bpf-event.h"
#include "sane_ctype.h"
#include <symbol/kallsyms.h>
@@ -41,7 +43,7 @@ static void machine__threads_init(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- threads->entries = RB_ROOT;
+ threads->entries = RB_ROOT_CACHED;
init_rwsem(&threads->lock);
threads->nr = 0;
INIT_LIST_HEAD(&threads->dead);
@@ -179,7 +181,7 @@ void machine__delete_threads(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
down_write(&threads->lock);
- nd = rb_first(&threads->entries);
+ nd = rb_first_cached(&threads->entries);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
@@ -222,7 +224,7 @@ void machine__delete(struct machine *machine)
void machines__init(struct machines *machines)
{
machine__init(&machines->host, "", HOST_KERNEL_ID);
- machines->guests = RB_ROOT;
+ machines->guests = RB_ROOT_CACHED;
}
void machines__exit(struct machines *machines)
@@ -234,9 +236,10 @@ void machines__exit(struct machines *machines)
struct machine *machines__add(struct machines *machines, pid_t pid,
const char *root_dir)
{
- struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *pos, *machine = malloc(sizeof(*machine));
+ bool leftmost = true;
if (machine == NULL)
return NULL;
@@ -251,12 +254,14 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
pos = rb_entry(parent, struct machine, rb_node);
if (pid < pos->pid)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&machine->rb_node, parent, p);
- rb_insert_color(&machine->rb_node, &machines->guests);
+ rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
return machine;
}
@@ -267,7 +272,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
machines->host.comm_exec = comm_exec;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
machine->comm_exec = comm_exec;
@@ -276,7 +281,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
struct machine *machines__find(struct machines *machines, pid_t pid)
{
- struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *machine;
struct machine *default_machine = NULL;
@@ -339,7 +344,7 @@ void machines__process_guests(struct machines *machines,
{
struct rb_node *nd;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
process(pos, data);
}
@@ -352,7 +357,8 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
machines->host.id_hdr_size = id_hdr_size;
- for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&machines->guests); node;
+ node = rb_next(node)) {
machine = rb_entry(node, struct machine, rb_node);
machine->id_hdr_size = id_hdr_size;
}
@@ -465,9 +471,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
{
- struct rb_node **p = &threads->entries.rb_node;
+ struct rb_node **p = &threads->entries.rb_root.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
+ bool leftmost = true;
th = threads__get_last_match(threads, machine, pid, tid);
if (th)
@@ -485,8 +492,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
if (tid < th->tid)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
if (!create)
@@ -495,7 +504,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
th = thread__new(pid, tid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &threads->entries);
+ rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
/*
* We have to initialize map_groups separately
@@ -506,7 +515,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* leader and that would screw up the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
- rb_erase_init(&th->rb_node, &threads->entries);
+ rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
thread__put(th);
return NULL;
@@ -681,6 +690,59 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
return 0;
}
+static int machine__process_ksymbol_register(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct symbol *sym;
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
+ if (!map) {
+ map = dso__new_map(event->ksymbol_event.name);
+ if (!map)
+ return -ENOMEM;
+
+ map->start = event->ksymbol_event.addr;
+ map->pgoff = map->start;
+ map->end = map->start + event->ksymbol_event.len;
+ map_groups__insert(&machine->kmaps, map);
+ }
+
+ sym = symbol__new(event->ksymbol_event.addr, event->ksymbol_event.len,
+ 0, 0, event->ksymbol_event.name);
+ if (!sym)
+ return -ENOMEM;
+ dso__insert_symbol(map->dso, sym);
+ return 0;
+}
+
+static int machine__process_ksymbol_unregister(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
+ if (map)
+ map_groups__remove(&machine->kmaps, map);
+
+ return 0;
+}
+
+int machine__process_ksymbol(struct machine *machine __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ if (dump_trace)
+ perf_event__fprintf_ksymbol(event, stdout);
+
+ if (event->ksymbol_event.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
+ return machine__process_ksymbol_unregister(machine, event,
+ sample);
+ return machine__process_ksymbol_register(machine, event, sample);
+}
+
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
const char *dup_filename;
@@ -744,7 +806,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
struct rb_node *nd;
size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += __dsos__fprintf(&pos->dsos.head, fp);
}
@@ -764,7 +826,7 @@ size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
struct rb_node *nd;
size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
}
@@ -804,7 +866,8 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
ret = fprintf(fp, "Threads: %u\n", threads->nr);
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
@@ -1107,7 +1170,7 @@ failure:
void machines__destroy_kernel_maps(struct machines *machines)
{
- struct rb_node *next = rb_first(&machines->guests);
+ struct rb_node *next = rb_first_cached(&machines->guests);
machine__destroy_kernel_maps(&machines->host);
@@ -1115,7 +1178,7 @@ void machines__destroy_kernel_maps(struct machines *machines)
struct machine *pos = rb_entry(next, struct machine, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, &machines->guests);
+ rb_erase_cached(&pos->rb_node, &machines->guests);
machine__delete(pos);
}
}
@@ -1680,7 +1743,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
BUG_ON(refcount_read(&th->refcnt) == 0);
if (lock)
down_write(&threads->lock);
- rb_erase_init(&th->rb_node, &threads->entries);
+ rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
--threads->nr;
/*
@@ -1812,6 +1875,10 @@ int machine__process_event(struct machine *machine, union perf_event *event,
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
ret = machine__process_switch_event(machine, event); break;
+ case PERF_RECORD_KSYMBOL:
+ ret = machine__process_ksymbol(machine, event, sample); break;
+ case PERF_RECORD_BPF_EVENT:
+ ret = machine__process_bpf_event(machine, event, sample); break;
default:
ret = -1;
break;
@@ -2005,7 +2072,7 @@ static void save_iterations(struct iterations *iter,
{
int i;
- iter->nr_loop_iter = nr;
+ iter->nr_loop_iter++;
iter->cycles = 0;
for (i = 0; i < nr; i++)
@@ -2453,7 +2520,8 @@ int machine__for_each_thread(struct machine *machine,
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
threads = &machine->threads[i];
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
thread = rb_entry(nd, struct thread, rb_node);
rc = fn(thread, priv);
if (rc != 0)
@@ -2480,7 +2548,7 @@ int machines__for_each_thread(struct machines *machines,
if (rc != 0)
return rc;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
rc = machine__for_each_thread(machine, fn, priv);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index a5d1da60f751..f70ab98a7bde 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -4,7 +4,7 @@
#include <sys/types.h>
#include <linux/rbtree.h>
-#include "map.h"
+#include "map_groups.h"
#include "dso.h"
#include "event.h"
#include "rwsem.h"
@@ -29,11 +29,11 @@ struct vdso_info;
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
struct threads {
- struct rb_root entries;
- struct rw_semaphore lock;
- unsigned int nr;
- struct list_head dead;
- struct thread *last_match;
+ struct rb_root_cached entries;
+ struct rw_semaphore lock;
+ unsigned int nr;
+ struct list_head dead;
+ struct thread *last_match;
};
struct machine {
@@ -130,6 +130,9 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
struct perf_sample *sample);
int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
+int machine__process_ksymbol(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample);
int machine__process_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
@@ -137,7 +140,7 @@ typedef void (*machine__process_t)(struct machine *machine, void *data);
struct machines {
struct machine host;
- struct rb_root guests;
+ struct rb_root_cached guests;
};
void machines__init(struct machines *machines);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6751301a755c..fbeb0c6efaa6 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -286,8 +286,8 @@ void map__put(struct map *map)
void map__fixup_start(struct map *map)
{
- struct rb_root *symbols = &map->dso->symbols;
- struct rb_node *nd = rb_first(symbols);
+ struct rb_root_cached *symbols = &map->dso->symbols;
+ struct rb_node *nd = rb_first_cached(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->start = sym->start;
@@ -296,8 +296,8 @@ void map__fixup_start(struct map *map)
void map__fixup_end(struct map *map)
{
- struct rb_root *symbols = &map->dso->symbols;
- struct rb_node *nd = rb_last(symbols);
+ struct rb_root_cached *symbols = &map->dso->symbols;
+ struct rb_node *nd = rb_last(&symbols->rb_root);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->end = sym->end;
@@ -557,6 +557,12 @@ void map_groups__init(struct map_groups *mg, struct machine *machine)
refcount_set(&mg->refcnt, 1);
}
+void map_groups__insert(struct map_groups *mg, struct map *map)
+{
+ maps__insert(&mg->maps, map);
+ map->groups = mg;
+}
+
static void __maps__purge(struct maps *maps)
{
struct rb_root *root = &maps->entries;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 09282aa45c80..0e20749f2c55 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -6,12 +6,10 @@
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
-#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <linux/types.h>
-#include "rwsem.h"
struct dso;
struct ip_callchain;
@@ -48,38 +46,7 @@ struct map {
refcount_t refcnt;
};
-#define KMAP_NAME_LEN 256
-
-struct kmap {
- struct ref_reloc_sym *ref_reloc_sym;
- struct map_groups *kmaps;
- char name[KMAP_NAME_LEN];
-};
-
-struct maps {
- struct rb_root entries;
- struct rb_root names;
- struct rw_semaphore lock;
-};
-
-struct map_groups {
- struct maps maps;
- struct machine *machine;
- refcount_t refcnt;
-};
-
-struct map_groups *map_groups__new(struct machine *machine);
-void map_groups__delete(struct map_groups *mg);
-bool map_groups__empty(struct map_groups *mg);
-
-static inline struct map_groups *map_groups__get(struct map_groups *mg)
-{
- if (mg)
- refcount_inc(&mg->refcnt);
- return mg;
-}
-
-void map_groups__put(struct map_groups *mg);
+struct kmap;
struct kmap *__map__kmap(struct map *map);
struct kmap *map__kmap(struct map *map);
@@ -174,18 +141,7 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp);
-struct srccode_state {
- char *srcfile;
- unsigned line;
-};
-
-static inline void srccode_state_init(struct srccode_state *state)
-{
- state->srcfile = NULL;
- state->line = 0;
-}
-
-void srccode_state_free(struct srccode_state *state);
+struct srccode_state;
int map__fprintf_srccode(struct map *map, u64 addr,
FILE *fp, struct srccode_state *state);
@@ -198,61 +154,9 @@ void map__fixup_end(struct map *map);
void map__reloc_vmlinux(struct map *map);
-void maps__insert(struct maps *maps, struct map *map);
-void maps__remove(struct maps *maps, struct map *map);
-struct map *maps__find(struct maps *maps, u64 addr);
-struct map *maps__first(struct maps *maps);
-struct map *map__next(struct map *map);
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
- struct map **mapp);
-void map_groups__init(struct map_groups *mg, struct machine *machine);
-void map_groups__exit(struct map_groups *mg);
-int map_groups__clone(struct thread *thread,
- struct map_groups *parent);
-size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
-
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
u64 addr);
-static inline void map_groups__insert(struct map_groups *mg, struct map *map)
-{
- maps__insert(&mg->maps, map);
- map->groups = mg;
-}
-
-static inline void map_groups__remove(struct map_groups *mg, struct map *map)
-{
- maps__remove(&mg->maps, map);
-}
-
-static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
-{
- return maps__find(&mg->maps, addr);
-}
-
-struct map *map_groups__first(struct map_groups *mg);
-
-static inline struct map *map_groups__next(struct map *map)
-{
- return map__next(map);
-}
-
-struct symbol *map_groups__find_symbol(struct map_groups *mg,
- u64 addr, struct map **mapp);
-
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
- const char *name,
- struct map **mapp);
-
-struct addr_map_symbol;
-
-int map_groups__find_ams(struct addr_map_symbol *ams);
-
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
- FILE *fp);
-
-struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
-
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
new file mode 100644
index 000000000000..4dcda33e0fdf
--- /dev/null
+++ b/tools/perf/util/map_groups.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_MAP_GROUPS_H
+#define __PERF_MAP_GROUPS_H
+
+#include <linux/refcount.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include "rwsem.h"
+
+struct ref_reloc_sym;
+struct machine;
+struct map;
+struct thread;
+
+struct maps {
+ struct rb_root entries;
+ struct rb_root names;
+ struct rw_semaphore lock;
+};
+
+void maps__insert(struct maps *maps, struct map *map);
+void maps__remove(struct maps *maps, struct map *map);
+struct map *maps__find(struct maps *maps, u64 addr);
+struct map *maps__first(struct maps *maps);
+struct map *map__next(struct map *map);
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
+
+struct map_groups {
+ struct maps maps;
+ struct machine *machine;
+ refcount_t refcnt;
+};
+
+#define KMAP_NAME_LEN 256
+
+struct kmap {
+ struct ref_reloc_sym *ref_reloc_sym;
+ struct map_groups *kmaps;
+ char name[KMAP_NAME_LEN];
+};
+
+struct map_groups *map_groups__new(struct machine *machine);
+void map_groups__delete(struct map_groups *mg);
+bool map_groups__empty(struct map_groups *mg);
+
+static inline struct map_groups *map_groups__get(struct map_groups *mg)
+{
+ if (mg)
+ refcount_inc(&mg->refcnt);
+ return mg;
+}
+
+void map_groups__put(struct map_groups *mg);
+void map_groups__init(struct map_groups *mg, struct machine *machine);
+void map_groups__exit(struct map_groups *mg);
+int map_groups__clone(struct thread *thread, struct map_groups *parent);
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
+
+void map_groups__insert(struct map_groups *mg, struct map *map);
+
+static inline void map_groups__remove(struct map_groups *mg, struct map *map)
+{
+ maps__remove(&mg->maps, map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
+{
+ return maps__find(&mg->maps, addr);
+}
+
+struct map *map_groups__first(struct map_groups *mg);
+
+static inline struct map *map_groups__next(struct map *map)
+{
+ return map__next(map);
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);
+
+struct addr_map_symbol;
+
+int map_groups__find_ams(struct addr_map_symbol *ams);
+
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp);
+
+struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
+
+#endif // __PERF_MAP_GROUPS_H
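
With the declarations now collected in map_groups.h, callers only need this header to look up maps and manage the refcount. A short hedged usage sketch; the inspect() wrapper and its arguments are illustrative, not part of the patch:

#include <stdio.h>
#include "map_groups.h"

static void inspect(struct map_groups *mg, u64 addr)
{
	/* take a reference while we work with the map groups (NULL-safe) */
	struct map_groups *ref = map_groups__get(mg);
	struct map *map;

	if (!ref)
		return;

	map = map_groups__find(ref, addr);
	if (map)	/* a map covers addr; dump the whole set for context */
		map_groups__fprintf(ref, stderr);

	map_groups__put(ref);	/* drop the reference, may free on last put */
}
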
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
new file mode 100644
index 000000000000..5a1aed9f6bb4
--- /dev/null
+++ b/tools/perf/util/map_symbol.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __PERF_MAP_SYMBOL
+#define __PERF_MAP_SYMBOL 1
+
+#include <linux/types.h>
+
+struct map;
+struct symbol;
+
+struct map_symbol {
+ struct map *map;
+ struct symbol *sym;
+};
+
+struct addr_map_symbol {
+ struct map *map;
+ struct symbol *sym;
+ u64 addr;
+ u64 al_addr;
+ u64 phys_addr;
+};
+#endif // __PERF_MAP_SYMBOL
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 93f74d8d3cdd..42c3e5a229d2 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -28,7 +28,7 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
static char mem_loads_name[100];
static bool mem_loads_name__init;
-char *perf_mem_events__name(int i)
+char * __weak perf_mem_events__name(int i)
{
if (i == PERF_MEM_EVENTS__LOAD) {
if (!mem_loads_name__init) {
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index a28f9b5cc4ff..b8d864ed4afe 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -270,7 +270,7 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
}
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
- bool raw)
+ bool raw, bool details)
{
struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
@@ -329,6 +329,12 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
if (asprintf(&s, "%s\n%*s%s]",
pe->metric_name, 8, "[", pe->desc) < 0)
return;
+
+ if (details) {
+ if (asprintf(&s, "%s\n%*s%s]",
+ s, 8, "[", pe->metric_expr) < 0)
+ return;
+ }
}
if (!s)
@@ -352,7 +358,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
else if (metrics && !raw)
printf("\nMetrics:\n\n");
- for (node = rb_first(&groups.entries); node; node = next) {
+ for (node = rb_first_cached(&groups.entries); node; node = next) {
struct mep *me = container_of(node, struct mep, nd);
if (metricgroups)
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 8a155dba0581..5c52097a5c63 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -27,6 +27,7 @@ int metricgroup__parse_groups(const struct option *opt,
const char *str,
struct rblist *metric_events);
-void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
+void metricgroup__print(bool metrics, bool groups, char *filter,
+ bool raw, bool details);
bool metricgroup__has_metric(const char *metric);
#endif
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 8fc39311a30d..cdc7740fc181 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -10,6 +10,9 @@
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
+#ifdef HAVE_LIBNUMA_SUPPORT
+#include <numaif.h>
+#endif
#include "debug.h"
#include "event.h"
#include "mmap.h"
@@ -154,9 +157,72 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+{
+ map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (map->aio.data[idx] == MAP_FAILED) {
+ map->aio.data[idx] = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+{
+ if (map->aio.data[idx]) {
+ munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
+ map->aio.data[idx] = NULL;
+ }
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
+{
+ void *data;
+ size_t mmap_len;
+ unsigned long node_mask;
+
+ if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
+ data = map->aio.data[idx];
+ mmap_len = perf_mmap__mmap_len(map);
+ node_mask = 1UL << cpu__get_node(cpu);
+ if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
+ pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
+ data, data + mmap_len, cpu__get_node(cpu));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+#else
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+{
+ map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
+ if (map->aio.data[idx] == NULL)
+ return -1;
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+{
+ zfree(&(map->aio.data[idx]));
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
+ int cpu __maybe_unused, int affinity __maybe_unused)
+{
+ return 0;
+}
+#endif
+
static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
- int delta_max, i, prio;
+ int delta_max, i, prio, ret;
map->aio.nr_cblocks = mp->nr_cblocks;
if (map->aio.nr_cblocks) {
@@ -177,11 +243,14 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
}
delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
for (i = 0; i < map->aio.nr_cblocks; ++i) {
- map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
- if (!map->aio.data[i]) {
+ ret = perf_mmap__aio_alloc(map, i);
+ if (ret == -1) {
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
+ ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
+ if (ret == -1)
+ return -1;
/*
* Use cblock.aio_fildes value different from -1
* to denote started aio write operation on the
@@ -210,7 +279,7 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
int i;
for (i = 0; i < map->aio.nr_cblocks; ++i)
- zfree(&map->aio.data[i]);
+ perf_mmap__aio_free(map, i);
if (map->aio.data)
zfree(&map->aio.data);
zfree(&map->aio.cblocks);
@@ -314,6 +383,32 @@ void perf_mmap__munmap(struct perf_mmap *map)
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
+static void build_node_mask(int node, cpu_set_t *mask)
+{
+ int c, cpu, nr_cpus;
+ const struct cpu_map *cpu_map = NULL;
+
+ cpu_map = cpu_map__online();
+ if (!cpu_map)
+ return;
+
+ nr_cpus = cpu_map__nr(cpu_map);
+ for (c = 0; c < nr_cpus; c++) {
+ cpu = cpu_map->map[c]; /* map c index to online cpu index */
+ if (cpu__get_node(cpu) == node)
+ CPU_SET(cpu, mask);
+ }
+}
+
+static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+{
+ CPU_ZERO(&map->affinity_mask);
+ if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
+ build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
+ else if (mp->affinity == PERF_AFFINITY_CPU)
+ CPU_SET(map->cpu, &map->affinity_mask);
+}
+
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
/*
@@ -343,6 +438,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->fd = fd;
map->cpu = cpu;
+ perf_mmap__setup_affinity_mask(map, mp);
+
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
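
perf_mmap__aio_bind() above binds each AIO data buffer to the NUMA node of the mmapped CPU. A standalone hedged sketch of the same mmap()+mbind() combination; the node argument is an example value, and linking with -lnuma for the mbind() wrapper is an assumption of this sketch:

#include <numaif.h>		/* mbind(), MPOL_BIND; link with -lnuma */
#include <sys/mman.h>
#include <stdio.h>
#include <stddef.h>

static void *alloc_buf_on_node(size_t len, int node)
{
	unsigned long node_mask = 1UL << node;	/* single-node mask */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return NULL;

	/* MPOL_BIND: page allocations for [buf, buf+len) must come from
	 * the nodes set in node_mask; maxnode is the mask width in bits.
	 */
	if (mbind(buf, len, MPOL_BIND, &node_mask,
		  sizeof(node_mask) * 8, 0)) {
		perror("mbind");
		munmap(buf, len);
		return NULL;
	}
	return buf;
}
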
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index aeb6942fdb00..e566c19b242b 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -38,6 +38,7 @@ struct perf_mmap {
int nr_cblocks;
} aio;
#endif
+ cpu_set_t affinity_mask;
};
/*
@@ -69,7 +70,7 @@ enum bkw_mmap_state {
};
struct mmap_params {
- int prot, mask, nr_cblocks;
+ int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
};
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 897589507d97..ea523d3b248f 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -391,8 +391,10 @@ void ordered_events__free(struct ordered_events *oe)
* Current buffer might not have all the events allocated
* yet, we need to free only allocated ones ...
*/
- list_del(&oe->buffer->list);
- ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
+ if (oe->buffer) {
+ list_del(&oe->buffer->list);
+ ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
+ }
/* ... and continue with the rest */
list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 920e1e6551dd..4dcc01b2532c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2540,7 +2540,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_sdt_events(NULL, NULL, name_only);
- metricgroup__print(true, true, NULL, name_only);
+ metricgroup__print(true, true, NULL, name_only, details_flag);
}
int parse_events__is_hardcoded_term(struct parse_events_term *term)
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index da8fe57691b8..44819bdb037d 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -311,7 +311,7 @@ value_sym '/' event_config '/'
$$ = list;
}
|
-value_sym sep_slash_dc
+value_sym sep_slash_slash_dc
{
struct list_head *list;
int type = $1 >> 16;
@@ -702,7 +702,7 @@ PE_VALUE PE_ARRAY_RANGE PE_VALUE
sep_dc: ':' |
-sep_slash_dc: '/' | ':' |
+sep_slash_slash_dc: '/' '/' | ':' |
%%
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 11a234740632..51d437f55d18 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -29,8 +29,6 @@ struct perf_pmu_format {
struct list_head list;
};
-#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
-
int perf_pmu_parse(struct list_head *list, char *name);
extern FILE *perf_pmu_in;
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 76fecec7b3f9..47253c3daf55 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -6,9 +6,10 @@
#include <linux/compiler.h>
#include <linux/perf_event.h>
#include <stdbool.h>
-#include "evsel.h"
#include "parse-events.h"
+struct perf_evsel_config_term;
+
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
PERF_PMU_FORMAT_VALUE_CONFIG1,
@@ -16,6 +17,7 @@ enum {
};
#define PERF_PMU_FORMAT_BITS 64
+#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
struct perf_event_attr;
@@ -29,7 +31,6 @@ struct perf_pmu {
struct list_head format; /* HEAD struct perf_pmu_format -> list */
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
struct list_head list; /* ELEM */
- int (*set_drv_config) (struct perf_evsel_config_term *term);
};
struct perf_pmu_info {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 18a59fba97ff..0030f9b9bf7e 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -35,11 +35,14 @@
#include "util.h"
#include "event.h"
+#include "namespaces.h"
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
#include "cache.h"
#include "color.h"
+#include "map.h"
+#include "map_groups.h"
#include "symbol.h"
#include "thread.h"
#include <api/fs/fs.h>
@@ -3528,7 +3531,8 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
/* Show all (filtered) symbols */
setup_pager();
- for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&map->dso->symbol_names); nd;
+ nd = rb_next(nd)) {
struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
if (strfilter__compare(_filter, pos->sym.name))
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 15a98c3a2a2f..05c8d571a901 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -4,8 +4,9 @@
#include <linux/compiler.h>
#include <stdbool.h>
-#include "intlist.h"
-#include "namespaces.h"
+
+struct intlist;
+struct nsinfo;
/* Probe related configurations */
struct probe_conf {
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 0b1195cad0e5..4062bc4412a9 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -20,6 +20,7 @@
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
+#include "namespaces.h"
#include "util.h"
#include "event.h"
#include "strlist.h"
diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
index a920f702a74d..376e86cb4c3c 100644
--- a/tools/perf/util/rb_resort.h
+++ b/tools/perf/util/rb_resort.h
@@ -140,12 +140,12 @@ struct __name##_sorted *__name = __name##_sorted__new
/* For 'struct intlist' */
#define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \
- DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \
+ DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \
__ilist->rblist.nr_entries)
/* For 'struct machine->threads' */
-#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
- DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries, \
- __machine->threads[hash_bucket].nr)
+#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
+ DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \
+ __machine->threads[hash_bucket].nr)
#endif /* _PERF_RESORT_RB_H_ */
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
index 0efc3258c648..11e07fab20dc 100644
--- a/tools/perf/util/rblist.c
+++ b/tools/perf/util/rblist.c
@@ -13,8 +13,9 @@
int rblist__add_node(struct rblist *rblist, const void *new_entry)
{
- struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node;
+ bool leftmost = true;
while (*p != NULL) {
int rc;
@@ -24,8 +25,10 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
rc = rblist->node_cmp(parent, new_entry);
if (rc > 0)
p = &(*p)->rb_left;
- else if (rc < 0)
+ else if (rc < 0) {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
else
return -EEXIST;
}
@@ -35,7 +38,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
return -ENOMEM;
rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
+ rb_insert_color_cached(new_node, &rblist->entries, leftmost);
++rblist->nr_entries;
return 0;
@@ -43,7 +46,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
{
- rb_erase(rb_node, &rblist->entries);
+ rb_erase_cached(rb_node, &rblist->entries);
--rblist->nr_entries;
rblist->node_delete(rblist, rb_node);
}
@@ -52,8 +55,9 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
const void *entry,
bool create)
{
- struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node = NULL;
+ bool leftmost = true;
while (*p != NULL) {
int rc;
@@ -63,8 +67,10 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
rc = rblist->node_cmp(parent, entry);
if (rc > 0)
p = &(*p)->rb_left;
- else if (rc < 0)
+ else if (rc < 0) {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
else
return parent;
}
@@ -73,7 +79,8 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
new_node = rblist->node_new(rblist, entry);
if (new_node) {
rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
+ rb_insert_color_cached(new_node,
+ &rblist->entries, leftmost);
++rblist->nr_entries;
}
}
@@ -94,7 +101,7 @@ struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
void rblist__init(struct rblist *rblist)
{
if (rblist != NULL) {
- rblist->entries = RB_ROOT;
+ rblist->entries = RB_ROOT_CACHED;
rblist->nr_entries = 0;
}
@@ -103,7 +110,7 @@ void rblist__init(struct rblist *rblist)
void rblist__exit(struct rblist *rblist)
{
- struct rb_node *pos, *next = rb_first(&rblist->entries);
+ struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
@@ -124,7 +131,8 @@ struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
{
struct rb_node *node;
- for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&rblist->entries); node;
+ node = rb_next(node)) {
if (!idx--)
return node;
}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
index 76df15c27f5f..14b232a4d0b6 100644
--- a/tools/perf/util/rblist.h
+++ b/tools/perf/util/rblist.h
@@ -20,7 +20,7 @@
*/
struct rblist {
- struct rb_root entries;
+ struct rb_root_cached entries;
unsigned int nr_entries;
int (*node_cmp)(struct rb_node *rbn, const void *entry);
diff --git a/tools/perf/util/s390-cpumcf-kernel.h b/tools/perf/util/s390-cpumcf-kernel.h
new file mode 100644
index 000000000000..d4356030b504
--- /dev/null
+++ b/tools/perf/util/s390-cpumcf-kernel.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for s390 CPU measurement counter set diagnostic facility
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ *            Thomas Richter <tmricht@linux.ibm.com>
+ */
+#ifndef S390_CPUMCF_KERNEL_H
+#define S390_CPUMCF_KERNEL_H
+
+#define S390_CPUMCF_DIAG_DEF 0xfeef /* Counter diagnostic entry ID */
+#define PERF_EVENT_CPUM_CF_DIAG 0xBC000 /* Event: Counter sets */
+
+struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int set:16; /* 16-31 Counter set identifier */
+ unsigned int ctr:16; /* 32-47 Number of stored counters */
+ unsigned int res1:16; /* 48-63 Reserved */
+};
+
+struct cf_trailer_entry { /* CPU-M CF trailer for raw traces (64 byte) */
+ /* 0 - 7 */
+ union {
+ struct {
+ unsigned int clock_base:1; /* TOD clock base */
+ unsigned int speed:1; /* CPU speed */
+ /* Measurement alerts */
+ unsigned int mtda:1; /* Loss of MT ctr. data alert */
+ unsigned int caca:1; /* Counter auth. change alert */
+ unsigned int lcda:1; /* Loss of counter data alert */
+ };
+ unsigned long flags; /* 0-63 All indicators */
+ };
+ /* 8 - 15 */
+ unsigned int cfvn:16; /* 64-79 Ctr First Version */
+ unsigned int csvn:16; /* 80-95 Ctr Second Version */
+ unsigned int cpu_speed:32; /* 96-127 CPU speed */
+ /* 16 - 23 */
+ unsigned long timestamp; /* 128-191 Timestamp (TOD) */
+ /* 24 - 55 */
+ union {
+ struct {
+ unsigned long progusage1;
+ unsigned long progusage2;
+ unsigned long progusage3;
+ unsigned long tod_base;
+ };
+ unsigned long progusage[4];
+ };
+ /* 56 - 63 */
+ unsigned int mach_type:16; /* Machine type */
+ unsigned int res1:16; /* Reserved */
+ unsigned int res2:32; /* Reserved */
+};
+
+#define CPUMF_CTR_SET_BASIC 0 /* Basic Counter Set */
+#define CPUMF_CTR_SET_USER 1 /* Problem-State Counter Set */
+#define CPUMF_CTR_SET_CRYPTO 2 /* Crypto-Activity Counter Set */
+#define CPUMF_CTR_SET_EXT 3 /* Extended Counter Set */
+#define CPUMF_CTR_SET_MT_DIAG 4 /* MT-diagnostic Counter Set */
+#endif
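
The counter-set data described by this header arrives as a big-endian byte stream inside the raw sample. A hedged userspace sketch of decoding one entry; it uses be16toh()/be64toh() from <endian.h> in place of the kernel byteorder helpers, and the fixed-width header mirrors the cf_ctrset_entry layout above:

#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CPUMCF_DIAG_DEF	0xfeef	/* same value as S390_CPUMCF_DIAG_DEF */

struct ctrset_hdr {		/* wire layout of struct cf_ctrset_entry */
	uint16_t def;
	uint16_t set;
	uint16_t ctr;
	uint16_t res1;
};

/* Decode one counter-set entry at buf (the raw data is 8-byte aligned) and
 * return its total size in bytes, or 0 for the trailer/invalid data.
 */
static size_t dump_ctrset(const unsigned char *buf)
{
	const struct ctrset_hdr *hdr = (const struct ctrset_hdr *)buf;
	const uint64_t *val = (const uint64_t *)(hdr + 1);
	uint16_t ctr = be16toh(hdr->ctr);
	uint16_t i;

	if (be16toh(hdr->def) != CPUMCF_DIAG_DEF)
		return 0;

	for (i = 0; i < ctr; i++)
		printf("set %u counter %u = %#llx\n", be16toh(hdr->set), i,
		       (unsigned long long)be64toh(val[i]));

	return sizeof(*hdr) + ctr * sizeof(uint64_t);
}
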
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 68b2570304ec..c215704931dc 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -162,6 +162,7 @@
#include "auxtrace.h"
#include "s390-cpumsf.h"
#include "s390-cpumsf-kernel.h"
+#include "s390-cpumcf-kernel.h"
#include "config.h"
struct s390_cpumsf {
@@ -184,8 +185,58 @@ struct s390_cpumsf_queue {
struct auxtrace_buffer *buffer;
int cpu;
FILE *logfile;
+ FILE *logfile_ctr;
};
+/* Check if the raw data should be dumped to file. If this is the case and
+ * the file to dump to has not been opened for writing, do so.
+ *
+ * Return 0 on success and a value greater than zero on error so that
+ * processing continues.
+ */
+static int s390_cpumcf_dumpctr(struct s390_cpumsf *sf,
+ struct perf_sample *sample)
+{
+ struct s390_cpumsf_queue *sfq;
+ struct auxtrace_queue *q;
+ int rc = 0;
+
+ if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
+ return rc;
+
+ q = &sf->queues.queue_array[sample->cpu];
+ sfq = q->priv;
+ if (!sfq) /* Queue not yet allocated */
+ return rc;
+
+ if (!sfq->logfile_ctr) {
+ char *name;
+
+ rc = (sf->logdir)
+ ? asprintf(&name, "%s/aux.ctr.%02x",
+ sf->logdir, sample->cpu)
+ : asprintf(&name, "aux.ctr.%02x", sample->cpu);
+ if (rc > 0)
+ sfq->logfile_ctr = fopen(name, "w");
+ if (sfq->logfile_ctr == NULL) {
+ pr_err("Failed to open counter set log file %s, "
+ "continue...\n", name);
+ rc = 1;
+ }
+ free(name);
+ }
+
+ if (sfq->logfile_ctr) {
+ /* See comment above for -4 */
+ size_t n = fwrite(sample->raw_data, sample->raw_size - 4, 1,
+ sfq->logfile_ctr);
+ if (n != 1) {
+ pr_err("Failed to write counter set data\n");
+ rc = 1;
+ }
+ }
+ return rc;
+}
+
/* Display s390 CPU measurement facility basic-sampling data entry */
static bool s390_cpumsf_basic_show(const char *color, size_t pos,
struct hws_basic_entry *basic)
@@ -301,6 +352,11 @@ static bool s390_cpumsf_validate(int machine_type,
*dsdes = 85;
*bsdes = 32;
break;
+ case 2964:
+ case 2965:
+ *dsdes = 112;
+ *bsdes = 32;
+ break;
default:
/* Illegal trailer entry */
return false;
@@ -768,7 +824,7 @@ static int s390_cpumsf_process_queues(struct s390_cpumsf *sf, u64 timestamp)
}
static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
char msg[MAX_AUXTRACE_ERROR_MSG];
union perf_event event;
@@ -776,7 +832,7 @@ static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
strncpy(msg, "Lost Auxiliary Trace Buffer", sizeof(msg) - 1);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg);
+ code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(sf->session, &event, NULL);
if (err)
@@ -788,11 +844,12 @@ static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
static int s390_cpumsf_lost(struct s390_cpumsf *sf, struct perf_sample *sample)
{
return s390_cpumsf_synth_error(sf, 1, sample->cpu,
- sample->pid, sample->tid, 0);
+ sample->pid, sample->tid, 0,
+ sample->time);
}
static int
-s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
+s390_cpumsf_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
@@ -801,6 +858,8 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
struct s390_cpumsf,
auxtrace);
u64 timestamp = sample->time;
+ struct perf_evsel *ev_bc000;
+
int err = 0;
if (dump_trace)
@@ -811,6 +870,16 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
return -EINVAL;
}
+ if (event->header.type == PERF_RECORD_SAMPLE &&
+ sample->raw_size) {
+ /* Handle event with raw data */
+ ev_bc000 = perf_evlist__event2evsel(session->evlist, event);
+ if (ev_bc000 &&
+ ev_bc000->attr.config == PERF_EVENT_CPUM_CF_DIAG)
+ err = s390_cpumcf_dumpctr(sf, sample);
+ return err;
+ }
+
if (event->header.type == PERF_RECORD_AUX &&
event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
return s390_cpumsf_lost(sf, sample);
@@ -891,9 +960,15 @@ static void s390_cpumsf_free_queues(struct perf_session *session)
struct s390_cpumsf_queue *sfq = (struct s390_cpumsf_queue *)
queues->queue_array[i].priv;
- if (sfq != NULL && sfq->logfile) {
- fclose(sfq->logfile);
- sfq->logfile = NULL;
+ if (sfq != NULL) {
+ if (sfq->logfile) {
+ fclose(sfq->logfile);
+ sfq->logfile = NULL;
+ }
+ if (sfq->logfile_ctr) {
+ fclose(sfq->logfile_ctr);
+ sfq->logfile_ctr = NULL;
+ }
}
zfree(&queues->queue_array[i].priv);
}
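
s390_cpumcf_dumpctr() above lazily opens one dump file per CPU, named aux.ctr.<cpu> and optionally placed below a log directory. A hedged standalone sketch of that naming scheme; the function name and error handling are illustrative:

#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

static FILE *open_ctr_dumpfile(const char *logdir, int cpu)
{
	FILE *fp = NULL;
	char *name;
	int rc = logdir ? asprintf(&name, "%s/aux.ctr.%02x", logdir, cpu)
			: asprintf(&name, "aux.ctr.%02x", cpu);

	if (rc > 0) {		/* asprintf() returns the length on success */
		fp = fopen(name, "w");
		if (!fp)
			fprintf(stderr, "Failed to open %s\n", name);
		free(name);
	}
	return fp;
}
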
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
new file mode 100644
index 000000000000..6650f599ed9c
--- /dev/null
+++ b/tools/perf/util/s390-sample-raw.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Architecture specific trace_event function. Save event's bc000 raw data
+ * to file. File name is aux.ctr.## where ## stands for the CPU number the
+ * sample was taken from.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <sys/stat.h>
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+
+#include "debug.h"
+#include "util.h"
+#include "auxtrace.h"
+#include "session.h"
+#include "evlist.h"
+#include "config.h"
+#include "color.h"
+#include "sample-raw.h"
+#include "s390-cpumcf-kernel.h"
+#include "pmu-events/pmu-events.h"
+
+static size_t ctrset_size(struct cf_ctrset_entry *set)
+{
+ return sizeof(*set) + set->ctr * sizeof(u64);
+}
+
+static bool ctrset_valid(struct cf_ctrset_entry *set)
+{
+ return set->def == S390_CPUMCF_DIAG_DEF;
+}
+
+/* CPU Measurement Counter Facility raw data is a byte stream. It is 8 byte
+ * aligned and might have trailing padding bytes.
+ * The helpers below validate the format and display the raw data on screen.
+ */
+static bool s390_cpumcfdg_testctr(struct perf_sample *sample)
+{
+ size_t len = sample->raw_size, offset = 0;
+ unsigned char *buf = sample->raw_data;
+ struct cf_trailer_entry *te;
+ struct cf_ctrset_entry *cep, ce;
+
+ if (!len)
+ return false;
+ while (offset < len) {
+ cep = (struct cf_ctrset_entry *)(buf + offset);
+ ce.def = be16_to_cpu(cep->def);
+ ce.set = be16_to_cpu(cep->set);
+ ce.ctr = be16_to_cpu(cep->ctr);
+ ce.res1 = be16_to_cpu(cep->res1);
+
+ if (!ctrset_valid(&ce) || offset + ctrset_size(&ce) > len) {
+ /* Raw data for counter sets is always a multiple of 8
+ * bytes. Prepending a 4-byte size field to the
+ * raw data block in the sample causes the perf tool
+ * to append 4 padding bytes to make the raw data part
+ * of the sample a multiple of eight bytes again.
+ *
+ * If the last entry (trailer) is 4 bytes off the raw
+ * area data end, all is good.
+ */
+ if (len - offset - sizeof(*te) == 4)
+ break;
+ pr_err("Invalid counter set entry at %zd\n", offset);
+ return false;
+ }
+ offset += ctrset_size(&ce);
+ }
+ return true;
+}
+
+/* Dump event bc000 on screen, already tested on correctness. */
+static void s390_cpumcfdg_dumptrail(const char *color, size_t offset,
+ struct cf_trailer_entry *tep)
+{
+ struct cf_trailer_entry te;
+
+ te.flags = be64_to_cpu(tep->flags);
+ te.cfvn = be16_to_cpu(tep->cfvn);
+ te.csvn = be16_to_cpu(tep->csvn);
+ te.cpu_speed = be32_to_cpu(tep->cpu_speed);
+ te.timestamp = be64_to_cpu(tep->timestamp);
+ te.progusage1 = be64_to_cpu(tep->progusage1);
+ te.progusage2 = be64_to_cpu(tep->progusage2);
+ te.progusage3 = be64_to_cpu(tep->progusage3);
+ te.tod_base = be64_to_cpu(tep->tod_base);
+ te.mach_type = be16_to_cpu(tep->mach_type);
+ te.res1 = be16_to_cpu(tep->res1);
+ te.res2 = be32_to_cpu(tep->res2);
+
+ color_fprintf(stdout, color, " [%#08zx] Trailer:%c%c%c%c%c"
+ " Cfvn:%d Csvn:%d Speed:%d TOD:%#llx\n",
+ offset, te.clock_base ? 'T' : ' ',
+ te.speed ? 'S' : ' ', te.mtda ? 'M' : ' ',
+ te.caca ? 'C' : ' ', te.lcda ? 'L' : ' ',
+ te.cfvn, te.csvn, te.cpu_speed, te.timestamp);
+ color_fprintf(stdout, color, "\t\t1:%lx 2:%lx 3:%lx TOD-Base:%#llx"
+ " Type:%x\n\n",
+ te.progusage1, te.progusage2, te.progusage3,
+ te.tod_base, te.mach_type);
+}
+
+/* Return starting number of a counter set */
+static int get_counterset_start(int setnr)
+{
+ switch (setnr) {
+ case CPUMF_CTR_SET_BASIC: /* Basic counter set */
+ return 0;
+ case CPUMF_CTR_SET_USER: /* Problem state counter set */
+ return 32;
+ case CPUMF_CTR_SET_CRYPTO: /* Crypto counter set */
+ return 64;
+ case CPUMF_CTR_SET_EXT: /* Extended counter set */
+ return 128;
+ case CPUMF_CTR_SET_MT_DIAG: /* Diagnostic counter set */
+ return 448;
+ default:
+ return -1;
+ }
+}
+
+/* Scan the PMU table and extract the logical name of a counter from the
+ * PMU events table. Input is the counter set and counter number within the
+ * set. Construct the event number and use it as the key. If a matching event
+ * number is found, return the name of that counter.
+ * If no match is found a NULL pointer is returned.
+ */
+static const char *get_counter_name(int set, int nr, struct pmu_events_map *map)
+{
+ int rc, event_nr, wanted = get_counterset_start(set) + nr;
+
+ if (map) {
+ struct pmu_event *evp = map->table;
+
+ for (; evp->name || evp->event || evp->desc; ++evp) {
+ if (evp->name == NULL || evp->event == NULL)
+ continue;
+ rc = sscanf(evp->event, "event=%x", &event_nr);
+ if (rc == 1 && event_nr == wanted)
+ return evp->name;
+ }
+ }
+ return NULL;
+}
+
+static void s390_cpumcfdg_dump(struct perf_sample *sample)
+{
+ size_t i, len = sample->raw_size, offset = 0;
+ unsigned char *buf = sample->raw_data;
+ const char *color = PERF_COLOR_BLUE;
+ struct cf_ctrset_entry *cep, ce;
+ struct pmu_events_map *map;
+ struct perf_pmu pmu;
+ u64 *p;
+
+ memset(&pmu, 0, sizeof(pmu));
+ map = perf_pmu__find_map(&pmu);
+ while (offset < len) {
+ cep = (struct cf_ctrset_entry *)(buf + offset);
+
+ ce.def = be16_to_cpu(cep->def);
+ ce.set = be16_to_cpu(cep->set);
+ ce.ctr = be16_to_cpu(cep->ctr);
+ ce.res1 = be16_to_cpu(cep->res1);
+
+ if (!ctrset_valid(&ce)) { /* Print trailer */
+ s390_cpumcfdg_dumptrail(color, offset,
+ (struct cf_trailer_entry *)cep);
+ return;
+ }
+
+ color_fprintf(stdout, color, " [%#08zx] Counterset:%d"
+ " Counters:%d\n", offset, ce.set, ce.ctr);
+ for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
+ const char *ev_name = get_counter_name(ce.set, i, map);
+
+ color_fprintf(stdout, color,
+ "\tCounter:%03d %s Value:%#018lx\n", i,
+ ev_name ?: "<unknown>", be64_to_cpu(*p));
+ }
+ offset += ctrset_size(&ce);
+ }
+}
+
+/* S390-specific trace event function. Check for PERF_RECORD_SAMPLE events
+ * and, if the event was triggered by a counter set diagnostic event, display
+ * its raw data.
+ * The function is only invoked when the dump flag -D is set.
+ */
+void perf_evlist__s390_sample_raw(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct perf_evsel *ev_bc000;
+
+ if (event->header.type != PERF_RECORD_SAMPLE)
+ return;
+
+ ev_bc000 = perf_evlist__event2evsel(evlist, event);
+ if (ev_bc000 == NULL ||
+ ev_bc000->attr.config != PERF_EVENT_CPUM_CF_DIAG)
+ return;
+
+ /* Display raw data on screen */
+ if (!s390_cpumcfdg_testctr(sample)) {
+ pr_err("Invalid counter set data encountered\n");
+ return;
+ }
+ s390_cpumcfdg_dump(sample);
+}
diff --git a/tools/perf/util/sample-raw.c b/tools/perf/util/sample-raw.c
new file mode 100644
index 000000000000..c21e1311fb0f
--- /dev/null
+++ b/tools/perf/util/sample-raw.c
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <string.h>
+#include "evlist.h"
+#include "env.h"
+#include "sample-raw.h"
+
+/*
+ * Check the platform the perf data file was created on and perform
+ * platform-specific interpretation.
+ */
+void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist)
+{
+ const char *arch_pf = perf_env__arch(evlist->env);
+
+ if (arch_pf && !strcmp("s390", arch_pf))
+ evlist->trace_event_sample_raw = perf_evlist__s390_sample_raw;
+}
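/*
 * Minimal standalone sketch of the dispatch pattern used above: a function
 * pointer hangs off the event list, is filled in once based on the
 * architecture recorded in the perf.data header, and is then called from the
 * dump path for every raw sample.  The struct and function names here are
 * hypothetical stand-ins for struct perf_evlist and friends.
 */
#include <stdio.h>
#include <string.h>

struct mock_sample { const void *raw_data; size_t raw_size; };
struct mock_evlist {
	const char *arch;
	void (*trace_event_sample_raw)(struct mock_evlist *evlist,
				       struct mock_sample *sample);
};

static void s390_dump(struct mock_evlist *evlist, struct mock_sample *sample)
{
	printf("%s raw dump, %zu bytes\n", evlist->arch, sample->raw_size);
}

static void init_sample_raw(struct mock_evlist *evlist)
{
	/* set the handler once, based on the recorded architecture */
	if (evlist->arch && !strcmp(evlist->arch, "s390"))
		evlist->trace_event_sample_raw = s390_dump;
}

int main(void)
{
	struct mock_evlist evlist = { .arch = "s390" };
	struct mock_sample sample = { .raw_data = "", .raw_size = 0 };

	init_sample_raw(&evlist);
	if (evlist.trace_event_sample_raw)	/* as dump_event() does */
		evlist.trace_event_sample_raw(&evlist, &sample);
	return 0;
}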
diff --git a/tools/perf/util/sample-raw.h b/tools/perf/util/sample-raw.h
new file mode 100644
index 000000000000..95d445c87e93
--- /dev/null
+++ b/tools/perf/util/sample-raw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SAMPLE_RAW_H
+#define __SAMPLE_RAW_H 1
+
+struct perf_evlist;
+union perf_event;
+struct perf_sample;
+
+void perf_evlist__s390_sample_raw(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample);
+
+void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist);
+#endif /* __SAMPLE_RAW_H */
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 82d28c67e0f3..7b342ce38d99 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,5 +1,5 @@
-libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
-libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+perf-$(CONFIG_LIBPERL) += trace-event-perl.o
+perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b93f36b887b5..5f06378a482b 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -37,6 +37,8 @@
#include "../../perf.h"
#include "../callchain.h"
#include "../machine.h"
+#include "../map.h"
+#include "../symbol.h"
#include "../thread.h"
#include "../event.h"
#include "../trace-event.h"
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 87ef16a1b17e..0e17db41b49b 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -44,6 +44,8 @@
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../call-path.h"
+#include "map.h"
+#include "symbol.h"
#include "thread_map.h"
#include "cpumap.h"
#include "print_binary.h"
@@ -733,8 +735,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
- pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
- (const char *)&evsel->attr, sizeof(evsel->attr)));
+ pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr)));
pydict_set_item_string_decref(dict_sample, "pid",
_PyLong_FromLong(sample->pid));
@@ -1494,34 +1495,40 @@ static void _free_command_line(wchar_t **command_line, int num)
static int python_start_script(const char *script, int argc, const char **argv)
{
struct tables *tables = &tables_global;
+ PyMODINIT_FUNC (*initfunc)(void);
#if PY_MAJOR_VERSION < 3
const char **command_line;
#else
wchar_t **command_line;
#endif
- char buf[PATH_MAX];
+ /*
+ * Use a non-const name variable to cope with Python 2.6's
+ * PyImport_AppendInittab prototype
+ */
+ char buf[PATH_MAX], name[19] = "perf_trace_context";
int i, err = 0;
FILE *fp;
#if PY_MAJOR_VERSION < 3
+ initfunc = initperf_trace_context;
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
#else
+ initfunc = PyInit_perf_trace_context;
command_line = malloc((argc + 1) * sizeof(wchar_t *));
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
#endif
+ PyImport_AppendInittab(name, initfunc);
Py_Initialize();
#if PY_MAJOR_VERSION < 3
- initperf_trace_context();
PySys_SetArgv(argc + 1, (char **)command_line);
#else
- PyInit_perf_trace_context();
PySys_SetArgv(argc + 1, command_line);
#endif
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5456c84c7dd1..c764bbc91009 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -13,6 +13,8 @@
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
+#include "map.h"
+#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
@@ -23,6 +25,7 @@
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
+#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"
@@ -147,6 +150,8 @@ struct perf_session *perf_session__new(struct perf_data *data,
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
+
+ perf_evlist__init_trace_event_sample_raw(session->evlist);
}
} else {
session->machines.host.env = &perf_env;
@@ -376,6 +381,10 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->itrace_start = perf_event__process_itrace_start;
if (tool->context_switch == NULL)
tool->context_switch = perf_event__process_switch;
+ if (tool->ksymbol == NULL)
+ tool->ksymbol = perf_event__process_ksymbol;
+ if (tool->bpf_event == NULL)
+ tool->bpf_event = perf_event__process_bpf_event;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
@@ -694,7 +703,10 @@ static void perf_event__auxtrace_error_swap(union perf_event *event,
event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
+ event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
+ if (event->auxtrace_error.fmt)
+ event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}
static void perf_event__thread_map_swap(union perf_event *event,
@@ -1065,6 +1077,8 @@ static void dump_event(struct perf_evlist *evlist, union perf_event *event,
file_offset, event->header.size, event->header.type);
trace_event(event);
+ if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
+ evlist->trace_event_sample_raw(evlist, event, sample);
if (sample)
perf_evlist__print_tstamp(evlist, event, sample);
@@ -1188,6 +1202,13 @@ static int deliver_sample_value(struct perf_evlist *evlist,
return 0;
}
+ /*
+ * There's no reason to deliver a sample
+ * with a zero period, so bail out.
+ */
+ if (!sample->period)
+ return 0;
+
return tool->sample(tool, event, sample, sid->evsel, machine);
}
@@ -1305,6 +1326,10 @@ static int machines__deliver_event(struct machines *machines,
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
return tool->context_switch(tool, event, sample, machine);
+ case PERF_RECORD_KSYMBOL:
+ return tool->ksymbol(tool, event, sample, machine);
+ case PERF_RECORD_BPF_EVENT:
+ return tool->bpf_event(tool, event, sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
@@ -1820,38 +1845,35 @@ fetch_mmaped_event(struct perf_session *session,
#define NUM_MMAPS 128
#endif
-static int __perf_session__process_events(struct perf_session *session,
- u64 data_offset, u64 data_size,
- u64 file_size)
+struct reader {
+ int fd;
+ u64 data_size;
+ u64 data_offset;
+};
+
+static int
+reader__process_events(struct reader *rd, struct perf_session *session,
+ struct ui_progress *prog)
{
- struct ordered_events *oe = &session->ordered_events;
- struct perf_tool *tool = session->tool;
- int fd = perf_data__fd(session->data);
+ u64 data_size = rd->data_size;
u64 head, page_offset, file_offset, file_pos, size;
- int err, mmap_prot, mmap_flags, map_idx = 0;
+ int err = 0, mmap_prot, mmap_flags, map_idx = 0;
size_t mmap_size;
char *buf, *mmaps[NUM_MMAPS];
union perf_event *event;
- struct ui_progress prog;
s64 skip;
- perf_tool__fill_defaults(tool);
-
- page_offset = page_size * (data_offset / page_size);
+ page_offset = page_size * (rd->data_offset / page_size);
file_offset = page_offset;
- head = data_offset - page_offset;
-
- if (data_size == 0)
- goto out;
+ head = rd->data_offset - page_offset;
- if (data_offset + data_size < file_size)
- file_size = data_offset + data_size;
+ ui_progress__init_size(prog, data_size, "Processing events...");
- ui_progress__init_size(&prog, file_size, "Processing events...");
+ data_size += rd->data_offset;
mmap_size = MMAP_SIZE;
- if (mmap_size > file_size) {
- mmap_size = file_size;
+ if (mmap_size > data_size) {
+ mmap_size = data_size;
session->one_mmap = true;
}
@@ -1865,12 +1887,12 @@ static int __perf_session__process_events(struct perf_session *session,
mmap_flags = MAP_PRIVATE;
}
remap:
- buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
+ buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
- goto out_err;
+ goto out;
}
mmaps[map_idx] = buf;
map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
@@ -1902,7 +1924,7 @@ more:
file_offset + head, event->header.size,
event->header.type);
err = -EINVAL;
- goto out_err;
+ goto out;
}
if (skip)
@@ -1911,15 +1933,40 @@ more:
head += size;
file_pos += size;
- ui_progress__update(&prog, size);
+ ui_progress__update(prog, size);
if (session_done())
goto out;
- if (file_pos < file_size)
+ if (file_pos < data_size)
goto more;
out:
+ return err;
+}
+
+static int __perf_session__process_events(struct perf_session *session)
+{
+ struct reader rd = {
+ .fd = perf_data__fd(session->data),
+ .data_size = session->header.data_size,
+ .data_offset = session->header.data_offset,
+ };
+ struct ordered_events *oe = &session->ordered_events;
+ struct perf_tool *tool = session->tool;
+ struct ui_progress prog;
+ int err;
+
+ perf_tool__fill_defaults(tool);
+
+ if (rd.data_size == 0)
+ return -1;
+
+ ui_progress__init_size(&prog, rd.data_size, "Processing events...");
+
+ err = reader__process_events(&rd, session, &prog);
+ if (err)
+ goto out_err;
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
@@ -1944,20 +1991,13 @@ out_err:
int perf_session__process_events(struct perf_session *session)
{
- u64 size = perf_data__size(session->data);
- int err;
-
if (perf_session__register_idle_thread(session) < 0)
return -ENOMEM;
- if (!perf_data__is_pipe(session->data))
- err = __perf_session__process_events(session,
- session->header.data_offset,
- session->header.data_size, size);
- else
- err = __perf_session__process_pipe_events(session);
+ if (perf_data__is_pipe(session->data))
+ return __perf_session__process_pipe_events(session);
- return err;
+ return __perf_session__process_events(session);
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 63f758c655d5..5b5a167b43ce 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
from os import getenv
from subprocess import Popen, PIPE
from re import sub
@@ -17,6 +15,8 @@ if cc == "clang":
vars[var] = sub("-mcet", "", vars[var])
if not clang_has_option("-fcf-protection"):
vars[var] = sub("-fcf-protection", "", vars[var])
+ if not clang_has_option("-fstack-clash-protection"):
+ vars[var] = sub("-fstack-clash-protection", "", vars[var])
from distutils.core import setup, Extension
@@ -53,9 +53,14 @@ ext_sources = [f.strip() for f in open('util/python-ext-sources')
# use full paths with source files
ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources))
+extra_libraries = []
+if '-DHAVE_LIBNUMA_SUPPORT' in cflags:
+ extra_libraries = [ 'numa' ]
+
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
+ libraries = extra_libraries,
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 6c1a83768eb0..d2299e912e59 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -6,6 +6,7 @@
#include "sort.h"
#include "hist.h"
#include "comm.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
@@ -230,8 +231,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
if (sym_l == sym_r)
return 0;
- if (sym_l->inlined || sym_r->inlined)
- return strcmp(sym_l->name, sym_r->name);
+ if (sym_l->inlined || sym_r->inlined) {
+ int ret = strcmp(sym_l->name, sym_r->name);
+
+ if (ret)
+ return ret;
+ if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
+ return 0;
+ }
if (sym_l->start != sym_r->start)
return (int64_t)(sym_r->start - sym_l->start);
@@ -428,8 +435,6 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
{
struct symbol *sym = he->ms.sym;
- struct map *map = he->ms.map;
- struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct annotation *notes;
double ipc = 0.0, coverage = 0.0;
char tmp[64];
@@ -437,11 +442,6 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
if (!sym)
return repsep_snprintf(bf, size, "%-*s", width, "-");
- if (!sym->annotate2 && symbol__annotate2(sym, map, evsel,
- &annotation__default_options, NULL) < 0) {
- return 0;
- }
-
notes = symbol__annotation(sym);
if (notes->hit_cycles)
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 130fe37fe2df..2fbee0b1011c 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -9,7 +9,8 @@
#include <linux/list.h>
#include "cache.h"
#include <linux/rbtree.h>
-#include "symbol.h"
+#include "map_symbol.h"
+#include "symbol_conf.h"
#include "string.h"
#include "callchain.h"
#include "values.h"
@@ -145,8 +146,8 @@ struct hist_entry {
union {
/* this is for hierarchical entry structure */
struct {
- struct rb_root hroot_in;
- struct rb_root hroot_out;
+ struct rb_root_cached hroot_in;
+ struct rb_root_cached hroot_out;
}; /* non-leaf entries */
struct rb_root sorted_chain; /* leaf entry has callchains */
};
diff --git a/tools/perf/util/srccode.h b/tools/perf/util/srccode.h
index e500a746d5f1..1b5ed769779c 100644
--- a/tools/perf/util/srccode.h
+++ b/tools/perf/util/srccode.h
@@ -1,6 +1,19 @@
#ifndef SRCCODE_H
#define SRCCODE_H 1
+struct srccode_state {
+ char *srcfile;
+ unsigned line;
+};
+
+static inline void srccode_state_init(struct srccode_state *state)
+{
+ state->srcfile = NULL;
+ state->line = 0;
+}
+
+void srccode_state_free(struct srccode_state *state);
+
/* Result is not 0 terminated */
char *find_sourceline(char *fn, unsigned line, int *lenp);
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index dc86597d0cc4..10ca1533937e 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
} else {
/* create a fake symbol for the inline frame */
inline_sym = symbol__new(base_sym ? base_sym->start : 0,
- base_sym ? base_sym->end : 0,
+ base_sym ? (base_sym->end - base_sym->start) : 0,
base_sym ? base_sym->binding : 0,
base_sym ? base_sym->type : 0,
funcname);
@@ -594,11 +594,12 @@ struct srcline_node {
struct rb_node rb_node;
};
-void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
+void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline)
{
- struct rb_node **p = &tree->rb_node;
+ struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
struct srcline_node *i, *node;
+ bool leftmost = true;
node = zalloc(sizeof(struct srcline_node));
if (!node) {
@@ -614,16 +615,18 @@ void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
i = rb_entry(parent, struct srcline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&node->rb_node, parent, p);
- rb_insert_color(&node->rb_node, tree);
+ rb_insert_color_cached(&node->rb_node, tree, leftmost);
}
-char *srcline__tree_find(struct rb_root *tree, u64 addr)
+char *srcline__tree_find(struct rb_root_cached *tree, u64 addr)
{
- struct rb_node *n = tree->rb_node;
+ struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct srcline_node *i = rb_entry(n, struct srcline_node,
@@ -640,15 +643,15 @@ char *srcline__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
-void srcline__tree_delete(struct rb_root *tree)
+void srcline__tree_delete(struct rb_root_cached *tree)
{
struct srcline_node *pos;
- struct rb_node *next = rb_first(tree);
+ struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct srcline_node, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, tree);
+ rb_erase_cached(&pos->rb_node, tree);
free_srcline(pos->srcline);
zfree(&pos);
}
@@ -682,28 +685,32 @@ void inline_node__delete(struct inline_node *node)
free(node);
}
-void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines)
+void inlines__tree_insert(struct rb_root_cached *tree,
+ struct inline_node *inlines)
{
- struct rb_node **p = &tree->rb_node;
+ struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 addr = inlines->addr;
struct inline_node *i;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
i = rb_entry(parent, struct inline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&inlines->rb_node, parent, p);
- rb_insert_color(&inlines->rb_node, tree);
+ rb_insert_color_cached(&inlines->rb_node, tree, leftmost);
}
-struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
+struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr)
{
- struct rb_node *n = tree->rb_node;
+ struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct inline_node *i = rb_entry(n, struct inline_node,
@@ -720,15 +727,15 @@ struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
-void inlines__tree_delete(struct rb_root *tree)
+void inlines__tree_delete(struct rb_root_cached *tree)
{
struct inline_node *pos;
- struct rb_node *next = rb_first(tree);
+ struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct inline_node, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, tree);
+ rb_erase_cached(&pos->rb_node, tree);
inline_node__delete(pos);
}
}
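/*
 * Sketch of the rb_root_cached idea behind the srcline/inlines (and later
 * symbol) tree conversions above: the tree keeps a cached pointer to its
 * leftmost node so that "first" is O(1), and on insert the new node becomes
 * the cached leftmost only if the descent never went right.  A plain
 * unbalanced BST stands in for the kernel rbtree here, purely to show the
 * leftmost bookkeeping; it is not the tools/lib rbtree API.
 */
#include <assert.h>
#include <stddef.h>

struct node { unsigned long addr; struct node *left, *right; };
struct cached_root { struct node *root, *leftmost; };

static void insert(struct cached_root *tree, struct node *n)
{
	struct node **p = &tree->root, *parent = NULL;
	int leftmost = 1;

	while (*p) {
		parent = *p;
		if (n->addr < parent->addr) {
			p = &parent->left;
		} else {
			p = &parent->right;
			leftmost = 0;	/* went right at least once */
		}
	}
	*p = n;
	if (leftmost)		/* what rb_insert_color_cached() tracks */
		tree->leftmost = n;
}

int main(void)
{
	struct cached_root tree = { NULL, NULL };
	struct node a = { 30 }, b = { 10 }, c = { 20 };

	insert(&tree, &a);
	insert(&tree, &b);
	insert(&tree, &c);
	assert(tree.leftmost == &b);	/* O(1) "first", like rb_first_cached() */
	return 0;
}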
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 5762212dc342..b11a0aaaa676 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -19,11 +19,11 @@ void free_srcline(char *srcline);
char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line);
/* insert the srcline into the DSO, which will take ownership */
-void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
+void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline);
/* find previously inserted srcline */
-char *srcline__tree_find(struct rb_root *tree, u64 addr);
+char *srcline__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all srclines within the tree */
-void srcline__tree_delete(struct rb_root *tree);
+void srcline__tree_delete(struct rb_root_cached *tree);
#define SRCLINE_UNKNOWN ((char *) "??:0")
@@ -46,10 +46,11 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
void inline_node__delete(struct inline_node *node);
/* insert the inline node list into the DSO, which will take ownership */
-void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines);
+void inlines__tree_insert(struct rb_root_cached *tree,
+ struct inline_node *inlines);
/* find previously inserted inline node list */
-struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr);
+struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all nodes within the tree of inline_node s */
-void inlines__tree_delete(struct rb_root *tree);
+void inlines__tree_delete(struct rb_root_cached *tree);
#endif /* PERF_SRCLINE_H */
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 665ee374fc01..6d043c78f3c2 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include <linux/time64.h>
#include <math.h>
+#include "color.h"
#include "evlist.h"
#include "evsel.h"
#include "stat.h"
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 3c22c58b3e90..83d8094be4fe 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -168,7 +168,7 @@ static void reset_stat(struct runtime_stat *st)
struct rb_node *pos, *next;
rblist = &st->value_list;
- next = rb_first(&rblist->entries);
+ next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 9005fbe0780e..23092fd6451d 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -109,7 +109,6 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
return ret;
}
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
- va_end(ap_saved);
if (len > strbuf_avail(sb)) {
pr_debug("this should not happen, your vsnprintf is broken");
va_end(ap_saved);
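/*
 * Standalone sketch of the va_copy pattern behind the strbuf_addv() fix
 * above: a va_list is consumed by the first vsnprintf() call, so a retry
 * after sizing or growing the buffer must use a saved copy, and that copy
 * may only be va_end()ed once, after its final use.  The helper names here
 * are made up for the example.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *xvasprintf(const char *fmt, va_list ap)
{
	va_list ap_saved;
	int len;
	char *buf;

	va_copy(ap_saved, ap);
	len = vsnprintf(NULL, 0, fmt, ap);	/* consumes 'ap' */
	if (len < 0) {
		va_end(ap_saved);
		return NULL;
	}
	buf = malloc(len + 1);
	if (buf)
		vsnprintf(buf, len + 1, fmt, ap_saved);	/* uses the copy */
	va_end(ap_saved);	/* exactly one va_end for the copy */
	return buf;
}

static char *xasprintf(const char *fmt, ...)
{
	va_list ap;
	char *ret;

	va_start(ap, fmt);
	ret = xvasprintf(fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	char *s = xasprintf("%s-%d", "perf", 42);

	puts(s ? s : "(alloc failed)");
	free(s);
	return 0;
}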
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index d58f1e08b170..7e82c71dcc42 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -57,7 +57,7 @@ static inline unsigned int strlist__nr_entries(const struct strlist *slist)
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *slist)
{
- struct rb_node *rn = rb_first(&slist->rblist.entries);
+ struct rb_node *rn = rb_first_cached(&slist->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 66a84d5846c8..4ad106a5f2c0 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -6,6 +6,8 @@
#include <unistd.h>
#include <inttypes.h>
+#include "map.h"
+#include "map_groups.h"
#include "symbol.h"
#include "demangle-java.h"
#include "demangle-rust.h"
@@ -19,6 +21,20 @@
#define EM_AARCH64 183 /* ARM 64 bit */
#endif
+#ifndef ELF32_ST_VISIBILITY
+#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
+#endif
+
+/* For ELF64 the definitions are the same. */
+#ifndef ELF64_ST_VISIBILITY
+#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
+#endif
+
+/* How to extract information held in the st_other field. */
+#ifndef GELF_ST_VISIBILITY
+#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
+#endif
+
typedef Elf64_Nhdr GElf_Nhdr;
#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
@@ -87,6 +103,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
return GELF_ST_TYPE(sym->st_info);
}
+static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
+{
+ return GELF_ST_VISIBILITY(sym->st_other);
+}
+
#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif
@@ -111,7 +132,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
return elf_sym__type(sym) == STT_NOTYPE &&
sym->st_name != 0 &&
sym->st_shndx != SHN_UNDEF &&
- sym->st_shndx != SHN_ABS;
+ sym->st_shndx != SHN_ABS &&
+ elf_sym__visibility(sym) != STV_HIDDEN &&
+ elf_sym__visibility(sym) != STV_INTERNAL;
}
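/*
 * Standalone illustration of the ELF visibility check added above: symbol
 * visibility lives in the low two bits of st_other, and hidden/internal
 * symbols are now filtered out of the label symbols.  Assumes glibc's
 * <elf.h> for the macro and STV_* constants.
 */
#include <assert.h>
#include <elf.h>

int main(void)
{
	unsigned char st_other = STV_HIDDEN;	/* low 2 bits = visibility */

	assert(ELF64_ST_VISIBILITY(st_other) == STV_HIDDEN);
	assert(ELF64_ST_VISIBILITY(STV_DEFAULT) != STV_HIDDEN &&
	       ELF64_ST_VISIBILITY(STV_DEFAULT) != STV_INTERNAL);
	return 0;
}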
static bool elf_sym__filter(GElf_Sym *sym)
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 7119df77dc0b..17edbd4f6f85 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -3,6 +3,7 @@
#include "util.h"
#include <errno.h>
+#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 01f2c7385e38..758bf5f74e6e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -17,6 +17,7 @@
#include "util.h"
#include "debug.h"
#include "machine.h"
+#include "map.h"
#include "symbol.h"
#include "strlist.h"
#include "intlist.h"
@@ -163,7 +164,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
return arch__choose_best_symbol(syma, symb);
}
-void symbols__fixup_duplicate(struct rb_root *symbols)
+void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
struct rb_node *nd;
struct symbol *curr, *next;
@@ -171,7 +172,7 @@ void symbols__fixup_duplicate(struct rb_root *symbols)
if (symbol_conf.allow_aliases)
return;
- nd = rb_first(symbols);
+ nd = rb_first_cached(symbols);
while (nd) {
curr = rb_entry(nd, struct symbol, rb_node);
@@ -186,20 +187,20 @@ again:
continue;
if (choose_best_symbol(curr, next) == SYMBOL_A) {
- rb_erase(&next->rb_node, symbols);
+ rb_erase_cached(&next->rb_node, symbols);
symbol__delete(next);
goto again;
} else {
nd = rb_next(&curr->rb_node);
- rb_erase(&curr->rb_node, symbols);
+ rb_erase_cached(&curr->rb_node, symbols);
symbol__delete(curr);
}
}
}
-void symbols__fixup_end(struct rb_root *symbols)
+void symbols__fixup_end(struct rb_root_cached *symbols)
{
- struct rb_node *nd, *prevnd = rb_first(symbols);
+ struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
if (prevnd == NULL)
@@ -282,25 +283,27 @@ void symbol__delete(struct symbol *sym)
free(((void *)sym) - symbol_conf.priv_size);
}
-void symbols__delete(struct rb_root *symbols)
+void symbols__delete(struct rb_root_cached *symbols)
{
struct symbol *pos;
- struct rb_node *next = rb_first(symbols);
+ struct rb_node *next = rb_first_cached(symbols);
while (next) {
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, symbols);
+ rb_erase_cached(&pos->rb_node, symbols);
symbol__delete(pos);
}
}
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
+void __symbols__insert(struct rb_root_cached *symbols,
+ struct symbol *sym, bool kernel)
{
- struct rb_node **p = &symbols->rb_node;
+ struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 ip = sym->start;
struct symbol *s;
+ bool leftmost = true;
if (kernel) {
const char *name = sym->name;
@@ -318,26 +321,28 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
s = rb_entry(parent, struct symbol, rb_node);
if (ip < s->start)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&sym->rb_node, parent, p);
- rb_insert_color(&sym->rb_node, symbols);
+ rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}
-void symbols__insert(struct rb_root *symbols, struct symbol *sym)
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
__symbols__insert(symbols, sym, false);
}
-static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
+static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
struct rb_node *n;
if (symbols == NULL)
return NULL;
- n = symbols->rb_node;
+ n = symbols->rb_root.rb_node;
while (n) {
struct symbol *s = rb_entry(n, struct symbol, rb_node);
@@ -353,9 +358,9 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
return NULL;
}
-static struct symbol *symbols__first(struct rb_root *symbols)
+static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
- struct rb_node *n = rb_first(symbols);
+ struct rb_node *n = rb_first_cached(symbols);
if (n)
return rb_entry(n, struct symbol, rb_node);
@@ -363,9 +368,9 @@ static struct symbol *symbols__first(struct rb_root *symbols)
return NULL;
}
-static struct symbol *symbols__last(struct rb_root *symbols)
+static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
- struct rb_node *n = rb_last(symbols);
+ struct rb_node *n = rb_last(&symbols->rb_root);
if (n)
return rb_entry(n, struct symbol, rb_node);
@@ -383,11 +388,12 @@ static struct symbol *symbols__next(struct symbol *sym)
return NULL;
}
-static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
+static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
- struct rb_node **p = &symbols->rb_node;
+ struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
struct symbol_name_rb_node *symn, *s;
+ bool leftmost = true;
symn = container_of(sym, struct symbol_name_rb_node, sym);
@@ -396,19 +402,21 @@ static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
if (strcmp(sym->name, s->sym.name) < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&symn->rb_node, parent, p);
- rb_insert_color(&symn->rb_node, symbols);
+ rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}
-static void symbols__sort_by_name(struct rb_root *symbols,
- struct rb_root *source)
+static void symbols__sort_by_name(struct rb_root_cached *symbols,
+ struct rb_root_cached *source)
{
struct rb_node *nd;
- for (nd = rb_first(source); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
symbols__insert_by_name(symbols, pos);
}
@@ -431,7 +439,7 @@ int symbol__match_symbol_name(const char *name, const char *str,
return arch__compare_symbol_names(name, str);
}
-static struct symbol *symbols__find_by_name(struct rb_root *symbols,
+static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
const char *name,
enum symbol_tag_include includes)
{
@@ -441,7 +449,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
if (symbols == NULL)
return NULL;
- n = symbols->rb_node;
+ n = symbols->rb_root.rb_node;
while (n) {
int cmp;
@@ -614,6 +622,7 @@ out:
static bool symbol__is_idle(const char *name)
{
const char * const idle_symbols[] = {
+ "arch_cpu_idle",
"cpu_idle",
"cpu_startup_entry",
"intel_idle",
@@ -643,7 +652,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
{
struct symbol *sym;
struct dso *dso = arg;
- struct rb_root *root = &dso->symbols;
+ struct rb_root_cached *root = &dso->symbols;
if (!symbol_type__filter(type))
return 0;
@@ -680,14 +689,14 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
struct map *curr_map;
struct symbol *pos;
int count = 0;
- struct rb_root old_root = dso->symbols;
- struct rb_root *root = &dso->symbols;
- struct rb_node *next = rb_first(root);
+ struct rb_root_cached old_root = dso->symbols;
+ struct rb_root_cached *root = &dso->symbols;
+ struct rb_node *next = rb_first_cached(root);
if (!kmaps)
return -1;
- *root = RB_ROOT;
+ *root = RB_ROOT_CACHED;
while (next) {
char *module;
@@ -695,8 +704,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase_init(&pos->rb_node, &old_root);
-
+ rb_erase_cached(&pos->rb_node, &old_root);
+ RB_CLEAR_NODE(&pos->rb_node);
module = strchr(pos->name, '\t');
if (module)
*module = '\0';
@@ -709,6 +718,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
}
pos->start -= curr_map->start - curr_map->pgoff;
+ if (pos->end > curr_map->end)
+ pos->end = curr_map->end;
if (pos->end)
pos->end -= curr_map->start - curr_map->pgoff;
symbols__insert(&curr_map->dso->symbols, pos);
@@ -733,8 +744,8 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
struct map *curr_map = initial_map;
struct symbol *pos;
int count = 0, moved = 0;
- struct rb_root *root = &dso->symbols;
- struct rb_node *next = rb_first(root);
+ struct rb_root_cached *root = &dso->symbols;
+ struct rb_node *next = rb_first_cached(root);
int kernel_range = 0;
bool x86_64;
@@ -848,7 +859,7 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
}
add_symbol:
if (curr_map != initial_map) {
- rb_erase(&pos->rb_node, root);
+ rb_erase_cached(&pos->rb_node, root);
symbols__insert(&curr_map->dso->symbols, pos);
++moved;
} else
@@ -856,7 +867,7 @@ add_symbol:
continue;
discard_symbol:
- rb_erase(&pos->rb_node, root);
+ rb_erase_cached(&pos->rb_node, root);
symbol__delete(pos);
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 14d9d438e7e2..9a8fe012910a 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -5,16 +5,13 @@
#include <linux/types.h>
#include <stdbool.h>
#include <stdint.h>
-#include "map.h"
-#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdio.h>
-#include <byteswap.h>
-#include <libgen.h>
-#include "build-id.h"
-#include "event.h"
+#include "map_symbol.h"
+#include "branch.h"
#include "path.h"
+#include "symbol_conf.h"
#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@@ -24,6 +21,10 @@
#include "dso.h"
+struct map;
+struct map_groups;
+struct option;
+
/*
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
@@ -68,7 +69,7 @@ struct symbol {
};
void symbol__delete(struct symbol *sym);
-void symbols__delete(struct rb_root *symbols);
+void symbols__delete(struct rb_root_cached *symbols);
/* symbols__for_each_entry - iterate over symbols (rb_root)
*
@@ -77,7 +78,7 @@ void symbols__delete(struct rb_root *symbols);
* @nd: the 'struct rb_node *' to use as a temporary storage
*/
#define symbols__for_each_entry(symbols, pos, nd) \
- for (nd = rb_first(symbols); \
+ for (nd = rb_first_cached(symbols); \
nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
nd = rb_next(nd))
@@ -89,69 +90,6 @@ static inline size_t symbol__size(const struct symbol *sym)
struct strlist;
struct intlist;
-struct symbol_conf {
- unsigned short priv_size;
- bool try_vmlinux_path,
- init_annotation,
- force,
- ignore_vmlinux,
- ignore_vmlinux_buildid,
- show_kernel_path,
- use_modules,
- allow_aliases,
- sort_by_name,
- show_nr_samples,
- show_total_period,
- use_callchain,
- cumulate_callchain,
- show_branchflag_count,
- exclude_other,
- show_cpu_utilization,
- initialized,
- kptr_restrict,
- event_group,
- demangle,
- demangle_kernel,
- filter_relative,
- show_hist_headers,
- branch_callstack,
- has_filter,
- show_ref_callgraph,
- hide_unresolved,
- raw_trace,
- report_hierarchy,
- inline_name;
- const char *vmlinux_name,
- *kallsyms_name,
- *source_prefix,
- *field_sep,
- *graph_function;
- const char *default_guest_vmlinux_name,
- *default_guest_kallsyms,
- *default_guest_modules;
- const char *guestmount;
- const char *dso_list_str,
- *comm_list_str,
- *pid_list_str,
- *tid_list_str,
- *sym_list_str,
- *col_width_list_str,
- *bt_stop_list_str;
- struct strlist *dso_list,
- *comm_list,
- *sym_list,
- *dso_from_list,
- *dso_to_list,
- *sym_from_list,
- *sym_to_list,
- *bt_stop_list;
- struct intlist *pid_list,
- *tid_list;
- const char *symfs;
-};
-
-extern struct symbol_conf symbol_conf;
-
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
@@ -178,19 +116,6 @@ struct ref_reloc_sym {
u64 unrelocated_addr;
};
-struct map_symbol {
- struct map *map;
- struct symbol *sym;
-};
-
-struct addr_map_symbol {
- struct map *map;
- struct symbol *sym;
- u64 addr;
- u64 al_addr;
- u64 phys_addr;
-};
-
struct branch_info {
struct addr_map_symbol from;
struct addr_map_symbol to;
@@ -310,10 +235,11 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss);
char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name);
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel);
-void symbols__insert(struct rb_root *symbols, struct symbol *sym);
-void symbols__fixup_duplicate(struct rb_root *symbols);
-void symbols__fixup_end(struct rb_root *symbols);
+void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
+ bool kernel);
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
+void symbols__fixup_duplicate(struct rb_root_cached *symbols);
+void symbols__fixup_end(struct rb_root_cached *symbols);
void map_groups__fixup_end(struct map_groups *mg);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
new file mode 100644
index 000000000000..fffea68c1203
--- /dev/null
+++ b/tools/perf/util/symbol_conf.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_SYMBOL_CONF
+#define __PERF_SYMBOL_CONF 1
+
+#include <stdbool.h>
+
+struct strlist;
+struct intlist;
+
+struct symbol_conf {
+ unsigned short priv_size;
+ bool try_vmlinux_path,
+ init_annotation,
+ force,
+ ignore_vmlinux,
+ ignore_vmlinux_buildid,
+ show_kernel_path,
+ use_modules,
+ allow_aliases,
+ sort_by_name,
+ show_nr_samples,
+ show_total_period,
+ use_callchain,
+ cumulate_callchain,
+ show_branchflag_count,
+ exclude_other,
+ show_cpu_utilization,
+ initialized,
+ kptr_restrict,
+ event_group,
+ demangle,
+ demangle_kernel,
+ filter_relative,
+ show_hist_headers,
+ branch_callstack,
+ has_filter,
+ show_ref_callgraph,
+ hide_unresolved,
+ raw_trace,
+ report_hierarchy,
+ inline_name;
+ const char *vmlinux_name,
+ *kallsyms_name,
+ *source_prefix,
+ *field_sep,
+ *graph_function;
+ const char *default_guest_vmlinux_name,
+ *default_guest_kallsyms,
+ *default_guest_modules;
+ const char *guestmount;
+ const char *dso_list_str,
+ *comm_list_str,
+ *pid_list_str,
+ *tid_list_str,
+ *sym_list_str,
+ *col_width_list_str,
+ *bt_stop_list_str;
+ struct strlist *dso_list,
+ *comm_list,
+ *sym_list,
+ *dso_from_list,
+ *dso_to_list,
+ *sym_from_list,
+ *sym_to_list,
+ *bt_stop_list;
+ struct intlist *pid_list,
+ *tid_list;
+ const char *symfs;
+};
+
+extern struct symbol_conf symbol_conf;
+
+#endif // __PERF_SYMBOL_CONF
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index ed0205cc7942..02e89b02c2ce 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -3,6 +3,7 @@
#include <inttypes.h>
#include <stdio.h>
+#include "map.h"
#include "symbol.h"
size_t symbol__fprintf(struct symbol *sym, FILE *fp)
@@ -64,7 +65,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
struct rb_node *nd;
struct symbol_name_rb_node *pos;
- for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
fprintf(fp, "%s\n", pos->sym.name);
}
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index d52f27f373ce..a8b45168513c 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -20,6 +20,7 @@
#include "thread.h"
#include "event.h"
#include "machine.h"
+#include "env.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
@@ -29,6 +30,19 @@
#define STACK_GROWTH 2048
+/*
+ * State of retpoline detection.
+ *
+ * RETPOLINE_NONE: no retpoline detection
+ * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
+ * X86_RETPOLINE_DETECTED: x86 retpoline detected
+ */
+enum retpoline_state_t {
+ RETPOLINE_NONE,
+ X86_RETPOLINE_POSSIBLE,
+ X86_RETPOLINE_DETECTED,
+};
+
/**
* struct thread_stack_entry - thread stack entry.
* @ret_addr: return address
@@ -38,6 +52,7 @@
* @cp: call path
* @no_call: a 'call' was not seen
* @trace_end: a 'call' but trace ended
+ * @non_call: a branch but not a 'call' to the start of a different symbol
*/
struct thread_stack_entry {
u64 ret_addr;
@@ -47,6 +62,7 @@ struct thread_stack_entry {
struct call_path *cp;
bool no_call;
bool trace_end;
+ bool non_call;
};
/**
@@ -62,6 +78,7 @@ struct thread_stack_entry {
* @crp: call/return processor
* @comm: current comm
* @arr_sz: size of array if this is the first element of an array
+ * @rstate: used to detect retpolines
*/
struct thread_stack {
struct thread_stack_entry *stack;
@@ -74,6 +91,7 @@ struct thread_stack {
struct call_return_processor *crp;
struct comm *comm;
unsigned int arr_sz;
+ enum retpoline_state_t rstate;
};
/*
@@ -113,10 +131,16 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
if (err)
return err;
- if (thread->mg && thread->mg->machine)
- ts->kernel_start = machine__kernel_start(thread->mg->machine);
- else
+ if (thread->mg && thread->mg->machine) {
+ struct machine *machine = thread->mg->machine;
+ const char *arch = perf_env__arch(machine->env);
+
+ ts->kernel_start = machine__kernel_start(machine);
+ if (!strcmp(arch, "x86"))
+ ts->rstate = X86_RETPOLINE_POSSIBLE;
+ } else {
ts->kernel_start = 1ULL << 63;
+ }
ts->crp = crp;
return 0;
@@ -268,6 +292,8 @@ static int thread_stack__call_return(struct thread *thread,
cr.flags |= CALL_RETURN_NO_CALL;
if (no_return)
cr.flags |= CALL_RETURN_NO_RETURN;
+ if (tse->non_call)
+ cr.flags |= CALL_RETURN_NON_CALL;
return crp->process(&cr, crp->data);
}
@@ -493,6 +519,9 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
struct thread_stack_entry *tse;
int err;
+ if (!cp)
+ return -ENOMEM;
+
if (ts->cnt == ts->sz) {
err = thread_stack__grow(ts);
if (err)
@@ -507,6 +536,7 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
tse->cp = cp;
tse->no_call = no_call;
tse->trace_end = trace_end;
+ tse->non_call = false;
return 0;
}
@@ -528,14 +558,16 @@ static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
timestamp, ref, false);
}
- if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
+ if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
+ !ts->stack[ts->cnt - 1].non_call) {
return thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
} else {
size_t i = ts->cnt - 1;
while (i--) {
- if (ts->stack[i].ret_addr != ret_addr)
+ if (ts->stack[i].ret_addr != ret_addr ||
+ ts->stack[i].non_call)
continue;
i += 1;
while (ts->cnt > i) {
@@ -576,8 +608,6 @@ static int thread_stack__bottom(struct thread_stack *ts,
cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
true, false);
@@ -590,36 +620,36 @@ static int thread_stack__no_call_return(struct thread *thread,
struct addr_location *to_al, u64 ref)
{
struct call_path_root *cpr = ts->crp->cpr;
+ struct call_path *root = &cpr->call_path;
+ struct symbol *fsym = from_al->sym;
+ struct symbol *tsym = to_al->sym;
struct call_path *cp, *parent;
u64 ks = ts->kernel_start;
+ u64 addr = sample->addr;
+ u64 tm = sample->time;
+ u64 ip = sample->ip;
int err;
- if (sample->ip >= ks && sample->addr < ks) {
+ if (ip >= ks && addr < ks) {
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
- sample->time, ref,
- true);
+ tm, ref, true);
if (err)
return err;
}
/* If the stack is empty, push the userspace address */
if (!ts->cnt) {
- cp = call_path__findnew(cpr, &cpr->call_path,
- to_al->sym, sample->addr,
- ts->kernel_start);
- if (!cp)
- return -ENOMEM;
- return thread_stack__push_cp(ts, 0, sample->time, ref,
- cp, true, false);
+ cp = call_path__findnew(cpr, root, tsym, addr, ks);
+ return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
+ false);
}
- } else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
+ } else if (thread_stack__in_kernel(ts) && ip < ks) {
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
- sample->time, ref,
- true);
+ tm, ref, true);
if (err)
return err;
}
@@ -628,21 +658,59 @@ static int thread_stack__no_call_return(struct thread *thread,
if (ts->cnt)
parent = ts->stack[ts->cnt - 1].cp;
else
- parent = &cpr->call_path;
+ parent = root;
- /* This 'return' had no 'call', so push and pop top of stack */
- cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
- ts->kernel_start);
- if (!cp)
- return -ENOMEM;
+ if (parent->sym == from_al->sym) {
+ /*
+ * At the bottom of the stack, assume the missing 'call' was
+ * before the trace started. So, pop the current symbol and push
+ * the 'to' symbol.
+ */
+ if (ts->cnt == 1) {
+ err = thread_stack__call_return(thread, ts, --ts->cnt,
+ tm, ref, false);
+ if (err)
+ return err;
+ }
+
+ if (!ts->cnt) {
+ cp = call_path__findnew(cpr, root, tsym, addr, ks);
- err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
- true, false);
+ return thread_stack__push_cp(ts, addr, tm, ref, cp,
+ true, false);
+ }
+
+ /*
+ * Otherwise assume the 'return' is being used as a jump (e.g.
+ * retpoline) and just push the 'to' symbol.
+ */
+ cp = call_path__findnew(cpr, parent, tsym, addr, ks);
+
+ err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
+ if (!err)
+ ts->stack[ts->cnt - 1].non_call = true;
+
+ return err;
+ }
+
+ /*
+ * Assume 'parent' has not yet returned, so push 'to', and then push and
+ * pop 'from'.
+ */
+
+ cp = call_path__findnew(cpr, parent, tsym, addr, ks);
+
+ err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
if (err)
return err;
- return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
- to_al->sym);
+ cp = call_path__findnew(cpr, cp, fsym, ip, ks);
+
+ err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
+ if (err)
+ return err;
+
+ return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
}
static int thread_stack__trace_begin(struct thread *thread,
@@ -680,8 +748,6 @@ static int thread_stack__trace_end(struct thread_stack *ts,
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
ret_addr = sample->ip + sample->insn_len;
@@ -689,6 +755,70 @@ static int thread_stack__trace_end(struct thread_stack *ts,
false, true);
}
+static bool is_x86_retpoline(const char *name)
+{
+ const char *p = strstr(name, "__x86_indirect_thunk_");
+
+ return p == name || !strcmp(name, "__indirect_thunk_start");
+}
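/*
 * A few example inputs for the name check above, with the function body
 * duplicated so the snippet stands alone; the thunk register suffix is
 * illustrative.  The prefix match deliberately requires the thunk name at
 * the very start of the symbol name.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>

static bool is_x86_retpoline_sketch(const char *name)
{
	const char *p = strstr(name, "__x86_indirect_thunk_");

	return p == name || !strcmp(name, "__indirect_thunk_start");
}

int main(void)
{
	assert(is_x86_retpoline_sketch("__x86_indirect_thunk_rax"));
	assert(is_x86_retpoline_sketch("__indirect_thunk_start"));
	assert(!is_x86_retpoline_sketch("not__x86_indirect_thunk_rax"));
	assert(!is_x86_retpoline_sketch("do_syscall_64"));
	return 0;
}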
+
+/*
+ * x86 retpoline functions pollute the call graph. This function removes them.
+ * This does not handle function return thunks, nor is there any improvement
+ * for the handling of inline thunks or extern thunks.
+ */
+static int thread_stack__x86_retpoline(struct thread_stack *ts,
+ struct perf_sample *sample,
+ struct addr_location *to_al)
+{
+ struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
+ struct call_path_root *cpr = ts->crp->cpr;
+ struct symbol *sym = tse->cp->sym;
+ struct symbol *tsym = to_al->sym;
+ struct call_path *cp;
+
+ if (sym && is_x86_retpoline(sym->name)) {
+ /*
+ * This is an x86 retpoline function. It pollutes the call graph by
+ * showing up everywhere there is an indirect branch, but does
+ * not itself mean anything. Here the top-of-stack is removed
+ * by decrementing the stack count, and then further down, the
+ * resulting top-of-stack is replaced with the actual target.
+ * The result is that the retpoline functions will no longer
+ * appear in the call graph. Note this only affects the call
+ * graph, since all the original branches are left unchanged.
+ */
+ ts->cnt -= 1;
+ sym = ts->stack[ts->cnt - 2].cp->sym;
+ if (sym && sym == tsym && to_al->addr != tsym->start) {
+ /*
+ * Target is back to the middle of the symbol we came
+ * from so assume it is an indirect jmp and forget it
+ * altogether.
+ */
+ ts->cnt -= 1;
+ return 0;
+ }
+ } else if (sym && sym == tsym) {
+ /*
+ * Target is back to the symbol we came from so assume it is an
+ * indirect jmp and forget it altogether.
+ */
+ ts->cnt -= 1;
+ return 0;
+ }
+
+ cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
+ sample->addr, ts->kernel_start);
+ if (!cp)
+ return -ENOMEM;
+
+ /* Replace the top-of-stack with the actual target */
+ ts->stack[ts->cnt - 1].cp = cp;
+
+ return 0;
+}
+
int thread_stack__process(struct thread *thread, struct comm *comm,
struct perf_sample *sample,
struct addr_location *from_al,
@@ -696,6 +826,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
struct call_return_processor *crp)
{
struct thread_stack *ts = thread__stack(thread, sample->cpu);
+ enum retpoline_state_t rstate;
int err = 0;
if (ts && !ts->crp) {
@@ -711,6 +842,10 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
ts->comm = comm;
}
+ rstate = ts->rstate;
+ if (rstate == X86_RETPOLINE_DETECTED)
+ ts->rstate = X86_RETPOLINE_POSSIBLE;
+
/* Flush stack on exec */
if (ts->comm != comm && thread->pid_ == thread->tid) {
err = __thread_stack__flush(thread, ts);
@@ -745,14 +880,27 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
to_al->sym, sample->addr,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
cp, false, trace_end);
+
+ /*
+ * A call to the same symbol but not the start of the symbol,
+ * may be the start of a x86 retpoline.
+ */
+ if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
+ from_al->sym == to_al->sym &&
+ to_al->addr != to_al->sym->start)
+ ts->rstate = X86_RETPOLINE_DETECTED;
+
} else if (sample->flags & PERF_IP_FLAG_RETURN) {
if (!sample->ip || !sample->addr)
return 0;
+ /* x86 retpoline 'return' doesn't match the stack */
+ if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
+ ts->stack[ts->cnt - 1].ret_addr != sample->addr)
+ return thread_stack__x86_retpoline(ts, sample, to_al);
+
err = thread_stack__pop_cp(thread, ts, sample->addr,
sample->time, ref, from_al->sym);
if (err) {
@@ -765,6 +913,25 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
err = thread_stack__trace_begin(thread, ts, sample->time, ref);
} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
err = thread_stack__trace_end(ts, sample, ref);
+ } else if (sample->flags & PERF_IP_FLAG_BRANCH &&
+ from_al->sym != to_al->sym && to_al->sym &&
+ to_al->addr == to_al->sym->start) {
+ struct call_path_root *cpr = ts->crp->cpr;
+ struct call_path *cp;
+
+ /*
+ * The compiler might optimize a call/ret combination by making
+ * it a jmp. Make that visible by recording on the stack a
+ * branch to the start of a different symbol. Note that this means
+ * when a ret pops the stack, all jmps must be popped off first.
+ */
+ cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
+ to_al->sym, sample->addr,
+ ts->kernel_start);
+ err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
+ false);
+ if (!err)
+ ts->stack[ts->cnt - 1].non_call = true;
}
return err;
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index 1f626f4a1c40..b7c04e19ad41 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -35,10 +35,13 @@ struct call_path;
*
* CALL_RETURN_NO_CALL: 'return' but no matching 'call'
* CALL_RETURN_NO_RETURN: 'call' but no matching 'return'
+ * CALL_RETURN_NON_CALL: a branch but not a 'call' to the start of a different
+ * symbol
*/
enum {
CALL_RETURN_NO_CALL = 1 << 0,
CALL_RETURN_NO_RETURN = 1 << 1,
+ CALL_RETURN_NON_CALL = 1 << 2,
};
/**
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index c83372329f89..4c179fef442d 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -12,6 +12,7 @@
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
+#include "symbol.h"
#include "unwind.h"
#include <api/fs/fs.h>
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 712dd48cc0ca..8276ffeec556 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,14 +5,18 @@
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include <linux/list.h>
+#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
-#include "symbol.h"
-#include "map.h"
+#include "srccode.h"
+#include "symbol_conf.h"
#include <strlist.h>
#include <intlist.h>
#include "rwsem.h"
+struct addr_location;
+struct map;
+struct namespaces_event;
struct thread_stack;
struct unwind_libunwind_ops;
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 56e4ca54020a..250391672f9f 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -53,7 +53,10 @@ struct perf_tool {
itrace_start,
context_switch,
throttle,
- unthrottle;
+ unthrottle,
+ ksymbol,
+ bpf_event;
+
event_attr_op attr;
event_attr_op event_update;
event_op2 tracing_data;
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 5eff9bfc5758..407d0167b942 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -8,6 +8,8 @@
#include "unwind.h"
#include "unwind-libdw.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "thread.h"
#include <linux/types.h>
#include "event.h"
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 79f521a552cf..f3c666a84e4d 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -34,6 +34,7 @@
#include "session.h"
#include "perf_regs.h"
#include "unwind.h"
+#include "map.h"
#include "symbol.h"
#include "util.h"
#include "debug.h"
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index b029a5e9ae49..9778b3133b77 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
+#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 093352e93d50..d388f80d8703 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -2,6 +2,7 @@
#include "../perf.h"
#include "util.h"
#include "debug.h"
+#include "namespaces.h"
#include <api/fs/fs.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -20,6 +21,7 @@
#include <linux/time64.h>
#include <unistd.h>
#include "strlist.h"
+#include "string2.h"
/*
* XXX We need to find a better place for these things...
@@ -116,23 +118,67 @@ int mkdir_p(char *path, mode_t mode)
return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}
-int rm_rf(const char *path)
+static bool match_pat(char *file, const char **pat)
+{
+ int i = 0;
+
+ if (!pat)
+ return true;
+
+ while (pat[i]) {
+ if (strglobmatch(file, pat[i]))
+ return true;
+
+ i++;
+ }
+
+ return false;
+}
+
+/*
+ * The depth specifies how deep the removal will go:
+ * 0 - will remove only files directly under the 'path' directory
+ * 1 .. x - will descend x levels deep under the 'path' directory
+ *
+ * If specified, pat is a NULL-terminated array of string patterns that
+ * are checked against every file/directory found. Only matching ones
+ * are removed.
+ *
+ * The function returns:
+ * 0 on success
+ * -1 on removal failure with errno set
+ * -2 on pattern failure
+ */
+static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
{
DIR *dir;
- int ret = 0;
+ int ret;
struct dirent *d;
char namebuf[PATH_MAX];
+ struct stat statbuf;
+ /* Do not fail if there's no file. */
+ ret = lstat(path, &statbuf);
+ if (ret)
+ return 0;
+
+ /* Try to remove any file we get. */
+ if (!(statbuf.st_mode & S_IFDIR))
+ return unlink(path);
+
+ /* We have a directory in path. */
dir = opendir(path);
if (dir == NULL)
- return 0;
+ return -1;
while ((d = readdir(dir)) != NULL && !ret) {
- struct stat statbuf;
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
+ if (!match_pat(d->d_name, pat))
+ return -2;
+
scnprintf(namebuf, sizeof(namebuf), "%s/%s",
path, d->d_name);
@@ -144,7 +190,7 @@ int rm_rf(const char *path)
}
if (S_ISDIR(statbuf.st_mode))
- ret = rm_rf(namebuf);
+ ret = depth ? rm_rf_depth_pat(namebuf, depth - 1, pat) : 0;
else
ret = unlink(namebuf);
}
@@ -156,6 +202,22 @@ int rm_rf(const char *path)
return rmdir(path);
}
+int rm_rf_perf_data(const char *path)
+{
+ const char *pat[] = {
+ "header",
+ "data.*",
+ NULL,
+ };
+
+ return rm_rf_depth_pat(path, 0, pat);
+}
+
+int rm_rf(const char *path)
+{
+ return rm_rf_depth_pat(path, INT_MAX, NULL);
+}
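/*
 * Standalone sketch of the pattern filter used by rm_rf_perf_data() above:
 * only directory entries matching one of the glob patterns are eligible for
 * removal, anything else makes the walk fail with -2.  fnmatch() stands in
 * for perf's strglobmatch() helper here, and the helper name is made up.
 */
#include <assert.h>
#include <fnmatch.h>
#include <stdbool.h>

static bool match_pat_sketch(const char *file, const char **pat)
{
	if (!pat)
		return true;	/* no patterns: everything matches (plain rm_rf) */
	for (; *pat; pat++)
		if (!fnmatch(*pat, file, 0))
			return true;
	return false;
}

int main(void)
{
	const char *pat[] = { "header", "data.*", NULL };

	assert(match_pat_sketch("data.7", pat));
	assert(match_pat_sketch("header", pat));
	assert(!match_pat_sketch("stray-file", pat));	/* would yield -2 */
	assert(match_pat_sketch("anything", NULL));
	return 0;
}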
+
/* A filter which removes dot files */
bool lsdir_no_dot_filter(const char *name __maybe_unused, struct dirent *d)
{
@@ -506,3 +568,13 @@ out:
return tip;
}
+
+char *perf_exe(char *buf, int len)
+{
+ int n = readlink("/proc/self/exe", buf, len);
+ if (n > 0) {
+ buf[n] = 0;
+ return buf;
+ }
+ return strcpy(buf, "perf");
+}
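
For illustration only (not part of the patch): a minimal standalone sketch of the depth- and pattern-limited removal that rm_rf_depth_pat() above implements, using libc fnmatch(3) in place of perf's strglobmatch() and a made-up "perf.data.dir" directory as the target.

/*
 * Standalone sketch, not perf code: remove files under 'path' that match
 * one of the NULL-terminated glob patterns, descending at most 'depth'
 * directory levels, in the spirit of rm_rf_depth_pat() above.
 */
#include <dirent.h>
#include <fnmatch.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int match_any(const char *name, const char **pat)
{
	if (!pat)
		return 1;
	for (; *pat; pat++)
		if (!fnmatch(*pat, name, 0))
			return 1;
	return 0;
}

static int rm_depth_pat(const char *path, int depth, const char **pat)
{
	char namebuf[PATH_MAX];
	struct dirent *d;
	struct stat st;
	int ret = 0;
	DIR *dir;

	/* Do not fail if there's nothing to remove. */
	if (lstat(path, &st))
		return 0;

	/* Plain file (or symlink): just unlink it. */
	if (!S_ISDIR(st.st_mode))
		return unlink(path);

	dir = opendir(path);
	if (!dir)
		return -1;

	while (!ret && (d = readdir(dir)) != NULL) {
		if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
			continue;
		if (!match_any(d->d_name, pat)) {
			ret = -2;	/* unexpected entry: pattern failure */
			break;
		}
		snprintf(namebuf, sizeof(namebuf), "%s/%s", path, d->d_name);
		if (lstat(namebuf, &st)) {
			ret = -1;
			break;
		}
		if (S_ISDIR(st.st_mode))
			ret = depth ? rm_depth_pat(namebuf, depth - 1, pat) : 0;
		else
			ret = unlink(namebuf);
	}
	closedir(dir);
	return ret ? ret : rmdir(path);
}

int main(void)
{
	/* Mirrors rm_rf_perf_data(); "perf.data.dir" is a hypothetical name. */
	const char *pat[] = { "header", "data.*", NULL };
	int ret = rm_depth_pat("perf.data.dir", 0, pat);

	if (ret)
		fprintf(stderr, "removal failed: %d\n", ret);
	return ret ? 1 : 0;
}
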
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index ece040b799f6..09c1b0f91f65 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -31,6 +31,7 @@ struct strlist;
int mkdir_p(char *path, mode_t mode);
int rm_rf(const char *path);
+int rm_rf_perf_data(const char *path);
struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
bool lsdir_no_dot_filter(const char *name, struct dirent *d);
int copyfile(const char *from, const char *to);
@@ -76,6 +77,8 @@ extern bool perf_singlethreaded;
void perf_set_singlethreaded(void);
void perf_set_multithreaded(void);
+char *perf_exe(char *buf, int len);
+
#ifndef O_CLOEXEC
#ifdef __sparc__
#define O_CLOEXEC 0x400000
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 741af209b19d..5031b7b22bbd 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -11,6 +11,7 @@
#include "vdso.h"
#include "util.h"
+#include "map.h"
#include "symbol.h"
#include "machine.h"
#include "thread.h"
@@ -18,10 +19,10 @@
#include "debug.h"
/*
- * Include definition of find_vdso_map() also used in perf-read-vdso.c for
+ * Include definition of find_map() also used in perf-read-vdso.c for
* building perf-read-vdso32 and perf-read-vdsox32.
*/
-#include "find-vdso-map.c"
+#include "find-map.c"
#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX"
@@ -76,7 +77,7 @@ static char *get_file(struct vdso_file *vdso_file)
if (vdso_file->found)
return vdso_file->temp_file_name;
- if (vdso_file->error || find_vdso_map(&start, &end))
+ if (vdso_file->error || find_map(&start, &end, VDSO__MAP_NAME))
return NULL;
size = end - start;
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 902ce6384f57..512ad7c09b13 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -6,7 +6,6 @@
#include <sys/mman.h>
#include <zlib.h>
#include <linux/compiler.h>
-#include <unistd.h>
#include "util/compress.h"
#include "util/util.h"
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index ff8025de8237..28c11c6b4d06 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -3,7 +3,7 @@
*
* Module Name: cmfsize - Common get file size function
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 7be89b873661..6b41d8b64a00 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -3,7 +3,7 @@
*
* Module Name: getopt
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index a20c703f8b7d..2a1fd9182f94 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -3,7 +3,7 @@
*
* Module Name: oslinuxtbl - Linux OSL for obtaining ACPI tables
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index 4fd44e218a83..30913f124dd5 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixdir - Unix directory access interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 93d8359309b6..29dfb47adfeb 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixmap - Unix OSL for file mappings
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 8a88e87778bd..83d3b3b829b8 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixxf - UNIX OSL interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index f69f1f559743..2eb0aaa4f462 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -3,7 +3,7 @@
*
* Module Name: acpidump.h - Include file for acpi_dump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index a9848de32d8e..e256c2ac5ddc 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -3,7 +3,7 @@
*
* Module Name: apdump - Dump routines for ACPI tables (acpidump)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index e86207e5afcf..49972bc78bc5 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -3,7 +3,7 @@
*
* Module Name: apfiles - File-related functions for acpidump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 2d9b94b631cb..d8f1b57537d3 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -3,7 +3,7 @@
*
* Module Name: apmain - Main module for the acpidump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/testing/nvdimm/dimm_devs.c b/tools/testing/nvdimm/dimm_devs.c
index e75238404555..2d4baf57822f 100644
--- a/tools/testing/nvdimm/dimm_devs.c
+++ b/tools/testing/nvdimm/dimm_devs.c
@@ -18,8 +18,8 @@ ssize_t security_show(struct device *dev,
* For the test version we need to poll the "hardware" in order
* to get the updated status for unlock testing.
*/
- nvdimm->sec.state = nvdimm_security_state(nvdimm, false);
- nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, true);
+ nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+ nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER);
switch (nvdimm->sec.state) {
case NVDIMM_SECURITY_DISABLED:
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 1a2bd15c5b6e..6a94f07c4164 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -10,6 +10,7 @@ TARGETS += drivers/dma-buf
TARGETS += efivarfs
TARGETS += exec
TARGETS += filesystems
+TARGETS += filesystems/binderfs
TARGETS += firmware
TARGETS += ftrace
TARGETS += futex
@@ -47,6 +48,7 @@ TARGETS += sysctl
ifneq (1, $(quicktest))
TARGETS += timers
endif
+TARGETS += tmpfs
TARGETS += user
TARGETS += vm
TARGETS += x86
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 4a9785043a39..3b74d23fffab 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -14,6 +14,7 @@ feature
test_libbpf_open
test_sock
test_sock_addr
+test_sock_fields
urandom_read
test_btf
test_sockmap
@@ -28,3 +29,5 @@ flow_dissector_load
test_netcnt
test_section_names
test_tcpnotify_user
+test_libbpf
+alu32
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 73aa6d8f4a2f..518cd587cd63 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -10,36 +10,33 @@ ifneq ($(wildcard $(GENHDR)),)
GENFLAGS := -DHAVE_GENHDR
endif
+CLANG ?= clang
+LLC ?= llc
+LLVM_OBJCOPY ?= llvm-objcopy
+LLVM_READELF ?= llvm-readelf
+BTF_PAHOLE ?= pahole
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
LDLIBS += -lcap -lelf -lrt -lpthread
-TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
-all: $(TEST_CUSTOM_PROGS)
-
-$(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
- $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id
-
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
- test_netcnt test_tcpnotify_user
-
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
- test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
- sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
- test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
- test_tcpnotify_kern.o \
- sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
- sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o test_adjust_tail.o \
- test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \
- test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \
- test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
- get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
- test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
- test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \
- xdp_dummy.o test_map_in_map.o
+ test_netcnt test_tcpnotify_user test_sock_fields
+
+BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
+TEST_GEN_FILES = $(BPF_OBJ_FILES)
+
+# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
+# contains both ALU32 and JMP32 instructions.
+SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
+ $(CLANG) -target bpf -O2 -emit-llvm -S -x c - -o - | \
+ $(LLC) -mattr=+alu32 -mcpu=v3 2>&1 | \
+ grep 'if w')
+ifneq ($(SUBREG_CODEGEN),)
+TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES))
+endif
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -53,9 +50,13 @@ TEST_PROGS := test_kmod.sh \
test_lirc_mode2.sh \
test_skb_cgroup_id.sh \
test_flow_dissector.sh \
- test_xdp_vlan.sh
+ test_xdp_vlan.sh \
+ test_lwt_ip_encap.sh
-TEST_PROGS_EXTENDED := with_addr.sh
+TEST_PROGS_EXTENDED := with_addr.sh \
+ with_tunnels.sh \
+ tcp_client.py \
+ tcp_server.py
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
@@ -63,6 +64,13 @@ TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_use
include ../lib.mk
+# NOTE: $(OUTPUT) won't get a default value if used before lib.mk
+TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
+all: $(TEST_CUSTOM_PROGS)
+
+$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
+ $(CC) -o $@ -static $< -Wl,--build-id
+
BPFOBJ := $(OUTPUT)/libbpf.a
$(TEST_GEN_PROGS): $(BPFOBJ)
@@ -81,6 +89,7 @@ $(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
$(OUTPUT)/test_netcnt: cgroup_helpers.c
+$(OUTPUT)/test_sock_fields: cgroup_helpers.c
.PHONY: force
@@ -90,11 +99,6 @@ force:
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
-CLANG ?= clang
-LLC ?= llc
-LLVM_OBJCOPY ?= llvm-objcopy
-BTF_PAHOLE ?= pahole
-
PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
# Let newer LLVM versions transparently probe the kernel for availability
@@ -124,12 +128,15 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
$(OUTPUT)/test_queue_map.o: test_queue_stack_map.h
$(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
+$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
+$(OUTPUT)/test_progs.o: flow_dissector_load.h
+
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
- readelf -S ./llvm_btf_verify.o | grep BTF; \
+ $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
@@ -146,9 +153,38 @@ endif
endif
endif
+ifneq ($(SUBREG_CODEGEN),)
+ALU32_BUILD_DIR = $(OUTPUT)/alu32
+TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
+$(ALU32_BUILD_DIR):
+ mkdir -p $@
+
+$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read
+ cp $< $@
+
+$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/urandom_read
+ $(CC) $(CFLAGS) -o $(ALU32_BUILD_DIR)/test_progs_32 $< \
+ trace_helpers.c prog_tests/*.c $(OUTPUT)/libbpf.a $(LDLIBS)
+
+$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H)
+$(ALU32_BUILD_DIR)/test_progs_32: CFLAGS += -I$(OUTPUT)
+$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
+
+$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/test_progs_32
+ $(CLANG) $(CLANG_FLAGS) \
+ -O2 -target bpf -emit-llvm -c $< -o - | \
+ $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
+ -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+endif
+endif
+
# Have one program compiled without "-target bpf" to test whether libbpf loads
# it successfully
-$(OUTPUT)/test_xdp.o: test_xdp.c
+$(OUTPUT)/test_xdp.o: progs/test_xdp.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
@@ -156,7 +192,7 @@ ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
-$(OUTPUT)/%.o: %.c
+$(OUTPUT)/%.o: progs/%.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
@@ -164,4 +200,38 @@ ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
+PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h
+$(OUTPUT)/test_progs: $(PROG_TESTS_H)
+$(OUTPUT)/test_progs: CFLAGS += -I$(OUTPUT)
+$(OUTPUT)/test_progs: prog_tests/*.c
+
+PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
+$(PROG_TESTS_H): $(PROG_TESTS_FILES)
+ $(shell ( cd prog_tests/
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef DECLARE'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@extern void test_\1(void);@'; \
+ echo '#endif'; \
+ echo '#ifdef CALL'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@test_\1();@'; \
+ echo '#endif' \
+ ) > $(PROG_TESTS_H))
+
+VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
+$(OUTPUT)/test_verifier: $(VERIFIER_TESTS_H)
+$(OUTPUT)/test_verifier: CFLAGS += -I$(OUTPUT)
+
+VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
+$(OUTPUT)/verifier/tests.h: $(VERIFIER_TEST_FILES)
+ $(shell ( cd verifier/
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef FILL_ARRAY'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\(.*\)@#include \"\1\"@'; \
+ echo '#endif' \
+ ) > $(VERIFIER_TESTS_H))
+
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \
+ $(VERIFIER_TESTS_H) $(PROG_TESTS_H)
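
For illustration only: a standalone sketch of the generated-header mechanism wired up by the $(PROG_TESTS_H) rule above. The commented block mirrors what the sed pipeline would emit for two of the prog_tests/ files added in this patch; the real header is presumably included twice by test_progs.c (not shown in this hunk), once with DECLARE and once with CALL defined.

/*
 * Standalone sketch of the DECLARE/CALL pattern behind prog_tests/tests.h.
 * The stubs below stand in for prog_tests/map_lock.c and pkt_access.c.
 */
#include <stdio.h>

void test_map_lock(void)   { puts("running test_map_lock"); }
void test_pkt_access(void) { puts("running test_pkt_access"); }

/* The generated $(OUTPUT)/prog_tests/tests.h would look roughly like:
 *
 *	#ifdef DECLARE
 *	extern void test_map_lock(void);
 *	extern void test_pkt_access(void);
 *	#endif
 *	#ifdef CALL
 *	test_map_lock();
 *	test_pkt_access();
 *	#endif
 */

int main(void)
{
	/* test_progs.c effectively expands the CALL half of the header here. */
	test_map_lock();
	test_pkt_access();
	return 0;
}
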
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 6c77cf7bedce..c9433a496d54 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -172,6 +172,16 @@ static int (*bpf_skb_vlan_pop)(void *ctx) =
(void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
(void *) BPF_FUNC_rc_pointer_rel;
+static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
+ (void *) BPF_FUNC_spin_lock;
+static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
+ (void *) BPF_FUNC_spin_unlock;
+static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_sk_fullsock;
+static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_tcp_sock;
+static int (*bpf_skb_ecn_set_ce)(void *ctx) =
+ (void *) BPF_FUNC_skb_ecn_set_ce;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -224,6 +234,36 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
(void *) BPF_FUNC_skb_change_head;
static int (*bpf_skb_pull_data)(void *, int len) =
(void *) BPF_FUNC_skb_pull_data;
+static unsigned int (*bpf_get_cgroup_classid)(void *ctx) =
+ (void *) BPF_FUNC_get_cgroup_classid;
+static unsigned int (*bpf_get_route_realm)(void *ctx) =
+ (void *) BPF_FUNC_get_route_realm;
+static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
+ (void *) BPF_FUNC_skb_change_proto;
+static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
+ (void *) BPF_FUNC_skb_change_type;
+static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
+ (void *) BPF_FUNC_get_hash_recalc;
+static unsigned long long (*bpf_get_current_task)(void *ctx) =
+ (void *) BPF_FUNC_get_current_task;
+static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
+ (void *) BPF_FUNC_skb_change_tail;
+static long long (*bpf_csum_update)(void *ctx, __u32 csum) =
+ (void *) BPF_FUNC_csum_update;
+static void (*bpf_set_hash_invalid)(void *ctx) =
+ (void *) BPF_FUNC_set_hash_invalid;
+static int (*bpf_get_numa_node_id)(void) =
+ (void *) BPF_FUNC_get_numa_node_id;
+static int (*bpf_probe_read_str)(void *ctx, __u32 size,
+ const void *unsafe_ptr) =
+ (void *) BPF_FUNC_probe_read_str;
+static unsigned int (*bpf_get_socket_uid)(void *ctx) =
+ (void *) BPF_FUNC_get_socket_uid;
+static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) =
+ (void *) BPF_FUNC_set_hash;
+static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_skb_adjust_room;
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 315a44fa32af..a29206ebbd13 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
unsigned int start, end, possible_cpus = 0;
char buff[128];
FILE *fp;
- int n;
+ int len, n, i, j = 0;
fp = fopen(fcpu, "r");
if (!fp) {
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
exit(1);
}
- while (fgets(buff, sizeof(buff), fp)) {
- n = sscanf(buff, "%u-%u", &start, &end);
- if (n == 0) {
- printf("Failed to retrieve # possible CPUs!\n");
- exit(1);
- } else if (n == 1) {
- end = start;
+ if (!fgets(buff, sizeof(buff), fp)) {
+ printf("Failed to read %s!\n", fcpu);
+ exit(1);
+ }
+
+ len = strlen(buff);
+ for (i = 0; i <= len; i++) {
+ if (buff[i] == ',' || buff[i] == '\0') {
+ buff[i] = '\0';
+ n = sscanf(&buff[j], "%u-%u", &start, &end);
+ if (n <= 0) {
+ printf("Failed to retrieve # possible CPUs!\n");
+ exit(1);
+ } else if (n == 1) {
+ end = start;
+ }
+ possible_cpus += end - start + 1;
+ j = i + 1;
}
- possible_cpus = start == 0 ? end + 1 : 0;
- break;
}
+
fclose(fp);
return possible_cpus;
@@ -48,4 +58,13 @@ static inline unsigned int bpf_num_possible_cpus(void)
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+
+#ifndef offsetofend
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+#endif
+
#endif /* __BPF_UTIL__ */
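
For illustration only: a standalone sketch of the comma-separated range parsing that the reworked bpf_num_possible_cpus() above applies to /sys/devices/system/cpu/possible (the old code only parsed the first range). The sample string is made up; the real sysfs file ends with a newline, which sscanf() tolerates.

/* Count CPUs in a possible-CPU mask string such as "0-3,5,7-9". */
#include <stdio.h>
#include <string.h>

static unsigned int count_cpus(char *buff)
{
	unsigned int start, end, possible_cpus = 0;
	int len = strlen(buff);
	int i, j = 0, n;

	for (i = 0; i <= len; i++) {
		if (buff[i] == ',' || buff[i] == '\0') {
			buff[i] = '\0';
			n = sscanf(&buff[j], "%u-%u", &start, &end);
			if (n <= 0)
				return 0;	/* malformed entry */
			if (n == 1)
				end = start;	/* single CPU, not a range */
			possible_cpus += end - start + 1;
			j = i + 1;
		}
	}
	return possible_cpus;
}

int main(void)
{
	char sample[] = "0-3,5,7-9";	/* hypothetical sysfs content */

	printf("%u possible CPUs in \"0-3,5,7-9\"\n", count_cpus(sample));
	return 0;
}
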
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index cf16948aad4a..6692a40a6979 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -155,7 +155,7 @@ void cleanup_cgroup_environment(void)
* This function creates a cgroup under the top level workdir and returns the
* file descriptor. It is idempotent.
*
- * On success, it returns the file descriptor. On failure it returns 0.
+ * On success, it returns the file descriptor. On failure it returns -1.
* If there is a failure, it prints the error to stderr.
*/
int create_and_get_cgroup(const char *path)
@@ -166,13 +166,13 @@ int create_and_get_cgroup(const char *path)
format_cgroup_path(cgroup_path, path);
if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
- return 0;
+ return -1;
}
fd = open(cgroup_path, O_RDONLY);
if (fd < 0) {
log_err("Opening Cgroup");
- return 0;
+ return -1;
}
return fd;
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index ae8180b11d5f..77cafa66d048 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -12,6 +12,7 @@
#include <bpf/libbpf.h>
#include "bpf_rlimit.h"
+#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
const char *cfg_map_name = "jmp_table";
@@ -21,46 +22,13 @@ char *cfg_path_name;
static void load_and_attach_program(void)
{
- struct bpf_program *prog, *main_prog;
- struct bpf_map *prog_array;
- int i, fd, prog_fd, ret;
+ int prog_fd, ret;
struct bpf_object *obj;
- int prog_array_fd;
- ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj,
- &prog_fd);
+ ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
+ cfg_map_name, &prog_fd);
if (ret)
- error(1, 0, "bpf_prog_load %s", cfg_path_name);
-
- main_prog = bpf_object__find_program_by_title(obj, cfg_section_name);
- if (!main_prog)
- error(1, 0, "bpf_object__find_program_by_title %s",
- cfg_section_name);
-
- prog_fd = bpf_program__fd(main_prog);
- if (prog_fd < 0)
- error(1, 0, "bpf_program__fd");
-
- prog_array = bpf_object__find_map_by_name(obj, cfg_map_name);
- if (!prog_array)
- error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name);
-
- prog_array_fd = bpf_map__fd(prog_array);
- if (prog_array_fd < 0)
- error(1, 0, "bpf_map__fd %s", cfg_map_name);
-
- i = 0;
- bpf_object__for_each_program(prog, obj) {
- fd = bpf_program__fd(prog);
- if (fd < 0)
- error(1, 0, "bpf_program__fd");
-
- if (fd != prog_fd) {
- printf("%d: %s\n", i, bpf_program__title(prog, false));
- bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
- ++i;
- }
- }
+ error(1, 0, "bpf_flow_load %s", cfg_path_name);
ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
if (ret)
@@ -69,7 +37,6 @@ static void load_and_attach_program(void)
ret = bpf_object__pin(obj, cfg_pin_path);
if (ret)
error(1, 0, "bpf_object__pin %s", cfg_pin_path);
-
}
static void detach_program(void)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
new file mode 100644
index 000000000000..41dd6959feb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef FLOW_DISSECTOR_LOAD
+#define FLOW_DISSECTOR_LOAD
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+static inline int bpf_flow_load(struct bpf_object **obj,
+ const char *path,
+ const char *section_name,
+ const char *map_name,
+ int *prog_fd)
+{
+ struct bpf_program *prog, *main_prog;
+ struct bpf_map *prog_array;
+ int prog_array_fd;
+ int ret, fd, i;
+
+ ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
+ prog_fd);
+ if (ret)
+ return ret;
+
+ main_prog = bpf_object__find_program_by_title(*obj, section_name);
+ if (!main_prog)
+ return ret;
+
+ *prog_fd = bpf_program__fd(main_prog);
+ if (*prog_fd < 0)
+ return ret;
+
+ prog_array = bpf_object__find_map_by_name(*obj, map_name);
+ if (!prog_array)
+ return ret;
+
+ prog_array_fd = bpf_map__fd(prog_array);
+ if (prog_array_fd < 0)
+ return ret;
+
+ i = 0;
+ bpf_object__for_each_program(prog, *obj) {
+ fd = bpf_program__fd(prog);
+ if (fd < 0)
+ return fd;
+
+ if (fd != *prog_fd) {
+ bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
+ ++i;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* FLOW_DISSECTOR_LOAD */
diff --git a/tools/testing/selftests/bpf/prog_tests/.gitignore b/tools/testing/selftests/bpf/prog_tests/.gitignore
new file mode 100644
index 000000000000..45984a364647
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/.gitignore
@@ -0,0 +1 @@
+tests.h
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
new file mode 100644
index 000000000000..a64f7a02139c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_bpf_obj_id(void)
+{
+ const __u64 array_magic_value = 0xfaceb00c;
+ const __u32 array_key = 0;
+ const int nr_iters = 2;
+ const char *file = "./test_obj_id.o";
+ const char *expected_prog_name = "test_obj_id";
+ const char *expected_map_name = "test_map_id";
+ const __u64 nsec_per_sec = 1000000000;
+
+ struct bpf_object *objs[nr_iters];
+ int prog_fds[nr_iters], map_fds[nr_iters];
+ /* +1 to test for the info_len returned by kernel */
+ struct bpf_prog_info prog_infos[nr_iters + 1];
+ struct bpf_map_info map_infos[nr_iters + 1];
+ /* Each prog only uses one map. +1 to test nr_map_ids
+ * returned by kernel.
+ */
+ __u32 map_ids[nr_iters + 1];
+ char jited_insns[128], xlated_insns[128], zeros[128];
+ __u32 i, next_id, info_len, nr_id_found, duration = 0;
+ struct timespec real_time_ts, boot_time_ts;
+ int err = 0;
+ __u64 array_value;
+ uid_t my_uid = getuid();
+ time_t now, load_time;
+
+ err = bpf_prog_get_fd_by_id(0);
+ CHECK(err >= 0 || errno != ENOENT,
+ "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
+
+ err = bpf_map_get_fd_by_id(0);
+ CHECK(err >= 0 || errno != ENOENT,
+ "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
+
+ for (i = 0; i < nr_iters; i++)
+ objs[i] = NULL;
+
+ /* Check bpf_obj_get_info_by_fd() */
+ bzero(zeros, sizeof(zeros));
+ for (i = 0; i < nr_iters; i++) {
+ now = time(NULL);
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
+ &objs[i], &prog_fds[i]);
+ /* test_obj_id.o is a dumb prog. It should never fail
+ * to load.
+ */
+ if (err)
+ error_cnt++;
+ assert(!err);
+
+ /* Insert a magic value to the map */
+ map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
+ assert(map_fds[i] >= 0);
+ err = bpf_map_update_elem(map_fds[i], &array_key,
+ &array_magic_value, 0);
+ assert(!err);
+
+ /* Check getting map info */
+ info_len = sizeof(struct bpf_map_info) * 2;
+ bzero(&map_infos[i], info_len);
+ err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
+ &info_len);
+ if (CHECK(err ||
+ map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
+ map_infos[i].key_size != sizeof(__u32) ||
+ map_infos[i].value_size != sizeof(__u64) ||
+ map_infos[i].max_entries != 1 ||
+ map_infos[i].map_flags != 0 ||
+ info_len != sizeof(struct bpf_map_info) ||
+ strcmp((char *)map_infos[i].name, expected_map_name),
+ "get-map-info(fd)",
+ "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+ err, errno,
+ map_infos[i].type, BPF_MAP_TYPE_ARRAY,
+ info_len, sizeof(struct bpf_map_info),
+ map_infos[i].key_size,
+ map_infos[i].value_size,
+ map_infos[i].max_entries,
+ map_infos[i].map_flags,
+ map_infos[i].name, expected_map_name))
+ goto done;
+
+ /* Check getting prog info */
+ info_len = sizeof(struct bpf_prog_info) * 2;
+ bzero(&prog_infos[i], info_len);
+ bzero(jited_insns, sizeof(jited_insns));
+ bzero(xlated_insns, sizeof(xlated_insns));
+ prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
+ prog_infos[i].jited_prog_len = sizeof(jited_insns);
+ prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
+ prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
+ prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
+ prog_infos[i].nr_map_ids = 2;
+ err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
+ assert(!err);
+ err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
+ assert(!err);
+ err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
+ &info_len);
+ load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ + (prog_infos[i].load_time / nsec_per_sec);
+ if (CHECK(err ||
+ prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
+ info_len != sizeof(struct bpf_prog_info) ||
+ (jit_enabled && !prog_infos[i].jited_prog_len) ||
+ (jit_enabled &&
+ !memcmp(jited_insns, zeros, sizeof(zeros))) ||
+ !prog_infos[i].xlated_prog_len ||
+ !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
+ load_time < now - 60 || load_time > now + 60 ||
+ prog_infos[i].created_by_uid != my_uid ||
+ prog_infos[i].nr_map_ids != 1 ||
+ *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
+ strcmp((char *)prog_infos[i].name, expected_prog_name),
+ "get-prog-info(fd)",
+ "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+ err, errno, i,
+ prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
+ info_len, sizeof(struct bpf_prog_info),
+ jit_enabled,
+ prog_infos[i].jited_prog_len,
+ prog_infos[i].xlated_prog_len,
+ !!memcmp(jited_insns, zeros, sizeof(zeros)),
+ !!memcmp(xlated_insns, zeros, sizeof(zeros)),
+ load_time, now,
+ prog_infos[i].created_by_uid, my_uid,
+ prog_infos[i].nr_map_ids, 1,
+ *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
+ prog_infos[i].name, expected_prog_name))
+ goto done;
+ }
+
+ /* Check bpf_prog_get_next_id() */
+ nr_id_found = 0;
+ next_id = 0;
+ while (!bpf_prog_get_next_id(next_id, &next_id)) {
+ struct bpf_prog_info prog_info = {};
+ __u32 saved_map_id;
+ int prog_fd;
+
+ info_len = sizeof(prog_info);
+
+ prog_fd = bpf_prog_get_fd_by_id(next_id);
+ if (prog_fd < 0 && errno == ENOENT)
+ /* The bpf_prog is in the dead row */
+ continue;
+ if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
+ "prog_fd %d next_id %d errno %d\n",
+ prog_fd, next_id, errno))
+ break;
+
+ for (i = 0; i < nr_iters; i++)
+ if (prog_infos[i].id == next_id)
+ break;
+
+ if (i == nr_iters)
+ continue;
+
+ nr_id_found++;
+
+ /* Negative test:
+ * prog_info.nr_map_ids = 1
+ * prog_info.map_ids = NULL
+ */
+ prog_info.nr_map_ids = 1;
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ if (CHECK(!err || errno != EFAULT,
+ "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
+ err, errno, EFAULT))
+ break;
+ bzero(&prog_info, sizeof(prog_info));
+ info_len = sizeof(prog_info);
+
+ saved_map_id = *(int *)((long)prog_infos[i].map_ids);
+ prog_info.map_ids = prog_infos[i].map_ids;
+ prog_info.nr_map_ids = 2;
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ prog_infos[i].jited_prog_insns = 0;
+ prog_infos[i].xlated_prog_insns = 0;
+ CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
+ memcmp(&prog_info, &prog_infos[i], info_len) ||
+ *(int *)(long)prog_info.map_ids != saved_map_id,
+ "get-prog-info(next_id->fd)",
+ "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
+ err, errno, info_len, sizeof(struct bpf_prog_info),
+ memcmp(&prog_info, &prog_infos[i], info_len),
+ *(int *)(long)prog_info.map_ids, saved_map_id);
+ close(prog_fd);
+ }
+ CHECK(nr_id_found != nr_iters,
+ "check total prog id found by get_next_id",
+ "nr_id_found %u(%u)\n",
+ nr_id_found, nr_iters);
+
+ /* Check bpf_map_get_next_id() */
+ nr_id_found = 0;
+ next_id = 0;
+ while (!bpf_map_get_next_id(next_id, &next_id)) {
+ struct bpf_map_info map_info = {};
+ int map_fd;
+
+ info_len = sizeof(map_info);
+
+ map_fd = bpf_map_get_fd_by_id(next_id);
+ if (map_fd < 0 && errno == ENOENT)
+ /* The bpf_map is in the dead row */
+ continue;
+ if (CHECK(map_fd < 0, "get-map-fd(next_id)",
+ "map_fd %d next_id %u errno %d\n",
+ map_fd, next_id, errno))
+ break;
+
+ for (i = 0; i < nr_iters; i++)
+ if (map_infos[i].id == next_id)
+ break;
+
+ if (i == nr_iters)
+ continue;
+
+ nr_id_found++;
+
+ err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
+ assert(!err);
+
+ err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ CHECK(err || info_len != sizeof(struct bpf_map_info) ||
+ memcmp(&map_info, &map_infos[i], info_len) ||
+ array_value != array_magic_value,
+ "check get-map-info(next_id->fd)",
+ "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
+ err, errno, info_len, sizeof(struct bpf_map_info),
+ memcmp(&map_info, &map_infos[i], info_len),
+ array_value, array_magic_value);
+
+ close(map_fd);
+ }
+ CHECK(nr_id_found != nr_iters,
+ "check total map id found by get_next_id",
+ "nr_id_found %u(%u)\n",
+ nr_id_found, nr_iters);
+
+done:
+ for (i = 0; i < nr_iters; i++)
+ bpf_object__close(objs[i]);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
new file mode 100644
index 000000000000..bcbd928c96ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#define CHECK_FLOW_KEYS(desc, got, expected) \
+ CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \
+ desc, \
+ "nhoff=%u/%u " \
+ "thoff=%u/%u " \
+ "addr_proto=0x%x/0x%x " \
+ "is_frag=%u/%u " \
+ "is_first_frag=%u/%u " \
+ "is_encap=%u/%u " \
+ "n_proto=0x%x/0x%x " \
+ "sport=%u/%u " \
+ "dport=%u/%u\n", \
+ got.nhoff, expected.nhoff, \
+ got.thoff, expected.thoff, \
+ got.addr_proto, expected.addr_proto, \
+ got.is_frag, expected.is_frag, \
+ got.is_first_frag, expected.is_first_frag, \
+ got.is_encap, expected.is_encap, \
+ got.n_proto, expected.n_proto, \
+ got.sport, expected.sport, \
+ got.dport, expected.dport)
+
+static struct bpf_flow_keys pkt_v4_flow_keys = {
+ .nhoff = 0,
+ .thoff = sizeof(struct iphdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+};
+
+static struct bpf_flow_keys pkt_v6_flow_keys = {
+ .nhoff = 0,
+ .thoff = sizeof(struct ipv6hdr),
+ .addr_proto = ETH_P_IPV6,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+};
+
+void test_flow_dissector(void)
+{
+ struct bpf_flow_keys flow_keys;
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+ __u32 size;
+
+ err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
+ "jmp_table", &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4",
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+ CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys);
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6",
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+ CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
new file mode 100644
index 000000000000..d7bb5beb1c57
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#define MAX_CNT_RAWTP 10ull
+#define MAX_STACK_RAWTP 100
+struct get_stack_trace_t {
+ int pid;
+ int kern_stack_size;
+ int user_stack_size;
+ int user_stack_buildid_size;
+ __u64 kern_stack[MAX_STACK_RAWTP];
+ __u64 user_stack[MAX_STACK_RAWTP];
+ struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
+};
+
+static int get_stack_print_output(void *data, int size)
+{
+ bool good_kern_stack = false, good_user_stack = false;
+ const char *nonjit_func = "___bpf_prog_run";
+ struct get_stack_trace_t *e = data;
+ int i, num_stack;
+ static __u64 cnt;
+ struct ksym *ks;
+
+ cnt++;
+
+ if (size < sizeof(struct get_stack_trace_t)) {
+ __u64 *raw_data = data;
+ bool found = false;
+
+ num_stack = size / sizeof(__u64);
+ /* If jit is enabled, we do not have a good way to
+ * verify the sanity of the kernel stack. So we
+ * just assume it is good if the stack is not empty.
+ * This could be improved in the future.
+ */
+ if (jit_enabled) {
+ found = num_stack > 0;
+ } else {
+ for (i = 0; i < num_stack; i++) {
+ ks = ksym_search(raw_data[i]);
+ if (strcmp(ks->name, nonjit_func) == 0) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (found) {
+ good_kern_stack = true;
+ good_user_stack = true;
+ }
+ } else {
+ num_stack = e->kern_stack_size / sizeof(__u64);
+ if (jit_enabled) {
+ good_kern_stack = num_stack > 0;
+ } else {
+ for (i = 0; i < num_stack; i++) {
+ ks = ksym_search(e->kern_stack[i]);
+ if (strcmp(ks->name, nonjit_func) == 0) {
+ good_kern_stack = true;
+ break;
+ }
+ }
+ }
+ if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
+ good_user_stack = true;
+ }
+ if (!good_kern_stack || !good_user_stack)
+ return LIBBPF_PERF_EVENT_ERROR;
+
+ if (cnt == MAX_CNT_RAWTP)
+ return LIBBPF_PERF_EVENT_DONE;
+
+ return LIBBPF_PERF_EVENT_CONT;
+}
+
+void test_get_stack_raw_tp(void)
+{
+ const char *file = "./test_get_stack_rawtp.o";
+ int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
+ struct perf_event_attr attr = {};
+ struct timespec tv = {0, 10};
+ __u32 key = 0, duration = 0;
+ struct bpf_object *obj;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
+ if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
+ perfmap_fd, errno))
+ goto close_prog;
+
+ err = load_kallsyms();
+ if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_BPF_OUTPUT;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
+ -1/*group_fd*/, 0);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
+ errno))
+ goto close_prog;
+
+ err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
+ if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
+ errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+
+ err = perf_event_mmap(pmu_fd);
+ if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ /* trigger some syscall action */
+ for (i = 0; i < MAX_CNT_RAWTP; i++)
+ nanosleep(&tv, NULL);
+
+ err = perf_event_poller(pmu_fd, get_stack_print_output);
+ if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
new file mode 100644
index 000000000000..20ddca830e68
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_l4lb(const char *file)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct vip key = {.protocol = 6};
+ struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+ } value = {.vip_num = VIP_NUM};
+ __u32 stats_key = VIP_NUM;
+ struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+ } stats[nr_cpus];
+ struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+ } real_def = {.dst = MAGIC_VAL};
+ __u32 ch_key = 11, real_num = 3;
+ __u32 duration, retval, size;
+ int err, i, prog_fd, map_fd;
+ __u64 bytes = 0, pkts = 0;
+ struct bpf_object *obj;
+ char buf[128];
+ u32 *magic = (u32 *)buf;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip_map");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key, &value, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "ch_rings");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "reals");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
+ *magic != MAGIC_VAL, "ipv4",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
+ *magic != MAGIC_VAL, "ipv6",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ map_fd = bpf_find_map(__func__, obj, "stats");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ for (i = 0; i < nr_cpus; i++) {
+ bytes += stats[i].bytes;
+ pkts += stats[i].pkts;
+ }
+ if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+ error_cnt++;
+ printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
+ }
+out:
+ bpf_object__close(obj);
+}
+
+void test_l4lb_all(void)
+{
+ const char *file1 = "./test_l4lb.o";
+ const char *file2 = "./test_l4lb_noinline.o";
+
+ test_l4lb(file1);
+ test_l4lb(file2);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c
new file mode 100644
index 000000000000..90f8a206340a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void *parallel_map_access(void *arg)
+{
+ int err, map_fd = *(u32 *) arg;
+ int vars[17], i, j, rnd, key = 0;
+
+ for (i = 0; i < 10000; i++) {
+ err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
+ if (err) {
+ printf("lookup failed\n");
+ error_cnt++;
+ goto out;
+ }
+ if (vars[0] != 0) {
+ printf("lookup #%d var[0]=%d\n", i, vars[0]);
+ error_cnt++;
+ goto out;
+ }
+ rnd = vars[1];
+ for (j = 2; j < 17; j++) {
+ if (vars[j] == rnd)
+ continue;
+ printf("lookup #%d var[1]=%d var[%d]=%d\n",
+ i, rnd, j, vars[j]);
+ error_cnt++;
+ goto out;
+ }
+ }
+out:
+ pthread_exit(arg);
+}
+
+void test_map_lock(void)
+{
+ const char *file = "./test_map_lock.o";
+ int prog_fd, map_fd[2], vars[17] = {};
+ pthread_t thread_id[6];
+ struct bpf_object *obj;
+ int err = 0, key = 0, i;
+ void *ret;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ if (err) {
+ printf("test_map_lock:bpf_prog_load errno %d\n", errno);
+ goto close_prog;
+ }
+ map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
+ if (map_fd[0] < 0)
+ goto close_prog;
+ map_fd[1] = bpf_find_map(__func__, obj, "array_map");
+ if (map_fd[1] < 0)
+ goto close_prog;
+
+ bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);
+
+ for (i = 0; i < 4; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &spin_lock_thread, &prog_fd) == 0);
+ for (i = 4; i < 6; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &parallel_map_access, &map_fd[i - 4]) == 0);
+ for (i = 0; i < 4; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&prog_fd);
+ for (i = 4; i < 6; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&map_fd[i - 4]);
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c
new file mode 100644
index 000000000000..e178416bddad
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_obj_name(void)
+{
+ struct {
+ const char *name;
+ int success;
+ int expected_errno;
+ } tests[] = {
+ { "", 1, 0 },
+ { "_123456789ABCDE", 1, 0 },
+ { "_123456789ABCDEF", 0, EINVAL },
+ { "_123456789ABCD\n", 0, EINVAL },
+ };
+ struct bpf_insn prog[] = {
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ __u32 duration = 0;
+ int i;
+
+ for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ size_t name_len = strlen(tests[i].name) + 1;
+ union bpf_attr attr;
+ size_t ncopy;
+ int fd;
+
+ /* test different attr.prog_name during BPF_PROG_LOAD */
+ ncopy = name_len < sizeof(attr.prog_name) ?
+ name_len : sizeof(attr.prog_name);
+ bzero(&attr, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
+ attr.insn_cnt = 2;
+ attr.insns = ptr_to_u64(prog);
+ attr.license = ptr_to_u64("");
+ memcpy(attr.prog_name, tests[i].name, ncopy);
+
+ fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ CHECK((tests[i].success && fd < 0) ||
+ (!tests[i].success && fd != -1) ||
+ (!tests[i].success && errno != tests[i].expected_errno),
+ "check-bpf-prog-name",
+ "fd %d(%d) errno %d(%d)\n",
+ fd, tests[i].success, errno, tests[i].expected_errno);
+
+ if (fd != -1)
+ close(fd);
+
+ /* test different attr.map_name during BPF_MAP_CREATE */
+ ncopy = name_len < sizeof(attr.map_name) ?
+ name_len : sizeof(attr.map_name);
+ bzero(&attr, sizeof(attr));
+ attr.map_type = BPF_MAP_TYPE_ARRAY;
+ attr.key_size = 4;
+ attr.value_size = 4;
+ attr.max_entries = 1;
+ attr.map_flags = 0;
+ memcpy(attr.map_name, tests[i].name, ncopy);
+ fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
+ CHECK((tests[i].success && fd < 0) ||
+ (!tests[i].success && fd != -1) ||
+ (!tests[i].success && errno != tests[i].expected_errno),
+ "check-bpf-map-name",
+ "fd %d(%d) errno %d(%d)\n",
+ fd, tests[i].success, errno, tests[i].expected_errno);
+
+ if (fd != -1)
+ close(fd);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
new file mode 100644
index 000000000000..4ecfd721a044
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_pkt_access(void)
+{
+ const char *file = "./test_pkt_access.o";
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv4",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
new file mode 100644
index 000000000000..ac0d43435806
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_pkt_md_access(void)
+{
+ const char *file = "./test_pkt_md_access.o";
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
new file mode 100644
index 000000000000..5dd89b941f53
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_prog_run_xattr(void)
+{
+ const char *file = "./test_pkt_access.o";
+ struct bpf_object *obj;
+ char buf[10];
+ int err;
+ struct bpf_prog_test_run_attr tattr = {
+ .repeat = 1,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = 5,
+ };
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &tattr.prog_fd);
+ if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
+ "err %d errno %d retval %d\n", err, errno, tattr.retval);
+
+ CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
+ "incorrect output size, want %lu have %u\n",
+ sizeof(pkt_v4), tattr.data_size_out);
+
+ CHECK_ATTR(buf[5] != 0, "overflow",
+ "BPF_PROG_TEST_RUN ignored size hint\n");
+
+ tattr.data_out = NULL;
+ tattr.data_size_out = 0;
+ errno = 0;
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
+ "err %d errno %d retval %d\n", err, errno, tattr.retval);
+
+ tattr.data_size_out = 1;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
new file mode 100644
index 000000000000..e60cd5ff1f55
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+enum {
+ QUEUE,
+ STACK,
+};
+
+static void test_queue_stack_map_by_type(int type)
+{
+ const int MAP_SIZE = 32;
+ __u32 vals[MAP_SIZE], duration, retval, size, val;
+ int i, err, prog_fd, map_in_fd, map_out_fd;
+ char file[32], buf[128];
+ struct bpf_object *obj;
+ struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+
+ /* Fill test values to be used */
+ for (i = 0; i < MAP_SIZE; i++)
+ vals[i] = rand();
+
+ if (type == QUEUE)
+ strncpy(file, "./test_queue_map.o", sizeof(file));
+ else if (type == STACK)
+ strncpy(file, "./test_stack_map.o", sizeof(file));
+ else
+ return;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_in_fd = bpf_find_map(__func__, obj, "map_in");
+ if (map_in_fd < 0)
+ goto out;
+
+ map_out_fd = bpf_find_map(__func__, obj, "map_out");
+ if (map_out_fd < 0)
+ goto out;
+
+ /* Push 32 elements to the input map */
+ for (i = 0; i < MAP_SIZE; i++) {
+ err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
+ if (err) {
+ error_cnt++;
+ goto out;
+ }
+ }
+
+ /* The eBPF program pushes iph.saddr in the output map,
+ * pops the input map and saves this value in iph.daddr
+ */
+ for (i = 0; i < MAP_SIZE; i++) {
+ if (type == QUEUE) {
+ val = vals[i];
+ pkt_v4.iph.saddr = vals[i] * 5;
+ } else if (type == STACK) {
+ val = vals[MAP_SIZE - 1 - i];
+ pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ if (err || retval || size != sizeof(pkt_v4) ||
+ iph->daddr != val)
+ break;
+ }
+
+ CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
+ "bpf_map_pop_elem",
+ "err %d errno %d retval %d size %d iph->daddr %u\n",
+ err, errno, retval, size, iph->daddr);
+
+ /* Queue is empty, program should return TC_ACT_SHOT */
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
+ "check-queue-stack-map-empty",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ /* Check that the program pushed elements correctly */
+ for (i = 0; i < MAP_SIZE; i++) {
+ err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
+ if (err || val != vals[i] * 5)
+ break;
+ }
+
+ CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
+ "bpf_map_push_elem", "err %d value %u\n", err, val);
+
+out:
+ pkt_v4.iph.saddr = 0;
+ bpf_object__close(obj);
+}
+
+void test_queue_stack_map(void)
+{
+ test_queue_stack_map_by_type(QUEUE);
+ test_queue_stack_map_by_type(STACK);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
new file mode 100644
index 000000000000..5633be43828f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG)
+ return 0;
+
+ return vfprintf(stderr, format, args);
+}
+
+void test_reference_tracking(void)
+{
+ const char *file = "./test_sk_lookup_kern.o";
+ struct bpf_object *obj;
+ struct bpf_program *prog;
+ __u32 duration = 0;
+ int err = 0;
+
+ obj = bpf_object__open(file);
+ if (IS_ERR(obj)) {
+ error_cnt++;
+ return;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *title;
+
+ /* Ignore .text sections */
+ title = bpf_program__title(prog, false);
+ if (strstr(title, ".text") != NULL)
+ continue;
+
+ bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+
+ /* Expect verifier failure if test name has 'fail' */
+ if (strstr(title, "fail") != NULL) {
+ libbpf_set_print(NULL);
+ err = !bpf_program__load(prog, "GPL", 0);
+ libbpf_set_print(libbpf_debug_print);
+ } else {
+ err = bpf_program__load(prog, "GPL", 0);
+ }
+ CHECK(err, title, "\n");
+ }
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
new file mode 100644
index 000000000000..f2a37bbf91ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void sigalrm_handler(int s) {}
+static struct sigaction sigalrm_action = {
+ .sa_handler = sigalrm_handler,
+};
+
+static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
+{
+ struct bpf_insn prog[4096];
+ struct itimerval timeo = {
+ .it_value.tv_usec = 100000, /* 100ms */
+ };
+ __u32 duration, retval;
+ int prog_fd;
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prog); i++)
+ prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
+ prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN();
+
+ prog_fd = bpf_load_program(prog_type, prog, ARRAY_SIZE(prog),
+ "GPL", 0, NULL, 0);
+ CHECK(prog_fd < 0, "test-run", "errno %d\n", errno);
+
+ err = sigaction(SIGALRM, &sigalrm_action, NULL);
+ CHECK(err, "test-run-signal-sigaction", "errno %d\n", errno);
+
+ err = setitimer(ITIMER_REAL, &timeo, NULL);
+ CHECK(err, "test-run-signal-timer", "errno %d\n", errno);
+
+ err = bpf_prog_test_run(prog_fd, 0xffffffff, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(duration > 500000000, /* 500ms */
+ "test-run-signal-duration",
+ "duration %dns > 500ms\n",
+ duration);
+
+ signal(SIGALRM, SIG_DFL);
+}
+
+void test_signal_pending(enum bpf_prog_type prog_type)
+{
+ test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER);
+ test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
new file mode 100644
index 000000000000..9a573a9675d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_spinlock(void)
+{
+ const char *file = "./test_spin_lock.o";
+ pthread_t thread_id[4];
+ struct bpf_object *obj;
+ int prog_fd;
+ int err = 0, i;
+ void *ret;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ if (err) {
+ printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
+ goto close_prog;
+ }
+ for (i = 0; i < 4; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &spin_lock_thread, &prog_fd) == 0);
+ for (i = 0; i < 4; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&prog_fd);
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
new file mode 100644
index 000000000000..3aab2b083c71
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_build_id(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_build_id.o";
+ int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
+ struct perf_event_attr attr = {};
+ __u32 key, previous_key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+ int i, j;
+ struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
+ int build_id_matches = 0;
+ int retry = 1;
+
+retry:
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto out;
+
+	/* Get the ID for the random/urandom_read tracepoint */
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/random/urandom_read/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ goto close_prog;
+
+	/* Open the perf event and attach the bpf program */
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
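+	/* reading /dev/urandom triggers the random/urandom_read tracepoint */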
+ assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
+ == 0);
+ assert(system("./urandom_read") == 0);
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+	 * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = extract_build_id(buf, 256);
+
+ if (CHECK(err, "get build_id with readelf",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ if (CHECK(err, "get_next_key from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+
+ do {
+ char build_id[64];
+
+ err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+ if (CHECK(err, "lookup_elem from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+ for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
+ if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
+ id_offs[i].offset != 0) {
+ for (j = 0; j < 20; ++j)
+ sprintf(build_id + 2 * j, "%02x",
+ id_offs[i].build_id[j] & 0xff);
+ if (strstr(buf, build_id) != NULL)
+ build_id_matches = 1;
+ }
+ previous_key = key;
+ } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ if (CHECK(build_id_matches < 1, "build id match",
+ "Didn't find expected build ID from the map\n"))
+ goto disable_pmu;
+
+ stack_trace_len = PERF_MAX_STACK_DEPTH
+ * sizeof(struct bpf_stack_build_id);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+ "err %d errno %d\n", err, errno);
+
+disable_pmu:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+ close(pmu_fd);
+
+close_prog:
+ bpf_object__close(obj);
+
+out:
+ return;
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
new file mode 100644
index 000000000000..8a114bb1c379
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_build_id_nmi(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_build_id.o";
+ int err, pmu_fd, prog_fd;
+ struct perf_event_attr attr = {
+ .sample_freq = 5000,
+ .freq = 1,
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ __u32 key, previous_key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+ int i, j;
+ struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
+ int build_id_matches = 0;
+ int retry = 1;
+
+retry:
+ err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ return;
+
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open",
+ "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
+ == 0);
+ assert(system("taskset 0x1 ./urandom_read 100000") == 0);
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+	 * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = extract_build_id(buf, 256);
+
+ if (CHECK(err, "get build_id with readelf",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ if (CHECK(err, "get_next_key from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+
+ do {
+ char build_id[64];
+
+ err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+ if (CHECK(err, "lookup_elem from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+ for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
+ if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
+ id_offs[i].offset != 0) {
+ for (j = 0; j < 20; ++j)
+ sprintf(build_id + 2 * j, "%02x",
+ id_offs[i].build_id[j] & 0xff);
+ if (strstr(buf, build_id) != NULL)
+ build_id_matches = 1;
+ }
+ previous_key = key;
+ } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ if (CHECK(build_id_matches < 1, "build id match",
+ "Didn't find expected build ID from the map\n"))
+ goto disable_pmu;
+
+ /*
+ * We intentionally skip compare_stack_ips(). This is because we
+ * only support one in_nmi() ips-to-build_id translation per cpu
+	 * at any time, thus stack_amap here will always fall back to
+	 * BPF_STACK_BUILD_ID_IP.
+ */
+
+disable_pmu:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+ close(pmu_fd);
+
+close_prog:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
new file mode 100644
index 000000000000..2bfd50a0d6d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_map(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_map.o";
+ int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
+ struct perf_event_attr attr = {};
+ __u32 key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ return;
+
+ /* Get the ID for the sched/sched_switch tracepoint */
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (bytes <= 0 || bytes >= sizeof(buf))
+ goto close_prog;
+
+	/* Open the perf event and attach the bpf program */
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ goto disable_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (err)
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (control_map_fd < 0)
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (stackid_hmap_fd < 0)
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (stackmap_fd < 0)
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (stack_amap_fd < 0)
+ goto disable_pmu;
+
+	/* give the bpf program some time to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+	 * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ goto disable_pmu_noerr;
+disable_pmu:
+ error_cnt++;
+disable_pmu_noerr:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+close_prog:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
new file mode 100644
index 000000000000..1f8387d80fd7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_map_raw_tp(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd;
+ const char *file = "./test_stacktrace_map.o";
+ int efd, err, prog_fd;
+ __u32 key, val, duration = 0;
+ struct bpf_object *obj;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (control_map_fd < 0)
+ goto close_prog;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (stackid_hmap_fd < 0)
+ goto close_prog;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (stackmap_fd < 0)
+ goto close_prog;
+
+	/* give the bpf program some time to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+	 * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
new file mode 100644
index 000000000000..958a3d88de99
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_task_fd_query_rawtp(void)
+{
+ const char *file = "./test_get_stack_rawtp.o";
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ struct bpf_object *obj;
+ int efd, err, prog_fd;
+ __u32 duration = 0;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ /* query (getpid(), efd) */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
+ errno))
+ goto close_prog;
+
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ strcmp(buf, "sys_enter") == 0;
+ if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
+ fd_type, buf))
+ goto close_prog;
+
+ /* test zero len */
+ len = 0;
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter");
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ /* test empty buffer */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter");
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ /* test smaller buffer */
+ len = 3;
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter") &&
+ strcmp(buf, "sy") == 0;
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
new file mode 100644
index 000000000000..d636a4f39476
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_task_fd_query_tp_core(const char *probe_name,
+ const char *tp_name)
+{
+ const char *file = "./test_tracepoint.o";
+ int err, bytes, efd, prog_fd, pmu_fd;
+ struct perf_event_attr attr = {};
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ struct bpf_object *obj;
+ __u32 duration = 0;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/%s/id", probe_name);
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
+ "bytes %d errno %d\n", bytes, errno))
+ goto close_prog;
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ /* query (getpid(), pmu_fd) */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
+ if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
+ fd_type, buf))
+ goto close_pmu;
+
+ close(pmu_fd);
+ goto close_prog_noerr;
+
+close_pmu:
+ close(pmu_fd);
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
+
+void test_task_fd_query_tp(void)
+{
+ test_task_fd_query_tp_core("sched/sched_switch",
+ "sched_switch");
+ test_task_fd_query_tp_core("syscalls/sys_enter_read",
+ "sys_enter_read");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
new file mode 100644
index 000000000000..bb8759d69099
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_tcp_estats(void)
+{
+ const char *file = "./test_tcp_estats.o";
+ int err, prog_fd;
+ struct bpf_object *obj;
+ __u32 duration = 0;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ CHECK(err, "", "err %d errno %d\n", err, errno);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
new file mode 100644
index 000000000000..a2f476f91637
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_tp_attach_query(void)
+{
+ const int num_progs = 3;
+ int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+ __u32 duration = 0, info_len, saved_prog_ids[num_progs];
+ const char *file = "./test_tracepoint.o";
+ struct perf_event_query_bpf *query;
+ struct perf_event_attr attr = {};
+ struct bpf_object *obj[num_progs];
+ struct bpf_prog_info prog_info;
+ char buf[256];
+
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ return;
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ return;
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+
+ query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+ for (i = 0; i < num_progs; i++) {
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+ &prog_fd[i]);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto cleanup1;
+
+ bzero(&prog_info, sizeof(prog_info));
+ prog_info.jited_prog_len = 0;
+ prog_info.xlated_prog_len = 0;
+ prog_info.nr_map_ids = 0;
+ info_len = sizeof(prog_info);
+ err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+ if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+ err, errno))
+ goto cleanup1;
+ saved_prog_ids[i] = prog_info.id;
+
+ pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd[i], errno))
+ goto cleanup2;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 0) {
+ /* check NULL prog array query */
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 0,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 1) {
+ /* try to get # of programs only */
+ query->ids_len = 0;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+
+ /* try a few negative tests */
+ /* invalid query pointer */
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+ (struct perf_event_query_bpf *)0x1);
+ if (CHECK(!err || errno != EFAULT,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d\n", err, errno))
+ goto cleanup3;
+
+			/* not enough space */
+ query->ids_len = 1;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != (i + 1),
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ for (j = 0; j < i + 1; j++)
+ if (CHECK(saved_prog_ids[j] != query->ids[j],
+ "perf_event_ioc_query_bpf",
+ "#%d saved_prog_id %x query prog_id %x\n",
+ j, saved_prog_ids[j], query->ids[j]))
+ goto cleanup3;
+ }
+
+ i = num_progs - 1;
+ for (; i >= 0; i--) {
+ cleanup3:
+ ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+ cleanup2:
+ close(pmu_fd[i]);
+ cleanup1:
+ bpf_object__close(obj[i]);
+ }
+ free(query);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c
new file mode 100644
index 000000000000..a74167289545
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp(void)
+{
+ struct vip key4 = {.protocol = 6, .family = AF_INET};
+ struct vip key6 = {.protocol = 6, .family = AF_INET6};
+ struct iptnl_info value4 = {.family = AF_INET};
+ struct iptnl_info value6 = {.family = AF_INET6};
+ const char *file = "./test_xdp.o";
+ struct bpf_object *obj;
+ char buf[128];
+ struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
+ struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+ __u32 duration, retval, size;
+ int err, prog_fd, map_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip2tnl");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key4, &value4, 0);
+ bpf_map_update_elem(map_fd, &key6, &value6, 0);
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+
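+	/* test_xdp.o is expected to IPIP-encapsulate the packet:
+	 * the 54-byte pkt_v4 should come back as 74 bytes.
+	 */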
+ CHECK(err || retval != XDP_TX || size != 74 ||
+ iph->protocol != IPPROTO_IPIP, "ipv4",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != XDP_TX || size != 114 ||
+ iph6->nexthdr != IPPROTO_IPV6, "ipv6",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+out:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
new file mode 100644
index 000000000000..922aa0a19764
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp_adjust_tail(void)
+{
+ const char *file = "./test_adjust_tail.o";
+ struct bpf_object *obj;
+ char buf[128];
+ __u32 duration, retval, size;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+
+ CHECK(err || retval != XDP_DROP,
+ "ipv4", "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != XDP_TX || size != 54,
+ "ipv6", "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
new file mode 100644
index 000000000000..09e6b46f5515
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp_noinline(void)
+{
+ const char *file = "./test_xdp_noinline.o";
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct vip key = {.protocol = 6};
+ struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+ } value = {.vip_num = VIP_NUM};
+ __u32 stats_key = VIP_NUM;
+ struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+ } stats[nr_cpus];
+ struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+ } real_def = {.dst = MAGIC_VAL};
+ __u32 ch_key = 11, real_num = 3;
+ __u32 duration, retval, size;
+ int err, i, prog_fd, map_fd;
+ __u64 bytes = 0, pkts = 0;
+ struct bpf_object *obj;
+ char buf[128];
+ u32 *magic = (u32 *)buf;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip_map");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key, &value, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "ch_rings");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "reals");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 1 || size != 54 ||
+ *magic != MAGIC_VAL, "ipv4",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 1 || size != 74 ||
+ *magic != MAGIC_VAL, "ipv6",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ map_fd = bpf_find_map(__func__, obj, "stats");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ for (i = 0; i < nr_cpus; i++) {
+ bytes += stats[i].bytes;
+ pkts += stats[i].pkts;
+ }
+ if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+ error_cnt++;
+ printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+ }
+out:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 284660f5aa95..284660f5aa95 100644
--- a/tools/testing/selftests/bpf/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
diff --git a/tools/testing/selftests/bpf/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
index 1fd244d35ba9..1fd244d35ba9 100644
--- a/tools/testing/selftests/bpf/connect4_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
diff --git a/tools/testing/selftests/bpf/connect6_prog.c b/tools/testing/selftests/bpf/progs/connect6_prog.c
index 26397ab7b3c7..26397ab7b3c7 100644
--- a/tools/testing/selftests/bpf/connect6_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect6_prog.c
diff --git a/tools/testing/selftests/bpf/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c
index ce41a3475f27..ce41a3475f27 100644
--- a/tools/testing/selftests/bpf/dev_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
index 014dba10b8a5..014dba10b8a5 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
diff --git a/tools/testing/selftests/bpf/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c
index 9f741e69cebe..9f741e69cebe 100644
--- a/tools/testing/selftests/bpf/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c
diff --git a/tools/testing/selftests/bpf/sample_map_ret0.c b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
index 0756303676ac..0756303676ac 100644
--- a/tools/testing/selftests/bpf/sample_map_ret0.c
+++ b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
diff --git a/tools/testing/selftests/bpf/sample_ret0.c b/tools/testing/selftests/bpf/progs/sample_ret0.c
index fec99750d6ea..fec99750d6ea 100644
--- a/tools/testing/selftests/bpf/sample_ret0.c
+++ b/tools/testing/selftests/bpf/progs/sample_ret0.c
diff --git a/tools/testing/selftests/bpf/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
index a91536b1c47e..a91536b1c47e 100644
--- a/tools/testing/selftests/bpf/sendmsg4_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
index 5aeaa284fc47..5aeaa284fc47 100644
--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
diff --git a/tools/testing/selftests/bpf/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
index 9ff8ac4b0bf6..9ff8ac4b0bf6 100644
--- a/tools/testing/selftests/bpf/socket_cookie_prog.c
+++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
index 0f92858f6226..0f92858f6226 100644
--- a/tools/testing/selftests/bpf/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
index 12a7b5c82ed6..12a7b5c82ed6 100644
--- a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index 2ce7634a4012..2ce7634a4012 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
diff --git a/tools/testing/selftests/bpf/test_adjust_tail.c b/tools/testing/selftests/bpf/progs/test_adjust_tail.c
index 4cd5e860c903..4cd5e860c903 100644
--- a/tools/testing/selftests/bpf/test_adjust_tail.c
+++ b/tools/testing/selftests/bpf/progs/test_adjust_tail.c
diff --git a/tools/testing/selftests/bpf/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
index e5c79fe0ffdb..e5c79fe0ffdb 100644
--- a/tools/testing/selftests/bpf/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
diff --git a/tools/testing/selftests/bpf/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 434188c37774..434188c37774 100644
--- a/tools/testing/selftests/bpf/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
diff --git a/tools/testing/selftests/bpf/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
index f6d9f238e00a..f6d9f238e00a 100644
--- a/tools/testing/selftests/bpf/test_get_stack_rawtp.c
+++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c
index 1e10c9590991..1e10c9590991 100644
--- a/tools/testing/selftests/bpf/test_l4lb.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb.c
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index ba44a14e6dc4..ba44a14e6dc4 100644
--- a/tools/testing/selftests/bpf/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c
index 4147130cc3b7..4147130cc3b7 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c
new file mode 100644
index 000000000000..c957d6dfe6d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+struct grehdr {
+ __be16 flags;
+ __be16 protocol;
+};
+
+SEC("encap_gre")
+int bpf_lwt_encap_gre(struct __sk_buff *skb)
+{
+ struct encap_hdr {
+ struct iphdr iph;
+ struct grehdr greh;
+ } hdr;
+ int err;
+
+ memset(&hdr, 0, sizeof(struct encap_hdr));
+
+ hdr.iph.ihl = 5;
+ hdr.iph.version = 4;
+ hdr.iph.ttl = 0x40;
+ hdr.iph.protocol = 47; /* IPPROTO_GRE */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ hdr.iph.saddr = 0x640110ac; /* 172.16.1.100 */
+ hdr.iph.daddr = 0x641010ac; /* 172.16.16.100 */
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ hdr.iph.saddr = 0xac100164; /* 172.16.1.100 */
+ hdr.iph.daddr = 0xac101064; /* 172.16.16.100 */
+#else
+#error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+ hdr.iph.tot_len = bpf_htons(skb->len + sizeof(struct encap_hdr));
+
+ hdr.greh.protocol = skb->protocol;
+
+ err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+ sizeof(struct encap_hdr));
+ if (err)
+ return BPF_DROP;
+
+ return BPF_LWT_REROUTE;
+}
+
+SEC("encap_gre6")
+int bpf_lwt_encap_gre6(struct __sk_buff *skb)
+{
+ struct encap_hdr {
+ struct ipv6hdr ip6hdr;
+ struct grehdr greh;
+ } hdr;
+ int err;
+
+ memset(&hdr, 0, sizeof(struct encap_hdr));
+
+ hdr.ip6hdr.version = 6;
+ hdr.ip6hdr.payload_len = bpf_htons(skb->len + sizeof(struct grehdr));
+ hdr.ip6hdr.nexthdr = 47; /* IPPROTO_GRE */
+ hdr.ip6hdr.hop_limit = 0x40;
+ /* fb01::1 */
+ hdr.ip6hdr.saddr.s6_addr[0] = 0xfb;
+ hdr.ip6hdr.saddr.s6_addr[1] = 1;
+ hdr.ip6hdr.saddr.s6_addr[15] = 1;
+ /* fb10::1 */
+ hdr.ip6hdr.daddr.s6_addr[0] = 0xfb;
+ hdr.ip6hdr.daddr.s6_addr[1] = 0x10;
+ hdr.ip6hdr.daddr.s6_addr[15] = 1;
+
+ hdr.greh.protocol = skb->protocol;
+
+ err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+ sizeof(struct encap_hdr));
+ if (err)
+ return BPF_DROP;
+
+ return BPF_LWT_REROUTE;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 0575751bc1bc..0575751bc1bc 100644
--- a/tools/testing/selftests/bpf/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
diff --git a/tools/testing/selftests/bpf/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index ce923e67e08e..2985f262846e 100644
--- a/tools/testing/selftests/bpf/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -27,6 +27,7 @@ SEC("xdp_mimtest")
int xdp_mimtest0(struct xdp_md *ctx)
{
int value = 123;
+ int *value_p;
int key = 0;
void *map;
@@ -35,6 +36,9 @@ int xdp_mimtest0(struct xdp_md *ctx)
return XDP_DROP;
bpf_map_update_elem(map, &key, &value, 0);
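+	/* read the value back to verify the inner-map update is visible */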
+ value_p = bpf_map_lookup_elem(map, &key);
+ if (!value_p || *value_p != 123)
+ return XDP_DROP;
map = bpf_map_lookup_elem(&mim_hash, &key);
if (!map)
diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c
new file mode 100644
index 000000000000..af8cc68ed2f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_lock.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+#define VAR_NUM 16
+
+struct hmap_elem {
+ struct bpf_spin_lock lock;
+ int var[VAR_NUM];
+};
+
+struct bpf_map_def SEC("maps") hash_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct hmap_elem),
+ .max_entries = 1,
+};
+
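+/* Emit BTF for the map's key/value types; the verifier needs this BTF to
+ * locate the bpf_spin_lock field inside the map value.
+ */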
+BPF_ANNOTATE_KV_PAIR(hash_map, int, struct hmap_elem);
+
+struct array_elem {
+ struct bpf_spin_lock lock;
+ int var[VAR_NUM];
+};
+
+struct bpf_map_def SEC("maps") array_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct array_elem),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(array_map, int, struct array_elem);
+
+SEC("map_lock_demo")
+int bpf_map_lock_test(struct __sk_buff *skb)
+{
+ struct hmap_elem zero = {}, *val;
+ int rnd = bpf_get_prandom_u32();
+ int key = 0, err = 1, i;
+ struct array_elem *q;
+
+ val = bpf_map_lookup_elem(&hash_map, &key);
+ if (!val)
+ goto err;
+ /* spin_lock in hash map */
+ bpf_spin_lock(&val->lock);
+ for (i = 0; i < VAR_NUM; i++)
+ val->var[i] = rnd;
+ bpf_spin_unlock(&val->lock);
+
+ /* spin_lock in array */
+ q = bpf_map_lookup_elem(&array_map, &key);
+ if (!q)
+ goto err;
+ bpf_spin_lock(&q->lock);
+ for (i = 0; i < VAR_NUM; i++)
+ q->var[i] = rnd;
+ bpf_spin_unlock(&q->lock);
+ err = 0;
+err:
+ return err;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c
index 880d2963b472..880d2963b472 100644
--- a/tools/testing/selftests/bpf/test_obj_id.c
+++ b/tools/testing/selftests/bpf/progs/test_obj_id.c
diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 6e11ba11709e..6e11ba11709e 100644
--- a/tools/testing/selftests/bpf/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
index 7956302ecdf2..7956302ecdf2 100644
--- a/tools/testing/selftests/bpf/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
diff --git a/tools/testing/selftests/bpf/test_queue_map.c b/tools/testing/selftests/bpf/progs/test_queue_map.c
index 87db1f9da33d..87db1f9da33d 100644
--- a/tools/testing/selftests/bpf/test_queue_map.c
+++ b/tools/testing/selftests/bpf/progs/test_queue_map.c
diff --git a/tools/testing/selftests/bpf/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 5b54ec637ada..5b54ec637ada 100644
--- a/tools/testing/selftests/bpf/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index e21cd736c196..e21cd736c196 100644
--- a/tools/testing/selftests/bpf/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
index 68cf9829f5a7..68cf9829f5a7 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
new file mode 100644
index 000000000000..de1a43e8f610
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <linux/bpf.h>
+#include <netinet/in.h>
+#include <stdbool.h>
+
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+enum bpf_array_idx {
+ SRV_IDX,
+ CLI_IDX,
+ __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") addr_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct sockaddr_in6),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") sock_result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_sock),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") tcp_sock_result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_tcp_sock),
+ .max_entries = __NR_BPF_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") linum_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+};
+
+static bool is_loopback6(__u32 *a6)
+{
+ return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
+}
+
+static void skcpy(struct bpf_sock *dst,
+ const struct bpf_sock *src)
+{
+ dst->bound_dev_if = src->bound_dev_if;
+ dst->family = src->family;
+ dst->type = src->type;
+ dst->protocol = src->protocol;
+ dst->mark = src->mark;
+ dst->priority = src->priority;
+ dst->src_ip4 = src->src_ip4;
+ dst->src_ip6[0] = src->src_ip6[0];
+ dst->src_ip6[1] = src->src_ip6[1];
+ dst->src_ip6[2] = src->src_ip6[2];
+ dst->src_ip6[3] = src->src_ip6[3];
+ dst->src_port = src->src_port;
+ dst->dst_ip4 = src->dst_ip4;
+ dst->dst_ip6[0] = src->dst_ip6[0];
+ dst->dst_ip6[1] = src->dst_ip6[1];
+ dst->dst_ip6[2] = src->dst_ip6[2];
+ dst->dst_ip6[3] = src->dst_ip6[3];
+ dst->dst_port = src->dst_port;
+ dst->state = src->state;
+}
+
+static void tpcpy(struct bpf_tcp_sock *dst,
+ const struct bpf_tcp_sock *src)
+{
+ dst->snd_cwnd = src->snd_cwnd;
+ dst->srtt_us = src->srtt_us;
+ dst->rtt_min = src->rtt_min;
+ dst->snd_ssthresh = src->snd_ssthresh;
+ dst->rcv_nxt = src->rcv_nxt;
+ dst->snd_nxt = src->snd_nxt;
+ dst->snd_una = src->snd_una;
+ dst->mss_cache = src->mss_cache;
+ dst->ecn_flags = src->ecn_flags;
+ dst->rate_delivered = src->rate_delivered;
+ dst->rate_interval_us = src->rate_interval_us;
+ dst->packets_out = src->packets_out;
+ dst->retrans_out = src->retrans_out;
+ dst->total_retrans = src->total_retrans;
+ dst->segs_in = src->segs_in;
+ dst->data_segs_in = src->data_segs_in;
+ dst->segs_out = src->segs_out;
+ dst->data_segs_out = src->data_segs_out;
+ dst->lost_out = src->lost_out;
+ dst->sacked_out = src->sacked_out;
+ dst->bytes_received = src->bytes_received;
+ dst->bytes_acked = src->bytes_acked;
+}
+
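+/* Record the line number of a failed check in linum_map so userspace can report it */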
+#define RETURN { \
+ linum = __LINE__; \
+ bpf_map_update_elem(&linum_map, &idx0, &linum, 0); \
+ return 1; \
+}
+
+SEC("cgroup_skb/egress")
+int read_sock_fields(struct __sk_buff *skb)
+{
+ __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx;
+ struct sockaddr_in6 *srv_sa6, *cli_sa6;
+ struct bpf_tcp_sock *tp, *tp_ret;
+ struct bpf_sock *sk, *sk_ret;
+ __u32 linum, idx0 = 0;
+
+ sk = skb->sk;
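+	/* 10 == BPF_TCP_LISTEN: ignore the listening socket */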
+ if (!sk || sk->state == 10)
+ RETURN;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk || sk->family != AF_INET6 || sk->protocol != IPPROTO_TCP ||
+ !is_loopback6(sk->src_ip6))
+ RETURN;
+
+ tp = bpf_tcp_sock(sk);
+ if (!tp)
+ RETURN;
+
+ srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+ cli_sa6 = bpf_map_lookup_elem(&addr_map, &cli_idx);
+ if (!srv_sa6 || !cli_sa6)
+ RETURN;
+
+ if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
+ idx = srv_idx;
+ else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
+ idx = cli_idx;
+ else
+ RETURN;
+
+ sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx);
+ tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx);
+ if (!sk_ret || !tp_ret)
+ RETURN;
+
+ skcpy(sk_ret, sk);
+ tpcpy(tp_ret, tp);
+
+ RETURN;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_sockhash_kern.c b/tools/testing/selftests/bpf/progs/test_sockhash_kern.c
index e6755916442a..e6755916442a 100644
--- a/tools/testing/selftests/bpf/test_sockhash_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sockhash_kern.c
diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.c b/tools/testing/selftests/bpf/progs/test_sockmap_kern.c
index 677b2ed1cc1e..677b2ed1cc1e 100644
--- a/tools/testing/selftests/bpf/test_sockmap_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
new file mode 100644
index 000000000000..40f904312090
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+struct hmap_elem {
+ volatile int cnt;
+ struct bpf_spin_lock lock;
+ int test_padding;
+};
+
+struct bpf_map_def SEC("maps") hmap = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct hmap_elem),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(hmap, int, struct hmap_elem);
+
+
+struct cls_elem {
+ struct bpf_spin_lock lock;
+ volatile int cnt;
+};
+
+struct bpf_map_def SEC("maps") cls_map = {
+ .type = BPF_MAP_TYPE_CGROUP_STORAGE,
+ .key_size = sizeof(struct bpf_cgroup_storage_key),
+ .value_size = sizeof(struct cls_elem),
+};
+
+BPF_ANNOTATE_KV_PAIR(cls_map, struct bpf_cgroup_storage_key,
+ struct cls_elem);
+
+struct bpf_vqueue {
+ struct bpf_spin_lock lock;
+ /* 4 byte hole */
+ unsigned long long lasttime;
+ int credit;
+ unsigned int rate;
+};
+
+struct bpf_map_def SEC("maps") vqueue = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct bpf_vqueue),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(vqueue, int, struct bpf_vqueue);
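+/* >> 20 is a cheap approximation of dividing the nanosecond delta by 10^6 */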
+#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
+
+SEC("spin_lock_demo")
+int bpf_sping_lock_test(struct __sk_buff *skb)
+{
+ volatile int credit = 0, max_credit = 100, pkt_len = 64;
+ struct hmap_elem zero = {}, *val;
+ unsigned long long curtime;
+ struct bpf_vqueue *q;
+ struct cls_elem *cls;
+ int key = 0;
+ int err = 0;
+
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (!val) {
+ bpf_map_update_elem(&hmap, &key, &zero, 0);
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (!val) {
+ err = 1;
+ goto err;
+ }
+ }
+ /* spin_lock in hash map run time test */
+ bpf_spin_lock(&val->lock);
+ if (val->cnt)
+ val->cnt--;
+ else
+ val->cnt++;
+ if (val->cnt != 0 && val->cnt != 1)
+ err = 1;
+ bpf_spin_unlock(&val->lock);
+
+ /* spin_lock in array. virtual queue demo */
+ q = bpf_map_lookup_elem(&vqueue, &key);
+ if (!q)
+ goto err;
+ curtime = bpf_ktime_get_ns();
+ bpf_spin_lock(&q->lock);
+ q->credit += CREDIT_PER_NS(curtime - q->lasttime, q->rate);
+ q->lasttime = curtime;
+ if (q->credit > max_credit)
+ q->credit = max_credit;
+ q->credit -= pkt_len;
+ credit = q->credit;
+ bpf_spin_unlock(&q->lock);
+
+ /* spin_lock in cgroup local storage */
+ cls = bpf_get_local_storage(&cls_map, 0);
+ bpf_spin_lock(&cls->lock);
+ cls->cnt++;
+ bpf_spin_unlock(&cls->lock);
+
+err:
+ return err;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_stack_map.c b/tools/testing/selftests/bpf/progs/test_stack_map.c
index 31c3880e6da0..31c3880e6da0 100644
--- a/tools/testing/selftests/bpf/test_stack_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stack_map.c
diff --git a/tools/testing/selftests/bpf/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index d86c281e957f..d86c281e957f 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index af111af7ca1a..af111af7ca1a 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
diff --git a/tools/testing/selftests/bpf/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index bee3bbecc0c4..bee3bbecc0c4 100644
--- a/tools/testing/selftests/bpf/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index 74f73b33a7b0..74f73b33a7b0 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index edbca203ce2d..edbca203ce2d 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
diff --git a/tools/testing/selftests/bpf/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c
index 04bf084517e0..04bf084517e0 100644
--- a/tools/testing/selftests/bpf/test_tracepoint.c
+++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c
diff --git a/tools/testing/selftests/bpf/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index 504df69c83df..504df69c83df 100644
--- a/tools/testing/selftests/bpf/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
diff --git a/tools/testing/selftests/bpf/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index 5e7df8bb5b5d..5e7df8bb5b5d 100644
--- a/tools/testing/selftests/bpf/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index 8d0182650653..8d0182650653 100644
--- a/tools/testing/selftests/bpf/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 5e4aac74f9d0..5e4aac74f9d0 100644
--- a/tools/testing/selftests/bpf/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
index ef9e704be140..ef9e704be140 100644
--- a/tools/testing/selftests/bpf/test_xdp_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
index 365a7d2d9f5c..365a7d2d9f5c 100644
--- a/tools/testing/selftests/bpf/test_xdp_vlan.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
diff --git a/tools/testing/selftests/bpf/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c
index 43b0ef1001ed..43b0ef1001ed 100644
--- a/tools/testing/selftests/bpf/xdp_dummy.c
+++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py
index 7f8200a8702b..a53ed58528d6 100755
--- a/tools/testing/selftests/bpf/tcp_client.py
+++ b/tools/testing/selftests/bpf/tcp_client.py
@@ -30,12 +30,11 @@ def send(sock, s):
serverPort = int(sys.argv[1])
-HostName = socket.gethostname()
# create active socket
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
try:
- sock.connect((HostName, serverPort))
+ sock.connect(('localhost', serverPort))
except socket.error as e:
sys.exit(1)
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py
index b39903fca4c8..0ca60d193bed 100755
--- a/tools/testing/selftests/bpf/tcp_server.py
+++ b/tools/testing/selftests/bpf/tcp_server.py
@@ -35,13 +35,10 @@ MAX_PORTS = 2
serverPort = SERVER_PORT
serverSocket = None
-HostName = socket.gethostname()
-
# create passive socket
serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
-host = socket.gethostname()
-try: serverSocket.bind((host, 0))
+try: serverSocket.bind(('localhost', 0))
except socket.error as msg:
print('bind fails: ' + str(msg))
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 8bcd38010582..38797aa627a7 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -18,6 +18,7 @@
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
+#include <assert.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
@@ -51,18 +52,10 @@ static int count_result(int err)
return err;
}
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-
-__printf(1, 2)
-static int __base_pr(const char *format, ...)
+static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
+ const char *format, va_list args)
{
- va_list args;
- int err;
-
- va_start(args, format);
- err = vfprintf(stderr, format, args);
- va_end(args);
- return err;
+ return vfprintf(stderr, format, args);
}
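The rewritten __base_pr() follows the libbpf change from three per-level printf-style callbacks to a single vprintf-style callback that also receives an enum libbpf_print_level; libbpf_set_print() accordingly takes just one function now (see the main() hunk at the end of this file). A minimal sketch of registering such a callback, assuming only the signature visible in this diff:

#include <stdio.h>
#include <stdarg.h>
#include <bpf/libbpf.h>

/* One callback handles every level; filter on 'level' instead of
 * registering separate warning/info/debug printers. */
static int my_print(enum libbpf_print_level level,
		    const char *format, va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;			/* drop debug chatter */
	return vfprintf(stderr, format, args);
}

int main(void)
{
	libbpf_set_print(my_print);		/* single-callback API */
	/* ... open and load BPF objects as usual ... */
	return 0;
}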
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
@@ -77,12 +70,21 @@ static int __base_pr(const char *format, ...)
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
BTF_INT_ENC(encoding, bits_offset, bits)
+#define BTF_FWD_ENC(name, kind_flag) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FWD, kind_flag, 0), 0)
+
#define BTF_ARRAY_ENC(type, index_type, nr_elems) \
(type), (index_type), (nr_elems)
#define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \
BTF_ARRAY_ENC(type, index_type, nr_elems)
+#define BTF_STRUCT_ENC(name, nr_elems, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, nr_elems), sz)
+
+#define BTF_UNION_ENC(name, nr_elems, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_UNION, 0, nr_elems), sz)
+
#define BTF_MEMBER_ENC(name, type, bits_offset) \
(name), (type), (bits_offset)
#define BTF_ENUM_ENC(name, val) (name), (val)
@@ -98,6 +100,12 @@ static int __base_pr(const char *format, ...)
#define BTF_CONST_ENC(type) \
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type)
+#define BTF_VOLATILE_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), type)
+
+#define BTF_RESTRICT_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), type)
+
#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type)
@@ -110,6 +118,10 @@ static int __base_pr(const char *format, ...)
#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
+#define NAME_NTH(N) (0xffff0000 | N)
+#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000)
+#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff)
+
#define MAX_NR_RAW_U32 1024
#define BTF_LOG_BUF_SIZE 65535
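NAME_NTH(N) lets a raw type slot name the Nth string of the test's string section explicitly, instead of consuming strings in order as NAME_TBD does; IS_NAME_NTH() and GET_NAME_NTH_IDX() recover the tag and the index in btf_raw_create() further down. A small, self-contained illustration of the encoding:

#include <stdio.h>

/* Illustration only: with a string section of "\0a\0b\0c", NAME_NTH(3)
 * tags a 32-bit slot so btf_raw_create() later substitutes the offset
 * of the 3rd string ("c"). */
#define NAME_NTH(N)		(0xffff0000 | N)
#define IS_NAME_NTH(X)		((X & 0xffff0000) == 0xffff0000)
#define GET_NAME_NTH_IDX(X)	(X & 0x0000ffff)

int main(void)
{
	unsigned int slot = NAME_NTH(3);

	if (IS_NAME_NTH(slot))
		printf("string index %u\n", GET_NAME_NTH_IDX(slot));	/* 3 */
	return 0;
}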
@@ -118,12 +130,14 @@ static struct args {
unsigned int file_test_num;
unsigned int get_info_test_num;
unsigned int info_raw_test_num;
+ unsigned int dedup_test_num;
bool raw_test;
bool file_test;
bool get_info_test;
bool pprint_test;
bool always_log;
bool info_raw_test;
+ bool dedup_test;
} args;
static char btf_log_buf[BTF_LOG_BUF_SIZE];
@@ -134,6 +148,12 @@ static struct btf_header hdr_tmpl = {
.hdr_len = sizeof(struct btf_header),
};
+/* several different mapv kinds (types) supported by pprint */
+enum pprint_mapv_kind_t {
+ PPRINT_MAPV_KIND_BASIC = 0,
+ PPRINT_MAPV_KIND_INT128,
+};
+
struct btf_raw_test {
const char *descr;
const char *str_sec;
@@ -156,6 +176,7 @@ struct btf_raw_test {
int type_off_delta;
int str_off_delta;
int str_len_delta;
+ enum pprint_mapv_kind_t mapv_kind;
};
#define BTF_STR_SEC(str) \
@@ -1881,13 +1902,12 @@ static struct btf_raw_test raw_tests[] = {
},
{
- .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)",
+ .descr = "func proto (TYPEDEF=>FUNC_PROTO)",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
- BTF_CONST_ENC(4), /* [3] */
- BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */
- BTF_FUNC_PROTO_ENC(0, 2), /* [5] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [4] */
BTF_FUNC_PROTO_ARG_ENC(0, 1),
BTF_FUNC_PROTO_ARG_ENC(0, 2),
BTF_END_RAW,
@@ -1901,8 +1921,6 @@ static struct btf_raw_test raw_tests[] = {
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 4,
- .btf_load_err = true,
- .err_str = "Invalid type_id",
},
{
@@ -1957,7 +1975,7 @@ static struct btf_raw_test raw_tests[] = {
/* void (*)(int a, unsigned int <bad_name_off>) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
- BTF_FUNC_PROTO_ARG_ENC(0xffffffff, 2),
+ BTF_FUNC_PROTO_ARG_ENC(0x0fffffff, 2),
BTF_END_RAW,
},
.str_sec = "\0a",
@@ -2707,6 +2725,99 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Invalid member offset",
},
+{
+ .descr = "128-bit int",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "int_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, 128-bit int member",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, 120-bit int member bitfield",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 120, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, kind_flag, 128-bit int member",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, kind_flag, 120-bit int member bitfield",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(120, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
}; /* struct btf_raw_test raw_tests[] */
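The five raw tests just added exercise BTF_KIND_INT with nr_bits up to 128 and a 16-byte size: standalone, as a plain struct member, and as a bitfield member with and without kind_flag. The encoding reuses the macros already defined above; as a sketch, a signed 128-bit integer type is simply:

/* Sketch of the type data the "128-bit int" test feeds the kernel: a
 * btf_type of kind INT and size 16, followed by one __u32 whose bits
 * field is 128.  __int128 itself is a compiler extension, hence the
 * __SIZEOF_INT128__ guards used elsewhere in this file. */
__u32 int128_type[] = {
	BTF_TYPE_INT_ENC(0 /* anon */, BTF_INT_SIGNED,
			 0 /* bits_offset */, 128 /* nr_bits */,
			 16 /* byte size */),
};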
static const char *get_next_str(const char *start, const char *end)
@@ -2734,11 +2845,13 @@ static void *btf_raw_create(const struct btf_header *hdr,
const char **ret_next_str)
{
const char *next_str = str, *end_str = str + str_sec_size;
+ const char **strs_idx = NULL, **tmp_strs_idx;
+ int strs_cap = 0, strs_cnt = 0, next_str_idx = 0;
unsigned int size_needed, offset;
struct btf_header *ret_hdr;
- int i, type_sec_size;
+ int i, type_sec_size, err = 0;
uint32_t *ret_types;
- void *raw_btf;
+ void *raw_btf = NULL;
type_sec_size = get_raw_sec_size(raw_types);
if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types"))
@@ -2753,17 +2866,44 @@ static void *btf_raw_create(const struct btf_header *hdr,
memcpy(raw_btf, hdr, sizeof(*hdr));
offset = sizeof(*hdr);
+ /* Index strings */
+ while ((next_str = get_next_str(next_str, end_str))) {
+ if (strs_cnt == strs_cap) {
+ strs_cap += max(16, strs_cap / 2);
+ tmp_strs_idx = realloc(strs_idx,
+ sizeof(*strs_idx) * strs_cap);
+ if (CHECK(!tmp_strs_idx,
+ "Cannot allocate memory for strs_idx")) {
+ err = -1;
+ goto done;
+ }
+ strs_idx = tmp_strs_idx;
+ }
+ strs_idx[strs_cnt++] = next_str;
+ next_str += strlen(next_str);
+ }
+
/* Copy type section */
ret_types = raw_btf + offset;
for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) {
if (raw_types[i] == NAME_TBD) {
- next_str = get_next_str(next_str, end_str);
- if (CHECK(!next_str, "Error in getting next_str")) {
- free(raw_btf);
- return NULL;
+ if (CHECK(next_str_idx == strs_cnt,
+ "Error in getting next_str #%d",
+ next_str_idx)) {
+ err = -1;
+ goto done;
}
- ret_types[i] = next_str - str;
- next_str += strlen(next_str);
+ ret_types[i] = strs_idx[next_str_idx++] - str;
+ } else if (IS_NAME_NTH(raw_types[i])) {
+ int idx = GET_NAME_NTH_IDX(raw_types[i]);
+
+ if (CHECK(idx <= 0 || idx > strs_cnt,
+ "Error getting string #%d, strs_cnt:%d",
+ idx, strs_cnt)) {
+ err = -1;
+ goto done;
+ }
+ ret_types[i] = strs_idx[idx-1] - str;
} else {
ret_types[i] = raw_types[i];
}
@@ -2780,8 +2920,17 @@ static void *btf_raw_create(const struct btf_header *hdr,
*btf_size = size_needed;
if (ret_next_str)
- *ret_next_str = next_str;
+ *ret_next_str =
+ next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL;
+done:
+ if (err) {
+ if (raw_btf)
+ free(raw_btf);
+ if (strs_idx)
+ free(strs_idx);
+ return NULL;
+ }
return raw_btf;
}
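btf_raw_create() now makes two passes: it first records a pointer to every string in str_sec, then resolves NAME_TBD slots from that array in order and NAME_NTH(N) slots by explicit index, with all failures funnelled through the single done: label that frees both the partially built BTF and the index. The indexing pass boils down to the following standalone sketch:

#include <stdlib.h>
#include <string.h>

/* Standalone sketch of the string-indexing pass: walk a
 * "\0str1\0str2\0..." section and record a pointer to each string.
 * Returns the number of strings, or -1 on allocation failure. */
static int index_strings(const char *str, size_t str_sec_size,
			 const char ***out_idx)
{
	const char *cur = str + 1, *end = str + str_sec_size;
	const char **idx = NULL, **tmp;
	int cap = 0, cnt = 0;

	while (cur < end && *cur) {
		if (cnt == cap) {
			cap = cap ? cap * 2 : 16;
			tmp = realloc(idx, sizeof(*idx) * cap);
			if (!tmp) {
				free(idx);
				return -1;
			}
			idx = tmp;
		}
		idx[cnt++] = cur;
		cur += strlen(cur) + 1;
	}
	*out_idx = idx;
	return cnt;
}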
@@ -3526,8 +3675,20 @@ struct pprint_mapv {
ENUM_TWO,
ENUM_THREE,
} aenum;
+ uint32_t ui32b;
+ uint32_t bits2c:2;
};
+#ifdef __SIZEOF_INT128__
+struct pprint_mapv_int128 {
+ __int128 si128a;
+ __int128 si128b;
+ unsigned __int128 bits3:3;
+ unsigned __int128 bits80:80;
+ unsigned __int128 ui128;
+};
+#endif
+
static struct btf_raw_test pprint_test_template[] = {
{
.raw_types = {
@@ -3568,7 +3729,7 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
@@ -3577,9 +3738,11 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */
BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */
+ BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */
+ BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
@@ -3628,7 +3791,7 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3637,9 +3800,11 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */
BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
+ BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
+ BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
@@ -3690,7 +3855,7 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3699,13 +3864,15 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */
BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
+ BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
+ BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
/* typedef unsigned int ___int */ /* [17] */
BTF_TYPEDEF_ENC(NAME_TBD, 18),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
@@ -3713,6 +3880,35 @@ static struct btf_raw_test pprint_test_template[] = {
.max_entries = 128 * 1024,
},
+#ifdef __SIZEOF_INT128__
+{
+ /* test int128 */
+ .raw_types = {
+ /* unsigned int */ /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
+ /* __int128 */ /* [2] */
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 128, 16),
+ /* unsigned __int128 */ /* [3] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 128, 16),
+ /* struct pprint_mapv_int128 */ /* [4] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 5), 64),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), /* si128a */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 128)), /* si128b */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(3, 256)), /* bits3 */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(80, 259)), /* bits80 */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 384)), /* ui128 */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unsigned int\0__int128\0unsigned __int128\0pprint_mapv_int128\0si128a\0si128b\0bits3\0bits80\0ui128"),
+ .key_size = sizeof(unsigned int),
+ .value_size = sizeof(struct pprint_mapv_int128),
+ .key_type_id = 1,
+ .value_type_id = 4,
+ .max_entries = 128 * 1024,
+ .mapv_kind = PPRINT_MAPV_KIND_INT128,
+},
+#endif
+
};
static struct btf_pprint_test_meta {
@@ -3779,22 +3975,108 @@ static struct btf_pprint_test_meta {
};
+static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
+{
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC)
+ return sizeof(struct pprint_mapv);
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128)
+ return sizeof(struct pprint_mapv_int128);
+#endif
+
+ assert(0);
+}
-static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i,
+static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
+ void *mapv, uint32_t i,
int num_cpus, int rounded_value_size)
{
int cpu;
- for (cpu = 0; cpu < num_cpus; cpu++) {
- v->ui32 = i + cpu;
- v->si32 = -i;
- v->unused_bits2a = 3;
- v->bits28 = i;
- v->unused_bits2b = 3;
- v->ui64 = i;
- v->aenum = i & 0x03;
- v = (void *)v + rounded_value_size;
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
+ struct pprint_mapv *v = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->ui32 = i + cpu;
+ v->si32 = -i;
+ v->unused_bits2a = 3;
+ v->bits28 = i;
+ v->unused_bits2b = 3;
+ v->ui64 = i;
+ v->aenum = i & 0x03;
+ v->ui32b = 4;
+ v->bits2c = 1;
+ v = (void *)v + rounded_value_size;
+ }
+ }
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
+ struct pprint_mapv_int128 *v = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->si128a = i;
+ v->si128b = -i;
+ v->bits3 = i & 0x07;
+ v->bits80 = (((unsigned __int128)1) << 64) + i;
+ v->ui128 = (((unsigned __int128)2) << 64) + i;
+ v = (void *)v + rounded_value_size;
+ }
+ }
+#endif
+}
+
+ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
+ char *expected_line, ssize_t line_size,
+ bool percpu_map, unsigned int next_key,
+ int cpu, void *mapv)
+{
+ ssize_t nexpected_line = -1;
+
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
+ struct pprint_mapv *v = mapv;
+
+ nexpected_line = snprintf(expected_line, line_size,
+ "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
+ "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+ "%u,0x%x}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ v->ui32, v->si32,
+ v->unused_bits2a,
+ v->bits28,
+ v->unused_bits2b,
+ v->ui64,
+ v->ui8a[0], v->ui8a[1],
+ v->ui8a[2], v->ui8a[3],
+ v->ui8a[4], v->ui8a[5],
+ v->ui8a[6], v->ui8a[7],
+ pprint_enum_str[v->aenum],
+ v->ui32b,
+ v->bits2c);
+ }
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
+ struct pprint_mapv_int128 *v = mapv;
+
+ nexpected_line = snprintf(expected_line, line_size,
+ "%s%u: {0x%lx,0x%lx,0x%lx,"
+ "0x%lx%016lx,0x%lx%016lx}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ (uint64_t)v->si128a,
+ (uint64_t)v->si128b,
+ (uint64_t)v->bits3,
+ (uint64_t)(v->bits80 >> 64),
+ (uint64_t)v->bits80,
+ (uint64_t)(v->ui128 >> 64),
+ (uint64_t)v->ui128);
}
+#endif
+
+ return nexpected_line;
}
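printf() has no conversion specifier for __int128, so get_pprint_expected_line() prints 128-bit fields as two 64-bit halves, "0x%lx%016lx", high word first, which matches what the kernel's BTF pretty-printer writes into the map dump. The split, as a minimal sketch:

#include <stdio.h>
#include <stdint.h>

#ifdef __SIZEOF_INT128__
/* Sketch: format an unsigned __int128 the way the expected-line code
 * above does, as <high 64 bits><low 64 bits, zero padded to 16 hex digits>. */
static void print_u128(unsigned __int128 v)
{
	printf("0x%lx%016lx\n",
	       (uint64_t)(v >> 64),	/* high half */
	       (uint64_t)v);		/* low half */
}
#endif

int main(void)
{
#ifdef __SIZEOF_INT128__
	print_u128(((unsigned __int128)2 << 64) + 5);	/* 0x2 then 0...05 */
#endif
	return 0;
}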
static int check_line(const char *expected_line, int nexpected_line,
@@ -3818,10 +4100,10 @@ static int check_line(const char *expected_line, int nexpected_line,
static int do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
+ enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
struct bpf_create_map_attr create_attr = {};
bool ordered_map, lossless_map, percpu_map;
int err, ret, num_cpus, rounded_value_size;
- struct pprint_mapv *mapv = NULL;
unsigned int key, nr_read_elems;
int map_fd = -1, btf_fd = -1;
unsigned int raw_btf_size;
@@ -3830,6 +4112,7 @@ static int do_test_pprint(int test_num)
char pin_path[255];
size_t line_len = 0;
char *line = NULL;
+ void *mapv = NULL;
uint8_t *raw_btf;
ssize_t nread;
@@ -3882,7 +4165,7 @@ static int do_test_pprint(int test_num)
percpu_map = test->percpu_map;
num_cpus = percpu_map ? bpf_num_possible_cpus() : 1;
- rounded_value_size = round_up(sizeof(struct pprint_mapv), 8);
+ rounded_value_size = round_up(get_pprint_mapv_size(mapv_kind), 8);
mapv = calloc(num_cpus, rounded_value_size);
if (CHECK(!mapv, "mapv allocation failure")) {
err = -1;
@@ -3890,7 +4173,7 @@ static int do_test_pprint(int test_num)
}
for (key = 0; key < test->max_entries; key++) {
- set_pprint_mapv(mapv, key, num_cpus, rounded_value_size);
+ set_pprint_mapv(mapv_kind, mapv, key, num_cpus, rounded_value_size);
bpf_map_update_elem(map_fd, &key, mapv, 0);
}
@@ -3914,13 +4197,13 @@ static int do_test_pprint(int test_num)
ordered_map = test->ordered_map;
lossless_map = test->lossless_map;
do {
- struct pprint_mapv *cmapv;
ssize_t nexpected_line;
unsigned int next_key;
+ void *cmapv;
int cpu;
next_key = ordered_map ? nr_read_elems : atoi(line);
- set_pprint_mapv(mapv, next_key, num_cpus, rounded_value_size);
+ set_pprint_mapv(mapv_kind, mapv, next_key, num_cpus, rounded_value_size);
cmapv = mapv;
for (cpu = 0; cpu < num_cpus; cpu++) {
@@ -3953,28 +4236,16 @@ static int do_test_pprint(int test_num)
break;
}
- nexpected_line = snprintf(expected_line, sizeof(expected_line),
- "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
- "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n",
- percpu_map ? "\tcpu" : "",
- percpu_map ? cpu : next_key,
- cmapv->ui32, cmapv->si32,
- cmapv->unused_bits2a,
- cmapv->bits28,
- cmapv->unused_bits2b,
- cmapv->ui64,
- cmapv->ui8a[0], cmapv->ui8a[1],
- cmapv->ui8a[2], cmapv->ui8a[3],
- cmapv->ui8a[4], cmapv->ui8a[5],
- cmapv->ui8a[6], cmapv->ui8a[7],
- pprint_enum_str[cmapv->aenum]);
-
+ nexpected_line = get_pprint_expected_line(mapv_kind, expected_line,
+ sizeof(expected_line),
+ percpu_map, next_key,
+ cpu, cmapv);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
if (err == -1)
goto done;
- cmapv = (void *)cmapv + rounded_value_size;
+ cmapv = cmapv + rounded_value_size;
}
if (percpu_map) {
@@ -4070,6 +4341,10 @@ static struct prog_info_raw_test {
__u32 line_info_rec_size;
__u32 nr_jited_ksyms;
bool expected_prog_load_failure;
+ __u32 dead_code_cnt;
+ __u32 dead_code_mask;
+ __u32 dead_func_cnt;
+ __u32 dead_func_mask;
} info_raw_tests[] = {
{
.descr = "func_type (main func + one sub)",
@@ -4496,6 +4771,369 @@ static struct prog_info_raw_test {
.expected_prog_load_failure = true,
},
+{
+ .descr = "line_info (dead start)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0/* dead jmp */\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 6),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 1,
+ .dead_code_mask = 0x01,
+},
+
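The new dead_code_cnt/dead_code_mask (and dead_func_cnt/dead_func_mask) fields describe what the verifier's dead-code elimination is expected to strip: bit i set in a mask means the i-th submitted line_info record (or subprog) should be absent from the info returned by bpf_obj_get_info_by_fd(), and the counts shrink the expected nr_func_info and line_info_cnt in test_get_finfo()/test_get_linfo() below. A tiny illustration of reading such a mask:

#include <stdio.h>

/* Illustration: enumerate the line_info records that a value like
 * .dead_code_mask marks as removed by dead code elimination. */
static void dump_dead(unsigned int mask)
{
	for (int i = 0; mask; i++, mask >>= 1)
		if (mask & 1)
			printf("line_info[%d] expected to be dropped\n", i);
}

int main(void)
{
	dump_dead(0x01);	/* the "dead start" case above: entry 0 only */
	return 0;
}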
+{
+ .descr = "line_info (dead end)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0/* dead jmp */\0return a + b;\0/* dead exit */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 12),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 11),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 9),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 8),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 6, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 2,
+ .dead_code_mask = 0x28,
+},
+
+{
+ .descr = "line_info (dead code + subprog + func_info)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0/* dead jmp */"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 8),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {14, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(14, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(16, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 9,
+ .dead_code_mask = 0x3fe,
+},
+
+{
+ .descr = "line_info (dead subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
+ "\0return 0;\0return 0;\0/* dead */\0/* dead */"
+ "\0/* dead */\0return bla + 1;\0return bla + 1;"
+ "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {6, 3}, {9, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 3,
+ .dead_code_mask = 0x70,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead last subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+	BTF_FUNC_ENC(NAME_TBD, 2),			/* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0int a=1+1;\0/* live call */"
+ "\0return 0;\0/* dead */\0/* dead */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {5, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 2,
+ .dead_code_mask = 0x18,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead subprog + dead start)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* dead */"
+ "\0return 0;\0return 0;\0return 0;"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0return b + 1;\0return b + 1;\0return b + 1;"),
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {7, 3}, {10, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(13, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 5,
+ .dead_code_mask = 0x1e2,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead subprog + dead start w/ move)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
+ "\0return 0;\0return 0;\0/* dead */\0/* dead */"
+ "\0/* dead */\0return bla + 1;\0return bla + 1;"
+ "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {6, 3}, {9, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 3,
+ .dead_code_mask = 0x70,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead end + subprog start w/ no linfo)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0main\0func\0/* main linfo */\0/* func linfo */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 1, 3),
+ BPF_CALL_REL(3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 3}, {6, 4}, },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+},
+
};
static size_t probe_prog_length(const struct bpf_insn *fp)
@@ -4555,6 +5193,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
struct bpf_func_info *finfo;
__u32 info_len, rec_size, i;
void *func_info = NULL;
+ __u32 nr_func_info;
int err;
/* get necessary lens */
@@ -4564,7 +5203,8 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
}
- if (CHECK(info.nr_func_info != test->func_info_cnt,
+ nr_func_info = test->func_info_cnt - test->dead_func_cnt;
+ if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (1st) %d",
info.nr_func_info)) {
return -1;
@@ -4585,7 +5225,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
/* reset info to only retrieve func_info related data */
memset(&info, 0, sizeof(info));
- info.nr_func_info = test->func_info_cnt;
+ info.nr_func_info = nr_func_info;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
@@ -4594,7 +5234,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
err = -1;
goto done;
}
- if (CHECK(info.nr_func_info != test->func_info_cnt,
+ if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (2nd) %d",
info.nr_func_info)) {
err = -1;
@@ -4608,7 +5248,9 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
}
finfo = func_info;
- for (i = 0; i < test->func_info_cnt; i++) {
+ for (i = 0; i < nr_func_info; i++) {
+ if (test->dead_func_mask & (1 << i))
+ continue;
if (CHECK(finfo->type_id != test->func_info[i][1],
"incorrect func_type %u expected %u",
finfo->type_id, test->func_info[i][1])) {
@@ -4637,6 +5279,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
struct bpf_prog_info info = {};
__u32 *jited_func_lens = NULL;
__u64 cur_func_ksyms;
+ __u32 dead_insns;
int err;
jited_cnt = cnt;
@@ -4645,7 +5288,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
if (test->nr_jited_ksyms)
nr_jited_ksyms = test->nr_jited_ksyms;
else
- nr_jited_ksyms = test->func_info_cnt;
+ nr_jited_ksyms = test->func_info_cnt - test->dead_func_cnt;
nr_jited_func_lens = nr_jited_ksyms;
info_len = sizeof(struct bpf_prog_info);
@@ -4747,12 +5390,20 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
goto done;
}
+ dead_insns = 0;
+ while (test->dead_code_mask & (1 << dead_insns))
+ dead_insns++;
+
CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u",
linfo[0].insn_off);
for (i = 1; i < cnt; i++) {
const struct bpf_line_info *expected_linfo;
- expected_linfo = patched_linfo + (i * test->line_info_rec_size);
+ while (test->dead_code_mask & (1 << (i + dead_insns)))
+ dead_insns++;
+
+ expected_linfo = patched_linfo +
+ ((i + dead_insns) * test->line_info_rec_size);
if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off,
"linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u",
i, linfo[i].insn_off,
@@ -4910,7 +5561,9 @@ static int do_test_info_raw(unsigned int test_num)
if (err)
goto done;
- err = test_get_linfo(test, patched_linfo, attr.line_info_cnt, prog_fd);
+ err = test_get_linfo(test, patched_linfo,
+ attr.line_info_cnt - test->dead_code_cnt,
+ prog_fd);
if (err)
goto done;
@@ -4946,20 +5599,508 @@ static int test_info_raw(void)
return err;
}
+struct btf_raw_data {
+ __u32 raw_types[MAX_NR_RAW_U32];
+ const char *str_sec;
+ __u32 str_sec_size;
+};
+
+struct btf_dedup_test {
+ const char *descr;
+ struct btf_raw_data input;
+ struct btf_raw_data expect;
+ struct btf_dedup_opts opts;
+};
+
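Each dedup test pairs an input BTF with the BTF expected after deduplication; do_test_dedup() below builds both with btf_raw_create()/btf__new(), runs btf__dedup() with the per-test opts (dont_resolve_fwds, and optionally a tiny dedup_table_size to force hash collisions), then compares string and type sections byte for byte. Stripped of the checks, the libbpf call sequence is roughly this sketch (IS_ERR() as already used by this file):

#include <linux/types.h>
#include <bpf/btf.h>

/* Sketch of the calls the dedup tests are built around; 'raw' and 'size'
 * come from btf_raw_create(), error handling is as in do_test_dedup(). */
static int dedup_raw(void *raw, unsigned int size,
		     const struct btf_dedup_opts *opts)
{
	struct btf *btf = btf__new(raw, size);
	__u32 out_size;

	if (IS_ERR(btf))
		return -1;
	if (btf__dedup(btf, NULL /* no .BTF.ext */, opts)) {
		btf__free(btf);
		return -1;
	}
	/* the deduplicated image is now available via btf__get_raw_data() */
	btf__get_raw_data(btf, &out_size);
	btf__free(btf);
	return 0;
}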
+const struct btf_dedup_test dedup_tests[] = {
+
+{
+ .descr = "dedup: unused strings filtering",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unused\0int\0foo\0bar\0long"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: strings deduplication",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(3), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long int\0int\0long int\0int"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long int"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: struct example #1",
+ /*
+ * struct s {
+ * struct s *next;
+ * const int *a;
+ * int b[16];
+ * int c;
+ * }
+ */
+ .input = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* int[16] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
+ /* struct s { */
+ BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */
+ BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */
+ BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */
+ BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */
+ /* ptr -> [3] struct s */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> [6] const int */
+ BTF_PTR_ENC(6), /* [5] */
+ /* const -> [1] int */
+ BTF_CONST_ENC(1), /* [6] */
+
+ /* full copy of the above */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */
+ BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */
+ BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [9] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 10, 0),
+ BTF_MEMBER_ENC(NAME_NTH(4), 11, 64),
+ BTF_MEMBER_ENC(NAME_NTH(5), 8, 128),
+ BTF_MEMBER_ENC(NAME_NTH(6), 7, 640),
+ BTF_PTR_ENC(9), /* [10] */
+ BTF_PTR_ENC(12), /* [11] */
+ BTF_CONST_ENC(7), /* [12] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0"),
+ },
+ .expect = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* int[16] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
+ /* struct s { */
+ BTF_STRUCT_ENC(NAME_NTH(6), 4, 84), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(5), 4, 0), /* struct s *next; */
+ BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */
+ BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */
+ /* ptr -> [3] struct s */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> [6] const int */
+ BTF_PTR_ENC(6), /* [5] */
+ /* const -> [1] int */
+ BTF_CONST_ENC(1), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a\0b\0c\0int\0next\0s"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: struct <-> fwd resolution w/ hash collision",
+ /*
+ * // CU 1:
+ * struct x;
+ * struct s {
+ * struct x *x;
+ * };
+ * // CU 2:
+ * struct x {};
+ * struct s {
+ * struct x *x;
+ * };
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_FWD_ENC(NAME_TBD, 0 /* struct fwd */), /* [1] fwd x */
+ BTF_PTR_ENC(1), /* [2] ptr -> [1] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ /* CU 2 */
+ BTF_STRUCT_ENC(NAME_TBD, 0, 0), /* [4] struct x */
+ BTF_PTR_ENC(4), /* [5] ptr -> [4] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [6] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 5, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0s\0x\0x\0s\0x\0"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_PTR_ENC(3), /* [1] ptr -> [3] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [2] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_STRUCT_ENC(NAME_NTH(2), 0, 0), /* [3] struct x */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0s\0x"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ .dedup_table_size = 1, /* force hash collisions */
+ },
+},
+{
+ .descr = "dedup: all possible kinds (no duplicates)",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_ENUM_ENC(NAME_TBD, 1),
+ BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
+ BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
+ BTF_PTR_ENC(0), /* [8] ptr */
+ BTF_CONST_ENC(8), /* [9] const */
+ BTF_VOLATILE_ENC(8), /* [10] volatile */
+ BTF_RESTRICT_ENC(8), /* [11] restrict */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_ENUM_ENC(NAME_TBD, 1),
+ BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
+ BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
+ BTF_PTR_ENC(0), /* [8] ptr */
+ BTF_CONST_ENC(8), /* [9] const */
+ BTF_VOLATILE_ENC(8), /* [10] volatile */
+ BTF_RESTRICT_ENC(8), /* [11] restrict */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: no int duplicates",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
+ /* different name */
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
+ /* different encoding */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
+ /* different bit offset */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
+ /* different bit size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
+ /* different byte size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0some other int"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
+ /* different name */
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
+ /* different encoding */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
+ /* different bit offset */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
+ /* different bit size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
+ /* different byte size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0some other int"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+
+};
+
+static int btf_type_size(const struct btf_type *t)
+{
+ int base_size = sizeof(struct btf_type);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ __u16 kind = BTF_INFO_KIND(t->info);
+
+ switch (kind) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return base_size;
+ case BTF_KIND_INT:
+ return base_size + sizeof(__u32);
+ case BTF_KIND_ENUM:
+ return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ARRAY:
+ return base_size + sizeof(struct btf_array);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return base_size + vlen * sizeof(struct btf_member);
+ case BTF_KIND_FUNC_PROTO:
+ return base_size + vlen * sizeof(struct btf_param);
+ default:
+ fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind);
+ return -EINVAL;
+ }
+}
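btf_type_size() mirrors the type-section layout of BTF: a fixed struct btf_type header followed by kind-specific trailing data (one extra __u32 for INT, a struct btf_array for ARRAY, vlen struct btf_member entries for STRUCT/UNION, and so on). That is what lets do_test_dedup() step through two type sections and memcmp() the types one by one; a sketch of such a walk:

/* Sketch: walk a type section using btf_type_size() above.  'cur' points
 * at the first struct btf_type and 'end' just past the type section.
 * (Arithmetic on void pointers is the usual GCC extension.) */
static int count_types(const void *cur, const void *end)
{
	int n = 0;

	while (cur < end) {
		int sz = btf_type_size(cur);

		if (sz < 0)
			return sz;	/* unsupported kind */
		cur += sz;
		n++;
	}
	return n;
}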
+
+static void dump_btf_strings(const char *strs, __u32 len)
+{
+ const char *cur = strs;
+ int i = 0;
+
+ while (cur < strs + len) {
+ fprintf(stderr, "string #%d: '%s'\n", i, cur);
+ cur += strlen(cur) + 1;
+ i++;
+ }
+}
+
+static int do_test_dedup(unsigned int test_num)
+{
+ const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
+ __u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
+ const struct btf_header *test_hdr, *expect_hdr;
+ struct btf *test_btf = NULL, *expect_btf = NULL;
+ const void *test_btf_data, *expect_btf_data;
+ const char *ret_test_next_str, *ret_expect_next_str;
+ const char *test_strs, *expect_strs;
+ const char *test_str_cur, *test_str_end;
+ const char *expect_str_cur, *expect_str_end;
+ unsigned int raw_btf_size;
+ void *raw_btf;
+ int err = 0, i;
+
+ fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr);
+
+ raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types,
+ test->input.str_sec, test->input.str_sec_size,
+ &raw_btf_size, &ret_test_next_str);
+ if (!raw_btf)
+ return -1;
+ test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ free(raw_btf);
+ if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",
+ PTR_ERR(test_btf))) {
+ err = -1;
+ goto done;
+ }
+
+ raw_btf = btf_raw_create(&hdr_tmpl, test->expect.raw_types,
+ test->expect.str_sec,
+ test->expect.str_sec_size,
+ &raw_btf_size, &ret_expect_next_str);
+ if (!raw_btf)
+ return -1;
+ expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ free(raw_btf);
+ if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",
+ PTR_ERR(expect_btf))) {
+ err = -1;
+ goto done;
+ }
+
+ err = btf__dedup(test_btf, NULL, &test->opts);
+ if (CHECK(err, "btf_dedup failed errno:%d", err)) {
+ err = -1;
+ goto done;
+ }
+
+ test_btf_data = btf__get_raw_data(test_btf, &test_btf_size);
+ expect_btf_data = btf__get_raw_data(expect_btf, &expect_btf_size);
+ if (CHECK(test_btf_size != expect_btf_size,
+ "test_btf_size:%u != expect_btf_size:%u",
+ test_btf_size, expect_btf_size)) {
+ err = -1;
+ goto done;
+ }
+
+ test_hdr = test_btf_data;
+ test_strs = test_btf_data + sizeof(*test_hdr) + test_hdr->str_off;
+ expect_hdr = expect_btf_data;
+ expect_strs = expect_btf_data + sizeof(*test_hdr) + expect_hdr->str_off;
+ if (CHECK(test_hdr->str_len != expect_hdr->str_len,
+ "test_hdr->str_len:%u != expect_hdr->str_len:%u",
+ test_hdr->str_len, expect_hdr->str_len)) {
+ fprintf(stderr, "\ntest strings:\n");
+ dump_btf_strings(test_strs, test_hdr->str_len);
+ fprintf(stderr, "\nexpected strings:\n");
+ dump_btf_strings(expect_strs, expect_hdr->str_len);
+ err = -1;
+ goto done;
+ }
+
+ test_str_cur = test_strs;
+ test_str_end = test_strs + test_hdr->str_len;
+ expect_str_cur = expect_strs;
+ expect_str_end = expect_strs + expect_hdr->str_len;
+ while (test_str_cur < test_str_end && expect_str_cur < expect_str_end) {
+ size_t test_len, expect_len;
+
+ test_len = strlen(test_str_cur);
+ expect_len = strlen(expect_str_cur);
+ if (CHECK(test_len != expect_len,
+ "test_len:%zu != expect_len:%zu "
+ "(test_str:%s, expect_str:%s)",
+ test_len, expect_len, test_str_cur, expect_str_cur)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(strcmp(test_str_cur, expect_str_cur),
+ "test_str:%s != expect_str:%s",
+ test_str_cur, expect_str_cur)) {
+ err = -1;
+ goto done;
+ }
+ test_str_cur += test_len + 1;
+ expect_str_cur += expect_len + 1;
+ }
+ if (CHECK(test_str_cur != test_str_end,
+ "test_str_cur:%p != test_str_end:%p",
+ test_str_cur, test_str_end)) {
+ err = -1;
+ goto done;
+ }
+
+ test_nr_types = btf__get_nr_types(test_btf);
+ expect_nr_types = btf__get_nr_types(expect_btf);
+ if (CHECK(test_nr_types != expect_nr_types,
+ "test_nr_types:%u != expect_nr_types:%u",
+ test_nr_types, expect_nr_types)) {
+ err = -1;
+ goto done;
+ }
+
+ for (i = 1; i <= test_nr_types; i++) {
+ const struct btf_type *test_type, *expect_type;
+ int test_size, expect_size;
+
+ test_type = btf__type_by_id(test_btf, i);
+ expect_type = btf__type_by_id(expect_btf, i);
+ test_size = btf_type_size(test_type);
+ expect_size = btf_type_size(expect_type);
+
+ if (CHECK(test_size != expect_size,
+ "type #%d: test_size:%d != expect_size:%u",
+ i, test_size, expect_size)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(memcmp((void *)test_type,
+ (void *)expect_type,
+ test_size),
+ "type #%d: contents differ", i)) {
+ err = -1;
+ goto done;
+ }
+ }
+
+done:
+ if (!err)
+ fprintf(stderr, "OK");
+ if (!IS_ERR(test_btf))
+ btf__free(test_btf);
+ if (!IS_ERR(expect_btf))
+ btf__free(expect_btf);
+
+ return err;
+}
+
+static int test_dedup(void)
+{
+ unsigned int i;
+ int err = 0;
+
+ if (args.dedup_test_num)
+ return count_result(do_test_dedup(args.dedup_test_num));
+
+ for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++)
+ err |= count_result(do_test_dedup(i));
+
+ return err;
+}
+
static void usage(const char *cmd)
{
fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
"\t[-g btf_get_info_test_num (1 - %zu)] |\n"
"\t[-f btf_file_test_num (1 - %zu)] |\n"
"\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
- "\t[-p (pretty print test)]]\n",
+ "\t[-p (pretty print test)] |\n"
+ "\t[-d btf_dedup_test_num (1 - %zu)]]\n",
cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
- ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests));
+ ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests),
+ ARRAY_SIZE(dedup_tests));
}
static int parse_args(int argc, char **argv)
{
- const char *optstr = "lpk:f:r:g:";
+ const char *optstr = "hlpk:f:r:g:d:";
int opt;
while ((opt = getopt(argc, argv, optstr)) != -1) {
@@ -4986,12 +6127,16 @@ static int parse_args(int argc, char **argv)
args.info_raw_test_num = atoi(optarg);
args.info_raw_test = true;
break;
+ case 'd':
+ args.dedup_test_num = atoi(optarg);
+ args.dedup_test = true;
+ break;
case 'h':
usage(argv[0]);
exit(0);
default:
- usage(argv[0]);
- return -1;
+ usage(argv[0]);
+ return -1;
}
}
@@ -5027,6 +6172,14 @@ static int parse_args(int argc, char **argv)
return -1;
}
+ if (args.dedup_test_num &&
+ (args.dedup_test_num < 1 ||
+ args.dedup_test_num > ARRAY_SIZE(dedup_tests))) {
+ fprintf(stderr, "BTF dedup test number must be [1 - %zu]\n",
+ ARRAY_SIZE(dedup_tests));
+ return -1;
+ }
+
return 0;
}
@@ -5045,7 +6198,7 @@ int main(int argc, char **argv)
return err;
if (args.always_log)
- libbpf_set_print(__base_pr, __base_pr, __base_pr);
+ libbpf_set_print(__base_pr);
if (args.raw_test)
err |= test_raw();
@@ -5062,14 +6215,18 @@ int main(int argc, char **argv)
if (args.info_raw_test)
err |= test_info_raw();
+ if (args.dedup_test)
+ err |= test_dedup();
+
if (args.raw_test || args.get_info_test || args.file_test ||
- args.pprint_test || args.info_raw_test)
+ args.pprint_test || args.info_raw_test || args.dedup_test)
goto done;
err |= test_raw();
err |= test_get_info();
err |= test_file();
err |= test_info_raw();
+ err |= test_dedup();
done:
print_summary();
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index f44834155f25..2fc4625c1a15 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -81,7 +81,7 @@ int main(int argc, char **argv)
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
- if (!cgroup_fd) {
+ if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 9c8b50bac7e0..76e4993b7c16 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -43,7 +43,7 @@ int main(int argc, char **argv)
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
- if (!cgroup_fd) {
+ if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c
index 12b784afba31..01f0c634d548 100644
--- a/tools/testing/selftests/bpf/test_flow_dissector.c
+++ b/tools/testing/selftests/bpf/test_flow_dissector.c
@@ -16,7 +16,6 @@
#include <errno.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
-#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <netinet/ip.h>
#include <netinet/in.h>
@@ -25,7 +24,6 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
index 8fcd1c076add..65cbd30704b5 100644
--- a/tools/testing/selftests/bpf/test_libbpf_open.c
+++ b/tools/testing/selftests/bpf/test_libbpf_open.c
@@ -34,23 +34,16 @@ static void usage(char *argv[])
printf("\n");
}
-#define DEFINE_PRINT_FN(name, enabled) \
-static int libbpf_##name(const char *fmt, ...) \
-{ \
- va_list args; \
- int ret; \
- \
- va_start(args, fmt); \
- if (enabled) { \
- fprintf(stderr, "[" #name "] "); \
- ret = vfprintf(stderr, fmt, args); \
- } \
- va_end(args); \
- return ret; \
+static bool debug = 0;
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *fmt, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !debug)
+ return 0;
+
+ fprintf(stderr, "[%d] ", level);
+ return vfprintf(stderr, fmt, args);
}
-DEFINE_PRINT_FN(warning, 1)
-DEFINE_PRINT_FN(info, 1)
-DEFINE_PRINT_FN(debug, 1)
#define EXIT_FAIL_LIBBPF EXIT_FAILURE
#define EXIT_FAIL_OPTION 2
@@ -74,7 +67,7 @@ int test_walk_maps(struct bpf_object *obj, bool verbose)
struct bpf_map *map;
int cnt = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
cnt++;
if (verbose)
printf("Map (count:%d) name: %s\n", cnt,
@@ -120,15 +113,14 @@ int main(int argc, char **argv)
int longindex = 0;
int opt;
- libbpf_set_print(libbpf_warning, libbpf_info, NULL);
+ libbpf_set_print(libbpf_debug_print);
/* Parse commands line args */
while ((opt = getopt_long(argc, argv, "hDq",
long_options, &longindex)) != -1) {
switch (opt) {
case 'D':
- libbpf_set_print(libbpf_warning, libbpf_info,
- libbpf_debug);
+ debug = 1;
break;
case 'q': /* Use in scripting mode */
verbose = 0;
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 147e34cfceb7..02d7c871862a 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -474,6 +474,16 @@ static void test_lpm_delete(void)
assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
errno == ENOENT);
+ key->prefixlen = 30; // unused prefix so far
+ inet_pton(AF_INET, "192.255.0.0", key->data);
+ assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+ errno == ENOENT);
+
+ key->prefixlen = 16; // same prefix as the root node
+ inet_pton(AF_INET, "192.255.0.0", key->data);
+ assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+ errno == ENOENT);
+
/* assert initial lookup */
key->prefixlen = 32;
inet_pton(AF_INET, "192.168.0.1", key->data);
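The two added assertions make sure that deleting a /30 prefix that was never inserted, and deleting the root node's /16 prefix length with a non-matching address, both fail cleanly with ENOENT rather than removing an unrelated node. The key layout is the usual LPM trie key (prefix length plus address bytes); a hedged sketch of such a delete:

#include <arpa/inet.h>
#include <linux/types.h>
#include <bpf/bpf.h>

/* Sketch of an LPM_TRIE delete with an IPv4 key, as in the asserts above;
 * 'map_fd' is assumed to be an existing BPF_MAP_TYPE_LPM_TRIE map whose
 * key_size is sizeof(struct ipv4_lpm_key). */
struct ipv4_lpm_key {
	__u32 prefixlen;	/* the /N part */
	__u8  data[4];		/* network-order IPv4 address */
};

static int delete_prefix(int map_fd, const char *addr, __u32 plen)
{
	struct ipv4_lpm_key key = { .prefixlen = plen };

	inet_pton(AF_INET, addr, key.data);
	/* returns -1 with errno == ENOENT when no exact node exists */
	return bpf_map_delete_elem(map_fd, &key);
}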
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
new file mode 100755
index 000000000000..612632c1425f
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -0,0 +1,376 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Setup/topology:
+#
+# NS1 NS2 NS3
+# veth1 <---> veth2 veth3 <---> veth4 (the top route)
+# veth5 <---> veth6 veth7 <---> veth8 (the bottom route)
+#
+# each vethN gets IPv[4|6]_N address
+#
+# IPv*_SRC = IPv*_1
+# IPv*_DST = IPv*_4
+#
+# all tests test pings from IPv*_SRC to IPv*_DST
+#
+# by default, routes are configured to allow packets to go
+# IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
+#
+# a GRE device is installed in NS3 with IPv*_GRE, and
+# NS1/NS2 are configured to route packets to IPv*_GRE via IP*_6/IP*_8
+# (the bottom route)
+#
+# Tests:
+#
+# 1. routes NS2->IPv*_DST are brought down, so the only way a ping
+# from IP*_SRC to IP*_DST can work is via IPv*_GRE
+#
+# 2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
+# that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+# ping: SRC->[encap at veth1:egress]->GRE:decap->DST
+# ping replies go DST->SRC directly
+#
+# 2b. in an ingress test, a bpf LWT_IN program is installed on veth2
+# that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+# ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
+# ping replies go DST->SRC directly
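+#
+# for reference, the replacement routes installed in tests 2a/2b use
+# iproute2's LWT BPF support; encap_gre/encap_gre6 are section names
+# in test_lwt_ip_encap.o, roughly:
+#
+#   egress (2a), in NS1:
+#     ip route add IPv*_DST encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+#   ingress (2b), in NS2:
+#     ip route add IPv*_DST encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2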
+
+if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ echo "FAIL"
+ exit 1
+fi
+
+readonly NS1="ns1-$(mktemp -u XXXXXX)"
+readonly NS2="ns2-$(mktemp -u XXXXXX)"
+readonly NS3="ns3-$(mktemp -u XXXXXX)"
+
+readonly IPv4_1="172.16.1.100"
+readonly IPv4_2="172.16.2.100"
+readonly IPv4_3="172.16.3.100"
+readonly IPv4_4="172.16.4.100"
+readonly IPv4_5="172.16.5.100"
+readonly IPv4_6="172.16.6.100"
+readonly IPv4_7="172.16.7.100"
+readonly IPv4_8="172.16.8.100"
+readonly IPv4_GRE="172.16.16.100"
+
+readonly IPv4_SRC=$IPv4_1
+readonly IPv4_DST=$IPv4_4
+
+readonly IPv6_1="fb01::1"
+readonly IPv6_2="fb02::1"
+readonly IPv6_3="fb03::1"
+readonly IPv6_4="fb04::1"
+readonly IPv6_5="fb05::1"
+readonly IPv6_6="fb06::1"
+readonly IPv6_7="fb07::1"
+readonly IPv6_8="fb08::1"
+readonly IPv6_GRE="fb10::1"
+
+readonly IPv6_SRC=$IPv6_1
+readonly IPv6_DST=$IPv6_4
+
+TEST_STATUS=0
+TESTS_SUCCEEDED=0
+TESTS_FAILED=0
+
+process_test_results()
+{
+ if [[ "${TEST_STATUS}" -eq 0 ]] ; then
+ echo "PASS"
+ TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1))
+ else
+ echo "FAIL"
+ TESTS_FAILED=$((TESTS_FAILED+1))
+ fi
+}
+
+print_test_summary_and_exit()
+{
+ echo "passed tests: ${TESTS_SUCCEEDED}"
+ echo "failed tests: ${TESTS_FAILED}"
+ if [ "${TESTS_FAILED}" -eq "0" ] ; then
+ exit 0
+ else
+ exit 1
+ fi
+}
+
+setup()
+{
+ set -e # exit on error
+ TEST_STATUS=0
+
+ # create devices and namespaces
+ ip netns add "${NS1}"
+ ip netns add "${NS2}"
+ ip netns add "${NS3}"
+
+ ip link add veth1 type veth peer name veth2
+ ip link add veth3 type veth peer name veth4
+ ip link add veth5 type veth peer name veth6
+ ip link add veth7 type veth peer name veth8
+
+ ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1
+ ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip link set veth1 netns ${NS1}
+ ip link set veth2 netns ${NS2}
+ ip link set veth3 netns ${NS2}
+ ip link set veth4 netns ${NS3}
+ ip link set veth5 netns ${NS1}
+ ip link set veth6 netns ${NS2}
+ ip link set veth7 netns ${NS2}
+ ip link set veth8 netns ${NS3}
+
+	# configure addresses: the top route (1-2-3-4)
+ ip -netns ${NS1} addr add ${IPv4_1}/24 dev veth1
+ ip -netns ${NS2} addr add ${IPv4_2}/24 dev veth2
+ ip -netns ${NS2} addr add ${IPv4_3}/24 dev veth3
+ ip -netns ${NS3} addr add ${IPv4_4}/24 dev veth4
+ ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
+ ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
+ ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
+ ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4
+
+ # configure addresses: the bottom route (5-6-7-8)
+ ip -netns ${NS1} addr add ${IPv4_5}/24 dev veth5
+ ip -netns ${NS2} addr add ${IPv4_6}/24 dev veth6
+ ip -netns ${NS2} addr add ${IPv4_7}/24 dev veth7
+ ip -netns ${NS3} addr add ${IPv4_8}/24 dev veth8
+ ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
+ ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
+ ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
+ ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8
+
+
+ ip -netns ${NS1} link set dev veth1 up
+ ip -netns ${NS2} link set dev veth2 up
+ ip -netns ${NS2} link set dev veth3 up
+ ip -netns ${NS3} link set dev veth4 up
+ ip -netns ${NS1} link set dev veth5 up
+ ip -netns ${NS2} link set dev veth6 up
+ ip -netns ${NS2} link set dev veth7 up
+ ip -netns ${NS3} link set dev veth8 up
+
+ # configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
+ # the bottom route to specific bottom addresses
+
+ # NS1
+ # top route
+ ip -netns ${NS1} route add ${IPv4_2}/32 dev veth1
+ ip -netns ${NS1} route add default dev veth1 via ${IPv4_2} # go top by default
+ ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1
+ ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} # go top by default
+ # bottom route
+ ip -netns ${NS1} route add ${IPv4_6}/32 dev veth5
+ ip -netns ${NS1} route add ${IPv4_7}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS1} route add ${IPv4_8}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5
+ ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6}
+ ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6}
+
+ # NS2
+ # top route
+ ip -netns ${NS2} route add ${IPv4_1}/32 dev veth2
+ ip -netns ${NS2} route add ${IPv4_4}/32 dev veth3
+ ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3
+ # bottom route
+ ip -netns ${NS2} route add ${IPv4_5}/32 dev veth6
+ ip -netns ${NS2} route add ${IPv4_8}/32 dev veth7
+ ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6
+ ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7
+
+ # NS3
+ # top route
+ ip -netns ${NS3} route add ${IPv4_3}/32 dev veth4
+ ip -netns ${NS3} route add ${IPv4_1}/32 dev veth4 via ${IPv4_3}
+ ip -netns ${NS3} route add ${IPv4_2}/32 dev veth4 via ${IPv4_3}
+ ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
+ ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
+ ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
+ # bottom route
+ ip -netns ${NS3} route add ${IPv4_7}/32 dev veth8
+ ip -netns ${NS3} route add ${IPv4_5}/32 dev veth8 via ${IPv4_7}
+ ip -netns ${NS3} route add ${IPv4_6}/32 dev veth8 via ${IPv4_7}
+ ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
+ ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
+ ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}
+
+ # configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
+ ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
+ ip -netns ${NS3} link set gre_dev up
+ ip -netns ${NS3} addr add ${IPv4_GRE} nodad dev gre_dev
+ ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6}
+ ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8}
+
+
+ # configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
+ ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
+ ip -netns ${NS3} link set gre6_dev up
+ ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
+ ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6}
+ ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8}
+
+ # rp_filter gets confused by what these tests are doing, so disable it
+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+
+ sleep 1 # reduce flakiness
+ set +e
+}
+
+cleanup()
+{
+ ip netns del ${NS1} 2> /dev/null
+ ip netns del ${NS2} 2> /dev/null
+ ip netns del ${NS3} 2> /dev/null
+}
+
+trap cleanup EXIT
+
+remove_routes_to_gredev()
+{
+ ip -netns ${NS1} route del ${IPv4_GRE} dev veth5
+ ip -netns ${NS2} route del ${IPv4_GRE} dev veth7
+ ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5
+ ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7
+}
+
+add_unreachable_routes_to_gredev()
+{
+ ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32
+ ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32
+ ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128
+ ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128
+}
+
+test_ping()
+{
+ local readonly PROTO=$1
+ local readonly EXPECTED=$2
+ local RET=0
+
+ if [ "${PROTO}" == "IPv4" ] ; then
+ ip netns exec ${NS1} ping -c 1 -W 1 -I ${IPv4_SRC} ${IPv4_DST} 2>&1 > /dev/null
+ RET=$?
+ elif [ "${PROTO}" == "IPv6" ] ; then
+ ip netns exec ${NS1} ping6 -c 1 -W 6 -I ${IPv6_SRC} ${IPv6_DST} 2>&1 > /dev/null
+ RET=$?
+ else
+ echo " test_ping: unknown PROTO: ${PROTO}"
+ TEST_STATUS=1
+ fi
+
+ if [ "0" != "${RET}" ]; then
+ RET=1
+ fi
+
+ if [ "${EXPECTED}" != "${RET}" ] ; then
+ echo " test_ping failed: expected: ${EXPECTED}; got ${RET}"
+ TEST_STATUS=1
+ fi
+}
+
+test_egress()
+{
+ local readonly ENCAP=$1
+ echo "starting egress ${ENCAP} encap test"
+ setup
+
+ # by default, pings work
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+	# remove NS2->DST routes, pings fail
+ ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3
+ ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
+ else
+ echo " unknown encap ${ENCAP}"
+ TEST_STATUS=1
+ fi
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+	# a negative test: remove routes to GRE devices so that pings fail
+ remove_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # another negative test
+ add_unreachable_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ cleanup
+ process_test_results
+}
+
+test_ingress()
+{
+ local readonly ENCAP=$1
+ echo "starting ingress ${ENCAP} encap test"
+ setup
+
+ # need to wait a bit for IPv6 to autoconf, otherwise
+ # ping6 sometimes fails with "unable to bind to address"
+
+ # by default, pings work
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # remove NS2->DST routes, pings fail
+ ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3
+ ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
+ else
+		echo "	unknown encap ${ENCAP}"
+		TEST_STATUS=1
+ fi
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+	# a negative test: remove routes to GRE devices so that pings fail
+ remove_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # another negative test
+ add_unreachable_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ cleanup
+ process_test_results
+}
+
+test_egress IPv4
+test_egress IPv6
+test_ingress IPv4
+test_ingress IPv6
+
+print_test_summary_and_exit
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index e2b9eee37187..3c627771f965 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -32,6 +32,8 @@
#define ENOTSUPP 524
#endif
+static int skips;
+
static int map_flags;
#define CHECK(condition, tag, format...) ({ \
@@ -43,7 +45,7 @@ static int map_flags;
} \
})
-static void test_hashmap(int task, void *data)
+static void test_hashmap(unsigned int task, void *data)
{
long long key, next_key, first_key, value;
int fd;
@@ -133,7 +135,7 @@ static void test_hashmap(int task, void *data)
close(fd);
}
-static void test_hashmap_sizes(int task, void *data)
+static void test_hashmap_sizes(unsigned int task, void *data)
{
int fd, i, j;
@@ -153,7 +155,7 @@ static void test_hashmap_sizes(int task, void *data)
}
}
-static void test_hashmap_percpu(int task, void *data)
+static void test_hashmap_percpu(unsigned int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
BPF_DECLARE_PERCPU(long, value);
@@ -280,7 +282,7 @@ static int helper_fill_hashmap(int max_entries)
return fd;
}
-static void test_hashmap_walk(int task, void *data)
+static void test_hashmap_walk(unsigned int task, void *data)
{
int fd, i, max_entries = 1000;
long long key, value, next_key;
@@ -351,7 +353,7 @@ static void test_hashmap_zero_seed(void)
close(second);
}
-static void test_arraymap(int task, void *data)
+static void test_arraymap(unsigned int task, void *data)
{
int key, next_key, fd;
long long value;
@@ -406,7 +408,7 @@ static void test_arraymap(int task, void *data)
close(fd);
}
-static void test_arraymap_percpu(int task, void *data)
+static void test_arraymap_percpu(unsigned int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
BPF_DECLARE_PERCPU(long, values);
@@ -502,7 +504,7 @@ static void test_arraymap_percpu_many_keys(void)
close(fd);
}
-static void test_devmap(int task, void *data)
+static void test_devmap(unsigned int task, void *data)
{
int fd;
__u32 key, value;
@@ -517,7 +519,7 @@ static void test_devmap(int task, void *data)
close(fd);
}
-static void test_queuemap(int task, void *data)
+static void test_queuemap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
@@ -575,7 +577,7 @@ static void test_queuemap(int task, void *data)
close(fd);
}
-static void test_stackmap(int task, void *data)
+static void test_stackmap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
@@ -633,7 +635,6 @@ static void test_stackmap(int task, void *data)
close(fd);
}
-#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <sys/select.h>
@@ -641,7 +642,7 @@ static void test_stackmap(int task, void *data)
#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o"
#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o"
-static void test_sockmap(int tasks, void *data)
+static void test_sockmap(unsigned int tasks, void *data)
{
struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
@@ -725,6 +726,15 @@ static void test_sockmap(int tasks, void *data)
sizeof(key), sizeof(value),
6, 0);
if (fd < 0) {
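+		/* skip rather than fail when the kernel has no sockmap support */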
+ if (!bpf_probe_map_type(BPF_MAP_TYPE_SOCKMAP, 0)) {
+ printf("%s SKIP (unsupported map type BPF_MAP_TYPE_SOCKMAP)\n",
+ __func__);
+ skips++;
+ for (i = 0; i < 6; i++)
+ close(sfd[i]);
+ return;
+ }
+
printf("Failed to create sockmap %i\n", fd);
goto out_sockmap;
}
@@ -1258,10 +1268,11 @@ static void test_map_large(void)
}
#define run_parallel(N, FN, DATA) \
- printf("Fork %d tasks to '" #FN "'\n", N); \
+ printf("Fork %u tasks to '" #FN "'\n", N); \
__run_parallel(N, FN, DATA)
-static void __run_parallel(int tasks, void (*fn)(int task, void *data),
+static void __run_parallel(unsigned int tasks,
+ void (*fn)(unsigned int task, void *data),
void *data)
{
pid_t pid[tasks];
@@ -1302,7 +1313,7 @@ static void test_map_stress(void)
#define DO_UPDATE 1
#define DO_DELETE 0
-static void test_update_delete(int fn, void *data)
+static void test_update_delete(unsigned int fn, void *data)
{
int do_update = ((int *)data)[1];
int fd = ((int *)data)[0];
@@ -1702,6 +1713,6 @@ int main(void)
map_flags = BPF_F_NO_PREALLOC;
run_all_tests();
- printf("test_maps: OK\n");
+ printf("test_maps: OK, %d SKIPPED\n", skips);
return 0;
}
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
index 44ed7f29f8ab..c1da5404454a 100644
--- a/tools/testing/selftests/bpf/test_netcnt.c
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -65,7 +65,7 @@ int main(int argc, char **argv)
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
- if (!cgroup_fd) {
+ if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
goto err;
}
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index d59642e70f56..84bea3985d64 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -23,6 +23,7 @@ import string
import struct
import subprocess
import time
+import traceback
logfile = None
log_level = 1
@@ -78,7 +79,9 @@ def fail(cond, msg):
if not cond:
return
print("FAIL: " + msg)
- log("FAIL: " + msg, "", level=1)
+ tb = "".join(traceback.extract_stack().format())
+ print(tb)
+ log("FAIL: " + msg, tb, level=1)
os.sys.exit(1)
def start_test(msg):
@@ -589,6 +592,15 @@ def check_verifier_log(output, reference):
return
fail(True, "Missing or incorrect message from netdevsim in verifier log")
+def check_multi_basic(two_xdps):
+ fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
+ fail("prog" in two_xdps, "Base program reported in multi program mode")
+ fail(len(two_xdps["attached"]) != 2,
+ "Wrong attached program count with two programs")
+ fail(two_xdps["attached"][0]["prog"]["id"] ==
+ two_xdps["attached"][1]["prog"]["id"],
+ "Offloaded and other programs have the same id")
+
def test_spurios_extack(sim, obj, skip_hw, needle):
res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw,
include_stderr=True)
@@ -600,6 +612,67 @@ def test_spurios_extack(sim, obj, skip_hw, needle):
include_stderr=True)
check_no_extack(res, needle)
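+# Attach an offloaded program together with a second program loaded in
+# 'modename' ("" selects the default mode); 'modeid' is the XDP mode expected
+# once only the non-offloaded program remains (1 = driver, 2 = generic).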
+def test_multi_prog(sim, obj, modename, modeid):
+ start_test("Test multi-attachment XDP - %s + offload..." %
+ (modename or "default", ))
+ sim.set_xdp(obj, "offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+ fail("prog" not in xdp, "Base program not reported in single program mode")
+ fail(len(xdp["attached"]) != 1,
+ "Wrong attached program count with one program")
+
+ sim.set_xdp(obj, modename)
+ two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after other activated")
+ check_multi_basic(two_xdps)
+
+ offloaded2 = sim.dfs_read("bpf_offloaded_id")
+ fail(offloaded != offloaded2,
+ "Offload ID changed after loading other program")
+
+ start_test("Test multi-attachment XDP - replace...")
+ ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+ fail(ret == 0, "Replaced one of programs without -force")
+ check_extack(err, "XDP program already attached.", args)
+
+ if modename == "" or modename == "drv":
+ othermode = "" if modename == "drv" else "drv"
+ start_test("Test multi-attachment XDP - detach...")
+ ret, _, err = sim.unset_xdp(othermode, force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack(err, "program loaded with different flags.", args)
+
+ sim.unset_xdp("offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+
+ fail(xdp["mode"] != modeid, "Bad mode reported after multiple programs")
+ fail("prog" not in xdp,
+ "Base program not reported after multi program mode")
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after other activated")
+ fail(len(xdp["attached"]) != 1,
+ "Wrong attached program count with remaining programs")
+ fail(offloaded != "0", "Offload ID reported with only other program left")
+
+ start_test("Test multi-attachment XDP - reattach...")
+ sim.set_xdp(obj, "offload")
+ two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Other program not reported after offload activated")
+ check_multi_basic(two_xdps)
+
+ start_test("Test multi-attachment XDP - device remove...")
+ sim.remove()
+
+ sim = NetdevSim()
+ sim.set_ethtool_tc_offloads(True)
+ return sim
# Parse command line
parser = argparse.ArgumentParser()
@@ -842,7 +915,9 @@ try:
ret, _, err = sim.set_xdp(obj, "generic", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
- fail(err.count("File exists") != 1, "Replaced driver XDP with generic")
+ check_extack(err,
+ "native and generic XDP can't be active at the same time.",
+ args)
ret, _, err = sim.set_xdp(obj, "", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
@@ -931,59 +1006,9 @@ try:
rm(pin_file)
bpftool_prog_list_wait(expected=0)
- start_test("Test multi-attachment XDP - attach...")
- sim.set_xdp(obj, "offload")
- xdp = sim.ip_link_show(xdp=True)["xdp"]
- offloaded = sim.dfs_read("bpf_offloaded_id")
- fail("prog" not in xdp, "Base program not reported in single program mode")
- fail(len(ipl["xdp"]["attached"]) != 1,
- "Wrong attached program count with one program")
-
- sim.set_xdp(obj, "")
- two_xdps = sim.ip_link_show(xdp=True)["xdp"]
- offloaded2 = sim.dfs_read("bpf_offloaded_id")
-
- fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
- fail("prog" in two_xdps, "Base program reported in multi program mode")
- fail(xdp["attached"][0] not in two_xdps["attached"],
- "Offload program not reported after driver activated")
- fail(len(two_xdps["attached"]) != 2,
- "Wrong attached program count with two programs")
- fail(two_xdps["attached"][0]["prog"]["id"] ==
- two_xdps["attached"][1]["prog"]["id"],
- "offloaded and drv programs have the same id")
- fail(offloaded != offloaded2,
- "offload ID changed after loading driver program")
-
- start_test("Test multi-attachment XDP - replace...")
- ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
- fail(err.count("busy") != 1, "Replaced one of programs without -force")
-
- start_test("Test multi-attachment XDP - detach...")
- ret, _, err = sim.unset_xdp("drv", force=True,
- fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode")
- check_extack(err, "program loaded with different flags.", args)
-
- sim.unset_xdp("offload")
- xdp = sim.ip_link_show(xdp=True)["xdp"]
- offloaded = sim.dfs_read("bpf_offloaded_id")
-
- fail(xdp["mode"] != 1, "Bad mode reported after multiple programs")
- fail("prog" not in xdp,
- "Base program not reported after multi program mode")
- fail(xdp["attached"][0] not in two_xdps["attached"],
- "Offload program not reported after driver activated")
- fail(len(ipl["xdp"]["attached"]) != 1,
- "Wrong attached program count with remaining programs")
- fail(offloaded != "0", "offload ID reported with only driver program left")
-
- start_test("Test multi-attachment XDP - device remove...")
- sim.set_xdp(obj, "offload")
- sim.remove()
-
- sim = NetdevSim()
- sim.set_ethtool_tc_offloads(True)
+ sim = test_multi_prog(sim, obj, "", 1)
+ sim = test_multi_prog(sim, obj, "drv", 1)
+ sim = test_multi_prog(sim, obj, "generic", 2)
start_test("Test mixing of TC and XDP...")
sim.tc_add_ingress()
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 126fc624290d..5d10aee9e277 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -4,92 +4,30 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <time.h>
-
-#include <linux/types.h>
-typedef __u16 __sum16;
-#include <arpa/inet.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/filter.h>
-#include <linux/perf_event.h>
-#include <linux/unistd.h>
-
-#include <sys/ioctl.h>
-#include <sys/wait.h>
-#include <sys/types.h>
-#include <fcntl.h>
-
-#include <linux/bpf.h>
-#include <linux/err.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "test_iptunnel_common.h"
-#include "bpf_util.h"
-#include "bpf_endian.h"
+#include "test_progs.h"
#include "bpf_rlimit.h"
-#include "trace_helpers.h"
-
-static int error_cnt, pass_cnt;
-static bool jit_enabled;
-#define MAGIC_BYTES 123
+int error_cnt, pass_cnt;
+bool jit_enabled;
-/* ipv4 test vector */
-static struct {
- struct ethhdr eth;
- struct iphdr iph;
- struct tcphdr tcp;
-} __packed pkt_v4 = {
+struct ipv4_packet pkt_v4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
- .iph.protocol = 6,
+ .iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
+ .tcp.doff = 5,
};
-/* ipv6 test vector */
-static struct {
- struct ethhdr eth;
- struct ipv6hdr iph;
- struct tcphdr tcp;
-} __packed pkt_v6 = {
+struct ipv6_packet pkt_v6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
- .iph.nexthdr = 6,
+ .iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
+ .tcp.doff = 5,
};
-#define _CHECK(condition, tag, duration, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- error_cnt++; \
- printf("%s:FAIL:%s ", __func__, tag); \
- printf(format); \
- } else { \
- pass_cnt++; \
- printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
- } \
- __ret; \
-})
-
-#define CHECK(condition, tag, format...) \
- _CHECK(condition, tag, duration, format)
-#define CHECK_ATTR(condition, tag, format...) \
- _CHECK(condition, tag, tattr.duration, format)
-
-static int bpf_find_map(const char *test, struct bpf_object *obj,
- const char *name)
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
struct bpf_map *map;
@@ -102,349 +40,6 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
return bpf_map__fd(map);
}
-static void test_pkt_access(void)
-{
- const char *file = "./test_pkt_access.o";
- struct bpf_object *obj;
- __u32 duration, retval;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv4",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
-
- err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
- bpf_object__close(obj);
-}
-
-static void test_prog_run_xattr(void)
-{
- const char *file = "./test_pkt_access.o";
- struct bpf_object *obj;
- char buf[10];
- int err;
- struct bpf_prog_test_run_attr tattr = {
- .repeat = 1,
- .data_in = &pkt_v4,
- .data_size_in = sizeof(pkt_v4),
- .data_out = buf,
- .data_size_out = 5,
- };
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
- &tattr.prog_fd);
- if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
- return;
-
- memset(buf, 0, sizeof(buf));
-
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
- "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
- CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
- "incorrect output size, want %lu have %u\n",
- sizeof(pkt_v4), tattr.data_size_out);
-
- CHECK_ATTR(buf[5] != 0, "overflow",
- "BPF_PROG_TEST_RUN ignored size hint\n");
-
- tattr.data_out = NULL;
- tattr.data_size_out = 0;
- errno = 0;
-
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
- "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
- tattr.data_size_out = 1;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
-
- bpf_object__close(obj);
-}
-
-static void test_xdp(void)
-{
- struct vip key4 = {.protocol = 6, .family = AF_INET};
- struct vip key6 = {.protocol = 6, .family = AF_INET6};
- struct iptnl_info value4 = {.family = AF_INET};
- struct iptnl_info value6 = {.family = AF_INET6};
- const char *file = "./test_xdp.o";
- struct bpf_object *obj;
- char buf[128];
- struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
- __u32 duration, retval, size;
- int err, prog_fd, map_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip2tnl");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key4, &value4, 0);
- bpf_map_update_elem(map_fd, &key6, &value6, 0);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
-
- CHECK(err || retval != XDP_TX || size != 74 ||
- iph->protocol != IPPROTO_IPIP, "ipv4",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != 114 ||
- iph6->nexthdr != IPPROTO_IPV6, "ipv6",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-out:
- bpf_object__close(obj);
-}
-
-static void test_xdp_adjust_tail(void)
-{
- const char *file = "./test_adjust_tail.o";
- struct bpf_object *obj;
- char buf[128];
- __u32 duration, retval, size;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
-
- CHECK(err || retval != XDP_DROP,
- "ipv4", "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != 54,
- "ipv6", "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
- bpf_object__close(obj);
-}
-
-
-
-#define MAGIC_VAL 0x1234
-#define NUM_ITER 100000
-#define VIP_NUM 5
-
-static void test_l4lb(const char *file)
-{
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct vip key = {.protocol = 6};
- struct vip_meta {
- __u32 flags;
- __u32 vip_num;
- } value = {.vip_num = VIP_NUM};
- __u32 stats_key = VIP_NUM;
- struct vip_stats {
- __u64 bytes;
- __u64 pkts;
- } stats[nr_cpus];
- struct real_definition {
- union {
- __be32 dst;
- __be32 dstv6[4];
- };
- __u8 flags;
- } real_def = {.dst = MAGIC_VAL};
- __u32 ch_key = 11, real_num = 3;
- __u32 duration, retval, size;
- int err, i, prog_fd, map_fd;
- __u64 bytes = 0, pkts = 0;
- struct bpf_object *obj;
- char buf[128];
- u32 *magic = (u32 *)buf;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip_map");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key, &value, 0);
-
- map_fd = bpf_find_map(__func__, obj, "ch_rings");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
- map_fd = bpf_find_map(__func__, obj, "reals");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
- *magic != MAGIC_VAL, "ipv4",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
- *magic != MAGIC_VAL, "ipv6",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- map_fd = bpf_find_map(__func__, obj, "stats");
- if (map_fd < 0)
- goto out;
- bpf_map_lookup_elem(map_fd, &stats_key, stats);
- for (i = 0; i < nr_cpus; i++) {
- bytes += stats[i].bytes;
- pkts += stats[i].pkts;
- }
- if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
- error_cnt++;
- printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
- }
-out:
- bpf_object__close(obj);
-}
-
-static void test_l4lb_all(void)
-{
- const char *file1 = "./test_l4lb.o";
- const char *file2 = "./test_l4lb_noinline.o";
-
- test_l4lb(file1);
- test_l4lb(file2);
-}
-
-static void test_xdp_noinline(void)
-{
- const char *file = "./test_xdp_noinline.o";
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct vip key = {.protocol = 6};
- struct vip_meta {
- __u32 flags;
- __u32 vip_num;
- } value = {.vip_num = VIP_NUM};
- __u32 stats_key = VIP_NUM;
- struct vip_stats {
- __u64 bytes;
- __u64 pkts;
- } stats[nr_cpus];
- struct real_definition {
- union {
- __be32 dst;
- __be32 dstv6[4];
- };
- __u8 flags;
- } real_def = {.dst = MAGIC_VAL};
- __u32 ch_key = 11, real_num = 3;
- __u32 duration, retval, size;
- int err, i, prog_fd, map_fd;
- __u64 bytes = 0, pkts = 0;
- struct bpf_object *obj;
- char buf[128];
- u32 *magic = (u32 *)buf;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip_map");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key, &value, 0);
-
- map_fd = bpf_find_map(__func__, obj, "ch_rings");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
- map_fd = bpf_find_map(__func__, obj, "reals");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 1 || size != 54 ||
- *magic != MAGIC_VAL, "ipv4",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 1 || size != 74 ||
- *magic != MAGIC_VAL, "ipv6",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- map_fd = bpf_find_map(__func__, obj, "stats");
- if (map_fd < 0)
- goto out;
- bpf_map_lookup_elem(map_fd, &stats_key, stats);
- for (i = 0; i < nr_cpus; i++) {
- bytes += stats[i].bytes;
- pkts += stats[i].pkts;
- }
- if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
- error_cnt++;
- printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
- }
-out:
- bpf_object__close(obj);
-}
-
-static void test_tcp_estats(void)
-{
- const char *file = "./test_tcp_estats.o";
- int err, prog_fd;
- struct bpf_object *obj;
- __u32 duration = 0;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- CHECK(err, "", "err %d errno %d\n", err, errno);
- if (err) {
- error_cnt++;
- return;
- }
-
- bpf_object__close(obj);
-}
-
-static inline __u64 ptr_to_u64(const void *ptr)
-{
- return (__u64) (unsigned long) ptr;
-}
-
static bool is_jit_enabled(void)
{
const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
@@ -463,475 +58,7 @@ static bool is_jit_enabled(void)
return enabled;
}
-static void test_bpf_obj_id(void)
-{
- const __u64 array_magic_value = 0xfaceb00c;
- const __u32 array_key = 0;
- const int nr_iters = 2;
- const char *file = "./test_obj_id.o";
- const char *expected_prog_name = "test_obj_id";
- const char *expected_map_name = "test_map_id";
- const __u64 nsec_per_sec = 1000000000;
-
- struct bpf_object *objs[nr_iters];
- int prog_fds[nr_iters], map_fds[nr_iters];
- /* +1 to test for the info_len returned by kernel */
- struct bpf_prog_info prog_infos[nr_iters + 1];
- struct bpf_map_info map_infos[nr_iters + 1];
- /* Each prog only uses one map. +1 to test nr_map_ids
- * returned by kernel.
- */
- __u32 map_ids[nr_iters + 1];
- char jited_insns[128], xlated_insns[128], zeros[128];
- __u32 i, next_id, info_len, nr_id_found, duration = 0;
- struct timespec real_time_ts, boot_time_ts;
- int err = 0;
- __u64 array_value;
- uid_t my_uid = getuid();
- time_t now, load_time;
-
- err = bpf_prog_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
-
- err = bpf_map_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
-
- for (i = 0; i < nr_iters; i++)
- objs[i] = NULL;
-
- /* Check bpf_obj_get_info_by_fd() */
- bzero(zeros, sizeof(zeros));
- for (i = 0; i < nr_iters; i++) {
- now = time(NULL);
- err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
- &objs[i], &prog_fds[i]);
- /* test_obj_id.o is a dumb prog. It should never fail
- * to load.
- */
- if (err)
- error_cnt++;
- assert(!err);
-
- /* Insert a magic value to the map */
- map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
- assert(map_fds[i] >= 0);
- err = bpf_map_update_elem(map_fds[i], &array_key,
- &array_magic_value, 0);
- assert(!err);
-
- /* Check getting map info */
- info_len = sizeof(struct bpf_map_info) * 2;
- bzero(&map_infos[i], info_len);
- err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
- &info_len);
- if (CHECK(err ||
- map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
- map_infos[i].key_size != sizeof(__u32) ||
- map_infos[i].value_size != sizeof(__u64) ||
- map_infos[i].max_entries != 1 ||
- map_infos[i].map_flags != 0 ||
- info_len != sizeof(struct bpf_map_info) ||
- strcmp((char *)map_infos[i].name, expected_map_name),
- "get-map-info(fd)",
- "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
- err, errno,
- map_infos[i].type, BPF_MAP_TYPE_ARRAY,
- info_len, sizeof(struct bpf_map_info),
- map_infos[i].key_size,
- map_infos[i].value_size,
- map_infos[i].max_entries,
- map_infos[i].map_flags,
- map_infos[i].name, expected_map_name))
- goto done;
-
- /* Check getting prog info */
- info_len = sizeof(struct bpf_prog_info) * 2;
- bzero(&prog_infos[i], info_len);
- bzero(jited_insns, sizeof(jited_insns));
- bzero(xlated_insns, sizeof(xlated_insns));
- prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
- prog_infos[i].jited_prog_len = sizeof(jited_insns);
- prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
- prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
- prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
- prog_infos[i].nr_map_ids = 2;
- err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
- assert(!err);
- err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
- assert(!err);
- err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
- &info_len);
- load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
- + (prog_infos[i].load_time / nsec_per_sec);
- if (CHECK(err ||
- prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
- info_len != sizeof(struct bpf_prog_info) ||
- (jit_enabled && !prog_infos[i].jited_prog_len) ||
- (jit_enabled &&
- !memcmp(jited_insns, zeros, sizeof(zeros))) ||
- !prog_infos[i].xlated_prog_len ||
- !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
- load_time < now - 60 || load_time > now + 60 ||
- prog_infos[i].created_by_uid != my_uid ||
- prog_infos[i].nr_map_ids != 1 ||
- *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
- strcmp((char *)prog_infos[i].name, expected_prog_name),
- "get-prog-info(fd)",
- "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
- err, errno, i,
- prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
- info_len, sizeof(struct bpf_prog_info),
- jit_enabled,
- prog_infos[i].jited_prog_len,
- prog_infos[i].xlated_prog_len,
- !!memcmp(jited_insns, zeros, sizeof(zeros)),
- !!memcmp(xlated_insns, zeros, sizeof(zeros)),
- load_time, now,
- prog_infos[i].created_by_uid, my_uid,
- prog_infos[i].nr_map_ids, 1,
- *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
- prog_infos[i].name, expected_prog_name))
- goto done;
- }
-
- /* Check bpf_prog_get_next_id() */
- nr_id_found = 0;
- next_id = 0;
- while (!bpf_prog_get_next_id(next_id, &next_id)) {
- struct bpf_prog_info prog_info = {};
- __u32 saved_map_id;
- int prog_fd;
-
- info_len = sizeof(prog_info);
-
- prog_fd = bpf_prog_get_fd_by_id(next_id);
- if (prog_fd < 0 && errno == ENOENT)
- /* The bpf_prog is in the dead row */
- continue;
- if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
- "prog_fd %d next_id %d errno %d\n",
- prog_fd, next_id, errno))
- break;
-
- for (i = 0; i < nr_iters; i++)
- if (prog_infos[i].id == next_id)
- break;
-
- if (i == nr_iters)
- continue;
-
- nr_id_found++;
-
- /* Negative test:
- * prog_info.nr_map_ids = 1
- * prog_info.map_ids = NULL
- */
- prog_info.nr_map_ids = 1;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
- if (CHECK(!err || errno != EFAULT,
- "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
- err, errno, EFAULT))
- break;
- bzero(&prog_info, sizeof(prog_info));
- info_len = sizeof(prog_info);
-
- saved_map_id = *(int *)((long)prog_infos[i].map_ids);
- prog_info.map_ids = prog_infos[i].map_ids;
- prog_info.nr_map_ids = 2;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
- prog_infos[i].jited_prog_insns = 0;
- prog_infos[i].xlated_prog_insns = 0;
- CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
- memcmp(&prog_info, &prog_infos[i], info_len) ||
- *(int *)(long)prog_info.map_ids != saved_map_id,
- "get-prog-info(next_id->fd)",
- "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
- err, errno, info_len, sizeof(struct bpf_prog_info),
- memcmp(&prog_info, &prog_infos[i], info_len),
- *(int *)(long)prog_info.map_ids, saved_map_id);
- close(prog_fd);
- }
- CHECK(nr_id_found != nr_iters,
- "check total prog id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
-
- /* Check bpf_map_get_next_id() */
- nr_id_found = 0;
- next_id = 0;
- while (!bpf_map_get_next_id(next_id, &next_id)) {
- struct bpf_map_info map_info = {};
- int map_fd;
-
- info_len = sizeof(map_info);
-
- map_fd = bpf_map_get_fd_by_id(next_id);
- if (map_fd < 0 && errno == ENOENT)
- /* The bpf_map is in the dead row */
- continue;
- if (CHECK(map_fd < 0, "get-map-fd(next_id)",
- "map_fd %d next_id %u errno %d\n",
- map_fd, next_id, errno))
- break;
-
- for (i = 0; i < nr_iters; i++)
- if (map_infos[i].id == next_id)
- break;
-
- if (i == nr_iters)
- continue;
-
- nr_id_found++;
-
- err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
- assert(!err);
-
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
- CHECK(err || info_len != sizeof(struct bpf_map_info) ||
- memcmp(&map_info, &map_infos[i], info_len) ||
- array_value != array_magic_value,
- "check get-map-info(next_id->fd)",
- "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
- err, errno, info_len, sizeof(struct bpf_map_info),
- memcmp(&map_info, &map_infos[i], info_len),
- array_value, array_magic_value);
-
- close(map_fd);
- }
- CHECK(nr_id_found != nr_iters,
- "check total map id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
-
-done:
- for (i = 0; i < nr_iters; i++)
- bpf_object__close(objs[i]);
-}
-
-static void test_pkt_md_access(void)
-{
- const char *file = "./test_pkt_md_access.o";
- struct bpf_object *obj;
- __u32 duration, retval;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
-
- bpf_object__close(obj);
-}
-
-static void test_obj_name(void)
-{
- struct {
- const char *name;
- int success;
- int expected_errno;
- } tests[] = {
- { "", 1, 0 },
- { "_123456789ABCDE", 1, 0 },
- { "_123456789ABCDEF", 0, EINVAL },
- { "_123456789ABCD\n", 0, EINVAL },
- };
- struct bpf_insn prog[] = {
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- __u32 duration = 0;
- int i;
-
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
- size_t name_len = strlen(tests[i].name) + 1;
- union bpf_attr attr;
- size_t ncopy;
- int fd;
-
- /* test different attr.prog_name during BPF_PROG_LOAD */
- ncopy = name_len < sizeof(attr.prog_name) ?
- name_len : sizeof(attr.prog_name);
- bzero(&attr, sizeof(attr));
- attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
- attr.insn_cnt = 2;
- attr.insns = ptr_to_u64(prog);
- attr.license = ptr_to_u64("");
- memcpy(attr.prog_name, tests[i].name, ncopy);
-
- fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
- CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
- (!tests[i].success && errno != tests[i].expected_errno),
- "check-bpf-prog-name",
- "fd %d(%d) errno %d(%d)\n",
- fd, tests[i].success, errno, tests[i].expected_errno);
-
- if (fd != -1)
- close(fd);
-
- /* test different attr.map_name during BPF_MAP_CREATE */
- ncopy = name_len < sizeof(attr.map_name) ?
- name_len : sizeof(attr.map_name);
- bzero(&attr, sizeof(attr));
- attr.map_type = BPF_MAP_TYPE_ARRAY;
- attr.key_size = 4;
- attr.value_size = 4;
- attr.max_entries = 1;
- attr.map_flags = 0;
- memcpy(attr.map_name, tests[i].name, ncopy);
- fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
- CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
- (!tests[i].success && errno != tests[i].expected_errno),
- "check-bpf-map-name",
- "fd %d(%d) errno %d(%d)\n",
- fd, tests[i].success, errno, tests[i].expected_errno);
-
- if (fd != -1)
- close(fd);
- }
-}
-
-static void test_tp_attach_query(void)
-{
- const int num_progs = 3;
- int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
- __u32 duration = 0, info_len, saved_prog_ids[num_progs];
- const char *file = "./test_tracepoint.o";
- struct perf_event_query_bpf *query;
- struct perf_event_attr attr = {};
- struct bpf_object *obj[num_progs];
- struct bpf_prog_info prog_info;
- char buf[256];
-
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- return;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
- "read", "bytes %d errno %d\n", bytes, errno))
- return;
-
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
-
- query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
- for (i = 0; i < num_progs; i++) {
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
- &prog_fd[i]);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- goto cleanup1;
-
- bzero(&prog_info, sizeof(prog_info));
- prog_info.jited_prog_len = 0;
- prog_info.xlated_prog_len = 0;
- prog_info.nr_map_ids = 0;
- info_len = sizeof(prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
- if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
- err, errno))
- goto cleanup1;
- saved_prog_ids[i] = prog_info.id;
-
- pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd[i], errno))
- goto cleanup2;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto cleanup3;
-
- if (i == 0) {
- /* check NULL prog array query */
- query->ids_len = num_progs;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != 0,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- }
-
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto cleanup3;
-
- if (i == 1) {
- /* try to get # of programs only */
- query->ids_len = 0;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != 2,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
-
- /* try a few negative tests */
- /* invalid query pointer */
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
- (struct perf_event_query_bpf *)0x1);
- if (CHECK(!err || errno != EFAULT,
- "perf_event_ioc_query_bpf",
- "err %d errno %d\n", err, errno))
- goto cleanup3;
-
- /* no enough space */
- query->ids_len = 1;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- }
-
- query->ids_len = num_progs;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != (i + 1),
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- for (j = 0; j < i + 1; j++)
- if (CHECK(saved_prog_ids[j] != query->ids[j],
- "perf_event_ioc_query_bpf",
- "#%d saved_prog_id %x query prog_id %x\n",
- j, saved_prog_ids[j], query->ids[j]))
- goto cleanup3;
- }
-
- i = num_progs - 1;
- for (; i >= 0; i--) {
- cleanup3:
- ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
- cleanup2:
- close(pmu_fd[i]);
- cleanup1:
- bpf_object__close(obj[i]);
- }
- free(query);
-}
-
-static int compare_map_keys(int map1_fd, int map2_fd)
+int compare_map_keys(int map1_fd, int map2_fd)
{
__u32 key, next_key;
char val_buf[PERF_MAX_STACK_DEPTH *
@@ -958,7 +85,7 @@ static int compare_map_keys(int map1_fd, int map2_fd)
return 0;
}
-static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
__u32 key, next_key, *cur_key_p, *next_key_p;
char *val_buf1, *val_buf2;
@@ -994,165 +121,7 @@ out:
return err;
}
-static void test_stacktrace_map()
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_map.o";
- int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
- struct perf_event_attr attr = {};
- __u32 key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- return;
-
- /* Get the ID for the sched/sched_switch tracepoint */
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (bytes <= 0 || bytes >= sizeof(buf))
- goto close_prog;
-
- /* Open the perf event and attach bpf progrram */
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (err)
- goto disable_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (err)
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (control_map_fd < 0)
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (stackid_hmap_fd < 0)
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (stackmap_fd < 0)
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (stack_amap_fd < 0)
- goto disable_pmu;
-
- /* give some time for bpf program run */
- sleep(1);
-
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
- err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
- if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- goto disable_pmu_noerr;
-disable_pmu:
- error_cnt++;
-disable_pmu_noerr:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
- close(pmu_fd);
-close_prog:
- bpf_object__close(obj);
-}
-
-static void test_stacktrace_map_raw_tp()
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd;
- const char *file = "./test_stacktrace_map.o";
- int efd, err, prog_fd;
- __u32 key, val, duration = 0;
- struct bpf_object *obj;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (control_map_fd < 0)
- goto close_prog;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (stackid_hmap_fd < 0)
- goto close_prog;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (stackmap_fd < 0)
- goto close_prog;
-
- /* give some time for bpf program run */
- sleep(1);
-
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto close_prog;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static int extract_build_id(char *build_id, size_t size)
+int extract_build_id(char *build_id, size_t size)
{
FILE *fp;
char *line = NULL;
@@ -1176,711 +145,22 @@ err:
return -1;
}
-static void test_stacktrace_build_id(void)
+void *spin_lock_thread(void *arg)
{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_build_id.o";
- int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
- struct perf_event_attr attr = {};
- __u32 key, previous_key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
- int i, j;
- struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
- int build_id_matches = 0;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- goto out;
-
- /* Get the ID for the sched/sched_switch tracepoint */
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/random/urandom_read/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
- "read", "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
-
- /* Open the perf event and attach bpf progrram */
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
- == 0);
- assert(system("./urandom_read") == 0);
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = extract_build_id(buf, 256);
-
- if (CHECK(err, "get build_id with readelf",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
- if (CHECK(err, "get_next_key from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
-
- do {
- char build_id[64];
-
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
- if (CHECK(err, "lookup_elem from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
- for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
- if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
- id_offs[i].offset != 0) {
- for (j = 0; j < 20; ++j)
- sprintf(build_id + 2 * j, "%02x",
- id_offs[i].build_id[j] & 0xff);
- if (strstr(buf, build_id) != NULL)
- build_id_matches = 1;
- }
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
-
- if (CHECK(build_id_matches < 1, "build id match",
- "Didn't find expected build ID from the map\n"))
- goto disable_pmu;
-
- stack_trace_len = PERF_MAX_STACK_DEPTH
- * sizeof(struct bpf_stack_build_id);
- err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
- CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
- "err %d errno %d\n", err, errno);
-
-disable_pmu:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
- close(pmu_fd);
-
-close_prog:
- bpf_object__close(obj);
-
-out:
- return;
-}
-
-static void test_stacktrace_build_id_nmi(void)
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_build_id.o";
- int err, pmu_fd, prog_fd;
- struct perf_event_attr attr = {
- .sample_freq = 5000,
- .freq = 1,
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- };
- __u32 key, previous_key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
- int i, j;
- struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
- int build_id_matches = 0;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- return;
-
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open",
- "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
- == 0);
- assert(system("taskset 0x1 ./urandom_read 100000") == 0);
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = extract_build_id(buf, 256);
-
- if (CHECK(err, "get build_id with readelf",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
- if (CHECK(err, "get_next_key from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
-
- do {
- char build_id[64];
-
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
- if (CHECK(err, "lookup_elem from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
- for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
- if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
- id_offs[i].offset != 0) {
- for (j = 0; j < 20; ++j)
- sprintf(build_id + 2 * j, "%02x",
- id_offs[i].build_id[j] & 0xff);
- if (strstr(buf, build_id) != NULL)
- build_id_matches = 1;
- }
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
-
- if (CHECK(build_id_matches < 1, "build id match",
- "Didn't find expected build ID from the map\n"))
- goto disable_pmu;
-
- /*
- * We intentionally skip compare_stack_ips(). This is because we
- * only support one in_nmi() ips-to-build_id translation per cpu
- * at any time, thus stack_amap here will always fallback to
- * BPF_STACK_BUILD_ID_IP;
- */
-
-disable_pmu:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
- close(pmu_fd);
-
-close_prog:
- bpf_object__close(obj);
-}
-
-#define MAX_CNT_RAWTP 10ull
-#define MAX_STACK_RAWTP 100
-struct get_stack_trace_t {
- int pid;
- int kern_stack_size;
- int user_stack_size;
- int user_stack_buildid_size;
- __u64 kern_stack[MAX_STACK_RAWTP];
- __u64 user_stack[MAX_STACK_RAWTP];
- struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
-};
-
-static int get_stack_print_output(void *data, int size)
-{
- bool good_kern_stack = false, good_user_stack = false;
- const char *nonjit_func = "___bpf_prog_run";
- struct get_stack_trace_t *e = data;
- int i, num_stack;
- static __u64 cnt;
- struct ksym *ks;
-
- cnt++;
-
- if (size < sizeof(struct get_stack_trace_t)) {
- __u64 *raw_data = data;
- bool found = false;
-
- num_stack = size / sizeof(__u64);
- /* If jit is enabled, we do not have a good way to
- * verify the sanity of the kernel stack. So we
- * just assume it is good if the stack is not empty.
- * This could be improved in the future.
- */
- if (jit_enabled) {
- found = num_stack > 0;
- } else {
- for (i = 0; i < num_stack; i++) {
- ks = ksym_search(raw_data[i]);
- if (strcmp(ks->name, nonjit_func) == 0) {
- found = true;
- break;
- }
- }
- }
- if (found) {
- good_kern_stack = true;
- good_user_stack = true;
- }
- } else {
- num_stack = e->kern_stack_size / sizeof(__u64);
- if (jit_enabled) {
- good_kern_stack = num_stack > 0;
- } else {
- for (i = 0; i < num_stack; i++) {
- ks = ksym_search(e->kern_stack[i]);
- if (strcmp(ks->name, nonjit_func) == 0) {
- good_kern_stack = true;
- break;
- }
- }
- }
- if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
- good_user_stack = true;
- }
- if (!good_kern_stack || !good_user_stack)
- return LIBBPF_PERF_EVENT_ERROR;
-
- if (cnt == MAX_CNT_RAWTP)
- return LIBBPF_PERF_EVENT_DONE;
-
- return LIBBPF_PERF_EVENT_CONT;
-}
-
-static void test_get_stack_raw_tp(void)
-{
- const char *file = "./test_get_stack_rawtp.o";
- int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
- struct perf_event_attr attr = {};
- struct timespec tv = {0, 10};
- __u32 key = 0, duration = 0;
- struct bpf_object *obj;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
- if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
- perfmap_fd, errno))
- goto close_prog;
-
- err = load_kallsyms();
- if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.type = PERF_TYPE_SOFTWARE;
- attr.config = PERF_COUNT_SW_BPF_OUTPUT;
- pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
- -1/*group_fd*/, 0);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
- errno))
- goto close_prog;
-
- err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
- if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
- errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
- err, errno))
- goto close_prog;
-
- err = perf_event_mmap(pmu_fd);
- if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- /* trigger some syscall action */
- for (i = 0; i < MAX_CNT_RAWTP; i++)
- nanosleep(&tv, NULL);
-
- err = perf_event_poller(pmu_fd, get_stack_print_output);
- if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_rawtp(void)
-{
- const char *file = "./test_get_stack_rawtp.o";
- __u64 probe_offset, probe_addr;
- __u32 len, prog_id, fd_type;
- struct bpf_object *obj;
- int efd, err, prog_fd;
- __u32 duration = 0;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- /* query (getpid(), efd) */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
- errno))
- goto close_prog;
-
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- strcmp(buf, "sys_enter") == 0;
- if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
- fd_type, buf))
- goto close_prog;
-
- /* test zero len */
- len = 0;
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
- err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter");
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- /* test empty buffer */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
- err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter");
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- /* test smaller buffer */
- len = 3;
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
- "err %d errno %d\n", err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter") &&
- strcmp(buf, "sy") == 0;
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp_core(const char *probe_name,
- const char *tp_name)
-{
- const char *file = "./test_tracepoint.o";
- int err, bytes, efd, prog_fd, pmu_fd;
- struct perf_event_attr attr = {};
- __u64 probe_offset, probe_addr;
- __u32 len, prog_id, fd_type;
- struct bpf_object *obj;
- __u32 duration = 0;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/%s/id", probe_name);
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
- "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
-
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- /* query (getpid(), pmu_fd) */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
- if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
- fd_type, buf))
- goto close_pmu;
-
- close(pmu_fd);
- goto close_prog_noerr;
-
-close_pmu:
- close(pmu_fd);
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp(void)
-{
- test_task_fd_query_tp_core("sched/sched_switch",
- "sched_switch");
- test_task_fd_query_tp_core("syscalls/sys_enter_read",
- "sys_enter_read");
-}
-
-static void test_reference_tracking()
-{
- const char *file = "./test_sk_lookup_kern.o";
- struct bpf_object *obj;
- struct bpf_program *prog;
- __u32 duration = 0;
- int err = 0;
-
- obj = bpf_object__open(file);
- if (IS_ERR(obj)) {
- error_cnt++;
- return;
- }
-
- bpf_object__for_each_program(prog, obj) {
- const char *title;
-
- /* Ignore .text sections */
- title = bpf_program__title(prog, false);
- if (strstr(title, ".text") != NULL)
- continue;
-
- bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+ __u32 duration, retval;
+ int err, prog_fd = *(u32 *) arg;
- /* Expect verifier failure if test name has 'fail' */
- if (strstr(title, "fail") != NULL) {
- libbpf_set_print(NULL, NULL, NULL);
- err = !bpf_program__load(prog, "GPL", 0);
- libbpf_set_print(printf, printf, NULL);
- } else {
- err = bpf_program__load(prog, "GPL", 0);
- }
- CHECK(err, title, "\n");
- }
- bpf_object__close(obj);
+ err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+ pthread_exit(arg);
}
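
spin_lock_thread() above is written as a pthread entry point: each thread runs the already-loaded program 10000 times through bpf_prog_test_run() and then exits with its argument. A minimal sketch of how a caller might drive it from two threads follows; the wrapper function is hypothetical (the real callers are expected to live under prog_tests/), only spin_lock_thread() itself comes from this patch.

/* Hypothetical caller sketch: two threads contend on the same program.
 * prog_fd is assumed to have been set up by bpf_prog_load() elsewhere.
 */
static void run_spin_lock_threads(int prog_fd)
{
	pthread_t t1, t2;
	void *ret;

	pthread_create(&t1, NULL, spin_lock_thread, &prog_fd);
	pthread_create(&t2, NULL, spin_lock_thread, &prog_fd);
	pthread_join(t1, &ret);	/* each thread pthread_exit()s with its arg */
	pthread_join(t2, &ret);
}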
-enum {
- QUEUE,
- STACK,
-};
-
-static void test_queue_stack_map(int type)
-{
- const int MAP_SIZE = 32;
- __u32 vals[MAP_SIZE], duration, retval, size, val;
- int i, err, prog_fd, map_in_fd, map_out_fd;
- char file[32], buf[128];
- struct bpf_object *obj;
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
-
- /* Fill test values to be used */
- for (i = 0; i < MAP_SIZE; i++)
- vals[i] = rand();
-
- if (type == QUEUE)
- strncpy(file, "./test_queue_map.o", sizeof(file));
- else if (type == STACK)
- strncpy(file, "./test_stack_map.o", sizeof(file));
- else
- return;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_in_fd = bpf_find_map(__func__, obj, "map_in");
- if (map_in_fd < 0)
- goto out;
-
- map_out_fd = bpf_find_map(__func__, obj, "map_out");
- if (map_out_fd < 0)
- goto out;
-
- /* Push 32 elements to the input map */
- for (i = 0; i < MAP_SIZE; i++) {
- err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
- if (err) {
- error_cnt++;
- goto out;
- }
- }
-
- /* The eBPF program pushes iph.saddr in the output map,
- * pops the input map and saves this value in iph.daddr
- */
- for (i = 0; i < MAP_SIZE; i++) {
- if (type == QUEUE) {
- val = vals[i];
- pkt_v4.iph.saddr = vals[i] * 5;
- } else if (type == STACK) {
- val = vals[MAP_SIZE - 1 - i];
- pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
- }
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- if (err || retval || size != sizeof(pkt_v4) ||
- iph->daddr != val)
- break;
- }
-
- CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
- "bpf_map_pop_elem",
- "err %d errno %d retval %d size %d iph->daddr %u\n",
- err, errno, retval, size, iph->daddr);
-
- /* Queue is empty, program should return TC_ACT_SHOT */
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
- "check-queue-stack-map-empty",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- /* Check that the program pushed elements correctly */
- for (i = 0; i < MAP_SIZE; i++) {
- err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
- if (err || val != vals[i] * 5)
- break;
- }
-
- CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
- "bpf_map_push_elem", "err %d value %u\n", err, val);
-
-out:
- pkt_v4.iph.saddr = 0;
- bpf_object__close(obj);
-}
+#define DECLARE
+#include <prog_tests/tests.h>
+#undef DECLARE
int main(void)
{
@@ -1888,27 +168,9 @@ int main(void)
jit_enabled = is_jit_enabled();
- test_pkt_access();
- test_prog_run_xattr();
- test_xdp();
- test_xdp_adjust_tail();
- test_l4lb_all();
- test_xdp_noinline();
- test_tcp_estats();
- test_bpf_obj_id();
- test_pkt_md_access();
- test_obj_name();
- test_tp_attach_query();
- test_stacktrace_map();
- test_stacktrace_build_id();
- test_stacktrace_build_id_nmi();
- test_stacktrace_map_raw_tp();
- test_get_stack_raw_tp();
- test_task_fd_query_rawtp();
- test_task_fd_query_tp();
- test_reference_tracking();
- test_queue_stack_map(QUEUE);
- test_queue_stack_map(STACK);
+#define CALL
+#include <prog_tests/tests.h>
+#undef CALL
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
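
The DECLARE/CALL pair above turns prog_tests/tests.h into a simple x-macro style include: the same header is pulled in twice, once near the top of the file with DECLARE defined to emit forward declarations, and once inside main() with CALL defined to emit the call sites. The header itself is not part of this hunk; a hypothetical sketch of its shape, reusing two of the test names removed above, could look like this.

/* Hypothetical prog_tests/tests.h sketch: one pair of entries per test,
 * selected by the DECLARE/CALL guards used in test_progs.c.
 */
#ifdef DECLARE
extern void test_pkt_access(void);
extern void test_xdp(void);
/* ... one declaration per test ... */
#endif

#ifdef CALL
test_pkt_access();
test_xdp();
/* ... one call per test ... */
#endif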
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
new file mode 100644
index 000000000000..51a07367cd43
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <time.h>
+#include <signal.h>
+
+#include <linux/types.h>
+typedef __u16 __sum16;
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/filter.h>
+#include <linux/perf_event.h>
+#include <linux/unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "test_iptunnel_common.h"
+#include "bpf_util.h"
+#include "bpf_endian.h"
+#include "trace_helpers.h"
+#include "flow_dissector_load.h"
+
+extern int error_cnt, pass_cnt;
+extern bool jit_enabled;
+
+#define MAGIC_BYTES 123
+
+/* ipv4 test vector */
+struct ipv4_packet {
+ struct ethhdr eth;
+ struct iphdr iph;
+ struct tcphdr tcp;
+} __packed;
+extern struct ipv4_packet pkt_v4;
+
+/* ipv6 test vector */
+struct ipv6_packet {
+ struct ethhdr eth;
+ struct ipv6hdr iph;
+ struct tcphdr tcp;
+} __packed;
+extern struct ipv6_packet pkt_v6;
+
+#define _CHECK(condition, tag, duration, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ error_cnt++; \
+ printf("%s:FAIL:%s ", __func__, tag); \
+ printf(format); \
+ } else { \
+ pass_cnt++; \
+ printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
+ } \
+ __ret; \
+})
+
+#define CHECK(condition, tag, format...) \
+ _CHECK(condition, tag, duration, format)
+#define CHECK_ATTR(condition, tag, format...) \
+ _CHECK(condition, tag, tattr.duration, format)
+
+#define MAGIC_VAL 0x1234
+#define NUM_ITER 100000
+#define VIP_NUM 5
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
+int compare_map_keys(int map1_fd, int map2_fd);
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
+int extract_build_id(char *build_id, size_t size);
+void *spin_lock_thread(void *arg);
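
The CHECK() macro in this header quietly depends on a __u32 duration variable being in scope at the call site (CHECK_ATTR() likewise expects a local tattr), and it bumps the shared pass_cnt/error_cnt counters. A minimal sketch of a test written against the header is shown next; the test name and object file are placeholders, not files added by this patch.

/* Hypothetical prog_tests/example.c sketch built on test_progs.h. */
#include "test_progs.h"

void test_example(void)
{
	const char *file = "./test_example.o";	/* placeholder object */
	struct bpf_object *obj;
	__u32 duration = 0;	/* consumed by CHECK() when printing PASS */
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	/* ... exercise the program ... */

	bpf_object__close(obj);
}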
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
index c121cc59f314..9220747c069d 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -164,7 +164,7 @@ int main(int argc, char **argv)
goto err;
cgfd = create_and_get_cgroup(CGROUP_PATH);
- if (!cgfd)
+ if (cgfd < 0)
goto err;
if (join_cgroup(CGROUP_PATH))
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index b8ebe2f58074..fb679ac3d4b0 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -20,6 +20,7 @@
#define MAX_INSNS 512
char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static bool verbose = false;
struct sock_test {
const char *descr;
@@ -325,6 +326,7 @@ static int load_sock_prog(const struct bpf_insn *prog,
enum bpf_attach_type attach_type)
{
struct bpf_load_program_attr attr;
+ int ret;
memset(&attr, 0, sizeof(struct bpf_load_program_attr));
attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
@@ -332,8 +334,13 @@ static int load_sock_prog(const struct bpf_insn *prog,
attr.insns = prog;
attr.insns_cnt = probe_prog_length(attr.insns);
attr.license = "GPL";
+ attr.log_level = 2;
- return bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ if (verbose && ret < 0)
+ fprintf(stderr, "%s\n", bpf_log_buf);
+
+ return ret;
}
static int attach_sock_prog(int cgfd, int progfd,
@@ -458,7 +465,7 @@ int main(int argc, char **argv)
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
- if (!cgfd)
+ if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index 73b7493d4120..3f110eaaf29c 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -44,6 +44,7 @@
#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
#define SRC6_IP "::1"
#define SRC6_REWRITE_IP "::6"
+#define WILDCARD6_IP "::"
#define SERV6_PORT 6060
#define SERV6_REWRITE_PORT 6666
@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
static int bind6_prog_load(const struct sock_addr_test *test);
static int connect4_prog_load(const struct sock_addr_test *test);
static int connect6_prog_load(const struct sock_addr_test *test);
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
static struct sock_addr_test tests[] = {
/* bind */
@@ -463,6 +466,34 @@ static struct sock_addr_test tests[] = {
SYSCALL_ENOTSUPP,
},
{
+ "sendmsg6: set dst IP = [::] (BSD'ism)",
+ sendmsg6_rw_wildcard_prog_load,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ "sendmsg6: preserve dst IP = [::] (BSD'ism)",
+ sendmsg_allow_prog_load,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ WILDCARD6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_PORT,
+ SRC6_IP,
+ SUCCESS,
+ },
+ {
"sendmsg6: deny call",
sendmsg_deny_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
@@ -734,16 +765,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
return load_path(test, CONNECT6_PROG_PATH);
}
-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
+ int32_t rc)
{
struct bpf_insn insns[] = {
- /* return 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* return rc */
+ BPF_MOV64_IMM(BPF_REG_0, rc),
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
+}
+
+static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
+}
+
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
{
struct sockaddr_in dst4_rw_addr;
@@ -864,6 +906,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
}
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
+}
+
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
{
return load_path(test, SENDMSG6_PROG_PATH);
@@ -1395,7 +1442,7 @@ int main(int argc, char **argv)
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
- if (!cgfd)
+ if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
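
The two new sendmsg6 cases added above ("set dst IP = [::]" and "preserve dst IP = [::]") lean on the BSD'ism that a UDP datagram sent to the unspecified address is delivered to the local host, so a server bound on loopback still receives it whether the BPF program rewrites the destination to [::] or merely leaves a [::] destination untouched. A minimal user-space illustration of that behaviour, with no BPF attached and a placeholder port, is sketched below.

/* Sketch of the BSD'ism itself: sendto() aimed at [::]:port reaches a
 * local receiver. The port is a placeholder; the selftest exercises the
 * same behaviour through BPF_CGROUP_UDP6_SENDMSG programs instead.
 */
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int send_to_wildcard6(unsigned short port)
{
	struct sockaddr_in6 dst = {
		.sin6_family = AF_INET6,	/* sin6_addr left as "::" */
		.sin6_port = htons(port),
	};
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	char byte = 0;

	return sendto(fd, &byte, sizeof(byte), 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}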
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
new file mode 100644
index 000000000000..bc8943938bf5
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_sock_fields.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
+
+enum bpf_array_idx {
+ SRV_IDX,
+ CLI_IDX,
+ __NR_BPF_ARRAY_IDX,
+};
+
+#define CHECK(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
+ printf(format); \
+ printf("\n"); \
+ exit(-1); \
+ } \
+})
+
+#define TEST_CGROUP "/test-bpf-sock-fields"
+#define DATA "Hello BPF!"
+#define DATA_LEN sizeof(DATA)
+
+static struct sockaddr_in6 srv_sa6, cli_sa6;
+static int linum_map_fd;
+static int addr_map_fd;
+static int tp_map_fd;
+static int sk_map_fd;
+static __u32 srv_idx = SRV_IDX;
+static __u32 cli_idx = CLI_IDX;
+
+static void init_loopback6(struct sockaddr_in6 *sa6)
+{
+ memset(sa6, 0, sizeof(*sa6));
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = in6addr_loopback;
+}
+
+static void print_sk(const struct bpf_sock *sk)
+{
+ char src_ip4[24], dst_ip4[24];
+ char src_ip6[64], dst_ip6[64];
+
+ inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4));
+ inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6));
+ inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4));
+ inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6));
+
+ printf("state:%u bound_dev_if:%u family:%u type:%u protocol:%u mark:%u priority:%u "
+ "src_ip4:%x(%s) src_ip6:%x:%x:%x:%x(%s) src_port:%u "
+ "dst_ip4:%x(%s) dst_ip6:%x:%x:%x:%x(%s) dst_port:%u\n",
+ sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol,
+ sk->mark, sk->priority,
+ sk->src_ip4, src_ip4,
+ sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3],
+ src_ip6, sk->src_port,
+ sk->dst_ip4, dst_ip4,
+ sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3],
+ dst_ip6, ntohs(sk->dst_port));
+}
+
+static void print_tp(const struct bpf_tcp_sock *tp)
+{
+ printf("snd_cwnd:%u srtt_us:%u rtt_min:%u snd_ssthresh:%u rcv_nxt:%u "
+ "snd_nxt:%u snd:una:%u mss_cache:%u ecn_flags:%u "
+ "rate_delivered:%u rate_interval_us:%u packets_out:%u "
+ "retrans_out:%u total_retrans:%u segs_in:%u data_segs_in:%u "
+ "segs_out:%u data_segs_out:%u lost_out:%u sacked_out:%u "
+ "bytes_received:%llu bytes_acked:%llu\n",
+ tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,
+ tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
+ tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,
+ tp->packets_out, tp->retrans_out, tp->total_retrans,
+ tp->segs_in, tp->data_segs_in, tp->segs_out,
+ tp->data_segs_out, tp->lost_out, tp->sacked_out,
+ tp->bytes_received, tp->bytes_acked);
+}
+
+static void check_result(void)
+{
+ struct bpf_tcp_sock srv_tp, cli_tp;
+ struct bpf_sock srv_sk, cli_sk;
+ __u32 linum, idx0 = 0;
+ int err;
+
+ err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum);
+ CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)",
+ "err:%d errno:%d", err, errno);
+
+ printf("srv_sk: ");
+ print_sk(&srv_sk);
+ printf("\n");
+
+ printf("cli_sk: ");
+ print_sk(&cli_sk);
+ printf("\n");
+
+ printf("srv_tp: ");
+ print_tp(&srv_tp);
+ printf("\n");
+
+ printf("cli_tp: ");
+ print_tp(&cli_tp);
+ printf("\n");
+
+ CHECK(srv_sk.state == 10 ||
+ !srv_sk.state ||
+ srv_sk.family != AF_INET6 ||
+ srv_sk.protocol != IPPROTO_TCP ||
+ memcmp(srv_sk.src_ip6, &in6addr_loopback,
+ sizeof(srv_sk.src_ip6)) ||
+ memcmp(srv_sk.dst_ip6, &in6addr_loopback,
+ sizeof(srv_sk.dst_ip6)) ||
+ srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+ srv_sk.dst_port != cli_sa6.sin6_port,
+ "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum);
+
+ CHECK(cli_sk.state == 10 ||
+ !cli_sk.state ||
+ cli_sk.family != AF_INET6 ||
+ cli_sk.protocol != IPPROTO_TCP ||
+ memcmp(cli_sk.src_ip6, &in6addr_loopback,
+ sizeof(cli_sk.src_ip6)) ||
+ memcmp(cli_sk.dst_ip6, &in6addr_loopback,
+ sizeof(cli_sk.dst_ip6)) ||
+ cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
+ cli_sk.dst_port != srv_sa6.sin6_port,
+ "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum);
+
+ CHECK(srv_tp.data_segs_out != 1 ||
+ srv_tp.data_segs_in ||
+ srv_tp.snd_cwnd != 10 ||
+ srv_tp.total_retrans ||
+ srv_tp.bytes_acked != DATA_LEN,
+ "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum);
+
+ CHECK(cli_tp.data_segs_out ||
+ cli_tp.data_segs_in != 1 ||
+ cli_tp.snd_cwnd != 10 ||
+ cli_tp.total_retrans ||
+ cli_tp.bytes_received != DATA_LEN,
+ "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum);
+}
+
+static void test(void)
+{
+ int listen_fd, cli_fd, accept_fd, epfd, err;
+ struct epoll_event ev;
+ socklen_t addrlen;
+
+ addrlen = sizeof(struct sockaddr_in6);
+ ev.events = EPOLLIN;
+
+ epfd = epoll_create(1);
+ CHECK(epfd == -1, "epoll_create()", "epfd:%d errno:%d", epfd, errno);
+
+ /* Prepare listen_fd */
+ listen_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ CHECK(listen_fd == -1, "socket()", "listen_fd:%d errno:%d",
+ listen_fd, errno);
+
+ init_loopback6(&srv_sa6);
+ err = bind(listen_fd, (struct sockaddr *)&srv_sa6, sizeof(srv_sa6));
+ CHECK(err, "bind(listen_fd)", "err:%d errno:%d", err, errno);
+
+ err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
+ CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d", err, errno);
+
+ err = listen(listen_fd, 1);
+ CHECK(err, "listen(listen_fd)", "err:%d errno:%d", err, errno);
+
+ /* Prepare cli_fd */
+ cli_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ CHECK(cli_fd == -1, "socket()", "cli_fd:%d errno:%d", cli_fd, errno);
+
+ init_loopback6(&cli_sa6);
+ err = bind(cli_fd, (struct sockaddr *)&cli_sa6, sizeof(cli_sa6));
+ CHECK(err, "bind(cli_fd)", "err:%d errno:%d", err, errno);
+
+ err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen);
+ CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Update addr_map with srv_sa6 and cli_sa6 */
+ err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0);
+ CHECK(err, "map_update", "err:%d errno:%d", err, errno);
+
+ err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0);
+ CHECK(err, "map_update", "err:%d errno:%d", err, errno);
+
+ /* Connect from cli_sa6 to srv_sa6 */
+ err = connect(cli_fd, (struct sockaddr *)&srv_sa6, addrlen);
+ printf("srv_sa6.sin6_port:%u cli_sa6.sin6_port:%u\n\n",
+ ntohs(srv_sa6.sin6_port), ntohs(cli_sa6.sin6_port));
+ CHECK(err && errno != EINPROGRESS,
+ "connect(cli_fd)", "err:%d errno:%d", err, errno);
+
+ ev.data.fd = listen_fd;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, listen_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Accept the connection */
+ /* Have some timeout in accept(listen_fd). Just in case. */
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != listen_fd,
+ "epoll_wait(listen_fd)",
+ "err:%d errno:%d ev.data.fd:%d listen_fd:%d",
+ err, errno, ev.data.fd, listen_fd);
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ CHECK(accept_fd == -1, "accept(listen_fd)", "accept_fd:%d errno:%d",
+ accept_fd, errno);
+ close(listen_fd);
+
+ /* Send some data from accept_fd to cli_fd */
+ err = send(accept_fd, DATA, DATA_LEN, 0);
+ CHECK(err != DATA_LEN, "send(accept_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Have some timeout in recv(cli_fd). Just in case. */
+ ev.data.fd = cli_fd;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, cli_fd, &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, cli_fd)", "err:%d errno:%d",
+ err, errno);
+
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != cli_fd,
+ "epoll_wait(cli_fd)", "err:%d errno:%d ev.data.fd:%d cli_fd:%d",
+ err, errno, ev.data.fd, cli_fd);
+
+ err = recv(cli_fd, NULL, 0, MSG_TRUNC);
+ CHECK(err, "recv(cli_fd)", "err:%d errno:%d", err, errno);
+
+ close(epfd);
+ close(accept_fd);
+ close(cli_fd);
+
+ check_result();
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_prog_load_attr attr = {
+ .file = "test_sock_fields_kern.o",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .expected_attach_type = BPF_CGROUP_INET_EGRESS,
+ };
+ int cgroup_fd, prog_fd, err;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+
+ err = setup_cgroup_environment();
+ CHECK(err, "setup_cgroup_environment()", "err:%d errno:%d",
+ err, errno);
+
+ atexit(cleanup_cgroup_environment);
+
+ /* Create a cgroup, get fd, and join it */
+ cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+ CHECK(cgroup_fd == -1, "create_and_get_cgroup()",
+ "cgroup_fd:%d errno:%d", cgroup_fd, errno);
+
+ err = join_cgroup(TEST_CGROUP);
+ CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
+
+ err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+ CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
+
+ err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
+ CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)",
+ "err:%d errno%d", err, errno);
+ close(cgroup_fd);
+
+ map = bpf_object__find_map_by_name(obj, "addr_map");
+ CHECK(!map, "cannot find addr_map", "(null)");
+ addr_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "sock_result_map");
+ CHECK(!map, "cannot find sock_result_map", "(null)");
+ sk_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "tcp_sock_result_map");
+ CHECK(!map, "cannot find tcp_sock_result_map", "(null)");
+ tp_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "linum_map");
+ CHECK(!map, "cannot find linum_map", "(null)");
+ linum_map_fd = bpf_map__fd(map);
+
+ test();
+
+ bpf_object__close(obj);
+ cleanup_cgroup_environment();
+
+ printf("PASS\n");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index b6c2c605d8c0..e51d63786ff8 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -158,10 +158,8 @@ static int run_test(int cgfd)
bpf_object__for_each_program(prog, pobj) {
prog_name = bpf_program__title(prog, /*needs_copy*/ false);
- if (libbpf_attach_type_by_name(prog_name, &attach_type)) {
- log_err("Unexpected prog: %s", prog_name);
+ if (libbpf_attach_type_by_name(prog_name, &attach_type))
goto err;
- }
err = bpf_prog_attach(bpf_program__fd(prog), cgfd, attach_type,
BPF_F_ALLOW_OVERRIDE);
@@ -202,7 +200,7 @@ int main(int argc, char **argv)
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
- if (!cgfd)
+ if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index e85a771f607b..3845144e2c91 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -10,7 +10,6 @@
#include <unistd.h>
#include <string.h>
#include <errno.h>
-#include <sys/ioctl.h>
#include <stdbool.h>
#include <signal.h>
#include <fcntl.h>
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
index e6eebda7d112..716b4e3be581 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -103,7 +103,7 @@ int main(int argc, char **argv)
goto err;
cg_fd = create_and_get_cgroup(cg_path);
- if (!cg_fd)
+ if (cg_fd < 0)
goto err;
if (join_cgroup(cg_path))
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index ff3c4522aed6..86152d9ae95b 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -115,7 +115,7 @@ int main(int argc, char **argv)
goto err;
cg_fd = create_and_get_cgroup(cg_path);
- if (!cg_fd)
+ if (cg_fd < 0)
goto err;
if (join_cgroup(cg_path))
@@ -148,17 +148,17 @@ int main(int argc, char **argv)
pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);
sprintf(test_script,
- "/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
+ "iptables -A INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
sprintf(test_script,
- "/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
+ "nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
TESTPORT);
system(test_script);
sprintf(test_script,
- "/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
+ "iptables -D INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 10d44446e801..477a9dcf9fff 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -32,8 +32,10 @@
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
+#include <linux/btf.h>
#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
#ifdef HAVE_GENHDR
# include "autoconf.h"
@@ -49,7 +51,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 13
+#define MAX_NR_MAPS 14
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -59,6 +61,7 @@
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
+static int skips;
struct bpf_test {
const char *descr;
@@ -76,6 +79,7 @@ struct bpf_test {
int fixup_map_in_map[MAX_FIXUPS];
int fixup_cgroup_storage[MAX_FIXUPS];
int fixup_percpu_cgroup_storage[MAX_FIXUPS];
+ int fixup_map_spin_lock[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t retval, retval_unpriv, insn_processed;
@@ -211,15274 +215,46 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
BPF_MOV64_IMM(BPF_REG_5, 0), \
BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
-static struct bpf_test tests[] = {
- {
- "add+sub+mul",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_2, 3),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -3,
- },
- {
- "DIV32 by 0, zero check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV32 by 0, zero check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV64 by 0, zero check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD32 by 0, zero check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD32 by 0, zero check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD64 by 0, zero check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV32 by 0, zero check ok, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 2),
- BPF_MOV32_IMM(BPF_REG_2, 16),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 8,
- },
- {
- "DIV32 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV32 by 0, zero check 2, cls",
- .insns = {
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 by 0, zero check, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "MOD32 by 0, zero check ok, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 3),
- BPF_MOV32_IMM(BPF_REG_2, 5),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "MOD32 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD32 by 0, zero check 2, cls",
- .insns = {
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD64 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "MOD64 by 0, zero check 2, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, -1),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = -1,
- },
- /* Just make sure that JITs used udiv/umod as otherwise we get
- * an exception from INT_MIN/-1 overflow similarly as with div
- * by zero.
- */
- {
- "DIV32 overflow, check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV32 overflow, check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 overflow, check 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 overflow, check 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "MOD32 overflow, check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = INT_MIN,
- },
- {
- "MOD32 overflow, check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = INT_MIN,
- },
- {
- "MOD64 overflow, check 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD64 overflow, check 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "xor32 zero extend check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_2, -1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
- BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "empty prog",
- .insns = {
- },
- .errstr = "unknown opcode 00",
- .result = REJECT,
- },
- {
- "only exit insn",
- .insns = {
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "unreachable",
- .insns = {
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .errstr = "unreachable",
- .result = REJECT,
- },
- {
- "unreachable2",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unreachable",
- .result = REJECT,
- },
- {
- "out of range jump",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "out of range jump2",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, -2),
- BPF_EXIT_INSN(),
- },
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "test1 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "test2 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "test3 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test4 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test5 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test6 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_RAW_INSN(0, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "test7 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "test8 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "uses reserved fields",
- .result = REJECT,
- },
- {
- "test9 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 1, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test10 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test11 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test12 ld_imm64",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "not pointing to valid bpf_map",
- .result = REJECT,
- },
- {
- "test13 ld_imm64",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "arsh32 on imm",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "arsh32 on imm 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -16069393,
- },
- {
- "arsh32 on reg",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "arsh32 on reg 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
- BPF_MOV64_IMM(BPF_REG_1, 15),
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 43724,
- },
- {
- "arsh64 on imm",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "arsh64 on reg",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "no bpf_exit",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
- },
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "loop (back-edge)",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "loop2 (back-edge)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "conditional loop",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "read uninitialized register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "read invalid register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R15 is invalid",
- .result = REJECT,
- },
- {
- "program doesn't init R0 before exit",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "program doesn't init R0 before exit in all branches",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "stack out of bounds",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack",
- .result = REJECT,
- },
- {
- "invalid call insn1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode 8d",
- .result = REJECT,
- },
- {
- "invalid call insn2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_CALL uses reserved",
- .result = REJECT,
- },
- {
- "invalid function call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid func unknown#1234567",
- .result = REJECT,
- },
- {
- "uninitialized stack1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 2 },
- .errstr = "invalid indirect read from stack",
- .result = REJECT,
- },
- {
- "uninitialized stack2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid read from stack",
- .result = REJECT,
- },
- {
- "invalid fp arithmetic",
- 		/* If this ever gets changed, make sure JITs can deal with it. */
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 subtraction from stack pointer",
- .result = REJECT,
- },
- {
- "non-invalid fp arithmetic",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "invalid argument register",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "non-invalid argument register",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "check valid spill/fill",
- .insns = {
- /* spill R1(ctx) into stack */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- /* fill it back into R2 */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
- /* should be able to access R0 = *(R2 + 8) */
- /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .retval = POINTER_VALUE,
- },
- {
- "check valid spill/fill, skb mark",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = ACCEPT,
- },
- {
- "check corrupted spill/fill",
- .insns = {
- /* spill R1(ctx) into stack */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- /* mess up with R1 pointer on stack */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
- /* fill back into R0 is fine for priv.
- * R0 now becomes SCALAR_VALUE.
- */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- /* Load from R0 should fail. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .errstr = "R0 invalid mem access 'inv",
- .result = REJECT,
- },
- {
- "check corrupted spill/fill, LSB",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "check corrupted spill/fill, MSB",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "invalid src register in STX",
- .insns = {
- BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R15 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in STX",
- .insns = {
- BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R14 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in ST",
- .insns = {
- BPF_ST_MEM(BPF_B, 14, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R14 is invalid",
- .result = REJECT,
- },
- {
- "invalid src register in LDX",
- .insns = {
- BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R12 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in LDX",
- .insns = {
- BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R11 is invalid",
- .result = REJECT,
- },
- {
- "junk insn",
- .insns = {
- BPF_RAW_INSN(0, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode 00",
- .result = REJECT,
- },
- {
- "junk insn2",
- .insns = {
- BPF_RAW_INSN(1, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_LDX uses reserved fields",
- .result = REJECT,
- },
- {
- "junk insn3",
- .insns = {
- BPF_RAW_INSN(-1, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode ff",
- .result = REJECT,
- },
- {
- "junk insn4",
- .insns = {
- BPF_RAW_INSN(-1, -1, -1, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode ff",
- .result = REJECT,
- },
- {
- "junk insn5",
- .insns = {
- BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_ALU uses reserved fields",
- .result = REJECT,
- },
- {
- "misaligned read from stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned stack access",
- .result = REJECT,
- },
- {
- "invalid map_fd for function call",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_delete_elem),
- BPF_EXIT_INSN(),
- },
- .errstr = "fd 0 is not pointing to valid bpf_map",
- .result = REJECT,
- },
- {
- "don't check return value before access",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access 'map_value_or_null'",
- .result = REJECT,
- },
- {
- "access memory with incorrect alignment",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "misaligned value access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "sometimes access memory with incorrect alignment",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
- .errstr_unpriv = "R0 leaks addr",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "jump test 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 14),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 5),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 19),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_JMP_IMM(BPF_JA, 0, 0, 15),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
- BPF_JMP_IMM(BPF_JA, 0, 0, 7),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
- BPF_JMP_IMM(BPF_JA, 0, 0, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_delete_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 24 },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = -ENOENT,
- },
- {
- "jump test 4",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "access skb fields ok",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, queue_mapping)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, protocol)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_present)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_tci)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "access skb fields bad1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "access skb fields bad2",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "access skb fields bad3",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -12),
- },
- .fixup_map_hash_8b = { 6 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "access skb fields bad4",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -13),
- },
- .fixup_map_hash_8b = { 7 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff family",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff local_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff local_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "valid access __sk_buff family",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff local_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff local_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "invalid access of tc_classid for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of skb->mark for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->mark is not writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->tc_index is writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "check skb->priority is writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, priority)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "direct packet read for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "direct packet write for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "overlapping checks for direct packet access SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access family in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, family)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_ip4 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_ip4 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_port in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_port in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_ip6 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access local_ip6 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access size in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, size)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid 64B read of size in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, size)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid read past end of SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, size) + 4),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid read offset in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, family) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet read for SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "direct packet write for SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "overlapping checks for direct packet access SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "check skb->mark is not writeable by sockets",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "check skb->tc_index is not writeable by sockets",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "check cb access: byte",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "__sk_buff->hash, offset 0, byte store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, hash)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "__sk_buff->tc_index, offset 3, byte store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index) + 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash byte load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: byte, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check cb access: half",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: half, unaligned",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check __sk_buff->hash, offset 0, half store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, hash)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->tc_index, offset 2, half store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash half load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash half load permitted 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash half load not permitted, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#endif
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash half load not permitted, unaligned 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
-#endif
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check cb access: half, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check cb access: word",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: word, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: double, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double, unaligned 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double, oob 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, oob 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->ifindex dw store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, ifindex)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->ifindex dw load not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, ifindex)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check out of range skb->cb access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 256),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
- },
- {
- "write skb fields from socket prog",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .errstr_unpriv = "R1 leaks addr",
- .result_unpriv = REJECT,
- },
- {
- "write skb fields from tc_cls_act prog",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tstamp)),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tstamp)),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "PTR_TO_STACK store/load",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xfaceb00c,
- },
- {
- "PTR_TO_STACK store/load - bad alignment on off",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
- },
- {
- "PTR_TO_STACK store/load - bad alignment on reg",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
- },
- {
- "PTR_TO_STACK store/load - out of bounds low",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off=-79992 size=8",
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- },
- {
- "PTR_TO_STACK store/load - out of bounds high",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off=0 size=8",
- },
- {
- "unpriv: return pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr",
- .retval = POINTER_VALUE,
- },
- {
- "unpriv: add const to pointer",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "unpriv: add pointer to pointer",
- .insns = {
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 pointer += pointer",
- },
- {
- "unpriv: neg pointer",
- .insns = {
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer arithmetic",
- },
- {
- "unpriv: cmp pointer with const",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer comparison",
- },
- {
- "unpriv: cmp pointer with pointer",
- .insns = {
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R10 pointer comparison",
- },
- {
- "unpriv: check that printk is disallowed",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_trace_printk),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "unknown func bpf_trace_printk#6",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: pass pointer to helper function",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr_unpriv = "R4 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: indirectly pass pointer on stack to helper function",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "invalid indirect read from stack off -8+0 size 8",
- .result = REJECT,
- },
- {
- "unpriv: mangle pointer on stack 1",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: mangle pointer on stack 2",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: read pointer from stack in small chunks",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid size",
- .result = REJECT,
- },
- {
- "unpriv: write pointer into ctx",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 leaks addr",
- .result_unpriv = REJECT,
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "unpriv: spill/fill of ctx",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "unpriv: spill/fill of ctx 2",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of ctx 3",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=fp expected=ctx",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of ctx 4",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
- BPF_REG_0, -8, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=inv expected=ctx",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - ctx and sock",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb == NULL) *target = sock; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* else *target = skb; */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* struct __sk_buff *skb = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* skb->mark = 42; */
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- /* if (sk) bpf_sk_release(sk) */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "type=ctx expected=sock",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - leak sock",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb == NULL) *target = sock; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* else *target = skb; */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* struct __sk_buff *skb = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* skb->mark = 42; */
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- //.errstr = "same insn cannot be used with different pointers",
- .errstr = "Unreleased reference",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb) *target = skb */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* else *target = sock */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* struct bpf_sock *sk = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct bpf_sock, mark)),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb) *target = skb */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* else *target = sock */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* struct bpf_sock *sk = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct bpf_sock, mark)),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- //.errstr = "same insn cannot be used with different pointers",
- .errstr = "cannot write into socket",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers ldx",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- -(__s32)offsetof(struct bpf_perf_event_data,
- sample_period) - 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
- offsetof(struct bpf_perf_event_data,
- sample_period)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "unpriv: write pointer into map elem value",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "alu32: mov u32 const",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_7, 0),
- BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
- BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "unpriv: partial copy of pointer",
- .insns = {
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 partial copy",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: pass pointer to tail_call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .errstr_unpriv = "R3 leaks addr into helper",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: cmp map pointer with zero",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: write into frame pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "frame pointer is read only",
- .result = REJECT,
- },
- {
- "unpriv: spill/fill frame pointer",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "frame pointer is read only",
- .result = REJECT,
- },
- {
- "unpriv: cmp of frame pointer",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: adding of fp",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: cmp of stack pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R2 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "runtime/jit: tail_call within bounds, prog once",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "runtime/jit: tail_call within bounds, prog loop",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 41,
- },
- {
- "runtime/jit: tail_call within bounds, no prog",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "runtime/jit: tail_call out of bounds",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 256),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "runtime/jit: pass negative index to tail_call",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "runtime/jit: pass > 32bit index to tail_call",
- .insns = {
- BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 2 },
- .result = ACCEPT,
- .retval = 42,
- /* Verifier rewrite for unpriv skips tail call here. */
- .retval_unpriv = 2,
- },
- {
- "PTR_TO_STACK check high 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "invalid stack off=0 size=1",
- .result = REJECT,
- },
- {
- "PTR_TO_STACK check high 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check high 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check high 7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "fp pointer offset",
- },
- {
- "PTR_TO_STACK check low 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check low 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check low 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "invalid stack off=-513 size=1",
- .result = REJECT,
- },
- {
- "PTR_TO_STACK check low 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "math between fp pointer",
- },
- {
- "PTR_TO_STACK check low 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check low 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check low 7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "fp pointer offset",
- },
- {
- "PTR_TO_STACK mixed reg/k, 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK mixed reg/k, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK mixed reg/k, 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -3,
- },
- {
- "PTR_TO_STACK reg",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid stack off=0 size=1",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "stack pointer arithmetic",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
- BPF_ST_MEM(0, BPF_REG_2, 4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_ST_MEM(0, BPF_REG_2, 4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "raw_stack: no skb_load_bytes",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- /* Call to skb_load_bytes() omitted. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid read from stack off -8+0 size 8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, negative len",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, negative len 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, ~0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, zero len",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, no init",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, init",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs around bounds",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs corruption",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs corruption 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R3 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs + data",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-513 access_size=8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-1 access_size=8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-512 access_size=0",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, large access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 512),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "context stores via ST",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_ST stores into R1 ctx is not allowed",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "context stores via XADD",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
- BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access off=76",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- },
- {
- "direct packet access: test4 (write)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test5 (pkt_end >= reg, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test6 (pkt_end >= reg, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test7 (pkt_end >= reg, both accesses)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test8 (double test, variant 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test9 (double test, variant 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test10 (write invalid)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test11 (shift, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 144),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test12 (and, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 144),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test13 (branches, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
- BPF_MOV64_IMM(BPF_REG_3, 14),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_3, 24),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
- BPF_MOV64_IMM(BPF_REG_5, 12),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test15 (spill with xadd)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_5, 4096),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 invalid mem access 'inv'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test16 (arith on data_end)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test17 (pruning, alignment)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
- BPF_JMP_A(-6),
- },
- .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "direct packet access: test18 (imm += pkt_ptr, 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 8),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test19 (imm += pkt_ptr, 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- BPF_MOV64_IMM(BPF_REG_4, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test20 (x += pkt_ptr, 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test21 (x += pkt_ptr, 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test22 (x += pkt_ptr, 3)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test23 (x += pkt_ptr, 4)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test24 (x += pkt_ptr, 5)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 64),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test25 (marking on <, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test26 (marking on <, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test27 (marking on <=, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test28 (marking on <=, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test1, valid packet_ptr range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test2, unchecked packet_ptr",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test3, variable add",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test4, packet_ptr with bad range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 7 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test5, packet_ptr with too short range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 6 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test6, cls valid packet_ptr range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test7, cls unchecked packet_ptr",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test8, cls variable add",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test9, cls packet_ptr with bad range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 7 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test10, cls packet_ptr with too short range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 6 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test11, cls unsuitable helper 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_4, 42),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_store_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "helper access to the packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test12, cls unsuitable helper 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_4, 4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "helper access to the packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test13, cls helper ok",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test14, cls helper ok sub",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test15, cls helper fail sub",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test16, cls helper fail range 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test17, cls helper fail range 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, -9),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R2 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test18, cls helper fail range 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, ~0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R2 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test19, cls helper range zero",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test20, pkt end as input",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=pkt_end expected=fp",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test21, wrong reg",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "prevent map lookup in sockmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_sockmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- },
- {
- "prevent map lookup in sockhash",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_sockhash = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- },
- {
- "prevent map lookup in xskmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_xskmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "prevent map lookup in stack trace",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_stacktrace = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "prevent map lookup in prog array",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_prog2 = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
- },
- {
- "valid map access into an array with a constant",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "valid map access into an array with a register",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "valid map access into an array with a variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "valid map access into an array with a signed variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a constant",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=48 size=8",
- .result = REJECT,
- },
- {
- "invalid map access into an array with a register",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 min value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with no floor check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "R0 unbounded memory access",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a invalid max check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "invalid access to map value, value_size=48 off=44 size=8",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a invalid max check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3, 11 },
- .errstr = "R0 pointer += pointer",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet read test#1 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, queue_mapping)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, protocol)),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_present)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid bpf_context access off=76 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#2 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_tci)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_proto)),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, priority)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, priority)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff,
- ingress_ifindex)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#3 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
- offsetof(struct __sk_buff, cb[4])),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#4 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of tc_classid for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of data_meta for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data_meta)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of flow_keys for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, flow_keys)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid write access to napi_id for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
- offsetof(struct __sk_buff, napi_id)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "valid cgroup storage access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "fd 1 is not pointing to valid bpf_map",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid cgroup storage access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 7),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .errstr_unpriv = "R2 leaks addr into helper function",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "valid per-cpu cgroup storage access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "fd 1 is not pointing to valid bpf_map",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid per-cpu cgroup storage access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 7),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .errstr_unpriv = "R2 leaks addr into helper function",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "write tstamp from CGROUP_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tstamp)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid bpf_context access off=152 size=8",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "read tstamp from CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tstamp)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "multiple registers share map_lookup_elem result",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "invalid memory access with multiple map_lookup_elem calls",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = REJECT,
- .errstr = "R4 !read_ok",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "valid indirect map_lookup_elem access with 2nd lookup in branch",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_2, 10),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "invalid map access from else condition",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 unbounded memory access",
- .result = REJECT,
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "constant register |= constant should keep constant type",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant should not bypass stack boundary checks",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-48 access_size=58",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant register should keep constant type",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_4, 13),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant register should not bypass stack boundary checks",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_4, 24),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-48 access_size=58",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "invalid direct packet write for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "cannot write into packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "invalid direct packet write for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "cannot write into packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
- },
- {
- "direct packet write for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "direct packet read for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "direct packet read for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
- },
- {
- "direct packet read for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "overlapping checks for direct packet access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "make headroom for LWT_XMIT",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
- /* split for s390 to succeed */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 42),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "invalid access of tc_classid for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of tc_classid for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of tc_classid for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "leak pointer into ctx 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 2 },
- .errstr_unpriv = "R2 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- },
- {
- "leak pointer into ctx 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- },
- {
- "leak pointer into ctx 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .errstr_unpriv = "R2 leaks addr into ctx",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "leak pointer into map val",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr_unpriv = "R6 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "helper access to map: full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=0",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=56",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: negative range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=0",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): negative range (> adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): negative range (< adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): negative range (> adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): negative range (< adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 unbounded memory access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): wrong max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=45",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 unbounded memory access",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <=, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <=, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 unbounded memory access",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, good access 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 min value is negative",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, good access 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 min value is negative",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map access: known scalar += value_ptr from different maps",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 tried to add from different maps",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar from different maps",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 min value is outside of the array range",
- .retval = 1,
- },
- {
- "map access: known scalar += value_ptr from different maps, but same value properties",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: known scalar += value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "invalid access to map value",
- },
- {
- "map access: value_ptr += known scalar, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "invalid access to map value",
- },
- {
- "map access: value_ptr += known scalar, 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, -2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 5",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: value_ptr += known scalar, 6",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: unknown scalar += value_ptr, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_1, 19),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 max value is outside of the array range",
- .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
- },
- {
- "map access: value_ptr += unknown scalar, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += unknown scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: value_ptr += unknown scalar, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 pointer += pointer prohibited",
- },
- {
- "map access: known scalar -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 tried to subtract pointer from scalar",
- },
- {
- "map access: value_ptr -= known scalar",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- },
- {
- "map access: value_ptr -= known scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: unknown scalar -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 tried to subtract pointer from scalar",
- },
- {
- "map access: value_ptr -= unknown scalar",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is negative",
- },
- {
- "map access: value_ptr -= unknown scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- .errstr_unpriv = "R0 pointer -= pointer prohibited",
- },
- {
- "map lookup helper access to map",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 8 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map update helper access to map",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map update helper access to map: wrong size",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .fixup_map_hash_16b = { 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=0 size=16",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- offsetof(struct other_val, bar)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm): out-of-bound 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- sizeof(struct other_val) - 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm): out-of-bound 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct other_val, bar)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg): out-of-bound 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- sizeof(struct other_val) - 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg): out-of-bound 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, -4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct other_val, bar), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable): no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable): wrong max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct other_val, bar) + 1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 11 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=9 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map element value is preserved across register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- },
- {
- "map element value or null is marked on register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- },
- {
- "map element value store of cleared call register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R1 !read_ok",
- .errstr = "R1 !read_ok",
- .result = REJECT,
- .result_unpriv = REJECT,
- },
- {
- "map element value with unaligned store",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value with unaligned load",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value illegal alu op, 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 bitwise operator &= on pointer",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 32-bit pointer arithmetic prohibited",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 pointer arithmetic with /= operator",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 pointer arithmetic prohibited",
- .errstr = "invalid mem access 'inv'",
- .result = REJECT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value illegal alu op, 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_3, 4096),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 invalid mem access 'inv'",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value is preserved across register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
- offsetof(struct test_val, foo)),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, bitwise AND, zero included",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP (signed), correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, bounds + offset",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- /* because max wasn't checked, signed min is negative */
- .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, no min check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP (signed), no min check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val), 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) + 1, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=49",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map adjusted, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) - 20, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map adjusted, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) - 19, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
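- 			/* r2 is now either 0 or 64, so a non-zero size may be
- 			 * passed together with the NULL pointer in r1
- 			 */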
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 0 /* csum_diff of 64-byte packet */,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: 8 bytes leak",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
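- 			/* r2 = [1, 64]: a read of up to 64 bytes from fp-64
- 			 * includes fp-32, which was never initialized above
- 			 */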
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+32 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: 8 bytes no leak (init memory)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "invalid and of negative number",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
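- 			/* ANDing with a negative constant only clears the low
- 			 * two bits, leaving no usable upper bound on r1
- 			 */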
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid range check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
- BPF_MOV32_IMM(BPF_REG_3, 1),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
- BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
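- 			/* at runtime r3 is either 0 or 0x10000000, far beyond
- 			 * the 48-byte map value
- 			 */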
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map in map access",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .result = ACCEPT,
- },
- {
- "invalid inner map pointer",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .errstr = "R1 pointer arithmetic on map_ptr prohibited",
- .result = REJECT,
- },
- {
- "forgot null checking on the inner map pointer",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .errstr = "R1 type=map_value_or_null expected=map_ptr",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .errstr = "R5 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_7, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "ld_abs: tests on r6 and skb data reload helper",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42 /* ultimate return value */,
- },
- {
- "ld_ind: check calling conv, r1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .errstr = "R5 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "check bpf_perf_event_data->sample_period byte load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 7),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period half load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 6),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period word load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 4),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period dword load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check skb->data half load not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->tc_classid half load not permitted for lwt prog",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "bounds checks mixing signed and unsigned, positive bounds",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 2),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
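- 			/* r1 > -1 (u64 max) can never be true, so this check
- 			 * places no bound on r1
- 			 */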
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
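- 			/* r1 &= 1 bounds r1 to [0, 1], keeping the access
- 			 * within the 8-byte value
- 			 */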
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 5",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_6, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 min value is negative, either use unsigned",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 7",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 8",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 9",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
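- 			/* reached only when r1 < (1ULL << 63), so its signed
- 			 * value is non-negative
- 			 */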
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 10",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 11",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- /* Dead branch. */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 12",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -6),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 13",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 2),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 14",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_8, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
- BPF_JMP_IMM(BPF_JA, 0, 0, -7),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 15",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -6),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- .result_unpriv = REJECT,
- },
- {
- "subtraction bounds (map value) variant 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- },
- {
- "subtraction bounds (map value) variant 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "check subtraction on pointers for unpriv",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1, 9 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R9 pointer -= pointer prohibited",
- },
- {
- "bounds check based on zero-extended MOV",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0x0000'0000'ffff'ffff */
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0 */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check based on sign-extended MOV. test1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0xffff'ffff'ffff'ffff */
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0xffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
- /* r0 = <oob pointer> */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access to OOB pointer */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 4294967295",
- .result = REJECT
- },
- {
- "bounds check based on sign-extended MOV. test2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0xffff'ffff'ffff'ffff */
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0xfff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
- /* r0 = <oob pointer> */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access to OOB pointer */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 min value is outside of the array range",
- .result = REJECT
- },
- {
- "bounds check based on reg_off + var_off + insn_off. test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
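- 			/* the smallest possible offset is ((1 << 29) - 1) +
- 			 * ((1 << 29) - 1) + 3 = 1073741825, far past
- 			 * value_size=8
- 			 */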
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "value_size=8 off=1073741825",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "bounds check based on reg_off + var_off + insn_off. test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "value 1073741823",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "bounds check after truncation of non-boundary-crossing range",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- /* r2 = 0x10'0000'0000 */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
- /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- /* r1 = [0x00, 0xff] */
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
- /* r1 = 0 */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check after truncation of boundary-crossing range (1)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
- * [0x0000'0000, 0x0000'007f]
- */
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0x00, 0xff] or
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- /* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after truncation of boundary-crossing range (2)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
- * [0x0000'0000, 0x0000'007f]
- * difference to previous test: truncation via MOV32
- * instead of ALU32.
- */
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0x00, 0xff] or
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- /* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after wrapping 32-bit addition",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- /* r1 = 0x7fff'ffff */
- BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
- /* r1 = 0xffff'fffe */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- /* r1 = 0 */
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check after shift with oversized count operand",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_2, 32),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- /* r1 = (u32)1 << (u32)32 = ? */
- BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
- /* r1 = [0x0000, 0xffff] */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
- /* computes unknown pointer, potentially OOB */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT
- },
- {
- "bounds check after right shift of maybe-negative number",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- /* r1 = [-0x01, 0xfe] */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
- /* r1 = 0 or 0xff'ffff'ffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* r1 = 0 or 0xffff'ffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* computes unknown pointer, potentially OOB */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after 32-bit right shift with 64-bit input",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- /* r1 = 2 */
- BPF_MOV64_IMM(BPF_REG_1, 2),
- /* r1 = 1<<32 */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
- /* r1 = 0 (NOT 2!) */
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
- /* r1 = 0xffff'fffe (NOT 0!) */
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
- /* computes OOB pointer */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
- .result = REJECT,
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 2147483646",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "pointer offset 1073741822",
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "pointer offset -1073741822",
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_1, 1000000),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 1000000000000",
- .result = REJECT
- },
- {
- "pointer/scalar confusion in state equality check (way 1)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr as return value"
- },
- {
- "pointer/scalar confusion in state equality check (way 2)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_JMP_A(1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr as return value"
- },
- {
- "variable-offset ctx access",
- .insns = {
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- /* add it to skb. We now have either &skb->len or
- * &skb->pkt_type, but we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- /* dereference it */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "variable-offset stack access",
- .insns = {
- /* Fill the top 8 bytes of the stack */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
- /* add it to fp. We now have either fp-4 or fp-8, but
- * we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* dereference it */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "indirect variable-offset stack access",
- .insns = {
- /* Fill the top 8 bytes of the stack */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
- /* add it to fp. We now have either fp-4 or fp-8, but
- * we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* dereference it indirectly */
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .errstr = "variable stack read R2",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "direct stack access with 32-bit wraparound. test1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer and 2147483647",
- .result = REJECT
- },
- {
- "direct stack access with 32-bit wraparound. test2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer and 1073741823",
- .result = REJECT
- },
- {
- "direct stack access with 32-bit wraparound. test3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer offset 1073741822",
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result = REJECT
- },
- {
- "liveness pruning and write screening",
- .insns = {
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* branch conditions teach us nothing about R2 */
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "varlen_map_value_access pruning",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "R0 unbounded memory access",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid 64-bit BPF_END",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 0),
- {
- .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
- .dst_reg = BPF_REG_0,
- .src_reg = 0,
- .off = 0,
- .imm = 32,
- },
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode d7",
- .result = REJECT,
- },
- {
- "XDP, using ifindex from netdev",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, ingress_ifindex)),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .retval = 1,
- },
- {
- "meta access, test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet, off=-8",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test5",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_xdp_adjust_meta),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R3 !read_ok",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test7",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test8",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test9",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test10",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_IMM(BPF_REG_5, 42),
- BPF_MOV64_IMM(BPF_REG_6, 24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test11",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_IMM(BPF_REG_5, 42),
- BPF_MOV64_IMM(BPF_REG_6, 24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test12",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "arithmetic ops make PTR_TO_CTX unusable",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct __sk_buff, data) -
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .errstr = "dereference of modified ctx ptr",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "pkt_end - pkt_start is allowed",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = TEST_DATA_LEN,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "XDP pkt read, pkt_end mangling, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "XDP pkt read, pkt_end mangling, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "check deducing bounds from const, 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check deducing bounds from const, 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, ~0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, ~0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 9",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 10",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
- /* Marks reg as unknown. */
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
- },
- {
- "bpf_exit with invalid return code. test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x0; 0xffffffff)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x0; 0x3)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x2; 0x0)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 is not a known value (ctx)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test7",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has unknown scalar value",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "calls: basic sanity",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: not on unpriviledged",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: div by 0 in subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(BPF_REG_2, 0),
- BPF_MOV32_IMM(BPF_REG_3, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: multiple ret types in subprog 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- },
- {
- "calls: multiple ret types in subprog 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 16 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- },
- {
- "calls: overlapping caller/callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn is not an exit or jmp",
- .result = REJECT,
- },
- {
- "calls: wrong recursive calls",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: wrong src reg",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "BPF_CALL uses reserved fields",
- .result = REJECT,
- },
- {
- "calls: wrong off value",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "BPF_CALL uses reserved fields",
- .result = REJECT,
- },
- {
- "calls: jump back loop",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn 0 to 0",
- .result = REJECT,
- },
- {
- "calls: conditional call",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: conditional call 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: conditional call 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: conditional call 4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -5),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: conditional call 5",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: conditional call 6",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: using r0 returned by callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: using uninit r0 from callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "calls: callee is using r1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN,
- },
- {
- "calls: callee using args1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "calls: callee using wrong args2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "calls: callee using two args",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, len)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
- offsetof(struct __sk_buff, len)),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
- },
- {
- "calls: callee changing pkt pointers",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- /* clear_all_pkt_pointers() has to walk all frames
- * to make sure that pkt pointers in the caller
-			 * are cleared when the callee is calling a helper that
- * adjusts packet size
- */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_xdp_adjust_head),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R6 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls with args",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN + TEST_DATA_LEN,
- },
- {
- "calls: calls with stack arith",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "calls: calls with misaligned stack access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- .errstr = "misaligned stack access",
- .result = REJECT,
- },
- {
- "calls: calls control flow, jump test",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 43),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 43,
- },
- {
- "calls: calls control flow, jump test 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 43),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "jump out of range from insn 1 to 4",
- .result = REJECT,
- },
- {
- "calls: two calls with bad jump",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range from insn 11 to 9",
- .result = REJECT,
- },
- {
- "calls: recursive call. test1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "calls: recursive call. test2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "calls: unreachable code",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "unreachable insn 6",
- .result = REJECT,
- },
- {
- "calls: invalid call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "invalid destination",
- .result = REJECT,
- },
- {
- "calls: invalid call 2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "invalid destination",
- .result = REJECT,
- },
- {
- "calls: jumping across function bodies. test1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: jumping across function bodies. test2",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: call without exit",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "calls: call into middle of ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn",
- .result = REJECT,
- },
- {
- "calls: call into middle of other call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn",
- .result = REJECT,
- },
- {
- "calls: ld_abs with changing ctx data in callee",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
- .result = REJECT,
- },
- {
- "calls: two calls with bad fallthrough",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "calls: two calls with stack read",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: two calls with stack write",
- .insns = {
- /* main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
- /* write into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* read from stack frame of main prog */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: stack overflow using two frames (pre-call access)",
- .insns = {
- /* prog 1 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* prog 2 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "combined stack size",
- .result = REJECT,
- },
- {
- "calls: stack overflow using two frames (post-call access)",
- .insns = {
- /* prog 1 */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
-
- /* prog 2 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "combined stack size",
- .result = REJECT,
- },
- {
- "calls: stack depth check using three frames. test1",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=32, stack_A=256, stack_B=64
- * and max(main+A, main+A+B) < 512
- */
- .result = ACCEPT,
- },
- {
- "calls: stack depth check using three frames. test2",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=32, stack_A=64, stack_B=256
- * and max(main+A, main+A+B) < 512
- */
- .result = ACCEPT,
- },
- {
- "calls: stack depth check using three frames. test3",
- .insns = {
- /* main */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
- BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- /* B */
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=64, stack_A=224, stack_B=256
- * and max(main+A, main+A+B) > 512
- */
- .errstr = "combined stack",
- .result = REJECT,
- },
- {
- "calls: stack depth check using three frames. test4",
- /* void main(void) {
- * func1(0);
- * func1(1);
- * func2(1);
- * }
- * void func1(int alloc_or_recurse) {
- * if (alloc_or_recurse) {
- * frame_pointer[-300] = 1;
- * } else {
- * func2(alloc_or_recurse);
- * }
- * }
- * void func2(int alloc_or_recurse) {
- * if (alloc_or_recurse) {
- * frame_pointer[-300] = 1;
- * }
- * }
- */
- .insns = {
- /* main */
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
- BPF_EXIT_INSN(),
- /* B */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = REJECT,
- .errstr = "combined stack",
- },
- {
- "calls: stack depth check using three frames. test5",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
- BPF_EXIT_INSN(),
- /* A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
- BPF_EXIT_INSN(),
- /* C */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
- BPF_EXIT_INSN(),
- /* D */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
- BPF_EXIT_INSN(),
- /* E */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
- BPF_EXIT_INSN(),
- /* F */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
- BPF_EXIT_INSN(),
- /* G */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
- BPF_EXIT_INSN(),
- /* H */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "call stack",
- .result = REJECT,
- },
- {
- "calls: spill into caller stack frame",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "cannot spill",
- .result = REJECT,
- },
- {
- "calls: write into caller stack frame",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "calls: write into callee stack frame",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "cannot return stack pointer",
- .result = REJECT,
- },
- {
- "calls: two calls with stack write and void return",
- .insns = {
- /* main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* write into stack frame of main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
- BPF_EXIT_INSN(), /* void return */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: ambiguous return value",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "calls: two calls that return map_value",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
-
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-			/* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that return map_value with bool condition",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
-			/* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(), /* return 1 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that return map_value with incorrect bool check",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
-			/* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(), /* return 1 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = REJECT,
- .errstr = "invalid read from stack off -16+0 size 8",
- },
- {
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = ACCEPT,
- },
- {
- "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
-			BPF_MOV64_IMM(BPF_REG_9, 0),  /* 26 */
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
-			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
-			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
- BPF_JMP_IMM(BPF_JA, 0, 0, -30),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -8),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls that receive map_value_ptr_or_null via arg. test1",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that receive map_value_ptr_or_null via arg. test2",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 0 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- },
- {
- "calls: pkt_ptr spill into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = POINTER_VALUE,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
-			/* Marking is still kept, but not safe in all cases. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid access to packet",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* Marking is still kept and safe here. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* Check marking propagated. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "same insn cannot be used with different",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R4 invalid mem access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R4 invalid mem access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 8",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 9",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid access to packet",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: caller stack init to zero or map_value_or_null",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- /* fetch map_value_or_null or const_zero from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* store into map_value */
- BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* if (ctx == 0) return; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
- /* else bpf_map_lookup() and *(fp - 8) = r0 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 13 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "calls: stack init to zero and pruning",
- .insns = {
-			/* first make allocated_stack 16 bytes */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- /* now fork the execution such that the false branch
-			 * of JGT insn will be verified second and it skips zero
- * init of fp-8 stack slot. If stack liveness marking
- * is missing live_read marks from call map_lookup
- * processing then pruning will incorrectly assume
- * that fp-8 stack slot was unused in the fall-through
- * branch and will accept the program incorrectly
- */
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 6 },
- .errstr = "invalid indirect read from stack off -8+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "calls: two calls returning different map pointers for lookup (hash, array)",
- .insns = {
- /* main prog */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_CALL_REL(11),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_CALL_REL(12),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- /* subprog 1 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* subprog 2 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_48b = { 13 },
- .fixup_map_array_48b = { 16 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: two calls returning different map pointers for lookup (hash, map in map)",
- .insns = {
- /* main prog */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_CALL_REL(11),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_CALL_REL(12),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- /* subprog 1 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* subprog 2 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_in_map = { 16 },
- .fixup_map_array_48b = { 13 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'map_ptr'",
- },
- {
- "cond: two branches returning different map pointers for lookup (tail, tail)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 5 },
- .fixup_prog2 = { 2 },
- .result_unpriv = REJECT,
- .errstr_unpriv = "tail_call abusing map_ptr",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "cond: two branches returning same map pointers for lookup (tail, tail)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog2 = { 2, 5 },
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "search pruning: all branches should be verified (nop operation)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_A(1),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R6 invalid mem access 'inv'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "search pruning: all branches should be verified (invalid stack access)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
- BPF_JMP_A(1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "invalid read from stack off -16+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "jit: lsh, rsh, arsh by 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 0xff),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: mov32 for ldimm64, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
- BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: mov32 for ldimm64, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
- BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: various mul tests",
- .insns = {
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
- BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
- BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
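For reference, the magic constant loaded into r2 above is simply the full 64-bit product of the two multiplicands; a quick check, not part of the test source:

/* 0xfefefe * 0xefefef = 16711422 * 15724527
 *                     = 262779206447394
 *                     = 0xeeff0d413122   (the value expected in r2)
 * The ALU32 variants compare only the low 32 bits, 0x0d413122.
 */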
- {
- "xadd/w check unaligned stack",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "xadd/w check unaligned map",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = REJECT,
- .errstr = "misaligned value access off",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "xadd/w check unaligned pkt",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 99),
- BPF_JMP_IMM(BPF_JA, 0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "BPF_XADD stores into R2 pkt is not allowed",
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "xadd/w check whether src/dst got mangled, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 3,
- },
- {
- "xadd/w check whether src/dst got mangled, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 3,
- },
- {
- "bpf_get_stack return R0 within range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
- BPF_MOV64_IMM(BPF_REG_4, 256),
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
- BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "ld_abs: invalid op 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_DW, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "unknown opcode",
- },
- {
- "ld_abs: invalid op 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 256),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "unknown opcode",
- },
- {
- "ld_abs: nmap reduced",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
- BPF_MOV32_IMM(BPF_REG_0, 18),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
- BPF_LD_IND(BPF_W, BPF_REG_7, 14),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
- BPF_MOV32_IMM(BPF_REG_0, 280971478),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
- BPF_MOV32_IMM(BPF_REG_0, 22),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
- BPF_LD_IND(BPF_H, BPF_REG_7, 14),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
- BPF_MOV32_IMM(BPF_REG_0, 17366),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV32_IMM(BPF_REG_0, 256),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .data = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 256,
- },
- {
- "ld_abs: div + abs, test 1",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
- BPF_LD_ABS(BPF_B, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 10,
- },
- {
- "ld_abs: div + abs, test 2",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
- BPF_LD_ABS(BPF_B, 128),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: div + abs, test 3",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: div + abs, test 4",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
- BPF_LD_ABS(BPF_B, 256),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: vlan + abs, test 1",
- .insns = { },
- .data = {
- 0x34,
- },
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0xbef,
- },
- {
- "ld_abs: vlan + abs, test 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .data = {
- 0x34,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "ld_abs: jump around ld_abs",
- .insns = { },
- .data = {
- 10, 11,
- },
- .fill_helper = bpf_fill_jump_around_ld_abs,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 10,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 1",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 4090,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 2",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2047,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 3",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 511,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 4",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 5,
- },
- {
- "pass unmodified ctx pointer to helper",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: leak potential reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: leak potential reference on stack",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: leak potential reference on stack 2",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: zero potential reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: copy and zero potential references",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: release reference without check",
- .insns = {
- BPF_SK_LOOKUP,
- /* reference in r0 may be NULL */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=sock_or_null expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: release reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference 2",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference twice",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: release reference twice inside branch",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: alloc, check, free in one subbranch",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
- /* if (offsetof(skb, mark) > data_len) exit; */
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
- offsetof(struct __sk_buff, mark)),
- BPF_SK_LOOKUP,
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
- /* Leak reference in R0 */
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "reference tracking: alloc, check, free in both subbranches",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
- /* if (offsetof(skb, mark) > data_len) exit; */
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
- offsetof(struct __sk_buff, mark)),
- BPF_SK_LOOKUP,
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "reference tracking in call: free reference in subprog",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "pass modified ctx pointer to helper, 1",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- },
- {
- "pass modified ctx pointer to helper, 2",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_socket_cookie),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr_unpriv = "dereference of modified ctx ptr",
- .errstr = "dereference of modified ctx ptr",
- },
- {
- "pass modified ctx pointer to helper, 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
- },
- {
- "mov64 src == dst",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
- // Check bounds are OK
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "mov64 src != dst",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
- // Check bounds are OK
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "allocated_stack",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
- BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
- BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = ACCEPT,
- .insn_processed = 15,
- },
- {
- "masking, test out of bounds 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 5),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 3",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 4",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 5",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 6",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 9",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 10",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 11",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 12",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 4),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 4,
- },
- {
- "masking, test in bounds 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 3",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xfffffffe,
- },
- {
- "masking, test in bounds 4",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
- BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xabcde,
- },
- {
- "masking, test in bounds 5",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 6",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 46),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 46,
- },
- {
- "masking, test in bounds 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -46),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 46,
- },
- {
- "masking, test in bounds 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -47),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
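The masking tests above all exercise the same five-instruction index-sanitizing sequence (sub, or, neg, arsh 63, and). A rough C sketch of what that sequence computes, assuming an arithmetic right shift on signed values; informational only, not part of the test source:

/* Returns index unchanged when index <= size - 1, and 0 otherwise. */
static unsigned long long mask_index(unsigned long long index,
				     unsigned long long size)
{
	long long mask = (long long)(size - 1) - (long long)index;	/* BPF_SUB  */

	mask |= (long long)index;					/* BPF_OR   */
	mask = -mask;							/* BPF_NEG  */
	mask >>= 63;			/* BPF_ARSH: ~0 if in bounds, 0 if not */
	return index & (unsigned long long)mask;			/* BPF_AND  */
}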
- {
- "reference tracking in call: free reference in subprog and outside",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking in call: alloc & leak reference in subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
- BPF_SK_LOOKUP,
- /* spill unchecked sk_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking in call: alloc in subprog, release outside",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(), /* return sk */
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = POINTER_VALUE,
- .result = ACCEPT,
- },
- {
- "reference tracking in call: sk_ptr leak into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- /* spill unchecked sk_ptr into stack of caller */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking in call: sk_ptr spill into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- /* spill unchecked sk_ptr into stack of caller */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* now the sk_ptr is verified, free the reference */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: allow LD_ABS",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: forbid LD_ABS while holding reference",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
- .result = REJECT,
- },
- {
- "reference tracking: allow LD_IND",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "reference tracking: forbid LD_IND while holding reference",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
- .result = REJECT,
- },
- {
- "reference tracking: check reference or tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- BPF_SK_LOOKUP,
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 17 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference then tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- BPF_SK_LOOKUP,
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 18 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: leak possible reference over tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- /* Look up socket and store in REG_6 */
- BPF_SK_LOOKUP,
- /* bpf_tail_call() */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 16 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "tail_call would lead to reference leak",
- .result = REJECT,
- },
- {
- "reference tracking: leak checked reference over tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- /* Look up socket and store in REG_6 */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- /* if (!sk) goto end */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 17 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "tail_call would lead to reference leak",
- .result = REJECT,
- },
- {
- "reference tracking: mangle and release sock_or_null",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
- .result = REJECT,
- },
- {
- "reference tracking: mangle and release sock",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R1 pointer arithmetic on sock prohibited",
- .result = REJECT,
- },
- {
- "reference tracking: access member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: write to member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LD_IMM64(BPF_REG_2, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
- offsetof(struct bpf_sock, mark)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "cannot write into socket",
- .result = REJECT,
- },
- {
- "reference tracking: invalid 64-bit access of member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_sock access off=0 size=8",
- .result = REJECT,
- },
- {
- "reference tracking: access after release",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "reference tracking: direct access for lookup",
- .insns = {
- /* Check that the packet is at least 64B long */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
- /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "calls: ctx read at start of subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "check wire_len is not readable by sockets",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check wire_len is readable by tc classifier",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "check wire_len is not writable by tc classifier",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "calls: cross frame pruning",
- .insns = {
- /* r8 = !!random();
- * call pruner()
- * if (r8)
- * do something bad;
- */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: functional",
- .insns = {
- /* r0 = 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* prep for direct packet access via r2 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
-
- /* reg, bit 63 or bit 0 set, taken */
- BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
- BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
- BPF_EXIT_INSN(),
-
- /* reg, bit 62, not taken */
- BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
- BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_EXIT_INSN(),
-
- /* imm, any bit set, taken */
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
- BPF_EXIT_INSN(),
-
- /* imm, bit 31 set, taken */
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
- BPF_EXIT_INSN(),
-
- /* all good - return r0 == 2 */
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .runs = 7,
- .retvals = {
- { .retval = 2,
- .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
- },
- { .retval = 2,
- .data64 = { (1ULL << 63) | (1U << 31), }
- },
- { .retval = 2,
- .data64 = { (1ULL << 31) | (1U << 0), }
- },
- { .retval = 2,
- .data64 = { (__u32)-1, }
- },
- { .retval = 2,
- .data64 = { ~0x4000000000000000ULL, }
- },
- { .retval = 0,
- .data64 = { 0, }
- },
- { .retval = 0,
- .data64 = { ~0ULL, }
- },
- },
- },
- {
- "jset: sign-extend",
- .insns = {
- /* r0 = 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* prep for direct packet access via r2 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
-
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
- BPF_EXIT_INSN(),
+/* BPF_DIRECT_PKT_R2 contains 7 instructions: it initializes the default
+ * return value to 0 and performs the setup needed for direct packet access
+ * through r2. The allowed access range is 8 bytes.
+ */
+#define BPF_DIRECT_PKT_R2 \
+ BPF_MOV64_IMM(BPF_REG_0, 0), \
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
+ offsetof(struct __sk_buff, data)), \
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
+ offsetof(struct __sk_buff, data_end)), \
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
+ BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
+ BPF_EXIT_INSN()
+
+/* BPF_RAND_UEXT_R7 contains 4 instructions: it initializes R7 to a random
+ * positive u32 and zero-extends it to 64 bits.
+ */
+#define BPF_RAND_UEXT_R7 \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
+ BPF_FUNC_get_prandom_u32), \
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
+
+/* BPF_RAND_SEXT_R7 contains 5 instructions: it initializes R7 to a random
+ * negative u32 and sign-extends it to 64 bits.
+ */
+#define BPF_RAND_SEXT_R7 \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
+ BPF_FUNC_get_prandom_u32), \
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
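These helpers are meant to be dropped into the .insns arrays of the split-out tests. A hypothetical entry, not taken from this patch, showing the intended usage:

{
	"example: 8-byte packet read via BPF_DIRECT_PKT_R2",
	.insns = {
		BPF_DIRECT_PKT_R2,
		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 2,
},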
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
- },
- {
- "jset: known const compare",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .retval_unpriv = 1,
- .result_unpriv = ACCEPT,
- .retval = 1,
- .result = ACCEPT,
- },
- {
- "jset: known const compare bad",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: unknown const compare taken",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: unknown const compare not taken",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: half-known const compare",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- },
- {
- "jset: range",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- },
+static struct bpf_test tests[] = {
+#define FILL_ARRAY
+#include <verifier/tests.h>
+#undef FILL_ARRAY
};
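The tests[] array is now filled from a generated header rather than an open-coded list. A sketch of what the build is expected to generate as verifier/tests.h — one include line per snippet under verifier/, with the exact list coming from the Makefile:

/* Generated header, illustrative contents only */
#ifdef FILL_ARRAY
#include "and.c"
#include "array_access.c"
/* ... one #include per file under verifier/ ... */
#endif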
static int probe_filter_length(const struct bpf_insn *fp)
@@ -15491,6 +267,16 @@ static int probe_filter_length(const struct bpf_insn *fp)
return len + 1;
}
+static bool skip_unsupported_map(enum bpf_map_type map_type)
+{
+ if (!bpf_probe_map_type(map_type, 0)) {
+ printf("SKIP (unsupported map type %d)\n", map_type);
+ skips++;
+ return true;
+ }
+ return false;
+}
+
static int create_map(uint32_t type, uint32_t size_key,
uint32_t size_value, uint32_t max_elem)
{
@@ -15498,8 +284,11 @@ static int create_map(uint32_t type, uint32_t size_key,
fd = bpf_create_map(type, size_key, size_value, max_elem,
type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
- if (fd < 0)
+ if (fd < 0) {
+ if (skip_unsupported_map(type))
+ return -1;
printf("Failed to create hash map '%s'!\n", strerror(errno));
+ }
return fd;
}
@@ -15549,6 +338,8 @@ static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), max_elem, 0);
if (mfd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
+ return -1;
printf("Failed to create prog array '%s'!\n", strerror(errno));
return -1;
}
@@ -15579,15 +370,20 @@ static int create_map_in_map(void)
inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(int), 1, 0);
if (inner_map_fd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
+ return -1;
printf("Failed to create array '%s'!\n", strerror(errno));
return inner_map_fd;
}
outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
sizeof(int), inner_map_fd, 1, 0);
- if (outer_map_fd < 0)
+ if (outer_map_fd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
+ return -1;
printf("Failed to create array of maps '%s'!\n",
strerror(errno));
+ }
close(inner_map_fd);
@@ -15602,10 +398,105 @@ static int create_cgroup_storage(bool percpu)
fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
TEST_DATA_LEN, 0, 0);
- if (fd < 0)
+ if (fd < 0) {
+ if (skip_unsupported_map(type))
+ return -1;
printf("Failed to create cgroup storage '%s'!\n",
strerror(errno));
+ }
+
+ return fd;
+}
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+#define BTF_TYPE_ENC(name, info, size_or_type) \
+ (name), (info), (size_or_type)
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+#define BTF_MEMBER_ENC(name, type, bits_offset) \
+ (name), (type), (bits_offset)
+
+struct btf_raw_data {
+ __u32 raw_types[64];
+ const char *str_sec;
+ __u32 str_sec_size;
+};
+
+/* struct bpf_spin_lock {
+ * int val;
+ * };
+ * struct val {
+ * int cnt;
+ * struct bpf_spin_lock l;
+ * };
+ */
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
+static __u32 btf_raw_types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* struct bpf_spin_lock */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
+ BTF_MEMBER_ENC(15, 1, 0), /* int val; */
+ /* struct val */ /* [3] */
+ BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
+ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
+ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+};
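The numeric arguments in btf_raw_types are offsets into btf_str_sec plus packed kind/vlen words; decoding a few of them by hand, informational only:

/* String offsets in "\0bpf_spin_lock\0val\0cnt\0l":
 *   1 -> "bpf_spin_lock", 15 -> "val", 19 -> "cnt", 23 -> "l"
 * so BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8) is
 * "struct val", 8 bytes, two members, and BTF_MEMBER_ENC(23, 2, 32)
 * is "struct bpf_spin_lock l;" at bit offset 32.
 *
 * BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2) packs to (4 << 24) | 2, assuming
 * BTF_KIND_STRUCT == 4 as defined in the UAPI btf.h.
 */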
+
+static int load_btf(void)
+{
+ struct btf_header hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(btf_raw_types),
+ .str_off = sizeof(btf_raw_types),
+ .str_len = sizeof(btf_str_sec),
+ };
+ void *ptr, *raw_btf;
+ int btf_fd;
+
+ ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
+ sizeof(btf_str_sec));
+
+ memcpy(ptr, &hdr, sizeof(hdr));
+ ptr += sizeof(hdr);
+ memcpy(ptr, btf_raw_types, hdr.type_len);
+ ptr += hdr.type_len;
+ memcpy(ptr, btf_str_sec, hdr.str_len);
+ ptr += hdr.str_len;
+
+ btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
+ free(raw_btf);
+ if (btf_fd < 0)
+ return -1;
+ return btf_fd;
+}
+
+static int create_map_spin_lock(void)
+{
+ struct bpf_create_map_attr attr = {
+ .name = "test_map",
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .key_size = 4,
+ .value_size = 8,
+ .max_entries = 1,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 3,
+ };
+ int fd, btf_fd;
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+ attr.btf_fd = btf_fd;
+ fd = bpf_create_map_xattr(&attr);
+ if (fd < 0)
+ printf("Failed to create map with spin_lock\n");
return fd;
}
@@ -15627,6 +518,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_in_map = test->fixup_map_in_map;
int *fixup_cgroup_storage = test->fixup_cgroup_storage;
int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
+ int *fixup_map_spin_lock = test->fixup_map_spin_lock;
if (test->fill_helper)
test->fill_helper(test);
@@ -15743,6 +635,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_stacktrace++;
} while (*fixup_map_stacktrace);
}
+ if (*fixup_map_spin_lock) {
+ map_fds[13] = create_map_spin_lock();
+ do {
+ prog[*fixup_map_spin_lock].imm = map_fds[13];
+ fixup_map_spin_lock++;
+ } while (*fixup_map_spin_lock);
+ }
}
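A test that wants a spin_lock map patched in would then reference it the same way the other fixup arrays are used; a hypothetical entry, not part of this patch:

{
	"spin_lock map fixup example (hypothetical)",
	.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_LD_MAP_FD(BPF_REG_1, 0),	/* imm rewritten to map_fds[13] */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },	/* index of the BPF_LD_MAP_FD insn */
	.result = ACCEPT,
},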
static int set_admin(bool admin)
@@ -15808,6 +707,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
int run_errs, run_successes;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
+ int fixup_skips;
__u32 pflags;
int i, err;
@@ -15816,7 +716,13 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (!prog_type)
prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ fixup_skips = skips;
do_test_fixup(test, prog_type, prog, map_fds);
+ /* If there were some map skips during fixup due to missing bpf
+ * features, skip this test.
+ */
+ if (fixup_skips != skips)
+ return;
prog_len = probe_filter_length(prog);
pflags = 0;
@@ -15826,6 +732,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
pflags |= BPF_F_ANY_ALIGNMENT;
fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
+ if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
+ printf("SKIP (unsupported program type %d)\n", prog_type);
+ skips++;
+ goto close_fds;
+ }
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
@@ -15979,7 +890,7 @@ static bool test_as_unpriv(struct bpf_test *test)
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
- int i, passes = 0, errors = 0, skips = 0;
+ int i, passes = 0, errors = 0;
for (i = from; i < to; i++) {
struct bpf_test *test = &tests[i];
diff --git a/tools/testing/selftests/bpf/verifier/.gitignore b/tools/testing/selftests/bpf/verifier/.gitignore
new file mode 100644
index 000000000000..45984a364647
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/.gitignore
@@ -0,0 +1 @@
+tests.h
diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
new file mode 100644
index 000000000000..e0fad1548737
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/and.c
@@ -0,0 +1,50 @@
+{
+ "invalid and of negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid range check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
+ BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
new file mode 100644
index 000000000000..0dcecaf3ec6f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/array_access.c
@@ -0,0 +1,219 @@
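+/* Array map value accesses with constant, register, and variable offsets;
+ * valid variants bounds-check the index first, invalid ones miss a check
+ * or add two pointers together.
+ */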
+{
+ "valid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "valid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "valid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "valid map access into an array with a signed variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+ .result = REJECT,
+},
+{
+ "invalid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with no floor check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "R0 unbounded memory access",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3, 11 },
+ .errstr = "R0 pointer += pointer",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic.c b/tools/testing/selftests/bpf/verifier/basic.c
new file mode 100644
index 000000000000..b8d18642653a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic.c
@@ -0,0 +1,23 @@
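+/* Structural sanity checks: an empty program, an exit with R0
+ * uninitialized, and a program that never reaches BPF_EXIT.
+ */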
+{
+ "empty prog",
+ .insns = {
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+},
+{
+ "only exit insn",
+ .insns = {
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "no bpf_exit",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
+ },
+ .errstr = "not an exit",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_call.c b/tools/testing/selftests/bpf/verifier/basic_call.c
new file mode 100644
index 000000000000..a8c6ab4c1622
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_call.c
@@ -0,0 +1,50 @@
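+/* BPF_CALL encoding checks: reserved fields, unknown helper IDs, and
+ * argument registers clobbered by a previous helper call.
+ */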
+{
+ "invalid call insn1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode 8d",
+ .result = REJECT,
+},
+{
+ "invalid call insn2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_CALL uses reserved",
+ .result = REJECT,
+},
+{
+ "invalid function call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid func unknown#1234567",
+ .result = REJECT,
+},
+{
+ "invalid argument register",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "non-invalid argument register",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
new file mode 100644
index 000000000000..ed91a7b9a456
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -0,0 +1,134 @@
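+/* Basic ALU coverage: add/sub/mul, 32-bit zero extension, arithmetic
+ * shifts, an invalid 64-bit BPF_END encoding, and mov with src == dst.
+ */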
+{
+ "add+sub+mul",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_2, 3),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -3,
+},
+{
+ "xor32 zero extend check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_2, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
+ BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "arsh32 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 on imm 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -16069393,
+},
+{
+ "arsh32 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 on reg 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
+ BPF_MOV64_IMM(BPF_REG_1, 15),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 43724,
+},
+{
+ "arsh64 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "arsh64 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "invalid 64-bit BPF_END",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ {
+ .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
+ .dst_reg = BPF_REG_0,
+ .src_reg = 0,
+ .off = 0,
+ .imm = 32,
+ },
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode d7",
+ .result = REJECT,
+},
+{
+ "mov64 src == dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
+ /* Check bounds are OK */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "mov64 src != dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ /* Check bounds are OK */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_stack.c b/tools/testing/selftests/bpf/verifier/basic_stack.c
new file mode 100644
index 000000000000..b56f8117c09d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_stack.c
@@ -0,0 +1,64 @@
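+/* Stack access checks: out-of-bounds slots, uninitialized reads,
+ * frame-pointer arithmetic, and misaligned reads.
+ */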
+{
+ "stack out of bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack",
+ .result = REJECT,
+},
+{
+ "uninitialized stack1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 2 },
+ .errstr = "invalid indirect read from stack",
+ .result = REJECT,
+},
+{
+ "uninitialized stack2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid read from stack",
+ .result = REJECT,
+},
+{
+ "invalid fp arithmetic",
+ /* If this ever gets changed, make sure JITs can deal with it. */
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 subtraction from stack pointer",
+ .result = REJECT,
+},
+{
+ "non-invalid fp arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "misaligned read from stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c
new file mode 100644
index 000000000000..7a0aab3f2cd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c
@@ -0,0 +1,45 @@
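+/* Register-number validation for ST, STX, and LDX instructions. */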
+{
+ "invalid src register in STX",
+ .insns = {
+ BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R15 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in STX",
+ .insns = {
+ BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R14 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in ST",
+ .insns = {
+ BPF_ST_MEM(BPF_B, 14, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R14 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid src register in LDX",
+ .insns = {
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R12 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in LDX",
+ .insns = {
+ BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R11 is invalid",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
new file mode 100644
index 000000000000..d55f476f2237
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -0,0 +1,508 @@
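+/* Bounds tracking on map value pointers: subtraction, truncation,
+ * wrapping 32-bit addition, oversized shift counts, and offsets that
+ * overflow a signed 32-bit off+size.
+ */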
+{
+ "subtraction bounds (map value) variant 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+},
+{
+ "subtraction bounds (map value) variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "check subtraction on pointers for unpriv",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1, 9 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R9 pointer -= pointer prohibited",
+},
+{
+ "bounds check based on zero-extended MOV",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0x0000'0000'ffff'ffff */
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check based on sign-extended MOV. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 4294967295",
+ .result = REJECT
+},
+{
+ "bounds check based on sign-extended MOV. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xfff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT
+},
+{
+ "bounds check based on reg_off + var_off + insn_off. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "value_size=8 off=1073741825",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "bounds check based on reg_off + var_off + insn_off. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "value 1073741823",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "bounds check after truncation of non-boundary-crossing range",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ /* r2 = 0x10'0000'0000 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+ /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = [0x00, 0xff] */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check after truncation of boundary-crossing range (1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after truncation of boundary-crossing range (2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ * difference to previous test: truncation via MOV32
+ * instead of ALU32.
+ */
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after wrapping 32-bit addition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ /* r1 = 0x7fff'ffff */
+ BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+ /* r1 = 0xffff'fffe */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check after shift with oversized count operand",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ /* r1 = (u32)1 << (u32)32 = ? */
+ BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x0000, 0xffff] */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT
+},
+{
+ "bounds check after right shift of maybe-negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ /* r1 = [-0x01, 0xfe] */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+ /* r1 = 0 or 0xff'ffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* r1 = 0 or 0xffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after 32-bit right shift with 64-bit input",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = 2 */
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ /* r1 = 1<<32 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
+ /* r1 = 0 (NOT 2!) */
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
+ /* r1 = 0xffff'fffe (NOT 0!) */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
+ /* computes OOB pointer */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access",
+ .result = REJECT,
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 2147483646",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "pointer offset 1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "pointer offset -1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1000000),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 1000000000000",
+ .result = REJECT
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
new file mode 100644
index 000000000000..1fd07a4f27ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
@@ -0,0 +1,124 @@
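+/* Deducing scalar bounds from comparisons against constants, and
+ * rejecting the pointer/scalar mix-ups that may follow.
+ */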
+{
+ "check deducing bounds from const, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "check deducing bounds from const, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check deducing bounds from const, 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check deducing bounds from const, 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check deducing bounds from const, 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ /* Marks reg as unknown. */
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
new file mode 100644
index 000000000000..9baca7a75c42
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
@@ -0,0 +1,406 @@
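+/* Mixed signed and unsigned bounds checks on a scalar the verifier must
+ * treat as unknown: most variants are rejected with "unbounded min value",
+ * a few are provably safe and accepted.
+ */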
+{
+ "bounds checks mixing signed and unsigned, positive bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_6, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 min value is negative, either use unsigned",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 7",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 8",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 9",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 10",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 11",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ /* Dead branch. */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 12",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 13",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 14",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 15",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
new file mode 100644
index 000000000000..f24d50f09dbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
@@ -0,0 +1,44 @@
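+/* The sign-extended return value of bpf_get_stack() is used to
+ * bounds-check a second copy into the remaining space of the buffer.
+ */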
+{
+ "bpf_get_stack return R0 within range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_4, 256),
+ BPF_EMIT_CALL(BPF_FUNC_get_stack),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
+ BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_get_stack),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
new file mode 100644
index 000000000000..4004891afa9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -0,0 +1,1942 @@
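+/* BPF-to-BPF calls: call-instruction encoding, jump ranges, recursion,
+ * and tracking of return values across subprograms.
+ */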
+{
+ "calls: basic sanity",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: not on unpriviledged",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: div by 0 in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_2, 0),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: multiple ret types in subprog 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+},
+{
+ "calls: multiple ret types in subprog 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 16 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+},
+{
+ "calls: overlapping caller/callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn is not an exit or jmp",
+ .result = REJECT,
+},
+{
+ "calls: wrong recursive calls",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: wrong src reg",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+},
+{
+ "calls: wrong off value",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+},
+{
+ "calls: jump back loop",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn 0 to 0",
+ .result = REJECT,
+},
+{
+ "calls: conditional call",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: conditional call 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: conditional call 5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 6",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: using r0 returned by callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: using uninit r0 from callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "calls: callee is using r1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+},
+{
+ "calls: callee using args1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
+{
+ "calls: callee using wrong args2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "calls: callee using two args",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
+},
+{
+ "calls: callee changing pkt pointers",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ /* clear_all_pkt_pointers() has to walk all frames
+ * to make sure that pkt pointers in the caller
+ * are cleared when callee is calling a helper that
+ * adjusts packet size
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R6 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls with args",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN,
+},
+{
+ "calls: calls with stack arith",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "calls: calls with misaligned stack access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+},
+{
+ "calls: calls control flow, jump test",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 43,
+},
+{
+ "calls: calls control flow, jump test 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "jump out of range from insn 1 to 4",
+ .result = REJECT,
+},
+{
+ "calls: two calls with bad jump",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range from insn 11 to 9",
+ .result = REJECT,
+},
+{
+ "calls: recursive call. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "calls: recursive call. test2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "calls: unreachable code",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "unreachable insn 6",
+ .result = REJECT,
+},
+{
+ "calls: invalid call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+},
+{
+ "calls: invalid call 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+},
+{
+ "calls: jumping across function bodies. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: jumping across function bodies. test2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: call without exit",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+},
+{
+ "calls: call into middle of ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+},
+{
+ "calls: call into middle of other call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+},
+{
+ "calls: ld_abs with changing ctx data in callee",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
+ .result = REJECT,
+},
+{
+ "calls: two calls with bad fallthrough",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+},
+{
+ "calls: two calls with stack read",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: two calls with stack write",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ /* write into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* read from stack frame of main prog */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: stack overflow using two frames (pre-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+},
+{
+ "calls: stack overflow using two frames (post-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check using three frames. test1",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=256, stack_B=64
+ * and max(main+A, main+A+B) < 512
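+ * (i.e. 32+256 = 288 and 32+256+64 = 352, both under the 512 byte limit)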
+ */
+ .result = ACCEPT,
+},
+{
+ "calls: stack depth check using three frames. test2",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=64, stack_B=256
+ * and max(main+A, main+A+B) < 512
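+ * (i.e. 32+64 = 96 and 32+64+256 = 352, both under the 512 byte limit)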
+ */
+ .result = ACCEPT,
+},
+{
+ "calls: stack depth check using three frames. test3",
+ .insns = {
+ /* main */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ /* B */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=64, stack_A=224, stack_B=256
+ * and max(main+A, main+A+B) > 512
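+ * (i.e. 64+224 = 288 is fine, but 64+224+256 = 544 exceeds the 512 byte limit)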
+ */
+ .errstr = "combined stack",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check using three frames. test4",
+ /* void main(void) {
+ * func1(0);
+ * func1(1);
+ * func2(1);
+ * }
+ * void func1(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * } else {
+ * func2(alloc_or_recurse);
+ * }
+ * }
+ * void func2(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * }
+ * }
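+ *
+ * Note: the verifier tracks a single worst-case stack size per
+ * subprog, so func1 and func2 are each charged ~300 bytes and the
+ * static main->func1->func2 chain exceeds 512 bytes even though the
+ * two allocations never coexist at runtime.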
+ */
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "combined stack",
+},
+{
+ "calls: stack depth check using three frames. test5",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "call stack",
+ .result = REJECT,
+},
+{
+ "calls: spill into caller stack frame",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot spill",
+ .result = REJECT,
+},
+{
+ "calls: write into caller stack frame",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "calls: write into callee stack frame",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot return stack pointer",
+ .result = REJECT,
+},
+{
+ "calls: two calls with stack write and void return",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* write into stack frame of main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(), /* void return */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: ambiguous return value",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "calls: two calls that return map_value",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that return map_value with bool condition",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that return map_value with incorrect bool check",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
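+ /* bug under test: the check below is against 0 instead of 1, so
+ * fp-16 is read exactly in the case where subprog 2 wrote nothing there
+ */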
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -16+0 size 8",
+},
+{
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = ACCEPT,
+},
+{
+ "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls that receive map_value_ptr_or_null via arg. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that receive map_value_ptr_or_null via arg. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 0 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+},
+{
+ "calls: pkt_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ /* Marking is still kept, but not safe in all cases. */
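+ /* The callee spilled an unchecked pkt_ptr into this slot and only
+ * verified the packet range on one of its paths, so the
+ * unconditional read-back and packet write below must be rejected.
+ */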
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Marking is still kept and safe here. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Check marking propagated. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "same insn cannot be used with different",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: caller stack init to zero or map_value_or_null",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ /* fetch map_value_or_null or const_zero from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* store into map_value */
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* if (ctx == 0) return; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ /* else bpf_map_lookup() and *(fp - 8) = r0 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 13 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "calls: stack init to zero and pruning",
+ .insns = {
+ /* first make allocated_stack 16 byte */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ /* now fork the execution such that the false branch
+ * of the JGT insn will be verified second and it skips zero
+ * init of fp-8 stack slot. If stack liveness marking
+ * is missing live_read marks from call map_lookup
+ * processing then pruning will incorrectly assume
+ * that fp-8 stack slot was unused in the fall-through
+ * branch and will accept the program incorrectly
+ */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 6 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "calls: ctx read at start of subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "calls: cross frame pruning",
+ .insns = {
+ /* r8 = !!random();
+ * call pruner()
+ * if (r8)
+ * do something bad;
+ */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
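Each of these new files is a bare brace-enclosed initializer-list fragment, not a standalone translation unit; conceptually the runner pulls them into one array roughly as sketched below. The exact include mechanism lives in test_verifier.c and the Makefile, so the names here are assumptions, not taken from this hunk:

	/* test_verifier.c, conceptually: every fragment above becomes one
	 * element of the test array.
	 */
	static struct bpf_test tests[] = {
	#define FILL_ARRAY
	#include <verifier/tests.h>	/* generated list of these .c files */
	#undef FILL_ARRAY
	};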
diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c
new file mode 100644
index 000000000000..349c0862fb4c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cfg.c
@@ -0,0 +1,70 @@
+{
+ "unreachable",
+ .insns = {
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unreachable",
+ .result = REJECT,
+},
+{
+ "unreachable2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unreachable",
+ .result = REJECT,
+},
+{
+ "out of range jump",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "out of range jump2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "loop (back-edge)",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "loop2 (back-edge)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "conditional loop",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
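The back-edge rejections above reflect the loop-free CFG requirement of this verifier era: any jump whose target precedes it, conditional or not, fails CFG validation before per-instruction analysis starts. In restricted C a loop therefore only survives if the compiler fully unrolls it; a minimal sketch, assuming the selftests-style SEC() macro from bpf_helpers.h:

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("socket")
	int looped(struct __sk_buff *skb)
	{
		int i, sum = 0;

		/* If clang does not unroll this, the emitted backward branch
		 * is rejected with "back-edge", just like the hand-written
		 * BPF_JMP_IMM(BPF_JA, 0, 0, -1) cases above.
		 */
		for (i = 0; i < 4; i++)
			sum += i;
		return sum;
	}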
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c
new file mode 100644
index 000000000000..6d65fe3e7321
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c
@@ -0,0 +1,72 @@
+{
+ "bpf_exit with invalid return code. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x0; 0xffffffff)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x0; 0x3)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x2; 0x0)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 is not a known value (ctx)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test7",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has unknown scalar value",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
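For CGROUP_SOCK the verifier requires the return value to be provably within [0, 1] at exit, which is why the tests above accept an AND with 1 but reject an unbounded load, a 2-bit mask, the constant 2, a raw context pointer and an unknown product. A minimal restricted-C equivalent of the accepted shape (libbpf/selftests SEC() naming assumed):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("cgroup/sock")
	int sock_create(struct bpf_sock *ctx)
	{
		/* Masking with 1 bounds R0 to {0, 1}, matching "test2" above;
		 * returning ctx->protocol unmasked would be rejected like
		 * "test1"/"test7".
		 */
		return ctx->protocol & 1;
	}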
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_skb.c b/tools/testing/selftests/bpf/verifier/cgroup_skb.c
new file mode 100644
index 000000000000..52e4c03b076b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_skb.c
@@ -0,0 +1,197 @@
+{
+ "direct packet read test#1 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=76 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#2 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_proto)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, priority)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, priority)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, ingress_ifindex)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#3 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#4 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of tc_classid for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of data_meta for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_meta)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of flow_keys for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, flow_keys)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid write access to napi_id for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=152 size=8",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_storage.c b/tools/testing/selftests/bpf/verifier/cgroup_storage.c
new file mode 100644
index 000000000000..97057c0a1b8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_storage.c
@@ -0,0 +1,220 @@
+{
+ "valid cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .errstr_unpriv = "R2 leaks addr into helper function",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "valid per-cpu cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid per-cpu cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .errstr_unpriv = "R2 leaks addr into helper function",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
diff --git a/tools/testing/selftests/bpf/verifier/const_or.c b/tools/testing/selftests/bpf/verifier/const_or.c
new file mode 100644
index 000000000000..84446dfc7c1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/const_or.c
@@ -0,0 +1,60 @@
+{
+ "constant register |= constant should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant register should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 13),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant register should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 24),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
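The accept/reject split above is plain arithmetic on the constant the verifier tracks through the OR: with R1 = fp-48 there are only 48 bytes of stack between the buffer and the frame pointer, so the size handed to bpf_probe_read() must not exceed 48.

	/* 34 | 13 = 0b100010 | 0b001101 = 0b101111 = 47  -> fits in 48 bytes, ACCEPT
	 * 34 | 24 = 0b100010 | 0b011000 = 0b111010 = 58  -> overruns fp, hence
	 *           "invalid stack type R1 off=-48 access_size=58", REJECT
	 */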
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
new file mode 100644
index 000000000000..92762c08f5e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -0,0 +1,93 @@
+{
+ "context stores via ST",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ST stores into R1 ctx is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "context stores via XADD",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+ BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "arithmetic ops make PTR_TO_CTX unusable",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct __sk_buff, data) -
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "dereference of modified ctx ptr",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "pass unmodified ctx pointer to helper",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "pass modified ctx pointer to helper, 1",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass modified ctx pointer to helper, 2",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr_unpriv = "dereference of modified ctx ptr",
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass modified ctx pointer to helper, 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+},
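The common thread in ctx.c is that PTR_TO_CTX must reach loads, stores and helpers unmodified; any pointer arithmetic, even by a known or bounded amount, demotes it. Roughly, the rejected cases correspond to the illustrative C below (not part of this patch):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("classifier")
	int bad_ctx(struct __sk_buff *skb)
	{
		/* Arithmetic on the context argument yields a "modified ctx
		 * ptr"; dereferencing it or passing it to a helper is
		 * rejected, as in the tests above.
		 */
		struct __sk_buff *moved = (void *)skb + 4;

		return moved->mark;
	}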
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
new file mode 100644
index 000000000000..c6c69220a569
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
@@ -0,0 +1,181 @@
+{
+ "valid access family in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, family)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_ip4 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_ip4 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_port in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_port in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_ip6 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access local_ip6 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid 64B read of size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid read past end of SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, size) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid read offset in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, family) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet read for SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "direct packet write for SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "overlapping checks for direct packet access SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
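The direct packet tests encode the canonical bounds-check pattern: compute data + N, compare against data_end, and only then touch the bytes. In restricted C this is roughly the following sketch; the casts and SEC() name follow the usual selftest idiom and are assumptions, not taken from this patch:

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	SEC("sk_msg")
	int msg_prog(struct sk_msg_md *msg)
	{
		void *data = (void *)(long)msg->data;
		void *data_end = (void *)(long)msg->data_end;

		/* Mirrors the JGT check above: without this guard the byte
		 * load below would be an invalid packet access.
		 */
		if (data + 8 > data_end)
			return SK_PASS;
		return *(char *)data ? SK_PASS : SK_DROP;
	}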
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
new file mode 100644
index 000000000000..c660deb582f1
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -0,0 +1,1034 @@
+{
+ "access skb fields ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "access skb fields bad1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "access skb fields bad2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "access skb fields bad3",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -12),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "access skb fields bad4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -13),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff family",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff local_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff local_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "valid access __sk_buff family",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff local_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff local_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "invalid access of tc_classid for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of skb->mark for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->mark is not writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->tc_index is writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "check skb->priority is writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, priority)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "direct packet read for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "direct packet write for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "overlapping checks for direct packet access SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "check skb->mark is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
+{
+ "check skb->tc_index is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
+{
+ "check cb access: byte",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "__sk_buff->hash, offset 0, byte store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, hash)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "__sk_buff->tc_index, offset 3, byte store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check skb->hash byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: byte, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check cb access: half",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: half, unaligned",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check __sk_buff->hash, offset 0, half store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, hash)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->tc_index, offset 2, half store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check skb->hash half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash half load permitted 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash half load not permitted, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check skb->hash half load not permitted, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check cb access: half, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check cb access: word",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: word, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: double, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check cb access: double, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->ifindex dw store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, ifindex)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->ifindex dw load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, ifindex)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check cb access: double, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check out of range skb->cb access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 256),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+},
+{
+ "write skb fields from socket prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .errstr_unpriv = "R1 leaks addr",
+ .result_unpriv = REJECT,
+},
+{
+ "write skb fields from tc_cls_act prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "check skb->data half load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "read gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access off=164 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read gso_segs from CLS",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "check wire_len is not readable by sockets",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check wire_len is readable by tc classifier",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "check wire_len is not writable by tc classifier",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
new file mode 100644
index 000000000000..50a8a63be4ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -0,0 +1,159 @@
+{
+ "dead code: start",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: mid 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: mid 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "dead code: end 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: end 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: end 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: tail of main + func",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: tail of main + two functions",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: function in the middle and mid of another func",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: middle of main before call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "dead code: start of a function",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
new file mode 100644
index 000000000000..e3fc22e672c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -0,0 +1,633 @@
+{
+ "pkt_end - pkt_start is allowed",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access off=76",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+},
+{
+ "direct packet access: test4 (write)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test5 (pkt_end >= reg, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test6 (pkt_end >= reg, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test7 (pkt_end >= reg, both accesses)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test8 (double test, variant 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test9 (double test, variant 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test10 (write invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test11 (shift, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test12 (and, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test13 (branches, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 14),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 24),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_IMM(BPF_REG_5, 12),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test15 (spill with xadd)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 4096),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test16 (arith on data_end)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test17 (pruning, alignment)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_A(-6),
+ },
+ .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "direct packet access: test18 (imm += pkt_ptr, 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 8),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test19 (imm += pkt_ptr, 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test20 (x += pkt_ptr, 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test21 (x += pkt_ptr, 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test22 (x += pkt_ptr, 3)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test23 (x += pkt_ptr, 4)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test24 (x += pkt_ptr, 5)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 64),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test25 (marking on <, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test26 (marking on <, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test27 (marking on <=, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test28 (marking on <=, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c
new file mode 100644
index 000000000000..698e3779fdd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c
@@ -0,0 +1,40 @@
+{
+ "direct stack access with 32-bit wraparound. test1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer and 2147483647",
+ .result = REJECT
+},
+{
+ "direct stack access with 32-bit wraparound. test2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer and 1073741823",
+ .result = REJECT
+},
+{
+ "direct stack access with 32-bit wraparound. test3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer offset 1073741822",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result = REJECT
+},
diff --git a/tools/testing/selftests/bpf/verifier/div0.c b/tools/testing/selftests/bpf/verifier/div0.c
new file mode 100644
index 000000000000..7685edfbcf71
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/div0.c
@@ -0,0 +1,184 @@
+{
+ "DIV32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_MOV32_IMM(BPF_REG_2, 16),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 8,
+},
+{
+ "DIV32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 by 0, zero check, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "MOD32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 3),
+ BPF_MOV32_IMM(BPF_REG_2, 5),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "MOD32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD64 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "MOD64 by 0, zero check 2, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, -1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = -1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/div_overflow.c b/tools/testing/selftests/bpf/verifier/div_overflow.c
new file mode 100644
index 000000000000..bd3f38dbe796
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/div_overflow.c
@@ -0,0 +1,104 @@
+/* Just make sure that JITs use udiv/umod; otherwise we would get
+ * an exception from INT_MIN/-1 overflow, similar to the one raised
+ * by division by zero.
+ */
+{
+ "DIV32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "MOD32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+},
+{
+ "MOD32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+},
+{
+ "MOD64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
new file mode 100644
index 000000000000..1f39d845c64f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
@@ -0,0 +1,614 @@
+{
+ "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, bitwise AND, zero included",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP (signed), correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, bounds + offset",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ /* because max wasn't checked, signed min is negative */
+ .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP (signed), no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val), 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=49",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map adjusted, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 20, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map adjusted, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 0 /* csum_diff of 64-byte packet */,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: 8 bytes leak",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+32 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: 8 bytes no leak (init memory)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_packet_access.c b/tools/testing/selftests/bpf/verifier/helper_packet_access.c
new file mode 100644
index 000000000000..ae54587e9829
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_packet_access.c
@@ -0,0 +1,460 @@
+{
+ "helper access to packet: test1, valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test2, unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test3, variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test4, packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test5, packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test6, cls valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test7, cls unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test8, cls variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test9, cls packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test10, cls packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test11, cls unsuitable helper 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 42),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test12, cls unsuitable helper 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test13, cls helper ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test14, cls helper ok sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test15, cls helper fail sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test16, cls helper fail range 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test17, cls helper fail range 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, -9),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R2 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test18, cls helper fail range 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, ~0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R2 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test19, cls helper range zero",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test20, pkt end as input",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=pkt_end expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test21, wrong reg",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_value_access.c b/tools/testing/selftests/bpf/verifier/helper_value_access.c
new file mode 100644
index 000000000000..7572e403ddb9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_value_access.c
@@ -0,0 +1,953 @@
+{
+ "helper access to map: full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=56",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: negative range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 unbounded memory access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=45",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 unbounded memory access",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <=, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <=, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 unbounded memory access",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, good access 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 min value is negative",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, good access 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 min value is negative",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map lookup helper access to map",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 8 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map update helper access to map",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map update helper access to map: wrong size",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_16b = { 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=0 size=16",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, offsetof(struct other_val, bar)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm): out-of-bound 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct other_val) - 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm): out-of-bound 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct other_val, bar)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg): out-of-bound 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct other_val) - 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg): out-of-bound 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, -4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar) + 1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 11 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=9 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jit.c b/tools/testing/selftests/bpf/verifier/jit.c
new file mode 100644
index 000000000000..be488b4495a3
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jit.c
@@ -0,0 +1,88 @@
+{
+ "jit: lsh, rsh, arsh by 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 0xff),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: mov32 for ldimm64, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: mov32 for ldimm64, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
+ BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: various mul tests",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
+ BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
+ BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
+ BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
new file mode 100644
index 000000000000..f0961c58581e
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -0,0 +1,746 @@
+{
+ "jset32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ /* reg, high bits shouldn't be tested */
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, -2, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1ULL << 63, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
+ BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1ULL << 63, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jeq32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 2,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jeq32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x7000000000000001),
+ BPF_JMP32_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 2, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jeq32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jne32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 2,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jne32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP32_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 2, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jne32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jge32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, UINT_MAX - 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jge32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, UINT_MAX | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { INT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jge32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jgt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_7, UINT_MAX - 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jgt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (UINT_MAX - 1) | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jgt32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jle32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, INT_MAX, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { INT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jle32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (INT_MAX - 1) | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX | 1ULL << 32, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX - 2, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jle32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jlt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JLT, BPF_REG_7, INT_MAX, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX - 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jlt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, INT_MAX | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX | 1ULL << 32, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { (INT_MAX - 1) | 3ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jlt32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsge32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsge32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (__u32)-1 | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 2,
+ .data64 = { 0x7fffffff | 1ULL << 32, }
+ },
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsge32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsgt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSGT, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsgt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 0,
+ .data64 = { 0x1ffffffffULL, }
+ },
+ { .retval = 2,
+ .data64 = { 0x7fffffff, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsgt32: min/max deduction",
+ .insns = {
+ BPF_RAND_SEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, (__u32)(-2) | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsle32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSLE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsle32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 0,
+ .data64 = { 0x7fffffff | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsle32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jslt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jslt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7fffffff | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 2,
+ .data64 = { 0xffffffff, }
+ },
+ { .retval = 0,
+ .data64 = { 0x7fffffff | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jslt32: min/max deduction",
+ .insns = {
+ BPF_RAND_SEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, (__u32)(-1) | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
new file mode 100644
index 000000000000..8dcd4e0383d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -0,0 +1,167 @@
+{
+ "jset: functional",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ /* reg, bit 63 or bit 0 set, taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+
+ /* reg, bit 62, not taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, any bit set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, bit 31 set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ /* all good - return r0 == 2 */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 7,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 2,
+ .data64 = { ~0x4000000000000000ULL, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ { .retval = 0,
+ .data64 = { ~0ULL, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset: sign-extend",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset: known const compare",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .retval_unpriv = 1,
+ .result_unpriv = ACCEPT,
+ .retval = 1,
+ .result = ACCEPT,
+},
+{
+ "jset: known const compare bad",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: unknown const compare taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: unknown const compare not taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: half-known const compare",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+},
+{
+ "jset: range",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jump.c b/tools/testing/selftests/bpf/verifier/jump.c
new file mode 100644
index 000000000000..8e6fcc8940f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jump.c
@@ -0,0 +1,180 @@
+{
+ "jump test 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 14),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 11),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 19),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 15),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 11),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 24 },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = -ENOENT,
+},
+{
+ "jump test 4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/junk_insn.c b/tools/testing/selftests/bpf/verifier/junk_insn.c
new file mode 100644
index 000000000000..89d690f1992a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/junk_insn.c
@@ -0,0 +1,45 @@
+{
+ "junk insn",
+ .insns = {
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+},
+{
+ "junk insn2",
+ .insns = {
+ BPF_RAW_INSN(1, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_LDX uses reserved fields",
+ .result = REJECT,
+},
+{
+ "junk insn3",
+ .insns = {
+ BPF_RAW_INSN(-1, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode ff",
+ .result = REJECT,
+},
+{
+ "junk insn4",
+ .insns = {
+ BPF_RAW_INSN(-1, -1, -1, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode ff",
+ .result = REJECT,
+},
+{
+ "junk insn5",
+ .insns = {
+ BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ALU uses reserved fields",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_abs.c b/tools/testing/selftests/bpf/verifier/ld_abs.c
new file mode 100644
index 000000000000..f6599d2ec22d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_abs.c
@@ -0,0 +1,286 @@
+{
+ "ld_abs: check calling conv, r1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R5 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "ld_abs: tests on r6 and skb data reload helper",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42 /* ultimate return value */,
+},
+{
+ "ld_abs: invalid op 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_DW, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "unknown opcode",
+},
+{
+ "ld_abs: invalid op 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 256),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "unknown opcode",
+},
+{
+ "ld_abs: nmap reduced",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
+ BPF_MOV32_IMM(BPF_REG_0, 18),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
+ BPF_LD_IND(BPF_W, BPF_REG_7, 14),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
+ BPF_MOV32_IMM(BPF_REG_0, 280971478),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
+ BPF_MOV32_IMM(BPF_REG_0, 22),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+ BPF_LD_IND(BPF_H, BPF_REG_7, 14),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
+ BPF_MOV32_IMM(BPF_REG_0, 17366),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 256),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 256,
+},
+{
+ "ld_abs: div + abs, test 1",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+ BPF_LD_ABS(BPF_B, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
+{
+ "ld_abs: div + abs, test 2",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+ BPF_LD_ABS(BPF_B, 128),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: div + abs, test 3",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: div + abs, test 4",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_B, 256),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: vlan + abs, test 1",
+ .insns = { },
+ .data = {
+ 0x34,
+ },
+ .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0xbef,
+},
+{
+ "ld_abs: vlan + abs, test 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 0x34,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "ld_abs: jump around ld_abs",
+ .insns = { },
+ .data = {
+ 10, 11,
+ },
+ .fill_helper = bpf_fill_jump_around_ld_abs,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_dw.c b/tools/testing/selftests/bpf/verifier/ld_dw.c
new file mode 100644
index 000000000000..d2c75b889598
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_dw.c
@@ -0,0 +1,36 @@
+{
+ "ld_dw: xor semi-random 64 bit imms, test 1",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 4090,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 2",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2047,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 3",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 511,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 4",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 5,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
new file mode 100644
index 000000000000..28b8c805a293
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
@@ -0,0 +1,141 @@
+{
+ "test1 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid BPF_LD_IMM insn",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "test2 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid BPF_LD_IMM insn",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "test3 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test4 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test5 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test6 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "test7 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "test8 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "uses reserved fields",
+ .result = REJECT,
+},
+{
+ "test9 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test10 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test11 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test12 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "not pointing to valid bpf_map",
+ .result = REJECT,
+},
+{
+ "test13 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_ind.c b/tools/testing/selftests/bpf/verifier/ld_ind.c
new file mode 100644
index 000000000000..079734227538
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_ind.c
@@ -0,0 +1,72 @@
+{
+ "ld_ind: check calling conv, r1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R5 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/leak_ptr.c b/tools/testing/selftests/bpf/verifier/leak_ptr.c
new file mode 100644
index 000000000000..d6eec17f2cd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/leak_ptr.c
@@ -0,0 +1,67 @@
+{
+ "leak pointer into ctx 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 2 },
+ .errstr_unpriv = "R2 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+},
+{
+ "leak pointer into ctx 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+},
+{
+ "leak pointer into ctx 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .errstr_unpriv = "R2 leaks addr into ctx",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "leak pointer into map val",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr_unpriv = "R6 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c
new file mode 100644
index 000000000000..2cab6a3966bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/lwt.c
@@ -0,0 +1,189 @@
+{
+ "invalid direct packet write for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "invalid direct packet write for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+},
+{
+ "direct packet write for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "direct packet read for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "direct packet read for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+},
+{
+ "direct packet read for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "overlapping checks for direct packet access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "make headroom for LWT_XMIT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ /* split for s390 to succeed */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "invalid access of tc_classid for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of tc_classid for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of tc_classid for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->tc_classid half load not permitted for lwt prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c
new file mode 100644
index 000000000000..2798927ee9ff
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_in_map.c
@@ -0,0 +1,62 @@
+{
+ "map in map access",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .result = ACCEPT,
+},
+{
+ "invalid inner map pointer",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .errstr = "R1 pointer arithmetic on map_ptr prohibited",
+ .result = REJECT,
+},
+{
+ "forgot null checking on the inner map pointer",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .errstr = "R1 type=map_value_or_null expected=map_ptr",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
new file mode 100644
index 000000000000..cd26ee6b7b1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
@@ -0,0 +1,100 @@
+{
+ "calls: two calls returning different map pointers for lookup (hash, array)",
+ .insns = {
+ /* main prog */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_CALL_REL(11),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_CALL_REL(12),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* subprog 1 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* subprog 2 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 13 },
+ .fixup_map_array_48b = { 16 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: two calls returning different map pointers for lookup (hash, map in map)",
+ .insns = {
+ /* main prog */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_CALL_REL(11),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_CALL_REL(12),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* subprog 1 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* subprog 2 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_in_map = { 16 },
+ .fixup_map_array_48b = { 13 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'map_ptr'",
+},
+{
+ "cond: two branches returning different map pointers for lookup (tail, tail)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 5 },
+ .fixup_prog2 = { 2 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "tail_call abusing map_ptr",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "cond: two branches returning same map pointers for lookup (tail, tail)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 2, 5 },
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .retval = 42,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_ret_val.c b/tools/testing/selftests/bpf/verifier/map_ret_val.c
new file mode 100644
index 000000000000..bdd0e8d18333
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_ret_val.c
@@ -0,0 +1,65 @@
+{
+ "invalid map_fd for function call",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fd 0 is not pointing to valid bpf_map",
+ .result = REJECT,
+},
+{
+ "don't check return value before access",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access 'map_value_or_null'",
+ .result = REJECT,
+},
+{
+ "access memory with incorrect alignment",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "misaligned value access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "sometimes access memory with incorrect alignment",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access",
+ .errstr_unpriv = "R0 leaks addr",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/masking.c b/tools/testing/selftests/bpf/verifier/masking.c
new file mode 100644
index 000000000000..6e1358c544fd
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/masking.c
@@ -0,0 +1,322 @@
+{
+ "masking, test out of bounds 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 5),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 4",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 5",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 6",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 11",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 12",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 4),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 4,
+},
+{
+ "masking, test in bounds 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xfffffffe,
+},
+{
+ "masking, test in bounds 4",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
+ BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xabcde,
+},
+{
+ "masking, test in bounds 5",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 6",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 46),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 46,
+},
+{
+ "masking, test in bounds 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -46),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 46,
+},
+{
+ "masking, test in bounds 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -47),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
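The masking cases above all exercise the same branch-free bounds mask used for speculative out-of-bounds hardening: compute (size - 1) - index, OR the index back in, negate, arithmetic-shift right by 63, and AND the result into the index, so an in-bounds index is preserved and anything out of bounds collapses to 0 (which is why every out-of-bounds case expects retval 0). A plain C sketch of that arithmetic, assuming the compiler's signed right shift is arithmetic (true for gcc and clang, matching BPF_ARSH):

#include <stdint.h>

/* Returns idx when 0 <= idx < size, 0 otherwise, without branching. */
static uint64_t mask_index(uint64_t idx, uint64_t size)
{
	int64_t mask = (int64_t)(size - 1 - idx) | (int64_t)idx;

	mask = -mask;		/* BPF_NEG  */
	mask >>= 63;		/* BPF_ARSH: all-ones if in bounds, 0 if not */
	return idx & (uint64_t)mask;
}

For instance, mask_index(4, 5) yields 4 as in "test in bounds 1", while mask_index(1, 1) yields 0 as in "test out of bounds 8".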
diff --git a/tools/testing/selftests/bpf/verifier/meta_access.c b/tools/testing/selftests/bpf/verifier/meta_access.c
new file mode 100644
index 000000000000..205292b8dd65
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/meta_access.c
@@ -0,0 +1,235 @@
+{
+ "meta access, test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet, off=-8",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_meta),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R3 !read_ok",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test7",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test10",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_IMM(BPF_REG_5, 42),
+ BPF_MOV64_IMM(BPF_REG_6, 24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test11",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_IMM(BPF_REG_5, 42),
+ BPF_MOV64_IMM(BPF_REG_6, 24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test12",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
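The meta access cases check that a load from data_meta is only accepted once the program has proven data_meta + len <= data (the ACCEPT cases such as test1, test7, test8, test11 and test12), and that bounding against data_end instead, or skipping the check entirely, is rejected. Roughly the same check as it is usually written in restricted C with libbpf (a sketch; the section name and return codes are the usual conventions, not taken from this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int meta_read(struct xdp_md *ctx)
{
	void *data_meta = (void *)(long)ctx->data_meta;
	void *data = (void *)(long)ctx->data;

	/* Bound the metadata area against data, as the ACCEPT cases do;
	 * bounding it against data_end is what test3/test4 reject.
	 */
	if (data_meta + 8 > data)
		return XDP_PASS;

	return *(volatile __u8 *)data_meta ? XDP_PASS : XDP_DROP;
}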
diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
new file mode 100644
index 000000000000..471c1a5950d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
@@ -0,0 +1,59 @@
+{
+ "check bpf_perf_event_data->sample_period byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 7),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 6),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period word load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 4),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period dword load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
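These four cases pin down that byte, half, word and dword loads of sample_period are all permitted for BPF_PROG_TYPE_PERF_EVENT programs, with the endianness #ifdefs only selecting which end of the 64-bit field the narrow load reads. In C the narrow load is simply a smaller-width access of the same context field (a sketch, assuming clang -target bpf and the usual libbpf headers):

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

SEC("perf_event")
int sample_period_loads(struct bpf_perf_event_data *ctx)
{
	__u64 full = ctx->sample_period;		/* dword load */
	__u32 low  = *(__u32 *)&ctx->sample_period;	/* narrow word load (low word on LE) */

	return full != 0 && low != 0;
}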
diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
new file mode 100644
index 000000000000..bbdba990fefb
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
@@ -0,0 +1,74 @@
+{
+ "prevent map lookup in sockmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "prevent map lookup in sockhash",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockhash = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "prevent map lookup in xskmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_xskmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "prevent map lookup in stack trace",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_stacktrace = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "prevent map lookup in prog array",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
+},
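All five cases expect the same class of failure: bpf_map_lookup_elem() may not be called on sockmap (map_type 15), sockhash (18), xskmap (17), stack trace (7) or prog array (3) maps, with the fixup_* annotations patching the map fd into the BPF_LD_MAP_FD slot at instruction 3. The rejected shape written out in C, to make clear the failure comes from the verifier rather than the compiler (a sketch; the map definition style is a libbpf convention, not part of this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} sock_map SEC(".maps");

SEC("sockops")
int lookup_sockmap(struct bpf_sock_ops *ctx)
{
	__u32 key = 0;

	/* Compiles fine, but is refused at load time with
	 * "cannot pass map_type 15 into func bpf_map_lookup_elem",
	 * as in the sockmap case above.
	 */
	if (bpf_map_lookup_elem(&sock_map, &key))
		return 1;
	return 0;
}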
diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c
new file mode 100644
index 000000000000..193d9e87d5a9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/raw_stack.c
@@ -0,0 +1,305 @@
+{
+ "raw_stack: no skb_load_bytes",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ /* Call to skb_load_bytes() omitted. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -8+0 size 8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, negative len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, negative len 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, ~0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, zero len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, no init",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, init",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs around bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs corruption",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs corruption 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R3 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs + data",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-513 access_size=8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-1 access_size=8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-512 access_size=0",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, large access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 512),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
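The raw_stack series exercises passing a slice of the program's own stack as the destination of bpf_skb_load_bytes(): the pointer must stay inside the 512-byte stack, the length must be a known, non-negative, non-zero constant, and the buffer does not need to be initialized beforehand (the "no init" case is ACCEPT). The legitimate pattern in restricted C (a sketch; helper signature as in the UAPI helper list, section name per the usual libbpf convention):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int load_bytes(struct __sk_buff *skb)
{
	__u64 buf;	/* raw stack slot, no initialization needed before the call */

	if (bpf_skb_load_bytes(skb, 4, &buf, sizeof(buf)) < 0)
		return 0;

	return buf != 0;
}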
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
new file mode 100644
index 000000000000..3ed3593bd8b6
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -0,0 +1,607 @@
+{
+ "reference tracking: leak potential reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference on stack",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference on stack 2",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: zero potential reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: copy and zero potential references",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference without check",
+ .insns = {
+ BPF_SK_LOOKUP,
+ /* reference in r0 may be NULL */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=sock_or_null expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference 2",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference twice",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference twice inside branch",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: alloc, check, free in one subbranch",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (offsetof(skb, mark) > data_len) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
+ /* Leak reference in R0 */
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "reference tracking: alloc, check, free in both subbranches",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (offsetof(skb, mark) > data_len) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "reference tracking in call: free reference in subprog",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking in call: free reference in subprog and outside",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: alloc & leak reference in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
+ BPF_SK_LOOKUP,
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: alloc in subprog, release outside",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(), /* return sk */
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .result = ACCEPT,
+},
+{
+ "reference tracking in call: sk_ptr leak into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: sk_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* now the sk_ptr is verified, free the reference */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: allow LD_ABS",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: forbid LD_ABS while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+},
+{
+ "reference tracking: allow LD_IND",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "reference tracking: forbid LD_IND while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+},
+{
+ "reference tracking: check reference or tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference then tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 18 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: leak possible reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP,
+ /* bpf_tail_call() */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 16 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak checked reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* if (!sk) goto end */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+},
+{
+ "reference tracking: mangle and release sock_or_null",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
+ .result = REJECT,
+},
+{
+ "reference tracking: mangle and release sock",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock prohibited",
+ .result = REJECT,
+},
+{
+ "reference tracking: access member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: write to member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LD_IMM64(BPF_REG_2, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
+ offsetof(struct bpf_sock, mark)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "cannot write into sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: invalid 64-bit access of member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid sock access off=0 size=8",
+ .result = REJECT,
+},
+{
+ "reference tracking: access after release",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "reference tracking: direct access for lookup",
+ .insns = {
+ /* Check that the packet is at least 64B long */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
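Taken together, the reference tracking cases enforce a single contract: a socket returned by bpf_sk_lookup_tcp() must be NULL-checked, must not be mangled by pointer arithmetic or overwritten while still held, must not be written to, must not outlive a tail call or mix with LD_ABS/LD_IND, and must be released exactly once on every path, whether in the caller or in a subprog. The canonical shape in restricted C (a sketch; the zeroed tuple only keeps the example short):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int lookup_and_release(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)			/* may be NULL: check before any use */
		return 0;
	/* ... read-only use of sk ... */
	bpf_sk_release(sk);		/* exactly one release on this path */
	return 0;
}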
diff --git a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c
new file mode 100644
index 000000000000..a9a8f620e71c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/runtime_jit.c
@@ -0,0 +1,80 @@
+{
+ "runtime/jit: tail_call within bounds, prog once",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "runtime/jit: tail_call within bounds, prog loop",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 41,
+},
+{
+ "runtime/jit: tail_call within bounds, no prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "runtime/jit: tail_call out of bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 256),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "runtime/jit: pass negative index to tail_call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "runtime/jit: pass > 32bit index to tail_call",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 2 },
+ .result = ACCEPT,
+ .retval = 42,
+ /* Verifier rewrite for unpriv skips tail call here. */
+ .retval_unpriv = 2,
+},
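The runtime/jit cases run tail calls with the index in range, out of range, negative and above 32 bits: an index that does not select a program (out of range, negative, or an empty slot) makes bpf_tail_call() fall through to the next instruction, which is why those cases still ACCEPT and return the fall-through value instead of failing. In C with a prog array map (a sketch; the map definition style is a libbpf convention, not part of this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("tc")
int dispatch(struct __sk_buff *skb)
{
	bpf_tail_call(skb, &jmp_table, 0);	/* does not return on success */
	return 1;	/* reached only when the tail call falls through */
}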
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
new file mode 100644
index 000000000000..7e50cb80873a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -0,0 +1,156 @@
+{
+ "pointer/scalar confusion in state equality check (way 1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+},
+{
+ "pointer/scalar confusion in state equality check (way 2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+},
+{
+ "liveness pruning and write screening",
+ .insns = {
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* branch conditions teach us nothing about R2 */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "varlen_map_value_access pruning",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "R0 unbounded memory access",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "search pruning: all branches should be verified (nop operation)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R6 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "search pruning: all branches should be verified (invalid stack access)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_JMP_A(1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "invalid read from stack off -16+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "allocated_stack",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
+ BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+ .insn_processed = 15,
+},
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
new file mode 100644
index 000000000000..0ddfdf76aba5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -0,0 +1,384 @@
+{
+ "skb->sk: no NULL check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'sock_common_or_null'",
+},
+{
+ "skb->sk: sk->family [non fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, family)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "skb->sk: sk->type [fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock_common access",
+},
+{
+ "bpf_sk_fullsock(skb->sk): no !skb->sk check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "type=sock_common_or_null expected=sock_common",
+},
+{
+ "sk_fullsock(skb->sk): no NULL check on ret",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'sock_or_null'",
+},
+{
+ "sk_fullsock(skb->sk): sk->type [fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->family [non fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, family)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->state [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, state)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->type [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->protocol [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, protocol)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): beyond last field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, state)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "bpf_tcp_sock(skb->sk): no !skb->sk check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "type=sock_common_or_null expected=sock_common",
+},
+{
+ "bpf_tcp_sock(skb->sk): no NULL check on ret",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'tcp_sock_or_null'",
+},
+{
+ "bpf_tcp_sock(skb->sk): tp->snd_cwnd",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_tcp_sock(skb->sk): tp->bytes_acked",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, bytes_acked)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_tcp_sock(skb->sk): beyond last field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_tcp_sock, bytes_acked)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid tcp_sock access",
+},
+{
+ "bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_sk_release(skb->sk)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "type=sock_common expected=sock",
+},
+{
+ "bpf_sk_release(bpf_sk_fullsock(skb->sk))",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "reference has not been acquired before",
+},
+{
+ "bpf_sk_release(bpf_tcp_sock(skb->sk))",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "type=tcp_sock expected=sock",
+},
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
new file mode 100644
index 000000000000..45d43bf82f26
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -0,0 +1,76 @@
+{
+ "check valid spill/fill",
+ .insns = {
+ /* spill R1(ctx) into stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ /* fill it back into R2 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
+ /* should be able to access R0 = *(R2 + 8) */
+ /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .retval = POINTER_VALUE,
+},
+{
+ "check valid spill/fill, skb mark",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+},
+{
+ "check corrupted spill/fill",
+ .insns = {
+ /* spill R1(ctx) into stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ /* mess up with R1 pointer on stack */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
+ /* fill back into R0 is fine for priv.
+ * R0 now becomes SCALAR_VALUE.
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ /* Load from R0 should fail. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .errstr = "R0 invalid mem access 'inv",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check corrupted spill/fill, LSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
+{
+ "check corrupted spill/fill, MSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
diff --git a/tools/testing/selftests/bpf/verifier/spin_lock.c b/tools/testing/selftests/bpf/verifier/spin_lock.c
new file mode 100644
index 000000000000..781621facae4
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/spin_lock.c
@@ -0,0 +1,333 @@
+{
+ "spin_lock: test1 success",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test2 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test3 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "spin_lock: test4 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "spin_lock: test5 call within a locked region",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "calls are not allowed",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test6 missing unlock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "unlock is missing",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test7 unlock without lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "without taking a lock",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test8 double lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "calls are not allowed",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test9 different lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3, 11 },
+ .result = REJECT,
+ .errstr = "unlock of different lock",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test10 lock in subprog without unlock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "unlock is missing",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test11 ld_abs under lock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 4 },
+ .result = REJECT,
+ .errstr = "inside bpf_spin_lock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c
new file mode 100644
index 000000000000..7276620ef242
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c
@@ -0,0 +1,317 @@
+{
+ "PTR_TO_STACK store/load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xfaceb00c,
+},
+{
+ "PTR_TO_STACK store/load - bad alignment on off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
+},
+{
+ "PTR_TO_STACK store/load - bad alignment on reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
+},
+{
+ "PTR_TO_STACK store/load - out of bounds low",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=-79992 size=8",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+},
+{
+ "PTR_TO_STACK store/load - out of bounds high",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=0 size=8",
+},
+{
+ "PTR_TO_STACK check high 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "invalid stack off=0 size=1",
+ .result = REJECT,
+},
+{
+ "PTR_TO_STACK check high 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check high 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check high 7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "fp pointer offset",
+},
+{
+ "PTR_TO_STACK check low 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check low 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check low 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "invalid stack off=-513 size=1",
+ .result = REJECT,
+},
+{
+ "PTR_TO_STACK check low 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between fp pointer",
+},
+{
+ "PTR_TO_STACK check low 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check low 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check low 7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "fp pointer offset",
+},
+{
+ "PTR_TO_STACK mixed reg/k, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK mixed reg/k, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK mixed reg/k, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -3,
+},
+{
+ "PTR_TO_STACK reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid stack off=0 size=1",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "stack pointer arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
+ BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/uninit.c b/tools/testing/selftests/bpf/verifier/uninit.c
new file mode 100644
index 000000000000..987a5871ff1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/uninit.c
@@ -0,0 +1,39 @@
+{
+ "read uninitialized register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "read invalid register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R15 is invalid",
+ .result = REJECT,
+},
+{
+ "program doesn't init R0 before exit",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "program doesn't init R0 before exit in all branches",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
new file mode 100644
index 000000000000..dbaf5be947b2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -0,0 +1,522 @@
+{
+ "unpriv: return pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr",
+ .retval = POINTER_VALUE,
+},
+{
+ "unpriv: add const to pointer",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "unpriv: add pointer to pointer",
+ .insns = {
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 pointer += pointer",
+},
+{
+ "unpriv: neg pointer",
+ .insns = {
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 pointer arithmetic",
+},
+{
+ "unpriv: cmp pointer with const",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 pointer comparison",
+},
+{
+ "unpriv: cmp pointer with pointer",
+ .insns = {
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R10 pointer comparison",
+},
+{
+ "unpriv: check that printk is disallowed",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "unknown func bpf_trace_printk#6",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "unpriv: pass pointer to helper function",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R4 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: indirectly pass pointer on stack to helper function",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+},
+{
+ "unpriv: mangle pointer on stack 1",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: mangle pointer on stack 2",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: read pointer from stack in small chunks",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid size",
+ .result = REJECT,
+},
+{
+ "unpriv: write pointer into ctx",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 leaks addr",
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "unpriv: spill/fill of ctx",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "unpriv: spill/fill of ctx 2",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of ctx 3",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=fp expected=ctx",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of ctx 4",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=inv expected=ctx",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - ctx and sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ /* if (sk) bpf_sk_release(sk) */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "type=ctx expected=sock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - leak sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ //.errstr = "same insn cannot be used with different pointers",
+ .errstr = "Unreleased reference",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ //.errstr = "same insn cannot be used with different pointers",
+ .errstr = "cannot write into sock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers ldx",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
+ -(__s32)offsetof(struct bpf_perf_event_data,
+ sample_period) - 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "unpriv: write pointer into map elem value",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "alu32: mov u32 const",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_7, 0),
+ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "unpriv: partial copy of pointer",
+ .insns = {
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 partial copy",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: pass pointer to tail_call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .errstr_unpriv = "R3 leaks addr into helper",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: cmp map pointer with zero",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: write into frame pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "frame pointer is read only",
+ .result = REJECT,
+},
+{
+ "unpriv: spill/fill frame pointer",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "frame pointer is read only",
+ .result = REJECT,
+},
+{
+ "unpriv: cmp of frame pointer",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: adding of fp",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: cmp of stack pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R2 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value.c b/tools/testing/selftests/bpf/verifier/value.c
new file mode 100644
index 000000000000..0e42592b1218
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value.c
@@ -0,0 +1,104 @@
+{
+ "map element value store of cleared call register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R1 !read_ok",
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+},
+{
+ "map element value with unaligned store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value with unaligned load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, offsetof(struct test_val, foo)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_adj_spill.c b/tools/testing/selftests/bpf/verifier/value_adj_spill.c
new file mode 100644
index 000000000000..7135e8021b81
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_adj_spill.c
@@ -0,0 +1,43 @@
+{
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+},
+{
+ "map element value or null is marked on register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
new file mode 100644
index 000000000000..7f6c232cd842
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
@@ -0,0 +1,94 @@
+{
+ "map element value illegal alu op, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 bitwise operator &= on pointer",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 32-bit pointer arithmetic prohibited",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 pointer arithmetic with /= operator",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value illegal alu op, 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_3, 4096),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 invalid mem access 'inv'",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_or_null.c b/tools/testing/selftests/bpf/verifier/value_or_null.c
new file mode 100644
index 000000000000..860d4a71cd83
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_or_null.c
@@ -0,0 +1,152 @@
+{
+ "multiple registers share map_lookup_elem result",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "invalid memory access with multiple map_lookup_elem calls",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = REJECT,
+ .errstr = "R4 !read_ok",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "valid indirect map_lookup_elem access with 2nd lookup in branch",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_2, 10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "invalid map access from else condition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT,
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
new file mode 100644
index 000000000000..c3de1a2c9dc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -0,0 +1,838 @@
+{
+ "map access: known scalar += value_ptr from different maps",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar from different maps",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 min value is outside of the array range",
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr from different maps, but same value properties",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: mixing value pointer and scalar, 1",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different pointers or scalars",
+ .retval = 0,
+},
+{
+ "map access: mixing value pointer and scalar, 2",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different maps or paths",
+ .retval = 0,
+},
+{
+ "sanitation: alu with different scalars 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100001),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 0x100000,
+},
+{
+ "sanitation: alu with different scalars 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_delete_elem),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_EMIT_CALL(BPF_FUNC_map_delete_elem),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = -EINVAL * 2,
+},
+{
+ "sanitation: alu with different scalars 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, EINVAL),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, EINVAL),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -EINVAL * 2,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+},
+{
+ "map access: value_ptr += known scalar, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+},
+{
+ "map access: value_ptr += known scalar, 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, -2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: value_ptr += known scalar, 6",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: unknown scalar += value_ptr, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: unknown scalar += value_ptr, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: unknown scalar += value_ptr, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: unknown scalar += value_ptr, 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_1, 19),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 max value is outside of the array range",
+ .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: value_ptr += unknown scalar, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: value_ptr += unknown scalar, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 pointer += pointer prohibited",
+},
+{
+ "map access: known scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+},
+{
+ "map access: value_ptr -= known scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+},
+{
+ "map access: value_ptr -= known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: unknown scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+},
+{
+ "map access: value_ptr -= unknown scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is negative",
+},
+{
+ "map access: value_ptr -= unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 pointer -= pointer prohibited",
+},
diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c
new file mode 100644
index 000000000000..1e536ff121a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/var_off.c
@@ -0,0 +1,66 @@
+{
+ "variable-offset ctx access",
+ .insns = {
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ /* add it to skb. We now have either &skb->len or
+ * &skb->pkt_type, but we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* dereference it */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "variable-offset stack access",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+ /* add it to fp. We now have either fp-4 or fp-8, but
+ * we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "indirect variable-offset stack access",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+ /* add it to fp. We now have either fp-4 or fp-8, but
+ * we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it indirectly */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .errstr = "variable stack read R2",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
diff --git a/tools/testing/selftests/bpf/verifier/xadd.c b/tools/testing/selftests/bpf/verifier/xadd.c
new file mode 100644
index 000000000000..c5de2e62cc8b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xadd.c
@@ -0,0 +1,97 @@
+{
+ "xadd/w check unaligned stack",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "xadd/w check unaligned map",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = REJECT,
+ .errstr = "misaligned value access off",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "xadd/w check unaligned pkt",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 99),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+ BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+ BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R2 pkt is not allowed",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "xadd/w check whether src/dst got mangled, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+},
+{
+ "xadd/w check whether src/dst got mangled, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+},
diff --git a/tools/testing/selftests/bpf/verifier/xdp.c b/tools/testing/selftests/bpf/verifier/xdp.c
new file mode 100644
index 000000000000..5ac390508139
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xdp.c
@@ -0,0 +1,14 @@
+{
+ "XDP, using ifindex from netdev",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, ingress_ifindex)),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
new file mode 100644
index 000000000000..bfb97383e6b5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
@@ -0,0 +1,900 @@
+{
+ "XDP pkt read, pkt_end mangling, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "XDP pkt read, pkt_end mangling, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index bab13dd025a6..0d26b5e3f966 100755
--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
@@ -37,6 +37,10 @@ prerequisite()
exit $ksft_skip
fi
+ present_cpus=`cat $SYSFS/devices/system/cpu/present`
+ present_max=${present_cpus##*-}
+ echo "present_cpus = $present_cpus present_max = $present_max"
+
echo -e "\t Cpus in online state: $online_cpus"
offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
@@ -151,6 +155,8 @@ online_cpus=0
online_max=0
offline_cpus=0
offline_max=0
+present_cpus=0
+present_max=0
while getopts e:ahp: opt; do
case $opt in
@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
online_cpu_expect_success $online_max
if [[ $offline_cpus -gt 0 ]]; then
- echo -e "\t offline to online to offline: cpu $offline_max"
- online_cpu_expect_success $offline_max
- offline_cpu_expect_success $offline_max
+ echo -e "\t offline to online to offline: cpu $present_max"
+ online_cpu_expect_success $present_max
+ offline_cpu_expect_success $present_max
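+ # Bring the CPU back online at the end so it is not left offline by the test.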
+ online_cpu $present_max
fi
exit 0
else
diff --git a/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
new file mode 100755
index 000000000000..5ba5bef44d5b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
@@ -0,0 +1,200 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that blackhole routes are marked as offloaded and that packets hitting
+# them are dropped by the ASIC and not by the kernel.
+#
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | 2001:db8:2::2/64 |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ blackhole_ipv4
+ blackhole_ipv6
+"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/24 2001:db8:2::1/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 198.51.100.1/24 2001:db8:2::1/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ tc qdisc add dev $rp1 clsact
+
+ __addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del $rp2 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+router_destroy()
+{
+ __addr_add_del $rp2 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $rp1 clsact
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+ping_ipv4()
+{
+ ping_test $h1 198.51.100.1 ": h1->h2"
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::1 ": h1->h2"
+}
+
+blackhole_ipv4()
+{
+ # Transmit packets from H1 to H2 and make sure they are dropped by the
+ # ASIC and not by the kernel
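+ # The skip_hw flower filter installed below only sees packets that reach
+ # the kernel datapath, so its counter is expected to stay at zero when
+ # the ASIC drops the traffic.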
+ RET=0
+
+ ip -4 route add blackhole 198.51.100.0/30
+ tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_hw dst_ip 198.51.100.1 src_ip 192.0.2.1 ip_proto icmp \
+ action pass
+
+ ip -4 route show 198.51.100.0/30 | grep -q offload
+ check_err $? "route not marked as offloaded when should"
+
+ ping_do $h1 198.51.100.1
+ check_fail $? "ping passed when should not"
+
+ tc_check_packets "dev $rp1 ingress" 101 0
+ check_err $? "packets trapped and not dropped by ASIC"
+
+ log_test "IPv4 blackhole route"
+
+ tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+ ip -4 route del blackhole 198.51.100.0/30
+}
+
+blackhole_ipv6()
+{
+ RET=0
+
+ ip -6 route add blackhole 2001:db8:2::/120
+ tc filter add dev $rp1 ingress protocol ipv6 pref 1 handle 101 flower \
+ skip_hw dst_ip 2001:db8:2::1 src_ip 2001:db8:1::1 \
+ ip_proto icmpv6 action pass
+
+ ip -6 route show 2001:db8:2::/120 | grep -q offload
+ check_err $? "route not marked as offloaded when should"
+
+ ping6_do $h1 2001:db8:2::1
+ check_fail $? "ping passed when should not"
+
+ tc_check_packets "dev $rp1 ingress" 101 0
+ check_err $? "packets trapped and not dropped by ASIC"
+
+ log_test "IPv6 blackhole route"
+
+ tc filter del dev $rp1 ingress protocol ipv6 pref 1 handle 101 flower
+ ip -6 route del blackhole 2001:db8:2::/120
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
index 1ca631d5aaba..40f16f2a3afd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -148,9 +148,10 @@ dscp_ping_test()
eval "t0s=($(dscp_fetch_stats $dev_10 10)
$(dscp_fetch_stats $dev_20 20))"
+ local ping_timeout=$((PING_TIMEOUT * 5))
ip vrf exec $vrf_name \
${PING} -Q $dscp_10 ${sip:+-I $sip} $dip \
- -c 10 -i 0.1 -w 2 &> /dev/null
+ -c 10 -i 0.5 -w $ping_timeout &> /dev/null
local -A t1s
eval "t1s=($(dscp_fetch_stats $dev_10 10)
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
index 281d90766e12..9faf02e32627 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
@@ -169,9 +169,10 @@ dscp_ping_test()
eval "local -A dev1_t0s=($(dscp_fetch_stats $dev1 0))"
eval "local -A dev2_t0s=($(dscp_fetch_stats $dev2 0))"
+ local ping_timeout=$((PING_TIMEOUT * 5))
ip vrf exec $vrf_name \
${PING} -Q $dscp ${sip:+-I $sip} $dip \
- -c 10 -i 0.1 -w 2 &> /dev/null
+ -c 10 -i 0.5 -w $ping_timeout &> /dev/null
eval "local -A dev1_t1s=($(dscp_fetch_stats $dev1 0))"
eval "local -A dev2_t1s=($(dscp_fetch_stats $dev2 0))"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index 94fdbf215c14..c4cf6e6d800e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -25,6 +25,7 @@ ALL_TESTS="
lag_unlink_slaves_test
lag_dev_deletion_test
vlan_interface_uppers_test
+ bridge_extern_learn_test
devlink_reload_test
"
NUM_NETIFS=2
@@ -541,6 +542,25 @@ vlan_interface_uppers_test()
ip link del dev br0
}
+bridge_extern_learn_test()
+{
+ # Test that externally learned entries added from user space are
+ # marked as offloaded
+ RET=0
+
+ ip link add name br0 type bridge
+ ip link set dev $swp1 master br0
+
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn
+
+ bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload
+ check_err $? "fdb entry not marked as offloaded when should"
+
+ log_test "externally learned fdb entry"
+
+ ip link del dev br0
+}
+
devlink_reload_test()
{
# Test that after executing all the above configuration tests, a
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index b41d6256b2d0..a372b2f60874 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -9,10 +9,11 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ delta_two_masks_one_key_test delta_simple_rehash_test \
bloom_simple_test bloom_complex_test bloom_delta_test"
NUM_NETIFS=2
source $lib_dir/tc_common.sh
-source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
tcflags="skip_hw"
@@ -38,6 +39,55 @@ h2_destroy()
simple_if_fini $h2 192.0.2.2/24 198.51.100.2/24
}
+tp_record()
+{
+ local tracepoint=$1
+ local cmd=$2
+
+ perf record -q -e $tracepoint $cmd
+ return $?
+}
+
+tp_record_all()
+{
+ local tracepoint=$1
+ local seconds=$2
+
+ perf record -a -q -e $tracepoint sleep $seconds
+ return $?
+}
+
+__tp_hit_count()
+{
+ local tracepoint=$1
+
+ local perf_output=`perf script -F trace:event,trace`
+ return `echo "$perf_output" | grep "$tracepoint:" | wc -l`
+}
+
+tp_check_hits()
+{
+ local tracepoint=$1
+ local count=$2
+
+ __tp_hit_count $tracepoint
+ if [[ "$?" -ne "$count" ]]; then
+ return 1
+ fi
+ return 0
+}
+
+tp_check_hits_any()
+{
+ local tracepoint=$1
+
+ __tp_hit_count $tracepoint
+ if [[ "$?" -eq "0" ]]; then
+ return 1
+ fi
+ return 0
+}
+
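+# Usage sketch of the helpers above (the tracepoint and command here are only
+# illustrative):
+#	tp_record "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" \
+#		"tc filter add dev $h2 ingress protocol ip pref 1 handle 101 \
+#		flower skip_sw dst_ip 192.0.2.2 action drop"
+#	tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+#	check_err $? "unexpected C-TCAM spill"
+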
single_mask_test()
{
# When only a single mask is required, the device uses the master
@@ -182,20 +232,38 @@ multiple_masks_test()
# spillage is performed correctly and that the right filter is
# matched
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
local index
RET=0
NUM_MASKS=32
+ NUM_ERPS=16
BASE_INDEX=100
for i in $(eval echo {1..$NUM_MASKS}); do
index=$((BASE_INDEX - i))
- tc filter add dev $h2 ingress protocol ip pref $index \
- handle $index \
- flower $tcflags dst_ip 192.0.2.2/${i} src_ip 192.0.2.1 \
- action drop
+ if ((i > NUM_ERPS)); then
+ exp_hits=1
+ err_msg="$i filters - C-TCAM spill did not happen when it was expected"
+ else
+ exp_hits=0
+ err_msg="$i filters - C-TCAM spill happened when it should not"
+ fi
+
+ tp_record "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" \
+ "tc filter add dev $h2 ingress protocol ip pref $index \
+ handle $index \
+ flower $tcflags \
+ dst_ip 192.0.2.2/${i} src_ip 192.0.2.1/${i} \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" \
+ $exp_hits
+ check_err $? "$err_msg"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 \
-B 192.0.2.2 -t ip -q
@@ -325,28 +393,6 @@ ctcam_edge_cases_test()
ctcam_no_atcam_masks_test
}
-tp_record()
-{
- local tracepoint=$1
- local cmd=$2
-
- perf record -q -e $tracepoint $cmd
- return $?
-}
-
-tp_check_hits()
-{
- local tracepoint=$1
- local count=$2
-
- perf_output=`perf script -F trace:event,trace`
- hits=`echo $perf_output | grep "$tracepoint:" | wc -l`
- if [[ "$count" -ne "$hits" ]]; then
- return 1
- fi
- return 0
-}
-
delta_simple_test()
{
# The first filter will create eRP, the second filter will fit into
@@ -405,6 +451,365 @@ delta_simple_test()
log_test "delta simple test ($tcflags)"
}
+delta_two_masks_one_key_test()
+{
+ # If 2 keys are the same and only differ in mask in a way that
+ # they belong under the same ERP (second is delta of the first),
+ # there should be no C-TCAM spill.
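+ #
+ # Below, dst_ip 192.0.2.0/24 and dst_ip 192.0.2.2 (a /32) share the same
+ # key and differ only in prefix length, so the second filter is expected
+ # to be inserted as a delta of the first eRP rather than spilling into
+ # the C-TCAM.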
+
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 1 handle 101 flower $tcflags dst_ip 192.0.2.0/24 \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+ check_err $? "incorrect C-TCAM spill while inserting the first rule"
+
+ tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+ check_err $? "incorrect C-TCAM spill while inserting the second rule"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match on correct filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ log_test "delta two masks one key test ($tcflags)"
+}
+
+delta_simple_rehash_test()
+{
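+ # Check that periodic vregion rehash can be disabled through the
+ # acl_region_rehash_interval devlink parameter, and that once it is
+ # re-enabled the rehash/migrate tracepoints fire while the installed
+ # filters keep matching the right packets.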
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.1.0/25 action drop
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
+ $tcflags dst_ip 192.0.3.0/24 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_err $? "Migrate trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_err $? "Migrate end trace was not hit"
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_fail $? "Migrate trace was hit when no migration should happen"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_fail $? "Migrate end trace was hit when no migration should happen"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ log_test "delta simple rehash test ($tcflags)"
+}
+
+delta_simple_ipv6_rehash_test()
+{
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 101 flower \
+ $tcflags dst_ip 2001:db8:1::0/121 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 2 handle 102 flower \
+ $tcflags dst_ip 2001:db8:2::2 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 3 handle 103 flower \
+ $tcflags dst_ip 2001:db8:3::0/120 action drop
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_err $? "Migrate trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_err $? "Migrate end trace was not hit"
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_fail $? "Migrate trace was hit when no migration should happen"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_fail $? "Migrate end trace was hit when no migration should happen"
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ipv6 pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 101 flower
+
+ log_test "delta simple IPv6 rehash test ($tcflags)"
+}
+
+TEST_RULE_BASE=256
+declare -a test_rules_inserted
+
+test_rule_add()
+{
+ local iface=$1
+ local tcflags=$2
+ local index=$3
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ${test_rules_inserted[$index]} ; then
+ return
+ fi
+
+ local number=$(( $index + $TEST_RULE_BASE ))
+ printf -v hexnumber '%x' $number
+
+ batch="${batch}filter add dev $iface ingress protocol ipv6 pref 1 \
+ handle $number flower $tcflags \
+ src_ip 2001:db8:1::$hexnumber action drop\n"
+ test_rules_inserted[$index]=true
+}
+
+test_rule_del()
+{
+ local iface=$1
+ local index=$2
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ! ${test_rules_inserted[$index]} ; then
+ return
+ fi
+
+ local number=$(( $index + $TEST_RULE_BASE ))
+ printf -v hexnumber '%x' $number
+
+ batch="${batch}filter del dev $iface ingress protocol ipv6 pref 1 \
+ handle $number flower\n"
+ test_rules_inserted[$index]=false
+}
+
+test_rule_add_or_remove()
+{
+ local iface=$1
+ local tcflags=$2
+ local index=$3
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ${test_rules_inserted[$index]} ; then
+ test_rule_del $iface $index
+ else
+ test_rule_add $iface $tcflags $index
+ fi
+}
+
+test_rule_add_or_remove_random_batch()
+{
+ local iface=$1
+ local tcflags=$2
+ local total_count=$3
+ local skip=0
+ local count=0
+ local MAXSKIP=20
+ local MAXCOUNT=20
+
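+ # Walk over the indices in pseudo-random runs: skip up to MAXSKIP
+ # indices, then toggle (add or delete) up to MAXCOUNT consecutive ones.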
+ for ((i=1;i<=total_count;i++)); do
+ if (( $skip == 0 )) && (($count == 0)); then
+ ((skip=$RANDOM % $MAXSKIP + 1))
+ ((count=$RANDOM % $MAXCOUNT + 1))
+ fi
+ if (( $skip != 0 )); then
+ ((skip-=1))
+ else
+ ((count-=1))
+ test_rule_add_or_remove $iface $tcflags $i
+ fi
+ done
+}
+
+delta_massive_ipv6_rehash_test()
+{
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
+ RANDOM=4432897
+ declare batch=""
+ test_rule_add_or_remove_random_batch $h2 $tcflags 5000
+
+ echo -n -e $batch | tc -b -
+
+ declare batch=""
+ test_rule_add_or_remove_random_batch $h2 $tcflags 5000
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 101 flower \
+ $tcflags dst_ip 2001:db8:1::0/121 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 2 handle 102 flower \
+ $tcflags dst_ip 2001:db8:2::2 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 3 handle 103 flower \
+ $tcflags dst_ip 2001:db8:3::0/120 action drop
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ echo -n -e $batch | tc -b -
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ipv6 pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 101 flower
+
+ declare batch=""
+ for i in {1..5000}; do
+ test_rule_del $h2 $i
+ done
+ echo -e $batch | tc -b -
+
+ log_test "delta massive IPv6 rehash test ($tcflags)"
+}
+
bloom_simple_test()
{
# Bloom filter requires that the eRP table is used. This test
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
index a0a80e1a69e8..e7ffc79561b7 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -2,7 +2,6 @@
# SPDX-License-Identifier: GPL-2.0
NUM_NETIFS=6
-source ../../../../net/forwarding/lib.sh
source ../../../../net/forwarding/tc_common.sh
source devlink_lib_spectrum.sh
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
index dcf9f4e913e0..ae6146ec5afd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -847,6 +847,24 @@ sanitization_vlan_aware_test()
log_test "vlan-aware - failed enslavement to vlan-aware bridge"
+ bridge vlan del vid 10 dev vxlan20
+ bridge vlan add vid 20 dev vxlan20 pvid untagged
+
+ # Test that offloading of an unsupported tunnel fails when it is
+ # triggered by addition of VLAN to a local port
+ RET=0
+
+ # TOS must be set to inherit
+ ip link set dev vxlan10 type vxlan tos 42
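+ # With a fixed TOS the tunnel can no longer be offloaded, so the VLAN
+ # addition below is expected to be rejected.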
+
+ ip link set dev $swp1 master br0
+ bridge vlan add vid 10 dev $swp1 &> /dev/null
+ check_fail $?
+
+ log_test "vlan-aware - failed vlan addition to a local port"
+
+ ip link set dev vxlan10 type vxlan tos inherit
+
ip link del dev vxlan20
ip link del dev vxlan10
ip link del dev br0
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh
new file mode 100755
index 000000000000..749ba3cfda1d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test vetoing of FDB entries that mlxsw cannot offload. This exercises several
+# different veto vectors to test various rollback scenarios in the vxlan driver.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ fdb_create_veto_test
+ fdb_replace_veto_test
+ fdb_append_veto_test
+ fdb_changelink_veto_test
+"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+
+setup_prepare()
+{
+ swp1=${NETIFS[p1]}
+ swp2=${NETIFS[p2]}
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link set dev $swp1 up
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 up
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link set dev vxlan0 master br0
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev vxlan0 nomaster
+ ip link del dev vxlan0
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 nomaster
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+fdb_create_veto_test()
+{
+ RET=0
+
+ bridge fdb add 01:02:03:04:05:06 dev vxlan0 self static \
+ dst 198.51.100.2 2>/dev/null
+ check_fail $? "multicast MAC not rejected"
+
+ bridge fdb add 01:02:03:04:05:06 dev vxlan0 self static \
+ dst 198.51.100.2 2>&1 >/dev/null | grep -q mlxsw_spectrum
+ check_err $? "multicast MAC rejected without extack"
+
+ log_test "vxlan FDB veto - create"
+}
+
+fdb_replace_veto_test()
+{
+ RET=0
+
+ bridge fdb add 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2
+ check_err $? "valid FDB rejected"
+
+ bridge fdb replace 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2 port 1234 2>/dev/null
+ check_fail $? "FDB with an explicit port not rejected"
+
+ bridge fdb replace 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2 port 1234 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with an explicit port rejected without extack"
+
+ log_test "vxlan FDB veto - replace"
+}
+
+fdb_append_veto_test()
+{
+ RET=0
+
+ bridge fdb add 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.2
+ check_err $? "valid FDB rejected"
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.3 port 1234 2>/dev/null
+ check_fail $? "FDB with an explicit port not rejected"
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.3 port 1234 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with an explicit port rejected without extack"
+
+ log_test "vxlan FDB veto - append"
+}
+
+fdb_changelink_veto_test()
+{
+ RET=0
+
+ ip link set dev vxlan0 type vxlan \
+ group 224.0.0.1 dev lo 2>/dev/null
+ check_fail $? "FDB with a multicast IP not rejected"
+
+ ip link set dev vxlan0 type vxlan \
+ group 224.0.0.1 dev lo 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with a multicast IP rejected without extack"
+
+ log_test "vxlan FDB veto - changelink"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore
new file mode 100644
index 000000000000..8a5d9bf63dd4
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/.gitignore
@@ -0,0 +1 @@
+binderfs_test
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
new file mode 100644
index 000000000000..58cb659b56b4
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CFLAGS += -I../../../../../usr/include/
+TEST_GEN_PROGS := binderfs_test
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
new file mode 100644
index 000000000000..8c2ed962e1c7
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <linux/android/binder.h>
+#include <linux/android/binderfs.h>
+#include "../../kselftest.h"
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+again:
+ ret = write(fd, buf, count);
+ if (ret < 0 && errno == EINTR)
+ goto again;
+
+ return ret;
+}
+
+static void write_to_file(const char *filename, const void *buf, size_t count,
+ int allowed_errno)
+{
+ int fd, saved_errno;
+ ssize_t ret;
+
+ fd = open(filename, O_WRONLY | O_CLOEXEC);
+ if (fd < 0)
+ ksft_exit_fail_msg("%s - Failed to open file %s\n",
+ strerror(errno), filename);
+
+ ret = write_nointr(fd, buf, count);
+ if (ret < 0) {
+ if (allowed_errno && (errno == allowed_errno)) {
+ close(fd);
+ return;
+ }
+
+ goto on_error;
+ }
+
+ if ((size_t)ret != count)
+ goto on_error;
+
+ close(fd);
+ return;
+
+on_error:
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to write to file %s\n",
+ strerror(errno), filename);
+
+ ksft_exit_fail_msg("Failed to write to file %s\n", filename);
+}
+
+static void change_to_userns(void)
+{
+ int ret;
+ uid_t uid;
+ gid_t gid;
+ /* {g,u}id_map files only allow a max of 4096 bytes written to them */
+ char idmap[4096];
+
+ uid = getuid();
+ gid = getgid();
+
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to unshare user namespace\n",
+ strerror(errno));
+
+ write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT);
+
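+ /* Map id 0 inside the new user namespace to the current id outside it;
+ * the format is "<id-inside-ns> <id-outside-ns> <count>", hence "0 <uid> 1".
+ */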
+ ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid);
+ if (ret < 0 || (size_t)ret >= sizeof(idmap))
+ ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
+ strerror(errno));
+
+ write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0);
+
+ ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid);
+ if (ret < 0 || (size_t)ret >= sizeof(idmap))
+ ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
+ strerror(errno));
+
+ write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0);
+
+ ret = setgid(0);
+ if (ret)
+ ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
+ strerror(errno));
+
+ ret = setuid(0);
+ if (ret)
+ ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
+ strerror(errno));
+}
+
+static void change_to_mountns(void)
+{
+ int ret;
+
+ ret = unshare(CLONE_NEWNS);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n",
+ strerror(errno));
+
+ ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to mount / as private\n",
+ strerror(errno));
+}
+
+static void rmdir_protect_errno(const char *dir)
+{
+ int saved_errno = errno;
+ (void)rmdir(dir);
+ errno = saved_errno;
+}
+
+static void __do_binderfs_test(void)
+{
+ int fd, ret, saved_errno;
+ size_t len;
+ ssize_t wret;
+ bool keep = false;
+ struct binderfs_device device = { 0 };
+ struct binder_version version = { 0 };
+
+ change_to_mountns();
+
+ ret = mkdir("/dev/binderfs", 0755);
+ if (ret < 0) {
+ if (errno != EEXIST)
+ ksft_exit_fail_msg(
+ "%s - Failed to create binderfs mountpoint\n",
+ strerror(errno));
+
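+ /* The mount point already existed; leave it in place on cleanup. */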
+ keep = true;
+ }
+
+ ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
+ if (ret < 0) {
+ if (errno != ENODEV)
+ ksft_exit_fail_msg("%s - Failed to mount binderfs\n",
+ strerror(errno));
+
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_skip(
+ "The Android binderfs filesystem is not available\n");
+ }
+
+ /* binderfs mount test passed */
+ ksft_inc_pass_cnt();
+
+ memcpy(device.name, "my-binder", strlen("my-binder"));
+
+ fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ ksft_exit_fail_msg(
+ "%s - Failed to open binder-control device\n",
+ strerror(errno));
+
+ ret = ioctl(fd, BINDER_CTL_ADD, &device);
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ if (ret < 0) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg(
+ "%s - Failed to allocate new binder device\n",
+ strerror(errno));
+ }
+
+ ksft_print_msg(
+ "Allocated new binder device with major %d, minor %d, and name %s\n",
+ device.major, device.minor, device.name);
+
+ /* binder device allocation test passed */
+ ksft_inc_pass_cnt();
+
+ fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY);
+ if (fd < 0) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg("%s - Failed to open my-binder device\n",
+ strerror(errno));
+ }
+
+ ret = ioctl(fd, BINDER_VERSION, &version);
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ if (ret < 0) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg(
+ "%s - Failed to open perform BINDER_VERSION request\n",
+ strerror(errno));
+ }
+
+ ksft_print_msg("Detected binder version: %d\n",
+ version.protocol_version);
+
+ /* binder transaction with binderfs binder device passed */
+ ksft_inc_pass_cnt();
+
+ ret = unlink("/dev/binderfs/my-binder");
+ if (ret < 0) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg("%s - Failed to delete binder device\n",
+ strerror(errno));
+ }
+
+ /* binder device removal passed */
+ ksft_inc_pass_cnt();
+
+ ret = unlink("/dev/binderfs/binder-control");
+ if (!ret) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg("Managed to delete binder-control device\n");
+ } else if (errno != EPERM) {
+ keep ? : rmdir_protect_errno("/dev/binderfs");
+ ksft_exit_fail_msg(
+ "%s - Failed to delete binder-control device but exited with unexpected error code\n",
+ strerror(errno));
+ }
+
+ /* binder-control device removal failed as expected */
+ ksft_inc_xfail_cnt();
+
+on_error:
+ ret = umount2("/dev/binderfs", MNT_DETACH);
+ keep ?: rmdir_protect_errno("/dev/binderfs");
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to unmount binderfs\n",
+ strerror(errno));
+
+ /* binderfs unmount test passed */
+ ksft_inc_pass_cnt();
+}
+
+static void binderfs_test_privileged(void)
+{
+ if (geteuid() != 0)
+ ksft_print_msg(
+ "Tests are not run as root. Skipping privileged tests\n");
+ else
+ __do_binderfs_test();
+}
+
+static void binderfs_test_unprivileged(void)
+{
+ change_to_userns();
+ __do_binderfs_test();
+}
+
+int main(int argc, char *argv[])
+{
+ binderfs_test_privileged();
+ binderfs_test_unprivileged();
+ ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config
new file mode 100644
index 000000000000..02dd6cc9cf99
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/config
@@ -0,0 +1,3 @@
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_BINDER_IPC=y
diff --git a/tools/testing/selftests/firmware/config b/tools/testing/selftests/firmware/config
index 913a25a4a32b..bf634dda0720 100644
--- a/tools/testing/selftests/firmware/config
+++ b/tools/testing/selftests/firmware/config
@@ -1,6 +1,5 @@
CONFIG_TEST_FIRMWARE=y
CONFIG_FW_LOADER=y
CONFIG_FW_LOADER_USER_HELPER=y
-CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 466cf2f91ba0..a4320c4b44dc 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -155,8 +155,11 @@ read_firmwares()
{
for i in $(seq 0 3); do
config_set_read_fw_idx $i
- # Verify the contents match
- if ! diff -q "$FW" $DIR/read_firmware 2>/dev/null ; then
+ # Verify the contents are what we expect.
+ # -Z is required for now -- check for yourself: md5sum on $FW and
+ # $DIR/read_firmware yields the same hash, and even cmp agrees they
+ # match, yet plain diff reports a difference, so something is off.
+ if ! diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
echo "request #$i: firmware was not loaded" >&2
exit 1
fi
@@ -168,7 +171,7 @@ read_firmwares_expect_nofile()
for i in $(seq 0 3); do
config_set_read_fw_idx $i
# Ensures contents differ
- if diff -q "$FW" $DIR/read_firmware 2>/dev/null ; then
+ if diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
echo "request $i: file was not expected to match" >&2
exit 1
fi
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 6c5f1b2ffb74..1cbb12e284a6 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -91,7 +91,7 @@ verify_reqs()
if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
echo "usermode helper disabled so ignoring test"
- exit $ksft_skip
+ exit 0
fi
fi
}
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index f8d468f54e98..aaa1e9f083c3 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -37,7 +37,7 @@ static int get_debugfs(char **path)
struct libmnt_table *tb;
struct libmnt_iter *itr = NULL;
struct libmnt_fs *fs;
- int found = 0;
+ int found = 0, ret;
cxt = mnt_new_context();
if (!cxt)
@@ -58,8 +58,11 @@ static int get_debugfs(char **path)
break;
}
}
- if (found)
- asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+ if (found) {
+ ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+ if (ret < 0)
+ err(EXIT_FAILURE, "failed to format string");
+ }
mnt_free_iter(itr);
mnt_free_context(cxt);
diff --git a/tools/testing/selftests/ir/Makefile b/tools/testing/selftests/ir/Makefile
index f4ba8eb84b95..ad06489c22a5 100644
--- a/tools/testing/selftests/ir/Makefile
+++ b/tools/testing/selftests/ir/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
TEST_PROGS := ir_loopback.sh
TEST_GEN_PROGS_EXTENDED := ir_loopback
+APIDIR := ../../../include/uapi
+CFLAGS += -Wall -O2 -I$(APIDIR)
include ../lib.mk
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 23022e9d32eb..b52cfdefecbf 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -571,7 +571,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* already exist.
*/
region = (struct userspace_mem_region *) userspace_mem_region_find(
- vm, guest_paddr, guest_paddr + npages * vm->page_size);
+ vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
if (region != NULL)
TEST_ASSERT(false, "overlapping userspace_mem_region already "
"exists\n"
@@ -587,15 +587,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
region = region->next) {
if (region->region.slot == slot)
break;
- if ((guest_paddr <= (region->region.guest_phys_addr
- + region->region.memory_size))
- && ((guest_paddr + npages * vm->page_size)
- >= region->region.guest_phys_addr))
- break;
}
if (region != NULL)
TEST_ASSERT(false, "A mem region with the requested slot "
- "or overlapping physical memory range already exists.\n"
+ "already exists.\n"
" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
" existing slot: %u paddr: 0x%lx size: 0x%lx",
slot, guest_paddr, npages,
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index ea3c73e8f4f6..c49c2a28b0eb 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -103,6 +103,12 @@ int main(int argc, char *argv[])
vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+ /* KVM should return supported EVMCS version range */
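+ /* The low byte is presumably the lowest and the high byte the highest
+ * supported eVMCS version; both are sanity-checked below.
+ */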
+ TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
+ (evmcs_ver & 0xff) > 0,
+ "Incorrect EVMCS version range: %x:%x\n",
+ evmcs_ver & 0xff, evmcs_ver >> 8);
+
run = vcpu_state(vm, VCPU_ID);
vcpu_regs_get(vm, VCPU_ID, &regs1);
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 10baa1652fc2..c67d32eeb668 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -54,6 +54,22 @@ static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
return fd;
}
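+/*
+ * Re-open an existing memfd via /proc/self/fd to obtain a new open file
+ * description; used below to verify that F_SEAL_FUTURE_WRITE also blocks
+ * writes through such a re-opened descriptor.
+ */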
+static int mfd_assert_reopen_fd(int fd_in)
+{
+ int r, fd;
+ char path[100];
+
+ sprintf(path, "/proc/self/fd/%d", fd_in);
+
+ fd = open(path, O_RDWR);
+ if (fd < 0) {
+ printf("re-open of existing fd %d failed\n", fd_in);
+ abort();
+ }
+
+ return fd;
+}
+
static void mfd_fail_new(const char *name, unsigned int flags)
{
int r;
@@ -255,6 +271,25 @@ static void mfd_assert_read(int fd)
munmap(p, mfd_def_size);
}
+/* Test that PROT_READ + MAP_SHARED mappings work. */
+static void mfd_assert_read_shared(int fd)
+{
+ void *p;
+
+ /* verify PROT_READ and MAP_SHARED *is* allowed */
+ p = mmap(NULL,
+ mfd_def_size,
+ PROT_READ,
+ MAP_SHARED,
+ fd,
+ 0);
+ if (p == MAP_FAILED) {
+ printf("mmap() failed: %m\n");
+ abort();
+ }
+ munmap(p, mfd_def_size);
+}
+
static void mfd_assert_write(int fd)
{
ssize_t l;
@@ -693,6 +728,44 @@ static void test_seal_write(void)
}
/*
+ * Test SEAL_FUTURE_WRITE
+ * Test whether SEAL_FUTURE_WRITE actually prevents modifications.
+ */
+static void test_seal_future_write(void)
+{
+ int fd, fd2;
+ void *p;
+
+ printf("%s SEAL-FUTURE-WRITE\n", memfd_str);
+
+ fd = mfd_assert_new("kern_memfd_seal_future_write",
+ mfd_def_size,
+ MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
+ p = mfd_assert_mmap_shared(fd);
+
+ mfd_assert_has_seals(fd, 0);
+
+ mfd_assert_add_seals(fd, F_SEAL_FUTURE_WRITE);
+ mfd_assert_has_seals(fd, F_SEAL_FUTURE_WRITE);
+
+ /* read should pass, writes should fail */
+ mfd_assert_read(fd);
+ mfd_assert_read_shared(fd);
+ mfd_fail_write(fd);
+
+ fd2 = mfd_assert_reopen_fd(fd);
+ /* read should pass, writes should still fail */
+ mfd_assert_read(fd2);
+ mfd_assert_read_shared(fd2);
+ mfd_fail_write(fd2);
+
+ munmap(p, mfd_def_size);
+ close(fd2);
+ close(fd);
+}
+
+/*
* Test SEAL_SHRINK
* Test whether SEAL_SHRINK actually prevents shrinking
*/
@@ -945,6 +1018,7 @@ int main(int argc, char **argv)
test_basic();
test_seal_write();
+ test_seal_future_write();
test_seal_shrink();
test_seal_grow();
test_seal_resize();
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index f8f3e90700c0..1e6d14d2825c 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -21,6 +21,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
KSFT_KHDR_INSTALL := 1
include ../lib.mk
-$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
+$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 5821bdd98d20..e9c860d00416 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -17,8 +17,7 @@ CONFIG_VLAN_8021Q=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_NF_NAT=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP6_NF_NAT=m
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 802b4af18729..1080ff55a788 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -388,6 +388,7 @@ fib_carrier_unicast_test()
set -e
$IP link set dev dummy0 carrier off
+ sleep 1
set +e
echo " Carrier down"
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
index d8313d0438b7..b90dff8d3a94 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
+ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
NUM_NETIFS=4
CHECK_TC="yes"
source lib.sh
@@ -96,6 +96,51 @@ flooding()
flood_test $swp2 $h1 $h2
}
+vlan_deletion()
+{
+ # Test that the deletion of a VLAN on a bridge port does not affect
+ # the PVID VLAN
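+ # VLAN 10 is not the PVID, so after it is removed traffic on the PVID
+ # should still be forwarded, which the pings below verify.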
+ log_info "Add and delete a VLAN on bridge port $swp1"
+
+ bridge vlan add vid 10 dev $swp1
+ bridge vlan del vid 10 dev $swp1
+
+ ping_ipv4
+ ping_ipv6
+}
+
+extern_learn()
+{
+ local mac=de:ad:be:ef:13:37
+ local ageing_time
+
+ # Test that externally learned FDB entries can roam, but not age out
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
+
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+ check_err $? "Did not find FDB entry when should"
+
+ # Wait for 10 seconds after the ageing time to make sure the FDB entry
+ # was not aged out
+ ageing_time=$(bridge_ageing_time_get br0)
+ sleep $((ageing_time + 10))
+
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+ check_err $? "FDB entry was aged out when should not"
+
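+ # Transmitting a frame with the entry's MAC as its source from $h2 should
+ # make the externally learned entry roam to $swp2.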
+ $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
+
+ bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
+ check_err $? "FDB entry did not roam when should"
+
+ log_test "Externally learned FDB entry - ageing & roaming"
+
+ bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
+ bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
index 5cd2aed97958..da96eff72a8e 100644
--- a/tools/testing/selftests/net/forwarding/config
+++ b/tools/testing/selftests/net/forwarding/config
@@ -10,3 +10,5 @@ CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_ACT_GACT=m
CONFIG_VETH=m
+CONFIG_NAMESPACES=y
+CONFIG_NET_NS=y
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 5ab1e5f43022..57cf8914910d 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -32,7 +32,7 @@ DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
##############################################################################
# Sanity checks
-devlink -j resource show "$DEVLINK_DEV" &> /dev/null
+devlink help 2>&1 | grep resource &> /dev/null
if [ $? -ne 0 ]; then
echo "SKIP: iproute2 too old, missing devlink resource support"
exit 1
diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
index e819d049d9ce..e2adb533c8fc 100644
--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
+++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
@@ -33,3 +33,6 @@ PAUSE_ON_CLEANUP=no
NETIF_TYPE=veth
# Whether to create virtual interfaces (veth) or not
NETIF_CREATE=yes
+# Timeout (in seconds) before ping exits regardless of how many packets have
+# been sent or received
+PING_TIMEOUT=5
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh
new file mode 100755
index 000000000000..abb694397b86
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel without key.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1
+ sw2_flat_create gre $ol2 $ul2
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh
new file mode 100755
index 000000000000..c4f373337e48
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with key.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1 key 233
+ sw2_flat_create gre $ol2 $ul2 key 233
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh
new file mode 100755
index 000000000000..a811130c0627
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with ikey/okey.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1 ikey 111 okey 222
+ sw2_flat_create gre $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh
new file mode 100755
index 000000000000..05c5b3cf2f78
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels without key.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1
+ sw2_hierarchical_create gre $ol2 $ul2
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh
new file mode 100755
index 000000000000..9b105dbca32a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with key.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1 key 22
+ sw2_hierarchical_create gre $ol2 $ul2 key 22
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh
new file mode 100755
index 000000000000..e275d25bd83a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with ikey/okey.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1 ikey 111 okey 222
+ sw2_hierarchical_create gre $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_lib.sh b/tools/testing/selftests/net/forwarding/ipip_lib.sh
new file mode 100644
index 000000000000..30f36a57bae6
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_lib.sh
@@ -0,0 +1,349 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Handles creation and destruction of IP-in-IP or GRE tunnels over the given
+# topology. Supports both flat and hierarchical models.
+#
+# Flat Model:
+# Overlay and underlay share the same VRF.
+# SW1 uses the default VRF, so the tunnel has no bound dev.
+# SW2 uses a non-default VRF, so the tunnel has a bound dev.
+# +-------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | SW1 | |
+# | $ol1 + |
+# | 192.0.2.2/28 |
+# | |
+# | + g1a (gre) |
+# | loc=192.0.2.65 |
+# | rem=192.0.2.66 --. |
+# | tos=inherit | |
+# | .------------------' |
+# | | |
+# | v |
+# | + $ul1.111 (vlan) |
+# | | 192.0.2.129/28 |
+# | \ |
+# | \_______ |
+# | | |
+# |VRF default + $ul1 |
+# +------------|------------+
+# |
+# +------------|------------+
+# | SW2 + $ul2 |
+# | _______| |
+# | / |
+# | / |
+# | + $ul2.111 (vlan) |
+# | ^ 192.0.2.130/28 |
+# | | |
+# | | |
+# | '------------------. |
+# | + g2a (gre) | |
+# | loc=192.0.2.66 | |
+# | rem=192.0.2.65 --' |
+# | tos=inherit |
+# | |
+# | $ol2 + |
+# | 192.0.2.17/28 | |
+# | VRF v$ol2 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +-------------------------+
+#
+# Hierarchical model:
+# The tunnel is bound to a device in a different VRF
+#
+# +---------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-------+
+# |
+# +-------------------|-------+
+# | SW1 | |
+# | +-----------------|-----+ |
+# | | $ol1 + | |
+# | | 192.0.2.2/28 | |
+# | | | |
+# | | + g1a (gre) | |
+# | | rem=192.0.2.66 | |
+# | | tos=inherit | |
+# | | loc=192.0.2.65 | |
+# | | ^ | |
+# | | VRF v$ol1 | | |
+# | +-----------|-----------+ |
+# | | |
+# | +-----------|-----------+ |
+# | | VRF v$ul1 | | |
+# | | | | |
+# | | | | |
+# | | v | |
+# | | dummy1 + | |
+# | | 192.0.2.65 | |
+# | | .-------' | |
+# | | | | |
+# | | v | |
+# | | + $ul1.111 (vlan) | |
+# | | | 192.0.2.129/28 | |
+# | | \ | |
+# | | \_____ | |
+# | | | | |
+# | | + $ul1 | |
+# | +----------|------------+ |
+# +------------|--------------+
+# |
+# +------------|--------------+
+# | SW2 | |
+# | +----------|------------+ |
+# | | + $ul2 | |
+# | | _____| | |
+# | | / | |
+# | | / | |
+# | | | $ul2.111 (vlan) | |
+# | | + 192.0.2.130/28 | |
+# | | ^ | |
+# | | | | |
+# | | '-------. | |
+# | | dummy2 + | |
+# | | 192.0.2.66 | |
+# | | ^ | |
+# | | | | |
+# | | | | |
+# | | VRF v$ul2 | | |
+# | +-----------|-----------+ |
+# | | |
+# | +-----------|-----------+ |
+# | | VRF v$ol2 | | |
+# | | | | |
+# | | v | |
+# | | g2a (gre)+ | |
+# | | loc=192.0.2.66 | |
+# | | rem=192.0.2.65 | |
+# | | tos=inherit | |
+# | | | |
+# | | $ol2 + | |
+# | | 192.0.2.17/28 | | |
+# | +-----------------|-----+ |
+# +-------------------|-------+
+# |
+# +-------------------|-------+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +---------------------------+
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ simple_if_fini $h2 192.0.2.18/28
+}
+
+sw1_flat_create()
+{
+ local type=$1; shift
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip link set dev $ol1 up
+ __addr_add_del $ol1 add "192.0.2.2/28"
+
+ ip link set dev $ul1 up
+ vlan_create $ul1 111 "" 192.0.2.129/28
+
+ tunnel_create g1a $type 192.0.2.65 192.0.2.66 tos inherit "$@"
+ ip link set dev g1a up
+ __addr_add_del g1a add "192.0.2.65/32"
+
+ ip route add 192.0.2.66/32 via 192.0.2.130
+
+ ip route add 192.0.2.16/28 nexthop dev g1a
+}
+
+sw1_flat_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip route del 192.0.2.16/28
+
+ ip route del 192.0.2.66/32 via 192.0.2.130
+ __simple_if_fini g1a 192.0.2.65/32
+ tunnel_destroy g1a
+
+ vlan_destroy $ul1 111
+ __simple_if_fini $ul1
+ __simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_flat_create()
+{
+ local type=$1; shift
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 192.0.2.17/28
+ __simple_if_init $ul2 v$ol2
+ vlan_create $ul2 111 v$ol2 192.0.2.130/28
+
+ tunnel_create g2a $type 192.0.2.66 192.0.2.65 tos inherit dev v$ol2 \
+ "$@"
+ __simple_if_init g2a v$ol2 192.0.2.66/32
+
+ ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ ip route add vrf v$ol2 192.0.2.0/28 nexthop dev g2a
+}
+
+sw2_flat_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip route del vrf v$ol2 192.0.2.0/28
+
+ ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ __simple_if_fini g2a 192.0.2.66/32
+ tunnel_destroy g2a
+
+ vlan_destroy $ul2 111
+ __simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+sw1_hierarchical_create()
+{
+ local type=$1; shift
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ simple_if_init $ol1 192.0.2.2/28
+ simple_if_init $ul1
+ ip link add name dummy1 type dummy
+ __simple_if_init dummy1 v$ul1 192.0.2.65/32
+
+ vlan_create $ul1 111 v$ul1 192.0.2.129/28
+ tunnel_create g1a $type 192.0.2.65 192.0.2.66 tos inherit dev dummy1 \
+ "$@"
+ ip link set dev g1a master v$ol1
+
+ ip route add vrf v$ul1 192.0.2.66/32 via 192.0.2.130
+ ip route add vrf v$ol1 192.0.2.16/28 nexthop dev g1a
+}
+
+sw1_hierarchical_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip route del vrf v$ol1 192.0.2.16/28
+ ip route del vrf v$ul1 192.0.2.66/32
+
+ tunnel_destroy g1a
+ vlan_destroy $ul1 111
+
+ __simple_if_fini dummy1 192.0.2.65/32
+ ip link del dev dummy1
+
+ simple_if_fini $ul1
+ simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_hierarchical_create()
+{
+ local type=$1; shift
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 192.0.2.17/28
+ simple_if_init $ul2
+
+ ip link add name dummy2 type dummy
+ __simple_if_init dummy2 v$ul2 192.0.2.66/32
+
+ vlan_create $ul2 111 v$ul2 192.0.2.130/28
+ tunnel_create g2a $type 192.0.2.66 192.0.2.65 tos inherit dev dummy2 \
+ "$@"
+ ip link set dev g2a master v$ol2
+
+ ip route add vrf v$ul2 192.0.2.65/32 via 192.0.2.129
+ ip route add vrf v$ol2 192.0.2.0/28 nexthop dev g2a
+}
+
+sw2_hierarchical_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip route del vrf v$ol2 192.0.2.0/28
+ ip route del vrf v$ul2 192.0.2.65/32
+
+ tunnel_destroy g2a
+ vlan_destroy $ul2 111
+
+ __simple_if_fini dummy2 192.0.2.66/32
+ ip link del dev dummy2
+
+ simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+topo_mtu_change()
+{
+ local mtu=$1
+
+ ip link set mtu $mtu dev $h1
+ ip link set mtu $mtu dev $ol1
+ ip link set mtu $mtu dev g1a
+ ip link set mtu $mtu dev $ul1
+ ip link set mtu $mtu dev $ul1.111
+ ip link set mtu $mtu dev $h2
+ ip link set mtu $mtu dev $ol2
+ ip link set mtu $mtu dev g2a
+ ip link set mtu $mtu dev $ul2
+ ip link set mtu $mtu dev $ul2.111
+}
+
+test_mtu_change()
+{
+ local encap=$1; shift
+
+ RET=0
+
+ ping_do $h1 192.0.2.18 "-s 1800 -w 3"
+ check_fail $? "ping $encap should not pass with size 1800"
+
+ RET=0
+
+ topo_mtu_change 2000
+ ping_do $h1 192.0.2.18 "-s 1800 -w 3"
+ check_err $?
+ log_test "ping $encap packet size 1800 after MTU change"
+}
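The sw*_create() functions above rely on the tunnel_create() helper from lib.sh. Assuming it is a thin wrapper around ip link add (an assumption, not verified here), the flat-model tunnel on SW1 boils down to roughly:

    # Rough expansion of: tunnel_create g1a gre 192.0.2.65 192.0.2.66 tos inherit
    ip link add name g1a type gre \
        local 192.0.2.65 remote 192.0.2.66 tos inherit
    ip link set dev g1a up
    ip addr add 192.0.2.65/32 dev g1a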
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 3f248d1f5b91..9385dc971269 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -17,6 +17,7 @@ NETIF_TYPE=${NETIF_TYPE:=veth}
NETIF_CREATE=${NETIF_CREATE:=yes}
MCD=${MCD:=smcrouted}
MC_CLI=${MC_CLI:=smcroutectl}
+PING_TIMEOUT=${PING_TIMEOUT:=5}
relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -211,7 +212,7 @@ log_test()
return 1
fi
- printf "TEST: %-60s [PASS]\n" "$test_name $opt_str"
+ printf "TEST: %-60s [ OK ]\n" "$test_name $opt_str"
return 0
}
@@ -820,7 +821,8 @@ ping_do()
local vrf_name
vrf_name=$(master_name_get $if_name)
- ip vrf exec $vrf_name $PING $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ip vrf exec $vrf_name \
+ $PING $args $dip -c 10 -i 0.1 -w $PING_TIMEOUT &> /dev/null
}
ping_test()
@@ -840,7 +842,8 @@ ping6_do()
local vrf_name
vrf_name=$(master_name_get $if_name)
- ip vrf exec $vrf_name $PING6 $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ip vrf exec $vrf_name \
+ $PING6 $args $dip -c 10 -i 0.1 -w $PING_TIMEOUT &> /dev/null
}
ping6_test()
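PING_TIMEOUT now bounds how long ping may run regardless of how many packets were sent or received. Because of the ${PING_TIMEOUT:=5} default above, it can be overridden from the environment, or via the forwarding.config file that lib.sh reads (see the new forwarding.config.sample entry). Illustrative usage:

    # Example override for a slow setup (value is illustrative):
    PING_TIMEOUT=20 ./router_broadcast.sh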
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
index 61844caf671e..28d568c48a73 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -190,6 +190,8 @@ setup_prepare()
h4_create
switch_create
+ forwarding_enable
+
trap_install $h3 ingress
trap_install $h4 ingress
}
@@ -201,6 +203,8 @@ cleanup()
trap_uninstall $h4 ingress
trap_uninstall $h3 ingress
+ forwarding_restore
+
switch_destroy
h4_destroy
h3_destroy
@@ -220,11 +224,15 @@ test_lag_slave()
RET=0
+ tc filter add dev $swp1 ingress pref 999 \
+ proto 802.1q flower vlan_ethtype arp $tcflags \
+ action pass
mirror_install $swp1 ingress gt4 \
- "proto 802.1q flower vlan_id 333 $tcflags"
+ "proto 802.1q flower vlan_id 333 $tcflags"
# Test connectivity through $up_dev when $down_dev is set down.
ip link set dev $down_dev down
+ ip neigh flush dev br1
setup_wait_dev $up_dev
setup_wait_dev $host_dev
$ARPING -I br1 192.0.2.130 -qfc 1
@@ -240,6 +248,7 @@ test_lag_slave()
ip link set dev $up_dev up
ip link set dev $down_dev up
mirror_uninstall $swp1 ingress
+ tc filter del dev $swp1 ingress pref 999
log_test "$what ($tcflags)"
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index 135902aa8b11..472bd023e2a5 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -79,6 +79,7 @@ test_span_gre_ttl()
mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 0
ip link set dev $tundev type $type ttl 50
+ sleep 2
mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
ip link set dev $tundev type $type ttl 100
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
index 12914f40612d..09389f3b9369 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
@@ -81,6 +81,8 @@ full_test_span_gre_dir_acl()
local match_dip=$1; shift
local what=$1; shift
+ RET=0
+
mirror_install $swp1 $direction $tundev \
"protocol ip flower $tcflags dst_ip $match_dip"
fail_test_span_gre_dir $tundev $direction
@@ -108,8 +110,6 @@ test_ip6gretap()
test_all()
{
- RET=0
-
slow_path_trap_install $swp1 ingress
slow_path_trap_install $swp1 egress
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index 204b25f13934..c02291e9841e 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -1,11 +1,44 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-# This test uses standard topology for testing gretap. See
-# mirror_gre_topo_lib.sh for more details.
-#
# Test for "tc action mirred egress mirror" when the underlay route points at a
# vlan device on top of a bridge device with vlan filtering (802.1q).
+#
+# +---------------------+ +---------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +-----|---------------+ +---------------|-----+
+# | |
+# +-----|-------------------------------------------------------------|-----+
+# | SW o--> mirred egress mirror dev {gt4,gt6} | |
+# | | | |
+# | +---|-------------------------------------------------------------|---+ |
+# | | + $swp1 br1 $swp2 + | |
+# | | | |
+# | | + $swp3 | |
+# | +---|-----------------------------------------------------------------+ |
+# | | | |
+# | | + br1.555 |
+# | | 192.0.2.130/28 |
+# | | 2001:db8:2::2/64 |
+# | | |
+# | | + gt6 (ip6gretap) + gt4 (gretap) |
+# | | : loc=2001:db8:2::1 : loc=192.0.2.129 |
+# | | : rem=2001:db8:2::2 : rem=192.0.2.130 |
+# | | : ttl=100 : ttl=100 |
+# | | : tos=inherit : tos=inherit |
+# | | : : |
+# +-----|---------------------:----------------------:----------------------+
+# | : :
+# +-----|---------------------:----------------------:----------------------+
+# | H3 + $h3 + h3-gt6 (ip6gretap) + h3-gt4 (gretap) |
+# | | loc=2001:db8:2::2 loc=192.0.2.130 |
+# | + $h3.555 rem=2001:db8:2::1 rem=192.0.2.129 |
+# | 192.0.2.130/28 ttl=100 ttl=100 |
+# | 2001:db8:2::2/64 tos=inherit tos=inherit |
+# | |
+# +-------------------------------------------------------------------------+
ALL_TESTS="
test_gretap
@@ -30,6 +63,15 @@ source mirror_gre_topo_lib.sh
require_command $ARPING
+h3_addr_add_del()
+{
+ local add_del=$1; shift
+ local dev=$1; shift
+
+ ip addr $add_del dev $dev 192.0.2.130/28
+ ip addr $add_del dev $dev 2001:db8:2::2/64
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -55,7 +97,8 @@ setup_prepare()
ip route rep 192.0.2.130/32 dev br1.555
ip -6 route rep 2001:db8:2::2/128 dev br1.555
- vlan_create $h3 555 v$h3 192.0.2.130/28 2001:db8:2::2/64
+ vlan_create $h3 555 v$h3
+ h3_addr_add_del add $h3.555
ip link set dev $swp3 master br1
bridge vlan add dev $swp3 vid 555
@@ -68,6 +111,8 @@ cleanup()
ip link set dev $swp2 nomaster
ip link set dev $swp3 nomaster
+
+ h3_addr_add_del del $h3.555
vlan_destroy $h3 555
vlan_destroy br1 555
@@ -182,13 +227,19 @@ test_span_gre_untagged_egress()
quick_test_span_gre_dir $tundev ingress
quick_test_span_vlan_dir $h3 555 ingress
+ h3_addr_add_del del $h3.555
bridge vlan add dev $swp3 vid 555 pvid untagged
- sleep 1
+ h3_addr_add_del add $h3
+ sleep 5
+
quick_test_span_gre_dir $tundev ingress
fail_test_span_vlan_dir $h3 555 ingress
+ h3_addr_add_del del $h3
bridge vlan add dev $swp3 vid 555
- sleep 1
+ h3_addr_add_del add $h3.555
+ sleep 5
+
quick_test_span_gre_dir $tundev ingress
quick_test_span_vlan_dir $h3 555 ingress
@@ -218,12 +269,25 @@ test_span_gre_fdb_roaming()
mirror_install $swp1 ingress $tundev "matchall $tcflags"
quick_test_span_gre_dir $tundev ingress
- bridge fdb del dev $swp3 $h3mac vlan 555 master
- bridge fdb add dev $swp2 $h3mac vlan 555 master
- sleep 1
- fail_test_span_gre_dir $tundev ingress
-
- bridge fdb del dev $swp2 $h3mac vlan 555 master
+ while ((RET == 0)); do
+ bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
+ bridge fdb add dev $swp2 $h3mac vlan 555 master
+ sleep 1
+ fail_test_span_gre_dir $tundev ingress
+
+ if ! bridge fdb sh dev $swp2 vlan 555 master \
+ | grep -q $h3mac; then
+ printf "TEST: %-60s [RETRY]\n" \
+ "$what: MAC roaming ($tcflags)"
+ # ARP or ND probably reprimed the FDB while the test
+ # was running. We would get a spurious failure.
+ RET=0
+ continue
+ fi
+ break
+ done
+
+ bridge fdb del dev $swp2 $h3mac vlan 555 master 2>/dev/null
# Re-prime FDB
$ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index 07991e1025c7..00797597fcf5 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -29,9 +29,12 @@ mirror_test()
local pref=$1; shift
local expect=$1; shift
+ local ping_timeout=$((PING_TIMEOUT * 5))
local t0=$(tc_rule_stats_get $dev $pref)
ip vrf exec $vrf_name \
- ${PING} ${sip:+-I $sip} $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ${PING} ${sip:+-I $sip} $dip -c 10 -i 0.5 -w $ping_timeout \
+ &> /dev/null
+ sleep 0.5
local t1=$(tc_rule_stats_get $dev $pref)
local delta=$((t1 - t0))
# Tolerate a couple stray extra packets.
diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh
index 7bd2ebb6e9de..9a678ece32b4 100755
--- a/tools/testing/selftests/net/forwarding/router_broadcast.sh
+++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh
@@ -170,7 +170,8 @@ ping_test_from()
log_info "ping $dip, expected reply from $from"
ip vrf exec $(master_name_get $oif) \
- $PING -I $oif $dip -c 10 -i 0.1 -w 2 -b 2>&1 | grep $from &> /dev/null
+ $PING -I $oif $dip -c 10 -i 0.1 -w $PING_TIMEOUT -b 2>&1 \
+ | grep $from &> /dev/null
check_err_fail $fail $?
}
diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
new file mode 100755
index 000000000000..a0b5f57d6bd3
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
@@ -0,0 +1,567 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +---------------------------+ +------------------------------+
+# | vrf-h1 | | vrf-h2 |
+# | + $h1 | | + $h2 |
+# | | 10.1.1.101/24 | | | 10.1.2.101/24 |
+# | | default via 10.1.1.1 | | | default via 10.1.2.1 |
+# +----|----------------------+ +----|-------------------------+
+# | |
+# +----|--------------------------------------------|-------------------------+
+# | SW | | |
+# | +--|--------------------------------------------|-----------------------+ |
+# | | + $swp1 br1 + $swp2 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.1 local 10.0.0.1 | |
+# | | remote 10.0.0.2 remote 10.0.0.2 | |
+# | | id 1000 id 2000 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | | |
+# | | + vlan10 vlan20 + | |
+# | | | 10.1.1.11/24 10.1.2.11/24 | | |
+# | | | | | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | 10.1.1.1/24 10.1.2.1/24 | |
+# | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | |
+# | | vrf-green | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | + $rp1 +lo |
+# | | 192.0.2.1/24 10.0.0.1/32 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | vrf-spine |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | |
+# | + v1 (veth) |
+# | | 192.0.3.2/24 |
+# +----|--------------------------------------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | + v2 (veth) +lo NS1 (netns) |
+# | 192.0.3.1/24 10.0.0.2/32 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | vrf-green | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | | 10.1.1.1/24 10.1.2.1/24 | | |
+# | | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | | |
+# | | | | | |
+# | | + vlan10 vlan20 + | |
+# | | | 10.1.1.12/24 10.1.2.12/24 | | |
+# | | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.2 local 10.0.0.2 | |
+# | | remote 10.0.0.1 remote 10.0.0.1 | |
+# | | id 1000 id 2000 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + w1 (veth) + w3 (veth) | |
+# | | | vid 10 pvid untagged br1 | vid 20 pvid untagged | |
+# | +--|------------------------------------------|-------------------------+ |
+# | | | |
+# | | | |
+# | +--|----------------------+ +--|-------------------------+ |
+# | | | vrf-h1 | | | vrf-h2 | |
+# | | + w2 (veth) | | + w4 (veth) | |
+# | | 10.1.1.102/24 | | 10.1.2.102/24 | |
+# | | default via 10.1.1.1 | | default via 10.1.2.1 | |
+# | +-------------------------+ +----------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ arp_decap
+ arp_suppression
+"
+NUM_NETIFS=6
+source lib.sh
+
+require_command $ARPING
+
+hx_create()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ vrf_create $vrf_name
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $vrf_name up
+ ip link set dev $if_name up
+
+ ip address add $ip_addr/24 dev $if_name
+ ip neigh replace $gw_ip lladdr 00:00:5e:00:01:01 nud permanent \
+ dev $if_name
+ ip route add default vrf $vrf_name nexthop via $gw_ip
+}
+export -f hx_create
+
+hx_destroy()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ ip route del default vrf $vrf_name nexthop via $gw_ip
+ ip neigh del $gw_ip dev $if_name
+ ip address del $ip_addr/24 dev $if_name
+
+ ip link set dev $if_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ hx_create "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h1_destroy()
+{
+ hx_destroy "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h2_create()
+{
+ hx_create "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+h2_destroy()
+{
+ hx_destroy "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+ # that of the VxLAN's device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ ip address add dev $rp1 192.0.2.1/24
+ ip route add 10.0.0.2/32 nexthop via 192.0.2.2
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 20 dev $swp2 pvid untagged
+
+ ip address add 10.0.0.1/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.11/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.11/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+
+switch_destroy()
+{
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+ bridge vlan del vid 20 dev br1 self
+ bridge vlan del vid 10 dev br1 self
+
+ ip link del dev vlan20
+
+ ip link del dev vlan10
+
+ vrf_destroy "vrf-green"
+
+ ip address del 10.0.0.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ ip route del 10.0.0.2/32 nexthop via 192.0.2.2
+ ip address del dev $rp1 192.0.2.1/24
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+spine_create()
+{
+ vrf_create "vrf-spine"
+ ip link set dev $rp2 master vrf-spine
+ ip link set dev v1 master vrf-spine
+ ip link set dev vrf-spine up
+ ip link set dev $rp2 up
+ ip link set dev v1 up
+
+ ip address add 192.0.2.2/24 dev $rp2
+ ip address add 192.0.3.2/24 dev v1
+
+ ip route add 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+ ip route add 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+}
+
+spine_destroy()
+{
+ ip route del 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+ ip route del 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+
+ ip address del 192.0.3.2/24 dev v1
+ ip address del 192.0.2.2/24 dev $rp2
+
+ ip link set dev v1 down
+ ip link set dev $rp2 down
+ vrf_destroy "vrf-spine"
+}
+
+ns_h1_create()
+{
+ hx_create "vrf-h1" w2 10.1.1.102 10.1.1.1
+}
+export -f ns_h1_create
+
+ns_h2_create()
+{
+ hx_create "vrf-h2" w4 10.1.2.102 10.1.2.1
+}
+export -f ns_h2_create
+
+ns_switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ ip link set dev br1 up
+
+ ip link set dev v2 up
+ ip address add dev v2 192.0.3.1/24
+ ip route add 10.0.0.1/32 nexthop via 192.0.3.2
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev w1 master br1
+ ip link set dev w1 up
+ bridge vlan add vid 10 dev w1 pvid untagged
+
+ ip link set dev w3 master br1
+ ip link set dev w3 up
+ bridge vlan add vid 20 dev w3 pvid untagged
+
+ ip address add 10.0.0.2/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.12/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.12/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+export -f ns_switch_create
+
+ns_init()
+{
+ ip link add name w1 type veth peer name w2
+ ip link add name w3 type veth peer name w4
+
+ ip link set dev lo up
+
+ ns_h1_create
+ ns_h2_create
+ ns_switch_create
+}
+export -f ns_init
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 ns_init
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+macs_populate()
+{
+ local mac1=$1; shift
+ local mac2=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local dst=$1; shift
+
+ bridge fdb add $mac1 dev vx10 self master extern_learn static \
+ dst $dst vlan 10
+ bridge fdb add $mac2 dev vx20 self master extern_learn static \
+ dst $dst vlan 20
+
+ ip neigh add $ip1 lladdr $mac1 nud noarp dev vlan10 \
+ extern_learn
+ ip neigh add $ip2 lladdr $mac2 nud noarp dev vlan20 \
+ extern_learn
+}
+export -f macs_populate
+
+macs_initialize()
+{
+ local h1_ns_mac=$(in_ns ns1 mac_get w2)
+ local h2_ns_mac=$(in_ns ns1 mac_get w4)
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ macs_populate $h1_ns_mac $h2_ns_mac 10.1.1.102 10.1.2.102 10.0.0.2
+ in_ns ns1 macs_populate $h1_mac $h2_mac 10.1.1.101 10.1.2.101 10.0.0.1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ spine_create
+ ns1_create
+
+ macs_initialize
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns1_destroy
+ spine_destroy
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 10.1.2.101 ": local->local vid 10->vid 20"
+ ping_test $h1 10.1.1.102 ": local->remote vid 10->vid 10"
+ ping_test $h2 10.1.2.102 ": local->remote vid 20->vid 20"
+ ping_test $h1 10.1.2.102 ": local->remote vid 10->vid 20"
+ ping_test $h2 10.1.1.102 ": local->remote vid 20->vid 10"
+}
+
+arp_decap()
+{
+ # Repeat the ping tests, but without populating the neighbours. This
+	# makes sure we correctly decapsulate ARP packets.
+ log_info "deleting neighbours from vlan interfaces"
+
+ ip neigh del 10.1.1.102 dev vlan10
+ ip neigh del 10.1.2.102 dev vlan20
+
+ ping_ipv4
+
+ ip neigh replace 10.1.1.102 lladdr $(in_ns ns1 mac_get w2) nud noarp \
+ dev vlan10 extern_learn
+ ip neigh replace 10.1.2.102 lladdr $(in_ns ns1 mac_get w4) nud noarp \
+ dev vlan20 extern_learn
+}
+
+arp_suppression_compare()
+{
+ local expect=$1; shift
+ local actual=$(in_ns ns1 tc_rule_stats_get vx10 1 ingress)
+
+ (( expect == actual ))
+ check_err $? "expected $expect arps got $actual"
+}
+
+arp_suppression()
+{
+ ip link set dev vx10 type bridge_slave neigh_suppress on
+
+ in_ns ns1 tc qdisc add dev vx10 clsact
+ in_ns ns1 tc filter add dev vx10 ingress proto arp pref 1 handle 101 \
+ flower dst_mac ff:ff:ff:ff:ff:ff arp_tip 10.1.1.102 arp_op \
+ request action pass
+
+ # The neighbour is configured on the SVI and ARP suppression is on, so
+ # the ARP request should be suppressed
+ RET=0
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 0
+
+ log_test "neigh_suppress: on / neigh exists: yes"
+
+	# Delete the neighbour from the SVI. A single ARP request should be
+ # received by the remote VTEP
+ RET=0
+
+ ip neigh del 10.1.1.102 dev vlan10
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 1
+
+ log_test "neigh_suppress: on / neigh exists: no"
+
+ # Turn off ARP suppression and make sure ARP is not suppressed,
+ # regardless of neighbour existence on the SVI
+ RET=0
+
+ ip neigh del 10.1.1.102 dev vlan10 &> /dev/null
+ ip link set dev vx10 type bridge_slave neigh_suppress off
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 2
+
+ log_test "neigh_suppress: off / neigh exists: no"
+
+ RET=0
+
+ ip neigh add 10.1.1.102 lladdr $(in_ns ns1 mac_get w2) nud noarp \
+ dev vlan10 extern_learn
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 3
+
+ log_test "neigh_suppress: off / neigh exists: yes"
+
+ in_ns ns1 tc qdisc del dev vx10 clsact
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
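The arp_suppression() test toggles neigh_suppress on the vx10 bridge port. When debugging, the flag state can be inspected from the detailed per-port bridge output (a sketch; the field name assumes a reasonably recent iproute2):

    # Show per-port bridge options for vx10, including neigh_suppress on/off:
    bridge -d link show dev vx10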
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index 56cef3b1c194..bb10e33690b2 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -629,7 +629,7 @@ __test_ecn_decap()
RET=0
tc filter add dev $h1 ingress pref 77 prot ip \
- flower ip_tos $decapped_tos action pass
+ flower ip_tos $decapped_tos action drop
sleep 1
vxlan_encapped_ping_test v2 v1 192.0.2.17 \
$orig_inner_tos $orig_outer_tos \
diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
new file mode 100755
index 000000000000..1209031bc794
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
@@ -0,0 +1,551 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +---------------------------+ +------------------------------+
+# | vrf-h1 | | vrf-h2 |
+# | + $h1 | | + $h2 |
+# | | 10.1.1.101/24 | | | 10.1.2.101/24 |
+# | | default via 10.1.1.1 | | | default via 10.1.2.1 |
+# +----|----------------------+ +----|-------------------------+
+# | |
+# +----|--------------------------------------------|-------------------------+
+# | SW | | |
+# | +--|--------------------------------------------|-----------------------+ |
+# | | + $swp1 br1 + $swp2 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.1 local 10.0.0.1 | |
+# | | remote 10.0.0.2 remote 10.0.0.2 | |
+# | | id 1010 id 1020 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx4001 | |
+# | | local 10.0.0.1 | |
+# | | remote 10.0.0.2 | |
+# | | id 104001 | |
+# | | dstport 4789 | |
+# | | vid 4001 pvid untagged | |
+# | | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | | | |
+# | | + vlan10 | vlan20 + | |
+# | | | 10.1.1.11/24 | 10.1.2.11/24 | | |
+# | | | | | | |
+# | | + vlan10-v (macvlan) + vlan20-v (macvlan) + | |
+# | | 10.1.1.1/24 vlan4001 10.1.2.1/24 | |
+# | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | |
+# | | vrf-green | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | + $rp1 +lo |
+# | | 192.0.2.1/24 10.0.0.1/32 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | vrf-spine |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | |
+# | + v1 (veth) |
+# | | 192.0.3.2/24 |
+# +----|--------------------------------------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | + v2 (veth) +lo NS1 (netns) |
+# | 192.0.3.1/24 10.0.0.2/32 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | vrf-green | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | | 10.1.1.1/24 10.1.2.1/24 | | |
+# | | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | | |
+# | | | vlan4001 | | |
+# | | + vlan10 + vlan20 + | |
+# | | | 10.1.1.12/24 | 10.1.2.12/24 | | |
+# | | | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.2 local 10.0.0.2 | |
+# | | remote 10.0.0.1 remote 10.0.0.1 | |
+# | | id 1010 id 1020 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx4001 | |
+# | | local 10.0.0.2 | |
+# | | remote 10.0.0.1 | |
+# | | id 104001 | |
+# | | dstport 4789 | |
+# | | vid 4001 pvid untagged | |
+# | | | |
+# | | + w1 (veth) + w3 (veth) | |
+# | | | vid 10 pvid untagged br1 | vid 20 pvid untagged | |
+# | +--|------------------------------------------|-------------------------+ |
+# | | | |
+# | | | |
+# | +--|----------------------+ +--|-------------------------+ |
+# | | | vrf-h1 | | | vrf-h2 | |
+# | | + w2 (veth) | | + w4 (veth) | |
+# | | 10.1.1.102/24 | | 10.1.2.102/24 | |
+# | | default via 10.1.1.1 | | default via 10.1.2.1 | |
+# | +-------------------------+ +----------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+"
+NUM_NETIFS=6
+source lib.sh
+
+hx_create()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ vrf_create $vrf_name
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $vrf_name up
+ ip link set dev $if_name up
+
+ ip address add $ip_addr/24 dev $if_name
+ ip neigh replace $gw_ip lladdr 00:00:5e:00:01:01 nud permanent \
+ dev $if_name
+ ip route add default vrf $vrf_name nexthop via $gw_ip
+}
+export -f hx_create
+
+hx_destroy()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ ip route del default vrf $vrf_name nexthop via $gw_ip
+ ip neigh del $gw_ip dev $if_name
+ ip address del $ip_addr/24 dev $if_name
+
+ ip link set dev $if_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ hx_create "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h1_destroy()
+{
+ hx_destroy "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h2_create()
+{
+ hx_create "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+h2_destroy()
+{
+ hx_destroy "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+ # that of the VxLAN's device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ ip address add dev $rp1 192.0.2.1/24
+ ip route add 10.0.0.2/32 nexthop via 192.0.2.2
+
+ ip link add name vx10 type vxlan id 1010 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 1020 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 20 dev $swp2 pvid untagged
+
+ ip link add name vx4001 type vxlan id 104001 \
+ local 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx4001 up
+
+ ip link set dev vx4001 master br1
+ bridge vlan add vid 4001 dev vx4001 pvid untagged
+
+ ip address add 10.0.0.1/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.11/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.11/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ ip link add link br1 name vlan4001 up master vrf-green \
+ type vlan id 4001
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+ bridge vlan add vid 4001 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+
+switch_destroy()
+{
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+ bridge vlan del vid 4001 dev br1 self
+ bridge vlan del vid 20 dev br1 self
+ bridge vlan del vid 10 dev br1 self
+
+ ip link del dev vlan4001
+
+ ip link del dev vlan20
+
+ ip link del dev vlan10
+
+ vrf_destroy "vrf-green"
+
+ ip address del 10.0.0.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 4001 dev vx4001
+ ip link set dev vx4001 nomaster
+
+ ip link set dev vx4001 down
+ ip link del dev vx4001
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ ip route del 10.0.0.2/32 nexthop via 192.0.2.2
+ ip address del dev $rp1 192.0.2.1/24
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+spine_create()
+{
+ vrf_create "vrf-spine"
+ ip link set dev $rp2 master vrf-spine
+ ip link set dev v1 master vrf-spine
+ ip link set dev vrf-spine up
+ ip link set dev $rp2 up
+ ip link set dev v1 up
+
+ ip address add 192.0.2.2/24 dev $rp2
+ ip address add 192.0.3.2/24 dev v1
+
+ ip route add 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+ ip route add 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+}
+
+spine_destroy()
+{
+ ip route del 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+ ip route del 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+
+ ip address del 192.0.3.2/24 dev v1
+ ip address del 192.0.2.2/24 dev $rp2
+
+ ip link set dev v1 down
+ ip link set dev $rp2 down
+ vrf_destroy "vrf-spine"
+}
+
+ns_h1_create()
+{
+ hx_create "vrf-h1" w2 10.1.1.102 10.1.1.1
+}
+export -f ns_h1_create
+
+ns_h2_create()
+{
+ hx_create "vrf-h2" w4 10.1.2.102 10.1.2.1
+}
+export -f ns_h2_create
+
+ns_switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ ip link set dev br1 up
+
+ ip link set dev v2 up
+ ip address add dev v2 192.0.3.1/24
+ ip route add 10.0.0.1/32 nexthop via 192.0.3.2
+
+ ip link add name vx10 type vxlan id 1010 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 1020 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link add name vx4001 type vxlan id 104001 \
+ local 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx4001 up
+
+ ip link set dev vx4001 master br1
+ bridge vlan add vid 4001 dev vx4001 pvid untagged
+
+ ip link set dev w1 master br1
+ ip link set dev w1 up
+ bridge vlan add vid 10 dev w1 pvid untagged
+
+ ip link set dev w3 master br1
+ ip link set dev w3 up
+ bridge vlan add vid 20 dev w3 pvid untagged
+
+ ip address add 10.0.0.2/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.12/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.12/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ ip link add link br1 name vlan4001 up master vrf-green \
+ type vlan id 4001
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+ bridge vlan add vid 4001 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+export -f ns_switch_create
+
+ns_init()
+{
+ ip link add name w1 type veth peer name w2
+ ip link add name w3 type veth peer name w4
+
+ ip link set dev lo up
+
+ ns_h1_create
+ ns_h2_create
+ ns_switch_create
+}
+export -f ns_init
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 ns_init
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+__l2_vni_init()
+{
+ local mac1=$1; shift
+ local mac2=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local dst=$1; shift
+
+ bridge fdb add $mac1 dev vx10 self master extern_learn static \
+ dst $dst vlan 10
+ bridge fdb add $mac2 dev vx20 self master extern_learn static \
+ dst $dst vlan 20
+
+ ip neigh add $ip1 lladdr $mac1 nud noarp dev vlan10 \
+ extern_learn
+ ip neigh add $ip2 lladdr $mac2 nud noarp dev vlan20 \
+ extern_learn
+}
+export -f __l2_vni_init
+
+l2_vni_init()
+{
+ local h1_ns_mac=$(in_ns ns1 mac_get w2)
+ local h2_ns_mac=$(in_ns ns1 mac_get w4)
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ __l2_vni_init $h1_ns_mac $h2_ns_mac 10.1.1.102 10.1.2.102 10.0.0.2
+ in_ns ns1 __l2_vni_init $h1_mac $h2_mac 10.1.1.101 10.1.2.101 10.0.0.1
+}
+
+__l3_vni_init()
+{
+ local mac=$1; shift
+ local vtep_ip=$1; shift
+ local host1_ip=$1; shift
+ local host2_ip=$1; shift
+
+ bridge fdb add $mac dev vx4001 self master extern_learn static \
+ dst $vtep_ip vlan 4001
+
+ ip neigh add $vtep_ip lladdr $mac nud noarp dev vlan4001 extern_learn
+
+ ip route add $host1_ip/32 vrf vrf-green nexthop via $vtep_ip \
+ dev vlan4001 onlink
+ ip route add $host2_ip/32 vrf vrf-green nexthop via $vtep_ip \
+ dev vlan4001 onlink
+}
+export -f __l3_vni_init
+
+l3_vni_init()
+{
+ local vlan4001_ns_mac=$(in_ns ns1 mac_get vlan4001)
+ local vlan4001_mac=$(mac_get vlan4001)
+
+ __l3_vni_init $vlan4001_ns_mac 10.0.0.2 10.1.1.102 10.1.2.102
+ in_ns ns1 __l3_vni_init $vlan4001_mac 10.0.0.1 10.1.1.101 10.1.2.101
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ spine_create
+ ns1_create
+
+ l2_vni_init
+ l3_vni_init
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns1_destroy
+ spine_destroy
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 10.1.2.101 ": local->local vid 10->vid 20"
+ ping_test $h1 10.1.1.102 ": local->remote vid 10->vid 10"
+ ping_test $h2 10.1.2.102 ": local->remote vid 20->vid 20"
+ ping_test $h1 10.1.2.102 ": local->remote vid 10->vid 20"
+ ping_test $h2 10.1.1.102 ": local->remote vid 20->vid 10"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
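__l3_vni_init() wires the L3 VNI (vx4001/vlan4001) with a static router-MAC FDB entry, an extern_learn neighbour and onlink host routes toward the remote VTEP. When a symmetric-routing ping fails, that plumbing can be inspected directly (illustrative commands, not part of the test):

    # Verify the router-MAC FDB entry, the vlan4001 neighbour and the host routes:
    bridge fdb show dev vx4001
    ip neigh show dev vlan4001
    ip route show vrf vrf-green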
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
index 61ae2782388e..c0c9ecb891e1 100644
--- a/tools/testing/selftests/net/ip_defrag.c
+++ b/tools/testing/selftests/net/ip_defrag.c
@@ -20,6 +20,7 @@ static bool cfg_do_ipv4;
static bool cfg_do_ipv6;
static bool cfg_verbose;
static bool cfg_overlap;
+static bool cfg_permissive;
static unsigned short cfg_port = 9000;
const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) };
@@ -35,7 +36,7 @@ const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT;
static int payload_len;
static int max_frag_len;
-#define MSG_LEN_MAX 60000 /* Max UDP payload length. */
+#define MSG_LEN_MAX 10000 /* Max UDP payload length. */
#define IP4_MF (1u << 13) /* IPv4 MF flag. */
#define IP6_MF (1) /* IPv6 MF flag. */
@@ -59,13 +60,14 @@ static void recv_validate_udp(int fd_udp)
msg_counter++;
if (cfg_overlap) {
- if (ret != -1)
- error(1, 0, "recv: expected timeout; got %d",
- (int)ret);
- if (errno != ETIMEDOUT && errno != EAGAIN)
- error(1, errno, "recv: expected timeout: %d",
- errno);
- return; /* OK */
+ if (ret == -1 && (errno == ETIMEDOUT || errno == EAGAIN))
+ return; /* OK */
+ if (!cfg_permissive) {
+ if (ret != -1)
+ error(1, 0, "recv: expected timeout; got %d",
+ (int)ret);
+ error(1, errno, "recv: expected timeout: %d", errno);
+ }
}
if (ret == -1)
@@ -239,33 +241,80 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
iphdr->ip_sum = 0;
}
+ /* Occasionally test in-order fragments. */
+ if (!cfg_overlap && (rand() % 100 < 15)) {
+ offset = 0;
+ while (offset < (UDP_HLEN + payload_len)) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += max_frag_len;
+ }
+ return;
+ }
+
+ /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
+ if (!cfg_overlap && (rand() % 100 < 20) &&
+ (payload_len > 9 * max_frag_len)) {
+ offset = 6 * max_frag_len;
+ while (offset < (UDP_HLEN + payload_len)) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += max_frag_len;
+ }
+ offset = 3 * max_frag_len;
+ while (offset < 6 * max_frag_len) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += max_frag_len;
+ }
+ offset = 0;
+ while (offset < 3 * max_frag_len) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += max_frag_len;
+ }
+ return;
+ }
+
/* Odd fragments. */
offset = max_frag_len;
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
+ /* IPv4 ignores duplicates, so randomly send a duplicate. */
+ if (rand() % 100 == 1)
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
if (cfg_overlap) {
- /* Send an extra random fragment. */
- offset = rand() % (UDP_HLEN + payload_len - 1);
- /* sendto() returns EINVAL if offset + frag_len is too small. */
+ /* Send an extra random fragment.
+ *
+ * Duplicates and some fragments completely inside
+ * previously sent fragments are dropped/ignored. So
+ * random offset and frag_len can result in a dropped
+ * fragment instead of a dropped queue/packet. Thus we
+ * hard-code offset and frag_len.
+ */
+ if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
+ /* not enough payload for random offset and frag_len. */
+ offset = 8;
+ frag_len = UDP_HLEN + max_frag_len;
+ } else {
+ offset = rand() % (payload_len / 2);
+ frag_len = 2 * max_frag_len + 1 + rand() % 256;
+ }
if (ipv6) {
struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
- frag_len = max_frag_len + rand() % 256;
+ /* sendto() returns EINVAL if offset + frag_len is too small. */
/* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
frag_len &= ~0x7;
fraghdr->ip6f_offlg = htons(offset / 8 | IP6_MF);
ip6hdr->ip6_plen = htons(frag_len);
frag_len += IP6_HLEN;
} else {
- frag_len = IP4_HLEN + UDP_HLEN + rand() % 256;
+ frag_len += IP4_HLEN;
iphdr->ip_off = htons(offset / 8 | IP4_MF);
iphdr->ip_len = htons(frag_len);
}
res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
if (res < 0)
- error(1, errno, "sendto overlap");
+ error(1, errno, "sendto overlap: %d", frag_len);
if (res != frag_len)
error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
frag_counter++;
@@ -275,6 +324,9 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
offset = 0;
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
+ /* IPv4 ignores duplicates, so randomly send a duplicate. */
+ if (rand() % 100 == 1)
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
}
@@ -282,9 +334,13 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
{
int fd_tx_raw, fd_rx_udp;
- struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 };
+ /* Frag queue timeout is set to one second in the calling script;
+ * socket timeout should be just a bit longer to avoid tests interfering
+ * with each other.
+ */
+ struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
int idx;
- int min_frag_len = ipv6 ? 1280 : 8;
+ int min_frag_len = 8;
/* Initialize the payload. */
for (idx = 0; idx < MSG_LEN_MAX; ++idx)
@@ -308,12 +364,32 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
payload_len += (rand() % 4096)) {
if (cfg_verbose)
printf("payload_len: %d\n", payload_len);
- max_frag_len = min_frag_len;
- do {
+
+ if (cfg_overlap) {
+ /* With overlaps, one send/receive pair below takes
+ * at least one second (== timeout) to run, so there
+ * is not enough test time to run a nested loop:
+ * the full overlap test takes 20-30 seconds.
+ */
+ max_frag_len = min_frag_len +
+ rand() % (1500 - FRAG_HLEN - min_frag_len);
send_udp_frags(fd_tx_raw, addr, alen, ipv6);
recv_validate_udp(fd_rx_udp);
- max_frag_len += 8 * (rand() % 8);
- } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len);
+ } else {
+ /* Without overlaps, each packet reassembly (== one
+ * send/receive pair below) takes very little time to
+ * run, so we can easily afford more thorough testing
+ * with a nested loop: the full non-overlap test takes
+ * less than one second.
+ */
+ max_frag_len = min_frag_len;
+ do {
+ send_udp_frags(fd_tx_raw, addr, alen, ipv6);
+ recv_validate_udp(fd_rx_udp);
+ max_frag_len += 8 * (rand() % 8);
+ } while (max_frag_len < (1500 - FRAG_HLEN) &&
+ max_frag_len <= payload_len);
+ }
}
/* Cleanup. */
@@ -356,7 +432,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "46ov")) != -1) {
+ while ((c = getopt(argc, argv, "46opv")) != -1) {
switch (c) {
case '4':
cfg_do_ipv4 = true;
@@ -367,6 +443,9 @@ static void parse_opts(int argc, char **argv)
case 'o':
cfg_overlap = true;
break;
+ case 'p':
+ cfg_permissive = true;
+ break;
case 'v':
cfg_verbose = true;
break;
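
The fragment sends above rely on the IPv4 convention that fragment offsets are carried in 8-byte units, which is why send_udp_frags() divides every byte offset by 8 before OR-ing in IP4_MF. A minimal C sketch of that encoding, illustrative only and not part of the patch (the helper name is invented and a 20-byte option-less header is assumed):

#include <arpa/inet.h>
#include <netinet/ip.h>
#include <stdbool.h>

#define IP4_MF (1u << 13)	/* IPv4 "more fragments" flag, as in the test */

/* Pack a fragment's byte offset and MF flag into ip_off, and set ip_len
 * for a fragment carrying frag_payload bytes after the header.
 */
static void set_frag_fields(struct ip *iph, unsigned int offset,
			    unsigned int frag_payload, bool more_frags)
{
	/* Offsets travel in 8-byte units; callers keep them 8-aligned. */
	iph->ip_off = htons((offset / 8) | (more_frags ? IP4_MF : 0));
	/* Assumes a 20-byte IPv4 header with no options. */
	iph->ip_len = htons(sizeof(struct ip) + frag_payload);
}
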
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
index f34672796044..15d3489ecd9c 100755
--- a/tools/testing/selftests/net/ip_defrag.sh
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -11,10 +11,21 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)"
setup() {
ip netns add "${NETNS}"
ip -netns "${NETNS}" link set lo up
+
ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1
+
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
+
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_high_thresh=9000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_timeout=1 >/dev/null 2>&1
+
+ # DST cache can get full with a lot of frags, with GC not keeping up with the test.
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
}
cleanup() {
@@ -27,7 +38,6 @@ setup
echo "ipv4 defrag"
ip netns exec "${NETNS}" ./ip_defrag -4
-
echo "ipv4 defrag with overlaps"
ip netns exec "${NETNS}" ./ip_defrag -4o
@@ -37,3 +47,16 @@ ip netns exec "${NETNS}" ./ip_defrag -6
echo "ipv6 defrag with overlaps"
ip netns exec "${NETNS}" ./ip_defrag -6o
+# insert an nf_conntrack rule so that the codepath in nf_conntrack_reasm.c is taken
+ip netns exec "${NETNS}" ip6tables -A INPUT -m conntrack --ctstate INVALID -j ACCEPT
+
+echo "ipv6 nf_conntrack defrag"
+ip netns exec "${NETNS}" ./ip_defrag -6
+
+echo "ipv6 nf_conntrack defrag with overlaps"
+# netfilter will drop some invalid packets, so we run the test in
+# permissive mode: i.e. pass the test if the packet is correctly assembled
+# even if we sent an overlap
+ip netns exec "${NETNS}" ./ip_defrag -6op
+
+echo "all tests done"
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index e2c94e47707c..912b2dc50be3 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -103,6 +103,15 @@
# and check that configured MTU is used on link creation and changes, and
# that MTU is properly calculated instead when MTU is not configured from
# userspace
+#
+# - cleanup_ipv4_exception
+# Similar to pmtu_ipv4_vxlan4_exception, but explicitly generate PMTU
+# exceptions on multiple CPUs and check that the veth device tear-down
+# happens in a timely manner
+#
+# - cleanup_ipv6_exception
+# Same as above, but use IPv6 transport from A to B
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -135,7 +144,9 @@ tests="
pmtu_vti6_default_mtu vti6: default MTU assignment
pmtu_vti4_link_add_mtu vti4: MTU setting on link creation
pmtu_vti6_link_add_mtu vti6: MTU setting on link creation
- pmtu_vti6_link_change_mtu vti6: MTU changes on link changes"
+ pmtu_vti6_link_change_mtu vti6: MTU changes on link changes
+ cleanup_ipv4_exception ipv4: cleanup of cached exceptions
+ cleanup_ipv6_exception ipv6: cleanup of cached exceptions"
NS_A="ns-$(mktemp -u XXXXXX)"
NS_B="ns-$(mktemp -u XXXXXX)"
@@ -263,8 +274,6 @@ setup_fou_or_gue() {
${ns_a} ip link set ${encap}_a up
${ns_b} ip link set ${encap}_b up
-
- sleep 1
}
setup_fou44() {
@@ -302,6 +311,10 @@ setup_gue66() {
setup_namespaces() {
for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
ip netns add ${n} || return 1
+
+ # Disable DAD, so that we don't have to wait to use the
+ # configured IPv6 addresses
+ ip netns exec ${n} sysctl -q net/ipv6/conf/default/accept_dad=0
done
}
@@ -337,8 +350,6 @@ setup_vti() {
${ns_a} ip link set vti${proto}_a up
${ns_b} ip link set vti${proto}_b up
-
- sleep 1
}
setup_vti4() {
@@ -375,8 +386,6 @@ setup_vxlan_or_geneve() {
${ns_a} ip link set ${type}_a up
${ns_b} ip link set ${type}_b up
-
- sleep 1
}
setup_geneve4() {
@@ -588,8 +597,8 @@ test_pmtu_ipvX() {
mtu "${ns_b}" veth_B-R2 1500
# Create route exceptions
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst1} > /dev/null
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} > /dev/null
# Check that exceptions have been created with the correct PMTU
pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
@@ -621,7 +630,7 @@ test_pmtu_ipvX() {
# Decrease remote MTU on path via R2, get new exception
mtu "${ns_r2}" veth_R2-B 400
mtu "${ns_b}" veth_B-R2 400
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
@@ -638,7 +647,7 @@ test_pmtu_ipvX() {
check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1
# Get new exception
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
}
@@ -687,7 +696,7 @@ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() {
mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000))
mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -767,7 +776,7 @@ test_pmtu_ipvX_over_fouY_or_gueY() {
mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -825,13 +834,13 @@ test_pmtu_vti4_exception() {
# Send DF packet without exceeding link layer MTU, check that no
# exception is created
- ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
+ ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
# Now exceed link layer MTU by one byte, check that exception is created
# with the right PMTU value
- ${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
+ ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
}
@@ -847,7 +856,7 @@ test_pmtu_vti6_exception() {
mtu "${ns_b}" veth_b 4000
mtu "${ns_a}" vti6_a 5000
mtu "${ns_b}" vti6_b 5000
- ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${tunnel6_b_addr} > /dev/null
+ ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
@@ -1008,6 +1017,61 @@ test_pmtu_vti6_link_change_mtu() {
return ${fail}
}
+check_command() {
+ cmd=${1}
+
+ if ! which ${cmd} > /dev/null 2>&1; then
+ err " missing required command: '${cmd}'"
+ return 1
+ fi
+ return 0
+}
+
+test_cleanup_vxlanX_exception() {
+ outer="${1}"
+ encap="vxlan"
+ ll_mtu=4000
+
+ check_command taskset || return 2
+ cpu_list=$(grep -m 2 processor /proc/cpuinfo | cut -d ' ' -f 2)
+
+ setup namespaces routing ${encap}${outer} || return 2
+ trace "${ns_a}" ${encap}_a "${ns_b}" ${encap}_b \
+ "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B
+
+ # Create route exception by exceeding link layer MTU
+ mtu "${ns_a}" veth_A-R1 $((${ll_mtu} + 1000))
+ mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000))
+ mtu "${ns_b}" veth_B-R1 ${ll_mtu}
+ mtu "${ns_r1}" veth_R1-B ${ll_mtu}
+
+ mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
+ mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
+
+ # Fill exception cache for multiple CPUs (2)
+ # we can always use inner IPv4 for that
+ for cpu in ${cpu_list}; do
+ taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr} > /dev/null
+ done
+
+ ${ns_a} ip link del dev veth_A-R1 &
+ iplink_pid=$!
+ sleep 1
+ if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
+ err " can't delete veth device in a timely manner, PMTU dst likely leaked"
+ return 1
+ fi
+}
+
+test_cleanup_ipv6_exception() {
+ test_cleanup_vxlanX_exception 6
+}
+
+test_cleanup_ipv4_exception() {
+ test_cleanup_vxlanX_exception 4
+}
+
usage() {
echo
echo "$0 [OPTIONS] [TEST]..."
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 78fc593dfe40..b447803f3f8a 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -391,7 +391,7 @@ kci_test_encap_vxlan()
vlan="test-vlan0"
testns="$1"
- ip netns exec "$testns" ip link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
+ ip -netns "$testns" link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
dev "$devdummy" dstport 4789 2>/dev/null
if [ $? -ne 0 ]; then
echo "FAIL: can't add vxlan interface, skipping test"
@@ -399,16 +399,68 @@ kci_test_encap_vxlan()
fi
check_err $?
- ip netns exec "$testns" ip addr add 10.2.11.49/24 dev "$vxlan"
+ ip -netns "$testns" addr add 10.2.11.49/24 dev "$vxlan"
check_err $?
- ip netns exec "$testns" ip link set up dev "$vxlan"
+ ip -netns "$testns" link set up dev "$vxlan"
check_err $?
- ip netns exec "$testns" ip link add link "$vxlan" name "$vlan" type vlan id 1
+ ip -netns "$testns" link add link "$vxlan" name "$vlan" type vlan id 1
check_err $?
- ip netns exec "$testns" ip link del "$vxlan"
+ # changelink testcases
+ ip -netns "$testns" link set dev "$vxlan" type vxlan vni 43 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan group ffe5::5 dev "$devdummy" 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan ttl inherit 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan ttl 64
+ check_err $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan nolearning
+ check_err $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan proxy 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan norsc 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan l2miss 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan l3miss 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan external 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udpcsum 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumtx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumrx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumtx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumrx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan gbp 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan gpe 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link del "$vxlan"
check_err $?
if [ $ret -ne 0 ]; then
@@ -430,19 +482,19 @@ kci_test_encap_fou()
return $ksft_skip
fi
- ip netns exec "$testns" ip fou add port 7777 ipproto 47 2>/dev/null
+ ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
if [ $? -ne 0 ];then
echo "FAIL: can't add fou port 7777, skipping test"
return 1
fi
- ip netns exec "$testns" ip fou add port 8888 ipproto 4
+ ip -netns "$testns" fou add port 8888 ipproto 4
check_err $?
- ip netns exec "$testns" ip fou del port 9999 2>/dev/null
+ ip -netns "$testns" fou del port 9999 2>/dev/null
check_fail $?
- ip netns exec "$testns" ip fou del port 7777
+ ip -netns "$testns" fou del port 7777
check_err $?
if [ $ret -ne 0 ]; then
@@ -465,12 +517,12 @@ kci_test_encap()
return $ksft_skip
fi
- ip netns exec "$testns" ip link set lo up
+ ip -netns "$testns" link set lo up
check_err $?
- ip netns exec "$testns" ip link add name "$devdummy" type dummy
+ ip -netns "$testns" link add name "$devdummy" type dummy
check_err $?
- ip netns exec "$testns" ip link set "$devdummy" up
+ ip -netns "$testns" link set "$devdummy" up
check_err $?
kci_test_encap_vxlan "$testns"
@@ -759,24 +811,24 @@ kci_test_gretap()
fi
# test native tunnel
- ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type gretap seq \
key 102 local 172.16.1.100 remote 172.16.1.200
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+ ip -netns "$testns" link add dev "$DEV_NS" type gretap external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -809,24 +861,24 @@ kci_test_ip6gretap()
fi
# test native tunnel
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap seq \
key 102 local fc00:100::1 remote fc00:100::2
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+ ip -netns "$testns" addr add dev "$DEV_NS" fc00:200::1/96
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -858,40 +910,40 @@ kci_test_erspan()
fi
# test native tunnel erspan v1
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 1 erspan 488
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test native tunnel erspan v2
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -923,41 +975,41 @@ kci_test_ip6erspan()
fi
# test native tunnel ip6erspan v1
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 1 erspan 488
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test native tunnel ip6erspan v2
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" \
+ ip -netns "$testns" link add dev "$DEV_NS" \
type ip6erspan external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index fac68d710f35..47ddfc154036 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -42,7 +42,7 @@ FIXTURE_SETUP(tls)
len = sizeof(addr);
memset(&tls12, 0, sizeof(tls12));
- tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.version = TLS_1_3_VERSION;
tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
addr.sin_family = AF_INET;
@@ -452,10 +452,12 @@ TEST_F(tls, recv_partial)
memset(recv_mem, 0, sizeof(recv_mem));
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
+ MSG_WAITALL), -1);
EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
memset(recv_mem, 0, sizeof(recv_mem));
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
+ MSG_WAITALL), -1);
EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
0);
}
@@ -565,10 +567,10 @@ TEST_F(tls, recv_peek_large_buf_mult_recs)
len = strlen(test_str_second) + 1;
EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
- len = sizeof(buf);
+ len = strlen(test_str) + 1;
memset(buf, 0, len);
- EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
-
+ EXPECT_NE((len = recv(self->cfd, buf, len,
+ MSG_PEEK | MSG_WAITALL)), -1);
len = strlen(test_str) + 1;
EXPECT_EQ(memcmp(test_str, buf, len), 0);
}
@@ -751,6 +753,20 @@ TEST_F(tls, control_msg)
EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
vec.iov_base = buf;
+ EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ EXPECT_NE(cmsg, NULL);
+ EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+ EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+ record_type = *((unsigned char *)CMSG_DATA(cmsg));
+ EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+ /* Recv the message again without MSG_PEEK */
+ record_type = 0;
+ memset(buf, 0, sizeof(buf));
+
EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len);
cmsg = CMSG_FIRSTHDR(&msg);
EXPECT_NE(cmsg, NULL);
@@ -761,4 +777,140 @@ TEST_F(tls, control_msg)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+TEST(keysizes) {
+ struct tls12_crypto_info_aes_gcm_256 tls12;
+ struct sockaddr_in addr;
+ int sfd, ret, fd, cfd;
+ socklen_t len;
+ bool notls;
+
+ notls = false;
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ }
+
+ if (!notls) {
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+ }
+
+ cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(cfd, 0);
+
+ if (!notls) {
+ ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
+ sizeof("tls"));
+ EXPECT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+ }
+
+ close(sfd);
+ close(fd);
+ close(cfd);
+}
+
+TEST(tls12) {
+ int fd, cfd;
+ bool notls;
+
+ struct tls12_crypto_info_aes_gcm_128 tls12;
+ struct sockaddr_in addr;
+ socklen_t len;
+ int sfd, ret;
+
+ notls = false;
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ }
+
+ if (!notls) {
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(cfd, 0);
+
+ if (!notls) {
+ ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
+ sizeof("tls"));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ close(sfd);
+
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+
+ send_len = strlen(test_str) + 1;
+ EXPECT_EQ(send(fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+ close(fd);
+ close(cfd);
+}
+
TEST_HARNESS_MAIN
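
The control_msg additions above peek at the TLS record type before consuming the record. The receive-side pattern they exercise boils down to reserving cmsg space and checking for a SOL_TLS/TLS_GET_RECORD_TYPE control message; a trimmed, illustrative C sketch follows (the helper name is invented, error handling is minimal, and SOL_TLS is defined defensively in case the libc headers lack it):

#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* socket level used by the kernel TLS ULP */
#endif

/* Receive one record; if the kernel attached a TLS_GET_RECORD_TYPE control
 * message, report the record type through *type.
 */
static ssize_t recv_tls_record(int fd, void *buf, size_t len,
			       unsigned char *type)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t ret = recvmsg(fd, &msg, MSG_WAITALL);

	if (ret < 0)
		return ret;

	cmsg = CMSG_FIRSTHDR(&msg);
	if (cmsg && cmsg->cmsg_level == SOL_TLS &&
	    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
		*type = *(unsigned char *)CMSG_DATA(cmsg);

	return ret;
}

Passing MSG_PEEK as well, as the test does for its first read, lets the caller inspect the record type without dequeuing the data.
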
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index aeac53a99aeb..ac2a30be9b32 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -37,7 +37,7 @@ run_one() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \
echo "ok" || \
echo "failed" &
@@ -81,7 +81,7 @@ run_one_nat() {
# will land on the 'plain' one
ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
pid=$!
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${family} -b ${addr2%/*} ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \
echo "ok" || \
echo "failed"&
@@ -99,8 +99,8 @@ run_one_2sock() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -p 12345 &
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \
echo "ok" || \
echo "failed" &
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index e279051bc631..b8265ee9923f 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -17,7 +17,6 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 0c960f673324..db3d4a8b5a4c 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -45,6 +45,8 @@ static int cfg_alen = sizeof(struct sockaddr_in6);
static int cfg_expected_pkt_nr;
static int cfg_expected_pkt_len;
static int cfg_expected_gso_size;
+static int cfg_connect_timeout_ms;
+static int cfg_rcv_timeout_ms;
static struct sockaddr_storage cfg_bind_addr;
static bool interrupted;
@@ -87,7 +89,7 @@ static unsigned long gettimeofday_ms(void)
return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}
-static void do_poll(int fd)
+static void do_poll(int fd, int timeout_ms)
{
struct pollfd pfd;
int ret;
@@ -102,8 +104,16 @@ static void do_poll(int fd)
break;
if (ret == -1)
error(1, errno, "poll");
- if (ret == 0)
- continue;
+ if (ret == 0) {
+ if (!timeout_ms)
+ continue;
+
+ timeout_ms -= 10;
+ if (timeout_ms <= 0) {
+ interrupted = true;
+ break;
+ }
+ }
if (pfd.revents != POLLIN)
error(1, errno, "poll: 0x%x expected 0x%x\n",
pfd.revents, POLLIN);
@@ -134,7 +144,7 @@ static int do_socket(bool do_tcp)
if (listen(accept_fd, 1))
error(1, errno, "listen");
- do_poll(accept_fd);
+ do_poll(accept_fd, cfg_connect_timeout_ms);
if (interrupted)
exit(0);
@@ -273,7 +283,9 @@ static void do_flush_udp(int fd)
static void usage(const char *filepath)
{
- error(1, 0, "Usage: %s [-Grtv] [-b addr] [-p port] [-l pktlen] [-n packetnr] [-S gsosize]", filepath);
+ error(1, 0, "Usage: %s [-C connect_timeout] [-Grtv] [-b addr] [-p port]"
+ " [-l pktlen] [-n packetnr] [-R rcv_timeout] [-S gsosize]",
+ filepath);
}
static void parse_opts(int argc, char **argv)
@@ -282,7 +294,7 @@ static void parse_opts(int argc, char **argv)
/* bind to any by default */
setup_sockaddr(PF_INET6, "::", &cfg_bind_addr);
- while ((c = getopt(argc, argv, "4b:Gl:n:p:rS:tv")) != -1) {
+ while ((c = getopt(argc, argv, "4b:C:Gl:n:p:rR:S:tv")) != -1) {
switch (c) {
case '4':
cfg_family = PF_INET;
@@ -292,6 +304,9 @@ static void parse_opts(int argc, char **argv)
case 'b':
setup_sockaddr(cfg_family, optarg, &cfg_bind_addr);
break;
+ case 'C':
+ cfg_connect_timeout_ms = strtoul(optarg, NULL, 0);
+ break;
case 'G':
cfg_gro_segment = true;
break;
@@ -307,6 +322,9 @@ static void parse_opts(int argc, char **argv)
case 'r':
cfg_read_all = true;
break;
+ case 'R':
+ cfg_rcv_timeout_ms = strtoul(optarg, NULL, 0);
+ break;
case 'S':
cfg_expected_gso_size = strtol(optarg, NULL, 0);
break;
@@ -329,8 +347,9 @@ static void parse_opts(int argc, char **argv)
static void do_recv(void)
{
+ int timeout_ms = cfg_tcp ? cfg_rcv_timeout_ms : cfg_connect_timeout_ms;
unsigned long tnow, treport;
- int fd, loop = 0;
+ int fd;
fd = do_socket(cfg_tcp);
@@ -342,12 +361,7 @@ static void do_recv(void)
treport = gettimeofday_ms() + 1000;
do {
- /* force termination after the second poll(); this cope both
- * with sender slower than receiver and missing packet errors
- */
- if (cfg_expected_pkt_nr && loop++)
- interrupted = true;
- do_poll(fd);
+ do_poll(fd, timeout_ms);
if (cfg_tcp)
do_flush_tcp(fd);
@@ -365,6 +379,8 @@ static void do_recv(void)
treport = tnow + 1000;
}
+ timeout_ms = cfg_rcv_timeout_ms;
+
} while (!interrupted);
if (cfg_expected_pkt_nr && (packets != cfg_expected_pkt_nr))
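
The new -C (connect) and -R (receive) timeouts above replace the old "terminate after the second poll()" heuristic: the receiver now polls in short slices and gives up once the budget is spent. A minimal, illustrative C sketch of that pattern (the 10 ms slice mirrors the decrement in the patch; the helper name is invented):

#include <errno.h>
#include <poll.h>
#include <stdbool.h>

/* Wait until fd becomes readable or, if timeout_ms is non-zero, until
 * roughly timeout_ms milliseconds have elapsed. A zero timeout_ms waits
 * forever, matching the previous behaviour.
 */
static bool wait_readable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		int ret = poll(&pfd, 1, 10);	/* 10 ms slice */

		if (ret > 0)
			return true;
		if (ret < 0 && errno != EINTR)
			return false;
		if (ret == 0 && timeout_ms) {
			timeout_ms -= 10;
			if (timeout_ms <= 0)
				return false;	/* budget exhausted */
		}
	}
}
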
diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh
index 8db35b99457c..71d7fdc513c1 100755
--- a/tools/testing/selftests/net/xfrm_policy.sh
+++ b/tools/testing/selftests/net/xfrm_policy.sh
@@ -28,6 +28,19 @@ KEY_AES=0x0123456789abcdef0123456789012345
SPI1=0x1
SPI2=0x2
+do_esp_policy() {
+ local ns=$1
+ local me=$2
+ local remote=$3
+ local lnet=$4
+ local rnet=$5
+
+ # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
+ ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow
+ # to fwd decrypted packets after esp processing:
+ ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow
+}
+
do_esp() {
local ns=$1
local me=$2
@@ -40,10 +53,59 @@ do_esp() {
ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
- # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
- ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow
- # to fwd decrypted packets after esp processing:
- ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow
+ do_esp_policy $ns $me $remote $lnet $rnet
+}
+
+# add policies with different netmasks, to make sure the kernel carries
+# the policies contained within the new netmask over when the search tree is
+# re-built.
+# The peer netns that are supposed to be encapsulated via esp have addresses
+# in the 10.0.1.0/24 and 10.0.2.0/24 subnets, respectively.
+#
+# Adding a policy for '10.0.1.0/23' will make it necessary to
+# alter the prefix of 10.0.1.0 subnet.
+# In case new prefix overlaps with existing node, the node and all
+# policies it carries need to be merged with the existing one(s).
+#
+# Do that here.
+do_overlap()
+{
+ local ns=$1
+
+ # adds new nodes to tree (neither network exists yet in policy database).
+ ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block
+
+ # adds a new node in the 10.0.0.0/24 tree (dst node exists).
+ ip -net $ns xfrm policy add src 10.2.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block
+
+ # adds a 10.2.0.0/23 node, but for different dst.
+ ip -net $ns xfrm policy add src 10.2.0.0/23 dst 10.0.1.0/24 dir fwd priority 200 action block
+
+ # dst now overlaps with the 10.0.1.0/24 ESP policy in fwd.
+ # kernel must 'promote' existing one (10.0.0.0/24) to 10.0.0.0/23.
+ # But 10.0.0.0/23 also includes existing 10.0.1.0/24, so that node
+ # also has to be merged, including source-sorted subtrees.
+ # old:
+ # 10.0.0.0/24 (node 1 in dst tree of the bin)
+ # 10.1.0.0/24 (node in src tree of dst node 1)
+ # 10.2.0.0/24 (node in src tree of dst node 1)
+ # 10.0.1.0/24 (node 2 in dst tree of the bin)
+ # 10.0.2.0/24 (node in src tree of dst node 2)
+ # 10.2.0.0/24 (node in src tree of dst node 2)
+ #
+ # The next 'policy add' adds dst '10.0.0.0/23', which means
+ # that dst node 1 and dst node 2 have to be merged including
+ # the sub-tree. As no duplicates are allowed, policies in
+ # the two '10.0.2.0/24' are also merged.
+ #
+ # after the 'add', internal search tree should look like this:
+ # 10.0.0.0/23 (node in dst tree of bin)
+ # 10.0.2.0/24 (node in src tree of dst node)
+ # 10.1.0.0/24 (node in src tree of dst node)
+ # 10.2.0.0/24 (node in src tree of dst node)
+ #
+ # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
+ ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
}
do_esp_policy_get_check() {
@@ -160,6 +222,41 @@ check_xfrm() {
return $lret
}
+check_exceptions()
+{
+ logpostfix="$1"
+ local lret=0
+
+ # ping to .254 should be excluded from the tunnel (exception is in place).
+ check_xfrm 0 254
+ if [ $? -ne 0 ]; then
+ echo "FAIL: expected ping to .254 to fail ($logpostfix)"
+ lret=1
+ else
+ echo "PASS: ping to .254 bypassed ipsec tunnel ($logpostfix)"
+ fi
+
+ # ping to .253 should use ipsec due to direct policy exception.
+ check_xfrm 1 253
+ if [ $? -ne 0 ]; then
+ echo "FAIL: expected ping to .253 to use ipsec tunnel ($logpostfix)"
+ lret=1
+ else
+ echo "PASS: direct policy matches ($logpostfix)"
+ fi
+
+ # ping to .2 should use ipsec.
+ check_xfrm 1 2
+ if [ $? -ne 0 ]; then
+ echo "FAIL: expected ping to .2 to use ipsec tunnel ($logpostfix)"
+ lret=1
+ else
+ echo "PASS: policy matches ($logpostfix)"
+ fi
+
+ return $lret
+}
+
#check for needed privileges
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
@@ -270,33 +367,45 @@ do_exception ns4 10.0.3.10 10.0.3.1 10.0.1.253 10.0.1.240/28
do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96
do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96
-# ping to .254 should now be excluded from the tunnel
-check_xfrm 0 254
+check_exceptions "exceptions"
if [ $? -ne 0 ]; then
- echo "FAIL: expected ping to .254 to fail"
ret=1
-else
- echo "PASS: ping to .254 bypassed ipsec tunnel"
fi
-# ping to .253 should use use ipsec due to direct policy exception.
-check_xfrm 1 253
-if [ $? -ne 0 ]; then
- echo "FAIL: expected ping to .253 to use ipsec tunnel"
- ret=1
-else
- echo "PASS: direct policy matches"
-fi
+# insert block policies with adjacent/overlapping netmasks
+do_overlap ns3
-# ping to .2 should use ipsec.
-check_xfrm 1 2
+check_exceptions "exceptions and block policies"
if [ $? -ne 0 ]; then
- echo "FAIL: expected ping to .2 to use ipsec tunnel"
ret=1
-else
- echo "PASS: policy matches"
fi
+for n in ns3 ns4;do
+ ip -net $n xfrm policy set hthresh4 28 24 hthresh6 126 125
+ sleep $((RANDOM%5))
+done
+
+check_exceptions "exceptions and block policies after hresh changes"
+
+# full flush of policy db, check everything gets freed incl. internal meta data
+ip -net ns3 xfrm policy flush
+
+do_esp_policy ns3 10.0.3.1 10.0.3.10 10.0.1.0/24 10.0.2.0/24
+do_exception ns3 10.0.3.1 10.0.3.10 10.0.2.253 10.0.2.240/28
+
+# move inexact policies to hash table
+ip -net ns3 xfrm policy set hthresh4 16 16
+
+sleep $((RANDOM%5))
+check_exceptions "exceptions and block policies after hthresh change in ns3"
+
+# restore original hthresh settings -- move policies back to tables
+for n in ns3 ns4;do
+ ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128
+ sleep $((RANDOM%5))
+done
+check_exceptions "exceptions and block policies after hresh change to normal"
+
for i in 1 2 3 4;do ip netns del ns$i;done
exit $ret
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 47ed6cef93fb..c9ff2b47bd1c 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh
include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
index 1017313e41a8..59caa8f71cd8 100644
--- a/tools/testing/selftests/netfilter/config
+++ b/tools/testing/selftests/netfilter/config
@@ -1,2 +1,2 @@
CONFIG_NET_NS=y
-NF_TABLES_INET=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
new file mode 100755
index 000000000000..8ec76681605c
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -0,0 +1,762 @@
+#!/bin/bash
+#
+# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add ns0
+ip netns add ns1
+ip netns add ns2
+
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ip -net ns0 link set lo up
+ip -net ns0 link set veth0 up
+ip -net ns0 addr add 10.0.1.1/24 dev veth0
+ip -net ns0 addr add dead:1::1/64 dev veth0
+
+ip -net ns0 link set veth1 up
+ip -net ns0 addr add 10.0.2.1/24 dev veth1
+ip -net ns0 addr add dead:2::1/64 dev veth1
+
+for i in 1 2; do
+ ip -net ns$i link set lo up
+ ip -net ns$i link set eth0 up
+ ip -net ns$i addr add 10.0.$i.99/24 dev eth0
+ ip -net ns$i route add default via 10.0.$i.1
+ ip -net ns$i addr add dead:$i::99/64 dev eth0
+ ip -net ns$i route add default via dead:$i::1
+done
+
+bad_counter()
+{
+ local ns=$1
+ local counter=$2
+ local expect=$3
+
+ echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
+ ip netns exec $ns nft list counter inet filter $counter 1>&2
+}
+
+check_counters()
+{
+ ns=$1
+ local lret=0
+
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0in "packets 1 bytes 84"
+ lret=1
+ fi
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0out "packets 1 bytes 84"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0in6 "$expect"
+ lret=1
+ fi
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0out6 "$expect"
+ lret=1
+ fi
+
+ return $lret
+}
+
+check_ns0_counters()
+{
+ local ns=$1
+ local lret=0
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0in "packets 0 bytes 0"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0in6 "packets 0 bytes 0"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0out "packets 0 bytes 0"
+ lret=1
+ fi
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0out6 "packets 0 bytes 0"
+ lret=1
+ fi
+
+ for dir in "in" "out" ; do
+ expect="packets 1 bytes 84"
+ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 $ns$dir "$expect"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ${ns}${dir}6 "$expect"
+ lret=1
+ fi
+ done
+
+ return $lret
+}
+
+reset_counters()
+{
+ for i in 0 1 2;do
+ ip netns exec ns$i nft reset counters inet > /dev/null
+ done
+}
+
+test_local_dnat6()
+{
+ local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain output {
+ type nat hook output priority 0; policy accept;
+ ip6 daddr dead:1::99 dnat to dead:2::99
+ }
+}
+EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add add ip6 dnat hook"
+ return $ksft_skip
+ fi
+
+ # ping netns1, expect rewrite to netns2
+ ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping6 failed"
+ return $lret
+ fi
+
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 count in ns1
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 packet in ns2
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
+ ip netns exec ns0 nft flush chain ip6 nat output
+
+ return $lret
+}
+
+test_local_dnat()
+{
+ local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain output {
+ type nat hook output priority 0; policy accept;
+ ip daddr 10.0.1.99 dnat to 10.0.2.99
+ }
+}
+EOF
+ # ping netns1, expect rewrite to netns2
+ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping failed"
+ return $lret
+ fi
+
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 count in ns1
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 packet in ns2
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
+
+ ip netns exec ns0 nft flush chain ip nat output
+
+ reset_counters
+ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping failed"
+ return $lret
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 count in ns1
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 packet in ns2
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
+
+ return $lret
+}
+
+
+test_masquerade6()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+ return 1
+ fi
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oif veth0 masquerade
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+ lret=1
+ fi
+
+ # ns1 should have seen packets from ns0, due to masquerade
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns1 should not have seen packets from ns2, due to masquerade
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft flush chain ip6 nat postrouting
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+
+ return $lret
+}
+
+test_masquerade()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: canot ping ns1 from ns2"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oif veth0 masquerade
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+ lret=1
+ fi
+
+ # ns1 should have seen packets from ns0, due to masquerade
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns1 should not have seen packets from ns2, due to masquerade
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft flush chain ip nat postrouting
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not flush nat postrouting" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+
+ return $lret
+}
+
+test_redirect6()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
+ lret=1
+ fi
+
+ # ns1 should have seen no packets from ns2, due to redirection
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns0 should have seen packets from ns2, due to the redirect rule
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft delete table ip6 nat
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not delete ip6 nat table" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
+
+ return $lret
+}
+
+test_redirect()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
+ lret=1
+ fi
+
+ # ns1 should have seen no packets from ns2, due to redirection
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns0 should have seen packets from ns2, due to the redirect
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft delete table ip nat
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not delete nat table" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IP redirection for ns2"
+
+ return $lret
+}
+
+
+# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
+for i in 0 1 2; do
+ip netns exec ns$i nft -f - <<EOF
+table inet filter {
+ counter ns0in {}
+ counter ns1in {}
+ counter ns2in {}
+
+ counter ns0out {}
+ counter ns1out {}
+ counter ns2out {}
+
+ counter ns0in6 {}
+ counter ns1in6 {}
+ counter ns2in6 {}
+
+ counter ns0out6 {}
+ counter ns1out6 {}
+ counter ns2out6 {}
+
+ map nsincounter {
+ type ipv4_addr : counter
+ elements = { 10.0.1.1 : "ns0in",
+ 10.0.2.1 : "ns0in",
+ 10.0.1.99 : "ns1in",
+ 10.0.2.99 : "ns2in" }
+ }
+
+ map nsincounter6 {
+ type ipv6_addr : counter
+ elements = { dead:1::1 : "ns0in6",
+ dead:2::1 : "ns0in6",
+ dead:1::99 : "ns1in6",
+ dead:2::99 : "ns2in6" }
+ }
+
+ map nsoutcounter {
+ type ipv4_addr : counter
+ elements = { 10.0.1.1 : "ns0out",
+ 10.0.2.1 : "ns0out",
+ 10.0.1.99: "ns1out",
+ 10.0.2.99: "ns2out" }
+ }
+
+ map nsoutcounter6 {
+ type ipv6_addr : counter
+ elements = { dead:1::1 : "ns0out6",
+ dead:2::1 : "ns0out6",
+ dead:1::99 : "ns1out6",
+ dead:2::99 : "ns2out6" }
+ }
+
+ chain input {
+ type filter hook input priority 0; policy accept;
+ counter name ip saddr map @nsincounter
+ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
+ }
+ chain output {
+ type filter hook output priority 0; policy accept;
+ counter name ip daddr map @nsoutcounter
+ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
+ }
+}
+EOF
+done
+
+sleep 3
+# test basic connectivity
+for i in 1 2; do
+ ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
+ if [ $? -ne 0 ];then
+ echo "ERROR: Could not reach other namespace(s)" 1>&2
+ ret=1
+ fi
+
+ ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
+ if [ $? -ne 0 ];then
+ echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
+ ret=1
+ fi
+ check_counters ns$i
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+
+ check_ns0_counters ns$i
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+ reset_counters
+done
+
+if [ $ret -eq 0 ];then
+ echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
+fi
+
+reset_counters
+test_local_dnat
+test_local_dnat6
+
+reset_counters
+test_masquerade
+test_masquerade6
+
+reset_counters
+test_redirect
+test_redirect6
+
+for i in 0 1 2; do ip netns del ns$i;done
+
+exit $ret
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index 9050eeea5f5f..1de8bd8ccf5d 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -9,6 +9,3 @@ all: $(TEST_PROGS)
top_srcdir = ../../../../..
KSFT_KHDR_INSTALL := 1
include ../../lib.mk
-
-clean:
- rm -fr $(TEST_GEN_FILES)
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
index dd4162fc0419..6dee9e636a95 100644
--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
@@ -5,6 +5,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h>
#include <sys/time.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
index 2e563d17cf0c..d1bbafb16f47 100644
--- a/tools/testing/selftests/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c
@@ -240,7 +240,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
cm->cmsg_type == IP_RECVERR) ||
(cm->cmsg_level == SOL_IPV6 &&
cm->cmsg_type == IPV6_RECVERR) ||
- (cm->cmsg_level = SOL_PACKET &&
+ (cm->cmsg_level == SOL_PACKET &&
cm->cmsg_type == PACKET_TX_TIMESTAMP)) {
serr = (void *) CMSG_DATA(cm);
if (serr->ee_errno != ENOMSG ||
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 82121a81681f..444ad39d3700 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -2,6 +2,7 @@
/fd-002-posix-eq
/fd-003-kthread
/proc-loadavg-001
+/proc-pid-vm
/proc-self-map-files-001
/proc-self-map-files-002
/proc-self-syscall
@@ -10,4 +11,5 @@
/proc-uptime-002
/read
/self
+/setns-dcache
/thread-self
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 1c12c34cf85d..5163dc887aa3 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -6,6 +6,7 @@ TEST_GEN_PROGS += fd-001-lookup
TEST_GEN_PROGS += fd-002-posix-eq
TEST_GEN_PROGS += fd-003-kthread
TEST_GEN_PROGS += proc-loadavg-001
+TEST_GEN_PROGS += proc-pid-vm
TEST_GEN_PROGS += proc-self-map-files-001
TEST_GEN_PROGS += proc-self-map-files-002
TEST_GEN_PROGS += proc-self-syscall
@@ -14,6 +15,7 @@ TEST_GEN_PROGS += proc-uptime-001
TEST_GEN_PROGS += proc-uptime-002
TEST_GEN_PROGS += read
TEST_GEN_PROGS += self
+TEST_GEN_PROGS += setns-dcache
TEST_GEN_PROGS += thread-self
include ../lib.mk
diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c
index fcff7047000d..471e2aa28077 100644
--- a/tools/testing/selftests/proc/proc-loadavg-001.c
+++ b/tools/testing/selftests/proc/proc-loadavg-001.c
@@ -30,7 +30,7 @@ int main(void)
if (unshare(CLONE_NEWPID) == -1) {
if (errno == ENOSYS || errno == EPERM)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
new file mode 100644
index 000000000000..bbe8150d18aa
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-pid-vm.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Fork and exec a tiny one-page executable which precisely controls its VM.
+ * Test /proc/$PID/maps
+ * Test /proc/$PID/smaps
+ * Test /proc/$PID/smaps_rollup
+ * Test /proc/$PID/statm
+ *
+ * FIXME require CONFIG_TMPFS which can be disabled
+ * FIXME test other values from "smaps"
+ * FIXME support other archs
+ */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/mount.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/uio.h>
+#include <linux/kdev_t.h>
+
+static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
+{
+ return syscall(SYS_execveat, dirfd, pathname, argv, envp, flags);
+}
+
+static void make_private_tmp(void)
+{
+ if (unshare(CLONE_NEWNS) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ exit(4);
+ }
+ exit(1);
+ }
+ if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
+ exit(1);
+ }
+ if (mount(NULL, "/tmp", "tmpfs", 0, NULL) == -1) {
+ exit(1);
+ }
+}
+
+static pid_t pid = -1;
+static void ate(void)
+{
+ if (pid > 0) {
+ kill(pid, SIGTERM);
+ }
+}
+
+struct elf64_hdr {
+ uint8_t e_ident[16];
+ uint16_t e_type;
+ uint16_t e_machine;
+ uint32_t e_version;
+ uint64_t e_entry;
+ uint64_t e_phoff;
+ uint64_t e_shoff;
+ uint32_t e_flags;
+ uint16_t e_ehsize;
+ uint16_t e_phentsize;
+ uint16_t e_phnum;
+ uint16_t e_shentsize;
+ uint16_t e_shnum;
+ uint16_t e_shstrndx;
+};
+
+struct elf64_phdr {
+ uint32_t p_type;
+ uint32_t p_flags;
+ uint64_t p_offset;
+ uint64_t p_vaddr;
+ uint64_t p_paddr;
+ uint64_t p_filesz;
+ uint64_t p_memsz;
+ uint64_t p_align;
+};
+
+#ifdef __x86_64__
+#define PAGE_SIZE 4096
+#define VADDR (1UL << 32)
+#define MAPS_OFFSET 73
+
+#define syscall 0x0f, 0x05
+#define mov_rdi(x) \
+ 0x48, 0xbf, \
+ (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff, \
+ ((x)>>32)&0xff, ((x)>>40)&0xff, ((x)>>48)&0xff, ((x)>>56)&0xff
+
+#define mov_rsi(x) \
+ 0x48, 0xbe, \
+ (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff, \
+ ((x)>>32)&0xff, ((x)>>40)&0xff, ((x)>>48)&0xff, ((x)>>56)&0xff
+
+#define mov_eax(x) \
+ 0xb8, (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff
+
+static const uint8_t payload[] = {
+ /* Casually unmap stack, vDSO and everything else. */
+ /* munmap */
+ mov_rdi(VADDR + 4096),
+ mov_rsi((1ULL << 47) - 4096 - VADDR - 4096),
+ mov_eax(11),
+ syscall,
+
+ /* Ping parent. */
+ /* write(0, &c, 1); */
+ 0x31, 0xff, /* xor edi, edi */
+ 0x48, 0x8d, 0x35, 0x00, 0x00, 0x00, 0x00, /* lea rsi, [rip] */
+ 0xba, 0x01, 0x00, 0x00, 0x00, /* mov edx, 1 */
+ mov_eax(1),
+ syscall,
+
+ /* 1: pause(); */
+ mov_eax(34),
+ syscall,
+
+ 0xeb, 0xf7, /* jmp 1b */
+};
+
+static int make_exe(const uint8_t *payload, size_t len)
+{
+ struct elf64_hdr h;
+ struct elf64_phdr ph;
+
+ struct iovec iov[3] = {
+ {&h, sizeof(struct elf64_hdr)},
+ {&ph, sizeof(struct elf64_phdr)},
+ {(void *)payload, len},
+ };
+ int fd, fd1;
+ char buf[64];
+
+ memset(&h, 0, sizeof(h));
+ h.e_ident[0] = 0x7f;
+ h.e_ident[1] = 'E';
+ h.e_ident[2] = 'L';
+ h.e_ident[3] = 'F';
+ h.e_ident[4] = 2;
+ h.e_ident[5] = 1;
+ h.e_ident[6] = 1;
+ h.e_ident[7] = 0;
+ h.e_type = 2;
+ h.e_machine = 0x3e;
+ h.e_version = 1;
+ h.e_entry = VADDR + sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr);
+ h.e_phoff = sizeof(struct elf64_hdr);
+ h.e_shoff = 0;
+ h.e_flags = 0;
+ h.e_ehsize = sizeof(struct elf64_hdr);
+ h.e_phentsize = sizeof(struct elf64_phdr);
+ h.e_phnum = 1;
+ h.e_shentsize = 0;
+ h.e_shnum = 0;
+ h.e_shstrndx = 0;
+
+ memset(&ph, 0, sizeof(ph));
+ ph.p_type = 1;
+ ph.p_flags = (1<<2)|1;
+ ph.p_offset = 0;
+ ph.p_vaddr = VADDR;
+ ph.p_paddr = 0;
+ ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
+ ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
+ ph.p_align = 4096;
+
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700);
+ if (fd == -1) {
+ exit(1);
+ }
+
+ if (writev(fd, iov, 3) != sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len) {
+ exit(1);
+ }
+
+ /* Avoid ETXTBSY on exec. */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%u", fd);
+ fd1 = open(buf, O_RDONLY|O_CLOEXEC);
+ close(fd);
+
+ return fd1;
+}
+#endif
+
+#ifdef __x86_64__
+int main(void)
+{
+ int pipefd[2];
+ int exec_fd;
+
+ atexit(ate);
+
+ make_private_tmp();
+
+ /* Reserve fd 0 for 1-byte pipe ping from child. */
+ close(0);
+ if (open("/", O_RDONLY|O_DIRECTORY|O_PATH) != 0) {
+ return 1;
+ }
+
+ exec_fd = make_exe(payload, sizeof(payload));
+
+ if (pipe(pipefd) == -1) {
+ return 1;
+ }
+ if (dup2(pipefd[1], 0) != 0) {
+ return 1;
+ }
+
+ pid = fork();
+ if (pid == -1) {
+ return 1;
+ }
+ if (pid == 0) {
+ sys_execveat(exec_fd, "", NULL, NULL, AT_EMPTY_PATH);
+ return 1;
+ }
+
+ char _;
+ if (read(pipefd[0], &_, 1) != 1) {
+ return 1;
+ }
+
+ struct stat st;
+ if (fstat(exec_fd, &st) == -1) {
+ return 1;
+ }
+
+ /* Generate "head -n1 /proc/$PID/maps" */
+ char buf0[256];
+ memset(buf0, ' ', sizeof(buf0));
+ int len = snprintf(buf0, sizeof(buf0),
+ "%08lx-%08lx r-xp 00000000 %02lx:%02lx %llu",
+ VADDR, VADDR + PAGE_SIZE,
+ MAJOR(st.st_dev), MINOR(st.st_dev),
+ (unsigned long long)st.st_ino);
+ buf0[len] = ' ';
+ snprintf(buf0 + MAPS_OFFSET, sizeof(buf0) - MAPS_OFFSET,
+ "/tmp/#%llu (deleted)\n", (unsigned long long)st.st_ino);
+
+
+ /* Test /proc/$PID/maps */
+ {
+ char buf[256];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/maps", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(rv == strlen(buf0));
+ assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+ }
+
+ /* Test /proc/$PID/smaps */
+ {
+ char buf[1024];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/smaps", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(0 <= rv && rv <= sizeof(buf));
+
+ assert(rv >= strlen(buf0));
+ assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+
+#define RSS1 "Rss: 4 kB\n"
+#define RSS2 "Rss: 0 kB\n"
+#define PSS1 "Pss: 4 kB\n"
+#define PSS2 "Pss: 0 kB\n"
+ assert(memmem(buf, rv, RSS1, strlen(RSS1)) ||
+ memmem(buf, rv, RSS2, strlen(RSS2)));
+ assert(memmem(buf, rv, PSS1, strlen(PSS1)) ||
+ memmem(buf, rv, PSS2, strlen(PSS2)));
+
+ static const char *S[] = {
+ "Size: 4 kB\n",
+ "KernelPageSize: 4 kB\n",
+ "MMUPageSize: 4 kB\n",
+ "Anonymous: 0 kB\n",
+ "AnonHugePages: 0 kB\n",
+ "Shared_Hugetlb: 0 kB\n",
+ "Private_Hugetlb: 0 kB\n",
+ "Locked: 0 kB\n",
+ };
+ int i;
+
+ for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+ assert(memmem(buf, rv, S[i], strlen(S[i])));
+ }
+ }
+
+ /* Test /proc/$PID/smaps_rollup */
+ {
+ char bufr[256];
+ memset(bufr, ' ', sizeof(bufr));
+ len = snprintf(bufr, sizeof(bufr),
+ "%08lx-%08lx ---p 00000000 00:00 0",
+ VADDR, VADDR + PAGE_SIZE);
+ bufr[len] = ' ';
+ snprintf(bufr + MAPS_OFFSET, sizeof(bufr) - MAPS_OFFSET,
+ "[rollup]\n");
+
+ char buf[1024];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/smaps_rollup", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(0 <= rv && rv <= sizeof(buf));
+
+ assert(rv >= strlen(bufr));
+ assert(memcmp(buf, bufr, strlen(bufr)) == 0);
+
+ assert(memmem(buf, rv, RSS1, strlen(RSS1)) ||
+ memmem(buf, rv, RSS2, strlen(RSS2)));
+ assert(memmem(buf, rv, PSS1, strlen(PSS1)) ||
+ memmem(buf, rv, PSS2, strlen(PSS2)));
+
+ static const char *S[] = {
+ "Anonymous: 0 kB\n",
+ "AnonHugePages: 0 kB\n",
+ "Shared_Hugetlb: 0 kB\n",
+ "Private_Hugetlb: 0 kB\n",
+ "Locked: 0 kB\n",
+ };
+ int i;
+
+ for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+ assert(memmem(buf, rv, S[i], strlen(S[i])));
+ }
+ }
+
+ /* Test /proc/$PID/statm */
+ {
+ char buf[64];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/statm", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(rv == 7 * 2);
+
+ assert(buf[0] == '1'); /* ->total_vm */
+ assert(buf[1] == ' ');
+ assert(buf[2] == '0' || buf[2] == '1'); /* rss */
+ assert(buf[3] == ' ');
+ assert(buf[4] == '0' || buf[4] == '1'); /* file rss */
+ assert(buf[5] == ' ');
+ assert(buf[6] == '1'); /* ELF executable segments */
+ assert(buf[7] == ' ');
+ assert(buf[8] == '0');
+ assert(buf[9] == ' ');
+ assert(buf[10] == '0'); /* ->data_vm + ->stack_vm */
+ assert(buf[11] == ' ');
+ assert(buf[12] == '0');
+ assert(buf[13] == '\n');
+ }
+
+ return 0;
+}
+#else
+int main(void)
+{
+ return 4;
+}
+#endif
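
For reference, the byte-offset asserts in the statm block above correspond to the seven space-separated fields of /proc/$PID/statm as documented in proc(5). The following standalone sketch is not part of the patch; it only illustrates the field layout by reading the same fields by name:

#include <stdio.h>

int main(void)
{
	/* Field order per proc(5): size resident shared text lib data dt,
	 * all counted in pages. */
	unsigned long size, resident, shared, text, lib, data, dt;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
		   &size, &resident, &shared, &text, &lib, &data, &dt) != 7) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("total_vm=%lu rss=%lu file_rss=%lu text=%lu data+stack=%lu\n",
	       size, resident, shared, text, data);
	return 0;
}

All values are in pages; for the one-page executable built by the test above, total_vm is 1 and the data/stack count is 0, which is what the character-level asserts check.
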
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 85744425b08d..762cb01f2ca7 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -63,7 +63,7 @@ int main(void)
p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
if (p == MAP_FAILED) {
if (errno == EPERM)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c
index 5ab5f4810e43..9f6d000c0245 100644
--- a/tools/testing/selftests/proc/proc-self-syscall.c
+++ b/tools/testing/selftests/proc/proc-self-syscall.c
@@ -20,7 +20,6 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
-#include <unistd.h>
#include <string.h>
#include <stdio.h>
@@ -39,7 +38,7 @@ int main(void)
fd = open("/proc/self/syscall", O_RDONLY);
if (fd == -1) {
if (errno == ENOENT)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-self-wchan.c b/tools/testing/selftests/proc/proc-self-wchan.c
index a38b2fbaa7ad..b467b98a457d 100644
--- a/tools/testing/selftests/proc/proc-self-wchan.c
+++ b/tools/testing/selftests/proc/proc-self-wchan.c
@@ -27,7 +27,7 @@ int main(void)
fd = open("/proc/self/wchan", O_RDONLY);
if (fd == -1) {
if (errno == ENOENT)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/read.c b/tools/testing/selftests/proc/read.c
index 563e752e6eba..b3ef9e14d6cc 100644
--- a/tools/testing/selftests/proc/read.c
+++ b/tools/testing/selftests/proc/read.c
@@ -26,8 +26,10 @@
#include <dirent.h>
#include <stdbool.h>
#include <stdlib.h>
+#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
+#include <sys/vfs.h>
#include <fcntl.h>
#include <unistd.h>
@@ -123,10 +125,22 @@ static void f(DIR *d, unsigned int level)
int main(void)
{
DIR *d;
+ struct statfs sfs;
d = opendir("/proc");
if (!d)
+ return 4;
+
+ /* Ensure /proc is proc. */
+ if (fstatfs(dirfd(d), &sfs) == -1) {
+ return 1;
+ }
+ if (sfs.f_type != 0x9fa0) {
+ fprintf(stderr, "error: unexpected f_type %lx\n", (long)sfs.f_type);
return 2;
+ }
+
f(d, 0);
+
return 0;
}
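
The 0x9fa0 magic checked above is PROC_SUPER_MAGIC from <linux/magic.h>. A minimal standalone sketch of the same sanity check using the named constant (not part of the patch) looks like this:

#include <stdio.h>
#include <sys/vfs.h>
#include <linux/magic.h>	/* PROC_SUPER_MAGIC == 0x9fa0 */

int main(void)
{
	struct statfs sfs;

	/* Same idea as the read.c change: make sure /proc really is procfs
	 * before walking it. */
	if (statfs("/proc", &sfs) == -1)
		return 1;
	if (sfs.f_type != PROC_SUPER_MAGIC) {
		fprintf(stderr, "unexpected f_type %lx\n", (long)sfs.f_type);
		return 2;
	}
	return 0;
}
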
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
new file mode 100644
index 000000000000..60ab197a73fc
--- /dev/null
+++ b/tools/testing/selftests/proc/setns-dcache.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Test that setns(CLONE_NEWNET) points to the new /proc/net content even
+ * if the old one is in the dcache.
+ *
+ * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
+ */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+
+static pid_t pid = -1;
+
+static void f(void)
+{
+ if (pid > 0) {
+ kill(pid, SIGTERM);
+ }
+}
+
+int main(void)
+{
+ int fd[2];
+ char _ = 0;
+ int nsfd;
+
+ atexit(f);
+
+ /* Check for privileges and syscall availability straight away. */
+ if (unshare(CLONE_NEWNET) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ return 4;
+ }
+ return 1;
+ }
+ /* Distinguisher between two otherwise empty net namespaces. */
+ if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
+ return 1;
+ }
+
+ if (pipe(fd) == -1) {
+ return 1;
+ }
+
+ pid = fork();
+ if (pid == -1) {
+ return 1;
+ }
+
+ if (pid == 0) {
+ if (unshare(CLONE_NEWNET) == -1) {
+ return 1;
+ }
+
+ if (write(fd[1], &_, 1) != 1) {
+ return 1;
+ }
+
+ pause();
+
+ return 0;
+ }
+
+ if (read(fd[0], &_, 1) != 1) {
+ return 1;
+ }
+
+ {
+ char buf[64];
+ snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
+ nsfd = open(buf, O_RDONLY);
+ if (nsfd == -1) {
+ return 1;
+ }
+ }
+
+ /* Reliably pin dentry into dcache. */
+ (void)open("/proc/net/unix", O_RDONLY);
+
+ if (setns(nsfd, CLONE_NEWNET) == -1) {
+ return 1;
+ }
+
+ kill(pid, SIGTERM);
+ pid = 0;
+
+ {
+ char buf[4096];
+ ssize_t rv;
+ int fd;
+
+ fd = open("/proc/net/unix", O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+
+#define S "Num RefCount Protocol Flags Type St Inode Path\n"
+ rv = read(fd, buf, sizeof(buf));
+
+ assert(rv == strlen(S));
+ assert(memcmp(buf, S, strlen(S)) == 0);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
index da298394daa2..83552bb007b4 100755
--- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
@@ -40,17 +40,24 @@ mkdir $T
cat > $T/init << '__EOF___'
#!/bin/sh
# Run in userspace a few milliseconds every second. This helps to
-# exercise the NO_HZ_FULL portions of RCU.
+# exercise the NO_HZ_FULL portions of RCU. The 192 instances of "a" were
+# empirically shown to give a nice multi-millisecond burst of user-mode
+# execution on a 2GHz CPU, as desired. Modern CPUs will vary from a
+# couple of milliseconds up to perhaps 100 milliseconds, which is an
+# acceptable range.
+#
+# Why not calibrate an exact delay? Because within this initrd, we
+# are restricted to Bourne-shell builtins, which as far as I know do not
+# provide any means of obtaining a fine-grained timestamp.
+
+a4="a a a a"
+a16="$a4 $a4 $a4 $a4"
+a64="$a16 $a16 $a16 $a16"
+a192="$a64 $a64 $a64"
while :
do
q=
- for i in \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
+ for i in $a192
do
q="$q $i"
done
@@ -124,8 +131,8 @@ if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
| grep -q '^yes'; then
# architecture supported by nolibc
${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \
- -nostdlib -include ../bin/nolibc.h -lgcc -s -static -Os \
- -o init init.c
+ -nostdlib -include ../../../../include/nolibc/nolibc.h \
+ -lgcc -s -static -Os -o init init.c
else
${CROSS_COMPILE}gcc -s -static -Os -o init init.c
fi
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index e20b017e7073..b2065536d407 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) {
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
- EXPECT_NE(0, rc);
+ ASSERT_NE(0, rc);
/* Disable alarm interrupts */
rc = ioctl(self->fd, RTC_AIE_OFF, 0);
ASSERT_NE(-1, rc);
- if (rc == 0)
- return;
-
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
TH_LOG("data: %lx", data);
@@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) {
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
- EXPECT_NE(0, rc);
+ ASSERT_NE(0, rc);
+
+ rc = read(self->fd, &data, sizeof(unsigned long));
+ ASSERT_NE(-1, rc);
+
+ rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+ ASSERT_NE(-1, rc);
+
+ new = timegm((struct tm *)&tm);
+ ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_alm_set_minute) {
+ struct timeval tv = { .tv_sec = 62 };
+ unsigned long data;
+ struct rtc_time tm;
+ fd_set readfds;
+ time_t secs, new;
+ int rc;
+
+ rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+ ASSERT_NE(-1, rc);
+
+ secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
+ gmtime_r(&secs, (struct tm *)&tm);
+
+ rc = ioctl(self->fd, RTC_ALM_SET, &tm);
+ if (rc == -1) {
+ ASSERT_EQ(EINVAL, errno);
+ TH_LOG("skip alarms are not supported.");
+ return;
+ }
+
+ rc = ioctl(self->fd, RTC_ALM_READ, &tm);
+ ASSERT_NE(-1, rc);
+
+ TH_LOG("Alarm time now set to %02d:%02d:%02d.",
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ /* Enable alarm interrupts */
+ rc = ioctl(self->fd, RTC_AIE_ON, 0);
+ ASSERT_NE(-1, rc);
+
+ FD_ZERO(&readfds);
+ FD_SET(self->fd, &readfds);
+
+ rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+ ASSERT_NE(-1, rc);
+ ASSERT_NE(0, rc);
+
+ /* Disable alarm interrupts */
+ rc = ioctl(self->fd, RTC_AIE_OFF, 0);
+ ASSERT_NE(-1, rc);
+
+ rc = read(self->fd, &data, sizeof(unsigned long));
+ ASSERT_NE(-1, rc);
+ TH_LOG("data: %lx", data);
+
+ rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+ ASSERT_NE(-1, rc);
+
+ new = timegm((struct tm *)&tm);
+ ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_wkalm_set_minute) {
+ struct timeval tv = { .tv_sec = 62 };
+ struct rtc_wkalrm alarm = { 0 };
+ struct rtc_time tm;
+ unsigned long data;
+ fd_set readfds;
+ time_t secs, new;
+ int rc;
+
+ rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
+ ASSERT_NE(-1, rc);
+
+ secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
+ gmtime_r(&secs, (struct tm *)&alarm.time);
+
+ alarm.enabled = 1;
+
+ rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
+ if (rc == -1) {
+ ASSERT_EQ(EINVAL, errno);
+ TH_LOG("skip alarms are not supported.");
+ return;
+ }
+
+ rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
+ ASSERT_NE(-1, rc);
+
+ TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
+ alarm.time.tm_mday, alarm.time.tm_mon + 1,
+ alarm.time.tm_year + 1900, alarm.time.tm_hour,
+ alarm.time.tm_min, alarm.time.tm_sec);
+
+ FD_ZERO(&readfds);
+ FD_SET(self->fd, &readfds);
+
+ rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+ ASSERT_NE(-1, rc);
+ ASSERT_NE(0, rc);
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
diff --git a/tools/testing/selftests/safesetid/.gitignore b/tools/testing/selftests/safesetid/.gitignore
new file mode 100644
index 000000000000..9c1a629bca01
--- /dev/null
+++ b/tools/testing/selftests/safesetid/.gitignore
@@ -0,0 +1 @@
+safesetid-test
diff --git a/tools/testing/selftests/safesetid/Makefile b/tools/testing/selftests/safesetid/Makefile
new file mode 100644
index 000000000000..98da7a504737
--- /dev/null
+++ b/tools/testing/selftests/safesetid/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for safesetid selftests.
+CFLAGS = -Wall -lcap -O2
+
+TEST_PROGS := run_tests.sh
+TEST_GEN_FILES := safesetid-test
+
+include ../lib.mk
diff --git a/tools/testing/selftests/safesetid/config b/tools/testing/selftests/safesetid/config
new file mode 100644
index 000000000000..9d44e5c2e096
--- /dev/null
+++ b/tools/testing/selftests/safesetid/config
@@ -0,0 +1,2 @@
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
diff --git a/tools/testing/selftests/safesetid/safesetid-test.c b/tools/testing/selftests/safesetid/safesetid-test.c
new file mode 100644
index 000000000000..892c8e8b1b8b
--- /dev/null
+++ b/tools/testing/selftests/safesetid/safesetid-test.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <pwd.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/capability.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdarg.h>
+
+#ifndef CLONE_NEWUSER
+# define CLONE_NEWUSER 0x10000000
+#endif
+
+#define ROOT_USER 0
+#define RESTRICTED_PARENT 1
+#define ALLOWED_CHILD1 2
+#define ALLOWED_CHILD2 3
+#define NO_POLICY_USER 4
+
+char* add_whitelist_policy_file = "/sys/kernel/security/safesetid/add_whitelist_policy";
+
+static void die(char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
+static bool vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
+{
+ char buf[4096];
+ int fd;
+ ssize_t written;
+ int buf_len;
+
+ buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+ if (buf_len < 0) {
+ printf("vsnprintf failed: %s\n",
+ strerror(errno));
+ return false;
+ }
+ if (buf_len >= sizeof(buf)) {
+ printf("vsnprintf output truncated\n");
+ return false;
+ }
+
+ fd = open(filename, O_WRONLY);
+ if (fd < 0) {
+ if ((errno == ENOENT) && enoent_ok)
+ return true;
+ return false;
+ }
+ written = write(fd, buf, buf_len);
+ if (written != buf_len) {
+ if (written >= 0) {
+ printf("short write to %s\n", filename);
+ return false;
+ } else {
+ printf("write to %s failed: %s\n",
+ filename, strerror(errno));
+ return false;
+ }
+ }
+ if (close(fd) != 0) {
+ printf("close of %s failed: %s\n",
+ filename, strerror(errno));
+ return false;
+ }
+ return true;
+}
+
+static bool write_file(char *filename, char *fmt, ...)
+{
+ va_list ap;
+ bool ret;
+
+ va_start(ap, fmt);
+ ret = vmaybe_write_file(false, filename, fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+static void ensure_user_exists(uid_t uid)
+{
+ struct passwd p;
+
+ FILE *fd;
+ char name_str[10];
+
+ if (getpwuid(uid) == NULL) {
+ memset(&p,0x00,sizeof(p));
+ fd=fopen("/etc/passwd","a");
+ if (fd == NULL)
+ die("couldn't open file\n");
+ if (fseek(fd, 0, SEEK_END))
+ die("couldn't fseek\n");
+ snprintf(name_str, 10, "%d", uid);
+ p.pw_name=name_str;
+ p.pw_uid=uid;
+ p.pw_gecos="Test account";
+ p.pw_dir="/dev/null";
+ p.pw_shell="/bin/false";
+ int value = putpwent(&p,fd);
+ if (value != 0)
+ die("putpwent failed\n");
+ if (fclose(fd))
+ die("fclose failed\n");
+ }
+}
+
+static void ensure_securityfs_mounted(void)
+{
+ int fd = open(add_whitelist_policy_file, O_WRONLY);
+ if (fd < 0) {
+ if (errno == ENOENT) {
+ // Need to mount securityfs
+ if (mount("securityfs", "/sys/kernel/security",
+ "securityfs", 0, NULL) < 0)
+ die("mounting securityfs failed\n");
+ } else {
+ die("couldn't find securityfs for unknown reason\n");
+ }
+ } else {
+ if (close(fd) != 0) {
+ die("close of %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+}
+
+static void write_policies(void)
+{
+ ssize_t written;
+ int fd;
+
+ fd = open(add_whitelist_policy_file, O_WRONLY);
+ if (fd < 0)
+ die("cant open add_whitelist_policy file\n");
+ written = write(fd, "1:2", strlen("1:2"));
+ if (written != strlen("1:2")) {
+ if (written >= 0) {
+ die("short write to %s\n", add_whitelist_policy_file);
+ } else {
+ die("write to %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+ written = write(fd, "1:3", strlen("1:3"));
+ if (written != strlen("1:3")) {
+ if (written >= 0) {
+ die("short write to %s\n", add_whitelist_policy_file);
+ } else {
+ die("write to %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+ if (close(fd) != 0) {
+ die("close of %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+}
+
+static bool test_userns(bool expect_success)
+{
+ uid_t uid;
+ char map_file_name[32];
+ size_t sz = sizeof(map_file_name);
+ pid_t cpid;
+ bool success;
+
+ uid = getuid();
+
+ int clone_flags = CLONE_NEWUSER;
+ cpid = syscall(SYS_clone, clone_flags, NULL);
+ if (cpid == -1) {
+ printf("clone failed");
+ return false;
+ }
+
+ if (cpid == 0) { /* Code executed by child */
+ // Give parent 1 second to write map file
+ sleep(1);
+ exit(EXIT_SUCCESS);
+ } else { /* Code executed by parent */
+ if(snprintf(map_file_name, sz, "/proc/%d/uid_map", cpid) < 0) {
+ printf("preparing file name string failed");
+ return false;
+ }
+ success = write_file(map_file_name, "0 0 1", uid);
+ return success == expect_success;
+ }
+
+ printf("should not reach here");
+ return false;
+}
+
+static void test_setuid(uid_t child_uid, bool expect_success)
+{
+ pid_t cpid, w;
+ int wstatus;
+
+ cpid = fork();
+ if (cpid == -1) {
+ die("fork\n");
+ }
+
+ if (cpid == 0) { /* Code executed by child */
+ setuid(child_uid);
+ if (getuid() == child_uid)
+ exit(EXIT_SUCCESS);
+ else
+ exit(EXIT_FAILURE);
+ } else { /* Code executed by parent */
+ do {
+ w = waitpid(cpid, &wstatus, WUNTRACED | WCONTINUED);
+ if (w == -1) {
+ die("waitpid\n");
+ }
+
+ if (WIFEXITED(wstatus)) {
+ if (WEXITSTATUS(wstatus) == EXIT_SUCCESS) {
+ if (expect_success) {
+ return;
+ } else {
+ die("unexpected success\n");
+ }
+ } else {
+ if (expect_success) {
+ die("unexpected failure\n");
+ } else {
+ return;
+ }
+ }
+ } else if (WIFSIGNALED(wstatus)) {
+ if (WTERMSIG(wstatus) == 9) {
+ if (expect_success)
+ die("killed unexpectedly\n");
+ else
+ return;
+ } else {
+ die("unexpected signal: %d\n", wstatus);
+ }
+ } else {
+ die("unexpected status: %d\n", wstatus);
+ }
+ } while (!WIFEXITED(wstatus) && !WIFSIGNALED(wstatus));
+ }
+
+ die("should not reach here\n");
+}
+
+static void ensure_users_exist(void)
+{
+ ensure_user_exists(ROOT_USER);
+ ensure_user_exists(RESTRICTED_PARENT);
+ ensure_user_exists(ALLOWED_CHILD1);
+ ensure_user_exists(ALLOWED_CHILD2);
+ ensure_user_exists(NO_POLICY_USER);
+}
+
+static void drop_caps(bool setid_retained)
+{
+ cap_value_t cap_values[] = {CAP_SETUID, CAP_SETGID};
+ cap_t caps;
+
+ caps = cap_get_proc();
+ if (setid_retained)
+ cap_set_flag(caps, CAP_EFFECTIVE, 2, cap_values, CAP_SET);
+ else
+ cap_clear(caps);
+ cap_set_proc(caps);
+ cap_free(caps);
+}
+
+int main(int argc, char **argv)
+{
+ ensure_users_exist();
+ ensure_securityfs_mounted();
+ write_policies();
+
+ if (prctl(PR_SET_KEEPCAPS, 1L))
+ die("Error with set keepcaps\n");
+
+ // First test to make sure we can write userns mappings from a user
+ // that doesn't have any restrictions (as long as it has CAP_SETUID).
+ setuid(NO_POLICY_USER);
+ setgid(NO_POLICY_USER);
+
+ // Take away all but setid caps
+ drop_caps(true);
+
+ // Need PR_SET_DUMPABLE flag set so we can write /proc/[pid]/uid_map
+ // from non-root parent process.
+ if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0))
+ die("Error with set dumpable\n");
+
+ if (!test_userns(true)) {
+ die("test_userns failed when it should work\n");
+ }
+
+ setuid(RESTRICTED_PARENT);
+ setgid(RESTRICTED_PARENT);
+
+ test_setuid(ROOT_USER, false);
+ test_setuid(ALLOWED_CHILD1, true);
+ test_setuid(ALLOWED_CHILD2, true);
+ test_setuid(NO_POLICY_USER, false);
+
+ if (!test_userns(false)) {
+ die("test_userns worked when it should fail\n");
+ }
+
+ // Now take away all caps
+ drop_caps(false);
+ test_setuid(2, false);
+ test_setuid(3, false);
+ test_setuid(4, false);
+
+ // NOTE: this test doesn't clean up users that were created in
+ // /etc/passwd or flush policies that were added to the LSM.
+ return EXIT_SUCCESS;
+}
diff --git a/tools/testing/selftests/safesetid/safesetid-test.sh b/tools/testing/selftests/safesetid/safesetid-test.sh
new file mode 100755
index 000000000000..e4fdce675c54
--- /dev/null
+++ b/tools/testing/selftests/safesetid/safesetid-test.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+TCID="safesetid-test.sh"
+errcode=0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+check_root()
+{
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo $TCID: must be run as root >&2
+ exit $ksft_skip
+ fi
+}
+
+main_function()
+{
+ check_root
+ ./safesetid-test
+}
+
+main_function
+echo "$TCID: done"
+exit $errcode
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index fce7f4ce0692..1760b3e39730 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -9,7 +9,7 @@ BINARIES := seccomp_bpf seccomp_benchmark
CFLAGS += -Wl,-no-as-needed -Wall
seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
- $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@
+ $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
TEST_PROGS += $(BINARIES)
EXTRA_CLEAN := $(BINARIES)
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 067cb4607d6c..7e632b465ab4 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1608,7 +1608,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
#ifdef SYSCALL_NUM_RET_SHARE_REG
# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
#else
-# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action)
+# define EXPECT_SYSCALL_RETURN(val, action) \
+ do { \
+ errno = 0; \
+ if (val < 0) { \
+ EXPECT_EQ(-1, action); \
+ EXPECT_EQ(-(val), errno); \
+ } else { \
+ EXPECT_EQ(val, action); \
+ } \
+ } while (0)
#endif
/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
@@ -1647,7 +1656,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
/* Architecture-specific syscall changing routine. */
void change_syscall(struct __test_metadata *_metadata,
- pid_t tracee, int syscall)
+ pid_t tracee, int syscall, int result)
{
int ret;
ARCH_REGS regs;
@@ -1706,7 +1715,7 @@ void change_syscall(struct __test_metadata *_metadata,
#ifdef SYSCALL_NUM_RET_SHARE_REG
TH_LOG("Can't modify syscall return on this architecture");
#else
- regs.SYSCALL_RET = EPERM;
+ regs.SYSCALL_RET = result;
#endif
#ifdef HAVE_GETREGS
@@ -1734,14 +1743,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
case 0x1002:
/* change getpid to getppid. */
EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
- change_syscall(_metadata, tracee, __NR_getppid);
+ change_syscall(_metadata, tracee, __NR_getppid, 0);
break;
case 0x1003:
- /* skip gettid. */
+ /* skip gettid with valid return code. */
EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
- change_syscall(_metadata, tracee, -1);
+ change_syscall(_metadata, tracee, -1, 45000);
break;
case 0x1004:
+ /* skip openat with error. */
+ EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
+ change_syscall(_metadata, tracee, -1, -ESRCH);
+ break;
+ case 0x1005:
/* do nothing (allow getppid) */
EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
break;
@@ -1774,9 +1788,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
nr = get_syscall(_metadata, tracee);
if (nr == __NR_getpid)
- change_syscall(_metadata, tracee, __NR_getppid);
+ change_syscall(_metadata, tracee, __NR_getppid, 0);
+ if (nr == __NR_gettid)
+ change_syscall(_metadata, tracee, -1, 45000);
if (nr == __NR_openat)
- change_syscall(_metadata, tracee, -1);
+ change_syscall(_metadata, tracee, -1, -ESRCH);
}
FIXTURE_DATA(TRACE_syscall) {
@@ -1793,8 +1809,10 @@ FIXTURE_SETUP(TRACE_syscall)
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
- BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
@@ -1842,15 +1860,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
EXPECT_NE(self->mypid, syscall(__NR_getpid));
}
-TEST_F(TRACE_syscall, ptrace_syscall_dropped)
+TEST_F(TRACE_syscall, ptrace_syscall_errno)
+{
+ /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
+ teardown_trace_fixture(_metadata, self->tracer);
+ self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
+ true);
+
+ /* Tracer should skip the open syscall, resulting in ESRCH. */
+ EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, ptrace_syscall_faked)
{
/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
teardown_trace_fixture(_metadata, self->tracer);
self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
true);
- /* Tracer should skip the open syscall, resulting in EPERM. */
- EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
+ /* Tracer should skip the gettid syscall, resulting in a fake pid. */
+ EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
}
TEST_F(TRACE_syscall, syscall_allowed)
@@ -1883,7 +1912,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
EXPECT_NE(self->mypid, syscall(__NR_getpid));
}
-TEST_F(TRACE_syscall, syscall_dropped)
+TEST_F(TRACE_syscall, syscall_errno)
+{
+ long ret;
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret);
+
+ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
+ ASSERT_EQ(0, ret);
+
+ /* openat has been skipped and an errno returned. */
+ EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, syscall_faked)
{
long ret;
@@ -1894,8 +1937,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
ASSERT_EQ(0, ret);
/* gettid has been skipped and an altered return value stored. */
- EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
- EXPECT_NE(self->mytid, syscall(__NR_gettid));
+ EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
}
TEST_F(TRACE_syscall, skip_after_RET_TRACE)
@@ -3044,7 +3086,7 @@ TEST(user_notification_basic)
/* Check that the basic notification machinery works */
listener = user_trap_syscall(__NR_getpid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
- EXPECT_GE(listener, 0);
+ ASSERT_GE(listener, 0);
/* Installing a second listener in the chain should EBUSY */
EXPECT_EQ(user_trap_syscall(__NR_getpid,
@@ -3103,7 +3145,7 @@ TEST(user_notification_kill_in_middle)
listener = user_trap_syscall(__NR_getpid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
- EXPECT_GE(listener, 0);
+ ASSERT_GE(listener, 0);
/*
* Check that nothing bad happens when we kill the task in the middle
@@ -3152,7 +3194,7 @@ TEST(user_notification_signal)
listener = user_trap_syscall(__NR_gettid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
- EXPECT_GE(listener, 0);
+ ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
@@ -3215,7 +3257,7 @@ TEST(user_notification_closed_listener)
listener = user_trap_syscall(__NR_getpid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
- EXPECT_GE(listener, 0);
+ ASSERT_GE(listener, 0);
/*
* Check that we get an ENOSYS when the listener is closed.
@@ -3376,7 +3418,7 @@ TEST(seccomp_get_notif_sizes)
{
struct seccomp_notif_sizes sizes;
- EXPECT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
+ ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif));
EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
}
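
The reworked EXPECT_SYSCALL_RETURN() above leans on the usual glibc syscall() convention: a negative kernel return value comes back as -1 with errno set to its absolute value, while a non-negative (possibly tracer-faked) value is returned directly. A small standalone sketch of that convention, not part of the patch:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long ret;

	/* A failing raw syscall: syscall() returns -1 and stores the negated
	 * kernel error in errno, which is why the macro compares errno
	 * against -(val) when val is negative.  (Assumes /nonexistent does
	 * not exist, so this fails with ENOENT.) */
	errno = 0;
	ret = syscall(__NR_openat, -1, "/nonexistent", 0);
	printf("openat: ret=%ld errno=%d (%s)\n", ret, errno, strerror(errno));

	/* A non-negative result (like the faked 45000 the tracer injects
	 * above) is returned as-is. */
	ret = syscall(__NR_getpid);
	printf("getpid: ret=%ld\n", ret);
	return 0;
}
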
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
index c5cc160948b3..c26d72e0166f 100644
--- a/tools/testing/selftests/tc-testing/.gitignore
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -3,3 +3,4 @@ __pycache__/
plugins/
*.xml
*.tap
+tdc_config_local.py
diff --git a/tools/testing/selftests/tc-testing/TdcPlugin.py b/tools/testing/selftests/tc-testing/TdcPlugin.py
index 1d9e279331eb..b980a565fa89 100644
--- a/tools/testing/selftests/tc-testing/TdcPlugin.py
+++ b/tools/testing/selftests/tc-testing/TdcPlugin.py
@@ -18,13 +18,13 @@ class TdcPlugin:
if self.args.verbose > 1:
print(' -- {}.post_suite'.format(self.sub_class))
- def pre_case(self, test_ordinal, testid, test_name):
+ def pre_case(self, testid, test_name, test_skip):
'''run commands before test_runner does one test'''
if self.args.verbose > 1:
print(' -- {}.pre_case'.format(self.sub_class))
self.args.testid = testid
self.args.test_name = test_name
- self.args.test_ordinal = test_ordinal
+ self.args.test_skip = test_skip
def post_case(self):
'''run commands after test_runner does one test'''
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
index 17b267dedbd9..a28571aff0e1 100644
--- a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
+++ b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
@@ -33,6 +33,11 @@ Each test case has required data:
id: A unique alphanumeric value to identify a particular test case
name: Descriptive name that explains the command under test
+skip: A completely optional key; if the corresponding value is "yes",
+ then tdc will not execute the test case in question. However, the
+ test case will still appear in the results output, marked as
+ skipped. This key can be placed anywhere inside the test case
+ at the top level.
category: A list of single-word descriptions covering what the command
under test is testing. Example: filter, actions, u32, gact, etc.
setup: The list of commands required to ensure the command under test
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
index e00c798de0bb..4bb866575ea1 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
@@ -102,6 +102,14 @@ class SubPlugin(TdcPlugin):
if not self.args.valgrind:
return
+ res = TestResult('{}-mem'.format(self.args.testid),
+ '{} memory leak check'.format(self.args.test_name))
+ if self.args.test_skip:
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Test case designated as skipped.')
+ self._add_results(res)
+ return
+
self.definitely_lost_re = re.compile(
r'definitely lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\sblocks', re.MULTILINE | re.DOTALL)
self.indirectly_lost_re = re.compile(
@@ -134,8 +142,6 @@ class SubPlugin(TdcPlugin):
nle_num = int(nle_mo.group(1))
mem_results = ''
- res = TestResult('{}-mem'.format(self.args.testid),
- '{} memory leak check'.format(self.args.test_name))
if (def_num > 0) or (ind_num > 0) or (pos_num > 0) or (nle_num > 0):
mem_results += 'not '
res.set_result(ResultState.fail)
@@ -146,12 +152,6 @@ class SubPlugin(TdcPlugin):
self._add_results(res)
- mem_results += 'ok {} - {}-mem # {}\n'.format(
- self.args.test_ordinal, self.args.testid, 'memory leak check')
- self._add_to_tap(mem_results)
- if mem_results.startswith('not '):
- print('{}'.format(content))
- self._add_to_tap(content)
def _add_results(self, res):
self._tsr.add_resultdata(res)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 637ea0219617..0da3545cabdb 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -17,7 +17,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -41,7 +41,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -65,7 +65,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -89,7 +89,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -113,7 +113,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -137,7 +137,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -161,7 +161,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 90",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -185,7 +185,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 90",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
"matchCount": "0",
"teardown": []
},
@@ -207,7 +207,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -231,7 +231,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -255,7 +255,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -279,7 +279,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -303,7 +303,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -327,7 +327,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -351,7 +351,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 99",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -375,7 +375,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 99",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
"matchCount": "0",
"teardown": []
},
@@ -397,7 +397,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -421,7 +421,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -445,7 +445,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -469,7 +469,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -493,7 +493,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -517,7 +517,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -541,7 +541,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -565,7 +565,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -589,7 +589,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
"matchCount": "0",
"teardown": []
},
@@ -611,7 +611,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -635,7 +635,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -659,7 +659,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 11",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -683,7 +683,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -707,7 +707,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 21",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -731,7 +731,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 21",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -739,7 +739,7 @@
},
{
"id": "fac3",
- "name": "Create valid ife encode action with index at 32-bit maximnum",
+ "name": "Create valid ife encode action with index at 32-bit maximum",
"category": [
"actions",
"ife"
@@ -755,7 +755,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4294967295",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -779,7 +779,7 @@
"cmdUnderTest": "$TC actions add action ife decode pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -803,7 +803,7 @@
"cmdUnderTest": "$TC actions add action ife decode pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -827,7 +827,7 @@
"cmdUnderTest": "$TC actions add action ife decode continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -851,7 +851,7 @@
"cmdUnderTest": "$TC actions add action ife decode drop index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -875,7 +875,7 @@
"cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -899,7 +899,7 @@
"cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -923,7 +923,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4294967295999",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
"matchCount": "0",
"teardown": []
},
@@ -945,7 +945,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
"matchCount": "0",
"teardown": []
},
@@ -967,7 +967,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -991,7 +991,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
"matchCount": "0",
"teardown": []
},
@@ -1013,7 +1013,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
"matchCount": "0",
"teardown": []
},
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index 10b2d894e436..e7e15a7336b6 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -82,35 +82,6 @@
]
},
{
- "id": "ba4e",
- "name": "Add tunnel_key set action with missing mandatory id parameter",
- "category": [
- "actions",
- "tunnel_key"
- ],
- "setup": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ],
- "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
- "expExitCode": "255",
- "verifyCmd": "$TC actions list action tunnel_key",
- "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
- "matchCount": "0",
- "teardown": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ]
- },
- {
"id": "a5e0",
"name": "Add tunnel_key set action with invalid src_ip parameter",
"category": [
@@ -634,7 +605,7 @@
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action tunnel_key index 4",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"matchCount": "1",
"teardown": [
"$TC actions flush action tunnel_key"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json b/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json
new file mode 100644
index 000000000000..9002714b1851
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json
@@ -0,0 +1,177 @@
+[
+ {
+ "id": "e41d",
+ "name": "Add 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 add"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/add* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "1000000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "6f52",
+ "name": "Delete 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 1000000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/del* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "c9da",
+ "name": "Replace 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 1000000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 replace"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/replace* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "1000000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "14be",
+ "name": "Concurrently replace same range of 100k flower filters from 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py -d $DEV2 $BATCH_DIR 100000 10 replace"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/replace* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "100000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "0c44",
+ "name": "Concurrently delete same range of 100k flower filters from 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py -d $DEV2 $BATCH_DIR 100000 10 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/del* -print | xargs -n 1 -P 10 $TC -f -b",
+ "expExitCode": "123",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "ab62",
+ "name": "Add and delete from same tp with 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py -x init_ $DEV2 $BATCH_DIR 100000 5 add",
+ "find $BATCH_DIR/init_* -print | xargs -n 1 -P 5 $TC -b",
+ "./tdc_multibatch.py -x par_ -a 500001 -m 5 $DEV2 $BATCH_DIR 100000 5 add",
+ "./tdc_multibatch.py -x par_ $DEV2 $BATCH_DIR 100000 5 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/par_* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "500000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "6e8f",
+ "name": "Replace and delete from same tp with 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py -x init_ $DEV2 $BATCH_DIR 100000 10 add",
+ "find $BATCH_DIR/init_* -print | xargs -n 1 -P 5 $TC -b",
+ "./tdc_multibatch.py -x par_ -a 500001 -m 5 $DEV2 $BATCH_DIR 100000 5 replace",
+ "./tdc_multibatch.py -x par_ $DEV2 $BATCH_DIR 100000 5 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/par_* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "500000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ }
+]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index e6e4ce80a726..5cee15659e5f 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -61,10 +61,10 @@ class PluginMgr:
for pgn_inst in reversed(self.plugin_instances):
pgn_inst.post_suite(index)
- def call_pre_case(self, test_ordinal, testid, test_name):
+ def call_pre_case(self, testid, test_name, *, test_skip=False):
for pgn_inst in self.plugin_instances:
try:
- pgn_inst.pre_case(test_ordinal, testid, test_name)
+ pgn_inst.pre_case(testid, test_name, test_skip)
except Exception as ee:
print('exception {} in call to pre_case for {} plugin'.
format(ee, pgn_inst.__class__))
@@ -192,10 +192,19 @@ def run_one_test(pm, args, index, tidx):
print("\t====================\n=====> ", end="")
print("Test " + tidx["id"] + ": " + tidx["name"])
+ if 'skip' in tidx:
+ if tidx['skip'] == 'yes':
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Test case designated as skipped.')
+ pm.call_pre_case(tidx['id'], tidx['name'], test_skip=True)
+ pm.call_post_execute()
+ return res
+
# populate NAMES with TESTID for this test
NAMES['TESTID'] = tidx['id']
- pm.call_pre_case(index, tidx['id'], tidx['name'])
+ pm.call_pre_case(tidx['id'], tidx['name'])
prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])
if (args.verbose > 0):
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
index 52fa539dc662..6a2bd2cf528e 100755
--- a/tools/testing/selftests/tc-testing/tdc_batch.py
+++ b/tools/testing/selftests/tc-testing/tdc_batch.py
@@ -13,6 +13,12 @@ parser.add_argument("device", help="device name")
parser.add_argument("file", help="batch file name")
parser.add_argument("-n", "--number", type=int,
help="how many lines in batch file")
+parser.add_argument(
+ "-a",
+ "--handle_start",
+ type=int,
+ default=1,
+ help="start handle range from (default: 1)")
parser.add_argument("-o", "--skip_sw",
help="skip_sw (offload), by default skip_hw",
action="store_true")
@@ -22,6 +28,21 @@ parser.add_argument("-s", "--share_action",
parser.add_argument("-p", "--prio",
help="all filters have different prio",
action="store_true")
+parser.add_argument(
+ "-e",
+ "--operation",
+ choices=['add', 'del', 'replace'],
+ default='add',
+ help="operation to perform on filters"
+ "(default: add filter)")
+parser.add_argument(
+ "-m",
+ "--mac_prefix",
+ type=int,
+ default=0,
+ choices=range(0, 256),
+ help="third byte of source MAC address of flower filter"
+ "(default: 0)")
args = parser.parse_args()
device = args.device
@@ -31,6 +52,8 @@ number = 1
if args.number:
number = args.number
+handle_start = args.handle_start
+
skip = "skip_hw"
if args.skip_sw:
skip = "skip_sw"
@@ -45,16 +68,43 @@ if args.prio:
if number > 0x4000:
number = 0x4000
+mac_prefix = args.mac_prefix
+
+def format_add_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter add dev {} {} protocol ip parent ffff: handle {} "
+ " flower {} src_mac {} dst_mac {} action drop {}".format(
+ device, prio, handle, skip, src_mac, dst_mac, share_action))
+
+
+def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter replace dev {} {} protocol ip parent ffff: handle {} "
+ " flower {} src_mac {} dst_mac {} action drop {}".format(
+ device, prio, handle, skip, src_mac, dst_mac, share_action))
+
+
+def format_del_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter del dev {} {} protocol ip parent ffff: handle {} "
+ "flower".format(device, prio, handle))
+
+
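+# Pick the command formatter matching the requested operation; the generator
+# loop below emits one formatted tc filter command per line.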
+formatter = format_add_filter
+if args.operation == "del":
+ formatter = format_del_filter
+elif args.operation == "replace":
+ formatter = format_rep_filter
+
index = 0
for i in range(0x100):
for j in range(0x100):
for k in range(0x100):
mac = ("{:02x}:{:02x}:{:02x}".format(i, j, k))
- src_mac = "e4:11:00:" + mac
+ src_mac = "e4:11:{:02x}:{}".format(mac_prefix, mac)
dst_mac = "e4:12:00:" + mac
- cmd = ("filter add dev {} {} protocol ip parent ffff: flower {} "
- "src_mac {} dst_mac {} action drop {}".format
- (device, prio, skip, src_mac, dst_mac, share_action))
+ cmd = formatter(device, prio, handle_start + index, skip, src_mac,
+ dst_mac, share_action)
file.write("{}\n".format(cmd))
index += 1
if index >= number:
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index 6d91e48c2625..942c70c041be 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -15,6 +15,7 @@ NAMES = {
'DEV1': 'v0p1',
'DEV2': '',
'BATCH_FILE': './batch.txt',
+ 'BATCH_DIR': 'tmp',
# Length of time in seconds to wait before terminating a command
'TIMEOUT': 12,
# Name of the namespace to use
diff --git a/tools/testing/selftests/tc-testing/tdc_multibatch.py b/tools/testing/selftests/tc-testing/tdc_multibatch.py
new file mode 100755
index 000000000000..5e7237952e49
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_multibatch.py
@@ -0,0 +1,65 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+"""
+tdc_multibatch.py - a thin wrapper over tdc_batch.py to generate multiple batch
+files
+
+Copyright (C) 2019 Vlad Buslov <vladbu@mellanox.com>
+"""
+
+import argparse
+import os
+
+parser = argparse.ArgumentParser(
+ description='TC multiple batch file generator')
+parser.add_argument("device", help="device name")
+parser.add_argument("dir", help="where to put batch files")
+parser.add_argument(
+ "num_filters", type=int, help="how many lines per batch file")
+parser.add_argument("num_files", type=int, help="how many batch files")
+parser.add_argument(
+ "operation",
+ choices=['add', 'del', 'replace'],
+ help="operation to perform on filters")
+parser.add_argument(
+ "-x",
+ "--file_prefix",
+ default="",
+ help="prefix for generated batch file names")
+parser.add_argument(
+ "-d",
+ "--duplicate_handles",
+ action="store_true",
+ help="duplicate filter handle range in all files")
+parser.add_argument(
+ "-a",
+ "--handle_start",
+ type=int,
+ default=1,
+ help="start handle range from (default: 1)")
+parser.add_argument(
+ "-m",
+ "--mac_prefix",
+ type=int,
+ default=0,
+ choices=range(0, 256),
+ help="add this value to third byte of source MAC address of flower filter"
+ "(default: 0)")
+args = parser.parse_args()
+
+device = args.device
+dir = args.dir
+file_prefix = args.file_prefix + args.operation + "_"
+num_filters = args.num_filters
+num_files = args.num_files
+operation = args.operation
+duplicate_handles = args.duplicate_handles
+handle = args.handle_start
+mac_prefix = args.mac_prefix
+
+for i in range(num_files):
+ file = dir + '/' + file_prefix + str(i)
+ os.system("./tdc_batch.py -n {} -a {} -e {} -m {} {} {}".format(
+ num_filters, handle, operation, i + mac_prefix, device, file))
+ if not duplicate_handles:
+ handle += num_filters
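+
+# Example (as used by the concurrency.json tests): generate ten batch files,
+# add_0 .. add_9, each holding 100k flower "add" commands with non-overlapping
+# handle ranges:
+#   ./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 add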
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index c02683cfb6c9..7656c7ce79d9 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -O3 -Wl,-no-as-needed -Wall
-LDFLAGS += -lrt -lpthread -lm
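+# Libraries belong in LDLIBS so that they end up after the object files on the
+# linker command line; listing them in LDFLAGS places them too early and can
+# leave symbols unresolved.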
+LDLIBS += -lrt -lpthread -lm
# these are all "safe" tests that don't modify
# system time or require escalated privileges
diff --git a/tools/testing/selftests/tmpfs/.gitignore b/tools/testing/selftests/tmpfs/.gitignore
new file mode 100644
index 000000000000..a96838fad74d
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/.gitignore
@@ -0,0 +1 @@
+/bug-link-o-tmpfile
diff --git a/tools/testing/selftests/tmpfs/Makefile b/tools/testing/selftests/tmpfs/Makefile
new file mode 100644
index 000000000000..953c81299181
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/Makefile
@@ -0,0 +1,7 @@
+CFLAGS += -Wall -O2
+CFLAGS += -D_GNU_SOURCE
+
+TEST_GEN_PROGS :=
+TEST_GEN_PROGS += bug-link-o-tmpfile
+
+include ../lib.mk
diff --git a/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c b/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c
new file mode 100644
index 000000000000..b5c3ddb90942
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Test that open(O_TMPFILE), linkat() doesn't screw accounting. */
+#include <errno.h>
+#include <sched.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+int main(void)
+{
+ int fd;
+
+ if (unshare(CLONE_NEWNS) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ fprintf(stderr, "error: unshare, errno %d\n", errno);
+ return 4;
+ }
+ fprintf(stderr, "error: unshare, errno %d\n", errno);
+ return 1;
+ }
+ if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
+ fprintf(stderr, "error: mount '/', errno %d\n", errno);
+ return 1;
+ }
+
+ /* Our heroes: 1 root inode, 1 O_TMPFILE inode, 1 permanent inode. */
+ if (mount(NULL, "/tmp", "tmpfs", 0, "nr_inodes=3") == -1) {
+ fprintf(stderr, "error: mount tmpfs, errno %d\n", errno);
+ return 1;
+ }
+
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_TMPFILE, 0600);
+ if (fd == -1) {
+ fprintf(stderr, "error: open 1, errno %d\n", errno);
+ return 1;
+ }
+ if (linkat(fd, "", AT_FDCWD, "/tmp/1", AT_EMPTY_PATH) == -1) {
+ fprintf(stderr, "error: linkat, errno %d\n", errno);
+ return 1;
+ }
+ close(fd);
+
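+ /*
+ * The second O_TMPFILE open is the real check: with nr_inodes=3 it only
+ * succeeds if the linkat() above did not leave the inode accounting
+ * over-charged.
+ */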
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_TMPFILE, 0600);
+ if (fd == -1) {
+ fprintf(stderr, "error: open 2, errno %d\n", errno);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index 880b96fc80d4..c0534e298b51 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -25,6 +25,7 @@ struct gup_benchmark {
__u64 size;
__u32 nr_pages_per_call;
__u32 flags;
+ __u64 expansion[10]; /* For future use */
};
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 584a91ae4a8f..951c507a27f7 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -211,4 +211,20 @@ else
echo "[PASS]"
fi
+echo "------------------------------------"
+echo "running vmalloc stability smoke test"
+echo "------------------------------------"
+./test_vmalloc.sh smoke
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
+
exit $exitcode
diff --git a/tools/testing/selftests/vm/test_vmalloc.sh b/tools/testing/selftests/vm/test_vmalloc.sh
new file mode 100644
index 000000000000..06d2bb109f06
--- /dev/null
+++ b/tools/testing/selftests/vm/test_vmalloc.sh
@@ -0,0 +1,176 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2018 Uladzislau Rezki (Sony) <urezki@gmail.com>
+#
+# This is a test script for the kernel test driver that analyses the vmalloc
+# allocator. It is essentially just a kernel module loader. You can specify
+# and pass different parameters in order to:
+# a) analyse the performance of vmalloc allocations;
+# b) stress-test and check the stability of the vmalloc subsystem.
+
+TEST_NAME="vmalloc"
+DRIVER="test_${TEST_NAME}"
+
+# Exit code is 1 if the test fails.
+exitcode=1
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+#
+# Static templates for performance, stressing and smoke tests.
+# It is also possible to pass any supported parameters manually.
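+# Each template below is passed verbatim to "modprobe test_vmalloc <params>"
+# by the corresponding run_*_check() helper.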
+#
+PERF_PARAM="single_cpu_test=1 sequential_test_order=1 test_repeat_count=3"
+SMOKE_PARAM="single_cpu_test=1 test_loop_count=10000 test_repeat_count=10"
+STRESS_PARAM="test_repeat_count=20"
+
+check_test_requirements()
+{
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo "$0: Must be run as root"
+ exit $ksft_skip
+ fi
+
+ if ! which modprobe > /dev/null 2>&1; then
+ echo "$0: You need modprobe installed"
+ exit $ksft_skip
+ fi
+
+ if ! modinfo $DRIVER > /dev/null 2>&1; then
+ echo "$0: You must have the following enabled in your kernel:"
+ echo "CONFIG_TEST_VMALLOC=m"
+ exit $ksft_skip
+ fi
+}
+
+run_performance_check()
+{
+ echo "Run performance tests to evaluate how fast vmalloc allocation is."
+ echo "It runs all test cases on one single CPU with sequential order."
+
+ modprobe $DRIVER $PERF_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Ccheck the kernel message buffer to see the summary."
+}
+
+run_stability_check()
+{
+ echo "Run stability tests. In order to stress vmalloc subsystem we run"
+ echo "all available test cases on all available CPUs simultaneously."
+ echo "It will take time, so be patient."
+
+ modprobe $DRIVER $STRESS_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+run_smoke_check()
+{
+ echo "Run smoke test. Note, this test provides basic coverage."
+ echo "Please check $0 output how it can be used"
+ echo "for deep performance analysis as well as stress testing."
+
+ modprobe $DRIVER $SMOKE_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+usage()
+{
+ echo -n "Usage: $0 [ performance ] | [ stress ] | | [ smoke ] | "
+ echo "manual parameters"
+ echo
+ echo "Valid tests and parameters:"
+ echo
+ modinfo $DRIVER
+ echo
+ echo "Example usage:"
+ echo
+ echo "# Shows help message"
+ echo "./${DRIVER}.sh"
+ echo
+ echo "# Runs 1 test(id_1), repeats it 5 times on all online CPUs"
+ echo "./${DRIVER}.sh run_test_mask=1 test_repeat_count=5"
+ echo
+ echo -n "# Runs 4 tests(id_1|id_2|id_4|id_16) on one CPU with "
+ echo "sequential order"
+ echo -n "./${DRIVER}.sh single_cpu_test=1 sequential_test_order=1 "
+ echo "run_test_mask=23"
+ echo
+ echo -n "# Runs all tests on all online CPUs, shuffled order, repeats "
+ echo "20 times"
+ echo "./${DRIVER}.sh test_repeat_count=20"
+ echo
+ echo "# Performance analysis"
+ echo "./${DRIVER}.sh performance"
+ echo
+ echo "# Stress testing"
+ echo "./${DRIVER}.sh stress"
+ echo
+ exit 0
+}
+
+function validate_passed_args()
+{
+ VALID_ARGS=`modinfo $DRIVER | awk '/parm:/ {print $2}' | sed 's/:.*//'`
+
+ #
+ # Something has been passed, check it.
+ #
+ for passed_arg in $@; do
+ key=${passed_arg//=*/}
+ val="${passed_arg:$((${#key}+1))}"
+ valid=0
+
+ for valid_arg in $VALID_ARGS; do
+ if [[ $key = $valid_arg ]] && [[ $val -gt 0 ]]; then
+ valid=1
+ break
+ fi
+ done
+
+ if [[ $valid -ne 1 ]]; then
+ echo "Error: key or value is not correct: ${key} $val"
+ exit $exitcode
+ fi
+ done
+}
+
+function run_manual_check()
+{
+ #
+ # Validate the passed parameters. If a wrong one is found,
+ # the script exits and does not execute further.
+ #
+ validate_passed_args $@
+
+ echo "Run the test with following parameters: $@"
+ modprobe $DRIVER $@ > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+function run_test()
+{
+ if [ $# -eq 0 ]; then
+ usage
+ else
+ if [[ "$1" = "performance" ]]; then
+ run_performance_check
+ elif [[ "$1" = "stress" ]]; then
+ run_stability_check
+ elif [[ "$1" = "smoke" ]]; then
+ run_smoke_check
+ else
+ run_manual_check $@
+ fi
+ fi
+}
+
+check_test_requirements
+run_test $@
+
+exit 0
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
index 50f7e9272481..bf1bb15b6fbe 100644
--- a/tools/testing/selftests/x86/mpx-mini-test.c
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
@@ -1503,7 +1503,7 @@ exit:
exit(20);
}
if (successes != total_nr_tests) {
- eprintf("ERROR: succeded fewer than number of tries (%d != %d)\n",
+ eprintf("ERROR: succeeded fewer than number of tries (%d != %d)\n",
successes, total_nr_tests);
exit(21);
}
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 460b4bdf4c1e..5d546dcdbc80 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
pkey_assert(err);
}
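+/*
+ * Fork and continue the test in the child; the parent exits immediately.
+ * Used below to verify that pkey allocation state is preserved across fork().
+ */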
+void become_child(void)
+{
+ pid_t forkret;
+
+ forkret = fork();
+ pkey_assert(forkret >= 0);
+ dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+ if (!forkret) {
+ /* in the child */
+ return;
+ }
+ exit(0);
+}
+
/* Assumes that all pkeys other than 'pkey' are unallocated */
void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
{
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
int nr_allocated_pkeys = 0;
int i;
- for (i = 0; i < NR_PKEYS*2; i++) {
+ for (i = 0; i < NR_PKEYS*3; i++) {
int new_pkey;
dprintf1("%s() alloc loop: %d\n", __func__, i);
new_pkey = alloc_pkey();
@@ -1152,21 +1167,27 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
if ((new_pkey == -1) && (errno == ENOSPC)) {
dprintf2("%s() failed to allocate pkey after %d tries\n",
__func__, nr_allocated_pkeys);
- break;
+ } else {
+ /*
+ * Ensure the number of successes never
+ * exceeds the number of keys supported
+ * in the hardware.
+ */
+ pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+ allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
}
- pkey_assert(nr_allocated_pkeys < NR_PKEYS);
- allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+
+ /*
+ * Make sure that allocation state is properly
+ * preserved across fork().
+ */
+ if (i == NR_PKEYS*2)
+ become_child();
}
dprintf3("%s()::%d\n", __func__, __LINE__);
/*
- * ensure it did not reach the end of the loop without
- * failure:
- */
- pkey_assert(i < NR_PKEYS*2);
-
- /*
* There are 16 pkeys supported in hardware. Three are
* allocated by the time we get here:
* 1. The default key (0)
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c
index 00a26a82fa98..97311333700e 100644
--- a/tools/testing/selftests/x86/unwind_vdso.c
+++ b/tools/testing/selftests/x86/unwind_vdso.c
@@ -44,7 +44,6 @@ int main()
#include <stdbool.h>
#include <sys/ptrace.h>
#include <sys/user.h>
-#include <sys/ucontext.h>
#include <link.h>
#include <sys/auxv.h>
#include <dlfcn.h>
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 89a2444c1df2..59e417ec3e13 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -6,7 +6,7 @@ VERSION = 1.0
BINDIR=usr/bin
WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
-override CFLAGS+= -O1 ${WARNFLAGS}
+override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
# Add "-fstack-protector" only if toolchain supports it.
override CFLAGS+= $(call cc-option,-fstack-protector-strong)
CC?= $(CROSS_COMPILE)gcc
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 1ff3a6c0367b..6f64b2b93234 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -133,7 +133,7 @@ static const char * const page_flag_names[] = {
[KPF_NOPAGE] = "n:nopage",
[KPF_KSM] = "x:ksm",
[KPF_THP] = "t:thp",
- [KPF_BALLOON] = "o:balloon",
+ [KPF_OFFLINE] = "o:offline",
[KPF_PGTABLE] = "g:pgtable",
[KPF_ZERO_PAGE] = "z:zero_page",
[KPF_IDLE] = "i:idle_page",
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index 18fc112b65cd..d3a8755c039c 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -5,7 +5,9 @@
* Example use:
* cat /sys/kernel/debug/page_owner > page_owner_full.txt
* grep -v ^PFN page_owner_full.txt > page_owner.txt
- * ./sort page_owner.txt sorted_page_owner.txt
+ * ./page_owner_sort page_owner.txt sorted_page_owner.txt
+ *
+ * See Documentation/vm/page_owner.rst
*/
#include <stdio.h>
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 334b16db0ebb..73818f1b2ef8 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -110,39 +110,42 @@ static void fatal(const char *x, ...)
static void usage(void)
{
printf("slabinfo 4/15/2011. (c) 2007 sgi/(c) 2011 Linux Foundation.\n\n"
- "slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
+ "slabinfo [-aADefhilnosrStTvz1LXBU] [N=K] [-dafzput] [slab-regexp]\n"
"-a|--aliases Show aliases\n"
"-A|--activity Most active slabs first\n"
- "-d<options>|--debug=<options> Set/Clear Debug options\n"
+ "-B|--Bytes Show size in bytes\n"
"-D|--display-active Switch line format to activity\n"
"-e|--empty Show empty slabs\n"
"-f|--first-alias Show first alias\n"
"-h|--help Show usage information\n"
"-i|--inverted Inverted list\n"
"-l|--slabs Show slabs\n"
+ "-L|--Loss Sort by loss\n"
"-n|--numa Show NUMA information\n"
- "-o|--ops Show kmem_cache_ops\n"
+ "-N|--lines=K Show the first K slabs\n"
+ "-o|--ops Show kmem_cache_ops\n"
+ "-r|--report Detailed report on single slabs\n"
"-s|--shrink Shrink slabs\n"
- "-r|--report Detailed report on single slabs\n"
"-S|--Size Sort by size\n"
"-t|--tracking Show alloc/free information\n"
"-T|--Totals Show summary information\n"
+ "-U|--Unreclaim Show unreclaimable slabs only\n"
"-v|--validate Validate slabs\n"
"-z|--zero Include empty slabs\n"
"-1|--1ref Single reference\n"
- "-N|--lines=K Show the first K slabs\n"
- "-L|--Loss Sort by loss\n"
"-X|--Xtotals Show extended summary information\n"
- "-B|--Bytes Show size in bytes\n"
- "-U|--Unreclaim Show unreclaimable slabs only\n"
- "\nValid debug options (FZPUT may be combined)\n"
- "a / A Switch on all debug options (=FZUP)\n"
- "- Switch off all debug options\n"
- "f / F Sanity Checks (SLAB_CONSISTENCY_CHECKS)\n"
- "z / Z Redzoning\n"
- "p / P Poisoning\n"
- "u / U Tracking\n"
- "t / T Tracing\n"
+
+ "\n"
+ "-d | --debug Switch off all debug options\n"
+ "-da | --debug=a Switch on all debug options (--debug=FZPU)\n"
+
+ "\n"
+ "-d[afzput] | --debug=[afzput]\n"
+ " f | F Sanity Checks (SLAB_CONSISTENCY_CHECKS)\n"
+ " z | Z Redzoning\n"
+ " p | P Poisoning\n"
+ " u | U Tracking\n"
+ " t | T Tracing\n"
);
}
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9e350fd34504..9c486fad3f9f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
/* Awaken to handle a signal, request we sleep again later. */
kvm_make_request(KVM_REQ_SLEEP, vcpu);
}
+
+ /*
+ * Make sure we will observe a potential reset request if we've
+ * observed a change to the power state. Pairs with the smp_wmb() in
+ * kvm_psci_vcpu_on().
+ */
+ smp_rmb();
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
vcpu_req_sleep(vcpu);
+ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+ kvm_reset_vcpu(vcpu);
+
/*
* Clear IRQ_PENDING requests that were made to guarantee
* that a VCPU sees new virtual interrupts.
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..e9d28a7ca673 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -27,10 +27,10 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
+#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>
-#include <asm/system_misc.h>
#include "trace.h"
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_pagesize = vma_kernel_pagesize(vma);
/*
- * PUD level may not exist for a VM but PMD is guaranteed to
- * exist.
+ * The stage2 page tables have a minimum of two levels (for arm64 see
+ * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
+ * use PMD_SIZE huge mappings (even when the PMD is folded into the PGD).
+ * As for PUD huge mappings, we must make sure that we have at least
+ * 3 levels, i.e., the PMD is not folded.
*/
if ((vma_pagesize == PMD_SIZE ||
- (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) &&
+ (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
!force_pte) {
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
}
@@ -1903,7 +1906,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
* For RAS the host kernel may handle this abort.
* There is no need to pass the error into the guest.
*/
- if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
+ if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
return 1;
if (unlikely(!is_iabt)) {
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..34d08ee63747 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
+ struct vcpu_reset_state *reset_state;
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
- struct swait_queue_head *wq;
unsigned long cpu_id;
- unsigned long context_id;
- phys_addr_t target_pc;
cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
return PSCI_RET_INVALID_PARAMS;
}
- target_pc = smccc_get_arg2(source_vcpu);
- context_id = smccc_get_arg3(source_vcpu);
+ reset_state = &vcpu->arch.reset_state;
- kvm_reset_vcpu(vcpu);
-
- /* Gracefully handle Thumb2 entry point */
- if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
- target_pc &= ~((phys_addr_t) 1);
- vcpu_set_thumb(vcpu);
- }
+ reset_state->pc = smccc_get_arg2(source_vcpu);
/* Propagate caller endianness */
- if (kvm_vcpu_is_be(source_vcpu))
- kvm_vcpu_set_be(vcpu);
+ reset_state->be = kvm_vcpu_is_be(source_vcpu);
- *vcpu_pc(vcpu) = target_pc;
/*
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general purpose registers are undefined upon CPU_ON.
*/
- smccc_set_retval(vcpu, context_id, 0, 0, 0);
- vcpu->arch.power_off = false;
- smp_mb(); /* Make sure the above is visible */
+ reset_state->r0 = smccc_get_arg3(source_vcpu);
+
+ WRITE_ONCE(reset_state->reset, true);
+ kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
- wq = kvm_arch_vcpu_wq(vcpu);
- swake_up_one(wq);
+ /*
+ * Make sure the reset request is observed if the change to
+ * power_state is observed.
+ */
+ smp_wmb();
+
+ vcpu->arch.power_off = false;
+ kvm_vcpu_wake_up(vcpu);
return PSCI_RET_SUCCESS;
}
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 07aa900bac56..1f62f2b8065d 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
return 0;
}
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
print_irq_state(s, irq, vcpu);
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(kvm, irq);
return 0;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index c0c0b88af1d5..3bdb31eaed64 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
INIT_LIST_HEAD(&dist->lpi_list_head);
- spin_lock_init(&dist->lpi_list_lock);
+ raw_spin_lock_init(&dist->lpi_list_lock);
}
/* CREATION */
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
irq->intid = i + VGIC_NR_PRIVATE_IRQS;
INIT_LIST_HEAD(&irq->ap_list);
- spin_lock_init(&irq->irq_lock);
+ raw_spin_lock_init(&irq->irq_lock);
irq->vcpu = NULL;
irq->target_vcpu = vcpu0;
kref_init(&irq->refcount);
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
- spin_lock_init(&vgic_cpu->ap_list_lock);
+ raw_spin_lock_init(&vgic_cpu->ap_list_lock);
/*
* Enable and configure all SGIs to be edge-triggered and
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
INIT_LIST_HEAD(&irq->ap_list);
- spin_lock_init(&irq->irq_lock);
+ raw_spin_lock_init(&irq->irq_lock);
irq->intid = i;
irq->vcpu = NULL;
irq->target_vcpu = vcpu;
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
irq->config = VGIC_CONFIG_LEVEL;
}
- /*
- * GICv3 can only be created via the KVM_DEVICE_CREATE API and
- * so we always know the emulation type at this point as it's
- * either explicitly configured as GICv3, or explicitly
- * configured as GICv2, or not configured yet which also
- * implies GICv2.
- */
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
irq->group = 1;
else
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
- int ret = 0, i;
+ int ret = 0, i, idx;
if (vgic_initialized(kvm))
return 0;
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
+ /* Initialize groups on CPUs created before the VGIC type was known */
+ kvm_for_each_vcpu(idx, vcpu, kvm) {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+ struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ irq->group = 1;
+ else
+ irq->group = 0;
+ }
+ }
+
if (vgic_has_its(kvm)) {
ret = vgic_v4_init(kvm);
if (ret)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index eb2a390a6c86..ab3f47745d9c 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
INIT_LIST_HEAD(&irq->lpi_list);
INIT_LIST_HEAD(&irq->ap_list);
- spin_lock_init(&irq->irq_lock);
+ raw_spin_lock_init(&irq->irq_lock);
irq->config = VGIC_CONFIG_EDGE;
kref_init(&irq->refcount);
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
irq->target_vcpu = vcpu;
irq->group = 1;
- spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
/*
* There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
dist->lpi_list_count++;
out_unlock:
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
/*
* We "cache" the configuration table entries in our struct vgic_irq's.
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
if (ret)
return ret;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
irq->priority = LPI_PROP_PRIORITY(prop);
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
}
}
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
if (irq->hw)
return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
if (!intids)
return -ENOMEM;
- spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (i == irq_count)
break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
continue;
intids[i++] = irq->intid;
}
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
*intid_ptr = intids;
return i;
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->target_vcpu = vcpu;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
if (irq->hw) {
struct its_vlpi_map map;
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
}
irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = pendmask & (1U << bit_nr);
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
return irq_set_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING, true);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
vgic_queue_irq_unlock(kvm, irq, flags);
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 738b65d2d0e7..b535fffc7400 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
irq->source |= 1U << source_vcpu->vcpu_id;
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
int target;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->targets = (val >> (i * 8)) & cpu_mask;
target = irq->targets ? __ffs(irq->targets) : 0;
irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
for (i = 0; i < len; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->source &= ~((val >> (i * 8)) & 0xff);
if (!irq->source)
irq->pending_latch = false;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
for (i = 0; i < len; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->source |= (val >> (i * 8)) & 0xff;
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
irq->pending_latch = true;
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
} else {
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
vgic_put_irq(vcpu->kvm, irq);
}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index b3d1f0985117..4a12322bf7df 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
if (!irq)
return;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* We only care about and preserve Aff0, Aff1 and Aff2. */
irq->mpidr = val & GENMASK(23, 0);
irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (test_bit(i, &val)) {
/*
* pending_latch is set irrespective of irq type
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
} else {
irq->pending_latch = false;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
vgic_put_irq(vcpu->kvm, irq);
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
/*
* An access targeting Group0 SGIs can only generate
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
irq->pending_latch = true;
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
} else {
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ceeda7e04a4d..7de42fba05b5 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->group = !!(val & BIT(i));
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->enabled = true;
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->enabled = false;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
unsigned long flags;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq_is_pending(irq))
value |= (1U << i);
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->hw)
vgic_hw_irq_spending(vcpu, irq, is_uaccess);
else
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->hw)
vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
else
irq->pending_latch = false;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
unsigned long flags;
struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->hw) {
vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
if (irq->active)
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
else
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
/*
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
for (i = 0; i < len; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* Narrow the priority range to what we actually support */
irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
continue;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (test_bit(i * 2 + 1, &val))
irq->config = VGIC_CONFIG_EDGE;
else
irq->config = VGIC_CONFIG_LEVEL;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
* restore irq config before line level.
*/
new_level = !!(val & (1U << i));
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->line_level = new_level;
if (new_level)
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
else
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
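
Several hunks above convert only the raw_spin_lock_irqsave() side of a critical section; the matching release happens inside vgic_queue_irq_unlock(), which, as the explicit raw_spin_unlock_irqrestore() in the paired else branches suggests, drops irq->irq_lock itself once it has decided whether to queue the interrupt. A minimal sketch of that calling convention, assuming the helper behaves as those call sites imply:

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	/* Releases irq->irq_lock (restoring flags) on every path, queued or not. */
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	/* No raw_spin_unlock_irqrestore() here: the helper already dropped the lock. */
	vgic_put_irq(vcpu->kvm, irq);
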
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892abd7dc..d91a8938aa7c 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
- spin_lock(&irq->irq_lock);
+ raw_spin_lock(&irq->irq_lock);
/* Always preserve the active bit */
irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
vgic_irq_set_phys_active(irq, false);
}
- spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd234ebe8..4ee0aeb9a905 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
if (!irq) /* An LPI could have been unmapped. */
continue;
- spin_lock(&irq->irq_lock);
+ raw_spin_lock(&irq->irq_lock);
/* Always preserve the active bit */
irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
vgic_irq_set_phys_active(irq, false);
}
- spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -347,9 +347,9 @@ retry:
status = val & (1 << bit_nr);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->target_vcpu != vcpu) {
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
goto retry;
}
irq->pending_latch = status;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 870b1185173b..abd9c7352677 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* When taking more than one ap_list_lock at the same time, always take the
* lowest numbered VCPU's ap_list_lock first, so:
* vcpuX->vcpu_id < vcpuY->vcpu_id:
- * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
*
* Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
* spinlocks for any lock that may be taken while injecting an interrupt.
*/
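
The two rules above pin down the nesting used throughout this file: the ap_list_locks are the outer locks, taken in ascending vcpu_id order and always with the irqsave variants, while the per-interrupt irq_lock nests inside them. Because the outer irqsave already disables interrupts, the inner lock can be taken with plain raw_spin_lock(). A minimal sketch of that nesting (a reference pattern, not a hunk from this patch):

	/* Sketch: the nesting implied by the comment above. */
	unsigned long flags;

	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);	/* outer, IRQs off */
	raw_spin_lock(&irq->irq_lock);						/* inner, IRQs already off */
	/* ... examine or re-queue the interrupt on this VCPU's ap_list ... */
	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
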
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
struct vgic_irq *irq = NULL;
unsigned long flags;
- spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
irq = NULL;
out_unlock:
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return irq;
}
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
if (irq->intid < VGIC_MIN_LPI)
return;
- spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
if (!kref_put(&irq->refcount, vgic_irq_release)) {
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return;
};
list_del(&irq->lpi_list);
dist->lpi_list_count--;
- spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
kfree(irq);
}
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
bool penda, pendb;
int ret;
- spin_lock(&irqa->irq_lock);
- spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&irqa->irq_lock);
+ raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
if (irqa->active || irqb->active) {
ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
/* Both pending and enabled, sort by priority */
ret = irqa->priority - irqb->priority;
out:
- spin_unlock(&irqb->irq_lock);
- spin_unlock(&irqa->irq_lock);
+ raw_spin_unlock(&irqb->irq_lock);
+ raw_spin_unlock(&irqa->irq_lock);
return ret;
}
@@ -325,7 +325,7 @@ retry:
* not need to be inserted into an ap_list and there is also
* no more work for us to do.
*/
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
/*
* We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
* We must unlock the irq lock to take the ap_list_lock where
* we are going to insert this new pending interrupt.
*/
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
/* someone can do stuff here, which we re-check below */
- spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
- spin_lock(&irq->irq_lock);
+ raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+ raw_spin_lock(&irq->irq_lock);
/*
* Did something change behind our backs?
@@ -367,10 +367,11 @@ retry:
*/
if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
- spin_unlock(&irq->irq_lock);
- spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+ raw_spin_unlock(&irq->irq_lock);
+ raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
+ flags);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
goto retry;
}
@@ -382,8 +383,8 @@ retry:
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
irq->vcpu = vcpu;
- spin_unlock(&irq->irq_lock);
- spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+ raw_spin_unlock(&irq->irq_lock);
+ raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
if (!irq)
return -EINVAL;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (!vgic_validate_injection(irq, level, owner)) {
/* Nothing to see here, move along... */
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(kvm, irq);
return 0;
}
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
BUG_ON(!irq);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return ret;
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
if (!irq->hw)
goto out;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->active = false;
irq->pending_latch = false;
irq->line_level = false;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
vgic_put_irq(vcpu->kvm, irq);
}
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
BUG_ON(!irq);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
kvm_vgic_unmap_irq(irq);
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return 0;
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
return -EINVAL;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->owner && irq->owner != owner)
ret = -EEXIST;
else
irq->owner = owner;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
return ret;
}
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
retry:
- spin_lock(&vgic_cpu->ap_list_lock);
+ raw_spin_lock(&vgic_cpu->ap_list_lock);
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
bool target_vcpu_needs_kick = false;
- spin_lock(&irq->irq_lock);
+ raw_spin_lock(&irq->irq_lock);
BUG_ON(vcpu != irq->vcpu);
@@ -616,7 +617,7 @@ retry:
*/
list_del(&irq->ap_list);
irq->vcpu = NULL;
- spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&irq->irq_lock);
/*
* This vgic_put_irq call matches the
@@ -631,14 +632,14 @@ retry:
if (target_vcpu == vcpu) {
/* We're on the right CPU */
- spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&irq->irq_lock);
continue;
}
/* This interrupt looks like it has to be migrated. */
- spin_unlock(&irq->irq_lock);
- spin_unlock(&vgic_cpu->ap_list_lock);
+ raw_spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&vgic_cpu->ap_list_lock);
/*
* Ensure locking order by always locking the smallest
@@ -652,10 +653,10 @@ retry:
vcpuB = vcpu;
}
- spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
- spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
- SINGLE_DEPTH_NESTING);
- spin_lock(&irq->irq_lock);
+ raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+ raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+ SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&irq->irq_lock);
/*
* If the affinity has been preserved, move the
@@ -675,9 +676,9 @@ retry:
target_vcpu_needs_kick = true;
}
- spin_unlock(&irq->irq_lock);
- spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
- spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+ raw_spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+ raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
if (target_vcpu_needs_kick) {
kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
goto retry;
}
- spin_unlock(&vgic_cpu->ap_list_lock);
+ raw_spin_unlock(&vgic_cpu->ap_list_lock);
}
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
int w;
- spin_lock(&irq->irq_lock);
+ raw_spin_lock(&irq->irq_lock);
/* GICv2 SGIs can count for more than one... */
w = vgic_irq_get_lr_count(irq);
- spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&irq->irq_lock);
count += w;
*multi_sgi |= (w > 1);
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
count = 0;
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
- spin_lock(&irq->irq_lock);
+ raw_spin_lock(&irq->irq_lock);
/*
* If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
* the AP list has been sorted already.
*/
if (multi_sgi && irq->priority > prio) {
- spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&irq->irq_lock);
break;
}
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
prio = irq->priority;
}
- spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&irq->irq_lock);
if (count == kvm_vgic_global_state.nr_lr) {
if (!list_is_last(&irq->ap_list,
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
- spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
vgic_flush_lr_state(vcpu);
- spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
if (can_access_vgic_from_kernel())
vgic_restore_state(vcpu);
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
vgic_get_vmcr(vcpu, &vmcr);
- spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+ raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
- spin_lock(&irq->irq_lock);
+ raw_spin_lock(&irq->irq_lock);
pending = irq_is_pending(irq) && irq->enabled &&
!irq->active &&
irq->priority < vmcr.pmr;
- spin_unlock(&irq->irq_lock);
+ raw_spin_unlock(&irq->irq_lock);
if (pending)
break;
}
- spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+ raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
return pending;
}
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
return false;
irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
- spin_lock_irqsave(&irq->irq_lock, flags);
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
map_is_active = irq->hw && irq->active;
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return map_is_active;
}
-
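
None of the call-site changes above compile unless the lock fields themselves are raw spinlocks, so the conversion implies matching changes to the declarations and initializers (the header hunks are not part of this excerpt). The point of raw_spinlock_t here is that it never becomes a sleeping lock under PREEMPT_RT, which these locks cannot afford since, per the comment at the top of vgic.c above, they are taken while injecting interrupts from ISR context. A sketch of what the declarations have to look like for the code above:

/* Sketch only: the field types the raw_spin_*() calls above require.
 * dist->lpi_list_lock and vgic_cpu->ap_list_lock need the same type change.
 */
struct vgic_irq {
	raw_spinlock_t	irq_lock;	/* was spinlock_t before the conversion */
	/* ... remaining fields unchanged ... */
};

static void example_irq_init(struct vgic_irq *irq)
{
	raw_spin_lock_init(&irq->irq_lock);
}
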
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1f888a103f78..d237d3350a99 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1227,9 +1227,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int as_id, id, n;
+ int as_id, id;
gfn_t offset;
- unsigned long i;
+ unsigned long i, n;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
@@ -1249,6 +1249,11 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
return -ENOENT;
n = kvm_dirty_bitmap_bytes(memslot);
+
+ if (log->first_page > memslot->npages ||
+ log->num_pages > memslot->npages - log->first_page)
+ return -EINVAL;
+
*flush = false;
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
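
The added check is the overflow-safe spelling of "first_page + num_pages must not exceed the slot size": because first_page is validated against npages first, the subtraction npages - first_page cannot wrap, so the second comparison is well defined even for hostile 64-bit values, whereas a naive first_page + num_pages > npages could wrap around and pass. A standalone illustration of the same shape, with invented names:

#include <stdbool.h>
#include <stdint.h>

/*
 * True if the half-open range [first, first + count) fits inside a region of
 * 'total' pages, written so that no addition can overflow.
 */
static bool range_in_bounds(uint64_t first, uint64_t count, uint64_t total)
{
	return first <= total && count <= total - first;
}

For example, first = UINT64_MAX fails the first comparison before the subtraction is ever evaluated, and count = UINT64_MAX with a small first fails the second.
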
@@ -2995,8 +3000,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
if (ops->init)
ops->init(dev);
+ kvm_get_kvm(kvm);
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
+ kvm_put_kvm(kvm);
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
mutex_unlock(&kvm->lock);
@@ -3004,7 +3011,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
return ret;
}
- kvm_get_kvm(kvm);
cd->fd = ret;
return 0;
}
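
The reordering above follows the usual rule for anon-inode backed objects: take the reference that the new file will own before anon_inode_getfd() installs the descriptor, because the fd is visible to userspace the moment it is installed, and drop that reference by hand if installation fails, since the later error path can no longer assume nobody has seen the device. A minimal sketch of the ordering with hypothetical names (my_dev, my_dev_fops), not the actual kvm_ioctl_create_device() code:

static const struct file_operations my_dev_fops;	/* hypothetical fops */

static int publish_dev(struct kvm *kvm, struct my_dev *dev)
{
	int fd;

	kvm_get_kvm(kvm);		/* reference owned by the fd we are about to create */
	fd = anon_inode_getfd("my-dev", &my_dev_fops, dev, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		kvm_put_kvm(kvm);	/* fd was never installed, drop its reference */
		return fd;
	}
	return fd;			/* from here on, release happens via the fd */
}
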
@@ -4038,7 +4044,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
}
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
- if (kvm->debugfs_dentry) {
+ if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
if (p) {
@@ -4078,7 +4084,7 @@ static int kvm_suspend(void)
static void kvm_resume(void)
{
if (kvm_usage_count) {
- WARN_ON(raw_spin_is_locked(&kvm_count_lock));
+ lockdep_assert_held(&kvm_count_lock);
hardware_enable_nolock(NULL);
}
}